[kernel] r10324 - in dists/trunk/linux-2.6/debian/patches: bugfix/all series

Maximilian Attems maks at alioth.debian.org
Thu Jan 31 09:04:17 UTC 2008


Author: maks
Date: Thu Jan 31 09:04:04 2008
New Revision: 10324

Log:
update to patch-2.6.24-git9

kvm merge, rumors say that xen runs under kvm.
nuke all merged firewire patches.
powerpc merge, maybe it will compile again..
conflicting mkvmlinuz support patch needs update.


Added:
   dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git9
      - copied, changed from r10323, /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git8
Removed:
   dists/trunk/linux-2.6/debian/patches/bugfix/all/fw-ohci-dyn-buffers-dma-descriptors.patch
   dists/trunk/linux-2.6/debian/patches/bugfix/all/fw-sbp2-incr-login-orb-reply-timeout.patch
   dists/trunk/linux-2.6/debian/patches/bugfix/all/fw-sbp2-skip-unnecessary-logout.patch
   dists/trunk/linux-2.6/debian/patches/bugfix/all/fw-sbp2-try-increase-reconnect_hold.patch
   dists/trunk/linux-2.6/debian/patches/bugfix/all/git-ieee1394.patch
   dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git8
   dists/trunk/linux-2.6/debian/patches/bugfix/all/ppc-vio_find_name-compile_fix.patch
Modified:
   dists/trunk/linux-2.6/debian/patches/series/1~experimental.1
   dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra

Copied: dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git9 (from r10323, /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git8)
==============================================================================
--- /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git8	(original)
+++ dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git9	Thu Jan 31 09:04:04 2008
@@ -57,6 +57,25 @@
  
  ###
  # The build process is as follows (targets):
+diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
+index aa38cc5..77436d7 100644
+--- a/Documentation/DocBook/kernel-api.tmpl
++++ b/Documentation/DocBook/kernel-api.tmpl
+@@ -419,7 +419,13 @@ X!Edrivers/pnp/system.c
+ 
+   <chapter id="blkdev">
+      <title>Block Devices</title>
+-!Eblock/ll_rw_blk.c
++!Eblock/blk-core.c
++!Eblock/blk-map.c
++!Iblock/blk-sysfs.c
++!Eblock/blk-settings.c
++!Eblock/blk-exec.c
++!Eblock/blk-barrier.c
++!Eblock/blk-tag.c
+   </chapter>
+ 
+   <chapter id="chrdev">
 diff --git a/Documentation/DocBook/s390-drivers.tmpl b/Documentation/DocBook/s390-drivers.tmpl
 index 254e769..3d2f31b 100644
 --- a/Documentation/DocBook/s390-drivers.tmpl
@@ -1852,7 +1871,7 @@
 +$ find . -name Kconfig\* | xargs grep -ns "depends on.*=.*||.*=" | grep -v orig
 +
 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index c417877..5d171b7 100644
+index c417877..92c40d1 100644
 --- a/Documentation/kernel-parameters.txt
 +++ b/Documentation/kernel-parameters.txt
 @@ -34,6 +34,7 @@ parameter is applicable:
@@ -1934,7 +1953,15 @@
  	gdth=		[HW,SCSI]
  			See header of drivers/scsi/gdth.c.
  
-@@ -786,6 +817,16 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -686,6 +717,7 @@ and is between 256 and 4096 characters. It is defined in the file
+ 			See Documentation/isdn/README.HiSax.
+ 
+ 	hugepages=	[HW,X86-32,IA-64] Maximal number of HugeTLB pages.
++	hugepagesz=	[HW,IA-64,PPC] The size of the HugeTLB pages.
+ 
+ 	i8042.direct	[HW] Put keyboard port into non-translated mode
+ 	i8042.dumbkbd	[HW] Pretend that controller can only read data from
+@@ -786,6 +818,16 @@ and is between 256 and 4096 characters. It is defined in the file
  			for translation below 32 bit and if not available
  			then look in the higher range.
  
@@ -1951,7 +1978,7 @@
  	io7=		[HW] IO7 for Marvel based alpha systems
  			See comment before marvel_specify_io7 in
  			arch/alpha/kernel/core_marvel.c.
-@@ -1051,6 +1092,11 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1051,6 +1093,11 @@ and is between 256 and 4096 characters. It is defined in the file
  			Multi-Function General Purpose Timers on AMD Geode
  			platforms.
  
@@ -1963,7 +1990,7 @@
  	mga=		[HW,DRM]
  
  	mousedev.tap_time=
-@@ -1123,6 +1169,10 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1123,6 +1170,10 @@ and is between 256 and 4096 characters. It is defined in the file
  			of returning the full 64-bit number.
  			The default is to return 64-bit inode numbers.
  
@@ -1974,7 +2001,7 @@
  	nmi_watchdog=	[KNL,BUGS=X86-32] Debugging features for SMP kernels
  
  	no387		[BUGS=X86-32] Tells the kernel to use the 387 maths
-@@ -1147,6 +1197,8 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1147,6 +1198,8 @@ and is between 256 and 4096 characters. It is defined in the file
  
  	nodisconnect	[HW,SCSI,M68K] Disables SCSI disconnects.
  
@@ -1983,7 +2010,7 @@
  	noexec		[IA-64]
  
  	noexec		[X86-32,X86-64]
-@@ -1157,6 +1209,8 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1157,6 +1210,8 @@ and is between 256 and 4096 characters. It is defined in the file
  			register save and restore. The kernel will only save
  			legacy floating-point registers on task switch.
  
@@ -1992,7 +2019,7 @@
  	nohlt		[BUGS=ARM]
  
  	no-hlt		[BUGS=X86-32] Tells the kernel that the hlt
-@@ -1593,7 +1647,13 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1593,7 +1648,13 @@ and is between 256 and 4096 characters. It is defined in the file
  			Format: <vendor>:<model>:<flags>
  			(flags are integer value)
  
@@ -2007,7 +2034,7 @@
  
  	scsi_mod.scan=	[SCSI] sync (default) scans SCSI busses as they are
  			discovered.  async scans them in kernel threads,
-@@ -1960,6 +2020,11 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1960,6 +2021,11 @@ and is between 256 and 4096 characters. It is defined in the file
  			vdso=1: enable VDSO (default)
  			vdso=0: disable VDSO mapping
  
@@ -2618,6 +2645,130 @@
  
  3. Configuring Kprobes
  
+diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
+index 9b0e322..6c8a238 100644
+--- a/Documentation/lguest/lguest.c
++++ b/Documentation/lguest/lguest.c
+@@ -79,6 +79,9 @@ static void *guest_base;
+ /* The maximum guest physical address allowed, and maximum possible. */
+ static unsigned long guest_limit, guest_max;
+ 
++/* a per-cpu variable indicating whose vcpu is currently running */
++static unsigned int __thread cpu_id;
++
+ /* This is our list of devices. */
+ struct device_list
+ {
+@@ -153,6 +156,9 @@ struct virtqueue
+ 	void (*handle_output)(int fd, struct virtqueue *me);
+ };
+ 
++/* Remember the arguments to the program so we can "reboot" */
++static char **main_args;
++
+ /* Since guest is UP and we don't run at the same time, we don't need barriers.
+  * But I include them in the code in case others copy it. */
+ #define wmb()
+@@ -554,7 +560,7 @@ static void wake_parent(int pipefd, int lguest_fd)
+ 			else
+ 				FD_CLR(-fd - 1, &devices.infds);
+ 		} else /* Send LHREQ_BREAK command. */
+-			write(lguest_fd, args, sizeof(args));
++			pwrite(lguest_fd, args, sizeof(args), cpu_id);
+ 	}
+ }
+ 
+@@ -1489,7 +1495,9 @@ static void setup_block_file(const char *filename)
+ 
+ 	/* Create stack for thread and run it */
+ 	stack = malloc(32768);
+-	if (clone(io_thread, stack + 32768, CLONE_VM, dev) == -1)
++	/* SIGCHLD - We don't "wait" for our cloned thread, so prevent it from
++	 * becoming a zombie. */
++	if (clone(io_thread, stack + 32768,  CLONE_VM | SIGCHLD, dev) == -1)
+ 		err(1, "Creating clone");
+ 
+ 	/* We don't need to keep the I/O thread's end of the pipes open. */
+@@ -1499,7 +1507,21 @@ static void setup_block_file(const char *filename)
+ 	verbose("device %u: virtblock %llu sectors\n",
+ 		devices.device_num, cap);
+ }
+-/* That's the end of device setup. */
++/* That's the end of device setup. :*/
++
++/* Reboot */
++static void __attribute__((noreturn)) restart_guest(void)
++{
++	unsigned int i;
++
++	/* Closing pipes causes the waker thread and io_threads to die, and
++	 * closing /dev/lguest cleans up the Guest.  Since we don't track all
++	 * open fds, we simply close everything beyond stderr. */
++	for (i = 3; i < FD_SETSIZE; i++)
++		close(i);
++	execv(main_args[0], main_args);
++	err(1, "Could not exec %s", main_args[0]);
++}
+ 
+ /*L:220 Finally we reach the core of the Launcher, which runs the Guest, serves
+  * its input and output, and finally, lays it to rest. */
+@@ -1511,7 +1533,8 @@ static void __attribute__((noreturn)) run_guest(int lguest_fd)
+ 		int readval;
+ 
+ 		/* We read from the /dev/lguest device to run the Guest. */
+-		readval = read(lguest_fd, &notify_addr, sizeof(notify_addr));
++		readval = pread(lguest_fd, &notify_addr,
++				sizeof(notify_addr), cpu_id);
+ 
+ 		/* One unsigned long means the Guest did HCALL_NOTIFY */
+ 		if (readval == sizeof(notify_addr)) {
+@@ -1521,16 +1544,23 @@ static void __attribute__((noreturn)) run_guest(int lguest_fd)
+ 		/* ENOENT means the Guest died.  Reading tells us why. */
+ 		} else if (errno == ENOENT) {
+ 			char reason[1024] = { 0 };
+-			read(lguest_fd, reason, sizeof(reason)-1);
++			pread(lguest_fd, reason, sizeof(reason)-1, cpu_id);
+ 			errx(1, "%s", reason);
++		/* ERESTART means that we need to reboot the guest */
++		} else if (errno == ERESTART) {
++			restart_guest();
+ 		/* EAGAIN means the Waker wanted us to look at some input.
+ 		 * Anything else means a bug or incompatible change. */
+ 		} else if (errno != EAGAIN)
+ 			err(1, "Running guest failed");
+ 
++		/* Only service input on thread for CPU 0. */
++		if (cpu_id != 0)
++			continue;
++
+ 		/* Service input, then unset the BREAK to release the Waker. */
+ 		handle_input(lguest_fd);
+-		if (write(lguest_fd, args, sizeof(args)) < 0)
++		if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
+ 			err(1, "Resetting break");
+ 	}
+ }
+@@ -1571,6 +1601,12 @@ int main(int argc, char *argv[])
+ 	/* If they specify an initrd file to load. */
+ 	const char *initrd_name = NULL;
+ 
++	/* Save the args: we "reboot" by execing ourselves again. */
++	main_args = argv;
++	/* We don't "wait" for the children, so prevent them from becoming
++	 * zombies. */
++	signal(SIGCHLD, SIG_IGN);
++
+ 	/* First we initialize the device list.  Since console and network
+ 	 * device receive input from a file descriptor, we keep an fdset
+ 	 * (infds) and the maximum fd number (max_infd) with the head of the
+@@ -1582,6 +1618,7 @@ int main(int argc, char *argv[])
+ 	devices.lastdev = &devices.dev;
+ 	devices.next_irq = 1;
+ 
++	cpu_id = 0;
+ 	/* We need to know how much memory so we can set up the device
+ 	 * descriptor and memory pages for the devices as we parse the command
+ 	 * line.  So we quickly look through the arguments to find the amount
 diff --git a/Documentation/m68k/kernel-options.txt b/Documentation/m68k/kernel-options.txt
 index 248589e..c93bed6 100644
 --- a/Documentation/m68k/kernel-options.txt
@@ -4017,6 +4168,935 @@
  device's directory:
  id - displays a list of support EISA IDs
  options - displays possible resource configurations
+diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
+index 94a3c57..3be84aa 100644
+--- a/Documentation/powerpc/00-INDEX
++++ b/Documentation/powerpc/00-INDEX
+@@ -28,3 +28,6 @@ sound.txt
+ 	- info on sound support under Linux/PPC
+ zImage_layout.txt
+ 	- info on the kernel images for Linux/PPC
++qe_firmware.txt
++	- describes the layout of firmware binaries for the Freescale QUICC
++	  Engine and the code that parses and uploads the microcode therein.
+diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
+index e9a3cb1..b5e46ef 100644
+--- a/Documentation/powerpc/booting-without-of.txt
++++ b/Documentation/powerpc/booting-without-of.txt
+@@ -52,7 +52,11 @@ Table of Contents
+       i) Freescale QUICC Engine module (QE)
+       j) CFI or JEDEC memory-mapped NOR flash
+       k) Global Utilities Block
+-      l) Xilinx IP cores
++      l) Freescale Communications Processor Module
++      m) Chipselect/Local Bus
++      n) 4xx/Axon EMAC ethernet nodes
++      o) Xilinx IP cores
++      p) Freescale Synchronous Serial Interface
+ 
+   VII - Specifying interrupt information for devices
+     1) interrupts property
+@@ -671,10 +675,10 @@ device or bus to be described by the device tree.
+ 
+ In general, the format of an address for a device is defined by the
+ parent bus type, based on the #address-cells and #size-cells
+-property. In the absence of such a property, the parent's parent
+-values are used, etc... The kernel requires the root node to have
+-those properties defining addresses format for devices directly mapped
+-on the processor bus.
++properties.  Note that the parent's parent definitions of #address-cells
++and #size-cells are not inherited, so every node with children must specify
++them.  The kernel requires the root node to have those properties defining
++addresses format for devices directly mapped on the processor bus.
+ 
+ Those 2 properties define 'cells' for representing an address and a
+ size. A "cell" is a 32-bit number. For example, if both contain 2
+@@ -711,13 +715,14 @@ define a bus type with a more complex address format, including things
+ like address space bits, you'll have to add a bus translator to the
+ prom_parse.c file of the recent kernels for your bus type.
+ 
+-The "reg" property only defines addresses and sizes (if #size-cells
+-is non-0) within a given bus. In order to translate addresses upward
++The "reg" property only defines addresses and sizes (if #size-cells is
++non-0) within a given bus. In order to translate addresses upward
+ (that is into parent bus addresses, and possibly into CPU physical
+ addresses), all busses must contain a "ranges" property. If the
+ "ranges" property is missing at a given level, it's assumed that
+-translation isn't possible. The format of the "ranges" property for a
+-bus is a list of:
++translation isn't possible, i.e., the registers are not visible on the
++parent bus.  The format of the "ranges" property for a bus is a list
++of:
+ 
+ 	bus address, parent bus address, size
+ 
+@@ -735,6 +740,10 @@ fit in a single 32-bit word.   New 32-bit powerpc boards should use a
+ 1/1 format, unless the processor supports physical addresses greater
+ than 32-bits, in which case a 2/1 format is recommended.
+ 
++Alternatively, the "ranges" property may be empty, indicating that the
++registers are visible on the parent bus using an identity mapping
++translation.  In other words, the parent bus address space is the same
++as the child bus address space.
+ 
+ 2) Note about "compatible" properties
+ -------------------------------------
+@@ -1218,16 +1227,14 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+   Required properties:
+     - reg : Offset and length of the register set for the device
+-    - device_type : Should be "mdio"
+     - compatible : Should define the compatible device type for the
+-      mdio.  Currently, this is most likely to be "gianfar"
++      mdio.  Currently, this is most likely to be "fsl,gianfar-mdio"
+ 
+   Example:
+ 
+ 	mdio at 24520 {
+ 		reg = <24520 20>;
+-		device_type = "mdio"; 
+-		compatible = "gianfar";
++		compatible = "fsl,gianfar-mdio";
+ 
+ 		ethernet-phy at 0 {
+ 			......
+@@ -1254,6 +1261,10 @@ platforms are moved over to use the flattened-device-tree model.
+       services interrupts for this device.
+     - phy-handle : The phandle for the PHY connected to this ethernet
+       controller.
++    - fixed-link : <a b c d e> where a is emulated phy id - choose any,
++      but unique to the all specified fixed-links, b is duplex - 0 half,
++      1 full, c is link speed - d#10/d#100/d#1000, d is pause - 0 no
++      pause, 1 pause, e is asym_pause - 0 no asym_pause, 1 asym_pause.
+ 
+   Recommended properties:
+ 
+@@ -1408,7 +1419,6 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    Example multi port host USB controller device node :
+ 	usb at 22000 {
+-	        device_type = "usb";
+ 		compatible = "fsl-usb2-mph";
+ 		reg = <22000 1000>;
+ 		#address-cells = <1>;
+@@ -1422,7 +1432,6 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    Example dual role USB controller device node :
+ 	usb at 23000 {
+-		device_type = "usb";
+ 		compatible = "fsl-usb2-dr";
+ 		reg = <23000 1000>;
+ 		#address-cells = <1>;
+@@ -1534,7 +1543,7 @@ platforms are moved over to use the flattened-device-tree model.
+    i) Root QE device
+ 
+    Required properties:
+-   - device_type : should be "qe";
++   - compatible : should be "fsl,qe";
+    - model : precise model of the QE, Can be "QE", "CPM", or "CPM2"
+    - reg : offset and length of the device registers.
+    - bus-frequency : the clock frequency for QUICC Engine.
+@@ -1548,8 +1557,7 @@ platforms are moved over to use the flattened-device-tree model.
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		#interrupt-cells = <2>;
+-		device_type = "qe";
+-		model = "QE";
++		compatible = "fsl,qe";
+ 		ranges = <0 e0100000 00100000>;
+ 		reg = <e0100000 480>;
+ 		brg-frequency = <0>;
+@@ -1560,8 +1568,8 @@ platforms are moved over to use the flattened-device-tree model.
+    ii) SPI (Serial Peripheral Interface)
+ 
+    Required properties:
+-   - device_type : should be "spi".
+-   - compatible : should be "fsl_spi".
++   - cell-index : SPI controller index.
++   - compatible : should be "fsl,spi".
+    - mode : the SPI operation mode, it can be "cpu" or "cpu-qe".
+    - reg : Offset and length of the register set for the device
+    - interrupts : <a b> where a is the interrupt number and b is a
+@@ -1574,8 +1582,8 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    Example:
+ 	spi at 4c0 {
+-		device_type = "spi";
+-		compatible = "fsl_spi";
++		cell-index = <0>;
++		compatible = "fsl,spi";
+ 		reg = <4c0 40>;
+ 		interrupts = <82 0>;
+ 		interrupt-parent = <700>;
+@@ -1586,7 +1594,6 @@ platforms are moved over to use the flattened-device-tree model.
+    iii) USB (Universal Serial Bus Controller)
+ 
+    Required properties:
+-   - device_type : should be "usb".
+    - compatible : could be "qe_udc" or "fhci-hcd".
+    - mode : the could be "host" or "slave".
+    - reg : Offset and length of the register set for the device
+@@ -1600,7 +1607,6 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    Example(slave):
+ 	usb at 6c0 {
+-		device_type = "usb";
+ 		compatible = "qe_udc";
+ 		reg = <6c0 40>;
+ 		interrupts = <8b 0>;
+@@ -1613,7 +1619,7 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    Required properties:
+    - device_type : should be "network", "hldc", "uart", "transparent"
+-    "bisync" or "atm".
++     "bisync", "atm", or "serial".
+    - compatible : could be "ucc_geth" or "fsl_atm" and so on.
+    - model : should be "UCC".
+    - device-id : the ucc number(1-8), corresponding to UCCx in UM.
+@@ -1626,6 +1632,26 @@ platforms are moved over to use the flattened-device-tree model.
+    - interrupt-parent : the phandle for the interrupt controller that
+      services interrupts for this device.
+    - pio-handle : The phandle for the Parallel I/O port configuration.
++   - port-number : for UART drivers, the port number to use, between 0 and 3.
++     This usually corresponds to the /dev/ttyQE device, e.g. <0> = /dev/ttyQE0.
++     The port number is added to the minor number of the device.  Unlike the
++     CPM UART driver, the port-number is required for the QE UART driver.
++   - soft-uart : for UART drivers, if specified this means the QE UART device
++     driver should use "Soft-UART" mode, which is needed on some SOCs that have
++     broken UART hardware.  Soft-UART is provided via a microcode upload.
++   - rx-clock-name: the UCC receive clock source
++     "none": clock source is disabled
++     "brg1" through "brg16": clock source is BRG1-BRG16, respectively
++     "clk1" through "clk24": clock source is CLK1-CLK24, respectively
++   - tx-clock-name: the UCC transmit clock source
++     "none": clock source is disabled
++     "brg1" through "brg16": clock source is BRG1-BRG16, respectively
++     "clk1" through "clk24": clock source is CLK1-CLK24, respectively
++   The following two properties are deprecated.  rx-clock has been replaced
++   with rx-clock-name, and tx-clock has been replaced with tx-clock-name.
++   Drivers that currently use the deprecated properties should continue to
++   do so, in order to support older device trees, but they should be updated
++   to check for the new properties first.
+    - rx-clock : represents the UCC receive clock source.
+      0x00 : clock source is disabled;
+      0x1~0x10 : clock source is BRG1~BRG16 respectively;
+@@ -1754,7 +1780,7 @@ platforms are moved over to use the flattened-device-tree model.
+    vii) Multi-User RAM (MURAM)
+ 
+    Required properties:
+-   - device_type : should be "muram".
++   - compatible : should be "fsl,qe-muram", "fsl,cpm-muram".
+    - mode : the could be "host" or "slave".
+    - ranges : Should be defined as specified in 1) to describe the
+       translation of MURAM addresses.
+@@ -1764,14 +1790,42 @@ platforms are moved over to use the flattened-device-tree model.
+    Example:
+ 
+ 	muram at 10000 {
+-		device_type = "muram";
++		compatible = "fsl,qe-muram", "fsl,cpm-muram";
+ 		ranges = <0 00010000 0000c000>;
+ 
+ 		data-only at 0{
++			compatible = "fsl,qe-muram-data",
++				     "fsl,cpm-muram-data";
+ 			reg = <0 c000>;
+ 		};
+ 	};
+ 
++   viii) Uploaded QE firmware
++
++	 If a new firmware has been uploaded to the QE (usually by the
++	 boot loader), then a 'firmware' child node should be added to the QE
++	 node.  This node provides information on the uploaded firmware that
++	 device drivers may need.
++
++	 Required properties:
++	 - id: The string name of the firmware.  This is taken from the 'id'
++	       member of the qe_firmware structure of the uploaded firmware.
++	       Device drivers can search this string to determine if the
++	       firmware they want is already present.
++	 - extended-modes: The Extended Modes bitfield, taken from the
++			   firmware binary.  It is a 64-bit number represented
++			   as an array of two 32-bit numbers.
++	 - virtual-traps: The virtual traps, taken from the firmware binary.
++			  It is an array of 8 32-bit numbers.
++
++	 Example:
++
++		firmware {
++			id = "Soft-UART";
++			extended-modes = <0 0>;
++			virtual-traps = <0 0 0 0 0 0 0 0>;
++		}
++
+    j) CFI or JEDEC memory-mapped NOR flash
+ 
+     Flash chips (Memory Technology Devices) are often used for solid state
+@@ -2075,8 +2129,7 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    Example:
+ 	localbus at f0010100 {
+-		compatible = "fsl,mpc8272ads-localbus",
+-		             "fsl,mpc8272-localbus",
++		compatible = "fsl,mpc8272-localbus",
+ 		             "fsl,pq2-localbus";
+ 		#address-cells = <2>;
+ 		#size-cells = <1>;
+@@ -2254,7 +2307,7 @@ platforms are moved over to use the flattened-device-tree model.
+ 			   available.
+ 			   For Axon: 0x0000012a
+ 
+-   l) Xilinx IP cores
++   o) Xilinx IP cores
+ 
+    The Xilinx EDK toolchain ships with a set of IP cores (devices) for use
+    in Xilinx Spartan and Virtex FPGAs.  The devices cover the whole range
+@@ -2276,7 +2329,7 @@ platforms are moved over to use the flattened-device-tree model.
+    properties of the device node.  In general, device nodes for IP-cores
+    will take the following form:
+ 
+-	(name)@(base-address) {
++	(name): (generic-name)@(base-address) {
+ 		compatible = "xlnx,(ip-core-name)-(HW_VER)"
+ 			     [, (list of compatible devices), ...];
+ 		reg = <(baseaddr) (size)>;
+@@ -2286,6 +2339,9 @@ platforms are moved over to use the flattened-device-tree model.
+ 		xlnx,(parameter2) = <(int-value)>;
+ 	};
+ 
++	(generic-name):   an open firmware-style name that describes the
++			generic class of device.  Preferably, this is one word, such
++			as 'serial' or 'ethernet'.
+ 	(ip-core-name):	the name of the ip block (given after the BEGIN
+ 			directive in system.mhs).  Should be in lowercase
+ 			and all underscores '_' converted to dashes '-'.
+@@ -2294,9 +2350,9 @@ platforms are moved over to use the flattened-device-tree model.
+ 			dropped from the parameter name, the name is converted
+ 			to lowercase and all underscore '_' characters are
+ 			converted to dashes '-'.
+-	(baseaddr):	the C_BASEADDR parameter.
++	(baseaddr):	the baseaddr parameter value (often named C_BASEADDR).
+ 	(HW_VER):	from the HW_VER parameter.
+-	(size):		equals C_HIGHADDR - C_BASEADDR + 1
++	(size):		the address range size (often C_HIGHADDR - C_BASEADDR + 1).
+ 
+    Typically, the compatible list will include the exact IP core version
+    followed by an older IP core version which implements the same
+@@ -2326,11 +2382,11 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    becomes the following device tree node:
+ 
+-	opb-uartlite-0 at ec100000 {
++	opb_uartlite_0: serial at ec100000 {
+ 		device_type = "serial";
+ 		compatible = "xlnx,opb-uartlite-1.00.b";
+ 		reg = <ec100000 10000>;
+-		interrupt-parent = <&opb-intc>;
++		interrupt-parent = <&opb_intc_0>;
+ 		interrupts = <1 0>; // got this from the opb_intc parameters
+ 		current-speed = <d#115200>;	// standard serial device prop
+ 		clock-frequency = <d#50000000>;	// standard serial device prop
+@@ -2339,16 +2395,19 @@ platforms are moved over to use the flattened-device-tree model.
+ 		xlnx,use-parity = <0>;
+ 	};
+ 
+-   Some IP cores actually implement 2 or more logical devices.  In this case,
+-   the device should still describe the whole IP core with a single node
+-   and add a child node for each logical device.  The ranges property can
+-   be used to translate from parent IP-core to the registers of each device.
+-   (Note: this makes the assumption that both logical devices have the same
+-   bus binding.  If this is not true, then separate nodes should be used for
+-   each logical device).  The 'cell-index' property can be used to enumerate
+-   logical devices within an IP core.  For example, the following is the
+-   system.mhs entry for the dual ps2 controller found on the ml403 reference
+-   design.
++   Some IP cores actually implement 2 or more logical devices.  In
++   this case, the device should still describe the whole IP core with
++   a single node and add a child node for each logical device.  The
++   ranges property can be used to translate from parent IP-core to the
++   registers of each device.  In addition, the parent node should be
++   compatible with the bus type 'xlnx,compound', and should contain
++   #address-cells and #size-cells, as with any other bus.  (Note: this
++   makes the assumption that both logical devices have the same bus
++   binding.  If this is not true, then separate nodes should be used
++   for each logical device).  The 'cell-index' property can be used to
++   enumerate logical devices within an IP core.  For example, the
++   following is the system.mhs entry for the dual ps2 controller found
++   on the ml403 reference design.
+ 
+ 	BEGIN opb_ps2_dual_ref
+ 		PARAMETER INSTANCE = opb_ps2_dual_ref_0
+@@ -2370,21 +2429,24 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    It would result in the following device tree nodes:
+ 
+-	opb_ps2_dual_ref_0 at a9000000 {
++	opb_ps2_dual_ref_0: opb-ps2-dual-ref at a9000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "xlnx,compound";
+ 		ranges = <0 a9000000 2000>;
+ 		// If this device had extra parameters, then they would
+ 		// go here.
+ 		ps2 at 0 {
+ 			compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
+ 			reg = <0 40>;
+-			interrupt-parent = <&opb-intc>;
++			interrupt-parent = <&opb_intc_0>;
+ 			interrupts = <3 0>;
+ 			cell-index = <0>;
+ 		};
+ 		ps2 at 1000 {
+ 			compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
+ 			reg = <1000 40>;
+-			interrupt-parent = <&opb-intc>;
++			interrupt-parent = <&opb_intc_0>;
+ 			interrupts = <3 0>;
+ 			cell-index = <0>;
+ 		};
+@@ -2447,17 +2509,18 @@ platforms are moved over to use the flattened-device-tree model.
+ 
+    Gives this device tree (some properties removed for clarity):
+ 
+-	plb-v34-0 {
++	plb at 0 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
++		compatible = "xlnx,plb-v34-1.02.a";
+ 		device_type = "ibm,plb";
+ 		ranges; // 1:1 translation
+ 
+-		plb-bram-if-cntrl-0 at ffff0000 {
++		plb_bram_if_cntrl_0: bram at ffff0000 {
+ 			reg = <ffff0000 10000>;
+ 		}
+ 
+-		opb-v20-0 {
++		opb at 20000000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			ranges = <20000000 20000000 20000000
+@@ -2465,11 +2528,11 @@ platforms are moved over to use the flattened-device-tree model.
+ 				  80000000 80000000 40000000
+ 				  c0000000 c0000000 20000000>;
+ 
+-			opb-uart16550-0 at a0000000 {
++			opb_uart16550_0: serial at a0000000 {
+ 				reg = <a00000000 2000>;
+ 			};
+ 
+-			opb-intc-0 at d1000fc0 {
++			opb_intc_0: interrupt-controller at d1000fc0 {
+ 				reg = <d1000fc0 20>;
+ 			};
+ 		};
+@@ -2514,6 +2577,204 @@ platforms are moved over to use the flattened-device-tree model.
+       Requred properties:
+        - current-speed : Baud rate of uartlite
+ 
++    p) Freescale Synchronous Serial Interface
++
++       The SSI is a serial device that communicates with audio codecs.  It can
++       be programmed in AC97, I2S, left-justified, or right-justified modes.
++
++       Required properties:
++       - compatible	  : compatible list, containing "fsl,ssi"
++       - cell-index	  : the SSI, <0> = SSI1, <1> = SSI2, and so on
++       - reg		  : offset and length of the register set for the device
++       - interrupts	  : <a b> where a is the interrupt number and b is a
++                            field that represents an encoding of the sense and
++			    level information for the interrupt.  This should be
++			    encoded based on the information in section 2)
++			    depending on the type of interrupt controller you
++			    have.
++       - interrupt-parent : the phandle for the interrupt controller that
++                            services interrupts for this device.
++       - fsl,mode	  : the operating mode for the SSI interface
++			    "i2s-slave" - I2S mode, SSI is clock slave
++			    "i2s-master" - I2S mode, SSI is clock master
++			    "lj-slave" - left-justified mode, SSI is clock slave
++			    "lj-master" - l.j. mode, SSI is clock master
++			    "rj-slave" - right-justified mode, SSI is clock slave
++			    "rj-master" - r.j., SSI is clock master
++			    "ac97-slave" - AC97 mode, SSI is clock slave
++			    "ac97-master" - AC97 mode, SSI is clock master
++
++       Optional properties:
++       - codec-handle	  : phandle to a 'codec' node that defines an audio
++			    codec connected to this SSI.  This node is typically
++			    a child of an I2C or other control node.
++
++       Child 'codec' node required properties:
++       - compatible	  : compatible list, contains the name of the codec
++
++       Child 'codec' node optional properties:
++       - clock-frequency  : The frequency of the input clock, which typically
++                            comes from an on-board dedicated oscillator.
++
++    * Freescale 83xx DMA Controller
++
++    Freescale PowerPC 83xx have on chip general purpose DMA controllers.
++
++    Required properties:
++
++    - compatible        : compatible list, contains 2 entries, first is
++			 "fsl,CHIP-dma", where CHIP is the processor
++			 (mpc8349, mpc8360, etc.) and the second is
++			 "fsl,elo-dma"
++    - reg               : <registers mapping for DMA general status reg>
++    - ranges 		: Should be defined as specified in 1) to describe the
++			  DMA controller channels.
++    - cell-index        : controller index.  0 for controller @ 0x8100
++    - interrupts        : <interrupt mapping for DMA IRQ>
++    - interrupt-parent  : optional, if needed for interrupt mapping
++
++
++    - DMA channel nodes:
++	    - compatible        : compatible list, contains 2 entries, first is
++				 "fsl,CHIP-dma-channel", where CHIP is the processor
++				 (mpc8349, mpc8350, etc.) and the second is
++				 "fsl,elo-dma-channel"
++	    - reg               : <registers mapping for channel>
++	    - cell-index        : dma channel index starts at 0.
++
++    Optional properties:
++	    - interrupts        : <interrupt mapping for DMA channel IRQ>
++				  (on 83xx this is expected to be identical to
++				   the interrupts property of the parent node)
++	    - interrupt-parent  : optional, if needed for interrupt mapping
++
++  Example:
++	dma at 82a8 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8349-dma", "fsl,elo-dma";
++		reg = <82a8 4>;
++		ranges = <0 8100 1a4>;
++		interrupt-parent = <&ipic>;
++		interrupts = <47 8>;
++		cell-index = <0>;
++		dma-channel at 0 {
++			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
++			cell-index = <0>;
++			reg = <0 80>;
++		};
++		dma-channel at 80 {
++			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
++			cell-index = <1>;
++			reg = <80 80>;
++		};
++		dma-channel at 100 {
++			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
++			cell-index = <2>;
++			reg = <100 80>;
++		};
++		dma-channel at 180 {
++			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
++			cell-index = <3>;
++			reg = <180 80>;
++		};
++	};
++
++   * Freescale 85xx/86xx DMA Controller
++
++    Freescale PowerPC 85xx/86xx have on chip general purpose DMA controllers.
++
++    Required properties:
++
++    - compatible        : compatible list, contains 2 entries, first is
++			 "fsl,CHIP-dma", where CHIP is the processor
++			 (mpc8540, mpc8560, etc.) and the second is
++			 "fsl,eloplus-dma"
++    - reg               : <registers mapping for DMA general status reg>
++    - cell-index        : controller index.  0 for controller @ 0x21000,
++                                             1 for controller @ 0xc000
++    - ranges 		: Should be defined as specified in 1) to describe the
++			  DMA controller channels.
++
++    - DMA channel nodes:
++	    - compatible        : compatible list, contains 2 entries, first is
++				 "fsl,CHIP-dma-channel", where CHIP is the processor
++				 (mpc8540, mpc8560, etc.) and the second is
++				 "fsl,eloplus-dma-channel"
++	    - cell-index        : dma channel index starts at 0.
++	    - reg               : <registers mapping for channel>
++	    - interrupts        : <interrupt mapping for DMA channel IRQ>
++	    - interrupt-parent  : optional, if needed for interrupt mapping
++
++  Example:
++	dma at 21300 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8540-dma", "fsl,eloplus-dma";
++		reg = <21300 4>;
++		ranges = <0 21100 200>;
++		cell-index = <0>;
++		dma-channel at 0 {
++			compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
++			reg = <0 80>;
++			cell-index = <0>;
++			interrupt-parent = <&mpic>;
++			interrupts = <14 2>;
++		};
++		dma-channel at 80 {
++			compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
++			reg = <80 80>;
++			cell-index = <1>;
++			interrupt-parent = <&mpic>;
++			interrupts = <15 2>;
++		};
++		dma-channel at 100 {
++			compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
++			reg = <100 80>;
++			cell-index = <2>;
++			interrupt-parent = <&mpic>;
++			interrupts = <16 2>;
++		};
++		dma-channel at 180 {
++			compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
++			reg = <180 80>;
++			cell-index = <3>;
++			interrupt-parent = <&mpic>;
++			interrupts = <17 2>;
++		};
++	};
++
++    * Freescale 8xxx/3.0 Gb/s SATA nodes
++
++    SATA nodes are defined to describe on-chip Serial ATA controllers.
++    Each SATA port should have its own node.
++
++    Required properties:
++    - compatible        : compatible list, contains 2 entries, first is
++			 "fsl,CHIP-sata", where CHIP is the processor
++			 (mpc8315, mpc8379, etc.) and the second is
++			 "fsl,pq-sata"
++    - interrupts        : <interrupt mapping for SATA IRQ>
++    - cell-index        : controller index.
++                              1 for controller @ 0x18000
++                              2 for controller @ 0x19000
++                              3 for controller @ 0x1a000
++                              4 for controller @ 0x1b000
++
++    Optional properties:
++    - interrupt-parent  : optional, if needed for interrupt mapping
++    - reg               : <registers mapping>
++
++   Example:
++
++	sata at 18000 {
++		compatible = "fsl,mpc8379-sata", "fsl,pq-sata";
++		reg = <0x18000 0x1000>;
++		cell-index = <1>;
++		interrupts = <2c 8>;
++		interrupt-parent = < &ipic >;
++        };
++
+    More devices will be defined as this spec matures.
+ 
+ VII - Specifying interrupt information for devices
+diff --git a/Documentation/powerpc/qe_firmware.txt b/Documentation/powerpc/qe_firmware.txt
+new file mode 100644
+index 0000000..8962664
+--- /dev/null
++++ b/Documentation/powerpc/qe_firmware.txt
+@@ -0,0 +1,295 @@
++	   Freescale QUICC Engine Firmware Uploading
++	   -----------------------------------------
++
++(c) 2007 Timur Tabi <timur at freescale.com>,
++    Freescale Semiconductor
++
++Table of Contents
++=================
++
++  I - Software License for Firmware
++
++  II - Microcode Availability
++
++  III - Description and Terminology
++
++  IV - Microcode Programming Details
++
++  V - Firmware Structure Layout
++
++  VI - Sample Code for Creating Firmware Files
++
++Revision Information
++====================
++
++November 30, 2007: Rev 1.0 - Initial version
++
++I - Software License for Firmware
++=================================
++
++Each firmware file comes with its own software license.  For information on
++the particular license, please see the license text that is distributed with
++the firmware.
++
++II - Microcode Availability
++===========================
++
++Firmware files are distributed through various channels.  Some are available on
++http://opensource.freescale.com.  For other firmware files, please contact
++your Freescale representative or your operating system vendor.
++
++III - Description and Terminology
++================================
++
++In this document, the term 'microcode' refers to the sequence of 32-bit
++integers that compose the actual QE microcode.
++
++The term 'firmware' refers to a binary blob that contains the microcode as
++well as other data that
++
++	1) describes the microcode's purpose
++	2) describes how and where to upload the microcode
++	3) specifies the values of various registers
++	4) includes additional data for use by specific device drivers
++
++Firmware files are binary files that contain only a firmware.
++
++IV - Microcode Programming Details
++===================================
++
++The QE architecture allows for only one microcode present in I-RAM for each
++RISC processor.  To replace any current microcode, a full QE reset (which
++disables the microcode) must be performed first.
++
++QE microcode is uploaded using the following procedure:
++
++1) The microcode is placed into I-RAM at a specific location, using the
++   IRAM.IADD and IRAM.IDATA registers.
++
++2) The CERCR.CIR bit is set to 0 or 1, depending on whether the firmware
++   needs split I-RAM.  Split I-RAM is only meaningful for SOCs that have
++   QEs with multiple RISC processors, such as the 8360.  Splitting the I-RAM
++   allows each processor to run a different microcode, effectively creating an
++   asymmetric multiprocessing (AMP) system.
++
++3) The TIBCR trap registers are loaded with the addresses of the trap handlers
++   in the microcode.
++
++4) The RSP.ECCR register is programmed with the value provided.
++
++5) If necessary, device drivers that need the virtual traps and extended mode
++   data will use them.
++
++Virtual Microcode Traps
++
++These virtual traps are conditional branches in the microcode.  They are
++"soft" provisional traps introduced in the ROM code to enable higher
++flexibility and to save h/w traps.  They should be activated when new features
++are enabled or an issue is being fixed in the RAM package that uses them.
++This data structure signals the microcode which virtual traps are active.
++
++This structure contains 6 words that the application should copy to the
++specific PRAM offsets defined below.  This table describes the structure.
++
++	---------------------------------------------------------------
++	| Offset in |                  | Destination Offset | Size of |
++	|   array   |     Protocol     |   within PRAM      | Operand |
++	--------------------------------------------------------------|
++	|     0     | Ethernet         |      0xF8          | 4 bytes |
++	|           | interworking     |                    |         |
++	---------------------------------------------------------------
++	|     4     | ATM              |      0xF8          | 4 bytes |
++	|           | interworking     |                    |         |
++	---------------------------------------------------------------
++	|     8     | PPP              |      0xF8          | 4 bytes |
++	|           | interworking     |                    |         |
++	---------------------------------------------------------------
++	|     12    | Ethernet RX      |      0x22          | 1 byte  |
++	|           | Distributor Page |                    |         |
++	---------------------------------------------------------------
++	|     16    | ATM Global       |      0x28          | 1 byte  |
++	|           | Params Table     |                    |         |
++	---------------------------------------------------------------
++	|     20    | Insert Frame     |      0xF8          | 4 bytes |
++	---------------------------------------------------------------
++
++
++Extended Modes
++
++This is a double word bit array (64 bits) that defines special functionality
++which has an impact on the software drivers.  Each bit has its own impact
++and has special instructions for the s/w associated with it.  This structure is
++described in this table:
++
++	-----------------------------------------------------------------------
++	| Bit #  |     Name     |   Description                               |
++	-----------------------------------------------------------------------
++	|   0    | General      | Indicates that prior to each host command   |
++	|        | push command | given by the application, the software must |
++	|        |              | assert a special host command (push command)|
++	|        |              | CECDR = 0x00800000.                         |
++	|        |              | CECR = 0x01c1000f.                          |
++	-----------------------------------------------------------------------
++	|   1    | UCC ATM      | Indicates that after issuing ATM RX INIT    |
++	|        | RX INIT      | command, the host must issue another special|
++	|        | push command | command (push command) and immediately      |
++	|        |              | following that re-issue the ATM RX INIT     |
++	|        |              | command. (This makes the sequence of        |
++	|        |              | initializing the ATM receiver a sequence of |
++	|        |              | three host commands)                        |
++	|        |              | CECDR = 0x00800000.                         |
++	|        |              | CECR = 0x01c1000f.                          |
++	-----------------------------------------------------------------------
++	|   2    | Add/remove   | Indicates that following the specific host  |
++	|        | command      | command: "Add/Remove entry in Hash Lookup   |
++	|        | validation   | Table" used in Interworking setup, the user |
++	|        |              | must issue another command.                 |
++	|        |              | CECDR = 0xce000003.                         |
++	|        |              | CECR = 0x01c10f58.                          |
++	-----------------------------------------------------------------------
++	|   3    | General push | Indicates that the s/w has to initialize    |
++	|        | command      | some pointers in the Ethernet thread pages  |
++	|        |              | which are used when Header Compression is   |
++	|        |              | activated.  The full details of these       |
++	|        |              | pointers is located in the software drivers.|
++	-----------------------------------------------------------------------
++	|   4    | General push | Indicates that after issuing Ethernet TX    |
++	|        | command      | INIT command, user must issue this command  |
++	|        |              | for each SNUM of Ethernet TX thread.        |
++	|        |              | CECDR = 0x00800003.                         |
++	|        |              | CECR = 0x7'b{0}, 8'b{Enet TX thread SNUM},  |
++	|        |              |        1'b{1}, 12'b{0}, 4'b{1}              |
++	-----------------------------------------------------------------------
++	| 5 - 31 |     N/A      | Reserved, set to zero.                      |
++	-----------------------------------------------------------------------
++
++V - Firmware Structure Layout
++==============================
++
++QE microcode from Freescale is typically provided as a header file.  This
++header file contains macros that define the microcode binary itself as well as
++some other data used in uploading that microcode.  The format of these files
++does not lend itself to simple inclusion into other code.  Hence,
++the need for a more portable format.  This section defines that format.
++
++Instead of distributing a header file, the microcode and related data are
++embedded into a binary blob.  This blob is passed to the qe_upload_firmware()
++function, which parses the blob and performs everything necessary to upload
++the microcode.
++
++All integers are big-endian.  See the comments for function
++qe_upload_firmware() for up-to-date implementation information.
++
++This structure supports versioning, where the version of the structure is
++embedded into the structure itself.  To ensure forward and backwards
++compatibility, all versions of the structure must use the same 'qe_header'
++structure at the beginning.
++
++'header' (type: struct qe_header):
++	The 'length' field is the size, in bytes, of the entire structure,
++	including all the microcode embedded in it, as well as the CRC (if
++	present).
++
++	The 'magic' field is an array of three bytes that contains the letters
++	'Q', 'E', and 'F'.  This is an identifier that indicates that this
++	structure is a QE Firmware structure.
++
++	The 'version' field is a single byte that indicates the version of this
++	structure.  If the layout of the structure should ever need to be
++	changed to add support for additional types of microcode, then the
++	version number should also be changed.
++
++The 'id' field is a null-terminated string (suitable for printing) that
++identifies the firmware.
++
++The 'count' field indicates the number of 'microcode' structures.  There
++must be one and only one 'microcode' structure for each RISC processor.
++Therefore, this field also represents the number of RISC processors for this
++SOC.
++
++The 'soc' structure contains the SOC numbers and revisions used to match
++the microcode to the SOC itself.  Normally, the microcode loader should
++check the data in this structure with the SOC number and revisions, and
++only upload the microcode if there's a match.  However, this check is not
++made on all platforms.
++
++Although it is not recommended, you can specify '0' in the soc.model
++field to skip matching SOCs altogether.
++
++The 'model' field is a 16-bit number that matches the actual SOC. The
++'major' and 'minor' fields are the major and minor revision numbers,
++respectively, of the SOC.
++
++For example, to match the 8323, revision 1.0:
++     soc.model = 8323
++     soc.major = 1
++     soc.minor = 0
++
++'padding' is necessary for structure alignment.  This field ensures that the
++'extended_modes' field is aligned on a 64-bit boundary.
++
++'extended_modes' is a bitfield that defines special functionality which has an
++impact on the device drivers.  Each bit has its own impact and has special
++instructions for the driver associated with it.  This field is stored in
++the QE library and available to any driver that calls qe_get_firmware_info().
++
++'vtraps' is an array of 8 words that contain virtual trap values for each
++virtual trap.  As with 'extended_modes', this field is stored in the QE
++library and available to any driver that calls qe_get_firmware_info().
++
++'microcode' (type: struct qe_microcode):
++	For each RISC processor there is one 'microcode' structure.  The first
++	'microcode' structure is for the first RISC, and so on.
++
++	The 'id' field is a null-terminated string suitable for printing that
++	identifies this particular microcode.
++
++	'traps' is an array of 16 words that contain hardware trap values
++	for each of the 16 traps.  If trap[i] is 0, then this particular
++	trap is to be ignored (i.e. not written to TIBCR[i]).  The entire value
++	is written as-is to the TIBCR[i] register, so be sure to set the EN
++	and T_IBP bits if necessary.
++
++	'eccr' is the value to program into the ECCR register.
++
++	'iram_offset' is the offset into IRAM to start writing the
++	microcode.
++
++	'count' is the number of 32-bit words in the microcode.
++
++	'code_offset' is the offset, in bytes, from the beginning of this
++	structure where the microcode itself can be found.  The first
++	microcode binary should be located immediately after the 'microcode'
++	array.
++
++	'major', 'minor', and 'revision' are the major, minor, and revision
++	version numbers, respectively, of the microcode.  If all values are 0,
++	then these fields are ignored.
++
++	'reserved' is necessary for structure alignment.  Since 'microcode'
++	is an array, the 64-bit 'extended_modes' field needs to be aligned
++	on a 64-bit boundary, and this can only happen if the size of
++	'microcode' is a multiple of 8 bytes.  To ensure that, we add
++	'reserved'.
++
++After the last microcode is a 32-bit CRC.  It can be calculated using
++this algorithm:
++
++u32 crc32(const u8 *p, unsigned int len)
++{
++	unsigned int i;
++	u32 crc = 0;
++
++	while (len--) {
++	   crc ^= *p++;
++	   for (i = 0; i < 8; i++)
++		   crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
++	}
++	return crc;
++}
++
++VI - Sample Code for Creating Firmware Files
++============================================
++
++A Python program that creates firmware binaries from the header files normally
++distributed by Freescale can be found on http://opensource.freescale.com.
 diff --git a/Documentation/s390/CommonIO b/Documentation/s390/CommonIO
 index 86320aa..8fbc0a8 100644
 --- a/Documentation/s390/CommonIO
@@ -7354,7 +8434,7 @@
 +参考Satyam Sharma,Johannes Stezenbach,Jesper Juhl,Heikki Orsila,
 +H. Peter Anvin,Philipp Hahn和Stefan Richter的意见改善了本档。
 diff --git a/MAINTAINERS b/MAINTAINERS
-index 2340cfb..ba05e80 100644
+index 2340cfb..2d5ff3e 100644
 --- a/MAINTAINERS
 +++ b/MAINTAINERS
 @@ -646,6 +646,17 @@ M:	ecashin at coraid.com
@@ -7425,6 +8505,24 @@
  CALGARY x86-64 IOMMU
  P:	Muli Ben-Yehuda
  M:	muli at il.ibm.com
+@@ -1577,7 +1595,7 @@ P:	Alexander Viro
+ M:	viro at zeniv.linux.org.uk
+ S:	Maintained
+ 
+-FIREWIRE SUBSYSTEM
++FIREWIRE SUBSYSTEM (drivers/firewire, <linux/firewire*.h>)
+ P:	Kristian Hoegsberg, Stefan Richter
+ M:	krh at redhat.com, stefanr at s5r6.in-berlin.de
+ L:	linux1394-devel at lists.sourceforge.net
+@@ -1899,7 +1917,7 @@ L:	linux-ide at vger.kernel.org
+ L:	linux-scsi at vger.kernel.org
+ S:	Orphan
+ 
+-IEEE 1394 SUBSYSTEM
++IEEE 1394 SUBSYSTEM (drivers/ieee1394)
+ P:	Ben Collins
+ M:	ben.collins at ubuntu.com
+ P:	Stefan Richter
 @@ -2029,10 +2047,12 @@ W:	http://sourceforge.net/projects/e1000/
  S:	Supported
  
@@ -59397,7 +60495,7 @@
  		. = ALIGN(4);
  	___initramfs_start = .;
 diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
-index bef4772..5a41e75 100644
+index bef4772..c9307c9 100644
 --- a/arch/ia64/Kconfig
 +++ b/arch/ia64/Kconfig
 @@ -42,6 +42,11 @@ config MMU
@@ -59416,7 +60514,7 @@
  	bool
  	default y
  
-+config ARCH_SETS_UP_PER_CPU_AREA
++config HAVE_SETUP_PER_CPU_AREA
 +	def_bool y
 +
  config DMI
@@ -59438,6 +60536,18 @@
  	/*
  	 * very simple loop because we get interrupts only when receiving
  	 */
+diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
+index 6ef9b52..7661bb0 100644
+--- a/arch/ia64/hp/sim/simscsi.c
++++ b/arch/ia64/hp/sim/simscsi.c
+@@ -360,7 +360,6 @@ static struct scsi_host_template driver_template = {
+ 	.max_sectors		= 1024,
+ 	.cmd_per_lun		= SIMSCSI_REQ_QUEUE_LEN,
+ 	.use_clustering		= DISABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ 
+ static int __init
 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
 index 3e35987..4f0c30c 100644
 --- a/arch/ia64/ia32/binfmt_elf32.c
@@ -59453,18 +60563,24 @@
  	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
  
 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
-index 1962879..e699eb6 100644
+index 1962879..e58f436 100644
 --- a/arch/ia64/kernel/module.c
 +++ b/arch/ia64/kernel/module.c
-@@ -947,7 +947,7 @@ percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
- {
- 	unsigned int i;
- 	for_each_possible_cpu(i) {
--		memcpy(pcpudst + __per_cpu_offset[i], src, size);
-+		memcpy(pcpudst + per_cpu_offset(i), src, size);
- 	}
+@@ -940,14 +940,3 @@ module_arch_cleanup (struct module *mod)
+ 	if (mod->arch.core_unw_table)
+ 		unw_remove_unwind_table(mod->arch.core_unw_table);
  }
- #endif /* CONFIG_SMP */
+-
+-#ifdef CONFIG_SMP
+-void
+-percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
+-{
+-	unsigned int i;
+-	for_each_possible_cpu(i) {
+-		memcpy(pcpudst + __per_cpu_offset[i], src, size);
+-	}
+-}
+-#endif /* CONFIG_SMP */
 diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
 index 4ac2b1f..86028c6 100644
 --- a/arch/ia64/kernel/setup.c
@@ -70823,14 +71939,14 @@
  #ifdef CONFIG_BLK_DEV_INITRD
  	. = ALIGN(PAGE_SIZE);
 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 232c298..fb85f6b 100644
+index 232c298..9c44af3 100644
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
 @@ -42,6 +42,9 @@ config GENERIC_HARDIRQS
  	bool
  	default y
  
-+config ARCH_SETS_UP_PER_CPU_AREA
++config HAVE_SETUP_PER_CPU_AREA
 +	def_bool PPC64
 +
  config IRQ_PER_CPU
@@ -70848,10107 +71964,29968 @@
  config ARCH_HAS_ILOG2_U32
  	bool
  	default y
-diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
-index 18e3271..4b1d98b 100644
---- a/arch/powerpc/boot/Makefile
-+++ b/arch/powerpc/boot/Makefile
-@@ -65,7 +65,7 @@ obj-wlib := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-wlib))))
- obj-plat := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-plat))))
+@@ -140,6 +148,9 @@ config DEFAULT_UIMAGE
+ 	  Used to allow a board to specify it wants a uImage built by default
+ 	default n
  
- quiet_cmd_copy_zlib = COPY    $@
--      cmd_copy_zlib = sed "s at __attribute_used__@@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
-+      cmd_copy_zlib = sed "s at __used@@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
++config REDBOOT
++	bool
++
+ config PPC64_SWSUSP
+ 	bool
+ 	depends on PPC64 && (BROKEN || (PPC_PMAC64 && EXPERIMENTAL))
+@@ -160,11 +171,13 @@ config PPC_DCR
  
- quiet_cmd_copy_zlibheader = COPY    $@
-       cmd_copy_zlibheader = sed "s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
-diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
-index 3e17d15..8b056d2 100644
---- a/arch/powerpc/kernel/ptrace.c
-+++ b/arch/powerpc/kernel/ptrace.c
-@@ -256,7 +256,7 @@ static int set_evrregs(struct task_struct *task, unsigned long *data)
- #endif /* CONFIG_SPE */
+ config PPC_OF_PLATFORM_PCI
+ 	bool
++	depends on PCI
+ 	depends on PPC64 # not supported on 32 bits yet
+ 	default n
  
+ source "init/Kconfig"
  
--static void set_single_step(struct task_struct *task)
-+void user_enable_single_step(struct task_struct *task)
- {
- 	struct pt_regs *regs = task->thread.regs;
++source "arch/powerpc/sysdev/Kconfig"
+ source "arch/powerpc/platforms/Kconfig"
  
-@@ -271,7 +271,7 @@ static void set_single_step(struct task_struct *task)
- 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
- }
+ menu "Kernel options"
+@@ -340,6 +353,14 @@ config PPC_64K_PAGES
+ 	  while on hardware with such support, it will be used to map
+ 	  normal application pages.
+ 
++config PPC_SUBPAGE_PROT
++	bool "Support setting protections for 4k subpages"
++	depends on PPC_64K_PAGES
++	help
++	  This option adds support for a system call to allow user programs
++	  to set access permissions (read/write, readonly, or no access)
++	  on the 4k subpages of each 64k page.
++
+ config SCHED_SMT
+ 	bool "SMT (Hyperthreading) scheduler support"
+ 	depends on PPC64 && SMP
+@@ -417,7 +438,7 @@ endmenu
  
--static void clear_single_step(struct task_struct *task)
-+void user_disable_single_step(struct task_struct *task)
- {
- 	struct pt_regs *regs = task->thread.regs;
+ config ISA_DMA_API
+ 	bool
+-	default y
++	default !PPC_ISERIES || PCI
  
-@@ -313,7 +313,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
- void ptrace_disable(struct task_struct *child)
+ menu "Bus options"
+ 
+@@ -467,7 +488,7 @@ config MCA
+ config PCI
+ 	bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx \
+ 		|| PPC_MPC52xx || (EMBEDDED && (PPC_PSERIES || PPC_ISERIES)) \
+-		|| PPC_PS3
++		|| PPC_PS3 || 44x
+ 	default y if !40x && !CPM2 && !8xx && !PPC_83xx \
+ 		&& !PPC_85xx && !PPC_86xx
+ 	default PCI_PERMEDIA if !4xx && !CPM2 && !8xx
+diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
+index 6a79fe4..db7cc34 100644
+--- a/arch/powerpc/Kconfig.debug
++++ b/arch/powerpc/Kconfig.debug
+@@ -151,6 +151,13 @@ config BOOTX_TEXT
+ 
+ config PPC_EARLY_DEBUG
+ 	bool "Early debugging (dangerous)"
++	help
++	  Say Y to enable some early debugging facilities that may be available
++	  for your processor/board combination. Those facilities are hacks
++	  intended to debug problems early during boot, this should not be
++	  enabled in a production kernel.
++	  Note that enabling this will also cause the kernel default log level
++	  to be pushed to max automatically very early during boot
+ 
+ choice
+ 	prompt "Early debugging console"
+@@ -218,7 +225,16 @@ config PPC_EARLY_DEBUG_44x
+ 	depends on 44x
+ 	help
+ 	  Select this to enable early debugging for IBM 44x chips via the
+-	  inbuilt serial port.
++	  inbuilt serial port.  If you enable this, ensure you set
++          PPC_EARLY_DEBUG_44x_PHYSLOW below to suit your target board.
++
++config PPC_EARLY_DEBUG_40x
++	bool "Early serial debugging for IBM/AMCC 40x CPUs"
++	depends on 40x
++	help
++	  Select this to enable early debugging for IBM 40x chips via the
++	  inbuilt serial port. This works on chips with a 16550 compatible
++	  UART. Xilinx chips with uartlite cannot use this option.
+ 
+ config PPC_EARLY_DEBUG_CPM
+ 	bool "Early serial debugging for Freescale CPM-based serial ports"
+@@ -235,12 +251,20 @@ config PPC_EARLY_DEBUG_44x_PHYSLOW
+ 	hex "Low 32 bits of early debug UART physical address"
+ 	depends on PPC_EARLY_DEBUG_44x
+ 	default "0x40000200"
++	help
++	  You probably want 0x40000200 for ebony boards and
++          0x40000300 for taishan
+ 
+ config PPC_EARLY_DEBUG_44x_PHYSHIGH
+ 	hex "EPRN of early debug UART physical address"
+ 	depends on PPC_EARLY_DEBUG_44x
+ 	default "0x1"
+ 
++config PPC_EARLY_DEBUG_40x_PHYSADDR
++	hex "Early debug UART physical address"
++	depends on PPC_EARLY_DEBUG_40x
++	default "0xef600300"
++
+ config PPC_EARLY_DEBUG_CPM_ADDR
+ 	hex "CPM UART early debug transmit descriptor address"
+ 	depends on PPC_EARLY_DEBUG_CPM
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index bd87626..f70df9b 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -167,6 +167,9 @@ boot := arch/$(ARCH)/boot
+ $(BOOT_TARGETS): vmlinux
+ 	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ 
++bootwrapper_install:
++	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++
+ define archhelp
+   @echo '* zImage          - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
+   @echo '  install         - Install kernel using'
+diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
+index 65f4118..5ef2bdf 100644
+--- a/arch/powerpc/boot/.gitignore
++++ b/arch/powerpc/boot/.gitignore
+@@ -1,4 +1,5 @@
+ addnote
++dtc
+ empty.c
+ hack-coff
+ infblock.c
+@@ -30,6 +31,7 @@ zImage.*lds
+ zImage.miboot
+ zImage.pmac
+ zImage.pseries
++zImage.redboot*
+ zImage.sandpoint
+ zImage.vmode
+ zconf.h
+diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c
+index ebf9e21..758edf1 100644
+--- a/arch/powerpc/boot/4xx.c
++++ b/arch/powerpc/boot/4xx.c
+@@ -22,16 +22,14 @@
+ #include "dcr.h"
+ 
+ /* Read the 4xx SDRAM controller to get size of system memory. */
+-void ibm4xx_fixup_memsize(void)
++void ibm4xx_sdram_fixup_memsize(void)
  {
- 	/* make sure the single step bit is not set. */
--	clear_single_step(child);
-+	user_disable_single_step(child);
- }
+ 	int i;
+ 	unsigned long memsize, bank_config;
  
- /*
-@@ -445,52 +445,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
- 		break;
+ 	memsize = 0;
+ 	for (i = 0; i < ARRAY_SIZE(sdram_bxcr); i++) {
+-		mtdcr(DCRN_SDRAM0_CFGADDR, sdram_bxcr[i]);
+-		bank_config = mfdcr(DCRN_SDRAM0_CFGDATA);
+-
++		bank_config = SDRAM0_READ(sdram_bxcr[i]);
+ 		if (bank_config & SDRAM_CONFIG_BANK_ENABLE)
+ 			memsize += SDRAM_CONFIG_BANK_SIZE(bank_config);
  	}
+@@ -39,6 +37,69 @@ void ibm4xx_fixup_memsize(void)
+ 	dt_fixup_memory(0, memsize);
+ }
  
--	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
--	case PTRACE_CONT: { /* restart after signal. */
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		if (request == PTRACE_SYSCALL)
--			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		else
--			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		child->exit_code = data;
--		/* make sure the single step bit is not set. */
--		clear_single_step(child);
--		wake_up_process(child);
--		ret = 0;
--		break;
--	}
--
--/*
-- * make the child exit.  Best I can do is send it a sigkill.
-- * perhaps it should be put in the status that it wants to
-- * exit.
-- */
--	case PTRACE_KILL: {
--		ret = 0;
--		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
--			break;
--		child->exit_code = SIGKILL;
--		/* make sure the single step bit is not set. */
--		clear_single_step(child);
--		wake_up_process(child);
--		break;
--	}
--
--	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		set_single_step(child);
--		child->exit_code = data;
--		/* give it a chance to run. */
--		wake_up_process(child);
--		ret = 0;
--		break;
--	}
++/* Read the 440SPe MQ controller to get size of system memory. */
++#define DCRN_MQ0_B0BAS		0x40
++#define DCRN_MQ0_B1BAS		0x41
++#define DCRN_MQ0_B2BAS		0x42
++#define DCRN_MQ0_B3BAS		0x43
++
++static u64 ibm440spe_decode_bas(u32 bas)
++{
++	u64 base = ((u64)(bas & 0xFFE00000u)) << 2;
++
++	/* open coded because I'm paranoid about invalid values */
++	switch ((bas >> 4) & 0xFFF) {
++	case 0:
++		return 0;
++	case 0xffc:
++		return base + 0x000800000ull;
++	case 0xff8:
++		return base + 0x001000000ull;
++	case 0xff0:
++		return base + 0x002000000ull;
++	case 0xfe0:
++		return base + 0x004000000ull;
++	case 0xfc0:
++		return base + 0x008000000ull;
++	case 0xf80:
++		return base + 0x010000000ull;
++	case 0xf00:
++		return base + 0x020000000ull;
++	case 0xe00:
++		return base + 0x040000000ull;
++	case 0xc00:
++		return base + 0x080000000ull;
++	case 0x800:
++		return base + 0x100000000ull;
++	}
++	printf("Memory BAS value 0x%08x unsupported !\n", bas);
++	return 0;
++}
++
++void ibm440spe_fixup_memsize(void)
++{
++	u64 banktop, memsize = 0;
++
++	/* Ultimately, we should directly construct the memory node
++	 * so we are able to handle holes in the memory address space
++	 */
++	banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B0BAS));
++	if (banktop > memsize)
++		memsize = banktop;
++	banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B1BAS));
++	if (banktop > memsize)
++		memsize = banktop;
++	banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B2BAS));
++	if (banktop > memsize)
++		memsize = banktop;
++	banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B3BAS));
++	if (banktop > memsize)
++		memsize = banktop;
++
++	dt_fixup_memory(0, memsize);
++}
++
++
+ /* 4xx DDR1/2 Denali memory controller support */
+ /* DDR0 registers */
+ #define DDR0_02			2
+@@ -77,19 +138,13 @@ void ibm4xx_fixup_memsize(void)
+ 
+ #define DDR_GET_VAL(val, mask, shift)	(((val) >> (shift)) & (mask))
+ 
+-static inline u32 mfdcr_sdram0(u32 reg)
+-{
+-        mtdcr(DCRN_SDRAM0_CFGADDR, reg);
+-        return mfdcr(DCRN_SDRAM0_CFGDATA);
+-}
 -
- 	case PTRACE_GET_DEBUGREG: {
- 		ret = -EINVAL;
- 		/* We only support one DABR and no IABRS at the moment */
-diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
-index 25d9a96..c8127f8 100644
---- a/arch/powerpc/kernel/sysfs.c
-+++ b/arch/powerpc/kernel/sysfs.c
-@@ -158,7 +158,7 @@ static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
- 	unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
- 	return sprintf(buf, "%lx\n", val); \
- } \
--static ssize_t __attribute_used__ \
-+static ssize_t __used \
- 	store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
- { \
- 	struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
-diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
-index f66fa5d..0afb9e3 100644
---- a/arch/powerpc/kernel/vmlinux.lds.S
-+++ b/arch/powerpc/kernel/vmlinux.lds.S
-@@ -23,7 +23,7 @@ SECTIONS
- 	/* Sections to be discarded. */
- 	/DISCARD/ : {
- 	*(.exitcall.exit)
--	*(.exit.data)
-+	EXIT_DATA
- 	}
+ void ibm4xx_denali_fixup_memsize(void)
+ {
+ 	u32 val, max_cs, max_col, max_row;
+ 	u32 cs, col, row, bank, dpath;
+ 	unsigned long memsize;
  
- 	. = KERNELBASE;
-@@ -76,17 +76,19 @@ SECTIONS
+-	val = mfdcr_sdram0(DDR0_02);
++	val = SDRAM0_READ(DDR0_02);
+ 	if (!DDR_GET_VAL(val, DDR_START, DDR_START_SHIFT))
+ 		fatal("DDR controller is not initialized\n");
+ 
+@@ -99,12 +154,12 @@ void ibm4xx_denali_fixup_memsize(void)
+ 	max_row = DDR_GET_VAL(val, DDR_MAX_ROW_REG, DDR_MAX_ROW_REG_SHIFT);
+ 
+ 	/* get CS value */
+-	val = mfdcr_sdram0(DDR0_10);
++	val = SDRAM0_READ(DDR0_10);
+ 
+ 	val = DDR_GET_VAL(val, DDR_CS_MAP, DDR_CS_MAP_SHIFT);
+ 	cs = 0;
+ 	while (val) {
+-		if (val && 0x1)
++		if (val & 0x1)
+ 			cs++;
+ 		val = val >> 1;
+ 	}
+@@ -115,15 +170,15 @@ void ibm4xx_denali_fixup_memsize(void)
+ 		fatal("DDR wrong CS configuration\n");
+ 
+ 	/* get data path bytes */
+-	val = mfdcr_sdram0(DDR0_14);
++	val = SDRAM0_READ(DDR0_14);
  
- 	.init.text : {
- 		_sinittext = .;
--		*(.init.text)
-+		INIT_TEXT
- 		_einittext = .;
- 	}
+ 	if (DDR_GET_VAL(val, DDR_REDUC, DDR_REDUC_SHIFT))
+ 		dpath = 8; /* 64 bits */
+ 	else
+ 		dpath = 4; /* 32 bits */
  
- 	/* .exit.text is discarded at runtime, not link time,
- 	 * to deal with references from __bug_table
- 	 */
--	.exit.text : { *(.exit.text) }
-+	.exit.text : {
-+		EXIT_TEXT
-+	}
+-	/* get adress pins (rows) */
+-	val = mfdcr_sdram0(DDR0_42);
++	/* get address pins (rows) */
++ 	val = SDRAM0_READ(DDR0_42);
  
- 	.init.data : {
--		*(.init.data);
-+		INIT_DATA
- 		__vtop_table_begin = .;
- 		*(.vtop_fixup);
- 		__vtop_table_end = .;
-diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
-index cddc250..446a8bb 100644
---- a/arch/powerpc/oprofile/op_model_power4.c
-+++ b/arch/powerpc/oprofile/op_model_power4.c
-@@ -172,15 +172,15 @@ static void power4_stop(void)
- }
+ 	row = DDR_GET_VAL(val, DDR_APIN, DDR_APIN_SHIFT);
+ 	if (row > max_row)
+@@ -131,7 +186,7 @@ void ibm4xx_denali_fixup_memsize(void)
+ 	row = max_row - row;
  
- /* Fake functions used by canonicalize_pc */
--static void __attribute_used__ hypervisor_bucket(void)
-+static void __used hypervisor_bucket(void)
+ 	/* get collomn size and banks */
+-	val = mfdcr_sdram0(DDR0_43);
++	val = SDRAM0_READ(DDR0_43);
+ 
+ 	col = DDR_GET_VAL(val, DDR_COL_SZ, DDR_COL_SZ_SHIFT);
+ 	if (col > max_col)
+@@ -179,13 +234,17 @@ void ibm40x_dbcr_reset(void)
+ #define EMAC_RESET 0x20000000
+ void ibm4xx_quiesce_eth(u32 *emac0, u32 *emac1)
  {
+-	/* Quiesce the MAL and EMAC(s) since PIBS/OpenBIOS don't do this for us */
++	/* Quiesce the MAL and EMAC(s) since PIBS/OpenBIOS don't
++	 * do this for us
++	 */
+ 	if (emac0)
+ 		*emac0 = EMAC_RESET;
+ 	if (emac1)
+ 		*emac1 = EMAC_RESET;
+ 
+ 	mtdcr(DCRN_MAL0_CFG, MAL_RESET);
++	while (mfdcr(DCRN_MAL0_CFG) & MAL_RESET)
++		; /* loop until reset takes effect */
  }
  
--static void __attribute_used__ rtas_bucket(void)
-+static void __used rtas_bucket(void)
- {
+ /* Read 4xx EBC bus bridge registers to get mappings of the peripheral
+@@ -217,84 +276,335 @@ void ibm4xx_fixup_ebc_ranges(const char *ebc)
+ 	setprop(devp, "ranges", ranges, (p - ranges) * sizeof(u32));
  }
  
--static void __attribute_used__ kernel_unknown_bucket(void)
-+static void __used kernel_unknown_bucket(void)
+-#define SPRN_CCR1 0x378
+-void ibm440ep_fixup_clocks(unsigned int sysclk, unsigned int ser_clk)
++/* Calculate 440GP clocks */
++void ibm440gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk)
  {
- }
+-	u32 cpu, plb, opb, ebc, tb, uart0, m, vco;
+-	u32 reg;
+-	u32 fwdva, fwdvb, fbdv, lfbdv, opbdv0, perdv0, spcid0, prbdv0, tmp;
+-
+-	mtdcr(DCRN_CPR0_ADDR, CPR0_PLLD0);
+-	reg = mfdcr(DCRN_CPR0_DATA);
+-	tmp = (reg & 0x000F0000) >> 16;
+-	fwdva = tmp ? tmp : 16;
+-	tmp = (reg & 0x00000700) >> 8;
+-	fwdvb = tmp ? tmp : 8;
+-	tmp = (reg & 0x1F000000) >> 24;
+-	fbdv = tmp ? tmp : 32;
+-	lfbdv = (reg & 0x0000007F);
+-
+-	mtdcr(DCRN_CPR0_ADDR, CPR0_OPBD0);
+-	reg = mfdcr(DCRN_CPR0_DATA);
+-	tmp = (reg & 0x03000000) >> 24;
+-	opbdv0 = tmp ? tmp : 4;
+-
+-	mtdcr(DCRN_CPR0_ADDR, CPR0_PERD0);
+-	reg = mfdcr(DCRN_CPR0_DATA);
+-	tmp = (reg & 0x07000000) >> 24;
+-	perdv0 = tmp ? tmp : 8;
+-
+-	mtdcr(DCRN_CPR0_ADDR, CPR0_PRIMBD0);
+-	reg = mfdcr(DCRN_CPR0_DATA);
+-	tmp = (reg & 0x07000000) >> 24;
+-	prbdv0 = tmp ? tmp : 8;
+-
+-	mtdcr(DCRN_CPR0_ADDR, CPR0_SCPID);
+-	reg = mfdcr(DCRN_CPR0_DATA);
+-	tmp = (reg & 0x03000000) >> 24;
+-	spcid0 = tmp ? tmp : 4;
+-
+-	/* Calculate M */
+-	mtdcr(DCRN_CPR0_ADDR, CPR0_PLLC0);
+-	reg = mfdcr(DCRN_CPR0_DATA);
+-	tmp = (reg & 0x03000000) >> 24;
+-	if (tmp == 0) { /* PLL output */
+-		tmp = (reg & 0x20000000) >> 29;
+-		if (!tmp) /* PLLOUTA */
+-			m = fbdv * lfbdv * fwdva;
++	u32 sys0 = mfdcr(DCRN_CPC0_SYS0);
++	u32 cr0 = mfdcr(DCRN_CPC0_CR0);
++	u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
++	u32 opdv = CPC0_SYS0_OPDV(sys0);
++	u32 epdv = CPC0_SYS0_EPDV(sys0);
++
++	if (sys0 & CPC0_SYS0_BYPASS) {
++		/* Bypass system PLL */
++		cpu = plb = sys_clk;
++	} else {
++		if (sys0 & CPC0_SYS0_EXTSL)
++			/* PerClk */
++			m = CPC0_SYS0_FWDVB(sys0) * opdv * epdv;
+ 		else
+-			m = fbdv * lfbdv * fwdvb;
++			/* CPU clock */
++			m = CPC0_SYS0_FBDV(sys0) * CPC0_SYS0_FWDVA(sys0);
++		cpu = sys_clk * m / CPC0_SYS0_FWDVA(sys0);
++		plb = sys_clk * m / CPC0_SYS0_FWDVB(sys0);
+ 	}
+-	else if (tmp == 1) /* CPU output */
+-		m = fbdv * fwdva;
++
++	opb = plb / opdv;
++	ebc = opb / epdv;
++
++	/* FIXME: Check if this is for all 440GP, or just Ebony */
++	if ((mfpvr() & 0xf0000fff) == 0x40000440)
++		/* Rev. B 440GP, use external system clock */
++		tb = sys_clk;
+ 	else
+-		m = perdv0 * opbdv0 * fwdvb;
++		/* Rev. C 440GP, errata force us to use internal clock */
++		tb = cpu;
  
-diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
-index c83c3e3..a088622 100644
---- a/arch/powerpc/platforms/cell/spu_base.c
-+++ b/arch/powerpc/platforms/cell/spu_base.c
-@@ -459,7 +459,7 @@ static int spu_shutdown(struct sys_device *sysdev)
- }
+-	vco = (m * sysclk) + (m >> 1);
+-	cpu = vco / fwdva;
+-	plb = vco / fwdvb / prbdv0;
+-	opb = plb / opbdv0;
+-	ebc = plb / perdv0;
++	if (cr0 & CPC0_CR0_U0EC)
++		/* External UART clock */
++		uart0 = ser_clk;
++	else
++		/* Internal UART clock */
++		uart0 = plb / CPC0_CR0_UDIV(cr0);
++
++	if (cr0 & CPC0_CR0_U1EC)
++		/* External UART clock */
++		uart1 = ser_clk;
++	else
++		/* Internal UART clock */
++		uart1 = plb / CPC0_CR0_UDIV(cr0);
  
- static struct sysdev_class spu_sysdev_class = {
--	set_kset_name("spu"),
-+	.name = "spu",
- 	.shutdown = spu_shutdown,
- };
+-	/* FIXME */
+-	uart0 = ser_clk;
++	printf("PPC440GP: SysClk = %dMHz (%x)\n\r",
++	       (sys_clk + 500000) / 1000000, sys_clk);
++
++	dt_fixup_cpu_clocks(cpu, tb, 0);
++
++	dt_fixup_clock("/plb", plb);
++	dt_fixup_clock("/plb/opb", opb);
++	dt_fixup_clock("/plb/opb/ebc", ebc);
++	dt_fixup_clock("/plb/opb/serial@40000200", uart0);
++	dt_fixup_clock("/plb/opb/serial@40000300", uart1);
++}
++
++#define SPRN_CCR1 0x378
++
++static inline u32 __fix_zero(u32 v, u32 def)
++{
++	return v ? v : def;
++}
++
++static unsigned int __ibm440eplike_fixup_clocks(unsigned int sys_clk,
++						unsigned int tmr_clk,
++						int per_clk_from_opb)
++{
++	/* PLL config */
++	u32 pllc  = CPR0_READ(DCRN_CPR0_PLLC);
++	u32 plld  = CPR0_READ(DCRN_CPR0_PLLD);
++
++	/* Dividers */
++	u32 fbdv   = __fix_zero((plld >> 24) & 0x1f, 32);
++	u32 fwdva  = __fix_zero((plld >> 16) & 0xf, 16);
++	u32 fwdvb  = __fix_zero((plld >> 8) & 7, 8);
++	u32 lfbdv  = __fix_zero(plld & 0x3f, 64);
++	u32 pradv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PRIMAD) >> 24) & 7, 8);
++	u32 prbdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PRIMBD) >> 24) & 7, 8);
++	u32 opbdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_OPBD) >> 24) & 3, 4);
++	u32 perdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PERD) >> 24) & 3, 4);
++
++	/* Input clocks for primary dividers */
++	u32 clk_a, clk_b;
++
++	/* Resulting clocks */
++	u32 cpu, plb, opb, ebc, vco;
++
++	/* Timebase */
++	u32 ccr1, tb = tmr_clk;
++
++	if (pllc & 0x40000000) {
++		u32 m;
++
++		/* Feedback path */
++		switch ((pllc >> 24) & 7) {
++		case 0:
++			/* PLLOUTx */
++			m = ((pllc & 0x20000000) ? fwdvb : fwdva) * lfbdv;
++			break;
++		case 1:
++			/* CPU */
++			m = fwdva * pradv0;
++			break;
++		case 5:
++			/* PERClk */
++			m = fwdvb * prbdv0 * opbdv0 * perdv0;
++			break;
++		default:
++			printf("WARNING ! Invalid PLL feedback source !\n");
++			goto bypass;
++		}
++		m *= fbdv;
++		vco = sys_clk * m;
++		clk_a = vco / fwdva;
++		clk_b = vco / fwdvb;
++	} else {
++bypass:
++		/* Bypass system PLL */
++		vco = 0;
++		clk_a = clk_b = sys_clk;
++	}
++
++	cpu = clk_a / pradv0;
++	plb = clk_b / prbdv0;
++	opb = plb / opbdv0;
++	ebc = (per_clk_from_opb ? opb : plb) / perdv0;
+ 
+ 	/* Figure out timebase.  Either CPU or default TmrClk */
+-	asm volatile (
+-			"mfspr	%0,%1\n"
+-			:
+-			"=&r"(reg) : "i"(SPRN_CCR1));
+-	if (reg & 0x0080)
+-		tb = 25000000; /* TmrClk is 25MHz */
+-	else
++	ccr1 = mfspr(SPRN_CCR1);
++
++	/* If passed a 0 tmr_clk, force CPU clock */
++	if (tb == 0) {
++		ccr1 &= ~0x80u;
++		mtspr(SPRN_CCR1, ccr1);
++	}
++	if ((ccr1 & 0x0080) == 0)
+ 		tb = cpu;
  
-diff --git a/arch/powerpc/platforms/pasemi/Makefile b/arch/powerpc/platforms/pasemi/Makefile
-index f47fcac..e636daa 100644
---- a/arch/powerpc/platforms/pasemi/Makefile
-+++ b/arch/powerpc/platforms/pasemi/Makefile
-@@ -1,4 +1,4 @@
--obj-y	+= setup.o pci.o time.o idle.o powersave.o iommu.o
-+obj-y	+= setup.o pci.o time.o idle.o powersave.o iommu.o dma_lib.o
- obj-$(CONFIG_PPC_PASEMI_MDIO)	+= gpio_mdio.o
- obj-$(CONFIG_ELECTRA_IDE) += electra_ide.o
- obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += cpufreq.o
-diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
+ 	dt_fixup_cpu_clocks(cpu, tb, 0);
+ 	dt_fixup_clock("/plb", plb);
+ 	dt_fixup_clock("/plb/opb", opb);
+ 	dt_fixup_clock("/plb/opb/ebc", ebc);
++
++	return plb;
++}
++
++static void eplike_fixup_uart_clk(int index, const char *path,
++				  unsigned int ser_clk,
++				  unsigned int plb_clk)
++{
++	unsigned int sdr;
++	unsigned int clock;
++
++	switch (index) {
++	case 0:
++		sdr = SDR0_READ(DCRN_SDR0_UART0);
++		break;
++	case 1:
++		sdr = SDR0_READ(DCRN_SDR0_UART1);
++		break;
++	case 2:
++		sdr = SDR0_READ(DCRN_SDR0_UART2);
++		break;
++	case 3:
++		sdr = SDR0_READ(DCRN_SDR0_UART3);
++		break;
++	default:
++		return;
++	}
++
++	if (sdr & 0x00800000u)
++		clock = ser_clk;
++	else
++		clock = plb_clk / __fix_zero(sdr & 0xff, 256);
++
++	dt_fixup_clock(path, clock);
++}
++
++void ibm440ep_fixup_clocks(unsigned int sys_clk,
++			   unsigned int ser_clk,
++			   unsigned int tmr_clk)
++{
++	unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 0);
++
++	/* serial clocks beed fixup based on int/ext */
++	eplike_fixup_uart_clk(0, "/plb/opb/serial@ef600300", ser_clk, plb_clk);
++	eplike_fixup_uart_clk(1, "/plb/opb/serial@ef600400", ser_clk, plb_clk);
++	eplike_fixup_uart_clk(2, "/plb/opb/serial@ef600500", ser_clk, plb_clk);
++	eplike_fixup_uart_clk(3, "/plb/opb/serial@ef600600", ser_clk, plb_clk);
++}
++
++void ibm440gx_fixup_clocks(unsigned int sys_clk,
++			   unsigned int ser_clk,
++			   unsigned int tmr_clk)
++{
++	unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 1);
++
++	/* serial clocks beed fixup based on int/ext */
++	eplike_fixup_uart_clk(0, "/plb/opb/serial@40000200", ser_clk, plb_clk);
++	eplike_fixup_uart_clk(1, "/plb/opb/serial@40000300", ser_clk, plb_clk);
++}
++
++void ibm440spe_fixup_clocks(unsigned int sys_clk,
++			    unsigned int ser_clk,
++			    unsigned int tmr_clk)
++{
++	unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 1);
++
++	/* serial clocks beed fixup based on int/ext */
++	eplike_fixup_uart_clk(0, "/plb/opb/serial@10000200", ser_clk, plb_clk);
++	eplike_fixup_uart_clk(1, "/plb/opb/serial@10000300", ser_clk, plb_clk);
++	eplike_fixup_uart_clk(2, "/plb/opb/serial@10000600", ser_clk, plb_clk);
++}
++
++void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk)
++{
++	u32 pllmr = mfdcr(DCRN_CPC0_PLLMR);
++	u32 cpc0_cr0 = mfdcr(DCRN_405_CPC0_CR0);
++	u32 cpc0_cr1 = mfdcr(DCRN_405_CPC0_CR1);
++	u32 psr = mfdcr(DCRN_405_CPC0_PSR);
++	u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
++	u32 fwdv, fwdvb, fbdv, cbdv, opdv, epdv, ppdv, udiv;
++
++	fwdv = (8 - ((pllmr & 0xe0000000) >> 29));
++	fbdv = (pllmr & 0x1e000000) >> 25;
++	if (fbdv == 0)
++		fbdv = 16;
++	cbdv = ((pllmr & 0x00060000) >> 17) + 1; /* CPU:PLB */
++	opdv = ((pllmr & 0x00018000) >> 15) + 1; /* PLB:OPB */
++	ppdv = ((pllmr & 0x00001800) >> 13) + 1; /* PLB:PCI */
++	epdv = ((pllmr & 0x00001800) >> 11) + 2; /* PLB:EBC */
++	udiv = ((cpc0_cr0 & 0x3e) >> 1) + 1;
++
++	/* check for 405GPr */
++	if ((mfpvr() & 0xfffffff0) == (0x50910951 & 0xfffffff0)) {
++		fwdvb = 8 - (pllmr & 0x00000007);
++		if (!(psr & 0x00001000)) /* PCI async mode enable == 0 */
++			if (psr & 0x00000020) /* New mode enable */
++				m = fwdvb * 2 * ppdv;
++			else
++				m = fwdvb * cbdv * ppdv;
++		else if (psr & 0x00000020) /* New mode enable */
++			if (psr & 0x00000800) /* PerClk synch mode */
++				m = fwdvb * 2 * epdv;
++			else
++				m = fbdv * fwdv;
++		else if (epdv == fbdv)
++			m = fbdv * cbdv * epdv;
++		else
++			m = fbdv * fwdvb * cbdv;
++
++		cpu = sys_clk * m / fwdv;
++		plb = sys_clk * m / (fwdvb * cbdv);
++	} else {
++		m = fwdv * fbdv * cbdv;
++		cpu = sys_clk * m / fwdv;
++		plb = cpu / cbdv;
++	}
++	opb = plb / opdv;
++	ebc = plb / epdv;
++
++	if (cpc0_cr0 & 0x80)
++		/* uart0 uses the external clock */
++		uart0 = ser_clk;
++	else
++		uart0 = cpu / udiv;
++
++	if (cpc0_cr0 & 0x40)
++		/* uart1 uses the external clock */
++		uart1 = ser_clk;
++	else
++		uart1 = cpu / udiv;
++
++	/* setup the timebase clock to tick at the cpu frequency */
++	cpc0_cr1 = cpc0_cr1 & ~0x00800000;
++	mtdcr(DCRN_405_CPC0_CR1, cpc0_cr1);
++	tb = cpu;
++
++	dt_fixup_cpu_clocks(cpu, tb, 0);
++	dt_fixup_clock("/plb", plb);
++	dt_fixup_clock("/plb/opb", opb);
++	dt_fixup_clock("/plb/ebc", ebc);
++	dt_fixup_clock("/plb/opb/serial@ef600300", uart0);
++	dt_fixup_clock("/plb/opb/serial@ef600400", uart1);
++}
++
++
++void ibm405ep_fixup_clocks(unsigned int sys_clk)
++{
++	u32 pllmr0 = mfdcr(DCRN_CPC0_PLLMR0);
++	u32 pllmr1 = mfdcr(DCRN_CPC0_PLLMR1);
++	u32 cpc0_ucr = mfdcr(DCRN_CPC0_UCR);
++	u32 cpu, plb, opb, ebc, uart0, uart1;
++	u32 fwdva, fwdvb, fbdv, cbdv, opdv, epdv;
++	u32 pllmr0_ccdv, tb, m;
++
++	fwdva = 8 - ((pllmr1 & 0x00070000) >> 16);
++	fwdvb = 8 - ((pllmr1 & 0x00007000) >> 12);
++	fbdv = (pllmr1 & 0x00f00000) >> 20;
++	if (fbdv == 0)
++		fbdv = 16;
++
++	cbdv = ((pllmr0 & 0x00030000) >> 16) + 1; /* CPU:PLB */
++	epdv = ((pllmr0 & 0x00000300) >> 8) + 2;  /* PLB:EBC */
++	opdv = ((pllmr0 & 0x00003000) >> 12) + 1; /* PLB:OPB */
++
++	m = fbdv * fwdvb;
++
++	pllmr0_ccdv = ((pllmr0 & 0x00300000) >> 20) + 1;
++	if (pllmr1 & 0x80000000)
++		cpu = sys_clk * m / (fwdva * pllmr0_ccdv);
++	else
++		cpu = sys_clk / pllmr0_ccdv;
++
++	plb = cpu / cbdv;
++	opb = plb / opdv;
++	ebc = plb / epdv;
++	tb = cpu;
++	uart0 = cpu / (cpc0_ucr & 0x0000007f);
++	uart1 = cpu / ((cpc0_ucr & 0x00007f00) >> 8);
++
++	dt_fixup_cpu_clocks(cpu, tb, 0);
++	dt_fixup_clock("/plb", plb);
++	dt_fixup_clock("/plb/opb", opb);
++	dt_fixup_clock("/plb/ebc", ebc);
+ 	dt_fixup_clock("/plb/opb/serial@ef600300", uart0);
+-	dt_fixup_clock("/plb/opb/serial@ef600400", uart0);
+-	dt_fixup_clock("/plb/opb/serial@ef600500", uart0);
+-	dt_fixup_clock("/plb/opb/serial@ef600600", uart0);
++	dt_fixup_clock("/plb/opb/serial@ef600400", uart1);
+ }
+diff --git a/arch/powerpc/boot/4xx.h b/arch/powerpc/boot/4xx.h
+index adba6a5..2606e64 100644
+--- a/arch/powerpc/boot/4xx.h
++++ b/arch/powerpc/boot/4xx.h
+@@ -11,12 +11,22 @@
+ #ifndef _POWERPC_BOOT_4XX_H_
+ #define _POWERPC_BOOT_4XX_H_
+ 
+-void ibm4xx_fixup_memsize(void);
++void ibm4xx_sdram_fixup_memsize(void);
++void ibm440spe_fixup_memsize(void);
+ void ibm4xx_denali_fixup_memsize(void);
+ void ibm44x_dbcr_reset(void);
+ void ibm40x_dbcr_reset(void);
+ void ibm4xx_quiesce_eth(u32 *emac0, u32 *emac1);
+ void ibm4xx_fixup_ebc_ranges(const char *ebc);
+-void ibm440ep_fixup_clocks(unsigned int sysclk, unsigned int ser_clk);
++
++void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk);
++void ibm405ep_fixup_clocks(unsigned int sys_clk);
++void ibm440gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk);
++void ibm440ep_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk,
++			   unsigned int tmr_clk);
++void ibm440gx_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk,
++			   unsigned int tmr_clk);
++void ibm440spe_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk,
++			    unsigned int tmr_clk);
+ 
+ #endif /* _POWERPC_BOOT_4XX_H_ */
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index 18e3271..122a270 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -33,12 +33,15 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
+ BOOTCFLAGS	+= -fno-stack-protector
+ endif
+ 
+-BOOTCFLAGS	+= -I$(obj) -I$(srctree)/$(obj)
++BOOTCFLAGS	+= -I$(obj) -I$(srctree)/$(obj) -I$(srctree)/$(src)/libfdt
+ 
+ $(obj)/4xx.o: BOOTCFLAGS += -mcpu=440
+ $(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
++$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
++$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
+ $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
+ 
++
+ zlib       := inffast.c inflate.c inftrees.c
+ zlibheader := inffast.h inffixed.h inflate.h inftrees.h infutil.h
+ zliblinuxheader := zlib.h zconf.h zutil.h
+@@ -46,17 +49,21 @@ zliblinuxheader := zlib.h zconf.h zutil.h
+ $(addprefix $(obj)/,$(zlib) gunzip_util.o main.o): \
+ 	$(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
+ 
+-src-wlib := string.S crt0.S stdio.c main.c flatdevtree.c flatdevtree_misc.c \
++src-libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
++src-wlib := string.S crt0.S stdio.c main.c \
++		$(addprefix libfdt/,$(src-libfdt)) libfdt-wrapper.c \
+ 		ns16550.c serial.c simple_alloc.c div64.S util.S \
+ 		gunzip_util.c elf_util.c $(zlib) devtree.c oflib.c ofconsole.c \
+ 		4xx.c ebony.c mv64x60.c mpsc.c mv64x60_i2c.c cuboot.c bamboo.c \
+ 		cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \
+ 		fsl-soc.c mpc8xx.c pq2.c
+-src-plat := of.c cuboot-52xx.c cuboot-83xx.c cuboot-85xx.c holly.c \
++src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \
+ 		cuboot-ebony.c treeboot-ebony.c prpmc2800.c \
+ 		ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \
+ 		cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c cuboot-bamboo.c \
+-		fixed-head.S ep88xc.c cuboot-hpc2.c
++		fixed-head.S ep88xc.c cuboot-hpc2.c ep405.c cuboot-taishan.c \
++		cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \
++		cuboot-warp.c cuboot-85xx-cpm2.c
+ src-boot := $(src-wlib) $(src-plat) empty.c
+ 
+ src-boot := $(addprefix $(obj)/, $(src-boot))
+@@ -65,7 +72,7 @@ obj-wlib := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-wlib))))
+ obj-plat := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-plat))))
+ 
+ quiet_cmd_copy_zlib = COPY    $@
+-      cmd_copy_zlib = sed "s@__attribute_used__@@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
++      cmd_copy_zlib = sed "s@__used@@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
+ 
+ quiet_cmd_copy_zlibheader = COPY    $@
+       cmd_copy_zlibheader = sed "s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
+@@ -101,24 +108,61 @@ quiet_cmd_bootar = BOOTAR  $@
+       cmd_bootar = $(CROSS32AR) -cr $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@
+ 
+ $(patsubst %.c,%.o, $(filter %.c, $(src-boot))): %.o: %.c FORCE
++	$(Q)mkdir -p $(dir $@)
+ 	$(call if_changed_dep,bootcc)
+ $(patsubst %.S,%.o, $(filter %.S, $(src-boot))): %.o: %.S FORCE
++	$(Q)mkdir -p $(dir $@)
+ 	$(call if_changed_dep,bootas)
+ 
+ $(obj)/wrapper.a: $(obj-wlib) FORCE
+ 	$(call if_changed,bootar)
+ 
+-hostprogs-y	:= addnote addRamDisk hack-coff mktree
++hostprogs-y	:= addnote addRamDisk hack-coff mktree dtc
+ 
+ targets		+= $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
+ extra-y		:= $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
+ 		   $(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
+ 
+ wrapper		:=$(srctree)/$(src)/wrapper
+-wrapperbits	:= $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree) \
++wrapperbits	:= $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree dtc) \
+ 			$(wrapper) FORCE
+ 
+ #############
++# Bits for building dtc
++# DTC_GENPARSER      := 1    # Uncomment to rebuild flex/bison output
++
++dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o srcpos.o checks.o
++dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o
++dtc-objs := $(addprefix dtc-src/, $(dtc-objs))
++
++# prerequisites on generated files needs to be explicit
++$(obj)/dtc-src/dtc-parser.tab.o: $(obj)/dtc-src/dtc-parser.tab.c $(obj)/dtc-src/dtc-parser.tab.h
++$(obj)/dtc-src/dtc-lexer.lex.o:  $(obj)/dtc-src/dtc-lexer.lex.c $(obj)/dtc-src/dtc-parser.tab.h
++
++HOSTCFLAGS += -I$(src)/dtc-src/ -I$(src)/libfdt/
++
++targets += dtc-src/dtc-parser.tab.c
++targets += dtc-src/dtc-lexer.lex.c
++
++ifdef DTC_GENPARSER
++BISON = bison
++FLEX = flex
++
++quiet_cmd_bison = BISON   $@
++      cmd_bison = $(BISON) -o$@ -d $<; cp $@ $@_shipped
++quiet_cmd_flex = FLEX    $@
++      cmd_flex = $(FLEX) -o$@ $<; cp $@ $@_shipped
++
++$(obj)/dtc-src/dtc-parser.tab.c: $(src)/dtc-src/dtc-parser.y FORCE
++     $(call if_changed,bison)
++
++$(obj)/dtc-src/dtc-parser.tab.h: $(obj)/dtc-src/dtc-parser.tab.c
++
++$(obj)/dtc-src/dtc-lexer.lex.c: $(src)/dtc-src/dtc-lexer.l FORCE
++     $(call if_changed,flex)
++endif
++
++#############
+ # Bits for building various flavours of zImage
+ 
+ ifneq ($(CROSS32_COMPILE),)
+@@ -150,15 +194,29 @@ image-$(CONFIG_DEFAULT_UIMAGE)		+= uImage
+ ifneq ($(CONFIG_DEVICE_TREE),"")
+ image-$(CONFIG_PPC_8xx)			+= cuImage.8xx
+ image-$(CONFIG_PPC_EP88XC)		+= zImage.ep88xc
++image-$(CONFIG_EP405)			+= zImage.ep405
+ image-$(CONFIG_8260)			+= cuImage.pq2
++image-$(CONFIG_EP8248E)			+= zImage.ep8248e
+ image-$(CONFIG_PPC_MPC52xx)		+= cuImage.52xx
++image-$(CONFIG_STORCENTER)		+= cuImage.824x
+ image-$(CONFIG_PPC_83xx)		+= cuImage.83xx
+ image-$(CONFIG_PPC_85xx)		+= cuImage.85xx
++ifeq ($(CONFIG_CPM2),y)
++image-$(CONFIG_PPC_85xx)		+= cuImage.85xx-cpm2
++endif
+ image-$(CONFIG_MPC7448HPC2)		+= cuImage.hpc2
+ image-$(CONFIG_EBONY)			+= treeImage.ebony cuImage.ebony
+ image-$(CONFIG_BAMBOO)			+= treeImage.bamboo cuImage.bamboo
+ image-$(CONFIG_SEQUOIA)			+= cuImage.sequoia
++image-$(CONFIG_RAINIER)			+= cuImage.rainier
+ image-$(CONFIG_WALNUT)			+= treeImage.walnut
++image-$(CONFIG_TAISHAN)			+= cuImage.taishan
++image-$(CONFIG_KATMAI)			+= cuImage.katmai
++image-$(CONFIG_WARP)			+= cuImage.warp
++endif
++
++ifneq ($(CONFIG_REDBOOT),"")
++image-$(CONFIG_PPC_8xx)			+= zImage.redboot-8xx
+ endif
+ 
+ # For 32-bit powermacs, build the COFF and miboot images
+@@ -243,3 +301,51 @@ clean-kernel := vmlinux.strip vmlinux.bin
+ clean-kernel += $(addsuffix .gz,$(clean-kernel))
+ # If not absolute clean-files are relative to $(obj).
+ clean-files += $(addprefix $(objtree)/, $(clean-kernel))
++
++WRAPPER_OBJDIR := /usr/lib/kernel-wrapper
++WRAPPER_DTSDIR := /usr/lib/kernel-wrapper/dts
++WRAPPER_BINDIR := /usr/sbin
++INSTALL := install
++
++extra-installed		:= $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
++hostprogs-installed	:= $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y))
++wrapper-installed	:= $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
++dts-installed		:= $(patsubst $(obj)/dts/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(obj)/dts/*.dts))
++
++all-installed		:= $(extra-installed) $(hostprogs-installed) $(wrapper-installed) $(dts-installed)
++
++quiet_cmd_mkdir           = MKDIR   $(patsubst $(INSTALL_HDR_PATH)/%,%,$@)
++      cmd_mkdir           = mkdir -p $@
++
++quiet_cmd_install	  = INSTALL $(patsubst $(DESTDIR)$(WRAPPER_OBJDIR)/%,%,$@)
++      cmd_install	  = $(INSTALL)  -m0644 $(patsubst $(DESTDIR)$(WRAPPER_OBJDIR)/%,$(obj)/%,$@) $@
++
++quiet_cmd_install_dts	  = INSTALL $(patsubst $(DESTDIR)$(WRAPPER_DTSDIR)/%,dts/%,$@)
++      cmd_install_dts	  = $(INSTALL)  -m0644 $(patsubst $(DESTDIR)$(WRAPPER_DTSDIR)/%,$(srctree)/$(obj)/dts/%,$@) $@
++
++quiet_cmd_install_exe	  = INSTALL $(patsubst $(DESTDIR)$(WRAPPER_BINDIR)/%,%,$@)
++      cmd_install_exe	  = $(INSTALL)  -m0755 $(patsubst $(DESTDIR)$(WRAPPER_BINDIR)/%,$(obj)/%,$@) $@
++
++quiet_cmd_install_wrapper = INSTALL $(patsubst $(DESTDIR)$(WRAPPER_BINDIR)/%,%,$@)
++      cmd_install_wrapper = $(INSTALL)  -m0755 $(patsubst $(DESTDIR)$(WRAPPER_BINDIR)/%,$(srctree)/$(obj)/%,$@) $@ ;\
++				sed -i $@ -e 's%^object=.*%object=$(WRAPPER_OBJDIR)%' \
++					  -e 's%^objbin=.*%objbin=$(WRAPPER_BINDIR)%' \
++
++
++$(DESTDIR)$(WRAPPER_OBJDIR) $(DESTDIR)$(WRAPPER_DTSDIR) $(DESTDIR)$(WRAPPER_BINDIR):
++	$(call cmd,mkdir)
++
++$(extra-installed)	: $(DESTDIR)$(WRAPPER_OBJDIR)/% : $(obj)/% | $(DESTDIR)$(WRAPPER_OBJDIR)
++	$(call cmd,install)
++
++$(hostprogs-installed)  : $(DESTDIR)$(WRAPPER_BINDIR)/% : $(obj)/% | $(DESTDIR)$(WRAPPER_BINDIR)
++	$(call cmd,install_exe)
++
++$(dts-installed)	: $(DESTDIR)$(WRAPPER_DTSDIR)/% : $(srctree)/$(obj)/dts/% | $(DESTDIR)$(WRAPPER_DTSDIR)
++	$(call cmd,install_dts)
++
++$(wrapper-installed): $(DESTDIR)$(WRAPPER_BINDIR) $(srctree)/$(obj)/wrapper | $(DESTDIR)$(WRAPPER_BINDIR)
++	$(call cmd,install_wrapper)
++
++$(obj)/bootwrapper_install: $(all-installed)
++
+diff --git a/arch/powerpc/boot/bamboo.c b/arch/powerpc/boot/bamboo.c
+index f61fcda..54b33f1 100644
+--- a/arch/powerpc/boot/bamboo.c
++++ b/arch/powerpc/boot/bamboo.c
+@@ -30,8 +30,8 @@ static void bamboo_fixups(void)
+ {
+ 	unsigned long sysclk = 33333333;
+ 
+-	ibm440ep_fixup_clocks(sysclk, 11059200);
+-	ibm4xx_fixup_memsize();
++	ibm440ep_fixup_clocks(sysclk, 11059200, 25000000);
++	ibm4xx_sdram_fixup_memsize();
+ 	ibm4xx_quiesce_eth((u32 *)0xef600e00, (u32 *)0xef600f00);
+ 	dt_fixup_mac_addresses(bamboo_mac0, bamboo_mac1);
+ }
+@@ -42,6 +42,6 @@ void bamboo_init(void *mac0, void *mac1)
+ 	platform_ops.exit = ibm44x_dbcr_reset;
+ 	bamboo_mac0 = mac0;
+ 	bamboo_mac1 = mac1;
+-	ft_init(_dtb_start, 0, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ }
+diff --git a/arch/powerpc/boot/cuboot-52xx.c b/arch/powerpc/boot/cuboot-52xx.c
+index 9256a26..a861154 100644
+--- a/arch/powerpc/boot/cuboot-52xx.c
++++ b/arch/powerpc/boot/cuboot-52xx.c
+@@ -53,7 +53,7 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                    unsigned long r6, unsigned long r7)
+ {
+ 	CUBOOT_INIT();
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ 	platform_ops.fixups = platform_fixups;
+ }
+diff --git a/arch/powerpc/boot/cuboot-824x.c b/arch/powerpc/boot/cuboot-824x.c
 new file mode 100644
-index 0000000..c529d8d
+index 0000000..ced90c5
 --- /dev/null
-+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
-@@ -0,0 +1,488 @@
++++ b/arch/powerpc/boot/cuboot-824x.c
+@@ -0,0 +1,53 @@
 +/*
-+ * Copyright (C) 2006-2007 PA Semi, Inc
-+ *
-+ * Common functions for DMA access on PA Semi PWRficient
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * Old U-boot compatibility for 824x
 + *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
++ * Copyright (c) 2007 Freescale Semiconductor, Inc.
 + *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
 + */
 +
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/of.h>
++#include "ops.h"
++#include "stdio.h"
++#include "cuboot.h"
 +
-+#include <asm/pasemi_dma.h>
++#define TARGET_824x
++#include "ppcboot.h"
 +
-+#define MAX_TXCH 64
-+#define MAX_RXCH 64
++static bd_t bd;
 +
-+static struct pasdma_status *dma_status;
 +
-+static void __iomem *iob_regs;
-+static void __iomem *mac_regs[6];
-+static void __iomem *dma_regs;
++static void platform_fixups(void)
++{
++	void *soc;
 +
-+static int base_hw_irq;
++	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
++	dt_fixup_mac_addresses(bd.bi_enetaddr);
++	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);
 +
-+static int num_txch, num_rxch;
++	soc = find_node_by_devtype(NULL, "soc");
++	if (soc) {
++		void *serial = NULL;
 +
-+static struct pci_dev *dma_pdev;
++		setprop(soc, "bus-frequency", &bd.bi_busfreq,
++		        sizeof(bd.bi_busfreq));
 +
-+/* Bitmaps to handle allocation of channels */
++		while ((serial = find_node_by_devtype(serial, "serial"))) {
++			if (get_parent(serial) != soc)
++				continue;
 +
-+static DECLARE_BITMAP(txch_free, MAX_TXCH);
-+static DECLARE_BITMAP(rxch_free, MAX_RXCH);
++			setprop(serial, "clock-frequency", &bd.bi_busfreq,
++			        sizeof(bd.bi_busfreq));
++		}
++	}
++}
 +
-+/* pasemi_read_iob_reg - read IOB register
-+ * @reg: Register to read (offset into PCI CFG space)
-+ */
-+unsigned int pasemi_read_iob_reg(unsigned int reg)
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++                   unsigned long r6, unsigned long r7)
 +{
-+	return in_le32(iob_regs+reg);
++	CUBOOT_INIT();
++	fdt_init(_dtb_start);
++	serial_console_init();
++	platform_ops.fixups = platform_fixups;
 +}
-+EXPORT_SYMBOL(pasemi_read_iob_reg);
-+
-+/* pasemi_write_iob_reg - write IOB register
-+ * @reg: Register to write to (offset into PCI CFG space)
-+ * @val: Value to write
+diff --git a/arch/powerpc/boot/cuboot-83xx.c b/arch/powerpc/boot/cuboot-83xx.c
+index a050550..61af1c1 100644
+--- a/arch/powerpc/boot/cuboot-83xx.c
++++ b/arch/powerpc/boot/cuboot-83xx.c
+@@ -24,7 +24,8 @@ static void platform_fixups(void)
+ 	void *soc;
+ 
+ 	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
+-	dt_fixup_mac_addresses(bd.bi_enetaddr, bd.bi_enet1addr);
++	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
++	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
+ 	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);
+ 
+ 	/* Unfortunately, the specific model number is encoded in the
+@@ -52,7 +53,7 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                    unsigned long r6, unsigned long r7)
+ {
+ 	CUBOOT_INIT();
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ 	platform_ops.fixups = platform_fixups;
+ }
+diff --git a/arch/powerpc/boot/cuboot-85xx-cpm2.c b/arch/powerpc/boot/cuboot-85xx-cpm2.c
+new file mode 100644
+index 0000000..723872d
+--- /dev/null
++++ b/arch/powerpc/boot/cuboot-85xx-cpm2.c
+@@ -0,0 +1,66 @@
++/*
++ * Old U-boot compatibility for 85xx
++ *
++ * Author: Scott Wood <scottwood@freescale.com>
++ *
++ * Copyright (c) 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
 + */
-+void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
++
++#include "ops.h"
++#include "stdio.h"
++#include "cuboot.h"
++
++#define TARGET_85xx
++#define TARGET_CPM2
++#include "ppcboot.h"
++
++static bd_t bd;
++
++static void platform_fixups(void)
 +{
-+	out_le32(iob_regs+reg, val);
-+}
-+EXPORT_SYMBOL(pasemi_write_iob_reg);
++	void *devp;
 +
-+/* pasemi_read_mac_reg - read MAC register
-+ * @intf: MAC interface
-+ * @reg: Register to read (offset into PCI CFG space)
++	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
++	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
++	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
++	dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr);
++	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 8, bd.bi_busfreq);
++
++	/* Unfortunately, the specific model number is encoded in the
++	 * soc node name in existing dts files -- once that is fixed,
++	 * this can do a simple path lookup.
++	 */
++	devp = find_node_by_devtype(NULL, "soc");
++	if (devp) {
++		void *serial = NULL;
++
++		setprop(devp, "bus-frequency", &bd.bi_busfreq,
++		        sizeof(bd.bi_busfreq));
++
++		while ((serial = find_node_by_devtype(serial, "serial"))) {
++			if (get_parent(serial) != devp)
++				continue;
++
++			setprop(serial, "clock-frequency", &bd.bi_busfreq,
++			        sizeof(bd.bi_busfreq));
++		}
++	}
++
++	devp = find_node_by_compatible(NULL, "fsl,cpm2-brg");
++	if (devp)
++		setprop(devp, "clock-frequency", &bd.bi_brgfreq,
++		        sizeof(bd.bi_brgfreq));
++}
++
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++                   unsigned long r6, unsigned long r7)
++{
++	CUBOOT_INIT();
++	fdt_init(_dtb_start);
++	serial_console_init();
++	platform_ops.fixups = platform_fixups;
++}
+diff --git a/arch/powerpc/boot/cuboot-85xx.c b/arch/powerpc/boot/cuboot-85xx.c
+index 345dcbe..6776a1a 100644
+--- a/arch/powerpc/boot/cuboot-85xx.c
++++ b/arch/powerpc/boot/cuboot-85xx.c
+@@ -24,8 +24,9 @@ static void platform_fixups(void)
+ 	void *soc;
+ 
+ 	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
+-	dt_fixup_mac_addresses(bd.bi_enetaddr, bd.bi_enet1addr,
+-	                       bd.bi_enet2addr);
++	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
++	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
++	dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr);
+ 	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 8, bd.bi_busfreq);
+ 
+ 	/* Unfortunately, the specific model number is encoded in the
+@@ -53,7 +54,7 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                    unsigned long r6, unsigned long r7)
+ {
+ 	CUBOOT_INIT();
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ 	platform_ops.fixups = platform_fixups;
+ }
+diff --git a/arch/powerpc/boot/cuboot-8xx.c b/arch/powerpc/boot/cuboot-8xx.c
+index 0e82015..c202c88 100644
+--- a/arch/powerpc/boot/cuboot-8xx.c
++++ b/arch/powerpc/boot/cuboot-8xx.c
+@@ -41,7 +41,7 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                    unsigned long r6, unsigned long r7)
+ {
+ 	CUBOOT_INIT();
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ 	platform_ops.fixups = platform_fixups;
+ }
+diff --git a/arch/powerpc/boot/cuboot-hpc2.c b/arch/powerpc/boot/cuboot-hpc2.c
+index d333898..1b89532 100644
+--- a/arch/powerpc/boot/cuboot-hpc2.c
++++ b/arch/powerpc/boot/cuboot-hpc2.c
+@@ -42,7 +42,7 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ 		unsigned long r6, unsigned long r7)
+ {
+ 	CUBOOT_INIT();
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ 	platform_ops.fixups = platform_fixups;
+ }
+diff --git a/arch/powerpc/boot/cuboot-katmai.c b/arch/powerpc/boot/cuboot-katmai.c
+new file mode 100644
+index 0000000..c021167
+--- /dev/null
++++ b/arch/powerpc/boot/cuboot-katmai.c
+@@ -0,0 +1,56 @@
++/*
++ * Old U-boot compatibility for Katmai
++ *
++ * Author: Hugh Blemings <hugh@au.ibm.com>
++ *
++ * Copyright 2007 Hugh Blemings, IBM Corporation.
++ *   Based on cuboot-ebony.c which is:
++ * Copyright 2007 David Gibson, IBM Corporation.
++ *   Based on cuboot-83xx.c, which is:
++ * Copyright (c) 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
 + */
-+unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
++
++#include "ops.h"
++#include "stdio.h"
++#include "reg.h"
++#include "dcr.h"
++#include "4xx.h"
++#include "44x.h"
++#include "cuboot.h"
++
++#define TARGET_44x
++#include "ppcboot.h"
++
++static bd_t bd;
++
++BSS_STACK(4096);
++
++static void katmai_fixups(void)
 +{
-+	return in_le32(mac_regs[intf]+reg);
++	unsigned long sysclk = 33333000;
++
++	/* 440SP Clock logic is all but identical to 440GX
++	 * so we just use that code for now at least
++	 */
++	ibm440spe_fixup_clocks(sysclk, 6 * 1843200, 0);
++
++	ibm440spe_fixup_memsize();
++
++	dt_fixup_mac_address(0, bd.bi_enetaddr);
++
++	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
 +}
-+EXPORT_SYMBOL(pasemi_read_mac_reg);
 +
-+/* pasemi_write_mac_reg - write MAC register
-+ * @intf: MAC interface
-+ * @reg: Register to write to (offset into PCI CFG space)
-+ * @val: Value to write
-+ */
-+void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++		   unsigned long r6, unsigned long r7)
 +{
-+	out_le32(mac_regs[intf]+reg, val);
++	CUBOOT_INIT();
++
++	platform_ops.fixups = katmai_fixups;
++	fdt_init(_dtb_start);
++	serial_console_init();
 +}
-+EXPORT_SYMBOL(pasemi_write_mac_reg);
+diff --git a/arch/powerpc/boot/cuboot-pq2.c b/arch/powerpc/boot/cuboot-pq2.c
+index 61574f3..f56ac6c 100644
+--- a/arch/powerpc/boot/cuboot-pq2.c
++++ b/arch/powerpc/boot/cuboot-pq2.c
+@@ -255,7 +255,7 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                    unsigned long r6, unsigned long r7)
+ {
+ 	CUBOOT_INIT();
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ 	platform_ops.fixups = pq2_platform_fixups;
+ }
+diff --git a/arch/powerpc/boot/cuboot-rainier.c b/arch/powerpc/boot/cuboot-rainier.c
+new file mode 100644
+index 0000000..cf452b6
+--- /dev/null
++++ b/arch/powerpc/boot/cuboot-rainier.c
+@@ -0,0 +1,56 @@
++/*
++ * Old U-boot compatibility for Rainier
++ *
++ * Valentine Barshak <vbarshak@ru.mvista.com>
++ * Copyright 2007 MontaVista Software, Inc
++ *
++ * Based on Ebony code by David Gibson <david@gibson.dropbear.id.au>
++ * Copyright IBM Corporation, 2007
++ *
++ * Based on Bamboo code by Josh Boyer <jwboyer@linux.vnet.ibm.com>
++ * Copyright IBM Corporation, 2007
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2 of the License
++ */
 +
-+/* pasemi_read_dma_reg - read DMA register
-+ * @reg: Register to read (offset into PCI CFG space)
++#include <stdarg.h>
++#include <stddef.h>
++#include "types.h"
++#include "elf.h"
++#include "string.h"
++#include "stdio.h"
++#include "page.h"
++#include "ops.h"
++#include "dcr.h"
++#include "4xx.h"
++#include "44x.h"
++#include "cuboot.h"
++
++#define TARGET_4xx
++#define TARGET_44x
++#include "ppcboot.h"
++
++static bd_t bd;
++
++
++static void rainier_fixups(void)
++{
++	unsigned long sysclk = 33333333;
++
++	ibm440ep_fixup_clocks(sysclk, 11059200, 50000000);
++	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
++	ibm4xx_denali_fixup_memsize();
++	dt_fixup_mac_addresses(&bd.bi_enetaddr, &bd.bi_enet1addr);
++}
++
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++                   unsigned long r6, unsigned long r7)
++{
++	CUBOOT_INIT();
++	platform_ops.fixups = rainier_fixups;
++	platform_ops.exit = ibm44x_dbcr_reset;
++	fdt_init(_dtb_start);
++	serial_console_init();
++}
+diff --git a/arch/powerpc/boot/cuboot-sequoia.c b/arch/powerpc/boot/cuboot-sequoia.c
+index ec635e0..f555575 100644
+--- a/arch/powerpc/boot/cuboot-sequoia.c
++++ b/arch/powerpc/boot/cuboot-sequoia.c
+@@ -39,7 +39,7 @@ static void sequoia_fixups(void)
+ {
+ 	unsigned long sysclk = 33333333;
+ 
+-	ibm440ep_fixup_clocks(sysclk, 11059200);
++	ibm440ep_fixup_clocks(sysclk, 11059200, 50000000);
+ 	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
+ 	ibm4xx_denali_fixup_memsize();
+ 	dt_fixup_mac_addresses(&bd.bi_enetaddr, &bd.bi_enet1addr);
+@@ -51,6 +51,6 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ 	CUBOOT_INIT();
+ 	platform_ops.fixups = sequoia_fixups;
+ 	platform_ops.exit = ibm44x_dbcr_reset;
+-	ft_init(_dtb_start, 0, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ }
+diff --git a/arch/powerpc/boot/cuboot-taishan.c b/arch/powerpc/boot/cuboot-taishan.c
+new file mode 100644
+index 0000000..f66455a
+--- /dev/null
++++ b/arch/powerpc/boot/cuboot-taishan.c
+@@ -0,0 +1,54 @@
++/*
++ * Old U-boot compatibility for Taishan
++ *
++ * Author: Hugh Blemings <hugh@au.ibm.com>
++ *
++ * Copyright 2007 Hugh Blemings, IBM Corporation.
++ *   Based on cuboot-ebony.c which is:
++ * Copyright 2007 David Gibson, IBM Corporation.
++ *   Based on cuboot-83xx.c, which is:
++ * Copyright (c) 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
 + */
-+unsigned int pasemi_read_dma_reg(unsigned int reg)
++
++#include "ops.h"
++#include "stdio.h"
++#include "cuboot.h"
++#include "reg.h"
++#include "dcr.h"
++#include "4xx.h"
++
++#define TARGET_44x
++#include "ppcboot.h"
++
++static bd_t bd;
++
++BSS_STACK(4096);
++
++static void taishan_fixups(void)
 +{
-+	return in_le32(dma_regs+reg);
++	/* FIXME: sysclk should be derived by reading the FPGA
++	   registers */
++	unsigned long sysclk = 33000000;
++
++	ibm440gx_fixup_clocks(sysclk, 6 * 1843200, 25000000);
++
++	ibm4xx_sdram_fixup_memsize();
++
++	dt_fixup_mac_addresses(bd.bi_enetaddr, bd.bi_enet1addr);
++
++	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
 +}
-+EXPORT_SYMBOL(pasemi_read_dma_reg);
 +
-+/* pasemi_write_dma_reg - write DMA register
-+ * @reg: Register to write to (offset into PCI CFG space)
-+ * @val: Value to write
-+ */
-+void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++		   unsigned long r6, unsigned long r7)
 +{
-+	out_le32(dma_regs+reg, val);
++	CUBOOT_INIT();
++
++	platform_ops.fixups = taishan_fixups;
++	fdt_init(_dtb_start);
++	serial_console_init();
 +}
-+EXPORT_SYMBOL(pasemi_write_dma_reg);
+diff --git a/arch/powerpc/boot/cuboot-warp.c b/arch/powerpc/boot/cuboot-warp.c
+new file mode 100644
+index 0000000..bdedebe
+--- /dev/null
++++ b/arch/powerpc/boot/cuboot-warp.c
+@@ -0,0 +1,39 @@
++/*
++ *   Sean MacLennan <smaclennan@pikatech.com>
++ *   Sean MacLennan <smaclennan at pikatech.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
 +
-+static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
-+{
-+	int bit;
-+	int start, limit;
++#include "ops.h"
++#include "4xx.h"
++#include "cuboot.h"
 +
-+	switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
-+	case TXCHAN_EVT0:
-+		start = 0;
-+		limit = 10;
-+		break;
-+	case TXCHAN_EVT1:
-+		start = 10;
-+		limit = MAX_TXCH;
-+		break;
-+	default:
-+		start = 0;
-+		limit = MAX_TXCH;
-+		break;
-+	}
-+retry:
-+	bit = find_next_bit(txch_free, MAX_TXCH, start);
-+	if (bit >= limit)
-+		return -ENOSPC;
-+	if (!test_and_clear_bit(bit, txch_free))
-+		goto retry;
++#define TARGET_44x
++#include "ppcboot.h"
 +
-+	return bit;
-+}
++static bd_t bd;
 +
-+static void pasemi_free_tx_chan(int chan)
++static void warp_fixups(void)
 +{
-+	BUG_ON(test_bit(chan, txch_free));
-+	set_bit(chan, txch_free);
++	unsigned long sysclk = 66000000;
++
++	ibm440ep_fixup_clocks(sysclk, 11059200, 50000000);
++	ibm4xx_sdram_fixup_memsize();
++	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
++	dt_fixup_mac_addresses(&bd.bi_enetaddr);
 +}
 +
-+static int pasemi_alloc_rx_chan(void)
++
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++		   unsigned long r6, unsigned long r7)
 +{
-+	int bit;
-+retry:
-+	bit = find_first_bit(rxch_free, MAX_RXCH);
-+	if (bit >= MAX_TXCH)
-+		return -ENOSPC;
-+	if (!test_and_clear_bit(bit, rxch_free))
-+		goto retry;
++	CUBOOT_INIT();
 +
-+	return bit;
++	platform_ops.fixups = warp_fixups;
++	platform_ops.exit = ibm44x_dbcr_reset;
++	fdt_init(_dtb_start);
++	serial_console_init();
 +}
+diff --git a/arch/powerpc/boot/dcr.h b/arch/powerpc/boot/dcr.h
+index 83b88aa..95b9f53 100644
+--- a/arch/powerpc/boot/dcr.h
++++ b/arch/powerpc/boot/dcr.h
+@@ -14,12 +14,20 @@
+ #define DCRN_SDRAM0_CFGADDR				0x010
+ #define DCRN_SDRAM0_CFGDATA				0x011
+ 
++#define SDRAM0_READ(offset) ({\
++	mtdcr(DCRN_SDRAM0_CFGADDR, offset); \
++	mfdcr(DCRN_SDRAM0_CFGDATA); })
++#define SDRAM0_WRITE(offset, data) ({\
++	mtdcr(DCRN_SDRAM0_CFGADDR, offset); \
++	mtdcr(DCRN_SDRAM0_CFGDATA, data); })
 +
-+static void pasemi_free_rx_chan(int chan)
+ #define 	SDRAM0_B0CR				0x40
+ #define 	SDRAM0_B1CR				0x44
+ #define 	SDRAM0_B2CR				0x48
+ #define 	SDRAM0_B3CR				0x4c
+ 
+-static const unsigned long sdram_bxcr[] = { SDRAM0_B0CR, SDRAM0_B1CR, SDRAM0_B2CR, SDRAM0_B3CR };
++static const unsigned long sdram_bxcr[] = { SDRAM0_B0CR, SDRAM0_B1CR,
++					    SDRAM0_B2CR, SDRAM0_B3CR };
+ 
+ #define			SDRAM_CONFIG_BANK_ENABLE        0x00000001
+ #define			SDRAM_CONFIG_SIZE_MASK          0x000e0000
+@@ -138,5 +146,54 @@ static const unsigned long sdram_bxcr[] = { SDRAM0_B0CR, SDRAM0_B1CR, SDRAM0_B2C
+ #define DCRN_CPC0_PLLMR 0xb0
+ #define DCRN_405_CPC0_CR0 0xb1
+ #define DCRN_405_CPC0_CR1 0xb2
++#define DCRN_405_CPC0_PSR 0xb4
++
++/* 405EP Clocking/Power Management/Chip Control regs */
++#define DCRN_CPC0_PLLMR0  0xf0
++#define DCRN_CPC0_PLLMR1  0xf4
++#define DCRN_CPC0_UCR     0xf5
++
++/* 440GX Clock control etc */
++
++
++#define DCRN_CPR0_CLKUPD				0x020
++#define DCRN_CPR0_PLLC					0x040
++#define DCRN_CPR0_PLLD					0x060
++#define DCRN_CPR0_PRIMAD				0x080
++#define DCRN_CPR0_PRIMBD				0x0a0
++#define DCRN_CPR0_OPBD					0x0c0
++#define DCRN_CPR0_PERD					0x0e0
++#define DCRN_CPR0_MALD					0x100
++
++#define DCRN_SDR0_CONFIG_ADDR 	0xe
++#define DCRN_SDR0_CONFIG_DATA	0xf
++
++/* SDR read/write helper macros */
++#define SDR0_READ(offset) ({\
++	mtdcr(DCRN_SDR0_CONFIG_ADDR, offset); \
++	mfdcr(DCRN_SDR0_CONFIG_DATA); })
++#define SDR0_WRITE(offset, data) ({\
++	mtdcr(DCRN_SDR0_CONFIG_ADDR, offset); \
++	mtdcr(DCRN_SDR0_CONFIG_DATA, data); })
++
++#define DCRN_SDR0_UART0		0x0120
++#define DCRN_SDR0_UART1		0x0121
++#define DCRN_SDR0_UART2		0x0122
++#define DCRN_SDR0_UART3		0x0123
++
++
++/* CPRs read/write helper macros - based off include/asm-ppc/ibm44x.h */
++
++#define DCRN_CPR0_CFGADDR				0xc
++#define DCRN_CPR0_CFGDATA				0xd
++
++#define CPR0_READ(offset) ({\
++	mtdcr(DCRN_CPR0_CFGADDR, offset); \
++	mfdcr(DCRN_CPR0_CFGDATA); })
++#define CPR0_WRITE(offset, data) ({\
++	mtdcr(DCRN_CPR0_CFGADDR, offset); \
++	mtdcr(DCRN_CPR0_CFGDATA, data); })
++
++
+ 
+ #endif	/* _PPC_BOOT_DCR_H_ */
+diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
+index e5dfe44..60f561e 100644
+--- a/arch/powerpc/boot/devtree.c
++++ b/arch/powerpc/boot/devtree.c
+@@ -88,6 +88,20 @@ void dt_fixup_clock(const char *path, u32 freq)
+ 	}
+ }
+ 
++void dt_fixup_mac_address_by_alias(const char *alias, const u8 *addr)
 +{
-+	BUG_ON(test_bit(chan, rxch_free));
-+	set_bit(chan, rxch_free);
++	void *devp = find_node_by_alias(alias);
++
++	if (devp) {
++		printf("%s: local-mac-address <-"
++		       " %02x:%02x:%02x:%02x:%02x:%02x\n\r", alias,
++		       addr[0], addr[1], addr[2],
++		       addr[3], addr[4], addr[5]);
++
++		setprop(devp, "local-mac-address", addr, 6);
++	}
 +}
 +
-+/* pasemi_dma_alloc_chan - Allocate a DMA channel
-+ * @type: Type of channel to allocate
-+ * @total_size: Total size of structure to allocate (to allow for more
-+ *		room behind the structure to be used by the client)
-+ * @offset: Offset in bytes from start of the total structure to the beginning
-+ *	    of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
-+ *	    not the first member of the client structure.
+ void dt_fixup_mac_address(u32 index, const u8 *addr)
+ {
+ 	void *devp = find_node_by_prop_value(NULL, "linux,network-index",
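
dt_fixup_mac_address_by_alias() above resolves the target node through an alias (for example "ethernet0") rather than the linux,network-index property used by dt_fixup_mac_address(), then rewrites local-mac-address in place and logs what it wrote. A hedged usage sketch from a board fixup (alias names and the MAC source fields are placeholders):

	/* Hypothetical fixup: MACs come from a u-boot bd_t-style blob. */
	static void myboard_fixup_macs(void)
	{
		dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
		dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
	}

Because the helper simply returns when find_node_by_alias() finds nothing, it is safe to call for interfaces that a given board variant does not populate.
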
+diff --git a/arch/powerpc/boot/dtc-src/.gitignore b/arch/powerpc/boot/dtc-src/.gitignore
+new file mode 100644
+index 0000000..a7c3f94
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/.gitignore
+@@ -0,0 +1,3 @@
++dtc-lexer.lex.c
++dtc-parser.tab.c
++dtc-parser.tab.h
+diff --git a/arch/powerpc/boot/dtc-src/Makefile.dtc b/arch/powerpc/boot/dtc-src/Makefile.dtc
+new file mode 100644
+index 0000000..d607fdb
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/Makefile.dtc
+@@ -0,0 +1,25 @@
++# Makefile.dtc
++#
++# This is not a complete Makefile of itself.  Instead, it is designed to
++# be easily embeddable into other systems of Makefiles.
++#
++DTC_SRCS = dtc.c flattree.c fstree.c data.c livetree.c treesource.c srcpos.c \
++	checks.c
++DTC_EXTRA = dtc.h srcpos.h
++DTC_LEXFILES = dtc-lexer.l
++DTC_BISONFILES = dtc-parser.y
++
++DTC_LEX_SRCS = $(DTC_LEXFILES:%.l=%.lex.c)
++DTC_BISON_SRCS = $(DTC_BISONFILES:%.y=%.tab.c)
++DTC_BISON_INCLUDES = $(DTC_BISONFILES:%.y=%.tab.h)
++
++DTC_GEN_SRCS = $(DTC_LEX_SRCS) $(DTC_BISON_SRCS)
++DTC_GEN_ALL = $(DTC_GEN_SRCS) $(DTC_BISON_INCLUDES)
++DTC_OBJS = $(DTC_SRCS:%.c=%.o) $(DTC_GEN_SRCS:%.c=%.o)
++
++DTC_CLEANFILES = $(DTC_GEN_ALL)
++
++# We assume the containing Makefile system can do auto-dependencies for most
++# things, but we supply the dependencies on generated header files explicitly
++
++$(addprefix $(DTC_objdir)/,$(DTC_GEN_SRCS:%.c=%.o)): $(addprefix $(DTC_objdir)/,$(DTC_BISON_INCLUDES))
+diff --git a/arch/powerpc/boot/dtc-src/checks.c b/arch/powerpc/boot/dtc-src/checks.c
+new file mode 100644
+index 0000000..2ce961c
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/checks.c
+@@ -0,0 +1,750 @@
++/*
++ * (C) Copyright David Gibson <dwg at au1.ibm.com>, IBM Corporation.  2007.
 + *
-+ * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
-+ * type argument specifies whether it's a RX or TX channel, and in the case
-+ * of TX channels which group it needs to belong to (if any).
 + *
-+ * Returns a pointer to the total structure allocated on success, NULL
-+ * on failure.
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
 + */
-+void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
-+			    int total_size, int offset)
++
++#include "dtc.h"
++
++#ifdef TRACE_CHECKS
++#define TRACE(c, ...) \
++	do { \
++		fprintf(stderr, "=== %s: ", (c)->name); \
++		fprintf(stderr, __VA_ARGS__); \
++		fprintf(stderr, "\n"); \
++	} while (0)
++#else
++#define TRACE(c, fmt, ...)	do { } while (0)
++#endif
++
++enum checklevel {
++	IGNORE = 0,
++	WARN = 1,
++	ERROR = 2,
++};
++
++enum checkstatus {
++	UNCHECKED = 0,
++	PREREQ,
++	PASSED,
++	FAILED,
++};
++
++struct check;
++
++typedef void (*tree_check_fn)(struct check *c, struct node *dt);
++typedef void (*node_check_fn)(struct check *c, struct node *dt, struct node *node);
++typedef void (*prop_check_fn)(struct check *c, struct node *dt,
++			      struct node *node, struct property *prop);
++
++struct check {
++	const char *name;
++	tree_check_fn tree_fn;
++	node_check_fn node_fn;
++	prop_check_fn prop_fn;
++	void *data;
++	enum checklevel level;
++	enum checkstatus status;
++	int inprogress;
++	int num_prereqs;
++	struct check **prereq;
++};
++
++#define CHECK(nm, tfn, nfn, pfn, d, lvl, ...) \
++	static struct check *nm##_prereqs[] = { __VA_ARGS__ }; \
++	static struct check nm = { \
++		.name = #nm, \
++		.tree_fn = (tfn), \
++		.node_fn = (nfn), \
++		.prop_fn = (pfn), \
++		.data = (d), \
++		.level = (lvl), \
++		.status = UNCHECKED, \
++		.num_prereqs = ARRAY_SIZE(nm##_prereqs), \
++		.prereq = nm##_prereqs, \
++	};
++
++#define TREE_CHECK(nm, d, lvl, ...) \
++	CHECK(nm, check_##nm, NULL, NULL, d, lvl, __VA_ARGS__)
++#define NODE_CHECK(nm, d, lvl, ...) \
++	CHECK(nm, NULL, check_##nm, NULL, d, lvl, __VA_ARGS__)
++#define PROP_CHECK(nm, d, lvl, ...) \
++	CHECK(nm, NULL, NULL, check_##nm, d, lvl, __VA_ARGS__)
++#define BATCH_CHECK(nm, lvl, ...) \
++	CHECK(nm, NULL, NULL, NULL, NULL, lvl, __VA_ARGS__)
++
++#ifdef __GNUC__
++static inline void check_msg(struct check *c, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
++#endif
++static inline void check_msg(struct check *c, const char *fmt, ...)
 +{
-+	void *buf;
-+	struct pasemi_dmachan *chan;
-+	int chno;
++	va_list ap;
++	va_start(ap, fmt);
 +
-+	BUG_ON(total_size < sizeof(struct pasemi_dmachan));
++	if ((c->level < WARN) || (c->level <= quiet))
++		return; /* Suppress message */
 +
-+	buf = kzalloc(total_size, GFP_KERNEL);
++	fprintf(stderr, "%s (%s): ",
++		(c->level == ERROR) ? "ERROR" : "Warning", c->name);
++	vfprintf(stderr, fmt, ap);
++	fprintf(stderr, "\n");
++}
 +
-+	if (!buf)
-+		return NULL;
-+	chan = buf + offset;
++#define FAIL(c, ...) \
++	do { \
++		TRACE((c), "\t\tFAILED at %s:%d", __FILE__, __LINE__); \
++		(c)->status = FAILED; \
++		check_msg((c), __VA_ARGS__); \
++	} while (0)
 +
-+	chan->priv = buf;
++static void check_nodes_props(struct check *c, struct node *dt, struct node *node)
++{
++	struct node *child;
++	struct property *prop;
 +
-+	switch (type & (TXCHAN|RXCHAN)) {
-+	case RXCHAN:
-+		chno = pasemi_alloc_rx_chan();
-+		chan->chno = chno;
-+		chan->irq = irq_create_mapping(NULL,
-+					       base_hw_irq + num_txch + chno);
-+		chan->status = &dma_status->rx_sta[chno];
-+		break;
-+	case TXCHAN:
-+		chno = pasemi_alloc_tx_chan(type);
-+		chan->chno = chno;
-+		chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
-+		chan->status = &dma_status->tx_sta[chno];
-+		break;
-+	}
++	TRACE(c, "%s", node->fullpath);
++	if (c->node_fn)
++		c->node_fn(c, dt, node);
 +
-+	chan->chan_type = type;
++	if (c->prop_fn)
++		for_each_property(node, prop) {
++			TRACE(c, "%s\t'%s'", node->fullpath, prop->name);
++			c->prop_fn(c, dt, node, prop);
++		}
 +
-+	return chan;
++	for_each_child(node, child)
++		check_nodes_props(c, dt, child);
 +}
-+EXPORT_SYMBOL(pasemi_dma_alloc_chan);
 +
-+/* pasemi_dma_free_chan - Free a previously allocated channel
-+ * @chan: Channel to free
-+ *
-+ * Frees a previously allocated channel. It will also deallocate any
-+ * descriptor ring associated with the channel, if allocated.
-+ */
-+void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
++static int run_check(struct check *c, struct node *dt)
 +{
-+	if (chan->ring_virt)
-+		pasemi_dma_free_ring(chan);
++	int error = 0;
++	int i;
 +
-+	switch (chan->chan_type & (RXCHAN|TXCHAN)) {
-+	case RXCHAN:
-+		pasemi_free_rx_chan(chan->chno);
-+		break;
-+	case TXCHAN:
-+		pasemi_free_tx_chan(chan->chno);
-+		break;
++	assert(!c->inprogress);
++
++	if (c->status != UNCHECKED)
++		goto out;
++
++	c->inprogress = 1;
++
++	for (i = 0; i < c->num_prereqs; i++) {
++		struct check *prq = c->prereq[i];
++		error |= run_check(prq, dt);
++		if (prq->status != PASSED) {
++			c->status = PREREQ;
++			check_msg(c, "Failed prerequisite '%s'",
++				  c->prereq[i]->name);
++		}
 +	}
 +
-+	kfree(chan->priv);
++	if (c->status != UNCHECKED)
++		goto out;
++
++	if (c->node_fn || c->prop_fn)
++		check_nodes_props(c, dt, dt);
++
++	if (c->tree_fn)
++		c->tree_fn(c, dt);
++	if (c->status == UNCHECKED)
++		c->status = PASSED;
++
++	TRACE(c, "\tCompleted, status %d", c->status);
++
++out:
++	c->inprogress = 0;
++	if ((c->status != PASSED) && (c->level == ERROR))
++		error = 1;
++	return error;
 +}
-+EXPORT_SYMBOL(pasemi_dma_free_chan);
 +
-+/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
-+ * @chan: Channel for which to allocate
-+ * @ring_size: Ring size in 64-bit (8-byte) words
-+ *
-+ * Allocate a descriptor ring for a channel. Returns 0 on success, errno
-+ * on failure. The passed in struct pasemi_dmachan is updated with the
-+ * virtual and DMA addresses of the ring.
++/*
++ * Utility check functions
 + */
-+int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
++
++static void check_is_string(struct check *c, struct node *root,
++			    struct node *node)
 +{
-+	BUG_ON(chan->ring_virt);
++	struct property *prop;
++	char *propname = c->data;
 +
-+	chan->ring_size = ring_size;
++	prop = get_property(node, propname);
++	if (!prop)
++		return; /* Not present, assumed ok */
 +
-+	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
-+					     ring_size * sizeof(u64),
-+					     &chan->ring_dma, GFP_KERNEL);
++	if (!data_is_one_string(prop->val))
++		FAIL(c, "\"%s\" property in %s is not a string",
++		     propname, node->fullpath);
++}
++#define CHECK_IS_STRING(nm, propname, lvl) \
++	CHECK(nm, NULL, check_is_string, NULL, (propname), (lvl))
 +
-+	if (!chan->ring_virt)
-+		return -ENOMEM;
++static void check_is_cell(struct check *c, struct node *root,
++			  struct node *node)
++{
++	struct property *prop;
++	char *propname = c->data;
 +
-+	memset(chan->ring_virt, 0, ring_size * sizeof(u64));
++	prop = get_property(node, propname);
++	if (!prop)
++		return; /* Not present, assumed ok */
 +
-+	return 0;
++	if (prop->val.len != sizeof(cell_t))
++		FAIL(c, "\"%s\" property in %s is not a single cell",
++		     propname, node->fullpath);
 +}
-+EXPORT_SYMBOL(pasemi_dma_alloc_ring);
++#define CHECK_IS_CELL(nm, propname, lvl) \
++	CHECK(nm, NULL, check_is_cell, NULL, (propname), (lvl))
 +
-+/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
-+ * @chan: Channel for which to free the descriptor ring
-+ *
-+ * Frees a previously allocated descriptor ring for a channel.
++/*
++ * Structural check functions
 + */
-+void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
++
++static void check_duplicate_node_names(struct check *c, struct node *dt,
++				       struct node *node)
 +{
-+	BUG_ON(!chan->ring_virt);
++	struct node *child, *child2;
 +
-+	dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
-+			  chan->ring_virt, chan->ring_dma);
-+	chan->ring_virt = NULL;
-+	chan->ring_size = 0;
-+	chan->ring_dma = 0;
++	for_each_child(node, child)
++		for (child2 = child->next_sibling;
++		     child2;
++		     child2 = child2->next_sibling)
++			if (streq(child->name, child2->name))
++				FAIL(c, "Duplicate node name %s",
++				     child->fullpath);
 +}
-+EXPORT_SYMBOL(pasemi_dma_free_ring);
++NODE_CHECK(duplicate_node_names, NULL, ERROR);
 +
-+/* pasemi_dma_start_chan - Start a DMA channel
-+ * @chan: Channel to start
-+ * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
-+ *
-+ * Enables (starts) a DMA channel with optional additional arguments.
-+ */
-+void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
++static void check_duplicate_property_names(struct check *c, struct node *dt,
++					   struct node *node)
 +{
-+	if (chan->chan_type == RXCHAN)
-+		pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
-+				     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
-+	else
-+		pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
-+				     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
++	struct property *prop, *prop2;
++
++	for_each_property(node, prop)
++		for (prop2 = prop->next; prop2; prop2 = prop2->next)
++			if (streq(prop->name, prop2->name))
++				FAIL(c, "Duplicate property name %s in %s",
++				     prop->name, node->fullpath);
 +}
-+EXPORT_SYMBOL(pasemi_dma_start_chan);
++NODE_CHECK(duplicate_property_names, NULL, ERROR);
 +
-+/* pasemi_dma_stop_chan - Stop a DMA channel
-+ * @chan: Channel to stop
-+ *
-+ * Stops (disables) a DMA channel. This is done by setting the ST bit in the
-+ * CMDSTA register and waiting on the ACT (active) bit to clear, then
-+ * finally disabling the whole channel.
-+ *
-+ * This function will only try for a short while for the channel to stop, if
-+ * it doesn't it will return failure.
-+ *
-+ * Returns 1 on success, 0 on failure.
-+ */
-+#define MAX_RETRIES 5000
-+int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
++static void check_explicit_phandles(struct check *c, struct node *root,
++					  struct node *node)
 +{
-+	int reg, retries;
-+	u32 sta;
++	struct property *prop;
++	struct node *other;
++	cell_t phandle;
 +
-+	if (chan->chan_type == RXCHAN) {
-+		reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
-+		pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
-+		for (retries = 0; retries < MAX_RETRIES; retries++) {
-+			sta = pasemi_read_dma_reg(reg);
-+			if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
-+				pasemi_write_dma_reg(reg, 0);
-+				return 1;
-+			}
-+			cond_resched();
-+		}
-+	} else {
-+		reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
-+		pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
-+		for (retries = 0; retries < MAX_RETRIES; retries++) {
-+			sta = pasemi_read_dma_reg(reg);
-+			if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
-+				pasemi_write_dma_reg(reg, 0);
-+				return 1;
-+			}
-+			cond_resched();
-+		}
++	prop = get_property(node, "linux,phandle");
++	if (! prop)
++		return; /* No phandle, that's fine */
++
++	if (prop->val.len != sizeof(cell_t)) {
++		FAIL(c, "%s has bad length (%d) linux,phandle property",
++		     node->fullpath, prop->val.len);
++		return;
 +	}
 +
-+	return 0;
++	phandle = propval_cell(prop);
++	if ((phandle == 0) || (phandle == -1)) {
++		FAIL(c, "%s has invalid linux,phandle value 0x%x",
++		     node->fullpath, phandle);
++		return;
++	}
++
++	other = get_node_by_phandle(root, phandle);
++	if (other) {
++		FAIL(c, "%s has duplicated phandle 0x%x (seen before at %s)",
++		     node->fullpath, phandle, other->fullpath);
++		return;
++	}
++
++	node->phandle = phandle;
 +}
-+EXPORT_SYMBOL(pasemi_dma_stop_chan);
++NODE_CHECK(explicit_phandles, NULL, ERROR);
 +
-+/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
-+ * @chan: Channel to allocate for
-+ * @size: Size of buffer in bytes
-+ * @handle: DMA handle
-+ *
-+ * Allocate a buffer to be used by the DMA engine for read/write,
-+ * similar to dma_alloc_coherent().
-+ *
-+ * Returns the virtual address of the buffer, or NULL in case of failure.
-+ */
-+void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
-+			   dma_addr_t *handle)
++static void check_name_properties(struct check *c, struct node *root,
++				  struct node *node)
 +{
-+	return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
++	struct property *prop;
++
++	prop = get_property(node, "name");
++	if (!prop)
++		return; /* No name property, that's fine */
++
++	if ((prop->val.len != node->basenamelen+1)
++	    || (memcmp(prop->val.val, node->name, node->basenamelen) != 0))
++		FAIL(c, "\"name\" property in %s is incorrect (\"%s\" instead"
++		     " of base node name)", node->fullpath, prop->val.val);
 +}
-+EXPORT_SYMBOL(pasemi_dma_alloc_buf);
++CHECK_IS_STRING(name_is_string, "name", ERROR);
++NODE_CHECK(name_properties, NULL, ERROR, &name_is_string);
 +
-+/* pasemi_dma_free_buf - Free a buffer used for DMA
-+ * @chan: Channel the buffer was allocated for
-+ * @size: Size of buffer in bytes
-+ * @handle: DMA handle
-+ *
-+ * Frees a previously allocated buffer.
++/*
++ * Reference fixup functions
 + */
-+void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
-+			 dma_addr_t *handle)
++
++static void fixup_phandle_references(struct check *c, struct node *dt,
++				     struct node *node, struct property *prop)
 +{
-+	dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
++      struct marker *m = prop->val.markers;
++      struct node *refnode;
++      cell_t phandle;
++
++      for_each_marker_of_type(m, REF_PHANDLE) {
++	      assert(m->offset + sizeof(cell_t) <= prop->val.len);
++
++	      refnode = get_node_by_ref(dt, m->ref);
++	      if (! refnode) {
++		      FAIL(c, "Reference to non-existent node or label \"%s\"\n",
++			   m->ref);
++		      continue;
++	      }
++
++	      phandle = get_node_phandle(dt, refnode);
++	      *((cell_t *)(prop->val.val + m->offset)) = cpu_to_be32(phandle);
++      }
 +}
-+EXPORT_SYMBOL(pasemi_dma_free_buf);
++CHECK(phandle_references, NULL, NULL, fixup_phandle_references, NULL, ERROR,
++      &duplicate_node_names, &explicit_phandles);
 +
-+static void *map_onedev(struct pci_dev *p, int index)
++static void fixup_path_references(struct check *c, struct node *dt,
++				  struct node *node, struct property *prop)
 +{
-+	struct device_node *dn;
-+	void __iomem *ret;
++	struct marker *m = prop->val.markers;
++	struct node *refnode;
++	char *path;
 +
-+	dn = pci_device_to_OF_node(p);
-+	if (!dn)
-+		goto fallback;
++	for_each_marker_of_type(m, REF_PATH) {
++		assert(m->offset <= prop->val.len);
 +
-+	ret = of_iomap(dn, index);
-+	if (!ret)
-+		goto fallback;
++		refnode = get_node_by_ref(dt, m->ref);
++		if (!refnode) {
++			FAIL(c, "Reference to non-existent node or label \"%s\"\n",
++			     m->ref);
++			continue;
++		}
 +
-+	return ret;
-+fallback:
-+	/* This is hardcoded and ugly, but we have some firmware versions
-+	 * that don't provide the register space in the device tree. Luckily
-+	 * they are at well-known locations so we can just do the math here.
-+	 */
-+	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
++		path = refnode->fullpath;
++		prop->val = data_insert_at_marker(prop->val, m, path,
++						  strlen(path) + 1);
++	}
 +}
++CHECK(path_references, NULL, NULL, fixup_path_references, NULL, ERROR,
++      &duplicate_node_names);
 +
-+/* pasemi_dma_init - Initialize the PA Semi DMA library
-+ *
-+ * This function initializes the DMA library. It must be called before
-+ * any other function in the library.
-+ *
-+ * Returns 0 on success, errno on failure.
++/*
++ * Semantic checks
 + */
-+int pasemi_dma_init(void)
++CHECK_IS_CELL(address_cells_is_cell, "#address-cells", WARN);
++CHECK_IS_CELL(size_cells_is_cell, "#size-cells", WARN);
++CHECK_IS_CELL(interrupt_cells_is_cell, "#interrupt-cells", WARN);
++
++CHECK_IS_STRING(device_type_is_string, "device_type", WARN);
++CHECK_IS_STRING(model_is_string, "model", WARN);
++CHECK_IS_STRING(status_is_string, "status", WARN);
++
++static void fixup_addr_size_cells(struct check *c, struct node *dt,
++				  struct node *node)
 +{
-+	static spinlock_t init_lock = SPIN_LOCK_UNLOCKED;
-+	struct pci_dev *iob_pdev;
-+	struct pci_dev *pdev;
-+	struct resource res;
-+	struct device_node *dn;
-+	int i, intf, err = 0;
-+	u32 tmp;
++	struct property *prop;
 +
-+	if (!machine_is(pasemi))
-+		return -ENODEV;
++	node->addr_cells = -1;
++	node->size_cells = -1;
 +
-+	spin_lock(&init_lock);
++	prop = get_property(node, "#address-cells");
++	if (prop)
++		node->addr_cells = propval_cell(prop);
++
++	prop = get_property(node, "#size-cells");
++	if (prop)
++		node->size_cells = propval_cell(prop);
++}
++CHECK(addr_size_cells, NULL, fixup_addr_size_cells, NULL, NULL, WARN,
++      &address_cells_is_cell, &size_cells_is_cell);
++
++#define node_addr_cells(n) \
++	(((n)->addr_cells == -1) ? 2 : (n)->addr_cells)
++#define node_size_cells(n) \
++	(((n)->size_cells == -1) ? 1 : (n)->size_cells)
 +
-+	/* Make sure we haven't already initialized */
-+	if (dma_pdev)
-+		goto out;
++static void check_reg_format(struct check *c, struct node *dt,
++			     struct node *node)
++{
++	struct property *prop;
++	int addr_cells, size_cells, entrylen;
 +
-+	iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
-+	if (!iob_pdev) {
-+		BUG();
-+		printk(KERN_WARNING "Can't find I/O Bridge\n");
-+		err = -ENODEV;
-+		goto out;
-+	}
-+	iob_regs = map_onedev(iob_pdev, 0);
++	prop = get_property(node, "reg");
++	if (!prop)
++		return; /* No "reg", that's fine */
 +
-+	dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
-+	if (!dma_pdev) {
-+		BUG();
-+		printk(KERN_WARNING "Can't find DMA controller\n");
-+		err = -ENODEV;
-+		goto out;
++	if (!node->parent) {
++		FAIL(c, "Root node has a \"reg\" property");
++		return;
 +	}
-+	dma_regs = map_onedev(dma_pdev, 0);
-+	base_hw_irq = virq_to_hw(dma_pdev->irq);
 +
-+	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
-+	num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;
++	if (prop->val.len == 0)
++		FAIL(c, "\"reg\" property in %s is empty", node->fullpath);
 +
-+	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
-+	num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;
++	addr_cells = node_addr_cells(node->parent);
++	size_cells = node_size_cells(node->parent);
++	entrylen = (addr_cells + size_cells) * sizeof(cell_t);
 +
-+	intf = 0;
-+	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
-+	     pdev;
-+	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
-+		mac_regs[intf++] = map_onedev(pdev, 0);
++	if ((prop->val.len % entrylen) != 0)
++		FAIL(c, "\"reg\" property in %s has invalid length (%d bytes) "
++		     "(#address-cells == %d, #size-cells == %d)",
++		     node->fullpath, prop->val.len, addr_cells, size_cells);
++}
++NODE_CHECK(reg_format, NULL, WARN, &addr_size_cells);
 +
-+	pci_dev_put(pdev);
++static void check_ranges_format(struct check *c, struct node *dt,
++				struct node *node)
++{
++	struct property *prop;
++	int c_addr_cells, p_addr_cells, c_size_cells, p_size_cells, entrylen;
 +
-+	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
-+	     pdev;
-+	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
-+		mac_regs[intf++] = map_onedev(pdev, 0);
++	prop = get_property(node, "ranges");
++	if (!prop)
++		return;
 +
-+	pci_dev_put(pdev);
++	if (!node->parent) {
++		FAIL(c, "Root node has a \"ranges\" property");
++		return;
++	}
 +
-+	dn = pci_device_to_OF_node(iob_pdev);
-+	if (dn)
-+		err = of_address_to_resource(dn, 1, &res);
-+	if (!dn || err) {
-+		/* Fallback for old firmware */
-+		res.start = 0xfd800000;
-+		res.end = res.start + 0x1000;
++	p_addr_cells = node_addr_cells(node->parent);
++	p_size_cells = node_size_cells(node->parent);
++	c_addr_cells = node_addr_cells(node);
++	c_size_cells = node_size_cells(node);
++	entrylen = (p_addr_cells + c_addr_cells + c_size_cells) * sizeof(cell_t);
++
++	if (prop->val.len == 0) {
++		if (p_addr_cells != c_addr_cells)
++			FAIL(c, "%s has empty \"ranges\" property but its "
++			     "#address-cells (%d) differs from %s (%d)",
++			     node->fullpath, c_addr_cells, node->parent->fullpath,
++			     p_addr_cells);
++		if (p_size_cells != c_size_cells)
++			FAIL(c, "%s has empty \"ranges\" property but its "
++			     "#size-cells (%d) differs from %s (%d)",
++			     node->fullpath, c_size_cells, node->parent->fullpath,
++			     p_size_cells);
++	} else if ((prop->val.len % entrylen) != 0) {
++		FAIL(c, "\"ranges\" property in %s has invalid length (%d bytes) "
++		     "(parent #address-cells == %d, child #address-cells == %d, "
++		     "#size-cells == %d)", node->fullpath, prop->val.len,
++		     p_addr_cells, c_addr_cells, c_size_cells);
 +	}
-+	dma_status = __ioremap(res.start, res.end-res.start, 0);
-+	pci_dev_put(iob_pdev);
++}
++NODE_CHECK(ranges_format, NULL, WARN, &addr_size_cells);
 +
-+	for (i = 0; i < MAX_TXCH; i++)
-+		__set_bit(i, txch_free);
++/*
++ * Style checks
++ */
++static void check_avoid_default_addr_size(struct check *c, struct node *dt,
++					  struct node *node)
++{
++	struct property *reg, *ranges;
 +
-+	for (i = 0; i < MAX_RXCH; i++)
-+		__set_bit(i, rxch_free);
++	if (!node->parent)
++		return; /* Ignore root node */
 +
-+	printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
-+		"(%d tx, %d rx channels)\n", num_txch, num_rxch);
++	reg = get_property(node, "reg");
++	ranges = get_property(node, "ranges");
 +
-+out:
-+	spin_unlock(&init_lock);
-+	return err;
++	if (!reg && !ranges)
++		return;
++
++	if ((node->parent->addr_cells == -1))
++		FAIL(c, "Relying on default #address-cells value for %s",
++		     node->fullpath);
++
++	if ((node->parent->size_cells == -1))
++		FAIL(c, "Relying on default #size-cells value for %s",
++		     node->fullpath);
 +}
-+EXPORT_SYMBOL(pasemi_dma_init);
-diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
-index 516acab..58b344c 100644
---- a/arch/powerpc/platforms/pasemi/pasemi.h
-+++ b/arch/powerpc/platforms/pasemi/pasemi.h
-@@ -9,6 +9,7 @@ extern void __devinit pas_pci_dma_dev_setup(struct pci_dev *dev);
- extern void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
- 
- extern void __init alloc_iobmap_l2(void);
-+extern void __init pasemi_map_registers(void);
- 
- /* Power savings modes, implemented in asm */
- extern void idle_spin(void);
-diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
-index 999f5e1..84c0d4e 100644
---- a/arch/powerpc/platforms/powermac/pic.c
-+++ b/arch/powerpc/platforms/powermac/pic.c
-@@ -663,7 +663,7 @@ static int pmacpic_resume(struct sys_device *sysdev)
- #endif /* CONFIG_PM && CONFIG_PPC32 */
- 
- static struct sysdev_class pmacpic_sysclass = {
--	set_kset_name("pmac_pic"),
-+	.name = "pmac_pic",
- };
- 
- static struct sys_device device_pmacpic = {
-diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
-index 412e6b4..c4ad54e 100644
---- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
-+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
-@@ -153,7 +153,7 @@ static int pseries_add_processor(struct device_node *np)
- 	for (i = 0; i < nthreads; i++)
- 		cpu_set(i, tmp);
- 
--	lock_cpu_hotplug();
-+	cpu_maps_update_begin();
- 
- 	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
- 
-@@ -190,7 +190,7 @@ static int pseries_add_processor(struct device_node *np)
- 	}
- 	err = 0;
- out_unlock:
--	unlock_cpu_hotplug();
-+	cpu_maps_update_done();
- 	return err;
- }
- 
-@@ -211,7 +211,7 @@ static void pseries_remove_processor(struct device_node *np)
- 
- 	nthreads = len / sizeof(u32);
- 
--	lock_cpu_hotplug();
-+	cpu_maps_update_begin();
- 	for (i = 0; i < nthreads; i++) {
- 		for_each_present_cpu(cpu) {
- 			if (get_hard_smp_processor_id(cpu) != intserv[i])
-@@ -225,7 +225,7 @@ static void pseries_remove_processor(struct device_node *np)
- 			printk(KERN_WARNING "Could not find cpu to remove "
- 			       "with physical id 0x%x\n", intserv[i]);
- 	}
--	unlock_cpu_hotplug();
-+	cpu_maps_update_done();
- }
- 
- static int pseries_smp_notifier(struct notifier_block *nb,
-diff --git a/arch/powerpc/platforms/pseries/power.c b/arch/powerpc/platforms/pseries/power.c
-index 73e6902..e95fc15 100644
---- a/arch/powerpc/platforms/pseries/power.c
-+++ b/arch/powerpc/platforms/pseries/power.c
-@@ -28,13 +28,15 @@
- 
- unsigned long rtas_poweron_auto; /* default and normal state is 0 */
- 
--static ssize_t auto_poweron_show(struct kset *kset, char *buf)
-+static ssize_t auto_poweron_show(struct kobject *kobj,
-+				 struct kobj_attribute *attr, char *buf)
- {
-         return sprintf(buf, "%lu\n", rtas_poweron_auto);
- }
- 
--static ssize_t
--auto_poweron_store(struct kset *kset, const char *buf, size_t n)
-+static ssize_t auto_poweron_store(struct kobject *kobj,
-+				  struct kobj_attribute *attr,
-+				  const char *buf, size_t n)
- {
- 	int ret;
- 	unsigned long ups_restart;
-@@ -47,17 +49,11 @@ auto_poweron_store(struct kset *kset, const char *buf, size_t n)
- 	return -EINVAL;
- }
- 
--static struct subsys_attribute auto_poweron_attr = {
--        .attr   = {
--                .name = __stringify(auto_poweron),
--                .mode = 0644,
--        },
--        .show   = auto_poweron_show,
--        .store  = auto_poweron_store,
--};
-+static struct kobj_attribute auto_poweron_attr =
-+	__ATTR(auto_poweron, 0644, auto_poweron_show, auto_poweron_store);
- 
- #ifndef CONFIG_PM
--decl_subsys(power,NULL,NULL);
-+struct kobject *power_kobj;
- 
- static struct attribute *g[] = {
-         &auto_poweron_attr.attr,
-@@ -70,18 +66,16 @@ static struct attribute_group attr_group = {
- 
- static int __init pm_init(void)
- {
--        int error = subsystem_register(&power_subsys);
--        if (!error)
--                error = sysfs_create_group(&power_subsys.kobj, &attr_group);
--        return error;
-+	power_kobj = kobject_create_and_add("power", NULL);
-+	if (!power_kobj)
-+		return -ENOMEM;
-+	return sysfs_create_group(power_kobj, &attr_group);
- }
- core_initcall(pm_init);
- #else
--extern struct kset power_subsys;
--
- static int __init apo_pm_init(void)
- {
--	return (subsys_create_file(&power_subsys, &auto_poweron_attr));
-+	return (sysfs_create_file(power_kobj, &auto_poweron_attr));
- }
- __initcall(apo_pm_init);
- #endif
-diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
-index 73401c8..e3078ce 100644
---- a/arch/powerpc/platforms/pseries/rtasd.c
-+++ b/arch/powerpc/platforms/pseries/rtasd.c
-@@ -382,7 +382,7 @@ static void do_event_scan_all_cpus(long delay)
- {
- 	int cpu;
- 
--	lock_cpu_hotplug();
-+	get_online_cpus();
- 	cpu = first_cpu(cpu_online_map);
- 	for (;;) {
- 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
-@@ -390,15 +390,15 @@ static void do_event_scan_all_cpus(long delay)
- 		set_cpus_allowed(current, CPU_MASK_ALL);
- 
- 		/* Drop hotplug lock, and sleep for the specified delay */
--		unlock_cpu_hotplug();
-+		put_online_cpus();
- 		msleep_interruptible(delay);
--		lock_cpu_hotplug();
-+		get_online_cpus();
- 
- 		cpu = next_cpu(cpu, cpu_online_map);
- 		if (cpu == NR_CPUS)
- 			break;
- 	}
--	unlock_cpu_hotplug();
-+	put_online_cpus();
- }
- 
- static int rtasd(void *unused)
-diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
-index 05a56e5..e898ff4 100644
---- a/arch/powerpc/sysdev/ipic.c
-+++ b/arch/powerpc/sysdev/ipic.c
-@@ -725,7 +725,7 @@ unsigned int ipic_get_irq(void)
- }
- 
- static struct sysdev_class ipic_sysclass = {
--	set_kset_name("ipic"),
-+	.name = "ipic",
- };
- 
- static struct sys_device device_ipic = {
-diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
-index e479388..212a94f 100644
---- a/arch/powerpc/sysdev/mpic.c
-+++ b/arch/powerpc/sysdev/mpic.c
-@@ -1584,7 +1584,7 @@ static struct sysdev_class mpic_sysclass = {
- 	.resume = mpic_resume,
- 	.suspend = mpic_suspend,
- #endif
--	set_kset_name("mpic"),
-+	.name = "mpic",
- };
- 
- static int mpic_init_sys(void)
-diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
-index 548a320..4316f5a 100644
---- a/arch/powerpc/sysdev/mv64x60_dev.c
-+++ b/arch/powerpc/sysdev/mv64x60_dev.c
-@@ -361,12 +361,6 @@ static int __init mv64x60_i2c_device_setup(struct device_node *np, int id)
- 	else
- 		pdata.timeout = 1000;	/* 1 second */
- 
--	prop = of_get_property(np, "retries", NULL);
--	if (prop)
--		pdata.retries = *prop;
--	else
--		pdata.retries = 1;
--
- 	pdev = platform_device_alloc(MV64XXX_I2C_CTLR_NAME, id);
- 	if (!pdev)
- 		return -ENOMEM;
-diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
-index e1c0fd6..f59444d 100644
---- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
-+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
-@@ -483,7 +483,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
- }
- 
- static struct sysdev_class qe_ic_sysclass = {
--	set_kset_name("qe_ic"),
-+	.name = "qe_ic",
- };
- 
- static struct sys_device device_qe_ic = {
-diff --git a/arch/ppc/8260_io/enet.c b/arch/ppc/8260_io/enet.c
-index 615b658..06bb5b7 100644
---- a/arch/ppc/8260_io/enet.c
-+++ b/arch/ppc/8260_io/enet.c
-@@ -272,7 +272,7 @@ scc_enet_timeout(struct net_device *dev)
-  * This is called from the CPM handler, not the MPC core interrupt.
-  */
- static irqreturn_t
--scc_enet_interrupt(int irq, void * dev_id)
-+scc_enet_interrupt(int irq, void *dev_id)
- {
- 	struct	net_device *dev = dev_id;
- 	volatile struct	scc_enet_private *cep;
-@@ -280,7 +280,7 @@ scc_enet_interrupt(int irq, void * dev_id)
- 	ushort	int_events;
- 	int	must_restart;
- 
--	cep = (struct scc_enet_private *)dev->priv;
-+	cep = dev->priv;
- 
- 	/* Get the interrupt events that caused us to be here.
- 	*/
-diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
-index 6f3ed6a..a3a27da 100644
---- a/arch/ppc/8260_io/fcc_enet.c
-+++ b/arch/ppc/8260_io/fcc_enet.c
-@@ -524,7 +524,7 @@ fcc_enet_timeout(struct net_device *dev)
- 
- /* The interrupt handler. */
- static irqreturn_t
--fcc_enet_interrupt(int irq, void * dev_id)
-+fcc_enet_interrupt(int irq, void *dev_id)
- {
- 	struct	net_device *dev = dev_id;
- 	volatile struct	fcc_enet_private *cep;
-@@ -532,7 +532,7 @@ fcc_enet_interrupt(int irq, void * dev_id)
- 	ushort	int_events;
- 	int	must_restart;
- 
--	cep = (struct fcc_enet_private *)dev->priv;
-+	cep = dev->priv;
- 
- 	/* Get the interrupt events that caused us to be here.
- 	*/
-diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
-index 98c1212..52b64fc 100644
---- a/arch/ppc/kernel/vmlinux.lds.S
-+++ b/arch/ppc/kernel/vmlinux.lds.S
-@@ -97,14 +97,14 @@ SECTIONS
-   __init_begin = .;
-   .init.text : {
- 	_sinittext = .;
--	*(.init.text)
-+	INIT_TEXT
- 	_einittext = .;
-   }
-   /* .exit.text is discarded at runtime, not link time,
-      to deal with references from __bug_table */
--  .exit.text : { *(.exit.text) }
-+  .exit.text : { EXIT_TEXT }
-   .init.data : {
--    *(.init.data);
-+    INIT_DATA
-     __vtop_table_begin = .;
-     *(.vtop_fixup);
-     __vtop_table_end = .;
-@@ -164,6 +164,6 @@ SECTIONS
-   /* Sections to be discarded. */
-   /DISCARD/ : {
-     *(.exitcall.exit)
--    *(.exit.data)
-+    EXIT_DATA
-   }
- }
-diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.c b/arch/ppc/platforms/83xx/mpc834x_sys.c
-index b84f8df..cb0a749 100644
---- a/arch/ppc/platforms/83xx/mpc834x_sys.c
-+++ b/arch/ppc/platforms/83xx/mpc834x_sys.c
-@@ -224,26 +224,6 @@ mpc834x_sys_init_IRQ(void)
- 	ipic_set_default_priority();
- }
- 
--#if defined(CONFIG_I2C_MPC) && defined(CONFIG_SENSORS_DS1374)
--extern ulong	ds1374_get_rtc_time(void);
--extern int	ds1374_set_rtc_time(ulong);
--
--static int __init
--mpc834x_rtc_hookup(void)
--{
--	struct timespec	tv;
--
--	ppc_md.get_rtc_time = ds1374_get_rtc_time;
--	ppc_md.set_rtc_time = ds1374_set_rtc_time;
--
--	tv.tv_nsec = 0;
--	tv.tv_sec = (ppc_md.get_rtc_time)();
--	do_settimeofday(&tv);
--
--	return 0;
--}
--late_initcall(mpc834x_rtc_hookup);
--#endif
- static __inline__ void
- mpc834x_sys_set_bat(void)
- {
-diff --git a/arch/ppc/platforms/85xx/tqm85xx.c b/arch/ppc/platforms/85xx/tqm85xx.c
-index 4ee2bd1..27ce389 100644
---- a/arch/ppc/platforms/85xx/tqm85xx.c
-+++ b/arch/ppc/platforms/85xx/tqm85xx.c
-@@ -258,27 +258,6 @@ int tqm85xx_show_cpuinfo(struct seq_file *m)
- 	return 0;
- }
- 
--#if defined(CONFIG_I2C) && defined(CONFIG_SENSORS_DS1337)
--extern ulong ds1337_get_rtc_time(void);
--extern int ds1337_set_rtc_time(unsigned long nowtime);
--
--static int __init
--tqm85xx_rtc_hookup(void)
--{
--	struct timespec	tv;
--
--        ppc_md.set_rtc_time = ds1337_set_rtc_time;
--        ppc_md.get_rtc_time = ds1337_get_rtc_time;
--
--	tv.tv_nsec = 0;
--	tv.tv_sec = (ppc_md.get_rtc_time)();
--	do_settimeofday(&tv);
--
--	return 0;
--}
--late_initcall(tqm85xx_rtc_hookup);
--#endif
--
- #ifdef CONFIG_PCI
- /*
-  * interrupt routing
-diff --git a/arch/ppc/platforms/katana.c b/arch/ppc/platforms/katana.c
-index 52f63e6..fe6e88c 100644
---- a/arch/ppc/platforms/katana.c
-+++ b/arch/ppc/platforms/katana.c
-@@ -838,27 +838,6 @@ katana_find_end_of_memory(void)
- 	return bdp->bi_memsize;
- }
- 
--#if defined(CONFIG_I2C_MV64XXX) && defined(CONFIG_SENSORS_M41T00)
--extern ulong	m41t00_get_rtc_time(void);
--extern int	m41t00_set_rtc_time(ulong);
--
--static int __init
--katana_rtc_hookup(void)
--{
--	struct timespec	tv;
--
--	ppc_md.get_rtc_time = m41t00_get_rtc_time;
--	ppc_md.set_rtc_time = m41t00_set_rtc_time;
--
--	tv.tv_nsec = 0;
--	tv.tv_sec = (ppc_md.get_rtc_time)();
--	do_settimeofday(&tv);
--
--	return 0;
--}
--late_initcall(katana_rtc_hookup);
--#endif
--
- #if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
- static void __init
- katana_map_io(void)
-diff --git a/arch/ppc/syslib/ipic.c b/arch/ppc/syslib/ipic.c
-index 9192777..4f163e2 100644
---- a/arch/ppc/syslib/ipic.c
-+++ b/arch/ppc/syslib/ipic.c
-@@ -614,7 +614,7 @@ int ipic_get_irq(void)
- }
- 
- static struct sysdev_class ipic_sysclass = {
--	set_kset_name("ipic"),
-+	.name = "ipic",
- };
- 
- static struct sys_device device_ipic = {
-diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
-index 2744b8a..90fe904 100644
---- a/arch/ppc/syslib/mv64x60.c
-+++ b/arch/ppc/syslib/mv64x60.c
-@@ -411,7 +411,6 @@ static struct mv64xxx_i2c_pdata mv64xxx_i2c_pdata = {
- 	.freq_m			= 8,
- 	.freq_n			= 3,
- 	.timeout		= 1000, /* Default timeout of 1 second */
--	.retries		= 1,
- };
- 
- static struct resource mv64xxx_i2c_resources[] = {
-diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
-index 18ec947..da36522 100644
---- a/arch/ppc/syslib/open_pic.c
-+++ b/arch/ppc/syslib/open_pic.c
-@@ -1043,7 +1043,7 @@ int openpic_resume(struct sys_device *sysdev)
- #endif /* CONFIG_PM */
- 
- static struct sysdev_class openpic_sysclass = {
--	set_kset_name("openpic"),
-+	.name = "openpic",
- };
- 
- static struct sys_device device_openpic = {
-diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c
-index d585207..449075a 100644
---- a/arch/ppc/syslib/open_pic2.c
-+++ b/arch/ppc/syslib/open_pic2.c
-@@ -666,7 +666,7 @@ int openpic2_resume(struct sys_device *sysdev)
- 
- /* HACK ALERT */
- static struct sysdev_class openpic2_sysclass = {
--	set_kset_name("openpic2"),
-+	.name = "openpic2",
- };
- 
- static struct sys_device device_openpic2 = {
-diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
-index 1330061..6ef54d2 100644
---- a/arch/s390/Kconfig
-+++ b/arch/s390/Kconfig
-@@ -276,9 +276,6 @@ source "kernel/Kconfig.preempt"
- 
- source "mm/Kconfig"
- 
--config HOLES_IN_ZONE
--	def_bool y
--
- comment "I/O subsystem configuration"
- 
- config MACHCHK_WARNING
-diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
-deleted file mode 100644
-index d1defbb..0000000
---- a/arch/s390/crypto/Kconfig
-+++ /dev/null
-@@ -1,60 +0,0 @@
--config CRYPTO_SHA1_S390
--	tristate "SHA1 digest algorithm"
--	depends on S390
--	select CRYPTO_ALGAPI
--	help
--	  This is the s390 hardware accelerated implementation of the
--	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
--
--config CRYPTO_SHA256_S390
--	tristate "SHA256 digest algorithm"
--	depends on S390
--	select CRYPTO_ALGAPI
--	help
--	  This is the s390 hardware accelerated implementation of the
--	  SHA256 secure hash standard (DFIPS 180-2).
--
--	  This version of SHA implements a 256 bit hash with 128 bits of
--	  security against collision attacks.
--
--config CRYPTO_DES_S390
--	tristate "DES and Triple DES cipher algorithms"
--	depends on S390
--	select CRYPTO_ALGAPI
--	select CRYPTO_BLKCIPHER
--	help
--	  This us the s390 hardware accelerated implementation of the
--	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
--
--config CRYPTO_AES_S390
--	tristate "AES cipher algorithms"
--	depends on S390
--	select CRYPTO_ALGAPI
--	select CRYPTO_BLKCIPHER
--	help
--	  This is the s390 hardware accelerated implementation of the
--	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
--	  algorithm.
--
--	  Rijndael appears to be consistently a very good performer in
--	  both hardware and software across a wide range of computing
--	  environments regardless of its use in feedback or non-feedback
--	  modes. Its key setup time is excellent, and its key agility is
--	  good. Rijndael's very low memory requirements make it very well
--	  suited for restricted-space environments, in which it also
--	  demonstrates excellent performance. Rijndael's operations are
--	  among the easiest to defend against power and timing attacks.
--
--	  On s390 the System z9-109 currently only supports the key size
--	  of 128 bit.
--
--config S390_PRNG
--	tristate "Pseudo random number generator device driver"
--	depends on S390
--	default "m"
--	help
--	  Select this option if you want to use the s390 pseudo random number
--	  generator. The PRNG is part of the cryptographic processor functions
--	  and uses triple-DES to generate secure random numbers like the
--	  ANSI X9.17 standard. The PRNG is usable via the char device
--	  /dev/prandom.
-diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
-index 5126696..a3f67f8 100644
---- a/arch/s390/crypto/aes_s390.c
-+++ b/arch/s390/crypto/aes_s390.c
-@@ -6,6 +6,7 @@
-  * s390 Version:
-  *   Copyright IBM Corp. 2005,2007
-  *   Author(s): Jan Glauber (jang at de.ibm.com)
-+ *		Sebastian Siewior (sebastian at breakpoint.cc> SW-Fallback
-  *
-  * Derived from "crypto/aes_generic.c"
-  *
-@@ -16,17 +17,13 @@
-  *
-  */
- 
-+#include <crypto/aes.h>
- #include <crypto/algapi.h>
-+#include <linux/err.h>
- #include <linux/module.h>
- #include <linux/init.h>
- #include "crypt_s390.h"
- 
--#define AES_MIN_KEY_SIZE	16
--#define AES_MAX_KEY_SIZE	32
--
--/* data block size for all key lengths */
--#define AES_BLOCK_SIZE		16
--
- #define AES_KEYLEN_128		1
- #define AES_KEYLEN_192		2
- #define AES_KEYLEN_256		4
-@@ -39,45 +36,89 @@ struct s390_aes_ctx {
- 	long enc;
- 	long dec;
- 	int key_len;
-+	union {
-+		struct crypto_blkcipher *blk;
-+		struct crypto_cipher *cip;
-+	} fallback;
- };
- 
--static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
--		       unsigned int key_len)
++NODE_CHECK(avoid_default_addr_size, NULL, WARN, &addr_size_cells);
++
++static void check_obsolete_chosen_interrupt_controller(struct check *c,
++						       struct node *dt)
++{
++	struct node *chosen;
++	struct property *prop;
++
++	chosen = get_node_by_path(dt, "/chosen");
++	if (!chosen)
++		return;
++
++	prop = get_property(chosen, "interrupt-controller");
++	if (prop)
++		FAIL(c, "/chosen has obsolete \"interrupt-controller\" "
++		     "property");
++}
++TREE_CHECK(obsolete_chosen_interrupt_controller, NULL, WARN);
++
++static struct check *check_table[] = {
++	&duplicate_node_names, &duplicate_property_names,
++	&name_is_string, &name_properties,
++	&explicit_phandles,
++	&phandle_references, &path_references,
++
++	&address_cells_is_cell, &size_cells_is_cell, &interrupt_cells_is_cell,
++	&device_type_is_string, &model_is_string, &status_is_string,
++
++	&addr_size_cells, &reg_format, &ranges_format,
++
++	&avoid_default_addr_size,
++	&obsolete_chosen_interrupt_controller,
++};
++
++int check_semantics(struct node *dt, int outversion, int boot_cpuid_phys);
++
++void process_checks(int force, struct boot_info *bi,
++		    int checkflag, int outversion, int boot_cpuid_phys)
++{
++	struct node *dt = bi->dt;
++	int i;
++	int error = 0;
++
++	for (i = 0; i < ARRAY_SIZE(check_table); i++) {
++		struct check *c = check_table[i];
++
++		if (c->level != IGNORE)
++			error = error || run_check(c, dt);
++	}
++
++	if (error) {
++		if (!force) {
++			fprintf(stderr, "ERROR: Input tree has errors, aborting "
++				"(use -f to force output)\n");
++			exit(2);
++		} else if (quiet < 3) {
++			fprintf(stderr, "Warning: Input tree has errors, "
++				"output forced\n");
++		}
++	}
++
++	if (checkflag) {
++		if (error) {
++			fprintf(stderr, "Warning: Skipping semantic checks due to structural errors\n");
++		} else {
++			if (!check_semantics(bi->dt, outversion,
++					     boot_cpuid_phys))
++				fprintf(stderr, "Warning: Input tree has semantic errors\n");
++		}
++	}
++}
++
 +/*
-+ * Check if the key_len is supported by the HW.
-+ * Returns 0 if it is, a positive number if it is not and software fallback is
-+ * required or a negative number in case the key size is not valid
++ * Semantic check functions
 + */
-+static int need_fallback(unsigned int key_len)
- {
--	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
--	u32 *flags = &tfm->crt_flags;
--
- 	switch (key_len) {
- 	case 16:
- 		if (!(keylen_flag & AES_KEYLEN_128))
--			goto fail;
-+			return 1;
- 		break;
- 	case 24:
- 		if (!(keylen_flag & AES_KEYLEN_192))
--			goto fail;
--
-+			return 1;
- 		break;
- 	case 32:
- 		if (!(keylen_flag & AES_KEYLEN_256))
--			goto fail;
-+			return 1;
- 		break;
- 	default:
--		goto fail;
-+		return -1;
- 		break;
- 	}
-+	return 0;
++
++#define ERRMSG(...) if (quiet < 2) fprintf(stderr, "ERROR: " __VA_ARGS__)
++#define WARNMSG(...) if (quiet < 1) fprintf(stderr, "Warning: " __VA_ARGS__)
++
++#define DO_ERR(...) do {ERRMSG(__VA_ARGS__); ok = 0; } while (0)
++
++#define CHECK_HAVE(node, propname) \
++	do { \
++		if (! (prop = get_property((node), (propname)))) \
++			DO_ERR("Missing \"%s\" property in %s\n", (propname), \
++				(node)->fullpath); \
++	} while (0);
++
++#define CHECK_HAVE_WARN(node, propname) \
++	do { \
++		if (! (prop  = get_property((node), (propname)))) \
++			WARNMSG("%s has no \"%s\" property\n", \
++				(node)->fullpath, (propname)); \
++	} while (0)
++
++#define CHECK_HAVE_STRING(node, propname) \
++	do { \
++		CHECK_HAVE((node), (propname)); \
++		if (prop && !data_is_one_string(prop->val)) \
++			DO_ERR("\"%s\" property in %s is not a string\n", \
++				(propname), (node)->fullpath); \
++	} while (0)
++
++#define CHECK_HAVE_STREQ(node, propname, value) \
++	do { \
++		CHECK_HAVE_STRING((node), (propname)); \
++		if (prop && !streq(prop->val.val, (value))) \
++			DO_ERR("%s has wrong %s, %s (should be %s\n", \
++				(node)->fullpath, (propname), \
++				prop->val.val, (value)); \
++	} while (0)
++
++#define CHECK_HAVE_ONECELL(node, propname) \
++	do { \
++		CHECK_HAVE((node), (propname)); \
++		if (prop && (prop->val.len != sizeof(cell_t))) \
++			DO_ERR("\"%s\" property in %s has wrong size %d (should be 1 cell)\n", (propname), (node)->fullpath, prop->val.len); \
++	} while (0)
++
++#define CHECK_HAVE_WARN_ONECELL(node, propname) \
++	do { \
++		CHECK_HAVE_WARN((node), (propname)); \
++		if (prop && (prop->val.len != sizeof(cell_t))) \
++			DO_ERR("\"%s\" property in %s has wrong size %d (should be 1 cell)\n", (propname), (node)->fullpath, prop->val.len); \
++	} while (0)
++
++#define CHECK_HAVE_WARN_PHANDLE(xnode, propname, root) \
++	do { \
++		struct node *ref; \
++		CHECK_HAVE_WARN_ONECELL((xnode), (propname)); \
++		if (prop) {\
++			cell_t phandle = propval_cell(prop); \
++			if ((phandle == 0) || (phandle == -1)) { \
++				DO_ERR("\"%s\" property in %s contains an invalid phandle %x\n", (propname), (xnode)->fullpath, phandle); \
++			} else { \
++				ref = get_node_by_phandle((root), propval_cell(prop)); \
++				if (! ref) \
++					DO_ERR("\"%s\" property in %s refers to non-existant phandle %x\n", (propname), (xnode)->fullpath, propval_cell(prop)); \
++			} \
++		} \
++	} while (0)
++
++#define CHECK_HAVE_WARN_STRING(node, propname) \
++	do { \
++		CHECK_HAVE_WARN((node), (propname)); \
++		if (prop && !data_is_one_string(prop->val)) \
++			DO_ERR("\"%s\" property in %s is not a string\n", \
++				(propname), (node)->fullpath); \
++	} while (0)
++
++static int check_root(struct node *root)
++{
++	struct property *prop;
++	int ok = 1;
++
++	CHECK_HAVE_STRING(root, "model");
++	CHECK_HAVE_WARN(root, "compatible");
++
++	return ok;
 +}
 +
-+static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
-+		unsigned int key_len)
++static int check_cpus(struct node *root, int outversion, int boot_cpuid_phys)
 +{
-+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-+	int ret;
++	struct node *cpus, *cpu;
++	struct property *prop;
++	struct node *bootcpu = NULL;
++	int ok = 1;
 +
-+	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-+	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
-+			CRYPTO_TFM_REQ_MASK);
++	cpus = get_subnode(root, "cpus");
++	if (! cpus) {
++		ERRMSG("Missing /cpus node\n");
++		return 0;
++	}
 +
-+	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
-+	if (ret) {
-+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-+		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
-+				CRYPTO_TFM_RES_MASK);
++	if (cpus->addr_cells != 1)
++		DO_ERR("%s has bad #address-cells value %d (should be 1)\n",
++		       cpus->fullpath, cpus->addr_cells);
++	if (cpus->size_cells != 0)
++		DO_ERR("%s has bad #size-cells value %d (should be 0)\n",
++		       cpus->fullpath, cpus->size_cells);
++
++	for_each_child(cpus, cpu) {
++		CHECK_HAVE_STREQ(cpu, "device_type", "cpu");
++
++		CHECK_HAVE_ONECELL(cpu, "reg");
++		if (prop) {
++			cell_t unitnum;
++			char *eptr;
++
++			unitnum = strtol(get_unitname(cpu), &eptr, 16);
++			if (*eptr) {
++				WARNMSG("%s has bad format unit name %s (should be CPU number\n",
++					cpu->fullpath, get_unitname(cpu));
++			} else if (unitnum != propval_cell(prop)) {
++				WARNMSG("%s unit name \"%s\" does not match \"reg\" property <%x>\n",
++				       cpu->fullpath, get_unitname(cpu),
++				       propval_cell(prop));
++			}
++		}
++
++/* 		CHECK_HAVE_ONECELL(cpu, "d-cache-line-size"); */
++/* 		CHECK_HAVE_ONECELL(cpu, "i-cache-line-size"); */
++		CHECK_HAVE_ONECELL(cpu, "d-cache-size");
++		CHECK_HAVE_ONECELL(cpu, "i-cache-size");
++
++		CHECK_HAVE_WARN_ONECELL(cpu, "clock-frequency");
++		CHECK_HAVE_WARN_ONECELL(cpu, "timebase-frequency");
++
++		prop = get_property(cpu, "linux,boot-cpu");
++		if (prop) {
++			if (prop->val.len)
++				WARNMSG("\"linux,boot-cpu\" property in %s is non-empty\n",
++					cpu->fullpath);
++			if (bootcpu)
++				DO_ERR("Multiple boot cpus (%s and %s)\n",
++				       bootcpu->fullpath, cpu->fullpath);
++			else
++				bootcpu = cpu;
++		}
 +	}
-+	return ret;
++
++	if (outversion < 2) {
++		if (! bootcpu)
++			WARNMSG("No cpu has \"linux,boot-cpu\" property\n");
++	} else {
++		if (bootcpu)
++			WARNMSG("\"linux,boot-cpu\" property is deprecated in blob version 2 or higher\n");
++		if (boot_cpuid_phys == 0xfeedbeef)
++			WARNMSG("physical boot CPU not set.  Use -b option to set\n");
++	}
++
++	return ok;
 +}
 +
-+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-+		       unsigned int key_len)
++static int check_memory(struct node *root)
 +{
-+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-+	u32 *flags = &tfm->crt_flags;
-+	int ret;
++	struct node *mem;
++	struct property *prop;
++	int nnodes = 0;
++	int ok = 1;
 +
-+	ret = need_fallback(key_len);
-+	if (ret < 0) {
-+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-+		return -EINVAL;
++	for_each_child(root, mem) {
++		if (! strneq(mem->name, "memory", mem->basenamelen))
++			continue;
++
++		nnodes++;
++
++		CHECK_HAVE_STREQ(mem, "device_type", "memory");
++		CHECK_HAVE(mem, "reg");
 +	}
- 
- 	sctx->key_len = key_len;
--	memcpy(sctx->key, in_key, key_len);
--	return 0;
--fail:
--	*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
--	return -EINVAL;
-+	if (!ret) {
-+		memcpy(sctx->key, in_key, key_len);
++
++	if (nnodes == 0) {
++		ERRMSG("No memory nodes\n");
 +		return 0;
 +	}
 +
-+	return setkey_fallback_cip(tfm, in_key, key_len);
- }
- 
- static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- {
- 	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- 
-+	if (unlikely(need_fallback(sctx->key_len))) {
-+		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
-+		return;
-+	}
++	return ok;
++}
 +
- 	switch (sctx->key_len) {
- 	case 16:
- 		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
-@@ -98,6 +139,11 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- {
- 	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- 
-+	if (unlikely(need_fallback(sctx->key_len))) {
-+		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
-+		return;
++int check_semantics(struct node *dt, int outversion, int boot_cpuid_phys)
++{
++	int ok = 1;
++
++	ok = ok && check_root(dt);
++	ok = ok && check_cpus(dt, outversion, boot_cpuid_phys);
++	ok = ok && check_memory(dt);
++	if (! ok)
++		return 0;
++
++	return 1;
++}
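
The checks.c file added above is a small dependency-driven framework: every structural, style or reference-fixup rule is a struct check with optional tree/node/property callbacks, and run_check() recurses into the listed prerequisites first, demoting the dependent check to PREREQ (and skipping its own callbacks) when a prerequisite did not pass; only rules at level ERROR make process_checks() abort unless -f is given. A condensed sketch of how an additional rule would plug into the framework (the rule itself is invented for illustration and would also need an entry in check_table[]):

	/* Hypothetical extra rule: warn about empty "compatible" properties. */
	static void check_compatible_not_empty(struct check *c, struct node *dt,
					       struct node *node,
					       struct property *prop)
	{
		if (streq(prop->name, "compatible") && prop->val.len == 0)
			FAIL(c, "%s has an empty \"compatible\" property",
			     node->fullpath);
	}
	PROP_CHECK(compatible_not_empty, NULL, WARN);
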
+diff --git a/arch/powerpc/boot/dtc-src/data.c b/arch/powerpc/boot/dtc-src/data.c
+new file mode 100644
+index 0000000..a94718c
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/data.c
+@@ -0,0 +1,321 @@
++/*
++ * (C) Copyright David Gibson <dwg at au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
++ */
++
++#include "dtc.h"
++
++void data_free(struct data d)
++{
++	struct marker *m, *nm;
++
++	m = d.markers;
++	while (m) {
++		nm = m->next;
++		free(m->ref);
++		free(m);
++		m = nm;
 +	}
 +
- 	switch (sctx->key_len) {
- 	case 16:
- 		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
-@@ -114,6 +160,29 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- 	}
- }
- 
-+static int fallback_init_cip(struct crypto_tfm *tfm)
++	assert(!d.val || d.asize);
++
++	if (d.val)
++		free(d.val);
++}
++
++struct data data_grow_for(struct data d, int xlen)
 +{
-+	const char *name = tfm->__crt_alg->cra_name;
-+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	struct data nd;
++	int newsize;
 +
-+	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
-+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
++	/* we must start with an allocated datum */
++	assert(!d.val || d.asize);
 +
-+	if (IS_ERR(sctx->fallback.cip)) {
-+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
-+		return PTR_ERR(sctx->fallback.blk);
++	if (xlen == 0)
++		return d;
++
++	nd = d;
++
++	newsize = xlen;
++
++	while ((d.len + xlen) > newsize)
++		newsize *= 2;
++
++	nd.asize = newsize;
++	nd.val = xrealloc(d.val, newsize);
++
++	assert(nd.asize >= (d.len + xlen));
++
++	return nd;
++}
++
++struct data data_copy_mem(const char *mem, int len)
++{
++	struct data d;
++
++	d = data_grow_for(empty_data, len);
++
++	d.len = len;
++	memcpy(d.val, mem, len);
++
++	return d;
++}
++
++static char get_oct_char(const char *s, int *i)
++{
++	char x[4];
++	char *endx;
++	long val;
++
++	x[3] = '\0';
++	x[0] = s[(*i)];
++	if (x[0]) {
++		x[1] = s[(*i)+1];
++		if (x[1])
++			x[2] = s[(*i)+2];
 +	}
 +
-+	return 0;
++	val = strtol(x, &endx, 8);
++	if ((endx - x) == 0)
++		fprintf(stderr, "Empty \\nnn escape\n");
++
++	(*i) += endx - x;
++	return val;
 +}
 +
-+static void fallback_exit_cip(struct crypto_tfm *tfm)
++static char get_hex_char(const char *s, int *i)
 +{
-+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	char x[3];
++	char *endx;
++	long val;
++
++	x[2] = '\0';
++	x[0] = s[(*i)];
++	if (x[0])
++		x[1] = s[(*i)+1];
++
++	val = strtol(x, &endx, 16);
++	if ((endx - x) == 0)
++		fprintf(stderr, "Empty \\x escape\n");
 +
-+	crypto_free_cipher(sctx->fallback.cip);
-+	sctx->fallback.cip = NULL;
++	(*i) += endx - x;
++	return val;
 +}
- 
- static struct crypto_alg aes_alg = {
- 	.cra_name		=	"aes",
-@@ -125,6 +194,8 @@ static struct crypto_alg aes_alg = {
- 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
- 	.cra_module		=	THIS_MODULE,
- 	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
-+	.cra_init               =       fallback_init_cip,
-+	.cra_exit               =       fallback_exit_cip,
- 	.cra_u			=	{
- 		.cipher = {
- 			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
-@@ -136,10 +207,70 @@ static struct crypto_alg aes_alg = {
- 	}
- };
- 
-+static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
-+		unsigned int len)
++
++struct data data_copy_escape_string(const char *s, int len)
 +{
-+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-+	unsigned int ret;
++	int i = 0;
++	struct data d;
++	char *q;
 +
-+	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-+	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
-+			CRYPTO_TFM_REQ_MASK);
++	d = data_grow_for(empty_data, strlen(s)+1);
 +
-+	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
-+	if (ret) {
-+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-+		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
-+				CRYPTO_TFM_RES_MASK);
++	q = d.val;
++	while (i < len) {
++		char c = s[i++];
++
++		if (c != '\\') {
++			q[d.len++] = c;
++			continue;
++		}
++
++		c = s[i++];
++		assert(c);
++		switch (c) {
++		case 'a':
++			q[d.len++] = '\a';
++			break;
++		case 'b':
++			q[d.len++] = '\b';
++			break;
++		case 't':
++			q[d.len++] = '\t';
++			break;
++		case 'n':
++			q[d.len++] = '\n';
++			break;
++		case 'v':
++			q[d.len++] = '\v';
++			break;
++		case 'f':
++			q[d.len++] = '\f';
++			break;
++		case 'r':
++			q[d.len++] = '\r';
++			break;
++		case '0':
++		case '1':
++		case '2':
++		case '3':
++		case '4':
++		case '5':
++		case '6':
++		case '7':
++			i--; /* need to re-read the first digit as
++			      * part of the octal value */
++			q[d.len++] = get_oct_char(s, &i);
++			break;
++		case 'x':
++			q[d.len++] = get_hex_char(s, &i);
++			break;
++		default:
++			q[d.len++] = c;
++		}
 +	}
-+	return ret;
++
++	q[d.len++] = '\0';
++	return d;
 +}
 +
-+static int fallback_blk_dec(struct blkcipher_desc *desc,
-+		struct scatterlist *dst, struct scatterlist *src,
-+		unsigned int nbytes)
++struct data data_copy_file(FILE *f, size_t len)
 +{
-+	unsigned int ret;
-+	struct crypto_blkcipher *tfm;
-+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
++	struct data d;
 +
-+	tfm = desc->tfm;
-+	desc->tfm = sctx->fallback.blk;
++	d = data_grow_for(empty_data, len);
 +
-+	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
++	d.len = len;
++	fread(d.val, len, 1, f);
 +
-+	desc->tfm = tfm;
-+	return ret;
++	return d;
 +}
 +
-+static int fallback_blk_enc(struct blkcipher_desc *desc,
-+		struct scatterlist *dst, struct scatterlist *src,
-+		unsigned int nbytes)
++struct data data_append_data(struct data d, const void *p, int len)
 +{
-+	unsigned int ret;
-+	struct crypto_blkcipher *tfm;
-+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
++	d = data_grow_for(d, len);
++	memcpy(d.val + d.len, p, len);
++	d.len += len;
++	return d;
++}
 +
-+	tfm = desc->tfm;
-+	desc->tfm = sctx->fallback.blk;
++struct data data_insert_at_marker(struct data d, struct marker *m,
++				  const void *p, int len)
++{
++	d = data_grow_for(d, len);
++	memmove(d.val + m->offset + len, d.val + m->offset, d.len - m->offset);
++	memcpy(d.val + m->offset, p, len);
++	d.len += len;
++
++	/* Adjust all markers after the one we're inserting at */
++	m = m->next;
++	for_each_marker(m)
++		m->offset += len;
++	return d;
++}
 +
-+	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
++struct data data_append_markers(struct data d, struct marker *m)
++{
++	struct marker **mp = &d.markers;
 +
-+	desc->tfm = tfm;
-+	return ret;
++	/* Find the end of the markerlist */
++	while (*mp)
++		mp = &((*mp)->next);
++	*mp = m;
++	return d;
 +}
 +
- static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- 			   unsigned int key_len)
- {
- 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-+	int ret;
++struct data data_merge(struct data d1, struct data d2)
++{
++	struct data d;
++	struct marker *m2 = d2.markers;
 +
-+	ret = need_fallback(key_len);
-+	if (ret > 0) {
-+		sctx->key_len = key_len;
-+		return setkey_fallback_blk(tfm, in_key, key_len);
-+	}
- 
- 	switch (key_len) {
- 	case 16:
-@@ -188,6 +319,9 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
- 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- 	struct blkcipher_walk walk;
- 
-+	if (unlikely(need_fallback(sctx->key_len)))
-+		return fallback_blk_enc(desc, dst, src, nbytes);
++	d = data_append_markers(data_append_data(d1, d2.val, d2.len), m2);
 +
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
- }
-@@ -199,10 +333,37 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
- 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- 	struct blkcipher_walk walk;
- 
-+	if (unlikely(need_fallback(sctx->key_len)))
-+		return fallback_blk_dec(desc, dst, src, nbytes);
++	/* Adjust for the length of d1 */
++	for_each_marker(m2)
++		m2->offset += d1.len;
 +
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
- }
- 
-+static int fallback_init_blk(struct crypto_tfm *tfm)
++	d2.markers = NULL; /* So data_free() doesn't clobber them */
++	data_free(d2);
++
++	return d;
++}
++
++struct data data_append_cell(struct data d, cell_t word)
 +{
-+	const char *name = tfm->__crt_alg->cra_name;
-+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	cell_t beword = cpu_to_be32(word);
 +
-+	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
-+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
++	return data_append_data(d, &beword, sizeof(beword));
++}
 +
-+	if (IS_ERR(sctx->fallback.blk)) {
-+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
-+		return PTR_ERR(sctx->fallback.blk);
-+	}
++struct data data_append_re(struct data d, const struct fdt_reserve_entry *re)
++{
++	struct fdt_reserve_entry bere;
 +
-+	return 0;
++	bere.address = cpu_to_be64(re->address);
++	bere.size = cpu_to_be64(re->size);
++
++	return data_append_data(d, &bere, sizeof(bere));
 +}
 +
-+static void fallback_exit_blk(struct crypto_tfm *tfm)
++struct data data_append_addr(struct data d, u64 addr)
 +{
-+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	u64 beaddr = cpu_to_be64(addr);
 +
-+	crypto_free_blkcipher(sctx->fallback.blk);
-+	sctx->fallback.blk = NULL;
++	return data_append_data(d, &beaddr, sizeof(beaddr));
 +}
 +
- static struct crypto_alg ecb_aes_alg = {
- 	.cra_name		=	"ecb(aes)",
- 	.cra_driver_name	=	"ecb-aes-s390",
-@@ -214,6 +375,8 @@ static struct crypto_alg ecb_aes_alg = {
- 	.cra_type		=	&crypto_blkcipher_type,
- 	.cra_module		=	THIS_MODULE,
- 	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
-+	.cra_init		=	fallback_init_blk,
-+	.cra_exit		=	fallback_exit_blk,
- 	.cra_u			=	{
- 		.blkcipher = {
- 			.min_keysize		=	AES_MIN_KEY_SIZE,
-@@ -229,6 +392,13 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- 			   unsigned int key_len)
- {
- 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-+	int ret;
++struct data data_append_byte(struct data d, uint8_t byte)
++{
++	return data_append_data(d, &byte, 1);
++}
 +
-+	ret = need_fallback(key_len);
-+	if (ret > 0) {
-+		sctx->key_len = key_len;
-+		return setkey_fallback_blk(tfm, in_key, key_len);
-+	}
- 
- 	switch (key_len) {
- 	case 16:
-@@ -283,6 +453,9 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
- 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- 	struct blkcipher_walk walk;
- 
-+	if (unlikely(need_fallback(sctx->key_len)))
-+		return fallback_blk_enc(desc, dst, src, nbytes);
++struct data data_append_zeroes(struct data d, int len)
++{
++	d = data_grow_for(d, len);
 +
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
- }
-@@ -294,6 +467,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
- 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- 	struct blkcipher_walk walk;
- 
-+	if (unlikely(need_fallback(sctx->key_len)))
-+		return fallback_blk_dec(desc, dst, src, nbytes);
++	memset(d.val + d.len, 0, len);
++	d.len += len;
++	return d;
++}
 +
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
- }
-@@ -309,6 +485,8 @@ static struct crypto_alg cbc_aes_alg = {
- 	.cra_type		=	&crypto_blkcipher_type,
- 	.cra_module		=	THIS_MODULE,
- 	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
-+	.cra_init		=	fallback_init_blk,
-+	.cra_exit		=	fallback_exit_blk,
- 	.cra_u			=	{
- 		.blkcipher = {
- 			.min_keysize		=	AES_MIN_KEY_SIZE,
-@@ -336,14 +514,10 @@ static int __init aes_init(void)
- 		return -EOPNOTSUPP;
- 
- 	/* z9 109 and z9 BC/EC only support 128 bit key length */
--	if (keylen_flag == AES_KEYLEN_128) {
--		aes_alg.cra_u.cipher.cia_max_keysize = AES_MIN_KEY_SIZE;
--		ecb_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
--		cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
-+	if (keylen_flag == AES_KEYLEN_128)
- 		printk(KERN_INFO
--		       "aes_s390: hardware acceleration only available for"
-+		       "aes_s390: hardware acceleration only available for "
- 		       "128 bit keys\n");
--	}
- 
- 	ret = crypto_register_alg(&aes_alg);
- 	if (ret)
-@@ -382,4 +556,3 @@ MODULE_ALIAS("aes");
- 
- MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
- MODULE_LICENSE("GPL");
--
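
The aes_s390 hunks above register cra_init/cra_exit handlers that allocate a generic software cipher and route requests through fallback_blk_enc()/fallback_blk_dec() whenever need_fallback() reports a key length the hardware cannot handle. A minimal standalone C sketch of that dispatch pattern (userspace illustration only, not kernel code; every name in it is invented):

#include <stdio.h>

/* Pretend the hardware only accelerates AES-128, as on z9. */
static int hw_supports(unsigned int key_len)
{
	return key_len == 16;
}

static void hw_encrypt(unsigned int key_len)
{
	printf("hardware path (%u-bit key)\n", key_len * 8);
}

static void sw_encrypt(unsigned int key_len)
{
	printf("software fallback (%u-bit key)\n", key_len * 8);
}

/* Probe the key length and dispatch, as the patched encrypt paths do. */
static void do_encrypt(unsigned int key_len)
{
	if (hw_supports(key_len))
		hw_encrypt(key_len);
	else
		sw_encrypt(key_len);
}

int main(void)
{
	do_encrypt(16);		/* AES-128: handled by the hardware */
	do_encrypt(32);		/* AES-256: routed to the fallback */
	return 0;
}
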
-diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
-index 8eb3a1a..0cfefdd 100644
---- a/arch/s390/crypto/prng.c
-+++ b/arch/s390/crypto/prng.c
-@@ -90,7 +90,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
- 	int ret = 0;
- 	int tmp;
- 
--	/* nbytes can be arbitrary long, we spilt it into chunks */
-+	/* nbytes can be arbitrary length, we split it into chunks */
- 	while (nbytes) {
- 		/* same as in extract_entropy_user in random.c */
- 		if (need_resched()) {
-@@ -146,7 +146,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
- 	return ret;
- }
- 
--static struct file_operations prng_fops = {
-+static const struct file_operations prng_fops = {
- 	.owner		= THIS_MODULE,
- 	.open		= &prng_open,
- 	.release	= NULL,
-diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
-index 5245717..4b010ff 100644
---- a/arch/s390/hypfs/inode.c
-+++ b/arch/s390/hypfs/inode.c
-@@ -490,7 +490,7 @@ static struct super_operations hypfs_s_ops = {
- 	.show_options	= hypfs_show_options,
- };
- 
--static decl_subsys(s390, NULL, NULL);
-+static struct kobject *s390_kobj;
- 
- static int __init hypfs_init(void)
- {
-@@ -506,17 +506,18 @@ static int __init hypfs_init(void)
- 			goto fail_diag;
- 		}
- 	}
--	kobj_set_kset_s(&s390_subsys, hypervisor_subsys);
--	rc = subsystem_register(&s390_subsys);
--	if (rc)
-+	s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
-+	if (!s390_kobj) {
- 		rc = -ENOMEM;
- 		goto fail_sysfs;
-+	}
- 	rc = register_filesystem(&hypfs_type);
- 	if (rc)
- 		goto fail_filesystem;
- 	return 0;
- 
- fail_filesystem:
--	subsystem_unregister(&s390_subsys);
-+	kobject_put(s390_kobj);
- fail_sysfs:
- 	if (!MACHINE_IS_VM)
- 		hypfs_diag_exit();
-@@ -530,7 +531,7 @@ static void __exit hypfs_exit(void)
- 	if (!MACHINE_IS_VM)
- 		hypfs_diag_exit();
- 	unregister_filesystem(&hypfs_type);
--	subsystem_unregister(&s390_subsys);
-+	kobject_put(s390_kobj);
- }
- 
- module_init(hypfs_init)
-diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
-index 56cb710..b3b650a 100644
---- a/arch/s390/kernel/Makefile
-+++ b/arch/s390/kernel/Makefile
-@@ -31,7 +31,3 @@ S390_KEXEC_OBJS := machine_kexec.o crash.o
- S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
- obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
- 
--#
--# This is just to get the dependencies...
--#
--binfmt_elf32.o:	$(TOPDIR)/fs/binfmt_elf.c
-diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
-index 1b3af7d..9f7b73b 100644
---- a/arch/s390/kernel/early.c
-+++ b/arch/s390/kernel/early.c
-@@ -276,7 +276,7 @@ void __init startup_init(void)
- 	create_kernel_nss();
- 	sort_main_extable();
- 	setup_lowcore_early();
--	sclp_readinfo_early();
-+	sclp_read_info_early();
- 	sclp_facilities_detect();
- 	memsize = sclp_memory_detect();
- #ifndef CONFIG_64BIT
-diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
-index a87b197..79dccd2 100644
---- a/arch/s390/kernel/head64.S
-+++ b/arch/s390/kernel/head64.S
-@@ -157,7 +157,7 @@ startup_continue:
- 	.long	0xb2b10000		# store facility list
- 	tm	0xc8,0x08		# check bit for clearing-by-ASCE
- 	bno	0f-.LPG1(%r13)
--	lhi	%r1,2094
-+	lhi	%r1,2048
- 	lhi	%r2,0
- 	.long	0xb98e2001
- 	oi	7(%r12),0x80		# set IDTE flag
-diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
-index ce0856d..db28cca 100644
---- a/arch/s390/kernel/ipl.c
-+++ b/arch/s390/kernel/ipl.c
-@@ -2,7 +2,7 @@
-  *  arch/s390/kernel/ipl.c
-  *    ipl/reipl/dump support for Linux on s390.
-  *
-- *    Copyright (C) IBM Corp. 2005,2006
-+ *    Copyright IBM Corp. 2005,2007
-  *    Author(s): Michael Holzheu <holzheu at de.ibm.com>
-  *		 Heiko Carstens <heiko.carstens at de.ibm.com>
-  *		 Volker Sameske <sameske at de.ibm.com>
-@@ -31,6 +31,43 @@
- #define IPL_FCP_DUMP_STR	"fcp_dump"
- #define IPL_NSS_STR		"nss"
- 
-+#define DUMP_CCW_STR		"ccw"
-+#define DUMP_FCP_STR		"fcp"
-+#define DUMP_NONE_STR		"none"
++struct data data_append_align(struct data d, int align)
++{
++	int newlen = ALIGN(d.len, align);
++	return data_append_zeroes(d, newlen - d.len);
++}
++
++struct data data_add_marker(struct data d, enum markertype type, char *ref)
++{
++	struct marker *m;
++
++	m = xmalloc(sizeof(*m));
++	m->offset = d.len;
++	m->type = type;
++	m->ref = ref;
++	m->next = NULL;
++
++	return data_append_markers(d, m);
++}
++
++int data_is_one_string(struct data d)
++{
++	int i;
++	int len = d.len;
++
++	if (len == 0)
++		return 0;
++
++	for (i = 0; i < len-1; i++)
++		if (d.val[i] == '\0')
++			return 0;
++
++	if (d.val[len-1] != '\0')
++		return 0;
 +
++	return 1;
++}
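
data_grow_for() above keeps appends cheap by doubling the allocated size until the requested extra bytes fit, and data_append_data() builds on it. A standalone sketch of that growth strategy, assuming only the C library (the blob names are invented for the example and are not dtc's):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct blob {
	int len;	/* bytes in use */
	int asize;	/* bytes allocated */
	char *val;
};

/* Grow the buffer so at least xlen more bytes fit, doubling as needed. */
static struct blob blob_grow_for(struct blob b, int xlen)
{
	int newsize;

	if (xlen == 0)
		return b;
	newsize = b.asize ? b.asize : xlen;
	while (b.len + xlen > newsize)
		newsize *= 2;
	b.val = realloc(b.val, newsize);
	assert(b.val);
	b.asize = newsize;
	return b;
}

static struct blob blob_append(struct blob b, const void *p, int len)
{
	b = blob_grow_for(b, len);
	memcpy(b.val + b.len, p, len);
	b.len += len;
	return b;
}

int main(void)
{
	struct blob b = { 0, 0, NULL };
	int i;

	for (i = 0; i < 5; i++)
		b = blob_append(b, "hello ", 6);
	b = blob_append(b, "", 1);	/* terminating NUL */
	printf("%s(len=%d, asize=%d)\n", b.val, b.len, b.asize);
	free(b.val);
	return 0;
}
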
+diff --git a/arch/powerpc/boot/dtc-src/dtc-lexer.l b/arch/powerpc/boot/dtc-src/dtc-lexer.l
+new file mode 100644
+index 0000000..c811b22
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/dtc-lexer.l
+@@ -0,0 +1,328 @@
 +/*
-+ * Four shutdown trigger types are supported:
-+ * - panic
-+ * - halt
-+ * - power off
-+ * - reipl
++ * (C) Copyright David Gibson <dwg at au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
 + */
-+#define ON_PANIC_STR		"on_panic"
-+#define ON_HALT_STR		"on_halt"
-+#define ON_POFF_STR		"on_poff"
-+#define ON_REIPL_STR		"on_reboot"
 +
-+struct shutdown_action;
-+struct shutdown_trigger {
-+	char *name;
-+	struct shutdown_action *action;
-+};
++%option noyywrap nounput yylineno
++
++%x INCLUDE
++%x BYTESTRING
++%x PROPNODENAME
++%s V1
++
++PROPNODECHAR	[a-zA-Z0-9,._+*#?@-]
++PATHCHAR	({PROPNODECHAR}|[/])
++LABEL		[a-zA-Z_][a-zA-Z0-9_]*
++
++%{
++#include "dtc.h"
++#include "srcpos.h"
++#include "dtc-parser.tab.h"
++
++
++/*#define LEXDEBUG	1*/
++
++#ifdef LEXDEBUG
++#define DPRINT(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)
++#else
++#define DPRINT(fmt, ...)	do { } while (0)
++#endif
++
++static int dts_version; /* = 0 */
++
++#define BEGIN_DEFAULT()	if (dts_version == 0) { \
++				DPRINT("<INITIAL>\n"); \
++				BEGIN(INITIAL); \
++			} else { \
++				DPRINT("<V1>\n"); \
++				BEGIN(V1); \
++			}
++%}
++
++%%
++<*>"/include/"		BEGIN(INCLUDE);
++
++<INCLUDE>\"[^"\n]*\"	{
++			yytext[strlen(yytext) - 1] = 0;
++			if (!push_input_file(yytext + 1)) {
++				/* Some unrecoverable error.*/
++				exit(1);
++			}
++			BEGIN_DEFAULT();
++		}
++
++
++<*><<EOF>>		{
++			if (!pop_input_file()) {
++				yyterminate();
++			}
++		}
++
++<*>\"([^\\"]|\\.)*\"	{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("String: %s\n", yytext);
++			yylval.data = data_copy_escape_string(yytext+1,
++					yyleng-2);
++			yylloc.first_line = yylineno;
++			return DT_STRING;
++		}
++
++<*>"/dts-v1/"	{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Keyword: /dts-v1/\n");
++			dts_version = 1;
++			BEGIN_DEFAULT();
++			return DT_V1;
++		}
++
++<*>"/memreserve/"	{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Keyword: /memreserve/\n");
++			BEGIN_DEFAULT();
++			return DT_MEMRESERVE;
++		}
++
++<*>{LABEL}:	{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Label: %s\n", yytext);
++			yylval.labelref = strdup(yytext);
++			yylval.labelref[yyleng-1] = '\0';
++			return DT_LABEL;
++		}
++
++<INITIAL>[bodh]# {
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			if (*yytext == 'b')
++				yylval.cbase = 2;
++			else if (*yytext == 'o')
++				yylval.cbase = 8;
++			else if (*yytext == 'd')
++				yylval.cbase = 10;
++			else
++				yylval.cbase = 16;
++			DPRINT("Base: %d\n", yylval.cbase);
++			return DT_BASE;
++		}
++
++<INITIAL>[0-9a-fA-F]+	{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			yylval.literal = strdup(yytext);
++			DPRINT("Literal: '%s'\n", yylval.literal);
++			return DT_LEGACYLITERAL;
++		}
++
++<V1>[0-9]+|0[xX][0-9a-fA-F]+      {
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			yylval.literal = strdup(yytext);
++			DPRINT("Literal: '%s'\n", yylval.literal);
++			return DT_LITERAL;
++		}
++
++\&{LABEL}	{	/* label reference */
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Ref: %s\n", yytext+1);
++			yylval.labelref = strdup(yytext+1);
++			return DT_REF;
++		}
++
++"&{/"{PATHCHAR}+\}	{	/* new-style path reference */
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			yytext[yyleng-1] = '\0';
++			DPRINT("Ref: %s\n", yytext+2);
++			yylval.labelref = strdup(yytext+2);
++			return DT_REF;
++		}
++
++<INITIAL>"&/"{PATHCHAR}+ {	/* old-style path reference */
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Ref: %s\n", yytext+1);
++			yylval.labelref = strdup(yytext+1);
++			return DT_REF;
++		}
++
++<BYTESTRING>[0-9a-fA-F]{2} {
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			yylval.byte = strtol(yytext, NULL, 16);
++			DPRINT("Byte: %02x\n", (int)yylval.byte);
++			return DT_BYTE;
++		}
++
++<BYTESTRING>"]"	{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("/BYTESTRING\n");
++			BEGIN_DEFAULT();
++			return ']';
++		}
++
++<PROPNODENAME>{PROPNODECHAR}+ {
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("PropNodeName: %s\n", yytext);
++			yylval.propnodename = strdup(yytext);
++			BEGIN_DEFAULT();
++			return DT_PROPNODENAME;
++		}
++
++
++<*>[[:space:]]+	/* eat whitespace */
++
++<*>"/*"([^*]|\*+[^*/])*\*+"/"	{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Comment: %s\n", yytext);
++			/* eat comments */
++		}
++
++<*>"//".*\n	/* eat line comments */
++
++<*>.		{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Char: %c (\\x%02x)\n", yytext[0],
++				(unsigned)yytext[0]);
++			if (yytext[0] == '[') {
++				DPRINT("<BYTESTRING>\n");
++				BEGIN(BYTESTRING);
++			}
++			if ((yytext[0] == '{')
++			    || (yytext[0] == ';')) {
++				DPRINT("<PROPNODENAME>\n");
++				BEGIN(PROPNODENAME);
++			}
++			return yytext[0];
++		}
++
++%%
++
 +
 +/*
-+ * Five shutdown action types are supported:
++ * Stack of nested include file contexts.
 + */
-+#define SHUTDOWN_ACTION_IPL_STR		"ipl"
-+#define SHUTDOWN_ACTION_REIPL_STR	"reipl"
-+#define SHUTDOWN_ACTION_DUMP_STR	"dump"
-+#define SHUTDOWN_ACTION_VMCMD_STR	"vmcmd"
-+#define SHUTDOWN_ACTION_STOP_STR	"stop"
 +
-+struct shutdown_action {
-+	char *name;
-+	void (*fn) (struct shutdown_trigger *trigger);
-+	int (*init) (void);
++struct incl_file {
++	int filenum;
++	FILE *file;
++	YY_BUFFER_STATE yy_prev_buf;
++	int yy_prev_lineno;
++	struct incl_file *prev;
 +};
 +
- static char *ipl_type_str(enum ipl_type type)
- {
- 	switch (type) {
-@@ -54,10 +91,6 @@ enum dump_type {
- 	DUMP_TYPE_FCP	= 4,
- };
- 
--#define DUMP_NONE_STR	 "none"
--#define DUMP_CCW_STR	 "ccw"
--#define DUMP_FCP_STR	 "fcp"
--
- static char *dump_type_str(enum dump_type type)
- {
- 	switch (type) {
-@@ -99,30 +132,6 @@ enum dump_method {
- 	DUMP_METHOD_FCP_DIAG,
- };
- 
--enum shutdown_action {
--	SHUTDOWN_REIPL,
--	SHUTDOWN_DUMP,
--	SHUTDOWN_STOP,
--};
--
--#define SHUTDOWN_REIPL_STR "reipl"
--#define SHUTDOWN_DUMP_STR  "dump"
--#define SHUTDOWN_STOP_STR  "stop"
--
--static char *shutdown_action_str(enum shutdown_action action)
--{
--	switch (action) {
--	case SHUTDOWN_REIPL:
--		return SHUTDOWN_REIPL_STR;
--	case SHUTDOWN_DUMP:
--		return SHUTDOWN_DUMP_STR;
--	case SHUTDOWN_STOP:
--		return SHUTDOWN_STOP_STR;
--	default:
--		return NULL;
--	}
--}
--
- static int diag308_set_works = 0;
- 
- static int reipl_capabilities = IPL_TYPE_UNKNOWN;
-@@ -140,8 +149,6 @@ static enum dump_method dump_method = DUMP_METHOD_NONE;
- static struct ipl_parameter_block *dump_block_fcp;
- static struct ipl_parameter_block *dump_block_ccw;
- 
--static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
--
- static struct sclp_ipl_info sclp_ipl_info;
- 
- int diag308(unsigned long subcode, void *addr)
-@@ -162,22 +169,25 @@ EXPORT_SYMBOL_GPL(diag308);
- /* SYSFS */
- 
- #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value)		\
--static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
-+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
-+		struct kobj_attribute *attr,				\
- 		char *page)						\
- {									\
- 	return sprintf(page, _format, _value);				\
- }									\
--static struct subsys_attribute sys_##_prefix##_##_name##_attr =		\
-+static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
- 	__ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
- 
- #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)	\
--static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
-+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
-+		struct kobj_attribute *attr,				\
- 		char *page)						\
- {									\
- 	return sprintf(page, _fmt_out,					\
- 			(unsigned long long) _value);			\
- }									\
--static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
-+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
-+		struct kobj_attribute *attr,				\
- 		const char *buf, size_t len)				\
- {									\
- 	unsigned long long value;					\
-@@ -186,25 +196,27 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
- 	_value = value;							\
- 	return len;							\
- }									\
--static struct subsys_attribute sys_##_prefix##_##_name##_attr =		\
-+static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
- 	__ATTR(_name,(S_IRUGO | S_IWUSR),				\
- 			sys_##_prefix##_##_name##_show,			\
- 			sys_##_prefix##_##_name##_store);
- 
- #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
--static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
-+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
-+		struct kobj_attribute *attr,				\
- 		char *page)						\
- {									\
- 	return sprintf(page, _fmt_out, _value);				\
- }									\
--static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
-+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
-+		struct kobj_attribute *attr,				\
- 		const char *buf, size_t len)				\
- {									\
--	if (sscanf(buf, _fmt_in, _value) != 1)				\
--		return -EINVAL;						\
-+	strncpy(_value, buf, sizeof(_value) - 1);			\
-+	strstrip(_value);						\
- 	return len;							\
- }									\
--static struct subsys_attribute sys_##_prefix##_##_name##_attr =		\
-+static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
- 	__ATTR(_name,(S_IRUGO | S_IWUSR),				\
- 			sys_##_prefix##_##_name##_show,			\
- 			sys_##_prefix##_##_name##_store);
-@@ -240,44 +252,19 @@ static __init enum ipl_type get_ipl_type(void)
- 	return IPL_TYPE_FCP;
- }
- 
--void __init setup_ipl_info(void)
--{
--	ipl_info.type = get_ipl_type();
--	switch (ipl_info.type) {
--	case IPL_TYPE_CCW:
--		ipl_info.data.ccw.dev_id.devno = ipl_devno;
--		ipl_info.data.ccw.dev_id.ssid = 0;
--		break;
--	case IPL_TYPE_FCP:
--	case IPL_TYPE_FCP_DUMP:
--		ipl_info.data.fcp.dev_id.devno =
--			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
--		ipl_info.data.fcp.dev_id.ssid = 0;
--		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
--		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
--		break;
--	case IPL_TYPE_NSS:
--		strncpy(ipl_info.data.nss.name, kernel_nss_name,
--			sizeof(ipl_info.data.nss.name));
--		break;
--	case IPL_TYPE_UNKNOWN:
--	default:
--		/* We have no info to copy */
--		break;
--	}
--}
--
- struct ipl_info ipl_info;
- EXPORT_SYMBOL_GPL(ipl_info);
- 
--static ssize_t ipl_type_show(struct kset *kset, char *page)
-+static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
-+			     char *page)
- {
- 	return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
- }
- 
--static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
-+static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
- 
--static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
-+static ssize_t sys_ipl_device_show(struct kobject *kobj,
-+				   struct kobj_attribute *attr, char *page)
- {
- 	struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
- 
-@@ -292,7 +279,7 @@ static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
- 	}
- }
- 
--static struct subsys_attribute sys_ipl_device_attr =
-+static struct kobj_attribute sys_ipl_device_attr =
- 	__ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
- 
- static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
-@@ -367,7 +354,8 @@ static struct attribute_group ipl_fcp_attr_group = {
- 
- /* CCW ipl device attributes */
- 
--static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
-+static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
-+				     struct kobj_attribute *attr, char *page)
- {
- 	char loadparm[LOADPARM_LEN + 1] = {};
- 
-@@ -379,7 +367,7 @@ static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
- 	return sprintf(page, "%s\n", loadparm);
- }
- 
--static struct subsys_attribute sys_ipl_ccw_loadparm_attr =
-+static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
- 	__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
- 
- static struct attribute *ipl_ccw_attrs[] = {
-@@ -418,10 +406,76 @@ static struct attribute_group ipl_unknown_attr_group = {
- 	.attrs = ipl_unknown_attrs,
- };
- 
--static decl_subsys(ipl, NULL, NULL);
-+static struct kset *ipl_kset;
++struct incl_file *incl_file_stack;
 +
-+static int __init ipl_register_fcp_files(void)
++
++/*
++ * Detect infinite include recursion.
++ */
++#define MAX_INCLUDE_DEPTH	(100)
++
++static int incl_depth = 0;
++
++
++int push_input_file(const char *filename)
 +{
-+	int rc;
++	FILE *f;
++	struct incl_file *incl_file;
 +
-+	rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
-+	if (rc)
-+		goto out;
-+	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
-+	if (rc)
-+		goto out_ipl_parm;
-+	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
-+	if (!rc)
-+		goto out;
++	if (!filename) {
++		yyerror("No include file name given.");
++		return 0;
++	}
 +
-+	sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
++	if (incl_depth++ >= MAX_INCLUDE_DEPTH) {
++		yyerror("Includes nested too deeply");
++		return 0;
++	}
 +
-+out_ipl_parm:
-+	sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
-+out:
-+	return rc;
++	f = dtc_open_file(filename);
++
++	incl_file = malloc(sizeof(struct incl_file));
++	if (!incl_file) {
++		yyerror("Can not allocate include file space.");
++		return 0;
++	}
++
++	/*
++	 * Save current context.
++	 */
++	incl_file->yy_prev_buf = YY_CURRENT_BUFFER;
++	incl_file->yy_prev_lineno = yylineno;
++	incl_file->filenum = srcpos_filenum;
++	incl_file->file = yyin;
++	incl_file->prev = incl_file_stack;
++
++	incl_file_stack = incl_file;
++
++	/*
++	 * Establish new context.
++	 */
++	srcpos_filenum = lookup_file_name(filename, 0);
++	yylineno = 1;
++	yyin = f;
++	yy_switch_to_buffer(yy_create_buffer(yyin, YY_BUF_SIZE));
++
++	return 1;
 +}
 +
-+static void ipl_run(struct shutdown_trigger *trigger)
++
++int pop_input_file(void)
 +{
-+	diag308(DIAG308_IPL, NULL);
-+	if (MACHINE_IS_VM)
-+		__cpcmd("IPL", NULL, 0, NULL);
-+	else if (ipl_info.type == IPL_TYPE_CCW)
-+		reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
++	struct incl_file *incl_file;
++
++	if (incl_file_stack == 0)
++		return 0;
++
++	fclose(yyin);
++
++	/*
++	 * Pop.
++	 */
++	--incl_depth;
++	incl_file = incl_file_stack;
++	incl_file_stack = incl_file->prev;
++
++	/*
++	 * Recover old context.
++	 */
++	yy_delete_buffer(YY_CURRENT_BUFFER);
++	yy_switch_to_buffer(incl_file->yy_prev_buf);
++	yylineno = incl_file->yy_prev_lineno;
++	srcpos_filenum = incl_file->filenum;
++	yyin = incl_file->file;
++
++	/*
++	 * Free old state.
++	 */
++	free(incl_file);
++
++	if (YY_CURRENT_BUFFER == 0)
++		return 0;
++
++	return 1;
 +}
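
push_input_file() and pop_input_file() above save the current flex buffer, file number, and line number on a linked-list stack so /include/ directives can nest up to MAX_INCLUDE_DEPTH, and each EOF unwinds one level. A standalone sketch of that save/restore stack with invented names (it stores plain strings instead of flex buffer state):

#include <stdio.h>
#include <stdlib.h>

struct incl_ctx {
	char name[64];		/* file we were reading */
	int lineno;		/* line to resume at */
	struct incl_ctx *prev;
};

static struct incl_ctx *ctx_stack;

/* Save the current context before switching to an included file. */
static void push_ctx(const char *name, int lineno)
{
	struct incl_ctx *c = malloc(sizeof(*c));

	if (!c) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	snprintf(c->name, sizeof(c->name), "%s", name);
	c->lineno = lineno;
	c->prev = ctx_stack;
	ctx_stack = c;
}

/* Restore the most recently saved context; 0 means the stack is empty. */
static int pop_ctx(char *name, size_t sz, int *lineno)
{
	struct incl_ctx *c = ctx_stack;

	if (!c)
		return 0;
	snprintf(name, sz, "%s", c->name);
	*lineno = c->lineno;
	ctx_stack = c->prev;
	free(c);
	return 1;
}

int main(void)
{
	char name[64];
	int line;

	push_ctx("board.dts", 12);	/* hit /include/ "soc.dtsi" */
	push_ctx("soc.dtsi", 40);	/* hit /include/ "cpus.dtsi" */
	while (pop_ctx(name, sizeof(name), &line))	/* EOFs unwind in LIFO order */
		printf("resume %s at line %d\n", name, line);
	return 0;
}
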
+diff --git a/arch/powerpc/boot/dtc-src/dtc-lexer.lex.c_shipped b/arch/powerpc/boot/dtc-src/dtc-lexer.lex.c_shipped
+new file mode 100644
+index 0000000..d0f7424
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/dtc-lexer.lex.c_shipped
+@@ -0,0 +1,2174 @@
++#line 2 "dtc-lexer.lex.c"
 +
-+static int ipl_init(void)
-+{
-+	int rc;
++#line 4 "dtc-lexer.lex.c"
 +
-+	ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
-+	if (!ipl_kset) {
-+		rc = -ENOMEM;
-+		goto out;
++#define  YY_INT_ALIGNED short int
++
++/* A lexical scanner generated by flex */
++
++#define FLEX_SCANNER
++#define YY_FLEX_MAJOR_VERSION 2
++#define YY_FLEX_MINOR_VERSION 5
++#define YY_FLEX_SUBMINOR_VERSION 33
++#if YY_FLEX_SUBMINOR_VERSION > 0
++#define FLEX_BETA
++#endif
++
++/* First, we deal with  platform-specific or compiler-specific issues. */
++
++/* begin standard C headers. */
++#include <stdio.h>
++#include <string.h>
++#include <errno.h>
++#include <stdlib.h>
++
++/* end standard C headers. */
++
++/* flex integer type definitions */
++
++#ifndef FLEXINT_H
++#define FLEXINT_H
++
++/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
++
++#if __STDC_VERSION__ >= 199901L
++
++/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
++ * if you want the limit (max/min) macros for int types. 
++ */
++#ifndef __STDC_LIMIT_MACROS
++#define __STDC_LIMIT_MACROS 1
++#endif
++
++#include <inttypes.h>
++typedef int8_t flex_int8_t;
++typedef uint8_t flex_uint8_t;
++typedef int16_t flex_int16_t;
++typedef uint16_t flex_uint16_t;
++typedef int32_t flex_int32_t;
++typedef uint32_t flex_uint32_t;
++#else
++typedef signed char flex_int8_t;
++typedef short int flex_int16_t;
++typedef int flex_int32_t;
++typedef unsigned char flex_uint8_t; 
++typedef unsigned short int flex_uint16_t;
++typedef unsigned int flex_uint32_t;
++#endif /* ! C99 */
++
++/* Limits of integral types. */
++#ifndef INT8_MIN
++#define INT8_MIN               (-128)
++#endif
++#ifndef INT16_MIN
++#define INT16_MIN              (-32767-1)
++#endif
++#ifndef INT32_MIN
++#define INT32_MIN              (-2147483647-1)
++#endif
++#ifndef INT8_MAX
++#define INT8_MAX               (127)
++#endif
++#ifndef INT16_MAX
++#define INT16_MAX              (32767)
++#endif
++#ifndef INT32_MAX
++#define INT32_MAX              (2147483647)
++#endif
++#ifndef UINT8_MAX
++#define UINT8_MAX              (255U)
++#endif
++#ifndef UINT16_MAX
++#define UINT16_MAX             (65535U)
++#endif
++#ifndef UINT32_MAX
++#define UINT32_MAX             (4294967295U)
++#endif
++
++#endif /* ! FLEXINT_H */
++
++#ifdef __cplusplus
++
++/* The "const" storage-class-modifier is valid. */
++#define YY_USE_CONST
++
++#else	/* ! __cplusplus */
++
++#if __STDC__
++
++#define YY_USE_CONST
++
++#endif	/* __STDC__ */
++#endif	/* ! __cplusplus */
++
++#ifdef YY_USE_CONST
++#define yyconst const
++#else
++#define yyconst
++#endif
++
++/* Returned upon end-of-file. */
++#define YY_NULL 0
++
++/* Promotes a possibly negative, possibly signed char to an unsigned
++ * integer for use as an array index.  If the signed char is negative,
++ * we want to instead treat it as an 8-bit unsigned char, hence the
++ * double cast.
++ */
++#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
++
++/* Enter a start condition.  This macro really ought to take a parameter,
++ * but we do it the disgusting crufty way forced on us by the ()-less
++ * definition of BEGIN.
++ */
++#define BEGIN (yy_start) = 1 + 2 *
++
++/* Translate the current start state into a value that can be later handed
++ * to BEGIN to return to the state.  The YYSTATE alias is for lex
++ * compatibility.
++ */
++#define YY_START (((yy_start) - 1) / 2)
++#define YYSTATE YY_START
++
++/* Action number for EOF rule of a given start state. */
++#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
++
++/* Special action meaning "start processing a new file". */
++#define YY_NEW_FILE yyrestart(yyin  )
++
++#define YY_END_OF_BUFFER_CHAR 0
++
++/* Size of default input buffer. */
++#ifndef YY_BUF_SIZE
++#define YY_BUF_SIZE 16384
++#endif
++
++/* The state buf must be large enough to hold one state per character in the main buffer.
++ */
++#define YY_STATE_BUF_SIZE   ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
++
++#ifndef YY_TYPEDEF_YY_BUFFER_STATE
++#define YY_TYPEDEF_YY_BUFFER_STATE
++typedef struct yy_buffer_state *YY_BUFFER_STATE;
++#endif
++
++extern int yyleng;
++
++extern FILE *yyin, *yyout;
++
++#define EOB_ACT_CONTINUE_SCAN 0
++#define EOB_ACT_END_OF_FILE 1
++#define EOB_ACT_LAST_MATCH 2
++
++    /* Note: We specifically omit the test for yy_rule_can_match_eol because it requires
++     *       access to the local variable yy_act. Since yyless() is a macro, it would break
++     *       existing scanners that call yyless() from OUTSIDE yylex. 
++     *       One obvious solution is to make yy_act a global. I tried that, and saw
++     *       a 5% performance hit in a non-yylineno scanner, because yy_act is
++     *       normally declared as a register variable-- so it is not worth it.
++     */
++    #define  YY_LESS_LINENO(n) \
++            do { \
++                int yyl;\
++                for ( yyl = n; yyl < yyleng; ++yyl )\
++                    if ( yytext[yyl] == '\n' )\
++                        --yylineno;\
++            }while(0)
++    
++/* Return all but the first "n" matched characters back to the input stream. */
++#define yyless(n) \
++	do \
++		{ \
++		/* Undo effects of setting up yytext. */ \
++        int yyless_macro_arg = (n); \
++        YY_LESS_LINENO(yyless_macro_arg);\
++		*yy_cp = (yy_hold_char); \
++		YY_RESTORE_YY_MORE_OFFSET \
++		(yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
++		YY_DO_BEFORE_ACTION; /* set up yytext again */ \
++		} \
++	while ( 0 )
++
++#define unput(c) yyunput( c, (yytext_ptr)  )
++
++/* The following is because we cannot portably get our hands on size_t
++ * (without autoconf's help, which isn't available because we want
++ * flex-generated scanners to compile on their own).
++ */
++
++#ifndef YY_TYPEDEF_YY_SIZE_T
++#define YY_TYPEDEF_YY_SIZE_T
++typedef unsigned int yy_size_t;
++#endif
++
++#ifndef YY_STRUCT_YY_BUFFER_STATE
++#define YY_STRUCT_YY_BUFFER_STATE
++struct yy_buffer_state
++	{
++	FILE *yy_input_file;
++
++	char *yy_ch_buf;		/* input buffer */
++	char *yy_buf_pos;		/* current position in input buffer */
++
++	/* Size of input buffer in bytes, not including room for EOB
++	 * characters.
++	 */
++	yy_size_t yy_buf_size;
++
++	/* Number of characters read into yy_ch_buf, not including EOB
++	 * characters.
++	 */
++	int yy_n_chars;
++
++	/* Whether we "own" the buffer - i.e., we know we created it,
++	 * and can realloc() it to grow it, and should free() it to
++	 * delete it.
++	 */
++	int yy_is_our_buffer;
++
++	/* Whether this is an "interactive" input source; if so, and
++	 * if we're using stdio for input, then we want to use getc()
++	 * instead of fread(), to make sure we stop fetching input after
++	 * each newline.
++	 */
++	int yy_is_interactive;
++
++	/* Whether we're considered to be at the beginning of a line.
++	 * If so, '^' rules will be active on the next match, otherwise
++	 * not.
++	 */
++	int yy_at_bol;
++
++    int yy_bs_lineno; /**< The line count. */
++    int yy_bs_column; /**< The column count. */
++    
++	/* Whether to try to fill the input buffer when we reach the
++	 * end of it.
++	 */
++	int yy_fill_buffer;
++
++	int yy_buffer_status;
++
++#define YY_BUFFER_NEW 0
++#define YY_BUFFER_NORMAL 1
++	/* When an EOF's been seen but there's still some text to process
++	 * then we mark the buffer as YY_EOF_PENDING, to indicate that we
++	 * shouldn't try reading from the input source any more.  We might
++	 * still have a bunch of tokens to match, though, because of
++	 * possible backing-up.
++	 *
++	 * When we actually see the EOF, we change the status to "new"
++	 * (via yyrestart()), so that the user can continue scanning by
++	 * just pointing yyin at a new input file.
++	 */
++#define YY_BUFFER_EOF_PENDING 2
++
++	};
++#endif /* !YY_STRUCT_YY_BUFFER_STATE */
++
++/* Stack of input buffers. */
++static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
++static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
++static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
++
++/* We provide macros for accessing buffer states in case in the
++ * future we want to put the buffer states in a more general
++ * "scanner state".
++ *
++ * Returns the top of the stack, or NULL.
++ */
++#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
++                          ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
++                          : NULL)
++
++/* Same as previous macro, but useful when we know that the buffer stack is not
++ * NULL or when we need an lvalue. For internal use only.
++ */
++#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
++
++/* yy_hold_char holds the character lost when yytext is formed. */
++static char yy_hold_char;
++static int yy_n_chars;		/* number of characters read into yy_ch_buf */
++int yyleng;
++
++/* Points to current character in buffer. */
++static char *yy_c_buf_p = (char *) 0;
++static int yy_init = 0;		/* whether we need to initialize */
++static int yy_start = 0;	/* start state number */
++
++/* Flag which is used to allow yywrap()'s to do buffer switches
++ * instead of setting up a fresh yyin.  A bit of a hack ...
++ */
++static int yy_did_buffer_switch_on_eof;
++
++void yyrestart (FILE *input_file  );
++void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer  );
++YY_BUFFER_STATE yy_create_buffer (FILE *file,int size  );
++void yy_delete_buffer (YY_BUFFER_STATE b  );
++void yy_flush_buffer (YY_BUFFER_STATE b  );
++void yypush_buffer_state (YY_BUFFER_STATE new_buffer  );
++void yypop_buffer_state (void );
++
++static void yyensure_buffer_stack (void );
++static void yy_load_buffer_state (void );
++static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file  );
++
++#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER )
++
++YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size  );
++YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str  );
++YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len  );
++
++void *yyalloc (yy_size_t  );
++void *yyrealloc (void *,yy_size_t  );
++void yyfree (void *  );
++
++#define yy_new_buffer yy_create_buffer
++
++#define yy_set_interactive(is_interactive) \
++	{ \
++	if ( ! YY_CURRENT_BUFFER ){ \
++        yyensure_buffer_stack (); \
++		YY_CURRENT_BUFFER_LVALUE =    \
++            yy_create_buffer(yyin,YY_BUF_SIZE ); \
++	} \
++	YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
 +	}
-+	switch (ipl_info.type) {
-+	case IPL_TYPE_CCW:
-+		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
-+		break;
-+	case IPL_TYPE_FCP:
-+	case IPL_TYPE_FCP_DUMP:
-+		rc = ipl_register_fcp_files();
-+		break;
-+	case IPL_TYPE_NSS:
-+		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
-+		break;
-+	default:
-+		rc = sysfs_create_group(&ipl_kset->kobj,
-+					&ipl_unknown_attr_group);
-+		break;
++
++#define yy_set_bol(at_bol) \
++	{ \
++	if ( ! YY_CURRENT_BUFFER ){\
++        yyensure_buffer_stack (); \
++		YY_CURRENT_BUFFER_LVALUE =    \
++            yy_create_buffer(yyin,YY_BUF_SIZE ); \
++	} \
++	YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
 +	}
-+out:
-+	if (rc)
-+		panic("ipl_init failed: rc = %i\n", rc);
 +
-+	return 0;
-+}
++#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
 +
-+static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run,
-+					    ipl_init};
- 
- /*
-- * reipl section
-+ * reipl shutdown action: Reboot Linux on shutdown.
-  */
- 
- /* FCP reipl device attributes */
-@@ -465,7 +519,8 @@ static void reipl_get_ascii_loadparm(char *loadparm)
- 	strstrip(loadparm);
- }
- 
--static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
-+static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
-+				       struct kobj_attribute *attr, char *page)
- {
- 	char buf[LOADPARM_LEN + 1];
- 
-@@ -473,7 +528,8 @@ static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
- 	return sprintf(page, "%s\n", buf);
- }
- 
--static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
-+static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
-+					struct kobj_attribute *attr,
- 					const char *buf, size_t len)
- {
- 	int i, lp_len;
-@@ -500,7 +556,7 @@ static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
- 	return len;
- }
- 
--static struct subsys_attribute sys_reipl_ccw_loadparm_attr =
-+static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
- 	__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
- 	       reipl_ccw_loadparm_store);
- 
-@@ -539,7 +595,9 @@ static int reipl_set_type(enum ipl_type type)
- 
- 	switch(type) {
- 	case IPL_TYPE_CCW:
--		if (MACHINE_IS_VM)
-+		if (diag308_set_works)
-+			reipl_method = REIPL_METHOD_CCW_DIAG;
-+		else if (MACHINE_IS_VM)
- 			reipl_method = REIPL_METHOD_CCW_VM;
- 		else
- 			reipl_method = REIPL_METHOD_CCW_CIO;
-@@ -568,13 +626,15 @@ static int reipl_set_type(enum ipl_type type)
- 	return 0;
- }
- 
--static ssize_t reipl_type_show(struct kset *kset, char *page)
-+static ssize_t reipl_type_show(struct kobject *kobj,
-+			       struct kobj_attribute *attr, char *page)
- {
- 	return sprintf(page, "%s\n", ipl_type_str(reipl_type));
- }
- 
--static ssize_t reipl_type_store(struct kset *kset, const char *buf,
--				size_t len)
-+static ssize_t reipl_type_store(struct kobject *kobj,
-+				struct kobj_attribute *attr,
-+				const char *buf, size_t len)
- {
- 	int rc = -EINVAL;
- 
-@@ -587,140 +647,12 @@ static ssize_t reipl_type_store(struct kset *kset, const char *buf,
- 	return (rc != 0) ? rc : len;
- }
- 
--static struct subsys_attribute reipl_type_attr =
--		__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
--
--static decl_subsys(reipl, NULL, NULL);
--
--/*
-- * dump section
-- */
--
--/* FCP dump device attributes */
--
--DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
--		   dump_block_fcp->ipl_info.fcp.wwpn);
--DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
--		   dump_block_fcp->ipl_info.fcp.lun);
--DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
--		   dump_block_fcp->ipl_info.fcp.bootprog);
--DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
--		   dump_block_fcp->ipl_info.fcp.br_lba);
--DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
--		   dump_block_fcp->ipl_info.fcp.devno);
--
--static struct attribute *dump_fcp_attrs[] = {
--	&sys_dump_fcp_device_attr.attr,
--	&sys_dump_fcp_wwpn_attr.attr,
--	&sys_dump_fcp_lun_attr.attr,
--	&sys_dump_fcp_bootprog_attr.attr,
--	&sys_dump_fcp_br_lba_attr.attr,
--	NULL,
--};
--
--static struct attribute_group dump_fcp_attr_group = {
--	.name  = IPL_FCP_STR,
--	.attrs = dump_fcp_attrs,
--};
--
--/* CCW dump device attributes */
--
--DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
--		   dump_block_ccw->ipl_info.ccw.devno);
--
--static struct attribute *dump_ccw_attrs[] = {
--	&sys_dump_ccw_device_attr.attr,
--	NULL,
--};
--
--static struct attribute_group dump_ccw_attr_group = {
--	.name  = IPL_CCW_STR,
--	.attrs = dump_ccw_attrs,
--};
--
--/* dump type */
--
--static int dump_set_type(enum dump_type type)
--{
--	if (!(dump_capabilities & type))
--		return -EINVAL;
--	switch(type) {
--	case DUMP_TYPE_CCW:
--		if (MACHINE_IS_VM)
--			dump_method = DUMP_METHOD_CCW_VM;
--		else if (diag308_set_works)
--			dump_method = DUMP_METHOD_CCW_DIAG;
--		else
--			dump_method = DUMP_METHOD_CCW_CIO;
--		break;
--	case DUMP_TYPE_FCP:
--		dump_method = DUMP_METHOD_FCP_DIAG;
--		break;
--	default:
--		dump_method = DUMP_METHOD_NONE;
--	}
--	dump_type = type;
--	return 0;
--}
--
--static ssize_t dump_type_show(struct kset *kset, char *page)
--{
--	return sprintf(page, "%s\n", dump_type_str(dump_type));
--}
--
--static ssize_t dump_type_store(struct kset *kset, const char *buf,
--			       size_t len)
--{
--	int rc = -EINVAL;
--
--	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
--		rc = dump_set_type(DUMP_TYPE_NONE);
--	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
--		rc = dump_set_type(DUMP_TYPE_CCW);
--	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
--		rc = dump_set_type(DUMP_TYPE_FCP);
--	return (rc != 0) ? rc : len;
--}
--
--static struct subsys_attribute dump_type_attr =
--		__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
--
--static decl_subsys(dump, NULL, NULL);
--
--/*
-- * Shutdown actions section
-- */
--
--static decl_subsys(shutdown_actions, NULL, NULL);
--
--/* on panic */
--
--static ssize_t on_panic_show(struct kset *kset, char *page)
--{
--	return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
--}
--
--static ssize_t on_panic_store(struct kset *kset, const char *buf,
--			      size_t len)
--{
--	if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
--		on_panic_action = SHUTDOWN_REIPL;
--	else if (strncmp(buf, SHUTDOWN_DUMP_STR,
--			 strlen(SHUTDOWN_DUMP_STR)) == 0)
--		on_panic_action = SHUTDOWN_DUMP;
--	else if (strncmp(buf, SHUTDOWN_STOP_STR,
--			 strlen(SHUTDOWN_STOP_STR)) == 0)
--		on_panic_action = SHUTDOWN_STOP;
--	else
--		return -EINVAL;
--
--	return len;
--}
-+static struct kobj_attribute reipl_type_attr =
-+	__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
- 
--static struct subsys_attribute on_panic_attr =
--		__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
-+static struct kset *reipl_kset;
- 
--void do_reipl(void)
-+void reipl_run(struct shutdown_trigger *trigger)
- {
- 	struct ccw_dev_id devid;
- 	static char buf[100];
-@@ -729,8 +661,6 @@ void do_reipl(void)
- 	switch (reipl_method) {
- 	case REIPL_METHOD_CCW_CIO:
- 		devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
--		if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
--			diag308(DIAG308_IPL, NULL);
- 		devid.ssid  = 0;
- 		reipl_ccw_dev(&devid);
- 		break;
-@@ -771,98 +701,6 @@ void do_reipl(void)
- 	default:
- 		break;
- 	}
--	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
--}
--
--static void do_dump(void)
--{
--	struct ccw_dev_id devid;
--	static char buf[100];
--
--	switch (dump_method) {
--	case DUMP_METHOD_CCW_CIO:
--		smp_send_stop();
--		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
--		devid.ssid  = 0;
--		reipl_ccw_dev(&devid);
--		break;
--	case DUMP_METHOD_CCW_VM:
--		smp_send_stop();
--		sprintf(buf, "STORE STATUS");
--		__cpcmd(buf, NULL, 0, NULL);
--		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
--		__cpcmd(buf, NULL, 0, NULL);
--		break;
--	case DUMP_METHOD_CCW_DIAG:
--		diag308(DIAG308_SET, dump_block_ccw);
--		diag308(DIAG308_DUMP, NULL);
--		break;
--	case DUMP_METHOD_FCP_DIAG:
--		diag308(DIAG308_SET, dump_block_fcp);
--		diag308(DIAG308_DUMP, NULL);
--		break;
--	case DUMP_METHOD_NONE:
--	default:
--		return;
--	}
--	printk(KERN_EMERG "Dump failed!\n");
--}
--
--/* init functions */
--
--static int __init ipl_register_fcp_files(void)
--{
--	int rc;
--
--	rc = sysfs_create_group(&ipl_subsys.kobj,
--				&ipl_fcp_attr_group);
--	if (rc)
--		goto out;
--	rc = sysfs_create_bin_file(&ipl_subsys.kobj,
--				   &ipl_parameter_attr);
--	if (rc)
--		goto out_ipl_parm;
--	rc = sysfs_create_bin_file(&ipl_subsys.kobj,
--				   &ipl_scp_data_attr);
--	if (!rc)
--		goto out;
--
--	sysfs_remove_bin_file(&ipl_subsys.kobj, &ipl_parameter_attr);
--
--out_ipl_parm:
--	sysfs_remove_group(&ipl_subsys.kobj, &ipl_fcp_attr_group);
--out:
--	return rc;
--}
--
--static int __init ipl_init(void)
--{
--	int rc;
--
--	rc = firmware_register(&ipl_subsys);
--	if (rc)
--		return rc;
--	switch (ipl_info.type) {
--	case IPL_TYPE_CCW:
--		rc = sysfs_create_group(&ipl_subsys.kobj,
--					&ipl_ccw_attr_group);
--		break;
--	case IPL_TYPE_FCP:
--	case IPL_TYPE_FCP_DUMP:
--		rc = ipl_register_fcp_files();
--		break;
--	case IPL_TYPE_NSS:
--		rc = sysfs_create_group(&ipl_subsys.kobj,
--					&ipl_nss_attr_group);
--		break;
--	default:
--		rc = sysfs_create_group(&ipl_subsys.kobj,
--					&ipl_unknown_attr_group);
--		break;
--	}
--	if (rc)
--		firmware_unregister(&ipl_subsys);
--	return rc;
- }
- 
- static void __init reipl_probe(void)
-@@ -883,7 +721,7 @@ static int __init reipl_nss_init(void)
- 
- 	if (!MACHINE_IS_VM)
- 		return 0;
--	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_nss_attr_group);
-+	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
- 	if (rc)
- 		return rc;
- 	strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
-@@ -898,7 +736,7 @@ static int __init reipl_ccw_init(void)
- 	reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
- 	if (!reipl_block_ccw)
- 		return -ENOMEM;
--	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_ccw_attr_group);
-+	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group);
- 	if (rc) {
- 		free_page((unsigned long)reipl_block_ccw);
- 		return rc;
-@@ -907,6 +745,7 @@ static int __init reipl_ccw_init(void)
- 	reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
- 	reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
- 	reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
-+	reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
- 	/* check if read scp info worked and set loadparm */
- 	if (sclp_ipl_info.is_valid)
- 		memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
-@@ -915,8 +754,7 @@ static int __init reipl_ccw_init(void)
- 		/* read scp info failed: set empty loadparm (EBCDIC blanks) */
- 		memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
- 		       LOADPARM_LEN);
--	/* FIXME: check for diag308_set_works when enabling diag ccw reipl */
--	if (!MACHINE_IS_VM)
-+	if (!MACHINE_IS_VM && !diag308_set_works)
- 		sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
- 	if (ipl_info.type == IPL_TYPE_CCW)
- 		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
-@@ -936,7 +774,7 @@ static int __init reipl_fcp_init(void)
- 	reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
- 	if (!reipl_block_fcp)
- 		return -ENOMEM;
--	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_fcp_attr_group);
-+	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group);
- 	if (rc) {
- 		free_page((unsigned long)reipl_block_fcp);
- 		return rc;
-@@ -954,16 +792,16 @@ static int __init reipl_fcp_init(void)
- 	return 0;
- }
- 
--static int __init reipl_init(void)
-+static int reipl_init(void)
- {
- 	int rc;
- 
--	rc = firmware_register(&reipl_subsys);
--	if (rc)
--		return rc;
--	rc = subsys_create_file(&reipl_subsys, &reipl_type_attr);
-+	reipl_kset = kset_create_and_add("reipl", NULL, firmware_kobj);
-+	if (!reipl_kset)
-+		return -ENOMEM;
-+	rc = sysfs_create_file(&reipl_kset->kobj, &reipl_type_attr.attr);
- 	if (rc) {
--		firmware_unregister(&reipl_subsys);
-+		kset_unregister(reipl_kset);
- 		return rc;
- 	}
- 	rc = reipl_ccw_init();
-@@ -981,6 +819,140 @@ static int __init reipl_init(void)
- 	return 0;
- }
- 
-+static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR,
-+					      reipl_run, reipl_init};
++/* Begin user sect3 */
++
++#define yywrap() 1
++#define YY_SKIP_YYWRAP
++
++typedef unsigned char YY_CHAR;
++
++FILE *yyin = (FILE *) 0, *yyout = (FILE *) 0;
++
++typedef int yy_state_type;
 +
++extern int yylineno;
++
++int yylineno = 1;
++
++extern char *yytext;
++#define yytext_ptr yytext
++
++static yy_state_type yy_get_previous_state (void );
++static yy_state_type yy_try_NUL_trans (yy_state_type current_state  );
++static int yy_get_next_buffer (void );
++static void yy_fatal_error (yyconst char msg[]  );
++
++/* Done after the current pattern has been matched and before the
++ * corresponding action - sets up yytext.
++ */
++#define YY_DO_BEFORE_ACTION \
++	(yytext_ptr) = yy_bp; \
++	yyleng = (size_t) (yy_cp - yy_bp); \
++	(yy_hold_char) = *yy_cp; \
++	*yy_cp = '\0'; \
++	(yy_c_buf_p) = yy_cp;
++
++#define YY_NUM_RULES 20
++#define YY_END_OF_BUFFER 21
++/* This struct is not used in this scanner,
++   but its presence is necessary. */
++struct yy_trans_info
++	{
++	flex_int32_t yy_verify;
++	flex_int32_t yy_nxt;
++	};
++static yyconst flex_int16_t yy_accept[94] =
++    {   0,
++        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
++       21,   19,   16,   16,   19,   19,   19,    8,    8,   19,
++        8,   19,   19,   19,   19,   14,   15,   15,   19,    9,
++        9,   16,    0,    3,    0,    0,   10,    0,    0,    0,
++        0,    0,    0,    8,    8,    6,    0,    7,    0,    2,
++        0,   13,   13,   15,   15,    9,    0,   12,   10,    0,
++        0,    0,    0,   18,    0,    0,    0,    2,    9,    0,
++       17,    0,    0,    0,   11,    0,    0,    0,    0,    0,
++        0,    0,    0,    0,    4,    0,    0,    1,    0,    0,
++        0,    5,    0
++
++    } ;
++
++static yyconst flex_int32_t yy_ec[256] =
++    {   0,
++        1,    1,    1,    1,    1,    1,    1,    1,    2,    3,
++        2,    2,    2,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    2,    1,    4,    5,    1,    1,    6,    1,    1,
++        1,    7,    8,    8,    9,    8,   10,   11,   12,   13,
++       13,   13,   13,   13,   13,   13,   13,   14,    1,    1,
++        1,    1,    8,    8,   15,   15,   15,   15,   15,   15,
++       16,   16,   16,   16,   16,   16,   16,   16,   16,   16,
++       16,   16,   16,   16,   16,   16,   16,   17,   16,   16,
++        1,   18,   19,    1,   16,    1,   15,   20,   21,   22,
++
++       23,   15,   16,   24,   25,   16,   16,   26,   27,   28,
++       24,   16,   16,   29,   30,   31,   32,   33,   16,   17,
++       16,   16,   34,    1,   35,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1
++    } ;
++
++static yyconst flex_int32_t yy_meta[36] =
++    {   0,
++        1,    1,    1,    1,    2,    1,    2,    2,    2,    3,
++        4,    4,    4,    5,    6,    7,    7,    1,    1,    6,
++        6,    6,    6,    7,    7,    7,    7,    7,    7,    7,
++        7,    7,    7,    8,    1
++    } ;
++
++static yyconst flex_int16_t yy_base[107] =
++    {   0,
++        0,    0,   32,    0,   53,    0,   76,    0,  108,  111,
++      280,  288,   37,   39,   33,   36,  106,    0,  123,  146,
++      255,  251,   45,    0,  159,  288,    0,   53,  108,  172,
++      114,  127,  158,  288,  245,    0,    0,  234,  235,  236,
++      197,  195,  199,    0,    0,  288,    0,  288,  160,  288,
++      183,  288,    0,    0,  183,  182,    0,    0,    0,    0,
++      204,  189,  207,  288,  179,  187,  180,  194,    0,  171,
++      288,  196,  178,  174,  288,  169,  169,  177,  165,  153,
++      143,  155,  137,  118,  288,  122,   42,  288,   36,   36,
++       40,  288,  288,  212,  218,  223,  229,  234,  239,  245,
++
++      251,  255,  262,  270,  275,  280
++    } ;
++
++static yyconst flex_int16_t yy_def[107] =
++    {   0,
++       93,    1,    1,    3,    3,    5,   93,    7,    3,    3,
++       93,   93,   93,   93,   94,   95,   93,   96,   93,   19,
++       19,   20,   97,   98,   20,   93,   99,  100,   95,   93,
++       93,   93,   94,   93,   94,  101,  102,   93,  103,  104,
++       93,   93,   93,   96,   19,   93,   20,   93,   97,   93,
++       97,   93,   20,   99,  100,   93,  105,  101,  102,  106,
++      103,  103,  104,   93,   93,   93,   93,   94,  105,  106,
++       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
++       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
++       93,   93,    0,   93,   93,   93,   93,   93,   93,   93,
++
++       93,   93,   93,   93,   93,   93
++    } ;
++
++static yyconst flex_int16_t yy_nxt[324] =
++    {   0,
++       12,   13,   14,   15,   12,   16,   12,   12,   12,   17,
++       18,   18,   18,   12,   19,   20,   20,   12,   12,   21,
++       19,   21,   19,   22,   20,   20,   20,   20,   20,   20,
++       20,   20,   20,   12,   12,   23,   34,   12,   32,   32,
++       32,   32,   12,   12,   12,   36,   20,   33,   50,   92,
++       35,   20,   20,   20,   20,   20,   15,   54,   91,   54,
++       54,   54,   51,   24,   24,   24,   46,   25,   90,   38,
++       89,   26,   25,   25,   25,   25,   12,   13,   14,   15,
++       27,   12,   27,   27,   27,   17,   27,   27,   27,   12,
++       28,   28,   28,   12,   12,   28,   28,   28,   28,   28,
++
++       28,   28,   28,   28,   28,   28,   28,   28,   28,   12,
++       12,   15,   39,   29,   15,   40,   29,   93,   30,   31,
++       31,   30,   31,   31,   56,   56,   56,   41,   32,   32,
++       42,   88,   43,   45,   45,   45,   46,   45,   47,   47,
++       87,   38,   45,   45,   45,   45,   47,   47,   47,   47,
++       47,   47,   47,   47,   47,   47,   47,   47,   47,   86,
++       47,   34,   33,   50,   85,   47,   47,   47,   47,   53,
++       53,   53,   84,   53,   83,   35,   82,   51,   53,   53,
++       53,   53,   56,   56,   56,   93,   68,   54,   57,   54,
++       54,   54,   56,   56,   56,   62,   46,   34,   71,   81,
++
++       80,   79,   78,   77,   76,   75,   74,   73,   72,   64,
++       62,   35,   33,   33,   33,   33,   33,   33,   33,   33,
++       37,   67,   66,   37,   37,   37,   44,   65,   44,   49,
++       49,   49,   49,   49,   49,   49,   49,   52,   64,   52,
++       54,   62,   54,   60,   54,   54,   55,   93,   55,   55,
++       55,   55,   58,   58,   58,   48,   58,   58,   59,   48,
++       59,   59,   61,   61,   61,   61,   61,   61,   61,   61,
++       63,   63,   63,   63,   63,   63,   63,   63,   69,   93,
++       69,   70,   70,   70,   93,   70,   70,   11,   93,   93,
++       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
++
++       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
++       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
++       93,   93,   93
++    } ;
++
++static yyconst flex_int16_t yy_chk[324] =
++    {   0,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    3,   15,    3,   13,   13,
++       14,   14,    3,    3,    3,   16,    3,   23,   23,   91,
++       15,    3,    3,    3,    3,    3,    5,   28,   90,   28,
++       28,   28,   23,    5,    5,    5,   28,    5,   89,   16,
++       87,    5,    5,    5,    5,    5,    7,    7,    7,    7,
++        7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
++        7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
++
++        7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
++        7,    9,   17,    9,   10,   17,   10,   29,    9,    9,
++        9,   10,   10,   10,   31,   31,   31,   17,   32,   32,
++       17,   86,   17,   19,   19,   19,   19,   19,   19,   19,
++       84,   29,   19,   19,   19,   19,   19,   19,   19,   19,
++       19,   19,   19,   19,   19,   19,   20,   20,   20,   83,
++       20,   33,   49,   49,   82,   20,   20,   20,   20,   25,
++       25,   25,   81,   25,   80,   33,   79,   49,   25,   25,
++       25,   25,   30,   30,   30,   51,   51,   55,   30,   55,
++       55,   55,   56,   56,   56,   62,   55,   68,   62,   78,
++
++       77,   76,   74,   73,   72,   70,   67,   66,   65,   63,
++       61,   68,   94,   94,   94,   94,   94,   94,   94,   94,
++       95,   43,   42,   95,   95,   95,   96,   41,   96,   97,
++       97,   97,   97,   97,   97,   97,   97,   98,   40,   98,
++       99,   39,   99,   38,   99,   99,  100,   35,  100,  100,
++      100,  100,  101,  101,  101,   22,  101,  101,  102,   21,
++      102,  102,  103,  103,  103,  103,  103,  103,  103,  103,
++      104,  104,  104,  104,  104,  104,  104,  104,  105,   11,
++      105,  106,  106,  106,    0,  106,  106,   93,   93,   93,
++       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
++
++       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
++       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
++       93,   93,   93
++    } ;
++
++/* Table of booleans, true if rule could match eol. */
++static yyconst flex_int32_t yy_rule_can_match_eol[21] =
++    {   0,
++0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 
++    0,     };
++
++static yy_state_type yy_last_accepting_state;
++static char *yy_last_accepting_cpos;
++
++extern int yy_flex_debug;
++int yy_flex_debug = 0;
++
++/* The intent behind this definition is that it'll catch
++ * any uses of REJECT which flex missed.
++ */
++#define REJECT reject_used_but_not_detected
++#define yymore() yymore_used_but_not_detected
++#define YY_MORE_ADJ 0
++#define YY_RESTORE_YY_MORE_OFFSET
++char *yytext;
++#line 1 "dtc-lexer.l"
 +/*
-+ * dump shutdown action: Dump Linux on shutdown.
++ * (C) Copyright David Gibson <dwg at au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
 + */
 +
-+/* FCP dump device attributes */
 +
-+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
-+		   dump_block_fcp->ipl_info.fcp.wwpn);
-+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
-+		   dump_block_fcp->ipl_info.fcp.lun);
-+DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
-+		   dump_block_fcp->ipl_info.fcp.bootprog);
-+DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
-+		   dump_block_fcp->ipl_info.fcp.br_lba);
-+DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
-+		   dump_block_fcp->ipl_info.fcp.devno);
 +
-+static struct attribute *dump_fcp_attrs[] = {
-+	&sys_dump_fcp_device_attr.attr,
-+	&sys_dump_fcp_wwpn_attr.attr,
-+	&sys_dump_fcp_lun_attr.attr,
-+	&sys_dump_fcp_bootprog_attr.attr,
-+	&sys_dump_fcp_br_lba_attr.attr,
-+	NULL,
-+};
 +
-+static struct attribute_group dump_fcp_attr_group = {
-+	.name  = IPL_FCP_STR,
-+	.attrs = dump_fcp_attrs,
-+};
++#line 33 "dtc-lexer.l"
++#include "dtc.h"
++#include "srcpos.h"
++#include "dtc-parser.tab.h"
 +
-+/* CCW dump device attributes */
 +
-+DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
-+		   dump_block_ccw->ipl_info.ccw.devno);
++/*#define LEXDEBUG	1*/
 +
-+static struct attribute *dump_ccw_attrs[] = {
-+	&sys_dump_ccw_device_attr.attr,
-+	NULL,
-+};
++#ifdef LEXDEBUG
++#define DPRINT(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)
++#else
++#define DPRINT(fmt, ...)	do { } while (0)
++#endif
 +
-+static struct attribute_group dump_ccw_attr_group = {
-+	.name  = IPL_CCW_STR,
-+	.attrs = dump_ccw_attrs,
-+};
++static int dts_version; /* = 0 */
 +
-+/* dump type */
++#define BEGIN_DEFAULT()	if (dts_version == 0) { \
++				DPRINT("<INITIAL>\n"); \
++				BEGIN(INITIAL); \
++			} else { \
++				DPRINT("<V1>\n"); \
++				BEGIN(V1); \
++			}
++#line 627 "dtc-lexer.lex.c"
 +
-+static int dump_set_type(enum dump_type type)
++#define INITIAL 0
++#define INCLUDE 1
++#define BYTESTRING 2
++#define PROPNODENAME 3
++#define V1 4
++
++#ifndef YY_NO_UNISTD_H
++/* Special case for "unistd.h", since it is non-ANSI. We include it way
++ * down here because we want the user's section 1 to have been scanned first.
++ * The user has a chance to override it with an option.
++ */
++#include <unistd.h>
++#endif
++
++#ifndef YY_EXTRA_TYPE
++#define YY_EXTRA_TYPE void *
++#endif
++
++static int yy_init_globals (void );
++
++/* Macros after this point can all be overridden by user definitions in
++ * section 1.
++ */
++
++#ifndef YY_SKIP_YYWRAP
++#ifdef __cplusplus
++extern "C" int yywrap (void );
++#else
++extern int yywrap (void );
++#endif
++#endif
++
++#ifndef yytext_ptr
++static void yy_flex_strncpy (char *,yyconst char *,int );
++#endif
++
++#ifdef YY_NEED_STRLEN
++static int yy_flex_strlen (yyconst char * );
++#endif
++
++#ifndef YY_NO_INPUT
++
++#ifdef __cplusplus
++static int yyinput (void );
++#else
++static int input (void );
++#endif
++
++#endif
++
++/* Amount of stuff to slurp up with each read. */
++#ifndef YY_READ_BUF_SIZE
++#define YY_READ_BUF_SIZE 8192
++#endif
++
++/* Copy whatever the last rule matched to the standard output. */
++#ifndef ECHO
++/* This used to be an fputs(), but since the string might contain NUL's,
++ * we now use fwrite().
++ */
++#define ECHO (void) fwrite( yytext, yyleng, 1, yyout )
++#endif
++
++/* Gets input and stuffs it into "buf".  The number of characters read, or
++ * YY_NULL, is returned in "result".
++ */
++#ifndef YY_INPUT
++#define YY_INPUT(buf,result,max_size) \
++	if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
++		{ \
++		int c = '*'; \
++		size_t n; \
++		for ( n = 0; n < max_size && \
++			     (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
++			buf[n] = (char) c; \
++		if ( c == '\n' ) \
++			buf[n++] = (char) c; \
++		if ( c == EOF && ferror( yyin ) ) \
++			YY_FATAL_ERROR( "input in flex scanner failed" ); \
++		result = n; \
++		} \
++	else \
++		{ \
++		errno=0; \
++		while ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \
++			{ \
++			if( errno != EINTR) \
++				{ \
++				YY_FATAL_ERROR( "input in flex scanner failed" ); \
++				break; \
++				} \
++			errno=0; \
++			clearerr(yyin); \
++			} \
++		}\
++\
++
++#endif
++
++/* No semi-colon after return; correct usage is to write "yyterminate();" -
++ * we don't want an extra ';' after the "return" because that will cause
++ * some compilers to complain about unreachable statements.
++ */
++#ifndef yyterminate
++#define yyterminate() return YY_NULL
++#endif
++
++/* Number of entries by which start-condition stack grows. */
++#ifndef YY_START_STACK_INCR
++#define YY_START_STACK_INCR 25
++#endif
++
++/* Report a fatal error. */
++#ifndef YY_FATAL_ERROR
++#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
++#endif
++
++/* end tables serialization structures and prototypes */
++
++/* Default declaration of generated scanner - a define so the user can
++ * easily add parameters.
++ */
++#ifndef YY_DECL
++#define YY_DECL_IS_OURS 1
++
++extern int yylex (void);
++
++#define YY_DECL int yylex (void)
++#endif /* !YY_DECL */
++
++/* Code executed at the beginning of each rule, after yytext and yyleng
++ * have been set up.
++ */
++#ifndef YY_USER_ACTION
++#define YY_USER_ACTION
++#endif
++
++/* Code executed at the end of each rule. */
++#ifndef YY_BREAK
++#define YY_BREAK break;
++#endif
++
++#define YY_RULE_SETUP \
++	YY_USER_ACTION
++
++/** The main scanner function which does all the work.
++ */
++YY_DECL
 +{
-+	if (!(dump_capabilities & type))
-+		return -EINVAL;
-+	switch (type) {
-+	case DUMP_TYPE_CCW:
-+		if (diag308_set_works)
-+			dump_method = DUMP_METHOD_CCW_DIAG;
-+		else if (MACHINE_IS_VM)
-+			dump_method = DUMP_METHOD_CCW_VM;
-+		else
-+			dump_method = DUMP_METHOD_CCW_CIO;
-+		break;
-+	case DUMP_TYPE_FCP:
-+		dump_method = DUMP_METHOD_FCP_DIAG;
++	register yy_state_type yy_current_state;
++	register char *yy_cp, *yy_bp;
++	register int yy_act;
++    
++#line 57 "dtc-lexer.l"
++
++#line 784 "dtc-lexer.lex.c"
++
++	if ( !(yy_init) )
++		{
++		(yy_init) = 1;
++
++#ifdef YY_USER_INIT
++		YY_USER_INIT;
++#endif
++
++		if ( ! (yy_start) )
++			(yy_start) = 1;	/* first start state */
++
++		if ( ! yyin )
++			yyin = stdin;
++
++		if ( ! yyout )
++			yyout = stdout;
++
++		if ( ! YY_CURRENT_BUFFER ) {
++			yyensure_buffer_stack ();
++			YY_CURRENT_BUFFER_LVALUE =
++				yy_create_buffer(yyin,YY_BUF_SIZE );
++		}
++
++		yy_load_buffer_state( );
++		}
++
++	while ( 1 )		/* loops until end-of-file is reached */
++		{
++		yy_cp = (yy_c_buf_p);
++
++		/* Support of yytext. */
++		*yy_cp = (yy_hold_char);
++
++		/* yy_bp points to the position in yy_ch_buf of the start of
++		 * the current run.
++		 */
++		yy_bp = yy_cp;
++
++		yy_current_state = (yy_start);
++yy_match:
++		do
++			{
++			register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];
++			if ( yy_accept[yy_current_state] )
++				{
++				(yy_last_accepting_state) = yy_current_state;
++				(yy_last_accepting_cpos) = yy_cp;
++				}
++			while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
++				{
++				yy_current_state = (int) yy_def[yy_current_state];
++				if ( yy_current_state >= 94 )
++					yy_c = yy_meta[(unsigned int) yy_c];
++				}
++			yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
++			++yy_cp;
++			}
++		while ( yy_base[yy_current_state] != 288 );
++
++yy_find_action:
++		yy_act = yy_accept[yy_current_state];
++		if ( yy_act == 0 )
++			{ /* have to back up */
++			yy_cp = (yy_last_accepting_cpos);
++			yy_current_state = (yy_last_accepting_state);
++			yy_act = yy_accept[yy_current_state];
++			}
++
++		YY_DO_BEFORE_ACTION;
++
++		if ( yy_act != YY_END_OF_BUFFER && yy_rule_can_match_eol[yy_act] )
++			{
++			int yyl;
++			for ( yyl = 0; yyl < yyleng; ++yyl )
++				if ( yytext[yyl] == '\n' )
++					   
++    yylineno++;
++;
++			}
++
++do_action:	/* This label is used only to access EOF actions. */
++
++		switch ( yy_act )
++	{ /* beginning of action switch */
++			case 0: /* must back up */
++			/* undo the effects of YY_DO_BEFORE_ACTION */
++			*yy_cp = (yy_hold_char);
++			yy_cp = (yy_last_accepting_cpos);
++			yy_current_state = (yy_last_accepting_state);
++			goto yy_find_action;
++
++case 1:
++YY_RULE_SETUP
++#line 58 "dtc-lexer.l"
++BEGIN(INCLUDE);
++	YY_BREAK
++case 2:
++YY_RULE_SETUP
++#line 60 "dtc-lexer.l"
++{
++			yytext[strlen(yytext) - 1] = 0;
++			if (!push_input_file(yytext + 1)) {
++				/* Some unrecoverable error.*/
++				exit(1);
++			}
++			BEGIN_DEFAULT();
++		}
++	YY_BREAK
++case YY_STATE_EOF(INITIAL):
++case YY_STATE_EOF(INCLUDE):
++case YY_STATE_EOF(BYTESTRING):
++case YY_STATE_EOF(PROPNODENAME):
++case YY_STATE_EOF(V1):
++#line 70 "dtc-lexer.l"
++{
++			if (!pop_input_file()) {
++				yyterminate();
++			}
++		}
++	YY_BREAK
++case 3:
++/* rule 3 can match eol */
++YY_RULE_SETUP
++#line 76 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("String: %s\n", yytext);
++			yylval.data = data_copy_escape_string(yytext+1,
++					yyleng-2);
++			yylloc.first_line = yylineno;
++			return DT_STRING;
++		}
++	YY_BREAK
++case 4:
++YY_RULE_SETUP
++#line 86 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Keyword: /dts-v1/\n");
++			dts_version = 1;
++			BEGIN_DEFAULT();
++			return DT_V1;
++		}
++	YY_BREAK
++case 5:
++YY_RULE_SETUP
++#line 95 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Keyword: /memreserve/\n");
++			BEGIN_DEFAULT();
++			return DT_MEMRESERVE;
++		}
++	YY_BREAK
++case 6:
++YY_RULE_SETUP
++#line 103 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Label: %s\n", yytext);
++			yylval.labelref = strdup(yytext);
++			yylval.labelref[yyleng-1] = '\0';
++			return DT_LABEL;
++		}
++	YY_BREAK
++case 7:
++YY_RULE_SETUP
++#line 112 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			if (*yytext == 'b')
++				yylval.cbase = 2;
++			else if (*yytext == 'o')
++				yylval.cbase = 8;
++			else if (*yytext == 'd')
++				yylval.cbase = 10;
++			else
++				yylval.cbase = 16;
++			DPRINT("Base: %d\n", yylval.cbase);
++			return DT_BASE;
++		}
++	YY_BREAK
++case 8:
++YY_RULE_SETUP
++#line 127 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			yylval.literal = strdup(yytext);
++			DPRINT("Literal: '%s'\n", yylval.literal);
++			return DT_LEGACYLITERAL;
++		}
++	YY_BREAK
++case 9:
++YY_RULE_SETUP
++#line 135 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			yylval.literal = strdup(yytext);
++			DPRINT("Literal: '%s'\n", yylval.literal);
++			return DT_LITERAL;
++		}
++	YY_BREAK
++case 10:
++YY_RULE_SETUP
++#line 143 "dtc-lexer.l"
++{	/* label reference */
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Ref: %s\n", yytext+1);
++			yylval.labelref = strdup(yytext+1);
++			return DT_REF;
++		}
++	YY_BREAK
++case 11:
++YY_RULE_SETUP
++#line 151 "dtc-lexer.l"
++{	/* new-style path reference */
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			yytext[yyleng-1] = '\0';
++			DPRINT("Ref: %s\n", yytext+2);
++			yylval.labelref = strdup(yytext+2);
++			return DT_REF;
++		}
++	YY_BREAK
++case 12:
++YY_RULE_SETUP
++#line 160 "dtc-lexer.l"
++{	/* old-style path reference */
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Ref: %s\n", yytext+1);
++			yylval.labelref = strdup(yytext+1);
++			return DT_REF;
++		}
++	YY_BREAK
++case 13:
++YY_RULE_SETUP
++#line 168 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			yylval.byte = strtol(yytext, NULL, 16);
++			DPRINT("Byte: %02x\n", (int)yylval.byte);
++			return DT_BYTE;
++		}
++	YY_BREAK
++case 14:
++YY_RULE_SETUP
++#line 176 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("/BYTESTRING\n");
++			BEGIN_DEFAULT();
++			return ']';
++		}
++	YY_BREAK
++case 15:
++YY_RULE_SETUP
++#line 184 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("PropNodeName: %s\n", yytext);
++			yylval.propnodename = strdup(yytext);
++			BEGIN_DEFAULT();
++			return DT_PROPNODENAME;
++		}
++	YY_BREAK
++case 16:
++/* rule 16 can match eol */
++YY_RULE_SETUP
++#line 194 "dtc-lexer.l"
++/* eat whitespace */
++	YY_BREAK
++case 17:
++/* rule 17 can match eol */
++YY_RULE_SETUP
++#line 196 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Comment: %s\n", yytext);
++			/* eat comments */
++		}
++	YY_BREAK
++case 18:
++/* rule 18 can match eol */
++YY_RULE_SETUP
++#line 203 "dtc-lexer.l"
++/* eat line comments */
++	YY_BREAK
++case 19:
++YY_RULE_SETUP
++#line 205 "dtc-lexer.l"
++{
++			yylloc.filenum = srcpos_filenum;
++			yylloc.first_line = yylineno;
++			DPRINT("Char: %c (\\x%02x)\n", yytext[0],
++				(unsigned)yytext[0]);
++			if (yytext[0] == '[') {
++				DPRINT("<BYTESTRING>\n");
++				BEGIN(BYTESTRING);
++			}
++			if ((yytext[0] == '{')
++			    || (yytext[0] == ';')) {
++				DPRINT("<PROPNODENAME>\n");
++				BEGIN(PROPNODENAME);
++			}
++			return yytext[0];
++		}
++	YY_BREAK
++case 20:
++YY_RULE_SETUP
++#line 222 "dtc-lexer.l"
++ECHO;
++	YY_BREAK
++#line 1111 "dtc-lexer.lex.c"
++
++	case YY_END_OF_BUFFER:
++		{
++		/* Amount of text matched not including the EOB char. */
++		int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
++
++		/* Undo the effects of YY_DO_BEFORE_ACTION. */
++		*yy_cp = (yy_hold_char);
++		YY_RESTORE_YY_MORE_OFFSET
++
++		if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
++			{
++			/* We're scanning a new file or input source.  It's
++			 * possible that this happened because the user
++			 * just pointed yyin at a new source and called
++			 * yylex().  If so, then we have to assure
++			 * consistency between YY_CURRENT_BUFFER and our
++			 * globals.  Here is the right place to do so, because
++			 * this is the first action (other than possibly a
++			 * back-up) that will match for the new input source.
++			 */
++			(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
++			YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
++			YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
++			}
++
++		/* Note that here we test for yy_c_buf_p "<=" to the position
++		 * of the first EOB in the buffer, since yy_c_buf_p will
++		 * already have been incremented past the NUL character
++		 * (since all states make transitions on EOB to the
++		 * end-of-buffer state).  Contrast this with the test
++		 * in input().
++		 */
++		if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
++			{ /* This was really a NUL. */
++			yy_state_type yy_next_state;
++
++			(yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
++
++			yy_current_state = yy_get_previous_state(  );
++
++			/* Okay, we're now positioned to make the NUL
++			 * transition.  We couldn't have
++			 * yy_get_previous_state() go ahead and do it
++			 * for us because it doesn't know how to deal
++			 * with the possibility of jamming (and we don't
++			 * want to build jamming into it because then it
++			 * will run more slowly).
++			 */
++
++			yy_next_state = yy_try_NUL_trans( yy_current_state );
++
++			yy_bp = (yytext_ptr) + YY_MORE_ADJ;
++
++			if ( yy_next_state )
++				{
++				/* Consume the NUL. */
++				yy_cp = ++(yy_c_buf_p);
++				yy_current_state = yy_next_state;
++				goto yy_match;
++				}
++
++			else
++				{
++				yy_cp = (yy_c_buf_p);
++				goto yy_find_action;
++				}
++			}
++
++		else switch ( yy_get_next_buffer(  ) )
++			{
++			case EOB_ACT_END_OF_FILE:
++				{
++				(yy_did_buffer_switch_on_eof) = 0;
++
++				if ( yywrap( ) )
++					{
++					/* Note: because we've taken care in
++					 * yy_get_next_buffer() to have set up
++					 * yytext, we can now set up
++					 * yy_c_buf_p so that if some total
++					 * hoser (like flex itself) wants to
++					 * call the scanner after we return the
++					 * YY_NULL, it'll still work - another
++					 * YY_NULL will get returned.
++					 */
++					(yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
++
++					yy_act = YY_STATE_EOF(YY_START);
++					goto do_action;
++					}
++
++				else
++					{
++					if ( ! (yy_did_buffer_switch_on_eof) )
++						YY_NEW_FILE;
++					}
++				break;
++				}
++
++			case EOB_ACT_CONTINUE_SCAN:
++				(yy_c_buf_p) =
++					(yytext_ptr) + yy_amount_of_matched_text;
++
++				yy_current_state = yy_get_previous_state(  );
++
++				yy_cp = (yy_c_buf_p);
++				yy_bp = (yytext_ptr) + YY_MORE_ADJ;
++				goto yy_match;
++
++			case EOB_ACT_LAST_MATCH:
++				(yy_c_buf_p) =
++				&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
++
++				yy_current_state = yy_get_previous_state(  );
++
++				yy_cp = (yy_c_buf_p);
++				yy_bp = (yytext_ptr) + YY_MORE_ADJ;
++				goto yy_find_action;
++			}
 +		break;
++		}
++
 +	default:
-+		dump_method = DUMP_METHOD_NONE;
-+	}
-+	dump_type = type;
-+	return 0;
++		YY_FATAL_ERROR(
++			"fatal flex scanner internal error--no action found" );
++	} /* end of action switch */
++		} /* end of scanning one token */
++} /* end of yylex */
++
++/* yy_get_next_buffer - try to read in a new buffer
++ *
++ * Returns a code representing an action:
++ *	EOB_ACT_LAST_MATCH -
++ *	EOB_ACT_CONTINUE_SCAN - continue scanning from current position
++ *	EOB_ACT_END_OF_FILE - end of file
++ */
++static int yy_get_next_buffer (void)
++{
++    	register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
++	register char *source = (yytext_ptr);
++	register int number_to_move, i;
++	int ret_val;
++
++	if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
++		YY_FATAL_ERROR(
++		"fatal flex scanner internal error--end of buffer missed" );
++
++	if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
++		{ /* Don't try to fill the buffer, so this is an EOF. */
++		if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
++			{
++			/* We matched a single character, the EOB, so
++			 * treat this as a final EOF.
++			 */
++			return EOB_ACT_END_OF_FILE;
++			}
++
++		else
++			{
++			/* We matched some text prior to the EOB, first
++			 * process it.
++			 */
++			return EOB_ACT_LAST_MATCH;
++			}
++		}
++
++	/* Try to read more data. */
++
++	/* First move last chars to start of buffer. */
++	number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
++
++	for ( i = 0; i < number_to_move; ++i )
++		*(dest++) = *(source++);
++
++	if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
++		/* don't do the read, it's not guaranteed to return an EOF,
++		 * just force an EOF
++		 */
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
++
++	else
++		{
++			int num_to_read =
++			YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
++
++		while ( num_to_read <= 0 )
++			{ /* Not enough room in the buffer - grow it. */
++
++			/* just a shorter name for the current buffer */
++			YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
++
++			int yy_c_buf_p_offset =
++				(int) ((yy_c_buf_p) - b->yy_ch_buf);
++
++			if ( b->yy_is_our_buffer )
++				{
++				int new_size = b->yy_buf_size * 2;
++
++				if ( new_size <= 0 )
++					b->yy_buf_size += b->yy_buf_size / 8;
++				else
++					b->yy_buf_size *= 2;
++
++				b->yy_ch_buf = (char *)
++					/* Include room for 2 EOB chars. */
++					yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2  );
++				}
++			else
++				/* Can't grow it, we don't own it. */
++				b->yy_ch_buf = 0;
++
++			if ( ! b->yy_ch_buf )
++				YY_FATAL_ERROR(
++				"fatal error - scanner input buffer overflow" );
++
++			(yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
++
++			num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
++						number_to_move - 1;
++
++			}
++
++		if ( num_to_read > YY_READ_BUF_SIZE )
++			num_to_read = YY_READ_BUF_SIZE;
++
++		/* Read in more data. */
++		YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
++			(yy_n_chars), (size_t) num_to_read );
++
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
++		}
++
++	if ( (yy_n_chars) == 0 )
++		{
++		if ( number_to_move == YY_MORE_ADJ )
++			{
++			ret_val = EOB_ACT_END_OF_FILE;
++			yyrestart(yyin  );
++			}
++
++		else
++			{
++			ret_val = EOB_ACT_LAST_MATCH;
++			YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
++				YY_BUFFER_EOF_PENDING;
++			}
++		}
++
++	else
++		ret_val = EOB_ACT_CONTINUE_SCAN;
++
++	(yy_n_chars) += number_to_move;
++	YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
++	YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
++
++	(yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
++
++	return ret_val;
 +}
 +
-+static ssize_t dump_type_show(struct kobject *kobj,
-+			      struct kobj_attribute *attr, char *page)
++/* yy_get_previous_state - get the state just before the EOB char was reached */
++
++    static yy_state_type yy_get_previous_state (void)
 +{
-+	return sprintf(page, "%s\n", dump_type_str(dump_type));
++	register yy_state_type yy_current_state;
++	register char *yy_cp;
++    
++	yy_current_state = (yy_start);
++
++	for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
++		{
++		register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
++		if ( yy_accept[yy_current_state] )
++			{
++			(yy_last_accepting_state) = yy_current_state;
++			(yy_last_accepting_cpos) = yy_cp;
++			}
++		while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
++			{
++			yy_current_state = (int) yy_def[yy_current_state];
++			if ( yy_current_state >= 94 )
++				yy_c = yy_meta[(unsigned int) yy_c];
++			}
++		yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
++		}
++
++	return yy_current_state;
 +}
 +
-+static ssize_t dump_type_store(struct kobject *kobj,
-+			       struct kobj_attribute *attr,
-+			       const char *buf, size_t len)
++/* yy_try_NUL_trans - try to make a transition on the NUL character
++ *
++ * synopsis
++ *	next_state = yy_try_NUL_trans( current_state );
++ */
++    static yy_state_type yy_try_NUL_trans  (yy_state_type yy_current_state )
 +{
-+	int rc = -EINVAL;
++	register int yy_is_jam;
++    	register char *yy_cp = (yy_c_buf_p);
 +
-+	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
-+		rc = dump_set_type(DUMP_TYPE_NONE);
-+	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
-+		rc = dump_set_type(DUMP_TYPE_CCW);
-+	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
-+		rc = dump_set_type(DUMP_TYPE_FCP);
-+	return (rc != 0) ? rc : len;
++	register YY_CHAR yy_c = 1;
++	if ( yy_accept[yy_current_state] )
++		{
++		(yy_last_accepting_state) = yy_current_state;
++		(yy_last_accepting_cpos) = yy_cp;
++		}
++	while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
++		{
++		yy_current_state = (int) yy_def[yy_current_state];
++		if ( yy_current_state >= 94 )
++			yy_c = yy_meta[(unsigned int) yy_c];
++		}
++	yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
++	yy_is_jam = (yy_current_state == 93);
++
++	return yy_is_jam ? 0 : yy_current_state;
 +}
 +
-+static struct kobj_attribute dump_type_attr =
-+	__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
++#ifndef YY_NO_INPUT
++#ifdef __cplusplus
++    static int yyinput (void)
++#else
++    static int input  (void)
++#endif
 +
-+static struct kset *dump_kset;
++{
++	int c;
++    
++	*(yy_c_buf_p) = (yy_hold_char);
 +
-+static void dump_run(struct shutdown_trigger *trigger)
++	if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
++		{
++		/* yy_c_buf_p now points to the character we want to return.
++		 * If this occurs *before* the EOB characters, then it's a
++		 * valid NUL; if not, then we've hit the end of the buffer.
++		 */
++		if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
++			/* This was really a NUL. */
++			*(yy_c_buf_p) = '\0';
++
++		else
++			{ /* need more input */
++			int offset = (yy_c_buf_p) - (yytext_ptr);
++			++(yy_c_buf_p);
++
++			switch ( yy_get_next_buffer(  ) )
++				{
++				case EOB_ACT_LAST_MATCH:
++					/* This happens because yy_g_n_b()
++					 * sees that we've accumulated a
++					 * token and flags that we need to
++					 * try matching the token before
++					 * proceeding.  But for input(),
++					 * there's no matching to consider.
++					 * So convert the EOB_ACT_LAST_MATCH
++					 * to EOB_ACT_END_OF_FILE.
++					 */
++
++					/* Reset buffer status. */
++					yyrestart(yyin );
++
++					/*FALLTHROUGH*/
++
++				case EOB_ACT_END_OF_FILE:
++					{
++					if ( yywrap( ) )
++						return EOF;
++
++					if ( ! (yy_did_buffer_switch_on_eof) )
++						YY_NEW_FILE;
++#ifdef __cplusplus
++					return yyinput();
++#else
++					return input();
++#endif
++					}
++
++				case EOB_ACT_CONTINUE_SCAN:
++					(yy_c_buf_p) = (yytext_ptr) + offset;
++					break;
++				}
++			}
++		}
++
++	c = *(unsigned char *) (yy_c_buf_p);	/* cast for 8-bit char's */
++	*(yy_c_buf_p) = '\0';	/* preserve yytext */
++	(yy_hold_char) = *++(yy_c_buf_p);
++
++	if ( c == '\n' )
++		   
++    yylineno++;
++;
++
++	return c;
++}
++#endif	/* ifndef YY_NO_INPUT */
++
++/** Immediately switch to a different input stream.
++ * @param input_file A readable stream.
++ * 
++ * @note This function does not reset the start condition to @c INITIAL .
++ */
++    void yyrestart  (FILE * input_file )
 +{
-+	struct ccw_dev_id devid;
-+	static char buf[100];
++    
++	if ( ! YY_CURRENT_BUFFER ){
++        yyensure_buffer_stack ();
++		YY_CURRENT_BUFFER_LVALUE =
++            yy_create_buffer(yyin,YY_BUF_SIZE );
++	}
 +
-+	switch (dump_method) {
-+	case DUMP_METHOD_CCW_CIO:
-+		smp_send_stop();
-+		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
-+		devid.ssid  = 0;
-+		reipl_ccw_dev(&devid);
-+		break;
-+	case DUMP_METHOD_CCW_VM:
-+		smp_send_stop();
-+		sprintf(buf, "STORE STATUS");
-+		__cpcmd(buf, NULL, 0, NULL);
-+		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
-+		__cpcmd(buf, NULL, 0, NULL);
-+		break;
-+	case DUMP_METHOD_CCW_DIAG:
-+		diag308(DIAG308_SET, dump_block_ccw);
-+		diag308(DIAG308_DUMP, NULL);
-+		break;
-+	case DUMP_METHOD_FCP_DIAG:
-+		diag308(DIAG308_SET, dump_block_fcp);
-+		diag308(DIAG308_DUMP, NULL);
-+		break;
-+	case DUMP_METHOD_NONE:
-+	default:
++	yy_init_buffer(YY_CURRENT_BUFFER,input_file );
++	yy_load_buffer_state( );
++}
++
++/** Switch to a different input buffer.
++ * @param new_buffer The new input buffer.
++ * 
++ */
++    void yy_switch_to_buffer  (YY_BUFFER_STATE  new_buffer )
++{
++    
++	/* TODO. We should be able to replace this entire function body
++	 * with
++	 *		yypop_buffer_state();
++	 *		yypush_buffer_state(new_buffer);
++     */
++	yyensure_buffer_stack ();
++	if ( YY_CURRENT_BUFFER == new_buffer )
 +		return;
-+	}
-+	printk(KERN_EMERG "Dump failed!\n");
++
++	if ( YY_CURRENT_BUFFER )
++		{
++		/* Flush out information for old buffer. */
++		*(yy_c_buf_p) = (yy_hold_char);
++		YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
++		}
++
++	YY_CURRENT_BUFFER_LVALUE = new_buffer;
++	yy_load_buffer_state( );
++
++	/* We don't actually know whether we did this switch during
++	 * EOF (yywrap()) processing, but the only time this flag
++	 * is looked at is after yywrap() is called, so it's safe
++	 * to go ahead and always set it.
++	 */
++	(yy_did_buffer_switch_on_eof) = 1;
 +}
 +
- static int __init dump_ccw_init(void)
- {
- 	int rc;
-@@ -988,7 +960,7 @@ static int __init dump_ccw_init(void)
- 	dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
- 	if (!dump_block_ccw)
- 		return -ENOMEM;
--	rc = sysfs_create_group(&dump_subsys.kobj, &dump_ccw_attr_group);
-+	rc = sysfs_create_group(&dump_kset->kobj, &dump_ccw_attr_group);
- 	if (rc) {
- 		free_page((unsigned long)dump_block_ccw);
- 		return rc;
-@@ -1012,7 +984,7 @@ static int __init dump_fcp_init(void)
- 	dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
- 	if (!dump_block_fcp)
- 		return -ENOMEM;
--	rc = sysfs_create_group(&dump_subsys.kobj, &dump_fcp_attr_group);
-+	rc = sysfs_create_group(&dump_kset->kobj, &dump_fcp_attr_group);
- 	if (rc) {
- 		free_page((unsigned long)dump_block_fcp);
- 		return rc;
-@@ -1026,33 +998,16 @@ static int __init dump_fcp_init(void)
- 	return 0;
- }
- 
--#define SHUTDOWN_ON_PANIC_PRIO 0
--
--static int shutdown_on_panic_notify(struct notifier_block *self,
--				    unsigned long event, void *data)
--{
--	if (on_panic_action == SHUTDOWN_DUMP)
--		do_dump();
--	else if (on_panic_action == SHUTDOWN_REIPL)
--		do_reipl();
--	return NOTIFY_OK;
--}
--
--static struct notifier_block shutdown_on_panic_nb = {
--	.notifier_call = shutdown_on_panic_notify,
--	.priority = SHUTDOWN_ON_PANIC_PRIO
--};
--
--static int __init dump_init(void)
-+static int dump_init(void)
- {
- 	int rc;
- 
--	rc = firmware_register(&dump_subsys);
--	if (rc)
--		return rc;
--	rc = subsys_create_file(&dump_subsys, &dump_type_attr);
-+	dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
-+	if (!dump_kset)
-+		return -ENOMEM;
-+	rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
- 	if (rc) {
--		firmware_unregister(&dump_subsys);
-+		kset_unregister(dump_kset);
- 		return rc;
- 	}
- 	rc = dump_ccw_init();
-@@ -1065,46 +1020,381 @@ static int __init dump_init(void)
- 	return 0;
- }
- 
--static int __init shutdown_actions_init(void)
-+static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR,
-+					     dump_run, dump_init};
++static void yy_load_buffer_state  (void)
++{
++    	(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
++	(yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
++	yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
++	(yy_hold_char) = *(yy_c_buf_p);
++}
 +
-+/*
-+ * vmcmd shutdown action: Trigger vm command on shutdown.
++/** Allocate and initialize an input buffer state.
++ * @param file A readable stream.
++ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
++ * 
++ * @return the allocated buffer state.
 + */
++    YY_BUFFER_STATE yy_create_buffer  (FILE * file, int  size )
++{
++	YY_BUFFER_STATE b;
++    
++	b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state )  );
++	if ( ! b )
++		YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
 +
-+static char vmcmd_on_reboot[128];
-+static char vmcmd_on_panic[128];
-+static char vmcmd_on_halt[128];
-+static char vmcmd_on_poff[128];
++	b->yy_buf_size = size;
 +
-+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
-+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
-+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
-+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
++	/* yy_ch_buf has to be 2 characters longer than the size given because
++	 * we need to put in 2 end-of-buffer characters.
++	 */
++	b->yy_ch_buf = (char *) yyalloc(b->yy_buf_size + 2  );
++	if ( ! b->yy_ch_buf )
++		YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
 +
-+static struct attribute *vmcmd_attrs[] = {
-+	&sys_vmcmd_on_reboot_attr.attr,
-+	&sys_vmcmd_on_panic_attr.attr,
-+	&sys_vmcmd_on_halt_attr.attr,
-+	&sys_vmcmd_on_poff_attr.attr,
-+	NULL,
-+};
++	b->yy_is_our_buffer = 1;
 +
-+static struct attribute_group vmcmd_attr_group = {
-+	.attrs = vmcmd_attrs,
-+};
++	yy_init_buffer(b,file );
 +
-+static struct kset *vmcmd_kset;
++	return b;
++}
 +
-+static void vmcmd_run(struct shutdown_trigger *trigger)
++/** Destroy the buffer.
++ * @param b a buffer created with yy_create_buffer()
++ * 
++ */
++    void yy_delete_buffer (YY_BUFFER_STATE  b )
 +{
-+	char *cmd, *next_cmd;
++    
++	if ( ! b )
++		return;
 +
-+	if (strcmp(trigger->name, ON_REIPL_STR) == 0)
-+		cmd = vmcmd_on_reboot;
-+	else if (strcmp(trigger->name, ON_PANIC_STR) == 0)
-+		cmd = vmcmd_on_panic;
-+	else if (strcmp(trigger->name, ON_HALT_STR) == 0)
-+		cmd = vmcmd_on_halt;
-+	else if (strcmp(trigger->name, ON_POFF_STR) == 0)
-+		cmd = vmcmd_on_poff;
-+	else
++	if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
++		YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
++
++	if ( b->yy_is_our_buffer )
++		yyfree((void *) b->yy_ch_buf  );
++
++	yyfree((void *) b  );
++}
++
++#ifndef __cplusplus
++extern int isatty (int );
++#endif /* __cplusplus */
++    
++/* Initializes or reinitializes a buffer.
++ * This function is sometimes called more than once on the same buffer,
++ * such as during a yyrestart() or at EOF.
++ */
++    static void yy_init_buffer  (YY_BUFFER_STATE  b, FILE * file )
++
++{
++	int oerrno = errno;
++    
++	yy_flush_buffer(b );
++
++	b->yy_input_file = file;
++	b->yy_fill_buffer = 1;
++
++    /* If b is the current buffer, then yy_init_buffer was _probably_
++     * called from yyrestart() or through yy_get_next_buffer.
++     * In that case, we don't want to reset the lineno or column.
++     */
++    if (b != YY_CURRENT_BUFFER){
++        b->yy_bs_lineno = 1;
++        b->yy_bs_column = 0;
++    }
++
++        b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0;
++    
++	errno = oerrno;
++}
++
++/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
++ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
++ * 
++ */
++    void yy_flush_buffer (YY_BUFFER_STATE  b )
++{
++    	if ( ! b )
 +		return;
 +
-+	if (strlen(cmd) == 0)
++	b->yy_n_chars = 0;
++
++	/* We always need two end-of-buffer characters.  The first causes
++	 * a transition to the end-of-buffer state.  The second causes
++	 * a jam in that state.
++	 */
++	b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
++	b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
++
++	b->yy_buf_pos = &b->yy_ch_buf[0];
++
++	b->yy_at_bol = 1;
++	b->yy_buffer_status = YY_BUFFER_NEW;
++
++	if ( b == YY_CURRENT_BUFFER )
++		yy_load_buffer_state( );
++}
++
++/** Pushes the new state onto the stack. The new state becomes
++ *  the current state. This function will allocate the stack
++ *  if necessary.
++ *  @param new_buffer The new state.
++ *  
++ */
++void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
++{
++    	if (new_buffer == NULL)
 +		return;
-+	do {
-+		next_cmd = strchr(cmd, '\n');
-+		if (next_cmd) {
-+			next_cmd[0] = 0;
-+			next_cmd += 1;
++
++	yyensure_buffer_stack();
++
++	/* This block is copied from yy_switch_to_buffer. */
++	if ( YY_CURRENT_BUFFER )
++		{
++		/* Flush out information for old buffer. */
++		*(yy_c_buf_p) = (yy_hold_char);
++		YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
 +		}
-+		__cpcmd(cmd, NULL, 0, NULL);
-+		cmd = next_cmd;
-+	} while (cmd != NULL);
-+}
 +
-+static int vmcmd_init(void)
- {
--	int rc;
-+	if (!MACHINE_IS_VM)
-+		return -ENOTSUPP;
-+	vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
-+	if (!vmcmd_kset)
-+		return -ENOMEM;
-+	return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group);
++	/* Only push if top exists. Otherwise, replace top. */
++	if (YY_CURRENT_BUFFER)
++		(yy_buffer_stack_top)++;
++	YY_CURRENT_BUFFER_LVALUE = new_buffer;
++
++	/* copied from yy_switch_to_buffer. */
++	yy_load_buffer_state( );
++	(yy_did_buffer_switch_on_eof) = 1;
 +}
- 
--	rc = firmware_register(&shutdown_actions_subsys);
--	if (rc)
--		return rc;
--	rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr);
--	if (rc) {
--		firmware_unregister(&shutdown_actions_subsys);
--		return rc;
-+static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
-+					      vmcmd_run, vmcmd_init};
 +
-+/*
-+ * stop shutdown action: Stop Linux on shutdown.
++/** Removes and deletes the top of the stack, if present.
++ *  The next element becomes the new top.
++ *  
 + */
-+
-+static void stop_run(struct shutdown_trigger *trigger)
++void yypop_buffer_state (void)
 +{
-+	if (strcmp(trigger->name, ON_PANIC_STR) == 0)
-+		disabled_wait((unsigned long) __builtin_return_address(0));
-+	else {
-+		signal_processor(smp_processor_id(), sigp_stop);
-+		for (;;);
- 	}
--	atomic_notifier_chain_register(&panic_notifier_list,
--				       &shutdown_on_panic_nb);
--	return 0;
- }
- 
--static int __init s390_ipl_init(void)
-+static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
-+					     stop_run, NULL};
++    	if (!YY_CURRENT_BUFFER)
++		return;
 +
-+/* action list */
++	yy_delete_buffer(YY_CURRENT_BUFFER );
++	YY_CURRENT_BUFFER_LVALUE = NULL;
++	if ((yy_buffer_stack_top) > 0)
++		--(yy_buffer_stack_top);
 +
-+static struct shutdown_action *shutdown_actions_list[] = {
-+	&ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action};
-+#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *))
++	if (YY_CURRENT_BUFFER) {
++		yy_load_buffer_state( );
++		(yy_did_buffer_switch_on_eof) = 1;
++	}
++}
 +
-+/*
-+ * Trigger section
++/* Allocates the stack if it does not exist.
++ *  Guarantees space for at least one push.
 + */
++static void yyensure_buffer_stack (void)
++{
++	int num_to_alloc;
++    
++	if (!(yy_buffer_stack)) {
 +
-+static struct kset *shutdown_actions_kset;
++		/* First allocation is just for 2 elements, since we don't know if this
++		 * scanner will even need a stack. We use 2 instead of 1 to avoid an
++		 * immediate realloc on the next call.
++         */
++		num_to_alloc = 1;
++		(yy_buffer_stack) = (struct yy_buffer_state**)yyalloc
++								(num_to_alloc * sizeof(struct yy_buffer_state*)
++								);
++		
++		memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
++				
++		(yy_buffer_stack_max) = num_to_alloc;
++		(yy_buffer_stack_top) = 0;
++		return;
++	}
 +
-+static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
-+		       size_t len)
- {
--	int rc;
-+	int i;
-+	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
-+		if (!shutdown_actions_list[i])
-+			continue;
-+		if (strncmp(buf, shutdown_actions_list[i]->name,
-+			    strlen(shutdown_actions_list[i]->name)) == 0) {
-+			trigger->action = shutdown_actions_list[i];
-+			return len;
-+		}
++	if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
++
++		/* Increase the buffer to prepare for a possible push. */
++		int grow_size = 8 /* arbitrary grow size */;
++
++		num_to_alloc = (yy_buffer_stack_max) + grow_size;
++		(yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc
++								((yy_buffer_stack),
++								num_to_alloc * sizeof(struct yy_buffer_state*)
++								);
++
++		/* zero only the new slots.*/
++		memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
++		(yy_buffer_stack_max) = num_to_alloc;
 +	}
-+	return -EINVAL;
 +}
- 
--	sclp_get_ipl_info(&sclp_ipl_info);
-+/* on reipl */
 +
-+static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
-+						    &reipl_action};
++/** Setup the input buffer state to scan directly from a user-specified character buffer.
++ * @param base the character buffer
++ * @param size the size in bytes of the character buffer
++ * 
++ * @return the newly allocated buffer state object. 
++ */
++YY_BUFFER_STATE yy_scan_buffer  (char * base, yy_size_t  size )
++{
++	YY_BUFFER_STATE b;
++    
++	if ( size < 2 ||
++	     base[size-2] != YY_END_OF_BUFFER_CHAR ||
++	     base[size-1] != YY_END_OF_BUFFER_CHAR )
++		/* They forgot to leave room for the EOB's. */
++		return 0;
 +
-+static ssize_t on_reboot_show(struct kobject *kobj,
-+			      struct kobj_attribute *attr, char *page)
++	b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state )  );
++	if ( ! b )
++		YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
++
++	b->yy_buf_size = size - 2;	/* "- 2" to take care of EOB's */
++	b->yy_buf_pos = b->yy_ch_buf = base;
++	b->yy_is_our_buffer = 0;
++	b->yy_input_file = 0;
++	b->yy_n_chars = b->yy_buf_size;
++	b->yy_is_interactive = 0;
++	b->yy_at_bol = 1;
++	b->yy_fill_buffer = 0;
++	b->yy_buffer_status = YY_BUFFER_NEW;
++
++	yy_switch_to_buffer(b  );
++
++	return b;
++}
++
++/** Setup the input buffer state to scan a string. The next call to yylex() will
++ * scan from a @e copy of @a str.
++ * @param yystr a NUL-terminated string to scan
++ * 
++ * @return the newly allocated buffer state object.
++ * @note If you want to scan bytes that may contain NUL values, then use
++ *       yy_scan_bytes() instead.
++ */
++YY_BUFFER_STATE yy_scan_string (yyconst char * yystr )
 +{
-+	return sprintf(page, "%s\n", on_reboot_trigger.action->name);
++    
++	return yy_scan_bytes(yystr,strlen(yystr) );
 +}
 +
-+static ssize_t on_reboot_store(struct kobject *kobj,
-+			       struct kobj_attribute *attr,
-+			       const char *buf, size_t len)
++/** Setup the input buffer state to scan the given bytes. The next call to yylex() will
++ * scan from a @e copy of @a bytes.
++ * @param bytes the byte buffer to scan
++ * @param len the number of bytes in the buffer pointed to by @a bytes.
++ * 
++ * @return the newly allocated buffer state object.
++ */
++YY_BUFFER_STATE yy_scan_bytes  (yyconst char * yybytes, int  _yybytes_len )
 +{
-+	return set_trigger(buf, &on_reboot_trigger, len);
++	YY_BUFFER_STATE b;
++	char *buf;
++	yy_size_t n;
++	int i;
++    
++	/* Get memory for full buffer, including space for trailing EOB's. */
++	n = _yybytes_len + 2;
++	buf = (char *) yyalloc(n  );
++	if ( ! buf )
++		YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
++
++	for ( i = 0; i < _yybytes_len; ++i )
++		buf[i] = yybytes[i];
++
++	buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
++
++	b = yy_scan_buffer(buf,n );
++	if ( ! b )
++		YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
++
++	/* It's okay to grow etc. this buffer, and we should throw it
++	 * away when we're done.
++	 */
++	b->yy_is_our_buffer = 1;
++
++	return b;
 +}
 +
-+static struct kobj_attribute on_reboot_attr =
-+	__ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
++#ifndef YY_EXIT_FAILURE
++#define YY_EXIT_FAILURE 2
++#endif
 +
-+static void do_machine_restart(char *__unused)
++static void yy_fatal_error (yyconst char* msg )
 +{
-+	smp_send_stop();
-+	on_reboot_trigger.action->fn(&on_reboot_trigger);
-+	reipl_run(NULL);
++    	(void) fprintf( stderr, "%s\n", msg );
++	exit( YY_EXIT_FAILURE );
 +}
-+void (*_machine_restart)(char *command) = do_machine_restart;
 +
-+/* on panic */
++/* Redefine yyless() so it works in section 3 code. */
 +
-+static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
++#undef yyless
++#define yyless(n) \
++	do \
++		{ \
++		/* Undo effects of setting up yytext. */ \
++        int yyless_macro_arg = (n); \
++        YY_LESS_LINENO(yyless_macro_arg);\
++		yytext[yyleng] = (yy_hold_char); \
++		(yy_c_buf_p) = yytext + yyless_macro_arg; \
++		(yy_hold_char) = *(yy_c_buf_p); \
++		*(yy_c_buf_p) = '\0'; \
++		yyleng = yyless_macro_arg; \
++		} \
++	while ( 0 )
 +
-+static ssize_t on_panic_show(struct kobject *kobj,
-+			     struct kobj_attribute *attr, char *page)
++/* Accessor  methods (get/set functions) to struct members. */
++
++/** Get the current line number.
++ * 
++ */
++int yyget_lineno  (void)
 +{
-+	return sprintf(page, "%s\n", on_panic_trigger.action->name);
++        
++    return yylineno;
 +}
 +
-+static ssize_t on_panic_store(struct kobject *kobj,
-+			      struct kobj_attribute *attr,
-+			      const char *buf, size_t len)
++/** Get the input stream.
++ * 
++ */
++FILE *yyget_in  (void)
 +{
-+	return set_trigger(buf, &on_panic_trigger, len);
++        return yyin;
 +}
 +
-+static struct kobj_attribute on_panic_attr =
-+	__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
++/** Get the output stream.
++ * 
++ */
++FILE *yyget_out  (void)
++{
++        return yyout;
++}
 +
-+static void do_panic(void)
++/** Get the length of the current token.
++ * 
++ */
++int yyget_leng  (void)
 +{
-+	on_panic_trigger.action->fn(&on_panic_trigger);
-+	stop_run(&on_panic_trigger);
++        return yyleng;
 +}
 +
-+/* on halt */
++/** Get the current token.
++ * 
++ */
 +
-+static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
++char *yyget_text  (void)
++{
++        return yytext;
++}
 +
-+static ssize_t on_halt_show(struct kobject *kobj,
-+			    struct kobj_attribute *attr, char *page)
++/** Set the current line number.
++ * @param line_number
++ * 
++ */
++void yyset_lineno (int  line_number )
 +{
-+	return sprintf(page, "%s\n", on_halt_trigger.action->name);
++    
++    yylineno = line_number;
 +}
 +
-+static ssize_t on_halt_store(struct kobject *kobj,
-+			     struct kobj_attribute *attr,
-+			     const char *buf, size_t len)
++/** Set the input stream. This does not discard the current
++ * input buffer.
++ * @param in_str A readable stream.
++ * 
++ * @see yy_switch_to_buffer
++ */
++void yyset_in (FILE *  in_str )
 +{
-+	return set_trigger(buf, &on_halt_trigger, len);
++        yyin = in_str ;
 +}
 +
-+static struct kobj_attribute on_halt_attr =
-+	__ATTR(on_halt, 0644, on_halt_show, on_halt_store);
++void yyset_out (FILE *  out_str )
++{
++        yyout = out_str ;
++}
 +
++int yyget_debug  (void)
++{
++        return yy_flex_debug;
++}
 +
-+static void do_machine_halt(void)
++void yyset_debug (int  bdebug )
 +{
-+	smp_send_stop();
-+	on_halt_trigger.action->fn(&on_halt_trigger);
-+	stop_run(&on_halt_trigger);
++        yy_flex_debug = bdebug ;
 +}
-+void (*_machine_halt)(void) = do_machine_halt;
 +
-+/* on power off */
++static int yy_init_globals (void)
++{
++        /* Initialization is the same as for the non-reentrant scanner.
++     * This function is called from yylex_destroy(), so don't allocate here.
++     */
 +
-+static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
++    /* We do not touch yylineno unless the option is enabled. */
++    yylineno =  1;
++    
++    (yy_buffer_stack) = 0;
++    (yy_buffer_stack_top) = 0;
++    (yy_buffer_stack_max) = 0;
++    (yy_c_buf_p) = (char *) 0;
++    (yy_init) = 0;
++    (yy_start) = 0;
 +
-+static ssize_t on_poff_show(struct kobject *kobj,
-+			    struct kobj_attribute *attr, char *page)
-+{
-+	return sprintf(page, "%s\n", on_poff_trigger.action->name);
++/* Defined in main.c */
++#ifdef YY_STDINIT
++    yyin = stdin;
++    yyout = stdout;
++#else
++    yyin = (FILE *) 0;
++    yyout = (FILE *) 0;
++#endif
++
++    /* For future reference: Set errno on error, since we are called by
++     * yylex_init()
++     */
++    return 0;
 +}
 +
-+static ssize_t on_poff_store(struct kobject *kobj,
-+			     struct kobj_attribute *attr,
-+			     const char *buf, size_t len)
++/* yylex_destroy is for both reentrant and non-reentrant scanners. */
++int yylex_destroy  (void)
 +{
-+	return set_trigger(buf, &on_poff_trigger, len);
-+}
++    
++    /* Pop the buffer stack, destroying each element. */
++	while(YY_CURRENT_BUFFER){
++		yy_delete_buffer(YY_CURRENT_BUFFER  );
++		YY_CURRENT_BUFFER_LVALUE = NULL;
++		yypop_buffer_state();
++	}
++
++	/* Destroy the stack itself. */
++	yyfree((yy_buffer_stack) );
++	(yy_buffer_stack) = NULL;
++
++    /* Reset the globals. This is important in a non-reentrant scanner so the next time
++     * yylex() is called, initialization will occur. */
++    yy_init_globals( );
 +
-+static struct kobj_attribute on_poff_attr =
-+	__ATTR(on_poff, 0644, on_poff_show, on_poff_store);
++    return 0;
++}
 +
++/*
++ * Internal utility routines.
++ */
 +
-+static void do_machine_power_off(void)
++#ifndef yytext_ptr
++static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
 +{
-+	smp_send_stop();
-+	on_poff_trigger.action->fn(&on_poff_trigger);
-+	stop_run(&on_poff_trigger);
++	register int i;
++	for ( i = 0; i < n; ++i )
++		s1[i] = s2[i];
 +}
-+void (*_machine_power_off)(void) = do_machine_power_off;
++#endif
 +
-+static void __init shutdown_triggers_init(void)
++#ifdef YY_NEED_STRLEN
++static int yy_flex_strlen (yyconst char * s )
 +{
-+	shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
-+						    firmware_kobj);
-+	if (!shutdown_actions_kset)
-+		goto fail;
-+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
-+			      &on_reboot_attr.attr))
-+		goto fail;
-+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
-+			      &on_panic_attr.attr))
-+		goto fail;
-+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
-+			      &on_halt_attr.attr))
-+		goto fail;
-+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
-+			      &on_poff_attr.attr))
-+		goto fail;
++	register int n;
++	for ( n = 0; s[n]; ++n )
++		;
 +
-+	return;
-+fail:
-+	panic("shutdown_triggers_init failed\n");
++	return n;
 +}
++#endif
 +
-+static void __init shutdown_actions_init(void)
++void *yyalloc (yy_size_t  size )
 +{
-+	int i;
++	return (void *) malloc( size );
++}
 +
-+	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
-+		if (!shutdown_actions_list[i]->init)
-+			continue;
-+		if (shutdown_actions_list[i]->init())
-+			shutdown_actions_list[i] = NULL;
-+	}
++void *yyrealloc  (void * ptr, yy_size_t  size )
++{
++	/* The cast to (char *) in the following accommodates both
++	 * implementations that use char* generic pointers, and those
++	 * that use void* generic pointers.  It works with the latter
++	 * because both ANSI C and C++ allow castless assignment from
++	 * any pointer type to void*, and deal with argument conversions
++	 * as though doing an assignment.
++	 */
++	return (void *) realloc( (char *) ptr, size );
 +}
 +
-+static int __init s390_ipl_init(void)
++void yyfree (void * ptr )
 +{
- 	reipl_probe();
--	rc = ipl_init();
--	if (rc)
--		return rc;
--	rc = reipl_init();
--	if (rc)
--		return rc;
--	rc = dump_init();
--	if (rc)
--		return rc;
--	rc = shutdown_actions_init();
--	if (rc)
--		return rc;
-+	sclp_get_ipl_info(&sclp_ipl_info);
-+	shutdown_actions_init();
-+	shutdown_triggers_init();
- 	return 0;
- }
- 
- __initcall(s390_ipl_init);
- 
-+static void __init strncpy_skip_quote(char *dst, char *src, int n)
++	free( (char *) ptr );	/* see yyrealloc() for (char *) cast */
++}
++
++#define YYTABLES_NAME "yytables"
++
++#line 222 "dtc-lexer.l"
++
++
++
++
++/*
++ * Stack of nested include file contexts.
++ */
++
++struct incl_file {
++	int filenum;
++	FILE *file;
++	YY_BUFFER_STATE yy_prev_buf;
++	int yy_prev_lineno;
++	struct incl_file *prev;
++};
++
++struct incl_file *incl_file_stack;
++
++
++/*
++ * Detect infinite include recursion.
++ */
++#define MAX_INCLUDE_DEPTH	(100)
++
++static int incl_depth = 0;
++
++
++int push_input_file(const char *filename)
 +{
-+	int sx, dx;
++	FILE *f;
++	struct incl_file *incl_file;
 +
-+	dx = 0;
-+	for (sx = 0; src[sx] != 0; sx++) {
-+		if (src[sx] == '"')
-+			continue;
-+		dst[dx++] = src[sx];
-+		if (dx >= n)
-+			break;
++	if (!filename) {
++		yyerror("No include file name given.");
++		return 0;
 +	}
-+}
 +
-+static int __init vmcmd_on_reboot_setup(char *str)
-+{
-+	if (!MACHINE_IS_VM)
-+		return 1;
-+	strncpy_skip_quote(vmcmd_on_reboot, str, 127);
-+	vmcmd_on_reboot[127] = 0;
-+	on_reboot_trigger.action = &vmcmd_action;
++	if (incl_depth++ >= MAX_INCLUDE_DEPTH) {
++		yyerror("Includes nested too deeply");
++		return 0;
++	}
++
++	f = dtc_open_file(filename);
++
++	incl_file = malloc(sizeof(struct incl_file));
++	if (!incl_file) {
++		yyerror("Can not allocate include file space.");
++		return 0;
++	}
++
++	/*
++	 * Save current context.
++	 */
++	incl_file->yy_prev_buf = YY_CURRENT_BUFFER;
++	incl_file->yy_prev_lineno = yylineno;
++	incl_file->filenum = srcpos_filenum;
++	incl_file->file = yyin;
++	incl_file->prev = incl_file_stack;
++
++	incl_file_stack = incl_file;
++
++	/*
++	 * Establish new context.
++	 */
++	srcpos_filenum = lookup_file_name(filename, 0);
++	yylineno = 1;
++	yyin = f;
++	yy_switch_to_buffer(yy_create_buffer(yyin,YY_BUF_SIZE));
++
 +	return 1;
 +}
-+__setup("vmreboot=", vmcmd_on_reboot_setup);
 +
-+static int __init vmcmd_on_panic_setup(char *str)
++
++int pop_input_file(void)
 +{
-+	if (!MACHINE_IS_VM)
-+		return 1;
-+	strncpy_skip_quote(vmcmd_on_panic, str, 127);
-+	vmcmd_on_panic[127] = 0;
-+	on_panic_trigger.action = &vmcmd_action;
++	struct incl_file *incl_file;
++
++	if (incl_file_stack == 0)
++		return 0;
++
++	fclose(yyin);
++
++	/*
++	 * Pop.
++	 */
++	--incl_depth;
++	incl_file = incl_file_stack;
++	incl_file_stack = incl_file->prev;
++
++	/*
++	 * Recover old context.
++	 */
++	yy_delete_buffer(YY_CURRENT_BUFFER);
++	yy_switch_to_buffer(incl_file->yy_prev_buf);
++	yylineno = incl_file->yy_prev_lineno;
++	srcpos_filenum = incl_file->filenum;
++	yyin = incl_file->file;
++
++	/*
++	 * Free old state.
++	 */
++	free(incl_file);
++
++	if (YY_CURRENT_BUFFER == 0)
++		return 0;
++
 +	return 1;
 +}
-+__setup("vmpanic=", vmcmd_on_panic_setup);
 +
-+static int __init vmcmd_on_halt_setup(char *str)
+diff --git a/arch/powerpc/boot/dtc-src/dtc-parser.tab.c_shipped b/arch/powerpc/boot/dtc-src/dtc-parser.tab.c_shipped
+new file mode 100644
+index 0000000..28e6ec0
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/dtc-parser.tab.c_shipped
+@@ -0,0 +1,1983 @@
++/* A Bison parser, made by GNU Bison 2.3.  */
++
++/* Skeleton implementation for Bison's Yacc-like parsers in C
++
++   Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
++   Free Software Foundation, Inc.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 2, or (at your option)
++   any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 51 Franklin Street, Fifth Floor,
++   Boston, MA 02110-1301, USA.  */
++
++/* As a special exception, you may create a larger work that contains
++   part or all of the Bison parser skeleton and distribute that work
++   under terms of your choice, so long as that work isn't itself a
++   parser generator using the skeleton or a modified version thereof
++   as a parser skeleton.  Alternatively, if you modify or redistribute
++   the parser skeleton itself, you may (at your option) remove this
++   special exception, which will cause the skeleton and the resulting
++   Bison output files to be licensed under the GNU General Public
++   License without this special exception.
++
++   This special exception was added by the Free Software Foundation in
++   version 2.2 of Bison.  */
++
++/* C LALR(1) parser skeleton written by Richard Stallman, by
++   simplifying the original so-called "semantic" parser.  */
++
++/* All symbols defined below should begin with yy or YY, to avoid
++   infringing on user name space.  This should be done even for local
++   variables, as they might otherwise be expanded by user macros.
++   There are some unavoidable exceptions within include files to
++   define necessary library symbols; they are noted "INFRINGES ON
++   USER NAME SPACE" below.  */
++
++/* Identify Bison output.  */
++#define YYBISON 1
++
++/* Bison version.  */
++#define YYBISON_VERSION "2.3"
++
++/* Skeleton name.  */
++#define YYSKELETON_NAME "yacc.c"
++
++/* Pure parsers.  */
++#define YYPURE 0
++
++/* Using locations.  */
++#define YYLSP_NEEDED 1
++
++
++
++/* Tokens.  */
++#ifndef YYTOKENTYPE
++# define YYTOKENTYPE
++   /* Put the tokens into the symbol table, so that GDB and other debuggers
++      know about them.  */
++   enum yytokentype {
++     DT_V1 = 258,
++     DT_MEMRESERVE = 259,
++     DT_PROPNODENAME = 260,
++     DT_LITERAL = 261,
++     DT_LEGACYLITERAL = 262,
++     DT_BASE = 263,
++     DT_BYTE = 264,
++     DT_STRING = 265,
++     DT_LABEL = 266,
++     DT_REF = 267
++   };
++#endif
++/* Tokens.  */
++#define DT_V1 258
++#define DT_MEMRESERVE 259
++#define DT_PROPNODENAME 260
++#define DT_LITERAL 261
++#define DT_LEGACYLITERAL 262
++#define DT_BASE 263
++#define DT_BYTE 264
++#define DT_STRING 265
++#define DT_LABEL 266
++#define DT_REF 267
++
++
++
++
++/* Copy the first part of user declarations.  */
++#line 23 "dtc-parser.y"
++
++#include "dtc.h"
++#include "srcpos.h"
++
++int yylex(void);
++unsigned long long eval_literal(const char *s, int base, int bits);
++
++extern struct boot_info *the_boot_info;
++
++
++
++/* Enabling traces.  */
++#ifndef YYDEBUG
++# define YYDEBUG 0
++#endif
++
++/* Enabling verbose error messages.  */
++#ifdef YYERROR_VERBOSE
++# undef YYERROR_VERBOSE
++# define YYERROR_VERBOSE 1
++#else
++# define YYERROR_VERBOSE 0
++#endif
++
++/* Enabling the token table.  */
++#ifndef YYTOKEN_TABLE
++# define YYTOKEN_TABLE 0
++#endif
++
++#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
++typedef union YYSTYPE
++#line 34 "dtc-parser.y"
 +{
-+	if (!MACHINE_IS_VM)
-+		return 1;
-+	strncpy_skip_quote(vmcmd_on_halt, str, 127);
-+	vmcmd_on_halt[127] = 0;
-+	on_halt_trigger.action = &vmcmd_action;
-+	return 1;
++	char *propnodename;
++	char *literal;
++	char *labelref;
++	unsigned int cbase;
++	u8 byte;
++	struct data data;
++
++	u64 addr;
++	cell_t cell;
++	struct property *prop;
++	struct property *proplist;
++	struct node *node;
++	struct node *nodelist;
++	struct reserve_info *re;
 +}
-+__setup("vmhalt=", vmcmd_on_halt_setup);
++/* Line 187 of yacc.c.  */
++#line 148 "dtc-parser.tab.c"
++	YYSTYPE;
++# define yystype YYSTYPE /* obsolescent; will be withdrawn */
++# define YYSTYPE_IS_DECLARED 1
++# define YYSTYPE_IS_TRIVIAL 1
++#endif
 +
-+static int __init vmcmd_on_poff_setup(char *str)
++#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
++typedef struct YYLTYPE
 +{
-+	if (!MACHINE_IS_VM)
-+		return 1;
-+	strncpy_skip_quote(vmcmd_on_poff, str, 127);
-+	vmcmd_on_poff[127] = 0;
-+	on_poff_trigger.action = &vmcmd_action;
-+	return 1;
-+}
-+__setup("vmpoff=", vmcmd_on_poff_setup);
++  int first_line;
++  int first_column;
++  int last_line;
++  int last_column;
++} YYLTYPE;
++# define yyltype YYLTYPE /* obsolescent; will be withdrawn */
++# define YYLTYPE_IS_DECLARED 1
++# define YYLTYPE_IS_TRIVIAL 1
++#endif
 +
-+static int on_panic_notify(struct notifier_block *self,
-+			   unsigned long event, void *data)
++
++/* Copy the second part of user declarations.  */
++
++
++/* Line 216 of yacc.c.  */
++#line 173 "dtc-parser.tab.c"
++
++#ifdef short
++# undef short
++#endif
++
++#ifdef YYTYPE_UINT8
++typedef YYTYPE_UINT8 yytype_uint8;
++#else
++typedef unsigned char yytype_uint8;
++#endif
++
++#ifdef YYTYPE_INT8
++typedef YYTYPE_INT8 yytype_int8;
++#elif (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++typedef signed char yytype_int8;
++#else
++typedef short int yytype_int8;
++#endif
++
++#ifdef YYTYPE_UINT16
++typedef YYTYPE_UINT16 yytype_uint16;
++#else
++typedef unsigned short int yytype_uint16;
++#endif
++
++#ifdef YYTYPE_INT16
++typedef YYTYPE_INT16 yytype_int16;
++#else
++typedef short int yytype_int16;
++#endif
++
++#ifndef YYSIZE_T
++# ifdef __SIZE_TYPE__
++#  define YYSIZE_T __SIZE_TYPE__
++# elif defined size_t
++#  define YYSIZE_T size_t
++# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++#  include <stddef.h> /* INFRINGES ON USER NAME SPACE */
++#  define YYSIZE_T size_t
++# else
++#  define YYSIZE_T unsigned int
++# endif
++#endif
++
++#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
++
++#ifndef YY_
++# if YYENABLE_NLS
++#  if ENABLE_NLS
++#   include <libintl.h> /* INFRINGES ON USER NAME SPACE */
++#   define YY_(msgid) dgettext ("bison-runtime", msgid)
++#  endif
++# endif
++# ifndef YY_
++#  define YY_(msgid) msgid
++# endif
++#endif
++
++/* Suppress unused-variable warnings by "using" E.  */
++#if ! defined lint || defined __GNUC__
++# define YYUSE(e) ((void) (e))
++#else
++# define YYUSE(e) /* empty */
++#endif
++
++/* Identity function, used to suppress warnings about constant conditions.  */
++#ifndef lint
++# define YYID(n) (n)
++#else
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++static int
++YYID (int i)
++#else
++static int
++YYID (i)
++    int i;
++#endif
 +{
-+	do_panic();
-+	return NOTIFY_OK;
++  return i;
 +}
++#endif
 +
-+static struct notifier_block on_panic_nb = {
-+	.notifier_call = on_panic_notify,
-+	.priority = 0,
++#if ! defined yyoverflow || YYERROR_VERBOSE
++
++/* The parser invokes alloca or malloc; define the necessary symbols.  */
++
++# ifdef YYSTACK_USE_ALLOCA
++#  if YYSTACK_USE_ALLOCA
++#   ifdef __GNUC__
++#    define YYSTACK_ALLOC __builtin_alloca
++#   elif defined __BUILTIN_VA_ARG_INCR
++#    include <alloca.h> /* INFRINGES ON USER NAME SPACE */
++#   elif defined _AIX
++#    define YYSTACK_ALLOC __alloca
++#   elif defined _MSC_VER
++#    include <malloc.h> /* INFRINGES ON USER NAME SPACE */
++#    define alloca _alloca
++#   else
++#    define YYSTACK_ALLOC alloca
++#    if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++#     include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
++#     ifndef _STDLIB_H
++#      define _STDLIB_H 1
++#     endif
++#    endif
++#   endif
++#  endif
++# endif
++
++# ifdef YYSTACK_ALLOC
++   /* Pacify GCC's `empty if-body' warning.  */
++#  define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
++#  ifndef YYSTACK_ALLOC_MAXIMUM
++    /* The OS might guarantee only one guard page at the bottom of the stack,
++       and a page size can be as small as 4096 bytes.  So we cannot safely
++       invoke alloca (N) if N exceeds 4096.  Use a slightly smaller number
++       to allow for a few compiler-allocated temporary stack slots.  */
++#   define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
++#  endif
++# else
++#  define YYSTACK_ALLOC YYMALLOC
++#  define YYSTACK_FREE YYFREE
++#  ifndef YYSTACK_ALLOC_MAXIMUM
++#   define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
++#  endif
++#  if (defined __cplusplus && ! defined _STDLIB_H \
++       && ! ((defined YYMALLOC || defined malloc) \
++	     && (defined YYFREE || defined free)))
++#   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
++#   ifndef _STDLIB_H
++#    define _STDLIB_H 1
++#   endif
++#  endif
++#  ifndef YYMALLOC
++#   define YYMALLOC malloc
++#   if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
++#   endif
++#  endif
++#  ifndef YYFREE
++#   define YYFREE free
++#   if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++void free (void *); /* INFRINGES ON USER NAME SPACE */
++#   endif
++#  endif
++# endif
++#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
++
++
++#if (! defined yyoverflow \
++     && (! defined __cplusplus \
++	 || (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \
++	     && defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
++
++/* A type that is properly aligned for any stack member.  */
++union yyalloc
++{
++  yytype_int16 yyss;
++  YYSTYPE yyvs;
++    YYLTYPE yyls;
 +};
 +
-+void __init setup_ipl(void)
++/* The size of the maximum gap between one aligned stack and the next.  */
++# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
++
++/* The size of an array large to enough to hold all stacks, each with
++   N elements.  */
++# define YYSTACK_BYTES(N) \
++     ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE) + sizeof (YYLTYPE)) \
++      + 2 * YYSTACK_GAP_MAXIMUM)
++
++/* Copy COUNT objects from FROM to TO.  The source and destination do
++   not overlap.  */
++# ifndef YYCOPY
++#  if defined __GNUC__ && 1 < __GNUC__
++#   define YYCOPY(To, From, Count) \
++      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
++#  else
++#   define YYCOPY(To, From, Count)		\
++      do					\
++	{					\
++	  YYSIZE_T yyi;				\
++	  for (yyi = 0; yyi < (Count); yyi++)	\
++	    (To)[yyi] = (From)[yyi];		\
++	}					\
++      while (YYID (0))
++#  endif
++# endif
++
++/* Relocate STACK from its old location to the new one.  The
++   local variables YYSIZE and YYSTACKSIZE give the old and new number of
++   elements in the stack, and YYPTR gives the new location of the
++   stack.  Advance YYPTR to a properly aligned location for the next
++   stack.  */
++# define YYSTACK_RELOCATE(Stack)					\
++    do									\
++      {									\
++	YYSIZE_T yynewbytes;						\
++	YYCOPY (&yyptr->Stack, Stack, yysize);				\
++	Stack = &yyptr->Stack;						\
++	yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
++	yyptr += yynewbytes / sizeof (*yyptr);				\
++      }									\
++    while (YYID (0))
++
++#endif
++
++/* YYFINAL -- State number of the termination state.  */
++#define YYFINAL  9
++/* YYLAST -- Last index in YYTABLE.  */
++#define YYLAST   60
++
++/* YYNTOKENS -- Number of terminals.  */
++#define YYNTOKENS  24
++/* YYNNTS -- Number of nonterminals.  */
++#define YYNNTS  20
++/* YYNRULES -- Number of rules.  */
++#define YYNRULES  43
++/* YYNRULES -- Number of states.  */
++#define YYNSTATES  67
++
++/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
++#define YYUNDEFTOK  2
++#define YYMAXUTOK   267
++
++#define YYTRANSLATE(YYX)						\
++  ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
++
++/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX.  */
++static const yytype_uint8 yytranslate[] =
++{
++       0,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,    23,    14,     2,    15,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,    13,
++      19,    18,    20,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,    21,     2,    22,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,    16,     2,    17,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     1,     2,     3,     4,
++       5,     6,     7,     8,     9,    10,    11,    12
++};
++
++#if YYDEBUG
++/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
++   YYRHS.  */
++static const yytype_uint8 yyprhs[] =
++{
++       0,     0,     3,     8,    11,    12,    15,    21,    22,    25,
++      27,    34,    36,    38,    41,    47,    48,    51,    57,    61,
++      64,    69,    74,    77,    80,    81,    84,    87,    88,    91,
++      94,    97,    98,   100,   102,   105,   106,   109,   112,   113,
++     116,   119,   123,   124
++};
++
++/* YYRHS -- A `-1'-separated list of the rules' RHS.  */
++static const yytype_int8 yyrhs[] =
++{
++      25,     0,    -1,     3,    13,    26,    31,    -1,    28,    31,
++      -1,    -1,    27,    26,    -1,    43,     4,    30,    30,    13,
++      -1,    -1,    29,    28,    -1,    27,    -1,    43,     4,    30,
++      14,    30,    13,    -1,     6,    -1,     7,    -1,    15,    32,
++      -1,    16,    33,    41,    17,    13,    -1,    -1,    33,    34,
++      -1,    43,     5,    18,    35,    13,    -1,    43,     5,    13,
++      -1,    36,    10,    -1,    36,    19,    37,    20,    -1,    36,
++      21,    40,    22,    -1,    36,    12,    -1,    35,    11,    -1,
++      -1,    35,    23,    -1,    36,    11,    -1,    -1,    37,    39,
++      -1,    37,    12,    -1,    37,    11,    -1,    -1,     8,    -1,
++       6,    -1,    38,     7,    -1,    -1,    40,     9,    -1,    40,
++      11,    -1,    -1,    42,    41,    -1,    42,    34,    -1,    43,
++       5,    32,    -1,    -1,    11,    -1
++};
++
++/* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
++static const yytype_uint16 yyrline[] =
++{
++       0,    85,    85,    89,    97,   100,   107,   115,   118,   125,
++     129,   136,   140,   147,   154,   162,   165,   172,   176,   183,
++     187,   191,   195,   199,   207,   210,   214,   222,   225,   229,
++     234,   242,   245,   249,   253,   261,   264,   268,   276,   279,
++     283,   291,   299,   302
++};
++#endif
++
++#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
++/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
++   First, the terminals, then, starting at YYNTOKENS, nonterminals.  */
++static const char *const yytname[] =
++{
++  "$end", "error", "$undefined", "DT_V1", "DT_MEMRESERVE",
++  "DT_PROPNODENAME", "DT_LITERAL", "DT_LEGACYLITERAL", "DT_BASE",
++  "DT_BYTE", "DT_STRING", "DT_LABEL", "DT_REF", "';'", "'-'", "'/'", "'{'",
++  "'}'", "'='", "'<'", "'>'", "'['", "']'", "','", "$accept", "sourcefile",
++  "memreserves", "memreserve", "v0_memreserves", "v0_memreserve", "addr",
++  "devicetree", "nodedef", "proplist", "propdef", "propdata",
++  "propdataprefix", "celllist", "cellbase", "cellval", "bytestring",
++  "subnodes", "subnode", "label", 0
++};
++#endif
++
++# ifdef YYPRINT
++/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
++   token YYLEX-NUM.  */
++static const yytype_uint16 yytoknum[] =
++{
++       0,   256,   257,   258,   259,   260,   261,   262,   263,   264,
++     265,   266,   267,    59,    45,    47,   123,   125,    61,    60,
++      62,    91,    93,    44
++};
++# endif
++
++/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
++static const yytype_uint8 yyr1[] =
 +{
-+	ipl_info.type = get_ipl_type();
-+	switch (ipl_info.type) {
-+	case IPL_TYPE_CCW:
-+		ipl_info.data.ccw.dev_id.devno = ipl_devno;
-+		ipl_info.data.ccw.dev_id.ssid = 0;
-+		break;
-+	case IPL_TYPE_FCP:
-+	case IPL_TYPE_FCP_DUMP:
-+		ipl_info.data.fcp.dev_id.devno =
-+			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
-+		ipl_info.data.fcp.dev_id.ssid = 0;
-+		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
-+		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
-+		break;
-+	case IPL_TYPE_NSS:
-+		strncpy(ipl_info.data.nss.name, kernel_nss_name,
-+			sizeof(ipl_info.data.nss.name));
-+		break;
-+	case IPL_TYPE_UNKNOWN:
-+	default:
-+		/* We have no info to copy */
-+		break;
-+	}
-+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
-+}
++       0,    24,    25,    25,    26,    26,    27,    28,    28,    29,
++      29,    30,    30,    31,    32,    33,    33,    34,    34,    35,
++      35,    35,    35,    35,    36,    36,    36,    37,    37,    37,
++      37,    38,    38,    39,    39,    40,    40,    40,    41,    41,
++      41,    42,    43,    43
++};
++
++/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
++static const yytype_uint8 yyr2[] =
++{
++       0,     2,     4,     2,     0,     2,     5,     0,     2,     1,
++       6,     1,     1,     2,     5,     0,     2,     5,     3,     2,
++       4,     4,     2,     2,     0,     2,     2,     0,     2,     2,
++       2,     0,     1,     1,     2,     0,     2,     2,     0,     2,
++       2,     3,     0,     1
++};
++
++/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
++   STATE-NUM when YYTABLE doesn't specify something else to do.  Zero
++   means the default is an error.  */
++static const yytype_uint8 yydefact[] =
++{
++       7,     0,    43,     0,     9,     0,     7,     0,     4,     1,
++       0,     3,     8,     0,     0,     4,     0,    15,    13,    11,
++      12,     0,     2,     5,     0,    38,     0,     0,     0,    16,
++       0,    38,     0,     0,     6,     0,    40,    39,     0,    10,
++      14,    18,    24,    41,     0,     0,    23,    17,    25,    19,
++      26,    22,    27,    35,    31,     0,    33,    32,    30,    29,
++      20,     0,    28,    36,    37,    21,    34
++};
++
++/* YYDEFGOTO[NTERM-NUM].  */
++static const yytype_int8 yydefgoto[] =
++{
++      -1,     3,    14,     4,     5,     6,    27,    11,    18,    25,
++      29,    44,    45,    54,    61,    62,    55,    30,    31,     7
++};
++
++/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
++   STATE-NUM.  */
++#define YYPACT_NINF -13
++static const yytype_int8 yypact[] =
++{
++      23,    11,   -13,    37,   -13,    -4,    18,    39,    18,   -13,
++      28,   -13,   -13,    34,    -4,    18,    41,   -13,   -13,   -13,
++     -13,    25,   -13,   -13,    34,    -3,    34,    33,    34,   -13,
++      30,    -3,    43,    36,   -13,    38,   -13,   -13,    20,   -13,
++     -13,   -13,   -13,   -13,     2,     9,   -13,   -13,   -13,   -13,
++     -13,   -13,   -13,   -13,    -2,    -6,   -13,   -13,   -13,   -13,
++     -13,    45,   -13,   -13,   -13,   -13,   -13
++};
++
++/* YYPGOTO[NTERM-NUM].  */
++static const yytype_int8 yypgoto[] =
++{
++     -13,   -13,    35,    27,    47,   -13,   -12,    40,    17,   -13,
++      26,   -13,   -13,   -13,   -13,   -13,   -13,    29,   -13,    -8
++};
++
++/* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
++   positive, shift that token.  If negative, reduce the rule which
++   number is the opposite.  If zero, do what YYDEFACT says.
++   If YYTABLE_NINF, syntax error.  */
++#define YYTABLE_NINF -43
++static const yytype_int8 yytable[] =
++{
++      16,    21,   -42,    63,    56,    64,    57,    16,     2,    58,
++      59,    10,    28,    46,    33,    47,    65,    32,    60,    49,
++      50,    51,   -42,    32,     8,    48,     1,   -42,    52,     2,
++      53,    19,    20,    41,     2,    15,    17,     9,    42,    26,
++      19,    20,    15,    13,    17,    24,    34,    35,    38,    39,
++      23,    40,    66,    12,    22,    43,     0,    36,     0,     0,
++      37
++};
++
++static const yytype_int8 yycheck[] =
++{
++       8,    13,     5,     9,     6,    11,     8,    15,    11,    11,
++      12,    15,    24,    11,    26,    13,    22,    25,    20,    10,
++      11,    12,     4,    31,    13,    23,     3,     4,    19,    11,
++      21,     6,     7,    13,    11,     8,    16,     0,    18,    14,
++       6,     7,    15,     4,    16,     4,    13,    17,     5,    13,
++      15,    13,     7,     6,    14,    38,    -1,    31,    -1,    -1,
++      31
++};
++
++/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
++   symbol of state STATE-NUM.  */
++static const yytype_uint8 yystos[] =
++{
++       0,     3,    11,    25,    27,    28,    29,    43,    13,     0,
++      15,    31,    28,     4,    26,    27,    43,    16,    32,     6,
++       7,    30,    31,    26,     4,    33,    14,    30,    30,    34,
++      41,    42,    43,    30,    13,    17,    34,    41,     5,    13,
++      13,    13,    18,    32,    35,    36,    11,    13,    23,    10,
++      11,    12,    19,    21,    37,    40,     6,     8,    11,    12,
++      20,    38,    39,     9,    11,    22,     7
++};
++
++#define yyerrok		(yyerrstatus = 0)
++#define yyclearin	(yychar = YYEMPTY)
++#define YYEMPTY		(-2)
++#define YYEOF		0
++
++#define YYACCEPT	goto yyacceptlab
++#define YYABORT		goto yyabortlab
++#define YYERROR		goto yyerrorlab
++
++
++/* Like YYERROR except do call yyerror.  This remains here temporarily
++   to ease the transition to the new meaning of YYERROR, for GCC.
++   Once GCC version 2 has supplanted version 1, this can go.  */
++
++#define YYFAIL		goto yyerrlab
++
++#define YYRECOVERING()  (!!yyerrstatus)
++
++#define YYBACKUP(Token, Value)					\
++do								\
++  if (yychar == YYEMPTY && yylen == 1)				\
++    {								\
++      yychar = (Token);						\
++      yylval = (Value);						\
++      yytoken = YYTRANSLATE (yychar);				\
++      YYPOPSTACK (1);						\
++      goto yybackup;						\
++    }								\
++  else								\
++    {								\
++      yyerror (YY_("syntax error: cannot back up")); \
++      YYERROR;							\
++    }								\
++while (YYID (0))
++
++
++#define YYTERROR	1
++#define YYERRCODE	256
++
++
++/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
++   If N is 0, then set CURRENT to the empty location which ends
++   the previous symbol: RHS[0] (always defined).  */
++
++#define YYRHSLOC(Rhs, K) ((Rhs)[K])
++#ifndef YYLLOC_DEFAULT
++# define YYLLOC_DEFAULT(Current, Rhs, N)				\
++    do									\
++      if (YYID (N))                                                    \
++	{								\
++	  (Current).first_line   = YYRHSLOC (Rhs, 1).first_line;	\
++	  (Current).first_column = YYRHSLOC (Rhs, 1).first_column;	\
++	  (Current).last_line    = YYRHSLOC (Rhs, N).last_line;		\
++	  (Current).last_column  = YYRHSLOC (Rhs, N).last_column;	\
++	}								\
++      else								\
++	{								\
++	  (Current).first_line   = (Current).last_line   =		\
++	    YYRHSLOC (Rhs, 0).last_line;				\
++	  (Current).first_column = (Current).last_column =		\
++	    YYRHSLOC (Rhs, 0).last_column;				\
++	}								\
++    while (YYID (0))
++#endif
 +
- void __init ipl_save_parameters(void)
- {
- 	struct cio_iplinfo iplinfo;
-@@ -1185,3 +1475,4 @@ void s390_reset_system(void)
- 
- 	do_reset_calls();
- }
 +
-diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
-index 29f7884..0e7aca0 100644
---- a/arch/s390/kernel/process.c
-+++ b/arch/s390/kernel/process.c
-@@ -36,7 +36,7 @@
- #include <linux/init.h>
- #include <linux/module.h>
- #include <linux/notifier.h>
--
-+#include <linux/utsname.h>
- #include <asm/uaccess.h>
- #include <asm/pgtable.h>
- #include <asm/system.h>
-@@ -182,13 +182,15 @@ void cpu_idle(void)
- 
- void show_regs(struct pt_regs *regs)
- {
--	struct task_struct *tsk = current;
--
--        printk("CPU:    %d    %s\n", task_thread_info(tsk)->cpu, print_tainted());
--        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
--	       current->comm, task_pid_nr(current), (void *) tsk,
--	       (void *) tsk->thread.ksp);
--
-+	print_modules();
-+	printk("CPU: %d %s %s %.*s\n",
-+	       task_thread_info(current)->cpu, print_tainted(),
-+	       init_utsname()->release,
-+	       (int)strcspn(init_utsname()->version, " "),
-+	       init_utsname()->version);
-+	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
-+	       current->comm, current->pid, current,
-+	       (void *) current->thread.ksp);
- 	show_registers(regs);
- 	/* Show stack backtrace if pt_regs is from kernel mode */
- 	if (!(regs->psw.mask & PSW_MASK_PSTATE))
-diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
-index 1d81bf9..6e036ba 100644
---- a/arch/s390/kernel/ptrace.c
-+++ b/arch/s390/kernel/ptrace.c
-@@ -86,13 +86,13 @@ FixPerRegisters(struct task_struct *task)
- 		per_info->control_regs.bits.storage_alt_space_ctl = 0;
- }
- 
--static void set_single_step(struct task_struct *task)
-+void user_enable_single_step(struct task_struct *task)
- {
- 	task->thread.per_info.single_step = 1;
- 	FixPerRegisters(task);
- }
- 
--static void clear_single_step(struct task_struct *task)
-+void user_disable_single_step(struct task_struct *task)
- {
- 	task->thread.per_info.single_step = 0;
- 	FixPerRegisters(task);
-@@ -107,7 +107,7 @@ void
- ptrace_disable(struct task_struct *child)
- {
- 	/* make sure the single step bit is not set. */
--	clear_single_step(child);
-+	user_disable_single_step(child);
- }
- 
- #ifndef CONFIG_64BIT
-@@ -651,7 +651,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
- 			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- 		child->exit_code = data;
- 		/* make sure the single step bit is not set. */
--		clear_single_step(child);
-+		user_disable_single_step(child);
- 		wake_up_process(child);
- 		return 0;
- 
-@@ -665,7 +665,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
- 			return 0;
- 		child->exit_code = SIGKILL;
- 		/* make sure the single step bit is not set. */
--		clear_single_step(child);
-+		user_disable_single_step(child);
- 		wake_up_process(child);
- 		return 0;
- 
-@@ -675,10 +675,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
- 			return -EIO;
- 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- 		child->exit_code = data;
--		if (data)
--			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
--		else
--			set_single_step(child);
-+		user_enable_single_step(child);
- 		/* give it a chance to run. */
- 		wake_up_process(child);
- 		return 0;
-diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
-index 577aa7d..766c783 100644
---- a/arch/s390/kernel/setup.c
-+++ b/arch/s390/kernel/setup.c
-@@ -126,75 +126,6 @@ void __cpuinit cpu_init(void)
- }
- 
- /*
-- * VM halt and poweroff setup routines
-- */
--char vmhalt_cmd[128] = "";
--char vmpoff_cmd[128] = "";
--static char vmpanic_cmd[128] = "";
--
--static void strncpy_skip_quote(char *dst, char *src, int n)
--{
--        int sx, dx;
--
--        dx = 0;
--        for (sx = 0; src[sx] != 0; sx++) {
--                if (src[sx] == '"') continue;
--                dst[dx++] = src[sx];
--                if (dx >= n) break;
--        }
--}
--
--static int __init vmhalt_setup(char *str)
--{
--        strncpy_skip_quote(vmhalt_cmd, str, 127);
--        vmhalt_cmd[127] = 0;
--        return 1;
--}
--
--__setup("vmhalt=", vmhalt_setup);
--
--static int __init vmpoff_setup(char *str)
--{
--        strncpy_skip_quote(vmpoff_cmd, str, 127);
--        vmpoff_cmd[127] = 0;
--        return 1;
--}
--
--__setup("vmpoff=", vmpoff_setup);
--
--static int vmpanic_notify(struct notifier_block *self, unsigned long event,
--			  void *data)
--{
--	if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
--		cpcmd(vmpanic_cmd, NULL, 0, NULL);
--
--	return NOTIFY_OK;
--}
--
--#define PANIC_PRI_VMPANIC	0
--
--static struct notifier_block vmpanic_nb = {
--	.notifier_call = vmpanic_notify,
--	.priority = PANIC_PRI_VMPANIC
--};
--
--static int __init vmpanic_setup(char *str)
--{
--	static int register_done __initdata = 0;
--
--	strncpy_skip_quote(vmpanic_cmd, str, 127);
--	vmpanic_cmd[127] = 0;
--	if (!register_done) {
--		register_done = 1;
--		atomic_notifier_chain_register(&panic_notifier_list,
--					       &vmpanic_nb);
--	}
--	return 1;
--}
--
--__setup("vmpanic=", vmpanic_setup);
--
--/*
-  * condev= and conmode= setup parameter.
-  */
- 
-@@ -308,38 +239,6 @@ static void __init setup_zfcpdump(unsigned int console_devno)
- static inline void setup_zfcpdump(unsigned int console_devno) {}
- #endif /* CONFIG_ZFCPDUMP */
- 
--#ifdef CONFIG_SMP
--void (*_machine_restart)(char *command) = machine_restart_smp;
--void (*_machine_halt)(void) = machine_halt_smp;
--void (*_machine_power_off)(void) = machine_power_off_smp;
--#else
--/*
-- * Reboot, halt and power_off routines for non SMP.
-- */
--static void do_machine_restart_nonsmp(char * __unused)
--{
--	do_reipl();
--}
--
--static void do_machine_halt_nonsmp(void)
--{
--        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
--		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
--        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
--}
--
--static void do_machine_power_off_nonsmp(void)
--{
--        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
--		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
--        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
--}
--
--void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
--void (*_machine_halt)(void) = do_machine_halt_nonsmp;
--void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
--#endif
--
-  /*
-  * Reboot, halt and power_off stubs. They just call _machine_restart,
-  * _machine_halt or _machine_power_off. 
-@@ -559,7 +458,9 @@ setup_resources(void)
- 	data_resource.start = (unsigned long) &_etext;
- 	data_resource.end = (unsigned long) &_edata - 1;
- 
--	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-+	for (i = 0; i < MEMORY_CHUNKS; i++) {
-+		if (!memory_chunk[i].size)
-+			continue;
- 		res = alloc_bootmem_low(sizeof(struct resource));
- 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
- 		switch (memory_chunk[i].type) {
-@@ -617,7 +518,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
- static void __init setup_memory_end(void)
- {
- 	unsigned long memory_size;
--	unsigned long max_mem, max_phys;
-+	unsigned long max_mem;
- 	int i;
- 
- #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
-@@ -625,10 +526,31 @@ static void __init setup_memory_end(void)
- 		memory_end = ZFCPDUMP_HSA_SIZE;
- #endif
- 	memory_size = 0;
--	max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
- 	memory_end &= PAGE_MASK;
- 
--	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
-+	max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
-+	memory_end = min(max_mem, memory_end);
-+
-+	/*
-+	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
-+	 * extra checks that HOLES_IN_ZONE would require.
-+	 */
-+	for (i = 0; i < MEMORY_CHUNKS; i++) {
-+		unsigned long start, end;
-+		struct mem_chunk *chunk;
-+		unsigned long align;
++/* YY_LOCATION_PRINT -- Print the location on the stream.
++   This macro was not mandated originally: define only if we know
++   we won't break user code: when these are the locations we know.  */
 +
-+		chunk = &memory_chunk[i];
-+		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
-+		start = (chunk->addr + align - 1) & ~(align - 1);
-+		end = (chunk->addr + chunk->size) & ~(align - 1);
-+		if (start >= end)
-+			memset(chunk, 0, sizeof(*chunk));
-+		else {
-+			chunk->addr = start;
-+			chunk->size = end - start;
-+		}
-+	}
- 
- 	for (i = 0; i < MEMORY_CHUNKS; i++) {
- 		struct mem_chunk *chunk = &memory_chunk[i];
-@@ -890,7 +812,7 @@ setup_arch(char **cmdline_p)
- 
- 	parse_early_param();
- 
--	setup_ipl_info();
-+	setup_ipl();
- 	setup_memory_end();
- 	setup_addressing_mode();
- 	setup_memory();
-@@ -899,7 +821,6 @@ setup_arch(char **cmdline_p)
- 
-         cpu_init();
-         __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
--	smp_setup_cpu_possible_map();
- 
- 	/*
- 	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
-@@ -920,7 +841,7 @@ setup_arch(char **cmdline_p)
- 
- void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
- {
--   printk("cpu %d "
-+   printk(KERN_INFO "cpu %d "
- #ifdef CONFIG_SMP
-            "phys_idx=%d "
- #endif
-@@ -996,7 +917,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
- static void c_stop(struct seq_file *m, void *v)
- {
- }
--struct seq_operations cpuinfo_op = {
-+const struct seq_operations cpuinfo_op = {
- 	.start	= c_start,
- 	.next	= c_next,
- 	.stop	= c_stop,
-diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
-index d264671..4449bf3 100644
---- a/arch/s390/kernel/signal.c
-+++ b/arch/s390/kernel/signal.c
-@@ -471,6 +471,7 @@ void do_signal(struct pt_regs *regs)
- 
- 	if (signr > 0) {
- 		/* Whee!  Actually deliver the signal.  */
-+		int ret;
- #ifdef CONFIG_COMPAT
- 		if (test_thread_flag(TIF_31BIT)) {
- 			extern int handle_signal32(unsigned long sig,
-@@ -478,15 +479,12 @@ void do_signal(struct pt_regs *regs)
- 						   siginfo_t *info,
- 						   sigset_t *oldset,
- 						   struct pt_regs *regs);
--			if (handle_signal32(
--				    signr, &ka, &info, oldset, regs) == 0) {
--				if (test_thread_flag(TIF_RESTORE_SIGMASK))
--					clear_thread_flag(TIF_RESTORE_SIGMASK);
--			}
--			return;
-+			ret = handle_signal32(signr, &ka, &info, oldset, regs);
- 	        }
-+		else
- #endif
--		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-+			ret = handle_signal(signr, &ka, &info, oldset, regs);
-+		if (!ret) {
- 			/*
- 			 * A signal was successfully delivered; the saved
- 			 * sigmask will have been stored in the signal frame,
-@@ -495,6 +493,14 @@ void do_signal(struct pt_regs *regs)
- 			 */
- 			if (test_thread_flag(TIF_RESTORE_SIGMASK))
- 				clear_thread_flag(TIF_RESTORE_SIGMASK);
++#ifndef YY_LOCATION_PRINT
++# if YYLTYPE_IS_TRIVIAL
++#  define YY_LOCATION_PRINT(File, Loc)			\
++     fprintf (File, "%d.%d-%d.%d",			\
++	      (Loc).first_line, (Loc).first_column,	\
++	      (Loc).last_line,  (Loc).last_column)
++# else
++#  define YY_LOCATION_PRINT(File, Loc) ((void) 0)
++# endif
++#endif
 +
-+			/*
-+			 * If we would have taken a single-step trap
-+			 * for a normal instruction, act like we took
-+			 * one for the handler setup.
-+			 */
-+			if (current->thread.per_info.single_step)
-+				set_thread_flag(TIF_SINGLE_STEP);
- 		}
- 		return;
- 	}
-diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
-index 264ea90..aa37fa1 100644
---- a/arch/s390/kernel/smp.c
-+++ b/arch/s390/kernel/smp.c
-@@ -42,6 +42,7 @@
- #include <asm/tlbflush.h>
- #include <asm/timer.h>
- #include <asm/lowcore.h>
-+#include <asm/sclp.h>
- #include <asm/cpu.h>
- 
- /*
-@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr);
- cpumask_t cpu_online_map = CPU_MASK_NONE;
- EXPORT_SYMBOL(cpu_online_map);
- 
--cpumask_t cpu_possible_map = CPU_MASK_NONE;
-+cpumask_t cpu_possible_map = CPU_MASK_ALL;
- EXPORT_SYMBOL(cpu_possible_map);
- 
- static struct task_struct *current_set[NR_CPUS];
- 
-+static u8 smp_cpu_type;
-+static int smp_use_sigp_detection;
 +
-+enum s390_cpu_state {
-+	CPU_STATE_STANDBY,
-+	CPU_STATE_CONFIGURED,
-+};
++/* YYLEX -- calling `yylex' with the right arguments.  */
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+static DEFINE_MUTEX(smp_cpu_state_mutex);
++#ifdef YYLEX_PARAM
++# define YYLEX yylex (YYLEX_PARAM)
++#else
++# define YYLEX yylex ()
 +#endif
-+static int smp_cpu_state[NR_CPUS];
 +
-+static DEFINE_PER_CPU(struct cpu, cpu_devices);
-+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
++/* Enable debugging if requested.  */
++#if YYDEBUG
 +
- static void smp_ext_bitcall(int, ec_bit_sig);
- 
- /*
-@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- }
- EXPORT_SYMBOL(smp_call_function_single);
- 
-+/**
-+ * smp_call_function_mask(): Run a function on a set of other CPUs.
-+ * @mask: The set of cpus to run on.  Must not include the current cpu.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code.
-+ *
-+ * If @wait is true, then returns once @func has returned; otherwise
-+ * it returns just before the target cpu calls @func.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ */
-+int
-+smp_call_function_mask(cpumask_t mask,
-+			void (*func)(void *), void *info,
-+			int wait)
++# ifndef YYFPRINTF
++#  include <stdio.h> /* INFRINGES ON USER NAME SPACE */
++#  define YYFPRINTF fprintf
++# endif
++
++# define YYDPRINTF(Args)			\
++do {						\
++  if (yydebug)					\
++    YYFPRINTF Args;				\
++} while (YYID (0))
++
++# define YY_SYMBOL_PRINT(Title, Type, Value, Location)			  \
++do {									  \
++  if (yydebug)								  \
++    {									  \
++      YYFPRINTF (stderr, "%s ", Title);					  \
++      yy_symbol_print (stderr,						  \
++		  Type, Value, Location); \
++      YYFPRINTF (stderr, "\n");						  \
++    }									  \
++} while (YYID (0))
++
++
++/*--------------------------------.
++| Print this symbol on YYOUTPUT.  |
++`--------------------------------*/
++
++/*ARGSUSED*/
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++static void
++yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp)
++#else
++static void
++yy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp)
++    FILE *yyoutput;
++    int yytype;
++    YYSTYPE const * const yyvaluep;
++    YYLTYPE const * const yylocationp;
++#endif
 +{
-+	preempt_disable();
-+	__smp_call_function_map(func, info, 0, wait, mask);
-+	preempt_enable();
-+	return 0;
++  if (!yyvaluep)
++    return;
++  YYUSE (yylocationp);
++# ifdef YYPRINT
++  if (yytype < YYNTOKENS)
++    YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
++# else
++  YYUSE (yyoutput);
++# endif
++  switch (yytype)
++    {
++      default:
++	break;
++    }
 +}
-+EXPORT_SYMBOL(smp_call_function_mask);
 +
- void smp_send_stop(void)
- {
- 	int cpu, rc;
-@@ -217,33 +261,6 @@ void smp_send_stop(void)
- }
- 
- /*
-- * Reboot, halt and power_off routines for SMP.
-- */
--void machine_restart_smp(char *__unused)
--{
--	smp_send_stop();
--	do_reipl();
--}
--
--void machine_halt_smp(void)
--{
--	smp_send_stop();
--	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
--		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
--	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
--	for (;;);
--}
--
--void machine_power_off_smp(void)
--{
--	smp_send_stop();
--	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
--		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
--	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
--	for (;;);
--}
--
--/*
-  * This is the main routine where commands issued by other
-  * cpus are handled.
-  */
-@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
- }
- EXPORT_SYMBOL(smp_ctl_clear_bit);
- 
-+/*
-+ * In early ipl state a temp. logically cpu number is needed, so the sigp
-+ * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
-+ * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
-+ */
-+#define CPU_INIT_NO	1
 +
- #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
- 
- /*
-@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
- 		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
- 		return;
- 	}
--	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
--	__cpu_logical_map[1] = (__u16) phy_cpu;
--	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
-+	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
-+	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
-+	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
-+	       sigp_busy)
- 		cpu_relax();
- 	memcpy(zfcpdump_save_areas[cpu],
- 	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
-@@ -397,32 +422,155 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
- 
- #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
- 
--/*
-- * Lets check how many CPUs we have.
-- */
--static unsigned int __init smp_count_cpus(void)
-+static int cpu_stopped(int cpu)
- {
--	unsigned int cpu, num_cpus;
--	__u16 boot_cpu_addr;
-+	__u32 status;
- 
--	/*
--	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
--	 */
-+	/* Check for stopped state */
-+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
-+	    sigp_status_stored) {
-+		if (status & 0x40)
-+			return 1;
-+	}
-+	return 0;
-+}
++/*--------------------------------.
++| Print this symbol on YYOUTPUT.  |
++`--------------------------------*/
 +
-+static int cpu_known(int cpu_id)
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++static void
++yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp)
++#else
++static void
++yy_symbol_print (yyoutput, yytype, yyvaluep, yylocationp)
++    FILE *yyoutput;
++    int yytype;
++    YYSTYPE const * const yyvaluep;
++    YYLTYPE const * const yylocationp;
++#endif
 +{
-+	int cpu;
++  if (yytype < YYNTOKENS)
++    YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
++  else
++    YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
 +
-+	for_each_present_cpu(cpu) {
-+		if (__cpu_logical_map[cpu] == cpu_id)
-+			return 1;
-+	}
-+	return 0;
++  YY_LOCATION_PRINT (yyoutput, *yylocationp);
++  YYFPRINTF (yyoutput, ": ");
++  yy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp);
++  YYFPRINTF (yyoutput, ")");
 +}
 +
-+static int smp_rescan_cpus_sigp(cpumask_t avail)
-+{
-+	int cpu_id, logical_cpu;
-+
-+	logical_cpu = first_cpu(avail);
-+	if (logical_cpu == NR_CPUS)
-+		return 0;
-+	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
-+		if (cpu_known(cpu_id))
-+			continue;
-+		__cpu_logical_map[logical_cpu] = cpu_id;
-+		if (!cpu_stopped(logical_cpu))
-+			continue;
-+		cpu_set(logical_cpu, cpu_present_map);
-+		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
-+		logical_cpu = next_cpu(logical_cpu, avail);
-+		if (logical_cpu == NR_CPUS)
-+			break;
-+	}
-+	return 0;
-+}
++/*------------------------------------------------------------------.
++| yy_stack_print -- Print the state stack from its BOTTOM up to its |
++| TOP (included).                                                   |
++`------------------------------------------------------------------*/
 +
-+static int smp_rescan_cpus_sclp(cpumask_t avail)
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++static void
++yy_stack_print (yytype_int16 *bottom, yytype_int16 *top)
++#else
++static void
++yy_stack_print (bottom, top)
++    yytype_int16 *bottom;
++    yytype_int16 *top;
++#endif
 +{
-+	struct sclp_cpu_info *info;
-+	int cpu_id, logical_cpu, cpu;
-+	int rc;
-+
-+	logical_cpu = first_cpu(avail);
-+	if (logical_cpu == NR_CPUS)
-+		return 0;
-+	info = kmalloc(sizeof(*info), GFP_KERNEL);
-+	if (!info)
-+		return -ENOMEM;
-+	rc = sclp_get_cpu_info(info);
-+	if (rc)
-+		goto out;
-+	for (cpu = 0; cpu < info->combined; cpu++) {
-+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
-+			continue;
-+		cpu_id = info->cpu[cpu].address;
-+		if (cpu_known(cpu_id))
-+			continue;
-+		__cpu_logical_map[logical_cpu] = cpu_id;
-+		cpu_set(logical_cpu, cpu_present_map);
-+		if (cpu >= info->configured)
-+			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
-+		else
-+			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
-+		logical_cpu = next_cpu(logical_cpu, avail);
-+		if (logical_cpu == NR_CPUS)
-+			break;
-+	}
-+out:
-+	kfree(info);
-+	return rc;
++  YYFPRINTF (stderr, "Stack now");
++  for (; bottom <= top; ++bottom)
++    YYFPRINTF (stderr, " %d", *bottom);
++  YYFPRINTF (stderr, "\n");
 +}
 +
-+static int smp_rescan_cpus(void)
-+{
-+	cpumask_t avail;
++# define YY_STACK_PRINT(Bottom, Top)				\
++do {								\
++  if (yydebug)							\
++    yy_stack_print ((Bottom), (Top));				\
++} while (YYID (0))
 +
-+	cpus_xor(avail, cpu_possible_map, cpu_present_map);
-+	if (smp_use_sigp_detection)
-+		return smp_rescan_cpus_sigp(avail);
-+	else
-+		return smp_rescan_cpus_sclp(avail);
-+}
 +
-+static void __init smp_detect_cpus(void)
-+{
-+	unsigned int cpu, c_cpus, s_cpus;
-+	struct sclp_cpu_info *info;
-+	u16 boot_cpu_addr, cpu_addr;
++/*------------------------------------------------.
++| Report that the YYRULE is going to be reduced.  |
++`------------------------------------------------*/
 +
-+	c_cpus = 1;
-+	s_cpus = 0;
- 	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
--	current_thread_info()->cpu = 0;
--	num_cpus = 1;
--	for (cpu = 0; cpu <= 65535; cpu++) {
--		if ((__u16) cpu == boot_cpu_addr)
-+	info = kmalloc(sizeof(*info), GFP_KERNEL);
-+	if (!info)
-+		panic("smp_detect_cpus failed to allocate memory\n");
-+	/* Use sigp detection algorithm if sclp doesn't work. */
-+	if (sclp_get_cpu_info(info)) {
-+		smp_use_sigp_detection = 1;
-+		for (cpu = 0; cpu <= 65535; cpu++) {
-+			if (cpu == boot_cpu_addr)
-+				continue;
-+			__cpu_logical_map[CPU_INIT_NO] = cpu;
-+			if (!cpu_stopped(CPU_INIT_NO))
-+				continue;
-+			smp_get_save_area(c_cpus, cpu);
-+			c_cpus++;
-+		}
-+		goto out;
-+	}
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++static void
++yy_reduce_print (YYSTYPE *yyvsp, YYLTYPE *yylsp, int yyrule)
++#else
++static void
++yy_reduce_print (yyvsp, yylsp, yyrule)
++    YYSTYPE *yyvsp;
++    YYLTYPE *yylsp;
++    int yyrule;
++#endif
++{
++  int yynrhs = yyr2[yyrule];
++  int yyi;
++  unsigned long int yylno = yyrline[yyrule];
++  YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
++	     yyrule - 1, yylno);
++  /* The symbols being reduced.  */
++  for (yyi = 0; yyi < yynrhs; yyi++)
++    {
++      fprintf (stderr, "   $%d = ", yyi + 1);
++      yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
++		       &(yyvsp[(yyi + 1) - (yynrhs)])
++		       , &(yylsp[(yyi + 1) - (yynrhs)])		       );
++      fprintf (stderr, "\n");
++    }
++}
 +
-+	if (info->has_cpu_type) {
-+		for (cpu = 0; cpu < info->combined; cpu++) {
-+			if (info->cpu[cpu].address == boot_cpu_addr) {
-+				smp_cpu_type = info->cpu[cpu].type;
-+				break;
-+			}
-+		}
-+	}
++# define YY_REDUCE_PRINT(Rule)		\
++do {					\
++  if (yydebug)				\
++    yy_reduce_print (yyvsp, yylsp, Rule); \
++} while (YYID (0))
++
++/* Nonzero means print parse trace.  It is left uninitialized so that
++   multiple parsers can coexist.  */
++int yydebug;
++#else /* !YYDEBUG */
++# define YYDPRINTF(Args)
++# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
++# define YY_STACK_PRINT(Bottom, Top)
++# define YY_REDUCE_PRINT(Rule)
++#endif /* !YYDEBUG */
++
++
++/* YYINITDEPTH -- initial size of the parser's stacks.  */
++#ifndef	YYINITDEPTH
++# define YYINITDEPTH 200
++#endif
++
++/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
++   if the built-in stack extension method is used).
++
++   Do not make this value too large; the results are undefined if
++   YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
++   evaluated with infinite-precision integer arithmetic.  */
 +
-+	for (cpu = 0; cpu < info->combined; cpu++) {
-+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
-+			continue;
-+		cpu_addr = info->cpu[cpu].address;
-+		if (cpu_addr == boot_cpu_addr)
- 			continue;
--		__cpu_logical_map[1] = (__u16) cpu;
--		if (signal_processor(1, sigp_sense) == sigp_not_operational)
-+		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
-+		if (!cpu_stopped(CPU_INIT_NO)) {
-+			s_cpus++;
- 			continue;
--		smp_get_save_area(num_cpus, cpu);
--		num_cpus++;
-+		}
-+		smp_get_save_area(c_cpus, cpu_addr);
-+		c_cpus++;
- 	}
--	printk("Detected %d CPU's\n", (int) num_cpus);
--	printk("Boot cpu address %2X\n", boot_cpu_addr);
--	return num_cpus;
-+out:
-+	kfree(info);
-+	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
-+	get_online_cpus();
-+	smp_rescan_cpus();
-+	put_online_cpus();
- }
- 
- /*
-@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid)
- 	return 0;
- }
- 
--DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
--
- static void __init smp_create_idle(unsigned int cpu)
- {
- 	struct task_struct *p;
-@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu)
- 	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
- }
- 
--static int cpu_stopped(int cpu)
-+static int __cpuinit smp_alloc_lowcore(int cpu)
- {
--	__u32 status;
-+	unsigned long async_stack, panic_stack;
-+	struct _lowcore *lowcore;
-+	int lc_order;
++#ifndef YYMAXDEPTH
++# define YYMAXDEPTH 10000
++#endif
 +
-+	lc_order = sizeof(long) == 8 ? 1 : 0;
-+	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
-+	if (!lowcore)
-+		return -ENOMEM;
-+	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-+	if (!async_stack)
-+		goto out_async_stack;
-+	panic_stack = __get_free_page(GFP_KERNEL);
-+	if (!panic_stack)
-+		goto out_panic_stack;
++
 +
-+	*lowcore = S390_lowcore;
-+	lowcore->async_stack = async_stack + ASYNC_SIZE;
-+	lowcore->panic_stack = panic_stack + PAGE_SIZE;
- 
--	/* Check for stopped state */
--	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
--	    sigp_status_stored) {
--		if (status & 0x40)
--			return 1;
-+#ifndef CONFIG_64BIT
-+	if (MACHINE_HAS_IEEE) {
-+		unsigned long save_area;
++#if YYERROR_VERBOSE
 +
-+		save_area = get_zeroed_page(GFP_KERNEL);
-+		if (!save_area)
-+			goto out_save_area;
-+		lowcore->extended_save_area_addr = (u32) save_area;
- 	}
++# ifndef yystrlen
++#  if defined __GLIBC__ && defined _STRING_H
++#   define yystrlen strlen
++#  else
++/* Return the length of YYSTR.  */
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++static YYSIZE_T
++yystrlen (const char *yystr)
++#else
++static YYSIZE_T
++yystrlen (yystr)
++    const char *yystr;
 +#endif
-+	lowcore_ptr[cpu] = lowcore;
- 	return 0;
++{
++  YYSIZE_T yylen;
++  for (yylen = 0; yystr[yylen]; yylen++)
++    continue;
++  return yylen;
++}
++#  endif
++# endif
 +
-+#ifndef CONFIG_64BIT
-+out_save_area:
-+	free_page(panic_stack);
++# ifndef yystpcpy
++#  if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
++#   define yystpcpy stpcpy
++#  else
++/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
++   YYDEST.  */
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++static char *
++yystpcpy (char *yydest, const char *yysrc)
++#else
++static char *
++yystpcpy (yydest, yysrc)
++    char *yydest;
++    const char *yysrc;
 +#endif
-+out_panic_stack:
-+	free_pages(async_stack, ASYNC_ORDER);
-+out_async_stack:
-+	free_pages((unsigned long) lowcore, lc_order);
-+	return -ENOMEM;
- }
- 
--/* Upping and downing of CPUs */
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void smp_free_lowcore(int cpu)
 +{
-+	struct _lowcore *lowcore;
-+	int lc_order;
++  char *yyd = yydest;
++  const char *yys = yysrc;
 +
-+	lc_order = sizeof(long) == 8 ? 1 : 0;
-+	lowcore = lowcore_ptr[cpu];
-+#ifndef CONFIG_64BIT
-+	if (MACHINE_HAS_IEEE)
-+		free_page((unsigned long) lowcore->extended_save_area_addr);
-+#endif
-+	free_page(lowcore->panic_stack - PAGE_SIZE);
-+	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
-+	free_pages((unsigned long) lowcore, lc_order);
-+	lowcore_ptr[cpu] = NULL;
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
- 
--int __cpu_up(unsigned int cpu)
-+/* Upping and downing of CPUs */
-+int __cpuinit __cpu_up(unsigned int cpu)
- {
- 	struct task_struct *idle;
- 	struct _lowcore *cpu_lowcore;
- 	struct stack_frame *sf;
- 	sigp_ccode ccode;
--	int curr_cpu;
- 
--	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
--		__cpu_logical_map[cpu] = (__u16) curr_cpu;
--		if (cpu_stopped(cpu))
--			break;
--	}
--
--	if (!cpu_stopped(cpu))
--		return -ENODEV;
-+	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
-+		return -EIO;
-+	if (smp_alloc_lowcore(cpu))
-+		return -ENOMEM;
- 
- 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
- 				   cpu, sigp_set_prefix);
-@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu)
- 	cpu_lowcore = lowcore_ptr[cpu];
- 	cpu_lowcore->kernel_stack = (unsigned long)
- 		task_stack_page(idle) + THREAD_SIZE;
-+	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
- 	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
- 				     - sizeof(struct pt_regs)
- 				     - sizeof(struct stack_frame));
-@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu)
- 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
- 	cpu_lowcore->current_task = (unsigned long) idle;
- 	cpu_lowcore->cpu_data.cpu_nr = cpu;
-+	cpu_lowcore->softirq_pending = 0;
-+	cpu_lowcore->ext_call_fast = 0;
- 	eieio();
- 
- 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
-@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu)
- 	return 0;
- }
- 
--static unsigned int __initdata additional_cpus;
--static unsigned int __initdata possible_cpus;
--
--void __init smp_setup_cpu_possible_map(void)
-+static int __init setup_possible_cpus(char *s)
- {
--	unsigned int phy_cpus, pos_cpus, cpu;
--
--	phy_cpus = smp_count_cpus();
--	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
--
--	if (possible_cpus)
--		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
-+	int pcpus, cpu;
- 
--	for (cpu = 0; cpu < pos_cpus; cpu++)
-+	pcpus = simple_strtoul(s, NULL, 0);
-+	cpu_possible_map = cpumask_of_cpu(0);
-+	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
- 		cpu_set(cpu, cpu_possible_map);
--
--	phy_cpus = min(phy_cpus, pos_cpus);
--
--	for (cpu = 0; cpu < phy_cpus; cpu++)
--		cpu_set(cpu, cpu_present_map);
--}
--
--#ifdef CONFIG_HOTPLUG_CPU
--
--static int __init setup_additional_cpus(char *s)
--{
--	additional_cpus = simple_strtoul(s, NULL, 0);
--	return 0;
--}
--early_param("additional_cpus", setup_additional_cpus);
--
--static int __init setup_possible_cpus(char *s)
--{
--	possible_cpus = simple_strtoul(s, NULL, 0);
- 	return 0;
- }
- early_param("possible_cpus", setup_possible_cpus);
- 
-+#ifdef CONFIG_HOTPLUG_CPU
++  while ((*yyd++ = *yys++) != '\0')
++    continue;
 +
- int __cpu_disable(void)
- {
- 	struct ec_creg_mask_parms cr_parms;
-@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu)
- 	/* Wait until target cpu is down */
- 	while (!smp_cpu_not_running(cpu))
- 		cpu_relax();
--	printk("Processor %d spun down\n", cpu);
-+	smp_free_lowcore(cpu);
-+	printk(KERN_INFO "Processor %d spun down\n", cpu);
- }
- 
- void cpu_die(void)
-@@ -625,49 +796,19 @@ void cpu_die(void)
- 
- #endif /* CONFIG_HOTPLUG_CPU */
- 
--/*
-- *	Cycle through the processors and setup structures.
-- */
--
- void __init smp_prepare_cpus(unsigned int max_cpus)
- {
--	unsigned long stack;
- 	unsigned int cpu;
--	int i;
++  return yyd - 1;
++}
++#  endif
++# endif
 +
-+	smp_detect_cpus();
- 
- 	/* request the 0x1201 emergency signal external interrupt */
- 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
- 		panic("Couldn't request external interrupt 0x1201");
- 	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
--	/*
--	 *  Initialize prefix pages and stacks for all possible cpus
--	 */
- 	print_cpu_info(&S390_lowcore.cpu_data);
-+	smp_alloc_lowcore(smp_processor_id());
- 
--	for_each_possible_cpu(i) {
--		lowcore_ptr[i] = (struct _lowcore *)
--			__get_free_pages(GFP_KERNEL | GFP_DMA,
--					 sizeof(void*) == 8 ? 1 : 0);
--		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
--		if (!lowcore_ptr[i] || !stack)
--			panic("smp_boot_cpus failed to allocate memory\n");
--
--		*(lowcore_ptr[i]) = S390_lowcore;
--		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
--		stack = __get_free_pages(GFP_KERNEL, 0);
--		if (!stack)
--			panic("smp_boot_cpus failed to allocate memory\n");
--		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
--#ifndef CONFIG_64BIT
--		if (MACHINE_HAS_IEEE) {
--			lowcore_ptr[i]->extended_save_area_addr =
--				(__u32) __get_free_pages(GFP_KERNEL, 0);
--			if (!lowcore_ptr[i]->extended_save_area_addr)
--				panic("smp_boot_cpus failed to "
--				      "allocate memory\n");
--		}
--#endif
--	}
- #ifndef CONFIG_64BIT
- 	if (MACHINE_HAS_IEEE)
- 		ctl_set_bit(14, 29); /* enable extended save area */
-@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void)
- {
- 	BUG_ON(smp_processor_id() != 0);
- 
-+	current_thread_info()->cpu = 0;
-+	cpu_set(0, cpu_present_map);
- 	cpu_set(0, cpu_online_map);
- 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
- 	current_set[0] = current;
-+	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
- 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
- }
- 
- void __init smp_cpus_done(unsigned int max_cpus)
- {
--	cpu_present_map = cpu_possible_map;
- }
- 
- /*
-@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier)
- 	return 0;
- }
- 
--static DEFINE_PER_CPU(struct cpu, cpu_devices);
-+#ifdef CONFIG_HOTPLUG_CPU
-+static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
++# ifndef yytnamerr
++/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
++   quotes and backslashes, so that it's suitable for yyerror.  The
++   heuristic is that double-quoting is unnecessary unless the string
++   contains an apostrophe, a comma, or backslash (other than
++   backslash-backslash).  YYSTR is taken from yytname.  If YYRES is
++   null, do not copy; instead, return the length of what the result
++   would have been.  */
++static YYSIZE_T
++yytnamerr (char *yyres, const char *yystr)
 +{
-+	ssize_t count;
++  if (*yystr == '"')
++    {
++      YYSIZE_T yyn = 0;
++      char const *yyp = yystr;
 +
-+	mutex_lock(&smp_cpu_state_mutex);
-+	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
-+	mutex_unlock(&smp_cpu_state_mutex);
-+	return count;
++      for (;;)
++	switch (*++yyp)
++	  {
++	  case '\'':
++	  case ',':
++	    goto do_not_strip_quotes;
++
++	  case '\\':
++	    if (*++yyp != '\\')
++	      goto do_not_strip_quotes;
++	    /* Fall through.  */
++	  default:
++	    if (yyres)
++	      yyres[yyn] = *yyp;
++	    yyn++;
++	    break;
++
++	  case '"':
++	    if (yyres)
++	      yyres[yyn] = '\0';
++	    return yyn;
++	  }
++    do_not_strip_quotes: ;
++    }
++
++  if (! yyres)
++    return yystrlen (yystr);
++
++  return yystpcpy (yyres, yystr) - yyres;
 +}
++# endif
 +
-+static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
-+				   size_t count)
++/* Copy into YYRESULT an error message about the unexpected token
++   YYCHAR while in state YYSTATE.  Return the number of bytes copied,
++   including the terminating null byte.  If YYRESULT is null, do not
++   copy anything; just return the number of bytes that would be
++   copied.  As a special case, return 0 if an ordinary "syntax error"
++   message will do.  Return YYSIZE_MAXIMUM if overflow occurs during
++   size calculation.  */
++static YYSIZE_T
++yysyntax_error (char *yyresult, int yystate, int yychar)
 +{
-+	int cpu = dev->id;
-+	int val, rc;
-+	char delim;
++  int yyn = yypact[yystate];
 +
-+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
-+		return -EINVAL;
-+	if (val != 0 && val != 1)
-+		return -EINVAL;
++  if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
++    return 0;
++  else
++    {
++      int yytype = YYTRANSLATE (yychar);
++      YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
++      YYSIZE_T yysize = yysize0;
++      YYSIZE_T yysize1;
++      int yysize_overflow = 0;
++      enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
++      char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
++      int yyx;
++
++# if 0
++      /* This is so xgettext sees the translatable formats that are
++	 constructed on the fly.  */
++      YY_("syntax error, unexpected %s");
++      YY_("syntax error, unexpected %s, expecting %s");
++      YY_("syntax error, unexpected %s, expecting %s or %s");
++      YY_("syntax error, unexpected %s, expecting %s or %s or %s");
++      YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
++# endif
++      char *yyfmt;
++      char const *yyf;
++      static char const yyunexpected[] = "syntax error, unexpected %s";
++      static char const yyexpecting[] = ", expecting %s";
++      static char const yyor[] = " or %s";
++      char yyformat[sizeof yyunexpected
++		    + sizeof yyexpecting - 1
++		    + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
++		       * (sizeof yyor - 1))];
++      char const *yyprefix = yyexpecting;
++
++      /* Start YYX at -YYN if negative to avoid negative indexes in
++	 YYCHECK.  */
++      int yyxbegin = yyn < 0 ? -yyn : 0;
++
++      /* Stay within bounds of both yycheck and yytname.  */
++      int yychecklim = YYLAST - yyn + 1;
++      int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
++      int yycount = 1;
++
++      yyarg[0] = yytname[yytype];
++      yyfmt = yystpcpy (yyformat, yyunexpected);
++
++      for (yyx = yyxbegin; yyx < yyxend; ++yyx)
++	if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
++	  {
++	    if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
++	      {
++		yycount = 1;
++		yysize = yysize0;
++		yyformat[sizeof yyunexpected - 1] = '\0';
++		break;
++	      }
++	    yyarg[yycount++] = yytname[yyx];
++	    yysize1 = yysize + yytnamerr (0, yytname[yyx]);
++	    yysize_overflow |= (yysize1 < yysize);
++	    yysize = yysize1;
++	    yyfmt = yystpcpy (yyfmt, yyprefix);
++	    yyprefix = yyor;
++	  }
++
++      yyf = YY_(yyformat);
++      yysize1 = yysize + yystrlen (yyf);
++      yysize_overflow |= (yysize1 < yysize);
++      yysize = yysize1;
 +
-+	mutex_lock(&smp_cpu_state_mutex);
-+	get_online_cpus();
-+	rc = -EBUSY;
-+	if (cpu_online(cpu))
-+		goto out;
-+	rc = 0;
-+	switch (val) {
-+	case 0:
-+		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
-+			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
-+			if (!rc)
-+				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
++      if (yysize_overflow)
++	return YYSIZE_MAXIMUM;
++
++      if (yyresult)
++	{
++	  /* Avoid sprintf, as that infringes on the user's name space.
++	     Don't have undefined behavior even if the translation
++	     produced a string with the wrong number of "%s"s.  */
++	  char *yyp = yyresult;
++	  int yyi = 0;
++	  while ((*yyp = *yyf) != '\0')
++	    {
++	      if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
++		{
++		  yyp += yytnamerr (yyp, yyarg[yyi++]);
++		  yyf += 2;
 +		}
-+		break;
-+	case 1:
-+		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
-+			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
-+			if (!rc)
-+				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
++	      else
++		{
++		  yyp++;
++		  yyf++;
 +		}
-+		break;
-+	default:
-+		break;
++	    }
 +	}
-+out:
-+	put_online_cpus();
-+	mutex_unlock(&smp_cpu_state_mutex);
-+	return rc ? rc : count;
++      return yysize;
++    }
 +}
-+static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
-+#endif /* CONFIG_HOTPLUG_CPU */
++#endif /* YYERROR_VERBOSE */
++
 +
-+static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
++/*-----------------------------------------------.
++| Release the memory associated to this symbol.  |
++`-----------------------------------------------*/
++
++/*ARGSUSED*/
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++static void
++yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp)
++#else
++static void
++yydestruct (yymsg, yytype, yyvaluep, yylocationp)
++    const char *yymsg;
++    int yytype;
++    YYSTYPE *yyvaluep;
++    YYLTYPE *yylocationp;
++#endif
 +{
-+	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
++  YYUSE (yyvaluep);
++  YYUSE (yylocationp);
++
++  if (!yymsg)
++    yymsg = "Deleting";
++  YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
++
++  switch (yytype)
++    {
++
++      default:
++	break;
++    }
 +}
-+static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
++
 +
++/* Prevent warnings from -Wmissing-prototypes.  */
 +
-+static struct attribute *cpu_common_attrs[] = {
-+#ifdef CONFIG_HOTPLUG_CPU
-+	&attr_configure.attr,
++#ifdef YYPARSE_PARAM
++#if defined __STDC__ || defined __cplusplus
++int yyparse (void *YYPARSE_PARAM);
++#else
++int yyparse ();
 +#endif
-+	&attr_address.attr,
-+	NULL,
-+};
++#else /* ! YYPARSE_PARAM */
++#if defined __STDC__ || defined __cplusplus
++int yyparse (void);
++#else
++int yyparse ();
++#endif
++#endif /* ! YYPARSE_PARAM */
 +
-+static struct attribute_group cpu_common_attr_group = {
-+	.attrs = cpu_common_attrs,
-+};
- 
- static ssize_t show_capability(struct sys_device *dev, char *buf)
- {
-@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
- }
- static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
- 
--static struct attribute *cpu_attrs[] = {
-+static struct attribute *cpu_online_attrs[] = {
- 	&attr_capability.attr,
- 	&attr_idle_count.attr,
- 	&attr_idle_time_us.attr,
- 	NULL,
- };
- 
--static struct attribute_group cpu_attr_group = {
--	.attrs = cpu_attrs,
-+static struct attribute_group cpu_online_attr_group = {
-+	.attrs = cpu_online_attrs,
- };
- 
- static int __cpuinit smp_cpu_notify(struct notifier_block *self,
-@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
- 		idle->idle_time = 0;
- 		idle->idle_count = 0;
- 		spin_unlock_irq(&idle->lock);
--		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
-+		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
- 			return NOTIFY_BAD;
- 		break;
- 	case CPU_DEAD:
- 	case CPU_DEAD_FROZEN:
--		sysfs_remove_group(&s->kobj, &cpu_attr_group);
-+		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
- 		break;
- 	}
- 	return NOTIFY_OK;
-@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
- 	.notifier_call = smp_cpu_notify,
- };
- 
-+static int smp_add_present_cpu(int cpu)
-+{
-+	struct cpu *c = &per_cpu(cpu_devices, cpu);
-+	struct sys_device *s = &c->sysdev;
-+	int rc;
 +
-+	c->hotpluggable = 1;
-+	rc = register_cpu(c, cpu);
-+	if (rc)
-+		goto out;
-+	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
-+	if (rc)
-+		goto out_cpu;
-+	if (!cpu_online(cpu))
-+		goto out;
-+	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
-+	if (!rc)
-+		return 0;
-+	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
-+out_cpu:
-+#ifdef CONFIG_HOTPLUG_CPU
-+	unregister_cpu(c);
-+#endif
-+out:
-+	return rc;
-+}
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+static ssize_t rescan_store(struct sys_device *dev, const char *buf,
-+			    size_t count)
-+{
-+	cpumask_t newcpus;
-+	int cpu;
-+	int rc;
++/* The look-ahead symbol.  */
++int yychar;
 +
-+	mutex_lock(&smp_cpu_state_mutex);
-+	get_online_cpus();
-+	newcpus = cpu_present_map;
-+	rc = smp_rescan_cpus();
-+	if (rc)
-+		goto out;
-+	cpus_andnot(newcpus, cpu_present_map, newcpus);
-+	for_each_cpu_mask(cpu, newcpus) {
-+		rc = smp_add_present_cpu(cpu);
-+		if (rc)
-+			cpu_clear(cpu, cpu_present_map);
-+	}
-+	rc = 0;
-+out:
-+	put_online_cpus();
-+	mutex_unlock(&smp_cpu_state_mutex);
-+	return rc ? rc : count;
-+}
-+static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
-+#endif /* CONFIG_HOTPLUG_CPU */
++/* The semantic value of the look-ahead symbol.  */
++YYSTYPE yylval;
 +
- static int __init topology_init(void)
- {
- 	int cpu;
-@@ -800,16 +1071,14 @@ static int __init topology_init(void)
- 
- 	register_cpu_notifier(&smp_cpu_nb);
- 
--	for_each_possible_cpu(cpu) {
--		struct cpu *c = &per_cpu(cpu_devices, cpu);
--		struct sys_device *s = &c->sysdev;
--
--		c->hotpluggable = 1;
--		register_cpu(c, cpu);
--		if (!cpu_online(cpu))
--			continue;
--		s = &c->sysdev;
--		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
-+#ifdef CONFIG_HOTPLUG_CPU
-+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
-+			       &attr_rescan.attr);
-+	if (rc)
-+		return rc;
++/* Number of syntax errors so far.  */
++int yynerrs;
++/* Location data for the look-ahead symbol.  */
++YYLTYPE yylloc;
++
++
++
++/*----------.
++| yyparse.  |
++`----------*/
++
++#ifdef YYPARSE_PARAM
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++int
++yyparse (void *YYPARSE_PARAM)
++#else
++int
++yyparse (YYPARSE_PARAM)
++    void *YYPARSE_PARAM;
 +#endif
-+	for_each_present_cpu(cpu) {
-+		rc = smp_add_present_cpu(cpu);
- 		if (rc)
- 			return rc;
- 	}
-diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
-index 22b800c..3bbac12 100644
---- a/arch/s390/kernel/time.c
-+++ b/arch/s390/kernel/time.c
-@@ -1145,7 +1145,7 @@ static void etr_work_fn(struct work_struct *work)
-  * Sysfs interface functions
-  */
- static struct sysdev_class etr_sysclass = {
--	set_kset_name("etr")
-+	.name	= "etr",
- };
- 
- static struct sys_device etr_port0_dev = {
-diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
-index 8ed16a8..52b8342 100644
---- a/arch/s390/kernel/traps.c
-+++ b/arch/s390/kernel/traps.c
-@@ -31,6 +31,7 @@
- #include <linux/reboot.h>
- #include <linux/kprobes.h>
- #include <linux/bug.h>
-+#include <linux/utsname.h>
- #include <asm/system.h>
- #include <asm/uaccess.h>
- #include <asm/io.h>
-@@ -168,9 +169,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
-  */
- void dump_stack(void)
- {
-+	printk("CPU: %d %s %s %.*s\n",
-+	       task_thread_info(current)->cpu, print_tainted(),
-+	       init_utsname()->release,
-+	       (int)strcspn(init_utsname()->version, " "),
-+	       init_utsname()->version);
-+	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
-+	       current->comm, current->pid, current,
-+	       (void *) current->thread.ksp);
- 	show_stack(NULL, NULL);
- }
--
- EXPORT_SYMBOL(dump_stack);
- 
- static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
-@@ -258,8 +266,14 @@ void die(const char * str, struct pt_regs * regs, long err)
- 	console_verbose();
- 	spin_lock_irq(&die_lock);
- 	bust_spinlocks(1);
--	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
--	print_modules();
-+	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
-+#ifdef CONFIG_PREEMPT
-+	printk("PREEMPT ");
++#else /* ! YYPARSE_PARAM */
++#if (defined __STDC__ || defined __C99__FUNC__ \
++     || defined __cplusplus || defined _MSC_VER)
++int
++yyparse (void)
++#else
++int
++yyparse ()
++
 +#endif
-+#ifdef CONFIG_SMP
-+	printk("SMP");
 +#endif
-+	printk("\n");
- 	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
- 	show_regs(regs);
- 	bust_spinlocks(0);
-diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
-index 849120e..7d43c3c 100644
---- a/arch/s390/kernel/vmlinux.lds.S
-+++ b/arch/s390/kernel/vmlinux.lds.S
-@@ -17,6 +17,12 @@ ENTRY(_start)
- jiffies = jiffies_64;
- #endif
- 
-+PHDRS {
-+	text PT_LOAD FLAGS(5);	/* R_E */
-+	data PT_LOAD FLAGS(7);	/* RWE */
-+	note PT_NOTE FLAGS(0);	/* ___ */
-+}
++{
++  
++  int yystate;
++  int yyn;
++  int yyresult;
++  /* Number of tokens to shift before error messages enabled.  */
++  int yyerrstatus;
++  /* Look-ahead token as an internal (translated) token number.  */
++  int yytoken = 0;
++#if YYERROR_VERBOSE
++  /* Buffer for error messages, and its allocated size.  */
++  char yymsgbuf[128];
++  char *yymsg = yymsgbuf;
++  YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
++#endif
++
++  /* Three stacks and their tools:
++     `yyss': related to states,
++     `yyvs': related to semantic values,
++     `yyls': related to locations.
++
++     Refer to the stacks thru separate pointers, to allow yyoverflow
++     to reallocate them elsewhere.  */
++
++  /* The state stack.  */
++  yytype_int16 yyssa[YYINITDEPTH];
++  yytype_int16 *yyss = yyssa;
++  yytype_int16 *yyssp;
++
++  /* The semantic value stack.  */
++  YYSTYPE yyvsa[YYINITDEPTH];
++  YYSTYPE *yyvs = yyvsa;
++  YYSTYPE *yyvsp;
++
++  /* The location stack.  */
++  YYLTYPE yylsa[YYINITDEPTH];
++  YYLTYPE *yyls = yylsa;
++  YYLTYPE *yylsp;
++  /* The locations where the error started and ended.  */
++  YYLTYPE yyerror_range[2];
++
++#define YYPOPSTACK(N)   (yyvsp -= (N), yyssp -= (N), yylsp -= (N))
++
++  YYSIZE_T yystacksize = YYINITDEPTH;
++
++  /* The variables used to return semantic value and location from the
++     action routines.  */
++  YYSTYPE yyval;
++  YYLTYPE yyloc;
++
++  /* The number of symbols on the RHS of the reduced rule.
++     Keep to zero when no symbol should be popped.  */
++  int yylen = 0;
++
++  YYDPRINTF ((stderr, "Starting parse\n"));
++
++  yystate = 0;
++  yyerrstatus = 0;
++  yynerrs = 0;
++  yychar = YYEMPTY;		/* Cause a token to be read.  */
++
++  /* Initialize stack pointers.
++     Waste one element of value and location stack
++     so that they stay on the same level as the state stack.
++     The wasted elements are never initialized.  */
++
++  yyssp = yyss;
++  yyvsp = yyvs;
++  yylsp = yyls;
++#if YYLTYPE_IS_TRIVIAL
++  /* Initialize the default location before parsing starts.  */
++  yylloc.first_line   = yylloc.last_line   = 1;
++  yylloc.first_column = yylloc.last_column = 0;
++#endif
++
++  goto yysetstate;
++
++/*------------------------------------------------------------.
++| yynewstate -- Push a new state, which is found in yystate.  |
++`------------------------------------------------------------*/
++ yynewstate:
++  /* In all cases, when you get here, the value and location stacks
++     have just been pushed.  So pushing a state here evens the stacks.  */
++  yyssp++;
 +
- SECTIONS
- {
- 	. = 0x00000000;
-@@ -33,6 +39,9 @@ SECTIONS
- 
- 	_etext = .;		/* End of text section */
- 
-+	NOTES :text :note
-+	BUG_TABLE :text
++ yysetstate:
++  *yyssp = yystate;
 +
- 	RODATA
- 
- #ifdef CONFIG_SHARED_KERNEL
-@@ -49,9 +58,6 @@ SECTIONS
- 		__stop___ex_table = .;
- 	}
- 
--	NOTES
--	BUG_TABLE
--
- 	.data : {		/* Data */
- 		DATA_DATA
- 		CONSTRUCTORS
-@@ -91,7 +97,7 @@ SECTIONS
- 	__init_begin = .;
- 	.init.text : {
- 		_sinittext = .;
--		*(.init.text)
-+		INIT_TEXT
- 		_einittext = .;
- 	}
- 	/*
-@@ -99,11 +105,11 @@ SECTIONS
- 	 * to deal with references from __bug_table
- 	*/
- 	.exit.text : {
--		*(.exit.text)
-+		EXIT_TEXT
- 	}
- 
- 	.init.data : {
--		*(.init.data)
-+		INIT_DATA
- 	}
- 	. = ALIGN(0x100);
- 	.init.setup : {
-@@ -150,7 +156,7 @@ SECTIONS
- 
- 	/* Sections to be discarded */
- 	/DISCARD/ : {
--		*(.exit.data)
-+		EXIT_DATA
- 		*(.exitcall.exit)
- 	}
- 
-diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
-index 8d76403..e41f400 100644
---- a/arch/s390/lib/spinlock.c
-+++ b/arch/s390/lib/spinlock.c
-@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
- 		_raw_yield();
- }
- 
--void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
-+void _raw_spin_lock_wait(raw_spinlock_t *lp)
- {
- 	int count = spin_retry;
- 	unsigned int cpu = ~smp_processor_id();
-@@ -53,15 +53,36 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
- 		}
- 		if (__raw_spin_is_locked(lp))
- 			continue;
--		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
--			lp->owner_pc = pc;
-+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
- 			return;
--		}
- 	}
- }
- EXPORT_SYMBOL(_raw_spin_lock_wait);
- 
--int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
-+void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
-+{
-+	int count = spin_retry;
-+	unsigned int cpu = ~smp_processor_id();
++  if (yyss + yystacksize - 1 <= yyssp)
++    {
++      /* Get the current used size of the three stacks, in elements.  */
++      YYSIZE_T yysize = yyssp - yyss + 1;
 +
-+	local_irq_restore(flags);
-+	while (1) {
-+		if (count-- <= 0) {
-+			unsigned int owner = lp->owner_cpu;
-+			if (owner != 0)
-+				_raw_yield_cpu(~owner);
-+			count = spin_retry;
-+		}
-+		if (__raw_spin_is_locked(lp))
-+			continue;
-+		local_irq_disable();
-+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
-+			return;
-+		local_irq_restore(flags);
-+	}
-+}
-+EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
++#ifdef yyoverflow
++      {
++	/* Give user a chance to reallocate the stack.  Use copies of
++	   these so that the &'s don't force the real ones into
++	   memory.  */
++	YYSTYPE *yyvs1 = yyvs;
++	yytype_int16 *yyss1 = yyss;
++	YYLTYPE *yyls1 = yyls;
++
++	/* Each stack pointer address is followed by the size of the
++	   data in use in that stack, in bytes.  This used to be a
++	   conditional around just the two extra args, but that might
++	   be undefined if yyoverflow is a macro.  */
++	yyoverflow (YY_("memory exhausted"),
++		    &yyss1, yysize * sizeof (*yyssp),
++		    &yyvs1, yysize * sizeof (*yyvsp),
++		    &yyls1, yysize * sizeof (*yylsp),
++		    &yystacksize);
++	yyls = yyls1;
++	yyss = yyss1;
++	yyvs = yyvs1;
++      }
++#else /* no yyoverflow */
++# ifndef YYSTACK_RELOCATE
++      goto yyexhaustedlab;
++# else
++      /* Extend the stack our own way.  */
++      if (YYMAXDEPTH <= yystacksize)
++	goto yyexhaustedlab;
++      yystacksize *= 2;
++      if (YYMAXDEPTH < yystacksize)
++	yystacksize = YYMAXDEPTH;
++
++      {
++	yytype_int16 *yyss1 = yyss;
++	union yyalloc *yyptr =
++	  (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
++	if (! yyptr)
++	  goto yyexhaustedlab;
++	YYSTACK_RELOCATE (yyss);
++	YYSTACK_RELOCATE (yyvs);
++	YYSTACK_RELOCATE (yyls);
++#  undef YYSTACK_RELOCATE
++	if (yyss1 != yyssa)
++	  YYSTACK_FREE (yyss1);
++      }
++# endif
++#endif /* no yyoverflow */
 +
-+int _raw_spin_trylock_retry(raw_spinlock_t *lp)
- {
- 	unsigned int cpu = ~smp_processor_id();
- 	int count;
-@@ -69,10 +90,8 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
- 	for (count = spin_retry; count > 0; count--) {
- 		if (__raw_spin_is_locked(lp))
- 			continue;
--		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
--			lp->owner_pc = pc;
-+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
- 			return 1;
--		}
- 	}
- 	return 0;
- }
-diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
-index 394980b..880b0eb 100644
---- a/arch/s390/mm/extmem.c
-+++ b/arch/s390/mm/extmem.c
-@@ -83,7 +83,7 @@ struct dcss_segment {
- };
- 
- static DEFINE_MUTEX(dcss_lock);
--static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
-+static LIST_HEAD(dcss_list);
- static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
- 					"EW/EN-MIXED" };
- 
-diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
-index fb9c5a8..79d13a1 100644
---- a/arch/s390/mm/vmem.c
-+++ b/arch/s390/mm/vmem.c
-@@ -15,10 +15,6 @@
- #include <asm/setup.h>
- #include <asm/tlbflush.h>
- 
--unsigned long vmalloc_end;
--EXPORT_SYMBOL(vmalloc_end);
--
--static struct page *vmem_map;
- static DEFINE_MUTEX(vmem_mutex);
- 
- struct memory_segment {
-@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
- 	pte_t  pte;
- 	int ret = -ENOMEM;
- 
--	map_start = vmem_map + PFN_DOWN(start);
--	map_end	= vmem_map + PFN_DOWN(start + size);
-+	map_start = VMEM_MAP + PFN_DOWN(start);
-+	map_end	= VMEM_MAP + PFN_DOWN(start + size);
- 
- 	start_addr = (unsigned long) map_start & PAGE_MASK;
- 	end_addr = PFN_ALIGN((unsigned long) map_end);
-@@ -240,10 +236,10 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
- {
- 	int ret;
- 
--	ret = vmem_add_range(start, size);
-+	ret = vmem_add_mem_map(start, size);
- 	if (ret)
- 		return ret;
--	return vmem_add_mem_map(start, size);
-+	return vmem_add_range(start, size);
- }
- 
- /*
-@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
- {
- 	struct memory_segment *tmp;
- 
--	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
-+	if (seg->start + seg->size >= VMALLOC_START ||
- 	    seg->start + seg->size < seg->start)
- 		return -ERANGE;
- 
-@@ -357,17 +353,15 @@ out:
- 
- /*
-  * map whole physical memory to virtual memory (identity mapping)
-+ * we reserve enough space in the vmalloc area for vmemmap to hotplug
-+ * additional memory segments.
-  */
- void __init vmem_map_init(void)
- {
--	unsigned long map_size;
- 	int i;
- 
--	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
--	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
--	vmem_map = (struct page *) vmalloc_end;
--	NODE_DATA(0)->node_mem_map = vmem_map;
--
-+	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
-+	NODE_DATA(0)->node_mem_map = VMEM_MAP;
- 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
- 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
- }
-@@ -382,7 +376,7 @@ static int __init vmem_convert_memory_chunk(void)
- 	int i;
- 
- 	mutex_lock(&vmem_mutex);
--	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-+	for (i = 0; i < MEMORY_CHUNKS; i++) {
- 		if (!memory_chunk[i].size)
- 			continue;
- 		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
-diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
-index 496d635..1cd9c8f 100644
---- a/arch/sh/Kconfig
-+++ b/arch/sh/Kconfig
-@@ -6,8 +6,7 @@
- mainmenu "Linux/SuperH Kernel Configuration"
- 
- config SUPERH
--	bool
--	default y
-+	def_bool y
- 	select EMBEDDED
- 	help
- 	  The SuperH is a RISC processor targeted for use in embedded systems
-@@ -15,36 +14,36 @@ config SUPERH
- 	  gaming console.  The SuperH port has a home page at
- 	  <http://www.linux-sh.org/>.
- 
-+config SUPERH32
-+	def_bool !SUPERH64
++      yyssp = yyss + yysize - 1;
++      yyvsp = yyvs + yysize - 1;
++      yylsp = yyls + yysize - 1;
 +
-+config SUPERH64
-+	def_bool y if CPU_SH5
++      YYDPRINTF ((stderr, "Stack size increased to %lu\n",
++		  (unsigned long int) yystacksize));
 +
- config RWSEM_GENERIC_SPINLOCK
--	bool
--	default y
-+	def_bool y
- 
- config RWSEM_XCHGADD_ALGORITHM
- 	bool
- 
- config GENERIC_BUG
- 	def_bool y
--	depends on BUG
-+	depends on BUG && SUPERH32
- 
- config GENERIC_FIND_NEXT_BIT
--	bool
--	default y
-+	def_bool y
- 
- config GENERIC_HWEIGHT
--	bool
--	default y
-+	def_bool y
- 
- config GENERIC_HARDIRQS
--	bool
--	default y
-+	def_bool y
- 
- config GENERIC_IRQ_PROBE
--	bool
--	default y
-+	def_bool y
- 
- config GENERIC_CALIBRATE_DELAY
--	bool
--	default y
-+	def_bool y
- 
- config GENERIC_IOMAP
- 	bool
-@@ -75,20 +74,16 @@ config ARCH_MAY_HAVE_PC_FDC
- 	bool
- 
- config STACKTRACE_SUPPORT
--	bool
--	default y
-+	def_bool y
- 
- config LOCKDEP_SUPPORT
--	bool
--	default y
-+	def_bool y
- 
- config ARCH_HAS_ILOG2_U32
--	bool
--	default n
-+	def_bool n
- 
- config ARCH_HAS_ILOG2_U64
--	bool
--	default n
-+	def_bool n
- 
- config ARCH_NO_VIRT_TO_BUS
- 	def_bool y
-@@ -97,110 +92,234 @@ source "init/Kconfig"
- 
- menu "System type"
- 
--source "arch/sh/mm/Kconfig"
-+#
-+# Processor families
-+#
-+config CPU_SH2
-+	bool
- 
--menu "Processor features"
-+config CPU_SH2A
-+	bool
-+	select CPU_SH2
++      if (yyss + yystacksize - 1 <= yyssp)
++	YYABORT;
++    }
 +
-+config CPU_SH3
-+	bool
-+	select CPU_HAS_INTEVT
-+	select CPU_HAS_SR_RB
++  YYDPRINTF ((stderr, "Entering state %d\n", yystate));
 +
-+config CPU_SH4
-+	bool
-+	select CPU_HAS_INTEVT
-+	select CPU_HAS_SR_RB
-+	select CPU_HAS_PTEA if !CPU_SH4A || CPU_SHX2
-+	select CPU_HAS_FPU if !CPU_SH4AL_DSP
++  goto yybackup;
 +
-+config CPU_SH4A
-+	bool
-+	select CPU_SH4
++/*-----------.
++| yybackup.  |
++`-----------*/
++yybackup:
 +
-+config CPU_SH4AL_DSP
-+	bool
-+	select CPU_SH4A
-+	select CPU_HAS_DSP
++  /* Do appropriate processing given the current state.  Read a
++     look-ahead token if we need one and don't already have one.  */
 +
-+config CPU_SH5
-+	bool
-+	select CPU_HAS_FPU
++  /* First try to decide what to do without reference to look-ahead token.  */
++  yyn = yypact[yystate];
++  if (yyn == YYPACT_NINF)
++    goto yydefault;
 +
-+config CPU_SHX2
-+	bool
++  /* Not known => get a look-ahead token if don't already have one.  */
 +
-+config CPU_SHX3
-+	bool
- 
- choice
--	prompt "Endianess selection" 
--	default CPU_LITTLE_ENDIAN
--	help
--	  Some SuperH machines can be configured for either little or big
--	  endian byte order. These modes require different kernels.
-+	prompt "Processor sub-type selection"
- 
--config CPU_LITTLE_ENDIAN
--	bool "Little Endian"
-+#
-+# Processor subtypes
-+#
- 
--config CPU_BIG_ENDIAN
--	bool "Big Endian"
-+# SH-2 Processor Support
- 
--endchoice
-+config CPU_SUBTYPE_SH7619
-+	bool "Support SH7619 processor"
-+	select CPU_SH2
++  /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol.  */
++  if (yychar == YYEMPTY)
++    {
++      YYDPRINTF ((stderr, "Reading a token: "));
++      yychar = YYLEX;
++    }
 +
-+# SH-2A Processor Support
++  if (yychar <= YYEOF)
++    {
++      yychar = yytoken = YYEOF;
++      YYDPRINTF ((stderr, "Now at end of input.\n"));
++    }
++  else
++    {
++      yytoken = YYTRANSLATE (yychar);
++      YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
++    }
 +
-+config CPU_SUBTYPE_SH7203
-+	bool "Support SH7203 processor"
-+	select CPU_SH2A
-+	select CPU_HAS_FPU
++  /* If the proper action on seeing token YYTOKEN is to reduce or to
++     detect an error, take that action.  */
++  yyn += yytoken;
++  if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
++    goto yydefault;
++  yyn = yytable[yyn];
++  if (yyn <= 0)
++    {
++      if (yyn == 0 || yyn == YYTABLE_NINF)
++	goto yyerrlab;
++      yyn = -yyn;
++      goto yyreduce;
++    }
 +
-+config CPU_SUBTYPE_SH7206
-+	bool "Support SH7206 processor"
-+	select CPU_SH2A
- 
--config SH_FPU
--	bool "FPU support"
--	depends on CPU_HAS_FPU
--	default y
-+config CPU_SUBTYPE_SH7263
-+	bool "Support SH7263 processor"
-+	select CPU_SH2A
-+	select CPU_HAS_FPU
++  if (yyn == YYFINAL)
++    YYACCEPT;
 +
-+# SH-3 Processor Support
++  /* Count tokens shifted since error; after three, turn off error
++     status.  */
++  if (yyerrstatus)
++    yyerrstatus--;
++
++  /* Shift the look-ahead token.  */
++  YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
++
++  /* Discard the shifted token unless it is eof.  */
++  if (yychar != YYEOF)
++    yychar = YYEMPTY;
++
++  yystate = yyn;
++  *++yyvsp = yylval;
++  *++yylsp = yylloc;
++  goto yynewstate;
++
++
++/*-----------------------------------------------------------.
++| yydefault -- do the default action for the current state.  |
++`-----------------------------------------------------------*/
++yydefault:
++  yyn = yydefact[yystate];
++  if (yyn == 0)
++    goto yyerrlab;
++  goto yyreduce;
++
++
++/*-----------------------------.
++| yyreduce -- Do a reduction.  |
++`-----------------------------*/
++yyreduce:
++  /* yyn is the number of a rule to reduce with.  */
++  yylen = yyr2[yyn];
++
++  /* If YYLEN is nonzero, implement the default value of the action:
++     `$$ = $1'.
++
++     Otherwise, the following line sets YYVAL to garbage.
++     This behavior is undocumented and Bison
++     users should not rely upon it.  Assigning to YYVAL
++     unconditionally makes the parser a bit smaller, and it avoids a
++     GCC warning that YYVAL may be used uninitialized.  */
++  yyval = yyvsp[1-yylen];
++
++  /* Default location.  */
++  YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen);
++  YY_REDUCE_PRINT (yyn);
++  switch (yyn)
++    {
++        case 2:
++#line 86 "dtc-parser.y"
++    {
++			the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node));
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH7705
-+	bool "Support SH7705 processor"
-+	select CPU_SH3
++  case 3:
++#line 90 "dtc-parser.y"
++    {
++			the_boot_info = build_boot_info((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].node));
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH7706
-+	bool "Support SH7706 processor"
-+	select CPU_SH3
- 	help
--	  Selecting this option will enable support for SH processors that
--	  have FPU units (ie, SH77xx).
-+	  Select SH7706 if you have a 133 Mhz SH-3 HD6417706 CPU.
- 
--	  This option must be set in order to enable the FPU.
-+config CPU_SUBTYPE_SH7707
-+	bool "Support SH7707 processor"
-+	select CPU_SH3
-+	help
-+	  Select SH7707 if you have a  60 Mhz SH-3 HD6417707 CPU.
- 
--config SH_FPU_EMU
--	bool "FPU emulation support"
--	depends on !SH_FPU && EXPERIMENTAL
--	default n
-+config CPU_SUBTYPE_SH7708
-+	bool "Support SH7708 processor"
-+	select CPU_SH3
- 	help
--	  Selecting this option will enable support for software FPU emulation.
--	  Most SH-3 users will want to say Y here, whereas most SH-4 users will
--	  want to say N.
-+	  Select SH7708 if you have a  60 Mhz SH-3 HD6417708S or
-+	  if you have a 100 Mhz SH-3 HD6417708R CPU.
- 
--config SH_DSP
--	bool "DSP support"
--	depends on CPU_HAS_DSP
--	default y
-+config CPU_SUBTYPE_SH7709
-+	bool "Support SH7709 processor"
-+	select CPU_SH3
- 	help
--	  Selecting this option will enable support for SH processors that
--	  have DSP units (ie, SH2-DSP, SH3-DSP, and SH4AL-DSP).
-+	  Select SH7709 if you have a  80 Mhz SH-3 HD6417709 CPU.
- 
--	  This option must be set in order to enable the DSP.
-+config CPU_SUBTYPE_SH7710
-+	bool "Support SH7710 processor"
-+	select CPU_SH3
-+	select CPU_HAS_DSP
-+	help
-+	  Select SH7710 if you have a SH3-DSP SH7710 CPU.
- 
--config SH_ADC
--	bool "ADC support"
--	depends on CPU_SH3
--	default y
-+config CPU_SUBTYPE_SH7712
-+	bool "Support SH7712 processor"
-+	select CPU_SH3
-+	select CPU_HAS_DSP
- 	help
--	  Selecting this option will allow the Linux kernel to use SH3 on-chip
--	  ADC module.
-+	  Select SH7712 if you have a SH3-DSP SH7712 CPU.
- 
--	  If unsure, say N.
-+config CPU_SUBTYPE_SH7720
-+	bool "Support SH7720 processor"
-+	select CPU_SH3
-+	select CPU_HAS_DSP
-+	help
-+	  Select SH7720 if you have a SH3-DSP SH7720 CPU.
- 
--config SH_STORE_QUEUES
--	bool "Support for Store Queues"
--	depends on CPU_SH4
-+config CPU_SUBTYPE_SH7721
-+	bool "Support SH7721 processor"
-+	select CPU_SH3
-+	select CPU_HAS_DSP
- 	help
--	  Selecting this option will enable an in-kernel API for manipulating
--	  the store queues integrated in the SH-4 processors.
-+	  Select SH7721 if you have a SH3-DSP SH7721 CPU.
- 
--config SPECULATIVE_EXECUTION
--	bool "Speculative subroutine return"
--	depends on CPU_SUBTYPE_SH7780 && EXPERIMENTAL
-+# SH-4 Processor Support
++  case 4:
++#line 97 "dtc-parser.y"
++    {
++			(yyval.re) = NULL;
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH7750
-+	bool "Support SH7750 processor"
-+	select CPU_SH4
- 	help
--	  This enables support for a speculative instruction fetch for
--	  subroutine return. There are various pitfalls associated with
--	  this, as outlined in the SH7780 hardware manual.
-+	  Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
- 
--	  If unsure, say N.
-+config CPU_SUBTYPE_SH7091
-+	bool "Support SH7091 processor"
-+	select CPU_SH4
-+	help
-+	  Select SH7091 if you have an SH-4 based Sega device (such as
-+	  the Dreamcast, Naomi, and Naomi 2).
- 
--config CPU_HAS_INTEVT
--	bool
-+config CPU_SUBTYPE_SH7750R
-+	bool "Support SH7750R processor"
-+	select CPU_SH4
- 
--config CPU_HAS_MASKREG_IRQ
--	bool
-+config CPU_SUBTYPE_SH7750S
-+	bool "Support SH7750S processor"
-+	select CPU_SH4
- 
--config CPU_HAS_IPR_IRQ
--	bool
-+config CPU_SUBTYPE_SH7751
-+	bool "Support SH7751 processor"
-+	select CPU_SH4
-+	help
-+	  Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
-+	  or if you have a HD6417751R CPU.
- 
--config CPU_HAS_SR_RB
--	bool
-+config CPU_SUBTYPE_SH7751R
-+	bool "Support SH7751R processor"
-+	select CPU_SH4
++  case 5:
++#line 101 "dtc-parser.y"
++    {
++			(yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH7760
-+	bool "Support SH7760 processor"
-+	select CPU_SH4
++  case 6:
++#line 108 "dtc-parser.y"
++    {
++			(yyval.re) = build_reserve_entry((yyvsp[(3) - (5)].addr), (yyvsp[(4) - (5)].addr), (yyvsp[(1) - (5)].labelref));
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH4_202
-+	bool "Support SH4-202 processor"
-+	select CPU_SH4
++  case 7:
++#line 115 "dtc-parser.y"
++    {
++			(yyval.re) = NULL;
++		;}
++    break;
 +
-+# SH-4A Processor Support
++  case 8:
++#line 119 "dtc-parser.y"
++    {
++			(yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH7763
-+	bool "Support SH7763 processor"
-+	select CPU_SH4A
- 	help
--	  This will enable the use of SR.RB register bank usage. Processors
--	  that are lacking this bit must have another method in place for
--	  accomplishing what is taken care of by the banked registers.
-+	  Select SH7763 if you have a SH4A SH7763(R5S77631) CPU.
- 
--	  See <file:Documentation/sh/register-banks.txt> for further
--	  information on SR.RB and register banking in the kernel in general.
-+config CPU_SUBTYPE_SH7770
-+	bool "Support SH7770 processor"
-+	select CPU_SH4A
- 
--config CPU_HAS_PTEA
--	bool
-+config CPU_SUBTYPE_SH7780
-+	bool "Support SH7780 processor"
-+	select CPU_SH4A
- 
--config CPU_HAS_DSP
--	bool
-+config CPU_SUBTYPE_SH7785
-+	bool "Support SH7785 processor"
-+	select CPU_SH4A
-+	select CPU_SHX2
-+	select ARCH_SPARSEMEM_ENABLE
-+	select SYS_SUPPORTS_NUMA
- 
--config CPU_HAS_FPU
--	bool
-+config CPU_SUBTYPE_SHX3
-+	bool "Support SH-X3 processor"
-+	select CPU_SH4A
-+	select CPU_SHX3
-+	select ARCH_SPARSEMEM_ENABLE
-+	select SYS_SUPPORTS_NUMA
-+	select SYS_SUPPORTS_SMP
- 
--endmenu
-+# SH4AL-DSP Processor Support
++  case 9:
++#line 126 "dtc-parser.y"
++    {
++			(yyval.re) = (yyvsp[(1) - (1)].re);
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH7343
-+	bool "Support SH7343 processor"
-+	select CPU_SH4AL_DSP
++  case 10:
++#line 130 "dtc-parser.y"
++    {
++			(yyval.re) = build_reserve_entry((yyvsp[(3) - (6)].addr), (yyvsp[(5) - (6)].addr) - (yyvsp[(3) - (6)].addr) + 1, (yyvsp[(1) - (6)].labelref));
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH7722
-+	bool "Support SH7722 processor"
-+	select CPU_SH4AL_DSP
-+	select CPU_SHX2
-+	select ARCH_SPARSEMEM_ENABLE
-+	select SYS_SUPPORTS_NUMA
++  case 11:
++#line 137 "dtc-parser.y"
++    {
++			(yyval.addr) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64);
++		;}
++    break;
 +
-+# SH-5 Processor Support
++  case 12:
++#line 141 "dtc-parser.y"
++    {
++			(yyval.addr) = eval_literal((yyvsp[(1) - (1)].literal), 16, 64);
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH5_101
-+	bool "Support SH5-101 processor"
-+	select CPU_SH5
++  case 13:
++#line 148 "dtc-parser.y"
++    {
++			(yyval.node) = name_node((yyvsp[(2) - (2)].node), "", NULL);
++		;}
++    break;
 +
-+config CPU_SUBTYPE_SH5_103
-+	bool "Support SH5-103 processor"
++  case 14:
++#line 155 "dtc-parser.y"
++    {
++			(yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist));
++		;}
++    break;
 +
-+endchoice
++  case 15:
++#line 162 "dtc-parser.y"
++    {
++			(yyval.proplist) = NULL;
++		;}
++    break;
 +
-+source "arch/sh/mm/Kconfig"
-+source "arch/sh/Kconfig.cpu"
- 
- menu "Board support"
- 
-@@ -321,13 +440,6 @@ config SH_SECUREEDGE5410
- 	  This includes both the OEM SecureEdge products as well as the
- 	  SME product line.
- 
--config SH_HS7751RVOIP
--	bool "HS7751RVOIP"
--	depends on CPU_SUBTYPE_SH7751R
--	help
--	  Select HS7751RVOIP if configuring for a Renesas Technology
--	  Sales VoIP board.
--
- config SH_7710VOIPGW
- 	bool "SH7710-VOIP-GW"
- 	depends on CPU_SUBTYPE_SH7710
-@@ -343,6 +455,14 @@ config SH_RTS7751R2D
- 	  Select RTS7751R2D if configuring for a Renesas Technology
- 	  Sales SH-Graphics board.
- 
-+config SH_SDK7780
-+	bool "SDK7780R3"
-+	depends on CPU_SUBTYPE_SH7780
-+	select SYS_SUPPORTS_PCI
-+	help
-+	  Select SDK7780 if configuring for a Renesas SH7780 SDK7780R3
-+	  evaluation board.
++  case 16:
++#line 166 "dtc-parser.y"
++    {
++			(yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist));
++		;}
++    break;
 +
- config SH_HIGHLANDER
- 	bool "Highlander"
- 	depends on CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
-@@ -399,41 +519,47 @@ config SH_MAGIC_PANEL_R2
- 	help
- 	  Select Magic Panel R2 if configuring for Magic Panel R2.
- 
-+config SH_CAYMAN
-+	bool "Hitachi Cayman"
-+	depends on CPU_SUBTYPE_SH5_101 || CPU_SUBTYPE_SH5_103
-+	select SYS_SUPPORTS_PCI
++  case 17:
++#line 173 "dtc-parser.y"
++    {
++			(yyval.prop) = build_property((yyvsp[(2) - (5)].propnodename), (yyvsp[(4) - (5)].data), (yyvsp[(1) - (5)].labelref));
++		;}
++    break;
 +
- endmenu
- 
--source "arch/sh/boards/renesas/hs7751rvoip/Kconfig"
- source "arch/sh/boards/renesas/rts7751r2d/Kconfig"
- source "arch/sh/boards/renesas/r7780rp/Kconfig"
-+source "arch/sh/boards/renesas/sdk7780/Kconfig"
- source "arch/sh/boards/magicpanelr2/Kconfig"
- 
- menu "Timer and clock configuration"
- 
- config SH_TMU
--	bool "TMU timer support"
-+	def_bool y
-+	prompt "TMU timer support"
- 	depends on CPU_SH3 || CPU_SH4
- 	select GENERIC_TIME
- 	select GENERIC_CLOCKEVENTS
--	default y
- 	help
- 	  This enables the use of the TMU as the system timer.
- 
- config SH_CMT
--	bool "CMT timer support"
-+	def_bool y
-+	prompt "CMT timer support"
- 	depends on CPU_SH2
--	default y
- 	help
- 	  This enables the use of the CMT as the system timer.
- 
- config SH_MTU2
--	bool "MTU2 timer support"
-+	def_bool n
-+	prompt "MTU2 timer support"
- 	depends on CPU_SH2A
--	default n
- 	help
- 	  This enables the use of the MTU2 as the system timer.
- 
- config SH_TIMER_IRQ
- 	int
--	default "28" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
-+	default "28" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || \
-+			CPU_SUBTYPE_SH7763
- 	default "86" if CPU_SUBTYPE_SH7619
- 	default "140" if CPU_SUBTYPE_SH7206
- 	default "16"
-@@ -445,7 +571,8 @@ config SH_PCLK_FREQ
- 	default "32000000" if CPU_SUBTYPE_SH7722
- 	default "33333333" if CPU_SUBTYPE_SH7770 || \
- 			      CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \
--			      CPU_SUBTYPE_SH7206
-+			      CPU_SUBTYPE_SH7203 || CPU_SUBTYPE_SH7206 || \
-+			      CPU_SUBTYPE_SH7263
- 	default "60000000" if CPU_SUBTYPE_SH7751 || CPU_SUBTYPE_SH7751R
- 	default "66000000" if CPU_SUBTYPE_SH4_202
- 	default "50000000"
-@@ -456,7 +583,7 @@ config SH_PCLK_FREQ
- 
- config SH_CLK_MD
- 	int "CPU Mode Pin Setting"
--	depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206
-+	depends on CPU_SH2
- 	default 6 if CPU_SUBTYPE_SH7206
- 	default 5 if CPU_SUBTYPE_SH7619
- 	default 0
-@@ -490,9 +617,8 @@ source "arch/sh/drivers/Kconfig"
- endmenu
- 
- config ISA_DMA_API
--	bool
-+	def_bool y
- 	depends on SH_MPC1211
--	default y
- 
- menu "Kernel features"
- 
-@@ -570,7 +696,7 @@ source "kernel/Kconfig.preempt"
- 
- config GUSA
- 	def_bool y
--	depends on !SMP
-+	depends on !SMP && SUPERH32
- 	help
- 	  This enables support for gUSA (general UserSpace Atomicity).
- 	  This is the default implementation for both UP and non-ll/sc
-@@ -582,6 +708,16 @@ config GUSA
- 	  This should only be disabled for special cases where alternate
- 	  atomicity implementations exist.
- 
-+config GUSA_RB
-+	bool "Implement atomic operations by roll-back (gRB) (EXPERIMENTAL)"
-+	depends on GUSA && CPU_SH3 || (CPU_SH4 && !CPU_SH4A)
-+	help
-+	  Enabling this option will allow the kernel to implement some
-+	  atomic operations using a software implemention of load-locked/
-+	  store-conditional (LLSC). On machines which do not have hardware
-+	  LLSC, this should be more efficient than the other alternative of
-+	  disabling insterrupts around the atomic sequence.
++  case 18:
++#line 177 "dtc-parser.y"
++    {
++			(yyval.prop) = build_property((yyvsp[(2) - (3)].propnodename), empty_data, (yyvsp[(1) - (3)].labelref));
++		;}
++    break;
 +
- endmenu
- 
- menu "Boot options"
-diff --git a/arch/sh/Kconfig.cpu b/arch/sh/Kconfig.cpu
-new file mode 100644
-index 0000000..d850184
---- /dev/null
-+++ b/arch/sh/Kconfig.cpu
-@@ -0,0 +1,115 @@
-+menu "Processor features"
++  case 19:
++#line 184 "dtc-parser.y"
++    {
++			(yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data));
++		;}
++    break;
 +
-+choice
-+	prompt "Endianess selection" 
-+	default CPU_LITTLE_ENDIAN
-+	help
-+	  Some SuperH machines can be configured for either little or big
-+	  endian byte order. These modes require different kernels.
++  case 20:
++#line 188 "dtc-parser.y"
++    {
++			(yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
++		;}
++    break;
 +
-+config CPU_LITTLE_ENDIAN
-+	bool "Little Endian"
++  case 21:
++#line 192 "dtc-parser.y"
++    {
++			(yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
++		;}
++    break;
 +
-+config CPU_BIG_ENDIAN
-+	bool "Big Endian"
++  case 22:
++#line 196 "dtc-parser.y"
++    {
++			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref));
++		;}
++    break;
 +
-+endchoice
++  case 23:
++#line 200 "dtc-parser.y"
++    {
++			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
++		;}
++    break;
 +
-+config SH_FPU
-+	def_bool y
-+	prompt "FPU support"
-+	depends on CPU_HAS_FPU
-+	help
-+	  Selecting this option will enable support for SH processors that
-+	  have FPU units (ie, SH77xx).
++  case 24:
++#line 207 "dtc-parser.y"
++    {
++			(yyval.data) = empty_data;
++		;}
++    break;
 +
-+	  This option must be set in order to enable the FPU.
++  case 25:
++#line 211 "dtc-parser.y"
++    {
++			(yyval.data) = (yyvsp[(1) - (2)].data);
++		;}
++    break;
 +
-+config SH64_FPU_DENORM_FLUSH
-+	bool "Flush floating point denorms to zero"
-+	depends on SH_FPU && SUPERH64
++  case 26:
++#line 215 "dtc-parser.y"
++    {
++			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
++		;}
++    break;
 +
-+config SH_FPU_EMU
-+	def_bool n
-+	prompt "FPU emulation support"
-+	depends on !SH_FPU && EXPERIMENTAL
-+	help
-+	  Selecting this option will enable support for software FPU emulation.
-+	  Most SH-3 users will want to say Y here, whereas most SH-4 users will
-+	  want to say N.
++  case 27:
++#line 222 "dtc-parser.y"
++    {
++			(yyval.data) = empty_data;
++		;}
++    break;
 +
-+config SH_DSP
-+	def_bool y
-+	prompt "DSP support"
-+	depends on CPU_HAS_DSP
-+	help
-+	  Selecting this option will enable support for SH processors that
-+	  have DSP units (ie, SH2-DSP, SH3-DSP, and SH4AL-DSP).
++  case 28:
++#line 226 "dtc-parser.y"
++    {
++			(yyval.data) = data_append_cell((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].cell));
++		;}
++    break;
 +
-+	  This option must be set in order to enable the DSP.
++  case 29:
++#line 230 "dtc-parser.y"
++    {
++			(yyval.data) = data_append_cell(data_add_marker((yyvsp[(1) - (2)].data), REF_PHANDLE,
++							      (yyvsp[(2) - (2)].labelref)), -1);
++		;}
++    break;
 +
-+config SH_ADC
-+	def_bool y
-+	prompt "ADC support"
-+	depends on CPU_SH3
-+	help
-+	  Selecting this option will allow the Linux kernel to use SH3 on-chip
-+	  ADC module.
++  case 30:
++#line 235 "dtc-parser.y"
++    {
++			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
++		;}
++    break;
 +
-+	  If unsure, say N.
++  case 31:
++#line 242 "dtc-parser.y"
++    {
++			(yyval.cbase) = 16;
++		;}
++    break;
 +
-+config SH_STORE_QUEUES
-+	bool "Support for Store Queues"
-+	depends on CPU_SH4
-+	help
-+	  Selecting this option will enable an in-kernel API for manipulating
-+	  the store queues integrated in the SH-4 processors.
++  case 33:
++#line 250 "dtc-parser.y"
++    {
++			(yyval.cell) = eval_literal((yyvsp[(1) - (1)].literal), 0, 32);
++		;}
++    break;
 +
-+config SPECULATIVE_EXECUTION
-+	bool "Speculative subroutine return"
-+	depends on CPU_SUBTYPE_SH7780 && EXPERIMENTAL
-+	help
-+	  This enables support for a speculative instruction fetch for
-+	  subroutine return. There are various pitfalls associated with
-+	  this, as outlined in the SH7780 hardware manual.
++  case 34:
++#line 254 "dtc-parser.y"
++    {
++			(yyval.cell) = eval_literal((yyvsp[(2) - (2)].literal), (yyvsp[(1) - (2)].cbase), 32);
++		;}
++    break;
 +
-+	  If unsure, say N.
++  case 35:
++#line 261 "dtc-parser.y"
++    {
++			(yyval.data) = empty_data;
++		;}
++    break;
 +
-+config SH64_USER_MISALIGNED_FIXUP
-+	def_bool y
-+	prompt "Fixup misaligned loads/stores occurring in user mode"
-+	depends on SUPERH64
++  case 36:
++#line 265 "dtc-parser.y"
++    {
++			(yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte));
++		;}
++    break;
 +
-+config SH64_ID2815_WORKAROUND
-+	bool "Include workaround for SH5-101 cut2 silicon defect ID2815"
-+	depends on CPU_SUBTYPE_SH5_101
++  case 37:
++#line 269 "dtc-parser.y"
++    {
++			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
++		;}
++    break;
 +
-+config CPU_HAS_INTEVT
-+	bool
++  case 38:
++#line 276 "dtc-parser.y"
++    {
++			(yyval.nodelist) = NULL;
++		;}
++    break;
 +
-+config CPU_HAS_MASKREG_IRQ
-+	bool
++  case 39:
++#line 280 "dtc-parser.y"
++    {
++			(yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist));
++		;}
++    break;
 +
-+config CPU_HAS_IPR_IRQ
-+	bool
++  case 40:
++#line 284 "dtc-parser.y"
++    {
++			yyerror("syntax error: properties must precede subnodes\n");
++			YYERROR;
++		;}
++    break;
 +
-+config CPU_HAS_SR_RB
-+	bool
-+	help
-+	  This will enable the use of SR.RB register bank usage. Processors
-+	  that are lacking this bit must have another method in place for
-+	  accomplishing what is taken care of by the banked registers.
++  case 41:
++#line 292 "dtc-parser.y"
++    {
++			(yyval.node) = name_node((yyvsp[(3) - (3)].node), (yyvsp[(2) - (3)].propnodename), (yyvsp[(1) - (3)].labelref));
++		;}
++    break;
 +
-+	  See <file:Documentation/sh/register-banks.txt> for further
-+	  information on SR.RB and register banking in the kernel in general.
++  case 42:
++#line 299 "dtc-parser.y"
++    {
++			(yyval.labelref) = NULL;
++		;}
++    break;
 +
-+config CPU_HAS_PTEA
-+	bool
++  case 43:
++#line 303 "dtc-parser.y"
++    {
++			(yyval.labelref) = (yyvsp[(1) - (1)].labelref);
++		;}
++    break;
 +
-+config CPU_HAS_DSP
-+	bool
 +
-+config CPU_HAS_FPU
-+	bool
++/* Line 1267 of yacc.c.  */
++#line 1734 "dtc-parser.tab.c"
++      default: break;
++    }
++  YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
 +
-+endmenu
-diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
-index 722da68..f7c7161 100644
---- a/arch/sh/Kconfig.debug
-+++ b/arch/sh/Kconfig.debug
-@@ -1,8 +1,7 @@
- menu "Kernel hacking"
- 
- config TRACE_IRQFLAGS_SUPPORT
--	bool
--	default y
-+	def_bool y
- 
- source "lib/Kconfig.debug"
- 
-@@ -30,12 +29,13 @@ config EARLY_SCIF_CONSOLE
- config EARLY_SCIF_CONSOLE_PORT
- 	hex
- 	depends on EARLY_SCIF_CONSOLE
--	default "0xffe00000" if CPU_SUBTYPE_SH7780
-+ 	default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763
- 	default "0xffea0000" if CPU_SUBTYPE_SH7785
--	default "0xfffe9800" if CPU_SUBTYPE_SH7206
-+	default "0xfffe8000" if CPU_SUBTYPE_SH7203
-+	default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263
- 	default "0xf8420000" if CPU_SUBTYPE_SH7619
- 	default "0xa4400000" if CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7705
--	default "0xa4430000" if CPU_SUBTYPE_SH7720
-+	default "0xa4430000" if CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721
- 	default "0xffc30000" if CPU_SUBTYPE_SHX3
- 	default "0xffe80000" if CPU_SH4
- 	default "0x00000000"
-@@ -62,7 +62,7 @@ config DEBUG_BOOTMEM
- 
- config DEBUG_STACKOVERFLOW
- 	bool "Check for stack overflows"
--	depends on DEBUG_KERNEL
-+	depends on DEBUG_KERNEL && SUPERH32
- 	help
- 	  This option will cause messages to be printed if free stack space
- 	  drops below a certain limit.
-@@ -88,7 +88,7 @@ config 4KSTACKS
- 
- config IRQSTACKS
- 	bool "Use separate kernel stacks when processing interrupts"
--	depends on DEBUG_KERNEL
-+	depends on DEBUG_KERNEL && SUPERH32
- 	help
- 	  If you say Y here the kernel will use separate kernel stacks
- 	  for handling hard and soft interrupts.  This can help avoid
-@@ -119,19 +119,19 @@ config COMPILE_OPTIONS
- 	depends on MORE_COMPILE_OPTIONS
- 
- config KGDB_NMI
--	bool "Enter KGDB on NMI"
--	default n
-+	def_bool n
-+	prompt "Enter KGDB on NMI"
- 
- config SH_KGDB_CONSOLE
--	bool "Console messages through GDB"
-+	def_bool n
-+	prompt "Console messages through GDB"
- 	depends on !SERIAL_SH_SCI_CONSOLE && SERIAL_SH_SCI=y
- 	select SERIAL_CORE_CONSOLE
--	default n
- 
- config KGDB_SYSRQ
--	bool "Allow SysRq 'G' to enter KGDB"
-+	def_bool y
-+	prompt "Allow SysRq 'G' to enter KGDB"
- 	depends on MAGIC_SYSRQ
--	default y
- 
- comment "Serial port setup"
- 
-@@ -174,4 +174,29 @@ endchoice
- 
- endmenu
- 
-+if SUPERH64
++  YYPOPSTACK (yylen);
++  yylen = 0;
++  YY_STACK_PRINT (yyss, yyssp);
 +
-+config SH64_PROC_ASIDS
-+	bool "Debug: report ASIDs through /proc/asids"
-+	depends on PROC_FS
++  *++yyvsp = yyval;
++  *++yylsp = yyloc;
 +
-+config SH64_SR_WATCH
-+	bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
++  /* Now `shift' the result of the reduction.  Determine what state
++     that goes to, based on the state we popped back to and the rule
++     number reduced by.  */
 +
-+config POOR_MANS_STRACE
-+	bool "Debug: enable rudimentary strace facility"
-+	help
-+	  This option allows system calls to be traced to the console.  It also
-+	  aids in detecting kernel stack underflow.  It is useful for debugging
-+	  early-userland problems (e.g. init incurring fatal exceptions.)
++  yyn = yyr1[yyn];
 +
-+config SH_ALPHANUMERIC
-+	bool "Enable debug outputs to on-board alphanumeric display"
-+	depends on SH_CAYMAN
++  yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
++  if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
++    yystate = yytable[yystate];
++  else
++    yystate = yydefgoto[yyn - YYNTOKENS];
 +
-+config SH_NO_BSS_INIT
-+	bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)"
++  goto yynewstate;
 +
-+endif
 +
- endmenu
-diff --git a/arch/sh/Makefile b/arch/sh/Makefile
-index e189fae..17fc361 100644
---- a/arch/sh/Makefile
-+++ b/arch/sh/Makefile
-@@ -1,17 +1,13 @@
--# $Id: Makefile,v 1.35 2004/04/15 03:39:20 sugioka Exp $
- #
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
-+# arch/sh/Makefile
- #
- # Copyright (C) 1999  Kaz Kojima
- # Copyright (C) 2002, 2003, 2004  Paul Mundt
- # Copyright (C) 2002  M. R. Brown
- #
--# This file is included by the global makefile so that you can add your own
--# architecture-specific flags and dependencies. Remember to do have actions
--# for "archclean" and "archdep" for cleaning up and making dependencies for
--# this architecture
-+# This file is subject to the terms and conditions of the GNU General Public
-+# License.  See the file "COPYING" in the main directory of this archive
-+# for more details.
- #
- isa-y					:= any
- isa-$(CONFIG_SH_DSP)			:= sh
-@@ -21,13 +17,9 @@ isa-$(CONFIG_CPU_SH3)			:= sh3
- isa-$(CONFIG_CPU_SH4)			:= sh4
- isa-$(CONFIG_CPU_SH4A)			:= sh4a
- isa-$(CONFIG_CPU_SH4AL_DSP)		:= sh4al
--
-+isa-$(CONFIG_CPU_SH5)			:= shmedia
- isa-$(CONFIG_SH_DSP)			:= $(isa-y)-dsp
- 
--ifndef CONFIG_MMU
--isa-y			:= $(isa-y)-nommu
--endif
--
- ifndef CONFIG_SH_DSP
- ifndef CONFIG_SH_FPU
- isa-y			:= $(isa-y)-nofpu
-@@ -44,6 +36,7 @@ cflags-$(CONFIG_CPU_SH4)		:= $(call cc-option,-m4,) \
- 	$(call cc-option,-mno-implicit-fp,-m4-nofpu)
- cflags-$(CONFIG_CPU_SH4A)		+= $(call cc-option,-m4a,) \
- 					   $(call cc-option,-m4a-nofpu,)
-+cflags-$(CONFIG_CPU_SH5)		:= $(call cc-option,-m5-32media-nofpu,)
- 
- cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= -mb
- cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -ml
-@@ -66,22 +59,27 @@ cflags-y	+= $(isaflags-y) -ffreestanding
- cflags-$(CONFIG_MORE_COMPILE_OPTIONS)	+= \
- 	$(shell echo $(CONFIG_COMPILE_OPTIONS) | sed -e 's/"//g')
- 
--OBJCOPYFLAGS	:= -O binary -R .note -R .note.gnu.build-id -R .comment -R .stab -R .stabstr -S
-+OBJCOPYFLAGS	:= -O binary -R .note -R .note.gnu.build-id -R .comment \
-+		   -R .stab -R .stabstr -S
- 
--#
--# arch/sh/defconfig doesn't reflect any real hardware, and as such should
--# never be used by anyone. Use a board-specific defconfig that has a
--# reasonable chance of being current instead.
--#
--KBUILD_DEFCONFIG := r7780rp_defconfig
-+# Give the various platforms the opportunity to set default image types
-+defaultimage-$(CONFIG_SUPERH32)	:= zImage
- 
--KBUILD_IMAGE	:= arch/sh/boot/zImage
-+# Set some sensible Kbuild defaults
-+KBUILD_DEFCONFIG	:= r7780mp_defconfig
-+KBUILD_IMAGE		:= $(defaultimage-y)
- 
- #
- # Choosing incompatible machines during configuration will result in
- # error messages during linking.
- #
--LDFLAGS_vmlinux     += -e _stext
-+ifdef CONFIG_SUPERH32
-+LDFLAGS_vmlinux	+= -e _stext
-+else
-+LDFLAGS_vmlinux	+= --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
-+		   --defsym phys_stext_shmedia=phys_stext+1 \
-+		   -e phys_stext_shmedia
-+endif
- 
- ifdef CONFIG_CPU_LITTLE_ENDIAN
- LDFLAGS_vmlinux		+= --defsym 'jiffies=jiffies_64'
-@@ -94,7 +92,9 @@ endif
- KBUILD_CFLAGS		+= -pipe $(cflags-y)
- KBUILD_AFLAGS		+= $(cflags-y)
- 
--head-y := arch/sh/kernel/head.o arch/sh/kernel/init_task.o
-+head-y			:= arch/sh/kernel/init_task.o
-+head-$(CONFIG_SUPERH32)	+= arch/sh/kernel/head_32.o
-+head-$(CONFIG_SUPERH64)	+= arch/sh/kernel/head_64.o
- 
- LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
- 
-@@ -112,11 +112,11 @@ machdir-$(CONFIG_SH_DREAMCAST)			+= dreamcast
- machdir-$(CONFIG_SH_MPC1211)			+= mpc1211
- machdir-$(CONFIG_SH_SH03)			+= sh03
- machdir-$(CONFIG_SH_SECUREEDGE5410)		+= snapgear
--machdir-$(CONFIG_SH_HS7751RVOIP)		+= renesas/hs7751rvoip
- machdir-$(CONFIG_SH_RTS7751R2D)			+= renesas/rts7751r2d
- machdir-$(CONFIG_SH_7751_SYSTEMH)		+= renesas/systemh
- machdir-$(CONFIG_SH_EDOSK7705)			+= renesas/edosk7705
- machdir-$(CONFIG_SH_HIGHLANDER)			+= renesas/r7780rp
-+machdir-$(CONFIG_SH_SDK7780)			+= renesas/sdk7780
- machdir-$(CONFIG_SH_7710VOIPGW)			+= renesas/sh7710voipgw
- machdir-$(CONFIG_SH_X3PROTO)			+= renesas/x3proto
- machdir-$(CONFIG_SH_SH4202_MICRODEV)		+= superh/microdev
-@@ -127,6 +127,7 @@ machdir-$(CONFIG_SH_7206_SOLUTION_ENGINE)	+= se/7206
- machdir-$(CONFIG_SH_7619_SOLUTION_ENGINE)	+= se/7619
- machdir-$(CONFIG_SH_LBOX_RE2)			+= lboxre2
- machdir-$(CONFIG_SH_MAGIC_PANEL_R2)		+= magicpanelr2
-+machdir-$(CONFIG_SH_CAYMAN)			+= cayman
- 
- incdir-y	:= $(notdir $(machdir-y))
- 
-@@ -137,22 +138,22 @@ endif
- 
- # Companion chips
- core-$(CONFIG_HD6446X_SERIES)	+= arch/sh/cchips/hd6446x/
--core-$(CONFIG_MFD_SM501)	+= arch/sh/cchips/voyagergx/
- 
- cpuincdir-$(CONFIG_CPU_SH2)	:= cpu-sh2
- cpuincdir-$(CONFIG_CPU_SH2A)	:= cpu-sh2a
- cpuincdir-$(CONFIG_CPU_SH3)	:= cpu-sh3
- cpuincdir-$(CONFIG_CPU_SH4)	:= cpu-sh4
-+cpuincdir-$(CONFIG_CPU_SH5)	:= cpu-sh5
- 
--libs-y				:= arch/sh/lib/	$(libs-y) $(LIBGCC)
-+libs-$(CONFIG_SUPERH32)		:= arch/sh/lib/	$(libs-y)
-+libs-$(CONFIG_SUPERH64)		:= arch/sh/lib64/ $(libs-y)
-+libs-y				+= $(LIBGCC)
- 
- drivers-y			+= arch/sh/drivers/
- drivers-$(CONFIG_OPROFILE)	+= arch/sh/oprofile/
- 
- boot := arch/sh/boot
- 
--CPPFLAGS_vmlinux.lds := -traditional
--
- incdir-prefix	:= $(srctree)/include/asm-sh/
- 
- #	Update machine arch and proc symlinks if something which affects
-@@ -196,29 +197,61 @@ include/asm-sh/.mach: $(wildcard include/config/sh/*.h) \
- 	done
- 	@touch $@
- 
--archprepare: include/asm-sh/.cpu include/asm-sh/.mach maketools
--
- PHONY += maketools FORCE
++/*------------------------------------.
++| yyerrlab -- here on detecting error |
++`------------------------------------*/
++yyerrlab:
++  /* If not already recovering from an error, report this error.  */
++  if (!yyerrstatus)
++    {
++      ++yynerrs;
++#if ! YYERROR_VERBOSE
++      yyerror (YY_("syntax error"));
++#else
++      {
++	YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
++	if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
++	  {
++	    YYSIZE_T yyalloc = 2 * yysize;
++	    if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
++	      yyalloc = YYSTACK_ALLOC_MAXIMUM;
++	    if (yymsg != yymsgbuf)
++	      YYSTACK_FREE (yymsg);
++	    yymsg = (char *) YYSTACK_ALLOC (yyalloc);
++	    if (yymsg)
++	      yymsg_alloc = yyalloc;
++	    else
++	      {
++		yymsg = yymsgbuf;
++		yymsg_alloc = sizeof yymsgbuf;
++	      }
++	  }
++
++	if (0 < yysize && yysize <= yymsg_alloc)
++	  {
++	    (void) yysyntax_error (yymsg, yystate, yychar);
++	    yyerror (yymsg);
++	  }
++	else
++	  {
++	    yyerror (YY_("syntax error"));
++	    if (yysize != 0)
++	      goto yyexhaustedlab;
++	  }
++      }
++#endif
++    }
 +
- maketools:  include/linux/version.h FORCE
- 	$(Q)$(MAKE) $(build)=arch/sh/tools include/asm-sh/machtypes.h
- 
--all: zImage
-+all: $(KBUILD_IMAGE)
- 
- zImage uImage uImage.srec vmlinux.srec: vmlinux
- 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
- 
- compressed: zImage
- 
-+archprepare: include/asm-sh/.cpu include/asm-sh/.mach maketools \
-+	     arch/sh/lib64/syscalltab.h
++  yyerror_range[0] = yylloc;
 +
- archclean:
- 	$(Q)$(MAKE) $(clean)=$(boot)
- 
--CLEAN_FILES += include/asm-sh/machtypes.h \
--	       include/asm-sh/cpu include/asm-sh/.cpu \
--	       include/asm-sh/mach include/asm-sh/.mach
--
- define archhelp
- 	@echo '* zImage 	           - Compressed kernel image'
- 	@echo '  vmlinux.srec	           - Create an ELF S-record'
- 	@echo '  uImage  	           - Create a bootable image for U-Boot'
- 	@echo '  uImage.srec  	           - Create an S-record for U-Boot'
- endef
++  if (yyerrstatus == 3)
++    {
++      /* If just tried and failed to reuse look-ahead token after an
++	 error, discard it.  */
 +
-+define filechk_gen-syscalltab
-+       (set -e; \
-+	echo "/*"; \
-+	echo " * DO NOT MODIFY."; \
-+	echo " *"; \
-+	echo " * This file was generated by arch/sh/Makefile"; \
-+	echo " * Any changes will be reverted at build time."; \
-+	echo " */"; \
-+	echo ""; \
-+	echo "#ifndef __SYSCALLTAB_H"; \
-+	echo "#define __SYSCALLTAB_H"; \
-+	echo ""; \
-+	echo "#include <linux/kernel.h>"; \
-+	echo ""; \
-+	echo "struct syscall_info {"; \
-+	echo "	const char *name;"; \
-+	echo "} syscall_info_table[] = {"; \
-+	sed -e '/^.*\.long /!d;s//	{ "/;s/\(\([^/]*\)\/\)\{1\}.*/\2/; \
-+		s/[ \t]*$$//g;s/$$/" },/;s/\("\)sys_/\1/g'; \
-+	echo "};"; \
-+	echo ""; \
-+	echo "#define NUM_SYSCALL_INFO_ENTRIES ARRAY_SIZE(syscall_info_table)";\
-+	echo ""; \
-+	echo "#endif /* __SYSCALLTAB_H */" )
-+endef
++      if (yychar <= YYEOF)
++	{
++	  /* Return failure if at end of input.  */
++	  if (yychar == YYEOF)
++	    YYABORT;
++	}
++      else
++	{
++	  yydestruct ("Error: discarding",
++		      yytoken, &yylval, &yylloc);
++	  yychar = YYEMPTY;
++	}
++    }
 +
-+arch/sh/lib64/syscalltab.h: arch/sh/kernel/syscalls_64.S
-+	$(call filechk,gen-syscalltab)
++  /* Else will try to reuse look-ahead token after shifting the error
++     token.  */
++  goto yyerrlab1;
++
++
++/*---------------------------------------------------.
++| yyerrorlab -- error raised explicitly by YYERROR.  |
++`---------------------------------------------------*/
++yyerrorlab:
++
++  /* Pacify compilers like GCC when the user code never invokes
++     YYERROR and the label yyerrorlab therefore never appears in user
++     code.  */
++  if (/*CONSTCOND*/ 0)
++     goto yyerrorlab;
++
++  yyerror_range[0] = yylsp[1-yylen];
++  /* Do not reclaim the symbols of the rule which action triggered
++     this YYERROR.  */
++  YYPOPSTACK (yylen);
++  yylen = 0;
++  YY_STACK_PRINT (yyss, yyssp);
++  yystate = *yyssp;
++  goto yyerrlab1;
++
++
++/*-------------------------------------------------------------.
++| yyerrlab1 -- common code for both syntax error and YYERROR.  |
++`-------------------------------------------------------------*/
++yyerrlab1:
++  yyerrstatus = 3;	/* Each real token shifted decrements this.  */
 +
-+CLEAN_FILES += arch/sh/lib64/syscalltab.h \
-+	       include/asm-sh/machtypes.h \
-+	       include/asm-sh/cpu include/asm-sh/.cpu \
-+	       include/asm-sh/mach include/asm-sh/.mach
-diff --git a/arch/sh/boards/cayman/Makefile b/arch/sh/boards/cayman/Makefile
++  for (;;)
++    {
++      yyn = yypact[yystate];
++      if (yyn != YYPACT_NINF)
++	{
++	  yyn += YYTERROR;
++	  if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
++	    {
++	      yyn = yytable[yyn];
++	      if (0 < yyn)
++		break;
++	    }
++	}
++
++      /* Pop the current state because it cannot handle the error token.  */
++      if (yyssp == yyss)
++	YYABORT;
++
++      yyerror_range[0] = *yylsp;
++      yydestruct ("Error: popping",
++		  yystos[yystate], yyvsp, yylsp);
++      YYPOPSTACK (1);
++      yystate = *yyssp;
++      YY_STACK_PRINT (yyss, yyssp);
++    }
++
++  if (yyn == YYFINAL)
++    YYACCEPT;
++
++  *++yyvsp = yylval;
++
++  yyerror_range[1] = yylloc;
++  /* Using YYLLOC is tempting, but would change the location of
++     the look-ahead.  YYLOC is available though.  */
++  YYLLOC_DEFAULT (yyloc, (yyerror_range - 1), 2);
++  *++yylsp = yyloc;
++
++  /* Shift the error token.  */
++  YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
++
++  yystate = yyn;
++  goto yynewstate;
++
++
++/*-------------------------------------.
++| yyacceptlab -- YYACCEPT comes here.  |
++`-------------------------------------*/
++yyacceptlab:
++  yyresult = 0;
++  goto yyreturn;
++
++/*-----------------------------------.
++| yyabortlab -- YYABORT comes here.  |
++`-----------------------------------*/
++yyabortlab:
++  yyresult = 1;
++  goto yyreturn;
++
++#ifndef yyoverflow
++/*-------------------------------------------------.
++| yyexhaustedlab -- memory exhaustion comes here.  |
++`-------------------------------------------------*/
++yyexhaustedlab:
++  yyerror (YY_("memory exhausted"));
++  yyresult = 2;
++  /* Fall through.  */
++#endif
++
++yyreturn:
++  if (yychar != YYEOF && yychar != YYEMPTY)
++     yydestruct ("Cleanup: discarding lookahead",
++		 yytoken, &yylval, &yylloc);
++  /* Do not reclaim the symbols of the rule which action triggered
++     this YYABORT or YYACCEPT.  */
++  YYPOPSTACK (yylen);
++  YY_STACK_PRINT (yyss, yyssp);
++  while (yyssp != yyss)
++    {
++      yydestruct ("Cleanup: popping",
++		  yystos[*yyssp], yyvsp, yylsp);
++      YYPOPSTACK (1);
++    }
++#ifndef yyoverflow
++  if (yyss != yyssa)
++    YYSTACK_FREE (yyss);
++#endif
++#if YYERROR_VERBOSE
++  if (yymsg != yymsgbuf)
++    YYSTACK_FREE (yymsg);
++#endif
++  /* Make sure YYID is used.  */
++  return YYID (yyresult);
++}
++
++
++#line 308 "dtc-parser.y"
++
++
++void yyerror (char const *s)
++{
++	const char *fname = srcpos_filename_for_num(yylloc.filenum);
++
++	if (strcmp(fname, "-") == 0)
++		fname = "stdin";
++
++	fprintf(stderr, "%s:%d %s\n",
++		fname, yylloc.first_line, s);
++}
++
++unsigned long long eval_literal(const char *s, int base, int bits)
++{
++	unsigned long long val;
++	char *e;
++
++	errno = 0;
++	val = strtoull(s, &e, base);
++	if (*e)
++		yyerror("bad characters in literal");
++	else if ((errno == ERANGE)
++		 || ((bits < 64) && (val >= (1ULL << bits))))
++		yyerror("literal out of range");
++	else if (errno != 0)
++		yyerror("bad literal");
++	return val;
++}
++
+diff --git a/arch/powerpc/boot/dtc-src/dtc-parser.tab.h_shipped b/arch/powerpc/boot/dtc-src/dtc-parser.tab.h_shipped
 new file mode 100644
-index 0000000..489a8f8
+index 0000000..4707b02
 --- /dev/null
-+++ b/arch/sh/boards/cayman/Makefile
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for the Hitachi Cayman specific parts of the kernel
-+#
-+obj-y := setup.o irq.o
-+obj-$(CONFIG_HEARTBEAT)	+= led.o
-diff --git a/arch/sh/boards/cayman/irq.c b/arch/sh/boards/cayman/irq.c
++++ b/arch/powerpc/boot/dtc-src/dtc-parser.tab.h_shipped
+@@ -0,0 +1,111 @@
++/* A Bison parser, made by GNU Bison 2.3.  */
++
++/* Skeleton interface for Bison's Yacc-like parsers in C
++
++   Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
++   Free Software Foundation, Inc.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 2, or (at your option)
++   any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 51 Franklin Street, Fifth Floor,
++   Boston, MA 02110-1301, USA.  */
++
++/* As a special exception, you may create a larger work that contains
++   part or all of the Bison parser skeleton and distribute that work
++   under terms of your choice, so long as that work isn't itself a
++   parser generator using the skeleton or a modified version thereof
++   as a parser skeleton.  Alternatively, if you modify or redistribute
++   the parser skeleton itself, you may (at your option) remove this
++   special exception, which will cause the skeleton and the resulting
++   Bison output files to be licensed under the GNU General Public
++   License without this special exception.
++
++   This special exception was added by the Free Software Foundation in
++   version 2.2 of Bison.  */
++
++/* Tokens.  */
++#ifndef YYTOKENTYPE
++# define YYTOKENTYPE
++   /* Put the tokens into the symbol table, so that GDB and other debuggers
++      know about them.  */
++   enum yytokentype {
++     DT_V1 = 258,
++     DT_MEMRESERVE = 259,
++     DT_PROPNODENAME = 260,
++     DT_LITERAL = 261,
++     DT_LEGACYLITERAL = 262,
++     DT_BASE = 263,
++     DT_BYTE = 264,
++     DT_STRING = 265,
++     DT_LABEL = 266,
++     DT_REF = 267
++   };
++#endif
++/* Tokens.  */
++#define DT_V1 258
++#define DT_MEMRESERVE 259
++#define DT_PROPNODENAME 260
++#define DT_LITERAL 261
++#define DT_LEGACYLITERAL 262
++#define DT_BASE 263
++#define DT_BYTE 264
++#define DT_STRING 265
++#define DT_LABEL 266
++#define DT_REF 267
++
++
++
++
++#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
++typedef union YYSTYPE
++#line 34 "dtc-parser.y"
++{
++	char *propnodename;
++	char *literal;
++	char *labelref;
++	unsigned int cbase;
++	u8 byte;
++	struct data data;
++
++	u64 addr;
++	cell_t cell;
++	struct property *prop;
++	struct property *proplist;
++	struct node *node;
++	struct node *nodelist;
++	struct reserve_info *re;
++}
++/* Line 1489 of yacc.c.  */
++#line 90 "dtc-parser.tab.h"
++	YYSTYPE;
++# define yystype YYSTYPE /* obsolescent; will be withdrawn */
++# define YYSTYPE_IS_DECLARED 1
++# define YYSTYPE_IS_TRIVIAL 1
++#endif
++
++extern YYSTYPE yylval;
++
++#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
++typedef struct YYLTYPE
++{
++  int first_line;
++  int first_column;
++  int last_line;
++  int last_column;
++} YYLTYPE;
++# define yyltype YYLTYPE /* obsolescent; will be withdrawn */
++# define YYLTYPE_IS_DECLARED 1
++# define YYLTYPE_IS_TRIVIAL 1
++#endif
++
++extern YYLTYPE yylloc;
+diff --git a/arch/powerpc/boot/dtc-src/dtc-parser.y b/arch/powerpc/boot/dtc-src/dtc-parser.y
 new file mode 100644
-index 0000000..30ec7be
+index 0000000..002ea7f
 --- /dev/null
-+++ b/arch/sh/boards/cayman/irq.c
-@@ -0,0 +1,197 @@
++++ b/arch/powerpc/boot/dtc-src/dtc-parser.y
+@@ -0,0 +1,336 @@
 +/*
-+ * arch/sh/mach-cayman/irq.c - SH-5 Cayman Interrupt Support
++ * (C) Copyright David Gibson <dwg at au1.ibm.com>, IBM Corporation.  2005.
 + *
-+ * This file handles the board specific parts of the Cayman interrupt system
 + *
-+ * Copyright (C) 2002 Stuart Menefy
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
 + *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
 + */
-+#include <linux/io.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/signal.h>
-+#include <asm/cpu/irq.h>
-+#include <asm/page.h>
 +
-+/* Setup for the SMSC FDC37C935 / LAN91C100FD */
-+#define SMSC_IRQ         IRQ_IRL1
++%locations
 +
-+/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */
-+#define PCI2_IRQ         IRQ_IRL3
++%{
++#include "dtc.h"
++#include "srcpos.h"
 +
-+unsigned long epld_virt;
++int yylex(void);
++unsigned long long eval_literal(const char *s, int base, int bits);
 +
-+#define EPLD_BASE        0x04002000
-+#define EPLD_STATUS_BASE (epld_virt + 0x10)
-+#define EPLD_MASK_BASE   (epld_virt + 0x20)
++extern struct boot_info *the_boot_info;
 +
-+/* Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto
-+   the same SH-5 interrupt */
++%}
 +
-+static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id)
++%union {
++	char *propnodename;
++	char *literal;
++	char *labelref;
++	unsigned int cbase;
++	u8 byte;
++	struct data data;
++
++	u64 addr;
++	cell_t cell;
++	struct property *prop;
++	struct property *proplist;
++	struct node *node;
++	struct node *nodelist;
++	struct reserve_info *re;
++}
++
++%token DT_V1
++%token DT_MEMRESERVE
++%token <propnodename> DT_PROPNODENAME
++%token <literal> DT_LITERAL
++%token <literal> DT_LEGACYLITERAL
++%token <cbase> DT_BASE
++%token <byte> DT_BYTE
++%token <data> DT_STRING
++%token <labelref> DT_LABEL
++%token <labelref> DT_REF
++
++%type <data> propdata
++%type <data> propdataprefix
++%type <re> memreserve
++%type <re> memreserves
++%type <re> v0_memreserve
++%type <re> v0_memreserves
++%type <addr> addr
++%type <data> celllist
++%type <cbase> cellbase
++%type <cell> cellval
++%type <data> bytestring
++%type <prop> propdef
++%type <proplist> proplist
++
++%type <node> devicetree
++%type <node> nodedef
++%type <node> subnode
++%type <nodelist> subnodes
++%type <labelref> label
++
++%%
++
++sourcefile:
++	  DT_V1 ';' memreserves devicetree
++		{
++			the_boot_info = build_boot_info($3, $4);
++		}
++	| v0_memreserves devicetree
++		{
++			the_boot_info = build_boot_info($1, $2);
++		}
++	;
++
++memreserves:
++	  /* empty */
++		{
++			$$ = NULL;
++		}
++	| memreserve memreserves
++		{
++			$$ = chain_reserve_entry($1, $2);
++		}
++	;
++
++memreserve:
++	  label DT_MEMRESERVE addr addr ';'
++		{
++			$$ = build_reserve_entry($3, $4, $1);
++		}
++	;
++
++v0_memreserves:
++	  /* empty */
++		{
++			$$ = NULL;
++		}
++	| v0_memreserve v0_memreserves
++		{
++			$$ = chain_reserve_entry($1, $2);
++		};
++	;
++
++v0_memreserve:
++	  memreserve
++		{
++			$$ = $1;
++		}
++	| label DT_MEMRESERVE addr '-' addr ';'
++		{
++			$$ = build_reserve_entry($3, $5 - $3 + 1, $1);
++		}
++	;
++
++addr:
++	  DT_LITERAL
++		{
++			$$ = eval_literal($1, 0, 64);
++		}
++	| DT_LEGACYLITERAL
++		{
++			$$ = eval_literal($1, 16, 64);
++		}
++	  ;
++
++devicetree:
++	  '/' nodedef
++		{
++			$$ = name_node($2, "", NULL);
++		}
++	;
++
++nodedef:
++	  '{' proplist subnodes '}' ';'
++		{
++			$$ = build_node($2, $3);
++		}
++	;
++
++proplist:
++	  /* empty */
++		{
++			$$ = NULL;
++		}
++	| proplist propdef
++		{
++			$$ = chain_property($2, $1);
++		}
++	;
++
++propdef:
++	  label DT_PROPNODENAME '=' propdata ';'
++		{
++			$$ = build_property($2, $4, $1);
++		}
++	| label DT_PROPNODENAME ';'
++		{
++			$$ = build_property($2, empty_data, $1);
++		}
++	;
++
++propdata:
++	  propdataprefix DT_STRING
++		{
++			$$ = data_merge($1, $2);
++		}
++	| propdataprefix '<' celllist '>'
++		{
++			$$ = data_merge($1, $3);
++		}
++	| propdataprefix '[' bytestring ']'
++		{
++			$$ = data_merge($1, $3);
++		}
++	| propdataprefix DT_REF
++		{
++			$$ = data_add_marker($1, REF_PATH, $2);
++		}
++	| propdata DT_LABEL
++		{
++			$$ = data_add_marker($1, LABEL, $2);
++		}
++	;
++
++propdataprefix:
++	  /* empty */
++		{
++			$$ = empty_data;
++		}
++	| propdata ','
++		{
++			$$ = $1;
++		}
++	| propdataprefix DT_LABEL
++		{
++			$$ = data_add_marker($1, LABEL, $2);
++		}
++	;
++
++celllist:
++	  /* empty */
++		{
++			$$ = empty_data;
++		}
++	| celllist cellval
++		{
++			$$ = data_append_cell($1, $2);
++		}
++	| celllist DT_REF
++		{
++			$$ = data_append_cell(data_add_marker($1, REF_PHANDLE,
++							      $2), -1);
++		}
++	| celllist DT_LABEL
++		{
++			$$ = data_add_marker($1, LABEL, $2);
++		}
++	;
++
++cellbase:
++	  /* empty */
++		{
++			$$ = 16;
++		}
++	| DT_BASE
++	;
++
++cellval:
++	  DT_LITERAL
++		{
++			$$ = eval_literal($1, 0, 32);
++		}
++	| cellbase DT_LEGACYLITERAL
++		{
++			$$ = eval_literal($2, $1, 32);
++		}
++	;
++
++bytestring:
++	  /* empty */
++		{
++			$$ = empty_data;
++		}
++	| bytestring DT_BYTE
++		{
++			$$ = data_append_byte($1, $2);
++		}
++	| bytestring DT_LABEL
++		{
++			$$ = data_add_marker($1, LABEL, $2);
++		}
++	;
++
++subnodes:
++	  /* empty */
++		{
++			$$ = NULL;
++		}
++	|  subnode subnodes
++		{
++			$$ = chain_node($1, $2);
++		}
++	| subnode propdef
++		{
++			yyerror("syntax error: properties must precede subnodes\n");
++			YYERROR;
++		}
++	;
++
++subnode:
++	  label DT_PROPNODENAME nodedef
++		{
++			$$ = name_node($3, $2, $1);
++		}
++	;
++
++label:
++	  /* empty */
++		{
++			$$ = NULL;
++		}
++	| DT_LABEL
++		{
++			$$ = $1;
++		}
++	;
++
++%%
++
++void yyerror (char const *s)
 +{
-+        printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n");
-+	return IRQ_NONE;
++	const char *fname = srcpos_filename_for_num(yylloc.filenum);
++
++	if (strcmp(fname, "-") == 0)
++		fname = "stdin";
++
++	fprintf(stderr, "%s:%d %s\n",
++		fname, yylloc.first_line, s);
 +}
 +
-+static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id)
++unsigned long long eval_literal(const char *s, int base, int bits)
 +{
-+        printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq);
-+	return IRQ_NONE;
++	unsigned long long val;
++	char *e;
++
++	errno = 0;
++	val = strtoull(s, &e, base);
++	if (*e)
++		yyerror("bad characters in literal");
++	else if ((errno == ERANGE)
++		 || ((bits < 64) && (val >= (1ULL << bits))))
++		yyerror("literal out of range");
++	else if (errno != 0)
++		yyerror("bad literal");
++	return val;
++}
+diff --git a/arch/powerpc/boot/dtc-src/dtc.c b/arch/powerpc/boot/dtc-src/dtc.c
+new file mode 100644
+index 0000000..01131d7
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/dtc.c
+@@ -0,0 +1,231 @@
++/*
++ * (C) Copyright David Gibson <dwg at au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
++ */
++
++#include "dtc.h"
++#include "srcpos.h"
++
++#include "version_gen.h"
++
++/*
++ * Command line options
++ */
++int quiet;		/* Level of quietness */
++int reservenum;		/* Number of memory reservation slots */
++int minsize;		/* Minimum blob size */
++int padsize;		/* Additional padding to blob */
++
++char *join_path(const char *path, const char *name)
++{
++	int lenp = strlen(path);
++	int lenn = strlen(name);
++	int len;
++	int needslash = 1;
++	char *str;
++
++	len = lenp + lenn + 2;
++	if ((lenp > 0) && (path[lenp-1] == '/')) {
++		needslash = 0;
++		len--;
++	}
++
++	str = xmalloc(len);
++	memcpy(str, path, lenp);
++	if (needslash) {
++		str[lenp] = '/';
++		lenp++;
++	}
++	memcpy(str+lenp, name, lenn+1);
++	return str;
 +}
 +
-+static struct irqaction cayman_action_smsc = {
-+	.name		= "Cayman SMSC Mux",
-+	.handler	= cayman_interrupt_smsc,
-+	.flags		= IRQF_DISABLED,
++void fill_fullpaths(struct node *tree, const char *prefix)
++{
++	struct node *child;
++	const char *unit;
++
++	tree->fullpath = join_path(prefix, tree->name);
++
++	unit = strchr(tree->name, '@');
++	if (unit)
++		tree->basenamelen = unit - tree->name;
++	else
++		tree->basenamelen = strlen(tree->name);
++
++	for_each_child(tree, child)
++		fill_fullpaths(child, tree->fullpath);
++}
++
++static void  __attribute__ ((noreturn)) usage(void)
++{
++	fprintf(stderr, "Usage:\n");
++	fprintf(stderr, "\tdtc [options] <input file>\n");
++	fprintf(stderr, "\nOptions:\n");
++	fprintf(stderr, "\t-h\n");
++	fprintf(stderr, "\t\tThis help text\n");
++	fprintf(stderr, "\t-q\n");
++	fprintf(stderr, "\t\tQuiet: -q suppress warnings, -qq errors, -qqq all\n");
++	fprintf(stderr, "\t-I <input format>\n");
++	fprintf(stderr, "\t\tInput formats are:\n");
++	fprintf(stderr, "\t\t\tdts - device tree source text\n");
++	fprintf(stderr, "\t\t\tdtb - device tree blob\n");
++	fprintf(stderr, "\t\t\tfs - /proc/device-tree style directory\n");
++	fprintf(stderr, "\t-o <output file>\n");
++	fprintf(stderr, "\t-O <output format>\n");
++	fprintf(stderr, "\t\tOutput formats are:\n");
++	fprintf(stderr, "\t\t\tdts - device tree source text\n");
++	fprintf(stderr, "\t\t\tdtb - device tree blob\n");
++	fprintf(stderr, "\t\t\tasm - assembler source\n");
++	fprintf(stderr, "\t-V <output version>\n");
++	fprintf(stderr, "\t\tBlob version to produce, defaults to %d (relevant for dtb\n\t\tand asm output only)\n", DEFAULT_FDT_VERSION);
++	fprintf(stderr, "\t-R <number>\n");
++	fprintf(stderr, "\t\tMake space for <number> reserve map entries (relevant for \n\t\tdtb and asm output only)\n");
++	fprintf(stderr, "\t-S <bytes>\n");
++	fprintf(stderr, "\t\tMake the blob at least <bytes> long (extra space)\n");
++	fprintf(stderr, "\t-p <bytes>\n");
++	fprintf(stderr, "\t\tAdd padding to the blob of <bytes> long (extra space)\n");
++	fprintf(stderr, "\t-b <number>\n");
++	fprintf(stderr, "\t\tSet the physical boot cpu\n");
++	fprintf(stderr, "\t-f\n");
++	fprintf(stderr, "\t\tForce - try to produce output even if the input tree has errors\n");
++	fprintf(stderr, "\t-v\n");
++	fprintf(stderr, "\t\tPrint DTC version and exit\n");
++	exit(2);
++}
++
++int main(int argc, char *argv[])
++{
++	struct boot_info *bi;
++	const char *inform = "dts";
++	const char *outform = "dts";
++	const char *outname = "-";
++	int force = 0, check = 0;
++	const char *arg;
++	int opt;
++	FILE *inf = NULL;
++	FILE *outf = NULL;
++	int outversion = DEFAULT_FDT_VERSION;
++	int boot_cpuid_phys = 0xfeedbeef;
++
++	quiet      = 0;
++	reservenum = 0;
++	minsize    = 0;
++	padsize    = 0;
++
++	while ((opt = getopt(argc, argv, "hI:O:o:V:R:S:p:fcqb:v")) != EOF) {
++		switch (opt) {
++		case 'I':
++			inform = optarg;
++			break;
++		case 'O':
++			outform = optarg;
++			break;
++		case 'o':
++			outname = optarg;
++			break;
++		case 'V':
++			outversion = strtol(optarg, NULL, 0);
++			break;
++		case 'R':
++			reservenum = strtol(optarg, NULL, 0);
++			break;
++		case 'S':
++			minsize = strtol(optarg, NULL, 0);
++			break;
++		case 'p':
++			padsize = strtol(optarg, NULL, 0);
++			break;
++		case 'f':
++			force = 1;
++			break;
++		case 'c':
++			check = 1;
++			break;
++		case 'q':
++			quiet++;
++			break;
++		case 'b':
++			boot_cpuid_phys = strtol(optarg, NULL, 0);
++			break;
++		case 'v':
++		    printf("Version: %s\n", DTC_VERSION);
++		    exit(0);
++		case 'h':
++		default:
++			usage();
++		}
++	}
++
++	if (argc > (optind+1))
++		usage();
++	else if (argc < (optind+1))
++		arg = "-";
++	else
++		arg = argv[optind];
++
++	/* minsize and padsize are mutually exclusive */
++	if ((minsize) && (padsize)) {
++		die("Can't set both -p and -S\n");
++	}
++
++	fprintf(stderr, "DTC: %s->%s  on file \"%s\"\n",
++		inform, outform, arg);
++
++	if (streq(inform, "dts")) {
++		bi = dt_from_source(arg);
++	} else if (streq(inform, "fs")) {
++		bi = dt_from_fs(arg);
++	} else if(streq(inform, "dtb")) {
++		inf = dtc_open_file(arg);
++		bi = dt_from_blob(inf);
++	} else {
++		die("Unknown input format \"%s\"\n", inform);
++	}
++
++	if (inf && (inf != stdin))
++		fclose(inf);
++
++	if (! bi || ! bi->dt)
++		die("Couldn't read input tree\n");
++
++	process_checks(force, bi, check, outversion, boot_cpuid_phys);
++
++	if (streq(outname, "-")) {
++		outf = stdout;
++	} else {
++		outf = fopen(outname, "w");
++		if (! outf)
++			die("Couldn't open output file %s: %s\n",
++			    outname, strerror(errno));
++	}
++
++	if (streq(outform, "dts")) {
++		dt_to_source(outf, bi);
++	} else if (streq(outform, "dtb")) {
++		dt_to_blob(outf, bi, outversion, boot_cpuid_phys);
++	} else if (streq(outform, "asm")) {
++		dt_to_asm(outf, bi, outversion, boot_cpuid_phys);
++	} else if (streq(outform, "null")) {
++		/* do nothing */
++	} else {
++		die("Unknown output format \"%s\"\n", outform);
++	}
++
++	exit(0);
++}
+diff --git a/arch/powerpc/boot/dtc-src/dtc.h b/arch/powerpc/boot/dtc-src/dtc.h
+new file mode 100644
+index 0000000..6528177
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/dtc.h
+@@ -0,0 +1,269 @@
++#ifndef _DTC_H
++#define _DTC_H
++
++/*
++ * (C) Copyright David Gibson <dwg at au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
++ */
++
++#include <stdio.h>
++#include <string.h>
++#include <stdlib.h>
++#include <stdint.h>
++#include <stdarg.h>
++#include <assert.h>
++#include <ctype.h>
++#include <errno.h>
++#include <unistd.h>
++#include <netinet/in.h>
++#include <endian.h>
++#include <byteswap.h>
++
++#include <fdt.h>
++
++#define DEFAULT_FDT_VERSION	17
++/*
++ * Command line options
++ */
++extern int quiet;		/* Level of quietness */
++extern int reservenum;		/* Number of memory reservation slots */
++extern int minsize;		/* Minimum blob size */
++extern int padsize;		/* Additional padding to blob */
++
++static inline void __attribute__((noreturn)) die(char * str, ...)
++{
++	va_list ap;
++
++	va_start(ap, str);
++	fprintf(stderr, "FATAL ERROR: ");
++	vfprintf(stderr, str, ap);
++	exit(1);
++}
++
++static inline void *xmalloc(size_t len)
++{
++	void *new = malloc(len);
++
++	if (! new)
++		die("malloc() failed\n");
++
++	return new;
++}
++
++static inline void *xrealloc(void *p, size_t len)
++{
++	void *new = realloc(p, len);
++
++	if (! new)
++		die("realloc() failed (len=%d)\n", len);
++
++	return new;
++}
++
++typedef uint8_t u8;
++typedef uint16_t u16;
++typedef uint32_t u32;
++typedef uint64_t u64;
++typedef u32 cell_t;
++
++#define cpu_to_be16(x)	htons(x)
++#define be16_to_cpu(x)	ntohs(x)
++
++#define cpu_to_be32(x)	htonl(x)
++#define be32_to_cpu(x)	ntohl(x)
++
++#if __BYTE_ORDER == __BIG_ENDIAN
++#define cpu_to_be64(x)	(x)
++#define be64_to_cpu(x)	(x)
++#else
++#define cpu_to_be64(x)	bswap_64(x)
++#define be64_to_cpu(x)	bswap_64(x)
++#endif
++
++#define streq(a, b)	(strcmp((a), (b)) == 0)
++#define strneq(a, b, n)	(strncmp((a), (b), (n)) == 0)
++
++#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
++
++/* Data blobs */
++enum markertype {
++	REF_PHANDLE,
++	REF_PATH,
++	LABEL,
 +};
 +
-+static struct irqaction cayman_action_pci2 = {
-+	.name		= "Cayman PCI2 Mux",
-+	.handler	= cayman_interrupt_pci2,
-+	.flags		= IRQF_DISABLED,
++struct  marker {
++	enum markertype type;
++	int offset;
++	char *ref;
++	struct marker *next;
 +};
 +
-+static void enable_cayman_irq(unsigned int irq)
++struct data {
++	int len;
++	char *val;
++	int asize;
++	struct marker *markers;
++};
++
++
++#define empty_data ((struct data){ /* all .members = 0 or NULL */ })
++
++#define for_each_marker(m) \
++	for (; (m); (m) = (m)->next)
++#define for_each_marker_of_type(m, t) \
++	for_each_marker(m) \
++		if ((m)->type == (t))
++
++void data_free(struct data d);
++
++struct data data_grow_for(struct data d, int xlen);
++
++struct data data_copy_mem(const char *mem, int len);
++struct data data_copy_escape_string(const char *s, int len);
++struct data data_copy_file(FILE *f, size_t len);
++
++struct data data_append_data(struct data d, const void *p, int len);
++struct data data_insert_at_marker(struct data d, struct marker *m,
++				  const void *p, int len);
++struct data data_merge(struct data d1, struct data d2);
++struct data data_append_cell(struct data d, cell_t word);
++struct data data_append_re(struct data d, const struct fdt_reserve_entry *re);
++struct data data_append_addr(struct data d, u64 addr);
++struct data data_append_byte(struct data d, uint8_t byte);
++struct data data_append_zeroes(struct data d, int len);
++struct data data_append_align(struct data d, int align);
++
++struct data data_add_marker(struct data d, enum markertype type, char *ref);
++
++int data_is_one_string(struct data d);
++
++/* DT constraints */
++
++#define MAX_PROPNAME_LEN	31
++#define MAX_NODENAME_LEN	31
++
++/* Live trees */
++struct property {
++	char *name;
++	struct data val;
++
++	struct property *next;
++
++	char *label;
++};
++
++struct node {
++	char *name;
++	struct property *proplist;
++	struct node *children;
++
++	struct node *parent;
++	struct node *next_sibling;
++
++	char *fullpath;
++	int basenamelen;
++
++	cell_t phandle;
++	int addr_cells, size_cells;
++
++	char *label;
++};
++
++#define for_each_property(n, p) \
++	for ((p) = (n)->proplist; (p); (p) = (p)->next)
++
++#define for_each_child(n, c)	\
++	for ((c) = (n)->children; (c); (c) = (c)->next_sibling)
++
++struct property *build_property(char *name, struct data val, char *label);
++struct property *chain_property(struct property *first, struct property *list);
++struct property *reverse_properties(struct property *first);
++
++struct node *build_node(struct property *proplist, struct node *children);
++struct node *name_node(struct node *node, char *name, char *label);
++struct node *chain_node(struct node *first, struct node *list);
++
++void add_property(struct node *node, struct property *prop);
++void add_child(struct node *parent, struct node *child);
++
++const char *get_unitname(struct node *node);
++struct property *get_property(struct node *node, const char *propname);
++cell_t propval_cell(struct property *prop);
++struct node *get_subnode(struct node *node, const char *nodename);
++struct node *get_node_by_path(struct node *tree, const char *path);
++struct node *get_node_by_label(struct node *tree, const char *label);
++struct node *get_node_by_phandle(struct node *tree, cell_t phandle);
++struct node *get_node_by_ref(struct node *tree, const char *ref);
++cell_t get_node_phandle(struct node *root, struct node *node);
++
++/* Boot info (tree plus memreserve information) */
++
++struct reserve_info {
++	struct fdt_reserve_entry re;
++
++	struct reserve_info *next;
++
++	char *label;
++};
++
++struct reserve_info *build_reserve_entry(u64 start, u64 len, char *label);
++struct reserve_info *chain_reserve_entry(struct reserve_info *first,
++					 struct reserve_info *list);
++struct reserve_info *add_reserve_entry(struct reserve_info *list,
++				       struct reserve_info *new);
++
++
++struct boot_info {
++	struct reserve_info *reservelist;
++	struct node *dt;		/* the device tree */
++};
++
++struct boot_info *build_boot_info(struct reserve_info *reservelist,
++				  struct node *tree);
++
++/* Checks */
++
++void process_checks(int force, struct boot_info *bi,
++		    int checkflag, int outversion, int boot_cpuid_phys);
++
++/* Flattened trees */
++
++void dt_to_blob(FILE *f, struct boot_info *bi, int version,
++		int boot_cpuid_phys);
++void dt_to_asm(FILE *f, struct boot_info *bi, int version,
++	       int boot_cpuid_phys);
++
++struct boot_info *dt_from_blob(FILE *f);
++
++/* Tree source */
++
++void dt_to_source(FILE *f, struct boot_info *bi);
++struct boot_info *dt_from_source(const char *f);
++
++/* FS trees */
++
++struct boot_info *dt_from_fs(const char *dirname);
++
++/* misc */
++
++char *join_path(const char *path, const char *name);
++void fill_fullpaths(struct node *tree, const char *prefix);
++
++#endif /* _DTC_H */
+diff --git a/arch/powerpc/boot/dtc-src/flattree.c b/arch/powerpc/boot/dtc-src/flattree.c
+new file mode 100644
+index 0000000..a7cfb84
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/flattree.c
+@@ -0,0 +1,968 @@
++/*
++ * (C) Copyright David Gibson <dwg at au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
++ */
++
++#include "dtc.h"
++
++#define FTF_FULLPATH	0x1
++#define FTF_VARALIGN	0x2
++#define FTF_NAMEPROPS	0x4
++#define FTF_BOOTCPUID	0x8
++#define FTF_STRTABSIZE	0x10
++#define FTF_STRUCTSIZE	0x20
++#define FTF_NOPS	0x40
++
++static struct version_info {
++	int version;
++	int last_comp_version;
++	int hdr_size;
++	int flags;
++} version_table[] = {
++	{1, 1, FDT_V1_SIZE,
++	 FTF_FULLPATH|FTF_VARALIGN|FTF_NAMEPROPS},
++	{2, 1, FDT_V2_SIZE,
++	 FTF_FULLPATH|FTF_VARALIGN|FTF_NAMEPROPS|FTF_BOOTCPUID},
++	{3, 1, FDT_V3_SIZE,
++	 FTF_FULLPATH|FTF_VARALIGN|FTF_NAMEPROPS|FTF_BOOTCPUID|FTF_STRTABSIZE},
++	{16, 16, FDT_V3_SIZE,
++	 FTF_BOOTCPUID|FTF_STRTABSIZE|FTF_NOPS},
++	{17, 16, FDT_V17_SIZE,
++	 FTF_BOOTCPUID|FTF_STRTABSIZE|FTF_STRUCTSIZE|FTF_NOPS},
++};
++
++struct emitter {
++	void (*cell)(void *, cell_t);
++	void (*string)(void *, char *, int);
++	void (*align)(void *, int);
++	void (*data)(void *, struct data);
++	void (*beginnode)(void *, const char *);
++	void (*endnode)(void *, const char *);
++	void (*property)(void *, const char *);
++};
++
++static void bin_emit_cell(void *e, cell_t val)
 +{
-+	unsigned long flags;
-+	unsigned long mask;
-+	unsigned int reg;
-+	unsigned char bit;
++	struct data *dtbuf = e;
 +
-+	irq -= START_EXT_IRQS;
-+	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
-+	bit = 1<<(irq % 8);
-+	local_irq_save(flags);
-+	mask = ctrl_inl(reg);
-+	mask |= bit;
-+	ctrl_outl(mask, reg);
-+	local_irq_restore(flags);
++	*dtbuf = data_append_cell(*dtbuf, val);
 +}
 +
-+void disable_cayman_irq(unsigned int irq)
++static void bin_emit_string(void *e, char *str, int len)
 +{
-+	unsigned long flags;
-+	unsigned long mask;
-+	unsigned int reg;
-+	unsigned char bit;
++	struct data *dtbuf = e;
 +
-+	irq -= START_EXT_IRQS;
-+	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
-+	bit = 1<<(irq % 8);
-+	local_irq_save(flags);
-+	mask = ctrl_inl(reg);
-+	mask &= ~bit;
-+	ctrl_outl(mask, reg);
-+	local_irq_restore(flags);
++	if (len == 0)
++		len = strlen(str);
++
++	*dtbuf = data_append_data(*dtbuf, str, len);
++	*dtbuf = data_append_byte(*dtbuf, '\0');
 +}
 +
-+static void ack_cayman_irq(unsigned int irq)
++static void bin_emit_align(void *e, int a)
 +{
-+	disable_cayman_irq(irq);
++	struct data *dtbuf = e;
++
++	*dtbuf = data_append_align(*dtbuf, a);
 +}
 +
-+static void end_cayman_irq(unsigned int irq)
++static void bin_emit_data(void *e, struct data d)
 +{
-+	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-+		enable_cayman_irq(irq);
++	struct data *dtbuf = e;
++
++	*dtbuf = data_append_data(*dtbuf, d.val, d.len);
 +}
 +
-+static unsigned int startup_cayman_irq(unsigned int irq)
++static void bin_emit_beginnode(void *e, const char *label)
 +{
-+	enable_cayman_irq(irq);
-+	return 0; /* never anything pending */
++	bin_emit_cell(e, FDT_BEGIN_NODE);
 +}
 +
-+static void shutdown_cayman_irq(unsigned int irq)
++static void bin_emit_endnode(void *e, const char *label)
 +{
-+	disable_cayman_irq(irq);
++	bin_emit_cell(e, FDT_END_NODE);
 +}
 +
-+struct hw_interrupt_type cayman_irq_type = {
-+	.typename	= "Cayman-IRQ",
-+	.startup	= startup_cayman_irq,
-+	.shutdown	= shutdown_cayman_irq,
-+	.enable		= enable_cayman_irq,
-+	.disable	= disable_cayman_irq,
-+	.ack		= ack_cayman_irq,
-+	.end		= end_cayman_irq,
++static void bin_emit_property(void *e, const char *label)
++{
++	bin_emit_cell(e, FDT_PROP);
++}
++
++static struct emitter bin_emitter = {
++	.cell = bin_emit_cell,
++	.string = bin_emit_string,
++	.align = bin_emit_align,
++	.data = bin_emit_data,
++	.beginnode = bin_emit_beginnode,
++	.endnode = bin_emit_endnode,
++	.property = bin_emit_property,
 +};
 +
-+int cayman_irq_demux(int evt)
++static void emit_label(FILE *f, const char *prefix, const char *label)
 +{
-+	int irq = intc_evt_to_irq[evt];
++	fprintf(f, "\t.globl\t%s_%s\n", prefix, label);
++	fprintf(f, "%s_%s:\n", prefix, label);
++	fprintf(f, "_%s_%s:\n", prefix, label);
++}
 +
-+	if (irq == SMSC_IRQ) {
-+		unsigned long status;
-+		int i;
++static void emit_offset_label(FILE *f, const char *label, int offset)
++{
++	fprintf(f, "\t.globl\t%s\n", label);
++	fprintf(f, "%s\t= . + %d\n", label, offset);
++}
 +
-+		status = ctrl_inl(EPLD_STATUS_BASE) &
-+			 ctrl_inl(EPLD_MASK_BASE) & 0xff;
-+		if (status == 0) {
-+			irq = -1;
-+		} else {
-+			for (i=0; i<8; i++) {
-+				if (status & (1<<i))
-+					break;
-+			}
-+			irq = START_EXT_IRQS + i;
++static void asm_emit_cell(void *e, cell_t val)
++{
++	FILE *f = e;
++
++	fprintf(f, "\t.long\t0x%x\n", val);
++}
++
++static void asm_emit_string(void *e, char *str, int len)
++{
++	FILE *f = e;
++	char c = 0;
++
++	if (len != 0) {
++		/* XXX: ewww */
++		c = str[len];
++		str[len] = '\0';
++	}
++
++	fprintf(f, "\t.string\t\"%s\"\n", str);
++
++	if (len != 0) {
++		str[len] = c;
++	}
++}
++
++static void asm_emit_align(void *e, int a)
++{
++	FILE *f = e;
++
++	fprintf(f, "\t.balign\t%d\n", a);
++}
++
++static void asm_emit_data(void *e, struct data d)
++{
++	FILE *f = e;
++	int off = 0;
++	struct marker *m;
++
++	m = d.markers;
++	while (m) {
++		if (m->type == LABEL)
++			emit_offset_label(f, m->ref, m->offset);
++		m = m->next;
++	}
++
++	while ((d.len - off) >= sizeof(u32)) {
++		fprintf(f, "\t.long\t0x%x\n",
++			be32_to_cpu(*((u32 *)(d.val+off))));
++		off += sizeof(u32);
++	}
++
++	if ((d.len - off) >= sizeof(u16)) {
++		fprintf(f, "\t.short\t0x%hx\n",
++			be16_to_cpu(*((u16 *)(d.val+off))));
++		off += sizeof(u16);
++	}
++
++	if ((d.len - off) >= 1) {
++		fprintf(f, "\t.byte\t0x%hhx\n", d.val[off]);
++		off += 1;
++	}
++
++	assert(off == d.len);
++}
++
++static void asm_emit_beginnode(void *e, const char *label)
++{
++	FILE *f = e;
++
++	if (label) {
++		fprintf(f, "\t.globl\t%s\n", label);
++		fprintf(f, "%s:\n", label);
++	}
++	fprintf(f, "\t.long\tFDT_BEGIN_NODE\n");
++}
++
++static void asm_emit_endnode(void *e, const char *label)
++{
++	FILE *f = e;
++
++	fprintf(f, "\t.long\tFDT_END_NODE\n");
++	if (label) {
++		fprintf(f, "\t.globl\t%s_end\n", label);
++		fprintf(f, "%s_end:\n", label);
++	}
++}
++
++static void asm_emit_property(void *e, const char *label)
++{
++	FILE *f = e;
++
++	if (label) {
++		fprintf(f, "\t.globl\t%s\n", label);
++		fprintf(f, "%s:\n", label);
++	}
++	fprintf(f, "\t.long\tFDT_PROP\n");
++}
++
++static struct emitter asm_emitter = {
++	.cell = asm_emit_cell,
++	.string = asm_emit_string,
++	.align = asm_emit_align,
++	.data = asm_emit_data,
++	.beginnode = asm_emit_beginnode,
++	.endnode = asm_emit_endnode,
++	.property = asm_emit_property,
++};
++
++static int stringtable_insert(struct data *d, const char *str)
++{
++	int i;
++
++	/* FIXME: do this more efficiently? */
++
++	for (i = 0; i < d->len; i++) {
++		if (streq(str, d->val + i))
++			return i;
++	}
++
++	*d = data_append_data(*d, str, strlen(str)+1);
++	return i;
++}
++
++static void flatten_tree(struct node *tree, struct emitter *emit,
++			 void *etarget, struct data *strbuf,
++			 struct version_info *vi)
++{
++	struct property *prop;
++	struct node *child;
++	int seen_name_prop = 0;
++
++	emit->beginnode(etarget, tree->label);
++
++	if (vi->flags & FTF_FULLPATH)
++		emit->string(etarget, tree->fullpath, 0);
++	else
++		emit->string(etarget, tree->name, 0);
++
++	emit->align(etarget, sizeof(cell_t));
++
++	for_each_property(tree, prop) {
++		int nameoff;
++
++		if (streq(prop->name, "name"))
++			seen_name_prop = 1;
++
++		nameoff = stringtable_insert(strbuf, prop->name);
++
++		emit->property(etarget, prop->label);
++		emit->cell(etarget, prop->val.len);
++		emit->cell(etarget, nameoff);
++
++		if ((vi->flags & FTF_VARALIGN) && (prop->val.len >= 8))
++			emit->align(etarget, 8);
++
++		emit->data(etarget, prop->val);
++		emit->align(etarget, sizeof(cell_t));
++	}
++
++	if ((vi->flags & FTF_NAMEPROPS) && !seen_name_prop) {
++		emit->property(etarget, NULL);
++		emit->cell(etarget, tree->basenamelen+1);
++		emit->cell(etarget, stringtable_insert(strbuf, "name"));
++
++		if ((vi->flags & FTF_VARALIGN) && ((tree->basenamelen+1) >= 8))
++			emit->align(etarget, 8);
++
++		emit->string(etarget, tree->name, tree->basenamelen);
++		emit->align(etarget, sizeof(cell_t));
++	}
++
++	for_each_child(tree, child) {
++		flatten_tree(child, emit, etarget, strbuf, vi);
++	}
++
++	emit->endnode(etarget, tree->label);
++}
++
++static struct data flatten_reserve_list(struct reserve_info *reservelist,
++				 struct version_info *vi)
++{
++	struct reserve_info *re;
++	struct data d = empty_data;
++	static struct fdt_reserve_entry null_re = {0,0};
++	int    j;
++
++	for (re = reservelist; re; re = re->next) {
++		d = data_append_re(d, &re->re);
++	}
++	/*
++	 * Add additional reserved slots if the user asked for them.
++	 */
++	for (j = 0; j < reservenum; j++) {
++		d = data_append_re(d, &null_re);
++	}
++
++	return d;
++}
++
++static void make_fdt_header(struct fdt_header *fdt,
++			    struct version_info *vi,
++			    int reservesize, int dtsize, int strsize,
++			    int boot_cpuid_phys)
++{
++	int reserve_off;
++
++	reservesize += sizeof(struct fdt_reserve_entry);
++
++	memset(fdt, 0xff, sizeof(*fdt));
++
++	fdt->magic = cpu_to_be32(FDT_MAGIC);
++	fdt->version = cpu_to_be32(vi->version);
++	fdt->last_comp_version = cpu_to_be32(vi->last_comp_version);
++
++	/* Reserve map should be doubleword aligned */
++	reserve_off = ALIGN(vi->hdr_size, 8);
++
++	fdt->off_mem_rsvmap = cpu_to_be32(reserve_off);
++	fdt->off_dt_struct = cpu_to_be32(reserve_off + reservesize);
++	fdt->off_dt_strings = cpu_to_be32(reserve_off + reservesize
++					  + dtsize);
++	fdt->totalsize = cpu_to_be32(reserve_off + reservesize + dtsize + strsize);
++
++	if (vi->flags & FTF_BOOTCPUID)
++		fdt->boot_cpuid_phys = cpu_to_be32(boot_cpuid_phys);
++	if (vi->flags & FTF_STRTABSIZE)
++		fdt->size_dt_strings = cpu_to_be32(strsize);
++	if (vi->flags & FTF_STRUCTSIZE)
++		fdt->size_dt_struct = cpu_to_be32(dtsize);
++}
++
++void dt_to_blob(FILE *f, struct boot_info *bi, int version,
++		int boot_cpuid_phys)
++{
++	struct version_info *vi = NULL;
++	int i;
++	struct data blob       = empty_data;
++	struct data reservebuf = empty_data;
++	struct data dtbuf      = empty_data;
++	struct data strbuf     = empty_data;
++	struct fdt_header fdt;
++	int padlen = 0;
++
++	for (i = 0; i < ARRAY_SIZE(version_table); i++) {
++		if (version_table[i].version == version)
++			vi = &version_table[i];
++	}
++	if (!vi)
++		die("Unknown device tree blob version %d\n", version);
++
++	flatten_tree(bi->dt, &bin_emitter, &dtbuf, &strbuf, vi);
++	bin_emit_cell(&dtbuf, FDT_END);
++
++	reservebuf = flatten_reserve_list(bi->reservelist, vi);
++
++	/* Make header */
++	make_fdt_header(&fdt, vi, reservebuf.len, dtbuf.len, strbuf.len,
++			boot_cpuid_phys);
++
++	/*
++	 * If the user asked for more space than is used, adjust the totalsize.
++	 */
++	if (minsize > 0) {
++		padlen = minsize - be32_to_cpu(fdt.totalsize);
++		if ((padlen < 0) && (quiet < 1))
++			fprintf(stderr,
++				"Warning: blob size %d >= minimum size %d\n",
++				be32_to_cpu(fdt.totalsize), minsize);
++	}
++
++	if (padsize > 0)
++		padlen = padsize;
++
++	if (padlen > 0) {
++		int tsize = be32_to_cpu(fdt.totalsize);
++		tsize += padlen;
++		fdt.totalsize = cpu_to_be32(tsize);
++	}
++
++	/*
++	 * Assemble the blob: start with the header, add with alignment
++	 * the reserve buffer, add the reserve map terminating zeroes,
++	 * the device tree itself, and finally the strings.
++	 */
++	blob = data_append_data(blob, &fdt, sizeof(fdt));
++	blob = data_append_align(blob, 8);
++	blob = data_merge(blob, reservebuf);
++	blob = data_append_zeroes(blob, sizeof(struct fdt_reserve_entry));
++	blob = data_merge(blob, dtbuf);
++	blob = data_merge(blob, strbuf);
++
++	/*
++	 * If the user asked for more space than is used, pad out the blob.
++	 */
++	if (padlen > 0)
++		blob = data_append_zeroes(blob, padlen);
++
++	fwrite(blob.val, blob.len, 1, f);
++
++	if (ferror(f))
++		die("Error writing device tree blob: %s\n", strerror(errno));
++
++	/*
++	 * data_merge() frees the right-hand element so only the blob
++	 * remains to be freed.
++	 */
++	data_free(blob);
++}
++
++static void dump_stringtable_asm(FILE *f, struct data strbuf)
++{
++	const char *p;
++	int len;
++
++	p = strbuf.val;
++
++	while (p < (strbuf.val + strbuf.len)) {
++		len = strlen(p);
++		fprintf(f, "\t.string \"%s\"\n", p);
++		p += len+1;
++	}
++}
++
++void dt_to_asm(FILE *f, struct boot_info *bi, int version, int boot_cpuid_phys)
++{
++	struct version_info *vi = NULL;
++	int i;
++	struct data strbuf = empty_data;
++	struct reserve_info *re;
++	const char *symprefix = "dt";
++
++	for (i = 0; i < ARRAY_SIZE(version_table); i++) {
++		if (version_table[i].version == version)
++			vi = &version_table[i];
++	}
++	if (!vi)
++		die("Unknown device tree blob version %d\n", version);
++
++	fprintf(f, "/* autogenerated by dtc, do not edit */\n\n");
++	fprintf(f, "#define FDT_MAGIC 0x%x\n", FDT_MAGIC);
++	fprintf(f, "#define FDT_BEGIN_NODE 0x%x\n", FDT_BEGIN_NODE);
++	fprintf(f, "#define FDT_END_NODE 0x%x\n", FDT_END_NODE);
++	fprintf(f, "#define FDT_PROP 0x%x\n", FDT_PROP);
++	fprintf(f, "#define FDT_END 0x%x\n", FDT_END);
++	fprintf(f, "\n");
++
++	emit_label(f, symprefix, "blob_start");
++	emit_label(f, symprefix, "header");
++	fprintf(f, "\t.long\tFDT_MAGIC\t\t\t\t/* magic */\n");
++	fprintf(f, "\t.long\t_%s_blob_abs_end - _%s_blob_start\t/* totalsize */\n",
++		symprefix, symprefix);
++	fprintf(f, "\t.long\t_%s_struct_start - _%s_blob_start\t/* off_dt_struct */\n",
++		symprefix, symprefix);
++	fprintf(f, "\t.long\t_%s_strings_start - _%s_blob_start\t/* off_dt_strings */\n",
++		symprefix, symprefix);
++	fprintf(f, "\t.long\t_%s_reserve_map - _%s_blob_start\t/* off_mem_rsvmap */\n",
++		symprefix, symprefix);
++	fprintf(f, "\t.long\t%d\t\t\t\t\t/* version */\n", vi->version);
++	fprintf(f, "\t.long\t%d\t\t\t\t\t/* last_comp_version */\n",
++		vi->last_comp_version);
++
++	if (vi->flags & FTF_BOOTCPUID)
++		fprintf(f, "\t.long\t%i\t\t\t\t\t/* boot_cpuid_phys */\n",
++			boot_cpuid_phys);
++
++	if (vi->flags & FTF_STRTABSIZE)
++		fprintf(f, "\t.long\t_%s_strings_end - _%s_strings_start\t/* size_dt_strings */\n",
++			symprefix, symprefix);
++
++	if (vi->flags & FTF_STRUCTSIZE)
++		fprintf(f, "\t.long\t_%s_struct_end - _%s_struct_start\t/* size_dt_struct */\n",
++			symprefix, symprefix);
++
++	/*
++	 * Reserve map entries.
++	 * Align the reserve map to a doubleword boundary.
++	 * Each entry is an (address, size) pair of u64 values.
++	 * Always supply a zero-sized termination entry.
++	 */
++	asm_emit_align(f, 8);
++	emit_label(f, symprefix, "reserve_map");
++
++	fprintf(f, "/* Memory reserve map from source file */\n");
++
++	/*
++	 * Use .long on high and low halves of u64s to avoid .quad
++	 * as it appears .quad isn't available in some assemblers.
++	 */
++	for (re = bi->reservelist; re; re = re->next) {
++		if (re->label) {
++			fprintf(f, "\t.globl\t%s\n", re->label);
++			fprintf(f, "%s:\n", re->label);
 +		}
++		fprintf(f, "\t.long\t0x%08x, 0x%08x\n",
++			(unsigned int)(re->re.address >> 32),
++			(unsigned int)(re->re.address & 0xffffffff));
++		fprintf(f, "\t.long\t0x%08x, 0x%08x\n",
++			(unsigned int)(re->re.size >> 32),
++			(unsigned int)(re->re.size & 0xffffffff));
++	}
++	for (i = 0; i < reservenum; i++) {
++		fprintf(f, "\t.long\t0, 0\n\t.long\t0, 0\n");
 +	}
 +
-+	if (irq == PCI2_IRQ) {
-+		unsigned long status;
-+		int i;
++	fprintf(f, "\t.long\t0, 0\n\t.long\t0, 0\n");
 +
-+		status = ctrl_inl(EPLD_STATUS_BASE + 3 * sizeof(u32)) &
-+			 ctrl_inl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff;
-+		if (status == 0) {
-+			irq = -1;
-+		} else {
-+			for (i=0; i<8; i++) {
-+				if (status & (1<<i))
-+					break;
++	emit_label(f, symprefix, "struct_start");
++	flatten_tree(bi->dt, &asm_emitter, f, &strbuf, vi);
++	fprintf(f, "\t.long\tFDT_END\n");
++	emit_label(f, symprefix, "struct_end");
++
++	emit_label(f, symprefix, "strings_start");
++	dump_stringtable_asm(f, strbuf);
++	emit_label(f, symprefix, "strings_end");
++
++	emit_label(f, symprefix, "blob_end");
++
++	/*
++	 * If the user asked for more space than is used, pad it out.
++	 */
++	if (minsize > 0) {
++		fprintf(f, "\t.space\t%d - (_%s_blob_end - _%s_blob_start), 0\n",
++			minsize, symprefix, symprefix);
++	}
++	if (padsize > 0) {
++		fprintf(f, "\t.space\t%d, 0\n", padsize);
++	}
++	emit_label(f, symprefix, "blob_abs_end");
++
++	data_free(strbuf);
++}
++
++struct inbuf {
++	char *base, *limit, *ptr;
++};
++
++static void inbuf_init(struct inbuf *inb, void *base, void *limit)
++{
++	inb->base = base;
++	inb->limit = limit;
++	inb->ptr = inb->base;
++}
++
++static void flat_read_chunk(struct inbuf *inb, void *p, int len)
++{
++	if ((inb->ptr + len) > inb->limit)
++		die("Premature end of data parsing flat device tree\n");
++
++	memcpy(p, inb->ptr, len);
++
++	inb->ptr += len;
++}
++
++static u32 flat_read_word(struct inbuf *inb)
++{
++	u32 val;
++
++	assert(((inb->ptr - inb->base) % sizeof(val)) == 0);
++
++	flat_read_chunk(inb, &val, sizeof(val));
++
++	return be32_to_cpu(val);
++}
++
++static void flat_realign(struct inbuf *inb, int align)
++{
++	int off = inb->ptr - inb->base;
++
++	inb->ptr = inb->base + ALIGN(off, align);
++	if (inb->ptr > inb->limit)
++		die("Premature end of data parsing flat device tree\n");
++}
++
++static char *flat_read_string(struct inbuf *inb)
++{
++	int len = 0;
++	const char *p = inb->ptr;
++	char *str;
++
++	do {
++		if (p >= inb->limit)
++			die("Premature end of data parsing flat device tree\n");
++		len++;
++	} while ((*p++) != '\0');
++
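++	/*
++	 * The loop above guarantees a NUL before inb->limit and len
++	 * includes it, so the strdup() and pointer advance stay in bounds.
++	 */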
++	str = strdup(inb->ptr);
++
++	inb->ptr += len;
++
++	flat_realign(inb, sizeof(u32));
++
++	return str;
++}
++
++static struct data flat_read_data(struct inbuf *inb, int len)
++{
++	struct data d = empty_data;
++
++	if (len == 0)
++		return empty_data;
++
++	d = data_grow_for(d, len);
++	d.len = len;
++
++	flat_read_chunk(inb, d.val, len);
++
++	flat_realign(inb, sizeof(u32));
++
++	return d;
++}
++
++static char *flat_read_stringtable(struct inbuf *inb, int offset)
++{
++	const char *p;
++
++	p = inb->base + offset;
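++	/*
++	 * Walk from the offset to the next NUL, dying if the entry would
++	 * run past the end of the blob, before duplicating the string.
++	 */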
++	while (1) {
++		if (p >= inb->limit || p < inb->base)
++			die("String offset %d overruns string table\n",
++			    offset);
++
++		if (*p == '\0')
++			break;
++
++		p++;
++	}
++
++	return strdup(inb->base + offset);
++}
++
++static struct property *flat_read_property(struct inbuf *dtbuf,
++					   struct inbuf *strbuf, int flags)
++{
++	u32 proplen, stroff;
++	char *name;
++	struct data val;
++
++	proplen = flat_read_word(dtbuf);
++	stroff = flat_read_word(dtbuf);
++
++	name = flat_read_stringtable(strbuf, stroff);
++
++	if ((flags & FTF_VARALIGN) && (proplen >= 8))
++		flat_realign(dtbuf, 8);
++
++	val = flat_read_data(dtbuf, proplen);
++
++	return build_property(name, val, NULL);
++}
++
++
++static struct reserve_info *flat_read_mem_reserve(struct inbuf *inb)
++{
++	struct reserve_info *reservelist = NULL;
++	struct reserve_info *new;
++	const char *p;
++	struct fdt_reserve_entry re;
++
++	/*
++	 * Each entry is a pair of u64 (addr, size) values, i.e. 4 cell_t's.
++	 * The list is terminated by an entry whose size is zero; build a
++	 * reserve_info record for every entry before the terminator.
++	 */
++	p = inb->ptr;
++	while (1) {
++		flat_read_chunk(inb, &re, sizeof(re));
++		re.address  = be64_to_cpu(re.address);
++		re.size = be64_to_cpu(re.size);
++		if (re.size == 0)
++			break;
++
++		new = build_reserve_entry(re.address, re.size, NULL);
++		reservelist = add_reserve_entry(reservelist, new);
++	}
++
++	return reservelist;
++}
++
++
++static char *nodename_from_path(const char *ppath, const char *cpath)
++{
++	const char *lslash;
++	int plen;
++
++	lslash = strrchr(cpath, '/');
++	if (! lslash)
++		return NULL;
++
++	plen = lslash - cpath;
++
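++	/*
++	 * plen is the length of the parent-path prefix implied by the
++	 * child's full path; the special cases below handle the root.
++	 */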
++	if (streq(cpath, "/") && streq(ppath, ""))
++		return "";
++
++	if ((plen == 0) && streq(ppath, "/"))
++		return strdup(lslash+1);
++
++	if (! strneq(ppath, cpath, plen))
++		return NULL;
++
++	return strdup(lslash+1);
++}
++
++static const char PROPCHAR[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789,._+*#?-";
++static const char UNITCHAR[] = "0123456789abcdef,";
++
++static int check_node_name(const char *name)
++{
++	const char *atpos;
++	int basenamelen;
++
++	atpos = strrchr(name, '@');
++
++	if (atpos)
++		basenamelen = atpos - name;
++	else
++		basenamelen = strlen(name);
++
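++	/*
++	 * The base name may use only PROPCHAR characters; any unit address
++	 * following '@' may use only UNITCHAR characters.
++	 */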
++	if (strspn(name, PROPCHAR) < basenamelen)
++		return -1;
++
++	if (atpos
++	    && ((basenamelen + 1 + strspn(atpos+1, UNITCHAR)) < strlen(name)))
++		return -1;
++
++	return basenamelen;
++}
++
++static struct node *unflatten_tree(struct inbuf *dtbuf,
++				   struct inbuf *strbuf,
++				   const char *parent_path, int flags)
++{
++	struct node *node;
++	u32 val;
++
++	node = build_node(NULL, NULL);
++
++	if (flags & FTF_FULLPATH) {
++		node->fullpath = flat_read_string(dtbuf);
++		node->name = nodename_from_path(parent_path, node->fullpath);
++
++		if (! node->name)
++			die("Path \"%s\" is not valid as a child of \"%s\"\n",
++			    node->fullpath, parent_path);
++	} else {
++		node->name = flat_read_string(dtbuf);
++		node->fullpath = join_path(parent_path, node->name);
++	}
++
++	node->basenamelen = check_node_name(node->name);
++	if (node->basenamelen < 0) {
++		fprintf(stderr, "Warning: \"%s\" has incorrect format\n", node->name);
++	}
++
++	do {
++		struct property *prop;
++		struct node *child;
++
++		val = flat_read_word(dtbuf);
++		switch (val) {
++		case FDT_PROP:
++			if (node->children)
++				fprintf(stderr, "Warning: Flat tree input has "
++					"subnodes preceding a property.\n");
++			prop = flat_read_property(dtbuf, strbuf, flags);
++			add_property(node, prop);
++			break;
++
++		case FDT_BEGIN_NODE:
++			child = unflatten_tree(dtbuf,strbuf, node->fullpath,
++					       flags);
++			add_child(node, child);
++			break;
++
++		case FDT_END_NODE:
++			break;
++
++		case FDT_END:
++			die("Premature FDT_END in device tree blob\n");
++			break;
++
++		case FDT_NOP:
++			if (!(flags & FTF_NOPS))
++				fprintf(stderr, "Warning: NOP tag found in flat tree"
++					" version <16\n");
++
++			/* Ignore */
++			break;
++
++		default:
++			die("Invalid opcode word %08x in device tree blob\n",
++			    val);
++		}
++	} while (val != FDT_END_NODE);
++
++	return node;
++}
++
++
++struct boot_info *dt_from_blob(FILE *f)
++{
++	u32 magic, totalsize, version, size_str, size_dt;
++	u32 off_dt, off_str, off_mem_rsvmap;
++	int rc;
++	char *blob;
++	struct fdt_header *fdt;
++	char *p;
++	struct inbuf dtbuf, strbuf;
++	struct inbuf memresvbuf;
++	int sizeleft;
++	struct reserve_info *reservelist;
++	struct node *tree;
++	u32 val;
++	int flags = 0;
++
++	rc = fread(&magic, sizeof(magic), 1, f);
++	if (ferror(f))
++		die("Error reading DT blob magic number: %s\n",
++		    strerror(errno));
++	if (rc < 1) {
++		if (feof(f))
++			die("EOF reading DT blob magic number\n");
++		else
++			die("Mysterious short read reading magic number\n");
++	}
++
++	magic = be32_to_cpu(magic);
++	if (magic != FDT_MAGIC)
++		die("Blob has incorrect magic number\n");
++
++	rc = fread(&totalsize, sizeof(totalsize), 1, f);
++	if (ferror(f))
++		die("Error reading DT blob size: %s\n", strerror(errno));
++	if (rc < 1) {
++		if (feof(f))
++			die("EOF reading DT blob size\n");
++		else
++			die("Mysterious short read reading blob size\n");
++	}
++
++	totalsize = be32_to_cpu(totalsize);
++	if (totalsize < FDT_V1_SIZE)
++		die("DT blob size (%d) is too small\n", totalsize);
++
++	blob = xmalloc(totalsize);
++
++	fdt = (struct fdt_header *)blob;
++	fdt->magic = cpu_to_be32(magic);
++	fdt->totalsize = cpu_to_be32(totalsize);
++
++	sizeleft = totalsize - sizeof(magic) - sizeof(totalsize);
++	p = blob + sizeof(magic)  + sizeof(totalsize);
++
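++	/*
++	 * The magic and totalsize words have already been consumed, so
++	 * read the remaining totalsize - 8 bytes of the blob here.
++	 */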
++	while (sizeleft) {
++		if (feof(f))
++			die("EOF before reading %d bytes of DT blob\n",
++			    totalsize);
++
++		rc = fread(p, 1, sizeleft, f);
++		if (ferror(f))
++			die("Error reading DT blob: %s\n",
++			    strerror(errno));
++
++		sizeleft -= rc;
++		p += rc;
++	}
++
++	off_dt = be32_to_cpu(fdt->off_dt_struct);
++	off_str = be32_to_cpu(fdt->off_dt_strings);
++	off_mem_rsvmap = be32_to_cpu(fdt->off_mem_rsvmap);
++	version = be32_to_cpu(fdt->version);
++
++	fprintf(stderr, "\tmagic:\t\t\t0x%x\n", magic);
++	fprintf(stderr, "\ttotalsize:\t\t%d\n", totalsize);
++	fprintf(stderr, "\toff_dt_struct:\t\t0x%x\n", off_dt);
++	fprintf(stderr, "\toff_dt_strings:\t\t0x%x\n", off_str);
++	fprintf(stderr, "\toff_mem_rsvmap:\t\t0x%x\n", off_mem_rsvmap);
++	fprintf(stderr, "\tversion:\t\t0x%x\n", version );
++	fprintf(stderr, "\tlast_comp_version:\t0x%x\n",
++		be32_to_cpu(fdt->last_comp_version));
++
++	if (off_mem_rsvmap >= totalsize)
++		die("Mem Reserve structure offset exceeds total size\n");
++
++	if (off_dt >= totalsize)
++		die("DT structure offset exceeds total size\n");
++
++	if (off_str > totalsize)
++		die("String table offset exceeds total size\n");
++
++	if (version >= 2)
++		fprintf(stderr, "\tboot_cpuid_phys:\t0x%x\n",
++			be32_to_cpu(fdt->boot_cpuid_phys));
++
++	size_str = -1;
++	if (version >= 3) {
++		size_str = be32_to_cpu(fdt->size_dt_strings);
++		fprintf(stderr, "\tsize_dt_strings:\t%d\n", size_str);
++		if (off_str+size_str > totalsize)
++			die("String table extends past total size\n");
++	}
++
++	if (version >= 17) {
++		size_dt = be32_to_cpu(fdt->size_dt_struct);
++		fprintf(stderr, "\tsize_dt_struct:\t\t%d\n", size_dt);
++		if (off_dt+size_dt > totalsize)
++			die("Structure block extends past total size\n");
++	}
++
++	if (version < 16) {
++		flags |= FTF_FULLPATH | FTF_NAMEPROPS | FTF_VARALIGN;
++	} else {
++		flags |= FTF_NOPS;
++	}
++
++	inbuf_init(&memresvbuf,
++		   blob + off_mem_rsvmap, blob + totalsize);
++	inbuf_init(&dtbuf, blob + off_dt, blob + totalsize);
++	if (size_str >= 0)
++		inbuf_init(&strbuf, blob + off_str, blob + off_str + size_str);
++	else
++		inbuf_init(&strbuf, blob + off_str, blob + totalsize);
++
++	reservelist = flat_read_mem_reserve(&memresvbuf);
++
++	val = flat_read_word(&dtbuf);
++
++	if (val != FDT_BEGIN_NODE)
++		die("Device tree blob doesn't begin with FDT_BEGIN_NODE (begins with 0x%08x)\n", val);
++
++	tree = unflatten_tree(&dtbuf, &strbuf, "", flags);
++
++	val = flat_read_word(&dtbuf);
++	if (val != FDT_END)
++		die("Device tree blob doesn't end with FDT_END\n");
++
++	free(blob);
++
++	return build_boot_info(reservelist, tree);
++}
+diff --git a/arch/powerpc/boot/dtc-src/fstree.c b/arch/powerpc/boot/dtc-src/fstree.c
+new file mode 100644
+index 0000000..2a160a4
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/fstree.c
+@@ -0,0 +1,94 @@
++/*
++ * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
++ */
++
++#include "dtc.h"
++
++#include <dirent.h>
++#include <sys/stat.h>
++
++static struct node *read_fstree(const char *dirname)
++{
++	DIR *d;
++	struct dirent *de;
++	struct stat st;
++	struct node *tree;
++
++	d = opendir(dirname);
++	if (! d)
++		die("opendir(): %s\n", strerror(errno));
++
++	tree = build_node(NULL, NULL);
++
++	while ((de = readdir(d)) != NULL) {
++		char *tmpnam;
++
++		if (streq(de->d_name, ".")
++		    || streq(de->d_name, ".."))
++			continue;
++
++		tmpnam = join_path(dirname, de->d_name);
++
++		if (lstat(tmpnam, &st) < 0)
++			die("stat(%s): %s\n", tmpnam, strerror(errno));
++
++		if (S_ISREG(st.st_mode)) {
++			struct property *prop;
++			FILE *pfile;
++
++			pfile = fopen(tmpnam, "r");
++			if (! pfile) {
++				fprintf(stderr,
++					"WARNING: Cannot open %s: %s\n",
++					tmpnam, strerror(errno));
++			} else {
++				prop = build_property(strdup(de->d_name),
++						      data_copy_file(pfile,
++								     st.st_size),
++						      NULL);
++				add_property(tree, prop);
++				fclose(pfile);
 +			}
-+			irq = START_EXT_IRQS + (3 * 8) + i;
++		} else if (S_ISDIR(st.st_mode)) {
++			struct node *newchild;
++
++			newchild = read_fstree(tmpnam);
++			newchild = name_node(newchild, strdup(de->d_name),
++					     NULL);
++			add_child(tree, newchild);
 +		}
++
++		free(tmpnam);
 +	}
 +
-+	return irq;
++	return tree;
 +}
 +
-+#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
-+int cayman_irq_describe(char* p, int irq)
++struct boot_info *dt_from_fs(const char *dirname)
 +{
-+	if (irq < NR_INTC_IRQS) {
-+		return intc_irq_describe(p, irq);
-+	} else if (irq < NR_INTC_IRQS + 8) {
-+		return sprintf(p, "(SMSC %d)", irq - NR_INTC_IRQS);
-+	} else if ((irq >= NR_INTC_IRQS + 24) && (irq < NR_INTC_IRQS + 32)) {
-+		return sprintf(p, "(PCI2 %d)", irq - (NR_INTC_IRQS + 24));
++	struct node *tree;
++
++	tree = read_fstree(dirname);
++	tree = name_node(tree, "", NULL);
++
++	fill_fullpaths(tree, "");
++
++	return build_boot_info(NULL, tree);
++}
++
+diff --git a/arch/powerpc/boot/dtc-src/livetree.c b/arch/powerpc/boot/dtc-src/livetree.c
+new file mode 100644
+index 0000000..6ba0846
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/livetree.c
+@@ -0,0 +1,305 @@
++/*
++ * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
++ */
++
++#include "dtc.h"
++
++/*
++ * Tree building functions
++ */
++
++struct property *build_property(char *name, struct data val, char *label)
++{
++	struct property *new = xmalloc(sizeof(*new));
++
++	new->name = name;
++	new->val = val;
++
++	new->next = NULL;
++
++	new->label = label;
++
++	return new;
++}
++
++struct property *chain_property(struct property *first, struct property *list)
++{
++	assert(first->next == NULL);
++
++	first->next = list;
++	return first;
++}
++
++struct property *reverse_properties(struct property *first)
++{
++	struct property *p = first;
++	struct property *head = NULL;
++	struct property *next;
++
++	while (p) {
++		next = p->next;
++		p->next = head;
++		head = p;
++		p = next;
++	}
++	return head;
++}
++
++struct node *build_node(struct property *proplist, struct node *children)
++{
++	struct node *new = xmalloc(sizeof(*new));
++	struct node *child;
++
++	memset(new, 0, sizeof(*new));
++
++	new->proplist = reverse_properties(proplist);
++	new->children = children;
++
++	for_each_child(new, child) {
++		child->parent = new;
 +	}
 +
-+	return 0;
++	return new;
 +}
-+#endif
 +
-+void init_cayman_irq(void)
++struct node *name_node(struct node *node, char *name, char * label)
 +{
-+	int i;
++	assert(node->name == NULL);
 +
-+	epld_virt = onchip_remap(EPLD_BASE, 1024, "EPLD");
-+	if (!epld_virt) {
-+		printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n");
-+		return;
++	node->name = name;
++
++	node->label = label;
++
++	return node;
++}
++
++struct node *chain_node(struct node *first, struct node *list)
++{
++	assert(first->next_sibling == NULL);
++
++	first->next_sibling = list;
++	return first;
++}
++
++void add_property(struct node *node, struct property *prop)
++{
++	struct property **p;
++
++	prop->next = NULL;
++
++	p = &node->proplist;
++	while (*p)
++		p = &((*p)->next);
++
++	*p = prop;
++}
++
++void add_child(struct node *parent, struct node *child)
++{
++	struct node **p;
++
++	child->next_sibling = NULL;
++
++	p = &parent->children;
++	while (*p)
++		p = &((*p)->next_sibling);
++
++	*p = child;
++}
++
++struct reserve_info *build_reserve_entry(u64 address, u64 size, char *label)
++{
++	struct reserve_info *new = xmalloc(sizeof(*new));
++
++	new->re.address = address;
++	new->re.size = size;
++
++	new->next = NULL;
++
++	new->label = label;
++
++	return new;
++}
++
++struct reserve_info *chain_reserve_entry(struct reserve_info *first,
++					struct reserve_info *list)
++{
++	assert(first->next == NULL);
++
++	first->next = list;
++	return first;
++}
++
++struct reserve_info *add_reserve_entry(struct reserve_info *list,
++				      struct reserve_info *new)
++{
++	struct reserve_info *last;
++
++	new->next = NULL;
++
++	if (! list)
++		return new;
++
++	for (last = list; last->next; last = last->next)
++		;
++
++	last->next = new;
++
++	return list;
++}
++
++struct boot_info *build_boot_info(struct reserve_info *reservelist,
++				  struct node *tree)
++{
++	struct boot_info *bi;
++
++	bi = xmalloc(sizeof(*bi));
++	bi->reservelist = reservelist;
++	bi->dt = tree;
++
++	return bi;
++}
++
++/*
++ * Tree accessor functions
++ */
++
++const char *get_unitname(struct node *node)
++{
++	if (node->name[node->basenamelen] == '\0')
++		return "";
++	else
++		return node->name + node->basenamelen + 1;
++}
++
++struct property *get_property(struct node *node, const char *propname)
++{
++	struct property *prop;
++
++	for_each_property(node, prop)
++		if (streq(prop->name, propname))
++			return prop;
++
++	return NULL;
++}
++
++cell_t propval_cell(struct property *prop)
++{
++	assert(prop->val.len == sizeof(cell_t));
++	return be32_to_cpu(*((cell_t *)prop->val.val));
++}
++
++struct node *get_subnode(struct node *node, const char *nodename)
++{
++	struct node *child;
++
++	for_each_child(node, child)
++		if (streq(child->name, nodename))
++			return child;
++
++	return NULL;
++}
++
++struct node *get_node_by_path(struct node *tree, const char *path)
++{
++	const char *p;
++	struct node *child;
++
++	if (!path || ! (*path))
++		return tree;
++
++	while (path[0] == '/')
++		path++;
++
++	p = strchr(path, '/');
++
++	for_each_child(tree, child) {
++		if (p && strneq(path, child->name, p-path))
++			return get_node_by_path(child, p+1);
++		else if (!p && streq(path, child->name))
++			return child;
 +	}
 +
-+	for (i=0; i<NR_EXT_IRQS; i++) {
-+		irq_desc[START_EXT_IRQS + i].chip = &cayman_irq_type;
++	return NULL;
++}
++
++struct node *get_node_by_label(struct node *tree, const char *label)
++{
++	struct node *child, *node;
++
++	assert(label && (strlen(label) > 0));
++
++	if (tree->label && streq(tree->label, label))
++		return tree;
++
++	for_each_child(tree, child) {
++		node = get_node_by_label(child, label);
++		if (node)
++			return node;
 +	}
 +
-+	/* Setup the SMSC interrupt */
-+	setup_irq(SMSC_IRQ, &cayman_action_smsc);
-+	setup_irq(PCI2_IRQ, &cayman_action_pci2);
++	return NULL;
 +}
-diff --git a/arch/sh/boards/cayman/led.c b/arch/sh/boards/cayman/led.c
++
++struct node *get_node_by_phandle(struct node *tree, cell_t phandle)
++{
++	struct node *child, *node;
++
++	assert((phandle != 0) && (phandle != -1));
++
++	if (tree->phandle == phandle)
++		return tree;
++
++	for_each_child(tree, child) {
++		node = get_node_by_phandle(child, phandle);
++		if (node)
++			return node;
++	}
++
++	return NULL;
++}
++
++struct node *get_node_by_ref(struct node *tree, const char *ref)
++{
++	if (ref[0] == '/')
++		return get_node_by_path(tree, ref);
++	else
++		return get_node_by_label(tree, ref);
++}
++
++cell_t get_node_phandle(struct node *root, struct node *node)
++{
++	static cell_t phandle = 1; /* FIXME: ick, static local */
++
++	if ((node->phandle != 0) && (node->phandle != -1))
++		return node->phandle;
++
++	assert(! get_property(node, "linux,phandle"));
++
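++	/*
++	 * Scan forward for the next phandle value not already used
++	 * elsewhere in the tree.
++	 */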
++	while (get_node_by_phandle(root, phandle))
++		phandle++;
++
++	node->phandle = phandle;
++	add_property(node,
++		     build_property("linux,phandle",
++				    data_append_cell(empty_data, phandle),
++				    NULL));
++
++	return node->phandle;
++}
+diff --git a/arch/powerpc/boot/dtc-src/srcpos.c b/arch/powerpc/boot/dtc-src/srcpos.c
 new file mode 100644
-index 0000000..a808eac
+index 0000000..352b0fe
 --- /dev/null
-+++ b/arch/sh/boards/cayman/led.c
-@@ -0,0 +1,51 @@
++++ b/arch/powerpc/boot/dtc-src/srcpos.c
+@@ -0,0 +1,105 @@
 +/*
-+ * arch/sh/boards/cayman/led.c
++ * Copyright 2007 Jon Loeliger, Freescale Semiconductor, Inc.
 + *
-+ * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
 + *
-+ * May be copied or modified under the terms of the GNU General Public
-+ * License.  See linux/COPYING for more information.
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
 + *
-+ * Flash the LEDs
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
 + */
-+#include <asm/io.h>
++
++#include "dtc.h"
++#include "srcpos.h"
++
 +
 +/*
-+** It is supposed these functions to be used for a low level
-+** debugging (via Cayman LEDs), hence to be available as soon
-+** as possible.
-+** Unfortunately Cayman LEDs relies on Cayman EPLD to be mapped
-+** (this happen when IRQ are initialized... quite late).
-+** These triky dependencies should be removed. Temporary, it
-+** may be enough to NOP until EPLD is mapped.
-+*/
++ * Record the complete unique set of opened file names.
++ * Primarily used to cache source position file names.
++ */
++#define MAX_N_FILE_NAMES	(100)
 +
-+extern unsigned long epld_virt;
++const char *file_names[MAX_N_FILE_NAMES];
++static int n_file_names = 0;
 +
-+#define LED_ADDR      (epld_virt + 0x008)
-+#define HDSP2534_ADDR (epld_virt + 0x100)
++/*
++ * Like yylineno, this is the current open file pos.
++ */
 +
-+void mach_led(int position, int value)
++int srcpos_filenum = -1;
++
++
++
++FILE *dtc_open_file(const char *fname)
 +{
-+	if (!epld_virt)
-+		return;
++	FILE *f;
 +
-+	if (value)
-+		ctrl_outl(0, LED_ADDR);
++	if (lookup_file_name(fname, 1) < 0)
++		die("Too many files opened\n");
++
++	if (streq(fname, "-"))
++		f = stdin;
 +	else
-+		ctrl_outl(1, LED_ADDR);
++		f = fopen(fname, "r");
 +
++	if (! f)
++		die("Couldn't open \"%s\": %s\n", fname, strerror(errno));
++
++	return f;
 +}
 +
-+void mach_alphanum(int position, unsigned char value)
++
++
++/*
++ * Locate and optionally add filename fname in the file_names[] array.
++ *
++ * If the filename is currently not in the array and the boolean
++ * add_it is non-zero, an attempt to add the filename will be made.
++ *
++ * Returns:
++ *    Index [0..MAX_N_FILE_NAMES) where the filename is kept
++ *    -1 if the name cannot be recorded
++ */
++
++int lookup_file_name(const char *fname, int add_it)
 +{
-+	if (!epld_virt)
-+		return;
++	int i;
 +
-+	ctrl_outb(value, HDSP2534_ADDR + 0xe0 + (position << 2));
++	for (i = 0; i < n_file_names; i++) {
++		if (strcmp(file_names[i], fname) == 0)
++			return i;
++	}
++
++	if (add_it) {
++		if (n_file_names < MAX_N_FILE_NAMES) {
++			file_names[n_file_names] = strdup(fname);
++			return n_file_names++;
++		}
++	}
++
++	return -1;
 +}
 +
-+void mach_alphanum_brightness(int setting)
++
++const char *srcpos_filename_for_num(int filenum)
 +{
-+	ctrl_outb(setting & 7, HDSP2534_ADDR + 0xc0);
++	if (0 <= filenum && filenum < n_file_names) {
++		return file_names[filenum];
++	}
++
++	return 0;
 +}
-diff --git a/arch/sh/boards/cayman/setup.c b/arch/sh/boards/cayman/setup.c
++
++
++const char *srcpos_get_filename(void)
++{
++	return srcpos_filename_for_num(srcpos_filenum);
++}
+diff --git a/arch/powerpc/boot/dtc-src/srcpos.h b/arch/powerpc/boot/dtc-src/srcpos.h
 new file mode 100644
-index 0000000..8c9fa47
+index 0000000..ce7ab5b
 --- /dev/null
-+++ b/arch/sh/boards/cayman/setup.c
-@@ -0,0 +1,187 @@
++++ b/arch/powerpc/boot/dtc-src/srcpos.h
+@@ -0,0 +1,75 @@
 +/*
-+ * arch/sh/mach-cayman/setup.c
++ * Copyright 2007 Jon Loeliger, Freescale Semiconductor, Inc.
 + *
-+ * SH5 Cayman support
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
 + *
-+ * Copyright (C) 2002  David J. Mckay & Benedict Gaster
-+ * Copyright (C) 2003 - 2007  Paul Mundt
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
 + *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
 + */
-+#include <linux/init.h>
-+#include <linux/io.h>
-+#include <linux/kernel.h>
-+#include <asm/cpu/irq.h>
 +
 +/*
-+ * Platform Dependent Interrupt Priorities.
++ * Augment the standard YYLTYPE with a filenum index into an
++ * array of all opened filenames.
 + */
 +
-+/* Using defaults defined in irq.h */
-+#define	RES NO_PRIORITY		/* Disabled */
-+#define IR0 IRL0_PRIORITY	/* IRLs */
-+#define IR1 IRL1_PRIORITY
-+#define IR2 IRL2_PRIORITY
-+#define IR3 IRL3_PRIORITY
-+#define PCA INTA_PRIORITY	/* PCI Ints */
-+#define PCB INTB_PRIORITY
-+#define PCC INTC_PRIORITY
-+#define PCD INTD_PRIORITY
-+#define SER TOP_PRIORITY
-+#define ERR TOP_PRIORITY
-+#define PW0 TOP_PRIORITY
-+#define PW1 TOP_PRIORITY
-+#define PW2 TOP_PRIORITY
-+#define PW3 TOP_PRIORITY
-+#define DM0 NO_PRIORITY		/* DMA Ints */
-+#define DM1 NO_PRIORITY
-+#define DM2 NO_PRIORITY
-+#define DM3 NO_PRIORITY
-+#define DAE NO_PRIORITY
-+#define TU0 TIMER_PRIORITY	/* TMU Ints */
-+#define TU1 NO_PRIORITY
-+#define TU2 NO_PRIORITY
-+#define TI2 NO_PRIORITY
-+#define ATI NO_PRIORITY		/* RTC Ints */
-+#define PRI NO_PRIORITY
-+#define CUI RTC_PRIORITY
-+#define ERI SCIF_PRIORITY	/* SCIF Ints */
-+#define RXI SCIF_PRIORITY
-+#define BRI SCIF_PRIORITY
-+#define TXI SCIF_PRIORITY
-+#define ITI TOP_PRIORITY	/* WDT Ints */
++#if ! defined(YYLTYPE) && ! defined(YYLTYPE_IS_DECLARED)
++typedef struct YYLTYPE {
++    int first_line;
++    int first_column;
++    int last_line;
++    int last_column;
++    int filenum;
++} YYLTYPE;
 +
-+/* Setup for the SMSC FDC37C935 */
-+#define SMSC_SUPERIO_BASE	0x04000000
-+#define SMSC_CONFIG_PORT_ADDR	0x3f0
-+#define SMSC_INDEX_PORT_ADDR	SMSC_CONFIG_PORT_ADDR
-+#define SMSC_DATA_PORT_ADDR	0x3f1
++#define YYLTYPE_IS_DECLARED	1
++#define YYLTYPE_IS_TRIVIAL	1
++#endif
 +
-+#define SMSC_ENTER_CONFIG_KEY	0x55
-+#define SMSC_EXIT_CONFIG_KEY	0xaa
++/* Cater to old parser templates. */
++#ifndef YYID
++#define YYID(n)	(n)
++#endif
 +
-+#define SMCS_LOGICAL_DEV_INDEX	0x07
-+#define SMSC_DEVICE_ID_INDEX	0x20
-+#define SMSC_DEVICE_REV_INDEX	0x21
-+#define SMSC_ACTIVATE_INDEX	0x30
-+#define SMSC_PRIMARY_BASE_INDEX  0x60
-+#define SMSC_SECONDARY_BASE_INDEX 0x62
-+#define SMSC_PRIMARY_INT_INDEX	0x70
-+#define SMSC_SECONDARY_INT_INDEX 0x72
++#define YYLLOC_DEFAULT(Current, Rhs, N)					\
++    do									\
++      if (YYID (N))							\
++	{								\
++	  (Current).first_line   = YYRHSLOC (Rhs, 1).first_line;	\
++	  (Current).first_column = YYRHSLOC (Rhs, 1).first_column;	\
++	  (Current).last_line    = YYRHSLOC (Rhs, N).last_line;		\
++	  (Current).last_column  = YYRHSLOC (Rhs, N).last_column;	\
++	  (Current).filenum      = YYRHSLOC (Rhs, N).filenum;		\
++	}								\
++      else								\
++	{								\
++	  (Current).first_line   = (Current).last_line   =		\
++	    YYRHSLOC (Rhs, 0).last_line;				\
++	  (Current).first_column = (Current).last_column =		\
++	    YYRHSLOC (Rhs, 0).last_column;				\
++	  (Current).filenum      = YYRHSLOC (Rhs, 0).filenum;		\
++	}								\
++    while (YYID (0))
 +
-+#define SMSC_IDE1_DEVICE	1
-+#define SMSC_KEYBOARD_DEVICE	7
-+#define SMSC_CONFIG_REGISTERS	8
 +
-+#define SMSC_SUPERIO_READ_INDEXED(index) ({ \
-+	outb((index), SMSC_INDEX_PORT_ADDR); \
-+	inb(SMSC_DATA_PORT_ADDR); })
-+#define SMSC_SUPERIO_WRITE_INDEXED(val, index) ({ \
-+	outb((index), SMSC_INDEX_PORT_ADDR); \
-+	outb((val),   SMSC_DATA_PORT_ADDR); })
 +
-+#define IDE1_PRIMARY_BASE	0x01f0
-+#define IDE1_SECONDARY_BASE	0x03f6
++extern void yyerror(char const *);
 +
-+unsigned long smsc_superio_virt;
++extern int srcpos_filenum;
 +
-+int platform_int_priority[NR_INTC_IRQS] = {
-+	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
-+	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
-+	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
-+	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
-+	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
-+	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
-+	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
-+	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
-+};
++extern int push_input_file(const char *filename);
++extern int pop_input_file(void);
 +
-+static int __init smsc_superio_setup(void)
++extern FILE *dtc_open_file(const char *fname);
++extern int lookup_file_name(const char *fname, int add_it);
++extern const char *srcpos_filename_for_num(int filenum);
++const char *srcpos_get_filename(void);
+diff --git a/arch/powerpc/boot/dtc-src/treesource.c b/arch/powerpc/boot/dtc-src/treesource.c
+new file mode 100644
+index 0000000..a6a7767
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/treesource.c
+@@ -0,0 +1,275 @@
++/*
++ * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation.  2005.
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ *                                                                   USA
++ */
++
++#include "dtc.h"
++#include "srcpos.h"
++
++extern FILE *yyin;
++extern int yyparse(void);
++extern void yyerror(char const *);
++
++struct boot_info *the_boot_info;
++
++struct boot_info *dt_from_source(const char *fname)
 +{
-+	unsigned char devid, devrev;
++	the_boot_info = NULL;
 +
-+	smsc_superio_virt = onchip_remap(SMSC_SUPERIO_BASE, 1024, "SMSC SuperIO");
-+	if (!smsc_superio_virt) {
-+		panic("Unable to remap SMSC SuperIO\n");
++	push_input_file(fname);
++
++	if (yyparse() != 0)
++		return NULL;
++
++	fill_fullpaths(the_boot_info->dt, "");
++
++	return the_boot_info;
++}
++
++static void write_prefix(FILE *f, int level)
++{
++	int i;
++
++	for (i = 0; i < level; i++)
++		fputc('\t', f);
++}
++
++int isstring(char c)
++{
++	return (isprint(c)
++		|| (c == '\0')
++		|| strchr("\a\b\t\n\v\f\r", c));
++}
++
++static void write_propval_string(FILE *f, struct data val)
++{
++	const char *str = val.val;
++	int i;
++	int newchunk = 1;
++	struct marker *m = val.markers;
++
++	assert(str[val.len-1] == '\0');
++
++	for (i = 0; i < (val.len-1); i++) {
++		char c = str[i];
++
++		if (newchunk) {
++			while (m && (m->offset <= i)) {
++				if (m->type == LABEL) {
++					assert(m->offset == i);
++					fprintf(f, "%s: ", m->ref);
++				}
++				m = m->next;
++			}
++			fprintf(f, "\"");
++			newchunk = 0;
++		}
++
++		switch (c) {
++		case '\a':
++			fprintf(f, "\\a");
++			break;
++		case '\b':
++			fprintf(f, "\\b");
++			break;
++		case '\t':
++			fprintf(f, "\\t");
++			break;
++		case '\n':
++			fprintf(f, "\\n");
++			break;
++		case '\v':
++			fprintf(f, "\\v");
++			break;
++		case '\f':
++			fprintf(f, "\\f");
++			break;
++		case '\r':
++			fprintf(f, "\\r");
++			break;
++		case '\\':
++			fprintf(f, "\\\\");
++			break;
++		case '\"':
++			fprintf(f, "\\\"");
++			break;
++		case '\0':
++			fprintf(f, "\", ");
++			newchunk = 1;
++			break;
++		default:
++			if (isprint(c))
++				fprintf(f, "%c", c);
++			else
++				fprintf(f, "\\x%02hhx", c);
++		}
 +	}
++	fprintf(f, "\"");
 +
-+	/* Initially the chip is in run state */
-+	/* Put it into configuration state */
-+	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
-+	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
++	/* Wrap up any labels at the end of the value */
++	for_each_marker_of_type(m, LABEL) {
++		assert (m->offset == val.len);
++		fprintf(f, " %s:", m->ref);
++	}
++}
 +
-+	/* Read device ID info */
-+	devid = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
-+	devrev = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_REV_INDEX);
-+	printk("SMSC SuperIO devid %02x rev %02x\n", devid, devrev);
++static void write_propval_cells(FILE *f, struct data val)
++{
++	void *propend = val.val + val.len;
++	cell_t *cp = (cell_t *)val.val;
++	struct marker *m = val.markers;
 +
-+	/* Select the keyboard device */
-+	SMSC_SUPERIO_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
++	fprintf(f, "<");
++	for (;;) {
++		while (m && (m->offset <= ((char *)cp - val.val))) {
++			if (m->type == LABEL) {
++				assert(m->offset == ((char *)cp - val.val));
++				fprintf(f, "%s: ", m->ref);
++			}
++			m = m->next;
++		}
 +
-+	/* enable it */
-+	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
++		fprintf(f, "0x%x", be32_to_cpu(*cp++));
++		if ((void *)cp >= propend)
++			break;
++		fprintf(f, " ");
++	}
 +
-+	/* Select the interrupts */
-+	/* On a PC keyboard is IRQ1, mouse is IRQ12 */
-+	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
-+	SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
++	/* Wrap up any labels at the end of the value */
++	for_each_marker_of_type(m, LABEL) {
++		assert (m->offset == val.len);
++		fprintf(f, " %s:", m->ref);
++	}
++	fprintf(f, ">");
++}
 +
-+#ifdef CONFIG_IDE
-+	/*
-+	 * Only IDE1 exists on the Cayman
-+	 */
++static void write_propval_bytes(FILE *f, struct data val)
++{
++	void *propend = val.val + val.len;
++	const char *bp = val.val;
++	struct marker *m = val.markers;
 +
-+	/* Power it on */
-+	SMSC_SUPERIO_WRITE_INDEXED(1 << SMSC_IDE1_DEVICE, 0x22);
++	fprintf(f, "[");
++	for (;;) {
++		while (m && (m->offset == (bp-val.val))) {
++			if (m->type == LABEL)
++				fprintf(f, "%s: ", m->ref);
++			m = m->next;
++		}
 +
-+	SMSC_SUPERIO_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
-+	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
++		fprintf(f, "%02hhx", *bp++);
++		if ((void *)bp >= propend)
++			break;
++		fprintf(f, " ");
++	}
 +
-+	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE >> 8,
-+				   SMSC_PRIMARY_BASE_INDEX + 0);
-+	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE & 0xff,
-+				   SMSC_PRIMARY_BASE_INDEX + 1);
++	/* Wrap up any labels at the end of the value */
++	for_each_marker_of_type(m, LABEL) {
++		assert (m->offset == val.len);
++		fprintf(f, " %s:", m->ref);
++	}
++	fprintf(f, "]");
++}
 +
-+	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE >> 8,
-+				   SMSC_SECONDARY_BASE_INDEX + 0);
-+	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE & 0xff,
-+				   SMSC_SECONDARY_BASE_INDEX + 1);
++static void write_propval(FILE *f, struct property *prop)
++{
++	int len = prop->val.len;
++	const char *p = prop->val.val;
++	struct marker *m = prop->val.markers;
++	int nnotstring = 0, nnul = 0;
++	int nnotstringlbl = 0, nnotcelllbl = 0;
++	int i;
 +
-+	SMSC_SUPERIO_WRITE_INDEXED(14, SMSC_PRIMARY_INT_INDEX);
++	if (len == 0) {
++		fprintf(f, ";\n");
++		return;
++	}
 +
-+	SMSC_SUPERIO_WRITE_INDEXED(SMSC_CONFIG_REGISTERS,
-+				   SMCS_LOGICAL_DEV_INDEX);
++	for (i = 0; i < len; i++) {
++		if (! isstring(p[i]))
++			nnotstring++;
++		if (p[i] == '\0')
++			nnul++;
++	}
 +
-+	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc2); /* GP42 = nIDE1_OE */
-+	SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
-+	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
-+	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
-+#endif
++	for_each_marker_of_type(m, LABEL) {
++		if ((m->offset > 0) && (prop->val.val[m->offset - 1] != '\0'))
++			nnotstringlbl++;
++		if ((m->offset % sizeof(cell_t)) != 0)
++			nnotcelllbl++;
++	}
 +
-+	/* Exit the configuration state */
-+	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
++	fprintf(f, " = ");
++	if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul))
++	    && (nnotstringlbl == 0)) {
++		write_propval_string(f, prop->val);
++	} else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) {
++		write_propval_cells(f, prop->val);
++	} else {
++		write_propval_bytes(f, prop->val);
++	}
 +
-+	return 0;
++	fprintf(f, ";\n");
 +}
-+__initcall(smsc_superio_setup);
 +
-+static void __iomem *cayman_ioport_map(unsigned long port, unsigned int len)
++static void write_tree_source_node(FILE *f, struct node *tree, int level)
 +{
-+	if (port < 0x400) {
-+		extern unsigned long smsc_superio_virt;
-+		return (void __iomem *)((port << 2) | smsc_superio_virt);
++	struct property *prop;
++	struct node *child;
++
++	write_prefix(f, level);
++	if (tree->label)
++		fprintf(f, "%s: ", tree->label);
++	if (tree->name && (*tree->name))
++		fprintf(f, "%s {\n", tree->name);
++	else
++		fprintf(f, "/ {\n");
++
++	for_each_property(tree, prop) {
++		write_prefix(f, level+1);
++		if (prop->label)
++			fprintf(f, "%s: ", prop->label);
++		fprintf(f, "%s", prop->name);
++		write_propval(f, prop);
++	}
++	for_each_child(tree, child) {
++		fprintf(f, "\n");
++		write_tree_source_node(f, child, level+1);
 +	}
++	write_prefix(f, level);
++	fprintf(f, "};\n");
++}
 +
-+	return (void __iomem *)port;
++
++void dt_to_source(FILE *f, struct boot_info *bi)
++{
++	struct reserve_info *re;
++
++	fprintf(f, "/dts-v1/;\n\n");
++
++	for (re = bi->reservelist; re; re = re->next) {
++		if (re->label)
++			fprintf(f, "%s: ", re->label);
++		fprintf(f, "/memreserve/\t0x%016llx 0x%016llx;\n",
++			(unsigned long long)re->re.address,
++			(unsigned long long)re->re.size);
++	}
++
++	write_tree_source_node(f, bi->dt, 0);
 +}
 +
-+extern void init_cayman_irq(void);
+diff --git a/arch/powerpc/boot/dtc-src/version_gen.h b/arch/powerpc/boot/dtc-src/version_gen.h
+new file mode 100644
+index 0000000..6c34303
+--- /dev/null
++++ b/arch/powerpc/boot/dtc-src/version_gen.h
+@@ -0,0 +1 @@
++#define DTC_VERSION "DTC 1.0.0-gd6f9b62f"
+diff --git a/arch/powerpc/boot/dts/adder875-redboot.dts b/arch/powerpc/boot/dts/adder875-redboot.dts
+new file mode 100644
+index 0000000..930bfb3
+--- /dev/null
++++ b/arch/powerpc/boot/dts/adder875-redboot.dts
+@@ -0,0 +1,184 @@
++/*
++ * Device Tree Source for MPC885 ADS running RedBoot
++ *
++ * Copyright 2006 MontaVista Software, Inc.
++ * Copyright 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
 +
-+static struct sh_machine_vector mv_cayman __initmv = {
-+	.mv_name		= "Hitachi Cayman",
-+	.mv_nr_irqs		= 64,
-+	.mv_ioport_map		= cayman_ioport_map,
-+	.mv_init_irq		= init_cayman_irq,
++/dts-v1/;
++/ {
++	model = "Analogue & Micro Adder MPC875";
++	compatible = "analogue-and-micro,adder875";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		console = &console;
++		ethernet0 = &eth0;
++		ethernet1 = &eth1;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,875@0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <16>;
++			i-cache-line-size = <16>;
++			d-cache-size = <8192>;
++			i-cache-size = <8192>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++			interrupts = <15 2>;	// decrementer interrupt
++			interrupt-parent = <&PIC>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0 0x01000000>;
++	};
++
++	localbus@fa200100 {
++		compatible = "fsl,mpc885-localbus", "fsl,pq1-localbus",
++		             "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		reg = <0xfa200100 0x40>;
++
++		ranges = <
++			0 0 0xfe000000 0x00800000
++			2 0 0xfa100000 0x00008000
++		>;
++
++		flash@0,0 {
++			compatible = "cfi-flash";
++			reg = <0 0 0x800000>;
++			bank-width = <2>;
++			device-width = <2>;
++		};
++	};
++
++	soc@fa200000 {
++		compatible = "fsl,mpc875-immr", "fsl,pq1-soc", "simple-bus";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0 0xfa200000 0x00004000>;
++
++		// Temporary until code stops depending on it.
++		device_type = "soc";
++
++		// Temporary until get_immrbase() is fixed.
++		reg = <0xfa200000 0x4000>;
++
++		mdio@e00 {
++			compatible = "fsl,mpc875-fec-mdio", "fsl,pq1-fec-mdio";
++			reg = <0xe00 0x188>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			PHY0: ethernet-phy@0 {
++				reg = <0>;
++				device_type = "ethernet-phy";
++			};
++
++			PHY1: ethernet-phy@1 {
++				reg = <1>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		eth0: ethernet@e00 {
++			device_type = "network";
++			compatible = "fsl,mpc875-fec-enet",
++			             "fsl,pq1-fec-enet";
++			reg = <0xe00 0x188>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <3 1>;
++			interrupt-parent = <&PIC>;
++			phy-handle = <&PHY0>;
++			linux,network-index = <0>;
++		};
++
++		eth1: ethernet@1e00 {
++			device_type = "network";
++			compatible = "fsl,mpc875-fec-enet",
++			             "fsl,pq1-fec-enet";
++			reg = <0x1e00 0x188>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <7 1>;
++			interrupt-parent = <&PIC>;
++			phy-handle = <&PHY1>;
++			linux,network-index = <1>;
++		};
++
++		PIC: interrupt-controller@0 {
++			interrupt-controller;
++			#interrupt-cells = <2>;
++			reg = <0 0x24>;
++			compatible = "fsl,mpc875-pic", "fsl,pq1-pic";
++		};
++
++		cpm@9c0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc875-cpm", "fsl,cpm1", "simple-bus";
++			interrupts = <0>;	// cpm error interrupt
++			interrupt-parent = <&CPM_PIC>;
++			reg = <0x9c0 0x40>;
++			ranges;
++
++			muram {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0 0x2000 0x2000>;
++
++				data@0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0 0x1c00>;
++				};
++			};
++
++			brg@9f0 {
++				compatible = "fsl,mpc875-brg",
++				             "fsl,cpm1-brg",
++				             "fsl,cpm-brg";
++				reg = <0x9f0 0x10>;
++			};
++
++			CPM_PIC: interrupt-controller@930 {
++				interrupt-controller;
++				#interrupt-cells = <1>;
++				interrupts = <5 2 0 2>;
++				interrupt-parent = <&PIC>;
++				reg = <0x930 0x20>;
++				compatible = "fsl,mpc875-cpm-pic",
++				             "fsl,cpm1-pic";
++			};
++
++			console: serial@a80 {
++				device_type = "serial";
++				compatible = "fsl,mpc875-smc-uart",
++				             "fsl,cpm1-smc-uart";
++				reg = <0xa80 0x10 0x3e80 0x40>;
++				interrupts = <4>;
++				interrupt-parent = <&CPM_PIC>;
++				fsl,cpm-brg = <1>;
++				fsl,cpm-command = <0x0090>;
++				current-speed = <115200>;
++			};
++		};
++	};
++
++	chosen {
++		linux,stdout-path = &console;
++	};
 +};
-diff --git a/arch/sh/boards/dreamcast/irq.c b/arch/sh/boards/dreamcast/irq.c
-index 5bf01f8..9d0673a 100644
---- a/arch/sh/boards/dreamcast/irq.c
-+++ b/arch/sh/boards/dreamcast/irq.c
-@@ -136,7 +136,7 @@ int systemasic_irq_demux(int irq)
-         emr = EMR_BASE + (level << 4) + (level << 2);
-         esr = ESR_BASE + (level << 2);
- 
--        /* Mask the ESR to filter any spurious, unwanted interrtupts */
-+        /* Mask the ESR to filter any spurious, unwanted interrupts */
-         status = inl(esr);
-         status &= inl(emr);
+diff --git a/arch/powerpc/boot/dts/adder875-uboot.dts b/arch/powerpc/boot/dts/adder875-uboot.dts
+new file mode 100644
+index 0000000..0197242
+--- /dev/null
++++ b/arch/powerpc/boot/dts/adder875-uboot.dts
+@@ -0,0 +1,183 @@
++/*
++ * Device Tree Source for MPC885 ADS running U-Boot
++ *
++ * Copyright 2006 MontaVista Software, Inc.
++ * Copyright 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/dts-v1/;
++/ {
++	model = "Analogue & Micro Adder MPC875";
++	compatible = "analogue-and-micro,adder875";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		console = &console;
++		ethernet0 = &eth0;
++		ethernet1 = &eth1;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,875@0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <16>;
++			i-cache-line-size = <16>;
++			d-cache-size = <8192>;
++			i-cache-size = <8192>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++			interrupts = <15 2>;	// decrementer interrupt
++			interrupt-parent = <&PIC>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0 0x01000000>;
++	};
++
++	localbus@ff000100 {
++		compatible = "fsl,mpc885-localbus", "fsl,pq1-localbus",
++		             "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		reg = <0xff000100 0x40>;
++
++		ranges = <
++			0 0 0xfe000000 0x01000000
++		>;
++
++		flash@0,0 {
++			compatible = "cfi-flash";
++			reg = <0 0 0x800000>;
++			bank-width = <2>;
++			device-width = <2>;
++		};
++	};
++
++	soc@ff000000 {
++		compatible = "fsl,mpc875-immr", "fsl,pq1-soc", "simple-bus";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0 0xff000000 0x00004000>;
++
++		// Temporary until code stops depending on it.
++		device_type = "soc";
++
++		// Temporary until get_immrbase() is fixed.
++		reg = <0xff000000 0x4000>;
++
++		mdio@e00 {
++			compatible = "fsl,mpc875-fec-mdio", "fsl,pq1-fec-mdio";
++			reg = <0xe00 0x188>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			PHY0: ethernet-phy@0 {
++				reg = <0>;
++				device_type = "ethernet-phy";
++			};
++
++			PHY1: ethernet-phy@1 {
++				reg = <1>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		eth0: ethernet@e00 {
++			device_type = "network";
++			compatible = "fsl,mpc875-fec-enet",
++			             "fsl,pq1-fec-enet";
++			reg = <0xe00 0x188>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <3 1>;
++			interrupt-parent = <&PIC>;
++			phy-handle = <&PHY0>;
++			linux,network-index = <0>;
++		};
++
++		eth1: ethernet@1e00 {
++			device_type = "network";
++			compatible = "fsl,mpc875-fec-enet",
++			             "fsl,pq1-fec-enet";
++			reg = <0x1e00 0x188>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <7 1>;
++			interrupt-parent = <&PIC>;
++			phy-handle = <&PHY1>;
++			linux,network-index = <1>;
++		};
++
++		PIC: interrupt-controller@0 {
++			interrupt-controller;
++			#interrupt-cells = <2>;
++			reg = <0 0x24>;
++			compatible = "fsl,mpc875-pic", "fsl,pq1-pic";
++		};
++
++		cpm@9c0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc875-cpm", "fsl,cpm1", "simple-bus";
++			interrupts = <0>;	// cpm error interrupt
++			interrupt-parent = <&CPM_PIC>;
++			reg = <0x9c0 0x40>;
++			ranges;
++
++			muram {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0 0x2000 0x2000>;
++
++				data@0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0 0x1c00>;
++				};
++			};
++
++			brg@9f0 {
++				compatible = "fsl,mpc875-brg",
++				             "fsl,cpm1-brg",
++				             "fsl,cpm-brg";
++				reg = <0x9f0 0x10>;
++			};
++
++			CPM_PIC: interrupt-controller@930 {
++				interrupt-controller;
++				#interrupt-cells = <1>;
++				interrupts = <5 2 0 2>;
++				interrupt-parent = <&PIC>;
++				reg = <0x930 0x20>;
++				compatible = "fsl,mpc875-cpm-pic",
++				             "fsl,cpm1-pic";
++			};
++
++			console: serial@a80 {
++				device_type = "serial";
++				compatible = "fsl,mpc875-smc-uart",
++				             "fsl,cpm1-smc-uart";
++				reg = <0xa80 0x10 0x3e80 0x40>;
++				interrupts = <4>;
++				interrupt-parent = <&CPM_PIC>;
++				fsl,cpm-brg = <1>;
++				fsl,cpm-command = <0x0090>;
++				current-speed = <115200>;
++			};
++		};
++	};
++
++	chosen {
++		linux,stdout-path = &console;
++	};
++};
+diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts
+index cb2fb50..29f1a6f 100644
+--- a/arch/powerpc/boot/dts/bamboo.dts
++++ b/arch/powerpc/boot/dts/bamboo.dts
+@@ -16,14 +16,24 @@
+ 	#size-cells = <1>;
+ 	model = "amcc,bamboo";
+ 	compatible = "amcc,bamboo";
+-	dcr-parent = <&/cpus/PowerPC,440EP@0>;
++	dcr-parent = <&/cpus/cpu@0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		ethernet1 = &EMAC1;
++		serial0 = &UART0;
++		serial1 = &UART1;
++		serial2 = &UART2;
++		serial3 = &UART3;
++	};
  
-diff --git a/arch/sh/boards/dreamcast/setup.c b/arch/sh/boards/dreamcast/setup.c
-index 8799df6..2581c8c 100644
---- a/arch/sh/boards/dreamcast/setup.c
-+++ b/arch/sh/boards/dreamcast/setup.c
-@@ -33,9 +33,6 @@ extern void aica_time_init(void);
- extern int gapspci_init(void);
- extern int systemasic_irq_demux(int);
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		PowerPC,440EP@0 {
++		cpu@0 {
+ 			device_type = "cpu";
++			model = "PowerPC,440EP";
+ 			reg = <0>;
+ 			clock-frequency = <0>; /* Filled in by zImage */
+ 			timebase-frequency = <0>; /* Filled in by zImage */
+@@ -126,7 +136,6 @@
+ 				#address-cells = <2>;
+ 				#size-cells = <1>;
+ 				clock-frequency = <0>; /* Filled in by zImage */
+-				ranges;
+ 				interrupts = <5 1>;
+ 				interrupt-parent = <&UIC1>;
+ 			};
+@@ -238,11 +247,56 @@
+ 				zmii-device = <&ZMII0>;
+ 				zmii-channel = <1>;
+ 			};
++
++			usb@ef601000 {
++				compatible = "ohci-be";
++				reg = <ef601000 80>;
++				interrupts = <8 1 9 1>;
++				interrupt-parent = < &UIC1 >;
++			};
++		};
++
++		PCI0: pci@ec000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb440ep-pci", "ibm,plb-pci";
++			primary;
++			reg = <0 eec00000 8	/* Config space access */
++			       0 eed00000 4	/* IACK */
++			       0 eed00000 4	/* Special cycle */
++			       0 ef400000 40>;	/* Internal registers */
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed. Chip supports a second
++			 * IO range but we don't use it for now
++			 */
++			ranges = <02000000 0 a0000000 0 a0000000 0 20000000
++				  01000000 0 00000000 0 e8000000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
++
++			/* Bamboo has all 4 IRQ pins tied together per slot */
++			interrupt-map-mask = <f800 0 0 0>;
++			interrupt-map = <
++				/* IDSEL 1 */
++				0800 0 0 0 &UIC0 1c 8
++
++				/* IDSEL 2 */
++				1000 0 0 0 &UIC0 1b 8
++
++				/* IDSEL 3 */
++				1800 0 0 0 &UIC0 1a 8
++
++				/* IDSEL 4 */
++				2000 0 0 0 &UIC0 19 8
++			>;
+ 		};
+ 	};
  
--void *dreamcast_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t);
--int dreamcast_consistent_free(struct device *, size_t, void *, dma_addr_t);
--
- static void __init dreamcast_setup(char **cmdline_p)
- {
- 	int i;
-@@ -64,9 +61,4 @@ static struct sh_machine_vector mv_dreamcast __initmv = {
- 	.mv_name		= "Sega Dreamcast",
- 	.mv_setup		= dreamcast_setup,
- 	.mv_irq_demux		= systemasic_irq_demux,
--
--#ifdef CONFIG_PCI
--	.mv_consistent_alloc	= dreamcast_consistent_alloc,
--	.mv_consistent_free	= dreamcast_consistent_free,
--#endif
+ 	chosen {
 		linux,stdout-path = "/plb/opb/serial@ef600300";
+-		bootargs = "console=ttyS0,115200";
+ 	};
  };
-diff --git a/arch/sh/boards/landisk/gio.c b/arch/sh/boards/landisk/gio.c
-index a37643d..1702508 100644
---- a/arch/sh/boards/landisk/gio.c
-+++ b/arch/sh/boards/landisk/gio.c
-@@ -121,7 +121,7 @@ static int gio_ioctl(struct inode *inode, struct file *filp,
- 	return 0;
- }
+diff --git a/arch/powerpc/boot/dts/cm5200.dts b/arch/powerpc/boot/dts/cm5200.dts
+new file mode 100644
+index 0000000..30737ea
+--- /dev/null
++++ b/arch/powerpc/boot/dts/cm5200.dts
+@@ -0,0 +1,234 @@
++/*
++ * CM5200 board Device Tree Source
++ *
++ * Copyright (C) 2007 Semihalf
++ * Marian Balakowicz <m8@semihalf.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/*
++ * WARNING: Do not depend on this tree layout remaining static just yet.
++ * The MPC5200 device tree conventions are still in flux
++ * Keep an eye on the linuxppc-dev mailing list for more details
++ */
++
++/ {
++	model = "schindler,cm5200";
++	compatible = "schindler,cm5200";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,5200@0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <20>;
++			i-cache-line-size = <20>;
++			d-cache-size = <4000>;		// L1, 16K
++			i-cache-size = <4000>;		// L1, 16K
++			timebase-frequency = <0>;	// from bootloader
++			bus-frequency = <0>;		// from bootloader
++			clock-frequency = <0>;		// from bootloader
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <00000000 04000000>;	// 64MB
++	};
++
++	soc5200@f0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc5200b-immr";
++		ranges = <0 f0000000 0000c000>;
++		reg = <f0000000 00000100>;
++		bus-frequency = <0>;		// from bootloader
++		system-frequency = <0>;		// from bootloader
++
++		cdm@200 {
++			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
++			reg = <200 38>;
++		};
++
++		mpc5200_pic: pic@500 {
++			// 5200 interrupts are encoded into two levels;
++			interrupt-controller;
++			#interrupt-cells = <3>;
++			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
++			reg = <500 80>;
++		};
++
++		timer@600 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <600 10>;
++			interrupts = <1 9 0>;
++			interrupt-parent = <&mpc5200_pic>;
++			fsl,has-wdt;
++		};
++
++		timer@610 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <610 10>;
++			interrupts = <1 a 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		timer@620 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <620 10>;
++			interrupts = <1 b 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		timer@630 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <630 10>;
++			interrupts = <1 c 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		timer@640 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <640 10>;
++			interrupts = <1 d 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		timer@650 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <650 10>;
++			interrupts = <1 e 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		timer@660 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <660 10>;
++			interrupts = <1 f 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		timer@670 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <670 10>;
++			interrupts = <1 10 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		rtc@800 {	// Real time clock
++			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
++			reg = <800 100>;
++			interrupts = <1 5 0 1 6 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		gpio@b00 {
++			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
++			reg = <b00 40>;
++			interrupts = <1 7 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		gpio@c00 {
++			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
++			reg = <c00 40>;
++			interrupts = <1 8 0 0 3 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		spi@f00 {
++			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
++			reg = <f00 20>;
++			interrupts = <2 d 0 2 e 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		usb@1000 {
++			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
++			reg = <1000 ff>;
++			interrupts = <2 6 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		dma-controller@1200 {
++			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
++			reg = <1200 80>;
++			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
++			              3 4 0  3 5 0  3 6 0  3 7 0
++			              3 8 0  3 9 0  3 a 0  3 b 0
++			              3 c 0  3 d 0  3 e 0  3 f 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		xlb@1f00 {
++			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
++			reg = <1f00 100>;
++		};
++
++		serial@2000 {		// PSC1
++			device_type = "serial";
++			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
++			port-number = <0>;  // Logical port assignment
++			reg = <2000 100>;
++			interrupts = <2 1 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		serial@2200 {		// PSC2
++			device_type = "serial";
++			compatible = "fsl,mpc5200-psc-uart";
++			port-number = <1>;  // Logical port assignment
++			reg = <2200 100>;
++			interrupts = <2 2 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		serial@2400 {		// PSC3
++			device_type = "serial";
++			compatible = "fsl,mpc5200-psc-uart";
++			port-number = <2>;  // Logical port assignment
++			reg = <2400 100>;
++			interrupts = <2 3 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		serial@2c00 {		// PSC6
++			device_type = "serial";
++			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
++			port-number = <5>;  // Logical port assignment
++			reg = <2c00 100>;
++			interrupts = <2 4 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		ethernet@3000 {
++			device_type = "network";
++			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
++			reg = <3000 800>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <2 5 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		i2c@3d40 {
++			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
++			reg = <3d40 40>;
++			interrupts = <2 10 0>;
++			interrupt-parent = <&mpc5200_pic>;
++			fsl5200-clocking;
++		};
++
++		sram@8000 {
++			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
++			reg = <8000 4000>;
++		};
++	};
++};
+diff --git a/arch/powerpc/boot/dts/ebony.dts b/arch/powerpc/boot/dts/ebony.dts
+index bc25997..7aad135 100644
+--- a/arch/powerpc/boot/dts/ebony.dts
++++ b/arch/powerpc/boot/dts/ebony.dts
+@@ -16,14 +16,22 @@
+ 	#size-cells = <1>;
+ 	model = "ibm,ebony";
+ 	compatible = "ibm,ebony";
+-	dcr-parent = <&/cpus/PowerPC,440GP@0>;
++	dcr-parent = <&/cpus/cpu@0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		ethernet1 = &EMAC1;
++		serial0 = &UART0;
++		serial1 = &UART1;
++	};
  
--static struct file_operations gio_fops = {
-+static const struct file_operations gio_fops = {
- 	.owner = THIS_MODULE,
- 	.open = gio_open,	/* open */
- 	.release = gio_close,	/* release */
-diff --git a/arch/sh/boards/renesas/hs7751rvoip/Kconfig b/arch/sh/boards/renesas/hs7751rvoip/Kconfig
-deleted file mode 100644
-index 1743be4..0000000
---- a/arch/sh/boards/renesas/hs7751rvoip/Kconfig
-+++ /dev/null
-@@ -1,12 +0,0 @@
--if SH_HS7751RVOIP
--
--menu "HS7751RVoIP options"
--
--config HS7751RVOIP_CODEC
--	bool "Support VoIP Codec section"
--	help
--	  Selecting this option will support CODEC section.
--
--endmenu
--
--endif
-diff --git a/arch/sh/boards/renesas/hs7751rvoip/Makefile b/arch/sh/boards/renesas/hs7751rvoip/Makefile
-deleted file mode 100644
-index e626377..0000000
---- a/arch/sh/boards/renesas/hs7751rvoip/Makefile
-+++ /dev/null
-@@ -1,8 +0,0 @@
--#
--# Makefile for the HS7751RVoIP specific parts of the kernel
--#
--
--obj-y	 := setup.o io.o irq.o
--
--obj-$(CONFIG_PCI) += pci.o
--
-diff --git a/arch/sh/boards/renesas/hs7751rvoip/io.c b/arch/sh/boards/renesas/hs7751rvoip/io.c
-deleted file mode 100644
-index bb9aa0d..0000000
---- a/arch/sh/boards/renesas/hs7751rvoip/io.c
-+++ /dev/null
-@@ -1,283 +0,0 @@
--/*
-- * linux/arch/sh/boards/renesas/hs7751rvoip/io.c
-- *
-- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
-- * Based largely on io_se.c.
-- *
-- * I/O routine for Renesas Technology sales HS7751RVoIP
-- *
-- * Initial version only to support LAN access; some
-- * placeholder code from io_hs7751rvoip.c left in with the
-- * expectation of later SuperIO and PCMCIA access.
-- */
--#include <linux/kernel.h>
--#include <linux/types.h>
--#include <linux/module.h>
--#include <linux/pci.h>
--#include <asm/io.h>
--#include <asm/hs7751rvoip.h>
--#include <asm/addrspace.h>
--
--extern void *area6_io8_base;	/* Area 6 8bit I/O Base address */
--extern void *area5_io16_base;	/* Area 5 16bit I/O Base address */
--
--/*
-- * The 7751R HS7751RVoIP uses the built-in PCI controller (PCIC)
-- * of the 7751R processor, and has a SuperIO accessible via the PCI.
-- * The board also includes a PCMCIA controller on its memory bus,
-- * like the other Solution Engine boards.
-- */
--
--#define CODEC_IO_BASE	0x1000
--#define CODEC_IOMAP(a)	((unsigned long)area6_io8_base + ((a) - CODEC_IO_BASE))
--
--static inline unsigned long port2adr(unsigned int port)
--{
--	if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
--		if (port == 0x3f6)
--			return ((unsigned long)area5_io16_base + 0x0c);
--		else
--			return ((unsigned long)area5_io16_base + 0x800 +
--				((port-0x1f0) << 1));
--	else
--		maybebadio((unsigned long)port);
--	return port;
--}
--
--/* The 7751R HS7751RVoIP seems to have everything hooked */
--/* up pretty normally (nothing on high-bytes only...) so this */
--/* shouldn't be needed */
--static inline int shifted_port(unsigned long port)
--{
--	/* For IDE registers, value is not shifted */
--	if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
--		return 0;
--	else
--		return 1;
--}
--
--#if defined(CONFIG_HS7751RVOIP_CODEC)
--#define codec_port(port)	\
--	((CODEC_IO_BASE <= (port)) && ((port) < (CODEC_IO_BASE + 0x20)))
--#else
--#define codec_port(port)	(0)
--#endif
--
--/*
-- * General outline: remap really low stuff [eventually] to SuperIO,
-- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
-- * is mapped through the PCI IO window.  Stuff with high bits (PXSEG)
-- * should be way beyond the window, and is used  w/o translation for
-- * compatibility.
-- */
--unsigned char hs7751rvoip_inb(unsigned long port)
--{
--	if (PXSEG(port))
--		return ctrl_inb(port);
--	else if (codec_port(port))
--		return ctrl_inb(CODEC_IOMAP(port));
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		return ctrl_inb(pci_ioaddr(port));
--	else
--		return ctrl_inw(port2adr(port)) & 0xff;
--}
--
--unsigned char hs7751rvoip_inb_p(unsigned long port)
--{
--	unsigned char v;
--
--        if (PXSEG(port))
--		v = ctrl_inb(port);
--	else if (codec_port(port))
--		v = ctrl_inb(CODEC_IOMAP(port));
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		v = ctrl_inb(pci_ioaddr(port));
--	else
--		v = ctrl_inw(port2adr(port)) & 0xff;
--	ctrl_delay();
--	return v;
--}
--
--unsigned short hs7751rvoip_inw(unsigned long port)
--{
--        if (PXSEG(port))
--		return ctrl_inw(port);
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		return ctrl_inw(pci_ioaddr(port));
--	else
--		maybebadio(port);
--	return 0;
--}
--
--unsigned int hs7751rvoip_inl(unsigned long port)
--{
--        if (PXSEG(port))
--		return ctrl_inl(port);
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		return ctrl_inl(pci_ioaddr(port));
--	else
--		maybebadio(port);
--	return 0;
--}
--
--void hs7751rvoip_outb(unsigned char value, unsigned long port)
--{
--
--        if (PXSEG(port))
--		ctrl_outb(value, port);
--	else if (codec_port(port))
--		ctrl_outb(value, CODEC_IOMAP(port));
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		ctrl_outb(value, pci_ioaddr(port));
--	else
--		ctrl_outb(value, port2adr(port));
--}
--
--void hs7751rvoip_outb_p(unsigned char value, unsigned long port)
--{
--        if (PXSEG(port))
--		ctrl_outb(value, port);
--	else if (codec_port(port))
--		ctrl_outb(value, CODEC_IOMAP(port));
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		ctrl_outb(value, pci_ioaddr(port));
--	else
--		ctrl_outw(value, port2adr(port));
--
--	ctrl_delay();
--}
--
--void hs7751rvoip_outw(unsigned short value, unsigned long port)
--{
--        if (PXSEG(port))
--		ctrl_outw(value, port);
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		ctrl_outw(value, pci_ioaddr(port));
--	else
--		maybebadio(port);
--}
--
--void hs7751rvoip_outl(unsigned int value, unsigned long port)
--{
--        if (PXSEG(port))
--		ctrl_outl(value, port);
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		ctrl_outl(value, pci_ioaddr(port));
--	else
--		maybebadio(port);
--}
--
--void hs7751rvoip_insb(unsigned long port, void *addr, unsigned long count)
--{
--	u8 *buf = addr;
--
--	if (PXSEG(port))
--		while (count--)
--			*buf++ = ctrl_inb(port);
--	else if (codec_port(port))
--		while (count--)
--			*buf++ = ctrl_inb(CODEC_IOMAP(port));
--	else if (is_pci_ioaddr(port) || shifted_port(port)) {
--		volatile u8 *bp = (volatile u8 *)pci_ioaddr(port);
--
--		while (count--)
--			*buf++ = *bp;
--	} else {
--		volatile u16 *p = (volatile u16 *)port2adr(port);
--
--		while (count--)
--			*buf++ = *p & 0xff;
--	}
--}
--
--void hs7751rvoip_insw(unsigned long port, void *addr, unsigned long count)
--{
--	volatile u16 *p;
--	u16 *buf = addr;
--
--	if (PXSEG(port))
--		p = (volatile u16 *)port;
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		p = (volatile u16 *)pci_ioaddr(port);
--	else
--		p = (volatile u16 *)port2adr(port);
--	while (count--)
--		*buf++ = *p;
--}
--
--void hs7751rvoip_insl(unsigned long port, void *addr, unsigned long count)
--{
--
--	if (is_pci_ioaddr(port) || shifted_port(port)) {
--		volatile u32 *p = (volatile u32 *)pci_ioaddr(port);
--		u32 *buf = addr;
--
--		while (count--)
--			*buf++ = *p;
--	} else
--		maybebadio(port);
--}
--
--void hs7751rvoip_outsb(unsigned long port, const void *addr, unsigned long count)
--{
--	const u8 *buf = addr;
--
--	if (PXSEG(port))
--		while (count--)
--			ctrl_outb(*buf++, port);
--	else if (codec_port(port))
--		while (count--)
--			ctrl_outb(*buf++, CODEC_IOMAP(port));
--	else if (is_pci_ioaddr(port) || shifted_port(port)) {
--		volatile u8 *bp = (volatile u8 *)pci_ioaddr(port);
--
--		while (count--)
--			*bp = *buf++;
--	} else {
--		volatile u16 *p = (volatile u16 *)port2adr(port);
--
--		while (count--)
--			*p = *buf++;
--	}
--}
--
--void hs7751rvoip_outsw(unsigned long port, const void *addr, unsigned long count)
--{
--	volatile u16 *p;
--	const u16 *buf = addr;
--
--	if (PXSEG(port))
--		p = (volatile u16 *)port;
--	else if (is_pci_ioaddr(port) || shifted_port(port))
--		p = (volatile u16 *)pci_ioaddr(port);
--	else
--		p = (volatile u16 *)port2adr(port);
--
--	while (count--)
--		*p = *buf++;
--}
--
--void hs7751rvoip_outsl(unsigned long port, const void *addr, unsigned long count)
--{
--	const u32 *buf = addr;
--
--	if (is_pci_ioaddr(port) || shifted_port(port)) {
--		volatile u32 *p = (volatile u32 *)pci_ioaddr(port);
--
--		while (count--)
--			*p = *buf++;
--	} else
--		maybebadio(port);
--}
--
--void __iomem *hs7751rvoip_ioport_map(unsigned long port, unsigned int size)
--{
--        if (PXSEG(port))
--                return (void __iomem *)port;
--	else if (unlikely(codec_port(port) && (size == 1)))
--		return (void __iomem *)CODEC_IOMAP(port);
--        else if (is_pci_ioaddr(port))
--                return (void __iomem *)pci_ioaddr(port);
--
--        return (void __iomem *)port2adr(port);
--}
-diff --git a/arch/sh/boards/renesas/hs7751rvoip/irq.c b/arch/sh/boards/renesas/hs7751rvoip/irq.c
-deleted file mode 100644
-index e55c668..0000000
---- a/arch/sh/boards/renesas/hs7751rvoip/irq.c
-+++ /dev/null
-@@ -1,116 +0,0 @@
--/*
-- * linux/arch/sh/boards/renesas/hs7751rvoip/irq.c
-- *
-- * Copyright (C) 2000  Kazumoto Kojima
-- *
-- * Renesas Technology Sales HS7751RVoIP Support.
-- *
-- * Modified for HS7751RVoIP by
-- * Atom Create Engineering Co., Ltd. 2002.
-- * Lineo uSolutions, Inc. 2003.
-- */
--
--#include <linux/init.h>
--#include <linux/irq.h>
--#include <linux/interrupt.h>
--#include <asm/io.h>
--#include <asm/irq.h>
--#include <asm/hs7751rvoip.h>
--
--static int mask_pos[] = {8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7};
--
--static void enable_hs7751rvoip_irq(unsigned int irq);
--static void disable_hs7751rvoip_irq(unsigned int irq);
--
--/* shutdown is same as "disable" */
--#define shutdown_hs7751rvoip_irq disable_hs7751rvoip_irq
--
--static void ack_hs7751rvoip_irq(unsigned int irq);
--static void end_hs7751rvoip_irq(unsigned int irq);
--
--static unsigned int startup_hs7751rvoip_irq(unsigned int irq)
--{
--	enable_hs7751rvoip_irq(irq);
--	return 0; /* never anything pending */
--}
--
--static void disable_hs7751rvoip_irq(unsigned int irq)
--{
--	unsigned short val;
--	unsigned short mask = 0xffff ^ (0x0001 << mask_pos[irq]);
--
--	/* Set the priority in IPR to 0 */
--	val = ctrl_inw(IRLCNTR3);
--	val &= mask;
--	ctrl_outw(val, IRLCNTR3);
--}
--
--static void enable_hs7751rvoip_irq(unsigned int irq)
--{
--	unsigned short val;
--	unsigned short value = (0x0001 << mask_pos[irq]);
--
--	/* Set priority in IPR back to original value */
--	val = ctrl_inw(IRLCNTR3);
--	val |= value;
--	ctrl_outw(val, IRLCNTR3);
--}
--
--static void ack_hs7751rvoip_irq(unsigned int irq)
--{
--	disable_hs7751rvoip_irq(irq);
--}
--
--static void end_hs7751rvoip_irq(unsigned int irq)
--{
--	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
--		enable_hs7751rvoip_irq(irq);
--}
--
--static struct hw_interrupt_type hs7751rvoip_irq_type = {
--	.typename =  "HS7751RVoIP IRQ",
--	.startup = startup_hs7751rvoip_irq,
--	.shutdown = shutdown_hs7751rvoip_irq,
--	.enable = enable_hs7751rvoip_irq,
--	.disable = disable_hs7751rvoip_irq,
--	.ack = ack_hs7751rvoip_irq,
--	.end = end_hs7751rvoip_irq,
--};
--
--static void make_hs7751rvoip_irq(unsigned int irq)
--{
--	disable_irq_nosync(irq);
--	irq_desc[irq].chip = &hs7751rvoip_irq_type;
--	disable_hs7751rvoip_irq(irq);
--}
--
--/*
-- * Initialize IRQ setting
-- */
--void __init init_hs7751rvoip_IRQ(void)
--{
--	int i;
--
--	/* IRL0=ON HOOK1
--	 * IRL1=OFF HOOK1
--	 * IRL2=ON HOOK2
--	 * IRL3=OFF HOOK2
--	 * IRL4=Ringing Detection
--	 * IRL5=CODEC
--	 * IRL6=Ethernet
--	 * IRL7=Ethernet Hub
--	 * IRL8=USB Communication
--	 * IRL9=USB Connection
--	 * IRL10=USB DMA
--	 * IRL11=CF Card
--	 * IRL12=PCMCIA
--	 * IRL13=PCI Slot
--	 */
--	ctrl_outw(0x9876, IRLCNTR1);
--	ctrl_outw(0xdcba, IRLCNTR2);
--	ctrl_outw(0x0050, IRLCNTR4);
--	ctrl_outw(0x4321, IRLCNTR5);
--
--	for (i=0; i<14; i++)
--		make_hs7751rvoip_irq(i);
--}
-diff --git a/arch/sh/boards/renesas/hs7751rvoip/pci.c b/arch/sh/boards/renesas/hs7751rvoip/pci.c
-deleted file mode 100644
-index 1c0ddee..0000000
---- a/arch/sh/boards/renesas/hs7751rvoip/pci.c
-+++ /dev/null
-@@ -1,149 +0,0 @@
--/*
-- * linux/arch/sh/boards/renesas/hs7751rvoip/pci.c
-- *
-- * Author:  Ian DaSilva (idasilva@mvista.com)
-- *
-- * Highly leveraged from pci-bigsur.c, written by Dustin McIntire.
-- *
-- * May be copied or modified under the terms of the GNU General Public
-- * License.  See linux/COPYING for more information.
-- *
-- * PCI initialization for the Renesas SH7751R HS7751RVoIP board
-- */
--
--#include <linux/kernel.h>
--#include <linux/types.h>
--#include <linux/init.h>
--#include <linux/delay.h>
--#include <linux/pci.h>
--#include <linux/module.h>
--
--#include <asm/io.h>
--#include "../../../drivers/pci/pci-sh7751.h"
--#include <asm/hs7751rvoip/hs7751rvoip.h>
--
--#define PCIMCR_MRSET_OFF	0xBFFFFFFF
--#define PCIMCR_RFSH_OFF		0xFFFFFFFB
--
--/*
-- * Only long word accesses of the PCIC's internal local registers and the
-- * configuration registers from the CPU is supported.
-- */
--#define PCIC_WRITE(x,v) writel((v), PCI_REG(x))
--#define PCIC_READ(x) readl(PCI_REG(x))
--
--/*
-- * Description:  This function sets up and initializes the pcic, sets
-- * up the BARS, maps the DRAM into the address space etc, etc.
-- */
--int __init pcibios_init_platform(void)
--{
--	unsigned long bcr1, wcr1, wcr2, wcr3, mcr;
--	unsigned short bcr2, bcr3;
--
--	/*
--	 * Initialize the slave bus controller on the pcic.  The values used
--	 * here should not be hardcoded, but they should be taken from the bsc
--	 * on the processor, to make this function as generic as possible.
--	 * (i.e. Another sbc may usr different SDRAM timing settings -- in order
--	 * for the pcic to work, its settings need to be exactly the same.)
--	 */
--	bcr1 = (*(volatile unsigned long *)(SH7751_BCR1));
--	bcr2 = (*(volatile unsigned short *)(SH7751_BCR2));
--	bcr3 = (*(volatile unsigned short *)(SH7751_BCR3));
--	wcr1 = (*(volatile unsigned long *)(SH7751_WCR1));
--	wcr2 = (*(volatile unsigned long *)(SH7751_WCR2));
--	wcr3 = (*(volatile unsigned long *)(SH7751_WCR3));
--	mcr = (*(volatile unsigned long *)(SH7751_MCR));
--
--	bcr1 = bcr1 | 0x00080000;  /* Enable Bit 19, BREQEN */
--	(*(volatile unsigned long *)(SH7751_BCR1)) = bcr1;
--
--	bcr1 = bcr1 | 0x40080000;  /* Enable Bit 19 BREQEN, set PCIC to slave */
--	PCIC_WRITE(SH7751_PCIBCR1, bcr1);	/* PCIC BCR1 */
--	PCIC_WRITE(SH7751_PCIBCR2, bcr2);	/* PCIC BCR2 */
--	PCIC_WRITE(SH7751_PCIBCR3, bcr3);	/* PCIC BCR3 */
--	PCIC_WRITE(SH7751_PCIWCR1, wcr1);	/* PCIC WCR1 */
--	PCIC_WRITE(SH7751_PCIWCR2, wcr2);	/* PCIC WCR2 */
--	PCIC_WRITE(SH7751_PCIWCR3, wcr3);	/* PCIC WCR3 */
--	mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
--	PCIC_WRITE(SH7751_PCIMCR, mcr);		/* PCIC MCR */
--
--	/* Enable all interrupts, so we know what to fix */
--	PCIC_WRITE(SH7751_PCIINTM, 0x0000c3ff);
--	PCIC_WRITE(SH7751_PCIAINTM, 0x0000380f);
--
--	/* Set up standard PCI config registers */
--	PCIC_WRITE(SH7751_PCICONF1, 0xFB900047); /* Bus Master, Mem & I/O access */
--	PCIC_WRITE(SH7751_PCICONF2, 0x00000000); /* PCI Class code & Revision ID */
--	PCIC_WRITE(SH7751_PCICONF4, 0xab000001); /* PCI I/O address (local regs) */
--	PCIC_WRITE(SH7751_PCICONF5, 0x0c000000); /* PCI MEM address (local RAM)  */
--	PCIC_WRITE(SH7751_PCICONF6, 0xd0000000); /* PCI MEM address (unused) */
--	PCIC_WRITE(SH7751_PCICONF11, 0x35051054); /* PCI Subsystem ID & Vendor ID */
--	PCIC_WRITE(SH7751_PCILSR0, 0x03f00000);	/* MEM (full 64M exposed) */
--	PCIC_WRITE(SH7751_PCILSR1, 0x00000000); /* MEM (unused) */
--	PCIC_WRITE(SH7751_PCILAR0, 0x0c000000); /* MEM (direct map from PCI) */
--	PCIC_WRITE(SH7751_PCILAR1, 0x00000000); /* MEM (unused) */
--
--	/* Now turn it on... */
--	PCIC_WRITE(SH7751_PCICR, 0xa5000001);
--
--	/*
--	 * Set PCIMBR and PCIIOBR here, assuming a single window
--	 * (16M MEM, 256K IO) is enough.  If a larger space is
--	 * needed, the readx/writex and inx/outx functions will
--	 * have to do more (e.g. setting registers for each call).
--	 */
--
--	/*
--	 * Set the MBR so PCI address is one-to-one with window,
--	 * meaning all calls go straight through... use ifdef to
--	 * catch erroneous assumption.
--	 */
--	BUG_ON(PCIBIOS_MIN_MEM != SH7751_PCI_MEMORY_BASE);
--
--	PCIC_WRITE(SH7751_PCIMBR, PCIBIOS_MIN_MEM);
--
--	/* Set IOBR for window containing area specified in pci.h */
--	PCIC_WRITE(SH7751_PCIIOBR, (PCIBIOS_MIN_IO & SH7751_PCIIOBR_MASK));
--
--	/* All done, may as well say so... */
--	printk("SH7751R PCI: Finished initialization of the PCI controller\n");
--
--	return 1;
--}
--
--int __init pcibios_map_platform_irq(u8 slot, u8 pin)
--{
--        switch (slot) {
--	case 0: return IRQ_PCISLOT;	/* PCI Extend slot */
--	case 1: return IRQ_PCMCIA;	/* PCI Cardbus Bridge */
--	case 2: return IRQ_PCIETH;	/* Realtek Ethernet controller */
--	case 3: return IRQ_PCIHUB;	/* Realtek Ethernet Hub controller */
--	default:
--		printk("PCI: Bad IRQ mapping request for slot %d\n", slot);
--		return -1;
--	}
--}
--
--static struct resource sh7751_io_resource = {
--	.name	= "SH7751_IO",
--	.start	= 0x4000,
--	.end	= 0x4000 + SH7751_PCI_IO_SIZE - 1,
--	.flags	= IORESOURCE_IO
--};
--
--static struct resource sh7751_mem_resource = {
--	.name	= "SH7751_mem",
--	.start	= SH7751_PCI_MEMORY_BASE,
--	.end	= SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1,
--	.flags	= IORESOURCE_MEM
--};
--
--extern struct pci_ops sh7751_pci_ops;
--
--struct pci_channel board_pci_channels[] = {
--	{ &sh7751_pci_ops, &sh7751_io_resource, &sh7751_mem_resource, 0, 0xff },
--	{ NULL, NULL, NULL, 0, 0 },
--};
--EXPORT_SYMBOL(board_pci_channels);
-diff --git a/arch/sh/boards/renesas/hs7751rvoip/setup.c b/arch/sh/boards/renesas/hs7751rvoip/setup.c
-deleted file mode 100644
-index c056259..0000000
---- a/arch/sh/boards/renesas/hs7751rvoip/setup.c
-+++ /dev/null
-@@ -1,105 +0,0 @@
--/*
-- * Renesas Technology Sales HS7751RVoIP Support.
-- *
-- * Copyright (C) 2000  Kazumoto Kojima
-- *
-- * Modified for HS7751RVoIP by
-- * Atom Create Engineering Co., Ltd. 2002.
-- * Lineo uSolutions, Inc. 2003.
-- */
--#include <linux/init.h>
--#include <linux/irq.h>
--#include <linux/mm.h>
--#include <linux/pm.h>
--#include <asm/hs7751rvoip.h>
--#include <asm/io.h>
--#include <asm/machvec.h>
--
--static void hs7751rvoip_power_off(void)
--{
--	ctrl_outw(ctrl_inw(PA_OUTPORTR) & 0xffdf, PA_OUTPORTR);
--}
--
--void *area5_io8_base;
--void *area6_io8_base;
--void *area5_io16_base;
--void *area6_io16_base;
--
--static int __init hs7751rvoip_cf_init(void)
--{
--	pgprot_t prot;
--	unsigned long paddrbase;
--
--	/* open I/O area window */
--	paddrbase = virt_to_phys((void *)(PA_AREA5_IO+0x00000800));
--	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_COM16);
--	area5_io16_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
--	if (!area5_io16_base) {
--		printk("allocate_cf_area : can't open CF I/O window!\n");
--		return -ENOMEM;
--	}
--
--	/* XXX : do we need attribute and common-memory area also? */
--
--	paddrbase = virt_to_phys((void *)PA_AREA6_IO);
--#if defined(CONFIG_HS7751RVOIP_CODEC)
--	prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_COM8);
--#else
--	prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_IO8);
--#endif
--	area6_io8_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
--	if (!area6_io8_base) {
--		printk("allocate_cf_area : can't open CODEC I/O 8bit window!\n");
--		return -ENOMEM;
--	}
--	prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_IO16);
--	area6_io16_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
--	if (!area6_io16_base) {
--		printk("allocate_cf_area : can't open CODEC I/O 16bit window!\n");
--		return -ENOMEM;
--	}
--
--	return 0;
--}
--device_initcall(hs7751rvoip_cf_init);
--
--/*
-- * Initialize the board
-- */
--static void __init hs7751rvoip_setup(char **cmdline_p)
--{
--	ctrl_outb(0xf0, PA_OUTPORTR);
--	pm_power_off = hs7751rvoip_power_off;
--
--	printk(KERN_INFO "Renesas Technology Sales HS7751RVoIP-2 support.\n");
--}
--
--static struct sh_machine_vector mv_hs7751rvoip __initmv = {
--	.mv_name		= "HS7751RVoIP",
--	.mv_setup		= hs7751rvoip_setup,
--	.mv_nr_irqs		= 72,
--
--	.mv_inb			= hs7751rvoip_inb,
--	.mv_inw			= hs7751rvoip_inw,
--	.mv_inl			= hs7751rvoip_inl,
--	.mv_outb		= hs7751rvoip_outb,
--	.mv_outw		= hs7751rvoip_outw,
--	.mv_outl		= hs7751rvoip_outl,
--
--	.mv_inb_p		= hs7751rvoip_inb_p,
--	.mv_inw_p		= hs7751rvoip_inw,
--	.mv_inl_p		= hs7751rvoip_inl,
--	.mv_outb_p		= hs7751rvoip_outb_p,
--	.mv_outw_p		= hs7751rvoip_outw,
--	.mv_outl_p		= hs7751rvoip_outl,
--
--	.mv_insb		= hs7751rvoip_insb,
--	.mv_insw		= hs7751rvoip_insw,
--	.mv_insl		= hs7751rvoip_insl,
--	.mv_outsb		= hs7751rvoip_outsb,
--	.mv_outsw		= hs7751rvoip_outsw,
--	.mv_outsl		= hs7751rvoip_outsl,
--
--	.mv_init_irq		= init_hs7751rvoip_IRQ,
--	.mv_ioport_map		= hs7751rvoip_ioport_map,
--};
-diff --git a/arch/sh/boards/renesas/r7780rp/Makefile b/arch/sh/boards/renesas/r7780rp/Makefile
-index dd26182..20a1008 100644
---- a/arch/sh/boards/renesas/r7780rp/Makefile
-+++ b/arch/sh/boards/renesas/r7780rp/Makefile
-@@ -3,7 +3,7 @@
- #
- irqinit-$(CONFIG_SH_R7780MP)	:= irq-r7780mp.o
- irqinit-$(CONFIG_SH_R7785RP)	:= irq-r7785rp.o
--irqinit-$(CONFIG_SH_R7780RP)	:= irq-r7780rp.o irq.o
-+irqinit-$(CONFIG_SH_R7780RP)	:= irq-r7780rp.o
- obj-y				:= setup.o $(irqinit-y)
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		PowerPC,440GP@0 {
++		cpu@0 {
+ 			device_type = "cpu";
++			model = "PowerPC,440GP";
+ 			reg = <0>;
+ 			clock-frequency = <0>; // Filled in by zImage
+ 			timebase-frequency = <0>; // Filled in by zImage
+@@ -150,9 +158,10 @@
+ 					};
+ 				};
+ 
+-				ds1743@1,0 {
++				nvram@1,0 {
+ 					/* NVRAM & RTC */
+-					compatible = "ds1743";
++					compatible = "ds1743-nvram";
++					#bytes = <2000>;
+ 					reg = <1 0 2000>;
+ 				};
  
- ifneq ($(CONFIG_SH_R7785RP),y)
-diff --git a/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c b/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c
-index 59b47fe..1f8f073 100644
---- a/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c
-+++ b/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c
-@@ -47,7 +47,7 @@ static unsigned char irl2irq[HL_NR_IRL] __initdata = {
- };
+@@ -284,12 +293,43 @@
  
- static DECLARE_INTC_DESC(intc_desc, "r7780mp", vectors,
--			 NULL, NULL, mask_registers, NULL, NULL);
-+			 NULL, mask_registers, NULL, NULL);
+ 		};
  
- unsigned char * __init highlander_init_irq_r7780mp(void)
- {
-diff --git a/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c b/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c
-index fa4a534..bd34048 100644
---- a/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c
-+++ b/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c
-@@ -3,21 +3,65 @@
-  *
-  * Copyright (C) 2002  Atom Create Engineering Co., Ltd.
-  * Copyright (C) 2006  Paul Mundt
-+ * Copyright (C) 2008  Magnus Damm
-  *
-  * This file is subject to the terms and conditions of the GNU General Public
-  * License.  See the file "COPYING" in the main directory of this archive
-  * for more details.
-  */
- #include <linux/init.h>
-+#include <linux/irq.h>
- #include <linux/io.h>
- #include <asm/r7780rp.h>
+-		PCIX0: pci@1234 {
++		PCIX0: pci@20ec00000 {
+ 			device_type = "pci";
+-			/* FIXME */
+-			reg = <2 0ec00000 8
+-			       2 0ec80000 f0
+-			       2 0ec80100 fc>;
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb440gp-pcix", "ibm,plb-pcix";
++			primary;
++			reg = <2 0ec00000 8	/* Config space access */
++			       0 0 0		/* no IACK cycles */
++			       2 0ed00000 4     /* Special cycles */
++			       2 0ec80000 f0	/* Internal registers */
++			       2 0ec80100 fc>;	/* Internal messaging registers */
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 00000003 80000000 0 80000000
++				  01000000 0 00000000 00000002 08000000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
++
++			/* Ebony has all 4 IRQ pins tied together per slot */
++			interrupt-map-mask = <f800 0 0 0>;
++			interrupt-map = <
++				/* IDSEL 1 */
++				0800 0 0 0 &UIC0 17 8
++
++				/* IDSEL 2 */
++				1000 0 0 0 &UIC0 18 8
++
++				/* IDSEL 3 */
++				1800 0 0 0 &UIC0 19 8
++
++				/* IDSEL 4 */
++				2000 0 0 0 &UIC0 1a 8
++			>;
+ 		};
+ 	};
  
-+enum {
-+	UNUSED = 0,
+diff --git a/arch/powerpc/boot/dts/ep405.dts b/arch/powerpc/boot/dts/ep405.dts
+new file mode 100644
+index 0000000..9293855
+--- /dev/null
++++ b/arch/powerpc/boot/dts/ep405.dts
+@@ -0,0 +1,228 @@
++/*
++ * Device Tree Source for EP405
++ *
++ * Copyright 2007 IBM Corp.
++ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2.  This program is licensed "as is" without
++ * any warranty of any kind, whether express or implied.
++ */
 +
-+	/* board specific interrupt sources */
++/ {
++	#address-cells = <1>;
++	#size-cells = <1>;
++	model = "ep405";
++	compatible = "ep405";
++	dcr-parent = <&/cpus/cpu@0>;
++
++	aliases {
++		ethernet0 = &EMAC;
++		serial0 = &UART0;
++		serial1 = &UART1;
++	};
 +
-+	AX88796,          /* Ethernet controller */
-+	PSW,              /* Push Switch */
-+	CF,               /* Compact Flash */
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu@0 {
++			device_type = "cpu";
++			model = "PowerPC,405GP";
++			reg = <0>;
++			clock-frequency = <bebc200>; /* Filled in by zImage */
++			timebase-frequency = <0>; /* Filled in by zImage */
++			i-cache-line-size = <20>;
++			d-cache-line-size = <20>;
++			i-cache-size = <4000>;
++			d-cache-size = <4000>;
++			dcr-controller;
++			dcr-access-method = "native";
++		};
++	};
 +
-+	PCI_A,
-+	PCI_B,
-+	PCI_C,
-+	PCI_D,
-+};
++	memory {
++		device_type = "memory";
++		reg = <0 0>; /* Filled in by zImage */
++	};
 +
-+static struct intc_vect vectors[] __initdata = {
-+	INTC_IRQ(PCI_A, 65), /* dirty: overwrite cpu vectors for pci */
-+	INTC_IRQ(PCI_B, 66),
-+	INTC_IRQ(PCI_C, 67),
-+	INTC_IRQ(PCI_D, 68),
-+	INTC_IRQ(CF, IRQ_CF),
-+	INTC_IRQ(PSW, IRQ_PSW),
-+	INTC_IRQ(AX88796, IRQ_AX88796),
++	UIC0: interrupt-controller {
++		compatible = "ibm,uic";
++		interrupt-controller;
++		cell-index = <0>;
++		dcr-reg = <0c0 9>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++	};
++
++	plb {
++		compatible = "ibm,plb3";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges;
++		clock-frequency = <0>; /* Filled in by zImage */
++
++		SDRAM0: memory-controller {
++			compatible = "ibm,sdram-405gp";
++			dcr-reg = <010 2>;
++		};
++
++		MAL: mcmal {
++			compatible = "ibm,mcmal-405gp", "ibm,mcmal";
++			dcr-reg = <180 62>;
++			num-tx-chans = <1>;
++			num-rx-chans = <1>;
++			interrupt-parent = <&UIC0>;
++			interrupts = <
++				b 4 /* TXEOB */
++				c 4 /* RXEOB */
++				a 4 /* SERR */
++				d 4 /* TXDE */
++				e 4 /* RXDE */>;
++		};
++
++		POB0: opb {
++			compatible = "ibm,opb-405gp", "ibm,opb";
++			#address-cells = <1>;
++			#size-cells = <1>;
++			ranges = <ef600000 ef600000 a00000>;
++			dcr-reg = <0a0 5>;
++			clock-frequency = <0>; /* Filled in by zImage */
++
++			UART0: serial@ef600300 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <ef600300 8>;
++				virtual-reg = <ef600300>;
++				clock-frequency = <0>; /* Filled in by zImage */
++				current-speed = <2580>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <0 4>;
++			};
++
++			UART1: serial@ef600400 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <ef600400 8>;
++				virtual-reg = <ef600400>;
++				clock-frequency = <0>; /* Filled in by zImage */
++				current-speed = <2580>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <1 4>;
++			};
++
++			IIC: i2c@ef600500 {
++				compatible = "ibm,iic-405gp", "ibm,iic";
++				reg = <ef600500 11>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <2 4>;
++			};
++
++			GPIO: gpio@ef600700 {
++				compatible = "ibm,gpio-405gp";
++				reg = <ef600700 20>;
++			};
++
++			EMAC: ethernet@ef600800 {
++				linux,network-index = <0>;
++				device_type = "network";
++				compatible = "ibm,emac-405gp", "ibm,emac";
++				interrupt-parent = <&UIC0>;
++				interrupts = <
++					f 4 /* Ethernet */
++					9 4 /* Ethernet Wake Up */>;
++				local-mac-address = [000000000000]; /* Filled in by zImage */
++				reg = <ef600800 70>;
++				mal-device = <&MAL>;
++				mal-tx-channel = <0>;
++				mal-rx-channel = <0>;
++				cell-index = <0>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rmii";
++				phy-map = <00000000>;
++			};
++
++		};
++
++		EBC0: ebc {
++			compatible = "ibm,ebc-405gp", "ibm,ebc";
++			dcr-reg = <012 2>;
++			#address-cells = <2>;
++			#size-cells = <1>;
++
++
++			/* The ranges property is supplied by the bootwrapper
++			 * and is based on the firmware's configuration of the
++			 * EBC bridge
++			 */
++			clock-frequency = <0>; /* Filled in by zImage */
++
++			/* NVRAM and RTC */
++			nvrtc@4,200000 {
++				compatible = "ds1742";
++				reg = <4 200000 0>; /* size fixed up by zImage */
++			};
++
++			/* "BCSR" CPLD contains a PCI irq controller */
++			bcsr@4,0 {
++				compatible = "ep405-bcsr";
++				reg = <4 0 10>;
++				interrupt-controller;
++				/* Routing table */
++				irq-routing = [	00	/* SYSERR */
++						01	/* STTM */
++						01	/* RTC */
++						01	/* FENET */
++						02	/* NB PCIIRQ mux ? */
++						03	/* SB Winbond 8259 ? */
++						04	/* Serial Ring */
++						05	/* USB (ep405pc) */
++						06	/* XIRQ 0 */
++						06	/* XIRQ 1 */
++						06	/* XIRQ 2 */
++						06	/* XIRQ 3 */
++						06	/* XIRQ 4 */
++						06	/* XIRQ 5 */
++						06	/* XIRQ 6 */
++						07];	/* Reserved */
++			};
++		};
++
++		PCI0: pci@ec000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb405gp-pci", "ibm,plb-pci";
++			primary;
++			reg = <eec00000 8	/* Config space access */
++			       eed80000 4	/* IACK */
++			       eed80000 4	/* Special cycle */
++			       ef480000 40>;	/* Internal registers */
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed. Chip supports a second
++			 * IO range but we don't use it for now
++			 */
++			ranges = <02000000 0 80000000 80000000 0 20000000
++				  01000000 0 00000000 e8000000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 80000000>;
++
++			/* That's all I know about IRQs on that thing ... */
++			interrupt-map-mask = <f800 0 0 0>;
++			interrupt-map = <
++				/* USB */
++				7000 0 0 0 &UIC0 1e 8 /* IRQ5 */
++			>;
++		};
++	};
++
++	chosen {
++		linux,stdout-path = "/plb/opb/serial at ef600300";
++	};
 +};
+diff --git a/arch/powerpc/boot/dts/ep8248e.dts b/arch/powerpc/boot/dts/ep8248e.dts
+new file mode 100644
+index 0000000..5d2fb76
+--- /dev/null
++++ b/arch/powerpc/boot/dts/ep8248e.dts
+@@ -0,0 +1,207 @@
++/*
++ * Device Tree for the Embedded Planet EP8248E board running PlanetCore.
++ *
++ * Copyright 2007 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
 +
-+static struct intc_mask_reg mask_registers[] __initdata = {
-+	{ 0xa5000000, 0, 16, /* IRLMSK */
-+	  { PCI_A, PCI_B, PCI_C, PCI_D, CF, 0, 0, 0,
-+	    0, 0, 0, 0, 0, 0, PSW, AX88796 } },
++/dts-v1/;
++/ {
++	model = "EP8248E";
++	compatible = "fsl,ep8248e";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		planetcore-SMC1 = &smc1;
++		planetcore-SCC1 = &scc1;
++		ethernet0 = &eth0;
++		ethernet1 = &eth1;
++		serial0 = &smc1;
++		serial1 = &scc1;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8248@0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <16384>;
++			i-cache-size = <16384>;
++			timebase-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	localbus@f0010100 {
++		compatible = "fsl,mpc8248-localbus",
++		             "fsl,pq2-localbus",
++		             "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		reg = <0xf0010100 0x40>;
++
++		ranges = <0 0 0xfc000000 0x04000000
++		          1 0 0xfa000000 0x00008000>;
++
++		flash@0,3800000 {
++			compatible = "cfi-flash";
++			reg = <0 0x3800000 0x800000>;
++			bank-width = <4>;
++			device-width = <2>;
++		};
++
++		bcsr@1,0 {
++			#address-cells = <2>;
++			#size-cells = <1>;
++			reg = <1 0 0x10>;
++			compatible = "fsl,ep8248e-bcsr";
++			ranges;
++
++			mdio {
++				device_type = "mdio";
++				compatible = "fsl,ep8248e-mdio-bitbang";
++				#address-cells = <1>;
++				#size-cells = <0>;
++				reg = <1 8 1>;
++
++				PHY0: ethernet-phy@0 {
++					interrupt-parent = <&PIC>;
++					reg = <0>;
++					device_type = "ethernet-phy";
++				};
++
++				PHY1: ethernet-phy@1 {
++					interrupt-parent = <&PIC>;
++					reg = <1>;
++					device_type = "ethernet-phy";
++				};
++			};
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0 0>;
++	};
++
++	soc@f0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8248-immr", "fsl,pq2-soc", "simple-bus";
++		ranges = <0x00000000 0xf0000000 0x00053000>;
++
++		// Temporary until code stops depending on it.
++		device_type = "soc";
++
++		// Temporary -- will go away once kernel uses ranges for get_immrbase().
++		reg = <0xf0000000 0x00053000>;
++
++		cpm@119c0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			#interrupt-cells = <2>;
++			compatible = "fsl,mpc8248-cpm", "fsl,cpm2",
++			             "simple-bus";
++			reg = <0x119c0 0x30>;
++			ranges;
++
++			muram {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0 0 0x10000>;
++
++				data@0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0 0x1100 0x1140
++					       0xec0 0x9800 0x800>;
++				};
++			};
++
++			brg@119f0 {
++				compatible = "fsl,mpc8248-brg",
++				             "fsl,cpm2-brg",
++				             "fsl,cpm-brg";
++				reg = <0x119f0 0x10 0x115f0 0x10>;
++			};
++
++			/* Monitor port/SMC1 */
++			smc1: serial@11a80 {
++				device_type = "serial";
++				compatible = "fsl,mpc8248-smc-uart",
++				             "fsl,cpm2-smc-uart";
++				reg = <0x11a80 0x20 0x1100 0x40>;
++				interrupts = <4 8>;
++				interrupt-parent = <&PIC>;
++				fsl,cpm-brg = <7>;
++				fsl,cpm-command = <0x1d000000>;
++				linux,planetcore-label = "SMC1";
++			};
++
++			/* "Serial" port/SCC1 */
++			scc1: serial@11a00 {
++				device_type = "serial";
++				compatible = "fsl,mpc8248-scc-uart",
++				             "fsl,cpm2-scc-uart";
++				reg = <0x11a00 0x20 0x8000 0x100>;
++				interrupts = <40 8>;
++				interrupt-parent = <&PIC>;
++				fsl,cpm-brg = <1>;
++				fsl,cpm-command = <0x00800000>;
++				linux,planetcore-label = "SCC1";
++			};
++
++			eth0: ethernet@11300 {
++				device_type = "network";
++				compatible = "fsl,mpc8248-fcc-enet",
++				             "fsl,cpm2-fcc-enet";
++				reg = <0x11300 0x20 0x8400 0x100 0x11390 1>;
++				local-mac-address = [ 00 00 00 00 00 00 ];
++				interrupts = <32 8>;
++				interrupt-parent = <&PIC>;
++				phy-handle = <&PHY0>;
++				linux,network-index = <0>;
++				fsl,cpm-command = <0x12000300>;
++			};
++
++			eth1: ethernet@11320 {
++				device_type = "network";
++				compatible = "fsl,mpc8248-fcc-enet",
++				             "fsl,cpm2-fcc-enet";
++				reg = <0x11320 0x20 0x8500 0x100 0x113b0 1>;
++				local-mac-address = [ 00 00 00 00 00 00 ];
++				interrupts = <33 8>;
++				interrupt-parent = <&PIC>;
++				phy-handle = <&PHY1>;
++				linux,network-index = <1>;
++				fsl,cpm-command = <0x16200300>;
++			};
++
++			usb@11b60 {
++				#address-cells = <1>;
++				#size-cells = <0>;
++				compatible = "fsl,mpc8248-usb",
++				             "fsl,cpm2-usb";
++				reg = <0x11b60 0x18 0x8b00 0x100>;
++				interrupt-parent = <&PIC>;
++				interrupts = <11 8>;
++				fsl,cpm-command = <0x2e600000>;
++			};
++		};
++
++		PIC: interrupt-controller@10c00 {
++			#interrupt-cells = <2>;
++			interrupt-controller;
++			reg = <0x10c00 0x80>;
++			compatible = "fsl,mpc8248-pic", "fsl,pq2-pic";
++		};
++	};
 +};
+diff --git a/arch/powerpc/boot/dts/haleakala.dts b/arch/powerpc/boot/dts/haleakala.dts
+new file mode 100644
+index 0000000..5dd3d15
+--- /dev/null
++++ b/arch/powerpc/boot/dts/haleakala.dts
+@@ -0,0 +1,274 @@
++/*
++ * Device Tree Source for AMCC Haleakala (405EXr)
++ *
++ * Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2.  This program is licensed "as is" without
++ * any warranty of any kind, whether express or implied.
++ */
 +
-+static unsigned char irl2irq[HL_NR_IRL] __initdata = {
-+	65, 66, 67, 68,
-+	IRQ_CF, 0, 0, 0,
-+	0, 0, 0, 0,
-+	IRQ_AX88796, IRQ_PSW
++/ {
++	#address-cells = <1>;
++	#size-cells = <1>;
++	model = "amcc,haleakala";
++	compatible = "amcc,kilauea";
++	dcr-parent = <&/cpus/cpu@0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		serial0 = &UART0;
++		serial1 = &UART1;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu@0 {
++			device_type = "cpu";
++			model = "PowerPC,405EXr";
++			reg = <0>;
++			clock-frequency = <0>; /* Filled in by U-Boot */
++			timebase-frequency = <0>; /* Filled in by U-Boot */
++			i-cache-line-size = <20>;
++			d-cache-line-size = <20>;
++			i-cache-size = <4000>; /* 16 kB */
++			d-cache-size = <4000>; /* 16 kB */
++			dcr-controller;
++			dcr-access-method = "native";
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0 0>; /* Filled in by U-Boot */
++	};
++
++	UIC0: interrupt-controller {
++		compatible = "ibm,uic-405exr", "ibm,uic";
++		interrupt-controller;
++		cell-index = <0>;
++		dcr-reg = <0c0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++	};
++
++	UIC1: interrupt-controller1 {
++		compatible = "ibm,uic-405exr","ibm,uic";
++		interrupt-controller;
++		cell-index = <1>;
++		dcr-reg = <0d0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <1e 4 1f 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
++
++	UIC2: interrupt-controller2 {
++		compatible = "ibm,uic-405exr","ibm,uic";
++		interrupt-controller;
++		cell-index = <2>;
++		dcr-reg = <0e0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <1c 4 1d 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
++
++	plb {
++		compatible = "ibm,plb-405exr", "ibm,plb4";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges;
++		clock-frequency = <0>; /* Filled in by U-Boot */
++
++		SDRAM0: memory-controller {
++			compatible = "ibm,sdram-405exr";
++			dcr-reg = <010 2>;
++		};
++
++		MAL0: mcmal {
++			compatible = "ibm,mcmal-405exr", "ibm,mcmal2";
++			dcr-reg = <180 62>;
++			num-tx-chans = <2>;
++			num-rx-chans = <2>;
++			interrupt-parent = <&MAL0>;
++			interrupts = <0 1 2 3 4>;
++			#interrupt-cells = <1>;
++			#address-cells = <0>;
++			#size-cells = <0>;
++			interrupt-map = </*TXEOB*/ 0 &UIC0 a 4
++					/*RXEOB*/ 1 &UIC0 b 4
++					/*SERR*/  2 &UIC1 0 4
++					/*TXDE*/  3 &UIC1 1 4
++					/*RXDE*/  4 &UIC1 2 4>;
++			interrupt-map-mask = <ffffffff>;
++		};
++
++		POB0: opb {
++			compatible = "ibm,opb-405exr", "ibm,opb";
++			#address-cells = <1>;
++			#size-cells = <1>;
++			ranges = <80000000 80000000 10000000
++				  ef600000 ef600000 a00000
++				  f0000000 f0000000 10000000>;
++			dcr-reg = <0a0 5>;
++			clock-frequency = <0>; /* Filled in by U-Boot */
++
++			EBC0: ebc {
++				compatible = "ibm,ebc-405exr", "ibm,ebc";
++				dcr-reg = <012 2>;
++				#address-cells = <2>;
++				#size-cells = <1>;
++				clock-frequency = <0>; /* Filled in by U-Boot */
++				/* ranges property is supplied by U-Boot */
++				interrupts = <5 1>;
++				interrupt-parent = <&UIC1>;
++
++				nor_flash@0,0 {
++					compatible = "amd,s29gl512n", "cfi-flash";
++					bank-width = <2>;
++					reg = <0 000000 4000000>;
++					#address-cells = <1>;
++					#size-cells = <1>;
++					partition@0 {
++						label = "kernel";
++						reg = <0 200000>;
++					};
++					partition@200000 {
++						label = "root";
++						reg = <200000 200000>;
++					};
++					partition@400000 {
++						label = "user";
++						reg = <400000 3b60000>;
++					};
++					partition@3f60000 {
++						label = "env";
++						reg = <3f60000 40000>;
++					};
++					partition@3fa0000 {
++						label = "u-boot";
++						reg = <3fa0000 60000>;
++					};
++				};
++			};
++
++			UART0: serial@ef600200 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <ef600200 8>;
++				virtual-reg = <ef600200>;
++				clock-frequency = <0>; /* Filled in by U-Boot */
++				current-speed = <0>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <1a 4>;
++			};
++
++			UART1: serial@ef600300 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <ef600300 8>;
++				virtual-reg = <ef600300>;
++				clock-frequency = <0>; /* Filled in by U-Boot */
++				current-speed = <0>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <1 4>;
++			};
++
++			IIC0: i2c@ef600400 {
++				compatible = "ibm,iic-405exr", "ibm,iic";
++				reg = <ef600400 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <2 4>;
++			};
++
++			IIC1: i2c@ef600500 {
++				compatible = "ibm,iic-405exr", "ibm,iic";
++				reg = <ef600500 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <7 4>;
++			};
++
++
++			RGMII0: emac-rgmii@ef600b00 {
++				compatible = "ibm,rgmii-405exr", "ibm,rgmii";
++				reg = <ef600b00 104>;
++				has-mdio;
++			};
++
++			EMAC0: ethernet@ef600900 {
++				linux,network-index = <0>;
++				device_type = "network";
++				compatible = "ibm,emac-405exr", "ibm,emac4";
++				interrupt-parent = <&EMAC0>;
++				interrupts = <0 1>;
++				#interrupt-cells = <1>;
++				#address-cells = <0>;
++				#size-cells = <0>;
++				interrupt-map = </*Status*/ 0 &UIC0 18 4
++						/*Wake*/  1 &UIC1 1d 4>;
++				reg = <ef600900 70>;
++				local-mac-address = [000000000000]; /* Filled in by U-Boot */
++				mal-device = <&MAL0>;
++				mal-tx-channel = <0>;
++				mal-rx-channel = <0>;
++				cell-index = <0>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rgmii";
++				phy-map = <00000000>;
++				rgmii-device = <&RGMII0>;
++				rgmii-channel = <0>;
++				has-inverted-stacr-oc;
++				has-new-stacr-staopc;
++			};
++		};
++
++		PCIE0: pciex@0a0000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pciex-405exr", "ibm,plb-pciex";
++			primary;
++			port = <0>; /* port number */
++			reg = <a0000000 20000000	/* Config space access */
++			       ef000000 00001000>;	/* Registers */
++			dcr-reg = <040 020>;
++			sdr-base = <400>;
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 90000000 0 08000000
++				  01000000 0 00000000 e0000000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 80000000>;
++
++			/* This drives busses 0x00 to 0x3f */
++			bus-range = <00 3f>;
++
++			/* Legacy interrupts (note the weird polarity, the bridge seems
++			 * to invert PCIe legacy interrupts).
++			 * We are de-swizzling here because the numbers are actually for
++			 * port of the root complex virtual P2P bridge. But I want
++			 * to avoid putting a node for it in the tree, so the numbers
++			 * below are basically de-swizzled numbers.
++			 * The real slot is on idsel 0, so the swizzling is 1:1
++			 */
++			interrupt-map-mask = <0000 0 0 7>;
++			interrupt-map = <
++				0000 0 0 1 &UIC2 0 4 /* swizzled int A */
++				0000 0 0 2 &UIC2 1 4 /* swizzled int B */
++				0000 0 0 3 &UIC2 2 4 /* swizzled int C */
++				0000 0 0 4 &UIC2 3 4 /* swizzled int D */>;
++		};
++	};
 +};
+diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
+new file mode 100644
+index 0000000..9bdfc0f
+--- /dev/null
++++ b/arch/powerpc/boot/dts/katmai.dts
+@@ -0,0 +1,400 @@
++/*
++ * Device Tree Source for AMCC Katmai eval board
++ *
++ * Copyright (c) 2006, 2007 IBM Corp.
++ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
++ *
++ * Copyright (c) 2006, 2007 IBM Corp.
++ * Josh Boyer <jwboyer@linux.vnet.ibm.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2.  This program is licensed "as is" without
++ * any warranty of any kind, whether express or implied.
++ */
 +
-+static DECLARE_INTC_DESC(intc_desc, "r7780rp", vectors,
-+			 NULL, mask_registers, NULL, NULL);
++/ {
++	#address-cells = <2>;
++	#size-cells = <1>;
++	model = "amcc,katmai";
++	compatible = "amcc,katmai";
++	dcr-parent = <&/cpus/cpu@0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		serial0 = &UART0;
++		serial1 = &UART1;
++		serial2 = &UART2;
++	};
 +
- unsigned char * __init highlander_init_irq_r7780rp(void)
- {
--	int i;
--
--	for (i = 0; i < 15; i++)
--		make_r7780rp_irq(i);
-+	if (ctrl_inw(0xa5000600)) {
-+		printk(KERN_INFO "Using r7780rp interrupt controller.\n");
-+		register_intc_controller(&intc_desc);
-+		return irl2irq;
-+	}
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu@0 {
++			device_type = "cpu";
++			model = "PowerPC,440SPe";
++			reg = <0>;
++			clock-frequency = <0>; /* Filled in by zImage */
++			timebase-frequency = <0>; /* Filled in by zImage */
++			i-cache-line-size = <20>;
++			d-cache-line-size = <20>;
++			i-cache-size = <20000>;
++			d-cache-size = <20000>;
++			dcr-controller;
++			dcr-access-method = "native";
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0 0 0>; /* Filled in by zImage */
++	};
++
++	UIC0: interrupt-controller0 {
++		compatible = "ibm,uic-440spe","ibm,uic";
++		interrupt-controller;
++		cell-index = <0>;
++		dcr-reg = <0c0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++	};
++
++	UIC1: interrupt-controller1 {
++		compatible = "ibm,uic-440spe","ibm,uic";
++		interrupt-controller;
++		cell-index = <1>;
++		dcr-reg = <0d0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <1e 4 1f 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
++
++	UIC2: interrupt-controller2 {
++		compatible = "ibm,uic-440spe","ibm,uic";
++		interrupt-controller;
++		cell-index = <2>;
++		dcr-reg = <0e0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <a 4 b 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
++
++	UIC3: interrupt-controller3 {
++		compatible = "ibm,uic-440spe","ibm,uic";
++		interrupt-controller;
++		cell-index = <3>;
++		dcr-reg = <0f0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <10 4 11 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
++
++	SDR0: sdr {
++		compatible = "ibm,sdr-440spe";
++		dcr-reg = <00e 002>;
++	};
++
++	CPR0: cpr {
++		compatible = "ibm,cpr-440spe";
++		dcr-reg = <00c 002>;
++	};
++
++	plb {
++		compatible = "ibm,plb-440spe", "ibm,plb-440gp", "ibm,plb4";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		ranges;
++		clock-frequency = <0>; /* Filled in by zImage */
++
++		SDRAM0: sdram {
++			compatible = "ibm,sdram-440spe", "ibm,sdram-405gp";
++			dcr-reg = <010 2>;
++		};
++
++		MAL0: mcmal {
++			compatible = "ibm,mcmal-440spe", "ibm,mcmal2";
++			dcr-reg = <180 62>;
++			num-tx-chans = <2>;
++			num-rx-chans = <1>;
++			interrupt-parent = <&MAL0>;
++			interrupts = <0 1 2 3 4>;
++			#interrupt-cells = <1>;
++			#address-cells = <0>;
++			#size-cells = <0>;
++			interrupt-map = </*TXEOB*/ 0 &UIC1 6 4
++					 /*RXEOB*/ 1 &UIC1 7 4
++					 /*SERR*/  2 &UIC1 1 4
++					 /*TXDE*/  3 &UIC1 2 4
++					 /*RXDE*/  4 &UIC1 3 4>;
++		};
++
++		POB0: opb {
++		  	compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb";
++			#address-cells = <1>;
++			#size-cells = <1>;
++		  	ranges = <00000000 4 e0000000 20000000>;
++		  	clock-frequency = <0>; /* Filled in by zImage */
++
++			EBC0: ebc {
++				compatible = "ibm,ebc-440spe", "ibm,ebc-440gp", "ibm,ebc";
++				dcr-reg = <012 2>;
++				#address-cells = <2>;
++				#size-cells = <1>;
++				clock-frequency = <0>; /* Filled in by zImage */
++				interrupts = <5 1>;
++				interrupt-parent = <&UIC1>;
++			};
++
++			UART0: serial@10000200 {
++		   		device_type = "serial";
++		   		compatible = "ns16550";
++		   		reg = <10000200 8>;
++				virtual-reg = <a0000200>;
++		   		clock-frequency = <0>; /* Filled in by zImage */
++		   		current-speed = <1c200>;
++		   		interrupt-parent = <&UIC0>;
++		   		interrupts = <0 4>;
++	   		};
++
++			UART1: serial@10000300 {
++		   		device_type = "serial";
++		   		compatible = "ns16550";
++		   		reg = <10000300 8>;
++				virtual-reg = <a0000300>;
++		   		clock-frequency = <0>;
++		   		current-speed = <0>;
++		   		interrupt-parent = <&UIC0>;
++		   		interrupts = <1 4>;
++	   		};
++
++
++			UART2: serial@10000600 {
++		   		device_type = "serial";
++		   		compatible = "ns16550";
++		   		reg = <10000600 8>;
++				virtual-reg = <a0000600>;
++		   		clock-frequency = <0>;
++		   		current-speed = <0>;
++		   		interrupt-parent = <&UIC1>;
++		   		interrupts = <5 4>;
++	   		};
++
++			IIC0: i2c@10000400 {
++				device_type = "i2c";
++				compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic";
++				reg = <10000400 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <2 4>;
++			};
++
++			IIC1: i2c@10000500 {
++				device_type = "i2c";
++				compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic";
++				reg = <10000500 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <3 4>;
++			};
++
++			EMAC0: ethernet@10000800 {
++				linux,network-index = <0>;
++				device_type = "network";
++				compatible = "ibm,emac-440spe", "ibm,emac4";
++				interrupt-parent = <&UIC1>;
++				interrupts = <1c 4 1d 4>;
++				reg = <10000800 70>;
++				local-mac-address = [000000000000];
++				mal-device = <&MAL0>;
++				mal-tx-channel = <0>;
++				mal-rx-channel = <0>;
++				cell-index = <0>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "gmii";
++				phy-map = <00000000>;
++				has-inverted-stacr-oc;
++				has-new-stacr-staopc;
++			};
++		};
++
++		PCIX0: pci at c0ec00000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pcix-440spe", "ibm,plb-pcix";
++			primary;
++			large-inbound-windows;
++			enable-msi-hole;
++			reg = <c 0ec00000   8	/* Config space access */
++			       0 0 0		/* no IACK cycles */
++			       c 0ed00000   4   /* Special cycles */
++			       c 0ec80000 100	/* Internal registers */
++			       c 0ec80100  fc>;	/* Internal messaging registers */
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 0000000d 80000000 0 80000000
++				  01000000 0 00000000 0000000c 08000000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
++
++			/* This drives busses 0 to 0xf */
++			bus-range = <0 f>;
++
++			/*
++			 * On Katmai, the following PCI-X interrupts signals
++			 * have to be enabled via jumpers (only INTA is
++			 * enabled per default):
++			 *
++			 * INTB: J3: 1-2
++			 * INTC: J2: 1-2
++			 * INTD: J1: 1-2
++			 */
++			interrupt-map-mask = <f800 0 0 7>;
++			interrupt-map = <
++				/* IDSEL 1 */
++				0800 0 0 1 &UIC1 14 8
++				0800 0 0 2 &UIC1 13 8
++				0800 0 0 3 &UIC1 12 8
++				0800 0 0 4 &UIC1 11 8
++			>;
++		};
++
++		PCIE0: pciex at d00000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pciex-440spe", "ibm,plb-pciex";
++			primary;
++			port = <0>; /* port number */
++			reg = <d 00000000 20000000	/* Config space access */
++			       c 10000000 00001000>;	/* Registers */
++			dcr-reg = <100 020>;
++			sdr-base = <300>;
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 0000000e 00000000 0 80000000
++				  01000000 0 00000000 0000000f 80000000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
++
++			/* This drives busses 10 to 0x1f */
++			bus-range = <10 1f>;
++
++			/* Legacy interrupts (note the weird polarity, the bridge seems
++			 * to invert PCIe legacy interrupts).
++			 * We are de-swizzling here because the numbers are actually for
++			 * port of the root complex virtual P2P bridge. But I want
++			 * to avoid putting a node for it in the tree, so the numbers
++			 * below are basically de-swizzled numbers.
++			 * The real slot is on idsel 0, so the swizzling is 1:1
++			 */
++			interrupt-map-mask = <0000 0 0 7>;
++			interrupt-map = <
++				0000 0 0 1 &UIC3 0 4 /* swizzled int A */
++				0000 0 0 2 &UIC3 1 4 /* swizzled int B */
++				0000 0 0 3 &UIC3 2 4 /* swizzled int C */
++				0000 0 0 4 &UIC3 3 4 /* swizzled int D */>;
++		};
++
++		PCIE1: pciex at d20000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pciex-440spe", "ibm,plb-pciex";
++			primary;
++			port = <1>; /* port number */
++			reg = <d 20000000 20000000	/* Config space access */
++			       c 10001000 00001000>;	/* Registers */
++			dcr-reg = <120 020>;
++			sdr-base = <340>;
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 0000000e 80000000 0 80000000
++				  01000000 0 00000000 0000000f 80010000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
++
++			/* This drives busses 0x20 to 0x2f */
++			bus-range = <20 2f>;
++
++			/* Legacy interrupts (note the weird polarity, the bridge seems
++			 * to invert PCIe legacy interrupts).
++			 * We are de-swizzling here because the numbers are actually for
++			 * port of the root complex virtual P2P bridge. But I want
++			 * to avoid putting a node for it in the tree, so the numbers
++			 * below are basically de-swizzled numbers.
++			 * The real slot is on idsel 0, so the swizzling is 1:1
++			 */
++			interrupt-map-mask = <0000 0 0 7>;
++			interrupt-map = <
++				0000 0 0 1 &UIC3 4 4 /* swizzled int A */
++				0000 0 0 2 &UIC3 5 4 /* swizzled int B */
++				0000 0 0 3 &UIC3 6 4 /* swizzled int C */
++				0000 0 0 4 &UIC3 7 4 /* swizzled int D */>;
++		};
++
++		PCIE2: pciex at d40000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pciex-440spe", "ibm,plb-pciex";
++			primary;
++			port = <2>; /* port number */
++			reg = <d 40000000 20000000	/* Config space access */
++			       c 10002000 00001000>;	/* Registers */
++			dcr-reg = <140 020>;
++			sdr-base = <370>;
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 0000000f 00000000 0 80000000
++				  01000000 0 00000000 0000000f 80020000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
++
++			/* This drives busses 0x30 to 0x3f */
++			bus-range = <30 3f>;
++
++			/* Legacy interrupts (note the weird polarity, the bridge seems
++			 * to invert PCIe legacy interrupts).
++			 * We are de-swizzling here because the numbers are actually for
++			 * port of the root complex virtual P2P bridge. But I want
++			 * to avoid putting a node for it in the tree, so the numbers
++			 * below are basically de-swizzled numbers.
++			 * The real slot is on idsel 0, so the swizzling is 1:1
++			 */
++			interrupt-map-mask = <0000 0 0 7>;
++			interrupt-map = <
++				0000 0 0 1 &UIC3 8 4 /* swizzled int A */
++				0000 0 0 2 &UIC3 9 4 /* swizzled int B */
++				0000 0 0 3 &UIC3 a 4 /* swizzled int C */
++				0000 0 0 4 &UIC3 b 4 /* swizzled int D */>;
++		};
++	};
++
++	chosen {
++		linux,stdout-path = "/plb/opb/serial at 10000200";
++	};
++};
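
Aside for readers of the katmai tree above (and the other 4xx trees that follow): PCI interrupt routing is expressed with the standard OF interrupt-map scheme. The child's unit interrupt specifier -- the devfn-derived address cells plus the INTA..INTD pin -- is ANDed with interrupt-map-mask and then matched row by row against interrupt-map; the matching row names the parent interrupt controller input and sense. A minimal, self-contained C sketch of that lookup follows; the struct layout and names are illustrative only, not the kernel's in-tree implementation.

#include <stdio.h>

/* One row of a PCI interrupt-map as written in the trees above:
 *   <child-addr 0 0 pin>  ->  <parent controller input, sense>.
 * Layout and names are purely illustrative, not kernel structures. */
struct imap_row {
	unsigned int addr;		/* child unit address cell (devfn << 8) */
	unsigned int pin;		/* 1..4 == INTA..INTD */
	unsigned int parent_input;	/* UIC input number */
	unsigned int parent_sense;
};

/* Mask the child's (address, pin) with interrupt-map-mask, then find the
 * matching row -- e.g. interrupt-map-mask = <f800 0 0 7> for PCIX0 above. */
static const struct imap_row *imap_resolve(const struct imap_row *map, int rows,
					   unsigned int addr_mask, unsigned int pin_mask,
					   unsigned int addr, unsigned int pin)
{
	int i;

	addr &= addr_mask;
	pin &= pin_mask;
	for (i = 0; i < rows; i++)
		if (map[i].addr == addr && map[i].pin == pin)
			return &map[i];
	return NULL;			/* no mapping for this device/pin */
}

int main(void)
{
	/* The PCIX0 IDSEL 1 rows from katmai.dts: INTA..INTD -> UIC1 0x14..0x11 */
	static const struct imap_row pcix0[] = {
		{ 0x0800, 1, 0x14, 8 },
		{ 0x0800, 2, 0x13, 8 },
		{ 0x0800, 3, 0x12, 8 },
		{ 0x0800, 4, 0x11, 8 },
	};
	const struct imap_row *r = imap_resolve(pcix0, 4, 0xf800, 7, 0x0800, 2);

	if (r)
		printf("INTB on IDSEL 1 -> UIC1 input 0x%x, sense %u\n",
		       r->parent_input, r->parent_sense);
	return 0;
}

The PCIe nodes use interrupt-map-mask = <0000 0 0 7>, i.e. only the pin is significant, which is why a single set of four "de-swizzled" rows covers the one slot behind each root port.
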
+diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
+index c824e8f..67c7ea1 100644
+--- a/arch/powerpc/boot/dts/kilauea.dts
++++ b/arch/powerpc/boot/dts/kilauea.dts
+@@ -13,14 +13,22 @@
+ 	#size-cells = <1>;
+ 	model = "amcc,kilauea";
+ 	compatible = "amcc,kilauea";
+-	dcr-parent = <&/cpus/PowerPC,405EX at 0>;
++	dcr-parent = <&/cpus/cpu at 0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		ethernet1 = &EMAC1;
++		serial0 = &UART0;
++		serial1 = &UART1;
++	};
  
- 	return NULL;
- }
-diff --git a/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c b/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c
-index b2c6a84..bf7ec10 100644
---- a/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c
-+++ b/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c
-@@ -2,7 +2,7 @@
-  * Renesas Solutions Highlander R7785RP Support.
-  *
-  * Copyright (C) 2002  Atom Create Engineering Co., Ltd.
-- * Copyright (C) 2006  Paul Mundt
-+ * Copyright (C) 2006 - 2008  Paul Mundt
-  * Copyright (C) 2007  Magnus Damm
-  *
-  * This file is subject to the terms and conditions of the GNU General Public
-@@ -17,31 +17,52 @@
- enum {
- 	UNUSED = 0,
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		PowerPC,405EX at 0 {
++		cpu at 0 {
+ 			device_type = "cpu";
++			model = "PowerPC,405EX";
+ 			reg = <0>;
+ 			clock-frequency = <0>; /* Filled in by U-Boot */
+ 			timebase-frequency = <0>; /* Filled in by U-Boot */
+@@ -194,6 +202,7 @@
+ 				device_type = "rgmii-interface";
+ 				compatible = "ibm,rgmii-405ex", "ibm,rgmii";
+ 				reg = <ef600b00 104>;
++				has-mdio;
+ 			};
  
--	/* board specific interrupt sources */
--	AX88796,          /* Ethernet controller */
--	CF,               /* Compact Flash */
-+	/* FPGA specific interrupt sources */
-+	CF,		/* Compact Flash */
-+	SMBUS,		/* SMBUS */
-+	TP,		/* Touch panel */
-+	RTC,		/* RTC Alarm */
-+	TH_ALERT,	/* Temperature sensor */
-+	AX88796,	/* Ethernet controller */
-+
-+	/* external bus connector */
-+	EXT0, EXT1, EXT2, EXT3, EXT4, EXT5, EXT6, EXT7,
- };
+ 			EMAC0: ethernet at ef600900 {
+@@ -220,6 +229,8 @@
+ 				phy-map = <00000000>;
+ 				rgmii-device = <&RGMII0>;
+ 				rgmii-channel = <0>;
++				has-inverted-stacr-oc;
++				has-new-stacr-staopc;
+ 			};
  
- static struct intc_vect vectors[] __initdata = {
- 	INTC_IRQ(CF, IRQ_CF),
-+	INTC_IRQ(SMBUS, IRQ_SMBUS),
-+	INTC_IRQ(TP, IRQ_TP),
-+	INTC_IRQ(RTC, IRQ_RTC),
-+	INTC_IRQ(TH_ALERT, IRQ_TH_ALERT),
+ 			EMAC1: ethernet at ef600a00 {
+@@ -246,7 +257,91 @@
+ 				phy-map = <00000000>;
+ 				rgmii-device = <&RGMII0>;
+ 				rgmii-channel = <1>;
++				has-inverted-stacr-oc;
++				has-new-stacr-staopc;
+ 			};
+ 		};
 +
-+	INTC_IRQ(EXT0, IRQ_EXT0), INTC_IRQ(EXT1, IRQ_EXT1),
-+	INTC_IRQ(EXT2, IRQ_EXT2), INTC_IRQ(EXT3, IRQ_EXT3),
++		PCIE0: pciex at 0a0000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex";
++			primary;
++			port = <0>; /* port number */
++			reg = <a0000000 20000000	/* Config space access */
++			       ef000000 00001000>;	/* Registers */
++			dcr-reg = <040 020>;
++			sdr-base = <400>;
 +
-+	INTC_IRQ(EXT4, IRQ_EXT4), INTC_IRQ(EXT5, IRQ_EXT5),
-+	INTC_IRQ(EXT6, IRQ_EXT6), INTC_IRQ(EXT7, IRQ_EXT7),
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 90000000 0 08000000
++				  01000000 0 00000000 e0000000 0 00010000>;
 +
- 	INTC_IRQ(AX88796, IRQ_AX88796),
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 80000000>;
++
++			/* This drives busses 0x00 to 0x3f */
++			bus-range = <00 3f>;
++
++			/* Legacy interrupts (note the weird polarity, the bridge seems
++			 * to invert PCIe legacy interrupts).
++			 * We are de-swizzling here because the numbers are actually for
++			 * port of the root complex virtual P2P bridge. But I want
++			 * to avoid putting a node for it in the tree, so the numbers
++			 * below are basically de-swizzled numbers.
++			 * The real slot is on idsel 0, so the swizzling is 1:1
++			 */
++			interrupt-map-mask = <0000 0 0 7>;
++			interrupt-map = <
++				0000 0 0 1 &UIC2 0 4 /* swizzled int A */
++				0000 0 0 2 &UIC2 1 4 /* swizzled int B */
++				0000 0 0 3 &UIC2 2 4 /* swizzled int C */
++				0000 0 0 4 &UIC2 3 4 /* swizzled int D */>;
++		};
++
++		PCIE1: pciex at 0c0000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex";
++			primary;
++			port = <1>; /* port number */
++			reg = <c0000000 20000000	/* Config space access */
++			       ef001000 00001000>;	/* Registers */
++			dcr-reg = <060 020>;
++			sdr-base = <440>;
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 98000000 0 08000000
++				  01000000 0 00000000 e0010000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 80000000>;
++
++			/* This drives busses 0x40 to 0x7f */
++			bus-range = <40 7f>;
++
++			/* Legacy interrupts (note the weird polarity, the bridge seems
++			 * to invert PCIe legacy interrupts).
++			 * We are de-swizzling here because the numbers are actually for
++			 * port of the root complex virtual P2P bridge. But I want
++			 * to avoid putting a node for it in the tree, so the numbers
++			 * below are basically de-swizzled numbers.
++			 * The real slot is on idsel 0, so the swizzling is 1:1
++			 */
++			interrupt-map-mask = <0000 0 0 7>;
++			interrupt-map = <
++				0000 0 0 1 &UIC2 b 4 /* swizzled int A */
++				0000 0 0 2 &UIC2 c 4 /* swizzled int B */
++				0000 0 0 3 &UIC2 d 4 /* swizzled int C */
++				0000 0 0 4 &UIC2 e 4 /* swizzled int D */>;
++		};
+ 	};
  };
+diff --git a/arch/powerpc/boot/dts/kuroboxHD.dts b/arch/powerpc/boot/dts/kuroboxHD.dts
+index ec71ab8..4469588 100644
+--- a/arch/powerpc/boot/dts/kuroboxHD.dts
++++ b/arch/powerpc/boot/dts/kuroboxHD.dts
+@@ -23,6 +23,12 @@ XXXX add flash parts, rtc, ??
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -60,7 +66,7 @@ XXXX add flash parts, rtc, ??
+ 		i2c at 80003000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <80003000 1000>;
+ 			interrupts = <5 2>;
+@@ -73,7 +79,8 @@ XXXX add flash parts, rtc, ??
+ 			};
+ 		};
  
- static struct intc_mask_reg mask_registers[] __initdata = {
- 	{ 0xa4000010, 0, 16, /* IRLMCR1 */
--	  { 0, 0, 0, 0, CF, AX88796, 0, 0,
--	    0, 0, 0, 0, 0, 0, 0, 0 } },
-+	  { 0, 0, 0, 0, CF, AX88796, SMBUS, TP,
-+	    RTC, 0, TH_ALERT, 0, 0, 0, 0, 0 } },
-+	{ 0xa4000012, 0, 16, /* IRLMCR2 */
-+	  { 0, 0, 0, 0, 0, 0, 0, 0,
-+	    EXT7, EXT6, EXT5, EXT4, EXT3, EXT2, EXT1, EXT0 } },
- };
+-		serial at 80004500 {
++		serial0: serial at 80004500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <80004500 8>;
+@@ -83,7 +90,8 @@ XXXX add flash parts, rtc, ??
+ 			interrupt-parent = <&mpic>;
+ 		};
  
- static unsigned char irl2irq[HL_NR_IRL] __initdata = {
--	0, IRQ_CF, 0, 0,
--	0, 0, 0, 0,
--	0, 0, IRQ_AX88796, 0,
--	0, 0, 0,
-+	0, IRQ_CF, IRQ_EXT4, IRQ_EXT5,
-+	IRQ_EXT6, IRQ_EXT7, IRQ_SMBUS, IRQ_TP,
-+	IRQ_RTC, IRQ_TH_ALERT, IRQ_AX88796, IRQ_EXT0,
-+	IRQ_EXT1, IRQ_EXT2, IRQ_EXT3,
- };
+-		serial at 80004600 {
++		serial1: serial at 80004600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <80004600 8>;
+@@ -102,7 +110,7 @@ XXXX add flash parts, rtc, ??
+ 			reg = <80040000 40000>;
+ 		};
  
- static DECLARE_INTC_DESC(intc_desc, "r7785rp", vectors,
--			 NULL, NULL, mask_registers, NULL, NULL);
-+			 NULL, mask_registers, NULL, NULL);
+-		pci at fec00000 {
++		pci0: pci at fec00000 {
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 			#interrupt-cells = <1>;
+diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
+index 32ecd23..8443c85 100644
+--- a/arch/powerpc/boot/dts/kuroboxHG.dts
++++ b/arch/powerpc/boot/dts/kuroboxHG.dts
+@@ -23,6 +23,12 @@ XXXX add flash parts, rtc, ??
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -60,7 +66,7 @@ XXXX add flash parts, rtc, ??
+ 		i2c at 80003000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <80003000 1000>;
+ 			interrupts = <5 2>;
+@@ -73,7 +79,8 @@ XXXX add flash parts, rtc, ??
+ 			};
+ 		};
  
- unsigned char * __init highlander_init_irq_r7785rp(void)
- {
-@@ -58,7 +79,7 @@ unsigned char * __init highlander_init_irq_r7785rp(void)
- 	ctrl_outw(0x7060, PA_IRLPRC);	/* FPGA IRLC */
- 	ctrl_outw(0x0000, PA_IRLPRD);	/* FPGA IRLD */
- 	ctrl_outw(0x4321, PA_IRLPRE);	/* FPGA IRLE */
--	ctrl_outw(0x0000, PA_IRLPRF);	/* FPGA IRLF */
-+	ctrl_outw(0xdcba, PA_IRLPRF);	/* FPGA IRLF */
+-		serial at 80004500 {
++		serial0: serial at 80004500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <80004500 8>;
+@@ -83,7 +90,8 @@ XXXX add flash parts, rtc, ??
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 80004600 {
++		serial1: serial at 80004600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <80004600 8>;
+@@ -102,7 +110,7 @@ XXXX add flash parts, rtc, ??
+ 			reg = <80040000 40000>;
+ 		};
+ 
+-		pci at fec00000 {
++		pci0: pci at fec00000 {
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 			#interrupt-cells = <1>;
+diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
+index 6731763..0d701c1 100644
+--- a/arch/powerpc/boot/dts/lite5200.dts
++++ b/arch/powerpc/boot/dts/lite5200.dts
+@@ -10,16 +10,9 @@
+  * option) any later version.
+  */
  
- 	register_intc_controller(&intc_desc);
- 	return irl2irq;
-diff --git a/arch/sh/boards/renesas/r7780rp/irq.c b/arch/sh/boards/renesas/r7780rp/irq.c
-deleted file mode 100644
-index e0b8eb5..0000000
---- a/arch/sh/boards/renesas/r7780rp/irq.c
-+++ /dev/null
-@@ -1,51 +0,0 @@
 -/*
-- * Renesas Solutions Highlander R7780RP-1 Support.
-- *
-- * Copyright (C) 2002  Atom Create Engineering Co., Ltd.
-- * Copyright (C) 2006  Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/init.h>
--#include <linux/irq.h>
--#include <linux/interrupt.h>
--#include <linux/io.h>
--#include <asm/r7780rp.h>
--
--#ifdef CONFIG_SH_R7780RP
--static int mask_pos[] = {15, 14, 13, 12, 11, 10, 9, 8, 7, 5, 6, 4, 0, 1, 2, 0};
--#elif defined(CONFIG_SH_R7780MP)
--static int mask_pos[] = {12, 11, 9, 14, 15, 8, 13, 6, 5, 4, 3, 2, 0, 0, 1, 0};
--#elif defined(CONFIG_SH_R7785RP)
--static int mask_pos[] = {2, 11, 2, 2, 2, 2, 9, 8, 7, 5, 10, 2, 2, 2, 2, 2};
--#endif
--
--static void enable_r7780rp_irq(unsigned int irq)
--{
--	/* Set priority in IPR back to original value */
--	ctrl_outw(ctrl_inw(IRLCNTR1) | (1 << mask_pos[irq]), IRLCNTR1);
--}
--
--static void disable_r7780rp_irq(unsigned int irq)
--{
--	/* Set the priority in IPR to 0 */
--	ctrl_outw(ctrl_inw(IRLCNTR1) & (0xffff ^ (1 << mask_pos[irq])),
--		  IRLCNTR1);
--}
--
--static struct irq_chip r7780rp_irq_chip __read_mostly = {
--	.name		= "R7780RP",
--	.mask		= disable_r7780rp_irq,
--	.unmask		= enable_r7780rp_irq,
--	.mask_ack	= disable_r7780rp_irq,
--};
--
--void make_r7780rp_irq(unsigned int irq)
--{
--	disable_irq_nosync(irq);
--	set_irq_chip_and_handler_name(irq, &r7780rp_irq_chip,
--				      handle_level_irq, "level");
--	enable_r7780rp_irq(irq);
--}
-diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
-index 0fdc0bc..a43b477 100644
---- a/arch/sh/boards/renesas/r7780rp/setup.c
-+++ b/arch/sh/boards/renesas/r7780rp/setup.c
-@@ -179,9 +179,11 @@ static struct platform_device ax88796_device = {
- static struct platform_device *r7780rp_devices[] __initdata = {
- 	&r8a66597_usb_host_device,
- 	&m66592_usb_peripheral_device,
--	&cf_ide_device,
- 	&heartbeat_device,
-+#ifndef CONFIG_SH_R7780RP
-+	&cf_ide_device,
- 	&ax88796_device,
-+#endif
- };
+- * WARNING: Do not depend on this tree layout remaining static just yet.
+- * The MPC5200 device tree conventions are still in flux
+- * Keep an eye on the linuxppc-dev mailing list for more details
+- */
+-
+ / {
+ 	model = "fsl,lite5200";
+-	// revision = "1.0";
+-	compatible = "fsl,lite5200","generic-mpc5200";
++	compatible = "fsl,lite5200";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
  
- static int __init r7780rp_devices_setup(void)
-@@ -316,9 +318,9 @@ void __init highlander_init_irq(void)
- 			break;
- #endif
- #ifdef CONFIG_SH_R7780RP
--		highlander_init_irq_r7780rp();
--		ucp = irl2irq;
--		break;
-+		ucp = highlander_init_irq_r7780rp();
-+		if (ucp)
-+			break;
- #endif
- 	} while (0);
+@@ -46,30 +39,29 @@
+ 	};
  
-diff --git a/arch/sh/boards/renesas/rts7751r2d/irq.c b/arch/sh/boards/renesas/rts7751r2d/irq.c
-index 7cc2813..8e49f6e 100644
---- a/arch/sh/boards/renesas/rts7751r2d/irq.c
-+++ b/arch/sh/boards/renesas/rts7751r2d/irq.c
-@@ -13,7 +13,6 @@
- #include <linux/irq.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
--#include <asm/voyagergx.h>
- #include <asm/rts7751r2d.h>
+ 	soc5200 at f0000000 {
+-		model = "fsl,mpc5200";
+-		compatible = "mpc5200";
+-		revision = "";			// from bootloader
+-		device_type = "soc";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc5200-immr";
+ 		ranges = <0 f0000000 0000c000>;
+ 		reg = <f0000000 00000100>;
+ 		bus-frequency = <0>;		// from bootloader
+ 		system-frequency = <0>;		// from bootloader
+ 
+ 		cdm at 200 {
+-			compatible = "mpc5200-cdm";
++			compatible = "fsl,mpc5200-cdm";
+ 			reg = <200 38>;
+ 		};
  
- #define R2D_NR_IRL 13
-@@ -71,7 +70,7 @@ static unsigned char irl2irq_r2d_1[R2D_NR_IRL] __initdata = {
- };
+-		mpc5200_pic: pic at 500 {
++		mpc5200_pic: interrupt-controller at 500 {
+ 			// 5200 interrupts are encoded into two levels;
+ 			interrupt-controller;
+ 			#interrupt-cells = <3>;
+ 			device_type = "interrupt-controller";
+-			compatible = "mpc5200-pic";
++			compatible = "fsl,mpc5200-pic";
+ 			reg = <500 80>;
+ 		};
  
- static DECLARE_INTC_DESC(intc_desc_r2d_1, "r2d-1", vectors_r2d_1,
--			 NULL, NULL, mask_registers_r2d_1, NULL, NULL);
-+			 NULL, mask_registers_r2d_1, NULL, NULL);
+-		gpt at 600 {	// General Purpose Timer
++		timer at 600 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200-gpt";
+ 			cell-index = <0>;
+ 			reg = <600 10>;
+@@ -78,7 +70,7 @@
+ 			fsl,has-wdt;
+ 		};
  
- #endif /* CONFIG_RTS7751R2D_1 */
+-		gpt at 610 {	// General Purpose Timer
++		timer at 610 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200-gpt";
+ 			cell-index = <1>;
+ 			reg = <610 10>;
+@@ -86,7 +78,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
  
-@@ -109,7 +108,7 @@ static unsigned char irl2irq_r2d_plus[R2D_NR_IRL] __initdata = {
- };
+-		gpt at 620 {	// General Purpose Timer
++		timer at 620 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200-gpt";
+ 			cell-index = <2>;
+ 			reg = <620 10>;
+@@ -94,7 +86,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
  
- static DECLARE_INTC_DESC(intc_desc_r2d_plus, "r2d-plus", vectors_r2d_plus,
--			 NULL, NULL, mask_registers_r2d_plus, NULL, NULL);
-+			 NULL, mask_registers_r2d_plus, NULL, NULL);
+-		gpt at 630 {	// General Purpose Timer
++		timer at 630 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200-gpt";
+ 			cell-index = <3>;
+ 			reg = <630 10>;
+@@ -102,7 +94,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
  
- #endif /* CONFIG_RTS7751R2D_PLUS */
+-		gpt at 640 {	// General Purpose Timer
++		timer at 640 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200-gpt";
+ 			cell-index = <4>;
+ 			reg = <640 10>;
+@@ -110,7 +102,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
  
-@@ -153,7 +152,4 @@ void __init init_rts7751r2d_IRQ(void)
- 	}
+-		gpt at 650 {	// General Purpose Timer
++		timer at 650 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200-gpt";
+ 			cell-index = <5>;
+ 			reg = <650 10>;
+@@ -118,7 +110,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
  
- 	register_intc_controller(d);
--#ifdef CONFIG_MFD_SM501
--	setup_voyagergx_irq();
--#endif
- }
-diff --git a/arch/sh/boards/renesas/rts7751r2d/setup.c b/arch/sh/boards/renesas/rts7751r2d/setup.c
-index 8125d20..3452b07 100644
---- a/arch/sh/boards/renesas/rts7751r2d/setup.c
-+++ b/arch/sh/boards/renesas/rts7751r2d/setup.c
-@@ -13,34 +13,15 @@
- #include <linux/pata_platform.h>
- #include <linux/serial_8250.h>
- #include <linux/sm501.h>
-+#include <linux/sm501-regs.h>
- #include <linux/pm.h>
-+#include <linux/fb.h>
-+#include <linux/spi/spi.h>
-+#include <linux/spi/spi_bitbang.h>
- #include <asm/machvec.h>
- #include <asm/rts7751r2d.h>
--#include <asm/voyagergx.h>
- #include <asm/io.h>
--
--static void __init voyagergx_serial_init(void)
--{
--	unsigned long val;
--
--	/*
--	 * GPIO Control
--	 */
--	val = readl((void __iomem *)GPIO_MUX_HIGH);
--	val |= 0x00001fe0;
--	writel(val, (void __iomem *)GPIO_MUX_HIGH);
--
--	/*
--	 * Power Mode Gate
--	 */
--	val = readl((void __iomem *)POWER_MODE0_GATE);
--	val |= (POWER_MODE0_GATE_U0 | POWER_MODE0_GATE_U1);
--	writel(val, (void __iomem *)POWER_MODE0_GATE);
--
--	val = readl((void __iomem *)POWER_MODE1_GATE);
--	val |= (POWER_MODE1_GATE_U0 | POWER_MODE1_GATE_U1);
--	writel(val, (void __iomem *)POWER_MODE1_GATE);
--}
-+#include <asm/spi.h>
+-		gpt at 660 {	// General Purpose Timer
++		timer at 660 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200-gpt";
+ 			cell-index = <6>;
+ 			reg = <660 10>;
+@@ -126,7 +118,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
  
- static struct resource cf_ide_resources[] = {
- 	[0] = {
-@@ -75,6 +56,43 @@ static struct platform_device cf_ide_device  = {
- 	},
- };
+-		gpt at 670 {	// General Purpose Timer
++		timer at 670 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200-gpt";
+ 			cell-index = <7>;
+ 			reg = <670 10>;
+@@ -135,25 +127,23 @@
+ 		};
  
-+static struct spi_board_info spi_bus[] = {
-+	{
-+		.modalias	= "rtc-r9701",
-+		.max_speed_hz	= 1000000,
-+		.mode		= SPI_MODE_3,
-+	},
-+};
+ 		rtc at 800 {	// Real time clock
+-			compatible = "mpc5200-rtc";
++			compatible = "fsl,mpc5200-rtc";
+ 			device_type = "rtc";
+ 			reg = <800 100>;
+ 			interrupts = <1 5 0 1 6 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		mscan at 900 {
+-			device_type = "mscan";
+-			compatible = "mpc5200-mscan";
++		can at 900 {
++			compatible = "fsl,mpc5200-mscan";
+ 			cell-index = <0>;
+ 			interrupts = <2 11 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 			reg = <900 80>;
+ 		};
+ 
+-		mscan at 980 {
+-			device_type = "mscan";
+-			compatible = "mpc5200-mscan";
++		can at 980 {
++			compatible = "fsl,mpc5200-mscan";
+ 			cell-index = <1>;
+ 			interrupts = <2 12 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+@@ -161,38 +151,36 @@
+ 		};
+ 
+ 		gpio at b00 {
+-			compatible = "mpc5200-gpio";
++			compatible = "fsl,mpc5200-gpio";
+ 			reg = <b00 40>;
+ 			interrupts = <1 7 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		gpio-wkup at c00 {
+-			compatible = "mpc5200-gpio-wkup";
++		gpio at c00 {
++			compatible = "fsl,mpc5200-gpio-wkup";
+ 			reg = <c00 40>;
+ 			interrupts = <1 8 0 0 3 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+ 		spi at f00 {
+-			device_type = "spi";
+-			compatible = "mpc5200-spi";
++			compatible = "fsl,mpc5200-spi";
+ 			reg = <f00 20>;
+ 			interrupts = <2 d 0 2 e 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+ 		usb at 1000 {
+-			device_type = "usb-ohci-be";
+-			compatible = "mpc5200-ohci","ohci-be";
++			compatible = "fsl,mpc5200-ohci","ohci-be";
+ 			reg = <1000 ff>;
+ 			interrupts = <2 6 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		bestcomm at 1200 {
++		dma-controller at 1200 {
+ 			device_type = "dma-controller";
+-			compatible = "mpc5200-bestcomm";
++			compatible = "fsl,mpc5200-bestcomm";
+ 			reg = <1200 80>;
+ 			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
+ 			              3 4 0  3 5 0  3 6 0  3 7 0
+@@ -202,13 +190,13 @@
+ 		};
+ 
+ 		xlb at 1f00 {
+-			compatible = "mpc5200-xlb";
++			compatible = "fsl,mpc5200-xlb";
+ 			reg = <1f00 100>;
+ 		};
+ 
+ 		serial at 2000 {		// PSC1
+ 			device_type = "serial";
+-			compatible = "mpc5200-psc-uart";
++			compatible = "fsl,mpc5200-psc-uart";
+ 			port-number = <0>;  // Logical port assignment
+ 			cell-index = <0>;
+ 			reg = <2000 100>;
+@@ -218,8 +206,7 @@
+ 
+ 		// PSC2 in ac97 mode example
+ 		//ac97 at 2200 {		// PSC2
+-		//	device_type = "sound";
+-		//	compatible = "mpc5200-psc-ac97";
++		//	compatible = "fsl,mpc5200-psc-ac97";
+ 		//	cell-index = <1>;
+ 		//	reg = <2200 100>;
+ 		//	interrupts = <2 2 0>;
+@@ -228,8 +215,7 @@
+ 
+ 		// PSC3 in CODEC mode example
+ 		//i2s at 2400 {		// PSC3
+-		//	device_type = "sound";
+-		//	compatible = "mpc5200-psc-i2s";
++		//	compatible = "fsl,mpc5200-psc-i2s";
+ 		//	cell-index = <2>;
+ 		//	reg = <2400 100>;
+ 		//	interrupts = <2 3 0>;
+@@ -239,7 +225,7 @@
+ 		// PSC4 in uart mode example
+ 		//serial at 2600 {		// PSC4
+ 		//	device_type = "serial";
+-		//	compatible = "mpc5200-psc-uart";
++		//	compatible = "fsl,mpc5200-psc-uart";
+ 		//	cell-index = <3>;
+ 		//	reg = <2600 100>;
+ 		//	interrupts = <2 b 0>;
+@@ -249,7 +235,7 @@
+ 		// PSC5 in uart mode example
+ 		//serial at 2800 {		// PSC5
+ 		//	device_type = "serial";
+-		//	compatible = "mpc5200-psc-uart";
++		//	compatible = "fsl,mpc5200-psc-uart";
+ 		//	cell-index = <4>;
+ 		//	reg = <2800 100>;
+ 		//	interrupts = <2 c 0>;
+@@ -258,8 +244,7 @@
+ 
+ 		// PSC6 in spi mode example
+ 		//spi at 2c00 {		// PSC6
+-		//	device_type = "spi";
+-		//	compatible = "mpc5200-psc-spi";
++		//	compatible = "fsl,mpc5200-psc-spi";
+ 		//	cell-index = <5>;
+ 		//	reg = <2c00 100>;
+ 		//	interrupts = <2 4 0>;
+@@ -268,24 +253,25 @@
+ 
+ 		ethernet at 3000 {
+ 			device_type = "network";
+-			compatible = "mpc5200-fec";
++			compatible = "fsl,mpc5200-fec";
+ 			reg = <3000 800>;
+-			mac-address = [ 02 03 04 05 06 07 ]; // Bad!
++			local-mac-address = [ 00 00 00 00 00 00 ];
+ 			interrupts = <2 5 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+ 		ata at 3a00 {
+ 			device_type = "ata";
+-			compatible = "mpc5200-ata";
++			compatible = "fsl,mpc5200-ata";
+ 			reg = <3a00 100>;
+ 			interrupts = <2 7 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+ 		i2c at 3d00 {
+-			device_type = "i2c";
+-			compatible = "mpc5200-i2c","fsl-i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,mpc5200-i2c","fsl-i2c";
+ 			cell-index = <0>;
+ 			reg = <3d00 40>;
+ 			interrupts = <2 f 0>;
+@@ -294,8 +280,9 @@
+ 		};
+ 
+ 		i2c at 3d40 {
+-			device_type = "i2c";
+-			compatible = "mpc5200-i2c","fsl-i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,mpc5200-i2c","fsl-i2c";
+ 			cell-index = <1>;
+ 			reg = <3d40 40>;
+ 			interrupts = <2 10 0>;
+@@ -303,8 +290,7 @@
+ 			fsl5200-clocking;
+ 		};
+ 		sram at 8000 {
+-			device_type = "sram";
+-			compatible = "mpc5200-sram","sram";
++			compatible = "fsl,mpc5200-sram","sram";
+ 			reg = <8000 4000>;
+ 		};
+ 	};
+@@ -314,7 +300,7 @@
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+ 		device_type = "pci";
+-		compatible = "mpc5200-pci";
++		compatible = "fsl,mpc5200-pci";
+ 		reg = <f0000d00 100>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <c000 0 0 1 &mpc5200_pic 0 0 3
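
Most of the lite5200 churn above (and the lite5200b churn below) is the move to vendor-prefixed compatible strings ("fsl,mpc5200-gpt" instead of "mpc5200-gpt") and to generic node names (timer@600 instead of gpt@600), with device_type dropped where it carried no information. Drivers normally keep matching both spellings while older device trees are still in circulation. A hedged sketch of such a match table; struct of_device_id is the real kernel structure, but the table name is made up for illustration and is not an in-tree symbol.

#include <linux/mod_devicetable.h>	/* struct of_device_id */

/* Illustrative only: list the new "fsl,"-prefixed compatible first and the
 * legacy spelling second, so either generation of device tree still binds. */
static const struct of_device_id hypothetical_gpt_match[] = {
	{ .compatible = "fsl,mpc5200-gpt", },
	{ .compatible = "mpc5200-gpt", },	/* pre-rename trees */
	{ /* sentinel */ },
};

Matching purely on compatible is also what makes the removed device_type = "spi" / "sound" / "sram" properties redundant in these nodes.
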
+diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
+index b540388..571ba02 100644
+--- a/arch/powerpc/boot/dts/lite5200b.dts
++++ b/arch/powerpc/boot/dts/lite5200b.dts
+@@ -18,8 +18,7 @@
+ 
+ / {
+ 	model = "fsl,lite5200b";
+-	// revision = "1.0";
+-	compatible = "fsl,lite5200b","generic-mpc5200";
++	compatible = "fsl,lite5200b";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
+@@ -46,30 +45,29 @@
+ 	};
+ 
+ 	soc5200 at f0000000 {
+-		model = "fsl,mpc5200b";
+-		compatible = "mpc5200";
+-		revision = "";			// from bootloader
+-		device_type = "soc";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc5200b-immr";
+ 		ranges = <0 f0000000 0000c000>;
+ 		reg = <f0000000 00000100>;
+ 		bus-frequency = <0>;		// from bootloader
+ 		system-frequency = <0>;		// from bootloader
+ 
+ 		cdm at 200 {
+-			compatible = "mpc5200b-cdm","mpc5200-cdm";
++			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
+ 			reg = <200 38>;
+ 		};
+ 
+-		mpc5200_pic: pic at 500 {
++		mpc5200_pic: interrupt-controller at 500 {
+ 			// 5200 interrupts are encoded into two levels;
+ 			interrupt-controller;
+ 			#interrupt-cells = <3>;
+ 			device_type = "interrupt-controller";
+-			compatible = "mpc5200b-pic","mpc5200-pic";
++			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
+ 			reg = <500 80>;
+ 		};
+ 
+-		gpt at 600 {	// General Purpose Timer
++		timer at 600 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ 			cell-index = <0>;
+ 			reg = <600 10>;
+@@ -78,7 +76,7 @@
+ 			fsl,has-wdt;
+ 		};
+ 
+-		gpt at 610 {	// General Purpose Timer
++		timer at 610 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ 			cell-index = <1>;
+ 			reg = <610 10>;
+@@ -86,7 +84,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		gpt at 620 {	// General Purpose Timer
++		timer at 620 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ 			cell-index = <2>;
+ 			reg = <620 10>;
+@@ -94,7 +92,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		gpt at 630 {	// General Purpose Timer
++		timer at 630 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ 			cell-index = <3>;
+ 			reg = <630 10>;
+@@ -102,7 +100,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		gpt at 640 {	// General Purpose Timer
++		timer at 640 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ 			cell-index = <4>;
+ 			reg = <640 10>;
+@@ -110,7 +108,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		gpt at 650 {	// General Purpose Timer
++		timer at 650 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ 			cell-index = <5>;
+ 			reg = <650 10>;
+@@ -118,7 +116,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		gpt at 660 {	// General Purpose Timer
++		timer at 660 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ 			cell-index = <6>;
+ 			reg = <660 10>;
+@@ -126,7 +124,7 @@
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		gpt at 670 {	// General Purpose Timer
++		timer at 670 {	// General Purpose Timer
+ 			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ 			cell-index = <7>;
+ 			reg = <670 10>;
+@@ -135,25 +133,23 @@
+ 		};
+ 
+ 		rtc at 800 {	// Real time clock
+-			compatible = "mpc5200b-rtc","mpc5200-rtc";
++			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
+ 			device_type = "rtc";
+ 			reg = <800 100>;
+ 			interrupts = <1 5 0 1 6 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		mscan at 900 {
+-			device_type = "mscan";
+-			compatible = "mpc5200b-mscan","mpc5200-mscan";
++		can at 900 {
++			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
+ 			cell-index = <0>;
+ 			interrupts = <2 11 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 			reg = <900 80>;
+ 		};
+ 
+-		mscan at 980 {
+-			device_type = "mscan";
+-			compatible = "mpc5200b-mscan","mpc5200-mscan";
++		can at 980 {
++			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
+ 			cell-index = <1>;
+ 			interrupts = <2 12 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+@@ -161,38 +157,36 @@
+ 		};
+ 
+ 		gpio at b00 {
+-			compatible = "mpc5200b-gpio","mpc5200-gpio";
++			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
+ 			reg = <b00 40>;
+ 			interrupts = <1 7 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		gpio-wkup at c00 {
+-			compatible = "mpc5200b-gpio-wkup","mpc5200-gpio-wkup";
++		gpio at c00 {
++			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
+ 			reg = <c00 40>;
+ 			interrupts = <1 8 0 0 3 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+ 		spi at f00 {
+-			device_type = "spi";
+-			compatible = "mpc5200b-spi","mpc5200-spi";
++			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
+ 			reg = <f00 20>;
+ 			interrupts = <2 d 0 2 e 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+ 		usb at 1000 {
+-			device_type = "usb-ohci-be";
+-			compatible = "mpc5200b-ohci","mpc5200-ohci","ohci-be";
++			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
+ 			reg = <1000 ff>;
+ 			interrupts = <2 6 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+-		bestcomm at 1200 {
++		dma-controller at 1200 {
+ 			device_type = "dma-controller";
+-			compatible = "mpc5200b-bestcomm","mpc5200-bestcomm";
++			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
+ 			reg = <1200 80>;
+ 			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
+ 			              3 4 0  3 5 0  3 6 0  3 7 0
+@@ -202,13 +196,13 @@
+ 		};
+ 
+ 		xlb at 1f00 {
+-			compatible = "mpc5200b-xlb","mpc5200-xlb";
++			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
+ 			reg = <1f00 100>;
+ 		};
+ 
+ 		serial at 2000 {		// PSC1
+ 			device_type = "serial";
+-			compatible = "mpc5200b-psc-uart","mpc5200-psc-uart";
++			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ 			port-number = <0>;  // Logical port assignment
+ 			cell-index = <0>;
+ 			reg = <2000 100>;
+@@ -218,8 +212,7 @@
+ 
+ 		// PSC2 in ac97 mode example
+ 		//ac97 at 2200 {		// PSC2
+-		//	device_type = "sound";
+-		//	compatible = "mpc5200b-psc-ac97","mpc5200-psc-ac97";
++		//	compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97";
+ 		//	cell-index = <1>;
+ 		//	reg = <2200 100>;
+ 		//	interrupts = <2 2 0>;
+@@ -228,8 +221,7 @@
+ 
+ 		// PSC3 in CODEC mode example
+ 		//i2s at 2400 {		// PSC3
+-		//	device_type = "sound";
+-		//	compatible = "mpc5200b-psc-i2s"; //not 5200 compatible
++		//	compatible = "fsl,mpc5200b-psc-i2s"; //not 5200 compatible
+ 		//	cell-index = <2>;
+ 		//	reg = <2400 100>;
+ 		//	interrupts = <2 3 0>;
+@@ -239,7 +231,7 @@
+ 		// PSC4 in uart mode example
+ 		//serial at 2600 {		// PSC4
+ 		//	device_type = "serial";
+-		//	compatible = "mpc5200b-psc-uart","mpc5200-psc-uart";
++		//	compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ 		//	cell-index = <3>;
+ 		//	reg = <2600 100>;
+ 		//	interrupts = <2 b 0>;
+@@ -249,7 +241,7 @@
+ 		// PSC5 in uart mode example
+ 		//serial at 2800 {		// PSC5
+ 		//	device_type = "serial";
+-		//	compatible = "mpc5200b-psc-uart","mpc5200-psc-uart";
++		//	compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ 		//	cell-index = <4>;
+ 		//	reg = <2800 100>;
+ 		//	interrupts = <2 c 0>;
+@@ -258,8 +250,7 @@
+ 
+ 		// PSC6 in spi mode example
+ 		//spi at 2c00 {		// PSC6
+-		//	device_type = "spi";
+-		//	compatible = "mpc5200b-psc-spi","mpc5200-psc-spi";
++		//	compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi";
+ 		//	cell-index = <5>;
+ 		//	reg = <2c00 100>;
+ 		//	interrupts = <2 4 0>;
+@@ -268,9 +259,9 @@
+ 
+ 		ethernet at 3000 {
+ 			device_type = "network";
+-			compatible = "mpc5200b-fec","mpc5200-fec";
++			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
+ 			reg = <3000 400>;
+-			mac-address = [ 02 03 04 05 06 07 ]; // Bad!
++			local-mac-address = [ 00 00 00 00 00 00 ];
+ 			interrupts = <2 5 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 			phy-handle = <&phy0>;
+@@ -279,8 +270,7 @@
+ 		mdio at 3000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "mpc5200b-fec-phy";
++			compatible = "fsl,mpc5200b-mdio";
+ 			reg = <3000 400>;	// fec range, since we need to setup fec interrupts
+ 			interrupts = <2 5 0>;	// these are for "mii command finished", not link changes & co.
+ 			interrupt-parent = <&mpc5200_pic>;
+@@ -293,15 +283,16 @@
+ 
+ 		ata at 3a00 {
+ 			device_type = "ata";
+-			compatible = "mpc5200b-ata","mpc5200-ata";
++			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
+ 			reg = <3a00 100>;
+ 			interrupts = <2 7 0>;
+ 			interrupt-parent = <&mpc5200_pic>;
+ 		};
+ 
+ 		i2c at 3d00 {
+-			device_type = "i2c";
+-			compatible = "mpc5200b-i2c","mpc5200-i2c","fsl-i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
+ 			cell-index = <0>;
+ 			reg = <3d00 40>;
+ 			interrupts = <2 f 0>;
+@@ -310,8 +301,9 @@
+ 		};
+ 
+ 		i2c at 3d40 {
+-			device_type = "i2c";
+-			compatible = "mpc5200b-i2c","mpc5200-i2c","fsl-i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
+ 			cell-index = <1>;
+ 			reg = <3d40 40>;
+ 			interrupts = <2 10 0>;
+@@ -319,8 +311,7 @@
+ 			fsl5200-clocking;
+ 		};
+ 		sram at 8000 {
+-			device_type = "sram";
+-			compatible = "mpc5200b-sram","mpc5200-sram","sram";
++			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram","sram";
+ 			reg = <8000 4000>;
+ 		};
+ 	};
+@@ -330,7 +321,7 @@
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+ 		device_type = "pci";
+-		compatible = "mpc5200b-pci","mpc5200-pci";
++		compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
+ 		reg = <f0000d00 100>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <c000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot
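
Both lite5200 trees also drop the hard-coded mac-address = [ 02 03 04 05 06 07 ] ("Bad!") in favour of an all-zero local-mac-address that the bootloader is expected to overwrite before handing over the tree. An all-zero MAC is not usable on the wire, so a consumer has to treat it as "not filled in"; a small stand-alone sketch of that check, illustrative only and not the kernel's helper:

#include <stdbool.h>
#include <stdio.h>

/* Treat the all-zero local-mac-address placeholder as "not set by firmware". */
static bool mac_is_unset(const unsigned char mac[6])
{
	int i;

	for (i = 0; i < 6; i++)
		if (mac[i] != 0x00)
			return false;
	return true;
}

int main(void)
{
	const unsigned char placeholder[6] = { 0, 0, 0, 0, 0, 0 };

	printf("placeholder unset: %d\n", mac_is_unset(placeholder));
	return 0;
}
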
+diff --git a/arch/powerpc/boot/dts/makalu.dts b/arch/powerpc/boot/dts/makalu.dts
+new file mode 100644
+index 0000000..bdd70e4
+--- /dev/null
++++ b/arch/powerpc/boot/dts/makalu.dts
+@@ -0,0 +1,347 @@
++/*
++ * Device Tree Source for AMCC Makalu (405EX)
++ *
++ * Copyright 2007 DENX Software Engineering, Stefan Roese <sr at denx.de>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2.  This program is licensed "as is" without
++ * any warranty of any kind, whether express or implied.
++ */
 +
-+static void r2d_chip_select(struct sh_spi_info *spi, int cs, int state)
-+{
-+	BUG_ON(cs != 0);  /* Single Epson RTC-9701JE attached on CS0 */
-+	ctrl_outw(state == BITBANG_CS_ACTIVE, PA_RTCCE);
-+}
++/ {
++	#address-cells = <1>;
++	#size-cells = <1>;
++	model = "amcc,makalu";
++	compatible = "amcc,makalu";
++	dcr-parent = <&/cpus/cpu at 0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		ethernet1 = &EMAC1;
++		serial0 = &UART0;
++		serial1 = &UART1;
++	};
 +
-+static struct sh_spi_info spi_info = {
-+	.num_chipselect = 1,
-+	.chip_select = r2d_chip_select,
-+};
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu at 0 {
++			device_type = "cpu";
++			model = "PowerPC,405EX";
++			reg = <0>;
++			clock-frequency = <0>; /* Filled in by U-Boot */
++			timebase-frequency = <0>; /* Filled in by U-Boot */
++			i-cache-line-size = <20>;
++			d-cache-line-size = <20>;
++			i-cache-size = <4000>; /* 16 kB */
++			d-cache-size = <4000>; /* 16 kB */
++			dcr-controller;
++			dcr-access-method = "native";
++		};
++	};
 +
-+static struct resource spi_sh_sci_resources[] = {
-+	{
-+		.start	= 0xffe00000,
-+		.end	= 0xffe0001f,
-+		.flags	= IORESOURCE_MEM,
-+	},
-+};
++	memory {
++		device_type = "memory";
++		reg = <0 0>; /* Filled in by U-Boot */
++	};
 +
-+static struct platform_device spi_sh_sci_device  = {
-+	.name		= "spi_sh_sci",
-+	.id		= -1,
-+	.num_resources	= ARRAY_SIZE(spi_sh_sci_resources),
-+	.resource	= spi_sh_sci_resources,
-+	.dev	= {
-+		.platform_data	= &spi_info,
-+	},
-+};
++	UIC0: interrupt-controller {
++		compatible = "ibm,uic-405ex", "ibm,uic";
++		interrupt-controller;
++		cell-index = <0>;
++		dcr-reg = <0c0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++	};
 +
- static struct resource heartbeat_resources[] = {
- 	[0] = {
- 		.start	= PA_OUTPORT,
-@@ -93,11 +111,11 @@ static struct platform_device heartbeat_device = {
- #ifdef CONFIG_MFD_SM501
- static struct plat_serial8250_port uart_platform_data[] = {
- 	{
--		.membase	= (void __iomem *)VOYAGER_UART_BASE,
--		.mapbase	= VOYAGER_UART_BASE,
-+		.membase	= (void __iomem *)0xb3e30000,
-+		.mapbase	= 0xb3e30000,
- 		.iotype		= UPIO_MEM,
--		.irq		= IRQ_SM501_U0,
--		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
-+		.irq		= IRQ_VOYAGER,
-+		.flags		= UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ,
- 		.regshift	= 2,
- 		.uartclk	= (9600 * 16),
- 	},
-@@ -124,14 +142,67 @@ static struct resource sm501_resources[] = {
- 		.flags	= IORESOURCE_MEM,
- 	},
- 	[2]	= {
--		.start	= IRQ_SM501_CV,
-+		.start	= IRQ_VOYAGER,
- 		.flags	= IORESOURCE_IRQ,
- 	},
- };
- 
-+static struct fb_videomode sm501_default_mode = {
-+	.pixclock	= 35714,
-+	.xres		= 640,
-+	.yres		= 480,
-+	.left_margin	= 105,
-+	.right_margin	= 50,
-+	.upper_margin	= 35,
-+	.lower_margin	= 0,
-+	.hsync_len	= 96,
-+	.vsync_len	= 2,
-+	.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-+};
++	UIC1: interrupt-controller1 {
++		compatible = "ibm,uic-405ex","ibm,uic";
++		interrupt-controller;
++		cell-index = <1>;
++		dcr-reg = <0d0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <1e 4 1f 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
 +
-+static struct sm501_platdata_fbsub sm501_pdata_fbsub_pnl = {
-+	.def_bpp	= 16,
-+	.def_mode	= &sm501_default_mode,
-+	.flags		= SM501FB_FLAG_USE_INIT_MODE |
-+			  SM501FB_FLAG_USE_HWCURSOR |
-+			  SM501FB_FLAG_USE_HWACCEL |
-+			  SM501FB_FLAG_DISABLE_AT_EXIT,
-+};
++	UIC2: interrupt-controller2 {
++		compatible = "ibm,uic-405ex","ibm,uic";
++		interrupt-controller;
++		cell-index = <2>;
++		dcr-reg = <0e0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <1c 4 1d 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
 +
-+static struct sm501_platdata_fbsub sm501_pdata_fbsub_crt = {
-+	.flags		= (SM501FB_FLAG_USE_INIT_MODE |
-+			   SM501FB_FLAG_USE_HWCURSOR |
-+			   SM501FB_FLAG_USE_HWACCEL |
-+			   SM501FB_FLAG_DISABLE_AT_EXIT),
++	plb {
++		compatible = "ibm,plb-405ex", "ibm,plb4";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges;
++		clock-frequency = <0>; /* Filled in by U-Boot */
++
++		SDRAM0: memory-controller {
++			compatible = "ibm,sdram-405ex";
++			dcr-reg = <010 2>;
++		};
 +
-+};
++		MAL0: mcmal {
++			compatible = "ibm,mcmal-405ex", "ibm,mcmal2";
++			dcr-reg = <180 62>;
++			num-tx-chans = <2>;
++			num-rx-chans = <2>;
++			interrupt-parent = <&MAL0>;
++			interrupts = <0 1 2 3 4>;
++			#interrupt-cells = <1>;
++			#address-cells = <0>;
++			#size-cells = <0>;
++			interrupt-map = </*TXEOB*/ 0 &UIC0 a 4
++					/*RXEOB*/ 1 &UIC0 b 4
++					/*SERR*/  2 &UIC1 0 4
++					/*TXDE*/  3 &UIC1 1 4
++					/*RXDE*/  4 &UIC1 2 4>;
++			interrupt-map-mask = <ffffffff>;
++		};
 +
-+static struct sm501_platdata_fb sm501_fb_pdata = {
-+	.fb_route	= SM501_FB_OWN,
-+	.fb_crt		= &sm501_pdata_fbsub_crt,
-+	.fb_pnl		= &sm501_pdata_fbsub_pnl,
-+	.flags		= SM501_FBPD_SWAP_FB_ENDIAN,
-+};
++		POB0: opb {
++			compatible = "ibm,opb-405ex", "ibm,opb";
++			#address-cells = <1>;
++			#size-cells = <1>;
++			ranges = <80000000 80000000 10000000
++				  ef600000 ef600000 a00000
++				  f0000000 f0000000 10000000>;
++			dcr-reg = <0a0 5>;
++			clock-frequency = <0>; /* Filled in by U-Boot */
++
++			EBC0: ebc {
++				compatible = "ibm,ebc-405ex", "ibm,ebc";
++				dcr-reg = <012 2>;
++				#address-cells = <2>;
++				#size-cells = <1>;
++				clock-frequency = <0>; /* Filled in by U-Boot */
++				/* ranges property is supplied by U-Boot */
++				interrupts = <5 1>;
++				interrupt-parent = <&UIC1>;
++
++				nor_flash at 0,0 {
++					compatible = "amd,s29gl512n", "cfi-flash";
++					bank-width = <2>;
++					reg = <0 000000 4000000>;
++					#address-cells = <1>;
++					#size-cells = <1>;
++					partition at 0 {
++						label = "kernel";
++						reg = <0 200000>;
++					};
++					partition at 200000 {
++						label = "root";
++						reg = <200000 200000>;
++					};
++					partition at 400000 {
++						label = "user";
++						reg = <400000 3b60000>;
++					};
++					partition at 3f60000 {
++						label = "env";
++						reg = <3f60000 40000>;
++					};
++					partition at 3fa0000 {
++						label = "u-boot";
++						reg = <3fa0000 60000>;
++					};
++				};
++			};
 +
-+static struct sm501_initdata sm501_initdata = {
-+	.gpio_high	= {
-+		.set	= 0x00001fe0,
-+		.mask	= 0x0,
-+	},
-+	.devices	= SM501_USE_USB_HOST,
-+};
++			UART0: serial at ef600200 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <ef600200 8>;
++				virtual-reg = <ef600200>;
++				clock-frequency = <0>; /* Filled in by U-Boot */
++				current-speed = <0>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <1a 4>;
++			};
 +
-+static struct sm501_platdata sm501_platform_data = {
-+	.init		= &sm501_initdata,
-+	.fb		= &sm501_fb_pdata,
-+};
++			UART1: serial at ef600300 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <ef600300 8>;
++				virtual-reg = <ef600300>;
++				clock-frequency = <0>; /* Filled in by U-Boot */
++				current-speed = <0>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <1 4>;
++			};
 +
- static struct platform_device sm501_device = {
- 	.name		= "sm501",
- 	.id		= -1,
-+	.dev		= {
-+		.platform_data	= &sm501_platform_data,
-+	},
- 	.num_resources	= ARRAY_SIZE(sm501_resources),
- 	.resource	= sm501_resources,
- };
-@@ -145,10 +216,12 @@ static struct platform_device *rts7751r2d_devices[] __initdata = {
- #endif
- 	&cf_ide_device,
- 	&heartbeat_device,
-+	&spi_sh_sci_device,
- };
- 
- static int __init rts7751r2d_devices_setup(void)
- {
-+	spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
- 	return platform_add_devices(rts7751r2d_devices,
- 				    ARRAY_SIZE(rts7751r2d_devices));
- }
-@@ -192,6 +265,7 @@ u8 rts7751r2d_readb(void __iomem *addr)
-  */
- static void __init rts7751r2d_setup(char **cmdline_p)
- {
-+	void __iomem *sm501_reg;
- 	u16 ver = ctrl_inw(PA_VERREG);
- 
- 	printk(KERN_INFO "Renesas Technology Sales RTS7751R2D support.\n");
-@@ -202,7 +276,30 @@ static void __init rts7751r2d_setup(char **cmdline_p)
- 	ctrl_outw(0x0000, PA_OUTPORT);
- 	pm_power_off = rts7751r2d_power_off;
- 
--	voyagergx_serial_init();
-+	/* sm501 dram configuration:
-+	 * ColSizeX = 11 - External Memory Column Size: 256 words.
-+	 * APX = 1 - External Memory Active to Pre-Charge Delay: 7 clocks.
-+	 * RstX = 1 - External Memory Reset: Normal.
-+	 * Rfsh = 1 - Local Memory Refresh to Command Delay: 12 clocks.
-+	 * BwC =  1 - Local Memory Block Write Cycle Time: 2 clocks.
-+	 * BwP =  1 - Local Memory Block Write to Pre-Charge Delay: 1 clock.
-+	 * AP = 1 - Internal Memory Active to Pre-Charge Delay: 7 clocks.
-+	 * Rst = 1 - Internal Memory Reset: Normal.
-+	 * RA = 1 - Internal Memory Remain in Active State: Do not remain.
-+	 */
++			IIC0: i2c at ef600400 {
++				device_type = "i2c";
++				compatible = "ibm,iic-405ex", "ibm,iic";
++				reg = <ef600400 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <2 4>;
++			};
 +
-+	sm501_reg = (void __iomem *)0xb3e00000 + SM501_DRAM_CONTROL;
-+	writel(readl(sm501_reg) | 0x00f107c0, sm501_reg);
++			IIC1: i2c at ef600500 {
++				device_type = "i2c";
++				compatible = "ibm,iic-405ex", "ibm,iic";
++				reg = <ef600500 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <7 4>;
++			};
 +
-+	/*
-+	 * Power Mode Gate - Enable UART0
-+	 */
 +
-+	sm501_reg = (void __iomem *)0xb3e00000 + SM501_POWER_MODE_0_GATE;
-+	writel(readl(sm501_reg) | (1 << SM501_GATE_UART0), sm501_reg);
++			RGMII0: emac-rgmii at ef600b00 {
++				device_type = "rgmii-interface";
++				compatible = "ibm,rgmii-405ex", "ibm,rgmii";
++				reg = <ef600b00 104>;
++				has-mdio;
++			};
 +
-+	sm501_reg = (void __iomem *)0xb3e00000 + SM501_POWER_MODE_1_GATE;
-+	writel(readl(sm501_reg) | (1 << SM501_GATE_UART0), sm501_reg);
- }
- 
- /*
-@@ -215,8 +312,4 @@ static struct sh_machine_vector mv_rts7751r2d __initmv = {
- 	.mv_irq_demux		= rts7751r2d_irq_demux,
- 	.mv_writeb		= rts7751r2d_writeb,
- 	.mv_readb		= rts7751r2d_readb,
--#if defined(CONFIG_MFD_SM501) && defined(CONFIG_USB_OHCI_HCD)
--	.mv_consistent_alloc	= voyagergx_consistent_alloc,
--	.mv_consistent_free	= voyagergx_consistent_free,
--#endif
- };
-diff --git a/arch/sh/boards/renesas/sdk7780/Kconfig b/arch/sh/boards/renesas/sdk7780/Kconfig
-new file mode 100644
-index 0000000..e4f5b69
---- /dev/null
-+++ b/arch/sh/boards/renesas/sdk7780/Kconfig
-@@ -0,0 +1,23 @@
-+if SH_SDK7780
++			EMAC0: ethernet at ef600900 {
++				linux,network-index = <0>;
++				device_type = "network";
++				compatible = "ibm,emac-405ex", "ibm,emac4";
++				interrupt-parent = <&EMAC0>;
++				interrupts = <0 1>;
++				#interrupt-cells = <1>;
++				#address-cells = <0>;
++				#size-cells = <0>;
++				interrupt-map = </*Status*/ 0 &UIC0 18 4
++						/*Wake*/  1 &UIC1 1d 4>;
++				reg = <ef600900 70>;
++				local-mac-address = [000000000000]; /* Filled in by U-Boot */
++				mal-device = <&MAL0>;
++				mal-tx-channel = <0>;
++				mal-rx-channel = <0>;
++				cell-index = <0>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rgmii";
++				phy-map = <0000003f>;	/* Start at 6 */
++				rgmii-device = <&RGMII0>;
++				rgmii-channel = <0>;
++				has-inverted-stacr-oc;
++				has-new-stacr-staopc;
++			};
 +
-+choice
-+	prompt "SDK7780 options"
-+	default SH_SDK7780_BASE
++			EMAC1: ethernet at ef600a00 {
++				linux,network-index = <1>;
++				device_type = "network";
++				compatible = "ibm,emac-405ex", "ibm,emac4";
++				interrupt-parent = <&EMAC1>;
++				interrupts = <0 1>;
++				#interrupt-cells = <1>;
++				#address-cells = <0>;
++				#size-cells = <0>;
++				interrupt-map = </*Status*/ 0 &UIC0 19 4
++						/*Wake*/  1 &UIC1 1f 4>;
++				reg = <ef600a00 70>;
++				local-mac-address = [000000000000]; /* Filled in by U-Boot */
++				mal-device = <&MAL0>;
++				mal-tx-channel = <1>;
++				mal-rx-channel = <1>;
++				cell-index = <1>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rgmii";
++				phy-map = <00000000>;
++				rgmii-device = <&RGMII0>;
++				rgmii-channel = <1>;
++				has-inverted-stacr-oc;
++				has-new-stacr-staopc;
++			};
++		};
 +
-+config SH_SDK7780_STANDALONE
-+	bool "SDK7780 board support"
-+	depends on CPU_SUBTYPE_SH7780
-+	help
-+	  Selecting this option will enable support for the
-+	  standalone version of the SDK7780. If in doubt, say Y.
++		PCIE0: pciex at 0a0000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex";
++			primary;
++			port = <0>; /* port number */
++			reg = <a0000000 20000000	/* Config space access */
++			       ef000000 00001000>;	/* Registers */
++			dcr-reg = <040 020>;
++			sdr-base = <400>;
 +
-+config SH_SDK7780_BASE
-+	bool "SDK7780 with base-board support"
-+	depends on CPU_SUBTYPE_SH7780
-+	help
-+	  Selecting this option will enable support for the expansion
-+	  baseboard devices. If in doubt, say Y.
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 90000000 0 08000000
++				  01000000 0 00000000 e0000000 0 00010000>;
 +
-+endchoice
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 80000000>;
 +
-+endif
-diff --git a/arch/sh/boards/renesas/sdk7780/Makefile b/arch/sh/boards/renesas/sdk7780/Makefile
-new file mode 100644
-index 0000000..3d8f0be
---- /dev/null
-+++ b/arch/sh/boards/renesas/sdk7780/Makefile
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for the SDK7780 specific parts of the kernel
-+#
-+obj-y	 := setup.o irq.o
++			/* This drives busses 0x00 to 0x3f */
++			bus-range = <00 3f>;
 +
-diff --git a/arch/sh/boards/renesas/sdk7780/irq.c b/arch/sh/boards/renesas/sdk7780/irq.c
++			/* Legacy interrupts (note the weird polarity, the bridge seems
++			 * to invert PCIe legacy interrupts).
++			 * We are de-swizzling here because the numbers are actually for
++			 * port of the root complex virtual P2P bridge. But I want
++			 * to avoid putting a node for it in the tree, so the numbers
++			 * below are basically de-swizzled numbers.
++			 * The real slot is on idsel 0, so the swizzling is 1:1
++			 */
++			interrupt-map-mask = <0000 0 0 7>;
++			interrupt-map = <
++				0000 0 0 1 &UIC2 0 4 /* swizzled int A */
++				0000 0 0 2 &UIC2 1 4 /* swizzled int B */
++				0000 0 0 3 &UIC2 2 4 /* swizzled int C */
++				0000 0 0 4 &UIC2 3 4 /* swizzled int D */>;
++		};
++
++		PCIE1: pciex at 0c0000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex";
++			primary;
++			port = <1>; /* port number */
++			reg = <c0000000 20000000	/* Config space access */
++			       ef001000 00001000>;	/* Registers */
++			dcr-reg = <060 020>;
++			sdr-base = <440>;
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed
++			 */
++			ranges = <02000000 0 80000000 98000000 0 08000000
++				  01000000 0 00000000 e0010000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 80000000>;
++
++			/* This drives busses 0x40 to 0x7f */
++			bus-range = <40 7f>;
++
++			/* Legacy interrupts (note the weird polarity, the bridge seems
++			 * to invert PCIe legacy interrupts).
++			 * We are de-swizzling here because the numbers are actually for
++			 * port of the root complex virtual P2P bridge. But I want
++			 * to avoid putting a node for it in the tree, so the numbers
++			 * below are basically de-swizzled numbers.
++			 * The real slot is on idsel 0, so the swizzling is 1:1
++			 */
++			interrupt-map-mask = <0000 0 0 7>;
++			interrupt-map = <
++				0000 0 0 1 &UIC2 b 4 /* swizzled int A */
++				0000 0 0 2 &UIC2 c 4 /* swizzled int B */
++				0000 0 0 3 &UIC2 d 4 /* swizzled int C */
++				0000 0 0 4 &UIC2 e 4 /* swizzled int D */>;
++		};
++	};
++};
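
A small reading aid for the property values in makalu.dts and the other 4xx trees in this patch: these pre-dts-v1 sources write every cell as bare hexadecimal, with no 0x prefix. So d-cache-size = <4000> is 16 KiB, d-cache-line-size = <20> is a 32-byte line, max-frame-size = <5dc> is 1500 bytes, katmai's current-speed = <1c200> is 115200 baud, and the UIC1 cascade interrupts = <1e 4 1f 4> land on UIC0 inputs 30 and 31. A trivial check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Bare-hex device tree cells from the trees above, decoded. */
	printf("<4000>  = %d bytes (16 KiB cache)\n", 0x4000);
	printf("<20>    = %d byte cache line\n", 0x20);
	printf("<5dc>   = %d byte max frame\n", 0x5dc);
	printf("<1c200> = %d baud\n", 0x1c200);
	printf("<1e 4 1f 4> cascades on UIC0 inputs %d and %d\n", 0x1e, 0x1f);
	return 0;
}
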
+diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts
 new file mode 100644
-index 0000000..87cdc57
+index 0000000..76951ab
 --- /dev/null
-+++ b/arch/sh/boards/renesas/sdk7780/irq.c
-@@ -0,0 +1,46 @@
++++ b/arch/powerpc/boot/dts/motionpro.dts
+@@ -0,0 +1,301 @@
 +/*
-+ * linux/arch/sh/boards/renesas/sdk7780/irq.c
++ * Motion-PRO board Device Tree Source
 + *
-+ * Renesas Technology Europe SDK7780 Support.
-+ *
-+ * Copyright (C) 2008  Nicholas Beck <nbeck at mpc-data.co.uk>
++ * Copyright (C) 2007 Semihalf
++ * Marian Balakowicz <m8 at semihalf.com>
 + *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
 + */
-+#include <linux/init.h>
-+#include <linux/irq.h>
-+#include <linux/io.h>
-+#include <asm/sdk7780.h>
 +
-+enum {
-+	UNUSED = 0,
-+	/* board specific interrupt sources */
-+	SMC91C111,	/* Ethernet controller */
-+};
++/ {
++	model = "promess,motionpro";
++	compatible = "promess,motionpro";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,5200 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <20>;
++			i-cache-line-size = <20>;
++			d-cache-size = <4000>;		// L1, 16K
++			i-cache-size = <4000>;		// L1, 16K
++			timebase-frequency = <0>;	// from bootloader
++			bus-frequency = <0>;		// from bootloader
++			clock-frequency = <0>;		// from bootloader
++		};
++	};
 +
-+static struct intc_vect fpga_vectors[] __initdata = {
-+	INTC_IRQ(SMC91C111, IRQ_ETHERNET),
-+};
++	memory {
++		device_type = "memory";
++		reg = <00000000 04000000>;	// 64MB
++	};
 +
-+static struct intc_mask_reg fpga_mask_registers[] __initdata = {
-+	{ 0, FPGA_IRQ0MR, 16,
-+	  { 0, 0, 0, 0, 0, 0, 0, 0,
-+	    0, 0, 0, SMC91C111, 0, 0, 0, 0 } },
-+};
++	soc5200 at f0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc5200b-immr";
++		ranges = <0 f0000000 0000c000>;
++		reg = <f0000000 00000100>;
++		bus-frequency = <0>;		// from bootloader
++		system-frequency = <0>;		// from bootloader
++
++		cdm at 200 {
++			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
++			reg = <200 38>;
++		};
 +
-+static DECLARE_INTC_DESC(fpga_intc_desc, "sdk7780-irq", fpga_vectors,
-+			 NULL, fpga_mask_registers, NULL, NULL);
++		mpc5200_pic: interrupt-controller at 500 {
++			// 5200 interrupts are encoded into two levels;
++			interrupt-controller;
++			#interrupt-cells = <3>;
++			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
++			reg = <500 80>;
++		};
 +
-+void __init init_sdk7780_IRQ(void)
-+{
-+	printk(KERN_INFO "Using SDK7780 interrupt controller.\n");
++		timer at 600 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <600 10>;
++			interrupts = <1 9 0>;
++			interrupt-parent = <&mpc5200_pic>;
++			fsl,has-wdt;
++		};
 +
-+	ctrl_outw(0xFFFF, FPGA_IRQ0MR);
-+	/* Setup IRL 0-3 */
-+	ctrl_outw(0x0003, FPGA_IMSR);
-+	plat_irq_setup_pins(IRQ_MODE_IRL3210);
++		timer at 610 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <610 10>;
++			interrupts = <1 a 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+	register_intc_controller(&fpga_intc_desc);
-+}
-diff --git a/arch/sh/boards/renesas/sdk7780/setup.c b/arch/sh/boards/renesas/sdk7780/setup.c
-new file mode 100644
-index 0000000..5df32f2
---- /dev/null
-+++ b/arch/sh/boards/renesas/sdk7780/setup.c
-@@ -0,0 +1,109 @@
-+/*
-+ * arch/sh/boards/renesas/sdk7780/setup.c
-+ *
-+ * Renesas Solutions SH7780 SDK Support
-+ * Copyright (C) 2008 Nicholas Beck <nbeck at mpc-data.co.uk>
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/init.h>
-+#include <linux/types.h>
-+#include <linux/platform_device.h>
-+#include <linux/pata_platform.h>
-+#include <asm/machvec.h>
-+#include <asm/sdk7780.h>
-+#include <asm/heartbeat.h>
-+#include <asm/io.h>
-+#include <asm/addrspace.h>
++		timer at 620 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <620 10>;
++			interrupts = <1 b 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+#define GPIO_PECR        0xFFEA0008
++		timer at 630 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <630 10>;
++			interrupts = <1 c 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+//* Heartbeat */
-+static struct heartbeat_data heartbeat_data = {
-+	.regsize = 16,
-+};
++		timer at 640 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <640 10>;
++			interrupts = <1 d 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+static struct resource heartbeat_resources[] = {
-+	[0] = {
-+		.start  = PA_LED,
-+		.end    = PA_LED,
-+		.flags  = IORESOURCE_MEM,
-+	},
-+};
++		timer at 650 {	// General Purpose Timer
++			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
++			reg = <650 10>;
++			interrupts = <1 e 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+static struct platform_device heartbeat_device = {
-+	.name           = "heartbeat",
-+	.id             = -1,
-+	.dev = {
-+		.platform_data = &heartbeat_data,
-+	},
-+	.num_resources  = ARRAY_SIZE(heartbeat_resources),
-+	.resource       = heartbeat_resources,
-+};
++		motionpro-led at 660 {	// Motion-PRO status LED
++			compatible = "promess,motionpro-led";
++			label = "motionpro-statusled";
++			reg = <660 10>;
++			interrupts = <1 f 0>;
++			interrupt-parent = <&mpc5200_pic>;
++			blink-delay = <64>; // 100 msec
++		};
 +
-+/* SMC91x */
-+static struct resource smc91x_eth_resources[] = {
-+	[0] = {
-+		.name   = "smc91x-regs" ,
-+		.start  = PA_LAN + 0x300,
-+		.end    = PA_LAN + 0x300 + 0x10 ,
-+		.flags  = IORESOURCE_MEM,
-+	},
-+	[1] = {
-+		.start  = IRQ_ETHERNET,
-+		.end    = IRQ_ETHERNET,
-+		.flags  = IORESOURCE_IRQ,
-+	},
-+};
++		motionpro-led at 670 {	// Motion-PRO ready LED
++			compatible = "promess,motionpro-led";
++			label = "motionpro-readyled";
++			reg = <670 10>;
++			interrupts = <1 10 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+static struct platform_device smc91x_eth_device = {
-+	.name           = "smc91x",
-+	.id             = 0,
-+	.dev = {
-+		.dma_mask               = NULL,         /* don't use dma */
-+		.coherent_dma_mask      = 0xffffffff,
-+	},
-+	.num_resources  = ARRAY_SIZE(smc91x_eth_resources),
-+	.resource       = smc91x_eth_resources,
-+};
++		rtc at 800 {	// Real time clock
++			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
++			reg = <800 100>;
++			interrupts = <1 5 0 1 6 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+static struct platform_device *sdk7780_devices[] __initdata = {
-+	&heartbeat_device,
-+	&smc91x_eth_device,
-+};
++		mscan at 980 {
++			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
++			interrupts = <2 12 0>;
++			interrupt-parent = <&mpc5200_pic>;
++			reg = <980 80>;
++		};
 +
-+static int __init sdk7780_devices_setup(void)
-+{
-+	return platform_add_devices(sdk7780_devices,
-+		ARRAY_SIZE(sdk7780_devices));
-+}
-+device_initcall(sdk7780_devices_setup);
++		gpio at b00 {
++			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
++			reg = <b00 40>;
++			interrupts = <1 7 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+static void __init sdk7780_setup(char **cmdline_p)
-+{
-+	u16 ver = ctrl_inw(FPGA_FPVERR);
-+	u16 dateStamp = ctrl_inw(FPGA_FPDATER);
++		gpio at c00 {
++			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
++			reg = <c00 40>;
++			interrupts = <1 8 0 0 3 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+	printk(KERN_INFO "Renesas Technology Europe SDK7780 support.\n");
-+	printk(KERN_INFO "Board version: %d (revision %d), "
-+			 "FPGA version: %d (revision %d), datestamp : %d\n",
-+			 (ver >> 12) & 0xf, (ver >> 8) & 0xf,
-+			 (ver >>  4) & 0xf, ver & 0xf,
-+			 dateStamp);
 +
-+	/* Setup pin mux'ing for PCIC */
-+	ctrl_outw(0x0000, GPIO_PECR);
-+}
++		spi at f00 {
++			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
++			reg = <f00 20>;
++			interrupts = <2 d 0 2 e 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
 +
-+/*
-+ * The Machine Vector
-+ */
-+static struct sh_machine_vector mv_se7780 __initmv = {
-+	.mv_name        = "Renesas SDK7780-R3" ,
-+	.mv_setup		= sdk7780_setup,
-+	.mv_nr_irqs		= 111,
-+	.mv_init_irq	= init_sdk7780_IRQ,
++		usb at 1000 {
++			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
++			reg = <1000 ff>;
++			interrupts = <2 6 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		dma-controller at 1200 {
++			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
++			reg = <1200 80>;
++			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
++			              3 4 0  3 5 0  3 6 0  3 7 0
++			              3 8 0  3 9 0  3 a 0  3 b 0
++			              3 c 0  3 d 0  3 e 0  3 f 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		xlb at 1f00 {
++			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
++			reg = <1f00 100>;
++		};
++
++		serial at 2000 {		// PSC1
++			device_type = "serial";
++			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
++			port-number = <0>;  // Logical port assignment
++			reg = <2000 100>;
++			interrupts = <2 1 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		// PSC2 in spi master mode 
++		spi at 2200 {		// PSC2
++			compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi";
++			cell-index = <1>;
++			reg = <2200 100>;
++			interrupts = <2 2 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		// PSC5 in uart mode
++		serial at 2800 {		// PSC5
++			device_type = "serial";
++			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
++			port-number = <4>;  // Logical port assignment
++			reg = <2800 100>;
++			interrupts = <2 c 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		ethernet at 3000 {
++			device_type = "network";
++			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
++			reg = <3000 800>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <2 5 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		ata at 3a00 {
++			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
++			reg = <3a00 100>;
++			interrupts = <2 7 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		i2c at 3d40 {
++			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
++			reg = <3d40 40>;
++			interrupts = <2 10 0>;
++			interrupt-parent = <&mpc5200_pic>;
++			fsl5200-clocking;
++		};
++
++		sram at 8000 {
++			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
++			reg = <8000 4000>;
++		};
++	};
++
++	lpb {
++		compatible = "fsl,lpb";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		ranges = <1 0 50000000 00010000
++			  2 0 50010000 00010000
++			  3 0 50020000 00010000>;
++
++		// 8-bit DualPort SRAM on LocalPlus Bus CS1
++		kollmorgen at 1,0 {
++			compatible = "promess,motionpro-kollmorgen";
++			reg = <1 0 10000>;
++			interrupts = <1 1 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		// 8-bit board CPLD on LocalPlus Bus CS2
++		cpld at 2,0 {
++			compatible = "promess,motionpro-cpld";
++			reg = <2 0 10000>;
++		};
++
++		// 8-bit custom Anybus Module on LocalPlus Bus CS3
++		anybus at 3,0 {
++			compatible = "promess,motionpro-anybus";
++			reg = <3 0 10000>;
++		};
++		pro_module_general at 3,0 {
++			compatible = "promess,pro_module_general";
++			reg = <3 0 3>;
++		};
++		pro_module_dio at 3,800 {
++			compatible = "promess,pro_module_dio";
++			reg = <3 800 2>;
++		};
++	};
++
++	pci at f0000d00 {
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		device_type = "pci";
++		compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
++		reg = <f0000d00 100>;
++		interrupt-map-mask = <f800 0 0 7>;
++		interrupt-map = <c000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot
++				 c000 0 0 2 &mpc5200_pic 1 1 3
++				 c000 0 0 3 &mpc5200_pic 1 2 3
++				 c000 0 0 4 &mpc5200_pic 1 3 3
++
++				 c800 0 0 1 &mpc5200_pic 1 1 3 // 2nd slot
++				 c800 0 0 2 &mpc5200_pic 1 2 3
++				 c800 0 0 3 &mpc5200_pic 1 3 3
++				 c800 0 0 4 &mpc5200_pic 0 0 3>;
++		clock-frequency = <0>; // From boot loader
++		interrupts = <2 8 0 2 9 0 2 a 0>;
++		interrupt-parent = <&mpc5200_pic>;
++		bus-range = <0 0>;
++		ranges = <42000000 0 80000000 80000000 0 20000000
++			  02000000 0 a0000000 a0000000 0 10000000
++			  01000000 0 00000000 b0000000 0 01000000>;
++	};
 +};
+diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
+index 9e7eba9..2d6653f 100644
+--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
++++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
+@@ -9,23 +9,33 @@
+  * option) any later version.
+  */
+ 
++/dts-v1/;
++
+ / {
+ 	model = "MPC8313ERDB";
+ 	compatible = "MPC8313ERDB", "MPC831xRDB", "MPC83xxRDB";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
-index 1b0f5be..59f552c 100644
---- a/arch/sh/boot/Makefile
-+++ b/arch/sh/boot/Makefile
-@@ -35,17 +35,28 @@ $(obj)/compressed/vmlinux: FORCE
- KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%8x" \
- 		     $$[$(CONFIG_PAGE_OFFSET)  + \
- 			$(CONFIG_MEMORY_START) + \
-+			$(CONFIG_ZERO_PAGE_OFFSET)]')
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+ 		PowerPC,8313 at 0 {
+ 			device_type = "cpu";
+-			reg = <0>;
+-			d-cache-line-size = <20>;	// 32 bytes
+-			i-cache-line-size = <20>;	// 32 bytes
+-			d-cache-size = <4000>;		// L1, 16K
+-			i-cache-size = <4000>;		// L1, 16K
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <16384>;
++			i-cache-size = <16384>;
+ 			timebase-frequency = <0>;	// from bootloader
+ 			bus-frequency = <0>;		// from bootloader
+ 			clock-frequency = <0>;		// from bootloader
+@@ -34,134 +44,188 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 08000000>;	// 128MB at 0
++		reg = <0x00000000 0x08000000>;	// 128MB at 0
++	};
 +
-+KERNEL_ENTRY	:= $(shell /bin/bash -c 'printf "0x%8x" \
-+		     $$[$(CONFIG_PAGE_OFFSET)  + \
-+			$(CONFIG_MEMORY_START) + \
- 			$(CONFIG_ZERO_PAGE_OFFSET)+0x1000]')
++	localbus at e0005000 {
++		#address-cells = <2>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8313-elbc", "fsl,elbc", "simple-bus";
++		reg = <0xe0005000 0x1000>;
++		interrupts = <77 0x8>;
++		interrupt-parent = <&ipic>;
++
++		// CS0 and CS1 are swapped when
++		// booting from nand, but the
++		// addresses are the same.
++		ranges = <0x0 0x0 0xfe000000 0x00800000
++		          0x1 0x0 0xe2800000 0x00008000
++		          0x2 0x0 0xf0000000 0x00020000
++		          0x3 0x0 0xfa000000 0x00008000>;
++
++		flash at 0,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "cfi-flash";
++			reg = <0x0 0x0 0x800000>;
++			bank-width = <2>;
++			device-width = <1>;
++		};
++
++		nand at 1,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8313-fcm-nand",
++			             "fsl,elbc-fcm-nand";
++			reg = <0x1 0x0 0x2000>;
++
++			u-boot at 0 {
++				reg = <0x0 0x100000>;
++				read-only;
++			};
++
++			kernel at 100000 {
++				reg = <0x100000 0x300000>;
++			};
++
++			fs at 400000 {
++				reg = <0x400000 0x1c00000>;
++			};
++		};
+ 	};
  
- quiet_cmd_uimage = UIMAGE  $@
-       cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sh -O linux -T kernel \
--		   -C none -a $(KERNEL_LOAD) -e $(KERNEL_LOAD) \
-+		   -C none -a $(KERNEL_LOAD) -e $(KERNEL_ENTRY) \
- 		   -n 'Linux-$(KERNELRELEASE)' -d $< $@
+ 	soc8313 at e0000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+-		ranges = <0 e0000000 00100000>;
+-		reg = <e0000000 00000200>;
++		compatible = "simple-bus";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
+ 		bus-frequency = <0>;
+ 
+ 		wdt at 200 {
+ 			device_type = "watchdog";
+ 			compatible = "mpc83xx_wdt";
+-			reg = <200 100>;
++			reg = <0x200 0x100>;
+ 		};
  
--$(obj)/uImage: $(obj)/zImage FORCE
-+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
- 	$(call if_changed,uimage)
- 	@echo '  Image $@ is ready'
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+-			reg = <3000 100>;
+-			interrupts = <e 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 		};
  
-+$(obj)/vmlinux.bin: vmlinux FORCE
-+	$(call if_changed,objcopy)
-+
-+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
-+	$(call if_changed,gzip)
-+
- OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
- $(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
- 	$(call if_changed,objcopy)
-@@ -54,4 +65,5 @@ OBJCOPYFLAGS_uImage.srec := -I binary -O srec
- $(obj)/uImage.srec: $(obj)/uImage
- 	$(call if_changed,objcopy)
+ 		i2c at 3100 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
+ 			compatible = "fsl-i2c";
+-			reg = <3100 100>;
+-			interrupts = <f 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 		};
  
--clean-files	+= uImage uImage.srec vmlinux.srec
-+clean-files	+= uImage uImage.srec vmlinux.srec \
-+		   vmlinux.bin vmlinux.bin.gz
-diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
-index 906a13f..efb01dc 100644
---- a/arch/sh/boot/compressed/Makefile
-+++ b/arch/sh/boot/compressed/Makefile
-@@ -1,43 +1,5 @@
--#
--# linux/arch/sh/boot/compressed/Makefile
--#
--# create a compressed vmlinux image from the original vmlinux
--#
--
--targets		:= vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
--EXTRA_AFLAGS	:= -traditional
--
--OBJECTS = $(obj)/head.o $(obj)/misc.o
--
--ifdef CONFIG_SH_STANDARD_BIOS
--OBJECTS += $(obj)/../../kernel/sh_bios.o
-+ifeq ($(CONFIG_SUPERH32),y)
-+include ${srctree}/arch/sh/boot/compressed/Makefile_32
-+else
-+include ${srctree}/arch/sh/boot/compressed/Makefile_64
- endif
--
--#
--# IMAGE_OFFSET is the load offset of the compression loader
--#
--IMAGE_OFFSET	:= $(shell /bin/bash -c 'printf "0x%08x" \
--		     $$[$(CONFIG_PAGE_OFFSET)  + \
--			$(CONFIG_MEMORY_START) + \
--			$(CONFIG_BOOT_LINK_OFFSET)]')
--
--LIBGCC	:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
--
--LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds
--
--
--$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
--	$(call if_changed,ld)
--	@:
--
--$(obj)/vmlinux.bin: vmlinux FORCE
--	$(call if_changed,objcopy)
--
--$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
--	$(call if_changed,gzip)
--
--LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh-linux -T
--OBJCOPYFLAGS += -R .empty_zero_page
--
--$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
--	$(call if_changed,ld)
-diff --git a/arch/sh/boot/compressed/Makefile_32 b/arch/sh/boot/compressed/Makefile_32
+ 		spi at 7000 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <7000 1000>;
+-			interrupts = <10 8>;
+-			interrupt-parent = < &ipic >;
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
+ 			mode = "cpu";
+ 		};
+ 
+ 		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
+ 		usb at 23000 {
+-			device_type = "usb";
+ 			compatible = "fsl-usb2-dr";
+-			reg = <23000 1000>;
++			reg = <0x23000 0x1000>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			interrupt-parent = < &ipic >;
+-			interrupts = <26 8>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
+ 			phy_type = "utmi_wide";
+ 		};
+ 
+ 		mdio at 24520 {
+-			device_type = "mdio";
+-			compatible = "gianfar";
+-			reg = <24520 20>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
+ 			phy1: ethernet-phy at 1 {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <13 8>;
+-				reg = <1>;
++				interrupt-parent = <&ipic>;
++				interrupts = <19 0x8>;
++				reg = <0x1>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 			phy4: ethernet-phy at 4 {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <14 8>;
+-				reg = <4>;
++				interrupt-parent = <&ipic>;
++				interrupts = <20 0x8>;
++				reg = <0x4>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+-			reg = <24000 1000>;
++			reg = <0x24000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <25 8 24 8 23 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <37 0x8 36 0x8 35 0x8>;
++			interrupt-parent = <&ipic>;
+ 			phy-handle = < &phy1 >;
+ 		};
+ 
+-		ethernet at 25000 {
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+-			reg = <25000 1000>;
++			reg = <0x25000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <22 8 21 8 20 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <34 0x8 33 0x8 32 0x8>;
++			interrupt-parent = <&ipic>;
+ 			phy-handle = < &phy4 >;
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
++			reg = <0x4500 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <9 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
++			reg = <0x4600 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <a 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+ 		crypto at 30000 {
+ 			device_type = "crypto";
+ 			model = "SEC2";
+ 			compatible = "talitos";
+-			reg = <30000 7000>;
+-			interrupts = <b 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x30000 0x7000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
+ 			/* Rev. 2.2 */
+ 			num-channels = <1>;
+-			channel-fifo-len = <18>;
+-			exec-units-mask = <0000004c>;
+-			descriptor-types-mask = <0122003f>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x0000004c>;
++			descriptor-types-mask = <0x0122003f>;
+ 		};
+ 
+ 		/* IPIC
+@@ -174,37 +238,38 @@
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <700 100>;
++			reg = <0x700 0x100>;
+ 			device_type = "ipic";
+ 		};
+ 	};
+ 
+-	pci at e0008500 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci0: pci at e0008500 {
++		cell-index = <1>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 
+ 				/* IDSEL 0x0E -mini PCI */
+-				 7000 0 0 1 &ipic 12 8
+-				 7000 0 0 2 &ipic 12 8
+-				 7000 0 0 3 &ipic 12 8
+-				 7000 0 0 4 &ipic 12 8
++				 0x7000 0x0 0x0 0x1 &ipic 18 0x8
++				 0x7000 0x0 0x0 0x2 &ipic 18 0x8
++				 0x7000 0x0 0x0 0x3 &ipic 18 0x8
++				 0x7000 0x0 0x0 0x4 &ipic 18 0x8
+ 
+ 				/* IDSEL 0x0F - PCI slot */
+-				 7800 0 0 1 &ipic 11 8
+-				 7800 0 0 2 &ipic 12 8
+-				 7800 0 0 3 &ipic 11 8
+-				 7800 0 0 4 &ipic 12 8>;
+-		interrupt-parent = < &ipic >;
+-		interrupts = <42 8>;
+-		bus-range = <0 0>;
+-		ranges = <02000000 0 90000000 90000000 0 10000000
+-			  42000000 0 80000000 80000000 0 10000000
+-			  01000000 0 00000000 e2000000 0 00100000>;
+-		clock-frequency = <3f940aa>;
++				 0x7800 0x0 0x0 0x1 &ipic 17 0x8
++				 0x7800 0x0 0x0 0x2 &ipic 18 0x8
++				 0x7800 0x0 0x0 0x3 &ipic 17 0x8
++				 0x7800 0x0 0x0 0x4 &ipic 18 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++			  0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++		clock-frequency = <66666666>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008500 100>;
++		reg = <0xe0008500 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
 new file mode 100644
-index 0000000..6ac8d4a
+index 0000000..b582032
 --- /dev/null
-+++ b/arch/sh/boot/compressed/Makefile_32
-@@ -0,0 +1,43 @@
-+#
-+# linux/arch/sh/boot/compressed/Makefile
-+#
-+# create a compressed vmlinux image from the original vmlinux
-+#
++++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
+@@ -0,0 +1,287 @@
++/*
++ * MPC8315E RDB Device Tree Source
++ *
++ * Copyright 2007 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
 +
-+targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
-+		   head_32.o misc_32.o piggy.o
-+EXTRA_AFLAGS	:= -traditional
++/dts-v1/;
 +
-+OBJECTS = $(obj)/head_32.o $(obj)/misc_32.o
++/ {
++	compatible = "fsl,mpc8315erdb";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+ifdef CONFIG_SH_STANDARD_BIOS
-+OBJECTS += $(obj)/../../kernel/sh_bios.o
-+endif
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8315 at 0 {
++			device_type = "cpu";
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <16384>;
++			i-cache-size = <16384>;
++			timebase-frequency = <0>;	// from bootloader
++			bus-frequency = <0>;		// from bootloader
++			clock-frequency = <0>;		// from bootloader
++		};
++	};
 +
-+#
-+# IMAGE_OFFSET is the load offset of the compression loader
-+#
-+IMAGE_OFFSET	:= $(shell /bin/bash -c 'printf "0x%08x" \
-+		     $$[$(CONFIG_PAGE_OFFSET)  + \
-+			$(CONFIG_MEMORY_START) + \
-+			$(CONFIG_BOOT_LINK_OFFSET)]')
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x08000000>;	// 128MB at 0
++	};
 +
-+LIBGCC	:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
++	localbus at e0005000 {
++		#address-cells = <2>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8315-elbc", "fsl,elbc", "simple-bus";
++		reg = <0xe0005000 0x1000>;
++		interrupts = <77 0x8>;
++		interrupt-parent = <&ipic>;
++
++		// CS0 and CS1 are swapped when
++		// booting from nand, but the
++		// addresses are the same.
++		ranges = <0x0 0x0 0xfe000000 0x00800000
++		          0x1 0x0 0xe0600000 0x00002000
++		          0x2 0x0 0xf0000000 0x00020000
++		          0x3 0x0 0xfa000000 0x00008000>;
++
++		flash at 0,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "cfi-flash";
++			reg = <0x0 0x0 0x800000>;
++			bank-width = <2>;
++			device-width = <1>;
++		};
 +
-+LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds
++		nand at 1,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8315-fcm-nand",
++			             "fsl,elbc-fcm-nand";
++			reg = <0x1 0x0 0x2000>;
++
++			u-boot at 0 {
++				reg = <0x0 0x100000>;
++				read-only;
++			};
 +
-+$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
-+	$(call if_changed,ld)
-+	@:
++			kernel at 100000 {
++				reg = <0x100000 0x300000>;
++			};
++			fs at 400000 {
++				reg = <0x400000 0x1c00000>;
++			};
++		};
++	};
 +
-+$(obj)/vmlinux.bin: vmlinux FORCE
-+	$(call if_changed,objcopy)
++	immr at e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		compatible = "simple-bus";
++		ranges = <0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <0>;
++
++		wdt at 200 {
++			device_type = "watchdog";
++			compatible = "mpc83xx_wdt";
++			reg = <0x200 0x100>;
++		};
 +
-+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
-+	$(call if_changed,gzip)
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++			rtc at 68 {
++				device_type = "rtc";
++				compatible = "dallas,ds1339";
++				reg = <0x68>;
++			};
++		};
 +
-+LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh-linux -T
-+OBJCOPYFLAGS += -R .empty_zero_page
++		spi at 7000 {
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
++			mode = "cpu";
++		};
 +
-+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
-+	$(call if_changed,ld)
-diff --git a/arch/sh/boot/compressed/Makefile_64 b/arch/sh/boot/compressed/Makefile_64
-new file mode 100644
-index 0000000..4334f2b
---- /dev/null
-+++ b/arch/sh/boot/compressed/Makefile_64
-@@ -0,0 +1,45 @@
-+#
-+# arch/sh/boot/compressed/Makefile_64
-+#
-+# create a compressed vmlinux image from the original vmlinux
-+#
-+# Copyright (C) 2002 Stuart Menefy
-+# Copyright (C) 2004 Paul Mundt
-+#
-+# This file is subject to the terms and conditions of the GNU General Public
-+# License.  See the file "COPYING" in the main directory of this archive
-+# for more details.
-+#
++		usb at 23000 {
++			compatible = "fsl-usb2-dr";
++			reg = <0x23000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
++			phy_type = "utmi";
++		};
 +
-+targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
-+		   head_64.o misc_64.o cache.o piggy.o
-+EXTRA_AFLAGS	:= -traditional
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++			phy0: ethernet-phy at 0 {
++				interrupt-parent = <&ipic>;
++				interrupts = <20 0x8>;
++				reg = <0x0>;
++				device_type = "ethernet-phy";
++			};
++			phy1: ethernet-phy at 1 {
++				interrupt-parent = <&ipic>;
++				interrupts = <19 0x8>;
++				reg = <0x1>;
++				device_type = "ethernet-phy";
++			};
++		};
 +
-+OBJECTS		:= $(obj)/vmlinux_64.lds $(obj)/head_64.o $(obj)/misc_64.o \
-+		   $(obj)/cache.o
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			interrupt-parent = <&ipic>;
++			phy-handle = < &phy0 >;
++		};
 +
-+#
-+# ZIMAGE_OFFSET is the load offset of the compression loader
-+# (4M for the kernel plus 64K for this loader)
-+#
-+ZIMAGE_OFFSET	:= $(shell /bin/bash -c 'printf "0x%08x" \
-+		     $$[$(CONFIG_PAGE_OFFSET)+0x400000+0x10000]')
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			interrupt-parent = <&ipic>;
++			phy-handle = < &phy1 >;
++		};
 +
-+LDFLAGS_vmlinux := -Ttext $(ZIMAGE_OFFSET) -e startup \
-+		    -T $(obj)/../../kernel/vmlinux.lds
++		serial0: serial at 4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;
++			clock-frequency = <0>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
-+	$(call if_changed,ld)
-+	@:
++		serial1: serial at 4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;
++			clock-frequency = <0>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+$(obj)/vmlinux.bin: vmlinux FORCE
-+	$(call if_changed,objcopy)
++		crypto at 30000 {
++			model = "SEC3";
++			device_type = "crypto";
++			compatible = "talitos";
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
++			/* Rev. 3.0 geometry */
++			num-channels = <4>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x000001fe>;
++			descriptor-types-mask = <0x03ab0ebf>;
++		};
 +
-+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
-+	$(call if_changed,gzip)
++		sata at 18000 {
++			compatible = "fsl,mpc8315-sata", "fsl,pq-sata";
++			reg = <0x18000 0x1000>;
++			cell-index = <1>;
++			interrupts = <44 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh64-linux -T
-+OBJCOPYFLAGS += -R .empty_zero_page
++		sata at 19000 {
++			compatible = "fsl,mpc8315-sata", "fsl,pq-sata";
++			reg = <0x19000 0x1000>;
++			cell-index = <2>;
++			interrupts = <45 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
-+	$(call if_changed,ld)
-diff --git a/arch/sh/boot/compressed/cache.c b/arch/sh/boot/compressed/cache.c
-new file mode 100644
-index 0000000..e27fc74
---- /dev/null
-+++ b/arch/sh/boot/compressed/cache.c
-@@ -0,0 +1,12 @@
-+int cache_control(unsigned int command)
-+{
-+	volatile unsigned int *p = (volatile unsigned int *) 0x80000000;
-+	int i;
++		/* IPIC
++		 * interrupts cell = <intr #, sense>
++		 * sense values match linux IORESOURCE_IRQ_* defines:
++		 * sense == 8: Level, low assertion
++		 * sense == 2: Edge, high-to-low change
++		 */
++		ipic: interrupt-controller at 700 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x700 0x100>;
++			device_type = "ipic";
++		};
++	};
 +
-+	for (i = 0; i < (32 * 1024); i += 32) {
-+		(void)*p;
-+		p += (32 / sizeof (int));
-+	}
++	pci0: pci at e0008500 {
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++				/* IDSEL 0x0E -mini PCI */
++				 0x7000 0x0 0x0 0x1 &ipic 18 0x8
++				 0x7000 0x0 0x0 0x2 &ipic 18 0x8
++				 0x7000 0x0 0x0 0x3 &ipic 18 0x8
++				 0x7000 0x0 0x0 0x4 &ipic 18 0x8
++
++				/* IDSEL 0x0F -mini PCI */
++				 0x7800 0x0 0x0 0x1 &ipic 17 0x8
++				 0x7800 0x0 0x0 0x2 &ipic 17 0x8
++				 0x7800 0x0 0x0 0x3 &ipic 17 0x8
++				 0x7800 0x0 0x0 0x4 &ipic 17 0x8
++
++				/* IDSEL 0x10 - PCI slot */
++				 0x8000 0x0 0x0 0x1 &ipic 48 0x8
++				 0x8000 0x0 0x0 0x2 &ipic 17 0x8
++				 0x8000 0x0 0x0 0x3 &ipic 48 0x8
++				 0x8000 0x0 0x0 0x4 &ipic 17 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x02000000 0 0x90000000 0x90000000 0 0x10000000
++			  0x42000000 0 0x80000000 0x80000000 0 0x10000000
++			  0x01000000 0 0x00000000 0xe0300000 0 0x00100000>;
++		clock-frequency = <66666666>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008500 0x100>;
++		compatible = "fsl,mpc8349-pci";
++		device_type = "pci";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
+index c64f303..9bb4083 100644
+--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
++++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
+@@ -7,25 +7,47 @@
+  * under  the terms of  the GNU General  Public License as published by the
+  * Free Software Foundation;  either version 2 of the  License, or (at your
+  * option) any later version.
++
++ * To enable external serial I/O on a Freescale MPC 8323 SYS/MDS board, do
++ * this:
++ *
++ * 1) On chip U61, lift (disconnect) pins 21 (TXD) and 22 (RXD) from the board.
++ * 2) Solder a wire from U61-21 to P19A-23.  P19 is a grid of pins on the board
++ *    next to the serial ports.
++ * 3) Solder a wire from U61-22 to P19K-22.
++ *
++ * Note that there's a typo in the schematic.  The board labels the last column
++ * of pins "P19K", but in the schematic, that column is called "P19J".  So if
++ * you're going by the schematic, the pin is called "P19J-K22".
+  */
+ 
++/dts-v1/;
++
+ / {
+ 	model = "MPC8323EMDS";
+ 	compatible = "MPC8323EMDS", "MPC832xMDS", "MPC83xxMDS";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+	return 0;
-+}
-diff --git a/arch/sh/boot/compressed/head.S b/arch/sh/boot/compressed/head.S
-deleted file mode 100644
-index a8399b0..0000000
---- a/arch/sh/boot/compressed/head.S
-+++ /dev/null
-@@ -1,120 +0,0 @@
--/*
-- *  linux/arch/sh/boot/compressed/head.S
-- *
-- *  Copyright (C) 1999 Stuart Menefy
-- *  Copyright (C) 2003 SUGIOKA Toshinobu
-- */
--
--.text
--
--#include <linux/linkage.h>
--#include <asm/page.h>
--
--	.global	startup
--startup:
--	/* Load initial status register */
--	mov.l   init_sr, r1
--	ldc     r1, sr
--
--	/* Move myself to proper location if necessary */
--	mova	1f, r0
--	mov.l	1f, r2
--	cmp/eq	r2, r0
--	bt	clear_bss
--	sub	r0, r2
--	mov.l	bss_start_addr, r0
--	mov	#0xe0, r1
--	and	r1, r0			! align cache line
--	mov.l	text_start_addr, r3
--	mov	r0, r1
--	sub	r2, r1
--3:
--	mov.l	@r1, r4
--	mov.l	@(4,r1), r5
--	mov.l	@(8,r1), r6
--	mov.l	@(12,r1), r7
--	mov.l	@(16,r1), r8
--	mov.l	@(20,r1), r9
--	mov.l	@(24,r1), r10
--	mov.l	@(28,r1), r11
--	mov.l	r4, @r0
--	mov.l	r5, @(4,r0)
--	mov.l	r6, @(8,r0)
--	mov.l	r7, @(12,r0)
--	mov.l	r8, @(16,r0)
--	mov.l	r9, @(20,r0)
--	mov.l	r10, @(24,r0)
--	mov.l	r11, @(28,r0)
--#ifdef CONFIG_CPU_SH4
--	ocbwb	@r0
--#endif
--	cmp/hi	r3, r0
--	add	#-32, r0
--	bt/s	3b
--	 add	#-32, r1
--	mov.l	2f, r0
--	jmp	@r0
--	 nop
--
--	.align 2
--1:	.long	1b
--2:	.long	clear_bss
--text_start_addr:
--	.long	startup
--
--	/* Clear BSS */
--clear_bss:
--	mov.l	end_addr, r1
--	mov.l	bss_start_addr, r2
--	mov	#0, r0
--l1:
--	mov.l	r0, @-r1
--	cmp/eq	r1,r2
--	bf	l1
--
--	/* Set the initial pointer. */
--	mov.l	init_stack_addr, r0
--	mov.l	@r0, r15
--
--	/* Decompress the kernel */
--	mov.l	decompress_kernel_addr, r0
--	jsr	@r0
--	nop
--
--	/* Jump to the start of the decompressed kernel */
--	mov.l	kernel_start_addr, r0
--	jmp	@r0
--	nop
--	
--	.align	2
--bss_start_addr:
--	.long	__bss_start
--end_addr:
--	.long	_end
--init_sr:
--	.long	0x400000F0	/* Privileged mode, Bank=0, Block=0, IMASK=0xF */
--init_stack_addr:
--	.long	stack_start
--decompress_kernel_addr:
--	.long	decompress_kernel
--kernel_start_addr:
--	.long	_text+PAGE_SIZE
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+ 		PowerPC,8323 at 0 {
+ 			device_type = "cpu";
+-			reg = <0>;
+-			d-cache-line-size = <20>;	// 32 bytes
+-			i-cache-line-size = <20>;	// 32 bytes
+-			d-cache-size = <4000>;		// L1, 16K
+-			i-cache-size = <4000>;		// L1, 16K
++			reg = <0x0>;
++			d-cache-line-size = <32>;	// 32 bytes
++			i-cache-line-size = <32>;	// 32 bytes
++			d-cache-size = <16384>;		// L1, 16K
++			i-cache-size = <16384>;		// L1, 16K
+ 			timebase-frequency = <0>;
+ 			bus-frequency = <0>;
+ 			clock-frequency = <0>;
+@@ -34,86 +56,88 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 08000000>;
++		reg = <0x00000000 0x08000000>;
+ 	};
+ 
+ 	bcsr at f8000000 {
+ 		device_type = "board-control";
+-		reg = <f8000000 8000>;
++		reg = <0xf8000000 0x8000>;
+ 	};
+ 
+ 	soc8323 at e0000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+-		ranges = <0 e0000000 00100000>;
+-		reg = <e0000000 00000200>;
+-		bus-frequency = <7DE2900>;
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <132000000>;
+ 
+ 		wdt at 200 {
+ 			device_type = "watchdog";
+ 			compatible = "mpc83xx_wdt";
+-			reg = <200 100>;
++			reg = <0x200 0x100>;
+ 		};
+ 
+ 		i2c at 3000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+-			reg = <3000 100>;
+-			interrupts = <e 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 
+ 			rtc at 68 {
+ 				compatible = "dallas,ds1374";
+-				reg = <68>;
++				reg = <0x68>;
+ 			};
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
++			reg = <0x4500 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <9 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
++			reg = <0x4600 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <a 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+ 		crypto at 30000 {
+ 			device_type = "crypto";
+ 			model = "SEC2";
+ 			compatible = "talitos";
+-			reg = <30000 7000>;
+-			interrupts = <b 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x30000 0x7000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
+ 			/* Rev. 2.2 */
+ 			num-channels = <1>;
+-			channel-fifo-len = <18>;
+-			exec-units-mask = <0000004c>;
+-			descriptor-types-mask = <0122003f>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x0000004c>;
++			descriptor-types-mask = <0x0122003f>;
+ 		};
+ 
+ 		ipic: pic at 700 {
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <700 100>;
++			reg = <0x700 0x100>;
+ 			device_type = "ipic";
+ 		};
+ 
+ 		par_io at 1400 {
+-			reg = <1400 100>;
++			reg = <0x1400 0x100>;
+ 			device_type = "par_io";
+ 			num-ports = <7>;
+ 
+@@ -122,8 +146,8 @@
+ 			/* port  pin  dir  open_drain  assignment  has_irq */
+ 					3  4  3  0  2  0  /* MDIO */
+ 					3  5  1  0  2  0  /* MDC */
+-					0  d  2  0  1  0 	/* RX_CLK (CLK9) */
+-					3 18  2  0  1  0 	/* TX_CLK (CLK10) */
++					0 13  2  0  1  0 	/* RX_CLK (CLK9) */
++					3 24  2  0  1  0 	/* TX_CLK (CLK10) */
+ 					1  0  1  0  1  0 	/* TxD0 */
+ 					1  1  1  0  1  0 	/* TxD1 */
+ 					1  2  1  0  1  0 	/* TxD2 */
+@@ -134,31 +158,48 @@
+ 					1  7  2  0  1  0 	/* RxD3 */
+ 					1  8  2  0  1  0 	/* RX_ER */
+ 					1  9  1  0  1  0 	/* TX_ER */
+-					1  a  2  0  1  0 	/* RX_DV */
+-					1  b  2  0  1  0 	/* COL */
+-					1  c  1  0  1  0 	/* TX_EN */
+-					1  d  2  0  1  0>;/* CRS */
++					1 10  2  0  1  0 	/* RX_DV */
++					1 11  2  0  1  0 	/* COL */
++					1 12  1  0  1  0 	/* TX_EN */
++					1 13  2  0  1  0>;	/* CRS */
+ 			};
+ 			pio4: ucc_pin at 04 {
+ 				pio-map = <
+ 			/* port  pin  dir  open_drain  assignment  has_irq */
+-					3 1f  2  0  1  0 	/* RX_CLK (CLK7) */
++					3 31  2  0  1  0 	/* RX_CLK (CLK7) */
+ 					3  6  2  0  1  0 	/* TX_CLK (CLK8) */
+-					1 12  1  0  1  0 	/* TxD0 */
+-					1 13  1  0  1  0 	/* TxD1 */
+-					1 14  1  0  1  0 	/* TxD2 */
+-					1 15  1  0  1  0 	/* TxD3 */
+-					1 16  2  0  1  0 	/* RxD0 */
+-					1 17  2  0  1  0 	/* RxD1 */
+-					1 18  2  0  1  0 	/* RxD2 */
+-					1 19  2  0  1  0 	/* RxD3 */
+-					1 1a  2  0  1  0 	/* RX_ER */
+-					1 1b  1  0  1  0 	/* TX_ER */
+-					1 1c  2  0  1  0 	/* RX_DV */
+-					1 1d  2  0  1  0 	/* COL */
+-					1 1e  1  0  1  0 	/* TX_EN */
+-					1 1f  2  0  1  0>;/* CRS */
++					1 18  1  0  1  0 	/* TxD0 */
++					1 19  1  0  1  0 	/* TxD1 */
++					1 20  1  0  1  0 	/* TxD2 */
++					1 21  1  0  1  0 	/* TxD3 */
++					1 22  2  0  1  0 	/* RxD0 */
++					1 23  2  0  1  0 	/* RxD1 */
++					1 24  2  0  1  0 	/* RxD2 */
++					1 25  2  0  1  0 	/* RxD3 */
++					1 26  2  0  1  0 	/* RX_ER */
++					1 27  1  0  1  0 	/* TX_ER */
++					1 28  2  0  1  0 	/* RX_DV */
++					1 29  2  0  1  0 	/* COL */
++					1 30  1  0  1  0 	/* TX_EN */
++					1 31  2  0  1  0>;	/* CRS */
+ 			};
++			pio5: ucc_pin at 05 {
++				pio-map = <
++				/*
++				 *    		      open       has
++				 *   port  pin  dir  drain  sel  irq
++				 */
++					2    0    1      0    2    0  /* TxD5 */
++					2    8    2      0    2    0  /* RxD5 */
++
++					2   29    2      0    0    0  /* CTS5 */
++					2   31    1      0    2    0  /* RTS5 */
++
++					2   24    2      0    0    0  /* CD */
++
++				>;
++			};
++
+ 		};
+ 	};
+ 
+@@ -166,178 +207,191 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "qe";
+-		model = "QE";
+-		ranges = <0 e0100000 00100000>;
+-		reg = <e0100000 480>;
++		compatible = "fsl,qe";
++		ranges = <0x0 0xe0100000 0x00100000>;
++		reg = <0xe0100000 0x480>;
+ 		brg-frequency = <0>;
+-		bus-frequency = <BCD3D80>;
++		bus-frequency = <198000000>;
+ 
+ 		muram at 10000 {
+-			device_type = "muram";
+-			ranges = <0 00010000 00004000>;
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,qe-muram", "fsl,cpm-muram";
++			ranges = <0x0 0x00010000 0x00004000>;
+ 
+ 			data-only at 0 {
+-				reg = <0 4000>;
++				compatible = "fsl,qe-muram-data",
++					     "fsl,cpm-muram-data";
++				reg = <0x0 0x4000>;
+ 			};
+ 		};
+ 
+ 		spi at 4c0 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <4c0 40>;
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x4c0 0x40>;
+ 			interrupts = <2>;
+-			interrupt-parent = < &qeic >;
++			interrupt-parent = <&qeic>;
+ 			mode = "cpu";
+ 		};
+ 
+ 		spi at 500 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <500 40>;
++			cell-index = <1>;
++			compatible = "fsl,spi";
++			reg = <0x500 0x40>;
+ 			interrupts = <1>;
+-			interrupt-parent = < &qeic >;
++			interrupt-parent = <&qeic>;
+ 			mode = "cpu";
+ 		};
+ 
+ 		usb at 6c0 {
+-			device_type = "usb";
+ 			compatible = "qe_udc";
+-			reg = <6c0 40 8B00 100>;
+-			interrupts = <b>;
+-			interrupt-parent = < &qeic >;
++			reg = <0x6c0 0x40 0x8b00 0x100>;
++			interrupts = <11>;
++			interrupt-parent = <&qeic>;
+ 			mode = "slave";
+ 		};
+ 
+-		ucc at 2200 {
++		enet0: ucc at 2200 {
+ 			device_type = "network";
+ 			compatible = "ucc_geth";
+ 			model = "UCC";
++			cell-index = <3>;
+ 			device-id = <3>;
+-			reg = <2200 200>;
+-			interrupts = <22>;
+-			interrupt-parent = < &qeic >;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
++			reg = <0x2200 0x200>;
++			interrupts = <34>;
++			interrupt-parent = <&qeic>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			rx-clock = <19>;
+-			tx-clock = <1a>;
+-			phy-handle = < &phy3 >;
+-			pio-handle = < &pio3 >;
++			rx-clock-name = "clk9";
++			tx-clock-name = "clk10";
++			phy-handle = <&phy3>;
++			pio-handle = <&pio3>;
+ 		};
+ 
+-		ucc at 3200 {
++		enet1: ucc at 3200 {
+ 			device_type = "network";
+ 			compatible = "ucc_geth";
+ 			model = "UCC";
++			cell-index = <4>;
+ 			device-id = <4>;
+-			reg = <3200 200>;
+-			interrupts = <23>;
++			reg = <0x3200 0x200>;
++			interrupts = <35>;
++			interrupt-parent = <&qeic>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			rx-clock-name = "clk7";
++			tx-clock-name = "clk8";
++			phy-handle = <&phy4>;
++			pio-handle = <&pio4>;
++		};
++
++		ucc at 2400 {
++			device_type = "serial";
++			compatible = "ucc_uart";
++			model = "UCC";
++			device-id = <5>;	/* The UCC number, 1-7*/
++			port-number = <0>;	/* Which ttyQEx device */
++			soft-uart;		/* We need Soft-UART */
++			reg = <0x2400 0x200>;
++			interrupts = <40>;	/* From Table 18-12 */
+ 			interrupt-parent = < &qeic >;
+ 			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
++			 * For Soft-UART, we need to set TX to 1X, which
++			 * means specifying separate clock sources.
+ 			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
+-			local-mac-address = [ 00 00 00 00 00 00 ];
+-			rx-clock = <17>;
+-			tx-clock = <18>;
+-			phy-handle = < &phy4 >;
+-			pio-handle = < &pio4 >;
++			rx-clock-name = "brg5";
++			tx-clock-name = "brg6";
++			pio-handle = < &pio5 >;
+ 		};
+ 
++
+ 		mdio at 2320 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			reg = <2320 18>;
+-			device_type = "mdio";
+-			compatible = "ucc_geth_phy";
++			reg = <0x2320 0x18>;
++			compatible = "fsl,ucc-mdio";
+ 
+ 			phy3: ethernet-phy at 03 {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <11 8>;
+-				reg = <3>;
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x3>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 			phy4: ethernet-phy at 04 {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <12 8>;
+-				reg = <4>;
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x4>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		qeic: qeic at 80 {
++		qeic: interrupt-controller at 80 {
+ 			interrupt-controller;
+-			device_type = "qeic";
++			compatible = "fsl,qe-ic";
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <1>;
+-			reg = <80 80>;
++			reg = <0x80 0x80>;
+ 			big-endian;
+-			interrupts = <20 8 21 8>; //high:32 low:33
+-			interrupt-parent = < &ipic >;
++			interrupts = <32 0x8 33 0x8>; //high:32 low:33
++			interrupt-parent = <&ipic>;
+ 		};
+ 	};
+ 
+-	pci at e0008500 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci0: pci at e0008500 {
++		cell-index = <1>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 				/* IDSEL 0x11 AD17 */
+-				 8800 0 0 1 &ipic 14 8
+-				 8800 0 0 2 &ipic 15 8
+-				 8800 0 0 3 &ipic 16 8
+-				 8800 0 0 4 &ipic 17 8
++				 0x8800 0x0 0x0 0x1 &ipic 20 0x8
++				 0x8800 0x0 0x0 0x2 &ipic 21 0x8
++				 0x8800 0x0 0x0 0x3 &ipic 22 0x8
++				 0x8800 0x0 0x0 0x4 &ipic 23 0x8
+ 
+ 				/* IDSEL 0x12 AD18 */
+-				 9000 0 0 1 &ipic 16 8
+-				 9000 0 0 2 &ipic 17 8
+-				 9000 0 0 3 &ipic 14 8
+-				 9000 0 0 4 &ipic 15 8
++				 0x9000 0x0 0x0 0x1 &ipic 22 0x8
++				 0x9000 0x0 0x0 0x2 &ipic 23 0x8
++				 0x9000 0x0 0x0 0x3 &ipic 20 0x8
++				 0x9000 0x0 0x0 0x4 &ipic 21 0x8
+ 
+ 				/* IDSEL 0x13 AD19 */
+-				 9800 0 0 1 &ipic 17 8
+-				 9800 0 0 2 &ipic 14 8
+-				 9800 0 0 3 &ipic 15 8
+-				 9800 0 0 4 &ipic 16 8
++				 0x9800 0x0 0x0 0x1 &ipic 23 0x8
++				 0x9800 0x0 0x0 0x2 &ipic 20 0x8
++				 0x9800 0x0 0x0 0x3 &ipic 21 0x8
++				 0x9800 0x0 0x0 0x4 &ipic 22 0x8
+ 
+ 				/* IDSEL 0x15 AD21*/
+-				 a800 0 0 1 &ipic 14 8
+-				 a800 0 0 2 &ipic 15 8
+-				 a800 0 0 3 &ipic 16 8
+-				 a800 0 0 4 &ipic 17 8
++				 0xa800 0x0 0x0 0x1 &ipic 20 0x8
++				 0xa800 0x0 0x0 0x2 &ipic 21 0x8
++				 0xa800 0x0 0x0 0x3 &ipic 22 0x8
++				 0xa800 0x0 0x0 0x4 &ipic 23 0x8
+ 
+ 				/* IDSEL 0x16 AD22*/
+-				 b000 0 0 1 &ipic 17 8
+-				 b000 0 0 2 &ipic 14 8
+-				 b000 0 0 3 &ipic 15 8
+-				 b000 0 0 4 &ipic 16 8
++				 0xb000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xb000 0x0 0x0 0x2 &ipic 20 0x8
++				 0xb000 0x0 0x0 0x3 &ipic 21 0x8
++				 0xb000 0x0 0x0 0x4 &ipic 22 0x8
+ 
+ 				/* IDSEL 0x17 AD23*/
+-				 b800 0 0 1 &ipic 16 8
+-				 b800 0 0 2 &ipic 17 8
+-				 b800 0 0 3 &ipic 14 8
+-				 b800 0 0 4 &ipic 15 8
++				 0xb800 0x0 0x0 0x1 &ipic 22 0x8
++				 0xb800 0x0 0x0 0x2 &ipic 23 0x8
++				 0xb800 0x0 0x0 0x3 &ipic 20 0x8
++				 0xb800 0x0 0x0 0x4 &ipic 21 0x8
+ 
+ 				/* IDSEL 0x18 AD24*/
+-				 c000 0 0 1 &ipic 15 8
+-				 c000 0 0 2 &ipic 16 8
+-				 c000 0 0 3 &ipic 17 8
+-				 c000 0 0 4 &ipic 14 8>;
+-		interrupt-parent = < &ipic >;
+-		interrupts = <42 8>;
+-		bus-range = <0 0>;
+-		ranges = <02000000 0 90000000 90000000 0 10000000
+-			  42000000 0 80000000 80000000 0 10000000
+-			  01000000 0 00000000 d0000000 0 00100000>;
++				 0xc000 0x0 0x0 0x1 &ipic 21 0x8
++				 0xc000 0x0 0x0 0x2 &ipic 22 0x8
++				 0xc000 0x0 0x0 0x3 &ipic 23 0x8
++				 0xc000 0x0 0x0 0x4 &ipic 20 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++			  0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xd0000000 0x0 0x00100000>;
+ 		clock-frequency = <0>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008500 100>;
++		reg = <0xe0008500 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
+index 388c8a7..94f93d2 100644
+--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
++++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
+@@ -9,23 +9,33 @@
+  * option) any later version.
+  */
+ 
++/dts-v1/;
++
+ / {
+ 	model = "MPC8323ERDB";
+ 	compatible = "MPC8323ERDB", "MPC832xRDB", "MPC83xxRDB";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+ 		PowerPC,8323 at 0 {
+ 			device_type = "cpu";
+-			reg = <0>;
+-			d-cache-line-size = <20>;	// 32 bytes
+-			i-cache-line-size = <20>;	// 32 bytes
+-			d-cache-size = <4000>;		// L1, 16K
+-			i-cache-size = <4000>;		// L1, 16K
++			reg = <0x0>;
++			d-cache-line-size = <0x20>;	// 32 bytes
++			i-cache-line-size = <0x20>;	// 32 bytes
++			d-cache-size = <16384>;	// L1, 16K
++			i-cache-size = <16384>;	// L1, 16K
+ 			timebase-frequency = <0>;
+ 			bus-frequency = <0>;
+ 			clock-frequency = <0>;
+@@ -34,47 +44,51 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 04000000>;
++		reg = <0x00000000 0x04000000>;
+ 	};
+ 
+ 	soc8323 at e0000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+-		ranges = <0 e0000000 00100000>;
+-		reg = <e0000000 00000200>;
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
+ 		bus-frequency = <0>;
+ 
+ 		wdt at 200 {
+ 			device_type = "watchdog";
+ 			compatible = "mpc83xx_wdt";
+-			reg = <200 100>;
++			reg = <0x200 0x100>;
+ 		};
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+-			reg = <3000 100>;
+-			interrupts = <e 8>;
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
+ 			interrupt-parent = <&pic>;
+ 			dfsrr;
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
++			reg = <0x4500 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <9 8>;
++			interrupts = <9 0x8>;
+ 			interrupt-parent = <&pic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
++			reg = <0x4600 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <a 8>;
++			interrupts = <10 0x8>;
+ 			interrupt-parent = <&pic>;
+ 		};
+ 
+@@ -82,26 +96,26 @@
+ 			device_type = "crypto";
+ 			model = "SEC2";
+ 			compatible = "talitos";
+-			reg = <30000 7000>;
+-			interrupts = <b 8>;
++			reg = <0x30000 0x7000>;
++			interrupts = <11 0x8>;
+ 			interrupt-parent = <&pic>;
+ 			/* Rev. 2.2 */
+ 			num-channels = <1>;
+-			channel-fifo-len = <18>;
+-			exec-units-mask = <0000004c>;
+-			descriptor-types-mask = <0122003f>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x0000004c>;
++			descriptor-types-mask = <0x0122003f>;
+ 		};
+ 
+ 		pic:pic at 700 {
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <700 100>;
++			reg = <0x700 0x100>;
+ 			device_type = "ipic";
+ 		};
+ 
+ 		par_io at 1400 {
+-			reg = <1400 100>;
++			reg = <0x1400 0x100>;
+ 			device_type = "par_io";
+ 			num-ports = <7>;
+ 
+@@ -110,28 +124,28 @@
+ 			/* port  pin  dir  open_drain  assignment  has_irq */
+ 					3  4  3  0  2  0 	/* MDIO */
+ 					3  5  1  0  2  0 	/* MDC */
+-					3 15  2  0  1  0 	/* RX_CLK (CLK16) */
+-					3 17  2  0  1  0 	/* TX_CLK (CLK3) */
+-					0 12  1  0  1  0 	/* TxD0 */
+-					0 13  1  0  1  0 	/* TxD1 */
+-					0 14  1  0  1  0 	/* TxD2 */
+-					0 15  1  0  1  0 	/* TxD3 */
+-					0 16  2  0  1  0 	/* RxD0 */
+-					0 17  2  0  1  0 	/* RxD1 */
+-					0 18  2  0  1  0 	/* RxD2 */
+-					0 19  2  0  1  0 	/* RxD3 */
+-					0 1a  2  0  1  0 	/* RX_ER */
+-					0 1b  1  0  1  0 	/* TX_ER */
+-					0 1c  2  0  1  0 	/* RX_DV */
+-					0 1d  2  0  1  0 	/* COL */
+-					0 1e  1  0  1  0 	/* TX_EN */
+-					0 1f  2  0  1  0>;      /* CRS */
++					3 21  2  0  1  0 	/* RX_CLK (CLK16) */
++					3 23  2  0  1  0 	/* TX_CLK (CLK3) */
++					0 18  1  0  1  0 	/* TxD0 */
++					0 19  1  0  1  0 	/* TxD1 */
++					0 20  1  0  1  0 	/* TxD2 */
++					0 21  1  0  1  0 	/* TxD3 */
++					0 22  2  0  1  0 	/* RxD0 */
++					0 23  2  0  1  0 	/* RxD1 */
++					0 24  2  0  1  0 	/* RxD2 */
++					0 25  2  0  1  0 	/* RxD3 */
++					0 26  2  0  1  0 	/* RX_ER */
++					0 27  1  0  1  0 	/* TX_ER */
++					0 28  2  0  1  0 	/* RX_DV */
++					0 29  2  0  1  0 	/* COL */
++					0 30  1  0  1  0 	/* TX_EN */
++					0 31  2  0  1  0>;      /* CRS */
+ 			};
+ 			ucc3pio:ucc_pin@03 {
+ 				pio-map = <
+ 			/* port  pin  dir  open_drain  assignment  has_irq */
+-					0  d  2  0  1  0 	/* RX_CLK (CLK9) */
+-					3 18  2  0  1  0 	/* TX_CLK (CLK10) */
++					0 13  2  0  1  0 	/* RX_CLK (CLK9) */
++					3 24  2  0  1  0 	/* TX_CLK (CLK10) */
+ 					1  0  1  0  1  0 	/* TxD0 */
+ 					1  1  1  0  1  0 	/* TxD1 */
+ 					1  2  1  0  1  0 	/* TxD2 */
+@@ -142,10 +156,10 @@
+ 					1  7  2  0  1  0 	/* RxD3 */
+ 					1  8  2  0  1  0 	/* RX_ER */
+ 					1  9  1  0  1  0 	/* TX_ER */
+-					1  a  2  0  1  0 	/* RX_DV */
+-					1  b  2  0  1  0 	/* COL */
+-					1  c  1  0  1  0 	/* TX_EN */
+-					1  d  2  0  1  0>;      /* CRS */
++					1 10  2  0  1  0 	/* RX_DV */
++					1 11  2  0  1  0 	/* COL */
++					1 12  1  0  1  0 	/* TX_EN */
++					1 13  2  0  1  0>;      /* CRS */
+ 			};
+ 		};
+ 	};
+@@ -154,77 +168,71 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "qe";
+-		model = "QE";
+-		ranges = <0 e0100000 00100000>;
+-		reg = <e0100000 480>;
++		compatible = "fsl,qe";
++		ranges = <0x0 0xe0100000 0x00100000>;
++		reg = <0xe0100000 0x480>;
+ 		brg-frequency = <0>;
+-		bus-frequency = <BCD3D80>;
++		bus-frequency = <198000000>;
+ 
+ 		muram@10000 {
+-			device_type = "muram";
+-			ranges = <0 00010000 00004000>;
++ 			#address-cells = <1>;
++ 			#size-cells = <1>;
++			compatible = "fsl,qe-muram", "fsl,cpm-muram";
++			ranges = <0x0 0x00010000 0x00004000>;
+ 
+ 			data-only@0 {
+-				reg = <0 4000>;
++				compatible = "fsl,qe-muram-data",
++					     "fsl,cpm-muram-data";
++				reg = <0x0 0x4000>;
+ 			};
+ 		};
+ 
+ 		spi@4c0 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <4c0 40>;
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x4c0 0x40>;
+ 			interrupts = <2>;
+ 			interrupt-parent = <&qeic>;
+ 			mode = "cpu-qe";
+ 		};
+ 
+ 		spi@500 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <500 40>;
++			cell-index = <1>;
++			compatible = "fsl,spi";
++			reg = <0x500 0x40>;
+ 			interrupts = <1>;
+ 			interrupt-parent = <&qeic>;
+ 			mode = "cpu";
+ 		};
+ 
+-		ucc@3000 {
++		enet0: ucc@3000 {
+ 			device_type = "network";
+ 			compatible = "ucc_geth";
+ 			model = "UCC";
++			cell-index = <2>;
+ 			device-id = <2>;
+-			reg = <3000 200>;
+-			interrupts = <21>;
++			reg = <0x3000 0x200>;
++			interrupts = <33>;
+ 			interrupt-parent = <&qeic>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			rx-clock = <20>;
+-			tx-clock = <13>;
++			rx-clock-name = "clk16";
++			tx-clock-name = "clk3";
+ 			phy-handle = <&phy00>;
+ 			pio-handle = <&ucc2pio>;
+ 		};
+ 
+-		ucc@2200 {
++		enet1: ucc@2200 {
+ 			device_type = "network";
+ 			compatible = "ucc_geth";
+ 			model = "UCC";
++			cell-index = <3>;
+ 			device-id = <3>;
+-			reg = <2200 200>;
+-			interrupts = <22>;
++			reg = <0x2200 0x200>;
++			interrupts = <34>;
+ 			interrupt-parent = <&qeic>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			rx-clock = <19>;
+-			tx-clock = <1a>;
++			rx-clock-name = "clk9";
++			tx-clock-name = "clk10";
+ 			phy-handle = <&phy04>;
+ 			pio-handle = <&ucc3pio>;
+ 		};
+@@ -232,65 +240,65 @@
+ 		mdio@3120 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			reg = <3120 18>;
+-			device_type = "mdio";
+-			compatible = "ucc_geth_phy";
++			reg = <0x3120 0x18>;
++			compatible = "fsl,ucc-mdio";
+ 
+ 			phy00:ethernet-phy@00 {
+ 				interrupt-parent = <&pic>;
+ 				interrupts = <0>;
+-				reg = <0>;
++				reg = <0x0>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 			phy04:ethernet-phy@04 {
+ 				interrupt-parent = <&pic>;
+ 				interrupts = <0>;
+-				reg = <4>;
++				reg = <0x4>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		qeic:qeic@80 {
++		qeic:interrupt-controller@80 {
+ 			interrupt-controller;
+-			device_type = "qeic";
++			compatible = "fsl,qe-ic";
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <1>;
+-			reg = <80 80>;
++			reg = <0x80 0x80>;
+ 			big-endian;
+-			interrupts = <20 8 21 8>; //high:32 low:33
++			interrupts = <32 0x8 33 0x8>; //high:32 low:33
+ 			interrupt-parent = <&pic>;
+ 		};
+ 	};
+ 
+-	pci@e0008500 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci0: pci@e0008500 {
++		cell-index = <1>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 				/* IDSEL 0x10 AD16 (USB) */
+-				 8000 0 0 1 &pic 11 8
++				 0x8000 0x0 0x0 0x1 &pic 17 0x8
+ 
+ 				/* IDSEL 0x11 AD17 (Mini1)*/
+-				 8800 0 0 1 &pic 12 8
+-				 8800 0 0 2 &pic 13 8
+-				 8800 0 0 3 &pic 14 8
+-				 8800 0 0 4 &pic 30 8
++				 0x8800 0x0 0x0 0x1 &pic 18 0x8
++				 0x8800 0x0 0x0 0x2 &pic 19 0x8
++				 0x8800 0x0 0x0 0x3 &pic 20 0x8
++				 0x8800 0x0 0x0 0x4 &pic 48 0x8
+ 
+ 				/* IDSEL 0x12 AD18 (PCI/Mini2) */
+-				 9000 0 0 1 &pic 13 8
+-				 9000 0 0 2 &pic 14 8
+-				 9000 0 0 3 &pic 30 8
+-				 9000 0 0 4 &pic 11 8>;
++				 0x9000 0x0 0x0 0x1 &pic 19 0x8
++				 0x9000 0x0 0x0 0x2 &pic 20 0x8
++				 0x9000 0x0 0x0 0x3 &pic 48 0x8
++				 0x9000 0x0 0x0 0x4 &pic 17 0x8>;
+ 
+ 		interrupt-parent = <&pic>;
+-		interrupts = <42 8>;
+-		bus-range = <0 0>;
+-		ranges = <42000000 0 80000000 80000000 0 10000000
+-			  02000000 0 90000000 90000000 0 10000000
+-			  01000000 0 d0000000 d0000000 0 04000000>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++			  0x01000000 0x0 0xd0000000 0xd0000000 0x0 0x04000000>;
+ 		clock-frequency = <0>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008500 100>;
++		reg = <0xe0008500 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
+index 5072f6d..9426676 100644
+--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
++++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
+@@ -8,23 +8,35 @@
+  * Free Software Foundation; either version 2 of the License, or (at your
+  * option) any later version.
+  */
++
++/dts-v1/;
++
+ / {
+ 	model = "MPC8349EMITX";
+ 	compatible = "MPC8349EMITX", "MPC834xMITX", "MPC83xxMITX";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++	};
++
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+ 		PowerPC,8349@0 {
+ 			device_type = "cpu";
+-			reg = <0>;
+-			d-cache-line-size = <20>;
+-			i-cache-line-size = <20>;
+-			d-cache-size = <8000>;
+-			i-cache-size = <8000>;
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
+ 			timebase-frequency = <0>;	// from bootloader
+ 			bus-frequency = <0>;		// from bootloader
+ 			clock-frequency = <0>;		// from bootloader
+@@ -33,222 +45,223 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 10000000>;
++		reg = <0x00000000 0x10000000>;
+ 	};
+ 
+ 	soc8349@e0000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+-		ranges = <0 e0000000 00100000>;
+-		reg = <e0000000 00000200>;
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
+ 		bus-frequency = <0>;                    // from bootloader
+ 
+ 		wdt@200 {
+ 			device_type = "watchdog";
+ 			compatible = "mpc83xx_wdt";
+-			reg = <200 100>;
++			reg = <0x200 0x100>;
+ 		};
+ 
+ 		i2c@3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+-			reg = <3000 100>;
+-			interrupts = <e 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 		};
+ 
+ 		i2c@3100 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
+ 			compatible = "fsl-i2c";
+-			reg = <3100 100>;
+-			interrupts = <f 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 		};
+ 
+ 		spi@7000 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <7000 1000>;
+-			interrupts = <10 8>;
+-			interrupt-parent = < &ipic >;
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
+ 			mode = "cpu";
+ 		};
+ 
+ 		usb@22000 {
+-			device_type = "usb";
+ 			compatible = "fsl-usb2-mph";
+-			reg = <22000 1000>;
++			reg = <0x22000 0x1000>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			interrupt-parent = < &ipic >;
+-			interrupts = <27 8>;
++			interrupt-parent = <&ipic>;
++			interrupts = <39 0x8>;
+ 			phy_type = "ulpi";
+ 			port1;
+ 		};
+ 
+ 		usb@23000 {
+-			device_type = "usb";
+ 			compatible = "fsl-usb2-dr";
+-			reg = <23000 1000>;
++			reg = <0x23000 0x1000>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			interrupt-parent = < &ipic >;
+-			interrupts = <26 8>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
+ 			dr_mode = "peripheral";
+ 			phy_type = "ulpi";
+ 		};
+ 
+ 		mdio@24520 {
+-			device_type = "mdio";
+-			compatible = "gianfar";
+-			reg = <24520 20>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
+ 
+ 			/* Vitesse 8201 */
+ 			phy1c: ethernet-phy@1c {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <12 8>;
+-				reg = <1c>;
+-				device_type = "ethernet-phy";
+-			};
 -
--	.align	9
--fake_headers_as_bzImage:
--	.word	0
--	.ascii	"HdrS"		! header signature
--	.word	0x0202		! header version number (>= 0x0105)
--				! or else old loadlin-1.5 will fail)
--	.word	0		! default_switch
--	.word	0		! SETUPSEG
--	.word	0x1000
--	.word	0		! pointing to kernel version string
--	.byte	0		! = 0, old one (LILO, Loadlin,
--				! 0xTV: T=0 for LILO
--				!       V = version
--	.byte	1		! Load flags bzImage=1
--	.word	0x8000		! size to move, when setup is not
--	.long	0x100000	! 0x100000 = default for big kernel
--	.long	0		! address of loaded ramdisk image
--	.long	0		# its size in bytes
-diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S
+-			/* Vitesse 7385 */
+-			phy1f: ethernet-phy@1f {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <12 8>;
+-				reg = <1f>;
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x1c>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		ethernet@24000 {
++		enet0: ethernet@24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <24000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
++			reg = <0x24000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <20 8 21 8 22 8>;
+-			interrupt-parent = < &ipic >;
+-			phy-handle = < &phy1c >;
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy1c>;
+ 			linux,network-index = <0>;
+ 		};
+ 
+-		ethernet@25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet@25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <25000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
++			reg = <0x25000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <23 8 24 8 25 8>;
+-			interrupt-parent = < &ipic >;
+-			phy-handle = < &phy1f >;
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			interrupt-parent = <&ipic>;
++			/* Vitesse 7385 isn't on the MDIO bus */
++			fixed-link = <1 1 1000 0 0>;
+ 			linux,network-index = <1>;
+ 		};
+ 
+-		serial@4500 {
++		serial0: serial@4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
++			reg = <0x4500 0x100>;
+ 			clock-frequency = <0>;		// from bootloader
+-			interrupts = <9 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+-		serial@4600 {
++		serial1: serial@4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
++			reg = <0x4600 0x100>;
+ 			clock-frequency = <0>;		// from bootloader
+-			interrupts = <a 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+ 		crypto@30000 {
+ 			device_type = "crypto";
+ 			model = "SEC2";
+ 			compatible = "talitos";
+-			reg = <30000 10000>;
+-			interrupts = <b 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
+ 			num-channels = <4>;
+-			channel-fifo-len = <18>;
+-			exec-units-mask = <0000007e>;
+-			descriptor-types-mask = <01010ebf>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x0000007e>;
++			descriptor-types-mask = <0x01010ebf>;
+ 		};
+ 
+ 		ipic: pic@700 {
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <700 100>;
++			reg = <0x700 0x100>;
+ 			device_type = "ipic";
+ 		};
+ 	};
+ 
+-	pci@e0008500 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci0: pci@e0008500 {
++		cell-index = <1>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 				/* IDSEL 0x10 - SATA */
+-				8000 0 0 1 &ipic 16 8 /* SATA_INTA */
++				0x8000 0x0 0x0 0x1 &ipic 22 0x8 /* SATA_INTA */
+ 				>;
+-		interrupt-parent = < &ipic >;
+-		interrupts = <42 8>;
+-		bus-range = <0 0>;
+-		ranges = <42000000 0 80000000 80000000 0 10000000
+-			  02000000 0 90000000 90000000 0 10000000
+-			  01000000 0 00000000 e2000000 0 01000000>;
+-		clock-frequency = <3f940aa>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x01000000>;
++		clock-frequency = <66666666>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008500 100>;
++		reg = <0xe0008500 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+ 
+-	pci@e0008600 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci1: pci@e0008600 {
++		cell-index = <2>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 				/* IDSEL 0x0E - MiniPCI Slot */
+-				7000 0 0 1 &ipic 15 8 /* PCI_INTA */
++				0x7000 0x0 0x0 0x1 &ipic 21 0x8 /* PCI_INTA */
+ 
+ 				/* IDSEL 0x0F - PCI Slot */
+-				7800 0 0 1 &ipic 14 8 /* PCI_INTA */
+-				7800 0 0 2 &ipic 15 8 /* PCI_INTB */
+-				 >;
+-		interrupt-parent = < &ipic >;
+-		interrupts = <43 8>;
+-		bus-range = <0 0>;
+-		ranges = <42000000 0 a0000000 a0000000 0 10000000
+-			  02000000 0 b0000000 b0000000 0 10000000
+-			  01000000 0 00000000 e3000000 0 01000000>;
+-		clock-frequency = <3f940aa>;
++				0x7800 0x0 0x0 0x1 &ipic 20 0x8 /* PCI_INTA */
++				0x7800 0x0 0x0 0x2 &ipic 21 0x8 /* PCI_INTB */
++				>;
++		interrupt-parent = <&ipic>;
++		interrupts = <67 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x42000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
++			  0x02000000 0x0 0xb0000000 0xb0000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe3000000 0x0 0x01000000>;
++		clock-frequency = <66666666>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008600 100>;
++		reg = <0xe0008600 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+ 
++	localbus@e0005000 {
++		#address-cells = <2>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8349e-localbus",
++			     "fsl,pq2pro-localbus";
++		reg = <0xe0005000 0xd8>;
++		ranges = <0x3 0x0 0xf0000000 0x210>;
+ 
+-
++		pata@3,0 {
++			compatible = "fsl,mpc8349emitx-pata", "ata-generic";
++			reg = <0x3 0x0 0x10 0x3 0x20c 0x4>;
++			reg-shift = <1>;
++			pio-mode = <6>;
++			interrupts = <23 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++	};
+ };
+diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+index 074f7a2..f81d735 100644
+--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
++++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+@@ -8,23 +8,33 @@
+  * Free Software Foundation; either version 2 of the License, or (at your
+  * option) any later version.
+  */
++
++/dts-v1/;
++
+ / {
+ 	model = "MPC8349EMITXGP";
+ 	compatible = "MPC8349EMITXGP", "MPC834xMITX", "MPC83xxMITX";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+ 		PowerPC,8349@0 {
+ 			device_type = "cpu";
+-			reg = <0>;
+-			d-cache-line-size = <20>;
+-			i-cache-line-size = <20>;
+-			d-cache-size = <8000>;
+-			i-cache-size = <8000>;
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
+ 			timebase-frequency = <0>;	// from bootloader
+ 			bus-frequency = <0>;		// from bootloader
+ 			clock-frequency = <0>;		// from bootloader
+@@ -33,148 +43,154 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 10000000>;
++		reg = <0x00000000 0x10000000>;
+ 	};
+ 
+ 	soc8349@e0000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+-		ranges = <0 e0000000 00100000>;
+-		reg = <e0000000 00000200>;
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
+ 		bus-frequency = <0>;                    // from bootloader
+ 
+ 		wdt@200 {
+ 			device_type = "watchdog";
+ 			compatible = "mpc83xx_wdt";
+-			reg = <200 100>;
++			reg = <0x200 0x100>;
+ 		};
+ 
+ 		i2c@3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+-			reg = <3000 100>;
+-			interrupts = <e 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 		};
+ 
+ 		i2c@3100 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
+ 			compatible = "fsl-i2c";
+-			reg = <3100 100>;
+-			interrupts = <f 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 		};
+ 
+ 		spi@7000 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <7000 1000>;
+-			interrupts = <10 8>;
+-			interrupt-parent = < &ipic >;
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
+ 			mode = "cpu";
+ 		};
+ 
+ 		usb@23000 {
+-			device_type = "usb";
+ 			compatible = "fsl-usb2-dr";
+-			reg = <23000 1000>;
++			reg = <0x23000 0x1000>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			interrupt-parent = < &ipic >;
+-			interrupts = <26 8>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
+ 			dr_mode = "otg";
+ 			phy_type = "ulpi";
+ 		};
+ 
+ 		mdio@24520 {
+-			device_type = "mdio";
+-			compatible = "gianfar";
+-			reg = <24520 20>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
+ 
+ 			/* Vitesse 8201 */
+ 			phy1c: ethernet-phy@1c {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <12 8>;
+-				reg = <1c>;
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x1c>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		ethernet@24000 {
++		enet0: ethernet@24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <24000 1000>;
++			reg = <0x24000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <20 8 21 8 22 8>;
+-			interrupt-parent = < &ipic >;
+-			phy-handle = < &phy1c >;
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy1c>;
+ 			linux,network-index = <0>;
+ 		};
+ 
+-		serial@4500 {
++		serial0: serial@4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
++			reg = <0x4500 0x100>;
+ 			clock-frequency = <0>;		// from bootloader
+-			interrupts = <9 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+-		serial@4600 {
++		serial1: serial@4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
++			reg = <0x4600 0x100>;
+ 			clock-frequency = <0>;		// from bootloader
+-			interrupts = <a 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+ 		crypto@30000 {
+ 			device_type = "crypto";
+ 			model = "SEC2";
+ 			compatible = "talitos";
+-			reg = <30000 10000>;
+-			interrupts = <b 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
+ 			num-channels = <4>;
+-			channel-fifo-len = <18>;
+-			exec-units-mask = <0000007e>;
+-			descriptor-types-mask = <01010ebf>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x0000007e>;
++			descriptor-types-mask = <0x01010ebf>;
+ 		};
+ 
+ 		ipic: pic@700 {
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <700 100>;
++			reg = <0x700 0x100>;
+ 			device_type = "ipic";
+ 		};
+ 	};
+ 
+-	pci@e0008600 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci0: pci@e0008600 {
++		cell-index = <2>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 				/* IDSEL 0x0F - PCI Slot */
+-				7800 0 0 1 &ipic 14 8 /* PCI_INTA */
+-				7800 0 0 2 &ipic 15 8 /* PCI_INTB */
++				0x7800 0x0 0x0 0x1 &ipic 20 0x8 /* PCI_INTA */
++				0x7800 0x0 0x0 0x2 &ipic 21 0x8 /* PCI_INTB */
+ 				 >;
+-		interrupt-parent = < &ipic >;
+-		interrupts = <43 8>;
+-		bus-range = <1 1>;
+-		ranges = <42000000 0 a0000000 a0000000 0 10000000
+-			  02000000 0 b0000000 b0000000 0 10000000
+-			  01000000 0 00000000 e3000000 0 01000000>;
+-		clock-frequency = <3f940aa>;
++		interrupt-parent = <&ipic>;
++		interrupts = <67 0x8>;
++		bus-range = <0x1 0x1>;
++		ranges = <0x42000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
++			  0x02000000 0x0 0xb0000000 0xb0000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe3000000 0x0 0x01000000>;
++		clock-frequency = <66666666>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008600 100>;
++		reg = <0xe0008600 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
+index 49363f8..7480eda 100644
+--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
++++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
+@@ -9,23 +9,34 @@
+  * option) any later version.
+  */
+ 
++/dts-v1/;
++
+ / {
+ 	model = "MPC8349EMDS";
+ 	compatible = "MPC8349EMDS", "MPC834xMDS", "MPC83xxMDS";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++	};
++
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+ 		PowerPC,8349@0 {
+ 			device_type = "cpu";
+-			reg = <0>;
+-			d-cache-line-size = <20>;	// 32 bytes
+-			i-cache-line-size = <20>;	// 32 bytes
+-			d-cache-size = <8000>;		// L1, 32K
+-			i-cache-size = <8000>;		// L1, 32K
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
+ 			timebase-frequency = <0>;	// from bootloader
+ 			bus-frequency = <0>;		// from bootloader
+ 			clock-frequency = <0>;		// from bootloader
+@@ -34,164 +45,152 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 10000000>;	// 256MB at 0
++		reg = <0x00000000 0x10000000>;	// 256MB at 0
+ 	};
+ 
+ 	bcsr@e2400000 {
+ 		device_type = "board-control";
+-		reg = <e2400000 8000>;
++		reg = <0xe2400000 0x8000>;
+ 	};
+ 
+ 	soc8349@e0000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+-		ranges = <0 e0000000 00100000>;
+-		reg = <e0000000 00000200>;
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
+ 		bus-frequency = <0>;
+ 
+ 		wdt@200 {
+ 			device_type = "watchdog";
+ 			compatible = "mpc83xx_wdt";
+-			reg = <200 100>;
++			reg = <0x200 0x100>;
+ 		};
+ 
+ 		i2c@3000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+-			reg = <3000 100>;
+-			interrupts = <e 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 
+ 			rtc@68 {
+ 				compatible = "dallas,ds1374";
+-				reg = <68>;
++				reg = <0x68>;
+ 			};
+ 		};
+ 
+ 		i2c@3100 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <1>;
+ 			compatible = "fsl-i2c";
+-			reg = <3100 100>;
+-			interrupts = <f 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 		};
+ 
+ 		spi@7000 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <7000 1000>;
+-			interrupts = <10 8>;
+-			interrupt-parent = < &ipic >;
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
+ 			mode = "cpu";
+ 		};
+ 
+-		/* phy type (ULPI or SERIAL) are only types supportted for MPH */
++		/* phy type (ULPI or SERIAL) are only types supported for MPH */
+ 		/* port = 0 or 1 */
+ 		usb@22000 {
+-			device_type = "usb";
+ 			compatible = "fsl-usb2-mph";
+-			reg = <22000 1000>;
++			reg = <0x22000 0x1000>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			interrupt-parent = < &ipic >;
+-			interrupts = <27 8>;
++			interrupt-parent = <&ipic>;
++			interrupts = <39 0x8>;
+ 			phy_type = "ulpi";
+ 			port1;
+ 		};
+ 		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
+ 		usb@23000 {
+-			device_type = "usb";
+ 			compatible = "fsl-usb2-dr";
+-			reg = <23000 1000>;
++			reg = <0x23000 0x1000>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			interrupt-parent = < &ipic >;
+-			interrupts = <26 8>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
+ 			dr_mode = "otg";
+ 			phy_type = "ulpi";
+ 		};
+ 
+ 		mdio@24520 {
+-			device_type = "mdio";
+-			compatible = "gianfar";
+-			reg = <24520 20>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
+ 			phy0: ethernet-phy@0 {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <11 8>;
+-				reg = <0>;
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x0>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 			phy1: ethernet-phy@1 {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <12 8>;
+-				reg = <1>;
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x1>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		ethernet@24000 {
++		enet0: ethernet@24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <24000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
++			reg = <0x24000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <20 8 21 8 22 8>;
+-			interrupt-parent = < &ipic >;
+-			phy-handle = < &phy0 >;
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy0>;
+ 			linux,network-index = <0>;
+ 		};
+ 
+-		ethernet@25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet@25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <25000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
++			reg = <0x25000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <23 8 24 8 25 8>;
+-			interrupt-parent = < &ipic >;
+-			phy-handle = < &phy1 >;
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy1>;
+ 			linux,network-index = <1>;
+ 		};
+ 
+-		serial@4500 {
++		serial0: serial@4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
++			reg = <0x4500 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <9 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+-		serial@4600 {
++		serial1: serial@4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
++			reg = <0x4600 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <a 8>;
+-			interrupt-parent = < &ipic >;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+ 		/* May need to remove if on a part without crypto engine */
+@@ -199,15 +198,15 @@
+ 			device_type = "crypto";
+ 			model = "SEC2";
+ 			compatible = "talitos";
+-			reg = <30000 10000>;
+-			interrupts = <b 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
+ 			num-channels = <4>;
+-			channel-fifo-len = <18>;
+-			exec-units-mask = <0000007e>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x0000007e>;
+ 			/* desc mask is for rev2.0,
+ 			 * we need runtime fixup for >2.0 */
+-			descriptor-types-mask = <01010ebf>;
++			descriptor-types-mask = <0x01010ebf>;
+ 		};
+ 
+ 		/* IPIC
+@@ -220,127 +219,129 @@
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <700 100>;
++			reg = <0x700 0x100>;
+ 			device_type = "ipic";
+ 		};
+ 	};
+ 
+-	pci@e0008500 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci0: pci@e0008500 {
++		cell-index = <1>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 
+ 				/* IDSEL 0x11 */
+-				 8800 0 0 1 &ipic 14 8
+-				 8800 0 0 2 &ipic 15 8
+-				 8800 0 0 3 &ipic 16 8
+-				 8800 0 0 4 &ipic 17 8
++				 0x8800 0x0 0x0 0x1 &ipic 20 0x8
++				 0x8800 0x0 0x0 0x2 &ipic 21 0x8
++				 0x8800 0x0 0x0 0x3 &ipic 22 0x8
++				 0x8800 0x0 0x0 0x4 &ipic 23 0x8
+ 
+ 				/* IDSEL 0x12 */
+-				 9000 0 0 1 &ipic 16 8
+-				 9000 0 0 2 &ipic 17 8
+-				 9000 0 0 3 &ipic 14 8
+-				 9000 0 0 4 &ipic 15 8
++				 0x9000 0x0 0x0 0x1 &ipic 22 0x8
++				 0x9000 0x0 0x0 0x2 &ipic 23 0x8
++				 0x9000 0x0 0x0 0x3 &ipic 20 0x8
++				 0x9000 0x0 0x0 0x4 &ipic 21 0x8
+ 
+ 				/* IDSEL 0x13 */
+-				 9800 0 0 1 &ipic 17 8
+-				 9800 0 0 2 &ipic 14 8
+-				 9800 0 0 3 &ipic 15 8
+-				 9800 0 0 4 &ipic 16 8
++				 0x9800 0x0 0x0 0x1 &ipic 23 0x8
++				 0x9800 0x0 0x0 0x2 &ipic 20 0x8
++				 0x9800 0x0 0x0 0x3 &ipic 21 0x8
++				 0x9800 0x0 0x0 0x4 &ipic 22 0x8
+ 
+ 				/* IDSEL 0x15 */
+-				 a800 0 0 1 &ipic 14 8
+-				 a800 0 0 2 &ipic 15 8
+-				 a800 0 0 3 &ipic 16 8
+-				 a800 0 0 4 &ipic 17 8
++				 0xa800 0x0 0x0 0x1 &ipic 20 0x8
++				 0xa800 0x0 0x0 0x2 &ipic 21 0x8
++				 0xa800 0x0 0x0 0x3 &ipic 22 0x8
++				 0xa800 0x0 0x0 0x4 &ipic 23 0x8
+ 
+ 				/* IDSEL 0x16 */
+-				 b000 0 0 1 &ipic 17 8
+-				 b000 0 0 2 &ipic 14 8
+-				 b000 0 0 3 &ipic 15 8
+-				 b000 0 0 4 &ipic 16 8
++				 0xb000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xb000 0x0 0x0 0x2 &ipic 20 0x8
++				 0xb000 0x0 0x0 0x3 &ipic 21 0x8
++				 0xb000 0x0 0x0 0x4 &ipic 22 0x8
+ 
+ 				/* IDSEL 0x17 */
+-				 b800 0 0 1 &ipic 16 8
+-				 b800 0 0 2 &ipic 17 8
+-				 b800 0 0 3 &ipic 14 8
+-				 b800 0 0 4 &ipic 15 8
++				 0xb800 0x0 0x0 0x1 &ipic 22 0x8
++				 0xb800 0x0 0x0 0x2 &ipic 23 0x8
++				 0xb800 0x0 0x0 0x3 &ipic 20 0x8
++				 0xb800 0x0 0x0 0x4 &ipic 21 0x8
+ 
+ 				/* IDSEL 0x18 */
+-				 c000 0 0 1 &ipic 15 8
+-				 c000 0 0 2 &ipic 16 8
+-				 c000 0 0 3 &ipic 17 8
+-				 c000 0 0 4 &ipic 14 8>;
+-		interrupt-parent = < &ipic >;
+-		interrupts = <42 8>;
++				 0xc000 0x0 0x0 0x1 &ipic 21 0x8
++				 0xc000 0x0 0x0 0x2 &ipic 22 0x8
++				 0xc000 0x0 0x0 0x3 &ipic 23 0x8
++				 0xc000 0x0 0x0 0x4 &ipic 20 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
+ 		bus-range = <0 0>;
+-		ranges = <02000000 0 90000000 90000000 0 10000000
+-			  42000000 0 80000000 80000000 0 10000000
+-			  01000000 0 00000000 e2000000 0 00100000>;
+-		clock-frequency = <3f940aa>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++			  0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++		clock-frequency = <66666666>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008500 100>;
++		reg = <0xe0008500 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+ 
+-	pci@e0008600 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci1: pci@e0008600 {
++		cell-index = <2>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 
+ 				/* IDSEL 0x11 */
+-				 8800 0 0 1 &ipic 14 8
+-				 8800 0 0 2 &ipic 15 8
+-				 8800 0 0 3 &ipic 16 8
+-				 8800 0 0 4 &ipic 17 8
++				 0x8800 0x0 0x0 0x1 &ipic 20 0x8
++				 0x8800 0x0 0x0 0x2 &ipic 21 0x8
++				 0x8800 0x0 0x0 0x3 &ipic 22 0x8
++				 0x8800 0x0 0x0 0x4 &ipic 23 0x8
+ 
+ 				/* IDSEL 0x12 */
+-				 9000 0 0 1 &ipic 16 8
+-				 9000 0 0 2 &ipic 17 8
+-				 9000 0 0 3 &ipic 14 8
+-				 9000 0 0 4 &ipic 15 8
++				 0x9000 0x0 0x0 0x1 &ipic 22 0x8
++				 0x9000 0x0 0x0 0x2 &ipic 23 0x8
++				 0x9000 0x0 0x0 0x3 &ipic 20 0x8
++				 0x9000 0x0 0x0 0x4 &ipic 21 0x8
+ 
+ 				/* IDSEL 0x13 */
+-				 9800 0 0 1 &ipic 17 8
+-				 9800 0 0 2 &ipic 14 8
+-				 9800 0 0 3 &ipic 15 8
+-				 9800 0 0 4 &ipic 16 8
++				 0x9800 0x0 0x0 0x1 &ipic 23 0x8
++				 0x9800 0x0 0x0 0x2 &ipic 20 0x8
++				 0x9800 0x0 0x0 0x3 &ipic 21 0x8
++				 0x9800 0x0 0x0 0x4 &ipic 22 0x8
+ 
+ 				/* IDSEL 0x15 */
+-				 a800 0 0 1 &ipic 14 8
+-				 a800 0 0 2 &ipic 15 8
+-				 a800 0 0 3 &ipic 16 8
+-				 a800 0 0 4 &ipic 17 8
++				 0xa800 0x0 0x0 0x1 &ipic 20 0x8
++				 0xa800 0x0 0x0 0x2 &ipic 21 0x8
++				 0xa800 0x0 0x0 0x3 &ipic 22 0x8
++				 0xa800 0x0 0x0 0x4 &ipic 23 0x8
+ 
+ 				/* IDSEL 0x16 */
+-				 b000 0 0 1 &ipic 17 8
+-				 b000 0 0 2 &ipic 14 8
+-				 b000 0 0 3 &ipic 15 8
+-				 b000 0 0 4 &ipic 16 8
++				 0xb000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xb000 0x0 0x0 0x2 &ipic 20 0x8
++				 0xb000 0x0 0x0 0x3 &ipic 21 0x8
++				 0xb000 0x0 0x0 0x4 &ipic 22 0x8
+ 
+ 				/* IDSEL 0x17 */
+-				 b800 0 0 1 &ipic 16 8
+-				 b800 0 0 2 &ipic 17 8
+-				 b800 0 0 3 &ipic 14 8
+-				 b800 0 0 4 &ipic 15 8
++				 0xb800 0x0 0x0 0x1 &ipic 22 0x8
++				 0xb800 0x0 0x0 0x2 &ipic 23 0x8
++				 0xb800 0x0 0x0 0x3 &ipic 20 0x8
++				 0xb800 0x0 0x0 0x4 &ipic 21 0x8
+ 
+ 				/* IDSEL 0x18 */
+-				 c000 0 0 1 &ipic 15 8
+-				 c000 0 0 2 &ipic 16 8
+-				 c000 0 0 3 &ipic 17 8
+-				 c000 0 0 4 &ipic 14 8>;
+-		interrupt-parent = < &ipic >;
+-		interrupts = <42 8>;
++				 0xc000 0x0 0x0 0x1 &ipic 21 0x8
++				 0xc000 0x0 0x0 0x2 &ipic 22 0x8
++				 0xc000 0x0 0x0 0x3 &ipic 23 0x8
++				 0xc000 0x0 0x0 0x4 &ipic 20 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
+ 		bus-range = <0 0>;
+-		ranges = <02000000 0 b0000000 b0000000 0 10000000
+-			  42000000 0 a0000000 a0000000 0 10000000
+-			  01000000 0 00000000 e2100000 0 00100000>;
+-		clock-frequency = <3f940aa>;
++		ranges = <0x02000000 0x0 0xb0000000 0xb0000000 0x0 0x10000000
++			  0x42000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe2100000 0x0 0x00100000>;
++		clock-frequency = <66666666>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008600 100>;
++		reg = <0xe0008600 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
+index 0b2d2b5..55f03e8 100644
+--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
++++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
+@@ -14,122 +14,134 @@
+ /memreserve/	00000000 1000000;
+ */
+ 
++/dts-v1/;
++
+ / {
+ 	model = "MPC8360MDS";
+ 	compatible = "MPC8360EMDS", "MPC836xMDS", "MPC83xxMDS";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+ 		PowerPC,8360@0 {
+ 			device_type = "cpu";
+-			reg = <0>;
+-			d-cache-line-size = <20>;	// 32 bytes
+-			i-cache-line-size = <20>;	// 32 bytes
+-			d-cache-size = <8000>;		// L1, 32K
+-			i-cache-size = <8000>;		// L1, 32K
+-			timebase-frequency = <3EF1480>;
+-			bus-frequency = <FBC5200>;
+-			clock-frequency = <1F78A400>;
++			reg = <0x0>;
++			d-cache-line-size = <32>;	// 32 bytes
++			i-cache-line-size = <32>;	// 32 bytes
++			d-cache-size = <32768>;		// L1, 32K
++			i-cache-size = <32768>;		// L1, 32K
++			timebase-frequency = <66000000>;
++			bus-frequency = <264000000>;
++			clock-frequency = <528000000>;
+ 		};
+ 	};
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 10000000>;
++		reg = <0x00000000 0x10000000>;
+ 	};
+ 
+ 	bcsr@f8000000 {
+ 		device_type = "board-control";
+-		reg = <f8000000 8000>;
++		reg = <0xf8000000 0x8000>;
+ 	};
+ 
+ 	soc8360@e0000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+-		ranges = <0 e0000000 00100000>;
+-		reg = <e0000000 00000200>;
+-		bus-frequency = <FBC5200>;
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <264000000>;
+ 
+ 		wdt@200 {
+ 			device_type = "watchdog";
+ 			compatible = "mpc83xx_wdt";
+-			reg = <200 100>;
++			reg = <0x200 0x100>;
+ 		};
+ 
+ 		i2c@3000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+-			reg = <3000 100>;
+-			interrupts = <e 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 
+ 			rtc@68 {
+ 				compatible = "dallas,ds1374";
+-				reg = <68>;
++				reg = <0x68>;
+ 			};
+ 		};
+ 
+ 		i2c@3100 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <1>;
+ 			compatible = "fsl-i2c";
+-			reg = <3100 100>;
+-			interrupts = <f 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
+ 			dfsrr;
+ 		};
+ 
+-		serial@4500 {
++		serial0: serial@4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
+-			clock-frequency = <FBC5200>;
+-			interrupts = <9 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x4500 0x100>;
++			clock-frequency = <264000000>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+-		serial@4600 {
++		serial1: serial@4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
+-			clock-frequency = <FBC5200>;
+-			interrupts = <a 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x4600 0x100>;
++			clock-frequency = <264000000>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
+ 		};
+ 
+ 		crypto@30000 {
+ 			device_type = "crypto";
+ 			model = "SEC2";
+ 			compatible = "talitos";
+-			reg = <30000 10000>;
+-			interrupts = <b 8>;
+-			interrupt-parent = < &ipic >;
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
+ 			num-channels = <4>;
+-			channel-fifo-len = <18>;
+-			exec-units-mask = <0000007e>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x0000007e>;
+ 			/* desc mask is for rev1.x, we need runtime fixup for >=2.x */
+-			descriptor-types-mask = <01010ebf>;
++			descriptor-types-mask = <0x01010ebf>;
+ 		};
+ 
+ 		ipic: pic@700 {
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <700 100>;
++			reg = <0x700 0x100>;
+ 			device_type = "ipic";
+ 		};
+ 
+ 		par_io@1400 {
+-			reg = <1400 100>;
++			reg = <0x1400 0x100>;
+ 			device_type = "par_io";
+ 			num-ports = <7>;
+ 
+@@ -143,19 +155,19 @@
+ 					1  6  1  0  3  0 	/* TxD4 */
+ 					1  7  1  0  1  0 	/* TxD5 */
+ 					1  9  1  0  2  0 	/* TxD6 */
+-					1  a  1  0  2  0 	/* TxD7 */
++					1  10 1  0  2  0 	/* TxD7 */
+ 					0  9  2  0  1  0 	/* RxD0 */
+-					0  a  2  0  1  0 	/* RxD1 */
+-					0  b  2  0  1  0 	/* RxD2 */
+-					0  c  2  0  1  0 	/* RxD3 */
+-					0  d  2  0  1  0 	/* RxD4 */
++					0  10 2  0  1  0 	/* RxD1 */
++					0  11 2  0  1  0 	/* RxD2 */
++					0  12 2  0  1  0 	/* RxD3 */
++					0  13 2  0  1  0 	/* RxD4 */
+ 					1  1  2  0  2  0 	/* RxD5 */
+ 					1  0  2  0  2  0 	/* RxD6 */
+ 					1  4  2  0  2  0 	/* RxD7 */
+ 					0  7  1  0  1  0 	/* TX_EN */
+ 					0  8  1  0  1  0 	/* TX_ER */
+-					0  f  2  0  1  0 	/* RX_DV */
+-					0  10 2  0  1  0 	/* RX_ER */
++					0  15 2  0  1  0 	/* RX_DV */
++					0  16 2  0  1  0 	/* RX_ER */
+ 					0  0  2  0  1  0 	/* RX_CLK */
+ 					2  9  1  0  3  0 	/* GTX_CLK - CLK10 */
+ 					2  8  2  0  1  0>;	/* GTX125 - CLK9 */
+@@ -163,27 +175,27 @@
+ 			pio2: ucc_pin@02 {
+ 				pio-map = <
+ 			/* port  pin  dir  open_drain  assignment  has_irq */
+-					0  11 1  0  1  0   /* TxD0 */
+-					0  12 1  0  1  0   /* TxD1 */
+-					0  13 1  0  1  0   /* TxD2 */
+-					0  14 1  0  1  0   /* TxD3 */
++					0  17 1  0  1  0   /* TxD0 */
++					0  18 1  0  1  0   /* TxD1 */
++					0  19 1  0  1  0   /* TxD2 */
++					0  20 1  0  1  0   /* TxD3 */
+ 					1  2  1  0  1  0   /* TxD4 */
+ 					1  3  1  0  2  0   /* TxD5 */
+ 					1  5  1  0  3  0   /* TxD6 */
+ 					1  8  1  0  3  0   /* TxD7 */
+-					0  17 2  0  1  0   /* RxD0 */
+-					0  18 2  0  1  0   /* RxD1 */
+-					0  19 2  0  1  0   /* RxD2 */
+-					0  1a 2  0  1  0   /* RxD3 */
+-					0  1b 2  0  1  0   /* RxD4 */
+-					1  c  2  0  2  0   /* RxD5 */
+-					1  d  2  0  3  0   /* RxD6 */
+-					1  b  2  0  2  0   /* RxD7 */
+-					0  15 1  0  1  0   /* TX_EN */
+-					0  16 1  0  1  0   /* TX_ER */
+-					0  1d 2  0  1  0   /* RX_DV */
+-					0  1e 2  0  1  0   /* RX_ER */
+-					0  1f 2  0  1  0   /* RX_CLK */
++					0  23 2  0  1  0   /* RxD0 */
++					0  24 2  0  1  0   /* RxD1 */
++					0  25 2  0  1  0   /* RxD2 */
++					0  26 2  0  1  0   /* RxD3 */
++					0  27 2  0  1  0   /* RxD4 */
++					1  12 2  0  2  0   /* RxD5 */
++					1  13 2  0  3  0   /* RxD6 */
++					1  11 2  0  2  0   /* RxD7 */
++					0  21 1  0  1  0   /* TX_EN */
++					0  22 1  0  1  0   /* TX_ER */
++					0  29 2  0  1  0   /* RX_DV */
++					0  30 2  0  1  0   /* RX_ER */
++					0  31 2  0  1  0   /* RX_CLK */
+ 					2  2  1  0  2  0   /* GTX_CLK - CLK10 */
+ 					2  3  2  0  1  0   /* GTX125 - CLK4 */
+ 					0  1  3  0  2  0   /* MDIO */
+@@ -197,181 +209,174 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "qe";
+-		model = "QE";
+-		ranges = <0 e0100000 00100000>;
+-		reg = <e0100000 480>;
++		compatible = "fsl,qe";
++		ranges = <0x0 0xe0100000 0x00100000>;
++		reg = <0xe0100000 0x480>;
+ 		brg-frequency = <0>;
+-		bus-frequency = <179A7B00>;
++		bus-frequency = <396000000>;
+ 
+ 		muram@10000 {
+-			device_type = "muram";
+-			ranges = <0 00010000 0000c000>;
+-
+-			data-only@0{
+-				reg = <0 c000>;
++ 			#address-cells = <1>;
++ 			#size-cells = <1>;
++			compatible = "fsl,qe-muram", "fsl,cpm-muram";
++			ranges = <0x0 0x00010000 0x0000c000>;
++
++			data-only@0 {
++				compatible = "fsl,qe-muram-data",
++					     "fsl,cpm-muram-data";
++				reg = <0x0 0xc000>;
+ 			};
+ 		};
+ 
+ 		spi@4c0 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <4c0 40>;
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x4c0 0x40>;
+ 			interrupts = <2>;
+-			interrupt-parent = < &qeic >;
++			interrupt-parent = <&qeic>;
+ 			mode = "cpu";
+ 		};
+ 
+ 		spi@500 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
+-			reg = <500 40>;
++			cell-index = <1>;
++			compatible = "fsl,spi";
++			reg = <0x500 0x40>;
+ 			interrupts = <1>;
+-			interrupt-parent = < &qeic >;
++			interrupt-parent = <&qeic>;
+ 			mode = "cpu";
+ 		};
+ 
+ 		usb@6c0 {
+-			device_type = "usb";
+ 			compatible = "qe_udc";
+-			reg = <6c0 40 8B00 100>;
+-			interrupts = <b>;
+-			interrupt-parent = < &qeic >;
++			reg = <0x6c0 0x40 0x8b00 0x100>;
++			interrupts = <11>;
++			interrupt-parent = <&qeic>;
+ 			mode = "slave";
+ 		};
+ 
+-		ucc@2000 {
++		enet0: ucc@2000 {
+ 			device_type = "network";
+ 			compatible = "ucc_geth";
+ 			model = "UCC";
++			cell-index = <1>;
+ 			device-id = <1>;
+-			reg = <2000 200>;
+-			interrupts = <20>;
+-			interrupt-parent = < &qeic >;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
++			reg = <0x2000 0x200>;
++			interrupts = <32>;
++			interrupt-parent = <&qeic>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			rx-clock = <0>;
+-			tx-clock = <19>;
+-			phy-handle = < &phy0 >;
++			rx-clock-name = "none";
++			tx-clock-name = "clk9";
++			phy-handle = <&phy0>;
+ 			phy-connection-type = "rgmii-id";
+-			pio-handle = < &pio1 >;
++			pio-handle = <&pio1>;
+ 		};
+ 
+-		ucc@3000 {
++		enet1: ucc@3000 {
+ 			device_type = "network";
+ 			compatible = "ucc_geth";
+ 			model = "UCC";
++			cell-index = <2>;
+ 			device-id = <2>;
+-			reg = <3000 200>;
+-			interrupts = <21>;
+-			interrupt-parent = < &qeic >;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
++			reg = <0x3000 0x200>;
++			interrupts = <33>;
++			interrupt-parent = <&qeic>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			rx-clock = <0>;
+-			tx-clock = <14>;
+-			phy-handle = < &phy1 >;
++			rx-clock-name = "none";
++			tx-clock-name = "clk4";
++			phy-handle = <&phy1>;
+ 			phy-connection-type = "rgmii-id";
+-			pio-handle = < &pio2 >;
++			pio-handle = <&pio2>;
+ 		};
+ 
+ 		mdio@2120 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			reg = <2120 18>;
+-			device_type = "mdio";
+-			compatible = "ucc_geth_phy";
++			reg = <0x2120 0x18>;
++			compatible = "fsl,ucc-mdio";
+ 
+ 			phy0: ethernet-phy@00 {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <11 8>;
+-				reg = <0>;
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x0>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 			phy1: ethernet-phy@01 {
+-				interrupt-parent = < &ipic >;
+-				interrupts = <12 8>;
+-				reg = <1>;
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x1>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		qeic: qeic@80 {
++		qeic: interrupt-controller@80 {
+ 			interrupt-controller;
+-			device_type = "qeic";
++			compatible = "fsl,qe-ic";
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <1>;
+-			reg = <80 80>;
++			reg = <0x80 0x80>;
+ 			big-endian;
+-			interrupts = <20 8 21 8>; //high:32 low:33
+-			interrupt-parent = < &ipic >;
++			interrupts = <32 0x8 33 0x8>; // high:32 low:33
++			interrupt-parent = <&ipic>;
+ 		};
+ 	};
+ 
+-	pci@e0008500 {
+-		interrupt-map-mask = <f800 0 0 7>;
++	pci0: pci@e0008500 {
++		cell-index = <1>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ 		interrupt-map = <
+ 
+ 				/* IDSEL 0x11 AD17 */
+-				 8800 0 0 1 &ipic 14 8
+-				 8800 0 0 2 &ipic 15 8
+-				 8800 0 0 3 &ipic 16 8
+-				 8800 0 0 4 &ipic 17 8
++				 0x8800 0x0 0x0 0x1 &ipic 20 0x8
++				 0x8800 0x0 0x0 0x2 &ipic 21 0x8
++				 0x8800 0x0 0x0 0x3 &ipic 22 0x8
++				 0x8800 0x0 0x0 0x4 &ipic 23 0x8
+ 
+ 				/* IDSEL 0x12 AD18 */
+-				 9000 0 0 1 &ipic 16 8
+-				 9000 0 0 2 &ipic 17 8
+-				 9000 0 0 3 &ipic 14 8
+-				 9000 0 0 4 &ipic 15 8
++				 0x9000 0x0 0x0 0x1 &ipic 22 0x8
++				 0x9000 0x0 0x0 0x2 &ipic 23 0x8
++				 0x9000 0x0 0x0 0x3 &ipic 20 0x8
++				 0x9000 0x0 0x0 0x4 &ipic 21 0x8
+ 
+ 				/* IDSEL 0x13 AD19 */
+-				 9800 0 0 1 &ipic 17 8
+-				 9800 0 0 2 &ipic 14 8
+-				 9800 0 0 3 &ipic 15 8
+-				 9800 0 0 4 &ipic 16 8
++				 0x9800 0x0 0x0 0x1 &ipic 23 0x8
++				 0x9800 0x0 0x0 0x2 &ipic 20 0x8
++				 0x9800 0x0 0x0 0x3 &ipic 21 0x8
++				 0x9800 0x0 0x0 0x4 &ipic 22 0x8
+ 
+ 				/* IDSEL 0x15 AD21*/
+-				 a800 0 0 1 &ipic 14 8
+-				 a800 0 0 2 &ipic 15 8
+-				 a800 0 0 3 &ipic 16 8
+-				 a800 0 0 4 &ipic 17 8
++				 0xa800 0x0 0x0 0x1 &ipic 20 0x8
++				 0xa800 0x0 0x0 0x2 &ipic 21 0x8
++				 0xa800 0x0 0x0 0x3 &ipic 22 0x8
++				 0xa800 0x0 0x0 0x4 &ipic 23 0x8
+ 
+ 				/* IDSEL 0x16 AD22*/
+-				 b000 0 0 1 &ipic 17 8
+-				 b000 0 0 2 &ipic 14 8
+-				 b000 0 0 3 &ipic 15 8
+-				 b000 0 0 4 &ipic 16 8
++				 0xb000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xb000 0x0 0x0 0x2 &ipic 20 0x8
++				 0xb000 0x0 0x0 0x3 &ipic 21 0x8
++				 0xb000 0x0 0x0 0x4 &ipic 22 0x8
+ 
+ 				/* IDSEL 0x17 AD23*/
+-				 b800 0 0 1 &ipic 16 8
+-				 b800 0 0 2 &ipic 17 8
+-				 b800 0 0 3 &ipic 14 8
+-				 b800 0 0 4 &ipic 15 8
++				 0xb800 0x0 0x0 0x1 &ipic 22 0x8
++				 0xb800 0x0 0x0 0x2 &ipic 23 0x8
++				 0xb800 0x0 0x0 0x3 &ipic 20 0x8
++				 0xb800 0x0 0x0 0x4 &ipic 21 0x8
+ 
+ 				/* IDSEL 0x18 AD24*/
+-				 c000 0 0 1 &ipic 15 8
+-				 c000 0 0 2 &ipic 16 8
+-				 c000 0 0 3 &ipic 17 8
+-				 c000 0 0 4 &ipic 14 8>;
+-		interrupt-parent = < &ipic >;
+-		interrupts = <42 8>;
++				 0xc000 0x0 0x0 0x1 &ipic 21 0x8
++				 0xc000 0x0 0x0 0x2 &ipic 22 0x8
++				 0xc000 0x0 0x0 0x3 &ipic 23 0x8
++				 0xc000 0x0 0x0 0x4 &ipic 20 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
+ 		bus-range = <0 0>;
+-		ranges = <02000000 0 a0000000 a0000000 0 10000000
+-			  42000000 0 80000000 80000000 0 10000000
+-			  01000000 0 00000000 e2000000 0 00100000>;
+-		clock-frequency = <3f940aa>;
++		ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
++			  0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++		clock-frequency = <66666666>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008500 100>;
++		reg = <0xe0008500 0x100>;
+ 		compatible = "fsl,mpc8349-pci";
+ 		device_type = "pci";
+ 	};
+diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts
 new file mode 100644
-index 0000000..a8399b0
+index 0000000..a3637ff
 --- /dev/null
-+++ b/arch/sh/boot/compressed/head_32.S
-@@ -0,0 +1,120 @@
++++ b/arch/powerpc/boot/dts/mpc8377_mds.dts
+@@ -0,0 +1,280 @@
 +/*
-+ *  linux/arch/sh/boot/compressed/head.S
++ * MPC8377E MDS Device Tree Source
 + *
-+ *  Copyright (C) 1999 Stuart Menefy
-+ *  Copyright (C) 2003 SUGIOKA Toshinobu
++ * Copyright 2007 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
 + */
 +
-+.text
++/dts-v1/;
 +
-+#include <linux/linkage.h>
-+#include <asm/page.h>
++/ {
++	model = "fsl,mpc8377emds";
++	compatible = "fsl,mpc8377emds","fsl,mpc837xmds";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+	.global	startup
-+startup:
-+	/* Load initial status register */
-+	mov.l   init_sr, r1
-+	ldc     r1, sr
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8377@0 {
++			device_type = "cpu";
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
 +
-+	/* Move myself to proper location if necessary */
-+	mova	1f, r0
-+	mov.l	1f, r2
-+	cmp/eq	r2, r0
-+	bt	clear_bss
-+	sub	r0, r2
-+	mov.l	bss_start_addr, r0
-+	mov	#0xe0, r1
-+	and	r1, r0			! align cache line
-+	mov.l	text_start_addr, r3
-+	mov	r0, r1
-+	sub	r2, r1
-+3:
-+	mov.l	@r1, r4
-+	mov.l	@(4,r1), r5
-+	mov.l	@(8,r1), r6
-+	mov.l	@(12,r1), r7
-+	mov.l	@(16,r1), r8
-+	mov.l	@(20,r1), r9
-+	mov.l	@(24,r1), r10
-+	mov.l	@(28,r1), r11
-+	mov.l	r4, @r0
-+	mov.l	r5, @(4,r0)
-+	mov.l	r6, @(8,r0)
-+	mov.l	r7, @(12,r0)
-+	mov.l	r8, @(16,r0)
-+	mov.l	r9, @(20,r0)
-+	mov.l	r10, @(24,r0)
-+	mov.l	r11, @(28,r0)
-+#ifdef CONFIG_CPU_SH4
-+	ocbwb	@r0
-+#endif
-+	cmp/hi	r3, r0
-+	add	#-32, r0
-+	bt/s	3b
-+	 add	#-32, r1
-+	mov.l	2f, r0
-+	jmp	@r0
-+	 nop
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x20000000>;	// 512MB at 0
++	};
 +
-+	.align 2
-+1:	.long	1b
-+2:	.long	clear_bss
-+text_start_addr:
-+	.long	startup
++	soc@e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <0>;
++
++		wdt@200 {
++			compatible = "mpc83xx_wdt";
++			reg = <0x200 0x100>;
++		};
 +
-+	/* Clear BSS */
-+clear_bss:
-+	mov.l	end_addr, r1
-+	mov.l	bss_start_addr, r2
-+	mov	#0, r0
-+l1:
-+	mov.l	r0, @-r1
-+	cmp/eq	r1,r2
-+	bf	l1
++		i2c@3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
 +
-+	/* Set the initial pointer. */
-+	mov.l	init_stack_addr, r0
-+	mov.l	@r0, r15
++		i2c@3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
 +
-+	/* Decompress the kernel */
-+	mov.l	decompress_kernel_addr, r0
-+	jsr	@r0
-+	nop
++		spi@7000 {
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
++			mode = "cpu";
++		};
 +
-+	/* Jump to the start of the decompressed kernel */
-+	mov.l	kernel_start_addr, r0
-+	jmp	@r0
-+	nop
-+	
-+	.align	2
-+bss_start_addr:
-+	.long	__bss_start
-+end_addr:
-+	.long	_end
-+init_sr:
-+	.long	0x400000F0	/* Privileged mode, Bank=0, Block=0, IMASK=0xF */
-+init_stack_addr:
-+	.long	stack_start
-+decompress_kernel_addr:
-+	.long	decompress_kernel
-+kernel_start_addr:
-+	.long	_text+PAGE_SIZE
++		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
++		usb@23000 {
++			compatible = "fsl-usb2-dr";
++			reg = <0x23000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
++			phy_type = "utmi_wide";
++		};
 +
-+	.align	9
-+fake_headers_as_bzImage:
-+	.word	0
-+	.ascii	"HdrS"		! header signature
-+	.word	0x0202		! header version number (>= 0x0105)
-+				! or else old loadlin-1.5 will fail)
-+	.word	0		! default_switch
-+	.word	0		! SETUPSEG
-+	.word	0x1000
-+	.word	0		! pointing to kernel version string
-+	.byte	0		! = 0, old one (LILO, Loadlin,
-+				! 0xTV: T=0 for LILO
-+				!       V = version
-+	.byte	1		! Load flags bzImage=1
-+	.word	0x8000		! size to move, when setup is not
-+	.long	0x100000	! 0x100000 = default for big kernel
-+	.long	0		! address of loaded ramdisk image
-+	.long	0		# its size in bytes
-diff --git a/arch/sh/boot/compressed/head_64.S b/arch/sh/boot/compressed/head_64.S
++		mdio@24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++			phy2: ethernet-phy@2 {
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy@3 {
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x3>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet@24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet@25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy3>;
++		};
++
++		serial0: serial@4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;
++			clock-frequency = <0>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		serial1: serial@4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;
++			clock-frequency = <0>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		crypto@30000 {
++			model = "SEC3";
++			compatible = "talitos";
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
++			/* Rev. 3.0 geometry */
++			num-channels = <4>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x000001fe>;
++			descriptor-types-mask = <0x03ab0ebf>;
++		};
++
++		sdhc@2e000 {
++			model = "eSDHC";
++			compatible = "fsl,esdhc";
++			reg = <0x2e000 0x1000>;
++			interrupts = <42 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		sata@18000 {
++			compatible = "fsl,mpc8379-sata";
++			reg = <0x18000 0x1000>;
++			interrupts = <44 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		sata@19000 {
++			compatible = "fsl,mpc8379-sata";
++			reg = <0x19000 0x1000>;
++			interrupts = <45 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		/* IPIC
++		 * interrupts cell = <intr #, sense>
++		 * sense values match linux IORESOURCE_IRQ_* defines:
++		 * sense == 8: Level, low assertion
++		 * sense == 2: Edge, high-to-low change
++		 */
++		ipic: pic@700 {
++			compatible = "fsl,ipic";
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x700 0x100>;
++		};
++	};
++
++	pci0: pci@e0008500 {
++		cell-index = <0>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++
++				/* IDSEL 0x11 */
++				 0x8800 0x0 0x0 0x1 &ipic 20 0x8
++				 0x8800 0x0 0x0 0x2 &ipic 21 0x8
++				 0x8800 0x0 0x0 0x3 &ipic 22 0x8
++				 0x8800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL 0x12 */
++				 0x9000 0x0 0x0 0x1 &ipic 22 0x8
++				 0x9000 0x0 0x0 0x2 &ipic 23 0x8
++				 0x9000 0x0 0x0 0x3 &ipic 20 0x8
++				 0x9000 0x0 0x0 0x4 &ipic 21 0x8
++
++				/* IDSEL 0x13 */
++				 0x9800 0x0 0x0 0x1 &ipic 23 0x8
++				 0x9800 0x0 0x0 0x2 &ipic 20 0x8
++				 0x9800 0x0 0x0 0x3 &ipic 21 0x8
++				 0x9800 0x0 0x0 0x4 &ipic 22 0x8
++
++				/* IDSEL 0x15 */
++				 0xa800 0x0 0x0 0x1 &ipic 20 0x8
++				 0xa800 0x0 0x0 0x2 &ipic 21 0x8
++				 0xa800 0x0 0x0 0x3 &ipic 22 0x8
++				 0xa800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL 0x16 */
++				 0xb000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xb000 0x0 0x0 0x2 &ipic 20 0x8
++				 0xb000 0x0 0x0 0x3 &ipic 21 0x8
++				 0xb000 0x0 0x0 0x4 &ipic 22 0x8
++
++				/* IDSEL 0x17 */
++				 0xb800 0x0 0x0 0x1 &ipic 22 0x8
++				 0xb800 0x0 0x0 0x2 &ipic 23 0x8
++				 0xb800 0x0 0x0 0x3 &ipic 20 0x8
++				 0xb800 0x0 0x0 0x4 &ipic 21 0x8
++
++				/* IDSEL 0x18 */
++				 0xc000 0x0 0x0 0x1 &ipic 21 0x8
++				 0xc000 0x0 0x0 0x2 &ipic 22 0x8
++				 0xc000 0x0 0x0 0x3 &ipic 23 0x8
++				 0xc000 0x0 0x0 0x4 &ipic 20 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++		          0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++		          0x01000000 0x0 0x00000000 0xe0300000 0x0 0x00100000>;
++		clock-frequency = <0>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008500 0x100>;
++		compatible = "fsl,mpc8349-pci";
++		device_type = "pci";
++	};
++};
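The nodes above are consumed through the flattened device tree at boot: a two-cell specifier such as interrupts = <14 0x8> (sense 8 = low-assertion level, per the IPIC comment) together with the interrupt-parent phandle resolves to a Linux virq, and properties are looked up by name. Below is a minimal sketch of that lookup, assuming the of_* and irq_of_* helpers exported through asm/prom.h on arch/powerpc at this point in the series; the "fsl-i2c" string comes from the dts above, everything else is illustrative and not part of the patch:

	/* Illustrative sketch only -- not part of patch-2.6.24-git9. */
	#include <linux/init.h>
	#include <linux/errno.h>
	#include <asm/prom.h>		/* assumed home of the of_* helpers here */

	static int __init mpc837x_i2c_lookup_example(void)
	{
		struct device_node *np;
		const u32 *reg;
		unsigned int virq;

		/* First node carrying the "fsl-i2c" binding from the dts above. */
		np = of_find_compatible_node(NULL, NULL, "fsl-i2c");
		if (!np)
			return -ENODEV;

		/* "interrupts = <14 0x8>" plus interrupt-parent become a virq. */
		virq = irq_of_parse_and_map(np, 0);

		/* Properties are fetched by name; NULL means "not present". */
		reg = of_get_property(np, "reg", NULL);

		of_node_put(np);
		return (virq && reg) ? 0 : -EINVAL;
	}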
+diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
 new file mode 100644
-index 0000000..1d4ecbf
+index 0000000..440aa4d
 --- /dev/null
-+++ b/arch/sh/boot/compressed/head_64.S
-@@ -0,0 +1,163 @@
++++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
+@@ -0,0 +1,296 @@
 +/*
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * arch/shmedia/boot/compressed/head.S
++ * MPC8377E RDB Device Tree Source
 + *
-+ * Copied from
-+ *   arch/shmedia/kernel/head.S
-+ * which carried the copyright:
-+ *   Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright 2007, 2008 Freescale Semiconductor Inc.
 + *
-+ * Modification for compressed loader:
-+ *   Copyright (C) 2002 Stuart Menefy (stuart.menefy@st.com)
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
 + */
-+#include <linux/linkage.h>
-+#include <asm/cache.h>
-+#include <asm/cpu/mmu_context.h>
-+#include <asm/cpu/registers.h>
 +
++/dts-v1/;
++
++/ {
++	compatible = "fsl,mpc8377rdb";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8377@0 {
++			device_type = "cpu";
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;	// 256MB at 0
++	};
++
++	localbus@e0005000 {
++		#address-cells = <2>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8377-elbc", "fsl,elbc", "simple-bus";
++		reg = <0xe0005000 0x1000>;
++		interrupts = <77 0x8>;
++		interrupt-parent = <&ipic>;
++
++		// CS0 and CS1 are swapped when
++		// booting from nand, but the
++		// addresses are the same.
++		ranges = <0x0 0x0 0xfe000000 0x00800000
++		          0x1 0x0 0xe0600000 0x00008000
++		          0x2 0x0 0xf0000000 0x00020000
++		          0x3 0x0 0xfa000000 0x00008000>;
++
++		flash@0,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "cfi-flash";
++			reg = <0x0 0x0 0x800000>;
++			bank-width = <2>;
++			device-width = <1>;
++		};
++
++		nand@1,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8377-fcm-nand",
++			             "fsl,elbc-fcm-nand";
++			reg = <0x1 0x0 0x8000>;
++
++			u-boot@0 {
++				reg = <0x0 0x100000>;
++				read-only;
++			};
++
++			kernel@100000 {
++				reg = <0x100000 0x300000>;
++			};
++			fs@400000 {
++				reg = <0x400000 0x1c00000>;
++			};
++		};
++	};
++
++	immr@e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		compatible = "simple-bus";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <0>;
++
++		wdt@200 {
++			device_type = "watchdog";
++			compatible = "mpc83xx_wdt";
++			reg = <0x200 0x100>;
++		};
++
++		i2c@3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++			rtc@68 {
++				device_type = "rtc";
++				compatible = "dallas,ds1339";
++				reg = <0x68>;
++			};
++		};
++
++		i2c@3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
++
++		spi@7000 {
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
++			mode = "cpu";
++		};
++
++		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
++		usb@23000 {
++			compatible = "fsl-usb2-dr";
++			reg = <0x23000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
++			phy_type = "utmi";
++		};
++
++		mdio@24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++			phy2: ethernet-phy@2 {
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy@3 {
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x3>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet@24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet@25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy3>;
++		};
++
++		serial0: serial@4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;
++			clock-frequency = <0>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		serial1: serial@4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;
++			clock-frequency = <0>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		crypto@30000 {
++			model = "SEC3";
++			device_type = "crypto";
++			compatible = "talitos";
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
++			/* Rev. 3.0 geometry */
++			num-channels = <4>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x000001fe>;
++			descriptor-types-mask = <0x03ab0ebf>;
++		};
++
++		sata@18000 {
++			compatible = "fsl,mpc8377-sata", "fsl,pq-sata";
++			reg = <0x18000 0x1000>;
++			interrupts = <44 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		sata@19000 {
++			compatible = "fsl,mpc8377-sata", "fsl,pq-sata";
++			reg = <0x19000 0x1000>;
++			interrupts = <45 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		/* IPIC
++		 * interrupts cell = <intr #, sense>
++		 * sense values match linux IORESOURCE_IRQ_* defines:
++		 * sense == 8: Level, low assertion
++		 * sense == 2: Edge, high-to-low change
++		 */
++		ipic: interrupt-controller@700 {
++			compatible = "fsl,ipic";
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x700 0x100>;
++		};
++	};
++
++	pci0: pci@e0008500 {
++		interrupt-map-mask = <0xf800 0 0 7>;
++		interrupt-map = <
++				/* IRQ5 = 21 = 0x15, IRQ6 = 0x16, IRQ7 = 23 = 0x17 */
++
++				/* IDSEL AD14 IRQ6 inta */
++				 0x7000 0x0 0x0 0x1 &ipic 22 0x8
++
++				/* IDSEL AD15 IRQ5 inta, IRQ6 intb, IRQ7 intd */
++				 0x7800 0x0 0x0 0x1 &ipic 21 0x8
++				 0x7800 0x0 0x0 0x2 &ipic 22 0x8
++				 0x7800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL AD28 IRQ7 inta, IRQ5 intb IRQ6 intc*/
++				 0xE000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xE000 0x0 0x0 0x2 &ipic 21 0x8
++				 0xE000 0x0 0x0 0x3 &ipic 22 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++		          0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++		          0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++		clock-frequency = <66666666>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008500 0x100>;
++		compatible = "fsl,mpc8349-pci";
++		device_type = "pci";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts
+new file mode 100644
+index 0000000..533e9b0
+--- /dev/null
++++ b/arch/powerpc/boot/dts/mpc8378_mds.dts
+@@ -0,0 +1,266 @@
 +/*
-+ * Fixed TLB entries to identity map the beginning of RAM
++ * MPC8378E MDS Device Tree Source
++ *
++ * Copyright 2007 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
 + */
-+#define MMUIR_TEXT_H	0x0000000000000003 | CONFIG_MEMORY_START
-+			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-+#define MMUIR_TEXT_L	0x000000000000009a | CONFIG_MEMORY_START
-+			/* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. */
 +
-+#define MMUDR_CACHED_H	0x0000000000000003 | CONFIG_MEMORY_START
-+			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-+#define MMUDR_CACHED_L	0x000000000000015a | CONFIG_MEMORY_START
-+			/* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */
++/dts-v1/;
 +
-+#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
-+#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
++/ {
++	model = "fsl,mpc8378emds";
++	compatible = "fsl,mpc8378emds","fsl,mpc837xmds";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+#if 1
-+#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	/* OCE + OCI + WB */
-+#else
-+#define	OCCR0_INIT_VAL	OCCR0_OFF
-+#endif
-+#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			/* No locking */
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8378@0 {
++			device_type = "cpu";
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
 +
-+	.text
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x20000000>;	// 512MB at 0
++	};
 +
-+	.global	startup
-+startup:
-+	/*
-+	 * Prevent speculative fetch on device memory due to
-+	 * uninitialized target registers.
-+	 * This must be executed before the first branch.
-+	 */
-+	ptabs/u	r63, tr0
-+	ptabs/u	r63, tr1
-+	ptabs/u	r63, tr2
-+	ptabs/u	r63, tr3
-+	ptabs/u	r63, tr4
-+	ptabs/u	r63, tr5
-+	ptabs/u	r63, tr6
-+	ptabs/u	r63, tr7
-+	synci
++	soc@e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <0>;
++
++		wdt@200 {
++			compatible = "mpc83xx_wdt";
++			reg = <0x200 0x100>;
++		};
 +
-+	/*
-+	 * Set initial TLB entries for cached and uncached regions.
-+	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
-+	 */
-+	/* Clear ITLBs */
-+	pta	1f, tr1
-+	movi	ITLB_FIXED, r21
-+	movi	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
-+1:	putcfg	r21, 0, r63		/* Clear MMUIR[n].PTEH.V */
-+	addi	r21, TLB_STEP, r21
-+        bne	r21, r22, tr1
++		i2c@3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
 +
-+	/* Clear DTLBs */
-+	pta	1f, tr1
-+	movi	DTLB_FIXED, r21
-+	movi	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
-+1:	putcfg	r21, 0, r63		/* Clear MMUDR[n].PTEH.V */
-+	addi	r21, TLB_STEP, r21
-+        bne	r21, r22, tr1
++		i2c@3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
 +
-+	/* Map one big (512Mb) page for ITLB */
-+	movi	ITLB_FIXED, r21
-+	movi	MMUIR_TEXT_L, r22	/* PTEL first */
-+	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
-+	movi	MMUIR_TEXT_H, r22	/* PTEH last */
-+	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */
++		spi@7000 {
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
++			mode = "cpu";
++		};
 +
-+	/* Map one big CACHED (512Mb) page for DTLB */
-+	movi	DTLB_FIXED, r21
-+	movi	MMUDR_CACHED_L, r22	/* PTEL first */
-+	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
-+	movi	MMUDR_CACHED_H, r22	/* PTEH last */
-+	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
++		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
++		usb@23000 {
++			compatible = "fsl-usb2-dr";
++			reg = <0x23000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
++			phy_type = "utmi_wide";
++		};
 +
-+	/* ICache */
-+	movi	ICCR_BASE, r21
-+	movi	ICCR0_INIT_VAL, r22
-+	movi	ICCR1_INIT_VAL, r23
-+	putcfg	r21, ICCR_REG0, r22
-+	putcfg	r21, ICCR_REG1, r23
-+	synci
++		mdio@24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++			phy2: ethernet-phy@2 {
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy@3 {
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x3>;
++				device_type = "ethernet-phy";
++			};
++		};
 +
-+	/* OCache */
-+	movi	OCCR_BASE, r21
-+	movi	OCCR0_INIT_VAL, r22
-+	movi	OCCR1_INIT_VAL, r23
-+	putcfg	r21, OCCR_REG0, r22
-+	putcfg	r21, OCCR_REG1, r23
-+	synco
++		enet0: ethernet@24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy2>;
++		};
 +
-+	/*
-+	 * Enable the MMU.
-+	 * From here-on code can be non-PIC.
-+	 */
-+	movi	SR_HARMLESS | SR_ENABLE_MMU, r22
-+	putcon	r22, SSR
-+	movi	1f, r22
-+	putcon	r22, SPC
-+	synco
-+	rte				/* And now go into the hyperspace ... */
-+1:					/* ... that's the next instruction ! */
++		enet1: ethernet@25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy3>;
++		};
 +
-+	/* Set initial stack pointer */
-+	movi	datalabel stack_start, r0
-+	ld.l	r0, 0, r15
++		serial0: serial@4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;
++			clock-frequency = <0>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+	/*
-+	 * Clear bss
-+	 */
-+	pt	1f, tr1
-+	movi	datalabel __bss_start, r22
-+	movi	datalabel _end, r23
-+1:	st.l	r22, 0, r63
-+	addi	r22, 4, r22
-+	bne	r22, r23, tr1
++		serial1: serial@4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;
++			clock-frequency = <0>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+	/*
-+	 * Decompress the kernel.
-+	 */
-+	pt	decompress_kernel, tr0
-+	blink	tr0, r18
++		crypto@30000 {
++			model = "SEC3";
++			compatible = "talitos";
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
++			/* Rev. 3.0 geometry */
++			num-channels = <4>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x000001fe>;
++			descriptor-types-mask = <0x03ab0ebf>;
++		};
 +
-+	/*
-+	 * Disable the MMU.
-+	 */
-+	movi	SR_HARMLESS, r22
-+	putcon	r22, SSR
-+	movi	1f, r22
-+	putcon	r22, SPC
-+	synco
-+	rte				/* And now go into the hyperspace ... */
-+1:					/* ... that's the next instruction ! */
++		sdhc@2e000 {
++			model = "eSDHC";
++			compatible = "fsl,esdhc";
++			reg = <0x2e000 0x1000>;
++			interrupts = <42 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+	/* Jump into the decompressed kernel */
-+	movi	datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19
-+	ptabs	r19, tr0
-+	blink	tr0, r18
++		/* IPIC
++		 * interrupts cell = <intr #, sense>
++		 * sense values match linux IORESOURCE_IRQ_* defines:
++		 * sense == 8: Level, low assertion
++		 * sense == 2: Edge, high-to-low change
++		 */
++		ipic: pic@700 {
++			compatible = "fsl,ipic";
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x700 0x100>;
++		};
++	};
 +
-+	/* Shouldn't return here, but just in case, loop forever */
-+	pt	1f, tr0
-+1:	blink	tr0, r63
-diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
-deleted file mode 100644
-index df65e30..0000000
---- a/arch/sh/boot/compressed/misc.c
-+++ /dev/null
-@@ -1,241 +0,0 @@
--/*
-- * arch/sh/boot/compressed/misc.c
-- *
-- * This is a collection of several routines from gzip-1.0.3
-- * adapted for Linux.
-- *
-- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
-- *
-- * Adapted for SH by Stuart Menefy, Aug 1999
-- *
-- * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000
-- */
--
--#include <asm/uaccess.h>
--#include <asm/addrspace.h>
--#include <asm/page.h>
--#ifdef CONFIG_SH_STANDARD_BIOS
--#include <asm/sh_bios.h>
--#endif
--
--/*
-- * gzip declarations
-- */
--
--#define OF(args)  args
--#define STATIC static
--
--#undef memset
--#undef memcpy
--#define memzero(s, n)     memset ((s), 0, (n))
--
--typedef unsigned char  uch;
--typedef unsigned short ush;
--typedef unsigned long  ulg;
--
--#define WSIZE 0x8000		/* Window size must be at least 32k, */
--				/* and a power of two */
--
--static uch *inbuf;	     /* input buffer */
--static uch window[WSIZE];    /* Sliding window buffer */
--
--static unsigned insize = 0;  /* valid bytes in inbuf */
--static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
--static unsigned outcnt = 0;  /* bytes in output buffer */
--
--/* gzip flag byte */
--#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
--#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
--#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
--#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
--#define COMMENT      0x10 /* bit 4 set: file comment present */
--#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
--#define RESERVED     0xC0 /* bit 6,7:   reserved */
--
--#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
--
--/* Diagnostic functions */
--#ifdef DEBUG
--#  define Assert(cond,msg) {if(!(cond)) error(msg);}
--#  define Trace(x) fprintf x
--#  define Tracev(x) {if (verbose) fprintf x ;}
--#  define Tracevv(x) {if (verbose>1) fprintf x ;}
--#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
--#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
--#else
--#  define Assert(cond,msg)
--#  define Trace(x)
--#  define Tracev(x)
--#  define Tracevv(x)
--#  define Tracec(c,x)
--#  define Tracecv(c,x)
--#endif
--
--static int  fill_inbuf(void);
--static void flush_window(void);
--static void error(char *m);
--static void gzip_mark(void **);
--static void gzip_release(void **);
--
--extern char input_data[];
--extern int input_len;
--
--static long bytes_out = 0;
--static uch *output_data;
--static unsigned long output_ptr = 0;
--
--static void *malloc(int size);
--static void free(void *where);
--static void error(char *m);
--static void gzip_mark(void **);
--static void gzip_release(void **);
--
--int puts(const char *);
--
--extern int _text;		/* Defined in vmlinux.lds.S */
--extern int _end;
--static unsigned long free_mem_ptr;
--static unsigned long free_mem_end_ptr;
--
--#define HEAP_SIZE             0x10000
--
--#include "../../../../lib/inflate.c"
--
--static void *malloc(int size)
--{
--	void *p;
--
--	if (size <0) error("Malloc error");
--	if (free_mem_ptr == 0) error("Memory error");
--
--	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
--
--	p = (void *)free_mem_ptr;
--	free_mem_ptr += size;
--
--	if (free_mem_ptr >= free_mem_end_ptr)
--		error("Out of memory");
--
--	return p;
--}
--
--static void free(void *where)
--{	/* Don't care */
--}
--
--static void gzip_mark(void **ptr)
--{
--	*ptr = (void *) free_mem_ptr;
--}
--
--static void gzip_release(void **ptr)
--{
--	free_mem_ptr = (long) *ptr;
--}
--
--#ifdef CONFIG_SH_STANDARD_BIOS
--size_t strlen(const char *s)
--{
--	int i = 0;
--
--	while (*s++)
--		i++;
--	return i;
--}
--
--int puts(const char *s)
--{
--	int len = strlen(s);
--	sh_bios_console_write(s, len);
--	return len;
--}
--#else
--int puts(const char *s)
--{
--	/* This should be updated to use the sh-sci routines */
--	return 0;
--}
--#endif
--
--void* memset(void* s, int c, size_t n)
--{
--	int i;
--	char *ss = (char*)s;
--
--	for (i=0;i<n;i++) ss[i] = c;
--	return s;
--}
--
--void* memcpy(void* __dest, __const void* __src,
--			    size_t __n)
--{
--	int i;
--	char *d = (char *)__dest, *s = (char *)__src;
--
--	for (i=0;i<__n;i++) d[i] = s[i];
--	return __dest;
--}
--
--/* ===========================================================================
-- * Fill the input buffer. This is called only when the buffer is empty
-- * and at least one byte is really needed.
-- */
--static int fill_inbuf(void)
--{
--	if (insize != 0) {
--		error("ran out of input data");
--	}
--
--	inbuf = input_data;
--	insize = input_len;
--	inptr = 1;
--	return inbuf[0];
--}
--
--/* ===========================================================================
-- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
-- * (Used for the decompressed data only.)
-- */
--static void flush_window(void)
--{
--    ulg c = crc;         /* temporary variable */
--    unsigned n;
--    uch *in, *out, ch;
--
--    in = window;
--    out = &output_data[output_ptr];
--    for (n = 0; n < outcnt; n++) {
--	    ch = *out++ = *in++;
--	    c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
--    }
--    crc = c;
--    bytes_out += (ulg)outcnt;
--    output_ptr += (ulg)outcnt;
--    outcnt = 0;
--}
--
--static void error(char *x)
--{
--	puts("\n\n");
--	puts(x);
--	puts("\n\n -- System halted");
--
--	while(1);	/* Halt */
--}
--
--#define STACK_SIZE (4096)
--long user_stack [STACK_SIZE];
--long* stack_start = &user_stack[STACK_SIZE];
--
--void decompress_kernel(void)
--{
--	output_data = 0;
--	output_ptr = P2SEGADDR((unsigned long)&_text+PAGE_SIZE);
--	free_mem_ptr = (unsigned long)&_end;
--	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
--
--	makecrc();
--	puts("Uncompressing Linux... ");
--	gunzip();
--	puts("Ok, booting the kernel.\n");
--}
-diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c
++	pci0: pci@e0008500 {
++		cell-index = <0>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++
++				/* IDSEL 0x11 */
++				 0x8800 0x0 0x0 0x1 &ipic 20 0x8
++				 0x8800 0x0 0x0 0x2 &ipic 21 0x8
++				 0x8800 0x0 0x0 0x3 &ipic 22 0x8
++				 0x8800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL 0x12 */
++				 0x9000 0x0 0x0 0x1 &ipic 22 0x8
++				 0x9000 0x0 0x0 0x2 &ipic 23 0x8
++				 0x9000 0x0 0x0 0x3 &ipic 20 0x8
++				 0x9000 0x0 0x0 0x4 &ipic 21 0x8
++
++				/* IDSEL 0x13 */
++				 0x9800 0x0 0x0 0x1 &ipic 23 0x8
++				 0x9800 0x0 0x0 0x2 &ipic 20 0x8
++				 0x9800 0x0 0x0 0x3 &ipic 21 0x8
++				 0x9800 0x0 0x0 0x4 &ipic 22 0x8
++
++				/* IDSEL 0x15 */
++				 0xa800 0x0 0x0 0x1 &ipic 20 0x8
++				 0xa800 0x0 0x0 0x2 &ipic 21 0x8
++				 0xa800 0x0 0x0 0x3 &ipic 22 0x8
++				 0xa800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL 0x16 */
++				 0xb000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xb000 0x0 0x0 0x2 &ipic 20 0x8
++				 0xb000 0x0 0x0 0x3 &ipic 21 0x8
++				 0xb000 0x0 0x0 0x4 &ipic 22 0x8
++
++				/* IDSEL 0x17 */
++				 0xb800 0x0 0x0 0x1 &ipic 22 0x8
++				 0xb800 0x0 0x0 0x2 &ipic 23 0x8
++				 0xb800 0x0 0x0 0x3 &ipic 20 0x8
++				 0xb800 0x0 0x0 0x4 &ipic 21 0x8
++
++				/* IDSEL 0x18 */
++				 0xc000 0x0 0x0 0x1 &ipic 21 0x8
++				 0xc000 0x0 0x0 0x2 &ipic 22 0x8
++				 0xc000 0x0 0x0 0x3 &ipic 23 0x8
++				 0xc000 0x0 0x0 0x4 &ipic 20 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++		          0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++		          0x01000000 0x0 0x00000000 0xe0300000 0x0 0x00100000>;
++		clock-frequency = <0>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008500 0x100>;
++		compatible = "fsl,mpc8349-pci";
++		device_type = "pci";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
 new file mode 100644
-index 0000000..adcea31
+index 0000000..9271153
 --- /dev/null
-+++ b/arch/sh/boot/compressed/misc_32.c
-@@ -0,0 +1,244 @@
++++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
+@@ -0,0 +1,282 @@
 +/*
-+ * arch/sh/boot/compressed/misc.c
++ * MPC8378E RDB Device Tree Source
 + *
-+ * This is a collection of several routines from gzip-1.0.3
-+ * adapted for Linux.
++ * Copyright 2007, 2008 Freescale Semiconductor Inc.
 + *
-+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/dts-v1/;
++
++/ {
++	compatible = "fsl,mpc8378rdb";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8378@0 {
++			device_type = "cpu";
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;	// 256MB at 0
++	};
++
++	localbus@e0005000 {
++		#address-cells = <2>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8378-elbc", "fsl,elbc", "simple-bus";
++		reg = <0xe0005000 0x1000>;
++		interrupts = <77 0x8>;
++		interrupt-parent = <&ipic>;
++
++		// CS0 and CS1 are swapped when
++		// booting from nand, but the
++		// addresses are the same.
++		ranges = <0x0 0x0 0xfe000000 0x00800000
++		          0x1 0x0 0xe0600000 0x00008000
++		          0x2 0x0 0xf0000000 0x00020000
++		          0x3 0x0 0xfa000000 0x00008000>;
++
++		flash@0,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "cfi-flash";
++			reg = <0x0 0x0 0x800000>;
++			bank-width = <2>;
++			device-width = <1>;
++		};
++
++		nand@1,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8378-fcm-nand",
++			             "fsl,elbc-fcm-nand";
++			reg = <0x1 0x0 0x8000>;
++
++			u-boot@0 {
++				reg = <0x0 0x100000>;
++				read-only;
++			};
++
++			kernel@100000 {
++				reg = <0x100000 0x300000>;
++			};
++			fs@400000 {
++				reg = <0x400000 0x1c00000>;
++			};
++		};
++	};
++
++	immr@e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		compatible = "simple-bus";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <0>;
++
++		wdt@200 {
++			device_type = "watchdog";
++			compatible = "mpc83xx_wdt";
++			reg = <0x200 0x100>;
++		};
++
++		i2c@3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++			rtc@68 {
++				device_type = "rtc";
++				compatible = "dallas,ds1339";
++				reg = <0x68>;
++			};
++		};
++
++		i2c@3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
++
++		spi@7000 {
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
++			mode = "cpu";
++		};
++
++		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
++		usb@23000 {
++			compatible = "fsl-usb2-dr";
++			reg = <0x23000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
++			phy_type = "utmi";
++		};
++
++		mdio@24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++			phy2: ethernet-phy@2 {
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy@3 {
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x3>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet@24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet@25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy3>;
++		};
++
++		serial0: serial@4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;
++			clock-frequency = <0>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		serial1: serial@4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;
++			clock-frequency = <0>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		crypto@30000 {
++			model = "SEC3";
++			device_type = "crypto";
++			compatible = "talitos";
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
++			/* Rev. 3.0 geometry */
++			num-channels = <4>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x000001fe>;
++			descriptor-types-mask = <0x03ab0ebf>;
++		};
++
++		/* IPIC
++		 * interrupts cell = <intr #, sense>
++		 * sense values match linux IORESOURCE_IRQ_* defines:
++		 * sense == 8: Level, low assertion
++		 * sense == 2: Edge, high-to-low change
++		 */
++		ipic: interrupt-controller@700 {
++			compatible = "fsl,ipic";
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x700 0x100>;
++		};
++	};
++
++	pci0: pci@e0008500 {
++		interrupt-map-mask = <0xf800 0 0 7>;
++		interrupt-map = <
++				/* IRQ5 = 21 = 0x15, IRQ6 = 0x16, IRQ7 = 23 = 0x17 */
++
++				/* IDSEL AD14 IRQ6 inta */
++				 0x7000 0x0 0x0 0x1 &ipic 22 0x8
++
++				/* IDSEL AD15 IRQ5 inta, IRQ6 intb, IRQ7 intd */
++				 0x7800 0x0 0x0 0x1 &ipic 21 0x8
++				 0x7800 0x0 0x0 0x2 &ipic 22 0x8
++				 0x7800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL AD28 IRQ7 inta, IRQ5 intb IRQ6 intc*/
++				 0xE000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xE000 0x0 0x0 0x2 &ipic 21 0x8
++				 0xE000 0x0 0x0 0x3 &ipic 22 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++		          0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++		          0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++		clock-frequency = <66666666>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008500 0x100>;
++		compatible = "fsl,mpc8349-pci";
++		device_type = "pci";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts
+new file mode 100644
+index 0000000..c270685
+--- /dev/null
++++ b/arch/powerpc/boot/dts/mpc8379_mds.dts
+@@ -0,0 +1,294 @@
++/*
++ * MPC8379E MDS Device Tree Source
 + *
-+ * Adapted for SH by Stuart Menefy, Aug 1999
++ * Copyright 2007 Freescale Semiconductor Inc.
 + *
-+ * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
 + */
 +
-+#include <asm/uaccess.h>
-+#include <asm/addrspace.h>
-+#include <asm/page.h>
-+#ifdef CONFIG_SH_STANDARD_BIOS
-+#include <asm/sh_bios.h>
-+#endif
++/dts-v1/;
++
++/ {
++	model = "fsl,mpc8379emds";
++	compatible = "fsl,mpc8379emds","fsl,mpc837xmds";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8379@0 {
++			device_type = "cpu";
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x20000000>;	// 512MB at 0
++	};
++
++	soc@e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <0>;
++
++		wdt@200 {
++			compatible = "mpc83xx_wdt";
++			reg = <0x200 0x100>;
++		};
++
++		i2c@3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
++
++		i2c@3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
++
++		spi@7000 {
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
++			mode = "cpu";
++		};
++
++		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
++		usb@23000 {
++			compatible = "fsl-usb2-dr";
++			reg = <0x23000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
++			phy_type = "utmi_wide";
++		};
++
++		mdio@24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++			phy2: ethernet-phy@2 {
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy@3 {
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x3>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet@24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet@25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy3>;
++		};
++
++		serial0: serial@4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;
++			clock-frequency = <0>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		serial1: serial@4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;
++			clock-frequency = <0>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		crypto@30000 {
++			model = "SEC3";
++			compatible = "talitos";
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
++			/* Rev. 3.0 geometry */
++			num-channels = <4>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x000001fe>;
++			descriptor-types-mask = <0x03ab0ebf>;
++		};
++
++		sdhc@2e000 {
++			model = "eSDHC";
++			compatible = "fsl,esdhc";
++			reg = <0x2e000 0x1000>;
++			interrupts = <42 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		sata@18000 {
++			compatible = "fsl,mpc8379-sata";
++			reg = <0x18000 0x1000>;
++			interrupts = <44 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		sata@19000 {
++			compatible = "fsl,mpc8379-sata";
++			reg = <0x19000 0x1000>;
++			interrupts = <45 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		sata@1a000 {
++			compatible = "fsl,mpc8379-sata";
++			reg = <0x1a000 0x1000>;
++			interrupts = <46 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		sata@1b000 {
++			compatible = "fsl,mpc8379-sata";
++			reg = <0x1b000 0x1000>;
++			interrupts = <47 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		/* IPIC
++		 * interrupts cell = <intr #, sense>
++		 * sense values match linux IORESOURCE_IRQ_* defines:
++		 * sense == 8: Level, low assertion
++		 * sense == 2: Edge, high-to-low change
++		 */
++		ipic: pic@700 {
++			compatible = "fsl,ipic";
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x700 0x100>;
++		};
++	};
++
++	pci0: pci@e0008500 {
++		cell-index = <0>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++
++				/* IDSEL 0x11 */
++				 0x8800 0x0 0x0 0x1 &ipic 20 0x8
++				 0x8800 0x0 0x0 0x2 &ipic 21 0x8
++				 0x8800 0x0 0x0 0x3 &ipic 22 0x8
++				 0x8800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL 0x12 */
++				 0x9000 0x0 0x0 0x1 &ipic 22 0x8
++				 0x9000 0x0 0x0 0x2 &ipic 23 0x8
++				 0x9000 0x0 0x0 0x3 &ipic 20 0x8
++				 0x9000 0x0 0x0 0x4 &ipic 21 0x8
++
++				/* IDSEL 0x13 */
++				 0x9800 0x0 0x0 0x1 &ipic 23 0x8
++				 0x9800 0x0 0x0 0x2 &ipic 20 0x8
++				 0x9800 0x0 0x0 0x3 &ipic 21 0x8
++				 0x9800 0x0 0x0 0x4 &ipic 22 0x8
++
++				/* IDSEL 0x15 */
++				 0xa800 0x0 0x0 0x1 &ipic 20 0x8
++				 0xa800 0x0 0x0 0x2 &ipic 21 0x8
++				 0xa800 0x0 0x0 0x3 &ipic 22 0x8
++				 0xa800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL 0x16 */
++				 0xb000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xb000 0x0 0x0 0x2 &ipic 20 0x8
++				 0xb000 0x0 0x0 0x3 &ipic 21 0x8
++				 0xb000 0x0 0x0 0x4 &ipic 22 0x8
++
++				/* IDSEL 0x17 */
++				 0xb800 0x0 0x0 0x1 &ipic 22 0x8
++				 0xb800 0x0 0x0 0x2 &ipic 23 0x8
++				 0xb800 0x0 0x0 0x3 &ipic 20 0x8
++				 0xb800 0x0 0x0 0x4 &ipic 21 0x8
++
++				/* IDSEL 0x18 */
++				 0xc000 0x0 0x0 0x1 &ipic 21 0x8
++				 0xc000 0x0 0x0 0x2 &ipic 22 0x8
++				 0xc000 0x0 0x0 0x3 &ipic 23 0x8
++				 0xc000 0x0 0x0 0x4 &ipic 20 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++		          0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++		          0x01000000 0x0 0x00000000 0xe0300000 0x0 0x00100000>;
++		clock-frequency = <0>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008500 0x100>;
++		compatible = "fsl,mpc8349-pci";
++		device_type = "pci";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
+new file mode 100644
+index 0000000..0dda2fc
+--- /dev/null
++++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
+@@ -0,0 +1,310 @@
 +/*
-+ * gzip declarations
++ * MPC8379E RDB Device Tree Source
++ *
++ * Copyright 2007, 2008 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
 + */
 +
-+#define OF(args)  args
-+#define STATIC static
++/dts-v1/;
 +
-+#undef memset
-+#undef memcpy
-+#define memzero(s, n)     memset ((s), 0, (n))
++/ {
++	compatible = "fsl,mpc8379rdb";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+typedef unsigned char  uch;
-+typedef unsigned short ush;
-+typedef unsigned long  ulg;
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8379@0 {
++			device_type = "cpu";
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
 +
-+#define WSIZE 0x8000		/* Window size must be at least 32k, */
-+				/* and a power of two */
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;	// 256MB at 0
++	};
 +
-+static uch *inbuf;	     /* input buffer */
-+static uch window[WSIZE];    /* Sliding window buffer */
++	localbus@e0005000 {
++		#address-cells = <2>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8379-elbc", "fsl,elbc", "simple-bus";
++		reg = <0xe0005000 0x1000>;
++		interrupts = <77 0x8>;
++		interrupt-parent = <&ipic>;
++
++		// CS0 and CS1 are swapped when
++		// booting from nand, but the
++		// addresses are the same.
++		ranges = <0x0 0x0 0xfe000000 0x00800000
++		          0x1 0x0 0xe0600000 0x00008000
++		          0x2 0x0 0xf0000000 0x00020000
++		          0x3 0x0 0xfa000000 0x00008000>;
++
++		flash@0,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "cfi-flash";
++			reg = <0x0 0x0 0x800000>;
++			bank-width = <2>;
++			device-width = <1>;
++		};
 +
-+static unsigned insize = 0;  /* valid bytes in inbuf */
-+static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
-+static unsigned outcnt = 0;  /* bytes in output buffer */
++		nand@1,0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8379-fcm-nand",
++			             "fsl,elbc-fcm-nand";
++			reg = <0x1 0x0 0x8000>;
++
++			u-boot@0 {
++				reg = <0x0 0x100000>;
++				read-only;
++			};
 +
-+/* gzip flag byte */
-+#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
-+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-+#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
-+#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
-+#define COMMENT      0x10 /* bit 4 set: file comment present */
-+#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
-+#define RESERVED     0xC0 /* bit 6,7:   reserved */
++			kernel@100000 {
++				reg = <0x100000 0x300000>;
++			};
++			fs@400000 {
++				reg = <0x400000 0x1c00000>;
++			};
++		};
++	};
 +
-+#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
++	immr@e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		compatible = "simple-bus";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <0>;
++
++		wdt@200 {
++			device_type = "watchdog";
++			compatible = "mpc83xx_wdt";
++			reg = <0x200 0x100>;
++		};
 +
-+/* Diagnostic functions */
-+#ifdef DEBUG
-+#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-+#  define Trace(x) fprintf x
-+#  define Tracev(x) {if (verbose) fprintf x ;}
-+#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-+#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-+#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-+#else
-+#  define Assert(cond,msg)
-+#  define Trace(x)
-+#  define Tracev(x)
-+#  define Tracevv(x)
-+#  define Tracec(c,x)
-+#  define Tracecv(c,x)
-+#endif
++		i2c@3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++			rtc@68 {
++				device_type = "rtc";
++				compatible = "dallas,ds1339";
++				reg = <0x68>;
++			};
++		};
 +
-+static int  fill_inbuf(void);
-+static void flush_window(void);
-+static void error(char *m);
-+static void gzip_mark(void **);
-+static void gzip_release(void **);
++		i2c@3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
 +
-+extern char input_data[];
-+extern int input_len;
++		spi@7000 {
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
++			mode = "cpu";
++		};
 +
-+static long bytes_out = 0;
-+static uch *output_data;
-+static unsigned long output_ptr = 0;
++		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
++		usb@23000 {
++			compatible = "fsl-usb2-dr";
++			reg = <0x23000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
++			phy_type = "utmi";
++		};
 +
-+static void *malloc(int size);
-+static void free(void *where);
-+static void error(char *m);
-+static void gzip_mark(void **);
-+static void gzip_release(void **);
++		mdio@24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++			phy2: ethernet-phy@2 {
++				interrupt-parent = <&ipic>;
++				interrupts = <17 0x8>;
++				reg = <0x2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy@3 {
++				interrupt-parent = <&ipic>;
++				interrupts = <18 0x8>;
++				reg = <0x3>;
++				device_type = "ethernet-phy";
++			};
++		};
 +
-+int puts(const char *);
++		enet0: ethernet@24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy2>;
++		};
 +
-+extern int _text;		/* Defined in vmlinux.lds.S */
-+extern int _end;
-+static unsigned long free_mem_ptr;
-+static unsigned long free_mem_end_ptr;
++		enet1: ethernet@25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			phy-connection-type = "mii";
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy3>;
++		};
 +
-+#define HEAP_SIZE             0x10000
++		serial0: serial@4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;
++			clock-frequency = <0>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+#include "../../../../lib/inflate.c"
++		serial1: serial@4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;
++			clock-frequency = <0>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+static void *malloc(int size)
-+{
-+	void *p;
++		crypto@30000 {
++			model = "SEC3";
++			device_type = "crypto";
++			compatible = "talitos";
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
++			/* Rev. 3.0 geometry */
++			num-channels = <4>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x000001fe>;
++			descriptor-types-mask = <0x03ab0ebf>;
++		};
 +
-+	if (size <0) error("Malloc error");
-+	if (free_mem_ptr == 0) error("Memory error");
++		sata@18000 {
++			compatible = "fsl,mpc8379-sata", "fsl,pq-sata";
++			reg = <0x18000 0x1000>;
++			interrupts = <44 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
++		sata@19000 {
++			compatible = "fsl,mpc8379-sata", "fsl,pq-sata";
++			reg = <0x19000 0x1000>;
++			interrupts = <45 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+	p = (void *)free_mem_ptr;
-+	free_mem_ptr += size;
++		sata@1a000 {
++			compatible = "fsl,mpc8379-sata", "fsl,pq-sata";
++			reg = <0x1a000 0x1000>;
++			interrupts = <46 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+	if (free_mem_ptr >= free_mem_end_ptr)
-+		error("Out of memory");
++		sata@1b000 {
++			compatible = "fsl,mpc8379-sata", "fsl,pq-sata";
++			reg = <0x1b000 0x1000>;
++			interrupts = <47 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+	return p;
-+}
++		/* IPIC
++		 * interrupts cell = <intr #, sense>
++		 * sense values match linux IORESOURCE_IRQ_* defines:
++		 * sense == 8: Level, low assertion
++		 * sense == 2: Edge, high-to-low change
++		 */
++		ipic: interrupt-controller@700 {
++			compatible = "fsl,ipic";
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x700 0x100>;
++		};
++	};
 +
-+static void free(void *where)
-+{	/* Don't care */
-+}
++	pci0: pci@e0008500 {
++		interrupt-map-mask = <0xf800 0 0 7>;
++		interrupt-map = <
++				/* IRQ5 = 21 = 0x15, IRQ6 = 0x16, IRQ7 = 23 = 0x17 */
++
++				/* IDSEL AD14 IRQ6 inta */
++				 0x7000 0x0 0x0 0x1 &ipic 22 0x8
++
++				/* IDSEL AD15 IRQ5 inta, IRQ6 intb, IRQ7 intd */
++				 0x7800 0x0 0x0 0x1 &ipic 21 0x8
++				 0x7800 0x0 0x0 0x2 &ipic 22 0x8
++				 0x7800 0x0 0x0 0x4 &ipic 23 0x8
++
++				/* IDSEL AD28 IRQ7 inta, IRQ5 intb IRQ6 intc*/
++				 0xE000 0x0 0x0 0x1 &ipic 23 0x8
++				 0xE000 0x0 0x0 0x2 &ipic 21 0x8
++				 0xE000 0x0 0x0 0x3 &ipic 22 0x8>;
++		interrupt-parent = <&ipic>;
++		interrupts = <66 0x8>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++		          0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++		          0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++		clock-frequency = <66666666>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008500 0x100>;
++		compatible = "fsl,mpc8349-pci";
++		device_type = "pci";
++	};
++};
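The IDSEL comments in the interrupt-map blocks above can be checked by arithmetic: the first cell of each map entry is the PCI phys.hi word, whose bits 15:11 carry the device number driven by the IDSEL line, and interrupt-map-mask <0xf800 0 0 7> keeps exactly that field plus the interrupt pin. A small stand-alone check (illustrative only, not part of the patch):

	/* Decode the first interrupt-map cell values used in the RDB maps. */
	#include <stdio.h>

	int main(void)
	{
		/* phys.hi values from the map above: AD14, AD15, AD28. */
		unsigned int cells[] = { 0x7000, 0x7800, 0xe000 };
		unsigned int i;

		for (i = 0; i < sizeof(cells) / sizeof(cells[0]); i++)
			printf("phys.hi 0x%04x -> IDSEL AD%u\n",
			       cells[i], (cells[i] & 0xf800) >> 11);
		return 0;	/* prints 14, 15 and 28, matching the comments */
	}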
+diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
+index 6442a71..9752484 100644
+--- a/arch/powerpc/boot/dts/mpc8540ads.dts
++++ b/arch/powerpc/boot/dts/mpc8540ads.dts
+@@ -16,6 +16,15 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+static void gzip_mark(void **ptr)
-+{
-+	*ptr = (void *) free_mem_ptr;
-+}
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -63,7 +72,9 @@
+ 		};
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3000 100>;
+ 			interrupts = <2b 2>;
+@@ -74,9 +85,9 @@
+ 		mdio at 24520 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "gianfar";
++			compatible = "fsl,gianfar-mdio";
+ 			reg = <24520 20>;
++
+ 			phy0: ethernet-phy at 0 {
+ 				interrupt-parent = <&mpic>;
+ 				interrupts = <5 1>;
+@@ -97,64 +108,44 @@
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+ 			reg = <24000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+ 			interrupts = <1d 2 1e 2 22 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy0>;
+ 		};
+ 
+-		ethernet at 25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+ 			reg = <25000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+ 			interrupts = <23 2 24 2 28 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy1>;
+ 		};
+ 
+-		ethernet at 26000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet2: ethernet at 26000 {
++			cell-index = <2>;
+ 			device_type = "network";
+ 			model = "FEC";
+ 			compatible = "gianfar";
+ 			reg = <26000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+ 			interrupts = <29 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy3>;
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4500 100>; 	// reg base, size
+@@ -163,7 +154,8 @@
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4600 100>;	// reg base, size
+@@ -183,7 +175,8 @@
+ 		};
+ 	};
+ 
+-	pci at e0008000 {
++	pci0: pci at e0008000 {
++		cell-index = <0>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 
+diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
+index f3f4d79..fa8d9aa 100644
+--- a/arch/powerpc/boot/dts/mpc8541cds.dts
++++ b/arch/powerpc/boot/dts/mpc8541cds.dts
+@@ -16,6 +16,15 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++	};
 +
-+static void gzip_release(void **ptr)
-+{
-+	free_mem_ptr = (long) *ptr;
-+}
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -63,7 +72,9 @@
+ 		};
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3000 100>;
+ 			interrupts = <2b 2>;
+@@ -74,9 +85,9 @@
+ 		mdio at 24520 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "gianfar";
++			compatible = "fsl,gianfar-mdio";
+ 			reg = <24520 20>;
++
+ 			phy0: ethernet-phy at 0 {
+ 				interrupt-parent = <&mpic>;
+ 				interrupts = <5 1>;
+@@ -91,9 +102,8 @@
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+@@ -104,9 +114,8 @@
+ 			phy-handle = <&phy0>;
+ 		};
+ 
+-		ethernet at 25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+@@ -117,7 +126,8 @@
+ 			phy-handle = <&phy1>;
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4500 100>; 	// reg base, size
+@@ -126,7 +136,8 @@
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4600 100>;	// reg base, size
+@@ -183,7 +194,8 @@
+ 		};
+ 	};
+ 
+-	pci1: pci at e0008000 {
++	pci0: pci at e0008000 {
++		cell-index = <0>;
+ 		interrupt-map-mask = <1f800 0 0 7>;
+ 		interrupt-map = <
+ 
+@@ -250,11 +262,12 @@
+ 			#interrupt-cells = <2>;
+ 			compatible = "chrp,iic";
+ 			interrupts = <1>;
+-			interrupt-parent = <&pci1>;
++			interrupt-parent = <&pci0>;
+ 		};
+ 	};
+ 
+-	pci at e0009000 {
++	pci1: pci at e0009000 {
++		cell-index = <1>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 
+diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
+index 6c608de..688af9d 100644
+--- a/arch/powerpc/boot/dts/mpc8544ds.dts
++++ b/arch/powerpc/boot/dts/mpc8544ds.dts
+@@ -15,6 +15,17 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++		pci2 = &pci2;
++		pci3 = &pci3;
++	};
 +
-+#ifdef CONFIG_SH_STANDARD_BIOS
-+size_t strlen(const char *s)
-+{
-+	int i = 0;
+ 	cpus {
+ 		#cpus = <1>;
+ 		#address-cells = <1>;
+@@ -64,7 +75,9 @@
+ 		};
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3000 100>;
+ 			interrupts = <2b 2>;
+@@ -72,12 +85,23 @@
+ 			dfsrr;
+ 		};
+ 
++		i2c at 3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <3100 100>;
++			interrupts = <2b 2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++		};
 +
-+	while (*s++)
-+		i++;
-+	return i;
-+}
+ 		mdio at 24520 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "gianfar";
++			compatible = "fsl,gianfar-mdio";
+ 			reg = <24520 20>;
++
+ 			phy0: ethernet-phy at 0 {
+ 				interrupt-parent = <&mpic>;
+ 				interrupts = <a 1>;
+@@ -92,9 +116,8 @@
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+@@ -106,9 +129,8 @@
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		ethernet at 26000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 26000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+@@ -120,7 +142,8 @@
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4500 100>;
+@@ -129,7 +152,8 @@
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4600 100>;
+@@ -156,7 +180,8 @@
+ 		};
+ 	};
+ 
+-	pci at e0008000 {
++	pci0: pci at e0008000 {
++		cell-index = <0>;
+ 		compatible = "fsl,mpc8540-pci";
+ 		device_type = "pci";
+ 		interrupt-map-mask = <f800 0 0 7>;
+@@ -187,7 +212,8 @@
+ 		reg = <e0008000 1000>;
+ 	};
+ 
+-	pcie at e0009000 {
++	pci1: pcie at e0009000 {
++		cell-index = <1>;
+ 		compatible = "fsl,mpc8548-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+@@ -223,7 +249,8 @@
+ 		};
+ 	};
+ 
+-	pcie at e000a000 {
++	pci2: pcie at e000a000 {
++		cell-index = <2>;
+ 		compatible = "fsl,mpc8548-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+@@ -259,7 +286,8 @@
+ 		};
+ 	};
+ 
+-	pcie at e000b000 {
++	pci3: pcie at e000b000 {
++		cell-index = <3>;
+ 		compatible = "fsl,mpc8548-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+@@ -276,9 +304,9 @@
+ 		interrupt-map = <
+ 			// IDSEL 0x1c  USB
+ 			e000 0 0 1 &i8259 c 2
+-			e100 0 0 1 &i8259 9 2
+-			e200 0 0 1 &i8259 a 2
+-			e300 0 0 1 &i8259 b 2
++			e100 0 0 2 &i8259 9 2
++			e200 0 0 3 &i8259 a 2
++			e300 0 0 4 &i8259 b 2
+ 
+ 			// IDSEL 0x1d  Audio
+ 			e800 0 0 1 &i8259 6 2
+@@ -369,6 +397,5 @@
+ 				};
+ 			};
+ 		};
+-
+ 	};
+ };
+diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
+index 69ca502..1f470c6 100644
+--- a/arch/powerpc/boot/dts/mpc8548cds.dts
++++ b/arch/powerpc/boot/dts/mpc8548cds.dts
+@@ -16,6 +16,20 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++/*
++		ethernet2 = &enet2;
++		ethernet3 = &enet3;
++*/
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++		pci2 = &pci2;
++	};
 +
-+int puts(const char *s)
-+{
-+	int len = strlen(s);
-+	sh_bios_console_write(s, len);
-+	return len;
-+}
-+#else
-+int puts(const char *s)
-+{
-+	/* This should be updated to use the sh-sci routines */
-+	return 0;
-+}
-+#endif
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -63,7 +77,9 @@
+ 		};
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3000 100>;
+ 			interrupts = <2b 2>;
+@@ -71,12 +87,23 @@
+ 			dfsrr;
+ 		};
+ 
++		i2c at 3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <3100 100>;
++			interrupts = <2b 2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++		};
 +
-+void* memset(void* s, int c, size_t n)
-+{
-+	int i;
-+	char *ss = (char*)s;
+ 		mdio at 24520 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "gianfar";
++			compatible = "fsl,gianfar-mdio";
+ 			reg = <24520 20>;
++
+ 			phy0: ethernet-phy at 0 {
+ 				interrupt-parent = <&mpic>;
+ 				interrupts = <5 1>;
+@@ -103,9 +130,8 @@
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+@@ -116,9 +142,8 @@
+ 			phy-handle = <&phy0>;
+ 		};
+ 
+-		ethernet at 25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+@@ -130,9 +155,8 @@
+ 		};
+ 
+ /* eTSEC 3/4 are currently broken
+-		ethernet at 26000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet2: ethernet at 26000 {
++			cell-index = <2>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+@@ -143,9 +167,8 @@
+ 			phy-handle = <&phy2>;
+ 		};
+ 
+-		ethernet at 27000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet3: ethernet at 27000 {
++			cell-index = <3>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+@@ -157,7 +180,8 @@
+ 		};
+  */
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4500 100>;	// reg base, size
+@@ -166,7 +190,8 @@
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4600 100>;	// reg base, size
+@@ -193,7 +218,8 @@
+ 		};
+ 	};
+ 
+-	pci at e0008000 {
++	pci0: pci at e0008000 {
++		cell-index = <0>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 			/* IDSEL 0x4 (PCIX Slot 2) */
+@@ -342,7 +368,8 @@
+ 		};
+ 	};
+ 
+-	pci at e0009000 {
++	pci1: pci at e0009000 {
++		cell-index = <1>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 
+@@ -366,7 +393,8 @@
+ 		device_type = "pci";
+ 	};
+ 
+-	pcie at e000a000 {
++	pci2: pcie at e000a000 {
++		cell-index = <2>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 
+diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
+index 57029cc..4538f3c 100644
+--- a/arch/powerpc/boot/dts/mpc8555cds.dts
++++ b/arch/powerpc/boot/dts/mpc8555cds.dts
+@@ -16,6 +16,15 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++	};
 +
-+	for (i=0;i<n;i++) ss[i] = c;
-+	return s;
-+}
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -63,7 +72,9 @@
+ 		};
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3000 100>;
+ 			interrupts = <2b 2>;
+@@ -74,9 +85,9 @@
+ 		mdio at 24520 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "gianfar";
++			compatible = "fsl,gianfar-mdio";
+ 			reg = <24520 20>;
++
+ 			phy0: ethernet-phy at 0 {
+ 				interrupt-parent = <&mpic>;
+ 				interrupts = <5 1>;
+@@ -91,9 +102,8 @@
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+@@ -104,9 +114,8 @@
+ 			phy-handle = <&phy0>;
+ 		};
+ 
+-		ethernet at 25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+@@ -117,7 +126,8 @@
+ 			phy-handle = <&phy1>;
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4500 100>; 	// reg base, size
+@@ -126,7 +136,8 @@
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4600 100>;	// reg base, size
+@@ -183,7 +194,8 @@
+ 		};
+ 	};
+ 
+-	pci1: pci at e0008000 {
++	pci0: pci at e0008000 {
++		cell-index = <0>;
+ 		interrupt-map-mask = <1f800 0 0 7>;
+ 		interrupt-map = <
+ 
+@@ -250,11 +262,12 @@
+ 			#interrupt-cells = <2>;
+ 			compatible = "chrp,iic";
+ 			interrupts = <1>;
+-			interrupt-parent = <&pci1>;
++			interrupt-parent = <&pci0>;
+ 		};
+ 	};
+ 
+-	pci at e0009000 {
++	pci1: pci at e0009000 {
++		cell-index = <1>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 
+diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts
+index 6b362f8..639ce8a 100644
+--- a/arch/powerpc/boot/dts/mpc8560ads.dts
++++ b/arch/powerpc/boot/dts/mpc8560ads.dts
+@@ -16,6 +16,16 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		ethernet3 = &enet3;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+void* memcpy(void* __dest, __const void* __src,
-+			    size_t __n)
-+{
-+	int i;
-+	char *d = (char *)__dest, *s = (char *)__src;
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -63,11 +73,11 @@
+ 		};
+ 
+ 		mdio at 24520 {
+-			device_type = "mdio";
+-			compatible = "gianfar";
+-			reg = <24520 20>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <24520 20>;
++
+ 			phy0: ethernet-phy at 0 {
+ 				interrupt-parent = <&mpic>;
+ 				interrupts = <5 1>;
+@@ -94,36 +104,24 @@
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+ 			reg = <24000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+ 			interrupts = <1d 2 1e 2 22 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy0>;
+ 		};
+ 
+-		ethernet at 25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+ 			reg = <25000 1000>;
+-			/*
+-			 * address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+ 			interrupts = <23 2 24 2 28 2>;
+ 			interrupt-parent = <&mpic>;
+@@ -174,7 +172,7 @@
+ 				compatible = "fsl,mpc8560-cpm-pic", "fsl,cpm2-pic";
+ 			};
+ 
+-			serial at 91a00 {
++			serial0: serial at 91a00 {
+ 				device_type = "serial";
+ 				compatible = "fsl,mpc8560-scc-uart",
+ 				             "fsl,cpm2-scc-uart";
+@@ -186,7 +184,7 @@
+ 				interrupt-parent = <&cpmpic>;
+ 			};
+ 
+-			serial at 91a20 {
++			serial1: serial at 91a20 {
+ 				device_type = "serial";
+ 				compatible = "fsl,mpc8560-scc-uart",
+ 				             "fsl,cpm2-scc-uart";
+@@ -198,17 +196,11 @@
+ 				interrupt-parent = <&cpmpic>;
+ 			};
+ 
+-			ethernet at 91320 {
++			enet2: ethernet at 91320 {
+ 				device_type = "network";
+ 				compatible = "fsl,mpc8560-fcc-enet",
+ 				             "fsl,cpm2-fcc-enet";
+ 				reg = <91320 20 88500 100 913b0 1>;
+-				/*
+-				 * mac-address is deprecated and will be removed
+-				 * in 2.6.25.  Only recent versions of
+-				 * U-Boot support local-mac-address, however.
+-				 */
+-				mac-address = [ 00 00 00 00 00 00 ];
+ 				local-mac-address = [ 00 00 00 00 00 00 ];
+ 				fsl,cpm-command = <16200300>;
+ 				interrupts = <21 8>;
+@@ -216,17 +208,11 @@
+ 				phy-handle = <&phy2>;
+ 			};
+ 
+-			ethernet at 91340 {
++			enet3: ethernet at 91340 {
+ 				device_type = "network";
+ 				compatible = "fsl,mpc8560-fcc-enet",
+ 				             "fsl,cpm2-fcc-enet";
+ 				reg = <91340 20 88600 100 913d0 1>;
+-				/*
+-				 * mac-address is deprecated and will be removed
+-				 * in 2.6.25.  Only recent versions of
+-				 * U-Boot support local-mac-address, however.
+-				 */
+-				mac-address = [ 00 00 00 00 00 00 ];
+ 				local-mac-address = [ 00 00 00 00 00 00 ];
+ 				fsl,cpm-command = <1a400300>;
+ 				interrupts = <22 8>;
+@@ -236,7 +222,8 @@
+ 		};
+ 	};
+ 
+-	pci at e0008000 {
++	pci0: pci at e0008000 {
++		cell-index = <0>;
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
+index 5439437..97bc048 100644
+--- a/arch/powerpc/boot/dts/mpc8568mds.dts
++++ b/arch/powerpc/boot/dts/mpc8568mds.dts
+@@ -20,6 +20,17 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		ethernet3 = &enet3;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++	};
 +
-+	for (i=0;i<__n;i++) d[i] = s[i];
-+	return __dest;
-+}
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -74,7 +85,7 @@
+ 		i2c at 3000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3000 100>;
+ 			interrupts = <2b 2>;
+@@ -90,7 +101,7 @@
+ 		i2c at 3100 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "i2c";
++			cell-index = <1>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3100 100>;
+ 			interrupts = <2b 2>;
+@@ -101,9 +112,9 @@
+ 		mdio at 24520 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "gianfar";
++			compatible = "fsl,gianfar-mdio";
+ 			reg = <24520 20>;
++
+ 			phy0: ethernet-phy at 7 {
+ 				interrupt-parent = <&mpic>;
+ 				interrupts = <1 1>;
+@@ -130,45 +141,32 @@
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+ 			reg = <24000 1000>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+  			interrupts = <1d 2 1e 2 22 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy2>;
+ 		};
+ 
+-		ethernet at 25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+ 			reg = <25000 1000>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+  			interrupts = <23 2 24 2 28 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy3>;
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4500 100>;
+@@ -183,7 +181,8 @@
+ 			fsl,has-rstcr;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4600 100>;
+@@ -285,24 +284,28 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "qe";
+-		model = "QE";
++		compatible = "fsl,qe";
+ 		ranges = <0 e0080000 00040000>;
+ 		reg = <e0080000 480>;
+ 		brg-frequency = <0>;
+ 		bus-frequency = <179A7B00>;
+ 
+ 		muram at 10000 {
+-			device_type = "muram";
++ 			#address-cells = <1>;
++ 			#size-cells = <1>;
++			compatible = "fsl,qe-muram", "fsl,cpm-muram";
+ 			ranges = <0 00010000 0000c000>;
+ 
+-			data-only at 0{
++			data-only at 0 {
++				compatible = "fsl,qe-muram-data",
++					     "fsl,cpm-muram-data";
+ 				reg = <0 c000>;
+ 			};
+ 		};
+ 
+ 		spi at 4c0 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
++			cell-index = <0>;
++			compatible = "fsl,spi";
+ 			reg = <4c0 40>;
+ 			interrupts = <2>;
+ 			interrupt-parent = <&qeic>;
+@@ -310,53 +313,43 @@
+ 		};
+ 
+ 		spi at 500 {
+-			device_type = "spi";
+-			compatible = "fsl_spi";
++			cell-index = <1>;
++			compatible = "fsl,spi";
+ 			reg = <500 40>;
+ 			interrupts = <1>;
+ 			interrupt-parent = <&qeic>;
+ 			mode = "cpu";
+ 		};
+ 
+-		ucc at 2000 {
++		enet2: ucc at 2000 {
+ 			device_type = "network";
+ 			compatible = "ucc_geth";
+ 			model = "UCC";
++			cell-index = <1>;
+ 			device-id = <1>;
+ 			reg = <2000 200>;
+ 			interrupts = <20>;
+ 			interrupt-parent = <&qeic>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			rx-clock = <0>;
+-			tx-clock = <20>;
++			rx-clock-name = "none";
++			tx-clock-name = "clk16";
+ 			pio-handle = <&pio1>;
+ 			phy-handle = <&phy0>;
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		ucc at 3000 {
++		enet3: ucc at 3000 {
+ 			device_type = "network";
+ 			compatible = "ucc_geth";
+ 			model = "UCC";
++			cell-index = <2>;
+ 			device-id = <2>;
+ 			reg = <3000 200>;
+ 			interrupts = <21>;
+ 			interrupt-parent = <&qeic>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			rx-clock = <0>;
+-			tx-clock = <20>;
++			rx-clock-name = "none";
++			tx-clock-name = "clk16";
+ 			pio-handle = <&pio2>;
+ 			phy-handle = <&phy1>;
+ 			phy-connection-type = "rgmii-id";
+@@ -366,8 +359,7 @@
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			reg = <2120 18>;
+-			device_type = "mdio";
+-			compatible = "ucc_geth_phy";
++			compatible = "fsl,ucc-mdio";
+ 
+ 			/* These are the same PHYs as on
+ 			 * gianfar's MDIO bus */
+@@ -397,9 +389,9 @@
+ 			};
+ 		};
+ 
+-		qeic: qeic at 80 {
++		qeic: interrupt-controller at 80 {
+ 			interrupt-controller;
+-			device_type = "qeic";
++			compatible = "fsl,qe-ic";
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <1>;
+ 			reg = <80 80>;
+@@ -410,7 +402,8 @@
+ 
+ 	};
+ 
+-	pci at e0008000 {
++	pci0: pci at e0008000 {
++		cell-index = <0>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 			/* IDSEL 0x12 AD18 */
+@@ -440,7 +433,8 @@
+ 	};
+ 
+ 	/* PCI Express */
+-	pcie at e000a000 {
++	pci1: pcie at e000a000 {
++		cell-index = <2>;
+ 		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 
+diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
+index 0eb44fb..813c259 100644
+--- a/arch/powerpc/boot/dts/mpc8572ds.dts
++++ b/arch/powerpc/boot/dts/mpc8572ds.dts
+@@ -15,6 +15,18 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		ethernet3 = &enet3;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++		pci2 = &pci2;
++	};
 +
-+/* ===========================================================================
-+ * Fill the input buffer. This is called only when the buffer is empty
-+ * and at least one byte is really needed.
-+ */
-+static int fill_inbuf(void)
-+{
-+	if (insize != 0) {
-+		error("ran out of input data");
-+	}
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -69,7 +81,9 @@
+ 		};
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3000 100>;
+ 			interrupts = <2b 2>;
+@@ -78,7 +92,9 @@
+ 		};
+ 
+ 		i2c at 3100 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
+ 			compatible = "fsl-i2c";
+ 			reg = <3100 100>;
+ 			interrupts = <2b 2>;
+@@ -89,9 +105,9 @@
+ 		mdio at 24520 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "gianfar";
++			compatible = "fsl,gianfar-mdio";
+ 			reg = <24520 20>;
++
+ 			phy0: ethernet-phy at 0 {
+ 				interrupt-parent = <&mpic>;
+ 				interrupts = <a 1>;
+@@ -114,9 +130,8 @@
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+@@ -128,9 +143,8 @@
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		ethernet at 25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+@@ -142,9 +156,8 @@
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		ethernet at 26000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet2: ethernet at 26000 {
++			cell-index = <2>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+@@ -156,9 +169,8 @@
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		ethernet at 27000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet3: ethernet at 27000 {
++			cell-index = <3>;
+ 			device_type = "network";
+ 			model = "eTSEC";
+ 			compatible = "gianfar";
+@@ -170,7 +182,8 @@
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4500 100>;
+@@ -179,7 +192,8 @@
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+ 			reg = <4600 100>;
+@@ -206,7 +220,8 @@
+ 		};
+ 	};
+ 
+-	pcie at ffe08000 {
++	pci0: pcie at ffe08000 {
++		cell-index = <0>;
+ 		compatible = "fsl,mpc8548-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+@@ -319,9 +334,9 @@
+ 
+ 			// IDSEL 0x1c  USB
+ 			e000 0 0 1 &i8259 c 2
+-			e100 0 0 1 &i8259 9 2
+-			e200 0 0 1 &i8259 a 2
+-			e300 0 0 1 &i8259 b 2
++			e100 0 0 2 &i8259 9 2
++			e200 0 0 3 &i8259 a 2
++			e300 0 0 4 &i8259 b 2
+ 
+ 			// IDSEL 0x1d  Audio
+ 			e800 0 0 1 &i8259 6 2
+@@ -415,7 +430,8 @@
+ 
+ 	};
+ 
+-	pcie at ffe09000 {
++	pci1: pcie at ffe09000 {
++		cell-index = <1>;
+ 		compatible = "fsl,mpc8548-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+@@ -451,7 +467,8 @@
+ 		};
+ 	};
+ 
+-	pcie at ffe0a000 {
++	pci2: pcie at ffe0a000 {
++		cell-index = <2>;
+ 		compatible = "fsl,mpc8548-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+@@ -464,6 +481,7 @@
+ 		clock-frequency = <1fca055>;
+ 		interrupt-parent = <&mpic>;
+ 		interrupts = <1b 2>;
++		interrupt-map-mask = <f800 0 0 7>;
+ 		interrupt-map = <
+ 			/* IDSEL 0x0 */
+ 			0000 0 0 1 &mpic 0 1
+diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+index 966edf1..16c947b 100644
+--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
++++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+@@ -1,13 +1,14 @@
+ /*
+  * MPC8610 HPCD Device Tree Source
+  *
+- * Copyright 2007 Freescale Semiconductor Inc.
++ * Copyright 2007-2008 Freescale Semiconductor Inc.
+  *
+  * This program is free software; you can redistribute  it and/or modify it
+  * under the terms of the GNU General Public License Version 2 as published
+  * by the Free Software Foundation.
+  */
+ 
++/dts-v1/;
+ 
+ / {
+ 	model = "MPC8610HPCD";
+@@ -15,6 +16,13 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++	};
 +
-+	inbuf = input_data;
-+	insize = input_len;
-+	inptr = 1;
-+	return inbuf[0];
-+}
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -22,11 +30,11 @@
+ 		PowerPC,8610 at 0 {
+ 			device_type = "cpu";
+ 			reg = <0>;
+-			d-cache-line-size = <d# 32>;	// bytes
+-			i-cache-line-size = <d# 32>;	// bytes
+-			d-cache-size = <8000>;		// L1, 32K
+-			i-cache-size = <8000>;		// L1, 32K
+-			timebase-frequency = <0>;	// 33 MHz, from uboot
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;		// L1
++			i-cache-size = <32768>;		// L1
++			timebase-frequency = <0>;	// From uboot
+ 			bus-frequency = <0>;		// From uboot
+ 			clock-frequency = <0>;		// From uboot
+ 		};
+@@ -34,7 +42,7 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 20000000>;	// 512M at 0x0
++		reg = <0x00000000 0x20000000>;	// 512M at 0x0
+ 	};
+ 
+ 	soc at e0000000 {
+@@ -42,57 +50,66 @@
+ 		#size-cells = <1>;
+ 		#interrupt-cells = <2>;
+ 		device_type = "soc";
+-		ranges = <0 e0000000 00100000>;
+-		reg = <e0000000 1000>;
++		compatible = "fsl,mpc8610-immr", "simple-bus";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x1000>;
+ 		bus-frequency = <0>;
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
+-			compatible = "fsl-i2c";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			reg = <3000 100>;
+-			interrupts = <2b 2>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <43 2>;
+ 			interrupt-parent = <&mpic>;
+ 			dfsrr;
++
++			cs4270:codec at 4f {
++				compatible = "cirrus,cs4270";
++				reg = <0x4f>;
++				/* MCLK source is a stand-alone oscillator */
++				clock-frequency = <12288000>;
++			};
+ 		};
+ 
+ 		i2c at 3100 {
+-			device_type = "i2c";
+-			compatible = "fsl-i2c";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			reg = <3100 100>;
+-			interrupts = <2b 2>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <43 2>;
+ 			interrupt-parent = <&mpic>;
+ 			dfsrr;
+ 		};
+ 
+-		serial at 4500 {
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
++			reg = <0x4500 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <2a 2>;
++			interrupts = <42 2>;
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
++			reg = <0x4600 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <1c 2>;
++			interrupts = <28 2>;
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-
+ 		mpic: interrupt-controller at 40000 {
+ 			clock-frequency = <0>;
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <40000 40000>;
++			reg = <0x40000 0x40000>;
+ 			compatible = "chrp,open-pic";
+ 			device_type = "open-pic";
+ 			big-endian;
+@@ -100,68 +117,173 @@
+ 
+ 		global-utilities at e0000 {
+ 			compatible = "fsl,mpc8610-guts";
+-			reg = <e0000 1000>;
++			reg = <0xe0000 0x1000>;
+ 			fsl,has-rstcr;
+ 		};
 +
-+/* ===========================================================================
-+ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
-+ * (Used for the decompressed data only.)
-+ */
-+static void flush_window(void)
-+{
-+    ulg c = crc;         /* temporary variable */
-+    unsigned n;
-+    uch *in, *out, ch;
++		i2s at 16000 {
++			compatible = "fsl,mpc8610-ssi";
++			cell-index = <0>;
++			reg = <0x16000 0x100>;
++			interrupt-parent = <&mpic>;
++			interrupts = <62 2>;
++			fsl,mode = "i2s-slave";
++			codec-handle = <&cs4270>;
++		};
 +
-+    in = window;
-+    out = &output_data[output_ptr];
-+    for (n = 0; n < outcnt; n++) {
-+	    ch = *out++ = *in++;
-+	    c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-+    }
-+    crc = c;
-+    bytes_out += (ulg)outcnt;
-+    output_ptr += (ulg)outcnt;
-+    outcnt = 0;
-+}
++		ssi at 16100 {
++			compatible = "fsl,mpc8610-ssi";
++			cell-index = <1>;
++			reg = <0x16100 0x100>;
++			interrupt-parent = <&mpic>;
++			interrupts = <63 2>;
++		};
 +
-+static void error(char *x)
-+{
-+	puts("\n\n");
-+	puts(x);
-+	puts("\n\n -- System halted");
++		dma at 21300 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8610-dma", "fsl,eloplus-dma";
++			cell-index = <0>;
++			reg = <0x21300 0x4>; /* DMA general status register */
++			ranges = <0x0 0x21100 0x200>;
++
++			dma-channel at 0 {
++				compatible = "fsl,mpc8610-dma-channel",
++					"fsl,eloplus-dma-channel";
++				cell-index = <0>;
++				reg = <0x0 0x80>;
++				interrupt-parent = <&mpic>;
++				interrupts = <20 2>;
++			};
++			dma-channel at 1 {
++				compatible = "fsl,mpc8610-dma-channel",
++					"fsl,eloplus-dma-channel";
++				cell-index = <1>;
++				reg = <0x80 0x80>;
++				interrupt-parent = <&mpic>;
++				interrupts = <21 2>;
++			};
++			dma-channel at 2 {
++				compatible = "fsl,mpc8610-dma-channel",
++					"fsl,eloplus-dma-channel";
++				cell-index = <2>;
++				reg = <0x100 0x80>;
++				interrupt-parent = <&mpic>;
++				interrupts = <22 2>;
++			};
++			dma-channel at 3 {
++				compatible = "fsl,mpc8610-dma-channel",
++					"fsl,eloplus-dma-channel";
++				cell-index = <3>;
++				reg = <0x180 0x80>;
++				interrupt-parent = <&mpic>;
++				interrupts = <23 2>;
++			};
++		};
 +
-+	while(1);	/* Halt */
-+}
++		dma at c300 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8610-dma", "fsl,mpc8540-dma";
++			cell-index = <1>;
++			reg = <0xc300 0x4>; /* DMA general status register */
++			ranges = <0x0 0xc100 0x200>;
++
++			dma-channel at 0 {
++				compatible = "fsl,mpc8610-dma-channel",
++					"fsl,mpc8540-dma-channel";
++				cell-index = <0>;
++				reg = <0x0 0x80>;
++				interrupt-parent = <&mpic>;
++				interrupts = <60 2>;
++			};
++			dma-channel at 1 {
++				compatible = "fsl,mpc8610-dma-channel",
++					"fsl,mpc8540-dma-channel";
++				cell-index = <1>;
++				reg = <0x80 0x80>;
++				interrupt-parent = <&mpic>;
++				interrupts = <61 2>;
++			};
++			dma-channel at 2 {
++				compatible = "fsl,mpc8610-dma-channel",
++					"fsl,mpc8540-dma-channel";
++				cell-index = <2>;
++				reg = <0x100 0x80>;
++				interrupt-parent = <&mpic>;
++				interrupts = <62 2>;
++			};
++			dma-channel at 3 {
++				compatible = "fsl,mpc8610-dma-channel",
++					"fsl,mpc8540-dma-channel";
++				cell-index = <3>;
++				reg = <0x180 0x80>;
++				interrupt-parent = <&mpic>;
++				interrupts = <63 2>;
++			};
++		};
 +
-+#define STACK_SIZE (4096)
-+long user_stack [STACK_SIZE];
-+long* stack_start = &user_stack[STACK_SIZE];
+ 	};
+ 
+-	pci at e0008000 {
++	pci0: pci at e0008000 {
++		cell-index = <0>;
+ 		compatible = "fsl,mpc8610-pci";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e0008000 1000>;
++		reg = <0xe0008000 0x1000>;
+ 		bus-range = <0 0>;
+-		ranges = <02000000 0 80000000 80000000 0 10000000
+-			  01000000 0 00000000 e1000000 0 00100000>;
+-		clock-frequency = <1fca055>;
++		ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe1000000 0x0 0x00100000>;
++		clock-frequency = <33333333>;
+ 		interrupt-parent = <&mpic>;
+-		interrupts = <18 2>;
+-		interrupt-map-mask = <f800 0 0 7>;
++		interrupts = <24 2>;
++		interrupt-map-mask = <0xf800 0 0 7>;
+ 		interrupt-map = <
+ 			/* IDSEL 0x11 */
+-			8800 0 0 1 &mpic 4 1
+-			8800 0 0 2 &mpic 5 1
+-			8800 0 0 3 &mpic 6 1
+-			8800 0 0 4 &mpic 7 1
++			0x8800 0 0 1 &mpic 4 1
++			0x8800 0 0 2 &mpic 5 1
++			0x8800 0 0 3 &mpic 6 1
++			0x8800 0 0 4 &mpic 7 1
+ 
+ 			/* IDSEL 0x12 */
+-			9000 0 0 1 &mpic 5 1
+-			9000 0 0 2 &mpic 6 1
+-			9000 0 0 3 &mpic 7 1
+-			9000 0 0 4 &mpic 4 1
++			0x9000 0 0 1 &mpic 5 1
++			0x9000 0 0 2 &mpic 6 1
++			0x9000 0 0 3 &mpic 7 1
++			0x9000 0 0 4 &mpic 4 1
+ 			>;
+ 	};
+ 
+-	pcie at e000a000 {
++	pci1: pcie at e000a000 {
++		cell-index = <1>;
+ 		compatible = "fsl,mpc8641-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <e000a000 1000>;
++		reg = <0xe000a000 0x1000>;
+ 		bus-range = <1 3>;
+-		ranges = <02000000 0 a0000000 a0000000 0 10000000
+-			  01000000 0 00000000 e3000000 0 00100000>;
+-		clock-frequency = <1fca055>;
++		ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>;
++		clock-frequency = <33333333>;
+ 		interrupt-parent = <&mpic>;
+-		interrupts = <1a 2>;
+-		interrupt-map-mask = <f800 0 0 7>;
++		interrupts = <26 2>;
++		interrupt-map-mask = <0xf800 0 0 7>;
+ 
+ 		interrupt-map = <
+ 			/* IDSEL 0x1b */
+-			d800 0 0 1 &mpic 2 1
++			0xd800 0 0 1 &mpic 2 1
+ 
+ 			/* IDSEL 0x1c*/
+-			e000 0 0 1 &mpic 1 1
+-			e000 0 0 2 &mpic 1 1
+-			e000 0 0 3 &mpic 1 1
+-			e000 0 0 4 &mpic 1 1
++			0xe000 0 0 1 &mpic 1 1
++			0xe000 0 0 2 &mpic 1 1
++			0xe000 0 0 3 &mpic 1 1
++			0xe000 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x1f */
+-			f800 0 0 1 &mpic 3 0
+-			f800 0 0 2 &mpic 0 1
++			0xf800 0 0 1 &mpic 3 0
++			0xf800 0 0 2 &mpic 0 1
+ 		>;
+ 
+ 		pcie at 0 {
+@@ -169,22 +291,22 @@
+ 			#size-cells = <2>;
+ 			#address-cells = <3>;
+ 			device_type = "pci";
+-			ranges = <02000000 0 a0000000
+-				  02000000 0 a0000000
+-				  0 10000000
+-				  01000000 0 00000000
+-				  01000000 0 00000000
+-				  0 00100000>;
++			ranges = <0x02000000 0x0 0xa0000000
++				  0x02000000 0x0 0xa0000000
++				  0x0 0x10000000
++				  0x01000000 0x0 0x00000000
++				  0x01000000 0x0 0x00000000
++				  0x0 0x00100000>;
+ 			uli1575 at 0 {
+ 				reg = <0 0 0 0 0>;
+ 				#size-cells = <2>;
+ 				#address-cells = <3>;
+-				ranges = <02000000 0 a0000000
+-					  02000000 0 a0000000
+-					  0 10000000
+-					  01000000 0 00000000
+-					  01000000 0 00000000
+-					  0 00100000>;
++				ranges = <0x02000000 0x0 0xa0000000
++					  0x02000000 0x0 0xa0000000
++					  0x0 0x10000000
++					  0x01000000 0x0 0x00000000
++					  0x01000000 0x0 0x00000000
++					  0x0 0x00100000>;
+ 			};
+ 		};
+ 	};
+diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+index abb26dc..79385bc 100644
+--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
++++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+@@ -9,6 +9,7 @@
+  * option) any later version.
+  */
+ 
++/dts-v1/;
+ 
+ / {
+ 	model = "MPC8641HPCN";
+@@ -16,6 +17,17 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		ethernet3 = &enet3;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		pci1 = &pci1;
++	};
 +
-+void decompress_kernel(void)
-+{
-+	output_data = 0;
-+	output_ptr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
-+#ifdef CONFIG_29BIT
-+	output_ptr |= P2SEG;
-+#endif
-+	free_mem_ptr = (unsigned long)&_end;
-+	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -23,22 +35,22 @@
+ 		PowerPC,8641 at 0 {
+ 			device_type = "cpu";
+ 			reg = <0>;
+-			d-cache-line-size = <20>;	// 32 bytes
+-			i-cache-line-size = <20>;	// 32 bytes
+-			d-cache-size = <8000>;		// L1, 32K
+-			i-cache-size = <8000>;		// L1, 32K
+-			timebase-frequency = <0>;	// 33 MHz, from uboot
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;		// L1
++			i-cache-size = <32768>;		// L1
++			timebase-frequency = <0>;	// From uboot
+ 			bus-frequency = <0>;		// From uboot
+ 			clock-frequency = <0>;		// From uboot
+ 		};
+ 		PowerPC,8641 at 1 {
+ 			device_type = "cpu";
+ 			reg = <1>;
+-			d-cache-line-size = <20>;	// 32 bytes
+-			i-cache-line-size = <20>;	// 32 bytes
+-			d-cache-size = <8000>;		// L1, 32K
+-			i-cache-size = <8000>;		// L1, 32K
+-			timebase-frequency = <0>;	// 33 MHz, from uboot
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;	// From uboot
+ 			bus-frequency = <0>;		// From uboot
+ 			clock-frequency = <0>;		// From uboot
+ 		};
+@@ -46,31 +58,77 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <00000000 40000000>;	// 1G at 0x0
++		reg = <0x00000000 0x40000000>;	// 1G at 0x0
++	};
 +
-+	makecrc();
-+	puts("Uncompressing Linux... ");
-+	gunzip();
-+	puts("Ok, booting the kernel.\n");
-+}
-diff --git a/arch/sh/boot/compressed/misc_64.c b/arch/sh/boot/compressed/misc_64.c
++	localbus at f8005000 {
++		#address-cells = <2>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc8641-localbus", "simple-bus";
++		reg = <0xf8005000 0x1000>;
++		interrupts = <19 2>;
++		interrupt-parent = <&mpic>;
++
++		ranges = <0 0 0xff800000 0x00800000
++			  1 0 0xfe000000 0x01000000
++			  2 0 0xf8200000 0x00100000
++			  3 0 0xf8100000 0x00100000>;
++
++		flash at 0,0 {
++			compatible = "cfi-flash";
++			reg = <0 0 0x00800000>;
++			bank-width = <2>;
++			device-width = <2>;
++			#address-cells = <1>;
++			#size-cells = <1>;
++			partition at 0 {
++				label = "kernel";
++				reg = <0x00000000 0x00300000>;
++			};
++			partition at 300000 {
++				label = "firmware b";
++				reg = <0x00300000 0x00100000>;
++				read-only;
++			};
++			partition at 400000 {
++				label = "fs";
++				reg = <0x00400000 0x00300000>;
++			};
++			partition at 700000 {
++				label = "firmware a";
++				reg = <0x00700000 0x00100000>;
++				read-only;
++			};
++		};
+ 	};
+ 
+ 	soc8641 at f8000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+-		ranges = <00000000 f8000000 00100000>;
+-		reg = <f8000000 00001000>;	// CCSRBAR
++		compatible = "simple-bus";
++		ranges = <0x00000000 0xf8000000 0x00100000>;
++		reg = <0xf8000000 0x00001000>;	// CCSRBAR
+ 		bus-frequency = <0>;
+ 
+ 		i2c at 3000 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
+ 			compatible = "fsl-i2c";
+-			reg = <3000 100>;
+-			interrupts = <2b 2>;
++			reg = <0x3000 0x100>;
++			interrupts = <43 2>;
+ 			interrupt-parent = <&mpic>;
+ 			dfsrr;
+ 		};
+ 
+ 		i2c at 3100 {
+-			device_type = "i2c";
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
+ 			compatible = "fsl-i2c";
+-			reg = <3100 100>;
+-			interrupts = <2b 2>;
++			reg = <0x3100 0x100>;
++			interrupts = <43 2>;
+ 			interrupt-parent = <&mpic>;
+ 			dfsrr;
+ 		};
+@@ -78,129 +136,104 @@
+ 		mdio at 24520 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			device_type = "mdio";
+-			compatible = "gianfar";
+-			reg = <24520 20>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
+ 			phy0: ethernet-phy at 0 {
+ 				interrupt-parent = <&mpic>;
+-				interrupts = <a 1>;
++				interrupts = <10 1>;
+ 				reg = <0>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 			phy1: ethernet-phy at 1 {
+ 				interrupt-parent = <&mpic>;
+-				interrupts = <a 1>;
++				interrupts = <10 1>;
+ 				reg = <1>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 			phy2: ethernet-phy at 2 {
+ 				interrupt-parent = <&mpic>;
+-				interrupts = <a 1>;
++				interrupts = <10 1>;
+ 				reg = <2>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 			phy3: ethernet-phy at 3 {
+ 				interrupt-parent = <&mpic>;
+-				interrupts = <a 1>;
++				interrupts = <10 1>;
+ 				reg = <3>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		ethernet at 24000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <24000 1000>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
++			reg = <0x24000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <1d 2 1e 2 22 2>;
++			interrupts = <29 2 30  2 34 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy0>;
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		ethernet at 25000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <25000 1000>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
++			reg = <0x25000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <23 2 24 2 28 2>;
++			interrupts = <35 2 36 2 40 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy1>;
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 		
+-		ethernet at 26000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet2: ethernet at 26000 {
++			cell-index = <2>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <26000 1000>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
++			reg = <0x26000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <1F 2 20 2 21 2>;
++			interrupts = <31 2 32 2 33 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy2>;
+ 			phy-connection-type = "rgmii-id";
+ 		};
+ 
+-		ethernet at 27000 {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
++		enet3: ethernet at 27000 {
++			cell-index = <3>;
+ 			device_type = "network";
+ 			model = "TSEC";
+ 			compatible = "gianfar";
+-			reg = <27000 1000>;
+-			/*
+-			 * mac-address is deprecated and will be removed
+-			 * in 2.6.25.  Only recent versions of
+-			 * U-Boot support local-mac-address, however.
+-			 */
+-			mac-address = [ 00 00 00 00 00 00 ];
++			reg = <0x27000 0x1000>;
+ 			local-mac-address = [ 00 00 00 00 00 00 ];
+-			interrupts = <25 2 26 2 27 2>;
++			interrupts = <37 2 38 2 39 2>;
+ 			interrupt-parent = <&mpic>;
+ 			phy-handle = <&phy3>;
+ 			phy-connection-type = "rgmii-id";
+ 		};
+-		serial at 4500 {
++
++		serial0: serial at 4500 {
++			cell-index = <0>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4500 100>;
++			reg = <0x4500 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <2a 2>;
++			interrupts = <42 2>;
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+-		serial at 4600 {
++		serial1: serial at 4600 {
++			cell-index = <1>;
+ 			device_type = "serial";
+ 			compatible = "ns16550";
+-			reg = <4600 100>;
++			reg = <0x4600 0x100>;
+ 			clock-frequency = <0>;
+-			interrupts = <1c 2>;
++			interrupts = <28 2>;
+ 			interrupt-parent = <&mpic>;
+ 		};
+ 
+@@ -209,7 +242,7 @@
+ 			interrupt-controller;
+ 			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+-			reg = <40000 40000>;
++			reg = <0x40000 0x40000>;
+ 			compatible = "chrp,open-pic";
+ 			device_type = "open-pic";
+ 			big-endian;
+@@ -217,138 +250,139 @@
+ 
+ 		global-utilities at e0000 {
+ 			compatible = "fsl,mpc8641-guts";
+-			reg = <e0000 1000>;
++			reg = <0xe0000 0x1000>;
+ 			fsl,has-rstcr;
+ 		};
+ 	};
+ 
+-	pcie at f8008000 {
++	pci0: pcie at f8008000 {
++		cell-index = <0>;
+ 		compatible = "fsl,mpc8641-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <f8008000 1000>;
+-		bus-range = <0 ff>;
+-		ranges = <02000000 0 80000000 80000000 0 20000000
+-			  01000000 0 00000000 e2000000 0 00100000>;
+-		clock-frequency = <1fca055>;
++		reg = <0xf8008000 0x1000>;
++		bus-range = <0x0 0xff>;
++		ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000
++			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++		clock-frequency = <33333333>;
+ 		interrupt-parent = <&mpic>;
+-		interrupts = <18 2>;
+-		interrupt-map-mask = <ff00 0 0 7>;
++		interrupts = <24 2>;
++		interrupt-map-mask = <0xff00 0 0 7>;
+ 		interrupt-map = <
+ 			/* IDSEL 0x11 func 0 - PCI slot 1 */
+-			8800 0 0 1 &mpic 2 1
+-			8800 0 0 2 &mpic 3 1
+-			8800 0 0 3 &mpic 4 1
+-			8800 0 0 4 &mpic 1 1
++			0x8800 0 0 1 &mpic 2 1
++			0x8800 0 0 2 &mpic 3 1
++			0x8800 0 0 3 &mpic 4 1
++			0x8800 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x11 func 1 - PCI slot 1 */
+-			8900 0 0 1 &mpic 2 1
+-			8900 0 0 2 &mpic 3 1
+-			8900 0 0 3 &mpic 4 1
+-			8900 0 0 4 &mpic 1 1
++			0x8900 0 0 1 &mpic 2 1
++			0x8900 0 0 2 &mpic 3 1
++			0x8900 0 0 3 &mpic 4 1
++			0x8900 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x11 func 2 - PCI slot 1 */
+-			8a00 0 0 1 &mpic 2 1
+-			8a00 0 0 2 &mpic 3 1
+-			8a00 0 0 3 &mpic 4 1
+-			8a00 0 0 4 &mpic 1 1
++			0x8a00 0 0 1 &mpic 2 1
++			0x8a00 0 0 2 &mpic 3 1
++			0x8a00 0 0 3 &mpic 4 1
++			0x8a00 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x11 func 3 - PCI slot 1 */
+-			8b00 0 0 1 &mpic 2 1
+-			8b00 0 0 2 &mpic 3 1
+-			8b00 0 0 3 &mpic 4 1
+-			8b00 0 0 4 &mpic 1 1
++			0x8b00 0 0 1 &mpic 2 1
++			0x8b00 0 0 2 &mpic 3 1
++			0x8b00 0 0 3 &mpic 4 1
++			0x8b00 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x11 func 4 - PCI slot 1 */
+-			8c00 0 0 1 &mpic 2 1
+-			8c00 0 0 2 &mpic 3 1
+-			8c00 0 0 3 &mpic 4 1
+-			8c00 0 0 4 &mpic 1 1
++			0x8c00 0 0 1 &mpic 2 1
++			0x8c00 0 0 2 &mpic 3 1
++			0x8c00 0 0 3 &mpic 4 1
++			0x8c00 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x11 func 5 - PCI slot 1 */
+-			8d00 0 0 1 &mpic 2 1
+-			8d00 0 0 2 &mpic 3 1
+-			8d00 0 0 3 &mpic 4 1
+-			8d00 0 0 4 &mpic 1 1
++			0x8d00 0 0 1 &mpic 2 1
++			0x8d00 0 0 2 &mpic 3 1
++			0x8d00 0 0 3 &mpic 4 1
++			0x8d00 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x11 func 6 - PCI slot 1 */
+-			8e00 0 0 1 &mpic 2 1
+-			8e00 0 0 2 &mpic 3 1
+-			8e00 0 0 3 &mpic 4 1
+-			8e00 0 0 4 &mpic 1 1
++			0x8e00 0 0 1 &mpic 2 1
++			0x8e00 0 0 2 &mpic 3 1
++			0x8e00 0 0 3 &mpic 4 1
++			0x8e00 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x11 func 7 - PCI slot 1 */
+-			8f00 0 0 1 &mpic 2 1
+-			8f00 0 0 2 &mpic 3 1
+-			8f00 0 0 3 &mpic 4 1
+-			8f00 0 0 4 &mpic 1 1
++			0x8f00 0 0 1 &mpic 2 1
++			0x8f00 0 0 2 &mpic 3 1
++			0x8f00 0 0 3 &mpic 4 1
++			0x8f00 0 0 4 &mpic 1 1
+ 
+ 			/* IDSEL 0x12 func 0 - PCI slot 2 */
+-			9000 0 0 1 &mpic 3 1
+-			9000 0 0 2 &mpic 4 1
+-			9000 0 0 3 &mpic 1 1
+-			9000 0 0 4 &mpic 2 1
++			0x9000 0 0 1 &mpic 3 1
++			0x9000 0 0 2 &mpic 4 1
++			0x9000 0 0 3 &mpic 1 1
++			0x9000 0 0 4 &mpic 2 1
+ 
+ 			/* IDSEL 0x12 func 1 - PCI slot 2 */
+-			9100 0 0 1 &mpic 3 1
+-			9100 0 0 2 &mpic 4 1
+-			9100 0 0 3 &mpic 1 1
+-			9100 0 0 4 &mpic 2 1
++			0x9100 0 0 1 &mpic 3 1
++			0x9100 0 0 2 &mpic 4 1
++			0x9100 0 0 3 &mpic 1 1
++			0x9100 0 0 4 &mpic 2 1
+ 
+ 			/* IDSEL 0x12 func 2 - PCI slot 2 */
+-			9200 0 0 1 &mpic 3 1
+-			9200 0 0 2 &mpic 4 1
+-			9200 0 0 3 &mpic 1 1
+-			9200 0 0 4 &mpic 2 1
++			0x9200 0 0 1 &mpic 3 1
++			0x9200 0 0 2 &mpic 4 1
++			0x9200 0 0 3 &mpic 1 1
++			0x9200 0 0 4 &mpic 2 1
+ 
+ 			/* IDSEL 0x12 func 3 - PCI slot 2 */
+-			9300 0 0 1 &mpic 3 1
+-			9300 0 0 2 &mpic 4 1
+-			9300 0 0 3 &mpic 1 1
+-			9300 0 0 4 &mpic 2 1
++			0x9300 0 0 1 &mpic 3 1
++			0x9300 0 0 2 &mpic 4 1
++			0x9300 0 0 3 &mpic 1 1
++			0x9300 0 0 4 &mpic 2 1
+ 
+ 			/* IDSEL 0x12 func 4 - PCI slot 2 */
+-			9400 0 0 1 &mpic 3 1
+-			9400 0 0 2 &mpic 4 1
+-			9400 0 0 3 &mpic 1 1
+-			9400 0 0 4 &mpic 2 1
++			0x9400 0 0 1 &mpic 3 1
++			0x9400 0 0 2 &mpic 4 1
++			0x9400 0 0 3 &mpic 1 1
++			0x9400 0 0 4 &mpic 2 1
+ 
+ 			/* IDSEL 0x12 func 5 - PCI slot 2 */
+-			9500 0 0 1 &mpic 3 1
+-			9500 0 0 2 &mpic 4 1
+-			9500 0 0 3 &mpic 1 1
+-			9500 0 0 4 &mpic 2 1
++			0x9500 0 0 1 &mpic 3 1
++			0x9500 0 0 2 &mpic 4 1
++			0x9500 0 0 3 &mpic 1 1
++			0x9500 0 0 4 &mpic 2 1
+ 
+ 			/* IDSEL 0x12 func 6 - PCI slot 2 */
+-			9600 0 0 1 &mpic 3 1
+-			9600 0 0 2 &mpic 4 1
+-			9600 0 0 3 &mpic 1 1
+-			9600 0 0 4 &mpic 2 1
++			0x9600 0 0 1 &mpic 3 1
++			0x9600 0 0 2 &mpic 4 1
++			0x9600 0 0 3 &mpic 1 1
++			0x9600 0 0 4 &mpic 2 1
+ 
+ 			/* IDSEL 0x12 func 7 - PCI slot 2 */
+-			9700 0 0 1 &mpic 3 1
+-			9700 0 0 2 &mpic 4 1
+-			9700 0 0 3 &mpic 1 1
+-			9700 0 0 4 &mpic 2 1
++			0x9700 0 0 1 &mpic 3 1
++			0x9700 0 0 2 &mpic 4 1
++			0x9700 0 0 3 &mpic 1 1
++			0x9700 0 0 4 &mpic 2 1
+ 
+ 			// IDSEL 0x1c  USB
+-			e000 0 0 1 &i8259 c 2
+-			e100 0 0 1 &i8259 9 2
+-			e200 0 0 1 &i8259 a 2
+-			e300 0 0 1 &i8259 b 2
++			0xe000 0 0 1 &i8259 12 2
++			0xe100 0 0 2 &i8259 9 2
++			0xe200 0 0 3 &i8259 10 2
++			0xe300 0 0 4 &i8259 11 2
+ 
+ 			// IDSEL 0x1d  Audio
+-			e800 0 0 1 &i8259 6 2
++			0xe800 0 0 1 &i8259 6 2
+ 
+ 			// IDSEL 0x1e Legacy
+-			f000 0 0 1 &i8259 7 2
+-			f100 0 0 1 &i8259 7 2
++			0xf000 0 0 1 &i8259 7 2
++			0xf100 0 0 1 &i8259 7 2
+ 
+ 			// IDSEL 0x1f IDE/SATA
+-			f800 0 0 1 &i8259 e 2
+-			f900 0 0 1 &i8259 5 2
++			0xf800 0 0 1 &i8259 14 2
++			0xf900 0 0 1 &i8259 5 2
+ 			>;
+ 
+ 		pcie at 0 {
+@@ -356,37 +390,37 @@
+ 			#size-cells = <2>;
+ 			#address-cells = <3>;
+ 			device_type = "pci";
+-			ranges = <02000000 0 80000000
+-				  02000000 0 80000000
+-				  0 20000000
++			ranges = <0x02000000 0x0 0x80000000
++				  0x02000000 0x0 0x80000000
++				  0x0 0x20000000
+ 
+-				  01000000 0 00000000
+-				  01000000 0 00000000
+-				  0 00100000>;
++				  0x01000000 0x0 0x00000000
++				  0x01000000 0x0 0x00000000
++				  0x0 0x00100000>;
+ 			uli1575 at 0 {
+ 				reg = <0 0 0 0 0>;
+ 				#size-cells = <2>;
+ 				#address-cells = <3>;
+-				ranges = <02000000 0 80000000
+-					  02000000 0 80000000
+-					  0 20000000
+-					  01000000 0 00000000
+-					  01000000 0 00000000
+-					  0 00100000>;
++				ranges = <0x02000000 0x0 0x80000000
++					  0x02000000 0x0 0x80000000
++					  0x0 0x20000000
++					  0x01000000 0x0 0x00000000
++					  0x01000000 0x0 0x00000000
++					  0x0 0x00100000>;
+ 				isa at 1e {
+ 					device_type = "isa";
+ 					#interrupt-cells = <2>;
+ 					#size-cells = <1>;
+ 					#address-cells = <2>;
+-					reg = <f000 0 0 0 0>;
+-					ranges = <1 0 01000000 0 0
+-						  00001000>;
++					reg = <0xf000 0 0 0 0>;
++					ranges = <1 0 0x01000000 0 0
++						  0x00001000>;
+ 					interrupt-parent = <&i8259>;
+ 
+ 					i8259: interrupt-controller at 20 {
+-						reg = <1 20 2
+-						       1 a0 2
+-						       1 4d0 2>;
++						reg = <1 0x20 2
++						       1 0xa0 2
++						       1 0x4d0 2>;
+ 						interrupt-controller;
+ 						device_type = "interrupt-controller";
+ 						#address-cells = <0>;
+@@ -399,8 +433,8 @@
+ 					i8042 at 60 {
+ 						#size-cells = <0>;
+ 						#address-cells = <1>;
+-						reg = <1 60 1 1 64 1>;
+-						interrupts = <1 3 c 3>;
++						reg = <1 0x60 1 1 0x64 1>;
++						interrupts = <1 3 12 3>;
+ 						interrupt-parent =
+ 							<&i8259>;
+ 
+@@ -418,11 +452,11 @@
+ 					rtc at 70 {
+ 						compatible =
+ 							"pnpPNP,b00";
+-						reg = <1 70 2>;
++						reg = <1 0x70 2>;
+ 					};
+ 
+ 					gpio at 400 {
+-						reg = <1 400 80>;
++						reg = <1 0x400 0x80>;
+ 					};
+ 				};
+ 			};
+@@ -430,39 +464,40 @@
+ 
+ 	};
+ 
+-	pcie at f8009000 {
++	pci1: pcie at f8009000 {
++		cell-index = <1>;
+ 		compatible = "fsl,mpc8641-pcie";
+ 		device_type = "pci";
+ 		#interrupt-cells = <1>;
+ 		#size-cells = <2>;
+ 		#address-cells = <3>;
+-		reg = <f8009000 1000>;
+-		bus-range = <0 ff>;
+-		ranges = <02000000 0 a0000000 a0000000 0 20000000
+-			  01000000 0 00000000 e3000000 0 00100000>;
+-		clock-frequency = <1fca055>;
++		reg = <0xf8009000 0x1000>;
++		bus-range = <0 0xff>;
++		ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
++			  0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>;
++		clock-frequency = <33333333>;
+ 		interrupt-parent = <&mpic>;
+-		interrupts = <19 2>;
+-		interrupt-map-mask = <f800 0 0 7>;
++		interrupts = <25 2>;
++		interrupt-map-mask = <0xf800 0 0 7>;
+ 		interrupt-map = <
+ 			/* IDSEL 0x0 */
+-			0000 0 0 1 &mpic 4 1
+-			0000 0 0 2 &mpic 5 1
+-			0000 0 0 3 &mpic 6 1
+-			0000 0 0 4 &mpic 7 1
++			0x0000 0 0 1 &mpic 4 1
++			0x0000 0 0 2 &mpic 5 1
++			0x0000 0 0 3 &mpic 6 1
++			0x0000 0 0 4 &mpic 7 1
+ 			>;
+ 		pcie at 0 {
+ 			reg = <0 0 0 0 0>;
+ 			#size-cells = <2>;
+ 			#address-cells = <3>;
+ 			device_type = "pci";
+-			ranges = <02000000 0 a0000000
+-				  02000000 0 a0000000
+-				  0 20000000
++			ranges = <0x02000000 0x0 0xa0000000
++				  0x02000000 0x0 0xa0000000
++				  0x0 0x20000000
+ 
+-				  01000000 0 00000000
+-				  01000000 0 00000000
+-				  0 00100000>;
++				  0x01000000 0x0 0x00000000
++				  0x01000000 0x0 0x00000000
++				  0x0 0x00100000>;
+ 		};
+ 	};
+ };
+diff --git a/arch/powerpc/boot/dts/mpc866ads.dts b/arch/powerpc/boot/dts/mpc866ads.dts
+index 90f2293..daf9433 100644
+--- a/arch/powerpc/boot/dts/mpc866ads.dts
++++ b/arch/powerpc/boot/dts/mpc866ads.dts
+@@ -12,7 +12,7 @@
+ 
+ / {
+ 	model = "MPC866ADS";
+-	compatible = "mpc8xx";
++	compatible = "fsl,mpc866ads";
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
+ 
+@@ -23,15 +23,15 @@
+ 		PowerPC,866 at 0 {
+ 			device_type = "cpu";
+ 			reg = <0>;
+-			d-cache-line-size = <20>;	// 32 bytes
+-			i-cache-line-size = <20>;	// 32 bytes
++			d-cache-line-size = <10>;	// 16 bytes
++			i-cache-line-size = <10>;	// 16 bytes
+ 			d-cache-size = <2000>;		// L1, 8K
+ 			i-cache-size = <4000>;		// L1, 16K
+ 			timebase-frequency = <0>;
+ 			bus-frequency = <0>;
+ 			clock-frequency = <0>;
+ 			interrupts = <f 2>;	// decrementer interrupt
+-			interrupt-parent = <&Mpc8xx_pic>;
++			interrupt-parent = <&PIC>;
+ 		};
+ 	};
+ 
+@@ -40,107 +40,139 @@
+ 		reg = <00000000 800000>;
+ 	};
+ 
+-	soc866 at ff000000 {
++	localbus at ff000100 {
++		compatible = "fsl,mpc866-localbus", "fsl,pq1-localbus";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		reg = <ff000100 40>;
++
++		ranges = <
++			1 0 ff080000 00008000
++			5 0 ff0a0000 00008000
++		>;
++
++		board-control at 1,0 {
++			reg = <1 0 20 5 300 4>;
++			compatible = "fsl,mpc866ads-bcsr";
++		};
++	};
++
++	soc at ff000000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		device_type = "soc";
+ 		ranges = <0 ff000000 00100000>;
+ 		reg = <ff000000 00000200>;
+ 		bus-frequency = <0>;
+-		mdio at e80 {
+-			device_type = "mdio";
+-			compatible = "fs_enet";
+-			reg = <e80 8>;
++
++		mdio at e00 {
++			compatible = "fsl,mpc866-fec-mdio", "fsl,pq1-fec-mdio";
++			reg = <e00 188>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			phy: ethernet-phy at f {
++			PHY: ethernet-phy at f {
+ 				reg = <f>;
+ 				device_type = "ethernet-phy";
+ 			};
+ 		};
+ 
+-		fec at e00 {
++		ethernet at e00 {
+ 			device_type = "network";
+-			compatible = "fs_enet";
+-			model = "FEC";
+-			device-id = <1>;
++			compatible = "fsl,mpc866-fec-enet",
++			             "fsl,pq1-fec-enet";
+ 			reg = <e00 188>;
+-			mac-address = [ 00 00 0C 00 01 FD ];
++			local-mac-address = [ 00 00 00 00 00 00 ];
+ 			interrupts = <3 1>;
+-			interrupt-parent = <&Mpc8xx_pic>;
+-			phy-handle = <&Phy>;
++			interrupt-parent = <&PIC>;
++			phy-handle = <&PHY>;
++			linux,network-index = <0>;
+ 		};
+ 
+-		mpc8xx_pic: pic at ff000000 {
++		PIC: pic at 0 {
+ 			interrupt-controller;
+-			#address-cells = <0>;
+ 			#interrupt-cells = <2>;
+ 			reg = <0 24>;
+-			device_type = "mpc8xx-pic";
+-			compatible = "CPM";
++			compatible = "fsl,mpc866-pic", "fsl,pq1-pic";
+ 		};
+ 
+-		cpm at ff000000 {
++		cpm at 9c0 {
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+-			device_type = "cpm";
+-			model = "CPM";
+-			ranges = <0 0 4000>;
+-			reg = <860 f0>;
+-			command-proc = <9c0>;
++			compatible = "fsl,mpc866-cpm", "fsl,cpm1";
++			ranges;
++			reg = <9c0 40>;
+ 			brg-frequency = <0>;
+ 			interrupts = <0 2>;	// cpm error interrupt
+-			interrupt-parent = <&Cpm_pic>;
++			interrupt-parent = <&CPM_PIC>;
+ 
+-			cpm_pic: pic at 930 {
++			muram at 2000 {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0 2000 2000>;
++
++				data at 0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0 1c00>;
++				};
++			};
++
++			brg at 9f0 {
++				compatible = "fsl,mpc866-brg",
++					     "fsl,cpm1-brg",
++					     "fsl,cpm-brg";
++				reg = <9f0 10>;
++				clock-frequency = <0>;
++			};
++
++			CPM_PIC: pic at 930 {
+ 				interrupt-controller;
+ 				#address-cells = <0>;
+-				#interrupt-cells = <2>;
++				#interrupt-cells = <1>;
+ 				interrupts = <5 2 0 2>;
+-				interrupt-parent = <&Mpc8xx_pic>;
++				interrupt-parent = <&PIC>;
+ 				reg = <930 20>;
+-				device_type = "cpm-pic";
+-				compatible = "CPM";
++				compatible = "fsl,mpc866-cpm-pic",
++				             "fsl,cpm1-pic";
+ 			};
+ 
+-			smc at a80 {
++
++			serial at a80 {
+ 				device_type = "serial";
+-				compatible = "cpm_uart";
+-				model = "SMC";
+-				device-id = <1>;
++				compatible = "fsl,mpc866-smc-uart",
++				             "fsl,cpm1-smc-uart";
+ 				reg = <a80 10 3e80 40>;
+-				clock-setup = <00ffffff 0>;
+-				rx-clock = <1>;
+-				tx-clock = <1>;
+-				current-speed = <0>;
+-				interrupts = <4 3>;
+-				interrupt-parent = <&Cpm_pic>;
++				interrupts = <4>;
++				interrupt-parent = <&CPM_PIC>;
++				fsl,cpm-brg = <1>;
++				fsl,cpm-command = <0090>;
+ 			};
+ 
+-			smc at a90 {
++			serial at a90 {
+ 				device_type = "serial";
+-				compatible = "cpm_uart";
+-				model = "SMC";
+-				device-id = <2>;
+-				reg = <a90 20 3f80 40>;
+-				clock-setup = <ff00ffff 90000>;
+-				rx-clock = <2>;
+-				tx-clock = <2>;
+-				current-speed = <0>;
+-				interrupts = <3 3>;
+-				interrupt-parent = <&Cpm_pic>;
++				compatible = "fsl,mpc866-smc-uart",
++				             "fsl,cpm1-smc-uart";
++				reg = <a90 10 3f80 40>;
++				interrupts = <3>;
++				interrupt-parent = <&CPM_PIC>;
++				fsl,cpm-brg = <2>;
++				fsl,cpm-command = <00d0>;
+ 			};
+ 
+-			scc at a00 {
++			ethernet at a00 {
+ 				device_type = "network";
+-				compatible = "fs_enet";
+-				model = "SCC";
+-				device-id = <1>;
+-				reg = <a00 18 3c00 80>;
+-				mac-address = [ 00 00 0C 00 03 FD ];
+-				interrupts = <1e 3>;
+-				interrupt-parent = <&Cpm_pic>;
++				compatible = "fsl,mpc866-scc-enet",
++				             "fsl,cpm1-scc-enet";
++				reg = <a00 18 3c00 100>;
++				local-mac-address = [ 00 00 00 00 00 00 ];
++				interrupts = <1e>;
++				interrupt-parent = <&CPM_PIC>;
++				fsl,cpm-command = <0000>;
++				linux,network-index = <1>;
+ 			};
+ 		};
+ 	};
++
++	chosen {
++		linux,stdout-path = "/soc/cpm/serial at a80";
++	};
+ };
+diff --git a/arch/powerpc/boot/dts/rainier.dts b/arch/powerpc/boot/dts/rainier.dts
 new file mode 100644
-index 0000000..a006ef8
+index 0000000..d3c2ac3
 --- /dev/null
-+++ b/arch/sh/boot/compressed/misc_64.c
-@@ -0,0 +1,250 @@
++++ b/arch/powerpc/boot/dts/rainier.dts
+@@ -0,0 +1,353 @@
 +/*
-+ * arch/sh/boot/compressed/misc_64.c
++ * Device Tree Source for AMCC Rainier
 + *
-+ * This is a collection of several routines from gzip-1.0.3
-+ * adapted for Linux.
++ * Based on Sequoia code
++ * Copyright (c) 2007 MontaVista Software, Inc.
 + *
-+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
++ * FIXME: Draft only!
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2.  This program is licensed "as is" without
++ * any warranty of any kind, whether express or implied.
 + *
-+ * Adapted for SHmedia from sh by Stuart Menefy, May 2002
 + */
 +
-+#include <asm/uaccess.h>
++/ {
++	#address-cells = <2>;
++	#size-cells = <1>;
++	model = "amcc,rainier";
++	compatible = "amcc,rainier";
++	dcr-parent = <&/cpus/cpu at 0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		ethernet1 = &EMAC1;
++		serial0 = &UART0;
++		serial1 = &UART1;
++		serial2 = &UART2;
++		serial3 = &UART3;
++	};
 +
-+/* cache.c */
-+#define CACHE_ENABLE      0
-+#define CACHE_DISABLE     1
-+int cache_control(unsigned int command);
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu at 0 {
++			device_type = "cpu";
++			model = "PowerPC,440GRx";
++			reg = <0>;
++			clock-frequency = <0>; /* Filled in by zImage */
++			timebase-frequency = <0>; /* Filled in by zImage */
++			i-cache-line-size = <20>;
++			d-cache-line-size = <20>;
++			i-cache-size = <8000>;
++			d-cache-size = <8000>;
++			dcr-controller;
++			dcr-access-method = "native";
++		};
++	};
 +
-+/*
-+ * gzip declarations
-+ */
++	memory {
++		device_type = "memory";
++		reg = <0 0 0>; /* Filled in by zImage */
++	};
 +
-+#define OF(args)  args
-+#define STATIC static
++	UIC0: interrupt-controller0 {
++		compatible = "ibm,uic-440grx","ibm,uic";
++		interrupt-controller;
++		cell-index = <0>;
++		dcr-reg = <0c0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++	};
 +
-+#undef memset
-+#undef memcpy
-+#define memzero(s, n)     memset ((s), 0, (n))
++	UIC1: interrupt-controller1 {
++		compatible = "ibm,uic-440grx","ibm,uic";
++		interrupt-controller;
++		cell-index = <1>;
++		dcr-reg = <0d0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <1e 4 1f 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
 +
-+typedef unsigned char uch;
-+typedef unsigned short ush;
-+typedef unsigned long ulg;
++	UIC2: interrupt-controller2 {
++		compatible = "ibm,uic-440grx","ibm,uic";
++		interrupt-controller;
++		cell-index = <2>;
++		dcr-reg = <0e0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <1c 4 1d 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
 +
-+#define WSIZE 0x8000		/* Window size must be at least 32k, */
-+				/* and a power of two */
++	SDR0: sdr {
++		compatible = "ibm,sdr-440grx", "ibm,sdr-440ep";
++		dcr-reg = <00e 002>;
++	};
 +
-+static uch *inbuf;		/* input buffer */
-+static uch window[WSIZE];	/* Sliding window buffer */
++	CPR0: cpr {
++		compatible = "ibm,cpr-440grx", "ibm,cpr-440ep";
++		dcr-reg = <00c 002>;
++	};
 +
-+static unsigned insize = 0;	/* valid bytes in inbuf */
-+static unsigned inptr = 0;	/* index of next byte to be processed in inbuf */
-+static unsigned outcnt = 0;	/* bytes in output buffer */
++	plb {
++		compatible = "ibm,plb-440grx", "ibm,plb4";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		ranges;
++		clock-frequency = <0>; /* Filled in by zImage */
++
++		SDRAM0: sdram {
++			compatible = "ibm,sdram-440grx", "ibm,sdram-44x-ddr2denali";
++			dcr-reg = <010 2>;
++		};
 +
-+/* gzip flag byte */
-+#define ASCII_FLAG   0x01	/* bit 0 set: file probably ASCII text */
-+#define CONTINUATION 0x02	/* bit 1 set: continuation of multi-part gzip file */
-+#define EXTRA_FIELD  0x04	/* bit 2 set: extra field present */
-+#define ORIG_NAME    0x08	/* bit 3 set: original file name present */
-+#define COMMENT      0x10	/* bit 4 set: file comment present */
-+#define ENCRYPTED    0x20	/* bit 5 set: file is encrypted */
-+#define RESERVED     0xC0	/* bit 6,7:   reserved */
++		DMA0: dma {
++			compatible = "ibm,dma-440grx", "ibm,dma-4xx";
++			dcr-reg = <100 027>;
++		};
 +
-+#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
++		MAL0: mcmal {
++			compatible = "ibm,mcmal-440grx", "ibm,mcmal2";
++			dcr-reg = <180 62>;
++			num-tx-chans = <2>;
++			num-rx-chans = <2>;
++			interrupt-parent = <&MAL0>;
++			interrupts = <0 1 2 3 4>;
++			#interrupt-cells = <1>;
++			#address-cells = <0>;
++			#size-cells = <0>;
++			interrupt-map = </*TXEOB*/ 0 &UIC0 a 4
++					/*RXEOB*/ 1 &UIC0 b 4
++					/*SERR*/  2 &UIC1 0 4
++					/*TXDE*/  3 &UIC1 1 4
++					/*RXDE*/  4 &UIC1 2 4>;
++			interrupt-map-mask = <ffffffff>;
++		};
 +
-+/* Diagnostic functions */
-+#ifdef DEBUG
-+#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-+#  define Trace(x) fprintf x
-+#  define Tracev(x) {if (verbose) fprintf x ;}
-+#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-+#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-+#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-+#else
-+#  define Assert(cond,msg)
-+#  define Trace(x)
-+#  define Tracev(x)
-+#  define Tracevv(x)
-+#  define Tracec(c,x)
-+#  define Tracecv(c,x)
-+#endif
++		POB0: opb {
++		  	compatible = "ibm,opb-440grx", "ibm,opb";
++			#address-cells = <1>;
++			#size-cells = <1>;
++		  	ranges = <00000000 1 00000000 80000000
++			          80000000 1 80000000 80000000>;
++		  	interrupt-parent = <&UIC1>;
++		  	interrupts = <7 4>;
++		  	clock-frequency = <0>; /* Filled in by zImage */
++
++			EBC0: ebc {
++				compatible = "ibm,ebc-440grx", "ibm,ebc";
++				dcr-reg = <012 2>;
++				#address-cells = <2>;
++				#size-cells = <1>;
++				clock-frequency = <0>; /* Filled in by zImage */
++				interrupts = <5 1>;
++				interrupt-parent = <&UIC1>;
++
++				nor_flash at 0,0 {
++					compatible = "amd,s29gl256n", "cfi-flash";
++					bank-width = <2>;
++					reg = <0 000000 4000000>;
++					#address-cells = <1>;
++					#size-cells = <1>;
++					partition at 0 {
++						label = "Kernel";
++						reg = <0 180000>;
++					};
++					partition at 180000 {
++						label = "ramdisk";
++						reg = <180000 200000>;
++					};
++					partition at 380000 {
++						label = "file system";
++						reg = <380000 3aa0000>;
++					};
++					partition at 3e20000 {
++						label = "kozio";
++						reg = <3e20000 140000>;
++					};
++					partition at 3f60000 {
++						label = "env";
++						reg = <3f60000 40000>;
++					};
++					partition at 3fa0000 {
++						label = "u-boot";
++						reg = <3fa0000 60000>;
++					};
++				};
 +
-+static int fill_inbuf(void);
-+static void flush_window(void);
-+static void error(char *m);
-+static void gzip_mark(void **);
-+static void gzip_release(void **);
++			};
 +
-+extern char input_data[];
-+extern int input_len;
++			UART0: serial at ef600300 {
++		   		device_type = "serial";
++		   		compatible = "ns16550";
++		   		reg = <ef600300 8>;
++		   		virtual-reg = <ef600300>;
++		   		clock-frequency = <0>; /* Filled in by zImage */
++		   		current-speed = <1c200>;
++		   		interrupt-parent = <&UIC0>;
++		   		interrupts = <0 4>;
++	   		};
++
++			UART1: serial at ef600400 {
++		   		device_type = "serial";
++		   		compatible = "ns16550";
++		   		reg = <ef600400 8>;
++		   		virtual-reg = <ef600400>;
++		   		clock-frequency = <0>;
++		   		current-speed = <0>;
++		   		interrupt-parent = <&UIC0>;
++		   		interrupts = <1 4>;
++	   		};
++
++			UART2: serial at ef600500 {
++		   		device_type = "serial";
++		   		compatible = "ns16550";
++		   		reg = <ef600500 8>;
++		   		virtual-reg = <ef600500>;
++		   		clock-frequency = <0>;
++		   		current-speed = <0>;
++		   		interrupt-parent = <&UIC1>;
++		   		interrupts = <3 4>;
++	   		};
++
++			UART3: serial at ef600600 {
++		   		device_type = "serial";
++		   		compatible = "ns16550";
++		   		reg = <ef600600 8>;
++		   		virtual-reg = <ef600600>;
++		   		clock-frequency = <0>;
++		   		current-speed = <0>;
++		   		interrupt-parent = <&UIC1>;
++		   		interrupts = <4 4>;
++	   		};
++
++			IIC0: i2c at ef600700 {
++				device_type = "i2c";
++				compatible = "ibm,iic-440grx", "ibm,iic";
++				reg = <ef600700 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <2 4>;
++			};
 +
-+static long bytes_out = 0;
-+static uch *output_data;
-+static unsigned long output_ptr = 0;
++			IIC1: i2c at ef600800 {
++				device_type = "i2c";
++				compatible = "ibm,iic-440grx", "ibm,iic";
++				reg = <ef600800 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <7 4>;
++			};
 +
-+static void *malloc(int size);
-+static void free(void *where);
-+static void error(char *m);
-+static void gzip_mark(void **);
-+static void gzip_release(void **);
++			ZMII0: emac-zmii at ef600d00 {
++				device_type = "zmii-interface";
++				compatible = "ibm,zmii-440grx", "ibm,zmii";
++				reg = <ef600d00 c>;
++			};
 +
-+static void puts(const char *);
++			RGMII0: emac-rgmii at ef601000 {
++				device_type = "rgmii-interface";
++				compatible = "ibm,rgmii-440grx", "ibm,rgmii";
++				reg = <ef601000 8>;
++				has-mdio;
++			};
 +
-+extern int _text;		/* Defined in vmlinux.lds.S */
-+extern int _end;
-+static unsigned long free_mem_ptr;
-+static unsigned long free_mem_end_ptr;
++			EMAC0: ethernet at ef600e00 {
++				linux,network-index = <0>;
++				device_type = "network";
++				compatible = "ibm,emac-440grx", "ibm,emac-440epx", "ibm,emac4";
++				interrupt-parent = <&EMAC0>;
++				interrupts = <0 1>;
++				#interrupt-cells = <1>;
++				#address-cells = <0>;
++				#size-cells = <0>;
++				interrupt-map = </*Status*/ 0 &UIC0 18 4
++						/*Wake*/  1 &UIC1 1d 4>;
++				reg = <ef600e00 70>;
++				local-mac-address = [000000000000];
++				mal-device = <&MAL0>;
++				mal-tx-channel = <0>;
++				mal-rx-channel = <0>;
++				cell-index = <0>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rgmii";
++				phy-map = <00000000>;
++				zmii-device = <&ZMII0>;
++				zmii-channel = <0>;
++				rgmii-device = <&RGMII0>;
++				rgmii-channel = <0>;
++				has-inverted-stacr-oc;
++				has-new-stacr-staopc;
++			};
 +
-+#define HEAP_SIZE             0x10000
++			EMAC1: ethernet at ef600f00 {
++				linux,network-index = <1>;
++				device_type = "network";
++				compatible = "ibm,emac-440grx", "ibm,emac-440epx", "ibm,emac4";
++				interrupt-parent = <&EMAC1>;
++				interrupts = <0 1>;
++				#interrupt-cells = <1>;
++				#address-cells = <0>;
++				#size-cells = <0>;
++				interrupt-map = </*Status*/ 0 &UIC0 19 4
++						/*Wake*/  1 &UIC1 1f 4>;
++				reg = <ef600f00 70>;
++				local-mac-address = [000000000000];
++				mal-device = <&MAL0>;
++				mal-tx-channel = <1>;
++				mal-rx-channel = <1>;
++				cell-index = <1>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rgmii";
++				phy-map = <00000000>;
++				zmii-device = <&ZMII0>;
++				zmii-channel = <1>;
++				rgmii-device = <&RGMII0>;
++				rgmii-channel = <1>;
++				has-inverted-stacr-oc;
++				has-new-stacr-staopc;
++			};
++		};
 +
-+#include "../../../../lib/inflate.c"
++		PCI0: pci at 1ec000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb440grx-pci", "ibm,plb-pci";
++			primary;
++			reg = <1 eec00000 8	/* Config space access */
++			       1 eed00000 4	/* IACK */
++			       1 eed00000 4	/* Special cycle */
++			       1 ef400000 40>;	/* Internal registers */
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed. Chip supports a second
++			 * IO range but we don't use it for now
++			 */
++			ranges = <02000000 0 80000000 1 80000000 0 10000000
++				01000000 0 00000000 1 e8000000 0 00100000>;
 +
-+static void *malloc(int size)
-+{
-+	void *p;
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
 +
-+	if (size < 0)
-+		error("Malloc error\n");
-+	if (free_mem_ptr == 0)
-+		error("Memory error\n");
++			/* All PCI interrupts are routed to IRQ 67 */
++			interrupt-map-mask = <0000 0 0 0>;
++			interrupt-map = < 0000 0 0 0 &UIC2 3 8 >;
++		};
++	};
 +
-+	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
++	chosen {
++		linux,stdout-path = "/plb/opb/serial at ef600300";
++		bootargs = "console=ttyS0,115200";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts
+new file mode 100644
+index 0000000..3839d4b
+--- /dev/null
++++ b/arch/powerpc/boot/dts/sbc8349.dts
+@@ -0,0 +1,244 @@
++/*
++ * SBC8349E Device Tree Source
++ *
++ * Copyright 2007 Wind River Inc.
++ *
++ * Paul Gortmaker (see MAINTAINERS for contact information)
++ *
++ *	-based largely on the Freescale MPC834x_MDS dts.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
 +
-+	p = (void *) free_mem_ptr;
-+	free_mem_ptr += size;
++/dts-v1/;
 +
-+	if (free_mem_ptr >= free_mem_end_ptr)
-+		error("\nOut of memory\n");
++/ {
++	model = "SBC8349E";
++	compatible = "SBC834xE";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+	return p;
-+}
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8349 at 0 {
++			device_type = "cpu";
++			reg = <0x0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;	// from bootloader
++			bus-frequency = <0>;		// from bootloader
++			clock-frequency = <0>;		// from bootloader
++		};
++	};
 +
-+static void free(void *where)
-+{				/* Don't care */
-+}
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;	// 256MB at 0
++	};
 +
-+static void gzip_mark(void **ptr)
-+{
-+	*ptr = (void *) free_mem_ptr;
-+}
++	soc8349 at e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00000200>;
++		bus-frequency = <0>;
++
++		wdt at 200 {
++			compatible = "mpc83xx_wdt";
++			reg = <0x200 0x100>;
++		};
 +
-+static void gzip_release(void **ptr)
-+{
-+	free_mem_ptr = (long) *ptr;
-+}
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <14 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
 +
-+void puts(const char *s)
-+{
-+}
++		i2c at 3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <15 0x8>;
++			interrupt-parent = <&ipic>;
++			dfsrr;
++		};
 +
-+void *memset(void *s, int c, size_t n)
-+{
-+	int i;
-+	char *ss = (char *) s;
++		spi at 7000 {
++			cell-index = <0>;
++			compatible = "fsl,spi";
++			reg = <0x7000 0x1000>;
++			interrupts = <16 0x8>;
++			interrupt-parent = <&ipic>;
++			mode = "cpu";
++		};
 +
-+	for (i = 0; i < n; i++)
-+		ss[i] = c;
-+	return s;
-+}
++		/* phy type (ULPI or SERIAL) are only types supported for MPH */
++		/* port = 0 or 1 */
++		usb at 22000 {
++			compatible = "fsl-usb2-mph";
++			reg = <0x22000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <39 0x8>;
++			phy_type = "ulpi";
++			port1;
++		};
++		/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
++		usb at 23000 {
++			device_type = "usb";
++			compatible = "fsl-usb2-dr";
++			reg = <0x23000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			interrupt-parent = <&ipic>;
++			interrupts = <38 0x8>;
++			dr_mode = "otg";
++			phy_type = "ulpi";
++		};
 +
-+void *memcpy(void *__dest, __const void *__src, size_t __n)
-+{
-+	int i;
-+	char *d = (char *) __dest, *s = (char *) __src;
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
++			phy0: ethernet-phy at 19 {
++				interrupt-parent = <&ipic>;
++				interrupts = <20 0x8>;
++				reg = <0x19>;
++				device_type = "ethernet-phy";
++			};
++			phy1: ethernet-phy at 1a {
++				interrupt-parent = <&ipic>;
++				interrupts = <21 0x8>;
++				reg = <0x1a>;
++				device_type = "ethernet-phy";
++			};
++		};
 +
-+	for (i = 0; i < __n; i++)
-+		d[i] = s[i];
-+	return __dest;
-+}
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <32 0x8 33 0x8 34 0x8>;
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy0>;
++			linux,network-index = <0>;
++		};
 +
-+/* ===========================================================================
-+ * Fill the input buffer. This is called only when the buffer is empty
-+ * and at least one byte is really needed.
-+ */
-+static int fill_inbuf(void)
-+{
-+	if (insize != 0) {
-+		error("ran out of input data\n");
-+	}
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 0x8 36 0x8 37 0x8>;
++			interrupt-parent = <&ipic>;
++			phy-handle = <&phy1>;
++			linux,network-index = <1>;
++		};
 +
-+	inbuf = input_data;
-+	insize = input_len;
-+	inptr = 1;
-+	return inbuf[0];
-+}
++		serial0: serial at 4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;
++			clock-frequency = <0>;
++			interrupts = <9 0x8>;
++			interrupt-parent = <&ipic>;
++		};
 +
-+/* ===========================================================================
-+ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
-+ * (Used for the decompressed data only.)
++		serial1: serial at 4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;
++			clock-frequency = <0>;
++			interrupts = <10 0x8>;
++			interrupt-parent = <&ipic>;
++		};
++
++		/* May need to remove if on a part without crypto engine */
++		crypto at 30000 {
++			model = "SEC2";
++			compatible = "talitos";
++			reg = <0x30000 0x10000>;
++			interrupts = <11 0x8>;
++			interrupt-parent = <&ipic>;
++			num-channels = <4>;
++			channel-fifo-len = <24>;
++			exec-units-mask = <0x0000007e>;
++			/* desc mask is for rev2.0,
++			 * we need runtime fixup for >2.0 */
++			descriptor-types-mask = <0x01010ebf>;
++		};
++
++		/* IPIC
++		 * interrupts cell = <intr #, sense>
++		 * sense values match linux IORESOURCE_IRQ_* defines:
++		 * sense == 8: Level, low assertion
++		 * sense == 2: Edge, high-to-low change
++		 */
++		ipic: pic at 700 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x700 0x100>;
++			device_type = "ipic";
++		};
++	};
++
++	pci0: pci at e0008500 {
++		cell-index = <1>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++
++				/* IDSEL 0x11 */
++				 0x8800 0x0 0x0 0x1 &ipic 20 0x8
++				 0x8800 0x0 0x0 0x2 &ipic 21 0x8
++				 0x8800 0x0 0x0 0x3 &ipic 22 0x8
++				 0x8800 0x0 0x0 0x4 &ipic 23 0x8>;
++
++		interrupt-parent = <&ipic>;
++		interrupts = <0x42 0x8>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000
++			  0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++		clock-frequency = <66666666>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008500 0x100>;
++		compatible = "fsl,mpc8349-pci";
++		device_type = "pci";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts
+new file mode 100644
+index 0000000..14be38a
+--- /dev/null
++++ b/arch/powerpc/boot/dts/sbc8548.dts
+@@ -0,0 +1,244 @@
++/*
++ * SBC8548 Device Tree Source
++ *
++ * Copyright 2007 Wind River Systems Inc.
++ *
++ * Paul Gortmaker (see MAINTAINERS for contact information)
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
 + */
-+static void flush_window(void)
-+{
-+	ulg c = crc;		/* temporary variable */
-+	unsigned n;
-+	uch *in, *out, ch;
 +
-+	in = window;
-+	out = &output_data[output_ptr];
-+	for (n = 0; n < outcnt; n++) {
-+		ch = *out++ = *in++;
-+		c = crc_32_tab[((int) c ^ ch) & 0xff] ^ (c >> 8);
-+	}
-+	crc = c;
-+	bytes_out += (ulg) outcnt;
-+	output_ptr += (ulg) outcnt;
-+	outcnt = 0;
-+	puts(".");
-+}
 +
-+static void error(char *x)
-+{
-+	puts("\n\n");
-+	puts(x);
-+	puts("\n\n -- System halted");
++/dts-v1/;
 +
-+	while (1) ;		/* Halt */
-+}
++/ {
++	model = "SBC8548";
++	compatible = "SBC8548";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++		/* pci1 doesn't have a corresponding physical connector */
++		pci2 = &pci2;
++	};
 +
-+#define STACK_SIZE (4096)
-+long __attribute__ ((aligned(8))) user_stack[STACK_SIZE];
-+long *stack_start = &user_stack[STACK_SIZE];
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8548 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <0x20>;	// 32 bytes
++			i-cache-line-size = <0x20>;	// 32 bytes
++			d-cache-size = <0x8000>;	// L1, 32K
++			i-cache-size = <0x8000>;	// L1, 32K
++			timebase-frequency = <0>;	// From uboot
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
 +
-+void decompress_kernel(void)
-+{
-+	output_data = (uch *) (CONFIG_MEMORY_START + 0x2000);
-+	free_mem_ptr = (unsigned long) &_end;
-+	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;
++	};
 +
-+	makecrc();
-+	puts("Uncompressing Linux... ");
-+	cache_control(CACHE_ENABLE);
-+	gunzip();
-+	puts("\n");
++	soc8548 at e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x00000000 0xe0000000 0x00100000>;
++		reg = <0xe0000000 0x00001000>;	// CCSRBAR
++		bus-frequency = <0>;
++
++		memory-controller at 2000 {
++			compatible = "fsl,8548-memory-controller";
++			reg = <0x2000 0x1000>;
++			interrupt-parent = <&mpic>;
++			interrupts = <0x12 0x2>;
++		};
 +
-+#if 0
-+	/* When booting from ROM may want to do something like this if the
-+	 * boot loader doesn't.
-+	 */
++		l2-cache-controller at 20000 {
++			compatible = "fsl,8548-l2-cache-controller";
++			reg = <0x20000 0x1000>;
++			cache-line-size = <0x20>;	// 32 bytes
++			cache-size = <0x80000>;	// L2, 512K
++			interrupt-parent = <&mpic>;
++			interrupts = <0x10 0x2>;
++		};
 +
-+	/* Set up the parameters and command line */
-+	{
-+		volatile unsigned int *parambase =
-+		    (int *) (CONFIG_MEMORY_START + 0x1000);
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <0x2b 0x2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++		};
 +
-+		parambase[0] = 0x1;	/* MOUNT_ROOT_RDONLY */
-+		parambase[1] = 0x0;	/* RAMDISK_FLAGS */
-+		parambase[2] = 0x0200;	/* ORIG_ROOT_DEV */
-+		parambase[3] = 0x0;	/* LOADER_TYPE */
-+		parambase[4] = 0x0;	/* INITRD_START */
-+		parambase[5] = 0x0;	/* INITRD_SIZE */
-+		parambase[6] = 0;
++		i2c at 3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <0x2b 0x2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++		};
 +
-+		strcpy((char *) ((int) parambase + 0x100),
-+		       "console=ttySC0,38400");
-+	}
-+#endif
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
++			phy0: ethernet-phy at 19 {
++				interrupt-parent = <&mpic>;
++				interrupts = <0x6 0x1>;
++				reg = <0x19>;
++				device_type = "ethernet-phy";
++			};
++			phy1: ethernet-phy at 1a {
++				interrupt-parent = <&mpic>;
++				interrupts = <0x7 0x1>;
++				reg = <0x1a>;
++				device_type = "ethernet-phy";
++			};
++		};
 +
-+	puts("Ok, booting the kernel.\n");
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy0>;
++		};
 +
-+	cache_control(CACHE_DISABLE);
-+}
-diff --git a/arch/sh/boot/compressed/vmlinux_64.lds b/arch/sh/boot/compressed/vmlinux_64.lds
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "eTSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy1>;
++		};
++
++		serial0: serial at 4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>;	// reg base, size
++			clock-frequency = <0>;	// should we fill in in uboot?
++			interrupts = <0x2a 0x2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		serial1: serial at 4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;	// reg base, size
++			clock-frequency = <0>;	// should we fill in in uboot?
++			interrupts = <0x2a 0x2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		global-utilities at e0000 {	//global utilities reg
++			compatible = "fsl,mpc8548-guts";
++			reg = <0xe0000 0x1000>;
++			fsl,has-rstcr;
++		};
++
++		mpic: pic at 40000 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#size-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x40000 0x40000>;
++			compatible = "chrp,open-pic";
++			device_type = "open-pic";
++                        big-endian;
++		};
++	};
++
++	pci0: pci at e0008000 {
++		cell-index = <0>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++			/* IDSEL 0x01 (PCI-X slot) */
++			0x0800 0x0 0x0 0x1 &mpic 0x0 0x1
++			0x0800 0x0 0x0 0x2 &mpic 0x1 0x1
++			0x0800 0x0 0x0 0x3 &mpic 0x2 0x1
++			0x0800 0x0 0x0 0x4 &mpic 0x3 0x1>;
++
++		interrupt-parent = <&mpic>;
++		interrupts = <0x18 0x2>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000
++			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00800000>;
++		clock-frequency = <66666666>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe0008000 0x1000>;
++		compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
++		device_type = "pci";
++	};
++
++	pci2: pcie at e000a000 {
++		cell-index = <2>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++
++			/* IDSEL 0x0 (PEX) */
++			0x0000 0x0 0x0 0x1 &mpic 0x0 0x1
++			0x0000 0x0 0x0 0x2 &mpic 0x1 0x1
++			0x0000 0x0 0x0 0x3 &mpic 0x2 0x1
++			0x0000 0x0 0x0 0x4 &mpic 0x3 0x1>;
++
++		interrupt-parent = <&mpic>;
++		interrupts = <0x1a 0x2>;
++		bus-range = <0x0 0xff>;
++		ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
++			  0x01000000 0x0 0x00000000 0xe3000000 0x0 0x08000000>;
++		clock-frequency = <33333333>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xe000a000 0x1000>;
++		compatible = "fsl,mpc8548-pcie";
++		device_type = "pci";
++		pcie at 0 {
++			reg = <0x0 0x0 0x0 0x0 0x0>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			device_type = "pci";
++			ranges = <0x02000000 0x0 0xa0000000
++				  0x02000000 0x0 0xa0000000
++				  0x0 0x20000000
++
++				  0x01000000 0x0 0x00000000
++				  0x01000000 0x0 0x00000000
++				  0x0 0x08000000>;
++		};
++	};
++};
+diff --git a/arch/powerpc/boot/dts/sbc8560.dts b/arch/powerpc/boot/dts/sbc8560.dts
 new file mode 100644
-index 0000000..59c2ef4
+index 0000000..0476802
 --- /dev/null
-+++ b/arch/sh/boot/compressed/vmlinux_64.lds
-@@ -0,0 +1,64 @@
++++ b/arch/powerpc/boot/dts/sbc8560.dts
+@@ -0,0 +1,330 @@
 +/*
-+ * ld script to make compressed SuperH/shmedia Linux kernel+decompression
-+ *		bootstrap
-+ * Modified by Stuart Menefy from arch/sh/vmlinux.lds.S written by Niibe Yutaka
++ * SBC8560 Device Tree Source
++ *
++ * Copyright 2007 Wind River Systems Inc.
++ *
++ * Paul Gortmaker (see MAINTAINERS for contact information)
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
 + */
 +
++/dts-v1/;
 +
-+#ifdef CONFIG_LITTLE_ENDIAN
-+/* OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux") */
-+#define NOP 0x6ff0fff0
-+#else
-+/* OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64") */
-+#define NOP 0xf0fff06f
-+#endif
++/ {
++	model = "SBC8560";
++	compatible = "SBC8560";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		ethernet3 = &enet3;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
 +
-+OUTPUT_FORMAT("elf32-sh64-linux")
-+OUTPUT_ARCH(sh)
-+ENTRY(_start)
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8560 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <0x20>;	// 32 bytes
++			i-cache-line-size = <0x20>;	// 32 bytes
++			d-cache-size = <0x8000>;	// L1, 32K
++			i-cache-size = <0x8000>;	// L1, 32K
++			timebase-frequency = <0>;	// From uboot
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
 +
-+#define ALIGNED_GAP(section, align) (((ADDR(section)+SIZEOF(section)+(align)-1) & ~((align)-1))-ADDR(section))
-+#define FOLLOWING(section, align) AT (LOADADDR(section) + ALIGNED_GAP(section,align))
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x20000000>;
++	};
 +
-+SECTIONS
-+{
-+  _text = .;			/* Text and read-only data */
++	soc at ff700000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xff700000 0x00100000>;
++		reg = <0xff700000 0x00100000>;
++		clock-frequency = <0>;
++
++		memory-controller at 2000 {
++			compatible = "fsl,8560-memory-controller";
++			reg = <0x2000 0x1000>;
++			interrupt-parent = <&mpic>;
++			interrupts = <0x12 0x2>;
++		};
 +
-+  .text : {
-+	*(.text)
-+	*(.text64)
-+	*(.text..SHmedia32)
-+	*(.fixup)
-+	*(.gnu.warning)
-+	} = NOP
-+  . = ALIGN(4);
-+  .rodata : { *(.rodata) }
++		l2-cache-controller at 20000 {
++			compatible = "fsl,8560-l2-cache-controller";
++			reg = <0x20000 0x1000>;
++			cache-line-size = <0x20>;	// 32 bytes
++			cache-size = <0x40000>;		// L2, 256K
++			interrupt-parent = <&mpic>;
++			interrupts = <0x10 0x2>;
++		};
 +
-+  /* There is no 'real' reason for eight byte alignment, four would work
-+   * as well, but gdb downloads much (*4) faster with this.
-+   */
-+  . = ALIGN(8);
-+  .image : { *(.image) }
-+  . = ALIGN(4);
-+  _etext = .;			/* End of text section */
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <0x2b 0x2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++		};
 +
-+  .data :			/* Data */
-+	FOLLOWING(.image, 4)
-+	{
-+	_data = .;
-+	*(.data)
-+	}
-+  _data_image = LOADADDR(.data);/* Address of data section in ROM */
++		i2c at 3100 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <1>;
++			compatible = "fsl-i2c";
++			reg = <0x3100 0x100>;
++			interrupts = <0x2b 0x2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++		};
 +
-+  _edata = .;			/* End of data section */
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++			phy0: ethernet-phy at 19 {
++				interrupt-parent = <&mpic>;
++				interrupts = <0x6 0x1>;
++				reg = <0x19>;
++				device_type = "ethernet-phy";
++			};
++			phy1: ethernet-phy at 1a {
++				interrupt-parent = <&mpic>;
++				interrupts = <0x7 0x1>;
++				reg = <0x1a>;
++				device_type = "ethernet-phy";
++			};
++			phy2: ethernet-phy at 1b {
++				interrupt-parent = <&mpic>;
++				interrupts = <0x8 0x1>;
++				reg = <0x1b>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy at 1c {
++				interrupt-parent = <&mpic>;
++				interrupts = <0x8 0x1>;
++				reg = <0x1c>;
++				device_type = "ethernet-phy";
++			};
++		};
 +
-+  .stack : { stack = .;  _stack = .; }
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy0>;
++		};
 +
-+  . = ALIGN(4);
-+  __bss_start = .;		/* BSS */
-+  .bss : {
-+	*(.bss)
-+	}
-+  . = ALIGN(4);
-+  _end = . ;
-+}
-diff --git a/arch/sh/cchips/voyagergx/Makefile b/arch/sh/cchips/voyagergx/Makefile
-deleted file mode 100644
-index f73963c..0000000
---- a/arch/sh/cchips/voyagergx/Makefile
-+++ /dev/null
-@@ -1,9 +0,0 @@
--#
--# Makefile for VoyagerGX
--#
--
--obj-y	:= irq.o setup.o
--
--obj-$(CONFIG_USB_OHCI_HCD)	+= consistent.o
--
--EXTRA_CFLAGS += -Werror
-diff --git a/arch/sh/cchips/voyagergx/consistent.c b/arch/sh/cchips/voyagergx/consistent.c
-deleted file mode 100644
-index 07e8b9c..0000000
---- a/arch/sh/cchips/voyagergx/consistent.c
-+++ /dev/null
-@@ -1,121 +0,0 @@
--/*
-- * arch/sh/cchips/voyagergx/consistent.c
-- *
-- * Copyright (C) 2004  Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/mm.h>
--#include <linux/dma-mapping.h>
--#include <linux/slab.h>
--#include <linux/list.h>
--#include <linux/types.h>
--#include <linux/module.h>
--#include <linux/device.h>
--#include <asm/io.h>
--
--
--struct voya_alloc_entry {
--	struct list_head list;
--	unsigned long ofs;
--	unsigned long len;
--};
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy1>;
++		};
++
++		mpic: pic at 40000 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#size-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x40000 0x40000>;
++			device_type = "open-pic";
++		};
++
++		cpm at 919c0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8560-cpm", "fsl,cpm2";
++			reg = <0x919c0 0x30>;
++			ranges;
++
++			muram at 80000 {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0x0 0x80000 0x10000>;
++
++				data at 0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0x0 0x4000 0x9000 0x2000>;
++				};
++			};
++
++			brg at 919f0 {
++				compatible = "fsl,mpc8560-brg",
++				             "fsl,cpm2-brg",
++				             "fsl,cpm-brg";
++				reg = <0x919f0 0x10 0x915f0 0x10>;
++				clock-frequency = <165000000>;
++			};
++
++			cpmpic: pic at 90c00 {
++				interrupt-controller;
++				#address-cells = <0>;
++				#interrupt-cells = <2>;
++				interrupts = <0x2e 0x2>;
++				interrupt-parent = <&mpic>;
++				reg = <0x90c00 0x80>;
++				compatible = "fsl,mpc8560-cpm-pic", "fsl,cpm2-pic";
++			};
++
++			enet2: ethernet at 91320 {
++				device_type = "network";
++				compatible = "fsl,mpc8560-fcc-enet",
++				             "fsl,cpm2-fcc-enet";
++				reg = <0x91320 0x20 0x88500 0x100 0x913b0 0x1>;
++				local-mac-address = [ 00 00 00 00 00 00 ];
++				fsl,cpm-command = <0x16200300>;
++				interrupts = <0x21 0x8>;
++				interrupt-parent = <&cpmpic>;
++				phy-handle = <&phy2>;
++			};
++
++			enet3: ethernet at 91340 {
++				device_type = "network";
++				compatible = "fsl,mpc8560-fcc-enet",
++				             "fsl,cpm2-fcc-enet";
++				reg = <0x91340 0x20 0x88600 0x100 0x913d0 0x1>;
++				local-mac-address = [ 00 00 00 00 00 00 ];
++				fsl,cpm-command = <0x1a400300>;
++				interrupts = <0x22 0x8>;
++				interrupt-parent = <&cpmpic>;
++				phy-handle = <&phy3>;
++			};
++		};
++
++		global-utilities at e0000 {
++			compatible = "fsl,mpc8560-guts";
++			reg = <0xe0000 0x1000>;
++			fsl,has-rstcr;
++		};
++	};
++
++	pci0: pci at ff708000 {
++		cell-index = <0>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
++		device_type = "pci";
++		reg = <0xff708000 0x1000>;
++		clock-frequency = <66666666>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++
++			/* IDSEL 0x02 */
++			0x1000 0x0 0x0 0x1 &mpic 0x2 0x1
++			0x1000 0x0 0x0 0x2 &mpic 0x3 0x1
++			0x1000 0x0 0x0 0x3 &mpic 0x4 0x1
++			0x1000 0x0 0x0 0x4 &mpic 0x5 0x1>;
++
++		interrupt-parent = <&mpic>;
++		interrupts = <0x18 0x2>;
++		bus-range = <0x0 0x0>;
++		ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000
++			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
++	};
++
++	localbus at ff705000 {
++		compatible = "fsl,mpc8560-localbus";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		reg = <0xff705000 0x100>;	// BRx, ORx, etc.
++
++		ranges = <
++			0x0 0x0 0xff800000 0x0800000	// 8MB boot flash
++			0x1 0x0 0xe4000000 0x4000000	// 64MB flash
++			0x3 0x0 0x20000000 0x4000000	// 64MB SDRAM
++			0x4 0x0 0x24000000 0x4000000	// 64MB SDRAM
++			0x5 0x0 0xfc000000 0x0c00000	// EPLD
++			0x6 0x0 0xe0000000 0x4000000	// 64MB flash
++			0x7 0x0 0x80000000 0x0200000	// ATM1,2
++		>;
++
++		epld at 5,0 {
++			compatible = "wrs,epld-localbus";
++			#address-cells = <2>;
++			#size-cells = <1>;
++			reg = <0x5 0x0 0xc00000>;
++			ranges = <
++				0x0 0x0 0x5 0x000000 0x1fff	// LED disp.
++				0x1 0x0 0x5 0x100000 0x1fff	// switches
++				0x2 0x0 0x5 0x200000 0x1fff	// ID reg.
++				0x3 0x0 0x5 0x300000 0x1fff	// status reg.
++				0x4 0x0 0x5 0x400000 0x1fff	// reset reg.
++				0x5 0x0 0x5 0x500000 0x1fff	// Wind port
++				0x7 0x0 0x5 0x700000 0x1fff	// UART #1
++				0x8 0x0 0x5 0x800000 0x1fff	// UART #2
++				0x9 0x0 0x5 0x900000 0x1fff	// RTC
++				0xb 0x0 0x5 0xb00000 0x1fff	// EEPROM
++			>;
++
++			bidr at 2,0 {
++				compatible = "wrs,sbc8560-bidr";
++				reg = <0x2 0x0 0x10>;
++			};
++
++			bcsr at 3,0 {
++				compatible = "wrs,sbc8560-bcsr";
++				reg = <0x3 0x0 0x10>;
++			};
++
++			brstcr at 4,0 {
++				compatible = "wrs,sbc8560-brstcr";
++				reg = <0x4 0x0 0x10>;
++			};
++
++			serial0: serial at 7,0 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <0x7 0x0 0x100>;
++				clock-frequency = <1843200>;
++				interrupts = <0x9 0x2>;
++				interrupt-parent = <&mpic>;
++			};
++
++			serial1: serial at 8,0 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <0x8 0x0 0x100>;
++				clock-frequency = <1843200>;
++				interrupts = <0xa 0x2>;
++				interrupt-parent = <&mpic>;
++			};
++
++			rtc at 9,0 {
++				compatible = "m48t59";
++				reg = <0x9 0x0 0x1fff>;
++			};
++		};
++	};
++};
+diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
+index 10784ff..d9046c1 100644
+--- a/arch/powerpc/boot/dts/sequoia.dts
++++ b/arch/powerpc/boot/dts/sequoia.dts
+@@ -17,14 +17,24 @@
+ 	#size-cells = <1>;
+ 	model = "amcc,sequoia";
+ 	compatible = "amcc,sequoia";
+-	dcr-parent = <&/cpus/PowerPC,440EPx at 0>;
++	dcr-parent = <&/cpus/cpu at 0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		ethernet1 = &EMAC1;
++		serial0 = &UART0;
++		serial1 = &UART1;
++		serial2 = &UART2;
++		serial3 = &UART3;
++	};
+ 
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		PowerPC,440EPx at 0 {
++		cpu at 0 {
+ 			device_type = "cpu";
++			model = "PowerPC,440EPx";
+ 			reg = <0>;
+ 			clock-frequency = <0>; /* Filled in by zImage */
+ 			timebase-frequency = <0>; /* Filled in by zImage */
+@@ -94,7 +104,6 @@
+ 		clock-frequency = <0>; /* Filled in by zImage */
+ 
+ 		SDRAM0: sdram {
+-			device_type = "memory-controller";
+ 			compatible = "ibm,sdram-440epx", "ibm,sdram-44x-ddr2denali";
+ 			dcr-reg = <010 2>;
+ 		};
+@@ -122,6 +131,13 @@
+ 			interrupt-map-mask = <ffffffff>;
+ 		};
+ 
++		USB1: usb at e0000400 {
++			compatible = "ohci-be";
++			reg = <0 e0000400 60>;
++			interrupt-parent = <&UIC0>;
++			interrupts = <15 8>;
++		};
++
+ 		POB0: opb {
+ 		  	compatible = "ibm,opb-440epx", "ibm,opb";
+ 			#address-cells = <1>;
+@@ -308,6 +324,33 @@
+ 				has-new-stacr-staopc;
+ 			};
+ 		};
++
++		PCI0: pci at 1ec000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb440epx-pci", "ibm,plb-pci";
++			primary;
++			reg = <1 eec00000 8	/* Config space access */
++			       1 eed00000 4	/* IACK */
++			       1 eed00000 4	/* Special cycle */
++			       1 ef400000 40>;	/* Internal registers */
++
++			/* Outbound ranges, one memory and one IO,
++			 * later cannot be changed. Chip supports a second
++			 * IO range but we don't use it for now
++			 */
++			ranges = <02000000 0 80000000 1 80000000 0 10000000
++				01000000 0 00000000 1 e8000000 0 00100000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
++
++			/* All PCI interrupts are routed to IRQ 67 */
++			interrupt-map-mask = <0000 0 0 0>;
++			interrupt-map = < 0000 0 0 0 &UIC2 3 8 >;
++		};
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/powerpc/boot/dts/storcenter.dts b/arch/powerpc/boot/dts/storcenter.dts
+new file mode 100644
+index 0000000..2204874
+--- /dev/null
++++ b/arch/powerpc/boot/dts/storcenter.dts
+@@ -0,0 +1,141 @@
++/*
++ * Device Tree Source for IOMEGA StorCenter
++ *
++ * Copyright 2007 Oyvind Repvik
++ * Copyright 2007 Jon Loeliger
++ *
++ * Based on the Kurobox DTS by G. Liakhovetski <g.liakhovetski at gmx.de>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2.  This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++/dts-v1/;
++
++/ {
++	model = "StorCenter";
++	compatible = "storcenter";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8241 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			clock-frequency = <200000000>;
++			timebase-frequency = <25000000>;
++			bus-frequency = <0>;	/* from bootwrapper */
++			i-cache-line-size = <32>;
++			d-cache-line-size = <32>;
++			i-cache-size = <16384>;
++			d-cache-size = <16384>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x04000000>;	/* 64MB @ 0x0 */
++	};
++
++	soc at fc000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		compatible = "fsl,mpc8241", "mpc10x";
++		store-gathering = <0>; /* 0 == off, !0 == on */
++		ranges = <0x0 0xfc000000 0x100000>;
++		reg = <0xfc000000 0x100000>;	/* EUMB */
++		bus-frequency = <0>;		/* fixed by loader */
++
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <5 2>;
++			interrupt-parent = <&mpic>;
++
++			rtc at 68 {
++				compatible = "dallas,ds1337";
++				reg = <68>;
++			};
++		};
++
++		serial0: serial at 4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x20>;
++			clock-frequency = <97553800>; /* Hz */
++			current-speed = <115200>;
++			interrupts = <9 2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		serial1: serial at 4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x20>;
++			clock-frequency = <97553800>; /* Hz */
++			current-speed = <9600>;
++			interrupts = <10 2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		mpic: interrupt-controller at 40000 {
++			#interrupt-cells = <2>;
++			device_type = "open-pic";
++			compatible = "chrp,open-pic";
++			interrupt-controller;
++			reg = <0x40000 0x40000>;
++		};
++
++	};
++
++	pci0: pci at fe800000 {
++		#address-cells = <3>;
++		#size-cells = <2>;
++		#interrupt-cells = <1>;
++		device_type = "pci";
++		compatible = "mpc10x-pci";
++		reg = <0xfe800000 0x1000>;
++		ranges = <0x01000000 0x0        0x0 0xfe000000 0x0 0x00c00000
++			  0x02000000 0x0 0x80000000 0x80000000 0x0 0x70000000>;
++		bus-range = <0 0xff>;
++		clock-frequency = <97553800>;
++		interrupt-parent = <&mpic>;
++		interrupt-map-mask = <0xf800 0 0 7>;
++		interrupt-map = <
++			/* IDSEL 13 - IDE */
++			0x6800 0 0 1 &mpic 0 1
++			0x6800 0 0 2 &mpic 0 1
++			0x6800 0 0 3 &mpic 0 1
++			0x6800 0 0 4 &mpic 0 1
++			/* IDSEL 14 - USB */
++			0x7000 0 0 1 &mpic 0 1
++			0x7000 0 0 2 &mpic 0 1
++			0x7000 0 0 3 &mpic 0 1
++			0x7000 0 0 4 &mpic 0 1
++			/* IDSEL 15 - ETH */
++			0x7800 0 0 1 &mpic 0 1
++			0x7800 0 0 2 &mpic 0 1
++			0x7800 0 0 3 &mpic 0 1
++			0x7800 0 0 4 &mpic 0 1
++		>;
++	};
++
++	chosen {
++		linux,stdout-path = "/soc/serial at 4500";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/stx_gp3_8560.dts b/arch/powerpc/boot/dts/stx_gp3_8560.dts
+new file mode 100644
+index 0000000..f81fd7f
+--- /dev/null
++++ b/arch/powerpc/boot/dts/stx_gp3_8560.dts
+@@ -0,0 +1,228 @@
++/*
++ * STX GP3 - 8560 ADS Device Tree Source
++ *
++ * Copyright 2008 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/dts-v1/;
++
++/ {
++	model = "stx,gp3";
++	compatible = "stx,gp3-8560", "stx,gp3";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		pci0 = &pci0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8560 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;
++	};
++
++	soc at fdf00000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0 0xfdf00000 0x100000>;
++		reg = <0xfdf00000 0x1000>;
++		bus-frequency = <0>;
++		compatible = "fsl,mpc8560-immr", "simple-bus";
++
++		memory-controller at 2000 {
++			compatible = "fsl,8540-memory-controller";
++			reg = <0x2000 0x1000>;
++			interrupt-parent = <&mpic>;
++			interrupts = <18 2>;
++		};
++
++		l2-cache-controller at 20000 {
++			compatible = "fsl,8540-l2-cache-controller";
++			reg = <0x20000 0x1000>;
++			cache-line-size = <32>;
++			cache-size = <0x40000>;	// L2, 256K
++			interrupt-parent = <&mpic>;
++			interrupts = <16 2>;
++		};
++
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <43 2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++		};
++
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
++			phy2: ethernet-phy at 2 {
++				interrupt-parent = <&mpic>;
++				interrupts = <5 4>;
++				reg = <2>;
++				device_type = "ethernet-phy";
++			};
++			phy4: ethernet-phy at 4 {
++				interrupt-parent = <&mpic>;
++				interrupts = <5 4>;
++				reg = <4>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <29 2 30 2 34 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 2 36 2 40 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy4>;
++		};
++
++		mpic: pic at 40000 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x40000 0x40000>;
++			device_type = "open-pic";
++		};
++
++		cpm at 919c0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8560-cpm", "fsl,cpm2", "simple-bus";
++			reg = <0x919c0 0x30>;
++			ranges;
++
++			muram at 80000 {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0 0x80000 0x10000>;
++
++				data at 0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0 0x4000 0x9000 0x2000>;
++				};
++			};
++
++			brg at 919f0 {
++				compatible = "fsl,mpc8560-brg",
++				             "fsl,cpm2-brg",
++				             "fsl,cpm-brg";
++				reg = <0x919f0 0x10 0x915f0 0x10>;
++				clock-frequency = <0>;
++			};
++
++			cpmpic: pic at 90c00 {
++				interrupt-controller;
++				#address-cells = <0>;
++				#interrupt-cells = <2>;
++				interrupts = <46 2>;
++				interrupt-parent = <&mpic>;
++				reg = <0x90c00 0x80>;
++				compatible = "fsl,mpc8560-cpm-pic", "fsl,cpm2-pic";
++			};
++
++			serial0: serial at 91a20 {
++				device_type = "serial";
++				compatible = "fsl,mpc8560-scc-uart",
++				             "fsl,cpm2-scc-uart";
++				reg = <0x91a20 0x20 0x88100 0x100>;
++				fsl,cpm-brg = <2>;
++				fsl,cpm-command = <0x4a00000>;
++				interrupts = <41 8>;
++				interrupt-parent = <&cpmpic>;
++			};
++		};
++	};
++
++	pci0: pci at fdf08000 {
++		cell-index = <0>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++
++			/* IDSEL 0x0c */
++			0x6000 0 0 1 &mpic 1 1
++			0x6000 0 0 2 &mpic 2 1
++			0x6000 0 0 3 &mpic 3 1
++			0x6000 0 0 4 &mpic 4 1
++
++			/* IDSEL 0x0d */
++			0x6800 0 0 1 &mpic 4 1
++			0x6800 0 0 2 &mpic 1 1
++			0x6800 0 0 3 &mpic 2 1
++			0x6800 0 0 4 &mpic 3 1
++
++			/* IDSEL 0x0e */
++			0x7000 0 0 1 &mpic 3 1
++			0x7000 0 0 2 &mpic 4 1
++			0x7000 0 0 3 &mpic 1 1
++			0x7000 0 0 4 &mpic 2 1
++
++			/* IDSEL 0x0f */
++			0x7800 0 0 1 &mpic 2 1
++			0x7800 0 0 2 &mpic 3 1
++			0x7800 0 0 3 &mpic 4 1
++			0x7800 0 0 4 &mpic 1 1>;
++
++		interrupt-parent = <&mpic>;
++		interrupts = <24 2>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0 0x80000000 0x80000000 0 0x20000000
++			  0x01000000 0 0x00000000 0xe2000000 0 0x00100000>;
++		clock-frequency = <66666666>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		reg = <0xfdf08000 0x1000>;
++		compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
++		device_type = "pci";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/taishan.dts b/arch/powerpc/boot/dts/taishan.dts
+new file mode 100644
+index 0000000..0706a4a
+--- /dev/null
++++ b/arch/powerpc/boot/dts/taishan.dts
+@@ -0,0 +1,383 @@
++/*
++ * Device Tree Source for IBM/AMCC Taishan
++ *
++ * Copyright 2007 IBM Corp.
++ * Hugh Blemings <hugh at au.ibm.com> based off code by
++ * Josh Boyer <jwboyer at linux.vnet.ibm.com>, David Gibson <dwg at au1.ibm.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2.  This program is licensed "as is" without
++ * any warranty of any kind, whether express or implied.
++ */
++
++/ {
++	#address-cells = <2>;
++	#size-cells = <1>;
++	model = "amcc,taishan";
++	compatible = "amcc,taishan";
++	dcr-parent = <&/cpus/cpu at 0>;
++
++	aliases {
++		ethernet0 = &EMAC2;
++		ethernet1 = &EMAC3;
++		serial0 = &UART0;
++		serial1 = &UART1;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu at 0 {
++			device_type = "cpu";
++			model = "PowerPC,440GX";
++			reg = <0>;
++			clock-frequency = <2FAF0800>; // 800MHz
++			timebase-frequency = <0>; // Filled in by zImage
++			i-cache-line-size = <32>;
++			d-cache-line-size = <32>;
++			i-cache-size = <8000>; /* 32 kB */
++			d-cache-size = <8000>; /* 32 kB */
++			dcr-controller;
++			dcr-access-method = "native";
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0 0 0>; // Filled in by zImage
++	};
++
++
++	UICB0: interrupt-controller-base {
++		compatible = "ibm,uic-440gx", "ibm,uic";
++		interrupt-controller;
++		cell-index = <3>;
++		dcr-reg = <200 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++	};
++
++
++	UIC0: interrupt-controller0 {
++		compatible = "ibm,uic-440gx", "ibm,uic";
++		interrupt-controller;
++		cell-index = <0>;
++		dcr-reg = <0c0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <01 4 00 4>; /* cascade - first non-critical */
++		interrupt-parent = <&UICB0>;
++
++	};
++
++	UIC1: interrupt-controller1 {
++		compatible = "ibm,uic-440gx", "ibm,uic";
++		interrupt-controller;
++		cell-index = <1>;
++		dcr-reg = <0d0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <03 4 02 4>; /* cascade */
++		interrupt-parent = <&UICB0>;
++	};
++
++	UIC2: interrupt-controller2 {
++		compatible = "ibm,uic-440gx", "ibm,uic";
++		interrupt-controller;
++		cell-index = <2>; /* was 1 */
++		dcr-reg = <210 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <05 4 04 4>; /* cascade */
++		interrupt-parent = <&UICB0>;
++	};
++
++
++	CPC0: cpc {
++		compatible = "ibm,cpc-440gp";
++		dcr-reg = <0b0 003 0e0 010>;
++		// FIXME: anything else?
++	};
++
++	plb {
++		compatible = "ibm,plb-440gx", "ibm,plb4";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		ranges;
++		clock-frequency = <9896800>; // 160MHz
++
++		SDRAM0: memory-controller {
++			compatible = "ibm,sdram-440gp";
++			dcr-reg = <010 2>;
++			// FIXME: anything else?
++		};
++
++		SRAM0: sram {
++			compatible = "ibm,sram-440gp";
++			dcr-reg = <020 8 00a 1>;
++		};
++
++		DMA0: dma {
++			// FIXME: ???
++			compatible = "ibm,dma-440gp";
++			dcr-reg = <100 027>;
++		};
++
++		MAL0: mcmal {
++			compatible = "ibm,mcmal-440gx", "ibm,mcmal2";
++			dcr-reg = <180 62>;
++			num-tx-chans = <4>;
++			num-rx-chans = <4>;
++			interrupt-parent = <&MAL0>;
++			interrupts = <0 1 2 3 4>;
++			#interrupt-cells = <1>;
++			#address-cells = <0>;
++			#size-cells = <0>;
++			interrupt-map = </*TXEOB*/ 0 &UIC0 a 4
++					 /*RXEOB*/ 1 &UIC0 b 4
++					 /*SERR*/  2 &UIC1 0 4
++					 /*TXDE*/  3 &UIC1 1 4
++					 /*RXDE*/  4 &UIC1 2 4>;
++			interrupt-map-mask = <ffffffff>;
++		};
++
++		POB0: opb {
++			compatible = "ibm,opb-440gx", "ibm,opb";
++			#address-cells = <1>;
++			#size-cells = <1>;
++			/* Wish there was a nicer way of specifying a full 32-bit
++			   range */
++			ranges = <00000000 1 00000000 80000000
++				  80000000 1 80000000 80000000>;
++			dcr-reg = <090 00b>;
++			interrupt-parent = <&UIC1>;
++			interrupts = <7 4>;
++			clock-frequency = <4C4B400>; // 80MHz
++
++
++			EBC0: ebc {
++				compatible = "ibm,ebc-440gx", "ibm,ebc";
++				dcr-reg = <012 2>;
++				#address-cells = <2>;
++				#size-cells = <1>;
++				clock-frequency = <4C4B400>; // 80MHz
++
++				/* ranges property is supplied by zImage
++				 * based on firmware's configuration of the
++				 * EBC bridge */
++
++				interrupts = <5 4>;
++				interrupt-parent = <&UIC1>;
++
++				/* TODO: Add other EBC devices */
++			};
++
++
++
++			UART0: serial at 40000200 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <40000200 8>;
++				virtual-reg = <e0000200>;
++ 				clock-frequency = <A8C000>;
++				current-speed = <1C200>; /* 115200 */
++				interrupt-parent = <&UIC0>;
++				interrupts = <0 4>;
++			};
++
++			UART1: serial at 40000300 {
++				device_type = "serial";
++				compatible = "ns16550";
++				reg = <40000300 8>;
++				virtual-reg = <e0000300>;
++				clock-frequency = <A8C000>;
++				current-speed = <1C200>; /* 115200 */
++				interrupt-parent = <&UIC0>;
++				interrupts = <1 4>;
++			};
++
++			IIC0: i2c at 40000400 {
++				/* FIXME */
++				device_type = "i2c";
++				compatible = "ibm,iic-440gp", "ibm,iic";
++				reg = <40000400 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <2 4>;
++			};
++			IIC1: i2c at 40000500 {
++				/* FIXME */
++				device_type = "i2c";
++				compatible = "ibm,iic-440gp", "ibm,iic";
++				reg = <40000500 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <3 4>;
++			};
++
++			GPIO0: gpio at 40000700 {
++				/* FIXME */
++				compatible = "ibm,gpio-440gp";
++				reg = <40000700 20>;
++			};
++
++			ZMII0: emac-zmii at 40000780 {
++				device_type = "zgmii-interface";
++				compatible = "ibm,zmii-440gx", "ibm,zmii";
++				reg = <40000780 c>;
++			};
++
++			RGMII0: emac-rgmii at 40000790 {
++				device_type = "rgmii-interface";
++				compatible = "ibm,rgmii";
++				reg = <40000790 8>;
++			};
++
++
++			EMAC0: ethernet at 40000800 {
++				unused = <1>;
++				linux,network-index = <2>;
++				device_type = "network";
++				compatible = "ibm,emac-440gx", "ibm,emac4";
++				interrupt-parent = <&UIC1>;
++				interrupts = <1c 4 1d 4>;
++				reg = <40000800 70>;
++				local-mac-address = [000000000000]; // Filled in by zImage
++				mal-device = <&MAL0>;
++				mal-tx-channel = <0>;
++				mal-rx-channel = <0>;
++				cell-index = <0>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rmii";
++				phy-map = <00000001>;
++				zmii-device = <&ZMII0>;
++				zmii-channel = <0>;
++			};
++		 	EMAC1: ethernet at 40000900 {
++				unused = <1>;
++				linux,network-index = <3>;
++				device_type = "network";
++				compatible = "ibm,emac-440gx", "ibm,emac4";
++				interrupt-parent = <&UIC1>;
++				interrupts = <1e 4 1f 4>;
++				reg = <40000900 70>;
++				local-mac-address = [000000000000]; // Filled in by zImage
++				mal-device = <&MAL0>;
++				mal-tx-channel = <1>;
++				mal-rx-channel = <1>;
++				cell-index = <1>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rmii";
++				phy-map = <00000001>;
++ 				zmii-device = <&ZMII0>;
++				zmii-channel = <1>;
++			};
++
++		 	EMAC2: ethernet at 40000c00 {
++				linux,network-index = <0>;
++				device_type = "network";
++				compatible = "ibm,emac-440gx", "ibm,emac4";
++				interrupt-parent = <&UIC2>;
++				interrupts = <0 4 1 4>;
++				reg = <40000c00 70>;
++				local-mac-address = [000000000000]; // Filled in by zImage
++				mal-device = <&MAL0>;
++				mal-tx-channel = <2>;
++				mal-rx-channel = <2>;
++				cell-index = <2>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rgmii";
++				phy-map = <00000001>;
++				rgmii-device = <&RGMII0>;
++				rgmii-channel = <0>;
++ 				zmii-device = <&ZMII0>;
++				zmii-channel = <2>;
++			};
++
++		 	EMAC3: ethernet at 40000e00 {
++				linux,network-index = <1>;
++				device_type = "network";
++				compatible = "ibm,emac-440gx", "ibm,emac4";
++				interrupt-parent = <&UIC2>;
++				interrupts = <2 4 3 4>;
++				reg = <40000e00 70>;
++				local-mac-address = [000000000000]; // Filled in by zImage
++				mal-device = <&MAL0>;
++				mal-tx-channel = <3>;
++				mal-rx-channel = <3>;
++				cell-index = <3>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rgmii";
++				phy-map = <00000003>;
++				rgmii-device = <&RGMII0>;
++				rgmii-channel = <1>;
++ 				zmii-device = <&ZMII0>;
++				zmii-channel = <3>;
++			};
++
++
++			GPT0: gpt at 40000a00 {
++				/* FIXME */
++				reg = <40000a00 d4>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <12 4 13 4 14 4 15 4 16 4>;
++			};
++
++		};
++
++		PCIX0: pci at 20ec00000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb440gp-pcix", "ibm,plb-pcix";
++			primary;
++			large-inbound-windows;
++			enable-msi-hole;
++			reg = <2 0ec00000   8	/* Config space access */
++			       0 0 0		/* no IACK cycles */
++			       2 0ed00000   4   /* Special cycles */
++			       2 0ec80000 100	/* Internal registers */
++			       2 0ec80100  fc>;	/* Internal messaging registers */
++
++			/* Outbound ranges, one memory and one IO,
++			 * which cannot be changed later
++			 */
++			ranges = <02000000 0 80000000 00000003 80000000 0 80000000
++				  01000000 0 00000000 00000002 08000000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 0 80000000>;
++
++			interrupt-map-mask = <f800 0 0 7>;
++			interrupt-map = <
++				/* IDSEL 1 */
++				0800 0 0 1 &UIC0 17 8
++				0800 0 0 2 &UIC0 18 8
++				0800 0 0 3 &UIC0 19 8
++				0800 0 0 4 &UIC0 1a 8
++
++				/* IDSEL 2 */
++				1000 0 0 1 &UIC0 18 8
++				1000 0 0 2 &UIC0 19 8
++				1000 0 0 3 &UIC0 1a 8
++				1000 0 0 4 &UIC0 17 8
++			>;
++		};
++	};
++
++	chosen {
++		linux,stdout-path = "/plb/opb/serial at 40000300";
++	};
++};
+diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts
+new file mode 100644
+index 0000000..c86464f
+--- /dev/null
++++ b/arch/powerpc/boot/dts/tqm5200.dts
+@@ -0,0 +1,177 @@
++/*
++ * TQM5200 board Device Tree Source
++ *
++ * Copyright (C) 2007 Semihalf
++ * Marian Balakowicz <m8 at semihalf.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/ {
++	model = "tqc,tqm5200";
++	compatible = "tqc,tqm5200";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,5200 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <20>;
++			i-cache-line-size = <20>;
++			d-cache-size = <4000>;		// L1, 16K
++			i-cache-size = <4000>;		// L1, 16K
++			timebase-frequency = <0>;	// from bootloader
++			bus-frequency = <0>;		// from bootloader
++			clock-frequency = <0>;		// from bootloader
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <00000000 04000000>;	// 64MB
++	};
++
++	soc5200 at f0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc5200-immr";
++		ranges = <0 f0000000 0000c000>;
++		reg = <f0000000 00000100>;
++		bus-frequency = <0>;		// from bootloader
++		system-frequency = <0>;		// from bootloader
++
++		cdm at 200 {
++			compatible = "fsl,mpc5200-cdm";
++			reg = <200 38>;
++		};
++
++		mpc5200_pic: interrupt-controller at 500 {
++			// 5200 interrupts are encoded into two levels;
++			interrupt-controller;
++			#interrupt-cells = <3>;
++			compatible = "fsl,mpc5200-pic";
++			reg = <500 80>;
++		};
++
++		timer at 600 {	// General Purpose Timer
++			compatible = "fsl,mpc5200-gpt";
++			reg = <600 10>;
++			interrupts = <1 9 0>;
++			interrupt-parent = <&mpc5200_pic>;
++			fsl,has-wdt;
++		};
++
++		gpio at b00 {
++			compatible = "fsl,mpc5200-gpio";
++			reg = <b00 40>;
++			interrupts = <1 7 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		usb at 1000 {
++			compatible = "fsl,mpc5200-ohci","ohci-be";
++			reg = <1000 ff>;
++			interrupts = <2 6 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		dma-controller at 1200 {
++			compatible = "fsl,mpc5200-bestcomm";
++			reg = <1200 80>;
++			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
++			              3 4 0  3 5 0  3 6 0  3 7 0
++			              3 8 0  3 9 0  3 a 0  3 b 0
++			              3 c 0  3 d 0  3 e 0  3 f 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		xlb at 1f00 {
++			compatible = "fsl,mpc5200-xlb";
++			reg = <1f00 100>;
++		};
++
++		serial at 2000 {		// PSC1
++			device_type = "serial";
++			compatible = "fsl,mpc5200-psc-uart";
++			port-number = <0>;  // Logical port assignment
++			reg = <2000 100>;
++			interrupts = <2 1 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		serial at 2200 {		// PSC2
++			device_type = "serial";
++			compatible = "fsl,mpc5200-psc-uart";
++			port-number = <1>;  // Logical port assignment
++			reg = <2200 100>;
++			interrupts = <2 2 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		serial at 2400 {		// PSC3
++			device_type = "serial";
++			compatible = "fsl,mpc5200-psc-uart";
++			port-number = <2>;  // Logical port assignment
++			reg = <2400 100>;
++			interrupts = <2 3 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		ethernet at 3000 {
++			device_type = "network";
++			compatible = "fsl,mpc5200-fec";
++			reg = <3000 800>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <2 5 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		ata at 3a00 {
++			compatible = "fsl,mpc5200-ata";
++			reg = <3a00 100>;
++			interrupts = <2 7 0>;
++			interrupt-parent = <&mpc5200_pic>;
++		};
++
++		i2c at 3d40 {
++			compatible = "fsl,mpc5200-i2c","fsl-i2c";
++			reg = <3d40 40>;
++			interrupts = <2 10 0>;
++			interrupt-parent = <&mpc5200_pic>;
++			fsl5200-clocking;
++		};
++
++		sram at 8000 {
++			compatible = "fsl,mpc5200-sram";
++			reg = <8000 4000>;
++		};
++	};
++
++	pci at f0000d00 {
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		device_type = "pci";
++		compatible = "fsl,mpc5200-pci";
++		reg = <f0000d00 100>;
++		interrupt-map-mask = <f800 0 0 7>;
++		interrupt-map = <c000 0 0 1 &mpc5200_pic 0 0 3
++				 c000 0 0 2 &mpc5200_pic 0 0 3
++				 c000 0 0 3 &mpc5200_pic 0 0 3
++				 c000 0 0 4 &mpc5200_pic 0 0 3>;
++		clock-frequency = <0>; // From boot loader
++		interrupts = <2 8 0 2 9 0 2 a 0>;
++		interrupt-parent = <&mpc5200_pic>;
++		bus-range = <0 0>;
++		ranges = <42000000 0 80000000 80000000 0 10000000
++			  02000000 0 90000000 90000000 0 10000000
++			  01000000 0 00000000 a0000000 0 01000000>;
++	};
++};
+diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
+new file mode 100644
+index 0000000..1addb3a
+--- /dev/null
++++ b/arch/powerpc/boot/dts/tqm8540.dts
+@@ -0,0 +1,204 @@
++/*
++ * TQM 8540 Device Tree Source
++ *
++ * Copyright 2008 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/dts-v1/;
++
++/ {
++	model = "tqm,8540";
++	compatible = "tqm,8540", "tqm,85xx";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8540 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;
++	};
++
++	soc at e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xe0000000 0x100000>;
++		reg = <0xe0000000 0x200>;
++		bus-frequency = <0>;
++		compatible = "fsl,mpc8540-immr", "simple-bus";
++
++		memory-controller at 2000 {
++			compatible = "fsl,8540-memory-controller";
++			reg = <0x2000 0x1000>;
++			interrupt-parent = <&mpic>;
++			interrupts = <18 2>;
++		};
++
++		l2-cache-controller at 20000 {
++			compatible = "fsl,8540-l2-cache-controller";
++			reg = <0x20000 0x1000>;
++			cache-line-size = <32>;
++			cache-size = <0x40000>;	// L2, 256K
++			interrupt-parent = <&mpic>;
++			interrupts = <16 2>;
++		};
++
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <43 2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++
++			rtc at 68 {
++				compatible = "dallas,ds1337";
++				reg = <0x68>;
++			};
++		};
++
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
++			phy1: ethernet-phy at 1 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <1>;
++				device_type = "ethernet-phy";
++			};
++			phy2: ethernet-phy at 2 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy at 3 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <3>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <29 2 30 2 34 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 2 36 2 40 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy1>;
++		};
++
++		enet2: ethernet at 26000 {
++			cell-index = <2>;
++			device_type = "network";
++			model = "FEC";
++			compatible = "gianfar";
++			reg = <0x26000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <41 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy3>;
++		};
++
++		serial0: serial at 4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>; 	// reg base, size
++			clock-frequency = <0>; 	// should this be filled in by U-Boot?
++			interrupts = <42 2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		serial1: serial at 4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;	// reg base, size
++			clock-frequency = <0>; 	// should this be filled in by U-Boot?
++			interrupts = <42 2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		mpic: pic at 40000 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x40000 0x40000>;
++			device_type = "open-pic";
++		};
++	};
++
++	pci0: pci at e0008000 {
++		cell-index = <0>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
++		device_type = "pci";
++		reg = <0xe0008000 0x1000>;
++		clock-frequency = <66666666>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++				/* IDSEL 28 */
++				 0xe000 0 0 1 &mpic 2 1
++				 0xe000 0 0 2 &mpic 3 1>;
++
++		interrupt-parent = <&mpic>;
++		interrupts = <24 2>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0 0x80000000 0x80000000 0 0x20000000
++			  0x01000000 0 0x00000000 0xe2000000 0 0x01000000>;
++	};
++};
+diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
+new file mode 100644
+index 0000000..9e01093
+--- /dev/null
++++ b/arch/powerpc/boot/dts/tqm8541.dts
+@@ -0,0 +1,228 @@
++/*
++ * TQM 8541 Device Tree Source
++ *
++ * Copyright 2008 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/dts-v1/;
++
++/ {
++	model = "tqm,8541";
++	compatible = "tqm,8541", "tqm,85xx";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8541 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;
++	};
++
++	soc at e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xe0000000 0x100000>;
++		reg = <0xe0000000 0x200>;
++		bus-frequency = <0>;
++		compatible = "fsl,mpc8541-immr", "simple-bus";
++
++		memory-controller at 2000 {
++			compatible = "fsl,8540-memory-controller";
++			reg = <0x2000 0x1000>;
++			interrupt-parent = <&mpic>;
++			interrupts = <18 2>;
++		};
++
++		l2-cache-controller at 20000 {
++			compatible = "fsl,8540-l2-cache-controller";
++			reg = <0x20000 0x1000>;
++			cache-line-size = <32>;
++			cache-size = <0x40000>;	// L2, 256K
++			interrupt-parent = <&mpic>;
++			interrupts = <16 2>;
++		};
++
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <43 2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++
++			rtc at 68 {
++				compatible = "dallas,ds1337";
++				reg = <0x68>;
++			};
++		};
++
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
++			phy1: ethernet-phy at 1 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <1>;
++				device_type = "ethernet-phy";
++			};
++			phy2: ethernet-phy at 2 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy at 3 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <3>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <29 2 30 2 34 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 2 36 2 40 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy1>;
++		};
++
++		serial0: serial at 4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>; 	// reg base, size
++			clock-frequency = <0>; 	// should this be filled in by U-Boot?
++			interrupts = <42 2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		serial1: serial at 4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;	// reg base, size
++			clock-frequency = <0>; 	// should this be filled in by U-Boot?
++			interrupts = <42 2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		mpic: pic at 40000 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x40000 0x40000>;
++			device_type = "open-pic";
++		};
++
++		cpm at 919c0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8541-cpm", "fsl,cpm2", "simple-bus";
++			reg = <0x919c0 0x30>;
++			ranges;
++
++			muram at 80000 {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0 0x80000 0x10000>;
++
++				data at 0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0 0x2000 0x9000 0x1000>;
++				};
++			};
++
++			brg at 919f0 {
++				compatible = "fsl,mpc8541-brg",
++				             "fsl,cpm2-brg",
++				             "fsl,cpm-brg";
++				reg = <0x919f0 0x10 0x915f0 0x10>;
++				clock-frequency = <0>;
++			};
++
++			cpmpic: pic at 90c00 {
++				interrupt-controller;
++				#address-cells = <0>;
++				#interrupt-cells = <2>;
++				interrupts = <46 2>;
++				interrupt-parent = <&mpic>;
++				reg = <0x90c00 0x80>;
++				compatible = "fsl,mpc8541-cpm-pic", "fsl,cpm2-pic";
++			};
++		};
++	};
++
++	pci0: pci at e0008000 {
++		cell-index = <0>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
++		device_type = "pci";
++		reg = <0xe0008000 0x1000>;
++		clock-frequency = <66666666>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++				/* IDSEL 28 */
++				 0xe000 0 0 1 &mpic 2 1
++				 0xe000 0 0 2 &mpic 3 1>;
++
++		interrupt-parent = <&mpic>;
++		interrupts = <24 2>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0 0x80000000 0x80000000 0 0x20000000
++			  0x01000000 0 0x00000000 0xe2000000 0 0x01000000>;
++	};
++};
+diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
+new file mode 100644
+index 0000000..a20eb06
+--- /dev/null
++++ b/arch/powerpc/boot/dts/tqm8555.dts
+@@ -0,0 +1,228 @@
++/*
++ * TQM 8555 Device Tree Source
++ *
++ * Copyright 2008 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/dts-v1/;
++
++/ {
++	model = "tqm,8555";
++	compatible = "tqm,8555", "tqm,85xx";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8555 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;
++	};
++
++	soc at e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xe0000000 0x100000>;
++		reg = <0xe0000000 0x200>;
++		bus-frequency = <0>;
++		compatible = "fsl,mpc8555-immr", "simple-bus";
++
++		memory-controller at 2000 {
++			compatible = "fsl,8540-memory-controller";
++			reg = <0x2000 0x1000>;
++			interrupt-parent = <&mpic>;
++			interrupts = <18 2>;
++		};
++
++		l2-cache-controller at 20000 {
++			compatible = "fsl,8540-l2-cache-controller";
++			reg = <0x20000 0x1000>;
++			cache-line-size = <32>;
++			cache-size = <0x40000>;	// L2, 256K
++			interrupt-parent = <&mpic>;
++			interrupts = <16 2>;
++		};
++
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <43 2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++
++			rtc at 68 {
++				compatible = "dallas,ds1337";
++				reg = <0x68>;
++			};
++		};
++
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
++			phy1: ethernet-phy at 1 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <1>;
++				device_type = "ethernet-phy";
++			};
++			phy2: ethernet-phy at 2 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy at 3 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <3>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <29 2 30 2 34 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 2 36 2 40 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy1>;
++		};
++
++		serial0: serial at 4500 {
++			cell-index = <0>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4500 0x100>; 	// reg base, size
++			clock-frequency = <0>; 	// should this be filled in by U-Boot?
++			interrupts = <42 2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		serial1: serial at 4600 {
++			cell-index = <1>;
++			device_type = "serial";
++			compatible = "ns16550";
++			reg = <0x4600 0x100>;	// reg base, size
++			clock-frequency = <0>; 	// should this be filled in by U-Boot?
++			interrupts = <42 2>;
++			interrupt-parent = <&mpic>;
++		};
++
++		mpic: pic at 40000 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x40000 0x40000>;
++			device_type = "open-pic";
++		};
++
++		cpm at 919c0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8555-cpm", "fsl,cpm2", "simple-bus";
++			reg = <0x919c0 0x30>;
++			ranges;
++
++			muram at 80000 {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0 0x80000 0x10000>;
++
++				data at 0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0 0x2000 0x9000 0x1000>;
++				};
++			};
++
++			brg at 919f0 {
++				compatible = "fsl,mpc8555-brg",
++				             "fsl,cpm2-brg",
++				             "fsl,cpm-brg";
++				reg = <0x919f0 0x10 0x915f0 0x10>;
++				clock-frequency = <0>;
++			};
++
++			cpmpic: pic at 90c00 {
++				interrupt-controller;
++				#address-cells = <0>;
++				#interrupt-cells = <2>;
++				interrupts = <46 2>;
++				interrupt-parent = <&mpic>;
++				reg = <0x90c00 0x80>;
++				compatible = "fsl,mpc8555-cpm-pic", "fsl,cpm2-pic";
++			};
++		};
++	};
++
++	pci0: pci at e0008000 {
++		cell-index = <0>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
++		device_type = "pci";
++		reg = <0xe0008000 0x1000>;
++		clock-frequency = <66666666>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++				/* IDSEL 28 */
++				 0xe000 0 0 1 &mpic 2 1
++				 0xe000 0 0 2 &mpic 3 1>;
++
++		interrupt-parent = <&mpic>;
++		interrupts = <24 2>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0 0x80000000 0x80000000 0 0x20000000
++			  0x01000000 0 0x00000000 0xe2000000 0 0x01000000>;
++	};
++};
+diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
+new file mode 100644
+index 0000000..b9ac6c9
+--- /dev/null
++++ b/arch/powerpc/boot/dts/tqm8560.dts
+@@ -0,0 +1,245 @@
++/*
++ * TQM 8560 Device Tree Source
++ *
++ * Copyright 2008 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/dts-v1/;
++
++/ {
++	model = "tqm,8560";
++	compatible = "tqm,8560", "tqm,85xx";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	aliases {
++		ethernet0 = &enet0;
++		ethernet1 = &enet1;
++		ethernet2 = &enet2;
++		serial0 = &serial0;
++		serial1 = &serial1;
++		pci0 = &pci0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,8560 at 0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <32768>;
++			i-cache-size = <32768>;
++			timebase-frequency = <0>;
++			bus-frequency = <0>;
++			clock-frequency = <0>;
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x10000000>;
++	};
++
++	soc at e0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		device_type = "soc";
++		ranges = <0x0 0xe0000000 0x100000>;
++		reg = <0xe0000000 0x200>;
++		bus-frequency = <0>;
++		compatible = "fsl,mpc8560-immr", "simple-bus";
++
++		memory-controller at 2000 {
++			compatible = "fsl,8540-memory-controller";
++			reg = <0x2000 0x1000>;
++			interrupt-parent = <&mpic>;
++			interrupts = <18 2>;
++		};
++
++		l2-cache-controller at 20000 {
++			compatible = "fsl,8540-l2-cache-controller";
++			reg = <0x20000 0x1000>;
++			cache-line-size = <32>;
++			cache-size = <0x40000>;	// L2, 256K
++			interrupt-parent = <&mpic>;
++			interrupts = <16 2>;
++		};
++
++		i2c at 3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			cell-index = <0>;
++			compatible = "fsl-i2c";
++			reg = <0x3000 0x100>;
++			interrupts = <43 2>;
++			interrupt-parent = <&mpic>;
++			dfsrr;
++
++			rtc at 68 {
++				compatible = "dallas,ds1337";
++				reg = <0x68>;
++			};
++		};
++
++		mdio at 24520 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,gianfar-mdio";
++			reg = <0x24520 0x20>;
++
++			phy1: ethernet-phy at 1 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <1>;
++				device_type = "ethernet-phy";
++			};
++			phy2: ethernet-phy at 2 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <2>;
++				device_type = "ethernet-phy";
++			};
++			phy3: ethernet-phy at 3 {
++				interrupt-parent = <&mpic>;
++				interrupts = <8 1>;
++				reg = <3>;
++				device_type = "ethernet-phy";
++			};
++		};
++
++		enet0: ethernet at 24000 {
++			cell-index = <0>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x24000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <29 2 30 2 34 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy2>;
++		};
++
++		enet1: ethernet at 25000 {
++			cell-index = <1>;
++			device_type = "network";
++			model = "TSEC";
++			compatible = "gianfar";
++			reg = <0x25000 0x1000>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <35 2 36 2 40 2>;
++			interrupt-parent = <&mpic>;
++			phy-handle = <&phy1>;
++		};
++
++		mpic: pic at 40000 {
++			interrupt-controller;
++			#address-cells = <0>;
++			#interrupt-cells = <2>;
++			reg = <0x40000 0x40000>;
++			device_type = "open-pic";
++		};
++
++		cpm at 919c0 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "fsl,mpc8560-cpm", "fsl,cpm2", "simple-bus";
++			reg = <0x919c0 0x30>;
++			ranges;
++
++			muram at 80000 {
++				#address-cells = <1>;
++				#size-cells = <1>;
++				ranges = <0 0x80000 0x10000>;
++
++				data at 0 {
++					compatible = "fsl,cpm-muram-data";
++					reg = <0 0x4000 0x9000 0x2000>;
++				};
++			};
++
++			brg at 919f0 {
++				compatible = "fsl,mpc8560-brg",
++				             "fsl,cpm2-brg",
++				             "fsl,cpm-brg";
++				reg = <0x919f0 0x10 0x915f0 0x10>;
++				clock-frequency = <0>;
++			};
++
++			cpmpic: pic at 90c00 {
++				interrupt-controller;
++				#address-cells = <0>;
++				#interrupt-cells = <2>;
++				interrupts = <46 2>;
++				interrupt-parent = <&mpic>;
++				reg = <0x90c00 0x80>;
++				compatible = "fsl,mpc8560-cpm-pic", "fsl,cpm2-pic";
++			};
++
++			serial0: serial at 91a00 {
++				device_type = "serial";
++				compatible = "fsl,mpc8560-scc-uart",
++				             "fsl,cpm2-scc-uart";
++				reg = <0x91a00 0x20 0x88000 0x100>;
++				fsl,cpm-brg = <1>;
++				fsl,cpm-command = <0x800000>;
++				current-speed = <115200>;
++				interrupts = <40 8>;
++				interrupt-parent = <&cpmpic>;
++			};
++
++			serial1: serial at 91a20 {
++				device_type = "serial";
++				compatible = "fsl,mpc8560-scc-uart",
++				             "fsl,cpm2-scc-uart";
++				reg = <0x91a20 0x20 0x88100 0x100>;
++				fsl,cpm-brg = <2>;
++				fsl,cpm-command = <0x4a00000>;
++				current-speed = <115200>;
++				interrupts = <41 8>;
++				interrupt-parent = <&cpmpic>;
++			};
++
++			enet2: ethernet at 91340 {
++				device_type = "network";
++				compatible = "fsl,mpc8560-fcc-enet",
++				             "fsl,cpm2-fcc-enet";
++				reg = <0x91340 0x20 0x88600 0x100 0x913d0 0x1>;
++				local-mac-address = [ 00 00 00 00 00 00 ];
++				fsl,cpm-command = <0x1a400300>;
++				interrupts = <34 8>;
++				interrupt-parent = <&cpmpic>;
++				phy-handle = <&phy3>;
++			};
++		};
++	};
++
++	pci0: pci at e0008000 {
++		cell-index = <0>;
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
++		device_type = "pci";
++		reg = <0xe0008000 0x1000>;
++		clock-frequency = <66666666>;
++		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
++		interrupt-map = <
++				/* IDSEL 28 */
++				 0xe000 0 0 1 &mpic 2 1
++				 0xe000 0 0 2 &mpic 3 1>;
++
++		interrupt-parent = <&mpic>;
++		interrupts = <24 2>;
++		bus-range = <0 0>;
++		ranges = <0x02000000 0 0x80000000 0x80000000 0 0x20000000
++			  0x01000000 0 0x00000000 0xe2000000 0 0x01000000>;
++	};
++};
+diff --git a/arch/powerpc/boot/dts/walnut.dts b/arch/powerpc/boot/dts/walnut.dts
+index 754fa39..dcc21b0 100644
+--- a/arch/powerpc/boot/dts/walnut.dts
++++ b/arch/powerpc/boot/dts/walnut.dts
+@@ -14,14 +14,21 @@
+ 	#size-cells = <1>;
+ 	model = "ibm,walnut";
+ 	compatible = "ibm,walnut";
+-	dcr-parent = <&/cpus/PowerPC,405GP at 0>;
++	dcr-parent = <&/cpus/cpu at 0>;
++
++	aliases {
++		ethernet0 = &EMAC;
++		serial0 = &UART0;
++		serial1 = &UART1;
++	};
+ 
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		PowerPC,405GP at 0 {
++		cpu at 0 {
+ 			device_type = "cpu";
++			model = "PowerPC,405GP";
+ 			reg = <0>;
+ 			clock-frequency = <bebc200>; /* Filled in by zImage */
+ 			timebase-frequency = <0>; /* Filled in by zImage */
+@@ -168,9 +175,10 @@
+ 				};
+ 			};
+ 
+-			ds1743 at 1,0 {
++			nvram at 1,0 {
+ 				/* NVRAM and RTC */
+-				compatible = "ds1743";
++				compatible = "ds1743-nvram";
++				#bytes = <2000>;
+ 				reg = <1 0 2000>;
+ 			};
+ 
+@@ -190,6 +198,45 @@
+ 				virtual-reg = <f0300005>;
+ 			};
+ 		};
++
++		PCI0: pci at ec000000 {
++			device_type = "pci";
++			#interrupt-cells = <1>;
++			#size-cells = <2>;
++			#address-cells = <3>;
++			compatible = "ibm,plb405gp-pci", "ibm,plb-pci";
++			primary;
++			reg = <eec00000 8	/* Config space access */
++			       eed80000 4	/* IACK */
++			       eed80000 4	/* Special cycle */
++			       ef480000 40>;	/* Internal registers */
++
++			/* Outbound ranges, one memory and one IO,
++			 * which cannot be changed later. The chip supports a
++			 * second IO range but we don't use it for now
++			 */
++			ranges = <02000000 0 80000000 80000000 0 20000000
++				  01000000 0 00000000 e8000000 0 00010000>;
++
++			/* Inbound 2GB range starting at 0 */
++			dma-ranges = <42000000 0 0 0 0 80000000>;
++
++			/* Walnut has all 4 IRQ pins tied together per slot */
++			interrupt-map-mask = <f800 0 0 0>;
++			interrupt-map = <
++				/* IDSEL 1 */
++				0800 0 0 0 &UIC0 1c 8
++
++				/* IDSEL 2 */
++				1000 0 0 0 &UIC0 1d 8
++
++				/* IDSEL 3 */
++				1800 0 0 0 &UIC0 1e 8
++
++				/* IDSEL 4 */
++				2000 0 0 0 &UIC0 1f 8
++			>;
++		};
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/powerpc/boot/dts/warp.dts b/arch/powerpc/boot/dts/warp.dts
+new file mode 100644
+index 0000000..dc1499d
+--- /dev/null
++++ b/arch/powerpc/boot/dts/warp.dts
+@@ -0,0 +1,239 @@
++/*
++ * Device Tree Source for PIKA Warp
++ *
++ * Copyright (c) 2008 PIKA Technologies
++ *   Sean MacLennan <smaclennan at pikatech.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2.  This program is licensed "as is" without
++ * any warranty of any kind, whether express or implied.
++ */
++
++/ {
++	#address-cells = <2>;
++	#size-cells = <1>;
++	model = "pika,warp";
++	compatible = "pika,warp";
++	dcr-parent = <&/cpus/cpu at 0>;
++
++	aliases {
++		ethernet0 = &EMAC0;
++		serial0 = &UART0;
++	};
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu at 0 {
++			device_type = "cpu";
++			model = "PowerPC,440EP";
++			reg = <0>;
++			clock-frequency = <0>; /* Filled in by zImage */
++			timebase-frequency = <0>; /* Filled in by zImage */
++			i-cache-line-size = <20>;
++			d-cache-line-size = <20>;
++			i-cache-size = <8000>;
++			d-cache-size = <8000>;
++			dcr-controller;
++			dcr-access-method = "native";
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0 0 0>; /* Filled in by zImage */
++	};
++
++	UIC0: interrupt-controller0 {
++		compatible = "ibm,uic-440ep","ibm,uic";
++		interrupt-controller;
++		cell-index = <0>;
++		dcr-reg = <0c0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++	};
++
++	UIC1: interrupt-controller1 {
++		compatible = "ibm,uic-440ep","ibm,uic";
++		interrupt-controller;
++		cell-index = <1>;
++		dcr-reg = <0d0 009>;
++		#address-cells = <0>;
++		#size-cells = <0>;
++		#interrupt-cells = <2>;
++		interrupts = <1e 4 1f 4>; /* cascade */
++		interrupt-parent = <&UIC0>;
++	};
++
++	SDR0: sdr {
++		compatible = "ibm,sdr-440ep";
++		dcr-reg = <00e 002>;
++	};
++
++	CPR0: cpr {
++		compatible = "ibm,cpr-440ep";
++		dcr-reg = <00c 002>;
++	};
++
++	plb {
++		compatible = "ibm,plb-440ep", "ibm,plb-440gp", "ibm,plb4";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		ranges;
++		clock-frequency = <0>; /* Filled in by zImage */
++
++		SDRAM0: sdram {
++			compatible = "ibm,sdram-440ep", "ibm,sdram-405gp";
++			dcr-reg = <010 2>;
++		};
++
++		DMA0: dma {
++			compatible = "ibm,dma-440ep", "ibm,dma-440gp";
++			dcr-reg = <100 027>;
++		};
++
++		MAL0: mcmal {
++			compatible = "ibm,mcmal-440ep", "ibm,mcmal-440gp", "ibm,mcmal";
++			dcr-reg = <180 62>;
++			num-tx-chans = <4>;
++			num-rx-chans = <2>;
++			interrupt-parent = <&MAL0>;
++			interrupts = <0 1 2 3 4>;
++			#interrupt-cells = <1>;
++			#address-cells = <0>;
++			#size-cells = <0>;
++			interrupt-map = </*TXEOB*/ 0 &UIC0 a 4
++					/*RXEOB*/ 1 &UIC0 b 4
++					/*SERR*/  2 &UIC1 0 4
++					/*TXDE*/  3 &UIC1 1 4
++					/*RXDE*/  4 &UIC1 2 4>;
++		};
++
++		POB0: opb {
++		  	compatible = "ibm,opb-440ep", "ibm,opb-440gp", "ibm,opb";
++			#address-cells = <1>;
++			#size-cells = <1>;
++		  	ranges = <00000000 0 00000000 80000000
++			          80000000 0 80000000 80000000>;
++		  	interrupt-parent = <&UIC1>;
++		  	interrupts = <7 4>;
++		  	clock-frequency = <0>; /* Filled in by zImage */
++
++			EBC0: ebc {
++				compatible = "ibm,ebc-440ep", "ibm,ebc-440gp", "ibm,ebc";
++				dcr-reg = <012 2>;
++				#address-cells = <2>;
++				#size-cells = <1>;
++				clock-frequency = <0>; /* Filled in by zImage */
++				interrupts = <5 1>;
++				interrupt-parent = <&UIC1>;
++
++				fpga at 2,0 {
++					compatible = "pika,fpga";
++			   		reg = <2 0 2200>;
++					interrupts = <18 8>;
++					interrupt-parent = <&UIC0>;
++				};
++
++				nor_flash at 0,0 {
++					compatible = "amd,s29gl512n", "cfi-flash";
++					bank-width = <2>;
++					reg = <0 0 4000000>;
++					#address-cells = <1>;
++					#size-cells = <1>;
++					partition at 0 {
++						label = "kernel";
++						reg = <0 180000>;
++					};
++					partition at 180000 {
++						label = "root";
++						reg = <180000 3480000>;
++					};
++					partition at 3600000 {
++						label = "user";
++						reg = <3600000 900000>;
++					};
++					partition at 3f00000 {
++						label = "fpga";
++						reg = <3f00000 40000>;
++					};
++					partition at 3f40000 {
++						label = "env";
++						reg = <3f40000 40000>;
++					};
++					partition at 3f80000 {
++						label = "u-boot";
++						reg = <3f80000 80000>;
++					};
++				};
++			};
++
++			UART0: serial at ef600300 {
++		   		device_type = "serial";
++		   		compatible = "ns16550";
++		   		reg = <ef600300 8>;
++		   		virtual-reg = <ef600300>;
++		   		clock-frequency = <0>; /* Filled in by zImage */
++		   		current-speed = <1c200>;
++		   		interrupt-parent = <&UIC0>;
++		   		interrupts = <0 4>;
++	   		};
++
++			IIC0: i2c at ef600700 {
++				compatible = "ibm,iic-440ep", "ibm,iic-440gp", "ibm,iic";
++				reg = <ef600700 14>;
++				interrupt-parent = <&UIC0>;
++				interrupts = <2 4>;
++			};
++
++			GPIO0: gpio at ef600b00 {
++				compatible = "ibm,gpio-440ep";
++				reg = <ef600b00 48>;
++			};
++
++			GPIO1: gpio at ef600c00 {
++				compatible = "ibm,gpio-440ep";
++				reg = <ef600c00 48>;
++			};
++
++			ZMII0: emac-zmii at ef600d00 {
++				compatible = "ibm,zmii-440ep", "ibm,zmii-440gp", "ibm,zmii";
++				reg = <ef600d00 c>;
++			};
++
++			EMAC0: ethernet at ef600e00 {
++				linux,network-index = <0>;
++				device_type = "network";
++				compatible = "ibm,emac-440ep", "ibm,emac-440gp", "ibm,emac";
++				interrupt-parent = <&UIC1>;
++				interrupts = <1c 4 1d 4>;
++				reg = <ef600e00 70>;
++				local-mac-address = [000000000000];
++				mal-device = <&MAL0>;
++				mal-tx-channel = <0 1>;
++				mal-rx-channel = <0>;
++				cell-index = <0>;
++				max-frame-size = <5dc>;
++				rx-fifo-size = <1000>;
++				tx-fifo-size = <800>;
++				phy-mode = "rmii";
++				phy-map = <00000000>;
++				zmii-device = <&ZMII0>;
++				zmii-channel = <0>;
++			};
++
++			usb at ef601000 {
++				compatible = "ohci-be";
++				reg = <ef601000 80>;
++				interrupts = <8 1 9 1>;
++				interrupt-parent = < &UIC1 >;
++			};
++		};
++	};
++
++	chosen {
++		linux,stdout-path = "/plb/opb/serial at ef600300";
++	};
++};
+diff --git a/arch/powerpc/boot/ebony.c b/arch/powerpc/boot/ebony.c
+index 86c0f5d..f61364c 100644
+--- a/arch/powerpc/boot/ebony.c
++++ b/arch/powerpc/boot/ebony.c
+@@ -31,66 +31,6 @@
+ 
+ static u8 *ebony_mac0, *ebony_mac1;
+ 
+-/* Calculate 440GP clocks */
+-void ibm440gp_fixup_clocks(unsigned int sysclk, unsigned int ser_clk)
+-{
+-	u32 sys0 = mfdcr(DCRN_CPC0_SYS0);
+-	u32 cr0 = mfdcr(DCRN_CPC0_CR0);
+-	u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
+-	u32 opdv = CPC0_SYS0_OPDV(sys0);
+-	u32 epdv = CPC0_SYS0_EPDV(sys0);
+-
+-	if (sys0 & CPC0_SYS0_BYPASS) {
+-		/* Bypass system PLL */
+-		cpu = plb = sysclk;
+-	} else {
+-		if (sys0 & CPC0_SYS0_EXTSL)
+-			/* PerClk */
+-			m = CPC0_SYS0_FWDVB(sys0) * opdv * epdv;
+-		else
+-			/* CPU clock */
+-			m = CPC0_SYS0_FBDV(sys0) * CPC0_SYS0_FWDVA(sys0);
+-		cpu = sysclk * m / CPC0_SYS0_FWDVA(sys0);
+-		plb = sysclk * m / CPC0_SYS0_FWDVB(sys0);
+-	}
 -
--static DEFINE_SPINLOCK(voya_list_lock);
--static LIST_HEAD(voya_alloc_list);
+-	opb = plb / opdv;
+-	ebc = opb / epdv;
+-
+-	/* FIXME: Check if this is for all 440GP, or just Ebony */
+-	if ((mfpvr() & 0xf0000fff) == 0x40000440)
+-		/* Rev. B 440GP, use external system clock */
+-		tb = sysclk;
+-	else
+-		/* Rev. C 440GP, errata force us to use internal clock */
+-		tb = cpu;
 -
--#define OHCI_SRAM_START	0xb0000000
--#define OHCI_HCCA_SIZE	0x100
--#define OHCI_SRAM_SIZE	0x10000
+-	if (cr0 & CPC0_CR0_U0EC)
+-		/* External UART clock */
+-		uart0 = ser_clk;
+-	else
+-		/* Internal UART clock */
+-		uart0 = plb / CPC0_CR0_UDIV(cr0);
 -
--#define VOYAGER_OHCI_NAME	"voyager-ohci"
+-	if (cr0 & CPC0_CR0_U1EC)
+-		/* External UART clock */
+-		uart1 = ser_clk;
+-	else
+-		/* Internal UART clock */
+-		uart1 = plb / CPC0_CR0_UDIV(cr0);
 -
--void *voyagergx_consistent_alloc(struct device *dev, size_t size,
--				 dma_addr_t *handle, gfp_t flag)
+-	printf("PPC440GP: SysClk = %dMHz (%x)\n\r",
+-	       (sysclk + 500000) / 1000000, sysclk);
+-
+-	dt_fixup_cpu_clocks(cpu, tb, 0);
+-
+-	dt_fixup_clock("/plb", plb);
+-	dt_fixup_clock("/plb/opb", opb);
+-	dt_fixup_clock("/plb/opb/ebc", ebc);
+-	dt_fixup_clock("/plb/opb/serial at 40000200", uart0);
+-	dt_fixup_clock("/plb/opb/serial at 40000300", uart1);
+-}
+-
+ #define EBONY_FPGA_PATH		"/plb/opb/ebc/fpga"
+ #define	EBONY_FPGA_FLASH_SEL	0x01
+ #define EBONY_SMALL_FLASH_PATH	"/plb/opb/ebc/small-flash"
+@@ -134,7 +74,7 @@ static void ebony_fixups(void)
+ 	unsigned long sysclk = 33000000;
+ 
+ 	ibm440gp_fixup_clocks(sysclk, 6 * 1843200);
+-	ibm4xx_fixup_memsize();
++	ibm4xx_sdram_fixup_memsize();
+ 	dt_fixup_mac_addresses(ebony_mac0, ebony_mac1);
+ 	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
+ 	ebony_flashsel_fixup();
+@@ -146,6 +86,6 @@ void ebony_init(void *mac0, void *mac1)
+ 	platform_ops.exit = ibm44x_dbcr_reset;
+ 	ebony_mac0 = mac0;
+ 	ebony_mac1 = mac1;
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ }
+diff --git a/arch/powerpc/boot/ep405.c b/arch/powerpc/boot/ep405.c
+new file mode 100644
+index 0000000..2d08a86
+--- /dev/null
++++ b/arch/powerpc/boot/ep405.c
+@@ -0,0 +1,74 @@
++/*
++ * Embedded Planet EP405 with PlanetCore firmware
++ *
++ * (c) Benjamin Herrenschmidt <benh at kernel.crashing.org>, IBM Corp,\
++ *
++ * Based on ep88xc.c by
++ *
++ * Scott Wood <scottwood at freescale.com>
++ *
++ * Copyright (c) 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#include "ops.h"
++#include "stdio.h"
++#include "planetcore.h"
++#include "dcr.h"
++#include "4xx.h"
++#include "io.h"
++
++static char *table;
++static u64 mem_size;
++
++static void platform_fixups(void)
++{
++	u64 val;
++	void *nvrtc;
++
++	dt_fixup_memory(0, mem_size);
++	planetcore_set_mac_addrs(table);
++
++	if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
++		printf("No PlanetCore crystal frequency key.\r\n");
++		return;
++	}
++	ibm405gp_fixup_clocks(val, 0xa8c000);
++	ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
++	ibm4xx_fixup_ebc_ranges("/plb/ebc");
++
++	if (!planetcore_get_decimal(table, PLANETCORE_KEY_KB_NVRAM, &val)) {
++		printf("No PlanetCore NVRAM size key.\r\n");
++		return;
++	}
++	nvrtc = finddevice("/plb/ebc/nvrtc at 4,200000");
++	if (nvrtc != NULL) {
++		u32 reg[3] = { 4, 0x200000, 0};
++		getprop(nvrtc, "reg", reg, 3);
++		reg[2] = (val << 10) & 0xffffffff;
++		setprop(nvrtc, "reg", reg, 3);
++	}
++}
++
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++		   unsigned long r6, unsigned long r7)
++{
++	table = (char *)r3;
++	planetcore_prepare_table(table);
++
++	if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
++		return;
++
++	mem_size *= 1024 * 1024;
++	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
++
++	fdt_init(_dtb_start);
++
++	planetcore_set_stdout_path(table);
++
++	serial_console_init();
++	platform_ops.fixups = platform_fixups;
++}
+diff --git a/arch/powerpc/boot/ep8248e.c b/arch/powerpc/boot/ep8248e.c
+new file mode 100644
+index 0000000..f57d14d
+--- /dev/null
++++ b/arch/powerpc/boot/ep8248e.c
+@@ -0,0 +1,55 @@
++/*
++ * Embedded Planet EP8248E with PlanetCore firmware
++ *
++ * Author: Scott Wood <scottwood at freescale.com>
++ *
++ * Copyright (c) 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#include "ops.h"
++#include "stdio.h"
++#include "planetcore.h"
++#include "pq2.h"
++
++static char *table;
++static u64 mem_size;
++
++#include <io.h>
++
++static void platform_fixups(void)
++{
++	u64 val;
++
++	dt_fixup_memory(0, mem_size);
++	planetcore_set_mac_addrs(table);
++
++	if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
++		printf("No PlanetCore crystal frequency key.\r\n");
++		return;
++	}
++
++	pq2_fixup_clocks(val);
++}
++
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++                   unsigned long r6, unsigned long r7)
++{
++	table = (char *)r3;
++	planetcore_prepare_table(table);
++
++	if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
++		return;
++
++	mem_size *= 1024 * 1024;
++	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
++
++	fdt_init(_dtb_start);
++
++	planetcore_set_stdout_path(table);
++	serial_console_init();
++	platform_ops.fixups = platform_fixups;
++}
+diff --git a/arch/powerpc/boot/ep88xc.c b/arch/powerpc/boot/ep88xc.c
+index 6b87cdc..a400f54 100644
+--- a/arch/powerpc/boot/ep88xc.c
++++ b/arch/powerpc/boot/ep88xc.c
+@@ -45,7 +45,7 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ 	mem_size *= 1024 * 1024;
+ 	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
+ 
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 
+ 	planetcore_set_stdout_path(table);
+ 
+diff --git a/arch/powerpc/boot/flatdevtree.c b/arch/powerpc/boot/flatdevtree.c
+deleted file mode 100644
+index cf30675..0000000
+--- a/arch/powerpc/boot/flatdevtree.c
++++ /dev/null
+@@ -1,1036 +0,0 @@
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+- *
+- * Copyright Pantelis Antoniou 2006
+- * Copyright (C) IBM Corporation 2006
+- *
+- * Authors: Pantelis Antoniou <pantelis at embeddedalley.com>
+- *	    Hollis Blanchard <hollisb at us.ibm.com>
+- *	    Mark A. Greer <mgreer at mvista.com>
+- *	    Paul Mackerras <paulus at samba.org>
+- */
+-
+-#include <string.h>
+-#include <stddef.h>
+-#include "flatdevtree.h"
+-#include "flatdevtree_env.h"
+-
+-#define _ALIGN(x, al)	(((x) + (al) - 1) & ~((al) - 1))
+-
+-static char *ft_root_node(struct ft_cxt *cxt)
 -{
--	struct list_head *list = &voya_alloc_list;
--	struct voya_alloc_entry *entry;
--	unsigned long start, end;
--	unsigned long flags;
+-	return cxt->rgn[FT_STRUCT].start;
+-}
 -
--	/*
--	 * The SM501 contains an integrated 8051 with its own SRAM.
--	 * Devices within the cchip can all hook into the 8051 SRAM.
--	 * We presently use this for the OHCI.
--	 *
--	 * Everything else goes through consistent_alloc().
--	 */
--	if (!dev || strcmp(dev->driver->name, VOYAGER_OHCI_NAME))
+-/* Routines for keeping node ptrs returned by ft_find_device current */
+-/* First entry not used b/c it would return 0 and be taken as NULL/error */
+-static void *ft_get_phandle(struct ft_cxt *cxt, char *node)
+-{
+-	unsigned int i;
+-
+-	if (!node)
 -		return NULL;
 -
--	start = OHCI_SRAM_START + OHCI_HCCA_SIZE;
+-	for (i = 1; i < cxt->nodes_used; i++)	/* already there? */
+-		if (cxt->node_tbl[i] == node)
+-			return (void *)i;
+-
+-	if (cxt->nodes_used < cxt->node_max) {
+-		cxt->node_tbl[cxt->nodes_used] = node;
+-		return (void *)cxt->nodes_used++;
+-	}
 -
--	entry = kmalloc(sizeof(struct voya_alloc_entry), GFP_ATOMIC);
--	if (!entry)
--		return ERR_PTR(-ENOMEM);
+-	return NULL;
+-}
 -
--	entry->len = (size + 15) & ~15;
+-static char *ft_node_ph2node(struct ft_cxt *cxt, const void *phandle)
+-{
+-	unsigned int i = (unsigned int)phandle;
 -
--	/*
--	 * The basis for this allocator is dwmw2's malloc.. the
--	 * Matrox allocator :-)
--	 */
--	spin_lock_irqsave(&voya_list_lock, flags);
--	list_for_each(list, &voya_alloc_list) {
--		struct voya_alloc_entry *p;
+-	if (i < cxt->nodes_used)
+-		return cxt->node_tbl[i];
+-	return NULL;
+-}
 -
--		p = list_entry(list, struct voya_alloc_entry, list);
+-static void ft_node_update_before(struct ft_cxt *cxt, char *addr, int shift)
+-{
+-	unsigned int i;
 -
--		if (p->ofs - start >= size)
--			goto out;
+-	if (shift == 0)
+-		return;
 -
--		start = p->ofs + p->len;
+-	for (i = 1; i < cxt->nodes_used; i++)
+-		if (cxt->node_tbl[i] < addr)
+-			cxt->node_tbl[i] += shift;
+-}
+-
+-static void ft_node_update_after(struct ft_cxt *cxt, char *addr, int shift)
+-{
+-	unsigned int i;
+-
+-	if (shift == 0)
+-		return;
+-
+-	for (i = 1; i < cxt->nodes_used; i++)
+-		if (cxt->node_tbl[i] >= addr)
+-			cxt->node_tbl[i] += shift;
+-}
+-
+-/* Struct used to return info from ft_next() */
+-struct ft_atom {
+-	u32 tag;
+-	const char *name;
+-	void *data;
+-	u32 size;
+-};
+-
+-/* Set ptrs to current one's info; return addr of next one */
+-static char *ft_next(struct ft_cxt *cxt, char *p, struct ft_atom *ret)
+-{
+-	u32 sz;
+-
+-	if (p >= cxt->rgn[FT_STRUCT].start + cxt->rgn[FT_STRUCT].size)
+-		return NULL;
+-
+-	ret->tag = be32_to_cpu(*(u32 *) p);
+-	p += 4;
+-
+-	switch (ret->tag) {	/* Tag */
+-	case OF_DT_BEGIN_NODE:
+-		ret->name = p;
+-		ret->data = (void *)(p - 4);	/* start of node */
+-		p += _ALIGN(strlen(p) + 1, 4);
+-		break;
+-	case OF_DT_PROP:
+-		ret->size = sz = be32_to_cpu(*(u32 *) p);
+-		ret->name = cxt->str_anchor + be32_to_cpu(*(u32 *) (p + 4));
+-		ret->data = (void *)(p + 8);
+-		p += 8 + _ALIGN(sz, 4);
+-		break;
+-	case OF_DT_END_NODE:
+-	case OF_DT_NOP:
+-		break;
+-	case OF_DT_END:
+-	default:
+-		p = NULL;
+-		break;
 -	}
 -
--	end  = start + (OHCI_SRAM_SIZE  - OHCI_HCCA_SIZE);
--	list = &voya_alloc_list;
+-	return p;
+-}
 -
--	if (end - start >= size) {
--out:
--		entry->ofs = start;
--		list_add_tail(&entry->list, list);
--		spin_unlock_irqrestore(&voya_list_lock, flags);
+-#define HDR_SIZE	_ALIGN(sizeof(struct boot_param_header), 8)
+-#define EXPAND_INCR	1024	/* alloc this much extra when expanding */
 -
--		*handle = start;
--		return (void *)start;
+-/* Copy the tree to a newly-allocated region and put things in order */
+-static int ft_reorder(struct ft_cxt *cxt, int nextra)
+-{
+-	unsigned long tot;
+-	enum ft_rgn_id r;
+-	char *p, *pend;
+-	int stroff;
+-
+-	tot = HDR_SIZE + EXPAND_INCR;
+-	for (r = FT_RSVMAP; r <= FT_STRINGS; ++r)
+-		tot += cxt->rgn[r].size;
+-	if (nextra > 0)
+-		tot += nextra;
+-	tot = _ALIGN(tot, 8);
+-
+-	if (!cxt->realloc)
+-		return 0;
+-	p = cxt->realloc(NULL, tot);
+-	if (!p)
+-		return 0;
+-
+-	memcpy(p, cxt->bph, sizeof(struct boot_param_header));
+-	/* offsets get fixed up later */
+-
+-	cxt->bph = (struct boot_param_header *)p;
+-	cxt->max_size = tot;
+-	pend = p + tot;
+-	p += HDR_SIZE;
+-
+-	memcpy(p, cxt->rgn[FT_RSVMAP].start, cxt->rgn[FT_RSVMAP].size);
+-	cxt->rgn[FT_RSVMAP].start = p;
+-	p += cxt->rgn[FT_RSVMAP].size;
+-
+-	memcpy(p, cxt->rgn[FT_STRUCT].start, cxt->rgn[FT_STRUCT].size);
+-	ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
+-			p - cxt->rgn[FT_STRUCT].start);
+-	cxt->p += p - cxt->rgn[FT_STRUCT].start;
+-	cxt->rgn[FT_STRUCT].start = p;
+-
+-	p = pend - cxt->rgn[FT_STRINGS].size;
+-	memcpy(p, cxt->rgn[FT_STRINGS].start, cxt->rgn[FT_STRINGS].size);
+-	stroff = cxt->str_anchor - cxt->rgn[FT_STRINGS].start;
+-	cxt->rgn[FT_STRINGS].start = p;
+-	cxt->str_anchor = p + stroff;
+-
+-	cxt->isordered = 1;
+-	return 1;
+-}
+-
+-static inline char *prev_end(struct ft_cxt *cxt, enum ft_rgn_id r)
+-{
+-	if (r > FT_RSVMAP)
+-		return cxt->rgn[r - 1].start + cxt->rgn[r - 1].size;
+-	return (char *)cxt->bph + HDR_SIZE;
+-}
+-
+-static inline char *next_start(struct ft_cxt *cxt, enum ft_rgn_id r)
+-{
+-	if (r < FT_STRINGS)
+-		return cxt->rgn[r + 1].start;
+-	return (char *)cxt->bph + cxt->max_size;
+-}
+-
+-/*
+- * See if we can expand region rgn by nextra bytes by using up
+- * free space after or before the region.
+- */
+-static int ft_shuffle(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
+-		int nextra)
+-{
+-	char *p = *pp;
+-	char *rgn_start, *rgn_end;
+-
+-	rgn_start = cxt->rgn[rgn].start;
+-	rgn_end = rgn_start + cxt->rgn[rgn].size;
+-	if (nextra <= 0 || rgn_end + nextra <= next_start(cxt, rgn)) {
+-		/* move following stuff */
+-		if (p < rgn_end) {
+-			if (nextra < 0)
+-				memmove(p, p - nextra, rgn_end - p + nextra);
+-			else
+-				memmove(p + nextra, p, rgn_end - p);
+-			if (rgn == FT_STRUCT)
+-				ft_node_update_after(cxt, p, nextra);
+-		}
+-		cxt->rgn[rgn].size += nextra;
+-		if (rgn == FT_STRINGS)
+-			/* assumes strings only added at beginning */
+-			cxt->str_anchor += nextra;
+-		return 1;
+-	}
+-	if (prev_end(cxt, rgn) <= rgn_start - nextra) {
+-		/* move preceding stuff */
+-		if (p > rgn_start) {
+-			memmove(rgn_start - nextra, rgn_start, p - rgn_start);
+-			if (rgn == FT_STRUCT)
+-				ft_node_update_before(cxt, p, -nextra);
+-		}
+-		*pp -= nextra;
+-		cxt->rgn[rgn].start -= nextra;
+-		cxt->rgn[rgn].size += nextra;
+-		return 1;
 -	}
+-	return 0;
+-}
 -
--	kfree(entry);
--	spin_unlock_irqrestore(&voya_list_lock, flags);
+-static int ft_make_space(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
+-			 int nextra)
+-{
+-	unsigned long size, ssize, tot;
+-	char *str, *next;
+-	enum ft_rgn_id r;
 -
--	return ERR_PTR(-EINVAL);
+-	if (!cxt->isordered) {
+-		unsigned long rgn_off = *pp - cxt->rgn[rgn].start;
+-
+-		if (!ft_reorder(cxt, nextra))
+-			return 0;
+-
+-		*pp = cxt->rgn[rgn].start + rgn_off;
+-	}
+-	if (ft_shuffle(cxt, pp, rgn, nextra))
+-		return 1;
+-
+-	/* See if there is space after the strings section */
+-	ssize = cxt->rgn[FT_STRINGS].size;
+-	if (cxt->rgn[FT_STRINGS].start + ssize
+-			< (char *)cxt->bph + cxt->max_size) {
+-		/* move strings up as far as possible */
+-		str = (char *)cxt->bph + cxt->max_size - ssize;
+-		cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
+-		memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
+-		cxt->rgn[FT_STRINGS].start = str;
+-		/* enough space now? */
+-		if (rgn >= FT_STRUCT && ft_shuffle(cxt, pp, rgn, nextra))
+-			return 1;
+-	}
+-
+-	/* how much total free space is there following this region? */
+-	tot = 0;
+-	for (r = rgn; r < FT_STRINGS; ++r) {
+-		char *r_end = cxt->rgn[r].start + cxt->rgn[r].size;
+-		tot += next_start(cxt, rgn) - r_end;
+-	}
+-
+-	/* cast is to shut gcc up; we know nextra >= 0 */
+-	if (tot < (unsigned int)nextra) {
+-		/* have to reallocate */
+-		char *newp, *new_start;
+-		int shift;
+-
+-		if (!cxt->realloc)
+-			return 0;
+-		size = _ALIGN(cxt->max_size + (nextra - tot) + EXPAND_INCR, 8);
+-		newp = cxt->realloc(cxt->bph, size);
+-		if (!newp)
+-			return 0;
+-		cxt->max_size = size;
+-		shift = newp - (char *)cxt->bph;
+-
+-		if (shift) { /* realloc can return same addr */
+-			cxt->bph = (struct boot_param_header *)newp;
+-			ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
+-					shift);
+-			for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
+-				new_start = cxt->rgn[r].start + shift;
+-				cxt->rgn[r].start = new_start;
+-			}
+-			*pp += shift;
+-			cxt->str_anchor += shift;
+-		}
+-
+-		/* move strings up to the end */
+-		str = newp + size - ssize;
+-		cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
+-		memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
+-		cxt->rgn[FT_STRINGS].start = str;
+-
+-		if (ft_shuffle(cxt, pp, rgn, nextra))
+-			return 1;
+-	}
+-
+-	/* must be FT_RSVMAP and we need to move FT_STRUCT up */
+-	if (rgn == FT_RSVMAP) {
+-		next = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
+-			+ nextra;
+-		ssize = cxt->rgn[FT_STRUCT].size;
+-		if (next + ssize >= cxt->rgn[FT_STRINGS].start)
+-			return 0;	/* "can't happen" */
+-		memmove(next, cxt->rgn[FT_STRUCT].start, ssize);
+-		ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start, nextra);
+-		cxt->rgn[FT_STRUCT].start = next;
+-
+-		if (ft_shuffle(cxt, pp, rgn, nextra))
+-			return 1;
+-	}
+-
+-	return 0;		/* "can't happen" */
 -}
 -
--int voyagergx_consistent_free(struct device *dev, size_t size,
--			      void *vaddr, dma_addr_t handle)
+-static void ft_put_word(struct ft_cxt *cxt, u32 v)
 -{
--	struct voya_alloc_entry *entry;
--	unsigned long flags;
+-	*(u32 *) cxt->p = cpu_to_be32(v);
+-	cxt->p += 4;
+-}
 -
--	if (!dev || strcmp(dev->driver->name, VOYAGER_OHCI_NAME))
--		return -EINVAL;
+-static void ft_put_bin(struct ft_cxt *cxt, const void *data, unsigned int sz)
+-{
+-	unsigned long sza = _ALIGN(sz, 4);
 -
--	spin_lock_irqsave(&voya_list_lock, flags);
--	list_for_each_entry(entry, &voya_alloc_list, list) {
--		if (entry->ofs != handle)
--			continue;
+-	/* zero out the alignment gap if necessary */
+-	if (sz < sza)
+-		*(u32 *) (cxt->p + sza - 4) = 0;
 -
--		list_del(&entry->list);
--		kfree(entry);
+-	/* copy in the data */
+-	memcpy(cxt->p, data, sz);
 -
--		break;
+-	cxt->p += sza;
+-}
+-
+-char *ft_begin_node(struct ft_cxt *cxt, const char *name)
+-{
+-	unsigned long nlen = strlen(name) + 1;
+-	unsigned long len = 8 + _ALIGN(nlen, 4);
+-	char *ret;
+-
+-	if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
+-		return NULL;
+-
+-	ret = cxt->p;
+-
+-	ft_put_word(cxt, OF_DT_BEGIN_NODE);
+-	ft_put_bin(cxt, name, strlen(name) + 1);
+-
+-	return ret;
+-}
+-
+-void ft_end_node(struct ft_cxt *cxt)
+-{
+-	ft_put_word(cxt, OF_DT_END_NODE);
+-}
+-
+-void ft_nop(struct ft_cxt *cxt)
+-{
+-	if (ft_make_space(cxt, &cxt->p, FT_STRUCT, 4))
+-		ft_put_word(cxt, OF_DT_NOP);
+-}
+-
+-#define NO_STRING	0x7fffffff
+-
+-static int lookup_string(struct ft_cxt *cxt, const char *name)
+-{
+-	char *p, *end;
+-
+-	p = cxt->rgn[FT_STRINGS].start;
+-	end = p + cxt->rgn[FT_STRINGS].size;
+-	while (p < end) {
+-		if (strcmp(p, (char *)name) == 0)
+-			return p - cxt->str_anchor;
+-		p += strlen(p) + 1;
 -	}
--	spin_unlock_irqrestore(&voya_list_lock, flags);
 -
+-	return NO_STRING;
+-}
+-
+-/* lookup string and insert if not found */
+-static int map_string(struct ft_cxt *cxt, const char *name)
+-{
+-	int off;
+-	char *p;
+-
+-	off = lookup_string(cxt, name);
+-	if (off != NO_STRING)
+-		return off;
+-	p = cxt->rgn[FT_STRINGS].start;
+-	if (!ft_make_space(cxt, &p, FT_STRINGS, strlen(name) + 1))
+-		return NO_STRING;
+-	strcpy(p, name);
+-	return p - cxt->str_anchor;
+-}
+-
+-int ft_prop(struct ft_cxt *cxt, const char *name, const void *data,
+-		unsigned int sz)
+-{
+-	int off, len;
+-
+-	off = map_string(cxt, name);
+-	if (off == NO_STRING)
+-		return -1;
+-
+-	len = 12 + _ALIGN(sz, 4);
+-	if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
+-		return -1;
+-
+-	ft_put_word(cxt, OF_DT_PROP);
+-	ft_put_word(cxt, sz);
+-	ft_put_word(cxt, off);
+-	ft_put_bin(cxt, data, sz);
 -	return 0;
 -}
 -
--EXPORT_SYMBOL(voyagergx_consistent_alloc);
--EXPORT_SYMBOL(voyagergx_consistent_free);
-diff --git a/arch/sh/cchips/voyagergx/irq.c b/arch/sh/cchips/voyagergx/irq.c
-deleted file mode 100644
-index ade3038..0000000
---- a/arch/sh/cchips/voyagergx/irq.c
-+++ /dev/null
-@@ -1,101 +0,0 @@
--/* -------------------------------------------------------------------- */
--/* setup_voyagergx.c:                                                     */
--/* -------------------------------------------------------------------- */
--/*  This program is free software; you can redistribute it and/or modify
--    it under the terms of the GNU General Public License as published by
--    the Free Software Foundation; either version 2 of the License, or
--    (at your option) any later version.
+-int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str)
+-{
+-	return ft_prop(cxt, name, str, strlen(str) + 1);
+-}
 -
--    This program is distributed in the hope that it will be useful,
--    but WITHOUT ANY WARRANTY; without even the implied warranty of
--    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
--    GNU General Public License for more details.
+-int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val)
+-{
+-	u32 v = cpu_to_be32((u32) val);
 -
--    You should have received a copy of the GNU General Public License
--    along with this program; if not, write to the Free Software
--    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+-	return ft_prop(cxt, name, &v, 4);
+-}
 -
--    Copyright 2003 (c) Lineo uSolutions,Inc.
--*/
--#include <linux/interrupt.h>
--#include <linux/init.h>
--#include <linux/io.h>
--#include <asm/voyagergx.h>
--#include <asm/rts7751r2d.h>
+-/* Calculate the size of the reserved map */
+-static unsigned long rsvmap_size(struct ft_cxt *cxt)
+-{
+-	struct ft_reserve *res;
 -
--enum {
--	UNUSED = 0,
+-	res = (struct ft_reserve *)cxt->rgn[FT_RSVMAP].start;
+-	while (res->start || res->len)
+-		++res;
+-	return (char *)(res + 1) - cxt->rgn[FT_RSVMAP].start;
+-}
 -
--	/* voyager specific interrupt sources */
--	UP, G54, G53, G52, G51, G50, G49, G48,
--	I2C, PW, DMA, PCI, I2S, AC, US,
--	U1, U0, CV, MC, S1, S0,
--	UH, TWOD, ZD, PV, CI,
--};
+-/* Calculate the size of the struct region by stepping through it */
+-static unsigned long struct_size(struct ft_cxt *cxt)
+-{
+-	char *p = cxt->rgn[FT_STRUCT].start;
+-	char *next;
+-	struct ft_atom atom;
 -
--static struct intc_vect vectors[] __initdata = {
--	INTC_IRQ(UP, IRQ_SM501_UP), INTC_IRQ(G54, IRQ_SM501_G54),
--	INTC_IRQ(G53, IRQ_SM501_G53), INTC_IRQ(G52, IRQ_SM501_G52),
--	INTC_IRQ(G51, IRQ_SM501_G51), INTC_IRQ(G50, IRQ_SM501_G50),
--	INTC_IRQ(G49, IRQ_SM501_G49), INTC_IRQ(G48, IRQ_SM501_G48),
--	INTC_IRQ(I2C, IRQ_SM501_I2C), INTC_IRQ(PW, IRQ_SM501_PW),
--	INTC_IRQ(DMA, IRQ_SM501_DMA), INTC_IRQ(PCI, IRQ_SM501_PCI),
--	INTC_IRQ(I2S, IRQ_SM501_I2S), INTC_IRQ(AC, IRQ_SM501_AC),
--	INTC_IRQ(US, IRQ_SM501_US), INTC_IRQ(U1, IRQ_SM501_U1),
--	INTC_IRQ(U0, IRQ_SM501_U0), INTC_IRQ(CV, IRQ_SM501_CV),
--	INTC_IRQ(MC, IRQ_SM501_MC), INTC_IRQ(S1, IRQ_SM501_S1),
--	INTC_IRQ(S0, IRQ_SM501_S0), INTC_IRQ(UH, IRQ_SM501_UH),
--	INTC_IRQ(TWOD, IRQ_SM501_2D), INTC_IRQ(ZD, IRQ_SM501_ZD),
--	INTC_IRQ(PV, IRQ_SM501_PV), INTC_IRQ(CI, IRQ_SM501_CI),
--};
+-	/* make check in ft_next happy */
+-	if (cxt->rgn[FT_STRUCT].size == 0)
+-		cxt->rgn[FT_STRUCT].size = 0xfffffffful - (unsigned long)p;
 -
--static struct intc_mask_reg mask_registers[] __initdata = {
--	{ VOYAGER_INT_MASK, 0, 32, /* "Interrupt Mask", MMIO_base + 0x30 */
--	  { UP, G54, G53, G52, G51, G50, G49, G48,
--	    I2C, PW, 0, DMA, PCI, I2S, AC, US,
--	    0, 0, U1, U0, CV, MC, S1, S0,
--	    0, UH, 0, 0, TWOD, ZD, PV, CI } },
--};
+-	while ((next = ft_next(cxt, p, &atom)) != NULL)
+-		p = next;
+-	return p + 4 - cxt->rgn[FT_STRUCT].start;
+-}
 -
--static DECLARE_INTC_DESC(intc_desc, "voyagergx", vectors,
--			 NULL, NULL, mask_registers, NULL, NULL);
+-/* add `adj' on to all string offset values in the struct area */
+-static void adjust_string_offsets(struct ft_cxt *cxt, int adj)
+-{
+-	char *p = cxt->rgn[FT_STRUCT].start;
+-	char *next;
+-	struct ft_atom atom;
+-	int off;
 -
--static unsigned int voyagergx_stat2irq[32] = {
--	IRQ_SM501_CI, IRQ_SM501_PV, IRQ_SM501_ZD, IRQ_SM501_2D,
--	0, 0, IRQ_SM501_UH, 0,
--	IRQ_SM501_S0, IRQ_SM501_S1, IRQ_SM501_MC, IRQ_SM501_CV,
--	IRQ_SM501_U0, IRQ_SM501_U1, 0, 0,
--	IRQ_SM501_US, IRQ_SM501_AC, IRQ_SM501_I2S, IRQ_SM501_PCI,
--	IRQ_SM501_DMA, 0, IRQ_SM501_PW, IRQ_SM501_I2C,
--	IRQ_SM501_G48, IRQ_SM501_G49, IRQ_SM501_G50, IRQ_SM501_G51,
--	IRQ_SM501_G52, IRQ_SM501_G53, IRQ_SM501_G54, IRQ_SM501_UP
--};
+-	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+-		if (atom.tag == OF_DT_PROP) {
+-			off = be32_to_cpu(*(u32 *) (p + 8));
+-			*(u32 *) (p + 8) = cpu_to_be32(off + adj);
+-		}
+-		p = next;
+-	}
+-}
 -
--static void voyagergx_irq_demux(unsigned int irq, struct irq_desc *desc)
+-/* start construction of the flat OF tree from scratch */
+-void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+-		void *(*realloc_fn) (void *, unsigned long))
 -{
--	unsigned long intv = ctrl_inl(INT_STATUS);
--	struct irq_desc *ext_desc;
--	unsigned int ext_irq;
--	unsigned int k = 0;
+-	struct boot_param_header *bph = blob;
+-	char *p;
+-	struct ft_reserve *pres;
 -
--	while (intv) {
--		ext_irq = voyagergx_stat2irq[k];
--		if (ext_irq && (intv & 1)) {
--			ext_desc = irq_desc + ext_irq;
--			handle_level_irq(ext_irq, ext_desc);
+-	/* clear the cxt */
+-	memset(cxt, 0, sizeof(*cxt));
+-
+-	cxt->bph = bph;
+-	cxt->max_size = max_size;
+-	cxt->realloc = realloc_fn;
+-	cxt->isordered = 1;
+-
+-	/* zero everything in the header area */
+-	memset(bph, 0, sizeof(*bph));
+-
+-	bph->magic = cpu_to_be32(OF_DT_HEADER);
+-	bph->version = cpu_to_be32(0x10);
+-	bph->last_comp_version = cpu_to_be32(0x10);
+-
+-	/* start pointers */
+-	cxt->rgn[FT_RSVMAP].start = p = blob + HDR_SIZE;
+-	cxt->rgn[FT_RSVMAP].size = sizeof(struct ft_reserve);
+-	pres = (struct ft_reserve *)p;
+-	cxt->rgn[FT_STRUCT].start = p += sizeof(struct ft_reserve);
+-	cxt->rgn[FT_STRUCT].size = 4;
+-	cxt->rgn[FT_STRINGS].start = blob + max_size;
+-	cxt->rgn[FT_STRINGS].size = 0;
+-
+-	/* init rsvmap and struct */
+-	pres->start = 0;
+-	pres->len = 0;
+-	*(u32 *) p = cpu_to_be32(OF_DT_END);
+-
+-	cxt->str_anchor = blob;
+-}
+-
+-/* open up an existing blob to be examined or modified */
+-int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+-		unsigned int max_find_device,
+-		void *(*realloc_fn) (void *, unsigned long))
+-{
+-	struct boot_param_header *bph = blob;
+-
+-	/* can't cope with version < 16 */
+-	if (be32_to_cpu(bph->version) < 16)
+-		return -1;
+-
+-	/* clear the cxt */
+-	memset(cxt, 0, sizeof(*cxt));
+-
+-	/* alloc node_tbl to track node ptrs returned by ft_find_device */
+-	++max_find_device;
+-	cxt->node_tbl = realloc_fn(NULL, max_find_device * sizeof(char *));
+-	if (!cxt->node_tbl)
+-		return -1;
+-	memset(cxt->node_tbl, 0, max_find_device * sizeof(char *));
+-	cxt->node_max = max_find_device;
+-	cxt->nodes_used = 1;	/* don't use idx 0 b/c looks like NULL */
+-
+-	cxt->bph = bph;
+-	cxt->max_size = max_size;
+-	cxt->realloc = realloc_fn;
+-
+-	cxt->rgn[FT_RSVMAP].start = blob + be32_to_cpu(bph->off_mem_rsvmap);
+-	cxt->rgn[FT_RSVMAP].size = rsvmap_size(cxt);
+-	cxt->rgn[FT_STRUCT].start = blob + be32_to_cpu(bph->off_dt_struct);
+-	cxt->rgn[FT_STRUCT].size = struct_size(cxt);
+-	cxt->rgn[FT_STRINGS].start = blob + be32_to_cpu(bph->off_dt_strings);
+-	cxt->rgn[FT_STRINGS].size = be32_to_cpu(bph->dt_strings_size);
+-
+-	cxt->p = cxt->rgn[FT_STRUCT].start;
+-	cxt->str_anchor = cxt->rgn[FT_STRINGS].start;
+-
+-	return 0;
+-}
+-
+-/* add a reserver physical area to the rsvmap */
+-int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size)
+-{
+-	char *p;
+-	struct ft_reserve *pres;
+-
+-	p = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
+-		- sizeof(struct ft_reserve);
+-	if (!ft_make_space(cxt, &p, FT_RSVMAP, sizeof(struct ft_reserve)))
+-		return -1;
+-
+-	pres = (struct ft_reserve *)p;
+-	pres->start = cpu_to_be64(physaddr);
+-	pres->len = cpu_to_be64(size);
+-
+-	return 0;
+-}
+-
+-void ft_begin_tree(struct ft_cxt *cxt)
+-{
+-	cxt->p = ft_root_node(cxt);
+-}
+-
+-void ft_end_tree(struct ft_cxt *cxt)
+-{
+-	struct boot_param_header *bph = cxt->bph;
+-	char *p, *oldstr, *str, *endp;
+-	unsigned long ssize;
+-	int adj;
+-
+-	if (!cxt->isordered)
+-		return;		/* we haven't touched anything */
+-
+-	/* adjust string offsets */
+-	oldstr = cxt->rgn[FT_STRINGS].start;
+-	adj = cxt->str_anchor - oldstr;
+-	if (adj)
+-		adjust_string_offsets(cxt, adj);
+-
+-	/* make strings end on 8-byte boundary */
+-	ssize = cxt->rgn[FT_STRINGS].size;
+-	endp = (char *)_ALIGN((unsigned long)cxt->rgn[FT_STRUCT].start
+-			+ cxt->rgn[FT_STRUCT].size + ssize, 8);
+-	str = endp - ssize;
+-
+-	/* move strings down to end of structs */
+-	memmove(str, oldstr, ssize);
+-	cxt->str_anchor = str;
+-	cxt->rgn[FT_STRINGS].start = str;
+-
+-	/* fill in header fields */
+-	p = (char *)bph;
+-	bph->totalsize = cpu_to_be32(endp - p);
+-	bph->off_mem_rsvmap = cpu_to_be32(cxt->rgn[FT_RSVMAP].start - p);
+-	bph->off_dt_struct = cpu_to_be32(cxt->rgn[FT_STRUCT].start - p);
+-	bph->off_dt_strings = cpu_to_be32(cxt->rgn[FT_STRINGS].start - p);
+-	bph->dt_strings_size = cpu_to_be32(ssize);
+-}
+-
+-void *ft_find_device(struct ft_cxt *cxt, const void *top, const char *srch_path)
+-{
+-	char *node;
+-
+-	if (top) {
+-		node = ft_node_ph2node(cxt, top);
+-		if (node == NULL)
+-			return NULL;
+-	} else {
+-		node = ft_root_node(cxt);
+-	}
+-
+-	node = ft_find_descendent(cxt, node, srch_path);
+-	return ft_get_phandle(cxt, node);
+-}
+-
+-void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path)
+-{
+-	struct ft_atom atom;
+-	char *p;
+-	const char *cp, *q;
+-	int cl;
+-	int depth = -1;
+-	int dmatch = 0;
+-	const char *path_comp[FT_MAX_DEPTH];
+-
+-	cp = srch_path;
+-	cl = 0;
+-	p = top;
+-
+-	while ((p = ft_next(cxt, p, &atom)) != NULL) {
+-		switch (atom.tag) {
+-		case OF_DT_BEGIN_NODE:
+-			++depth;
+-			if (depth != dmatch)
+-				break;
+-			cxt->genealogy[depth] = atom.data;
+-			cxt->genealogy[depth + 1] = NULL;
+-			if (depth && !(strncmp(atom.name, cp, cl) == 0
+-					&& (atom.name[cl] == '/'
+-						|| atom.name[cl] == '\0'
+-						|| atom.name[cl] == '@')))
+-				break;
+-			path_comp[dmatch] = cp;
+-			/* it matches so far, advance to next path component */
+-			cp += cl;
+-			/* skip slashes */
+-			while (*cp == '/')
+-				++cp;
+-			/* we're done if this is the end of the string */
+-			if (*cp == 0)
+-				return atom.data;
+-			/* look for end of this component */
+-			q = strchr(cp, '/');
+-			if (q)
+-				cl = q - cp;
+-			else
+-				cl = strlen(cp);
+-			++dmatch;
+-			break;
+-		case OF_DT_END_NODE:
+-			if (depth == 0)
+-				return NULL;
+-			if (dmatch > depth) {
+-				--dmatch;
+-				cl = cp - path_comp[dmatch] - 1;
+-				cp = path_comp[dmatch];
+-				while (cl > 0 && cp[cl - 1] == '/')
+-					--cl;
+-			}
+-			--depth;
+-			break;
 -		}
--		intv >>= 1;
--		k++;
 -	}
+-	return NULL;
 -}
 -
--void __init setup_voyagergx_irq(void)
+-void *__ft_get_parent(struct ft_cxt *cxt, void *node)
 -{
--	printk(KERN_INFO "VoyagerGX on irq %d (mapped into %d to %d)\n",
--	       IRQ_VOYAGER,
--	       VOYAGER_IRQ_BASE,
--	       VOYAGER_IRQ_BASE + VOYAGER_IRQ_NUM - 1);
+-	int d;
+-	struct ft_atom atom;
+-	char *p;
 -
--	register_intc_controller(&intc_desc);
--	set_irq_chained_handler(IRQ_VOYAGER, voyagergx_irq_demux);
+-	for (d = 0; cxt->genealogy[d] != NULL; ++d)
+-		if (cxt->genealogy[d] == node)
+-			return d > 0 ? cxt->genealogy[d - 1] : NULL;
+-
+-	/* have to do it the hard way... */
+-	p = ft_root_node(cxt);
+-	d = 0;
+-	while ((p = ft_next(cxt, p, &atom)) != NULL) {
+-		switch (atom.tag) {
+-		case OF_DT_BEGIN_NODE:
+-			cxt->genealogy[d] = atom.data;
+-			if (node == atom.data) {
+-				/* found it */
+-				cxt->genealogy[d + 1] = NULL;
+-				return d > 0 ? cxt->genealogy[d - 1] : NULL;
+-			}
+-			++d;
+-			break;
+-		case OF_DT_END_NODE:
+-			--d;
+-			break;
+-		}
+-	}
+-	return NULL;
 -}
-diff --git a/arch/sh/cchips/voyagergx/setup.c b/arch/sh/cchips/voyagergx/setup.c
+-
+-void *ft_get_parent(struct ft_cxt *cxt, const void *phandle)
+-{
+-	void *node = ft_node_ph2node(cxt, phandle);
+-	if (node == NULL)
+-		return NULL;
+-
+-	node = __ft_get_parent(cxt, node);
+-	return ft_get_phandle(cxt, node);
+-}
+-
+-static const void *__ft_get_prop(struct ft_cxt *cxt, void *node,
+-                                 const char *propname, unsigned int *len)
+-{
+-	struct ft_atom atom;
+-	int depth = 0;
+-
+-	while ((node = ft_next(cxt, node, &atom)) != NULL) {
+-		switch (atom.tag) {
+-		case OF_DT_BEGIN_NODE:
+-			++depth;
+-			break;
+-
+-		case OF_DT_PROP:
+-			if (depth != 1 || strcmp(atom.name, propname))
+-				break;
+-
+-			if (len)
+-				*len = atom.size;
+-
+-			return atom.data;
+-
+-		case OF_DT_END_NODE:
+-			if (--depth <= 0)
+-				return NULL;
+-		}
+-	}
+-
+-	return NULL;
+-}
+-
+-int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+-		void *buf, const unsigned int buflen)
+-{
+-	const void *data;
+-	unsigned int size;
+-
+-	void *node = ft_node_ph2node(cxt, phandle);
+-	if (!node)
+-		return -1;
+-
+-	data = __ft_get_prop(cxt, node, propname, &size);
+-	if (data) {
+-		unsigned int clipped_size = min(size, buflen);
+-		memcpy(buf, data, clipped_size);
+-		return size;
+-	}
+-
+-	return -1;
+-}
+-
+-void *__ft_find_node_by_prop_value(struct ft_cxt *cxt, void *prev,
+-                                   const char *propname, const char *propval,
+-                                   unsigned int proplen)
+-{
+-	struct ft_atom atom;
+-	char *p = ft_root_node(cxt);
+-	char *next;
+-	int past_prev = prev ? 0 : 1;
+-	int depth = -1;
+-
+-	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+-		const void *data;
+-		unsigned int size;
+-
+-		switch (atom.tag) {
+-		case OF_DT_BEGIN_NODE:
+-			depth++;
+-
+-			if (prev == p) {
+-				past_prev = 1;
+-				break;
+-			}
+-
+-			if (!past_prev || depth < 1)
+-				break;
+-
+-			data = __ft_get_prop(cxt, p, propname, &size);
+-			if (!data || size != proplen)
+-				break;
+-			if (memcmp(data, propval, size))
+-				break;
+-
+-			return p;
+-
+-		case OF_DT_END_NODE:
+-			if (depth-- == 0)
+-				return NULL;
+-
+-			break;
+-		}
+-
+-		p = next;
+-	}
+-
+-	return NULL;
+-}
+-
+-void *ft_find_node_by_prop_value(struct ft_cxt *cxt, const void *prev,
+-                                 const char *propname, const char *propval,
+-                                 int proplen)
+-{
+-	void *node = NULL;
+-
+-	if (prev) {
+-		node = ft_node_ph2node(cxt, prev);
+-
+-		if (!node)
+-			return NULL;
+-	}
+-
+-	node = __ft_find_node_by_prop_value(cxt, node, propname,
+-	                                    propval, proplen);
+-	return ft_get_phandle(cxt, node);
+-}
+-
+-int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+-		const void *buf, const unsigned int buflen)
+-{
+-	struct ft_atom atom;
+-	void *node;
+-	char *p, *next;
+-	int nextra;
+-
+-	node = ft_node_ph2node(cxt, phandle);
+-	if (node == NULL)
+-		return -1;
+-
+-	next = ft_next(cxt, node, &atom);
+-	if (atom.tag != OF_DT_BEGIN_NODE)
+-		/* phandle didn't point to a node */
+-		return -1;
+-	p = next;
+-
+-	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+-		switch (atom.tag) {
+-		case OF_DT_BEGIN_NODE: /* properties must go before subnodes */
+-		case OF_DT_END_NODE:
+-			/* haven't found the property, insert here */
+-			cxt->p = p;
+-			return ft_prop(cxt, propname, buf, buflen);
+-		case OF_DT_PROP:
+-			if (strcmp(atom.name, propname))
+-				break;
+-			/* found an existing property, overwrite it */
+-			nextra = _ALIGN(buflen, 4) - _ALIGN(atom.size, 4);
+-			cxt->p = atom.data;
+-			if (nextra && !ft_make_space(cxt, &cxt->p, FT_STRUCT,
+-						nextra))
+-				return -1;
+-			*(u32 *) (cxt->p - 8) = cpu_to_be32(buflen);
+-			ft_put_bin(cxt, buf, buflen);
+-			return 0;
+-		}
+-		p = next;
+-	}
+-	return -1;
+-}
+-
+-int ft_del_prop(struct ft_cxt *cxt, const void *phandle, const char *propname)
+-{
+-	struct ft_atom atom;
+-	void *node;
+-	char *p, *next;
+-	int size;
+-
+-	node = ft_node_ph2node(cxt, phandle);
+-	if (node == NULL)
+-		return -1;
+-
+-	p = node;
+-	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+-		switch (atom.tag) {
+-		case OF_DT_BEGIN_NODE:
+-		case OF_DT_END_NODE:
+-			return -1;
+-		case OF_DT_PROP:
+-			if (strcmp(atom.name, propname))
+-				break;
+-			/* found the property, remove it */
+-			size = 12 + -_ALIGN(atom.size, 4);
+-			cxt->p = p;
+-			if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, -size))
+-				return -1;
+-			return 0;
+-		}
+-		p = next;
+-	}
+-	return -1;
+-}
+-
+-void *ft_create_node(struct ft_cxt *cxt, const void *parent, const char *name)
+-{
+-	struct ft_atom atom;
+-	char *p, *next, *ret;
+-	int depth = 0;
+-
+-	if (parent) {
+-		p = ft_node_ph2node(cxt, parent);
+-		if (!p)
+-			return NULL;
+-	} else {
+-		p = ft_root_node(cxt);
+-	}
+-
+-	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+-		switch (atom.tag) {
+-		case OF_DT_BEGIN_NODE:
+-			++depth;
+-			if (depth == 1 && strcmp(atom.name, name) == 0)
+-				/* duplicate node name, return error */
+-				return NULL;
+-			break;
+-		case OF_DT_END_NODE:
+-			--depth;
+-			if (depth > 0)
+-				break;
+-			/* end of node, insert here */
+-			cxt->p = p;
+-			ret = ft_begin_node(cxt, name);
+-			ft_end_node(cxt);
+-			return ft_get_phandle(cxt, ret);
+-		}
+-		p = next;
+-	}
+-	return NULL;
+-}
+-
+-/* Returns the start of the path within the provided buffer, or NULL on
+- * error.
+- */
+-char *ft_get_path(struct ft_cxt *cxt, const void *phandle,
+-                  char *buf, int len)
+-{
+-	const char *path_comp[FT_MAX_DEPTH];
+-	struct ft_atom atom;
+-	char *p, *next, *pos;
+-	int depth = 0, i;
+-	void *node;
+-
+-	node = ft_node_ph2node(cxt, phandle);
+-	if (node == NULL)
+-		return NULL;
+-
+-	p = ft_root_node(cxt);
+-
+-	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+-		switch (atom.tag) {
+-		case OF_DT_BEGIN_NODE:
+-			path_comp[depth++] = atom.name;
+-			if (p == node)
+-				goto found;
+-
+-			break;
+-
+-		case OF_DT_END_NODE:
+-			if (--depth == 0)
+-				return NULL;
+-		}
+-
+-		p = next;
+-	}
+-
+-found:
+-	pos = buf;
+-	for (i = 1; i < depth; i++) {
+-		int this_len;
+-
+-		if (len <= 1)
+-			return NULL;
+-
+-		*pos++ = '/';
+-		len--;
+-
+-		strncpy(pos, path_comp[i], len);
+-
+-		if (pos[len - 1] != 0)
+-			return NULL;
+-
+-		this_len = strlen(pos);
+-		len -= this_len;
+-		pos += this_len;
+-	}
+-
+-	return buf;
+-}
+diff --git a/arch/powerpc/boot/flatdevtree.h b/arch/powerpc/boot/flatdevtree.h
 deleted file mode 100644
-index 33f0302..0000000
---- a/arch/sh/cchips/voyagergx/setup.c
+index b0957a2..0000000
+--- a/arch/powerpc/boot/flatdevtree.h
 +++ /dev/null
-@@ -1,37 +0,0 @@
+@@ -1,113 +0,0 @@
 -/*
-- * arch/sh/cchips/voyagergx/setup.c
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
 - *
-- * Setup routines for VoyagerGX cchip.
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
 - *
-- * Copyright (C) 2003 Lineo uSolutions, Inc.
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+- */
+-
+-#ifndef FLATDEVTREE_H
+-#define FLATDEVTREE_H
+-
+-#include "flatdevtree_env.h"
+-
+-/* Definitions used by the flattened device tree */
+-#define OF_DT_HEADER            0xd00dfeed      /* marker */
+-#define OF_DT_BEGIN_NODE        0x1     /* Start of node, full name */
+-#define OF_DT_END_NODE          0x2     /* End node */
+-#define OF_DT_PROP              0x3     /* Property: name off, size, content */
+-#define OF_DT_NOP               0x4     /* nop */
+-#define OF_DT_END               0x9
+-
+-#define OF_DT_VERSION           0x10
+-
+-struct boot_param_header {
+-	u32 magic;              /* magic word OF_DT_HEADER */
+-	u32 totalsize;          /* total size of DT block */
+-	u32 off_dt_struct;      /* offset to structure */
+-	u32 off_dt_strings;     /* offset to strings */
+-	u32 off_mem_rsvmap;     /* offset to memory reserve map */
+-	u32 version;            /* format version */
+-	u32 last_comp_version;  /* last compatible version */
+-	/* version 2 fields below */
+-	u32 boot_cpuid_phys;    /* Physical CPU id we're booting on */
+-	/* version 3 fields below */
+-	u32 dt_strings_size;    /* size of the DT strings block */
+-};
+-
+-struct ft_reserve {
+-	u64 start;
+-	u64 len;
+-};
+-
+-struct ft_region {
+-	char *start;
+-	unsigned long size;
+-};
+-
+-enum ft_rgn_id {
+-	FT_RSVMAP,
+-	FT_STRUCT,
+-	FT_STRINGS,
+-	FT_N_REGION
+-};
+-
+-#define FT_MAX_DEPTH	50
+-
+-struct ft_cxt {
+-	struct boot_param_header *bph;
+-	int max_size;           /* maximum size of tree */
+-	int isordered;		/* everything in standard order */
+-	void *(*realloc)(void *, unsigned long);
+-	char *str_anchor;
+-	char *p;		/* current insertion point in structs */
+-	struct ft_region rgn[FT_N_REGION];
+-	void *genealogy[FT_MAX_DEPTH+1];
+-	char **node_tbl;
+-	unsigned int node_max;
+-	unsigned int nodes_used;
+-};
+-
+-char *ft_begin_node(struct ft_cxt *cxt, const char *name);
+-void ft_end_node(struct ft_cxt *cxt);
+-
+-void ft_begin_tree(struct ft_cxt *cxt);
+-void ft_end_tree(struct ft_cxt *cxt);
+-
+-void ft_nop(struct ft_cxt *cxt);
+-int ft_prop(struct ft_cxt *cxt, const char *name,
+-	    const void *data, unsigned int sz);
+-int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str);
+-int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val);
+-void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+-	      void *(*realloc_fn)(void *, unsigned long));
+-int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+-		unsigned int max_find_device,
+-		void *(*realloc_fn)(void *, unsigned long));
+-int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size);
+-
+-void ft_dump_blob(const void *bphp);
+-void ft_merge_blob(struct ft_cxt *cxt, void *blob);
+-void *ft_find_device(struct ft_cxt *cxt, const void *top,
+-                     const char *srch_path);
+-void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path);
+-int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+-		void *buf, const unsigned int buflen);
+-int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+-		const void *buf, const unsigned int buflen);
+-void *ft_get_parent(struct ft_cxt *cxt, const void *phandle);
+-void *ft_find_node_by_prop_value(struct ft_cxt *cxt, const void *prev,
+-                                 const char *propname, const char *propval,
+-                                 int proplen);
+-void *ft_create_node(struct ft_cxt *cxt, const void *parent, const char *name);
+-char *ft_get_path(struct ft_cxt *cxt, const void *phandle, char *buf, int len);
+-
+-#endif /* FLATDEVTREE_H */
+diff --git a/arch/powerpc/boot/flatdevtree_misc.c b/arch/powerpc/boot/flatdevtree_misc.c
+deleted file mode 100644
+index b367009..0000000
+--- a/arch/powerpc/boot/flatdevtree_misc.c
++++ /dev/null
+@@ -1,79 +0,0 @@
+-/*
+- * This file does the necessary interface mapping between the bootwrapper
+- * device tree operations and the interface provided by shared source
+- * files flatdevicetree.[ch].
 - *
-- * This program is free software; you can redistribute it and/or modify it
-- * under the terms of the GNU General Public License as published by the
-- * Free Software Foundation; either version 2 of the License, or (at your
-- * option) any later version.
+- * Author: Mark A. Greer <mgreer at mvista.com>
+- *
+- * 2006 (c) MontaVista Software, Inc.  This file is licensed under
+- * the terms of the GNU General Public License version 2.  This program
+- * is licensed "as is" without any warranty of any kind, whether express
+- * or implied.
 - */
--#include <linux/init.h>
--#include <linux/module.h>
--#include <asm/io.h>
--#include <asm/voyagergx.h>
+-#include <stddef.h>
+-#include "flatdevtree.h"
+-#include "ops.h"
 -
--static int __init setup_voyagergx(void)
+-static struct ft_cxt cxt;
+-
+-static void *fdtm_finddevice(const char *name)
 -{
--	unsigned long val;
+-	return ft_find_device(&cxt, NULL, name);
+-}
 -
--	val = readl((void __iomem *)DRAM_CTRL);
--	val |= (DRAM_CTRL_CPU_COLUMN_SIZE_256	|
--		DRAM_CTRL_CPU_ACTIVE_PRECHARGE	|
--		DRAM_CTRL_CPU_RESET		|
--		DRAM_CTRL_REFRESH_COMMAND	|
--		DRAM_CTRL_BLOCK_WRITE_TIME	|
--		DRAM_CTRL_BLOCK_WRITE_PRECHARGE	|
--		DRAM_CTRL_ACTIVE_PRECHARGE	|
--		DRAM_CTRL_RESET			|
--		DRAM_CTRL_REMAIN_ACTIVE);
--	writel(val, (void __iomem *)DRAM_CTRL);
+-static int fdtm_getprop(const void *phandle, const char *propname,
+-                        void *buf, const int buflen)
+-{
+-	return ft_get_prop(&cxt, phandle, propname, buf, buflen);
+-}
 -
--	return 0;
+-static int fdtm_setprop(const void *phandle, const char *propname,
+-                        const void *buf, const int buflen)
+-{
+-	return ft_set_prop(&cxt, phandle, propname, buf, buflen);
 -}
 -
--module_init(setup_voyagergx);
-diff --git a/arch/sh/configs/cayman_defconfig b/arch/sh/configs/cayman_defconfig
+-static void *fdtm_get_parent(const void *phandle)
+-{
+-	return ft_get_parent(&cxt, phandle);
+-}
+-
+-static void *fdtm_create_node(const void *phandle, const char *name)
+-{
+-	return ft_create_node(&cxt, phandle, name);
+-}
+-
+-static void *fdtm_find_node_by_prop_value(const void *prev,
+-                                          const char *propname,
+-                                          const char *propval,
+-                                          int proplen)
+-{
+-	return ft_find_node_by_prop_value(&cxt, prev, propname,
+-	                                  propval, proplen);
+-}
+-
+-static unsigned long fdtm_finalize(void)
+-{
+-	ft_end_tree(&cxt);
+-	return (unsigned long)cxt.bph;
+-}
+-
+-static char *fdtm_get_path(const void *phandle, char *buf, int len)
+-{
+-	return ft_get_path(&cxt, phandle, buf, len);
+-}
+-
+-int ft_init(void *dt_blob, unsigned int max_size, unsigned int max_find_device)
+-{
+-	dt_ops.finddevice = fdtm_finddevice;
+-	dt_ops.getprop = fdtm_getprop;
+-	dt_ops.setprop = fdtm_setprop;
+-	dt_ops.get_parent = fdtm_get_parent;
+-	dt_ops.create_node = fdtm_create_node;
+-	dt_ops.find_node_by_prop_value = fdtm_find_node_by_prop_value;
+-	dt_ops.finalize = fdtm_finalize;
+-	dt_ops.get_path = fdtm_get_path;
+-
+-	return ft_open(&cxt, dt_blob, max_size, max_find_device,
+-			platform_ops.realloc);
+-}
+diff --git a/arch/powerpc/boot/holly.c b/arch/powerpc/boot/holly.c
+index 199e783..58013b9 100644
+--- a/arch/powerpc/boot/holly.c
++++ b/arch/powerpc/boot/holly.c
+@@ -28,6 +28,6 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+ 	u32 heapsize = 0x8000000 - (u32)_end; /* 128M */
+ 
+ 	simple_alloc_init(_end, heapsize, 32, 64);
+-	ft_init(_dtb_start, 0, 4);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ }
+diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
 new file mode 100644
-index 0000000..a05b278
+index 0000000..59016be
 --- /dev/null
-+++ b/arch/sh/configs/cayman_defconfig
-@@ -0,0 +1,1166 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.24-rc3
-+# Fri Nov 23 14:15:55 2007
-+#
-+CONFIG_SUPERH=y
-+# CONFIG_SUPERH32 is not set
-+CONFIG_SUPERH64=y
-+CONFIG_RWSEM_GENERIC_SPINLOCK=y
-+CONFIG_GENERIC_FIND_NEXT_BIT=y
-+CONFIG_GENERIC_HWEIGHT=y
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+# CONFIG_GENERIC_TIME is not set
-+# CONFIG_GENERIC_CLOCKEVENTS is not set
-+CONFIG_SYS_SUPPORTS_PCI=y
-+CONFIG_STACKTRACE_SUPPORT=y
-+CONFIG_LOCKDEP_SUPPORT=y
-+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-+CONFIG_ARCH_NO_VIRT_TO_BUS=y
-+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++++ b/arch/powerpc/boot/libfdt-wrapper.c
+@@ -0,0 +1,193 @@
++/*
++ * This file does the necessary interface mapping between the bootwrapper
++ * device tree operations and the interface provided by shared source
++ * files flatdevicetree.[ch].
++ *
++ * Copyright 2007 David Gibson, IBM Corporation.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#include <stddef.h>
++#include <stdio.h>
++#include <page.h>
++#include <libfdt.h>
++#include "ops.h"
++
++#define DEBUG	0
++#define BAD_ERROR(err)	(((err) < 0) \
++			 && ((err) != -FDT_ERR_NOTFOUND) \
++			 && ((err) != -FDT_ERR_EXISTS))
++
++#define check_err(err) \
++	({ \
++		if (BAD_ERROR(err) || ((err < 0) && DEBUG)) \
++			printf("%s():%d  %s\n\r", __FUNCTION__, __LINE__, \
++			       fdt_strerror(err)); \
++		if (BAD_ERROR(err)) \
++			exit(); \
++		(err < 0) ? -1 : 0; \
++	})
 +
-+#
-+# General setup
-+#
-+CONFIG_EXPERIMENTAL=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+CONFIG_LOCALVERSION=""
-+CONFIG_LOCALVERSION_AUTO=y
-+CONFIG_SWAP=y
-+# CONFIG_SYSVIPC is not set
-+CONFIG_POSIX_MQUEUE=y
-+# CONFIG_BSD_PROCESS_ACCT is not set
-+# CONFIG_TASKSTATS is not set
-+# CONFIG_USER_NS is not set
-+# CONFIG_PID_NS is not set
-+# CONFIG_AUDIT is not set
-+# CONFIG_IKCONFIG is not set
-+CONFIG_LOG_BUF_SHIFT=14
-+# CONFIG_CGROUPS is not set
-+CONFIG_FAIR_GROUP_SCHED=y
-+CONFIG_FAIR_USER_SCHED=y
-+# CONFIG_FAIR_CGROUP_SCHED is not set
-+CONFIG_SYSFS_DEPRECATED=y
-+# CONFIG_RELAY is not set
-+# CONFIG_BLK_DEV_INITRD is not set
-+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-+CONFIG_SYSCTL=y
-+CONFIG_EMBEDDED=y
-+CONFIG_UID16=y
-+CONFIG_SYSCTL_SYSCALL=y
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_ELF_CORE=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_ANON_INODES=y
-+CONFIG_EPOLL=y
-+CONFIG_SIGNALFD=y
-+CONFIG_EVENTFD=y
-+CONFIG_SHMEM=y
-+CONFIG_VM_EVENT_COUNTERS=y
-+CONFIG_SLAB=y
-+# CONFIG_SLUB is not set
-+# CONFIG_SLOB is not set
-+CONFIG_RT_MUTEXES=y
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+# CONFIG_MODULE_FORCE_UNLOAD is not set
-+# CONFIG_MODVERSIONS is not set
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+CONFIG_BLOCK=y
-+# CONFIG_LBD is not set
-+# CONFIG_BLK_DEV_IO_TRACE is not set
-+# CONFIG_LSF is not set
-+# CONFIG_BLK_DEV_BSG is not set
++#define offset_devp(off)	\
++	({ \
++		int _offset = (off); \
++		check_err(_offset) ? NULL : (void *)(_offset+1); \
++	})
 +
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+# CONFIG_DEFAULT_AS is not set
-+# CONFIG_DEFAULT_DEADLINE is not set
-+CONFIG_DEFAULT_CFQ=y
-+# CONFIG_DEFAULT_NOOP is not set
-+CONFIG_DEFAULT_IOSCHED="cfq"
++#define devp_offset_find(devp)	(((int)(devp))-1)
++#define devp_offset(devp)	(devp ? ((int)(devp))-1 : 0)
 +
-+#
-+# System type
-+#
-+CONFIG_CPU_SH5=y
-+# CONFIG_CPU_SUBTYPE_SH7619 is not set
-+# CONFIG_CPU_SUBTYPE_SH7206 is not set
-+# CONFIG_CPU_SUBTYPE_SH7705 is not set
-+# CONFIG_CPU_SUBTYPE_SH7706 is not set
-+# CONFIG_CPU_SUBTYPE_SH7707 is not set
-+# CONFIG_CPU_SUBTYPE_SH7708 is not set
-+# CONFIG_CPU_SUBTYPE_SH7709 is not set
-+# CONFIG_CPU_SUBTYPE_SH7710 is not set
-+# CONFIG_CPU_SUBTYPE_SH7712 is not set
-+# CONFIG_CPU_SUBTYPE_SH7720 is not set
-+# CONFIG_CPU_SUBTYPE_SH7750 is not set
-+# CONFIG_CPU_SUBTYPE_SH7091 is not set
-+# CONFIG_CPU_SUBTYPE_SH7750R is not set
-+# CONFIG_CPU_SUBTYPE_SH7750S is not set
-+# CONFIG_CPU_SUBTYPE_SH7751 is not set
-+# CONFIG_CPU_SUBTYPE_SH7751R is not set
-+# CONFIG_CPU_SUBTYPE_SH7760 is not set
-+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
-+# CONFIG_CPU_SUBTYPE_SH7770 is not set
-+# CONFIG_CPU_SUBTYPE_SH7780 is not set
-+# CONFIG_CPU_SUBTYPE_SH7785 is not set
-+# CONFIG_CPU_SUBTYPE_SHX3 is not set
-+# CONFIG_CPU_SUBTYPE_SH7343 is not set
-+# CONFIG_CPU_SUBTYPE_SH7722 is not set
-+CONFIG_CPU_SUBTYPE_SH5_101=y
-+# CONFIG_CPU_SUBTYPE_SH5_103 is not set
++static void *fdt;
++static void *buf; /* = NULL */
 +
-+#
-+# Memory management options
-+#
-+CONFIG_QUICKLIST=y
-+CONFIG_MMU=y
-+CONFIG_PAGE_OFFSET=0x20000000
-+CONFIG_MEMORY_START=0x80000000
-+CONFIG_MEMORY_SIZE=0x00400000
-+CONFIG_32BIT=y
-+CONFIG_ARCH_FLATMEM_ENABLE=y
-+CONFIG_ARCH_SPARSEMEM_ENABLE=y
-+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
-+CONFIG_MAX_ACTIVE_REGIONS=1
-+CONFIG_ARCH_POPULATES_NODE_MAP=y
-+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-+CONFIG_PAGE_SIZE_4KB=y
-+# CONFIG_PAGE_SIZE_8KB is not set
-+# CONFIG_PAGE_SIZE_64KB is not set
-+CONFIG_HUGETLB_PAGE_SIZE_64K=y
-+# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
-+CONFIG_SELECT_MEMORY_MODEL=y
-+CONFIG_FLATMEM_MANUAL=y
-+# CONFIG_DISCONTIGMEM_MANUAL is not set
-+# CONFIG_SPARSEMEM_MANUAL is not set
-+CONFIG_FLATMEM=y
-+CONFIG_FLAT_NODE_MEM_MAP=y
-+CONFIG_SPARSEMEM_STATIC=y
-+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
-+CONFIG_SPLIT_PTLOCK_CPUS=4
-+CONFIG_RESOURCES_64BIT=y
-+CONFIG_ZONE_DMA_FLAG=0
-+CONFIG_NR_QUICK=2
++#define EXPAND_GRANULARITY	1024
 +
-+#
-+# Cache configuration
-+#
-+# CONFIG_SH_DIRECT_MAPPED is not set
-+# CONFIG_CACHE_WRITEBACK is not set
-+# CONFIG_CACHE_WRITETHROUGH is not set
-+CONFIG_CACHE_OFF=y
++static void expand_buf(int minexpand)
++{
++	int size = fdt_totalsize(fdt);
++	int rc;
 +
-+#
-+# Processor features
-+#
-+CONFIG_CPU_LITTLE_ENDIAN=y
-+# CONFIG_CPU_BIG_ENDIAN is not set
-+CONFIG_SH_FPU=y
-+# CONFIG_SH64_FPU_DENORM_FLUSH is not set
-+CONFIG_SH64_USER_MISALIGNED_FIXUP=y
-+CONFIG_SH64_ID2815_WORKAROUND=y
-+CONFIG_CPU_HAS_FPU=y
++	size = _ALIGN(size + minexpand, EXPAND_GRANULARITY);
++	buf = platform_ops.realloc(buf, size);
++	if (!buf)
++		fatal("Couldn't find %d bytes to expand device tree\n\r", size);
++	rc = fdt_open_into(fdt, buf, size);
++	if (rc != 0)
++		fatal("Couldn't expand fdt into new buffer: %s\n\r",
++		      fdt_strerror(rc));
 +
-+#
-+# Board support
-+#
-+CONFIG_SH_CAYMAN=y
++	fdt = buf;
++}
 +
-+#
-+# Timer and clock configuration
-+#
-+CONFIG_SH_TIMER_IRQ=16
-+CONFIG_SH_PCLK_FREQ=50000000
-+# CONFIG_TICK_ONESHOT is not set
++static void *fdt_wrapper_finddevice(const char *path)
++{
++	return offset_devp(fdt_path_offset(fdt, path));
++}
 +
-+#
-+# CPU Frequency scaling
-+#
-+# CONFIG_CPU_FREQ is not set
++static int fdt_wrapper_getprop(const void *devp, const char *name,
++			       void *buf, const int buflen)
++{
++	const void *p;
++	int len;
 +
-+#
-+# DMA support
-+#
++	p = fdt_getprop(fdt, devp_offset(devp), name, &len);
++	if (!p)
++		return check_err(len);
++	memcpy(buf, p, min(len, buflen));
++	return len;
++}
 +
-+#
-+# Companion Chips
-+#
++static int fdt_wrapper_setprop(const void *devp, const char *name,
++			       const void *buf, const int len)
++{
++	int rc;
 +
-+#
-+# Additional SuperH Device Drivers
-+#
-+CONFIG_HEARTBEAT=y
-+# CONFIG_PUSH_SWITCH is not set
++	rc = fdt_setprop(fdt, devp_offset(devp), name, buf, len);
++	if (rc == -FDT_ERR_NOSPACE) {
++		expand_buf(len + 16);
++		rc = fdt_setprop(fdt, devp_offset(devp), name, buf, len);
++	}
 +
-+#
-+# Kernel features
-+#
-+# CONFIG_HZ_100 is not set
-+CONFIG_HZ_250=y
-+# CONFIG_HZ_300 is not set
-+# CONFIG_HZ_1000 is not set
-+CONFIG_HZ=250
-+# CONFIG_KEXEC is not set
-+# CONFIG_CRASH_DUMP is not set
-+# CONFIG_PREEMPT_NONE is not set
-+# CONFIG_PREEMPT_VOLUNTARY is not set
-+CONFIG_PREEMPT=y
-+CONFIG_PREEMPT_BKL=y
-+CONFIG_GUSA=y
++	return check_err(rc);
++}
 +
-+#
-+# Boot options
-+#
-+CONFIG_ZERO_PAGE_OFFSET=0x00001000
-+CONFIG_BOOT_LINK_OFFSET=0x00800000
-+# CONFIG_CMDLINE_BOOL is not set
++static void *fdt_wrapper_get_parent(const void *devp)
++{
++	return offset_devp(fdt_parent_offset(fdt, devp_offset(devp)));
++}
 +
-+#
-+# Bus options
-+#
-+CONFIG_PCI=y
-+CONFIG_SH_PCIDMA_NONCOHERENT=y
-+CONFIG_PCI_AUTO=y
-+CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
-+# CONFIG_ARCH_SUPPORTS_MSI is not set
-+CONFIG_PCI_LEGACY=y
-+# CONFIG_PCI_DEBUG is not set
-+# CONFIG_PCCARD is not set
-+# CONFIG_HOTPLUG_PCI is not set
++static void *fdt_wrapper_create_node(const void *devp, const char *name)
++{
++	int offset;
 +
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+# CONFIG_BINFMT_MISC is not set
++	offset = fdt_add_subnode(fdt, devp_offset(devp), name);
++	if (offset == -FDT_ERR_NOSPACE) {
++		expand_buf(strlen(name) + 16);
++		offset = fdt_add_subnode(fdt, devp_offset(devp), name);
++	}
 +
-+#
-+# Networking
-+#
-+CONFIG_NET=y
++	return offset_devp(offset);
++}
 +
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+# CONFIG_PACKET_MMAP is not set
-+CONFIG_UNIX=y
-+CONFIG_XFRM=y
-+# CONFIG_XFRM_USER is not set
-+# CONFIG_XFRM_SUB_POLICY is not set
-+# CONFIG_XFRM_MIGRATE is not set
-+# CONFIG_NET_KEY is not set
-+CONFIG_INET=y
-+# CONFIG_IP_MULTICAST is not set
-+# CONFIG_IP_ADVANCED_ROUTER is not set
-+CONFIG_IP_FIB_HASH=y
-+CONFIG_IP_PNP=y
-+# CONFIG_IP_PNP_DHCP is not set
-+# CONFIG_IP_PNP_BOOTP is not set
-+# CONFIG_IP_PNP_RARP is not set
-+# CONFIG_NET_IPIP is not set
-+# CONFIG_NET_IPGRE is not set
-+# CONFIG_ARPD is not set
-+# CONFIG_SYN_COOKIES is not set
-+# CONFIG_INET_AH is not set
-+# CONFIG_INET_ESP is not set
-+# CONFIG_INET_IPCOMP is not set
-+# CONFIG_INET_XFRM_TUNNEL is not set
-+# CONFIG_INET_TUNNEL is not set
-+CONFIG_INET_XFRM_MODE_TRANSPORT=y
-+CONFIG_INET_XFRM_MODE_TUNNEL=y
-+CONFIG_INET_XFRM_MODE_BEET=y
-+# CONFIG_INET_LRO is not set
-+CONFIG_INET_DIAG=y
-+CONFIG_INET_TCP_DIAG=y
-+# CONFIG_TCP_CONG_ADVANCED is not set
-+CONFIG_TCP_CONG_CUBIC=y
-+CONFIG_DEFAULT_TCP_CONG="cubic"
-+# CONFIG_TCP_MD5SIG is not set
-+# CONFIG_IPV6 is not set
-+# CONFIG_INET6_XFRM_TUNNEL is not set
-+# CONFIG_INET6_TUNNEL is not set
-+# CONFIG_NETWORK_SECMARK is not set
-+# CONFIG_NETFILTER is not set
-+# CONFIG_IP_DCCP is not set
-+# CONFIG_IP_SCTP is not set
-+# CONFIG_TIPC is not set
-+# CONFIG_ATM is not set
-+# CONFIG_BRIDGE is not set
-+# CONFIG_VLAN_8021Q is not set
-+# CONFIG_DECNET is not set
-+# CONFIG_LLC2 is not set
-+# CONFIG_IPX is not set
-+# CONFIG_ATALK is not set
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+# CONFIG_ECONET is not set
-+# CONFIG_WAN_ROUTER is not set
-+# CONFIG_NET_SCHED is not set
++static void *fdt_wrapper_find_node_by_prop_value(const void *prev,
++						 const char *name,
++						 const char *val,
++						 int len)
++{
++	int offset = fdt_node_offset_by_prop_value(fdt, devp_offset_find(prev),
++	                                           name, val, len);
++	return offset_devp(offset);
++}
 +
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+# CONFIG_HAMRADIO is not set
-+# CONFIG_IRDA is not set
-+# CONFIG_BT is not set
-+# CONFIG_AF_RXRPC is not set
++static void *fdt_wrapper_find_node_by_compatible(const void *prev,
++						 const char *val)
++{
++	int offset = fdt_node_offset_by_compatible(fdt, devp_offset_find(prev),
++	                                           val);
++	return offset_devp(offset);
++}
 +
-+#
-+# Wireless
-+#
-+# CONFIG_CFG80211 is not set
-+# CONFIG_WIRELESS_EXT is not set
-+# CONFIG_MAC80211 is not set
-+# CONFIG_IEEE80211 is not set
-+# CONFIG_RFKILL is not set
-+# CONFIG_NET_9P is not set
++static char *fdt_wrapper_get_path(const void *devp, char *buf, int len)
++{
++	int rc;
 +
-+#
-+# Device Drivers
-+#
++	rc = fdt_get_path(fdt, devp_offset(devp), buf, len);
++	if (check_err(rc))
++		return NULL;
++	return buf;
++}
 +
-+#
-+# Generic Driver Options
-+#
-+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+# CONFIG_FW_LOADER is not set
-+# CONFIG_DEBUG_DRIVER is not set
-+# CONFIG_DEBUG_DEVRES is not set
-+# CONFIG_SYS_HYPERVISOR is not set
-+# CONFIG_CONNECTOR is not set
-+# CONFIG_MTD is not set
-+# CONFIG_PARPORT is not set
-+CONFIG_BLK_DEV=y
-+# CONFIG_BLK_CPQ_CISS_DA is not set
-+# CONFIG_BLK_DEV_DAC960 is not set
-+# CONFIG_BLK_DEV_UMEM is not set
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=y
-+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-+# CONFIG_BLK_DEV_NBD is not set
-+# CONFIG_BLK_DEV_SX8 is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=4096
-+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-+# CONFIG_CDROM_PKTCDVD is not set
-+# CONFIG_ATA_OVER_ETH is not set
-+CONFIG_MISC_DEVICES=y
-+# CONFIG_PHANTOM is not set
-+# CONFIG_EEPROM_93CX6 is not set
-+# CONFIG_SGI_IOC4 is not set
-+# CONFIG_TIFM_CORE is not set
-+# CONFIG_IDE is not set
++static unsigned long fdt_wrapper_finalize(void)
++{
++	int rc;
 +
-+#
-+# SCSI device support
-+#
-+# CONFIG_RAID_ATTRS is not set
-+CONFIG_SCSI=y
-+CONFIG_SCSI_DMA=y
-+# CONFIG_SCSI_TGT is not set
-+# CONFIG_SCSI_NETLINK is not set
-+CONFIG_SCSI_PROC_FS=y
++	rc = fdt_pack(fdt);
++	if (rc != 0)
++		fatal("Couldn't pack flat tree: %s\n\r",
++		      fdt_strerror(rc));
++	return (unsigned long)fdt;
++}
 +
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=y
-+# CONFIG_CHR_DEV_ST is not set
-+# CONFIG_CHR_DEV_OSST is not set
-+# CONFIG_BLK_DEV_SR is not set
-+# CONFIG_CHR_DEV_SG is not set
-+# CONFIG_CHR_DEV_SCH is not set
++void fdt_init(void *blob)
++{
++	int err;
 +
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+CONFIG_SCSI_MULTI_LUN=y
-+# CONFIG_SCSI_CONSTANTS is not set
-+# CONFIG_SCSI_LOGGING is not set
-+# CONFIG_SCSI_SCAN_ASYNC is not set
-+CONFIG_SCSI_WAIT_SCAN=m
++	dt_ops.finddevice = fdt_wrapper_finddevice;
++	dt_ops.getprop = fdt_wrapper_getprop;
++	dt_ops.setprop = fdt_wrapper_setprop;
++	dt_ops.get_parent = fdt_wrapper_get_parent;
++	dt_ops.create_node = fdt_wrapper_create_node;
++	dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value;
++	dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible;
++	dt_ops.get_path = fdt_wrapper_get_path;
++	dt_ops.finalize = fdt_wrapper_finalize;
++
++	/* Make sure the dt blob is the right version and so forth */
++	fdt = blob;
++	err = fdt_open_into(fdt, fdt, fdt_totalsize(blob));
++	if (err == -FDT_ERR_NOSPACE) {
++		int bufsize = fdt_totalsize(fdt) + 4;
++		buf = malloc(bufsize);
++		err = fdt_open_into(fdt, buf, bufsize);
++	}
 +
-+#
-+# SCSI Transports
-+#
-+CONFIG_SCSI_SPI_ATTRS=y
-+# CONFIG_SCSI_FC_ATTRS is not set
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+# CONFIG_SCSI_SAS_LIBSAS is not set
-+# CONFIG_SCSI_SRP_ATTRS is not set
-+CONFIG_SCSI_LOWLEVEL=y
-+# CONFIG_ISCSI_TCP is not set
-+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-+# CONFIG_SCSI_3W_9XXX is not set
-+# CONFIG_SCSI_ACARD is not set
-+# CONFIG_SCSI_AACRAID is not set
-+# CONFIG_SCSI_AIC7XXX is not set
-+# CONFIG_SCSI_AIC7XXX_OLD is not set
-+# CONFIG_SCSI_AIC79XX is not set
-+# CONFIG_SCSI_AIC94XX is not set
-+# CONFIG_SCSI_ARCMSR is not set
-+# CONFIG_MEGARAID_NEWGEN is not set
-+# CONFIG_MEGARAID_LEGACY is not set
-+# CONFIG_MEGARAID_SAS is not set
-+# CONFIG_SCSI_HPTIOP is not set
-+# CONFIG_SCSI_DMX3191D is not set
-+# CONFIG_SCSI_FUTURE_DOMAIN is not set
-+# CONFIG_SCSI_IPS is not set
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+# CONFIG_SCSI_STEX is not set
-+# CONFIG_SCSI_SYM53C8XX_2 is not set
-+# CONFIG_SCSI_QLOGIC_1280 is not set
-+# CONFIG_SCSI_QLA_FC is not set
-+# CONFIG_SCSI_QLA_ISCSI is not set
-+# CONFIG_SCSI_LPFC is not set
-+# CONFIG_SCSI_DC395x is not set
-+# CONFIG_SCSI_DC390T is not set
-+# CONFIG_SCSI_NSP32 is not set
-+# CONFIG_SCSI_DEBUG is not set
-+# CONFIG_SCSI_SRP is not set
-+# CONFIG_ATA is not set
-+# CONFIG_MD is not set
-+# CONFIG_FUSION is not set
++	if (err != 0)
++		fatal("fdt_init(): %s\n\r", fdt_strerror(err));
 +
++	if (buf)
++		fdt = buf;
++}
+diff --git a/arch/powerpc/boot/libfdt/Makefile.libfdt b/arch/powerpc/boot/libfdt/Makefile.libfdt
+new file mode 100644
+index 0000000..82f9c6a
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/Makefile.libfdt
+@@ -0,0 +1,14 @@
++# Makefile.libfdt
 +#
-+# IEEE 1394 (FireWire) support
++# This is not a complete Makefile of itself.  Instead, it is designed to
++# be easily embeddable into other systems of Makefiles.
 +#
-+# CONFIG_FIREWIRE is not set
-+# CONFIG_IEEE1394 is not set
-+# CONFIG_I2O is not set
-+CONFIG_NETDEVICES=y
-+# CONFIG_NETDEVICES_MULTIQUEUE is not set
-+# CONFIG_DUMMY is not set
-+# CONFIG_BONDING is not set
-+# CONFIG_MACVLAN is not set
-+# CONFIG_EQUALIZER is not set
-+# CONFIG_TUN is not set
-+# CONFIG_VETH is not set
-+# CONFIG_IP1000 is not set
-+# CONFIG_ARCNET is not set
-+# CONFIG_PHYLIB is not set
-+CONFIG_NET_ETHERNET=y
-+# CONFIG_MII is not set
-+# CONFIG_AX88796 is not set
-+# CONFIG_STNIC is not set
-+# CONFIG_HAPPYMEAL is not set
-+# CONFIG_SUNGEM is not set
-+# CONFIG_CASSINI is not set
-+# CONFIG_NET_VENDOR_3COM is not set
-+# CONFIG_SMC91X is not set
-+# CONFIG_SMC911X is not set
-+# CONFIG_NET_TULIP is not set
-+# CONFIG_HP100 is not set
-+# CONFIG_IBM_NEW_EMAC_ZMII is not set
-+# CONFIG_IBM_NEW_EMAC_RGMII is not set
-+# CONFIG_IBM_NEW_EMAC_TAH is not set
-+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-+# CONFIG_NET_PCI is not set
-+# CONFIG_B44 is not set
-+CONFIG_NETDEV_1000=y
-+# CONFIG_ACENIC is not set
-+# CONFIG_DL2K is not set
-+# CONFIG_E1000 is not set
-+# CONFIG_E1000E is not set
-+# CONFIG_NS83820 is not set
-+# CONFIG_HAMACHI is not set
-+# CONFIG_YELLOWFIN is not set
-+# CONFIG_R8169 is not set
-+# CONFIG_SIS190 is not set
-+# CONFIG_SKGE is not set
-+# CONFIG_SKY2 is not set
-+# CONFIG_SK98LIN is not set
-+# CONFIG_VIA_VELOCITY is not set
-+# CONFIG_TIGON3 is not set
-+# CONFIG_BNX2 is not set
-+# CONFIG_QLA3XXX is not set
-+# CONFIG_ATL1 is not set
-+CONFIG_NETDEV_10000=y
-+# CONFIG_CHELSIO_T1 is not set
-+# CONFIG_CHELSIO_T3 is not set
-+# CONFIG_IXGBE is not set
-+# CONFIG_IXGB is not set
-+# CONFIG_S2IO is not set
-+# CONFIG_MYRI10GE is not set
-+# CONFIG_NETXEN_NIC is not set
-+# CONFIG_NIU is not set
-+# CONFIG_MLX4_CORE is not set
-+# CONFIG_TEHUTI is not set
-+# CONFIG_TR is not set
++LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
++LIBFDT_INCLUDES = fdt.h libfdt.h
++LIBFDT_EXTRA = libfdt_internal.h
++LIBFDT_LIB = libfdt/libfdt.a
 +
-+#
-+# Wireless LAN
-+#
-+# CONFIG_WLAN_PRE80211 is not set
-+# CONFIG_WLAN_80211 is not set
-+# CONFIG_WAN is not set
-+# CONFIG_FDDI is not set
-+# CONFIG_HIPPI is not set
-+# CONFIG_PPP is not set
-+# CONFIG_SLIP is not set
-+# CONFIG_NET_FC is not set
-+# CONFIG_SHAPER is not set
-+# CONFIG_NETCONSOLE is not set
-+# CONFIG_NETPOLL is not set
-+# CONFIG_NET_POLL_CONTROLLER is not set
-+# CONFIG_ISDN is not set
-+# CONFIG_PHONE is not set
++LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o)
 +
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+# CONFIG_INPUT_FF_MEMLESS is not set
-+# CONFIG_INPUT_POLLDEV is not set
++$(LIBFDT_objdir)/$(LIBFDT_LIB): $(addprefix $(LIBFDT_objdir)/,$(LIBFDT_OBJS))
 +
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+# CONFIG_INPUT_JOYDEV is not set
-+# CONFIG_INPUT_EVDEV is not set
-+# CONFIG_INPUT_EVBUG is not set
+diff --git a/arch/powerpc/boot/libfdt/fdt.c b/arch/powerpc/boot/libfdt/fdt.c
+new file mode 100644
+index 0000000..586a361
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/fdt.c
+@@ -0,0 +1,156 @@
++/*
++ * libfdt - Flat Device Tree manipulation
++ * Copyright (C) 2006 David Gibson, IBM Corporation.
++ *
++ * libfdt is dual licensed: you can use it either under the terms of
++ * the GPL, or the BSD license, at your option.
++ *
++ *  a) This library is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License as
++ *     published by the Free Software Foundation; either version 2 of the
++ *     License, or (at your option) any later version.
++ *
++ *     This library is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ *     You should have received a copy of the GNU General Public
++ *     License along with this library; if not, write to the Free
++ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
++ *     MA 02110-1301 USA
++ *
++ * Alternatively,
++ *
++ *  b) Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *     1. Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *     2. Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
++ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
++ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "libfdt_env.h"
 +
-+#
-+# Input Device Drivers
-+#
-+# CONFIG_INPUT_KEYBOARD is not set
-+# CONFIG_INPUT_MOUSE is not set
-+# CONFIG_INPUT_JOYSTICK is not set
-+# CONFIG_INPUT_TABLET is not set
-+# CONFIG_INPUT_TOUCHSCREEN is not set
-+# CONFIG_INPUT_MISC is not set
++#include <fdt.h>
++#include <libfdt.h>
 +
-+#
-+# Hardware I/O ports
-+#
-+# CONFIG_SERIO is not set
-+# CONFIG_GAMEPORT is not set
++#include "libfdt_internal.h"
 +
-+#
-+# Character devices
-+#
-+CONFIG_VT=y
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+# CONFIG_VT_HW_CONSOLE_BINDING is not set
-+# CONFIG_SERIAL_NONSTANDARD is not set
++int fdt_check_header(const void *fdt)
++{
++	if (fdt_magic(fdt) == FDT_MAGIC) {
++		/* Complete tree */
++		if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
++			return -FDT_ERR_BADVERSION;
++		if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)
++			return -FDT_ERR_BADVERSION;
++	} else if (fdt_magic(fdt) == SW_MAGIC) {
++		/* Unfinished sequential-write blob */
++		if (fdt_size_dt_struct(fdt) == 0)
++			return -FDT_ERR_BADSTATE;
++	} else {
++		return -FDT_ERR_BADMAGIC;
++	}
 +
-+#
-+# Serial drivers
-+#
-+# CONFIG_SERIAL_8250 is not set
++	return 0;
++}
 +
-+#
-+# Non-8250 serial port support
-+#
-+# CONFIG_SERIAL_SH_SCI is not set
-+# CONFIG_SERIAL_JSM is not set
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+# CONFIG_IPMI_HANDLER is not set
-+CONFIG_HW_RANDOM=y
-+# CONFIG_R3964 is not set
-+# CONFIG_APPLICOM is not set
-+# CONFIG_RAW_DRIVER is not set
-+# CONFIG_TCG_TPM is not set
-+CONFIG_DEVPORT=y
-+CONFIG_I2C=m
-+CONFIG_I2C_BOARDINFO=y
-+# CONFIG_I2C_CHARDEV is not set
++const void *fdt_offset_ptr(const void *fdt, int offset, int len)
++{
++	const void *p;
 +
-+#
-+# I2C Algorithms
-+#
-+# CONFIG_I2C_ALGOBIT is not set
-+# CONFIG_I2C_ALGOPCF is not set
-+# CONFIG_I2C_ALGOPCA is not set
++	if (fdt_version(fdt) >= 0x11)
++		if (((offset + len) < offset)
++		    || ((offset + len) > fdt_size_dt_struct(fdt)))
++			return NULL;
 +
-+#
-+# I2C Hardware Bus support
-+#
-+# CONFIG_I2C_ALI1535 is not set
-+# CONFIG_I2C_ALI1563 is not set
-+# CONFIG_I2C_ALI15X3 is not set
-+# CONFIG_I2C_AMD756 is not set
-+# CONFIG_I2C_AMD8111 is not set
-+# CONFIG_I2C_I801 is not set
-+# CONFIG_I2C_I810 is not set
-+# CONFIG_I2C_PIIX4 is not set
-+# CONFIG_I2C_NFORCE2 is not set
-+# CONFIG_I2C_OCORES is not set
-+# CONFIG_I2C_PARPORT_LIGHT is not set
-+# CONFIG_I2C_PROSAVAGE is not set
-+# CONFIG_I2C_SAVAGE4 is not set
-+# CONFIG_I2C_SIMTEC is not set
-+# CONFIG_I2C_SIS5595 is not set
-+# CONFIG_I2C_SIS630 is not set
-+# CONFIG_I2C_SIS96X is not set
-+# CONFIG_I2C_TAOS_EVM is not set
-+# CONFIG_I2C_STUB is not set
-+# CONFIG_I2C_VIA is not set
-+# CONFIG_I2C_VIAPRO is not set
-+# CONFIG_I2C_VOODOO3 is not set
++	p = _fdt_offset_ptr(fdt, offset);
 +
-+#
-+# Miscellaneous I2C Chip support
-+#
-+# CONFIG_SENSORS_DS1337 is not set
-+# CONFIG_SENSORS_DS1374 is not set
-+# CONFIG_DS1682 is not set
-+# CONFIG_SENSORS_EEPROM is not set
-+# CONFIG_SENSORS_PCF8574 is not set
-+# CONFIG_SENSORS_PCA9539 is not set
-+# CONFIG_SENSORS_PCF8591 is not set
-+# CONFIG_SENSORS_MAX6875 is not set
-+# CONFIG_SENSORS_TSL2550 is not set
-+# CONFIG_I2C_DEBUG_CORE is not set
-+# CONFIG_I2C_DEBUG_ALGO is not set
-+# CONFIG_I2C_DEBUG_BUS is not set
-+# CONFIG_I2C_DEBUG_CHIP is not set
++	if (p + len < p)
++		return NULL;
++	return p;
++}
 +
-+#
-+# SPI support
-+#
-+# CONFIG_SPI is not set
-+# CONFIG_SPI_MASTER is not set
-+# CONFIG_W1 is not set
-+# CONFIG_POWER_SUPPLY is not set
-+CONFIG_HWMON=y
-+# CONFIG_HWMON_VID is not set
-+# CONFIG_SENSORS_AD7418 is not set
-+# CONFIG_SENSORS_ADM1021 is not set
-+# CONFIG_SENSORS_ADM1025 is not set
-+# CONFIG_SENSORS_ADM1026 is not set
-+# CONFIG_SENSORS_ADM1029 is not set
-+# CONFIG_SENSORS_ADM1031 is not set
-+# CONFIG_SENSORS_ADM9240 is not set
-+# CONFIG_SENSORS_ADT7470 is not set
-+# CONFIG_SENSORS_ATXP1 is not set
-+# CONFIG_SENSORS_DS1621 is not set
-+# CONFIG_SENSORS_I5K_AMB is not set
-+# CONFIG_SENSORS_F71805F is not set
-+# CONFIG_SENSORS_F71882FG is not set
-+# CONFIG_SENSORS_F75375S is not set
-+# CONFIG_SENSORS_GL518SM is not set
-+# CONFIG_SENSORS_GL520SM is not set
-+# CONFIG_SENSORS_IT87 is not set
-+# CONFIG_SENSORS_LM63 is not set
-+# CONFIG_SENSORS_LM75 is not set
-+# CONFIG_SENSORS_LM77 is not set
-+# CONFIG_SENSORS_LM78 is not set
-+# CONFIG_SENSORS_LM80 is not set
-+# CONFIG_SENSORS_LM83 is not set
-+# CONFIG_SENSORS_LM85 is not set
-+# CONFIG_SENSORS_LM87 is not set
-+# CONFIG_SENSORS_LM90 is not set
-+# CONFIG_SENSORS_LM92 is not set
-+# CONFIG_SENSORS_LM93 is not set
-+# CONFIG_SENSORS_MAX1619 is not set
-+# CONFIG_SENSORS_MAX6650 is not set
-+# CONFIG_SENSORS_PC87360 is not set
-+# CONFIG_SENSORS_PC87427 is not set
-+# CONFIG_SENSORS_SIS5595 is not set
-+# CONFIG_SENSORS_DME1737 is not set
-+# CONFIG_SENSORS_SMSC47M1 is not set
-+# CONFIG_SENSORS_SMSC47M192 is not set
-+# CONFIG_SENSORS_SMSC47B397 is not set
-+# CONFIG_SENSORS_THMC50 is not set
-+# CONFIG_SENSORS_VIA686A is not set
-+# CONFIG_SENSORS_VT1211 is not set
-+# CONFIG_SENSORS_VT8231 is not set
-+# CONFIG_SENSORS_W83781D is not set
-+# CONFIG_SENSORS_W83791D is not set
-+# CONFIG_SENSORS_W83792D is not set
-+# CONFIG_SENSORS_W83793 is not set
-+# CONFIG_SENSORS_W83L785TS is not set
-+# CONFIG_SENSORS_W83627HF is not set
-+# CONFIG_SENSORS_W83627EHF is not set
-+# CONFIG_HWMON_DEBUG_CHIP is not set
-+CONFIG_WATCHDOG=y
-+# CONFIG_WATCHDOG_NOWAYOUT is not set
++uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset)
++{
++	const uint32_t *tagp, *lenp;
++	uint32_t tag;
++	const char *p;
 +
-+#
-+# Watchdog Device Drivers
-+#
-+# CONFIG_SOFT_WATCHDOG is not set
++	if (offset % FDT_TAGSIZE)
++		return -1;
 +
-+#
-+# PCI-based Watchdog Cards
-+#
-+# CONFIG_PCIPCWATCHDOG is not set
-+# CONFIG_WDTPCI is not set
++	tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE);
++	if (! tagp)
++		return FDT_END; /* premature end */
++	tag = fdt32_to_cpu(*tagp);
++	offset += FDT_TAGSIZE;
 +
-+#
-+# Sonics Silicon Backplane
-+#
-+CONFIG_SSB_POSSIBLE=y
-+# CONFIG_SSB is not set
++	switch (tag) {
++	case FDT_BEGIN_NODE:
++		/* skip name */
++		do {
++			p = fdt_offset_ptr(fdt, offset++, 1);
++		} while (p && (*p != '\0'));
++		if (! p)
++			return FDT_END;
++		break;
++	case FDT_PROP:
++		lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp));
++		if (! lenp)
++			return FDT_END;
++		/* skip name offset, length and value */
++		offset += 2*FDT_TAGSIZE + fdt32_to_cpu(*lenp);
++		break;
++	}
 +
-+#
-+# Multifunction device drivers
-+#
-+# CONFIG_MFD_SM501 is not set
++	if (nextoffset)
++		*nextoffset = ALIGN(offset, FDT_TAGSIZE);
 +
-+#
-+# Multimedia devices
-+#
-+CONFIG_VIDEO_DEV=m
-+# CONFIG_VIDEO_V4L1 is not set
-+# CONFIG_VIDEO_V4L1_COMPAT is not set
-+CONFIG_VIDEO_V4L2=y
-+CONFIG_VIDEO_CAPTURE_DRIVERS=y
-+# CONFIG_VIDEO_ADV_DEBUG is not set
-+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
-+# CONFIG_VIDEO_VIVI is not set
-+# CONFIG_VIDEO_SAA5246A is not set
-+# CONFIG_VIDEO_SAA5249 is not set
-+# CONFIG_VIDEO_SAA7134 is not set
-+# CONFIG_VIDEO_HEXIUM_ORION is not set
-+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
-+# CONFIG_VIDEO_CX88 is not set
-+# CONFIG_VIDEO_CX23885 is not set
-+# CONFIG_VIDEO_CAFE_CCIC is not set
-+# CONFIG_RADIO_ADAPTERS is not set
-+CONFIG_DVB_CORE=y
-+# CONFIG_DVB_CORE_ATTACH is not set
-+CONFIG_DVB_CAPTURE_DRIVERS=y
++	return tag;
++}
 +
-+#
-+# Supported SAA7146 based PCI Adapters
-+#
++const char *_fdt_find_string(const char *strtab, int tabsize, const char *s)
++{
++	int len = strlen(s) + 1;
++	const char *last = strtab + tabsize - len;
++	const char *p;
 +
-+#
-+# Supported FlexCopII (B2C2) Adapters
-+#
-+# CONFIG_DVB_B2C2_FLEXCOP is not set
++	for (p = strtab; p <= last; p++)
++		if (memeq(p, s, len))
++			return p;
++	return NULL;
++}
 +
-+#
-+# Supported BT878 Adapters
-+#
++int fdt_move(const void *fdt, void *buf, int bufsize)
++{
++	int err = fdt_check_header(fdt);
 +
-+#
-+# Supported Pluto2 Adapters
-+#
-+# CONFIG_DVB_PLUTO2 is not set
++	if (err)
++		return err;
 +
-+#
-+# Supported DVB Frontends
-+#
++	if (fdt_totalsize(fdt) > bufsize)
++		return -FDT_ERR_NOSPACE;
 +
-+#
-+# Customise DVB Frontends
-+#
-+# CONFIG_DVB_FE_CUSTOMISE is not set
++	memmove(buf, fdt, fdt_totalsize(fdt));
++	return 0;
++}
+diff --git a/arch/powerpc/boot/libfdt/fdt.h b/arch/powerpc/boot/libfdt/fdt.h
+new file mode 100644
+index 0000000..48ccfd9
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/fdt.h
+@@ -0,0 +1,60 @@
++#ifndef _FDT_H
++#define _FDT_H
 +
-+#
-+# DVB-S (satellite) frontends
-+#
-+# CONFIG_DVB_STV0299 is not set
-+# CONFIG_DVB_CX24110 is not set
-+# CONFIG_DVB_CX24123 is not set
-+# CONFIG_DVB_TDA8083 is not set
-+# CONFIG_DVB_MT312 is not set
-+# CONFIG_DVB_VES1X93 is not set
-+# CONFIG_DVB_S5H1420 is not set
-+# CONFIG_DVB_TDA10086 is not set
++#ifndef __ASSEMBLY__
++
++struct fdt_header {
++	uint32_t magic;			 /* magic word FDT_MAGIC */
++	uint32_t totalsize;		 /* total size of DT block */
++	uint32_t off_dt_struct;		 /* offset to structure */
++	uint32_t off_dt_strings;	 /* offset to strings */
++	uint32_t off_mem_rsvmap;	 /* offset to memory reserve map */
++	uint32_t version;		 /* format version */
++	uint32_t last_comp_version;	 /* last compatible version */
++
++	/* version 2 fields below */
++	uint32_t boot_cpuid_phys;	 /* Which physical CPU id we're
++					    booting on */
++	/* version 3 fields below */
++	uint32_t size_dt_strings;	 /* size of the strings block */
++
++	/* version 17 fields below */
++	uint32_t size_dt_struct;	 /* size of the structure block */
++};
++
++struct fdt_reserve_entry {
++	uint64_t address;
++	uint64_t size;
++};
++
++struct fdt_node_header {
++	uint32_t tag;
++	char name[0];
++};
++
++struct fdt_property {
++	uint32_t tag;
++	uint32_t len;
++	uint32_t nameoff;
++	char data[0];
++};
++
++#endif /* !__ASSEMBLY */
++
++#define FDT_MAGIC	0xd00dfeed	/* 4: version, 4: total size */
++#define FDT_TAGSIZE	sizeof(uint32_t)
++
++#define FDT_BEGIN_NODE	0x1		/* Start node: full name */
++#define FDT_END_NODE	0x2		/* End node */
++#define FDT_PROP	0x3		/* Property: name off,
++					   size, content */
++#define FDT_NOP		0x4		/* nop */
++#define FDT_END		0x9
++
++#define FDT_V1_SIZE	(7*sizeof(uint32_t))
++#define FDT_V2_SIZE	(FDT_V1_SIZE + sizeof(uint32_t))
++#define FDT_V3_SIZE	(FDT_V2_SIZE + sizeof(uint32_t))
++#define FDT_V16_SIZE	FDT_V3_SIZE
++#define FDT_V17_SIZE	(FDT_V16_SIZE + sizeof(uint32_t))
++
++#endif /* _FDT_H */
+diff --git a/arch/powerpc/boot/libfdt/fdt_ro.c b/arch/powerpc/boot/libfdt/fdt_ro.c
+new file mode 100644
+index 0000000..12a37d5
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/fdt_ro.c
+@@ -0,0 +1,583 @@
++/*
++ * libfdt - Flat Device Tree manipulation
++ * Copyright (C) 2006 David Gibson, IBM Corporation.
++ *
++ * libfdt is dual licensed: you can use it either under the terms of
++ * the GPL, or the BSD license, at your option.
++ *
++ *  a) This library is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License as
++ *     published by the Free Software Foundation; either version 2 of the
++ *     License, or (at your option) any later version.
++ *
++ *     This library is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ *     You should have received a copy of the GNU General Public
++ *     License along with this library; if not, write to the Free
++ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
++ *     MA 02110-1301 USA
++ *
++ * Alternatively,
++ *
++ *  b) Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *     1. Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *     2. Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
++ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
++ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "libfdt_env.h"
++
++#include <fdt.h>
++#include <libfdt.h>
++
++#include "libfdt_internal.h"
++
++#define CHECK_HEADER(fdt) \
++	{ \
++		int err; \
++		if ((err = fdt_check_header(fdt)) != 0) \
++			return err; \
++	}
++
++static int nodename_eq(const void *fdt, int offset,
++		       const char *s, int len)
++{
++	const char *p = fdt_offset_ptr(fdt, offset, len+1);
++
++	if (! p)
++		/* short match */
++		return 0;
++
++	if (memcmp(p, s, len) != 0)
++		return 0;
++
++	if (p[len] == '\0')
++		return 1;
++	else if (!memchr(s, '@', len) && (p[len] == '@'))
++		return 1;
++	else
++		return 0;
++}
++
++const char *fdt_string(const void *fdt, int stroffset)
++{
++	return (char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
++}
++
++int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
++{
++	CHECK_HEADER(fdt);
++	*address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address);
++	*size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size);
++	return 0;
++}
++
++int fdt_num_mem_rsv(const void *fdt)
++{
++	int i = 0;
++
++	while (fdt64_to_cpu(_fdt_mem_rsv(fdt, i)->size) != 0)
++		i++;
++	return i;
++}
++
++int fdt_subnode_offset_namelen(const void *fdt, int parentoffset,
++			       const char *name, int namelen)
++{
++	int level = 0;
++	uint32_t tag;
++	int offset, nextoffset;
++
++	CHECK_HEADER(fdt);
++
++	tag = fdt_next_tag(fdt, parentoffset, &nextoffset);
++	if (tag != FDT_BEGIN_NODE)
++		return -FDT_ERR_BADOFFSET;
++
++	do {
++		offset = nextoffset;
++		tag = fdt_next_tag(fdt, offset, &nextoffset);
++
++		switch (tag) {
++		case FDT_END:
++			return -FDT_ERR_TRUNCATED;
++
++		case FDT_BEGIN_NODE:
++			level++;
++			if (level != 1)
++				continue;
++			if (nodename_eq(fdt, offset+FDT_TAGSIZE, name, namelen))
++				/* Found it! */
++				return offset;
++			break;
++
++		case FDT_END_NODE:
++			level--;
++			break;
++
++		case FDT_PROP:
++		case FDT_NOP:
++			break;
++
++		default:
++			return -FDT_ERR_BADSTRUCTURE;
++		}
++	} while (level >= 0);
++
++	return -FDT_ERR_NOTFOUND;
++}
++
++int fdt_subnode_offset(const void *fdt, int parentoffset,
++		       const char *name)
++{
++	return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name));
++}
++
++int fdt_path_offset(const void *fdt, const char *path)
++{
++	const char *end = path + strlen(path);
++	const char *p = path;
++	int offset = 0;
++
++	CHECK_HEADER(fdt);
++
++	if (*path != '/')
++		return -FDT_ERR_BADPATH;
++
++	while (*p) {
++		const char *q;
++
++		while (*p == '/')
++			p++;
++		if (! *p)
++			return offset;
++		q = strchr(p, '/');
++		if (! q)
++			q = end;
++
++		offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
++		if (offset < 0)
++			return offset;
++
++		p = q;
++	}
++
++	return offset;
++}
++
++const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
++{
++	const struct fdt_node_header *nh;
++	int err;
++
++	if ((err = fdt_check_header(fdt)) != 0)
++		goto fail;
++
++	err = -FDT_ERR_BADOFFSET;
++	nh = fdt_offset_ptr(fdt, nodeoffset, sizeof(*nh));
++	if (!nh || (fdt32_to_cpu(nh->tag) != FDT_BEGIN_NODE))
++		goto fail;
++
++	if (len)
++		*len = strlen(nh->name);
++
++	return nh->name;
++
++ fail:
++	if (len)
++		*len = err;
++	return NULL;
++}
++
++const struct fdt_property *fdt_get_property(const void *fdt,
++					    int nodeoffset,
++					    const char *name, int *lenp)
++{
++	uint32_t tag;
++	const struct fdt_property *prop;
++	int namestroff;
++	int offset, nextoffset;
++	int err;
++
++	if ((err = fdt_check_header(fdt)) != 0)
++		goto fail;
++
++	err = -FDT_ERR_BADOFFSET;
++	if (nodeoffset % FDT_TAGSIZE)
++		goto fail;
++
++	tag = fdt_next_tag(fdt, nodeoffset, &nextoffset);
++	if (tag != FDT_BEGIN_NODE)
++		goto fail;
++
++	do {
++		offset = nextoffset;
++
++		tag = fdt_next_tag(fdt, offset, &nextoffset);
++		switch (tag) {
++		case FDT_END:
++			err = -FDT_ERR_TRUNCATED;
++			goto fail;
++
++		case FDT_BEGIN_NODE:
++		case FDT_END_NODE:
++		case FDT_NOP:
++			break;
++
++		case FDT_PROP:
++			err = -FDT_ERR_BADSTRUCTURE;
++			prop = fdt_offset_ptr(fdt, offset, sizeof(*prop));
++			if (! prop)
++				goto fail;
++			namestroff = fdt32_to_cpu(prop->nameoff);
++			if (streq(fdt_string(fdt, namestroff), name)) {
++				/* Found it! */
++				int len = fdt32_to_cpu(prop->len);
++				prop = fdt_offset_ptr(fdt, offset,
++						      sizeof(*prop)+len);
++				if (! prop)
++					goto fail;
++
++				if (lenp)
++					*lenp = len;
++
++				return prop;
++			}
++			break;
++
++		default:
++			err = -FDT_ERR_BADSTRUCTURE;
++			goto fail;
++		}
++	} while ((tag != FDT_BEGIN_NODE) && (tag != FDT_END_NODE));
++
++	err = -FDT_ERR_NOTFOUND;
++ fail:
++	if (lenp)
++		*lenp = err;
++	return NULL;
++}
++
++const void *fdt_getprop(const void *fdt, int nodeoffset,
++		  const char *name, int *lenp)
++{
++	const struct fdt_property *prop;
++
++	prop = fdt_get_property(fdt, nodeoffset, name, lenp);
++	if (! prop)
++		return NULL;
++
++	return prop->data;
++}
++
++uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
++{
++	const uint32_t *php;
++	int len;
++
++	php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
++	if (!php || (len != sizeof(*php)))
++		return 0;
++
++	return fdt32_to_cpu(*php);
++}
++
++int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
++{
++	uint32_t tag;
++	int p = 0, overflow = 0;
++	int offset, nextoffset, namelen;
++	const char *name;
++
++	CHECK_HEADER(fdt);
++
++	tag = fdt_next_tag(fdt, 0, &nextoffset);
++	if (tag != FDT_BEGIN_NODE)
++		return -FDT_ERR_BADSTRUCTURE;
++
++	if (buflen < 2)
++		return -FDT_ERR_NOSPACE;
++	buf[0] = '/';
++	p = 1;
++
++	while (nextoffset <= nodeoffset) {
++		offset = nextoffset;
++		tag = fdt_next_tag(fdt, offset, &nextoffset);
++		switch (tag) {
++		case FDT_END:
++			return -FDT_ERR_BADOFFSET;
++
++		case FDT_BEGIN_NODE:
++			name = fdt_get_name(fdt, offset, &namelen);
++			if (!name)
++				return namelen;
++			if (overflow || ((p + namelen + 1) > buflen)) {
++				overflow++;
++				break;
++			}
++			memcpy(buf + p, name, namelen);
++			p += namelen;
++			buf[p++] = '/';
++			break;
++
++		case FDT_END_NODE:
++			if (overflow) {
++				overflow--;
++				break;
++			}
++			do {
++				p--;
++			} while  (buf[p-1] != '/');
++			break;
++
++		case FDT_PROP:
++		case FDT_NOP:
++			break;
++
++		default:
++			return -FDT_ERR_BADSTRUCTURE;
++		}
++	}
++
++	if (overflow)
++		return -FDT_ERR_NOSPACE;
++
++	if (p > 1) /* special case so that root path is "/", not "" */
++		p--;
++	buf[p] = '\0';
++	return p;
++}
++
++int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
++				 int supernodedepth, int *nodedepth)
++{
++	int level = -1;
++	uint32_t tag;
++	int offset, nextoffset = 0;
++	int supernodeoffset = -FDT_ERR_INTERNAL;
++
++	CHECK_HEADER(fdt);
++
++	if (supernodedepth < 0)
++		return -FDT_ERR_NOTFOUND;
++
++	do {
++		offset = nextoffset;
++		tag = fdt_next_tag(fdt, offset, &nextoffset);
++		switch (tag) {
++		case FDT_END:
++			return -FDT_ERR_BADOFFSET;
++
++		case FDT_BEGIN_NODE:
++			level++;
++			if (level == supernodedepth)
++				supernodeoffset = offset;
++			break;
++
++		case FDT_END_NODE:
++			level--;
++			break;
++
++		case FDT_PROP:
++		case FDT_NOP:
++			break;
++
++		default:
++			return -FDT_ERR_BADSTRUCTURE;
++		}
++	} while (offset < nodeoffset);
++
++	if (nodedepth)
++		*nodedepth = level;
++
++	if (supernodedepth > level)
++		return -FDT_ERR_NOTFOUND;
++	return supernodeoffset;
++}
++
++int fdt_node_depth(const void *fdt, int nodeoffset)
++{
++	int nodedepth;
++	int err;
++
++	err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth);
++	if (err)
++		return (err < 0) ? err : -FDT_ERR_INTERNAL;
++	return nodedepth;
++}
++
++int fdt_parent_offset(const void *fdt, int nodeoffset)
++{
++	int nodedepth = fdt_node_depth(fdt, nodeoffset);
++
++	if (nodedepth < 0)
++		return nodedepth;
++	return fdt_supernode_atdepth_offset(fdt, nodeoffset,
++					    nodedepth - 1, NULL);
++}
++
++int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
++				  const char *propname,
++				  const void *propval, int proplen)
++{
++	uint32_t tag;
++	int offset, nextoffset;
++	const void *val;
++	int len;
++
++	CHECK_HEADER(fdt);
++
++	if (startoffset >= 0) {
++		tag = fdt_next_tag(fdt, startoffset, &nextoffset);
++		if (tag != FDT_BEGIN_NODE)
++			return -FDT_ERR_BADOFFSET;
++	} else {
++		nextoffset = 0;
++	}
++
++	/* FIXME: The algorithm here is pretty horrible: we scan each
++	 * property of a node in fdt_getprop(), then if that didn't
++	 * find what we want, we scan over them again making our way
++	 * to the next node.  Still it's the easiest to implement
++	 * approach; performance can come later. */
++	do {
++		offset = nextoffset;
++		tag = fdt_next_tag(fdt, offset, &nextoffset);
++
++		switch (tag) {
++		case FDT_BEGIN_NODE:
++			val = fdt_getprop(fdt, offset, propname, &len);
++			if (val
++			    && (len == proplen)
++			    && (memcmp(val, propval, len) == 0))
++				return offset;
++			break;
++
++		case FDT_PROP:
++		case FDT_END:
++		case FDT_END_NODE:
++		case FDT_NOP:
++			break;
++
++		default:
++			return -FDT_ERR_BADSTRUCTURE;
++		}
++	} while (tag != FDT_END);
++
++	return -FDT_ERR_NOTFOUND;
++}
++
++int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
++{
++	if ((phandle == 0) || (phandle == -1))
++		return -FDT_ERR_BADPHANDLE;
++	phandle = cpu_to_fdt32(phandle);
++	return fdt_node_offset_by_prop_value(fdt, -1, "linux,phandle",
++					     &phandle, sizeof(phandle));
++}
++
++int _stringlist_contains(const void *strlist, int listlen, const char *str)
++{
++	int len = strlen(str);
++	const void *p;
++
++	while (listlen >= len) {
++		if (memcmp(str, strlist, len+1) == 0)
++			return 1;
++		p = memchr(strlist, '\0', listlen);
++		if (!p)
++			return 0; /* malformed strlist.. */
++		listlen -= (p-strlist) + 1;
++		strlist = p + 1;
++	}
++	return 0;
++}
++
++int fdt_node_check_compatible(const void *fdt, int nodeoffset,
++			      const char *compatible)
++{
++	const void *prop;
++	int len;
++
++	prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
++	if (!prop)
++		return len;
++	if (_stringlist_contains(prop, len, compatible))
++		return 0;
++	else
++		return 1;
++}
++
++int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
++				  const char *compatible)
++{
++	uint32_t tag;
++	int offset, nextoffset;
++	int err;
++
++	CHECK_HEADER(fdt);
++
++	if (startoffset >= 0) {
++		tag = fdt_next_tag(fdt, startoffset, &nextoffset);
++		if (tag != FDT_BEGIN_NODE)
++			return -FDT_ERR_BADOFFSET;
++	} else {
++		nextoffset = 0;
++	}
++
++	/* FIXME: The algorithm here is pretty horrible: we scan each
++	 * property of a node in fdt_node_check_compatible(), then if
++	 * that didn't find what we want, we scan over them again
++	 * making our way to the next node.  Still it's the easiest to
++	 * implement approach; performance can come later. */
++	do {
++		offset = nextoffset;
++		tag = fdt_next_tag(fdt, offset, &nextoffset);
++
++		switch (tag) {
++		case FDT_BEGIN_NODE:
++			err = fdt_node_check_compatible(fdt, offset,
++							compatible);
++			if ((err < 0)
++			    && (err != -FDT_ERR_NOTFOUND))
++				return err;
++			else if (err == 0)
++				return offset;
++			break;
++
++		case FDT_PROP:
++		case FDT_END:
++		case FDT_END_NODE:
++		case FDT_NOP:
++			break;
++
++		default:
++			return -FDT_ERR_BADSTRUCTURE;
++		}
++	} while (tag != FDT_END);
++
++	return -FDT_ERR_NOTFOUND;
++}
+diff --git a/arch/powerpc/boot/libfdt/fdt_rw.c b/arch/powerpc/boot/libfdt/fdt_rw.c
+new file mode 100644
+index 0000000..6673f8e
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/fdt_rw.c
+@@ -0,0 +1,447 @@
++/*
++ * libfdt - Flat Device Tree manipulation
++ * Copyright (C) 2006 David Gibson, IBM Corporation.
++ *
++ * libfdt is dual licensed: you can use it either under the terms of
++ * the GPL, or the BSD license, at your option.
++ *
++ *  a) This library is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License as
++ *     published by the Free Software Foundation; either version 2 of the
++ *     License, or (at your option) any later version.
++ *
++ *     This library is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ *     You should have received a copy of the GNU General Public
++ *     License along with this library; if not, write to the Free
++ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
++ *     MA 02110-1301 USA
++ *
++ * Alternatively,
++ *
++ *  b) Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *     1. Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *     2. Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
++ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
++ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "libfdt_env.h"
++
++#include <fdt.h>
++#include <libfdt.h>
++
++#include "libfdt_internal.h"
++
++static int _blocks_misordered(const void *fdt,
++			      int mem_rsv_size, int struct_size)
++{
++	return (fdt_off_mem_rsvmap(fdt) < ALIGN(sizeof(struct fdt_header), 8))
++		|| (fdt_off_dt_struct(fdt) <
++		    (fdt_off_mem_rsvmap(fdt) + mem_rsv_size))
++		|| (fdt_off_dt_strings(fdt) <
++		    (fdt_off_dt_struct(fdt) + struct_size))
++		|| (fdt_totalsize(fdt) <
++		    (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt)));
++}
++
++static int rw_check_header(void *fdt)
++{
++	int err;
++
++	if ((err = fdt_check_header(fdt)))
++		return err;
++	if (fdt_version(fdt) < 17)
++		return -FDT_ERR_BADVERSION;
++	if (_blocks_misordered(fdt, sizeof(struct fdt_reserve_entry),
++			       fdt_size_dt_struct(fdt)))
++		return -FDT_ERR_BADLAYOUT;
++	if (fdt_version(fdt) > 17)
++		fdt_set_version(fdt, 17);
++
++	return 0;
++}
++
++#define RW_CHECK_HEADER(fdt) \
++	{ \
++		int err; \
++		if ((err = rw_check_header(fdt)) != 0) \
++			return err; \
++	}
++
++static inline int _blob_data_size(void *fdt)
++{
++	return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
++}
++
++static int _blob_splice(void *fdt, void *p, int oldlen, int newlen)
++{
++	void *end = fdt + _blob_data_size(fdt);
++
++	if (((p + oldlen) < p) || ((p + oldlen) > end))
++		return -FDT_ERR_BADOFFSET;
++	if ((end - oldlen + newlen) > (fdt + fdt_totalsize(fdt)))
++		return -FDT_ERR_NOSPACE;
++	memmove(p + newlen, p + oldlen, end - p - oldlen);
++	return 0;
++}
++
++static int _blob_splice_mem_rsv(void *fdt, struct fdt_reserve_entry *p,
++				int oldn, int newn)
++{
++	int delta = (newn - oldn) * sizeof(*p);
++	int err;
++	err = _blob_splice(fdt, p, oldn * sizeof(*p), newn * sizeof(*p));
++	if (err)
++		return err;
++	fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta);
++	fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
++	return 0;
++}
++
++static int _blob_splice_struct(void *fdt, void *p,
++			       int oldlen, int newlen)
++{
++	int delta = newlen - oldlen;
++	int err;
++
++	if ((err = _blob_splice(fdt, p, oldlen, newlen)))
++		return err;
++
++	fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta);
++	fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
++	return 0;
++}
++
++static int _blob_splice_string(void *fdt, int newlen)
++{
++	void *p = fdt + fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
++	int err;
++
++	if ((err = _blob_splice(fdt, p, 0, newlen)))
++		return err;
++
++	fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen);
++	return 0;
++}
++
++static int _find_add_string(void *fdt, const char *s)
++{
++	char *strtab = (char *)fdt + fdt_off_dt_strings(fdt);
++	const char *p;
++	char *new;
++	int len = strlen(s) + 1;
++	int err;
++
++	p = _fdt_find_string(strtab, fdt_size_dt_strings(fdt), s);
++	if (p)
++		/* found it */
++		return (p - strtab);
++
++	new = strtab + fdt_size_dt_strings(fdt);
++	err = _blob_splice_string(fdt, len);
++	if (err)
++		return err;
++
++	memcpy(new, s, len);
++	return (new - strtab);
++}
++
++int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size)
++{
++	struct fdt_reserve_entry *re;
++	int err;
++
++	if ((err = rw_check_header(fdt)))
++		return err;
++
++	re = _fdt_mem_rsv_w(fdt, fdt_num_mem_rsv(fdt));
++	err = _blob_splice_mem_rsv(fdt, re, 0, 1);
++	if (err)
++		return err;
++
++	re->address = cpu_to_fdt64(address);
++	re->size = cpu_to_fdt64(size);
++	return 0;
++}
++
++int fdt_del_mem_rsv(void *fdt, int n)
++{
++	struct fdt_reserve_entry *re = _fdt_mem_rsv_w(fdt, n);
++	int err;
++
++	if ((err = rw_check_header(fdt)))
++		return err;
++	if (n >= fdt_num_mem_rsv(fdt))
++		return -FDT_ERR_NOTFOUND;
++
++	err = _blob_splice_mem_rsv(fdt, re, 1, 0);
++	if (err)
++		return err;
++	return 0;
++}
++
++static int _resize_property(void *fdt, int nodeoffset, const char *name, int len,
++			    struct fdt_property **prop)
++{
++	int oldlen;
++	int err;
++
++	*prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
++	if (! (*prop))
++		return oldlen;
++
++	if ((err = _blob_splice_struct(fdt, (*prop)->data,
++				       ALIGN(oldlen, FDT_TAGSIZE),
++				       ALIGN(len, FDT_TAGSIZE))))
++		return err;
++
++	(*prop)->len = cpu_to_fdt32(len);
++	return 0;
++}
++
++static int _add_property(void *fdt, int nodeoffset, const char *name, int len,
++			 struct fdt_property **prop)
++{
++	uint32_t tag;
++	int proplen;
++	int nextoffset;
++	int namestroff;
++	int err;
++
++	tag = fdt_next_tag(fdt, nodeoffset, &nextoffset);
++	if (tag != FDT_BEGIN_NODE)
++		return -FDT_ERR_BADOFFSET;
++
++	namestroff = _find_add_string(fdt, name);
++	if (namestroff < 0)
++		return namestroff;
++
++	*prop = _fdt_offset_ptr_w(fdt, nextoffset);
++	proplen = sizeof(**prop) + ALIGN(len, FDT_TAGSIZE);
++
++	err = _blob_splice_struct(fdt, *prop, 0, proplen);
++	if (err)
++		return err;
++
++	(*prop)->tag = cpu_to_fdt32(FDT_PROP);
++	(*prop)->nameoff = cpu_to_fdt32(namestroff);
++	(*prop)->len = cpu_to_fdt32(len);
++	return 0;
++}
++
++int fdt_setprop(void *fdt, int nodeoffset, const char *name,
++		const void *val, int len)
++{
++	struct fdt_property *prop;
++	int err;
++
++	if ((err = rw_check_header(fdt)))
++		return err;
++
++	err = _resize_property(fdt, nodeoffset, name, len, &prop);
++	if (err == -FDT_ERR_NOTFOUND)
++		err = _add_property(fdt, nodeoffset, name, len, &prop);
++	if (err)
++		return err;
++
++	memcpy(prop->data, val, len);
++	return 0;
++}
++
++int fdt_delprop(void *fdt, int nodeoffset, const char *name)
++{
++	struct fdt_property *prop;
++	int len, proplen;
++
++	RW_CHECK_HEADER(fdt);
++
++	prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
++	if (! prop)
++		return len;
++
++	proplen = sizeof(*prop) + ALIGN(len, FDT_TAGSIZE);
++	return _blob_splice_struct(fdt, prop, proplen, 0);
++}
++
++int fdt_add_subnode_namelen(void *fdt, int parentoffset,
++			    const char *name, int namelen)
++{
++	struct fdt_node_header *nh;
++	int offset, nextoffset;
++	int nodelen;
++	int err;
++	uint32_t tag;
++	uint32_t *endtag;
++
++	RW_CHECK_HEADER(fdt);
++
++	offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen);
++	if (offset >= 0)
++		return -FDT_ERR_EXISTS;
++	else if (offset != -FDT_ERR_NOTFOUND)
++		return offset;
++
++	/* Try to place the new node after the parent's properties */
++	fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */
++	do {
++		offset = nextoffset;
++		tag = fdt_next_tag(fdt, offset, &nextoffset);
++	} while (tag == FDT_PROP);
++
++	nh = _fdt_offset_ptr_w(fdt, offset);
++	nodelen = sizeof(*nh) + ALIGN(namelen+1, FDT_TAGSIZE) + FDT_TAGSIZE;
++
++	err = _blob_splice_struct(fdt, nh, 0, nodelen);
++	if (err)
++		return err;
++
++	nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
++	memset(nh->name, 0, ALIGN(namelen+1, FDT_TAGSIZE));
++	memcpy(nh->name, name, namelen);
++	endtag = (uint32_t *)((void *)nh + nodelen - FDT_TAGSIZE);
++	*endtag = cpu_to_fdt32(FDT_END_NODE);
++
++	return offset;
++}
++
++int fdt_add_subnode(void *fdt, int parentoffset, const char *name)
++{
++	return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name));
++}
++
++int fdt_del_node(void *fdt, int nodeoffset)
++{
++	int endoffset;
++
++	RW_CHECK_HEADER(fdt);
++
++	endoffset = _fdt_node_end_offset(fdt, nodeoffset);
++	if (endoffset < 0)
++		return endoffset;
++
++	return _blob_splice_struct(fdt, _fdt_offset_ptr_w(fdt, nodeoffset),
++				   endoffset - nodeoffset, 0);
++}
++
++static void _packblocks(const void *fdt, void *buf,
++		       int mem_rsv_size, int struct_size)
++{
++	int mem_rsv_off, struct_off, strings_off;
++
++	mem_rsv_off = ALIGN(sizeof(struct fdt_header), 8);
++	struct_off = mem_rsv_off + mem_rsv_size;
++	strings_off = struct_off + struct_size;
++
++	memmove(buf + mem_rsv_off, fdt + fdt_off_mem_rsvmap(fdt), mem_rsv_size);
++	fdt_set_off_mem_rsvmap(buf, mem_rsv_off);
++
++	memmove(buf + struct_off, fdt + fdt_off_dt_struct(fdt), struct_size);
++	fdt_set_off_dt_struct(buf, struct_off);
++	fdt_set_size_dt_struct(buf, struct_size);
++
++	memmove(buf + strings_off, fdt + fdt_off_dt_strings(fdt),
++		fdt_size_dt_strings(fdt));
++	fdt_set_off_dt_strings(buf, strings_off);
++	fdt_set_size_dt_strings(buf, fdt_size_dt_strings(fdt));
++}
++
++int fdt_open_into(const void *fdt, void *buf, int bufsize)
++{
++	int err;
++	int mem_rsv_size, struct_size;
++	int newsize;
++	void *tmp;
++
++	err = fdt_check_header(fdt);
++	if (err)
++		return err;
++
++	mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
++		* sizeof(struct fdt_reserve_entry);
++
++	if (fdt_version(fdt) >= 17) {
++		struct_size = fdt_size_dt_struct(fdt);
++	} else {
++		struct_size = 0;
++		while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END)
++			;
++	}
++
++	if (!_blocks_misordered(fdt, mem_rsv_size, struct_size)) {
++		/* no further work necessary */
++		err = fdt_move(fdt, buf, bufsize);
++		if (err)
++			return err;
++		fdt_set_version(buf, 17);
++		fdt_set_size_dt_struct(buf, struct_size);
++		fdt_set_totalsize(buf, bufsize);
++		return 0;
++	}
++
++	/* Need to reorder */
++	newsize = ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size
++		+ struct_size + fdt_size_dt_strings(fdt);
++
++	if (bufsize < newsize)
++		return -FDT_ERR_NOSPACE;
++
++	if (((buf + newsize) <= fdt)
++	    || (buf >= (fdt + fdt_totalsize(fdt)))) {
++		tmp = buf;
++	} else {
++		tmp = (void *)fdt + fdt_totalsize(fdt);
++		if ((tmp + newsize) > (buf + bufsize))
++			return -FDT_ERR_NOSPACE;
++	}
++
++	_packblocks(fdt, tmp, mem_rsv_size, struct_size);
++	memmove(buf, tmp, newsize);
++
++	fdt_set_magic(buf, FDT_MAGIC);
++	fdt_set_totalsize(buf, bufsize);
++	fdt_set_version(buf, 17);
++	fdt_set_last_comp_version(buf, 16);
++	fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt));
++
++	return 0;
++}
++
++int fdt_pack(void *fdt)
++{
++	int mem_rsv_size;
++	int err;
++
++	err = rw_check_header(fdt);
++	if (err)
++		return err;
++
++	mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
++		* sizeof(struct fdt_reserve_entry);
++	_packblocks(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt));
++	fdt_set_totalsize(fdt, _blob_data_size(fdt));
++
++	return 0;
++}
+diff --git a/arch/powerpc/boot/libfdt/fdt_strerror.c b/arch/powerpc/boot/libfdt/fdt_strerror.c
+new file mode 100644
+index 0000000..f9d32ef
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/fdt_strerror.c
+@@ -0,0 +1,96 @@
++/*
++ * libfdt - Flat Device Tree manipulation
++ * Copyright (C) 2006 David Gibson, IBM Corporation.
++ *
++ * libfdt is dual licensed: you can use it either under the terms of
++ * the GPL, or the BSD license, at your option.
++ *
++ *  a) This library is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License as
++ *     published by the Free Software Foundation; either version 2 of the
++ *     License, or (at your option) any later version.
++ *
++ *     This library is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ *     You should have received a copy of the GNU General Public
++ *     License along with this library; if not, write to the Free
++ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
++ *     MA 02110-1301 USA
++ *
++ * Alternatively,
++ *
++ *  b) Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *     1. Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *     2. Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
++ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
++ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "libfdt_env.h"
++
++#include <fdt.h>
++#include <libfdt.h>
++
++#include "libfdt_internal.h"
++
++struct errtabent {
++	const char *str;
++};
++
++#define ERRTABENT(val) \
++	[(val)] = { .str = #val, }
++
++static struct errtabent errtable[] = {
++	ERRTABENT(FDT_ERR_NOTFOUND),
++	ERRTABENT(FDT_ERR_EXISTS),
++	ERRTABENT(FDT_ERR_NOSPACE),
++
++	ERRTABENT(FDT_ERR_BADOFFSET),
++	ERRTABENT(FDT_ERR_BADPATH),
++	ERRTABENT(FDT_ERR_BADSTATE),
++
++	ERRTABENT(FDT_ERR_TRUNCATED),
++	ERRTABENT(FDT_ERR_BADMAGIC),
++	ERRTABENT(FDT_ERR_BADVERSION),
++	ERRTABENT(FDT_ERR_BADSTRUCTURE),
++	ERRTABENT(FDT_ERR_BADLAYOUT),
++};
++#define ERRTABSIZE	(sizeof(errtable) / sizeof(errtable[0]))
++
++const char *fdt_strerror(int errval)
++{
++	if (errval > 0)
++		return "<valid offset/length>";
++	else if (errval == 0)
++		return "<no error>";
++	else if (errval > -ERRTABSIZE) {
++		const char *s = errtable[-errval].str;
++
++		if (s)
++			return s;
++	}
++
++	return "<unknown error>";
++}
+diff --git a/arch/powerpc/boot/libfdt/fdt_sw.c b/arch/powerpc/boot/libfdt/fdt_sw.c
+new file mode 100644
+index 0000000..dda2de3
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/fdt_sw.c
+@@ -0,0 +1,258 @@
++/*
++ * libfdt - Flat Device Tree manipulation
++ * Copyright (C) 2006 David Gibson, IBM Corporation.
++ *
++ * libfdt is dual licensed: you can use it either under the terms of
++ * the GPL, or the BSD license, at your option.
++ *
++ *  a) This library is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License as
++ *     published by the Free Software Foundation; either version 2 of the
++ *     License, or (at your option) any later version.
++ *
++ *     This library is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ *     You should have received a copy of the GNU General Public
++ *     License along with this library; if not, write to the Free
++ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
++ *     MA 02110-1301 USA
++ *
++ * Alternatively,
++ *
++ *  b) Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *     1. Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *     2. Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
++ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
++ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "libfdt_env.h"
++
++#include <fdt.h>
++#include <libfdt.h>
++
++#include "libfdt_internal.h"
++
++static int check_header_sw(void *fdt)
++{
++	if (fdt_magic(fdt) != SW_MAGIC)
++		return -FDT_ERR_BADMAGIC;
++	return 0;
++}
++
++static void *grab_space(void *fdt, int len)
++{
++	int offset = fdt_size_dt_struct(fdt);
++	int spaceleft;
++
++	spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt)
++		- fdt_size_dt_strings(fdt);
++
++	if ((offset + len < offset) || (offset + len > spaceleft))
++		return NULL;
++
++	fdt_set_size_dt_struct(fdt, offset + len);
++	return fdt_offset_ptr_w(fdt, offset, len);
++}
++
++int fdt_create(void *buf, int bufsize)
++{
++	void *fdt = buf;
++
++	if (bufsize < sizeof(struct fdt_header))
++		return -FDT_ERR_NOSPACE;
++
++	memset(buf, 0, bufsize);
++
++	fdt_set_magic(fdt, SW_MAGIC);
++	fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION);
++	fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION);
++	fdt_set_totalsize(fdt,  bufsize);
++
++	fdt_set_off_mem_rsvmap(fdt, ALIGN(sizeof(struct fdt_header),
++					  sizeof(struct fdt_reserve_entry)));
++	fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt));
++	fdt_set_off_dt_strings(fdt, bufsize);
++
++	return 0;
++}
++
++int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
++{
++	struct fdt_reserve_entry *re;
++	int err = check_header_sw(fdt);
++	int offset;
++
++	if (err)
++		return err;
++	if (fdt_size_dt_struct(fdt))
++		return -FDT_ERR_BADSTATE;
++
++	offset = fdt_off_dt_struct(fdt);
++	if ((offset + sizeof(*re)) > fdt_totalsize(fdt))
++		return -FDT_ERR_NOSPACE;
++
++	re = (struct fdt_reserve_entry *)(fdt + offset);
++	re->address = cpu_to_fdt64(addr);
++	re->size = cpu_to_fdt64(size);
++
++	fdt_set_off_dt_struct(fdt, offset + sizeof(*re));
++
++	return 0;
++}
++
++int fdt_finish_reservemap(void *fdt)
++{
++	return fdt_add_reservemap_entry(fdt, 0, 0);
++}
++
++int fdt_begin_node(void *fdt, const char *name)
++{
++	struct fdt_node_header *nh;
++	int err = check_header_sw(fdt);
++	int namelen = strlen(name) + 1;
++
++	if (err)
++		return err;
++
++	nh = grab_space(fdt, sizeof(*nh) + ALIGN(namelen, FDT_TAGSIZE));
++	if (! nh)
++		return -FDT_ERR_NOSPACE;
++
++	nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
++	memcpy(nh->name, name, namelen);
++	return 0;
++}
++
++int fdt_end_node(void *fdt)
++{
++	uint32_t *en;
++	int err = check_header_sw(fdt);
++
++	if (err)
++		return err;
++
++	en = grab_space(fdt, FDT_TAGSIZE);
++	if (! en)
++		return -FDT_ERR_NOSPACE;
++
++	*en = cpu_to_fdt32(FDT_END_NODE);
++	return 0;
++}
++
++static int find_add_string(void *fdt, const char *s)
++{
++	char *strtab = (char *)fdt + fdt_totalsize(fdt);
++	const char *p;
++	int strtabsize = fdt_size_dt_strings(fdt);
++	int len = strlen(s) + 1;
++	int struct_top, offset;
++
++	p = _fdt_find_string(strtab - strtabsize, strtabsize, s);
++	if (p)
++		return p - strtab;
++
++	/* Add it */
++	offset = -strtabsize - len;
++	struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
++	if (fdt_totalsize(fdt) + offset < struct_top)
++		return 0; /* no more room :( */
++
++	memcpy(strtab + offset, s, len);
++	fdt_set_size_dt_strings(fdt, strtabsize + len);
++	return offset;
++}
++
++int fdt_property(void *fdt, const char *name, const void *val, int len)
++{
++	struct fdt_property *prop;
++	int err = check_header_sw(fdt);
++	int nameoff;
++
++	if (err)
++		return err;
++
++	nameoff = find_add_string(fdt, name);
++	if (nameoff == 0)
++		return -FDT_ERR_NOSPACE;
++
++	prop = grab_space(fdt, sizeof(*prop) + ALIGN(len, FDT_TAGSIZE));
++	if (! prop)
++		return -FDT_ERR_NOSPACE;
++
++	prop->tag = cpu_to_fdt32(FDT_PROP);
++	prop->nameoff = cpu_to_fdt32(nameoff);
++	prop->len = cpu_to_fdt32(len);
++	memcpy(prop->data, val, len);
++	return 0;
++}
++
++int fdt_finish(void *fdt)
++{
++	int err = check_header_sw(fdt);
++	char *p = (char *)fdt;
++	uint32_t *end;
++	int oldstroffset, newstroffset;
++	uint32_t tag;
++	int offset, nextoffset;
++
++	if (err)
++		return err;
++
++	/* Add terminator */
++	end = grab_space(fdt, sizeof(*end));
++	if (! end)
++		return -FDT_ERR_NOSPACE;
++	*end = cpu_to_fdt32(FDT_END);
++
++	/* Relocate the string table */
++	oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt);
++	newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
++	memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt));
++	fdt_set_off_dt_strings(fdt, newstroffset);
++
++	/* Walk the structure, correcting string offsets */
++	offset = 0;
++	while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) {
++		if (tag == FDT_PROP) {
++			struct fdt_property *prop =
++				fdt_offset_ptr_w(fdt, offset, sizeof(*prop));
++			int nameoff;
++
++			if (! prop)
++				return -FDT_ERR_BADSTRUCTURE;
++
++			nameoff = fdt32_to_cpu(prop->nameoff);
++			nameoff += fdt_size_dt_strings(fdt);
++			prop->nameoff = cpu_to_fdt32(nameoff);
++		}
++		offset = nextoffset;
++	}
++
++	/* Finally, adjust the header */
++	fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt));
++	fdt_set_magic(fdt, FDT_MAGIC);
++	return 0;
++}
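
The sequential-write interface added above is order-dependent: fdt_create() initialises the header, the reserve map is emitted next, nodes and properties are written with nested begin/end calls, and fdt_finish() terminates the structure and relocates the strings block. A minimal usage sketch (buffer size and property values are only illustrative):

	char buf[1024];
	int err;

	err = fdt_create(buf, sizeof(buf));
	if (!err) err = fdt_finish_reservemap(buf);
	if (!err) err = fdt_begin_node(buf, "");	/* root node */
	if (!err) err = fdt_property_string(buf, "model", "example,board");
	if (!err) err = fdt_property_cell(buf, "#address-cells", 1);
	if (!err) err = fdt_end_node(buf);
	if (!err) err = fdt_finish(buf);
	/* on success, buf now holds a complete flattened device tree */
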
+diff --git a/arch/powerpc/boot/libfdt/fdt_wip.c b/arch/powerpc/boot/libfdt/fdt_wip.c
+new file mode 100644
+index 0000000..88e24b8
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/fdt_wip.c
+@@ -0,0 +1,144 @@
++/*
++ * libfdt - Flat Device Tree manipulation
++ * Copyright (C) 2006 David Gibson, IBM Corporation.
++ *
++ * libfdt is dual licensed: you can use it either under the terms of
++ * the GPL, or the BSD license, at your option.
++ *
++ *  a) This library is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License as
++ *     published by the Free Software Foundation; either version 2 of the
++ *     License, or (at your option) any later version.
++ *
++ *     This library is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ *     You should have received a copy of the GNU General Public
++ *     License along with this library; if not, write to the Free
++ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
++ *     MA 02110-1301 USA
++ *
++ * Alternatively,
++ *
++ *  b) Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *     1. Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *     2. Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
++ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
++ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "libfdt_env.h"
++
++#include <fdt.h>
++#include <libfdt.h>
++
++#include "libfdt_internal.h"
++
++int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
++			const void *val, int len)
++{
++	void *propval;
++	int proplen;
++
++	propval = fdt_getprop_w(fdt, nodeoffset, name, &proplen);
++	if (! propval)
++		return proplen;
++
++	if (proplen != len)
++		return -FDT_ERR_NOSPACE;
++
++	memcpy(propval, val, len);
++	return 0;
++}
++
++static void nop_region(void *start, int len)
++{
++	uint32_t *p;
++
++	for (p = start; (void *)p < (start + len); p++)
++		*p = cpu_to_fdt32(FDT_NOP);
++}
++
++int fdt_nop_property(void *fdt, int nodeoffset, const char *name)
++{
++	struct fdt_property *prop;
++	int len;
++
++	prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
++	if (! prop)
++		return len;
++
++	nop_region(prop, len + sizeof(*prop));
++
++	return 0;
++}
++
++int _fdt_node_end_offset(void *fdt, int nodeoffset)
++{
++	int level = 0;
++	uint32_t tag;
++	int offset, nextoffset;
++
++	tag = fdt_next_tag(fdt, nodeoffset, &nextoffset);
++	if (tag != FDT_BEGIN_NODE)
++		return -FDT_ERR_BADOFFSET;
++	do {
++		offset = nextoffset;
++		tag = fdt_next_tag(fdt, offset, &nextoffset);
++
++		switch (tag) {
++		case FDT_END:
++			return offset;
++
++		case FDT_BEGIN_NODE:
++			level++;
++			break;
++
++		case FDT_END_NODE:
++			level--;
++			break;
++
++		case FDT_PROP:
++		case FDT_NOP:
++			break;
++
++		default:
++			return -FDT_ERR_BADSTRUCTURE;
++		}
++	} while (level >= 0);
++
++	return nextoffset;
++}
++
++int fdt_nop_node(void *fdt, int nodeoffset)
++{
++	int endoffset;
++
++	endoffset = _fdt_node_end_offset(fdt, nodeoffset);
++	if (endoffset < 0)
++		return endoffset;
++
++	nop_region(fdt_offset_ptr_w(fdt, nodeoffset, 0), endoffset - nodeoffset);
++	return 0;
++}
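
The write-in-place helpers above never change the size or layout of the blob: fdt_setprop_inplace() only succeeds when the new value has exactly the length of the existing one, and fdt_nop_property()/fdt_nop_node() overwrite the affected region with FDT_NOP tags. A small sketch (node path, property name and value are illustrative):

	int node = fdt_path_offset(fdt, "/soc/serial");

	if (node >= 0) {
		/* only works because the existing value is a single 4-byte cell */
		fdt_setprop_inplace_cell(fdt, node, "clock-frequency", 33333333);
		/* or hide the whole node without resizing the blob */
		fdt_nop_node(fdt, node);
	}
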
+diff --git a/arch/powerpc/boot/libfdt/libfdt.h b/arch/powerpc/boot/libfdt/libfdt.h
+new file mode 100644
+index 0000000..6b2fb92
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/libfdt.h
+@@ -0,0 +1,721 @@
++#ifndef _LIBFDT_H
++#define _LIBFDT_H
++/*
++ * libfdt - Flat Device Tree manipulation
++ * Copyright (C) 2006 David Gibson, IBM Corporation.
++ *
++ * libfdt is dual licensed: you can use it either under the terms of
++ * the GPL, or the BSD license, at your option.
++ *
++ *  a) This library is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License as
++ *     published by the Free Software Foundation; either version 2 of the
++ *     License, or (at your option) any later version.
++ *
++ *     This library is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ *     You should have received a copy of the GNU General Public
++ *     License along with this library; if not, write to the Free
++ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
++ *     MA 02110-1301 USA
++ *
++ * Alternatively,
++ *
++ *  b) Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *     1. Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *     2. Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
++ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
++ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <libfdt_env.h>
++#include <fdt.h>
++
++#define FDT_FIRST_SUPPORTED_VERSION	0x10
++#define FDT_LAST_SUPPORTED_VERSION	0x11
++
++/* Error codes: informative error codes */
++#define FDT_ERR_NOTFOUND	1
++	/* FDT_ERR_NOTFOUND: The requested node or property does not exist */
++#define FDT_ERR_EXISTS		2
++	/* FDT_ERR_EXISTS: Attempted to create a node or property which
++	 * already exists */
++#define FDT_ERR_NOSPACE		3
++	/* FDT_ERR_NOSPACE: Operation needed to expand the device
++	 * tree, but its buffer did not have sufficient space to
++	 * contain the expanded tree. Use fdt_open_into() to move the
++	 * device tree to a buffer with more space. */
++
++/* Error codes: codes for bad parameters */
++#define FDT_ERR_BADOFFSET	4
++	/* FDT_ERR_BADOFFSET: Function was passed a structure block
++	 * offset which is out-of-bounds, or which points to an
++	 * unsuitable part of the structure for the operation. */
++#define FDT_ERR_BADPATH		5
++	/* FDT_ERR_BADPATH: Function was passed a badly formatted path
++	 * (e.g. missing a leading / for a function which requires an
++	 * absolute path) */
++#define FDT_ERR_BADPHANDLE	6
++	/* FDT_ERR_BADPHANDLE: Function was passed an invalid phandle
++	 * value.  phandle values of 0 and -1 are not permitted. */
++#define FDT_ERR_BADSTATE	7
++	/* FDT_ERR_BADSTATE: Function was passed an incomplete device
++	 * tree created by the sequential-write functions, which is
++	 * not sufficiently complete for the requested operation. */
++
++/* Error codes: codes for bad device tree blobs */
++#define FDT_ERR_TRUNCATED	8
++	/* FDT_ERR_TRUNCATED: Structure block of the given device tree
++	 * ends without an FDT_END tag. */
++#define FDT_ERR_BADMAGIC	9
++	/* FDT_ERR_BADMAGIC: Given "device tree" appears not to be a
++	 * device tree at all - it is missing the flattened device
++	 * tree magic number. */
++#define FDT_ERR_BADVERSION	10
++	/* FDT_ERR_BADVERSION: Given device tree has a version which
++	 * can't be handled by the requested operation.  For
++	 * read-write functions, this may mean that fdt_open_into() is
++	 * required to convert the tree to the expected version. */
++#define FDT_ERR_BADSTRUCTURE	11
++	/* FDT_ERR_BADSTRUCTURE: Given device tree has a corrupt
++	 * structure block or other serious error (e.g. misnested
++	 * nodes, or subnodes preceding properties). */
++#define FDT_ERR_BADLAYOUT	12
++	/* FDT_ERR_BADLAYOUT: For read-write functions, the given
++	 * device tree has its sub-blocks in an order that the
++	 * function can't handle (memory reserve map, then structure,
++	 * then strings).  Use fdt_open_into() to reorganize the tree
++	 * into a form suitable for the read-write operations. */
++
++/* "Can't happen" error indicating a bug in libfdt */
++#define FDT_ERR_INTERNAL	13
++	/* FDT_ERR_INTERNAL: libfdt has failed an internal assertion.
++	 * Should never be returned, if it is, it indicates a bug in
++	 * libfdt itself. */
++
++#define FDT_ERR_MAX		13
++
++/**********************************************************************/
++/* Low-level functions (you probably don't need these)                */
++/**********************************************************************/
++
++const void *fdt_offset_ptr(const void *fdt, int offset, int checklen);
++static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen)
++{
++	return (void *)fdt_offset_ptr(fdt, offset, checklen);
++}
++
++uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset);
++
++/**********************************************************************/
++/* General functions                                                  */
++/**********************************************************************/
++
++#define fdt_get_header(fdt, field) \
++	(fdt32_to_cpu(((const struct fdt_header *)(fdt))->field))
++#define fdt_magic(fdt) 			(fdt_get_header(fdt, magic))
++#define fdt_totalsize(fdt)		(fdt_get_header(fdt, totalsize))
++#define fdt_off_dt_struct(fdt)		(fdt_get_header(fdt, off_dt_struct))
++#define fdt_off_dt_strings(fdt)		(fdt_get_header(fdt, off_dt_strings))
++#define fdt_off_mem_rsvmap(fdt)		(fdt_get_header(fdt, off_mem_rsvmap))
++#define fdt_version(fdt)		(fdt_get_header(fdt, version))
++#define fdt_last_comp_version(fdt) 	(fdt_get_header(fdt, last_comp_version))
++#define fdt_boot_cpuid_phys(fdt) 	(fdt_get_header(fdt, boot_cpuid_phys))
++#define fdt_size_dt_strings(fdt) 	(fdt_get_header(fdt, size_dt_strings))
++#define fdt_size_dt_struct(fdt)		(fdt_get_header(fdt, size_dt_struct))
++
++#define __fdt_set_hdr(name) \
++	static inline void fdt_set_##name(void *fdt, uint32_t val) \
++	{ \
++		struct fdt_header *fdth = fdt; \
++		fdth->name = cpu_to_fdt32(val); \
++	}
++__fdt_set_hdr(magic);
++__fdt_set_hdr(totalsize);
++__fdt_set_hdr(off_dt_struct);
++__fdt_set_hdr(off_dt_strings);
++__fdt_set_hdr(off_mem_rsvmap);
++__fdt_set_hdr(version);
++__fdt_set_hdr(last_comp_version);
++__fdt_set_hdr(boot_cpuid_phys);
++__fdt_set_hdr(size_dt_strings);
++__fdt_set_hdr(size_dt_struct);
++#undef __fdt_set_hdr
++
++/**
++ * fdt_check_header - sanity check a device tree or possible device tree
++ * @fdt: pointer to data which might be a flattened device tree
++ *
++ * fdt_check_header() checks that the given buffer contains what
++ * appears to be a flattened device tree with sane information in its
++ * header.
++ *
++ * returns:
++ *     0, if the buffer appears to contain a valid device tree
++ *     -FDT_ERR_BADMAGIC,
++ *     -FDT_ERR_BADVERSION,
++ *     -FDT_ERR_BADSTATE, standard meanings, as above
++ */
++int fdt_check_header(const void *fdt);
++
++/**
++ * fdt_move - move a device tree around in memory
++ * @fdt: pointer to the device tree to move
++ * @buf: pointer to memory where the device is to be moved
++ * @bufsize: size of the memory space at buf
++ *
++ * fdt_move() relocates, if possible, the device tree blob located at
++ * fdt to the buffer at buf of size bufsize.  The buffer may overlap
++ * with the existing device tree blob at fdt.  Therefore,
++ *     fdt_move(fdt, fdt, fdt_totalsize(fdt))
++ * should always succeed.
++ *
++ * returns:
++ *     0, on success
++ *     -FDT_ERR_NOSPACE, bufsize is insufficient to contain the device tree
++ *     -FDT_ERR_BADMAGIC,
++ *     -FDT_ERR_BADVERSION,
++ *     -FDT_ERR_BADSTATE, standard meanings
++ */
++int fdt_move(const void *fdt, void *buf, int bufsize);
++
++/**********************************************************************/
++/* Read-only functions                                                */
++/**********************************************************************/
++
++/**
++ * fdt_string - retrieve a string from the strings block of a device tree
++ * @fdt: pointer to the device tree blob
++ * @stroffset: offset of the string within the strings block (native endian)
++ *
++ * fdt_string() retrieves a pointer to a single string from the
++ * strings block of the device tree blob at fdt.
++ *
++ * returns:
++ *     a pointer to the string, on success
++ *     NULL, if stroffset is out of bounds
++ */
++const char *fdt_string(const void *fdt, int stroffset);
++
++/**
++ * fdt_num_mem_rsv - retrieve the number of memory reserve map entries
++ * @fdt: pointer to the device tree blob
++ *
++ * Returns the number of entries in the device tree blob's memory
++ * reservation map.  This does not include the terminating 0,0 entry
++ * or any other (0,0) entries reserved for expansion.
++ *
++ * returns:
++ *     the number of entries
++ */
++int fdt_num_mem_rsv(const void *fdt);
++
++/**
++ * fdt_get_mem_rsv - retrieve one memory reserve map entry
++ * @fdt: pointer to the device tree blob
++ * @address, @size: pointers to 64-bit variables
++ *
++ * On success, *address and *size will contain the address and size of
++ * the n-th reserve map entry from the device tree blob, in
++ * native-endian format.
++ *
++ * returns:
++ *     0, on success
++ *     -FDT_ERR_BADMAGIC,
++ *     -FDT_ERR_BADVERSION,
++ *     -FDT_ERR_BADSTATE, standard meanings
++ */
++int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size);
++
++/**
++ * fdt_subnode_offset_namelen - find a subnode based on substring
++ * @fdt: pointer to the device tree blob
++ * @parentoffset: structure block offset of a node
++ * @name: name of the subnode to locate
++ * @namelen: number of characters of name to consider
++ *
++ * Identical to fdt_subnode_offset(), but only examine the first
++ * namelen characters of name for matching the subnode name.  This is
++ * useful for finding subnodes based on a portion of a larger string,
++ * such as a full path.
++ */
++int fdt_subnode_offset_namelen(const void *fdt, int parentoffset,
++			       const char *name, int namelen);
++/**
++ * fdt_subnode_offset - find a subnode of a given node
++ * @fdt: pointer to the device tree blob
++ * @parentoffset: structure block offset of a node
++ * @name: name of the subnode to locate
++ *
++ * fdt_subnode_offset() finds a subnode of the node at structure block
++ * offset parentoffset with the given name.  name may include a unit
++ * address, in which case fdt_subnode_offset() will find the subnode
++ * with that unit address, or the unit address may be omitted, in
++ * which case fdt_subnode_offset() will find an arbitrary subnode
++ * whose name excluding unit address matches the given name.
++ *
++ * returns:
++ *	structure block offset of the requested subnode (>=0), on success
++ *	-FDT_ERR_NOTFOUND, if the requested subnode does not exist
++ *	-FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE tag
++ *      -FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE,
++ *	-FDT_ERR_TRUNCATED, standard meanings.
++ */
++int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name);
++
++/**
++ * fdt_path_offset - find a tree node by its full path
++ * @fdt: pointer to the device tree blob
++ * @path: full path of the node to locate
++ *
++ * fdt_path_offset() finds a node of a given path in the device tree.
++ * Each path component may omit the unit address portion, but the
++ * results of this are undefined if any such path component is
++ * ambiguous (that is if there are multiple nodes at the relevant
++ * level matching the given component, differentiated only by unit
++ * address).
++ *
++ * returns:
++ *	structure block offset of the node with the requested path (>=0), on success
++ *	-FDT_ERR_BADPATH, given path does not begin with '/' or is invalid
++ *	-FDT_ERR_NOTFOUND, if the requested node does not exist
++ *      -FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE,
++ *	-FDT_ERR_TRUNCATED, standard meanings.
++ */
++int fdt_path_offset(const void *fdt, const char *path);
++
++/**
++ * fdt_get_name - retrieve the name of a given node
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: structure block offset of the starting node
++ * @lenp: pointer to an integer variable (will be overwritten) or NULL
++ *
++ * fdt_get_name() retrieves the name (including unit address) of the
++ * device tree node at structure block offset nodeoffset.  If lenp is
++ * non-NULL, the length of this name is also returned, in the integer
++ * pointed to by lenp.
++ *
++ * returns:
++ *	pointer to the node's name, on success
++ *		If lenp is non-NULL, *lenp contains the length of that name (>=0)
++ *	NULL, on error
++ *		if lenp is non-NULL *lenp contains an error code (<0):
++ *		-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
++ *		-FDT_ERR_BADMAGIC,
++ *		-FDT_ERR_BADVERSION,
++ *		-FDT_ERR_BADSTATE, standard meanings
++ */
++const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp);
++
++/**
++ * fdt_get_property - find a given property in a given node
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: offset of the node whose property to find
++ * @name: name of the property to find
++ * @lenp: pointer to an integer variable (will be overwritten) or NULL
++ *
++ * fdt_get_property() retrieves a pointer to the fdt_property
++ * structure within the device tree blob corresponding to the property
++ * named 'name' of the node at offset nodeoffset.  If lenp is
++ * non-NULL, the length of the property value is also returned, in the
++ * integer pointed to by lenp.
++ *
++ * returns:
++ *	pointer to the structure representing the property
++ *		if lenp is non-NULL, *lenp contains the length of the property
++ *		value (>=0)
++ *	NULL, on error
++ *		if lenp is non-NULL, *lenp contains an error code (<0):
++ *		-FDT_ERR_NOTFOUND, node does not have named property
++ *		-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
++ *		-FDT_ERR_BADMAGIC,
++ *		-FDT_ERR_BADVERSION,
++ *		-FDT_ERR_BADSTATE,
++ *		-FDT_ERR_BADSTRUCTURE,
++ *		-FDT_ERR_TRUNCATED, standard meanings
++ */
++const struct fdt_property *fdt_get_property(const void *fdt, int nodeoffset,
++					    const char *name, int *lenp);
++static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
++						      const char *name,
++						      int *lenp)
++{
++	return (struct fdt_property *)fdt_get_property(fdt, nodeoffset,
++						       name, lenp);
++}
++
++/**
++ * fdt_getprop - retrieve the value of a given property
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: offset of the node whose property to find
++ * @name: name of the property to find
++ * @lenp: pointer to an integer variable (will be overwritten) or NULL
++ *
++ * fdt_getprop() retrieves a pointer to the value of the property
++ * named 'name' of the node at offset nodeoffset (this will be a
++ * pointer to within the device blob itself, not a copy of the value).
++ * If lenp is non-NULL, the length of the property value is also
++ * returned, in the integer pointed to by lenp.
++ *
++ * returns:
++ *	pointer to the property's value
++ *		if lenp is non-NULL, *lenp contains the length of the property
++ *		value (>=0)
++ *	NULL, on error
++ *		if lenp is non-NULL, *lenp contains an error code (<0):
++ *		-FDT_ERR_NOTFOUND, node does not have named property
++ *		-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
++ *		-FDT_ERR_BADMAGIC,
++ *		-FDT_ERR_BADVERSION,
++ *		-FDT_ERR_BADSTATE,
++ *		-FDT_ERR_BADSTRUCTURE,
++ *		-FDT_ERR_TRUNCATED, standard meanings
++ */
++const void *fdt_getprop(const void *fdt, int nodeoffset,
++			const char *name, int *lenp);
++static inline void *fdt_getprop_w(void *fdt, int nodeoffset,
++				  const char *name, int *lenp)
++{
++	return (void *)fdt_getprop(fdt, nodeoffset, name, lenp);
++}
++
++/**
++ * fdt_get_phandle - retrieve the phandle of a given node
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: structure block offset of the node
++ *
++ * fdt_get_phandle() retrieves the phandle of the device tree node at
++ * structure block offset nodeoffset.
++ *
++ * returns:
++ *	the phandle of the node at nodeoffset, on success (!= 0, != -1)
++ *	0, if the node has no phandle, or another error occurs
++ */
++uint32_t fdt_get_phandle(const void *fdt, int nodeoffset);
++
++/**
++ * fdt_get_path - determine the full path of a node
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: offset of the node whose path to find
++ * @buf: character buffer to contain the returned path (will be overwritten)
++ * @buflen: size of the character buffer at buf
++ *
++ * fdt_get_path() computes the full path of the node at offset
++ * nodeoffset, and records that path in the buffer at buf.
++ *
++ * NOTE: This function is expensive, as it must scan the device tree
++ * structure from the start to nodeoffset.
++ *
++ * returns:
++ *	0, on success
++ *		buf contains the absolute path of the node at
++ *		nodeoffset, as a NUL-terminated string.
++ * 	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
++ *	-FDT_ERR_NOSPACE, the path of the given node is longer than (buflen-1)
++ *		characters and will not fit in the given buffer.
++ *	-FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE, standard meanings
++ */
++int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen);
++
++/**
++ * fdt_supernode_atdepth_offset - find a specific ancestor of a node
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: offset of the node whose parent to find
++ * @supernodedepth: depth of the ancestor to find
++ * @nodedepth: pointer to an integer variable (will be overwritten) or NULL
++ *
++ * fdt_supernode_atdepth_offset() finds an ancestor of the given node
++ * at a specific depth from the root (where the root itself has depth
++ * 0, its immediate subnodes depth 1 and so forth).  So
++ *	fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, NULL);
++ * will always return 0, the offset of the root node.  If the node at
++ * nodeoffset has depth D, then:
++ *	fdt_supernode_atdepth_offset(fdt, nodeoffset, D, NULL);
++ * will return nodeoffset itself.
++ *
++ * NOTE: This function is expensive, as it must scan the device tree
++ * structure from the start to nodeoffset.
++ *
++ * returns:
++ *
++ *	structure block offset of the node at nodeoffset's ancestor
++ *		of depth supernodedepth (>=0), on success
++ * 	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
++ *	-FDT_ERR_NOTFOUND, supernodedepth was greater than the depth of nodeoffset
++ *	-FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE, standard meanings
++ */
++int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
++				 int supernodedepth, int *nodedepth);
++
++/**
++ * fdt_node_depth - find the depth of a given node
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: offset of the node whose parent to find
++ *
++ * fdt_node_depth() finds the depth of a given node.  The root node
++ * has depth 0, its immediate subnodes depth 1 and so forth.
++ *
++ * NOTE: This function is expensive, as it must scan the device tree
++ * structure from the start to nodeoffset.
++ *
++ * returns:
++ *	depth of the node at nodeoffset (>=0), on success
++ * 	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
++ *	-FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE, standard meanings
++ */
++int fdt_node_depth(const void *fdt, int nodeoffset);
++
++/**
++ * fdt_parent_offset - find the parent of a given node
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: offset of the node whose parent to find
++ *
++ * fdt_parent_offset() locates the parent node of a given node (that
++ * is, it finds the offset of the node which contains the node at
++ * nodeoffset as a subnode).
++ *
++ * NOTE: This function is expensive, as it must scan the device tree
++ * structure from the start to nodeoffset, *twice*.
++ *
++ * returns:
++ *	structure block offset of the parent of the node at nodeoffset
++ *		(>=0), on success
++ * 	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
++ *	-FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE, standard meanings
++ */
++int fdt_parent_offset(const void *fdt, int nodeoffset);
++
++/**
++ * fdt_node_offset_by_prop_value - find nodes with a given property value
++ * @fdt: pointer to the device tree blob
++ * @startoffset: only find nodes after this offset
++ * @propname: property name to check
++ * @propval: property value to search for
++ * @proplen: length of the value in propval
++ *
++ * fdt_node_offset_by_prop_value() returns the offset of the first
++ * node after startoffset, which has a property named propname whose
++ * value is of length proplen and has value equal to propval; or if
++ * startoffset is -1, the very first such node in the tree.
++ *
++ * To iterate through all nodes matching the criterion, the following
++ * idiom can be used:
++ *	offset = fdt_node_offset_by_prop_value(fdt, -1, propname,
++ *					       propval, proplen);
++ *	while (offset != -FDT_ERR_NOTFOUND) {
++ *		// other code here
++ *		offset = fdt_node_offset_by_prop_value(fdt, offset, propname,
++ *						       propval, proplen);
++ *	}
++ *
++ * Note the -1 in the first call to the function; if 0 is used here
++ * instead, the function will never locate the root node, even if it
++ * matches the criterion.
++ *
++ * returns:
++ *	structure block offset of the located node (>= 0, >startoffset),
++ *		 on success
++ *	-FDT_ERR_NOTFOUND, no node matching the criterion exists in the
++ *		tree after startoffset
++ * 	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
++ *	-FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE, standard meanings
++ */
++int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
++				  const char *propname,
++				  const void *propval, int proplen);
++
++/**
++ * fdt_node_offset_by_phandle - find the node with a given phandle
++ * @fdt: pointer to the device tree blob
++ * @phandle: phandle value
++ *
++ * fdt_node_offset_by_phandle() returns the offset of the node
++ * which has the given phandle value.  If there is more than one node
++ * in the tree with the given phandle (an invalid tree), results are
++ * undefined.
++ *
++ * returns:
++ *	structure block offset of the located node (>= 0), on success
++ *	-FDT_ERR_NOTFOUND, no node with that phandle exists
++ *	-FDT_ERR_BADPHANDLE, given phandle value was invalid (0 or -1)
++ *	-FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE, standard meanings
++ */
++int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle);
++
++/**
++ * fdt_node_check_compatible: check a node's compatible property
++ * @fdt: pointer to the device tree blob
++ * @nodeoffset: offset of a tree node
++ * @compatible: string to match against
++ *
++ *
++ * fdt_node_check_compatible() returns 0 if the given node contains a
++ * 'compatible' property with the given string as one of its elements;
++ * it returns non-zero otherwise, or on error.
++ *
++ * returns:
++ *	0, if the node has a 'compatible' property listing the given string
++ *	1, if the node has a 'compatible' property, but it does not list
++ *		the given string
++ *	-FDT_ERR_NOTFOUND, if the given node has no 'compatible' property
++ * 	-FDT_ERR_BADOFFSET, if nodeoffset does not refer to a BEGIN_NODE tag
++ *	-FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE, standard meanings
++ */
++int fdt_node_check_compatible(const void *fdt, int nodeoffset,
++			      const char *compatible);
++
++/**
++ * fdt_node_offset_by_compatible - find nodes with a given 'compatible' value
++ * @fdt: pointer to the device tree blob
++ * @startoffset: only find nodes after this offset
++ * @compatible: 'compatible' string to match against
++ *
++ * fdt_node_offset_by_compatible() returns the offset of the first
++ * node after startoffset, which has a 'compatible' property which
++ * lists the given compatible string; or if startoffset is -1, the
++ * very first such node in the tree.
++ *
++ * To iterate through all nodes matching the criterion, the following
++ * idiom can be used:
++ *	offset = fdt_node_offset_by_compatible(fdt, -1, compatible);
++ *	while (offset != -FDT_ERR_NOTFOUND) {
++ *		// other code here
++ *		offset = fdt_node_offset_by_compatible(fdt, offset, compatible);
++ *	}
++ *
++ * Note the -1 in the first call to the function; if 0 is used here
++ * instead, the function will never locate the root node, even if it
++ * matches the criterion.
++ *
++ * returns:
++ *	structure block offset of the located node (>= 0, >startoffset),
++ *		 on success
++ *	-FDT_ERR_NOTFOUND, no node matching the criterion exists in the
++ *		tree after startoffset
++ * 	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
++ *	-FDT_ERR_BADMAGIC,
++ *	-FDT_ERR_BADVERSION,
++ *	-FDT_ERR_BADSTATE,
++ *	-FDT_ERR_BADSTRUCTURE, standard meanings
++ */
++int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
++				  const char *compatible);
++
++/**********************************************************************/
++/* Write-in-place functions                                           */
++/**********************************************************************/
++
++int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
++			const void *val, int len);
++static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset,
++					   const char *name, uint32_t val)
++{
++	val = cpu_to_fdt32(val);
++	return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val));
++}
++
++int fdt_nop_property(void *fdt, int nodeoffset, const char *name);
++int fdt_nop_node(void *fdt, int nodeoffset);
++
++/**********************************************************************/
++/* Sequential write functions                                         */
++/**********************************************************************/
++
++int fdt_create(void *buf, int bufsize);
++int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size);
++int fdt_finish_reservemap(void *fdt);
++int fdt_begin_node(void *fdt, const char *name);
++int fdt_property(void *fdt, const char *name, const void *val, int len);
++static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
++{
++	val = cpu_to_fdt32(val);
++	return fdt_property(fdt, name, &val, sizeof(val));
++}
++#define fdt_property_string(fdt, name, str) \
++	fdt_property(fdt, name, str, strlen(str)+1)
++int fdt_end_node(void *fdt);
++int fdt_finish(void *fdt);
++
++/**********************************************************************/
++/* Read-write functions                                               */
++/**********************************************************************/
++
++int fdt_open_into(const void *fdt, void *buf, int bufsize);
++int fdt_pack(void *fdt);
++
++int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size);
++int fdt_del_mem_rsv(void *fdt, int n);
++
++int fdt_setprop(void *fdt, int nodeoffset, const char *name,
++		const void *val, int len);
++static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
++				   uint32_t val)
++{
++	val = cpu_to_fdt32(val);
++	return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val));
++}
++#define fdt_setprop_string(fdt, nodeoffset, name, str) \
++	fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
++int fdt_delprop(void *fdt, int nodeoffset, const char *name);
++int fdt_add_subnode_namelen(void *fdt, int parentoffset,
++			    const char *name, int namelen);
++int fdt_add_subnode(void *fdt, int parentoffset, const char *name);
++int fdt_del_node(void *fdt, int nodeoffset);
++
++/**********************************************************************/
++/* Debugging / informational functions                                */
++/**********************************************************************/
++
++const char *fdt_strerror(int errval);
++
++#endif /* _LIBFDT_H */
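
For the read-only side of the interface, the usual pattern is to locate a node with fdt_path_offset() or one of the fdt_node_offset_by_*() iterators and then pull values out with fdt_getprop(). A short sketch (path, property name and compatible string are illustrative):

	int node, len;
	const uint32_t *val;

	node = fdt_path_offset(fdt, "/cpus/cpu@0");
	if (node >= 0) {
		val = fdt_getprop(fdt, node, "clock-frequency", &len);
		if (val && len == 4)
			/* property values are stored big-endian in the blob */
			printf("cpu clock: %u Hz\n", fdt32_to_cpu(*val));
	}

	/* visit every node claiming a given compatible string */
	for (node = fdt_node_offset_by_compatible(fdt, -1, "ns16550");
	     node >= 0;
	     node = fdt_node_offset_by_compatible(fdt, node, "ns16550"))
		;	/* per-node fixups here */
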
+diff --git a/arch/powerpc/boot/libfdt/libfdt_internal.h b/arch/powerpc/boot/libfdt/libfdt_internal.h
+new file mode 100644
+index 0000000..1e60936
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt/libfdt_internal.h
+@@ -0,0 +1,89 @@
++#ifndef _LIBFDT_INTERNAL_H
++#define _LIBFDT_INTERNAL_H
++/*
++ * libfdt - Flat Device Tree manipulation
++ * Copyright (C) 2006 David Gibson, IBM Corporation.
++ *
++ * libfdt is dual licensed: you can use it either under the terms of
++ * the GPL, or the BSD license, at your option.
++ *
++ *  a) This library is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License as
++ *     published by the Free Software Foundation; either version 2 of the
++ *     License, or (at your option) any later version.
++ *
++ *     This library is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ *     You should have received a copy of the GNU General Public
++ *     License along with this library; if not, write to the Free
++ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
++ *     MA 02110-1301 USA
++ *
++ * Alternatively,
++ *
++ *  b) Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *     1. Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *     2. Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
++ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
++ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fdt.h>
++
++#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
++#define PALIGN(p, a)	((void *)ALIGN((unsigned long)(p), (a)))
++
++#define memeq(p, q, n)	(memcmp((p), (q), (n)) == 0)
++#define streq(p, q)	(strcmp((p), (q)) == 0)
++
++uint32_t _fdt_next_tag(const void *fdt, int startoffset, int *nextoffset);
++const char *_fdt_find_string(const char *strtab, int tabsize, const char *s);
++int _fdt_node_end_offset(void *fdt, int nodeoffset);
++
++static inline const void *_fdt_offset_ptr(const void *fdt, int offset)
++{
++	return fdt + fdt_off_dt_struct(fdt) + offset;
++}
++
++static inline void *_fdt_offset_ptr_w(void *fdt, int offset)
++{
++	return (void *)_fdt_offset_ptr(fdt, offset);
++}
++
++static inline const struct fdt_reserve_entry *_fdt_mem_rsv(const void *fdt, int n)
++{
++	const struct fdt_reserve_entry *rsv_table =
++		fdt + fdt_off_mem_rsvmap(fdt);
++
++	return rsv_table + n;
++}
++static inline struct fdt_reserve_entry *_fdt_mem_rsv_w(void *fdt, int n)
++{
++	return (void *)_fdt_mem_rsv(fdt, n);
++}
++
++#define SW_MAGIC		(~FDT_MAGIC)
++
++#endif /* _LIBFDT_INTERNAL_H */
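
ALIGN() rounds its first argument up to the next multiple of the second (which must be a power of two), and PALIGN() does the same for pointers; libfdt uses them to keep structure block entries on FDT_TAGSIZE boundaries. For example:

	ALIGN(0, 4)  == 0
	ALIGN(5, 4)  == 8	/* (5 + 3) & ~3 */
	ALIGN(13, 8) == 16	/* (13 + 7) & ~7 */
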
+diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
+new file mode 100644
+index 0000000..a4b0fc9
+--- /dev/null
++++ b/arch/powerpc/boot/libfdt_env.h
+@@ -0,0 +1,17 @@
++#ifndef _ARCH_POWERPC_BOOT_LIBFDT_ENV_H
++#define _ARCH_POWERPC_BOOT_LIBFDT_ENV_H
++
++#include <types.h>
++#include <string.h>
++
++typedef u32 uint32_t;
++typedef u64 uint64_t;
++
++#define fdt16_to_cpu(x)		(x)
++#define cpu_to_fdt16(x)		(x)
++#define fdt32_to_cpu(x)		(x)
++#define cpu_to_fdt32(x)		(x)
++#define fdt64_to_cpu(x)		(x)
++#define cpu_to_fdt64(x)		(x)
++
++#endif /* _ARCH_POWERPC_BOOT_LIBFDT_ENV_H */
+diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
+index 1b496b3..9e7f3dd 100644
+--- a/arch/powerpc/boot/main.c
++++ b/arch/powerpc/boot/main.c
+@@ -16,7 +16,6 @@
+ #include "stdio.h"
+ #include "ops.h"
+ #include "gunzip_util.h"
+-#include "flatdevtree.h"
+ #include "reg.h"
+ 
+ static struct gunzip_state gzstate;
+diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
+index a180b65..4b0544b 100644
+--- a/arch/powerpc/boot/ops.h
++++ b/arch/powerpc/boot/ops.h
+@@ -46,6 +46,8 @@ struct dt_ops {
+ 	void *(*find_node_by_prop_value)(const void *prev,
+ 	                                 const char *propname,
+ 	                                 const char *propval, int proplen);
++	void *(*find_node_by_compatible)(const void *prev,
++	                                 const char *compat);
+ 	unsigned long (*finalize)(void);
+ 	char *(*get_path)(const void *phandle, char *buf, int len);
+ };
+@@ -79,7 +81,7 @@ struct loader_info {
+ extern struct loader_info loader_info;
+ 
+ void start(void);
+-int ft_init(void *dt_blob, unsigned int max_size, unsigned int max_find_device);
++void fdt_init(void *blob);
+ int serial_console_init(void);
+ int ns16550_console_init(void *devp, struct serial_console_data *scdp);
+ int mpsc_console_init(void *devp, struct serial_console_data *scdp);
+@@ -159,9 +161,32 @@ static inline void *find_node_by_devtype(const void *prev,
+ 	return find_node_by_prop_value_str(prev, "device_type", type);
+ }
+ 
++static inline void *find_node_by_alias(const char *alias)
++{
++	void *devp = finddevice("/aliases");
++
++	if (devp) {
++		char path[MAX_PATH_LEN];
++		if (getprop(devp, alias, path, MAX_PATH_LEN) > 0)
++			return finddevice(path);
++	}
++
++	return NULL;
++}
++
++static inline void *find_node_by_compatible(const void *prev,
++                                            const char *compat)
++{
++	if (dt_ops.find_node_by_compatible)
++		return dt_ops.find_node_by_compatible(prev, compat);
++
++	return NULL;
++}
++
+ void dt_fixup_memory(u64 start, u64 size);
+ void dt_fixup_cpu_clocks(u32 cpufreq, u32 tbfreq, u32 busfreq);
+ void dt_fixup_clock(const char *path, u32 freq);
++void dt_fixup_mac_address_by_alias(const char *alias, const u8 *addr);
+ void dt_fixup_mac_address(u32 index, const u8 *addr);
+ void __dt_fixup_mac_addresses(u32 startindex, ...);
+ #define dt_fixup_mac_addresses(...) \
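
The two new dt_ops hooks give the wrapper a generic way to look nodes up by alias or by compatible string: find_node_by_alias() resolves a property of the /aliases node into a device handle, and find_node_by_compatible() returns NULL when a platform's device-tree backend does not provide the callback. A hypothetical platform fixup might use them like this (alias name, compatible string and value are illustrative):

	void *devp = find_node_by_alias("ethernet0");
	u32 freq = 33333333;

	if (devp)
		setprop(devp, "clock-frequency", &freq, sizeof(freq));

	devp = NULL;
	while ((devp = find_node_by_compatible(devp, "ns16550")))
		;	/* adjust each matching serial node */
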
+diff --git a/arch/powerpc/boot/prpmc2800.c b/arch/powerpc/boot/prpmc2800.c
+index 9614e1d..05c3245 100644
+--- a/arch/powerpc/boot/prpmc2800.c
++++ b/arch/powerpc/boot/prpmc2800.c
+@@ -547,8 +547,7 @@ void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ 	if (!dtb)
+ 		exit();
+ 	memmove(dtb, _dtb_start, dt_size);
+-	if (ft_init(dtb, dt_size, 16))
+-		exit();
++	fdt_init(dtb);
+ 
+ 	bridge_base = mv64x60_get_bridge_base();
+ 
+diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
+index d666115..3b0ac4d 100644
+--- a/arch/powerpc/boot/ps3.c
++++ b/arch/powerpc/boot/ps3.c
+@@ -131,7 +131,7 @@ void platform_init(void)
+ 	printf("\n-- PS3 bootwrapper --\n");
+ 
+ 	simple_alloc_init(_end, heapsize, 32, 64);
+-	ft_init(_dtb_start, 0, 4);
++	fdt_init(_dtb_start);
+ 
+ 	chosen = finddevice("/chosen");
+ 
+diff --git a/arch/powerpc/boot/redboot-8xx.c b/arch/powerpc/boot/redboot-8xx.c
+new file mode 100644
+index 0000000..f7945ad
+--- /dev/null
++++ b/arch/powerpc/boot/redboot-8xx.c
+@@ -0,0 +1,58 @@
++/*
++ * RedBoot firmware support
++ *
++ * Author: Scott Wood <scottwood@freescale.com>
++ *
++ * Copyright (c) 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#include "ops.h"
++#include "stdio.h"
++#include "redboot.h"
++#include "fsl-soc.h"
++#include "io.h"
++
++static bd_t bd;
++BSS_STACK(4096);
++
++#define MHZ(x)	((x + 500000) / 1000000)
++
++static void platform_fixups(void)
++{
++	void *node;
++
++	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
++	dt_fixup_mac_addresses(bd.bi_enetaddr);
++	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 16, bd.bi_busfreq);
++
++	node = finddevice("/soc/cpm/brg");
++	if (node) {
++		printf("BRG clock-frequency <- 0x%x (%dMHz)\r\n",
++		       bd.bi_busfreq, MHZ(bd.bi_busfreq));
++		setprop(node, "clock-frequency",  &bd.bi_busfreq, 4);
++	}
++}
++
++void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
++                   unsigned long r6, unsigned long r7)
++{
++	memcpy(&bd, (char *)r3, sizeof(bd));
++
++	if (bd.bi_tag != 0x42444944)
++		return;
++
++	simple_alloc_init(_end,
++	                  bd.bi_memstart + bd.bi_memsize - (unsigned long)_end,
++	                  32, 64);
++
++	fdt_init(_dtb_start);
++	serial_console_init();
++	platform_ops.fixups = platform_fixups;
++
++	loader_info.cmdline = (char *)bd.bi_cmdline;
++	loader_info.cmdline_len = strlen((char *)bd.bi_cmdline);
++}
+diff --git a/arch/powerpc/boot/redboot.h b/arch/powerpc/boot/redboot.h
+new file mode 100644
+index 0000000..ace0b7f
+--- /dev/null
++++ b/arch/powerpc/boot/redboot.h
+@@ -0,0 +1,56 @@
++#ifndef _PPC_REDBOOT_H
++#define _PPC_REDBOOT_H
 +
++//=========================================================================
++// include/asm-ppc/redboot.h
++//   Copyright (c) 2002, 2003 Gary Thomas (<gary@mlbassoc.com>)
++//   Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
++
++//
++// Board specific details, as provided by RedBoot
++//
++
++/* A Board Information structure that is given to a program when
++ * RedBoot starts it up.  Note: not all fields make sense for all
++ * architectures and it's up to the platform specific code to fill
++ * in the details.
++ */
++typedef struct bd_info {
++    unsigned int   bi_tag;        /* Should be 0x42444944 "BDID" */
++    unsigned int   bi_size;       /* Size of this structure */
++    unsigned int   bi_revision;   /* revision of this structure */
++    unsigned int   bi_bdate;      /* bootstrap date, i.e. 0x19971106 */
++    unsigned int   bi_memstart;   /* Memory start address */
++    unsigned int   bi_memsize;    /* Memory (end) size in bytes */
++    unsigned int   bi_intfreq;    /* Internal Freq, in Hz */
++    unsigned int   bi_busfreq;    /* Bus Freq, in Hz */
++    unsigned int   bi_cpmfreq;    /* CPM Freq, in Hz */
++    unsigned int   bi_brgfreq;    /* BRG Freq, in Hz */
++    unsigned int   bi_vco;        /* VCO Out from PLL */
++    unsigned int   bi_pci_freq;   /* PCI Freq, in Hz */
++    unsigned int   bi_baudrate;   /* Default console baud rate */
++    unsigned int   bi_immr;       /* IMMR when called from boot rom */
++    unsigned char  bi_enetaddr[6];
++    unsigned int   bi_flashbase;  /* Physical address of FLASH memory */
++    unsigned int   bi_flashsize;  /* Length of FLASH memory */
++    int            bi_flashwidth; /* Width (8,16,32,64) */
++    unsigned char *bi_cmdline;    /* Pointer to command line */
++    unsigned char  bi_esa[3][6];  /* Ethernet station addresses */
++    unsigned int   bi_ramdisk_begin, bi_ramdisk_end;
++    struct {                      /* Information about [main] video screen */
++        short x_res;              /*   Horizontal resolution in pixels */
++        short y_res;              /*   Vertical resolution in pixels */
++        short bpp;                /*   Bits/pixel */
++        short mode;               /*   Type of pixels (packed, indexed) */
++        unsigned long fb;         /*   Pointer to frame buffer (pixel) memory */
++    } bi_video;
++    void         (*bi_cputc)(char);   /* Write a character to the RedBoot console */
++    char         (*bi_cgetc)(void);   /* Read a character from the RedBoot console */
++    int          (*bi_ctstc)(void);   /* Test for input on the RedBoot console */
++} bd_t;
++
++#define BI_REV 0x0102    /* Version 1.02 */
++
++#define bi_pci_busfreq bi_pci_freq
++#define bi_immr_base   bi_immr
++#endif
+diff --git a/arch/powerpc/boot/reg.h b/arch/powerpc/boot/reg.h
+index d3cd9ee..9c2c997 100644
+--- a/arch/powerpc/boot/reg.h
++++ b/arch/powerpc/boot/reg.h
+@@ -16,6 +16,14 @@ static inline u32 mfpvr(void)
+ 	return pvr;
+ }
+ 
++#define __stringify_1(x)	#x
++#define __stringify(x)		__stringify_1(x)
++
++#define mfspr(rn)	({unsigned long rval; \
++			asm volatile("mfspr %0," __stringify(rn) \
++				: "=r" (rval)); rval; })
++#define mtspr(rn, v)	asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
++
+ register void *__stack_pointer asm("r1");
+ #define get_sp()	(__stack_pointer)
+ 
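
The new mfspr()/mtspr() macros stringify the SPR number straight into the instruction, so the argument must be a compile-time constant. A small sketch (0x110, i.e. SPRG0, is just an example SPR number; the constant name is not defined by this header):

	#define SPRN_SPRG0	0x110	/* SPRG0 special purpose register */

	unsigned long tmp = mfspr(SPRN_SPRG0);
	mtspr(SPRN_SPRG0, tmp | 1);
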
+diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
+index cafeece..9960421 100644
+--- a/arch/powerpc/boot/serial.c
++++ b/arch/powerpc/boot/serial.c
+@@ -126,9 +126,10 @@ int serial_console_init(void)
+ 	         dt_is_compatible(devp, "fsl,cpm2-scc-uart") ||
+ 	         dt_is_compatible(devp, "fsl,cpm2-smc-uart"))
+ 		rc = cpm_console_init(devp, &serial_cd);
+-	else if (dt_is_compatible(devp, "mpc5200-psc-uart"))
++	else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
+ 		rc = mpc5200_psc_console_init(devp, &serial_cd);
+-	else if (dt_is_compatible(devp, "xilinx,uartlite"))
++	else if (dt_is_compatible(devp, "xlnx,opb-uartlite-1.00.b") ||
++		 dt_is_compatible(devp, "xlnx,xps-uartlite-1.00.a"))
+ 		rc = uartlite_console_init(devp, &serial_cd);
+ 
+ 	/* Add other serial console driver calls here */
+diff --git a/arch/powerpc/boot/treeboot-walnut.c b/arch/powerpc/boot/treeboot-walnut.c
+index bb2c309..472e366 100644
+--- a/arch/powerpc/boot/treeboot-walnut.c
++++ b/arch/powerpc/boot/treeboot-walnut.c
+@@ -20,55 +20,6 @@
+ 
+ BSS_STACK(4096);
+ 
+-void ibm405gp_fixup_clocks(unsigned int sysclk, unsigned int ser_clk)
+-{
+-	u32 pllmr = mfdcr(DCRN_CPC0_PLLMR);
+-	u32 cpc0_cr0 = mfdcr(DCRN_405_CPC0_CR0);
+-	u32 cpc0_cr1 = mfdcr(DCRN_405_CPC0_CR1);
+-	u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
+-	u32 fwdv, fbdv, cbdv, opdv, epdv, udiv;
+-
+-	fwdv = (8 - ((pllmr & 0xe0000000) >> 29));
+-	fbdv = (pllmr & 0x1e000000) >> 25;
+-	cbdv = ((pllmr & 0x00060000) >> 17) + 1;
+-	opdv = ((pllmr & 0x00018000) >> 15) + 1;
+-	epdv = ((pllmr & 0x00001800) >> 13) + 2;
+-	udiv = ((cpc0_cr0 & 0x3e) >> 1) + 1;
+-
+-	m = fwdv * fbdv * cbdv;
+-
+-	cpu = sysclk * m / fwdv;
+-	plb = cpu / cbdv;
+-	opb = plb / opdv;
+-	ebc = plb / epdv;
+-
+-	if (cpc0_cr0 & 0x80) {
+-		/* uart0 uses the external clock */
+-		uart0 = ser_clk;
+-	} else {
+-		uart0 = cpu / udiv;
+-	}
+-
+-	if (cpc0_cr0 & 0x40) {
+-		/* uart1 uses the external clock */
+-		uart1 = ser_clk;
+-	} else {
+-		uart1 = cpu / udiv;
+-	}
+-
+-	/* setup the timebase clock to tick at the cpu frequency */
+-	cpc0_cr1 = cpc0_cr1 & ~0x00800000;
+-	mtdcr(DCRN_405_CPC0_CR1, cpc0_cr1);
+-	tb = cpu;
+-
+-	dt_fixup_cpu_clocks(cpu, tb, 0);
+-	dt_fixup_clock("/plb", plb);
+-	dt_fixup_clock("/plb/opb", opb);
+-	dt_fixup_clock("/plb/ebc", ebc);
+-	dt_fixup_clock("/plb/opb/serial@ef600300", uart0);
+-	dt_fixup_clock("/plb/opb/serial@ef600400", uart1);
+-}
+-
+ static void walnut_flashsel_fixup(void)
+ {
+ 	void *devp, *sram;
+@@ -112,7 +63,7 @@ static void walnut_flashsel_fixup(void)
+ #define WALNUT_OPENBIOS_MAC_OFF 0xfffffe0b
+ static void walnut_fixups(void)
+ {
+-	ibm4xx_fixup_memsize();
++	ibm4xx_sdram_fixup_memsize();
+ 	ibm405gp_fixup_clocks(33330000, 0xa8c000);
+ 	ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
+ 	ibm4xx_fixup_ebc_ranges("/plb/ebc");
+@@ -128,6 +79,6 @@ void platform_init(void)
+ 	simple_alloc_init(_end, avail_ram, 32, 32);
+ 	platform_ops.fixups = walnut_fixups;
+ 	platform_ops.exit = ibm40x_dbcr_reset;
+-	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
++	fdt_init(_dtb_start);
+ 	serial_console_init();
+ }
+diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
+index 31147a0..763a0c4 100755
+--- a/arch/powerpc/boot/wrapper
++++ b/arch/powerpc/boot/wrapper
+@@ -45,6 +45,7 @@ CROSS=
+ 
+ # directory for object and other files used by this script
+ object=arch/powerpc/boot
++objbin=$object
+ 
+ # directory for working files
+ tmpdir=.
+@@ -95,6 +96,7 @@ while [ "$#" -gt 0 ]; do
+ 	shift
+ 	[ "$#" -gt 0 ] || usage
+ 	object="$1"
++	objbin="$1"
+ 	;;
+     -W)
+ 	shift
+@@ -116,10 +118,13 @@ while [ "$#" -gt 0 ]; do
+ done
+ 
+ if [ -n "$dts" ]; then
++    if [ ! -r "$dts" -a -r "$object/dts/$dts" ]; then
++	dts="$object/dts/$dts"
++    fi
+     if [ -z "$dtb" ]; then
+ 	dtb="$platform.dtb"
+     fi
+-    dtc -O dtb -o "$dtb" -b 0 -V 16 "$dts"
++    $object/dtc -O dtb -o "$dtb" -b 0 "$dts"
+ fi
+ 
+ if [ -z "$kernel" ]; then
+@@ -163,7 +168,7 @@ ps3)
+     ksection=.kernel:vmlinux.bin
+     isection=.kernel:initrd
+     ;;
+-ep88xc)
++ep88xc|ep405|redboot*|ep8248e)
+     platformo="$object/fixed-head.o $object/$platform.o"
+     binary=y
+     ;;
+@@ -246,11 +251,11 @@ fi
+ # post-processing needed for some platforms
+ case "$platform" in
+ pseries|chrp)
+-    $object/addnote "$ofile"
++    $objbin/addnote "$ofile"
+     ;;
+ coff)
+     ${CROSS}objcopy -O aixcoff-rs6000 --set-start "$entry" "$ofile"
+-    $object/hack-coff "$ofile"
++    $objbin/hack-coff "$ofile"
+     ;;
+ cuboot*)
+     gzip -f -9 "$ofile"
+@@ -259,7 +264,7 @@ cuboot*)
+     ;;
+ treeboot*)
+     mv "$ofile" "$ofile.elf"
+-    $object/mktree "$ofile.elf" "$ofile" "$base" "$entry"
++    $objbin/mktree "$ofile.elf" "$ofile" "$base" "$entry"
+     if [ -z "$cacheit" ]; then
+ 	rm -f "$ofile.elf"
+     fi
+@@ -287,8 +292,6 @@ ps3)
+     overlay_dest="256"
+     overlay_size="256"
+ 
+-    rm -f "$object/otheros.bld"
+-
+     ${CROSS}objcopy -O binary "$ofile" "$ofile.bin"
+ 
+     dd if="$ofile.bin" of="$ofile.bin" conv=notrunc   \
+@@ -299,6 +302,8 @@ ps3)
+         skip=$system_reset_overlay seek=$overlay_dest \
+         count=$overlay_size bs=1
+ 
+-    gzip --force -9 --stdout "$ofile.bin" > "$object/otheros.bld"
++    odir="$(dirname "$ofile.bin")"
++    rm -f "$odir/otheros.bld"
++    gzip --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld"
+     ;;
+ esac
+diff --git a/arch/powerpc/configs/adder875-redboot_defconfig b/arch/powerpc/configs/adder875-redboot_defconfig
+new file mode 100644
+index 0000000..cab5f9b
+--- /dev/null
++++ b/arch/powerpc/configs/adder875-redboot_defconfig
+@@ -0,0 +1,798 @@
 +#
-+# DVB-T (terrestrial) frontends
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Thu Jan 17 16:17:38 2008
 +#
-+# CONFIG_DVB_SP8870 is not set
-+# CONFIG_DVB_SP887X is not set
-+# CONFIG_DVB_CX22700 is not set
-+# CONFIG_DVB_CX22702 is not set
-+# CONFIG_DVB_L64781 is not set
-+# CONFIG_DVB_TDA1004X is not set
-+# CONFIG_DVB_NXT6000 is not set
-+# CONFIG_DVB_MT352 is not set
-+# CONFIG_DVB_ZL10353 is not set
-+# CONFIG_DVB_DIB3000MB is not set
-+# CONFIG_DVB_DIB3000MC is not set
-+# CONFIG_DVB_DIB7000M is not set
-+# CONFIG_DVB_DIB7000P is not set
++# CONFIG_PPC64 is not set
 +
 +#
-+# DVB-C (cable) frontends
++# Processor support
 +#
-+# CONFIG_DVB_VES1820 is not set
-+# CONFIG_DVB_TDA10021 is not set
-+# CONFIG_DVB_TDA10023 is not set
-+# CONFIG_DVB_STV0297 is not set
++# CONFIG_6xx is not set
++# CONFIG_PPC_85xx is not set
++CONFIG_PPC_8xx=y
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_8xx=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_NOT_COHERENT_CACHE=y
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++# CONFIG_PPC_UDBG_16550 is not set
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_REDBOOT=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 +
 +#
-+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++# General setup
 +#
-+# CONFIG_DVB_NXT200X is not set
-+# CONFIG_DVB_OR51211 is not set
-+# CONFIG_DVB_OR51132 is not set
-+# CONFIG_DVB_BCM3510 is not set
-+# CONFIG_DVB_LGDT330X is not set
-+# CONFIG_DVB_S5H1409 is not set
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++# CONFIG_SWAP is not set
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++# CONFIG_BLK_DEV_INITRD is not set
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++# CONFIG_SYSCTL_SYSCALL is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++# CONFIG_ELF_CORE is not set
++# CONFIG_BASE_FULL is not set
++# CONFIG_FUTEX is not set
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++# CONFIG_VM_EVENT_COUNTERS is not set
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=1
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
 +
 +#
-+# Tuners/PLL support
++# IO Schedulers
 +#
-+# CONFIG_DVB_PLL is not set
-+# CONFIG_DVB_TDA826X is not set
-+# CONFIG_DVB_TDA827X is not set
-+# CONFIG_DVB_TUNER_QT1010 is not set
-+# CONFIG_DVB_TUNER_MT2060 is not set
-+# CONFIG_DVB_TUNER_MT2266 is not set
-+# CONFIG_DVB_TUNER_MT2131 is not set
-+# CONFIG_DVB_TUNER_DIB0070 is not set
++CONFIG_IOSCHED_NOOP=y
++# CONFIG_IOSCHED_AS is not set
++CONFIG_IOSCHED_DEADLINE=y
++# CONFIG_IOSCHED_CFQ is not set
++# CONFIG_DEFAULT_AS is not set
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
 +
 +#
-+# Miscellaneous devices
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++CONFIG_CPM1=y
++# CONFIG_MPC8XXFADS is not set
++# CONFIG_MPC86XADS is not set
++# CONFIG_MPC885ADS is not set
++# CONFIG_PPC_EP88XC is not set
++CONFIG_PPC_ADDER875=y
++
++#
++# MPC8xx CPM Options
 +#
-+# CONFIG_DVB_LNBP21 is not set
-+# CONFIG_DVB_ISL6421 is not set
-+# CONFIG_DVB_TUA6100 is not set
-+CONFIG_DAB=y
 +
 +#
-+# Graphics support
++# Generic MPC8xx Options
++#
++CONFIG_8xx_COPYBACK=y
++# CONFIG_8xx_CPU6 is not set
++CONFIG_8xx_CPU15=y
++CONFIG_NO_UCODE_PATCH=y
++# CONFIG_USB_SOF_UCODE_PATCH is not set
++# CONFIG_I2C_SPI_UCODE_PATCH is not set
++# CONFIG_I2C_SPI_SMC1_UCODE_PATCH is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++CONFIG_PPC_CPM_NEW_BINDING=y
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_CPM=y
++
 +#
-+# CONFIG_DRM is not set
-+# CONFIG_VGASTATE is not set
-+CONFIG_VIDEO_OUTPUT_CONTROL=y
-+CONFIG_FB=y
-+CONFIG_FIRMWARE_EDID=y
-+# CONFIG_FB_DDC is not set
-+# CONFIG_FB_CFB_FILLRECT is not set
-+# CONFIG_FB_CFB_COPYAREA is not set
-+# CONFIG_FB_CFB_IMAGEBLIT is not set
-+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-+# CONFIG_FB_SYS_FILLRECT is not set
-+# CONFIG_FB_SYS_COPYAREA is not set
-+# CONFIG_FB_SYS_IMAGEBLIT is not set
-+# CONFIG_FB_SYS_FOPS is not set
-+CONFIG_FB_DEFERRED_IO=y
-+# CONFIG_FB_SVGALIB is not set
-+# CONFIG_FB_MACMODES is not set
-+# CONFIG_FB_BACKLIGHT is not set
-+CONFIG_FB_MODE_HELPERS=y
-+# CONFIG_FB_TILEBLITTING is not set
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_300 is not set
++CONFIG_HZ_1000=y
++CONFIG_HZ=1000
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++# CONFIG_MATH_EMULATION is not set
++# CONFIG_8XX_MINIMAL_FPEMU is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++# CONFIG_PROC_DEVICETREE is not set
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++# CONFIG_SECCOMP is not set
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="adder875-redboot.dts"
++CONFIG_ISA_DMA_API=y
 +
 +#
-+# Frame buffer hardware drivers
++# Bus options
 +#
-+# CONFIG_FB_CIRRUS is not set
-+# CONFIG_FB_PM2 is not set
-+# CONFIG_FB_CYBER2000 is not set
-+# CONFIG_FB_ASILIANT is not set
-+# CONFIG_FB_IMSTT is not set
-+# CONFIG_FB_S1D13XXX is not set
-+# CONFIG_FB_NVIDIA is not set
-+# CONFIG_FB_RIVA is not set
-+# CONFIG_FB_MATROX is not set
-+# CONFIG_FB_RADEON is not set
-+# CONFIG_FB_ATY128 is not set
-+# CONFIG_FB_ATY is not set
-+# CONFIG_FB_S3 is not set
-+# CONFIG_FB_SAVAGE is not set
-+# CONFIG_FB_SIS is not set
-+# CONFIG_FB_NEOMAGIC is not set
-+# CONFIG_FB_KYRO is not set
-+# CONFIG_FB_3DFX is not set
-+# CONFIG_FB_VOODOO1 is not set
-+# CONFIG_FB_VT8623 is not set
-+# CONFIG_FB_TRIDENT is not set
-+# CONFIG_FB_ARK is not set
-+# CONFIG_FB_PM3 is not set
-+# CONFIG_FB_VIRTUAL is not set
-+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++CONFIG_ZONE_DMA=y
++CONFIG_FSL_SOC=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_PCI_QSPAN is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
 +
 +#
-+# Display device support
++# Advanced setup
 +#
-+# CONFIG_DISPLAY_SUPPORT is not set
++# CONFIG_ADVANCED_OPTIONS is not set
 +
 +#
-+# Console display driver support
++# Default settings for advanced configuration options are used
 +#
-+CONFIG_DUMMY_CONSOLE=y
-+CONFIG_FRAMEBUFFER_CONSOLE=y
-+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
-+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-+CONFIG_FONTS=y
-+# CONFIG_FONT_8x8 is not set
-+CONFIG_FONT_8x16=y
-+# CONFIG_FONT_6x11 is not set
-+# CONFIG_FONT_7x14 is not set
-+# CONFIG_FONT_PEARL_8x8 is not set
-+# CONFIG_FONT_ACORN_8x8 is not set
-+# CONFIG_FONT_MINI_4x6 is not set
-+# CONFIG_FONT_SUN8x16 is not set
-+# CONFIG_FONT_SUN12x22 is not set
-+# CONFIG_FONT_10x18 is not set
-+CONFIG_LOGO=y
-+# CONFIG_LOGO_LINUX_MONO is not set
-+# CONFIG_LOGO_LINUX_VGA16 is not set
-+# CONFIG_LOGO_LINUX_CLUT224 is not set
-+# CONFIG_LOGO_SUPERH_MONO is not set
-+# CONFIG_LOGO_SUPERH_VGA16 is not set
-+CONFIG_LOGO_SUPERH_CLUT224=y
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0x80000000
++CONFIG_CONSISTENT_START=0xfd000000
++CONFIG_CONSISTENT_SIZE=0x00200000
++CONFIG_BOOT_LOAD=0x00400000
 +
 +#
-+# Sound
++# Networking
 +#
-+# CONFIG_SOUND is not set
-+CONFIG_HID_SUPPORT=y
-+CONFIG_HID=y
-+# CONFIG_HID_DEBUG is not set
-+# CONFIG_HIDRAW is not set
-+CONFIG_USB_SUPPORT=y
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB_ARCH_HAS_EHCI=y
-+# CONFIG_USB is not set
++CONFIG_NET=y
 +
 +#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
 +#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
 +
 +#
-+# USB Gadget Support
++# Wireless
 +#
-+# CONFIG_USB_GADGET is not set
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++# CONFIG_MTD_PARTITIONS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_CFI_FLAGADM is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++# CONFIG_BLK_DEV is not set
++# CONFIG_MISC_DEVICES is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++CONFIG_DAVICOM_PHY=y
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_B44 is not set
++CONFIG_FS_ENET=y
++# CONFIG_FS_ENET_HAS_SCC is not set
++CONFIG_FS_ENET_HAS_FEC=y
++CONFIG_FS_ENET_MDIO_FEC=y
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_LIFEBOOK=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_CPM=y
++CONFIG_SERIAL_CPM_CONSOLE=y
++# CONFIG_SERIAL_CPM_SCC1 is not set
++# CONFIG_SERIAL_CPM_SCC2 is not set
++# CONFIG_SERIAL_CPM_SCC3 is not set
++# CONFIG_SERIAL_CPM_SCC4 is not set
++CONFIG_SERIAL_CPM_SMC1=y
++CONFIG_SERIAL_CPM_SMC2=y
++CONFIG_UNIX98_PTYS=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=y
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++# CONFIG_HID_SUPPORT is not set
++# CONFIG_USB_SUPPORT is not set
 +# CONFIG_MMC is not set
 +# CONFIG_NEW_LEDS is not set
-+# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
 +# CONFIG_RTC_CLASS is not set
 +
 +#
@@ -80959,29 +101936,20 @@
 +#
 +# File systems
 +#
-+CONFIG_EXT2_FS=y
-+# CONFIG_EXT2_FS_XATTR is not set
-+# CONFIG_EXT2_FS_XIP is not set
-+CONFIG_EXT3_FS=y
-+CONFIG_EXT3_FS_XATTR=y
-+# CONFIG_EXT3_FS_POSIX_ACL is not set
-+# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
 +# CONFIG_EXT4DEV_FS is not set
-+CONFIG_JBD=y
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
 +# CONFIG_REISERFS_FS is not set
 +# CONFIG_JFS_FS is not set
 +# CONFIG_FS_POSIX_ACL is not set
 +# CONFIG_XFS_FS is not set
 +# CONFIG_GFS2_FS is not set
 +# CONFIG_OCFS2_FS is not set
-+CONFIG_MINIX_FS=y
-+CONFIG_ROMFS_FS=y
-+CONFIG_INOTIFY=y
-+CONFIG_INOTIFY_USER=y
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_INOTIFY is not set
 +# CONFIG_QUOTA is not set
-+CONFIG_DNOTIFY=y
++# CONFIG_DNOTIFY is not set
 +# CONFIG_AUTOFS_FS is not set
 +# CONFIG_AUTOFS4_FS is not set
 +# CONFIG_FUSE_FS is not set
@@ -81003,13 +101971,12 @@
 +# Pseudo filesystems
 +#
 +CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
++# CONFIG_PROC_KCORE is not set
 +CONFIG_PROC_SYSCTL=y
 +CONFIG_SYSFS=y
 +CONFIG_TMPFS=y
 +# CONFIG_TMPFS_POSIX_ACL is not set
-+CONFIG_HUGETLBFS=y
-+CONFIG_HUGETLB_PAGE=y
++# CONFIG_HUGETLB_PAGE is not set
 +# CONFIG_CONFIGFS_FS is not set
 +
 +#
@@ -81022,7 +101989,8 @@
 +# CONFIG_BEFS_FS is not set
 +# CONFIG_BFS_FS is not set
 +# CONFIG_EFS_FS is not set
-+# CONFIG_CRAMFS is not set
++# CONFIG_JFFS2_FS is not set
++CONFIG_CRAMFS=y
 +# CONFIG_VXFS_FS is not set
 +# CONFIG_HPFS_FS is not set
 +# CONFIG_QNX4FS_FS is not set
@@ -81072,6 +102040,21 @@
 +# CONFIG_SYSV68_PARTITION is not set
 +# CONFIG_NLS is not set
 +# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++# CONFIG_CRC32 is not set
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
 +CONFIG_INSTRUMENTATION=y
 +# CONFIG_PROFILING is not set
 +# CONFIG_MARKERS is not set
@@ -81079,53 +102062,40 @@
 +#
 +# Kernel hacking
 +#
-+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 +# CONFIG_PRINTK_TIME is not set
 +CONFIG_ENABLE_WARN_DEPRECATED=y
 +CONFIG_ENABLE_MUST_CHECK=y
 +CONFIG_MAGIC_SYSRQ=y
 +# CONFIG_UNUSED_SYMBOLS is not set
-+CONFIG_DEBUG_FS=y
++# CONFIG_DEBUG_FS is not set
 +# CONFIG_HEADERS_CHECK is not set
 +CONFIG_DEBUG_KERNEL=y
 +# CONFIG_DEBUG_SHIRQ is not set
 +CONFIG_DETECT_SOFTLOCKUP=y
 +CONFIG_SCHED_DEBUG=y
-+CONFIG_SCHEDSTATS=y
++# CONFIG_SCHEDSTATS is not set
 +# CONFIG_TIMER_STATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+CONFIG_DEBUG_PREEMPT=y
-+# CONFIG_DEBUG_RT_MUTEXES is not set
-+# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_SLUB_DEBUG_ON is not set
 +# CONFIG_DEBUG_SPINLOCK is not set
 +# CONFIG_DEBUG_MUTEXES is not set
-+# CONFIG_DEBUG_LOCK_ALLOC is not set
-+# CONFIG_PROVE_LOCKING is not set
-+# CONFIG_LOCK_STAT is not set
 +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 +# CONFIG_DEBUG_KOBJECT is not set
 +CONFIG_DEBUG_BUGVERBOSE=y
-+# CONFIG_DEBUG_INFO is not set
++CONFIG_DEBUG_INFO=y
 +# CONFIG_DEBUG_VM is not set
 +# CONFIG_DEBUG_LIST is not set
 +# CONFIG_DEBUG_SG is not set
-+CONFIG_FRAME_POINTER=y
 +CONFIG_FORCED_INLINING=y
 +# CONFIG_BOOT_PRINTK_DELAY is not set
-+# CONFIG_RCU_TORTURE_TEST is not set
 +# CONFIG_FAULT_INJECTION is not set
 +# CONFIG_SAMPLES is not set
-+# CONFIG_SH_STANDARD_BIOS is not set
-+# CONFIG_EARLY_SCIF_CONSOLE is not set
-+# CONFIG_DEBUG_BOOTMEM is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
 +# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_4KSTACKS is not set
-+CONFIG_SH64_PROC_ASIDS=y
-+CONFIG_SH64_SR_WATCH=y
-+# CONFIG_POOR_MANS_STRACE is not set
-+# CONFIG_SH_ALPHANUMERIC is not set
-+# CONFIG_SH_NO_BSS_INIT is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
 +
 +#
 +# Security options
@@ -81134,55 +102104,84742 @@
 +# CONFIG_SECURITY is not set
 +# CONFIG_SECURITY_FILE_CAPABILITIES is not set
 +# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/adder875-uboot_defconfig b/arch/powerpc/configs/adder875-uboot_defconfig
+new file mode 100644
+index 0000000..1faf7ef
+--- /dev/null
++++ b/arch/powerpc/configs/adder875-uboot_defconfig
+@@ -0,0 +1,798 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Thu Jan 17 16:17:18 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++# CONFIG_PPC_85xx is not set
++CONFIG_PPC_8xx=y
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_8xx=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_NOT_COHERENT_CACHE=y
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++# CONFIG_PPC_UDBG_16550 is not set
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_REDBOOT=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++# CONFIG_SWAP is not set
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++# CONFIG_BLK_DEV_INITRD is not set
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++# CONFIG_SYSCTL_SYSCALL is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++# CONFIG_ELF_CORE is not set
++# CONFIG_BASE_FULL is not set
++# CONFIG_FUTEX is not set
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++# CONFIG_VM_EVENT_COUNTERS is not set
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=1
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++# CONFIG_IOSCHED_AS is not set
++CONFIG_IOSCHED_DEADLINE=y
++# CONFIG_IOSCHED_CFQ is not set
++# CONFIG_DEFAULT_AS is not set
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++CONFIG_CPM1=y
++# CONFIG_MPC8XXFADS is not set
++# CONFIG_MPC86XADS is not set
++# CONFIG_MPC885ADS is not set
++# CONFIG_PPC_EP88XC is not set
++CONFIG_PPC_ADDER875=y
++
++#
++# MPC8xx CPM Options
++#
++
++#
++# Generic MPC8xx Options
++#
++CONFIG_8xx_COPYBACK=y
++# CONFIG_8xx_CPU6 is not set
++CONFIG_8xx_CPU15=y
++CONFIG_NO_UCODE_PATCH=y
++# CONFIG_USB_SOF_UCODE_PATCH is not set
++# CONFIG_I2C_SPI_UCODE_PATCH is not set
++# CONFIG_I2C_SPI_SMC1_UCODE_PATCH is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++CONFIG_PPC_CPM_NEW_BINDING=y
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_CPM=y
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_300 is not set
++CONFIG_HZ_1000=y
++CONFIG_HZ=1000
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++# CONFIG_MATH_EMULATION is not set
++# CONFIG_8XX_MINIMAL_FPEMU is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++# CONFIG_PROC_DEVICETREE is not set
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++# CONFIG_SECCOMP is not set
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="adder875-uboot.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_FSL_SOC=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_PCI_QSPAN is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0x80000000
++CONFIG_CONSISTENT_START=0xfd000000
++CONFIG_CONSISTENT_SIZE=0x00200000
++CONFIG_BOOT_LOAD=0x00400000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++# CONFIG_MTD_PARTITIONS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_CFI_FLAGADM is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++# CONFIG_BLK_DEV is not set
++# CONFIG_MISC_DEVICES is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++CONFIG_DAVICOM_PHY=y
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_B44 is not set
++CONFIG_FS_ENET=y
++# CONFIG_FS_ENET_HAS_SCC is not set
++CONFIG_FS_ENET_HAS_FEC=y
++CONFIG_FS_ENET_MDIO_FEC=y
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_LIFEBOOK=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_CPM=y
++CONFIG_SERIAL_CPM_CONSOLE=y
++# CONFIG_SERIAL_CPM_SCC1 is not set
++# CONFIG_SERIAL_CPM_SCC2 is not set
++# CONFIG_SERIAL_CPM_SCC3 is not set
++# CONFIG_SERIAL_CPM_SCC4 is not set
++CONFIG_SERIAL_CPM_SMC1=y
++CONFIG_SERIAL_CPM_SMC2=y
++CONFIG_UNIX98_PTYS=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=y
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++# CONFIG_HID_SUPPORT is not set
++# CONFIG_USB_SUPPORT is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_INOTIFY is not set
++# CONFIG_QUOTA is not set
++# CONFIG_DNOTIFY is not set
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++# CONFIG_PROC_KCORE is not set
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_JFFS2_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
 +
 +#
 +# Library routines
 +#
-+CONFIG_BITREVERSE=y
 +# CONFIG_CRC_CCITT is not set
 +# CONFIG_CRC16 is not set
 +# CONFIG_CRC_ITU_T is not set
-+CONFIG_CRC32=y
++# CONFIG_CRC32 is not set
 +# CONFIG_CRC7 is not set
 +# CONFIG_LIBCRC32C is not set
-+CONFIG_PLIST=y
++CONFIG_ZLIB_INFLATE=y
 +CONFIG_HAS_IOMEM=y
 +CONFIG_HAS_IOPORT=y
 +CONFIG_HAS_DMA=y
-diff --git a/arch/sh/configs/hs7751rvoip_defconfig b/arch/sh/configs/hs7751rvoip_defconfig
-deleted file mode 100644
-index 5d9da5a..0000000
---- a/arch/sh/configs/hs7751rvoip_defconfig
-+++ /dev/null
-@@ -1,908 +0,0 @@
--#
--# Automatically generated make config: don't edit
--# Linux kernel version: 2.6.18
--# Tue Oct  3 13:04:52 2006
--#
--CONFIG_SUPERH=y
--CONFIG_RWSEM_GENERIC_SPINLOCK=y
--CONFIG_GENERIC_FIND_NEXT_BIT=y
--CONFIG_GENERIC_HWEIGHT=y
--CONFIG_GENERIC_HARDIRQS=y
--CONFIG_GENERIC_IRQ_PROBE=y
--CONFIG_GENERIC_CALIBRATE_DELAY=y
--CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
--
--#
--# Code maturity level options
--#
--CONFIG_EXPERIMENTAL=y
--CONFIG_BROKEN_ON_SMP=y
--CONFIG_LOCK_KERNEL=y
--CONFIG_INIT_ENV_ARG_LIMIT=32
--
--#
--# General setup
--#
--CONFIG_LOCALVERSION=""
--CONFIG_LOCALVERSION_AUTO=y
--CONFIG_SWAP=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/bamboo_defconfig b/arch/powerpc/configs/bamboo_defconfig
+index 76d883e..1ed9afc 100644
+--- a/arch/powerpc/configs/bamboo_defconfig
++++ b/arch/powerpc/configs/bamboo_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:48:04 2007
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 10:49:50 2007
+ #
+ # CONFIG_PPC64 is not set
+ 
+@@ -131,6 +131,7 @@ CONFIG_DEFAULT_AS=y
+ # CONFIG_DEFAULT_CFQ is not set
+ # CONFIG_DEFAULT_NOOP is not set
+ CONFIG_DEFAULT_IOSCHED="anticipatory"
++# CONFIG_PPC4xx_PCI_EXPRESS is not set
+ 
+ #
+ # Platform support
+@@ -143,6 +144,9 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
+ CONFIG_BAMBOO=y
+ # CONFIG_EBONY is not set
+ # CONFIG_SEQUOIA is not set
++# CONFIG_TAISHAN is not set
++# CONFIG_KATMAI is not set
++# CONFIG_RAINIER is not set
+ CONFIG_440EP=y
+ CONFIG_IBM440EP_ERR42=y
+ # CONFIG_MPIC is not set
+@@ -372,9 +376,7 @@ CONFIG_MISC_DEVICES=y
+ # CONFIG_FIREWIRE is not set
+ # CONFIG_IEEE1394 is not set
+ # CONFIG_I2O is not set
+-CONFIG_MACINTOSH_DRIVERS=y
+-# CONFIG_MAC_EMUMOUSEBTN is not set
+-# CONFIG_WINDFARM is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
+ CONFIG_NETDEVICES=y
+ # CONFIG_NETDEVICES_MULTIQUEUE is not set
+ # CONFIG_DUMMY is not set
+@@ -736,19 +738,7 @@ CONFIG_DEBUGGER=y
+ # CONFIG_KGDB is not set
+ # CONFIG_XMON is not set
+ # CONFIG_BDI_SWITCH is not set
+-CONFIG_PPC_EARLY_DEBUG=y
+-# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+-# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+-# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+-# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+-# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+-# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+-# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+-# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
+-CONFIG_PPC_EARLY_DEBUG_44x=y
+-# CONFIG_PPC_EARLY_DEBUG_CPM is not set
+-CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0xef600300
+-CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x0
++# CONFIG_PPC_EARLY_DEBUG is not set
+ 
+ #
+ # Security options
+diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig
+index 421e08e..9ed2e09 100644
+--- a/arch/powerpc/configs/celleb_defconfig
++++ b/arch/powerpc/configs/celleb_defconfig
+@@ -50,7 +50,8 @@ CONFIG_AUDIT_ARCH=y
+ CONFIG_GENERIC_BUG=y
+ # CONFIG_DEFAULT_UIMAGE is not set
+ # CONFIG_PPC_DCR_NATIVE is not set
+-# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_PPC_DCR_MMIO=y
++CONFIG_PPC_DCR=y
+ CONFIG_PPC_OF_PLATFORM_PCI=y
+ CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+ 
+@@ -148,7 +149,7 @@ CONFIG_PPC_MULTIPLATFORM=y
+ CONFIG_PPC_CELLEB=y
+ # CONFIG_PPC_PS3 is not set
+ CONFIG_PPC_CELL=y
+-# CONFIG_PPC_CELL_NATIVE is not set
++CONFIG_PPC_CELL_NATIVE=y
+ # CONFIG_PPC_IBM_CELL_BLADE is not set
+ 
+ #
+@@ -157,13 +158,19 @@ CONFIG_PPC_CELL=y
+ CONFIG_SPU_FS=y
+ CONFIG_SPU_FS_64K_LS=y
+ CONFIG_SPU_BASE=y
++CONFIG_CBE_RAS=y
++# CONFIG_CBE_THERM is not set
+ # CONFIG_PQ2ADS is not set
++CONFIG_PPC_NATIVE=y
++CONFIG_UDBG_RTAS_CONSOLE=y
+ CONFIG_PPC_UDBG_BEAT=y
+-# CONFIG_MPIC is not set
++CONFIG_MPIC=y
+ # CONFIG_MPIC_WEIRD is not set
+ # CONFIG_PPC_I8259 is not set
+ # CONFIG_U3_DART is not set
+-# CONFIG_PPC_RTAS is not set
++CONFIG_PPC_RTAS=y
++# CONFIG_RTAS_ERROR_LOGGING is not set
++# CONFIG_RTAS_PROC is not set
+ # CONFIG_MMIO_NVRAM is not set
+ # CONFIG_PPC_MPC106 is not set
+ # CONFIG_PPC_970_NAP is not set
+@@ -593,10 +600,11 @@ CONFIG_MII=y
+ # CONFIG_NET_VENDOR_3COM is not set
+ # CONFIG_NET_TULIP is not set
+ # CONFIG_HP100 is not set
+-# CONFIG_IBM_NEW_EMAC_ZMII is not set
+-# CONFIG_IBM_NEW_EMAC_RGMII is not set
+-# CONFIG_IBM_NEW_EMAC_TAH is not set
+-# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_IBM_NEW_EMAC is not set
++CONFIG_IBM_NEW_EMAC_ZMII=y
++CONFIG_IBM_NEW_EMAC_RGMII=y
++CONFIG_IBM_NEW_EMAC_TAH=y
++CONFIG_IBM_NEW_EMAC_EMAC4=y
+ # CONFIG_NET_PCI is not set
+ # CONFIG_B44 is not set
+ CONFIG_NETDEV_1000=y
+@@ -741,6 +749,7 @@ CONFIG_SERIAL_TXX9_CONSOLE=y
+ CONFIG_UNIX98_PTYS=y
+ # CONFIG_LEGACY_PTYS is not set
+ CONFIG_HVC_DRIVER=y
++CONFIG_HVC_RTAS=y
+ CONFIG_HVC_BEAT=y
+ # CONFIG_IPMI_HANDLER is not set
+ # CONFIG_HW_RANDOM is not set
+@@ -822,6 +831,7 @@ CONFIG_WATCHDOG=y
+ # Watchdog Device Drivers
+ #
+ # CONFIG_SOFT_WATCHDOG is not set
++# CONFIG_WATCHDOG_RTAS is not set
+ 
+ #
+ # PCI-based Watchdog Cards
+@@ -1245,17 +1255,7 @@ CONFIG_XMON_DISASSEMBLY=y
+ CONFIG_IRQSTACKS=y
+ # CONFIG_VIRQ_DEBUG is not set
+ # CONFIG_BOOTX_TEXT is not set
+-CONFIG_PPC_EARLY_DEBUG=y
+-# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+-# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+-# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+-# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+-# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+-# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+-# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+-CONFIG_PPC_EARLY_DEBUG_BEAT=y
+-# CONFIG_PPC_EARLY_DEBUG_44x is not set
+-# CONFIG_PPC_EARLY_DEBUG_CPM is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
+ 
+ #
+ # Security options
+diff --git a/arch/powerpc/configs/ebony_defconfig b/arch/powerpc/configs/ebony_defconfig
+index b84298c..cf860f1 100644
+--- a/arch/powerpc/configs/ebony_defconfig
++++ b/arch/powerpc/configs/ebony_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:48:11 2007
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 11:16:26 2007
+ #
+ # CONFIG_PPC64 is not set
+ 
+@@ -130,6 +130,7 @@ CONFIG_DEFAULT_AS=y
+ # CONFIG_DEFAULT_CFQ is not set
+ # CONFIG_DEFAULT_NOOP is not set
+ CONFIG_DEFAULT_IOSCHED="anticipatory"
++# CONFIG_PPC4xx_PCI_EXPRESS is not set
+ 
+ #
+ # Platform support
+@@ -142,6 +143,9 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
+ # CONFIG_BAMBOO is not set
+ CONFIG_EBONY=y
+ # CONFIG_SEQUOIA is not set
++# CONFIG_TAISHAN is not set
++# CONFIG_KATMAI is not set
++# CONFIG_RAINIER is not set
+ CONFIG_440GP=y
+ # CONFIG_MPIC is not set
+ # CONFIG_MPIC_WEIRD is not set
+diff --git a/arch/powerpc/configs/ep405_defconfig b/arch/powerpc/configs/ep405_defconfig
+new file mode 100644
+index 0000000..3829c91
+--- /dev/null
++++ b/arch/powerpc/configs/ep405_defconfig
+@@ -0,0 +1,952 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 11:17:13 2007
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++CONFIG_40x=y
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_4xx=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_NOT_COHERENT_CACHE=y
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_PPC_DCR_NATIVE=y
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_PPC_DCR=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++# CONFIG_PPC4xx_PCI_EXPRESS is not set
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++CONFIG_EP405=y
++# CONFIG_KILAUEA is not set
++# CONFIG_MAKALU is not set
++# CONFIG_WALNUT is not set
++# CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
++CONFIG_405GP=y
++CONFIG_IBM405_ERR77=y
++CONFIG_IBM405_ERR51=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++# CONFIG_MATH_EMULATION is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="ep405.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_CONSISTENT_START=0xff100000
++CONFIG_CONSISTENT_SIZE=0x00200000
++CONFIG_BOOT_LOAD=0x00400000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=m
++CONFIG_MTD_BLOCK=m
++# CONFIG_MTD_BLOCK_RO is not set
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++CONFIG_MTD_JEDECPROBE=y
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++# CONFIG_BLK_DEV_LOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=35000
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_XILINX_SYSACE is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++CONFIG_IBM_NEW_EMAC=y
++CONFIG_IBM_NEW_EMAC_RXB=128
++CONFIG_IBM_NEW_EMAC_TXB=64
++CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
++CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
++CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
++# CONFIG_IBM_NEW_EMAC_DEBUG is not set
++CONFIG_IBM_NEW_EMAC_ZMII=y
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++# CONFIG_SERIAL_8250_MANY_PORTS is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_SERIAL_OF_PLATFORM=y
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++# CONFIG_DAB is not set
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_DEVICE_CLASS=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_EHCI_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PPC_OF=y
++CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
++CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
++CONFIG_USB_OHCI_HCD_PCI=y
++CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y
++CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++# CONFIG_USB_UHCI_HCD is not set
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_BERRY_CHARGE is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGET is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_JFFS2_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
+new file mode 100644
+index 0000000..01ad595
+--- /dev/null
++++ b/arch/powerpc/configs/ep8248e_defconfig
+@@ -0,0 +1,821 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Fri Jan 11 14:02:06 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++CONFIG_6xx=y
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_PPC_FPU=y
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_32=y
++# CONFIG_PPC_MM_SLICES is not set
++# CONFIG_SMP is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++# CONFIG_PPC_UDBG_16550 is not set
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++# CONFIG_EXPERIMENTAL is not set
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++# CONFIG_BLK_DEV_INITRD is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++# CONFIG_IOSCHED_AS is not set
++CONFIG_IOSCHED_DEADLINE=y
++# CONFIG_IOSCHED_CFQ is not set
++# CONFIG_DEFAULT_AS is not set
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MULTIPLATFORM is not set
++CONFIG_PPC_82xx=y
++# CONFIG_PPC_83xx is not set
++# CONFIG_PPC_86xx is not set
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_MPC8272_ADS is not set
++# CONFIG_PQ2FADS is not set
++CONFIG_EP8248E=y
++# CONFIG_PQ2ADS is not set
++CONFIG_8260=y
++CONFIG_8272=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++CONFIG_CPM2=y
++CONFIG_PPC_CPM_NEW_BINDING=y
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_CPM=y
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++# CONFIG_SECCOMP is not set
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="ep8248e.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_FSL_SOC=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00400000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++CONFIG_INET_TUNNEL=y
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_IP_VS is not set
++CONFIG_IPV6=y
++# CONFIG_IPV6_PRIVACY is not set
++# CONFIG_IPV6_ROUTER_PREF is not set
++# CONFIG_INET6_AH is not set
++# CONFIG_INET6_ESP is not set
++# CONFIG_INET6_IPCOMP is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++CONFIG_INET6_XFRM_MODE_TRANSPORT=y
++CONFIG_INET6_XFRM_MODE_TUNNEL=y
++CONFIG_INET6_XFRM_MODE_BEET=y
++CONFIG_IPV6_SIT=y
++# CONFIG_IPV6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK_ENABLED is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_QUEUE is not set
++# CONFIG_IP_NF_IPTABLES is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++# CONFIG_MTD_PARTITIONS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++CONFIG_MTD_CFI_ADV_OPTIONS=y
++CONFIG_MTD_CFI_NOSWAP=y
++# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
++CONFIG_MTD_CFI_GEOMETRY=y
++# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_2 is not set
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++# CONFIG_MTD_CFI_I1 is not set
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_OTP is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_RAM is not set
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_MISC_DEVICES is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++CONFIG_DAVICOM_PHY=y
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++CONFIG_MDIO_BITBANG=y
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_B44 is not set
++CONFIG_FS_ENET=y
++# CONFIG_FS_ENET_HAS_SCC is not set
++CONFIG_FS_ENET_HAS_FCC=y
++# CONFIG_FS_ENET_MDIO_FCC is not set
++CONFIG_NETDEV_1000=y
++CONFIG_NETDEV_10000=y
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_CPM=y
++CONFIG_SERIAL_CPM_CONSOLE=y
++CONFIG_SERIAL_CPM_SCC1=y
++# CONFIG_SERIAL_CPM_SCC2 is not set
++# CONFIG_SERIAL_CPM_SCC3 is not set
++CONFIG_SERIAL_CPM_SCC4=y
++# CONFIG_SERIAL_CPM_SMC1 is not set
++# CONFIG_SERIAL_CPM_SMC2 is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++# CONFIG_USB_SUPPORT is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++# CONFIG_EXT3_FS_XATTR is not set
++CONFIG_JBD=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_JFFS2_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++CONFIG_NLS_UTF8=y
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++# CONFIG_CRC32 is not set
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++# CONFIG_DETECT_SOFTLOCKUP is not set
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_KGDB_CONSOLE is not set
++CONFIG_BDI_SWITCH=y
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=y
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++# CONFIG_CRYPTO_HW is not set
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/katmai_defconfig b/arch/powerpc/configs/katmai_defconfig
+new file mode 100644
+index 0000000..c8804ec
+--- /dev/null
++++ b/arch/powerpc/configs/katmai_defconfig
+@@ -0,0 +1,790 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 11:17:43 2007
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++CONFIG_44x=y
++# CONFIG_E200 is not set
++CONFIG_4xx=y
++CONFIG_BOOKE=y
++CONFIG_PTE_64BIT=y
++CONFIG_PHYS_64BIT=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_NOT_COHERENT_CACHE=y
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_PPC_DCR_NATIVE=y
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_PPC_DCR=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++CONFIG_PPC4xx_PCI_EXPRESS=y
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_BAMBOO is not set
++# CONFIG_EBONY is not set
++# CONFIG_SEQUOIA is not set
++# CONFIG_TAISHAN is not set
++CONFIG_KATMAI=y
++# CONFIG_RAINIER is not set
++CONFIG_440SPe=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++# CONFIG_MATH_EMULATION is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++CONFIG_CMDLINE_BOOL=y
++CONFIG_CMDLINE=""
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="katmai.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_CONSISTENT_START=0xff100000
++CONFIG_CONSISTENT_SIZE=0x00200000
++CONFIG_BOOT_LOAD=0x01000000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++# CONFIG_BLK_DEV_LOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=35000
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_XILINX_SYSACE is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++CONFIG_MACINTOSH_DRIVERS=y
++# CONFIG_MAC_EMUMOUSEBTN is not set
++# CONFIG_WINDFARM is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++CONFIG_IBM_NEW_EMAC=y
++CONFIG_IBM_NEW_EMAC_RXB=128
++CONFIG_IBM_NEW_EMAC_TXB=64
++CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
++CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
++CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
++# CONFIG_IBM_NEW_EMAC_DEBUG is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++CONFIG_IBM_NEW_EMAC_EMAC4=y
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++# CONFIG_SERIAL_8250_PCI is not set
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++# CONFIG_SERIAL_8250_MANY_PORTS is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_SERIAL_OF_PLATFORM=y
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++CONFIG_DEBUGGER=y
++# CONFIG_KGDB is not set
++# CONFIG_XMON is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/kilauea_defconfig b/arch/powerpc/configs/kilauea_defconfig
+index 28dee12..8dca3d4 100644
+--- a/arch/powerpc/configs/kilauea_defconfig
++++ b/arch/powerpc/configs/kilauea_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:48:20 2007
++# Linux kernel version: 2.6.24-rc6
++# Thu Jan  3 14:21:31 2008
+ #
+ # CONFIG_PPC64 is not set
+ 
+@@ -40,7 +40,7 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+ CONFIG_PPC_OF=y
+ CONFIG_OF=y
+-# CONFIG_PPC_UDBG_16550 is not set
++CONFIG_PPC_UDBG_16550=y
+ # CONFIG_GENERIC_TBSYNC is not set
+ CONFIG_AUDIT_ARCH=y
+ CONFIG_GENERIC_BUG=y
+@@ -125,6 +125,7 @@ CONFIG_DEFAULT_AS=y
+ # CONFIG_DEFAULT_CFQ is not set
+ # CONFIG_DEFAULT_NOOP is not set
+ CONFIG_DEFAULT_IOSCHED="anticipatory"
++CONFIG_PPC4xx_PCI_EXPRESS=y
+ 
+ #
+ # Platform support
+@@ -134,9 +135,12 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
+ # CONFIG_PPC_CELL is not set
+ # CONFIG_PPC_CELL_NATIVE is not set
+ # CONFIG_PQ2ADS is not set
++# CONFIG_EP405 is not set
+ CONFIG_KILAUEA=y
++# CONFIG_MAKALU is not set
+ # CONFIG_WALNUT is not set
+ # CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
++CONFIG_405EX=y
+ # CONFIG_MPIC is not set
+ # CONFIG_MPIC_WEIRD is not set
+ # CONFIG_PPC_I8259 is not set
+@@ -199,11 +203,17 @@ CONFIG_ISA_DMA_API=y
+ # Bus options
+ #
+ CONFIG_ZONE_DMA=y
+-# CONFIG_PCI is not set
+-# CONFIG_PCI_DOMAINS is not set
+-# CONFIG_PCI_SYSCALL is not set
+-# CONFIG_ARCH_SUPPORTS_MSI is not set
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
+ # CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
+ 
+ #
+ # Advanced setup
+@@ -368,11 +378,13 @@ CONFIG_MTD_CFI_UTIL=y
+ # CONFIG_MTD_COMPLEX_MAPPINGS is not set
+ # CONFIG_MTD_PHYSMAP is not set
+ CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_INTEL_VR_NOR is not set
+ # CONFIG_MTD_PLATRAM is not set
+ 
+ #
+ # Self-contained MTD device drivers
+ #
++# CONFIG_MTD_PMC551 is not set
+ # CONFIG_MTD_SLRAM is not set
+ # CONFIG_MTD_PHRAM is not set
+ # CONFIG_MTD_MTDRAM is not set
+@@ -395,9 +407,14 @@ CONFIG_OF_DEVICE=y
+ # CONFIG_PARPORT is not set
+ CONFIG_BLK_DEV=y
+ # CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
+ # CONFIG_BLK_DEV_COW_COMMON is not set
+ # CONFIG_BLK_DEV_LOOP is not set
+ # CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
+ CONFIG_BLK_DEV_RAM=y
+ CONFIG_BLK_DEV_RAM_COUNT=16
+ CONFIG_BLK_DEV_RAM_SIZE=35000
+@@ -417,6 +434,14 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+ # CONFIG_SCSI_NETLINK is not set
+ # CONFIG_ATA is not set
+ # CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
+ # CONFIG_MACINTOSH_DRIVERS is not set
+ CONFIG_NETDEVICES=y
+ # CONFIG_NETDEVICES_MULTIQUEUE is not set
+@@ -426,9 +451,33 @@ CONFIG_NETDEVICES=y
+ # CONFIG_EQUALIZER is not set
+ # CONFIG_TUN is not set
+ # CONFIG_VETH is not set
+-# CONFIG_NET_ETHERNET is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++CONFIG_IBM_NEW_EMAC=y
++CONFIG_IBM_NEW_EMAC_RXB=256
++CONFIG_IBM_NEW_EMAC_TXB=256
++CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
++CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
++CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
++# CONFIG_IBM_NEW_EMAC_DEBUG is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++CONFIG_IBM_NEW_EMAC_RGMII=y
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++CONFIG_IBM_NEW_EMAC_EMAC4=y
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
+ # CONFIG_NETDEV_1000 is not set
+ # CONFIG_NETDEV_10000 is not set
++# CONFIG_TR is not set
+ 
+ #
+ # Wireless LAN
+@@ -436,6 +485,8 @@ CONFIG_NETDEVICES=y
+ # CONFIG_WLAN_PRE80211 is not set
+ # CONFIG_WLAN_80211 is not set
+ # CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
+ # CONFIG_PPP is not set
+ # CONFIG_SLIP is not set
+ # CONFIG_SHAPER is not set
+@@ -467,6 +518,7 @@ CONFIG_NETDEVICES=y
+ #
+ CONFIG_SERIAL_8250=y
+ CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
+ CONFIG_SERIAL_8250_NR_UARTS=4
+ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+ CONFIG_SERIAL_8250_EXTENDED=y
+@@ -481,6 +533,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
+ # CONFIG_SERIAL_UARTLITE is not set
+ CONFIG_SERIAL_CORE=y
+ CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
+ CONFIG_SERIAL_OF_PLATFORM=y
+ CONFIG_UNIX98_PTYS=y
+ CONFIG_LEGACY_PTYS=y
+@@ -490,8 +543,10 @@ CONFIG_LEGACY_PTY_COUNT=256
+ # CONFIG_NVRAM is not set
+ # CONFIG_GEN_RTC is not set
+ # CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
+ # CONFIG_RAW_DRIVER is not set
+ # CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
+ # CONFIG_I2C is not set
+ 
+ #
+@@ -525,6 +580,8 @@ CONFIG_SSB_POSSIBLE=y
+ #
+ # Graphics support
+ #
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
+ # CONFIG_VGASTATE is not set
+ # CONFIG_VIDEO_OUTPUT_CONTROL is not set
+ # CONFIG_FB is not set
+@@ -542,6 +599,7 @@ CONFIG_SSB_POSSIBLE=y
+ # CONFIG_USB_SUPPORT is not set
+ # CONFIG_MMC is not set
+ # CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
+ # CONFIG_EDAC is not set
+ # CONFIG_RTC_CLASS is not set
+ 
+diff --git a/arch/powerpc/configs/lite5200_defconfig b/arch/powerpc/configs/lite5200_defconfig
+deleted file mode 100644
+index 02bb7e5..0000000
+--- a/arch/powerpc/configs/lite5200_defconfig
++++ /dev/null
+@@ -1,847 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:48:24 2007
+-#
+-# CONFIG_PPC64 is not set
+-
+-#
+-# Processor support
+-#
+-CONFIG_6xx=y
+-# CONFIG_PPC_85xx is not set
+-# CONFIG_PPC_8xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_E200 is not set
+-CONFIG_PPC_FPU=y
+-# CONFIG_ALTIVEC is not set
+-CONFIG_PPC_STD_MMU=y
+-CONFIG_PPC_STD_MMU_32=y
+-# CONFIG_PPC_MM_SLICES is not set
+-# CONFIG_SMP is not set
+-CONFIG_PPC32=y
+-CONFIG_WORD_SIZE=32
+-CONFIG_PPC_MERGE=y
+-CONFIG_MMU=y
+-CONFIG_GENERIC_CMOS_UPDATE=y
+-CONFIG_GENERIC_TIME=y
+-CONFIG_GENERIC_TIME_VSYSCALL=y
+-CONFIG_GENERIC_CLOCKEVENTS=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_IRQ_PER_CPU=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_ARCH_HAS_ILOG2_U32=y
+-CONFIG_GENERIC_HWEIGHT=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_GENERIC_FIND_NEXT_BIT=y
+-# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+-CONFIG_PPC=y
+-CONFIG_EARLY_PRINTK=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+-CONFIG_PPC_OF=y
+-CONFIG_OF=y
+-# CONFIG_PPC_UDBG_16550 is not set
+-# CONFIG_GENERIC_TBSYNC is not set
+-CONFIG_AUDIT_ARCH=y
+-CONFIG_GENERIC_BUG=y
+-# CONFIG_DEFAULT_UIMAGE is not set
+-# CONFIG_PPC_DCR_NATIVE is not set
+-# CONFIG_PPC_DCR_MMIO is not set
+-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+-
+-#
+-# General setup
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-CONFIG_SYSVIPC_SYSCTL=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-# CONFIG_TASKSTATS is not set
+-# CONFIG_USER_NS is not set
+-# CONFIG_PID_NS is not set
+-# CONFIG_AUDIT is not set
+-# CONFIG_IKCONFIG is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_CGROUPS is not set
+-# CONFIG_FAIR_GROUP_SCHED is not set
+-CONFIG_SYSFS_DEPRECATED=y
+-# CONFIG_RELAY is not set
+-CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE=""
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SYSCTL=y
+-CONFIG_EMBEDDED=y
+-# CONFIG_SYSCTL_SYSCALL is not set
+-# CONFIG_KALLSYMS is not set
+-CONFIG_HOTPLUG=y
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_ELF_CORE=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-CONFIG_ANON_INODES=y
+-# CONFIG_EPOLL is not set
+-CONFIG_SIGNALFD=y
+-CONFIG_EVENTFD=y
+-CONFIG_SHMEM=y
+-CONFIG_VM_EVENT_COUNTERS=y
+-CONFIG_SLUB_DEBUG=y
+-# CONFIG_SLAB is not set
+-CONFIG_SLUB=y
+-# CONFIG_SLOB is not set
+-CONFIG_RT_MUTEXES=y
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-CONFIG_MODULES=y
+-CONFIG_MODULE_UNLOAD=y
+-# CONFIG_MODULE_FORCE_UNLOAD is not set
+-# CONFIG_MODVERSIONS is not set
+-# CONFIG_MODULE_SRCVERSION_ALL is not set
+-# CONFIG_KMOD is not set
+-CONFIG_BLOCK=y
+-# CONFIG_LBD is not set
+-# CONFIG_BLK_DEV_IO_TRACE is not set
+-# CONFIG_LSF is not set
+-# CONFIG_BLK_DEV_BSG is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-CONFIG_DEFAULT_AS=y
+-# CONFIG_DEFAULT_DEADLINE is not set
+-# CONFIG_DEFAULT_CFQ is not set
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="anticipatory"
+-
+-#
+-# Platform support
+-#
+-CONFIG_PPC_MULTIPLATFORM=y
+-# CONFIG_PPC_82xx is not set
+-# CONFIG_PPC_83xx is not set
+-# CONFIG_PPC_86xx is not set
+-CONFIG_CLASSIC32=y
+-# CONFIG_PPC_CHRP is not set
+-CONFIG_PPC_MPC52xx=y
+-CONFIG_PPC_MPC5200=y
+-CONFIG_PPC_MPC5200_BUGFIX=y
+-# CONFIG_PPC_EFIKA is not set
+-CONFIG_PPC_LITE5200=y
+-# CONFIG_PPC_PMAC is not set
+-# CONFIG_PPC_CELL is not set
+-# CONFIG_PPC_CELL_NATIVE is not set
+-# CONFIG_PQ2ADS is not set
+-# CONFIG_EMBEDDED6xx is not set
+-# CONFIG_MPIC is not set
+-# CONFIG_MPIC_WEIRD is not set
+-# CONFIG_PPC_I8259 is not set
+-# CONFIG_PPC_RTAS is not set
+-# CONFIG_MMIO_NVRAM is not set
+-# CONFIG_PPC_MPC106 is not set
+-# CONFIG_PPC_970_NAP is not set
+-# CONFIG_PPC_INDIRECT_IO is not set
+-# CONFIG_GENERIC_IOMAP is not set
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_TAU is not set
+-# CONFIG_CPM2 is not set
+-# CONFIG_FSL_ULI1575 is not set
+-CONFIG_PPC_BESTCOMM=y
+-CONFIG_PPC_BESTCOMM_ATA=y
+-CONFIG_PPC_BESTCOMM_FEC=y
+-CONFIG_PPC_BESTCOMM_GEN_BD=y
+-
+-#
+-# Kernel options
+-#
+-# CONFIG_HIGHMEM is not set
+-CONFIG_TICK_ONESHOT=y
+-CONFIG_NO_HZ=y
+-CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+-# CONFIG_HZ_100 is not set
+-CONFIG_HZ_250=y
+-# CONFIG_HZ_300 is not set
+-# CONFIG_HZ_1000 is not set
+-CONFIG_HZ=250
+-CONFIG_PREEMPT_NONE=y
+-# CONFIG_PREEMPT_VOLUNTARY is not set
+-# CONFIG_PREEMPT is not set
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+-# CONFIG_KEXEC is not set
+-CONFIG_ARCH_FLATMEM_ENABLE=y
+-CONFIG_ARCH_POPULATES_NODE_MAP=y
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-# CONFIG_RESOURCES_64BIT is not set
+-CONFIG_ZONE_DMA_FLAG=1
+-CONFIG_BOUNCE=y
+-CONFIG_VIRT_TO_BUS=y
+-CONFIG_PROC_DEVICETREE=y
+-# CONFIG_CMDLINE_BOOL is not set
+-CONFIG_PM=y
+-# CONFIG_PM_LEGACY is not set
+-# CONFIG_PM_DEBUG is not set
+-CONFIG_PM_SLEEP=y
+-CONFIG_SUSPEND_UP_POSSIBLE=y
+-CONFIG_SUSPEND=y
+-CONFIG_HIBERNATION_UP_POSSIBLE=y
+-# CONFIG_HIBERNATION is not set
+-CONFIG_SECCOMP=y
+-CONFIG_WANT_DEVICE_TREE=y
+-CONFIG_DEVICE_TREE=""
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_ZONE_DMA=y
+-CONFIG_GENERIC_ISA_DMA=y
+-# CONFIG_PPC_INDIRECT_PCI is not set
+-CONFIG_FSL_SOC=y
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-CONFIG_PCI_SYSCALL=y
+-# CONFIG_PCIEPORTBUS is not set
+-CONFIG_ARCH_SUPPORTS_MSI=y
+-# CONFIG_PCI_MSI is not set
+-CONFIG_PCI_LEGACY=y
+-# CONFIG_PCI_DEBUG is not set
+-# CONFIG_PCCARD is not set
+-# CONFIG_HOTPLUG_PCI is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0xc0000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-CONFIG_XFRM=y
+-CONFIG_XFRM_USER=m
+-# CONFIG_XFRM_SUB_POLICY is not set
+-# CONFIG_XFRM_MIGRATE is not set
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_XFRM_TUNNEL is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_XFRM_MODE_TRANSPORT=y
+-CONFIG_INET_XFRM_MODE_TUNNEL=y
+-CONFIG_INET_XFRM_MODE_BEET=y
+-# CONFIG_INET_LRO is not set
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_CUBIC=y
+-CONFIG_DEFAULT_TCP_CONG="cubic"
+-# CONFIG_TCP_MD5SIG is not set
+-# CONFIG_IPV6 is not set
+-# CONFIG_INET6_XFRM_TUNNEL is not set
+-# CONFIG_INET6_TUNNEL is not set
+-# CONFIG_NETWORK_SECMARK is not set
+-# CONFIG_NETFILTER is not set
+-# CONFIG_IP_DCCP is not set
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_TIPC is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-# CONFIG_NET_SCHED is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_AF_RXRPC is not set
+-
+-#
+-# Wireless
+-#
+-# CONFIG_CFG80211 is not set
+-# CONFIG_WIRELESS_EXT is not set
+-# CONFIG_MAC80211 is not set
+-# CONFIG_IEEE80211 is not set
+-# CONFIG_RFKILL is not set
+-# CONFIG_NET_9P is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-# CONFIG_DEBUG_DRIVER is not set
+-# CONFIG_DEBUG_DEVRES is not set
+-# CONFIG_SYS_HYPERVISOR is not set
+-# CONFIG_CONNECTOR is not set
+-# CONFIG_MTD is not set
+-CONFIG_OF_DEVICE=y
+-# CONFIG_PARPORT is not set
+-CONFIG_BLK_DEV=y
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+-# CONFIG_CDROM_PKTCDVD is not set
+-# CONFIG_ATA_OVER_ETH is not set
+-CONFIG_MISC_DEVICES=y
+-# CONFIG_PHANTOM is not set
+-# CONFIG_EEPROM_93CX6 is not set
+-# CONFIG_SGI_IOC4 is not set
+-# CONFIG_TIFM_CORE is not set
+-# CONFIG_IDE is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-CONFIG_SCSI=y
+-CONFIG_SCSI_DMA=y
+-# CONFIG_SCSI_TGT is not set
+-# CONFIG_SCSI_NETLINK is not set
+-# CONFIG_SCSI_PROC_FS is not set
+-
+-#
+-# SCSI support type (disk, tape, CD-ROM)
+-#
+-# CONFIG_BLK_DEV_SD is not set
+-# CONFIG_CHR_DEV_ST is not set
+-# CONFIG_CHR_DEV_OSST is not set
+-# CONFIG_BLK_DEV_SR is not set
+-# CONFIG_CHR_DEV_SG is not set
+-# CONFIG_CHR_DEV_SCH is not set
+-
+-#
+-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+-#
+-# CONFIG_SCSI_MULTI_LUN is not set
+-# CONFIG_SCSI_CONSTANTS is not set
+-# CONFIG_SCSI_LOGGING is not set
+-# CONFIG_SCSI_SCAN_ASYNC is not set
+-CONFIG_SCSI_WAIT_SCAN=m
+-
+-#
+-# SCSI Transports
+-#
+-# CONFIG_SCSI_SPI_ATTRS is not set
+-# CONFIG_SCSI_FC_ATTRS is not set
+-# CONFIG_SCSI_ISCSI_ATTRS is not set
+-# CONFIG_SCSI_SAS_LIBSAS is not set
+-# CONFIG_SCSI_SRP_ATTRS is not set
+-CONFIG_SCSI_LOWLEVEL=y
+-# CONFIG_ISCSI_TCP is not set
+-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+-# CONFIG_SCSI_3W_9XXX is not set
+-# CONFIG_SCSI_ACARD is not set
+-# CONFIG_SCSI_AACRAID is not set
+-# CONFIG_SCSI_AIC7XXX is not set
+-# CONFIG_SCSI_AIC7XXX_OLD is not set
+-# CONFIG_SCSI_AIC79XX is not set
+-# CONFIG_SCSI_AIC94XX is not set
+-# CONFIG_SCSI_DPT_I2O is not set
+-# CONFIG_SCSI_ADVANSYS is not set
+-# CONFIG_SCSI_ARCMSR is not set
+-# CONFIG_MEGARAID_NEWGEN is not set
+-# CONFIG_MEGARAID_LEGACY is not set
+-# CONFIG_MEGARAID_SAS is not set
+-# CONFIG_SCSI_HPTIOP is not set
+-# CONFIG_SCSI_BUSLOGIC is not set
+-# CONFIG_SCSI_DMX3191D is not set
+-# CONFIG_SCSI_EATA is not set
+-# CONFIG_SCSI_FUTURE_DOMAIN is not set
+-# CONFIG_SCSI_GDTH is not set
+-# CONFIG_SCSI_IPS is not set
+-# CONFIG_SCSI_INITIO is not set
+-# CONFIG_SCSI_INIA100 is not set
+-# CONFIG_SCSI_STEX is not set
+-# CONFIG_SCSI_SYM53C8XX_2 is not set
+-# CONFIG_SCSI_IPR is not set
+-# CONFIG_SCSI_QLOGIC_1280 is not set
+-# CONFIG_SCSI_QLA_FC is not set
+-# CONFIG_SCSI_QLA_ISCSI is not set
+-# CONFIG_SCSI_LPFC is not set
+-# CONFIG_SCSI_DC395x is not set
+-# CONFIG_SCSI_DC390T is not set
+-# CONFIG_SCSI_NSP32 is not set
+-# CONFIG_SCSI_DEBUG is not set
+-# CONFIG_SCSI_SRP is not set
+-CONFIG_ATA=y
+-# CONFIG_ATA_NONSTANDARD is not set
+-# CONFIG_SATA_AHCI is not set
+-# CONFIG_SATA_SVW is not set
+-# CONFIG_ATA_PIIX is not set
+-# CONFIG_SATA_MV is not set
+-# CONFIG_SATA_NV is not set
+-# CONFIG_PDC_ADMA is not set
+-# CONFIG_SATA_QSTOR is not set
+-# CONFIG_SATA_PROMISE is not set
+-# CONFIG_SATA_SX4 is not set
+-# CONFIG_SATA_SIL is not set
+-# CONFIG_SATA_SIL24 is not set
+-# CONFIG_SATA_SIS is not set
+-# CONFIG_SATA_ULI is not set
+-# CONFIG_SATA_VIA is not set
+-# CONFIG_SATA_VITESSE is not set
+-# CONFIG_SATA_INIC162X is not set
+-# CONFIG_PATA_ALI is not set
+-# CONFIG_PATA_AMD is not set
+-# CONFIG_PATA_ARTOP is not set
+-# CONFIG_PATA_ATIIXP is not set
+-# CONFIG_PATA_CMD640_PCI is not set
+-# CONFIG_PATA_CMD64X is not set
+-# CONFIG_PATA_CS5520 is not set
+-# CONFIG_PATA_CS5530 is not set
+-# CONFIG_PATA_CYPRESS is not set
+-# CONFIG_PATA_EFAR is not set
+-# CONFIG_ATA_GENERIC is not set
+-# CONFIG_PATA_HPT366 is not set
+-# CONFIG_PATA_HPT37X is not set
+-# CONFIG_PATA_HPT3X2N is not set
+-# CONFIG_PATA_HPT3X3 is not set
+-# CONFIG_PATA_IT821X is not set
+-# CONFIG_PATA_IT8213 is not set
+-# CONFIG_PATA_JMICRON is not set
+-# CONFIG_PATA_TRIFLEX is not set
+-# CONFIG_PATA_MARVELL is not set
+-CONFIG_PATA_MPC52xx=y
+-# CONFIG_PATA_MPIIX is not set
+-# CONFIG_PATA_OLDPIIX is not set
+-# CONFIG_PATA_NETCELL is not set
+-# CONFIG_PATA_NS87410 is not set
+-# CONFIG_PATA_NS87415 is not set
+-# CONFIG_PATA_OPTI is not set
+-# CONFIG_PATA_OPTIDMA is not set
+-# CONFIG_PATA_PDC_OLD is not set
+-# CONFIG_PATA_RADISYS is not set
+-# CONFIG_PATA_RZ1000 is not set
+-# CONFIG_PATA_SC1200 is not set
+-# CONFIG_PATA_SERVERWORKS is not set
+-# CONFIG_PATA_PDC2027X is not set
+-# CONFIG_PATA_SIL680 is not set
+-# CONFIG_PATA_SIS is not set
+-# CONFIG_PATA_VIA is not set
+-# CONFIG_PATA_WINBOND is not set
+-# CONFIG_PATA_PLATFORM is not set
+-# CONFIG_MD is not set
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_FIREWIRE is not set
+-# CONFIG_IEEE1394 is not set
+-# CONFIG_I2O is not set
+-# CONFIG_MACINTOSH_DRIVERS is not set
+-CONFIG_NETDEVICES=y
+-# CONFIG_NETDEVICES_MULTIQUEUE is not set
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_MACVLAN is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-# CONFIG_VETH is not set
+-# CONFIG_IP1000 is not set
+-# CONFIG_ARCNET is not set
+-# CONFIG_NET_ETHERNET is not set
+-CONFIG_NETDEV_1000=y
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_E1000E is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SIS190 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SKY2 is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_VIA_VELOCITY is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-# CONFIG_MV643XX_ETH is not set
+-# CONFIG_QLA3XXX is not set
+-# CONFIG_ATL1 is not set
+-CONFIG_NETDEV_10000=y
+-# CONFIG_CHELSIO_T1 is not set
+-# CONFIG_CHELSIO_T3 is not set
+-# CONFIG_IXGBE is not set
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-# CONFIG_MYRI10GE is not set
+-# CONFIG_NETXEN_NIC is not set
+-# CONFIG_NIU is not set
+-# CONFIG_MLX4_CORE is not set
+-# CONFIG_TEHUTI is not set
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN
+-#
+-# CONFIG_WLAN_PRE80211 is not set
+-# CONFIG_WLAN_80211 is not set
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_NET_FC is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-# CONFIG_ISDN is not set
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-# CONFIG_INPUT is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-# CONFIG_SERIAL_8250 is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-# CONFIG_SERIAL_UARTLITE is not set
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_SERIAL_MPC52xx=y
+-CONFIG_SERIAL_MPC52xx_CONSOLE=y
+-CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-# CONFIG_IPMI_HANDLER is not set
+-# CONFIG_HW_RANDOM is not set
+-# CONFIG_NVRAM is not set
+-# CONFIG_GEN_RTC is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-# CONFIG_RAW_DRIVER is not set
+-# CONFIG_TCG_TPM is not set
+-CONFIG_DEVPORT=y
+-# CONFIG_I2C is not set
+-
+-#
+-# SPI support
+-#
+-# CONFIG_SPI is not set
+-# CONFIG_SPI_MASTER is not set
+-# CONFIG_W1 is not set
+-# CONFIG_POWER_SUPPLY is not set
+-# CONFIG_HWMON is not set
+-# CONFIG_WATCHDOG is not set
+-
+-#
+-# Sonics Silicon Backplane
+-#
+-CONFIG_SSB_POSSIBLE=y
+-# CONFIG_SSB is not set
+-
+-#
+-# Multifunction device drivers
+-#
+-# CONFIG_MFD_SM501 is not set
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-# CONFIG_DVB_CORE is not set
+-# CONFIG_DAB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_VGASTATE is not set
+-CONFIG_VIDEO_OUTPUT_CONTROL=m
+-# CONFIG_FB is not set
+-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-
+-#
+-# Display device support
+-#
+-# CONFIG_DISPLAY_SUPPORT is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-CONFIG_USB_SUPPORT=y
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-CONFIG_USB_ARCH_HAS_EHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-# CONFIG_MMC is not set
+-# CONFIG_NEW_LEDS is not set
+-# CONFIG_INFINIBAND is not set
+-# CONFIG_EDAC is not set
+-# CONFIG_RTC_CLASS is not set
+-
+-#
+-# Userspace I/O
+-#
+-# CONFIG_UIO is not set
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-# CONFIG_EXT4DEV_FS is not set
+-CONFIG_JBD=y
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_GFS2_FS is not set
+-# CONFIG_OCFS2_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-CONFIG_INOTIFY=y
+-CONFIG_INOTIFY_USER=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_PROC_SYSCTL=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_POSIX_ACL is not set
+-# CONFIG_HUGETLB_PAGE is not set
+-# CONFIG_CONFIGFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-CONFIG_NETWORK_FILESYSTEMS=y
+-# CONFIG_NFS_FS is not set
+-# CONFIG_NFSD is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-# CONFIG_PARTITION_ADVANCED is not set
+-CONFIG_MSDOS_PARTITION=y
+-# CONFIG_NLS is not set
+-# CONFIG_DLM is not set
+-# CONFIG_UCC_SLOW is not set
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-# CONFIG_CRC_ITU_T is not set
+-# CONFIG_CRC32 is not set
+-# CONFIG_CRC7 is not set
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_PLIST=y
+-CONFIG_HAS_IOMEM=y
+-CONFIG_HAS_IOPORT=y
+-CONFIG_HAS_DMA=y
+-# CONFIG_INSTRUMENTATION is not set
+-
+-#
+-# Kernel hacking
+-#
+-CONFIG_PRINTK_TIME=y
+-CONFIG_ENABLE_WARN_DEPRECATED=y
+-CONFIG_ENABLE_MUST_CHECK=y
+-# CONFIG_MAGIC_SYSRQ is not set
+-# CONFIG_UNUSED_SYMBOLS is not set
+-# CONFIG_DEBUG_FS is not set
+-# CONFIG_HEADERS_CHECK is not set
+-CONFIG_DEBUG_KERNEL=y
+-# CONFIG_DEBUG_SHIRQ is not set
+-CONFIG_DETECT_SOFTLOCKUP=y
+-CONFIG_SCHED_DEBUG=y
+-# CONFIG_SCHEDSTATS is not set
+-# CONFIG_TIMER_STATS is not set
+-# CONFIG_SLUB_DEBUG_ON is not set
+-# CONFIG_DEBUG_RT_MUTEXES is not set
+-# CONFIG_RT_MUTEX_TESTER is not set
+-# CONFIG_DEBUG_SPINLOCK is not set
+-# CONFIG_DEBUG_MUTEXES is not set
+-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+-# CONFIG_DEBUG_KOBJECT is not set
+-# CONFIG_DEBUG_BUGVERBOSE is not set
+-CONFIG_DEBUG_INFO=y
+-# CONFIG_DEBUG_VM is not set
+-# CONFIG_DEBUG_LIST is not set
+-# CONFIG_DEBUG_SG is not set
+-CONFIG_FORCED_INLINING=y
+-# CONFIG_BOOT_PRINTK_DELAY is not set
+-# CONFIG_RCU_TORTURE_TEST is not set
+-# CONFIG_FAULT_INJECTION is not set
+-# CONFIG_SAMPLES is not set
+-# CONFIG_DEBUG_STACKOVERFLOW is not set
+-# CONFIG_DEBUG_STACK_USAGE is not set
+-# CONFIG_DEBUG_PAGEALLOC is not set
+-# CONFIG_DEBUGGER is not set
+-# CONFIG_BDI_SWITCH is not set
+-# CONFIG_BOOTX_TEXT is not set
+-# CONFIG_PPC_EARLY_DEBUG is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+-# CONFIG_CRYPTO is not set
+-CONFIG_PPC_CLOCK=y
+-CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/makalu_defconfig b/arch/powerpc/configs/makalu_defconfig
+new file mode 100644
+index 0000000..c5db026
+--- /dev/null
++++ b/arch/powerpc/configs/makalu_defconfig
+@@ -0,0 +1,812 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 11:18:32 2007
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++CONFIG_40x=y
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_4xx=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_NOT_COHERENT_CACHE=y
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_PPC_DCR_NATIVE=y
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_PPC_DCR=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++# CONFIG_FAIR_GROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++CONFIG_PPC4xx_PCI_EXPRESS=y
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_EP405 is not set
++# CONFIG_KILAUEA is not set
++CONFIG_MAKALU=y
++# CONFIG_WALNUT is not set
++# CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
++CONFIG_405EX=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++# CONFIG_MATH_EMULATION is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="kilauea.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_CONSISTENT_START=0xff100000
++CONFIG_CONSISTENT_SIZE=0x00200000
++CONFIG_BOOT_LOAD=0x00400000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=m
++CONFIG_MTD_BLOCK=m
++# CONFIG_MTD_BLOCK_RO is not set
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++CONFIG_MTD_JEDECPROBE=y
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++# CONFIG_BLK_DEV_LOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=35000
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_XILINX_SYSACE is not set
++# CONFIG_MISC_DEVICES is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++CONFIG_IBM_NEW_EMAC=y
++CONFIG_IBM_NEW_EMAC_RXB=256
++CONFIG_IBM_NEW_EMAC_TXB=256
++CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
++CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
++CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
++# CONFIG_IBM_NEW_EMAC_DEBUG is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++CONFIG_IBM_NEW_EMAC_RGMII=y
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++CONFIG_IBM_NEW_EMAC_EMAC4=y
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++# CONFIG_SERIAL_8250_MANY_PORTS is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_SERIAL_OF_PLATFORM=y
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++# CONFIG_DAB is not set
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++# CONFIG_USB_SUPPORT is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_JFFS2_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
+new file mode 100644
+index 0000000..740c9f2
+--- /dev/null
++++ b/arch/powerpc/configs/mpc5200_defconfig
+@@ -0,0 +1,1286 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Fri Jan 18 14:19:54 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++CONFIG_6xx=y
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_PPC_FPU=y
++# CONFIG_ALTIVEC is not set
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_32=y
++# CONFIG_PPC_MM_SLICES is not set
++# CONFIG_SMP is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++# CONFIG_PPC_UDBG_16550 is not set
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++# CONFIG_SYSCTL_SYSCALL is not set
++# CONFIG_KALLSYMS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++CONFIG_PPC_MULTIPLATFORM=y
++# CONFIG_PPC_82xx is not set
++# CONFIG_PPC_83xx is not set
++# CONFIG_PPC_86xx is not set
++CONFIG_CLASSIC32=y
++# CONFIG_PPC_CHRP is not set
++CONFIG_PPC_MPC52xx=y
++CONFIG_PPC_MPC5200=y
++CONFIG_PPC_MPC5200_BUGFIX=y
++CONFIG_PPC_MPC5200_SIMPLE=y
++CONFIG_PPC_EFIKA=y
++CONFIG_PPC_LITE5200=y
++# CONFIG_PPC_PMAC is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_EMBEDDED6xx is not set
++CONFIG_PPC_NATIVE=y
++# CONFIG_UDBG_RTAS_CONSOLE is not set
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++CONFIG_PPC_RTAS=y
++# CONFIG_RTAS_ERROR_LOGGING is not set
++CONFIG_RTAS_PROC=y
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_TAU is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_PPC_BESTCOMM=y
++CONFIG_PPC_BESTCOMM_ATA=y
++CONFIG_PPC_BESTCOMM_FEC=y
++CONFIG_PPC_BESTCOMM_GEN_BD=y
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_KEXEC is not set
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++CONFIG_PM=y
++# CONFIG_PM_LEGACY is not set
++# CONFIG_PM_DEBUG is not set
++CONFIG_PM_SLEEP=y
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_SUSPEND=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++# CONFIG_HIBERNATION is not set
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_GENERIC_ISA_DMA=y
++# CONFIG_PPC_INDIRECT_PCI is not set
++CONFIG_FSL_SOC=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=y
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++CONFIG_MTD_RAM=y
++CONFIG_MTD_ROM=y
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++CONFIG_SCSI_TGT=y
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_AIC94XX is not set
++# CONFIG_SCSI_DPT_I2O is not set
++# CONFIG_SCSI_ADVANSYS is not set
++# CONFIG_SCSI_ARCMSR is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_STEX is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_QLA_ISCSI is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_SRP is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++# CONFIG_SATA_AHCI is not set
++# CONFIG_SATA_SVW is not set
++# CONFIG_ATA_PIIX is not set
++# CONFIG_SATA_MV is not set
++# CONFIG_SATA_NV is not set
++# CONFIG_PDC_ADMA is not set
++# CONFIG_SATA_QSTOR is not set
++# CONFIG_SATA_PROMISE is not set
++# CONFIG_SATA_SX4 is not set
++# CONFIG_SATA_SIL is not set
++# CONFIG_SATA_SIL24 is not set
++# CONFIG_SATA_SIS is not set
++# CONFIG_SATA_ULI is not set
++# CONFIG_SATA_VIA is not set
++# CONFIG_SATA_VITESSE is not set
++# CONFIG_SATA_INIC162X is not set
++# CONFIG_PATA_ALI is not set
++# CONFIG_PATA_AMD is not set
++# CONFIG_PATA_ARTOP is not set
++# CONFIG_PATA_ATIIXP is not set
++# CONFIG_PATA_CMD640_PCI is not set
++# CONFIG_PATA_CMD64X is not set
++# CONFIG_PATA_CS5520 is not set
++# CONFIG_PATA_CS5530 is not set
++# CONFIG_PATA_CYPRESS is not set
++# CONFIG_PATA_EFAR is not set
++# CONFIG_ATA_GENERIC is not set
++# CONFIG_PATA_HPT366 is not set
++# CONFIG_PATA_HPT37X is not set
++# CONFIG_PATA_HPT3X2N is not set
++# CONFIG_PATA_HPT3X3 is not set
++# CONFIG_PATA_IT821X is not set
++# CONFIG_PATA_IT8213 is not set
++# CONFIG_PATA_JMICRON is not set
++# CONFIG_PATA_TRIFLEX is not set
++# CONFIG_PATA_MARVELL is not set
++CONFIG_PATA_MPC52xx=y
++# CONFIG_PATA_MPIIX is not set
++# CONFIG_PATA_OLDPIIX is not set
++# CONFIG_PATA_NETCELL is not set
++# CONFIG_PATA_NS87410 is not set
++# CONFIG_PATA_NS87415 is not set
++# CONFIG_PATA_OPTI is not set
++# CONFIG_PATA_OPTIDMA is not set
++# CONFIG_PATA_PDC_OLD is not set
++# CONFIG_PATA_RADISYS is not set
++# CONFIG_PATA_RZ1000 is not set
++# CONFIG_PATA_SC1200 is not set
++# CONFIG_PATA_SERVERWORKS is not set
++# CONFIG_PATA_PDC2027X is not set
++# CONFIG_PATA_SIL680 is not set
++# CONFIG_PATA_SIS is not set
++# CONFIG_PATA_VIA is not set
++# CONFIG_PATA_WINBOND is not set
++CONFIG_PATA_PLATFORM=y
++# CONFIG_PATA_OF_PLATFORM is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++CONFIG_FEC_MPC52xx=y
++CONFIG_FEC_MPC52xx_MDIO=y
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_MPC52xx=y
++CONFIG_SERIAL_MPC52xx_CONSOLE=y
++CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_HVC_RTAS is not set
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_TINY_USB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++# CONFIG_MPC5200_WDT is not set
++# CONFIG_WATCHDOG_RTAS is not set
++
++#
++# PCI-based Watchdog Cards
++#
++# CONFIG_PCIPCWATCHDOG is not set
++# CONFIG_WDTPCI is not set
++
++#
++# USB-based Watchdog Cards
++#
++# CONFIG_USBPCWATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++# CONFIG_USB_DEVICE_CLASS is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_PERSIST is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_EHCI_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PPC_SOC=y
++CONFIG_USB_OHCI_HCD_PPC_OF=y
++CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
++# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set
++CONFIG_USB_OHCI_HCD_PCI=y
++CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y
++CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++# CONFIG_USB_UHCI_HCD is not set
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_BERRY_CHARGE is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGET is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=y
++# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_BIND34 is not set
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++CONFIG_PRINTK_TIME=y
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_BOOTX_TEXT is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++# CONFIG_CRYPTO_ECB is not set
++CONFIG_CRYPTO_CBC=y
++# CONFIG_CRYPTO_PCBC is not set
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++CONFIG_PPC_CLOCK=y
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/mpc8313_rdb_defconfig b/arch/powerpc/configs/mpc8313_rdb_defconfig
+index c9af905..3b29ac5 100644
+--- a/arch/powerpc/configs/mpc8313_rdb_defconfig
++++ b/arch/powerpc/configs/mpc8313_rdb_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:48:31 2007
++# Linux kernel version: 2.6.24-rc6
++# Thu Jan 17 16:35:55 2008
+ #
+ # CONFIG_PPC64 is not set
+ 
+@@ -138,12 +138,13 @@ CONFIG_PPC_83xx=y
+ # CONFIG_PPC_CELL is not set
+ # CONFIG_PPC_CELL_NATIVE is not set
+ # CONFIG_PQ2ADS is not set
+-CONFIG_MPC8313_RDB=y
++CONFIG_MPC831x_RDB=y
+ # CONFIG_MPC832x_MDS is not set
+ # CONFIG_MPC832x_RDB is not set
+ # CONFIG_MPC834x_MDS is not set
+ # CONFIG_MPC834x_ITX is not set
+ # CONFIG_MPC836x_MDS is not set
++# CONFIG_MPC837x_MDS is not set
+ CONFIG_PPC_MPC831x=y
+ # CONFIG_MPIC is not set
+ # CONFIG_MPIC_WEIRD is not set
+@@ -336,15 +337,16 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
+ CONFIG_MTD=y
+ # CONFIG_MTD_DEBUG is not set
+ # CONFIG_MTD_CONCAT is not set
+-# CONFIG_MTD_PARTITIONS is not set
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
+ 
+ #
+ # User Modules And Translation Layers
+ #
+ CONFIG_MTD_CHAR=y
+-# CONFIG_MTD_BLKDEVS is not set
+-# CONFIG_MTD_BLOCK is not set
+-# CONFIG_MTD_BLOCK_RO is not set
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
+ # CONFIG_FTL is not set
+ # CONFIG_NFTL is not set
+ # CONFIG_INFTL is not set
+@@ -381,11 +383,8 @@ CONFIG_MTD_CFI_UTIL=y
+ # Mapping drivers for chip access
+ #
+ # CONFIG_MTD_COMPLEX_MAPPINGS is not set
+-CONFIG_MTD_PHYSMAP=y
+-CONFIG_MTD_PHYSMAP_START=0xfe000000
+-CONFIG_MTD_PHYSMAP_LEN=0x1000000
+-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+-# CONFIG_MTD_PHYSMAP_OF is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
+ # CONFIG_MTD_INTEL_VR_NOR is not set
+ # CONFIG_MTD_PLATRAM is not set
+ 
+@@ -406,7 +405,16 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+ # CONFIG_MTD_DOC2000 is not set
+ # CONFIG_MTD_DOC2001 is not set
+ # CONFIG_MTD_DOC2001PLUS is not set
+-# CONFIG_MTD_NAND is not set
++CONFIG_MTD_NAND=y
++CONFIG_MTD_NAND_VERIFY_WRITE=y
++# CONFIG_MTD_NAND_ECC_SMC is not set
++# CONFIG_MTD_NAND_MUSEUM_IDS is not set
++CONFIG_MTD_NAND_IDS=y
++# CONFIG_MTD_NAND_DISKONCHIP is not set
++# CONFIG_MTD_NAND_CAFE is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++# CONFIG_MTD_NAND_PLATFORM is not set
++# CONFIG_MTD_ALAUDA is not set
+ # CONFIG_MTD_ONENAND is not set
+ 
+ #
+@@ -1178,7 +1186,17 @@ CONFIG_TMPFS=y
+ # CONFIG_BEFS_FS is not set
+ # CONFIG_BFS_FS is not set
+ # CONFIG_EFS_FS is not set
+-# CONFIG_JFFS2_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
+ # CONFIG_CRAMFS is not set
+ # CONFIG_VXFS_FS is not set
+ # CONFIG_HPFS_FS is not set
+@@ -1242,6 +1260,8 @@ CONFIG_BITREVERSE=y
+ CONFIG_CRC32=y
+ # CONFIG_CRC7 is not set
+ # CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
+ CONFIG_PLIST=y
+ CONFIG_HAS_IOMEM=y
+ CONFIG_HAS_IOPORT=y
+diff --git a/arch/powerpc/configs/mpc8315_rdb_defconfig b/arch/powerpc/configs/mpc8315_rdb_defconfig
+new file mode 100644
+index 0000000..9adf7f9
+--- /dev/null
++++ b/arch/powerpc/configs/mpc8315_rdb_defconfig
+@@ -0,0 +1,1417 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Wed Jan 23 20:02:25 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++CONFIG_6xx=y
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_83xx=y
++CONFIG_PPC_FPU=y
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_32=y
++# CONFIG_PPC_MM_SLICES is not set
++# CONFIG_SMP is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++# CONFIG_FAIR_GROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++# CONFIG_KALLSYMS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MULTIPLATFORM is not set
++# CONFIG_PPC_82xx is not set
++CONFIG_PPC_83xx=y
++# CONFIG_PPC_86xx is not set
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++CONFIG_MPC831x_RDB=y
++# CONFIG_MPC832x_MDS is not set
++# CONFIG_MPC832x_RDB is not set
++# CONFIG_MPC834x_MDS is not set
++# CONFIG_MPC834x_ITX is not set
++# CONFIG_MPC836x_MDS is not set
++# CONFIG_MPC837x_MDS is not set
++CONFIG_PPC_MPC831x=y
++CONFIG_IPIC=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_DATAFLASH is not set
++# CONFIG_MTD_M25P80 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++CONFIG_MTD_NAND=y
++CONFIG_MTD_NAND_VERIFY_WRITE=y
++# CONFIG_MTD_NAND_ECC_SMC is not set
++# CONFIG_MTD_NAND_MUSEUM_IDS is not set
++CONFIG_MTD_NAND_IDS=y
++# CONFIG_MTD_NAND_DISKONCHIP is not set
++# CONFIG_MTD_NAND_CAFE is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++# CONFIG_MTD_NAND_PLATFORM is not set
++# CONFIG_MTD_ALAUDA is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++# CONFIG_BLK_DEV_SD is not set
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_AIC94XX is not set
++# CONFIG_SCSI_DPT_I2O is not set
++# CONFIG_SCSI_ADVANSYS is not set
++# CONFIG_SCSI_ARCMSR is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_STEX is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_QLA_ISCSI is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_SRP is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++# CONFIG_SATA_AHCI is not set
++# CONFIG_SATA_SVW is not set
++# CONFIG_ATA_PIIX is not set
++# CONFIG_SATA_MV is not set
++# CONFIG_SATA_NV is not set
++# CONFIG_PDC_ADMA is not set
++# CONFIG_SATA_QSTOR is not set
++# CONFIG_SATA_PROMISE is not set
++# CONFIG_SATA_SX4 is not set
++# CONFIG_SATA_SIL is not set
++# CONFIG_SATA_SIL24 is not set
++# CONFIG_SATA_SIS is not set
++# CONFIG_SATA_ULI is not set
++# CONFIG_SATA_VIA is not set
++# CONFIG_SATA_VITESSE is not set
++# CONFIG_SATA_INIC162X is not set
++CONFIG_SATA_FSL=y
++# CONFIG_PATA_ALI is not set
++# CONFIG_PATA_AMD is not set
++# CONFIG_PATA_ARTOP is not set
++# CONFIG_PATA_ATIIXP is not set
++# CONFIG_PATA_CMD640_PCI is not set
++# CONFIG_PATA_CMD64X is not set
++# CONFIG_PATA_CS5520 is not set
++# CONFIG_PATA_CS5530 is not set
++# CONFIG_PATA_CYPRESS is not set
++# CONFIG_PATA_EFAR is not set
++# CONFIG_ATA_GENERIC is not set
++# CONFIG_PATA_HPT366 is not set
++# CONFIG_PATA_HPT37X is not set
++# CONFIG_PATA_HPT3X2N is not set
++# CONFIG_PATA_HPT3X3 is not set
++# CONFIG_PATA_IT821X is not set
++# CONFIG_PATA_IT8213 is not set
++# CONFIG_PATA_JMICRON is not set
++# CONFIG_PATA_TRIFLEX is not set
++# CONFIG_PATA_MARVELL is not set
++# CONFIG_PATA_MPIIX is not set
++# CONFIG_PATA_OLDPIIX is not set
++# CONFIG_PATA_NETCELL is not set
++# CONFIG_PATA_NS87410 is not set
++# CONFIG_PATA_NS87415 is not set
++# CONFIG_PATA_OPTI is not set
++# CONFIG_PATA_OPTIDMA is not set
++# CONFIG_PATA_PDC_OLD is not set
++# CONFIG_PATA_RADISYS is not set
++# CONFIG_PATA_RZ1000 is not set
++# CONFIG_PATA_SC1200 is not set
++# CONFIG_PATA_SERVERWORKS is not set
++# CONFIG_PATA_PDC2027X is not set
++# CONFIG_PATA_SIL680 is not set
++# CONFIG_PATA_SIS is not set
++# CONFIG_PATA_VIA is not set
++# CONFIG_PATA_WINBOND is not set
++# CONFIG_PATA_PLATFORM is not set
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=y
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++# CONFIG_MD_RAID456 is not set
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++# CONFIG_BLK_DEV_DM is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_TLAN is not set
++# CONFIG_VIA_RHINE is not set
++# CONFIG_SC92031 is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IP1000 is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_TINY_USB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++CONFIG_SPI_BITBANG=y
++CONFIG_SPI_MPC83xx=y
++
++#
++# SPI Protocol Masters
++#
++# CONFIG_SPI_AT25 is not set
++# CONFIG_SPI_SPIDEV is not set
++# CONFIG_SPI_TLE62X0 is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM70 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++CONFIG_83xx_WDT=y
++
++#
++# PCI-based Watchdog Cards
++#
++# CONFIG_PCIPCWATCHDOG is not set
++# CONFIG_WDTPCI is not set
++
++#
++# USB-based Watchdog Cards
++#
++# CONFIG_USBPCWATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++
++#
++# USB Input Devices
++#
++# CONFIG_USB_HID is not set
++
++#
++# USB HID Boot Protocol drivers
++#
++# CONFIG_USB_KBD is not set
++# CONFIG_USB_MOUSE is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_DEVICE_CLASS=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++CONFIG_USB_EHCI_FSL=y
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PPC_OF=y
++CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
++# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set
++CONFIG_USB_OHCI_HCD_PCI=y
++CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y
++CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_BERRY_CHARGE is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGET is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++CONFIG_USB_GADGET=y
++# CONFIG_USB_GADGET_DEBUG is not set
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++CONFIG_USB_GADGET_SELECTED=y
++# CONFIG_USB_GADGET_AMD5536UDC is not set
++# CONFIG_USB_GADGET_ATMEL_USBA is not set
++# CONFIG_USB_GADGET_FSL_USB2 is not set
++CONFIG_USB_GADGET_NET2280=y
++CONFIG_USB_NET2280=y
++# CONFIG_USB_GADGET_PXA2XX is not set
++# CONFIG_USB_GADGET_M66592 is not set
++# CONFIG_USB_GADGET_GOKU is not set
++# CONFIG_USB_GADGET_LH7A40X is not set
++# CONFIG_USB_GADGET_OMAP is not set
++# CONFIG_USB_GADGET_S3C2410 is not set
++# CONFIG_USB_GADGET_AT91 is not set
++# CONFIG_USB_GADGET_DUMMY_HCD is not set
++CONFIG_USB_GADGET_DUALSPEED=y
++# CONFIG_USB_ZERO is not set
++CONFIG_USB_ETH=y
++CONFIG_USB_ETH_RNDIS=y
++# CONFIG_USB_GADGETFS is not set
++# CONFIG_USB_FILE_STORAGE is not set
++# CONFIG_USB_G_SERIAL is not set
++# CONFIG_USB_MIDI_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++CONFIG_RTC_DRV_DS1307=y
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++
++#
++# SPI RTC drivers
++#
++# CONFIG_RTC_DRV_RS5C348 is not set
++# CONFIG_RTC_DRV_MAX6902 is not set
++
++#
++# Platform RTC drivers
++#
++# CONFIG_RTC_DRV_CMOS is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# on-CPU RTC drivers
++#
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_BIND34 is not set
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++# CONFIG_CRYPTO_ECB is not set
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=m
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/mpc834x_itx_defconfig b/arch/powerpc/configs/mpc834x_itx_defconfig
+index 6feb86e..2fbe4e5 100644
+--- a/arch/powerpc/configs/mpc834x_itx_defconfig
++++ b/arch/powerpc/configs/mpc834x_itx_defconfig
+@@ -570,7 +570,8 @@ CONFIG_SATA_SIL=y
+ # CONFIG_PATA_SIS is not set
+ # CONFIG_PATA_VIA is not set
+ # CONFIG_PATA_WINBOND is not set
+-# CONFIG_PATA_PLATFORM is not set
++CONFIG_PATA_PLATFORM=y
++CONFIG_PATA_OF_PLATFORM=y
+ CONFIG_MD=y
+ CONFIG_BLK_DEV_MD=y
+ CONFIG_MD_LINEAR=y
+diff --git a/arch/powerpc/configs/mpc837x_mds_defconfig b/arch/powerpc/configs/mpc837x_mds_defconfig
+new file mode 100644
+index 0000000..4f49aee
+--- /dev/null
++++ b/arch/powerpc/configs/mpc837x_mds_defconfig
+@@ -0,0 +1,878 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.23
++# Wed Oct 10 16:31:39 2007
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++CONFIG_6xx=y
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_83xx=y
++CONFIG_PPC_FPU=y
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_32=y
++# CONFIG_PPC_MM_SLICES is not set
++# CONFIG_SMP is not set
++CONFIG_PPC32=y
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MULTIPLATFORM is not set
++# CONFIG_EMBEDDED6xx is not set
++# CONFIG_PPC_82xx is not set
++CONFIG_PPC_83xx=y
++# CONFIG_PPC_86xx is not set
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8313_RDB is not set
++# CONFIG_MPC832x_MDS is not set
++# CONFIG_MPC832x_RDB is not set
++# CONFIG_MPC834x_MDS is not set
++# CONFIG_MPC834x_ITX is not set
++# CONFIG_MPC836x_MDS is not set
++CONFIG_MPC837x_MDS=y
++CONFIG_PPC_MPC837x=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_FSL_SERDES=y
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0x80000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_SCSI_DEBUG is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_SATA_FSL=y
++# CONFIG_PATA_PLATFORM is not set
++# CONFIG_MD is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++CONFIG_MARVELL_PHY=y
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++CONFIG_NETDEV_1000=y
++CONFIG_GIANFAR=y
++# CONFIG_GFAR_NAPI is not set
++CONFIG_NETDEV_10000=y
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++CONFIG_83xx_WDT=y
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_ABITUGURU is not set
++# CONFIG_SENSORS_ABITUGURU3 is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_FB_IBM_GXT4500 is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++# CONFIG_USB_ARCH_HAS_OHCI is not set
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_BIND34 is not set
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++
++#
++# Native Language Support
++#
++# CONFIG_NLS is not set
++
++#
++# Distributed Lock Manager
++#
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=m
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=m
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++CONFIG_CRYPTO_HW=y
+diff --git a/arch/powerpc/configs/mpc837x_rdb_defconfig b/arch/powerpc/configs/mpc837x_rdb_defconfig
+new file mode 100644
+index 0000000..91d291e
+--- /dev/null
++++ b/arch/powerpc/configs/mpc837x_rdb_defconfig
+@@ -0,0 +1,887 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Thu Jan 24 20:04:39 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++CONFIG_6xx=y
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_83xx=y
++CONFIG_PPC_FPU=y
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_32=y
++# CONFIG_PPC_MM_SLICES is not set
++# CONFIG_SMP is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MULTIPLATFORM is not set
++# CONFIG_PPC_82xx is not set
++CONFIG_PPC_83xx=y
++# CONFIG_PPC_86xx is not set
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC831x_RDB is not set
++# CONFIG_MPC832x_MDS is not set
++# CONFIG_MPC832x_RDB is not set
++# CONFIG_MPC834x_MDS is not set
++# CONFIG_MPC834x_ITX is not set
++# CONFIG_MPC836x_MDS is not set
++# CONFIG_MPC837x_MDS is not set
++CONFIG_MPC837x_RDB=y
++CONFIG_PPC_MPC837x=y
++CONFIG_IPIC=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_SCSI_DEBUG is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_SATA_FSL=y
++# CONFIG_PATA_PLATFORM is not set
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++# CONFIG_MD_LINEAR is not set
++# CONFIG_MD_RAID0 is not set
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++CONFIG_MD_RAID456=y
++CONFIG_MD_RAID5_RESHAPE=y
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++# CONFIG_BLK_DEV_DM is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++CONFIG_MARVELL_PHY=y
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_NETDEV_10000 is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++CONFIG_83xx_WDT=y
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++# CONFIG_USB_ARCH_HAS_OHCI is not set
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_BIND34 is not set
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++# CONFIG_ENABLE_MUST_CHECK is not set
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_XOR_BLOCKS=y
++CONFIG_ASYNC_CORE=y
++CONFIG_ASYNC_MEMCPY=y
++CONFIG_ASYNC_XOR=y
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=m
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=m
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
+new file mode 100644
+index 0000000..31bdbf3
+--- /dev/null
++++ b/arch/powerpc/configs/mpc83xx_defconfig
+@@ -0,0 +1,887 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Mon Jan 28 13:14:19 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++CONFIG_6xx=y
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_PPC_FPU=y
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_32=y
++# CONFIG_PPC_MM_SLICES is not set
++# CONFIG_SMP is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MULTIPLATFORM is not set
++# CONFIG_PPC_82xx is not set
++CONFIG_PPC_83xx=y
++# CONFIG_PPC_86xx is not set
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++CONFIG_MPC83xx=y
++CONFIG_MPC831x_RDB=y
++CONFIG_MPC832x_MDS=y
++CONFIG_MPC832x_RDB=y
++CONFIG_MPC834x_MDS=y
++CONFIG_MPC834x_ITX=y
++CONFIG_MPC836x_MDS=y
++CONFIG_MPC837x_MDS=y
++CONFIG_MPC837x_RDB=y
++CONFIG_SBC834x=y
++CONFIG_PPC_MPC831x=y
++CONFIG_PPC_MPC832x=y
++CONFIG_PPC_MPC834x=y
++CONFIG_PPC_MPC837x=y
++CONFIG_IPIC=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++CONFIG_QUICC_ENGINE=y
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++# CONFIG_MATH_EMULATION is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_SCSI_DEBUG is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_SATA_FSL=y
++# CONFIG_PATA_PLATFORM is not set
++# CONFIG_MD is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++CONFIG_MARVELL_PHY=y
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++CONFIG_GIANFAR=y
++# CONFIG_GFAR_NAPI is not set
++# CONFIG_UCC_GETH is not set
++CONFIG_NETDEV_10000=y
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_OF_PLATFORM is not set
++# CONFIG_SERIAL_QE is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++CONFIG_83xx_WDT=y
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++# CONFIG_USB_ARCH_HAS_OHCI is not set
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_FSL=y
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_BIND34 is not set
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=m
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=m
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
+new file mode 100644
+index 0000000..90e38ba
+--- /dev/null
++++ b/arch/powerpc/configs/mpc85xx_defconfig
+@@ -0,0 +1,1523 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Mon Jan 28 13:12:07 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++CONFIG_PPC_85xx=y
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_E500=y
++CONFIG_BOOKE=y
++CONFIG_FSL_BOOKE=y
++# CONFIG_PHYS_64BIT is not set
++CONFIG_SPE=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++CONFIG_AUDIT=y
++# CONFIG_AUDITSYSCALL is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++# CONFIG_FAIR_GROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_AS is not set
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++CONFIG_MPC85xx=y
++CONFIG_MPC8540_ADS=y
++CONFIG_MPC8560_ADS=y
++CONFIG_MPC85xx_CDS=y
++CONFIG_MPC85xx_MDS=y
++CONFIG_MPC85xx_DS=y
++# CONFIG_STX_GP3 is not set
++CONFIG_TQM8540=y
++CONFIG_TQM8541=y
++CONFIG_TQM8555=y
++CONFIG_TQM8560=y
++CONFIG_SBC8548=y
++# CONFIG_SBC8560 is not set
++CONFIG_TQM85xx=y
++# CONFIG_IPIC is not set
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++CONFIG_PPC_I8259=y
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++CONFIG_QUICC_ENGINE=y
++CONFIG_CPM2=y
++CONFIG_PPC_CPM_NEW_BINDING=y
++CONFIG_FSL_ULI1575=y
++CONFIG_CPM=y
++
++#
++# Kernel options
++#
++CONFIG_HIGHMEM=y
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=m
++CONFIG_MATH_EMULATION=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_FSL_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=y
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++CONFIG_NET_KEY=m
++# CONFIG_NET_KEY_MIGRATE is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_ASK_IP_FIB_HASH=y
++# CONFIG_IP_FIB_TRIE is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++CONFIG_NET_IPIP=y
++CONFIG_NET_IPGRE=y
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_ARPD=y
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++CONFIG_INET_TUNNEL=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++CONFIG_IPV6=y
++# CONFIG_IPV6_PRIVACY is not set
++# CONFIG_IPV6_ROUTER_PREF is not set
++# CONFIG_IPV6_OPTIMISTIC_DAD is not set
++# CONFIG_INET6_AH is not set
++# CONFIG_INET6_ESP is not set
++# CONFIG_INET6_IPCOMP is not set
++# CONFIG_IPV6_MIP6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++CONFIG_INET6_XFRM_MODE_TRANSPORT=y
++CONFIG_INET6_XFRM_MODE_TUNNEL=y
++CONFIG_INET6_XFRM_MODE_BEET=y
++# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
++CONFIG_IPV6_SIT=y
++# CONFIG_IPV6_TUNNEL is not set
++# CONFIG_IPV6_MULTIPLE_TABLES is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++CONFIG_FIB_RULES=y
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=y
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=131072
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++# CONFIG_SCSI_CONSTANTS is not set
++CONFIG_SCSI_LOGGING=y
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_AIC94XX is not set
++# CONFIG_SCSI_DPT_I2O is not set
++# CONFIG_SCSI_ADVANSYS is not set
++# CONFIG_SCSI_ARCMSR is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_STEX is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_QLA_ISCSI is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_SRP is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_SATA_AHCI=y
++# CONFIG_SATA_SVW is not set
++# CONFIG_ATA_PIIX is not set
++# CONFIG_SATA_MV is not set
++# CONFIG_SATA_NV is not set
++# CONFIG_PDC_ADMA is not set
++# CONFIG_SATA_QSTOR is not set
++# CONFIG_SATA_PROMISE is not set
++# CONFIG_SATA_SX4 is not set
++# CONFIG_SATA_SIL is not set
++# CONFIG_SATA_SIL24 is not set
++# CONFIG_SATA_SIS is not set
++# CONFIG_SATA_ULI is not set
++# CONFIG_SATA_VIA is not set
++# CONFIG_SATA_VITESSE is not set
++# CONFIG_SATA_INIC162X is not set
++# CONFIG_SATA_FSL is not set
++CONFIG_PATA_ALI=y
++# CONFIG_PATA_AMD is not set
++# CONFIG_PATA_ARTOP is not set
++# CONFIG_PATA_ATIIXP is not set
++# CONFIG_PATA_CMD640_PCI is not set
++# CONFIG_PATA_CMD64X is not set
++# CONFIG_PATA_CS5520 is not set
++# CONFIG_PATA_CS5530 is not set
++# CONFIG_PATA_CYPRESS is not set
++# CONFIG_PATA_EFAR is not set
++# CONFIG_ATA_GENERIC is not set
++# CONFIG_PATA_HPT366 is not set
++# CONFIG_PATA_HPT37X is not set
++# CONFIG_PATA_HPT3X2N is not set
++# CONFIG_PATA_HPT3X3 is not set
++# CONFIG_PATA_IT821X is not set
++# CONFIG_PATA_IT8213 is not set
++# CONFIG_PATA_JMICRON is not set
++# CONFIG_PATA_TRIFLEX is not set
++# CONFIG_PATA_MARVELL is not set
++# CONFIG_PATA_MPIIX is not set
++# CONFIG_PATA_OLDPIIX is not set
++# CONFIG_PATA_NETCELL is not set
++# CONFIG_PATA_NS87410 is not set
++# CONFIG_PATA_NS87415 is not set
++# CONFIG_PATA_OPTI is not set
++# CONFIG_PATA_OPTIDMA is not set
++# CONFIG_PATA_PDC_OLD is not set
++# CONFIG_PATA_RADISYS is not set
++# CONFIG_PATA_RZ1000 is not set
++# CONFIG_PATA_SC1200 is not set
++# CONFIG_PATA_SERVERWORKS is not set
++# CONFIG_PATA_PDC2027X is not set
++# CONFIG_PATA_SIL680 is not set
++# CONFIG_PATA_SIS is not set
++# CONFIG_PATA_VIA is not set
++# CONFIG_PATA_WINBOND is not set
++# CONFIG_PATA_PLATFORM is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++CONFIG_VITESSE_PHY=y
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++# CONFIG_FS_ENET is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IP1000 is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_UCC_GETH is not set
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=2
++CONFIG_SERIAL_8250_RUNTIME_UARTS=2
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++CONFIG_SERIAL_8250_DETECT_IRQ=y
++CONFIG_SERIAL_8250_RSA=y
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_CPM is not set
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_OF_PLATFORM is not set
++# CONFIG_SERIAL_QE is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_NVRAM=y
++CONFIG_GEN_RTC=y
++CONFIG_GEN_RTC_X=y
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++# CONFIG_I2C_CHARDEV is not set
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_TINY_USB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++CONFIG_SENSORS_EEPROM=y
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++CONFIG_DVB_CORE=m
++# CONFIG_DVB_CORE_ATTACH is not set
++CONFIG_DVB_CAPTURE_DRIVERS=y
++
++#
++# Supported SAA7146 based PCI Adapters
++#
++
++#
++# Supported USB Adapters
++#
++# CONFIG_DVB_USB is not set
++# CONFIG_DVB_TTUSB_BUDGET is not set
++# CONFIG_DVB_TTUSB_DEC is not set
++# CONFIG_DVB_CINERGYT2 is not set
++
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++# CONFIG_DVB_B2C2_FLEXCOP is not set
++
++#
++# Supported BT878 Adapters
++#
++
++#
++# Supported Pluto2 Adapters
++#
++# CONFIG_DVB_PLUTO2 is not set
++
++#
++# Supported DVB Frontends
++#
++
++#
++# Customise DVB Frontends
++#
++# CONFIG_DVB_FE_CUSTOMISE is not set
++
++#
++# DVB-S (satellite) frontends
++#
++# CONFIG_DVB_STV0299 is not set
++# CONFIG_DVB_CX24110 is not set
++# CONFIG_DVB_CX24123 is not set
++# CONFIG_DVB_TDA8083 is not set
++# CONFIG_DVB_MT312 is not set
++# CONFIG_DVB_VES1X93 is not set
++# CONFIG_DVB_S5H1420 is not set
++# CONFIG_DVB_TDA10086 is not set
++
++#
++# DVB-T (terrestrial) frontends
++#
++# CONFIG_DVB_SP8870 is not set
++# CONFIG_DVB_SP887X is not set
++# CONFIG_DVB_CX22700 is not set
++# CONFIG_DVB_CX22702 is not set
++# CONFIG_DVB_L64781 is not set
++# CONFIG_DVB_TDA1004X is not set
++# CONFIG_DVB_NXT6000 is not set
++# CONFIG_DVB_MT352 is not set
++# CONFIG_DVB_ZL10353 is not set
++# CONFIG_DVB_DIB3000MB is not set
++# CONFIG_DVB_DIB3000MC is not set
++# CONFIG_DVB_DIB7000M is not set
++# CONFIG_DVB_DIB7000P is not set
++
++#
++# DVB-C (cable) frontends
++#
++# CONFIG_DVB_VES1820 is not set
++# CONFIG_DVB_TDA10021 is not set
++# CONFIG_DVB_TDA10023 is not set
++# CONFIG_DVB_STV0297 is not set
++
++#
++# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++#
++# CONFIG_DVB_NXT200X is not set
++# CONFIG_DVB_OR51211 is not set
++# CONFIG_DVB_OR51132 is not set
++# CONFIG_DVB_BCM3510 is not set
++# CONFIG_DVB_LGDT330X is not set
++# CONFIG_DVB_S5H1409 is not set
++
++#
++# Tuners/PLL support
++#
++# CONFIG_DVB_PLL is not set
++# CONFIG_DVB_TDA826X is not set
++# CONFIG_DVB_TDA827X is not set
++# CONFIG_DVB_TUNER_QT1010 is not set
++# CONFIG_DVB_TUNER_MT2060 is not set
++# CONFIG_DVB_TUNER_MT2266 is not set
++# CONFIG_DVB_TUNER_MT2131 is not set
++# CONFIG_DVB_TUNER_DIB0070 is not set
++
++#
++# Miscellaneous devices
++#
++# CONFIG_DVB_LNBP21 is not set
++# CONFIG_DVB_ISL6421 is not set
++# CONFIG_DVB_TUA6100 is not set
++CONFIG_DAB=y
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=y
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++# CONFIG_SND_SEQUENCER is not set
++# CONFIG_SND_MIXER_OSS is not set
++# CONFIG_SND_PCM_OSS is not set
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_AC97_CODEC=y
++# CONFIG_SND_DUMMY is not set
++# CONFIG_SND_MTPAV is not set
++# CONFIG_SND_SERIAL_U16550 is not set
++# CONFIG_SND_MPU401 is not set
++
++#
++# PCI devices
++#
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALS4000 is not set
++# CONFIG_SND_ALI5451 is not set
++# CONFIG_SND_ATIIXP is not set
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_CS5530 is not set
++# CONFIG_SND_DARLA20 is not set
++# CONFIG_SND_GINA20 is not set
++# CONFIG_SND_LAYLA20 is not set
++# CONFIG_SND_DARLA24 is not set
++# CONFIG_SND_GINA24 is not set
++# CONFIG_SND_LAYLA24 is not set
++# CONFIG_SND_MONA is not set
++# CONFIG_SND_MIA is not set
++# CONFIG_SND_ECHO3G is not set
++# CONFIG_SND_INDIGO is not set
++# CONFIG_SND_INDIGOIO is not set
++# CONFIG_SND_INDIGODJ is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++# CONFIG_SND_FM801 is not set
++# CONFIG_SND_HDA_INTEL is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++CONFIG_SND_INTEL8X0=y
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RIPTIDE is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++# CONFIG_SND_AC97_POWER_SAVE is not set
++
++#
++# ALSA PowerMac devices
++#
++
++#
++# ALSA PowerPC devices
++#
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
++# CONFIG_SND_USB_USX2Y is not set
++# CONFIG_SND_USB_CAIAQ is not set
++
++#
++# System on Chip audio support
++#
++# CONFIG_SND_SOC is not set
++
++#
++# SoC Audio support for SuperH
++#
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++CONFIG_AC97_BUS=y
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++# CONFIG_USB_HIDDEV is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_DEVICE_CLASS=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PPC_OF=y
++CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
++CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
++CONFIG_USB_OHCI_HCD_PCI=y
++CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y
++CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++# CONFIG_USB_UHCI_HCD is not set
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_BERRY_CHARGE is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGET is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++# CONFIG_RTC_DRV_DS1307 is not set
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++
++#
++# SPI RTC drivers
++#
++
++#
++# Platform RTC drivers
++#
++CONFIG_RTC_DRV_CMOS=y
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# on-CPU RTC drivers
++#
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=y
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_CRAMFS=y
++CONFIG_VXFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=y
++# CONFIG_NFSD_V3 is not set
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_BIND34 is not set
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++# CONFIG_NLS_CODEPAGE_437 is not set
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++# CONFIG_NLS_ISO8859_1 is not set
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++CONFIG_NLS_UTF8=m
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_KGDB_CONSOLE is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_HASH=y
++CONFIG_CRYPTO_MANAGER=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=m
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++# CONFIG_CRYPTO_ECB is not set
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=m
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/mpc8610_hpcd_defconfig b/arch/powerpc/configs/mpc8610_hpcd_defconfig
+index 9614d24..2500ef4 100644
+--- a/arch/powerpc/configs/mpc8610_hpcd_defconfig
++++ b/arch/powerpc/configs/mpc8610_hpcd_defconfig
+@@ -696,7 +696,7 @@ CONFIG_SERIAL_8250_RSA=y
+ CONFIG_SERIAL_CORE=y
+ CONFIG_SERIAL_CORE_CONSOLE=y
+ # CONFIG_SERIAL_JSM is not set
+-CONFIG_SERIAL_OF_PLATFORM=y
++# CONFIG_SERIAL_OF_PLATFORM is not set
+ CONFIG_UNIX98_PTYS=y
+ # CONFIG_LEGACY_PTYS is not set
+ # CONFIG_IPMI_HANDLER is not set
+@@ -708,7 +708,60 @@ CONFIG_UNIX98_PTYS=y
+ # CONFIG_RAW_DRIVER is not set
+ # CONFIG_TCG_TPM is not set
+ CONFIG_DEVPORT=y
+-# CONFIG_I2C is not set
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++# CONFIG_I2C_CHARDEV is not set
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
+ 
+ #
+ # SPI support
+@@ -763,7 +816,119 @@ CONFIG_DUMMY_CONSOLE=y
+ #
+ # Sound
+ #
+-# CONFIG_SOUND is not set
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++# CONFIG_SND_SEQUENCER is not set
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++# CONFIG_SND_PCM_OSS_PLUGINS is not set
++# CONFIG_SND_DYNAMIC_MINORS is not set
++# CONFIG_SND_SUPPORT_OLD_API is not set
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++# CONFIG_SND_DUMMY is not set
++# CONFIG_SND_MTPAV is not set
++# CONFIG_SND_SERIAL_U16550 is not set
++# CONFIG_SND_MPU401 is not set
++
++#
++# PCI devices
++#
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALS4000 is not set
++# CONFIG_SND_ALI5451 is not set
++# CONFIG_SND_ATIIXP is not set
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_CS5530 is not set
++# CONFIG_SND_DARLA20 is not set
++# CONFIG_SND_GINA20 is not set
++# CONFIG_SND_LAYLA20 is not set
++# CONFIG_SND_DARLA24 is not set
++# CONFIG_SND_GINA24 is not set
++# CONFIG_SND_LAYLA24 is not set
++# CONFIG_SND_MONA is not set
++# CONFIG_SND_MIA is not set
++# CONFIG_SND_ECHO3G is not set
++# CONFIG_SND_INDIGO is not set
++# CONFIG_SND_INDIGOIO is not set
++# CONFIG_SND_INDIGODJ is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++# CONFIG_SND_FM801 is not set
++# CONFIG_SND_HDA_INTEL is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RIPTIDE is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++
++#
++# ALSA PowerMac devices
++#
++
++#
++# ALSA PowerPC devices
++#
++
++#
++# System on Chip audio support
++#
++CONFIG_SND_SOC=y
++
++#
++# SoC Audio support for SuperH
++#
++
++#
++# ALSA SoC audio for Freescale SOCs
++#
++CONFIG_SND_SOC_MPC8610=y
++CONFIG_SND_SOC_MPC8610_HPCD=y
++CONFIG_SND_SOC_CS4270=y
++CONFIG_SND_SOC_CS4270_VD33_ERRATA=y
++
+ CONFIG_HID_SUPPORT=y
+ CONFIG_HID=y
+ # CONFIG_HID_DEBUG is not set
+diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
+index 292de3d..797f0df 100644
+--- a/arch/powerpc/configs/pasemi_defconfig
++++ b/arch/powerpc/configs/pasemi_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:49:03 2007
++# Linux kernel version: 2.6.24-rc6
++# Tue Jan 15 10:26:10 2008
+ #
+ CONFIG_PPC64=y
+ 
+@@ -152,7 +152,6 @@ CONFIG_PPC_PASEMI=y
+ CONFIG_PPC_PASEMI_IOMMU=y
+ # CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE is not set
+ CONFIG_PPC_PASEMI_MDIO=y
+-CONFIG_ELECTRA_IDE=y
+ # CONFIG_PPC_CELLEB is not set
+ # CONFIG_PPC_PS3 is not set
+ # CONFIG_PPC_CELL is not set
+@@ -256,7 +255,7 @@ CONFIG_PCI_DOMAINS=y
+ CONFIG_PCI_SYSCALL=y
+ # CONFIG_PCIEPORTBUS is not set
+ CONFIG_ARCH_SUPPORTS_MSI=y
+-# CONFIG_PCI_MSI is not set
++CONFIG_PCI_MSI=y
+ CONFIG_PCI_LEGACY=y
+ # CONFIG_PCI_DEBUG is not set
+ CONFIG_PCCARD=y
+@@ -663,7 +662,26 @@ CONFIG_PATA_PCMCIA=y
+ # CONFIG_PATA_VIA is not set
+ # CONFIG_PATA_WINBOND is not set
+ CONFIG_PATA_PLATFORM=y
+-# CONFIG_MD is not set
++CONFIG_PATA_OF_PLATFORM=y
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=y
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++CONFIG_MD_RAID10=y
++CONFIG_MD_RAID456=y
++CONFIG_MD_RAID5_RESHAPE=y
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_DEBUG is not set
++CONFIG_DM_CRYPT=y
++# CONFIG_DM_SNAPSHOT is not set
++# CONFIG_DM_MIRROR is not set
++# CONFIG_DM_ZERO is not set
++# CONFIG_DM_MULTIPATH is not set
++# CONFIG_DM_DELAY is not set
++# CONFIG_DM_UEVENT is not set
+ # CONFIG_FUSION is not set
+ 
+ #
+@@ -1686,6 +1704,10 @@ CONFIG_XMON_DISASSEMBLY=y
+ # CONFIG_KEYS is not set
+ # CONFIG_SECURITY is not set
+ # CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_XOR_BLOCKS=y
++CONFIG_ASYNC_CORE=y
++CONFIG_ASYNC_MEMCPY=y
++CONFIG_ASYNC_XOR=y
+ CONFIG_CRYPTO=y
+ CONFIG_CRYPTO_ALGAPI=y
+ CONFIG_CRYPTO_BLKCIPHER=y
+diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
+index 5760b9f..7695a4c 100644
+--- a/arch/powerpc/configs/ppc64_defconfig
++++ b/arch/powerpc/configs/ppc64_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+ # Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:49:07 2007
++# Fri Dec 21 14:47:29 2007
+ #
+ CONFIG_PPC64=y
+ 
+@@ -211,7 +211,7 @@ CONFIG_MMIO_NVRAM=y
+ CONFIG_MPIC_U3_HT_IRQS=y
+ CONFIG_MPIC_BROKEN_REGREAD=y
+ CONFIG_IBMVIO=y
+-# CONFIG_IBMEBUS is not set
++CONFIG_IBMEBUS=y
+ # CONFIG_PPC_MPC106 is not set
+ CONFIG_PPC_970_NAP=y
+ CONFIG_PPC_INDIRECT_IO=y
+@@ -375,7 +375,7 @@ CONFIG_INET_TUNNEL=y
+ CONFIG_INET_XFRM_MODE_TRANSPORT=y
+ CONFIG_INET_XFRM_MODE_TUNNEL=y
+ CONFIG_INET_XFRM_MODE_BEET=y
+-# CONFIG_INET_LRO is not set
++CONFIG_INET_LRO=m
+ CONFIG_INET_DIAG=y
+ CONFIG_INET_TCP_DIAG=y
+ # CONFIG_TCP_CONG_ADVANCED is not set
+@@ -929,6 +929,7 @@ CONFIG_SPIDER_NET=m
+ CONFIG_NETDEV_10000=y
+ # CONFIG_CHELSIO_T1 is not set
+ # CONFIG_CHELSIO_T3 is not set
++CONFIG_EHEA=m
+ # CONFIG_IXGBE is not set
+ CONFIG_IXGB=m
+ # CONFIG_IXGB_NAPI is not set
+@@ -1558,6 +1559,7 @@ CONFIG_INFINIBAND_ADDR_TRANS=y
+ CONFIG_INFINIBAND_MTHCA=m
+ CONFIG_INFINIBAND_MTHCA_DEBUG=y
+ # CONFIG_INFINIBAND_IPATH is not set
++CONFIG_INFINIBAND_EHCA=m
+ # CONFIG_INFINIBAND_AMSO1100 is not set
+ # CONFIG_MLX4_INFINIBAND is not set
+ CONFIG_INFINIBAND_IPOIB=m
+diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
+index 0b5469f..7994955 100644
+--- a/arch/powerpc/configs/ps3_defconfig
++++ b/arch/powerpc/configs/ps3_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Tue Dec  4 22:49:57 2007
++# Linux kernel version: 2.6.24-rc8
++# Wed Jan 16 14:31:21 2008
+ #
+ CONFIG_PPC64=y
+ 
+@@ -103,6 +103,7 @@ CONFIG_VM_EVENT_COUNTERS=y
+ CONFIG_SLAB=y
+ # CONFIG_SLUB is not set
+ # CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
+ CONFIG_RT_MUTEXES=y
+ # CONFIG_TINY_SHMEM is not set
+ CONFIG_BASE_SMALL=0
+@@ -154,7 +155,6 @@ CONFIG_PPC_PS3=y
+ # CONFIG_PS3_ADVANCED is not set
+ CONFIG_PS3_HTAB_SIZE=20
+ # CONFIG_PS3_DYNAMIC_DMA is not set
+-CONFIG_PS3_USE_LPAR_ADDR=y
+ CONFIG_PS3_VUART=y
+ CONFIG_PS3_PS3AV=y
+ CONFIG_PS3_SYS_MANAGER=y
+@@ -162,6 +162,7 @@ CONFIG_PS3_STORAGE=y
+ CONFIG_PS3_DISK=y
+ CONFIG_PS3_ROM=y
+ CONFIG_PS3_FLASH=y
++CONFIG_PS3_LPM=m
+ CONFIG_PPC_CELL=y
+ # CONFIG_PPC_CELL_NATIVE is not set
+ # CONFIG_PPC_IBM_CELL_BLADE is not set
+@@ -225,7 +226,7 @@ CONFIG_HAVE_MEMORY_PRESENT=y
+ # CONFIG_SPARSEMEM_STATIC is not set
+ CONFIG_SPARSEMEM_EXTREME=y
+ CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+-CONFIG_SPARSEMEM_VMEMMAP=y
++# CONFIG_SPARSEMEM_VMEMMAP is not set
+ CONFIG_MEMORY_HOTPLUG=y
+ CONFIG_MEMORY_HOTPLUG_SPARSE=y
+ CONFIG_SPLIT_PTLOCK_CPUS=4
+@@ -338,7 +339,26 @@ CONFIG_IPV6_SIT=y
+ # CONFIG_NET_PKTGEN is not set
+ # CONFIG_HAMRADIO is not set
+ # CONFIG_IRDA is not set
+-# CONFIG_BT is not set
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++# CONFIG_BT_HCIUART is not set
++# CONFIG_BT_HCIBCM203X is not set
++# CONFIG_BT_HCIBPA10X is not set
++# CONFIG_BT_HCIBFUSB is not set
++# CONFIG_BT_HCIVHCI is not set
+ # CONFIG_AF_RXRPC is not set
+ 
+ #
+@@ -666,14 +686,14 @@ CONFIG_LOGO_LINUX_CLUT224=y
+ #
+ # Sound
+ #
+-CONFIG_SOUND=y
++CONFIG_SOUND=m
+ 
+ #
+ # Advanced Linux Sound Architecture
+ #
+-CONFIG_SND=y
+-CONFIG_SND_TIMER=y
+-CONFIG_SND_PCM=y
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
+ # CONFIG_SND_SEQUENCER is not set
+ # CONFIG_SND_MIXER_OSS is not set
+ # CONFIG_SND_PCM_OSS is not set
+@@ -702,7 +722,7 @@ CONFIG_SND_VERBOSE_PROCFS=y
+ #
+ # ALSA PowerPC devices
+ #
+-CONFIG_SND_PS3=y
++CONFIG_SND_PS3=m
+ CONFIG_SND_PS3_DEFAULT_START_DELAY=2000
+ 
+ #
+@@ -747,7 +767,7 @@ CONFIG_USB_SUPPORT=y
+ CONFIG_USB_ARCH_HAS_HCD=y
+ CONFIG_USB_ARCH_HAS_OHCI=y
+ CONFIG_USB_ARCH_HAS_EHCI=y
+-CONFIG_USB=y
++CONFIG_USB=m
+ # CONFIG_USB_DEBUG is not set
+ 
+ #
+@@ -761,13 +781,13 @@ CONFIG_USB_DEVICEFS=y
+ #
+ # USB Host Controller Drivers
+ #
+-CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_HCD=m
+ # CONFIG_USB_EHCI_SPLIT_ISO is not set
+ # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+ # CONFIG_USB_EHCI_TT_NEWSCHED is not set
+ CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y
+ # CONFIG_USB_ISP116X_HCD is not set
+-CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD=m
+ # CONFIG_USB_OHCI_HCD_PPC_OF is not set
+ # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+ CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
+@@ -1033,7 +1053,8 @@ CONFIG_HAS_IOMEM=y
+ CONFIG_HAS_IOPORT=y
+ CONFIG_HAS_DMA=y
+ CONFIG_INSTRUMENTATION=y
+-# CONFIG_PROFILING is not set
++CONFIG_PROFILING=y
++CONFIG_OPROFILE=m
+ # CONFIG_KPROBES is not set
+ # CONFIG_MARKERS is not set
+ 
+diff --git a/arch/powerpc/configs/rainier_defconfig b/arch/powerpc/configs/rainier_defconfig
+new file mode 100644
+index 0000000..7b95001
+--- /dev/null
++++ b/arch/powerpc/configs/rainier_defconfig
+@@ -0,0 +1,873 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 11:22:40 2007
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++CONFIG_44x=y
++# CONFIG_E200 is not set
++CONFIG_4xx=y
++CONFIG_BOOKE=y
++CONFIG_PTE_64BIT=y
++CONFIG_PHYS_64BIT=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_NOT_COHERENT_CACHE=y
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_PPC_DCR_NATIVE=y
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_PPC_DCR=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++# CONFIG_PPC4xx_PCI_EXPRESS is not set
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_BAMBOO is not set
++# CONFIG_EBONY is not set
++# CONFIG_SEQUOIA is not set
++# CONFIG_TAISHAN is not set
++# CONFIG_KATMAI is not set
++CONFIG_RAINIER=y
++CONFIG_440GRX=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_MATH_EMULATION=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++CONFIG_CMDLINE_BOOL=y
++CONFIG_CMDLINE=""
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="rainier.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_CONSISTENT_START=0xff100000
++CONFIG_CONSISTENT_SIZE=0x00200000
++CONFIG_BOOT_LOAD=0x01000000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++# CONFIG_MTD_BLKDEVS is not set
++# CONFIG_MTD_BLOCK is not set
++# CONFIG_MTD_BLOCK_RO is not set
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++CONFIG_MTD_JEDECPROBE=y
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++# CONFIG_BLK_DEV_LOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=35000
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_XILINX_SYSACE is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++CONFIG_MACINTOSH_DRIVERS=y
++# CONFIG_MAC_EMUMOUSEBTN is not set
++# CONFIG_WINDFARM is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++# CONFIG_NET_ETHERNET is not set
++CONFIG_IBM_NEW_EMAC_ZMII=y
++CONFIG_IBM_NEW_EMAC_RGMII=y
++CONFIG_IBM_NEW_EMAC_EMAC4=y
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++# CONFIG_SERIAL_8250_PCI is not set
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++# CONFIG_SERIAL_8250_MANY_PORTS is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_SERIAL_OF_PLATFORM=y
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++CONFIG_DEBUGGER=y
++# CONFIG_KGDB is not set
++# CONFIG_XMON is not set
++# CONFIG_BDI_SWITCH is not set
++CONFIG_PPC_EARLY_DEBUG=y
++# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
++# CONFIG_PPC_EARLY_DEBUG_G5 is not set
++# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
++# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
++# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
++# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
++# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
++# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
++CONFIG_PPC_EARLY_DEBUG_44x=y
++# CONFIG_PPC_EARLY_DEBUG_40x is not set
++# CONFIG_PPC_EARLY_DEBUG_CPM is not set
++CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0xef600300
++CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x1
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/sbc834x_defconfig b/arch/powerpc/configs/sbc834x_defconfig
+new file mode 100644
+index 0000000..9245bcc
+--- /dev/null
++++ b/arch/powerpc/configs/sbc834x_defconfig
+@@ -0,0 +1,800 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Thu Jan 24 15:54:27 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++CONFIG_6xx=y
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_83xx=y
++CONFIG_PPC_FPU=y
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_32=y
++# CONFIG_PPC_MM_SLICES is not set
++# CONFIG_SMP is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++# CONFIG_KALLSYMS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MULTIPLATFORM is not set
++# CONFIG_PPC_82xx is not set
++CONFIG_PPC_83xx=y
++# CONFIG_PPC_86xx is not set
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8313_RDB is not set
++# CONFIG_MPC832x_MDS is not set
++# CONFIG_MPC832x_RDB is not set
++# CONFIG_MPC834x_MDS is not set
++# CONFIG_MPC834x_ITX is not set
++# CONFIG_MPC836x_MDS is not set
++# CONFIG_MPC837x_MDS is not set
++CONFIG_SBC834x=y
++CONFIG_MPC834x=y
++CONFIG_IPIC=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++CONFIG_BROADCOM_PHY=y
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++CONFIG_GIANFAR=y
++# CONFIG_GFAR_NAPI is not set
++# CONFIG_NETDEV_10000 is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++CONFIG_83xx_WDT=y
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++# CONFIG_DAB is not set
++
++#
++# Graphics support
++#
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++# CONFIG_USB_SUPPORT is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_BIND34 is not set
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=m
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=m
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++# CONFIG_CRYPTO_HW is not set
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/sbc8548_defconfig b/arch/powerpc/configs/sbc8548_defconfig
+new file mode 100644
+index 0000000..3b7fa53
+--- /dev/null
++++ b/arch/powerpc/configs/sbc8548_defconfig
+@@ -0,0 +1,741 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Thu Jan 24 15:19:12 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++CONFIG_PPC_85xx=y
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_85xx=y
++CONFIG_E500=y
++CONFIG_BOOKE=y
++CONFIG_FSL_BOOKE=y
++# CONFIG_PHYS_64BIT is not set
++CONFIG_SPE=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8540_ADS is not set
++# CONFIG_MPC8560_ADS is not set
++# CONFIG_MPC85xx_CDS is not set
++# CONFIG_MPC85xx_MDS is not set
++# CONFIG_MPC85xx_DS is not set
++CONFIG_SBC8548=y
++# CONFIG_SBC8560 is not set
++CONFIG_MPC8540=y
++CONFIG_MPC85xx=y
++# CONFIG_IPIC is not set
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++CONFIG_MATH_EMULATION=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++# CONFIG_SECCOMP is not set
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_FSL_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=y
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++CONFIG_BROADCOM_PHY=y
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IP1000 is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=y
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++# CONFIG_HID_SUPPORT is not set
++# CONFIG_USB_SUPPORT is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++# CONFIG_NFS_V3 is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/sbc8560_defconfig b/arch/powerpc/configs/sbc8560_defconfig
+new file mode 100644
+index 0000000..d89fce0
+--- /dev/null
++++ b/arch/powerpc/configs/sbc8560_defconfig
+@@ -0,0 +1,764 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc4
++# Wed Jan 23 14:59:20 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++CONFIG_PPC_85xx=y
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_85xx=y
++CONFIG_E500=y
++CONFIG_BOOKE=y
++CONFIG_FSL_BOOKE=y
++# CONFIG_PHYS_64BIT is not set
++CONFIG_SPE=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8540_ADS is not set
++# CONFIG_MPC8560_ADS is not set
++# CONFIG_MPC85xx_CDS is not set
++# CONFIG_MPC85xx_MDS is not set
++# CONFIG_MPC85xx_DS is not set
++CONFIG_SBC8560=y
++CONFIG_MPC8560=y
++CONFIG_MPC85xx=y
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++# CONFIG_MATH_EMULATION is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++# CONFIG_SECCOMP is not set
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE=""
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_FSL_SOC=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=y
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++CONFIG_BROADCOM_PHY=y
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++CONFIG_NETDEV_10000=y
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=2
++CONFIG_SERIAL_8250_RUNTIME_UARTS=2
++# CONFIG_SERIAL_8250_EXTENDED is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=y
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++# CONFIG_USB_ARCH_HAS_HCD is not set
++# CONFIG_USB_ARCH_HAS_OHCI is not set
++# CONFIG_USB_ARCH_HAS_EHCI is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_EDAC is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# SPI RTC drivers
++#
++
++#
++# Platform RTC drivers
++#
++# CONFIG_RTC_DRV_CMOS is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++CONFIG_RTC_DRV_M48T59=y
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# on-CPU RTC drivers
++#
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++# CONFIG_NFS_V3 is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++# CONFIG_MSDOS_PARTITION is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_BDI_SWITCH is not set
++CONFIG_PPC_EARLY_DEBUG=y
++# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
++# CONFIG_PPC_EARLY_DEBUG_G5 is not set
++# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
++# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
++# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
++# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
++# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
++# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
++# CONFIG_PPC_EARLY_DEBUG_44x is not set
++# CONFIG_PPC_EARLY_DEBUG_CPM is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/sequoia_defconfig b/arch/powerpc/configs/sequoia_defconfig
+index bc3c086..abbfed6 100644
+--- a/arch/powerpc/configs/sequoia_defconfig
++++ b/arch/powerpc/configs/sequoia_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:49:17 2007
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 11:23:22 2007
+ #
+ # CONFIG_PPC64 is not set
+ 
+@@ -129,6 +129,7 @@ CONFIG_DEFAULT_AS=y
+ # CONFIG_DEFAULT_CFQ is not set
+ # CONFIG_DEFAULT_NOOP is not set
+ CONFIG_DEFAULT_IOSCHED="anticipatory"
++# CONFIG_PPC4xx_PCI_EXPRESS is not set
+ 
+ #
+ # Platform support
+@@ -141,8 +142,10 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
+ # CONFIG_BAMBOO is not set
+ # CONFIG_EBONY is not set
+ CONFIG_SEQUOIA=y
++# CONFIG_TAISHAN is not set
++# CONFIG_KATMAI is not set
++# CONFIG_RAINIER is not set
+ CONFIG_440EPX=y
+-CONFIG_440A=y
+ # CONFIG_MPIC is not set
+ # CONFIG_MPIC_WEIRD is not set
+ # CONFIG_PPC_I8259 is not set
+@@ -446,9 +449,7 @@ CONFIG_MISC_DEVICES=y
+ # CONFIG_FIREWIRE is not set
+ # CONFIG_IEEE1394 is not set
+ # CONFIG_I2O is not set
+-CONFIG_MACINTOSH_DRIVERS=y
+-# CONFIG_MAC_EMUMOUSEBTN is not set
+-# CONFIG_WINDFARM is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
+ CONFIG_NETDEVICES=y
+ # CONFIG_NETDEVICES_MULTIQUEUE is not set
+ # CONFIG_DUMMY is not set
+@@ -459,10 +460,28 @@ CONFIG_NETDEVICES=y
+ # CONFIG_VETH is not set
+ # CONFIG_IP1000 is not set
+ # CONFIG_ARCNET is not set
+-# CONFIG_NET_ETHERNET is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++CONFIG_IBM_NEW_EMAC=y
++CONFIG_IBM_NEW_EMAC_RXB=128
++CONFIG_IBM_NEW_EMAC_TXB=64
++CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
++CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
++CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
++# CONFIG_IBM_NEW_EMAC_DEBUG is not set
+ CONFIG_IBM_NEW_EMAC_ZMII=y
+ CONFIG_IBM_NEW_EMAC_RGMII=y
++# CONFIG_IBM_NEW_EMAC_TAH is not set
+ CONFIG_IBM_NEW_EMAC_EMAC4=y
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
+ CONFIG_NETDEV_1000=y
+ # CONFIG_ACENIC is not set
+ # CONFIG_DL2K is not set
+@@ -811,6 +830,7 @@ CONFIG_PPC_EARLY_DEBUG=y
+ # CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+ # CONFIG_PPC_EARLY_DEBUG_BEAT is not set
+ CONFIG_PPC_EARLY_DEBUG_44x=y
++# CONFIG_PPC_EARLY_DEBUG_40x is not set
+ # CONFIG_PPC_EARLY_DEBUG_CPM is not set
+ CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0xef600300
+ CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x1
+diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
+new file mode 100644
+index 0000000..a034a5e
+--- /dev/null
++++ b/arch/powerpc/configs/storcenter_defconfig
+@@ -0,0 +1,1174 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Tue Jan  8 09:33:54 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++CONFIG_6xx=y
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_PPC_FPU=y
++# CONFIG_ALTIVEC is not set
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_32=y
++# CONFIG_PPC_MM_SLICES is not set
++# CONFIG_SMP is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++# CONFIG_BLK_DEV_INITRD is not set
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++# CONFIG_KALLSYMS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_AS is not set
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
++
++#
++# Platform support
++#
++CONFIG_PPC_MULTIPLATFORM=y
++# CONFIG_PPC_82xx is not set
++# CONFIG_PPC_83xx is not set
++# CONFIG_PPC_86xx is not set
++CONFIG_CLASSIC32=y
++# CONFIG_PPC_CHRP is not set
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_EFIKA is not set
++# CONFIG_PPC_LITE5200 is not set
++# CONFIG_PPC_PMAC is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++CONFIG_EMBEDDED6xx=y
++# CONFIG_LINKSTATION is not set
++CONFIG_STORCENTER=y
++# CONFIG_MPC7448HPC2 is not set
++# CONFIG_PPC_HOLLY is not set
++# CONFIG_PPC_PRPMC2800 is not set
++CONFIG_MPC10X_BRIDGE=y
++CONFIG_MPC10X_OPENPIC=y
++# CONFIG_MPC10X_STORE_GATHERING is not set
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_TAU is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_KEXEC is not set
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++CONFIG_CMDLINE_BOOL=y
++CONFIG_CMDLINE="console=ttyS0,115200"
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++# CONFIG_SECCOMP is not set
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="storcenter.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=m
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_FTL=y
++CONFIG_NFTL=y
++CONFIG_NFTL_RW=y
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++CONFIG_MTD_PHYSMAP=y
++CONFIG_MTD_PHYSMAP_START=0xFF800000
++CONFIG_MTD_PHYSMAP_LEN=0x00800000
++CONFIG_MTD_PHYSMAP_BANKWIDTH=1
++# CONFIG_MTD_PHYSMAP_OF is not set
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++# CONFIG_BLK_DEV_LOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++# CONFIG_BLK_DEV_RAM is not set
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++CONFIG_IDEDISK_MULTI_MODE=y
++# CONFIG_BLK_DEV_IDECD is not set
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++CONFIG_IDE_PROC_FS=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_PLATFORM is not set
++
++#
++# PCI IDE chipsets support
++#
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++CONFIG_IDEPCI_PCIBUS_ORDER=y
++# CONFIG_BLK_DEV_GENERIC is not set
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_JMICRON is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT8213 is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SL82C105 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_BLK_DEV_TC86C001 is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++CONFIG_IDE_ARCH_OBSOLETE_INIT=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_AIC94XX is not set
++# CONFIG_SCSI_DPT_I2O is not set
++# CONFIG_SCSI_ADVANSYS is not set
++# CONFIG_SCSI_ARCMSR is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_STEX is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_QLA_ISCSI is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_SRP is not set
++# CONFIG_ATA is not set
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=y
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++CONFIG_MD_RAID456=y
++CONFIG_MD_RAID5_RESHAPE=y
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++# CONFIG_BLK_DEV_DM is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++CONFIG_DUMMY=m
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++# CONFIG_NET_ETHERNET is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++CONFIG_R8169=y
++# CONFIG_R8169_NAPI is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++# CONFIG_MV643XX_ETH is not set
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++# CONFIG_SERIAL_8250_PCI is not set
++CONFIG_SERIAL_8250_NR_UARTS=2
++CONFIG_SERIAL_8250_RUNTIME_UARTS=2
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=m
++CONFIG_NVRAM=y
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_TINY_USB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++# CONFIG_DAB is not set
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_DEVICE_CLASS=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_HCD_PPC_OF is not set
++# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
++# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++# CONFIG_USB_UHCI_HCD is not set
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++# CONFIG_USB_MON is not set
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_BERRY_CHARGE is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGET is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++CONFIG_RTC_DRV_DS1307=y
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++
++#
++# SPI RTC drivers
++#
++
++#
++# Platform RTC drivers
++#
++# CONFIG_RTC_DRV_CMOS is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# on-CPU RTC drivers
++#
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++CONFIG_XFS_FS=m
++# CONFIG_XFS_QUOTA is not set
++# CONFIG_XFS_SECURITY is not set
++# CONFIG_XFS_POSIX_ACL is not set
++# CONFIG_XFS_RT is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++# CONFIG_NETWORK_FILESYSTEMS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++CONFIG_NLS_UTF8=y
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++# CONFIG_ENABLE_WARN_DEPRECATED is not set
++# CONFIG_ENABLE_MUST_CHECK is not set
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_BOOTX_TEXT is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_XOR_BLOCKS=y
++CONFIG_ASYNC_CORE=y
++CONFIG_ASYNC_MEMCPY=y
++CONFIG_ASYNC_XOR=y
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/stx_gp3_defconfig b/arch/powerpc/configs/stx_gp3_defconfig
+new file mode 100644
+index 0000000..e8137a8
+--- /dev/null
++++ b/arch/powerpc/configs/stx_gp3_defconfig
+@@ -0,0 +1,1183 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Thu Jan 24 02:02:30 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++CONFIG_PPC_85xx=y
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_85xx=y
++CONFIG_E500=y
++CONFIG_BOOKE=y
++CONFIG_FSL_BOOKE=y
++# CONFIG_PHYS_64BIT is not set
++CONFIG_SPE=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++# CONFIG_MODULE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_AS is not set
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8540_ADS is not set
++# CONFIG_MPC8560_ADS is not set
++# CONFIG_MPC85xx_CDS is not set
++# CONFIG_MPC85xx_MDS is not set
++# CONFIG_MPC85xx_DS is not set
++CONFIG_STX_GP3=y
++CONFIG_MPC8560=y
++CONFIG_MPC85xx=y
++# CONFIG_IPIC is not set
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++CONFIG_CPM2=y
++CONFIG_PPC_CPM_NEW_BINDING=y
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_CPM=y
++
++#
++# Kernel options
++#
++CONFIG_HIGHMEM=y
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=m
++CONFIG_MATH_EMULATION=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="stx_gp3_8560.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_FSL_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK_ENABLED is not set
++# CONFIG_NF_CONNTRACK is not set
++CONFIG_NETFILTER_XTABLES=m
++# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
++# CONFIG_NETFILTER_XT_TARGET_MARK is not set
++# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
++# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
++# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
++# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
++# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
++# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
++# CONFIG_NETFILTER_XT_MATCH_ESP is not set
++# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
++# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
++# CONFIG_NETFILTER_XT_MATCH_MAC is not set
++# CONFIG_NETFILTER_XT_MATCH_MARK is not set
++# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
++# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
++# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
++# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
++# CONFIG_NETFILTER_XT_MATCH_REALM is not set
++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
++# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
++# CONFIG_NETFILTER_XT_MATCH_STRING is not set
++# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
++# CONFIG_NETFILTER_XT_MATCH_TIME is not set
++# CONFIG_NETFILTER_XT_MATCH_U32 is not set
++# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_QUEUE is not set
++CONFIG_IP_NF_IPTABLES=m
++# CONFIG_IP_NF_MATCH_IPRANGE is not set
++# CONFIG_IP_NF_MATCH_TOS is not set
++# CONFIG_IP_NF_MATCH_RECENT is not set
++# CONFIG_IP_NF_MATCH_ECN is not set
++# CONFIG_IP_NF_MATCH_AH is not set
++# CONFIG_IP_NF_MATCH_TTL is not set
++# CONFIG_IP_NF_MATCH_OWNER is not set
++# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
++CONFIG_IP_NF_FILTER=m
++# CONFIG_IP_NF_TARGET_REJECT is not set
++# CONFIG_IP_NF_TARGET_LOG is not set
++# CONFIG_IP_NF_TARGET_ULOG is not set
++# CONFIG_IP_NF_MANGLE is not set
++# CONFIG_IP_NF_RAW is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=y
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++# CONFIG_PARPORT_PC_FIFO is not set
++# CONFIG_PARPORT_PC_SUPERIO is not set
++# CONFIG_PARPORT_GSC is not set
++# CONFIG_PARPORT_AX88796 is not set
++# CONFIG_PARPORT_1284 is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_PARIDE is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=m
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=m
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++CONFIG_IDE_PROC_FS=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_PLATFORM is not set
++
++#
++# PCI IDE chipsets support
++#
++# CONFIG_IDEPCI_PCIBUS_ORDER is not set
++# CONFIG_BLK_DEV_GENERIC is not set
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_JMICRON is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT8213 is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SL82C105 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_BLK_DEV_TC86C001 is not set
++# CONFIG_IDE_ARM is not set
++# CONFIG_BLK_DEV_IDEDMA is not set
++CONFIG_IDE_ARCH_OBSOLETE_INIT=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=m
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=m
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=m
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_AIC94XX is not set
++# CONFIG_SCSI_DPT_I2O is not set
++# CONFIG_SCSI_ADVANSYS is not set
++# CONFIG_SCSI_ARCMSR is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_PPA is not set
++# CONFIG_SCSI_IMM is not set
++# CONFIG_SCSI_STEX is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_QLA_ISCSI is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_SRP is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++CONFIG_MARVELL_PHY=y
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++# CONFIG_NET_POCKET is not set
++# CONFIG_FS_ENET is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IP1000 is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PLIP is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1280
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1024
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_EVDEV=m
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_LIFEBOOK=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_APPLETOUCH is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_PARKBD is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_CPM=y
++CONFIG_SERIAL_CPM_CONSOLE=y
++# CONFIG_SERIAL_CPM_SCC1 is not set
++CONFIG_SERIAL_CPM_SCC2=y
++# CONFIG_SERIAL_CPM_SCC3 is not set
++# CONFIG_SERIAL_CPM_SCC4 is not set
++# CONFIG_SERIAL_CPM_SMC1 is not set
++# CONFIG_SERIAL_CPM_SMC2 is not set
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++CONFIG_PRINTER=m
++# CONFIG_LP_CONSOLE is not set
++# CONFIG_PPDEV is not set
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=m
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=m
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=m
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_MPC is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++CONFIG_AGP=m
++CONFIG_DRM=m
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_R128 is not set
++# CONFIG_DRM_RADEON is not set
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=m
++
++#
++# Advanced Linux Sound Architecture
++#
++# CONFIG_SND is not set
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++# CONFIG_AUXDISPLAY is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++# CONFIG_JOLIET is not set
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++# CONFIG_PROC_KCORE is not set
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=m
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++CONFIG_SMB_FS=m
++# CONFIG_SMB_NLS_DEFAULT is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++# CONFIG_NLS_CODEPAGE_437 is not set
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++# CONFIG_NLS_ISO8859_1 is not set
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++CONFIG_CRC_CCITT=y
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=m
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++# CONFIG_KGDB_CONSOLE is not set
++CONFIG_BDI_SWITCH=y
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/taishan_defconfig b/arch/powerpc/configs/taishan_defconfig
+new file mode 100644
+index 0000000..ade84b9
+--- /dev/null
++++ b/arch/powerpc/configs/taishan_defconfig
+@@ -0,0 +1,790 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 11:23:39 2007
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++CONFIG_44x=y
++# CONFIG_E200 is not set
++CONFIG_4xx=y
++CONFIG_BOOKE=y
++CONFIG_PTE_64BIT=y
++CONFIG_PHYS_64BIT=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_NOT_COHERENT_CACHE=y
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_PPC_DCR_NATIVE=y
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_PPC_DCR=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++# CONFIG_PPC4xx_PCI_EXPRESS is not set
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_BAMBOO is not set
++# CONFIG_EBONY is not set
++# CONFIG_SEQUOIA is not set
++CONFIG_TAISHAN=y
++# CONFIG_KATMAI is not set
++# CONFIG_RAINIER is not set
++CONFIG_440GX=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++# CONFIG_MATH_EMULATION is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++CONFIG_CMDLINE_BOOL=y
++CONFIG_CMDLINE=""
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="taishan.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_CONSISTENT_START=0xff100000
++CONFIG_CONSISTENT_SIZE=0x00200000
++CONFIG_BOOT_LOAD=0x01000000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++# CONFIG_MTD is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++# CONFIG_BLK_DEV_LOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=35000
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_XILINX_SYSACE is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++CONFIG_MACINTOSH_DRIVERS=y
++# CONFIG_MAC_EMUMOUSEBTN is not set
++# CONFIG_WINDFARM is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++CONFIG_IBM_NEW_EMAC=y
++CONFIG_IBM_NEW_EMAC_RXB=128
++CONFIG_IBM_NEW_EMAC_TXB=64
++CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
++CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
++CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
++# CONFIG_IBM_NEW_EMAC_DEBUG is not set
++CONFIG_IBM_NEW_EMAC_ZMII=y
++CONFIG_IBM_NEW_EMAC_RGMII=y
++CONFIG_IBM_NEW_EMAC_TAH=y
++CONFIG_IBM_NEW_EMAC_EMAC4=y
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++# CONFIG_SERIAL_8250_PCI is not set
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++# CONFIG_SERIAL_8250_MANY_PORTS is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_SERIAL_OF_PLATFORM=y
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++CONFIG_DEBUGGER=y
++# CONFIG_KGDB is not set
++# CONFIG_XMON is not set
++# CONFIG_BDI_SWITCH is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_PCBC=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/tqm8540_defconfig b/arch/powerpc/configs/tqm8540_defconfig
+new file mode 100644
+index 0000000..732de34
+--- /dev/null
++++ b/arch/powerpc/configs/tqm8540_defconfig
+@@ -0,0 +1,1032 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Fri Jan 25 01:32:05 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++CONFIG_PPC_85xx=y
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_85xx=y
++CONFIG_E500=y
++CONFIG_BOOKE=y
++CONFIG_FSL_BOOKE=y
++# CONFIG_PHYS_64BIT is not set
++CONFIG_SPE=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++# CONFIG_KALLSYMS is not set
++# CONFIG_HOTPLUG is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8540_ADS is not set
++# CONFIG_MPC8560_ADS is not set
++# CONFIG_MPC85xx_CDS is not set
++# CONFIG_MPC85xx_MDS is not set
++# CONFIG_MPC85xx_DS is not set
++# CONFIG_STX_GP3 is not set
++CONFIG_TQM8540=y
++# CONFIG_TQM8541 is not set
++# CONFIG_TQM8555 is not set
++# CONFIG_TQM8560 is not set
++CONFIG_TQM85xx=y
++CONFIG_MPC85xx=y
++# CONFIG_IPIC is not set
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++CONFIG_PPC_CPM_NEW_BINDING=y
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_MATH_EMULATION=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++# CONFIG_PROC_DEVICETREE is not set
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="tqm8540.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_FSL_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=y
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++# CONFIG_MTD_PHYSMAP_OF is not set
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++# CONFIG_BLK_DEV_IDECD is not set
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++CONFIG_IDE_PROC_FS=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_PLATFORM is not set
++
++#
++# PCI IDE chipsets support
++#
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++CONFIG_IDEPCI_PCIBUS_ORDER=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_JMICRON is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT8213 is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SL82C105 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_BLK_DEV_TC86C001 is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++CONFIG_IDE_ARCH_OBSOLETE_INIT=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_TLAN is not set
++# CONFIG_VIA_RHINE is not set
++# CONFIG_SC92031 is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IP1000 is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++CONFIG_SENSORS_DS1337=y
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++CONFIG_SENSORS_LM75=y
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++CONFIG_HWMON_DEBUG_CHIP=y
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++# CONFIG_NFS_V3 is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++# CONFIG_MSDOS_PARTITION is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/configs/tqm8541_defconfig b/arch/powerpc/configs/tqm8541_defconfig
+new file mode 100644
+index 0000000..1aff35f
+--- /dev/null
++++ b/arch/powerpc/configs/tqm8541_defconfig
+@@ -0,0 +1,1044 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Fri Jan 25 01:31:28 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++CONFIG_PPC_85xx=y
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_85xx=y
++CONFIG_E500=y
++CONFIG_BOOKE=y
++CONFIG_FSL_BOOKE=y
++# CONFIG_PHYS_64BIT is not set
++CONFIG_SPE=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++# CONFIG_KALLSYMS is not set
++# CONFIG_HOTPLUG is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8540_ADS is not set
++# CONFIG_MPC8560_ADS is not set
++# CONFIG_MPC85xx_CDS is not set
++# CONFIG_MPC85xx_MDS is not set
++# CONFIG_MPC85xx_DS is not set
++# CONFIG_STX_GP3 is not set
++# CONFIG_TQM8540 is not set
++CONFIG_TQM8541=y
++# CONFIG_TQM8555 is not set
++# CONFIG_TQM8560 is not set
++CONFIG_TQM85xx=y
++CONFIG_MPC85xx=y
++# CONFIG_IPIC is not set
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++CONFIG_CPM2=y
++CONFIG_PPC_CPM_NEW_BINDING=y
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_CPM=y
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_MATH_EMULATION=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++# CONFIG_PROC_DEVICETREE is not set
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="tqm8541.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_FSL_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=y
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++# CONFIG_MTD_PHYSMAP_OF is not set
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++# CONFIG_BLK_DEV_IDECD is not set
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++CONFIG_IDE_PROC_FS=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_PLATFORM is not set
++
++#
++# PCI IDE chipsets support
++#
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++CONFIG_IDEPCI_PCIBUS_ORDER=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_JMICRON is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT8213 is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SL82C105 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_BLK_DEV_TC86C001 is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++CONFIG_IDE_ARCH_OBSOLETE_INIT=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_TLAN is not set
++# CONFIG_VIA_RHINE is not set
++# CONFIG_SC92031 is not set
++# CONFIG_FS_ENET is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IP1000 is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_CPM=y
++CONFIG_SERIAL_CPM_CONSOLE=y
++CONFIG_SERIAL_CPM_SCC1=y
++# CONFIG_SERIAL_CPM_SCC2 is not set
++# CONFIG_SERIAL_CPM_SCC3 is not set
++# CONFIG_SERIAL_CPM_SCC4 is not set
++# CONFIG_SERIAL_CPM_SMC1 is not set
++# CONFIG_SERIAL_CPM_SMC2 is not set
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++CONFIG_SENSORS_DS1337=y
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++CONFIG_SENSORS_LM75=y
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++CONFIG_HWMON_DEBUG_CHIP=y
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++# CONFIG_NFS_V3 is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++# CONFIG_MSDOS_PARTITION is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_KGDB_CONSOLE is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/tqm8555_defconfig b/arch/powerpc/configs/tqm8555_defconfig
+new file mode 100644
+index 0000000..a3af226
+--- /dev/null
++++ b/arch/powerpc/configs/tqm8555_defconfig
+@@ -0,0 +1,1044 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Fri Jan 25 01:15:24 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++CONFIG_PPC_85xx=y
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_85xx=y
++CONFIG_E500=y
++CONFIG_BOOKE=y
++CONFIG_FSL_BOOKE=y
++# CONFIG_PHYS_64BIT is not set
++CONFIG_SPE=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++# CONFIG_KALLSYMS is not set
++# CONFIG_HOTPLUG is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8540_ADS is not set
++# CONFIG_MPC8560_ADS is not set
++# CONFIG_MPC85xx_CDS is not set
++# CONFIG_MPC85xx_MDS is not set
++# CONFIG_MPC85xx_DS is not set
++# CONFIG_STX_GP3 is not set
++# CONFIG_TQM8540 is not set
++# CONFIG_TQM8541 is not set
++CONFIG_TQM8555=y
++# CONFIG_TQM8560 is not set
++CONFIG_TQM85xx=y
++CONFIG_MPC85xx=y
++# CONFIG_IPIC is not set
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++CONFIG_CPM2=y
++CONFIG_PPC_CPM_NEW_BINDING=y
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_CPM=y
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_MATH_EMULATION=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++# CONFIG_PROC_DEVICETREE is not set
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="tqm8555.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_FSL_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=y
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++# CONFIG_MTD_PHYSMAP_OF is not set
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++# CONFIG_BLK_DEV_IDECD is not set
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++CONFIG_IDE_PROC_FS=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_PLATFORM is not set
++
++#
++# PCI IDE chipsets support
++#
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++CONFIG_IDEPCI_PCIBUS_ORDER=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_JMICRON is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT8213 is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SL82C105 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_BLK_DEV_TC86C001 is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++CONFIG_IDE_ARCH_OBSOLETE_INIT=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_TLAN is not set
++# CONFIG_VIA_RHINE is not set
++# CONFIG_SC92031 is not set
++# CONFIG_FS_ENET is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IP1000 is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_CPM=y
++CONFIG_SERIAL_CPM_CONSOLE=y
++CONFIG_SERIAL_CPM_SCC1=y
++# CONFIG_SERIAL_CPM_SCC2 is not set
++# CONFIG_SERIAL_CPM_SCC3 is not set
++# CONFIG_SERIAL_CPM_SCC4 is not set
++# CONFIG_SERIAL_CPM_SMC1 is not set
++# CONFIG_SERIAL_CPM_SMC2 is not set
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++CONFIG_SENSORS_DS1337=y
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++CONFIG_SENSORS_LM75=y
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++CONFIG_HWMON_DEBUG_CHIP=y
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++# CONFIG_NFS_V3 is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++# CONFIG_MSDOS_PARTITION is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_KGDB_CONSOLE is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/tqm8560_defconfig b/arch/powerpc/configs/tqm8560_defconfig
+new file mode 100644
+index 0000000..0832e89
+--- /dev/null
++++ b/arch/powerpc/configs/tqm8560_defconfig
+@@ -0,0 +1,1044 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc8
++# Thu Jan 24 23:50:42 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++CONFIG_PPC_85xx=y
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++# CONFIG_44x is not set
++# CONFIG_E200 is not set
++CONFIG_85xx=y
++CONFIG_E500=y
++CONFIG_BOOKE=y
++CONFIG_FSL_BOOKE=y
++# CONFIG_PHYS_64BIT is not set
++CONFIG_SPE=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFAULT_UIMAGE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++# CONFIG_KALLSYMS is not set
++# CONFIG_HOTPLUG is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++# CONFIG_EPOLL is not set
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_MODULES is not set
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_MPC8540_ADS is not set
++# CONFIG_MPC8560_ADS is not set
++# CONFIG_MPC85xx_CDS is not set
++# CONFIG_MPC85xx_MDS is not set
++# CONFIG_MPC85xx_DS is not set
++# CONFIG_STX_GP3 is not set
++# CONFIG_TQM8540 is not set
++# CONFIG_TQM8541 is not set
++# CONFIG_TQM8555 is not set
++CONFIG_TQM8560=y
++CONFIG_TQM85xx=y
++CONFIG_MPC85xx=y
++# CONFIG_IPIC is not set
++CONFIG_MPIC=y
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++CONFIG_CPM2=y
++CONFIG_PPC_CPM_NEW_BINDING=y
++# CONFIG_FSL_ULI1575 is not set
++CONFIG_CPM=y
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_MATH_EMULATION=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_RESOURCES_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++# CONFIG_PROC_DEVICETREE is not set
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SUSPEND_UP_POSSIBLE=y
++CONFIG_HIBERNATION_UP_POSSIBLE=y
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="tqm8560.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_FSL_SOC=y
++CONFIG_FSL_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY=y
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_BOOT_LOAD=0x00800000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=y
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++# CONFIG_MTD_PHYSMAP_OF is not set
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++# CONFIG_BLK_DEV_IDECD is not set
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++CONFIG_IDE_PROC_FS=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_PLATFORM is not set
++
++#
++# PCI IDE chipsets support
++#
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++CONFIG_IDEPCI_PCIBUS_ORDER=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_JMICRON is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT8213 is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SL82C105 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_BLK_DEV_TC86C001 is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++CONFIG_IDE_ARCH_OBSOLETE_INIT=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++# CONFIG_SCSI is not set
++# CONFIG_SCSI_DMA is not set
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_TLAN is not set
++# CONFIG_VIA_RHINE is not set
++# CONFIG_SC92031 is not set
++# CONFIG_FS_ENET is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IP1000 is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++CONFIG_GIANFAR=y
++CONFIG_GFAR_NAPI=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_CPM=y
++CONFIG_SERIAL_CPM_CONSOLE=y
++CONFIG_SERIAL_CPM_SCC1=y
++# CONFIG_SERIAL_CPM_SCC2 is not set
++# CONFIG_SERIAL_CPM_SCC3 is not set
++# CONFIG_SERIAL_CPM_SCC4 is not set
++# CONFIG_SERIAL_CPM_SMC1 is not set
++# CONFIG_SERIAL_CPM_SMC2 is not set
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_MPC=y
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++CONFIG_SENSORS_DS1337=y
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++CONFIG_SENSORS_LM75=y
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++CONFIG_HWMON_DEBUG_CHIP=y
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++# CONFIG_NFS_V3 is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++# CONFIG_MSDOS_PARTITION is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_KGDB_CONSOLE is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
++CONFIG_PPC_LIB_RHEAP=y
+diff --git a/arch/powerpc/configs/walnut_defconfig b/arch/powerpc/configs/walnut_defconfig
+index 7934463..e431128 100644
+--- a/arch/powerpc/configs/walnut_defconfig
++++ b/arch/powerpc/configs/walnut_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc4
+-# Thu Dec  6 16:49:33 2007
++# Linux kernel version: 2.6.24-rc6
++# Mon Dec 24 11:23:58 2007
+ #
+ # CONFIG_PPC64 is not set
+ 
+@@ -40,7 +40,7 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+ CONFIG_PPC_OF=y
+ CONFIG_OF=y
+-# CONFIG_PPC_UDBG_16550 is not set
++CONFIG_PPC_UDBG_16550=y
+ # CONFIG_GENERIC_TBSYNC is not set
+ CONFIG_AUDIT_ARCH=y
+ CONFIG_GENERIC_BUG=y
+@@ -127,6 +127,7 @@ CONFIG_DEFAULT_AS=y
+ # CONFIG_DEFAULT_CFQ is not set
+ # CONFIG_DEFAULT_NOOP is not set
+ CONFIG_DEFAULT_IOSCHED="anticipatory"
++# CONFIG_PPC4xx_PCI_EXPRESS is not set
+ 
+ #
+ # Platform support
+@@ -136,7 +137,9 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
+ # CONFIG_PPC_CELL is not set
+ # CONFIG_PPC_CELL_NATIVE is not set
+ # CONFIG_PQ2ADS is not set
++# CONFIG_EP405 is not set
+ # CONFIG_KILAUEA is not set
++# CONFIG_MAKALU is not set
+ CONFIG_WALNUT=y
+ # CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
+ CONFIG_405GP=y
+@@ -204,11 +207,17 @@ CONFIG_ISA_DMA_API=y
+ # Bus options
+ #
+ CONFIG_ZONE_DMA=y
+-# CONFIG_PCI is not set
+-# CONFIG_PCI_DOMAINS is not set
+-# CONFIG_PCI_SYSCALL is not set
+-# CONFIG_ARCH_SUPPORTS_MSI is not set
++CONFIG_PPC_INDIRECT_PCI=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++# CONFIG_PCIEPORTBUS is not set
++CONFIG_ARCH_SUPPORTS_MSI=y
++# CONFIG_PCI_MSI is not set
++# CONFIG_PCI_LEGACY is not set
++# CONFIG_PCI_DEBUG is not set
+ # CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
+ 
+ #
+ # Advanced setup
+@@ -373,11 +382,13 @@ CONFIG_MTD_CFI_UTIL=y
+ # CONFIG_MTD_COMPLEX_MAPPINGS is not set
+ # CONFIG_MTD_PHYSMAP is not set
+ CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_INTEL_VR_NOR is not set
+ # CONFIG_MTD_PLATRAM is not set
+ 
+ #
+ # Self-contained MTD device drivers
+ #
++# CONFIG_MTD_PMC551 is not set
+ # CONFIG_MTD_SLRAM is not set
+ # CONFIG_MTD_PHRAM is not set
+ # CONFIG_MTD_MTDRAM is not set
+@@ -400,9 +411,14 @@ CONFIG_OF_DEVICE=y
+ # CONFIG_PARPORT is not set
+ CONFIG_BLK_DEV=y
+ # CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
+ # CONFIG_BLK_DEV_COW_COMMON is not set
+ # CONFIG_BLK_DEV_LOOP is not set
+ # CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
+ CONFIG_BLK_DEV_RAM=y
+ CONFIG_BLK_DEV_RAM_COUNT=16
+ CONFIG_BLK_DEV_RAM_SIZE=35000
+@@ -411,7 +427,10 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+ # CONFIG_ATA_OVER_ETH is not set
+ # CONFIG_XILINX_SYSACE is not set
+ CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
+ # CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
+ # CONFIG_IDE is not set
+ 
+ #
+@@ -423,6 +442,14 @@ CONFIG_MISC_DEVICES=y
+ # CONFIG_SCSI_NETLINK is not set
+ # CONFIG_ATA is not set
+ # CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
+ # CONFIG_MACINTOSH_DRIVERS is not set
+ CONFIG_NETDEVICES=y
+ # CONFIG_NETDEVICES_MULTIQUEUE is not set
+@@ -432,9 +459,17 @@ CONFIG_NETDEVICES=y
+ # CONFIG_EQUALIZER is not set
+ # CONFIG_TUN is not set
+ # CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
+ # CONFIG_PHYLIB is not set
+ CONFIG_NET_ETHERNET=y
+ # CONFIG_MII is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
+ CONFIG_IBM_NEW_EMAC=y
+ CONFIG_IBM_NEW_EMAC_RXB=128
+ CONFIG_IBM_NEW_EMAC_TXB=64
+@@ -446,9 +481,38 @@ CONFIG_IBM_NEW_EMAC_ZMII=y
+ # CONFIG_IBM_NEW_EMAC_RGMII is not set
+ # CONFIG_IBM_NEW_EMAC_TAH is not set
+ # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_NET_PCI is not set
+ # CONFIG_B44 is not set
+ CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
+ CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
+ 
+ #
+ # Wireless LAN
+@@ -456,6 +520,8 @@ CONFIG_NETDEV_10000=y
+ # CONFIG_WLAN_PRE80211 is not set
+ # CONFIG_WLAN_80211 is not set
+ # CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
+ # CONFIG_PPP is not set
+ # CONFIG_SLIP is not set
+ # CONFIG_SHAPER is not set
+@@ -487,6 +553,7 @@ CONFIG_NETDEV_10000=y
+ #
+ CONFIG_SERIAL_8250=y
+ CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
+ CONFIG_SERIAL_8250_NR_UARTS=4
+ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+ CONFIG_SERIAL_8250_EXTENDED=y
+@@ -501,6 +568,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
+ # CONFIG_SERIAL_UARTLITE is not set
+ CONFIG_SERIAL_CORE=y
+ CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
+ CONFIG_SERIAL_OF_PLATFORM=y
+ CONFIG_UNIX98_PTYS=y
+ CONFIG_LEGACY_PTYS=y
+@@ -510,8 +578,10 @@ CONFIG_LEGACY_PTY_COUNT=256
+ # CONFIG_NVRAM is not set
+ # CONFIG_GEN_RTC is not set
+ # CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
+ # CONFIG_RAW_DRIVER is not set
+ # CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
+ # CONFIG_I2C is not set
+ 
+ #
+@@ -545,6 +615,8 @@ CONFIG_SSB_POSSIBLE=y
+ #
+ # Graphics support
+ #
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
+ # CONFIG_VGASTATE is not set
+ CONFIG_VIDEO_OUTPUT_CONTROL=m
+ # CONFIG_FB is not set
+@@ -560,9 +632,10 @@ CONFIG_VIDEO_OUTPUT_CONTROL=m
+ #
+ # CONFIG_SOUND is not set
+ CONFIG_USB_SUPPORT=y
+-# CONFIG_USB_ARCH_HAS_HCD is not set
+-# CONFIG_USB_ARCH_HAS_OHCI is not set
+-# CONFIG_USB_ARCH_HAS_EHCI is not set
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
+ 
+ #
+ # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+@@ -574,6 +647,7 @@ CONFIG_USB_SUPPORT=y
+ # CONFIG_USB_GADGET is not set
+ # CONFIG_MMC is not set
+ # CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
+ # CONFIG_EDAC is not set
+ # CONFIG_RTC_CLASS is not set
+ 
+diff --git a/arch/powerpc/configs/warp_defconfig b/arch/powerpc/configs/warp_defconfig
+new file mode 100644
+index 0000000..312557b
+--- /dev/null
++++ b/arch/powerpc/configs/warp_defconfig
+@@ -0,0 +1,1057 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc6
++# Tue Jan  8 12:23:23 2008
++#
++# CONFIG_PPC64 is not set
++
++#
++# Processor support
++#
++# CONFIG_6xx is not set
++# CONFIG_PPC_85xx is not set
++# CONFIG_PPC_8xx is not set
++# CONFIG_40x is not set
++CONFIG_44x=y
++# CONFIG_E200 is not set
++CONFIG_PPC_FPU=y
++CONFIG_4xx=y
++CONFIG_BOOKE=y
++CONFIG_PTE_64BIT=y
++CONFIG_PHYS_64BIT=y
++# CONFIG_PPC_MM_SLICES is not set
++CONFIG_NOT_COHERENT_CACHE=y
++CONFIG_PPC32=y
++CONFIG_WORD_SIZE=32
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_NVRAM=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_OF=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_PPC_DCR_NATIVE=y
++# CONFIG_PPC_DCR_MMIO is not set
++CONFIG_PPC_DCR=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION="-pika"
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++# CONFIG_HOTPLUG is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Platform support
++#
++# CONFIG_PPC_MPC52xx is not set
++# CONFIG_PPC_MPC5200 is not set
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++# CONFIG_BAMBOO is not set
++# CONFIG_EBONY is not set
++# CONFIG_SEQUOIA is not set
++# CONFIG_TAISHAN is not set
++# CONFIG_KATMAI is not set
++# CONFIG_RAINIER is not set
++CONFIG_WARP=y
++CONFIG_440EP=y
++CONFIG_IBM440EP_ERR42=y
++# CONFIG_MPIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_PPC_I8259 is not set
++# CONFIG_PPC_RTAS is not set
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++# CONFIG_PPC_INDIRECT_IO is not set
++# CONFIG_GENERIC_IOMAP is not set
++# CONFIG_CPU_FREQ is not set
++# CONFIG_CPM2 is not set
++# CONFIG_FSL_ULI1575 is not set
++
++#
++# Kernel options
++#
++# CONFIG_HIGHMEM is not set
++# CONFIG_TICK_ONESHOT is not set
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++# CONFIG_HZ_100 is not set
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_300 is not set
++CONFIG_HZ_1000=y
++CONFIG_HZ=1000
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++# CONFIG_MATH_EMULATION is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_PROC_DEVICETREE=y
++CONFIG_CMDLINE_BOOL=y
++CONFIG_CMDLINE="ip=on"
++CONFIG_SECCOMP=y
++CONFIG_WANT_DEVICE_TREE=y
++CONFIG_DEVICE_TREE="warp.dts"
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++# CONFIG_PCI is not set
++# CONFIG_PCI_DOMAINS is not set
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++
++#
++# Advanced setup
++#
++# CONFIG_ADVANCED_OPTIONS is not set
++
++#
++# Default settings for advanced configuration options are used
++#
++CONFIG_HIGHMEM_START=0xfe000000
++CONFIG_LOWMEM_SIZE=0x30000000
++CONFIG_KERNEL_START=0xc0000000
++CONFIG_TASK_SIZE=0xc0000000
++CONFIG_CONSISTENT_START=0xff100000
++CONFIG_CONSISTENT_SIZE=0x00200000
++CONFIG_BOOT_LOAD=0x01000000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK_ENABLED is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_QUEUE is not set
++# CONFIG_IP_NF_IPTABLES is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++CONFIG_VLAN_8021Q=y
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++# CONFIG_STANDALONE is not set
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++CONFIG_MTD_OOPS=m
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++CONFIG_MTD_CFI_AMDSTD=y
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++CONFIG_MTD_NAND=y
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++CONFIG_MTD_NAND_ECC_SMC=y
++# CONFIG_MTD_NAND_MUSEUM_IDS is not set
++CONFIG_MTD_NAND_IDS=y
++CONFIG_MTD_NAND_NDFC=y
++# CONFIG_MTD_NAND_DISKONCHIP is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++# CONFIG_MTD_NAND_PLATFORM is not set
++# CONFIG_MTD_ALAUDA is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
++CONFIG_OF_DEVICE=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++# CONFIG_BLK_DEV_LOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_XILINX_SYSACE is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++# CONFIG_SCSI_WAIT_SCAN is not set
++
++#
++# SCSI Transports
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++# CONFIG_SCSI_LOWLEVEL is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++CONFIG_IBM_NEW_EMAC=y
++CONFIG_IBM_NEW_EMAC_RXB=128
++CONFIG_IBM_NEW_EMAC_TXB=64
++CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
++CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
++CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
++# CONFIG_IBM_NEW_EMAC_DEBUG is not set
++CONFIG_IBM_NEW_EMAC_ZMII=y
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_B44 is not set
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++# CONFIG_INPUT is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++# CONFIG_SERIAL_8250_MANY_PORTS is not set
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_OF_PLATFORM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++# CONFIG_I2C_CHARDEV is not set
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_IBM_IIC=y
++# CONFIG_I2C_MPC is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_TINY_USB is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++CONFIG_SENSORS_EEPROM=y
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_M41T00 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++CONFIG_SENSORS_AD7414=y
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++# CONFIG_DAB is not set
++
++#
++# Graphics support
++#
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++# CONFIG_USB_ARCH_HAS_EHCI is not set
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++CONFIG_USB_DEVICE_CLASS=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PPC_OF=y
++CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
++# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set
++CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y
++CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_BERRY_CHARGE is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGET is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++CONFIG_MMC=m
++# CONFIG_MMC_DEBUG is not set
++# CONFIG_MMC_UNSAFE_RESUME is not set
++
++#
++# MMC/SD Card Drivers
++#
++CONFIG_MMC_BLOCK=m
++CONFIG_MMC_BLOCK_BOUNCE=y
++# CONFIG_SDIO_UART is not set
++
++#
++# MMC/SD Host Controller Drivers
++#
++# CONFIG_MMC_WBSD is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_EDAC is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4DEV_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++# CONFIG_TMPFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++CONFIG_NLS_CODEPAGE_850=y
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++CONFIG_NLS_ISO8859_15=y
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++CONFIG_NLS_UTF8=y
++# CONFIG_DLM is not set
++# CONFIG_UCC_SLOW is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++CONFIG_CRC_CCITT=y
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_TIMER_STATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUGGER is not set
++CONFIG_BDI_SWITCH=y
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++# CONFIG_PPC_CLOCK is not set
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index ca51f0c..58dbfef 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -3,7 +3,7 @@
+ #
+ 
+ ifeq ($(CONFIG_PPC64),y)
+-EXTRA_CFLAGS	+= -mno-minimal-toc
++CFLAGS_prom_init.o	+= -mno-minimal-toc
+ endif
+ ifeq ($(CONFIG_PPC32),y)
+ CFLAGS_prom_init.o      += -fPIC
+@@ -70,6 +70,7 @@ pci64-$(CONFIG_PPC64)		+= pci_dn.o isa-bridge.o
+ obj-$(CONFIG_PCI)		+= pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
+ 				   pci-common.o
+ obj-$(CONFIG_PCI_MSI)		+= msi.o
++obj-$(CONFIG_RAPIDIO)		+= rio.o
+ obj-$(CONFIG_KEXEC)		+= machine_kexec.o crash.o \
+ 				   machine_kexec_$(CONFIG_WORD_SIZE).o
+ obj-$(CONFIG_AUDIT)		+= audit.o
+@@ -91,3 +92,13 @@ obj-$(CONFIG_PPC64)		+= $(obj64-y)
+ 
+ extra-$(CONFIG_PPC_FPU)		+= fpu.o
+ extra-$(CONFIG_PPC64)		+= entry_64.o
++
++extra-y				+= systbl_chk.i
++$(obj)/systbl.o:		systbl_chk
++
++quiet_cmd_systbl_chk = CALL    $<
++      cmd_systbl_chk = $(CONFIG_SHELL) $< $(obj)/systbl_chk.i
++
++PHONY += systbl_chk
++systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
++	$(call cmd,systbl_chk)
+diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
+index 9c74fdf..80e2eef 100644
+--- a/arch/powerpc/kernel/btext.c
++++ b/arch/powerpc/kernel/btext.c
+@@ -236,7 +236,7 @@ int __init btext_find_display(int allow_nonstdout)
+ 	if (rc == 0 || !allow_nonstdout)
+ 		return rc;
+ 
+-	for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
++	for_each_node_by_type(np, "display") {
+ 		if (of_get_property(np, "linux,opened", NULL)) {
+ 			printk("trying %s ...\n", np->full_name);
+ 			rc = btext_initialize(np);
+diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
+index 8e1812e..6250443 100644
+--- a/arch/powerpc/kernel/cpu_setup_44x.S
++++ b/arch/powerpc/kernel/cpu_setup_44x.S
+@@ -23,11 +23,24 @@ _GLOBAL(__setup_cpu_440epx)
+ 	mflr	r4
+ 	bl	__init_fpu_44x
+ 	bl	__plb_disable_wrp
++	bl	__fixup_440A_mcheck
+ 	mtlr	r4
+ 	blr
+ _GLOBAL(__setup_cpu_440grx)
+-	b	__plb_disable_wrp
++	mflr	r4
++	bl	__plb_disable_wrp
++	bl	__fixup_440A_mcheck
++	mtlr	r4
++	blr
++_GLOBAL(__setup_cpu_440gx)
++_GLOBAL(__setup_cpu_440spe)
++	b	__fixup_440A_mcheck
+ 
++ /* Temporary fixup for arch/ppc until we kill the whole thing */
++#ifndef CONFIG_PPC_MERGE
++_GLOBAL(__fixup_440A_mcheck)
++	blr
++#endif
+ 
+ /* enable APU between CPU and FPU */
+ _GLOBAL(__init_fpu_44x)
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index 9ed351f..a4c2771 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -33,7 +33,9 @@ EXPORT_SYMBOL(cur_cpu_spec);
+ #ifdef CONFIG_PPC32
+ extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec);
+ extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
++extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
+ extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
++extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
+ extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
+ extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
+ extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
+@@ -85,6 +87,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/power3",
+ 		.oprofile_type		= PPC_OPROFILE_RS64,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power3",
+ 	},
+ 	{	/* Power3+ */
+@@ -99,6 +102,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/power3",
+ 		.oprofile_type		= PPC_OPROFILE_RS64,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power3",
+ 	},
+ 	{	/* Northstar */
+@@ -113,6 +117,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/rs64",
+ 		.oprofile_type		= PPC_OPROFILE_RS64,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "rs64",
+ 	},
+ 	{	/* Pulsar */
+@@ -127,6 +132,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/rs64",
+ 		.oprofile_type		= PPC_OPROFILE_RS64,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "rs64",
+ 	},
+ 	{	/* I-star */
+@@ -141,6 +147,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/rs64",
+ 		.oprofile_type		= PPC_OPROFILE_RS64,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "rs64",
+ 	},
+ 	{	/* S-star */
+@@ -155,6 +162,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/rs64",
+ 		.oprofile_type		= PPC_OPROFILE_RS64,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "rs64",
+ 	},
+ 	{	/* Power4 */
+@@ -169,6 +177,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/power4",
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power4",
+ 	},
+ 	{	/* Power4+ */
+@@ -183,6 +192,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/power4",
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power4",
+ 	},
+ 	{	/* PPC970 */
+@@ -200,6 +210,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_restore		= __restore_cpu_ppc970,
+ 		.oprofile_cpu_type	= "ppc64/970",
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc970",
+ 	},
+ 	{	/* PPC970FX */
+@@ -217,6 +228,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_restore		= __restore_cpu_ppc970,
+ 		.oprofile_cpu_type	= "ppc64/970",
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc970",
+ 	},
+ 	{	/* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
+@@ -234,6 +246,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_restore		= __restore_cpu_ppc970,
+ 		.oprofile_cpu_type	= "ppc64/970MP",
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc970",
+ 	},
+ 	{	/* PPC970MP */
+@@ -251,6 +264,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_restore		= __restore_cpu_ppc970,
+ 		.oprofile_cpu_type	= "ppc64/970MP",
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc970",
+ 	},
+ 	{	/* PPC970GX */
+@@ -267,6 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_ppc970,
+ 		.oprofile_cpu_type	= "ppc64/970",
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc970",
+ 	},
+ 	{	/* Power5 GR */
+@@ -286,6 +301,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		 */
+ 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
+ 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power5",
+ 	},
+ 	{	/* Power5++ */
+@@ -301,6 +317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
+ 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
+ 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power5+",
+ 	},
+ 	{	/* Power5 GS */
+@@ -317,6 +334,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.oprofile_type		= PPC_OPROFILE_POWER4,
+ 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
+ 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power5+",
+ 	},
+ 	{	/* POWER6 in P5+ mode; 2.04-compliant processor */
+@@ -327,6 +345,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_POWER5_PLUS,
+ 		.icache_bsize		= 128,
+ 		.dcache_bsize		= 128,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power5+",
+ 	},
+ 	{	/* Power6 */
+@@ -346,6 +365,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
+ 		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
+ 			POWER6_MMCRA_OTHER,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power6x",
+ 	},
+ 	{	/* 2.05-compliant processor, i.e. Power6 "architected" mode */
+@@ -356,6 +376,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_POWER6,
+ 		.icache_bsize		= 128,
+ 		.dcache_bsize		= 128,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power6",
+ 	},
+ 	{	/* Cell Broadband Engine */
+@@ -372,6 +393,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.pmc_type		= PPC_PMC_IBM,
+ 		.oprofile_cpu_type	= "ppc64/cell-be",
+ 		.oprofile_type		= PPC_OPROFILE_CELL,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc-cell-be",
+ 	},
+ 	{	/* PA Semi PA6T */
+@@ -388,6 +410,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_restore		= __restore_cpu_pa6t,
+ 		.oprofile_cpu_type	= "ppc64/pa6t",
+ 		.oprofile_type		= PPC_OPROFILE_PA6T,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "pa6t",
+ 	},
+ 	{	/* default match */
+@@ -400,6 +423,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 128,
+ 		.num_pmcs		= 6,
+ 		.pmc_type		= PPC_PMC_IBM,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "power4",
+ 	}
+ #endif	/* CONFIG_PPC64 */
+@@ -414,6 +438,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc601",
+ 	},
+ 	{	/* 603 */
+@@ -425,6 +450,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_603,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc603",
+ 	},
+ 	{	/* 603e */
+@@ -436,6 +462,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_603,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc603",
+ 	},
+ 	{	/* 603ev */
+@@ -447,6 +474,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_603,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc603",
+ 	},
+ 	{	/* 604 */
+@@ -459,6 +487,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 2,
+ 		.cpu_setup		= __setup_cpu_604,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc604",
+ 	},
+ 	{	/* 604e */
+@@ -471,6 +500,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_604,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc604",
+ 	},
+ 	{	/* 604r */
+@@ -483,6 +513,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_604,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc604",
+ 	},
+ 	{	/* 604ev */
+@@ -495,6 +526,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_604,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc604",
+ 	},
+ 	{	/* 740/750 (0x4202, don't support TAU ?) */
+@@ -507,6 +539,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750CX (80100 and 8010x?) */
+@@ -519,6 +552,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750cx,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750CX (82201 and 82202) */
+@@ -531,6 +565,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750cx,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750CXe (82214) */
+@@ -543,6 +578,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750cx,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750CXe "Gekko" (83214) */
+@@ -555,6 +591,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750cx,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750CL */
+@@ -567,6 +604,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 745/755 */
+@@ -579,6 +617,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750FX rev 1.x */
+@@ -591,6 +630,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750FX rev 2.0 must disable HID0[DPM] */
+@@ -603,6 +643,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750FX (All revs except 2.0) */
+@@ -615,6 +656,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750fx,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 750GX */
+@@ -627,6 +669,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750fx,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 740/750 (L2CR bit need fixup for 740) */
+@@ -639,6 +682,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_750,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc750",
+ 	},
+ 	{	/* 7400 rev 1.1 ? (no TAU) */
+@@ -652,6 +696,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_7400,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7400",
+ 	},
+ 	{	/* 7400 */
+@@ -665,6 +710,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_7400,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7400",
+ 	},
+ 	{	/* 7410 */
+@@ -678,6 +724,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.dcache_bsize		= 32,
+ 		.num_pmcs		= 4,
+ 		.cpu_setup		= __setup_cpu_7410,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7400",
+ 	},
+ 	{	/* 7450 2.0 - no doze/nap */
+@@ -693,6 +740,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7450 2.1 */
+@@ -708,6 +756,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7450 2.3 and newer */
+@@ -723,6 +772,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7455 rev 1.x */
+@@ -738,6 +788,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7455 rev 2.0 */
+@@ -753,6 +804,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7455 others */
+@@ -768,6 +820,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7447/7457 Rev 1.0 */
+@@ -783,6 +836,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7447/7457 Rev 1.1 */
+@@ -798,6 +852,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7447/7457 Rev 1.2 and later */
+@@ -812,6 +867,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7447A */
+@@ -827,6 +883,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 7448 */
+@@ -842,6 +899,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_745x,
+ 		.oprofile_cpu_type      = "ppc/7450",
+ 		.oprofile_type		= PPC_OPROFILE_G4,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc7450",
+ 	},
+ 	{	/* 82xx (8240, 8245, 8260 are all 603e cores) */
+@@ -853,6 +911,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_603,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc603",
+ 	},
+ 	{	/* All G2_LE (603e core, plus some) have the same pvr */
+@@ -864,6 +923,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_603,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc603",
+ 	},
+ 	{	/* e300c1 (a 603e core, plus some) on 83xx */
+@@ -875,6 +935,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_603,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc603",
+ 	},
+ 	{	/* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
+@@ -886,9 +947,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_603,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc603",
+ 	},
+-	{	/* e300c3 on 83xx  */
++	{	/* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
+ 		.pvr_mask		= 0x7fff0000,
+ 		.pvr_value		= 0x00850000,
+ 		.cpu_name		= "e300c3",
+@@ -899,6 +961,18 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_setup		= __setup_cpu_603,
+ 		.platform		= "ppc603",
+ 	},
++	{	/* e300c4 (e300c1, plus one IU) */
++		.pvr_mask		= 0x7fff0000,
++		.pvr_value		= 0x00860000,
++		.cpu_name		= "e300c4",
++		.cpu_features		= CPU_FTRS_E300,
++		.cpu_user_features	= COMMON_USER,
++		.icache_bsize		= 32,
++		.dcache_bsize		= 32,
++		.cpu_setup		= __setup_cpu_603,
++		.machine_check		= machine_check_generic,
++		.platform		= "ppc603",
++	},
+ 	{	/* default match, we assume split I/D cache & TB (non-601)... */
+ 		.pvr_mask		= 0x00000000,
+ 		.pvr_value		= 0x00000000,
+@@ -907,6 +981,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_generic,
+ 		.platform		= "ppc603",
+ 	},
+ #endif /* CLASSIC_PPC */
+@@ -933,6 +1008,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ 		.icache_bsize		= 16,
+ 		.dcache_bsize		= 16,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc403",
+ 	},
+ 	{	/* 403GCX */
+@@ -944,6 +1020,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		 	PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
+ 		.icache_bsize		= 16,
+ 		.dcache_bsize		= 16,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc403",
+ 	},
+ 	{	/* 403G ?? */
+@@ -954,6 +1031,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ 		.icache_bsize		= 16,
+ 		.dcache_bsize		= 16,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc403",
+ 	},
+ 	{	/* 405GP */
+@@ -965,6 +1043,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* STB 03xxx */
+@@ -976,6 +1055,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* STB 04xxx */
+@@ -987,6 +1067,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* NP405L */
+@@ -998,6 +1079,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* NP4GS3 */
+@@ -1009,6 +1091,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{   /* NP405H */
+@@ -1020,6 +1103,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* 405GPr */
+@@ -1031,6 +1115,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{   /* STBx25xx */
+@@ -1042,6 +1127,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* 405LP */
+@@ -1052,6 +1138,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* Xilinx Virtex-II Pro  */
+@@ -1063,6 +1150,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* Xilinx Virtex-4 FX */
+@@ -1074,6 +1162,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* 405EP */
+@@ -1085,17 +1174,31 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 	{	/* 405EX */
+-		.pvr_mask		= 0xffff0000,
+-		.pvr_value		= 0x12910000,
++		.pvr_mask		= 0xffff0004,
++		.pvr_value		= 0x12910004,
+ 		.cpu_name		= "405EX",
+ 		.cpu_features		= CPU_FTRS_40X,
+ 		.cpu_user_features	= PPC_FEATURE_32 |
+ 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
++		.platform		= "ppc405",
++	},
++	{	/* 405EXr */
++		.pvr_mask		= 0xffff0004,
++		.pvr_value		= 0x12910000,
++		.cpu_name		= "405EXr",
++		.cpu_features		= CPU_FTRS_40X,
++		.cpu_user_features	= PPC_FEATURE_32 |
++			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
++		.icache_bsize		= 32,
++		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc405",
+ 	},
+ 
+@@ -1109,6 +1212,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
+@@ -1120,6 +1224,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_440ep,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc440",
+ 	},
+ 	{
+@@ -1130,6 +1235,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
++		.platform		= "ppc440",
++	},
++	{ /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */
++		.pvr_mask		= 0xf0000ff7,
++		.pvr_value		= 0x400008d4,
++		.cpu_name		= "440EP Rev. C",
++		.cpu_features		= CPU_FTRS_44X,
++		.cpu_user_features	= COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
++		.icache_bsize		= 32,
++		.dcache_bsize		= 32,
++		.cpu_setup		= __setup_cpu_440ep,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
+@@ -1141,6 +1259,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_440ep,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* 440GRX */
+@@ -1152,6 +1271,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_440grx,
++		.machine_check		= machine_check_440A,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */
+@@ -1163,6 +1283,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
+ 		.cpu_setup		= __setup_cpu_440epx,
++		.machine_check		= machine_check_440A,
+ 		.platform		= "ppc440",
+ 	},
+ 	{	/* 440GP Rev. B */
+@@ -1173,6 +1294,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc440gp",
+ 	},
+ 	{	/* 440GP Rev. C */
+@@ -1183,6 +1305,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc440gp",
+ 	},
+ 	{ /* 440GX Rev. A */
+@@ -1193,6 +1316,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.cpu_setup		= __setup_cpu_440gx,
++		.machine_check		= machine_check_440A,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* 440GX Rev. B */
+@@ -1203,6 +1328,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.cpu_setup		= __setup_cpu_440gx,
++		.machine_check		= machine_check_440A,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* 440GX Rev. C */
+@@ -1213,6 +1340,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.cpu_setup		= __setup_cpu_440gx,
++		.machine_check		= machine_check_440A,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* 440GX Rev. F */
+@@ -1223,6 +1352,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.cpu_setup		= __setup_cpu_440gx,
++		.machine_check		= machine_check_440A,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* 440SP Rev. A */
+@@ -1233,6 +1364,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_4xx,
+ 		.platform		= "ppc440",
+ 	},
+ 	{ /* 440SPe Rev. A */
+@@ -1243,6 +1375,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features      = COMMON_USER_BOOKE,
+ 		.icache_bsize           = 32,
+ 		.dcache_bsize           = 32,
++		.cpu_setup		= __setup_cpu_440spe,
++		.machine_check		= machine_check_440A,
+ 		.platform               = "ppc440",
+ 	},
+ 	{ /* 440SPe Rev. B */
+@@ -1253,10 +1387,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.cpu_user_features	= COMMON_USER_BOOKE,
+ 		.icache_bsize		= 32,
+ 		.dcache_bsize		= 32,
++		.cpu_setup		= __setup_cpu_440spe,
++		.machine_check		= machine_check_440A,
+ 		.platform		= "ppc440",
+ 	},
+ #endif /* CONFIG_44x */
+ #ifdef CONFIG_FSL_BOOKE
++#ifdef CONFIG_E200
+ 	{	/* e200z5 */
+ 		.pvr_mask		= 0xfff00000,
+ 		.pvr_value		= 0x81000000,
+@@ -1267,6 +1404,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_EFP_SINGLE |
+ 			PPC_FEATURE_UNIFIED_CACHE,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_e200,
+ 		.platform		= "ppc5554",
+ 	},
+ 	{	/* e200z6 */
+@@ -1280,8 +1418,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 			PPC_FEATURE_HAS_EFP_SINGLE_COMP |
+ 			PPC_FEATURE_UNIFIED_CACHE,
+ 		.dcache_bsize		= 32,
++		.machine_check		= machine_check_e200,
+ 		.platform		= "ppc5554",
+ 	},
++#elif defined(CONFIG_E500)
+ 	{	/* e500 */
+ 		.pvr_mask		= 0xffff0000,
+ 		.pvr_value		= 0x80200000,
+@@ -1296,6 +1436,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.num_pmcs		= 4,
+ 		.oprofile_cpu_type	= "ppc/e500",
+ 		.oprofile_type		= PPC_OPROFILE_BOOKE,
++		.machine_check		= machine_check_e500,
+ 		.platform		= "ppc8540",
+ 	},
+ 	{	/* e500v2 */
+@@ -1313,9 +1454,11 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ 		.num_pmcs		= 4,
+ 		.oprofile_cpu_type	= "ppc/e500",
+ 		.oprofile_type		= PPC_OPROFILE_BOOKE,
++		.machine_check		= machine_check_e500,
+ 		.platform		= "ppc8548",
+ 	},
+ #endif
++#endif
+ #if !CLASSIC_PPC
+ 	{	/* default match */
+ 		.pvr_mask		= 0x00000000,
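
The run of ".machine_check = ..." one-liners above threads a per-family machine
check handler (machine_check_generic, machine_check_4xx, machine_check_440A,
machine_check_e200, machine_check_e500) through the cpu_spec table, so the trap
code can pick the handler for the running CPU at boot instead of relying on
compile-time #ifdefs. As a rough illustration only -- not code from this patch,
and every name except cur_cpu_spec and the machine_check member is invented --
the consumer side amounts to a table-driven dispatch:

#include <asm/cputable.h>
#include <asm/ptrace.h>

static int dispatch_machine_check(struct pt_regs *regs)
{
	/* prefer the handler registered for this CPU family, if any */
	if (cur_cpu_spec && cur_cpu_spec->machine_check)
		return cur_cpu_spec->machine_check(regs);
	return 0;	/* not recovered */
}
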
+diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
+index 77c749a..571132e 100644
+--- a/arch/powerpc/kernel/crash.c
++++ b/arch/powerpc/kernel/crash.c
+@@ -32,6 +32,8 @@
+ #include <asm/lmb.h>
+ #include <asm/firmware.h>
+ #include <asm/smp.h>
++#include <asm/system.h>
++#include <asm/setjmp.h>
+ 
+ #ifdef DEBUG
+ #include <asm/udbg.h>
+@@ -45,6 +47,11 @@ int crashing_cpu = -1;
+ static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+ cpumask_t cpus_in_sr = CPU_MASK_NONE;
+ 
++#define CRASH_HANDLER_MAX 1
++/* NULL terminated list of shutdown handles */
++static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
++static DEFINE_SPINLOCK(crash_handlers_lock);
++
+ #ifdef CONFIG_SMP
+ static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
+ 
+@@ -285,9 +292,72 @@ static inline void crash_kexec_stop_spus(void)
+ }
+ #endif /* CONFIG_SPU_BASE */
+ 
++/*
++ * Register a function to be called on shutdown.  Only use this if you
++ * can't reset your device in the second kernel.
++ */
++int crash_shutdown_register(crash_shutdown_t handler)
++{
++	unsigned int i, rc;
++
++	spin_lock(&crash_handlers_lock);
++	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
++		if (!crash_shutdown_handles[i]) {
++			/* Insert handle at first empty entry */
++			crash_shutdown_handles[i] = handler;
++			rc = 0;
++			break;
++		}
++
++	if (i == CRASH_HANDLER_MAX) {
++		printk(KERN_ERR "Crash shutdown handles full, "
++		       "not registered.\n");
++		rc = 1;
++	}
++
++	spin_unlock(&crash_handlers_lock);
++	return rc;
++}
++EXPORT_SYMBOL(crash_shutdown_register);
++
++int crash_shutdown_unregister(crash_shutdown_t handler)
++{
++	unsigned int i, rc;
++
++	spin_lock(&crash_handlers_lock);
++	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
++		if (crash_shutdown_handles[i] == handler)
++			break;
++
++	if (i == CRASH_HANDLER_MAX) {
++		printk(KERN_ERR "Crash shutdown handle not found\n");
++		rc = 1;
++	} else {
++		/* Shift handles down */
++		for (; crash_shutdown_handles[i]; i++)
++			crash_shutdown_handles[i] =
++				crash_shutdown_handles[i+1];
++		rc = 0;
++	}
++
++	spin_unlock(&crash_handlers_lock);
++	return rc;
++}
++EXPORT_SYMBOL(crash_shutdown_unregister);
++
++static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
++
++static int handle_fault(struct pt_regs *regs)
++{
++	longjmp(crash_shutdown_buf, 1);
++	return 0;
++}
++
+ void default_machine_crash_shutdown(struct pt_regs *regs)
+ {
+-	unsigned int irq;
++	unsigned int i;
++	int (*old_handler)(struct pt_regs *regs);
++
+ 
+ 	/*
+ 	 * This function is only called after the system
+@@ -301,15 +371,36 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
+ 	 */
+ 	hard_irq_disable();
+ 
+-	for_each_irq(irq) {
+-		struct irq_desc *desc = irq_desc + irq;
++	for_each_irq(i) {
++		struct irq_desc *desc = irq_desc + i;
+ 
+ 		if (desc->status & IRQ_INPROGRESS)
+-			desc->chip->eoi(irq);
++			desc->chip->eoi(i);
+ 
+ 		if (!(desc->status & IRQ_DISABLED))
+-			desc->chip->disable(irq);
++			desc->chip->disable(i);
++	}
++
++	/*
++	 * Call registered shutdown routines safely.  Swap out
++	 * __debugger_fault_handler, and replace on exit.
++	 */
++	old_handler = __debugger_fault_handler;
++	__debugger_fault_handler = handle_fault;
++	for (i = 0; crash_shutdown_handles[i]; i++) {
++		if (setjmp(crash_shutdown_buf) == 0) {
++			/*
++			 * Insert syncs and delay to ensure
++			 * instructions in the dangerous region don't
++			 * leak away from this protected region.
++			 */
++			asm volatile("sync; isync");
++			/* dangerous region */
++			crash_shutdown_handles[i]();
++			asm volatile("sync; isync");
++		}
+ 	}
++	__debugger_fault_handler = old_handler;
+ 
+ 	/*
+ 	 * Make a note of crashing cpu. Will be used in machine_kexec
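
The crash_shutdown_register()/crash_shutdown_unregister() pair added above gives
a driver a hook that runs just before kexec jumps into the crash kernel, with
faults inside the handler caught by the setjmp/longjmp wrapper so a misbehaving
device cannot prevent the dump. A minimal, hypothetical user would look roughly
like the sketch below; my_device_quiesce() and the init/exit names are invented,
and crash_shutdown_t is assumed to be the plain void (*)(void) typedef declared
next to these prototypes in this series.

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/kexec.h>	/* crash_shutdown_t, crash_shutdown_register() */

static void my_device_quiesce(void)
{
	/* put the hardware into a state the kdump kernel can cope with */
}

static int __init my_driver_init(void)
{
	if (crash_shutdown_register(my_device_quiesce))
		return -EBUSY;	/* handler table is full */
	return 0;
}

static void __exit my_driver_exit(void)
{
	crash_shutdown_unregister(my_device_quiesce);
}
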
+diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
+index 14206e3..8423907 100644
+--- a/arch/powerpc/kernel/dma_64.c
++++ b/arch/powerpc/kernel/dma_64.c
+@@ -112,10 +112,16 @@ EXPORT_SYMBOL(dma_iommu_ops);
+ /*
+  * Generic direct DMA implementation
+  *
+- * This implementation supports a global offset that can be applied if
+- * the address at which memory is visible to devices is not 0.
++ * This implementation supports a per-device offset that can be applied if
++ * the address at which memory is visible to devices is not 0. Platform code
++ * can set archdata.dma_data to an unsigned long holding the offset. By
++ * default the offset is zero.
+  */
+-unsigned long dma_direct_offset;
++
++static unsigned long get_dma_direct_offset(struct device *dev)
++{
++	return (unsigned long)dev->archdata.dma_data;
++}
+ 
+ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+ 				       dma_addr_t *dma_handle, gfp_t flag)
+@@ -124,13 +130,12 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+ 	void *ret;
+ 	int node = dev->archdata.numa_node;
+ 
+-	/* TODO: Maybe use the numa node here too ? */
+ 	page = alloc_pages_node(node, flag, get_order(size));
+ 	if (page == NULL)
+ 		return NULL;
+ 	ret = page_address(page);
+ 	memset(ret, 0, size);
+-	*dma_handle = virt_to_abs(ret) | dma_direct_offset;
++	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
+ 
+ 	return ret;
+ }
+@@ -145,7 +150,7 @@ static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
+ 					size_t size,
+ 					enum dma_data_direction direction)
+ {
+-	return virt_to_abs(ptr) | dma_direct_offset;
++	return virt_to_abs(ptr) + get_dma_direct_offset(dev);
+ }
+ 
+ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
+@@ -161,7 +166,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+ 	int i;
+ 
+ 	for_each_sg(sgl, sg, nents, i) {
+-		sg->dma_address = sg_phys(sg) | dma_direct_offset;
++		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+ 		sg->dma_length = sg->length;
+ 	}
+ 
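
With the global dma_direct_offset gone, the per-device offset now travels in
dev->archdata.dma_data and is read back by get_dma_direct_offset() above.
Platform code that used to write the global would instead seed each device,
roughly as in this sketch (the fixup name and the 2GB offset are made-up
examples, not taken from the patch):

#include <linux/pci.h>

static void __devinit my_board_pci_dma_dev_setup(struct pci_dev *pdev)
{
	/* devices behind this bridge see system RAM offset by 2GB */
	pdev->dev.archdata.dma_data = (void *)0x80000000ul;
}
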
+diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
+index 56aba84..ad071a1 100644
+--- a/arch/powerpc/kernel/head_44x.S
++++ b/arch/powerpc/kernel/head_44x.S
+@@ -289,11 +289,8 @@ interrupt_base:
+ 	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+ 
+ 	/* Machine Check Interrupt */
+-#ifdef CONFIG_440A
+-	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+-#else
+ 	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+-#endif
++	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
+ 
+ 	/* Data Storage Interrupt */
+ 	START_EXCEPTION(DataStorage)
+@@ -674,6 +671,15 @@ finish_tlb_load:
+  */
+ 
+ /*
++ * Adjust the machine check IVOR on 440A cores
++ */
++_GLOBAL(__fixup_440A_mcheck)
++	li	r3,MachineCheckA@l
++	mtspr	SPRN_IVOR1,r3
++	sync
++	blr
++
++/*
+  * extern void giveup_altivec(struct task_struct *prev)
+  *
+  * The 44x core does not have an AltiVec unit.
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index c349868..11b4f6d 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -903,6 +903,7 @@ handle_page_fault:
+  * the PTE insertion
+  */
+ 12:	bl	.save_nvgprs
++	mr	r5,r3
+ 	addi	r3,r1,STACK_FRAME_OVERHEAD
+ 	ld	r4,_DAR(r1)
+ 	bl	.low_hash_fault
+diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
+index 8536e76..ba9393f 100644
+--- a/arch/powerpc/kernel/head_booke.h
++++ b/arch/powerpc/kernel/head_booke.h
+@@ -166,7 +166,7 @@ label:
+ 	mfspr	r5,SPRN_ESR;					\
+ 	stw	r5,_ESR(r11);					\
+ 	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
+-	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
++	EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
+ 			  NOCOPY, mcheck_transfer_to_handler,   \
+ 			  ret_from_mcheck_exc)
+ 
+diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
+index 7aecb39..d9cc2c2 100644
+--- a/arch/powerpc/kernel/head_fsl_booke.S
++++ b/arch/powerpc/kernel/head_fsl_booke.S
+@@ -73,8 +73,8 @@ _ENTRY(_start);
+ /* We try to not make any assumptions about how the boot loader
+  * setup or used the TLBs.  We invalidate all mappings from the
+  * boot loader and load a single entry in TLB1[0] to map the
+- * first 16M of kernel memory.  Any boot info passed from the
+- * bootloader needs to live in this first 16M.
++ * first 64M of kernel memory.  Any boot info passed from the
++ * bootloader needs to live in this first 64M.
+  *
+  * Requirement on bootloader:
+  *  - The page we're executing in needs to reside in TLB1 and
+@@ -167,7 +167,7 @@ skpinv:	addi	r6,r6,1				/* Increment */
+ 	mtspr	SPRN_MAS0,r7
+ 	tlbre
+ 
+-	/* Just modify the entry ID and EPN for the temp mapping */
++	/* Just modify the entry ID, EPN and RPN for the temp mapping */
+ 	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+ 	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
+ 	mtspr	SPRN_MAS0,r7
+@@ -177,9 +177,12 @@ skpinv:	addi	r6,r6,1				/* Increment */
+ 	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
+ 	mtspr	SPRN_MAS1,r6
+ 	mfspr	r6,SPRN_MAS2
+-	li	r7,0		/* temp EPN = 0 */
++	lis	r7,PHYSICAL_START@h
+ 	rlwimi	r7,r6,0,20,31
+ 	mtspr	SPRN_MAS2,r7
++	mfspr	r6,SPRN_MAS3
++	rlwimi	r7,r6,0,20,31
++	mtspr	SPRN_MAS3,r7
+ 	tlbwe
+ 
+ 	xori	r6,r4,1
+@@ -222,11 +225,11 @@ skpinv:	addi	r6,r6,1				/* Increment */
+ 	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
+ 	mtspr	SPRN_MAS0,r6
+ 	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
+-	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
++	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
+ 	mtspr	SPRN_MAS1,r6
+ 	li	r7,0
+-	lis	r6,KERNELBASE@h
+-	ori	r6,r6,KERNELBASE@l
++	lis	r6,PAGE_OFFSET@h
++	ori	r6,r6,PAGE_OFFSET@l
+ 	rlwimi	r6,r7,0,20,31
+ 	mtspr	SPRN_MAS2,r6
+ 	li	r7,(MAS3_SX|MAS3_SW|MAS3_SR)
+@@ -234,6 +237,9 @@ skpinv:	addi	r6,r6,1				/* Increment */
+ 	tlbwe
+ 
+ /* 7. Jump to KERNELBASE mapping */
++	lis	r6,KERNELBASE@h
++	ori	r6,r6,KERNELBASE@l
++	rlwimi	r6,r7,0,20,31
+ 	lis	r7,MSR_KERNEL@h
+ 	ori	r7,r7,MSR_KERNEL@l
+ 	bl	1f			/* Find our address */
+diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
+index 72fd871..2f50bb5 100644
+--- a/arch/powerpc/kernel/ibmebus.c
++++ b/arch/powerpc/kernel/ibmebus.c
+@@ -41,6 +41,7 @@
+ #include <linux/kobject.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/interrupt.h>
++#include <linux/of.h>
+ #include <linux/of_platform.h>
+ #include <asm/ibmebus.h>
+ #include <asm/abs_addr.h>
+@@ -52,7 +53,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */
+ struct bus_type ibmebus_bus_type;
+ 
+ /* These devices will automatically be added to the bus during init */
+-static struct of_device_id builtin_matches[] = {
++static struct of_device_id __initdata builtin_matches[] = {
+ 	{ .compatible = "IBM,lhca" },
+ 	{ .compatible = "IBM,lhea" },
+ 	{},
+@@ -171,7 +172,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
+ 
+ 	root = of_find_node_by_path("/");
+ 
+-	for (child = NULL; (child = of_get_next_child(root, child)); ) {
++	for_each_child_of_node(root, child) {
+ 		if (!of_match_node(matches, child))
+ 			continue;
+ 
+@@ -197,16 +198,13 @@ int ibmebus_register_driver(struct of_platform_driver *drv)
+ 	/* If the driver uses devices that ibmebus doesn't know, add them */
+ 	ibmebus_create_devices(drv->match_table);
+ 
+-	drv->driver.name   = drv->name;
+-	drv->driver.bus    = &ibmebus_bus_type;
+-
+-	return driver_register(&drv->driver);
++	return of_register_driver(drv, &ibmebus_bus_type);
+ }
+ EXPORT_SYMBOL(ibmebus_register_driver);
+ 
+ void ibmebus_unregister_driver(struct of_platform_driver *drv)
+ {
+-	driver_unregister(&drv->driver);
++	of_unregister_driver(drv);
+ }
+ EXPORT_SYMBOL(ibmebus_unregister_driver);
+ 
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 79a85d6..a3c406a 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -532,16 +532,14 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
+ 	return tbl;
+ }
+ 
+-void iommu_free_table(struct device_node *dn)
++void iommu_free_table(struct iommu_table *tbl, const char *node_name)
+ {
+-	struct pci_dn *pdn = dn->data;
+-	struct iommu_table *tbl = pdn->iommu_table;
+ 	unsigned long bitmap_sz, i;
+ 	unsigned int order;
+ 
+ 	if (!tbl || !tbl->it_map) {
+ 		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
+-				dn->full_name);
++				node_name);
+ 		return;
+ 	}
+ 
+@@ -550,7 +548,7 @@ void iommu_free_table(struct device_node *dn)
+ 	for (i = 0; i < (tbl->it_size/64); i++) {
+ 		if (tbl->it_map[i] != 0) {
+ 			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
+-				__FUNCTION__, dn->full_name);
++				__FUNCTION__, node_name);
+ 			break;
+ 		}
+ 	}
+diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
+index f0f49d1..ee172aa 100644
+--- a/arch/powerpc/kernel/isa-bridge.c
++++ b/arch/powerpc/kernel/isa-bridge.c
+@@ -108,7 +108,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
+ 	if (size > 0x10000)
+ 		size = 0x10000;
+ 
+-	printk(KERN_ERR "no ISA IO ranges or unexpected isa range,"
++	printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
+ 	       "mapping 64k\n");
+ 
+ 	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
+@@ -116,7 +116,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
+ 	return;
+ 
+ inval_range:
+-	printk(KERN_ERR "no ISA IO ranges or unexpected isa range,"
++	printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
+ 	       "mapping 64k\n");
+ 	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
+ 		     0x10000, _PAGE_NO_CACHE|_PAGE_GUARDED);
+@@ -145,7 +145,7 @@ void __init isa_bridge_find_early(struct pci_controller *hose)
+ 	for_each_node_by_type(np, "isa") {
+ 		/* Look for our hose being a parent */
+ 		for (parent = of_get_parent(np); parent;) {
+-			if (parent == hose->arch_data) {
++			if (parent == hose->dn) {
+ 				of_node_put(parent);
+ 				break;
+ 			}
+diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
+index 4ed5887..76b862b 100644
+--- a/arch/powerpc/kernel/legacy_serial.c
++++ b/arch/powerpc/kernel/legacy_serial.c
+@@ -4,6 +4,7 @@
+ #include <linux/serial_core.h>
+ #include <linux/console.h>
+ #include <linux/pci.h>
++#include <linux/of_device.h>
+ #include <asm/io.h>
+ #include <asm/mmu.h>
+ #include <asm/prom.h>
+@@ -31,6 +32,15 @@ static struct legacy_serial_info {
+ 	int				irq_check_parent;
+ 	phys_addr_t			taddr;
+ } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
++
++static struct __initdata of_device_id parents[] = {
++	{.type = "soc",},
++	{.type = "tsi-bridge",},
++	{.type = "opb", .compatible = "ibm,opb",},
++	{.compatible = "simple-bus",},
++	{.compatible = "wrs,epld-localbus",},
++};
++
+ static unsigned int legacy_serial_count;
+ static int legacy_serial_console = -1;
+ 
+@@ -306,19 +316,21 @@ void __init find_legacy_serial_ports(void)
+ 		DBG(" no linux,stdout-path !\n");
+ 	}
+ 
+-	/* First fill our array with SOC ports */
+-	for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
+-		struct device_node *soc = of_get_parent(np);
+-		if (soc && !strcmp(soc->type, "soc")) {
++	/* Iterate over all the 16550 ports, looking for known parents */
++	for_each_compatible_node(np, "serial", "ns16550") {
++		struct device_node *parent = of_get_parent(np);
++		if (!parent)
++			continue;
++		if (of_match_node(parents, parent) != NULL) {
+ 			index = add_legacy_soc_port(np, np);
+ 			if (index >= 0 && np == stdout)
+ 				legacy_serial_console = index;
+ 		}
+-		of_node_put(soc);
++		of_node_put(parent);
+ 	}
+ 
+-	/* First fill our array with ISA ports */
+-	for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
++	/* Next, fill our array with ISA ports */
++	for_each_node_by_type(np, "serial") {
+ 		struct device_node *isa = of_get_parent(np);
+ 		if (isa && !strcmp(isa->name, "isa")) {
+ 			index = add_legacy_isa_port(np, isa);
+@@ -328,29 +340,6 @@ void __init find_legacy_serial_ports(void)
+ 		of_node_put(isa);
+ 	}
+ 
+-	/* First fill our array with tsi-bridge ports */
+-	for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
+-		struct device_node *tsi = of_get_parent(np);
+-		if (tsi && !strcmp(tsi->type, "tsi-bridge")) {
+-			index = add_legacy_soc_port(np, np);
+-			if (index >= 0 && np == stdout)
+-				legacy_serial_console = index;
+-		}
+-		of_node_put(tsi);
+-	}
+-
+-	/* First fill our array with opb bus ports */
+-	for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
+-		struct device_node *opb = of_get_parent(np);
+-		if (opb && (!strcmp(opb->type, "opb") ||
+-			    of_device_is_compatible(opb, "ibm,opb"))) {
+-			index = add_legacy_soc_port(np, np);
+-			if (index >= 0 && np == stdout)
+-				legacy_serial_console = index;
+-		}
+-		of_node_put(opb);
+-	}
+-
+ #ifdef CONFIG_PCI
+ 	/* Next, try to locate PCI ports */
+ 	for (np = NULL; (np = of_find_all_nodes(np));) {
+@@ -474,7 +463,7 @@ static int __init serial_dev_init(void)
+ 
+ 	/*
+ 	 * Before we register the platfrom serial devices, we need
+ 	 * Before we register the platform serial devices, we need
++	 * to fixup their interrupts and their IO ports.
+ 	 */
+ 	DBG("Fixing serial ports interrupts and IO ports ...\n");
+ 
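
The three near-identical scans keyed on the parent node's type are replaced by a
single pass that checks each ns16550 node's parent against the parents[]
of_device_id table, so supporting another parent bus becomes a one-line table
entry. The matching idiom in isolation looks roughly like this (known_parents
and parent_is_known are invented names; the header choice follows what the patch
itself includes):

#include <linux/of_device.h>

static const struct of_device_id known_parents[] = {
	{ .type = "soc", },
	{ .compatible = "simple-bus", },
	{ },	/* terminator */
};

static int parent_is_known(struct device_node *np)
{
	struct device_node *parent = of_get_parent(np);
	int hit = parent && of_match_node(known_parents, parent) != NULL;

	of_node_put(parent);	/* of_node_put(NULL) is a no-op */
	return hit;
}
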
+diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
+index ff781b2..dcb89a8 100644
+--- a/arch/powerpc/kernel/lparcfg.c
++++ b/arch/powerpc/kernel/lparcfg.c
+@@ -41,7 +41,6 @@
+ /* #define LPARCFG_DEBUG */
+ 
+ static struct proc_dir_entry *proc_ppc64_lparcfg;
+-#define LPARCFG_BUFF_SIZE 4096
+ 
+ /*
+  * Track sum of all purrs across all processors. This is used to further
+@@ -595,13 +594,6 @@ int __init lparcfg_init(void)
+ 	ent = create_proc_entry("ppc64/lparcfg", mode, NULL);
+ 	if (ent) {
+ 		ent->proc_fops = &lparcfg_fops;
+-		ent->data = kmalloc(LPARCFG_BUFF_SIZE, GFP_KERNEL);
+-		if (!ent->data) {
+-			printk(KERN_ERR
+-			       "Failed to allocate buffer for lparcfg\n");
+-			remove_proc_entry("lparcfg", ent->parent);
+-			return -ENOMEM;
+-		}
+ 	} else {
+ 		printk(KERN_ERR "Failed to create ppc64/lparcfg\n");
+ 		return -EIO;
+@@ -613,10 +605,8 @@ int __init lparcfg_init(void)
+ 
+ void __exit lparcfg_cleanup(void)
+ {
+-	if (proc_ppc64_lparcfg) {
+-		kfree(proc_ppc64_lparcfg->data);
++	if (proc_ppc64_lparcfg)
+ 		remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
+-	}
+ }
+ 
+ module_init(lparcfg_init);
+diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
+index 330c9dc..7b91602 100644
+--- a/arch/powerpc/kernel/misc.S
++++ b/arch/powerpc/kernel/misc.S
+@@ -8,12 +8,17 @@
+  * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+  * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
+  *
++ * setjmp/longjmp code by Paul Mackerras.
++ *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation; either version
+  * 2 of the License, or (at your option) any later version.
+  */
+ #include <asm/ppc_asm.h>
++#include <asm/unistd.h>
++#include <asm/asm-compat.h>
++#include <asm/asm-offsets.h>
+ 
+ 	.text
+ 
+@@ -43,3 +48,71 @@ _GLOBAL(add_reloc_offset)
+ 	add	r3,r3,r5
+ 	mtlr	r0
+ 	blr
++
++_GLOBAL(kernel_execve)
++	li	r0,__NR_execve
++	sc
++	bnslr
++	neg	r3,r3
++	blr
++
++_GLOBAL(setjmp)
++	mflr	r0
++	PPC_STL	r0,0(r3)
++	PPC_STL	r1,SZL(r3)
++	PPC_STL	r2,2*SZL(r3)
++	mfcr	r0
++	PPC_STL	r0,3*SZL(r3)
++	PPC_STL	r13,4*SZL(r3)
++	PPC_STL	r14,5*SZL(r3)
++	PPC_STL	r15,6*SZL(r3)
++	PPC_STL	r16,7*SZL(r3)
++	PPC_STL	r17,8*SZL(r3)
++	PPC_STL	r18,9*SZL(r3)
++	PPC_STL	r19,10*SZL(r3)
++	PPC_STL	r20,11*SZL(r3)
++	PPC_STL	r21,12*SZL(r3)
++	PPC_STL	r22,13*SZL(r3)
++	PPC_STL	r23,14*SZL(r3)
++	PPC_STL	r24,15*SZL(r3)
++	PPC_STL	r25,16*SZL(r3)
++	PPC_STL	r26,17*SZL(r3)
++	PPC_STL	r27,18*SZL(r3)
++	PPC_STL	r28,19*SZL(r3)
++	PPC_STL	r29,20*SZL(r3)
++	PPC_STL	r30,21*SZL(r3)
++	PPC_STL	r31,22*SZL(r3)
++	li	r3,0
++	blr
++
++_GLOBAL(longjmp)
++	PPC_LCMPI r4,0
++	bne	1f
++	li	r4,1
++1:	PPC_LL	r13,4*SZL(r3)
++	PPC_LL	r14,5*SZL(r3)
++	PPC_LL	r15,6*SZL(r3)
++	PPC_LL	r16,7*SZL(r3)
++	PPC_LL	r17,8*SZL(r3)
++	PPC_LL	r18,9*SZL(r3)
++	PPC_LL	r19,10*SZL(r3)
++	PPC_LL	r20,11*SZL(r3)
++	PPC_LL	r21,12*SZL(r3)
++	PPC_LL	r22,13*SZL(r3)
++	PPC_LL	r23,14*SZL(r3)
++	PPC_LL	r24,15*SZL(r3)
++	PPC_LL	r25,16*SZL(r3)
++	PPC_LL	r26,17*SZL(r3)
++	PPC_LL	r27,18*SZL(r3)
++	PPC_LL	r28,19*SZL(r3)
++	PPC_LL	r29,20*SZL(r3)
++	PPC_LL	r30,21*SZL(r3)
++	PPC_LL	r31,22*SZL(r3)
++	PPC_LL	r0,3*SZL(r3)
++	mtcrf	0x38,r0
++	PPC_LL	r0,0(r3)
++	PPC_LL	r1,SZL(r3)
++	PPC_LL	r2,2*SZL(r3)
++	mtlr	r0
++	mr	r3,r4
++	blr
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index 8b642ab..5c2e253 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -206,6 +206,45 @@ _GLOBAL(_nmask_and_or_msr)
+ 	isync
+ 	blr			/* Done */
+ 
++#ifdef CONFIG_40x
++
++/*
++ * Do an IO access in real mode
++ */
++_GLOBAL(real_readb)
++	mfmsr	r7
++	ori	r0,r7,MSR_DR
++	xori	r0,r0,MSR_DR
++	sync
++	mtmsr	r0
++	sync
++	isync
++	lbz	r3,0(r3)
++	sync
++	mtmsr	r7
++	sync
++	isync
++	blr
++
++/*
++ * Do an IO access in real mode
++ */
++_GLOBAL(real_writeb)
++	mfmsr	r7
++	ori	r0,r7,MSR_DR
++	xori	r0,r0,MSR_DR
++	sync
++	mtmsr	r0
++	sync
++	isync
++	stb	r3,0(r4)
++	sync
++	mtmsr	r7
++	sync
++	isync
++	blr
++
++#endif /* CONFIG_40x */
+ 
+ /*
+  * Flush MMU TLB
+@@ -236,12 +275,6 @@ _GLOBAL(_tlbia)
+ 	/* Invalidate all entries in TLB1 */
+ 	li	r3, 0x0c
+ 	tlbivax	0,3
+-	/* Invalidate all entries in TLB2 */
+-	li	r3, 0x14
+-	tlbivax	0,3
+-	/* Invalidate all entries in TLB3 */
+-	li	r3, 0x1c
+-	tlbivax	0,3
+ 	msync
+ #ifdef CONFIG_SMP
+ 	tlbsync
+@@ -336,12 +369,8 @@ _GLOBAL(_tlbie)
+ #elif defined(CONFIG_FSL_BOOKE)
+ 	rlwinm	r4, r3, 0, 0, 19
+ 	ori	r5, r4, 0x08	/* TLBSEL = 1 */
+-	ori	r6, r4, 0x10	/* TLBSEL = 2 */
+-	ori	r7, r4, 0x18	/* TLBSEL = 3 */
+ 	tlbivax	0, r4
+ 	tlbivax	0, r5
+-	tlbivax	0, r6
+-	tlbivax	0, r7
+ 	msync
+ #if defined(CONFIG_SMP)
+ 	tlbsync
+@@ -793,13 +822,6 @@ _GLOBAL(kernel_thread)
+ 	addi	r1,r1,16
+ 	blr
+ 
+-_GLOBAL(kernel_execve)
+-	li	r0,__NR_execve
+-	sc
+-	bnslr
+-	neg	r3,r3
+-	blr
+-
+ /*
+  * This routine is just here to keep GCC happy - sigh...
+  */
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index bbb3ba5..a3c491e 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -518,13 +518,6 @@ _GLOBAL(giveup_altivec)
+ 
+ #endif /* CONFIG_ALTIVEC */
+ 
+-_GLOBAL(kernel_execve)
+-	li	r0,__NR_execve
+-	sc
+-	bnslr
+-	neg	r3,r3
+-	blr
+-
+ /* kexec_wait(phys_cpu)
+  *
+  * wait for the flag to change, indicating this kernel is going away but
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 07a89a3..eab3138 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -24,6 +24,7 @@
+ #include <linux/kernel.h>
+ #include <linux/cache.h>
+ #include <linux/bug.h>
++#include <linux/sort.h>
+ 
+ #include "setup.h"
+ 
+@@ -54,22 +55,60 @@ void module_free(struct module *mod, void *module_region)
+    addend) */
+ static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
+ {
+-	unsigned int i, j, ret = 0;
+-
+-	/* Sure, this is order(n^2), but it's usually short, and not
+-           time critical */
+-	for (i = 0; i < num; i++) {
+-		for (j = 0; j < i; j++) {
+-			/* If this addend appeared before, it's
+-                           already been counted */
+-			if (ELF32_R_SYM(rela[i].r_info)
+-			    == ELF32_R_SYM(rela[j].r_info)
+-			    && rela[i].r_addend == rela[j].r_addend)
+-				break;
++	unsigned int i, r_info, r_addend, _count_relocs;
++
++	_count_relocs = 0;
++	r_info = 0;
++	r_addend = 0;
++	for (i = 0; i < num; i++)
++		/* Only count 24-bit relocs, others don't need stubs */
++		if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
++		    (r_info != ELF32_R_SYM(rela[i].r_info) ||
++		     r_addend != rela[i].r_addend)) {
++			_count_relocs++;
++			r_info = ELF32_R_SYM(rela[i].r_info);
++			r_addend = rela[i].r_addend;
+ 		}
+-		if (j == i) ret++;
++
++	return _count_relocs;
++}
++
++static int relacmp(const void *_x, const void *_y)
++{
++	const Elf32_Rela *x, *y;
++
++	y = (Elf32_Rela *)_x;
++	x = (Elf32_Rela *)_y;
++
++	/* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
++	 * make the comparison cheaper/faster. It won't affect the sorting or
++	 * the counting algorithms' performance
++	 */
++	if (x->r_info < y->r_info)
++		return -1;
++	else if (x->r_info > y->r_info)
++		return 1;
++	else if (x->r_addend < y->r_addend)
++		return -1;
++	else if (x->r_addend > y->r_addend)
++		return 1;
++	else
++		return 0;
++}
++
++static void relaswap(void *_x, void *_y, int size)
++{
++	uint32_t *x, *y, tmp;
++	int i;
++
++	y = (uint32_t *)_x;
++	x = (uint32_t *)_y;
++
++	for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
++		tmp = x[i];
++		x[i] = y[i];
++		y[i] = tmp;
+ 	}
+-	return ret;
+ }
+ 
+ /* Get the potential trampolines size required of the init and
+@@ -100,6 +139,16 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
+ 			DEBUGP("Ptr: %p.  Number: %u\n",
+ 			       (void *)hdr + sechdrs[i].sh_offset,
+ 			       sechdrs[i].sh_size / sizeof(Elf32_Rela));
++
++			/* Sort the relocation information based on a symbol and
++			 * addend key. This is a stable O(n*log n) complexity
++			 * alogrithm but it will reduce the complexity of
++			 * algorithm but it will reduce the complexity of
++			 */
++			sort((void *)hdr + sechdrs[i].sh_offset,
++			     sechdrs[i].sh_size / sizeof(Elf32_Rela),
++			     sizeof(Elf32_Rela), relacmp, relaswap);
++
+ 			ret += count_relocs((void *)hdr
+ 					     + sechdrs[i].sh_offset,
+ 					     sechdrs[i].sh_size
+diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
+index 75c7c4f..3a82b02 100644
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -24,6 +24,7 @@
+ #include <asm/module.h>
+ #include <asm/uaccess.h>
+ #include <asm/firmware.h>
++#include <linux/sort.h>
+ 
+ #include "setup.h"
+ 
+@@ -81,25 +82,23 @@ static struct ppc64_stub_entry ppc64_stub =
+    different addend) */
+ static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
+ {
+-	unsigned int i, j, ret = 0;
++	unsigned int i, r_info, r_addend, _count_relocs;
+ 
+ 	/* FIXME: Only count external ones --RR */
+-	/* Sure, this is order(n^2), but it's usually short, and not
+-           time critical */
+-	for (i = 0; i < num; i++) {
++	_count_relocs = 0;
++	r_info = 0;
++	r_addend = 0;
++	for (i = 0; i < num; i++)
+ 		/* Only count 24-bit relocs, others don't need stubs */
+-		if (ELF64_R_TYPE(rela[i].r_info) != R_PPC_REL24)
+-			continue;
+-		for (j = 0; j < i; j++) {
+-			/* If this addend appeared before, it's
+-                           already been counted */
+-			if (rela[i].r_info == rela[j].r_info
+-			    && rela[i].r_addend == rela[j].r_addend)
+-				break;
++		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
++		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
++		     r_addend != rela[i].r_addend)) {
++			_count_relocs++;
++			r_info = ELF64_R_SYM(rela[i].r_info);
++			r_addend = rela[i].r_addend;
+ 		}
+-		if (j == i) ret++;
+-	}
+-	return ret;
++
++	return _count_relocs;
+ }
+ 
+ void *module_alloc(unsigned long size)
+@@ -118,6 +117,44 @@ void module_free(struct module *mod, void *module_region)
+            table entries. */
+ }
+ 
++static int relacmp(const void *_x, const void *_y)
++{
++	const Elf64_Rela *x, *y;
++
++	y = (Elf64_Rela *)_x;
++	x = (Elf64_Rela *)_y;
++
++	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
++	 * make the comparison cheaper/faster. It won't affect the sorting or
++	 * the counting algorithms' performance
++	 */
++	if (x->r_info < y->r_info)
++		return -1;
++	else if (x->r_info > y->r_info)
++		return 1;
++	else if (x->r_addend < y->r_addend)
++		return -1;
++	else if (x->r_addend > y->r_addend)
++		return 1;
++	else
++		return 0;
++}
++
++static void relaswap(void *_x, void *_y, int size)
++{
++	uint64_t *x, *y, tmp;
++	int i;
++
++	y = (uint64_t *)_x;
++	x = (uint64_t *)_y;
++
++	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
++		tmp = x[i];
++		x[i] = y[i];
++		y[i] = tmp;
++	}
++}
++
+ /* Get size of potential trampolines required. */
+ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
+ 				    const Elf64_Shdr *sechdrs)
+@@ -133,6 +170,16 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
+ 			DEBUGP("Ptr: %p.  Number: %lu\n",
+ 			       (void *)sechdrs[i].sh_addr,
+ 			       sechdrs[i].sh_size / sizeof(Elf64_Rela));
++
++			/* Sort the relocation information based on a symbol and
++			 * addend key. This is a stable O(n*log n) complexity
++			 * algorithm but it will reduce the complexity of
++			 * count_relocs() to linear complexity O(n)
++			 */
++			sort((void *)sechdrs[i].sh_addr,
++			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
++			     sizeof(Elf64_Rela), relacmp, relaswap);
++
+ 			relocs += count_relocs((void *)sechdrs[i].sh_addr,
+ 					       sechdrs[i].sh_size
+ 					       / sizeof(Elf64_Rela));
+@@ -343,7 +390,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ 			/* Simply set it */
+ 			*(u32 *)location = value;
+ 			break;
+-			
++
+ 		case R_PPC64_ADDR64:
+ 			/* Simply set it */
+ 			*(unsigned long *)location = value;
+@@ -399,7 +446,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ 			}
+ 
+ 			/* Only replace bits 2 through 26 */
+-			*(uint32_t *)location 
++			*(uint32_t *)location
+ 				= (*(uint32_t *)location & ~0x03fffffc)
+ 				| (value & 0x03fffffc);
+ 			break;
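
Both module_32.c and module_64.c now sort the RELA entries by (symbol, addend)
with the kernel's generic sort(), supplying relacmp()/relaswap() as the
comparison and swap callbacks, and then count distinct R_PPC_REL24 targets in
one linear pass, replacing the old O(n^2) pairwise scan. Stripped of the ELF
details, the idea is simply "sort, then count runs"; a userspace-flavoured
sketch of that pattern (not kernel code):

#include <stdlib.h>

static int cmp_uint(const void *a, const void *b)
{
	unsigned int x = *(const unsigned int *)a;
	unsigned int y = *(const unsigned int *)b;

	return (x > y) - (x < y);
}

/* O(n log n) sort plus one linear pass instead of an O(n^2) nested scan */
static unsigned int count_distinct(unsigned int *v, unsigned int n)
{
	unsigned int i, distinct = 0;

	qsort(v, n, sizeof(*v), cmp_uint);
	for (i = 0; i < n; i++)
		if (i == 0 || v[i] != v[i - 1])
			distinct++;
	return distinct;
}
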
+diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
+index 3388ad6..5748ddb 100644
+--- a/arch/powerpc/kernel/of_device.c
++++ b/arch/powerpc/kernel/of_device.c
+@@ -5,10 +5,10 @@
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/slab.h>
++#include <linux/of_device.h>
+ 
+ #include <asm/errno.h>
+ #include <asm/dcr.h>
+-#include <asm/of_device.h>
+ 
+ static void of_device_make_bus_id(struct of_device *dev)
+ {
+diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
+index aeaa202..fb698d4 100644
+--- a/arch/powerpc/kernel/of_platform.c
++++ b/arch/powerpc/kernel/of_platform.c
+@@ -19,6 +19,7 @@
+ #include <linux/mod_devicetable.h>
+ #include <linux/slab.h>
+ #include <linux/pci.h>
++#include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/of_platform.h>
+ 
+@@ -40,7 +41,7 @@
+  * a bus type in the list
+  */
+ 
+-static struct of_device_id of_default_bus_ids[] = {
++static const struct of_device_id of_default_bus_ids[] = {
+ 	{ .type = "soc", },
+ 	{ .compatible = "soc", },
+ 	{ .type = "spider", },
+@@ -64,26 +65,6 @@ static int __init of_bus_driver_init(void)
+ 
+ postcore_initcall(of_bus_driver_init);
+ 
+-int of_register_platform_driver(struct of_platform_driver *drv)
+-{
+-	/* initialize common driver fields */
+-	if (!drv->driver.name)
+-		drv->driver.name = drv->name;
+-	if (!drv->driver.owner)
+-		drv->driver.owner = drv->owner;
+-	drv->driver.bus = &of_platform_bus_type;
+-
+-	/* register with core */
+-	return driver_register(&drv->driver);
+-}
+-EXPORT_SYMBOL(of_register_platform_driver);
+-
+-void of_unregister_platform_driver(struct of_platform_driver *drv)
+-{
+-	driver_unregister(&drv->driver);
+-}
+-EXPORT_SYMBOL(of_unregister_platform_driver);
+-
+ struct of_device* of_platform_device_create(struct device_node *np,
+ 					    const char *bus_id,
+ 					    struct device *parent)
+@@ -120,15 +101,15 @@ EXPORT_SYMBOL(of_platform_device_create);
+  * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
+  * disallow recursive creation of child busses
+  */
+-static int of_platform_bus_create(struct device_node *bus,
+-				  struct of_device_id *matches,
++static int of_platform_bus_create(const struct device_node *bus,
++				  const struct of_device_id *matches,
+ 				  struct device *parent)
+ {
+ 	struct device_node *child;
+ 	struct of_device *dev;
+ 	int rc = 0;
+ 
+-	for (child = NULL; (child = of_get_next_child(bus, child)); ) {
++	for_each_child_of_node(bus, child) {
+ 		pr_debug("   create child: %s\n", child->full_name);
+ 		dev = of_platform_device_create(child, NULL, parent);
+ 		if (dev == NULL)
+@@ -157,7 +138,7 @@ static int of_platform_bus_create(struct device_node *bus,
+  */
+ 
+ int of_platform_bus_probe(struct device_node *root,
+-			  struct of_device_id *matches,
++			  const struct of_device_id *matches,
+ 			  struct device *parent)
+ {
+ 	struct device_node *child;
+@@ -190,7 +171,7 @@ int of_platform_bus_probe(struct device_node *root,
+ 		rc = of_platform_bus_create(root, matches, &dev->dev);
+ 		goto bail;
+ 	}
+-	for (child = NULL; (child = of_get_next_child(root, child)); ) {
++	for_each_child_of_node(root, child) {
+ 		if (!of_match_node(matches, child))
+ 			continue;
+ 
+diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
+index 2ae3b6f..980fe32 100644
+--- a/arch/powerpc/kernel/pci-common.c
++++ b/arch/powerpc/kernel/pci-common.c
+@@ -48,32 +48,26 @@
+ static DEFINE_SPINLOCK(hose_spinlock);
+ 
+ /* XXX kill that some day ... */
+-int global_phb_number;		/* Global phb counter */
++static int global_phb_number;		/* Global phb counter */
+ 
+-extern struct list_head hose_list;
++/* ISA Memory physical address */
++resource_size_t isa_mem_base;
+ 
+-/*
+- * pci_controller(phb) initialized common variables.
+- */
+-static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
+-{
+-	memset(hose, 0, sizeof(struct pci_controller));
+-
+-	spin_lock(&hose_spinlock);
+-	hose->global_number = global_phb_number++;
+-	list_add_tail(&hose->list_node, &hose_list);
+-	spin_unlock(&hose_spinlock);
+-}
++/* Default PCI flags is 0 */
++unsigned int ppc_pci_flags;
+ 
+-struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
++struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
+ {
+ 	struct pci_controller *phb;
+ 
+-	phb = alloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
++	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
+ 	if (phb == NULL)
+ 		return NULL;
+-	pci_setup_pci_controller(phb);
+-	phb->arch_data = dev;
++	spin_lock(&hose_spinlock);
++	phb->global_number = global_phb_number++;
++	list_add_tail(&phb->list_node, &hose_list);
++	spin_unlock(&hose_spinlock);
++	phb->dn = dev;
+ 	phb->is_dynamic = mem_init_done;
+ #ifdef CONFIG_PPC64
+ 	if (dev) {
+@@ -126,15 +120,10 @@ int pcibios_vaddr_is_ioport(void __iomem *address)
+  */
+ int pci_domain_nr(struct pci_bus *bus)
+ {
+-	if (firmware_has_feature(FW_FEATURE_ISERIES))
+-		return 0;
+-	else {
+-		struct pci_controller *hose = pci_bus_to_host(bus);
++	struct pci_controller *hose = pci_bus_to_host(bus);
+ 
+-		return hose->global_number;
+-	}
++	return hose->global_number;
+ }
+-
+ EXPORT_SYMBOL(pci_domain_nr);
+ 
+ #ifdef CONFIG_PPC_OF
+@@ -153,7 +142,7 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
+ 	while(node) {
+ 		struct pci_controller *hose, *tmp;
+ 		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+-			if (hose->arch_data == node)
++			if (hose->dn == node)
+ 				return hose;
+ 		node = node->parent;
+ 	}
+@@ -201,6 +190,20 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
+ 	struct of_irq oirq;
+ 	unsigned int virq;
+ 
++	/* The current device-tree that iSeries generates from the HV
++	 * PCI informations doesn't contain proper interrupt routing,
++	 * and all the fallback would do is print out crap, so we
++	 * don't attempt to resolve the interrupts here at all, some
++	 * iSeries specific fixup does it.
++	 *
++	 * In the long run, we will hopefully fix the generated device-tree
++	 * instead.
++	 */
++#ifdef CONFIG_PPC_ISERIES
++	if (firmware_has_feature(FW_FEATURE_ISERIES))
++		return -1;
++#endif
++
+ 	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
+ 
+ #ifdef DEBUG
+@@ -222,10 +225,11 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
+ 		if (pin == 0)
+ 			return -1;
+ 		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
+-		    line == 0xff) {
++		    line == 0xff || line == 0) {
+ 			return -1;
+ 		}
+-		DBG(" -> no map ! Using irq line %d from PCI config\n", line);
++		DBG(" -> no map ! Using line %d (pin %d) from PCI config\n",
++		    line, pin);
+ 
+ 		virq = irq_create_mapping(NULL, line);
+ 		if (virq != NO_IRQ)
+@@ -475,3 +479,717 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ 	*start = rsrc->start - offset;
+ 	*end = rsrc->end - offset;
+ }
++
++/**
++ * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
++ * @hose: newly allocated pci_controller to be setup
++ * @dev: device node of the host bridge
++ * @primary: set if primary bus (32 bits only, soon to be deprecated)
++ *
++ * This function will parse the "ranges" property of a PCI host bridge device
++ * node and setup the resource mapping of a pci controller based on its
++ * content.
++ *
++ * Life would be boring if it wasn't for a few issues that we have to deal
++ * with here:
++ *
++ *   - We can only cope with one IO space range and up to 3 Memory space
++ *     ranges. However, some machines (thanks Apple !) tend to split their
++ *     space into lots of small contiguous ranges. So we have to coalesce.
++ *
++ *   - We can only cope with all memory ranges having the same offset
++ *     between CPU addresses and PCI addresses. Unfortunately, some bridges
++ *     are setup for a large 1:1 mapping along with a small "window" which
++ *     maps PCI address 0 to some arbitrary high address of the CPU space in
++ *     order to give access to the ISA memory hole.
++ *     The way out of here that I've chosen for now is to always set the
++ *     offset based on the first resource found, then override it if we
++ *     have a different offset and the previous was set by an ISA hole.
++ *
++ *   - Some busses have IO space not starting at 0, which causes trouble with
++ *     the way we do our IO resource renumbering. The code somewhat deals with
++ *     it for 64 bits but I would expect problems on 32 bits.
++ *
++ *   - Some 32 bits platforms such as 4xx can have physical space larger than
++ *     32 bits so we need to use 64 bits values for the parsing
++ */
++void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
++					    struct device_node *dev,
++					    int primary)
++{
++	const u32 *ranges;
++	int rlen;
++	int pna = of_n_addr_cells(dev);
++	int np = pna + 5;
++	int memno = 0, isa_hole = -1;
++	u32 pci_space;
++	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
++	unsigned long long isa_mb = 0;
++	struct resource *res;
++
++	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
++	       dev->full_name, primary ? "(primary)" : "");
++
++	/* Get ranges property */
++	ranges = of_get_property(dev, "ranges", &rlen);
++	if (ranges == NULL)
++		return;
++
++	/* Parse it */
++	while ((rlen -= np * 4) >= 0) {
++		/* Read next ranges element */
++		pci_space = ranges[0];
++		pci_addr = of_read_number(ranges + 1, 2);
++		cpu_addr = of_translate_address(dev, ranges + 3);
++		size = of_read_number(ranges + pna + 3, 2);
++		ranges += np;
++		if (cpu_addr == OF_BAD_ADDR || size == 0)
++			continue;
++
++		/* Now consume following elements while they are contiguous */
++		for (; rlen >= np * sizeof(u32);
++		     ranges += np, rlen -= np * 4) {
++			if (ranges[0] != pci_space)
++				break;
++			pci_next = of_read_number(ranges + 1, 2);
++			cpu_next = of_translate_address(dev, ranges + 3);
++			if (pci_next != pci_addr + size ||
++			    cpu_next != cpu_addr + size)
++				break;
++			size += of_read_number(ranges + pna + 3, 2);
++		}
++
++		/* Act based on address space type */
++		res = NULL;
++		switch ((pci_space >> 24) & 0x3) {
++		case 1:		/* PCI IO space */
++			printk(KERN_INFO
++			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
++			       cpu_addr, cpu_addr + size - 1, pci_addr);
++
++			/* We support only one IO range */
++			if (hose->pci_io_size) {
++				printk(KERN_INFO
++				       " \\--> Skipped (too many) !\n");
++				continue;
++			}
++#ifdef CONFIG_PPC32
++			/* On 32 bits, limit I/O space to 16MB */
++			if (size > 0x01000000)
++				size = 0x01000000;
++
++			/* 32 bits needs to map IOs here */
++			hose->io_base_virt = ioremap(cpu_addr, size);
++
++			/* Expect trouble if pci_addr is not 0 */
++			if (primary)
++				isa_io_base =
++					(unsigned long)hose->io_base_virt;
++#endif /* CONFIG_PPC32 */
++			/* pci_io_size and io_base_phys always represent IO
++			 * space starting at 0 so we factor in pci_addr
++			 */
++			hose->pci_io_size = pci_addr + size;
++			hose->io_base_phys = cpu_addr - pci_addr;
++
++			/* Build resource */
++			res = &hose->io_resource;
++			res->flags = IORESOURCE_IO;
++			res->start = pci_addr;
++			break;
++		case 2:		/* PCI Memory space */
++			printk(KERN_INFO
++			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
++			       cpu_addr, cpu_addr + size - 1, pci_addr,
++			       (pci_space & 0x40000000) ? "Prefetch" : "");
++
++			/* We support only 3 memory ranges */
++			if (memno >= 3) {
++				printk(KERN_INFO
++				       " \\--> Skipped (too many) !\n");
++				continue;
++			}
++			/* Handles ISA memory hole space here */
++			if (pci_addr == 0) {
++				isa_mb = cpu_addr;
++				isa_hole = memno;
++				if (primary || isa_mem_base == 0)
++					isa_mem_base = cpu_addr;
++			}
++
++			/* We get the PCI/Mem offset from the first range or
++		 * the current one if the offset came from an ISA
++			 * hole. If they don't match, bugger.
++			 */
++			if (memno == 0 ||
++			    (isa_hole >= 0 && pci_addr != 0 &&
++			     hose->pci_mem_offset == isa_mb))
++				hose->pci_mem_offset = cpu_addr - pci_addr;
++			else if (pci_addr != 0 &&
++				 hose->pci_mem_offset != cpu_addr - pci_addr) {
++				printk(KERN_INFO
++				       " \\--> Skipped (offset mismatch) !\n");
++				continue;
++			}
++
++			/* Build resource */
++			res = &hose->mem_resources[memno++];
++			res->flags = IORESOURCE_MEM;
++			if (pci_space & 0x40000000)
++				res->flags |= IORESOURCE_PREFETCH;
++			res->start = cpu_addr;
++			break;
++		}
++		if (res != NULL) {
++			res->name = dev->full_name;
++			res->end = res->start + size - 1;
++			res->parent = NULL;
++			res->sibling = NULL;
++			res->child = NULL;
++		}
++	}
++
++	/* Out of paranoia, let's put the ISA hole last if any */
++	if (isa_hole >= 0 && memno > 0 && isa_hole != (memno-1)) {
++		struct resource tmp = hose->mem_resources[isa_hole];
++		hose->mem_resources[isa_hole] = hose->mem_resources[memno-1];
++		hose->mem_resources[memno-1] = tmp;
++	}
++}
++
++/* Decide whether to display the domain number in /proc */
++int pci_proc_domain(struct pci_bus *bus)
++{
++	struct pci_controller *hose = pci_bus_to_host(bus);
++#ifdef CONFIG_PPC64
++	return hose->buid != 0;
++#else
++	if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
++		return 0;
++	if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
++		return hose->global_number != 0;
++	return 1;
++#endif
++}
++
++void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
++			     struct resource *res)
++{
++	resource_size_t offset = 0, mask = (resource_size_t)-1;
++	struct pci_controller *hose = pci_bus_to_host(dev->bus);
++
++	if (!hose)
++		return;
++	if (res->flags & IORESOURCE_IO) {
++		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
++		mask = 0xffffffffu;
++	} else if (res->flags & IORESOURCE_MEM)
++		offset = hose->pci_mem_offset;
++
++	region->start = (res->start - offset) & mask;
++	region->end = (res->end - offset) & mask;
++}
++EXPORT_SYMBOL(pcibios_resource_to_bus);
++
++void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
++			     struct pci_bus_region *region)
++{
++	resource_size_t offset = 0, mask = (resource_size_t)-1;
++	struct pci_controller *hose = pci_bus_to_host(dev->bus);
++
++	if (!hose)
++		return;
++	if (res->flags & IORESOURCE_IO) {
++		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
++		mask = 0xffffffffu;
++	} else if (res->flags & IORESOURCE_MEM)
++		offset = hose->pci_mem_offset;
++	res->start = (region->start + offset) & mask;
++	res->end = (region->end + offset) & mask;
++}
++EXPORT_SYMBOL(pcibios_bus_to_resource);
++
++/* Fixup a bus resource into a linux resource */
++static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
++{
++	struct pci_controller *hose = pci_bus_to_host(dev->bus);
++	resource_size_t offset = 0, mask = (resource_size_t)-1;
++
++	if (res->flags & IORESOURCE_IO) {
++		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
++		mask = 0xffffffffu;
++	} else if (res->flags & IORESOURCE_MEM)
++		offset = hose->pci_mem_offset;
++
++	res->start = (res->start + offset) & mask;
++	res->end = (res->end + offset) & mask;
++
++	pr_debug("PCI:%s            %016llx-%016llx\n",
++		 pci_name(dev),
++		 (unsigned long long)res->start,
++		 (unsigned long long)res->end);
++}
++
++
++/* This header fixup will do the resource fixup for all devices as they are
++ * probed, but not for bridge ranges
++ */
++static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
++{
++	struct pci_controller *hose = pci_bus_to_host(dev->bus);
++	int i;
++
++	if (!hose) {
++		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
++		       pci_name(dev));
++		return;
++	}
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++		struct resource *res = dev->resource + i;
++		if (!res->flags)
++			continue;
++		if (res->end == 0xffffffff) {
++			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] is unassigned\n",
++				 pci_name(dev), i,
++				 (unsigned long long)res->start,
++				 (unsigned long long)res->end,
++				 (unsigned int)res->flags);
++			res->end -= res->start;
++			res->start = 0;
++			res->flags |= IORESOURCE_UNSET;
++			continue;
++		}
++
++		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
++			 pci_name(dev), i,
++			 (unsigned long long)res->start,
++			 (unsigned long long)res->end,
++			 (unsigned int)res->flags);
++
++		fixup_resource(res, dev);
++	}
++
++	/* Call machine specific resource fixup */
++	if (ppc_md.pcibios_fixup_resources)
++		ppc_md.pcibios_fixup_resources(dev);
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
++
++static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
++{
++	struct pci_controller *hose = pci_bus_to_host(bus);
++	struct pci_dev *dev = bus->self;
++
++	pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB");
++
++	/* Fixup PCI<->PCI bridges. Host bridges are handled separately, for
++	 * now differently between 32 and 64 bits.
++	 */
++	if (dev != NULL) {
++		struct resource *res;
++		int i;
++
++		for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
++			if ((res = bus->resource[i]) == NULL)
++				continue;
++			if (!res->flags)
++				continue;
++			if (i >= 3 && bus->self->transparent)
++				continue;
++			/* On PowerMac, Apple leaves bridge windows open over
++			 * an inaccessible region of memory space (0...fffff)
++			 * which is somewhat bogus, but that's what they think
++			 * means disabled...
++			 *
++			 * We clear those to force them to be reallocated later
++			 *
++			 * We detect such regions by the fact that the base is
++			 * equal to the pci_mem_offset of the host bridge and
++			 * their size is smaller than 1M.
++			 */
++			if (res->flags & IORESOURCE_MEM &&
++			    res->start == hose->pci_mem_offset &&
++			    res->end < 0x100000) {
++				printk(KERN_INFO
++				       "PCI: Closing bogus Apple Firmware"
++				       " region %d on bus 0x%02x\n",
++				       i, bus->number);
++				res->flags = 0;
++				continue;
++			}
++
++			pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
++				 pci_name(dev), i,
++				 (unsigned long long)res->start,
++				 (unsigned long long)res->end,
++				 (unsigned int)res->flags);
++
++			fixup_resource(res, dev);
++		}
++	}
++
++	/* Additional setup that is different between 32 and 64 bits for now */
++	pcibios_do_bus_setup(bus);
++
++	/* Platform specific bus fixups */
++	if (ppc_md.pcibios_fixup_bus)
++		ppc_md.pcibios_fixup_bus(bus);
++
++	/* Read default IRQs and fixup if necessary */
++	list_for_each_entry(dev, &bus->devices, bus_list) {
++		pci_read_irq_line(dev);
++		if (ppc_md.pci_irq_fixup)
++			ppc_md.pci_irq_fixup(dev);
++	}
++}
++
++void __devinit pcibios_fixup_bus(struct pci_bus *bus)
++{
++	/* When called from the generic PCI probe, read PCI<->PCI bridge
++	 * bases before proceeding
++	 */
++	if (bus->self != NULL)
++		pci_read_bridge_bases(bus);
++	__pcibios_fixup_bus(bus);
++}
++EXPORT_SYMBOL(pcibios_fixup_bus);
++
++/* When building a bus from the OF tree rather than probing, we need a
++ * slightly different version of the fixup which doesn't read the
++ * bridge bases using config space accesses
++ */
++void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus)
++{
++	__pcibios_fixup_bus(bus);
++}
++
++static int skip_isa_ioresource_align(struct pci_dev *dev)
++{
++	if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) &&
++	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
++		return 1;
++	return 0;
++}
++
++/*
++ * We need to avoid collisions with `mirrored' VGA ports
++ * and other strange ISA hardware, so we always want the
++ * addresses to be allocated in the 0x000-0x0ff region
++ * modulo 0x400.
++ *
++ * Why? Because some silly external IO cards only decode
++ * the low 10 bits of the IO address. The 0x00-0xff region
++ * is reserved for motherboard devices that decode all 16
++ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
++ * but we want to try to avoid allocating at 0x2900-0x2bff
++ * which might be mirrored at 0x0100-0x03ff..
++ */
++void pcibios_align_resource(void *data, struct resource *res,
++				resource_size_t size, resource_size_t align)
++{
++	struct pci_dev *dev = data;
++
++	if (res->flags & IORESOURCE_IO) {
++		resource_size_t start = res->start;
++
++		if (skip_isa_ioresource_align(dev))
++			return;
++		if (start & 0x300) {
++			start = (start + 0x3ff) & ~0x3ff;
++			res->start = start;
++		}
++	}
++}
++EXPORT_SYMBOL(pcibios_align_resource);
++
++/*
++ * Reparent resource children of pr that conflict with res
++ * under res, and make res replace those children.
++ */
++static int __init reparent_resources(struct resource *parent,
++				     struct resource *res)
++{
++	struct resource *p, **pp;
++	struct resource **firstpp = NULL;
++
++	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
++		if (p->end < res->start)
++			continue;
++		if (res->end < p->start)
++			break;
++		if (p->start < res->start || p->end > res->end)
++			return -1;	/* not completely contained */
++		if (firstpp == NULL)
++			firstpp = pp;
++	}
++	if (firstpp == NULL)
++		return -1;	/* didn't find any conflicting entries? */
++	res->parent = parent;
++	res->child = *firstpp;
++	res->sibling = *pp;
++	*firstpp = res;
++	*pp = NULL;
++	for (p = res->child; p != NULL; p = p->sibling) {
++		p->parent = res;
++		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
++		    p->name,
++		    (unsigned long long)p->start,
++		    (unsigned long long)p->end, res->name);
++	}
++	return 0;
++}
++
++/*
++ *  Handle resources of PCI devices.  If the world were perfect, we could
++ *  just allocate all the resource regions and do nothing more.  It isn't.
++ *  On the other hand, we cannot just re-allocate all devices, as it would
++ *  require us to know lots of host bridge internals.  So we attempt to
++ *  keep as much of the original configuration as possible, but tweak it
++ *  when it's found to be wrong.
++ *
++ *  Known BIOS problems we have to work around:
++ *	- I/O or memory regions not configured
++ *	- regions configured, but not enabled in the command register
++ *	- bogus I/O addresses above 64K used
++ *	- expansion ROMs left enabled (this may sound harmless, but given
++ *	  the fact the PCI specs explicitly allow address decoders to be
++ *	  shared between expansion ROMs and other resource regions, it's
++ *	  at least dangerous)
++ *
++ *  Our solution:
++ *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
++ *	    This gives us fixed barriers on where we can allocate.
++ *	(2) Allocate resources for all enabled devices.  If there is
++ *	    a collision, just mark the resource as unallocated. Also
++ *	    disable expansion ROMs during this step.
++ *	(3) Try to allocate resources for disabled devices.  If the
++ *	    resources were assigned correctly, everything goes well,
++ *	    if they weren't, they won't disturb allocation of other
++ *	    resources.
++ *	(4) Assign new addresses to resources which were either
++ *	    not configured at all or misconfigured.  If explicitly
++ *	    requested by the user, configure expansion ROM address
++ *	    as well.
++ */
++
++static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
++{
++	struct pci_bus *bus;
++	int i;
++	struct resource *res, *pr;
++
++	/* Depth-First Search on bus tree */
++	list_for_each_entry(bus, bus_list, node) {
++		for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
++			if ((res = bus->resource[i]) == NULL || !res->flags
++			    || res->start > res->end)
++				continue;
++			if (bus->parent == NULL)
++				pr = (res->flags & IORESOURCE_IO) ?
++					&ioport_resource : &iomem_resource;
++			else {
++				/* Don't bother with non-root busses when
++				 * re-assigning all resources. We clear the
++				 * resource flags as if they were colliding
++				 * and as such ensure proper re-allocation
++				 * later.
++				 */
++				if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)
++					goto clear_resource;
++				pr = pci_find_parent_resource(bus->self, res);
++				if (pr == res) {
++					/* this happens when the generic PCI
++					 * code (wrongly) decides that this
++					 * bridge is transparent  -- paulus
++					 */
++					continue;
++				}
++			}
++
++			DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
++			    "[0x%x], parent %p (%s)\n",
++			    bus->self ? pci_name(bus->self) : "PHB",
++			    bus->number, i,
++			    (unsigned long long)res->start,
++			    (unsigned long long)res->end,
++			    (unsigned int)res->flags,
++			    pr, (pr && pr->name) ? pr->name : "nil");
++
++			if (pr && !(pr->flags & IORESOURCE_UNSET)) {
++				if (request_resource(pr, res) == 0)
++					continue;
++				/*
++				 * Must be a conflict with an existing entry.
++				 * Move that entry (or entries) under the
++				 * bridge resource and try again.
++				 */
++				if (reparent_resources(pr, res) == 0)
++					continue;
++			}
++			printk(KERN_WARNING
++			       "PCI: Cannot allocate resource region "
++			       "%d of PCI bridge %d, will remap\n",
++			       i, bus->number);
++clear_resource:
++			res->flags = 0;
++		}
++		pcibios_allocate_bus_resources(&bus->children);
++	}
++}
++
++static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
++{
++	struct resource *pr, *r = &dev->resource[idx];
++
++	DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
++	    pci_name(dev), idx,
++	    (unsigned long long)r->start,
++	    (unsigned long long)r->end,
++	    (unsigned int)r->flags);
++
++	pr = pci_find_parent_resource(dev, r);
++	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
++	    request_resource(pr, r) < 0) {
++		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
++		       " of device %s, will remap\n", idx, pci_name(dev));
++		if (pr)
++			DBG("PCI:  parent is %p: %016llx-%016llx [%x]\n", pr,
++			    (unsigned long long)pr->start,
++			    (unsigned long long)pr->end,
++			    (unsigned int)pr->flags);
++		/* We'll assign a new address later */
++		r->flags |= IORESOURCE_UNSET;
++		r->end -= r->start;
++		r->start = 0;
++	}
++}
++
++static void __init pcibios_allocate_resources(int pass)
++{
++	struct pci_dev *dev = NULL;
++	int idx, disabled;
++	u16 command;
++	struct resource *r;
++
++	for_each_pci_dev(dev) {
++		pci_read_config_word(dev, PCI_COMMAND, &command);
++		for (idx = 0; idx < 6; idx++) {
++			r = &dev->resource[idx];
++			if (r->parent)		/* Already allocated */
++				continue;
++			if (!r->flags || (r->flags & IORESOURCE_UNSET))
++				continue;	/* Not assigned at all */
++			if (r->flags & IORESOURCE_IO)
++				disabled = !(command & PCI_COMMAND_IO);
++			else
++				disabled = !(command & PCI_COMMAND_MEMORY);
++			if (pass == disabled)
++				alloc_resource(dev, idx);
++		}
++		if (pass)
++			continue;
++		r = &dev->resource[PCI_ROM_RESOURCE];
++		if (r->flags & IORESOURCE_ROM_ENABLE) {
++			/* Turn the ROM off, leave the resource region,
++			 * but keep it unregistered.
++			 */
++			u32 reg;
++			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
++			r->flags &= ~IORESOURCE_ROM_ENABLE;
++			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
++			pci_write_config_dword(dev, dev->rom_base_reg,
++					       reg & ~PCI_ROM_ADDRESS_ENABLE);
++		}
++	}
++}
++
++void __init pcibios_resource_survey(void)
++{
++	/* Allocate and assign resources. If we re-assign everything, then
++	 * we skip the allocate phase
++	 */
++	pcibios_allocate_bus_resources(&pci_root_buses);
++
++	if (!(ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)) {
++		pcibios_allocate_resources(0);
++		pcibios_allocate_resources(1);
++	}
++
++	if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
++		DBG("PCI: Assigning unassigned resources...\n");
++		pci_assign_unassigned_resources();
++	}
++
++	/* Call machine dependent fixup */
++	if (ppc_md.pcibios_fixup)
++		ppc_md.pcibios_fixup();
++}
++
++#ifdef CONFIG_HOTPLUG
++/* This is used by the pSeries hotplug driver to allocate resources
++ * of newly plugged busses. We can try to consolidate with the
++ * rest of the code later, for now, keep it as-is
++ */
++void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
++{
++	struct pci_dev *dev;
++	struct pci_bus *child_bus;
++
++	list_for_each_entry(dev, &bus->devices, bus_list) {
++		int i;
++
++		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++			struct resource *r = &dev->resource[i];
++
++			if (r->parent || !r->start || !r->flags)
++				continue;
++			pci_claim_resource(dev, i);
++		}
++	}
++
++	list_for_each_entry(child_bus, &bus->children, node)
++		pcibios_claim_one_bus(child_bus);
++}
++EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
++#endif /* CONFIG_HOTPLUG */
++
++int pcibios_enable_device(struct pci_dev *dev, int mask)
++{
++	u16 cmd, old_cmd;
++	int idx;
++	struct resource *r;
++
++	if (ppc_md.pcibios_enable_device_hook)
++		if (ppc_md.pcibios_enable_device_hook(dev))
++			return -EINVAL;
++
++	pci_read_config_word(dev, PCI_COMMAND, &cmd);
++	old_cmd = cmd;
++	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
++		/* Only set up the requested stuff */
++		if (!(mask & (1 << idx)))
++			continue;
++		r = &dev->resource[idx];
++		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
++			continue;
++		if ((idx == PCI_ROM_RESOURCE) &&
++				(!(r->flags & IORESOURCE_ROM_ENABLE)))
++			continue;
++		if (r->parent == NULL) {
++			printk(KERN_ERR "PCI: Device %s not available because"
++			       " of resource collisions\n", pci_name(dev));
++			return -EINVAL;
++		}
++		if (r->flags & IORESOURCE_IO)
++			cmd |= PCI_COMMAND_IO;
++		if (r->flags & IORESOURCE_MEM)
++			cmd |= PCI_COMMAND_MEMORY;
++	}
++	if (cmd != old_cmd) {
++		printk("PCI: Enabling device %s (%04x -> %04x)\n",
++		       pci_name(dev), old_cmd, cmd);
++		pci_write_config_word(dev, PCI_COMMAND, cmd);
++	}
++	return 0;
++}
++
+diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
+index 0e2bee4..88db4ff 100644
+--- a/arch/powerpc/kernel/pci_32.c
++++ b/arch/powerpc/kernel/pci_32.c
+@@ -13,6 +13,7 @@
+ #include <linux/bootmem.h>
+ #include <linux/irq.h>
+ #include <linux/list.h>
++#include <linux/of.h>
+ 
+ #include <asm/processor.h>
+ #include <asm/io.h>
+@@ -32,19 +33,12 @@
+ #endif
+ 
+ unsigned long isa_io_base     = 0;
+-unsigned long isa_mem_base    = 0;
+ unsigned long pci_dram_offset = 0;
+ int pcibios_assign_bus_offset = 1;
+ 
+ void pcibios_make_OF_bus_map(void);
+ 
+-static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
+-static int probe_resource(struct pci_bus *parent, struct resource *pr,
+-			  struct resource *res, struct resource **conflict);
+-static void update_bridge_base(struct pci_bus *bus, int i);
+-static void pcibios_fixup_resources(struct pci_dev* dev);
+ static void fixup_broken_pcnet32(struct pci_dev* dev);
+-static int reparent_resources(struct resource *parent, struct resource *res);
+ static void fixup_cpc710_pci64(struct pci_dev* dev);
+ #ifdef CONFIG_PPC_OF
+ static u8* pci_to_OF_bus_map;
+@@ -53,7 +47,7 @@ static u8* pci_to_OF_bus_map;
+ /* By default, we don't re-assign bus numbers. We do this only on
+  * some pmacs
+  */
+-int pci_assign_all_buses;
++static int pci_assign_all_buses;
+ 
+ LIST_HEAD(hose_list);
+ 
+@@ -100,505 +94,6 @@ fixup_cpc710_pci64(struct pci_dev* dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CPC710_PCI64,	fixup_cpc710_pci64);
+ 
+-static void
+-pcibios_fixup_resources(struct pci_dev *dev)
+-{
+-	struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
+-	int i;
+-	unsigned long offset;
+-
+-	if (!hose) {
+-		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
+-		return;
+-	}
+-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+-		struct resource *res = dev->resource + i;
+-		if (!res->flags)
+-			continue;
+-		if (res->end == 0xffffffff) {
+-			DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
+-			    pci_name(dev), i, (u64)res->start, (u64)res->end);
+-			res->end -= res->start;
+-			res->start = 0;
+-			res->flags |= IORESOURCE_UNSET;
+-			continue;
+-		}
+-		offset = 0;
+-		if (res->flags & IORESOURCE_MEM) {
+-			offset = hose->pci_mem_offset;
+-		} else if (res->flags & IORESOURCE_IO) {
+-			offset = (unsigned long) hose->io_base_virt
+-				- isa_io_base;
+-		}
+-		if (offset != 0) {
+-			res->start += offset;
+-			res->end += offset;
+-			DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
+-			    i, res->flags, pci_name(dev),
+-			    (u64)res->start - offset, (u64)res->start);
+-		}
+-	}
+-
+-	/* Call machine specific resource fixup */
+-	if (ppc_md.pcibios_fixup_resources)
+-		ppc_md.pcibios_fixup_resources(dev);
+-}
+-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID,		PCI_ANY_ID,			pcibios_fixup_resources);
+-
+-void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+-			struct resource *res)
+-{
+-	unsigned long offset = 0;
+-	struct pci_controller *hose = dev->sysdata;
+-
+-	if (hose && res->flags & IORESOURCE_IO)
+-		offset = (unsigned long)hose->io_base_virt - isa_io_base;
+-	else if (hose && res->flags & IORESOURCE_MEM)
+-		offset = hose->pci_mem_offset;
+-	region->start = res->start - offset;
+-	region->end = res->end - offset;
+-}
+-EXPORT_SYMBOL(pcibios_resource_to_bus);
+-
+-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+-			     struct pci_bus_region *region)
+-{
+-	unsigned long offset = 0;
+-	struct pci_controller *hose = dev->sysdata;
+-
+-	if (hose && res->flags & IORESOURCE_IO)
+-		offset = (unsigned long)hose->io_base_virt - isa_io_base;
+-	else if (hose && res->flags & IORESOURCE_MEM)
+-		offset = hose->pci_mem_offset;
+-	res->start = region->start + offset;
+-	res->end = region->end + offset;
+-}
+-EXPORT_SYMBOL(pcibios_bus_to_resource);
+-
+-/*
+- * We need to avoid collisions with `mirrored' VGA ports
+- * and other strange ISA hardware, so we always want the
+- * addresses to be allocated in the 0x000-0x0ff region
+- * modulo 0x400.
+- *
+- * Why? Because some silly external IO cards only decode
+- * the low 10 bits of the IO address. The 0x00-0xff region
+- * is reserved for motherboard devices that decode all 16
+- * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+- * but we want to try to avoid allocating at 0x2900-0x2bff
+- * which might have be mirrored at 0x0100-0x03ff..
+- */
+-void pcibios_align_resource(void *data, struct resource *res,
+-				resource_size_t size, resource_size_t align)
+-{
+-	struct pci_dev *dev = data;
+-
+-	if (res->flags & IORESOURCE_IO) {
+-		resource_size_t start = res->start;
+-
+-		if (size > 0x100) {
+-			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
+-			       " (%lld bytes)\n", pci_name(dev),
+-			       dev->resource - res, (unsigned long long)size);
+-		}
+-
+-		if (start & 0x300) {
+-			start = (start + 0x3ff) & ~0x3ff;
+-			res->start = start;
+-		}
+-	}
+-}
+-EXPORT_SYMBOL(pcibios_align_resource);
+-
+-/*
+- *  Handle resources of PCI devices.  If the world were perfect, we could
+- *  just allocate all the resource regions and do nothing more.  It isn't.
+- *  On the other hand, we cannot just re-allocate all devices, as it would
+- *  require us to know lots of host bridge internals.  So we attempt to
+- *  keep as much of the original configuration as possible, but tweak it
+- *  when it's found to be wrong.
+- *
+- *  Known BIOS problems we have to work around:
+- *	- I/O or memory regions not configured
+- *	- regions configured, but not enabled in the command register
+- *	- bogus I/O addresses above 64K used
+- *	- expansion ROMs left enabled (this may sound harmless, but given
+- *	  the fact the PCI specs explicitly allow address decoders to be
+- *	  shared between expansion ROMs and other resource regions, it's
+- *	  at least dangerous)
+- *
+- *  Our solution:
+- *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
+- *	    This gives us fixed barriers on where we can allocate.
+- *	(2) Allocate resources for all enabled devices.  If there is
+- *	    a collision, just mark the resource as unallocated. Also
+- *	    disable expansion ROMs during this step.
+- *	(3) Try to allocate resources for disabled devices.  If the
+- *	    resources were assigned correctly, everything goes well,
+- *	    if they weren't, they won't disturb allocation of other
+- *	    resources.
+- *	(4) Assign new addresses to resources which were either
+- *	    not configured at all or misconfigured.  If explicitly
+- *	    requested by the user, configure expansion ROM address
+- *	    as well.
+- */
+-
+-static void __init
+-pcibios_allocate_bus_resources(struct list_head *bus_list)
+-{
+-	struct pci_bus *bus;
+-	int i;
+-	struct resource *res, *pr;
+-
+-	/* Depth-First Search on bus tree */
+-	list_for_each_entry(bus, bus_list, node) {
+-		for (i = 0; i < 4; ++i) {
+-			if ((res = bus->resource[i]) == NULL || !res->flags
+-			    || res->start > res->end)
+-				continue;
+-			if (bus->parent == NULL)
+-				pr = (res->flags & IORESOURCE_IO)?
+-					&ioport_resource: &iomem_resource;
+-			else {
+-				pr = pci_find_parent_resource(bus->self, res);
+-				if (pr == res) {
+-					/* this happens when the generic PCI
+-					 * code (wrongly) decides that this
+-					 * bridge is transparent  -- paulus
+-					 */
+-					continue;
+-				}
+-			}
+-
+-			DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
+-			    (u64)res->start, (u64)res->end, res->flags, pr);
+-			if (pr) {
+-				if (request_resource(pr, res) == 0)
+-					continue;
+-				/*
+-				 * Must be a conflict with an existing entry.
+-				 * Move that entry (or entries) under the
+-				 * bridge resource and try again.
+-				 */
+-				if (reparent_resources(pr, res) == 0)
+-					continue;
+-			}
+-			printk(KERN_ERR "PCI: Cannot allocate resource region "
+-			       "%d of PCI bridge %d\n", i, bus->number);
+-			if (pci_relocate_bridge_resource(bus, i))
+-				bus->resource[i] = NULL;
+-		}
+-		pcibios_allocate_bus_resources(&bus->children);
+-	}
+-}
+-
+-/*
+- * Reparent resource children of pr that conflict with res
+- * under res, and make res replace those children.
+- */
+-static int __init
+-reparent_resources(struct resource *parent, struct resource *res)
+-{
+-	struct resource *p, **pp;
+-	struct resource **firstpp = NULL;
+-
+-	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
+-		if (p->end < res->start)
+-			continue;
+-		if (res->end < p->start)
+-			break;
+-		if (p->start < res->start || p->end > res->end)
+-			return -1;	/* not completely contained */
+-		if (firstpp == NULL)
+-			firstpp = pp;
+-	}
+-	if (firstpp == NULL)
+-		return -1;	/* didn't find any conflicting entries? */
+-	res->parent = parent;
+-	res->child = *firstpp;
+-	res->sibling = *pp;
+-	*firstpp = res;
+-	*pp = NULL;
+-	for (p = res->child; p != NULL; p = p->sibling) {
+-		p->parent = res;
+-		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
+-		    p->name, (u64)p->start, (u64)p->end, res->name);
+-	}
+-	return 0;
+-}
+-
+-/*
+- * A bridge has been allocated a range which is outside the range
+- * of its parent bridge, so it needs to be moved.
+- */
+-static int __init
+-pci_relocate_bridge_resource(struct pci_bus *bus, int i)
+-{
+-	struct resource *res, *pr, *conflict;
+-	unsigned long try, size;
+-	int j;
+-	struct pci_bus *parent = bus->parent;
+-
+-	if (parent == NULL) {
+-		/* shouldn't ever happen */
+-		printk(KERN_ERR "PCI: can't move host bridge resource\n");
+-		return -1;
+-	}
+-	res = bus->resource[i];
+-	if (res == NULL)
+-		return -1;
+-	pr = NULL;
+-	for (j = 0; j < 4; j++) {
+-		struct resource *r = parent->resource[j];
+-		if (!r)
+-			continue;
+-		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
+-			continue;
+-		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
+-			pr = r;
+-			break;
+-		}
+-		if (res->flags & IORESOURCE_PREFETCH)
+-			pr = r;
+-	}
+-	if (pr == NULL)
+-		return -1;
+-	size = res->end - res->start;
+-	if (pr->start > pr->end || size > pr->end - pr->start)
+-		return -1;
+-	try = pr->end;
+-	for (;;) {
+-		res->start = try - size;
+-		res->end = try;
+-		if (probe_resource(bus->parent, pr, res, &conflict) == 0)
+-			break;
+-		if (conflict->start <= pr->start + size)
+-			return -1;
+-		try = conflict->start - 1;
+-	}
+-	if (request_resource(pr, res)) {
+-		DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
+-		    (u64)res->start, (u64)res->end);
+-		return -1;		/* "can't happen" */
+-	}
+-	update_bridge_base(bus, i);
+-	printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
+-	       bus->number, i, (unsigned long long)res->start,
+-	       (unsigned long long)res->end);
+-	return 0;
+-}
+-
+-static int __init
+-probe_resource(struct pci_bus *parent, struct resource *pr,
+-	       struct resource *res, struct resource **conflict)
+-{
+-	struct pci_bus *bus;
+-	struct pci_dev *dev;
+-	struct resource *r;
+-	int i;
+-
+-	for (r = pr->child; r != NULL; r = r->sibling) {
+-		if (r->end >= res->start && res->end >= r->start) {
+-			*conflict = r;
+-			return 1;
+-		}
+-	}
+-	list_for_each_entry(bus, &parent->children, node) {
+-		for (i = 0; i < 4; ++i) {
+-			if ((r = bus->resource[i]) == NULL)
+-				continue;
+-			if (!r->flags || r->start > r->end || r == res)
+-				continue;
+-			if (pci_find_parent_resource(bus->self, r) != pr)
+-				continue;
+-			if (r->end >= res->start && res->end >= r->start) {
+-				*conflict = r;
+-				return 1;
+-			}
+-		}
+-	}
+-	list_for_each_entry(dev, &parent->devices, bus_list) {
+-		for (i = 0; i < 6; ++i) {
+-			r = &dev->resource[i];
+-			if (!r->flags || (r->flags & IORESOURCE_UNSET))
+-				continue;
+-			if (pci_find_parent_resource(dev, r) != pr)
+-				continue;
+-			if (r->end >= res->start && res->end >= r->start) {
+-				*conflict = r;
+-				return 1;
+-			}
+-		}
+-	}
+-	return 0;
+-}
+-
+-void __init
+-update_bridge_resource(struct pci_dev *dev, struct resource *res)
+-{
+-	u8 io_base_lo, io_limit_lo;
+-	u16 mem_base, mem_limit;
+-	u16 cmd;
+-	unsigned long start, end, off;
+-	struct pci_controller *hose = dev->sysdata;
+-
+-	if (!hose) {
+-		printk("update_bridge_base: no hose?\n");
+-		return;
+-	}
+-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+-	pci_write_config_word(dev, PCI_COMMAND,
+-			      cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
+-	if (res->flags & IORESOURCE_IO) {
+-		off = (unsigned long) hose->io_base_virt - isa_io_base;
+-		start = res->start - off;
+-		end = res->end - off;
+-		io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
+-		io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
+-		if (end > 0xffff)
+-			io_base_lo |= PCI_IO_RANGE_TYPE_32;
+-		else
+-			io_base_lo |= PCI_IO_RANGE_TYPE_16;
+-		pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
+-				start >> 16);
+-		pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
+-				end >> 16);
+-		pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
+-		pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
+-
+-	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
+-		   == IORESOURCE_MEM) {
+-		off = hose->pci_mem_offset;
+-		mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
+-		mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
+-		pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
+-		pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
+-
+-	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
+-		   == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
+-		off = hose->pci_mem_offset;
+-		mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
+-		mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
+-		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
+-		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
+-
+-	} else {
+-		DBG(KERN_ERR "PCI: ugh, bridge %s res has flags=%lx\n",
+-		    pci_name(dev), res->flags);
+-	}
+-	pci_write_config_word(dev, PCI_COMMAND, cmd);
+-}
+-
+-static void __init
+-update_bridge_base(struct pci_bus *bus, int i)
+-{
+-	struct resource *res = bus->resource[i];
+-	struct pci_dev *dev = bus->self;
+-	update_bridge_resource(dev, res);
+-}
+-
+-static inline void alloc_resource(struct pci_dev *dev, int idx)
+-{
+-	struct resource *pr, *r = &dev->resource[idx];
+-
+-	DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
+-	    pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
+-	pr = pci_find_parent_resource(dev, r);
+-	if (!pr || request_resource(pr, r) < 0) {
+-		printk(KERN_ERR "PCI: Cannot allocate resource region %d"
+-		       " of device %s\n", idx, pci_name(dev));
+-		if (pr)
+-			DBG("PCI:  parent is %p: %016llx-%016llx (f=%lx)\n",
+-			    pr, (u64)pr->start, (u64)pr->end, pr->flags);
+-		/* We'll assign a new address later */
+-		r->flags |= IORESOURCE_UNSET;
+-		r->end -= r->start;
+-		r->start = 0;
+-	}
+-}
+-
+-static void __init
+-pcibios_allocate_resources(int pass)
+-{
+-	struct pci_dev *dev = NULL;
+-	int idx, disabled;
+-	u16 command;
+-	struct resource *r;
+-
+-	for_each_pci_dev(dev) {
+-		pci_read_config_word(dev, PCI_COMMAND, &command);
+-		for (idx = 0; idx < 6; idx++) {
+-			r = &dev->resource[idx];
+-			if (r->parent)		/* Already allocated */
+-				continue;
+-			if (!r->flags || (r->flags & IORESOURCE_UNSET))
+-				continue;	/* Not assigned at all */
+-			if (r->flags & IORESOURCE_IO)
+-				disabled = !(command & PCI_COMMAND_IO);
+-			else
+-				disabled = !(command & PCI_COMMAND_MEMORY);
+-			if (pass == disabled)
+-				alloc_resource(dev, idx);
+-		}
+-		if (pass)
+-			continue;
+-		r = &dev->resource[PCI_ROM_RESOURCE];
+-		if (r->flags & IORESOURCE_ROM_ENABLE) {
+-			/* Turn the ROM off, leave the resource region, but keep it unregistered. */
+-			u32 reg;
+-			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
+-			r->flags &= ~IORESOURCE_ROM_ENABLE;
+-			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
+-			pci_write_config_dword(dev, dev->rom_base_reg,
+-					       reg & ~PCI_ROM_ADDRESS_ENABLE);
+-		}
+-	}
+-}
+-
+-static void __init
+-pcibios_assign_resources(void)
+-{
+-	struct pci_dev *dev = NULL;
+-	int idx;
+-	struct resource *r;
+-
+-	for_each_pci_dev(dev) {
+-		int class = dev->class >> 8;
+-
+-		/* Don't touch classless devices and host bridges */
+-		if (!class || class == PCI_CLASS_BRIDGE_HOST)
+-			continue;
+-
+-		for (idx = 0; idx < 6; idx++) {
+-			r = &dev->resource[idx];
+-
+-			/*
+-			 * We shall assign a new address to this resource,
+-			 * either because the BIOS (sic) forgot to do so
+-			 * or because we have decided the old address was
+-			 * unusable for some reason.
+-			 */
+-			if ((r->flags & IORESOURCE_UNSET) && r->end &&
+-			    (!ppc_md.pcibios_enable_device_hook ||
+-			     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
+-				int rc;
+-
+-				r->flags &= ~IORESOURCE_UNSET;
+-				rc = pci_assign_resource(dev, idx);
+-				BUG_ON(rc);
+-			}
+-		}
+-
+-#if 0 /* don't assign ROMs */
+-		r = &dev->resource[PCI_ROM_RESOURCE];
+-		r->end -= r->start;
+-		r->start = 0;
+-		if (r->end)
+-			pci_assign_resource(dev, PCI_ROM_RESOURCE);
+-#endif
+-	}
+-}
+-
+ #ifdef CONFIG_PPC_OF
+ /*
+  * Functions below are used on OpenFirmware machines.
+@@ -619,7 +114,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
+ 	} else
+ 		pci_to_OF_bus_map[pci_bus] = bus_range[0];
+ 
+-	for (node=node->child; node != 0;node = node->sibling) {
++	for_each_child_of_node(node, node) {
+ 		struct pci_dev* dev;
+ 		const unsigned int *class_code, *reg;
+ 	
+@@ -662,8 +157,8 @@ pcibios_make_OF_bus_map(void)
+ 
+ 	/* For each hose, we begin searching bridges */
+ 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+-		struct device_node* node;	
+-		node = (struct device_node *)hose->arch_data;
++		struct device_node* node = hose->dn;
++
+ 		if (!node)
+ 			continue;
+ 		make_one_node_map(node, hose->first_busno);
+@@ -688,15 +183,18 @@ pcibios_make_OF_bus_map(void)
+ typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
+ 
+ static struct device_node*
+-scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
++scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data)
+ {
++	struct device_node *node;
+ 	struct device_node* sub_node;
+ 
+-	for (; node != 0;node = node->sibling) {
++	for_each_child_of_node(parent, node) {
+ 		const unsigned int *class_code;
+ 	
+-		if (filter(node, data))
++		if (filter(node, data)) {
++			of_node_put(node);
+ 			return node;
++		}
+ 
+ 		/* For PCI<->PCI bridges or CardBus bridges, we go down
+ 		 * Note: some OFs create a parent node "multifunc-device" as
+@@ -708,9 +206,11 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
+ 			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
+ 			strcmp(node->name, "multifunc-device"))
+ 			continue;
+-		sub_node = scan_OF_pci_childs(node->child, filter, data);
+-		if (sub_node)
++		sub_node = scan_OF_pci_childs(node, filter, data);
++		if (sub_node) {
++			of_node_put(node);
+ 			return sub_node;
++		}
+ 	}
+ 	return NULL;
+ }
+@@ -718,11 +218,11 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
+ static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
+ 					       unsigned int devfn)
+ {
+-	struct device_node *np = NULL;
++	struct device_node *np;
+ 	const u32 *reg;
+ 	unsigned int psize;
+ 
+-	while ((np = of_get_next_child(parent, np)) != NULL) {
++	for_each_child_of_node(parent, np) {
+ 		reg = of_get_property(np, "reg", &psize);
+ 		if (reg == NULL || psize < 4)
+ 			continue;
+@@ -742,7 +242,7 @@ static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
+ 		struct pci_controller *hose = pci_bus_to_host(bus);
+ 		if (hose == NULL)
+ 			return NULL;
+-		return of_node_get(hose->arch_data);
++		return of_node_get(hose->dn);
+ 	}
+ 
+ 	/* not a root bus, we need to get our parent */
+@@ -812,9 +312,9 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
+ 		return -ENODEV;
+ 	/* Make sure it's really a PCI device */
+ 	hose = pci_find_hose_for_OF_device(node);
+-	if (!hose || !hose->arch_data)
++	if (!hose || !hose->dn)
+ 		return -ENODEV;
+-	if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
++	if (!scan_OF_pci_childs(hose->dn,
+ 			find_OF_pci_device_filter, (void *)node))
+ 		return -ENODEV;
+ 	reg = of_get_property(node, "reg", NULL);
+@@ -843,120 +343,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
+ }
+ EXPORT_SYMBOL(pci_device_from_OF_node);
+ 
+-void __init
+-pci_process_bridge_OF_ranges(struct pci_controller *hose,
+-			   struct device_node *dev, int primary)
+-{
+-	static unsigned int static_lc_ranges[256] __initdata;
+-	const unsigned int *dt_ranges;
+-	unsigned int *lc_ranges, *ranges, *prev, size;
+-	int rlen = 0, orig_rlen;
+-	int memno = 0;
+-	struct resource *res;
+-	int np, na = of_n_addr_cells(dev);
+-	np = na + 5;
+-
+-	/* First we try to merge ranges to fix a problem with some pmacs
+-	 * that can have more than 3 ranges, fortunately using contiguous
+-	 * addresses -- BenH
+-	 */
+-	dt_ranges = of_get_property(dev, "ranges", &rlen);
+-	if (!dt_ranges)
+-		return;
+-	/* Sanity check, though hopefully that never happens */
+-	if (rlen > sizeof(static_lc_ranges)) {
+-		printk(KERN_WARNING "OF ranges property too large !\n");
+-		rlen = sizeof(static_lc_ranges);
+-	}
+-	lc_ranges = static_lc_ranges;
+-	memcpy(lc_ranges, dt_ranges, rlen);
+-	orig_rlen = rlen;
+-
+-	/* Let's work on a copy of the "ranges" property instead of damaging
+-	 * the device-tree image in memory
+-	 */
+-	ranges = lc_ranges;
+-	prev = NULL;
+-	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
+-		if (prev) {
+-			if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
+-				(prev[2] + prev[na+4]) == ranges[2] &&
+-				(prev[na+2] + prev[na+4]) == ranges[na+2]) {
+-				prev[na+4] += ranges[na+4];
+-				ranges[0] = 0;
+-				ranges += np;
+-				continue;
+-			}
+-		}
+-		prev = ranges;
+-		ranges += np;
+-	}
+-
+-	/*
+-	 * The ranges property is laid out as an array of elements,
+-	 * each of which comprises:
+-	 *   cells 0 - 2:	a PCI address
+-	 *   cells 3 or 3+4:	a CPU physical address
+-	 *			(size depending on dev->n_addr_cells)
+-	 *   cells 4+5 or 5+6:	the size of the range
+-	 */
+-	ranges = lc_ranges;
+-	rlen = orig_rlen;
+-	while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
+-		res = NULL;
+-		size = ranges[na+4];
+-		switch ((ranges[0] >> 24) & 0x3) {
+-		case 1:		/* I/O space */
+-			if (ranges[2] != 0)
+-				break;
+-			hose->io_base_phys = ranges[na+2];
+-			/* limit I/O space to 16MB */
+-			if (size > 0x01000000)
+-				size = 0x01000000;
+-			hose->io_base_virt = ioremap(ranges[na+2], size);
+-			if (primary)
+-				isa_io_base = (unsigned long) hose->io_base_virt;
+-			res = &hose->io_resource;
+-			res->flags = IORESOURCE_IO;
+-			res->start = ranges[2];
+-			DBG("PCI: IO 0x%llx -> 0x%llx\n",
+-			    (u64)res->start, (u64)res->start + size - 1);
+-			break;
+-		case 2:		/* memory space */
+-			memno = 0;
+-			if (ranges[1] == 0 && ranges[2] == 0
+-			    && ranges[na+4] <= (16 << 20)) {
+-				/* 1st 16MB, i.e. ISA memory area */
+-				if (primary)
+-					isa_mem_base = ranges[na+2];
+-				memno = 1;
+-			}
+-			while (memno < 3 && hose->mem_resources[memno].flags)
+-				++memno;
+-			if (memno == 0)
+-				hose->pci_mem_offset = ranges[na+2] - ranges[2];
+-			if (memno < 3) {
+-				res = &hose->mem_resources[memno];
+-				res->flags = IORESOURCE_MEM;
+-				if(ranges[0] & 0x40000000)
+-					res->flags |= IORESOURCE_PREFETCH;
+-				res->start = ranges[na+2];
+-				DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
+-				    (u64)res->start, (u64)res->start + size - 1);
+-			}
+-			break;
+-		}
+-		if (res != NULL) {
+-			res->name = dev->full_name;
+-			res->end = res->start + size - 1;
+-			res->parent = NULL;
+-			res->sibling = NULL;
+-			res->child = NULL;
+-		}
+-		ranges += np;
+-	}
+-}
+-
+ /* We create the "pci-OF-bus-map" property now so it appears in the
+  * /proc device tree
+  */
+@@ -986,219 +372,7 @@ void pcibios_make_OF_bus_map(void)
+ }
+ #endif /* CONFIG_PPC_OF */
+ 
+-#ifdef CONFIG_PPC_PMAC
+-/*
+- * This set of routines checks for PCI<->PCI bridges that have closed
+- * IO resources and have child devices. It tries to re-open an IO
+- * window on them.
+- *
+- * This is a _temporary_ fix to workaround a problem with Apple's OF
+- * closing IO windows on P2P bridges when the OF drivers of cards
+- * below this bridge don't claim any IO range (typically ATI or
+- * Adaptec).
+- *
+- * A more complete fix would be to use drivers/pci/setup-bus.c, which
+- * involves a working pcibios_fixup_pbus_ranges(), some more care about
+- * ordering when creating the host bus resources, and maybe a few more
+- * minor tweaks
+- */
+-
+-/* Initialize bridges with base/limit values we have collected */
+-static void __init
+-do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
+-{
+-	struct pci_dev *bridge = bus->self;
+-	struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
+-	u32 l;
+-	u16 w;
+-	struct resource res;
+-
+-	if (bus->resource[0] == NULL)
+-		return;
+- 	res = *(bus->resource[0]);
+-
+-	DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
+-	res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
+-	res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
+-	DBG("  IO window: %016llx-%016llx\n", res.start, res.end);
+-
+-	/* Set up the top and bottom of the PCI I/O segment for this bus. */
+-	pci_read_config_dword(bridge, PCI_IO_BASE, &l);
+-	l &= 0xffff000f;
+-	l |= (res.start >> 8) & 0x00f0;
+-	l |= res.end & 0xf000;
+-	pci_write_config_dword(bridge, PCI_IO_BASE, l);
+-
+-	if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
+-		l = (res.start >> 16) | (res.end & 0xffff0000);
+-		pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
+-	}
+-
+-	pci_read_config_word(bridge, PCI_COMMAND, &w);
+-	w |= PCI_COMMAND_IO;
+-	pci_write_config_word(bridge, PCI_COMMAND, w);
+-
+-#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
+-	if (enable_vga) {
+-		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
+-		w |= PCI_BRIDGE_CTL_VGA;
+-		pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
+-	}
+-#endif
+-}
+-
+-/* This function is pretty basic and actually quite broken for the
+- * general case, it's enough for us right now though. It's supposed
+- * to tell us if we need to open an IO range at all or not and what
+- * size.
+- */
+-static int __init
+-check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
+-{
+-	struct pci_dev *dev;
+-	int	i;
+-	int	rc = 0;
+-
+-#define push_end(res, mask) do {		\
+-	BUG_ON((mask+1) & mask);		\
+-	res->end = (res->end + mask) | mask;	\
+-} while (0)
+-
+-	list_for_each_entry(dev, &bus->devices, bus_list) {
+-		u16 class = dev->class >> 8;
+-
+-		if (class == PCI_CLASS_DISPLAY_VGA ||
+-		    class == PCI_CLASS_NOT_DEFINED_VGA)
+-			*found_vga = 1;
+-		if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
+-			rc |= check_for_io_childs(dev->subordinate, res, found_vga);
+-		if (class == PCI_CLASS_BRIDGE_CARDBUS)
+-			push_end(res, 0xfff);
+-
+-		for (i=0; i<PCI_NUM_RESOURCES; i++) {
+-			struct resource *r;
+-			unsigned long r_size;
+-
+-			if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
+-			    && i >= PCI_BRIDGE_RESOURCES)
+-				continue;
+-			r = &dev->resource[i];
+-			r_size = r->end - r->start;
+-			if (r_size < 0xfff)
+-				r_size = 0xfff;
+-			if (r->flags & IORESOURCE_IO && (r_size) != 0) {
+-				rc = 1;
+-				push_end(res, r_size);
+-			}
+-		}
+-	}
+-
+-	return rc;
+-}
+-
+-/* Here we scan all P2P bridges of a given level that have a closed
+- * IO window. Note that the test for the presence of a VGA card should
+- * be improved to take into account already configured P2P bridges,
+- * currently, we don't see them and might end up configuring 2 bridges
+- * with VGA pass through enabled
+- */
+-static void __init
+-do_fixup_p2p_level(struct pci_bus *bus)
+-{
+-	struct pci_bus *b;
+-	int i, parent_io;
+-	int has_vga = 0;
+-
+-	for (parent_io=0; parent_io<4; parent_io++)
+-		if (bus->resource[parent_io]
+-		    && bus->resource[parent_io]->flags & IORESOURCE_IO)
+-			break;
+-	if (parent_io >= 4)
+-		return;
+-
+-	list_for_each_entry(b, &bus->children, node) {
+-		struct pci_dev *d = b->self;
+-		struct pci_controller* hose = (struct pci_controller *)d->sysdata;
+-		struct resource *res = b->resource[0];
+-		struct resource tmp_res;
+-		unsigned long max;
+-		int found_vga = 0;
+-
+-		memset(&tmp_res, 0, sizeof(tmp_res));
+-		tmp_res.start = bus->resource[parent_io]->start;
+-
+-		/* We don't let low addresses go through that closed P2P bridge, well,
+-		 * that may not be necessary but I feel safer that way
+-		 */
+-		if (tmp_res.start == 0)
+-			tmp_res.start = 0x1000;
+-	
+-		if (!list_empty(&b->devices) && res && res->flags == 0 &&
+-		    res != bus->resource[parent_io] &&
+-		    (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
+-		    check_for_io_childs(b, &tmp_res, &found_vga)) {
+-			u8 io_base_lo;
+-
+-			printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
+-
+-			if (found_vga) {
+-				if (has_vga) {
+-					printk(KERN_WARNING "Skipping VGA, already active"
+-					    " on bus segment\n");
+-					found_vga = 0;
+-				} else
+-					has_vga = 1;
+-			}
+-			pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
+-
+-			if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
+-				max = ((unsigned long) hose->io_base_virt
+-					- isa_io_base) + 0xffffffff;
+-			else
+-				max = ((unsigned long) hose->io_base_virt
+-					- isa_io_base) + 0xffff;
+-
+-			*res = tmp_res;
+-			res->flags = IORESOURCE_IO;
+-			res->name = b->name;
+-		
+-			/* Find a resource in the parent where we can allocate */
+-			for (i = 0 ; i < 4; i++) {
+-				struct resource *r = bus->resource[i];
+-				if (!r)
+-					continue;
+-				if ((r->flags & IORESOURCE_IO) == 0)
+-					continue;
+-				DBG("Trying to allocate from %016llx, size %016llx from parent"
+-				    " res %d: %016llx -> %016llx\n",
+-					res->start, res->end, i, r->start, r->end);
+-			
+-				if (allocate_resource(r, res, res->end + 1, res->start, max,
+-				    res->end + 1, NULL, NULL) < 0) {
+-					DBG("Failed !\n");
+-					continue;
+-				}
+-				do_update_p2p_io_resource(b, found_vga);
+-				break;
+-			}
+-		}
+-		do_fixup_p2p_level(b);
+-	}
+-}
+-
+-static void
+-pcibios_fixup_p2p_bridges(void)
+-{
+-	struct pci_bus *b;
+-
+-	list_for_each_entry(b, &pci_root_buses, node)
+-		do_fixup_p2p_level(b);
+-}
+-
+-#endif /* CONFIG_PPC_PMAC */
+-
+-static int __init
+-pcibios_init(void)
++static int __init pcibios_init(void)
+ {
+ 	struct pci_controller *hose, *tmp;
+ 	struct pci_bus *bus;
+@@ -1206,6 +380,9 @@ pcibios_init(void)
+ 
+ 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
+ 
++	if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS)
++		pci_assign_all_buses = 1;
++
+ 	/* Scan all of the recorded PCI controllers.  */
+ 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ 		if (pci_assign_all_buses)
+@@ -1213,9 +390,10 @@ pcibios_init(void)
+ 		hose->last_busno = 0xff;
+ 		bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
+ 					    hose->ops, hose);
+-		if (bus)
++		if (bus) {
+ 			pci_bus_add_devices(bus);
+-		hose->last_busno = bus->subordinate;
++			hose->last_busno = bus->subordinate;
++		}
+ 		if (pci_assign_all_buses || next_busno <= hose->last_busno)
+ 			next_busno = hose->last_busno + pcibios_assign_bus_offset;
+ 	}
+@@ -1228,18 +406,8 @@ pcibios_init(void)
+ 	if (pci_assign_all_buses && have_of)
+ 		pcibios_make_OF_bus_map();
+ 
+-	/* Call machine dependent fixup */
+-	if (ppc_md.pcibios_fixup)
+-		ppc_md.pcibios_fixup();
+-
+-	/* Allocate and assign resources */
+-	pcibios_allocate_bus_resources(&pci_root_buses);
+-	pcibios_allocate_resources(0);
+-	pcibios_allocate_resources(1);
+-#ifdef CONFIG_PPC_PMAC
+-	pcibios_fixup_p2p_bridges();
+-#endif /* CONFIG_PPC_PMAC */
+-	pcibios_assign_resources();
++	/* Call common code to handle resource allocation */
++	pcibios_resource_survey();
+ 
+ 	/* Call machine dependent post-init code */
+ 	if (ppc_md.pcibios_after_init)
+@@ -1250,14 +418,14 @@ pcibios_init(void)
+ 
+ subsys_initcall(pcibios_init);
+ 
+-void pcibios_fixup_bus(struct pci_bus *bus)
++void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
+ {
+ 	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
+ 	unsigned long io_offset;
+ 	struct resource *res;
+-	struct pci_dev *dev;
+ 	int i;
+ 
++	/* Hookup PHB resources */
+ 	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
+ 	if (bus->parent == NULL) {
+ 		/* This is a host bridge - fill in its resources */
+@@ -1272,8 +440,8 @@ void pcibios_fixup_bus(struct pci_bus *bus)
+ 			res->end = IO_SPACE_LIMIT;
+ 			res->flags = IORESOURCE_IO;
+ 		}
+-		res->start += io_offset;
+-		res->end += io_offset;
++		res->start = (res->start + io_offset) & 0xffffffffu;
++		res->end = (res->end + io_offset) & 0xffffffffu;
+ 
+ 		for (i = 0; i < 3; ++i) {
+ 			res = &hose->mem_resources[i];
+@@ -1288,35 +456,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
+ 			}
+ 			bus->resource[i+1] = res;
+ 		}
+-	} else {
+-		/* This is a subordinate bridge */
+-		pci_read_bridge_bases(bus);
+-
+-		for (i = 0; i < 4; ++i) {
+-			if ((res = bus->resource[i]) == NULL)
+-				continue;
+-			if (!res->flags || bus->self->transparent)
+-				continue;
+-			if (io_offset && (res->flags & IORESOURCE_IO)) {
+-				res->start += io_offset;
+-				res->end += io_offset;
+-			} else if (hose->pci_mem_offset
+-				   && (res->flags & IORESOURCE_MEM)) {
+-				res->start += hose->pci_mem_offset;
+-				res->end += hose->pci_mem_offset;
+-			}
+-		}
+-	}
+-
+-	/* Platform specific bus fixups */
+-	if (ppc_md.pcibios_fixup_bus)
+-		ppc_md.pcibios_fixup_bus(bus);
+-
+-	/* Read default IRQs and fixup if necessary */
+-	list_for_each_entry(dev, &bus->devices, bus_list) {
+-		pci_read_irq_line(dev);
+-		if (ppc_md.pci_irq_fixup)
+-			ppc_md.pci_irq_fixup(dev);
+ 	}
+ }
+ 
+@@ -1328,37 +467,6 @@ pcibios_update_irq(struct pci_dev *dev, int irq)
+ 	/* XXX FIXME - update OF device tree node interrupt property */
+ }
+ 
+-int pcibios_enable_device(struct pci_dev *dev, int mask)
+-{
+-	u16 cmd, old_cmd;
+-	int idx;
+-	struct resource *r;
+-
+-	if (ppc_md.pcibios_enable_device_hook)
+-		if (ppc_md.pcibios_enable_device_hook(dev, 0))
+-			return -EINVAL;
+-		
+-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+-	old_cmd = cmd;
+-	for (idx=0; idx<6; idx++) {
+-		r = &dev->resource[idx];
+-		if (r->flags & IORESOURCE_UNSET) {
+-			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
+-			return -EINVAL;
+-		}
+-		if (r->flags & IORESOURCE_IO)
+-			cmd |= PCI_COMMAND_IO;
+-		if (r->flags & IORESOURCE_MEM)
+-			cmd |= PCI_COMMAND_MEMORY;
+-	}
+-	if (cmd != old_cmd) {
+-		printk("PCI: Enabling device %s (%04x -> %04x)\n",
+-		       pci_name(dev), old_cmd, cmd);
+-		pci_write_config_word(dev, PCI_COMMAND, cmd);
+-	}
+-	return 0;
+-}
+-
+ static struct pci_controller*
+ pci_bus_to_hose(int bus)
+ {
+@@ -1381,17 +489,6 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
+ 	struct pci_controller* hose;
+ 	long result = -EOPNOTSUPP;
+ 
+-	/* Argh ! Please forgive me for that hack, but that's the
+-	 * simplest way to get existing XFree to not lockup on some
+-	 * G5 machines... So when something asks for bus 0 io base
+-	 * (bus 0 is HT root), we return the AGP one instead.
+-	 */
+-#ifdef CONFIG_PPC_PMAC
+-	if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
+-		if (bus == 0)
+-			bus = 0xf0;
+-#endif /* CONFIG_PPC_PMAC */
+-
+ 	hose = pci_bus_to_hose(bus);
+ 	if (!hose)
+ 		return -ENODEV;
+diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
+index 9f63bdc..5275074 100644
+--- a/arch/powerpc/kernel/pci_64.c
++++ b/arch/powerpc/kernel/pci_64.c
+@@ -31,7 +31,6 @@
+ #include <asm/byteorder.h>
+ #include <asm/machdep.h>
+ #include <asm/ppc-pci.h>
+-#include <asm/firmware.h>
+ 
+ #ifdef DEBUG
+ #include <asm/udbg.h>
+@@ -41,10 +40,6 @@
+ #endif
+ 
+ unsigned long pci_probe_only = 1;
+-int pci_assign_all_buses = 0;
+-
+-static void fixup_resource(struct resource *res, struct pci_dev *dev);
+-static void do_bus_setup(struct pci_bus *bus);
+ 
+ /* pci_io_base -- the base address from which io bars are offsets.
+  * This is the lowest I/O base address (so bar values are always positive),
+@@ -70,139 +65,31 @@ struct dma_mapping_ops *get_pci_dma_ops(void)
+ }
+ EXPORT_SYMBOL(get_pci_dma_ops);
+ 
+-static void fixup_broken_pcnet32(struct pci_dev* dev)
+-{
+-	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
+-		dev->vendor = PCI_VENDOR_ID_AMD;
+-		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
+-	}
+-}
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
+ 
+-void  pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+-			      struct resource *res)
++int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+ {
+-	unsigned long offset = 0;
+-	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+-
+-	if (!hose)
+-		return;
+-
+-	if (res->flags & IORESOURCE_IO)
+-	        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+-
+-	if (res->flags & IORESOURCE_MEM)
+-		offset = hose->pci_mem_offset;
+-
+-	region->start = res->start - offset;
+-	region->end = res->end - offset;
++	return dma_set_mask(&dev->dev, mask);
+ }
+ 
+-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+-			      struct pci_bus_region *region)
++int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+ {
+-	unsigned long offset = 0;
+-	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+-
+-	if (!hose)
+-		return;
++	int rc;
+ 
+-	if (res->flags & IORESOURCE_IO)
+-	        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
++	rc = dma_set_mask(&dev->dev, mask);
++	dev->dev.coherent_dma_mask = dev->dma_mask;
+ 
+-	if (res->flags & IORESOURCE_MEM)
+-		offset = hose->pci_mem_offset;
+-
+-	res->start = region->start + offset;
+-	res->end = region->end + offset;
++	return rc;
+ }
+ 
+-#ifdef CONFIG_HOTPLUG
+-EXPORT_SYMBOL(pcibios_resource_to_bus);
+-EXPORT_SYMBOL(pcibios_bus_to_resource);
+-#endif
+-
+-/*
+- * We need to avoid collisions with `mirrored' VGA ports
+- * and other strange ISA hardware, so we always want the
+- * addresses to be allocated in the 0x000-0x0ff region
+- * modulo 0x400.
+- *
+- * Why? Because some silly external IO cards only decode
+- * the low 10 bits of the IO address. The 0x00-0xff region
+- * is reserved for motherboard devices that decode all 16
+- * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+- * but we want to try to avoid allocating at 0x2900-0x2bff
+- * which might have be mirrored at 0x0100-0x03ff..
+- */
+-void pcibios_align_resource(void *data, struct resource *res,
+-			    resource_size_t size, resource_size_t align)
+-{
+-	struct pci_dev *dev = data;
+-	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+-	resource_size_t start = res->start;
+-	unsigned long alignto;
+-
+-	if (res->flags & IORESOURCE_IO) {
+-	        unsigned long offset = (unsigned long)hose->io_base_virt -
+-					_IO_BASE;
+-		/* Make sure we start at our min on all hoses */
+-		if (start - offset < PCIBIOS_MIN_IO)
+-			start = PCIBIOS_MIN_IO + offset;
+-
+-		/*
+-		 * Put everything into 0x00-0xff region modulo 0x400
+-		 */
+-		if (start & 0x300)
+-			start = (start + 0x3ff) & ~0x3ff;
+-
+-	} else if (res->flags & IORESOURCE_MEM) {
+-		/* Make sure we start at our min on all hoses */
+-		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
+-			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;
+-
+-		/* Align to multiple of size of minimum base.  */
+-		alignto = max(0x1000UL, align);
+-		start = ALIGN(start, alignto);
+-	}
+-
+-	res->start = start;
+-}
+-
+-void __devinit pcibios_claim_one_bus(struct pci_bus *b)
++static void fixup_broken_pcnet32(struct pci_dev* dev)
+ {
+-	struct pci_dev *dev;
+-	struct pci_bus *child_bus;
+-
+-	list_for_each_entry(dev, &b->devices, bus_list) {
+-		int i;
+-
+-		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+-			struct resource *r = &dev->resource[i];
+-
+-			if (r->parent || !r->start || !r->flags)
+-				continue;
+-			pci_claim_resource(dev, i);
+-		}
++	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
++		dev->vendor = PCI_VENDOR_ID_AMD;
++		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
+ 	}
+-
+-	list_for_each_entry(child_bus, &b->children, node)
+-		pcibios_claim_one_bus(child_bus);
+ }
+-#ifdef CONFIG_HOTPLUG
+-EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
+-#endif
+-
+-static void __init pcibios_claim_of_setup(void)
+-{
+-	struct pci_bus *b;
+-
+-	if (firmware_has_feature(FW_FEATURE_ISERIES))
+-		return;
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
+ 
+-	list_for_each_entry(b, &pci_root_buses, node)
+-		pcibios_claim_one_bus(b);
+-}
+ 
+ static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
+ {
+@@ -270,7 +157,6 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
+ 		res->end = base + size - 1;
+ 		res->flags = flags;
+ 		res->name = pci_name(dev);
+-		fixup_resource(res, dev);
+ 	}
+ }
+ 
+@@ -339,16 +225,17 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
+ EXPORT_SYMBOL(of_create_pci_dev);
+ 
+ void __devinit of_scan_bus(struct device_node *node,
+-				  struct pci_bus *bus)
++			   struct pci_bus *bus)
+ {
+-	struct device_node *child = NULL;
++	struct device_node *child;
+ 	const u32 *reg;
+ 	int reglen, devfn;
+ 	struct pci_dev *dev;
+ 
+ 	DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
+ 
+-	while ((child = of_get_next_child(node, child)) != NULL) {
++	/* Scan direct children */
++	for_each_child_of_node(node, child) {
+ 		DBG("  * %s\n", child->full_name);
+ 		reg = of_get_property(child, "reg", &reglen);
+ 		if (reg == NULL || reglen < 20)
+@@ -359,19 +246,26 @@ void __devinit of_scan_bus(struct device_node *node,
+ 		dev = of_create_pci_dev(child, bus, devfn);
+ 		if (!dev)
+ 			continue;
+-		DBG("dev header type: %x\n", dev->hdr_type);
++		DBG("    dev header type: %x\n", dev->hdr_type);
++	}
+ 
++	/* Apply all fixups */
++	pcibios_fixup_of_probed_bus(bus);
++
++	/* Now scan child busses */
++	list_for_each_entry(dev, &bus->devices, bus_list) {
+ 		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+-		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+-			of_scan_pci_bridge(child, dev);
++		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
++			struct device_node *child = pci_device_to_OF_node(dev);
++			if (child)
++				of_scan_pci_bridge(child, dev);
++		}
+ 	}
+-
+-	do_bus_setup(bus);
+ }
+ EXPORT_SYMBOL(of_scan_bus);
+ 
+ void __devinit of_scan_pci_bridge(struct device_node *node,
+-			 	struct pci_dev *dev)
++				  struct pci_dev *dev)
+ {
+ 	struct pci_bus *bus;
+ 	const u32 *busrange, *ranges;
+@@ -441,7 +335,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
+ 		res->start = of_read_number(&ranges[1], 2);
+ 		res->end = res->start + size - 1;
+ 		res->flags = flags;
+-		fixup_resource(res, dev);
+ 	}
+ 	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
+ 		bus->number);
+@@ -462,12 +355,12 @@ EXPORT_SYMBOL(of_scan_pci_bridge);
+ void __devinit scan_phb(struct pci_controller *hose)
+ {
+ 	struct pci_bus *bus;
+-	struct device_node *node = hose->arch_data;
++	struct device_node *node = hose->dn;
+ 	int i, mode;
+-	struct resource *res;
+ 
+-	DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
++	DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
+ 
++	/* Create an empty bus for the toplevel */
+ 	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
+ 	if (bus == NULL) {
+ 		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+@@ -477,27 +370,27 @@ void __devinit scan_phb(struct pci_controller *hose)
+ 	bus->secondary = hose->first_busno;
+ 	hose->bus = bus;
+ 
+-	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+-		pcibios_map_io_space(bus);
+-
+-	bus->resource[0] = res = &hose->io_resource;
+-	if (res->flags && request_resource(&ioport_resource, res)) {
+-		printk(KERN_ERR "Failed to request PCI IO region "
+-		       "on PCI domain %04x\n", hose->global_number);
+-		DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
+-		    res->start, res->end);
+-	}
++	/* Get some IO space for the new PHB */
++	pcibios_map_io_space(bus);
+ 
++	/* Wire up PHB bus resources */
++	DBG("PCI: PHB IO resource    = %016lx-%016lx [%lx]\n",
++	    hose->io_resource.start, hose->io_resource.end,
++	    hose->io_resource.flags);
++	bus->resource[0] = &hose->io_resource;
+ 	for (i = 0; i < 3; ++i) {
+-		res = &hose->mem_resources[i];
+-		bus->resource[i+1] = res;
+-		if (res->flags && request_resource(&iomem_resource, res))
+-			printk(KERN_ERR "Failed to request PCI memory region "
+-			       "on PCI domain %04x\n", hose->global_number);
++		DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
++		    hose->mem_resources[i].start,
++		    hose->mem_resources[i].end,
++		    hose->mem_resources[i].flags);
++		bus->resource[i+1] = &hose->mem_resources[i];
+ 	}
++	DBG("PCI: PHB MEM offset     = %016lx\n", hose->pci_mem_offset);
++	DBG("PCI: PHB IO  offset     = %08lx\n",
++	    (unsigned long)hose->io_base_virt - _IO_BASE);
+ 
++	/* Get probe mode and perform scan */
+ 	mode = PCI_PROBE_NORMAL;
+-
+ 	if (node && ppc_md.pci_probe_mode)
+ 		mode = ppc_md.pci_probe_mode(bus);
+ 	DBG("    probe mode: %d\n", mode);
+@@ -514,15 +407,15 @@ static int __init pcibios_init(void)
+ {
+ 	struct pci_controller *hose, *tmp;
+ 
++	printk(KERN_INFO "PCI: Probing PCI hardware\n");
++
+ 	/* For now, override phys_mem_access_prot. If we need it,
+ 	 * later, we may move that initialization to each ppc_md
+ 	 */
+ 	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
+ 
+-	if (firmware_has_feature(FW_FEATURE_ISERIES))
+-		iSeries_pcibios_init();
+-
+-	printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
++	if (pci_probe_only)
++		ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
+ 
+ 	/* Scan all of the recorded PCI controllers.  */
+ 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+@@ -530,19 +423,8 @@ static int __init pcibios_init(void)
+ 		pci_bus_add_devices(hose->bus);
+ 	}
+ 
+-	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
+-		if (pci_probe_only)
+-			pcibios_claim_of_setup();
+-		else
+-			/* FIXME: `else' will be removed when
+-			   pci_assign_unassigned_resources() is able to work
+-			   correctly with [partially] allocated PCI tree. */
+-			pci_assign_unassigned_resources();
+-	}
+-
+-	/* Call machine dependent final fixup */
+-	if (ppc_md.pcibios_fixup)
+-		ppc_md.pcibios_fixup();
++	/* Call common code to handle resource allocation */
++	pcibios_resource_survey();
+ 
+ 	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
+ 
+@@ -551,141 +433,6 @@ static int __init pcibios_init(void)
+ 
+ subsys_initcall(pcibios_init);
+ 
+-int pcibios_enable_device(struct pci_dev *dev, int mask)
+-{
+-	u16 cmd, oldcmd;
+-	int i;
+-
+-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+-	oldcmd = cmd;
+-
+-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+-		struct resource *res = &dev->resource[i];
+-
+-		/* Only set up the requested stuff */
+-		if (!(mask & (1<<i)))
+-			continue;
+-
+-		if (res->flags & IORESOURCE_IO)
+-			cmd |= PCI_COMMAND_IO;
+-		if (res->flags & IORESOURCE_MEM)
+-			cmd |= PCI_COMMAND_MEMORY;
+-	}
+-
+-	if (cmd != oldcmd) {
+-		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
+-		       pci_name(dev), cmd);
+-                /* Enable the appropriate bits in the PCI command register.  */
+-		pci_write_config_word(dev, PCI_COMMAND, cmd);
+-	}
+-	return 0;
+-}
+-
+-/* Decide whether to display the domain number in /proc */
+-int pci_proc_domain(struct pci_bus *bus)
+-{
+-	if (firmware_has_feature(FW_FEATURE_ISERIES))
+-		return 0;
+-	else {
+-		struct pci_controller *hose = pci_bus_to_host(bus);
+-		return hose->buid != 0;
+-	}
+-}
+-
+-void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
+-					    struct device_node *dev, int prim)
+-{
+-	const unsigned int *ranges;
+-	unsigned int pci_space;
+-	unsigned long size;
+-	int rlen = 0;
+-	int memno = 0;
+-	struct resource *res;
+-	int np, na = of_n_addr_cells(dev);
+-	unsigned long pci_addr, cpu_phys_addr;
+-
+-	np = na + 5;
+-
+-	/* From "PCI Binding to 1275"
+-	 * The ranges property is laid out as an array of elements,
+-	 * each of which comprises:
+-	 *   cells 0 - 2:	a PCI address
+-	 *   cells 3 or 3+4:	a CPU physical address
+-	 *			(size depending on dev->n_addr_cells)
+-	 *   cells 4+5 or 5+6:	the size of the range
+-	 */
+-	ranges = of_get_property(dev, "ranges", &rlen);
+-	if (ranges == NULL)
+-		return;
+-	hose->io_base_phys = 0;
+-	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
+-		res = NULL;
+-		pci_space = ranges[0];
+-		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
+-		cpu_phys_addr = of_translate_address(dev, &ranges[3]);
+-		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
+-		ranges += np;
+-		if (size == 0)
+-			continue;
+-
+-		/* Now consume following elements while they are contiguous */
+-		while (rlen >= np * sizeof(unsigned int)) {
+-			unsigned long addr, phys;
+-
+-			if (ranges[0] != pci_space)
+-				break;
+-			addr = ((unsigned long)ranges[1] << 32) | ranges[2];
+-			phys = ranges[3];
+-			if (na >= 2)
+-				phys = (phys << 32) | ranges[4];
+-			if (addr != pci_addr + size ||
+-			    phys != cpu_phys_addr + size)
+-				break;
+-
+-			size += ((unsigned long)ranges[na+3] << 32)
+-				| ranges[na+4];
+-			ranges += np;
+-			rlen -= np * sizeof(unsigned int);
+-		}
+-
+-		switch ((pci_space >> 24) & 0x3) {
+-		case 1:		/* I/O space */
+-			hose->io_base_phys = cpu_phys_addr - pci_addr;
+-			/* handle from 0 to top of I/O window */
+-			hose->pci_io_size = pci_addr + size;
+-
+-			res = &hose->io_resource;
+-			res->flags = IORESOURCE_IO;
+-			res->start = pci_addr;
+-			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
+-				    res->start, res->start + size - 1);
+-			break;
+-		case 2:		/* memory space */
+-			memno = 0;
+-			while (memno < 3 && hose->mem_resources[memno].flags)
+-				++memno;
+-
+-			if (memno == 0)
+-				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
+-			if (memno < 3) {
+-				res = &hose->mem_resources[memno];
+-				res->flags = IORESOURCE_MEM;
+-				res->start = cpu_phys_addr;
+-				DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
+-					    res->start, res->start + size - 1);
+-			}
+-			break;
+-		}
+-		if (res != NULL) {
+-			res->name = dev->full_name;
+-			res->end = res->start + size - 1;
+-			res->parent = NULL;
+-			res->sibling = NULL;
+-			res->child = NULL;
+-		}
+-	}
+-}
+-
+ #ifdef CONFIG_HOTPLUG
+ 
+ int pcibios_unmap_io_space(struct pci_bus *bus)
+@@ -719,8 +466,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
+ 	if (hose->io_base_alloc == 0)
+ 		return 0;
+ 
+-	DBG("IO unmapping for PHB %s\n",
+-	    ((struct device_node *)hose->arch_data)->full_name);
++	DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
+ 	DBG("  alloc=0x%p\n", hose->io_base_alloc);
+ 
+ 	/* This is a PHB, we fully unmap the IO area */
+@@ -779,8 +525,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
+ 	hose->io_base_virt = (void __iomem *)(area->addr +
+ 					      hose->io_base_phys - phys_page);
+ 
+-	DBG("IO mapping for PHB %s\n",
+-	    ((struct device_node *)hose->arch_data)->full_name);
++	DBG("IO mapping for PHB %s\n", hose->dn->full_name);
+ 	DBG("  phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+ 	    hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
+ 	DBG("  size=0x%016lx (alloc=0x%016lx)\n",
+@@ -803,51 +548,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
+ }
+ EXPORT_SYMBOL_GPL(pcibios_map_io_space);
+ 
+-static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
+-{
+-	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+-	unsigned long offset;
+-
+-	if (res->flags & IORESOURCE_IO) {
+-		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+-		res->start += offset;
+-		res->end += offset;
+-	} else if (res->flags & IORESOURCE_MEM) {
+-		res->start += hose->pci_mem_offset;
+-		res->end += hose->pci_mem_offset;
+-	}
+-}
+-
+-void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
+-					      struct pci_bus *bus)
+-{
+-	/* Update device resources.  */
+-	int i;
+-
+-	DBG("%s: Fixup resources:\n", pci_name(dev));
+-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+-		struct resource *res = &dev->resource[i];
+-		if (!res->flags)
+-			continue;
+-
+-		DBG("  0x%02x < %08lx:0x%016lx...0x%016lx\n",
+-		    i, res->flags, res->start, res->end);
+-
+-		fixup_resource(res, dev);
+-
+-		DBG("       > %08lx:0x%016lx...0x%016lx\n",
+-		    res->flags, res->start, res->end);
+-	}
+-}
+-EXPORT_SYMBOL(pcibios_fixup_device_resources);
+-
+ void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+ {
+ 	struct dev_archdata *sd = &dev->dev.archdata;
+ 
+ 	sd->of_node = pci_device_to_OF_node(dev);
+ 
+-	DBG("PCI device %s OF node: %s\n", pci_name(dev),
++	DBG("PCI: device %s OF node: %s\n", pci_name(dev),
+ 	    sd->of_node ? sd->of_node->full_name : "<none>");
+ 
+ 	sd->dma_ops = pci_dma_ops;
+@@ -861,7 +568,7 @@ void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+ }
+ EXPORT_SYMBOL(pcibios_setup_new_device);
+ 
+-static void __devinit do_bus_setup(struct pci_bus *bus)
++void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
+ {
+ 	struct pci_dev *dev;
+ 
+@@ -870,42 +577,7 @@ static void __devinit do_bus_setup(struct pci_bus *bus)
+ 
+ 	list_for_each_entry(dev, &bus->devices, bus_list)
+ 		pcibios_setup_new_device(dev);
+-
+-	/* Read default IRQs and fixup if necessary */
+-	list_for_each_entry(dev, &bus->devices, bus_list) {
+-		pci_read_irq_line(dev);
+-		if (ppc_md.pci_irq_fixup)
+-			ppc_md.pci_irq_fixup(dev);
+-	}
+-}
+-
+-void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+-{
+-	struct pci_dev *dev = bus->self;
+-	struct device_node *np;
+-
+-	np = pci_bus_to_OF_node(bus);
+-
+-	DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
+-
+-	if (dev && pci_probe_only &&
+-	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+-		/* This is a subordinate bridge */
+-
+-		pci_read_bridge_bases(bus);
+-		pcibios_fixup_device_resources(dev, bus);
+-	}
+-
+-	do_bus_setup(bus);
+-
+-	if (!pci_probe_only)
+-		return;
+-
+-	list_for_each_entry(dev, &bus->devices, bus_list)
+-		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+-			pcibios_fixup_device_resources(dev, bus);
+ }
+-EXPORT_SYMBOL(pcibios_fixup_bus);
+ 
+ unsigned long pci_address_to_pio(phys_addr_t address)
+ {
+diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
+index b483903..1c67de5 100644
+--- a/arch/powerpc/kernel/pci_dn.c
++++ b/arch/powerpc/kernel/pci_dn.c
+@@ -56,11 +56,6 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
+ 		pdn->busno = (regs[0] >> 16) & 0xff;
+ 		pdn->devfn = (regs[0] >> 8) & 0xff;
+ 	}
+-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+-		const u32 *busp = of_get_property(dn, "linux,subbus", NULL);
+-		if (busp)
+-			pdn->bussubno = *busp;
+-	}
+ 
+ 	pdn->pci_ext_config_space = (type && *type == 1);
+ 	return NULL;
+@@ -133,7 +128,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
+  */
+ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
+ {
+-	struct device_node * dn = (struct device_node *) phb->arch_data;
++	struct device_node *dn = phb->dn;
+ 	struct pci_dn *pdn;
+ 
+ 	/* PHB nodes themselves must not match */
+diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
+index 13ebeb2..aa9ff35 100644
+--- a/arch/powerpc/kernel/ppc_ksyms.c
++++ b/arch/powerpc/kernel/ppc_ksyms.c
+@@ -59,6 +59,7 @@ extern void single_step_exception(struct pt_regs *regs);
+ extern int sys_sigreturn(struct pt_regs *regs);
+ 
+ EXPORT_SYMBOL(clear_pages);
++EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
+ EXPORT_SYMBOL(DMA_MODE_READ);
+ EXPORT_SYMBOL(DMA_MODE_WRITE);
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index acc0d24..8b5efbc 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features(unsigned long node)
+ 		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
+ }
+ 
++#ifdef CONFIG_PPC64
++static void __init check_cpu_slb_size(unsigned long node)
++{
++	u32 *slb_size_ptr;
++
++	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
++	if (slb_size_ptr != NULL) {
++		mmu_slb_size = *slb_size_ptr;
++	}
++}
++#else
++#define check_cpu_slb_size(node) do { } while(0)
++#endif
++
+ static struct feature_property {
+ 	const char *name;
+ 	u32 min_value;
+@@ -600,6 +614,29 @@ static struct feature_property {
+ #endif /* CONFIG_PPC64 */
+ };
+ 
++#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
++static inline void identical_pvr_fixup(unsigned long node)
++{
++	unsigned int pvr;
++	char *model = of_get_flat_dt_prop(node, "model", NULL);
++
++	/*
++	 * Since 440GR(x)/440EP(x) processors have the same pvr,
++	 * we check the node path and set bit 28 in the cur_cpu_spec
++	 * pvr for EP(x) processor version. This bit is always 0 in
++	 * the "real" pvr. Then we call identify_cpu again with
++	 * the new logical pvr to enable FPU support.
++	 */
++	if (model && strstr(model, "440EP")) {
++		pvr = cur_cpu_spec->pvr_value | 0x8;
++		identify_cpu(0, pvr);
++		DBG("Using logical pvr %x for %s\n", pvr, model);
++	}
++}
++#else
++#define identical_pvr_fixup(node) do { } while(0)
++#endif
++
+ static void __init check_cpu_feature_properties(unsigned long node)
+ {
+ 	unsigned long i;
+@@ -697,22 +734,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ 		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+ 		if (prop && (*prop & 0xff000000) == 0x0f000000)
+ 			identify_cpu(0, *prop);
+-#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
+-		/*
+-		 * Since 440GR(x)/440EP(x) processors have the same pvr,
+-		 * we check the node path and set bit 28 in the cur_cpu_spec
+-		 * pvr for EP(x) processor version. This bit is always 0 in
+-		 * the "real" pvr. Then we call identify_cpu again with
+-		 * the new logical pvr to enable FPU support.
+-		 */
+-		if (strstr(uname, "440EP")) {
+-			identify_cpu(0, cur_cpu_spec->pvr_value | 0x8);
+-		}
+-#endif
++
++		identical_pvr_fixup(node);
+ 	}
+ 
+ 	check_cpu_feature_properties(node);
+ 	check_cpu_pa_features(node);
++	check_cpu_slb_size(node);
+ 
+ #ifdef CONFIG_PPC_PSERIES
+ 	if (nthreads > 1)
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 5d89a21..5ab4c84 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -2142,82 +2142,34 @@ static void __init fixup_device_tree_pmac(void)
+ #endif
+ 
+ #ifdef CONFIG_PPC_EFIKA
+-/* The current fw of the Efika has a device tree needs quite a few
+- * fixups to be compliant with the mpc52xx bindings. It's currently
+- * unknown if it will ever be compliant (come on bPlan ...) so we do fixups.
+- * NOTE that we (barely) tolerate it because the EFIKA was out before
+- * the bindings were finished, for any new boards -> RTFM ! */
+-
+-struct subst_entry {
+-	char *path;
+-	char *property;
+-	void *value;
+-	int value_len;
+-};
+-
+-static void __init fixup_device_tree_efika(void)
++/*
++ * The MPC5200 FEC driver requires a phy-handle property to tell it how
++ * to talk to the phy.  If the phy-handle property is missing, then this
++ * function is called to add the appropriate nodes and link it to the
++ * ethernet node.
++ */
++static void __init fixup_device_tree_efika_add_phy(void)
+ {
+-	/* Substitution table */
+-	#define prop_cstr(x) x, sizeof(x)
+-	int prop_sound_irq[3] = { 2, 2, 0 };
+-	int prop_bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
+-	                             3,4,0, 3,5,0, 3,6,0, 3,7,0,
+-	                             3,8,0, 3,9,0, 3,10,0, 3,11,0,
+-	                             3,12,0, 3,13,0, 3,14,0, 3,15,0 };
+-	struct subst_entry efika_subst_table[] = {
+-		{ "/",			"device_type",	prop_cstr("efika") },
+-		{ "/builtin",		"device_type",	prop_cstr("soc") },
+-		{ "/builtin/ata",	"compatible",	prop_cstr("mpc5200b-ata\0mpc5200-ata"), },
+-		{ "/builtin/bestcomm",	"compatible",	prop_cstr("mpc5200b-bestcomm\0mpc5200-bestcomm") },
+-		{ "/builtin/bestcomm",	"interrupts",	prop_bcomm_irq, sizeof(prop_bcomm_irq) },
+-		{ "/builtin/ethernet",	"compatible",	prop_cstr("mpc5200b-fec\0mpc5200-fec") },
+-		{ "/builtin/pic",	"compatible",	prop_cstr("mpc5200b-pic\0mpc5200-pic") },
+-		{ "/builtin/serial",	"compatible",	prop_cstr("mpc5200b-psc-uart\0mpc5200-psc-uart") },
+-		{ "/builtin/sound",	"compatible",	prop_cstr("mpc5200b-psc-ac97\0mpc5200-psc-ac97") },
+-		{ "/builtin/sound",	"interrupts",	prop_sound_irq, sizeof(prop_sound_irq) },
+-		{ "/builtin/sram",	"compatible",	prop_cstr("mpc5200b-sram\0mpc5200-sram") },
+-		{ "/builtin/sram",	"device_type",	prop_cstr("sram") },
+-		{}
+-	};
+-	#undef prop_cstr
+-
+-	/* Vars */
+ 	u32 node;
+ 	char prop[64];
+-	int rv, i;
++	int rv;
+ 
+-	/* Check if we're really running on a EFIKA */
+-	node = call_prom("finddevice", 1, 1, ADDR("/"));
++	/* Check if /builtin/ethernet exists - bail if it doesn't */
++	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
+ 	if (!PHANDLE_VALID(node))
+ 		return;
+ 
+-	rv = prom_getprop(node, "model", prop, sizeof(prop));
+-	if (rv == PROM_ERROR)
+-		return;
+-	if (strcmp(prop, "EFIKA5K2"))
++	/* Check if the phy-handle property exists - bail if it does */
++	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
++	if (!rv)
+ 		return;
+ 
+-	prom_printf("Applying EFIKA device tree fixups\n");
+-
+-	/* Process substitution table */
+-	for (i=0; efika_subst_table[i].path; i++) {
+-		struct subst_entry *se = &efika_subst_table[i];
+-
+-		node = call_prom("finddevice", 1, 1, ADDR(se->path));
+-		if (!PHANDLE_VALID(node)) {
+-			prom_printf("fixup_device_tree_efika: ",
+-				"skipped entry %x - not found\n", i);
+-			continue;
+-		}
+-
+-		rv = prom_setprop(node, se->path, se->property,
+-					se->value, se->value_len );
+-		if (rv == PROM_ERROR)
+-			prom_printf("fixup_device_tree_efika: ",
+-				"skipped entry %x - setprop error\n", i);
+-	}
++	/*
++	 * At this point the ethernet device doesn't have a phy described.
++	 * Now we need to add the missing phy node and linkage
++	 */
+ 
+-	/* Make sure ethernet mdio bus node exists */
++	/* Check for an MDIO bus node - if missing then create one */
+ 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
+ 	if (!PHANDLE_VALID(node)) {
+ 		prom_printf("Adding Ethernet MDIO node\n");
+@@ -2226,8 +2178,8 @@ static void __init fixup_device_tree_efika(void)
+ 			" new-device"
+ 				" 1 encode-int s\" #address-cells\" property"
+ 				" 0 encode-int s\" #size-cells\" property"
+-				" s\" mdio\" 2dup device-name device-type"
+-				" s\" mpc5200b-fec-phy\" encode-string"
++				" s\" mdio\" device-name"
++				" s\" fsl,mpc5200b-mdio\" encode-string"
+ 				" s\" compatible\" property"
+ 				" 0xf0003000 0x400 reg"
+ 				" 0x2 encode-int"
+@@ -2237,8 +2189,10 @@ static void __init fixup_device_tree_efika(void)
+ 			" finish-device");
+ 	};
+ 
+-	/* Make sure ethernet phy device node exist */
+-	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio/ethernet-phy"));
++	/* Check for a PHY device node - if missing then create one and
++	 * give its phandle to the ethernet node */
++	node = call_prom("finddevice", 1, 1,
++			 ADDR("/builtin/mdio/ethernet-phy"));
+ 	if (!PHANDLE_VALID(node)) {
+ 		prom_printf("Adding Ethernet PHY node\n");
+ 		call_prom("interpret", 1, 1,
+@@ -2254,7 +2208,62 @@ static void __init fixup_device_tree_efika(void)
+ 				" s\" phy-handle\" property"
+ 			" device-end");
+ 	}
++}
++
++static void __init fixup_device_tree_efika(void)
++{
++	int sound_irq[3] = { 2, 2, 0 };
++	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
++				3,4,0, 3,5,0, 3,6,0, 3,7,0,
++				3,8,0, 3,9,0, 3,10,0, 3,11,0,
++				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
++	u32 node;
++	char prop[64];
++	int rv, len;
++
++	/* Check if we're really running on an EFIKA */
++	node = call_prom("finddevice", 1, 1, ADDR("/"));
++	if (!PHANDLE_VALID(node))
++		return;
++
++	rv = prom_getprop(node, "model", prop, sizeof(prop));
++	if (rv == PROM_ERROR)
++		return;
++	if (strcmp(prop, "EFIKA5K2"))
++		return;
++
++	prom_printf("Applying EFIKA device tree fixups\n");
++
++	/* Claiming to be 'chrp' is death */
++	node = call_prom("finddevice", 1, 1, ADDR("/"));
++	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
++	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
++		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
++
++	/* Fixup bestcomm interrupts property */
++	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
++	if (PHANDLE_VALID(node)) {
++		len = prom_getproplen(node, "interrupts");
++		if (len == 12) {
++			prom_printf("Fixing bestcomm interrupts property\n");
++			prom_setprop(node, "/builtin/bestcom", "interrupts",
++				     bcomm_irq, sizeof(bcomm_irq));
++		}
++	}
++
++	/* Fixup sound interrupts property */
++	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
++	if (PHANDLE_VALID(node)) {
++		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
++		if (rv == PROM_ERROR) {
++			prom_printf("Adding sound interrupts property\n");
++			prom_setprop(node, "/builtin/sound", "interrupts",
++				     sound_irq, sizeof(sound_irq));
++		}
++	}
+ 
++	/* Make sure ethernet phy-handle property exists */
++	fixup_device_tree_efika_add_phy();
+ }
+ #else
+ #define fixup_device_tree_efika()
+diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
+index b5c96af..90eb3a3 100644
+--- a/arch/powerpc/kernel/prom_parse.c
++++ b/arch/powerpc/kernel/prom_parse.c
+@@ -273,7 +273,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+ #else
+ 			struct pci_controller *host;
+ 			host = pci_bus_to_host(pdev->bus);
+-			ppnode = host ? host->arch_data : NULL;
++			ppnode = host ? host->dn : NULL;
+ #endif
+ 			/* No node for host bridge ? give up */
+ 			if (ppnode == NULL)
+@@ -419,7 +419,7 @@ static struct of_bus *of_match_bus(struct device_node *np)
+ 
+ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ 			    struct of_bus *pbus, u32 *addr,
+-			    int na, int ns, int pna)
++			    int na, int ns, int pna, const char *rprop)
+ {
+ 	const u32 *ranges;
+ 	unsigned int rlen;
+@@ -438,7 +438,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ 	 * to translate addresses that aren't supposed to be translated in
+ 	 * the first place. --BenH.
+ 	 */
+-	ranges = of_get_property(parent, "ranges", &rlen);
++	ranges = of_get_property(parent, rprop, &rlen);
+ 	if (ranges == NULL || rlen == 0) {
+ 		offset = of_read_number(addr, na);
+ 		memset(addr, 0, pna * 4);
+@@ -481,7 +481,8 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+  * that can be mapped to a cpu physical address). This is not really specified
+  * that way, but this is traditionally the way IBM at least do things
+  */
+-u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
++u64 __of_translate_address(struct device_node *dev, const u32 *in_addr,
++			   const char *rprop)
+ {
+ 	struct device_node *parent = NULL;
+ 	struct of_bus *bus, *pbus;
+@@ -540,7 +541,7 @@ u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
+ 		    pbus->name, pna, pns, parent->full_name);
+ 
+ 		/* Apply bus translation */
+-		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna))
++		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
+ 			break;
+ 
+ 		/* Complete the move up one level */
+@@ -556,8 +557,19 @@ u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
+ 
+ 	return result;
+ }
++
++u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
++{
++	return __of_translate_address(dev, in_addr, "ranges");
++}
+ EXPORT_SYMBOL(of_translate_address);
+ 
++u64 of_translate_dma_address(struct device_node *dev, const u32 *in_addr)
++{
++	return __of_translate_address(dev, in_addr, "dma-ranges");
++}
++EXPORT_SYMBOL(of_translate_dma_address);
++
+ const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
+ 		    unsigned int *flags)
+ {
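__of_translate_address() above generalises the translation walk so either "ranges" or "dma-ranges" can be followed toward the root. A toy, single-hop version of what one ranges entry does, with made-up one-cell addresses (the kernel handles multi-cell addresses and walks every parent bus):

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t child; uint64_t parent; uint64_t size; };

/* Re-base a child-bus address into the parent bus if this entry covers it */
static int translate_one(const struct range *r, uint64_t addr, uint64_t *out)
{
	if (addr < r->child || addr >= r->child + r->size)
		return -1;
	*out = addr - r->child + r->parent;
	return 0;
}

int main(void)
{
	struct range r = { 0x0, 0xe0000000, 0x10000000 };	/* example entry */
	uint64_t out;

	if (!translate_one(&r, 0x1000, &out))
		printf("child 0x1000 -> parent 0x%llx\n", (unsigned long long)out);
	return 0;
}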
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 3e17d15..8b056d2 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -256,7 +256,7 @@ static int set_evrregs(struct task_struct *task, unsigned long *data)
+ #endif /* CONFIG_SPE */
+ 
+ 
+-static void set_single_step(struct task_struct *task)
++void user_enable_single_step(struct task_struct *task)
+ {
+ 	struct pt_regs *regs = task->thread.regs;
+ 
+@@ -271,7 +271,7 @@ static void set_single_step(struct task_struct *task)
+ 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
+ }
+ 
+-static void clear_single_step(struct task_struct *task)
++void user_disable_single_step(struct task_struct *task)
+ {
+ 	struct pt_regs *regs = task->thread.regs;
+ 
+@@ -313,7 +313,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
+ void ptrace_disable(struct task_struct *child)
+ {
+ 	/* make sure the single step bit is not set. */
+-	clear_single_step(child);
++	user_disable_single_step(child);
+ }
+ 
+ /*
+@@ -445,52 +445,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+ 		break;
+ 	}
+ 
+-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+-	case PTRACE_CONT: { /* restart after signal. */
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		if (request == PTRACE_SYSCALL)
+-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		else
+-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		child->exit_code = data;
+-		/* make sure the single step bit is not set. */
+-		clear_single_step(child);
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
+-	}
+-
+-/*
+- * make the child exit.  Best I can do is send it a sigkill.
+- * perhaps it should be put in the status that it wants to
+- * exit.
+- */
+-	case PTRACE_KILL: {
+-		ret = 0;
+-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
+-			break;
+-		child->exit_code = SIGKILL;
+-		/* make sure the single step bit is not set. */
+-		clear_single_step(child);
+-		wake_up_process(child);
+-		break;
+-	}
+-
+-	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		set_single_step(child);
+-		child->exit_code = data;
+-		/* give it a chance to run. */
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
+-	}
+-
+ 	case PTRACE_GET_DEBUGREG: {
+ 		ret = -EINVAL;
+ 		/* We only support one DABR and no IABRS at the moment */
+diff --git a/arch/powerpc/kernel/rio.c b/arch/powerpc/kernel/rio.c
+new file mode 100644
+index 0000000..29487fe
+--- /dev/null
++++ b/arch/powerpc/kernel/rio.c
+@@ -0,0 +1,52 @@
++/*
++ * RapidIO PPC32 support
++ *
++ * Copyright 2005 MontaVista Software, Inc.
++ * Matt Porter <mporter at kernel.crashing.org>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/rio.h>
++
++#include <asm/rio.h>
++
++/**
++ * platform_rio_init - Do platform specific RIO init
++ *
++ * Any platform specific initialization of RapidIO
++ * hardware is done here as well as registration
++ * of any active master ports in the system.
++ */
++void __attribute__ ((weak))
++    platform_rio_init(void)
++{
++	printk(KERN_WARNING "RIO: No platform_rio_init() present\n");
++}
++
++/**
++ * ppc_rio_init - Do PPC32 RIO init
++ *
++ * Calls platform-specific RIO init code and then calls
++ * rio_init_mports() to initialize any master ports that
++ * have been registered with the RIO subsystem.
++ */
++static int __init ppc_rio_init(void)
++{
++	printk(KERN_INFO "RIO: RapidIO init\n");
++
++	/* Platform specific initialization */
++	platform_rio_init();
++
++	/* Enumerate all registered ports */
++	rio_init_mports();
++
++	return 0;
++}
++
++subsys_initcall(ppc_rio_init);
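platform_rio_init() in the new rio.c is defined weak so a board file can override it while a harmless default remains for everyone else. The same weak-symbol pattern in plain userspace C (GCC/Clang), with hypothetical names:

#include <stdio.h>

/* Default implementation; any strong definition in another object file wins */
void __attribute__((weak)) platform_hook(void)
{
	printf("default (weak) platform_hook\n");
}

int main(void)
{
	platform_hook();
	return 0;
}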
+diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
+index 21f14e5..433a0a0 100644
+--- a/arch/powerpc/kernel/rtas_pci.c
++++ b/arch/powerpc/kernel/rtas_pci.c
+@@ -260,7 +260,7 @@ static int phb_set_bus_ranges(struct device_node *dev,
+ 
+ int __devinit rtas_setup_phb(struct pci_controller *phb)
+ {
+-	struct device_node *dev = phb->arch_data;
++	struct device_node *dev = phb->dn;
+ 
+ 	if (is_python(dev))
+ 		python_countermeasures(dev);
+@@ -280,10 +280,7 @@ void __init find_and_init_phbs(void)
+ 	struct pci_controller *phb;
+ 	struct device_node *root = of_find_node_by_path("/");
+ 
+-	for (node = of_get_next_child(root, NULL);
+-	     node != NULL;
+-	     node = of_get_next_child(root, node)) {
+-
++	for_each_child_of_node(root, node) {
+ 		if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
+ 					   strcmp(node->type, "pciex") != 0))
+ 			continue;
+@@ -311,10 +308,12 @@ void __init find_and_init_phbs(void)
+ 		if (prop)
+ 			pci_probe_only = *prop;
+ 
++#ifdef CONFIG_PPC32 /* Will be made generic soon */
+ 		prop = of_get_property(of_chosen,
+ 				"linux,pci-assign-all-buses", NULL);
+-		if (prop)
+-			pci_assign_all_buses = *prop;
++		if (prop && *prop)
++			ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
++#endif /* CONFIG_PPC32 */
+ 	}
+ }
+ 
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 2de00f8..6adb5a1 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -33,6 +33,7 @@
+ #include <linux/serial.h>
+ #include <linux/serial_8250.h>
+ #include <linux/debugfs.h>
++#include <linux/percpu.h>
+ #include <asm/io.h>
+ #include <asm/prom.h>
+ #include <asm/processor.h>
+@@ -57,6 +58,7 @@
+ #include <asm/mmu.h>
+ #include <asm/lmb.h>
+ #include <asm/xmon.h>
++#include <asm/cputhreads.h>
+ 
+ #include "setup.h"
+ 
+@@ -327,6 +329,31 @@ void __init check_for_initrd(void)
+ 
+ #ifdef CONFIG_SMP
+ 
++int threads_per_core, threads_shift;
++cpumask_t threads_core_mask;
++
++static void __init cpu_init_thread_core_maps(int tpc)
++{
++	int i;
++
++	threads_per_core = tpc;
++	threads_core_mask = CPU_MASK_NONE;
++
++	/* This implementation only supports power of 2 number of threads
++	 * for simplicity and performance
++	 */
++	threads_shift = ilog2(tpc);
++	BUG_ON(tpc != (1 << threads_shift));
++
++	for (i = 0; i < tpc; i++)
++		cpu_set(i, threads_core_mask);
++
++	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
++	       tpc, tpc > 1 ? "s" : "");
++	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
++}
++
++
+ /**
+  * setup_cpu_maps - initialize the following cpu maps:
+  *                  cpu_possible_map
+@@ -350,22 +377,32 @@ void __init smp_setup_cpu_maps(void)
+ {
+ 	struct device_node *dn = NULL;
+ 	int cpu = 0;
++	int nthreads = 1;
++
++	DBG("smp_setup_cpu_maps()\n");
+ 
+ 	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
+ 		const int *intserv;
+-		int j, len = sizeof(u32), nthreads = 1;
++		int j, len;
++
++		DBG("  * %s...\n", dn->full_name);
+ 
+ 		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
+ 				&len);
+-		if (intserv)
++		if (intserv) {
+ 			nthreads = len / sizeof(int);
+-		else {
++			DBG("    ibm,ppc-interrupt-server#s -> %d threads\n",
++			    nthreads);
++		} else {
++			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
+ 			intserv = of_get_property(dn, "reg", NULL);
+ 			if (!intserv)
+ 				intserv = &cpu;	/* assume logical == phys */
+ 		}
+ 
+ 		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
++			DBG("    thread %d -> cpu %d (hard id %d)\n",
++			    j, cpu, intserv[j]);
+ 			cpu_set(cpu, cpu_present_map);
+ 			set_hard_smp_processor_id(cpu, intserv[j]);
+ 			cpu_set(cpu, cpu_possible_map);
+@@ -373,6 +410,12 @@ void __init smp_setup_cpu_maps(void)
+ 		}
+ 	}
+ 
++	/* If no SMT supported, nthreads is forced to 1 */
++	if (!cpu_has_feature(CPU_FTR_SMT)) {
++		DBG("  SMT disabled ! nthreads forced to 1\n");
++		nthreads = 1;
++	}
++
+ #ifdef CONFIG_PPC64
+ 	/*
+ 	 * On pSeries LPAR, we need to know how many cpus
+@@ -395,7 +438,7 @@ void __init smp_setup_cpu_maps(void)
+ 
+ 		/* Double maxcpus for processors which have SMT capability */
+ 		if (cpu_has_feature(CPU_FTR_SMT))
+-			maxcpus *= 2;
++			maxcpus *= nthreads;
+ 
+ 		if (maxcpus > NR_CPUS) {
+ 			printk(KERN_WARNING
+@@ -412,9 +455,16 @@ void __init smp_setup_cpu_maps(void)
+ 	out:
+ 		of_node_put(dn);
+ 	}
+-
+ 	vdso_data->processorCount = num_present_cpus();
+ #endif /* CONFIG_PPC64 */
++
++	/* Initialize CPU <=> thread mapping
++	 *
++	 * WARNING: We assume that the number of threads is the same for
++	 * every CPU in the system. If that is not the case, then some code
++	 * here will have to be reworked
++	 */
++	cpu_init_thread_core_maps(nthreads);
+ }
+ 
+ /*
+@@ -424,17 +474,19 @@ void __init smp_setup_cpu_maps(void)
+  */
+ void __init smp_setup_cpu_sibling_map(void)
+ {
+-#if defined(CONFIG_PPC64)
+-	int cpu;
++#ifdef CONFIG_PPC64
++	int i, cpu, base;
+ 
+-	/*
+-	 * Do the sibling map; assume only two threads per processor.
+-	 */
+ 	for_each_possible_cpu(cpu) {
+-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+-		if (cpu_has_feature(CPU_FTR_SMT))
+-			cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
++		DBG("Sibling map for CPU %d:", cpu);
++		base = cpu_first_thread_in_core(cpu);
++		for (i = 0; i < threads_per_core; i++) {
++			cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
++			DBG(" %d", base + i);
++		}
++		DBG("\n");
+ 	}
++
+ #endif /* CONFIG_PPC64 */
+ }
+ #endif /* CONFIG_SMP */
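cpu_init_thread_core_maps() above assumes a power-of-two thread count per core and derives threads_shift via ilog2(); the reworked sibling-map code then groups CPUs by core with cpu_first_thread_in_core(). A userspace sketch of that arithmetic, assuming first-thread-in-core is cpu & ~(tpc - 1):

#include <stdio.h>

int main(void)
{
	int tpc = 4;			/* threads per core, must be a power of 2 */
	int shift = 0, cpu;

	while ((1 << shift) < tpc)	/* poor man's ilog2() */
		shift++;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %d: core %d, first thread %d\n",
		       cpu, cpu >> shift, cpu & ~(tpc - 1));
	return 0;
}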
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 6126bca..d840bc7 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -24,13 +24,12 @@
+ #include <linux/signal.h>
+ #include <linux/errno.h>
+ #include <linux/elf.h>
++#include <linux/ptrace.h>
+ #ifdef CONFIG_PPC64
+ #include <linux/syscalls.h>
+ #include <linux/compat.h>
+-#include <linux/ptrace.h>
+ #else
+ #include <linux/wait.h>
+-#include <linux/ptrace.h>
+ #include <linux/unistd.h>
+ #include <linux/stddef.h>
+ #include <linux/tty.h>
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 338950a..be35ffa 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -76,6 +76,8 @@ void smp_call_function_interrupt(void);
+ 
+ int smt_enabled_at_boot = 1;
+ 
++static int ipi_fail_ok;
++
+ static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
+ 
+ #ifdef CONFIG_PPC64
+@@ -181,12 +183,13 @@ static struct call_data_struct {
+  * <wait> If true, wait (atomically) until function has completed on other CPUs.
+  * [RETURNS] 0 on success, else a negative status code. Does not return until
+  * remote CPUs are nearly ready to execute <<func>> or are or have executed.
++ * <map> is a cpu map of the cpus to send IPI to.
+  *
+  * You must not call this function with disabled interrupts or from a
+  * hardware interrupt handler or from a bottom half handler.
+  */
+-int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
+-			int wait, cpumask_t map)
++static int __smp_call_function_map(void (*func) (void *info), void *info,
++				   int nonatomic, int wait, cpumask_t map)
+ {
+ 	struct call_data_struct data;
+ 	int ret = -1, num_cpus;
+@@ -203,8 +206,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
+ 	if (wait)
+ 		atomic_set(&data.finished, 0);
+ 
+-	spin_lock(&call_lock);
+-
+ 	/* remove 'self' from the map */
+ 	if (cpu_isset(smp_processor_id(), map))
+ 		cpu_clear(smp_processor_id(), map);
+@@ -231,7 +232,8 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
+ 			printk("smp_call_function on cpu %d: other cpus not "
+ 				"responding (%d)\n", smp_processor_id(),
+ 				atomic_read(&data.started));
+-			debugger(NULL);
++			if (!ipi_fail_ok)
++				debugger(NULL);
+ 			goto out;
+ 		}
+ 	}
+@@ -258,14 +260,18 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
+  out:
+ 	call_data = NULL;
+ 	HMT_medium();
+-	spin_unlock(&call_lock);
+ 	return ret;
+ }
+ 
+ static int __smp_call_function(void (*func)(void *info), void *info,
+ 			       int nonatomic, int wait)
+ {
+-	return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map);
++	int ret;
++	spin_lock(&call_lock);
++	ret = __smp_call_function_map(func, info, nonatomic, wait,
++				       cpu_online_map);
++	spin_unlock(&call_lock);
++	return ret;
+ }
+ 
+ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+@@ -278,8 +284,8 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ }
+ EXPORT_SYMBOL(smp_call_function);
+ 
+-int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int nonatomic,
+-			int wait)
++int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
++			     int nonatomic, int wait)
+ {
+ 	cpumask_t map = CPU_MASK_NONE;
+ 	int ret = 0;
+@@ -291,9 +297,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int
+ 		return -EINVAL;
+ 
+ 	cpu_set(cpu, map);
+-	if (cpu != get_cpu())
+-		ret = smp_call_function_map(func,info,nonatomic,wait,map);
+-	else {
++	if (cpu != get_cpu()) {
++		spin_lock(&call_lock);
++		ret = __smp_call_function_map(func, info, nonatomic, wait, map);
++		spin_unlock(&call_lock);
++	} else {
+ 		local_irq_disable();
+ 		func(info);
+ 		local_irq_enable();
+@@ -305,7 +313,22 @@ EXPORT_SYMBOL(smp_call_function_single);
+ 
+ void smp_send_stop(void)
+ {
+-	__smp_call_function(stop_this_cpu, NULL, 1, 0);
++	int nolock;
++
++	/* It's OK to fail sending the IPI, since the alternative is to
++	 * be stuck forever waiting on the other CPU to take the interrupt.
++	 *
++	 * It's better to at least continue and go through reboot, since this
++	 * function is usually called at panic or reboot time in the first
++	 * place.
++	 */
++	ipi_fail_ok = 1;
++
++	/* Don't deadlock in case we got called through panic */
++	nolock = !spin_trylock(&call_lock);
++	__smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
++	if (!nolock)
++		spin_unlock(&call_lock);
+ }
+ 
+ void smp_call_function_interrupt(void)
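The smp_send_stop() rework above uses spin_trylock() so a CPU that panics while already holding call_lock can still send the stop IPI instead of deadlocking. The same shape in userspace, with a pthread mutex standing in for the spinlock (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;

static void send_stop(void)
{
	int nolock = pthread_mutex_trylock(&call_lock) != 0;

	printf("sending stop%s\n", nolock ? " (lock already held)" : "");
	/* ...the IPI would be sent here, lock or no lock... */
	if (!nolock)
		pthread_mutex_unlock(&call_lock);
}

int main(void)
{
	send_stop();
	return 0;
}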
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index 25d9a96..c8127f8 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -158,7 +158,7 @@ static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+ 	unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
+ 	return sprintf(buf, "%lx\n", val); \
+ } \
+-static ssize_t __attribute_used__ \
++static ssize_t __used \
+ 	store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
+ { \
+ 	struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
+diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
+new file mode 100644
+index 0000000..238aa63
+--- /dev/null
++++ b/arch/powerpc/kernel/systbl_chk.c
+@@ -0,0 +1,58 @@
++/*
++ * This file, when run through CPP, produces a list of syscall numbers
++ * in the order of systbl.h.  That way we can check for gaps and syscalls
++ * that are out of order.
++ *
++ * Unfortunately, we cannot check for the correct ordering of entries
++ * using SYSX().
++ *
++ * Copyright © IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++#include <asm/unistd.h>
++
++#define SYSCALL(func)		__NR_##func
++#define COMPAT_SYS(func)	__NR_##func
++#define PPC_SYS(func)		__NR_##func
++#ifdef CONFIG_PPC64
++#define OLDSYS(func)		-1
++#define SYS32ONLY(func)		-1
++#else
++#define OLDSYS(func)		__NR_old##func
++#define SYS32ONLY(func)		__NR_##func
++#endif
++#define SYSX(f, f3264, f32)	-1
++
++#define SYSCALL_SPU(func)	SYSCALL(func)
++#define COMPAT_SYS_SPU(func)	COMPAT_SYS(func)
++#define PPC_SYS_SPU(func)	PPC_SYS(func)
++#define SYSX_SPU(f, f3264, f32)	SYSX(f, f3264, f32)
++
++/* Just insert a marker for ni_syscalls */
++#define	__NR_ni_syscall		-1
++
++/*
++ * These are the known exceptions.
++ * Hopefully, there will be no more.
++ */
++#define	__NR_llseek		__NR__llseek
++#undef	__NR_umount
++#define	__NR_umount		__NR_umount2
++#define	__NR_old_getrlimit	__NR_getrlimit
++#define	__NR_newstat		__NR_stat
++#define	__NR_newlstat		__NR_lstat
++#define	__NR_newfstat		__NR_fstat
++#define	__NR_newuname		__NR_uname
++#define	__NR_sysctl		__NR__sysctl
++#define __NR_olddebug_setcontext	__NR_sys_debug_setcontext
++
++/* We call sys_ugetrlimit for syscall number __NR_getrlimit */
++#define getrlimit		ugetrlimit
++
++START_TABLE
++#include <asm/systbl.h>
++END_TABLE __NR_syscalls
+diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh
+new file mode 100644
+index 0000000..19415e7
+--- /dev/null
++++ b/arch/powerpc/kernel/systbl_chk.sh
+@@ -0,0 +1,33 @@
++#!/bin/sh
++#
++# Just process the CPP output from systbl_chk.c and complain
++# if anything is out of order.
++#
++# Copyright © 2008 IBM Corporation
++#
++# This program is free software; you can redistribute it and/or
++# modify it under the terms of the GNU General Public License
++# as published by the Free Software Foundation; either version
++# 2 of the License, or (at your option) any later version.
++
++awk	'BEGIN { num = -1; }	# Ignore the beginning of the file
++	/^#/ { next; }
++	/^[ \t]*$/ { next; }
++	/^START_TABLE/ { num = 0; next; }
++	/^END_TABLE/ {
++		if (num != $2) {
++			printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n",
++				$2, num - 1;
++			exit(1);
++		}
++		num = -1;	# Ignore the rest of the file
++	}
++	{
++		if (num == -1) next;
++		if (($1 != -1) && ($1 != num)) {
++			printf "Syscall %s out of order (expected %s)\n",
++				$1, num;
++			exit(1);
++		};
++		num++;
++	}' "$1"
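The script above checks that the CPP output of systbl_chk.c counts up from 0, treats -1 as a placeholder, and ends with __NR_syscalls exactly one past the last entry. For illustration only, the same ordering invariant as a small C filter reading one number per line from stdin (the build itself runs only the awk script):

#include <stdio.h>

int main(void)
{
	long val, expected = 0;

	while (scanf("%ld", &val) == 1) {
		if (val != -1 && val != expected) {
			printf("Syscall %ld out of order (expected %ld)\n",
			       val, expected);
			return 1;
		}
		expected++;
	}
	return 0;
}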
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index a925a8e..5cd3db5 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -116,9 +116,12 @@ static struct clock_event_device decrementer_clockevent = {
+        .features       = CLOCK_EVT_FEAT_ONESHOT,
+ };
+ 
+-static DEFINE_PER_CPU(struct clock_event_device, decrementers);
+-void init_decrementer_clockevent(void);
+-static DEFINE_PER_CPU(u64, decrementer_next_tb);
++struct decrementer_clock {
++	struct clock_event_device event;
++	u64 next_tb;
++};
++
++static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
+ 
+ #ifdef CONFIG_PPC_ISERIES
+ static unsigned long __initdata iSeries_recal_titan;
+@@ -216,7 +219,11 @@ static u64 read_purr(void)
+  */
+ static u64 read_spurr(u64 purr)
+ {
+-	if (cpu_has_feature(CPU_FTR_SPURR))
++	/*
++	 * cpus without PURR won't have a SPURR
++	 * We already know the former when we use this, so tell gcc
++	 */
++	if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
+ 		return mfspr(SPRN_SPURR);
+ 	return purr;
+ }
+@@ -227,29 +234,30 @@ static u64 read_spurr(u64 purr)
+  */
+ void account_system_vtime(struct task_struct *tsk)
+ {
+-	u64 now, nowscaled, delta, deltascaled;
++	u64 now, nowscaled, delta, deltascaled, sys_time;
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+ 	now = read_purr();
+-	delta = now - get_paca()->startpurr;
+-	get_paca()->startpurr = now;
+ 	nowscaled = read_spurr(now);
++	delta = now - get_paca()->startpurr;
+ 	deltascaled = nowscaled - get_paca()->startspurr;
++	get_paca()->startpurr = now;
+ 	get_paca()->startspurr = nowscaled;
+ 	if (!in_interrupt()) {
+ 		/* deltascaled includes both user and system time.
+ 		 * Hence scale it based on the purr ratio to estimate
+ 		 * the system time */
++		sys_time = get_paca()->system_time;
+ 		if (get_paca()->user_time)
+-			deltascaled = deltascaled * get_paca()->system_time /
+-			     (get_paca()->system_time + get_paca()->user_time);
+-		delta += get_paca()->system_time;
++			deltascaled = deltascaled * sys_time /
++			     (sys_time + get_paca()->user_time);
++		delta += sys_time;
+ 		get_paca()->system_time = 0;
+ 	}
+ 	account_system_time(tsk, 0, delta);
+-	get_paca()->purrdelta = delta;
+ 	account_system_time_scaled(tsk, deltascaled);
++	get_paca()->purrdelta = delta;
+ 	get_paca()->spurrdelta = deltascaled;
+ 	local_irq_restore(flags);
+ }
+@@ -326,11 +334,9 @@ void calculate_steal_time(void)
+ 	s64 stolen;
+ 	struct cpu_purr_data *pme;
+ 
+-	if (!cpu_has_feature(CPU_FTR_PURR))
+-		return;
+-	pme = &per_cpu(cpu_purr_data, smp_processor_id());
++	pme = &__get_cpu_var(cpu_purr_data);
+ 	if (!pme->initialized)
+-		return;		/* this can happen in early boot */
++		return;		/* !CPU_FTR_PURR or early in boot */
+ 	tb = mftb();
+ 	purr = mfspr(SPRN_PURR);
+ 	stolen = (tb - pme->tb) - (purr - pme->purr);
+@@ -353,7 +359,7 @@ static void snapshot_purr(void)
+ 	if (!cpu_has_feature(CPU_FTR_PURR))
+ 		return;
+ 	local_irq_save(flags);
+-	pme = &per_cpu(cpu_purr_data, smp_processor_id());
++	pme = &__get_cpu_var(cpu_purr_data);
+ 	pme->tb = mftb();
+ 	pme->purr = mfspr(SPRN_PURR);
+ 	pme->initialized = 1;
+@@ -556,8 +562,8 @@ void __init iSeries_time_init_early(void)
+ void timer_interrupt(struct pt_regs * regs)
+ {
+ 	struct pt_regs *old_regs;
+-	int cpu = smp_processor_id();
+-	struct clock_event_device *evt = &per_cpu(decrementers, cpu);
++	struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
++	struct clock_event_device *evt = &decrementer->event;
+ 	u64 now;
+ 
+ 	/* Ensure a positive value is written to the decrementer, or else
+@@ -570,9 +576,9 @@ void timer_interrupt(struct pt_regs * regs)
+ #endif
+ 
+ 	now = get_tb_or_rtc();
+-	if (now < per_cpu(decrementer_next_tb, cpu)) {
++	if (now < decrementer->next_tb) {
+ 		/* not time for this event yet */
+-		now = per_cpu(decrementer_next_tb, cpu) - now;
++		now = decrementer->next_tb - now;
+ 		if (now <= DECREMENTER_MAX)
+ 			set_dec((int)now);
+ 		return;
+@@ -623,6 +629,45 @@ void wakeup_decrementer(void)
+ 	set_dec(ticks);
+ }
+ 
++#ifdef CONFIG_SUSPEND
++void generic_suspend_disable_irqs(void)
++{
++	preempt_disable();
++
++	/* Disable the decrementer, so that it doesn't interfere
++	 * with suspending.
++	 */
++
++	set_dec(0x7fffffff);
++	local_irq_disable();
++	set_dec(0x7fffffff);
++}
++
++void generic_suspend_enable_irqs(void)
++{
++	wakeup_decrementer();
++
++	local_irq_enable();
++	preempt_enable();
++}
++
++/* Overrides the weak version in kernel/power/main.c */
++void arch_suspend_disable_irqs(void)
++{
++	if (ppc_md.suspend_disable_irqs)
++		ppc_md.suspend_disable_irqs();
++	generic_suspend_disable_irqs();
++}
++
++/* Overrides the weak version in kernel/power/main.c */
++void arch_suspend_enable_irqs(void)
++{
++	generic_suspend_enable_irqs();
++	if (ppc_md.suspend_enable_irqs)
++		ppc_md.suspend_enable_irqs();
++}
++#endif
++
+ #ifdef CONFIG_SMP
+ void __init smp_space_timers(unsigned int max_cpus)
+ {
+@@ -811,7 +856,7 @@ void __init clocksource_init(void)
+ static int decrementer_set_next_event(unsigned long evt,
+ 				      struct clock_event_device *dev)
+ {
+-	__get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt;
++	__get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
+ 	set_dec(evt);
+ 	return 0;
+ }
+@@ -825,7 +870,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
+ 
+ static void register_decrementer_clockevent(int cpu)
+ {
+-	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
++	struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
+ 
+ 	*dec = decrementer_clockevent;
+ 	dec->cpumask = cpumask_of_cpu(cpu);
+@@ -836,7 +881,7 @@ static void register_decrementer_clockevent(int cpu)
+ 	clockevents_register_device(dec);
+ }
+ 
+-void init_decrementer_clockevent(void)
++static void __init init_decrementer_clockevent(void)
+ {
+ 	int cpu = smp_processor_id();
+ 
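In account_system_vtime() above the scaled (SPURR) delta covers both user and system time, so it is split in proportion to the un-scaled system-time accumulator before being charged as system time. A back-of-the-envelope example with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t deltascaled = 1000;		/* SPURR ticks since last snapshot */
	uint64_t sys_time = 300, user_time = 700;

	if (user_time)
		deltascaled = deltascaled * sys_time / (sys_time + user_time);

	printf("system share of scaled delta: %llu\n",
	       (unsigned long long)deltascaled);	/* 300 */
	return 0;
}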
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 59c464e..848a204 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -334,18 +334,25 @@ static inline int check_io_access(struct pt_regs *regs)
+ #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
+ #endif
+ 
+-static int generic_machine_check_exception(struct pt_regs *regs)
++#if defined(CONFIG_4xx)
++int machine_check_4xx(struct pt_regs *regs)
+ {
+ 	unsigned long reason = get_mc_reason(regs);
+ 
+-#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
+ 	if (reason & ESR_IMCP) {
+ 		printk("Instruction");
+ 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+ 	} else
+ 		printk("Data");
+ 	printk(" machine check in kernel mode.\n");
+-#elif defined(CONFIG_440A)
++
++	return 0;
++}
++
++int machine_check_440A(struct pt_regs *regs)
++{
++	unsigned long reason = get_mc_reason(regs);
++
+ 	printk("Machine check in kernel mode.\n");
+ 	if (reason & ESR_IMCP){
+ 		printk("Instruction Synchronous Machine Check exception\n");
+@@ -375,7 +382,13 @@ static int generic_machine_check_exception(struct pt_regs *regs)
+ 		/* Clear MCSR */
+ 		mtspr(SPRN_MCSR, mcsr);
+ 	}
+-#elif defined (CONFIG_E500)
++	return 0;
++}
++#elif defined(CONFIG_E500)
++int machine_check_e500(struct pt_regs *regs)
++{
++	unsigned long reason = get_mc_reason(regs);
++
+ 	printk("Machine check in kernel mode.\n");
+ 	printk("Caused by (from MCSR=%lx): ", reason);
+ 
+@@ -403,7 +416,14 @@ static int generic_machine_check_exception(struct pt_regs *regs)
+ 		printk("Bus - Instruction Parity Error\n");
+ 	if (reason & MCSR_BUS_RPERR)
+ 		printk("Bus - Read Parity Error\n");
+-#elif defined (CONFIG_E200)
++
++	return 0;
++}
++#elif defined(CONFIG_E200)
++int machine_check_e200(struct pt_regs *regs)
++{
++	unsigned long reason = get_mc_reason(regs);
++
+ 	printk("Machine check in kernel mode.\n");
+ 	printk("Caused by (from MCSR=%lx): ", reason);
+ 
+@@ -421,7 +441,14 @@ static int generic_machine_check_exception(struct pt_regs *regs)
+ 		printk("Bus - Read Bus Error on data load\n");
+ 	if (reason & MCSR_BUS_WRERR)
+ 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
+-#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
++
++	return 0;
++}
++#else
++int machine_check_generic(struct pt_regs *regs)
++{
++	unsigned long reason = get_mc_reason(regs);
++
+ 	printk("Machine check in kernel mode.\n");
+ 	printk("Caused by (from SRR1=%lx): ", reason);
+ 	switch (reason & 0x601F0000) {
+@@ -451,22 +478,26 @@ static int generic_machine_check_exception(struct pt_regs *regs)
+ 	default:
+ 		printk("Unknown values in msr\n");
+ 	}
+-#endif /* CONFIG_4xx */
+-
+ 	return 0;
+ }
++#endif /* everything else */
+ 
+ void machine_check_exception(struct pt_regs *regs)
+ {
+ 	int recover = 0;
+ 
+-	/* See if any machine dependent calls */
++	/* See if any machine dependent calls. In theory, we would want
++	 * to call the CPU first, and call the ppc_md. one if the CPU
++	 * one returns a positive number. However there is existing code
++	 * that assumes the board gets a first chance, so let's keep it
++	 * that way for now and fix things later. --BenH.
++	 */
+ 	if (ppc_md.machine_check_exception)
+ 		recover = ppc_md.machine_check_exception(regs);
+-	else
+-		recover = generic_machine_check_exception(regs);
++	else if (cur_cpu_spec->machine_check)
++		recover = cur_cpu_spec->machine_check(regs);
+ 
+-	if (recover)
++	if (recover > 0)
+ 		return;
+ 
+ 	if (user_mode(regs)) {
+@@ -476,7 +507,12 @@ void machine_check_exception(struct pt_regs *regs)
+ 	}
+ 
+ #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
+-	/* the qspan pci read routines can cause machine checks -- Cort */
++	/* the qspan pci read routines can cause machine checks -- Cort
++	 *
++	 * yuck !!! that totally needs to go away ! There are better ways
++	 * to deal with that than having a wart in the mcheck handler.
++	 * -- BenH
++	 */
+ 	bad_page_fault(regs, regs->dar, SIGBUS);
+ 	return;
+ #endif
+@@ -622,6 +658,9 @@ static void parse_fpe(struct pt_regs *regs)
+ #define INST_POPCNTB		0x7c0000f4
+ #define INST_POPCNTB_MASK	0xfc0007fe
+ 
++#define INST_ISEL		0x7c00001e
++#define INST_ISEL_MASK		0xfc00003e
++
+ static int emulate_string_inst(struct pt_regs *regs, u32 instword)
+ {
+ 	u8 rT = (instword >> 21) & 0x1f;
+@@ -707,6 +746,23 @@ static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
+ 	return 0;
+ }
+ 
++static int emulate_isel(struct pt_regs *regs, u32 instword)
++{
++	u8 rT = (instword >> 21) & 0x1f;
++	u8 rA = (instword >> 16) & 0x1f;
++	u8 rB = (instword >> 11) & 0x1f;
++	u8 BC = (instword >> 6) & 0x1f;
++	u8 bit;
++	unsigned long tmp;
++
++	tmp = (rA == 0) ? 0 : regs->gpr[rA];
++	bit = (regs->ccr >> (31 - BC)) & 0x1;
++
++	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
++
++	return 0;
++}
++
+ static int emulate_instruction(struct pt_regs *regs)
+ {
+ 	u32 instword;
+@@ -749,6 +805,11 @@ static int emulate_instruction(struct pt_regs *regs)
+ 		return emulate_popcntb_inst(regs, instword);
+ 	}
+ 
++	/* Emulate isel (Integer Select) instruction */
++	if ((instword & INST_ISEL_MASK) == INST_ISEL) {
++		return emulate_isel(regs, instword);
++	}
++
+ 	return -EINVAL;
+ }
+ 
+diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
+index d723070..7aad620 100644
+--- a/arch/powerpc/kernel/udbg.c
++++ b/arch/powerpc/kernel/udbg.c
+@@ -54,9 +54,16 @@ void __init udbg_early_init(void)
+ #elif defined(CONFIG_PPC_EARLY_DEBUG_44x)
+ 	/* PPC44x debug */
+ 	udbg_init_44x_as1();
++#elif defined(CONFIG_PPC_EARLY_DEBUG_40x)
++	/* PPC40x debug */
++	udbg_init_40x_realmode();
+ #elif defined(CONFIG_PPC_EARLY_DEBUG_CPM)
+ 	udbg_init_cpm();
+ #endif
++
++#ifdef CONFIG_PPC_EARLY_DEBUG
++	console_loglevel = 10;
++#endif
+ }
+ 
+ /* udbg library, used by xmon et al */
+diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
+index 833a3d0..cb01ebc 100644
+--- a/arch/powerpc/kernel/udbg_16550.c
++++ b/arch/powerpc/kernel/udbg_16550.c
+@@ -46,7 +46,7 @@ struct NS16550 {
+ 
+ #define LCR_DLAB 0x80
+ 
+-static volatile struct NS16550 __iomem *udbg_comport;
++static struct NS16550 __iomem *udbg_comport;
+ 
+ static void udbg_550_putc(char c)
+ {
+@@ -117,7 +117,7 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
+ {
+ 	unsigned int dll, dlm, divisor, prescaler, speed;
+ 	u8 old_lcr;
+-	volatile struct NS16550 __iomem *port = comport;
++	struct NS16550 __iomem *port = comport;
+ 
+ 	old_lcr = in_8(&port->lcr);
+ 
+@@ -162,7 +162,7 @@ void udbg_maple_real_putc(char c)
+ 
+ void __init udbg_init_maple_realmode(void)
+ {
+-	udbg_comport = (volatile struct NS16550 __iomem *)0xf40003f8;
++	udbg_comport = (struct NS16550 __iomem *)0xf40003f8;
+ 
+ 	udbg_putc = udbg_maple_real_putc;
+ 	udbg_getc = NULL;
+@@ -184,7 +184,7 @@ void udbg_pas_real_putc(char c)
+ 
+ void udbg_init_pas_realmode(void)
+ {
+-	udbg_comport = (volatile struct NS16550 __iomem *)0xfcff03f8UL;
++	udbg_comport = (struct NS16550 __iomem *)0xfcff03f8UL;
+ 
+ 	udbg_putc = udbg_pas_real_putc;
+ 	udbg_getc = NULL;
+@@ -219,9 +219,42 @@ static int udbg_44x_as1_getc(void)
+ void __init udbg_init_44x_as1(void)
+ {
+ 	udbg_comport =
+-		(volatile struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;
++		(struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;
+ 
+ 	udbg_putc = udbg_44x_as1_putc;
+ 	udbg_getc = udbg_44x_as1_getc;
+ }
+ #endif /* CONFIG_PPC_EARLY_DEBUG_44x */
++
++#ifdef CONFIG_PPC_EARLY_DEBUG_40x
++static void udbg_40x_real_putc(char c)
++{
++	if (udbg_comport) {
++		while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
++			/* wait for idle */;
++		real_writeb(c, &udbg_comport->thr); eieio();
++		if (c == '\n')
++			udbg_40x_real_putc('\r');
++	}
++}
++
++static int udbg_40x_real_getc(void)
++{
++	if (udbg_comport) {
++		while ((real_readb(&udbg_comport->lsr) & LSR_DR) == 0)
++			; /* wait for char */
++		return real_readb(&udbg_comport->rbr);
++	}
++	return -1;
++}
++
++void __init udbg_init_40x_realmode(void)
++{
++	udbg_comport = (struct NS16550 __iomem *)
++		CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR;
++
++	udbg_putc = udbg_40x_real_putc;
++	udbg_getc = udbg_40x_real_getc;
++	udbg_getc_poll = NULL;
++}
++#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
+diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
+index 19a5656..f0bad70 100644
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -37,8 +37,6 @@
+ #include <asm/iseries/hv_call_xm.h>
+ #include <asm/iseries/iommu.h>
+ 
+-extern struct kset devices_subsys; /* needed for vio_find_name() */
+-
+ static struct bus_type vio_bus_type;
+ 
+ static struct vio_dev vio_bus_device  = { /* fake "parent" device */
+@@ -361,19 +359,16 @@ EXPORT_SYMBOL(vio_get_attribute);
+ #ifdef CONFIG_PPC_PSERIES
+ /* vio_find_name() - internal because only vio.c knows how we formatted the
+  * kobject name
+- * XXX once vio_bus_type.devices is actually used as a kset in
+- * drivers/base/bus.c, this function should be removed in favor of
+- * "device_find(kobj_name, &vio_bus_type)"
+  */
+-static struct vio_dev *vio_find_name(const char *kobj_name)
++static struct vio_dev *vio_find_name(const char *name)
+ {
+-	struct kobject *found;
++	struct device *found;
+ 
+-	found = kset_find_obj(&devices_subsys, kobj_name);
++	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
+ 	if (!found)
+ 		return NULL;
+ 
+-	return to_vio_dev(container_of(found, struct device, kobj));
++	return to_vio_dev(found);
+ }
+ 
+ /**
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index f66fa5d..0afb9e3 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -23,7 +23,7 @@ SECTIONS
+ 	/* Sections to be discarded. */
+ 	/DISCARD/ : {
+ 	*(.exitcall.exit)
+-	*(.exit.data)
++	EXIT_DATA
+ 	}
+ 
+ 	. = KERNELBASE;
+@@ -76,17 +76,19 @@ SECTIONS
+ 
+ 	.init.text : {
+ 		_sinittext = .;
+-		*(.init.text)
++		INIT_TEXT
+ 		_einittext = .;
+ 	}
+ 
+ 	/* .exit.text is discarded at runtime, not link time,
+ 	 * to deal with references from __bug_table
+ 	 */
+-	.exit.text : { *(.exit.text) }
++	.exit.text : {
++		EXIT_TEXT
++	}
+ 
+ 	.init.data : {
+-		*(.init.data);
++		INIT_DATA
+ 		__vtop_table_begin = .;
+ 		*(.vtop_fixup);
+ 		__vtop_table_end = .;
+diff --git a/arch/powerpc/math-emu/op-4.h b/arch/powerpc/math-emu/op-4.h
+index fcdd6d0..c9ae626 100644
+--- a/arch/powerpc/math-emu/op-4.h
++++ b/arch/powerpc/math-emu/op-4.h
+@@ -194,19 +194,39 @@
+   (X##_f[3] = I3, X##_f[2] = I2, X##_f[1] = I1, X##_f[0] = I0)
+ 
+ #ifndef __FP_FRAC_ADD_4
+-#define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0)		\
+-  (r0 = x0 + y0,							\
+-   r1 = x1 + y1 + (r0 < x0),						\
+-   r2 = x2 + y2 + (r1 < x1),						\
+-   r3 = x3 + y3 + (r2 < x2))
++#define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0)	\
++  do {								\
++    int _c1, _c2, _c3;						\
++    r0 = x0 + y0;						\
++    _c1 = r0 < x0;						\
++    r1 = x1 + y1;						\
++    _c2 = r1 < x1;						\
++    r1 += _c1;							\
++    _c2 |= r1 < _c1;						\
++    r2 = x2 + y2;						\
++    _c3 = r2 < x2;						\
++    r2 += _c2;							\
++    _c3 |= r2 < _c2;						\
++    r3 = x3 + y3 + _c3;						\
++  } while (0)
+ #endif
+ 
+ #ifndef __FP_FRAC_SUB_4
+-#define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0)		\
+-  (r0 = x0 - y0,                                                        \
+-   r1 = x1 - y1 - (r0 > x0),                                            \
+-   r2 = x2 - y2 - (r1 > x1),                                            \
+-   r3 = x3 - y3 - (r2 > x2))
++#define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0)	\
++  do {								\
++    int _c1, _c2, _c3;						\
++    r0 = x0 - y0;						\
++    _c1 = r0 > x0;						\
++    r1 = x1 - y1;						\
++    _c2 = r1 > x1;						\
++    r1 -= _c1;							\
++    _c2 |= r1 > _c1;						\
++    r2 = x2 - y2;						\
++    _c3 = r2 > x2;						\
++    r2 -= _c2;							\
++    _c3 |= r2 > _c2;						\
++    r3 = x3 - y3 - _c3;						\
++  } while (0)
+ #endif
+ 
+ #ifndef __FP_FRAC_ADDI_4
+diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
+index 20629ae..41649a5 100644
+--- a/arch/powerpc/mm/Makefile
++++ b/arch/powerpc/mm/Makefile
+@@ -22,3 +22,4 @@ obj-$(CONFIG_FSL_BOOKE)		+= fsl_booke_mmu.o
+ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+ obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
+ obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
++obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 8135da0..7b25107 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -167,10 +167,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ 	if (notify_page_fault(regs))
+ 		return 0;
+ 
+-	if (trap == 0x300) {
+-		if (debugger_fault_handler(regs))
+-			return 0;
+-	}
++	if (unlikely(debugger_fault_handler(regs)))
++		return 0;
+ 
+ 	/* On a kernel SLB miss we can only check for a valid exception entry */
+ 	if (!user_mode(regs) && (address >= TASK_SIZE))
+@@ -189,7 +187,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ 			return SIGSEGV;
+ 		/* in_atomic() in user mode is really bad,
+ 		   as is current->mm == NULL. */
+-		printk(KERN_EMERG "Page fault in user mode with"
++		printk(KERN_EMERG "Page fault in user mode with "
+ 		       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
+ 		printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
+ 		       regs->nip, regs->msr);
+diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
+index 17139da..c93a966 100644
+--- a/arch/powerpc/mm/fsl_booke_mmu.c
++++ b/arch/powerpc/mm/fsl_booke_mmu.c
+@@ -165,15 +165,15 @@ void invalidate_tlbcam_entry(int index)
+ void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
+ 		unsigned long cam2)
+ {
+-	settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
++	settlbcam(0, PAGE_OFFSET, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
+ 	tlbcam_index++;
+ 	if (cam1) {
+ 		tlbcam_index++;
+-		settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
++		settlbcam(1, PAGE_OFFSET+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
+ 	}
+ 	if (cam2) {
+ 		tlbcam_index++;
+-		settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
++		settlbcam(2, PAGE_OFFSET+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
+ 	}
+ }
+ 
+diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
+index e935edd..21d2484 100644
+--- a/arch/powerpc/mm/hash_low_64.S
++++ b/arch/powerpc/mm/hash_low_64.S
+@@ -331,7 +331,8 @@ htab_pte_insert_failure:
+  *****************************************************************************/
+ 
+ /* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+- *		 pte_t *ptep, unsigned long trap, int local, int ssize)
++ *		 pte_t *ptep, unsigned long trap, int local, int ssize,
++ *		 int subpg_prot)
+  */
+ 
+ /*
+@@ -429,12 +430,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+ 	xor	r28,r28,r0		/* hash */
+ 
+ 	/* Convert linux PTE bits into HW equivalents */
+-4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+-	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
++4:
++#ifdef CONFIG_PPC_SUBPAGE_PROT
++	andc	r10,r30,r10
++	andi.	r3,r10,0x1fe		/* Get basic set of flags */
++	rlwinm	r0,r10,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
++#else
++	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+ 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
++#endif
++	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
+ 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
+ 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
+-	andc	r0,r30,r0		/* r0 = pte & ~r0 */
++	andc	r0,r3,r0		/* r0 = pte & ~r0 */
+ 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+ 	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
+ 
+diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
+index f09730b..32f4161 100644
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -96,6 +96,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
+ int mmu_io_psize = MMU_PAGE_4K;
+ int mmu_kernel_ssize = MMU_SEGSIZE_256M;
+ int mmu_highuser_ssize = MMU_SEGSIZE_256M;
++u16 mmu_slb_size = 64;
+ #ifdef CONFIG_HUGETLB_PAGE
+ int mmu_huge_psize = MMU_PAGE_16M;
+ unsigned int HPAGE_SHIFT;
+@@ -368,18 +369,11 @@ static void __init htab_init_page_sizes(void)
+ 	 * on what is available
+ 	 */
+ 	if (mmu_psize_defs[MMU_PAGE_16M].shift)
+-		mmu_huge_psize = MMU_PAGE_16M;
++		set_huge_psize(MMU_PAGE_16M);
+ 	/* With 4k/4level pagetables, we can't (for now) cope with a
+ 	 * huge page size < PMD_SIZE */
+ 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+-		mmu_huge_psize = MMU_PAGE_1M;
+-
+-	/* Calculate HPAGE_SHIFT and sanity check it */
+-	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
+-	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
+-		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
+-	else
+-		HPAGE_SHIFT = 0; /* No huge pages dude ! */
++		set_huge_psize(MMU_PAGE_1M);
+ #endif /* CONFIG_HUGETLB_PAGE */
+ }
+ 
+@@ -477,7 +471,7 @@ void __init htab_initialize(void)
+ 	unsigned long table;
+ 	unsigned long pteg_count;
+ 	unsigned long mode_rw;
+-	unsigned long base = 0, size = 0;
++	unsigned long base = 0, size = 0, limit;
+ 	int i;
+ 
+ 	extern unsigned long tce_alloc_start, tce_alloc_end;
+@@ -511,9 +505,15 @@ void __init htab_initialize(void)
+ 		_SDR1 = 0; 
+ 	} else {
+ 		/* Find storage for the HPT.  Must be contiguous in
+-		 * the absolute address space.
++		 * the absolute address space. On cell we want it to be
++		 * in the first 1 Gig.
+ 		 */
+-		table = lmb_alloc(htab_size_bytes, htab_size_bytes);
++		if (machine_is(cell))
++			limit = 0x40000000;
++		else
++			limit = 0;
++
++		table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+ 
+ 		DBG("Hash table allocated at %lx, size: %lx\n", table,
+ 		    htab_size_bytes);
+@@ -643,7 +643,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
+  * For now this makes the whole process use 4k pages.
+  */
+ #ifdef CONFIG_PPC_64K_PAGES
+-static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
++void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+ {
+ 	if (mm->context.user_psize == MMU_PAGE_4K)
+ 		return;
+@@ -651,13 +651,62 @@ static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+ #ifdef CONFIG_SPU_BASE
+ 	spu_flush_all_slbs(mm);
+ #endif
++	if (get_paca()->context.user_psize != MMU_PAGE_4K) {
++		get_paca()->context = mm->context;
++		slb_flush_and_rebolt();
++	}
+ }
+ #endif /* CONFIG_PPC_64K_PAGES */
+ 
++#ifdef CONFIG_PPC_SUBPAGE_PROT
++/*
++ * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
++ * Userspace sets the subpage permissions using the subpage_prot system call.
++ *
++ * Result is 0: full permissions, _PAGE_RW: read-only,
++ * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
++ */
++static int subpage_protection(pgd_t *pgdir, unsigned long ea)
++{
++	struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
++	u32 spp = 0;
++	u32 **sbpm, *sbpp;
++
++	if (ea >= spt->maxaddr)
++		return 0;
++	if (ea < 0x100000000) {
++		/* addresses below 4GB use spt->low_prot */
++		sbpm = spt->low_prot;
++	} else {
++		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
++		if (!sbpm)
++			return 0;
++	}
++	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
++	if (!sbpp)
++		return 0;
++	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
++
++	/* extract 2-bit bitfield for this 4k subpage */
++	spp >>= 30 - 2 * ((ea >> 12) & 0xf);
++
++	/* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
++	spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
++	return spp;
++}
++
++#else /* CONFIG_PPC_SUBPAGE_PROT */
++static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
++{
++	return 0;
++}
++#endif
++
+ /* Result code is:
+  *  0 - handled
+  *  1 - normal page fault
+  * -1 - critical hash insertion error
++ * -2 - access not permitted by subpage protection mechanism
+  */
+ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+ {
+@@ -808,7 +857,14 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+ 		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+ 	else
+ #endif /* CONFIG_PPC_HAS_HASH_64K */
+-		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
++	{
++		int spp = subpage_protection(pgdir, ea);
++		if (access & spp)
++			rc = -2;
++		else
++			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
++					    local, ssize, spp);
++	}
+ 
+ #ifndef CONFIG_PPC_64K_PAGES
+ 	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
+@@ -880,7 +936,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
+ 		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+ 	else
+ #endif /* CONFIG_PPC_HAS_HASH_64K */
+-		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
++		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
++			       subpage_protection(pgdir, ea));
+ 
+ 	local_irq_restore(flags);
+ }
+@@ -925,19 +982,17 @@ void flush_hash_range(unsigned long number, int local)
+  * low_hash_fault is called when we the low level hash code failed
+  * to instert a PTE due to an hypervisor error
+  */
+-void low_hash_fault(struct pt_regs *regs, unsigned long address)
++void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
+ {
+ 	if (user_mode(regs)) {
+-		siginfo_t info;
+-
+-		info.si_signo = SIGBUS;
+-		info.si_errno = 0;
+-		info.si_code = BUS_ADRERR;
+-		info.si_addr = (void __user *)address;
+-		force_sig_info(SIGBUS, &info, current);
+-		return;
+-	}
+-	bad_page_fault(regs, address, SIGBUS);
++#ifdef CONFIG_PPC_SUBPAGE_PROT
++		if (rc == -2)
++			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
++		else
++#endif
++			_exception(SIGBUS, regs, BUS_ADRERR, address);
++	} else
++		bad_page_fault(regs, address, SIGBUS);
+ }
+ 
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 71efb38..a02266d 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -24,18 +24,17 @@
+ #include <asm/cputable.h>
+ #include <asm/spu.h>
+ 
++#define HPAGE_SHIFT_64K	16
++#define HPAGE_SHIFT_16M	24
++
+ #define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
+ #define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
+ 
+-#ifdef CONFIG_PPC_64K_PAGES
+-#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT-HPAGE_SHIFT)
+-#else
+-#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT-HPAGE_SHIFT)
+-#endif
+-#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
+-#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)
++unsigned int hugepte_shift;
++#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
++#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)
+ 
+-#define HUGEPD_SHIFT		(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
++#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
+ #define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
+ #define HUGEPD_MASK		(~(HUGEPD_SIZE-1))
+ 
+@@ -82,11 +81,35 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
+ 	return 0;
+ }
+ 
++/* Base page size affects how we walk hugetlb page tables */
++#ifdef CONFIG_PPC_64K_PAGES
++#define hpmd_offset(pud, addr)		pmd_offset(pud, addr)
++#define hpmd_alloc(mm, pud, addr)	pmd_alloc(mm, pud, addr)
++#else
++static inline
++pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
++{
++	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
++		return pmd_offset(pud, addr);
++	else
++		return (pmd_t *) pud;
++}
++static inline
++pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
++{
++	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
++		return pmd_alloc(mm, pud, addr);
++	else
++		return (pmd_t *) pud;
++}
++#endif
++
+ /* Modelled after find_linux_pte() */
+ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+ {
+ 	pgd_t *pg;
+ 	pud_t *pu;
++	pmd_t *pm;
+ 
+ 	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
+ 
+@@ -96,14 +119,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+ 	if (!pgd_none(*pg)) {
+ 		pu = pud_offset(pg, addr);
+ 		if (!pud_none(*pu)) {
+-#ifdef CONFIG_PPC_64K_PAGES
+-			pmd_t *pm;
+-			pm = pmd_offset(pu, addr);
++			pm = hpmd_offset(pu, addr);
+ 			if (!pmd_none(*pm))
+ 				return hugepte_offset((hugepd_t *)pm, addr);
+-#else
+-			return hugepte_offset((hugepd_t *)pu, addr);
+-#endif
+ 		}
+ 	}
+ 
+@@ -114,6 +132,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+ {
+ 	pgd_t *pg;
+ 	pud_t *pu;
++	pmd_t *pm;
+ 	hugepd_t *hpdp = NULL;
+ 
+ 	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
+@@ -124,14 +143,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+ 	pu = pud_alloc(mm, pg, addr);
+ 
+ 	if (pu) {
+-#ifdef CONFIG_PPC_64K_PAGES
+-		pmd_t *pm;
+-		pm = pmd_alloc(mm, pu, addr);
++		pm = hpmd_alloc(mm, pu, addr);
+ 		if (pm)
+ 			hpdp = (hugepd_t *)pm;
+-#else
+-		hpdp = (hugepd_t *)pu;
+-#endif
+ 	}
+ 
+ 	if (! hpdp)
+@@ -158,7 +172,6 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
+ 						 PGF_CACHENUM_MASK));
+ }
+ 
+-#ifdef CONFIG_PPC_64K_PAGES
+ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ 				   unsigned long addr, unsigned long end,
+ 				   unsigned long floor, unsigned long ceiling)
+@@ -191,7 +204,6 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ 	pud_clear(pud);
+ 	pmd_free_tlb(tlb, pmd);
+ }
+-#endif
+ 
+ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+ 				   unsigned long addr, unsigned long end,
+@@ -210,9 +222,15 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+ 			continue;
+ 		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+ #else
+-		if (pud_none(*pud))
+-			continue;
+-		free_hugepte_range(tlb, (hugepd_t *)pud);
++		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
++			if (pud_none_or_clear_bad(pud))
++				continue;
++			hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
++		} else {
++			if (pud_none(*pud))
++				continue;
++			free_hugepte_range(tlb, (hugepd_t *)pud);
++		}
+ #endif
+ 	} while (pud++, addr = next, addr != end);
+ 
+@@ -526,6 +544,57 @@ repeat:
+ 	return err;
+ }
+ 
++void set_huge_psize(int psize)
++{
++	/* Check that it is a page size supported by the hardware and
++	 * that it fits within pagetable limits. */
++	if (mmu_psize_defs[psize].shift && mmu_psize_defs[psize].shift < SID_SHIFT &&
++		(mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
++			mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
++		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
++		mmu_huge_psize = psize;
++#ifdef CONFIG_PPC_64K_PAGES
++		hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
++#else
++		if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
++			hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
++		else
++			hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT);
++#endif
++
++	} else
++		HPAGE_SHIFT = 0;
++}
++
++static int __init hugepage_setup_sz(char *str)
++{
++	unsigned long long size;
++	int mmu_psize = -1;
++	int shift;
++
++	size = memparse(str, &str);
++
++	shift = __ffs(size);
++	switch (shift) {
++#ifndef CONFIG_PPC_64K_PAGES
++	case HPAGE_SHIFT_64K:
++		mmu_psize = MMU_PAGE_64K;
++		break;
++#endif
++	case HPAGE_SHIFT_16M:
++		mmu_psize = MMU_PAGE_16M;
++		break;
++	}
++
++	if (mmu_psize >=0 && mmu_psize_defs[mmu_psize].shift)
++		set_huge_psize(mmu_psize);
++	else
++		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
++
++	return 1;
++}
++__setup("hugepagesz=", hugepage_setup_sz);
++
+ static void zero_ctor(struct kmem_cache *cache, void *addr)
+ {
+ 	memset(addr, 0, kmem_cache_size(cache));
+diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
+index 8f4d2dc..4ce23bc 100644
+--- a/arch/powerpc/mm/lmb.c
++++ b/arch/powerpc/mm/lmb.c
+@@ -342,3 +342,16 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
+ 		}
+ 	}
+ }
++
++int __init lmb_is_reserved(unsigned long addr)
++{
++	int i;
++
++	for (i = 0; i < lmb.reserved.cnt; i++) {
++		unsigned long upper = lmb.reserved.region[i].base +
++				      lmb.reserved.region[i].size - 1;
++		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
++			return 1;
++	}
++	return 0;
++}
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 5402fb6..e812244 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -213,15 +213,30 @@ void __init do_init_bootmem(void)
+ 	 */
+ #ifdef CONFIG_HIGHMEM
+ 	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
++
++	/* reserve the sections we're already using */
++	for (i = 0; i < lmb.reserved.cnt; i++) {
++		unsigned long addr = lmb.reserved.region[i].base +
++				     lmb_size_bytes(&lmb.reserved, i) - 1;
++		if (addr < total_lowmem)
++			reserve_bootmem(lmb.reserved.region[i].base,
++					lmb_size_bytes(&lmb.reserved, i));
++		else if (lmb.reserved.region[i].base < total_lowmem) {
++			unsigned long adjusted_size = total_lowmem -
++				      lmb.reserved.region[i].base;
++			reserve_bootmem(lmb.reserved.region[i].base,
++					adjusted_size);
++		}
++	}
+ #else
+ 	free_bootmem_with_active_regions(0, max_pfn);
+-#endif
+ 
+ 	/* reserve the sections we're already using */
+ 	for (i = 0; i < lmb.reserved.cnt; i++)
+ 		reserve_bootmem(lmb.reserved.region[i].base,
+ 				lmb_size_bytes(&lmb.reserved, i));
+ 
++#endif
+ 	/* XXX need to clip this if using highmem? */
+ 	sparse_memory_present_with_active_regions(0);
+ 
+@@ -334,11 +349,13 @@ void __init mem_init(void)
+ 		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
+ 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
+ 			struct page *page = pfn_to_page(pfn);
+-
++			if (lmb_is_reserved(pfn << PAGE_SHIFT))
++				continue;
+ 			ClearPageReserved(page);
+ 			init_page_count(page);
+ 			__free_page(page);
+ 			totalhigh_pages++;
++			reservedpages--;
+ 		}
+ 		totalram_pages += totalhigh_pages;
+ 		printk(KERN_DEBUG "High memory: %luk\n",
+diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
+index 50d7372..47b06ba 100644
+--- a/arch/powerpc/mm/slb.c
++++ b/arch/powerpc/mm/slb.c
+@@ -256,6 +256,7 @@ void slb_initialize(void)
+ 	static int slb_encoding_inited;
+ 	extern unsigned int *slb_miss_kernel_load_linear;
+ 	extern unsigned int *slb_miss_kernel_load_io;
++	extern unsigned int *slb_compare_rr_to_size;
+ 
+ 	/* Prepare our SLB miss handler based on our page size */
+ 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
+@@ -269,6 +270,8 @@ void slb_initialize(void)
+ 				   SLB_VSID_KERNEL | linear_llp);
+ 		patch_slb_encoding(slb_miss_kernel_load_io,
+ 				   SLB_VSID_KERNEL | io_llp);
++		patch_slb_encoding(slb_compare_rr_to_size,
++				   mmu_slb_size);
+ 
+ 		DBG("SLB: linear  LLP = %04x\n", linear_llp);
+ 		DBG("SLB: io      LLP = %04x\n", io_llp);
+diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
+index 1328a81..657f6b3 100644
+--- a/arch/powerpc/mm/slb_low.S
++++ b/arch/powerpc/mm/slb_low.S
+@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+ 
+ 7:	ld	r10,PACASTABRR(r13)
+ 	addi	r10,r10,1
+-	/* use a cpu feature mask if we ever change our slb size */
+-	cmpldi	r10,SLB_NUM_ENTRIES
++	/* This gets soft patched on boot. */
++_GLOBAL(slb_compare_rr_to_size)
++	cmpldi	r10,0
+ 
+ 	blt+	4f
+ 	li	r10,SLB_NUM_BOLTED
+diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
+new file mode 100644
+index 0000000..4cafc0c
+--- /dev/null
++++ b/arch/powerpc/mm/subpage-prot.c
+@@ -0,0 +1,213 @@
++/*
++ * Copyright 2007-2008 Paul Mackerras, IBM Corp.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlbflush.h>
++
++/*
++ * Free all pages allocated for subpage protection maps and pointers.
++ * Also makes sure that the subpage_prot_table structure is
++ * reinitialized for the next user.
++ */
++void subpage_prot_free(pgd_t *pgd)
++{
++	struct subpage_prot_table *spt = pgd_subpage_prot(pgd);
++	unsigned long i, j, addr;
++	u32 **p;
++
++	for (i = 0; i < 4; ++i) {
++		if (spt->low_prot[i]) {
++			free_page((unsigned long)spt->low_prot[i]);
++			spt->low_prot[i] = NULL;
++		}
++	}
++	addr = 0;
++	for (i = 0; i < 2; ++i) {
++		p = spt->protptrs[i];
++		if (!p)
++			continue;
++		spt->protptrs[i] = NULL;
++		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
++		     ++j, addr += PAGE_SIZE)
++			if (p[j])
++				free_page((unsigned long)p[j]);
++		free_page((unsigned long)p);
++	}
++	spt->maxaddr = 0;
++}
++
++static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
++			     int npages)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	spinlock_t *ptl;
++
++	pgd = pgd_offset(mm, addr);
++	if (pgd_none(*pgd))
++		return;
++	pud = pud_offset(pgd, addr);
++	if (pud_none(*pud))
++		return;
++	pmd = pmd_offset(pud, addr);
++	if (pmd_none(*pmd))
++		return;
++	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++	arch_enter_lazy_mmu_mode();
++	for (; npages > 0; --npages) {
++		pte_update(mm, addr, pte, 0, 0);
++		addr += PAGE_SIZE;
++		++pte;
++	}
++	arch_leave_lazy_mmu_mode();
++	pte_unmap_unlock(pte - 1, ptl);
++}
++
++/*
++ * Clear the subpage protection map for an address range, allowing
++ * all accesses that are allowed by the pte permissions.
++ */
++static void subpage_prot_clear(unsigned long addr, unsigned long len)
++{
++	struct mm_struct *mm = current->mm;
++	struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
++	u32 **spm, *spp;
++	int i, nw;
++	unsigned long next, limit;
++
++	down_write(&mm->mmap_sem);
++	limit = addr + len;
++	if (limit > spt->maxaddr)
++		limit = spt->maxaddr;
++	for (; addr < limit; addr = next) {
++		next = pmd_addr_end(addr, limit);
++		if (addr < 0x100000000) {
++			spm = spt->low_prot;
++		} else {
++			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
++			if (!spm)
++				continue;
++		}
++		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
++		if (!spp)
++			continue;
++		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);
++
++		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
++		nw = PTRS_PER_PTE - i;
++		if (addr + (nw << PAGE_SHIFT) > next)
++			nw = (next - addr) >> PAGE_SHIFT;
++
++		memset(spp, 0, nw * sizeof(u32));
++
++		/* now flush any existing HPTEs for the range */
++		hpte_flush_range(mm, addr, nw);
++	}
++	up_write(&mm->mmap_sem);
++}
++
++/*
++ * Copy in a subpage protection map for an address range.
++ * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
++ * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
++ * 2 or 3 to prevent all accesses.
++ * Note that the normal page protections also apply; the subpage
++ * protection mechanism is an additional constraint, so putting 0
++ * in a 2-bit field won't allow writes to a page that is otherwise
++ * write-protected.
++ */
++long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
++{
++	struct mm_struct *mm = current->mm;
++	struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
++	u32 **spm, *spp;
++	int i, nw;
++	unsigned long next, limit;
++	int err;
++
++	/* Check parameters */
++	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
++	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
++		return -EINVAL;
++
++	if (is_hugepage_only_range(mm, addr, len))
++		return -EINVAL;
++
++	if (!map) {
++		/* Clear out the protection map for the address range */
++		subpage_prot_clear(addr, len);
++		return 0;
++	}
++
++	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
++		return -EFAULT;
++
++	down_write(&mm->mmap_sem);
++	for (limit = addr + len; addr < limit; addr = next) {
++		next = pmd_addr_end(addr, limit);
++		err = -ENOMEM;
++		if (addr < 0x100000000) {
++			spm = spt->low_prot;
++		} else {
++			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
++			if (!spm) {
++				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
++				if (!spm)
++					goto out;
++				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
++			}
++		}
++		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
++		spp = *spm;
++		if (!spp) {
++			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
++			if (!spp)
++				goto out;
++			*spm = spp;
++		}
++		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);
++
++		local_irq_disable();
++		demote_segment_4k(mm, addr);
++		local_irq_enable();
++
++		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
++		nw = PTRS_PER_PTE - i;
++		if (addr + (nw << PAGE_SHIFT) > next)
++			nw = (next - addr) >> PAGE_SHIFT;
++
++		up_write(&mm->mmap_sem);
++		err = -EFAULT;
++		if (__copy_from_user(spp, map, nw * sizeof(u32)))
++			goto out2;
++		map += nw;
++		down_write(&mm->mmap_sem);
++
++		/* now flush any existing HPTEs for the range */
++		hpte_flush_range(mm, addr, nw);
++	}
++	if (limit > spt->maxaddr)
++		spt->maxaddr = limit;
++	err = 0;
++ out:
++	up_write(&mm->mmap_sem);
++ out2:
++	return err;
++}
+diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
+index bb6bff5..1392977 100644
+--- a/arch/powerpc/oprofile/op_model_cell.c
++++ b/arch/powerpc/oprofile/op_model_cell.c
+@@ -61,7 +61,7 @@ static unsigned int spu_cycle_reset;
+ #define NUM_THREADS 2         /* number of physical threads in
+ 			       * physical processor
+ 			       */
+-#define NUM_TRACE_BUS_WORDS 4
++#define NUM_DEBUG_BUS_WORDS 4
+ #define NUM_INPUT_BUS_WORDS 2
+ 
+ #define MAX_SPU_COUNT 0xFFFFFF	/* maximum 24 bit LFSR value */
+@@ -169,7 +169,6 @@ static DEFINE_SPINLOCK(virt_cntr_lock);
+ 
+ static u32 ctr_enabled;
+ 
+-static unsigned char trace_bus[NUM_TRACE_BUS_WORDS];
+ static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
+ 
+ /*
+@@ -298,7 +297,7 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
+ 
+ 	p->signal_group = event / 100;
+ 	p->bus_word = bus_word;
+-	p->sub_unit = (unit_mask & 0x0000f000) >> 12;
++	p->sub_unit = GET_SUB_UNIT(unit_mask);
+ 
+ 	pm_regs.pm07_cntrl[ctr] = 0;
+ 	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
+@@ -334,16 +333,16 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
+ 		p->bit = signal_bit;
+ 	}
+ 
+-	for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) {
++	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
+ 		if (bus_word & (1 << i)) {
+ 			pm_regs.debug_bus_control |=
+-			    (bus_type << (31 - (2 * i) + 1));
++			    (bus_type << (30 - (2 * i)));
+ 
+ 			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
+ 				if (input_bus[j] == 0xff) {
+ 					input_bus[j] = i;
+ 					pm_regs.group_control |=
+-					    (i << (31 - i));
++					    (i << (30 - (2 * j)));
+ 
+ 					break;
+ 				}
+@@ -450,6 +449,12 @@ static void cell_virtual_cntr(unsigned long data)
+ 	hdw_thread = 1 ^ hdw_thread;
+ 	next_hdw_thread = hdw_thread;
+ 
++	pm_regs.group_control = 0;
++	pm_regs.debug_bus_control = 0;
++
++	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
++		input_bus[i] = 0xff;
++
+ 	/*
+ 	 * There are some per thread events.  Must do the
+ 	 * set event, for the thread that is being started
+@@ -619,9 +624,6 @@ static int cell_reg_setup(struct op_counter_config *ctr,
+ 		pmc_cntrl[1][i].vcntr = i;
+ 	}
+ 
+-	for (i = 0; i < NUM_TRACE_BUS_WORDS; i++)
+-		trace_bus[i] = 0xff;
+-
+ 	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
+ 		input_bus[i] = 0xff;
+ 
+diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
+index cddc250..446a8bb 100644
+--- a/arch/powerpc/oprofile/op_model_power4.c
++++ b/arch/powerpc/oprofile/op_model_power4.c
+@@ -172,15 +172,15 @@ static void power4_stop(void)
+ }
+ 
+ /* Fake functions used by canonicalize_pc */
+-static void __attribute_used__ hypervisor_bucket(void)
++static void __used hypervisor_bucket(void)
+ {
+ }
+ 
+-static void __attribute_used__ rtas_bucket(void)
++static void __used rtas_bucket(void)
+ {
+ }
+ 
+-static void __attribute_used__ kernel_unknown_bucket(void)
++static void __used kernel_unknown_bucket(void)
+ {
+ }
+ 
+diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
+index 8f6699f..74f3117 100644
+--- a/arch/powerpc/platforms/40x/Kconfig
++++ b/arch/powerpc/platforms/40x/Kconfig
+@@ -14,28 +14,34 @@
+ #	help
+ #	  This option enables support for the CPCI405 board.
+ 
+-#config EP405
+-#	bool "EP405/EP405PC"
+-#	depends on 40x
+-#	default n
+-#	select 405GP
+-#	help
+-#	  This option enables support for the EP405/EP405PC boards.
+-
+-#config EP405PC
+-#	bool "EP405PC Support"
+-#	depends on EP405
+-#	default y
+-#	help
+-#	  This option enables support for the extra features of the EP405PC board.
++config EP405
++	bool "EP405/EP405PC"
++	depends on 40x
++	default n
++	select 405GP
++	select PCI
++	help
++	  This option enables support for the EP405/EP405PC boards.
+ 
+ config KILAUEA
+ 	bool "Kilauea"
+ 	depends on 40x
+ 	default n
++	select 405EX
++	select PPC4xx_PCI_EXPRESS
+ 	help
+ 	  This option enables support for the AMCC PPC405EX evaluation board.
+ 
++config MAKALU
++	bool "Makalu"
++	depends on 40x
++	default n
++	select 405EX
++	select PCI
++	select PPC4xx_PCI_EXPRESS
++	help
++	  This option enables support for the AMCC PPC405EX board.
++
+ #config REDWOOD_5
+ #	bool "Redwood-5"
+ #	depends on 40x
+@@ -65,6 +71,7 @@ config WALNUT
+ 	depends on 40x
+ 	default y
+ 	select 405GP
++	select PCI
+ 	help
+ 	  This option enables support for the IBM PPC405GP evaluation board.
+ 
+@@ -105,6 +112,11 @@ config 405GP
+ config 405EP
+ 	bool
+ 
++config 405EX
++	bool
++	select IBM_NEW_EMAC_EMAC4
++	select IBM_NEW_EMAC_RGMII
++
+ config 405GPR
+ 	bool
+ 
+diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile
+index 51dadee..5533a5c 100644
+--- a/arch/powerpc/platforms/40x/Makefile
++++ b/arch/powerpc/platforms/40x/Makefile
+@@ -1,3 +1,5 @@
+ obj-$(CONFIG_KILAUEA)				+= kilauea.o
++obj-$(CONFIG_MAKALU)				+= makalu.o
+ obj-$(CONFIG_WALNUT)				+= walnut.o
+ obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD)	+= virtex.o
++obj-$(CONFIG_EP405)				+= ep405.o
+diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
+new file mode 100644
+index 0000000..13d1345
+--- /dev/null
++++ b/arch/powerpc/platforms/40x/ep405.c
+@@ -0,0 +1,123 @@
++/*
++ * Architecture- / platform-specific boot-time initialization code for
++ * IBM PowerPC 4xx based boards. Adapted from original
++ * code by Gary Thomas, Cort Dougan <cort at fsmlabs.com>, and Dan Malek
++ * <dan at net4x.com>.
++ *
++ * Copyright(c) 1999-2000 Grant Erickson <grant at lcse.umn.edu>
++ *
++ * Rewritten and ported to the merged powerpc tree:
++ * Copyright 2007 IBM Corporation
++ * Josh Boyer <jwboyer at linux.vnet.ibm.com>
++ *
++ * Adapted to EP405 by Ben. Herrenschmidt <benh at kernel.crashing.org>
++ *
++ * TODO: Wire up the PCI IRQ mux and the southbridge interrupts
++ *
++ * 2002 (c) MontaVista, Software, Inc.  This file is licensed under
++ * the terms of the GNU General Public License version 2.  This program
++ * is licensed "as is" without any warranty of any kind, whether express
++ * or implied.
++ */
++
++#include <linux/init.h>
++#include <linux/of_platform.h>
++
++#include <asm/machdep.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/time.h>
++#include <asm/uic.h>
++#include <asm/pci-bridge.h>
++
++static struct device_node *bcsr_node;
++static void __iomem *bcsr_regs;
++
++/* BCSR registers  */
++#define BCSR_ID			0
++#define BCSR_PCI_CTRL	       	1
++#define BCSR_FLASH_NV_POR_CTRL	2
++#define BCSR_FENET_UART_CTRL	3
++#define BCSR_PCI_IRQ		4
++#define BCSR_XIRQ_SELECT	5
++#define BCSR_XIRQ_ROUTING	6
++#define BCSR_XIRQ_STATUS	7
++#define BCSR_XIRQ_STATUS2	8
++#define BCSR_SW_STAT_LED_CTRL	9
++#define BCSR_GPIO_IRQ_PAR_CTRL	10
++/* there's more, can't be bothered typing them tho */
++
++
++static __initdata struct of_device_id ep405_of_bus[] = {
++	{ .compatible = "ibm,plb3", },
++	{ .compatible = "ibm,opb", },
++	{ .compatible = "ibm,ebc", },
++	{},
++};
++
++static int __init ep405_device_probe(void)
++{
++	of_platform_bus_probe(NULL, ep405_of_bus, NULL);
++
++	return 0;
++}
++machine_device_initcall(ep405, ep405_device_probe);
++
++static void __init ep405_init_bcsr(void)
++{
++	const u8 *irq_routing;
++	int i;
++
++	/* Find the bloody thing & map it */
++	bcsr_node = of_find_compatible_node(NULL, NULL, "ep405-bcsr");
++	if (bcsr_node == NULL) {
++		printk(KERN_ERR "EP405 BCSR not found !\n");
++		return;
++	}
++	bcsr_regs = of_iomap(bcsr_node, 0);
++	if (bcsr_regs == NULL) {
++		printk(KERN_ERR "EP405 BCSR failed to map !\n");
++		return;
++	}
++
++	/* Get the irq-routing property and apply the routing to the CPLD */
++	irq_routing = of_get_property(bcsr_node, "irq-routing", NULL);
++	if (irq_routing == NULL)
++		return;
++	for (i = 0; i < 16; i++) {
++		u8 irq = irq_routing[i];
++		out_8(bcsr_regs + BCSR_XIRQ_SELECT, i);
++		out_8(bcsr_regs + BCSR_XIRQ_ROUTING, irq);
++	}
++	in_8(bcsr_regs + BCSR_XIRQ_SELECT);
++	mb();
++	out_8(bcsr_regs + BCSR_GPIO_IRQ_PAR_CTRL, 0xfe);
++}
++
++static void __init ep405_setup_arch(void)
++{
++	/* Find & init the BCSR CPLD */
++	ep405_init_bcsr();
++
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++}
++
++static int __init ep405_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	if (!of_flat_dt_is_compatible(root, "ep405"))
++		return 0;
++
++	return 1;
++}
++
++define_machine(ep405) {
++	.name			= "EP405",
++	.probe			= ep405_probe,
++	.setup_arch		= ep405_setup_arch,
++	.progress		= udbg_progress,
++	.init_IRQ		= uic_init_tree,
++	.get_irq		= uic_get_irq,
++	.calibrate_decr		= generic_calibrate_decr,
++};
+diff --git a/arch/powerpc/platforms/40x/kilauea.c b/arch/powerpc/platforms/40x/kilauea.c
+index 1bffdbd..f9206a7 100644
+--- a/arch/powerpc/platforms/40x/kilauea.c
++++ b/arch/powerpc/platforms/40x/kilauea.c
+@@ -19,8 +19,9 @@
+ #include <asm/udbg.h>
+ #include <asm/time.h>
+ #include <asm/uic.h>
++#include <asm/pci-bridge.h>
+ 
+-static struct of_device_id kilauea_of_bus[] = {
++static __initdata struct of_device_id kilauea_of_bus[] = {
+ 	{ .compatible = "ibm,plb4", },
+ 	{ .compatible = "ibm,opb", },
+ 	{ .compatible = "ibm,ebc", },
+@@ -29,14 +30,11 @@ static struct of_device_id kilauea_of_bus[] = {
+ 
+ static int __init kilauea_device_probe(void)
+ {
+-	if (!machine_is(kilauea))
+-		return 0;
+-
+ 	of_platform_bus_probe(NULL, kilauea_of_bus, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(kilauea_device_probe);
++machine_device_initcall(kilauea, kilauea_device_probe);
+ 
+ static int __init kilauea_probe(void)
+ {
+@@ -45,6 +43,8 @@ static int __init kilauea_probe(void)
+ 	if (!of_flat_dt_is_compatible(root, "amcc,kilauea"))
+ 		return 0;
+ 
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
+ 	return 1;
+ }
+ 
+diff --git a/arch/powerpc/platforms/40x/makalu.c b/arch/powerpc/platforms/40x/makalu.c
+new file mode 100644
+index 0000000..4e4df72
+--- /dev/null
++++ b/arch/powerpc/platforms/40x/makalu.c
+@@ -0,0 +1,58 @@
++/*
++ * Makalu board specific routines
++ *
++ * Copyright 2007 DENX Software Engineering, Stefan Roese <sr at denx.de>
++ *
++ * Based on the Walnut code by
++ * Josh Boyer <jwboyer at linux.vnet.ibm.com>
++ * Copyright 2007 IBM Corporation
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++#include <linux/init.h>
++#include <linux/of_platform.h>
++#include <asm/machdep.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/time.h>
++#include <asm/uic.h>
++#include <asm/pci-bridge.h>
++
++static __initdata struct of_device_id makalu_of_bus[] = {
++	{ .compatible = "ibm,plb4", },
++	{ .compatible = "ibm,opb", },
++	{ .compatible = "ibm,ebc", },
++	{},
++};
++
++static int __init makalu_device_probe(void)
++{
++	of_platform_bus_probe(NULL, makalu_of_bus, NULL);
++
++	return 0;
++}
++machine_device_initcall(makalu, makalu_device_probe);
++
++static int __init makalu_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	if (!of_flat_dt_is_compatible(root, "amcc,makalu"))
++		return 0;
++
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
++	return 1;
++}
++
++define_machine(makalu) {
++	.name 				= "Makalu",
++	.probe 				= makalu_probe,
++	.progress 			= udbg_progress,
++	.init_IRQ 			= uic_init_tree,
++	.get_irq 			= uic_get_irq,
++	.calibrate_decr			= generic_calibrate_decr,
++};
+diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
+index 14bbc32..88b6644 100644
+--- a/arch/powerpc/platforms/40x/virtex.c
++++ b/arch/powerpc/platforms/40x/virtex.c
+@@ -15,16 +15,23 @@
+ #include <asm/time.h>
+ #include <asm/xilinx_intc.h>
+ 
++static struct of_device_id xilinx_of_bus_ids[] __initdata = {
++	{ .compatible = "xlnx,plb-v46-1.00.a", },
++	{ .compatible = "xlnx,plb-v34-1.01.a", },
++	{ .compatible = "xlnx,plb-v34-1.02.a", },
++	{ .compatible = "xlnx,opb-v20-1.10.c", },
++	{ .compatible = "xlnx,dcr-v29-1.00.a", },
++	{ .compatible = "xlnx,compound", },
++	{}
++};
++
+ static int __init virtex_device_probe(void)
+ {
+-	if (!machine_is(virtex))
+-		return 0;
+-
+-	of_platform_bus_probe(NULL, NULL, NULL);
++	of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(virtex_device_probe);
++machine_device_initcall(virtex, virtex_device_probe);
+ 
+ static int __init virtex_probe(void)
+ {
+diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c
+index ff6db24..5d9edd9 100644
+--- a/arch/powerpc/platforms/40x/walnut.c
++++ b/arch/powerpc/platforms/40x/walnut.c
+@@ -24,8 +24,9 @@
+ #include <asm/udbg.h>
+ #include <asm/time.h>
+ #include <asm/uic.h>
++#include <asm/pci-bridge.h>
+ 
+-static struct of_device_id walnut_of_bus[] = {
++static __initdata struct of_device_id walnut_of_bus[] = {
+ 	{ .compatible = "ibm,plb3", },
+ 	{ .compatible = "ibm,opb", },
+ 	{ .compatible = "ibm,ebc", },
+@@ -34,15 +35,12 @@ static struct of_device_id walnut_of_bus[] = {
+ 
+ static int __init walnut_device_probe(void)
+ {
+-	if (!machine_is(walnut))
+-		return 0;
+-
+-	/* FIXME: do bus probe here */
+ 	of_platform_bus_probe(NULL, walnut_of_bus, NULL);
++	of_instantiate_rtc();
+ 
+ 	return 0;
+ }
+-device_initcall(walnut_device_probe);
++machine_device_initcall(walnut, walnut_device_probe);
+ 
+ static int __init walnut_probe(void)
+ {
+@@ -51,6 +49,8 @@ static int __init walnut_probe(void)
+ 	if (!of_flat_dt_is_compatible(root, "ibm,walnut"))
+ 		return 0;
+ 
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
+ 	return 1;
+ }
+ 
+diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
+index 8390cc1..c062c4c 100644
+--- a/arch/powerpc/platforms/44x/Kconfig
++++ b/arch/powerpc/platforms/44x/Kconfig
+@@ -3,6 +3,7 @@ config BAMBOO
+ 	depends on 44x
+ 	default n
+ 	select 440EP
++	select PCI
+ 	help
+ 	  This option enables support for the IBM PPC440EP evaluation board.
+ 
+@@ -11,6 +12,8 @@ config EBONY
+ 	depends on 44x
+ 	default y
+ 	select 440GP
++	select PCI
++	select OF_RTC
+ 	help
+ 	  This option enables support for the IBM PPC440GP evaluation board.
+ 
+@@ -22,6 +25,48 @@ config SEQUOIA
+ 	help
+ 	  This option enables support for the AMCC PPC440EPX evaluation board.
+ 
++config TAISHAN
++	bool "Taishan"
++	depends on 44x
++	default n
++	select 440GX
++	select PCI
++	help
++	  This option enables support for the AMCC PPC440GX "Taishan"
++	  evaluation board.
++
++config KATMAI
++	bool "Katmai"
++	depends on 44x
++	default n
++	select 440SPe
++	select PCI
++	select PPC4xx_PCI_EXPRESS
++	help
++	  This option enables support for the AMCC PPC440SPe evaluation board.
++
++config RAINIER
++	bool "Rainier"
++	depends on 44x
++	default n
++	select 440GRX
++	select PCI
++	help
++	  This option enables support for the AMCC PPC440GRX evaluation board.
++
++config WARP
++	bool "PIKA Warp"
++	depends on 44x
++	default n
++	select 440EP
++	help
++	  This option enables support for the PIKA Warp(tm) Appliance. The Warp
++          is a small computer replacement with up to 9 ports of FXO/FXS plus VOIP
++	  stations and trunks.
++
++	  See http://www.pikatechnologies.com/ and follow the "PIKA for Computer
++	  Telephony Developers" link for more information.
++
+ #config LUAN
+ #	bool "Luan"
+ #	depends on 44x
+@@ -44,6 +89,7 @@ config 440EP
+ 	select PPC_FPU
+ 	select IBM440EP_ERR42
+ 	select IBM_NEW_EMAC_ZMII
++	select USB_ARCH_HAS_OHCI
+ 
+ config 440EPX
+ 	bool
+@@ -52,20 +98,29 @@ config 440EPX
+ 	select IBM_NEW_EMAC_RGMII
+ 	select IBM_NEW_EMAC_ZMII
+ 
++config 440GRX
++	bool
++	select IBM_NEW_EMAC_EMAC4
++	select IBM_NEW_EMAC_RGMII
++	select IBM_NEW_EMAC_ZMII
++
+ config 440GP
+ 	bool
+ 	select IBM_NEW_EMAC_ZMII
+ 
+ config 440GX
+ 	bool
++        select IBM_NEW_EMAC_EMAC4
++	select IBM_NEW_EMAC_RGMII
++        select IBM_NEW_EMAC_ZMII #test only
++        select IBM_NEW_EMAC_TAH  #test only
+ 
+ config 440SP
+ 	bool
+ 
+-config 440A
++config 440SPe
++        select IBM_NEW_EMAC_EMAC4
+ 	bool
+-	depends on 440GX || 440EPX
+-	default y
+ 
+ # 44x errata/workaround config symbols, selected by the CPU models above
+ config IBM440EP_ERR42
+diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
+index 10ce674..0864d4f 100644
+--- a/arch/powerpc/platforms/44x/Makefile
++++ b/arch/powerpc/platforms/44x/Makefile
+@@ -1,4 +1,9 @@
+ obj-$(CONFIG_44x)	:= misc_44x.o
+ obj-$(CONFIG_EBONY)	+= ebony.o
+-obj-$(CONFIG_BAMBOO) += bamboo.o
++obj-$(CONFIG_TAISHAN)	+= taishan.o
++obj-$(CONFIG_BAMBOO)	+= bamboo.o
+ obj-$(CONFIG_SEQUOIA)	+= sequoia.o
++obj-$(CONFIG_KATMAI)	+= katmai.o
++obj-$(CONFIG_RAINIER)	+= rainier.o
++obj-$(CONFIG_WARP)	+= warp.o
++obj-$(CONFIG_WARP)	+= warp-nand.o
+diff --git a/arch/powerpc/platforms/44x/bamboo.c b/arch/powerpc/platforms/44x/bamboo.c
+index be23f11..fb9a22a 100644
+--- a/arch/powerpc/platforms/44x/bamboo.c
++++ b/arch/powerpc/platforms/44x/bamboo.c
+@@ -21,9 +21,11 @@
+ #include <asm/udbg.h>
+ #include <asm/time.h>
+ #include <asm/uic.h>
++#include <asm/pci-bridge.h>
++
+ #include "44x.h"
+ 
+-static struct of_device_id bamboo_of_bus[] = {
++static __initdata struct of_device_id bamboo_of_bus[] = {
+ 	{ .compatible = "ibm,plb4", },
+ 	{ .compatible = "ibm,opb", },
+ 	{ .compatible = "ibm,ebc", },
+@@ -32,14 +34,11 @@ static struct of_device_id bamboo_of_bus[] = {
+ 
+ static int __init bamboo_device_probe(void)
+ {
+-	if (!machine_is(bamboo))
+-		return 0;
+-
+ 	of_platform_bus_probe(NULL, bamboo_of_bus, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(bamboo_device_probe);
++machine_device_initcall(bamboo, bamboo_device_probe);
+ 
+ static int __init bamboo_probe(void)
+ {
+@@ -48,6 +47,8 @@ static int __init bamboo_probe(void)
+ 	if (!of_flat_dt_is_compatible(root, "amcc,bamboo"))
+ 		return 0;
+ 
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
+ 	return 1;
+ }
+ 
+diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c
+index 6cd3476..1a8d467 100644
+--- a/arch/powerpc/platforms/44x/ebony.c
++++ b/arch/powerpc/platforms/44x/ebony.c
+@@ -18,16 +18,18 @@
+ 
+ #include <linux/init.h>
+ #include <linux/of_platform.h>
++#include <linux/rtc.h>
+ 
+ #include <asm/machdep.h>
+ #include <asm/prom.h>
+ #include <asm/udbg.h>
+ #include <asm/time.h>
+ #include <asm/uic.h>
++#include <asm/pci-bridge.h>
+ 
+ #include "44x.h"
+ 
+-static struct of_device_id ebony_of_bus[] = {
++static __initdata struct of_device_id ebony_of_bus[] = {
+ 	{ .compatible = "ibm,plb4", },
+ 	{ .compatible = "ibm,opb", },
+ 	{ .compatible = "ibm,ebc", },
+@@ -36,14 +38,12 @@ static struct of_device_id ebony_of_bus[] = {
+ 
+ static int __init ebony_device_probe(void)
+ {
+-	if (!machine_is(ebony))
+-		return 0;
+-
+ 	of_platform_bus_probe(NULL, ebony_of_bus, NULL);
++	of_instantiate_rtc();
+ 
+ 	return 0;
+ }
+-device_initcall(ebony_device_probe);
++machine_device_initcall(ebony, ebony_device_probe);
+ 
+ /*
+  * Called very early, MMU is off, device-tree isn't unflattened
+@@ -55,6 +55,8 @@ static int __init ebony_probe(void)
+ 	if (!of_flat_dt_is_compatible(root, "ibm,ebony"))
+ 		return 0;
+ 
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
+ 	return 1;
+ }
+ 
+diff --git a/arch/powerpc/platforms/44x/katmai.c b/arch/powerpc/platforms/44x/katmai.c
+new file mode 100644
+index 0000000..1113412
+--- /dev/null
++++ b/arch/powerpc/platforms/44x/katmai.c
+@@ -0,0 +1,63 @@
++/*
++ * Katmai board specific routines
++ *
++ * Benjamin Herrenschmidt <benh at kernel.crashing.org>
++ * Copyright 2007 IBM Corp.
++ *
++ * Based on the Bamboo code by
++ * Josh Boyer <jwboyer at linux.vnet.ibm.com>
++ * Copyright 2007 IBM Corporation
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++#include <linux/init.h>
++#include <linux/of_platform.h>
++
++#include <asm/machdep.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/time.h>
++#include <asm/uic.h>
++#include <asm/pci-bridge.h>
++
++#include "44x.h"
++
++static __initdata struct of_device_id katmai_of_bus[] = {
++	{ .compatible = "ibm,plb4", },
++	{ .compatible = "ibm,opb", },
++	{ .compatible = "ibm,ebc", },
++	{},
++};
++
++static int __init katmai_device_probe(void)
++{
++	of_platform_bus_probe(NULL, katmai_of_bus, NULL);
++
++	return 0;
++}
++machine_device_initcall(katmai, katmai_device_probe);
++
++static int __init katmai_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	if (!of_flat_dt_is_compatible(root, "amcc,katmai"))
++		return 0;
++
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
++	return 1;
++}
++
++define_machine(katmai) {
++	.name 				= "Katmai",
++	.probe 				= katmai_probe,
++	.progress 			= udbg_progress,
++	.init_IRQ 			= uic_init_tree,
++	.get_irq 			= uic_get_irq,
++	.restart			= ppc44x_reset_system,
++	.calibrate_decr			= generic_calibrate_decr,
++};
+diff --git a/arch/powerpc/platforms/44x/rainier.c b/arch/powerpc/platforms/44x/rainier.c
+new file mode 100644
+index 0000000..a7fae1c
+--- /dev/null
++++ b/arch/powerpc/platforms/44x/rainier.c
+@@ -0,0 +1,62 @@
++/*
++ * Rainier board specific routines
++ *
++ * Valentine Barshak <vbarshak at ru.mvista.com>
++ * Copyright 2007 MontaVista Software Inc.
++ *
++ * Based on the Bamboo code by
++ * Josh Boyer <jwboyer at linux.vnet.ibm.com>
++ * Copyright 2007 IBM Corporation
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++#include <linux/init.h>
++#include <linux/of_platform.h>
++
++#include <asm/machdep.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/time.h>
++#include <asm/uic.h>
++#include <asm/pci-bridge.h>
++#include "44x.h"
++
++static __initdata struct of_device_id rainier_of_bus[] = {
++	{ .compatible = "ibm,plb4", },
++	{ .compatible = "ibm,opb", },
++	{ .compatible = "ibm,ebc", },
++	{},
++};
++
++static int __init rainier_device_probe(void)
++{
++	of_platform_bus_probe(NULL, rainier_of_bus, NULL);
++
++	return 0;
++}
++machine_device_initcall(rainier, rainier_device_probe);
++
++static int __init rainier_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	if (!of_flat_dt_is_compatible(root, "amcc,rainier"))
++		return 0;
++
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
++	return 1;
++}
++
++define_machine(rainier) {
++	.name 				= "Rainier",
++	.probe 				= rainier_probe,
++	.progress 			= udbg_progress,
++	.init_IRQ 			= uic_init_tree,
++	.get_irq 			= uic_get_irq,
++	.restart			= ppc44x_reset_system,
++	.calibrate_decr			= generic_calibrate_decr,
++};
+diff --git a/arch/powerpc/platforms/44x/sequoia.c b/arch/powerpc/platforms/44x/sequoia.c
+index 21a9dd1..d279db4 100644
+--- a/arch/powerpc/platforms/44x/sequoia.c
++++ b/arch/powerpc/platforms/44x/sequoia.c
+@@ -21,9 +21,11 @@
+ #include <asm/udbg.h>
+ #include <asm/time.h>
+ #include <asm/uic.h>
++#include <asm/pci-bridge.h>
++
+ #include "44x.h"
+ 
+-static struct of_device_id sequoia_of_bus[] = {
++static __initdata struct of_device_id sequoia_of_bus[] = {
+ 	{ .compatible = "ibm,plb4", },
+ 	{ .compatible = "ibm,opb", },
+ 	{ .compatible = "ibm,ebc", },
+@@ -32,14 +34,11 @@ static struct of_device_id sequoia_of_bus[] = {
+ 
+ static int __init sequoia_device_probe(void)
+ {
+-	if (!machine_is(sequoia))
+-		return 0;
+-
+ 	of_platform_bus_probe(NULL, sequoia_of_bus, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(sequoia_device_probe);
++machine_device_initcall(sequoia, sequoia_device_probe);
+ 
+ static int __init sequoia_probe(void)
+ {
+@@ -48,6 +47,8 @@ static int __init sequoia_probe(void)
+ 	if (!of_flat_dt_is_compatible(root, "amcc,sequoia"))
+ 		return 0;
+ 
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
+ 	return 1;
+ }
+ 
+diff --git a/arch/powerpc/platforms/44x/taishan.c b/arch/powerpc/platforms/44x/taishan.c
+new file mode 100644
+index 0000000..28ab7e2
+--- /dev/null
++++ b/arch/powerpc/platforms/44x/taishan.c
+@@ -0,0 +1,73 @@
++/*
++ * Taishan board specific routines based off ebony.c code
++ * original copyrights below
++ *
++ * Matt Porter <mporter at kernel.crashing.org>
++ * Copyright 2002-2005 MontaVista Software Inc.
++ *
++ * Eugene Surovegin <eugene.surovegin at zultys.com> or <ebs at ebshome.net>
++ * Copyright (c) 2003-2005 Zultys Technologies
++ *
++ * Rewritten and ported to the merged powerpc tree:
++ * Copyright 2007 David Gibson <dwg at au1.ibm.com>, IBM Corporation.
++ *
++ * Modified from ebony.c for taishan:
++ * Copyright 2007 Hugh Blemings <hugh at au.ibm.com>, IBM Corporation.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/init.h>
++#include <linux/of_platform.h>
++
++#include <asm/machdep.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/time.h>
++#include <asm/uic.h>
++#include <asm/pci-bridge.h>
++
++#include "44x.h"
++
++static __initdata struct of_device_id taishan_of_bus[] = {
++	{ .compatible = "ibm,plb4", },
++	{ .compatible = "ibm,opb", },
++	{ .compatible = "ibm,ebc", },
++	{},
++};
++
++static int __init taishan_device_probe(void)
++{
++	of_platform_bus_probe(NULL, taishan_of_bus, NULL);
++
++	return 0;
++}
++machine_device_initcall(taishan, taishan_device_probe);
++
++/*
++ * Called very early, MMU is off, device-tree isn't unflattened
++ */
++static int __init taishan_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	if (!of_flat_dt_is_compatible(root, "amcc,taishan"))
++		return 0;
++
++	ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
++
++	return 1;
++}
++
++define_machine(taishan) {
++	.name			= "Taishan",
++	.probe			= taishan_probe,
++	.progress		= udbg_progress,
++	.init_IRQ		= uic_init_tree,
++	.get_irq		= uic_get_irq,
++	.restart		= ppc44x_reset_system,
++	.calibrate_decr		= generic_calibrate_decr,
++};
+diff --git a/arch/powerpc/platforms/44x/warp-nand.c b/arch/powerpc/platforms/44x/warp-nand.c
+new file mode 100644
+index 0000000..84ab78f
+--- /dev/null
++++ b/arch/powerpc/platforms/44x/warp-nand.c
+@@ -0,0 +1,105 @@
++/*
++ * PIKA Warp(tm) NAND flash specific routines
++ *
++ * Copyright (c) 2008 PIKA Technologies
++ *   Sean MacLennan <smaclennan at pikatech.com>
++ */
++
++#include <linux/platform_device.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/map.h>
++#include <linux/mtd/partitions.h>
++#include <linux/mtd/nand.h>
++#include <linux/mtd/ndfc.h>
++
++#ifdef CONFIG_MTD_NAND_NDFC
++
++#define CS_NAND_0	1	/* use chip select 1 for NAND device 0 */
++
++#define WARP_NAND_FLASH_REG_ADDR	0xD0000000UL
++#define WARP_NAND_FLASH_REG_SIZE	0x2000
++
++static struct resource warp_ndfc = {
++	.start = WARP_NAND_FLASH_REG_ADDR,
++	.end   = WARP_NAND_FLASH_REG_ADDR + WARP_NAND_FLASH_REG_SIZE,
++	.flags = IORESOURCE_MEM,
++};
++
++static struct mtd_partition nand_parts[] = {
++	{
++		.name   = "kernel",
++		.offset = 0,
++		.size   = 0x0200000
++	},
++	{
++		.name   = "root",
++		.offset = 0x0200000,
++		.size   = 0x3400000
++	},
++	{
++		.name   = "user",
++		.offset = 0x3600000,
++		.size   = 0x0A00000
++	},
++};
++
++struct ndfc_controller_settings warp_ndfc_settings = {
++	.ccr_settings = (NDFC_CCR_BS(CS_NAND_0) | NDFC_CCR_ARAC1),
++	.ndfc_erpn = 0,
++};
++
++static struct ndfc_chip_settings warp_chip0_settings = {
++	.bank_settings = 0x80002222,
++};
++
++struct platform_nand_ctrl warp_nand_ctrl = {
++	.priv = &warp_ndfc_settings,
++};
++
++static struct platform_device warp_ndfc_device = {
++	.name = "ndfc-nand",
++	.id = 0,
++	.dev = {
++		.platform_data = &warp_nand_ctrl,
++	},
++	.num_resources = 1,
++	.resource = &warp_ndfc,
++};
++
++static struct nand_ecclayout nand_oob_16 = {
++	.eccbytes = 3,
++	.eccpos = { 0, 1, 2, 3, 6, 7 },
++	.oobfree = { {.offset = 8, .length = 16} }
++};
++
++static struct platform_nand_chip warp_nand_chip0 = {
++	.nr_chips = 1,
++	.chip_offset = CS_NAND_0,
++	.nr_partitions = ARRAY_SIZE(nand_parts),
++	.partitions = nand_parts,
++	.chip_delay = 50,
++	.ecclayout = &nand_oob_16,
++	.priv = &warp_chip0_settings,
++};
++
++static struct platform_device warp_nand_device = {
++	.name = "ndfc-chip",
++	.id = 0,
++	.num_resources = 1,
++	.resource = &warp_ndfc,
++	.dev = {
++		.platform_data = &warp_nand_chip0,
++		.parent = &warp_ndfc_device.dev,
++	}
++};
++
++static int warp_setup_nand_flash(void)
++{
++	platform_device_register(&warp_ndfc_device);
++	platform_device_register(&warp_nand_device);
++
++	return 0;
++}
++device_initcall(warp_setup_nand_flash);
++
++#endif
+diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
+new file mode 100644
+index 0000000..8f01563
+--- /dev/null
++++ b/arch/powerpc/platforms/44x/warp.c
+@@ -0,0 +1,153 @@
++/*
++ * PIKA Warp(tm) board specific routines
++ *
++ * Copyright (c) 2008 PIKA Technologies
++ *   Sean MacLennan <smaclennan at pikatech.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++#include <linux/init.h>
++#include <linux/of_platform.h>
++#include <linux/kthread.h>
++
++#include <asm/machdep.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/time.h>
++#include <asm/uic.h>
++
++#include "44x.h"
++
++
++static __initdata struct of_device_id warp_of_bus[] = {
++	{ .compatible = "ibm,plb4", },
++	{ .compatible = "ibm,opb", },
++	{ .compatible = "ibm,ebc", },
++	{},
++};
++
++static int __init warp_device_probe(void)
++{
++	of_platform_bus_probe(NULL, warp_of_bus, NULL);
++	return 0;
++}
++machine_device_initcall(warp, warp_device_probe);
++
++static int __init warp_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	return of_flat_dt_is_compatible(root, "pika,warp");
++}
++
++define_machine(warp) {
++	.name		= "Warp",
++	.probe 		= warp_probe,
++	.progress 	= udbg_progress,
++	.init_IRQ 	= uic_init_tree,
++	.get_irq 	= uic_get_irq,
++	.restart	= ppc44x_reset_system,
++	.calibrate_decr = generic_calibrate_decr,
++};
++
++
++#define LED_GREEN (0x80000000 >> 0)
++#define LED_RED   (0x80000000 >> 1)
++
++
++/* This is for the power LEDs 1 = on, 0 = off, -1 = leave alone */
++void warp_set_power_leds(int green, int red)
++{
++	static void __iomem *gpio_base = NULL;
++	unsigned leds;
++
++	if (gpio_base == NULL) {
++		struct device_node *np;
++
++		/* Power LEDS are on the second GPIO controller */
++		np = of_find_compatible_node(NULL, NULL, "ibm,gpio-440EP");
++		if (np)
++			np = of_find_compatible_node(np, NULL, "ibm,gpio-440EP");
++		if (np == NULL) {
++			printk(KERN_ERR __FILE__ ": Unable to find gpio\n");
++			return;
++		}
++
++		gpio_base = of_iomap(np, 0);
++		of_node_put(np);
++		if (gpio_base == NULL) {
++			printk(KERN_ERR __FILE__ ": Unable to map gpio");
++			return;
++		}
++	}
++
++	leds = in_be32(gpio_base);
++
++	switch (green) {
++	case 0: leds &= ~LED_GREEN; break;
++	case 1: leds |=  LED_GREEN; break;
++	}
++	switch (red) {
++	case 0: leds &= ~LED_RED; break;
++	case 1: leds |=  LED_RED; break;
++	}
++
++	out_be32(gpio_base, leds);
++}
++EXPORT_SYMBOL(warp_set_power_leds);
++
++
++#ifdef CONFIG_SENSORS_AD7414
++static int pika_dtm_thread(void __iomem *fpga)
++{
++	extern int ad7414_get_temp(int index);
++
++	while (!kthread_should_stop()) {
++		int temp = ad7414_get_temp(0);
++
++		out_be32(fpga, temp);
++
++		set_current_state(TASK_INTERRUPTIBLE);
++		schedule_timeout(HZ);
++	}
++
++	return 0;
++}
++
++static int __init pika_dtm_start(void)
++{
++	struct task_struct *dtm_thread;
++	struct device_node *np;
++	struct resource res;
++	void __iomem *fpga;
++
++	np = of_find_compatible_node(NULL, NULL, "pika,fpga");
++	if (np == NULL)
++		return -ENOENT;
++
++	/* We do not call of_iomap here since it would map in the entire
++	 * fpga space, which is over 8k.
++	 */
++	if (of_address_to_resource(np, 0, &res)) {
++		of_node_put(np);
++		return -ENOENT;
++	}
++	of_node_put(np);
++
++	fpga = ioremap(res.start + 0x20, 4);
++	if (fpga == NULL)
++		return -ENOENT;
++
++	dtm_thread = kthread_run(pika_dtm_thread, fpga + 0x20, "pika-dtm");
++	if (IS_ERR(dtm_thread)) {
++		iounmap(fpga);
++		return PTR_ERR(dtm_thread);
++	}
++
++	return 0;
++}
++device_initcall(pika_dtm_start);
++#endif
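warp_set_power_leds() above is exported for other PIKA board code; per its comment, 1 switches an LED on, 0 switches it off, and -1 leaves it alone. A minimal, hypothetical caller:

	/* hypothetical use: power-good indication — green on, red untouched */
	warp_set_power_leds(1, -1);

	/* fault indication: green off, red on */
	warp_set_power_leds(0, 1);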
+diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
+index 2938d49..515f244 100644
+--- a/arch/powerpc/platforms/52xx/Kconfig
++++ b/arch/powerpc/platforms/52xx/Kconfig
+@@ -1,38 +1,48 @@
+ config PPC_MPC52xx
+-	bool
++	bool "52xx-based boards"
++	depends on PPC_MULTIPLATFORM && PPC32
+ 	select FSL_SOC
+ 	select PPC_CLOCK
+-	default n
+-
+-config PPC_MPC5200
+-	bool
+-	select PPC_MPC52xx
+-	default n
+ 
+-config PPC_MPC5200_BUGFIX
+-	bool "MPC5200 (L25R) bugfix support"
+-	depends on PPC_MPC5200
+-	default n
++config PPC_MPC5200_SIMPLE
++	bool "Generic support for simple MPC5200 based boards"
++	depends on PPC_MPC52xx
++	select DEFAULT_UIMAGE
++	select WANT_DEVICE_TREE
+ 	help
+-	  Enable workarounds for original MPC5200 errata.  This is not required
+-	  for MPC5200B based boards.
++	  This option enables support for simple MPC52xx based boards which
++	  do not need a custom platform specific setup. Such boards are
++	  supported assuming the following:
+ 
+-	  It is safe to say 'Y' here
++	  - GPIO pins are configured by the firmware,
++	  - CDM configuration (clocking) is setup correctly by firmware,
++	  - if the 'fsl,has-wdt' property is present in one of the
++	    gpt nodes, then it is safe to use such gpt to reset the board,
++	  - PCI is supported if enabled in the kernel configuration
++	    and if there is a PCI bus node defined in the device tree.
++
++	  Boards that are compatible with this generic platform support
++	  are: 'tqc,tqm5200', 'promess,motionpro', 'schindler,cm5200'.
+ 
+ config PPC_EFIKA
+ 	bool "bPlan Efika 5k2. MPC5200B based computer"
+-	depends on PPC_MULTIPLATFORM && PPC32
++	depends on PPC_MPC52xx
+ 	select PPC_RTAS
+ 	select RTAS_PROC
+-	select PPC_MPC52xx
+ 	select PPC_NATIVE
+-	default n
+ 
+ config PPC_LITE5200
+ 	bool "Freescale Lite5200 Eval Board"
+-	depends on PPC_MULTIPLATFORM && PPC32
++	depends on PPC_MPC52xx
++	select DEFAULT_UIMAGE
+ 	select WANT_DEVICE_TREE
+-	select PPC_MPC5200
+-	default n
+ 
++config PPC_MPC5200_BUGFIX
++	bool "MPC5200 (L25R) bugfix support"
++	depends on PPC_MPC52xx
++	help
++	  Enable workarounds for original MPC5200 errata.  This is not required
++	  for MPC5200B based boards.
++
++	  It is safe to say 'Y' here
+ 
+diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
+index 307dbc1..fe1b81b 100644
+--- a/arch/powerpc/platforms/52xx/Makefile
++++ b/arch/powerpc/platforms/52xx/Makefile
+@@ -6,6 +6,7 @@ obj-y				+= mpc52xx_pic.o mpc52xx_common.o
+ obj-$(CONFIG_PCI)		+= mpc52xx_pci.o
+ endif
+ 
++obj-$(CONFIG_PPC_MPC5200_SIMPLE) += mpc5200_simple.o
+ obj-$(CONFIG_PPC_EFIKA)		+= efika.o
+ obj-$(CONFIG_PPC_LITE5200)	+= lite5200.o
+ 
+diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
+index a0da70c..a2068fa 100644
+--- a/arch/powerpc/platforms/52xx/efika.c
++++ b/arch/powerpc/platforms/52xx/efika.c
+@@ -180,6 +180,9 @@ static void __init efika_setup_arch(void)
+ {
+ 	rtas_initialize();
+ 
++	/* Map important registers from the internal memory map */
++	mpc52xx_map_common_devices();
++
+ 	efika_pcisetup();
+ 
+ #ifdef CONFIG_PM
+diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
+index 25d2bfa..956f459 100644
+--- a/arch/powerpc/platforms/52xx/lite5200.c
++++ b/arch/powerpc/platforms/52xx/lite5200.c
+@@ -32,6 +32,19 @@
+  *
+  */
+ 
++/* mpc5200 device tree match tables */
++static struct of_device_id mpc5200_cdm_ids[] __initdata = {
++	{ .compatible = "fsl,mpc5200-cdm", },
++	{ .compatible = "mpc5200-cdm", },
++	{}
++};
++
++static struct of_device_id mpc5200_gpio_ids[] __initdata = {
++	{ .compatible = "fsl,mpc5200-gpio", },
++	{ .compatible = "mpc5200-gpio", },
++	{}
++};
++
+ /*
+  * Fix clock configuration.
+  *
+@@ -42,10 +55,12 @@
+ static void __init
+ lite5200_fix_clock_config(void)
+ {
++	struct device_node *np;
+ 	struct mpc52xx_cdm  __iomem *cdm;
+-
+ 	/* Map zones */
+-	cdm = mpc52xx_find_and_map("mpc5200-cdm");
++	np = of_find_matching_node(NULL, mpc5200_cdm_ids);
++	cdm = of_iomap(np, 0);
++	of_node_put(np);
+ 	if (!cdm) {
+ 		printk(KERN_ERR "%s() failed; expect abnormal behaviour\n",
+ 		       __FUNCTION__);
+@@ -74,10 +89,13 @@ lite5200_fix_clock_config(void)
+ static void __init
+ lite5200_fix_port_config(void)
+ {
++	struct device_node *np;
+ 	struct mpc52xx_gpio __iomem *gpio;
+ 	u32 port_config;
+ 
+-	gpio = mpc52xx_find_and_map("mpc5200-gpio");
++	np = of_find_matching_node(NULL, mpc5200_gpio_ids);
++	gpio = of_iomap(np, 0);
++	of_node_put(np);
+ 	if (!gpio) {
+ 		printk(KERN_ERR "%s() failed. expect abnormal behavior\n",
+ 		       __FUNCTION__);
+@@ -131,22 +149,18 @@ static void lite5200_resume_finish(void __iomem *mbar)
+ 
+ static void __init lite5200_setup_arch(void)
+ {
+-#ifdef CONFIG_PCI
+-	struct device_node *np;
+-#endif
+-
+ 	if (ppc_md.progress)
+ 		ppc_md.progress("lite5200_setup_arch()", 0);
+ 
+-	/* Fix things that firmware should have done. */
+-	lite5200_fix_clock_config();
+-	lite5200_fix_port_config();
++	/* Map important registers from the internal memory map */
++	mpc52xx_map_common_devices();
+ 
+ 	/* Some mpc5200 & mpc5200b related configuration */
+ 	mpc5200_setup_xlb_arbiter();
+ 
+-	/* Map wdt for mpc52xx_restart() */
+-	mpc52xx_map_wdt();
++	/* Fix things that firmware should have done. */
++	lite5200_fix_clock_config();
++	lite5200_fix_port_config();
+ 
+ #ifdef CONFIG_PM
+ 	mpc52xx_suspend.board_suspend_prepare = lite5200_suspend_prepare;
+@@ -154,13 +168,7 @@ static void __init lite5200_setup_arch(void)
+ 	lite5200_pm_init();
+ #endif
+ 
+-#ifdef CONFIG_PCI
+-	np = of_find_node_by_type(NULL, "pci");
+-	if (np) {
+-		mpc52xx_add_bridge(np);
+-		of_node_put(np);
+-	}
+-#endif
++	mpc52xx_setup_pci();
+ }
+ 
+ /*
+diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
+index ffa14af..c0f13e8 100644
+--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
++++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
+@@ -42,6 +42,15 @@ static int lite5200_pm_set_target(suspend_state_t state)
+ 
+ static int lite5200_pm_prepare(void)
+ {
++	struct device_node *np;
++	const struct of_device_id immr_ids[] = {
++		{ .compatible = "fsl,mpc5200-immr", },
++		{ .compatible = "fsl,mpc5200b-immr", },
++		{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
++		{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
++		{}
++	};
++
+ 	/* deep sleep? let mpc52xx code handle that */
+ 	if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
+ 		return mpc52xx_pm_prepare();
+@@ -50,7 +59,9 @@ static int lite5200_pm_prepare(void)
+ 		return -EINVAL;
+ 
+ 	/* map registers */
+-	mbar = mpc52xx_find_and_map("mpc5200");
++	np = of_find_matching_node(NULL, immr_ids);
++	mbar = of_iomap(np, 0);
++	of_node_put(np);
+ 	if (!mbar) {
+ 		printk(KERN_ERR "%s:%i Error mapping registers\n", __func__, __LINE__);
+ 		return -ENOSYS;
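The lite5200 hunks above all make the same conversion: the mpc52xx_find_and_map() helper (removed later in this patch) is replaced by a small of_device_id table plus of_find_matching_node()/of_iomap()/of_node_put(). The shape of the new idiom, as a standalone sketch with illustrative names:

	static struct of_device_id foo_ids[] __initdata = {
		{ .compatible = "fsl,mpc5200-foo", },	/* preferred, vendor-prefixed */
		{ .compatible = "mpc5200-foo", },	/* old, unprefixed form */
		{}
	};

	static void __iomem *foo_regs;

	static void __init map_foo(void)
	{
		struct device_node *np;

		np = of_find_matching_node(NULL, foo_ids);
		foo_regs = of_iomap(np, 0);	/* copes with np == NULL and returns NULL */
		of_node_put(np);		/* the mapping outlives the node reference */
		if (!foo_regs)
			printk(KERN_ERR "foo: could not map registers\n");
	}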
+diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
+new file mode 100644
+index 0000000..c48b82b
+--- /dev/null
++++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
+@@ -0,0 +1,85 @@
++/*
++ * Support for 'mpc5200-simple-platform' compatible boards.
++ *
++ * Written by Marian Balakowicz <m8 at semihalf.com>
++ * Copyright (C) 2007 Semihalf
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ * Description:
++ * This code implements support for simple MPC52xx based boards which
++ * do not need a custom platform specific setup. Such boards are
++ * supported assuming the following:
++ *
++ * - GPIO pins are configured by the firmware,
++ * - CDM configuration (clocking) is setup correctly by firmware,
++ * - if the 'fsl,has-wdt' property is present in one of the
++ *   gpt nodes, then it is safe to use such gpt to reset the board,
++ * - PCI is supported if enabled in the kernel configuration
++ *   and if there is a PCI bus node defined in the device tree.
++ *
++ * Boards that are compatible with this generic platform support
++ * are listed in a 'board' table.
++ */
++
++#undef DEBUG
++#include <asm/time.h>
++#include <asm/prom.h>
++#include <asm/machdep.h>
++#include <asm/mpc52xx.h>
++
++/*
++ * Setup the architecture
++ */
++static void __init mpc5200_simple_setup_arch(void)
++{
++	if (ppc_md.progress)
++		ppc_md.progress("mpc5200_simple_setup_arch()", 0);
++
++	/* Map important registers from the internal memory map */
++	mpc52xx_map_common_devices();
++
++	/* Some mpc5200 & mpc5200b related configuration */
++	mpc5200_setup_xlb_arbiter();
++
++	mpc52xx_setup_pci();
++}
++
++/* list of the supported boards */
++static char *board[] __initdata = {
++	"promess,motionpro",
++	"schindler,cm5200",
++	"tqc,tqm5200",
++	NULL
++};
++
++/*
++ * Called very early, MMU is off, device-tree isn't unflattened
++ */
++static int __init mpc5200_simple_probe(void)
++{
++	unsigned long node = of_get_flat_dt_root();
++	int i = 0;
++
++	while (board[i]) {
++		if (of_flat_dt_is_compatible(node, board[i]))
++			break;
++		i++;
++	}
++	
++	return (board[i] != NULL);
++}
++
++define_machine(mpc5200_simple_platform) {
++	.name		= "mpc5200-simple-platform",
++	.probe		= mpc5200_simple_probe,
++	.setup_arch	= mpc5200_simple_setup_arch,
++	.init		= mpc52xx_declare_of_platform_devices,
++	.init_IRQ	= mpc52xx_init_irq,
++	.get_irq	= mpc52xx_get_irq,
++	.restart	= mpc52xx_restart,
++	.calibrate_decr	= generic_calibrate_decr,
++};
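Supporting a further board on this generic platform only requires that its device tree meet the constraints listed in the header comment and that its root compatible string be appended to the board[] table; the extra entry below is hypothetical:

	static char *board[] __initdata = {
		"promess,motionpro",
		"schindler,cm5200",
		"tqc,tqm5200",
		"acme,example5200",	/* hypothetical new entry */
		NULL
	};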
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
+index 9850685..9aa4425 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
+@@ -13,57 +13,38 @@
+ #undef DEBUG
+ 
+ #include <linux/kernel.h>
++#include <linux/spinlock.h>
+ #include <linux/of_platform.h>
+ #include <asm/io.h>
+ #include <asm/prom.h>
+ #include <asm/mpc52xx.h>
+ 
++/* MPC5200 device tree match tables */
++static struct of_device_id mpc52xx_xlb_ids[] __initdata = {
++	{ .compatible = "fsl,mpc5200-xlb", },
++	{ .compatible = "mpc5200-xlb", },
++	{}
++};
++static struct of_device_id mpc52xx_bus_ids[] __initdata = {
++	{ .compatible = "fsl,mpc5200-immr", },
++	{ .compatible = "fsl,mpc5200b-immr", },
++	{ .compatible = "fsl,lpb", },
++
++	/* deprecated matches; shouldn't be used in new device trees */
++	{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
++	{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
++	{}
++};
++
+ /*
+  * This variable is mapped in mpc52xx_map_wdt() and used in mpc52xx_restart().
+  * Permanent mapping is required because mpc52xx_restart() can be called
+  * from interrupt context while node mapping (which calls ioremap())
+  * cannot be used at such point.
+  */
+-static volatile struct mpc52xx_gpt *mpc52xx_wdt = NULL;
+-
+-static void __iomem *
+-mpc52xx_map_node(struct device_node *ofn)
+-{
+-	const u32 *regaddr_p;
+-	u64 regaddr64, size64;
+-
+-	if (!ofn)
+-		return NULL;
+-
+-	regaddr_p = of_get_address(ofn, 0, &size64, NULL);
+-	if (!regaddr_p) {
+-		of_node_put(ofn);
+-		return NULL;
+-	}
+-
+-	regaddr64 = of_translate_address(ofn, regaddr_p);
+-
+-	of_node_put(ofn);
+-
+-	return ioremap((u32)regaddr64, (u32)size64);
+-}
+-
+-void __iomem *
+-mpc52xx_find_and_map(const char *compatible)
+-{
+-	return mpc52xx_map_node(
+-		of_find_compatible_node(NULL, NULL, compatible));
+-}
+-
+-EXPORT_SYMBOL(mpc52xx_find_and_map);
+-
+-void __iomem *
+-mpc52xx_find_and_map_path(const char *path)
+-{
+-	return mpc52xx_map_node(of_find_node_by_path(path));
+-}
+-
+-EXPORT_SYMBOL(mpc52xx_find_and_map_path);
++static spinlock_t mpc52xx_lock = SPIN_LOCK_UNLOCKED;
++static struct mpc52xx_gpt __iomem *mpc52xx_wdt;
++static struct mpc52xx_cdm __iomem *mpc52xx_cdm;
+ 
+ /**
+  * 	mpc52xx_find_ipb_freq - Find the IPB bus frequency for a device
+@@ -101,9 +82,12 @@ EXPORT_SYMBOL(mpc52xx_find_ipb_freq);
+ void __init
+ mpc5200_setup_xlb_arbiter(void)
+ {
++	struct device_node *np;
+ 	struct mpc52xx_xlb  __iomem *xlb;
+ 
+-	xlb = mpc52xx_find_and_map("mpc5200-xlb");
++	np = of_find_matching_node(NULL, mpc52xx_xlb_ids);
++	xlb = of_iomap(np, 0);
++	of_node_put(np);
+ 	if (!xlb) {
+ 		printk(KERN_ERR __FILE__ ": "
+ 			"Error mapping XLB in mpc52xx_setup_cpu().  "
+@@ -124,41 +108,101 @@ mpc5200_setup_xlb_arbiter(void)
+ 	iounmap(xlb);
+ }
+ 
++/**
++ * mpc52xx_declare_of_platform_devices: register internal devices and children
++ *					of the localplus bus to the of_platform
++ *					bus.
++ */
+ void __init
+ mpc52xx_declare_of_platform_devices(void)
+ {
+ 	/* Find every child of the SOC node and add it to of_platform */
+-	if (of_platform_bus_probe(NULL, NULL, NULL))
++	if (of_platform_bus_probe(NULL, mpc52xx_bus_ids, NULL))
+ 		printk(KERN_ERR __FILE__ ": "
+ 			"Error while probing of_platform bus\n");
+ }
+ 
++/*
++ * match tables used by mpc52xx_map_common_devices()
++ */
++static struct of_device_id mpc52xx_gpt_ids[] __initdata = {
++	{ .compatible = "fsl,mpc5200-gpt", },
++	{ .compatible = "mpc5200-gpt", }, /* old */
++	{}
++};
++static struct of_device_id mpc52xx_cdm_ids[] __initdata = {
++	{ .compatible = "fsl,mpc5200-cdm", },
++	{ .compatible = "mpc5200-cdm", }, /* old */
++	{}
++};
++
++/**
++ * mpc52xx_map_common_devices: iomap devices required by common code
++ */
+ void __init
+-mpc52xx_map_wdt(void)
++mpc52xx_map_common_devices(void)
+ {
+-	const void *has_wdt;
+ 	struct device_node *np;
+ 
+ 	/* mpc52xx_wdt is mapped here and used in mpc52xx_restart,
+ 	 * possibly from an interrupt context. wdt is only implemented
+ 	 * on gpt0, so check the has-wdt property before mapping.
+ 	 */
+-	for_each_compatible_node(np, NULL, "fsl,mpc5200-gpt") {
+-		has_wdt = of_get_property(np, "fsl,has-wdt", NULL);
+-		if (has_wdt) {
+-			mpc52xx_wdt = mpc52xx_map_node(np);
+-			return;
++	for_each_matching_node(np, mpc52xx_gpt_ids) {
++		if (of_get_property(np, "fsl,has-wdt", NULL) ||
++		    of_get_property(np, "has-wdt", NULL)) {
++			mpc52xx_wdt = of_iomap(np, 0);
++			of_node_put(np);
++			break;
+ 		}
+ 	}
+-	for_each_compatible_node(np, NULL, "mpc5200-gpt") {
+-		has_wdt = of_get_property(np, "has-wdt", NULL);
+-		if (has_wdt) {
+-			mpc52xx_wdt = mpc52xx_map_node(np);
+-			return;
+-		}
++
++	/* Clock Distribution Module, used by PSC clock setting function */
++	np = of_find_matching_node(NULL, mpc52xx_cdm_ids);
++	mpc52xx_cdm = of_iomap(np, 0);
++	of_node_put(np);
++}
++
++/**
++ * mpc52xx_set_psc_clkdiv: Set clock divider in the CDM for PSC ports
++ *
++ * @psc_id: id of psc port; must be 1,2,3 or 6
++ * @clkdiv: clock divider value to put into CDM PSC register.
++ */
++int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
++{
++	unsigned long flags;
++	u16 __iomem *reg;
++	u32 val;
++	u32 mask;
++	u32 mclken_div;
++
++	if (!mpc52xx_cdm)
++		return -ENODEV;
++
++	mclken_div = 0x8000 | (clkdiv & 0x1FF);
++	switch (psc_id) {
++	case 1: reg = &mpc52xx_cdm->mclken_div_psc1; mask = 0x20; break;
++	case 2: reg = &mpc52xx_cdm->mclken_div_psc2; mask = 0x40; break;
++	case 3: reg = &mpc52xx_cdm->mclken_div_psc3; mask = 0x80; break;
++	case 6: reg = &mpc52xx_cdm->mclken_div_psc6; mask = 0x10; break;
++	default:
++		return -ENODEV;
+ 	}
++
++	/* Set the rate and enable the clock */
++	spin_lock_irqsave(&mpc52xx_lock, flags);
++	out_be16(reg, mclken_div);
++	val = in_be32(&mpc52xx_cdm->clk_enables);
++	out_be32(&mpc52xx_cdm->clk_enables, val | mask);
++	spin_unlock_irqrestore(&mpc52xx_lock, flags);
++
++	return 0;
+ }
+ 
++/**
++ * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
++ */
+ void
+ mpc52xx_restart(char *cmd)
+ {
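mpc52xx_set_psc_clkdiv() introduced above is meant to be called by PSC client drivers once mpc52xx_map_common_devices() has mapped the CDM. A hedged sketch of a caller — the divider value and message are illustrative, not taken from a real driver:

	/* hypothetical PSC driver probe path: clock PSC1 from a divided MCLK */
	if (mpc52xx_set_psc_clkdiv(1, 0x00ff) < 0)
		/* -ENODEV: CDM not mapped, or the PSC id is not 1, 2, 3 or 6 */
		printk(KERN_WARNING "psc1: could not set clock divider\n");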
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+index 4c6c82a..e3428dd 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+@@ -99,6 +99,12 @@ struct mpc52xx_pci {
+ 	u8	reserved6[4];	/* PCI + 0xFC */
+ };
+ 
++/* MPC5200 device tree match tables */
++const struct of_device_id mpc52xx_pci_ids[] __initdata = {
++	{ .type = "pci", .compatible = "fsl,mpc5200-pci", },
++	{ .type = "pci", .compatible = "mpc5200-pci", },
++	{}
++};
+ 
+ /* ======================================================================== */
+ /* PCI configuration access                                                 */
+@@ -363,7 +369,7 @@ mpc52xx_add_bridge(struct device_node *node)
+ 
+ 	pr_debug("Adding MPC52xx PCI host bridge %s\n", node->full_name);
+ 
+-	pci_assign_all_buses = 1;
++	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ 
+ 	if (of_address_to_resource(node, 0, &rsrc) != 0) {
+ 		printk(KERN_ERR "Can't get %s resources\n", node->full_name);
+@@ -406,3 +412,15 @@ mpc52xx_add_bridge(struct device_node *node)
+ 
+ 	return 0;
+ }
++
++void __init mpc52xx_setup_pci(void)
++{
++	struct device_node *pci;
++
++	pci = of_find_matching_node(NULL, mpc52xx_pci_ids);
++	if (!pci)
++		return;
++
++	mpc52xx_add_bridge(pci);
++	of_node_put(pci);
++}
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+index 61100f2..d0dead8 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+@@ -29,6 +29,18 @@
+  *
+ */
+ 
++/* MPC5200 device tree match tables */
++static struct of_device_id mpc52xx_pic_ids[] __initdata = {
++	{ .compatible = "fsl,mpc5200-pic", },
++	{ .compatible = "mpc5200-pic", },
++	{}
++};
++static struct of_device_id mpc52xx_sdma_ids[] __initdata = {
++	{ .compatible = "fsl,mpc5200-bestcomm", },
++	{ .compatible = "mpc5200-bestcomm", },
++	{}
++};
++
+ static struct mpc52xx_intr __iomem *intr;
+ static struct mpc52xx_sdma __iomem *sdma;
+ static struct irq_host *mpc52xx_irqhost = NULL;
+@@ -364,16 +376,18 @@ void __init mpc52xx_init_irq(void)
+ {
+ 	u32 intr_ctrl;
+ 	struct device_node *picnode;
++	struct device_node *np;
+ 
+ 	/* Remap the necessary zones */
+-	picnode = of_find_compatible_node(NULL, NULL, "mpc5200-pic");
+-
+-	intr = mpc52xx_find_and_map("mpc5200-pic");
++	picnode = of_find_matching_node(NULL, mpc52xx_pic_ids);
++	intr = of_iomap(picnode, 0);
+ 	if (!intr)
+ 		panic(__FILE__	": find_and_map failed on 'mpc5200-pic'. "
+ 				"Check node !");
+ 
+-	sdma = mpc52xx_find_and_map("mpc5200-bestcomm");
++	np = of_find_matching_node(NULL, mpc52xx_sdma_ids);
++	sdma = of_iomap(np, 0);
++	of_node_put(np);
+ 	if (!sdma)
+ 		panic(__FILE__	": find_and_map failed on 'mpc5200-bestcomm'. "
+ 				"Check node !");
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+index 7ffa7ba..c72d330 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+@@ -59,10 +59,21 @@ int mpc52xx_set_wakeup_gpio(u8 pin, u8 level)
+ 
+ int mpc52xx_pm_prepare(void)
+ {
++	struct device_node *np;
++	const struct of_device_id immr_ids[] = {
++		{ .compatible = "fsl,mpc5200-immr", },
++		{ .compatible = "fsl,mpc5200b-immr", },
++		{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
++		{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
++		{}
++	};
++
+ 	/* map the whole register space */
+-	mbar = mpc52xx_find_and_map("mpc5200");
++	np = of_find_matching_node(NULL, immr_ids);
++	mbar = of_iomap(np, 0);
++	of_node_put(np);
+ 	if (!mbar) {
+-		printk(KERN_ERR "%s:%i Error mapping registers\n", __func__, __LINE__);
++		pr_err("mpc52xx_pm_prepare(): could not map registers\n");
+ 		return -ENOSYS;
+ 	}
+ 	/* these offsets are from mpc5200 users manual */
+diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
+index 541fbb8..4fad6c7 100644
+--- a/arch/powerpc/platforms/82xx/Kconfig
++++ b/arch/powerpc/platforms/82xx/Kconfig
+@@ -26,6 +26,19 @@ config PQ2FADS
+ 	help
+ 	  This option enables support for the PQ2FADS board
+ 
++config EP8248E
++	bool "Embedded Planet EP8248E (a.k.a. CWH-PPC-8248N-VE)"
++	select 8272
++	select 8260
++	select FSL_SOC
++	select PPC_CPM_NEW_BINDING
++	select MDIO_BITBANG
++	help
++	  This enables support for the Embedded Planet EP8248E board.
++
++	  This board is also resold by Freescale as the QUICCStart
++	  MPC8248 Evaluation System and/or the CWH-PPC-8248N-VE.
++
+ endchoice
+ 
+ config PQ2ADS
+diff --git a/arch/powerpc/platforms/82xx/Makefile b/arch/powerpc/platforms/82xx/Makefile
+index 68c8b0c..6cd5cd5 100644
+--- a/arch/powerpc/platforms/82xx/Makefile
++++ b/arch/powerpc/platforms/82xx/Makefile
+@@ -5,3 +5,4 @@ obj-$(CONFIG_MPC8272_ADS) += mpc8272_ads.o
+ obj-$(CONFIG_CPM2) += pq2.o
+ obj-$(CONFIG_PQ2_ADS_PCI_PIC) += pq2ads-pci-pic.o
+ obj-$(CONFIG_PQ2FADS) += pq2fads.o
++obj-$(CONFIG_EP8248E) += ep8248e.o
+diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
+new file mode 100644
+index 0000000..ba93d8a
+--- /dev/null
++++ b/arch/powerpc/platforms/82xx/ep8248e.c
+@@ -0,0 +1,324 @@
++/*
++ * Embedded Planet EP8248E support
++ *
++ * Copyright 2007 Freescale Semiconductor, Inc.
++ * Author: Scott Wood <scottwood at freescale.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/fsl_devices.h>
++#include <linux/mdio-bitbang.h>
++#include <linux/of_platform.h>
++
++#include <asm/io.h>
++#include <asm/cpm2.h>
++#include <asm/udbg.h>
++#include <asm/machdep.h>
++#include <asm/time.h>
++#include <asm/mpc8260.h>
++#include <asm/prom.h>
++
++#include <sysdev/fsl_soc.h>
++#include <sysdev/cpm2_pic.h>
++
++#include "pq2.h"
++
++static u8 __iomem *ep8248e_bcsr;
++static struct device_node *ep8248e_bcsr_node;
++
++#define BCSR7_SCC2_ENABLE 0x10
++
++#define BCSR8_PHY1_ENABLE 0x80
++#define BCSR8_PHY1_POWER  0x40
++#define BCSR8_PHY2_ENABLE 0x20
++#define BCSR8_PHY2_POWER  0x10
++#define BCSR8_MDIO_READ   0x04
++#define BCSR8_MDIO_CLOCK  0x02
++#define BCSR8_MDIO_DATA   0x01
++
++#define BCSR9_USB_ENABLE  0x80
++#define BCSR9_USB_POWER   0x40
++#define BCSR9_USB_HOST    0x20
++#define BCSR9_USB_FULL_SPEED_TARGET 0x10
++
++static void __init ep8248e_pic_init(void)
++{
++	struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic");
++	if (!np) {
++		printk(KERN_ERR "PIC init: can not find cpm-pic node\n");
++		return;
++	}
++
++	cpm2_pic_init(np);
++	of_node_put(np);
++}
++
++static void ep8248e_set_mdc(struct mdiobb_ctrl *ctrl, int level)
++{
++	if (level)
++		setbits8(&ep8248e_bcsr[8], BCSR8_MDIO_CLOCK);
++	else
++		clrbits8(&ep8248e_bcsr[8], BCSR8_MDIO_CLOCK);
++
++	/* Read back to flush the write. */
++	in_8(&ep8248e_bcsr[8]);
++}
++
++static void ep8248e_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
++{
++	if (output)
++		clrbits8(&ep8248e_bcsr[8], BCSR8_MDIO_READ);
++	else
++		setbits8(&ep8248e_bcsr[8], BCSR8_MDIO_READ);
++
++	/* Read back to flush the write. */
++	in_8(&ep8248e_bcsr[8]);
++}
++
++static void ep8248e_set_mdio_data(struct mdiobb_ctrl *ctrl, int data)
++{
++	if (data)
++		setbits8(&ep8248e_bcsr[8], BCSR8_MDIO_DATA);
++	else
++		clrbits8(&ep8248e_bcsr[8], BCSR8_MDIO_DATA);
++
++	/* Read back to flush the write. */
++	in_8(&ep8248e_bcsr[8]);
++}
++
++static int ep8248e_get_mdio_data(struct mdiobb_ctrl *ctrl)
++{
++	return in_8(&ep8248e_bcsr[8]) & BCSR8_MDIO_DATA;
++}
++
++static const struct mdiobb_ops ep8248e_mdio_ops = {
++	.set_mdc = ep8248e_set_mdc,
++	.set_mdio_dir = ep8248e_set_mdio_dir,
++	.set_mdio_data = ep8248e_set_mdio_data,
++	.get_mdio_data = ep8248e_get_mdio_data,
++	.owner = THIS_MODULE,
++};
++
++static struct mdiobb_ctrl ep8248e_mdio_ctrl = {
++	.ops = &ep8248e_mdio_ops,
++};
++
++static int __devinit ep8248e_mdio_probe(struct of_device *ofdev,
++                                        const struct of_device_id *match)
++{
++	struct mii_bus *bus;
++	struct resource res;
++	struct device_node *node;
++	int ret, i;
++
++	node = of_get_parent(ofdev->node);
++	of_node_put(node);
++	if (node != ep8248e_bcsr_node)
++		return -ENODEV;
++
++	ret = of_address_to_resource(ofdev->node, 0, &res);
++	if (ret)
++		return ret;
++
++	bus = alloc_mdio_bitbang(&ep8248e_mdio_ctrl);
++	if (!bus)
++		return -ENOMEM;
++
++	bus->phy_mask = 0;
++	bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
++
++	for (i = 0; i < PHY_MAX_ADDR; i++)
++		bus->irq[i] = -1;
++
++	bus->name = "ep8248e-mdio-bitbang";
++	bus->dev = &ofdev->dev;
++	bus->id = res.start;
++
++	return mdiobus_register(bus);
++}
++
++static int ep8248e_mdio_remove(struct of_device *ofdev)
++{
++	BUG();
++	return 0;
++}
++
++static const struct of_device_id ep8248e_mdio_match[] = {
++	{
++		.compatible = "fsl,ep8248e-mdio-bitbang",
++	},
++	{},
++};
++
++static struct of_platform_driver ep8248e_mdio_driver = {
++	.driver = {
++		.name = "ep8248e-mdio-bitbang",
++	},
++	.match_table = ep8248e_mdio_match,
++	.probe = ep8248e_mdio_probe,
++	.remove = ep8248e_mdio_remove,
++};
++
++struct cpm_pin {
++	int port, pin, flags;
++};
++
++static __initdata struct cpm_pin ep8248e_pins[] = {
++	/* SMC1 */
++	{2, 4, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{2, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++
++	/* SCC1 */
++	{2, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{2, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++
++	/* FCC1 */
++	{0, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{0, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{0, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{0, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{0, 18, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{0, 19, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{0, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{0, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{0, 26, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
++	{0, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
++	{0, 28, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{0, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{0, 30, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
++	{0, 31, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
++	{2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{2, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++
++	/* FCC2 */
++	{1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++
++	/* I2C */
++	{4, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
++	{4, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
++
++	/* USB */
++	{2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++};
++
++static void __init init_ioports(void)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(ep8248e_pins); i++) {
++		const struct cpm_pin *pin = &ep8248e_pins[i];
++		cpm2_set_pin(pin->port, pin->pin, pin->flags);
++	}
++
++	cpm2_smc_clk_setup(CPM_CLK_SMC1, CPM_BRG7);
++	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX);
++	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX);
++	cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK8, CPM_CLK_TX); /* USB */
++	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK11, CPM_CLK_RX);
++	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_TX);
++	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
++	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);
++}
++
++static void __init ep8248e_setup_arch(void)
++{
++	if (ppc_md.progress)
++		ppc_md.progress("ep8248e_setup_arch()", 0);
++
++	cpm2_reset();
++
++	/* When this is set, snooping CPM DMA from RAM causes
++	 * machine checks.  See erratum SIU18.
++	 */
++	clrbits32(&cpm2_immr->im_siu_conf.siu_82xx.sc_bcr, MPC82XX_BCR_PLDP);
++
++	ep8248e_bcsr_node =
++		of_find_compatible_node(NULL, NULL, "fsl,ep8248e-bcsr");
++	if (!ep8248e_bcsr_node) {
++		printk(KERN_ERR "No bcsr in device tree\n");
++		return;
++	}
++
++	ep8248e_bcsr = of_iomap(ep8248e_bcsr_node, 0);
++	if (!ep8248e_bcsr) {
++		printk(KERN_ERR "Cannot map BCSR registers\n");
++		of_node_put(ep8248e_bcsr_node);
++		ep8248e_bcsr_node = NULL;
++		return;
++	}
++
++	setbits8(&ep8248e_bcsr[7], BCSR7_SCC2_ENABLE);
++	setbits8(&ep8248e_bcsr[8], BCSR8_PHY1_ENABLE | BCSR8_PHY1_POWER |
++	                           BCSR8_PHY2_ENABLE | BCSR8_PHY2_POWER);
++
++	init_ioports();
++
++	if (ppc_md.progress)
++		ppc_md.progress("ep8248e_setup_arch(), finish", 0);
++}
++
++static  __initdata struct of_device_id of_bus_ids[] = {
++	{ .compatible = "simple-bus", },
++	{ .compatible = "fsl,ep8248e-bcsr", },
++	{},
++};
++
++static int __init declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++	of_register_platform_driver(&ep8248e_mdio_driver);
++
++	return 0;
++}
++machine_device_initcall(ep8248e, declare_of_platform_devices);
++
++/*
++ * Called very early, device-tree isn't unflattened
++ */
++static int __init ep8248e_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++	return of_flat_dt_is_compatible(root, "fsl,ep8248e");
++}
++
++define_machine(ep8248e)
++{
++	.name = "Embedded Planet EP8248E",
++	.probe = ep8248e_probe,
++	.setup_arch = ep8248e_setup_arch,
++	.init_IRQ = ep8248e_pic_init,
++	.get_irq = cpm2_get_irq,
++	.calibrate_decr = generic_calibrate_decr,
++	.restart = pq2_restart,
++	.progress = udbg_progress,
++};
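The MDIO bit-bang callbacks in ep8248e.c rely on setbits8()/clrbits8() followed by a dummy in_8() so each write actually reaches the BCSR before the bus protocol moves on. The pattern in isolation, with illustrative register and bit values:

	setbits8(&bcsr[8], 0x02);	/* read-modify-write: raise the MDC bit */
	in_8(&bcsr[8]);			/* read back to flush the posted write */
	clrbits8(&bcsr[8], 0x02);	/* lower it again */
	in_8(&bcsr[8]);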
+diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
+index fd83440..3fce6b3 100644
+--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
++++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
+@@ -165,14 +165,11 @@ static struct of_device_id __initdata of_bus_ids[] = {
+ 
+ static int __init declare_of_platform_devices(void)
+ {
+-	if (!machine_is(mpc8272_ads))
+-		return 0;
+-
+ 	/* Publish the QE devices */
+ 	of_platform_bus_probe(NULL, of_bus_ids, NULL);
+ 	return 0;
+ }
+-device_initcall(declare_of_platform_devices);
++machine_device_initcall(mpc8272_ads, declare_of_platform_devices);
+ 
+ /*
+  * Called very early, device-tree isn't unflattened
+diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
+index a497cba..1b75902 100644
+--- a/arch/powerpc/platforms/82xx/pq2.c
++++ b/arch/powerpc/platforms/82xx/pq2.c
+@@ -53,13 +53,13 @@ static void __init pq2_pci_add_bridge(struct device_node *np)
+ 	if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
+ 		goto err;
+ 
+-	pci_assign_all_buses = 1;
++	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ 
+ 	hose = pcibios_alloc_controller(np);
+ 	if (!hose)
+ 		return;
+ 
+-	hose->arch_data = np;
++	hose->dn = np;
+ 
+ 	setup_indirect_pci(hose, r.start + 0x100, r.start + 0x104, 0);
+ 	pci_process_bridge_OF_ranges(hose, np, 1);
+diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
+index 4f457a9..68196e3 100644
+--- a/arch/powerpc/platforms/82xx/pq2fads.c
++++ b/arch/powerpc/platforms/82xx/pq2fads.c
+@@ -15,12 +15,12 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/fsl_devices.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/io.h>
+ #include <asm/cpm2.h>
+ #include <asm/udbg.h>
+ #include <asm/machdep.h>
+-#include <asm/of_platform.h>
+ #include <asm/time.h>
+ 
+ #include <sysdev/fsl_soc.h>
+@@ -176,14 +176,11 @@ static struct of_device_id __initdata of_bus_ids[] = {
+ 
+ static int __init declare_of_platform_devices(void)
+ {
+-	if (!machine_is(pq2fads))
+-		return 0;
+-
+ 	/* Publish the QE devices */
+ 	of_platform_bus_probe(NULL, of_bus_ids, NULL);
+ 	return 0;
+ }
+-device_initcall(declare_of_platform_devices);
++machine_device_initcall(pq2fads, declare_of_platform_devices);
+ 
+ define_machine(pq2fads)
+ {
+diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
+index ec305f1..13587e2 100644
+--- a/arch/powerpc/platforms/83xx/Kconfig
++++ b/arch/powerpc/platforms/83xx/Kconfig
+@@ -1,18 +1,23 @@
+-choice
+-	prompt "83xx Board Type"
++menuconfig MPC83xx
++	bool "83xx Board Type"
+ 	depends on PPC_83xx
+-	default MPC834x_MDS
++	select PPC_UDBG_16550
++	select PPC_INDIRECT_PCI
++
++if MPC83xx
+ 
+-config MPC8313_RDB
+-	bool "Freescale MPC8313 RDB"
++config MPC831x_RDB
++	bool "Freescale MPC831x RDB"
+ 	select DEFAULT_UIMAGE
++	select PPC_MPC831x
+ 	help
+-	  This option enables support for the MPC8313 RDB board.
++	  This option enables support for the MPC8313 RDB and MPC8315 RDB boards.
+ 
+ config MPC832x_MDS
+ 	bool "Freescale MPC832x MDS"
+ 	select DEFAULT_UIMAGE
+ 	select QUICC_ENGINE
++	select PPC_MPC832x
+ 	help
+ 	  This option enables support for the MPC832x MDS evaluation board.
+ 
+@@ -20,12 +25,14 @@ config MPC832x_RDB
+ 	bool "Freescale MPC832x RDB"
+ 	select DEFAULT_UIMAGE
+ 	select QUICC_ENGINE
++	select PPC_MPC832x
+ 	help
+ 	  This option enables support for the MPC8323 RDB board.
+ 
+ config MPC834x_MDS
+ 	bool "Freescale MPC834x MDS"
+ 	select DEFAULT_UIMAGE
++	select PPC_MPC834x
+ 	help
+ 	  This option enables support for the MPC 834x MDS evaluation board.
+ 
+@@ -37,6 +44,7 @@ config MPC834x_MDS
+ config MPC834x_ITX
+ 	bool "Freescale MPC834x ITX"
+ 	select DEFAULT_UIMAGE
++	select PPC_MPC834x
+ 	help
+ 	  This option enables support for the MPC 834x ITX evaluation board.
+ 
+@@ -50,28 +58,41 @@ config MPC836x_MDS
+ 	help
+ 	  This option enables support for the MPC836x MDS Processor Board.
+ 
+-endchoice
++config MPC837x_MDS
++	bool "Freescale MPC837x MDS"
++	select DEFAULT_UIMAGE
++	select PPC_MPC837x
++	help
++	  This option enables support for the MPC837x MDS Processor Board.
++
++config MPC837x_RDB
++	bool "Freescale MPC837x RDB"
++	select DEFAULT_UIMAGE
++	select PPC_MPC837x
++	help
++	  This option enables support for the MPC837x RDB Board.
++
++config SBC834x
++	bool "Wind River SBC834x"
++	select DEFAULT_UIMAGE
++	select PPC_MPC834x
++	help
++	  This option enables support for the Wind River SBC834x board.
++
++endif
+ 
++# used for usb
+ config PPC_MPC831x
+ 	bool
+-	select PPC_UDBG_16550
+-	select PPC_INDIRECT_PCI
+-	default y if MPC8313_RDB
+ 
++# used for math-emu
+ config PPC_MPC832x
+ 	bool
+-	select PPC_UDBG_16550
+-	select PPC_INDIRECT_PCI
+-	default y if MPC832x_MDS || MPC832x_RDB
+ 
+-config MPC834x
++# used for usb
++config PPC_MPC834x
+ 	bool
+-	select PPC_UDBG_16550
+-	select PPC_INDIRECT_PCI
+-	default y if MPC834x_MDS || MPC834x_ITX
+ 
+-config PPC_MPC836x
++# used for usb
++config PPC_MPC837x
+ 	bool
+-	select PPC_UDBG_16550
+-	select PPC_INDIRECT_PCI
+-	default y if MPC836x_MDS
+diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile
+index 5a98f88..7e6dd3e 100644
+--- a/arch/powerpc/platforms/83xx/Makefile
++++ b/arch/powerpc/platforms/83xx/Makefile
+@@ -3,9 +3,12 @@
+ #
+ obj-y				:= misc.o usb.o
+ obj-$(CONFIG_PCI)		+= pci.o
+-obj-$(CONFIG_MPC8313_RDB)	+= mpc8313_rdb.o
++obj-$(CONFIG_MPC831x_RDB)	+= mpc831x_rdb.o
+ obj-$(CONFIG_MPC832x_RDB)	+= mpc832x_rdb.o
+ obj-$(CONFIG_MPC834x_MDS)	+= mpc834x_mds.o
+ obj-$(CONFIG_MPC834x_ITX)	+= mpc834x_itx.o
+ obj-$(CONFIG_MPC836x_MDS)	+= mpc836x_mds.o
+ obj-$(CONFIG_MPC832x_MDS)	+= mpc832x_mds.o
++obj-$(CONFIG_MPC837x_MDS)	+= mpc837x_mds.o
++obj-$(CONFIG_SBC834x)		+= sbc834x.o
++obj-$(CONFIG_MPC837x_RDB)	+= mpc837x_rdb.o
+diff --git a/arch/powerpc/platforms/83xx/mpc8313_rdb.c b/arch/powerpc/platforms/83xx/mpc8313_rdb.c
+deleted file mode 100644
+index 33766b8..0000000
+--- a/arch/powerpc/platforms/83xx/mpc8313_rdb.c
++++ /dev/null
+@@ -1,88 +0,0 @@
+-/*
+- * arch/powerpc/platforms/83xx/mpc8313_rdb.c
+- *
+- * Description: MPC8313x RDB board specific routines.
+- * This file is based on mpc834x_sys.c
+- * Author: Lo Wlison <r43300 at freescale.com>
+- *
+- * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/pci.h>
+-
+-#include <asm/time.h>
+-#include <asm/ipic.h>
+-#include <asm/udbg.h>
+-
+-#include "mpc83xx.h"
+-
+-#undef DEBUG
+-#ifdef DEBUG
+-#define DBG(fmt...) udbg_printf(fmt)
+-#else
+-#define DBG(fmt...)
+-#endif
+-
+-/* ************************************************************************
+- *
+- * Setup the architecture
+- *
+- */
+-static void __init mpc8313_rdb_setup_arch(void)
+-{
+-#ifdef CONFIG_PCI
+-	struct device_node *np;
+-#endif
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("mpc8313_rdb_setup_arch()", 0);
+-
+-#ifdef CONFIG_PCI
+-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
+-		mpc83xx_add_bridge(np);
+-#endif
+-	mpc831x_usb_cfg();
+-}
+-
+-void __init mpc8313_rdb_init_IRQ(void)
+-{
+-	struct device_node *np;
+-
+-	np = of_find_node_by_type(NULL, "ipic");
+-	if (!np)
+-		return;
+-
+-	ipic_init(np, 0);
+-
+-	/* Initialize the default interrupt mapping priorities,
+-	 * in case the boot rom changed something on us.
+-	 */
+-	ipic_set_default_priority();
+-}
+-
+-/*
+- * Called very early, MMU is off, device-tree isn't unflattened
+- */
+-static int __init mpc8313_rdb_probe(void)
+-{
+-        unsigned long root = of_get_flat_dt_root();
+-
+-        return of_flat_dt_is_compatible(root, "MPC8313ERDB");
+-}
+-
+-define_machine(mpc8313_rdb) {
+-	.name			= "MPC8313 RDB",
+-	.probe			= mpc8313_rdb_probe,
+-	.setup_arch		= mpc8313_rdb_setup_arch,
+-	.init_IRQ		= mpc8313_rdb_init_IRQ,
+-	.get_irq		= ipic_get_irq,
+-	.restart		= mpc83xx_restart,
+-	.time_init		= mpc83xx_time_init,
+-	.calibrate_decr		= generic_calibrate_decr,
+-	.progress		= udbg_progress,
+-};
+diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+new file mode 100644
+index 0000000..c4db517
+--- /dev/null
++++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+@@ -0,0 +1,93 @@
++/*
++ * arch/powerpc/platforms/83xx/mpc831x_rdb.c
++ *
++ * Description: MPC831x RDB board specific routines.
++ * This file is based on mpc834x_sys.c
++ * Author: Lo Wlison <r43300 at freescale.com>
++ *
++ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/pci.h>
++#include <linux/of_platform.h>
++
++#include <asm/time.h>
++#include <asm/ipic.h>
++#include <asm/udbg.h>
++
++#include "mpc83xx.h"
++
++/*
++ * Setup the architecture
++ */
++static void __init mpc831x_rdb_setup_arch(void)
++{
++#ifdef CONFIG_PCI
++	struct device_node *np;
++#endif
++
++	if (ppc_md.progress)
++		ppc_md.progress("mpc831x_rdb_setup_arch()", 0);
++
++#ifdef CONFIG_PCI
++	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
++		mpc83xx_add_bridge(np);
++#endif
++	mpc831x_usb_cfg();
++}
++
++void __init mpc831x_rdb_init_IRQ(void)
++{
++	struct device_node *np;
++
++	np = of_find_node_by_type(NULL, "ipic");
++	if (!np)
++		return;
++
++	ipic_init(np, 0);
++
++	/* Initialize the default interrupt mapping priorities,
++	 * in case the boot rom changed something on us.
++	 */
++	ipic_set_default_priority();
++}
++
++/*
++ * Called very early, MMU is off, device-tree isn't unflattened
++ */
++static int __init mpc831x_rdb_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	return of_flat_dt_is_compatible(root, "MPC8313ERDB") ||
++	       of_flat_dt_is_compatible(root, "fsl,mpc8315erdb");
++}
++
++static struct of_device_id __initdata of_bus_ids[] = {
++	{ .compatible = "simple-bus" },
++	{},
++};
++
++static int __init declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++	return 0;
++}
++machine_device_initcall(mpc831x_rdb, declare_of_platform_devices);
++
++define_machine(mpc831x_rdb) {
++	.name			= "MPC831x RDB",
++	.probe			= mpc831x_rdb_probe,
++	.setup_arch		= mpc831x_rdb_setup_arch,
++	.init_IRQ		= mpc831x_rdb_init_IRQ,
++	.get_irq		= ipic_get_irq,
++	.restart		= mpc83xx_restart,
++	.time_init		= mpc83xx_time_init,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
+index 39ee7a1..6dbc6ea 100644
+--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
++++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
+@@ -23,9 +23,9 @@
+ #include <linux/seq_file.h>
+ #include <linux/root_dev.h>
+ #include <linux/initrd.h>
++#include <linux/of_platform.h>
++#include <linux/of_device.h>
+ 
+-#include <asm/of_device.h>
+-#include <asm/of_platform.h>
+ #include <asm/system.h>
+ #include <asm/atomic.h>
+ #include <asm/time.h>
+@@ -105,20 +105,18 @@ static struct of_device_id mpc832x_ids[] = {
+ 	{ .type = "soc", },
+ 	{ .compatible = "soc", },
+ 	{ .type = "qe", },
++	{ .compatible = "fsl,qe", },
+ 	{},
+ };
+ 
+ static int __init mpc832x_declare_of_platform_devices(void)
+ {
+-	if (!machine_is(mpc832x_mds))
+-		return 0;
+-
+ 	/* Publish the QE devices */
+ 	of_platform_bus_probe(NULL, mpc832x_ids, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(mpc832x_declare_of_platform_devices);
++machine_device_initcall(mpc832x_mds, mpc832x_declare_of_platform_devices);
+ 
+ static void __init mpc832x_sys_init_IRQ(void)
+ {
+@@ -137,10 +135,12 @@ static void __init mpc832x_sys_init_IRQ(void)
+ 	of_node_put(np);
+ 
+ #ifdef CONFIG_QUICC_ENGINE
+-	np = of_find_node_by_type(NULL, "qeic");
+-	if (!np)
+-		return;
+-
++	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
++	if (!np) {
++		np = of_find_node_by_type(NULL, "qeic");
++		if (!np)
++			return;
++	}
+ 	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
+ 	of_node_put(np);
+ #endif				/* CONFIG_QUICC_ENGINE */
+diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+index d4bd040..9f0fd88 100644
+--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
++++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+@@ -19,8 +19,8 @@
+ #include <linux/spi/spi.h>
+ #include <linux/spi/mmc_spi.h>
+ #include <linux/mmc/host.h>
++#include <linux/of_platform.h>
+ 
+-#include <asm/of_platform.h>
+ #include <asm/time.h>
+ #include <asm/ipic.h>
+ #include <asm/udbg.h>
+@@ -63,9 +63,6 @@ static struct spi_board_info mpc832x_spi_boardinfo = {
+ 
+ static int __init mpc832x_spi_init(void)
+ {
+-	if (!machine_is(mpc832x_rdb))
+-		return 0;
+-
+ 	par_io_config_pin(3,  0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */
+ 	par_io_config_pin(3,  1, 3, 0, 1, 0); /* SPI1 MISO, I/O */
+ 	par_io_config_pin(3,  2, 3, 0, 1, 0); /* SPI1 CLK,  I/O */
+@@ -80,7 +77,7 @@ static int __init mpc832x_spi_init(void)
+ 			    mpc83xx_spi_deactivate_cs);
+ }
+ 
+-device_initcall(mpc832x_spi_init);
++machine_device_initcall(mpc832x_rdb, mpc832x_spi_init);
+ 
+ /* ************************************************************************
+  *
+@@ -118,20 +115,18 @@ static struct of_device_id mpc832x_ids[] = {
+ 	{ .type = "soc", },
+ 	{ .compatible = "soc", },
+ 	{ .type = "qe", },
++	{ .compatible = "fsl,qe", },
+ 	{},
+ };
+ 
+ static int __init mpc832x_declare_of_platform_devices(void)
+ {
+-	if (!machine_is(mpc832x_rdb))
+-		return 0;
+-
+ 	/* Publish the QE devices */
+ 	of_platform_bus_probe(NULL, mpc832x_ids, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(mpc832x_declare_of_platform_devices);
++machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices);
+ 
+ void __init mpc832x_rdb_init_IRQ(void)
+ {
+@@ -151,10 +146,12 @@ void __init mpc832x_rdb_init_IRQ(void)
+ 	of_node_put(np);
+ 
+ #ifdef CONFIG_QUICC_ENGINE
+-	np = of_find_node_by_type(NULL, "qeic");
+-	if (!np)
+-		return;
+-
++	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
++	if (!np) {
++		np = of_find_node_by_type(NULL, "qeic");
++		if (!np)
++			return;
++	}
+ 	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
+ 	of_node_put(np);
+ #endif				/* CONFIG_QUICC_ENGINE */
+diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
+index aa76819..50e8f63 100644
+--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
++++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
+@@ -23,6 +23,7 @@
+ #include <linux/delay.h>
+ #include <linux/seq_file.h>
+ #include <linux/root_dev.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/system.h>
+ #include <asm/atomic.h>
+@@ -37,6 +38,17 @@
+ 
+ #include "mpc83xx.h"
+ 
++static struct of_device_id __initdata mpc834x_itx_ids[] = {
++	{ .compatible = "fsl,pq2pro-localbus", },
++	{},
++};
++
++static int __init mpc834x_itx_declare_of_platform_devices(void)
++{
++	return of_platform_bus_probe(NULL, mpc834x_itx_ids, NULL);
++}
++machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices);
++
+ /* ************************************************************************
+  *
+  * Setup the architecture
+diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
+index a81bb3c..2b8a0a3 100644
+--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
++++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
+@@ -23,6 +23,7 @@
+ #include <linux/delay.h>
+ #include <linux/seq_file.h>
+ #include <linux/root_dev.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/system.h>
+ #include <asm/atomic.h>
+@@ -106,14 +107,27 @@ static void __init mpc834x_mds_init_IRQ(void)
+ 	ipic_set_default_priority();
+ }
+ 
++static struct of_device_id mpc834x_ids[] = {
++	{ .type = "soc", },
++	{ .compatible = "soc", },
++	{},
++};
++
++static int __init mpc834x_declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, mpc834x_ids, NULL);
++	return 0;
++}
++machine_device_initcall(mpc834x_mds, mpc834x_declare_of_platform_devices);
++
+ /*
+  * Called very early, MMU is off, device-tree isn't unflattened
+  */
+ static int __init mpc834x_mds_probe(void)
+ {
+-        unsigned long root = of_get_flat_dt_root();
++	unsigned long root = of_get_flat_dt_root();
+ 
+-        return of_flat_dt_is_compatible(root, "MPC834xMDS");
++	return of_flat_dt_is_compatible(root, "MPC834xMDS");
+ }
+ 
+ define_machine(mpc834x_mds) {
+diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
+index e40012f..c2e5de6 100644
+--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
++++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
+@@ -29,9 +29,9 @@
+ #include <linux/seq_file.h>
+ #include <linux/root_dev.h>
+ #include <linux/initrd.h>
++#include <linux/of_platform.h>
++#include <linux/of_device.h>
+ 
+-#include <asm/of_device.h>
+-#include <asm/of_platform.h>
+ #include <asm/system.h>
+ #include <asm/atomic.h>
+ #include <asm/time.h>
+@@ -136,20 +136,18 @@ static struct of_device_id mpc836x_ids[] = {
+ 	{ .type = "soc", },
+ 	{ .compatible = "soc", },
+ 	{ .type = "qe", },
++	{ .compatible = "fsl,qe", },
+ 	{},
+ };
+ 
+ static int __init mpc836x_declare_of_platform_devices(void)
+ {
+-	if (!machine_is(mpc836x_mds))
+-		return 0;
+-
+ 	/* Publish the QE devices */
+ 	of_platform_bus_probe(NULL, mpc836x_ids, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(mpc836x_declare_of_platform_devices);
++machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices);
+ 
+ static void __init mpc836x_mds_init_IRQ(void)
+ {
+@@ -168,10 +166,12 @@ static void __init mpc836x_mds_init_IRQ(void)
+ 	of_node_put(np);
+ 
+ #ifdef CONFIG_QUICC_ENGINE
+-	np = of_find_node_by_type(NULL, "qeic");
+-	if (!np)
+-		return;
+-
++	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
++	if (!np) {
++		np = of_find_node_by_type(NULL, "qeic");
++		if (!np)
++			return;
++	}
+ 	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
+ 	of_node_put(np);
+ #endif				/* CONFIG_QUICC_ENGINE */
+diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
+new file mode 100644
+index 0000000..8a9c269
+--- /dev/null
++++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
+@@ -0,0 +1,147 @@
++/*
++ * arch/powerpc/platforms/83xx/mpc837x_mds.c
++ *
++ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
++ *
++ * MPC837x MDS board specific routines
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation;  either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/pci.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++
++#include <asm/time.h>
++#include <asm/ipic.h>
++#include <asm/udbg.h>
++#include <asm/prom.h>
++
++#include "mpc83xx.h"
++
++#define BCSR12_USB_SER_MASK	0x8a
++#define BCSR12_USB_SER_PIN	0x80
++#define BCSR12_USB_SER_DEVICE	0x02
++extern int mpc837x_usb_cfg(void);
++
++static int mpc837xmds_usb_cfg(void)
++{
++	struct device_node *np;
++	const void *phy_type, *mode;
++	void __iomem *bcsr_regs = NULL;
++	u8 bcsr12;
++	int ret;
++
++	ret = mpc837x_usb_cfg();
++	if (ret)
++		return ret;
++	/* Map BCSR area */
++	np = of_find_node_by_name(NULL, "bcsr");
++	if (np) {
++		struct resource res;
++
++		of_address_to_resource(np, 0, &res);
++		bcsr_regs = ioremap(res.start, res.end - res.start + 1);
++		of_node_put(np);
++	}
++	if (!bcsr_regs)
++		return -1;
++
++	np = of_find_node_by_name(NULL, "usb");
++	if (!np)
++		return -ENODEV;
++	phy_type = of_get_property(np, "phy_type", NULL);
++	if (phy_type && !strcmp(phy_type, "ulpi")) {
++		clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN);
++	} else if (phy_type && !strcmp(phy_type, "serial")) {
++		mode = of_get_property(np, "dr_mode", NULL);
++		bcsr12 = in_8(bcsr_regs + 12) & ~BCSR12_USB_SER_MASK;
++		bcsr12 |= BCSR12_USB_SER_PIN;
++		if (mode && !strcmp(mode, "peripheral"))
++			bcsr12 |= BCSR12_USB_SER_DEVICE;
++		out_8(bcsr_regs + 12, bcsr12);
++	} else {
++		printk(KERN_ERR "USB DR: unsupported PHY\n");
++	}
++
++	of_node_put(np);
++	iounmap(bcsr_regs);
++	return 0;
++}
++
++/* ************************************************************************
++ *
++ * Setup the architecture
++ *
++ */
++static void __init mpc837x_mds_setup_arch(void)
++{
++#ifdef CONFIG_PCI
++	struct device_node *np;
++#endif
++
++	if (ppc_md.progress)
++		ppc_md.progress("mpc837x_mds_setup_arch()", 0);
++
++#ifdef CONFIG_PCI
++	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
++		mpc83xx_add_bridge(np);
++#endif
++	mpc837xmds_usb_cfg();
++}
++
++static struct of_device_id mpc837x_ids[] = {
++	{ .type = "soc", },
++	{ .compatible = "soc", },
++	{},
++};
++
++static int __init mpc837x_declare_of_platform_devices(void)
++{
++	/* Publish of_device */
++	of_platform_bus_probe(NULL, mpc837x_ids, NULL);
++
++	return 0;
++}
++machine_device_initcall(mpc837x_mds, mpc837x_declare_of_platform_devices);
++
++static void __init mpc837x_mds_init_IRQ(void)
++{
++	struct device_node *np;
++
++	np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
++	if (!np)
++		return;
++
++	ipic_init(np, 0);
++
++	/* Initialize the default interrupt mapping priorities,
++	 * in case the boot rom changed something on us.
++	 */
++	ipic_set_default_priority();
++}
++
++/*
++ * Called very early, MMU is off, device-tree isn't unflattened
++ */
++static int __init mpc837x_mds_probe(void)
++{
++        unsigned long root = of_get_flat_dt_root();
++
++        return of_flat_dt_is_compatible(root, "fsl,mpc837xmds");
++}
++
++define_machine(mpc837x_mds) {
++	.name			= "MPC837x MDS",
++	.probe			= mpc837x_mds_probe,
++	.setup_arch		= mpc837x_mds_setup_arch,
++	.init_IRQ		= mpc837x_mds_init_IRQ,
++	.get_irq		= ipic_get_irq,
++	.restart		= mpc83xx_restart,
++	.time_init		= mpc83xx_time_init,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+new file mode 100644
+index 0000000..2293ae5
+--- /dev/null
++++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+@@ -0,0 +1,99 @@
++/*
++ * arch/powerpc/platforms/83xx/mpc837x_rdb.c
++ *
++ * Copyright (C) 2007 Freescale Semicondutor, Inc. All rights reserved.
++ *
++ * MPC837x RDB board specific routines
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/pci.h>
++#include <linux/of_platform.h>
++
++#include <asm/time.h>
++#include <asm/ipic.h>
++#include <asm/udbg.h>
++
++#include "mpc83xx.h"
++
++extern int mpc837x_usb_cfg(void);
++
++/* ************************************************************************
++ *
++ * Setup the architecture
++ *
++ */
++static void __init mpc837x_rdb_setup_arch(void)
++{
++#ifdef CONFIG_PCI
++	struct device_node *np;
++#endif
++
++	if (ppc_md.progress)
++		ppc_md.progress("mpc837x_rdb_setup_arch()", 0);
++
++#ifdef CONFIG_PCI
++	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
++		mpc83xx_add_bridge(np);
++#endif
++	mpc837x_usb_cfg();
++}
++
++static struct of_device_id mpc837x_ids[] = {
++	{ .type = "soc", },
++	{ .compatible = "soc", },
++	{},
++};
++
++static int __init mpc837x_declare_of_platform_devices(void)
++{
++	/* Publish of_device */
++	of_platform_bus_probe(NULL, mpc837x_ids, NULL);
++
++	return 0;
++}
++machine_device_initcall(mpc837x_rdb, mpc837x_declare_of_platform_devices);
++
++static void __init mpc837x_rdb_init_IRQ(void)
++{
++	struct device_node *np;
++
++	np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
++	if (!np)
++		return;
++
++	ipic_init(np, 0);
++
++	/* Initialize the default interrupt mapping priorities,
++	 * in case the boot rom changed something on us.
++	 */
++	ipic_set_default_priority();
++}
++
++/*
++ * Called very early, MMU is off, device-tree isn't unflattened
++ */
++static int __init mpc837x_rdb_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	return of_flat_dt_is_compatible(root, "fsl,mpc8377rdb") ||
++	       of_flat_dt_is_compatible(root, "fsl,mpc8378rdb") ||
++	       of_flat_dt_is_compatible(root, "fsl,mpc8379rdb");
++}
++
++define_machine(mpc837x_rdb) {
++	.name			= "MPC837x RDB",
++	.probe			= mpc837x_rdb_probe,
++	.setup_arch		= mpc837x_rdb_setup_arch,
++	.init_IRQ		= mpc837x_rdb_init_IRQ,
++	.get_irq		= ipic_get_irq,
++	.restart		= mpc83xx_restart,
++	.time_init		= mpc83xx_time_init,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
+index b778cb4..88bb748 100644
+--- a/arch/powerpc/platforms/83xx/mpc83xx.h
++++ b/arch/powerpc/platforms/83xx/mpc83xx.h
+@@ -14,6 +14,7 @@
+ #define MPC83XX_SCCR_USB_DRCM_11   0x00300000
+ #define MPC83XX_SCCR_USB_DRCM_01   0x00100000
+ #define MPC83XX_SCCR_USB_DRCM_10   0x00200000
++#define MPC837X_SCCR_USB_DRCM_11   0x00c00000
+ 
+ /* system i/o configuration register low */
+ #define MPC83XX_SICRL_OFFS         0x114
+@@ -22,6 +23,8 @@
+ #define MPC834X_SICRL_USB1         0x20000000
+ #define MPC831X_SICRL_USB_MASK     0x00000c00
+ #define MPC831X_SICRL_USB_ULPI     0x00000800
++#define MPC837X_SICRL_USB_MASK     0xf0000000
++#define MPC837X_SICRL_USB_ULPI     0x50000000
+ 
+ /* system i/o configuration register high */
+ #define MPC83XX_SICRH_OFFS         0x118
+diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
+index 80425d7..14f1080 100644
+--- a/arch/powerpc/platforms/83xx/pci.c
++++ b/arch/powerpc/platforms/83xx/pci.c
+@@ -54,7 +54,7 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
+ 		       " bus 0\n", dev->full_name);
+ 	}
+ 
+-	pci_assign_all_buses = 1;
++	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ 	hose = pcibios_alloc_controller(dev);
+ 	if (!hose)
+ 		return -ENOMEM;
+diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
+new file mode 100644
+index 0000000..cf38247
+--- /dev/null
++++ b/arch/powerpc/platforms/83xx/sbc834x.c
+@@ -0,0 +1,115 @@
++/*
++ * arch/powerpc/platforms/83xx/sbc834x.c
++ *
++ * Wind River SBC834x board specific routines
++ *
++ * By Paul Gortmaker (see MAINTAINERS for contact information)
++ *
++ * Based largely on the mpc834x_mds.c support by Kumar Gala.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/stddef.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/reboot.h>
++#include <linux/pci.h>
++#include <linux/kdev_t.h>
++#include <linux/major.h>
++#include <linux/console.h>
++#include <linux/delay.h>
++#include <linux/seq_file.h>
++#include <linux/root_dev.h>
++#include <linux/of_platform.h>
++
++#include <asm/system.h>
++#include <asm/atomic.h>
++#include <asm/time.h>
++#include <asm/io.h>
++#include <asm/machdep.h>
++#include <asm/ipic.h>
++#include <asm/irq.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <sysdev/fsl_soc.h>
++
++#include "mpc83xx.h"
++
++/* ************************************************************************
++ *
++ * Setup the architecture
++ *
++ */
++static void __init sbc834x_setup_arch(void)
++{
++#ifdef CONFIG_PCI
++	struct device_node *np;
++#endif
++
++	if (ppc_md.progress)
++		ppc_md.progress("sbc834x_setup_arch()", 0);
++
++#ifdef CONFIG_PCI
++	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
++		mpc83xx_add_bridge(np);
++#endif
++
++}
++
++static void __init sbc834x_init_IRQ(void)
++{
++	struct device_node *np;
++
++	np = of_find_node_by_type(NULL, "ipic");
++	if (!np)
++		return;
++
++	ipic_init(np, 0);
++
++	/* Initialize the default interrupt mapping priorities,
++	 * in case the boot rom changed something on us.
++	 */
++	ipic_set_default_priority();
++
++	of_node_put(np);
++}
++
++static struct __initdata of_device_id sbc834x_ids[] = {
++	{ .type = "soc", },
++	{ .compatible = "soc", },
++	{},
++};
++
++static int __init sbc834x_declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, sbc834x_ids, NULL);
++	return 0;
++}
++machine_device_initcall(sbc834x, sbc834x_declare_of_platform_devices);
++
++/*
++ * Called very early, MMU is off, device-tree isn't unflattened
++ */
++static int __init sbc834x_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	return of_flat_dt_is_compatible(root, "SBC834x");
++}
++
++define_machine(sbc834x) {
++	.name			= "SBC834x",
++	.probe			= sbc834x_probe,
++	.setup_arch		= sbc834x_setup_arch,
++	.init_IRQ		= sbc834x_init_IRQ,
++	.get_irq		= ipic_get_irq,
++	.restart		= mpc83xx_restart,
++	.time_init		= mpc83xx_time_init,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
+index b45160f..681230a 100644
+--- a/arch/powerpc/platforms/83xx/usb.c
++++ b/arch/powerpc/platforms/83xx/usb.c
+@@ -22,7 +22,7 @@
+ #include "mpc83xx.h"
+ 
+ 
+-#ifdef CONFIG_MPC834x
++#ifdef CONFIG_PPC_MPC834x
+ int mpc834x_usb_cfg(void)
+ {
+ 	unsigned long sccr, sicrl, sicrh;
+@@ -41,7 +41,7 @@ int mpc834x_usb_cfg(void)
+ 	sicrl = in_be32(immap + MPC83XX_SICRL_OFFS) & ~MPC834X_SICRL_USB_MASK;
+ 	sicrh = in_be32(immap + MPC83XX_SICRH_OFFS) & ~MPC834X_SICRH_USB_UTMI;
+ 
+-	np = of_find_compatible_node(NULL, "usb", "fsl-usb2-dr");
++	np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr");
+ 	if (np) {
+ 		sccr |= MPC83XX_SCCR_USB_DRCM_11;  /* 1:3 */
+ 
+@@ -67,7 +67,7 @@ int mpc834x_usb_cfg(void)
+ 		port0_is_dr = 1;
+ 		of_node_put(np);
+ 	}
+-	np = of_find_compatible_node(NULL, "usb", "fsl-usb2-mph");
++	np = of_find_compatible_node(NULL, NULL, "fsl-usb2-mph");
+ 	if (np) {
+ 		sccr |= MPC83XX_SCCR_USB_MPHCM_11; /* 1:3 */
+ 
+@@ -96,7 +96,7 @@ int mpc834x_usb_cfg(void)
+ 	iounmap(immap);
+ 	return 0;
+ }
+-#endif /* CONFIG_MPC834x */
++#endif /* CONFIG_PPC_MPC834x */
+ 
+ #ifdef CONFIG_PPC_MPC831x
+ int mpc831x_usb_cfg(void)
+@@ -111,7 +111,7 @@ int mpc831x_usb_cfg(void)
+ 	const void *dr_mode;
+ #endif
+ 
+-	np = of_find_compatible_node(NULL, "usb", "fsl-usb2-dr");
++	np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr");
+ 	if (!np)
+ 		return -ENODEV;
+ 	prop = of_get_property(np, "phy_type", NULL);
+@@ -179,3 +179,43 @@ int mpc831x_usb_cfg(void)
+ 	return ret;
+ }
+ #endif /* CONFIG_PPC_MPC831x */
++
++#ifdef CONFIG_PPC_MPC837x
++int mpc837x_usb_cfg(void)
++{
++	void __iomem *immap;
++	struct device_node *np = NULL;
++	const void *prop;
++	int ret = 0;
++
++	np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr");
++	if (!np)
++		return -ENODEV;
++	prop = of_get_property(np, "phy_type", NULL);
++
++	if (!prop || (strcmp(prop, "ulpi") && strcmp(prop, "serial"))) {
++		printk(KERN_WARNING "837x USB PHY type not supported\n");
++		of_node_put(np);
++		return -EINVAL;
++	}
++
++	/* Map IMMR space for pin and clock settings */
++	immap = ioremap(get_immrbase(), 0x1000);
++	if (!immap) {
++		of_node_put(np);
++		return -ENOMEM;
++	}
++
++	/* Configure clock */
++	clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, MPC837X_SCCR_USB_DRCM_11,
++			MPC837X_SCCR_USB_DRCM_11);
++
++	/* Configure pin mux for ULPI/serial */
++	clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, MPC837X_SICRL_USB_MASK,
++			MPC837X_SICRL_USB_ULPI);
++
++	iounmap(immap);
++	of_node_put(np);
++	return ret;
++}
++#endif /* CONFIG_PPC_MPC837x */
+diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
+index 7748a3a..7e76ddb 100644
+--- a/arch/powerpc/platforms/85xx/Kconfig
++++ b/arch/powerpc/platforms/85xx/Kconfig
+@@ -1,7 +1,14 @@
+-choice
+-	prompt "Machine Type"
++menuconfig MPC85xx
++	bool "Machine Type"
+ 	depends on PPC_85xx
+-	default MPC8540_ADS
++	select PPC_UDBG_16550
++	select PPC_INDIRECT_PCI if PCI
++	select MPIC
++	select FSL_PCI if PCI
++	select SERIAL_8250_SHARE_IRQ if SERIAL_8250
++	default y
++
++if MPC85xx
+ 
+ config MPC8540_ADS
+ 	bool "Freescale MPC8540 ADS"
+@@ -13,6 +20,7 @@ config MPC8560_ADS
+ 	bool "Freescale MPC8560 ADS"
+ 	select DEFAULT_UIMAGE
+ 	select PPC_CPM_NEW_BINDING
++	select CPM2
+ 	help
+ 	  This option enables support for the MPC 8560 ADS board
+ 
+@@ -38,25 +46,64 @@ config MPC85xx_DS
+ 	help
+ 	  This option enables support for the MPC85xx DS (MPC8544 DS) board
+ 
+-endchoice
++config STX_GP3
++	bool "Silicon Turnkey Express GP3"
++	help
++	  This option enables support for the Silicon Turnkey Express GP3
++	  board.
++	select CPM2
++	select DEFAULT_UIMAGE
++	select PPC_CPM_NEW_BINDING
+ 
+-config MPC8540
+-	bool
+-	select PPC_UDBG_16550
+-	select PPC_INDIRECT_PCI
+-	default y if MPC8540_ADS || MPC85xx_CDS
++config TQM8540
++	bool "TQ Components TQM8540"
++	help
++	  This option enables support for the TQ Components TQM8540 board.
++	select DEFAULT_UIMAGE
++	select PPC_CPM_NEW_BINDING
++	select TQM85xx
+ 
+-config MPC8560
+-	bool
++config TQM8541
++	bool "TQ Components TQM8541"
++	help
++	  This option enables support for the TQ Components TQM8541 board.
++	select DEFAULT_UIMAGE
++	select PPC_CPM_NEW_BINDING
++	select TQM85xx
++	select CPM2
++
++config TQM8555
++	bool "TQ Components TQM8555"
++	help
++	  This option enables support for the TQ Components TQM8555 board.
++	select DEFAULT_UIMAGE
++	select PPC_CPM_NEW_BINDING
++	select TQM85xx
+ 	select CPM2
+-	default y if MPC8560_ADS
+ 
+-config MPC85xx
++config TQM8560
++	bool "TQ Components TQM8560"
++	help
++	  This option enables support for the TQ Components TQM8560 board.
++	select DEFAULT_UIMAGE
++	select PPC_CPM_NEW_BINDING
++	select TQM85xx
++	select CPM2
++
++config SBC8548
++	bool "Wind River SBC8548"
++	select DEFAULT_UIMAGE
++	help
++	  This option enables support for the Wind River SBC8548 board
++
++config SBC8560
++	bool "Wind River SBC8560"
++	select DEFAULT_UIMAGE
++	select PPC_CPM_NEW_BINDING if CPM2
++	help
++	  This option enables support for the Wind River SBC8560 board
++
++endif # MPC85xx
++
++config TQM85xx
+ 	bool
+-	select PPC_UDBG_16550
+-	select PPC_INDIRECT_PCI if PCI
+-	select MPIC
+-	select FSL_PCI if PCI
+-	select SERIAL_8250_SHARE_IRQ if SERIAL_8250
+-	default y if MPC8540_ADS || MPC85xx_CDS || MPC8560_ADS \
+-		|| MPC85xx_MDS || MPC85xx_DS
+diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
+index 5eca920..cb7af4e 100644
+--- a/arch/powerpc/platforms/85xx/Makefile
++++ b/arch/powerpc/platforms/85xx/Makefile
+@@ -6,3 +6,7 @@ obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
+ obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
+ obj-$(CONFIG_MPC85xx_DS)  += mpc85xx_ds.o
+ obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
++obj-$(CONFIG_STX_GP3)	  += stx_gp3.o
++obj-$(CONFIG_TQM85xx)	  += tqm85xx.o
++obj-$(CONFIG_SBC8560)     += sbc8560.o
++obj-$(CONFIG_SBC8548)     += sbc8548.o
+diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+index bccdc25..4e03050 100644
+--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
++++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+@@ -52,9 +52,9 @@ static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
+ {
+ 	int cascade_irq;
+ 
+-	while ((cascade_irq = cpm2_get_irq()) >= 0) {
++	while ((cascade_irq = cpm2_get_irq()) >= 0)
+ 		generic_handle_irq(cascade_irq);
+-	}
++
+ 	desc->chip->eoi(irq);
+ }
+ 
+@@ -70,13 +70,12 @@ static void __init mpc85xx_ads_pic_init(void)
+ #endif
+ 
+ 	np = of_find_node_by_type(np, "open-pic");
+-
+-	if (np == NULL) {
++	if (!np) {
+ 		printk(KERN_ERR "Could not find open-pic node\n");
+ 		return;
+ 	}
+ 
+-	if(of_address_to_resource(np, 0, &r)) {
++	if (of_address_to_resource(np, 0, &r)) {
+ 		printk(KERN_ERR "Could not map mpic register space\n");
+ 		of_node_put(np);
+ 		return;
+@@ -100,6 +99,7 @@ static void __init mpc85xx_ads_pic_init(void)
+ 	irq = irq_of_parse_and_map(np, 0);
+ 
+ 	cpm2_pic_init(np);
++	of_node_put(np);
+ 	set_irq_chained_handler(irq, cpm2_cascade);
+ #endif
+ }
+@@ -112,7 +112,7 @@ struct cpm_pin {
+ 	int port, pin, flags;
+ };
+ 
+-static struct cpm_pin mpc8560_ads_pins[] = {
++static const struct cpm_pin mpc8560_ads_pins[] = {
+ 	/* SCC1 */
+ 	{3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
+ 	{3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
+@@ -233,13 +233,11 @@ static struct of_device_id __initdata of_bus_ids[] = {
+ 
+ static int __init declare_of_platform_devices(void)
+ {
+-	if (!machine_is(mpc85xx_ads))
+-		return 0;
+-
+ 	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++
+ 	return 0;
+ }
+-device_initcall(declare_of_platform_devices);
++machine_device_initcall(mpc85xx_ads, declare_of_platform_devices);
+ 
+ /*
+  * Called very early, device-tree isn't unflattened
+diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+index 4d063ee..8b1de78 100644
+--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
++++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+@@ -222,9 +222,6 @@ static int mpc85xx_cds_8259_attach(void)
+ 	struct device_node *cascade_node = NULL;
+ 	int cascade_irq;
+ 
+-	if (!machine_is(mpc85xx_cds))
+-		return 0;
+-
+ 	/* Initialize the i8259 controller */
+ 	for_each_node_by_type(np, "interrupt-controller")
+ 		if (of_device_is_compatible(np, "chrp,iic")) {
+@@ -262,8 +259,7 @@ static int mpc85xx_cds_8259_attach(void)
+ 
+ 	return 0;
+ }
+-
+-device_initcall(mpc85xx_cds_8259_attach);
++machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
+ 
+ #endif /* CONFIG_PPC_I8259 */
+ 
+diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+index 59c121a..bdb3d0b 100644
+--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
++++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+@@ -123,7 +123,7 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
+ 	struct device_node* node;
+ 	struct resource rsrc;
+ 
+-	node = (struct device_node *)hose->arch_data;
++	node = hose->dn;
+ 	of_address_to_resource(node, 0, &rsrc);
+ 
+ 	if ((rsrc.start & 0xfffff) == primary_phb_addr) {
+diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+index 61b3eed..25f8bc7 100644
+--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
++++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+@@ -30,9 +30,9 @@
+ #include <linux/initrd.h>
+ #include <linux/module.h>
+ #include <linux/fsl_devices.h>
++#include <linux/of_platform.h>
++#include <linux/of_device.h>
+ 
+-#include <asm/of_device.h>
+-#include <asm/of_platform.h>
+ #include <asm/system.h>
+ #include <asm/atomic.h>
+ #include <asm/time.h>
+@@ -94,21 +94,25 @@ static void __init mpc85xx_mds_setup_arch(void)
+ #endif
+ 
+ #ifdef CONFIG_QUICC_ENGINE
+-	if ((np = of_find_node_by_name(NULL, "qe")) != NULL) {
+-		qe_reset();
+-		of_node_put(np);
++	np = of_find_compatible_node(NULL, NULL, "fsl,qe");
++	if (!np) {
++		np = of_find_node_by_name(NULL, "qe");
++		if (!np)
++			return;
+ 	}
+ 
+-	if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
+-		struct device_node *ucc = NULL;
++	qe_reset();
++	of_node_put(np);
++
++	np = of_find_node_by_name(NULL, "par_io");
++	if (np) {
++		struct device_node *ucc;
+ 
+ 		par_io_init(np);
+ 		of_node_put(np);
+ 
+-		for ( ;(ucc = of_find_node_by_name(ucc, "ucc")) != NULL;)
++		for_each_node_by_name(ucc, "ucc")
+ 			par_io_of_config(ucc);
+-
+-		of_node_put(ucc);
+ 	}
+ 
+ 	if (bcsr_regs) {
+@@ -131,7 +135,6 @@ static void __init mpc85xx_mds_setup_arch(void)
+ 
+ 		iounmap(bcsr_regs);
+ 	}
+-
+ #endif	/* CONFIG_QUICC_ENGINE */
+ }
+ 
+@@ -139,20 +142,18 @@ static struct of_device_id mpc85xx_ids[] = {
+ 	{ .type = "soc", },
+ 	{ .compatible = "soc", },
+ 	{ .type = "qe", },
++	{ .compatible = "fsl,qe", },
+ 	{},
+ };
+ 
+ static int __init mpc85xx_publish_devices(void)
+ {
+-	if (!machine_is(mpc85xx_mds))
+-		return 0;
+-
+ 	/* Publish the QE devices */
+-	of_platform_bus_probe(NULL,mpc85xx_ids,NULL);
++	of_platform_bus_probe(NULL, mpc85xx_ids, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(mpc85xx_publish_devices);
++machine_device_initcall(mpc85xx_mds, mpc85xx_publish_devices);
+ 
+ static void __init mpc85xx_mds_pic_init(void)
+ {
+@@ -179,10 +180,12 @@ static void __init mpc85xx_mds_pic_init(void)
+ 	mpic_init(mpic);
+ 
+ #ifdef CONFIG_QUICC_ENGINE
+-	np = of_find_node_by_type(NULL, "qeic");
+-	if (!np)
+-		return;
+-
++	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
++	if (!np) {
++		np = of_find_node_by_type(NULL, "qeic");
++		if (!np)
++			return;
++	}
+ 	qe_ic_init(np, 0, qe_ic_cascade_muxed_mpic, NULL);
+ 	of_node_put(np);
+ #endif				/* CONFIG_QUICC_ENGINE */
+diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
+new file mode 100644
+index 0000000..488facb
+--- /dev/null
++++ b/arch/powerpc/platforms/85xx/sbc8548.c
+@@ -0,0 +1,167 @@
++/*
++ * Wind River SBC8548 setup and early boot code.
++ *
++ * Copyright 2007 Wind River Systems Inc.
++ *
++ * By Paul Gortmaker (see MAINTAINERS for contact information)
++ *
++ * Based largely on the MPC8548CDS support - Copyright 2005 Freescale Inc.
++ *
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/stddef.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/reboot.h>
++#include <linux/pci.h>
++#include <linux/kdev_t.h>
++#include <linux/major.h>
++#include <linux/console.h>
++#include <linux/delay.h>
++#include <linux/seq_file.h>
++#include <linux/initrd.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/fsl_devices.h>
++#include <linux/of_platform.h>
++
++#include <asm/system.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/atomic.h>
++#include <asm/time.h>
++#include <asm/io.h>
++#include <asm/machdep.h>
++#include <asm/ipic.h>
++#include <asm/pci-bridge.h>
++#include <asm/irq.h>
++#include <mm/mmu_decl.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/mpic.h>
++
++#include <sysdev/fsl_soc.h>
++#include <sysdev/fsl_pci.h>
++
++static void __init sbc8548_pic_init(void)
++{
++	struct mpic *mpic;
++	struct resource r;
++	struct device_node *np = NULL;
++
++	np = of_find_node_by_type(np, "open-pic");
++
++	if (np == NULL) {
++		printk(KERN_ERR "Could not find open-pic node\n");
++		return;
++	}
++
++	if (of_address_to_resource(np, 0, &r)) {
++		printk(KERN_ERR "Failed to map mpic register space\n");
++		of_node_put(np);
++		return;
++	}
++
++	mpic = mpic_alloc(np, r.start,
++			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
++			0, 256, " OpenPIC  ");
++	BUG_ON(mpic == NULL);
++
++	/* Return the mpic node */
++	of_node_put(np);
++
++	mpic_init(mpic);
++}
++
++/*
++ * Setup the architecture
++ */
++static void __init sbc8548_setup_arch(void)
++{
++#ifdef CONFIG_PCI
++	struct device_node *np;
++#endif
++
++	if (ppc_md.progress)
++		ppc_md.progress("sbc8548_setup_arch()", 0);
++
++#ifdef CONFIG_PCI
++	for_each_node_by_type(np, "pci") {
++		if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
++		    of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
++			struct resource rsrc;
++			of_address_to_resource(np, 0, &rsrc);
++			if ((rsrc.start & 0xfffff) == 0x8000)
++				fsl_add_bridge(np, 1);
++			else
++				fsl_add_bridge(np, 0);
++		}
++	}
++#endif
++}
++
++static void sbc8548_show_cpuinfo(struct seq_file *m)
++{
++	uint pvid, svid, phid1;
++	uint memsize = total_memory;
++
++	pvid = mfspr(SPRN_PVR);
++	svid = mfspr(SPRN_SVR);
++
++	seq_printf(m, "Vendor\t\t: Wind River\n");
++	seq_printf(m, "Machine\t\t: SBC8548\n");
++	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
++	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
++
++	/* Display cpu Pll setting */
++	phid1 = mfspr(SPRN_HID1);
++	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
++
++	/* Display the amount of memory */
++	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
++}
++
++static struct of_device_id __initdata of_bus_ids[] = {
++	{ .name = "soc", },
++	{ .type = "soc", },
++	{},
++};
++
++static int __init declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++
++	return 0;
++}
++machine_device_initcall(sbc8548, declare_of_platform_devices);
++
++/*
++ * Called very early, device-tree isn't unflattened
++ */
++static int __init sbc8548_probe(void)
++{
++        unsigned long root = of_get_flat_dt_root();
++
++        return of_flat_dt_is_compatible(root, "SBC8548");
++}
++
++define_machine(sbc8548) {
++	.name		= "SBC8548",
++	.probe		= sbc8548_probe,
++	.setup_arch	= sbc8548_setup_arch,
++	.init_IRQ	= sbc8548_pic_init,
++	.show_cpuinfo	= sbc8548_show_cpuinfo,
++	.get_irq	= mpic_get_irq,
++	.restart	= fsl_rstcr_restart,
++#ifdef CONFIG_PCI
++	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
++#endif
++	.calibrate_decr = generic_calibrate_decr,
++	.progress	= udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
+new file mode 100644
+index 0000000..2c580cd
+--- /dev/null
++++ b/arch/powerpc/platforms/85xx/sbc8560.c
+@@ -0,0 +1,283 @@
++/*
++ * Wind River SBC8560 setup and early boot code.
++ *
++ * Copyright 2007 Wind River Systems Inc.
++ *
++ * By Paul Gortmaker (see MAINTAINERS for contact information)
++ *
++ * Based largely on the MPC8560ADS support - Copyright 2005 Freescale Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/stddef.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/kdev_t.h>
++#include <linux/delay.h>
++#include <linux/seq_file.h>
++#include <linux/of_platform.h>
++
++#include <asm/system.h>
++#include <asm/time.h>
++#include <asm/machdep.h>
++#include <asm/pci-bridge.h>
++#include <asm/mpic.h>
++#include <mm/mmu_decl.h>
++#include <asm/udbg.h>
++
++#include <sysdev/fsl_soc.h>
++#include <sysdev/fsl_pci.h>
++
++#ifdef CONFIG_CPM2
++#include <asm/cpm2.h>
++#include <sysdev/cpm2_pic.h>
++#endif
++
++#ifdef CONFIG_CPM2
++
++static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
++{
++	int cascade_irq;
++
++	while ((cascade_irq = cpm2_get_irq()) >= 0)
++		generic_handle_irq(cascade_irq);
++
++	desc->chip->eoi(irq);
++}
++
++#endif /* CONFIG_CPM2 */
++
++static void __init sbc8560_pic_init(void)
++{
++	struct mpic *mpic;
++	struct resource r;
++	struct device_node *np = NULL;
++#ifdef CONFIG_CPM2
++	int irq;
++#endif
++
++	np = of_find_node_by_type(np, "open-pic");
++	if (!np) {
++		printk(KERN_ERR "Could not find open-pic node\n");
++		return;
++	}
++
++	if (of_address_to_resource(np, 0, &r)) {
++		printk(KERN_ERR "Could not map mpic register space\n");
++		of_node_put(np);
++		return;
++	}
++
++	mpic = mpic_alloc(np, r.start,
++			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
++			0, 256, " OpenPIC  ");
++	BUG_ON(mpic == NULL);
++	of_node_put(np);
++
++	mpic_init(mpic);
++
++#ifdef CONFIG_CPM2
++	/* Setup CPM2 PIC */
++	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
++	if (np == NULL) {
++		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
++		return;
++	}
++	irq = irq_of_parse_and_map(np, 0);
++
++	cpm2_pic_init(np);
++	of_node_put(np);
++	set_irq_chained_handler(irq, cpm2_cascade);
++#endif
++}
++
++/*
++ * Setup the architecture
++ */
++#ifdef CONFIG_CPM2
++struct cpm_pin {
++	int port, pin, flags;
++};
++
++static const struct cpm_pin sbc8560_pins[] = {
++	/* SCC1 */
++	{3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++
++	/* SCC2 */
++	{3, 26, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{3, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{3, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++
++	/* FCC2 */
++	{1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK14 */
++	{2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK13 */
++
++	/* FCC3 */
++	{1, 4, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 6, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 7, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 9, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 12, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 13, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 14, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 15, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
++	{1, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{1, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
++	{2, 16, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* CLK16 */
++	{2, 17, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* CLK15 */
++};
++
++static void __init init_ioports(void)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(sbc8560_pins); i++) {
++		struct cpm_pin *pin = &sbc8560_pins[i];
++		cpm2_set_pin(pin->port, pin->pin, pin->flags);
++	}
++
++	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX);
++	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX);
++	cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX);
++	cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX);
++	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
++	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);
++	cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK15, CPM_CLK_RX);
++	cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK16, CPM_CLK_TX);
++}
++#endif
++
++static void __init sbc8560_setup_arch(void)
++{
++#ifdef CONFIG_PCI
++	struct device_node *np;
++#endif
++
++	if (ppc_md.progress)
++		ppc_md.progress("sbc8560_setup_arch()", 0);
++
++#ifdef CONFIG_CPM2
++	cpm2_reset();
++	init_ioports();
++#endif
++
++#ifdef CONFIG_PCI
++	for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
++		fsl_add_bridge(np, 1);
++#endif
++}
++
++static void sbc8560_show_cpuinfo(struct seq_file *m)
++{
++	uint pvid, svid, phid1;
++	uint memsize = total_memory;
++
++	pvid = mfspr(SPRN_PVR);
++	svid = mfspr(SPRN_SVR);
++
++	seq_printf(m, "Vendor\t\t: Wind River\n");
++	seq_printf(m, "Machine\t\t: SBC8560\n");
++	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
++	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
++
++	/* Display cpu Pll setting */
++	phid1 = mfspr(SPRN_HID1);
++	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
++
++	/* Display the amount of memory */
++	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
++}
++
++static struct of_device_id __initdata of_bus_ids[] = {
++	{ .name = "soc", },
++	{ .type = "soc", },
++	{ .name = "cpm", },
++	{ .name = "localbus", },
++	{},
++};
++
++static int __init declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++
++	return 0;
++}
++machine_device_initcall(sbc8560, declare_of_platform_devices);
++
++/*
++ * Called very early, device-tree isn't unflattened
++ */
++static int __init sbc8560_probe(void)
++{
++        unsigned long root = of_get_flat_dt_root();
++
++        return of_flat_dt_is_compatible(root, "SBC8560");
++}
++
++#ifdef CONFIG_RTC_DRV_M48T59
++static int __init sbc8560_rtc_init(void)
++{
++	struct device_node *np;
++	struct resource res;
++	struct platform_device *rtc_dev;
++
++	np = of_find_compatible_node(NULL, NULL, "m48t59");
++	if (np == NULL) {
++		printk("No RTC in DTB. Has it been eaten by wild dogs?\n");
++		return -ENODEV;
++	}
++
++	of_address_to_resource(np, 0, &res);
++	of_node_put(np);
++
++	printk("Found RTC (m48t59) at i/o 0x%x\n", res.start);
++
++	rtc_dev = platform_device_register_simple("rtc-m48t59", 0, &res, 1);
++
++	if (IS_ERR(rtc_dev)) {
++		printk("Registering sbc8560 RTC device failed\n");
++		return PTR_ERR(rtc_dev);
++	}
++
++	return 0;
++}
++
++arch_initcall(sbc8560_rtc_init);
++
++#endif	/* M48T59 */
++
++define_machine(sbc8560) {
++	.name			= "SBC8560",
++	.probe			= sbc8560_probe,
++	.setup_arch		= sbc8560_setup_arch,
++	.init_IRQ		= sbc8560_pic_init,
++	.show_cpuinfo		= sbc8560_show_cpuinfo,
++	.get_irq		= mpic_get_irq,
++	.restart		= fsl_rstcr_restart,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
+new file mode 100644
+index 0000000..18499d7
+--- /dev/null
++++ b/arch/powerpc/platforms/85xx/stx_gp3.c
+@@ -0,0 +1,183 @@
++/*
++ * Based on MPC8560 ADS and arch/ppc stx_gp3 ports
++ *
++ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
++ *
++ * Copyright 2008 Freescale Semiconductor Inc.
++ *
++ * Dan Malek <dan at embeddededge.com>
++ * Copyright 2004 Embedded Edge, LLC
++ *
++ * Copied from mpc8560_ads.c
++ * Copyright 2002, 2003 Motorola Inc.
++ *
++ * Ported to 2.6, Matt Porter <mporter at kernel.crashing.org>
++ * Copyright 2004-2005 MontaVista Software, Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/stddef.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/kdev_t.h>
++#include <linux/delay.h>
++#include <linux/seq_file.h>
++#include <linux/of_platform.h>
++
++#include <asm/system.h>
++#include <asm/time.h>
++#include <asm/machdep.h>
++#include <asm/pci-bridge.h>
++#include <asm/mpic.h>
++#include <asm/prom.h>
++#include <mm/mmu_decl.h>
++#include <asm/udbg.h>
++
++#include <sysdev/fsl_soc.h>
++#include <sysdev/fsl_pci.h>
++
++#ifdef CONFIG_CPM2
++#include <asm/cpm2.h>
++#include <sysdev/cpm2_pic.h>
++
++static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
++{
++	int cascade_irq;
++
++	while ((cascade_irq = cpm2_get_irq()) >= 0)
++		generic_handle_irq(cascade_irq);
++
++	desc->chip->eoi(irq);
++}
++#endif /* CONFIG_CPM2 */
++
++static void __init stx_gp3_pic_init(void)
++{
++	struct mpic *mpic;
++	struct resource r;
++	struct device_node *np;
++#ifdef CONFIG_CPM2
++	int irq;
++#endif
++
++	np = of_find_node_by_type(NULL, "open-pic");
++	if (!np) {
++		printk(KERN_ERR "Could not find open-pic node\n");
++		return;
++	}
++
++	if (of_address_to_resource(np, 0, &r)) {
++		printk(KERN_ERR "Could not map mpic register space\n");
++		of_node_put(np);
++		return;
++	}
++
++	mpic = mpic_alloc(np, r.start,
++			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
++			0, 256, " OpenPIC  ");
++	BUG_ON(mpic == NULL);
++	of_node_put(np);
++
++	mpic_init(mpic);
++
++#ifdef CONFIG_CPM2
++	/* Setup CPM2 PIC */
++	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
++	if (np == NULL) {
++		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
++		return;
++	}
++	irq = irq_of_parse_and_map(np, 0);
++
++	if (irq == NO_IRQ) {
++		of_node_put(np);
++		printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
++		return;
++	}
++
++	cpm2_pic_init(np);
++	of_node_put(np);
++	set_irq_chained_handler(irq, cpm2_cascade);
++#endif
++}
++
++/*
++ * Setup the architecture
++ */
++static void __init stx_gp3_setup_arch(void)
++{
++#ifdef CONFIG_PCI
++	struct device_node *np;
++#endif
++
++	if (ppc_md.progress)
++		ppc_md.progress("stx_gp3_setup_arch()", 0);
++
++#ifdef CONFIG_CPM2
++	cpm2_reset();
++#endif
++
++#ifdef CONFIG_PCI
++	for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
++		fsl_add_bridge(np, 1);
++#endif
++}
++
++static void stx_gp3_show_cpuinfo(struct seq_file *m)
++{
++	uint pvid, svid, phid1;
++	uint memsize = total_memory;
++
++	pvid = mfspr(SPRN_PVR);
++	svid = mfspr(SPRN_SVR);
++
++	seq_printf(m, "Vendor\t\t: RPC Electronics STx \n");
++	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
++	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
++
++	/* Display cpu Pll setting */
++	phid1 = mfspr(SPRN_HID1);
++	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
++
++	/* Display the amount of memory */
++	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
++}
++
++static struct of_device_id __initdata of_bus_ids[] = {
++	{ .compatible = "simple-bus", },
++	{},
++};
++
++static int __init declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++
++	return 0;
++}
++machine_device_initcall(stx_gp3, declare_of_platform_devices);
++
++/*
++ * Called very early, device-tree isn't unflattened
++ */
++static int __init stx_gp3_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	return of_flat_dt_is_compatible(root, "stx,gp3-8560");
++}
++
++define_machine(stx_gp3) {
++	.name			= "STX GP3",
++	.probe			= stx_gp3_probe,
++	.setup_arch		= stx_gp3_setup_arch,
++	.init_IRQ		= stx_gp3_pic_init,
++	.show_cpuinfo		= stx_gp3_show_cpuinfo,
++	.get_irq		= mpic_get_irq,
++	.restart		= fsl_rstcr_restart,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
+new file mode 100644
+index 0000000..77681ac
+--- /dev/null
++++ b/arch/powerpc/platforms/85xx/tqm85xx.c
+@@ -0,0 +1,187 @@
++/*
++ * Based on MPC8560 ADS and arch/ppc tqm85xx ports
++ *
++ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
++ *
++ * Copyright 2008 Freescale Semiconductor Inc.
++ *
++ * Copyright (c) 2005-2006 DENX Software Engineering
++ * Stefan Roese <sr at denx.de>
++ *
++ * Based on original work by
++ * 	Kumar Gala <kumar.gala at freescale.com>
++ *      Copyright 2004 Freescale Semiconductor Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/stddef.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/kdev_t.h>
++#include <linux/delay.h>
++#include <linux/seq_file.h>
++#include <linux/of_platform.h>
++
++#include <asm/system.h>
++#include <asm/time.h>
++#include <asm/machdep.h>
++#include <asm/pci-bridge.h>
++#include <asm/mpic.h>
++#include <asm/prom.h>
++#include <mm/mmu_decl.h>
++#include <asm/udbg.h>
++
++#include <sysdev/fsl_soc.h>
++#include <sysdev/fsl_pci.h>
++
++#ifdef CONFIG_CPM2
++#include <asm/cpm2.h>
++#include <sysdev/cpm2_pic.h>
++
++static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
++{
++	int cascade_irq;
++
++	while ((cascade_irq = cpm2_get_irq()) >= 0)
++		generic_handle_irq(cascade_irq);
++
++	desc->chip->eoi(irq);
++}
++#endif /* CONFIG_CPM2 */
++
++static void __init tqm85xx_pic_init(void)
++{
++	struct mpic *mpic;
++	struct resource r;
++	struct device_node *np;
++#ifdef CONFIG_CPM2
++	int irq;
++#endif
++
++	np = of_find_node_by_type(NULL, "open-pic");
++	if (!np) {
++		printk(KERN_ERR "Could not find open-pic node\n");
++		return;
++	}
++
++	if (of_address_to_resource(np, 0, &r)) {
++		printk(KERN_ERR "Could not map mpic register space\n");
++		of_node_put(np);
++		return;
++	}
++
++	mpic = mpic_alloc(np, r.start,
++			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
++			0, 256, " OpenPIC  ");
++	BUG_ON(mpic == NULL);
++	of_node_put(np);
++
++	mpic_init(mpic);
++
++#ifdef CONFIG_CPM2
++	/* Setup CPM2 PIC */
++	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
++	if (np == NULL) {
++		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
++		return;
++	}
++	irq = irq_of_parse_and_map(np, 0);
++
++	if (irq == NO_IRQ) {
++		of_node_put(np);
++		printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
++		return;
++	}
++
++	cpm2_pic_init(np);
++	of_node_put(np);
++	set_irq_chained_handler(irq, cpm2_cascade);
++#endif
++}
++
++/*
++ * Setup the architecture
++ */
++static void __init tqm85xx_setup_arch(void)
++{
++#ifdef CONFIG_PCI
++	struct device_node *np;
++#endif
++
++	if (ppc_md.progress)
++		ppc_md.progress("tqm85xx_setup_arch()", 0);
++
++#ifdef CONFIG_CPM2
++	cpm2_reset();
++#endif
++
++#ifdef CONFIG_PCI
++	for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
++		fsl_add_bridge(np, 1);
++#endif
++}
++
++static void tqm85xx_show_cpuinfo(struct seq_file *m)
++{
++	uint pvid, svid, phid1;
++	uint memsize = total_memory;
++
++	pvid = mfspr(SPRN_PVR);
++	svid = mfspr(SPRN_SVR);
++
++	seq_printf(m, "Vendor\t\t: TQ Components\n");
++	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
++	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
++
++	/* Display cpu Pll setting */
++	phid1 = mfspr(SPRN_HID1);
++	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
++
++	/* Display the amount of memory */
++	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
++}
++
++static struct of_device_id __initdata of_bus_ids[] = {
++	{ .compatible = "simple-bus", },
++	{},
++};
++
++static int __init declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++
++	return 0;
++}
++machine_device_initcall(tqm85xx, declare_of_platform_devices);
++
++/*
++ * Called very early, device-tree isn't unflattened
++ */
++static int __init tqm85xx_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	if ((of_flat_dt_is_compatible(root, "tqm,8540")) ||
++	    (of_flat_dt_is_compatible(root, "tqm,8541")) ||
++	    (of_flat_dt_is_compatible(root, "tqm,8555")) ||
++	    (of_flat_dt_is_compatible(root, "tqm,8560")))
++		return 1;
++
++	return 0;
++}
++
++define_machine(tqm85xx) {
++	.name			= "TQM85xx",
++	.probe			= tqm85xx_probe,
++	.setup_arch		= tqm85xx_setup_arch,
++	.init_IRQ		= tqm85xx_pic_init,
++	.show_cpuinfo		= tqm85xx_show_cpuinfo,
++	.get_irq		= mpic_get_irq,
++	.restart		= fsl_rstcr_restart,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+index 6390895..0b07485 100644
+--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
++++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+@@ -34,9 +34,24 @@
+ 
+ #include <asm/mpic.h>
+ 
++#include <linux/of_platform.h>
+ #include <sysdev/fsl_pci.h>
+ #include <sysdev/fsl_soc.h>
+ 
++static struct of_device_id __initdata mpc8610_ids[] = {
++	{ .compatible = "fsl,mpc8610-immr", },
++	{}
++};
++
++static int __init mpc8610_declare_of_platform_devices(void)
++{
++	/* Without this call, the SSI device driver won't get probed. */
++	of_platform_bus_probe(NULL, mpc8610_ids, NULL);
++
++	return 0;
++}
++machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
++
+ void __init
+ mpc86xx_hpcd_init_irq(void)
+ {
+@@ -124,7 +139,7 @@ static void __devinit quirk_uli5229(struct pci_dev *dev)
+ static void __devinit final_uli5288(struct pci_dev *dev)
+ {
+ 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+-	struct device_node *hosenode = hose ? hose->arch_data : NULL;
++	struct device_node *hosenode = hose ? hose->dn : NULL;
+ 	struct of_irq oirq;
+ 	int virq, pin = 2;
+ 	u32 laddr[3];
+diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+index 32a531a..cfbe8c5 100644
+--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
++++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+@@ -18,6 +18,7 @@
+ #include <linux/kdev_t.h>
+ #include <linux/delay.h>
+ #include <linux/seq_file.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/system.h>
+ #include <asm/time.h>
+@@ -116,7 +117,7 @@ static int mpc86xx_exclude_device(struct pci_controller *hose,
+ 	struct device_node* node;	
+ 	struct resource rsrc;
+ 
+-	node = (struct device_node *)hose->arch_data;
++	node = hose->dn;
+ 	of_address_to_resource(node, 0, &rsrc);
+ 
+ 	if ((rsrc.start & 0xfffff) == 0x8000) {
+@@ -212,6 +213,19 @@ mpc86xx_time_init(void)
+ 	return 0;
+ }
+ 
++static __initdata struct of_device_id of_bus_ids[] = {
++	{ .compatible = "simple-bus", },
++	{},
++};
++
++static int __init declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++
++	return 0;
++}
++machine_device_initcall(mpc86xx_hpcn, declare_of_platform_devices);
++
+ define_machine(mpc86xx_hpcn) {
+ 	.name			= "MPC86xx HPCN",
+ 	.probe			= mpc86xx_hpcn_probe,
+diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
+index bd28655..7fd224c 100644
+--- a/arch/powerpc/platforms/8xx/Kconfig
++++ b/arch/powerpc/platforms/8xx/Kconfig
+@@ -18,6 +18,7 @@ config MPC8XXFADS
+ config MPC86XADS
+ 	bool "MPC86XADS"
+ 	select CPM1
++	select PPC_CPM_NEW_BINDING
+ 	help
+ 	  MPC86x Application Development System by Freescale Semiconductor.
+ 	  The MPC86xADS is meant to serve as a platform for s/w and h/w
+@@ -43,6 +44,15 @@ config PPC_EP88XC
+ 	  This board is also resold by Freescale as the QUICCStart
+ 	  MPC885 Evaluation System and/or the CWH-PPC-885XN-VE.
+ 
++config PPC_ADDER875
++	bool "Analogue & Micro Adder 875"
++	select CPM1
++	select PPC_CPM_NEW_BINDING
++	select REDBOOT
++	help
++	  This enables support for the Analogue & Micro Adder 875
++	  board.
++
+ endchoice
+ 
+ menu "Freescale Ethernet driver platform-specific options"
+diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile
+index 8b70980..7b71d9c 100644
+--- a/arch/powerpc/platforms/8xx/Makefile
++++ b/arch/powerpc/platforms/8xx/Makefile
+@@ -5,3 +5,4 @@ obj-$(CONFIG_PPC_8xx)	  += m8xx_setup.o
+ obj-$(CONFIG_MPC885ADS)   += mpc885ads_setup.o
+ obj-$(CONFIG_MPC86XADS)   += mpc86xads_setup.o
+ obj-$(CONFIG_PPC_EP88XC)  += ep88xc.o
++obj-$(CONFIG_PPC_ADDER875) += adder875.o
+diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c
+new file mode 100644
+index 0000000..c6bc078
+--- /dev/null
++++ b/arch/powerpc/platforms/8xx/adder875.c
+@@ -0,0 +1,118 @@
++/* Analogue & Micro Adder MPC875 board support
++ *
++ * Author: Scott Wood <scottwood at freescale.com>
++ *
++ * Copyright (c) 2007 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute  it and/or modify
++ * it under the terms of the GNU General Public License, version 2, as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/fs_enet_pd.h>
++#include <linux/of_platform.h>
++
++#include <asm/time.h>
++#include <asm/machdep.h>
++#include <asm/commproc.h>
++#include <asm/fs_pd.h>
++#include <asm/udbg.h>
++#include <asm/prom.h>
++
++#include <sysdev/commproc.h>
++
++struct cpm_pin {
++	int port, pin, flags;
++};
++
++static __initdata struct cpm_pin adder875_pins[] = {
++	/* SMC1 */
++	{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
++	{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
++
++	/* MII1 */
++	{CPM_PORTA, 0, CPM_PIN_INPUT},
++	{CPM_PORTA, 1, CPM_PIN_INPUT},
++	{CPM_PORTA, 2, CPM_PIN_INPUT},
++	{CPM_PORTA, 3, CPM_PIN_INPUT},
++	{CPM_PORTA, 4, CPM_PIN_OUTPUT},
++	{CPM_PORTA, 10, CPM_PIN_OUTPUT},
++	{CPM_PORTA, 11, CPM_PIN_OUTPUT},
++	{CPM_PORTB, 19, CPM_PIN_INPUT},
++	{CPM_PORTB, 31, CPM_PIN_INPUT},
++	{CPM_PORTC, 12, CPM_PIN_INPUT},
++	{CPM_PORTC, 13, CPM_PIN_INPUT},
++	{CPM_PORTE, 30, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 31, CPM_PIN_OUTPUT},
++
++	/* MII2 */
++	{CPM_PORTE, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{CPM_PORTE, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{CPM_PORTE, 16, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{CPM_PORTE, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{CPM_PORTE, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{CPM_PORTE, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
++	{CPM_PORTE, 21, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 22, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 23, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 24, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 25, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 26, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 27, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 28, CPM_PIN_OUTPUT},
++	{CPM_PORTE, 29, CPM_PIN_OUTPUT},
++};
++
++static void __init init_ioports(void)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(adder875_pins); i++) {
++		const struct cpm_pin *pin = &adder875_pins[i];
++		cpm1_set_pin(pin->port, pin->pin, pin->flags);
++	}
++
++	cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
++
++	/* Set FEC1 and FEC2 to MII mode */
++	clrbits32(&mpc8xx_immr->im_cpm.cp_cptr, 0x00000180);
++}
++
++static void __init adder875_setup(void)
++{
++	cpm_reset();
++	init_ioports();
++}
++
++static int __init adder875_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++	return of_flat_dt_is_compatible(root, "analogue-and-micro,adder875");
++}
++
++static __initdata struct of_device_id of_bus_ids[] = {
++	{ .compatible = "simple-bus", },
++	{},
++};
++
++static int __init declare_of_platform_devices(void)
++{
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
++	return 0;
++}
++machine_device_initcall(adder875, declare_of_platform_devices);
++
++define_machine(adder875) {
++	.name = "Adder MPC875",
++	.probe = adder875_probe,
++	.setup_arch = adder875_setup,
++	.init_IRQ = m8xx_pic_init,
++	.get_irq = mpc8xx_get_irq,
++	.restart = mpc8xx_restart,
++	.calibrate_decr = generic_calibrate_decr,
++	.set_rtc_time = mpc8xx_set_rtc_time,
++	.get_rtc_time = mpc8xx_get_rtc_time,
++	.progress = udbg_progress,
++};
+diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
+index c518b6c..a8dffa0 100644
+--- a/arch/powerpc/platforms/8xx/ep88xc.c
++++ b/arch/powerpc/platforms/8xx/ep88xc.c
+@@ -16,8 +16,9 @@
+ #include <asm/io.h>
+ #include <asm/udbg.h>
+ #include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+-#include <sysdev/commproc.h>
++#include "mpc8xx.h"
+ 
+ struct cpm_pin {
+ 	int port, pin, flags;
+@@ -155,18 +156,17 @@ static struct of_device_id __initdata of_bus_ids[] = {
+ static int __init declare_of_platform_devices(void)
+ {
+ 	/* Publish the QE devices */
+-	if (machine_is(ep88xc))
+-		of_platform_bus_probe(NULL, of_bus_ids, NULL);
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(declare_of_platform_devices);
++machine_device_initcall(ep88xc, declare_of_platform_devices);
+ 
+ define_machine(ep88xc) {
+ 	.name = "Embedded Planet EP88xC",
+ 	.probe = ep88xc_probe,
+ 	.setup_arch = ep88xc_setup_arch,
+-	.init_IRQ = m8xx_pic_init,
++	.init_IRQ = mpc8xx_pics_init,
+ 	.get_irq	= mpc8xx_get_irq,
+ 	.restart = mpc8xx_restart,
+ 	.calibrate_decr = mpc8xx_calibrate_decr,
+diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
+index d35eda8..184f998 100644
+--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
++++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
+@@ -16,6 +16,7 @@
+ #include <linux/init.h>
+ #include <linux/time.h>
+ #include <linux/rtc.h>
++#include <linux/fsl_devices.h>
+ 
+ #include <asm/io.h>
+ #include <asm/mpc8xx.h>
+@@ -25,13 +26,11 @@
+ #include <mm/mmu_decl.h>
+ 
+ #include <sysdev/mpc8xx_pic.h>
+-#include <sysdev/commproc.h>
+ 
+-#ifdef CONFIG_PCMCIA_M8XX
++#include "mpc8xx.h"
++
+ struct mpc8xx_pcmcia_ops m8xx_pcmcia_ops;
+-#endif
+ 
+-void m8xx_calibrate_decr(void);
+ extern int cpm_pic_init(void);
+ extern int cpm_get_irq(void);
+ 
+@@ -120,7 +119,7 @@ void __init mpc8xx_calibrate_decr(void)
+ 	ppc_tb_freq /= 16;
+ 	ppc_proc_freq = 50000000;
+ 	if (!get_freq("clock-frequency", &ppc_proc_freq))
+-		printk(KERN_ERR "WARNING: Estimating processor frequency"
++		printk(KERN_ERR "WARNING: Estimating processor frequency "
+ 		                "(not found)\n");
+ 
+ 	printk("Decrementer Frequency = 0x%lx\n", ppc_tb_freq);
+@@ -237,13 +236,13 @@ static void cpm_cascade(unsigned int irq, struct irq_desc *desc)
+ 	desc->chip->eoi(irq);
+ }
+ 
+-/* Initialize the internal interrupt controller.  The number of
++/* Initialize the internal interrupt controllers.  The number of
+  * interrupts supported can vary with the processor type, and the
+  * 82xx family can have up to 64.
+  * External interrupts can be either edge or level triggered, and
+  * need to be initialized by the appropriate driver.
+  */
+-void __init m8xx_pic_init(void)
++void __init mpc8xx_pics_init(void)
+ {
+ 	int irq;
+ 
+diff --git a/arch/powerpc/platforms/8xx/mpc86xads.h b/arch/powerpc/platforms/8xx/mpc86xads.h
+index cffa194..17b1fe7 100644
+--- a/arch/powerpc/platforms/8xx/mpc86xads.h
++++ b/arch/powerpc/platforms/8xx/mpc86xads.h
+@@ -15,27 +15,6 @@
+ #ifndef __ASM_MPC86XADS_H__
+ #define __ASM_MPC86XADS_H__
+ 
+-#include <sysdev/fsl_soc.h>
+-
+-/* U-Boot maps BCSR to 0xff080000 */
+-#define BCSR_ADDR		((uint)0xff080000)
+-#define BCSR_SIZE		((uint)32)
+-#define BCSR0			((uint)(BCSR_ADDR + 0x00))
+-#define BCSR1			((uint)(BCSR_ADDR + 0x04))
+-#define BCSR2			((uint)(BCSR_ADDR + 0x08))
+-#define BCSR3			((uint)(BCSR_ADDR + 0x0c))
+-#define BCSR4			((uint)(BCSR_ADDR + 0x10))
+-
+-#define CFG_PHYDEV_ADDR		((uint)0xff0a0000)
+-#define BCSR5			((uint)(CFG_PHYDEV_ADDR + 0x300))
+-
+-#define MPC8xx_CPM_OFFSET	(0x9c0)
+-#define CPM_MAP_ADDR		(get_immrbase() + MPC8xx_CPM_OFFSET)
+-#define CPM_IRQ_OFFSET		16     // for compability with cpm_uart driver
+-
+-#define PCMCIA_MEM_ADDR		((uint)0xff020000)
+-#define PCMCIA_MEM_SIZE		((uint)(64 * 1024))
+-
+ /* Bits of interest in the BCSRs.
+  */
+ #define BCSR1_ETHEN		((uint)0x20000000)
+@@ -64,28 +43,5 @@
+ #define BCSR5_MII1_EN		0x02
+ #define BCSR5_MII1_RST		0x01
+ 
+-/* Interrupt level assignments */
+-#define PHY_INTERRUPT	SIU_IRQ7	/* PHY link change interrupt */
+-#define SIU_INT_FEC1	SIU_LEVEL1	/* FEC1 interrupt */
+-#define FEC_INTERRUPT	SIU_INT_FEC1	/* FEC interrupt */
+-
+-/* We don't use the 8259 */
+-#define NR_8259_INTS	0
+-
+-/* CPM Ethernet through SCC1 */
+-#define PA_ENET_RXD     ((ushort)0x0001)
+-#define PA_ENET_TXD     ((ushort)0x0002)
+-#define PA_ENET_TCLK    ((ushort)0x0100)
+-#define PA_ENET_RCLK    ((ushort)0x0200)
+-#define PB_ENET_TENA    ((uint)0x00001000)
+-#define PC_ENET_CLSN    ((ushort)0x0010)
+-#define PC_ENET_RENA    ((ushort)0x0020)
+-
+-/* Control bits in the SICR to route TCLK (CLK1) and RCLK (CLK2) to
+- * SCC1.  Also, make sure GR1 (bit 24) and SC1 (bit 25) are zero.
+- */
+-#define SICR_ENET_MASK  ((uint)0x000000ff)
+-#define SICR_ENET_CLKRT ((uint)0x0000002c)
+-
+ #endif /* __ASM_MPC86XADS_H__ */
+ #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+index 4901283..c028a5b 100644
+--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
++++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+@@ -6,273 +6,141 @@
+  *
+  * Copyright 2005 MontaVista Software Inc.
+  *
++ * Heavily modified by Scott Wood <scottwood at freescale.com>
++ * Copyright 2007 Freescale Semiconductor, Inc.
++ *
+  * This file is licensed under the terms of the GNU General Public License
+  * version 2. This program is licensed "as is" without any warranty of any
+  * kind, whether express or implied.
+  */
+ 
+ #include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/param.h>
+-#include <linux/string.h>
+-#include <linux/ioport.h>
+-#include <linux/device.h>
+-#include <linux/delay.h>
+-#include <linux/root_dev.h>
+-
+-#include <linux/fs_enet_pd.h>
+-#include <linux/fs_uart_pd.h>
+-#include <linux/mii.h>
++#include <linux/of_platform.h>
+ 
+-#include <asm/delay.h>
+ #include <asm/io.h>
+ #include <asm/machdep.h>
+-#include <asm/page.h>
+-#include <asm/processor.h>
+ #include <asm/system.h>
+ #include <asm/time.h>
+-#include <asm/mpc8xx.h>
+ #include <asm/8xx_immap.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #include <asm/fs_pd.h>
+-#include <asm/prom.h>
++#include <asm/udbg.h>
+ 
+-#include <sysdev/commproc.h>
++#include "mpc86xads.h"
++#include "mpc8xx.h"
+ 
+-static void init_smc1_uart_ioports(struct fs_uart_platform_info* fpi);
+-static void init_smc2_uart_ioports(struct fs_uart_platform_info* fpi);
+-static void init_scc1_ioports(struct fs_platform_info* ptr);
++struct cpm_pin {
++	int port, pin, flags;
++};
+ 
+-void __init mpc86xads_board_setup(void)
+-{
+-	cpm8xx_t *cp;
+- 	unsigned int *bcsr_io;
+-	u8 tmpval8;
++static struct cpm_pin mpc866ads_pins[] = {
++	/* SMC1 */
++	{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
++	{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
++
++	/* SMC2 */
++	{CPM_PORTB, 21, CPM_PIN_INPUT}, /* RX */
++	{CPM_PORTB, 20, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
++
++	/* SCC1 */
++	{CPM_PORTA, 6, CPM_PIN_INPUT}, /* CLK1 */
++	{CPM_PORTA, 7, CPM_PIN_INPUT}, /* CLK2 */
++	{CPM_PORTA, 14, CPM_PIN_INPUT}, /* TX */
++	{CPM_PORTA, 15, CPM_PIN_INPUT}, /* RX */
++	{CPM_PORTB, 19, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TENA */
++	{CPM_PORTC, 10, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* RENA */
++	{CPM_PORTC, 11, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CLSN */
++
++	/* MII */
++	{CPM_PORTD, 3, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 4, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 5, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 6, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 7, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 8, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 9, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 10, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 11, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 12, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 13, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 14, CPM_PIN_OUTPUT},
++	{CPM_PORTD, 15, CPM_PIN_OUTPUT},
++};
+ 
+-	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+-	cp = (cpm8xx_t *)immr_map(im_cpm);
++static void __init init_ioports(void)
++{
++	int i;
+ 
+-	if (bcsr_io == NULL) {
+-		printk(KERN_CRIT "Could not remap BCSR\n");
+-		return;
++	for (i = 0; i < ARRAY_SIZE(mpc866ads_pins); i++) {
++		struct cpm_pin *pin = &mpc866ads_pins[i];
++		cpm1_set_pin(pin->port, pin->pin, pin->flags);
+ 	}
+-#ifdef CONFIG_SERIAL_CPM_SMC1
+-	clrbits32(bcsr_io, BCSR1_RS232EN_1);
+-	clrbits32(&cp->cp_simode, 0xe0000000 >> 17);	/* brg1 */
+-	tmpval8 = in_8(&(cp->cp_smc[0].smc_smcm)) | (SMCM_RX | SMCM_TX);
+-	out_8(&(cp->cp_smc[0].smc_smcm), tmpval8);
+-	clrbits16(&cp->cp_smc[0].smc_smcmr, SMCMR_REN | SMCMR_TEN);
+-#else
+-	setbits32(bcsr_io,BCSR1_RS232EN_1);
+-	out_be16(&cp->cp_smc[0].smc_smcmr, 0);
+-	out_8(&cp->cp_smc[0].smc_smce, 0);
+-#endif
+ 
+-#ifdef CONFIG_SERIAL_CPM_SMC2
+-	clrbits32(bcsr_io,BCSR1_RS232EN_2);
+-	clrbits32(&cp->cp_simode, 0xe0000000 >> 1);
+-	setbits32(&cp->cp_simode, 0x20000000 >> 1);	/* brg2 */
+-	tmpval8 = in_8(&(cp->cp_smc[1].smc_smcm)) | (SMCM_RX | SMCM_TX);
+-	out_8(&(cp->cp_smc[1].smc_smcm), tmpval8);
+-	clrbits16(&cp->cp_smc[1].smc_smcmr, SMCMR_REN | SMCMR_TEN);
++	cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
++	cpm1_clk_setup(CPM_CLK_SMC2, CPM_BRG2, CPM_CLK_RTX);
++	cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK1, CPM_CLK_TX);
++	cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_RX);
+ 
+-	init_smc2_uart_ioports(0);
+-#else
+-	setbits32(bcsr_io,BCSR1_RS232EN_2);
+-	out_be16(&cp->cp_smc[1].smc_smcmr, 0);
+-	out_8(&cp->cp_smc[1].smc_smce, 0);
+-#endif
+-	immr_unmap(cp);
+-	iounmap(bcsr_io);
++	/* Set FEC1 and FEC2 to MII mode */
++	clrbits32(&mpc8xx_immr->im_cpm.cp_cptr, 0x00000180);
+ }
+ 
+-
+-static void init_fec1_ioports(struct fs_platform_info* ptr)
++static void __init mpc86xads_setup_arch(void)
+ {
+-	iop8xx_t *io_port = (iop8xx_t *)immr_map(im_ioport);
+-
+-	/* configure FEC1 pins  */
+-
+-	setbits16(&io_port->iop_pdpar, 0x1fff);
+-	setbits16(&io_port->iop_pddir, 0x1fff);
+-
+-	immr_unmap(io_port);
+-}
++	struct device_node *np;
++	u32 __iomem *bcsr_io;
+ 
+-void init_fec_ioports(struct fs_platform_info *fpi)
+-{
+-	int fec_no = fs_get_fec_index(fpi->fs_no);
++	cpm_reset();
++	init_ioports();
+ 
+-	switch (fec_no) {
+-	case 0:
+-		init_fec1_ioports(fpi);
+-		break;
+-	default:
+-		printk(KERN_ERR "init_fec_ioports: invalid FEC number\n");
++	np = of_find_compatible_node(NULL, NULL, "fsl,mpc866ads-bcsr");
++	if (!np) {
++		printk(KERN_CRIT "Could not find fsl,mpc866ads-bcsr node\n");
+ 		return;
+ 	}
+-}
+-
+-static void init_scc1_ioports(struct fs_platform_info* fpi)
+-{
+-	unsigned *bcsr_io;
+-	iop8xx_t *io_port;
+-	cpm8xx_t *cp;
+ 
+-	bcsr_io = ioremap(BCSR_ADDR, BCSR_SIZE);
+-	io_port = (iop8xx_t *)immr_map(im_ioport);
+-	cp = (cpm8xx_t *)immr_map(im_cpm);
++	bcsr_io = of_iomap(np, 0);
++	of_node_put(np);
+ 
+ 	if (bcsr_io == NULL) {
+ 		printk(KERN_CRIT "Could not remap BCSR\n");
+ 		return;
+ 	}
+ 
+-	/* Configure port A pins for Txd and Rxd.
+-	 */
+-	setbits16(&io_port->iop_papar, PA_ENET_RXD | PA_ENET_TXD);
+-	clrbits16(&io_port->iop_padir, PA_ENET_RXD | PA_ENET_TXD);
+-	clrbits16(&io_port->iop_paodr, PA_ENET_TXD);
+-
+-	/* Configure port C pins to enable CLSN and RENA.
+-	 */
+-	clrbits16(&io_port->iop_pcpar, PC_ENET_CLSN | PC_ENET_RENA);
+-	clrbits16(&io_port->iop_pcdir, PC_ENET_CLSN | PC_ENET_RENA);
+-	setbits16(&io_port->iop_pcso, PC_ENET_CLSN | PC_ENET_RENA);
+-
+-	/* Configure port A for TCLK and RCLK.
+-	 */
+-	setbits16(&io_port->iop_papar, PA_ENET_TCLK | PA_ENET_RCLK);
+-        clrbits16(&io_port->iop_padir, PA_ENET_TCLK | PA_ENET_RCLK);
+-        clrbits32(&cp->cp_pbpar, PB_ENET_TENA);
+-        clrbits32(&cp->cp_pbdir, PB_ENET_TENA);
+-
+-	/* Configure Serial Interface clock routing.
+-	 * First, clear all SCC bits to zero, then set the ones we want.
+-	 */
+-	clrbits32(&cp->cp_sicr, SICR_ENET_MASK);
+-	setbits32(&cp->cp_sicr, SICR_ENET_CLKRT);
+-
+-	/* In the original SCC enet driver the following code is placed at
+-	   the end of the initialization */
+-        setbits32(&cp->cp_pbpar, PB_ENET_TENA);
+-        setbits32(&cp->cp_pbdir, PB_ENET_TENA);
+-
+-	clrbits32(bcsr_io+1, BCSR1_ETHEN);
++	clrbits32(bcsr_io, BCSR1_RS232EN_1 | BCSR1_RS232EN_2 | BCSR1_ETHEN);
+ 	iounmap(bcsr_io);
+-	immr_unmap(cp);
+-	immr_unmap(io_port);
+ }
+ 
+-void init_scc_ioports(struct fs_platform_info *fpi)
+-{
+-	int scc_no = fs_get_scc_index(fpi->fs_no);
+-
+-	switch (scc_no) {
+-	case 0:
+-		init_scc1_ioports(fpi);
+-		break;
+-	default:
+-		printk(KERN_ERR "init_scc_ioports: invalid SCC number\n");
+-		return;
+-	}
+-}
+-
+-
+-
+-static void init_smc1_uart_ioports(struct fs_uart_platform_info* ptr)
++static int __init mpc86xads_probe(void)
+ {
+-        unsigned *bcsr_io;
+-	cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm);
+-
+-	setbits32(&cp->cp_pbpar, 0x000000c0);
+-	clrbits32(&cp->cp_pbdir, 0x000000c0);
+-	clrbits16(&cp->cp_pbodr, 0x00c0);
+-	immr_unmap(cp);
+-
+-        bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+-
+-        if (bcsr_io == NULL) {
+-                printk(KERN_CRIT "Could not remap BCSR1\n");
+-                return;
+-        }
+-        clrbits32(bcsr_io,BCSR1_RS232EN_1);
+-        iounmap(bcsr_io);
++	unsigned long root = of_get_flat_dt_root();
++	return of_flat_dt_is_compatible(root, "fsl,mpc866ads");
+ }
+ 
+-static void init_smc2_uart_ioports(struct fs_uart_platform_info* fpi)
+-{
+-        unsigned *bcsr_io;
+-	cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm);
+-
+-	setbits32(&cp->cp_pbpar, 0x00000c00);
+-	clrbits32(&cp->cp_pbdir, 0x00000c00);
+-	clrbits16(&cp->cp_pbodr, 0x0c00);
+-	immr_unmap(cp);
+-
+-        bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+-
+-        if (bcsr_io == NULL) {
+-                printk(KERN_CRIT "Could not remap BCSR1\n");
+-                return;
+-        }
+-        clrbits32(bcsr_io,BCSR1_RS232EN_2);
+-        iounmap(bcsr_io);
+-}
++static struct of_device_id __initdata of_bus_ids[] = {
++	{ .name = "soc", },
++	{ .name = "cpm", },
++	{ .name = "localbus", },
++	{},
++};
+ 
+-void init_smc_ioports(struct fs_uart_platform_info *data)
++static int __init declare_of_platform_devices(void)
+ {
+-	int smc_no = fs_uart_id_fsid2smc(data->fs_no);
+-
+-	switch (smc_no) {
+-	case 0:
+-		init_smc1_uart_ioports(data);
+-		data->brg = data->clk_rx;
+-		break;
+-	case 1:
+-		init_smc2_uart_ioports(data);
+-		data->brg = data->clk_rx;
+-		break;
+-	default:
+-		printk(KERN_ERR "init_scc_ioports: invalid SCC number\n");
+-		return;
+-	}
+-}
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
+ 
+-int platform_device_skip(const char *model, int id)
+-{
+ 	return 0;
+ }
+-
+-static void __init mpc86xads_setup_arch(void)
+-{
+-	cpm_reset();
+-
+-	mpc86xads_board_setup();
+-
+-	ROOT_DEV = Root_NFS;
+-}
+-
+-static int __init mpc86xads_probe(void)
+-{
+-	char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+-					  "model", NULL);
+-	if (model == NULL)
+-		return 0;
+-	if (strcmp(model, "MPC866ADS"))
+-		return 0;
+-
+-	return 1;
+-}
++machine_device_initcall(mpc86x_ads, declare_of_platform_devices);
+ 
+ define_machine(mpc86x_ads) {
+ 	.name			= "MPC86x ADS",
+ 	.probe			= mpc86xads_probe,
+ 	.setup_arch		= mpc86xads_setup_arch,
+-	.init_IRQ		= m8xx_pic_init,
++	.init_IRQ		= mpc8xx_pics_init,
+ 	.get_irq		= mpc8xx_get_irq,
+ 	.restart		= mpc8xx_restart,
+ 	.calibrate_decr		= mpc8xx_calibrate_decr,
+ 	.set_rtc_time		= mpc8xx_set_rtc_time,
+ 	.get_rtc_time		= mpc8xx_get_rtc_time,
++	.progress		= udbg_progress,
+ };
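
The mpc86xads conversion above drops the hardcoded BCSR_ADDR/ioremap() constants from the header and instead looks the board-control registers up in the device tree. Reduced to its core, the idiom looks like the sketch below; the "acme,board-bcsr" compatible and the ACME_BCSR_EXAMPLE bit are placeholders, not values from this patch:

#include <linux/init.h>
#include <linux/of.h>

#include <asm/io.h>
#include <asm/prom.h>

#define ACME_BCSR_EXAMPLE	0x00000001	/* placeholder bit, not a real BCSR flag */

static void __init acme_clear_bcsr_bit(void)
{
	struct device_node *np;
	u32 __iomem *bcsr;

	/* Find the board-control node by compatible instead of a fixed address. */
	np = of_find_compatible_node(NULL, NULL, "acme,board-bcsr");
	if (!np)
		return;

	/* Map whatever the node's "reg" property points at. */
	bcsr = of_iomap(np, 0);
	of_node_put(np);
	if (!bcsr)
		return;

	clrbits32(bcsr, ACME_BCSR_EXAMPLE);
	iounmap(bcsr);
}
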
+diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+index 2cf1b6a..6e7ded0 100644
+--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
++++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+@@ -36,11 +36,12 @@
+ #include <asm/time.h>
+ #include <asm/mpc8xx.h>
+ #include <asm/8xx_immap.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #include <asm/fs_pd.h>
+ #include <asm/udbg.h>
+ 
+-#include <sysdev/commproc.h>
++#include "mpc885ads.h"
++#include "mpc8xx.h"
+ 
+ static u32 __iomem *bcsr, *bcsr5;
+ 
+@@ -264,18 +265,17 @@ static struct of_device_id __initdata of_bus_ids[] = {
+ static int __init declare_of_platform_devices(void)
+ {
+ 	/* Publish the QE devices */
+-	if (machine_is(mpc885_ads))
+-		of_platform_bus_probe(NULL, of_bus_ids, NULL);
++	of_platform_bus_probe(NULL, of_bus_ids, NULL);
+ 
+ 	return 0;
+ }
+-device_initcall(declare_of_platform_devices);
++machine_device_initcall(mpc885_ads, declare_of_platform_devices);
+ 
+ define_machine(mpc885_ads) {
+ 	.name			= "Freescale MPC885 ADS",
+ 	.probe			= mpc885ads_probe,
+ 	.setup_arch		= mpc885ads_setup_arch,
+-	.init_IRQ		= m8xx_pic_init,
++	.init_IRQ		= mpc8xx_pics_init,
+ 	.get_irq		= mpc8xx_get_irq,
+ 	.restart		= mpc8xx_restart,
+ 	.calibrate_decr		= mpc8xx_calibrate_decr,
+diff --git a/arch/powerpc/platforms/8xx/mpc8xx.h b/arch/powerpc/platforms/8xx/mpc8xx.h
+new file mode 100644
+index 0000000..239a243
+--- /dev/null
++++ b/arch/powerpc/platforms/8xx/mpc8xx.h
+@@ -0,0 +1,21 @@
++/*
++ * Prototypes, etc. for the Freescale MPC8xx embedded cpu chips
++ * May need to be cleaned as the port goes on ...
++ *
++ * Copyright (C) 2008 Jochen Friedrich <jochen at scram.de>
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++#ifndef __MPC8xx_H
++#define __MPC8xx_H
++
++extern void mpc8xx_restart(char *cmd);
++extern void mpc8xx_calibrate_decr(void);
++extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
++extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
++extern void mpc8xx_pics_init(void);
++extern unsigned int mpc8xx_get_irq(void);
++
++#endif /* __MPC8xx_H */
+diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
+index ea22cad..fdce10c 100644
+--- a/arch/powerpc/platforms/Kconfig
++++ b/arch/powerpc/platforms/Kconfig
+@@ -21,7 +21,8 @@ config PPC_83xx
+ 	bool "Freescale 83xx"
+ 	depends on 6xx
+ 	select FSL_SOC
+-	select 83xx
++	select MPC83xx
++	select IPIC
+ 	select WANT_DEVICE_TREE
+ 
+ config PPC_86xx
+@@ -80,6 +81,10 @@ config XICS
+ 	bool
+ 	default y
+ 
++config IPIC
++	bool
++	default n
++
+ config MPIC
+ 	bool
+ 	default n
+@@ -265,6 +270,7 @@ config TAU_AVERAGE
+ config QUICC_ENGINE
+ 	bool
+ 	select PPC_LIB_RHEAP
++	select CRC32
+ 	help
+ 	  The QUICC Engine (QE) is a new generation of communications
+ 	  coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
+@@ -272,8 +278,8 @@ config QUICC_ENGINE
+ 	  for a machine with a QE coprocessor.
+ 
+ config CPM2
+-	bool
+-	default n
++	bool "Enable support for the CPM2 (Communications Processor Module)"
++	depends on MPC85xx || 8260
+ 	select CPM
+ 	select PPC_LIB_RHEAP
+ 	help
+@@ -315,6 +321,12 @@ config FSL_ULI1575
+ config CPM
+ 	bool
+ 
++config OF_RTC
++	bool
++	help
++	  Uses information from the OF or flattened device tree to instantiate
++	  platform devices for direct mapped RTC chips like the DS1742 or DS1743.
++
+ source "arch/powerpc/sysdev/bestcomm/Kconfig"
+ 
+ endmenu
+diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
+index 99684ea..7fc4110 100644
+--- a/arch/powerpc/platforms/Kconfig.cputype
++++ b/arch/powerpc/platforms/Kconfig.cputype
+@@ -29,8 +29,8 @@ config PPC_85xx
+ 	bool "Freescale 85xx"
+ 	select E500
+ 	select FSL_SOC
+-	select 85xx
+ 	select WANT_DEVICE_TREE
++	select MPC85xx
+ 
+ config PPC_8xx
+ 	bool "Freescale 8xx"
+@@ -43,6 +43,7 @@ config 40x
+ 	bool "AMCC 40x"
+ 	select PPC_DCR_NATIVE
+ 	select WANT_DEVICE_TREE
++	select PPC_UDBG_16550
+ 
+ config 44x
+ 	bool "AMCC 44x"
+@@ -92,14 +93,6 @@ config 6xx
+ config 8xx
+ 	bool
+ 
+-# this is temp to handle compat with arch=ppc
+-config 83xx
+-	bool
+-
+-# this is temp to handle compat with arch=ppc
+-config 85xx
+-	bool
+-
+ config E500
+ 	bool
+ 
+diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
+index 39d695c..c89964c 100644
+--- a/arch/powerpc/platforms/cell/Makefile
++++ b/arch/powerpc/platforms/cell/Makefile
+@@ -20,7 +20,7 @@ spu-manage-$(CONFIG_PPC_CELL_NATIVE)	+= spu_manage.o
+ 
+ obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
+ 					   spu_notify.o \
+-					   spu_syscalls.o \
++					   spu_syscalls.o spu_fault.o \
+ 					   $(spu-priv1-y) \
+ 					   $(spu-manage-y) \
+ 					   spufs/
+diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
+index 13d5a87..ec7c8f4 100644
+--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
++++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
+@@ -21,8 +21,9 @@
+  */
+ 
+ #include <linux/cpufreq.h>
++#include <linux/of_platform.h>
++
+ #include <asm/machdep.h>
+-#include <asm/of_platform.h>
+ #include <asm/prom.h>
+ #include <asm/cell-regs.h>
+ #include "cbe_cpufreq.h"
+diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+index 6a2c1b0..69288f6 100644
+--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
++++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+@@ -23,7 +23,8 @@
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <linux/timer.h>
+-#include <asm/of_platform.h>
++#include <linux/of_platform.h>
++
+ #include <asm/processor.h>
+ #include <asm/prom.h>
+ #include <asm/pmi.h>
+diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
+index 16a9b07..dbc338f 100644
+--- a/arch/powerpc/platforms/cell/cbe_regs.c
++++ b/arch/powerpc/platforms/cell/cbe_regs.c
+@@ -9,13 +9,13 @@
+ #include <linux/percpu.h>
+ #include <linux/types.h>
+ #include <linux/module.h>
++#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/io.h>
+ #include <asm/pgtable.h>
+ #include <asm/prom.h>
+ #include <asm/ptrace.h>
+-#include <asm/of_device.h>
+-#include <asm/of_platform.h>
+ #include <asm/cell-regs.h>
+ 
+ /*
+@@ -256,6 +256,7 @@ void __init cbe_regs_init(void)
+ 			printk(KERN_ERR "cbe_regs: More BE chips than supported"
+ 			       "!\n");
+ 			cbe_regs_map_count--;
++			of_node_put(cpu);
+ 			return;
+ 		}
+ 		map->cpu_node = cpu;
+diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
+index 9d7c2ef..979d4b6 100644
+--- a/arch/powerpc/platforms/cell/io-workarounds.c
++++ b/arch/powerpc/platforms/cell/io-workarounds.c
+@@ -238,7 +238,7 @@ static void __init spider_pci_setup_chip(struct spider_pci_bus *bus)
+ static void __init spider_pci_add_one(struct pci_controller *phb)
+ {
+ 	struct spider_pci_bus *bus = &spider_pci_busses[spider_pci_count];
+-	struct device_node *np = phb->arch_data;
++	struct device_node *np = phb->dn;
+ 	struct resource rsrc;
+ 	void __iomem *regs;
+ 
+@@ -309,15 +309,12 @@ static int __init spider_pci_workaround_init(void)
+ {
+ 	struct pci_controller *phb;
+ 
+-	if (!machine_is(cell))
+-		return 0;
+-
+ 	/* Find spider bridges. We assume they have been all probed
+ 	 * in setup_arch(). If that was to change, we would need to
+ 	 * update this code to cope with dynamically added busses
+ 	 */
+ 	list_for_each_entry(phb, &hose_list, list_node) {
+-		struct device_node *np = phb->arch_data;
++		struct device_node *np = phb->dn;
+ 		const char *model = of_get_property(np, "model", NULL);
+ 
+ 		/* If no model property or name isn't exactly "pci", skip */
+@@ -343,4 +340,4 @@ static int __init spider_pci_workaround_init(void)
+ 
+ 	return 0;
+ }
+-arch_initcall(spider_pci_workaround_init);
++machine_arch_initcall(cell, spider_pci_workaround_init);
+diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
+index faabc3f..df33066 100644
+--- a/arch/powerpc/platforms/cell/iommu.c
++++ b/arch/powerpc/platforms/cell/iommu.c
+@@ -1,7 +1,7 @@
+ /*
+  * IOMMU implementation for Cell Broadband Processor Architecture
+  *
+- * (C) Copyright IBM Corporation 2006
++ * (C) Copyright IBM Corporation 2006-2008
+  *
+  * Author: Jeremy Kerr <jk at ozlabs.org>
+  *
+@@ -26,14 +26,15 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/notifier.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/prom.h>
+ #include <asm/iommu.h>
+ #include <asm/machdep.h>
+ #include <asm/pci-bridge.h>
+ #include <asm/udbg.h>
+-#include <asm/of_platform.h>
+ #include <asm/lmb.h>
++#include <asm/firmware.h>
+ #include <asm/cell-regs.h>
+ 
+ #include "interrupt.h"
+@@ -305,29 +306,28 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
+ 	return -ENODEV;
+ }
+ 
+-static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long size)
++static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
++				unsigned long dbase, unsigned long dsize,
++				unsigned long fbase, unsigned long fsize)
+ {
+ 	struct page *page;
+-	int ret, i;
+-	unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages;
+-	unsigned long xlate_base;
+-	unsigned int virq;
+-
+-	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
+-		panic("%s: missing IOC register mappings for node %d\n",
+-		      __FUNCTION__, iommu->nid);
++	int i;
++	unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
++		      n_pte_pages, base;
+ 
+-	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
+-	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
++	base = dbase;
++	if (fsize != 0)
++		base = min(fbase, dbase);
+ 
+-	segments = size >> IO_SEGMENT_SHIFT;
++	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
+ 	pages_per_segment = 1ull << IO_PAGENO_BITS;
+ 
+ 	pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
+ 			__FUNCTION__, iommu->nid, segments, pages_per_segment);
+ 
+ 	/* set up the segment table */
+-	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
++	stab_size = segments * sizeof(unsigned long);
++	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
+ 	BUG_ON(!page);
+ 	iommu->stab = page_address(page);
+ 	clear_page(iommu->stab);
+@@ -371,11 +371,25 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long siz
+ 	}
+ 
+ 	pr_debug("Setting up IOMMU stab:\n");
+-	for (i = 0; i * (1ul << IO_SEGMENT_SHIFT) < size; i++) {
++	for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
+ 		iommu->stab[i] = reg |
+ 			(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+ 		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
+ 	}
++}
++
++static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
++{
++	int ret;
++	unsigned long reg, xlate_base;
++	unsigned int virq;
++
++	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
++		panic("%s: missing IOC register mappings for node %d\n",
++		      __FUNCTION__, iommu->nid);
++
++	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
++	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
+ 
+ 	/* ensure that the STEs have updated */
+ 	mb();
+@@ -405,6 +419,13 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long siz
+ 	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
+ }
+ 
++static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
++	unsigned long base, unsigned long size)
++{
++	cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
++	cell_iommu_enable_hardware(iommu);
++}
++
+ #if 0/* Unused for now */
+ static struct iommu_window *find_window(struct cbe_iommu *iommu,
+ 		unsigned long offset, unsigned long size)
+@@ -422,25 +443,36 @@ static struct iommu_window *find_window(struct cbe_iommu *iommu,
+ }
+ #endif
+ 
++static inline u32 cell_iommu_get_ioid(struct device_node *np)
++{
++	const u32 *ioid;
++
++	ioid = of_get_property(np, "ioid", NULL);
++	if (ioid == NULL) {
++		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
++		       np->full_name);
++		return 0;
++	}
++
++	return *ioid;
++}
++
+ static struct iommu_window * __init
+ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
+ 			unsigned long offset, unsigned long size,
+ 			unsigned long pte_offset)
+ {
+ 	struct iommu_window *window;
+-	const unsigned int *ioid;
++	u32 ioid;
+ 
+-	ioid = of_get_property(np, "ioid", NULL);
+-	if (ioid == NULL)
+-		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
+-		       np->full_name);
++	ioid = cell_iommu_get_ioid(np);
+ 
+ 	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+ 	BUG_ON(window == NULL);
+ 
+ 	window->offset = offset;
+ 	window->size = size;
+-	window->ioid = ioid ? *ioid : 0;
++	window->ioid = ioid;
+ 	window->iommu = iommu;
+ 	window->pte_offset = pte_offset;
+ 
+@@ -489,16 +521,17 @@ static struct cbe_iommu *cell_iommu_for_node(int nid)
+ 	return NULL;
+ }
+ 
+-static void cell_dma_dev_setup(struct device *dev)
++static unsigned long cell_dma_direct_offset;
++
++static unsigned long dma_iommu_fixed_base;
++struct dma_mapping_ops dma_iommu_fixed_ops;
++
++static void cell_dma_dev_setup_iommu(struct device *dev)
+ {
+ 	struct iommu_window *window;
+ 	struct cbe_iommu *iommu;
+ 	struct dev_archdata *archdata = &dev->archdata;
+ 
+-	/* If we run without iommu, no need to do anything */
+-	if (get_pci_dma_ops() == &dma_direct_ops)
+-		return;
+-
+ 	/* Current implementation uses the first window available in that
+ 	 * node's iommu. We -might- do something smarter later though it may
+ 	 * never be necessary
+@@ -515,6 +548,23 @@ static void cell_dma_dev_setup(struct device *dev)
+ 	archdata->dma_data = &window->table;
+ }
+ 
++static void cell_dma_dev_setup_static(struct device *dev);
++
++static void cell_dma_dev_setup(struct device *dev)
++{
++	struct dev_archdata *archdata = &dev->archdata;
++
++	/* Order is important here, these are not mutually exclusive */
++	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
++		cell_dma_dev_setup_static(dev);
++	else if (get_pci_dma_ops() == &dma_iommu_ops)
++		cell_dma_dev_setup_iommu(dev);
++	else if (get_pci_dma_ops() == &dma_direct_ops)
++		archdata->dma_data = (void *)cell_dma_direct_offset;
++	else
++		BUG();
++}
++
+ static void cell_pci_dma_dev_setup(struct pci_dev *dev)
+ {
+ 	cell_dma_dev_setup(&dev->dev);
+@@ -560,10 +610,9 @@ static int __init cell_iommu_get_window(struct device_node *np,
+ 	return 0;
+ }
+ 
+-static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset)
++static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
+ {
+ 	struct cbe_iommu *iommu;
+-	unsigned long base, size;
+ 	int nid, i;
+ 
+ 	/* Get node ID */
+@@ -571,7 +620,7 @@ static void __init cell_iommu_init_one(struct device_node *np, unsigned long off
+ 	if (nid < 0) {
+ 		printk(KERN_ERR "iommu: failed to get node for %s\n",
+ 		       np->full_name);
+-		return;
++		return NULL;
+ 	}
+ 	pr_debug("iommu: setting up iommu for node %d (%s)\n",
+ 		 nid, np->full_name);
+@@ -587,7 +636,7 @@ static void __init cell_iommu_init_one(struct device_node *np, unsigned long off
+ 	if (cbe_nr_iommus >= NR_IOMMUS) {
+ 		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
+ 		       np->full_name);
+-		return;
++		return NULL;
+ 	}
+ 
+ 	/* Init base fields */
+@@ -598,6 +647,19 @@ static void __init cell_iommu_init_one(struct device_node *np, unsigned long off
+ 	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
+ 	INIT_LIST_HEAD(&iommu->windows);
+ 
++	return iommu;
++}
++
++static void __init cell_iommu_init_one(struct device_node *np,
++				       unsigned long offset)
++{
++	struct cbe_iommu *iommu;
++	unsigned long base, size;
++
++	iommu = cell_iommu_alloc(np);
++	if (!iommu)
++		return;
++
+ 	/* Obtain a window for it */
+ 	cell_iommu_get_window(np, &base, &size);
+ 
+@@ -605,7 +667,7 @@ static void __init cell_iommu_init_one(struct device_node *np, unsigned long off
+ 		 base, base + size - 1);
+ 
+ 	/* Initialize the hardware */
+-	cell_iommu_setup_hardware(iommu, size);
++	cell_iommu_setup_hardware(iommu, base, size);
+ 
+ 	/* Setup the iommu_table */
+ 	cell_iommu_setup_window(iommu, np, base, size,
+@@ -653,7 +715,7 @@ static int __init cell_iommu_init_disabled(void)
+ 
+ 	/* If we have no Axon, we set up the spider DMA magic offset */
+ 	if (of_find_node_by_name(NULL, "axon") == NULL)
+-		dma_direct_offset = SPIDER_DMA_OFFSET;
++		cell_dma_direct_offset = SPIDER_DMA_OFFSET;
+ 
+ 	/* Now we need to check to see where the memory is mapped
+ 	 * in PCI space. We assume that all busses use the same dma
+@@ -687,20 +749,274 @@ static int __init cell_iommu_init_disabled(void)
+ 		return -ENODEV;
+ 	}
+ 
+-	dma_direct_offset += base;
++	cell_dma_direct_offset += base;
++
++	if (cell_dma_direct_offset != 0)
++		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ 
+ 	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
+-	       dma_direct_offset);
++	       cell_dma_direct_offset);
+ 
+ 	return 0;
+ }
+ 
+-static int __init cell_iommu_init(void)
++/*
++ *  Fixed IOMMU mapping support
++ *
++ *  This code adds support for setting up a fixed IOMMU mapping on certain
++ *  cell machines. For 64-bit devices this avoids the performance overhead of
++ *  mapping and unmapping pages at runtime. 32-bit devices are unable to use
++ *  the fixed mapping.
++ *
++ *  The fixed mapping is established at boot, and maps all of physical memory
++ *  1:1 into device space at some offset. On machines with < 30 GB of memory
++ *  we setup the fixed mapping immediately above the normal IOMMU window.
++ *
++ *  For example a machine with 4GB of memory would end up with the normal
++ *  IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
++ *  this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
++ *  3GB, plus any offset required by firmware. The firmware offset is encoded
++ *  in the "dma-ranges" property.
++ *
++ *  On machines with 30GB or more of memory, we are unable to place the fixed
++ *  mapping above the normal IOMMU window as we would run out of address space.
++ *  Instead we move the normal IOMMU window to coincide with the hash page
++ *  table, this region does not need to be part of the fixed mapping as no
++ *  device should ever be DMA'ing to it. We then setup the fixed mapping
++ *  from 0 to 32GB.
++ */
++
++static u64 cell_iommu_get_fixed_address(struct device *dev)
+ {
++	u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR;
++	struct device_node *tmp, *np;
++	const u32 *ranges = NULL;
++	int i, len, best;
++
++	np = dev->archdata.of_node;
++	of_node_get(np);
++	ranges = of_get_property(np, "dma-ranges", &len);
++	while (!ranges && np) {
++		tmp = of_get_parent(np);
++		of_node_put(np);
++		np = tmp;
++		ranges = of_get_property(np, "dma-ranges", &len);
++	}
++
++	if (!ranges) {
++		dev_dbg(dev, "iommu: no dma-ranges found\n");
++		goto out;
++	}
++
++	len /= sizeof(u32);
++
++	/* dma-ranges format:
++	 * 1 cell:  pci space
++	 * 2 cells: pci address
++	 * 2 cells: parent address
++	 * 2 cells: size
++	 */
++	for (i = 0, best = -1, best_size = 0; i < len; i += 7) {
++		cpu_addr = of_translate_dma_address(np, ranges +i + 3);
++		size = of_read_number(ranges + i + 5, 2);
++
++		if (cpu_addr == 0 && size > best_size) {
++			best = i;
++			best_size = size;
++		}
++	}
++
++	if (best >= 0)
++		pci_addr = of_read_number(ranges + best + 1, 2);
++	else
++		dev_dbg(dev, "iommu: no suitable range found!\n");
++
++out:
++	of_node_put(np);
++
++	return pci_addr;
++}
++
++static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
++{
++	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
++		return -EIO;
++
++	if (dma_mask == DMA_BIT_MASK(64)) {
++		if (cell_iommu_get_fixed_address(dev) == OF_BAD_ADDR)
++			dev_dbg(dev, "iommu: 64-bit OK, but bad addr\n");
++		else {
++			dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
++			set_dma_ops(dev, &dma_iommu_fixed_ops);
++			cell_dma_dev_setup(dev);
++		}
++	} else {
++		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
++		set_dma_ops(dev, get_pci_dma_ops());
++	}
++
++	*dev->dma_mask = dma_mask;
++
++	return 0;
++}
++
++static void cell_dma_dev_setup_static(struct device *dev)
++{
++	struct dev_archdata *archdata = &dev->archdata;
++	u64 addr;
++
++	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
++	archdata->dma_data = (void *)addr;
++
++	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
++}
++
++static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
++	struct device_node *np, unsigned long dbase, unsigned long dsize,
++	unsigned long fbase, unsigned long fsize)
++{
++	unsigned long base_pte, uaddr, *io_pte;
++	int i;
++
++	dma_iommu_fixed_base = fbase;
++
++	/* convert from bytes into page table indices */
++	dbase = dbase >> IOMMU_PAGE_SHIFT;
++	dsize = dsize >> IOMMU_PAGE_SHIFT;
++	fbase = fbase >> IOMMU_PAGE_SHIFT;
++	fsize = fsize >> IOMMU_PAGE_SHIFT;
++
++	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
++
++	io_pte = iommu->ptab;
++	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
++		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
++
++	uaddr = 0;
++	for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
++		/* Don't touch the dynamic region */
++		if (i >= dbase && i < (dbase + dsize)) {
++			pr_debug("iommu: static/dynamic overlap, skipping\n");
++			continue;
++		}
++		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
++	}
++
++	mb();
++}
++
++static int __init cell_iommu_fixed_mapping_init(void)
++{
++	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
++	struct cbe_iommu *iommu;
+ 	struct device_node *np;
+ 
+-	if (!machine_is(cell))
+-		return -ENODEV;
++	/* The fixed mapping is only supported on axon machines */
++	np = of_find_node_by_name(NULL, "axon");
++	if (!np) {
++		pr_debug("iommu: fixed mapping disabled, no axons found\n");
++		return -1;
++	}
++
++	/* The default setup is to have the fixed mapping sit after the
++	 * dynamic region, so find the top of the largest IOMMU window
++	 * on any axon, then add the size of RAM and that's our max value.
++	 * If that is > 32GB we have to do other shenanigans.
++	 */
++	fbase = 0;
++	for_each_node_by_name(np, "axon") {
++		cell_iommu_get_window(np, &dbase, &dsize);
++		fbase = max(fbase, dbase + dsize);
++	}
++
++	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
++	fsize = lmb_phys_mem_size();
++
++	if ((fbase + fsize) <= 0x800000000)
++		hbase = 0; /* use the device tree window */
++	else {
++		/* If we're over 32 GB we need to cheat. We can't map all of
++		 * RAM with the fixed mapping, and also fit the dynamic
++		 * region. So try to place the dynamic region where the hash
++		 * table sits, drivers never need to DMA to it, we don't
++		 * need a fixed mapping for that area.
++		 */
++		if (!htab_address) {
++			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
++			return -1;
++		}
++		hbase = __pa(htab_address);
++		hend  = hbase + htab_size_bytes;
++
++		/* The window must start and end on a segment boundary */
++		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
++		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
++			pr_debug("iommu: hash window not segment aligned\n");
++			return -1;
++		}
++
++		/* Check the hash window fits inside the real DMA window */
++		for_each_node_by_name(np, "axon") {
++			cell_iommu_get_window(np, &dbase, &dsize);
++
++			if (hbase < dbase || (hend > (dbase + dsize))) {
++				pr_debug("iommu: hash window doesn't fit in "
++					 "real DMA window\n");
++				return -1;
++			}
++		}
++
++		fbase = 0;
++	}
++
++	/* Setup the dynamic regions */
++	for_each_node_by_name(np, "axon") {
++		iommu = cell_iommu_alloc(np);
++		BUG_ON(!iommu);
++
++		if (hbase == 0)
++			cell_iommu_get_window(np, &dbase, &dsize);
++		else {
++			dbase = hbase;
++			dsize = htab_size_bytes;
++		}
++
++		pr_debug("iommu: setting up %d, dynamic window %lx-%lx " \
++			 "fixed window %lx-%lx\n", iommu->nid, dbase,
++			 dbase + dsize, fbase, fbase + fsize);
++
++		cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
++		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
++					     fbase, fsize);
++		cell_iommu_enable_hardware(iommu);
++		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
++	}
++
++	dma_iommu_fixed_ops = dma_direct_ops;
++	dma_iommu_fixed_ops.set_dma_mask = dma_set_mask_and_switch;
++
++	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
++	set_pci_dma_ops(&dma_iommu_ops);
++
++	printk(KERN_DEBUG "IOMMU fixed mapping established.\n");
++
++	return 0;
++}
++
++static int iommu_fixed_disabled;
++
++static int __init setup_iommu_fixed(char *str)
++{
++	if (strcmp(str, "off") == 0)
++		iommu_fixed_disabled = 1;
++
++	return 1;
++}
++__setup("iommu_fixed=", setup_iommu_fixed);
++
++static int __init cell_iommu_init(void)
++{
++	struct device_node *np;
+ 
+ 	/* If IOMMU is disabled or we have little enough RAM to not need
+ 	 * to enable it, we setup a direct mapping.
+@@ -717,6 +1033,9 @@ static int __init cell_iommu_init(void)
+ 	ppc_md.tce_build = tce_build_cell;
+ 	ppc_md.tce_free = tce_free_cell;
+ 
++	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
++		goto bail;
++
+ 	/* Create an iommu for each /axon node.  */
+ 	for_each_node_by_name(np, "axon") {
+ 		if (np->parent == NULL || np->parent->parent != NULL)
+@@ -744,5 +1063,6 @@ static int __init cell_iommu_init(void)
+ 
+ 	return 0;
+ }
+-arch_initcall(cell_iommu_init);
++machine_arch_initcall(cell, cell_iommu_init);
++machine_arch_initcall(celleb_native, cell_iommu_init);
+ 
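
The comment block introducing the fixed mapping gives a concrete layout: with 4GB of RAM and a 2GB dynamic window, the fixed window covers bus addresses 2GB to 6GB, so a 64-bit device asked to DMA to CPU address 1GB issues the cycle at 3GB, plus whatever offset the firmware encodes in dma-ranges. The stand-alone sketch below walks a made-up 7-cell dma-ranges entry the same way cell_iommu_get_fixed_address() does (address translation through the parent nodes is skipped, and the firmware PCI offset in this example is zero):

#include <stdio.h>
#include <stdint.h>

/* Combine "size" 32-bit cells into one number, like of_read_number(). */
static uint64_t read_number(const uint32_t *cells, int size)
{
	uint64_t r = 0;

	while (size--)
		r = (r << 32) | *cells++;
	return r;
}

int main(void)
{
	/* Made-up dma-ranges: pci-space (1), pci-addr (2), parent-addr (2), size (2). */
	uint32_t ranges[] = {
		0x02000000, 0x0, 0x00000000, 0x0, 0x0, 0x1, 0x00000000, /* 4GB at cpu 0 */
	};
	int len = sizeof(ranges) / sizeof(ranges[0]);
	uint64_t best_size = 0, pci_addr = 0;
	uint64_t dyn_window = 0x80000000ull;	/* 2GB dynamic IOMMU window */

	for (int i = 0; i < len; i += 7) {
		uint64_t cpu_addr = read_number(ranges + i + 3, 2);
		uint64_t size = read_number(ranges + i + 5, 2);

		/* Keep the largest range that starts at CPU address 0, as the kernel does. */
		if (cpu_addr == 0 && size > best_size) {
			best_size = size;
			pci_addr = read_number(ranges + i + 1, 2);
		}
	}

	/* Fixed mapping sits above the dynamic window: bus = pci_addr + window + cpu. */
	printf("DMA to cpu 0x%llx goes out as bus 0x%llx\n",
	       0x40000000ull,
	       (unsigned long long)(pci_addr + dyn_window + 0x40000000ull));
	return 0;
}
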
+diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
+index 1ed3036..69ed0d7 100644
+--- a/arch/powerpc/platforms/cell/pmu.c
++++ b/arch/powerpc/platforms/cell/pmu.c
+@@ -213,7 +213,7 @@ u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
+ 		break;
+ 
+ 	case pm_interval:
+-		READ_SHADOW_REG(val, pm_interval);
++		READ_MMIO_UPPER32(val, pm_interval);
+ 		break;
+ 
+ 	case pm_start_stop:
+@@ -381,9 +381,6 @@ static int __init cbe_init_pm_irq(void)
+ 	unsigned int irq;
+ 	int rc, node;
+ 
+-	if (!machine_is(cell))
+-		return 0;
+-
+ 	for_each_node(node) {
+ 		irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
+ 					       (node << IIC_IRQ_NODE_SHIFT));
+@@ -404,7 +401,7 @@ static int __init cbe_init_pm_irq(void)
+ 
+ 	return 0;
+ }
+-arch_initcall(cbe_init_pm_irq);
++machine_arch_initcall(cell, cbe_init_pm_irq);
+ 
+ void cbe_sync_irq(int node)
+ {
+diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
+index 98e7ef8..e6534b5 100644
+--- a/arch/powerpc/platforms/cell/setup.c
++++ b/arch/powerpc/platforms/cell/setup.c
+@@ -30,6 +30,7 @@
+ #include <linux/console.h>
+ #include <linux/mutex.h>
+ #include <linux/memory_hotplug.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/mmu.h>
+ #include <asm/processor.h>
+@@ -51,7 +52,6 @@
+ #include <asm/spu_priv1.h>
+ #include <asm/udbg.h>
+ #include <asm/mpic.h>
+-#include <asm/of_platform.h>
+ #include <asm/cell-regs.h>
+ 
+ #include "interrupt.h"
+@@ -85,9 +85,6 @@ static int __init cell_publish_devices(void)
+ {
+ 	int node;
+ 
+-	if (!machine_is(cell))
+-		return 0;
+-
+ 	/* Publish OF platform devices for southbridge IOs */
+ 	of_platform_bus_probe(NULL, NULL, NULL);
+ 
+@@ -101,7 +98,7 @@ static int __init cell_publish_devices(void)
+ 	}
+ 	return 0;
+ }
+-device_initcall(cell_publish_devices);
++machine_device_initcall(cell, cell_publish_devices);
+ 
+ static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
+ {
+diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
+index e443845..efb3964 100644
+--- a/arch/powerpc/platforms/cell/smp.c
++++ b/arch/powerpc/platforms/cell/smp.c
+@@ -42,6 +42,7 @@
+ #include <asm/firmware.h>
+ #include <asm/system.h>
+ #include <asm/rtas.h>
++#include <asm/cputhreads.h>
+ 
+ #include "interrupt.h"
+ #include <asm/udbg.h>
+@@ -182,7 +183,7 @@ static int smp_cell_cpu_bootable(unsigned int nr)
+ 	 */
+ 	if (system_state < SYSTEM_RUNNING &&
+ 	    cpu_has_feature(CPU_FTR_SMT) &&
+-	    !smt_enabled_at_boot && nr % 2 != 0)
++	    !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+ 		return 0;
+ 
+ 	return 1;
+diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
+index c83c3e3..e45cfa8 100644
+--- a/arch/powerpc/platforms/cell/spu_base.c
++++ b/arch/powerpc/platforms/cell/spu_base.c
+@@ -34,6 +34,7 @@
+ #include <linux/linux_logo.h>
+ #include <asm/spu.h>
+ #include <asm/spu_priv1.h>
++#include <asm/spu_csa.h>
+ #include <asm/xmon.h>
+ #include <asm/prom.h>
+ 
+@@ -47,6 +48,13 @@ struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
+ EXPORT_SYMBOL_GPL(cbe_spu_info);
+ 
+ /*
++ * The spufs fault-handling code needs to call force_sig_info to raise signals
++ * on DMA errors. Export it here to avoid general kernel-wide access to this
++ * function
++ */
++EXPORT_SYMBOL_GPL(force_sig_info);
++
++/*
+  * Protects cbe_spu_info and spu->number.
+  */
+ static DEFINE_SPINLOCK(spu_lock);
+@@ -66,6 +74,10 @@ static LIST_HEAD(spu_full_list);
+ static DEFINE_SPINLOCK(spu_full_list_lock);
+ static DEFINE_MUTEX(spu_full_list_mutex);
+ 
++struct spu_slb {
++	u64 esid, vsid;
++};
++
+ void spu_invalidate_slbs(struct spu *spu)
+ {
+ 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+@@ -114,40 +126,36 @@ void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
+ }
+ EXPORT_SYMBOL_GPL(spu_associate_mm);
+ 
+-static int __spu_trap_invalid_dma(struct spu *spu)
++int spu_64k_pages_available(void)
+ {
+-	pr_debug("%s\n", __FUNCTION__);
+-	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
+-	return 0;
++	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
+ }
++EXPORT_SYMBOL_GPL(spu_64k_pages_available);
+ 
+-static int __spu_trap_dma_align(struct spu *spu)
++static void spu_restart_dma(struct spu *spu)
+ {
+-	pr_debug("%s\n", __FUNCTION__);
+-	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
+-	return 0;
+-}
++	struct spu_priv2 __iomem *priv2 = spu->priv2;
+ 
+-static int __spu_trap_error(struct spu *spu)
+-{
+-	pr_debug("%s\n", __FUNCTION__);
+-	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
+-	return 0;
++	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
++		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+ }
+ 
+-static void spu_restart_dma(struct spu *spu)
++static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
+ {
+ 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+ 
+-	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
+-		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
++	pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
++			__func__, slbe, slb->vsid, slb->esid);
++
++	out_be64(&priv2->slb_index_W, slbe);
++	out_be64(&priv2->slb_vsid_RW, slb->vsid);
++	out_be64(&priv2->slb_esid_RW, slb->esid);
+ }
+ 
+ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+ {
+-	struct spu_priv2 __iomem *priv2 = spu->priv2;
+ 	struct mm_struct *mm = spu->mm;
+-	u64 esid, vsid, llp;
++	struct spu_slb slb;
+ 	int psize;
+ 
+ 	pr_debug("%s\n", __FUNCTION__);
+@@ -159,7 +167,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+ 		printk("%s: invalid access during switch!\n", __func__);
+ 		return 1;
+ 	}
+-	esid = (ea & ESID_MASK) | SLB_ESID_V;
++	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
+ 
+ 	switch(REGION_ID(ea)) {
+ 	case USER_REGION_ID:
+@@ -168,21 +176,21 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+ #else
+ 		psize = mm->context.user_psize;
+ #endif
+-		vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+-				SLB_VSID_USER;
++		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
++				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
+ 		break;
+ 	case VMALLOC_REGION_ID:
+ 		if (ea < VMALLOC_END)
+ 			psize = mmu_vmalloc_psize;
+ 		else
+ 			psize = mmu_io_psize;
+-		vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+-			SLB_VSID_KERNEL;
++		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
++				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
+ 		break;
+ 	case KERNEL_REGION_ID:
+ 		psize = mmu_linear_psize;
+-		vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+-			SLB_VSID_KERNEL;
++		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
++				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
+ 		break;
+ 	default:
+ 		/* Future: support kernel segments so that drivers
+@@ -191,11 +199,9 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+ 		pr_debug("invalid region access at %016lx\n", ea);
+ 		return 1;
+ 	}
+-	llp = mmu_psize_defs[psize].sllp;
++	slb.vsid |= mmu_psize_defs[psize].sllp;
+ 
+-	out_be64(&priv2->slb_index_W, spu->slb_replace);
+-	out_be64(&priv2->slb_vsid_RW, vsid | llp);
+-	out_be64(&priv2->slb_esid_RW, esid);
++	spu_load_slb(spu, spu->slb_replace, &slb);
+ 
+ 	spu->slb_replace++;
+ 	if (spu->slb_replace >= 8)
+@@ -225,13 +231,83 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
+ 		return 1;
+ 	}
+ 
++	spu->class_0_pending = 0;
+ 	spu->dar = ea;
+ 	spu->dsisr = dsisr;
+-	mb();
++
+ 	spu->stop_callback(spu);
++
++	return 0;
++}
++
++static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
++{
++	unsigned long ea = (unsigned long)addr;
++	u64 llp;
++
++	if (REGION_ID(ea) == KERNEL_REGION_ID)
++		llp = mmu_psize_defs[mmu_linear_psize].sllp;
++	else
++		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
++
++	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
++		SLB_VSID_KERNEL | llp;
++	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
++}
++
++/**
++ * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
++ * address @new_addr is present.
++ */
++static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
++		void *new_addr)
++{
++	unsigned long ea = (unsigned long)new_addr;
++	int i;
++
++	for (i = 0; i < nr_slbs; i++)
++		if (!((slbs[i].esid ^ ea) & ESID_MASK))
++			return 1;
++
+ 	return 0;
+ }
+ 
++/**
++ * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
++ * need to map both the context save area, and the save/restore code.
++ *
++ * Because the lscsa and code may cross segment boundaries, we check to see
++ * if mappings are required for the start and end of each range. We currently
++ * assume that the mappings are smaller than one segment - if not, something
++ * is seriously wrong.
++ */
++void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
++		void *code, int code_size)
++{
++	struct spu_slb slbs[4];
++	int i, nr_slbs = 0;
++	/* start and end addresses of both mappings */
++	void *addrs[] = {
++		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
++		code, code + code_size - 1
++	};
++
++	/* check the set of addresses, and create a new entry in the slbs array
++	 * if there isn't already a SLB for that address */
++	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
++		if (__slb_present(slbs, nr_slbs, addrs[i]))
++			continue;
++
++		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
++		nr_slbs++;
++	}
++
++	/* Add the set of SLBs */
++	for (i = 0; i < nr_slbs; i++)
++		spu_load_slb(spu, i, &slbs[i]);
++}
++EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
++
+ static irqreturn_t
+ spu_irq_class_0(int irq, void *data)
+ {
+@@ -240,12 +316,13 @@ spu_irq_class_0(int irq, void *data)
+ 
+ 	spu = data;
+ 
++	spin_lock(&spu->register_lock);
+ 	mask = spu_int_mask_get(spu, 0);
+-	stat = spu_int_stat_get(spu, 0);
+-	stat &= mask;
++	stat = spu_int_stat_get(spu, 0) & mask;
+ 
+-	spin_lock(&spu->register_lock);
+ 	spu->class_0_pending |= stat;
++	spu->dsisr = spu_mfc_dsisr_get(spu);
++	spu->dar = spu_mfc_dar_get(spu);
+ 	spin_unlock(&spu->register_lock);
+ 
+ 	spu->stop_callback(spu);
+@@ -255,31 +332,6 @@ spu_irq_class_0(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
+-int
+-spu_irq_class_0_bottom(struct spu *spu)
+-{
+-	unsigned long flags;
+-	unsigned long stat;
+-
+-	spin_lock_irqsave(&spu->register_lock, flags);
+-	stat = spu->class_0_pending;
+-	spu->class_0_pending = 0;
+-
+-	if (stat & 1) /* invalid DMA alignment */
+-		__spu_trap_dma_align(spu);
+-
+-	if (stat & 2) /* invalid MFC DMA */
+-		__spu_trap_invalid_dma(spu);
+-
+-	if (stat & 4) /* error on SPU */
+-		__spu_trap_error(spu);
+-
+-	spin_unlock_irqrestore(&spu->register_lock, flags);
+-
+-	return (stat & 0x7) ? -EIO : 0;
+-}
+-EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
+-
+ static irqreturn_t
+ spu_irq_class_1(int irq, void *data)
+ {
+@@ -294,24 +346,23 @@ spu_irq_class_1(int irq, void *data)
+ 	stat  = spu_int_stat_get(spu, 1) & mask;
+ 	dar   = spu_mfc_dar_get(spu);
+ 	dsisr = spu_mfc_dsisr_get(spu);
+-	if (stat & 2) /* mapping fault */
++	if (stat & CLASS1_STORAGE_FAULT_INTR)
+ 		spu_mfc_dsisr_set(spu, 0ul);
+ 	spu_int_stat_clear(spu, 1, stat);
+ 	spin_unlock(&spu->register_lock);
+ 	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+ 			dar, dsisr);
+ 
+-	if (stat & 1) /* segment fault */
++	if (stat & CLASS1_SEGMENT_FAULT_INTR)
+ 		__spu_trap_data_seg(spu, dar);
+ 
+-	if (stat & 2) { /* mapping fault */
++	if (stat & CLASS1_STORAGE_FAULT_INTR)
+ 		__spu_trap_data_map(spu, dar, dsisr);
+-	}
+ 
+-	if (stat & 4) /* ls compare & suspend on get */
++	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
+ 		;
+ 
+-	if (stat & 8) /* ls compare & suspend on put */
++	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
+ 		;
+ 
+ 	return stat ? IRQ_HANDLED : IRQ_NONE;
+@@ -323,6 +374,8 @@ spu_irq_class_2(int irq, void *data)
+ 	struct spu *spu;
+ 	unsigned long stat;
+ 	unsigned long mask;
++	const int mailbox_intrs =
++		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
+ 
+ 	spu = data;
+ 	spin_lock(&spu->register_lock);
+@@ -330,31 +383,30 @@ spu_irq_class_2(int irq, void *data)
+ 	mask = spu_int_mask_get(spu, 2);
+ 	/* ignore interrupts we're not waiting for */
+ 	stat &= mask;
+-	/*
+-	 * mailbox interrupts (0x1 and 0x10) are level triggered.
+-	 * mask them now before acknowledging.
+-	 */
+-	if (stat & 0x11)
+-		spu_int_mask_and(spu, 2, ~(stat & 0x11));
++
++	/* mailbox interrupts are level triggered. mask them now before
++	 * acknowledging */
++	if (stat & mailbox_intrs)
++		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
+ 	/* acknowledge all interrupts before the callbacks */
+ 	spu_int_stat_clear(spu, 2, stat);
+ 	spin_unlock(&spu->register_lock);
+ 
+ 	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
+ 
+-	if (stat & 1)  /* PPC core mailbox */
++	if (stat & CLASS2_MAILBOX_INTR)
+ 		spu->ibox_callback(spu);
+ 
+-	if (stat & 2) /* SPU stop-and-signal */
++	if (stat & CLASS2_SPU_STOP_INTR)
+ 		spu->stop_callback(spu);
+ 
+-	if (stat & 4) /* SPU halted */
++	if (stat & CLASS2_SPU_HALT_INTR)
+ 		spu->stop_callback(spu);
+ 
+-	if (stat & 8) /* DMA tag group complete */
++	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
+ 		spu->mfc_callback(spu);
+ 
+-	if (stat & 0x10) /* SPU mailbox threshold */
++	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
+ 		spu->wbox_callback(spu);
+ 
+ 	spu->stats.class2_intr++;
+@@ -459,7 +511,7 @@ static int spu_shutdown(struct sys_device *sysdev)
+ }
+ 
+ static struct sysdev_class spu_sysdev_class = {
+-	set_kset_name("spu"),
++	.name = "spu",
+ 	.shutdown = spu_shutdown,
+ };
+ 
+@@ -479,13 +531,27 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
+ int spu_add_sysdev_attr_group(struct attribute_group *attrs)
+ {
+ 	struct spu *spu;
++	int rc = 0;
+ 
+ 	mutex_lock(&spu_full_list_mutex);
+-	list_for_each_entry(spu, &spu_full_list, full_list)
+-		sysfs_create_group(&spu->sysdev.kobj, attrs);
++	list_for_each_entry(spu, &spu_full_list, full_list) {
++		rc = sysfs_create_group(&spu->sysdev.kobj, attrs);
++
++		/* we're in trouble here, but try unwinding anyway */
++		if (rc) {
++			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
++					__func__, attrs->name);
++
++			list_for_each_entry_continue_reverse(spu,
++					&spu_full_list, full_list)
++				sysfs_remove_group(&spu->sysdev.kobj, attrs);
++			break;
++		}
++	}
++
+ 	mutex_unlock(&spu_full_list_mutex);
+ 
+-	return 0;
++	return rc;
+ }
+ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
+ 
+diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
+new file mode 100644
+index 0000000..c8b1cd4
+--- /dev/null
++++ b/arch/powerpc/platforms/cell/spu_fault.c
+@@ -0,0 +1,98 @@
++/*
++ * SPU mm fault handler
++ *
++ * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
++ *
++ * Author: Arnd Bergmann <arndb at de.ibm.com>
++ * Author: Jeremy Kerr <jk at ozlabs.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++
++#include <asm/spu.h>
++#include <asm/spu_csa.h>
++
++/*
++ * This ought to be kept in sync with the powerpc specific do_page_fault
++ * function. Currently, there are a few corner cases that we haven't had
++ * to handle fortunately.
++ */
++int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
++		unsigned long dsisr, unsigned *flt)
++{
++	struct vm_area_struct *vma;
++	unsigned long is_write;
++	int ret;
++
++#if 0
++	if (!IS_VALID_EA(ea)) {
++		return -EFAULT;
++	}
++#endif /* XXX */
++	if (mm == NULL) {
++		return -EFAULT;
++	}
++	if (mm->pgd == NULL) {
++		return -EFAULT;
++	}
++
++	down_read(&mm->mmap_sem);
++	vma = find_vma(mm, ea);
++	if (!vma)
++		goto bad_area;
++	if (vma->vm_start <= ea)
++		goto good_area;
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		goto bad_area;
++	if (expand_stack(vma, ea))
++		goto bad_area;
++good_area:
++	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
++	if (is_write) {
++		if (!(vma->vm_flags & VM_WRITE))
++			goto bad_area;
++	} else {
++		if (dsisr & MFC_DSISR_ACCESS_DENIED)
++			goto bad_area;
++		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++			goto bad_area;
++	}
++	ret = 0;
++	*flt = handle_mm_fault(mm, vma, ea, is_write);
++	if (unlikely(*flt & VM_FAULT_ERROR)) {
++		if (*flt & VM_FAULT_OOM) {
++			ret = -ENOMEM;
++			goto bad_area;
++		} else if (*flt & VM_FAULT_SIGBUS) {
++			ret = -EFAULT;
++			goto bad_area;
++		}
++		BUG();
++	}
++	if (*flt & VM_FAULT_MAJOR)
++		current->maj_flt++;
++	else
++		current->min_flt++;
++	up_read(&mm->mmap_sem);
++	return ret;
++
++bad_area:
++	up_read(&mm->mmap_sem);
++	return -EFAULT;
++}
++EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
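
spu_handle_mm_fault() above separates hard failures (returned as -EFAULT or -ENOMEM) from the VM_FAULT_* outcome written through *flt, so a caller splits the two. A hypothetical caller is sketched below only to show how those results are typically consumed; the real consumer lives in the spufs fault path, which is not part of this hunk, and example_resolve_spe_fault() is an invented name:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Prototype as defined in spu_fault.c above. */
extern int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
			       unsigned long dsisr, unsigned *flt);

/* Hypothetical caller: resolve an SPE-side fault against the owning mm. */
static int example_resolve_spe_fault(struct mm_struct *mm, unsigned long ea,
				     unsigned long dsisr)
{
	unsigned int flt;
	int ret;

	ret = spu_handle_mm_fault(mm, ea, dsisr, &flt);
	if (ret)
		return ret;	/* bad mm or VMA: caller raises a signal or error event */

	/* VM_FAULT_MAJOR means the page had to be brought in from backing store;
	 * a real caller might account this in per-context statistics. */
	if (flt & VM_FAULT_MAJOR)
		pr_debug("major fault at 0x%lx\n", ea);

	return 0;
}
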
+diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
+index 1b01070..d351bde 100644
+--- a/arch/powerpc/platforms/cell/spu_manage.c
++++ b/arch/powerpc/platforms/cell/spu_manage.c
+@@ -35,6 +35,7 @@
+ #include <asm/firmware.h>
+ #include <asm/prom.h>
+ 
++#include "spufs/spufs.h"
+ #include "interrupt.h"
+ 
+ struct device_node *spu_devnode(struct spu *spu)
+@@ -345,7 +346,7 @@ static int __init of_create_spu(struct spu *spu, void *data)
+ 		}
+ 		ret = spu_map_interrupts_old(spu, spe);
+ 		if (ret) {
+-			printk(KERN_ERR "%s: could not map interrupts",
++			printk(KERN_ERR "%s: could not map interrupts\n",
+ 				spu->name);
+ 			goto out_unmap;
+ 		}
+@@ -369,6 +370,16 @@ static int of_destroy_spu(struct spu *spu)
+ 	return 0;
+ }
+ 
++static void enable_spu_by_master_run(struct spu_context *ctx)
++{
++	ctx->ops->master_start(ctx);
++}
++
++static void disable_spu_by_master_run(struct spu_context *ctx)
++{
++	ctx->ops->master_stop(ctx);
++}
++
+ /* Hardcoded affinity idxs for qs20 */
+ #define QS20_SPES_PER_BE 8
+ static int qs20_reg_idxs[QS20_SPES_PER_BE] =   { 0, 2, 4, 6, 7, 5, 3, 1 };
+@@ -411,10 +422,15 @@ static void init_affinity_qs20_harcoded(void)
+ 
+ static int of_has_vicinity(void)
+ {
+-	struct spu* spu;
++	struct device_node *dn;
+ 
+-	spu = list_first_entry(&cbe_spu_info[0].spus, struct spu, cbe_list);
+-	return of_find_property(spu_devnode(spu), "vicinity", NULL) != NULL;
++	for_each_node_by_type(dn, "spe") {
++		if (of_find_property(dn, "vicinity", NULL))  {
++			of_node_put(dn);
++			return 1;
++		}
++	}
++	return 0;
+ }
+ 
+ static struct spu *devnode_spu(int cbe, struct device_node *dn)
+@@ -525,7 +541,7 @@ static int __init init_affinity(void)
+ 		if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ 			init_affinity_qs20_harcoded();
+ 		else
+-			printk("No affinity configuration found");
++			printk("No affinity configuration found\n");
+ 	}
+ 
+ 	return 0;
+@@ -535,5 +551,7 @@ const struct spu_management_ops spu_management_of_ops = {
+ 	.enumerate_spus = of_enumerate_spus,
+ 	.create_spu = of_create_spu,
+ 	.destroy_spu = of_destroy_spu,
++	.enable_spu = enable_spu_by_master_run,
++	.disable_spu = disable_spu_by_master_run,
+ 	.init_affinity = init_affinity,
+ };
+diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
+index 328afcf..d3a349f 100644
+--- a/arch/powerpc/platforms/cell/spufs/Makefile
++++ b/arch/powerpc/platforms/cell/spufs/Makefile
+@@ -1,8 +1,8 @@
+-obj-y += switch.o fault.o lscsa_alloc.o
+ 
+ obj-$(CONFIG_SPU_FS) += spufs.o
+ spufs-y += inode.o file.o context.o syscalls.o coredump.o
+ spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
++spufs-y += switch.o fault.o lscsa_alloc.o
+ 
+ # Rules to build switch.o with the help of SPU tool chain
+ SPU_CROSS	:= spu-
+diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
+index ec01214..50d98a1 100644
+--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
++++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
+@@ -106,16 +106,20 @@ static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
+ 		if (stat & 0xff0000)
+ 			ret |= POLLIN | POLLRDNORM;
+ 		else {
+-			ctx->csa.priv1.int_stat_class0_RW &= ~0x1;
+-			ctx->csa.priv1.int_mask_class2_RW |= 0x1;
++			ctx->csa.priv1.int_stat_class2_RW &=
++				~CLASS2_MAILBOX_INTR;
++			ctx->csa.priv1.int_mask_class2_RW |=
++				CLASS2_ENABLE_MAILBOX_INTR;
+ 		}
+ 	}
+ 	if (events & (POLLOUT | POLLWRNORM)) {
+ 		if (stat & 0x00ff00)
+ 			ret = POLLOUT | POLLWRNORM;
+ 		else {
+-			ctx->csa.priv1.int_stat_class0_RW &= ~0x10;
+-			ctx->csa.priv1.int_mask_class2_RW |= 0x10;
++			ctx->csa.priv1.int_stat_class2_RW &=
++				~CLASS2_MAILBOX_THRESHOLD_INTR;
++			ctx->csa.priv1.int_mask_class2_RW |=
++				CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
+ 		}
+ 	}
+ 	spin_unlock_irq(&ctx->csa.register_lock);
+@@ -139,7 +143,7 @@ static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
+ 		ret = 4;
+ 	} else {
+ 		/* make sure we get woken up by the interrupt */
+-		ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
++		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
+ 		ret = 0;
+ 	}
+ 	spin_unlock(&ctx->csa.register_lock);
+@@ -169,7 +173,8 @@ static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
+ 	} else {
+ 		/* make sure we get woken up by the interrupt when space
+ 		   becomes available */
+-		ctx->csa.priv1.int_mask_class2_RW |= 0x10;
++		ctx->csa.priv1.int_mask_class2_RW |=
++			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
+ 		ret = 0;
+ 	}
+ 	spin_unlock(&ctx->csa.register_lock);
+@@ -268,6 +273,11 @@ static char *spu_backing_get_ls(struct spu_context *ctx)
+ 	return ctx->csa.lscsa->ls;
+ }
+ 
++static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
++{
++	ctx->csa.priv2.spu_privcntl_RW = val;
++}
++
+ static u32 spu_backing_runcntl_read(struct spu_context *ctx)
+ {
+ 	return ctx->csa.prob.spu_runcntl_RW;
+@@ -285,6 +295,11 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
+ 	spin_unlock(&ctx->csa.register_lock);
+ }
+ 
++static void spu_backing_runcntl_stop(struct spu_context *ctx)
++{
++	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
++}
++
+ static void spu_backing_master_start(struct spu_context *ctx)
+ {
+ 	struct spu_state *csa = &ctx->csa;
+@@ -358,7 +373,7 @@ static int spu_backing_send_mfc_command(struct spu_context *ctx,
+ 
+ static void spu_backing_restart_dma(struct spu_context *ctx)
+ {
+-	/* nothing to do here */
++	ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
+ }
+ 
+ struct spu_context_ops spu_backing_ops = {
+@@ -379,8 +394,10 @@ struct spu_context_ops spu_backing_ops = {
+ 	.npc_write = spu_backing_npc_write,
+ 	.status_read = spu_backing_status_read,
+ 	.get_ls = spu_backing_get_ls,
++	.privcntl_write = spu_backing_privcntl_write,
+ 	.runcntl_read = spu_backing_runcntl_read,
+ 	.runcntl_write = spu_backing_runcntl_write,
++	.runcntl_stop = spu_backing_runcntl_stop,
+ 	.master_start = spu_backing_master_start,
+ 	.master_stop = spu_backing_master_stop,
+ 	.set_mfc_query = spu_backing_set_mfc_query,
+diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
+index adf0a03..133995e 100644
+--- a/arch/powerpc/platforms/cell/spufs/context.c
++++ b/arch/powerpc/platforms/cell/spufs/context.c
+@@ -52,6 +52,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
+ 	init_waitqueue_head(&ctx->wbox_wq);
+ 	init_waitqueue_head(&ctx->stop_wq);
+ 	init_waitqueue_head(&ctx->mfc_wq);
++	init_waitqueue_head(&ctx->run_wq);
+ 	ctx->state = SPU_STATE_SAVED;
+ 	ctx->ops = &spu_backing_ops;
+ 	ctx->owner = get_task_mm(current);
+@@ -105,7 +106,17 @@ int put_spu_context(struct spu_context *ctx)
+ void spu_forget(struct spu_context *ctx)
+ {
+ 	struct mm_struct *mm;
+-	spu_acquire_saved(ctx);
++
++	/*
++	 * This is basically an open-coded spu_acquire_saved, except that
++	 * we don't acquire the state mutex interruptibly.
++	 */
++	mutex_lock(&ctx->state_mutex);
++	if (ctx->state != SPU_STATE_SAVED) {
++		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
++		spu_deactivate(ctx);
++	}
++
+ 	mm = ctx->owner;
+ 	ctx->owner = NULL;
+ 	mmput(mm);
+@@ -133,47 +144,23 @@ void spu_unmap_mappings(struct spu_context *ctx)
+ }
+ 
+ /**
+- * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
++ * spu_acquire_saved - lock spu contex and make sure it is in saved state
+  * @ctx:	spu contex to lock
+- *
+- * Note:
+- *	Returns 0 and with the context locked on success
+- *	Returns negative error and with the context _unlocked_ on failure.
+  */
+-int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
++int spu_acquire_saved(struct spu_context *ctx)
+ {
+-	int ret = -EINVAL;
+-
+-	spu_acquire(ctx);
+-	if (ctx->state == SPU_STATE_SAVED) {
+-		/*
+-		 * Context is about to be freed, so we can't acquire it anymore.
+-		 */
+-		if (!ctx->owner)
+-			goto out_unlock;
+-		ret = spu_activate(ctx, flags);
+-		if (ret)
+-			goto out_unlock;
+-	}
++	int ret;
+ 
+-	return 0;
+-
+- out_unlock:
+-	spu_release(ctx);
+-	return ret;
+-}
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 
+-/**
+- * spu_acquire_saved - lock spu contex and make sure it is in saved state
+- * @ctx:	spu contex to lock
+- */
+-void spu_acquire_saved(struct spu_context *ctx)
+-{
+-	spu_acquire(ctx);
+ 	if (ctx->state != SPU_STATE_SAVED) {
+ 		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+ 		spu_deactivate(ctx);
+ 	}
++
++	return 0;
+ }
+ 
+ /**
+diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
+index 80f6236..0c6a96b 100644
+--- a/arch/powerpc/platforms/cell/spufs/coredump.c
++++ b/arch/powerpc/platforms/cell/spufs/coredump.c
+@@ -148,7 +148,9 @@ int spufs_coredump_extra_notes_size(void)
+ 
+ 	fd = 0;
+ 	while ((ctx = coredump_next_context(&fd)) != NULL) {
+-		spu_acquire_saved(ctx);
++		rc = spu_acquire_saved(ctx);
++		if (rc)
++			break;
+ 		rc = spufs_ctx_note_size(ctx, fd);
+ 		spu_release_saved(ctx);
+ 		if (rc < 0)
+@@ -224,7 +226,9 @@ int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
+ 
+ 	fd = 0;
+ 	while ((ctx = coredump_next_context(&fd)) != NULL) {
+-		spu_acquire_saved(ctx);
++		rc = spu_acquire_saved(ctx);
++		if (rc)
++			return rc;
+ 
+ 		for (j = 0; spufs_coredump_read[j].name != NULL; j++) {
+ 			rc = spufs_arch_write_note(ctx, j, file, fd, foffset);
+diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
+index 917eab4..eff4d29 100644
+--- a/arch/powerpc/platforms/cell/spufs/fault.c
++++ b/arch/powerpc/platforms/cell/spufs/fault.c
+@@ -28,117 +28,71 @@
+ 
+ #include "spufs.h"
+ 
+-/*
+- * This ought to be kept in sync with the powerpc specific do_page_fault
+- * function. Currently, there are a few corner cases that we haven't had
+- * to handle fortunately.
++/**
++ * Handle an SPE event, depending on context SPU_CREATE_EVENTS_ENABLED flag.
++ *
++ * If the context was created with events, we just set the return event.
++ * Otherwise, send an appropriate signal to the process.
+  */
+-static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+-		unsigned long dsisr, unsigned *flt)
++static void spufs_handle_event(struct spu_context *ctx,
++				unsigned long ea, int type)
+ {
+-	struct vm_area_struct *vma;
+-	unsigned long is_write;
+-	int ret;
++	siginfo_t info;
+ 
+-#if 0
+-	if (!IS_VALID_EA(ea)) {
+-		return -EFAULT;
+-	}
+-#endif /* XXX */
+-	if (mm == NULL) {
+-		return -EFAULT;
+-	}
+-	if (mm->pgd == NULL) {
+-		return -EFAULT;
++	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
++		ctx->event_return |= type;
++		wake_up_all(&ctx->stop_wq);
++		return;
+ 	}
+ 
+-	down_read(&mm->mmap_sem);
+-	vma = find_vma(mm, ea);
+-	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= ea)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (expand_stack(vma, ea))
+-		goto bad_area;
+-good_area:
+-	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+-	if (is_write) {
+-		if (!(vma->vm_flags & VM_WRITE))
+-			goto bad_area;
+-	} else {
+-		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+-			goto bad_area;
+-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+-			goto bad_area;
++	memset(&info, 0, sizeof(info));
++
++	switch (type) {
++	case SPE_EVENT_INVALID_DMA:
++		info.si_signo = SIGBUS;
++		info.si_code = BUS_OBJERR;
++		break;
++	case SPE_EVENT_SPE_DATA_STORAGE:
++		info.si_signo = SIGSEGV;
++		info.si_addr = (void __user *)ea;
++		info.si_code = SEGV_ACCERR;
++		ctx->ops->restart_dma(ctx);
++		break;
++	case SPE_EVENT_DMA_ALIGNMENT:
++		info.si_signo = SIGBUS;
++		/* DAR isn't set for an alignment fault :( */
++		info.si_code = BUS_ADRALN;
++		break;
++	case SPE_EVENT_SPE_ERROR:
++		info.si_signo = SIGILL;
++		info.si_addr = (void __user *)(unsigned long)
++			ctx->ops->npc_read(ctx) - 4;
++		info.si_code = ILL_ILLOPC;
++		break;
+ 	}
+-	ret = 0;
+-	*flt = handle_mm_fault(mm, vma, ea, is_write);
+-	if (unlikely(*flt & VM_FAULT_ERROR)) {
+-		if (*flt & VM_FAULT_OOM) {
+-			ret = -ENOMEM;
+-			goto bad_area;
+-		} else if (*flt & VM_FAULT_SIGBUS) {
+-			ret = -EFAULT;
+-			goto bad_area;
+-		}
+-		BUG();
+-	}
+-	if (*flt & VM_FAULT_MAJOR)
+-		current->maj_flt++;
+-	else
+-		current->min_flt++;
+-	up_read(&mm->mmap_sem);
+-	return ret;
+ 
+-bad_area:
+-	up_read(&mm->mmap_sem);
+-	return -EFAULT;
++	if (info.si_signo)
++		force_sig_info(info.si_signo, &info, current);
+ }
+ 
+-static void spufs_handle_dma_error(struct spu_context *ctx,
+-				unsigned long ea, int type)
++int spufs_handle_class0(struct spu_context *ctx)
+ {
+-	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
+-		ctx->event_return |= type;
+-		wake_up_all(&ctx->stop_wq);
+-	} else {
+-		siginfo_t info;
+-		memset(&info, 0, sizeof(info));
+-
+-		switch (type) {
+-		case SPE_EVENT_INVALID_DMA:
+-			info.si_signo = SIGBUS;
+-			info.si_code = BUS_OBJERR;
+-			break;
+-		case SPE_EVENT_SPE_DATA_STORAGE:
+-			info.si_signo = SIGBUS;
+-			info.si_addr = (void __user *)ea;
+-			info.si_code = BUS_ADRERR;
+-			break;
+-		case SPE_EVENT_DMA_ALIGNMENT:
+-			info.si_signo = SIGBUS;
+-			/* DAR isn't set for an alignment fault :( */
+-			info.si_code = BUS_ADRALN;
+-			break;
+-		case SPE_EVENT_SPE_ERROR:
+-			info.si_signo = SIGILL;
+-			info.si_addr = (void __user *)(unsigned long)
+-				ctx->ops->npc_read(ctx) - 4;
+-			info.si_code = ILL_ILLOPC;
+-			break;
+-		}
+-		if (info.si_signo)
+-			force_sig_info(info.si_signo, &info, current);
+-	}
+-}
++	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;
+ 
+-void spufs_dma_callback(struct spu *spu, int type)
+-{
+-	spufs_handle_dma_error(spu->ctx, spu->dar, type);
++	if (likely(!stat))
++		return 0;
++
++	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
++		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
++
++	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
++		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
++
++	if (stat & CLASS0_SPU_ERROR_INTR)
++		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
++
++	return -EIO;
+ }
+-EXPORT_SYMBOL_GPL(spufs_dma_callback);
+ 
+ /*
+  * bottom half handler for page faults, we can't do this from
+@@ -154,7 +108,7 @@ int spufs_handle_class1(struct spu_context *ctx)
+ 	u64 ea, dsisr, access;
+ 	unsigned long flags;
+ 	unsigned flt = 0;
+-	int ret;
++	int ret, ret2;
+ 
+ 	/*
+ 	 * dar and dsisr get passed from the registers
+@@ -165,16 +119,8 @@ int spufs_handle_class1(struct spu_context *ctx)
+ 	 * in time, we can still expect to get the same fault
+ 	 * the immediately after the context restore.
+ 	 */
+-	if (ctx->state == SPU_STATE_RUNNABLE) {
+-		ea = ctx->spu->dar;
+-		dsisr = ctx->spu->dsisr;
+-		ctx->spu->dar= ctx->spu->dsisr = 0;
+-	} else {
+-		ea = ctx->csa.priv1.mfc_dar_RW;
+-		dsisr = ctx->csa.priv1.mfc_dsisr_RW;
+-		ctx->csa.priv1.mfc_dar_RW = 0;
+-		ctx->csa.priv1.mfc_dsisr_RW = 0;
+-	}
++	ea = ctx->csa.dar;
++	dsisr = ctx->csa.dsisr;
+ 
+ 	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
+ 		return 0;
+@@ -201,7 +147,22 @@ int spufs_handle_class1(struct spu_context *ctx)
+ 	if (ret)
+ 		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
+ 
+-	spu_acquire(ctx);
++	/*
++	 * If spu_acquire fails due to a pending signal we just want to return
++	 * EINTR to userspace even if that means missing the dma restart or
++	 * updating the page fault statistics.
++	 */
++	ret2 = spu_acquire(ctx);
++	if (ret2)
++		goto out;
++
++	/*
++	 * Clear dsisr under ctxt lock after handling the fault, so that
++	 * time slicing will not preempt the context while the page fault
++	 * handler is running. Context switch code removes mappings.
++	 */
++	ctx->csa.dar = ctx->csa.dsisr = 0;
++
+ 	/*
+ 	 * If we handled the fault successfully and are in runnable
+ 	 * state, restart the DMA.
+@@ -222,9 +183,9 @@ int spufs_handle_class1(struct spu_context *ctx)
+ 		if (ctx->spu)
+ 			ctx->ops->restart_dma(ctx);
+ 	} else
+-		spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
++		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+ 
++ out:
+ 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(spufs_handle_class1);
+diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
+index d9e56a5..3fcd064 100644
+--- a/arch/powerpc/platforms/cell/spufs/file.c
++++ b/arch/powerpc/platforms/cell/spufs/file.c
+@@ -40,6 +40,120 @@
+ 
+ #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
+ 
++/* Simple attribute files */
++struct spufs_attr {
++	int (*get)(void *, u64 *);
++	int (*set)(void *, u64);
++	char get_buf[24];       /* enough to store a u64 and "\n\0" */
++	char set_buf[24];
++	void *data;
++	const char *fmt;        /* format for read operation */
++	struct mutex mutex;     /* protects access to these buffers */
++};
++
++static int spufs_attr_open(struct inode *inode, struct file *file,
++		int (*get)(void *, u64 *), int (*set)(void *, u64),
++		const char *fmt)
++{
++	struct spufs_attr *attr;
++
++	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
++	if (!attr)
++		return -ENOMEM;
++
++	attr->get = get;
++	attr->set = set;
++	attr->data = inode->i_private;
++	attr->fmt = fmt;
++	mutex_init(&attr->mutex);
++	file->private_data = attr;
++
++	return nonseekable_open(inode, file);
++}
++
++static int spufs_attr_release(struct inode *inode, struct file *file)
++{
++	kfree(file->private_data);
++	return 0;
++}
++
++static ssize_t spufs_attr_read(struct file *file, char __user *buf,
++		size_t len, loff_t *ppos)
++{
++	struct spufs_attr *attr;
++	size_t size;
++	ssize_t ret;
++
++	attr = file->private_data;
++	if (!attr->get)
++		return -EACCES;
++
++	ret = mutex_lock_interruptible(&attr->mutex);
++	if (ret)
++		return ret;
++
++	if (*ppos) {		/* continued read */
++		size = strlen(attr->get_buf);
++	} else {		/* first read */
++		u64 val;
++		ret = attr->get(attr->data, &val);
++		if (ret)
++			goto out;
++
++		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
++				 attr->fmt, (unsigned long long)val);
++	}
++
++	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
++out:
++	mutex_unlock(&attr->mutex);
++	return ret;
++}
++
++static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
++		size_t len, loff_t *ppos)
++{
++	struct spufs_attr *attr;
++	u64 val;
++	size_t size;
++	ssize_t ret;
++
++	attr = file->private_data;
++	if (!attr->set)
++		return -EACCES;
++
++	ret = mutex_lock_interruptible(&attr->mutex);
++	if (ret)
++		return ret;
++
++	ret = -EFAULT;
++	size = min(sizeof(attr->set_buf) - 1, len);
++	if (copy_from_user(attr->set_buf, buf, size))
++		goto out;
++
++	ret = len; /* claim we got the whole input */
++	attr->set_buf[size] = '\0';
++	val = simple_strtol(attr->set_buf, NULL, 0);
++	attr->set(attr->data, val);
++out:
++	mutex_unlock(&attr->mutex);
++	return ret;
++}
++
++#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
++static int __fops ## _open(struct inode *inode, struct file *file)	\
++{									\
++	__simple_attr_check_format(__fmt, 0ull);			\
++	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
++}									\
++static struct file_operations __fops = {				\
++	.owner	 = THIS_MODULE,						\
++	.open	 = __fops ## _open,					\
++	.release = spufs_attr_release,					\
++	.read	 = spufs_attr_read,					\
++	.write	 = spufs_attr_write,					\
++};
++
+ 
+ static int
+ spufs_mem_open(struct inode *inode, struct file *file)
+@@ -84,9 +198,12 @@ spufs_mem_read(struct file *file, char __user *buffer,
+ 	struct spu_context *ctx = file->private_data;
+ 	ssize_t ret;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	ret = __spufs_mem_read(ctx, buffer, size, pos);
+ 	spu_release(ctx);
++
+ 	return ret;
+ }
+ 
+@@ -106,7 +223,10 @@ spufs_mem_write(struct file *file, const char __user *buffer,
+ 	if (size > LS_SIZE - pos)
+ 		size = LS_SIZE - pos;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
++
+ 	local_store = ctx->ops->get_ls(ctx);
+ 	ret = copy_from_user(local_store + pos, buffer, size);
+ 	spu_release(ctx);
+@@ -146,7 +266,8 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
+ 	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
+ 		 addr0, address, offset);
+ 
+-	spu_acquire(ctx);
++	if (spu_acquire(ctx))
++		return NOPFN_REFAULT;
+ 
+ 	if (ctx->state == SPU_STATE_SAVED) {
+ 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+@@ -236,23 +357,32 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
+ {
+ 	struct spu_context *ctx = vma->vm_file->private_data;
+ 	unsigned long area, offset = address - vma->vm_start;
+-	int ret;
+ 
+ 	offset += vma->vm_pgoff << PAGE_SHIFT;
+ 	if (offset >= ps_size)
+ 		return NOPFN_SIGBUS;
+ 
+-	/* error here usually means a signal.. we might want to test
+-	 * the error code more precisely though
++	/*
++	 * We have to wait for context to be loaded before we have
++	 * pages to hand out to the user, but we don't want to wait
++	 * with the mmap_sem held.
++	 * It is possible to drop the mmap_sem here, but then we need
++	 * to return NOPFN_REFAULT because the mappings may have
++	 * changed.
+ 	 */
+-	ret = spu_acquire_runnable(ctx, 0);
+-	if (ret)
++	if (spu_acquire(ctx))
+ 		return NOPFN_REFAULT;
+ 
+-	area = ctx->spu->problem_phys + ps_offs;
+-	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+-	spu_release(ctx);
++	if (ctx->state == SPU_STATE_SAVED) {
++		up_read(&current->mm->mmap_sem);
++		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
++		down_read(&current->mm->mmap_sem);
++	} else {
++		area = ctx->spu->problem_phys + ps_offs;
++		vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
++	}
+ 
++	spu_release(ctx);
+ 	return NOPFN_REFAULT;
+ }
+ 
+@@ -286,25 +416,32 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
+ #define spufs_cntl_mmap NULL
+ #endif /* !SPUFS_MMAP_4K */
+ 
+-static u64 spufs_cntl_get(void *data)
++static int spufs_cntl_get(void *data, u64 *val)
+ {
+ 	struct spu_context *ctx = data;
+-	u64 val;
++	int ret;
+ 
+-	spu_acquire(ctx);
+-	val = ctx->ops->status_read(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
++	*val = ctx->ops->status_read(ctx);
+ 	spu_release(ctx);
+ 
+-	return val;
++	return 0;
+ }
+ 
+-static void spufs_cntl_set(void *data, u64 val)
++static int spufs_cntl_set(void *data, u64 val)
+ {
+ 	struct spu_context *ctx = data;
++	int ret;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	ctx->ops->runcntl_write(ctx, val);
+ 	spu_release(ctx);
++
++	return 0;
+ }
+ 
+ static int spufs_cntl_open(struct inode *inode, struct file *file)
+@@ -317,7 +454,7 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
+ 	if (!i->i_openers++)
+ 		ctx->cntl = inode->i_mapping;
+ 	mutex_unlock(&ctx->mapping_lock);
+-	return simple_attr_open(inode, file, spufs_cntl_get,
++	return spufs_attr_open(inode, file, spufs_cntl_get,
+ 					spufs_cntl_set, "0x%08lx");
+ }
+ 
+@@ -327,7 +464,7 @@ spufs_cntl_release(struct inode *inode, struct file *file)
+ 	struct spufs_inode_info *i = SPUFS_I(inode);
+ 	struct spu_context *ctx = i->i_ctx;
+ 
+-	simple_attr_close(inode, file);
++	spufs_attr_release(inode, file);
+ 
+ 	mutex_lock(&ctx->mapping_lock);
+ 	if (!--i->i_openers)
+@@ -339,8 +476,8 @@ spufs_cntl_release(struct inode *inode, struct file *file)
+ static const struct file_operations spufs_cntl_fops = {
+ 	.open = spufs_cntl_open,
+ 	.release = spufs_cntl_release,
+-	.read = simple_attr_read,
+-	.write = simple_attr_write,
++	.read = spufs_attr_read,
++	.write = spufs_attr_write,
+ 	.mmap = spufs_cntl_mmap,
+ };
+ 
+@@ -368,7 +505,9 @@ spufs_regs_read(struct file *file, char __user *buffer,
+ 	int ret;
+ 	struct spu_context *ctx = file->private_data;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	ret = __spufs_regs_read(ctx, buffer, size, pos);
+ 	spu_release_saved(ctx);
+ 	return ret;
+@@ -387,7 +526,9 @@ spufs_regs_write(struct file *file, const char __user *buffer,
+ 		return -EFBIG;
+ 	*pos += size;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 
+ 	ret = copy_from_user(lscsa->gprs + *pos - size,
+ 			     buffer, size) ? -EFAULT : size;
+@@ -419,7 +560,9 @@ spufs_fpcr_read(struct file *file, char __user * buffer,
+ 	int ret;
+ 	struct spu_context *ctx = file->private_data;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
+ 	spu_release_saved(ctx);
+ 	return ret;
+@@ -436,10 +579,12 @@ spufs_fpcr_write(struct file *file, const char __user * buffer,
+ 	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
+ 	if (size <= 0)
+ 		return -EFBIG;
+-	*pos += size;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 
++	*pos += size;
+ 	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
+ 			     buffer, size) ? -EFAULT : size;
+ 
+@@ -486,7 +631,10 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
+ 
+ 	udata = (void __user *)buf;
+ 
+-	spu_acquire(ctx);
++	count = spu_acquire(ctx);
++	if (count)
++		return count;
++
+ 	for (count = 0; (count + 4) <= len; count += 4, udata++) {
+ 		int ret;
+ 		ret = ctx->ops->mbox_read(ctx, &mbox_data);
+@@ -522,12 +670,15 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
+ 			size_t len, loff_t *pos)
+ {
+ 	struct spu_context *ctx = file->private_data;
++	ssize_t ret;
+ 	u32 mbox_stat;
+ 
+ 	if (len < 4)
+ 		return -EINVAL;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 
+ 	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
+ 
+@@ -562,6 +713,9 @@ void spufs_ibox_callback(struct spu *spu)
+ {
+ 	struct spu_context *ctx = spu->ctx;
+ 
++	if (!ctx)
++		return;
++
+ 	wake_up_all(&ctx->ibox_wq);
+ 	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
+ }
+@@ -593,7 +747,9 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
+ 
+ 	udata = (void __user *)buf;
+ 
+-	spu_acquire(ctx);
++	count = spu_acquire(ctx);
++	if (count)
++		return count;
+ 
+ 	/* wait only for the first element */
+ 	count = 0;
+@@ -639,7 +795,11 @@ static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
+ 
+ 	poll_wait(file, &ctx->ibox_wq, wait);
+ 
+-	spu_acquire(ctx);
++	/*
++	 * For now keep this uninterruptible and also ignore the rule
++	 * that poll should not sleep.  Will be fixed later.
++	 */
++	mutex_lock(&ctx->state_mutex);
+ 	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
+ 	spu_release(ctx);
+ 
+@@ -657,12 +817,15 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
+ 			size_t len, loff_t *pos)
+ {
+ 	struct spu_context *ctx = file->private_data;
++	ssize_t ret;
+ 	u32 ibox_stat;
+ 
+ 	if (len < 4)
+ 		return -EINVAL;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
+ 	spu_release(ctx);
+ 
+@@ -698,6 +861,9 @@ void spufs_wbox_callback(struct spu *spu)
+ {
+ 	struct spu_context *ctx = spu->ctx;
+ 
++	if (!ctx)
++		return;
++
+ 	wake_up_all(&ctx->wbox_wq);
+ 	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
+ }
+@@ -731,7 +897,9 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
+ 	if (__get_user(wbox_data, udata))
+ 		return -EFAULT;
+ 
+-	spu_acquire(ctx);
++	count = spu_acquire(ctx);
++	if (count)
++		return count;
+ 
+ 	/*
+ 	 * make sure we can at least write one element, by waiting
+@@ -772,7 +940,11 @@ static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
+ 
+ 	poll_wait(file, &ctx->wbox_wq, wait);
+ 
+-	spu_acquire(ctx);
++	/*
++	 * For now keep this uninterruptible and also ignore the rule
++	 * that poll should not sleep.  Will be fixed later.
++	 */
++	mutex_lock(&ctx->state_mutex);
+ 	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
+ 	spu_release(ctx);
+ 
+@@ -790,12 +962,15 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
+ 			size_t len, loff_t *pos)
+ {
+ 	struct spu_context *ctx = file->private_data;
++	ssize_t ret;
+ 	u32 wbox_stat;
+ 
+ 	if (len < 4)
+ 		return -EINVAL;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
+ 	spu_release(ctx);
+ 
+@@ -866,7 +1041,9 @@ static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+ 	int ret;
+ 	struct spu_context *ctx = file->private_data;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	ret = __spufs_signal1_read(ctx, buf, len, pos);
+ 	spu_release_saved(ctx);
+ 
+@@ -877,6 +1054,7 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
+ 			size_t len, loff_t *pos)
+ {
+ 	struct spu_context *ctx;
++	ssize_t ret;
+ 	u32 data;
+ 
+ 	ctx = file->private_data;
+@@ -887,7 +1065,9 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
+ 	if (copy_from_user(&data, buf, 4))
+ 		return -EFAULT;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	ctx->ops->signal1_write(ctx, data);
+ 	spu_release(ctx);
+ 
+@@ -997,7 +1177,9 @@ static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+ 	struct spu_context *ctx = file->private_data;
+ 	int ret;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	ret = __spufs_signal2_read(ctx, buf, len, pos);
+ 	spu_release_saved(ctx);
+ 
+@@ -1008,6 +1190,7 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
+ 			size_t len, loff_t *pos)
+ {
+ 	struct spu_context *ctx;
++	ssize_t ret;
+ 	u32 data;
+ 
+ 	ctx = file->private_data;
+@@ -1018,7 +1201,9 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
+ 	if (copy_from_user(&data, buf, 4))
+ 		return -EFAULT;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	ctx->ops->signal2_write(ctx, data);
+ 	spu_release(ctx);
+ 
+@@ -1086,33 +1271,42 @@ static const struct file_operations spufs_signal2_nosched_fops = {
+ #define SPU_ATTR_ACQUIRE_SAVED	2
+ 
+ #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
+-static u64 __##__get(void *data)					\
++static int __##__get(void *data, u64 *val)				\
+ {									\
+ 	struct spu_context *ctx = data;					\
+-	u64 ret;							\
++	int ret = 0;							\
+ 									\
+ 	if (__acquire == SPU_ATTR_ACQUIRE) {				\
+-		spu_acquire(ctx);					\
+-		ret = __get(ctx);					\
++		ret = spu_acquire(ctx);					\
++		if (ret)						\
++			return ret;					\
++		*val = __get(ctx);					\
+ 		spu_release(ctx);					\
+ 	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
+-		spu_acquire_saved(ctx);					\
+-		ret = __get(ctx);					\
++		ret = spu_acquire_saved(ctx);				\
++		if (ret)						\
++			return ret;					\
++		*val = __get(ctx);					\
+ 		spu_release_saved(ctx);					\
+ 	} else								\
+-		ret = __get(ctx);					\
++		*val = __get(ctx);					\
+ 									\
+-	return ret;							\
++	return 0;							\
+ }									\
+-DEFINE_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
++DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
+ 
+-static void spufs_signal1_type_set(void *data, u64 val)
++static int spufs_signal1_type_set(void *data, u64 val)
+ {
+ 	struct spu_context *ctx = data;
++	int ret;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	ctx->ops->signal1_type_set(ctx, val);
+ 	spu_release(ctx);
++
++	return 0;
+ }
+ 
+ static u64 spufs_signal1_type_get(struct spu_context *ctx)
+@@ -1123,13 +1317,18 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
+ 		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);
+ 
+ 
+-static void spufs_signal2_type_set(void *data, u64 val)
++static int spufs_signal2_type_set(void *data, u64 val)
+ {
+ 	struct spu_context *ctx = data;
++	int ret;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	ctx->ops->signal2_type_set(ctx, val);
+ 	spu_release(ctx);
++
++	return 0;
+ }
+ 
+ static u64 spufs_signal2_type_get(struct spu_context *ctx)
+@@ -1329,6 +1528,9 @@ void spufs_mfc_callback(struct spu *spu)
+ {
+ 	struct spu_context *ctx = spu->ctx;
+ 
++	if (!ctx)
++		return;
++
+ 	wake_up_all(&ctx->mfc_wq);
+ 
+ 	pr_debug("%s %s\n", __FUNCTION__, spu->name);
+@@ -1375,12 +1577,17 @@ static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
+ 	if (size != 4)
+ 		goto out;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
++
++	ret = -EINVAL;
+ 	if (file->f_flags & O_NONBLOCK) {
+ 		status = ctx->ops->read_mfc_tagstatus(ctx);
+ 		if (!(status & ctx->tagwait))
+ 			ret = -EAGAIN;
+ 		else
++			/* XXX(hch): shouldn't we clear ret here? */
+ 			ctx->tagwait &= ~status;
+ 	} else {
+ 		ret = spufs_wait(ctx->mfc_wq,
+@@ -1505,7 +1712,11 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = spu_acquire_runnable(ctx, 0);
++	ret = spu_acquire(ctx);
++	if (ret)
++		goto out;
++
++	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -1539,7 +1750,11 @@ static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
+ 
+ 	poll_wait(file, &ctx->mfc_wq, wait);
+ 
+-	spu_acquire(ctx);
++	/*
++	 * For now keep this uninterruptible and also ignore the rule
++	 * that poll should not sleep.  Will be fixed later.
++	 */
++	mutex_lock(&ctx->state_mutex);
+ 	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
+ 	free_elements = ctx->ops->get_mfc_free_elements(ctx);
+ 	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
+@@ -1562,7 +1777,9 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
+ 	struct spu_context *ctx = file->private_data;
+ 	int ret;
+ 
+-	spu_acquire(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ #if 0
+ /* this currently hangs */
+ 	ret = spufs_wait(ctx->mfc_wq,
+@@ -1605,12 +1822,18 @@ static const struct file_operations spufs_mfc_fops = {
+ 	.mmap	 = spufs_mfc_mmap,
+ };
+ 
+-static void spufs_npc_set(void *data, u64 val)
++static int spufs_npc_set(void *data, u64 val)
+ {
+ 	struct spu_context *ctx = data;
+-	spu_acquire(ctx);
++	int ret;
++
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 	ctx->ops->npc_write(ctx, val);
+ 	spu_release(ctx);
++
++	return 0;
+ }
+ 
+ static u64 spufs_npc_get(struct spu_context *ctx)
+@@ -1620,13 +1843,19 @@ static u64 spufs_npc_get(struct spu_context *ctx)
+ DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
+ 		       "0x%llx\n", SPU_ATTR_ACQUIRE);
+ 
+-static void spufs_decr_set(void *data, u64 val)
++static int spufs_decr_set(void *data, u64 val)
+ {
+ 	struct spu_context *ctx = data;
+ 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+-	spu_acquire_saved(ctx);
++	int ret;
++
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	lscsa->decr.slot[0] = (u32) val;
+ 	spu_release_saved(ctx);
++
++	return 0;
+ }
+ 
+ static u64 spufs_decr_get(struct spu_context *ctx)
+@@ -1637,15 +1866,21 @@ static u64 spufs_decr_get(struct spu_context *ctx)
+ DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
+ 		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
+ 
+-static void spufs_decr_status_set(void *data, u64 val)
++static int spufs_decr_status_set(void *data, u64 val)
+ {
+ 	struct spu_context *ctx = data;
+-	spu_acquire_saved(ctx);
++	int ret;
++
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	if (val)
+ 		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
+ 	else
+ 		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
+ 	spu_release_saved(ctx);
++
++	return 0;
+ }
+ 
+ static u64 spufs_decr_status_get(struct spu_context *ctx)
+@@ -1659,13 +1894,19 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
+ 		       spufs_decr_status_set, "0x%llx\n",
+ 		       SPU_ATTR_ACQUIRE_SAVED);
+ 
+-static void spufs_event_mask_set(void *data, u64 val)
++static int spufs_event_mask_set(void *data, u64 val)
+ {
+ 	struct spu_context *ctx = data;
+ 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+-	spu_acquire_saved(ctx);
++	int ret;
++
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	lscsa->event_mask.slot[0] = (u32) val;
+ 	spu_release_saved(ctx);
++
++	return 0;
+ }
+ 
+ static u64 spufs_event_mask_get(struct spu_context *ctx)
+@@ -1690,13 +1931,19 @@ static u64 spufs_event_status_get(struct spu_context *ctx)
+ DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
+ 		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
+ 
+-static void spufs_srr0_set(void *data, u64 val)
++static int spufs_srr0_set(void *data, u64 val)
+ {
+ 	struct spu_context *ctx = data;
+ 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+-	spu_acquire_saved(ctx);
++	int ret;
++
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	lscsa->srr0.slot[0] = (u32) val;
+ 	spu_release_saved(ctx);
++
++	return 0;
+ }
+ 
+ static u64 spufs_srr0_get(struct spu_context *ctx)
+@@ -1727,10 +1974,12 @@ static u64 spufs_object_id_get(struct spu_context *ctx)
+ 	return ctx->object_id;
+ }
+ 
+-static void spufs_object_id_set(void *data, u64 id)
++static int spufs_object_id_set(void *data, u64 id)
+ {
+ 	struct spu_context *ctx = data;
+ 	ctx->object_id = id;
++
++	return 0;
+ }
+ 
+ DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
+@@ -1777,13 +2026,13 @@ static const struct file_operations spufs_caps_fops = {
+ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
+ 			char __user *buf, size_t len, loff_t *pos)
+ {
+-	u32 mbox_stat;
+ 	u32 data;
+ 
+-	mbox_stat = ctx->csa.prob.mb_stat_R;
+-	if (mbox_stat & 0x0000ff) {
+-		data = ctx->csa.prob.pu_mb_R;
+-	}
++	/* EOF if there's no entry in the mbox */
++	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
++		return 0;
++
++	data = ctx->csa.prob.pu_mb_R;
+ 
+ 	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+ }
+@@ -1797,7 +2046,9 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
+ 	if (!access_ok(VERIFY_WRITE, buf, len))
+ 		return -EFAULT;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	spin_lock(&ctx->csa.register_lock);
+ 	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
+ 	spin_unlock(&ctx->csa.register_lock);
+@@ -1815,13 +2066,13 @@ static const struct file_operations spufs_mbox_info_fops = {
+ static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
+ 				char __user *buf, size_t len, loff_t *pos)
+ {
+-	u32 ibox_stat;
+ 	u32 data;
+ 
+-	ibox_stat = ctx->csa.prob.mb_stat_R;
+-	if (ibox_stat & 0xff0000) {
+-		data = ctx->csa.priv2.puint_mb_R;
+-	}
++	/* EOF if there's no entry in the ibox */
++	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
++		return 0;
++
++	data = ctx->csa.priv2.puint_mb_R;
+ 
+ 	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+ }
+@@ -1835,7 +2086,9 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
+ 	if (!access_ok(VERIFY_WRITE, buf, len))
+ 		return -EFAULT;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	spin_lock(&ctx->csa.register_lock);
+ 	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
+ 	spin_unlock(&ctx->csa.register_lock);
+@@ -1876,7 +2129,9 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
+ 	if (!access_ok(VERIFY_WRITE, buf, len))
+ 		return -EFAULT;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	spin_lock(&ctx->csa.register_lock);
+ 	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
+ 	spin_unlock(&ctx->csa.register_lock);
+@@ -1926,7 +2181,9 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
+ 	if (!access_ok(VERIFY_WRITE, buf, len))
+ 		return -EFAULT;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	spin_lock(&ctx->csa.register_lock);
+ 	ret = __spufs_dma_info_read(ctx, buf, len, pos);
+ 	spin_unlock(&ctx->csa.register_lock);
+@@ -1977,7 +2234,9 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
+ 	struct spu_context *ctx = file->private_data;
+ 	int ret;
+ 
+-	spu_acquire_saved(ctx);
++	ret = spu_acquire_saved(ctx);
++	if (ret)
++		return ret;
+ 	spin_lock(&ctx->csa.register_lock);
+ 	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
+ 	spin_unlock(&ctx->csa.register_lock);
+@@ -2066,8 +2325,12 @@ static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
+ static int spufs_show_stat(struct seq_file *s, void *private)
+ {
+ 	struct spu_context *ctx = s->private;
++	int ret;
++
++	ret = spu_acquire(ctx);
++	if (ret)
++		return ret;
+ 
+-	spu_acquire(ctx);
+ 	seq_printf(s, "%s %llu %llu %llu %llu "
+ 		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+ 		ctx_state_names[ctx->stats.util_state],
+diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
+index fc4ed1f..64f8540 100644
+--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
++++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
+@@ -76,16 +76,18 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
+ 		if (stat & 0xff0000)
+ 			ret |= POLLIN | POLLRDNORM;
+ 		else {
+-			spu_int_stat_clear(spu, 2, 0x1);
+-			spu_int_mask_or(spu, 2, 0x1);
++			spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
++			spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
+ 		}
+ 	}
+ 	if (events & (POLLOUT | POLLWRNORM)) {
+ 		if (stat & 0x00ff00)
+ 			ret = POLLOUT | POLLWRNORM;
+ 		else {
+-			spu_int_stat_clear(spu, 2, 0x10);
+-			spu_int_mask_or(spu, 2, 0x10);
++			spu_int_stat_clear(spu, 2,
++					CLASS2_MAILBOX_THRESHOLD_INTR);
++			spu_int_mask_or(spu, 2,
++					CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
+ 		}
+ 	}
+ 	spin_unlock_irq(&spu->register_lock);
+@@ -106,7 +108,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
+ 		ret = 4;
+ 	} else {
+ 		/* make sure we get woken up by the interrupt */
+-		spu_int_mask_or(spu, 2, 0x1);
++		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
+ 		ret = 0;
+ 	}
+ 	spin_unlock_irq(&spu->register_lock);
+@@ -127,7 +129,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
+ 	} else {
+ 		/* make sure we get woken up by the interrupt when space
+ 		   becomes available */
+-		spu_int_mask_or(spu, 2, 0x10);
++		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
+ 		ret = 0;
+ 	}
+ 	spin_unlock_irq(&spu->register_lock);
+@@ -206,6 +208,11 @@ static char *spu_hw_get_ls(struct spu_context *ctx)
+ 	return ctx->spu->local_store;
+ }
+ 
++static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val)
++{
++	out_be64(&ctx->spu->priv2->spu_privcntl_RW, val);
++}
++
+ static u32 spu_hw_runcntl_read(struct spu_context *ctx)
+ {
+ 	return in_be32(&ctx->spu->problem->spu_runcntl_RW);
+@@ -215,11 +222,21 @@ static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
+ {
+ 	spin_lock_irq(&ctx->spu->register_lock);
+ 	if (val & SPU_RUNCNTL_ISOLATE)
+-		out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
++		spu_hw_privcntl_write(ctx,
++			SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK);
+ 	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
+ 	spin_unlock_irq(&ctx->spu->register_lock);
+ }
+ 
++static void spu_hw_runcntl_stop(struct spu_context *ctx)
++{
++	spin_lock_irq(&ctx->spu->register_lock);
++	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
++	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
++		cpu_relax();
++	spin_unlock_irq(&ctx->spu->register_lock);
++}
++
+ static void spu_hw_master_start(struct spu_context *ctx)
+ {
+ 	struct spu *spu = ctx->spu;
+@@ -319,8 +336,10 @@ struct spu_context_ops spu_hw_ops = {
+ 	.npc_write = spu_hw_npc_write,
+ 	.status_read = spu_hw_status_read,
+ 	.get_ls = spu_hw_get_ls,
++	.privcntl_write = spu_hw_privcntl_write,
+ 	.runcntl_read = spu_hw_runcntl_read,
+ 	.runcntl_write = spu_hw_runcntl_write,
++	.runcntl_stop = spu_hw_runcntl_stop,
+ 	.master_start = spu_hw_master_start,
+ 	.master_stop = spu_hw_master_stop,
+ 	.set_mfc_query = spu_hw_set_mfc_query,
+diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+index f4b3c05..0e9f325 100644
+--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
++++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+@@ -28,6 +28,8 @@
+ #include <asm/spu_csa.h>
+ #include <asm/mmu.h>
+ 
++#include "spufs.h"
++
+ static int spu_alloc_lscsa_std(struct spu_state *csa)
+ {
+ 	struct spu_lscsa *lscsa;
+@@ -73,7 +75,7 @@ int spu_alloc_lscsa(struct spu_state *csa)
+ 	int		i, j, n_4k;
+ 
+ 	/* Check availability of 64K pages */
+-	if (mmu_psize_defs[MMU_PAGE_64K].shift == 0)
++	if (!spu_64k_pages_available())
+ 		goto fail;
+ 
+ 	csa->use_big_pages = 1;
+diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
+index 1ce5e22..c01a09d 100644
+--- a/arch/powerpc/platforms/cell/spufs/run.c
++++ b/arch/powerpc/platforms/cell/spufs/run.c
+@@ -15,24 +15,55 @@ void spufs_stop_callback(struct spu *spu)
+ {
+ 	struct spu_context *ctx = spu->ctx;
+ 
+-	wake_up_all(&ctx->stop_wq);
++	/*
++	 * It should be impossible to preempt a context while an exception
++	 * is being processed, since the context switch code is specially
++	 * coded to deal with interrupts ... But, just in case, sanity check
++	 * the context pointer.  It is OK to return doing nothing since
++	 * the exception will be regenerated when the context is resumed.
++	 */
++	if (ctx) {
++		/* Copy exception arguments into module specific structure */
++		ctx->csa.class_0_pending = spu->class_0_pending;
++		ctx->csa.dsisr = spu->dsisr;
++		ctx->csa.dar = spu->dar;
++
++		/* ensure that the exception status has hit memory before a
++		 * thread waiting on the context's stop queue is woken */
++		smp_wmb();
++
++		wake_up_all(&ctx->stop_wq);
++	}
++
++	/* Clear callback arguments from spu structure */
++	spu->class_0_pending = 0;
++	spu->dsisr = 0;
++	spu->dar = 0;
+ }
+ 
+-static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
++int spu_stopped(struct spu_context *ctx, u32 *stat)
+ {
+-	struct spu *spu;
+-	u64 pte_fault;
++	u64 dsisr;
++	u32 stopped;
+ 
+ 	*stat = ctx->ops->status_read(ctx);
+ 
+-	spu = ctx->spu;
+-	if (ctx->state != SPU_STATE_RUNNABLE ||
+-	    test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
++	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+ 		return 1;
+-	pte_fault = spu->dsisr &
+-	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
+-	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
+-		1 : 0;
++
++	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
++		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
++	if (*stat & stopped)
++		return 1;
++
++	dsisr = ctx->csa.dsisr;
++	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
++		return 1;
++
++	if (ctx->csa.class_0_pending)
++		return 1;
++
++	return 0;
+ }
+ 
+ static int spu_setup_isolated(struct spu_context *ctx)
+@@ -128,34 +159,66 @@ out:
+ 
+ static int spu_run_init(struct spu_context *ctx, u32 *npc)
+ {
++	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
++	int ret;
++
+ 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+ 
+-	if (ctx->flags & SPU_CREATE_ISOLATE) {
+-		unsigned long runcntl;
++	/*
++	 * NOSCHED is synchronous scheduling with respect to the caller.
++	 * The caller waits for the context to be loaded.
++	 */
++	if (ctx->flags & SPU_CREATE_NOSCHED) {
++		if (ctx->state == SPU_STATE_SAVED) {
++			ret = spu_activate(ctx, 0);
++			if (ret)
++				return ret;
++		}
++	}
+ 
++	/*
++	 * Apply special setup as required.
++	 */
++	if (ctx->flags & SPU_CREATE_ISOLATE) {
+ 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
+-			int ret = spu_setup_isolated(ctx);
++			ret = spu_setup_isolated(ctx);
+ 			if (ret)
+ 				return ret;
+ 		}
+ 
+-		/* if userspace has set the runcntrl register (eg, to issue an
+-		 * isolated exit), we need to re-set it here */
++		/*
++		 * If userspace has set the runcntrl register (eg, to
++		 * issue an isolated exit), we need to re-set it here
++		 */
+ 		runcntl = ctx->ops->runcntl_read(ctx) &
+ 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+ 		if (runcntl == 0)
+ 			runcntl = SPU_RUNCNTL_RUNNABLE;
++	}
++
++	if (ctx->flags & SPU_CREATE_NOSCHED) {
++		spuctx_switch_state(ctx, SPU_UTIL_USER);
+ 		ctx->ops->runcntl_write(ctx, runcntl);
+ 	} else {
+-		unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;
+-		ctx->ops->npc_write(ctx, *npc);
++		unsigned long privcntl;
++
+ 		if (test_thread_flag(TIF_SINGLESTEP))
+-			mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
+-		out_be64(&ctx->spu->priv2->spu_privcntl_RW, mode);
+-		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+-	}
++			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
++		else
++			privcntl = SPU_PRIVCNTL_MODE_NORMAL;
+ 
+-	spuctx_switch_state(ctx, SPU_UTIL_USER);
++		ctx->ops->npc_write(ctx, *npc);
++		ctx->ops->privcntl_write(ctx, privcntl);
++		ctx->ops->runcntl_write(ctx, runcntl);
++
++		if (ctx->state == SPU_STATE_SAVED) {
++			ret = spu_activate(ctx, 0);
++			if (ret)
++				return ret;
++		} else {
++			spuctx_switch_state(ctx, SPU_UTIL_USER);
++		}
++	}
+ 
+ 	return 0;
+ }
+@@ -165,6 +228,8 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
+ {
+ 	int ret = 0;
+ 
++	spu_del_from_rq(ctx);
++
+ 	*status = ctx->ops->status_read(ctx);
+ 	*npc = ctx->ops->npc_read(ctx);
+ 
+@@ -177,26 +242,6 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
+ 	return ret;
+ }
+ 
+-static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
+-				         u32 *status)
+-{
+-	int ret;
+-
+-	ret = spu_run_fini(ctx, npc, status);
+-	if (ret)
+-		return ret;
+-
+-	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
+-		return *status;
+-
+-	ret = spu_acquire_runnable(ctx, 0);
+-	if (ret)
+-		return ret;
+-
+-	spuctx_switch_state(ctx, SPU_UTIL_USER);
+-	return 0;
+-}
+-
+ /*
+  * SPU syscall restarting is tricky because we violate the basic
+  * assumption that the signal handler is running on the interrupted
+@@ -247,7 +292,7 @@ static int spu_process_callback(struct spu_context *ctx)
+ 	u32 ls_pointer, npc;
+ 	void __iomem *ls;
+ 	long spu_ret;
+-	int ret;
++	int ret, ret2;
+ 
+ 	/* get syscall block from local store */
+ 	npc = ctx->ops->npc_read(ctx) & ~3;
+@@ -269,9 +314,11 @@ static int spu_process_callback(struct spu_context *ctx)
+ 		if (spu_ret <= -ERESTARTSYS) {
+ 			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
+ 		}
+-		spu_acquire(ctx);
++		ret2 = spu_acquire(ctx);
+ 		if (ret == -ERESTARTSYS)
+ 			return ret;
++		if (ret2)
++			return -EINTR;
+ 	}
+ 
+ 	/* write result, jump over indirect pointer */
+@@ -281,18 +328,6 @@ static int spu_process_callback(struct spu_context *ctx)
+ 	return ret;
+ }
+ 
+-static inline int spu_process_events(struct spu_context *ctx)
+-{
+-	struct spu *spu = ctx->spu;
+-	int ret = 0;
+-
+-	if (spu->class_0_pending)
+-		ret = spu_irq_class_0_bottom(spu);
+-	if (!ret && signal_pending(current))
+-		ret = -ERESTARTSYS;
+-	return ret;
+-}
+-
+ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
+ {
+ 	int ret;
+@@ -302,29 +337,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
+ 	if (mutex_lock_interruptible(&ctx->run_mutex))
+ 		return -ERESTARTSYS;
+ 
+-	ctx->ops->master_start(ctx);
++	spu_enable_spu(ctx);
+ 	ctx->event_return = 0;
+ 
+-	spu_acquire(ctx);
+-	if (ctx->state == SPU_STATE_SAVED) {
+-		__spu_update_sched_info(ctx);
+-		spu_set_timeslice(ctx);
++	ret = spu_acquire(ctx);
++	if (ret)
++		goto out_unlock;
+ 
+-		ret = spu_activate(ctx, 0);
+-		if (ret) {
+-			spu_release(ctx);
+-			goto out;
+-		}
+-	} else {
+-		/*
+-		 * We have to update the scheduling priority under active_mutex
+-		 * to protect against find_victim().
+-		 *
+-		 * No need to update the timeslice ASAP, it will get updated
+-		 * once the current one has expired.
+-		 */
+-		spu_update_sched_info(ctx);
+-	}
++	spu_update_sched_info(ctx);
+ 
+ 	ret = spu_run_init(ctx, npc);
+ 	if (ret) {
+@@ -358,14 +378,12 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
+ 		if (ret)
+ 			break;
+ 
+-		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
+-			ret = spu_reacquire_runnable(ctx, npc, &status);
+-			if (ret)
+-				goto out2;
+-			continue;
+-		}
+-		ret = spu_process_events(ctx);
++		ret = spufs_handle_class0(ctx);
++		if (ret)
++			break;
+ 
++		if (signal_pending(current))
++			ret = -ERESTARTSYS;
+ 	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
+ 				      SPU_STATUS_STOPPED_BY_HALT |
+ 				       SPU_STATUS_SINGLE_STEP)));
+@@ -376,11 +394,10 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
+ 		ctx->stats.libassist++;
+ 
+ 
+-	ctx->ops->master_stop(ctx);
++	spu_disable_spu(ctx);
+ 	ret = spu_run_fini(ctx, npc, &status);
+ 	spu_yield(ctx);
+ 
+-out2:
+ 	if ((ret == 0) ||
+ 	    ((ret == -ERESTARTSYS) &&
+ 	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
+@@ -401,6 +418,7 @@ out2:
+ 
+ out:
+ 	*event = ctx->event_return;
++out_unlock:
+ 	mutex_unlock(&ctx->run_mutex);
+ 	return ret;
+ }
+diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
+index 9ad53e6..00d9142 100644
+--- a/arch/powerpc/platforms/cell/spufs/sched.c
++++ b/arch/powerpc/platforms/cell/spufs/sched.c
+@@ -58,6 +58,7 @@ static unsigned long spu_avenrun[3];
+ static struct spu_prio_array *spu_prio;
+ static struct task_struct *spusched_task;
+ static struct timer_list spusched_timer;
++static struct timer_list spuloadavg_timer;
+ 
+ /*
+  * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
+@@ -105,15 +106,21 @@ void spu_set_timeslice(struct spu_context *ctx)
+ void __spu_update_sched_info(struct spu_context *ctx)
+ {
+ 	/*
+-	 * 32-Bit assignment are atomic on powerpc, and we don't care about
+-	 * memory ordering here because retriving the controlling thread is
+-	 * per defintion racy.
++	 * assert that the context is not on the runqueue, so it is safe
++	 * to change its scheduling parameters.
++	 */
++	BUG_ON(!list_empty(&ctx->rq));
++
++	/*
++	 * 32-Bit assignments are atomic on powerpc, and we don't care about
++	 * memory ordering here because retrieving the controlling thread is
++	 * per definition racy.
+ 	 */
+ 	ctx->tid = current->pid;
+ 
+ 	/*
+ 	 * We do our own priority calculations, so we normally want
+-	 * ->static_prio to start with. Unfortunately thies field
++	 * ->static_prio to start with. Unfortunately this field
+ 	 * contains junk for threads with a realtime scheduling
+ 	 * policy so we have to look at ->prio in this case.
+ 	 */
+@@ -124,23 +131,32 @@ void __spu_update_sched_info(struct spu_context *ctx)
+ 	ctx->policy = current->policy;
+ 
+ 	/*
+-	 * A lot of places that don't hold list_mutex poke into
+-	 * cpus_allowed, including grab_runnable_context which
+-	 * already holds the runq_lock.  So abuse runq_lock
+-	 * to protect this field aswell.
++	 * TO DO: the context may be loaded, so we may need to activate
++	 * it again on a different node. But it shouldn't hurt anything
++	 * to update its parameters, because we know that the scheduler
++	 * is not actively looking at this field, since it is not on the
++	 * runqueue. The context will be rescheduled on the proper node
++	 * if it is timesliced or preempted.
+ 	 */
+-	spin_lock(&spu_prio->runq_lock);
+ 	ctx->cpus_allowed = current->cpus_allowed;
+-	spin_unlock(&spu_prio->runq_lock);
+ }
+ 
+ void spu_update_sched_info(struct spu_context *ctx)
+ {
+-	int node = ctx->spu->node;
++	int node;
+ 
+-	mutex_lock(&cbe_spu_info[node].list_mutex);
+-	__spu_update_sched_info(ctx);
+-	mutex_unlock(&cbe_spu_info[node].list_mutex);
++	if (ctx->state == SPU_STATE_RUNNABLE) {
++		node = ctx->spu->node;
++
++		/*
++		 * Take list_mutex to sync with find_victim().
++		 */
++		mutex_lock(&cbe_spu_info[node].list_mutex);
++		__spu_update_sched_info(ctx);
++		mutex_unlock(&cbe_spu_info[node].list_mutex);
++	} else {
++		__spu_update_sched_info(ctx);
++	}
+ }
+ 
+ static int __node_allowed(struct spu_context *ctx, int node)
+@@ -174,7 +190,7 @@ void do_notify_spus_active(void)
+ 	 * Wake up the active spu_contexts.
+ 	 *
+ 	 * When the awakened processes see their "notify_active" flag is set,
+-	 * they will call spu_switch_notify();
++	 * they will call spu_switch_notify().
+ 	 */
+ 	for_each_online_node(node) {
+ 		struct spu *spu;
+@@ -221,7 +237,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
+ 	spu->wbox_callback = spufs_wbox_callback;
+ 	spu->stop_callback = spufs_stop_callback;
+ 	spu->mfc_callback = spufs_mfc_callback;
+-	spu->dma_callback = spufs_dma_callback;
+ 	mb();
+ 	spu_unmap_mappings(ctx);
+ 	spu_restore(&ctx->csa, spu);
+@@ -409,7 +424,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
+ 	spu->wbox_callback = NULL;
+ 	spu->stop_callback = NULL;
+ 	spu->mfc_callback = NULL;
+-	spu->dma_callback = NULL;
+ 	spu_associate_mm(spu, NULL);
+ 	spu->pid = 0;
+ 	spu->tgid = 0;
+@@ -454,6 +468,13 @@ static void __spu_add_to_rq(struct spu_context *ctx)
+ 	}
+ }
+ 
++static void spu_add_to_rq(struct spu_context *ctx)
++{
++	spin_lock(&spu_prio->runq_lock);
++	__spu_add_to_rq(ctx);
++	spin_unlock(&spu_prio->runq_lock);
++}
++
+ static void __spu_del_from_rq(struct spu_context *ctx)
+ {
+ 	int prio = ctx->prio;
+@@ -468,10 +489,24 @@ static void __spu_del_from_rq(struct spu_context *ctx)
+ 	}
+ }
+ 
++void spu_del_from_rq(struct spu_context *ctx)
++{
++	spin_lock(&spu_prio->runq_lock);
++	__spu_del_from_rq(ctx);
++	spin_unlock(&spu_prio->runq_lock);
++}
++
+ static void spu_prio_wait(struct spu_context *ctx)
+ {
+ 	DEFINE_WAIT(wait);
+ 
++	/*
++	 * The caller must explicitly wait for a context to be loaded
++	 * if the nosched flag is set.  If NOSCHED is not set, the caller
++	 * queues the context and waits for an spu event or error.
++	 */
++	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
++
+ 	spin_lock(&spu_prio->runq_lock);
+ 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
+ 	if (!signal_pending(current)) {
+@@ -555,7 +590,7 @@ static struct spu *find_victim(struct spu_context *ctx)
+ 	/*
+ 	 * Look for a possible preemption candidate on the local node first.
+ 	 * If there is no candidate look at the other nodes.  This isn't
+-	 * exactly fair, but so far the whole spu schedule tries to keep
++	 * exactly fair, but so far the whole spu scheduler tries to keep
+ 	 * a strong node affinity.  We might want to fine-tune this in
+ 	 * the future.
+ 	 */
+@@ -571,6 +606,7 @@ static struct spu *find_victim(struct spu_context *ctx)
+ 			struct spu_context *tmp = spu->ctx;
+ 
+ 			if (tmp && tmp->prio > ctx->prio &&
++			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
+ 			    (!victim || tmp->prio > victim->prio))
+ 				victim = spu->ctx;
+ 		}
+@@ -582,6 +618,10 @@ static struct spu *find_victim(struct spu_context *ctx)
+ 			 * higher priority contexts before lower priority
+ 			 * ones, so this is safe until we introduce
+ 			 * priority inheritance schemes.
++			 *
++			 * XXX if the highest priority context is locked,
++			 * this can loop a long time.  Might be better to
++			 * look at another context or give up after X retries.
+ 			 */
+ 			if (!mutex_trylock(&victim->state_mutex)) {
+ 				victim = NULL;
+@@ -589,10 +629,10 @@ static struct spu *find_victim(struct spu_context *ctx)
+ 			}
+ 
+ 			spu = victim->spu;
+-			if (!spu) {
++			if (!spu || victim->prio <= ctx->prio) {
+ 				/*
+ 				 * This race can happen because we've dropped
+-				 * the active list mutex.  No a problem, just
++				 * the active list mutex.  Not a problem, just
+ 				 * restart the search.
+ 				 */
+ 				mutex_unlock(&victim->state_mutex);
+@@ -607,13 +647,10 @@ static struct spu *find_victim(struct spu_context *ctx)
+ 
+ 			victim->stats.invol_ctx_switch++;
+ 			spu->stats.invol_ctx_switch++;
++			spu_add_to_rq(victim);
++
+ 			mutex_unlock(&victim->state_mutex);
+-			/*
+-			 * We need to break out of the wait loop in spu_run
+-			 * manually to ensure this context gets put on the
+-			 * runqueue again ASAP.
+-			 */
+-			wake_up(&victim->stop_wq);
++
+ 			return spu;
+ 		}
+ 	}
+@@ -621,6 +658,50 @@ static struct spu *find_victim(struct spu_context *ctx)
+ 	return NULL;
+ }
+ 
++static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
++{
++	int node = spu->node;
++	int success = 0;
++
++	spu_set_timeslice(ctx);
++
++	mutex_lock(&cbe_spu_info[node].list_mutex);
++	if (spu->ctx == NULL) {
++		spu_bind_context(spu, ctx);
++		cbe_spu_info[node].nr_active++;
++		spu->alloc_state = SPU_USED;
++		success = 1;
++	}
++	mutex_unlock(&cbe_spu_info[node].list_mutex);
++
++	if (success)
++		wake_up_all(&ctx->run_wq);
++	else
++		spu_add_to_rq(ctx);
++}
++
++static void spu_schedule(struct spu *spu, struct spu_context *ctx)
++{
++	/* not a candidate for interruptible because it's called either
++	   from the scheduler thread or from spu_deactivate */
++	mutex_lock(&ctx->state_mutex);
++	__spu_schedule(spu, ctx);
++	spu_release(ctx);
++}
++
++static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
++{
++	int node = spu->node;
++
++	mutex_lock(&cbe_spu_info[node].list_mutex);
++	cbe_spu_info[node].nr_active--;
++	spu->alloc_state = SPU_FREE;
++	spu_unbind_context(spu, ctx);
++	ctx->stats.invol_ctx_switch++;
++	spu->stats.invol_ctx_switch++;
++	mutex_unlock(&cbe_spu_info[node].list_mutex);
++}
++
+ /**
+  * spu_activate - find a free spu for a context and execute it
+  * @ctx:	spu context to schedule
+@@ -632,39 +713,47 @@ static struct spu *find_victim(struct spu_context *ctx)
+  */
+ int spu_activate(struct spu_context *ctx, unsigned long flags)
+ {
+-	do {
+-		struct spu *spu;
++	struct spu *spu;
+ 
+-		/*
+-		 * If there are multiple threads waiting for a single context
+-		 * only one actually binds the context while the others will
+-		 * only be able to acquire the state_mutex once the context
+-		 * already is in runnable state.
+-		 */
+-		if (ctx->spu)
+-			return 0;
++	/*
++	 * If there are multiple threads waiting for a single context
++	 * only one actually binds the context while the others will
++	 * only be able to acquire the state_mutex once the context
++	 * already is in runnable state.
++	 */
++	if (ctx->spu)
++		return 0;
+ 
+-		spu = spu_get_idle(ctx);
+-		/*
+-		 * If this is a realtime thread we try to get it running by
+-		 * preempting a lower priority thread.
+-		 */
+-		if (!spu && rt_prio(ctx->prio))
+-			spu = find_victim(ctx);
+-		if (spu) {
+-			int node = spu->node;
++spu_activate_top:
++	if (signal_pending(current))
++		return -ERESTARTSYS;
+ 
+-			mutex_lock(&cbe_spu_info[node].list_mutex);
+-			spu_bind_context(spu, ctx);
+-			cbe_spu_info[node].nr_active++;
+-			mutex_unlock(&cbe_spu_info[node].list_mutex);
+-			return 0;
+-		}
++	spu = spu_get_idle(ctx);
++	/*
++	 * If this is a realtime thread we try to get it running by
++	 * preempting a lower priority thread.
++	 */
++	if (!spu && rt_prio(ctx->prio))
++		spu = find_victim(ctx);
++	if (spu) {
++		unsigned long runcntl;
++
++		runcntl = ctx->ops->runcntl_read(ctx);
++		__spu_schedule(spu, ctx);
++		if (runcntl & SPU_RUNCNTL_RUNNABLE)
++			spuctx_switch_state(ctx, SPU_UTIL_USER);
+ 
++		return 0;
++	}
++
++	if (ctx->flags & SPU_CREATE_NOSCHED) {
+ 		spu_prio_wait(ctx);
+-	} while (!signal_pending(current));
++		goto spu_activate_top;
++	}
+ 
+-	return -ERESTARTSYS;
++	spu_add_to_rq(ctx);
++
++	return 0;
+ }
+ 
+ /**
+@@ -706,21 +795,19 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
+ 	if (spu) {
+ 		new = grab_runnable_context(max_prio, spu->node);
+ 		if (new || force) {
+-			int node = spu->node;
+-
+-			mutex_lock(&cbe_spu_info[node].list_mutex);
+-			spu_unbind_context(spu, ctx);
+-			spu->alloc_state = SPU_FREE;
+-			cbe_spu_info[node].nr_active--;
+-			mutex_unlock(&cbe_spu_info[node].list_mutex);
+-
+-			ctx->stats.vol_ctx_switch++;
+-			spu->stats.vol_ctx_switch++;
+-
+-			if (new)
+-				wake_up(&new->stop_wq);
++			spu_unschedule(spu, ctx);
++			if (new) {
++				if (new->flags & SPU_CREATE_NOSCHED)
++					wake_up(&new->stop_wq);
++				else {
++					spu_release(ctx);
++					spu_schedule(spu, new);
++					/* this one can't easily be made
++					   interruptible */
++					mutex_lock(&ctx->state_mutex);
++				}
++			}
+ 		}
+-
+ 	}
+ 
+ 	return new != NULL;
+@@ -757,43 +844,38 @@ void spu_yield(struct spu_context *ctx)
+ 
+ static noinline void spusched_tick(struct spu_context *ctx)
+ {
++	struct spu_context *new = NULL;
++	struct spu *spu = NULL;
++	u32 status;
++
++	if (spu_acquire(ctx))
++		BUG();	/* a kernel thread never has signals pending */
++
++	if (ctx->state != SPU_STATE_RUNNABLE)
++		goto out;
++	if (spu_stopped(ctx, &status))
++		goto out;
+ 	if (ctx->flags & SPU_CREATE_NOSCHED)
+-		return;
++		goto out;
+ 	if (ctx->policy == SCHED_FIFO)
+-		return;
++		goto out;
+ 
+ 	if (--ctx->time_slice)
+-		return;
++		goto out;
+ 
+-	/*
+-	 * Unfortunately list_mutex ranks outside of state_mutex, so
+-	 * we have to trylock here.  If we fail give the context another
+-	 * tick and try again.
+-	 */
+-	if (mutex_trylock(&ctx->state_mutex)) {
+-		struct spu *spu = ctx->spu;
+-		struct spu_context *new;
+-
+-		new = grab_runnable_context(ctx->prio + 1, spu->node);
+-		if (new) {
+-			spu_unbind_context(spu, ctx);
+-			ctx->stats.invol_ctx_switch++;
+-			spu->stats.invol_ctx_switch++;
+-			spu->alloc_state = SPU_FREE;
+-			cbe_spu_info[spu->node].nr_active--;
+-			wake_up(&new->stop_wq);
+-			/*
+-			 * We need to break out of the wait loop in
+-			 * spu_run manually to ensure this context
+-			 * gets put on the runqueue again ASAP.
+-			 */
+-			wake_up(&ctx->stop_wq);
+-		}
+-		spu_set_timeslice(ctx);
+-		mutex_unlock(&ctx->state_mutex);
++	spu = ctx->spu;
++	new = grab_runnable_context(ctx->prio + 1, spu->node);
++	if (new) {
++		spu_unschedule(spu, ctx);
++		spu_add_to_rq(ctx);
+ 	} else {
+ 		ctx->time_slice++;
+ 	}
++out:
++	spu_release(ctx);
++
++	if (new)
++		spu_schedule(spu, new);
+ }
+ 
+ /**
+@@ -817,35 +899,31 @@ static unsigned long count_active_contexts(void)
+ }
+ 
+ /**
+- * spu_calc_load - given tick count, update the avenrun load estimates.
+- * @tick:	tick count
++ * spu_calc_load - update the avenrun load estimates.
+  *
+  * No locking against reading these values from userspace, as for
+  * the CPU loadavg code.
+  */
+-static void spu_calc_load(unsigned long ticks)
++static void spu_calc_load(void)
+ {
+ 	unsigned long active_tasks; /* fixed-point */
+-	static int count = LOAD_FREQ;
+-
+-	count -= ticks;
+-
+-	if (unlikely(count < 0)) {
+-		active_tasks = count_active_contexts() * FIXED_1;
+-		do {
+-			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+-			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+-			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+-			count += LOAD_FREQ;
+-		} while (count < 0);
+-	}
++
++	active_tasks = count_active_contexts() * FIXED_1;
++	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
++	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
++	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+ }
+ 
+ static void spusched_wake(unsigned long data)
+ {
+ 	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+ 	wake_up_process(spusched_task);
+-	spu_calc_load(SPUSCHED_TICK);
++}
++
++static void spuloadavg_wake(unsigned long data)
++{
++	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
++	spu_calc_load();
+ }
+ 
+ static int spusched_thread(void *unused)
+@@ -857,17 +935,58 @@ static int spusched_thread(void *unused)
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 		schedule();
+ 		for (node = 0; node < MAX_NUMNODES; node++) {
+-			mutex_lock(&cbe_spu_info[node].list_mutex);
+-			list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
+-				if (spu->ctx)
+-					spusched_tick(spu->ctx);
+-			mutex_unlock(&cbe_spu_info[node].list_mutex);
++			struct mutex *mtx = &cbe_spu_info[node].list_mutex;
++
++			mutex_lock(mtx);
++			list_for_each_entry(spu, &cbe_spu_info[node].spus,
++					cbe_list) {
++				struct spu_context *ctx = spu->ctx;
++
++				if (ctx) {
++					mutex_unlock(mtx);
++					spusched_tick(ctx);
++					mutex_lock(mtx);
++				}
++			}
++			mutex_unlock(mtx);
+ 		}
+ 	}
+ 
+ 	return 0;
+ }
+ 
++void spuctx_switch_state(struct spu_context *ctx,
++		enum spu_utilization_state new_state)
++{
++	unsigned long long curtime;
++	signed long long delta;
++	struct timespec ts;
++	struct spu *spu;
++	enum spu_utilization_state old_state;
++
++	ktime_get_ts(&ts);
++	curtime = timespec_to_ns(&ts);
++	delta = curtime - ctx->stats.tstamp;
++
++	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
++	WARN_ON(delta < 0);
++
++	spu = ctx->spu;
++	old_state = ctx->stats.util_state;
++	ctx->stats.util_state = new_state;
++	ctx->stats.tstamp = curtime;
++
++	/*
++	 * Update the physical SPU utilization statistics.
++	 */
++	if (spu) {
++		ctx->stats.times[old_state] += delta;
++		spu->stats.times[old_state] += delta;
++		spu->stats.util_state = new_state;
++		spu->stats.tstamp = curtime;
++	}
++}
++
+ #define LOAD_INT(x) ((x) >> FSHIFT)
+ #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+ 
+@@ -881,7 +1000,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
+ 
+ 	/*
+ 	 * Note that last_pid doesn't really make much sense for the
+-	 * SPU loadavg (it even seems very odd on the CPU side..),
++	 * SPU loadavg (it even seems very odd on the CPU side...),
+ 	 * but we include it here to have a 100% compatible interface.
+ 	 */
+ 	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
+@@ -922,6 +1041,7 @@ int __init spu_sched_init(void)
+ 	spin_lock_init(&spu_prio->runq_lock);
+ 
+ 	setup_timer(&spusched_timer, spusched_wake, 0);
++	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
+ 
+ 	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
+ 	if (IS_ERR(spusched_task)) {
+@@ -929,6 +1049,8 @@ int __init spu_sched_init(void)
+ 		goto out_free_spu_prio;
+ 	}
+ 
++	mod_timer(&spuloadavg_timer, 0);
++
+ 	entry = create_proc_entry("spu_loadavg", 0, NULL);
+ 	if (!entry)
+ 		goto out_stop_kthread;
+@@ -954,6 +1076,7 @@ void spu_sched_exit(void)
+ 	remove_proc_entry("spu_loadavg", NULL);
+ 
+ 	del_timer_sync(&spusched_timer);
++	del_timer_sync(&spuloadavg_timer);
+ 	kthread_stop(spusched_task);
+ 
+ 	for (node = 0; node < MAX_NUMNODES; node++) {
+diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
+index ca47b99..0e11403 100644
+--- a/arch/powerpc/platforms/cell/spufs/spufs.h
++++ b/arch/powerpc/platforms/cell/spufs/spufs.h
+@@ -71,6 +71,7 @@ struct spu_context {
+ 	wait_queue_head_t wbox_wq;
+ 	wait_queue_head_t stop_wq;
+ 	wait_queue_head_t mfc_wq;
++	wait_queue_head_t run_wq;
+ 	struct fasync_struct *ibox_fasync;
+ 	struct fasync_struct *wbox_fasync;
+ 	struct fasync_struct *mfc_fasync;
+@@ -168,8 +169,10 @@ struct spu_context_ops {
+ 	void (*npc_write) (struct spu_context * ctx, u32 data);
+ 	 u32(*status_read) (struct spu_context * ctx);
+ 	char*(*get_ls) (struct spu_context * ctx);
++	void (*privcntl_write) (struct spu_context *ctx, u64 data);
+ 	 u32 (*runcntl_read) (struct spu_context * ctx);
+ 	void (*runcntl_write) (struct spu_context * ctx, u32 data);
++	void (*runcntl_stop) (struct spu_context * ctx);
+ 	void (*master_start) (struct spu_context * ctx);
+ 	void (*master_stop) (struct spu_context * ctx);
+ 	int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
+@@ -219,15 +222,16 @@ void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
+ 
+ /* fault handling */
+ int spufs_handle_class1(struct spu_context *ctx);
++int spufs_handle_class0(struct spu_context *ctx);
+ 
+ /* affinity */
+ struct spu *affinity_check(struct spu_context *ctx);
+ 
+ /* context management */
+ extern atomic_t nr_spu_contexts;
+-static inline void spu_acquire(struct spu_context *ctx)
++static inline int __must_check spu_acquire(struct spu_context *ctx)
+ {
+-	mutex_lock(&ctx->state_mutex);
++	return mutex_lock_interruptible(&ctx->state_mutex);
+ }
+ 
+ static inline void spu_release(struct spu_context *ctx)
+@@ -242,10 +246,11 @@ int put_spu_context(struct spu_context *ctx);
+ void spu_unmap_mappings(struct spu_context *ctx);
+ 
+ void spu_forget(struct spu_context *ctx);
+-int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
+-void spu_acquire_saved(struct spu_context *ctx);
++int __must_check spu_acquire_saved(struct spu_context *ctx);
+ void spu_release_saved(struct spu_context *ctx);
+ 
++int spu_stopped(struct spu_context *ctx, u32 * stat);
++void spu_del_from_rq(struct spu_context *ctx);
+ int spu_activate(struct spu_context *ctx, unsigned long flags);
+ void spu_deactivate(struct spu_context *ctx);
+ void spu_yield(struct spu_context *ctx);
+@@ -279,7 +284,9 @@ extern char *isolated_loader;
+ 		}							\
+ 		spu_release(ctx);					\
+ 		schedule();						\
+-		spu_acquire(ctx);					\
++		__ret = spu_acquire(ctx);				\
++		if (__ret)						\
++			break;						\
+ 	}								\
+ 	finish_wait(&(wq), &__wait);					\
+ 	__ret;								\
+@@ -306,41 +313,16 @@ struct spufs_coredump_reader {
+ extern struct spufs_coredump_reader spufs_coredump_read[];
+ extern int spufs_coredump_num_notes;
+ 
+-/*
+- * This function is a little bit too large for an inline, but
+- * as fault.c is built into the kernel we can't move it out of
+- * line.
+- */
+-static inline void spuctx_switch_state(struct spu_context *ctx,
+-		enum spu_utilization_state new_state)
+-{
+-	unsigned long long curtime;
+-	signed long long delta;
+-	struct timespec ts;
+-	struct spu *spu;
+-	enum spu_utilization_state old_state;
+-
+-	ktime_get_ts(&ts);
+-	curtime = timespec_to_ns(&ts);
+-	delta = curtime - ctx->stats.tstamp;
+-
+-	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+-	WARN_ON(delta < 0);
+-
+-	spu = ctx->spu;
+-	old_state = ctx->stats.util_state;
+-	ctx->stats.util_state = new_state;
+-	ctx->stats.tstamp = curtime;
+-
+-	/*
+-	 * Update the physical SPU utilization statistics.
+-	 */
+-	if (spu) {
+-		ctx->stats.times[old_state] += delta;
+-		spu->stats.times[old_state] += delta;
+-		spu->stats.util_state = new_state;
+-		spu->stats.tstamp = curtime;
+-	}
+-}
++extern int spu_init_csa(struct spu_state *csa);
++extern void spu_fini_csa(struct spu_state *csa);
++extern int spu_save(struct spu_state *prev, struct spu *spu);
++extern int spu_restore(struct spu_state *new, struct spu *spu);
++extern int spu_switch(struct spu_state *prev, struct spu_state *new,
++		      struct spu *spu);
++extern int spu_alloc_lscsa(struct spu_state *csa);
++extern void spu_free_lscsa(struct spu_state *csa);
++
++extern void spuctx_switch_state(struct spu_context *ctx,
++		enum spu_utilization_state new_state);
+ 
+ #endif
+diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
+index 3d64c81..6063c88 100644
+--- a/arch/powerpc/platforms/cell/spufs/switch.c
++++ b/arch/powerpc/platforms/cell/spufs/switch.c
+@@ -48,6 +48,8 @@
+ #include <asm/spu_csa.h>
+ #include <asm/mmu_context.h>
+ 
++#include "spufs.h"
++
+ #include "spu_save_dump.h"
+ #include "spu_restore_dump.h"
+ 
+@@ -691,35 +693,9 @@ static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
+ 	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
+ }
+ 
+-static inline void get_kernel_slb(u64 ea, u64 slb[2])
++static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
++		unsigned int *code, int code_size)
+ {
+-	u64 llp;
+-
+-	if (REGION_ID(ea) == KERNEL_REGION_ID)
+-		llp = mmu_psize_defs[mmu_linear_psize].sllp;
+-	else
+-		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+-	slb[0] = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+-		SLB_VSID_KERNEL | llp;
+-	slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
+-}
+-
+-static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
+-{
+-	struct spu_priv2 __iomem *priv2 = spu->priv2;
+-
+-	out_be64(&priv2->slb_index_W, slbe);
+-	eieio();
+-	out_be64(&priv2->slb_vsid_RW, slb[0]);
+-	out_be64(&priv2->slb_esid_RW, slb[1]);
+-	eieio();
+-}
+-
+-static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
+-{
+-	u64 code_slb[2];
+-	u64 lscsa_slb[2];
+-
+ 	/* Save, Step 47:
+ 	 * Restore, Step 30.
+ 	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
+@@ -735,11 +711,7 @@ static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
+ 	 *     translation is desired by OS environment).
+ 	 */
+ 	spu_invalidate_slbs(spu);
+-	get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
+-	get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
+-	load_mfc_slb(spu, code_slb, 0);
+-	if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
+-		load_mfc_slb(spu, lscsa_slb, 1);
++	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
+ }
+ 
+ static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
+@@ -768,9 +740,9 @@ static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
+ 	 *     (translation) interrupts.
+ 	 */
+ 	spin_lock_irq(&spu->register_lock);
+-	spu_int_stat_clear(spu, 0, ~0ul);
+-	spu_int_stat_clear(spu, 1, ~0ul);
+-	spu_int_stat_clear(spu, 2, ~0ul);
++	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
++	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
++	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
+ 	spu_int_mask_set(spu, 0, 0ul);
+ 	spu_int_mask_set(spu, 1, class1_mask);
+ 	spu_int_mask_set(spu, 2, 0ul);
+@@ -927,8 +899,8 @@ static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
+ 	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
+ 
+ 	local_irq_save(flags);
+-	spu_int_stat_clear(spu, 0, ~(0ul));
+-	spu_int_stat_clear(spu, 2, ~(0ul));
++	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
++	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
+ 	local_irq_restore(flags);
+ }
+ 
+@@ -946,8 +918,8 @@ static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
+ 	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
+ 
+ 	local_irq_save(flags);
+-	spu_int_stat_clear(spu, 0, ~(0ul));
+-	spu_int_stat_clear(spu, 2, ~(0ul));
++	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
++	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
+ 	local_irq_restore(flags);
+ }
+ 
+@@ -1423,9 +1395,9 @@ static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
+ 	spu_int_mask_set(spu, 0, 0ul);
+ 	spu_int_mask_set(spu, 1, 0ul);
+ 	spu_int_mask_set(spu, 2, 0ul);
+-	spu_int_stat_clear(spu, 0, ~0ul);
+-	spu_int_stat_clear(spu, 1, ~0ul);
+-	spu_int_stat_clear(spu, 2, ~0ul);
++	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
++	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
++	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
+ 	spin_unlock_irq(&spu->register_lock);
+ }
+ 
+@@ -1866,7 +1838,8 @@ static void save_lscsa(struct spu_state *prev, struct spu *spu)
+ 	 */
+ 
+ 	resume_mfc_queue(prev, spu);	/* Step 46. */
+-	setup_mfc_slbs(prev, spu);	/* Step 47. */
++	/* Step 47. */
++	setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
+ 	set_switch_active(prev, spu);	/* Step 48. */
+ 	enable_interrupts(prev, spu);	/* Step 49. */
+ 	save_ls_16kb(prev, spu);	/* Step 50. */
+@@ -1971,7 +1944,8 @@ static void restore_lscsa(struct spu_state *next, struct spu *spu)
+ 	setup_spu_status_part1(next, spu);	/* Step 27. */
+ 	setup_spu_status_part2(next, spu);	/* Step 28. */
+ 	restore_mfc_rag(next, spu);	        /* Step 29. */
+-	setup_mfc_slbs(next, spu);	        /* Step 30. */
++	/* Step 30. */
++	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
+ 	set_spu_npc(next, spu);	                /* Step 31. */
+ 	set_signot1(next, spu);	                /* Step 32. */
+ 	set_signot2(next, spu);	                /* Step 33. */
+@@ -2103,10 +2077,6 @@ int spu_save(struct spu_state *prev, struct spu *spu)
+ 	int rc;
+ 
+ 	acquire_spu_lock(spu);	        /* Step 1.     */
+-	prev->dar = spu->dar;
+-	prev->dsisr = spu->dsisr;
+-	spu->dar = 0;
+-	spu->dsisr = 0;
+ 	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
+ 	release_spu_lock(spu);
+ 	if (rc != 0 && rc != 2 && rc != 6) {
+@@ -2133,9 +2103,6 @@ int spu_restore(struct spu_state *new, struct spu *spu)
+ 	acquire_spu_lock(spu);
+ 	harvest(NULL, spu);
+ 	spu->slb_replace = 0;
+-	new->dar = 0;
+-	new->dsisr = 0;
+-	spu->class_0_pending = 0;
+ 	rc = __do_spu_restore(new, spu);
+ 	release_spu_lock(spu);
+ 	if (rc) {
+@@ -2215,10 +2182,8 @@ int spu_init_csa(struct spu_state *csa)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(spu_init_csa);
+ 
+ void spu_fini_csa(struct spu_state *csa)
+ {
+ 	spu_free_lscsa(csa);
+ }
+-EXPORT_SYMBOL_GPL(spu_fini_csa);
+diff --git a/arch/powerpc/platforms/celleb/Kconfig b/arch/powerpc/platforms/celleb/Kconfig
+index 04748d4..372891e 100644
+--- a/arch/powerpc/platforms/celleb/Kconfig
++++ b/arch/powerpc/platforms/celleb/Kconfig
+@@ -2,6 +2,8 @@ config PPC_CELLEB
+ 	bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
+ 	depends on PPC_MULTIPLATFORM && PPC64
+ 	select PPC_CELL
++	select PPC_CELL_NATIVE
++	select PPC_RTAS
+ 	select PPC_INDIRECT_IO
+ 	select PPC_OF_PLATFORM_PCI
+ 	select HAS_TXX9_SERIAL
+diff --git a/arch/powerpc/platforms/celleb/io-workarounds.c b/arch/powerpc/platforms/celleb/io-workarounds.c
+index 2b91214..423339b 100644
+--- a/arch/powerpc/platforms/celleb/io-workarounds.c
++++ b/arch/powerpc/platforms/celleb/io-workarounds.c
+@@ -22,6 +22,7 @@
+ 
+ #undef DEBUG
+ 
++#include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/irq.h>
+ 
+@@ -222,7 +223,7 @@ void __init celleb_pci_add_one(struct pci_controller *phb,
+ 			       void (*dummy_read)(struct pci_controller *))
+ {
+ 	struct celleb_pci_bus *bus = &celleb_pci_busses[celleb_pci_count];
+-	struct device_node *np = phb->arch_data;
++	struct device_node *np = phb->dn;
+ 
+ 	if (celleb_pci_count >= MAX_CELLEB_PCI_BUS) {
+ 		printk(KERN_ERR "Too many pci bridges, workarounds"
+@@ -256,13 +257,13 @@ int __init celleb_pci_workaround_init(void)
+ 
+ 	celleb_dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ 	if (!celleb_dummy_page_va) {
+-		printk(KERN_ERR "Celleb: dummy read disabled."
++		printk(KERN_ERR "Celleb: dummy read disabled. "
+ 			"Alloc celleb_dummy_page_va failed\n");
+ 		return 1;
+ 	}
+ 
+ 	list_for_each_entry(phb, &hose_list, list_node) {
+-		node = phb->arch_data;
++		node = phb->dn;
+ 		match = of_match_node(celleb_pci_workaround_match, node);
+ 
+ 		if (match) {
+diff --git a/arch/powerpc/platforms/celleb/iommu.c b/arch/powerpc/platforms/celleb/iommu.c
+index 755d869..93b0efd 100644
+--- a/arch/powerpc/platforms/celleb/iommu.c
++++ b/arch/powerpc/platforms/celleb/iommu.c
+@@ -22,8 +22,9 @@
+ #include <linux/init.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/pci.h>
++#include <linux/of_platform.h>
+ 
+-#include <asm/of_platform.h>
++#include <asm/machdep.h>
+ 
+ #include "beat_wrapper.h"
+ 
+@@ -51,6 +52,8 @@ static int __init find_dma_window(u64 *io_space_id, u64 *ioid,
+ 	return 0;
+ }
+ 
++static unsigned long celleb_dma_direct_offset;
++
+ static void __init celleb_init_direct_mapping(void)
+ {
+ 	u64 lpar_addr, io_addr;
+@@ -68,7 +71,18 @@ static void __init celleb_init_direct_mapping(void)
+ 				     ioid, DMA_FLAGS);
+ 	}
+ 
+-	dma_direct_offset = dma_base;
++	celleb_dma_direct_offset = dma_base;
++}
++
++static void celleb_dma_dev_setup(struct device *dev)
++{
++	dev->archdata.dma_ops = get_pci_dma_ops();
++	dev->archdata.dma_data = (void *)celleb_dma_direct_offset;
++}
++
++static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
++{
++	celleb_dma_dev_setup(&pdev->dev);
+ }
+ 
+ static int celleb_of_bus_notify(struct notifier_block *nb,
+@@ -80,7 +94,7 @@ static int celleb_of_bus_notify(struct notifier_block *nb,
+ 	if (action != BUS_NOTIFY_ADD_DEVICE)
+ 		return 0;
+ 
+-	dev->archdata.dma_ops = get_pci_dma_ops();
++	celleb_dma_dev_setup(dev);
+ 
+ 	return 0;
+ }
+@@ -91,14 +105,12 @@ static struct notifier_block celleb_of_bus_notifier = {
+ 
+ static int __init celleb_init_iommu(void)
+ {
+-	if (!machine_is(celleb))
+-		return -ENODEV;
+-
+ 	celleb_init_direct_mapping();
+ 	set_pci_dma_ops(&dma_direct_ops);
++	ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
+ 	bus_register_notifier(&of_platform_bus_type, &celleb_of_bus_notifier);
+ 
+ 	return 0;
+ }
+ 
+-arch_initcall(celleb_init_iommu);
++machine_arch_initcall(celleb_beat, celleb_init_iommu);
+diff --git a/arch/powerpc/platforms/celleb/pci.c b/arch/powerpc/platforms/celleb/pci.c
+index 6bc32fd..51b390d 100644
+--- a/arch/powerpc/platforms/celleb/pci.c
++++ b/arch/powerpc/platforms/celleb/pci.c
+@@ -31,6 +31,7 @@
+ #include <linux/init.h>
+ #include <linux/bootmem.h>
+ #include <linux/pci_regs.h>
++#include <linux/of.h>
+ #include <linux/of_device.h>
+ 
+ #include <asm/io.h>
+@@ -138,8 +139,6 @@ static void celleb_config_read_fake(unsigned char *config, int where,
+ 		*val = celleb_fake_config_readl(p);
+ 		break;
+ 	}
+-
+-	return;
+ }
+ 
+ static void celleb_config_write_fake(unsigned char *config, int where,
+@@ -158,7 +157,6 @@ static void celleb_config_write_fake(unsigned char *config, int where,
+ 		celleb_fake_config_writel(val, p);
+ 		break;
+ 	}
+-	return;
+ }
+ 
+ static int celleb_fake_pci_read_config(struct pci_bus *bus,
+@@ -351,6 +349,10 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
+ 	wi1 = of_get_property(node, "vendor-id", NULL);
+ 	wi2 = of_get_property(node, "class-code", NULL);
+ 	wi3 = of_get_property(node, "revision-id", NULL);
++	if (!wi0 || !wi1 || !wi2 || !wi3) {
++		printk(KERN_ERR "PCI: Missing device tree properties.\n");
++		goto error;
++	}
+ 
+ 	celleb_config_write_fake(*config, PCI_DEVICE_ID, 2, wi0[0] & 0xffff);
+ 	celleb_config_write_fake(*config, PCI_VENDOR_ID, 2, wi1[0] & 0xffff);
+@@ -372,6 +374,10 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
+ 	celleb_setup_pci_base_addrs(hose, devno, fn, num_base_addr);
+ 
+ 	li = of_get_property(node, "interrupts", &rlen);
++	if (!li) {
++		printk(KERN_ERR "PCI: interrupts not found.\n");
++		goto error;
++	}
+ 	val = li[0];
+ 	celleb_config_write_fake(*config, PCI_INTERRUPT_PIN, 1, 1);
+ 	celleb_config_write_fake(*config, PCI_INTERRUPT_LINE, 1, val);
+@@ -475,7 +481,7 @@ static struct of_device_id celleb_phb_match[] __initdata = {
+ 
+ int __init celleb_setup_phb(struct pci_controller *phb)
+ {
+-	struct device_node *dev = phb->arch_data;
++	struct device_node *dev = phb->dn;
+ 	const struct of_device_id *match;
+ 	int (*setup_func)(struct device_node *, struct pci_controller *);
+ 
+diff --git a/arch/powerpc/platforms/celleb/scc_epci.c b/arch/powerpc/platforms/celleb/scc_epci.c
+index 9d07642..a3c7cfb 100644
+--- a/arch/powerpc/platforms/celleb/scc_epci.c
++++ b/arch/powerpc/platforms/celleb/scc_epci.c
+@@ -95,7 +95,7 @@ void __init epci_workaround_init(struct pci_controller *hose)
+ 	private->dummy_page_da = dma_map_single(hose->parent,
+ 		celleb_dummy_page_va, PAGE_SIZE, DMA_FROM_DEVICE);
+ 	if (private->dummy_page_da == DMA_ERROR_CODE) {
+-		printk(KERN_ERR "EPCI: dummy read disabled."
++		printk(KERN_ERR "EPCI: dummy read disabled. "
+ 		       "Map dummy page failed.\n");
+ 		return;
+ 	}
+diff --git a/arch/powerpc/platforms/celleb/scc_uhc.c b/arch/powerpc/platforms/celleb/scc_uhc.c
+index b59c38a..cb43079 100644
+--- a/arch/powerpc/platforms/celleb/scc_uhc.c
++++ b/arch/powerpc/platforms/celleb/scc_uhc.c
+@@ -47,7 +47,8 @@ static void enable_scc_uhc(struct pci_dev *dev)
+ 	u32 val = 0;
+ 	int i;
+ 
+-	if (!machine_is(celleb))
++	if (!machine_is(celleb_beat) &&
++	    !machine_is(celleb_native))
+ 		return;
+ 
+ 	uhc_base = ioremap(pci_resource_start(dev, 0),
+diff --git a/arch/powerpc/platforms/celleb/setup.c b/arch/powerpc/platforms/celleb/setup.c
+index ddfb35a..f27ae1e 100644
+--- a/arch/powerpc/platforms/celleb/setup.c
++++ b/arch/powerpc/platforms/celleb/setup.c
+@@ -40,6 +40,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/root_dev.h>
+ #include <linux/console.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/mmu.h>
+ #include <asm/processor.h>
+@@ -52,12 +53,16 @@
+ #include <asm/time.h>
+ #include <asm/spu_priv1.h>
+ #include <asm/firmware.h>
+-#include <asm/of_platform.h>
++#include <asm/rtas.h>
++#include <asm/cell-regs.h>
+ 
+ #include "interrupt.h"
+ #include "beat_wrapper.h"
+ #include "beat.h"
+ #include "pci.h"
++#include "../cell/interrupt.h"
++#include "../cell/pervasive.h"
++#include "../cell/ras.h"
+ 
+ static char celleb_machine_type[128] = "Celleb";
+ 
+@@ -88,61 +93,122 @@ static void celleb_progress(char *s, unsigned short hex)
+ 	printk("*** %04x : %s\n", hex, s ? s : "");
+ }
+ 
+-static void __init celleb_setup_arch(void)
++static void __init celleb_setup_arch_common(void)
++{
++	/* init to some ~sane value until calibrate_delay() runs */
++	loops_per_jiffy = 50000000;
++
++#ifdef CONFIG_DUMMY_CONSOLE
++	conswitchp = &dummy_con;
++#endif
++}
++
++static struct of_device_id celleb_bus_ids[] __initdata = {
++	{ .type = "scc", },
++	{ .type = "ioif", },	/* old style */
++	{},
++};
++
++static int __init celleb_publish_devices(void)
++{
++	/* Publish OF platform devices for southbridge IOs */
++	of_platform_bus_probe(NULL, celleb_bus_ids, NULL);
++
++	celleb_pci_workaround_init();
++
++	return 0;
++}
++machine_device_initcall(celleb_beat, celleb_publish_devices);
++machine_device_initcall(celleb_native, celleb_publish_devices);
++
++
++/*
++ * functions for Celleb-Beat
++ */
++static void __init celleb_setup_arch_beat(void)
+ {
+ #ifdef CONFIG_SPU_BASE
+-	spu_priv1_ops = &spu_priv1_beat_ops;
+-	spu_management_ops = &spu_management_of_ops;
++	spu_priv1_ops		= &spu_priv1_beat_ops;
++	spu_management_ops	= &spu_management_of_ops;
+ #endif
+ 
+ #ifdef CONFIG_SMP
+ 	smp_init_celleb();
+ #endif
+ 
+-	/* init to some ~sane value until calibrate_delay() runs */
+-	loops_per_jiffy = 50000000;
+-
+-#ifdef CONFIG_DUMMY_CONSOLE
+-	conswitchp = &dummy_con;
+-#endif
++	celleb_setup_arch_common();
+ }
+ 
+-static int __init celleb_probe(void)
++static int __init celleb_probe_beat(void)
+ {
+ 	unsigned long root = of_get_flat_dt_root();
+ 
+ 	if (!of_flat_dt_is_compatible(root, "Beat"))
+ 		return 0;
+ 
+-	powerpc_firmware_features |= FW_FEATURE_CELLEB_POSSIBLE;
++	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
++		| FW_FEATURE_BEAT | FW_FEATURE_LPAR;
+ 	hpte_init_beat_v3();
++
+ 	return 1;
+ }
+ 
+-static struct of_device_id celleb_bus_ids[] __initdata = {
+-	{ .type = "scc", },
+-	{ .type = "ioif", },	/* old style */
+-	{},
+-};
+ 
+-static int __init celleb_publish_devices(void)
++/*
++ * functions for Celleb-native
++ */
++static void __init celleb_init_IRQ_native(void)
+ {
+-	if (!machine_is(celleb))
+-		return 0;
++	iic_init_IRQ();
++	spider_init_IRQ();
++}
+ 
+-	/* Publish OF platform devices for southbridge IOs */
+-	of_platform_bus_probe(NULL, celleb_bus_ids, NULL);
++static void __init celleb_setup_arch_native(void)
++{
++#ifdef CONFIG_SPU_BASE
++	spu_priv1_ops		= &spu_priv1_mmio_ops;
++	spu_management_ops	= &spu_management_of_ops;
++#endif
+ 
+-	celleb_pci_workaround_init();
++	cbe_regs_init();
+ 
+-	return 0;
++#ifdef CONFIG_CBE_RAS
++	cbe_ras_init();
++#endif
++
++#ifdef CONFIG_SMP
++	smp_init_cell();
++#endif
++
++	cbe_pervasive_init();
++
++	/* XXX: nvram initialization should be added */
++
++	celleb_setup_arch_common();
+ }
+-device_initcall(celleb_publish_devices);
+ 
+-define_machine(celleb) {
+-	.name			= "Cell Reference Set",
+-	.probe			= celleb_probe,
+-	.setup_arch		= celleb_setup_arch,
++static int __init celleb_probe_native(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	if (of_flat_dt_is_compatible(root, "Beat") ||
++	    !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb"))
++		return 0;
++
++	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
++	hpte_init_native();
++
++	return 1;
++}
++
++
++/*
++ * machine definitions
++ */
++define_machine(celleb_beat) {
++	.name			= "Cell Reference Set (Beat)",
++	.probe			= celleb_probe_beat,
++	.setup_arch		= celleb_setup_arch_beat,
+ 	.show_cpuinfo		= celleb_show_cpuinfo,
+ 	.restart		= beat_restart,
+ 	.power_off		= beat_power_off,
+@@ -167,3 +233,26 @@ define_machine(celleb) {
+ 	.machine_crash_shutdown	= default_machine_crash_shutdown,
+ #endif
+ };
++
++define_machine(celleb_native) {
++	.name			= "Cell Reference Set (native)",
++	.probe			= celleb_probe_native,
++	.setup_arch		= celleb_setup_arch_native,
++	.show_cpuinfo		= celleb_show_cpuinfo,
++	.restart		= rtas_restart,
++	.power_off		= rtas_power_off,
++	.halt			= rtas_halt,
++	.get_boot_time		= rtas_get_boot_time,
++	.get_rtc_time		= rtas_get_rtc_time,
++	.set_rtc_time		= rtas_set_rtc_time,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= celleb_progress,
++	.pci_probe_mode 	= celleb_pci_probe_mode,
++	.pci_setup_phb		= celleb_setup_phb,
++	.init_IRQ		= celleb_init_IRQ_native,
++#ifdef CONFIG_KEXEC
++	.machine_kexec		= default_machine_kexec,
++	.machine_kexec_prepare	= default_machine_kexec_prepare,
++	.machine_crash_shutdown	= default_machine_crash_shutdown,
++#endif
++};
+diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
+index 0340a34..609c46d 100644
+--- a/arch/powerpc/platforms/chrp/pci.c
++++ b/arch/powerpc/platforms/chrp/pci.c
+@@ -198,7 +198,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
+ 		printk ("RTAS supporting Pegasos OF not found, please upgrade"
+ 			" your firmware\n");
+ 	}
+-	pci_assign_all_buses = 1;
++	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ 	/* keep the reference to the root node */
+ }
+ 
+@@ -354,7 +354,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
+  * mode as well. The same fixup must be done to the class-code property in
+  * the IDE node /pci@80000000/ide@C,1
+  */
+-static void __devinit chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide)
++static void chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide)
+ {
+ 	u8 progif;
+ 	struct pci_dev *viaisa;
+@@ -375,4 +375,4 @@ static void __devinit chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide)
+ 
+ 	pci_dev_put(viaisa);
+ }
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, chrp_pci_fixup_vt8231_ata);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, chrp_pci_fixup_vt8231_ata);
+diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
+index 5930626..116babb 100644
+--- a/arch/powerpc/platforms/chrp/setup.c
++++ b/arch/powerpc/platforms/chrp/setup.c
+@@ -115,7 +115,7 @@ void chrp_show_cpuinfo(struct seq_file *m)
+ 	seq_printf(m, "machine\t\t: CHRP %s\n", model);
+ 
+ 	/* longtrail (goldengate) stuff */
+-	if (!strncmp(model, "IBM,LongTrail", 13)) {
++	if (model && !strncmp(model, "IBM,LongTrail", 13)) {
+ 		/* VLSI VAS96011/12 `Golden Gate 2' */
+ 		/* Memory banks */
+ 		sdramen = (in_le32(gg2_pci_config_base + GG2_PCI_DRAM_CTRL)
+@@ -203,15 +203,20 @@ static void __init sio_fixup_irq(const char *name, u8 device, u8 level,
+ static void __init sio_init(void)
+ {
+ 	struct device_node *root;
++	const char *model;
+ 
+-	if ((root = of_find_node_by_path("/")) &&
+-	    !strncmp(of_get_property(root, "model", NULL),
+-			"IBM,LongTrail", 13)) {
++	root = of_find_node_by_path("/");
++	if (!root)
++		return;
++
++	model = of_get_property(root, "model", NULL);
++	if (model && !strncmp(model, "IBM,LongTrail", 13)) {
+ 		/* logical device 0 (KBC/Keyboard) */
+ 		sio_fixup_irq("keyboard", 0, 1, 2);
+ 		/* select logical device 1 (KBC/Mouse) */
+ 		sio_fixup_irq("mouse", 1, 12, 2);
+ 	}
++
+ 	of_node_put(root);
+ }
+ 
+@@ -251,6 +256,57 @@ static void briq_restart(char *cmd)
+ 	for(;;);
+ }
+ 
++/*
++ * By default, input/output-device points to the keyboard/screen.
++ * If no card is installed, the built-in serial port is used as a fallback.
++ * But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
++ * to the built-in serial node. Instead, a /failsafe node is created.
++ */
++static void chrp_init_early(void)
++{
++	struct device_node *node;
++	const char *property;
++
++	if (strstr(cmd_line, "console="))
++		return;
++	/* find the boot console from /chosen/stdout */
++	if (!of_chosen)
++		return;
++	node = of_find_node_by_path("/");
++	if (!node)
++		return;
++	property = of_get_property(node, "model", NULL);
++	if (!property)
++		goto out_put;
++	if (strcmp(property, "Pegasos2"))
++		goto out_put;
++	/* this is a Pegasos2 */
++	property = of_get_property(of_chosen, "linux,stdout-path", NULL);
++	if (!property)
++		goto out_put;
++	of_node_put(node);
++	node = of_find_node_by_path(property);
++	if (!node)
++		return;
++	property = of_get_property(node, "device_type", NULL);
++	if (!property)
++		goto out_put;
++	if (strcmp(property, "serial"))
++		goto out_put;
++	/*
++	 * The 9-pin connector is either /failsafe
++	 * or /pci@80000000/isa@C/serial@i2F8.
++	 * The optional graphics card also has type 'serial' in VGA mode.
++	 */
++	property = of_get_property(node, "name", NULL);
++	if (!property)
++		goto out_put;
++	if (!strcmp(property, "failsafe") || !strcmp(property, "serial"))
++		add_preferred_console("ttyS", 0, NULL);
++out_put:
++	of_node_put(node);
++}
++
+ void __init chrp_setup_arch(void)
+ {
+ 	struct device_node *root = of_find_node_by_path("/");
+@@ -594,6 +650,7 @@ define_machine(chrp) {
+ 	.probe			= chrp_probe,
+ 	.setup_arch		= chrp_setup_arch,
+ 	.init			= chrp_init2,
++	.init_early		= chrp_init_early,
+ 	.show_cpuinfo		= chrp_show_cpuinfo,
+ 	.init_IRQ		= chrp_init_IRQ,
+ 	.restart		= rtas_restart,
+diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
+index 8924095..6c80837 100644
+--- a/arch/powerpc/platforms/embedded6xx/Kconfig
++++ b/arch/powerpc/platforms/embedded6xx/Kconfig
+@@ -9,6 +9,8 @@ config LINKSTATION
+ 	select FSL_SOC
+ 	select PPC_UDBG_16550 if SERIAL_8250
+ 	select DEFAULT_UIMAGE
++	select MPC10X_OPENPIC
++	select MPC10X_BRIDGE
+ 	help
+ 	  Select LINKSTATION if configuring for one of PPC- (MPC8241)
+ 	  based NAS systems from Buffalo Technology. So far only
+@@ -16,6 +18,19 @@ config LINKSTATION
+ 	  Linkstation-I HD-HLAN and HD-HGLAN versions, and PPC-based
+ 	  Terastation systems should be supported too.
+ 
++config STORCENTER
++	bool "IOMEGA StorCenter"
++	depends on EMBEDDED6xx
++	select MPIC
++	select FSL_SOC
++	select PPC_UDBG_16550 if SERIAL_8250
++	select WANT_DEVICE_TREE
++	select MPC10X_OPENPIC
++	select MPC10X_BRIDGE
++	help
++	  Select STORCENTER if configuring for the iomega StorCenter
++	  with an 8241 CPU in it.
++
+ config MPC7448HPC2
+ 	bool "Freescale MPC7448HPC2(Taiga)"
+ 	depends on EMBEDDED6xx
+@@ -23,6 +38,7 @@ config MPC7448HPC2
+ 	select DEFAULT_UIMAGE
+ 	select PPC_UDBG_16550
+ 	select WANT_DEVICE_TREE
++	select TSI108_BRIDGE
+ 	help
+ 	  Select MPC7448HPC2 if configuring for Freescale MPC7448HPC2 (Taiga)
+ 	  platform
+@@ -33,6 +49,7 @@ config PPC_HOLLY
+ 	select TSI108_BRIDGE
+ 	select PPC_UDBG_16550
+ 	select WANT_DEVICE_TREE
++	select TSI108_BRIDGE
+ 	help
+ 	  Select PPC_HOLLY if configuring for an IBM 750GX/CL Eval
+ 	  Board with TSI108/9 bridge (Hickory/Holly)
+@@ -48,17 +65,13 @@ config PPC_PRPMC2800
+ 
+ config TSI108_BRIDGE
+ 	bool
+-	depends on MPC7448HPC2 || PPC_HOLLY
+ 	select PCI
+ 	select MPIC
+ 	select MPIC_WEIRD
+-	default y
+ 
+ config MPC10X_BRIDGE
+ 	bool
+-	depends on LINKSTATION
+ 	select PPC_INDIRECT_PCI
+-	default y
+ 
+ config MV64X60
+ 	bool
+@@ -67,8 +80,6 @@ config MV64X60
+ 
+ config MPC10X_OPENPIC
+ 	bool
+-	depends on LINKSTATION
+-	default y
+ 
+ config MPC10X_STORE_GATHERING
+ 	bool "Enable MPC10x store gathering"
+diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
+index 844947c..06524d3 100644
+--- a/arch/powerpc/platforms/embedded6xx/Makefile
++++ b/arch/powerpc/platforms/embedded6xx/Makefile
+@@ -3,5 +3,6 @@
+ #
+ obj-$(CONFIG_MPC7448HPC2)	+= mpc7448_hpc2.o
+ obj-$(CONFIG_LINKSTATION)	+= linkstation.o ls_uart.o
++obj-$(CONFIG_STORCENTER)	+= storcenter.o
+ obj-$(CONFIG_PPC_HOLLY)		+= holly.o
+ obj-$(CONFIG_PPC_PRPMC2800)	+= prpmc2800.o
+diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
+index b6de2b5..b21fde5 100644
+--- a/arch/powerpc/platforms/embedded6xx/holly.c
++++ b/arch/powerpc/platforms/embedded6xx/holly.c
+@@ -20,12 +20,12 @@
+ #include <linux/console.h>
+ #include <linux/delay.h>
+ #include <linux/irq.h>
+-#include <linux/ide.h>
+ #include <linux/seq_file.h>
+ #include <linux/root_dev.h>
+ #include <linux/serial.h>
+ #include <linux/tty.h>
+ #include <linux/serial_core.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/system.h>
+ #include <asm/time.h>
+@@ -39,7 +39,6 @@
+ #include <asm/tsi108_irq.h>
+ #include <asm/tsi108_pci.h>
+ #include <asm/mpic.h>
+-#include <asm/of_platform.h>
+ 
+ #undef DEBUG
+ 
+diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
+index c99264c..9d891bd 100644
+--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
++++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
+@@ -117,9 +117,6 @@ static int __init ls_uarts_init(void)
+ 	phys_addr_t phys_addr;
+ 	int len;
+ 
+-	if (!machine_is(linkstation))
+-		return 0;
+-
+ 	avr = of_find_node_by_path("/soc10x/serial@80004500");
+ 	if (!avr)
+ 		return -EINVAL;
+@@ -142,4 +139,4 @@ static int __init ls_uarts_init(void)
+ 	return 0;
+ }
+ 
+-late_initcall(ls_uarts_init);
++machine_late_initcall(linkstation, ls_uarts_init);
+diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+index a2c04b9..d4f8bf5 100644
+--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
++++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+@@ -53,8 +53,6 @@
+ 
+ #define MPC7448HPC2_PCI_CFG_PHYS 0xfb000000
+ 
+-extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
+-
+ int mpc7448_hpc2_exclude_device(struct pci_controller *hose,
+ 				u_char bus, u_char devfn)
+ {
+diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
+new file mode 100644
+index 0000000..e12e9d2
+--- /dev/null
++++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
+@@ -0,0 +1,192 @@
++/*
++ * Board setup routines for the storcenter
++ *
++ * Copyright 2007 (C) Oyvind Repvik (nail@nslu2-linux.org)
++ * Copyright 2007 Andy Wilcox, Jon Loeliger
++ *
++ * Based on linkstation.c by G. Liakhovetski
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2.  This program is licensed "as is" without any warranty of
++ * any kind, whether express or implied.
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/initrd.h>
++#include <linux/mtd/physmap.h>
++#include <linux/of_platform.h>
++
++#include <asm/system.h>
++#include <asm/time.h>
++#include <asm/prom.h>
++#include <asm/mpic.h>
++#include <asm/pci-bridge.h>
++
++#include "mpc10x.h"
++
++
++#ifdef CONFIG_MTD_PHYSMAP
++static struct mtd_partition storcenter_physmap_partitions[] = {
++	{
++		.name   = "kernel",
++		.offset = 0x000000,
++		.size   = 0x170000,
++	},
++	{
++		.name   = "rootfs",
++		.offset = 0x170000,
++		.size   = 0x590000,
++	},
++	{
++		.name   = "uboot",
++		.offset = 0x700000,
++		.size   = 0x040000,
++	},
++	{
++		.name   = "config",
++		.offset = 0x740000,
++		.size   = 0x0c0000,
++	},
++};
++#endif
++
++
++static __initdata struct of_device_id storcenter_of_bus[] = {
++	{ .name = "soc", },
++	{},
++};
++
++static int __init storcenter_device_probe(void)
++{
++	of_platform_bus_probe(NULL, storcenter_of_bus, NULL);
++	return 0;
++}
++machine_device_initcall(storcenter, storcenter_device_probe);
++
++
++static int __init storcenter_add_bridge(struct device_node *dev)
++{
++#ifdef CONFIG_PCI
++	int len;
++	struct pci_controller *hose;
++	const int *bus_range;
++
++	printk("Adding PCI host bridge %s\n", dev->full_name);
++
++	hose = pcibios_alloc_controller(dev);
++	if (hose == NULL)
++		return -ENOMEM;
++
++	bus_range = of_get_property(dev, "bus-range", &len);
++	hose->first_busno = bus_range ? bus_range[0] : 0;
++	hose->last_busno = bus_range ? bus_range[1] : 0xff;
++
++	setup_indirect_pci(hose, MPC10X_MAPB_CNFG_ADDR, MPC10X_MAPB_CNFG_DATA, 0);
++
++	/* Interpret the "ranges" property */
++	/* This also maps the I/O region and sets isa_io/mem_base */
++	pci_process_bridge_OF_ranges(hose, dev, 1);
++#endif
++
++	return 0;
++}
++
++static void __init storcenter_setup_arch(void)
++{
++	struct device_node *np;
++
++#ifdef CONFIG_MTD_PHYSMAP
++	physmap_set_partitions(storcenter_physmap_partitions,
++			       ARRAY_SIZE(storcenter_physmap_partitions));
++#endif
++
++	/* Lookup PCI host bridges */
++	for_each_compatible_node(np, "pci", "mpc10x-pci")
++		storcenter_add_bridge(np);
++
++	printk(KERN_INFO "IOMEGA StorCenter\n");
++}
++
++/*
++ * Interrupt setup and service.  Interrupts on the StorCenter come
++ * from the four PCI slots plus onboard 8241 devices: I2C, DUART.
++ */
++static void __init storcenter_init_IRQ(void)
++{
++	struct mpic *mpic;
++	struct device_node *dnp;
++	const void *prop;
++	int size;
++	phys_addr_t paddr;
++
++	dnp = of_find_node_by_type(NULL, "open-pic");
++	if (dnp == NULL)
++		return;
++
++	prop = of_get_property(dnp, "reg", &size);
++	if (prop == NULL) {
++		of_node_put(dnp);
++		return;
++	}
++
++	paddr = (phys_addr_t)of_translate_address(dnp, prop);
++	mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET,
++			4, 32, " EPIC     ");
++
++	of_node_put(dnp);
++
++	BUG_ON(mpic == NULL);
++
++	/* PCI IRQs */
++	/*
++	 * 2.6.12 patch:
++	 *         openpic_set_sources(0, 5, OpenPIC_Addr + 0x10200);
++	 *         openpic_set_sources(5, 2, OpenPIC_Addr + 0x11120);
++	 *         first_irq, num_irqs, __iomem first_ISR
++	 *         o_ss: i, src: 0, fdf50200
++	 *         o_ss: i, src: 1, fdf50220
++	 *         o_ss: i, src: 2, fdf50240
++	 *         o_ss: i, src: 3, fdf50260
++	 *         o_ss: i, src: 4, fdf50280
++	 *         o_ss: i, src: 5, fdf51120
++	 *         o_ss: i, src: 6, fdf51140
++	 */
++	mpic_assign_isu(mpic, 0, paddr + 0x10200);
++	mpic_assign_isu(mpic, 1, paddr + 0x10220);
++	mpic_assign_isu(mpic, 2, paddr + 0x10240);
++	mpic_assign_isu(mpic, 3, paddr + 0x10260);
++	mpic_assign_isu(mpic, 4, paddr + 0x10280);
++	mpic_assign_isu(mpic, 5, paddr + 0x11120);
++	mpic_assign_isu(mpic, 6, paddr + 0x11140);
++
++	mpic_init(mpic);
++}
++
++static void storcenter_restart(char *cmd)
++{
++	local_irq_disable();
++
++	/* Set exception prefix high - to the firmware */
++	_nmask_and_or_msr(0, MSR_IP);
++
++	/* Wait for reset to happen */
++	for (;;) ;
++}
++
++static int __init storcenter_probe(void)
++{
++	unsigned long root = of_get_flat_dt_root();
++
++	return of_flat_dt_is_compatible(root, "storcenter");
++}
++
++define_machine(storcenter){
++	.name 			= "IOMEGA StorCenter",
++	.probe 			= storcenter_probe,
++	.setup_arch 		= storcenter_setup_arch,
++	.init_IRQ 		= storcenter_init_IRQ,
++	.get_irq 		= mpic_get_irq,
++	.restart 		= storcenter_restart,
++	.calibrate_decr 	= generic_calibrate_decr,
++};
+diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
+index a65f1b4..cc7161f 100644
+--- a/arch/powerpc/platforms/iseries/Makefile
++++ b/arch/powerpc/platforms/iseries/Makefile
+@@ -5,7 +5,7 @@ extra-y += dt.o
+ obj-y += exception.o
+ obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt_mod.o mf.o lpevents.o \
+ 	hvcall.o proc.o htab.o iommu.o misc.o irq.o
+-obj-$(CONFIG_PCI) += pci.o vpdinfo.o
++obj-$(CONFIG_PCI) += pci.o
+ obj-$(CONFIG_SMP) += smp.o
+ obj-$(CONFIG_VIOPATH) += viopath.o vio.o
+ obj-$(CONFIG_MODULES) += ksyms.o
+diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
+index 49e9c66..6a0c6f6 100644
+--- a/arch/powerpc/platforms/iseries/iommu.c
++++ b/arch/powerpc/platforms/iseries/iommu.c
+@@ -163,8 +163,10 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
+ 		    (it->it_type == TCE_PCI) &&
+ 		    (it->it_offset == tbl->it_offset) &&
+ 		    (it->it_index == tbl->it_index) &&
+-		    (it->it_size == tbl->it_size))
++		    (it->it_size == tbl->it_size)) {
++			of_node_put(node);
+ 			return it;
++		}
+ 	}
+ 	return NULL;
+ }
+diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
+index 275f494..e5b40e3 100644
+--- a/arch/powerpc/platforms/iseries/lpevents.c
++++ b/arch/powerpc/platforms/iseries/lpevents.c
+@@ -239,7 +239,7 @@ int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
+ 			 * other CPUs, and that the deleted handler isn't
+ 			 * still running on another CPU when we return.
+ 			 */
+-			synchronize_rcu();
++			synchronize_sched();
+ 			return 0;
+ 		}
+ 	}
+diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
+index da87162..cc562e4 100644
+--- a/arch/powerpc/platforms/iseries/pci.c
++++ b/arch/powerpc/platforms/iseries/pci.c
+@@ -1,5 +1,6 @@
+ /*
+  * Copyright (C) 2001 Allan Trautman, IBM Corporation
++ * Copyright (C) 2005,2007  Stephen Rothwell, IBM Corp
+  *
+  * iSeries specific routines for PCI.
+  *
+@@ -19,13 +20,18 @@
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+  */
++
++#undef DEBUG
++
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+ #include <linux/string.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
++#include <linux/of.h>
+ 
++#include <asm/types.h>
+ #include <asm/io.h>
+ #include <asm/irq.h>
+ #include <asm/prom.h>
+@@ -35,6 +41,7 @@
+ #include <asm/abs_addr.h>
+ #include <asm/firmware.h>
+ 
++#include <asm/iseries/hv_types.h>
+ #include <asm/iseries/hv_call_xm.h>
+ #include <asm/iseries/mf.h>
+ #include <asm/iseries/iommu.h>
+@@ -45,15 +52,8 @@
+ #include "pci.h"
+ #include "call_pci.h"
+ 
+-/*
+- * Forward declares of prototypes.
+- */
+-static struct device_node *find_Device_Node(int bus, int devfn);
+-
+-static int Pci_Retry_Max = 3;	/* Only retry 3 times  */
+-static int Pci_Error_Flag = 1;	/* Set Retry Error on. */
+-
+-static struct pci_ops iSeries_pci_ops;
++#define PCI_RETRY_MAX	3
++static int limit_pci_retries = 1;	/* Set Retry Error on. */
+ 
+ /*
+  * Table defines
+@@ -62,6 +62,7 @@ static struct pci_ops iSeries_pci_ops;
+ #define IOMM_TABLE_MAX_ENTRIES	1024
+ #define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000UL
+ #define BASE_IO_MEMORY		0xE000000000000000UL
++#define END_IO_MEMORY		0xEFFFFFFFFFFFFFFFUL
+ 
+ static unsigned long max_io_memory = BASE_IO_MEMORY;
+ static long current_iomm_table_entry;
+@@ -70,12 +71,237 @@ static long current_iomm_table_entry;
+  * Lookup Tables.
+  */
+ static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
+-static u8 iobar_table[IOMM_TABLE_MAX_ENTRIES];
++static u64 ds_addr_table[IOMM_TABLE_MAX_ENTRIES];
+ 
+-static const char pci_io_text[] = "iSeries PCI I/O";
+ static DEFINE_SPINLOCK(iomm_table_lock);
+ 
+ /*
++ * Generate a Direct Select Address for the Hypervisor
++ */
++static inline u64 iseries_ds_addr(struct device_node *node)
++{
++	struct pci_dn *pdn = PCI_DN(node);
++	const u32 *sbp = of_get_property(node, "linux,subbus", NULL);
++
++	return ((u64)pdn->busno << 48) + ((u64)(sbp ? *sbp : 0) << 40)
++			+ ((u64)0x10 << 32);
++}
++
++/*
++ * Size of Bus VPD data
++ */
++#define BUS_VPDSIZE      1024
++
++/*
++ * Bus Vpd Tags
++ */
++#define VPD_END_OF_AREA		0x79
++#define VPD_ID_STRING		0x82
++#define VPD_VENDOR_AREA		0x84
++
++/*
++ * Mfg Area Tags
++ */
++#define VPD_FRU_FRAME_ID	0x4649	/* "FI" */
++#define VPD_SLOT_MAP_FORMAT	0x4D46	/* "MF" */
++#define VPD_SLOT_MAP		0x534D	/* "SM" */
++
++/*
++ * Structures of the areas
++ */
++struct mfg_vpd_area {
++	u16	tag;
++	u8	length;
++	u8	data1;
++	u8	data2;
++};
++#define MFG_ENTRY_SIZE   3
++
++struct slot_map {
++	u8	agent;
++	u8	secondary_agent;
++	u8	phb;
++	char	card_location[3];
++	char	parms[8];
++	char	reserved[2];
++};
++#define SLOT_ENTRY_SIZE   16
++
++/*
++ * Parse the Slot Area
++ */
++static void __init iseries_parse_slot_area(struct slot_map *map, int len,
++		HvAgentId agent, u8 *phb, char card[4])
++{
++	/*
++	 * Parse Slot label until we find the one requested
++	 */
++	while (len > 0) {
++		if (map->agent == agent) {
++			/*
++			 * If the PHB wasn't found, grab the first entry found.
++			 */
++			if (*phb == 0xff)
++				*phb = map->phb;
++			/* Found it, extract the data. */
++			if (map->phb == *phb) {
++				memcpy(card, &map->card_location, 3);
++				card[3]  = 0;
++				break;
++			}
++		}
++		/* Point to the next Slot */
++		map = (struct slot_map *)((char *)map + SLOT_ENTRY_SIZE);
++		len -= SLOT_ENTRY_SIZE;
++	}
++}
++
++/*
++ * Parse the Mfg Area
++ */
++static void __init iseries_parse_mfg_area(struct mfg_vpd_area *area, int len,
++		HvAgentId agent, u8 *phb, u8 *frame, char card[4])
++{
++	u16 slot_map_fmt = 0;
++
++	/* Parse Mfg Data */
++	while (len > 0) {
++		int mfg_tag_len = area->length;
++		/* Frame ID         (FI 4649020310 ) */
++		if (area->tag == VPD_FRU_FRAME_ID)
++			*frame = area->data1;
++		/* Slot Map Format  (MF 4D46020004 ) */
++		else if (area->tag == VPD_SLOT_MAP_FORMAT)
++			slot_map_fmt = (area->data1 * 256)
++				+ area->data2;
++		/* Slot Map         (SM 534D90 */
++		else if (area->tag == VPD_SLOT_MAP) {
++			struct slot_map *slot_map;
++
++			if (slot_map_fmt == 0x1004)
++				slot_map = (struct slot_map *)((char *)area
++						+ MFG_ENTRY_SIZE + 1);
++			else
++				slot_map = (struct slot_map *)((char *)area
++						+ MFG_ENTRY_SIZE);
++			iseries_parse_slot_area(slot_map, mfg_tag_len,
++					agent, phb, card);
++		}
++		/*
++		 * Point to the next Mfg Area
++		 * Use the defined size; sizeof() gives the wrong answer
++		 */
++		area = (struct mfg_vpd_area *)((char *)area + mfg_tag_len
++				+ MFG_ENTRY_SIZE);
++		len -= (mfg_tag_len + MFG_ENTRY_SIZE);
++	}
++}
++
++/*
++ * Look for "BUS".. Data is not Null terminated.
++ * PHBID of 0xFF indicates PHB was not found in VPD Data.
++ */
++static u8 __init iseries_parse_phbid(u8 *area, int len)
++{
++	while (len > 0) {
++		if ((*area == 'B') && (*(area + 1) == 'U')
++				&& (*(area + 2) == 'S')) {
++			area += 3;
++			while (*area == ' ')
++				area++;
++			return *area & 0x0F;
++		}
++		area++;
++		len--;
++	}
++	return 0xff;
++}
++
++/*
++ * Parse out the VPD Areas
++ */
++static void __init iseries_parse_vpd(u8 *data, int data_len,
++		HvAgentId agent, u8 *frame, char card[4])
++{
++	u8 phb = 0xff;
++
++	while (data_len > 0) {
++		int len;
++		u8 tag = *data;
++
++		if (tag == VPD_END_OF_AREA)
++			break;
++		len = *(data + 1) + (*(data + 2) * 256);
++		data += 3;
++		data_len -= 3;
++		if (tag == VPD_ID_STRING)
++			phb = iseries_parse_phbid(data, len);
++		else if (tag == VPD_VENDOR_AREA)
++			iseries_parse_mfg_area((struct mfg_vpd_area *)data, len,
++					agent, &phb, frame, card);
++		/* Point to next Area. */
++		data += len;
++		data_len -= len;
++	}
++}
++
++static int __init iseries_get_location_code(u16 bus, HvAgentId agent,
++		u8 *frame, char card[4])
++{
++	int status = 0;
++	int bus_vpd_len = 0;
++	u8 *bus_vpd = kmalloc(BUS_VPDSIZE, GFP_KERNEL);
++
++	if (bus_vpd == NULL) {
++		printk("PCI: Bus VPD Buffer allocation failure.\n");
++		return 0;
++	}
++	bus_vpd_len = HvCallPci_getBusVpd(bus, iseries_hv_addr(bus_vpd),
++					BUS_VPDSIZE);
++	if (bus_vpd_len == 0) {
++		printk("PCI: Bus VPD Buffer zero length.\n");
++		goto out_free;
++	}
++	/* printk("PCI: bus_vpd: %p, %d\n",bus_vpd, bus_vpd_len); */
++	/* Make sure this is what I think it is */
++	if (*bus_vpd != VPD_ID_STRING) {
++		printk("PCI: Bus VPD Buffer missing starting tag.\n");
++		goto out_free;
++	}
++	iseries_parse_vpd(bus_vpd, bus_vpd_len, agent, frame, card);
++	status = 1;
++out_free:
++	kfree(bus_vpd);
++	return status;
++}
++
++/*
++ * Prints the device information.
++ * - Pass in pci_dev* pointer to the device.
++ * - Pass in the device count
++ *
++ * Format:
++ * PCI: Bus  0, Device 26, Vendor 0x12AE  Frame  1, Card  C10  Ethernet
++ * controller
++ */
++static void __init iseries_device_information(struct pci_dev *pdev,
++					      u16 bus, HvSubBusNumber subbus)
++{
++	u8 frame = 0;
++	char card[4];
++	HvAgentId agent;
++
++	agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
++			ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
++
++	if (iseries_get_location_code(bus, agent, &frame, card)) {
++		printk(KERN_INFO "PCI: %s, Vendor %04X Frame%3d, "
++		       "Card %4s  0x%04X\n", pci_name(pdev), pdev->vendor,
++		       frame, card, (int)(pdev->class >> 8));
++	}
++}
++
++/*
+  * iomm_table_allocate_entry
+  *
+  * Adds pci_dev entry in address translation table
+@@ -87,7 +313,7 @@ static DEFINE_SPINLOCK(iomm_table_lock);
+  * - CurrentIndex is incremented to keep track of the last entry.
+  * - Builds the resource entry for allocated BARs.
+  */
+-static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
++static void __init iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
+ {
+ 	struct resource *bar_res = &dev->resource[bar_num];
+ 	long bar_size = pci_resource_len(dev, bar_num);
+@@ -101,7 +327,6 @@ static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
+ 	 * Set Resource values.
+ 	 */
+ 	spin_lock(&iomm_table_lock);
+-	bar_res->name = pci_io_text;
+ 	bar_res->start = BASE_IO_MEMORY +
+ 		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
+ 	bar_res->end = bar_res->start + bar_size - 1;
+@@ -110,7 +335,8 @@ static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
+ 	 */
+ 	while (bar_size > 0 ) {
+ 		iomm_table[current_iomm_table_entry] = dev->sysdata;
+-		iobar_table[current_iomm_table_entry] = bar_num;
++		ds_addr_table[current_iomm_table_entry] =
++			iseries_ds_addr(dev->sysdata) | (bar_num << 24);
+ 		bar_size -= IOMM_TABLE_ENTRY_SIZE;
+ 		++current_iomm_table_entry;
+ 	}
+@@ -130,7 +356,7 @@ static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
+  * - Loops through The Bar resources(0 - 5) including the ROM
+  *   is resource(6).
+  */
+-static void allocate_device_bars(struct pci_dev *dev)
++static void __init allocate_device_bars(struct pci_dev *dev)
+ {
+ 	int bar_num;
+ 
+@@ -145,79 +371,19 @@ static void allocate_device_bars(struct pci_dev *dev)
+  * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
+  * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
+  */
+-static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
+-		int AgentId, int HvRc)
++static void pci_log_error(char *error, int bus, int subbus,
++		int agent, int hv_res)
+ {
+-	if (HvRc == 0x0302)
++	if (hv_res == 0x0302)
+ 		return;
+ 	printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
+-	       Error_Text, Bus, SubBus, AgentId, HvRc);
+-}
+-
+-/*
+- * iSeries_pci_final_fixup(void)
+- */
+-void __init iSeries_pci_final_fixup(void)
+-{
+-	struct pci_dev *pdev = NULL;
+-	struct device_node *node;
+-	int DeviceCount = 0;
+-
+-	/* Fix up at the device node and pci_dev relationship */
+-	mf_display_src(0xC9000100);
+-
+-	printk("pcibios_final_fixup\n");
+-	for_each_pci_dev(pdev) {
+-		node = find_Device_Node(pdev->bus->number, pdev->devfn);
+-		printk("pci dev %p (%x.%x), node %p\n", pdev,
+-		       pdev->bus->number, pdev->devfn, node);
+-
+-		if (node != NULL) {
+-			struct pci_dn *pdn = PCI_DN(node);
+-			const u32 *agent;
+-
+-			agent = of_get_property(node, "linux,agent-id", NULL);
+-			if ((pdn != NULL) && (agent != NULL)) {
+-				u8 irq = iSeries_allocate_IRQ(pdn->busno, 0,
+-						pdn->bussubno);
+-				int err;
+-
+-				err = HvCallXm_connectBusUnit(pdn->busno, pdn->bussubno,
+-						*agent, irq);
+-				if (err)
+-					pci_Log_Error("Connect Bus Unit",
+-						pdn->busno, pdn->bussubno, *agent, err);
+-				else {
+-					err = HvCallPci_configStore8(pdn->busno, pdn->bussubno,
+-							*agent,
+-							PCI_INTERRUPT_LINE,
+-							irq);
+-					if (err)
+-						pci_Log_Error("PciCfgStore Irq Failed!",
+-							pdn->busno, pdn->bussubno, *agent, err);
+-				}
+-				if (!err)
+-					pdev->irq = irq;
+-			}
+-
+-			++DeviceCount;
+-			pdev->sysdata = (void *)node;
+-			PCI_DN(node)->pcidev = pdev;
+-			allocate_device_bars(pdev);
+-			iSeries_Device_Information(pdev, DeviceCount);
+-			iommu_devnode_init_iSeries(pdev, node);
+-		} else
+-			printk("PCI: Device Tree not found for 0x%016lX\n",
+-					(unsigned long)pdev);
+-	}
+-	iSeries_activate_IRQs();
+-	mf_display_src(0xC9000200);
++	       error, bus, subbus, agent, hv_res);
+ }
+ 
+ /*
+  * Look down the chain to find the matching device node
+  */
+-static struct device_node *find_Device_Node(int bus, int devfn)
++static struct device_node *find_device_node(int bus, int devfn)
+ {
+ 	struct device_node *node;
+ 
+@@ -230,22 +396,66 @@ static struct device_node *find_Device_Node(int bus, int devfn)
+ 	return NULL;
+ }
+ 
+-#if 0
+ /*
+- * Returns the device node for the passed pci_dev
+- * Sanity Check Node PciDev to passed pci_dev
+- * If none is found, returns a NULL which the client must handle.
++ * iSeries_pcibios_fixup_resources
++ *
++ * Fixes up all resources for devices
+  */
+-static struct device_node *get_Device_Node(struct pci_dev *pdev)
++void __init iSeries_pcibios_fixup_resources(struct pci_dev *pdev)
+ {
++	const u32 *agent;
++	const u32 *sub_bus;
++	unsigned char bus = pdev->bus->number;
+ 	struct device_node *node;
++	int i;
++
++	node = find_device_node(bus, pdev->devfn);
++	pr_debug("PCI: iSeries %s, pdev %p, node %p\n",
++		 pci_name(pdev), pdev, node);
++	if (!node) {
++		printk("PCI: %s disabled, device tree entry not found !\n",
++		       pci_name(pdev));
++		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
++			pdev->resource[i].flags = 0;
++		return;
++	}
++	sub_bus = of_get_property(node, "linux,subbus", NULL);
++	agent = of_get_property(node, "linux,agent-id", NULL);
++	if (agent && sub_bus) {
++		u8 irq = iSeries_allocate_IRQ(bus, 0, *sub_bus);
++		int err;
++
++		err = HvCallXm_connectBusUnit(bus, *sub_bus, *agent, irq);
++		if (err)
++			pci_log_error("Connect Bus Unit",
++				      bus, *sub_bus, *agent, err);
++		else {
++			err = HvCallPci_configStore8(bus, *sub_bus,
++					*agent, PCI_INTERRUPT_LINE, irq);
++			if (err)
++				pci_log_error("PciCfgStore Irq Failed!",
++						bus, *sub_bus, *agent, err);
++			else
++				pdev->irq = irq;
++		}
++	}
+ 
+-	node = pdev->sysdata;
+-	if (node == NULL || PCI_DN(node)->pcidev != pdev)
+-		node = find_Device_Node(pdev->bus->number, pdev->devfn);
+-	return node;
++	pdev->sysdata = node;
++	allocate_device_bars(pdev);
++	iseries_device_information(pdev, bus, *sub_bus);
++	iommu_devnode_init_iSeries(pdev, node);
++}
++
++/*
++ * iSeries_pci_final_fixup(void)
++ */
++void __init iSeries_pci_final_fixup(void)
++{
++	/* Fix up at the device node and pci_dev relationship */
++	mf_display_src(0xC9000100);
++	iSeries_activate_IRQs();
++	mf_display_src(0xC9000200);
+ }
+-#endif
+ 
+ /*
+  * Config space read and write functions.
+@@ -269,7 +479,7 @@ static u64 hv_cfg_write_func[4] = {
+ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ 		int offset, int size, u32 *val)
+ {
+-	struct device_node *node = find_Device_Node(bus->number, devfn);
++	struct device_node *node = find_device_node(bus->number, devfn);
+ 	u64 fn;
+ 	struct HvCallPci_LoadReturn ret;
+ 
+@@ -299,7 +509,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ 		int offset, int size, u32 val)
+ {
+-	struct device_node *node = find_Device_Node(bus->number, devfn);
++	struct device_node *node = find_device_node(bus->number, devfn);
+ 	u64 fn;
+ 	u64 ret;
+ 
+@@ -331,22 +541,22 @@ static struct pci_ops iSeries_pci_ops = {
+  * PCI: Device 23.90 ReadL Retry( 1)
+  * PCI: Device 23.90 ReadL Retry Successful(1)
+  */
+-static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
++static int check_return_code(char *type, struct device_node *dn,
+ 		int *retry, u64 ret)
+ {
+ 	if (ret != 0)  {
+-		struct pci_dn *pdn = PCI_DN(DevNode);
++		struct pci_dn *pdn = PCI_DN(dn);
+ 
+ 		(*retry)++;
+ 		printk("PCI: %s: Device 0x%04X:%02X  I/O Error(%2d): 0x%04X\n",
+-				TextHdr, pdn->busno, pdn->devfn,
++				type, pdn->busno, pdn->devfn,
+ 				*retry, (int)ret);
+ 		/*
+ 		 * Bump the retry and check for retry count exceeded.
+ 		 * If, Exceeded, panic the system.
+ 		 */
+-		if (((*retry) > Pci_Retry_Max) &&
+-				(Pci_Error_Flag > 0)) {
++		if (((*retry) > PCI_RETRY_MAX) &&
++				(limit_pci_retries > 0)) {
+ 			mf_display_src(0xB6000103);
+ 			panic_timeout = 0;
+ 			panic("PCI: Hardware I/O Error, SRC B6000103, "
+@@ -363,28 +573,39 @@ static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
+  * the exposure of being device global.
+  */
+ static inline struct device_node *xlate_iomm_address(
+-		const volatile void __iomem *IoAddress,
+-		u64 *dsaptr, u64 *BarOffsetPtr)
++		const volatile void __iomem *addr,
++		u64 *dsaptr, u64 *bar_offset, const char *func)
+ {
+-	unsigned long OrigIoAddr;
+-	unsigned long BaseIoAddr;
+-	unsigned long TableIndex;
+-	struct device_node *DevNode;
++	unsigned long orig_addr;
++	unsigned long base_addr;
++	unsigned long ind;
++	struct device_node *dn;
++
++	orig_addr = (unsigned long __force)addr;
++	if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
++		static unsigned long last_jiffies;
++		static int num_printed;
+ 
+-	OrigIoAddr = (unsigned long __force)IoAddress;
+-	if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
++		if ((jiffies - last_jiffies) > 60 * HZ) {
++			last_jiffies = jiffies;
++			num_printed = 0;
++		}
++		if (num_printed++ < 10)
++			printk(KERN_ERR
++				"iSeries_%s: invalid access at IO address %p\n",
++				func, addr);
+ 		return NULL;
+-	BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
+-	TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
+-	DevNode = iomm_table[TableIndex];
+-
+-	if (DevNode != NULL) {
+-		int barnum = iobar_table[TableIndex];
+-		*dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
+-		*BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
++	}
++	base_addr = orig_addr - BASE_IO_MEMORY;
++	ind = base_addr / IOMM_TABLE_ENTRY_SIZE;
++	dn = iomm_table[ind];
++
++	if (dn != NULL) {
++		*dsaptr = ds_addr_table[ind];
++		*bar_offset = base_addr % IOMM_TABLE_ENTRY_SIZE;
+ 	} else
+-		panic("PCI: Invalid PCI IoAddress detected!\n");
+-	return DevNode;
++		panic("PCI: Invalid PCI IO address detected!\n");
++	return dn;
+ }
+ 
+ /*
+@@ -392,91 +613,58 @@ static inline struct device_node *xlate_iomm_address(
+  * On MM I/O error, all ones are returned and iSeries_pci_IoError is called,
+  * else, data is returned in Big Endian format.
+  */
+-static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
++static u8 iseries_readb(const volatile void __iomem *addr)
+ {
+-	u64 BarOffset;
++	u64 bar_offset;
+ 	u64 dsa;
+ 	int retry = 0;
+ 	struct HvCallPci_LoadReturn ret;
+-	struct device_node *DevNode =
+-		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
+-
+-	if (DevNode == NULL) {
+-		static unsigned long last_jiffies;
+-		static int num_printed;
++	struct device_node *dn =
++		xlate_iomm_address(addr, &dsa, &bar_offset, "read_byte");
+ 
+-		if ((jiffies - last_jiffies) > 60 * HZ) {
+-			last_jiffies = jiffies;
+-			num_printed = 0;
+-		}
+-		if (num_printed++ < 10)
+-			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n",
+-			       IoAddress);
++	if (dn == NULL)
+ 		return 0xff;
+-	}
+ 	do {
+-		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
+-	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
++		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, bar_offset, 0);
++	} while (check_return_code("RDB", dn, &retry, ret.rc) != 0);
+ 
+ 	return ret.value;
+ }
+ 
+-static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
++static u16 iseries_readw_be(const volatile void __iomem *addr)
+ {
+-	u64 BarOffset;
++	u64 bar_offset;
+ 	u64 dsa;
+ 	int retry = 0;
+ 	struct HvCallPci_LoadReturn ret;
+-	struct device_node *DevNode =
+-		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
++	struct device_node *dn =
++		xlate_iomm_address(addr, &dsa, &bar_offset, "read_word");
+ 
+-	if (DevNode == NULL) {
+-		static unsigned long last_jiffies;
+-		static int num_printed;
+-
+-		if ((jiffies - last_jiffies) > 60 * HZ) {
+-			last_jiffies = jiffies;
+-			num_printed = 0;
+-		}
+-		if (num_printed++ < 10)
+-			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n",
+-			       IoAddress);
++	if (dn == NULL)
+ 		return 0xffff;
+-	}
+ 	do {
+ 		HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
+-				BarOffset, 0);
+-	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
++				bar_offset, 0);
++	} while (check_return_code("RDW", dn, &retry, ret.rc) != 0);
+ 
+ 	return ret.value;
+ }
+ 
+-static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
++static u32 iseries_readl_be(const volatile void __iomem *addr)
+ {
+-	u64 BarOffset;
++	u64 bar_offset;
+ 	u64 dsa;
+ 	int retry = 0;
+ 	struct HvCallPci_LoadReturn ret;
+-	struct device_node *DevNode =
+-		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
+-
+-	if (DevNode == NULL) {
+-		static unsigned long last_jiffies;
+-		static int num_printed;
++	struct device_node *dn =
++		xlate_iomm_address(addr, &dsa, &bar_offset, "read_long");
+ 
+-		if ((jiffies - last_jiffies) > 60 * HZ) {
+-			last_jiffies = jiffies;
+-			num_printed = 0;
+-		}
+-		if (num_printed++ < 10)
+-			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n",
+-			       IoAddress);
++	if (dn == NULL)
+ 		return 0xffffffff;
+-	}
+ 	do {
+ 		HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
+-				BarOffset, 0);
+-	} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
++				bar_offset, 0);
++	} while (check_return_code("RDL", dn, &retry, ret.rc) != 0);
+ 
+ 	return ret.value;
+ }
+@@ -485,134 +673,72 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
+  * Write MM I/O Instructions for the iSeries
+  *
+  */
+-static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
++static void iseries_writeb(u8 data, volatile void __iomem *addr)
+ {
+-	u64 BarOffset;
++	u64 bar_offset;
+ 	u64 dsa;
+ 	int retry = 0;
+ 	u64 rc;
+-	struct device_node *DevNode =
+-		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
+-
+-	if (DevNode == NULL) {
+-		static unsigned long last_jiffies;
+-		static int num_printed;
++	struct device_node *dn =
++		xlate_iomm_address(addr, &dsa, &bar_offset, "write_byte");
+ 
+-		if ((jiffies - last_jiffies) > 60 * HZ) {
+-			last_jiffies = jiffies;
+-			num_printed = 0;
+-		}
+-		if (num_printed++ < 10)
+-			printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress);
++	if (dn == NULL)
+ 		return;
+-	}
+ 	do {
+-		rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
+-	} while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
++		rc = HvCall4(HvCallPciBarStore8, dsa, bar_offset, data, 0);
++	} while (check_return_code("WWB", dn, &retry, rc) != 0);
+ }
+ 
+-static void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
++static void iseries_writew_be(u16 data, volatile void __iomem *addr)
+ {
+-	u64 BarOffset;
++	u64 bar_offset;
+ 	u64 dsa;
+ 	int retry = 0;
+ 	u64 rc;
+-	struct device_node *DevNode =
+-		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
++	struct device_node *dn =
++		xlate_iomm_address(addr, &dsa, &bar_offset, "write_word");
+ 
+-	if (DevNode == NULL) {
+-		static unsigned long last_jiffies;
+-		static int num_printed;
+-
+-		if ((jiffies - last_jiffies) > 60 * HZ) {
+-			last_jiffies = jiffies;
+-			num_printed = 0;
+-		}
+-		if (num_printed++ < 10)
+-			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n",
+-			       IoAddress);
++	if (dn == NULL)
+ 		return;
+-	}
+ 	do {
+-		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, data, 0);
+-	} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
++		rc = HvCall4(HvCallPciBarStore16, dsa, bar_offset, data, 0);
++	} while (check_return_code("WWW", dn, &retry, rc) != 0);
+ }
+ 
+-static void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
++static void iseries_writel_be(u32 data, volatile void __iomem *addr)
+ {
+-	u64 BarOffset;
++	u64 bar_offset;
+ 	u64 dsa;
+ 	int retry = 0;
+ 	u64 rc;
+-	struct device_node *DevNode =
+-		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
+-
+-	if (DevNode == NULL) {
+-		static unsigned long last_jiffies;
+-		static int num_printed;
++	struct device_node *dn =
++		xlate_iomm_address(addr, &dsa, &bar_offset, "write_long");
+ 
+-		if ((jiffies - last_jiffies) > 60 * HZ) {
+-			last_jiffies = jiffies;
+-			num_printed = 0;
+-		}
+-		if (num_printed++ < 10)
+-			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n",
+-			       IoAddress);
++	if (dn == NULL)
+ 		return;
+-	}
+ 	do {
+-		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, data, 0);
+-	} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
+-}
+-
+-static u8 iseries_readb(const volatile void __iomem *addr)
+-{
+-	return iSeries_Read_Byte(addr);
++		rc = HvCall4(HvCallPciBarStore32, dsa, bar_offset, data, 0);
++	} while (check_return_code("WWL", dn, &retry, rc) != 0);
+ }
+ 
+ static u16 iseries_readw(const volatile void __iomem *addr)
+ {
+-	return le16_to_cpu(iSeries_Read_Word(addr));
++	return le16_to_cpu(iseries_readw_be(addr));
+ }
+ 
+ static u32 iseries_readl(const volatile void __iomem *addr)
+ {
+-	return le32_to_cpu(iSeries_Read_Long(addr));
+-}
+-
+-static u16 iseries_readw_be(const volatile void __iomem *addr)
+-{
+-	return iSeries_Read_Word(addr);
+-}
+-
+-static u32 iseries_readl_be(const volatile void __iomem *addr)
+-{
+-	return iSeries_Read_Long(addr);
+-}
+-
+-static void iseries_writeb(u8 data, volatile void __iomem *addr)
+-{
+-	iSeries_Write_Byte(data, addr);
++	return le32_to_cpu(iseries_readl_be(addr));
+ }
+ 
+ static void iseries_writew(u16 data, volatile void __iomem *addr)
+ {
+-	iSeries_Write_Word(cpu_to_le16(data), addr);
++	iseries_writew_be(cpu_to_le16(data), addr);
+ }
+ 
+ static void iseries_writel(u32 data, volatile void __iomem *addr)
+ {
+-	iSeries_Write_Long(cpu_to_le32(data), addr);
+-}
+-
+-static void iseries_writew_be(u16 data, volatile void __iomem *addr)
+-{
+-	iSeries_Write_Word(data, addr);
+-}
+-
+-static void iseries_writel_be(u32 data, volatile void __iomem *addr)
+-{
+-	iSeries_Write_Long(data, addr);
++	iseries_writel_be(cpu_to_le32(data), addr);
+ }
+ 
+ static void iseries_readsb(const volatile void __iomem *addr, void *buf,
+@@ -620,7 +746,7 @@ static void iseries_readsb(const volatile void __iomem *addr, void *buf,
+ {
+ 	u8 *dst = buf;
+ 	while(count-- > 0)
+-		*(dst++) = iSeries_Read_Byte(addr);
++		*(dst++) = iseries_readb(addr);
+ }
+ 
+ static void iseries_readsw(const volatile void __iomem *addr, void *buf,
+@@ -628,7 +754,7 @@ static void iseries_readsw(const volatile void __iomem *addr, void *buf,
+ {
+ 	u16 *dst = buf;
+ 	while(count-- > 0)
+-		*(dst++) = iSeries_Read_Word(addr);
++		*(dst++) = iseries_readw_be(addr);
+ }
+ 
+ static void iseries_readsl(const volatile void __iomem *addr, void *buf,
+@@ -636,7 +762,7 @@ static void iseries_readsl(const volatile void __iomem *addr, void *buf,
+ {
+ 	u32 *dst = buf;
+ 	while(count-- > 0)
+-		*(dst++) = iSeries_Read_Long(addr);
++		*(dst++) = iseries_readl_be(addr);
+ }
+ 
+ static void iseries_writesb(volatile void __iomem *addr, const void *buf,
+@@ -644,7 +770,7 @@ static void iseries_writesb(volatile void __iomem *addr, const void *buf,
+ {
+ 	const u8 *src = buf;
+ 	while(count-- > 0)
+-		iSeries_Write_Byte(*(src++), addr);
++		iseries_writeb(*(src++), addr);
+ }
+ 
+ static void iseries_writesw(volatile void __iomem *addr, const void *buf,
+@@ -652,7 +778,7 @@ static void iseries_writesw(volatile void __iomem *addr, const void *buf,
+ {
+ 	const u16 *src = buf;
+ 	while(count-- > 0)
+-		iSeries_Write_Word(*(src++), addr);
++		iseries_writew_be(*(src++), addr);
+ }
+ 
+ static void iseries_writesl(volatile void __iomem *addr, const void *buf,
+@@ -660,7 +786,7 @@ static void iseries_writesl(volatile void __iomem *addr, const void *buf,
+ {
+ 	const u32 *src = buf;
+ 	while(count-- > 0)
+-		iSeries_Write_Long(*(src++), addr);
++		iseries_writel_be(*(src++), addr);
+ }
+ 
+ static void iseries_memset_io(volatile void __iomem *addr, int c,
+@@ -669,7 +795,7 @@ static void iseries_memset_io(volatile void __iomem *addr, int c,
+ 	volatile char __iomem *d = addr;
+ 
+ 	while (n-- > 0)
+-		iSeries_Write_Byte(c, d++);
++		iseries_writeb(c, d++);
+ }
+ 
+ static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
+@@ -679,7 +805,7 @@ static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
+ 	const volatile char __iomem *s = src;
+ 
+ 	while (n-- > 0)
+-		*d++ = iSeries_Read_Byte(s++);
++		*d++ = iseries_readb(s++);
+ }
+ 
+ static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
+@@ -689,7 +815,7 @@ static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
+ 	volatile char __iomem *d = dest;
+ 
+ 	while (n-- > 0)
+-		iSeries_Write_Byte(*s++, d++);
++		iseries_writeb(*s++, d++);
+ }
+ 
+ /* We only set MMIO ops. The default PIO ops will be default
+@@ -742,6 +868,8 @@ void __init iSeries_pcibios_init(void)
+ 	/* Install IO hooks */
+ 	ppc_pci_io = iseries_pci_io;
+ 
++	pci_probe_only = 1;
++
+ 	/* iSeries has no IO space in the common sense, it needs to set
+ 	 * the IO base to 0
+ 	 */
+@@ -767,11 +895,21 @@ void __init iSeries_pcibios_init(void)
+ 		phb = pcibios_alloc_controller(node);
+ 		if (phb == NULL)
+ 			continue;
++		/* All legacy iSeries PHBs are in domain zero */
++		phb->global_number = 0;
+ 
+-		phb->pci_mem_offset = bus;
+ 		phb->first_busno = bus;
+ 		phb->last_busno = bus;
+ 		phb->ops = &iSeries_pci_ops;
++		phb->io_base_virt = (void __iomem *)_IO_BASE;
++		phb->io_resource.flags = IORESOURCE_IO;
++		phb->io_resource.start = BASE_IO_MEMORY;
++		phb->io_resource.end = END_IO_MEMORY;
++		phb->io_resource.name = "iSeries PCI IO";
++		phb->mem_resources[0].flags = IORESOURCE_MEM;
++		phb->mem_resources[0].start = BASE_IO_MEMORY;
++		phb->mem_resources[0].end = END_IO_MEMORY;
++		phb->mem_resources[0].name = "iSeries PCI MEM";
+ 	}
+ 
+ 	of_node_put(root);
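
For orientation: the Direct Select Address built by iseries_ds_addr() and cached in ds_addr_table[] above packs the bus number, the sub-bus and a fixed 0x10 function code into the high bits of a 64-bit token, with the BAR number OR'd in at bit 24 by iomm_table_allocate_entry(). A minimal sketch of that layout follows; the helper name is illustrative and not part of the patch.

static inline u64 example_dsa(u16 busno, u8 subbus, u8 barnum)
{
	/* bits 63-48: bus, 47-40: sub-bus, 39-32: 0x10, 31-24: BAR */
	return ((u64)busno << 48) | ((u64)subbus << 40)
		| ((u64)0x10 << 32) | ((u64)barnum << 24);
}
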
+diff --git a/arch/powerpc/platforms/iseries/pci.h b/arch/powerpc/platforms/iseries/pci.h
+index 33a8489..d9cf974 100644
+--- a/arch/powerpc/platforms/iseries/pci.h
++++ b/arch/powerpc/platforms/iseries/pci.h
+@@ -30,10 +30,6 @@
+  * End Change Activity
+  */
+ 
+-#include <asm/pci-bridge.h>
+-
+-struct pci_dev;				/* For Forward Reference */
+-
+ /*
+  * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
+  * For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h
+@@ -47,17 +43,16 @@ struct pci_dev;				/* For Forward Reference */
+ #define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus)		((subbus >> 5) & 0x7)
+ #define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)	((subbus >> 2) & 0x7)
+ 
+-/*
+- * Generate a Direct Select Address for the Hypervisor
+- */
+-static inline u64 iseries_ds_addr(struct device_node *node)
+-{
+-	struct pci_dn *pdn = PCI_DN(node);
+-
+-	return ((u64)pdn->busno << 48) + ((u64)pdn->bussubno << 40)
+-			+ ((u64)0x10 << 32);
+-}
+-
+-extern void	iSeries_Device_Information(struct pci_dev*, int);
++struct pci_dev;
++
++#ifdef CONFIG_PCI
++extern void	iSeries_pcibios_init(void);
++extern void	iSeries_pci_final_fixup(void);
++extern void 	iSeries_pcibios_fixup_resources(struct pci_dev *dev);
++#else
++static inline void	iSeries_pcibios_init(void) { }
++static inline void	iSeries_pci_final_fixup(void) { }
++static inline void 	iSeries_pcibios_fixup_resources(struct pci_dev *dev) {}
++#endif
+ 
+ #endif /* _PLATFORMS_ISERIES_PCI_H */
+diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
+index 0877a88..b721207 100644
+--- a/arch/powerpc/platforms/iseries/setup.c
++++ b/arch/powerpc/platforms/iseries/setup.c
+@@ -63,6 +63,7 @@
+ #include "main_store.h"
+ #include "call_sm.h"
+ #include "call_hpt.h"
++#include "pci.h"
+ 
+ #ifdef DEBUG
+ #define DBG(fmt...) udbg_printf(fmt)
+@@ -74,11 +75,6 @@
+ static unsigned long build_iSeries_Memory_Map(void);
+ static void iseries_shared_idle(void);
+ static void iseries_dedicated_idle(void);
+-#ifdef CONFIG_PCI
+-extern void iSeries_pci_final_fixup(void);
+-#else
+-static void iSeries_pci_final_fixup(void) { }
+-#endif
+ 
+ 
+ struct MemoryBlock {
+@@ -112,13 +108,13 @@ static unsigned long iSeries_process_Condor_mainstore_vpd(
+ 	 * correctly.
+ 	 */
+ 	mb_array[0].logicalStart = 0;
+-	mb_array[0].logicalEnd = 0x100000000;
++	mb_array[0].logicalEnd = 0x100000000UL;
+ 	mb_array[0].absStart = 0;
+-	mb_array[0].absEnd = 0x100000000;
++	mb_array[0].absEnd = 0x100000000UL;
+ 
+ 	if (holeSize) {
+ 		numMemoryBlocks = 2;
+-		holeStart = holeStart & 0x000fffffffffffff;
++		holeStart = holeStart & 0x000fffffffffffffUL;
+ 		holeStart = addr_to_chunk(holeStart);
+ 		holeFirstChunk = holeStart;
+ 		holeSize = addr_to_chunk(holeSize);
+@@ -128,9 +124,9 @@ static unsigned long iSeries_process_Condor_mainstore_vpd(
+ 		mb_array[0].logicalEnd = holeFirstChunk;
+ 		mb_array[0].absEnd = holeFirstChunk;
+ 		mb_array[1].logicalStart = holeFirstChunk;
+-		mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
++		mb_array[1].logicalEnd = 0x100000000UL - holeSizeChunks;
+ 		mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
+-		mb_array[1].absEnd = 0x100000000;
++		mb_array[1].absEnd = 0x100000000UL;
+ 	}
+ 	return numMemoryBlocks;
+ }
+@@ -234,9 +230,9 @@ static unsigned long iSeries_process_Regatta_mainstore_vpd(
+ 				mb_array[i].logicalEnd,
+ 				mb_array[i].absStart, mb_array[i].absEnd);
+ 		mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
+-				0x000fffffffffffff);
++				0x000fffffffffffffUL);
+ 		mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
+-				0x000fffffffffffff);
++				0x000fffffffffffffUL);
+ 		mb_array[i].logicalStart =
+ 			addr_to_chunk(mb_array[i].logicalStart);
+ 		mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
+@@ -320,7 +316,7 @@ struct mschunks_map mschunks_map = {
+ };
+ EXPORT_SYMBOL(mschunks_map);
+ 
+-void mschunks_alloc(unsigned long num_chunks)
++static void mschunks_alloc(unsigned long num_chunks)
+ {
+ 	klimit = _ALIGN(klimit, sizeof(u32));
+ 	mschunks_map.mapping = (u32 *)klimit;
+@@ -499,6 +495,8 @@ static void __init iSeries_setup_arch(void)
+ 			itVpdAreas.xSlicMaxLogicalProcs);
+ 	printk("Max physical processors = %d\n",
+ 			itVpdAreas.xSlicMaxPhysicalProcs);
++
++	iSeries_pcibios_init();
+ }
+ 
+ static void iSeries_show_cpuinfo(struct seq_file *m)
+@@ -641,24 +639,25 @@ static int __init iseries_probe(void)
+ }
+ 
+ define_machine(iseries) {
+-	.name		= "iSeries",
+-	.setup_arch	= iSeries_setup_arch,
+-	.show_cpuinfo	= iSeries_show_cpuinfo,
+-	.init_IRQ	= iSeries_init_IRQ,
+-	.get_irq	= iSeries_get_irq,
+-	.init_early	= iSeries_init_early,
+-	.pcibios_fixup	= iSeries_pci_final_fixup,
+-	.restart	= mf_reboot,
+-	.power_off	= mf_power_off,
+-	.halt		= mf_power_off,
+-	.get_boot_time	= iSeries_get_boot_time,
+-	.set_rtc_time	= iSeries_set_rtc_time,
+-	.get_rtc_time	= iSeries_get_rtc_time,
+-	.calibrate_decr	= generic_calibrate_decr,
+-	.progress	= iSeries_progress,
+-	.probe		= iseries_probe,
+-	.ioremap	= iseries_ioremap,
+-	.iounmap	= iseries_iounmap,
++	.name			= "iSeries",
++	.setup_arch		= iSeries_setup_arch,
++	.show_cpuinfo		= iSeries_show_cpuinfo,
++	.init_IRQ		= iSeries_init_IRQ,
++	.get_irq		= iSeries_get_irq,
++	.init_early		= iSeries_init_early,
++	.pcibios_fixup		= iSeries_pci_final_fixup,
++	.pcibios_fixup_resources= iSeries_pcibios_fixup_resources,
++	.restart		= mf_reboot,
++	.power_off		= mf_power_off,
++	.halt			= mf_power_off,
++	.get_boot_time		= iSeries_get_boot_time,
++	.set_rtc_time		= iSeries_set_rtc_time,
++	.get_rtc_time		= iSeries_get_rtc_time,
++	.calibrate_decr		= generic_calibrate_decr,
++	.progress		= iSeries_progress,
++	.probe			= iseries_probe,
++	.ioremap		= iseries_ioremap,
++	.iounmap		= iseries_iounmap,
+ 	/* XXX Implement enable_pmcs for iSeries */
+ };
+ 
+diff --git a/arch/powerpc/platforms/iseries/setup.h b/arch/powerpc/platforms/iseries/setup.h
+index 0a47ac5..729754b 100644
+--- a/arch/powerpc/platforms/iseries/setup.h
++++ b/arch/powerpc/platforms/iseries/setup.h
+@@ -17,6 +17,7 @@
+ #ifndef	__ISERIES_SETUP_H__
+ #define	__ISERIES_SETUP_H__
+ 
++extern void *iSeries_early_setup(void);
+ extern unsigned long iSeries_get_boot_time(void);
+ extern int iSeries_set_rtc_time(struct rtc_time *tm);
+ extern void iSeries_get_rtc_time(struct rtc_time *tm);
+diff --git a/arch/powerpc/platforms/iseries/vpdinfo.c b/arch/powerpc/platforms/iseries/vpdinfo.c
+deleted file mode 100644
+index 9f83878..0000000
+--- a/arch/powerpc/platforms/iseries/vpdinfo.c
++++ /dev/null
+@@ -1,275 +0,0 @@
+-/*
+- * This code gets the card location of the hardware
+- * Copyright (C) 2001  <Allan H Trautman> <IBM Corp>
+- * Copyright (C) 2005  Stephen Rothwel, IBM Corp
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the:
+- * Free Software Foundation, Inc.,
+- * 59 Temple Place, Suite 330,
+- * Boston, MA  02111-1307  USA
+- *
+- * Change Activity:
+- *   Created, Feb 2, 2001
+- *   Ported to ppc64, August 20, 2001
+- * End Change Activity
+- */
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/pci.h>
+-
+-#include <asm/types.h>
+-#include <asm/resource.h>
+-#include <asm/abs_addr.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/iseries/hv_types.h>
+-
+-#include "pci.h"
+-#include "call_pci.h"
+-
+-/*
+- * Size of Bus VPD data
+- */
+-#define BUS_VPDSIZE      1024
+-
+-/*
+- * Bus Vpd Tags
+- */
+-#define  VpdEndOfAreaTag   0x79
+-#define  VpdIdStringTag    0x82
+-#define  VpdVendorAreaTag  0x84
+-
+-/*
+- * Mfg Area Tags
+- */
+-#define  VpdFruFrameId    0x4649     // "FI"
+-#define  VpdSlotMapFormat 0x4D46     // "MF"
+-#define  VpdSlotMap       0x534D     // "SM"
+-
+-/*
+- * Structures of the areas
+- */
+-struct MfgVpdAreaStruct {
+-	u16 Tag;
+-	u8  TagLength;
+-	u8  AreaData1;
+-	u8  AreaData2;
+-};
+-typedef struct MfgVpdAreaStruct MfgArea;
+-#define MFG_ENTRY_SIZE   3
+-
+-struct SlotMapStruct {
+-	u8   AgentId;
+-	u8   SecondaryAgentId;
+-	u8   PhbId;
+-	char CardLocation[3];
+-	char Parms[8];
+-	char Reserved[2];
+-};
+-typedef struct SlotMapStruct SlotMap;
+-#define SLOT_ENTRY_SIZE   16
+-
+-/*
+- * Parse the Slot Area
+- */
+-static void __init iSeries_Parse_SlotArea(SlotMap *MapPtr, int MapLen,
+-		HvAgentId agent, u8 *PhbId, char card[4])
+-{
+-	int SlotMapLen = MapLen;
+-	SlotMap *SlotMapPtr = MapPtr;
+-
+-	/*
+-	 * Parse Slot label until we find the one requested
+-	 */
+-	while (SlotMapLen > 0) {
+-		if (SlotMapPtr->AgentId == agent) {
+-			/*
+-			 * If Phb wasn't found, grab the entry first one found.
+-			 */
+-			if (*PhbId == 0xff)
+-				*PhbId = SlotMapPtr->PhbId;
+-			/* Found it, extract the data. */
+-			if (SlotMapPtr->PhbId == *PhbId) {
+-				memcpy(card, &SlotMapPtr->CardLocation, 3);
+-				card[3]  = 0;
+-				break;
+-			}
+-		}
+-		/* Point to the next Slot */
+-		SlotMapPtr = (SlotMap *)((char *)SlotMapPtr + SLOT_ENTRY_SIZE);
+-		SlotMapLen -= SLOT_ENTRY_SIZE;
+-	}
+-}
+-
+-/*
+- * Parse the Mfg Area
+- */
+-static void __init iSeries_Parse_MfgArea(u8 *AreaData, int AreaLen,
+-		HvAgentId agent, u8 *PhbId,
+-		u8 *frame, char card[4])
+-{
+-	MfgArea *MfgAreaPtr = (MfgArea *)AreaData;
+-	int MfgAreaLen = AreaLen;
+-	u16 SlotMapFmt = 0;
+-
+-	/* Parse Mfg Data */
+-	while (MfgAreaLen > 0) {
+-		int MfgTagLen = MfgAreaPtr->TagLength;
+-		/* Frame ID         (FI 4649020310 ) */
+-		if (MfgAreaPtr->Tag == VpdFruFrameId)		/* FI  */
+-			*frame = MfgAreaPtr->AreaData1;
+-		/* Slot Map Format  (MF 4D46020004 ) */
+-		else if (MfgAreaPtr->Tag == VpdSlotMapFormat)	/* MF  */
+-			SlotMapFmt = (MfgAreaPtr->AreaData1 * 256)
+-				+ MfgAreaPtr->AreaData2;
+-		/* Slot Map         (SM 534D90 */
+-		else if (MfgAreaPtr->Tag == VpdSlotMap)	{	/* SM  */
+-			SlotMap *SlotMapPtr;
+-
+-			if (SlotMapFmt == 0x1004)
+-				SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr
+-						+ MFG_ENTRY_SIZE + 1);
+-			else
+-				SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr
+-						+ MFG_ENTRY_SIZE);
+-			iSeries_Parse_SlotArea(SlotMapPtr, MfgTagLen,
+-					agent, PhbId, card);
+-		}
+-		/*
+-		 * Point to the next Mfg Area
+-		 * Use defined size, sizeof give wrong answer
+-		 */
+-		MfgAreaPtr = (MfgArea *)((char *)MfgAreaPtr + MfgTagLen
+-				+ MFG_ENTRY_SIZE);
+-		MfgAreaLen -= (MfgTagLen + MFG_ENTRY_SIZE);
+-	}
+-}
+-
+-/*
+- * Look for "BUS".. Data is not Null terminated.
+- * PHBID of 0xFF indicates PHB was not found in VPD Data.
+- */
+-static int __init iSeries_Parse_PhbId(u8 *AreaPtr, int AreaLength)
+-{
+-	u8 *PhbPtr = AreaPtr;
+-	int DataLen = AreaLength;
+-	char PhbId = 0xFF;
+-
+-	while (DataLen > 0) {
+-		if ((*PhbPtr == 'B') && (*(PhbPtr + 1) == 'U')
+-				&& (*(PhbPtr + 2) == 'S')) {
+-			PhbPtr += 3;
+-			while (*PhbPtr == ' ')
+-				++PhbPtr;
+-			PhbId = (*PhbPtr & 0x0F);
+-			break;
+-		}
+-		++PhbPtr;
+-		--DataLen;
+-	}
+-	return PhbId;
+-}
+-
+-/*
+- * Parse out the VPD Areas
+- */
+-static void __init iSeries_Parse_Vpd(u8 *VpdData, int VpdDataLen,
+-		HvAgentId agent, u8 *frame, char card[4])
+-{
+-	u8 *TagPtr = VpdData;
+-	int DataLen = VpdDataLen - 3;
+-	u8 PhbId = 0xff;
+-
+-	while ((*TagPtr != VpdEndOfAreaTag) && (DataLen > 0)) {
+-		int AreaLen = *(TagPtr + 1) + (*(TagPtr + 2) * 256);
+-		u8 *AreaData  = TagPtr + 3;
+-
+-		if (*TagPtr == VpdIdStringTag)
+-			PhbId = iSeries_Parse_PhbId(AreaData, AreaLen);
+-		else if (*TagPtr == VpdVendorAreaTag)
+-			iSeries_Parse_MfgArea(AreaData, AreaLen,
+-					agent, &PhbId, frame, card);
+-		/* Point to next Area. */
+-		TagPtr  = AreaData + AreaLen;
+-		DataLen -= AreaLen;
+-	}
+-}
+-
+-static int __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent,
+-		u8 *frame, char card[4])
+-{
+-	int status = 0;
+-	int BusVpdLen = 0;
+-	u8 *BusVpdPtr = kmalloc(BUS_VPDSIZE, GFP_KERNEL);
+-
+-	if (BusVpdPtr == NULL) {
+-		printk("PCI: Bus VPD Buffer allocation failure.\n");
+-		return 0;
+-	}
+-	BusVpdLen = HvCallPci_getBusVpd(bus, iseries_hv_addr(BusVpdPtr),
+-					BUS_VPDSIZE);
+-	if (BusVpdLen == 0) {
+-		printk("PCI: Bus VPD Buffer zero length.\n");
+-		goto out_free;
+-	}
+-	/* printk("PCI: BusVpdPtr: %p, %d\n",BusVpdPtr, BusVpdLen); */
+-	/* Make sure this is what I think it is */
+-	if (*BusVpdPtr != VpdIdStringTag) {	/* 0x82 */
+-		printk("PCI: Bus VPD Buffer missing starting tag.\n");
+-		goto out_free;
+-	}
+-	iSeries_Parse_Vpd(BusVpdPtr, BusVpdLen, agent, frame, card);
+-	status = 1;
+-out_free:
+-	kfree(BusVpdPtr);
+-	return status;
+-}
+-
+-/*
+- * Prints the device information.
+- * - Pass in pci_dev* pointer to the device.
+- * - Pass in the device count
+- *
+- * Format:
+- * PCI: Bus  0, Device 26, Vendor 0x12AE  Frame  1, Card  C10  Ethernet
+- * controller
+- */
+-void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
+-{
+-	struct device_node *DevNode = PciDev->sysdata;
+-	struct pci_dn *pdn;
+-	u16 bus;
+-	u8 frame = 0;
+-	char card[4];
+-	HvSubBusNumber subbus;
+-	HvAgentId agent;
+-
+-	if (DevNode == NULL) {
+-		printk("%d. PCI: iSeries_Device_Information DevNode is NULL\n",
+-				count);
+-		return;
+-	}
+-
+-	pdn = PCI_DN(DevNode);
+-	bus = pdn->busno;
+-	subbus = pdn->bussubno;
+-	agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
+-			ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
+-
+-	if (iSeries_Get_Location_Code(bus, agent, &frame, card)) {
+-		printk("%d. PCI: Bus%3d, Device%3d, Vendor %04X Frame%3d, "
+-			"Card %4s  0x%04X\n", count, bus,
+-			PCI_SLOT(PciDev->devfn), PciDev->vendor, frame,
+-			card, (int)(PciDev->class >> 8));
+-	}
+-}
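
The VPD parsing that moves from vpdinfo.c into pci.c above walks a simple tag/length encoding: a one-byte tag, a 16-bit little-endian length in the next two bytes, then the payload. A minimal standalone sketch of that walk, assuming only the framing visible in iseries_parse_vpd(); the helper name and the dispatch comment are illustrative.

static void example_walk_vpd(const u8 *data, int data_len)
{
	while (data_len > 0) {
		u8 tag = data[0];
		int len;

		if (tag == 0x79)			/* VPD_END_OF_AREA */
			break;
		len = data[1] + (data[2] << 8);		/* little-endian length */
		data += 3;
		data_len -= 3;
		/* 0x82: ID string ("BUS n"), 0x84: vendor/Mfg area */
		data += len;
		data_len -= len;
	}
}
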
+diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig
+index f7c95eb..a6467a5 100644
+--- a/arch/powerpc/platforms/maple/Kconfig
++++ b/arch/powerpc/platforms/maple/Kconfig
+@@ -1,6 +1,7 @@
+ config PPC_MAPLE
+ 	depends on PPC_MULTIPLATFORM && PPC64
+ 	bool "Maple 970FX Evaluation Board"
++	select PCI
+ 	select MPIC
+ 	select U3_DART
+ 	select MPIC_U3_HT_IRQS
+diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
+index 771ed0c..3ffa0ac 100644
+--- a/arch/powerpc/platforms/maple/pci.c
++++ b/arch/powerpc/platforms/maple/pci.c
+@@ -558,7 +558,7 @@ void __init maple_pci_init(void)
+ 	 * safe assumptions hopefully.
+ 	 */
+ 	if (u3_agp) {
+-		struct device_node *np = u3_agp->arch_data;
++		struct device_node *np = u3_agp->dn;
+ 		PCI_DN(np)->busno = 0xf0;
+ 		for (np = np->child; np; np = np->sibling)
+ 			PCI_DN(np)->busno = 0xf0;
+diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
+index 144177d..3ce2d73 100644
+--- a/arch/powerpc/platforms/maple/setup.c
++++ b/arch/powerpc/platforms/maple/setup.c
+@@ -42,6 +42,7 @@
+ #include <linux/serial.h>
+ #include <linux/smp.h>
+ #include <linux/bitops.h>
++#include <linux/of_device.h>
+ 
+ #include <asm/processor.h>
+ #include <asm/sections.h>
+@@ -56,7 +57,6 @@
+ #include <asm/dma.h>
+ #include <asm/cputable.h>
+ #include <asm/time.h>
+-#include <asm/of_device.h>
+ #include <asm/lmb.h>
+ #include <asm/mpic.h>
+ #include <asm/rtas.h>
+diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig
+index 735e153..348e061 100644
+--- a/arch/powerpc/platforms/pasemi/Kconfig
++++ b/arch/powerpc/platforms/pasemi/Kconfig
+@@ -3,6 +3,7 @@ config PPC_PASEMI
+ 	bool "PA Semi SoC-based platforms"
+ 	default n
+ 	select MPIC
++	select PCI
+ 	select PPC_UDBG_16550
+ 	select PPC_NATIVE
+ 	select MPIC_BROKEN_REGREAD
+@@ -17,7 +18,7 @@ config PPC_PASEMI_IOMMU
+ 	bool "PA Semi IOMMU support"
+ 	depends on PPC_PASEMI
+ 	help
+-	  IOMMU support for PA6T-1682M
++	  IOMMU support for PA Semi PWRficient
+ 
+ config PPC_PASEMI_IOMMU_DMA_FORCE
+ 	bool "Force DMA engine to use IOMMU"
+@@ -36,13 +37,4 @@ config PPC_PASEMI_MDIO
+ 	help
+ 	  Driver for MDIO via GPIO on PWRficient platforms
+ 
+-config ELECTRA_IDE
+-      tristate "Electra IDE driver"
+-      default y
+-      depends on PPC_PASEMI && ATA
+-      select PATA_PLATFORM
+-      help
+-	This includes driver support for the Electra on-board IDE
+-	interface.
+-
+ endmenu
+diff --git a/arch/powerpc/platforms/pasemi/Makefile b/arch/powerpc/platforms/pasemi/Makefile
+index f47fcac..8f52d75 100644
+--- a/arch/powerpc/platforms/pasemi/Makefile
++++ b/arch/powerpc/platforms/pasemi/Makefile
+@@ -1,4 +1,3 @@
+-obj-y	+= setup.o pci.o time.o idle.o powersave.o iommu.o
++obj-y	+= setup.o pci.o time.o idle.o powersave.o iommu.o dma_lib.o
+ obj-$(CONFIG_PPC_PASEMI_MDIO)	+= gpio_mdio.o
+-obj-$(CONFIG_ELECTRA_IDE) += electra_ide.o
+ obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += cpufreq.o
+diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
+index 1cfb8b0..58556b0 100644
+--- a/arch/powerpc/platforms/pasemi/cpufreq.c
++++ b/arch/powerpc/platforms/pasemi/cpufreq.c
+@@ -32,6 +32,7 @@
+ #include <asm/io.h>
+ #include <asm/prom.h>
+ #include <asm/time.h>
++#include <asm/smp.h>
+ 
+ #define SDCASR_REG		0x0100
+ #define SDCASR_REG_STRIDE	0x1000
+@@ -124,6 +125,11 @@ static void set_astate(int cpu, unsigned int astate)
+ 	local_irq_restore(flags);
+ }
+ 
++int check_astate(void)
++{
++	return get_cur_astate(hard_smp_processor_id());
++}
++
+ void restore_astate(int cpu)
+ {
+ 	set_astate(cpu, current_astate);
+@@ -147,7 +153,10 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	if (!cpu)
+ 		goto out;
+ 
+-	dn = of_find_compatible_node(NULL, "sdc", "1682m-sdc");
++	dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
++	if (!dn)
++		dn = of_find_compatible_node(NULL, NULL,
++					     "pasemi,pwrficient-sdc");
+ 	if (!dn)
+ 		goto out;
+ 	err = of_address_to_resource(dn, 0, &res);
+@@ -160,7 +169,10 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 		goto out;
+ 	}
+ 
+-	dn = of_find_compatible_node(NULL, "gizmo", "1682m-gizmo");
++	dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo");
++	if (!dn)
++		dn = of_find_compatible_node(NULL, NULL,
++					     "pasemi,pwrficient-gizmo");
+ 	if (!dn) {
+ 		err = -ENODEV;
+ 		goto out_unmap_sdcasr;
+@@ -292,7 +304,8 @@ static struct cpufreq_driver pas_cpufreq_driver = {
+ 
+ static int __init pas_cpufreq_init(void)
+ {
+-	if (!machine_is_compatible("PA6T-1682M"))
++	if (!machine_is_compatible("PA6T-1682M") &&
++	    !machine_is_compatible("pasemi,pwrficient"))
+ 		return -ENODEV;
+ 
+ 	return cpufreq_register_driver(&pas_cpufreq_driver);
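
The cpufreq hunks above keep probing by the old "1682m-*" compatible strings and fall back to the new "pasemi,pwrficient-*" names. The same pattern can be written as a small helper; this is only an illustrative refactoring, the patch itself open-codes the two lookups.

#include <linux/of.h>

/* Try each compatible string in order; return the first node found. */
static struct device_node *find_first_compatible(const char *const *compats)
{
	struct device_node *dn = NULL;

	while (*compats && !dn)
		dn = of_find_compatible_node(NULL, NULL, *compats++);
	return dn;
}

/* e.g.  static const char *const sdc[] =
 *		{ "1682m-sdc", "pasemi,pwrficient-sdc", NULL };
 *	 dn = find_first_compatible(sdc);
 */
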
+diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
+new file mode 100644
+index 0000000..c529d8d
+--- /dev/null
++++ b/arch/powerpc/platforms/pasemi/dma_lib.c
+@@ -0,0 +1,488 @@
++/*
++ * Copyright (C) 2006-2007 PA Semi, Inc
++ *
++ * Common functions for DMA access on PA Semi PWRficient
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/of.h>
++
++#include <asm/pasemi_dma.h>
++
++#define MAX_TXCH 64
++#define MAX_RXCH 64
++
++static struct pasdma_status *dma_status;
++
++static void __iomem *iob_regs;
++static void __iomem *mac_regs[6];
++static void __iomem *dma_regs;
++
++static int base_hw_irq;
++
++static int num_txch, num_rxch;
++
++static struct pci_dev *dma_pdev;
++
++/* Bitmaps to handle allocation of channels */
++
++static DECLARE_BITMAP(txch_free, MAX_TXCH);
++static DECLARE_BITMAP(rxch_free, MAX_RXCH);
++
++/* pasemi_read_iob_reg - read IOB register
++ * @reg: Register to read (offset into PCI CFG space)
++ */
++unsigned int pasemi_read_iob_reg(unsigned int reg)
++{
++	return in_le32(iob_regs+reg);
++}
++EXPORT_SYMBOL(pasemi_read_iob_reg);
++
++/* pasemi_write_iob_reg - write IOB register
++ * @reg: Register to write to (offset into PCI CFG space)
++ * @val: Value to write
++ */
++void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
++{
++	out_le32(iob_regs+reg, val);
++}
++EXPORT_SYMBOL(pasemi_write_iob_reg);
++
++/* pasemi_read_mac_reg - read MAC register
++ * @intf: MAC interface
++ * @reg: Register to read (offset into PCI CFG space)
++ */
++unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
++{
++	return in_le32(mac_regs[intf]+reg);
++}
++EXPORT_SYMBOL(pasemi_read_mac_reg);
++
++/* pasemi_write_mac_reg - write MAC register
++ * @intf: MAC interface
++ * @reg: Register to write to (offset into PCI CFG space)
++ * @val: Value to write
++ */
++void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
++{
++	out_le32(mac_regs[intf]+reg, val);
++}
++EXPORT_SYMBOL(pasemi_write_mac_reg);
++
++/* pasemi_read_dma_reg - read DMA register
++ * @reg: Register to read (offset into PCI CFG space)
++ */
++unsigned int pasemi_read_dma_reg(unsigned int reg)
++{
++	return in_le32(dma_regs+reg);
++}
++EXPORT_SYMBOL(pasemi_read_dma_reg);
++
++/* pasemi_write_dma_reg - write DMA register
++ * @reg: Register to write to (offset into PCI CFG space)
++ * @val: Value to write
++ */
++void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
++{
++	out_le32(dma_regs+reg, val);
++}
++EXPORT_SYMBOL(pasemi_write_dma_reg);
++
++static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
++{
++	int bit;
++	int start, limit;
++
++	switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
++	case TXCHAN_EVT0:
++		start = 0;
++		limit = 10;
++		break;
++	case TXCHAN_EVT1:
++		start = 10;
++		limit = MAX_TXCH;
++		break;
++	default:
++		start = 0;
++		limit = MAX_TXCH;
++		break;
++	}
++retry:
++	bit = find_next_bit(txch_free, MAX_TXCH, start);
++	if (bit >= limit)
++		return -ENOSPC;
++	if (!test_and_clear_bit(bit, txch_free))
++		goto retry;
++
++	return bit;
++}
++
++static void pasemi_free_tx_chan(int chan)
++{
++	BUG_ON(test_bit(chan, txch_free));
++	set_bit(chan, txch_free);
++}
++
++static int pasemi_alloc_rx_chan(void)
++{
++	int bit;
++retry:
++	bit = find_first_bit(rxch_free, MAX_RXCH);
++	if (bit >= MAX_RXCH)
++		return -ENOSPC;
++	if (!test_and_clear_bit(bit, rxch_free))
++		goto retry;
++
++	return bit;
++}
++
++static void pasemi_free_rx_chan(int chan)
++{
++	BUG_ON(test_bit(chan, rxch_free));
++	set_bit(chan, rxch_free);
++}
++
++/* pasemi_dma_alloc_chan - Allocate a DMA channel
++ * @type: Type of channel to allocate
++ * @total_size: Total size of structure to allocate (to allow for more
++ *		room behind the structure to be used by the client)
++ * @offset: Offset in bytes from start of the total structure to the beginning
++ *	    of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
++ *	    not the first member of the client structure.
++ *
++ * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
++ * type argument specifies whether it's a RX or TX channel, and in the case
++ * of TX channels which group it needs to belong to (if any).
++ *
++ * Returns a pointer to the total structure allocated on success, NULL
++ * on failure.
++ */
++void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
++			    int total_size, int offset)
++{
++	void *buf;
++	struct pasemi_dmachan *chan;
++	int chno;
++
++	BUG_ON(total_size < sizeof(struct pasemi_dmachan));
++
++	buf = kzalloc(total_size, GFP_KERNEL);
++
++	if (!buf)
++		return NULL;
++	chan = buf + offset;
++
++	chan->priv = buf;
++
++	switch (type & (TXCHAN|RXCHAN)) {
++	case RXCHAN:
++		chno = pasemi_alloc_rx_chan();
++		chan->chno = chno;
++		chan->irq = irq_create_mapping(NULL,
++					       base_hw_irq + num_txch + chno);
++		chan->status = &dma_status->rx_sta[chno];
++		break;
++	case TXCHAN:
++		chno = pasemi_alloc_tx_chan(type);
++		chan->chno = chno;
++		chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
++		chan->status = &dma_status->tx_sta[chno];
++		break;
++	}
++
++	chan->chan_type = type;
++
++	return chan;
++}
++EXPORT_SYMBOL(pasemi_dma_alloc_chan);
++
++/* pasemi_dma_free_chan - Free a previously allocated channel
++ * @chan: Channel to free
++ *
++ * Frees a previously allocated channel. It will also deallocate any
++ * descriptor ring associated with the channel, if allocated.
++ */
++void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
++{
++	if (chan->ring_virt)
++		pasemi_dma_free_ring(chan);
++
++	switch (chan->chan_type & (RXCHAN|TXCHAN)) {
++	case RXCHAN:
++		pasemi_free_rx_chan(chan->chno);
++		break;
++	case TXCHAN:
++		pasemi_free_tx_chan(chan->chno);
++		break;
++	}
++
++	kfree(chan->priv);
++}
++EXPORT_SYMBOL(pasemi_dma_free_chan);
++
++/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
++ * @chan: Channel for which to allocate
++ * @ring_size: Ring size in 64-bit (8-byte) words
++ *
++ * Allocate a descriptor ring for a channel. Returns 0 on success, errno
++ * on failure. The passed in struct pasemi_dmachan is updated with the
++ * virtual and DMA addresses of the ring.
++ */
++int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
++{
++	BUG_ON(chan->ring_virt);
++
++	chan->ring_size = ring_size;
++
++	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
++					     ring_size * sizeof(u64),
++					     &chan->ring_dma, GFP_KERNEL);
++
++	if (!chan->ring_virt)
++		return -ENOMEM;
++
++	memset(chan->ring_virt, 0, ring_size * sizeof(u64));
++
++	return 0;
++}
++EXPORT_SYMBOL(pasemi_dma_alloc_ring);
++
++/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
++ * @chan: Channel for which to free the descriptor ring
++ *
++ * Frees a previously allocated descriptor ring for a channel.
++ */
++void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
++{
++	BUG_ON(!chan->ring_virt);
++
++	dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
++			  chan->ring_virt, chan->ring_dma);
++	chan->ring_virt = NULL;
++	chan->ring_size = 0;
++	chan->ring_dma = 0;
++}
++EXPORT_SYMBOL(pasemi_dma_free_ring);
++
++/* pasemi_dma_start_chan - Start a DMA channel
++ * @chan: Channel to start
++ * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
++ *
++ * Enables (starts) a DMA channel with optional additional arguments.
++ */
++void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
++{
++	if (chan->chan_type == RXCHAN)
++		pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
++				     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
++	else
++		pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
++				     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
++}
++EXPORT_SYMBOL(pasemi_dma_start_chan);
++
++/* pasemi_dma_stop_chan - Stop a DMA channel
++ * @chan: Channel to stop
++ *
++ * Stops (disables) a DMA channel. This is done by setting the ST bit in the
++ * CMDSTA register and waiting on the ACT (active) bit to clear, then
++ * finally disabling the whole channel.
++ *
++ * This function will only wait a short while for the channel to stop; if
++ * it does not stop in time, failure is returned.
++ *
++ * Returns 1 on success, 0 on failure.
++ */
++#define MAX_RETRIES 5000
++int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
++{
++	int reg, retries;
++	u32 sta;
++
++	if (chan->chan_type == RXCHAN) {
++		reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
++		pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
++		for (retries = 0; retries < MAX_RETRIES; retries++) {
++			sta = pasemi_read_dma_reg(reg);
++			if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
++				pasemi_write_dma_reg(reg, 0);
++				return 1;
++			}
++			cond_resched();
++		}
++	} else {
++		reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
++		pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
++		for (retries = 0; retries < MAX_RETRIES; retries++) {
++			sta = pasemi_read_dma_reg(reg);
++			if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
++				pasemi_write_dma_reg(reg, 0);
++				return 1;
++			}
++			cond_resched();
++		}
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(pasemi_dma_stop_chan);
++
++/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
++ * @chan: Channel to allocate for
++ * @size: Size of buffer in bytes
++ * @handle: DMA handle
++ *
++ * Allocate a buffer to be used by the DMA engine for read/write,
++ * similar to dma_alloc_coherent().
++ *
++ * Returns the virtual address of the buffer, or NULL in case of failure.
++ */
++void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
++			   dma_addr_t *handle)
++{
++	return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
++}
++EXPORT_SYMBOL(pasemi_dma_alloc_buf);
++
++/* pasemi_dma_free_buf - Free a buffer used for DMA
++ * @chan: Channel the buffer was allocated for
++ * @size: Size of buffer in bytes
++ * @handle: DMA handle
++ *
++ * Frees a previously allocated buffer.
++ */
++void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
++			 dma_addr_t *handle)
++{
++	dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
++}
++EXPORT_SYMBOL(pasemi_dma_free_buf);
++
++static void *map_onedev(struct pci_dev *p, int index)
++{
++	struct device_node *dn;
++	void __iomem *ret;
++
++	dn = pci_device_to_OF_node(p);
++	if (!dn)
++		goto fallback;
++
++	ret = of_iomap(dn, index);
++	if (!ret)
++		goto fallback;
++
++	return ret;
++fallback:
++	/* This is hardcoded and ugly, but we have some firmware versions
++	 * that don't provide the register space in the device tree. Luckily
++	 * they are at well-known locations so we can just do the math here.
++	 */
++	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
++}
++
++/* pasemi_dma_init - Initialize the PA Semi DMA library
++ *
++ * This function initializes the DMA library. It must be called before
++ * any other function in the library.
++ *
++ * Returns 0 on success, errno on failure.
++ */
++int pasemi_dma_init(void)
++{
++	static spinlock_t init_lock = SPIN_LOCK_UNLOCKED;
++	struct pci_dev *iob_pdev;
++	struct pci_dev *pdev;
++	struct resource res;
++	struct device_node *dn;
++	int i, intf, err = 0;
++	u32 tmp;
++
++	if (!machine_is(pasemi))
++		return -ENODEV;
++
++	spin_lock(&init_lock);
++
++	/* Make sure we haven't already initialized */
++	if (dma_pdev)
++		goto out;
++
++	iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
++	if (!iob_pdev) {
++		BUG();
++		printk(KERN_WARNING "Can't find I/O Bridge\n");
++		err = -ENODEV;
++		goto out;
++	}
++	iob_regs = map_onedev(iob_pdev, 0);
++
++	dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
++	if (!dma_pdev) {
++		BUG();
++		printk(KERN_WARNING "Can't find DMA controller\n");
++		err = -ENODEV;
++		goto out;
++	}
++	dma_regs = map_onedev(dma_pdev, 0);
++	base_hw_irq = virq_to_hw(dma_pdev->irq);
++
++	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
++	num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;
++
++	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
++	num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;
++
++	intf = 0;
++	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
++	     pdev;
++	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
++		mac_regs[intf++] = map_onedev(pdev, 0);
++
++	pci_dev_put(pdev);
++
++	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
++	     pdev;
++	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
++		mac_regs[intf++] = map_onedev(pdev, 0);
++
++	pci_dev_put(pdev);
++
++	dn = pci_device_to_OF_node(iob_pdev);
++	if (dn)
++		err = of_address_to_resource(dn, 1, &res);
++	if (!dn || err) {
++		/* Fallback for old firmware */
++		res.start = 0xfd800000;
++		res.end = res.start + 0x1000;
++	}
++	dma_status = __ioremap(res.start, res.end-res.start, 0);
++	pci_dev_put(iob_pdev);
++
++	for (i = 0; i < MAX_TXCH; i++)
++		__set_bit(i, txch_free);
++
++	for (i = 0; i < MAX_RXCH; i++)
++		__set_bit(i, rxch_free);
++
++	printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
++		"(%d tx, %d rx channels)\n", num_txch, num_rxch);
++
++out:
++	spin_unlock(&init_lock);
++	return err;
++}
++EXPORT_SYMBOL(pasemi_dma_init);
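
For orientation, the dma_lib additions above export a small channel/buffer API (pasemi_dma_init, pasemi_dma_alloc_buf, pasemi_dma_free_buf, pasemi_dma_stop_chan). A minimal sketch of the intended call order from a client driver, illustrative only and not part of the patch (the example_dma_setup() wrapper is made up; the helpers and types are the ones added above):

/* Illustrative sketch: a client driver calls pasemi_dma_init() once,
 * then allocates coherent buffers against a channel it already owns.
 */
static int example_dma_setup(struct pasemi_dmachan *chan)
{
	dma_addr_t handle;
	void *buf;
	int err;

	err = pasemi_dma_init();	/* returns 0 or -errno; safe to call repeatedly */
	if (err)
		return err;

	buf = pasemi_dma_alloc_buf(chan, 4096, &handle);
	if (!buf)
		return -ENOMEM;

	/* ... hand 'handle' to the hardware, use 'buf' from the CPU ... */

	pasemi_dma_free_buf(chan, 4096, &handle);
	return 0;
}
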
+diff --git a/arch/powerpc/platforms/pasemi/electra_ide.c b/arch/powerpc/platforms/pasemi/electra_ide.c
+deleted file mode 100644
+index 12fb0c9..0000000
+--- a/arch/powerpc/platforms/pasemi/electra_ide.c
++++ /dev/null
+@@ -1,96 +0,0 @@
+-/*
+- * Copyright (C) 2007 PA Semi, Inc
+- *
+- * Maintained by: Olof Johansson <olof at lixom.net>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+- */
+-
+-#include <linux/platform_device.h>
+-
+-#include <asm/prom.h>
+-#include <asm/system.h>
+-
+-/* The electra IDE interface is incredibly simple: Just a device on the localbus
+- * with interrupts hooked up to one of the GPIOs. The device tree contains the
+- * address window and interrupt mappings already, and the pata_platform driver handles
+- * the rest. We just need to hook the two up.
+- */
+-
+-#define MAX_IFS	4	/* really, we have only one */
+-
+-static struct platform_device *pdevs[MAX_IFS];
+-
+-static int __devinit electra_ide_init(void)
+-{
+-	struct device_node *np;
+-	struct resource r[3];
+-	int ret = 0;
+-	int i;
+-
+-	np = of_find_compatible_node(NULL, "ide", "electra-ide");
+-	i = 0;
+-
+-	while (np && i < MAX_IFS) {
+-		memset(r, 0, sizeof(r));
+-
+-		/* pata_platform wants two address ranges: one for the base registers,
+-		 * another for the control (altstatus). It's located at offset 0x3f6 in
+-		 * the window, but the device tree only has one large register window
+-		 * that covers both ranges. So we need to split it up by hand here:
+-		 */
+-
+-		ret = of_address_to_resource(np, 0, &r[0]);
+-		if (ret)
+-			goto out;
+-		ret = of_address_to_resource(np, 0, &r[1]);
+-		if (ret)
+-			goto out;
+-
+-		r[1].start += 0x3f6;
+-		r[0].end = r[1].start-1;
+-
+-		r[2].start = irq_of_parse_and_map(np, 0);
+-		r[2].end = irq_of_parse_and_map(np, 0);
+-		r[2].flags = IORESOURCE_IRQ;
+-
+-		pr_debug("registering platform device at 0x%lx/0x%lx, irq is %ld\n",
+-			 r[0].start, r[1].start, r[2].start);
+-		pdevs[i] = platform_device_register_simple("pata_platform", i, r, 3);
+-		if (IS_ERR(pdevs[i])) {
+-			ret = PTR_ERR(pdevs[i]);
+-			pdevs[i] = NULL;
+-			goto out;
+-		}
+-		np = of_find_compatible_node(np, "ide", "electra-ide");
+-	}
+-out:
+-	return ret;
+-}
+-module_init(electra_ide_init);
+-
+-static void __devexit electra_ide_exit(void)
+-{
+-	int i;
+-
+-	for (i = 0; i < MAX_IFS; i++)
+-		if (pdevs[i])
+-			platform_device_unregister(pdevs[i]);
+-}
+-module_exit(electra_ide_exit);
+-
+-
+-MODULE_LICENSE("GPL");
+-MODULE_AUTHOR ("Olof Johansson <olof at lixom.net>");
+-MODULE_DESCRIPTION("PA Semi Electra IDE driver");
+diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
+index dae9f65..b465429 100644
+--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
++++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
+@@ -30,7 +30,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
+-#include <asm/of_platform.h>
++#include <linux/of_platform.h>
+ 
+ #define DELAY 1
+ 
+@@ -218,45 +218,27 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev,
+ 				     const struct of_device_id *match)
+ {
+ 	struct device *dev = &ofdev->dev;
+-	struct device_node *np = ofdev->node;
+-	struct device_node *gpio_np;
++	struct device_node *phy_dn, *np = ofdev->node;
+ 	struct mii_bus *new_bus;
+-	struct resource res;
+ 	struct gpio_priv *priv;
+ 	const unsigned int *prop;
+-	int err = 0;
++	int err;
+ 	int i;
+ 
+-	gpio_np = of_find_compatible_node(NULL, "gpio", "1682m-gpio");
+-
+-	if (!gpio_np)
+-		return -ENODEV;
+-
+-	err = of_address_to_resource(gpio_np, 0, &res);
+-	of_node_put(gpio_np);
+-
+-	if (err)
+-		return -EINVAL;
+-
+-	if (!gpio_regs)
+-		gpio_regs = ioremap(res.start, 0x100);
+-
+-	if (!gpio_regs)
+-		return -EPERM;
+-
++	err = -ENOMEM;
+ 	priv = kzalloc(sizeof(struct gpio_priv), GFP_KERNEL);
+-	if (priv == NULL)
+-		return -ENOMEM;
++	if (!priv)
++		goto out;
+ 
+ 	new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
+ 
+-	if (new_bus == NULL)
+-		return -ENOMEM;
++	if (!new_bus)
++		goto out_free_priv;
+ 
+-	new_bus->name = "pasemi gpio mdio bus",
+-	new_bus->read = &gpio_mdio_read,
+-	new_bus->write = &gpio_mdio_write,
+-	new_bus->reset = &gpio_mdio_reset,
++	new_bus->name = "pasemi gpio mdio bus";
++	new_bus->read = &gpio_mdio_read;
++	new_bus->write = &gpio_mdio_write;
++	new_bus->reset = &gpio_mdio_reset;
+ 
+ 	prop = of_get_property(np, "reg", NULL);
+ 	new_bus->id = *prop;
+@@ -265,9 +247,24 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev,
+ 	new_bus->phy_mask = 0;
+ 
+ 	new_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+-	for(i = 0; i < PHY_MAX_ADDR; ++i)
+-		new_bus->irq[i] = irq_create_mapping(NULL, 10);
+ 
++	if (!new_bus->irq)
++		goto out_free_bus;
++
++	for (i = 0; i < PHY_MAX_ADDR; i++)
++		new_bus->irq[i] = NO_IRQ;
++
++	for (phy_dn = of_get_next_child(np, NULL);
++	     phy_dn != NULL;
++	     phy_dn = of_get_next_child(np, phy_dn)) {
++		const unsigned int *ip, *regp;
++
++		ip = of_get_property(phy_dn, "interrupts", NULL);
++		regp = of_get_property(phy_dn, "reg", NULL);
++		if (!ip || !regp || *regp >= PHY_MAX_ADDR)
++			continue;
++		new_bus->irq[*regp] = irq_create_mapping(NULL, *ip);
++	}
+ 
+ 	prop = of_get_property(np, "mdc-pin", NULL);
+ 	priv->mdc_pin = *prop;
+@@ -280,17 +277,21 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev,
+ 
+ 	err = mdiobus_register(new_bus);
+ 
+-	if (0 != err) {
++	if (err != 0) {
+ 		printk(KERN_ERR "%s: Cannot register as MDIO bus, err %d\n",
+ 				new_bus->name, err);
+-		goto bus_register_fail;
++		goto out_free_irq;
+ 	}
+ 
+ 	return 0;
+ 
+-bus_register_fail:
++out_free_irq:
++	kfree(new_bus->irq);
++out_free_bus:
+ 	kfree(new_bus);
+-
++out_free_priv:
++	kfree(priv);
++out:
+ 	return err;
+ }
+ 
+@@ -317,6 +318,7 @@ static struct of_device_id gpio_mdio_match[] =
+ 	},
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, gpio_mdio_match);
+ 
+ static struct of_platform_driver gpio_mdio_driver =
+ {
+@@ -330,12 +332,32 @@ static struct of_platform_driver gpio_mdio_driver =
+ 
+ int gpio_mdio_init(void)
+ {
++	struct device_node *np;
++
++	np = of_find_compatible_node(NULL, NULL, "1682m-gpio");
++	if (!np)
++		np = of_find_compatible_node(NULL, NULL,
++					     "pasemi,pwrficient-gpio");
++	if (!np)
++		return -ENODEV;
++	gpio_regs = of_iomap(np, 0);
++	of_node_put(np);
++
++	if (!gpio_regs)
++		return -ENODEV;
++
+ 	return of_register_platform_driver(&gpio_mdio_driver);
+ }
++module_init(gpio_mdio_init);
+ 
+ void gpio_mdio_exit(void)
+ {
+ 	of_unregister_platform_driver(&gpio_mdio_driver);
++	if (gpio_regs)
++		iounmap(gpio_regs);
+ }
+-device_initcall(gpio_mdio_init);
++module_exit(gpio_mdio_exit);
+ 
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Olof Johansson <olof at lixom.net>");
++MODULE_DESCRIPTION("Driver for MDIO over GPIO on PA Semi PWRficient-based boards");
+diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
+index d8e1fcc..43911d8 100644
+--- a/arch/powerpc/platforms/pasemi/idle.c
++++ b/arch/powerpc/platforms/pasemi/idle.c
+@@ -74,9 +74,6 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
+ 
+ static int __init pasemi_idle_init(void)
+ {
+-	if (!machine_is(pasemi))
+-		return -ENODEV;
+-
+ #ifndef CONFIG_PPC_PASEMI_CPUFREQ
+ 	printk(KERN_WARNING "No cpufreq driver, powersavings modes disabled\n");
+ 	current_mode = 0;
+@@ -88,7 +85,7 @@ static int __init pasemi_idle_init(void)
+ 
+ 	return 0;
+ }
+-late_initcall(pasemi_idle_init);
++machine_late_initcall(pasemi, pasemi_idle_init);
+ 
+ static int __init idle_param(char *p)
+ {
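
The hunk above is one instance of a conversion that recurs throughout this patch: initcalls that used to run on every platform and bail out with a machine_is() check are switched to the machine_*_initcall() helpers, which only invoke the function when the named platform actually booted. A schematic before/after sketch (foo_init is a made-up name):

/* Before: registered unconditionally, guards itself at run time. */
static int __init foo_init(void)
{
	if (!machine_is(pasemi))
		return -ENODEV;
	/* ... platform setup ... */
	return 0;
}
late_initcall(foo_init);

/* After: only called when the pasemi machine description matched,
 * so the explicit guard can be dropped.
 */
static int __init foo_init(void)
{
	/* ... platform setup ... */
	return 0;
}
machine_late_initcall(pasemi, foo_init);
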
+diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
+index 516acab..b1e524f 100644
+--- a/arch/powerpc/platforms/pasemi/pasemi.h
++++ b/arch/powerpc/platforms/pasemi/pasemi.h
+@@ -9,6 +9,7 @@ extern void __devinit pas_pci_dma_dev_setup(struct pci_dev *dev);
+ extern void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
+ 
+ extern void __init alloc_iobmap_l2(void);
++extern void __init pasemi_map_registers(void);
+ 
+ /* Power savings modes, implemented in asm */
+ extern void idle_spin(void);
+@@ -16,8 +17,14 @@ extern void idle_doze(void);
+ 
+ /* Restore astate to last set */
+ #ifdef CONFIG_PPC_PASEMI_CPUFREQ
++extern int check_astate(void);
+ extern void restore_astate(int cpu);
+ #else
++static inline int check_astate(void)
++{
++	/* Always return >0 so we never power save */
++	return 1;
++}
+ static inline void restore_astate(int cpu)
+ {
+ }
+diff --git a/arch/powerpc/platforms/pasemi/powersave.S b/arch/powerpc/platforms/pasemi/powersave.S
+index 6d0fba6..56f45ad 100644
+--- a/arch/powerpc/platforms/pasemi/powersave.S
++++ b/arch/powerpc/platforms/pasemi/powersave.S
+@@ -62,7 +62,16 @@ sleep_common:
+ 	mflr	r0
+ 	std	r0, 16(r1)
+ 	stdu	r1,-64(r1)
++#ifdef CONFIG_PPC_PASEMI_CPUFREQ
++	std	r3, 48(r1)
+ 
++	/* Only do power savings when in astate 0 */
++	bl	.check_astate
++	cmpwi	r3,0
++	bne	1f
++
++	ld	r3, 48(r1)
++#endif
+ 	LOAD_REG_IMMEDIATE(r6,MSR_DR|MSR_IR|MSR_ME|MSR_EE)
+ 	mfmsr	r4
+ 	andc	r5,r4,r6
+@@ -73,7 +82,7 @@ sleep_common:
+ 
+ 	mtmsrd	r4,0
+ 
+-	addi	r1,r1,64
++1:	addi	r1,r1,64
+ 	ld	r0,16(r1)
+ 	mtlr	r0
+ 	blr
+diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
+index 3d62060..c64fb5b 100644
+--- a/arch/powerpc/platforms/pasemi/setup.c
++++ b/arch/powerpc/platforms/pasemi/setup.c
+@@ -27,6 +27,7 @@
+ #include <linux/delay.h>
+ #include <linux/console.h>
+ #include <linux/pci.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/prom.h>
+ #include <asm/system.h>
+@@ -35,7 +36,7 @@
+ #include <asm/mpic.h>
+ #include <asm/smp.h>
+ #include <asm/time.h>
+-#include <asm/of_platform.h>
++#include <asm/mmu.h>
+ 
+ #include <pcmcia/ss.h>
+ #include <pcmcia/cistpl.h>
+@@ -43,6 +44,10 @@
+ 
+ #include "pasemi.h"
+ 
++#if !defined(CONFIG_SMP)
++static void smp_send_stop(void) {}
++#endif
++
+ /* SDC reset register, must be pre-mapped at reset time */
+ static void __iomem *reset_reg;
+ 
+@@ -56,10 +61,14 @@ struct mce_regs {
+ 
+ static struct mce_regs mce_regs[MAX_MCE_REGS];
+ static int num_mce_regs;
++static int nmi_virq = NO_IRQ;
+ 
+ 
+ static void pas_restart(char *cmd)
+ {
++	/* Need to put other cpus in a hold loop so they're not sleeping */
++	smp_send_stop();
++	udelay(10000);
+ 	printk("Restarting...\n");
+ 	while (1)
+ 		out_le32(reset_reg, 0x6000000);
+@@ -126,9 +135,6 @@ static int __init pas_setup_mce_regs(void)
+ 	struct pci_dev *dev;
+ 	int reg;
+ 
+-	if (!machine_is(pasemi))
+-		return -ENODEV;
+-
+ 	/* Remap various SoC status registers for use by the MCE handler */
+ 
+ 	reg = 0;
+@@ -172,7 +178,7 @@ static int __init pas_setup_mce_regs(void)
+ 
+ 	return 0;
+ }
+-device_initcall(pas_setup_mce_regs);
++machine_device_initcall(pasemi, pas_setup_mce_regs);
+ 
+ static __init void pas_init_IRQ(void)
+ {
+@@ -181,6 +187,8 @@ static __init void pas_init_IRQ(void)
+ 	unsigned long openpic_addr;
+ 	const unsigned int *opprop;
+ 	int naddr, opplen;
++	int mpic_flags;
++	const unsigned int *nmiprop;
+ 	struct mpic *mpic;
+ 
+ 	mpic_node = NULL;
+@@ -213,13 +221,26 @@ static __init void pas_init_IRQ(void)
+ 	openpic_addr = of_read_number(opprop, naddr);
+ 	printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
+ 
++	mpic_flags = MPIC_PRIMARY | MPIC_LARGE_VECTORS | MPIC_NO_BIAS;
++
++	nmiprop = of_get_property(mpic_node, "nmi-source", NULL);
++	if (nmiprop)
++		mpic_flags |= MPIC_ENABLE_MCK;
++
+ 	mpic = mpic_alloc(mpic_node, openpic_addr,
+-			  MPIC_PRIMARY|MPIC_LARGE_VECTORS,
+-			  0, 0, " PAS-OPIC  ");
++			  mpic_flags, 0, 0, "PASEMI-OPIC");
+ 	BUG_ON(!mpic);
+ 
+ 	mpic_assign_isu(mpic, 0, openpic_addr + 0x10000);
+ 	mpic_init(mpic);
++	/* The NMI/MCK source needs to be prio 15 */
++	if (nmiprop) {
++		nmi_virq = irq_create_mapping(NULL, *nmiprop);
++		mpic_irq_set_priority(nmi_virq, 15);
++		set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING);
++		mpic_unmask_irq(nmi_virq);
++	}
++
+ 	of_node_put(mpic_node);
+ 	of_node_put(root);
+ }
+@@ -239,6 +260,14 @@ static int pas_machine_check_handler(struct pt_regs *regs)
+ 
+ 	srr0 = regs->nip;
+ 	srr1 = regs->msr;
++
++	if (nmi_virq != NO_IRQ && mpic_get_mcirq() == nmi_virq) {
++		printk(KERN_ERR "NMI delivered\n");
++		debugger(regs);
++		mpic_end_irq(nmi_virq);
++		goto out;
++	}
++
+ 	dsisr = mfspr(SPRN_DSISR);
+ 	printk(KERN_ERR "Machine Check on CPU %d\n", cpu);
+ 	printk(KERN_ERR "SRR0  0x%016lx SRR1 0x%016lx\n", srr0, srr1);
+@@ -295,14 +324,14 @@ static int pas_machine_check_handler(struct pt_regs *regs)
+ 		int i;
+ 
+ 		printk(KERN_ERR "slb contents:\n");
+-		for (i = 0; i < SLB_NUM_ENTRIES; i++) {
++		for (i = 0; i < mmu_slb_size; i++) {
+ 			asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
+ 			asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
+ 			printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
+ 		}
+ 	}
+ 
+-
++out:
+ 	/* SRR1[62] is from MSR[62] if recoverable, so pass that back */
+ 	return !!(srr1 & 0x2);
+ }
+@@ -362,16 +391,17 @@ static inline void pasemi_pcmcia_init(void)
+ 
+ 
+ static struct of_device_id pasemi_bus_ids[] = {
++	/* Unfortunately needed for legacy firmwares */
+ 	{ .type = "localbus", },
+ 	{ .type = "sdc", },
++	/* These are the proper entries, which newer firmware uses */
++	{ .compatible = "pasemi,localbus", },
++	{ .compatible = "pasemi,sdc", },
+ 	{},
+ };
+ 
+ static int __init pasemi_publish_devices(void)
+ {
+-	if (!machine_is(pasemi))
+-		return 0;
+-
+ 	pasemi_pcmcia_init();
+ 
+ 	/* Publish OF platform devices for SDC and other non-PCI devices */
+@@ -379,7 +409,7 @@ static int __init pasemi_publish_devices(void)
+ 
+ 	return 0;
+ }
+-device_initcall(pasemi_publish_devices);
++machine_device_initcall(pasemi, pasemi_publish_devices);
+ 
+ 
+ /*
+@@ -389,7 +419,8 @@ static int __init pas_probe(void)
+ {
+ 	unsigned long root = of_get_flat_dt_root();
+ 
+-	if (!of_flat_dt_is_compatible(root, "PA6T-1682M"))
++	if (!of_flat_dt_is_compatible(root, "PA6T-1682M") &&
++	    !of_flat_dt_is_compatible(root, "pasemi,pwrficient"))
+ 		return 0;
+ 
+ 	hpte_init_native();
+@@ -400,7 +431,7 @@ static int __init pas_probe(void)
+ }
+ 
+ define_machine(pasemi) {
+-	.name			= "PA Semi PA6T-1682M",
++	.name			= "PA Semi PWRficient",
+ 	.probe			= pas_probe,
+ 	.setup_arch		= pas_setup_arch,
+ 	.init_early		= pas_init_early,
+diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
+index da2007e..21226b7 100644
+--- a/arch/powerpc/platforms/powermac/low_i2c.c
++++ b/arch/powerpc/platforms/powermac/low_i2c.c
+@@ -585,8 +585,7 @@ static void __init kw_i2c_probe(void)
+ 	struct device_node *np, *child, *parent;
+ 
+ 	/* Probe keywest-i2c busses */
+-	for (np = NULL;
+-	     (np = of_find_compatible_node(np, "i2c","keywest-i2c")) != NULL;){
++	for_each_compatible_node(np, "i2c","keywest-i2c") {
+ 		struct pmac_i2c_host_kw *host;
+ 		int multibus, chans, i;
+ 
+@@ -1462,9 +1461,6 @@ int __init pmac_i2c_init(void)
+ 		return 0;
+ 	i2c_inited = 1;
+ 
+-	if (!machine_is(powermac))
+-		return 0;
+-
+ 	/* Probe keywest-i2c busses */
+ 	kw_i2c_probe();
+ 
+@@ -1483,7 +1479,7 @@ int __init pmac_i2c_init(void)
+ 
+ 	return 0;
+ }
+-arch_initcall(pmac_i2c_init);
++machine_arch_initcall(powermac, pmac_i2c_init);
+ 
+ /* Since pmac_i2c_init can be called too early for the platform device
+  * registration, we need to do it at a later time. In our case, subsys
+@@ -1515,4 +1511,4 @@ static int __init pmac_i2c_create_platform_devices(void)
+ 
+ 	return 0;
+ }
+-subsys_initcall(pmac_i2c_create_platform_devices);
++machine_subsys_initcall(powermac, pmac_i2c_create_platform_devices);
+diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
+index f852ae3..1c58db9 100644
+--- a/arch/powerpc/platforms/powermac/pci.c
++++ b/arch/powerpc/platforms/powermac/pci.c
+@@ -40,8 +40,6 @@
+ static int has_uninorth;
+ #ifdef CONFIG_PPC64
+ static struct pci_controller *u3_agp;
+-static struct pci_controller *u4_pcie;
+-static struct pci_controller *u3_ht;
+ #else
+ static int has_second_ohare;
+ #endif /* CONFIG_PPC64 */
+@@ -314,12 +312,15 @@ static int u3_ht_skip_device(struct pci_controller *hose,
+ 
+ 	/* We only allow config cycles to devices that are in OF device-tree
+ 	 * as we are apparently having some weird things going on with some
+-	 * revs of K2 on recent G5s
++	 * revs of K2 on recent G5s, except for the host bridge itself, which
++	 * is missing from the tree but we know we can probe.
+ 	 */
+ 	if (bus->self)
+ 		busdn = pci_device_to_OF_node(bus->self);
++	else if (devfn == 0)
++		return 0;
+ 	else
+-		busdn = hose->arch_data;
++		busdn = hose->dn;
+ 	for (dn = busdn->child; dn; dn = dn->sibling)
+ 		if (PCI_DN(dn) && PCI_DN(dn)->devfn == devfn)
+ 			break;
+@@ -344,14 +345,15 @@ static int u3_ht_skip_device(struct pci_controller *hose,
+ 		+ (((unsigned int)bus) << 16) \
+ 		+ 0x01000000UL)
+ 
+-static volatile void __iomem *u3_ht_cfg_access(struct pci_controller* hose,
+-					     u8 bus, u8 devfn, u8 offset)
++static void __iomem *u3_ht_cfg_access(struct pci_controller *hose, u8 bus,
++				      u8 devfn, u8 offset, int *swap)
+ {
++	*swap = 1;
+ 	if (bus == hose->first_busno) {
+-		/* For now, we don't self probe U3 HT bridge */
+-		if (PCI_SLOT(devfn) == 0)
+-			return NULL;
+-		return hose->cfg_data + U3_HT_CFA0(devfn, offset);
++		if (devfn != 0)
++			return hose->cfg_data + U3_HT_CFA0(devfn, offset);
++		*swap = 0;
++		return ((void __iomem *)hose->cfg_addr) + (offset << 2);
+ 	} else
+ 		return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
+ }
+@@ -360,14 +362,15 @@ static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
+ 				    int offset, int len, u32 *val)
+ {
+ 	struct pci_controller *hose;
+-	volatile void __iomem *addr;
++	void __iomem *addr;
++	int swap;
+ 
+ 	hose = pci_bus_to_host(bus);
+ 	if (hose == NULL)
+ 		return PCIBIOS_DEVICE_NOT_FOUND;
+ 	if (offset >= 0x100)
+ 		return  PCIBIOS_BAD_REGISTER_NUMBER;
+-	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
++	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap);
+ 	if (!addr)
+ 		return PCIBIOS_DEVICE_NOT_FOUND;
+ 
+@@ -397,10 +400,10 @@ static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
+ 		*val = in_8(addr);
+ 		break;
+ 	case 2:
+-		*val = in_le16(addr);
++		*val = swap ? in_le16(addr) : in_be16(addr);
+ 		break;
+ 	default:
+-		*val = in_le32(addr);
++		*val = swap ? in_le32(addr) : in_be32(addr);
+ 		break;
+ 	}
+ 	return PCIBIOS_SUCCESSFUL;
+@@ -410,14 +413,15 @@ static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
+ 				     int offset, int len, u32 val)
+ {
+ 	struct pci_controller *hose;
+-	volatile void __iomem *addr;
++	void __iomem *addr;
++	int swap;
+ 
+ 	hose = pci_bus_to_host(bus);
+ 	if (hose == NULL)
+ 		return PCIBIOS_DEVICE_NOT_FOUND;
+ 	if (offset >= 0x100)
+ 		return  PCIBIOS_BAD_REGISTER_NUMBER;
+-	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
++	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap);
+ 	if (!addr)
+ 		return PCIBIOS_DEVICE_NOT_FOUND;
+ 
+@@ -439,10 +443,10 @@ static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
+ 		out_8(addr, val);
+ 		break;
+ 	case 2:
+-		out_le16(addr, val);
++		swap ? out_le16(addr, val) : out_be16(addr, val);
+ 		break;
+ 	default:
+-		out_le32((u32 __iomem *)addr, val);
++		swap ? out_le32(addr, val) : out_be32(addr, val);
+ 		break;
+ 	}
+ 	return PCIBIOS_SUCCESSFUL;
+@@ -725,7 +729,7 @@ static void __init setup_bandit(struct pci_controller *hose,
+ static int __init setup_uninorth(struct pci_controller *hose,
+ 				 struct resource *addr)
+ {
+-	pci_assign_all_buses = 1;
++	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ 	has_uninorth = 1;
+ 	hose->ops = &macrisc_pci_ops;
+ 	hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
+@@ -773,31 +777,72 @@ static void __init setup_u4_pcie(struct pci_controller* hose)
+ 	 */
+ 	hose->first_busno = 0x00;
+ 	hose->last_busno = 0xff;
+-	u4_pcie = hose;
+ }
+ 
+-static void __init setup_u3_ht(struct pci_controller* hose)
++static void __init parse_region_decode(struct pci_controller *hose,
++				       u32 decode)
+ {
+-	struct device_node *np = (struct device_node *)hose->arch_data;
+-	struct pci_controller *other = NULL;
+-	int i, cur;
++	unsigned long base, end, next = -1;
++	int i, cur = -1;
+ 
++	/* Iterate through all bits. We ignore the last bit as this region is
++	 * reserved for the ROM among other niceties
++	 */
++	for (i = 0; i < 31; i++) {
++		if ((decode & (0x80000000 >> i)) == 0)
++			continue;
++		if (i < 16) {
++			base = 0xf0000000 | (((u32)i) << 24);
++			end = base + 0x00ffffff;
++		} else {
++			base = ((u32)i-16) << 28;
++			end = base + 0x0fffffff;
++		}
++		if (base != next) {
++			if (++cur >= 3) {
++				printk(KERN_WARNING "PCI: Too many ranges !\n");
++				break;
++			}
++			hose->mem_resources[cur].flags = IORESOURCE_MEM;
++			hose->mem_resources[cur].name = hose->dn->full_name;
++			hose->mem_resources[cur].start = base;
++			hose->mem_resources[cur].end = end;
++			DBG("  %d: 0x%08lx-0x%08lx\n", cur, base, end);
++		} else {
++			DBG("   :           -0x%08lx\n", end);
++			hose->mem_resources[cur].end = end;
++		}
++		next = end + 1;
++	}
++}
++
++static void __init setup_u3_ht(struct pci_controller* hose)
++{
++	struct device_node *np = hose->dn;
++	struct resource cfg_res, self_res;
++	u32 decode;
+ 
+ 	hose->ops = &u3_ht_pci_ops;
+ 
+-	/* We hard code the address because of the different size of
+-	 * the reg address cell, we shall fix that by killing struct
+-	 * reg_property and using some accessor functions instead
++	/* Get base addresses from OF tree
+ 	 */
+-	hose->cfg_data = ioremap(0xf2000000, 0x02000000);
++	if (of_address_to_resource(np, 0, &cfg_res) ||
++	    of_address_to_resource(np, 1, &self_res)) {
++		printk(KERN_ERR "PCI: Failed to get U3/U4 HT resources !\n");
++		return;
++	}
++
++	/* Map external cfg space access into cfg_data and self registers
++	 * into cfg_addr
++	 */
++	hose->cfg_data = ioremap(cfg_res.start, 0x02000000);
++	hose->cfg_addr = ioremap(self_res.start,
++				 self_res.end - self_res.start + 1);
+ 
+ 	/*
+-	 * /ht node doesn't expose a "ranges" property, so we "remove"
+-	 * regions that have been allocated to AGP. So far, this version of
+-	 * the code doesn't assign any of the 0xfxxxxxxx "fine" memory regions
+-	 * to /ht. We need to fix that sooner or later by either parsing all
+-	 * child "ranges" properties or figuring out the U3 address space
+-	 * decoding logic and then read its configuration register (if any).
++	 * /ht node doesn't expose a "ranges" property, we read the register
++	 * that controls the decoding logic and use that for memory regions.
++	 * The IO region is hard coded since it is fixed in HW as well.
+ 	 */
+ 	hose->io_base_phys = 0xf4000000;
+ 	hose->pci_io_size = 0x00400000;
+@@ -808,76 +853,33 @@ static void __init setup_u3_ht(struct pci_controller* hose)
+ 	hose->pci_mem_offset = 0;
+ 	hose->first_busno = 0;
+ 	hose->last_busno = 0xef;
+-	hose->mem_resources[0].name = np->full_name;
+-	hose->mem_resources[0].start = 0x80000000;
+-	hose->mem_resources[0].end = 0xefffffff;
+-	hose->mem_resources[0].flags = IORESOURCE_MEM;
+-
+-	u3_ht = hose;
+ 
+-	if (u3_agp != NULL)
+-		other = u3_agp;
+-	else if (u4_pcie != NULL)
+-		other = u4_pcie;
+-
+-	if (other == NULL) {
+-		DBG("U3/4 has no AGP/PCIE, using full resource range\n");
+-		return;
+-	}
++	/* Note: fix offset when cfg_addr becomes a void * */
++	decode = in_be32(hose->cfg_addr + 0x80);
+ 
+-	/* Fixup bus range vs. PCIE */
+-	if (u4_pcie)
+-		hose->last_busno = u4_pcie->first_busno - 1;
++	DBG("PCI: Apple HT bridge decode register: 0x%08x\n", decode);
+ 
+-	/* We "remove" the AGP resources from the resources allocated to HT,
+-	 * that is we create "holes". However, that code does assumptions
+-	 * that so far happen to be true (cross fingers...), typically that
+-	 * resources in the AGP node are properly ordered
++	/* NOTE: The decode register setup is a bit weird... region
++	 * 0xf8000000 for example is marked as enabled in there while it's
++	 * actually the memory controller registers.
++	 * That means that we are incorrectly attributing it to HT.
++	 *
++	 * In a similar vein, region 0xf4000000 is actually the HT IO space but
++	 * also marked as enabled in here and 0xf9000000 is used by some other
++	 * internal bits of the northbridge.
++	 *
++	 * Unfortunately, we can't just mask out those bit as we would end
++	 * up with more regions than we can cope (linux can only cope with
++	 * 3 memory regions for a PHB at this stage).
++	 *
++	 * So for now, we just do a little hack. We happen to -know- that
++	 * Apple firmware doesn't assign things below 0xfa000000 for that
++	 * bridge anyway so we mask out all bits we don't want.
+ 	 */
+-	cur = 0;
+-	for (i=0; i<3; i++) {
+-		struct resource *res = &other->mem_resources[i];
+-		if (res->flags != IORESOURCE_MEM)
+-			continue;
+-		/* We don't care about "fine" resources */
+-		if (res->start >= 0xf0000000)
+-			continue;
+-		/* Check if it's just a matter of "shrinking" us in one
+-		 * direction
+-		 */
+-		if (hose->mem_resources[cur].start == res->start) {
+-			DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
+-			    cur, hose->mem_resources[cur].start,
+-			    res->end + 1);
+-			hose->mem_resources[cur].start = res->end + 1;
+-			continue;
+-		}
+-		if (hose->mem_resources[cur].end == res->end) {
+-			DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
+-			    cur, hose->mem_resources[cur].end,
+-			    res->start - 1);
+-			hose->mem_resources[cur].end = res->start - 1;
+-			continue;
+-		}
+-		/* No, it's not the case, we need a hole */
+-		if (cur == 2) {
+-			/* not enough resources for a hole, we drop part
+-			 * of the range
+-			 */
+-			printk(KERN_WARNING "Running out of resources"
+-			       " for /ht host !\n");
+-			hose->mem_resources[cur].end = res->start - 1;
+-			continue;
+-		}
+-		cur++;
+-		DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
+-		    cur-1, res->start - 1, cur, res->end + 1);
+-		hose->mem_resources[cur].name = np->full_name;
+-		hose->mem_resources[cur].flags = IORESOURCE_MEM;
+-		hose->mem_resources[cur].start = res->end + 1;
+-		hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
+-		hose->mem_resources[cur-1].end = res->start - 1;
+-	}
++	decode &= 0x003fffff;
++
++	/* Now parse the resulting bits and build resources */
++	parse_region_decode(hose, decode);
+ }
+ #endif /* CONFIG_PPC64 */
+ 
+@@ -994,6 +996,8 @@ void __init pmac_pci_init(void)
+ 	struct device_node *np, *root;
+ 	struct device_node *ht = NULL;
+ 
++	ppc_pci_flags = PPC_PCI_CAN_SKIP_ISA_ALIGN;
++
+ 	root = of_find_node_by_path("/");
+ 	if (root == NULL) {
+ 		printk(KERN_CRIT "pmac_pci_init: can't find root "
+@@ -1032,15 +1036,15 @@ void __init pmac_pci_init(void)
+ 	 * future though
+ 	 */
+ 	if (u3_agp) {
+-		struct device_node *np = u3_agp->arch_data;
++		struct device_node *np = u3_agp->dn;
+ 		PCI_DN(np)->busno = 0xf0;
+ 		for (np = np->child; np; np = np->sibling)
+ 			PCI_DN(np)->busno = 0xf0;
+ 	}
+ 	/* pmac_check_ht_link(); */
+ 
+-	/* Tell pci.c to not use the common resource allocation mechanism */
+-	pci_probe_only = 1;
++	/* We can allocate missing resources if any */
++	pci_probe_only = 0;
+ 
+ #else /* CONFIG_PPC64 */
+ 	init_p2pbridge();
+@@ -1051,13 +1055,13 @@ void __init pmac_pci_init(void)
+ 	 * some offset between bus number and domains for now when we
+ 	 * assign all busses should help for now
+ 	 */
+-	if (pci_assign_all_buses)
++	if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS)
+ 		pcibios_assign_bus_offset = 0x10;
+ #endif
+ }
+ 
+-int
+-pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
++#ifdef CONFIG_PPC32
++int pmac_pci_enable_device_hook(struct pci_dev *dev)
+ {
+ 	struct device_node* node;
+ 	int updatecfg = 0;
+@@ -1099,24 +1103,21 @@ pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
+ 		updatecfg = 1;
+ 	}
+ 
++	/*
++	 * Fixup various header fields on 32 bits. We don't do that on
++	 * 64 bits as some of these have strange values behind the HT
++	 * bridge and we must not, for example, enable MWI or set the
++	 * cache line size on them.
++	 */
+ 	if (updatecfg) {
+ 		u16 cmd;
+ 
+-		/*
+-		 * Make sure PCI is correctly configured
+-		 *
+-		 * We use old pci_bios versions of the function since, by
+-		 * default, gmac is not powered up, and so will be absent
+-		 * from the kernel initial PCI lookup.
+-		 *
+-		 * Should be replaced by 2.4 new PCI mechanisms and really
+-		 * register the device.
+-		 */
+ 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ 		cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
+ 			| PCI_COMMAND_INVALIDATE;
+ 		pci_write_config_word(dev, PCI_COMMAND, cmd);
+ 		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
++
+ 		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
+ 				      L1_CACHE_BYTES >> 2);
+ 	}
+@@ -1124,6 +1125,18 @@ pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
+ 	return 0;
+ }
+ 
++void __devinit pmac_pci_fixup_ohci(struct pci_dev *dev)
++{
++	struct device_node *node = pci_device_to_OF_node(dev);
++
++	/* We don't want to assign resources to USB controllers
++	 * absent from the OF tree (iBook second controller)
++	 */
++	if (dev->class == PCI_CLASS_SERIAL_USB_OHCI && !node)
++		dev->resource[0].flags = 0;
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_ANY_ID, pmac_pci_fixup_ohci);
++
+ /* We power down some devices after they have been probed. They'll
+  * be powered back on later on
+  */
+@@ -1171,7 +1184,6 @@ void __init pmac_pcibios_after_init(void)
+ 	of_node_put(nd);
+ }
+ 
+-#ifdef CONFIG_PPC32
+ void pmac_pci_fixup_cardbus(struct pci_dev* dev)
+ {
+ 	if (!machine_is(powermac))
+@@ -1259,7 +1271,7 @@ void pmac_pci_fixup_pciata(struct pci_dev* dev)
+ 	}
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
+-#endif
++#endif /* CONFIG_PPC32 */
+ 
+ /*
+  * Disable second function on K2-SATA, it's broken
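
Worth spelling out for the parse_region_decode() addition above: each set bit in the U3/U4 HT decode register selects a fixed address window, and adjacent windows are merged into a single resource. A stand-alone restatement of the bit-to-window mapping, isolated for clarity and with two bits worked out in the comments (illustrative only; decode_bit_to_window is a made-up name):

/* Same mapping as parse_region_decode():
 *   bits 0..15  -> 16 MB "fine" windows at 0xf0000000 + (bit << 24)
 *   bits 16..30 -> 256 MB windows at (bit - 16) << 28
 * Examples: bit 3 -> 0xf3000000..0xf3ffffff, bit 20 -> 0x40000000..0x4fffffff.
 */
static void decode_bit_to_window(int bit, unsigned long *base, unsigned long *end)
{
	if (bit < 16) {
		*base = 0xf0000000ul | ((unsigned long)bit << 24);
		*end  = *base + 0x00ffffff;
	} else {
		*base = (unsigned long)(bit - 16) << 28;
		*end  = *base + 0x0fffffff;
	}
}
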
+diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
+index 45d54b9..db20de5 100644
+--- a/arch/powerpc/platforms/powermac/pfunc_base.c
++++ b/arch/powerpc/platforms/powermac/pfunc_base.c
+@@ -363,8 +363,7 @@ int __init pmac_pfunc_base_install(void)
+ 
+ 	return 0;
+ }
+-
+-arch_initcall(pmac_pfunc_base_install);
++machine_arch_initcall(powermac, pmac_pfunc_base_install);
+ 
+ #ifdef CONFIG_PM
+ 
+diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
+index 999f5e1..4073640 100644
+--- a/arch/powerpc/platforms/powermac/pic.c
++++ b/arch/powerpc/platforms/powermac/pic.c
+@@ -663,7 +663,7 @@ static int pmacpic_resume(struct sys_device *sysdev)
+ #endif /* CONFIG_PM && CONFIG_PPC32 */
+ 
+ static struct sysdev_class pmacpic_sysclass = {
+-	set_kset_name("pmac_pic"),
++	.name = "pmac_pic",
+ };
+ 
+ static struct sys_device device_pmacpic = {
+@@ -690,6 +690,5 @@ static int __init init_pmacpic_sysfs(void)
+ 	sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
+ 	return 0;
+ }
+-
+-subsys_initcall(init_pmacpic_sysfs);
++machine_subsys_initcall(powermac, init_pmacpic_sysfs);
+ 
+diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
+index fcde070..b3abaaf 100644
+--- a/arch/powerpc/platforms/powermac/pmac.h
++++ b/arch/powerpc/platforms/powermac/pmac.h
+@@ -26,7 +26,7 @@ extern void pmac_pci_init(void);
+ extern void pmac_nvram_update(void);
+ extern unsigned char pmac_nvram_read_byte(int addr);
+ extern void pmac_nvram_write_byte(int addr, unsigned char val);
+-extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial);
++extern int pmac_pci_enable_device_hook(struct pci_dev *dev);
+ extern void pmac_pcibios_after_init(void);
+ extern int of_show_percpuinfo(struct seq_file *m, int i);
+ 
+diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
+index 02c5330..36ff1b6 100644
+--- a/arch/powerpc/platforms/powermac/setup.c
++++ b/arch/powerpc/platforms/powermac/setup.c
+@@ -51,6 +51,8 @@
+ #include <linux/root_dev.h>
+ #include <linux/bitops.h>
+ #include <linux/suspend.h>
++#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/reg.h>
+ #include <asm/sections.h>
+@@ -68,8 +70,6 @@
+ #include <asm/btext.h>
+ #include <asm/pmac_feature.h>
+ #include <asm/time.h>
+-#include <asm/of_device.h>
+-#include <asm/of_platform.h>
+ #include <asm/mmu_context.h>
+ #include <asm/iommu.h>
+ #include <asm/smu.h>
+@@ -94,7 +94,6 @@ extern struct machdep_calls pmac_md;
+ #define DEFAULT_ROOT_DEVICE Root_SDA1	/* sda1 - slightly silly choice */
+ 
+ #ifdef CONFIG_PPC64
+-#include <asm/udbg.h>
+ int sccdbg;
+ #endif
+ 
+@@ -398,17 +397,13 @@ static int initializing = 1;
+ 
+ static int pmac_late_init(void)
+ {
+-	if (!machine_is(powermac))
+-		return -ENODEV;
+-
+ 	initializing = 0;
+ 	/* this is udbg (which is __init) and we can later use it during
+ 	 * cpu hotplug (in smp_core99_kick_cpu) */
+ 	ppc_md.progress = NULL;
+ 	return 0;
+ }
+-
+-late_initcall(pmac_late_init);
++machine_late_initcall(powermac, pmac_late_init);
+ 
+ /*
+  * This is __init_refok because we check for "initializing" before
+@@ -535,9 +530,6 @@ static int __init pmac_declare_of_platform_devices(void)
+ 	if (machine_is(chrp))
+ 		return -1;
+ 
+-	if (!machine_is(powermac))
+-		return 0;
+-
+ 	np = of_find_node_by_name(NULL, "valkyrie");
+ 	if (np)
+ 		of_platform_device_create(np, "valkyrie", NULL);
+@@ -552,8 +544,7 @@ static int __init pmac_declare_of_platform_devices(void)
+ 
+ 	return 0;
+ }
+-
+-device_initcall(pmac_declare_of_platform_devices);
++machine_device_initcall(powermac, pmac_declare_of_platform_devices);
+ 
+ /*
+  * Called very early, MMU is off, device-tree isn't unflattened
+@@ -613,9 +604,11 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
+ 
+ 	/* We need to use normal PCI probing for the AGP bus,
+ 	 * since the device for the AGP bridge isn't in the tree.
++	 * Same for the PCIe host on U4 and the HT host bridge.
+ 	 */
+ 	if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
+-				  of_device_is_compatible(node, "u4-pcie")))
++				  of_device_is_compatible(node, "u4-pcie") ||
++				  of_device_is_compatible(node, "u3-ht")))
+ 		return PCI_PROBE_NORMAL;
+ 	return PCI_PROBE_DEVTREE;
+ }
+diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
+index bf9da56..bbbefd6 100644
+--- a/arch/powerpc/platforms/powermac/time.c
++++ b/arch/powerpc/platforms/powermac/time.c
+@@ -84,12 +84,14 @@ long __init pmac_time_init(void)
+ 	return delta;
+ }
+ 
++#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
+ static void to_rtc_time(unsigned long now, struct rtc_time *tm)
+ {
+ 	to_tm(now, tm);
+ 	tm->tm_year -= 1900;
+ 	tm->tm_mon -= 1;
+ }
++#endif
+ 
+ static unsigned long from_rtc_time(struct rtc_time *tm)
+ {
+diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
+index 298f1c9..a5f4e95 100644
+--- a/arch/powerpc/platforms/ps3/Kconfig
++++ b/arch/powerpc/platforms/ps3/Kconfig
+@@ -61,17 +61,6 @@ config PS3_DYNAMIC_DMA
+ 	  This support is mainly for Linux kernel development.  If unsure,
+ 	  say N.
+ 
+-config PS3_USE_LPAR_ADDR
+-	depends on PPC_PS3 && EXPERIMENTAL
+-	bool "PS3 use lpar address space"
+-	default y
+-	help
+-	  This option is solely for experimentation by experts.  Disables
+-	  translation of lpar addresses.  SPE support currently won't work
+-	  without this set to y.
+-
+-	  If you have any doubt, choose the default y.
+-
+ config PS3_VUART
+ 	depends on PPC_PS3
+ 	tristate
+@@ -138,4 +127,17 @@ config PS3_FLASH
+ 	  be disabled on the kernel command line using "ps3flash=off", to
+ 	  not allocate this fixed buffer.
+ 
++config PS3_LPM
++	tristate "PS3 Logical Performance Monitor support"
++	depends on PPC_PS3
++	help
++	  Include support for the PS3 Logical Performance Monitor.
++
++	  This support is required to use the logical performance monitor
++	  of the PS3's LV1 hypervisor.
++
++	  If you intend to use the advanced performance monitoring and
++	  profiling support of the Cell processor with programs like
++	  oprofile and perfmon2, then say Y or M, otherwise say N.
++
+ endmenu
+diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
+index fd063fe..9d251d0 100644
+--- a/arch/powerpc/platforms/ps3/device-init.c
++++ b/arch/powerpc/platforms/ps3/device-init.c
+@@ -23,6 +23,7 @@
+ #include <linux/kernel.h>
+ #include <linux/kthread.h>
+ #include <linux/init.h>
++#include <linux/reboot.h>
+ 
+ #include <asm/firmware.h>
+ #include <asm/lv1call.h>
+@@ -30,6 +31,89 @@
+ 
+ #include "platform.h"
+ 
++static int __init ps3_register_lpm_devices(void)
++{
++	int result;
++	u64 tmp1;
++	u64 tmp2;
++	struct ps3_system_bus_device *dev;
++
++	pr_debug(" -> %s:%d\n", __func__, __LINE__);
++
++	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++	if (!dev)
++		return -ENOMEM;
++
++	dev->match_id = PS3_MATCH_ID_LPM;
++	dev->dev_type = PS3_DEVICE_TYPE_LPM;
++
++	/* The current lpm driver only supports a single BE processor. */
++
++	result = ps3_repository_read_be_node_id(0, &dev->lpm.node_id);
++
++	if (result) {
++		pr_debug("%s:%d: ps3_repository_read_be_node_id failed \n",
++			__func__, __LINE__);
++		goto fail_read_repo;
++	}
++
++	result = ps3_repository_read_lpm_privileges(dev->lpm.node_id, &tmp1,
++		&dev->lpm.rights);
++
++	if (result) {
++		pr_debug("%s:%d: ps3_repository_read_lpm_privleges failed \n",
++			__func__, __LINE__);
++		goto fail_read_repo;
++	}
++
++	lv1_get_logical_partition_id(&tmp2);
++
++	if (tmp1 != tmp2) {
++		pr_debug("%s:%d: wrong lpar\n",
++			__func__, __LINE__);
++		result = -ENODEV;
++		goto fail_rights;
++	}
++
++	if (!(dev->lpm.rights & PS3_LPM_RIGHTS_USE_LPM)) {
++		pr_debug("%s:%d: don't have rights to use lpm\n",
++			__func__, __LINE__);
++		result = -EPERM;
++		goto fail_rights;
++	}
++
++	pr_debug("%s:%d: pu_id %lu, rights %lu(%lxh)\n",
++		__func__, __LINE__, dev->lpm.pu_id, dev->lpm.rights,
++		dev->lpm.rights);
++
++	result = ps3_repository_read_pu_id(0, &dev->lpm.pu_id);
++
++	if (result) {
++		pr_debug("%s:%d: ps3_repository_read_pu_id failed \n",
++			__func__, __LINE__);
++		goto fail_read_repo;
++	}
++
++	result = ps3_system_bus_device_register(dev);
++
++	if (result) {
++		pr_debug("%s:%d ps3_system_bus_device_register failed\n",
++			__func__, __LINE__);
++		goto fail_register;
++	}
++
++	pr_debug(" <- %s:%d\n", __func__, __LINE__);
++	return 0;
++
++
++fail_register:
++fail_rights:
++fail_read_repo:
++	kfree(dev);
++	pr_debug(" <- %s:%d: failed\n", __func__, __LINE__);
++	return result;
++}
++
+ /**
+  * ps3_setup_gelic_device - Setup and register a gelic device instance.
+  *
+@@ -238,166 +322,6 @@ static int __init ps3_setup_vuart_device(enum ps3_match_id match_id,
+ 	return result;
+ }
+ 
+-static int ps3stor_wait_for_completion(u64 dev_id, u64 tag,
+-				       unsigned int timeout)
+-{
+-	int result = -1;
+-	unsigned int retries = 0;
+-	u64 status;
+-
+-	for (retries = 0; retries < timeout; retries++) {
+-		result = lv1_storage_check_async_status(dev_id, tag, &status);
+-		if (!result)
+-			break;
+-
+-		msleep(1);
+-	}
+-
+-	if (result)
+-		pr_debug("%s:%u: check_async_status: %s, status %lx\n",
+-			 __func__, __LINE__, ps3_result(result), status);
+-
+-	return result;
+-}
+-
+-/**
+- * ps3_storage_wait_for_device - Wait for a storage device to become ready.
+- * @repo: The repository device to wait for.
+- *
+- * Uses the hypervisor's storage device notification mechanism to wait until
+- * a storage device is ready.  The device notification mechanism uses a
+- * psuedo device (id = -1) to asynchronously notify the guest when storage
+- * devices become ready.  The notification device has a block size of 512
+- * bytes.
+- */
+-
+-static int ps3_storage_wait_for_device(const struct ps3_repository_device *repo)
+-{
+-	int error = -ENODEV;
+-	int result;
+-	const u64 notification_dev_id = (u64)-1LL;
+-	const unsigned int timeout = HZ;
+-	u64 lpar;
+-	u64 tag;
+-	void *buf;
+-	enum ps3_notify_type {
+-		notify_device_ready = 0,
+-		notify_region_probe = 1,
+-		notify_region_update = 2,
+-	};
+-	struct {
+-		u64 operation_code;	/* must be zero */
+-		u64 event_mask;		/* OR of 1UL << enum ps3_notify_type */
+-	} *notify_cmd;
+-	struct {
+-		u64 event_type;		/* enum ps3_notify_type */
+-		u64 bus_id;
+-		u64 dev_id;
+-		u64 dev_type;
+-		u64 dev_port;
+-	} *notify_event;
+-
+-	pr_debug(" -> %s:%u: (%u:%u:%u)\n", __func__, __LINE__, repo->bus_id,
+-		 repo->dev_id, repo->dev_type);
+-
+-	buf = kzalloc(512, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	lpar = ps3_mm_phys_to_lpar(__pa(buf));
+-	notify_cmd = buf;
+-	notify_event = buf;
+-
+-	result = lv1_open_device(repo->bus_id, notification_dev_id, 0);
+-	if (result) {
+-		printk(KERN_ERR "%s:%u: lv1_open_device %s\n", __func__,
+-		       __LINE__, ps3_result(result));
+-		goto fail_free;
+-	}
+-
+-	/* Setup and write the request for device notification. */
+-
+-	notify_cmd->operation_code = 0; /* must be zero */
+-	notify_cmd->event_mask = 1UL << notify_region_probe;
+-
+-	result = lv1_storage_write(notification_dev_id, 0, 0, 1, 0, lpar,
+-				   &tag);
+-	if (result) {
+-		printk(KERN_ERR "%s:%u: write failed %s\n", __func__, __LINE__,
+-		       ps3_result(result));
+-		goto fail_close;
+-	}
+-
+-	/* Wait for the write completion */
+-
+-	result = ps3stor_wait_for_completion(notification_dev_id, tag,
+-					     timeout);
+-	if (result) {
+-		printk(KERN_ERR "%s:%u: write not completed %s\n", __func__,
+-		       __LINE__, ps3_result(result));
+-		goto fail_close;
+-	}
+-
+-	/* Loop here processing the requested notification events. */
+-
+-	while (1) {
+-		memset(notify_event, 0, sizeof(*notify_event));
+-
+-		result = lv1_storage_read(notification_dev_id, 0, 0, 1, 0,
+-					  lpar, &tag);
+-		if (result) {
+-			printk(KERN_ERR "%s:%u: write failed %s\n", __func__,
+-			       __LINE__, ps3_result(result));
+-			break;
+-		}
+-
+-		result = ps3stor_wait_for_completion(notification_dev_id, tag,
+-						     timeout);
+-		if (result) {
+-			printk(KERN_ERR "%s:%u: read not completed %s\n",
+-			       __func__, __LINE__, ps3_result(result));
+-			break;
+-		}
+-
+-		pr_debug("%s:%d: notify event (%u:%u:%u): event_type 0x%lx, "
+-			 "port %lu\n", __func__, __LINE__, repo->bus_index,
+-			 repo->dev_index, repo->dev_type,
+-			 notify_event->event_type, notify_event->dev_port);
+-
+-		if (notify_event->event_type != notify_region_probe ||
+-		    notify_event->bus_id != repo->bus_id) {
+-			pr_debug("%s:%u: bad notify_event: event %lu, "
+-				 "dev_id %lu, dev_type %lu\n",
+-				 __func__, __LINE__, notify_event->event_type,
+-				 notify_event->dev_id, notify_event->dev_type);
+-			break;
+-		}
+-
+-		if (notify_event->dev_id == repo->dev_id &&
+-		    notify_event->dev_type == repo->dev_type) {
+-			pr_debug("%s:%u: device ready (%u:%u:%u)\n", __func__,
+-				 __LINE__, repo->bus_index, repo->dev_index,
+-				 repo->dev_type);
+-			error = 0;
+-			break;
+-		}
+-
+-		if (notify_event->dev_id == repo->dev_id &&
+-		    notify_event->dev_type == PS3_DEV_TYPE_NOACCESS) {
+-			pr_debug("%s:%u: no access: dev_id %u\n", __func__,
+-				 __LINE__, repo->dev_id);
+-			break;
+-		}
+-	}
+-
+-fail_close:
+-	lv1_close_device(repo->bus_id, notification_dev_id);
+-fail_free:
+-	kfree(buf);
+-	pr_debug(" <- %s:%u\n", __func__, __LINE__);
+-	return error;
+-}
+-
+ static int ps3_setup_storage_dev(const struct ps3_repository_device *repo,
+ 				 enum ps3_match_id match_id)
+ {
+@@ -449,16 +373,6 @@ static int ps3_setup_storage_dev(const struct ps3_repository_device *repo,
+ 		goto fail_find_interrupt;
+ 	}
+ 
+-	/* FIXME: Arrange to only do this on a 'cold' boot */
+-
+-	result = ps3_storage_wait_for_device(repo);
+-	if (result) {
+-		printk(KERN_ERR "%s:%u: storage_notification failed %d\n",
+-		       __func__, __LINE__, result);
+-		result = -ENODEV;
+-		goto fail_probe_notification;
+-	}
+-
+ 	for (i = 0; i < num_regions; i++) {
+ 		unsigned int id;
+ 		u64 start, size;
+@@ -494,7 +408,6 @@ static int ps3_setup_storage_dev(const struct ps3_repository_device *repo,
+ 
+ fail_device_register:
+ fail_read_region:
+-fail_probe_notification:
+ fail_find_interrupt:
+ 	kfree(p);
+ fail_malloc:
+@@ -659,62 +572,268 @@ static int ps3_register_repository_device(
+ 	return result;
+ }
+ 
++static void ps3_find_and_add_device(u64 bus_id, u64 dev_id)
++{
++	struct ps3_repository_device repo;
++	int res;
++	unsigned int retries;
++	unsigned long rem;
++
++	/*
++	 * On some firmware versions (e.g. 1.90), the device may not show up
++	 * in the repository immediately
++	 */
++	for (retries = 0; retries < 10; retries++) {
++		res = ps3_repository_find_device_by_id(&repo, bus_id, dev_id);
++		if (!res)
++			goto found;
++
++		rem = msleep_interruptible(100);
++		if (rem)
++			break;
++	}
++	pr_warning("%s:%u: device %lu:%lu not found\n", __func__, __LINE__,
++		   bus_id, dev_id);
++	return;
++
++found:
++	if (retries)
++		pr_debug("%s:%u: device %lu:%lu found after %u retries\n",
++			 __func__, __LINE__, bus_id, dev_id, retries);
++
++	ps3_register_repository_device(&repo);
++	return;
++}
++
++#define PS3_NOTIFICATION_DEV_ID		ULONG_MAX
++#define PS3_NOTIFICATION_INTERRUPT_ID	0
++
++struct ps3_notification_device {
++	struct ps3_system_bus_device sbd;
++	spinlock_t lock;
++	u64 tag;
++	u64 lv1_status;
++	struct completion done;
++};
++
++enum ps3_notify_type {
++	notify_device_ready = 0,
++	notify_region_probe = 1,
++	notify_region_update = 2,
++};
++
++struct ps3_notify_cmd {
++	u64 operation_code;		/* must be zero */
++	u64 event_mask;			/* OR of 1UL << enum ps3_notify_type */
++};
++
++struct ps3_notify_event {
++	u64 event_type;			/* enum ps3_notify_type */
++	u64 bus_id;
++	u64 dev_id;
++	u64 dev_type;
++	u64 dev_port;
++};
++
++static irqreturn_t ps3_notification_interrupt(int irq, void *data)
++{
++	struct ps3_notification_device *dev = data;
++	int res;
++	u64 tag, status;
++
++	spin_lock(&dev->lock);
++	res = lv1_storage_get_async_status(PS3_NOTIFICATION_DEV_ID, &tag,
++					   &status);
++	if (tag != dev->tag)
++		pr_err("%s:%u: tag mismatch, got %lx, expected %lx\n",
++		       __func__, __LINE__, tag, dev->tag);
++
++	if (res) {
++		pr_err("%s:%u: res %d status 0x%lx\n", __func__, __LINE__, res,
++		       status);
++	} else {
++		pr_debug("%s:%u: completed, status 0x%lx\n", __func__,
++			 __LINE__, status);
++		dev->lv1_status = status;
++		complete(&dev->done);
++	}
++	spin_unlock(&dev->lock);
++	return IRQ_HANDLED;
++}
++
++static int ps3_notification_read_write(struct ps3_notification_device *dev,
++				       u64 lpar, int write)
++{
++	const char *op = write ? "write" : "read";
++	unsigned long flags;
++	int res;
++
++	init_completion(&dev->done);
++	spin_lock_irqsave(&dev->lock, flags);
++	res = write ? lv1_storage_write(dev->sbd.dev_id, 0, 0, 1, 0, lpar,
++					&dev->tag)
++		    : lv1_storage_read(dev->sbd.dev_id, 0, 0, 1, 0, lpar,
++				       &dev->tag);
++	spin_unlock_irqrestore(&dev->lock, flags);
++	if (res) {
++		pr_err("%s:%u: %s failed %d\n", __func__, __LINE__, op, res);
++		return -EPERM;
++	}
++	pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
++
++	res = wait_event_interruptible(dev->done.wait,
++				       dev->done.done || kthread_should_stop());
++	if (kthread_should_stop())
++		res = -EINTR;
++	if (res) {
++		pr_debug("%s:%u: interrupted %s\n", __func__, __LINE__, op);
++		return res;
++	}
++
++	if (dev->lv1_status) {
++		pr_err("%s:%u: %s not completed, status 0x%lx\n", __func__,
++		       __LINE__, op, dev->lv1_status);
++		return -EIO;
++	}
++	pr_debug("%s:%u: notification %s completed\n", __func__, __LINE__, op);
++
++	return 0;
++}
++
++static struct task_struct *probe_task;
++
+ /**
+  * ps3_probe_thread - Background repository probing at system startup.
+  *
+  * This implementation only supports background probing on a single bus.
++ * It uses the hypervisor's storage device notification mechanism to wait until
++ * a storage device is ready.  The device notification mechanism uses a
++ * pseudo device to asynchronously notify the guest when storage devices become
++ * ready.  The notification device has a block size of 512 bytes.
+  */
+ 
+ static int ps3_probe_thread(void *data)
+ {
+-	struct ps3_repository_device *repo = data;
+-	int result;
+-	unsigned int ms = 250;
++	struct ps3_notification_device dev;
++	int res;
++	unsigned int irq;
++	u64 lpar;
++	void *buf;
++	struct ps3_notify_cmd *notify_cmd;
++	struct ps3_notify_event *notify_event;
+ 
+ 	pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__);
+ 
++	buf = kzalloc(512, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	lpar = ps3_mm_phys_to_lpar(__pa(buf));
++	notify_cmd = buf;
++	notify_event = buf;
++
++	/* dummy system bus device */
++	dev.sbd.bus_id = (u64)data;
++	dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID;
++	dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID;
++
++	res = lv1_open_device(dev.sbd.bus_id, dev.sbd.dev_id, 0);
++	if (res) {
++		pr_err("%s:%u: lv1_open_device failed %s\n", __func__,
++		       __LINE__, ps3_result(res));
++		goto fail_free;
++	}
++
++	res = ps3_sb_event_receive_port_setup(&dev.sbd, PS3_BINDING_CPU_ANY,
++					      &irq);
++	if (res) {
++		pr_err("%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
++		       __func__, __LINE__, res);
++	       goto fail_close_device;
++	}
++
++	spin_lock_init(&dev.lock);
++
++	res = request_irq(irq, ps3_notification_interrupt, IRQF_DISABLED,
++			  "ps3_notification", &dev);
++	if (res) {
++		pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__,
++		       res);
++		goto fail_sb_event_receive_port_destroy;
++	}
++
++	/* Setup and write the request for device notification. */
++	notify_cmd->operation_code = 0; /* must be zero */
++	notify_cmd->event_mask = 1UL << notify_region_probe;
++
++	res = ps3_notification_read_write(&dev, lpar, 1);
++	if (res)
++		goto fail_free_irq;
++
++	/* Loop here processing the requested notification events. */
+ 	do {
+ 		try_to_freeze();
+ 
+-		pr_debug("%s:%u: probing...\n", __func__, __LINE__);
+-
+-		do {
+-			result = ps3_repository_find_device(repo);
+-
+-			if (result == -ENODEV)
+-				pr_debug("%s:%u: nothing new\n", __func__,
+-					__LINE__);
+-			else if (result)
+-				pr_debug("%s:%u: find device error.\n",
+-					__func__, __LINE__);
+-			else {
+-				pr_debug("%s:%u: found device (%u:%u:%u)\n",
+-					 __func__, __LINE__, repo->bus_index,
+-					 repo->dev_index, repo->dev_type);
+-				ps3_register_repository_device(repo);
+-				ps3_repository_bump_device(repo);
+-				ms = 250;
+-			}
+-		} while (!result);
+-
+-		pr_debug("%s:%u: ms %u\n", __func__, __LINE__, ms);
+-
+-		if ( ms > 60000)
++		memset(notify_event, 0, sizeof(*notify_event));
++
++		res = ps3_notification_read_write(&dev, lpar, 0);
++		if (res)
+ 			break;
+ 
+-		msleep_interruptible(ms);
++		pr_debug("%s:%u: notify event type 0x%lx bus id %lu dev id %lu"
++			 " type %lu port %lu\n", __func__, __LINE__,
++			 notify_event->event_type, notify_event->bus_id,
++			 notify_event->dev_id, notify_event->dev_type,
++			 notify_event->dev_port);
+ 
+-		/* An exponential backoff. */
+-		ms <<= 1;
++		if (notify_event->event_type != notify_region_probe ||
++		    notify_event->bus_id != dev.sbd.bus_id) {
++			pr_warning("%s:%u: bad notify_event: event %lu, "
++				   "dev_id %lu, dev_type %lu\n",
++				   __func__, __LINE__, notify_event->event_type,
++				   notify_event->dev_id,
++				   notify_event->dev_type);
++			continue;
++		}
++
++		ps3_find_and_add_device(dev.sbd.bus_id, notify_event->dev_id);
+ 
+ 	} while (!kthread_should_stop());
+ 
++fail_free_irq:
++	free_irq(irq, &dev);
++fail_sb_event_receive_port_destroy:
++	ps3_sb_event_receive_port_destroy(&dev.sbd, irq);
++fail_close_device:
++	lv1_close_device(dev.sbd.bus_id, dev.sbd.dev_id);
++fail_free:
++	kfree(buf);
++
++	probe_task = NULL;
++
+ 	pr_debug(" <- %s:%u: kthread finished\n", __func__, __LINE__);
+ 
+ 	return 0;
+ }
+ 
+ /**
++ * ps3_stop_probe_thread - Stops the background probe thread.
++ *
++ */
++
++static int ps3_stop_probe_thread(struct notifier_block *nb, unsigned long code,
++				 void *data)
++{
++	if (probe_task)
++		kthread_stop(probe_task);
++	return 0;
++}
++
++static struct notifier_block nb = {
++	.notifier_call = ps3_stop_probe_thread
++};
++
++/**
+  * ps3_start_probe_thread - Starts the background probe thread.
+  *
+  */
+@@ -723,7 +842,7 @@ static int __init ps3_start_probe_thread(enum ps3_bus_type bus_type)
+ {
+ 	int result;
+ 	struct task_struct *task;
+-	static struct ps3_repository_device repo; /* must be static */
++	struct ps3_repository_device repo;
+ 
+ 	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+ 
+@@ -746,7 +865,8 @@ static int __init ps3_start_probe_thread(enum ps3_bus_type bus_type)
+ 		return -ENODEV;
+ 	}
+ 
+-	task = kthread_run(ps3_probe_thread, &repo, "ps3-probe-%u", bus_type);
++	task = kthread_run(ps3_probe_thread, (void *)repo.bus_id,
++			   "ps3-probe-%u", bus_type);
+ 
+ 	if (IS_ERR(task)) {
+ 		result = PTR_ERR(task);
+@@ -755,6 +875,9 @@ static int __init ps3_start_probe_thread(enum ps3_bus_type bus_type)
+ 		return result;
+ 	}
+ 
++	probe_task = task;
++	register_reboot_notifier(&nb);
++
+ 	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ 	return 0;
+ }
+@@ -787,6 +910,8 @@ static int __init ps3_register_devices(void)
+ 
+ 	ps3_register_sound_devices();
+ 
++	ps3_register_lpm_devices();
++
+ 	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ 	return 0;
+ }
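
One detail of the reworked PS3 probe thread above that is easy to miss: ps3_notification_read_write() cannot simply call wait_for_completion(), because the thread also has to wake up when kthread_stop() is invoked from the reboot notifier, so it open-codes a wait on the completion's waitqueue with an extra kthread_should_stop() condition. Reduced to its essentials, the handshake looks roughly like this (illustrative sketch, hypothetical names):

struct waiter {
	struct completion done;	/* init_completion() before each request */
	u64 status;
};

static irqreturn_t waiter_irq(int irq, void *data)
{
	struct waiter *w = data;

	w->status = 0;		/* the real handler reads the lv1 async status here */
	complete(&w->done);	/* wake the probe thread */
	return IRQ_HANDLED;
}

static int waiter_wait(struct waiter *w)
{
	/* Wake either on completion or when the kthread is told to stop. */
	return wait_event_interruptible(w->done.wait,
					w->done.done || kthread_should_stop());
}
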
+diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
+index 7bb3e16..6890047 100644
+--- a/arch/powerpc/platforms/ps3/mm.c
++++ b/arch/powerpc/platforms/ps3/mm.c
+@@ -36,11 +36,6 @@
+ #endif
+ 
+ enum {
+-#if defined(CONFIG_PS3_USE_LPAR_ADDR)
+-	USE_LPAR_ADDR = 1,
+-#else
+-	USE_LPAR_ADDR = 0,
+-#endif
+ #if defined(CONFIG_PS3_DYNAMIC_DMA)
+ 	USE_DYNAMIC_DMA = 1,
+ #else
+@@ -137,11 +132,8 @@ static struct map map;
+ unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
+ {
+ 	BUG_ON(is_kernel_addr(phys_addr));
+-	if (USE_LPAR_ADDR)
+-		return phys_addr;
+-	else
+-		return (phys_addr < map.rm.size || phys_addr >= map.total)
+-			? phys_addr : phys_addr + map.r1.offset;
++	return (phys_addr < map.rm.size || phys_addr >= map.total)
++		? phys_addr : phys_addr + map.r1.offset;
+ }
+ 
+ EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
+@@ -309,7 +301,7 @@ static int __init ps3_mm_add_memory(void)
+ 
+ 	BUG_ON(!mem_init_done);
+ 
+-	start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
++	start_addr = map.rm.size;
+ 	start_pfn = start_addr >> PAGE_SHIFT;
+ 	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ 
+@@ -359,7 +351,7 @@ static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
+ static void  __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
+ 	const char *func, int line)
+ {
+-	DBG("%s:%d: dev        %u:%u\n", func, line, r->dev->bus_id,
++	DBG("%s:%d: dev        %lu:%lu\n", func, line, r->dev->bus_id,
+ 		r->dev->dev_id);
+ 	DBG("%s:%d: page_size  %u\n", func, line, r->page_size);
+ 	DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
+@@ -394,7 +386,7 @@ struct dma_chunk {
+ static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
+ 	int line)
+ {
+-	DBG("%s:%d: r.dev        %u:%u\n", func, line,
++	DBG("%s:%d: r.dev        %lu:%lu\n", func, line,
+ 		c->region->dev->bus_id, c->region->dev->dev_id);
+ 	DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
+ 	DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
+@@ -658,7 +650,7 @@ static int dma_sb_region_create(struct ps3_dma_region *r)
+ 	BUG_ON(!r);
+ 
+ 	if (!r->dev->bus_id) {
+-		pr_info("%s:%d: %u:%u no dma\n", __func__, __LINE__,
++		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
+ 			r->dev->bus_id, r->dev->dev_id);
+ 		return 0;
+ 	}
+@@ -724,7 +716,7 @@ static int dma_sb_region_free(struct ps3_dma_region *r)
+ 	BUG_ON(!r);
+ 
+ 	if (!r->dev->bus_id) {
+-		pr_info("%s:%d: %u:%u no dma\n", __func__, __LINE__,
++		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
+ 			r->dev->bus_id, r->dev->dev_id);
+ 		return 0;
+ 	}
+@@ -1007,7 +999,7 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r)
+ 
+ 	if (r->offset + r->len > map.rm.size) {
+ 		/* Map (part of) 2nd RAM chunk */
+-		virt_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
++		virt_addr = map.rm.size;
+ 		len = r->len;
+ 		if (r->offset >= map.rm.size)
+ 			virt_addr += r->offset - map.rm.size;
+diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
+index 01f0c95..235c13e 100644
+--- a/arch/powerpc/platforms/ps3/platform.h
++++ b/arch/powerpc/platforms/ps3/platform.h
+@@ -89,13 +89,11 @@ enum ps3_dev_type {
+ 	PS3_DEV_TYPE_STOR_ROM = TYPE_ROM,	/* 5 */
+ 	PS3_DEV_TYPE_SB_GPIO = 6,
+ 	PS3_DEV_TYPE_STOR_FLASH = TYPE_RBC,	/* 14 */
+-	PS3_DEV_TYPE_STOR_DUMMY = 32,
+-	PS3_DEV_TYPE_NOACCESS = 255,
+ };
+ 
+ int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
+ 	u64 *value);
+-int ps3_repository_read_bus_id(unsigned int bus_index, unsigned int *bus_id);
++int ps3_repository_read_bus_id(unsigned int bus_index, u64 *bus_id);
+ int ps3_repository_read_bus_type(unsigned int bus_index,
+ 	enum ps3_bus_type *bus_type);
+ int ps3_repository_read_bus_num_dev(unsigned int bus_index,
+@@ -119,7 +117,7 @@ enum ps3_reg_type {
+ int ps3_repository_read_dev_str(unsigned int bus_index,
+ 	unsigned int dev_index, const char *dev_str, u64 *value);
+ int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
+-	unsigned int *dev_id);
++	u64 *dev_id);
+ int ps3_repository_read_dev_type(unsigned int bus_index,
+ 	unsigned int dev_index, enum ps3_dev_type *dev_type);
+ int ps3_repository_read_dev_intr(unsigned int bus_index,
+@@ -138,21 +136,17 @@ int ps3_repository_read_dev_reg(unsigned int bus_index,
+ /* repository bus enumerators */
+ 
+ struct ps3_repository_device {
+-	enum ps3_bus_type bus_type;
+ 	unsigned int bus_index;
+-	unsigned int bus_id;
+-	enum ps3_dev_type dev_type;
+ 	unsigned int dev_index;
+-	unsigned int dev_id;
++	enum ps3_bus_type bus_type;
++	enum ps3_dev_type dev_type;
++	u64 bus_id;
++	u64 dev_id;
+ };
+ 
+-static inline struct ps3_repository_device *ps3_repository_bump_device(
+-	struct ps3_repository_device *repo)
+-{
+-	repo->dev_index++;
+-	return repo;
+-}
+ int ps3_repository_find_device(struct ps3_repository_device *repo);
++int ps3_repository_find_device_by_id(struct ps3_repository_device *repo,
++				     u64 bus_id, u64 dev_id);
+ int ps3_repository_find_devices(enum ps3_bus_type bus_type,
+ 	int (*callback)(const struct ps3_repository_device *repo));
+ int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
+@@ -186,10 +180,10 @@ int ps3_repository_read_stor_dev_region(unsigned int bus_index,
+ 	unsigned int dev_index, unsigned int region_index,
+ 	unsigned int *region_id, u64 *region_start, u64 *region_size);
+ 
+-/* repository pu and memory info */
++/* repository logical pu and memory info */
+ 
+-int ps3_repository_read_num_pu(unsigned int *num_pu);
+-int ps3_repository_read_ppe_id(unsigned int *pu_index, unsigned int *ppe_id);
++int ps3_repository_read_num_pu(u64 *num_pu);
++int ps3_repository_read_pu_id(unsigned int pu_index, u64 *pu_id);
+ int ps3_repository_read_rm_base(unsigned int ppe_id, u64 *rm_base);
+ int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size);
+ int ps3_repository_read_region_total(u64 *region_total);
+@@ -200,9 +194,15 @@ int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size,
+ 
+ int ps3_repository_read_num_be(unsigned int *num_be);
+ int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id);
++int ps3_repository_read_be_id(u64 node_id, u64 *be_id);
+ int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq);
+ int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq);
+ 
++/* repository performance monitor info */
++
++int ps3_repository_read_lpm_privileges(unsigned int be_index, u64 *lpar,
++	u64 *rights);
++
+ /* repository 'Other OS' area */
+ 
+ int ps3_repository_read_boot_dat_addr(u64 *lpar_addr);
+diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
+index 1c94824..22063ad 100644
+--- a/arch/powerpc/platforms/ps3/repository.c
++++ b/arch/powerpc/platforms/ps3/repository.c
+@@ -33,7 +33,7 @@ enum ps3_lpar_id {
+ };
+ 
+ #define dump_field(_a, _b) _dump_field(_a, _b, __func__, __LINE__)
+-static void _dump_field(const char *hdr, u64 n, const char* func, int line)
++static void _dump_field(const char *hdr, u64 n, const char *func, int line)
+ {
+ #if defined(DEBUG)
+ 	char s[16];
+@@ -50,8 +50,8 @@ static void _dump_field(const char *hdr, u64 n, const char* func, int line)
+ 
+ #define dump_node_name(_a, _b, _c, _d, _e) \
+ 	_dump_node_name(_a, _b, _c, _d, _e, __func__, __LINE__)
+-static void _dump_node_name (unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
+-	u64 n4, const char* func, int line)
++static void _dump_node_name(unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
++	u64 n4, const char *func, int line)
+ {
+ 	pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+ 	_dump_field("n1: ", n1, func, line);
+@@ -63,7 +63,7 @@ static void _dump_node_name (unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
+ #define dump_node(_a, _b, _c, _d, _e, _f, _g) \
+ 	_dump_node(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
+ static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
+-	u64 v1, u64 v2, const char* func, int line)
++	u64 v1, u64 v2, const char *func, int line)
+ {
+ 	pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+ 	_dump_field("n1: ", n1, func, line);
+@@ -165,21 +165,18 @@ int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
+ 		make_first_field("bus", bus_index),
+ 		make_field(bus_str, 0),
+ 		0, 0,
+-		value, 0);
++		value, NULL);
+ }
+ 
+-int ps3_repository_read_bus_id(unsigned int bus_index, unsigned int *bus_id)
++int ps3_repository_read_bus_id(unsigned int bus_index, u64 *bus_id)
+ {
+ 	int result;
+-	u64 v1;
+-	u64 v2; /* unused */
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("bus", bus_index),
+ 		make_field("id", 0),
+ 		0, 0,
+-		&v1, &v2);
+-	*bus_id = v1;
++		bus_id, NULL);
+ 	return result;
+ }
+ 
+@@ -193,7 +190,7 @@ int ps3_repository_read_bus_type(unsigned int bus_index,
+ 		make_first_field("bus", bus_index),
+ 		make_field("type", 0),
+ 		0, 0,
+-		&v1, 0);
++		&v1, NULL);
+ 	*bus_type = v1;
+ 	return result;
+ }
+@@ -208,7 +205,7 @@ int ps3_repository_read_bus_num_dev(unsigned int bus_index,
+ 		make_first_field("bus", bus_index),
+ 		make_field("num_dev", 0),
+ 		0, 0,
+-		&v1, 0);
++		&v1, NULL);
+ 	*num_dev = v1;
+ 	return result;
+ }
+@@ -221,22 +218,20 @@ int ps3_repository_read_dev_str(unsigned int bus_index,
+ 		make_field("dev", dev_index),
+ 		make_field(dev_str, 0),
+ 		0,
+-		value, 0);
++		value, NULL);
+ }
+ 
+ int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
+-	unsigned int *dev_id)
++	u64 *dev_id)
+ {
+ 	int result;
+-	u64 v1;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("bus", bus_index),
+ 		make_field("dev", dev_index),
+ 		make_field("id", 0),
+ 		0,
+-		&v1, 0);
+-	*dev_id = v1;
++		dev_id, NULL);
+ 	return result;
+ }
+ 
+@@ -251,14 +246,14 @@ int ps3_repository_read_dev_type(unsigned int bus_index,
+ 		make_field("dev", dev_index),
+ 		make_field("type", 0),
+ 		0,
+-		&v1, 0);
++		&v1, NULL);
+ 	*dev_type = v1;
+ 	return result;
+ }
+ 
+ int ps3_repository_read_dev_intr(unsigned int bus_index,
+ 	unsigned int dev_index, unsigned int intr_index,
+-	enum ps3_interrupt_type *intr_type, unsigned int* interrupt_id)
++	enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id)
+ {
+ 	int result;
+ 	u64 v1;
+@@ -287,7 +282,7 @@ int ps3_repository_read_dev_reg_type(unsigned int bus_index,
+ 		make_field("dev", dev_index),
+ 		make_field("reg", reg_index),
+ 		make_field("type", 0),
+-		&v1, 0);
++		&v1, NULL);
+ 	*reg_type = v1;
+ 	return result;
+ }
+@@ -332,7 +327,7 @@ int ps3_repository_find_device(struct ps3_repository_device *repo)
+ 		return result;
+ 	}
+ 
+-	pr_debug("%s:%d: bus_type %u, bus_index %u, bus_id %u, num_dev %u\n",
++	pr_debug("%s:%d: bus_type %u, bus_index %u, bus_id %lu, num_dev %u\n",
+ 		__func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id,
+ 		num_dev);
+ 
+@@ -349,47 +344,95 @@ int ps3_repository_find_device(struct ps3_repository_device *repo)
+ 		return result;
+ 	}
+ 
+-	if (tmp.bus_type == PS3_BUS_TYPE_STORAGE) {
+-		/*
+-		 * A storage device may show up in the repository before the
+-		 * hypervisor has finished probing its type and regions
+-		 */
+-		unsigned int num_regions;
+-
+-		if (tmp.dev_type == PS3_DEV_TYPE_STOR_DUMMY) {
+-			pr_debug("%s:%u storage device not ready\n", __func__,
+-				 __LINE__);
+-			return -ENODEV;
+-		}
++	result = ps3_repository_read_dev_id(tmp.bus_index, tmp.dev_index,
++		&tmp.dev_id);
+ 
+-		result = ps3_repository_read_stor_dev_num_regions(tmp.bus_index,
+-								  tmp.dev_index,
+-								  &num_regions);
++	if (result) {
++		pr_debug("%s:%d ps3_repository_read_dev_id failed\n", __func__,
++		__LINE__);
++		return result;
++	}
++
++	pr_debug("%s:%d: found: dev_type %u, dev_index %u, dev_id %lu\n",
++		__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);
++
++	*repo = tmp;
++	return 0;
++}
++
++int ps3_repository_find_device_by_id(struct ps3_repository_device *repo,
++				     u64 bus_id, u64 dev_id)
++{
++	int result = -ENODEV;
++	struct ps3_repository_device tmp;
++	unsigned int num_dev;
++
++	pr_debug(" -> %s:%u: find device by id %lu:%lu\n", __func__, __LINE__,
++		 bus_id, dev_id);
++
++	for (tmp.bus_index = 0; tmp.bus_index < 10; tmp.bus_index++) {
++		result = ps3_repository_read_bus_id(tmp.bus_index,
++						    &tmp.bus_id);
+ 		if (result) {
+-			pr_debug("%s:%d read_stor_dev_num_regions failed\n",
+-				 __func__, __LINE__);
++			pr_debug("%s:%u read_bus_id(%u) failed\n", __func__,
++				 __LINE__, tmp.bus_index);
+ 			return result;
+ 		}
+ 
+-		if (!num_regions) {
+-			pr_debug("%s:%u storage device has no regions yet\n",
+-				 __func__, __LINE__);
+-			return -ENODEV;
+-		}
++		if (tmp.bus_id == bus_id)
++			goto found_bus;
++
++		pr_debug("%s:%u: skip, bus_id %lu\n", __func__, __LINE__,
++			 tmp.bus_id);
+ 	}
++	pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__);
++	return result;
+ 
+-	result = ps3_repository_read_dev_id(tmp.bus_index, tmp.dev_index,
+-		&tmp.dev_id);
++found_bus:
++	result = ps3_repository_read_bus_type(tmp.bus_index, &tmp.bus_type);
++	if (result) {
++		pr_debug("%s:%u read_bus_type(%u) failed\n", __func__,
++			 __LINE__, tmp.bus_index);
++		return result;
++	}
+ 
++	result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
+ 	if (result) {
+-		pr_debug("%s:%d ps3_repository_read_dev_id failed\n", __func__,
+-		__LINE__);
++		pr_debug("%s:%u read_bus_num_dev failed\n", __func__,
++			 __LINE__);
+ 		return result;
+ 	}
+ 
+-	pr_debug("%s:%d: found: dev_type %u, dev_index %u, dev_id %u\n",
+-		__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);
++	for (tmp.dev_index = 0; tmp.dev_index < num_dev; tmp.dev_index++) {
++		result = ps3_repository_read_dev_id(tmp.bus_index,
++						    tmp.dev_index,
++						    &tmp.dev_id);
++		if (result) {
++			pr_debug("%s:%u read_dev_id(%u:%u) failed\n", __func__,
++				 __LINE__, tmp.bus_index, tmp.dev_index);
++			return result;
++		}
+ 
++		if (tmp.dev_id == dev_id)
++			goto found_dev;
++
++		pr_debug("%s:%u: skip, dev_id %lu\n", __func__, __LINE__,
++			 tmp.dev_id);
++	}
++	pr_debug(" <- %s:%u: dev not found\n", __func__, __LINE__);
++	return result;
++
++found_dev:
++	result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
++					      &tmp.dev_type);
++	if (result) {
++		pr_debug("%s:%u read_dev_type failed\n", __func__, __LINE__);
++		return result;
++	}
++
++	pr_debug(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%lu:%lu)\n",
++		 __func__, __LINE__, tmp.bus_type, tmp.dev_type, tmp.bus_index,
++		 tmp.dev_index, tmp.bus_id, tmp.dev_id);
+ 	*repo = tmp;
+ 	return 0;
+ }
+@@ -402,50 +445,34 @@ int __devinit ps3_repository_find_devices(enum ps3_bus_type bus_type,
+ 
+ 	pr_debug(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type);
+ 
+-	for (repo.bus_index = 0; repo.bus_index < 10; repo.bus_index++) {
++	repo.bus_type = bus_type;
++	result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index);
++	if (result) {
++		pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__);
++		return result;
++	}
+ 
+-		result = ps3_repository_read_bus_type(repo.bus_index,
+-			&repo.bus_type);
++	result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id);
++	if (result) {
++		pr_debug("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__,
++			 repo.bus_index);
++		return result;
++	}
+ 
+-		if (result) {
+-			pr_debug("%s:%d read_bus_type(%u) failed\n",
+-				__func__, __LINE__, repo.bus_index);
++	for (repo.dev_index = 0; ; repo.dev_index++) {
++		result = ps3_repository_find_device(&repo);
++		if (result == -ENODEV) {
++			result = 0;
++			break;
++		} else if (result)
+ 			break;
+-		}
+-
+-		if (repo.bus_type != bus_type) {
+-			pr_debug("%s:%d: skip, bus_type %u\n", __func__,
+-				__LINE__, repo.bus_type);
+-			continue;
+-		}
+-
+-		result = ps3_repository_read_bus_id(repo.bus_index,
+-			&repo.bus_id);
+ 
++		result = callback(&repo);
+ 		if (result) {
+-			pr_debug("%s:%d read_bus_id(%u) failed\n",
+-				__func__, __LINE__, repo.bus_index);
+-			continue;
+-		}
+-
+-		for (repo.dev_index = 0; ; repo.dev_index++) {
+-			result = ps3_repository_find_device(&repo);
+-
+-			if (result == -ENODEV) {
+-				result = 0;
+-				break;
+-			} else if (result)
+-				break;
+-
+-			result = callback(&repo);
+-
+-			if (result) {
+-				pr_debug("%s:%d: abort at callback\n", __func__,
+-					__LINE__);
+-				break;
+-			}
++			pr_debug("%s:%d: abort at callback\n", __func__,
++				__LINE__);
++			break;
+ 		}
+-		break;
+ 	}
+ 
+ 	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+@@ -561,7 +588,7 @@ int ps3_repository_read_stor_dev_port(unsigned int bus_index,
+ 		make_first_field("bus", bus_index),
+ 		make_field("dev", dev_index),
+ 		make_field("port", 0),
+-		0, port, 0);
++		0, port, NULL);
+ }
+ 
+ int ps3_repository_read_stor_dev_blk_size(unsigned int bus_index,
+@@ -571,7 +598,7 @@ int ps3_repository_read_stor_dev_blk_size(unsigned int bus_index,
+ 		make_first_field("bus", bus_index),
+ 		make_field("dev", dev_index),
+ 		make_field("blk_size", 0),
+-		0, blk_size, 0);
++		0, blk_size, NULL);
+ }
+ 
+ int ps3_repository_read_stor_dev_num_blocks(unsigned int bus_index,
+@@ -581,7 +608,7 @@ int ps3_repository_read_stor_dev_num_blocks(unsigned int bus_index,
+ 		make_first_field("bus", bus_index),
+ 		make_field("dev", dev_index),
+ 		make_field("n_blocks", 0),
+-		0, num_blocks, 0);
++		0, num_blocks, NULL);
+ }
+ 
+ int ps3_repository_read_stor_dev_num_regions(unsigned int bus_index,
+@@ -594,7 +621,7 @@ int ps3_repository_read_stor_dev_num_regions(unsigned int bus_index,
+ 		make_first_field("bus", bus_index),
+ 		make_field("dev", dev_index),
+ 		make_field("n_regs", 0),
+-		0, &v1, 0);
++		0, &v1, NULL);
+ 	*num_regions = v1;
+ 	return result;
+ }
+@@ -611,7 +638,7 @@ int ps3_repository_read_stor_dev_region_id(unsigned int bus_index,
+ 	    make_field("dev", dev_index),
+ 	    make_field("region", region_index),
+ 	    make_field("id", 0),
+-	    &v1, 0);
++	    &v1, NULL);
+ 	*region_id = v1;
+ 	return result;
+ }
+@@ -624,7 +651,7 @@ int ps3_repository_read_stor_dev_region_size(unsigned int bus_index,
+ 	    make_field("dev", dev_index),
+ 	    make_field("region", region_index),
+ 	    make_field("size", 0),
+-	    region_size, 0);
++	    region_size, NULL);
+ }
+ 
+ int ps3_repository_read_stor_dev_region_start(unsigned int bus_index,
+@@ -635,7 +662,7 @@ int ps3_repository_read_stor_dev_region_start(unsigned int bus_index,
+ 	    make_field("dev", dev_index),
+ 	    make_field("region", region_index),
+ 	    make_field("start", 0),
+-	    region_start, 0);
++	    region_start, NULL);
+ }
+ 
+ int ps3_repository_read_stor_dev_info(unsigned int bus_index,
+@@ -684,6 +711,35 @@ int ps3_repository_read_stor_dev_region(unsigned int bus_index,
+ 	return result;
+ }
+ 
++/**
++ * ps3_repository_read_num_pu - Number of logical PU processors for this lpar.
++ */
++
++int ps3_repository_read_num_pu(u64 *num_pu)
++{
++	*num_pu = 0;
++	return read_node(PS3_LPAR_ID_CURRENT,
++			   make_first_field("bi", 0),
++			   make_field("pun", 0),
++			   0, 0,
++			   num_pu, NULL);
++}
++
++/**
++ * ps3_repository_read_pu_id - Read the logical PU id.
++ * @pu_index: Zero based index.
++ * @pu_id: The logical PU id.
++ */
++
++int ps3_repository_read_pu_id(unsigned int pu_index, u64 *pu_id)
++{
++	return read_node(PS3_LPAR_ID_CURRENT,
++		make_first_field("bi", 0),
++		make_field("pu", pu_index),
++		0, 0,
++		pu_id, NULL);
++}
++
+ int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size)
+ {
+ 	return read_node(PS3_LPAR_ID_CURRENT,
+@@ -691,7 +747,7 @@ int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size)
+ 		make_field("pu", 0),
+ 		ppe_id,
+ 		make_field("rm_size", 0),
+-		rm_size, 0);
++		rm_size, NULL);
+ }
+ 
+ int ps3_repository_read_region_total(u64 *region_total)
+@@ -700,7 +756,7 @@ int ps3_repository_read_region_total(u64 *region_total)
+ 		make_first_field("bi", 0),
+ 		make_field("rgntotal", 0),
+ 		0, 0,
+-		region_total, 0);
++		region_total, NULL);
+ }
+ 
+ /**
+@@ -736,7 +792,7 @@ int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
+ 		make_first_field("bi", 0),
+ 		make_field("spun", 0),
+ 		0, 0,
+-		&v1, 0);
++		&v1, NULL);
+ 	*num_spu_reserved = v1;
+ 	return result;
+ }
+@@ -755,7 +811,7 @@ int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
+ 		make_first_field("bi", 0),
+ 		make_field("spursvn", 0),
+ 		0, 0,
+-		&v1, 0);
++		&v1, NULL);
+ 	*num_resource_id = v1;
+ 	return result;
+ }
+@@ -768,7 +824,7 @@ int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
+  */
+ 
+ int ps3_repository_read_spu_resource_id(unsigned int res_index,
+-	enum ps3_spu_resource_type* resource_type, unsigned int *resource_id)
++	enum ps3_spu_resource_type *resource_type, unsigned int *resource_id)
+ {
+ 	int result;
+ 	u64 v1;
+@@ -785,14 +841,14 @@ int ps3_repository_read_spu_resource_id(unsigned int res_index,
+ 	return result;
+ }
+ 
+-int ps3_repository_read_boot_dat_address(u64 *address)
++static int ps3_repository_read_boot_dat_address(u64 *address)
+ {
+ 	return read_node(PS3_LPAR_ID_CURRENT,
+ 		make_first_field("bi", 0),
+ 		make_field("boot_dat", 0),
+ 		make_field("address", 0),
+ 		0,
+-		address, 0);
++		address, NULL);
+ }
+ 
+ int ps3_repository_read_boot_dat_size(unsigned int *size)
+@@ -805,7 +861,7 @@ int ps3_repository_read_boot_dat_size(unsigned int *size)
+ 		make_field("boot_dat", 0),
+ 		make_field("size", 0),
+ 		0,
+-		&v1, 0);
++		&v1, NULL);
+ 	*size = v1;
+ 	return result;
+ }
+@@ -820,7 +876,7 @@ int ps3_repository_read_vuart_av_port(unsigned int *port)
+ 		make_field("vir_uart", 0),
+ 		make_field("port", 0),
+ 		make_field("avset", 0),
+-		&v1, 0);
++		&v1, NULL);
+ 	*port = v1;
+ 	return result;
+ }
+@@ -835,7 +891,7 @@ int ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
+ 		make_field("vir_uart", 0),
+ 		make_field("port", 0),
+ 		make_field("sysmgr", 0),
+-		&v1, 0);
++		&v1, NULL);
+ 	*port = v1;
+ 	return result;
+ }
+@@ -856,6 +912,10 @@ int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
+ 		: ps3_repository_read_boot_dat_size(size);
+ }
+ 
++/**
++ * ps3_repository_read_num_be - Number of physical BE processors in the system.
++ */
++
+ int ps3_repository_read_num_be(unsigned int *num_be)
+ {
+ 	int result;
+@@ -866,11 +926,17 @@ int ps3_repository_read_num_be(unsigned int *num_be)
+ 		0,
+ 		0,
+ 		0,
+-		&v1, 0);
++		&v1, NULL);
+ 	*num_be = v1;
+ 	return result;
+ }
+ 
++/**
++ * ps3_repository_read_be_node_id - Read the physical BE processor node id.
++ * @be_index: Zero based index.
++ * @node_id: The BE processor node id.
++ */
++
+ int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id)
+ {
+ 	return read_node(PS3_LPAR_ID_PME,
+@@ -878,7 +944,23 @@ int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id)
+ 		0,
+ 		0,
+ 		0,
+-		node_id, 0);
++		node_id, NULL);
++}
++
++/**
++ * ps3_repository_read_be_id - Read the physical BE processor id.
++ * @node_id: The BE processor node id.
++ * @be_id: The BE processor id.
++ */
++
++int ps3_repository_read_be_id(u64 node_id, u64 *be_id)
++{
++	return read_node(PS3_LPAR_ID_PME,
++		make_first_field("be", 0),
++		node_id,
++		0,
++		0,
++		be_id, NULL);
+ }
+ 
+ int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
+@@ -888,7 +970,7 @@ int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
+ 		node_id,
+ 		make_field("clock", 0),
+ 		0,
+-		tb_freq, 0);
++		tb_freq, NULL);
+ }
+ 
+ int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
+@@ -897,11 +979,29 @@ int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
+ 	u64 node_id;
+ 
+ 	*tb_freq = 0;
+-	result = ps3_repository_read_be_node_id(0, &node_id);
++	result = ps3_repository_read_be_node_id(be_index, &node_id);
+ 	return result ? result
+ 		: ps3_repository_read_tb_freq(node_id, tb_freq);
+ }
+ 
++int ps3_repository_read_lpm_privileges(unsigned int be_index, u64 *lpar,
++	u64 *rights)
++{
++	int result;
++	u64 node_id;
++
++	*lpar = 0;
++	*rights = 0;
++	result = ps3_repository_read_be_node_id(be_index, &node_id);
++	return result ? result
++		: read_node(PS3_LPAR_ID_PME,
++			    make_first_field("be", 0),
++			    node_id,
++			    make_field("lpm", 0),
++			    make_field("priv", 0),
++			    lpar, rights);
++}
++
+ #if defined(DEBUG)
+ 
+ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
+@@ -1034,7 +1134,7 @@ static int dump_device_info(struct ps3_repository_device *repo,
+ 			continue;
+ 		}
+ 
+-		pr_debug("%s:%d  (%u:%u): dev_type %u, dev_id %u\n", __func__,
++		pr_debug("%s:%d  (%u:%u): dev_type %u, dev_id %lu\n", __func__,
+ 			__LINE__, repo->bus_index, repo->dev_index,
+ 			repo->dev_type, repo->dev_id);
+ 
+@@ -1091,7 +1191,7 @@ int ps3_repository_dump_bus_info(void)
+ 			continue;
+ 		}
+ 
+-		pr_debug("%s:%d bus_%u: bus_type %u, bus_id %u, num_dev %u\n",
++		pr_debug("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n",
+ 			__func__, __LINE__, repo.bus_index, repo.bus_type,
+ 			repo.bus_id, num_dev);
+ 
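The repository.c changes above add ps3_repository_find_device_by_id(), which
scans the bus entries for a matching bus_id and then that bus's device entries
for a matching dev_id, filling in the caller's struct ps3_repository_device.
A hedged usage sketch, assuming the declarations from platform.h are in scope;
example_lookup() is illustrative and not part of the patch:

static int example_lookup(u64 bus_id, u64 dev_id)
{
	struct ps3_repository_device repo;
	int result;

	result = ps3_repository_find_device_by_id(&repo, bus_id, dev_id);
	if (result) {
		pr_debug("%s: device %lu:%lu not found\n", __func__,
			 bus_id, dev_id);
		return result;
	}

	pr_debug("%s: bus_type %u, dev_type %u at index %u:%u\n", __func__,
		 repo.bus_type, repo.dev_type, repo.bus_index, repo.dev_index);
	return 0;
}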
+diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
+index d1630a0..5ad4118 100644
+--- a/arch/powerpc/platforms/ps3/spu.c
++++ b/arch/powerpc/platforms/ps3/spu.c
+@@ -28,6 +28,7 @@
+ #include <asm/spu_priv1.h>
+ #include <asm/lv1call.h>
+ 
++#include "../cell/spufs/spufs.h"
+ #include "platform.h"
+ 
+ /* spu_management_ops */
+@@ -419,10 +420,34 @@ static int ps3_init_affinity(void)
+ 	return 0;
+ }
+ 
++/**
++ * ps3_enable_spu - Enable SPU run control.
++ *
++ * An outstanding enhancement for the PS3 would be to add a guard to check
++ * for incorrect access to the spu problem state when the spu context is
++ * disabled.  This check could be implemented with a flag added to the spu
++ * context that would inhibit mapping problem state pages, and a routine
++ * to unmap spu problem state pages.  When the spu is enabled with
++ * ps3_enable_spu() the flag would be set allowing pages to be mapped,
++ * and when the spu is disabled with ps3_disable_spu() the flag would be
++ * cleared and the mapped problem state pages would be unmapped.
++ */
++
++static void ps3_enable_spu(struct spu_context *ctx)
++{
++}
++
++static void ps3_disable_spu(struct spu_context *ctx)
++{
++	ctx->ops->runcntl_stop(ctx);
++}
++
+ const struct spu_management_ops spu_management_ps3_ops = {
+ 	.enumerate_spus = ps3_enumerate_spus,
+ 	.create_spu = ps3_create_spu,
+ 	.destroy_spu = ps3_destroy_spu,
++	.enable_spu = ps3_enable_spu,
++	.disable_spu = ps3_disable_spu,
+ 	.init_affinity = ps3_init_affinity,
+ };
+ 
+@@ -505,8 +530,6 @@ static void mfc_sr1_set(struct spu *spu, u64 sr1)
+ 	static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
+ 		| MFC_STATE1_PROBLEM_STATE_MASK);
+ 
+-	sr1 |= MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+-
+ 	BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
+ 
+ 	spu_pdata(spu)->cache.sr1 = sr1;
+diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
+index 6405f4a..43c493f 100644
+--- a/arch/powerpc/platforms/ps3/system-bus.c
++++ b/arch/powerpc/platforms/ps3/system-bus.c
+@@ -42,8 +42,8 @@ struct {
+ 	int gpu;
+ } static usage_hack;
+ 
+-static int ps3_is_device(struct ps3_system_bus_device *dev,
+-			 unsigned int bus_id, unsigned int dev_id)
++static int ps3_is_device(struct ps3_system_bus_device *dev, u64 bus_id,
++			 u64 dev_id)
+ {
+ 	return dev->bus_id == bus_id && dev->dev_id == dev_id;
+ }
+@@ -182,8 +182,8 @@ int ps3_open_hv_device(struct ps3_system_bus_device *dev)
+ 	case PS3_MATCH_ID_SYSTEM_MANAGER:
+ 		pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
+ 			__LINE__, dev->match_id);
+-		pr_debug("%s:%d: bus_id: %u\n", __func__,
+-			__LINE__, dev->bus_id);
++		pr_debug("%s:%d: bus_id: %lu\n", __func__, __LINE__,
++			dev->bus_id);
+ 		BUG();
+ 		return -EINVAL;
+ 
+@@ -220,8 +220,8 @@ int ps3_close_hv_device(struct ps3_system_bus_device *dev)
+ 	case PS3_MATCH_ID_SYSTEM_MANAGER:
+ 		pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
+ 			__LINE__, dev->match_id);
+-		pr_debug("%s:%d: bus_id: %u\n", __func__,
+-			__LINE__, dev->bus_id);
++		pr_debug("%s:%d: bus_id: %lu\n", __func__, __LINE__,
++			dev->bus_id);
+ 		BUG();
+ 		return -EINVAL;
+ 
+@@ -240,7 +240,7 @@ EXPORT_SYMBOL_GPL(ps3_close_hv_device);
+ static void _dump_mmio_region(const struct ps3_mmio_region* r,
+ 	const char* func, int line)
+ {
+-	pr_debug("%s:%d: dev       %u:%u\n", func, line, r->dev->bus_id,
++	pr_debug("%s:%d: dev       %lu:%lu\n", func, line, r->dev->bus_id,
+ 		r->dev->dev_id);
+ 	pr_debug("%s:%d: bus_addr  %lxh\n", func, line, r->bus_addr);
+ 	pr_debug("%s:%d: len       %lxh\n", func, line, r->len);
+@@ -715,6 +715,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
+ 	static unsigned int dev_ioc0_count;
+ 	static unsigned int dev_sb_count;
+ 	static unsigned int dev_vuart_count;
++	static unsigned int dev_lpm_count;
+ 
+ 	if (!dev->core.parent)
+ 		dev->core.parent = &ps3_system_bus;
+@@ -737,6 +738,10 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
+ 		snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
+ 			"vuart_%02x", ++dev_vuart_count);
+ 		break;
++	case PS3_DEVICE_TYPE_LPM:
++		snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
++			"lpm_%02x", ++dev_lpm_count);
++		break;
+ 	default:
+ 		BUG();
+ 	};
+diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
+index fb3d636..9eb539e 100644
+--- a/arch/powerpc/platforms/pseries/eeh.c
++++ b/arch/powerpc/platforms/pseries/eeh.c
+@@ -29,6 +29,8 @@
+ #include <linux/rbtree.h>
+ #include <linux/seq_file.h>
+ #include <linux/spinlock.h>
++#include <linux/of.h>
++
+ #include <asm/atomic.h>
+ #include <asm/eeh.h>
+ #include <asm/eeh_event.h>
+@@ -169,7 +171,6 @@ static void rtas_slot_error_detail(struct pci_dn *pdn, int severity,
+  */
+ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
+ {
+-	struct device_node *dn;
+ 	struct pci_dev *dev = pdn->pcidev;
+ 	u32 cfg;
+ 	int cap, i;
+@@ -243,12 +244,12 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
+ 
+ 	/* Gather status on devices under the bridge */
+ 	if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
+-		dn = pdn->node->child;
+-		while (dn) {
++		struct device_node *dn;
++
++		for_each_child_of_node(pdn->node, dn) {
+ 			pdn = PCI_DN(dn);
+ 			if (pdn)
+ 				n += gather_pci_data(pdn, buf+n, len-n);
+-			dn = dn->sibling;
+ 		}
+ 	}
+ 
+@@ -372,7 +373,7 @@ struct device_node * find_device_pe(struct device_node *dn)
+ 	return dn;
+ }
+ 
+-/** Mark all devices that are peers of this device as failed.
++/** Mark all devices that are children of this device as failed.
+  *  Mark the device driver too, so that it can see the failure
+  *  immediately; this is critical, since some drivers poll
+  *  status registers in interrupts ... If a driver is polling,
+@@ -380,9 +381,11 @@ struct device_node * find_device_pe(struct device_node *dn)
+  *  an interrupt context, which is bad.
+  */
+ 
+-static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
++static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
+ {
+-	while (dn) {
++	struct device_node *dn;
++
++	for_each_child_of_node(parent, dn) {
+ 		if (PCI_DN(dn)) {
+ 			/* Mark the pci device driver too */
+ 			struct pci_dev *dev = PCI_DN(dn)->pcidev;
+@@ -392,10 +395,8 @@ static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
+ 			if (dev && dev->driver)
+ 				dev->error_state = pci_channel_io_frozen;
+ 
+-			if (dn->child)
+-				__eeh_mark_slot (dn->child, mode_flag);
++			__eeh_mark_slot(dn, mode_flag);
+ 		}
+-		dn = dn->sibling;
+ 	}
+ }
+ 
+@@ -415,19 +416,19 @@ void eeh_mark_slot (struct device_node *dn, int mode_flag)
+ 	if (dev)
+ 		dev->error_state = pci_channel_io_frozen;
+ 
+-	__eeh_mark_slot (dn->child, mode_flag);
++	__eeh_mark_slot(dn, mode_flag);
+ }
+ 
+-static void __eeh_clear_slot (struct device_node *dn, int mode_flag)
++static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
+ {
+-	while (dn) {
++	struct device_node *dn;
++
++	for_each_child_of_node(parent, dn) {
+ 		if (PCI_DN(dn)) {
+ 			PCI_DN(dn)->eeh_mode &= ~mode_flag;
+ 			PCI_DN(dn)->eeh_check_count = 0;
+-			if (dn->child)
+-				__eeh_clear_slot (dn->child, mode_flag);
++			__eeh_clear_slot(dn, mode_flag);
+ 		}
+-		dn = dn->sibling;
+ 	}
+ }
+ 
+@@ -444,7 +445,7 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
+ 
+ 	PCI_DN(dn)->eeh_mode &= ~mode_flag;
+ 	PCI_DN(dn)->eeh_check_count = 0;
+-	__eeh_clear_slot (dn->child, mode_flag);
++	__eeh_clear_slot(dn, mode_flag);
+ 	spin_unlock_irqrestore(&confirm_error_lock, flags);
+ }
+ 
+@@ -480,6 +481,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
+ 		no_dn++;
+ 		return 0;
+ 	}
++	dn = find_device_pe(dn);
+ 	pdn = PCI_DN(dn);
+ 
+ 	/* Access to IO BARs might get this far and still not want checking. */
+@@ -545,7 +547,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
+ 
+ 	/* Note that config-io to empty slots may fail;
+ 	 * they are empty when they don't have children. */
+-	if ((rets[0] == 5) && (dn->child == NULL)) {
++	if ((rets[0] == 5) && (rets[2] == 0) && (dn->child == NULL)) {
+ 		false_positives++;
+ 		pdn->eeh_false_positives ++;
+ 		rc = 0;
+@@ -848,11 +850,8 @@ void eeh_restore_bars(struct pci_dn *pdn)
+ 	if ((pdn->eeh_mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(pdn->class_code))
+ 		__restore_bars (pdn);
+ 
+-	dn = pdn->node->child;
+-	while (dn) {
++	for_each_child_of_node(pdn->node, dn)
+ 		eeh_restore_bars (PCI_DN(dn));
+-		dn = dn->sibling;
+-	}
+ }
+ 
+ /**
+@@ -1130,7 +1129,8 @@ static void eeh_add_device_early(struct device_node *dn)
+ void eeh_add_device_tree_early(struct device_node *dn)
+ {
+ 	struct device_node *sib;
+-	for (sib = dn->child; sib; sib = sib->sibling)
++
++	for_each_child_of_node(dn, sib)
+ 		eeh_add_device_tree_early(sib);
+ 	eeh_add_device_early(dn);
+ }
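Several eeh.c hunks above replace open-coded dn->child / dn->sibling walks
with the for_each_child_of_node() iterator from <linux/of.h>, which also takes
and drops the of_node references for each child as it advances. A small sketch
of the same recursive-walk shape; example_visit() is a placeholder:

#include <linux/of.h>

static void example_walk(struct device_node *parent,
			 void (*example_visit)(struct device_node *dn))
{
	struct device_node *dn;

	for_each_child_of_node(parent, dn) {
		example_visit(dn);
		example_walk(dn, example_visit);	/* descend */
	}
}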
+diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
+index 57e025e..68ea5ee 100644
+--- a/arch/powerpc/platforms/pseries/eeh_driver.c
++++ b/arch/powerpc/platforms/pseries/eeh_driver.c
+@@ -310,8 +310,6 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
+ 	const char *location, *pci_str, *drv_str;
+ 
+ 	frozen_dn = find_device_pe(event->dn);
+-	frozen_bus = pcibios_find_pci_bus(frozen_dn);
+-
+ 	if (!frozen_dn) {
+ 
+ 		location = of_get_property(event->dn, "ibm,loc-code", NULL);
+@@ -321,6 +319,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
+ 		        location, pci_name(event->dev));
+ 		return NULL;
+ 	}
++
++	frozen_bus = pcibios_find_pci_bus(frozen_dn);
+ 	location = of_get_property(frozen_dn, "ibm,loc-code", NULL);
+ 	location = location ? location : "unknown";
+ 
+@@ -354,13 +354,6 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
+ 	if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
+ 		goto excess_failures;
+ 
+-	/* Get the current PCI slot state. */
+-	rc = eeh_wait_for_slot_status (frozen_pdn, MAX_WAIT_FOR_RECOVERY*1000);
+-	if (rc < 0) {
+-		printk(KERN_WARNING "EEH: Permanent failure\n");
+-		goto hard_fail;
+-	}
+-
+ 	printk(KERN_WARNING
+ 	   "EEH: This PCI device has failed %d times in the last hour:\n",
+ 		frozen_pdn->eeh_freeze_count);
+@@ -376,6 +369,14 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
+ 	 */
+ 	pci_walk_bus(frozen_bus, eeh_report_error, &result);
+ 
++	/* Get the current PCI slot state. This can take a long time,
++	 * sometimes over 3 seconds for certain systems. */
++	rc = eeh_wait_for_slot_status (frozen_pdn, MAX_WAIT_FOR_RECOVERY*1000);
++	if (rc < 0) {
++		printk(KERN_WARNING "EEH: Permanent failure\n");
++		goto hard_fail;
++	}
++
+ 	/* Since rtas may enable MMIO when posting the error log,
+ 	 * don't post the error log until after all dev drivers
+ 	 * have been informed.
+diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+index 412e6b4..c4ad54e 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+@@ -153,7 +153,7 @@ static int pseries_add_processor(struct device_node *np)
+ 	for (i = 0; i < nthreads; i++)
+ 		cpu_set(i, tmp);
+ 
+-	lock_cpu_hotplug();
++	cpu_maps_update_begin();
+ 
+ 	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+ 
+@@ -190,7 +190,7 @@ static int pseries_add_processor(struct device_node *np)
+ 	}
+ 	err = 0;
+ out_unlock:
+-	unlock_cpu_hotplug();
++	cpu_maps_update_done();
+ 	return err;
+ }
+ 
+@@ -211,7 +211,7 @@ static void pseries_remove_processor(struct device_node *np)
+ 
+ 	nthreads = len / sizeof(u32);
+ 
+-	lock_cpu_hotplug();
++	cpu_maps_update_begin();
+ 	for (i = 0; i < nthreads; i++) {
+ 		for_each_present_cpu(cpu) {
+ 			if (get_hard_smp_processor_id(cpu) != intserv[i])
+@@ -225,7 +225,7 @@ static void pseries_remove_processor(struct device_node *np)
+ 			printk(KERN_WARNING "Could not find cpu to remove "
+ 			       "with physical id 0x%x\n", intserv[i]);
+ 	}
+-	unlock_cpu_hotplug();
++	cpu_maps_update_done();
+ }
+ 
+ static int pseries_smp_notifier(struct notifier_block *nb,
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index be17d23..a65c763 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -251,7 +251,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
+ 	const unsigned long *basep;
+ 	const u32 *sizep;
+ 
+-	node = (struct device_node *)phb->arch_data;
++	node = phb->dn;
+ 
+ 	basep = of_get_property(node, "linux,tce-base", NULL);
+ 	sizep = of_get_property(node, "linux,tce-size", NULL);
+@@ -296,11 +296,12 @@ static void iommu_table_setparms(struct pci_controller *phb,
+ static void iommu_table_setparms_lpar(struct pci_controller *phb,
+ 				      struct device_node *dn,
+ 				      struct iommu_table *tbl,
+-				      const void *dma_window)
++				      const void *dma_window,
++				      int bussubno)
+ {
+ 	unsigned long offset, size;
+ 
+-	tbl->it_busno  = PCI_DN(dn)->bussubno;
++	tbl->it_busno  = bussubno;
+ 	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
+ 
+ 	tbl->it_base   = 0;
+@@ -420,17 +421,10 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
+ 	    pdn->full_name, ppci->iommu_table);
+ 
+ 	if (!ppci->iommu_table) {
+-		/* Bussubno hasn't been copied yet.
+-		 * Do it now because iommu_table_setparms_lpar needs it.
+-		 */
+-
+-		ppci->bussubno = bus->number;
+-
+ 		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ 				   ppci->phb->node);
+-
+-		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
+-
++		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
++			bus->number);
+ 		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
+ 		DBG("  created table: %p\n", ppci->iommu_table);
+ 	}
+@@ -523,14 +517,10 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
+ 
+ 	pci = PCI_DN(pdn);
+ 	if (!pci->iommu_table) {
+-		/* iommu_table_setparms_lpar needs bussubno. */
+-		pci->bussubno = pci->phb->bus->number;
+-
+ 		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ 				   pci->phb->node);
+-
+-		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
+-
++		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
++			pci->phb->bus->number);
+ 		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
+ 		DBG("  created table: %p\n", pci->iommu_table);
+ 	} else {
+@@ -556,7 +546,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
+ 	case PSERIES_RECONFIG_REMOVE:
+ 		if (pci && pci->iommu_table &&
+ 		    of_get_property(np, "ibm,dma-window", NULL))
+-			iommu_free_table(np);
++			iommu_free_table(pci->iommu_table, np->full_name);
+ 		break;
+ 	default:
+ 		err = NOTIFY_DONE;
+diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
+index 47f0e08..5a5a19e 100644
+--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
+@@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
+ 
+ /* Must be called before pci_bus_add_devices */
+ void
+-pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
++pcibios_fixup_new_pci_devices(struct pci_bus *bus)
+ {
+ 	struct pci_dev *dev;
+ 
+@@ -98,8 +98,6 @@ pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
+ 			/* Fill device archdata and setup iommu table */
+ 			pcibios_setup_new_device(dev);
+ 
+-			if(fix_bus)
+-				pcibios_fixup_device_resources(dev, bus);
+ 			pci_read_irq_line(dev);
+ 			for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ 				struct resource *r = &dev->resource[i];
+@@ -132,8 +130,8 @@ pcibios_pci_config_bridge(struct pci_dev *dev)
+ 
+ 	pci_scan_child_bus(child_bus);
+ 
+-	/* Fixup new pci devices without touching bus struct */
+-	pcibios_fixup_new_pci_devices(child_bus, 0);
++	/* Fixup new pci devices */
++	pcibios_fixup_new_pci_devices(child_bus);
+ 
+ 	/* Make the discovered devices available */
+ 	pci_bus_add_devices(child_bus);
+@@ -169,7 +167,7 @@ pcibios_add_pci_devices(struct pci_bus * bus)
+ 		/* use ofdt-based probe */
+ 		of_scan_bus(dn, bus);
+ 		if (!list_empty(&bus->devices)) {
+-			pcibios_fixup_new_pci_devices(bus, 0);
++			pcibios_fixup_new_pci_devices(bus);
+ 			pci_bus_add_devices(bus);
+ 			eeh_add_device_tree_late(bus);
+ 		}
+@@ -178,7 +176,7 @@ pcibios_add_pci_devices(struct pci_bus * bus)
+ 		slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
+ 		num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
+ 		if (num) {
+-			pcibios_fixup_new_pci_devices(bus, 1);
++			pcibios_fixup_new_pci_devices(bus);
+ 			pci_bus_add_devices(bus);
+ 			eeh_add_device_tree_late(bus);
+ 		}
+@@ -208,7 +206,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
+ 		eeh_add_device_tree_early(dn);
+ 
+ 	scan_phb(phb);
+-	pcibios_fixup_new_pci_devices(phb->bus, 0);
++	pcibios_fixup_new_pci_devices(phb->bus);
+ 	pci_bus_add_devices(phb->bus);
+ 	eeh_add_device_tree_late(phb->bus);
+ 
+diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
+index d003c80..d8680b5 100644
+--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
++++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
+@@ -8,11 +8,6 @@ static inline long poll_pending(void)
+ 	return plpar_hcall_norets(H_POLL_PENDING);
+ }
+ 
+-static inline long prod_processor(void)
+-{
+-	return plpar_hcall_norets(H_PROD);
+-}
+-
+ static inline long cede_processor(void)
+ {
+ 	return plpar_hcall_norets(H_CEDE);
+diff --git a/arch/powerpc/platforms/pseries/power.c b/arch/powerpc/platforms/pseries/power.c
+index 73e6902..e95fc15 100644
+--- a/arch/powerpc/platforms/pseries/power.c
++++ b/arch/powerpc/platforms/pseries/power.c
+@@ -28,13 +28,15 @@
+ 
+ unsigned long rtas_poweron_auto; /* default and normal state is 0 */
+ 
+-static ssize_t auto_poweron_show(struct kset *kset, char *buf)
++static ssize_t auto_poweron_show(struct kobject *kobj,
++				 struct kobj_attribute *attr, char *buf)
+ {
+         return sprintf(buf, "%lu\n", rtas_poweron_auto);
+ }
+ 
+-static ssize_t
+-auto_poweron_store(struct kset *kset, const char *buf, size_t n)
++static ssize_t auto_poweron_store(struct kobject *kobj,
++				  struct kobj_attribute *attr,
++				  const char *buf, size_t n)
+ {
+ 	int ret;
+ 	unsigned long ups_restart;
+@@ -47,17 +49,11 @@ auto_poweron_store(struct kset *kset, const char *buf, size_t n)
+ 	return -EINVAL;
+ }
+ 
+-static struct subsys_attribute auto_poweron_attr = {
+-        .attr   = {
+-                .name = __stringify(auto_poweron),
+-                .mode = 0644,
+-        },
+-        .show   = auto_poweron_show,
+-        .store  = auto_poweron_store,
+-};
++static struct kobj_attribute auto_poweron_attr =
++	__ATTR(auto_poweron, 0644, auto_poweron_show, auto_poweron_store);
+ 
+ #ifndef CONFIG_PM
+-decl_subsys(power,NULL,NULL);
++struct kobject *power_kobj;
+ 
+ static struct attribute *g[] = {
+         &auto_poweron_attr.attr,
+@@ -70,18 +66,16 @@ static struct attribute_group attr_group = {
+ 
+ static int __init pm_init(void)
+ {
+-        int error = subsystem_register(&power_subsys);
+-        if (!error)
+-                error = sysfs_create_group(&power_subsys.kobj, &attr_group);
+-        return error;
++	power_kobj = kobject_create_and_add("power", NULL);
++	if (!power_kobj)
++		return -ENOMEM;
++	return sysfs_create_group(power_kobj, &attr_group);
+ }
+ core_initcall(pm_init);
+ #else
+-extern struct kset power_subsys;
+-
+ static int __init apo_pm_init(void)
+ {
+-	return (subsys_create_file(&power_subsys, &auto_poweron_attr));
++	return (sysfs_create_file(power_kobj, &auto_poweron_attr));
+ }
+ __initcall(apo_pm_init);
+ #endif
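The power.c hunk above converts the old subsys_attribute/decl_subsys sysfs
interface to a struct kobj_attribute registered on a kobject created with
kobject_create_and_add(). A self-contained sketch of that registration
pattern, using illustrative example_* names:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned long example_value;

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%lu\n", example_value);
}

static ssize_t example_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t n)
{
	if (sscanf(buf, "%lu", &example_value) != 1)
		return -EINVAL;
	return n;
}

static struct kobj_attribute example_attr =
	__ATTR(example, 0644, example_show, example_store);

static int __init example_sysfs_init(void)
{
	struct kobject *example_kobj;

	/* NULL parent places the kobject at the top of sysfs (/sys/example). */
	example_kobj = kobject_create_and_add("example", NULL);
	if (!example_kobj)
		return -ENOMEM;

	return sysfs_create_file(example_kobj, &example_attr.attr);
}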
+diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
+index 73401c8..e3078ce 100644
+--- a/arch/powerpc/platforms/pseries/rtasd.c
++++ b/arch/powerpc/platforms/pseries/rtasd.c
+@@ -382,7 +382,7 @@ static void do_event_scan_all_cpus(long delay)
+ {
+ 	int cpu;
+ 
+-	lock_cpu_hotplug();
++	get_online_cpus();
+ 	cpu = first_cpu(cpu_online_map);
+ 	for (;;) {
+ 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
+@@ -390,15 +390,15 @@ static void do_event_scan_all_cpus(long delay)
+ 		set_cpus_allowed(current, CPU_MASK_ALL);
+ 
+ 		/* Drop hotplug lock, and sleep for the specified delay */
+-		unlock_cpu_hotplug();
++		put_online_cpus();
+ 		msleep_interruptible(delay);
+-		lock_cpu_hotplug();
++		get_online_cpus();
+ 
+ 		cpu = next_cpu(cpu, cpu_online_map);
+ 		if (cpu == NR_CPUS)
+ 			break;
+ 	}
+-	unlock_cpu_hotplug();
++	put_online_cpus();
+ }
+ 
+ static int rtasd(void *unused)
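The hotplug-cpu.c and rtasd.c hunks above move from lock_cpu_hotplug() to the
get_online_cpus()/put_online_cpus() and cpu_maps_update_begin()/_done()
interfaces. A short sketch of the reader-side pattern, illustrative only:

#include <linux/cpu.h>
#include <linux/cpumask.h>

static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, n = 0;

	get_online_cpus();		/* block CPU hotplug while walking */
	for_each_online_cpu(cpu)
		n++;
	put_online_cpus();

	return n;
}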
+diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
+index 116305b..ea4c659 100644
+--- a/arch/powerpc/platforms/pseries/smp.c
++++ b/arch/powerpc/platforms/pseries/smp.c
+@@ -46,6 +46,7 @@
+ #include <asm/pSeries_reconfig.h>
+ #include <asm/mpic.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/cputhreads.h>
+ 
+ #include "plpar_wrappers.h"
+ #include "pseries.h"
+@@ -202,7 +203,7 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
+ 	 */
+ 	if (system_state < SYSTEM_RUNNING &&
+ 	    cpu_has_feature(CPU_FTR_SMT) &&
+-	    !smt_enabled_at_boot && nr % 2 != 0)
++	    !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+ 		return 0;
+ 
+ 	return 1;
+diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
+index 66e7d68..8f8dd9c 100644
+--- a/arch/powerpc/platforms/pseries/xics.c
++++ b/arch/powerpc/platforms/pseries/xics.c
+@@ -87,19 +87,25 @@ static int ibm_int_off;
+ /* Direct HW low level accessors */
+ 
+ 
+-static inline unsigned int direct_xirr_info_get(int n_cpu)
++static inline unsigned int direct_xirr_info_get(void)
+ {
+-	return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
++	int cpu = smp_processor_id();
++
++	return in_be32(&xics_per_cpu[cpu]->xirr.word);
+ }
+ 
+-static inline void direct_xirr_info_set(int n_cpu, int value)
++static inline void direct_xirr_info_set(int value)
+ {
+-	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
++	int cpu = smp_processor_id();
++
++	out_be32(&xics_per_cpu[cpu]->xirr.word, value);
+ }
+ 
+-static inline void direct_cppr_info(int n_cpu, u8 value)
++static inline void direct_cppr_info(u8 value)
+ {
+-	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
++	int cpu = smp_processor_id();
++
++	out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value);
+ }
+ 
+ static inline void direct_qirr_info(int n_cpu, u8 value)
+@@ -111,7 +117,7 @@ static inline void direct_qirr_info(int n_cpu, u8 value)
+ /* LPAR low level accessors */
+ 
+ 
+-static inline unsigned int lpar_xirr_info_get(int n_cpu)
++static inline unsigned int lpar_xirr_info_get(void)
+ {
+ 	unsigned long lpar_rc;
+ 	unsigned long return_value;
+@@ -122,7 +128,7 @@ static inline unsigned int lpar_xirr_info_get(int n_cpu)
+ 	return (unsigned int)return_value;
+ }
+ 
+-static inline void lpar_xirr_info_set(int n_cpu, int value)
++static inline void lpar_xirr_info_set(int value)
+ {
+ 	unsigned long lpar_rc;
+ 	unsigned long val64 = value & 0xffffffff;
+@@ -133,7 +139,7 @@ static inline void lpar_xirr_info_set(int n_cpu, int value)
+ 		      val64);
+ }
+ 
+-static inline void lpar_cppr_info(int n_cpu, u8 value)
++static inline void lpar_cppr_info(u8 value)
+ {
+ 	unsigned long lpar_rc;
+ 
+@@ -275,21 +281,19 @@ static unsigned int xics_startup(unsigned int virq)
+ 
+ static void xics_eoi_direct(unsigned int virq)
+ {
+-	int cpu = smp_processor_id();
+ 	unsigned int irq = (unsigned int)irq_map[virq].hwirq;
+ 
+ 	iosync();
+-	direct_xirr_info_set(cpu, (0xff << 24) | irq);
++	direct_xirr_info_set((0xff << 24) | irq);
+ }
+ 
+ 
+ static void xics_eoi_lpar(unsigned int virq)
+ {
+-	int cpu = smp_processor_id();
+ 	unsigned int irq = (unsigned int)irq_map[virq].hwirq;
+ 
+ 	iosync();
+-	lpar_xirr_info_set(cpu, (0xff << 24) | irq);
++	lpar_xirr_info_set((0xff << 24) | irq);
+ }
+ 
+ static inline unsigned int xics_remap_irq(unsigned int vec)
+@@ -312,16 +316,12 @@ static inline unsigned int xics_remap_irq(unsigned int vec)
+ 
+ static unsigned int xics_get_irq_direct(void)
+ {
+-	unsigned int cpu = smp_processor_id();
+-
+-	return xics_remap_irq(direct_xirr_info_get(cpu));
++	return xics_remap_irq(direct_xirr_info_get());
+ }
+ 
+ static unsigned int xics_get_irq_lpar(void)
+ {
+-	unsigned int cpu = smp_processor_id();
+-
+-	return xics_remap_irq(lpar_xirr_info_get(cpu));
++	return xics_remap_irq(lpar_xirr_info_get());
+ }
+ 
+ #ifdef CONFIG_SMP
+@@ -387,12 +387,12 @@ void xics_cause_IPI(int cpu)
+ 
+ #endif /* CONFIG_SMP */
+ 
+-static void xics_set_cpu_priority(int cpu, unsigned char cppr)
++static void xics_set_cpu_priority(unsigned char cppr)
+ {
+ 	if (firmware_has_feature(FW_FEATURE_LPAR))
+-		lpar_cppr_info(cpu, cppr);
++		lpar_cppr_info(cppr);
+ 	else
+-		direct_cppr_info(cpu, cppr);
++		direct_cppr_info(cppr);
+ 	iosync();
+ }
+ 
+@@ -440,9 +440,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
+ 
+ void xics_setup_cpu(void)
+ {
+-	int cpu = smp_processor_id();
+-
+-	xics_set_cpu_priority(cpu, 0xff);
++	xics_set_cpu_priority(0xff);
+ 
+ 	/*
+ 	 * Put the calling processor into the GIQ.  This is really only
+@@ -783,7 +781,7 @@ void xics_teardown_cpu(int secondary)
+ 	unsigned int ipi;
+ 	struct irq_desc *desc;
+ 
+-	xics_set_cpu_priority(cpu, 0);
++	xics_set_cpu_priority(0);
+ 
+ 	/*
+ 	 * Clear IPI
+@@ -824,10 +822,11 @@ void xics_teardown_cpu(int secondary)
+ void xics_migrate_irqs_away(void)
+ {
+ 	int status;
+-	unsigned int irq, virq, cpu = smp_processor_id();
++	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
++	unsigned int irq, virq;
+ 
+ 	/* Reject any interrupt that was queued to us... */
+-	xics_set_cpu_priority(cpu, 0);
++	xics_set_cpu_priority(0);
+ 
+ 	/* remove ourselves from the global interrupt queue */
+ 	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
+@@ -835,7 +834,7 @@ void xics_migrate_irqs_away(void)
+ 	WARN_ON(status < 0);
+ 
+ 	/* Allow IPIs again... */
+-	xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);
++	xics_set_cpu_priority(DEFAULT_PRIORITY);
+ 
+ 	for_each_irq(virq) {
+ 		struct irq_desc *desc;
+@@ -874,7 +873,7 @@ void xics_migrate_irqs_away(void)
+ 		 * The irq has to be migrated only in the single cpu
+ 		 * case.
+ 		 */
+-		if (xics_status[0] != get_hard_smp_processor_id(cpu))
++		if (xics_status[0] != hw_cpu)
+ 			goto unlock;
+ 
+ 		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
+diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
+index db0ec3b..9ffd809 100644
+--- a/arch/powerpc/platforms/pseries/xics.h
++++ b/arch/powerpc/platforms/pseries/xics.h
+@@ -21,9 +21,6 @@ extern void xics_cause_IPI(int cpu);
+ extern  void xics_request_IPIs(void);
+ extern void xics_migrate_irqs_away(void);
+ 
+-/* first argument is ignored for now*/
+-void pSeriesLP_cppr_info(int n_cpu, u8 value);
+-
+ struct xics_ipi_struct {
+ 	volatile unsigned long value;
+ } ____cacheline_aligned;
+diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
+new file mode 100644
+index 0000000..72fb35b
+--- /dev/null
++++ b/arch/powerpc/sysdev/Kconfig
+@@ -0,0 +1,8 @@
++# For a description of the syntax of this configuration file,
++# see Documentation/kbuild/kconfig-language.txt.
++#
++
++config PPC4xx_PCI_EXPRESS
++	bool
++	depends on PCI && 4xx
++	default n
+diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
+index 99a77d7..15f3e85 100644
+--- a/arch/powerpc/sysdev/Makefile
++++ b/arch/powerpc/sysdev/Makefile
+@@ -2,7 +2,7 @@ ifeq ($(CONFIG_PPC64),y)
+ EXTRA_CFLAGS			+= -mno-minimal-toc
+ endif
+ 
+-mpic-msi-obj-$(CONFIG_PCI_MSI)	+= mpic_msi.o mpic_u3msi.o
++mpic-msi-obj-$(CONFIG_PCI_MSI)	+= mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o
+ obj-$(CONFIG_MPIC)		+= mpic.o $(mpic-msi-obj-y)
+ 
+ obj-$(CONFIG_PPC_MPC106)	+= grackle.o
+@@ -12,6 +12,7 @@ obj-$(CONFIG_U3_DART)		+= dart_iommu.o
+ obj-$(CONFIG_MMIO_NVRAM)	+= mmio_nvram.o
+ obj-$(CONFIG_FSL_SOC)		+= fsl_soc.o
+ obj-$(CONFIG_FSL_PCI)		+= fsl_pci.o
++obj-$(CONFIG_RAPIDIO)		+= fsl_rio.o
+ obj-$(CONFIG_TSI108_BRIDGE)	+= tsi108_pci.o tsi108_dev.o
+ obj-$(CONFIG_QUICC_ENGINE)	+= qe_lib/
+ obj-$(CONFIG_PPC_BESTCOMM)	+= bestcomm/
+@@ -24,16 +25,20 @@ obj-$(CONFIG_AXON_RAM)		+= axonram.o
+ ifeq ($(CONFIG_PPC_MERGE),y)
+ obj-$(CONFIG_PPC_INDIRECT_PCI)	+= indirect_pci.o
+ obj-$(CONFIG_PPC_I8259)		+= i8259.o
+-obj-$(CONFIG_PPC_83xx)		+= ipic.o
++obj-$(CONFIG_IPIC)		+= ipic.o
+ obj-$(CONFIG_4xx)		+= uic.o
+ obj-$(CONFIG_XILINX_VIRTEX)	+= xilinx_intc.o
++obj-$(CONFIG_OF_RTC)		+= of_rtc.o
++ifeq ($(CONFIG_PCI),y)
++obj-$(CONFIG_4xx)		+= ppc4xx_pci.o
++endif
+ endif
+ 
+ # Temporary hack until we have migrated to asm-powerpc
+ ifeq ($(ARCH),powerpc)
+ obj-$(CONFIG_CPM)		+= cpm_common.o
+-obj-$(CONFIG_CPM2)		+= cpm2_common.o cpm2_pic.o
++obj-$(CONFIG_CPM2)		+= cpm2.o cpm2_pic.o
+ obj-$(CONFIG_PPC_DCR)		+= dcr.o
+-obj-$(CONFIG_8xx)		+= mpc8xx_pic.o commproc.o
++obj-$(CONFIG_8xx)		+= mpc8xx_pic.o cpm1.o
+ obj-$(CONFIG_UCODE_PATCH)	+= micropatch.o
+ endif
+diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
+index 5eaf3e3..d359d6e 100644
+--- a/arch/powerpc/sysdev/axonram.c
++++ b/arch/powerpc/sysdev/axonram.c
+@@ -42,8 +42,9 @@
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+-#include <asm/of_device.h>
+-#include <asm/of_platform.h>
++#include <linux/of_device.h>
++#include <linux/of_platform.h>
++
+ #include <asm/page.h>
+ #include <asm/prom.h>
+ 
+diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
+index 740ad73..f589999 100644
+--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
++++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
+@@ -29,11 +29,17 @@
+ 
+ #define DRIVER_NAME "bestcomm-core"
+ 
++/* MPC5200 device tree match tables */
++static struct of_device_id mpc52xx_sram_ids[] __devinitdata = {
++	{ .compatible = "fsl,mpc5200-sram", },
++	{ .compatible = "mpc5200-sram", },
++	{}
++};
++
+ 
+ struct bcom_engine *bcom_eng = NULL;
+ EXPORT_SYMBOL_GPL(bcom_eng);	/* needed for inline functions */
+ 
+-
+ /* ======================================================================== */
+ /* Public and private API                                                   */
+ /* ======================================================================== */
+@@ -373,7 +379,7 @@ mpc52xx_bcom_probe(struct of_device *op, const struct of_device_id *match)
+ 	of_node_get(op->node);
+ 
+ 	/* Prepare SRAM */
+-	ofn_sram = of_find_compatible_node(NULL, "sram", "mpc5200-sram");
++	ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
+ 	if (!ofn_sram) {
+ 		printk(KERN_ERR DRIVER_NAME ": "
+ 			"No SRAM found in device tree\n");
+@@ -478,10 +484,8 @@ mpc52xx_bcom_remove(struct of_device *op)
+ }
+ 
+ static struct of_device_id mpc52xx_bcom_of_match[] = {
+-	{
+-		.type		= "dma-controller",
+-		.compatible	= "mpc5200-bestcomm",
+-	},
++	{ .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", },
++	{ .type = "dma-controller", .compatible = "mpc5200-bestcomm", },
+ 	{},
+ };
+ 
+diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h
+index e802cb4..c960a8b 100644
+--- a/arch/powerpc/sysdev/bestcomm/bestcomm.h
++++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h
+@@ -20,7 +20,7 @@ struct bcom_bd; /* defined later on ... */
+ 
+ 
+ /* ======================================================================== */
+-/* Generic task managment                                                   */
++/* Generic task management                                                   */
+ /* ======================================================================== */
+ 
+ /**
+diff --git a/arch/powerpc/sysdev/commproc.c b/arch/powerpc/sysdev/commproc.c
+deleted file mode 100644
+index f6a6378..0000000
+--- a/arch/powerpc/sysdev/commproc.c
++++ /dev/null
+@@ -1,609 +0,0 @@
+-/*
+- * General Purpose functions for the global management of the
+- * Communication Processor Module.
+- * Copyright (c) 1997 Dan error_act (dmalek at jlc.net)
+- *
+- * In addition to the individual control of the communication
+- * channels, there are a few functions that globally affect the
+- * communication processor.
+- *
+- * Buffer descriptors must be allocated from the dual ported memory
+- * space.  The allocator for that is here.  When the communication
+- * process is reset, we reclaim the memory available.  There is
+- * currently no deallocator for this memory.
+- * The amount of space available is platform dependent.  On the
+- * MBX, the EPPC software loads additional microcode into the
+- * communication processor, and uses some of the DP ram for this
+- * purpose.  Current, the first 512 bytes and the last 256 bytes of
+- * memory are used.  Right now I am conservative and only use the
+- * memory that can never be used for microcode.  If there are
+- * applications that require more DP ram, we can expand the boundaries
+- * but then we have to be careful of any downloaded microcode.
+- */
+-#include <linux/errno.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/param.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
+-#include <linux/interrupt.h>
+-#include <linux/irq.h>
+-#include <linux/module.h>
+-#include <asm/mpc8xx.h>
+-#include <asm/page.h>
+-#include <asm/pgtable.h>
+-#include <asm/8xx_immap.h>
+-#include <asm/commproc.h>
+-#include <asm/io.h>
+-#include <asm/tlbflush.h>
+-#include <asm/rheap.h>
+-#include <asm/prom.h>
+-#include <asm/cpm.h>
+-
+-#include <asm/fs_pd.h>
+-
+-#define CPM_MAP_SIZE    (0x4000)
+-
+-#ifndef CONFIG_PPC_CPM_NEW_BINDING
+-static void m8xx_cpm_dpinit(void);
+-#endif
+-static uint host_buffer; /* One page of host buffer */
+-static uint host_end;    /* end + 1 */
+-cpm8xx_t __iomem *cpmp;  /* Pointer to comm processor space */
+-immap_t __iomem *mpc8xx_immr;
+-static cpic8xx_t __iomem *cpic_reg;
+-
+-static struct irq_host *cpm_pic_host;
+-
+-static void cpm_mask_irq(unsigned int irq)
+-{
+-	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
+-
+-	clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
+-}
+-
+-static void cpm_unmask_irq(unsigned int irq)
+-{
+-	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
+-
+-	setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
+-}
+-
+-static void cpm_end_irq(unsigned int irq)
+-{
+-	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
+-
+-	out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
+-}
+-
+-static struct irq_chip cpm_pic = {
+-	.typename = " CPM PIC ",
+-	.mask = cpm_mask_irq,
+-	.unmask = cpm_unmask_irq,
+-	.eoi = cpm_end_irq,
+-};
+-
+-int cpm_get_irq(void)
+-{
+-	int cpm_vec;
+-
+-	/* Get the vector by setting the ACK bit and then reading
+-	 * the register.
+-	 */
+-	out_be16(&cpic_reg->cpic_civr, 1);
+-	cpm_vec = in_be16(&cpic_reg->cpic_civr);
+-	cpm_vec >>= 11;
+-
+-	return irq_linear_revmap(cpm_pic_host, cpm_vec);
+-}
+-
+-static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
+-			  irq_hw_number_t hw)
+-{
+-	pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
+-
+-	get_irq_desc(virq)->status |= IRQ_LEVEL;
+-	set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
+-	return 0;
+-}
+-
+-/* The CPM can generate the error interrupt when there is a race condition
+- * between generating and masking interrupts.  All we have to do is ACK it
+- * and return.  This is a no-op function so we don't need any special
+- * tests in the interrupt handler.
+- */
+-static irqreturn_t cpm_error_interrupt(int irq, void *dev)
+-{
+-	return IRQ_HANDLED;
+-}
+-
+-static struct irqaction cpm_error_irqaction = {
+-	.handler = cpm_error_interrupt,
+-	.mask = CPU_MASK_NONE,
+-	.name = "error",
+-};
+-
+-static struct irq_host_ops cpm_pic_host_ops = {
+-	.map = cpm_pic_host_map,
+-};
+-
+-unsigned int cpm_pic_init(void)
+-{
+-	struct device_node *np = NULL;
+-	struct resource res;
+-	unsigned int sirq = NO_IRQ, hwirq, eirq;
+-	int ret;
+-
+-	pr_debug("cpm_pic_init\n");
+-
+-	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
+-	if (np == NULL)
+-		np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
+-	if (np == NULL) {
+-		printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
+-		return sirq;
+-	}
+-
+-	ret = of_address_to_resource(np, 0, &res);
+-	if (ret)
+-		goto end;
+-
+-	cpic_reg = ioremap(res.start, res.end - res.start + 1);
+-	if (cpic_reg == NULL)
+-		goto end;
+-
+-	sirq = irq_of_parse_and_map(np, 0);
+-	if (sirq == NO_IRQ)
+-		goto end;
+-
+-	/* Initialize the CPM interrupt controller. */
+-	hwirq = (unsigned int)irq_map[sirq].hwirq;
+-	out_be32(&cpic_reg->cpic_cicr,
+-	    (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
+-		((hwirq/2) << 13) | CICR_HP_MASK);
+-
+-	out_be32(&cpic_reg->cpic_cimr, 0);
+-
+-	cpm_pic_host = irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR,
+-				      64, &cpm_pic_host_ops, 64);
+-	if (cpm_pic_host == NULL) {
+-		printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
+-		sirq = NO_IRQ;
+-		goto end;
+-	}
+-
+-	/* Install our own error handler. */
+-	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
+-	if (np == NULL)
+-		np = of_find_node_by_type(NULL, "cpm");
+-	if (np == NULL) {
+-		printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
+-		goto end;
+-	}
+-
+-	eirq = irq_of_parse_and_map(np, 0);
+-	if (eirq == NO_IRQ)
+-		goto end;
+-
+-	if (setup_irq(eirq, &cpm_error_irqaction))
+-		printk(KERN_ERR "Could not allocate CPM error IRQ!");
+-
+-	setbits32(&cpic_reg->cpic_cicr, CICR_IEN);
+-
+-end:
+-	of_node_put(np);
+-	return sirq;
+-}
+-
+-void __init cpm_reset(void)
+-{
+-	sysconf8xx_t __iomem *siu_conf;
+-
+-	mpc8xx_immr = ioremap(get_immrbase(), 0x4000);
+-	if (!mpc8xx_immr) {
+-		printk(KERN_CRIT "Could not map IMMR\n");
+-		return;
+-	}
+-
+-	cpmp = &mpc8xx_immr->im_cpm;
+-
+-#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
+-	/* Perform a reset.
+-	*/
+-	out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);
+-
+-	/* Wait for it.
+-	*/
+-	while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG);
+-#endif
+-
+-#ifdef CONFIG_UCODE_PATCH
+-	cpm_load_patch(cpmp);
+-#endif
+-
+-	/* Set SDMA Bus Request priority 5.
+-	 * On 860T, this also enables FEC priority 6.  I am not sure
+-	 * this is what we realy want for some applications, but the
+-	 * manual recommends it.
+-	 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
+-	 */
+-	siu_conf = immr_map(im_siu_conf);
+-	out_be32(&siu_conf->sc_sdcr, 1);
+-	immr_unmap(siu_conf);
+-
+-#ifdef CONFIG_PPC_CPM_NEW_BINDING
+-	cpm_muram_init();
+-#else
+-	/* Reclaim the DP memory for our use. */
+-	m8xx_cpm_dpinit();
+-#endif
+-}
+-
+-/* We used to do this earlier, but have to postpone as long as possible
+- * to ensure the kernel VM is now running.
+- */
+-static void
+-alloc_host_memory(void)
+-{
+-	dma_addr_t	physaddr;
+-
+-	/* Set the host page for allocation.
+-	*/
+-	host_buffer = (uint)dma_alloc_coherent(NULL, PAGE_SIZE, &physaddr,
+-			GFP_KERNEL);
+-	host_end = host_buffer + PAGE_SIZE;
+-}
+-
+-/* We also own one page of host buffer space for the allocation of
+- * UART "fifos" and the like.
+- */
+-uint
+-m8xx_cpm_hostalloc(uint size)
+-{
+-	uint	retloc;
+-
+-	if (host_buffer == 0)
+-		alloc_host_memory();
+-
+-	if ((host_buffer + size) >= host_end)
+-		return(0);
+-
+-	retloc = host_buffer;
+-	host_buffer += size;
+-
+-	return(retloc);
+-}
+-
+-/* Set a baud rate generator.  This needs lots of work.  There are
+- * four BRGs, any of which can be wired to any channel.
+- * The internal baud rate clock is the system clock divided by 16.
+- * This assumes the baudrate is 16x oversampled by the uart.
+- */
+-#define BRG_INT_CLK		(get_brgfreq())
+-#define BRG_UART_CLK		(BRG_INT_CLK/16)
+-#define BRG_UART_CLK_DIV16	(BRG_UART_CLK/16)
+-
+-void
+-cpm_setbrg(uint brg, uint rate)
+-{
+-	u32 __iomem *bp;
+-
+-	/* This is good enough to get SMCs running.....
+-	*/
+-	bp = &cpmp->cp_brgc1;
+-	bp += brg;
+-	/* The BRG has a 12-bit counter.  For really slow baud rates (or
+-	 * really fast processors), we may have to further divide by 16.
+-	 */
+-	if (((BRG_UART_CLK / rate) - 1) < 4096)
+-		out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
+-	else
+-		out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
+-		             CPM_BRG_EN | CPM_BRG_DIV16);
+-}
+-
+-#ifndef CONFIG_PPC_CPM_NEW_BINDING
+-/*
+- * dpalloc / dpfree bits.
+- */
+-static spinlock_t cpm_dpmem_lock;
+-/*
+- * 16 blocks should be enough to satisfy all requests
+- * until the memory subsystem goes up...
+- */
+-static rh_block_t cpm_boot_dpmem_rh_block[16];
+-static rh_info_t cpm_dpmem_info;
+-
+-#define CPM_DPMEM_ALIGNMENT	8
+-static u8 __iomem *dpram_vbase;
+-static phys_addr_t dpram_pbase;
+-
+-static void m8xx_cpm_dpinit(void)
+-{
+-	spin_lock_init(&cpm_dpmem_lock);
+-
+-	dpram_vbase = cpmp->cp_dpmem;
+-	dpram_pbase = get_immrbase() + offsetof(immap_t, im_cpm.cp_dpmem);
+-
+-	/* Initialize the info header */
+-	rh_init(&cpm_dpmem_info, CPM_DPMEM_ALIGNMENT,
+-			sizeof(cpm_boot_dpmem_rh_block) /
+-			sizeof(cpm_boot_dpmem_rh_block[0]),
+-			cpm_boot_dpmem_rh_block);
+-
+-	/*
+-	 * Attach the usable dpmem area.
+-	 * XXX: This is actually crap.  CPM_DATAONLY_BASE and
+-	 * CPM_DATAONLY_SIZE are a subset of the available dparm.  It varies
+-	 * with the processor and the microcode patches applied / activated.
+-	 * But the following should be at least safe.
+-	 */
+-	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
+-}
+-
+-/*
+- * Allocate the requested size worth of DP memory.
+- * This function returns an offset into the DPRAM area.
+- * Use cpm_dpram_addr() to get the virtual address of the area.
+- */
+-unsigned long cpm_dpalloc(uint size, uint align)
+-{
+-	unsigned long start;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cpm_dpmem_lock, flags);
+-	cpm_dpmem_info.alignment = align;
+-	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
+-	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
+-
+-	return (uint)start;
+-}
+-EXPORT_SYMBOL(cpm_dpalloc);
+-
+-int cpm_dpfree(unsigned long offset)
+-{
+-	int ret;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cpm_dpmem_lock, flags);
+-	ret = rh_free(&cpm_dpmem_info, offset);
+-	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
+-
+-	return ret;
+-}
+-EXPORT_SYMBOL(cpm_dpfree);
+-
+-unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
+-{
+-	unsigned long start;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cpm_dpmem_lock, flags);
+-	cpm_dpmem_info.alignment = align;
+-	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
+-	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
+-
+-	return start;
+-}
+-EXPORT_SYMBOL(cpm_dpalloc_fixed);
+-
+-void cpm_dpdump(void)
+-{
+-	rh_dump(&cpm_dpmem_info);
+-}
+-EXPORT_SYMBOL(cpm_dpdump);
+-
+-void *cpm_dpram_addr(unsigned long offset)
+-{
+-	return (void *)(dpram_vbase + offset);
+-}
+-EXPORT_SYMBOL(cpm_dpram_addr);
+-
+-uint cpm_dpram_phys(u8 *addr)
+-{
+-	return (dpram_pbase + (uint)(addr - dpram_vbase));
+-}
+-EXPORT_SYMBOL(cpm_dpram_phys);
+-#endif /* !CONFIG_PPC_CPM_NEW_BINDING */
+-
+-struct cpm_ioport16 {
+-	__be16 dir, par, sor, dat, intr;
+-	__be16 res[3];
+-};
+-
+-struct cpm_ioport32 {
+-	__be32 dir, par, sor;
+-};
+-
+-static void cpm1_set_pin32(int port, int pin, int flags)
+-{
+-	struct cpm_ioport32 __iomem *iop;
+-	pin = 1 << (31 - pin);
+-
+-	if (port == CPM_PORTB)
+-		iop = (struct cpm_ioport32 __iomem *)
+-		      &mpc8xx_immr->im_cpm.cp_pbdir;
+-	else
+-		iop = (struct cpm_ioport32 __iomem *)
+-		      &mpc8xx_immr->im_cpm.cp_pedir;
+-
+-	if (flags & CPM_PIN_OUTPUT)
+-		setbits32(&iop->dir, pin);
+-	else
+-		clrbits32(&iop->dir, pin);
+-
+-	if (!(flags & CPM_PIN_GPIO))
+-		setbits32(&iop->par, pin);
+-	else
+-		clrbits32(&iop->par, pin);
+-
+-	if (port == CPM_PORTE) {
+-		if (flags & CPM_PIN_SECONDARY)
+-			setbits32(&iop->sor, pin);
+-		else
+-			clrbits32(&iop->sor, pin);
+-
+-		if (flags & CPM_PIN_OPENDRAIN)
+-			setbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
+-		else
+-			clrbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
+-	}
+-}
+-
+-static void cpm1_set_pin16(int port, int pin, int flags)
+-{
+-	struct cpm_ioport16 __iomem *iop =
+-		(struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport;
+-
+-	pin = 1 << (15 - pin);
+-
+-	if (port != 0)
+-		iop += port - 1;
+-
+-	if (flags & CPM_PIN_OUTPUT)
+-		setbits16(&iop->dir, pin);
+-	else
+-		clrbits16(&iop->dir, pin);
+-
+-	if (!(flags & CPM_PIN_GPIO))
+-		setbits16(&iop->par, pin);
+-	else
+-		clrbits16(&iop->par, pin);
+-
+-	if (port == CPM_PORTC) {
+-		if (flags & CPM_PIN_SECONDARY)
+-			setbits16(&iop->sor, pin);
+-		else
+-			clrbits16(&iop->sor, pin);
+-	}
+-}
+-
+-void cpm1_set_pin(enum cpm_port port, int pin, int flags)
+-{
+-	if (port == CPM_PORTB || port == CPM_PORTE)
+-		cpm1_set_pin32(port, pin, flags);
+-	else
+-		cpm1_set_pin16(port, pin, flags);
+-}
+-
+-int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
+-{
+-	int shift;
+-	int i, bits = 0;
+-	u32 __iomem *reg;
+-	u32 mask = 7;
+-
+-	u8 clk_map[][3] = {
+-		{CPM_CLK_SCC1, CPM_BRG1, 0},
+-		{CPM_CLK_SCC1, CPM_BRG2, 1},
+-		{CPM_CLK_SCC1, CPM_BRG3, 2},
+-		{CPM_CLK_SCC1, CPM_BRG4, 3},
+-		{CPM_CLK_SCC1, CPM_CLK1, 4},
+-		{CPM_CLK_SCC1, CPM_CLK2, 5},
+-		{CPM_CLK_SCC1, CPM_CLK3, 6},
+-		{CPM_CLK_SCC1, CPM_CLK4, 7},
+-
+-		{CPM_CLK_SCC2, CPM_BRG1, 0},
+-		{CPM_CLK_SCC2, CPM_BRG2, 1},
+-		{CPM_CLK_SCC2, CPM_BRG3, 2},
+-		{CPM_CLK_SCC2, CPM_BRG4, 3},
+-		{CPM_CLK_SCC2, CPM_CLK1, 4},
+-		{CPM_CLK_SCC2, CPM_CLK2, 5},
+-		{CPM_CLK_SCC2, CPM_CLK3, 6},
+-		{CPM_CLK_SCC2, CPM_CLK4, 7},
+-
+-		{CPM_CLK_SCC3, CPM_BRG1, 0},
+-		{CPM_CLK_SCC3, CPM_BRG2, 1},
+-		{CPM_CLK_SCC3, CPM_BRG3, 2},
+-		{CPM_CLK_SCC3, CPM_BRG4, 3},
+-		{CPM_CLK_SCC3, CPM_CLK5, 4},
+-		{CPM_CLK_SCC3, CPM_CLK6, 5},
+-		{CPM_CLK_SCC3, CPM_CLK7, 6},
+-		{CPM_CLK_SCC3, CPM_CLK8, 7},
+-
+-		{CPM_CLK_SCC4, CPM_BRG1, 0},
+-		{CPM_CLK_SCC4, CPM_BRG2, 1},
+-		{CPM_CLK_SCC4, CPM_BRG3, 2},
+-		{CPM_CLK_SCC4, CPM_BRG4, 3},
+-		{CPM_CLK_SCC4, CPM_CLK5, 4},
+-		{CPM_CLK_SCC4, CPM_CLK6, 5},
+-		{CPM_CLK_SCC4, CPM_CLK7, 6},
+-		{CPM_CLK_SCC4, CPM_CLK8, 7},
+-
+-		{CPM_CLK_SMC1, CPM_BRG1, 0},
+-		{CPM_CLK_SMC1, CPM_BRG2, 1},
+-		{CPM_CLK_SMC1, CPM_BRG3, 2},
+-		{CPM_CLK_SMC1, CPM_BRG4, 3},
+-		{CPM_CLK_SMC1, CPM_CLK1, 4},
+-		{CPM_CLK_SMC1, CPM_CLK2, 5},
+-		{CPM_CLK_SMC1, CPM_CLK3, 6},
+-		{CPM_CLK_SMC1, CPM_CLK4, 7},
+-
+-		{CPM_CLK_SMC2, CPM_BRG1, 0},
+-		{CPM_CLK_SMC2, CPM_BRG2, 1},
+-		{CPM_CLK_SMC2, CPM_BRG3, 2},
+-		{CPM_CLK_SMC2, CPM_BRG4, 3},
+-		{CPM_CLK_SMC2, CPM_CLK5, 4},
+-		{CPM_CLK_SMC2, CPM_CLK6, 5},
+-		{CPM_CLK_SMC2, CPM_CLK7, 6},
+-		{CPM_CLK_SMC2, CPM_CLK8, 7},
+-	};
+-
+-	switch (target) {
+-	case CPM_CLK_SCC1:
+-		reg = &mpc8xx_immr->im_cpm.cp_sicr;
+-		shift = 0;
+-		break;
+-
+-	case CPM_CLK_SCC2:
+-		reg = &mpc8xx_immr->im_cpm.cp_sicr;
+-		shift = 8;
+-		break;
+-
+-	case CPM_CLK_SCC3:
+-		reg = &mpc8xx_immr->im_cpm.cp_sicr;
+-		shift = 16;
+-		break;
+-
+-	case CPM_CLK_SCC4:
+-		reg = &mpc8xx_immr->im_cpm.cp_sicr;
+-		shift = 24;
+-		break;
+-
+-	case CPM_CLK_SMC1:
+-		reg = &mpc8xx_immr->im_cpm.cp_simode;
+-		shift = 12;
+-		break;
+-
+-	case CPM_CLK_SMC2:
+-		reg = &mpc8xx_immr->im_cpm.cp_simode;
+-		shift = 28;
+-		break;
+-
+-	default:
+-		printk(KERN_ERR "cpm1_clock_setup: invalid clock target\n");
+-		return -EINVAL;
+-	}
+-
+-	if (reg == &mpc8xx_immr->im_cpm.cp_sicr && mode == CPM_CLK_RX)
+-		shift += 3;
+-
+-	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
+-		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
+-			bits = clk_map[i][2];
+-			break;
+-		}
+-	}
+-
+-	if (i == ARRAY_SIZE(clk_map)) {
+-		printk(KERN_ERR "cpm1_clock_setup: invalid clock combination\n");
+-		return -EINVAL;
+-	}
+-
+-	bits <<= shift;
+-	mask <<= shift;
+-	out_be32(reg, (in_be32(reg) & ~mask) | bits);
+-
+-	return 0;
+-}
+diff --git a/arch/powerpc/sysdev/commproc.h b/arch/powerpc/sysdev/commproc.h
+deleted file mode 100644
+index 9155ba4..0000000
+--- a/arch/powerpc/sysdev/commproc.h
++++ /dev/null
+@@ -1,12 +0,0 @@
+-#ifndef _POWERPC_SYSDEV_COMMPROC_H
+-#define _POWERPC_SYSDEV_COMMPROC_H
+-
+-extern void cpm_reset(void);
+-extern void mpc8xx_restart(char *cmd);
+-extern void mpc8xx_calibrate_decr(void);
+-extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
+-extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
+-extern void m8xx_pic_init(void);
+-extern unsigned int mpc8xx_get_irq(void);
+-
+-#endif
+diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
+new file mode 100644
+index 0000000..df8bd2b
+--- /dev/null
++++ b/arch/powerpc/sysdev/cpm1.c
+@@ -0,0 +1,612 @@
++/*
++ * General Purpose functions for the global management of the
++ * Communication Processor Module.
++ * Copyright (c) 1997 Dan error_act (dmalek at jlc.net)
++ *
++ * In addition to the individual control of the communication
++ * channels, there are a few functions that globally affect the
++ * communication processor.
++ *
++ * Buffer descriptors must be allocated from the dual ported memory
++ * space.  The allocator for that is here.  When the communication
++ * process is reset, we reclaim the memory available.  There is
++ * currently no deallocator for this memory.
++ * The amount of space available is platform dependent.  On the
++ * MBX, the EPPC software loads additional microcode into the
++ * communication processor, and uses some of the DP ram for this
++ * purpose.  Currently, the first 512 bytes and the last 256 bytes of
++ * memory are used.  Right now I am conservative and only use the
++ * memory that can never be used for microcode.  If there are
++ * applications that require more DP ram, we can expand the boundaries
++ * but then we have to be careful of any downloaded microcode.
++ */
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/dma-mapping.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/module.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/8xx_immap.h>
++#include <asm/cpm1.h>
++#include <asm/io.h>
++#include <asm/tlbflush.h>
++#include <asm/rheap.h>
++#include <asm/prom.h>
++#include <asm/cpm.h>
++
++#include <asm/fs_pd.h>
++
++#define CPM_MAP_SIZE    (0x4000)
++
++#ifndef CONFIG_PPC_CPM_NEW_BINDING
++static void m8xx_cpm_dpinit(void);
++#endif
++cpm8xx_t __iomem *cpmp;  /* Pointer to comm processor space */
++immap_t __iomem *mpc8xx_immr;
++static cpic8xx_t __iomem *cpic_reg;
++
++static struct irq_host *cpm_pic_host;
++
++static void cpm_mask_irq(unsigned int irq)
++{
++	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
++
++	clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
++}
++
++static void cpm_unmask_irq(unsigned int irq)
++{
++	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
++
++	setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
++}
++
++static void cpm_end_irq(unsigned int irq)
++{
++	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
++
++	out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
++}
++
++static struct irq_chip cpm_pic = {
++	.typename = " CPM PIC ",
++	.mask = cpm_mask_irq,
++	.unmask = cpm_unmask_irq,
++	.eoi = cpm_end_irq,
++};
++
++int cpm_get_irq(void)
++{
++	int cpm_vec;
++
++	/* Get the vector by setting the ACK bit and then reading
++	 * the register.
++	 */
++	out_be16(&cpic_reg->cpic_civr, 1);
++	cpm_vec = in_be16(&cpic_reg->cpic_civr);
++	cpm_vec >>= 11;
++
++	return irq_linear_revmap(cpm_pic_host, cpm_vec);
++}
++
++static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
++			  irq_hw_number_t hw)
++{
++	pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
++
++	get_irq_desc(virq)->status |= IRQ_LEVEL;
++	set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
++	return 0;
++}
++
++/* The CPM can generate the error interrupt when there is a race condition
++ * between generating and masking interrupts.  All we have to do is ACK it
++ * and return.  This is a no-op function so we don't need any special
++ * tests in the interrupt handler.
++ */
++static irqreturn_t cpm_error_interrupt(int irq, void *dev)
++{
++	return IRQ_HANDLED;
++}
++
++static struct irqaction cpm_error_irqaction = {
++	.handler = cpm_error_interrupt,
++	.mask = CPU_MASK_NONE,
++	.name = "error",
++};
++
++static struct irq_host_ops cpm_pic_host_ops = {
++	.map = cpm_pic_host_map,
++};
++
++unsigned int cpm_pic_init(void)
++{
++	struct device_node *np = NULL;
++	struct resource res;
++	unsigned int sirq = NO_IRQ, hwirq, eirq;
++	int ret;
++
++	pr_debug("cpm_pic_init\n");
++
++	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
++	if (np == NULL)
++		np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
++	if (np == NULL) {
++		printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
++		return sirq;
++	}
++
++	ret = of_address_to_resource(np, 0, &res);
++	if (ret)
++		goto end;
++
++	cpic_reg = ioremap(res.start, res.end - res.start + 1);
++	if (cpic_reg == NULL)
++		goto end;
++
++	sirq = irq_of_parse_and_map(np, 0);
++	if (sirq == NO_IRQ)
++		goto end;
++
++	/* Initialize the CPM interrupt controller. */
++	hwirq = (unsigned int)irq_map[sirq].hwirq;
++	out_be32(&cpic_reg->cpic_cicr,
++	    (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
++		((hwirq/2) << 13) | CICR_HP_MASK);
++
++	out_be32(&cpic_reg->cpic_cimr, 0);
++
++	cpm_pic_host = irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR,
++				      64, &cpm_pic_host_ops, 64);
++	if (cpm_pic_host == NULL) {
++		printk(KERN_ERR "CPM PIC: failed to allocate irq host!\n");
++		sirq = NO_IRQ;
++		goto end;
++	}
++
++	/* Install our own error handler. */
++	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
++	if (np == NULL)
++		np = of_find_node_by_type(NULL, "cpm");
++	if (np == NULL) {
++		printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
++		goto end;
++	}
++
++	eirq = irq_of_parse_and_map(np, 0);
++	if (eirq == NO_IRQ)
++		goto end;
++
++	if (setup_irq(eirq, &cpm_error_irqaction))
++		printk(KERN_ERR "Could not allocate CPM error IRQ!");
++
++	setbits32(&cpic_reg->cpic_cicr, CICR_IEN);
++
++end:
++	of_node_put(np);
++	return sirq;
++}
++
++void __init cpm_reset(void)
++{
++	sysconf8xx_t __iomem *siu_conf;
++
++	mpc8xx_immr = ioremap(get_immrbase(), 0x4000);
++	if (!mpc8xx_immr) {
++		printk(KERN_CRIT "Could not map IMMR\n");
++		return;
++	}
++
++	cpmp = &mpc8xx_immr->im_cpm;
++
++#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
++	/* Perform a reset.
++	*/
++	out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);
++
++	/* Wait for it.
++	*/
++	while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG);
++#endif
++
++#ifdef CONFIG_UCODE_PATCH
++	cpm_load_patch(cpmp);
++#endif
++
++	/* Set SDMA Bus Request priority 5.
++	 * On 860T, this also enables FEC priority 6.  I am not sure
++	 * this is what we really want for some applications, but the
++	 * manual recommends it.
++	 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
++	 */
++	siu_conf = immr_map(im_siu_conf);
++	out_be32(&siu_conf->sc_sdcr, 1);
++	immr_unmap(siu_conf);
++
++#ifdef CONFIG_PPC_CPM_NEW_BINDING
++	cpm_muram_init();
++#else
++	/* Reclaim the DP memory for our use. */
++	m8xx_cpm_dpinit();
++#endif
++}
++
++static DEFINE_SPINLOCK(cmd_lock);
++
++#define MAX_CR_CMD_LOOPS        10000
++
++int cpm_command(u32 command, u8 opcode)
++{
++	int i, ret;
++	unsigned long flags;
++
++	if (command & 0xffffff0f)
++		return -EINVAL;
++
++	spin_lock_irqsave(&cmd_lock, flags);
++
++	ret = 0;
++	out_be16(&cpmp->cp_cpcr, command | CPM_CR_FLG | (opcode << 8));
++	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
++		if ((in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0)
++			goto out;
++
++	printk(KERN_ERR "%s(): Not able to issue CPM command\n", __FUNCTION__);
++	ret = -EIO;
++out:
++	spin_unlock_irqrestore(&cmd_lock, flags);
++	return ret;
++}
++EXPORT_SYMBOL(cpm_command);
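++
++/*
++ * Illustrative use (not part of this patch): a driver asking the CP
++ * to re-initialise a channel's transmit and receive parameters might
++ * do something along the lines of
++ *
++ *	err = cpm_command(chan_cmd, CPM_CR_INIT_TRX);
++ *
++ * where chan_cmd stands for whatever channel encoding the caller
++ * derives for its device.  cpm_command() only accepts the channel
++ * bits, raises CPM_CR_FLG and spins (bounded by MAX_CR_CMD_LOOPS)
++ * until the CP clears the flag again.
++ */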
++
++/* Set a baud rate generator.  This needs lots of work.  There are
++ * four BRGs, any of which can be wired to any channel.
++ * The internal baud rate clock is the system clock divided by 16.
++ * This assumes the baudrate is 16x oversampled by the uart.
++ */
++#define BRG_INT_CLK		(get_brgfreq())
++#define BRG_UART_CLK		(BRG_INT_CLK/16)
++#define BRG_UART_CLK_DIV16	(BRG_UART_CLK/16)
++
++void
++cpm_setbrg(uint brg, uint rate)
++{
++	u32 __iomem *bp;
++
++	/* This is good enough to get SMCs running.....
++	*/
++	bp = &cpmp->cp_brgc1;
++	bp += brg;
++	/* The BRG has a 12-bit counter.  For really slow baud rates (or
++	 * really fast processors), we may have to further divide by 16.
++	 */
++	if (((BRG_UART_CLK / rate) - 1) < 4096)
++		out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
++	else
++		out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
++			      CPM_BRG_EN | CPM_BRG_DIV16);
++}
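++
++/*
++ * Usage sketch (illustration only): a console driver that has wired
++ * SMC1 to BRG1 and wants 9600 baud would call
++ *
++ *	cpm_setbrg(0, 9600);
++ *
++ * brg numbering is zero-based, so index 0 is BRG1.  The divider comes
++ * from get_brgfreq(); only when it would overflow the 12-bit counter
++ * does the routine fall back to the extra divide-by-16 path.
++ */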
++
++#ifndef CONFIG_PPC_CPM_NEW_BINDING
++/*
++ * dpalloc / dpfree bits.
++ */
++static spinlock_t cpm_dpmem_lock;
++/*
++ * 16 blocks should be enough to satisfy all requests
++ * until the memory subsystem goes up...
++ */
++static rh_block_t cpm_boot_dpmem_rh_block[16];
++static rh_info_t cpm_dpmem_info;
++
++#define CPM_DPMEM_ALIGNMENT	8
++static u8 __iomem *dpram_vbase;
++static phys_addr_t dpram_pbase;
++
++static void m8xx_cpm_dpinit(void)
++{
++	spin_lock_init(&cpm_dpmem_lock);
++
++	dpram_vbase = cpmp->cp_dpmem;
++	dpram_pbase = get_immrbase() + offsetof(immap_t, im_cpm.cp_dpmem);
++
++	/* Initialize the info header */
++	rh_init(&cpm_dpmem_info, CPM_DPMEM_ALIGNMENT,
++			sizeof(cpm_boot_dpmem_rh_block) /
++			sizeof(cpm_boot_dpmem_rh_block[0]),
++			cpm_boot_dpmem_rh_block);
++
++	/*
++	 * Attach the usable dpmem area.
++	 * XXX: This is actually crap.  CPM_DATAONLY_BASE and
++	 * CPM_DATAONLY_SIZE are a subset of the available dpram.  It varies
++	 * with the processor and the microcode patches applied / activated.
++	 * But the following should be at least safe.
++	 */
++	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
++}
++
++/*
++ * Allocate the requested size worth of DP memory.
++ * This function returns an offset into the DPRAM area.
++ * Use cpm_dpram_addr() to get the virtual address of the area.
++ */
++unsigned long cpm_dpalloc(uint size, uint align)
++{
++	unsigned long start;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cpm_dpmem_lock, flags);
++	cpm_dpmem_info.alignment = align;
++	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
++	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
++
++	return (uint)start;
++}
++EXPORT_SYMBOL(cpm_dpalloc);
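++
++/*
++ * Typical caller pattern (hypothetical, old binding only): a driver
++ * reserving a descriptor ring in dual-ported RAM would do
++ *
++ *	unsigned long off = cpm_dpalloc(sizeof(cbd_t) * nr_bds, 8);
++ *	cbd_t __iomem *ring = cpm_dpram_addr(off);
++ *
++ * i.e. the allocator returns an offset into DPRAM and
++ * cpm_dpram_addr() below turns it into a virtual address; nr_bds is
++ * a placeholder for the caller's own descriptor count.
++ */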
++
++int cpm_dpfree(unsigned long offset)
++{
++	int ret;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cpm_dpmem_lock, flags);
++	ret = rh_free(&cpm_dpmem_info, offset);
++	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
++
++	return ret;
++}
++EXPORT_SYMBOL(cpm_dpfree);
++
++unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
++{
++	unsigned long start;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cpm_dpmem_lock, flags);
++	cpm_dpmem_info.alignment = align;
++	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
++	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
++
++	return start;
++}
++EXPORT_SYMBOL(cpm_dpalloc_fixed);
++
++void cpm_dpdump(void)
++{
++	rh_dump(&cpm_dpmem_info);
++}
++EXPORT_SYMBOL(cpm_dpdump);
++
++void *cpm_dpram_addr(unsigned long offset)
++{
++	return (void *)(dpram_vbase + offset);
++}
++EXPORT_SYMBOL(cpm_dpram_addr);
++
++uint cpm_dpram_phys(u8 *addr)
++{
++	return (dpram_pbase + (uint)(addr - dpram_vbase));
++}
++EXPORT_SYMBOL(cpm_dpram_phys);
++#endif /* !CONFIG_PPC_CPM_NEW_BINDING */
++
++struct cpm_ioport16 {
++	__be16 dir, par, odr_sor, dat, intr;
++	__be16 res[3];
++};
++
++struct cpm_ioport32 {
++	__be32 dir, par, sor;
++};
++
++static void cpm1_set_pin32(int port, int pin, int flags)
++{
++	struct cpm_ioport32 __iomem *iop;
++	pin = 1 << (31 - pin);
++
++	if (port == CPM_PORTB)
++		iop = (struct cpm_ioport32 __iomem *)
++		      &mpc8xx_immr->im_cpm.cp_pbdir;
++	else
++		iop = (struct cpm_ioport32 __iomem *)
++		      &mpc8xx_immr->im_cpm.cp_pedir;
++
++	if (flags & CPM_PIN_OUTPUT)
++		setbits32(&iop->dir, pin);
++	else
++		clrbits32(&iop->dir, pin);
++
++	if (!(flags & CPM_PIN_GPIO))
++		setbits32(&iop->par, pin);
++	else
++		clrbits32(&iop->par, pin);
++
++	if (port == CPM_PORTB) {
++		if (flags & CPM_PIN_OPENDRAIN)
++			setbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
++		else
++			clrbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
++	}
++
++	if (port == CPM_PORTE) {
++		if (flags & CPM_PIN_SECONDARY)
++			setbits32(&iop->sor, pin);
++		else
++			clrbits32(&iop->sor, pin);
++
++		if (flags & CPM_PIN_OPENDRAIN)
++			setbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
++		else
++			clrbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
++	}
++}
++
++static void cpm1_set_pin16(int port, int pin, int flags)
++{
++	struct cpm_ioport16 __iomem *iop =
++		(struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport;
++
++	pin = 1 << (15 - pin);
++
++	if (port != 0)
++		iop += port - 1;
++
++	if (flags & CPM_PIN_OUTPUT)
++		setbits16(&iop->dir, pin);
++	else
++		clrbits16(&iop->dir, pin);
++
++	if (!(flags & CPM_PIN_GPIO))
++		setbits16(&iop->par, pin);
++	else
++		clrbits16(&iop->par, pin);
++
++	if (port == CPM_PORTA) {
++		if (flags & CPM_PIN_OPENDRAIN)
++			setbits16(&iop->odr_sor, pin);
++		else
++			clrbits16(&iop->odr_sor, pin);
++	}
++	if (port == CPM_PORTC) {
++		if (flags & CPM_PIN_SECONDARY)
++			setbits16(&iop->odr_sor, pin);
++		else
++			clrbits16(&iop->odr_sor, pin);
++	}
++}
++
++void cpm1_set_pin(enum cpm_port port, int pin, int flags)
++{
++	if (port == CPM_PORTB || port == CPM_PORTE)
++		cpm1_set_pin32(port, pin, flags);
++	else
++		cpm1_set_pin16(port, pin, flags);
++}
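++
++/*
++ * Example (illustrative, not from this patch): board setup code that
++ * hands port A pin 14 to its peripheral function as an output would
++ * call
++ *
++ *	cpm1_set_pin(CPM_PORTA, 14, CPM_PIN_OUTPUT);
++ *
++ * Ports B and E take the 32-bit path above, the remaining ports the
++ * 16-bit one; passing CPM_PIN_GPIO instead keeps the pin on the
++ * general-purpose side rather than assigning it to the peripheral.
++ */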
++
++int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
++{
++	int shift;
++	int i, bits = 0;
++	u32 __iomem *reg;
++	u32 mask = 7;
++
++	u8 clk_map[][3] = {
++		{CPM_CLK_SCC1, CPM_BRG1, 0},
++		{CPM_CLK_SCC1, CPM_BRG2, 1},
++		{CPM_CLK_SCC1, CPM_BRG3, 2},
++		{CPM_CLK_SCC1, CPM_BRG4, 3},
++		{CPM_CLK_SCC1, CPM_CLK1, 4},
++		{CPM_CLK_SCC1, CPM_CLK2, 5},
++		{CPM_CLK_SCC1, CPM_CLK3, 6},
++		{CPM_CLK_SCC1, CPM_CLK4, 7},
++
++		{CPM_CLK_SCC2, CPM_BRG1, 0},
++		{CPM_CLK_SCC2, CPM_BRG2, 1},
++		{CPM_CLK_SCC2, CPM_BRG3, 2},
++		{CPM_CLK_SCC2, CPM_BRG4, 3},
++		{CPM_CLK_SCC2, CPM_CLK1, 4},
++		{CPM_CLK_SCC2, CPM_CLK2, 5},
++		{CPM_CLK_SCC2, CPM_CLK3, 6},
++		{CPM_CLK_SCC2, CPM_CLK4, 7},
++
++		{CPM_CLK_SCC3, CPM_BRG1, 0},
++		{CPM_CLK_SCC3, CPM_BRG2, 1},
++		{CPM_CLK_SCC3, CPM_BRG3, 2},
++		{CPM_CLK_SCC3, CPM_BRG4, 3},
++		{CPM_CLK_SCC3, CPM_CLK5, 4},
++		{CPM_CLK_SCC3, CPM_CLK6, 5},
++		{CPM_CLK_SCC3, CPM_CLK7, 6},
++		{CPM_CLK_SCC3, CPM_CLK8, 7},
++
++		{CPM_CLK_SCC4, CPM_BRG1, 0},
++		{CPM_CLK_SCC4, CPM_BRG2, 1},
++		{CPM_CLK_SCC4, CPM_BRG3, 2},
++		{CPM_CLK_SCC4, CPM_BRG4, 3},
++		{CPM_CLK_SCC4, CPM_CLK5, 4},
++		{CPM_CLK_SCC4, CPM_CLK6, 5},
++		{CPM_CLK_SCC4, CPM_CLK7, 6},
++		{CPM_CLK_SCC4, CPM_CLK8, 7},
++
++		{CPM_CLK_SMC1, CPM_BRG1, 0},
++		{CPM_CLK_SMC1, CPM_BRG2, 1},
++		{CPM_CLK_SMC1, CPM_BRG3, 2},
++		{CPM_CLK_SMC1, CPM_BRG4, 3},
++		{CPM_CLK_SMC1, CPM_CLK1, 4},
++		{CPM_CLK_SMC1, CPM_CLK2, 5},
++		{CPM_CLK_SMC1, CPM_CLK3, 6},
++		{CPM_CLK_SMC1, CPM_CLK4, 7},
++
++		{CPM_CLK_SMC2, CPM_BRG1, 0},
++		{CPM_CLK_SMC2, CPM_BRG2, 1},
++		{CPM_CLK_SMC2, CPM_BRG3, 2},
++		{CPM_CLK_SMC2, CPM_BRG4, 3},
++		{CPM_CLK_SMC2, CPM_CLK5, 4},
++		{CPM_CLK_SMC2, CPM_CLK6, 5},
++		{CPM_CLK_SMC2, CPM_CLK7, 6},
++		{CPM_CLK_SMC2, CPM_CLK8, 7},
++	};
++
++	switch (target) {
++	case CPM_CLK_SCC1:
++		reg = &mpc8xx_immr->im_cpm.cp_sicr;
++		shift = 0;
++		break;
++
++	case CPM_CLK_SCC2:
++		reg = &mpc8xx_immr->im_cpm.cp_sicr;
++		shift = 8;
++		break;
++
++	case CPM_CLK_SCC3:
++		reg = &mpc8xx_immr->im_cpm.cp_sicr;
++		shift = 16;
++		break;
++
++	case CPM_CLK_SCC4:
++		reg = &mpc8xx_immr->im_cpm.cp_sicr;
++		shift = 24;
++		break;
++
++	case CPM_CLK_SMC1:
++		reg = &mpc8xx_immr->im_cpm.cp_simode;
++		shift = 12;
++		break;
++
++	case CPM_CLK_SMC2:
++		reg = &mpc8xx_immr->im_cpm.cp_simode;
++		shift = 28;
++		break;
++
++	default:
++		printk(KERN_ERR "cpm1_clock_setup: invalid clock target\n");
++		return -EINVAL;
++	}
++
++	if (reg == &mpc8xx_immr->im_cpm.cp_sicr && mode == CPM_CLK_RX)
++		shift += 3;
++
++	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
++		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
++			bits = clk_map[i][2];
++			break;
++		}
++	}
++
++	if (i == ARRAY_SIZE(clk_map)) {
++		printk(KERN_ERR "cpm1_clock_setup: invalid clock combination\n");
++		return -EINVAL;
++	}
++
++	bits <<= shift;
++	mask <<= shift;
++	out_be32(reg, (in_be32(reg) & ~mask) | bits);
++
++	return 0;
++}
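++
++/*
++ * Illustrative call (hypothetical board code): clocking SCC1's
++ * transmitter from BRG1 would look like
++ *
++ *	cpm1_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX);
++ *
++ * The routine looks the (target, clock) pair up in clk_map and then
++ * rewrites the matching 3-bit field of SICR or SIMODE; RX selections
++ * for the SCCs land three bits higher in the same register.
++ */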
+diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
+new file mode 100644
+index 0000000..7be7112
+--- /dev/null
++++ b/arch/powerpc/sysdev/cpm2.c
+@@ -0,0 +1,469 @@
++/*
++ * General Purpose functions for the global management of the
++ * 8260 Communication Processor Module.
++ * Copyright (c) 1999-2001 Dan Malek <dan at embeddedalley.com>
++ * Copyright (c) 2000 MontaVista Software, Inc (source at mvista.com)
++ *	2.3.99 Updates
++ *
++ * 2006 (c) MontaVista Software, Inc.
++ * Vitaly Bordug <vbordug at ru.mvista.com>
++ * 	Merged to arch/powerpc from arch/ppc/syslib/cpm2_common.c
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++/*
++ *
++ * In addition to the individual control of the communication
++ * channels, there are a few functions that globally affect the
++ * communication processor.
++ *
++ * Buffer descriptors must be allocated from the dual ported memory
++ * space.  The allocator for that is here.  When the communication
++ * process is reset, we reclaim the memory available.  There is
++ * currently no deallocator for this memory.
++ */
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/of.h>
++
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/mpc8260.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/cpm2.h>
++#include <asm/rheap.h>
++#include <asm/fs_pd.h>
++
++#include <sysdev/fsl_soc.h>
++
++#ifndef CONFIG_PPC_CPM_NEW_BINDING
++static void cpm2_dpinit(void);
++#endif
++
++cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor space */
++
++/* We allocate this here because it is used almost exclusively for
++ * the communication processor devices.
++ */
++cpm2_map_t __iomem *cpm2_immr;
++
++#define CPM_MAP_SIZE	(0x40000)	/* 256k - the PQ3 reserves this amount
++					   of space for CPM as it is larger
++					   than on PQ2 */
++
++void __init cpm2_reset(void)
++{
++#ifdef CONFIG_PPC_85xx
++	cpm2_immr = ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
++#else
++	cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
++#endif
++
++	/* Reclaim the DP memory for our use.
++	 */
++#ifdef CONFIG_PPC_CPM_NEW_BINDING
++	cpm_muram_init();
++#else
++	cpm2_dpinit();
++#endif
++
++	/* Tell everyone where the comm processor resides.
++	 */
++	cpmp = &cpm2_immr->im_cpm;
++}
++
++static DEFINE_SPINLOCK(cmd_lock);
++
++#define MAX_CR_CMD_LOOPS        10000
++
++int cpm_command(u32 command, u8 opcode)
++{
++	int i, ret;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cmd_lock, flags);
++
++	ret = 0;
++	out_be32(&cpmp->cp_cpcr, command | opcode | CPM_CR_FLG);
++	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
++		if ((in_be32(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0)
++			goto out;
++
++	printk(KERN_ERR "%s(): Not able to issue CPM command\n", __FUNCTION__);
++	ret = -EIO;
++out:
++	spin_unlock_irqrestore(&cmd_lock, flags);
++	return ret;
++}
++EXPORT_SYMBOL(cpm_command);
++
++/* Set a baud rate generator.  This needs lots of work.  There are
++ * eight BRGs, which can be connected to the CPM channels or output
++ * as clocks.  The BRGs are in two different blocks of internal
++ * memory mapped space.
++ * The baud rate clock is the system clock divided by something.
++ * It was set up long ago during the initial boot phase and is
++ * given to us.
++ * Baud rate clocks are zero-based in the driver code (as that maps
++ * to port numbers).  Documentation uses 1-based numbering.
++ */
++#define BRG_INT_CLK	(get_brgfreq())
++#define BRG_UART_CLK	(BRG_INT_CLK/16)
++
++/* This function is used by UARTS, or anything else that uses a 16x
++ * oversampled clock.
++ */
++void
++cpm_setbrg(uint brg, uint rate)
++{
++	u32 __iomem *bp;
++
++	/* This is good enough to get SMCs running.....
++	*/
++	if (brg < 4) {
++		bp = cpm2_map_size(im_brgc1, 16);
++	} else {
++		bp = cpm2_map_size(im_brgc5, 16);
++		brg -= 4;
++	}
++	bp += brg;
++	out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
++
++	cpm2_unmap(bp);
++}
++
++/* This function is used to set high speed synchronous baud rate
++ * clocks.
++ */
++void
++cpm2_fastbrg(uint brg, uint rate, int div16)
++{
++	u32 __iomem *bp;
++	u32 val;
++
++	if (brg < 4) {
++		bp = cpm2_map_size(im_brgc1, 16);
++	} else {
++		bp = cpm2_map_size(im_brgc5, 16);
++		brg -= 4;
++	}
++	bp += brg;
++	val = ((BRG_INT_CLK / rate) << 1) | CPM_BRG_EN;
++	if (div16)
++		val |= CPM_BRG_DIV16;
++
++	out_be32(bp, val);
++	cpm2_unmap(bp);
++}
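++
++/*
++ * Sketch of intended use (illustration only): a synchronous interface
++ * wanting a 1.5 MHz bit clock from BRG5 without the divide-by-16
++ * prescaler would call
++ *
++ *	cpm2_fastbrg(4, 1500000, 0);
++ *
++ * BRGs 1-4 sit at im_brgc1 and 5-8 at im_brgc5, which is why the
++ * index is folded down by four before the register write.
++ */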
++
++int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
++{
++	int ret = 0;
++	int shift;
++	int i, bits = 0;
++	cpmux_t __iomem *im_cpmux;
++	u32 __iomem *reg;
++	u32 mask = 7;
++
++	u8 clk_map[][3] = {
++		{CPM_CLK_FCC1, CPM_BRG5, 0},
++		{CPM_CLK_FCC1, CPM_BRG6, 1},
++		{CPM_CLK_FCC1, CPM_BRG7, 2},
++		{CPM_CLK_FCC1, CPM_BRG8, 3},
++		{CPM_CLK_FCC1, CPM_CLK9, 4},
++		{CPM_CLK_FCC1, CPM_CLK10, 5},
++		{CPM_CLK_FCC1, CPM_CLK11, 6},
++		{CPM_CLK_FCC1, CPM_CLK12, 7},
++		{CPM_CLK_FCC2, CPM_BRG5, 0},
++		{CPM_CLK_FCC2, CPM_BRG6, 1},
++		{CPM_CLK_FCC2, CPM_BRG7, 2},
++		{CPM_CLK_FCC2, CPM_BRG8, 3},
++		{CPM_CLK_FCC2, CPM_CLK13, 4},
++		{CPM_CLK_FCC2, CPM_CLK14, 5},
++		{CPM_CLK_FCC2, CPM_CLK15, 6},
++		{CPM_CLK_FCC2, CPM_CLK16, 7},
++		{CPM_CLK_FCC3, CPM_BRG5, 0},
++		{CPM_CLK_FCC3, CPM_BRG6, 1},
++		{CPM_CLK_FCC3, CPM_BRG7, 2},
++		{CPM_CLK_FCC3, CPM_BRG8, 3},
++		{CPM_CLK_FCC3, CPM_CLK13, 4},
++		{CPM_CLK_FCC3, CPM_CLK14, 5},
++		{CPM_CLK_FCC3, CPM_CLK15, 6},
++		{CPM_CLK_FCC3, CPM_CLK16, 7},
++		{CPM_CLK_SCC1, CPM_BRG1, 0},
++		{CPM_CLK_SCC1, CPM_BRG2, 1},
++		{CPM_CLK_SCC1, CPM_BRG3, 2},
++		{CPM_CLK_SCC1, CPM_BRG4, 3},
++		{CPM_CLK_SCC1, CPM_CLK11, 4},
++		{CPM_CLK_SCC1, CPM_CLK12, 5},
++		{CPM_CLK_SCC1, CPM_CLK3, 6},
++		{CPM_CLK_SCC1, CPM_CLK4, 7},
++		{CPM_CLK_SCC2, CPM_BRG1, 0},
++		{CPM_CLK_SCC2, CPM_BRG2, 1},
++		{CPM_CLK_SCC2, CPM_BRG3, 2},
++		{CPM_CLK_SCC2, CPM_BRG4, 3},
++		{CPM_CLK_SCC2, CPM_CLK11, 4},
++		{CPM_CLK_SCC2, CPM_CLK12, 5},
++		{CPM_CLK_SCC2, CPM_CLK3, 6},
++		{CPM_CLK_SCC2, CPM_CLK4, 7},
++		{CPM_CLK_SCC3, CPM_BRG1, 0},
++		{CPM_CLK_SCC3, CPM_BRG2, 1},
++		{CPM_CLK_SCC3, CPM_BRG3, 2},
++		{CPM_CLK_SCC3, CPM_BRG4, 3},
++		{CPM_CLK_SCC3, CPM_CLK5, 4},
++		{CPM_CLK_SCC3, CPM_CLK6, 5},
++		{CPM_CLK_SCC3, CPM_CLK7, 6},
++		{CPM_CLK_SCC3, CPM_CLK8, 7},
++		{CPM_CLK_SCC4, CPM_BRG1, 0},
++		{CPM_CLK_SCC4, CPM_BRG2, 1},
++		{CPM_CLK_SCC4, CPM_BRG3, 2},
++		{CPM_CLK_SCC4, CPM_BRG4, 3},
++		{CPM_CLK_SCC4, CPM_CLK5, 4},
++		{CPM_CLK_SCC4, CPM_CLK6, 5},
++		{CPM_CLK_SCC4, CPM_CLK7, 6},
++		{CPM_CLK_SCC4, CPM_CLK8, 7},
++	};
++
++	im_cpmux = cpm2_map(im_cpmux);
++
++	switch (target) {
++	case CPM_CLK_SCC1:
++		reg = &im_cpmux->cmx_scr;
++		shift = 24;
++		break;
++	case CPM_CLK_SCC2:
++		reg = &im_cpmux->cmx_scr;
++		shift = 16;
++		break;
++	case CPM_CLK_SCC3:
++		reg = &im_cpmux->cmx_scr;
++		shift = 8;
++		break;
++	case CPM_CLK_SCC4:
++		reg = &im_cpmux->cmx_scr;
++		shift = 0;
++		break;
++	case CPM_CLK_FCC1:
++		reg = &im_cpmux->cmx_fcr;
++		shift = 24;
++		break;
++	case CPM_CLK_FCC2:
++		reg = &im_cpmux->cmx_fcr;
++		shift = 16;
++		break;
++	case CPM_CLK_FCC3:
++		reg = &im_cpmux->cmx_fcr;
++		shift = 8;
++		break;
++	default:
++		printk(KERN_ERR "cpm2_clock_setup: invalid clock target\n");
++		return -EINVAL;
++	}
++
++	if (mode == CPM_CLK_RX)
++		shift += 3;
++
++	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
++		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
++			bits = clk_map[i][2];
++			break;
++		}
++	}
++	if (i == ARRAY_SIZE(clk_map))
++	    ret = -EINVAL;
++
++	bits <<= shift;
++	mask <<= shift;
++
++	out_be32(reg, (in_be32(reg) & ~mask) | bits);
++
++	cpm2_unmap(im_cpmux);
++	return ret;
++}
++
++int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock)
++{
++	int ret = 0;
++	int shift;
++	int i, bits = 0;
++	cpmux_t __iomem *im_cpmux;
++	u8 __iomem *reg;
++	u8 mask = 3;
++
++	u8 clk_map[][3] = {
++		{CPM_CLK_SMC1, CPM_BRG1, 0},
++		{CPM_CLK_SMC1, CPM_BRG7, 1},
++		{CPM_CLK_SMC1, CPM_CLK7, 2},
++		{CPM_CLK_SMC1, CPM_CLK9, 3},
++		{CPM_CLK_SMC2, CPM_BRG2, 0},
++		{CPM_CLK_SMC2, CPM_BRG8, 1},
++		{CPM_CLK_SMC2, CPM_CLK4, 2},
++		{CPM_CLK_SMC2, CPM_CLK15, 3},
++	};
++
++	im_cpmux = cpm2_map(im_cpmux);
++
++	switch (target) {
++	case CPM_CLK_SMC1:
++		reg = &im_cpmux->cmx_smr;
++		mask = 3;
++		shift = 4;
++		break;
++	case CPM_CLK_SMC2:
++		reg = &im_cpmux->cmx_smr;
++		mask = 3;
++		shift = 0;
++		break;
++	default:
++		printk(KERN_ERR "cpm2_smc_clock_setup: invalid clock target\n");
++		return -EINVAL;
++	}
++
++	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
++		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
++			bits = clk_map[i][2];
++			break;
++		}
++	}
++	if (i == ARRAY_SIZE(clk_map))
++	    ret = -EINVAL;
++
++	bits <<= shift;
++	mask <<= shift;
++
++	out_8(reg, (in_8(reg) & ~mask) | bits);
++
++	cpm2_unmap(im_cpmux);
++	return ret;
++}
++
++#ifndef CONFIG_PPC_CPM_NEW_BINDING
++/*
++ * dpalloc / dpfree bits.
++ */
++static spinlock_t cpm_dpmem_lock;
++/* 16 blocks should be enough to satisfy all requests
++ * until the memory subsystem goes up... */
++static rh_block_t cpm_boot_dpmem_rh_block[16];
++static rh_info_t cpm_dpmem_info;
++static u8 __iomem *im_dprambase;
++
++static void cpm2_dpinit(void)
++{
++	spin_lock_init(&cpm_dpmem_lock);
++
++	/* initialize the info header */
++	rh_init(&cpm_dpmem_info, 1,
++			sizeof(cpm_boot_dpmem_rh_block) /
++			sizeof(cpm_boot_dpmem_rh_block[0]),
++			cpm_boot_dpmem_rh_block);
++
++	im_dprambase = cpm2_immr;
++
++	/* Attach the usable dpmem area */
++	/* XXX: This is actually crap. CPM_DATAONLY_BASE and
++	 * CPM_DATAONLY_SIZE is only a subset of the available dpram. It
++	 * varies with the processor and the microcode patches activated.
++	 * But the following should be at least safe.
++	 */
++	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
++}
++
++/* This function returns an index into the DPRAM area.
++ */
++unsigned long cpm_dpalloc(uint size, uint align)
++{
++	unsigned long start;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cpm_dpmem_lock, flags);
++	cpm_dpmem_info.alignment = align;
++	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
++	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
++
++	return (uint)start;
++}
++EXPORT_SYMBOL(cpm_dpalloc);
++
++int cpm_dpfree(unsigned long offset)
++{
++	int ret;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cpm_dpmem_lock, flags);
++	ret = rh_free(&cpm_dpmem_info, offset);
++	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
++
++	return ret;
++}
++EXPORT_SYMBOL(cpm_dpfree);
++
++/* not sure if this is ever needed */
++unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
++{
++	unsigned long start;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cpm_dpmem_lock, flags);
++	cpm_dpmem_info.alignment = align;
++	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
++	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
++
++	return start;
++}
++EXPORT_SYMBOL(cpm_dpalloc_fixed);
++
++void cpm_dpdump(void)
++{
++	rh_dump(&cpm_dpmem_info);
++}
++EXPORT_SYMBOL(cpm_dpdump);
++
++void *cpm_dpram_addr(unsigned long offset)
++{
++	return (void *)(im_dprambase + offset);
++}
++EXPORT_SYMBOL(cpm_dpram_addr);
++#endif /* !CONFIG_PPC_CPM_NEW_BINDING */
++
++struct cpm2_ioports {
++	u32 dir, par, sor, odr, dat;
++	u32 res[3];
++};
++
++void cpm2_set_pin(int port, int pin, int flags)
++{
++	struct cpm2_ioports __iomem *iop =
++		(struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport;
++
++	pin = 1 << (31 - pin);
++
++	if (flags & CPM_PIN_OUTPUT)
++		setbits32(&iop[port].dir, pin);
++	else
++		clrbits32(&iop[port].dir, pin);
++
++	if (!(flags & CPM_PIN_GPIO))
++		setbits32(&iop[port].par, pin);
++	else
++		clrbits32(&iop[port].par, pin);
++
++	if (flags & CPM_PIN_SECONDARY)
++		setbits32(&iop[port].sor, pin);
++	else
++		clrbits32(&iop[port].sor, pin);
++
++	if (flags & CPM_PIN_OPENDRAIN)
++		setbits32(&iop[port].odr, pin);
++	else
++		clrbits32(&iop[port].odr, pin);
++}
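++
++/*
++ * Illustrative example (not part of the patch): making port D pin 9
++ * an open-drain peripheral output on a CPM2 part would be
++ *
++ *	cpm2_set_pin(3, 9, CPM_PIN_OUTPUT | CPM_PIN_OPENDRAIN);
++ *
++ * Ports A..D are indexed 0..3 here, and the dir/par/sor/odr registers
++ * for each port are reached by indexing the I/O port block off
++ * cpm2_immr.
++ */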
+diff --git a/arch/powerpc/sysdev/cpm2_common.c b/arch/powerpc/sysdev/cpm2_common.c
+deleted file mode 100644
+index c1d8240..0000000
+--- a/arch/powerpc/sysdev/cpm2_common.c
++++ /dev/null
+@@ -1,445 +0,0 @@
+-/*
+- * General Purpose functions for the global management of the
+- * 8260 Communication Processor Module.
+- * Copyright (c) 1999-2001 Dan Malek <dan at embeddedalley.com>
+- * Copyright (c) 2000 MontaVista Software, Inc (source at mvista.com)
+- *	2.3.99 Updates
+- *
+- * 2006 (c) MontaVista Software, Inc.
+- * Vitaly Bordug <vbordug at ru.mvista.com>
+- * 	Merged to arch/powerpc from arch/ppc/syslib/cpm2_common.c
+- *
+- * This file is licensed under the terms of the GNU General Public License
+- * version 2. This program is licensed "as is" without any warranty of any
+- * kind, whether express or implied.
+- */
+-
+-/*
+- *
+- * In addition to the individual control of the communication
+- * channels, there are a few functions that globally affect the
+- * communication processor.
+- *
+- * Buffer descriptors must be allocated from the dual ported memory
+- * space.  The allocator for that is here.  When the communication
+- * process is reset, we reclaim the memory available.  There is
+- * currently no deallocator for this memory.
+- */
+-#include <linux/errno.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/param.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
+-#include <linux/interrupt.h>
+-#include <linux/module.h>
+-#include <linux/of.h>
+-
+-#include <asm/io.h>
+-#include <asm/irq.h>
+-#include <asm/mpc8260.h>
+-#include <asm/page.h>
+-#include <asm/pgtable.h>
+-#include <asm/cpm2.h>
+-#include <asm/rheap.h>
+-#include <asm/fs_pd.h>
+-
+-#include <sysdev/fsl_soc.h>
+-
+-#ifndef CONFIG_PPC_CPM_NEW_BINDING
+-static void cpm2_dpinit(void);
+-#endif
+-
+-cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor space */
+-
+-/* We allocate this here because it is used almost exclusively for
+- * the communication processor devices.
+- */
+-cpm2_map_t __iomem *cpm2_immr;
+-
+-#define CPM_MAP_SIZE	(0x40000)	/* 256k - the PQ3 reserve this amount
+-					   of space for CPM as it is larger
+-					   than on PQ2 */
+-
+-void __init cpm2_reset(void)
+-{
+-#ifdef CONFIG_PPC_85xx
+-	cpm2_immr = ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
+-#else
+-	cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
+-#endif
+-
+-	/* Reclaim the DP memory for our use.
+-	 */
+-#ifdef CONFIG_PPC_CPM_NEW_BINDING
+-	cpm_muram_init();
+-#else
+-	cpm2_dpinit();
+-#endif
+-
+-	/* Tell everyone where the comm processor resides.
+-	 */
+-	cpmp = &cpm2_immr->im_cpm;
+-}
+-
+-/* Set a baud rate generator.  This needs lots of work.  There are
+- * eight BRGs, which can be connected to the CPM channels or output
+- * as clocks.  The BRGs are in two different block of internal
+- * memory mapped space.
+- * The baud rate clock is the system clock divided by something.
+- * It was set up long ago during the initial boot phase and is
+- * is given to us.
+- * Baud rate clocks are zero-based in the driver code (as that maps
+- * to port numbers).  Documentation uses 1-based numbering.
+- */
+-#define BRG_INT_CLK	(get_brgfreq())
+-#define BRG_UART_CLK	(BRG_INT_CLK/16)
+-
+-/* This function is used by UARTS, or anything else that uses a 16x
+- * oversampled clock.
+- */
+-void
+-cpm_setbrg(uint brg, uint rate)
+-{
+-	u32 __iomem *bp;
+-
+-	/* This is good enough to get SMCs running.....
+-	*/
+-	if (brg < 4) {
+-		bp = cpm2_map_size(im_brgc1, 16);
+-	} else {
+-		bp = cpm2_map_size(im_brgc5, 16);
+-		brg -= 4;
+-	}
+-	bp += brg;
+-	out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
+-
+-	cpm2_unmap(bp);
+-}
+-
+-/* This function is used to set high speed synchronous baud rate
+- * clocks.
+- */
+-void
+-cpm2_fastbrg(uint brg, uint rate, int div16)
+-{
+-	u32 __iomem *bp;
+-	u32 val;
+-
+-	if (brg < 4) {
+-		bp = cpm2_map_size(im_brgc1, 16);
+-	}
+-	else {
+-		bp = cpm2_map_size(im_brgc5, 16);
+-		brg -= 4;
+-	}
+-	bp += brg;
+-	val = ((BRG_INT_CLK / rate) << 1) | CPM_BRG_EN;
+-	if (div16)
+-		val |= CPM_BRG_DIV16;
+-
+-	out_be32(bp, val);
+-	cpm2_unmap(bp);
+-}
+-
+-int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
+-{
+-	int ret = 0;
+-	int shift;
+-	int i, bits = 0;
+-	cpmux_t __iomem *im_cpmux;
+-	u32 __iomem *reg;
+-	u32 mask = 7;
+-
+-	u8 clk_map[][3] = {
+-		{CPM_CLK_FCC1, CPM_BRG5, 0},
+-		{CPM_CLK_FCC1, CPM_BRG6, 1},
+-		{CPM_CLK_FCC1, CPM_BRG7, 2},
+-		{CPM_CLK_FCC1, CPM_BRG8, 3},
+-		{CPM_CLK_FCC1, CPM_CLK9, 4},
+-		{CPM_CLK_FCC1, CPM_CLK10, 5},
+-		{CPM_CLK_FCC1, CPM_CLK11, 6},
+-		{CPM_CLK_FCC1, CPM_CLK12, 7},
+-		{CPM_CLK_FCC2, CPM_BRG5, 0},
+-		{CPM_CLK_FCC2, CPM_BRG6, 1},
+-		{CPM_CLK_FCC2, CPM_BRG7, 2},
+-		{CPM_CLK_FCC2, CPM_BRG8, 3},
+-		{CPM_CLK_FCC2, CPM_CLK13, 4},
+-		{CPM_CLK_FCC2, CPM_CLK14, 5},
+-		{CPM_CLK_FCC2, CPM_CLK15, 6},
+-		{CPM_CLK_FCC2, CPM_CLK16, 7},
+-		{CPM_CLK_FCC3, CPM_BRG5, 0},
+-		{CPM_CLK_FCC3, CPM_BRG6, 1},
+-		{CPM_CLK_FCC3, CPM_BRG7, 2},
+-		{CPM_CLK_FCC3, CPM_BRG8, 3},
+-		{CPM_CLK_FCC3, CPM_CLK13, 4},
+-		{CPM_CLK_FCC3, CPM_CLK14, 5},
+-		{CPM_CLK_FCC3, CPM_CLK15, 6},
+-		{CPM_CLK_FCC3, CPM_CLK16, 7},
+-		{CPM_CLK_SCC1, CPM_BRG1, 0},
+-		{CPM_CLK_SCC1, CPM_BRG2, 1},
+-		{CPM_CLK_SCC1, CPM_BRG3, 2},
+-		{CPM_CLK_SCC1, CPM_BRG4, 3},
+-		{CPM_CLK_SCC1, CPM_CLK11, 4},
+-		{CPM_CLK_SCC1, CPM_CLK12, 5},
+-		{CPM_CLK_SCC1, CPM_CLK3, 6},
+-		{CPM_CLK_SCC1, CPM_CLK4, 7},
+-		{CPM_CLK_SCC2, CPM_BRG1, 0},
+-		{CPM_CLK_SCC2, CPM_BRG2, 1},
+-		{CPM_CLK_SCC2, CPM_BRG3, 2},
+-		{CPM_CLK_SCC2, CPM_BRG4, 3},
+-		{CPM_CLK_SCC2, CPM_CLK11, 4},
+-		{CPM_CLK_SCC2, CPM_CLK12, 5},
+-		{CPM_CLK_SCC2, CPM_CLK3, 6},
+-		{CPM_CLK_SCC2, CPM_CLK4, 7},
+-		{CPM_CLK_SCC3, CPM_BRG1, 0},
+-		{CPM_CLK_SCC3, CPM_BRG2, 1},
+-		{CPM_CLK_SCC3, CPM_BRG3, 2},
+-		{CPM_CLK_SCC3, CPM_BRG4, 3},
+-		{CPM_CLK_SCC3, CPM_CLK5, 4},
+-		{CPM_CLK_SCC3, CPM_CLK6, 5},
+-		{CPM_CLK_SCC3, CPM_CLK7, 6},
+-		{CPM_CLK_SCC3, CPM_CLK8, 7},
+-		{CPM_CLK_SCC4, CPM_BRG1, 0},
+-		{CPM_CLK_SCC4, CPM_BRG2, 1},
+-		{CPM_CLK_SCC4, CPM_BRG3, 2},
+-		{CPM_CLK_SCC4, CPM_BRG4, 3},
+-		{CPM_CLK_SCC4, CPM_CLK5, 4},
+-		{CPM_CLK_SCC4, CPM_CLK6, 5},
+-		{CPM_CLK_SCC4, CPM_CLK7, 6},
+-		{CPM_CLK_SCC4, CPM_CLK8, 7},
+-	};
+-
+-	im_cpmux = cpm2_map(im_cpmux);
+-
+-	switch (target) {
+-	case CPM_CLK_SCC1:
+-		reg = &im_cpmux->cmx_scr;
+-		shift = 24;
+-	case CPM_CLK_SCC2:
+-		reg = &im_cpmux->cmx_scr;
+-		shift = 16;
+-		break;
+-	case CPM_CLK_SCC3:
+-		reg = &im_cpmux->cmx_scr;
+-		shift = 8;
+-		break;
+-	case CPM_CLK_SCC4:
+-		reg = &im_cpmux->cmx_scr;
+-		shift = 0;
+-		break;
+-	case CPM_CLK_FCC1:
+-		reg = &im_cpmux->cmx_fcr;
+-		shift = 24;
+-		break;
+-	case CPM_CLK_FCC2:
+-		reg = &im_cpmux->cmx_fcr;
+-		shift = 16;
+-		break;
+-	case CPM_CLK_FCC3:
+-		reg = &im_cpmux->cmx_fcr;
+-		shift = 8;
+-		break;
+-	default:
+-		printk(KERN_ERR "cpm2_clock_setup: invalid clock target\n");
+-		return -EINVAL;
+-	}
+-
+-	if (mode == CPM_CLK_RX)
+-		shift += 3;
+-
+-	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
+-		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
+-			bits = clk_map[i][2];
+-			break;
+-		}
+-	}
+-	if (i == ARRAY_SIZE(clk_map))
+-	    ret = -EINVAL;
+-
+-	bits <<= shift;
+-	mask <<= shift;
+-
+-	out_be32(reg, (in_be32(reg) & ~mask) | bits);
+-
+-	cpm2_unmap(im_cpmux);
+-	return ret;
+-}
+-
+-int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock)
+-{
+-	int ret = 0;
+-	int shift;
+-	int i, bits = 0;
+-	cpmux_t __iomem *im_cpmux;
+-	u8 __iomem *reg;
+-	u8 mask = 3;
+-
+-	u8 clk_map[][3] = {
+-		{CPM_CLK_SMC1, CPM_BRG1, 0},
+-		{CPM_CLK_SMC1, CPM_BRG7, 1},
+-		{CPM_CLK_SMC1, CPM_CLK7, 2},
+-		{CPM_CLK_SMC1, CPM_CLK9, 3},
+-		{CPM_CLK_SMC2, CPM_BRG2, 0},
+-		{CPM_CLK_SMC2, CPM_BRG8, 1},
+-		{CPM_CLK_SMC2, CPM_CLK4, 2},
+-		{CPM_CLK_SMC2, CPM_CLK15, 3},
+-	};
+-
+-	im_cpmux = cpm2_map(im_cpmux);
+-
+-	switch (target) {
+-	case CPM_CLK_SMC1:
+-		reg = &im_cpmux->cmx_smr;
+-		mask = 3;
+-		shift = 4;
+-		break;
+-	case CPM_CLK_SMC2:
+-		reg = &im_cpmux->cmx_smr;
+-		mask = 3;
+-		shift = 0;
+-		break;
+-	default:
+-		printk(KERN_ERR "cpm2_smc_clock_setup: invalid clock target\n");
+-		return -EINVAL;
+-	}
+-
+-	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
+-		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
+-			bits = clk_map[i][2];
+-			break;
+-		}
+-	}
+-	if (i == ARRAY_SIZE(clk_map))
+-	    ret = -EINVAL;
+-
+-	bits <<= shift;
+-	mask <<= shift;
+-
+-	out_8(reg, (in_8(reg) & ~mask) | bits);
+-
+-	cpm2_unmap(im_cpmux);
+-	return ret;
+-}
+-
+-#ifndef CONFIG_PPC_CPM_NEW_BINDING
+-/*
+- * dpalloc / dpfree bits.
+- */
+-static spinlock_t cpm_dpmem_lock;
+-/* 16 blocks should be enough to satisfy all requests
+- * until the memory subsystem goes up... */
+-static rh_block_t cpm_boot_dpmem_rh_block[16];
+-static rh_info_t cpm_dpmem_info;
+-static u8 __iomem *im_dprambase;
+-
+-static void cpm2_dpinit(void)
+-{
+-	spin_lock_init(&cpm_dpmem_lock);
+-
+-	/* initialize the info header */
+-	rh_init(&cpm_dpmem_info, 1,
+-			sizeof(cpm_boot_dpmem_rh_block) /
+-			sizeof(cpm_boot_dpmem_rh_block[0]),
+-			cpm_boot_dpmem_rh_block);
+-
+-	im_dprambase = cpm2_immr;
+-
+-	/* Attach the usable dpmem area */
+-	/* XXX: This is actually crap. CPM_DATAONLY_BASE and
+-	 * CPM_DATAONLY_SIZE is only a subset of the available dpram. It
+-	 * varies with the processor and the microcode patches activated.
+-	 * But the following should be at least safe.
+-	 */
+-	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
+-}
+-
+-/* This function returns an index into the DPRAM area.
+- */
+-unsigned long cpm_dpalloc(uint size, uint align)
+-{
+-	unsigned long start;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cpm_dpmem_lock, flags);
+-	cpm_dpmem_info.alignment = align;
+-	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
+-	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
+-
+-	return (uint)start;
+-}
+-EXPORT_SYMBOL(cpm_dpalloc);
+-
+-int cpm_dpfree(unsigned long offset)
+-{
+-	int ret;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cpm_dpmem_lock, flags);
+-	ret = rh_free(&cpm_dpmem_info, offset);
+-	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
+-
+-	return ret;
+-}
+-EXPORT_SYMBOL(cpm_dpfree);
+-
+-/* not sure if this is ever needed */
+-unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
+-{
+-	unsigned long start;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cpm_dpmem_lock, flags);
+-	cpm_dpmem_info.alignment = align;
+-	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
+-	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
+-
+-	return start;
+-}
+-EXPORT_SYMBOL(cpm_dpalloc_fixed);
+-
+-void cpm_dpdump(void)
+-{
+-	rh_dump(&cpm_dpmem_info);
+-}
+-EXPORT_SYMBOL(cpm_dpdump);
+-
+-void *cpm_dpram_addr(unsigned long offset)
+-{
+-	return (void *)(im_dprambase + offset);
+-}
+-EXPORT_SYMBOL(cpm_dpram_addr);
+-#endif /* !CONFIG_PPC_CPM_NEW_BINDING */
+-
+-struct cpm2_ioports {
+-	u32 dir, par, sor, odr, dat;
+-	u32 res[3];
+-};
+-
+-void cpm2_set_pin(int port, int pin, int flags)
+-{
+-	struct cpm2_ioports __iomem *iop =
+-		(struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport;
+-
+-	pin = 1 << (31 - pin);
+-
+-	if (flags & CPM_PIN_OUTPUT)
+-		setbits32(&iop[port].dir, pin);
+-	else
+-		clrbits32(&iop[port].dir, pin);
+-
+-	if (!(flags & CPM_PIN_GPIO))
+-		setbits32(&iop[port].par, pin);
+-	else
+-		clrbits32(&iop[port].par, pin);
+-
+-	if (flags & CPM_PIN_SECONDARY)
+-		setbits32(&iop[port].sor, pin);
+-	else
+-		clrbits32(&iop[port].sor, pin);
+-
+-	if (flags & CPM_PIN_OPENDRAIN)
+-		setbits32(&iop[port].odr, pin);
+-	else
+-		clrbits32(&iop[port].odr, pin);
+-}
+diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
+index 33df4c3..bf13c21 100644
+--- a/arch/powerpc/sysdev/fsl_pci.c
++++ b/arch/powerpc/sysdev/fsl_pci.c
+@@ -33,8 +33,8 @@ void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
+ 	struct ccsr_pci __iomem *pci;
+ 	int i;
+ 
+-	pr_debug("PCI memory map start 0x%x, size 0x%x\n", rsrc->start,
+-			rsrc->end - rsrc->start + 1);
++	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
++		    (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1);
+ 	pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);
+ 
+ 	/* Disable all windows (except powar0 since its ignored) */
+@@ -46,17 +46,17 @@ void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
+ 	/* Setup outbound MEM window */
+ 	for(i = 0; i < 3; i++)
+ 		if (hose->mem_resources[i].flags & IORESOURCE_MEM){
+-			pr_debug("PCI MEM resource start 0x%08x, size 0x%08x.\n",
+-				hose->mem_resources[i].start,
+-				hose->mem_resources[i].end
+-				  - hose->mem_resources[i].start + 1);
+-			out_be32(&pci->pow[i+1].potar,
+-				(hose->mem_resources[i].start >> 12)
+-				& 0x000fffff);
++			resource_size_t pci_addr_start =
++				 hose->mem_resources[i].start -
++				 hose->pci_mem_offset;
++			pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
++				(u64)hose->mem_resources[i].start,
++				(u64)hose->mem_resources[i].end
++				  - (u64)hose->mem_resources[i].start + 1);
++			out_be32(&pci->pow[i+1].potar, (pci_addr_start >> 12));
+ 			out_be32(&pci->pow[i+1].potear, 0);
+ 			out_be32(&pci->pow[i+1].powbar,
+-				(hose->mem_resources[i].start >> 12)
+-				& 0x000fffff);
++				(hose->mem_resources[i].start >> 12));
+ 			/* Enable, Mem R/W */
+ 			out_be32(&pci->pow[i+1].powar, 0x80044000
+ 				| (__ilog2(hose->mem_resources[i].end
+@@ -65,15 +65,14 @@ void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
+ 
+ 	/* Setup outbound IO window */
+ 	if (hose->io_resource.flags & IORESOURCE_IO){
+-		pr_debug("PCI IO resource start 0x%08x, size 0x%08x, phy base 0x%08x.\n",
+-			hose->io_resource.start,
+-			hose->io_resource.end - hose->io_resource.start + 1,
+-			hose->io_base_phys);
+-		out_be32(&pci->pow[i+1].potar, (hose->io_resource.start >> 12)
+-				& 0x000fffff);
++		pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
++			 "phy base 0x%016llx.\n",
++			(u64)hose->io_resource.start,
++			(u64)hose->io_resource.end - (u64)hose->io_resource.start + 1,
++			(u64)hose->io_base_phys);
++		out_be32(&pci->pow[i+1].potar, (hose->io_resource.start >> 12));
+ 		out_be32(&pci->pow[i+1].potear, 0);
+-		out_be32(&pci->pow[i+1].powbar, (hose->io_base_phys >> 12)
+-				& 0x000fffff);
++		out_be32(&pci->pow[i+1].powbar, (hose->io_base_phys >> 12));
+ 		/* Enable, IO R/W */
+ 		out_be32(&pci->pow[i+1].powar, 0x80088000
+ 			| (__ilog2(hose->io_resource.end
+@@ -107,55 +106,17 @@ void __init setup_pci_cmd(struct pci_controller *hose)
+ 	}
+ }
+ 
+-static void __init quirk_fsl_pcie_transparent(struct pci_dev *dev)
+-{
+-	struct resource *res;
+-	int i, res_idx = PCI_BRIDGE_RESOURCES;
+-	struct pci_controller *hose;
++static int fsl_pcie_bus_fixup;
+ 
++static void __init quirk_fsl_pcie_header(struct pci_dev *dev)
++{
+ 	/* if we aren't a PCIe don't bother */
+ 	if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
+ 		return ;
+ 
+-	/*
+-	 * Make the bridge be transparent.
+-	 */
+-	dev->transparent = 1;
+-
+-	hose = pci_bus_to_host(dev->bus);
+-	if (!hose) {
+-		printk(KERN_ERR "Can't find hose for bus %d\n",
+-		       dev->bus->number);
+-		return;
+-	}
+-
+-	/* Clear out any of the virtual P2P bridge registers */
+-	pci_write_config_word(dev, PCI_IO_BASE_UPPER16, 0);
+-	pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16, 0);
+-	pci_write_config_byte(dev, PCI_IO_BASE, 0x10);
+-	pci_write_config_byte(dev, PCI_IO_LIMIT, 0);
+-	pci_write_config_word(dev, PCI_MEMORY_BASE, 0x10);
+-	pci_write_config_word(dev, PCI_MEMORY_LIMIT, 0);
+-	pci_write_config_word(dev, PCI_PREF_BASE_UPPER32, 0x0);
+-	pci_write_config_word(dev, PCI_PREF_LIMIT_UPPER32, 0x0);
+-	pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, 0x10);
+-	pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, 0);
+-
+-	if (hose->io_resource.flags) {
+-		res = &dev->resource[res_idx++];
+-		res->start = hose->io_resource.start;
+-		res->end = hose->io_resource.end;
+-		res->flags = hose->io_resource.flags;
+-		update_bridge_resource(dev, res);
+-	}
+-
+-	for (i = 0; i < 3; i++) {
+-		res = &dev->resource[res_idx + i];
+-		res->start = hose->mem_resources[i].start;
+-		res->end = hose->mem_resources[i].end;
+-		res->flags = hose->mem_resources[i].flags;
+-		update_bridge_resource(dev, res);
+-	}
++	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
++	fsl_pcie_bus_fixup = 1;
++	return ;
+ }
+ 
+ int __init fsl_pcie_check_link(struct pci_controller *hose)
+@@ -172,11 +133,24 @@ void fsl_pcibios_fixup_bus(struct pci_bus *bus)
+ 	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
+ 	int i;
+ 
+-	/* deal with bogus pci_bus when we don't have anything connected on PCIe */
+-	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
+-		if (bus->parent) {
+-			for (i = 0; i < 4; ++i)
+-				bus->resource[i] = bus->parent->resource[i];
++	if ((bus->parent == hose->bus) &&
++	    ((fsl_pcie_bus_fixup &&
++	      early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) ||
++	     (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)))
++	{
++		for (i = 0; i < 4; ++i) {
++			struct resource *res = bus->resource[i];
++			struct resource *par = bus->parent->resource[i];
++			if (res) {
++				res->start = 0;
++				res->end   = 0;
++				res->flags = 0;
++			}
++			if (res && par) {
++				res->start = par->start;
++				res->end   = par->end;
++				res->flags = par->flags;
++			}
+ 		}
+ 	}
+ }
+@@ -202,7 +176,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
+ 		printk(KERN_WARNING "Can't get bus-range for %s, assume"
+ 			" bus 0\n", dev->full_name);
+ 
+-	pci_assign_all_buses = 1;
++	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ 	hose = pcibios_alloc_controller(dev);
+ 	if (!hose)
+ 		return -ENOMEM;
+@@ -222,7 +196,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
+ 			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
+ 	}
+ 
+-	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx."
++	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
+ 		"Firmware bus number: %d->%d\n",
+ 		(unsigned long long)rsrc.start, hose->first_busno,
+ 		hose->last_busno);
+@@ -240,23 +214,23 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
+ 	return 0;
+ }
+ 
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8548E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8548, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8543E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8543, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8547E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8545E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8545, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8568E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8568, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8567E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8567, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8533E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8533, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8544E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8544, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8572E, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8572, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_transparent);
+-DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_transparent);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8547E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572E, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header);
++DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header);
+diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
+new file mode 100644
+index 0000000..af2425e
+--- /dev/null
++++ b/arch/powerpc/sysdev/fsl_rio.c
+@@ -0,0 +1,932 @@
++/*
++ * MPC85xx RapidIO support
++ *
++ * Copyright 2005 MontaVista Software, Inc.
++ * Matt Porter <mporter at kernel.crashing.org>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++#include <linux/rio.h>
++#include <linux/rio_drv.h>
++
++#include <asm/io.h>
++
++#define RIO_REGS_BASE		(CCSRBAR + 0xc0000)
++#define RIO_ATMU_REGS_OFFSET	0x10c00
++#define RIO_MSG_REGS_OFFSET	0x11000
++#define RIO_MAINT_WIN_SIZE	0x400000
++#define RIO_DBELL_WIN_SIZE	0x1000
++
++#define RIO_MSG_OMR_MUI		0x00000002
++#define RIO_MSG_OSR_TE		0x00000080
++#define RIO_MSG_OSR_QOI		0x00000020
++#define RIO_MSG_OSR_QFI		0x00000010
++#define RIO_MSG_OSR_MUB		0x00000004
++#define RIO_MSG_OSR_EOMI	0x00000002
++#define RIO_MSG_OSR_QEI		0x00000001
++
++#define RIO_MSG_IMR_MI		0x00000002
++#define RIO_MSG_ISR_TE		0x00000080
++#define RIO_MSG_ISR_QFI		0x00000010
++#define RIO_MSG_ISR_DIQI	0x00000001
++
++#define RIO_MSG_DESC_SIZE	32
++#define RIO_MSG_BUFFER_SIZE	4096
++#define RIO_MIN_TX_RING_SIZE	2
++#define RIO_MAX_TX_RING_SIZE	2048
++#define RIO_MIN_RX_RING_SIZE	2
++#define RIO_MAX_RX_RING_SIZE	2048
++
++#define DOORBELL_DMR_DI		0x00000002
++#define DOORBELL_DSR_TE		0x00000080
++#define DOORBELL_DSR_QFI	0x00000010
++#define DOORBELL_DSR_DIQI	0x00000001
++#define DOORBELL_TID_OFFSET	0x03
++#define DOORBELL_SID_OFFSET	0x05
++#define DOORBELL_INFO_OFFSET	0x06
++
++#define DOORBELL_MESSAGE_SIZE	0x08
++#define DBELL_SID(x)		(*(u8 *)(x + DOORBELL_SID_OFFSET))
++#define DBELL_TID(x)		(*(u8 *)(x + DOORBELL_TID_OFFSET))
++#define DBELL_INF(x)		(*(u16 *)(x + DOORBELL_INFO_OFFSET))
++
++struct rio_atmu_regs {
++	u32 rowtar;
++	u32 pad1;
++	u32 rowbar;
++	u32 pad2;
++	u32 rowar;
++	u32 pad3[3];
++};
++
++struct rio_msg_regs {
++	u32 omr;
++	u32 osr;
++	u32 pad1;
++	u32 odqdpar;
++	u32 pad2;
++	u32 osar;
++	u32 odpr;
++	u32 odatr;
++	u32 odcr;
++	u32 pad3;
++	u32 odqepar;
++	u32 pad4[13];
++	u32 imr;
++	u32 isr;
++	u32 pad5;
++	u32 ifqdpar;
++	u32 pad6;
++	u32 ifqepar;
++	u32 pad7[250];
++	u32 dmr;
++	u32 dsr;
++	u32 pad8;
++	u32 dqdpar;
++	u32 pad9;
++	u32 dqepar;
++	u32 pad10[26];
++	u32 pwmr;
++	u32 pwsr;
++	u32 pad11;
++	u32 pwqbar;
++};
++
++struct rio_tx_desc {
++	u32 res1;
++	u32 saddr;
++	u32 dport;
++	u32 dattr;
++	u32 res2;
++	u32 res3;
++	u32 dwcnt;
++	u32 res4;
++};
++
++static u32 regs_win;
++static struct rio_atmu_regs *atmu_regs;
++static struct rio_atmu_regs *maint_atmu_regs;
++static struct rio_atmu_regs *dbell_atmu_regs;
++static u32 dbell_win;
++static u32 maint_win;
++static struct rio_msg_regs *msg_regs;
++
++static struct rio_dbell_ring {
++	void *virt;
++	dma_addr_t phys;
++} dbell_ring;
++
++static struct rio_msg_tx_ring {
++	void *virt;
++	dma_addr_t phys;
++	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
++	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
++	int tx_slot;
++	int size;
++	void *dev_id;
++} msg_tx_ring;
++
++static struct rio_msg_rx_ring {
++	void *virt;
++	dma_addr_t phys;
++	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
++	int rx_slot;
++	int size;
++	void *dev_id;
++} msg_rx_ring;
++
++/**
++ * mpc85xx_rio_doorbell_send - Send a MPC85xx doorbell message
++ * @index: ID of RapidIO interface
++ * @destid: Destination ID of target device
++ * @data: 16-bit info field of RapidIO doorbell message
++ *
++ * Sends a MPC85xx doorbell message. Returns %0 on success or
++ * %-EINVAL on failure.
++ */
++static int mpc85xx_rio_doorbell_send(int index, u16 destid, u16 data)
++{
++	pr_debug("mpc85xx_doorbell_send: index %d destid %4.4x data %4.4x\n",
++		 index, destid, data);
++	out_be32((void *)&dbell_atmu_regs->rowtar, destid << 22);
++	out_be16((void *)(dbell_win), data);
++
++	return 0;
++}
++
++/**
++ * mpc85xx_local_config_read - Generate a MPC85xx local config space read
++ * @index: ID of RapidIO interface
++ * @offset: Offset into configuration space
++ * @len: Length (in bytes) of the maintenance transaction
++ * @data: Value to be read into
++ *
++ * Generates a MPC85xx local configuration space read. Returns %0 on
++ * success or %-EINVAL on failure.
++ */
++static int mpc85xx_local_config_read(int index, u32 offset, int len, u32 * data)
++{
++	pr_debug("mpc85xx_local_config_read: index %d offset %8.8x\n", index,
++		 offset);
++	*data = in_be32((void *)(regs_win + offset));
++
++	return 0;
++}
++
++/**
++ * mpc85xx_local_config_write - Generate a MPC85xx local config space write
++ * @index: ID of RapidIO interface
++ * @offset: Offset into configuration space
++ * @len: Length (in bytes) of the maintenance transaction
++ * @data: Value to be written
++ *
++ * Generates a MPC85xx local configuration space write. Returns %0 on
++ * success or %-EINVAL on failure.
++ */
++static int mpc85xx_local_config_write(int index, u32 offset, int len, u32 data)
++{
++	pr_debug
++	    ("mpc85xx_local_config_write: index %d offset %8.8x data %8.8x\n",
++	     index, offset, data);
++	out_be32((void *)(regs_win + offset), data);
++
++	return 0;
++}
++
++/**
++ * mpc85xx_rio_config_read - Generate a MPC85xx read maintenance transaction
++ * @index: ID of RapidIO interface
++ * @destid: Destination ID of transaction
++ * @hopcount: Number of hops to target device
++ * @offset: Offset into configuration space
++ * @len: Length (in bytes) of the maintenance transaction
++ * @val: Location to be read into
++ *
++ * Generates a MPC85xx read maintenance transaction. Returns %0 on
++ * success or %-EINVAL on failure.
++ */
++static int
++mpc85xx_rio_config_read(int index, u16 destid, u8 hopcount, u32 offset, int len,
++			u32 * val)
++{
++	u8 *data;
++
++	pr_debug
++	    ("mpc85xx_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
++	     index, destid, hopcount, offset, len);
++	out_be32((void *)&maint_atmu_regs->rowtar,
++		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
++
++	data = (u8 *) maint_win + offset;
++	switch (len) {
++	case 1:
++		*val = in_8((u8 *) data);
++		break;
++	case 2:
++		*val = in_be16((u16 *) data);
++		break;
++	default:
++		*val = in_be32((u32 *) data);
++		break;
++	}
++
++	return 0;
++}
++
++/**
++ * mpc85xx_rio_config_write - Generate a MPC85xx write maintenance transaction
++ * @index: ID of RapidIO interface
++ * @destid: Destination ID of transaction
++ * @hopcount: Number of hops to target device
++ * @offset: Offset into configuration space
++ * @len: Length (in bytes) of the maintenance transaction
++ * @val: Value to be written
++ *
++ * Generates an MPC85xx write maintenance transaction. Returns %0 on
++ * success or %-EINVAL on failure.
++ */
++static int
++mpc85xx_rio_config_write(int index, u16 destid, u8 hopcount, u32 offset,
++			 int len, u32 val)
++{
++	u8 *data;
++	pr_debug
++	    ("mpc85xx_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
++	     index, destid, hopcount, offset, len, val);
++	out_be32((void *)&maint_atmu_regs->rowtar,
++		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
++
++	data = (u8 *) maint_win + offset;
++	switch (len) {
++	case 1:
++		out_8((u8 *) data, val);
++		break;
++	case 2:
++		out_be16((u16 *) data, val);
++		break;
++	default:
++		out_be32((u32 *) data, val);
++		break;
++	}
++
++	return 0;
++}
++
++/**
++ * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue
++ * @mport: Master port with outbound message queue
++ * @rdev: Target of outbound message
++ * @mbox: Outbound mailbox
++ * @buffer: Message to add to outbound queue
++ * @len: Length of message
++ *
++ * Adds the @buffer message to the MPC85xx outbound message queue. Returns
++ * %0 on success or %-EINVAL on failure.
++ */
++int
++rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
++			void *buffer, size_t len)
++{
++	u32 omr;
++	struct rio_tx_desc *desc =
++	    (struct rio_tx_desc *)msg_tx_ring.virt + msg_tx_ring.tx_slot;
++	int ret = 0;
++
++	pr_debug
++	    ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n",
++	     rdev->destid, mbox, (int)buffer, len);
++
++	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/* Copy and clear rest of buffer */
++	memcpy(msg_tx_ring.virt_buffer[msg_tx_ring.tx_slot], buffer, len);
++	if (len < (RIO_MAX_MSG_SIZE - 4))
++		memset((void *)((u32) msg_tx_ring.
++				virt_buffer[msg_tx_ring.tx_slot] + len), 0,
++		       RIO_MAX_MSG_SIZE - len);
++
++	/* Set mbox field for message */
++	desc->dport = mbox & 0x3;
++
++	/* Enable EOMI interrupt, set priority, and set destid */
++	desc->dattr = 0x28000000 | (rdev->destid << 2);
++
++	/* Set transfer size aligned to next power of 2 (in double words) */
++	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
++
++	/* Set snooping and source buffer address */
++	desc->saddr = 0x00000004 | msg_tx_ring.phys_buffer[msg_tx_ring.tx_slot];
++
++	/* Increment enqueue pointer */
++	omr = in_be32((void *)&msg_regs->omr);
++	out_be32((void *)&msg_regs->omr, omr | RIO_MSG_OMR_MUI);
++
++	/* Go to next descriptor */
++	if (++msg_tx_ring.tx_slot == msg_tx_ring.size)
++		msg_tx_ring.tx_slot = 0;
++
++      out:
++	return ret;
++}
++
++EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);
++
++/**
++ * mpc85xx_rio_tx_handler - MPC85xx outbound message interrupt handler
++ * @irq: Linux interrupt number
++ * @dev_instance: Pointer to interrupt-specific data
++ *
++ * Handles outbound message interrupts. Executes a registered outbound
++ * mailbox event handler and acks the interrupt occurrence.
++ */
++static irqreturn_t
++mpc85xx_rio_tx_handler(int irq, void *dev_instance)
++{
++	int osr;
++	struct rio_mport *port = (struct rio_mport *)dev_instance;
++
++	osr = in_be32((void *)&msg_regs->osr);
++
++	if (osr & RIO_MSG_OSR_TE) {
++		pr_info("RIO: outbound message transmission error\n");
++		out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_TE);
++		goto out;
++	}
++
++	if (osr & RIO_MSG_OSR_QOI) {
++		pr_info("RIO: outbound message queue overflow\n");
++		out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_QOI);
++		goto out;
++	}
++
++	if (osr & RIO_MSG_OSR_EOMI) {
++		u32 dqp = in_be32((void *)&msg_regs->odqdpar);
++		int slot = (dqp - msg_tx_ring.phys) >> 5;
++		port->outb_msg[0].mcback(port, msg_tx_ring.dev_id, -1, slot);
++
++		/* Ack the end-of-message interrupt */
++		out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_EOMI);
++	}
++
++      out:
++	return IRQ_HANDLED;
++}
++
++/**
++ * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox
++ * @mport: Master port implementing the outbound message unit
++ * @dev_id: Device specific pointer to pass on event
++ * @mbox: Mailbox to open
++ * @entries: Number of entries in the outbound mailbox ring
++ *
++ * Initializes buffer ring, requests the outbound message interrupt,
++ * and enables the outbound message unit. Returns %0 on success and
++ * %-EINVAL or %-ENOMEM on failure.
++ */
++int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
++{
++	int i, j, rc = 0;
++
++	if ((entries < RIO_MIN_TX_RING_SIZE) ||
++	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	/* Initialize shadow copy ring */
++	msg_tx_ring.dev_id = dev_id;
++	msg_tx_ring.size = entries;
++
++	for (i = 0; i < msg_tx_ring.size; i++) {
++		if (!
++		    (msg_tx_ring.virt_buffer[i] =
++		     dma_alloc_coherent(NULL, RIO_MSG_BUFFER_SIZE,
++					&msg_tx_ring.phys_buffer[i],
++					GFP_KERNEL))) {
++			rc = -ENOMEM;
++			for (j = 0; j < msg_tx_ring.size; j++)
++				if (msg_tx_ring.virt_buffer[j])
++					dma_free_coherent(NULL,
++							  RIO_MSG_BUFFER_SIZE,
++							  msg_tx_ring.
++							  virt_buffer[j],
++							  msg_tx_ring.
++							  phys_buffer[j]);
++			goto out;
++		}
++	}
++
++	/* Initialize outbound message descriptor ring */
++	if (!(msg_tx_ring.virt = dma_alloc_coherent(NULL,
++						    msg_tx_ring.size *
++						    RIO_MSG_DESC_SIZE,
++						    &msg_tx_ring.phys,
++						    GFP_KERNEL))) {
++		rc = -ENOMEM;
++		goto out_dma;
++	}
++	memset(msg_tx_ring.virt, 0, msg_tx_ring.size * RIO_MSG_DESC_SIZE);
++	msg_tx_ring.tx_slot = 0;
++
++	/* Point dequeue/enqueue pointers at first entry in ring */
++	out_be32((void *)&msg_regs->odqdpar, msg_tx_ring.phys);
++	out_be32((void *)&msg_regs->odqepar, msg_tx_ring.phys);
++
++	/* Configure for snooping */
++	out_be32((void *)&msg_regs->osar, 0x00000004);
++
++	/* Clear interrupt status */
++	out_be32((void *)&msg_regs->osr, 0x000000b3);
++
++	/* Hook up outbound message handler */
++	if ((rc =
++	     request_irq(MPC85xx_IRQ_RIO_TX, mpc85xx_rio_tx_handler, 0,
++			 "msg_tx", (void *)mport)) < 0)
++		goto out_irq;
++
++	/*
++	 * Configure outbound message unit
++	 *      Snooping
++	 *      Interrupts (all enabled, except QEIE)
++	 *      Chaining mode
++	 *      Disable
++	 */
++	out_be32((void *)&msg_regs->omr, 0x00100220);
++
++	/* Set number of entries */
++	out_be32((void *)&msg_regs->omr,
++		 in_be32((void *)&msg_regs->omr) |
++		 ((get_bitmask_order(entries) - 2) << 12));
++
++	/* Now enable the unit */
++	out_be32((void *)&msg_regs->omr, in_be32((void *)&msg_regs->omr) | 0x1);
++
++      out:
++	return rc;
++
++      out_irq:
++	dma_free_coherent(NULL, msg_tx_ring.size * RIO_MSG_DESC_SIZE,
++			  msg_tx_ring.virt, msg_tx_ring.phys);
++
++      out_dma:
++	for (i = 0; i < msg_tx_ring.size; i++)
++		dma_free_coherent(NULL, RIO_MSG_BUFFER_SIZE,
++				  msg_tx_ring.virt_buffer[i],
++				  msg_tx_ring.phys_buffer[i]);
++
++	return rc;
++}
++
++/**
++ * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox
++ * @mport: Master port implementing the outbound message unit
++ * @mbox: Mailbox to close
++ *
++ * Disables the outbound message unit, frees all buffers, and
++ * frees the outbound message interrupt.
++ */
++void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
++{
++	/* Disable outbound message unit */
++	out_be32((void *)&msg_regs->omr, 0);
++
++	/* Free ring */
++	dma_free_coherent(NULL, msg_tx_ring.size * RIO_MSG_DESC_SIZE,
++			  msg_tx_ring.virt, msg_tx_ring.phys);
++
++	/* Free interrupt */
++	free_irq(MPC85xx_IRQ_RIO_TX, (void *)mport);
++}
++
++/**
++ * mpc85xx_rio_rx_handler - MPC85xx inbound message interrupt handler
++ * @irq: Linux interrupt number
++ * @dev_instance: Pointer to interrupt-specific data
++ *
++ * Handles inbound message interrupts. Executes a registered inbound
++ * mailbox event handler and acks the interrupt occurrence.
++ */
++static irqreturn_t
++mpc85xx_rio_rx_handler(int irq, void *dev_instance)
++{
++	int isr;
++	struct rio_mport *port = (struct rio_mport *)dev_instance;
++
++	isr = in_be32((void *)&msg_regs->isr);
++
++	if (isr & RIO_MSG_ISR_TE) {
++		pr_info("RIO: inbound message reception error\n");
++		out_be32((void *)&msg_regs->isr, RIO_MSG_ISR_TE);
++		goto out;
++	}
++
++	/* XXX Need to check/dispatch until queue empty */
++	if (isr & RIO_MSG_ISR_DIQI) {
++		/*
++		 * We implement *only* mailbox 0, but can receive messages
++		 * for any mailbox/letter to that mailbox destination. So,
++		 * make the callback with an unknown/invalid mailbox number
++		 * argument.
++		 */
++		port->inb_msg[0].mcback(port, msg_rx_ring.dev_id, -1, -1);
++
++		/* Ack the queueing interrupt */
++		out_be32((void *)&msg_regs->isr, RIO_MSG_ISR_DIQI);
++	}
++
++      out:
++	return IRQ_HANDLED;
++}
++
++/**
++ * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox
++ * @mport: Master port implementing the inbound message unit
++ * @dev_id: Device specific pointer to pass on event
++ * @mbox: Mailbox to open
++ * @entries: Number of entries in the inbound mailbox ring
++ *
++ * Initializes buffer ring, requests the inbound message interrupt,
++ * and enables the inbound message unit. Returns %0 on success
++ * and %-EINVAL or %-ENOMEM on failure.
++ */
++int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
++{
++	int i, rc = 0;
++
++	if ((entries < RIO_MIN_RX_RING_SIZE) ||
++	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	/* Initialize client buffer ring */
++	msg_rx_ring.dev_id = dev_id;
++	msg_rx_ring.size = entries;
++	msg_rx_ring.rx_slot = 0;
++	for (i = 0; i < msg_rx_ring.size; i++)
++		msg_rx_ring.virt_buffer[i] = NULL;
++
++	/* Initialize inbound message ring */
++	if (!(msg_rx_ring.virt = dma_alloc_coherent(NULL,
++						    msg_rx_ring.size *
++						    RIO_MAX_MSG_SIZE,
++						    &msg_rx_ring.phys,
++						    GFP_KERNEL))) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	/* Point dequeue/enqueue pointers at first entry in ring */
++	out_be32((void *)&msg_regs->ifqdpar, (u32) msg_rx_ring.phys);
++	out_be32((void *)&msg_regs->ifqepar, (u32) msg_rx_ring.phys);
++
++	/* Clear interrupt status */
++	out_be32((void *)&msg_regs->isr, 0x00000091);
++
++	/* Hook up inbound message handler */
++	if ((rc =
++	     request_irq(MPC85xx_IRQ_RIO_RX, mpc85xx_rio_rx_handler, 0,
++			 "msg_rx", (void *)mport)) < 0) {
++		dma_free_coherent(NULL, RIO_MSG_BUFFER_SIZE,
++				  msg_tx_ring.virt_buffer[i],
++				  msg_tx_ring.phys_buffer[i]);
++		goto out;
++	}
++
++	/*
++	 * Configure inbound message unit:
++	 *      Snooping
++	 *      4KB max message size
++	 *      Unmask all interrupt sources
++	 *      Disable
++	 */
++	out_be32((void *)&msg_regs->imr, 0x001b0060);
++
++	/* Set number of queue entries */
++	out_be32((void *)&msg_regs->imr,
++		 in_be32((void *)&msg_regs->imr) |
++		 ((get_bitmask_order(entries) - 2) << 12));
++
++	/* Now enable the unit */
++	out_be32((void *)&msg_regs->imr, in_be32((void *)&msg_regs->imr) | 0x1);
++
++      out:
++	return rc;
++}
++
++/**
++ * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox
++ * @mport: Master port implementing the inbound message unit
++ * @mbox: Mailbox to close
++ *
++ * Disables the inbound message unit, frees all buffers, and
++ * frees the inbound message interrupt.
++ */
++void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
++{
++	/* Disable inbound message unit */
++	out_be32((void *)&msg_regs->imr, 0);
++
++	/* Free ring */
++	dma_free_coherent(NULL, msg_rx_ring.size * RIO_MAX_MSG_SIZE,
++			  msg_rx_ring.virt, msg_rx_ring.phys);
++
++	/* Free interrupt */
++	free_irq(MPC85xx_IRQ_RIO_RX, (void *)mport);
++}
++
++/**
++ * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
++ * @mport: Master port implementing the inbound message unit
++ * @mbox: Inbound mailbox number
++ * @buf: Buffer to add to inbound queue
++ *
++ * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
++ * %0 on success or %-EINVAL on failure.
++ */
++int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
++{
++	int rc = 0;
++
++	pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
++		 msg_rx_ring.rx_slot);
++
++	if (msg_rx_ring.virt_buffer[msg_rx_ring.rx_slot]) {
++		printk(KERN_ERR
++		       "RIO: error adding inbound buffer %d, buffer exists\n",
++		       msg_rx_ring.rx_slot);
++		rc = -EINVAL;
++		goto out;
++	}
++
++	msg_rx_ring.virt_buffer[msg_rx_ring.rx_slot] = buf;
++	if (++msg_rx_ring.rx_slot == msg_rx_ring.size)
++		msg_rx_ring.rx_slot = 0;
++
++      out:
++	return rc;
++}
++
++EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer);
++
++/**
++ * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit
++ * @mport: Master port implementing the inbound message unit
++ * @mbox: Inbound mailbox number
++ *
++ * Gets the next available inbound message from the inbound message queue.
++ * A pointer to the message is returned on success or NULL on failure.
++ */
++void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
++{
++	u32 imr;
++	u32 phys_buf, virt_buf;
++	void *buf = NULL;
++	int buf_idx;
++
++	phys_buf = in_be32((void *)&msg_regs->ifqdpar);
++
++	/* If no more messages, then bail out */
++	if (phys_buf == in_be32((void *)&msg_regs->ifqepar))
++		goto out2;
++
++	virt_buf = (u32) msg_rx_ring.virt + (phys_buf - msg_rx_ring.phys);
++	buf_idx = (phys_buf - msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
++	buf = msg_rx_ring.virt_buffer[buf_idx];
++
++	if (!buf) {
++		printk(KERN_ERR
++		       "RIO: inbound message copy failed, no buffers\n");
++		goto out1;
++	}
++
++	/* Copy max message size, caller is expected to allocate that big */
++	memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
++
++	/* Clear the available buffer */
++	msg_rx_ring.virt_buffer[buf_idx] = NULL;
++
++      out1:
++	imr = in_be32((void *)&msg_regs->imr);
++	out_be32((void *)&msg_regs->imr, imr | RIO_MSG_IMR_MI);
++
++      out2:
++	return buf;
++}
++
++EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);
++
++/**
++ * mpc85xx_rio_dbell_handler - MPC85xx doorbell interrupt handler
++ * @irq: Linux interrupt number
++ * @dev_instance: Pointer to interrupt-specific data
++ *
++ * Handles doorbell interrupts. Parses a list of registered
++ * doorbell event handlers and executes a matching event handler.
++ */
++static irqreturn_t
++mpc85xx_rio_dbell_handler(int irq, void *dev_instance)
++{
++	int dsr;
++	struct rio_mport *port = (struct rio_mport *)dev_instance;
++
++	dsr = in_be32((void *)&msg_regs->dsr);
++
++	if (dsr & DOORBELL_DSR_TE) {
++		pr_info("RIO: doorbell reception error\n");
++		out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_TE);
++		goto out;
++	}
++
++	if (dsr & DOORBELL_DSR_QFI) {
++		pr_info("RIO: doorbell queue full\n");
++		out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_QFI);
++		goto out;
++	}
++
++	/* XXX Need to check/dispatch until queue empty */
++	if (dsr & DOORBELL_DSR_DIQI) {
++		u32 dmsg =
++		    (u32) dbell_ring.virt +
++		    (in_be32((void *)&msg_regs->dqdpar) & 0xfff);
++		u32 dmr;
++		struct rio_dbell *dbell;
++		int found = 0;
++
++		pr_debug
++		    ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
++		     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
++
++		list_for_each_entry(dbell, &port->dbells, node) {
++			if ((dbell->res->start <= DBELL_INF(dmsg)) &&
++			    (dbell->res->end >= DBELL_INF(dmsg))) {
++				found = 1;
++				break;
++			}
++		}
++		if (found) {
++			dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg),
++				    DBELL_INF(dmsg));
++		} else {
++			pr_debug
++			    ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
++			     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
++		}
++		dmr = in_be32((void *)&msg_regs->dmr);
++		out_be32((void *)&msg_regs->dmr, dmr | DOORBELL_DMR_DI);
++		out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_DIQI);
++	}
++
++      out:
++	return IRQ_HANDLED;
++}
++
++/**
++ * mpc85xx_rio_doorbell_init - MPC85xx doorbell interface init
++ * @mport: Master port implementing the inbound doorbell unit
++ *
++ * Initializes doorbell unit hardware and inbound DMA buffer
++ * ring. Called from mpc85xx_rio_setup(). Returns %0 on success
++ * or %-ENOMEM on failure.
++ */
++static int mpc85xx_rio_doorbell_init(struct rio_mport *mport)
++{
++	int rc = 0;
++
++	/* Map outbound doorbell window immediately after maintenance window */
++	if (!(dbell_win =
++	      (u32) ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
++			    RIO_DBELL_WIN_SIZE))) {
++		printk(KERN_ERR
++		       "RIO: unable to map outbound doorbell window\n");
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	/* Initialize inbound doorbells */
++	if (!(dbell_ring.virt = dma_alloc_coherent(NULL,
++						   512 * DOORBELL_MESSAGE_SIZE,
++						   &dbell_ring.phys,
++						   GFP_KERNEL))) {
++		printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
++		rc = -ENOMEM;
++		iounmap((void *)dbell_win);
++		goto out;
++	}
++
++	/* Point dequeue/enqueue pointers at first entry in ring */
++	out_be32((void *)&msg_regs->dqdpar, (u32) dbell_ring.phys);
++	out_be32((void *)&msg_regs->dqepar, (u32) dbell_ring.phys);
++
++	/* Clear interrupt status */
++	out_be32((void *)&msg_regs->dsr, 0x00000091);
++
++	/* Hook up doorbell handler */
++	if ((rc =
++	     request_irq(MPC85xx_IRQ_RIO_BELL, mpc85xx_rio_dbell_handler, 0,
++			 "dbell_rx", (void *)mport) < 0)) {
++		iounmap((void *)dbell_win);
++		dma_free_coherent(NULL, 512 * DOORBELL_MESSAGE_SIZE,
++				  dbell_ring.virt, dbell_ring.phys);
++		printk(KERN_ERR
++		       "MPC85xx RIO: unable to request inbound doorbell irq");
++		goto out;
++	}
++
++	/* Configure doorbells for snooping, 512 entries, and enable */
++	out_be32((void *)&msg_regs->dmr, 0x00108161);
++
++      out:
++	return rc;
++}
++
++static char *cmdline = NULL;
++
++static int mpc85xx_rio_get_hdid(int index)
++{
++	/* XXX Need to parse multiple entries in some format */
++	if (!cmdline)
++		return -1;
++
++	return simple_strtol(cmdline, NULL, 0);
++}
++
++static int mpc85xx_rio_get_cmdline(char *s)
++{
++	if (!s)
++		return 0;
++
++	cmdline = s;
++	return 1;
++}
++
++__setup("riohdid=", mpc85xx_rio_get_cmdline);
++
++/**
++ * mpc85xx_rio_setup - Setup MPC85xx RapidIO interface
++ * @law_start: Starting physical address of RapidIO LAW
++ * @law_size: Size of RapidIO LAW
++ *
++ * Initializes MPC85xx RapidIO hardware interface, configures
++ * master port with system-specific info, and registers the
++ * master port with the RapidIO subsystem.
++ */
++void mpc85xx_rio_setup(int law_start, int law_size)
++{
++	struct rio_ops *ops;
++	struct rio_mport *port;
++
++	ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL);
++	ops->lcread = mpc85xx_local_config_read;
++	ops->lcwrite = mpc85xx_local_config_write;
++	ops->cread = mpc85xx_rio_config_read;
++	ops->cwrite = mpc85xx_rio_config_write;
++	ops->dsend = mpc85xx_rio_doorbell_send;
++
++	port = kmalloc(sizeof(struct rio_mport), GFP_KERNEL);
++	port->id = 0;
++	port->index = 0;
++	INIT_LIST_HEAD(&port->dbells);
++	port->iores.start = law_start;
++	port->iores.end = law_start + law_size;
++	port->iores.flags = IORESOURCE_MEM;
++
++	rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
++	rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
++	rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
++	strcpy(port->name, "RIO0 mport");
++
++	port->ops = ops;
++	port->host_deviceid = mpc85xx_rio_get_hdid(port->id);
++
++	rio_register_mport(port);
++
++	regs_win = (u32) ioremap(RIO_REGS_BASE, 0x20000);
++	atmu_regs = (struct rio_atmu_regs *)(regs_win + RIO_ATMU_REGS_OFFSET);
++	maint_atmu_regs = atmu_regs + 1;
++	dbell_atmu_regs = atmu_regs + 2;
++	msg_regs = (struct rio_msg_regs *)(regs_win + RIO_MSG_REGS_OFFSET);
++
++	/* Configure maintenance transaction window */
++	out_be32((void *)&maint_atmu_regs->rowbar, 0x000c0000);
++	out_be32((void *)&maint_atmu_regs->rowar, 0x80077015);
++
++	maint_win = (u32) ioremap(law_start, RIO_MAINT_WIN_SIZE);
++
++	/* Configure outbound doorbell window */
++	out_be32((void *)&dbell_atmu_regs->rowbar, 0x000c0400);
++	out_be32((void *)&dbell_atmu_regs->rowar, 0x8004200b);
++	mpc85xx_rio_doorbell_init(port);
++}
+diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h
+new file mode 100644
+index 0000000..6d3ff30
+--- /dev/null
++++ b/arch/powerpc/sysdev/fsl_rio.h
+@@ -0,0 +1,20 @@
++/*
++ * MPC85xx RapidIO definitions
++ *
++ * Copyright 2005 MontaVista Software, Inc.
++ * Matt Porter <mporter at kernel.crashing.org>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++#ifndef __PPC_SYSLIB_PPC85XX_RIO_H
++#define __PPC_SYSLIB_PPC85XX_RIO_H
++
++#include <linux/init.h>
++
++extern void mpc85xx_rio_setup(int law_start, int law_size);
++
++#endif				/* __PPC_SYSLIB_PPC85XX_RIO_H */
+diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
+index 3ace747..e48b20e 100644
+--- a/arch/powerpc/sysdev/fsl_soc.c
++++ b/arch/powerpc/sysdev/fsl_soc.c
+@@ -24,6 +24,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/of_platform.h>
+ #include <linux/phy.h>
++#include <linux/phy_fixed.h>
+ #include <linux/spi/spi.h>
+ #include <linux/fsl_devices.h>
+ #include <linux/fs_enet_pd.h>
+@@ -54,10 +55,18 @@ phys_addr_t get_immrbase(void)
+ 	soc = of_find_node_by_type(NULL, "soc");
+ 	if (soc) {
+ 		int size;
+-		const void *prop = of_get_property(soc, "reg", &size);
++		u32 naddr;
++		const u32 *prop = of_get_property(soc, "#address-cells", &size);
+ 
++		if (prop && size == 4)
++			naddr = *prop;
++		else
++			naddr = 2;
++
++		prop = of_get_property(soc, "ranges", &size);
+ 		if (prop)
+-			immrbase = of_translate_address(soc, prop);
++			immrbase = of_translate_address(soc, prop + naddr);
++
+ 		of_node_put(soc);
+ 	}
+ 
+@@ -66,7 +75,7 @@ phys_addr_t get_immrbase(void)
+ 
+ EXPORT_SYMBOL(get_immrbase);
+ 
+-#if defined(CONFIG_CPM2) || defined(CONFIG_8xx)
++#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
+ 
+ static u32 brgfreq = -1;
+ 
+@@ -91,11 +100,21 @@ u32 get_brgfreq(void)
+ 
+ 	/* Legacy device binding -- will go away when no users are left. */
+ 	node = of_find_node_by_type(NULL, "cpm");
++	if (!node)
++		node = of_find_compatible_node(NULL, NULL, "fsl,qe");
++	if (!node)
++		node = of_find_node_by_type(NULL, "qe");
++
+ 	if (node) {
+ 		prop = of_get_property(node, "brg-frequency", &size);
+ 		if (prop && size == 4)
+ 			brgfreq = *prop;
+ 
++		if (brgfreq == -1 || brgfreq == 0) {
++			prop = of_get_property(node, "bus-frequency", &size);
++			if (prop && size == 4)
++				brgfreq = *prop / 2;
++		}
+ 		of_node_put(node);
+ 	}
+ 
+@@ -130,17 +149,51 @@ u32 get_baudrate(void)
+ EXPORT_SYMBOL(get_baudrate);
+ #endif /* CONFIG_CPM2 */
+ 
+-static int __init gfar_mdio_of_init(void)
++#ifdef CONFIG_FIXED_PHY
++static int __init of_add_fixed_phys(void)
+ {
++	int ret;
+ 	struct device_node *np;
+-	unsigned int i;
++	u32 *fixed_link;
++	struct fixed_phy_status status = {};
++
++	for_each_node_by_name(np, "ethernet") {
++		fixed_link  = (u32 *)of_get_property(np, "fixed-link", NULL);
++		if (!fixed_link)
++			continue;
++
++		status.link = 1;
++		status.duplex = fixed_link[1];
++		status.speed = fixed_link[2];
++		status.pause = fixed_link[3];
++		status.asym_pause = fixed_link[4];
++
++		ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status);
++		if (ret) {
++			of_node_put(np);
++			return ret;
++		}
++	}
++
++	return 0;
++}
++arch_initcall(of_add_fixed_phys);
++#endif /* CONFIG_FIXED_PHY */
++
++static int __init gfar_mdio_of_init(void)
++{
++	struct device_node *np = NULL;
+ 	struct platform_device *mdio_dev;
+ 	struct resource res;
+ 	int ret;
+ 
+-	for (np = NULL, i = 0;
+-	     (np = of_find_compatible_node(np, "mdio", "gianfar")) != NULL;
+-	     i++) {
++	np = of_find_compatible_node(np, NULL, "fsl,gianfar-mdio");
++
++	/* try the deprecated version */
++	if (!np)
++		np = of_find_compatible_node(np, "mdio", "gianfar");
++
++	if (np) {
+ 		int k;
+ 		struct device_node *child = NULL;
+ 		struct gianfar_mdio_data mdio_data;
+@@ -179,11 +232,13 @@ static int __init gfar_mdio_of_init(void)
+ 			goto unreg;
+ 	}
+ 
++	of_node_put(np);
+ 	return 0;
+ 
+ unreg:
+ 	platform_device_unregister(mdio_dev);
+ err:
++	of_node_put(np);
+ 	return ret;
+ }
+ 
+@@ -193,7 +248,6 @@ static const char *gfar_tx_intr = "tx";
+ static const char *gfar_rx_intr = "rx";
+ static const char *gfar_err_intr = "error";
+ 
+-
+ static int __init gfar_of_init(void)
+ {
+ 	struct device_node *np;
+@@ -277,29 +331,43 @@ static int __init gfar_of_init(void)
+ 			gfar_data.interface = PHY_INTERFACE_MODE_MII;
+ 
+ 		ph = of_get_property(np, "phy-handle", NULL);
+-		phy = of_find_node_by_phandle(*ph);
++		if (ph == NULL) {
++			u32 *fixed_link;
+ 
+-		if (phy == NULL) {
+-			ret = -ENODEV;
+-			goto unreg;
+-		}
++			fixed_link = (u32 *)of_get_property(np, "fixed-link",
++							   NULL);
++			if (!fixed_link) {
++				ret = -ENODEV;
++				goto unreg;
++			}
+ 
+-		mdio = of_get_parent(phy);
++			gfar_data.bus_id = 0;
++			gfar_data.phy_id = fixed_link[0];
++		} else {
++			phy = of_find_node_by_phandle(*ph);
++
++			if (phy == NULL) {
++				ret = -ENODEV;
++				goto unreg;
++			}
++
++			mdio = of_get_parent(phy);
++
++			id = of_get_property(phy, "reg", NULL);
++			ret = of_address_to_resource(mdio, 0, &res);
++			if (ret) {
++				of_node_put(phy);
++				of_node_put(mdio);
++				goto unreg;
++			}
++
++			gfar_data.phy_id = *id;
++			gfar_data.bus_id = res.start;
+ 
+-		id = of_get_property(phy, "reg", NULL);
+-		ret = of_address_to_resource(mdio, 0, &res);
+-		if (ret) {
+ 			of_node_put(phy);
+ 			of_node_put(mdio);
+-			goto unreg;
+ 		}
+ 
+-		gfar_data.phy_id = *id;
+-		gfar_data.bus_id = res.start;
+-
+-		of_node_put(phy);
+-		of_node_put(mdio);
+-
+ 		ret =
+ 		    platform_device_add_data(gfar_dev, &gfar_data,
+ 					     sizeof(struct
+@@ -390,13 +458,11 @@ static void __init of_register_i2c_devices(struct device_node *adap_node,
+ static int __init fsl_i2c_of_init(void)
+ {
+ 	struct device_node *np;
+-	unsigned int i;
++	unsigned int i = 0;
+ 	struct platform_device *i2c_dev;
+ 	int ret;
+ 
+-	for (np = NULL, i = 0;
+-	     (np = of_find_compatible_node(np, "i2c", "fsl-i2c")) != NULL;
+-	     i++) {
++	for_each_compatible_node(np, NULL, "fsl-i2c") {
+ 		struct resource r[2];
+ 		struct fsl_i2c_platform_data i2c_data;
+ 		const unsigned char *flags = NULL;
+@@ -432,7 +498,7 @@ static int __init fsl_i2c_of_init(void)
+ 		if (ret)
+ 			goto unreg;
+ 
+-		of_register_i2c_devices(np, i);
++		of_register_i2c_devices(np, i++);
+ 	}
+ 
+ 	return 0;
+@@ -528,14 +594,12 @@ static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
+ static int __init fsl_usb_of_init(void)
+ {
+ 	struct device_node *np;
+-	unsigned int i;
++	unsigned int i = 0;
+ 	struct platform_device *usb_dev_mph = NULL, *usb_dev_dr_host = NULL,
+ 		*usb_dev_dr_client = NULL;
+ 	int ret;
+ 
+-	for (np = NULL, i = 0;
+-	     (np = of_find_compatible_node(np, "usb", "fsl-usb2-mph")) != NULL;
+-	     i++) {
++	for_each_compatible_node(np, NULL, "fsl-usb2-mph") {
+ 		struct resource r[2];
+ 		struct fsl_usb2_platform_data usb_data;
+ 		const unsigned char *prop = NULL;
+@@ -578,11 +642,10 @@ static int __init fsl_usb_of_init(void)
+ 						    fsl_usb2_platform_data));
+ 		if (ret)
+ 			goto unreg_mph;
++		i++;
+ 	}
+ 
+-	for (np = NULL;
+-	     (np = of_find_compatible_node(np, "usb", "fsl-usb2-dr")) != NULL;
+-	     i++) {
++	for_each_compatible_node(np, NULL, "fsl-usb2-dr") {
+ 		struct resource r[2];
+ 		struct fsl_usb2_platform_data usb_data;
+ 		const unsigned char *prop = NULL;
+@@ -654,6 +717,7 @@ static int __init fsl_usb_of_init(void)
+ 						fsl_usb2_platform_data))))
+ 				goto unreg_dr;
+ 		}
++		i++;
+ 	}
+ 	return 0;
+ 
+@@ -1125,13 +1189,12 @@ arch_initcall(fs_enet_of_init);
+ 
+ static int __init fsl_pcmcia_of_init(void)
+ {
+-	struct device_node *np = NULL;
++	struct device_node *np;
+ 	/*
+ 	 * Register all the devices which type is "pcmcia"
+ 	 */
+-	while ((np = of_find_compatible_node(np,
+-			"pcmcia", "fsl,pq-pcmcia")) != NULL)
+-			    of_platform_device_create(np, "m8xx-pcmcia", NULL);
++	for_each_compatible_node(np, "pcmcia", "fsl,pq-pcmcia")
++		of_platform_device_create(np, "m8xx-pcmcia", NULL);
+ 	return 0;
+ }
+ 
+@@ -1213,31 +1276,17 @@ arch_initcall(cpm_smc_uart_of_init);
+ #endif /* CONFIG_8xx */
+ #endif /* CONFIG_PPC_CPM_NEW_BINDING */
+ 
+-int __init fsl_spi_init(struct spi_board_info *board_infos,
+-			unsigned int num_board_infos,
+-			void (*activate_cs)(u8 cs, u8 polarity),
+-			void (*deactivate_cs)(u8 cs, u8 polarity))
++static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
++				   struct spi_board_info *board_infos,
++				   unsigned int num_board_infos,
++				   void (*activate_cs)(u8 cs, u8 polarity),
++				   void (*deactivate_cs)(u8 cs, u8 polarity))
+ {
+ 	struct device_node *np;
+-	unsigned int i;
+-	const u32 *sysclk;
+-
+-	/* SPI controller is either clocked from QE or SoC clock */
+-	np = of_find_node_by_type(NULL, "qe");
+-	if (!np)
+-		np = of_find_node_by_type(NULL, "soc");
+-
+-	if (!np)
+-		return -ENODEV;
+-
+-	sysclk = of_get_property(np, "bus-frequency", NULL);
+-	if (!sysclk)
+-		return -ENODEV;
++	unsigned int i = 0;
+ 
+-	for (np = NULL, i = 1;
+-	     (np = of_find_compatible_node(np, "spi", "fsl_spi")) != NULL;
+-	     i++) {
+-		int ret = 0;
++	for_each_compatible_node(np, type, compatible) {
++		int ret;
+ 		unsigned int j;
+ 		const void *prop;
+ 		struct resource res[2];
+@@ -1249,13 +1298,17 @@ int __init fsl_spi_init(struct spi_board_info *board_infos,
+ 
+ 		memset(res, 0, sizeof(res));
+ 
+-		pdata.sysclk = *sysclk;
++		pdata.sysclk = sysclk;
+ 
+ 		prop = of_get_property(np, "reg", NULL);
+ 		if (!prop)
+ 			goto err;
+ 		pdata.bus_num = *(u32 *)prop;
+ 
++		prop = of_get_property(np, "cell-index", NULL);
++		if (prop)
++			i = *(u32 *)prop;
++
+ 		prop = of_get_property(np, "mode", NULL);
+ 		if (prop && !strcmp(prop, "cpu-qe"))
+ 			pdata.qe_mode = 1;
+@@ -1266,7 +1319,7 @@ int __init fsl_spi_init(struct spi_board_info *board_infos,
+ 		}
+ 
+ 		if (!pdata.max_chipselect)
+-			goto err;
++			continue;
+ 
+ 		ret = of_address_to_resource(np, 0, &res[0]);
+ 		if (ret)
+@@ -1293,13 +1346,58 @@ int __init fsl_spi_init(struct spi_board_info *board_infos,
+ 		if (ret)
+ 			goto unreg;
+ 
+-		continue;
++		goto next;
+ unreg:
+ 		platform_device_del(pdev);
+ err:
+-		continue;
++		pr_err("%s: registration failed\n", np->full_name);
++next:
++		i++;
++	}
++
++	return i;
++}
++
++int __init fsl_spi_init(struct spi_board_info *board_infos,
++			unsigned int num_board_infos,
++			void (*activate_cs)(u8 cs, u8 polarity),
++			void (*deactivate_cs)(u8 cs, u8 polarity))
++{
++	u32 sysclk = -1;
++	int ret;
++
++#ifdef CONFIG_QUICC_ENGINE
++	/* SPI controller is either clocked from QE or SoC clock */
++	sysclk = get_brgfreq();
++#endif
++	if (sysclk == -1) {
++		struct device_node *np;
++		const u32 *freq;
++		int size;
++
++		np = of_find_node_by_type(NULL, "soc");
++		if (!np)
++			return -ENODEV;
++
++		freq = of_get_property(np, "clock-frequency", &size);
++		if (!freq || size != sizeof(*freq) || *freq == 0) {
++			freq = of_get_property(np, "bus-frequency", &size);
++			if (!freq || size != sizeof(*freq) || *freq == 0) {
++				of_node_put(np);
++				return -ENODEV;
++			}
++		}
++
++		sysclk = *freq;
++		of_node_put(np);
+ 	}
+ 
++	ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos,
++			       num_board_infos, activate_cs, deactivate_cs);
++	if (!ret)
++		of_fsl_spi_probe("spi", "fsl_spi", sysclk, board_infos,
++				 num_board_infos, activate_cs, deactivate_cs);
++
+ 	return spi_register_board_info(board_infos, num_board_infos);
+ }
+ 
+diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
+index 11ad562..d502927 100644
+--- a/arch/powerpc/sysdev/grackle.c
++++ b/arch/powerpc/sysdev/grackle.c
+@@ -57,7 +57,7 @@ void __init setup_grackle(struct pci_controller *hose)
+ {
+ 	setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
+ 	if (machine_is_compatible("PowerMac1,1"))
+-		pci_assign_all_buses = 1;
++		ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ 	if (machine_is_compatible("AAPL,PowerBook1998"))
+ 		grackle_set_loop_snoop(hose, 1);
+ #if 0	/* Disabled for now, HW problems ??? */
+diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
+index 05a56e5..ae0dbf4 100644
+--- a/arch/powerpc/sysdev/ipic.c
++++ b/arch/powerpc/sysdev/ipic.c
+@@ -30,11 +30,67 @@
+ #include "ipic.h"
+ 
+ static struct ipic * primary_ipic;
++static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
+ static DEFINE_SPINLOCK(ipic_lock);
+ 
+ static struct ipic_info ipic_info[] = {
++	[1] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_C,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 16,
++		.prio_mask = 0,
++	},
++	[2] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_C,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 17,
++		.prio_mask = 1,
++	},
++	[3] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_C,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 18,
++		.prio_mask = 2,
++	},
++	[4] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_C,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 19,
++		.prio_mask = 3,
++	},
++	[5] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_C,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 20,
++		.prio_mask = 4,
++	},
++	[6] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_C,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 21,
++		.prio_mask = 5,
++	},
++	[7] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_C,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 22,
++		.prio_mask = 6,
++	},
++	[8] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_C,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 23,
++		.prio_mask = 7,
++	},
+ 	[9] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_D,
+ 		.force	= IPIC_SIFCR_H,
+@@ -42,7 +98,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 0,
+ 	},
+ 	[10] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_D,
+ 		.force	= IPIC_SIFCR_H,
+@@ -50,15 +105,27 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 1,
+ 	},
+ 	[11] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_D,
+ 		.force	= IPIC_SIFCR_H,
+ 		.bit	= 26,
+ 		.prio_mask = 2,
+ 	},
++	[12] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_D,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 27,
++		.prio_mask = 3,
++	},
++	[13] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_D,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 28,
++		.prio_mask = 4,
++	},
+ 	[14] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_D,
+ 		.force	= IPIC_SIFCR_H,
+@@ -66,7 +133,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 5,
+ 	},
+ 	[15] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_D,
+ 		.force	= IPIC_SIFCR_H,
+@@ -74,7 +140,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 6,
+ 	},
+ 	[16] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_D,
+ 		.force	= IPIC_SIFCR_H,
+@@ -82,7 +147,7 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 7,
+ 	},
+ 	[17] = {
+-		.pend	= IPIC_SEPNR,
++		.ack	= IPIC_SEPNR,
+ 		.mask	= IPIC_SEMSR,
+ 		.prio	= IPIC_SMPRR_A,
+ 		.force	= IPIC_SEFCR,
+@@ -90,7 +155,7 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 5,
+ 	},
+ 	[18] = {
+-		.pend	= IPIC_SEPNR,
++		.ack	= IPIC_SEPNR,
+ 		.mask	= IPIC_SEMSR,
+ 		.prio	= IPIC_SMPRR_A,
+ 		.force	= IPIC_SEFCR,
+@@ -98,7 +163,7 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 6,
+ 	},
+ 	[19] = {
+-		.pend	= IPIC_SEPNR,
++		.ack	= IPIC_SEPNR,
+ 		.mask	= IPIC_SEMSR,
+ 		.prio	= IPIC_SMPRR_A,
+ 		.force	= IPIC_SEFCR,
+@@ -106,7 +171,7 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 7,
+ 	},
+ 	[20] = {
+-		.pend	= IPIC_SEPNR,
++		.ack	= IPIC_SEPNR,
+ 		.mask	= IPIC_SEMSR,
+ 		.prio	= IPIC_SMPRR_B,
+ 		.force	= IPIC_SEFCR,
+@@ -114,7 +179,7 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 4,
+ 	},
+ 	[21] = {
+-		.pend	= IPIC_SEPNR,
++		.ack	= IPIC_SEPNR,
+ 		.mask	= IPIC_SEMSR,
+ 		.prio	= IPIC_SMPRR_B,
+ 		.force	= IPIC_SEFCR,
+@@ -122,7 +187,7 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 5,
+ 	},
+ 	[22] = {
+-		.pend	= IPIC_SEPNR,
++		.ack	= IPIC_SEPNR,
+ 		.mask	= IPIC_SEMSR,
+ 		.prio	= IPIC_SMPRR_B,
+ 		.force	= IPIC_SEFCR,
+@@ -130,7 +195,7 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 6,
+ 	},
+ 	[23] = {
+-		.pend	= IPIC_SEPNR,
++		.ack	= IPIC_SEPNR,
+ 		.mask	= IPIC_SEMSR,
+ 		.prio	= IPIC_SMPRR_B,
+ 		.force	= IPIC_SEFCR,
+@@ -138,7 +203,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 7,
+ 	},
+ 	[32] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_A,
+ 		.force	= IPIC_SIFCR_H,
+@@ -146,7 +210,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 0,
+ 	},
+ 	[33] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_A,
+ 		.force	= IPIC_SIFCR_H,
+@@ -154,7 +217,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 1,
+ 	},
+ 	[34] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_A,
+ 		.force	= IPIC_SIFCR_H,
+@@ -162,7 +224,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 2,
+ 	},
+ 	[35] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_A,
+ 		.force	= IPIC_SIFCR_H,
+@@ -170,7 +231,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 3,
+ 	},
+ 	[36] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_A,
+ 		.force	= IPIC_SIFCR_H,
+@@ -178,7 +238,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 4,
+ 	},
+ 	[37] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_A,
+ 		.force	= IPIC_SIFCR_H,
+@@ -186,7 +245,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 5,
+ 	},
+ 	[38] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_A,
+ 		.force	= IPIC_SIFCR_H,
+@@ -194,15 +252,69 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 6,
+ 	},
+ 	[39] = {
+-		.pend	= IPIC_SIPNR_H,
+ 		.mask	= IPIC_SIMSR_H,
+ 		.prio	= IPIC_SIPRR_A,
+ 		.force	= IPIC_SIFCR_H,
+ 		.bit	= 7,
+ 		.prio_mask = 7,
+ 	},
++	[40] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_B,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 8,
++		.prio_mask = 0,
++	},
++	[41] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_B,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 9,
++		.prio_mask = 1,
++	},
++	[42] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_B,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 10,
++		.prio_mask = 2,
++	},
++	[43] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_B,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 11,
++		.prio_mask = 3,
++	},
++	[44] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_B,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 12,
++		.prio_mask = 4,
++	},
++	[45] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_B,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 13,
++		.prio_mask = 5,
++	},
++	[46] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_B,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 14,
++		.prio_mask = 6,
++	},
++	[47] = {
++		.mask	= IPIC_SIMSR_H,
++		.prio	= IPIC_SIPRR_B,
++		.force	= IPIC_SIFCR_H,
++		.bit	= 15,
++		.prio_mask = 7,
++	},
+ 	[48] = {
+-		.pend	= IPIC_SEPNR,
+ 		.mask	= IPIC_SEMSR,
+ 		.prio	= IPIC_SMPRR_A,
+ 		.force	= IPIC_SEFCR,
+@@ -210,7 +322,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 4,
+ 	},
+ 	[64] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= IPIC_SMPRR_A,
+ 		.force	= IPIC_SIFCR_L,
+@@ -218,7 +329,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 0,
+ 	},
+ 	[65] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= IPIC_SMPRR_A,
+ 		.force	= IPIC_SIFCR_L,
+@@ -226,7 +336,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 1,
+ 	},
+ 	[66] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= IPIC_SMPRR_A,
+ 		.force	= IPIC_SIFCR_L,
+@@ -234,7 +343,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 2,
+ 	},
+ 	[67] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= IPIC_SMPRR_A,
+ 		.force	= IPIC_SIFCR_L,
+@@ -242,7 +350,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 3,
+ 	},
+ 	[68] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= IPIC_SMPRR_B,
+ 		.force	= IPIC_SIFCR_L,
+@@ -250,7 +357,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 0,
+ 	},
+ 	[69] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= IPIC_SMPRR_B,
+ 		.force	= IPIC_SIFCR_L,
+@@ -258,7 +364,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 1,
+ 	},
+ 	[70] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= IPIC_SMPRR_B,
+ 		.force	= IPIC_SIFCR_L,
+@@ -266,7 +371,6 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 2,
+ 	},
+ 	[71] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= IPIC_SMPRR_B,
+ 		.force	= IPIC_SIFCR_L,
+@@ -274,96 +378,131 @@ static struct ipic_info ipic_info[] = {
+ 		.prio_mask = 3,
+ 	},
+ 	[72] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 8,
+ 	},
+ 	[73] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 9,
+ 	},
+ 	[74] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 10,
+ 	},
+ 	[75] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 11,
+ 	},
+ 	[76] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 12,
+ 	},
+ 	[77] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 13,
+ 	},
+ 	[78] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 14,
+ 	},
+ 	[79] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 15,
+ 	},
+ 	[80] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 16,
+ 	},
++	[81] = {
++		.mask	= IPIC_SIMSR_L,
++		.prio	= 0,
++		.force	= IPIC_SIFCR_L,
++		.bit	= 17,
++	},
++	[82] = {
++		.mask	= IPIC_SIMSR_L,
++		.prio	= 0,
++		.force	= IPIC_SIFCR_L,
++		.bit	= 18,
++	},
++	[83] = {
++		.mask	= IPIC_SIMSR_L,
++		.prio	= 0,
++		.force	= IPIC_SIFCR_L,
++		.bit	= 19,
++	},
+ 	[84] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 20,
+ 	},
+ 	[85] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 21,
+ 	},
++	[86] = {
++		.mask	= IPIC_SIMSR_L,
++		.prio	= 0,
++		.force	= IPIC_SIFCR_L,
++		.bit	= 22,
++	},
++	[87] = {
++		.mask	= IPIC_SIMSR_L,
++		.prio	= 0,
++		.force	= IPIC_SIFCR_L,
++		.bit	= 23,
++	},
++	[88] = {
++		.mask	= IPIC_SIMSR_L,
++		.prio	= 0,
++		.force	= IPIC_SIFCR_L,
++		.bit	= 24,
++	},
++	[89] = {
++		.mask	= IPIC_SIMSR_L,
++		.prio	= 0,
++		.force	= IPIC_SIFCR_L,
++		.bit	= 25,
++	},
+ 	[90] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 26,
+ 	},
+ 	[91] = {
+-		.pend	= IPIC_SIPNR_L,
+ 		.mask	= IPIC_SIMSR_L,
+ 		.prio	= 0,
+ 		.force	= IPIC_SIFCR_L,
+ 		.bit	= 27,
+ 	},
++	[94] = {
++		.mask	= IPIC_SIMSR_L,
++		.prio	= 0,
++		.force	= IPIC_SIFCR_L,
++		.bit	= 30,
++	},
+ };
+ 
+ static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
+@@ -412,6 +551,10 @@ static void ipic_mask_irq(unsigned int virq)
+ 	temp &= ~(1 << (31 - ipic_info[src].bit));
+ 	ipic_write(ipic->regs, ipic_info[src].mask, temp);
+ 
++	/* mb() can't guarantee that masking is finished.  But it does finish
++	 * for nearly all cases. */
++	mb();
++
+ 	spin_unlock_irqrestore(&ipic_lock, flags);
+ }
+ 
+@@ -424,9 +567,13 @@ static void ipic_ack_irq(unsigned int virq)
+ 
+ 	spin_lock_irqsave(&ipic_lock, flags);
+ 
+-	temp = ipic_read(ipic->regs, ipic_info[src].pend);
++	temp = ipic_read(ipic->regs, ipic_info[src].ack);
+ 	temp |= (1 << (31 - ipic_info[src].bit));
+-	ipic_write(ipic->regs, ipic_info[src].pend, temp);
++	ipic_write(ipic->regs, ipic_info[src].ack, temp);
++
++	/* mb() can't guarantee that ack is finished.  But it does finish
++	 * for nearly all cases. */
++	mb();
+ 
+ 	spin_unlock_irqrestore(&ipic_lock, flags);
+ }
+@@ -444,9 +591,13 @@ static void ipic_mask_irq_and_ack(unsigned int virq)
+ 	temp &= ~(1 << (31 - ipic_info[src].bit));
+ 	ipic_write(ipic->regs, ipic_info[src].mask, temp);
+ 
+-	temp = ipic_read(ipic->regs, ipic_info[src].pend);
++	temp = ipic_read(ipic->regs, ipic_info[src].ack);
+ 	temp |= (1 << (31 - ipic_info[src].bit));
+-	ipic_write(ipic->regs, ipic_info[src].pend, temp);
++	ipic_write(ipic->regs, ipic_info[src].ack, temp);
++
++	/* mb() can't guarantee that ack is finished.  But it does finish
++	 * for nearly all cases. */
++	mb();
+ 
+ 	spin_unlock_irqrestore(&ipic_lock, flags);
+ }
+@@ -468,14 +619,22 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
+ 			flow_type);
+ 		return -EINVAL;
+ 	}
++	/* ipic supports only edge mode on external interrupts */
++	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
++		printk(KERN_ERR "ipic: edge sense not supported on internal "
++				"interrupts\n");
++		return -EINVAL;
++	}
+ 
+ 	desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
+ 	desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
+ 	if (flow_type & IRQ_TYPE_LEVEL_LOW)  {
+ 		desc->status |= IRQ_LEVEL;
+ 		desc->handle_irq = handle_level_irq;
++		desc->chip = &ipic_level_irq_chip;
+ 	} else {
+ 		desc->handle_irq = handle_edge_irq;
++		desc->chip = &ipic_edge_irq_chip;
+ 	}
+ 
+ 	/* only EXT IRQ senses are programmable on ipic
+@@ -500,7 +659,16 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
+ 	return 0;
+ }
+ 
+-static struct irq_chip ipic_irq_chip = {
++/* level interrupts and edge interrupts have different ack operations */
++static struct irq_chip ipic_level_irq_chip = {
++	.typename	= " IPIC  ",
++	.unmask		= ipic_unmask_irq,
++	.mask		= ipic_mask_irq,
++	.mask_ack	= ipic_mask_irq,
++	.set_type	= ipic_set_irq_type,
++};
++
++static struct irq_chip ipic_edge_irq_chip = {
+ 	.typename	= " IPIC  ",
+ 	.unmask		= ipic_unmask_irq,
+ 	.mask		= ipic_mask_irq,
+@@ -519,13 +687,9 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq,
+ 			 irq_hw_number_t hw)
+ {
+ 	struct ipic *ipic = h->host_data;
+-	struct irq_chip *chip;
+-
+-	/* Default chip */
+-	chip = &ipic->hc_irq;
+ 
+ 	set_irq_chip_data(virq, ipic);
+-	set_irq_chip_and_handler(virq, chip, handle_level_irq);
++	set_irq_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
+ 
+ 	/* Set default irq type */
+ 	set_irq_type(virq, IRQ_TYPE_NONE);
+@@ -584,7 +748,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
+ 	ipic->regs = ioremap(res.start, res.end - res.start + 1);
+ 
+ 	ipic->irqhost->host_data = ipic;
+-	ipic->hc_irq = ipic_irq_chip;
+ 
+ 	/* init hw */
+ 	ipic_write(ipic->regs, IPIC_SICNR, 0x0);
+@@ -593,6 +756,10 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
+ 	 * configure SICFR accordingly */
+ 	if (flags & IPIC_SPREADMODE_GRP_A)
+ 		temp |= SICFR_IPSA;
++	if (flags & IPIC_SPREADMODE_GRP_B)
++		temp |= SICFR_IPSB;
++	if (flags & IPIC_SPREADMODE_GRP_C)
++		temp |= SICFR_IPSC;
+ 	if (flags & IPIC_SPREADMODE_GRP_D)
+ 		temp |= SICFR_IPSD;
+ 	if (flags & IPIC_SPREADMODE_MIX_A)
+@@ -600,7 +767,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
+ 	if (flags & IPIC_SPREADMODE_MIX_B)
+ 		temp |= SICFR_MPSB;
+ 
+-	ipic_write(ipic->regs, IPIC_SICNR, temp);
++	ipic_write(ipic->regs, IPIC_SICFR, temp);
+ 
+ 	/* handle MCP route */
+ 	temp = 0;
+@@ -672,10 +839,12 @@ void ipic_set_highest_priority(unsigned int virq)
+ 
+ void ipic_set_default_priority(void)
+ {
+-	ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_SIPRR_A_DEFAULT);
+-	ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_SIPRR_D_DEFAULT);
+-	ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_SMPRR_A_DEFAULT);
+-	ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_SMPRR_B_DEFAULT);
++	ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
++	ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
++	ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
++	ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
++	ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
++	ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
+ }
+ 
+ void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
+@@ -725,7 +894,7 @@ unsigned int ipic_get_irq(void)
+ }
+ 
+ static struct sysdev_class ipic_sysclass = {
+-	set_kset_name("ipic"),
++	.name = "ipic",
+ };
+ 
+ static struct sys_device device_ipic = {
+diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h
+index bb309a5..9391c57 100644
+--- a/arch/powerpc/sysdev/ipic.h
++++ b/arch/powerpc/sysdev/ipic.h
+@@ -23,13 +23,12 @@
+ #define IPIC_IRQ_EXT7 23
+ 
+ /* Default Priority Registers */
+-#define IPIC_SIPRR_A_DEFAULT 0x05309770
+-#define IPIC_SIPRR_D_DEFAULT 0x05309770
+-#define IPIC_SMPRR_A_DEFAULT 0x05309770
+-#define IPIC_SMPRR_B_DEFAULT 0x05309770
++#define IPIC_PRIORITY_DEFAULT 0x05309770
+ 
+ /* System Global Interrupt Configuration Register */
+ #define	SICFR_IPSA	0x00010000
++#define	SICFR_IPSB	0x00020000
++#define	SICFR_IPSC	0x00040000
+ #define	SICFR_IPSD	0x00080000
+ #define	SICFR_MPSA	0x00200000
+ #define	SICFR_MPSB	0x00400000
+@@ -45,13 +44,11 @@ struct ipic {
+ 
+ 	/* The remapper for this IPIC */
+ 	struct irq_host		*irqhost;
+-
+-	/* The "linux" controller struct */
+-	struct irq_chip		hc_irq;
+ };
+ 
+ struct ipic_info {
+-	u8	pend;		/* pending register offset from base */
++	u8	ack;		/* pending register offset from base if the irq
++				   supports ack operation */
+ 	u8	mask;		/* mask register offset from base */
+ 	u8	prio;		/* priority register offset from base */
+ 	u8	force;		/* force register offset from base */
+diff --git a/arch/powerpc/sysdev/micropatch.c b/arch/powerpc/sysdev/micropatch.c
+index 712b10a..d8d6028 100644
+--- a/arch/powerpc/sysdev/micropatch.c
++++ b/arch/powerpc/sysdev/micropatch.c
+@@ -16,7 +16,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/8xx_immap.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ /*
+  * I2C/SPI relocation patch arrays.
+diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
+index e073e24..7b49633 100644
+--- a/arch/powerpc/sysdev/mmio_nvram.c
++++ b/arch/powerpc/sysdev/mmio_nvram.c
+@@ -99,7 +99,7 @@ int __init mmio_nvram_init(void)
+ 	nvram_addr = r.start;
+ 	mmio_nvram_len = r.end - r.start + 1;
+ 	if ( (!mmio_nvram_len) || (!nvram_addr) ) {
+-		printk(KERN_WARNING "nvram: address or lenght is 0\n");
++		printk(KERN_WARNING "nvram: address or length is 0\n");
+ 		ret = -EIO;
+ 		goto out;
+ 	}
+diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
+index 7aa4ff5..0e74a4b 100644
+--- a/arch/powerpc/sysdev/mpc8xx_pic.c
++++ b/arch/powerpc/sysdev/mpc8xx_pic.c
+@@ -10,7 +10,6 @@
+ #include <asm/irq.h>
+ #include <asm/io.h>
+ #include <asm/8xx_immap.h>
+-#include <asm/mpc8xx.h>
+ 
+ #include "mpc8xx_pic.h"
+ 
+diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
+index e479388..6ffdda2 100644
+--- a/arch/powerpc/sysdev/mpic.c
++++ b/arch/powerpc/sysdev/mpic.c
+@@ -83,6 +83,7 @@ static u32 mpic_infos[][MPIC_IDX_END] = {
+ 		MPIC_CPU_WHOAMI,
+ 		MPIC_CPU_INTACK,
+ 		MPIC_CPU_EOI,
++		MPIC_CPU_MCACK,
+ 
+ 		MPIC_IRQ_BASE,
+ 		MPIC_IRQ_STRIDE,
+@@ -121,6 +122,7 @@ static u32 mpic_infos[][MPIC_IDX_END] = {
+ 		TSI108_CPU_WHOAMI,
+ 		TSI108_CPU_INTACK,
+ 		TSI108_CPU_EOI,
++		TSI108_CPU_MCACK,
+ 
+ 		TSI108_IRQ_BASE,
+ 		TSI108_IRQ_STRIDE,
+@@ -265,7 +267,7 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
+  */
+ 
+ 
+-static void _mpic_map_mmio(struct mpic *mpic, unsigned long phys_addr,
++static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
+ 			   struct mpic_reg_bank *rb, unsigned int offset,
+ 			   unsigned int size)
+ {
+@@ -285,7 +287,7 @@ static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
+ 	BUG_ON(!DCR_MAP_OK(rb->dhost));
+ }
+ 
+-static inline void mpic_map(struct mpic *mpic, unsigned long phys_addr,
++static inline void mpic_map(struct mpic *mpic, phys_addr_t phys_addr,
+ 			    struct mpic_reg_bank *rb, unsigned int offset,
+ 			    unsigned int size)
+ {
+@@ -612,12 +614,11 @@ static inline void mpic_eoi(struct mpic *mpic)
+ }
+ 
+ #ifdef CONFIG_SMP
+-static irqreturn_t mpic_ipi_action(int irq, void *dev_id)
++static irqreturn_t mpic_ipi_action(int irq, void *data)
+ {
+-	struct mpic *mpic;
++	long ipi = (long)data;
+ 
+-	mpic = mpic_find(irq, NULL);
+-	smp_message_recv(mpic_irq_to_hw(irq) - mpic->ipi_vecs[0]);
++	smp_message_recv(ipi);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -842,6 +843,24 @@ int mpic_set_irq_type(unsigned int virq, unsigned int flow_type)
+ 	return 0;
+ }
+ 
++void mpic_set_vector(unsigned int virq, unsigned int vector)
++{
++	struct mpic *mpic = mpic_from_irq(virq);
++	unsigned int src = mpic_irq_to_hw(virq);
++	unsigned int vecpri;
++
++	DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
++	    mpic, virq, src, vector);
++
++	if (src >= mpic->irq_count)
++		return;
++
++	vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
++	vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK);
++	vecpri |= vector;
++	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
++}
++
+ static struct irq_chip mpic_irq_chip = {
+ 	.mask		= mpic_mask_irq,
+ 	.unmask		= mpic_unmask_irq,
+@@ -1109,6 +1128,11 @@ struct mpic * __init mpic_alloc(struct device_node *node,
+ 			mb();
+ 	}
+ 
++	if (flags & MPIC_ENABLE_MCK)
++		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
++			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
++			   | MPIC_GREG_GCONF_MCK);
++
+ 	/* Read feature register, calculate num CPUs and, for non-ISU
+ 	 * MPICs, num sources as well. On ISU MPICs, sources are counted
+ 	 * as ISUs are added
+@@ -1230,6 +1254,8 @@ void __init mpic_init(struct mpic *mpic)
+ 		mpic_u3msi_init(mpic);
+ 	}
+ 
++	mpic_pasemi_msi_init(mpic);
++
+ 	for (i = 0; i < mpic->num_sources; i++) {
+ 		/* start with vector = source number, and masked */
+ 		u32 vecpri = MPIC_VECPRI_MASK | i |
+@@ -1253,6 +1279,11 @@ void __init mpic_init(struct mpic *mpic)
+ 			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
+ 			   | MPIC_GREG_GCONF_8259_PTHROU_DIS);
+ 
++	if (mpic->flags & MPIC_NO_BIAS)
++		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
++			mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
++			| MPIC_GREG_GCONF_NO_BIAS);
++
+ 	/* Set current processor priority to 0 */
+ 	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
+ 
+@@ -1419,13 +1450,13 @@ void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
+ 		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
+ }
+ 
+-unsigned int mpic_get_one_irq(struct mpic *mpic)
++static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
+ {
+ 	u32 src;
+ 
+-	src = mpic_cpu_read(MPIC_INFO(CPU_INTACK)) & MPIC_INFO(VECPRI_VECTOR_MASK);
++	src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK);
+ #ifdef DEBUG_LOW
+-	DBG("%s: get_one_irq(): %d\n", mpic->name, src);
++	DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src);
+ #endif
+ 	if (unlikely(src == mpic->spurious_vec)) {
+ 		if (mpic->flags & MPIC_SPV_EOI)
+@@ -1443,6 +1474,11 @@ unsigned int mpic_get_one_irq(struct mpic *mpic)
+ 	return irq_linear_revmap(mpic->irqhost, src);
+ }
+ 
++unsigned int mpic_get_one_irq(struct mpic *mpic)
++{
++	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK));
++}
++
+ unsigned int mpic_get_irq(void)
+ {
+ 	struct mpic *mpic = mpic_primary;
+@@ -1452,12 +1488,20 @@ unsigned int mpic_get_irq(void)
+ 	return mpic_get_one_irq(mpic);
+ }
+ 
++unsigned int mpic_get_mcirq(void)
++{
++	struct mpic *mpic = mpic_primary;
++
++	BUG_ON(mpic == NULL);
++
++	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK));
++}
+ 
+ #ifdef CONFIG_SMP
+ void mpic_request_ipis(void)
+ {
+ 	struct mpic *mpic = mpic_primary;
+-	int i, err;
++	long i, err;
+ 	static char *ipi_names[] = {
+ 		"IPI0 (call function)",
+ 		"IPI1 (reschedule)",
+@@ -1472,14 +1516,14 @@ void mpic_request_ipis(void)
+ 		unsigned int vipi = irq_create_mapping(mpic->irqhost,
+ 						       mpic->ipi_vecs[0] + i);
+ 		if (vipi == NO_IRQ) {
+-			printk(KERN_ERR "Failed to map IPI %d\n", i);
++			printk(KERN_ERR "Failed to map IPI %ld\n", i);
+ 			break;
+ 		}
+ 		err = request_irq(vipi, mpic_ipi_action,
+ 				  IRQF_DISABLED|IRQF_PERCPU,
+-				  ipi_names[i], mpic);
++				  ipi_names[i], (void *)i);
+ 		if (err) {
+-			printk(KERN_ERR "Request of irq %d for IPI %d failed\n",
++			printk(KERN_ERR "Request of irq %d for IPI %ld failed\n",
+ 			       vipi, i);
+ 			break;
+ 		}
+@@ -1584,7 +1628,7 @@ static struct sysdev_class mpic_sysclass = {
+ 	.resume = mpic_resume,
+ 	.suspend = mpic_suspend,
+ #endif
+-	set_kset_name("mpic"),
++	.name = "mpic",
+ };
+ 
+ static int mpic_init_sys(void)
+diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
+index 1cb6bd8..fbf8a26 100644
+--- a/arch/powerpc/sysdev/mpic.h
++++ b/arch/powerpc/sysdev/mpic.h
+@@ -17,6 +17,7 @@ extern int mpic_msi_init_allocator(struct mpic *mpic);
+ extern irq_hw_number_t mpic_msi_alloc_hwirqs(struct mpic *mpic, int num);
+ extern void mpic_msi_free_hwirqs(struct mpic *mpic, int offset, int num);
+ extern int mpic_u3msi_init(struct mpic *mpic);
++extern int mpic_pasemi_msi_init(struct mpic *mpic);
+ #else
+ static inline void mpic_msi_reserve_hwirq(struct mpic *mpic,
+ 					  irq_hw_number_t hwirq)
+@@ -28,12 +29,15 @@ static inline int mpic_u3msi_init(struct mpic *mpic)
+ {
+ 	return -1;
+ }
++
++static inline int mpic_pasemi_msi_init(struct mpic *mpic)
++{
++	return -1;
++}
+ #endif
+ 
+ extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
+-extern void mpic_end_irq(unsigned int irq);
+-extern void mpic_mask_irq(unsigned int irq);
+-extern void mpic_unmask_irq(unsigned int irq);
++extern void mpic_set_vector(unsigned int virq, unsigned int vector);
+ extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask);
+ 
+ #endif /* _POWERPC_SYSDEV_MPIC_H */
+diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+new file mode 100644
+index 0000000..d6bfda3
+--- /dev/null
++++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+@@ -0,0 +1,172 @@
++/*
++ * Copyright 2007, Olof Johansson, PA Semi
++ *
++ * Based on arch/powerpc/sysdev/mpic_u3msi.c:
++ *
++ * Copyright 2006, Segher Boessenkool, IBM Corporation.
++ * Copyright 2006-2007, Michael Ellerman, IBM Corporation.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2 of the
++ * License.
++ *
++ */
++
++#undef DEBUG
++
++#include <linux/irq.h>
++#include <linux/bootmem.h>
++#include <linux/msi.h>
++#include <asm/mpic.h>
++#include <asm/prom.h>
++#include <asm/hw_irq.h>
++#include <asm/ppc-pci.h>
++
++#include "mpic.h"
++
++/* Allocate 16 interrupts per device, to give an alignment of 16,
++ * since that's the size of the grouping w.r.t. affinity. If someone
++ * needs more than 32 MSI's down the road we'll have to rethink this,
++ * but it should be OK for now.
++ */
++#define ALLOC_CHUNK 16
++
++#define PASEMI_MSI_ADDR 0xfc080000
++
++/* A bit ugly, can we get this from the pci_dev somehow? */
++static struct mpic *msi_mpic;
++
++
++static void mpic_pasemi_msi_mask_irq(unsigned int irq)
++{
++	pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq);
++	mask_msi_irq(irq);
++	mpic_mask_irq(irq);
++}
++
++static void mpic_pasemi_msi_unmask_irq(unsigned int irq)
++{
++	pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq);
++	mpic_unmask_irq(irq);
++	unmask_msi_irq(irq);
++}
++
++static struct irq_chip mpic_pasemi_msi_chip = {
++	.shutdown	= mpic_pasemi_msi_mask_irq,
++	.mask		= mpic_pasemi_msi_mask_irq,
++	.unmask		= mpic_pasemi_msi_unmask_irq,
++	.eoi		= mpic_end_irq,
++	.set_type	= mpic_set_irq_type,
++	.set_affinity	= mpic_set_affinity,
++	.typename	= "PASEMI-MSI ",
++};
++
++static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
++{
++	if (type == PCI_CAP_ID_MSIX)
++		pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");
++
++	return 0;
++}
++
++static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
++{
++	struct msi_desc *entry;
++
++	pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
++
++	list_for_each_entry(entry, &pdev->msi_list, list) {
++		if (entry->irq == NO_IRQ)
++			continue;
++
++		set_irq_msi(entry->irq, NULL);
++		mpic_msi_free_hwirqs(msi_mpic, virq_to_hw(entry->irq),
++				     ALLOC_CHUNK);
++		irq_dispose_mapping(entry->irq);
++	}
++
++	return;
++}
++
++static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
++{
++	irq_hw_number_t hwirq;
++	unsigned int virq;
++	struct msi_desc *entry;
++	struct msi_msg msg;
++	u64 addr;
++
++	pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n",
++		 pdev, nvec, type);
++
++	msg.address_hi = 0;
++	msg.address_lo = PASEMI_MSI_ADDR;
++
++	list_for_each_entry(entry, &pdev->msi_list, list) {
++		/* Allocate 16 interrupts for now, since that's the grouping for
++		 * affinity. This can be changed later if it turns out 32 is too
++		 * few MSIs for someone, but restrictions will apply to how the
++		 * sources can be changed independently.
++		 */
++		hwirq = mpic_msi_alloc_hwirqs(msi_mpic, ALLOC_CHUNK);
++		if (hwirq < 0) {
++			pr_debug("pasemi_msi: failed allocating hwirq\n");
++			return hwirq;
++		}
++
++		virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
++		if (virq == NO_IRQ) {
++			pr_debug("pasemi_msi: failed mapping hwirq 0x%lx\n", hwirq);
++			mpic_msi_free_hwirqs(msi_mpic, hwirq, ALLOC_CHUNK);
++			return -ENOSPC;
++		}
++
++		/* Vector on MSI is really an offset, the hardware adds
++		 * it to the value written at the magic address. So set
++		 * it to 0 to remain sane.
++		 */
++		mpic_set_vector(virq, 0);
++
++		set_irq_msi(virq, entry);
++		set_irq_chip(virq, &mpic_pasemi_msi_chip);
++		set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
++
++		pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%lx) addr 0x%lx\n",
++			  virq, hwirq, addr);
++
++		/* Likewise, the device writes [0...511] into the target
++		 * register to generate MSI [512...1023]
++		 */
++		msg.data = hwirq-0x200;
++		write_msi_msg(virq, &msg);
++	}
++
++	return 0;
++}
++
++int mpic_pasemi_msi_init(struct mpic *mpic)
++{
++	int rc;
++
++	if (!mpic->irqhost->of_node ||
++	    !of_device_is_compatible(mpic->irqhost->of_node,
++				     "pasemi,pwrficient-openpic"))
++		return -ENODEV;
++
++	rc = mpic_msi_init_allocator(mpic);
++	if (rc) {
++		pr_debug("pasemi_msi: Error allocating bitmap!\n");
++		return rc;
++	}
++
++	pr_debug("pasemi_msi: Registering PA Semi MPIC MSI callbacks\n");
++
++	msi_mpic = mpic;
++	WARN_ON(ppc_md.setup_msi_irqs);
++	ppc_md.setup_msi_irqs = pasemi_msi_setup_msi_irqs;
++	ppc_md.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs;
++	ppc_md.msi_check_device = pasemi_msi_check_device;
++
++	return 0;
++}
+diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
+index 548a320..efda002 100644
+--- a/arch/powerpc/sysdev/mv64x60_dev.c
++++ b/arch/powerpc/sysdev/mv64x60_dev.c
+@@ -241,7 +241,7 @@ static int __init mv64x60_eth_device_setup(struct device_node *np, int id)
+ 
+ 	/* only register the shared platform device the first time through */
+ 	if (id == 0 && (err = eth_register_shared_pdev(np)))
+-		return err;;
++		return err;
+ 
+ 	memset(r, 0, sizeof(r));
+ 	of_irq_to_resource(np, 0, &r[0]);
+@@ -361,12 +361,6 @@ static int __init mv64x60_i2c_device_setup(struct device_node *np, int id)
+ 	else
+ 		pdata.timeout = 1000;	/* 1 second */
+ 
+-	prop = of_get_property(np, "retries", NULL);
+-	if (prop)
+-		pdata.retries = *prop;
+-	else
+-		pdata.retries = 1;
+-
+ 	pdev = platform_device_alloc(MV64XXX_I2C_CTLR_NAME, id);
+ 	if (!pdev)
+ 		return -ENOMEM;
+@@ -451,22 +445,19 @@ static int __init mv64x60_device_setup(void)
+ 	int id;
+ 	int err;
+ 
+-	for (id = 0;
+-	     (np = of_find_compatible_node(np, "serial", "marvell,mpsc")); id++)
+-		if ((err = mv64x60_mpsc_device_setup(np, id)))
++	id = 0;
++	for_each_compatible_node(np, "serial", "marvell,mpsc")
++		if ((err = mv64x60_mpsc_device_setup(np, id++)))
+ 			goto error;
+ 
+-	for (id = 0;
+-	     (np = of_find_compatible_node(np, "network",
+-					   "marvell,mv64x60-eth"));
+-	     id++)
+-		if ((err = mv64x60_eth_device_setup(np, id)))
++	id = 0;
++	for_each_compatible_node(np, "network", "marvell,mv64x60-eth")
++		if ((err = mv64x60_eth_device_setup(np, id++)))
+ 			goto error;
+ 
+-	for (id = 0;
+-	     (np = of_find_compatible_node(np, "i2c", "marvell,mv64x60-i2c"));
+-	     id++)
+-		if ((err = mv64x60_i2c_device_setup(np, id)))
++	id = 0;
++	for_each_compatible_node(np, "i2c", "marvell,mv64x60-i2c")
++		if ((err = mv64x60_i2c_device_setup(np, id++)))
+ 			goto error;
+ 
+ 	/* support up to one watchdog timer */
+@@ -477,7 +468,6 @@ static int __init mv64x60_device_setup(void)
+ 		of_node_put(np);
+ 	}
+ 
+-
+ 	return 0;
+ 
+ error:
+diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c
+index 6933f9c..d21ab8f 100644
+--- a/arch/powerpc/sysdev/mv64x60_pci.c
++++ b/arch/powerpc/sysdev/mv64x60_pci.c
+@@ -164,8 +164,8 @@ static int __init mv64x60_add_bridge(struct device_node *dev)
+ 
+ void __init mv64x60_pci_init(void)
+ {
+-	struct device_node *np = NULL;
++	struct device_node *np;
+ 
+-	while ((np = of_find_compatible_node(np, "pci", "marvell,mv64x60-pci")))
++	for_each_compatible_node(np, "pci", "marvell,mv64x60-pci")
+ 		mv64x60_add_bridge(np);
+ }
+diff --git a/arch/powerpc/sysdev/mv64x60_udbg.c b/arch/powerpc/sysdev/mv64x60_udbg.c
+index 367e7b1..35c77c7 100644
+--- a/arch/powerpc/sysdev/mv64x60_udbg.c
++++ b/arch/powerpc/sysdev/mv64x60_udbg.c
+@@ -85,10 +85,10 @@ static void mv64x60_udbg_init(void)
+ 	if (!stdout)
+ 		return;
+ 
+-	for (np = NULL;
+-	     (np = of_find_compatible_node(np, "serial", "marvell,mpsc")); )
++	for_each_compatible_node(np, "serial", "marvell,mpsc") {
+ 		if (np == stdout)
+ 			break;
++	}
+ 
+ 	of_node_put(stdout);
+ 	if (!np)
+diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c
+new file mode 100644
+index 0000000..3d54450
+--- /dev/null
++++ b/arch/powerpc/sysdev/of_rtc.c
+@@ -0,0 +1,59 @@
++/*
++ * Instantiate mmio-mapped RTC chips based on device tree information
++ *
++ * Copyright 2007 David Gibson <dwg at au1.ibm.com>, IBM Corporation.
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++#include <linux/kernel.h>
++#include <linux/of.h>
++#include <linux/init.h>
++#include <linux/of_platform.h>
++
++static __initdata struct {
++	const char *compatible;
++	char *plat_name;
++} of_rtc_table[] = {
++	{ "ds1743-nvram", "rtc-ds1742" },
++};
++
++void __init of_instantiate_rtc(void)
++{
++	struct device_node *node;
++	int err;
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(of_rtc_table); i++) {
++		char *plat_name = of_rtc_table[i].plat_name;
++
++		for_each_compatible_node(node, NULL,
++					 of_rtc_table[i].compatible) {
++			struct resource *res;
++
++			res = kmalloc(sizeof(*res), GFP_KERNEL);
++			if (!res) {
++				printk(KERN_ERR "OF RTC: Out of memory "
++				       "allocating resource structure for %s\n",
++				       node->full_name);
++				continue;
++			}
++
++			err = of_address_to_resource(node, 0, res);
++			if (err) {
++				printk(KERN_ERR "OF RTC: Error "
++				       "translating resources for %s\n",
++				       node->full_name);
++				continue;
++			}
++
++			printk(KERN_INFO "OF_RTC: %s is a %s @ 0x%llx-0x%llx\n",
++			       node->full_name, plat_name,
++			       (unsigned long long)res->start,
++			       (unsigned long long)res->end);
++			platform_device_register_simple(plat_name, -1, res, 1);
++		}
++	}
++}
+diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
+index 20edd1e..c858749 100644
+--- a/arch/powerpc/sysdev/pmi.c
++++ b/arch/powerpc/sysdev/pmi.c
+@@ -28,9 +28,9 @@
+ #include <linux/completion.h>
+ #include <linux/spinlock.h>
+ #include <linux/workqueue.h>
++#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ 
+-#include <asm/of_device.h>
+-#include <asm/of_platform.h>
+ #include <asm/io.h>
+ #include <asm/pmi.h>
+ #include <asm/prom.h>
+diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
+new file mode 100644
+index 0000000..5abfcd1
+--- /dev/null
++++ b/arch/powerpc/sysdev/ppc4xx_pci.c
+@@ -0,0 +1,1528 @@
++/*
++ * PCI / PCI-X / PCI-Express support for 4xx parts
++ *
++ * Copyright 2007 Ben. Herrenschmidt <benh at kernel.crashing.org>, IBM Corp.
++ *
++ * Most PCI Express code is coming from Stefan Roese implementation for
++ * arch/ppc in the Denx tree, slightly reworked by me.
++ *
++ * Copyright 2007 DENX Software Engineering, Stefan Roese <sr at denx.de>
++ *
++ * Some of that comes itself from a previous implementation for 440SPE only
++ * by Roland Dreier:
++ *
++ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
++ * Roland Dreier <rolandd at cisco.com>
++ *
++ */
++
++#undef DEBUG
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/of.h>
++#include <linux/bootmem.h>
++#include <linux/delay.h>
++
++#include <asm/io.h>
++#include <asm/pci-bridge.h>
++#include <asm/machdep.h>
++#include <asm/dcr.h>
++#include <asm/dcr-regs.h>
++
++#include "ppc4xx_pci.h"
++
++static int dma_offset_set;
++
++/* Move that to a useable header */
++extern unsigned long total_memory;
++
++#define U64_TO_U32_LOW(val)	((u32)((val) & 0x00000000ffffffffULL))
++#define U64_TO_U32_HIGH(val)	((u32)((val) >> 32))
++
++#ifdef CONFIG_RESOURCES_64BIT
++#define RES_TO_U32_LOW(val)	U64_TO_U32_LOW(val)
++#define RES_TO_U32_HIGH(val)	U64_TO_U32_HIGH(val)
++#else
++#define RES_TO_U32_LOW(val)	(val)
++#define RES_TO_U32_HIGH(val)	(0)
++#endif
++
++static inline int ppc440spe_revA(void)
++{
++	/* Catch both 440SPe variants, with and without RAID6 support */
++        if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
++                return 1;
++        else
++                return 0;
++}
++
++static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
++{
++	struct pci_controller *hose;
++	int i;
++
++	if (dev->devfn != 0 || dev->bus->self != NULL)
++		return;
++
++	hose = pci_bus_to_host(dev->bus);
++	if (hose == NULL)
++		return;
++
++	if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
++	    !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
++	    !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
++		return;
++
++	/* Hide the PCI host BARs from the kernel as their content doesn't
++	 * fit well in the resource management
++	 */
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++		dev->resource[i].start = dev->resource[i].end = 0;
++		dev->resource[i].flags = 0;
++	}
++
++	printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
++	       pci_name(dev));
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
++
++static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
++					  void __iomem *reg,
++					  struct resource *res)
++{
++	u64 size;
++	const u32 *ranges;
++	int rlen;
++	int pna = of_n_addr_cells(hose->dn);
++	int np = pna + 5;
++
++	/* Default */
++	res->start = 0;
++	res->end = size = 0x80000000;
++	res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
++
++	/* Get dma-ranges property */
++	ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
++	if (ranges == NULL)
++		goto out;
++
++	/* Walk it */
++	while ((rlen -= np * 4) >= 0) {
++		u32 pci_space = ranges[0];
++		u64 pci_addr = of_read_number(ranges + 1, 2);
++		u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
++		size = of_read_number(ranges + pna + 3, 2);
++		ranges += np;
++		if (cpu_addr == OF_BAD_ADDR || size == 0)
++			continue;
++
++		/* We only care about memory */
++		if ((pci_space & 0x03000000) != 0x02000000)
++			continue;
++
++		/* We currently only support memory at 0, and pci_addr
++		 * within 32 bits space
++		 */
++		if (cpu_addr != 0 || pci_addr > 0xffffffff) {
++			printk(KERN_WARNING "%s: Ignored unsupported dma range"
++			       " 0x%016llx...0x%016llx -> 0x%016llx\n",
++			       hose->dn->full_name,
++			       pci_addr, pci_addr + size - 1, cpu_addr);
++			continue;
++		}
++
++		/* Check if not prefetchable */
++		if (!(pci_space & 0x40000000))
++			res->flags &= ~IORESOURCE_PREFETCH;
++
++
++		/* Use that */
++		res->start = pci_addr;
++#ifndef CONFIG_RESOURCES_64BIT
++		/* Beware of 32 bits resources */
++		if ((pci_addr + size) > 0x100000000ull)
++			res->end = 0xffffffff;
++		else
++#endif
++			res->end = res->start + size - 1;
++		break;
++	}
++
++	/* We only support one global DMA offset */
++	if (dma_offset_set && pci_dram_offset != res->start) {
++		printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
++		       hose->dn->full_name);
++		return -ENXIO;
++	}
++
++	/* Check that we can fit all of memory as we don't support
++	 * DMA bounce buffers
++	 */
++	if (size < total_memory) {
++		printk(KERN_ERR "%s: dma-ranges too small "
++		       "(size=%llx total_memory=%lx)\n",
++		       hose->dn->full_name, size, total_memory);
++		return -ENXIO;
++	}
++
++	/* Check we are a power of 2 size and that base is a multiple of size*/
++	if (!is_power_of_2(size) ||
++	    (res->start & (size - 1)) != 0) {
++		printk(KERN_ERR "%s: dma-ranges unaligned\n",
++		       hose->dn->full_name);
++		return -ENXIO;
++	}
++
++	/* Check that we are fully contained within 32 bits space */
++	if (res->end > 0xffffffff) {
++		printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
++		       hose->dn->full_name);
++		return -ENXIO;
++	}
++ out:
++	dma_offset_set = 1;
++	pci_dram_offset = res->start;
++
++	printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
++	       pci_dram_offset);
++	return 0;
++}
++
++/*
++ * 4xx PCI 2.x part
++ */
++
++static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
++					     void __iomem *reg)
++{
++	u32 la, ma, pcila, pciha;
++	int i, j;
++
++	/* Setup outbound memory windows */
++	for (i = j = 0; i < 3; i++) {
++		struct resource *res = &hose->mem_resources[i];
++
++		/* we only care about memory windows */
++		if (!(res->flags & IORESOURCE_MEM))
++			continue;
++		if (j > 2) {
++			printk(KERN_WARNING "%s: Too many ranges\n",
++			       hose->dn->full_name);
++			break;
++		}
++
++		/* Calculate register values */
++		la = res->start;
++		pciha = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
++		pcila = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
++
++		ma = res->end + 1 - res->start;
++		if (!is_power_of_2(ma) || ma < 0x1000 || ma > 0xffffffffu) {
++			printk(KERN_WARNING "%s: Resource out of range\n",
++			       hose->dn->full_name);
++			continue;
++		}
++		ma = (0xffffffffu << ilog2(ma)) | 0x1;
++		if (res->flags & IORESOURCE_PREFETCH)
++			ma |= 0x2;
++
++		/* Program register values */
++		writel(la, reg + PCIL0_PMM0LA + (0x10 * j));
++		writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * j));
++		writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * j));
++		writel(ma, reg + PCIL0_PMM0MA + (0x10 * j));
++		j++;
++	}
++}
++
++static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
++					     void __iomem *reg,
++					     const struct resource *res)
++{
++	resource_size_t size = res->end - res->start + 1;
++	u32 sa;
++
++	/* Calculate window size */
++	sa = (0xffffffffu << ilog2(size)) | 1;
++	sa |= 0x1;
++
++	/* RAM is always at 0 local for now */
++	writel(0, reg + PCIL0_PTM1LA);
++	writel(sa, reg + PCIL0_PTM1MS);
++
++	/* Map on PCI side */
++	early_write_config_dword(hose, hose->first_busno, 0,
++				 PCI_BASE_ADDRESS_1, res->start);
++	early_write_config_dword(hose, hose->first_busno, 0,
++				 PCI_BASE_ADDRESS_2, 0x00000000);
++	early_write_config_word(hose, hose->first_busno, 0,
++				PCI_COMMAND, 0x0006);
++}
++
++static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
++{
++	/* NYI */
++	struct resource rsrc_cfg;
++	struct resource rsrc_reg;
++	struct resource dma_window;
++	struct pci_controller *hose = NULL;
++	void __iomem *reg = NULL;
++	const int *bus_range;
++	int primary = 0;
++
++	/* Fetch config space registers address */
++	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
++		printk(KERN_ERR "%s:Can't get PCI config register base !",
++		       np->full_name);
++		return;
++	}
++	/* Fetch host bridge internal registers address */
++	if (of_address_to_resource(np, 3, &rsrc_reg)) {
++		printk(KERN_ERR "%s: Can't get PCI internal register base !",
++		       np->full_name);
++		return;
++	}
++
++	/* Check if primary bridge */
++	if (of_get_property(np, "primary", NULL))
++		primary = 1;
++
++	/* Get bus range if any */
++	bus_range = of_get_property(np, "bus-range", NULL);
++
++	/* Map registers */
++	reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start);
++	if (reg == NULL) {
++		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
++		goto fail;
++	}
++
++	/* Allocate the host controller data structure */
++	hose = pcibios_alloc_controller(np);
++	if (!hose)
++		goto fail;
++
++	hose->first_busno = bus_range ? bus_range[0] : 0x0;
++	hose->last_busno = bus_range ? bus_range[1] : 0xff;
++
++	/* Setup config space */
++	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
++
++	/* Disable all windows */
++	writel(0, reg + PCIL0_PMM0MA);
++	writel(0, reg + PCIL0_PMM1MA);
++	writel(0, reg + PCIL0_PMM2MA);
++	writel(0, reg + PCIL0_PTM1MS);
++	writel(0, reg + PCIL0_PTM2MS);
++
++	/* Parse outbound mapping resources */
++	pci_process_bridge_OF_ranges(hose, np, primary);
++
++	/* Parse inbound mapping resources */
++	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
++		goto fail;
++
++	/* Configure outbound ranges POMs */
++	ppc4xx_configure_pci_PMMs(hose, reg);
++
++	/* Configure inbound ranges PIMs */
++	ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);
++
++	/* We don't need the registers anymore */
++	iounmap(reg);
++	return;
++
++ fail:
++	if (hose)
++		pcibios_free_controller(hose);
++	if (reg)
++		iounmap(reg);
++}
++
++/*
++ * 4xx PCI-X part
++ */
++
++static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
++					      void __iomem *reg)
++{
++	u32 lah, lal, pciah, pcial, sa;
++	int i, j;
++
++	/* Setup outbound memory windows */
++	for (i = j = 0; i < 3; i++) {
++		struct resource *res = &hose->mem_resources[i];
++
++		/* we only care about memory windows */
++		if (!(res->flags & IORESOURCE_MEM))
++			continue;
++		if (j > 1) {
++			printk(KERN_WARNING "%s: Too many ranges\n",
++			       hose->dn->full_name);
++			break;
++		}
++
++		/* Calculate register values */
++		lah = RES_TO_U32_HIGH(res->start);
++		lal = RES_TO_U32_LOW(res->start);
++		pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
++		pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
++		sa = res->end + 1 - res->start;
++		if (!is_power_of_2(sa) || sa < 0x100000 ||
++		    sa > 0xffffffffu) {
++			printk(KERN_WARNING "%s: Resource out of range\n",
++			       hose->dn->full_name);
++			continue;
++		}
++		sa = (0xffffffffu << ilog2(sa)) | 0x1;
++
++		/* Program register values */
++		if (j == 0) {
++			writel(lah, reg + PCIX0_POM0LAH);
++			writel(lal, reg + PCIX0_POM0LAL);
++			writel(pciah, reg + PCIX0_POM0PCIAH);
++			writel(pcial, reg + PCIX0_POM0PCIAL);
++			writel(sa, reg + PCIX0_POM0SA);
++		} else {
++			writel(lah, reg + PCIX0_POM1LAH);
++			writel(lal, reg + PCIX0_POM1LAL);
++			writel(pciah, reg + PCIX0_POM1PCIAH);
++			writel(pcial, reg + PCIX0_POM1PCIAL);
++			writel(sa, reg + PCIX0_POM1SA);
++		}
++		j++;
++	}
++}
++
++static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
++					      void __iomem *reg,
++					      const struct resource *res,
++					      int big_pim,
++					      int enable_msi_hole)
++{
++	resource_size_t size = res->end - res->start + 1;
++	u32 sa;
++
++	/* RAM is always at 0 */
++	writel(0x00000000, reg + PCIX0_PIM0LAH);
++	writel(0x00000000, reg + PCIX0_PIM0LAL);
++
++	/* Calculate window size */
++	sa = (0xffffffffu << ilog2(size)) | 1;
++	sa |= 0x1;
++	if (res->flags & IORESOURCE_PREFETCH)
++		sa |= 0x2;
++	if (enable_msi_hole)
++		sa |= 0x4;
++	writel(sa, reg + PCIX0_PIM0SA);
++	if (big_pim)
++		writel(0xffffffff, reg + PCIX0_PIM0SAH);
++
++	/* Map on PCI side */
++	writel(0x00000000, reg + PCIX0_BAR0H);
++	writel(res->start, reg + PCIX0_BAR0L);
++	writew(0x0006, reg + PCIX0_COMMAND);
++}
++
++static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
++{
++	struct resource rsrc_cfg;
++	struct resource rsrc_reg;
++	struct resource dma_window;
++	struct pci_controller *hose = NULL;
++	void __iomem *reg = NULL;
++	const int *bus_range;
++	int big_pim = 0, msi = 0, primary = 0;
++
++	/* Fetch config space registers address */
++	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
++		printk(KERN_ERR "%s:Can't get PCI-X config register base !",
++		       np->full_name);
++		return;
++	}
++	/* Fetch host bridge internal registers address */
++	if (of_address_to_resource(np, 3, &rsrc_reg)) {
++		printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
++		       np->full_name);
++		return;
++	}
++
++	/* Check if it supports large PIMs (440GX) */
++	if (of_get_property(np, "large-inbound-windows", NULL))
++		big_pim = 1;
++
++	/* Check if we should enable MSIs inbound hole */
++	if (of_get_property(np, "enable-msi-hole", NULL))
++		msi = 1;
++
++	/* Check if primary bridge */
++	if (of_get_property(np, "primary", NULL))
++		primary = 1;
++
++	/* Get bus range if any */
++	bus_range = of_get_property(np, "bus-range", NULL);
++
++	/* Map registers */
++	reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start);
++	if (reg == NULL) {
++		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
++		goto fail;
++	}
++
++	/* Allocate the host controller data structure */
++	hose = pcibios_alloc_controller(np);
++	if (!hose)
++		goto fail;
++
++	hose->first_busno = bus_range ? bus_range[0] : 0x0;
++	hose->last_busno = bus_range ? bus_range[1] : 0xff;
++
++	/* Setup config space */
++	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
++
++	/* Disable all windows */
++	writel(0, reg + PCIX0_POM0SA);
++	writel(0, reg + PCIX0_POM1SA);
++	writel(0, reg + PCIX0_POM2SA);
++	writel(0, reg + PCIX0_PIM0SA);
++	writel(0, reg + PCIX0_PIM1SA);
++	writel(0, reg + PCIX0_PIM2SA);
++	if (big_pim) {
++		writel(0, reg + PCIX0_PIM0SAH);
++		writel(0, reg + PCIX0_PIM2SAH);
++	}
++
++	/* Parse outbound mapping resources */
++	pci_process_bridge_OF_ranges(hose, np, primary);
++
++	/* Parse inbound mapping resources */
++	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
++		goto fail;
++
++	/* Configure outbound ranges POMs */
++	ppc4xx_configure_pcix_POMs(hose, reg);
++
++	/* Configure inbound ranges PIMs */
++	ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
++
++	/* We don't need the registers anymore */
++	iounmap(reg);
++	return;
++
++ fail:
++	if (hose)
++		pcibios_free_controller(hose);
++	if (reg)
++		iounmap(reg);
++}
++
++#ifdef CONFIG_PPC4xx_PCI_EXPRESS
++
++/*
++ * 4xx PCI-Express part
++ *
++ * We support 3 parts currently based on the compatible property:
++ *
++ * ibm,plb-pciex-440spe
++ * ibm,plb-pciex-405ex
++ *
++ * Anything else will be rejected for now as they are all subtly
++ * different unfortunately.
++ *
++ */
++
++#define MAX_PCIE_BUS_MAPPED	0x40
++
++struct ppc4xx_pciex_port
++{
++	struct pci_controller	*hose;
++	struct device_node	*node;
++	unsigned int		index;
++	int			endpoint;
++	int			link;
++	int			has_ibpre;
++	unsigned int		sdr_base;
++	dcr_host_t		dcrs;
++	struct resource		cfg_space;
++	struct resource		utl_regs;
++	void __iomem		*utl_base;
++};
++
++static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
++static unsigned int ppc4xx_pciex_port_count;
++
++struct ppc4xx_pciex_hwops
++{
++	int (*core_init)(struct device_node *np);
++	int (*port_init_hw)(struct ppc4xx_pciex_port *port);
++	int (*setup_utl)(struct ppc4xx_pciex_port *port);
++};
++
++static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
++
++#ifdef CONFIG_44x
++
++/* Check various reset bits of the 440SPe PCIe core */
++static int __init ppc440spe_pciex_check_reset(struct device_node *np)
++{
++	u32 valPE0, valPE1, valPE2;
++	int err = 0;
++
++	/* SDR0_PEGPLLLCT1 reset */
++	if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
++		/*
++		 * the PCIe core was probably already initialised
++		 * by firmware - let's re-reset RCSSET regs
++		 *
++		 * -- Shouldn't we also re-reset the whole thing ? -- BenH
++		 */
++		pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
++		mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
++		mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
++		mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
++	}
++
++	valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
++	valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
++	valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
++
++	/* SDR0_PExRCSSET rstgu */
++	if (!(valPE0 & 0x01000000) ||
++	    !(valPE1 & 0x01000000) ||
++	    !(valPE2 & 0x01000000)) {
++		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
++		err = -1;
++	}
++
++	/* SDR0_PExRCSSET rstdl */
++	if (!(valPE0 & 0x00010000) ||
++	    !(valPE1 & 0x00010000) ||
++	    !(valPE2 & 0x00010000)) {
++		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
++		err = -1;
++	}
++
++	/* SDR0_PExRCSSET rstpyn */
++	if ((valPE0 & 0x00001000) ||
++	    (valPE1 & 0x00001000) ||
++	    (valPE2 & 0x00001000)) {
++		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
++		err = -1;
++	}
++
++	/* SDR0_PExRCSSET hldplb */
++	if ((valPE0 & 0x10000000) ||
++	    (valPE1 & 0x10000000) ||
++	    (valPE2 & 0x10000000)) {
++		printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
++		err = -1;
++	}
++
++	/* SDR0_PExRCSSET rdy */
++	if ((valPE0 & 0x00100000) ||
++	    (valPE1 & 0x00100000) ||
++	    (valPE2 & 0x00100000)) {
++		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
++		err = -1;
++	}
++
++	/* SDR0_PExRCSSET shutdown */
++	if ((valPE0 & 0x00000100) ||
++	    (valPE1 & 0x00000100) ||
++	    (valPE2 & 0x00000100)) {
++		printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
++		err = -1;
++	}
++
++	return err;
++}
++
++/* Global PCIe core initializations for 440SPe core */
++static int __init ppc440spe_pciex_core_init(struct device_node *np)
++{
++	int time_out = 20;
++
++	/* Set PLL clock receiver to LVPECL */
++	mtdcri(SDR0, PESDR0_PLLLCT1, mfdcri(SDR0, PESDR0_PLLLCT1) | 1 << 28);
++
++	/* Shouldn't we do all the calibration stuff etc... here ? */
++	if (ppc440spe_pciex_check_reset(np))
++		return -ENXIO;
++
++	if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
++		printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
++		       "failed (0x%08x)\n",
++		       mfdcri(SDR0, PESDR0_PLLLCT2));
++		return -1;
++	}
++
++	/* De-assert reset of PCIe PLL, wait for lock */
++	mtdcri(SDR0, PESDR0_PLLLCT1,
++	       mfdcri(SDR0, PESDR0_PLLLCT1) & ~(1 << 24));
++	udelay(3);
++
++	while (time_out) {
++		if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
++			time_out--;
++			udelay(1);
++		} else
++			break;
++	}
++	if (!time_out) {
++		printk(KERN_INFO "PCIE: VCO output not locked\n");
++		return -1;
++	}
++
++	pr_debug("PCIE initialization OK\n");
++
++	return 3;
++}
++
++static int ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
++{
++	u32 val = 1 << 24;
++
++	if (port->endpoint)
++		val = PTYPE_LEGACY_ENDPOINT << 20;
++	else
++		val = PTYPE_ROOT_PORT << 20;
++
++	if (port->index == 0)
++		val |= LNKW_X8 << 12;
++	else
++		val |= LNKW_X4 << 12;
++
++	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
++	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
++	if (ppc440spe_revA())
++		mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
++	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
++	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
++	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
++	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
++	if (port->index == 0) {
++		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
++		       0x35000000);
++		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
++		       0x35000000);
++		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
++		       0x35000000);
++		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
++		       0x35000000);
++	}
++	val = mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET);
++	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
++	       (val & ~(1 << 24 | 1 << 16)) | 1 << 12);
++
++	return 0;
++}
++
++static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
++{
++	return ppc440spe_pciex_init_port_hw(port);
++}
++
++static int ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
++{
++	int rc = ppc440spe_pciex_init_port_hw(port);
++
++	port->has_ibpre = 1;
++
++	return rc;
++}
++
++static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
++{
++	/* XXX Check what that value means... I hate magic */
++	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
++
++	/*
++	 * Set buffer allocations and then assert VRB and TXE.
++	 */
++	out_be32(port->utl_base + PEUTL_OUTTR,   0x08000000);
++	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
++	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x10000000);
++	out_be32(port->utl_base + PEUTL_PBBSZ,   0x53000000);
++	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x08000000);
++	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x10000000);
++	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
++	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);
++
++	return 0;
++}
++
++static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
++{
++	/* Report CRS to the operating system */
++	out_be32(port->utl_base + PEUTL_PBCTL,    0x08000000);
++
++	return 0;
++}
++
++static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
++{
++	.core_init	= ppc440spe_pciex_core_init,
++	.port_init_hw	= ppc440speA_pciex_init_port_hw,
++	.setup_utl	= ppc440speA_pciex_init_utl,
++};
++
++static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
++{
++	.core_init	= ppc440spe_pciex_core_init,
++	.port_init_hw	= ppc440speB_pciex_init_port_hw,
++	.setup_utl	= ppc440speB_pciex_init_utl,
++};
++
++#endif /* CONFIG_44x */
++
++#ifdef CONFIG_40x
++
++static int __init ppc405ex_pciex_core_init(struct device_node *np)
++{
++	/* Nothing to do, return 2 ports */
++	return 2;
++}
++
++static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
++{
++	/* Assert the PE0_PHY reset */
++	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
++	msleep(1);
++
++	/* deassert the PE0_hotreset */
++	if (port->endpoint)
++		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
++	else
++		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
++
++	/* poll for phy !reset */
++	/* XXX FIXME add timeout */
++	while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
++		;
++
++	/* deassert the PE0_gpl_utl_reset */
++	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
++}
++
++static int ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
++{
++	u32 val;
++
++	if (port->endpoint)
++		val = PTYPE_LEGACY_ENDPOINT;
++	else
++		val = PTYPE_ROOT_PORT;
++
++	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
++	       1 << 24 | val << 20 | LNKW_X1 << 12);
++
++	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
++	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
++	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
++	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
++
++	/*
++	 * Only reset the PHY when no link is currently established.
++	 * This is for the Atheros PCIe board which has problems to establish
++	 * the link (again) after this PHY reset. All other currently tested
++	 * PCIe boards don't show this problem.
++	 * This has to be re-tested and fixed in a later release!
++	 */
++#if 0 /* XXX FIXME: Not resetting the PHY will leave all resources
++       * configured as done previously by U-Boot. Then Linux will currently
++       * not reassign them. So the PHY reset is now done always. This will
++       * lead to problems with the Atheros PCIe board again.
++       */
++	val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
++	if (!(val & 0x00001000))
++		ppc405ex_pcie_phy_reset(port);
++#else
++	ppc405ex_pcie_phy_reset(port);
++#endif
++
++	dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000);  /* guarded on */
++
++	port->has_ibpre = 1;
++
++	return 0;
++}
++
++static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
++{
++	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
++
++	/*
++	 * Set buffer allocations and then assert VRB and TXE.
++	 */
++	out_be32(port->utl_base + PEUTL_OUTTR,   0x02000000);
++	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
++	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x04000000);
++	out_be32(port->utl_base + PEUTL_PBBSZ,   0x21000000);
++	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x02000000);
++	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x04000000);
++	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
++	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);
++
++	out_be32(port->utl_base + PEUTL_PBCTL,   0x08000000);
++
++	return 0;
++}
++
++static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
++{
++	.core_init	= ppc405ex_pciex_core_init,
++	.port_init_hw	= ppc405ex_pciex_init_port_hw,
++	.setup_utl	= ppc405ex_pciex_init_utl,
++};
++
++#endif /* CONFIG_40x */
++
++
++/* Check that the core has been initied and if not, do it */
++static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
++{
++	static int core_init;
++	int count = -ENODEV;
++
++	if (core_init++)
++		return 0;
++
++#ifdef CONFIG_44x
++	if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
++		if (ppc440spe_revA())
++			ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
++		else
++			ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
++	}
++#endif /* CONFIG_44x    */
++#ifdef CONFIG_40x
++	if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
++		ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
++#endif
++	if (ppc4xx_pciex_hwops == NULL) {
++		printk(KERN_WARNING "PCIE: unknown host type %s\n",
++		       np->full_name);
++		return -ENODEV;
++	}
++
++	count = ppc4xx_pciex_hwops->core_init(np);
++	if (count > 0) {
++		ppc4xx_pciex_ports =
++		       kzalloc(count * sizeof(struct ppc4xx_pciex_port),
++			       GFP_KERNEL);
++		if (ppc4xx_pciex_ports) {
++			ppc4xx_pciex_port_count = count;
++			return 0;
++		}
++		printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
++		return -ENOMEM;
++	}
++	return -ENODEV;
++}
++
++static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
++{
++	/* We map PCI Express configuration based on the reg property */
++	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
++		  RES_TO_U32_HIGH(port->cfg_space.start));
++	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
++		  RES_TO_U32_LOW(port->cfg_space.start));
++
++	/* XXX FIXME: Use size from reg property. For now, map 512M */
++	dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
++
++	/* We map UTL registers based on the reg property */
++	dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
++		  RES_TO_U32_HIGH(port->utl_regs.start));
++	dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
++		  RES_TO_U32_LOW(port->utl_regs.start));
++
++	/* XXX FIXME: Use size from reg property */
++	dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
++
++	/* Disable all other outbound windows */
++	dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
++	dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
++	dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
++	dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
++}
++
++static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
++					   unsigned int sdr_offset,
++					   unsigned int mask,
++					   unsigned int value,
++					   int timeout_ms)
++{
++	u32 val;
++
++	while(timeout_ms--) {
++		val = mfdcri(SDR0, port->sdr_base + sdr_offset);
++		if ((val & mask) == value) {
++			pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
++				 port->index, sdr_offset, timeout_ms, val);
++			return 0;
++		}
++		msleep(1);
++	}
++	return -1;
++}
++
++static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
++{
++	int rc = 0;
++
++	/* Init HW */
++	if (ppc4xx_pciex_hwops->port_init_hw)
++		rc = ppc4xx_pciex_hwops->port_init_hw(port);
++	if (rc != 0)
++		return rc;
++
++	printk(KERN_INFO "PCIE%d: Checking link...\n",
++	       port->index);
++
++	/* Wait for reset to complete */
++	if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
++		printk(KERN_WARNING "PCIE%d: PGRST failed\n",
++		       port->index);
++		return -1;
++	}
++
++	/* Check for card presence detect if supported, if not, just wait for
++	 * link unconditionally.
++	 *
++	 * note that we don't fail if there is no link, we just filter out
++	 * config space accesses. That way, it will be easier to implement
++	 * hotplug later on.
++	 */
++	if (!port->has_ibpre ||
++	    !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
++				      1 << 28, 1 << 28, 100)) {
++		printk(KERN_INFO
++		       "PCIE%d: Device detected, waiting for link...\n",
++		       port->index);
++		if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
++					     0x1000, 0x1000, 2000))
++			printk(KERN_WARNING
++			       "PCIE%d: Link up failed\n", port->index);
++		else {
++			printk(KERN_INFO
++			       "PCIE%d: link is up !\n", port->index);
++			port->link = 1;
++		}
++	} else
++		printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
++
++	/*
++	 * Initialize mapping: disable all regions and configure
++	 * CFG and REG regions based on resources in the device tree
++	 */
++	ppc4xx_pciex_port_init_mapping(port);
++
++	/*
++	 * Map UTL
++	 */
++	port->utl_base = ioremap(port->utl_regs.start, 0x100);
++	BUG_ON(port->utl_base == NULL);
++
++	/*
++	 * Setup UTL registers --BenH.
++	 */
++	if (ppc4xx_pciex_hwops->setup_utl)
++		ppc4xx_pciex_hwops->setup_utl(port);
++
++	/*
++	 * Check for VC0 active and assert RDY.
++	 */
++	if (port->link &&
++	    ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
++				     1 << 16, 1 << 16, 5000)) {
++		printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index);
++		port->link = 0;
++	}
++
++	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
++	       mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) | 1 << 20);
++	msleep(100);
++
++	return 0;
++}
++
++static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
++				     struct pci_bus *bus,
++				     unsigned int devfn)
++{
++	static int message;
++
++	/* An endpoint cannot generate upstream (remote) config cycles */
++	if (port->endpoint && bus->number != port->hose->first_busno)
++		return PCIBIOS_DEVICE_NOT_FOUND;
++
++	/* Check we are within the mapped range */
++	if (bus->number > port->hose->last_busno) {
++		if (!message) {
++			printk(KERN_WARNING "Warning! Probing bus %u"
++			       " out of range !\n", bus->number);
++			message++;
++		}
++		return PCIBIOS_DEVICE_NOT_FOUND;
++	}
++
++	/* The root complex has only one device / function */
++	if (bus->number == port->hose->first_busno && devfn != 0)
++		return PCIBIOS_DEVICE_NOT_FOUND;
++
++	/* The other side of the RC has only one device as well */
++	if (bus->number == (port->hose->first_busno + 1) &&
++	    PCI_SLOT(devfn) != 0)
++		return PCIBIOS_DEVICE_NOT_FOUND;
++
++	/* Check if we have a link */
++	if ((bus->number != port->hose->first_busno) && !port->link)
++		return PCIBIOS_DEVICE_NOT_FOUND;
++
++	return 0;
++}
++
++static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
++						  struct pci_bus *bus,
++						  unsigned int devfn)
++{
++	int relbus;
++
++	/* Remove the casts when we finally remove the stupid volatile
++	 * in struct pci_controller
++	 */
++	if (bus->number == port->hose->first_busno)
++		return (void __iomem *)port->hose->cfg_addr;
++
++	relbus = bus->number - (port->hose->first_busno + 1);
++	return (void __iomem *)port->hose->cfg_data +
++		((relbus  << 20) | (devfn << 12));
++}
++
++static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
++				    int offset, int len, u32 *val)
++{
++	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
++	struct ppc4xx_pciex_port *port =
++		&ppc4xx_pciex_ports[hose->indirect_type];
++	void __iomem *addr;
++	u32 gpl_cfg;
++
++	BUG_ON(hose != port->hose);
++
++	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
++		return PCIBIOS_DEVICE_NOT_FOUND;
++
++	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
++
++	/*
++	 * Reading from the configuration space of a non-existent device can
++	 * generate transaction errors. For the duration of the read we
++	 * suppress assertion of machine check exceptions to avoid them.
++	 */
++	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
++	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
++
++	/* Make sure no CRS is recorded */
++	out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
++
++	switch (len) {
++	case 1:
++		*val = in_8((u8 *)(addr + offset));
++		break;
++	case 2:
++		*val = in_le16((u16 *)(addr + offset));
++		break;
++	default:
++		*val = in_le32((u32 *)(addr + offset));
++		break;
++	}
++
++	pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
++		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
++		 bus->number, hose->first_busno, hose->last_busno,
++		 devfn, offset, len, addr + offset, *val);
++
++	/* Check for CRS (440SPe rev B does that for us but heh ..) */
++	if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
++		pr_debug("Got CRS !\n");
++		if (len != 4 || offset != 0)
++			return PCIBIOS_DEVICE_NOT_FOUND;
++		*val = 0xffff0001;
++	}
++
++	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
++
++	return PCIBIOS_SUCCESSFUL;
++}
++
++static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
++				     int offset, int len, u32 val)
++{
++	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
++	struct ppc4xx_pciex_port *port =
++		&ppc4xx_pciex_ports[hose->indirect_type];
++	void __iomem *addr;
++	u32 gpl_cfg;
++
++	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
++		return PCIBIOS_DEVICE_NOT_FOUND;
++
++	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
++
++	/*
++	 * Accessing the configuration space of a non-existent device can
++	 * generate transaction errors. For the duration of the access we
++	 * suppress assertion of machine check exceptions to avoid them.
++	 */
++	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
++	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
++
++	pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
++		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
++		 bus->number, hose->first_busno, hose->last_busno,
++		 devfn, offset, len, addr + offset, val);
++
++	switch (len) {
++	case 1:
++		out_8((u8 *)(addr + offset), val);
++		break;
++	case 2:
++		out_le16((u16 *)(addr + offset), val);
++		break;
++	default:
++		out_le32((u32 *)(addr + offset), val);
++		break;
++	}
++
++	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
++
++	return PCIBIOS_SUCCESSFUL;
++}
++
++static struct pci_ops ppc4xx_pciex_pci_ops =
++{
++	.read  = ppc4xx_pciex_read_config,
++	.write = ppc4xx_pciex_write_config,
++};
++
++static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
++					       struct pci_controller *hose,
++					       void __iomem *mbase)
++{
++	u32 lah, lal, pciah, pcial, sa;
++	int i, j;
++
++	/* Setup outbound memory windows */
++	for (i = j = 0; i < 3; i++) {
++		struct resource *res = &hose->mem_resources[i];
++
++		/* we only care about memory windows */
++		if (!(res->flags & IORESOURCE_MEM))
++			continue;
++		if (j > 1) {
++			printk(KERN_WARNING "%s: Too many ranges\n",
++			       port->node->full_name);
++			break;
++		}
++
++		/* Calculate register values */
++		lah = RES_TO_U32_HIGH(res->start);
++		lal = RES_TO_U32_LOW(res->start);
++		pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
++		pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
++		sa = res->end + 1 - res->start;
++		if (!is_power_of_2(sa) || sa < 0x100000 ||
++		    sa > 0xffffffffu) {
++			printk(KERN_WARNING "%s: Resource out of range\n",
++			       port->node->full_name);
++			continue;
++		}
++		sa = (0xffffffffu << ilog2(sa)) | 0x1;
++
++		/* Program register values */
++		switch (j) {
++		case 0:
++			out_le32(mbase + PECFG_POM0LAH, pciah);
++			out_le32(mbase + PECFG_POM0LAL, pcial);
++			dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
++			dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
++			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
++			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
++			break;
++		case 1:
++			out_le32(mbase + PECFG_POM1LAH, pciah);
++			out_le32(mbase + PECFG_POM1LAL, pcial);
++			dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
++			dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
++			dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
++			dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
++			break;
++		}
++		j++;
++	}
++
++	/* Configure IO, always 64K starting at 0 */
++	if (hose->io_resource.flags & IORESOURCE_IO) {
++		lah = RES_TO_U32_HIGH(hose->io_base_phys);
++		lal = RES_TO_U32_LOW(hose->io_base_phys);
++		out_le32(mbase + PECFG_POM2LAH, 0);
++		out_le32(mbase + PECFG_POM2LAL, 0);
++		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
++		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
++		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
++		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0xffff0000 | 3);
++	}
++}
++
++static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
++					       struct pci_controller *hose,
++					       void __iomem *mbase,
++					       struct resource *res)
++{
++	resource_size_t size = res->end - res->start + 1;
++	u64 sa;
++
++	/* Calculate window size */
++	sa = (0xffffffffffffffffull << ilog2(size));
++	if (res->flags & IORESOURCE_PREFETCH)
++		sa |= 0x8;
++
++	out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
++	out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
++
++	/* The setup of the split looks weird to me ... let's see if it works */
++	out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
++	out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
++	out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
++	out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
++	out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
++	out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
++
++	/* Enable inbound mapping */
++	out_le32(mbase + PECFG_PIMEN, 0x1);
++
++	out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
++	out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
++
++	/* Enable I/O, Mem, and Busmaster cycles */
++	out_le16(mbase + PCI_COMMAND,
++		 in_le16(mbase + PCI_COMMAND) |
++		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
++}
++
++static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
++{
++	struct resource dma_window;
++	struct pci_controller *hose = NULL;
++	const int *bus_range;
++	int primary = 0, busses;
++	void __iomem *mbase = NULL, *cfg_data = NULL;
++
++	/* XXX FIXME: Handle endpoint mode properly */
++	if (port->endpoint) {
++		printk(KERN_WARNING "PCIE%d: Port in endpoint mode !\n",
++		       port->index);
++		return;
++	}
++
++	/* Check if primary bridge */
++	if (of_get_property(port->node, "primary", NULL))
++		primary = 1;
++
++	/* Get bus range if any */
++	bus_range = of_get_property(port->node, "bus-range", NULL);
++
++	/* Allocate the host controller data structure */
++	hose = pcibios_alloc_controller(port->node);
++	if (!hose)
++		goto fail;
++
++	/* We stick the port number in "indirect_type" so the config space
++	 * ops can retrieve the port data structure easily
++	 */
++	hose->indirect_type = port->index;
++
++	/* Get bus range */
++	hose->first_busno = bus_range ? bus_range[0] : 0x0;
++	hose->last_busno = bus_range ? bus_range[1] : 0xff;
++
++	/* Because the config space mapping is so large (1M per bus), we
++	 * limit how many busses we support. In the long run, we could replace
++	 * that with something akin to kmap_atomic instead. We set aside 1 bus
++	 * for the host itself too.
++	 */
++	busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
++	if (busses > MAX_PCIE_BUS_MAPPED) {
++		busses = MAX_PCIE_BUS_MAPPED;
++		hose->last_busno = hose->first_busno + busses;
++	}
++
++	/* We map the external config space in cfg_data and the host config
++	 * space in cfg_addr. External space is 1M per bus, internal space
++	 * is 4K
++	 */
++	cfg_data = ioremap(port->cfg_space.start +
++				 (hose->first_busno + 1) * 0x100000,
++				 busses * 0x100000);
++	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
++	if (cfg_data == NULL || mbase == NULL) {
++		printk(KERN_ERR "%s: Can't map config space !",
++		       port->node->full_name);
++		goto fail;
++	}
++
++	hose->cfg_data = cfg_data;
++	hose->cfg_addr = mbase;
++
++	pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
++		 hose->first_busno, hose->last_busno);
++	pr_debug("     config space mapped at: root @0x%p, other @0x%p\n",
++		 hose->cfg_addr, hose->cfg_data);
++
++	/* Setup config space */
++	hose->ops = &ppc4xx_pciex_pci_ops;
++	port->hose = hose;
++	mbase = (void __iomem *)hose->cfg_addr;
++
++	/*
++	 * Set bus numbers on our root port
++	 */
++	out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
++	out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
++	out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
++
++	/*
++	 * OMRs are already reset, also disable PIMs
++	 */
++	out_le32(mbase + PECFG_PIMEN, 0);
++
++	/* Parse outbound mapping resources */
++	pci_process_bridge_OF_ranges(hose, port->node, primary);
++
++	/* Parse inbound mapping resources */
++	if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
++		goto fail;
++
++	/* Configure outbound ranges POMs */
++	ppc4xx_configure_pciex_POMs(port, hose, mbase);
++
++	/* Configure inbound ranges PIMs */
++	ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
++
++	/* The root complex doesn't show up if we don't set some vendor
++	 * and device IDs into it. Those are the same bogus ones that the
++	 * initial code in arch/ppc added. We might want to change that.
++	 */
++	out_le16(mbase + 0x200, 0xaaa0 + port->index);
++	out_le16(mbase + 0x202, 0xbed0 + port->index);
++
++	/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
++	out_le32(mbase + 0x208, 0x06040001);
++
++	printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
++	       port->index);
++	return;
++ fail:
++	if (hose)
++		pcibios_free_controller(hose);
++	if (cfg_data)
++		iounmap(cfg_data);
++	if (mbase)
++		iounmap(mbase);
++}
++
++static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
++{
++	struct ppc4xx_pciex_port *port;
++	const u32 *pval;
++	int portno;
++	unsigned int dcrs;
++
++	/* First, proceed to core initialization as we assume there's
++	 * only one PCIe core in the system
++	 */
++	if (ppc4xx_pciex_check_core_init(np))
++		return;
++
++	/* Get the port number from the device-tree */
++	pval = of_get_property(np, "port", NULL);
++	if (pval == NULL) {
++		printk(KERN_ERR "PCIE: Can't find port number for %s\n",
++		       np->full_name);
++		return;
++	}
++	portno = *pval;
++	if (portno >= ppc4xx_pciex_port_count) {
++		printk(KERN_ERR "PCIE: port number out of range for %s\n",
++		       np->full_name);
++		return;
++	}
++	port = &ppc4xx_pciex_ports[portno];
++	port->index = portno;
++	port->node = of_node_get(np);
++	pval = of_get_property(np, "sdr-base", NULL);
++	if (pval == NULL) {
++		printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
++		       np->full_name);
++		return;
++	}
++	port->sdr_base = *pval;
++
++	/* XXX Currently, we only support root complex mode */
++	port->endpoint = 0;
++
++	/* Fetch config space registers address */
++	if (of_address_to_resource(np, 0, &port->cfg_space)) {
++		printk(KERN_ERR "%s: Can't get PCI-E config space !",
++		       np->full_name);
++		return;
++	}
++	/* Fetch host bridge internal registers address */
++	if (of_address_to_resource(np, 1, &port->utl_regs)) {
++		printk(KERN_ERR "%s: Can't get UTL register base !",
++		       np->full_name);
++		return;
++	}
++
++	/* Map DCRs */
++	dcrs = dcr_resource_start(np, 0);
++	if (dcrs == 0) {
++		printk(KERN_ERR "%s: Can't get DCR register base !",
++		       np->full_name);
++		return;
++	}
++	port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
++
++	/* Initialize the port specific registers */
++	if (ppc4xx_pciex_port_init(port)) {
++		printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
++		return;
++	}
++
++	/* Setup the linux hose data structure */
++	ppc4xx_pciex_port_setup_hose(port);
++}
++
++#endif /* CONFIG_PPC4xx_PCI_EXPRESS */
++
++static int __init ppc4xx_pci_find_bridges(void)
++{
++	struct device_node *np;
++
++#ifdef CONFIG_PPC4xx_PCI_EXPRESS
++	for_each_compatible_node(np, NULL, "ibm,plb-pciex")
++		ppc4xx_probe_pciex_bridge(np);
++#endif
++	for_each_compatible_node(np, NULL, "ibm,plb-pcix")
++		ppc4xx_probe_pcix_bridge(np);
++	for_each_compatible_node(np, NULL, "ibm,plb-pci")
++		ppc4xx_probe_pci_bridge(np);
++
++	return 0;
++}
++arch_initcall(ppc4xx_pci_find_bridges);
++
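For readers following the mapping scheme above: the external config window is
carved up as 1MB per downstream bus and 4KB per devfn, which is exactly the
(relbus << 20) | (devfn << 12) computation in ppc4xx_pciex_get_config_base().
A minimal user-space sketch of that arithmetic; the first_busno, bus and devfn
values are made-up examples, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int first_busno = 0;        /* assumed root bus number */
	unsigned int bus = 2, devfn = 0x18;  /* assumed: device 3, function 0 */
	unsigned int relbus = bus - (first_busno + 1);
	unsigned long offset = ((unsigned long)relbus << 20) | (devfn << 12);

	/* 1MB per bus beyond the root, 4KB per devfn within a bus */
	printf("offset into the cfg_data window: 0x%lx\n", offset); /* 0x118000 */
	return 0;
}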
+diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h
+new file mode 100644
+index 0000000..1c07908
+--- /dev/null
++++ b/arch/powerpc/sysdev/ppc4xx_pci.h
+@@ -0,0 +1,369 @@
++/*
++ * PCI / PCI-X / PCI-Express support for 4xx parts
++ *
++ * Copyright 2007 Ben. Herrenschmidt <benh at kernel.crashing.org>, IBM Corp.
++ *
++ * Bits and pieces extracted from arch/ppc support by
++ *
++ * Matt Porter <mporter at kernel.crashing.org>
++ *
++ * Copyright 2002-2005 MontaVista Software Inc.
++ */
++#ifndef __PPC4XX_PCI_H__
++#define __PPC4XX_PCI_H__
++
++/*
++ * 4xx PCI-X bridge register definitions
++ */
++#define PCIX0_VENDID		0x000
++#define PCIX0_DEVID		0x002
++#define PCIX0_COMMAND		0x004
++#define PCIX0_STATUS		0x006
++#define PCIX0_REVID		0x008
++#define PCIX0_CLS		0x009
++#define PCIX0_CACHELS		0x00c
++#define PCIX0_LATTIM		0x00d
++#define PCIX0_HDTYPE		0x00e
++#define PCIX0_BIST		0x00f
++#define PCIX0_BAR0L		0x010
++#define PCIX0_BAR0H		0x014
++#define PCIX0_BAR1		0x018
++#define PCIX0_BAR2L		0x01c
++#define PCIX0_BAR2H		0x020
++#define PCIX0_BAR3		0x024
++#define PCIX0_CISPTR		0x028
++#define PCIX0_SBSYSVID		0x02c
++#define PCIX0_SBSYSID		0x02e
++#define PCIX0_EROMBA		0x030
++#define PCIX0_CAP		0x034
++#define PCIX0_RES0		0x035
++#define PCIX0_RES1		0x036
++#define PCIX0_RES2		0x038
++#define PCIX0_INTLN		0x03c
++#define PCIX0_INTPN		0x03d
++#define PCIX0_MINGNT		0x03e
++#define PCIX0_MAXLTNCY		0x03f
++#define PCIX0_BRDGOPT1		0x040
++#define PCIX0_BRDGOPT2		0x044
++#define PCIX0_ERREN		0x050
++#define PCIX0_ERRSTS		0x054
++#define PCIX0_PLBBESR		0x058
++#define PCIX0_PLBBEARL		0x05c
++#define PCIX0_PLBBEARH		0x060
++#define PCIX0_POM0LAL		0x068
++#define PCIX0_POM0LAH		0x06c
++#define PCIX0_POM0SA		0x070
++#define PCIX0_POM0PCIAL		0x074
++#define PCIX0_POM0PCIAH		0x078
++#define PCIX0_POM1LAL		0x07c
++#define PCIX0_POM1LAH		0x080
++#define PCIX0_POM1SA		0x084
++#define PCIX0_POM1PCIAL		0x088
++#define PCIX0_POM1PCIAH		0x08c
++#define PCIX0_POM2SA		0x090
++#define PCIX0_PIM0SAL		0x098
++#define PCIX0_PIM0SA		PCIX0_PIM0SAL
++#define PCIX0_PIM0LAL		0x09c
++#define PCIX0_PIM0LAH		0x0a0
++#define PCIX0_PIM1SA		0x0a4
++#define PCIX0_PIM1LAL		0x0a8
++#define PCIX0_PIM1LAH		0x0ac
++#define PCIX0_PIM2SAL		0x0b0
++#define PCIX0_PIM2SA		PCIX0_PIM2SAL
++#define PCIX0_PIM2LAL		0x0b4
++#define PCIX0_PIM2LAH		0x0b8
++#define PCIX0_OMCAPID		0x0c0
++#define PCIX0_OMNIPTR		0x0c1
++#define PCIX0_OMMC		0x0c2
++#define PCIX0_OMMA		0x0c4
++#define PCIX0_OMMUA		0x0c8
++#define PCIX0_OMMDATA		0x0cc
++#define PCIX0_OMMEOI		0x0ce
++#define PCIX0_PMCAPID		0x0d0
++#define PCIX0_PMNIPTR		0x0d1
++#define PCIX0_PMC		0x0d2
++#define PCIX0_PMCSR		0x0d4
++#define PCIX0_PMCSRBSE		0x0d6
++#define PCIX0_PMDATA		0x0d7
++#define PCIX0_PMSCRR		0x0d8
++#define PCIX0_CAPID		0x0dc
++#define PCIX0_NIPTR		0x0dd
++#define PCIX0_CMD		0x0de
++#define PCIX0_STS		0x0e0
++#define PCIX0_IDR		0x0e4
++#define PCIX0_CID		0x0e8
++#define PCIX0_RID		0x0ec
++#define PCIX0_PIM0SAH		0x0f8
++#define PCIX0_PIM2SAH		0x0fc
++#define PCIX0_MSGIL		0x100
++#define PCIX0_MSGIH		0x104
++#define PCIX0_MSGOL		0x108
++#define PCIX0_MSGOH		0x10c
++#define PCIX0_IM		0x1f8
++
++/*
++ * 4xx PCI bridge register definitions
++ */
++#define PCIL0_PMM0LA		0x00
++#define PCIL0_PMM0MA		0x04
++#define PCIL0_PMM0PCILA		0x08
++#define PCIL0_PMM0PCIHA		0x0c
++#define PCIL0_PMM1LA		0x10
++#define PCIL0_PMM1MA		0x14
++#define PCIL0_PMM1PCILA		0x18
++#define PCIL0_PMM1PCIHA		0x1c
++#define PCIL0_PMM2LA		0x20
++#define PCIL0_PMM2MA		0x24
++#define PCIL0_PMM2PCILA		0x28
++#define PCIL0_PMM2PCIHA		0x2c
++#define PCIL0_PTM1MS		0x30
++#define PCIL0_PTM1LA		0x34
++#define PCIL0_PTM2MS		0x38
++#define PCIL0_PTM2LA		0x3c
++
++/*
++ * 4xx PCIe bridge register definitions
++ */
++
++/* DCR offsets */
++#define DCRO_PEGPL_CFGBAH		0x00
++#define DCRO_PEGPL_CFGBAL		0x01
++#define DCRO_PEGPL_CFGMSK		0x02
++#define DCRO_PEGPL_MSGBAH		0x03
++#define DCRO_PEGPL_MSGBAL		0x04
++#define DCRO_PEGPL_MSGMSK		0x05
++#define DCRO_PEGPL_OMR1BAH		0x06
++#define DCRO_PEGPL_OMR1BAL		0x07
++#define DCRO_PEGPL_OMR1MSKH		0x08
++#define DCRO_PEGPL_OMR1MSKL		0x09
++#define DCRO_PEGPL_OMR2BAH		0x0a
++#define DCRO_PEGPL_OMR2BAL		0x0b
++#define DCRO_PEGPL_OMR2MSKH		0x0c
++#define DCRO_PEGPL_OMR2MSKL		0x0d
++#define DCRO_PEGPL_OMR3BAH		0x0e
++#define DCRO_PEGPL_OMR3BAL		0x0f
++#define DCRO_PEGPL_OMR3MSKH		0x10
++#define DCRO_PEGPL_OMR3MSKL		0x11
++#define DCRO_PEGPL_REGBAH		0x12
++#define DCRO_PEGPL_REGBAL		0x13
++#define DCRO_PEGPL_REGMSK		0x14
++#define DCRO_PEGPL_SPECIAL		0x15
++#define DCRO_PEGPL_CFG			0x16
++#define DCRO_PEGPL_ESR			0x17
++#define DCRO_PEGPL_EARH			0x18
++#define DCRO_PEGPL_EARL			0x19
++#define DCRO_PEGPL_EATR			0x1a
++
++/* DMER mask */
++#define GPL_DMER_MASK_DISA	0x02000000
++
++/*
++ * System DCRs (SDRs)
++ */
++#define PESDR0_PLLLCT1			0x03a0
++#define PESDR0_PLLLCT2			0x03a1
++#define PESDR0_PLLLCT3			0x03a2
++
++/*
++ * 440SPe additional DCRs
++ */
++#define PESDR0_440SPE_UTLSET1		0x0300
++#define PESDR0_440SPE_UTLSET2		0x0301
++#define PESDR0_440SPE_DLPSET		0x0302
++#define PESDR0_440SPE_LOOP		0x0303
++#define PESDR0_440SPE_RCSSET		0x0304
++#define PESDR0_440SPE_RCSSTS		0x0305
++#define PESDR0_440SPE_HSSL0SET1		0x0306
++#define PESDR0_440SPE_HSSL0SET2		0x0307
++#define PESDR0_440SPE_HSSL0STS		0x0308
++#define PESDR0_440SPE_HSSL1SET1		0x0309
++#define PESDR0_440SPE_HSSL1SET2		0x030a
++#define PESDR0_440SPE_HSSL1STS		0x030b
++#define PESDR0_440SPE_HSSL2SET1		0x030c
++#define PESDR0_440SPE_HSSL2SET2		0x030d
++#define PESDR0_440SPE_HSSL2STS		0x030e
++#define PESDR0_440SPE_HSSL3SET1		0x030f
++#define PESDR0_440SPE_HSSL3SET2		0x0310
++#define PESDR0_440SPE_HSSL3STS		0x0311
++#define PESDR0_440SPE_HSSL4SET1		0x0312
++#define PESDR0_440SPE_HSSL4SET2		0x0313
++#define PESDR0_440SPE_HSSL4STS	       	0x0314
++#define PESDR0_440SPE_HSSL5SET1		0x0315
++#define PESDR0_440SPE_HSSL5SET2		0x0316
++#define PESDR0_440SPE_HSSL5STS		0x0317
++#define PESDR0_440SPE_HSSL6SET1		0x0318
++#define PESDR0_440SPE_HSSL6SET2		0x0319
++#define PESDR0_440SPE_HSSL6STS		0x031a
++#define PESDR0_440SPE_HSSL7SET1		0x031b
++#define PESDR0_440SPE_HSSL7SET2		0x031c
++#define PESDR0_440SPE_HSSL7STS		0x031d
++#define PESDR0_440SPE_HSSCTLSET		0x031e
++#define PESDR0_440SPE_LANE_ABCD		0x031f
++#define PESDR0_440SPE_LANE_EFGH		0x0320
++
++#define PESDR1_440SPE_UTLSET1		0x0340
++#define PESDR1_440SPE_UTLSET2		0x0341
++#define PESDR1_440SPE_DLPSET		0x0342
++#define PESDR1_440SPE_LOOP		0x0343
++#define PESDR1_440SPE_RCSSET		0x0344
++#define PESDR1_440SPE_RCSSTS		0x0345
++#define PESDR1_440SPE_HSSL0SET1		0x0346
++#define PESDR1_440SPE_HSSL0SET2		0x0347
++#define PESDR1_440SPE_HSSL0STS		0x0348
++#define PESDR1_440SPE_HSSL1SET1		0x0349
++#define PESDR1_440SPE_HSSL1SET2		0x034a
++#define PESDR1_440SPE_HSSL1STS		0x034b
++#define PESDR1_440SPE_HSSL2SET1		0x034c
++#define PESDR1_440SPE_HSSL2SET2		0x034d
++#define PESDR1_440SPE_HSSL2STS		0x034e
++#define PESDR1_440SPE_HSSL3SET1		0x034f
++#define PESDR1_440SPE_HSSL3SET2		0x0350
++#define PESDR1_440SPE_HSSL3STS		0x0351
++#define PESDR1_440SPE_HSSCTLSET		0x0352
++#define PESDR1_440SPE_LANE_ABCD		0x0353
++
++#define PESDR2_440SPE_UTLSET1		0x0370
++#define PESDR2_440SPE_UTLSET2		0x0371
++#define PESDR2_440SPE_DLPSET		0x0372
++#define PESDR2_440SPE_LOOP		0x0373
++#define PESDR2_440SPE_RCSSET		0x0374
++#define PESDR2_440SPE_RCSSTS		0x0375
++#define PESDR2_440SPE_HSSL0SET1		0x0376
++#define PESDR2_440SPE_HSSL0SET2		0x0377
++#define PESDR2_440SPE_HSSL0STS		0x0378
++#define PESDR2_440SPE_HSSL1SET1		0x0379
++#define PESDR2_440SPE_HSSL1SET2		0x037a
++#define PESDR2_440SPE_HSSL1STS		0x037b
++#define PESDR2_440SPE_HSSL2SET1		0x037c
++#define PESDR2_440SPE_HSSL2SET2		0x037d
++#define PESDR2_440SPE_HSSL2STS		0x037e
++#define PESDR2_440SPE_HSSL3SET1		0x037f
++#define PESDR2_440SPE_HSSL3SET2		0x0380
++#define PESDR2_440SPE_HSSL3STS		0x0381
++#define PESDR2_440SPE_HSSCTLSET		0x0382
++#define PESDR2_440SPE_LANE_ABCD		0x0383
++
++/*
++ * 405EX additional DCRs
++ */
++#define PESDR0_405EX_UTLSET1		0x0400
++#define PESDR0_405EX_UTLSET2		0x0401
++#define PESDR0_405EX_DLPSET		0x0402
++#define PESDR0_405EX_LOOP		0x0403
++#define PESDR0_405EX_RCSSET		0x0404
++#define PESDR0_405EX_RCSSTS		0x0405
++#define PESDR0_405EX_PHYSET1		0x0406
++#define PESDR0_405EX_PHYSET2		0x0407
++#define PESDR0_405EX_BIST		0x0408
++#define PESDR0_405EX_LPB		0x040B
++#define PESDR0_405EX_PHYSTA		0x040C
++
++#define PESDR1_405EX_UTLSET1		0x0440
++#define PESDR1_405EX_UTLSET2		0x0441
++#define PESDR1_405EX_DLPSET		0x0442
++#define PESDR1_405EX_LOOP		0x0443
++#define PESDR1_405EX_RCSSET		0x0444
++#define PESDR1_405EX_RCSSTS		0x0445
++#define PESDR1_405EX_PHYSET1		0x0446
++#define PESDR1_405EX_PHYSET2		0x0447
++#define PESDR1_405EX_BIST		0x0448
++#define PESDR1_405EX_LPB		0x044B
++#define PESDR1_405EX_PHYSTA		0x044C
++
++/*
++ * Of the above, some are common offsets from the base
++ */
++#define PESDRn_UTLSET1			0x00
++#define PESDRn_UTLSET2			0x01
++#define PESDRn_DLPSET			0x02
++#define PESDRn_LOOP			0x03
++#define PESDRn_RCSSET			0x04
++#define PESDRn_RCSSTS			0x05
++
++/* 440spe only */
++#define PESDRn_440SPE_HSSL0SET1		0x06
++#define PESDRn_440SPE_HSSL0SET2		0x07
++#define PESDRn_440SPE_HSSL0STS		0x08
++#define PESDRn_440SPE_HSSL1SET1		0x09
++#define PESDRn_440SPE_HSSL1SET2		0x0a
++#define PESDRn_440SPE_HSSL1STS		0x0b
++#define PESDRn_440SPE_HSSL2SET1		0x0c
++#define PESDRn_440SPE_HSSL2SET2		0x0d
++#define PESDRn_440SPE_HSSL2STS		0x0e
++#define PESDRn_440SPE_HSSL3SET1		0x0f
++#define PESDRn_440SPE_HSSL3SET2		0x10
++#define PESDRn_440SPE_HSSL3STS		0x11
++
++/* 440spe port 0 only */
++#define PESDRn_440SPE_HSSL4SET1		0x12
++#define PESDRn_440SPE_HSSL4SET2		0x13
++#define PESDRn_440SPE_HSSL4STS	       	0x14
++#define PESDRn_440SPE_HSSL5SET1		0x15
++#define PESDRn_440SPE_HSSL5SET2		0x16
++#define PESDRn_440SPE_HSSL5STS		0x17
++#define PESDRn_440SPE_HSSL6SET1		0x18
++#define PESDRn_440SPE_HSSL6SET2		0x19
++#define PESDRn_440SPE_HSSL6STS		0x1a
++#define PESDRn_440SPE_HSSL7SET1		0x1b
++#define PESDRn_440SPE_HSSL7SET2		0x1c
++#define PESDRn_440SPE_HSSL7STS		0x1d
++
++/* 405ex only */
++#define PESDRn_405EX_PHYSET1		0x06
++#define PESDRn_405EX_PHYSET2		0x07
++#define PESDRn_405EX_PHYSTA		0x0c
++
++/*
++ * UTL register offsets
++ */
++#define PEUTL_PBCTL		0x00
++#define PEUTL_PBBSZ		0x20
++#define PEUTL_OPDBSZ		0x68
++#define PEUTL_IPHBSZ		0x70
++#define PEUTL_IPDBSZ		0x78
++#define PEUTL_OUTTR		0x90
++#define PEUTL_INTR		0x98
++#define PEUTL_PCTL		0xa0
++#define PEUTL_RCSTA		0xB0
++#define PEUTL_RCIRQEN		0xb8
++
++/*
++ * Config space register offsets
++ */
++#define PECFG_ECRTCTL		0x074
++
++#define PECFG_BAR0LMPA		0x210
++#define PECFG_BAR0HMPA		0x214
++#define PECFG_BAR1MPA		0x218
++#define PECFG_BAR2LMPA		0x220
++#define PECFG_BAR2HMPA		0x224
++
++#define PECFG_PIMEN		0x33c
++#define PECFG_PIM0LAL		0x340
++#define PECFG_PIM0LAH		0x344
++#define PECFG_PIM1LAL		0x348
++#define PECFG_PIM1LAH		0x34c
++#define PECFG_PIM01SAL		0x350
++#define PECFG_PIM01SAH		0x354
++
++#define PECFG_POM0LAL		0x380
++#define PECFG_POM0LAH		0x384
++#define PECFG_POM1LAL		0x388
++#define PECFG_POM1LAH		0x38c
++#define PECFG_POM2LAL		0x390
++#define PECFG_POM2LAH		0x394
++
++
++enum
++{
++	PTYPE_ENDPOINT		= 0x0,
++	PTYPE_LEGACY_ENDPOINT	= 0x1,
++	PTYPE_ROOT_PORT		= 0x4,
++
++	LNKW_X1			= 0x1,
++	LNKW_X4			= 0x4,
++	LNKW_X8			= 0x8
++};
++
++
++#endif /* __PPC4XX_PCI_H__ */
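As the "common offsets from the base" comment above indicates, the per-port
helpers in ppc4xx_pci.c (for instance ppc4xx_pciex_wait_on_sdr()) address SDRs
as port->sdr_base plus one of the PESDRn_* offsets. A small sketch restating
that addressing with the 440SPe port 0 numbers from this header; it is only a
sanity check of the table, not code from the patch:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int sdr_base = 0x0300;           /* PESDR0 base on 440SPe */
	unsigned int pesdrn_loop = 0x03;          /* common PESDRn_LOOP offset */
	unsigned int pesdr0_440spe_loop = 0x0303; /* absolute DCR from the table */

	/* mfdcri(SDR0, port->sdr_base + PESDRn_LOOP) hits the absolute DCR */
	assert(sdr_base + pesdrn_loop == pesdr0_440spe_loop);
	printf("PESDR0_440SPE_LOOP = 0x%04x\n", sdr_base + pesdrn_loop);
	return 0;
}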
+diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
+index f611d34..adc6621 100644
+--- a/arch/powerpc/sysdev/qe_lib/Kconfig
++++ b/arch/powerpc/sysdev/qe_lib/Kconfig
+@@ -4,7 +4,7 @@
+ 
+ config UCC_SLOW
+ 	bool
+-	default n
++	default y if SERIAL_QE
+ 	help
+ 	  This option provides qe_lib support to UCC slow
+ 	  protocols: UART, BISYNC, QMC
+diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
+index 3d57d38..5ef844d 100644
+--- a/arch/powerpc/sysdev/qe_lib/qe.c
++++ b/arch/powerpc/sysdev/qe_lib/qe.c
+@@ -25,6 +25,7 @@
+ #include <linux/module.h>
+ #include <linux/delay.h>
+ #include <linux/ioport.h>
++#include <linux/crc32.h>
+ #include <asm/irq.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -64,17 +65,22 @@ static phys_addr_t qebase = -1;
+ phys_addr_t get_qe_base(void)
+ {
+ 	struct device_node *qe;
++	unsigned int size;
++	const void *prop;
+ 
+ 	if (qebase != -1)
+ 		return qebase;
+ 
+-	qe = of_find_node_by_type(NULL, "qe");
+-	if (qe) {
+-		unsigned int size;
+-		const void *prop = of_get_property(qe, "reg", &size);
+-		qebase = of_translate_address(qe, prop);
+-		of_node_put(qe);
+-	};
++	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
++	if (!qe) {
++		qe = of_find_node_by_type(NULL, "qe");
++		if (!qe)
++			return qebase;
++	}
++
++	prop = of_get_property(qe, "reg", &size);
++	qebase = of_translate_address(qe, prop);
++	of_node_put(qe);
+ 
+ 	return qebase;
+ }
+@@ -152,34 +158,45 @@ static unsigned int brg_clk = 0;
+ unsigned int get_brg_clk(void)
+ {
+ 	struct device_node *qe;
++	unsigned int size;
++	const u32 *prop;
++
+ 	if (brg_clk)
+ 		return brg_clk;
+ 
+-	qe = of_find_node_by_type(NULL, "qe");
+-	if (qe) {
+-		unsigned int size;
+-		const u32 *prop = of_get_property(qe, "brg-frequency", &size);
+-		brg_clk = *prop;
+-		of_node_put(qe);
+-	};
++	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
++	if (!qe) {
++		qe = of_find_node_by_type(NULL, "qe");
++		if (!qe)
++			return brg_clk;
++	}
++
++	prop = of_get_property(qe, "brg-frequency", &size);
++	if (!prop || size != sizeof(*prop))
++		return brg_clk;
++
++	brg_clk = *prop;
++	of_node_put(qe);
++
+ 	return brg_clk;
+ }
+ 
+ /* Program the BRG to the given sampling rate and multiplier
+  *
+- * @brg: the BRG, 1-16
++ * @brg: the BRG, QE_BRG1 - QE_BRG16
+  * @rate: the desired sampling rate
+  * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+  * GUMR_L[TDCR].  E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+  * then 'multiplier' should be 8.
+- *
+- * Also note that the value programmed into the BRGC register must be even.
+  */
+-void qe_setbrg(unsigned int brg, unsigned int rate, unsigned int multiplier)
++int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+ {
+ 	u32 divisor, tempval;
+ 	u32 div16 = 0;
+ 
++	if ((brg < QE_BRG1) || (brg > QE_BRG16))
++		return -EINVAL;
++
+ 	divisor = get_brg_clk() / (rate * multiplier);
+ 
+ 	if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+@@ -196,8 +213,43 @@ void qe_setbrg(unsigned int brg, unsigned int rate, unsigned int multiplier)
+ 	tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ 		QE_BRGC_ENABLE | div16;
+ 
+-	out_be32(&qe_immr->brg.brgc[brg - 1], tempval);
++	out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
++
++	return 0;
++}
++EXPORT_SYMBOL(qe_setbrg);
++
++/* Convert a string to a QE clock source enum
++ *
++ * This function takes a string, typically from a property in the device
++ * tree, and returns the corresponding "enum qe_clock" value.
++ */
++enum qe_clock qe_clock_source(const char *source)
++{
++	unsigned int i;
++
++	if (strcasecmp(source, "none") == 0)
++		return QE_CLK_NONE;
++
++	if (strncasecmp(source, "brg", 3) == 0) {
++		i = simple_strtoul(source + 3, NULL, 10);
++		if ((i >= 1) && (i <= 16))
++			return (QE_BRG1 - 1) + i;
++		else
++			return QE_CLK_DUMMY;
++	}
++
++	if (strncasecmp(source, "clk", 3) == 0) {
++		i = simple_strtoul(source + 3, NULL, 10);
++		if ((i >= 1) && (i <= 24))
++			return (QE_CLK1 - 1) + i;
++		else
++			return QE_CLK_DUMMY;
++	}
++
++	return QE_CLK_DUMMY;
+ }
++EXPORT_SYMBOL(qe_clock_source);
+ 
+ /* Initialize SNUMs (thread serial numbers) according to
+  * QE Module Control chapter, SNUM table
+@@ -285,7 +337,7 @@ static rh_info_t qe_muram_info;
+ static void qe_muram_init(void)
+ {
+ 	struct device_node *np;
+-	u32 address;
++	const u32 *address;
+ 	u64 size;
+ 	unsigned int flags;
+ 
+@@ -298,11 +350,21 @@ static void qe_muram_init(void)
+ 	/* XXX: This is a subset of the available muram. It
+ 	 * varies with the processor and the microcode patches activated.
+ 	 */
+-	if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) {
+-		address = *of_get_address(np, 0, &size, &flags);
+-		of_node_put(np);
+-		rh_attach_region(&qe_muram_info, address, (int) size);
++	np = of_find_compatible_node(NULL, NULL, "fsl,qe-muram-data");
++	if (!np) {
++		np = of_find_node_by_name(NULL, "data-only");
++		if (!np) {
++			WARN_ON(1);
++			return;
++		}
+ 	}
++
++	address = of_get_address(np, 0, &size, &flags);
++	WARN_ON(!address);
++
++	of_node_put(np);
++	if (address)
++		rh_attach_region(&qe_muram_info, *address, (int)size);
+ }
+ 
+ /* This function returns an index into the MURAM area.
+@@ -358,3 +420,249 @@ void *qe_muram_addr(unsigned long offset)
+ 	return (void *)&qe_immr->muram[offset];
+ }
+ EXPORT_SYMBOL(qe_muram_addr);
++
++/* The maximum number of RISCs we support */
++#define MAX_QE_RISC     2
++
++/* Firmware information stored here for qe_get_firmware_info() */
++static struct qe_firmware_info qe_firmware_info;
++
++/*
++ * Set to 1 if QE firmware has been uploaded, and therefore
++ * qe_firmware_info contains valid data.
++ */
++static int qe_firmware_uploaded;
++
++/*
++ * Upload a QE microcode
++ *
++ * This function is a worker function for qe_upload_firmware().  It does
++ * the actual uploading of the microcode.
++ */
++static void qe_upload_microcode(const void *base,
++	const struct qe_microcode *ucode)
++{
++	const __be32 *code = base + be32_to_cpu(ucode->code_offset);
++	unsigned int i;
++
++	if (ucode->major || ucode->minor || ucode->revision)
++		printk(KERN_INFO "qe-firmware: "
++			"uploading microcode '%s' version %u.%u.%u\n",
++			ucode->id, ucode->major, ucode->minor, ucode->revision);
++	else
++		printk(KERN_INFO "qe-firmware: "
++			"uploading microcode '%s'\n", ucode->id);
++
++	/* Use auto-increment */
++	out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
++		QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
++
++	for (i = 0; i < be32_to_cpu(ucode->count); i++)
++		out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
++}
++
++/*
++ * Upload a microcode to the I-RAM at a specific address.
++ *
++ * See Documentation/powerpc/qe-firmware.txt for information on QE microcode
++ * uploading.
++ *
++ * Currently, only version 1 is supported, so the 'version' field must be
++ * set to 1.
++ *
++ * The SOC model and revision are not validated, they are only displayed for
++ * informational purposes.
++ *
++ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
++ * all of the microcode structures, minus the CRC.
++ *
++ * 'length' is the size that the structure says it is, including the CRC.
++ */
++int qe_upload_firmware(const struct qe_firmware *firmware)
++{
++	unsigned int i;
++	unsigned int j;
++	u32 crc;
++	size_t calc_size = sizeof(struct qe_firmware);
++	size_t length;
++	const struct qe_header *hdr;
++
++	if (!firmware) {
++		printk(KERN_ERR "qe-firmware: invalid pointer\n");
++		return -EINVAL;
++	}
++
++	hdr = &firmware->header;
++	length = be32_to_cpu(hdr->length);
++
++	/* Check the magic */
++	if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
++	    (hdr->magic[2] != 'F')) {
++		printk(KERN_ERR "qe-firmware: not a microcode\n");
++		return -EPERM;
++	}
++
++	/* Check the version */
++	if (hdr->version != 1) {
++		printk(KERN_ERR "qe-firmware: unsupported version\n");
++		return -EPERM;
++	}
++
++	/* Validate some of the fields */
++	if ((firmware->count < 1) || (firmware->count >= MAX_QE_RISC)) {
++		printk(KERN_ERR "qe-firmware: invalid data\n");
++		return -EINVAL;
++	}
++
++	/* Validate the length and check if there's a CRC */
++	calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
++
++	for (i = 0; i < firmware->count; i++)
++		/*
++		 * For situations where the second RISC uses the same microcode
++		 * as the first, the 'code_offset' and 'count' fields will be
++		 * zero, so it's okay to add those.
++		 */
++		calc_size += sizeof(__be32) *
++			be32_to_cpu(firmware->microcode[i].count);
++
++	/* Validate the length */
++	if (length != calc_size + sizeof(__be32)) {
++		printk(KERN_ERR "qe-firmware: invalid length\n");
++		return -EPERM;
++	}
++
++	/* Validate the CRC */
++	crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
++	if (crc != crc32(0, firmware, calc_size)) {
++		printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
++		return -EIO;
++	}
++
++	/*
++	 * If the microcode calls for it, split the I-RAM.
++	 */
++	if (!firmware->split)
++		setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
++
++	if (firmware->soc.model)
++		printk(KERN_INFO
++			"qe-firmware: firmware '%s' for %u V%u.%u\n",
++			firmware->id, be16_to_cpu(firmware->soc.model),
++			firmware->soc.major, firmware->soc.minor);
++	else
++		printk(KERN_INFO "qe-firmware: firmware '%s'\n",
++			firmware->id);
++
++	/*
++	 * The QE only supports one microcode per RISC, so clear out all the
++	 * saved microcode information and put in the new.
++	 */
++	memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
++	strcpy(qe_firmware_info.id, firmware->id);
++	qe_firmware_info.extended_modes = firmware->extended_modes;
++	memcpy(qe_firmware_info.vtraps, firmware->vtraps,
++		sizeof(firmware->vtraps));
++
++	/* Loop through each microcode. */
++	for (i = 0; i < firmware->count; i++) {
++		const struct qe_microcode *ucode = &firmware->microcode[i];
++
++		/* Upload a microcode if it's present */
++		if (ucode->code_offset)
++			qe_upload_microcode(firmware, ucode);
++
++		/* Program the traps for this processor */
++		for (j = 0; j < 16; j++) {
++			u32 trap = be32_to_cpu(ucode->traps[j]);
++
++			if (trap)
++				out_be32(&qe_immr->rsp[i].tibcr[j], trap);
++		}
++
++		/* Enable traps */
++		out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
++	}
++
++	qe_firmware_uploaded = 1;
++
++	return 0;
++}
++EXPORT_SYMBOL(qe_upload_firmware);
++
++/*
++ * Get info on the currently-loaded firmware
++ *
++ * This function also checks the device tree to see if the boot loader has
++ * uploaded a firmware already.
++ */
++struct qe_firmware_info *qe_get_firmware_info(void)
++{
++	static int initialized;
++	struct property *prop;
++	struct device_node *qe;
++	struct device_node *fw = NULL;
++	const char *sprop;
++	unsigned int i;
++
++	/*
++	 * If we haven't checked yet, and a driver hasn't uploaded a firmware
++	 * yet, then check the device tree for information.
++	 */
++	if (initialized || qe_firmware_uploaded)
++		return NULL;
++
++	initialized = 1;
++
++	/*
++	 * Newer device trees have an "fsl,qe" compatible property for the QE
++	 * node, but we still need to support older device trees.
++	*/
++	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
++	if (!qe) {
++		qe = of_find_node_by_type(NULL, "qe");
++		if (!qe)
++			return NULL;
++	}
++
++	/* Find the 'firmware' child node */
++	for_each_child_of_node(qe, fw) {
++		if (strcmp(fw->name, "firmware") == 0)
++			break;
++	}
++
++	of_node_put(qe);
++
++	/* Did we find the 'firmware' node? */
++	if (!fw)
++		return NULL;
++
++	qe_firmware_uploaded = 1;
++
++	/* Copy the data into qe_firmware_info*/
++	sprop = of_get_property(fw, "id", NULL);
++	if (sprop)
++		strncpy(qe_firmware_info.id, sprop,
++			sizeof(qe_firmware_info.id) - 1);
++
++	prop = of_find_property(fw, "extended-modes", NULL);
++	if (prop && (prop->length == sizeof(u64))) {
++		const u64 *iprop = prop->value;
++
++		qe_firmware_info.extended_modes = *iprop;
++	}
++
++	prop = of_find_property(fw, "virtual-traps", NULL);
++	if (prop && (prop->length == 32)) {
++		const u32 *iprop = prop->value;
++
++		for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
++			qe_firmware_info.vtraps[i] = iprop[i];
++	}
++
++	of_node_put(fw);
++
++	return &qe_firmware_info;
++}
++EXPORT_SYMBOL(qe_get_firmware_info);
++
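To make the size and CRC validation in qe_upload_firmware() above easier to
follow: an image consists of the fixed header, one extra microcode descriptor
per additional RISC, the instruction words themselves, and a trailing 32-bit
CRC computed over everything before it. A rough sketch of that bookkeeping;
the structure sizes and word counts below are illustrative placeholders, the
real ones come from the qe_firmware and qe_microcode definitions:

#include <stdio.h>

int main(void)
{
	unsigned long hdr_size   = 96;  /* assumed sizeof(struct qe_firmware)  */
	unsigned long ucode_size = 64;  /* assumed sizeof(struct qe_microcode) */
	unsigned long count      = 2;   /* number of RISCs / microcode entries */
	unsigned long words[2]   = { 1024, 0 }; /* second RISC shares the code */

	unsigned long calc_size = hdr_size + (count - 1) * ucode_size
				+ 4 * (words[0] + words[1]);
	unsigned long length = calc_size + 4;   /* trailing __be32 CRC word */

	printf("header 'length' field must be %lu bytes\n", length);
	printf("the CRC covers the first %lu bytes\n", calc_size);
	return 0;
}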
+diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
+index e1c0fd6..f59444d 100644
+--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
++++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
+@@ -483,7 +483,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
+ }
+ 
+ static struct sysdev_class qe_ic_sysclass = {
+-	set_kset_name("qe_ic"),
++	.name = "qe_ic",
+ };
+ 
+ static struct sys_device device_qe_ic = {
+diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+index 0174b3a..b2870b2 100644
+--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
++++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+@@ -19,6 +19,7 @@
+ #include <linux/stddef.h>
+ #include <linux/interrupt.h>
+ #include <linux/err.h>
++#include <linux/module.h>
+ 
+ #include <asm/io.h>
+ #include <asm/immap_qe.h>
+@@ -41,6 +42,7 @@ u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
+ 	default: return QE_CR_SUBBLOCK_INVALID;
+ 	}
+ }
++EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
+ 
+ void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
+ {
+@@ -56,6 +58,7 @@ void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
+ 	qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
+ 			 QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ }
++EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
+ 
+ void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
+ {
+@@ -65,6 +68,7 @@ void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
+ 	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ 	qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ }
++EXPORT_SYMBOL(ucc_slow_stop_tx);
+ 
+ void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
+ {
+@@ -74,6 +78,7 @@ void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
+ 	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ 	qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ }
++EXPORT_SYMBOL(ucc_slow_restart_tx);
+ 
+ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
+ {
+@@ -94,6 +99,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
+ 	}
+ 	out_be32(&us_regs->gumr_l, gumr_l);
+ }
++EXPORT_SYMBOL(ucc_slow_enable);
+ 
+ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
+ {
+@@ -114,6 +120,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
+ 	}
+ 	out_be32(&us_regs->gumr_l, gumr_l);
+ }
++EXPORT_SYMBOL(ucc_slow_disable);
+ 
+ /* Initialize the UCC for Slow operations
+  *
+@@ -347,6 +354,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
+ 	*uccs_ret = uccs;
+ 	return 0;
+ }
++EXPORT_SYMBOL(ucc_slow_init);
+ 
+ void ucc_slow_free(struct ucc_slow_private * uccs)
+ {
+@@ -366,5 +374,5 @@ void ucc_slow_free(struct ucc_slow_private * uccs)
+ 
+ 	kfree(uccs);
+ }
+-
++EXPORT_SYMBOL(ucc_slow_free);
+ 
+diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
+index a113d80..be2808a 100644
+--- a/arch/powerpc/sysdev/tsi108_dev.c
++++ b/arch/powerpc/sysdev/tsi108_dev.c
+@@ -66,14 +66,12 @@ EXPORT_SYMBOL(get_vir_csrbase);
+ static int __init tsi108_eth_of_init(void)
+ {
+ 	struct device_node *np;
+-	unsigned int i;
++	unsigned int i = 0;
+ 	struct platform_device *tsi_eth_dev;
+ 	struct resource res;
+ 	int ret;
+ 
+-	for (np = NULL, i = 0;
+-	     (np = of_find_compatible_node(np, "network", "tsi108-ethernet")) != NULL;
+-	     i++) {
++	for_each_compatible_node(np, "network", "tsi108-ethernet") {
+ 		struct resource r[2];
+ 		struct device_node *phy, *mdio;
+ 		hw_info tsi_eth_data;
+@@ -98,7 +96,7 @@ static int __init tsi108_eth_of_init(void)
+ 			__FUNCTION__,r[1].name, r[1].start, r[1].end);
+ 
+ 		tsi_eth_dev =
+-		    platform_device_register_simple("tsi-ethernet", i, &r[0],
++		    platform_device_register_simple("tsi-ethernet", i++, &r[0],
+ 						    1);
+ 
+ 		if (IS_ERR(tsi_eth_dev)) {
+@@ -154,6 +152,7 @@ static int __init tsi108_eth_of_init(void)
+ unreg:
+ 	platform_device_unregister(tsi_eth_dev);
+ err:
++	of_node_put(np);
+ 	return ret;
+ }
+ 
+diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
+index 847a549..625b275 100644
+--- a/arch/powerpc/sysdev/uic.c
++++ b/arch/powerpc/sysdev/uic.c
+@@ -53,21 +53,23 @@ struct uic {
+ 
+ 	/* The remapper for this UIC */
+ 	struct irq_host	*irqhost;
+-
+-	/* For secondary UICs, the cascade interrupt's irqaction */
+-	struct irqaction cascade;
+ };
+ 
+ static void uic_unmask_irq(unsigned int virq)
+ {
++	struct irq_desc *desc = get_irq_desc(virq);
+ 	struct uic *uic = get_irq_chip_data(virq);
+ 	unsigned int src = uic_irq_to_hw(virq);
+ 	unsigned long flags;
+-	u32 er;
++	u32 er, sr;
+ 
++	sr = 1 << (31-src);
+ 	spin_lock_irqsave(&uic->lock, flags);
++	/* ack level-triggered interrupts here */
++	if (desc->status & IRQ_LEVEL)
++		mtdcr(uic->dcrbase + UIC_SR, sr);
+ 	er = mfdcr(uic->dcrbase + UIC_ER);
+-	er |= 1 << (31 - src);
++	er |= sr;
+ 	mtdcr(uic->dcrbase + UIC_ER, er);
+ 	spin_unlock_irqrestore(&uic->lock, flags);
+ }
+@@ -99,6 +101,7 @@ static void uic_ack_irq(unsigned int virq)
+ 
+ static void uic_mask_ack_irq(unsigned int virq)
+ {
++	struct irq_desc *desc = get_irq_desc(virq);
+ 	struct uic *uic = get_irq_chip_data(virq);
+ 	unsigned int src = uic_irq_to_hw(virq);
+ 	unsigned long flags;
+@@ -109,7 +112,16 @@ static void uic_mask_ack_irq(unsigned int virq)
+ 	er = mfdcr(uic->dcrbase + UIC_ER);
+ 	er &= ~sr;
+ 	mtdcr(uic->dcrbase + UIC_ER, er);
+-	mtdcr(uic->dcrbase + UIC_SR, sr);
++ 	/* On the UIC, acking (i.e. clearing the SR bit)
++	 * a level irq will have no effect if the interrupt
++	 * is still asserted by the device, even if
++	 * the interrupt is already masked. Therefore
++	 * we only ack the edge interrupts here, while
++	 * level interrupts are ack'ed after the actual
++	 * isr call in the uic_unmask_irq()
++	 */
++	if (!(desc->status & IRQ_LEVEL))
++		mtdcr(uic->dcrbase + UIC_SR, sr);
+ 	spin_unlock_irqrestore(&uic->lock, flags);
+ }
+ 
+@@ -173,64 +185,6 @@ static struct irq_chip uic_irq_chip = {
+ 	.set_type	= uic_set_irq_type,
+ };
+ 
+-/**
+- *	handle_uic_irq - irq flow handler for UIC
+- *	@irq:	the interrupt number
+- *	@desc:	the interrupt description structure for this irq
+- *
+- * This is modified version of the generic handle_level_irq() suitable
+- * for the UIC.  On the UIC, acking (i.e. clearing the SR bit) a level
+- * irq will have no effect if the interrupt is still asserted by the
+- * device, even if the interrupt is already masked.  Therefore, unlike
+- * the standard handle_level_irq(), we must ack the interrupt *after*
+- * invoking the ISR (which should have de-asserted the interrupt in
+- * the external source).  For edge interrupts we ack at the beginning
+- * instead of the end, to keep the window in which we can miss an
+- * interrupt as small as possible.
+- */
+-void fastcall handle_uic_irq(unsigned int irq, struct irq_desc *desc)
+-{
+-	unsigned int cpu = smp_processor_id();
+-	struct irqaction *action;
+-	irqreturn_t action_ret;
+-
+-	spin_lock(&desc->lock);
+-	if (desc->status & IRQ_LEVEL)
+-		desc->chip->mask(irq);
+-	else
+-		desc->chip->mask_ack(irq);
+-
+-	if (unlikely(desc->status & IRQ_INPROGRESS))
+-		goto out_unlock;
+-	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+-	kstat_cpu(cpu).irqs[irq]++;
+-
+-	/*
+-	 * If its disabled or no action available
+-	 * keep it masked and get out of here
+-	 */
+-	action = desc->action;
+-	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
+-		desc->status |= IRQ_PENDING;
+-		goto out_unlock;
+-	}
+-
+-	desc->status |= IRQ_INPROGRESS;
+-	desc->status &= ~IRQ_PENDING;
+-	spin_unlock(&desc->lock);
+-
+-	action_ret = handle_IRQ_event(irq, action);
+-
+-	spin_lock(&desc->lock);
+-	desc->status &= ~IRQ_INPROGRESS;
+-	if (desc->status & IRQ_LEVEL)
+-		desc->chip->ack(irq);
+-	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+-		desc->chip->unmask(irq);
+-out_unlock:
+-	spin_unlock(&desc->lock);
+-}
+-
+ static int uic_host_map(struct irq_host *h, unsigned int virq,
+ 			irq_hw_number_t hw)
+ {
+@@ -239,7 +193,7 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
+ 	set_irq_chip_data(virq, uic);
+ 	/* Despite the name, handle_level_irq() works for both level
+ 	 * and edge irqs on UIC.  FIXME: check this is correct */
+-	set_irq_chip_and_handler(virq, &uic_irq_chip, handle_uic_irq);
++	set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
+ 
+ 	/* Set default irq type */
+ 	set_irq_type(virq, IRQ_TYPE_NONE);
+@@ -264,23 +218,36 @@ static struct irq_host_ops uic_host_ops = {
+ 	.xlate	= uic_host_xlate,
+ };
+ 
+-irqreturn_t uic_cascade(int virq, void *data)
++void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
+ {
+-	struct uic *uic = data;
++	struct uic *uic = get_irq_data(virq);
+ 	u32 msr;
+ 	int src;
+ 	int subvirq;
+ 
++	spin_lock(&desc->lock);
++	if (desc->status & IRQ_LEVEL)
++		desc->chip->mask(virq);
++	else
++		desc->chip->mask_ack(virq);
++	spin_unlock(&desc->lock);
++
+ 	msr = mfdcr(uic->dcrbase + UIC_MSR);
+ 	if (!msr) /* spurious interrupt */
+-		return IRQ_HANDLED;
++		goto uic_irq_ret;
+ 
+ 	src = 32 - ffs(msr);
+ 
+ 	subvirq = irq_linear_revmap(uic->irqhost, src);
+ 	generic_handle_irq(subvirq);
+ 
+-	return IRQ_HANDLED;
++uic_irq_ret:
++	spin_lock(&desc->lock);
++	if (desc->status & IRQ_LEVEL)
++		desc->chip->ack(virq);
++	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
++		desc->chip->unmask(virq);
++	spin_unlock(&desc->lock);
+ }
+ 
+ static struct uic * __init uic_init_one(struct device_node *node)
+@@ -342,33 +309,27 @@ void __init uic_init_tree(void)
+ 	const u32 *interrupts;
+ 
+ 	/* First locate and initialize the top-level UIC */
+-
+-	np = of_find_compatible_node(NULL, NULL, "ibm,uic");
+-	while (np) {
++	for_each_compatible_node(np, NULL, "ibm,uic") {
+ 		interrupts = of_get_property(np, "interrupts", NULL);
+-		if (! interrupts)
++		if (!interrupts)
+ 			break;
+-
+-		np = of_find_compatible_node(np, NULL, "ibm,uic");
+ 	}
+ 
+ 	BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the
+ 		      * top-level interrupt controller */
+ 	primary_uic = uic_init_one(np);
+-	if (! primary_uic)
++	if (!primary_uic)
+ 		panic("Unable to initialize primary UIC %s\n", np->full_name);
+ 
+ 	irq_set_default_host(primary_uic->irqhost);
+ 	of_node_put(np);
+ 
+ 	/* Then scan again for cascaded UICs */
+-	np = of_find_compatible_node(NULL, NULL, "ibm,uic");
+-	while (np) {
++	for_each_compatible_node(np, NULL, "ibm,uic") {
+ 		interrupts = of_get_property(np, "interrupts", NULL);
+ 		if (interrupts) {
+ 			/* Secondary UIC */
+ 			int cascade_virq;
+-			int ret;
+ 
+ 			uic = uic_init_one(np);
+ 			if (! uic)
+@@ -377,20 +338,11 @@ void __init uic_init_tree(void)
+ 
+ 			cascade_virq = irq_of_parse_and_map(np, 0);
+ 
+-			uic->cascade.handler = uic_cascade;
+-			uic->cascade.name = "UIC cascade";
+-			uic->cascade.dev_id = uic;
+-
+-			ret = setup_irq(cascade_virq, &uic->cascade);
+-			if (ret)
+-				printk(KERN_ERR "Failed to setup_irq(%d) for "
+-				       "UIC%d cascade\n", cascade_virq,
+-				       uic->index);
++			set_irq_data(cascade_virq, uic);
++			set_irq_chained_handler(cascade_virq, uic_irq_cascade);
+ 
+ 			/* FIXME: setup critical cascade?? */
+ 		}
+-
+-		np = of_find_compatible_node(np, NULL, "ibm,uic");
+ 	}
+ }
+ 
+diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
+index c2f17cc..ba8eea2 100644
+--- a/arch/powerpc/sysdev/xilinx_intc.c
++++ b/arch/powerpc/sysdev/xilinx_intc.c
+@@ -135,10 +135,16 @@ void __init xilinx_intc_init_tree(void)
+ 	struct device_node *np;
+ 
+ 	/* find top level interrupt controller */
+-	for_each_compatible_node(np, NULL, "xilinx,intc") {
++	for_each_compatible_node(np, NULL, "xlnx,opb-intc-1.00.c") {
+ 		if (!of_get_property(np, "interrupts", NULL))
+ 			break;
+ 	}
++	if (!np) {
++		for_each_compatible_node(np, NULL, "xlnx,xps-intc-1.00.a") {
++			if (!of_get_property(np, "interrupts", NULL))
++				break;
++		}
++	}
+ 
+ 	/* xilinx interrupt controller needs to be top level */
+ 	BUG_ON(!np);
+diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/xmon/setjmp.S
+index 96a91f1..04c0b30 100644
+--- a/arch/powerpc/xmon/setjmp.S
++++ b/arch/powerpc/xmon/setjmp.S
+@@ -12,67 +12,6 @@
+ #include <asm/ppc_asm.h>
+ #include <asm/asm-offsets.h>
+ 
+-_GLOBAL(xmon_setjmp)
+-	mflr	r0
+-	PPC_STL	r0,0(r3)
+-	PPC_STL	r1,SZL(r3)
+-	PPC_STL	r2,2*SZL(r3)
+-	mfcr	r0
+-	PPC_STL	r0,3*SZL(r3)
+-	PPC_STL	r13,4*SZL(r3)
+-	PPC_STL	r14,5*SZL(r3)
+-	PPC_STL	r15,6*SZL(r3)
+-	PPC_STL	r16,7*SZL(r3)
+-	PPC_STL	r17,8*SZL(r3)
+-	PPC_STL	r18,9*SZL(r3)
+-	PPC_STL	r19,10*SZL(r3)
+-	PPC_STL	r20,11*SZL(r3)
+-	PPC_STL	r21,12*SZL(r3)
+-	PPC_STL	r22,13*SZL(r3)
+-	PPC_STL	r23,14*SZL(r3)
+-	PPC_STL	r24,15*SZL(r3)
+-	PPC_STL	r25,16*SZL(r3)
+-	PPC_STL	r26,17*SZL(r3)
+-	PPC_STL	r27,18*SZL(r3)
+-	PPC_STL	r28,19*SZL(r3)
+-	PPC_STL	r29,20*SZL(r3)
+-	PPC_STL	r30,21*SZL(r3)
+-	PPC_STL	r31,22*SZL(r3)
+-	li	r3,0
+-	blr
+-
+-_GLOBAL(xmon_longjmp)
+-	PPC_LCMPI r4,0
+-	bne	1f
+-	li	r4,1
+-1:	PPC_LL	r13,4*SZL(r3)
+-	PPC_LL	r14,5*SZL(r3)
+-	PPC_LL	r15,6*SZL(r3)
+-	PPC_LL	r16,7*SZL(r3)
+-	PPC_LL	r17,8*SZL(r3)
+-	PPC_LL	r18,9*SZL(r3)
+-	PPC_LL	r19,10*SZL(r3)
+-	PPC_LL	r20,11*SZL(r3)
+-	PPC_LL	r21,12*SZL(r3)
+-	PPC_LL	r22,13*SZL(r3)
+-	PPC_LL	r23,14*SZL(r3)
+-	PPC_LL	r24,15*SZL(r3)
+-	PPC_LL	r25,16*SZL(r3)
+-	PPC_LL	r26,17*SZL(r3)
+-	PPC_LL	r27,18*SZL(r3)
+-	PPC_LL	r28,19*SZL(r3)
+-	PPC_LL	r29,20*SZL(r3)
+-	PPC_LL	r30,21*SZL(r3)
+-	PPC_LL	r31,22*SZL(r3)
+-	PPC_LL	r0,3*SZL(r3)
+-	mtcrf	0x38,r0
+-	PPC_LL	r0,0(r3)
+-	PPC_LL	r1,SZL(r3)
+-	PPC_LL	r2,2*SZL(r3)
+-	mtlr	r0
+-	mr	r3,r4
+-	blr
+-
+ /*
+  * Grab the register values as they are now.
+ * This won't do a particularly good job because we really
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 121b04d..a34172d 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -40,6 +40,7 @@
+ #include <asm/spu.h>
+ #include <asm/spu_priv1.h>
+ #include <asm/firmware.h>
++#include <asm/setjmp.h>
+ 
+ #ifdef CONFIG_PPC64
+ #include <asm/hvcall.h>
+@@ -71,12 +72,9 @@ static unsigned long ncsum = 4096;
+ static int termch;
+ static char tmpstr[128];
+ 
+-#define JMP_BUF_LEN	23
+ static long bus_error_jmp[JMP_BUF_LEN];
+ static int catch_memory_errors;
+ static long *xmon_fault_jmp[NR_CPUS];
+-#define setjmp xmon_setjmp
+-#define longjmp xmon_longjmp
+ 
+ /* Breakpoint stuff */
+ struct bpt {
+@@ -153,13 +151,15 @@ static const char *getvecname(unsigned long vec);
+ 
+ static int do_spu_cmd(void);
+ 
++#ifdef CONFIG_44x
++static void dump_tlb_44x(void);
++#endif
++
+ int xmon_no_auto_backtrace;
+ 
+ extern void xmon_enter(void);
+ extern void xmon_leave(void);
+ 
+-extern long setjmp(long *);
+-extern void longjmp(long *, long);
+ extern void xmon_save_regs(struct pt_regs *);
+ 
+ #ifdef CONFIG_PPC64
+@@ -231,6 +231,9 @@ Commands:\n\
+ #ifdef CONFIG_PPC_STD_MMU_32
+ "  u	dump segment registers\n"
+ #endif
++#ifdef CONFIG_44x
++"  u	dump TLB\n"
++#endif
+ "  ?	help\n"
+ "  zr	reboot\n\
+   zh	halt\n"
+@@ -856,6 +859,11 @@ cmds(struct pt_regs *excp)
+ 			dump_segments();
+ 			break;
+ #endif
++#ifdef CONFIG_4xx
++		case 'u':
++			dump_tlb_44x();
++			break;
++#endif
+ 		default:
+ 			printf("Unrecognized command: ");
+ 		        do {
+@@ -2527,16 +2535,33 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
+ static void dump_slb(void)
+ {
+ 	int i;
+-	unsigned long tmp;
++	unsigned long esid,vsid,valid;
++	unsigned long llp;
+ 
+ 	printf("SLB contents of cpu %x\n", smp_processor_id());
+ 
+-	for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+-		asm volatile("slbmfee  %0,%1" : "=r" (tmp) : "r" (i));
+-		printf("%02d %016lx ", i, tmp);
+-
+-		asm volatile("slbmfev  %0,%1" : "=r" (tmp) : "r" (i));
+-		printf("%016lx\n", tmp);
++	for (i = 0; i < mmu_slb_size; i++) {
++		asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
++		asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));
++		valid = (esid & SLB_ESID_V);
++		if (valid | esid | vsid) {
++			printf("%02d %016lx %016lx", i, esid, vsid);
++			if (valid) {
++				llp = vsid & SLB_VSID_LLP;
++				if (vsid & SLB_VSID_B_1T) {
++					printf("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx \n",
++						GET_ESID_1T(esid),
++						(vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T,
++						llp);
++				} else {
++					printf(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx \n",
++						GET_ESID(esid),
++						(vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT,
++						llp);
++				}
++			} else
++				printf("\n");
++		}
+ 	}
+ }
+ 
+@@ -2581,6 +2606,32 @@ void dump_segments(void)
+ }
+ #endif
+ 
++#ifdef CONFIG_44x
++static void dump_tlb_44x(void)
++{
++	int i;
++
++	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
++		unsigned long w0,w1,w2;
++		asm volatile("tlbre  %0,%1,0" : "=r" (w0) : "r" (i));
++		asm volatile("tlbre  %0,%1,1" : "=r" (w1) : "r" (i));
++		asm volatile("tlbre  %0,%1,2" : "=r" (w2) : "r" (i));
++		printf("[%02x] %08x %08x %08x ", i, w0, w1, w2);
++		if (w0 & PPC44x_TLB_VALID) {
++			printf("V %08x -> %01x%08x %c%c%c%c%c",
++			       w0 & PPC44x_TLB_EPN_MASK,
++			       w1 & PPC44x_TLB_ERPN_MASK,
++			       w1 & PPC44x_TLB_RPN_MASK,
++			       (w2 & PPC44x_TLB_W) ? 'W' : 'w',
++			       (w2 & PPC44x_TLB_I) ? 'I' : 'i',
++			       (w2 & PPC44x_TLB_M) ? 'M' : 'm',
++			       (w2 & PPC44x_TLB_G) ? 'G' : 'g',
++			       (w2 & PPC44x_TLB_E) ? 'E' : 'e');
++		}
++		printf("\n");
++	}
++}
++#endif /* CONFIG_44x */
+ void xmon_init(int enable)
+ {
+ #ifdef CONFIG_PPC_ISERIES
+diff --git a/arch/ppc/8260_io/enet.c b/arch/ppc/8260_io/enet.c
+index 615b658..25ef55b 100644
+--- a/arch/ppc/8260_io/enet.c
++++ b/arch/ppc/8260_io/enet.c
+@@ -10,7 +10,7 @@
+  * This version of the driver is somewhat selectable for the different
+  * processor/board combinations.  It works for the boards I know about
+  * now, and should be easily modified to include others.  Some of the
+- * configuration information is contained in <asm/commproc.h> and the
++ * configuration information is contained in <asm/cpm1.h> and the
+  * remainder is here.
+  *
+  * Buffer descriptors are kept in the CPM dual port RAM, and the frame
+@@ -272,7 +272,7 @@ scc_enet_timeout(struct net_device *dev)
+  * This is called from the CPM handler, not the MPC core interrupt.
+  */
+ static irqreturn_t
+-scc_enet_interrupt(int irq, void * dev_id)
++scc_enet_interrupt(int irq, void *dev_id)
+ {
+ 	struct	net_device *dev = dev_id;
+ 	volatile struct	scc_enet_private *cep;
+@@ -280,7 +280,7 @@ scc_enet_interrupt(int irq, void * dev_id)
+ 	ushort	int_events;
+ 	int	must_restart;
+ 
+-	cep = (struct scc_enet_private *)dev->priv;
++	cep = dev->priv;
+ 
+ 	/* Get the interrupt events that caused us to be here.
+ 	*/
+diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
+index 6f3ed6a..a3a27da 100644
+--- a/arch/ppc/8260_io/fcc_enet.c
++++ b/arch/ppc/8260_io/fcc_enet.c
+@@ -524,7 +524,7 @@ fcc_enet_timeout(struct net_device *dev)
+ 
+ /* The interrupt handler. */
+ static irqreturn_t
+-fcc_enet_interrupt(int irq, void * dev_id)
++fcc_enet_interrupt(int irq, void *dev_id)
+ {
+ 	struct	net_device *dev = dev_id;
+ 	volatile struct	fcc_enet_private *cep;
+@@ -532,7 +532,7 @@ fcc_enet_interrupt(int irq, void * dev_id)
+ 	ushort	int_events;
+ 	int	must_restart;
+ 
+-	cep = (struct fcc_enet_private *)dev->priv;
++	cep = dev->priv;
+ 
+ 	/* Get the interrupt events that caused us to be here.
+ 	*/
+diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
+index 9da880b..9d656de 100644
+--- a/arch/ppc/8xx_io/commproc.c
++++ b/arch/ppc/8xx_io/commproc.c
+@@ -34,7 +34,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/8xx_immap.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #include <asm/io.h>
+ #include <asm/tlbflush.h>
+ #include <asm/rheap.h>
+@@ -55,8 +55,6 @@
+ })
+ 
+ static void m8xx_cpm_dpinit(void);
+-static	uint	host_buffer;	/* One page of host buffer */
+-static	uint	host_end;	/* end + 1 */
+ cpm8xx_t	*cpmp;		/* Pointer to comm processor space */
+ 
+ /* CPM interrupt vector functions.
+@@ -68,7 +66,6 @@ struct	cpm_action {
+ static	struct	cpm_action cpm_vecs[CPMVEC_NR];
+ static	irqreturn_t cpm_interrupt(int irq, void * dev);
+ static	irqreturn_t cpm_error_interrupt(int irq, void *dev);
+-static	void	alloc_host_memory(void);
+ /* Define a table of names to identify CPM interrupt handlers in
+  * /proc/interrupts.
+  */
+@@ -158,21 +155,6 @@ m8xx_cpm_reset(void)
+ 	cpmp = (cpm8xx_t *)commproc;
+ }
+ 
+-/* We used to do this earlier, but have to postpone as long as possible
+- * to ensure the kernel VM is now running.
+- */
+-static void
+-alloc_host_memory(void)
+-{
+-	dma_addr_t	physaddr;
+-
+-	/* Set the host page for allocation.
+-	*/
+-	host_buffer = (uint)dma_alloc_coherent(NULL, PAGE_SIZE, &physaddr,
+-			GFP_KERNEL);
+-	host_end = host_buffer + PAGE_SIZE;
+-}
+-
+ /* This is called during init_IRQ.  We used to do it above, but this
+  * was too early since init_IRQ was not yet called.
+  */
+@@ -319,26 +301,6 @@ cpm_free_handler(int cpm_vec)
+ 	cpm_vecs[cpm_vec].dev_id = NULL;
+ }
+ 
+-/* We also own one page of host buffer space for the allocation of
+- * UART "fifos" and the like.
+- */
+-uint
+-m8xx_cpm_hostalloc(uint size)
+-{
+-	uint	retloc;
+-
+-	if (host_buffer == 0)
+-		alloc_host_memory();
+-
+-	if ((host_buffer + size) >= host_end)
+-		return(0);
+-
+-	retloc = host_buffer;
+-	host_buffer += size;
+-
+-	return(retloc);
+-}
+-
+ /* Set a baud rate generator.  This needs lots of work.  There are
+  * four BRGs, any of which can be wired to any channel.
+  * The internal baud rate clock is the system clock divided by 16.
+diff --git a/arch/ppc/8xx_io/enet.c b/arch/ppc/8xx_io/enet.c
+index eace3bc..c6d047a 100644
+--- a/arch/ppc/8xx_io/enet.c
++++ b/arch/ppc/8xx_io/enet.c
+@@ -8,7 +8,7 @@
+  * This version of the driver is somewhat selectable for the different
+  * processor/board combinations.  It works for the boards I know about
+  * now, and should be easily modified to include others.  Some of the
+- * configuration information is contained in <asm/commproc.h> and the
++ * configuration information is contained in <asm/cpm1.h> and the
+  * remainder is here.
+  *
+  * Buffer descriptors are kept in the CPM dual port RAM, and the frame
+@@ -43,7 +43,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/mpc8xx.h>
+ #include <asm/uaccess.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #include <asm/cacheflush.h>
+ 
+ /*
+@@ -80,7 +80,7 @@
+  * programming documents for details unique to your board.
+  *
+  * For the TQM8xx(L) modules, there is no control register interface.
+- * All functions are directly controlled using I/O pins.  See <asm/commproc.h>.
++ * All functions are directly controlled using I/O pins.  See <asm/cpm1.h>.
+  */
+ 
+ /* The transmitter timeout
+diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
+index 0288279..11b0aa6 100644
+--- a/arch/ppc/8xx_io/fec.c
++++ b/arch/ppc/8xx_io/fec.c
+@@ -53,7 +53,7 @@
+ #include <asm/mpc8xx.h>
+ #include <asm/irq.h>
+ #include <asm/uaccess.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ #ifdef	CONFIG_USE_MDIO
+ /* Forward declarations of some structures to support different PHYs
+diff --git a/arch/ppc/8xx_io/micropatch.c b/arch/ppc/8xx_io/micropatch.c
+index cfad46b..9a5d95d 100644
+--- a/arch/ppc/8xx_io/micropatch.c
++++ b/arch/ppc/8xx_io/micropatch.c
+@@ -16,7 +16,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/8xx_immap.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ /*
+  * I2C/SPI relocation patch arrays.
+diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
+index 6473fa7..db5934c 100644
+--- a/arch/ppc/Kconfig
++++ b/arch/ppc/Kconfig
+@@ -78,18 +78,18 @@ choice
+ 	default 6xx
+ 
+ config 6xx
+-	bool "6xx/7xx/74xx/52xx/82xx/83xx"
++	bool "6xx/7xx/74xx/52xx/82xx"
+ 	select PPC_FPU
+ 	help
+ 	  There are four types of PowerPC chips supported.  The more common
+ 	  types (601, 603, 604, 740, 750, 7400), the older Freescale
+ 	  (formerly Motorola) embedded versions (821, 823, 850, 855, 860,
+-	  52xx, 82xx, 83xx), the IBM embedded versions (403 and 405) and
++	  52xx, 82xx), the IBM embedded versions (403 and 405) and
+ 	  the Book E embedded processors from IBM (44x) and Freescale (85xx).
+ 	  For support for 64-bit processors, set ARCH=powerpc.
+ 	  Unless you are building a kernel for one of the embedded processor
+ 	  systems, choose 6xx.
+-	  Also note that because the 52xx, 82xx, & 83xx family have a 603e
++	  Also note that because the 52xx, 82xx family have a 603e
+ 	  core, specific support for that chipset is asked later on.
+ 
+ config 40x
+@@ -104,12 +104,6 @@ config 8xx
+ 	bool "8xx"
+ 	select PPC_LIB_RHEAP
+ 
+-config E200
+-	bool "e200"
+-
+-config E500
+-	bool "e500"
+-
+ endchoice
+ 
+ config PPC_FPU
+@@ -124,25 +118,14 @@ config PPC_DCR
+ 	depends on PPC_DCR_NATIVE
+ 	default y
+ 
+-config BOOKE
+-	bool
+-	depends on E200 || E500
+-	default y
+-
+-config FSL_BOOKE
+-	bool
+-	depends on E200 || E500
+-	default y
+-
+ config PTE_64BIT
+ 	bool
+-	depends on 44x || E500
++	depends on 44x
+ 	default y if 44x
+-	default y if E500 && PHYS_64BIT
+ 
+ config PHYS_64BIT
+-	bool 'Large physical address support' if E500
+-	depends on 44x || E500
++	bool
++	depends on 44x
+ 	default y if 44x
+ 	---help---
+ 	  This option enables kernel support for larger than 32-bit physical
+@@ -153,7 +136,7 @@ config PHYS_64BIT
+ config ALTIVEC
+ 	bool "AltiVec Support"
+ 	depends on 6xx
+-	depends on !8260 && !83xx
++	depends on !8260
+ 	---help---
+ 	  This option enables kernel support for the Altivec extensions to the
+ 	  PowerPC processor. The kernel currently supports saving and restoring
+@@ -167,24 +150,9 @@ config ALTIVEC
+ 
+ 	  If in doubt, say Y here.
+ 
+-config SPE
+-	bool "SPE Support"
+-	depends on E200 || E500
+-	---help---
+-	  This option enables kernel support for the Signal Processing
+-	  Extensions (SPE) to the PowerPC processor. The kernel currently
+-	  supports saving and restoring SPE registers, and turning on the
+-	  'spe enable' bit so user processes can execute SPE instructions.
+-
+-	  This option is only useful if you have a processor that supports
+-	  SPE (e500, otherwise known as 85xx series), but does not have any
+-	  effect on a non-spe cpu (it does, however add code to the kernel).
+-
+-	  If in doubt, say Y here.
+-
+ config TAU
+ 	bool "Thermal Management Support"
+-	depends on 6xx && !8260 && !83xx
++	depends on 6xx && !8260
+ 	help
+ 	  G3 and G4 processors have an on-chip temperature sensor called the
+ 	  'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die
+@@ -228,7 +196,7 @@ config TAU_AVERAGE
+ 
+ config MATH_EMULATION
+ 	bool "Math emulation"
+-	depends on 4xx || 8xx || E200 || E500
++	depends on 4xx || 8xx
+ 	---help---
+ 	  Some PowerPC chips designed for embedded applications do not have
+ 	  a floating-point unit and therefore do not implement the
+@@ -279,7 +247,6 @@ config PPC601_SYNC_FIX
+ 	  If in doubt, say Y here.
+ 
+ source arch/ppc/platforms/4xx/Kconfig
+-source arch/ppc/platforms/85xx/Kconfig
+ 
+ config PPC_STD_MMU
+ 	bool
+@@ -288,7 +255,7 @@ config PPC_STD_MMU
+ 
+ config NOT_COHERENT_CACHE
+ 	bool
+-	depends on 4xx || 8xx || E200
++	depends on 4xx || 8xx
+ 	default y
+ 
+ endmenu
+@@ -721,16 +688,6 @@ config LITE5200B
+ 	  Support for the LITE5200B dev board for the MPC5200 from Freescale.
+ 	  This is the new board with 2 PCI slots.
+ 
+-config MPC834x_SYS
+-	bool "Freescale MPC834x SYS"
+-	help
+-	  This option enables support for the MPC 834x SYS evaluation board.
+-
+-	  Be aware that PCI buses can only function when SYS board is plugged
+-	  into the PIB (Platform IO Board) board from Freescale which provide
+-	  3 PCI slots.  The PIBs PCI initialization is the bootloader's
+-	  responsibility.
+-
+ config EV64360
+ 	bool "Marvell-EV64360BP"
+ 	help
+@@ -774,18 +731,6 @@ config 8272
+ 	  The MPC8272 CPM has a different internal dpram setup than other CPM2
+ 	  devices
+ 
+-config 83xx
+-	bool
+-	default y if MPC834x_SYS
+-
+-config MPC834x
+-	bool
+-	default y if MPC834x_SYS
+-
+-config PPC_83xx
+-	bool
+-	default y if 83xx
+-
+ config CPM1
+ 	bool
+ 	depends on 8xx
+@@ -811,8 +756,7 @@ config PPC_GEN550
+ 	bool
+ 	depends on SANDPOINT || SPRUCE || PPLUS || \
+ 		PRPMC750 || PRPMC800 || LOPEC || \
+-		(EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
+-		83xx
++		(EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D
+ 	default y
+ 
+ config FORCE
+@@ -1068,13 +1012,13 @@ config GENERIC_ISA_DMA
+ 
+ config PPC_I8259
+ 	bool
+-	default y if 85xx || PPC_PREP
++	default y if PPC_PREP
+ 	default n
+ 
+ config PPC_INDIRECT_PCI
+ 	bool
+ 	depends on PCI
+-	default y if 40x || 44x || 85xx || 83xx || PPC_PREP
++	default y if 40x || 44x || PPC_PREP
+ 	default n
+ 
+ config EISA
+@@ -1091,8 +1035,8 @@ config MCA
+ 	bool
+ 
+ config PCI
+-	bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx
+-	default y if !40x && !CPM2 && !8xx && !83xx && !85xx
++	bool "PCI support" if 40x || CPM2 || PPC_MPC52xx
++	default y if !40x && !CPM2 && !8xx
+ 	default PCI_QSPAN if !4xx && !CPM2 && 8xx
+ 	help
+ 	  Find out whether your system includes a PCI bus. PCI is the name of
+@@ -1106,11 +1050,6 @@ config PCI_DOMAINS
+ config PCI_SYSCALL
+ 	def_bool PCI
+ 
+-config MPC83xx_PCI2
+-	bool "Support for 2nd PCI host controller"
+-	depends on PCI && MPC834x
+-	default y if MPC834x_SYS
+-
+ config PCI_QSPAN
+ 	bool "QSpan PCI"
+ 	depends on !4xx && !CPM2 && 8xx
+diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
+index 95894ef..8df7f0e 100644
+--- a/arch/ppc/Makefile
++++ b/arch/ppc/Makefile
+@@ -36,14 +36,8 @@ LINUXINCLUDE    += -Iarch/$(ARCH)/include
+ 
+ CHECKFLAGS	+= -D__powerpc__
+ 
+-ifndef CONFIG_FSL_BOOKE
+-KBUILD_CFLAGS	+= -mstring
+-endif
+-
+ cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
+ cpu-as-$(CONFIG_6xx)		+= -Wa,-maltivec
+-cpu-as-$(CONFIG_E500)		+= -Wa,-me500
+-cpu-as-$(CONFIG_E200)		+= -Wa,-me200
+ 
+ KBUILD_AFLAGS += $(cpu-as-y)
+ KBUILD_CFLAGS += $(cpu-as-y)
+@@ -55,7 +49,6 @@ head-y				:= arch/ppc/kernel/head.o
+ head-$(CONFIG_8xx)		:= arch/ppc/kernel/head_8xx.o
+ head-$(CONFIG_4xx)		:= arch/ppc/kernel/head_4xx.o
+ head-$(CONFIG_44x)		:= arch/ppc/kernel/head_44x.o
+-head-$(CONFIG_FSL_BOOKE)	:= arch/ppc/kernel/head_fsl_booke.o
+ 
+ head-$(CONFIG_PPC_FPU)		+= arch/powerpc/kernel/fpu.o
+ 
+@@ -65,8 +58,6 @@ core-y				+= arch/ppc/kernel/ arch/powerpc/kernel/ \
+ 				   arch/ppc/syslib/ arch/powerpc/sysdev/ \
+ 				   arch/powerpc/lib/
+ core-$(CONFIG_4xx)		+= arch/ppc/platforms/4xx/
+-core-$(CONFIG_83xx)		+= arch/ppc/platforms/83xx/
+-core-$(CONFIG_85xx)		+= arch/ppc/platforms/85xx/
+ core-$(CONFIG_MATH_EMULATION)	+= arch/powerpc/math-emu/
+ core-$(CONFIG_XMON)		+= arch/ppc/xmon/
+ drivers-$(CONFIG_8xx)		+= arch/ppc/8xx_io/
+diff --git a/arch/ppc/boot/simple/iic.c b/arch/ppc/boot/simple/iic.c
+index e4efd83..5e91489 100644
+--- a/arch/ppc/boot/simple/iic.c
++++ b/arch/ppc/boot/simple/iic.c
+@@ -5,7 +5,7 @@
+ #include <linux/types.h>
+ #include <asm/uaccess.h>
+ #include <asm/mpc8xx.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ 
+ /* IIC functions.
+diff --git a/arch/ppc/boot/simple/m8xx_tty.c b/arch/ppc/boot/simple/m8xx_tty.c
+index ea615d8..f28924e 100644
+--- a/arch/ppc/boot/simple/m8xx_tty.c
++++ b/arch/ppc/boot/simple/m8xx_tty.c
+@@ -11,7 +11,7 @@
+ #include <linux/types.h>
+ #include <asm/uaccess.h>
+ #include <asm/mpc8xx.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ #ifdef CONFIG_MBX
+ #define MBX_CSR1	((volatile u_char *)0xfa100000)
+diff --git a/arch/ppc/configs/TQM8540_defconfig b/arch/ppc/configs/TQM8540_defconfig
+deleted file mode 100644
+index f33f0e7..0000000
+--- a/arch/ppc/configs/TQM8540_defconfig
++++ /dev/null
+@@ -1,973 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.15-rc2
+-# Fri Nov 25 17:26:50 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_INITRAMFS_SOURCE=""
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Block layer
+-#
+-# CONFIG_LBD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-CONFIG_DEFAULT_AS=y
+-# CONFIG_DEFAULT_DEADLINE is not set
+-# CONFIG_DEFAULT_CFQ is not set
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="anticipatory"
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-# CONFIG_E200 is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-# CONFIG_PHYS_64BIT is not set
+-CONFIG_SPE=y
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_KEXEC is not set
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_WANT_EARLY_SERIAL is not set
+-CONFIG_PPC_GEN550=y
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-# CONFIG_MPC8540_ADS is not set
+-# CONFIG_MPC8548_CDS is not set
+-# CONFIG_MPC8555_CDS is not set
+-# CONFIG_MPC8560_ADS is not set
+-# CONFIG_SBC8560 is not set
+-# CONFIG_STX_GP3 is not set
+-CONFIG_TQM8540=y
+-# CONFIG_TQM8541 is not set
+-# CONFIG_TQM8555 is not set
+-# CONFIG_TQM8560 is not set
+-CONFIG_MPC8540=y
+-
+-#
+-# Platform options
+-#
+-# CONFIG_HIGHMEM is not set
+-# CONFIG_HZ_100 is not set
+-CONFIG_HZ_250=y
+-# CONFIG_HZ_1000 is not set
+-CONFIG_HZ=250
+-CONFIG_PREEMPT_NONE=y
+-# CONFIG_PREEMPT_VOLUNTARY is not set
+-# CONFIG_PREEMPT is not set
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-# CONFIG_PM is not set
+-# CONFIG_HIBERNATION is not set
+-CONFIG_SECCOMP=y
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_PPC_I8259=y
+-CONFIG_PPC_INDIRECT_PCI=y
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_PCI_LEGACY_PROC is not set
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-# CONFIG_RAPIDIO is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_BIC=y
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# DCCP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_DCCP is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_IEEE80211 is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Connector - unified userspace <-> kernelspace linker
+-#
+-# CONFIG_CONNECTOR is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-CONFIG_MTD=y
+-# CONFIG_MTD_DEBUG is not set
+-CONFIG_MTD_CONCAT=y
+-CONFIG_MTD_PARTITIONS=y
+-# CONFIG_MTD_REDBOOT_PARTS is not set
+-CONFIG_MTD_CMDLINE_PARTS=y
+-
+-#
+-# User Modules And Translation Layers
+-#
+-CONFIG_MTD_CHAR=y
+-CONFIG_MTD_BLOCK=y
+-# CONFIG_FTL is not set
+-# CONFIG_NFTL is not set
+-# CONFIG_INFTL is not set
+-# CONFIG_RFD_FTL is not set
+-
+-#
+-# RAM/ROM/Flash chip drivers
+-#
+-CONFIG_MTD_CFI=y
+-# CONFIG_MTD_JEDECPROBE is not set
+-CONFIG_MTD_GEN_PROBE=y
+-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+-CONFIG_MTD_MAP_BANK_WIDTH_1=y
+-CONFIG_MTD_MAP_BANK_WIDTH_2=y
+-CONFIG_MTD_MAP_BANK_WIDTH_4=y
+-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+-CONFIG_MTD_CFI_I1=y
+-CONFIG_MTD_CFI_I2=y
+-# CONFIG_MTD_CFI_I4 is not set
+-# CONFIG_MTD_CFI_I8 is not set
+-# CONFIG_MTD_CFI_INTELEXT is not set
+-CONFIG_MTD_CFI_AMDSTD=y
+-CONFIG_MTD_CFI_AMDSTD_RETRY=0
+-# CONFIG_MTD_CFI_STAA is not set
+-CONFIG_MTD_CFI_UTIL=y
+-# CONFIG_MTD_RAM is not set
+-# CONFIG_MTD_ROM is not set
+-# CONFIG_MTD_ABSENT is not set
+-
+-#
+-# Mapping drivers for chip access
+-#
+-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+-# CONFIG_MTD_PHYSMAP is not set
+-CONFIG_MTD_TQM85xx=y
+-# CONFIG_MTD_PLATRAM is not set
+-
+-#
+-# Self-contained MTD device drivers
+-#
+-# CONFIG_MTD_PMC551 is not set
+-# CONFIG_MTD_SLRAM is not set
+-# CONFIG_MTD_PHRAM is not set
+-# CONFIG_MTD_MTDRAM is not set
+-# CONFIG_MTD_BLKMTD is not set
+-# CONFIG_MTD_BLOCK2MTD is not set
+-
+-#
+-# Disk-On-Chip Device Drivers
+-#
+-# CONFIG_MTD_DOC2000 is not set
+-# CONFIG_MTD_DOC2001 is not set
+-# CONFIG_MTD_DOC2001PLUS is not set
+-
+-#
+-# NAND Flash Device Drivers
+-#
+-# CONFIG_MTD_NAND is not set
+-
+-#
+-# OneNAND Flash Device Drivers
+-#
+-# CONFIG_MTD_ONENAND is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-# CONFIG_CDROM_PKTCDVD is not set
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-CONFIG_IDE=y
+-CONFIG_BLK_DEV_IDE=y
+-
+-#
+-# Please see Documentation/ide.txt for help/info on IDE drives
+-#
+-# CONFIG_BLK_DEV_IDE_SATA is not set
+-CONFIG_BLK_DEV_IDEDISK=y
+-# CONFIG_IDEDISK_MULTI_MODE is not set
+-# CONFIG_BLK_DEV_IDECD is not set
+-# CONFIG_BLK_DEV_IDETAPE is not set
+-# CONFIG_BLK_DEV_IDEFLOPPY is not set
+-# CONFIG_IDE_TASK_IOCTL is not set
+-
+-#
+-# IDE chipset support/bugfixes
+-#
+-CONFIG_IDE_GENERIC=y
+-CONFIG_BLK_DEV_IDEPCI=y
+-CONFIG_IDEPCI_SHARE_IRQ=y
+-# CONFIG_BLK_DEV_OFFBOARD is not set
+-CONFIG_BLK_DEV_GENERIC=y
+-# CONFIG_BLK_DEV_OPTI621 is not set
+-# CONFIG_BLK_DEV_SL82C105 is not set
+-CONFIG_BLK_DEV_IDEDMA_PCI=y
+-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+-CONFIG_IDEDMA_PCI_AUTO=y
+-# CONFIG_IDEDMA_ONLYDISK is not set
+-# CONFIG_BLK_DEV_AEC62XX is not set
+-# CONFIG_BLK_DEV_ALI15X3 is not set
+-# CONFIG_BLK_DEV_AMD74XX is not set
+-# CONFIG_BLK_DEV_CMD64X is not set
+-# CONFIG_BLK_DEV_TRIFLEX is not set
+-# CONFIG_BLK_DEV_CY82C693 is not set
+-# CONFIG_BLK_DEV_CS5520 is not set
+-# CONFIG_BLK_DEV_CS5530 is not set
+-# CONFIG_BLK_DEV_HPT34X is not set
+-# CONFIG_BLK_DEV_HPT366 is not set
+-# CONFIG_BLK_DEV_SC1200 is not set
+-# CONFIG_BLK_DEV_PIIX is not set
+-# CONFIG_BLK_DEV_IT821X is not set
+-# CONFIG_BLK_DEV_NS87415 is not set
+-# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+-# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+-# CONFIG_BLK_DEV_SVWKS is not set
+-# CONFIG_BLK_DEV_SIIMAGE is not set
+-# CONFIG_BLK_DEV_SLC90E66 is not set
+-# CONFIG_BLK_DEV_TRM290 is not set
+-CONFIG_BLK_DEV_VIA82CXXX=y
+-# CONFIG_IDE_ARM is not set
+-CONFIG_BLK_DEV_IDEDMA=y
+-# CONFIG_IDEDMA_IVB is not set
+-CONFIG_IDEDMA_AUTO=y
+-# CONFIG_BLK_DEV_HD is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-# CONFIG_WINDFARM is not set
+-
+-#
+-# Network device support
+-#
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# PHY device support
+-#
+-CONFIG_PHYLIB=y
+-
+-#
+-# MII PHY device drivers
+-#
+-# CONFIG_MARVELL_PHY is not set
+-# CONFIG_DAVICOM_PHY is not set
+-# CONFIG_QSEMI_PHY is not set
+-# CONFIG_LXT_PHY is not set
+-# CONFIG_CICADA_PHY is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_CASSINI is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-CONFIG_NET_PCI=y
+-# CONFIG_PCNET32 is not set
+-# CONFIG_AMD8111_ETH is not set
+-# CONFIG_ADAPTEC_STARFIRE is not set
+-# CONFIG_B44 is not set
+-# CONFIG_FORCEDETH is not set
+-# CONFIG_DGRS is not set
+-# CONFIG_EEPRO100 is not set
+-CONFIG_E100=y
+-# CONFIG_FEALNX is not set
+-# CONFIG_NATSEMI is not set
+-# CONFIG_NE2K_PCI is not set
+-# CONFIG_8139CP is not set
+-# CONFIG_8139TOO is not set
+-# CONFIG_SIS900 is not set
+-# CONFIG_EPIC100 is not set
+-# CONFIG_SUNDANCE is not set
+-# CONFIG_TLAN is not set
+-# CONFIG_VIA_RHINE is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SIS190 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_VIA_VELOCITY is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_CHELSIO_T1 is not set
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y
+-CONFIG_SERIAL_8250_NR_UARTS=4
+-# CONFIG_SERIAL_8250_EXTENDED is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-# CONFIG_TELCLOCK is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Miscellaneous I2C Chip support
+-#
+-CONFIG_SENSORS_DS1337=y
+-# CONFIG_SENSORS_DS1374 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCA9539 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_SENSORS_MAX6875 is not set
+-# CONFIG_RTC_X1205_I2C is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Hardware Monitoring support
+-#
+-CONFIG_HWMON=y
+-# CONFIG_HWMON_VID is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ADM9240 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_ATXP1 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-CONFIG_SENSORS_LM75=y
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83792D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-# CONFIG_SENSORS_W83627EHF is not set
+-CONFIG_HWMON_DEBUG_CHIP=y
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia Capabilities Port drivers
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# SN Devices
+-#
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-CONFIG_INOTIFY=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-# CONFIG_RELAYFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_JFFS_FS is not set
+-CONFIG_JFFS2_FS=y
+-CONFIG_JFFS2_FS_DEBUG=0
+-CONFIG_JFFS2_FS_WRITEBUFFER=y
+-# CONFIG_JFFS2_SUMMARY is not set
+-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+-CONFIG_JFFS2_ZLIB=y
+-CONFIG_JFFS2_RTIME=y
+-# CONFIG_JFFS2_RUBIN is not set
+-CONFIG_CRAMFS=y
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-# CONFIG_9P_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_ZLIB_INFLATE=y
+-CONFIG_ZLIB_DEFLATE=y
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_SERIAL_TEXT_DEBUG is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/TQM8541_defconfig b/arch/ppc/configs/TQM8541_defconfig
+deleted file mode 100644
+index e00cd62..0000000
+--- a/arch/ppc/configs/TQM8541_defconfig
++++ /dev/null
+@@ -1,986 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.15-rc2
+-# Wed Nov 30 13:36:28 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_INITRAMFS_SOURCE=""
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Block layer
+-#
+-# CONFIG_LBD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-CONFIG_DEFAULT_AS=y
+-# CONFIG_DEFAULT_DEADLINE is not set
+-# CONFIG_DEFAULT_CFQ is not set
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="anticipatory"
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-# CONFIG_E200 is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-# CONFIG_PHYS_64BIT is not set
+-CONFIG_SPE=y
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_KEXEC is not set
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_WANT_EARLY_SERIAL is not set
+-CONFIG_PPC_GEN550=y
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-# CONFIG_MPC8540_ADS is not set
+-# CONFIG_MPC8548_CDS is not set
+-# CONFIG_MPC8555_CDS is not set
+-# CONFIG_MPC8560_ADS is not set
+-# CONFIG_SBC8560 is not set
+-# CONFIG_STX_GP3 is not set
+-# CONFIG_TQM8540 is not set
+-CONFIG_TQM8541=y
+-# CONFIG_TQM8555 is not set
+-# CONFIG_TQM8560 is not set
+-CONFIG_MPC8555=y
+-
+-#
+-# Platform options
+-#
+-CONFIG_CPM2=y
+-# CONFIG_PC_KEYBOARD is not set
+-# CONFIG_HIGHMEM is not set
+-# CONFIG_HZ_100 is not set
+-CONFIG_HZ_250=y
+-# CONFIG_HZ_1000 is not set
+-CONFIG_HZ=250
+-CONFIG_PREEMPT_NONE=y
+-# CONFIG_PREEMPT_VOLUNTARY is not set
+-# CONFIG_PREEMPT is not set
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-# CONFIG_PM is not set
+-# CONFIG_HIBERNATION is not set
+-CONFIG_SECCOMP=y
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_PPC_I8259=y
+-CONFIG_PPC_INDIRECT_PCI=y
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_PCI_LEGACY_PROC is not set
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_BIC=y
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# DCCP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_DCCP is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_IEEE80211 is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Connector - unified userspace <-> kernelspace linker
+-#
+-# CONFIG_CONNECTOR is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-CONFIG_MTD=y
+-# CONFIG_MTD_DEBUG is not set
+-CONFIG_MTD_CONCAT=y
+-CONFIG_MTD_PARTITIONS=y
+-# CONFIG_MTD_REDBOOT_PARTS is not set
+-CONFIG_MTD_CMDLINE_PARTS=y
+-
+-#
+-# User Modules And Translation Layers
+-#
+-CONFIG_MTD_CHAR=y
+-CONFIG_MTD_BLOCK=y
+-# CONFIG_FTL is not set
+-# CONFIG_NFTL is not set
+-# CONFIG_INFTL is not set
+-# CONFIG_RFD_FTL is not set
+-
+-#
+-# RAM/ROM/Flash chip drivers
+-#
+-CONFIG_MTD_CFI=y
+-# CONFIG_MTD_JEDECPROBE is not set
+-CONFIG_MTD_GEN_PROBE=y
+-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+-CONFIG_MTD_MAP_BANK_WIDTH_1=y
+-CONFIG_MTD_MAP_BANK_WIDTH_2=y
+-CONFIG_MTD_MAP_BANK_WIDTH_4=y
+-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+-CONFIG_MTD_CFI_I1=y
+-CONFIG_MTD_CFI_I2=y
+-# CONFIG_MTD_CFI_I4 is not set
+-# CONFIG_MTD_CFI_I8 is not set
+-# CONFIG_MTD_CFI_INTELEXT is not set
+-CONFIG_MTD_CFI_AMDSTD=y
+-CONFIG_MTD_CFI_AMDSTD_RETRY=0
+-# CONFIG_MTD_CFI_STAA is not set
+-CONFIG_MTD_CFI_UTIL=y
+-# CONFIG_MTD_RAM is not set
+-# CONFIG_MTD_ROM is not set
+-# CONFIG_MTD_ABSENT is not set
+-
+-#
+-# Mapping drivers for chip access
+-#
+-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+-# CONFIG_MTD_PHYSMAP is not set
+-CONFIG_MTD_TQM85xx=y
+-# CONFIG_MTD_PLATRAM is not set
+-
+-#
+-# Self-contained MTD device drivers
+-#
+-# CONFIG_MTD_PMC551 is not set
+-# CONFIG_MTD_SLRAM is not set
+-# CONFIG_MTD_PHRAM is not set
+-# CONFIG_MTD_MTDRAM is not set
+-# CONFIG_MTD_BLKMTD is not set
+-# CONFIG_MTD_BLOCK2MTD is not set
+-
+-#
+-# Disk-On-Chip Device Drivers
+-#
+-# CONFIG_MTD_DOC2000 is not set
+-# CONFIG_MTD_DOC2001 is not set
+-# CONFIG_MTD_DOC2001PLUS is not set
+-
+-#
+-# NAND Flash Device Drivers
+-#
+-# CONFIG_MTD_NAND is not set
+-
+-#
+-# OneNAND Flash Device Drivers
+-#
+-# CONFIG_MTD_ONENAND is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-# CONFIG_CDROM_PKTCDVD is not set
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-CONFIG_IDE=y
+-CONFIG_BLK_DEV_IDE=y
+-
+-#
+-# Please see Documentation/ide.txt for help/info on IDE drives
+-#
+-# CONFIG_BLK_DEV_IDE_SATA is not set
+-CONFIG_BLK_DEV_IDEDISK=y
+-# CONFIG_IDEDISK_MULTI_MODE is not set
+-# CONFIG_BLK_DEV_IDECD is not set
+-# CONFIG_BLK_DEV_IDETAPE is not set
+-# CONFIG_BLK_DEV_IDEFLOPPY is not set
+-# CONFIG_IDE_TASK_IOCTL is not set
+-
+-#
+-# IDE chipset support/bugfixes
+-#
+-CONFIG_IDE_GENERIC=y
+-CONFIG_BLK_DEV_IDEPCI=y
+-CONFIG_IDEPCI_SHARE_IRQ=y
+-# CONFIG_BLK_DEV_OFFBOARD is not set
+-CONFIG_BLK_DEV_GENERIC=y
+-# CONFIG_BLK_DEV_OPTI621 is not set
+-# CONFIG_BLK_DEV_SL82C105 is not set
+-CONFIG_BLK_DEV_IDEDMA_PCI=y
+-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+-CONFIG_IDEDMA_PCI_AUTO=y
+-# CONFIG_IDEDMA_ONLYDISK is not set
+-# CONFIG_BLK_DEV_AEC62XX is not set
+-# CONFIG_BLK_DEV_ALI15X3 is not set
+-# CONFIG_BLK_DEV_AMD74XX is not set
+-# CONFIG_BLK_DEV_CMD64X is not set
+-# CONFIG_BLK_DEV_TRIFLEX is not set
+-# CONFIG_BLK_DEV_CY82C693 is not set
+-# CONFIG_BLK_DEV_CS5520 is not set
+-# CONFIG_BLK_DEV_CS5530 is not set
+-# CONFIG_BLK_DEV_HPT34X is not set
+-# CONFIG_BLK_DEV_HPT366 is not set
+-# CONFIG_BLK_DEV_SC1200 is not set
+-# CONFIG_BLK_DEV_PIIX is not set
+-# CONFIG_BLK_DEV_IT821X is not set
+-# CONFIG_BLK_DEV_NS87415 is not set
+-# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+-# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+-# CONFIG_BLK_DEV_SVWKS is not set
+-# CONFIG_BLK_DEV_SIIMAGE is not set
+-# CONFIG_BLK_DEV_SLC90E66 is not set
+-# CONFIG_BLK_DEV_TRM290 is not set
+-CONFIG_BLK_DEV_VIA82CXXX=y
+-# CONFIG_IDE_ARM is not set
+-CONFIG_BLK_DEV_IDEDMA=y
+-# CONFIG_IDEDMA_IVB is not set
+-CONFIG_IDEDMA_AUTO=y
+-# CONFIG_BLK_DEV_HD is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-# CONFIG_WINDFARM is not set
+-
+-#
+-# Network device support
+-#
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# PHY device support
+-#
+-CONFIG_PHYLIB=y
+-
+-#
+-# MII PHY device drivers
+-#
+-# CONFIG_MARVELL_PHY is not set
+-# CONFIG_DAVICOM_PHY is not set
+-# CONFIG_QSEMI_PHY is not set
+-# CONFIG_LXT_PHY is not set
+-# CONFIG_CICADA_PHY is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_CASSINI is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-CONFIG_NET_PCI=y
+-# CONFIG_PCNET32 is not set
+-# CONFIG_AMD8111_ETH is not set
+-# CONFIG_ADAPTEC_STARFIRE is not set
+-# CONFIG_B44 is not set
+-# CONFIG_FORCEDETH is not set
+-# CONFIG_DGRS is not set
+-# CONFIG_EEPRO100 is not set
+-CONFIG_E100=y
+-# CONFIG_FEALNX is not set
+-# CONFIG_NATSEMI is not set
+-# CONFIG_NE2K_PCI is not set
+-# CONFIG_8139CP is not set
+-# CONFIG_8139TOO is not set
+-# CONFIG_SIS900 is not set
+-# CONFIG_EPIC100 is not set
+-# CONFIG_SUNDANCE is not set
+-# CONFIG_TLAN is not set
+-# CONFIG_VIA_RHINE is not set
+-# CONFIG_FS_ENET is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SIS190 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_VIA_VELOCITY is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_CHELSIO_T1 is not set
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y
+-CONFIG_SERIAL_8250_NR_UARTS=4
+-# CONFIG_SERIAL_8250_EXTENDED is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-# CONFIG_SERIAL_CPM is not set
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-# CONFIG_TELCLOCK is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_MPC8260 is not set
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Miscellaneous I2C Chip support
+-#
+-CONFIG_SENSORS_DS1337=y
+-# CONFIG_SENSORS_DS1374 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_MAX6900 is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCF8563 is not set
+-# CONFIG_SENSORS_PCA9539 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_SENSORS_MAX6875 is not set
+-# CONFIG_RTC_X1205_I2C is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Hardware Monitoring support
+-#
+-CONFIG_HWMON=y
+-# CONFIG_HWMON_VID is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ADM9240 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_ATXP1 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-CONFIG_SENSORS_LM75=y
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83792D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-# CONFIG_SENSORS_W83627EHF is not set
+-CONFIG_HWMON_DEBUG_CHIP=y
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia Capabilities Port drivers
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# SN Devices
+-#
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-CONFIG_INOTIFY=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-# CONFIG_RELAYFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_JFFS_FS is not set
+-CONFIG_JFFS2_FS=y
+-CONFIG_JFFS2_FS_DEBUG=0
+-CONFIG_JFFS2_FS_WRITEBUFFER=y
+-# CONFIG_JFFS2_SUMMARY is not set
+-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+-CONFIG_JFFS2_ZLIB=y
+-CONFIG_JFFS2_RTIME=y
+-# CONFIG_JFFS2_RUBIN is not set
+-CONFIG_CRAMFS=y
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-# CONFIG_9P_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-# CONFIG_SCC_ENET is not set
+-# CONFIG_FEC_ENET is not set
+-
+-#
+-# CPM2 Options
+-#
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_ZLIB_INFLATE=y
+-CONFIG_ZLIB_DEFLATE=y
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_KGDB_CONSOLE is not set
+-# CONFIG_SERIAL_TEXT_DEBUG is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/TQM8555_defconfig b/arch/ppc/configs/TQM8555_defconfig
+deleted file mode 100644
+index 43a0d9d..0000000
+--- a/arch/ppc/configs/TQM8555_defconfig
++++ /dev/null
+@@ -1,983 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.15-rc2
+-# Thu Nov 24 17:10:52 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_INITRAMFS_SOURCE=""
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Block layer
+-#
+-# CONFIG_LBD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-CONFIG_DEFAULT_AS=y
+-# CONFIG_DEFAULT_DEADLINE is not set
+-# CONFIG_DEFAULT_CFQ is not set
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="anticipatory"
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-# CONFIG_E200 is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-# CONFIG_PHYS_64BIT is not set
+-CONFIG_SPE=y
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_KEXEC is not set
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_WANT_EARLY_SERIAL is not set
+-CONFIG_PPC_GEN550=y
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-# CONFIG_MPC8540_ADS is not set
+-# CONFIG_MPC8548_CDS is not set
+-# CONFIG_MPC8555_CDS is not set
+-# CONFIG_MPC8560_ADS is not set
+-# CONFIG_SBC8560 is not set
+-# CONFIG_STX_GP3 is not set
+-# CONFIG_TQM8540 is not set
+-# CONFIG_TQM8541 is not set
+-CONFIG_TQM8555=y
+-# CONFIG_TQM8560 is not set
+-CONFIG_MPC8555=y
+-
+-#
+-# Platform options
+-#
+-CONFIG_CPM2=y
+-# CONFIG_PC_KEYBOARD is not set
+-# CONFIG_HIGHMEM is not set
+-# CONFIG_HZ_100 is not set
+-CONFIG_HZ_250=y
+-# CONFIG_HZ_1000 is not set
+-CONFIG_HZ=250
+-CONFIG_PREEMPT_NONE=y
+-# CONFIG_PREEMPT_VOLUNTARY is not set
+-# CONFIG_PREEMPT is not set
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-# CONFIG_PM is not set
+-# CONFIG_HIBERNATION is not set
+-CONFIG_SECCOMP=y
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_PPC_I8259=y
+-CONFIG_PPC_INDIRECT_PCI=y
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_PCI_LEGACY_PROC is not set
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_BIC=y
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# DCCP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_DCCP is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_IEEE80211 is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Connector - unified userspace <-> kernelspace linker
+-#
+-# CONFIG_CONNECTOR is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-CONFIG_MTD=y
+-# CONFIG_MTD_DEBUG is not set
+-CONFIG_MTD_CONCAT=y
+-CONFIG_MTD_PARTITIONS=y
+-# CONFIG_MTD_REDBOOT_PARTS is not set
+-CONFIG_MTD_CMDLINE_PARTS=y
+-
+-#
+-# User Modules And Translation Layers
+-#
+-CONFIG_MTD_CHAR=y
+-CONFIG_MTD_BLOCK=y
+-# CONFIG_FTL is not set
+-# CONFIG_NFTL is not set
+-# CONFIG_INFTL is not set
+-# CONFIG_RFD_FTL is not set
+-
+-#
+-# RAM/ROM/Flash chip drivers
+-#
+-CONFIG_MTD_CFI=y
+-# CONFIG_MTD_JEDECPROBE is not set
+-CONFIG_MTD_GEN_PROBE=y
+-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+-CONFIG_MTD_MAP_BANK_WIDTH_1=y
+-CONFIG_MTD_MAP_BANK_WIDTH_2=y
+-CONFIG_MTD_MAP_BANK_WIDTH_4=y
+-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+-CONFIG_MTD_CFI_I1=y
+-CONFIG_MTD_CFI_I2=y
+-# CONFIG_MTD_CFI_I4 is not set
+-# CONFIG_MTD_CFI_I8 is not set
+-# CONFIG_MTD_CFI_INTELEXT is not set
+-CONFIG_MTD_CFI_AMDSTD=y
+-CONFIG_MTD_CFI_AMDSTD_RETRY=0
+-# CONFIG_MTD_CFI_STAA is not set
+-CONFIG_MTD_CFI_UTIL=y
+-# CONFIG_MTD_RAM is not set
+-# CONFIG_MTD_ROM is not set
+-# CONFIG_MTD_ABSENT is not set
+-
+-#
+-# Mapping drivers for chip access
+-#
+-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+-# CONFIG_MTD_PHYSMAP is not set
+-CONFIG_MTD_TQM85xx=y
+-# CONFIG_MTD_PLATRAM is not set
+-
+-#
+-# Self-contained MTD device drivers
+-#
+-# CONFIG_MTD_PMC551 is not set
+-# CONFIG_MTD_SLRAM is not set
+-# CONFIG_MTD_PHRAM is not set
+-# CONFIG_MTD_MTDRAM is not set
+-# CONFIG_MTD_BLKMTD is not set
+-# CONFIG_MTD_BLOCK2MTD is not set
+-
+-#
+-# Disk-On-Chip Device Drivers
+-#
+-# CONFIG_MTD_DOC2000 is not set
+-# CONFIG_MTD_DOC2001 is not set
+-# CONFIG_MTD_DOC2001PLUS is not set
+-
+-#
+-# NAND Flash Device Drivers
+-#
+-# CONFIG_MTD_NAND is not set
+-
+-#
+-# OneNAND Flash Device Drivers
+-#
+-# CONFIG_MTD_ONENAND is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-# CONFIG_CDROM_PKTCDVD is not set
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-CONFIG_IDE=y
+-CONFIG_BLK_DEV_IDE=y
+-
+-#
+-# Please see Documentation/ide.txt for help/info on IDE drives
+-#
+-# CONFIG_BLK_DEV_IDE_SATA is not set
+-CONFIG_BLK_DEV_IDEDISK=y
+-# CONFIG_IDEDISK_MULTI_MODE is not set
+-# CONFIG_BLK_DEV_IDECD is not set
+-# CONFIG_BLK_DEV_IDETAPE is not set
+-# CONFIG_BLK_DEV_IDEFLOPPY is not set
+-# CONFIG_IDE_TASK_IOCTL is not set
+-
+-#
+-# IDE chipset support/bugfixes
+-#
+-CONFIG_IDE_GENERIC=y
+-CONFIG_BLK_DEV_IDEPCI=y
+-CONFIG_IDEPCI_SHARE_IRQ=y
+-# CONFIG_BLK_DEV_OFFBOARD is not set
+-CONFIG_BLK_DEV_GENERIC=y
+-# CONFIG_BLK_DEV_OPTI621 is not set
+-# CONFIG_BLK_DEV_SL82C105 is not set
+-CONFIG_BLK_DEV_IDEDMA_PCI=y
+-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+-CONFIG_IDEDMA_PCI_AUTO=y
+-# CONFIG_IDEDMA_ONLYDISK is not set
+-# CONFIG_BLK_DEV_AEC62XX is not set
+-# CONFIG_BLK_DEV_ALI15X3 is not set
+-# CONFIG_BLK_DEV_AMD74XX is not set
+-# CONFIG_BLK_DEV_CMD64X is not set
+-# CONFIG_BLK_DEV_TRIFLEX is not set
+-# CONFIG_BLK_DEV_CY82C693 is not set
+-# CONFIG_BLK_DEV_CS5520 is not set
+-# CONFIG_BLK_DEV_CS5530 is not set
+-# CONFIG_BLK_DEV_HPT34X is not set
+-# CONFIG_BLK_DEV_HPT366 is not set
+-# CONFIG_BLK_DEV_SC1200 is not set
+-# CONFIG_BLK_DEV_PIIX is not set
+-# CONFIG_BLK_DEV_IT821X is not set
+-# CONFIG_BLK_DEV_NS87415 is not set
+-# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+-# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+-# CONFIG_BLK_DEV_SVWKS is not set
+-# CONFIG_BLK_DEV_SIIMAGE is not set
+-# CONFIG_BLK_DEV_SLC90E66 is not set
+-# CONFIG_BLK_DEV_TRM290 is not set
+-CONFIG_BLK_DEV_VIA82CXXX=y
+-# CONFIG_IDE_ARM is not set
+-CONFIG_BLK_DEV_IDEDMA=y
+-# CONFIG_IDEDMA_IVB is not set
+-CONFIG_IDEDMA_AUTO=y
+-# CONFIG_BLK_DEV_HD is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-# CONFIG_WINDFARM is not set
+-
+-#
+-# Network device support
+-#
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# PHY device support
+-#
+-CONFIG_PHYLIB=y
+-
+-#
+-# MII PHY device drivers
+-#
+-# CONFIG_MARVELL_PHY is not set
+-# CONFIG_DAVICOM_PHY is not set
+-# CONFIG_QSEMI_PHY is not set
+-# CONFIG_LXT_PHY is not set
+-# CONFIG_CICADA_PHY is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_CASSINI is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-CONFIG_NET_PCI=y
+-# CONFIG_PCNET32 is not set
+-# CONFIG_AMD8111_ETH is not set
+-# CONFIG_ADAPTEC_STARFIRE is not set
+-# CONFIG_B44 is not set
+-# CONFIG_FORCEDETH is not set
+-# CONFIG_DGRS is not set
+-# CONFIG_EEPRO100 is not set
+-CONFIG_E100=y
+-# CONFIG_FEALNX is not set
+-# CONFIG_NATSEMI is not set
+-# CONFIG_NE2K_PCI is not set
+-# CONFIG_8139CP is not set
+-# CONFIG_8139TOO is not set
+-# CONFIG_SIS900 is not set
+-# CONFIG_EPIC100 is not set
+-# CONFIG_SUNDANCE is not set
+-# CONFIG_TLAN is not set
+-# CONFIG_VIA_RHINE is not set
+-# CONFIG_FS_ENET is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SIS190 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_VIA_VELOCITY is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_CHELSIO_T1 is not set
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y
+-CONFIG_SERIAL_8250_NR_UARTS=4
+-# CONFIG_SERIAL_8250_EXTENDED is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-# CONFIG_SERIAL_CPM is not set
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-# CONFIG_TELCLOCK is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Miscellaneous I2C Chip support
+-#
+-CONFIG_SENSORS_DS1337=y
+-# CONFIG_SENSORS_DS1374 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCA9539 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_SENSORS_MAX6875 is not set
+-# CONFIG_RTC_X1205_I2C is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Hardware Monitoring support
+-#
+-CONFIG_HWMON=y
+-# CONFIG_HWMON_VID is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ADM9240 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_ATXP1 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-CONFIG_SENSORS_LM75=y
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83792D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-# CONFIG_SENSORS_W83627EHF is not set
+-CONFIG_HWMON_DEBUG_CHIP=y
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia Capabilities Port drivers
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# SN Devices
+-#
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-CONFIG_INOTIFY=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-# CONFIG_RELAYFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_JFFS_FS is not set
+-CONFIG_JFFS2_FS=y
+-CONFIG_JFFS2_FS_DEBUG=0
+-CONFIG_JFFS2_FS_WRITEBUFFER=y
+-# CONFIG_JFFS2_SUMMARY is not set
+-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+-CONFIG_JFFS2_ZLIB=y
+-CONFIG_JFFS2_RTIME=y
+-# CONFIG_JFFS2_RUBIN is not set
+-CONFIG_CRAMFS=y
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-# CONFIG_9P_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-# CONFIG_SCC_ENET is not set
+-# CONFIG_FEC_ENET is not set
+-
+-#
+-# CPM2 Options
+-#
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_ZLIB_INFLATE=y
+-CONFIG_ZLIB_DEFLATE=y
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_KGDB_CONSOLE is not set
+-# CONFIG_SERIAL_TEXT_DEBUG is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/TQM8560_defconfig b/arch/ppc/configs/TQM8560_defconfig
+deleted file mode 100644
+index a814d17..0000000
+--- a/arch/ppc/configs/TQM8560_defconfig
++++ /dev/null
+@@ -1,992 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.15-rc2
+-# Wed Nov 30 16:47:53 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_INITRAMFS_SOURCE=""
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Block layer
+-#
+-# CONFIG_LBD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-CONFIG_DEFAULT_AS=y
+-# CONFIG_DEFAULT_DEADLINE is not set
+-# CONFIG_DEFAULT_CFQ is not set
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="anticipatory"
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-# CONFIG_E200 is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-# CONFIG_PHYS_64BIT is not set
+-CONFIG_SPE=y
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_KEXEC is not set
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_WANT_EARLY_SERIAL is not set
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-# CONFIG_MPC8540_ADS is not set
+-# CONFIG_MPC8548_CDS is not set
+-# CONFIG_MPC8555_CDS is not set
+-# CONFIG_MPC8560_ADS is not set
+-# CONFIG_SBC8560 is not set
+-# CONFIG_STX_GP3 is not set
+-# CONFIG_TQM8540 is not set
+-# CONFIG_TQM8541 is not set
+-# CONFIG_TQM8555 is not set
+-CONFIG_TQM8560=y
+-CONFIG_MPC8560=y
+-
+-#
+-# Platform options
+-#
+-CONFIG_CPM2=y
+-# CONFIG_PC_KEYBOARD is not set
+-# CONFIG_HIGHMEM is not set
+-# CONFIG_HZ_100 is not set
+-CONFIG_HZ_250=y
+-# CONFIG_HZ_1000 is not set
+-CONFIG_HZ=250
+-CONFIG_PREEMPT_NONE=y
+-# CONFIG_PREEMPT_VOLUNTARY is not set
+-# CONFIG_PREEMPT is not set
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-# CONFIG_PM is not set
+-# CONFIG_HIBERNATION is not set
+-CONFIG_SECCOMP=y
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_PPC_I8259=y
+-CONFIG_PPC_INDIRECT_PCI=y
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_PCI_LEGACY_PROC is not set
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-# CONFIG_RAPIDIO is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_BIC=y
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# DCCP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_DCCP is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_IEEE80211 is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Connector - unified userspace <-> kernelspace linker
+-#
+-# CONFIG_CONNECTOR is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-CONFIG_MTD=y
+-# CONFIG_MTD_DEBUG is not set
+-CONFIG_MTD_CONCAT=y
+-CONFIG_MTD_PARTITIONS=y
+-# CONFIG_MTD_REDBOOT_PARTS is not set
+-CONFIG_MTD_CMDLINE_PARTS=y
+-
+-#
+-# User Modules And Translation Layers
+-#
+-CONFIG_MTD_CHAR=y
+-CONFIG_MTD_BLOCK=y
+-# CONFIG_FTL is not set
+-# CONFIG_NFTL is not set
+-# CONFIG_INFTL is not set
+-# CONFIG_RFD_FTL is not set
+-
+-#
+-# RAM/ROM/Flash chip drivers
+-#
+-CONFIG_MTD_CFI=y
+-# CONFIG_MTD_JEDECPROBE is not set
+-CONFIG_MTD_GEN_PROBE=y
+-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+-CONFIG_MTD_MAP_BANK_WIDTH_1=y
+-CONFIG_MTD_MAP_BANK_WIDTH_2=y
+-CONFIG_MTD_MAP_BANK_WIDTH_4=y
+-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+-CONFIG_MTD_CFI_I1=y
+-CONFIG_MTD_CFI_I2=y
+-# CONFIG_MTD_CFI_I4 is not set
+-# CONFIG_MTD_CFI_I8 is not set
+-# CONFIG_MTD_CFI_INTELEXT is not set
+-CONFIG_MTD_CFI_AMDSTD=y
+-CONFIG_MTD_CFI_AMDSTD_RETRY=0
+-# CONFIG_MTD_CFI_STAA is not set
+-CONFIG_MTD_CFI_UTIL=y
+-# CONFIG_MTD_RAM is not set
+-# CONFIG_MTD_ROM is not set
+-# CONFIG_MTD_ABSENT is not set
+-
+-#
+-# Mapping drivers for chip access
+-#
+-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+-# CONFIG_MTD_PHYSMAP is not set
+-CONFIG_MTD_TQM85xx=y
+-# CONFIG_MTD_PLATRAM is not set
+-
+-#
+-# Self-contained MTD device drivers
+-#
+-# CONFIG_MTD_PMC551 is not set
+-# CONFIG_MTD_SLRAM is not set
+-# CONFIG_MTD_PHRAM is not set
+-# CONFIG_MTD_MTDRAM is not set
+-# CONFIG_MTD_BLKMTD is not set
+-# CONFIG_MTD_BLOCK2MTD is not set
+-
+-#
+-# Disk-On-Chip Device Drivers
+-#
+-# CONFIG_MTD_DOC2000 is not set
+-# CONFIG_MTD_DOC2001 is not set
+-# CONFIG_MTD_DOC2001PLUS is not set
+-
+-#
+-# NAND Flash Device Drivers
+-#
+-# CONFIG_MTD_NAND is not set
+-
+-#
+-# OneNAND Flash Device Drivers
+-#
+-# CONFIG_MTD_ONENAND is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-# CONFIG_CDROM_PKTCDVD is not set
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-CONFIG_IDE=y
+-CONFIG_BLK_DEV_IDE=y
+-
+-#
+-# Please see Documentation/ide.txt for help/info on IDE drives
+-#
+-# CONFIG_BLK_DEV_IDE_SATA is not set
+-CONFIG_BLK_DEV_IDEDISK=y
+-# CONFIG_IDEDISK_MULTI_MODE is not set
+-# CONFIG_BLK_DEV_IDECD is not set
+-# CONFIG_BLK_DEV_IDETAPE is not set
+-# CONFIG_BLK_DEV_IDEFLOPPY is not set
+-# CONFIG_IDE_TASK_IOCTL is not set
+-
+-#
+-# IDE chipset support/bugfixes
+-#
+-CONFIG_IDE_GENERIC=y
+-CONFIG_BLK_DEV_IDEPCI=y
+-CONFIG_IDEPCI_SHARE_IRQ=y
+-# CONFIG_BLK_DEV_OFFBOARD is not set
+-CONFIG_BLK_DEV_GENERIC=y
+-# CONFIG_BLK_DEV_OPTI621 is not set
+-# CONFIG_BLK_DEV_SL82C105 is not set
+-CONFIG_BLK_DEV_IDEDMA_PCI=y
+-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+-CONFIG_IDEDMA_PCI_AUTO=y
+-# CONFIG_IDEDMA_ONLYDISK is not set
+-# CONFIG_BLK_DEV_AEC62XX is not set
+-# CONFIG_BLK_DEV_ALI15X3 is not set
+-# CONFIG_BLK_DEV_AMD74XX is not set
+-# CONFIG_BLK_DEV_CMD64X is not set
+-# CONFIG_BLK_DEV_TRIFLEX is not set
+-# CONFIG_BLK_DEV_CY82C693 is not set
+-# CONFIG_BLK_DEV_CS5520 is not set
+-# CONFIG_BLK_DEV_CS5530 is not set
+-# CONFIG_BLK_DEV_HPT34X is not set
+-# CONFIG_BLK_DEV_HPT366 is not set
+-# CONFIG_BLK_DEV_SC1200 is not set
+-# CONFIG_BLK_DEV_PIIX is not set
+-# CONFIG_BLK_DEV_IT821X is not set
+-# CONFIG_BLK_DEV_NS87415 is not set
+-# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+-# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+-# CONFIG_BLK_DEV_SVWKS is not set
+-# CONFIG_BLK_DEV_SIIMAGE is not set
+-# CONFIG_BLK_DEV_SLC90E66 is not set
+-# CONFIG_BLK_DEV_TRM290 is not set
+-CONFIG_BLK_DEV_VIA82CXXX=y
+-# CONFIG_IDE_ARM is not set
+-CONFIG_BLK_DEV_IDEDMA=y
+-# CONFIG_IDEDMA_IVB is not set
+-CONFIG_IDEDMA_AUTO=y
+-# CONFIG_BLK_DEV_HD is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-# CONFIG_WINDFARM is not set
+-
+-#
+-# Network device support
+-#
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# PHY device support
+-#
+-CONFIG_PHYLIB=y
+-
+-#
+-# MII PHY device drivers
+-#
+-# CONFIG_MARVELL_PHY is not set
+-# CONFIG_DAVICOM_PHY is not set
+-# CONFIG_QSEMI_PHY is not set
+-# CONFIG_LXT_PHY is not set
+-# CONFIG_CICADA_PHY is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_CASSINI is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-CONFIG_NET_PCI=y
+-# CONFIG_PCNET32 is not set
+-# CONFIG_AMD8111_ETH is not set
+-# CONFIG_ADAPTEC_STARFIRE is not set
+-# CONFIG_B44 is not set
+-# CONFIG_FORCEDETH is not set
+-# CONFIG_DGRS is not set
+-# CONFIG_EEPRO100 is not set
+-CONFIG_E100=y
+-# CONFIG_FEALNX is not set
+-# CONFIG_NATSEMI is not set
+-# CONFIG_NE2K_PCI is not set
+-# CONFIG_8139CP is not set
+-# CONFIG_8139TOO is not set
+-# CONFIG_SIS900 is not set
+-# CONFIG_EPIC100 is not set
+-# CONFIG_SUNDANCE is not set
+-# CONFIG_TLAN is not set
+-# CONFIG_VIA_RHINE is not set
+-# CONFIG_FS_ENET is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SIS190 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_VIA_VELOCITY is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_CHELSIO_T1 is not set
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y
+-CONFIG_SERIAL_8250_NR_UARTS=4
+-# CONFIG_SERIAL_8250_EXTENDED is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_SERIAL_CPM=y
+-CONFIG_SERIAL_CPM_CONSOLE=y
+-CONFIG_SERIAL_CPM_SCC1=y
+-# CONFIG_SERIAL_CPM_SCC2 is not set
+-# CONFIG_SERIAL_CPM_SCC3 is not set
+-# CONFIG_SERIAL_CPM_SCC4 is not set
+-# CONFIG_SERIAL_CPM_SMC1 is not set
+-# CONFIG_SERIAL_CPM_SMC2 is not set
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-# CONFIG_TELCLOCK is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_MPC8260 is not set
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Miscellaneous I2C Chip support
+-#
+-CONFIG_SENSORS_DS1337=y
+-# CONFIG_SENSORS_DS1374 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_MAX6900 is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCF8563 is not set
+-# CONFIG_SENSORS_PCA9539 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_SENSORS_MAX6875 is not set
+-# CONFIG_RTC_X1205_I2C is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Hardware Monitoring support
+-#
+-CONFIG_HWMON=y
+-# CONFIG_HWMON_VID is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ADM9240 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_ATXP1 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-CONFIG_SENSORS_LM75=y
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83792D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-# CONFIG_SENSORS_W83627EHF is not set
+-CONFIG_HWMON_DEBUG_CHIP=y
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia Capabilities Port drivers
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# SN Devices
+-#
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-CONFIG_INOTIFY=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-# CONFIG_RELAYFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_JFFS_FS is not set
+-CONFIG_JFFS2_FS=y
+-CONFIG_JFFS2_FS_DEBUG=0
+-CONFIG_JFFS2_FS_WRITEBUFFER=y
+-# CONFIG_JFFS2_SUMMARY is not set
+-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+-CONFIG_JFFS2_ZLIB=y
+-CONFIG_JFFS2_RTIME=y
+-# CONFIG_JFFS2_RUBIN is not set
+-CONFIG_CRAMFS=y
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-# CONFIG_9P_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-# CONFIG_SCC_ENET is not set
+-# CONFIG_FEC_ENET is not set
+-
+-#
+-# CPM2 Options
+-#
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_ZLIB_INFLATE=y
+-CONFIG_ZLIB_DEFLATE=y
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_KGDB_CONSOLE is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/mpc834x_sys_defconfig b/arch/ppc/configs/mpc834x_sys_defconfig
+deleted file mode 100644
+index d90c8a7..0000000
+--- a/arch/ppc/configs/mpc834x_sys_defconfig
++++ /dev/null
+@@ -1,844 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.14
+-# Mon Nov  7 15:38:29 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_INITRAMFS_SOURCE=""
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Processor
+-#
+-CONFIG_6xx=y
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-# CONFIG_E200 is not set
+-# CONFIG_E500 is not set
+-CONFIG_PPC_FPU=y
+-# CONFIG_KEXEC is not set
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_WANT_EARLY_SERIAL is not set
+-CONFIG_PPC_GEN550=y
+-CONFIG_PPC_STD_MMU=y
+-
+-#
+-# Platform options
+-#
+-# CONFIG_PPC_MULTIPLATFORM is not set
+-# CONFIG_APUS is not set
+-# CONFIG_KATANA is not set
+-# CONFIG_WILLOW is not set
+-# CONFIG_CPCI690 is not set
+-# CONFIG_POWERPMC250 is not set
+-# CONFIG_CHESTNUT is not set
+-# CONFIG_SPRUCE is not set
+-# CONFIG_HDPU is not set
+-# CONFIG_EV64260 is not set
+-# CONFIG_LOPEC is not set
+-# CONFIG_MVME5100 is not set
+-# CONFIG_PPLUS is not set
+-# CONFIG_PRPMC750 is not set
+-# CONFIG_PRPMC800 is not set
+-# CONFIG_SANDPOINT is not set
+-# CONFIG_RADSTONE_PPC7D is not set
+-# CONFIG_PAL4 is not set
+-# CONFIG_GEMINI is not set
+-# CONFIG_EST8260 is not set
+-# CONFIG_SBC82xx is not set
+-# CONFIG_SBS8260 is not set
+-# CONFIG_RPX8260 is not set
+-# CONFIG_TQM8260 is not set
+-# CONFIG_ADS8272 is not set
+-# CONFIG_PQ2FADS is not set
+-# CONFIG_LITE5200 is not set
+-CONFIG_MPC834x_SYS=y
+-# CONFIG_EV64360 is not set
+-CONFIG_83xx=y
+-CONFIG_MPC834x=y
+-# CONFIG_SMP is not set
+-# CONFIG_HIGHMEM is not set
+-# CONFIG_HZ_100 is not set
+-CONFIG_HZ_250=y
+-# CONFIG_HZ_1000 is not set
+-CONFIG_HZ=250
+-CONFIG_PREEMPT_NONE=y
+-# CONFIG_PREEMPT_VOLUNTARY is not set
+-# CONFIG_PREEMPT is not set
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-# CONFIG_PM is not set
+-# CONFIG_HIBERNATION is not set
+-CONFIG_SECCOMP=y
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_GENERIC_ISA_DMA=y
+-# CONFIG_PPC_I8259 is not set
+-CONFIG_PPC_INDIRECT_PCI=y
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_MPC83xx_PCI2 is not set
+-CONFIG_PCI_LEGACY_PROC=y
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_BIC=y
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# DCCP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_DCCP is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-# CONFIG_NET_SCHED is not set
+-# CONFIG_NET_CLS_ROUTE is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_IEEE80211 is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Connector - unified userspace <-> kernelspace linker
+-#
+-# CONFIG_CONNECTOR is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-# CONFIG_MTD is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-# CONFIG_LBD is not set
+-# CONFIG_CDROM_PKTCDVD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-CONFIG_DEFAULT_AS=y
+-# CONFIG_DEFAULT_DEADLINE is not set
+-# CONFIG_DEFAULT_CFQ is not set
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="anticipatory"
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-# CONFIG_IDE is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-
+-#
+-# Network device support
+-#
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# PHY device support
+-#
+-CONFIG_PHYLIB=y
+-
+-#
+-# MII PHY device drivers
+-#
+-CONFIG_MARVELL_PHY=y
+-# CONFIG_DAVICOM_PHY is not set
+-# CONFIG_QSEMI_PHY is not set
+-# CONFIG_LXT_PHY is not set
+-# CONFIG_CICADA_PHY is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_CASSINI is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-CONFIG_NET_PCI=y
+-# CONFIG_PCNET32 is not set
+-# CONFIG_AMD8111_ETH is not set
+-# CONFIG_ADAPTEC_STARFIRE is not set
+-# CONFIG_B44 is not set
+-# CONFIG_FORCEDETH is not set
+-# CONFIG_DGRS is not set
+-# CONFIG_EEPRO100 is not set
+-CONFIG_E100=y
+-# CONFIG_FEALNX is not set
+-# CONFIG_NATSEMI is not set
+-# CONFIG_NE2K_PCI is not set
+-# CONFIG_8139CP is not set
+-# CONFIG_8139TOO is not set
+-# CONFIG_SIS900 is not set
+-# CONFIG_EPIC100 is not set
+-# CONFIG_SUNDANCE is not set
+-# CONFIG_TLAN is not set
+-# CONFIG_VIA_RHINE is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-CONFIG_E1000=y
+-# CONFIG_E1000_NAPI is not set
+-# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SIS190 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_VIA_VELOCITY is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-CONFIG_GIANFAR=y
+-# CONFIG_GFAR_NAPI is not set
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_CHELSIO_T1 is not set
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y
+-CONFIG_SERIAL_8250_NR_UARTS=4
+-# CONFIG_SERIAL_8250_EXTENDED is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-# CONFIG_TELCLOCK is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Miscellaneous I2C Chip support
+-#
+-# CONFIG_SENSORS_DS1337 is not set
+-# CONFIG_SENSORS_DS1374 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCA9539 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_SENSORS_MAX6875 is not set
+-# CONFIG_RTC_X1205_I2C is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Hardware Monitoring support
+-#
+-CONFIG_HWMON=y
+-# CONFIG_HWMON_VID is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ADM9240 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_ATXP1 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-# CONFIG_SENSORS_LM75 is not set
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83792D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-# CONFIG_SENSORS_W83627EHF is not set
+-# CONFIG_HWMON_DEBUG_CHIP is not set
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia Capabilities Port drivers
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# SN Devices
+-#
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-CONFIG_INOTIFY=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-# CONFIG_RELAYFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-# CONFIG_9P_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-
+-#
+-# Profiling support
+-#
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_SERIAL_TEXT_DEBUG is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/mpc8540_ads_defconfig b/arch/ppc/configs/mpc8540_ads_defconfig
+deleted file mode 100644
+index bf676eb..0000000
+--- a/arch/ppc/configs/mpc8540_ads_defconfig
++++ /dev/null
+@@ -1,706 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.11-rc1
+-# Thu Jan 20 01:23:13 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_HAVE_DEC_LOCK=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-CONFIG_SPE=y
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_CPU_FREQ is not set
+-CONFIG_PPC_GEN550=y
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-CONFIG_MPC8540_ADS=y
+-# CONFIG_MPC8555_CDS is not set
+-# CONFIG_MPC8560_ADS is not set
+-# CONFIG_SBC8560 is not set
+-CONFIG_MPC8540=y
+-
+-#
+-# Platform options
+-#
+-# CONFIG_SMP is not set
+-# CONFIG_PREEMPT is not set
+-# CONFIG_HIGHMEM is not set
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-
+-#
+-# Bus options
+-#
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_PCI_LEGACY_PROC is not set
+-CONFIG_PCI_NAMES=y
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-
+-#
+-# PC-card bridges
+-#
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-# CONFIG_MTD is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE=""
+-# CONFIG_LBD is not set
+-# CONFIG_CDROM_PKTCDVD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-# CONFIG_IDE is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-
+-#
+-# Networking support
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-# CONFIG_NETLINK_DEV is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_IP_TCPDIAG=y
+-# CONFIG_IP_TCPDIAG_IPV6 is not set
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-# CONFIG_NET_CLS_ROUTE is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-# CONFIG_NET_PCI is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_TIGON3 is not set
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input I/O drivers
+-#
+-# CONFIG_GAMEPORT is not set
+-CONFIG_SOUND_GAMEPORT=y
+-# CONFIG_SERIO is not set
+-# CONFIG_SERIO_I8042 is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y
+-CONFIG_SERIAL_8250_NR_UARTS=4
+-# CONFIG_SERIAL_8250_EXTENDED is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PIIX4 is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Hardware Sensors Chip support
+-#
+-# CONFIG_I2C_SENSOR is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-# CONFIG_SENSORS_LM75 is not set
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-
+-#
+-# Other I2C Chip support
+-#
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-# CONFIG_USB is not set
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-# CONFIG_DEVFS_FS is not set
+-# CONFIG_DEVPTS_FS_XATTR is not set
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_XATTR is not set
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-# CONFIG_EXPORTFS is not set
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-
+-#
+-# Profiling support
+-#
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_DEBUG_KERNEL is not set
+-# CONFIG_SERIAL_TEXT_DEBUG is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/mpc8548_cds_defconfig b/arch/ppc/configs/mpc8548_cds_defconfig
+deleted file mode 100644
+index f36fc5d..0000000
+--- a/arch/ppc/configs/mpc8548_cds_defconfig
++++ /dev/null
+@@ -1,658 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.12-rc4
+-# Tue May 24 22:36:27 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_HAVE_DEC_LOCK=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-# CONFIG_PHYS_64BIT is not set
+-CONFIG_SPE=y
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_PM is not set
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-# CONFIG_MPC8540_ADS is not set
+-CONFIG_MPC8548_CDS=y
+-# CONFIG_MPC8555_CDS is not set
+-# CONFIG_MPC8560_ADS is not set
+-# CONFIG_SBC8560 is not set
+-# CONFIG_STX_GP3 is not set
+-CONFIG_MPC8548=y
+-
+-#
+-# Platform options
+-#
+-# CONFIG_SMP is not set
+-# CONFIG_PREEMPT is not set
+-# CONFIG_HIGHMEM is not set
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-# CONFIG_PCI is not set
+-# CONFIG_PCI_DOMAINS is not set
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-# CONFIG_MTD is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE=""
+-# CONFIG_LBD is not set
+-# CONFIG_CDROM_PKTCDVD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-# CONFIG_IDE is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-
+-#
+-# I2O device support
+-#
+-
+-#
+-# Macintosh device drivers
+-#
+-
+-#
+-# Networking support
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_IP_TCPDIAG=y
+-# CONFIG_IP_TCPDIAG_IPV6 is not set
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-# CONFIG_NET_CLS_ROUTE is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-
+-#
+-# Token Ring devices
+-#
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-CONFIG_SOUND_GAMEPORT=y
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y
+-CONFIG_SERIAL_8250_NR_UARTS=4
+-# CONFIG_SERIAL_8250_EXTENDED is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Hardware Sensors Chip support
+-#
+-# CONFIG_I2C_SENSOR is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-# CONFIG_SENSORS_LM75 is not set
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-
+-#
+-# Other I2C Chip support
+-#
+-# CONFIG_SENSORS_DS1337 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-# CONFIG_USB_ARCH_HAS_HCD is not set
+-# CONFIG_USB_ARCH_HAS_OHCI is not set
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-
+-#
+-# XFS support
+-#
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-# CONFIG_DEVFS_FS is not set
+-# CONFIG_DEVPTS_FS_XATTR is not set
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_XATTR is not set
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-
+-#
+-# Profiling support
+-#
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/mpc8555_cds_defconfig b/arch/ppc/configs/mpc8555_cds_defconfig
+deleted file mode 100644
+index 4f1e320..0000000
+--- a/arch/ppc/configs/mpc8555_cds_defconfig
++++ /dev/null
+@@ -1,784 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.12-rc4
+-# Tue May 17 11:56:01 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_HAVE_DEC_LOCK=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-# CONFIG_PHYS_64BIT is not set
+-CONFIG_SPE=y
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_CPU_FREQ is not set
+-CONFIG_PPC_GEN550=y
+-# CONFIG_PM is not set
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-# CONFIG_MPC8540_ADS is not set
+-CONFIG_MPC8555_CDS=y
+-# CONFIG_MPC8560_ADS is not set
+-# CONFIG_SBC8560 is not set
+-# CONFIG_STX_GP3 is not set
+-CONFIG_MPC8555=y
+-CONFIG_85xx_PCI2=y
+-
+-#
+-# Platform options
+-#
+-CONFIG_CPM2=y
+-# CONFIG_PC_KEYBOARD is not set
+-# CONFIG_SMP is not set
+-# CONFIG_PREEMPT is not set
+-# CONFIG_HIGHMEM is not set
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_PCI_LEGACY_PROC is not set
+-CONFIG_PCI_NAMES=y
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-# CONFIG_MTD is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE=""
+-# CONFIG_LBD is not set
+-# CONFIG_CDROM_PKTCDVD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-CONFIG_IDE=y
+-CONFIG_BLK_DEV_IDE=y
+-
+-#
+-# Please see Documentation/ide.txt for help/info on IDE drives
+-#
+-# CONFIG_BLK_DEV_IDE_SATA is not set
+-CONFIG_BLK_DEV_IDEDISK=y
+-# CONFIG_IDEDISK_MULTI_MODE is not set
+-# CONFIG_BLK_DEV_IDECD is not set
+-# CONFIG_BLK_DEV_IDETAPE is not set
+-# CONFIG_BLK_DEV_IDEFLOPPY is not set
+-# CONFIG_IDE_TASK_IOCTL is not set
+-
+-#
+-# IDE chipset support/bugfixes
+-#
+-CONFIG_IDE_GENERIC=y
+-CONFIG_BLK_DEV_IDEPCI=y
+-CONFIG_IDEPCI_SHARE_IRQ=y
+-# CONFIG_BLK_DEV_OFFBOARD is not set
+-CONFIG_BLK_DEV_GENERIC=y
+-# CONFIG_BLK_DEV_OPTI621 is not set
+-# CONFIG_BLK_DEV_SL82C105 is not set
+-CONFIG_BLK_DEV_IDEDMA_PCI=y
+-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+-CONFIG_IDEDMA_PCI_AUTO=y
+-# CONFIG_IDEDMA_ONLYDISK is not set
+-# CONFIG_BLK_DEV_AEC62XX is not set
+-# CONFIG_BLK_DEV_ALI15X3 is not set
+-# CONFIG_BLK_DEV_AMD74XX is not set
+-# CONFIG_BLK_DEV_CMD64X is not set
+-# CONFIG_BLK_DEV_TRIFLEX is not set
+-# CONFIG_BLK_DEV_CY82C693 is not set
+-# CONFIG_BLK_DEV_CS5520 is not set
+-# CONFIG_BLK_DEV_CS5530 is not set
+-# CONFIG_BLK_DEV_HPT34X is not set
+-# CONFIG_BLK_DEV_HPT366 is not set
+-# CONFIG_BLK_DEV_SC1200 is not set
+-# CONFIG_BLK_DEV_PIIX is not set
+-# CONFIG_BLK_DEV_NS87415 is not set
+-# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+-# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+-# CONFIG_BLK_DEV_SVWKS is not set
+-# CONFIG_BLK_DEV_SIIMAGE is not set
+-# CONFIG_BLK_DEV_SLC90E66 is not set
+-# CONFIG_BLK_DEV_TRM290 is not set
+-CONFIG_BLK_DEV_VIA82CXXX=y
+-# CONFIG_IDE_ARM is not set
+-CONFIG_BLK_DEV_IDEDMA=y
+-# CONFIG_IDEDMA_IVB is not set
+-CONFIG_IDEDMA_AUTO=y
+-# CONFIG_BLK_DEV_HD is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-
+-#
+-# Networking support
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_IP_TCPDIAG=y
+-# CONFIG_IP_TCPDIAG_IPV6 is not set
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-# CONFIG_NET_CLS_ROUTE is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-# CONFIG_NET_PCI is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_TIGON3 is not set
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-CONFIG_SOUND_GAMEPORT=y
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y
+-CONFIG_SERIAL_8250_NR_UARTS=4
+-# CONFIG_SERIAL_8250_EXTENDED is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-# CONFIG_SERIAL_CPM is not set
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Hardware Sensors Chip support
+-#
+-# CONFIG_I2C_SENSOR is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-# CONFIG_SENSORS_LM75 is not set
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-
+-#
+-# Other I2C Chip support
+-#
+-# CONFIG_SENSORS_DS1337 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-
+-#
+-# XFS support
+-#
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-# CONFIG_DEVFS_FS is not set
+-# CONFIG_DEVPTS_FS_XATTR is not set
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_XATTR is not set
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-# CONFIG_SCC_ENET is not set
+-# CONFIG_FEC_ENET is not set
+-
+-#
+-# CPM2 Options
+-#
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-
+-#
+-# Profiling support
+-#
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_KGDB_CONSOLE is not set
+-# CONFIG_SERIAL_TEXT_DEBUG is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/mpc8560_ads_defconfig b/arch/ppc/configs/mpc8560_ads_defconfig
+deleted file mode 100644
+index f12d48f..0000000
+--- a/arch/ppc/configs/mpc8560_ads_defconfig
++++ /dev/null
+@@ -1,769 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.13-rc6
+-# Thu Aug 11 18:14:45 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_HAVE_DEC_LOCK=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-# CONFIG_HOTPLUG is not set
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_EMBEDDED=y
+-# CONFIG_KALLSYMS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-# CONFIG_EPOLL is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-# CONFIG_MODULES is not set
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-# CONFIG_E200 is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-# CONFIG_PHYS_64BIT is not set
+-CONFIG_SPE=y
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_KEXEC is not set
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_PM is not set
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-# CONFIG_MPC8540_ADS is not set
+-# CONFIG_MPC8548_CDS is not set
+-# CONFIG_MPC8555_CDS is not set
+-CONFIG_MPC8560_ADS=y
+-# CONFIG_SBC8560 is not set
+-# CONFIG_STX_GP3 is not set
+-CONFIG_MPC8560=y
+-
+-#
+-# Platform options
+-#
+-CONFIG_CPM2=y
+-# CONFIG_PC_KEYBOARD is not set
+-# CONFIG_SMP is not set
+-# CONFIG_HIGHMEM is not set
+-# CONFIG_HZ_100 is not set
+-CONFIG_HZ_250=y
+-# CONFIG_HZ_1000 is not set
+-CONFIG_HZ=250
+-CONFIG_PREEMPT_NONE=y
+-# CONFIG_PREEMPT_VOLUNTARY is not set
+-# CONFIG_PREEMPT is not set
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-# CONFIG_CMDLINE_BOOL is not set
+-CONFIG_SECCOMP=y
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_PCI_LEGACY_PROC is not set
+-CONFIG_PCI_NAMES=y
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-CONFIG_IP_MULTICAST=y
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-CONFIG_SYN_COOKIES=y
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_IP_TCPDIAG=y
+-# CONFIG_IP_TCPDIAG_IPV6 is not set
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_BIC=y
+-# CONFIG_IPV6 is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-# CONFIG_NET_SCHED is not set
+-# CONFIG_NET_CLS_ROUTE is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-# CONFIG_MTD is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=32768
+-CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE=""
+-# CONFIG_LBD is not set
+-# CONFIG_CDROM_PKTCDVD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-# CONFIG_IDE is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_SCSI is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-
+-#
+-# Network device support
+-#
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-# CONFIG_NET_PCI is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-# CONFIG_SERIAL_8250 is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_SERIAL_CPM=y
+-CONFIG_SERIAL_CPM_CONSOLE=y
+-CONFIG_SERIAL_CPM_SCC1=y
+-CONFIG_SERIAL_CPM_SCC2=y
+-# CONFIG_SERIAL_CPM_SCC3 is not set
+-# CONFIG_SERIAL_CPM_SCC4 is not set
+-# CONFIG_SERIAL_CPM_SMC1 is not set
+-# CONFIG_SERIAL_CPM_SMC2 is not set
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-CONFIG_GEN_RTC=y
+-# CONFIG_GEN_RTC_X is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_AGP is not set
+-# CONFIG_DRM is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=y
+-CONFIG_I2C_CHARDEV=y
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-CONFIG_I2C_MPC=y
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-# CONFIG_I2C_SENSOR is not set
+-
+-#
+-# Miscellaneous I2C Chip support
+-#
+-# CONFIG_SENSORS_DS1337 is not set
+-# CONFIG_SENSORS_DS1374 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCA9539 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_SENSORS_MAX6875 is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Hardware Monitoring support
+-#
+-CONFIG_HWMON=y
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ADM9240 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_ATXP1 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-# CONFIG_SENSORS_LM75 is not set
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-# CONFIG_SENSORS_W83627EHF is not set
+-# CONFIG_HWMON_DEBUG_CHIP is not set
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# SN Devices
+-#
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-
+-#
+-# XFS support
+-#
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-CONFIG_INOTIFY=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_SYSFS=y
+-# CONFIG_DEVPTS_FS_XATTR is not set
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_XATTR is not set
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-# CONFIG_NFS_V3 is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-# CONFIG_MSDOS_PARTITION is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-# CONFIG_SCC_ENET is not set
+-# CONFIG_FEC_ENET is not set
+-
+-#
+-# CPM2 Options
+-#
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-
+-#
+-# Profiling support
+-#
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_KGDB_CONSOLE is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/configs/stx_gp3_defconfig b/arch/ppc/configs/stx_gp3_defconfig
+deleted file mode 100644
+index 70d6f84..0000000
+--- a/arch/ppc/configs/stx_gp3_defconfig
++++ /dev/null
+@@ -1,989 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.12-rc4
+-# Tue May 24 18:11:04 2005
+-#
+-CONFIG_MMU=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_HAVE_DEC_LOCK=y
+-CONFIG_PPC=y
+-CONFIG_PPC32=y
+-CONFIG_GENERIC_NVRAM=y
+-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_CLEAN_COMPILE=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_POSIX_MQUEUE is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_AUDIT is not set
+-CONFIG_HOTPLUG=y
+-CONFIG_KOBJECT_UEVENT=y
+-# CONFIG_IKCONFIG is not set
+-CONFIG_EMBEDDED=y
+-CONFIG_KALLSYMS=y
+-# CONFIG_KALLSYMS_ALL is not set
+-# CONFIG_KALLSYMS_EXTRA_PASS is not set
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-CONFIG_EPOLL=y
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SHMEM=y
+-CONFIG_CC_ALIGN_FUNCTIONS=0
+-CONFIG_CC_ALIGN_LABELS=0
+-CONFIG_CC_ALIGN_LOOPS=0
+-CONFIG_CC_ALIGN_JUMPS=0
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-
+-#
+-# Loadable module support
+-#
+-CONFIG_MODULES=y
+-# CONFIG_MODULE_UNLOAD is not set
+-CONFIG_OBSOLETE_MODPARM=y
+-CONFIG_MODVERSIONS=y
+-# CONFIG_MODULE_SRCVERSION_ALL is not set
+-CONFIG_KMOD=y
+-
+-#
+-# Processor
+-#
+-# CONFIG_6xx is not set
+-# CONFIG_40x is not set
+-# CONFIG_44x is not set
+-# CONFIG_POWER3 is not set
+-# CONFIG_POWER4 is not set
+-# CONFIG_8xx is not set
+-CONFIG_E500=y
+-CONFIG_BOOKE=y
+-CONFIG_FSL_BOOKE=y
+-# CONFIG_PHYS_64BIT is not set
+-# CONFIG_SPE is not set
+-CONFIG_MATH_EMULATION=y
+-# CONFIG_CPU_FREQ is not set
+-# CONFIG_PM is not set
+-CONFIG_85xx=y
+-CONFIG_PPC_INDIRECT_PCI_BE=y
+-
+-#
+-# Freescale 85xx options
+-#
+-# CONFIG_MPC8540_ADS is not set
+-# CONFIG_MPC8555_CDS is not set
+-# CONFIG_MPC8560_ADS is not set
+-# CONFIG_SBC8560 is not set
+-CONFIG_STX_GP3=y
+-CONFIG_MPC8560=y
+-
+-#
+-# Platform options
+-#
+-CONFIG_CPM2=y
+-# CONFIG_PC_KEYBOARD is not set
+-# CONFIG_SMP is not set
+-# CONFIG_PREEMPT is not set
+-CONFIG_HIGHMEM=y
+-CONFIG_BINFMT_ELF=y
+-CONFIG_BINFMT_MISC=m
+-# CONFIG_CMDLINE_BOOL is not set
+-CONFIG_ISA_DMA_API=y
+-
+-#
+-# Bus options
+-#
+-CONFIG_PCI=y
+-CONFIG_PCI_DOMAINS=y
+-# CONFIG_PCI_LEGACY_PROC is not set
+-# CONFIG_PCI_NAMES is not set
+-# CONFIG_PCI_DEBUG is not set
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-CONFIG_RAPIDIO=y
+-CONFIG_RAPIDIO_8_BIT_TRANSPORT=y
+-CONFIG_RAPIDIO_DISC_TIMEOUT=30
+-
+-#
+-# Advanced setup
+-#
+-# CONFIG_ADVANCED_OPTIONS is not set
+-
+-#
+-# Default settings for advanced configuration options are used
+-#
+-CONFIG_HIGHMEM_START=0xfe000000
+-CONFIG_LOWMEM_SIZE=0x30000000
+-CONFIG_KERNEL_START=0xc0000000
+-CONFIG_TASK_SIZE=0x80000000
+-CONFIG_BOOT_LOAD=0x00800000
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-# CONFIG_DEBUG_DRIVER is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-# CONFIG_MTD is not set
+-
+-#
+-# Parallel port support
+-#
+-CONFIG_PARPORT=m
+-CONFIG_PARPORT_PC=m
+-# CONFIG_PARPORT_PC_FIFO is not set
+-# CONFIG_PARPORT_PC_SUPERIO is not set
+-# CONFIG_PARPORT_GSC is not set
+-# CONFIG_PARPORT_1284 is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_FD is not set
+-# CONFIG_PARIDE is not set
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=m
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-CONFIG_BLK_DEV_NBD=m
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=m
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=4096
+-CONFIG_INITRAMFS_SOURCE=""
+-# CONFIG_LBD is not set
+-# CONFIG_CDROM_PKTCDVD is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-CONFIG_IDE=y
+-CONFIG_BLK_DEV_IDE=y
+-
+-#
+-# Please see Documentation/ide.txt for help/info on IDE drives
+-#
+-# CONFIG_BLK_DEV_IDE_SATA is not set
+-CONFIG_BLK_DEV_IDEDISK=y
+-# CONFIG_IDEDISK_MULTI_MODE is not set
+-CONFIG_BLK_DEV_IDECD=m
+-# CONFIG_BLK_DEV_IDETAPE is not set
+-# CONFIG_BLK_DEV_IDEFLOPPY is not set
+-# CONFIG_BLK_DEV_IDESCSI is not set
+-# CONFIG_IDE_TASK_IOCTL is not set
+-
+-#
+-# IDE chipset support/bugfixes
+-#
+-CONFIG_IDE_GENERIC=y
+-# CONFIG_BLK_DEV_IDEPCI is not set
+-# CONFIG_IDE_ARM is not set
+-# CONFIG_BLK_DEV_IDEDMA is not set
+-# CONFIG_IDEDMA_AUTO is not set
+-# CONFIG_BLK_DEV_HD is not set
+-
+-#
+-# SCSI device support
+-#
+-CONFIG_SCSI=m
+-CONFIG_SCSI_PROC_FS=y
+-
+-#
+-# SCSI support type (disk, tape, CD-ROM)
+-#
+-CONFIG_BLK_DEV_SD=m
+-CONFIG_CHR_DEV_ST=m
+-# CONFIG_CHR_DEV_OSST is not set
+-CONFIG_BLK_DEV_SR=m
+-# CONFIG_BLK_DEV_SR_VENDOR is not set
+-CONFIG_CHR_DEV_SG=m
+-
+-#
+-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+-#
+-CONFIG_SCSI_MULTI_LUN=y
+-CONFIG_SCSI_CONSTANTS=y
+-# CONFIG_SCSI_LOGGING is not set
+-
+-#
+-# SCSI Transport Attributes
+-#
+-# CONFIG_SCSI_SPI_ATTRS is not set
+-# CONFIG_SCSI_FC_ATTRS is not set
+-# CONFIG_SCSI_ISCSI_ATTRS is not set
+-
+-#
+-# SCSI low-level drivers
+-#
+-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+-# CONFIG_SCSI_3W_9XXX is not set
+-# CONFIG_SCSI_ACARD is not set
+-# CONFIG_SCSI_AACRAID is not set
+-# CONFIG_SCSI_AIC7XXX is not set
+-# CONFIG_SCSI_AIC7XXX_OLD is not set
+-# CONFIG_SCSI_AIC79XX is not set
+-# CONFIG_SCSI_DPT_I2O is not set
+-# CONFIG_MEGARAID_NEWGEN is not set
+-# CONFIG_MEGARAID_LEGACY is not set
+-# CONFIG_SCSI_SATA is not set
+-# CONFIG_SCSI_BUSLOGIC is not set
+-# CONFIG_SCSI_DMX3191D is not set
+-# CONFIG_SCSI_EATA is not set
+-# CONFIG_SCSI_FUTURE_DOMAIN is not set
+-# CONFIG_SCSI_GDTH is not set
+-# CONFIG_SCSI_IPS is not set
+-# CONFIG_SCSI_INITIO is not set
+-# CONFIG_SCSI_INIA100 is not set
+-# CONFIG_SCSI_PPA is not set
+-# CONFIG_SCSI_IMM is not set
+-# CONFIG_SCSI_SYM53C8XX_2 is not set
+-# CONFIG_SCSI_IPR is not set
+-# CONFIG_SCSI_QLOGIC_FC is not set
+-# CONFIG_SCSI_QLOGIC_1280 is not set
+-CONFIG_SCSI_QLA2XXX=m
+-# CONFIG_SCSI_QLA21XX is not set
+-# CONFIG_SCSI_QLA22XX is not set
+-# CONFIG_SCSI_QLA2300 is not set
+-# CONFIG_SCSI_QLA2322 is not set
+-# CONFIG_SCSI_QLA6312 is not set
+-# CONFIG_SCSI_LPFC is not set
+-# CONFIG_SCSI_DC395x is not set
+-# CONFIG_SCSI_DC390T is not set
+-# CONFIG_SCSI_NSP32 is not set
+-# CONFIG_SCSI_DEBUG is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_IEEE1394 is not set
+-
+-#
+-# I2O device support
+-#
+-# CONFIG_I2O is not set
+-
+-#
+-# Macintosh device drivers
+-#
+-
+-#
+-# Networking support
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-# CONFIG_IP_MULTICAST is not set
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_PNP=y
+-# CONFIG_IP_PNP_DHCP is not set
+-CONFIG_IP_PNP_BOOTP=y
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_ARPD is not set
+-# CONFIG_SYN_COOKIES is not set
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_IP_TCPDIAG=y
+-# CONFIG_IP_TCPDIAG_IPV6 is not set
+-
+-#
+-# IP: Virtual Server Configuration
+-#
+-# CONFIG_IP_VS is not set
+-# CONFIG_IPV6 is not set
+-CONFIG_NETFILTER=y
+-# CONFIG_NETFILTER_DEBUG is not set
+-
+-#
+-# IP: Netfilter Configuration
+-#
+-CONFIG_IP_NF_CONNTRACK=m
+-# CONFIG_IP_NF_CT_ACCT is not set
+-# CONFIG_IP_NF_CONNTRACK_MARK is not set
+-# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+-CONFIG_IP_NF_FTP=m
+-CONFIG_IP_NF_IRC=m
+-# CONFIG_IP_NF_TFTP is not set
+-# CONFIG_IP_NF_AMANDA is not set
+-# CONFIG_IP_NF_QUEUE is not set
+-CONFIG_IP_NF_IPTABLES=m
+-# CONFIG_IP_NF_MATCH_LIMIT is not set
+-# CONFIG_IP_NF_MATCH_IPRANGE is not set
+-# CONFIG_IP_NF_MATCH_MAC is not set
+-# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+-# CONFIG_IP_NF_MATCH_MARK is not set
+-# CONFIG_IP_NF_MATCH_MULTIPORT is not set
+-# CONFIG_IP_NF_MATCH_TOS is not set
+-# CONFIG_IP_NF_MATCH_RECENT is not set
+-# CONFIG_IP_NF_MATCH_ECN is not set
+-# CONFIG_IP_NF_MATCH_DSCP is not set
+-# CONFIG_IP_NF_MATCH_AH_ESP is not set
+-# CONFIG_IP_NF_MATCH_LENGTH is not set
+-# CONFIG_IP_NF_MATCH_TTL is not set
+-# CONFIG_IP_NF_MATCH_TCPMSS is not set
+-# CONFIG_IP_NF_MATCH_HELPER is not set
+-# CONFIG_IP_NF_MATCH_STATE is not set
+-# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+-# CONFIG_IP_NF_MATCH_OWNER is not set
+-# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+-# CONFIG_IP_NF_MATCH_REALM is not set
+-# CONFIG_IP_NF_MATCH_SCTP is not set
+-# CONFIG_IP_NF_MATCH_COMMENT is not set
+-# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+-CONFIG_IP_NF_FILTER=m
+-# CONFIG_IP_NF_TARGET_REJECT is not set
+-# CONFIG_IP_NF_TARGET_LOG is not set
+-# CONFIG_IP_NF_TARGET_ULOG is not set
+-# CONFIG_IP_NF_TARGET_TCPMSS is not set
+-CONFIG_IP_NF_NAT=m
+-CONFIG_IP_NF_NAT_NEEDED=y
+-CONFIG_IP_NF_TARGET_MASQUERADE=m
+-CONFIG_IP_NF_TARGET_REDIRECT=m
+-# CONFIG_IP_NF_TARGET_NETMAP is not set
+-# CONFIG_IP_NF_TARGET_SAME is not set
+-CONFIG_IP_NF_NAT_SNMP_BASIC=m
+-CONFIG_IP_NF_NAT_IRC=m
+-CONFIG_IP_NF_NAT_FTP=m
+-# CONFIG_IP_NF_MANGLE is not set
+-# CONFIG_IP_NF_RAW is not set
+-# CONFIG_IP_NF_ARPTABLES is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_NET_DIVERT is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-# CONFIG_NET_CLS_ROUTE is not set
+-
+-#
+-# Network testing
+-#
+-CONFIG_NET_PKTGEN=y
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-# CONFIG_MII is not set
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-
+-#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-# CONFIG_NET_PCI is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_TIGON3 is not set
+-CONFIG_GIANFAR=y
+-CONFIG_GFAR_NAPI=y
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-
+-#
+-# Token Ring devices
+-#
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-CONFIG_RIONET=y
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PLIP is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_NET_FC is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-
+-#
+-# Userland interfaces
+-#
+-CONFIG_INPUT_MOUSEDEV=y
+-CONFIG_INPUT_MOUSEDEV_PSAUX=y
+-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1280
+-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1024
+-CONFIG_INPUT_JOYDEV=m
+-# CONFIG_INPUT_TSDEV is not set
+-CONFIG_INPUT_EVDEV=m
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-CONFIG_INPUT_KEYBOARD=y
+-CONFIG_KEYBOARD_ATKBD=y
+-# CONFIG_KEYBOARD_SUNKBD is not set
+-# CONFIG_KEYBOARD_LKKBD is not set
+-# CONFIG_KEYBOARD_XTKBD is not set
+-# CONFIG_KEYBOARD_NEWTON is not set
+-CONFIG_INPUT_MOUSE=y
+-CONFIG_MOUSE_PS2=y
+-# CONFIG_MOUSE_SERIAL is not set
+-# CONFIG_MOUSE_VSXXXAA is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-CONFIG_SERIO=y
+-CONFIG_SERIO_I8042=y
+-CONFIG_SERIO_SERPORT=y
+-# CONFIG_SERIO_PARKBD is not set
+-# CONFIG_SERIO_PCIPS2 is not set
+-CONFIG_SERIO_LIBPS2=y
+-# CONFIG_SERIO_RAW is not set
+-# CONFIG_GAMEPORT is not set
+-CONFIG_SOUND_GAMEPORT=y
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-# CONFIG_SERIAL_8250 is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_SERIAL_CPM=y
+-CONFIG_SERIAL_CPM_CONSOLE=y
+-# CONFIG_SERIAL_CPM_SCC1 is not set
+-CONFIG_SERIAL_CPM_SCC2=y
+-# CONFIG_SERIAL_CPM_SCC3 is not set
+-# CONFIG_SERIAL_CPM_SCC4 is not set
+-# CONFIG_SERIAL_CPM_SMC1 is not set
+-# CONFIG_SERIAL_CPM_SMC2 is not set
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-CONFIG_PRINTER=m
+-# CONFIG_LP_CONSOLE is not set
+-# CONFIG_PPDEV is not set
+-# CONFIG_TIPAR is not set
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-# CONFIG_NVRAM is not set
+-# CONFIG_GEN_RTC is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-CONFIG_AGP=m
+-CONFIG_DRM=m
+-# CONFIG_DRM_TDFX is not set
+-# CONFIG_DRM_R128 is not set
+-# CONFIG_DRM_RADEON is not set
+-# CONFIG_DRM_MGA is not set
+-# CONFIG_DRM_SIS is not set
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-
+-#
+-# I2C support
+-#
+-CONFIG_I2C=m
+-CONFIG_I2C_CHARDEV=m
+-
+-#
+-# I2C Algorithms
+-#
+-CONFIG_I2C_ALGOBIT=m
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-# CONFIG_I2C_MPC is not set
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_PARPORT is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_SCx200_ACB is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_STUB is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-# CONFIG_I2C_PCA_ISA is not set
+-
+-#
+-# Hardware Sensors Chip support
+-#
+-# CONFIG_I2C_SENSOR is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ASB100 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_FSCHER is not set
+-# CONFIG_SENSORS_FSCPOS is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-# CONFIG_SENSORS_LM75 is not set
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-
+-#
+-# Other I2C Chip support
+-#
+-# CONFIG_SENSORS_DS1337 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_RTC8564 is not set
+-# CONFIG_SENSORS_M41T00 is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-# CONFIG_W1 is not set
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_FB is not set
+-
+-#
+-# Sound
+-#
+-CONFIG_SOUND=m
+-
+-#
+-# Advanced Linux Sound Architecture
+-#
+-# CONFIG_SND is not set
+-
+-#
+-# Open Sound System
+-#
+-# CONFIG_SOUND_PRIME is not set
+-
+-#
+-# USB support
+-#
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# InfiniBand support
+-#
+-# CONFIG_INFINIBAND is not set
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-CONFIG_JBD=y
+-CONFIG_JBD_DEBUG=y
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-
+-#
+-# XFS support
+-#
+-# CONFIG_XFS_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-CONFIG_AUTOFS_FS=m
+-CONFIG_AUTOFS4_FS=y
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-CONFIG_ISO9660_FS=m
+-# CONFIG_JOLIET is not set
+-# CONFIG_ZISOFS is not set
+-CONFIG_UDF_FS=m
+-CONFIG_UDF_NLS=y
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-CONFIG_FAT_FS=m
+-CONFIG_MSDOS_FS=m
+-CONFIG_VFAT_FS=m
+-CONFIG_FAT_DEFAULT_CODEPAGE=437
+-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-# CONFIG_PROC_KCORE is not set
+-CONFIG_SYSFS=y
+-CONFIG_DEVFS_FS=y
+-# CONFIG_DEVFS_MOUNT is not set
+-# CONFIG_DEVFS_DEBUG is not set
+-# CONFIG_DEVPTS_FS_XATTR is not set
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_XATTR is not set
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-CONFIG_CRAMFS=m
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-CONFIG_NFS_V3=y
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_LOCKD_V4=y
+-CONFIG_SUNRPC=y
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-CONFIG_SMB_FS=m
+-# CONFIG_SMB_NLS_DEFAULT is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-# CONFIG_PARTITION_ADVANCED is not set
+-CONFIG_MSDOS_PARTITION=y
+-
+-#
+-# Native Language Support
+-#
+-CONFIG_NLS=y
+-CONFIG_NLS_DEFAULT="iso8859-1"
+-# CONFIG_NLS_CODEPAGE_437 is not set
+-# CONFIG_NLS_CODEPAGE_737 is not set
+-# CONFIG_NLS_CODEPAGE_775 is not set
+-# CONFIG_NLS_CODEPAGE_850 is not set
+-# CONFIG_NLS_CODEPAGE_852 is not set
+-# CONFIG_NLS_CODEPAGE_855 is not set
+-# CONFIG_NLS_CODEPAGE_857 is not set
+-# CONFIG_NLS_CODEPAGE_860 is not set
+-# CONFIG_NLS_CODEPAGE_861 is not set
+-# CONFIG_NLS_CODEPAGE_862 is not set
+-# CONFIG_NLS_CODEPAGE_863 is not set
+-# CONFIG_NLS_CODEPAGE_864 is not set
+-# CONFIG_NLS_CODEPAGE_865 is not set
+-# CONFIG_NLS_CODEPAGE_866 is not set
+-# CONFIG_NLS_CODEPAGE_869 is not set
+-# CONFIG_NLS_CODEPAGE_936 is not set
+-# CONFIG_NLS_CODEPAGE_950 is not set
+-# CONFIG_NLS_CODEPAGE_932 is not set
+-# CONFIG_NLS_CODEPAGE_949 is not set
+-# CONFIG_NLS_CODEPAGE_874 is not set
+-# CONFIG_NLS_ISO8859_8 is not set
+-# CONFIG_NLS_CODEPAGE_1250 is not set
+-# CONFIG_NLS_CODEPAGE_1251 is not set
+-# CONFIG_NLS_ASCII is not set
+-# CONFIG_NLS_ISO8859_1 is not set
+-# CONFIG_NLS_ISO8859_2 is not set
+-# CONFIG_NLS_ISO8859_3 is not set
+-# CONFIG_NLS_ISO8859_4 is not set
+-# CONFIG_NLS_ISO8859_5 is not set
+-# CONFIG_NLS_ISO8859_6 is not set
+-# CONFIG_NLS_ISO8859_7 is not set
+-# CONFIG_NLS_ISO8859_9 is not set
+-# CONFIG_NLS_ISO8859_13 is not set
+-# CONFIG_NLS_ISO8859_14 is not set
+-# CONFIG_NLS_ISO8859_15 is not set
+-# CONFIG_NLS_KOI8_R is not set
+-# CONFIG_NLS_KOI8_U is not set
+-# CONFIG_NLS_UTF8 is not set
+-# CONFIG_SCC_ENET is not set
+-# CONFIG_FEC_ENET is not set
+-
+-#
+-# CPM2 Options
+-#
+-
+-#
+-# Library routines
+-#
+-CONFIG_CRC_CCITT=y
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_ZLIB_INFLATE=m
+-
+-#
+-# Profiling support
+-#
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-CONFIG_DEBUG_KERNEL=y
+-# CONFIG_MAGIC_SYSRQ is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_SCHEDSTATS is not set
+-# CONFIG_DEBUG_SLAB is not set
+-# CONFIG_DEBUG_SPINLOCK is not set
+-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+-# CONFIG_DEBUG_KOBJECT is not set
+-# CONFIG_DEBUG_HIGHMEM is not set
+-# CONFIG_DEBUG_INFO is not set
+-# CONFIG_DEBUG_FS is not set
+-# CONFIG_KGDB_CONSOLE is not set
+-# CONFIG_XMON is not set
+-CONFIG_BDI_SWITCH=y
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Hardware crypto devices
+-#
+diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
+index 6b4f022..7b73905 100644
+--- a/arch/ppc/kernel/Makefile
++++ b/arch/ppc/kernel/Makefile
+@@ -4,7 +4,6 @@
+ extra-$(CONFIG_PPC_STD_MMU)	:= head.o
+ extra-$(CONFIG_40x)		:= head_4xx.o
+ extra-$(CONFIG_44x)		:= head_44x.o
+-extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
+ extra-$(CONFIG_8xx)		:= head_8xx.o
+ extra-y				+= vmlinux.lds
+ 
+@@ -13,7 +12,6 @@ obj-y				:= entry.o traps.o time.o misc.o \
+ 					ppc_htab.o
+ obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
+ obj-$(CONFIG_PCI)		+= pci.o
+-obj-$(CONFIG_RAPIDIO)		+= rio.o
+ obj-$(CONFIG_KGDB)		+= ppc-stub.o
+ obj-$(CONFIG_SMP)		+= smp.o smp-tbsync.o
+ obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
+index e8e9432..a51a177 100644
+--- a/arch/ppc/kernel/asm-offsets.c
++++ b/arch/ppc/kernel/asm-offsets.c
+@@ -54,12 +54,6 @@ main(void)
+ 	DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
+ 	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+ #endif /* CONFIG_ALTIVEC */
+-#ifdef CONFIG_SPE
+-	DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
+-	DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
+-	DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
+-	DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
+-#endif /* CONFIG_SPE */
+ 	/* Interrupt register frame */
+ 	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
+ 	DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
+index 59e77eb..5f3a5d0 100644
+--- a/arch/ppc/kernel/entry.S
++++ b/arch/ppc/kernel/entry.S
+@@ -519,12 +519,7 @@ BEGIN_FTR_SECTION
+ 	stw	r12,THREAD+THREAD_VRSAVE(r2)
+ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ #endif /* CONFIG_ALTIVEC */
+-#ifdef CONFIG_SPE
+-	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
+-	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
+-	stw	r12,THREAD+THREAD_SPEFSCR(r2)
+-#endif /* CONFIG_SPE */
+-	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
++	and.	r0,r0,r11	/* FP or altivec enabled? */
+ 	beq+	1f
+ 	andc	r11,r11,r0
+ 	MTMSRD(r11)
+@@ -557,11 +552,6 @@ BEGIN_FTR_SECTION
+ 	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
+ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ #endif /* CONFIG_ALTIVEC */
+-#ifdef CONFIG_SPE
+-	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
+-	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
+-#endif /* CONFIG_SPE */
+-
+ 	lwz	r0,_CCR(r1)
+ 	mtcrf	0xFF,r0
+ 	/* r3-r12 are destroyed -- Cort */
+diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
+index 75bbc93..ebb5a40 100644
+--- a/arch/ppc/kernel/head_44x.S
++++ b/arch/ppc/kernel/head_44x.S
+@@ -195,7 +195,7 @@ skpinv:	addi	r4,r4,1				/* Increment */
+ 	li	r5,0
+ 	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
+ 
+-        li      r0,0                    /* TLB slot 0 */
++	li	r0,62			/* TLB slot 62 */
+ 
+ 	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
+ 	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
+diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
+index f3d274c..166d597 100644
+--- a/arch/ppc/kernel/head_booke.h
++++ b/arch/ppc/kernel/head_booke.h
+@@ -212,60 +212,6 @@ label:
+  * save (and later restore) the MSR via SPRN_CSRR1, which will still have
+  * the MSR_DE bit set.
+  */
+-#ifdef CONFIG_E200
+-#define DEBUG_EXCEPTION							      \
+-	START_EXCEPTION(Debug);						      \
+-	DEBUG_EXCEPTION_PROLOG;						      \
+-									      \
+-	/*								      \
+-	 * If there is a single step or branch-taken exception in an	      \
+-	 * exception entry sequence, it was probably meant to apply to	      \
+-	 * the code where the exception occurred (since exception entry	      \
+-	 * doesn't turn off DE automatically).  We simulate the effect	      \
+-	 * of turning off DE on entry to an exception handler by turning      \
+-	 * off DE in the CSRR1 value and clearing the debug status.	      \
+-	 */								      \
+-	mfspr	r10,SPRN_DBSR;		/* check single-step/branch taken */  \
+-	andis.	r10,r10,DBSR_IC@h;					      \
+-	beq+	2f;							      \
+-									      \
+-	lis	r10,KERNELBASE@h;	/* check if exception in vectors */   \
+-	ori	r10,r10,KERNELBASE@l;					      \
+-	cmplw	r12,r10;						      \
+-	blt+	2f;			/* addr below exception vectors */    \
+-									      \
+-	lis	r10,Debug@h;						      \
+-	ori	r10,r10,Debug@l;					      \
+-	cmplw	r12,r10;						      \
+-	bgt+	2f;			/* addr above exception vectors */    \
+-									      \
+-	/* here it looks like we got an inappropriate debug exception. */     \
+-1:	rlwinm	r9,r9,0,~MSR_DE;	/* clear DE in the CDRR1 value */     \
+-	lis	r10,DBSR_IC@h;		/* clear the IC event */	      \
+-	mtspr	SPRN_DBSR,r10;						      \
+-	/* restore state and get out */					      \
+-	lwz	r10,_CCR(r11);						      \
+-	lwz	r0,GPR0(r11);						      \
+-	lwz	r1,GPR1(r11);						      \
+-	mtcrf	0x80,r10;						      \
+-	mtspr	SPRN_DSRR0,r12;						      \
+-	mtspr	SPRN_DSRR1,r9;						      \
+-	lwz	r9,GPR9(r11);						      \
+-	lwz	r12,GPR12(r11);						      \
+-	mtspr	DEBUG_SPRG,r8;						      \
+-	BOOKE_LOAD_EXC_LEVEL_STACK(DEBUG); /* r8 points to the debug stack */ \
+-	lwz	r10,GPR10-INT_FRAME_SIZE(r8);				      \
+-	lwz	r11,GPR11-INT_FRAME_SIZE(r8);				      \
+-	mfspr	r8,DEBUG_SPRG;						      \
+-									      \
+-	RFDI;								      \
+-	b	.;							      \
+-									      \
+-	/* continue normal handling for a critical exception... */	      \
+-2:	mfspr	r4,SPRN_DBSR;						      \
+-	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
+-	EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, debug_transfer_to_handler, ret_from_debug_exc)
+-#else
+ #define DEBUG_EXCEPTION							      \
+ 	START_EXCEPTION(Debug);						      \
+ 	CRITICAL_EXCEPTION_PROLOG;					      \
+@@ -318,7 +264,6 @@ label:
+ 2:	mfspr	r4,SPRN_DBSR;						      \
+ 	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
+ 	EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
+-#endif
+ 
+ #define INSTRUCTION_STORAGE_EXCEPTION					      \
+ 	START_EXCEPTION(InstructionStorage)				      \
+diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
+deleted file mode 100644
+index 1f155d3..0000000
+--- a/arch/ppc/kernel/head_fsl_booke.S
++++ /dev/null
+@@ -1,1065 +0,0 @@
+-/*
+- * Kernel execution entry point code.
+- *
+- *    Copyright (c) 1995-1996 Gary Thomas <gdt at linuxppc.org>
+- *      Initial PowerPC version.
+- *    Copyright (c) 1996 Cort Dougan <cort at cs.nmt.edu>
+- *      Rewritten for PReP
+- *    Copyright (c) 1996 Paul Mackerras <paulus at cs.anu.edu.au>
+- *      Low-level exception handers, MMU support, and rewrite.
+- *    Copyright (c) 1997 Dan Malek <dmalek at jlc.net>
+- *      PowerPC 8xx modifications.
+- *    Copyright (c) 1998-1999 TiVo, Inc.
+- *      PowerPC 403GCX modifications.
+- *    Copyright (c) 1999 Grant Erickson <grant at lcse.umn.edu>
+- *      PowerPC 403GCX/405GP modifications.
+- *    Copyright 2000 MontaVista Software Inc.
+- *	PPC405 modifications
+- *      PowerPC 403GCX/405GP modifications.
+- * 	Author: MontaVista Software, Inc.
+- *         	frank_rowand at mvista.com or source at mvista.com
+- * 	   	debbie_chu at mvista.com
+- *    Copyright 2002-2004 MontaVista Software, Inc.
+- *      PowerPC 44x support, Matt Porter <mporter at kernel.crashing.org>
+- *    Copyright 2004 Freescale Semiconductor, Inc
+- *      PowerPC e500 modifications, Kumar Gala <galak at kernel.crashing.org>
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/threads.h>
+-#include <asm/processor.h>
+-#include <asm/page.h>
+-#include <asm/mmu.h>
+-#include <asm/pgtable.h>
+-#include <asm/cputable.h>
+-#include <asm/thread_info.h>
+-#include <asm/ppc_asm.h>
+-#include <asm/asm-offsets.h>
+-#include "head_booke.h"
+-
+-/* As with the other PowerPC ports, it is expected that when code
+- * execution begins here, the following registers contain valid, yet
+- * optional, information:
+- *
+- *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
+- *   r4 - Starting address of the init RAM disk
+- *   r5 - Ending address of the init RAM disk
+- *   r6 - Start of kernel command line string (e.g. "mem=128")
+- *   r7 - End of kernel command line string
+- *
+- */
+-	.text
+-_GLOBAL(_stext)
+-_GLOBAL(_start)
+-	/*
+-	 * Reserve a word at a fixed location to store the address
+-	 * of abatron_pteptrs
+-	 */
+-	nop
+-/*
+- * Save parameters we are passed
+- */
+-	mr	r31,r3
+-	mr	r30,r4
+-	mr	r29,r5
+-	mr	r28,r6
+-	mr	r27,r7
+-	li	r24,0		/* CPU number */
+-
+-/* We try to not make any assumptions about how the boot loader
+- * setup or used the TLBs.  We invalidate all mappings from the
+- * boot loader and load a single entry in TLB1[0] to map the
+- * first 16M of kernel memory.  Any boot info passed from the
+- * bootloader needs to live in this first 16M.
+- *
+- * Requirement on bootloader:
+- *  - The page we're executing in needs to reside in TLB1 and
+- *    have IPROT=1.  If not an invalidate broadcast could
+- *    evict the entry we're currently executing in.
+- *
+- *  r3 = Index of TLB1 were executing in
+- *  r4 = Current MSR[IS]
+- *  r5 = Index of TLB1 temp mapping
+- *
+- * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
+- * if needed
+- */
+-
+-/* 1. Find the index of the entry we're executing in */
+-	bl	invstr				/* Find our address */
+-invstr:	mflr	r6				/* Make it accessible */
+-	mfmsr	r7
+-	rlwinm	r4,r7,27,31,31			/* extract MSR[IS] */
+-	mfspr	r7, SPRN_PID0
+-	slwi	r7,r7,16
+-	or	r7,r7,r4
+-	mtspr	SPRN_MAS6,r7
+-	tlbsx	0,r6				/* search MSR[IS], SPID=PID0 */
+-#ifndef CONFIG_E200
+-	mfspr	r7,SPRN_MAS1
+-	andis.	r7,r7,MAS1_VALID@h
+-	bne	match_TLB
+-	mfspr	r7,SPRN_PID1
+-	slwi	r7,r7,16
+-	or	r7,r7,r4
+-	mtspr	SPRN_MAS6,r7
+-	tlbsx	0,r6				/* search MSR[IS], SPID=PID1 */
+-	mfspr	r7,SPRN_MAS1
+-	andis.	r7,r7,MAS1_VALID@h
+-	bne	match_TLB
+-	mfspr	r7, SPRN_PID2
+-	slwi	r7,r7,16
+-	or	r7,r7,r4
+-	mtspr	SPRN_MAS6,r7
+-	tlbsx	0,r6				/* Fall through, we had to match */
+-#endif
+-match_TLB:
+-	mfspr	r7,SPRN_MAS0
+-	rlwinm	r3,r7,16,20,31			/* Extract MAS0(Entry) */
+-
+-	mfspr	r7,SPRN_MAS1			/* Insure IPROT set */
+-	oris	r7,r7,MAS1_IPROT@h
+-	mtspr	SPRN_MAS1,r7
+-	tlbwe
+-
+-/* 2. Invalidate all entries except the entry we're executing in */
+-	mfspr	r9,SPRN_TLB1CFG
+-	andi.	r9,r9,0xfff
+-	li	r6,0				/* Set Entry counter to 0 */
+-1:	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
+-	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
+-	mtspr	SPRN_MAS0,r7
+-	tlbre
+-	mfspr	r7,SPRN_MAS1
+-	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
+-	cmpw	r3,r6
+-	beq	skpinv				/* Dont update the current execution TLB */
+-	mtspr	SPRN_MAS1,r7
+-	tlbwe
+-	isync
+-skpinv:	addi	r6,r6,1				/* Increment */
+-	cmpw	r6,r9				/* Are we done? */
+-	bne	1b				/* If not, repeat */
+-
+-	/* Invalidate TLB0 */
+-	li      r6,0x04
+-	tlbivax 0,r6
+-#ifdef CONFIG_SMP
+-	tlbsync
+-#endif
+-	/* Invalidate TLB1 */
+-	li      r6,0x0c
+-	tlbivax 0,r6
+-#ifdef CONFIG_SMP
+-	tlbsync
+-#endif
+-	msync
+-
+-/* 3. Setup a temp mapping and jump to it */
+-	andi.	r5, r3, 0x1	/* Find an entry not used and is non-zero */
+-	addi	r5, r5, 0x1
+-	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+-	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
+-	mtspr	SPRN_MAS0,r7
+-	tlbre
+-
+-	/* Just modify the entry ID and EPN for the temp mapping */
+-	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+-	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
+-	mtspr	SPRN_MAS0,r7
+-	xori	r6,r4,1		/* Setup TMP mapping in the other Address space */
+-	slwi	r6,r6,12
+-	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
+-	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
+-	mtspr	SPRN_MAS1,r6
+-	mfspr	r6,SPRN_MAS2
+-	li	r7,0		/* temp EPN = 0 */
+-	rlwimi	r7,r6,0,20,31
+-	mtspr	SPRN_MAS2,r7
+-	tlbwe
+-
+-	xori	r6,r4,1
+-	slwi	r6,r6,5		/* setup new context with other address space */
+-	bl	1f		/* Find our address */
+-1:	mflr	r9
+-	rlwimi	r7,r9,0,20,31
+-	addi	r7,r7,24
+-	mtspr	SPRN_SRR0,r7
+-	mtspr	SPRN_SRR1,r6
+-	rfi
+-
+-/* 4. Clear out PIDs & Search info */
+-	li	r6,0
+-	mtspr	SPRN_PID0,r6
+-#ifndef CONFIG_E200
+-	mtspr	SPRN_PID1,r6
+-	mtspr	SPRN_PID2,r6
+-#endif
+-	mtspr	SPRN_MAS6,r6
+-
+-/* 5. Invalidate mapping we started in */
+-	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+-	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
+-	mtspr	SPRN_MAS0,r7
+-	tlbre
+-	mfspr	r6,SPRN_MAS1
+-	rlwinm	r6,r6,0,2,0	/* clear IPROT */
+-	mtspr	SPRN_MAS1,r6
+-	tlbwe
+-	/* Invalidate TLB1 */
+-	li      r9,0x0c
+-	tlbivax 0,r9
+-#ifdef CONFIG_SMP
+-	tlbsync
+-#endif
+-	msync
+-
+-/* 6. Setup KERNELBASE mapping in TLB1[0] */
+-	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
+-	mtspr	SPRN_MAS0,r6
+-	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
+-	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
+-	mtspr	SPRN_MAS1,r6
+-	li	r7,0
+-	lis	r6,KERNELBASE@h
+-	ori	r6,r6,KERNELBASE@l
+-	rlwimi	r6,r7,0,20,31
+-	mtspr	SPRN_MAS2,r6
+-	li	r7,(MAS3_SX|MAS3_SW|MAS3_SR)
+-	mtspr	SPRN_MAS3,r7
+-	tlbwe
+-
+-/* 7. Jump to KERNELBASE mapping */
+-	lis	r7,MSR_KERNEL@h
+-	ori	r7,r7,MSR_KERNEL@l
+-	bl	1f			/* Find our address */
+-1:	mflr	r9
+-	rlwimi	r6,r9,0,20,31
+-	addi	r6,r6,24
+-	mtspr	SPRN_SRR0,r6
+-	mtspr	SPRN_SRR1,r7
+-	rfi				/* start execution out of TLB1[0] entry */
+-
+-/* 8. Clear out the temp mapping */
+-	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+-	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
+-	mtspr	SPRN_MAS0,r7
+-	tlbre
+-	mfspr	r8,SPRN_MAS1
+-	rlwinm	r8,r8,0,2,0	/* clear IPROT */
+-	mtspr	SPRN_MAS1,r8
+-	tlbwe
+-	/* Invalidate TLB1 */
+-	li      r9,0x0c
+-	tlbivax 0,r9
+-#ifdef CONFIG_SMP
+-	tlbsync
+-#endif
+-	msync
+-
+-	/* Establish the interrupt vector offsets */
+-	SET_IVOR(0,  CriticalInput);
+-	SET_IVOR(1,  MachineCheck);
+-	SET_IVOR(2,  DataStorage);
+-	SET_IVOR(3,  InstructionStorage);
+-	SET_IVOR(4,  ExternalInput);
+-	SET_IVOR(5,  Alignment);
+-	SET_IVOR(6,  Program);
+-	SET_IVOR(7,  FloatingPointUnavailable);
+-	SET_IVOR(8,  SystemCall);
+-	SET_IVOR(9,  AuxillaryProcessorUnavailable);
+-	SET_IVOR(10, Decrementer);
+-	SET_IVOR(11, FixedIntervalTimer);
+-	SET_IVOR(12, WatchdogTimer);
+-	SET_IVOR(13, DataTLBError);
+-	SET_IVOR(14, InstructionTLBError);
+-	SET_IVOR(15, Debug);
+-	SET_IVOR(32, SPEUnavailable);
+-	SET_IVOR(33, SPEFloatingPointData);
+-	SET_IVOR(34, SPEFloatingPointRound);
+-#ifndef CONFIG_E200
+-	SET_IVOR(35, PerformanceMonitor);
+-#endif
+-
+-	/* Establish the interrupt vector base */
+-	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
+-	mtspr	SPRN_IVPR,r4
+-
+-	/* Setup the defaults for TLB entries */
+-	li	r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
+-#ifdef CONFIG_E200
+-	oris	r2,r2,MAS4_TLBSELD(1)@h
+-#endif
+-   	mtspr	SPRN_MAS4, r2
+-
+-#if 0
+-	/* Enable DOZE */
+-	mfspr	r2,SPRN_HID0
+-	oris	r2,r2,HID0_DOZE@h
+-	mtspr	SPRN_HID0, r2
+-#endif
+-#ifdef CONFIG_E200
+-	/* enable dedicated debug exception handling resources (Debug APU) */
+-	mfspr	r2,SPRN_HID0
+-	ori 	r2,r2,HID0_DAPUEN@l
+-	mtspr	SPRN_HID0,r2
+-#endif
+-
+-#if !defined(CONFIG_BDI_SWITCH)
+-	/*
+-	 * The Abatron BDI JTAG debugger does not tolerate others
+-	 * mucking with the debug registers.
+-	 */
+-	lis	r2,DBCR0_IDM@h
+-	mtspr	SPRN_DBCR0,r2
+-	isync
+-	/* clear any residual debug events */
+-	li	r2,-1
+-	mtspr	SPRN_DBSR,r2
+-#endif
+-
+-	/*
+-	 * This is where the main kernel code starts.
+-	 */
+-
+-	/* ptr to current */
+-	lis	r2,init_task@h
+-	ori	r2,r2,init_task@l
+-
+-	/* ptr to current thread */
+-	addi	r4,r2,THREAD	/* init task's THREAD */
+-	mtspr	SPRN_SPRG3,r4
+-
+-	/* stack */
+-	lis	r1,init_thread_union@h
+-	ori	r1,r1,init_thread_union@l
+-	li	r0,0
+-	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+-
+-	bl	early_init
+-
+-	mfspr	r3,SPRN_TLB1CFG
+-	andi.	r3,r3,0xfff
+-	lis	r4,num_tlbcam_entries@ha
+-	stw	r3,num_tlbcam_entries@l(r4)
+-/*
+- * Decide what sort of machine this is and initialize the MMU.
+- */
+-	mr	r3,r31
+-	mr	r4,r30
+-	mr	r5,r29
+-	mr	r6,r28
+-	mr	r7,r27
+-	bl	machine_init
+-	bl	MMU_init
+-
+-	/* Setup PTE pointers for the Abatron bdiGDB */
+-	lis	r6, swapper_pg_dir@h
+-	ori	r6, r6, swapper_pg_dir@l
+-	lis	r5, abatron_pteptrs@h
+-	ori	r5, r5, abatron_pteptrs@l
+-	lis	r4, KERNELBASE@h
+-	ori	r4, r4, KERNELBASE@l
+-	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
+-	stw	r6, 0(r5)
+-
+-	/* Let's move on */
+-	lis	r4,start_kernel@h
+-	ori	r4,r4,start_kernel@l
+-	lis	r3,MSR_KERNEL@h
+-	ori	r3,r3,MSR_KERNEL@l
+-	mtspr	SPRN_SRR0,r4
+-	mtspr	SPRN_SRR1,r3
+-	rfi			/* change context and jump to start_kernel */
+-
+-/* Macros to hide the PTE size differences
+- *
+- * FIND_PTE -- walks the page tables given EA & pgdir pointer
+- *   r10 -- EA of fault
+- *   r11 -- PGDIR pointer
+- *   r12 -- free
+- *   label 2: is the bailout case
+- *
+- * if we find the pte (fall through):
+- *   r11 is low pte word
+- *   r12 is pointer to the pte
+- */
+-#ifdef CONFIG_PTE_64BIT
+-#define PTE_FLAGS_OFFSET	4
+-#define FIND_PTE	\
+-	rlwinm 	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
+-	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
+-	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
+-	beq	2f;			/* Bail if no table */		\
+-	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
+-	lwz	r11, 4(r12);		/* Get pte entry */
+-#else
+-#define PTE_FLAGS_OFFSET	0
+-#define FIND_PTE	\
+-	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
+-	lwz	r11, 0(r11);		/* Get L1 entry */			\
+-	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
+-	beq	2f;			/* Bail if no table */			\
+-	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
+-	lwz	r11, 0(r12);		/* Get Linux PTE */
+-#endif
+-
+-/*
+- * Interrupt vector entry code
+- *
+- * The Book E MMUs are always on so we don't need to handle
+- * interrupts in real mode as with previous PPC processors. In
+- * this case we handle interrupts in the kernel virtual address
+- * space.
+- *
+- * Interrupt vectors are dynamically placed relative to the
+- * interrupt prefix as determined by the address of interrupt_base.
+- * The interrupt vectors offsets are programmed using the labels
+- * for each interrupt vector entry.
+- *
+- * Interrupt vectors must be aligned on a 16 byte boundary.
+- * We align on a 32 byte cache line boundary for good measure.
+- */
+-
+-interrupt_base:
+-	/* Critical Input Interrupt */
+-	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+-
+-	/* Machine Check Interrupt */
+-#ifdef CONFIG_E200
+-	/* no RFMCI, MCSRRs on E200 */
+-	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+-#else
+-	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+-#endif
+-
+-	/* Data Storage Interrupt */
+-	START_EXCEPTION(DataStorage)
+-	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+-	mtspr	SPRN_SPRG1, r11
+-	mtspr	SPRN_SPRG4W, r12
+-	mtspr	SPRN_SPRG5W, r13
+-	mfcr	r11
+-	mtspr	SPRN_SPRG7W, r11
+-
+-	/*
+-	 * Check if it was a store fault, if not then bail
+-	 * because a user tried to access a kernel or
+-	 * read-protected page.  Otherwise, get the
+-	 * offending address and handle it.
+-	 */
+-	mfspr	r10, SPRN_ESR
+-	andis.	r10, r10, ESR_ST@h
+-	beq	2f
+-
+-	mfspr	r10, SPRN_DEAR		/* Get faulting address */
+-
+-	/* If we are faulting a kernel address, we have to use the
+-	 * kernel page tables.
+-	 */
+-	lis	r11, TASK_SIZE@h
+-	ori	r11, r11, TASK_SIZE@l
+-	cmplw	0, r10, r11
+-	bge	2f
+-
+-	/* Get the PGD for the current thread */
+-3:
+-	mfspr	r11,SPRN_SPRG3
+-	lwz	r11,PGDIR(r11)
+-4:
+-	FIND_PTE
+-
+-	/* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
+-	andi.	r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
+-	cmpwi	0, r13, _PAGE_RW|_PAGE_USER
+-	bne	2f			/* Bail if not */
+-
+-	/* Update 'changed'. */
+-	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+-	stw	r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
+-
+-	/* MAS2 not updated as the entry does exist in the tlb, this
+-	   fault taken to detect state transition (eg: COW -> DIRTY)
+-	 */
+-	andi.	r11, r11, _PAGE_HWEXEC
+-	rlwimi	r11, r11, 31, 27, 27	/* SX <- _PAGE_HWEXEC */
+-	ori     r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
+-
+-	/* update search PID in MAS6, AS = 0 */
+-	mfspr	r12, SPRN_PID0
+-	slwi	r12, r12, 16
+-	mtspr	SPRN_MAS6, r12
+-
+-	/* find the TLB index that caused the fault.  It has to be here. */
+-	tlbsx	0, r10
+-
+-	/* only update the perm bits, assume the RPN is fine */
+-	mfspr	r12, SPRN_MAS3
+-	rlwimi	r12, r11, 0, 20, 31
+-	mtspr	SPRN_MAS3,r12
+-	tlbwe
+-
+-	/* Done...restore registers and get out of here.  */
+-	mfspr	r11, SPRN_SPRG7R
+-	mtcr	r11
+-	mfspr	r13, SPRN_SPRG5R
+-	mfspr	r12, SPRN_SPRG4R
+-	mfspr	r11, SPRN_SPRG1
+-	mfspr	r10, SPRN_SPRG0
+-	rfi			/* Force context change */
+-
+-2:
+-	/*
+-	 * The bailout.  Restore registers to pre-exception conditions
+-	 * and call the heavyweights to help us out.
+-	 */
+-	mfspr	r11, SPRN_SPRG7R
+-	mtcr	r11
+-	mfspr	r13, SPRN_SPRG5R
+-	mfspr	r12, SPRN_SPRG4R
+-	mfspr	r11, SPRN_SPRG1
+-	mfspr	r10, SPRN_SPRG0
+-	b	data_access
+-
+-	/* Instruction Storage Interrupt */
+-	INSTRUCTION_STORAGE_EXCEPTION
+-
+-	/* External Input Interrupt */
+-	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+-
+-	/* Alignment Interrupt */
+-	ALIGNMENT_EXCEPTION
+-
+-	/* Program Interrupt */
+-	PROGRAM_EXCEPTION
+-
+-	/* Floating Point Unavailable Interrupt */
+-#ifdef CONFIG_PPC_FPU
+-	FP_UNAVAILABLE_EXCEPTION
+-#else
+-#ifdef CONFIG_E200
+-	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
+-	EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
+-#else
+-	EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+-#endif
+-#endif
+-
+-	/* System Call Interrupt */
+-	START_EXCEPTION(SystemCall)
+-	NORMAL_EXCEPTION_PROLOG
+-	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
+-
+-	/* Auxillary Processor Unavailable Interrupt */
+-	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+-
+-	/* Decrementer Interrupt */
+-	DECREMENTER_EXCEPTION
+-
+-	/* Fixed Internal Timer Interrupt */
+-	/* TODO: Add FIT support */
+-	EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+-
+-	/* Watchdog Timer Interrupt */
+-#ifdef CONFIG_BOOKE_WDT
+-	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
+-#else
+-	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
+-#endif
+-
+-	/* Data TLB Error Interrupt */
+-	START_EXCEPTION(DataTLBError)
+-	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+-	mtspr	SPRN_SPRG1, r11
+-	mtspr	SPRN_SPRG4W, r12
+-	mtspr	SPRN_SPRG5W, r13
+-	mfcr	r11
+-	mtspr	SPRN_SPRG7W, r11
+-	mfspr	r10, SPRN_DEAR		/* Get faulting address */
+-
+-	/* If we are faulting a kernel address, we have to use the
+-	 * kernel page tables.
+-	 */
+-	lis	r11, TASK_SIZE@h
+-	ori	r11, r11, TASK_SIZE@l
+-	cmplw	5, r10, r11
+-	blt	5, 3f
+-	lis	r11, swapper_pg_dir@h
+-	ori	r11, r11, swapper_pg_dir@l
+-
+-	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
+-	rlwinm	r12,r12,0,16,1
+-	mtspr	SPRN_MAS1,r12
+-
+-	b	4f
+-
+-	/* Get the PGD for the current thread */
+-3:
+-	mfspr	r11,SPRN_SPRG3
+-	lwz	r11,PGDIR(r11)
+-
+-4:
+-	FIND_PTE
+-	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
+-	beq	2f			/* Bail if not present */
+-
+-#ifdef CONFIG_PTE_64BIT
+-	lwz	r13, 0(r12)
+-#endif
+-	ori	r11, r11, _PAGE_ACCESSED
+-	stw	r11, PTE_FLAGS_OFFSET(r12)
+-
+-	 /* Jump to common tlb load */
+-	b	finish_tlb_load
+-2:
+-	/* The bailout.  Restore registers to pre-exception conditions
+-	 * and call the heavyweights to help us out.
+-	 */
+-	mfspr	r11, SPRN_SPRG7R
+-	mtcr	r11
+-	mfspr	r13, SPRN_SPRG5R
+-	mfspr	r12, SPRN_SPRG4R
+-	mfspr	r11, SPRN_SPRG1
+-	mfspr	r10, SPRN_SPRG0
+-	b	data_access
+-
+-	/* Instruction TLB Error Interrupt */
+-	/*
+-	 * Nearly the same as above, except we get our
+-	 * information from different registers and bailout
+-	 * to a different point.
+-	 */
+-	START_EXCEPTION(InstructionTLBError)
+-	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+-	mtspr	SPRN_SPRG1, r11
+-	mtspr	SPRN_SPRG4W, r12
+-	mtspr	SPRN_SPRG5W, r13
+-	mfcr	r11
+-	mtspr	SPRN_SPRG7W, r11
+-	mfspr	r10, SPRN_SRR0		/* Get faulting address */
+-
+-	/* If we are faulting a kernel address, we have to use the
+-	 * kernel page tables.
+-	 */
+-	lis	r11, TASK_SIZE@h
+-	ori	r11, r11, TASK_SIZE@l
+-	cmplw	5, r10, r11
+-	blt	5, 3f
+-	lis	r11, swapper_pg_dir@h
+-	ori	r11, r11, swapper_pg_dir@l
+-
+-	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
+-	rlwinm	r12,r12,0,16,1
+-	mtspr	SPRN_MAS1,r12
+-
+-	b	4f
+-
+-	/* Get the PGD for the current thread */
+-3:
+-	mfspr	r11,SPRN_SPRG3
+-	lwz	r11,PGDIR(r11)
+-
+-4:
+-	FIND_PTE
+-	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
+-	beq	2f			/* Bail if not present */
+-
+-#ifdef CONFIG_PTE_64BIT
+-	lwz	r13, 0(r12)
+-#endif
+-	ori	r11, r11, _PAGE_ACCESSED
+-	stw	r11, PTE_FLAGS_OFFSET(r12)
+-
+-	/* Jump to common TLB load point */
+-	b	finish_tlb_load
+-
+-2:
+-	/* The bailout.  Restore registers to pre-exception conditions
+-	 * and call the heavyweights to help us out.
+-	 */
+-	mfspr	r11, SPRN_SPRG7R
+-	mtcr	r11
+-	mfspr	r13, SPRN_SPRG5R
+-	mfspr	r12, SPRN_SPRG4R
+-	mfspr	r11, SPRN_SPRG1
+-	mfspr	r10, SPRN_SPRG0
+-	b	InstructionStorage
+-
+-#ifdef CONFIG_SPE
+-	/* SPE Unavailable */
+-	START_EXCEPTION(SPEUnavailable)
+-	NORMAL_EXCEPTION_PROLOG
+-	bne	load_up_spe
+-	addi    r3,r1,STACK_FRAME_OVERHEAD
+-	EXC_XFER_EE_LITE(0x2010, KernelSPE)
+-#else
+-	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
+-#endif /* CONFIG_SPE */
+-
+-	/* SPE Floating Point Data */
+-#ifdef CONFIG_SPE
+-	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
+-#else
+-	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
+-#endif /* CONFIG_SPE */
+-
+-	/* SPE Floating Point Round */
+-	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+-
+-	/* Performance Monitor */
+-	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
+-
+-
+-	/* Debug Interrupt */
+-	DEBUG_EXCEPTION
+-
+-/*
+- * Local functions
+- */
+-
+-	/*
+-	 * Data TLB exceptions will bail out to this point
+-	 * if they can't resolve the lightweight TLB fault.
+-	 */
+-data_access:
+-	NORMAL_EXCEPTION_PROLOG
+-	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
+-	stw	r5,_ESR(r11)
+-	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
+-	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
+-	bne	1f
+-	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+-1:
+-	addi	r3,r1,STACK_FRAME_OVERHEAD
+-	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
+-
+-/*
+-
+- * Both the instruction and data TLB miss get to this
+- * point to load the TLB.
+- * 	r10 - EA of fault
+- * 	r11 - TLB (info from Linux PTE)
+- * 	r12, r13 - available to use
+- * 	CR5 - results of addr < TASK_SIZE
+- *	MAS0, MAS1 - loaded with proper value when we get here
+- *	MAS2, MAS3 - will need additional info from Linux PTE
+- *	Upon exit, we reload everything and RFI.
+- */
+-finish_tlb_load:
+-	/*
+-	 * We set execute, because we don't have the granularity to
+-	 * properly set this at the page level (Linux problem).
+-	 * Many of these bits are software only.  Bits we don't set
+-	 * here we (properly should) assume have the appropriate value.
+-	 */
+-
+-	mfspr	r12, SPRN_MAS2
+-#ifdef CONFIG_PTE_64BIT
+-	rlwimi	r12, r11, 26, 24, 31	/* extract ...WIMGE from pte */
+-#else
+-	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
+-#endif
+-	mtspr	SPRN_MAS2, r12
+-
+-	bge	5, 1f
+-
+-	/* is user addr */
+-	andi.	r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
+-	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
+-	srwi	r10, r12, 1
+-	or	r12, r12, r10	/* Copy user perms into supervisor */
+-	iseleq	r12, 0, r12
+-	b	2f
+-
+-	/* is kernel addr */
+-1:	rlwinm	r12, r11, 31, 29, 29	/* Extract _PAGE_HWWRITE into SW */
+-	ori	r12, r12, (MAS3_SX | MAS3_SR)
+-
+-#ifdef CONFIG_PTE_64BIT
+-2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
+-	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
+-	mtspr	SPRN_MAS3, r12
+-BEGIN_FTR_SECTION
+-	srwi	r10, r13, 8		/* grab RPN[8:31] */
+-	mtspr	SPRN_MAS7, r10
+-END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
+-#else
+-2:	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
+-	mtspr	SPRN_MAS3, r11
+-#endif
+-#ifdef CONFIG_E200
+-	/* Round robin TLB1 entries assignment */
+-	mfspr	r12, SPRN_MAS0
+-
+-	/* Extract TLB1CFG(NENTRY) */
+-	mfspr	r11, SPRN_TLB1CFG
+-	andi.	r11, r11, 0xfff
+-
+-	/* Extract MAS0(NV) */
+-	andi.	r13, r12, 0xfff
+-	addi	r13, r13, 1
+-	cmpw	0, r13, r11
+-	addi	r12, r12, 1
+-
+-	/* check if we need to wrap */
+-	blt	7f
+-
+-	/* wrap back to first free tlbcam entry */
+-	lis	r13, tlbcam_index@ha
+-	lwz	r13, tlbcam_index@l(r13)
+-	rlwimi	r12, r13, 0, 20, 31
+-7:
+-	mtspr   SPRN_MAS0,r12
+-#endif /* CONFIG_E200 */
+-
+-	tlbwe
+-
+-	/* Done...restore registers and get out of here.  */
+-	mfspr	r11, SPRN_SPRG7R
+-	mtcr	r11
+-	mfspr	r13, SPRN_SPRG5R
+-	mfspr	r12, SPRN_SPRG4R
+-	mfspr	r11, SPRN_SPRG1
+-	mfspr	r10, SPRN_SPRG0
+-	rfi					/* Force context change */
+-
+-#ifdef CONFIG_SPE
+-/* Note that the SPE support is closely modeled after the AltiVec
+- * support.  Changes to one are likely to be applicable to the
+- * other!  */
+-load_up_spe:
+-/*
+- * Disable SPE for the task which had SPE previously,
+- * and save its SPE registers in its thread_struct.
+- * Enables SPE for use in the kernel on return.
+- * On SMP we know the SPE units are free, since we give it up every
+- * switch.  -- Kumar
+- */
+-	mfmsr	r5
+-	oris	r5,r5,MSR_SPE@h
+-	mtmsr	r5			/* enable use of SPE now */
+-	isync
+-/*
+- * For SMP, we don't do lazy SPE switching because it just gets too
+- * horrendously complex, especially when a task switches from one CPU
+- * to another.  Instead we call giveup_spe in switch_to.
+- */
+-#ifndef CONFIG_SMP
+-	lis	r3,last_task_used_spe@ha
+-	lwz	r4,last_task_used_spe@l(r3)
+-	cmpi	0,r4,0
+-	beq	1f
+-	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
+-	SAVE_32EVRS(0,r10,r4)
+-   	evxor	evr10, evr10, evr10	/* clear out evr10 */
+-	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
+-	li	r5,THREAD_ACC
+-   	evstddx	evr10, r4, r5		/* save off accumulator */
+-	lwz	r5,PT_REGS(r4)
+-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+-	lis	r10,MSR_SPE@h
+-	andc	r4,r4,r10	/* disable SPE for previous task */
+-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+-1:
+-#endif /* CONFIG_SMP */
+-	/* enable use of SPE after return */
+-	oris	r9,r9,MSR_SPE@h
+-	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+-	li	r4,1
+-	li	r10,THREAD_ACC
+-	stw	r4,THREAD_USED_SPE(r5)
+-	evlddx	evr4,r10,r5
+-	evmra	evr4,evr4
+-	REST_32EVRS(0,r10,r5)
+-#ifndef CONFIG_SMP
+-	subi	r4,r5,THREAD
+-	stw	r4,last_task_used_spe@l(r3)
+-#endif /* CONFIG_SMP */
+-	/* restore registers and return */
+-2:	REST_4GPRS(3, r11)
+-	lwz	r10,_CCR(r11)
+-	REST_GPR(1, r11)
+-	mtcr	r10
+-	lwz	r10,_LINK(r11)
+-	mtlr	r10
+-	REST_GPR(10, r11)
+-	mtspr	SPRN_SRR1,r9
+-	mtspr	SPRN_SRR0,r12
+-	REST_GPR(9, r11)
+-	REST_GPR(12, r11)
+-	lwz	r11,GPR11(r11)
+-	rfi
+-
+-/*
+- * SPE unavailable trap from kernel - print a message, but let
+- * the task use SPE in the kernel until it returns to user mode.
+- */
+-KernelSPE:
+-	lwz	r3,_MSR(r1)
+-	oris	r3,r3,MSR_SPE@h
+-	stw	r3,_MSR(r1)	/* enable use of SPE after return */
+-	lis	r3,87f@h
+-	ori	r3,r3,87f@l
+-	mr	r4,r2		/* current */
+-	lwz	r5,_NIP(r1)
+-	bl	printk
+-	b	ret_from_except
+-87:	.string	"SPE used in kernel  (task=%p, pc=%x)  \n"
+-	.align	4,0
+-
+-#endif /* CONFIG_SPE */
+-
+-/*
+- * Global functions
+- */
+-
+-/*
+- * extern void loadcam_entry(unsigned int index)
+- *
+- * Load TLBCAM[index] entry in to the L2 CAM MMU
+- */
+-_GLOBAL(loadcam_entry)
+-	lis	r4,TLBCAM@ha
+-	addi	r4,r4,TLBCAM@l
+-	mulli	r5,r3,20
+-	add	r3,r5,r4
+-	lwz	r4,0(r3)
+-	mtspr	SPRN_MAS0,r4
+-	lwz	r4,4(r3)
+-	mtspr	SPRN_MAS1,r4
+-	lwz	r4,8(r3)
+-	mtspr	SPRN_MAS2,r4
+-	lwz	r4,12(r3)
+-	mtspr	SPRN_MAS3,r4
+-	tlbwe
+-	isync
+-	blr
+-
+-/*
+- * extern void giveup_altivec(struct task_struct *prev)
+- *
+- * The e500 core does not have an AltiVec unit.
+- */
+-_GLOBAL(giveup_altivec)
+-	blr
+-
+-#ifdef CONFIG_SPE
+-/*
+- * extern void giveup_spe(struct task_struct *prev)
+- *
+- */
+-_GLOBAL(giveup_spe)
+-	mfmsr	r5
+-	oris	r5,r5,MSR_SPE@h
+-	mtmsr	r5			/* enable use of SPE now */
+-	isync
+-	cmpi	0,r3,0
+-	beqlr-				/* if no previous owner, done */
+-	addi	r3,r3,THREAD		/* want THREAD of task */
+-	lwz	r5,PT_REGS(r3)
+-	cmpi	0,r5,0
+-	SAVE_32EVRS(0, r4, r3)
+-   	evxor	evr6, evr6, evr6	/* clear out evr6 */
+-	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
+-	li	r4,THREAD_ACC
+-   	evstddx	evr6, r4, r3		/* save off accumulator */
+-	mfspr	r6,SPRN_SPEFSCR
+-	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
+-	beq	1f
+-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+-	lis	r3,MSR_SPE@h
+-	andc	r4,r4,r3		/* disable SPE for previous task */
+-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+-1:
+-#ifndef CONFIG_SMP
+-	li	r5,0
+-	lis	r4,last_task_used_spe@ha
+-	stw	r5,last_task_used_spe@l(r4)
+-#endif /* CONFIG_SMP */
+-	blr
+-#endif /* CONFIG_SPE */
+-
+-/*
+- * extern void giveup_fpu(struct task_struct *prev)
+- *
+- * Not all FSL Book-E cores have an FPU
+- */
+-#ifndef CONFIG_PPC_FPU
+-_GLOBAL(giveup_fpu)
+-	blr
+-#endif
+-
+-/*
+- * extern void abort(void)
+- *
+- * At present, this routine just applies a system reset.
+- */
+-_GLOBAL(abort)
+-	li	r13,0
+-        mtspr   SPRN_DBCR0,r13		/* disable all debug events */
+-	isync
+-	mfmsr	r13
+-	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
+-	mtmsr	r13
+-	isync
+-        mfspr   r13,SPRN_DBCR0
+-        lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
+-        mtspr   SPRN_DBCR0,r13
+-	isync
+-
+-_GLOBAL(set_context)
+-
+-#ifdef CONFIG_BDI_SWITCH
+-	/* Context switch the PTE pointer for the Abatron BDI2000.
+-	 * The PGDIR is the second parameter.
+-	 */
+-	lis	r5, abatron_pteptrs@h
+-	ori	r5, r5, abatron_pteptrs@l
+-	stw	r4, 0x4(r5)
+-#endif
+-	mtspr	SPRN_PID,r3
+-	isync			/* Force context change */
+-	blr
+-
+-/*
+- * We put a few things here that have to be page-aligned. This stuff
+- * goes at the beginning of the data segment, which is page-aligned.
+- */
+-	.data
+-	.align	12
+-	.globl	sdata
+-sdata:
+-	.globl	empty_zero_page
+-empty_zero_page:
+-	.space	4096
+-	.globl	swapper_pg_dir
+-swapper_pg_dir:
+-	.space	4096
+-
+-/* Reserved 4k for the critical exception stack & 4k for the machine
+- * check stack per CPU for kernel mode exceptions */
+-	.section .bss
+-        .align 12
+-exception_stack_bottom:
+-	.space	BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
+-	.globl	exception_stack_top
+-exception_stack_top:
+-
+-/*
+- * This space gets a copy of optional info passed to us by the bootstrap
+- * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
+- */
+-	.globl	cmd_line
+-cmd_line:
+-	.space	512
+-
+-/*
+- * Room for two PTE pointers, usually the kernel and current user pointers
+- * to their respective root page table.
+- */
+-abatron_pteptrs:
+-	.space	8
+diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
+index e0c850d..d5e0dfc 100644
+--- a/arch/ppc/kernel/misc.S
++++ b/arch/ppc/kernel/misc.S
+@@ -165,24 +165,7 @@ _GLOBAL(_tlbia)
+ 	ble	1b
+ 
+ 	isync
+-#elif defined(CONFIG_FSL_BOOKE)
+-	/* Invalidate all entries in TLB0 */
+-	li	r3, 0x04
+-	tlbivax	0,3
+-	/* Invalidate all entries in TLB1 */
+-	li	r3, 0x0c
+-	tlbivax	0,3
+-	/* Invalidate all entries in TLB2 */
+-	li	r3, 0x14
+-	tlbivax	0,3
+-	/* Invalidate all entries in TLB3 */
+-	li	r3, 0x1c
+-	tlbivax	0,3
+-	msync
+-#ifdef CONFIG_SMP
+-	tlbsync
+-#endif /* CONFIG_SMP */
+-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
++#else /* !(CONFIG_40x || CONFIG_44x) */
+ #if defined(CONFIG_SMP)
+ 	rlwinm	r8,r1,0,0,18
+ 	lwz	r8,TI_CPU(r8)
+@@ -268,20 +251,7 @@ _GLOBAL(_tlbie)
+ 	tlbwe	r3, r3, PPC44x_TLB_PAGEID
+ 	isync
+ 10:
+-#elif defined(CONFIG_FSL_BOOKE)
+-	rlwinm	r4, r3, 0, 0, 19
+-	ori	r5, r4, 0x08	/* TLBSEL = 1 */
+-	ori	r6, r4, 0x10	/* TLBSEL = 2 */
+-	ori	r7, r4, 0x18	/* TLBSEL = 3 */
+-	tlbivax	0, r4
+-	tlbivax	0, r5
+-	tlbivax	0, r6
+-	tlbivax	0, r7
+-	msync
+-#if defined(CONFIG_SMP)
+-	tlbsync
+-#endif /* CONFIG_SMP */
+-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
++#else /* !(CONFIG_40x || CONFIG_44x) */
+ #if defined(CONFIG_SMP)
+ 	rlwinm	r8,r1,0,0,18
+ 	lwz	r8,TI_CPU(r8)
+@@ -338,18 +308,6 @@ _GLOBAL(flush_instruction_cache)
+ 	lis	r3, KERNELBASE@h
+ 	iccci	0,r3
+ #endif
+-#elif CONFIG_FSL_BOOKE
+-BEGIN_FTR_SECTION
+-	mfspr   r3,SPRN_L1CSR0
+-	ori     r3,r3,L1CSR0_CFI|L1CSR0_CLFC
+-	/* msync; isync recommended here */
+-	mtspr   SPRN_L1CSR0,r3
+-	isync
+-	blr
+-END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
+-	mfspr	r3,SPRN_L1CSR1
+-	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
+-	mtspr	SPRN_L1CSR1,r3
+ #else
+ 	mfspr	r3,SPRN_PVR
+ 	rlwinm	r3,r3,16,16,31
+diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
+index 22494ec..c353502 100644
+--- a/arch/ppc/kernel/ppc_ksyms.c
++++ b/arch/ppc/kernel/ppc_ksyms.c
+@@ -45,7 +45,7 @@
+ #include <asm/dcr.h>
+ 
+ #ifdef  CONFIG_8xx
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #endif
+ 
+ extern void transfer_to_handler(void);
+@@ -166,12 +166,6 @@ EXPORT_SYMBOL(last_task_used_altivec);
+ #endif
+ EXPORT_SYMBOL(giveup_altivec);
+ #endif /* CONFIG_ALTIVEC */
+-#ifdef CONFIG_SPE
+-#ifndef CONFIG_SMP
+-EXPORT_SYMBOL(last_task_used_spe);
+-#endif
+-EXPORT_SYMBOL(giveup_spe);
+-#endif /* CONFIG_SPE */
+ #ifdef CONFIG_SMP
+ EXPORT_SYMBOL(smp_call_function);
+ EXPORT_SYMBOL(smp_hw_index);
+@@ -244,8 +238,7 @@ EXPORT_SYMBOL(debugger_fault_handler);
+ EXPORT_SYMBOL(cpm_install_handler);
+ EXPORT_SYMBOL(cpm_free_handler);
+ #endif /* CONFIG_8xx */
+-#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
+-	defined(CONFIG_83xx)
++#if defined(CONFIG_8xx) || defined(CONFIG_40x)
+ EXPORT_SYMBOL(__res);
+ #endif
+ 
+diff --git a/arch/ppc/kernel/rio.c b/arch/ppc/kernel/rio.c
+deleted file mode 100644
+index 29487fe..0000000
+--- a/arch/ppc/kernel/rio.c
++++ /dev/null
+@@ -1,52 +0,0 @@
+-/*
+- * RapidIO PPC32 support
+- *
+- * Copyright 2005 MontaVista Software, Inc.
+- * Matt Porter <mporter@kernel.crashing.org>
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/init.h>
+-#include <linux/kernel.h>
+-#include <linux/rio.h>
+-
+-#include <asm/rio.h>
+-
+-/**
+- * platform_rio_init - Do platform specific RIO init
+- *
+- * Any platform specific initialization of RapdIO
+- * hardware is done here as well as registration
+- * of any active master ports in the system.
+- */
+-void __attribute__ ((weak))
+-    platform_rio_init(void)
+-{
+-	printk(KERN_WARNING "RIO: No platform_rio_init() present\n");
+-}
+-
+-/**
+- * ppc_rio_init - Do PPC32 RIO init
+- *
+- * Calls platform-specific RIO init code and then calls
+- * rio_init_mports() to initialize any master ports that
+- * have been registered with the RIO subsystem.
+- */
+-static int __init ppc_rio_init(void)
+-{
+-	printk(KERN_INFO "RIO: RapidIO init\n");
+-
+-	/* Platform specific initialization */
+-	platform_rio_init();
+-
+-	/* Enumerate all registered ports */
+-	rio_init_mports();
+-
+-	return 0;
+-}
+-
+-subsys_initcall(ppc_rio_init);
+diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
+index 5255bd8..d51368d 100644
+--- a/arch/ppc/kernel/setup.c
++++ b/arch/ppc/kernel/setup.c
+@@ -37,10 +37,8 @@
+ #include <asm/nvram.h>
+ #include <asm/xmon.h>
+ #include <asm/ocp.h>
+-#include <asm/prom.h>
+ 
+-#define USES_PPC_SYS (defined(CONFIG_85xx) || defined(CONFIG_83xx) || \
+-		      defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \
++#define USES_PPC_SYS (defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \
+ 		      defined(CONFIG_PPC_MPC52xx))
+ 
+ #if USES_PPC_SYS
+diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
+index c785689..a467a42 100644
+--- a/arch/ppc/kernel/traps.c
++++ b/arch/ppc/kernel/traps.c
+@@ -194,11 +194,7 @@ static inline int check_io_access(struct pt_regs *regs)
+ /* On 4xx, the reason for the machine check or program exception
+    is in the ESR. */
+ #define get_reason(regs)	((regs)->dsisr)
+-#ifndef CONFIG_FSL_BOOKE
+ #define get_mc_reason(regs)	((regs)->dsisr)
+-#else
+-#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
+-#endif
+ #define REASON_FP		ESR_FP
+ #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
+ #define REASON_PRIVILEGED	ESR_PPR
+@@ -231,39 +227,25 @@ platform_machine_check(struct pt_regs *regs)
+ {
+ }
+ 
+-void machine_check_exception(struct pt_regs *regs)
++#if defined(CONFIG_4xx)
++int machine_check_4xx(struct pt_regs *regs)
+ {
+ 	unsigned long reason = get_mc_reason(regs);
+ 
+-	if (user_mode(regs)) {
+-		regs->msr |= MSR_RI;
+-		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
+-		return;
+-	}
+-
+-#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
+-	/* the qspan pci read routines can cause machine checks -- Cort */
+-	bad_page_fault(regs, regs->dar, SIGBUS);
+-	return;
+-#endif
+-
+-	if (debugger_fault_handler) {
+-		debugger_fault_handler(regs);
+-		regs->msr |= MSR_RI;
+-		return;
+-	}
+-
+-	if (check_io_access(regs))
+-		return;
+-
+-#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
+ 	if (reason & ESR_IMCP) {
+ 		printk("Instruction");
+ 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+ 	} else
+ 		printk("Data");
+ 	printk(" machine check in kernel mode.\n");
+-#elif defined(CONFIG_440A)
++
++	return 0;
++}
++
++int machine_check_440A(struct pt_regs *regs)
++{
++	unsigned long reason = get_mc_reason(regs);
++
+ 	printk("Machine check in kernel mode.\n");
+ 	if (reason & ESR_IMCP){
+ 		printk("Instruction Synchronous Machine Check exception\n");
+@@ -293,55 +275,13 @@ void machine_check_exception(struct pt_regs *regs)
+ 		/* Clear MCSR */
+ 		mtspr(SPRN_MCSR, mcsr);
+ 	}
+-#elif defined (CONFIG_E500)
+-	printk("Machine check in kernel mode.\n");
+-	printk("Caused by (from MCSR=%lx): ", reason);
+-
+-	if (reason & MCSR_MCP)
+-		printk("Machine Check Signal\n");
+-	if (reason & MCSR_ICPERR)
+-		printk("Instruction Cache Parity Error\n");
+-	if (reason & MCSR_DCP_PERR)
+-		printk("Data Cache Push Parity Error\n");
+-	if (reason & MCSR_DCPERR)
+-		printk("Data Cache Parity Error\n");
+-	if (reason & MCSR_GL_CI)
+-		printk("Guarded Load or Cache-Inhibited stwcx.\n");
+-	if (reason & MCSR_BUS_IAERR)
+-		printk("Bus - Instruction Address Error\n");
+-	if (reason & MCSR_BUS_RAERR)
+-		printk("Bus - Read Address Error\n");
+-	if (reason & MCSR_BUS_WAERR)
+-		printk("Bus - Write Address Error\n");
+-	if (reason & MCSR_BUS_IBERR)
+-		printk("Bus - Instruction Data Error\n");
+-	if (reason & MCSR_BUS_RBERR)
+-		printk("Bus - Read Data Bus Error\n");
+-	if (reason & MCSR_BUS_WBERR)
+-		printk("Bus - Write Data Bus Error\n");
+-	if (reason & MCSR_BUS_IPERR)
+-		printk("Bus - Instruction Parity Error\n");
+-	if (reason & MCSR_BUS_RPERR)
+-		printk("Bus - Read Parity Error\n");
+-#elif defined (CONFIG_E200)
+-	printk("Machine check in kernel mode.\n");
+-	printk("Caused by (from MCSR=%lx): ", reason);
+-
+-	if (reason & MCSR_MCP)
+-		printk("Machine Check Signal\n");
+-	if (reason & MCSR_CP_PERR)
+-		printk("Cache Push Parity Error\n");
+-	if (reason & MCSR_CPERR)
+-		printk("Cache Parity Error\n");
+-	if (reason & MCSR_EXCP_ERR)
+-		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
+-	if (reason & MCSR_BUS_IRERR)
+-		printk("Bus - Read Bus Error on instruction fetch\n");
+-	if (reason & MCSR_BUS_DRERR)
+-		printk("Bus - Read Bus Error on data load\n");
+-	if (reason & MCSR_BUS_WRERR)
+-		printk("Bus - Write Bus Error on buffered store or cache line push\n");
+-#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
++	return 0;
++}
++#else
++int machine_check_generic(struct pt_regs *regs)
++{
++	unsigned long reason = get_mc_reason(regs);
++
+ 	printk("Machine check in kernel mode.\n");
+ 	printk("Caused by (from SRR1=%lx): ", reason);
+ 	switch (reason & 0x601F0000) {
+@@ -371,7 +311,39 @@ void machine_check_exception(struct pt_regs *regs)
+ 	default:
+ 		printk("Unknown values in msr\n");
+ 	}
+-#endif /* CONFIG_4xx */
++	return 0;
++}
++#endif /* everything else */
++
++void machine_check_exception(struct pt_regs *regs)
++{
++	int recover = 0;
++
++	if (cur_cpu_spec->machine_check)
++		recover = cur_cpu_spec->machine_check(regs);
++	if (recover > 0)
++		return;
++
++	if (user_mode(regs)) {
++		regs->msr |= MSR_RI;
++		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
++		return;
++	}
++
++#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
++	/* the qspan pci read routines can cause machine checks -- Cort */
++	bad_page_fault(regs, regs->dar, SIGBUS);
++	return;
++#endif
++
++	if (debugger_fault_handler) {
++		debugger_fault_handler(regs);
++		regs->msr |= MSR_RI;
++		return;
++	}
++
++	if (check_io_access(regs))
++		return;
+ 
+ 	/*
+ 	 * Optional platform-provided routine to print out
+@@ -830,63 +802,6 @@ void altivec_assist_exception(struct pt_regs *regs)
+ }
+ #endif /* CONFIG_ALTIVEC */
+ 
+-#ifdef CONFIG_E500
+-void performance_monitor_exception(struct pt_regs *regs)
+-{
+-	perf_irq(regs);
+-}
+-#endif
+-
+-#ifdef CONFIG_FSL_BOOKE
+-void CacheLockingException(struct pt_regs *regs, unsigned long address,
+-			   unsigned long error_code)
+-{
+-	/* We treat cache locking instructions from the user
+-	 * as priv ops, in the future we could try to do
+-	 * something smarter
+-	 */
+-	if (error_code & (ESR_DLK|ESR_ILK))
+-		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
+-	return;
+-}
+-#endif /* CONFIG_FSL_BOOKE */
+-
+-#ifdef CONFIG_SPE
+-void SPEFloatingPointException(struct pt_regs *regs)
+-{
+-	unsigned long spefscr;
+-	int fpexc_mode;
+-	int code = 0;
+-
+-	spefscr = current->thread.spefscr;
+-	fpexc_mode = current->thread.fpexc_mode;
+-
+-	/* Hardware does not necessarily set sticky
+-	 * underflow/overflow/invalid flags */
+-	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
+-		code = FPE_FLTOVF;
+-		spefscr |= SPEFSCR_FOVFS;
+-	}
+-	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
+-		code = FPE_FLTUND;
+-		spefscr |= SPEFSCR_FUNFS;
+-	}
+-	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
+-		code = FPE_FLTDIV;
+-	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
+-		code = FPE_FLTINV;
+-		spefscr |= SPEFSCR_FINVS;
+-	}
+-	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
+-		code = FPE_FLTRES;
+-
+-	current->thread.spefscr = spefscr;
+-
+-	_exception(SIGFPE, regs, code, regs->nip);
+-	return;
+-}
+-#endif
+-
+ #ifdef CONFIG_BOOKE_WDT
+ /*
+  * Default handler for a Watchdog exception,
+diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
+index 98c1212..52b64fc 100644
+--- a/arch/ppc/kernel/vmlinux.lds.S
++++ b/arch/ppc/kernel/vmlinux.lds.S
+@@ -97,14 +97,14 @@ SECTIONS
+   __init_begin = .;
+   .init.text : {
+ 	_sinittext = .;
+-	*(.init.text)
++	INIT_TEXT
+ 	_einittext = .;
+   }
+   /* .exit.text is discarded at runtime, not link time,
+      to deal with references from __bug_table */
+-  .exit.text : { *(.exit.text) }
++  .exit.text : { EXIT_TEXT }
+   .init.data : {
+-    *(.init.data);
++    INIT_DATA
+     __vtop_table_begin = .;
+     *(.vtop_fixup);
+     __vtop_table_end = .;
+@@ -164,6 +164,6 @@ SECTIONS
+   /* Sections to be discarded. */
+   /DISCARD/ : {
+     *(.exitcall.exit)
+-    *(.exit.data)
++    EXIT_DATA
+   }
+ }
+diff --git a/arch/ppc/mm/44x_mmu.c b/arch/ppc/mm/44x_mmu.c
+index 6536a25..fbb577a 100644
+--- a/arch/ppc/mm/44x_mmu.c
++++ b/arch/ppc/mm/44x_mmu.c
+@@ -60,38 +60,28 @@ extern char etext[], _stext[];
+  * Just needed it declared someplace.
+  */
+ unsigned int tlb_44x_index = 0;
+-unsigned int tlb_44x_hwater = 62;
++unsigned int tlb_44x_hwater = PPC4XX_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
+ int icache_44x_need_flush;
+ 
+ /*
+  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
+  */
+-static void __init
+-ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
++static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
+ {
+-	unsigned long attrib = 0;
+-
+-	__asm__ __volatile__("\
+-	clrrwi	%2,%2,10\n\
+-	ori	%2,%2,%4\n\
+-	clrrwi	%1,%1,10\n\
+-	li	%0,0\n\
+-	ori	%0,%0,%5\n\
+-	tlbwe	%2,%3,%6\n\
+-	tlbwe	%1,%3,%7\n\
+-	tlbwe	%0,%3,%8"
++	__asm__ __volatile__(
++		"tlbwe	%2,%3,%4\n"
++		"tlbwe	%1,%3,%5\n"
++		"tlbwe	%0,%3,%6\n"
+ 	:
+-	: "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
+-	  "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
+-	  "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
++	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
++	  "r" (phys),
++	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
++	  "r" (tlb_44x_hwater--), /* slot for this TLB entry */
+ 	  "i" (PPC44x_TLB_PAGEID),
+ 	  "i" (PPC44x_TLB_XLAT),
+ 	  "i" (PPC44x_TLB_ATTRIB));
+ }
+ 
+-/*
+- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+- */
+ void __init MMU_init_hw(void)
+ {
+ 	flush_instruction_cache();
+@@ -99,22 +89,13 @@ void __init MMU_init_hw(void)
+ 
+ unsigned long __init mmu_mapin_ram(void)
+ {
+-	unsigned int pinned_tlbs = 1;
+-	int i;
+-
+-	/* Determine number of entries necessary to cover lowmem */
+-	pinned_tlbs = (unsigned int)
+-		(_ALIGN(total_lowmem, PPC_PIN_SIZE) >> PPC44x_PIN_SHIFT);
+-
+-	/* Write upper watermark to save location */
+-	tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
++	unsigned long addr;
+ 
+-	/* If necessary, set additional pinned TLBs */
+-	if (pinned_tlbs > 1)
+-		for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
+-			unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC_PIN_SIZE;
+-			ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
+-		}
++	/* Pin in enough TLBs to cover any lowmem not covered by the
++	 * initial 256M mapping established in head_44x.S */
++	for (addr = PPC_PIN_SIZE; addr < total_lowmem;
++	     addr += PPC_PIN_SIZE)
++		ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+ 
+ 	return total_lowmem;
+ }
+diff --git a/arch/ppc/mm/Makefile b/arch/ppc/mm/Makefile
+index cd3eae1..691ba2b 100644
+--- a/arch/ppc/mm/Makefile
++++ b/arch/ppc/mm/Makefile
+@@ -8,4 +8,3 @@ obj-y				:= fault.o init.o mem_pieces.o \
+ obj-$(CONFIG_PPC_STD_MMU)	+= hashtable.o ppc_mmu.o tlb.o
+ obj-$(CONFIG_40x)		+= 4xx_mmu.o
+ obj-$(CONFIG_44x)		+= 44x_mmu.o
+-obj-$(CONFIG_FSL_BOOKE)		+= fsl_booke_mmu.o
+diff --git a/arch/ppc/mm/fsl_booke_mmu.c b/arch/ppc/mm/fsl_booke_mmu.c
+deleted file mode 100644
+index 123da03..0000000
+--- a/arch/ppc/mm/fsl_booke_mmu.c
++++ /dev/null
+@@ -1,236 +0,0 @@
+-/*
+- * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
+- * E500 Book E processors.
+- *
+- * Copyright 2004 Freescale Semiconductor, Inc
+- *
+- * This file contains the routines for initializing the MMU
+- * on the 4xx series of chips.
+- *  -- paulus
+- *
+- *  Derived from arch/ppc/mm/init.c:
+- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+- *
+- *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+- *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+- *    Copyright (C) 1996 Paul Mackerras
+- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+- *
+- *  Derived from "arch/i386/mm/init.c"
+- *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+- *
+- *  This program is free software; you can redistribute it and/or
+- *  modify it under the terms of the GNU General Public License
+- *  as published by the Free Software Foundation; either version
+- *  2 of the License, or (at your option) any later version.
+- *
+- */
+-
+-#include <linux/signal.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/errno.h>
+-#include <linux/string.h>
+-#include <linux/types.h>
+-#include <linux/ptrace.h>
+-#include <linux/mman.h>
+-#include <linux/mm.h>
+-#include <linux/swap.h>
+-#include <linux/stddef.h>
+-#include <linux/vmalloc.h>
+-#include <linux/init.h>
+-#include <linux/delay.h>
+-#include <linux/highmem.h>
+-
+-#include <asm/pgalloc.h>
+-#include <asm/prom.h>
+-#include <asm/io.h>
+-#include <asm/mmu_context.h>
+-#include <asm/pgtable.h>
+-#include <asm/mmu.h>
+-#include <asm/uaccess.h>
+-#include <asm/smp.h>
+-#include <asm/bootx.h>
+-#include <asm/machdep.h>
+-#include <asm/setup.h>
+-
+-extern void loadcam_entry(unsigned int index);
+-unsigned int tlbcam_index;
+-unsigned int num_tlbcam_entries;
+-static unsigned long __cam0, __cam1, __cam2;
+-extern unsigned long total_lowmem;
+-extern unsigned long __max_low_memory;
+-#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
+-
+-#define NUM_TLBCAMS	(16)
+-
+-struct tlbcam {
+-   	u32	MAS0;
+-	u32	MAS1;
+-	u32	MAS2;
+-	u32	MAS3;
+-	u32	MAS7;
+-} TLBCAM[NUM_TLBCAMS];
+-
+-struct tlbcamrange {
+-   	unsigned long start;
+-	unsigned long limit;
+-	phys_addr_t phys;
+-} tlbcam_addrs[NUM_TLBCAMS];
+-
+-extern unsigned int tlbcam_index;
+-
+-/*
+- * Return PA for this VA if it is mapped by a CAM, or 0
+- */
+-unsigned long v_mapped_by_tlbcam(unsigned long va)
+-{
+-	int b;
+-	for (b = 0; b < tlbcam_index; ++b)
+-		if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
+-			return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
+-	return 0;
+-}
+-
+-/*
+- * Return VA for a given PA or 0 if not mapped
+- */
+-unsigned long p_mapped_by_tlbcam(unsigned long pa)
+-{
+-	int b;
+-	for (b = 0; b < tlbcam_index; ++b)
+-		if (pa >= tlbcam_addrs[b].phys
+-	    	    && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+-		              +tlbcam_addrs[b].phys)
+-			return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
+-	return 0;
+-}
+-
+-/*
+- * Set up one of the I/D BAT (block address translation) register pairs.
+- * The parameters are not checked; in particular size must be a power
+- * of 4 between 4k and 256M.
+- */
+-void settlbcam(int index, unsigned long virt, phys_addr_t phys,
+-		unsigned int size, int flags, unsigned int pid)
+-{
+-	unsigned int tsize, lz;
+-
+-	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
+-	tsize = (21 - lz) / 2;
+-
+-#ifdef CONFIG_SMP
+-	if ((flags & _PAGE_NO_CACHE) == 0)
+-		flags |= _PAGE_COHERENT;
+-#endif
+-
+-	TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
+-	TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
+-	TLBCAM[index].MAS2 = virt & PAGE_MASK;
+-
+-	TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
+-	TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
+-	TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
+-	TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
+-	TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
+-
+-	TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
+-	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
+-
+-#ifndef CONFIG_KGDB /* want user access for breakpoints */
+-	if (flags & _PAGE_USER) {
+-	   TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+-	   TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+-	}
+-#else
+-	TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+-	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+-#endif
+-
+-	tlbcam_addrs[index].start = virt;
+-	tlbcam_addrs[index].limit = virt + size - 1;
+-	tlbcam_addrs[index].phys = phys;
+-
+-	loadcam_entry(index);
+-}
+-
+-void invalidate_tlbcam_entry(int index)
+-{
+-	TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
+-	TLBCAM[index].MAS1 = ~MAS1_VALID;
+-
+-	loadcam_entry(index);
+-}
+-
+-void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
+-		unsigned long cam2)
+-{
+-	settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
+-	tlbcam_index++;
+-	if (cam1) {
+-		tlbcam_index++;
+-		settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
+-	}
+-	if (cam2) {
+-		tlbcam_index++;
+-		settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
+-	}
+-}
+-
+-/*
+- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+- */
+-void __init MMU_init_hw(void)
+-{
+-	flush_instruction_cache();
+-}
+-
+-unsigned long __init mmu_mapin_ram(void)
+-{
+-	cam_mapin_ram(__cam0, __cam1, __cam2);
+-
+-	return __cam0 + __cam1 + __cam2;
+-}
+-
+-
+-void __init
+-adjust_total_lowmem(void)
+-{
+-	unsigned long max_low_mem = MAX_LOW_MEM;
+-	unsigned long cam_max = 0x10000000;
+-	unsigned long ram;
+-
+-	/* adjust CAM size to max_low_mem */
+-	if (max_low_mem < cam_max)
+-		cam_max = max_low_mem;
+-
+-	/* adjust lowmem size to max_low_mem */
+-	if (max_low_mem < total_lowmem)
+-		ram = max_low_mem;
+-	else
+-		ram = total_lowmem;
+-
+-	/* Calculate CAM values */
+-	__cam0 = 1UL << 2 * (__ilog2(ram) / 2);
+-	if (__cam0 > cam_max)
+-		__cam0 = cam_max;
+-	ram -= __cam0;
+-	if (ram) {
+-		__cam1 = 1UL << 2 * (__ilog2(ram) / 2);
+-		if (__cam1 > cam_max)
+-			__cam1 = cam_max;
+-		ram -= __cam1;
+-	}
+-	if (ram) {
+-		__cam2 = 1UL << 2 * (__ilog2(ram) / 2);
+-		if (__cam2 > cam_max)
+-			__cam2 = cam_max;
+-		ram -= __cam2;
+-	}
+-
+-	printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
+-			" CAM2=%ldMb residual: %ldMb\n",
+-			__cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
+-			(total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
+-	__max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
+-}
+diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
+index dd898d3..7444df3 100644
+--- a/arch/ppc/mm/init.c
++++ b/arch/ppc/mm/init.c
+@@ -241,12 +241,6 @@ void __init MMU_init(void)
+ 	if (__max_memory && total_memory > __max_memory)
+ 		total_memory = __max_memory;
+ 	total_lowmem = total_memory;
+-#ifdef CONFIG_FSL_BOOKE
+-	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
+-	 * entries, so we need to adjust lowmem to match the amount we can map
+-	 * in the fixed entries */
+-	adjust_total_lowmem();
+-#endif /* CONFIG_FSL_BOOKE */
+ 	if (total_lowmem > __max_low_memory) {
+ 		total_lowmem = __max_low_memory;
+ #ifndef CONFIG_HIGHMEM
+diff --git a/arch/ppc/mm/mmu_context.c b/arch/ppc/mm/mmu_context.c
+index 85afa7f..dacf45c 100644
+--- a/arch/ppc/mm/mmu_context.c
++++ b/arch/ppc/mm/mmu_context.c
+@@ -2,7 +2,7 @@
+  * This file contains the routines for handling the MMU on those
+  * PowerPC implementations where the MMU substantially follows the
+  * architecture specification.  This includes the 6xx, 7xx, 7xxx,
+- * 8260, and 83xx implementations but excludes the 8xx and 4xx.
++ * and 8260 implementations but excludes the 8xx and 4xx.
+  *  -- paulus
+  *
+  *  Derived from arch/ppc/mm/init.c:
+diff --git a/arch/ppc/mm/mmu_decl.h b/arch/ppc/mm/mmu_decl.h
+index b298b60..5f813e3 100644
+--- a/arch/ppc/mm/mmu_decl.h
++++ b/arch/ppc/mm/mmu_decl.h
+@@ -58,12 +58,6 @@ extern unsigned int num_tlbcam_entries;
+ extern void MMU_init_hw(void);
+ extern unsigned long mmu_mapin_ram(void);
+ 
+-#elif defined(CONFIG_FSL_BOOKE)
+-#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
+-extern void MMU_init_hw(void);
+-extern unsigned long mmu_mapin_ram(void);
+-extern void adjust_total_lowmem(void);
+-
+ #else
+ /* anything except 4xx or 8xx */
+ extern void MMU_init_hw(void);
+diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
+index 1f51e6c..fadacfd 100644
+--- a/arch/ppc/mm/pgtable.c
++++ b/arch/ppc/mm/pgtable.c
+@@ -42,10 +42,6 @@ int io_bat_index;
+ #define HAVE_BATS	1
+ #endif
+ 
+-#if defined(CONFIG_FSL_BOOKE)
+-#define HAVE_TLBCAM	1
+-#endif
+-
+ extern char etext[], _stext[];
+ 
+ #ifdef CONFIG_SMP
+@@ -63,15 +59,6 @@ void setbat(int index, unsigned long virt, unsigned long phys,
+ #define p_mapped_by_bats(x)	(0UL)
+ #endif /* HAVE_BATS */
+ 
+-#ifdef HAVE_TLBCAM
+-extern unsigned int tlbcam_index;
+-extern unsigned long v_mapped_by_tlbcam(unsigned long va);
+-extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
+-#else /* !HAVE_TLBCAM */
+-#define v_mapped_by_tlbcam(x)	(0UL)
+-#define p_mapped_by_tlbcam(x)	(0UL)
+-#endif /* HAVE_TLBCAM */
+-
+ #ifdef CONFIG_PTE_64BIT
+ /* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
+ #define PGDIR_ORDER	1
+@@ -213,9 +200,6 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
+ 	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
+ 		goto out;
+ 
+-	if ((v = p_mapped_by_tlbcam(p)))
+-		goto out;
+-
+ 	if (mem_init_done) {
+ 		struct vm_struct *area;
+ 		area = get_vm_area(size, VM_IOREMAP);
+@@ -341,18 +325,6 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
+ 	}
+ #endif /* HAVE_BATS */
+ 
+-#ifdef HAVE_TLBCAM
+-	/*
+-	 * Use a CAM for this if possible...
+-	 */
+-	if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
+-	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
+-		settlbcam(tlbcam_index, virt, phys, size, flags, 0);
+-		++tlbcam_index;
+-		return;
+-	}
+-#endif /* HAVE_TLBCAM */
+-
+ 	/* No BATs available, put it in the page tables. */
+ 	for (i = 0; i < size; i += PAGE_SIZE)
+ 		map_page(virt + i, phys + i, flags);
+diff --git a/arch/ppc/mm/ppc_mmu.c b/arch/ppc/mm/ppc_mmu.c
+index 973f1e6..0c1dc15 100644
+--- a/arch/ppc/mm/ppc_mmu.c
++++ b/arch/ppc/mm/ppc_mmu.c
+@@ -2,7 +2,7 @@
+  * This file contains the routines for handling the MMU on those
+  * PowerPC implementations where the MMU substantially follows the
+  * architecture specification.  This includes the 6xx, 7xx, 7xxx,
+- * 8260, and 83xx implementations but excludes the 8xx and 4xx.
++ * and 8260 implementations but excludes the 8xx and 4xx.
+  *  -- paulus
+  *
+  *  Derived from arch/ppc/mm/init.c:
+diff --git a/arch/ppc/platforms/83xx/Makefile b/arch/ppc/platforms/83xx/Makefile
+deleted file mode 100644
+index eb55341..0000000
+--- a/arch/ppc/platforms/83xx/Makefile
++++ /dev/null
+@@ -1,4 +0,0 @@
+-#
+-# Makefile for the PowerPC 83xx linux kernel.
+-#
+-obj-$(CONFIG_MPC834x_SYS)	+= mpc834x_sys.o
+diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.c b/arch/ppc/platforms/83xx/mpc834x_sys.c
+deleted file mode 100644
+index b84f8df..0000000
+--- a/arch/ppc/platforms/83xx/mpc834x_sys.c
++++ /dev/null
+@@ -1,346 +0,0 @@
+-/*
+- * MPC834x SYS board specific routines
+- *
+- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/seq_file.h>
+-#include <linux/root_dev.h>
+-#include <linux/serial.h>
+-#include <linux/tty.h>	/* for linux/serial_core.h */
+-#include <linux/serial_core.h>
+-#include <linux/initrd.h>
+-#include <linux/module.h>
+-#include <linux/fsl_devices.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/ipic.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc83xx.h>
+-#include <asm/irq.h>
+-#include <asm/kgdb.h>
+-#include <asm/ppc_sys.h>
+-#include <mm/mmu_decl.h>
+-
+-#include <syslib/ppc83xx_setup.h>
+-
+-#ifndef CONFIG_PCI
+-unsigned long isa_io_base = 0;
+-unsigned long isa_mem_base = 0;
+-#endif
+-
+-extern unsigned long total_memory;	/* in mm/init */
+-
+-unsigned char __res[sizeof (bd_t)];
+-
+-#ifdef CONFIG_PCI
+-int
+-mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+-{
+-	static char pci_irq_table[][4] =
+-	    /*
+-	     *      PCI IDSEL/INTPIN->INTLINE
+-	     *       A      B      C      D
+-	     */
+-	{
+-		{PIRQA, PIRQB, PIRQC, PIRQD},	/* idsel 0x11 */
+-		{PIRQC, PIRQD, PIRQA, PIRQB},	/* idsel 0x12 */
+-		{PIRQD, PIRQA, PIRQB, PIRQC},	/* idsel 0x13 */
+-		{0, 0, 0, 0},
+-		{PIRQA, PIRQB, PIRQC, PIRQD},	/* idsel 0x15 */
+-		{PIRQD, PIRQA, PIRQB, PIRQC},	/* idsel 0x16 */
+-		{PIRQC, PIRQD, PIRQA, PIRQB},	/* idsel 0x17 */
+-		{PIRQB, PIRQC, PIRQD, PIRQA},	/* idsel 0x18 */
+-		{0, 0, 0, 0},			/* idsel 0x19 */
+-		{0, 0, 0, 0},			/* idsel 0x20 */
+-	};
+-
+-	const long min_idsel = 0x11, max_idsel = 0x20, irqs_per_slot = 4;
+-	return PCI_IRQ_TABLE_LOOKUP;
+-}
+-
+-int
+-mpc83xx_exclude_device(u_char bus, u_char devfn)
+-{
+-	return PCIBIOS_SUCCESSFUL;
+-}
+-#endif /* CONFIG_PCI */
+-
+-/* ************************************************************************
+- *
+- * Setup the architecture
+- *
+- */
+-static void __init
+-mpc834x_sys_setup_arch(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-	struct gianfar_platform_data *pdata;
+-	struct gianfar_mdio_data *mdata;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	/* Set loops_per_jiffy to a half-way reasonable value,
+-	   for use until calibrate_delay gets called. */
+-	loops_per_jiffy = freq / HZ;
+-
+-#ifdef CONFIG_PCI
+-	/* setup PCI host bridges */
+-	mpc83xx_setup_hose();
+-#endif
+-	mpc83xx_early_serial_map();
+-
+-	/* setup the board related info for the MDIO bus */
+-	mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC83xx_MDIO);
+-
+-	mdata->irq[0] = MPC83xx_IRQ_EXT1;
+-	mdata->irq[1] = MPC83xx_IRQ_EXT2;
+-	mdata->irq[2] = PHY_POLL;
+-	mdata->irq[31] = PHY_POLL;
+-
+-	/* setup the board related information for the enet controllers */
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC1);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 0;
+-		memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC2);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 1;
+-		memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+-	}
+-
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (initrd_start)
+-		ROOT_DEV = Root_RAM0;
+-	else
+-#endif
+-#ifdef  CONFIG_ROOT_NFS
+-		ROOT_DEV = Root_NFS;
+-#else
+-		ROOT_DEV = Root_HDA1;
+-#endif
+-}
+-
+-static void __init
+-mpc834x_sys_map_io(void)
+-{
+-	/* we steal the lowest ioremap addr for virt space */
+-	io_block_mapping(VIRT_IMMRBAR, immrbar, 1024*1024, _PAGE_IO);
+-}
+-
+-int
+-mpc834x_sys_show_cpuinfo(struct seq_file *m)
+-{
+-	uint pvid, svid, phid1;
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	pvid = mfspr(SPRN_PVR);
+-	svid = mfspr(SPRN_SVR);
+-
+-	seq_printf(m, "Vendor\t\t: Freescale Inc.\n");
+-	seq_printf(m, "Machine\t\t: mpc%s sys\n", cur_ppc_sys_spec->ppc_sys_name);
+-	seq_printf(m, "core clock\t: %d MHz\n"
+-			"bus  clock\t: %d MHz\n",
+-			(int)(binfo->bi_intfreq / 1000000),
+-			(int)(binfo->bi_busfreq / 1000000));
+-	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+-	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+-
+-	/* Display cpu Pll setting */
+-	phid1 = mfspr(SPRN_HID1);
+-	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+-
+-	/* Display the amount of memory */
+-	seq_printf(m, "Memory\t\t: %d MB\n", (int)(binfo->bi_memsize / (1024 * 1024)));
+-
+-	return 0;
+-}
+-
+-
+-void __init
+-mpc834x_sys_init_IRQ(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	u8 senses[8] = {
+-		0,			/* EXT 0 */
+-		IRQ_SENSE_LEVEL,	/* EXT 1 */
+-		IRQ_SENSE_LEVEL,	/* EXT 2 */
+-		0,			/* EXT 3 */
+-#ifdef CONFIG_PCI
+-		IRQ_SENSE_LEVEL,	/* EXT 4 */
+-		IRQ_SENSE_LEVEL,	/* EXT 5 */
+-		IRQ_SENSE_LEVEL,	/* EXT 6 */
+-		IRQ_SENSE_LEVEL,	/* EXT 7 */
+-#else
+-		0,			/* EXT 4 */
+-		0,			/* EXT 5 */
+-		0,			/* EXT 6 */
+-		0,			/* EXT 7 */
+-#endif
+-	};
+-
+-	ipic_init(binfo->bi_immr_base + 0x00700, 0, MPC83xx_IPIC_IRQ_OFFSET, senses, 8);
+-
+-	/* Initialize the default interrupt mapping priorities,
+-	 * in case the boot rom changed something on us.
+-	 */
+-	ipic_set_default_priority();
+-}
+-
+-#if defined(CONFIG_I2C_MPC) && defined(CONFIG_SENSORS_DS1374)
+-extern ulong	ds1374_get_rtc_time(void);
+-extern int	ds1374_set_rtc_time(ulong);
+-
+-static int __init
+-mpc834x_rtc_hookup(void)
+-{
+-	struct timespec	tv;
+-
+-	ppc_md.get_rtc_time = ds1374_get_rtc_time;
+-	ppc_md.set_rtc_time = ds1374_set_rtc_time;
+-
+-	tv.tv_nsec = 0;
+-	tv.tv_sec = (ppc_md.get_rtc_time)();
+-	do_settimeofday(&tv);
+-
+-	return 0;
+-}
+-late_initcall(mpc834x_rtc_hookup);
+-#endif
+-static __inline__ void
+-mpc834x_sys_set_bat(void)
+-{
+-	/* we steal the lowest ioremap addr for virt space */
+-	mb();
+-	mtspr(SPRN_DBAT1U, VIRT_IMMRBAR | 0x1e);
+-	mtspr(SPRN_DBAT1L, immrbar | 0x2a);
+-	mb();
+-}
+-
+-void __init
+-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+-	      unsigned long r6, unsigned long r7)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	/* parse_bootinfo must always be called first */
+-	parse_bootinfo(find_bootinfo());
+-
+-	/*
+-	 * If we were passed in a board information, copy it into the
+-	 * residual data area.
+-	 */
+-	if (r3) {
+-		memcpy((void *) __res, (void *) (r3 + KERNELBASE),
+-		       sizeof (bd_t));
+-	}
+-
+-#if defined(CONFIG_BLK_DEV_INITRD)
+-	/*
+-	 * If the init RAM disk has been configured in, and there's a valid
+-	 * starting address for it, set it up.
+-	 */
+-	if (r4) {
+-		initrd_start = r4 + KERNELBASE;
+-		initrd_end = r5 + KERNELBASE;
+-	}
+-#endif /* CONFIG_BLK_DEV_INITRD */
+-
+-	/* Copy the kernel command line arguments to a safe place. */
+-	if (r6) {
+-		*(char *) (r7 + KERNELBASE) = 0;
+-		strcpy(cmd_line, (char *) (r6 + KERNELBASE));
+-	}
+-
+-	immrbar = binfo->bi_immr_base;
+-
+-	mpc834x_sys_set_bat();
+-
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+-	{
+-		struct uart_port p;
+-
+-		memset(&p, 0, sizeof (p));
+-		p.iotype = UPIO_MEM;
+-		p.membase = (unsigned char __iomem *)(VIRT_IMMRBAR + 0x4500);
+-		p.uartclk = binfo->bi_busfreq;
+-
+-		gen550_init(0, &p);
+-
+-		memset(&p, 0, sizeof (p));
+-		p.iotype = UPIO_MEM;
+-		p.membase = (unsigned char __iomem *)(VIRT_IMMRBAR + 0x4600);
+-		p.uartclk = binfo->bi_busfreq;
+-
+-		gen550_init(1, &p);
+-	}
+-#endif
+-
+-	identify_ppc_sys_by_id(mfspr(SPRN_SVR));
+-
+-	/* setup the PowerPC module struct */
+-	ppc_md.setup_arch = mpc834x_sys_setup_arch;
+-	ppc_md.show_cpuinfo = mpc834x_sys_show_cpuinfo;
+-
+-	ppc_md.init_IRQ = mpc834x_sys_init_IRQ;
+-	ppc_md.get_irq = ipic_get_irq;
+-
+-	ppc_md.restart = mpc83xx_restart;
+-	ppc_md.power_off = mpc83xx_power_off;
+-	ppc_md.halt = mpc83xx_halt;
+-
+-	ppc_md.find_end_of_memory = mpc83xx_find_end_of_memory;
+-	ppc_md.setup_io_mappings  = mpc834x_sys_map_io;
+-
+-	ppc_md.time_init = mpc83xx_time_init;
+-	ppc_md.set_rtc_time = NULL;
+-	ppc_md.get_rtc_time = NULL;
+-	ppc_md.calibrate_decr = mpc83xx_calibrate_decr;
+-
+-	ppc_md.early_serial_map = mpc83xx_early_serial_map;
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+-	ppc_md.progress = gen550_progress;
+-#endif	/* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("mpc834x_sys_init(): exit", 0);
+-
+-	return;
+-}
+diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.h b/arch/ppc/platforms/83xx/mpc834x_sys.h
+deleted file mode 100644
+index d2e06c9..0000000
+--- a/arch/ppc/platforms/83xx/mpc834x_sys.h
++++ /dev/null
+@@ -1,54 +0,0 @@
+-/*
+- * MPC834X SYS common board definitions
+- *
+- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor, Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __MACH_MPC83XX_SYS_H__
+-#define __MACH_MPC83XX_SYS_H__
+-
+-#include <linux/init.h>
+-#include <syslib/ppc83xx_setup.h>
+-#include <asm/ppcboot.h>
+-
+-#define VIRT_IMMRBAR		((uint)0xfe000000)
+-
+-#define BCSR_PHYS_ADDR		((uint)0xf8000000)
+-#define BCSR_SIZE		((uint)(32 * 1024))
+-
+-#define BCSR_MISC_REG2_OFF	0x07
+-#define BCSR_MISC_REG2_PORESET	0x01
+-
+-#define BCSR_MISC_REG3_OFF	0x08
+-#define BCSR_MISC_REG3_CNFLOCK	0x80
+-
+-#define PIRQA	MPC83xx_IRQ_EXT4
+-#define PIRQB	MPC83xx_IRQ_EXT5
+-#define PIRQC	MPC83xx_IRQ_EXT6
+-#define PIRQD	MPC83xx_IRQ_EXT7
+-
+-#define MPC83xx_PCI1_LOWER_IO	0x00000000
+-#define MPC83xx_PCI1_UPPER_IO	0x00ffffff
+-#define MPC83xx_PCI1_LOWER_MEM	0x80000000
+-#define MPC83xx_PCI1_UPPER_MEM	0x9fffffff
+-#define MPC83xx_PCI1_IO_BASE	0xe2000000
+-#define MPC83xx_PCI1_MEM_OFFSET	0x00000000
+-#define MPC83xx_PCI1_IO_SIZE	0x01000000
+-
+-#define MPC83xx_PCI2_LOWER_IO	0x00000000
+-#define MPC83xx_PCI2_UPPER_IO	0x00ffffff
+-#define MPC83xx_PCI2_LOWER_MEM	0xa0000000
+-#define MPC83xx_PCI2_UPPER_MEM	0xbfffffff
+-#define MPC83xx_PCI2_IO_BASE	0xe3000000
+-#define MPC83xx_PCI2_MEM_OFFSET	0x00000000
+-#define MPC83xx_PCI2_IO_SIZE	0x01000000
+-
+-#endif                /* __MACH_MPC83XX_SYS_H__ */
+diff --git a/arch/ppc/platforms/85xx/Kconfig b/arch/ppc/platforms/85xx/Kconfig
+deleted file mode 100644
+index 6f2d0ad..0000000
+--- a/arch/ppc/platforms/85xx/Kconfig
++++ /dev/null
+@@ -1,106 +0,0 @@
+-config 85xx
+-	bool
+-	depends on E500
+-	default y
+-
+-config PPC_INDIRECT_PCI_BE
+-	bool
+-	depends on 85xx
+-	default y
+-
+-menu "Freescale 85xx options"
+-	depends on E500
+-
+-choice
+-	prompt "Machine Type"
+-	depends on 85xx
+-	default MPC8540_ADS
+-
+-config MPC8540_ADS
+-	bool "Freescale MPC8540 ADS"
+-	help
+-	  This option enables support for the MPC 8540 ADS evaluation board.
+-
+-config MPC8548_CDS
+-	bool "Freescale MPC8548 CDS"
+-	help
+-	  This option enables support for the MPC8548 CDS evaluation board.
+-
+-config MPC8555_CDS
+-	bool "Freescale MPC8555 CDS"
+-	help
+-	  This option enables support for the MPC8555 CDS evaluation board.
+-
+-config MPC8560_ADS
+-	bool "Freescale MPC8560 ADS"
+-	help
+-	  This option enables support for the MPC 8560 ADS evaluation board.
+-
+-config SBC8560
+-	bool "WindRiver PowerQUICC III SBC8560"
+-	help
+-	  This option enables support for the WindRiver PowerQUICC III
+-	  SBC8560 board.
+-
+-config STX_GP3
+-	bool "Silicon Turnkey Express GP3"
+-	help
+-	  This option enables support for the Silicon Turnkey Express GP3
+-	  board.
+-
+-config TQM8540
+-	bool "TQ Components TQM8540"
+-	help
+-	  This option enables support for the TQ Components TQM8540 board.
+-
+-config TQM8541
+-	bool "TQ Components TQM8541"
+-	help
+-	  This option enables support for the TQ Components TQM8541 board.
+-
+-config TQM8555
+-	bool "TQ Components TQM8555"
+-	help
+-	  This option enables support for the TQ Components TQM8555 board.
+-
+-config TQM8560
+-	bool "TQ Components TQM8560"
+-	help
+-	  This option enables support for the TQ Components TQM8560 board.
+-
+-endchoice
+-
+-# It's often necessary to know the specific 85xx processor type.
+-# Fortunately, it is implied (so far) from the board type, so we
+-# don't need to ask more redundant questions.
+-config MPC8540
+-	bool
+-	depends on MPC8540_ADS || TQM8540
+-	default y
+-
+-config MPC8548
+-	bool
+-	depends on MPC8548_CDS
+-	default y
+-
+-config MPC8555
+-	bool
+-	depends on MPC8555_CDS || TQM8541 || TQM8555
+-	default y
+-
+-config MPC8560
+-	bool
+-	depends on SBC8560 || MPC8560_ADS || STX_GP3 || TQM8560
+-	default y
+-
+-config 85xx_PCI2
+-	bool "Support for 2nd PCI host controller"
+-	depends on MPC8555_CDS
+-	default y
+-
+-config PPC_GEN550
+-	bool
+-	depends on MPC8540 || SBC8560 || MPC8555
+-	default y
+-
+-endmenu
+diff --git a/arch/ppc/platforms/85xx/Makefile b/arch/ppc/platforms/85xx/Makefile
+deleted file mode 100644
+index 6c4753c..0000000
+--- a/arch/ppc/platforms/85xx/Makefile
++++ /dev/null
+@@ -1,13 +0,0 @@
+-#
+-# Makefile for the PowerPC 85xx linux kernel.
+-#
+-obj-$(CONFIG_MPC8540_ADS)	+= mpc85xx_ads_common.o mpc8540_ads.o
+-obj-$(CONFIG_MPC8548_CDS)	+= mpc85xx_cds_common.o
+-obj-$(CONFIG_MPC8555_CDS)	+= mpc85xx_cds_common.o
+-obj-$(CONFIG_MPC8560_ADS)	+= mpc85xx_ads_common.o mpc8560_ads.o
+-obj-$(CONFIG_SBC8560)		+= sbc85xx.o sbc8560.o
+-obj-$(CONFIG_STX_GP3)		+= stx_gp3.o
+-obj-$(CONFIG_TQM8540)		+= tqm85xx.o
+-obj-$(CONFIG_TQM8541)		+= tqm85xx.o
+-obj-$(CONFIG_TQM8555)		+= tqm85xx.o
+-obj-$(CONFIG_TQM8560)		+= tqm85xx.o
+diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c
+deleted file mode 100644
+index 00a3ba5..0000000
+--- a/arch/ppc/platforms/85xx/mpc8540_ads.c
++++ /dev/null
+@@ -1,226 +0,0 @@
+-/*
+- * MPC8540ADS board specific routines
+- *
+- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/seq_file.h>
+-#include <linux/root_dev.h>
+-#include <linux/serial.h>
+-#include <linux/tty.h>	/* for linux/serial_core.h */
+-#include <linux/serial_core.h>
+-#include <linux/initrd.h>
+-#include <linux/module.h>
+-#include <linux/fsl_devices.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/open_pic.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/kgdb.h>
+-#include <asm/ppc_sys.h>
+-#include <mm/mmu_decl.h>
+-
+-#include <syslib/ppc85xx_setup.h>
+-
+-/* ************************************************************************
+- *
+- * Setup the architecture
+- *
+- */
+-static void __init
+-mpc8540ads_setup_arch(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-	struct gianfar_platform_data *pdata;
+-	struct gianfar_mdio_data *mdata;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("mpc8540ads_setup_arch()", 0);
+-
+-	/* Set loops_per_jiffy to a half-way reasonable value,
+-	   for use until calibrate_delay gets called. */
+-	loops_per_jiffy = freq / HZ;
+-
+-#ifdef CONFIG_PCI
+-	/* setup PCI host bridges */
+-	mpc85xx_setup_hose();
+-#endif
+-
+-#ifdef CONFIG_SERIAL_8250
+-	mpc85xx_early_serial_map();
+-#endif
+-
+-#ifdef CONFIG_SERIAL_TEXT_DEBUG
+-	/* Invalidate the entry we stole earlier the serial ports
+-	 * should be properly mapped */
+-	invalidate_tlbcam_entry(num_tlbcam_entries - 1);
+-#endif
+-
+-	/* setup the board related info for the MDIO bus */
+-	mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
+-
+-	mdata->irq[0] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[1] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[2] = PHY_POLL;
+-	mdata->irq[3] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[31] = PHY_POLL;
+-
+-	/* setup the board related information for the enet controllers */
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 0;
+-		memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 1;
+-		memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC);
+-	if (pdata) {
+-		pdata->board_flags = 0;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 3;
+-		memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6);
+-	}
+-
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (initrd_start)
+-		ROOT_DEV = Root_RAM0;
+-	else
+-#endif
+-#ifdef  CONFIG_ROOT_NFS
+-		ROOT_DEV = Root_NFS;
+-#else
+-		ROOT_DEV = Root_HDA1;
+-#endif
+-}
+-
+-/* ************************************************************************ */
+-void __init
+-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+-	      unsigned long r6, unsigned long r7)
+-{
+-	/* parse_bootinfo must always be called first */
+-	parse_bootinfo(find_bootinfo());
+-
+-	/*
+-	 * If we were passed in a board information, copy it into the
+-	 * residual data area.
+-	 */
+-	if (r3) {
+-		memcpy((void *) __res, (void *) (r3 + KERNELBASE),
+-		       sizeof (bd_t));
+-	}
+-#ifdef CONFIG_SERIAL_TEXT_DEBUG
+-	{
+-		bd_t *binfo = (bd_t *) __res;
+-		struct uart_port p;
+-
+-		/* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */
+-		settlbcam(num_tlbcam_entries - 1, binfo->bi_immr_base,
+-			  binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0);
+-
+-		memset(&p, 0, sizeof (p));
+-		p.iotype = UPIO_MEM;
+-		p.membase = (void *) binfo->bi_immr_base + MPC85xx_UART0_OFFSET;
+-		p.uartclk = binfo->bi_busfreq;
+-
+-		gen550_init(0, &p);
+-
+-		memset(&p, 0, sizeof (p));
+-		p.iotype = UPIO_MEM;
+-		p.membase = (void *) binfo->bi_immr_base + MPC85xx_UART1_OFFSET;
+-		p.uartclk = binfo->bi_busfreq;
+-
+-		gen550_init(1, &p);
+-	}
+-#endif
+-
+-#if defined(CONFIG_BLK_DEV_INITRD)
+-	/*
+-	 * If the init RAM disk has been configured in, and there's a valid
+-	 * starting address for it, set it up.
+-	 */
+-	if (r4) {
+-		initrd_start = r4 + KERNELBASE;
+-		initrd_end = r5 + KERNELBASE;
+-	}
+-#endif				/* CONFIG_BLK_DEV_INITRD */
+-
+-	/* Copy the kernel command line arguments to a safe place. */
+-
+-	if (r6) {
+-		*(char *) (r7 + KERNELBASE) = 0;
+-		strcpy(cmd_line, (char *) (r6 + KERNELBASE));
+-	}
+-
+-	identify_ppc_sys_by_id(mfspr(SPRN_SVR));
+-
+-	/* setup the PowerPC module struct */
+-	ppc_md.setup_arch = mpc8540ads_setup_arch;
+-	ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo;
+-
+-	ppc_md.init_IRQ = mpc85xx_ads_init_IRQ;
+-	ppc_md.get_irq = openpic_get_irq;
+-
+-	ppc_md.restart = mpc85xx_restart;
+-	ppc_md.power_off = mpc85xx_power_off;
+-	ppc_md.halt = mpc85xx_halt;
+-
+-	ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
+-
+-	ppc_md.time_init = NULL;
+-	ppc_md.set_rtc_time = NULL;
+-	ppc_md.get_rtc_time = NULL;
+-	ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
+-
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+-	ppc_md.progress = gen550_progress;
+-#endif	/* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_KGDB)
+-	ppc_md.early_serial_map = mpc85xx_early_serial_map;
+-#endif	/* CONFIG_SERIAL_8250 && CONFIG_KGDB */
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("mpc8540ads_init(): exit", 0);
+-
+-	return;
+-}
+diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.h b/arch/ppc/platforms/85xx/mpc8540_ads.h
+deleted file mode 100644
+index 7559f9e..0000000
+--- a/arch/ppc/platforms/85xx/mpc8540_ads.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/*
+- * MPC8540ADS board definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __MACH_MPC8540ADS_H__
+-#define __MACH_MPC8540ADS_H__
+-
+-#include <linux/initrd.h>
+-#include <syslib/ppc85xx_setup.h>
+-#include <platforms/85xx/mpc85xx_ads_common.h>
+-
+-#endif /* __MACH_MPC8540ADS_H__ */
+diff --git a/arch/ppc/platforms/85xx/mpc8555_cds.h b/arch/ppc/platforms/85xx/mpc8555_cds.h
+deleted file mode 100644
+index 4f79c37..0000000
+--- a/arch/ppc/platforms/85xx/mpc8555_cds.h
++++ /dev/null
+@@ -1,23 +0,0 @@
+-/*
+- * MPC8555CDS board definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __MACH_MPC8555CDS_H__
+-#define __MACH_MPC8555CDS_H__
+-
+-#include <syslib/ppc85xx_setup.h>
+-#include <platforms/85xx/mpc85xx_cds_common.h>
+-
+-#define CPM_MAP_ADDR	(CCSRBAR + MPC85xx_CPM_OFFSET)
+-
+-#endif /* __MACH_MPC8555CDS_H__ */
+diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
+deleted file mode 100644
+index 3a06046..0000000
+--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
++++ /dev/null
+@@ -1,303 +0,0 @@
+-/*
+- * MPC8560ADS board specific routines
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/seq_file.h>
+-#include <linux/root_dev.h>
+-#include <linux/serial.h>
+-#include <linux/tty.h>	/* for linux/serial_core.h */
+-#include <linux/serial_core.h>
+-#include <linux/initrd.h>
+-#include <linux/module.h>
+-#include <linux/fsl_devices.h>
+-#include <linux/fs_enet_pd.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/open_pic.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/kgdb.h>
+-#include <asm/ppc_sys.h>
+-#include <asm/cpm2.h>
+-#include <mm/mmu_decl.h>
+-
+-#include <syslib/cpm2_pic.h>
+-#include <syslib/ppc85xx_common.h>
+-#include <syslib/ppc85xx_setup.h>
+-
+-
+-/* ************************************************************************
+- *
+- * Setup the architecture
+- *
+- */
+-static void init_fcc_ioports(void)
+-{
+-	struct immap *immap;
+-	struct io_port *io;
+-	u32 tempval;
+-
+-	immap = cpm2_immr;
+-
+-	io = &immap->im_ioport;
+-	/* FCC2/3 are on the ports B/C. */
+-	tempval = in_be32(&io->iop_pdirb);
+-	tempval &= ~PB2_DIRB0;
+-	tempval |= PB2_DIRB1;
+-	out_be32(&io->iop_pdirb, tempval);
+-
+-	tempval = in_be32(&io->iop_psorb);
+-	tempval &= ~PB2_PSORB0;
+-	tempval |= PB2_PSORB1;
+-	out_be32(&io->iop_psorb, tempval);
+-
+-	tempval = in_be32(&io->iop_pparb);
+-	tempval |= (PB2_DIRB0 | PB2_DIRB1);
+-	out_be32(&io->iop_pparb, tempval);
+-
+-	tempval = in_be32(&io->iop_pdirb);
+-	tempval &= ~PB3_DIRB0;
+-	tempval |= PB3_DIRB1;
+-	out_be32(&io->iop_pdirb, tempval);
+-
+-	tempval = in_be32(&io->iop_psorb);
+-	tempval &= ~PB3_PSORB0;
+-	tempval |= PB3_PSORB1;
+-	out_be32(&io->iop_psorb, tempval);
+-
+-	tempval = in_be32(&io->iop_pparb);
+-	tempval |= (PB3_DIRB0 | PB3_DIRB1);
+-	out_be32(&io->iop_pparb, tempval);
+-
+-        tempval = in_be32(&io->iop_pdirc);
+-        tempval |= PC3_DIRC1;
+-        out_be32(&io->iop_pdirc, tempval);
+-
+-        tempval = in_be32(&io->iop_pparc);
+-        tempval |= PC3_DIRC1;
+-        out_be32(&io->iop_pparc, tempval);
+-
+-	/* Port C has clocks......  */
+-	tempval = in_be32(&io->iop_psorc);
+-	tempval &= ~(CLK_TRX);
+-	out_be32(&io->iop_psorc, tempval);
+-
+-	tempval = in_be32(&io->iop_pdirc);
+-	tempval &= ~(CLK_TRX);
+-	out_be32(&io->iop_pdirc, tempval);
+-	tempval = in_be32(&io->iop_pparc);
+-	tempval |= (CLK_TRX);
+-	out_be32(&io->iop_pparc, tempval);
+-
+-	/* Configure Serial Interface clock routing.
+-	 * First,  clear all FCC bits to zero,
+-	 * then set the ones we want.
+-	 */
+-	immap->im_cpmux.cmx_fcr &= ~(CPMUX_CLK_MASK);
+-	immap->im_cpmux.cmx_fcr |= CPMUX_CLK_ROUTE;
+-}
+-
+-static void __init
+-mpc8560ads_setup_arch(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-	struct gianfar_platform_data *pdata;
+-	struct gianfar_mdio_data *mdata;
+-	struct fs_platform_info *fpi;
+-
+-	cpm2_reset();
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("mpc8560ads_setup_arch()", 0);
+-
+-	/* Set loops_per_jiffy to a half-way reasonable value,
+-	   for use until calibrate_delay gets called. */
+-	loops_per_jiffy = freq / HZ;
+-
+-#ifdef CONFIG_PCI
+-	/* setup PCI host bridges */
+-	mpc85xx_setup_hose();
+-#endif
+-
+-	/* setup the board related info for the MDIO bus */
+-	mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
+-
+-	mdata->irq[0] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[1] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[2] = PHY_POLL;
+-	mdata->irq[3] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[31] = PHY_POLL;
+-
+-	/* setup the board related information for the enet controllers */
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 0;
+-		memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 1;
+-		memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+-	}
+-
+-	init_fcc_ioports();
+-	ppc_sys_device_remove(MPC85xx_CPM_FCC1);
+-
+-	fpi = (struct fs_platform_info *) ppc_sys_get_pdata(MPC85xx_CPM_FCC2);
+-	if (fpi) {
+-		memcpy(fpi->macaddr, binfo->bi_enet2addr, 6);
+-		fpi->bus_id = "0:02";
+-		fpi->phy_addr = 2;
+-		fpi->dpram_offset = (u32)cpm2_immr->im_dprambase;
+-		fpi->fcc_regs_c = (u32)&cpm2_immr->im_fcc_c[1];
+-	}
+-
+-	fpi = (struct fs_platform_info *) ppc_sys_get_pdata(MPC85xx_CPM_FCC3);
+-	if (fpi) {
+-		memcpy(fpi->macaddr, binfo->bi_enet2addr, 6);
+-		fpi->macaddr[5] += 1;
+-		fpi->bus_id = "0:03";
+-		fpi->phy_addr = 3;
+-		fpi->dpram_offset = (u32)cpm2_immr->im_dprambase;
+-		fpi->fcc_regs_c = (u32)&cpm2_immr->im_fcc_c[2];
+-	}
+-
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (initrd_start)
+-		ROOT_DEV = Root_RAM0;
+-	else
+-#endif
+-#ifdef  CONFIG_ROOT_NFS
+-		ROOT_DEV = Root_NFS;
+-#else
+-		ROOT_DEV = Root_HDA1;
+-#endif
+-}
+-
+-static irqreturn_t cpm2_cascade(int irq, void *dev_id)
+-{
+-	while ((irq = cpm2_get_irq()) >= 0)
+-		__do_IRQ(irq);
+-	return IRQ_HANDLED;
+-}
+-
+-static struct irqaction cpm2_irqaction = {
+-	.handler = cpm2_cascade,
+-	.flags = IRQF_DISABLED,
+-	.mask = CPU_MASK_NONE,
+-	.name = "cpm2_cascade",
+-};
+-
+-static void __init
+-mpc8560_ads_init_IRQ(void)
+-{
+-	/* Setup OpenPIC */
+-	mpc85xx_ads_init_IRQ();
+-
+-	/* Setup CPM2 PIC */
+-        cpm2_init_IRQ();
+-
+-	setup_irq(MPC85xx_IRQ_CPM, &cpm2_irqaction);
+-
+-	return;
+-}
+-
+-
+-
+-/* ************************************************************************ */
+-void __init
+-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+-	      unsigned long r6, unsigned long r7)
+-{
+-	/* parse_bootinfo must always be called first */
+-	parse_bootinfo(find_bootinfo());
+-
+-	/*
+-	 * If we were passed in a board information, copy it into the
+-	 * residual data area.
+-	 */
+-	if (r3) {
+-		memcpy((void *) __res, (void *) (r3 + KERNELBASE),
+-		       sizeof (bd_t));
+-
+-	}
+-#if defined(CONFIG_BLK_DEV_INITRD)
+-	/*
+-	 * If the init RAM disk has been configured in, and there's a valid
+-	 * starting address for it, set it up.
+-	 */
+-	if (r4) {
+-		initrd_start = r4 + KERNELBASE;
+-		initrd_end = r5 + KERNELBASE;
+-	}
+-#endif				/* CONFIG_BLK_DEV_INITRD */
+-
+-	/* Copy the kernel command line arguments to a safe place. */
+-
+-	if (r6) {
+-		*(char *) (r7 + KERNELBASE) = 0;
+-		strcpy(cmd_line, (char *) (r6 + KERNELBASE));
+-	}
+-
+-	identify_ppc_sys_by_id(mfspr(SPRN_SVR));
+-
+-	/* setup the PowerPC module struct */
+-	ppc_md.setup_arch = mpc8560ads_setup_arch;
+-	ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo;
+-
+-	ppc_md.init_IRQ = mpc8560_ads_init_IRQ;
+-	ppc_md.get_irq = openpic_get_irq;
+-
+-	ppc_md.restart = mpc85xx_restart;
+-	ppc_md.power_off = mpc85xx_power_off;
+-	ppc_md.halt = mpc85xx_halt;
+-
+-	ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
+-
+-	ppc_md.time_init = NULL;
+-	ppc_md.set_rtc_time = NULL;
+-	ppc_md.get_rtc_time = NULL;
+-	ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("mpc8560ads_init(): exit", 0);
+-
+-	return;
+-}
+diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.h b/arch/ppc/platforms/85xx/mpc8560_ads.h
+deleted file mode 100644
+index 9f185ab..0000000
+--- a/arch/ppc/platforms/85xx/mpc8560_ads.h
++++ /dev/null
+@@ -1,24 +0,0 @@
+-/*
+- * MPC8540ADS board definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __MACH_MPC8560ADS_H
+-#define __MACH_MPC8560ADS_H
+-
+-#include <syslib/ppc85xx_setup.h>
+-#include <platforms/85xx/mpc85xx_ads_common.h>
+-
+-#define CPM_MAP_ADDR	(CCSRBAR + MPC85xx_CPM_OFFSET)
+-#define PHY_INTERRUPT	MPC85xx_IRQ_EXT7
+-
+-#endif				/* __MACH_MPC8560ADS_H */
+diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
+deleted file mode 100644
+index 674806e..0000000
+--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
++++ /dev/null
+@@ -1,198 +0,0 @@
+-/*
+- * MPC85xx ADS board common routines
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/seq_file.h>
+-#include <linux/serial.h>
+-#include <linux/module.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/open_pic.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/ppc_sys.h>
+-
+-#include <mm/mmu_decl.h>
+-
+-#include <syslib/ppc85xx_rio.h>
+-
+-#include <platforms/85xx/mpc85xx_ads_common.h>
+-
+-#ifndef CONFIG_PCI
+-unsigned long isa_io_base = 0;
+-unsigned long isa_mem_base = 0;
+-#endif
+-
+-extern unsigned long total_memory;	/* in mm/init */
+-
+-unsigned char __res[sizeof (bd_t)];
+-
+-/* Internal interrupts are all Level Sensitive, and Positive Polarity */
+-static u_char mpc85xx_ads_openpic_initsenses[] __initdata = {
+-	MPC85XX_INTERNAL_IRQ_SENSES,
+-	0x0,						/* External  0: */
+-#if defined(CONFIG_PCI)
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 1: PCI slot 0 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 2: PCI slot 1 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 3: PCI slot 2 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 4: PCI slot 3 */
+-#else
+-	0x0,				/* External  1: */
+-	0x0,				/* External  2: */
+-	0x0,				/* External  3: */
+-	0x0,				/* External  4: */
+-#endif
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 5: PHY */
+-	0x0,				/* External  6: */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 7: PHY */
+-	0x0,				/* External  8: */
+-	0x0,				/* External  9: */
+-	0x0,				/* External 10: */
+-	0x0,				/* External 11: */
+-};
+-
+-/* ************************************************************************ */
+-int
+-mpc85xx_ads_show_cpuinfo(struct seq_file *m)
+-{
+-	uint pvid, svid, phid1;
+-	uint memsize = total_memory;
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	pvid = mfspr(SPRN_PVR);
+-	svid = mfspr(SPRN_SVR);
+-
+-	seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
+-	seq_printf(m, "Machine\t\t: mpc%sads\n", cur_ppc_sys_spec->ppc_sys_name);
+-	seq_printf(m, "clock\t\t: %dMHz\n", freq / 1000000);
+-	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+-	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+-
+-	/* Display cpu Pll setting */
+-	phid1 = mfspr(SPRN_HID1);
+-	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+-
+-	/* Display the amount of memory */
+-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+-
+-	return 0;
+-}
+-
+-void __init
+-mpc85xx_ads_init_IRQ(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	/* Determine the Physical Address of the OpenPIC regs */
+-	phys_addr_t OpenPIC_PAddr =
+-	    binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
+-	OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
+-	OpenPIC_InitSenses = mpc85xx_ads_openpic_initsenses;
+-	OpenPIC_NumInitSenses = sizeof (mpc85xx_ads_openpic_initsenses);
+-
+-	/* Skip reserved space and internal sources */
+-	openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
+-	/* Map PIC IRQs 0-11 */
+-	openpic_set_sources(48, 12, OpenPIC_Addr + 0x10000);
+-
+-	/* we let openpic interrupts starting from an offset, to
+-	 * leave space for cascading interrupts underneath.
+-	 */
+-	openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
+-
+-	return;
+-}
+-
+-#ifdef CONFIG_PCI
+-/*
+- * interrupt routing
+- */
+-
+-int
+-mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+-{
+-	static char pci_irq_table[][4] =
+-	    /*
+-	     * This is little evil, but works around the fact
+-	     * that revA boards have IDSEL starting at 18
+-	     * and others boards (older) start at 12
+-	     *
+-	     *      PCI IDSEL/INTPIN->INTLINE
+-	     *       A      B      C      D
+-	     */
+-	{
+-		{PIRQA, PIRQB, PIRQC, PIRQD},	/* IDSEL 2 */
+-		{PIRQD, PIRQA, PIRQB, PIRQC},
+-		{PIRQC, PIRQD, PIRQA, PIRQB},
+-		{PIRQB, PIRQC, PIRQD, PIRQA},	/* IDSEL 5 */
+-		{0, 0, 0, 0},	/* -- */
+-		{0, 0, 0, 0},	/* -- */
+-		{0, 0, 0, 0},	/* -- */
+-		{0, 0, 0, 0},	/* -- */
+-		{0, 0, 0, 0},	/* -- */
+-		{0, 0, 0, 0},	/* -- */
+-		{PIRQA, PIRQB, PIRQC, PIRQD},	/* IDSEL 12 */
+-		{PIRQD, PIRQA, PIRQB, PIRQC},
+-		{PIRQC, PIRQD, PIRQA, PIRQB},
+-		{PIRQB, PIRQC, PIRQD, PIRQA},	/* IDSEL 15 */
+-		{0, 0, 0, 0},	/* -- */
+-		{0, 0, 0, 0},	/* -- */
+-		{PIRQA, PIRQB, PIRQC, PIRQD},	/* IDSEL 18 */
+-		{PIRQD, PIRQA, PIRQB, PIRQC},
+-		{PIRQC, PIRQD, PIRQA, PIRQB},
+-		{PIRQB, PIRQC, PIRQD, PIRQA},	/* IDSEL 21 */
+-	};
+-
+-	const long min_idsel = 2, max_idsel = 21, irqs_per_slot = 4;
+-	return PCI_IRQ_TABLE_LOOKUP;
+-}
+-
+-int
+-mpc85xx_exclude_device(u_char bus, u_char devfn)
+-{
+-	if (bus == 0 && PCI_SLOT(devfn) == 0)
+-		return PCIBIOS_DEVICE_NOT_FOUND;
+-	else
+-		return PCIBIOS_SUCCESSFUL;
+-}
+-
+-#endif /* CONFIG_PCI */
+-
+-#ifdef CONFIG_RAPIDIO
+-void platform_rio_init(void)
+-{
+-	/* 512MB RIO LAW at 0xc0000000 */
+-	mpc85xx_rio_setup(0xc0000000, 0x20000000);
+-}
+-#endif /* CONFIG_RAPIDIO */
+diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
+deleted file mode 100644
+index c8c322f..0000000
+--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
++++ /dev/null
+@@ -1,67 +0,0 @@
+-/*
+- * MPC85XX ADS common board definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __MACH_MPC85XX_ADS_H__
+-#define __MACH_MPC85XX_ADS_H__
+-
+-#include <linux/init.h>
+-#include <asm/ppcboot.h>
+-
+-#define BOARD_CCSRBAR		((uint)0xe0000000)
+-#define BCSR_ADDR		((uint)0xf8000000)
+-#define BCSR_SIZE		((uint)(32 * 1024))
+-
+-struct seq_file;
+-
+-extern int mpc85xx_ads_show_cpuinfo(struct seq_file *m);
+-extern void mpc85xx_ads_init_IRQ(void) __init;
+-extern void mpc85xx_ads_map_io(void) __init;
+-
+-/* PCI interrupt controller */
+-#define PIRQA		MPC85xx_IRQ_EXT1
+-#define PIRQB		MPC85xx_IRQ_EXT2
+-#define PIRQC		MPC85xx_IRQ_EXT3
+-#define PIRQD		MPC85xx_IRQ_EXT4
+-
+-#define MPC85XX_PCI1_LOWER_IO	0x00000000
+-#define MPC85XX_PCI1_UPPER_IO	0x00ffffff
+-
+-#define MPC85XX_PCI1_LOWER_MEM	0x80000000
+-#define MPC85XX_PCI1_UPPER_MEM	0x9fffffff
+-
+-#define MPC85XX_PCI1_IO_BASE	0xe2000000
+-#define MPC85XX_PCI1_MEM_OFFSET	0x00000000
+-
+-#define MPC85XX_PCI1_IO_SIZE	0x01000000
+-
+-/* FCC1 Clock Source Configuration.  These can be
+- * redefined in the board specific file.
+- *    Can only choose from CLK9-12 */
+-#define F1_RXCLK       12
+-#define F1_TXCLK       11
+-
+-/* FCC2 Clock Source Configuration.  These can be
+- * redefined in the board specific file.
+- *    Can only choose from CLK13-16 */
+-#define F2_RXCLK       13
+-#define F2_TXCLK       14
+-
+-/* FCC3 Clock Source Configuration.  These can be
+- * redefined in the board specific file.
+- *    Can only choose from CLK13-16 */
+-#define F3_RXCLK       15
+-#define F3_TXCLK       16
+-
+-
+-#endif				/* __MACH_MPC85XX_ADS_H__ */
+diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
+deleted file mode 100644
+index 2d59eb7..0000000
+--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
++++ /dev/null
+@@ -1,601 +0,0 @@
+-/*
+- * MPC85xx CDS board specific routines
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor, Inc
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/seq_file.h>
+-#include <linux/serial.h>
+-#include <linux/module.h>
+-#include <linux/root_dev.h>
+-#include <linux/initrd.h>
+-#include <linux/tty.h>
+-#include <linux/serial_core.h>
+-#include <linux/fsl_devices.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/todc.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/open_pic.h>
+-#include <asm/i8259.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/cpm2.h>
+-#include <asm/ppc_sys.h>
+-#include <asm/kgdb.h>
+-
+-#include <mm/mmu_decl.h>
+-#include <syslib/cpm2_pic.h>
+-#include <syslib/ppc85xx_common.h>
+-#include <syslib/ppc85xx_setup.h>
+-
+-
+-#ifndef CONFIG_PCI
+-unsigned long isa_io_base = 0;
+-unsigned long isa_mem_base = 0;
+-#endif
+-
+-extern unsigned long total_memory;      /* in mm/init */
+-
+-unsigned char __res[sizeof (bd_t)];
+-
+-static int cds_pci_slot = 2;
+-static volatile u8 * cadmus;
+-
+-/* Internal interrupts are all Level Sensitive, and Positive Polarity */
+-static u_char mpc85xx_cds_openpic_initsenses[] __initdata = {
+-	MPC85XX_INTERNAL_IRQ_SENSES,
+-#if defined(CONFIG_PCI)
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 0: PCI1 slot */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 1: PCI1 slot */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 2: PCI1 slot */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 3: PCI1 slot */
+-#else
+-	0x0,						/* External  0: */
+-	0x0,						/* External  1: */
+-	0x0,						/* External  2: */
+-	0x0,						/* External  3: */
+-#endif
+-	0x0,						/* External  4: */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External  5: PHY */
+-	0x0,						/* External  6: */
+-	0x0,						/* External  7: */
+-	0x0,						/* External  8: */
+-	0x0,						/* External  9: */
+-	0x0,						/* External 10: */
+-#if defined(CONFIG_85xx_PCI2) && defined(CONFIG_PCI)
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 11: PCI2 slot 0 */
+-#else
+-	0x0,						/* External 11: */
+-#endif
+-};
+-
+-/* ************************************************************************ */
+-int
+-mpc85xx_cds_show_cpuinfo(struct seq_file *m)
+-{
+-	uint pvid, svid, phid1;
+-	uint memsize = total_memory;
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	pvid = mfspr(SPRN_PVR);
+-	svid = mfspr(SPRN_SVR);
+-
+-	seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
+-	seq_printf(m, "Machine\t\t: CDS - MPC%s (%x)\n", cur_ppc_sys_spec->ppc_sys_name, cadmus[CM_VER]);
+-	seq_printf(m, "clock\t\t: %dMHz\n", freq / 1000000);
+-	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+-	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+-
+-	/* Display cpu Pll setting */
+-	phid1 = mfspr(SPRN_HID1);
+-	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+-
+-	/* Display the amount of memory */
+-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+-
+-	return 0;
+-}
+-
+-#ifdef CONFIG_CPM2
+-static irqreturn_t cpm2_cascade(int irq, void *dev_id)
+-{
+-	while((irq = cpm2_get_irq()) >= 0)
+-		__do_IRQ(irq);
+-	return IRQ_HANDLED;
+-}
+-
+-static struct irqaction cpm2_irqaction = {
+-	.handler = cpm2_cascade,
+-	.flags = IRQF_DISABLED,
+-	.mask = CPU_MASK_NONE,
+-	.name = "cpm2_cascade",
+-};
+-#endif /* CONFIG_CPM2 */
+-
+-void __init
+-mpc85xx_cds_init_IRQ(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	int i;
+-
+-	/* Determine the Physical Address of the OpenPIC regs */
+-	phys_addr_t OpenPIC_PAddr = binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
+-	OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
+-	OpenPIC_InitSenses = mpc85xx_cds_openpic_initsenses;
+-	OpenPIC_NumInitSenses = sizeof (mpc85xx_cds_openpic_initsenses);
+-
+-	/* Skip reserved space and internal sources */
+-#ifdef CONFIG_MPC8548
+-	openpic_set_sources(0, 48, OpenPIC_Addr + 0x10200);
+-#else
+-	openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
+-#endif
+-	/* Map PIC IRQs 0-11 */
+-	openpic_set_sources(48, 12, OpenPIC_Addr + 0x10000);
+-
+-	/* we let openpic interrupts starting from an offset, to
+-	 * leave space for cascading interrupts underneath.
+-	 */
+-	openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
+-
+-#ifdef CONFIG_PCI
+-	openpic_hookup_cascade(PIRQ0A, "82c59 cascade", i8259_irq);
+-
+-	i8259_init(0, 0);
+-#endif
+-
+-#ifdef CONFIG_CPM2
+-	/* Setup CPM2 PIC */
+-        cpm2_init_IRQ();
+-
+-	setup_irq(MPC85xx_IRQ_CPM, &cpm2_irqaction);
+-#endif
+-
+-	return;
+-}
+-
+-#ifdef CONFIG_PCI
+-/*
+- * interrupt routing
+- */
+-int
+-mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+-{
+-	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
+-
+-	if (!hose->index)
+-	{
+-		/* Handle PCI1 interrupts */
+-		char pci_irq_table[][4] =
+-			/*
+-			 *      PCI IDSEL/INTPIN->INTLINE
+-			 *        A      B      C      D
+-			 */
+-
+-			/* Note IRQ assignment for slots is based on which slot the elysium is
+-			 * in -- in this setup elysium is in slot #2 (this PIRQA as first
+-			 * interrupt on slot */
+-		{
+-			{ 0, 1, 2, 3 }, /* 16 - PMC */
+-			{ 0, 1, 2, 3 }, /* 17 P2P (Tsi320) */
+-			{ 0, 1, 2, 3 }, /* 18 - Slot 1 */
+-			{ 1, 2, 3, 0 }, /* 19 - Slot 2 */
+-			{ 2, 3, 0, 1 }, /* 20 - Slot 3 */
+-			{ 3, 0, 1, 2 }, /* 21 - Slot 4 */
+-		};
+-
+-		const long min_idsel = 16, max_idsel = 21, irqs_per_slot = 4;
+-		int i, j;
+-
+-		for (i = 0; i < 6; i++)
+-			for (j = 0; j < 4; j++)
+-				pci_irq_table[i][j] =
+-					((pci_irq_table[i][j] + 5 -
+-					  cds_pci_slot) & 0x3) + PIRQ0A;
+-
+-		return PCI_IRQ_TABLE_LOOKUP;
+-	} else {
+-		/* Handle PCI2 interrupts (if we have one) */
+-		char pci_irq_table[][4] =
+-		{
+-			/*
+-			 * We only have one slot and one interrupt
+-			 * going to PIRQA - PIRQD */
+-			{ PIRQ1A, PIRQ1A, PIRQ1A, PIRQ1A }, /* 21 - slot 0 */
+-		};
+-
+-		const long min_idsel = 21, max_idsel = 21, irqs_per_slot = 4;
+-
+-		return PCI_IRQ_TABLE_LOOKUP;
+-	}
+-}
+-
+-#define ARCADIA_HOST_BRIDGE_IDSEL	17
+-#define ARCADIA_2ND_BRIDGE_IDSEL	3
+-
+-extern int mpc85xx_pci1_last_busno;
+-
+-int
+-mpc85xx_exclude_device(u_char bus, u_char devfn)
+-{
+-	if (bus == 0 && PCI_SLOT(devfn) == 0)
+-		return PCIBIOS_DEVICE_NOT_FOUND;
+-#ifdef CONFIG_85xx_PCI2
+-	if (mpc85xx_pci1_last_busno)
+-		if (bus == (mpc85xx_pci1_last_busno + 1) && PCI_SLOT(devfn) == 0)
+-			return PCIBIOS_DEVICE_NOT_FOUND;
+-#endif
+-	/* We explicitly do not go past the Tundra 320 Bridge */
+-	if ((bus == 1) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
+-		return PCIBIOS_DEVICE_NOT_FOUND;
+-	if ((bus == 0) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
+-		return PCIBIOS_DEVICE_NOT_FOUND;
+-	else
+-		return PCIBIOS_SUCCESSFUL;
+-}
+-
+-void __init
+-mpc85xx_cds_enable_via(struct pci_controller *hose)
+-{
+-	u32 pci_class;
+-	u16 vid, did;
+-
+-	early_read_config_dword(hose, 0, 0x88, PCI_CLASS_REVISION, &pci_class);
+-	if ((pci_class >> 16) != PCI_CLASS_BRIDGE_PCI)
+-		return;
+-
+-	/* Configure P2P so that we can reach bus 1 */
+-	early_write_config_byte(hose, 0, 0x88, PCI_PRIMARY_BUS, 0);
+-	early_write_config_byte(hose, 0, 0x88, PCI_SECONDARY_BUS, 1);
+-	early_write_config_byte(hose, 0, 0x88, PCI_SUBORDINATE_BUS, 0xff);
+-
+-	early_read_config_word(hose, 1, 0x10, PCI_VENDOR_ID, &vid);
+-	early_read_config_word(hose, 1, 0x10, PCI_DEVICE_ID, &did);
+-
+-	if ((vid != PCI_VENDOR_ID_VIA) ||
+-			(did != PCI_DEVICE_ID_VIA_82C686))
+-		return;
+-
+-	/* Enable USB and IDE functions */
+-	early_write_config_byte(hose, 1, 0x10, 0x48, 0x08);
+-}
+-
+-void __init
+-mpc85xx_cds_fixup_via(struct pci_controller *hose)
+-{
+-	u32 pci_class;
+-	u16 vid, did;
+-
+-	early_read_config_dword(hose, 0, 0x88, PCI_CLASS_REVISION, &pci_class);
+-	if ((pci_class >> 16) != PCI_CLASS_BRIDGE_PCI)
+-		return;
+-
+-	/*
+-	 * Force the backplane P2P bridge to have a window
+-	 * open from 0x00000000-0x00001fff in PCI I/O space.
+-	 * This allows legacy I/O (i8259, etc) on the VIA
+-	 * southbridge to be accessed.
+-	 */
+-	early_write_config_byte(hose, 0, 0x88, PCI_IO_BASE, 0x00);
+-	early_write_config_word(hose, 0, 0x88, PCI_IO_BASE_UPPER16, 0x0000);
+-	early_write_config_byte(hose, 0, 0x88, PCI_IO_LIMIT, 0x10);
+-	early_write_config_word(hose, 0, 0x88, PCI_IO_LIMIT_UPPER16, 0x0000);
+-
+-	early_read_config_word(hose, 1, 0x10, PCI_VENDOR_ID, &vid);
+-	early_read_config_word(hose, 1, 0x10, PCI_DEVICE_ID, &did);
+-	if ((vid != PCI_VENDOR_ID_VIA) ||
+-			(did != PCI_DEVICE_ID_VIA_82C686))
+-		return;
+-
+-	/*
+-	 * Since the P2P window was forced to cover the fixed
+-	 * legacy I/O addresses, it is necessary to manually
+-	 * place the base addresses for the IDE and USB functions
+-	 * within this window.
+-	 */
+-	/* Function 1, IDE */
+-	early_write_config_dword(hose, 1, 0x11, PCI_BASE_ADDRESS_0, 0x1ff8);
+-	early_write_config_dword(hose, 1, 0x11, PCI_BASE_ADDRESS_1, 0x1ff4);
+-	early_write_config_dword(hose, 1, 0x11, PCI_BASE_ADDRESS_2, 0x1fe8);
+-	early_write_config_dword(hose, 1, 0x11, PCI_BASE_ADDRESS_3, 0x1fe4);
+-	early_write_config_dword(hose, 1, 0x11, PCI_BASE_ADDRESS_4, 0x1fd0);
+-
+-	/* Function 2, USB ports 0-1 */
+-	early_write_config_dword(hose, 1, 0x12, PCI_BASE_ADDRESS_4, 0x1fa0);
+-
+-	/* Function 3, USB ports 2-3 */
+-	early_write_config_dword(hose, 1, 0x13, PCI_BASE_ADDRESS_4, 0x1f80);
+-
+-	/* Function 5, Power Management */
+-	early_write_config_dword(hose, 1, 0x15, PCI_BASE_ADDRESS_0, 0x1e00);
+-	early_write_config_dword(hose, 1, 0x15, PCI_BASE_ADDRESS_1, 0x1dfc);
+-	early_write_config_dword(hose, 1, 0x15, PCI_BASE_ADDRESS_2, 0x1df8);
+-
+-	/* Function 6, AC97 Interface */
+-	early_write_config_dword(hose, 1, 0x16, PCI_BASE_ADDRESS_0, 0x1c00);
+-}
+-
+-void __init
+-mpc85xx_cds_pcibios_fixup(void)
+-{
+-        struct pci_dev *dev;
+-	u_char		c;
+-
+-	if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
+-                                        PCI_DEVICE_ID_VIA_82C586_1, NULL))) {
+-                /*
+-                 * U-Boot does not set the enable bits
+-                 * for the IDE device. Force them on here.
+-                 */
+-                pci_read_config_byte(dev, 0x40, &c);
+-                c |= 0x03; /* IDE: Chip Enable Bits */
+-                pci_write_config_byte(dev, 0x40, c);
+-
+-		/*
+-		 * Since only primary interface works, force the
+-		 * IDE function to standard primary IDE interrupt
+-		 * w/ 8259 offset
+-		 */
+-                dev->irq = 14;
+-                pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+-		pci_dev_put(dev);
+-        }
+-
+-	/*
+-	 * Force legacy USB interrupt routing
+-	 */
+-	if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
+-                                        PCI_DEVICE_ID_VIA_82C586_2, NULL))) {
+-                dev->irq = 10;
+-                pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 10);
+-
+-		if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
+-                                        PCI_DEVICE_ID_VIA_82C586_2, dev))) {
+-	                dev->irq = 11;
+-	                pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
+-		}
+-		pci_dev_put(dev);
+-        }
+-}
+-#endif /* CONFIG_PCI */
+-
+-TODC_ALLOC();
+-
+-/* ************************************************************************
+- *
+- * Setup the architecture
+- *
+- */
+-static void __init
+-mpc85xx_cds_setup_arch(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-	struct gianfar_platform_data *pdata;
+-	struct gianfar_mdio_data *mdata;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	printk("mpc85xx_cds_setup_arch\n");
+-
+-#ifdef CONFIG_CPM2
+-	cpm2_reset();
+-#endif
+-
+-	cadmus = ioremap(CADMUS_BASE, CADMUS_SIZE);
+-	cds_pci_slot = ((cadmus[CM_CSR] >> 6) & 0x3) + 1;
+-	printk("CDS Version = %x in PCI slot %d\n", cadmus[CM_VER], cds_pci_slot);
+-
+-	/* Setup TODC access */
+-	TODC_INIT(TODC_TYPE_DS1743,
+-			0,
+-			0,
+-			ioremap(CDS_RTC_ADDR, CDS_RTC_SIZE),
+-			8);
+-
+-	/* Set loops_per_jiffy to a half-way reasonable value,
+-	   for use until calibrate_delay gets called. */
+-	loops_per_jiffy = freq / HZ;
+-
+-#ifdef CONFIG_PCI
+-	/* VIA IDE configuration */
+-        ppc_md.pcibios_fixup = mpc85xx_cds_pcibios_fixup;
+-
+-	/* setup PCI host bridges */
+-	mpc85xx_setup_hose();
+-#endif
+-
+-#ifdef CONFIG_SERIAL_8250
+-	mpc85xx_early_serial_map();
+-#endif
+-
+-#ifdef CONFIG_SERIAL_TEXT_DEBUG
+-	/* Invalidate the entry we stole earlier the serial ports
+-	 * should be properly mapped */
+-	invalidate_tlbcam_entry(num_tlbcam_entries - 1);
+-#endif
+-
+-	/* setup the board related info for the MDIO bus */
+-	mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
+-
+-	mdata->irq[0] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[1] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[2] = PHY_POLL;
+-	mdata->irq[3] = PHY_POLL;
+-	mdata->irq[31] = PHY_POLL;
+-
+-	/* setup the board related information for the enet controllers */
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 0;
+-		memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 1;
+-		memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC1);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 0;
+-		memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC2);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 1;
+-		memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+-	}
+-
+-	ppc_sys_device_remove(MPC85xx_eTSEC3);
+-	ppc_sys_device_remove(MPC85xx_eTSEC4);
+-
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (initrd_start)
+-		ROOT_DEV = Root_RAM0;
+-	else
+-#endif
+-#ifdef  CONFIG_ROOT_NFS
+-		ROOT_DEV = Root_NFS;
+-#else
+-	ROOT_DEV = Root_HDA1;
+-#endif
+-}
+-
+-/* ************************************************************************ */
+-void __init
+-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+-              unsigned long r6, unsigned long r7)
+-{
+-	/* parse_bootinfo must always be called first */
+-	parse_bootinfo(find_bootinfo());
+-
+-	/*
+-	 * If we were passed in a board information, copy it into the
+-	 * residual data area.
+-	 */
+-	if (r3) {
+-		memcpy((void *) __res, (void *) (r3 + KERNELBASE),
+-				sizeof (bd_t));
+-
+-	}
+-#ifdef CONFIG_SERIAL_TEXT_DEBUG
+-	{
+-		bd_t *binfo = (bd_t *) __res;
+-		struct uart_port p;
+-
+-		/* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */
+-		settlbcam(num_tlbcam_entries - 1, binfo->bi_immr_base,
+-				binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0);
+-
+-		memset(&p, 0, sizeof (p));
+-		p.iotype = UPIO_MEM;
+-		p.membase = (void *) binfo->bi_immr_base + MPC85xx_UART0_OFFSET;
+-		p.uartclk = binfo->bi_busfreq;
+-
+-		gen550_init(0, &p);
+-
+-		memset(&p, 0, sizeof (p));
+-		p.iotype = UPIO_MEM;
+-		p.membase = (void *) binfo->bi_immr_base + MPC85xx_UART1_OFFSET;
+-		p.uartclk = binfo->bi_busfreq;
+-
+-		gen550_init(1, &p);
+-	}
+-#endif
+-
+-#if defined(CONFIG_BLK_DEV_INITRD)
+-	/*
+-	 * If the init RAM disk has been configured in, and there's a valid
+-	 * starting address for it, set it up.
+-	 */
+-	if (r4) {
+-		initrd_start = r4 + KERNELBASE;
+-		initrd_end = r5 + KERNELBASE;
+-	}
+-#endif /* CONFIG_BLK_DEV_INITRD */
+-
+-	/* Copy the kernel command line arguments to a safe place. */
+-
+-	if (r6) {
+-		*(char *) (r7 + KERNELBASE) = 0;
+-		strcpy(cmd_line, (char *) (r6 + KERNELBASE));
+-	}
+-
+-	identify_ppc_sys_by_id(mfspr(SPRN_SVR));
+-
+-	/* setup the PowerPC module struct */
+-	ppc_md.setup_arch = mpc85xx_cds_setup_arch;
+-	ppc_md.show_cpuinfo = mpc85xx_cds_show_cpuinfo;
+-
+-	ppc_md.init_IRQ = mpc85xx_cds_init_IRQ;
+-	ppc_md.get_irq = openpic_get_irq;
+-
+-	ppc_md.restart = mpc85xx_restart;
+-	ppc_md.power_off = mpc85xx_power_off;
+-	ppc_md.halt = mpc85xx_halt;
+-
+-	ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
+-
+-	ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
+-
+-	ppc_md.time_init = todc_time_init;
+-	ppc_md.set_rtc_time = todc_set_rtc_time;
+-	ppc_md.get_rtc_time = todc_get_rtc_time;
+-
+-	ppc_md.nvram_read_val = todc_direct_read_val;
+-	ppc_md.nvram_write_val = todc_direct_write_val;
+-
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+-	ppc_md.progress = gen550_progress;
+-#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_KGDB)
+-	ppc_md.early_serial_map = mpc85xx_early_serial_map;
+-#endif	/* CONFIG_SERIAL_8250 && CONFIG_KGDB */
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("mpc85xx_cds_init(): exit", 0);
+-
+-	return;
+-}
+diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.h b/arch/ppc/platforms/85xx/mpc85xx_cds_common.h
+deleted file mode 100644
+index 32c5455..0000000
+--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.h
++++ /dev/null
+@@ -1,80 +0,0 @@
+-/*
+- * MPC85xx CDS board definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor, Inc
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __MACH_MPC85XX_CDS_H__
+-#define __MACH_MPC85XX_CDS_H__
+-
+-#include <linux/serial.h>
+-#include <asm/ppcboot.h>
+-#include <linux/initrd.h>
+-#include <syslib/ppc85xx_setup.h>
+-
+-#define BOARD_CCSRBAR           ((uint)0xe0000000)
+-#define CCSRBAR_SIZE            ((uint)1024*1024)
+-
+-/* CADMUS info */
+-#define CADMUS_BASE (0xf8004000)
+-#define CADMUS_SIZE (256)
+-#define CM_VER	(0)
+-#define CM_CSR	(1)
+-#define CM_RST	(2)
+-
+-/* CDS NVRAM/RTC */
+-#define CDS_RTC_ADDR	(0xf8000000)
+-#define CDS_RTC_SIZE	(8 * 1024)
+-
+-/* PCI config */
+-#define PCI1_CFG_ADDR_OFFSET	(0x8000)
+-#define PCI1_CFG_DATA_OFFSET	(0x8004)
+-
+-#define PCI2_CFG_ADDR_OFFSET	(0x9000)
+-#define PCI2_CFG_DATA_OFFSET	(0x9004)
+-
+-/* PCI interrupt controller */
+-#define PIRQ0A                   MPC85xx_IRQ_EXT0
+-#define PIRQ0B                   MPC85xx_IRQ_EXT1
+-#define PIRQ0C                   MPC85xx_IRQ_EXT2
+-#define PIRQ0D                   MPC85xx_IRQ_EXT3
+-#define PIRQ1A                   MPC85xx_IRQ_EXT11
+-
+-/* PCI 1 memory map */
+-#define MPC85XX_PCI1_LOWER_IO        0x00000000
+-#define MPC85XX_PCI1_UPPER_IO        0x00ffffff
+-
+-#define MPC85XX_PCI1_LOWER_MEM       0x80000000
+-#define MPC85XX_PCI1_UPPER_MEM       0x9fffffff
+-
+-#define MPC85XX_PCI1_IO_BASE         0xe2000000
+-#define MPC85XX_PCI1_MEM_OFFSET      0x00000000
+-
+-#define MPC85XX_PCI1_IO_SIZE         0x01000000
+-
+-/* PCI 2 memory map */
+-/* Note: the standard PPC fixups will cause IO space to get bumped by
+- * hose->io_base_virt - isa_io_base => MPC85XX_PCI1_IO_SIZE */
+-#define MPC85XX_PCI2_LOWER_IO        0x00000000
+-#define MPC85XX_PCI2_UPPER_IO        0x00ffffff
+-
+-#define MPC85XX_PCI2_LOWER_MEM       0xa0000000
+-#define MPC85XX_PCI2_UPPER_MEM       0xbfffffff
+-
+-#define MPC85XX_PCI2_IO_BASE         0xe3000000
+-#define MPC85XX_PCI2_MEM_OFFSET      0x00000000
+-
+-#define MPC85XX_PCI2_IO_SIZE         0x01000000
+-
+-#define NR_8259_INTS		     16
+-#define CPM_IRQ_OFFSET		     NR_8259_INTS
+-
+-#endif /* __MACH_MPC85XX_CDS_H__ */
+diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
+deleted file mode 100644
+index 3d7addb..0000000
+--- a/arch/ppc/platforms/85xx/sbc8560.c
++++ /dev/null
+@@ -1,234 +0,0 @@
+-/*
+- * Wind River SBC8560 board specific routines
+- * 
+- * Maintainer: Kumar Gala
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- * 
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/seq_file.h>
+-#include <linux/root_dev.h>
+-#include <linux/serial.h>
+-#include <linux/tty.h>	/* for linux/serial_core.h */
+-#include <linux/serial_core.h>
+-#include <linux/serial_8250.h>
+-#include <linux/initrd.h>
+-#include <linux/module.h>
+-#include <linux/fsl_devices.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/open_pic.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/kgdb.h>
+-#include <asm/ppc_sys.h>
+-#include <mm/mmu_decl.h>
+-
+-#include <syslib/ppc85xx_common.h>
+-#include <syslib/ppc85xx_setup.h>
+-
+-#ifdef CONFIG_SERIAL_8250
+-static void __init
+-sbc8560_early_serial_map(void)
+-{
+-        struct uart_port uart_req;
+- 
+-        /* Setup serial port access */
+-        memset(&uart_req, 0, sizeof (uart_req));
+-	uart_req.irq = MPC85xx_IRQ_EXT9;
+-	uart_req.flags = STD_COM_FLAGS;
+-	uart_req.uartclk = BASE_BAUD * 16;
+-        uart_req.iotype = UPIO_MEM;
+-        uart_req.mapbase = UARTA_ADDR;
+-        uart_req.membase = ioremap(uart_req.mapbase, MPC85xx_UART0_SIZE);
+-	uart_req.type = PORT_16650;
+-
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+-        gen550_init(0, &uart_req);
+-#endif
+- 
+-        if (early_serial_setup(&uart_req) != 0)
+-                printk("Early serial init of port 0 failed\n");
+- 
+-        /* Assume early_serial_setup() doesn't modify uart_req */
+-	uart_req.line = 1;
+-        uart_req.mapbase = UARTB_ADDR;
+-        uart_req.membase = ioremap(uart_req.mapbase, MPC85xx_UART1_SIZE);
+-	uart_req.irq = MPC85xx_IRQ_EXT10;
+- 
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+-        gen550_init(1, &uart_req);
+-#endif
+- 
+-        if (early_serial_setup(&uart_req) != 0)
+-                printk("Early serial init of port 1 failed\n");
+-}
+-#endif
+-
+-/* ************************************************************************
+- *
+- * Setup the architecture
+- *
+- */
+-static void __init
+-sbc8560_setup_arch(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-	struct gianfar_platform_data *pdata;
+-	struct gianfar_mdio_data *mdata;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("sbc8560_setup_arch()", 0);
+-
+-	/* Set loops_per_jiffy to a half-way reasonable value,
+-	   for use until calibrate_delay gets called. */
+-	loops_per_jiffy = freq / HZ;
+-
+-#ifdef CONFIG_PCI
+-	/* setup PCI host bridges */
+-	mpc85xx_setup_hose();
+-#endif
+-#ifdef CONFIG_SERIAL_8250
+-	sbc8560_early_serial_map();
+-#endif
+-#ifdef CONFIG_SERIAL_TEXT_DEBUG
+-	/* Invalidate the entry we stole earlier the serial ports
+-	 * should be properly mapped */ 
+-	invalidate_tlbcam_entry(num_tlbcam_entries - 1);
+-#endif
+-
+-	/* setup the board related info for the MDIO bus */
+-	mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
+-
+-	mdata->irq[25] = MPC85xx_IRQ_EXT6;
+-	mdata->irq[26] = MPC85xx_IRQ_EXT7;
+-	mdata->irq[31] = PHY_POLL;
+-
+-	/* setup the board related information for the enet controllers */
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 25;
+-		memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 26;
+-		memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+-	}
+-
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (initrd_start)
+-		ROOT_DEV = Root_RAM0;
+-	else
+-#endif
+-#ifdef  CONFIG_ROOT_NFS
+-		ROOT_DEV = Root_NFS;
+-#else
+-		ROOT_DEV = Root_HDA1;
+-#endif
+-}
+-
+-/* ************************************************************************ */
+-void __init
+-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+-	      unsigned long r6, unsigned long r7)
+-{
+-	/* parse_bootinfo must always be called first */
+-	parse_bootinfo(find_bootinfo());
+-
+-	/*
+-	 * If we were passed in a board information, copy it into the
+-	 * residual data area.
+-	 */
+-	if (r3) {
+-		memcpy((void *) __res, (void *) (r3 + KERNELBASE),
+-		       sizeof (bd_t));
+-	}
+-
+-#ifdef CONFIG_SERIAL_TEXT_DEBUG
+-	/* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */
+-	settlbcam(num_tlbcam_entries - 1, UARTA_ADDR,
+-		  UARTA_ADDR, 0x1000, _PAGE_IO, 0);
+-#endif
+-
+-#if defined(CONFIG_BLK_DEV_INITRD)
+-	/*
+-	 * If the init RAM disk has been configured in, and there's a valid
+-	 * starting address for it, set it up.
+-	 */
+-	if (r4) {
+-		initrd_start = r4 + KERNELBASE;
+-		initrd_end = r5 + KERNELBASE;
+-	}
+-#endif				/* CONFIG_BLK_DEV_INITRD */
+-
+-	/* Copy the kernel command line arguments to a safe place. */
+-
+-	if (r6) {
+-		*(char *) (r7 + KERNELBASE) = 0;
+-		strcpy(cmd_line, (char *) (r6 + KERNELBASE));
+-	}
+-
+-	identify_ppc_sys_by_id(mfspr(SPRN_SVR));
+-
+-	/* setup the PowerPC module struct */
+-	ppc_md.setup_arch = sbc8560_setup_arch;
+-	ppc_md.show_cpuinfo = sbc8560_show_cpuinfo;
+-
+-	ppc_md.init_IRQ = sbc8560_init_IRQ;
+-	ppc_md.get_irq = openpic_get_irq;
+-
+-	ppc_md.restart = mpc85xx_restart;
+-	ppc_md.power_off = mpc85xx_power_off;
+-	ppc_md.halt = mpc85xx_halt;
+-
+-	ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
+-
+-	ppc_md.time_init = NULL;
+-	ppc_md.set_rtc_time = NULL;
+-	ppc_md.get_rtc_time = NULL;
+-	ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
+-
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+-	ppc_md.progress = gen550_progress;
+-#endif	/* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_KGDB)
+-	ppc_md.early_serial_map = sbc8560_early_serial_map;
+-#endif	/* CONFIG_SERIAL_8250 && CONFIG_KGDB */
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("sbc8560_init(): exit", 0);
+-}
+diff --git a/arch/ppc/platforms/85xx/sbc8560.h b/arch/ppc/platforms/85xx/sbc8560.h
+deleted file mode 100644
+index e5e156f..0000000
+--- a/arch/ppc/platforms/85xx/sbc8560.h
++++ /dev/null
+@@ -1,47 +0,0 @@
+-/*
+- * Wind River SBC8560 board definitions
+- *
+- * Copyright 2003 Motorola Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+- 
+-#ifndef __MACH_SBC8560_H__
+-#define __MACH_SBC8560_H__
+- 
+-#include <platforms/85xx/sbc85xx.h>
+-#include <asm/irq.h>
+-
+-#define CPM_MAP_ADDR    (CCSRBAR + MPC85xx_CPM_OFFSET)
+- 
+-#ifdef CONFIG_SERIAL_MANY_PORTS
+-#define RS_TABLE_SIZE  64
+-#else
+-#define RS_TABLE_SIZE  2
+-#endif
+- 
+-/* Rate for the 1.8432 Mhz clock for the onboard serial chip */
+-#define BASE_BAUD ( 1843200 / 16 )
+- 
+-#ifdef CONFIG_SERIAL_DETECT_IRQ
+-#define STD_COM_FLAGS (ASYNC_SKIP_TEST|ASYNC_AUTO_IRQ)
+-#else
+-#define STD_COM_FLAGS (ASYNC_SKIP_TEST)
+-#endif
+-
+-#define STD_SERIAL_PORT_DFNS \
+-        { 0, BASE_BAUD, UARTA_ADDR, MPC85xx_IRQ_EXT9, STD_COM_FLAGS, /* ttyS0 */ \
+-                iomem_base: (u8 *)UARTA_ADDR,                       \
+-                io_type: SERIAL_IO_MEM },                                 \
+-        { 0, BASE_BAUD, UARTB_ADDR, MPC85xx_IRQ_EXT10, STD_COM_FLAGS, /* ttyS1 */ \
+-                iomem_base: (u8 *)UARTB_ADDR,                       \
+-                io_type: SERIAL_IO_MEM },
+- 
+-#define SERIAL_PORT_DFNS \
+-        STD_SERIAL_PORT_DFNS
+- 
+-#endif /* __MACH_SBC8560_H__ */
+diff --git a/arch/ppc/platforms/85xx/sbc85xx.c b/arch/ppc/platforms/85xx/sbc85xx.c
+deleted file mode 100644
+index 2c587ca..0000000
+--- a/arch/ppc/platforms/85xx/sbc85xx.c
++++ /dev/null
+@@ -1,166 +0,0 @@
+-/*
+- * WindRiver PowerQUICC III SBC85xx board common routines
+- *
+- * Copyright 2002, 2003 Motorola Inc.
+- * Copyright 2004 Red Hat, Inc.
+- * 
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/seq_file.h>
+-#include <linux/serial.h>
+-#include <linux/module.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/open_pic.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/ppc_sys.h>
+-
+-#include <mm/mmu_decl.h>
+-
+-#include <platforms/85xx/sbc85xx.h>
+-
+-unsigned char __res[sizeof (bd_t)];
+-
+-#ifndef CONFIG_PCI
+-unsigned long isa_io_base = 0;
+-unsigned long isa_mem_base = 0;
+-unsigned long pci_dram_offset = 0;
+-#endif
+-
+-extern unsigned long total_memory;	/* in mm/init */
+-
+-/* Internal interrupts are all Level Sensitive, and Positive Polarity */
+-static u_char sbc8560_openpic_initsenses[] __initdata = {
+-	MPC85XX_INTERNAL_IRQ_SENSES,
+-	0x0,				/* External  0: */
+-	0x0,				/* External  1: */
+-#if defined(CONFIG_PCI)
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 2: PCI slot 0 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 3: PCI slot 1 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 4: PCI slot 2 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 5: PCI slot 3 */
+-#else
+-	0x0,				/* External  2: */
+-	0x0,				/* External  3: */
+-	0x0,				/* External  4: */
+-	0x0,				/* External  5: */
+-#endif
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 6: PHY */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 7: PHY */
+-	0x0,				/* External  8: */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* External 9: PHY */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* External 10: PHY */
+-	0x0,				/* External 11: */
+-};
+-
+-/* ************************************************************************ */
+-int
+-sbc8560_show_cpuinfo(struct seq_file *m)
+-{
+-	uint pvid, svid, phid1;
+-	uint memsize = total_memory;
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	pvid = mfspr(SPRN_PVR);
+-	svid = mfspr(SPRN_SVR);
+-
+-	seq_printf(m, "Vendor\t\t: Wind River\n");
+-	seq_printf(m, "Machine\t\t: SBC%s\n", cur_ppc_sys_spec->ppc_sys_name);
+-	seq_printf(m, "clock\t\t: %dMHz\n", freq / 1000000);
+-	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+-	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+-
+-	/* Display cpu Pll setting */
+-	phid1 = mfspr(SPRN_HID1);
+-	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+-
+-	/* Display the amount of memory */
+-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+-
+-	return 0;
+-}
+-
+-void __init
+-sbc8560_init_IRQ(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	/* Determine the Physical Address of the OpenPIC regs */
+-	phys_addr_t OpenPIC_PAddr =
+-	    binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
+-	OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
+-	OpenPIC_InitSenses = sbc8560_openpic_initsenses;
+-	OpenPIC_NumInitSenses = sizeof (sbc8560_openpic_initsenses);
+-
+-	/* Skip reserved space and internal sources */
+-	openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
+-	/* Map PIC IRQs 0-11 */
+-	openpic_set_sources(48, 12, OpenPIC_Addr + 0x10000);
+-
+-	/* we let openpic interrupts starting from an offset, to 
+-	 * leave space for cascading interrupts underneath.
+-	 */
+-	openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
+-
+-	return;
+-}
+-
+-/*
+- * interrupt routing
+- */
+-
+-#ifdef CONFIG_PCI
+-int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel,
+-		    unsigned char pin)
+-{
+-	static char pci_irq_table[][4] =
+-	    /*
+-	     *      PCI IDSEL/INTPIN->INTLINE
+-	     *        A      B      C      D
+-	     */
+-	{
+-		{PIRQA, PIRQB, PIRQC, PIRQD},
+-		{PIRQD, PIRQA, PIRQB, PIRQC},
+-		{PIRQC, PIRQD, PIRQA, PIRQB},
+-		{PIRQB, PIRQC, PIRQD, PIRQA},
+-	};
+-
+-	const long min_idsel = 12, max_idsel = 15, irqs_per_slot = 4;
+-	return PCI_IRQ_TABLE_LOOKUP;
+-}
+-
+-int mpc85xx_exclude_device(u_char bus, u_char devfn)
+-{
+-	if (bus == 0 && PCI_SLOT(devfn) == 0)
+-		return PCIBIOS_DEVICE_NOT_FOUND;
+-	else
+-		return PCIBIOS_SUCCESSFUL;
+-}
+-#endif /* CONFIG_PCI */
+diff --git a/arch/ppc/platforms/85xx/sbc85xx.h b/arch/ppc/platforms/85xx/sbc85xx.h
+deleted file mode 100644
+index 51df4dc..0000000
+--- a/arch/ppc/platforms/85xx/sbc85xx.h
++++ /dev/null
+@@ -1,70 +0,0 @@
+-/*
+- * WindRiver PowerQUICC III SBC85xx common board definitions
+- *
+- * Copyright 2003 Motorola Inc.
+- * Copyright 2004 Red Hat, Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __PLATFORMS_85XX_SBC85XX_H__
+-#define __PLATFORMS_85XX_SBC85XX_H__
+-
+-#include <linux/init.h>
+-#include <linux/seq_file.h>
+-#include <asm/ppcboot.h>
+-
+-#define BOARD_CCSRBAR		((uint)0xff700000)
+-#define CCSRBAR_SIZE		((uint)1024*1024)
+-
+-#define BCSR_ADDR		((uint)0xfc000000)
+-#define BCSR_SIZE		((uint)(16 * 1024 * 1024))
+-
+-#define UARTA_ADDR		(BCSR_ADDR + 0x00700000)
+-#define UARTB_ADDR		(BCSR_ADDR + 0x00800000)
+-#define RTC_DEVICE_ADDR		(BCSR_ADDR + 0x00900000)
+-#define EEPROM_ADDR		(BCSR_ADDR + 0x00b00000)
+-
+-extern int  sbc8560_show_cpuinfo(struct seq_file *m);
+-extern void sbc8560_init_IRQ(void) __init; 
+-
+-/* PCI interrupt controller */
+-#define PIRQA		MPC85xx_IRQ_EXT1
+-#define PIRQB		MPC85xx_IRQ_EXT2
+-#define PIRQC		MPC85xx_IRQ_EXT3
+-#define PIRQD		MPC85xx_IRQ_EXT4
+-
+-#define MPC85XX_PCI1_LOWER_IO	0x00000000
+-#define MPC85XX_PCI1_UPPER_IO	0x00ffffff
+-
+-#define MPC85XX_PCI1_LOWER_MEM	0x80000000
+-#define MPC85XX_PCI1_UPPER_MEM	0x9fffffff
+-
+-#define MPC85XX_PCI1_IO_BASE	0xe2000000
+-#define MPC85XX_PCI1_MEM_OFFSET	0x00000000
+-
+-#define MPC85XX_PCI1_IO_SIZE	0x01000000
+-
+-/* FCC1 Clock Source Configuration.  These can be
+- * redefined in the board specific file.
+- *    Can only choose from CLK9-12 */
+-#define F1_RXCLK       12
+-#define F1_TXCLK       11
+-
+-/* FCC2 Clock Source Configuration.  These can be
+- * redefined in the board specific file.
+- *    Can only choose from CLK13-16 */
+-#define F2_RXCLK       13
+-#define F2_TXCLK       14
+-
+-/* FCC3 Clock Source Configuration.  These can be
+- * redefined in the board specific file.
+- *    Can only choose from CLK13-16 */
+-#define F3_RXCLK       15
+-#define F3_TXCLK       16
+-
+-#endif /* __PLATFORMS_85XX_SBC85XX_H__ */
+diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
+deleted file mode 100644
+index b1f5b73..0000000
+--- a/arch/ppc/platforms/85xx/stx_gp3.c
++++ /dev/null
+@@ -1,340 +0,0 @@
+-/*
+- * STx GP3 board specific routines
+- *
+- * Dan Malek <dan at embeddededge.com>
+- * Copyright 2004 Embedded Edge, LLC
+- *
+- * Copied from mpc8560_ads.c
+- * Copyright 2002, 2003 Motorola Inc.
+- *
+- * Ported to 2.6, Matt Porter <mporter at kernel.crashing.org>
+- * Copyright 2004-2005 MontaVista Software, Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/blkdev.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/root_dev.h>
+-#include <linux/seq_file.h>
+-#include <linux/serial.h>
+-#include <linux/initrd.h>
+-#include <linux/module.h>
+-#include <linux/fsl_devices.h>
+-#include <linux/interrupt.h>
+-#include <linux/rio.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/open_pic.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/cpm2.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/ppc_sys.h>
+-
+-#include <syslib/cpm2_pic.h>
+-#include <syslib/ppc85xx_common.h>
+-#include <syslib/ppc85xx_rio.h>
+-
+-
+-unsigned char __res[sizeof(bd_t)];
+-
+-#ifndef CONFIG_PCI
+-unsigned long isa_io_base = 0;
+-unsigned long isa_mem_base = 0;
+-unsigned long pci_dram_offset = 0;
+-#endif
+-
+-/* Internal interrupts are all Level Sensitive, and Positive Polarity */
+-static u8 gp3_openpic_initsenses[] __initdata = {
+-	MPC85XX_INTERNAL_IRQ_SENSES,
+-	0x0,						/* External  0: */
+-#if defined(CONFIG_PCI)
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 1: PCI slot 0 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 2: PCI slot 1 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 3: PCI slot 2 */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 4: PCI slot 3 */
+-#else
+-	0x0,				/* External  1: */
+-	0x0,				/* External  2: */
+-	0x0,				/* External  3: */
+-	0x0,				/* External  4: */
+-#endif
+-	0x0,				/* External  5: */
+-	0x0,				/* External  6: */
+-	0x0,				/* External  7: */
+-	0x0,				/* External  8: */
+-	0x0,				/* External  9: */
+-	0x0,				/* External 10: */
+-	0x0,				/* External 11: */
+-};
+-
+-/*
+- * Setup the architecture
+- */
+-static void __init
+-gp3_setup_arch(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-	struct gianfar_platform_data *pdata;
+-	struct gianfar_mdio_data *mdata;
+-
+-	cpm2_reset();
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("gp3_setup_arch()", 0);
+-
+-	/* Set loops_per_jiffy to a half-way reasonable value,
+-	   for use until calibrate_delay gets called. */
+-	loops_per_jiffy = freq / HZ;
+-
+-#ifdef CONFIG_PCI
+-	/* setup PCI host bridges */
+-	mpc85xx_setup_hose();
+-#endif
+-
+-	/* setup the board related info for the MDIO bus */
+-	mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
+-
+-	mdata->irq[2] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[4] = MPC85xx_IRQ_EXT5;
+-	mdata->irq[31] = PHY_POLL;
+-
+-	/* setup the board related information for the enet controllers */
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
+-	if (pdata) {
+-	/*	pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 2;
+-		memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
+-	if (pdata) {
+-	/*	pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 4;
+-		memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+-	}
+-
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (initrd_start)
+-		ROOT_DEV = Root_RAM0;
+-	else
+-#endif
+-#ifdef	CONFIG_ROOT_NFS
+-		ROOT_DEV = Root_NFS;
+-#else
+-		ROOT_DEV = Root_HDA1;
+-#endif
+-
+-	printk ("bi_immr_base = %8.8lx\n", binfo->bi_immr_base);
+-}
+-
+-static irqreturn_t cpm2_cascade(int irq, void *dev_id)
+-{
+-	while ((irq = cpm2_get_irq()) >= 0)
+-		__do_IRQ(irq);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static struct irqaction cpm2_irqaction = {
+-	.handler	= cpm2_cascade,
+-	.flags		= IRQF_DISABLED,
+-	.mask		= CPU_MASK_NONE,
+-	.name		= "cpm2_cascade",
+-};
+-
+-static void __init
+-gp3_init_IRQ(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	/*
+-	 * Setup OpenPIC
+-	 */
+-
+-	/* Determine the Physical Address of the OpenPIC regs */
+-	phys_addr_t OpenPIC_PAddr =
+-	    binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
+-	OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
+-	OpenPIC_InitSenses = gp3_openpic_initsenses;
+-	OpenPIC_NumInitSenses = sizeof (gp3_openpic_initsenses);
+-
+-	/* Skip reserved space and internal sources */
+-	openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
+-
+-	/* Map PIC IRQs 0-11 */
+-	openpic_set_sources(48, 12, OpenPIC_Addr + 0x10000);
+-
+-	/*
+-	 * Let openpic interrupts starting from an offset, to
+-	 * leave space for cascading interrupts underneath.
+-	 */
+-	openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
+-
+-	/* Setup CPM2 PIC */
+-        cpm2_init_IRQ();
+-
+-	setup_irq(MPC85xx_IRQ_CPM, &cpm2_irqaction);
+-
+-	return;
+-}
+-
+-static int
+-gp3_show_cpuinfo(struct seq_file *m)
+-{
+-	uint pvid, svid, phid1;
+-	bd_t *binfo = (bd_t *) __res;
+-	uint	memsize;
+-	unsigned int freq;
+-	extern unsigned long total_memory;	/* in mm/init */
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	pvid = mfspr(SPRN_PVR);
+-	svid = mfspr(SPRN_SVR);
+-
+-	memsize = total_memory;
+-
+-	seq_printf(m, "Vendor\t\t: RPC Electronics STx \n");
+-	seq_printf(m, "Machine\t\t: GP3 - MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
+-	seq_printf(m, "bus freq\t: %u.%.6u MHz\n", freq / 1000000,
+-		   freq % 1000000);
+-	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+-	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+-
+-	/* Display cpu Pll setting */
+-	phid1 = mfspr(SPRN_HID1);
+-	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+-
+-	/* Display the amount of memory */
+-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+-
+-	return 0;
+-}
+-
+-#ifdef CONFIG_PCI
+-int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel,
+-		    unsigned char pin)
+-{
+-	static char pci_irq_table[][4] =
+-	    /*
+-	     *      PCI IDSEL/INTPIN->INTLINE
+-	     *        A      B      C      D
+-	     */
+-	{
+-		{PIRQA, PIRQB, PIRQC, PIRQD},
+-		{PIRQD, PIRQA, PIRQB, PIRQC},
+-		{PIRQC, PIRQD, PIRQA, PIRQB},
+-		{PIRQB, PIRQC, PIRQD, PIRQA},
+-	};
+-
+-	const long min_idsel = 12, max_idsel = 15, irqs_per_slot = 4;
+-	return PCI_IRQ_TABLE_LOOKUP;
+-}
+-
+-int mpc85xx_exclude_device(u_char bus, u_char devfn)
+-{
+-	if (bus == 0 && PCI_SLOT(devfn) == 0)
+-		return PCIBIOS_DEVICE_NOT_FOUND;
+-	else
+-		return PCIBIOS_SUCCESSFUL;
+-}
+-#endif /* CONFIG_PCI */
+-
+-#ifdef CONFIG_RAPIDIO
+-void
+-platform_rio_init(void)
+-{
+-	/*
+-	 * The STx firmware configures the RapidIO Local Access Window
+-	 * at 0xc0000000 with a size of 512MB.
+-	 */
+-	mpc85xx_rio_setup(0xc0000000, 0x20000000);
+-}
+-#endif /* CONFIG_RAPIDIO */
+-
+-void __init
+-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+-	      unsigned long r6, unsigned long r7)
+-{
+-	/* parse_bootinfo must always be called first */
+-	parse_bootinfo(find_bootinfo());
+-
+-	/*
+-	 * If we were passed in a board information, copy it into the
+-	 * residual data area.
+-	 */
+-	if (r3) {
+-		memcpy((void *) __res, (void *) (r3 + KERNELBASE),
+-		       sizeof (bd_t));
+-
+-	}
+-#if defined(CONFIG_BLK_DEV_INITRD)
+-	/*
+-	 * If the init RAM disk has been configured in, and there's a valid
+-	 * starting address for it, set it up.
+-	 */
+-	if (r4) {
+-		initrd_start = r4 + KERNELBASE;
+-		initrd_end = r5 + KERNELBASE;
+-	}
+-#endif				/* CONFIG_BLK_DEV_INITRD */
+-
+-	/* Copy the kernel command line arguments to a safe place. */
+-
+-	if (r6) {
+-		*(char *) (r7 + KERNELBASE) = 0;
+-		strcpy(cmd_line, (char *) (r6 + KERNELBASE));
+-	}
+-
+-	identify_ppc_sys_by_id(mfspr(SPRN_SVR));
+-
+-	/* setup the PowerPC module struct */
+-	ppc_md.setup_arch = gp3_setup_arch;
+-	ppc_md.show_cpuinfo = gp3_show_cpuinfo;
+-
+-	ppc_md.init_IRQ = gp3_init_IRQ;
+-	ppc_md.get_irq = openpic_get_irq;
+-
+-	ppc_md.restart = mpc85xx_restart;
+-	ppc_md.power_off = mpc85xx_power_off;
+-	ppc_md.halt = mpc85xx_halt;
+-
+-	ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
+-
+-	ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("platform_init(): exit", 0);
+-
+-	return;
+-}
+diff --git a/arch/ppc/platforms/85xx/stx_gp3.h b/arch/ppc/platforms/85xx/stx_gp3.h
+deleted file mode 100644
+index c6e34c0..0000000
+--- a/arch/ppc/platforms/85xx/stx_gp3.h
++++ /dev/null
+@@ -1,69 +0,0 @@
+-/*
+- * STx GP3 board definitions
+- *
+- * Dan Malek (dan at embeddededge.com)
+- * Copyright 2004 Embedded Edge, LLC
+- *
+- * Ported to 2.6, Matt Porter <mporter at kernel.crashing.org>
+- * Copyright 2004-2005 MontaVista Software, Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __MACH_STX_GP3_H
+-#define __MACH_STX_GP3_H
+-
+-#include <linux/init.h>
+-#include <asm/ppcboot.h>
+-
+-#define BOARD_CCSRBAR		((uint)0xe0000000)
+-#define CCSRBAR_SIZE		((uint)1024*1024)
+-
+-#define CPM_MAP_ADDR		(CCSRBAR + MPC85xx_CPM_OFFSET)
+-
+-#define BCSR_ADDR		((uint)0xfc000000)
+-#define BCSR_SIZE		((uint)(16 * 1024))
+-
+-#define BCSR_TSEC1_RESET	0x00000080
+-#define BCSR_TSEC2_RESET	0x00000040
+-#define BCSR_LED1		0x00000008
+-#define BCSR_LED2		0x00000004
+-#define BCSR_LED3		0x00000002
+-#define BCSR_LED4		0x00000001
+-
+-extern void mpc85xx_setup_hose(void) __init;
+-extern void mpc85xx_restart(char *cmd);
+-extern void mpc85xx_power_off(void);
+-extern void mpc85xx_halt(void);
+-extern void mpc85xx_init_IRQ(void) __init;
+-extern unsigned long mpc85xx_find_end_of_memory(void) __init;
+-extern void mpc85xx_calibrate_decr(void) __init;
+-
+-#define PCI_CFG_ADDR_OFFSET	(0x8000)
+-#define PCI_CFG_DATA_OFFSET	(0x8004)
+-
+-/* PCI interrupt controller */
+-#define PIRQA		MPC85xx_IRQ_EXT1
+-#define PIRQB		MPC85xx_IRQ_EXT2
+-#define PIRQC		MPC85xx_IRQ_EXT3
+-#define PIRQD		MPC85xx_IRQ_EXT4
+-#define PCI_MIN_IDSEL	16
+-#define PCI_MAX_IDSEL	19
+-#define PCI_IRQ_SLOT	4
+-
+-#define MPC85XX_PCI1_LOWER_IO	0x00000000
+-#define MPC85XX_PCI1_UPPER_IO	0x00ffffff
+-
+-#define MPC85XX_PCI1_LOWER_MEM	0x80000000
+-#define MPC85XX_PCI1_UPPER_MEM	0x9fffffff
+-
+-#define MPC85XX_PCI1_IO_BASE	0xe2000000
+-#define MPC85XX_PCI1_MEM_OFFSET	0x00000000
+-
+-#define MPC85XX_PCI1_IO_SIZE	0x01000000
+-
+-#endif /* __MACH_STX_GP3_H */
+diff --git a/arch/ppc/platforms/85xx/tqm85xx.c b/arch/ppc/platforms/85xx/tqm85xx.c
+deleted file mode 100644
+index 4ee2bd1..0000000
+--- a/arch/ppc/platforms/85xx/tqm85xx.c
++++ /dev/null
+@@ -1,412 +0,0 @@
+-/*
+- * TQM85xx (40/41/55/60) board specific routines
+- *
+- * Copyright (c) 2005 DENX Software Engineering
+- * Stefan Roese <sr at denx.de>
+- *
+- * Based on original work by
+- * 	Kumar Gala <galak at kernel.crashing.org>
+- *      Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/pci.h>
+-#include <linux/kdev_t.h>
+-#include <linux/major.h>
+-#include <linux/console.h>
+-#include <linux/delay.h>
+-#include <linux/seq_file.h>
+-#include <linux/root_dev.h>
+-#include <linux/serial.h>
+-#include <linux/tty.h>	/* for linux/serial_core.h */
+-#include <linux/serial_core.h>
+-#include <linux/initrd.h>
+-#include <linux/module.h>
+-#include <linux/fsl_devices.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/atomic.h>
+-#include <asm/time.h>
+-#include <asm/io.h>
+-#include <asm/machdep.h>
+-#include <asm/open_pic.h>
+-#include <asm/bootinfo.h>
+-#include <asm/pci-bridge.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/kgdb.h>
+-#include <asm/ppc_sys.h>
+-#include <asm/cpm2.h>
+-#include <mm/mmu_decl.h>
+-
+-#include <syslib/ppc85xx_setup.h>
+-#include <syslib/cpm2_pic.h>
+-#include <syslib/ppc85xx_common.h>
+-#include <syslib/ppc85xx_rio.h>
+-
+-#ifndef CONFIG_PCI
+-unsigned long isa_io_base = 0;
+-unsigned long isa_mem_base = 0;
+-#endif
+-
+-
+-extern unsigned long total_memory;	/* in mm/init */
+-
+-unsigned char __res[sizeof (bd_t)];
+-
+-/* Internal interrupts are all Level Sensitive, and Positive Polarity */
+-static u_char tqm85xx_openpic_initsenses[] __initdata = {
+-	MPC85XX_INTERNAL_IRQ_SENSES,
+-	0x0,						/* External  0: */
+-	0x0,						/* External  1: */
+-#if defined(CONFIG_PCI)
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 2: PCI INTA */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 3: PCI INTB */
+-#else
+-	0x0,				/* External  2: */
+-	0x0,				/* External  3: */
+-#endif
+-	0x0,				/* External  4: */
+-	0x0,				/* External  5: */
+-	0x0,				/* External  6: */
+-	0x0,				/* External  7: */
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE),	/* External 8: PHY */
+-	0x0,				/* External  9: */
+-	0x0,				/* External 10: */
+-	0x0,				/* External 11: */
+-};
+-
+-/* ************************************************************************
+- *
+- * Setup the architecture
+- *
+- */
+-static void __init
+-tqm85xx_setup_arch(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-	struct gianfar_platform_data *pdata;
+-	struct gianfar_mdio_data *mdata;
+-
+-#ifdef CONFIG_MPC8560
+-	cpm2_reset();
+-#endif
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("tqm85xx_setup_arch()", 0);
+-
+-	/* Set loops_per_jiffy to a half-way reasonable value,
+-	   for use until calibrate_delay gets called. */
+-	loops_per_jiffy = freq / HZ;
+-
+-#ifdef CONFIG_PCI
+-	/* setup PCI host bridges */
+-	mpc85xx_setup_hose();
+-#endif
+-
+-#ifndef CONFIG_MPC8560
+-#if defined(CONFIG_SERIAL_8250)
+-	mpc85xx_early_serial_map();
+-#endif
+-
+-#ifdef CONFIG_SERIAL_TEXT_DEBUG
+-	/* Invalidate the entry we stole earlier the serial ports
+-	 * should be properly mapped */
+-	invalidate_tlbcam_entry(num_tlbcam_entries - 1);
+-#endif
+-#endif /* CONFIG_MPC8560 */
+-
+-	/* setup the board related info for the MDIO bus */
+-	mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
+-
+-	mdata->irq[0] = MPC85xx_IRQ_EXT8;
+-	mdata->irq[1] = MPC85xx_IRQ_EXT8;
+-	mdata->irq[2] = PHY_POLL;
+-	mdata->irq[3] = MPC85xx_IRQ_EXT8;
+-	mdata->irq[31] = PHY_POLL;
+-
+-	/* setup the board related information for the enet controllers */
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 2;
+-		memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+-	}
+-
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
+-	if (pdata) {
+-		pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 1;
+-		memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+-	}
+-
+-#ifdef CONFIG_MPC8540
+-	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC);
+-	if (pdata) {
+-		pdata->board_flags = 0;
+-		pdata->bus_id = 0;
+-		pdata->phy_id = 3;
+-		memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6);
+-	}
+-#endif
+-
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (initrd_start)
+-		ROOT_DEV = Root_RAM0;
+-	else
+-#endif
+-#ifdef  CONFIG_ROOT_NFS
+-		ROOT_DEV = Root_NFS;
+-#else
+-	ROOT_DEV = Root_HDA1;
+-#endif
+-}
+-
+-#ifdef CONFIG_MPC8560
+-static irqreturn_t cpm2_cascade(int irq, void *dev_id)
+-{
+-	while ((irq = cpm2_get_irq()) >= 0)
+-		__do_IRQ(irq);
+-	return IRQ_HANDLED;
+-}
+-
+-static struct irqaction cpm2_irqaction = {
+-	.handler = cpm2_cascade,
+-	.flags = IRQF_DISABLED,
+-	.mask = CPU_MASK_NONE,
+-	.name = "cpm2_cascade",
+-};
+-#endif /* CONFIG_MPC8560 */
+-
+-void __init
+-tqm85xx_init_IRQ(void)
+-{
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	/* Determine the Physical Address of the OpenPIC regs */
+-	phys_addr_t OpenPIC_PAddr =
+-		binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
+-	OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
+-	OpenPIC_InitSenses = tqm85xx_openpic_initsenses;
+-	OpenPIC_NumInitSenses = sizeof (tqm85xx_openpic_initsenses);
+-
+-	/* Skip reserved space and internal sources */
+-	openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
+-
+-	/* Map PIC IRQs 0-11 */
+-	openpic_set_sources(48, 12, OpenPIC_Addr + 0x10000);
+-
+-	/* we let openpic interrupts starting from an offset, to
+-	 * leave space for cascading interrupts underneath.
+-	 */
+-	openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
+-
+-#ifdef CONFIG_MPC8560
+-	/* Setup CPM2 PIC */
+-        cpm2_init_IRQ();
+-
+-	setup_irq(MPC85xx_IRQ_CPM, &cpm2_irqaction);
+-#endif /* CONFIG_MPC8560 */
+-
+-	return;
+-}
+-
+-int tqm85xx_show_cpuinfo(struct seq_file *m)
+-{
+-	uint pvid, svid, phid1;
+-	uint memsize = total_memory;
+-	bd_t *binfo = (bd_t *) __res;
+-	unsigned int freq;
+-
+-	/* get the core frequency */
+-	freq = binfo->bi_intfreq;
+-
+-	pvid = mfspr(SPRN_PVR);
+-	svid = mfspr(SPRN_SVR);
+-
+-	seq_printf(m, "Vendor\t\t: TQ Components\n");
+-	seq_printf(m, "Machine\t\t: TQM%s\n", cur_ppc_sys_spec->ppc_sys_name);
+-	seq_printf(m, "clock\t\t: %dMHz\n", freq / 1000000);
+-	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+-	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+-
+-	/* Display cpu Pll setting */
+-	phid1 = mfspr(SPRN_HID1);
+-	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+-
+-	/* Display the amount of memory */
+-	seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+-
+-	return 0;
+-}
+-
+-#if defined(CONFIG_I2C) && defined(CONFIG_SENSORS_DS1337)
+-extern ulong ds1337_get_rtc_time(void);
+-extern int ds1337_set_rtc_time(unsigned long nowtime);
+-
+-static int __init
+-tqm85xx_rtc_hookup(void)
+-{
+-	struct timespec	tv;
+-
+-        ppc_md.set_rtc_time = ds1337_set_rtc_time;
+-        ppc_md.get_rtc_time = ds1337_get_rtc_time;
+-
+-	tv.tv_nsec = 0;
+-	tv.tv_sec = (ppc_md.get_rtc_time)();
+-	do_settimeofday(&tv);
+-
+-	return 0;
+-}
+-late_initcall(tqm85xx_rtc_hookup);
+-#endif
+-
+-#ifdef CONFIG_PCI
+-/*
+- * interrupt routing
+- */
+-int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+-{
+-	static char pci_irq_table[][4] =
+-		/*
+-		 *      PCI IDSEL/INTPIN->INTLINE
+-		 *       A      B      C      D
+-		 */
+-		{
+-			{PIRQA, PIRQB, 0, 0},
+-		};
+-
+-	const long min_idsel = 0x1c, max_idsel = 0x1c, irqs_per_slot = 4;
+-	return PCI_IRQ_TABLE_LOOKUP;
+-}
+-
+-int mpc85xx_exclude_device(u_char bus, u_char devfn)
+-{
+-	if (bus == 0 && PCI_SLOT(devfn) == 0)
+-		return PCIBIOS_DEVICE_NOT_FOUND;
+-	else
+-		return PCIBIOS_SUCCESSFUL;
+-}
+-
+-#endif /* CONFIG_PCI */
+-
+-#ifdef CONFIG_RAPIDIO
+-void platform_rio_init(void)
+-{
+-	/* 512MB RIO LAW at 0xc0000000 */
+-	mpc85xx_rio_setup(0xc0000000, 0x20000000);
+-}
+-#endif /* CONFIG_RAPIDIO */
+-
+-/* ************************************************************************ */
+-void __init
+-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+-	      unsigned long r6, unsigned long r7)
+-{
+-	/* parse_bootinfo must always be called first */
+-	parse_bootinfo(find_bootinfo());
+-
+-	/*
+-	 * If we were passed in a board information, copy it into the
+-	 * residual data area.
+-	 */
+-	if (r3) {
+-		memcpy((void *) __res, (void *) (r3 + KERNELBASE),
+-		       sizeof (bd_t));
+-	}
+-
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) && !defined(CONFIG_MPC8560)
+-	{
+-		bd_t *binfo = (bd_t *) __res;
+-		struct uart_port p;
+-
+-		/* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */
+-		settlbcam(num_tlbcam_entries - 1, binfo->bi_immr_base,
+-			  binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0);
+-
+-		memset(&p, 0, sizeof (p));
+-		p.iotype = UPIO_MEM;
+-		p.membase = (void *) binfo->bi_immr_base + MPC85xx_UART0_OFFSET;
+-		p.uartclk = binfo->bi_busfreq;
+-
+-		gen550_init(0, &p);
+-
+-		memset(&p, 0, sizeof (p));
+-		p.iotype = UPIO_MEM;
+-		p.membase = (void *) binfo->bi_immr_base + MPC85xx_UART1_OFFSET;
+-		p.uartclk = binfo->bi_busfreq;
+-
+-		gen550_init(1, &p);
+-	}
+-#endif
+-
+-#if defined(CONFIG_BLK_DEV_INITRD)
+-	/*
+-	 * If the init RAM disk has been configured in, and there's a valid
+-	 * starting address for it, set it up.
+-	 */
+-	if (r4) {
+-		initrd_start = r4 + KERNELBASE;
+-		initrd_end = r5 + KERNELBASE;
+-	}
+-#endif				/* CONFIG_BLK_DEV_INITRD */
+-
+-	/* Copy the kernel command line arguments to a safe place. */
+-
+-	if (r6) {
+-		*(char *) (r7 + KERNELBASE) = 0;
+-		strcpy(cmd_line, (char *) (r6 + KERNELBASE));
+-	}
+-
+-	identify_ppc_sys_by_id(mfspr(SPRN_SVR));
+-
+-	/* setup the PowerPC module struct */
+-	ppc_md.setup_arch = tqm85xx_setup_arch;
+-	ppc_md.show_cpuinfo = tqm85xx_show_cpuinfo;
+-
+-	ppc_md.init_IRQ = tqm85xx_init_IRQ;
+-	ppc_md.get_irq = openpic_get_irq;
+-
+-	ppc_md.restart = mpc85xx_restart;
+-	ppc_md.power_off = mpc85xx_power_off;
+-	ppc_md.halt = mpc85xx_halt;
+-
+-	ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
+-
+-	ppc_md.time_init = NULL;
+-	ppc_md.set_rtc_time = NULL;
+-	ppc_md.get_rtc_time = NULL;
+-	ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
+-
+-#ifndef CONFIG_MPC8560
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+-	ppc_md.progress = gen550_progress;
+-#endif	/* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
+-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_KGDB)
+-	ppc_md.early_serial_map = mpc85xx_early_serial_map;
+-#endif	/* CONFIG_SERIAL_8250 && CONFIG_KGDB */
+-#endif /* CONFIG_MPC8560 */
+-
+-	if (ppc_md.progress)
+-		ppc_md.progress("tqm85xx_init(): exit", 0);
+-
+-	return;
+-}
+diff --git a/arch/ppc/platforms/85xx/tqm85xx.h b/arch/ppc/platforms/85xx/tqm85xx.h
+deleted file mode 100644
+index 57284e6..0000000
+--- a/arch/ppc/platforms/85xx/tqm85xx.h
++++ /dev/null
+@@ -1,53 +0,0 @@
+-/*
+- * TQM85xx (40/41/55/60) board definitions
+- *
+- * Copyright (c) 2005 DENX Software Engineering
+- * Stefan Roese <sr at denx.de>
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __MACH_TQM85XX_H
+-#define __MACH_TQM85XX_H
+-
+-#include <linux/init.h>
+-#include <asm/ppcboot.h>
+-
+-#define BOARD_CCSRBAR		((uint)0xe0000000)
+-#define CCSRBAR_SIZE		((uint)1024*1024)
+-
+-#define CPM_MAP_ADDR		(CCSRBAR + MPC85xx_CPM_OFFSET)
+-
+-#define PCI_CFG_ADDR_OFFSET	(0x8000)
+-#define PCI_CFG_DATA_OFFSET	(0x8004)
+-
+-/* PCI interrupt controller */
+-#define PIRQA			MPC85xx_IRQ_EXT2
+-#define PIRQB			MPC85xx_IRQ_EXT3
+-
+-#define MPC85XX_PCI1_LOWER_IO	0x00000000
+-#define MPC85XX_PCI1_UPPER_IO	0x00ffffff
+-
+-#define MPC85XX_PCI1_LOWER_MEM	0x80000000
+-#define MPC85XX_PCI1_UPPER_MEM	0x9fffffff
+-
+-#define MPC85XX_PCI1_IO_BASE	0xe2000000
+-#define MPC85XX_PCI1_MEM_OFFSET	0x00000000
+-
+-#define MPC85XX_PCI1_IO_SIZE	0x01000000
+-
+-#define BASE_BAUD 115200
+-
+-extern void mpc85xx_setup_hose(void) __init;
+-extern void mpc85xx_restart(char *cmd);
+-extern void mpc85xx_power_off(void);
+-extern void mpc85xx_halt(void);
+-extern void mpc85xx_init_IRQ(void) __init;
+-extern unsigned long mpc85xx_find_end_of_memory(void) __init;
+-extern void mpc85xx_calibrate_decr(void) __init;
+-
+-#endif /* __MACH_TQM85XX_H */
+diff --git a/arch/ppc/platforms/ev64260.c b/arch/ppc/platforms/ev64260.c
+index 976270d..c1f77e1 100644
+--- a/arch/ppc/platforms/ev64260.c
++++ b/arch/ppc/platforms/ev64260.c
+@@ -336,7 +336,7 @@ ev64260_early_serial_map(void)
+ #endif
+ 
+ 		if (early_serial_setup(&port) != 0)
+-			printk(KERN_WARNING "Early serial init of port 0"
++			printk(KERN_WARNING "Early serial init of port 0 "
+ 				"failed\n");
+ 
+ 		first_time = 0;
+@@ -388,7 +388,7 @@ ev64260_setup_arch(void)
+ 	ev64260_early_serial_map();
+ #endif
+ 
+-	printk(KERN_INFO "%s %s port (C) 2001 MontaVista Software, Inc."
++	printk(KERN_INFO "%s %s port (C) 2001 MontaVista Software, Inc. "
+ 		"(source at mvista.com)\n", BOARD_VENDOR, BOARD_MACHINE);
+ 
+ 	if (ppc_md.progress)
+diff --git a/arch/ppc/platforms/katana.c b/arch/ppc/platforms/katana.c
+index 52f63e6..fe6e88c 100644
+--- a/arch/ppc/platforms/katana.c
++++ b/arch/ppc/platforms/katana.c
+@@ -838,27 +838,6 @@ katana_find_end_of_memory(void)
+ 	return bdp->bi_memsize;
+ }
+ 
+-#if defined(CONFIG_I2C_MV64XXX) && defined(CONFIG_SENSORS_M41T00)
+-extern ulong	m41t00_get_rtc_time(void);
+-extern int	m41t00_set_rtc_time(ulong);
+-
+-static int __init
+-katana_rtc_hookup(void)
+-{
+-	struct timespec	tv;
+-
+-	ppc_md.get_rtc_time = m41t00_get_rtc_time;
+-	ppc_md.set_rtc_time = m41t00_set_rtc_time;
+-
+-	tv.tv_nsec = 0;
+-	tv.tv_sec = (ppc_md.get_rtc_time)();
+-	do_settimeofday(&tv);
+-
+-	return 0;
+-}
+-late_initcall(katana_rtc_hookup);
+-#endif
+-
+ #if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
+ static void __init
+ katana_map_io(void)
+diff --git a/arch/ppc/platforms/mpc866ads_setup.c b/arch/ppc/platforms/mpc866ads_setup.c
+index bf72204..62370f4 100644
+--- a/arch/ppc/platforms/mpc866ads_setup.c
++++ b/arch/ppc/platforms/mpc866ads_setup.c
+@@ -32,7 +32,7 @@
+ #include <asm/time.h>
+ #include <asm/ppcboot.h>
+ #include <asm/8xx_immap.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #include <asm/ppc_sys.h>
+ #include <asm/mpc8xx.h>
+ 
+diff --git a/arch/ppc/platforms/mpc885ads_setup.c b/arch/ppc/platforms/mpc885ads_setup.c
+index 87deaef..ba06cc0 100644
+--- a/arch/ppc/platforms/mpc885ads_setup.c
++++ b/arch/ppc/platforms/mpc885ads_setup.c
+@@ -31,7 +31,7 @@
+ #include <asm/time.h>
+ #include <asm/ppcboot.h>
+ #include <asm/8xx_immap.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #include <asm/ppc_sys.h>
+ 
+ extern unsigned char __res[];
+diff --git a/arch/ppc/platforms/prep_pci.c b/arch/ppc/platforms/prep_pci.c
+index 1df3150..8ed433e 100644
+--- a/arch/ppc/platforms/prep_pci.c
++++ b/arch/ppc/platforms/prep_pci.c
+@@ -1099,7 +1099,6 @@ prep_pib_init(void)
+ 				pci_write_config_byte(dev, 0x43, reg);
+ 			}
+ 		}
+-		pci_dev_put(dev);
+ 	}
+ 
+ 	if ((dev = pci_get_device(PCI_VENDOR_ID_WINBOND,
+diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
+index 543795b..52ddebe 100644
+--- a/arch/ppc/syslib/Makefile
++++ b/arch/ppc/syslib/Makefile
+@@ -87,20 +87,6 @@ endif
+ obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
+ obj-$(CONFIG_MPC10X_BRIDGE)	+= mpc10x_common.o ppc_sys.o
+ obj-$(CONFIG_MPC10X_OPENPIC)	+= open_pic.o
+-obj-$(CONFIG_85xx)		+= open_pic.o ppc85xx_common.o ppc85xx_setup.o \
+-					ppc_sys.o mpc85xx_sys.o \
+-					mpc85xx_devices.o
+-ifeq ($(CONFIG_85xx),y)
+-obj-$(CONFIG_PCI)		+= pci_auto.o
+-endif
+-obj-$(CONFIG_RAPIDIO)		+= ppc85xx_rio.o
+-obj-$(CONFIG_83xx)		+= ppc83xx_setup.o ppc_sys.o \
+-					mpc83xx_sys.o mpc83xx_devices.o ipic.o
+-ifeq ($(CONFIG_83xx),y)
+-obj-$(CONFIG_PCI)		+= pci_auto.o
+-endif
+-obj-$(CONFIG_MPC8548_CDS)	+= todc_time.o
+-obj-$(CONFIG_MPC8555_CDS)	+= todc_time.o
+ obj-$(CONFIG_PPC_MPC52xx)	+= mpc52xx_setup.o mpc52xx_pic.o \
+ 					mpc52xx_sys.o mpc52xx_devices.o ppc_sys.o
+ ifeq ($(CONFIG_PPC_MPC52xx),y)
+diff --git a/arch/ppc/syslib/gt64260_pic.c b/arch/ppc/syslib/gt64260_pic.c
+index e84d432..3b4fcca 100644
+--- a/arch/ppc/syslib/gt64260_pic.c
++++ b/arch/ppc/syslib/gt64260_pic.c
+@@ -35,7 +35,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/sched.h>
+ #include <linux/signal.h>
+-#include <linux/stddef.h>
+ #include <linux/delay.h>
+ #include <linux/irq.h>
+ 
+diff --git a/arch/ppc/syslib/ipic.c b/arch/ppc/syslib/ipic.c
+deleted file mode 100644
+index 9192777..0000000
+--- a/arch/ppc/syslib/ipic.c
++++ /dev/null
+@@ -1,646 +0,0 @@
+-/*
+- * arch/ppc/syslib/ipic.c
+- *
+- * IPIC routines implementations.
+- *
+- * Copyright 2005 Freescale Semiconductor, Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/reboot.h>
+-#include <linux/slab.h>
+-#include <linux/stddef.h>
+-#include <linux/sched.h>
+-#include <linux/signal.h>
+-#include <linux/sysdev.h>
+-#include <asm/irq.h>
+-#include <asm/io.h>
+-#include <asm/ipic.h>
+-#include <asm/mpc83xx.h>
+-
+-#include "ipic.h"
+-
+-static struct ipic p_ipic;
+-static struct ipic * primary_ipic;
+-
+-static struct ipic_info ipic_info[] = {
+-	[9] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_D,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 24,
+-		.prio_mask = 0,
+-	},
+-	[10] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_D,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 25,
+-		.prio_mask = 1,
+-	},
+-	[11] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_D,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 26,
+-		.prio_mask = 2,
+-	},
+-	[14] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_D,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 29,
+-		.prio_mask = 5,
+-	},
+-	[15] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_D,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 30,
+-		.prio_mask = 6,
+-	},
+-	[16] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_D,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 31,
+-		.prio_mask = 7,
+-	},
+-	[17] = {
+-		.pend	= IPIC_SEPNR,
+-		.mask	= IPIC_SEMSR,
+-		.prio	= IPIC_SMPRR_A,
+-		.force	= IPIC_SEFCR,
+-		.bit	= 1,
+-		.prio_mask = 5,
+-	},
+-	[18] = {
+-		.pend	= IPIC_SEPNR,
+-		.mask	= IPIC_SEMSR,
+-		.prio	= IPIC_SMPRR_A,
+-		.force	= IPIC_SEFCR,
+-		.bit	= 2,
+-		.prio_mask = 6,
+-	},
+-	[19] = {
+-		.pend	= IPIC_SEPNR,
+-		.mask	= IPIC_SEMSR,
+-		.prio	= IPIC_SMPRR_A,
+-		.force	= IPIC_SEFCR,
+-		.bit	= 3,
+-		.prio_mask = 7,
+-	},
+-	[20] = {
+-		.pend	= IPIC_SEPNR,
+-		.mask	= IPIC_SEMSR,
+-		.prio	= IPIC_SMPRR_B,
+-		.force	= IPIC_SEFCR,
+-		.bit	= 4,
+-		.prio_mask = 4,
+-	},
+-	[21] = {
+-		.pend	= IPIC_SEPNR,
+-		.mask	= IPIC_SEMSR,
+-		.prio	= IPIC_SMPRR_B,
+-		.force	= IPIC_SEFCR,
+-		.bit	= 5,
+-		.prio_mask = 5,
+-	},
+-	[22] = {
+-		.pend	= IPIC_SEPNR,
+-		.mask	= IPIC_SEMSR,
+-		.prio	= IPIC_SMPRR_B,
+-		.force	= IPIC_SEFCR,
+-		.bit	= 6,
+-		.prio_mask = 6,
+-	},
+-	[23] = {
+-		.pend	= IPIC_SEPNR,
+-		.mask	= IPIC_SEMSR,
+-		.prio	= IPIC_SMPRR_B,
+-		.force	= IPIC_SEFCR,
+-		.bit	= 7,
+-		.prio_mask = 7,
+-	},
+-	[32] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_A,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 0,
+-		.prio_mask = 0,
+-	},
+-	[33] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_A,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 1,
+-		.prio_mask = 1,
+-	},
+-	[34] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_A,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 2,
+-		.prio_mask = 2,
+-	},
+-	[35] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_A,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 3,
+-		.prio_mask = 3,
+-	},
+-	[36] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_A,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 4,
+-		.prio_mask = 4,
+-	},
+-	[37] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_A,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 5,
+-		.prio_mask = 5,
+-	},
+-	[38] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_A,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 6,
+-		.prio_mask = 6,
+-	},
+-	[39] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_H,
+-		.prio	= IPIC_SIPRR_A,
+-		.force	= IPIC_SIFCR_H,
+-		.bit	= 7,
+-		.prio_mask = 7,
+-	},
+-	[48] = {
+-		.pend	= IPIC_SEPNR,
+-		.mask	= IPIC_SEMSR,
+-		.prio	= IPIC_SMPRR_A,
+-		.force	= IPIC_SEFCR,
+-		.bit	= 0,
+-		.prio_mask = 4,
+-	},
+-	[64] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= IPIC_SMPRR_A,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 0,
+-		.prio_mask = 0,
+-	},
+-	[65] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= IPIC_SMPRR_A,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 1,
+-		.prio_mask = 1,
+-	},
+-	[66] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= IPIC_SMPRR_A,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 2,
+-		.prio_mask = 2,
+-	},
+-	[67] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= IPIC_SMPRR_A,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 3,
+-		.prio_mask = 3,
+-	},
+-	[68] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= IPIC_SMPRR_B,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 4,
+-		.prio_mask = 0,
+-	},
+-	[69] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= IPIC_SMPRR_B,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 5,
+-		.prio_mask = 1,
+-	},
+-	[70] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= IPIC_SMPRR_B,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 6,
+-		.prio_mask = 2,
+-	},
+-	[71] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= IPIC_SMPRR_B,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 7,
+-		.prio_mask = 3,
+-	},
+-	[72] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 8,
+-	},
+-	[73] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 9,
+-	},
+-	[74] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 10,
+-	},
+-	[75] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 11,
+-	},
+-	[76] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 12,
+-	},
+-	[77] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 13,
+-	},
+-	[78] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 14,
+-	},
+-	[79] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 15,
+-	},
+-	[80] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 16,
+-	},
+-	[84] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 20,
+-	},
+-	[85] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 21,
+-	},
+-	[90] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 26,
+-	},
+-	[91] = {
+-		.pend	= IPIC_SIPNR_H,
+-		.mask	= IPIC_SIMSR_L,
+-		.prio	= 0,
+-		.force	= IPIC_SIFCR_L,
+-		.bit	= 27,
+-	},
+-};
+-
+-static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
+-{
+-	return in_be32(base + (reg >> 2));
+-}
+-
+-static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
+-{
+-	out_be32(base + (reg >> 2), value);
+-}
+-
+-static inline struct ipic * ipic_from_irq(unsigned int irq)
+-{
+-	return primary_ipic;
+-}
+-
+-static void ipic_enable_irq(unsigned int irq)
+-{
+-	struct ipic *ipic = ipic_from_irq(irq);
+-	unsigned int src = irq - ipic->irq_offset;
+-	u32 temp;
+-
+-	temp = ipic_read(ipic->regs, ipic_info[src].mask);
+-	temp |= (1 << (31 - ipic_info[src].bit));
+-	ipic_write(ipic->regs, ipic_info[src].mask, temp);
+-}
+-
+-static void ipic_disable_irq(unsigned int irq)
+-{
+-	struct ipic *ipic = ipic_from_irq(irq);
+-	unsigned int src = irq - ipic->irq_offset;
+-	u32 temp;
+-
+-	temp = ipic_read(ipic->regs, ipic_info[src].mask);
+-	temp &= ~(1 << (31 - ipic_info[src].bit));
+-	ipic_write(ipic->regs, ipic_info[src].mask, temp);
+-}
+-
+-static void ipic_disable_irq_and_ack(unsigned int irq)
+-{
+-	struct ipic *ipic = ipic_from_irq(irq);
+-	unsigned int src = irq - ipic->irq_offset;
+-	u32 temp;
+-
+-	ipic_disable_irq(irq);
+-
+-	temp = ipic_read(ipic->regs, ipic_info[src].pend);
+-	temp |= (1 << (31 - ipic_info[src].bit));
+-	ipic_write(ipic->regs, ipic_info[src].pend, temp);
+-}
+-
+-static void ipic_end_irq(unsigned int irq)
+-{
+-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+-		ipic_enable_irq(irq);
+-}
+-
+-struct hw_interrupt_type ipic = {
+-	.typename = " IPIC  ",
+-	.enable = ipic_enable_irq,
+-	.disable = ipic_disable_irq,
+-	.ack = ipic_disable_irq_and_ack,
+-	.end = ipic_end_irq,
+-};
+-
+-void __init ipic_init(phys_addr_t phys_addr,
+-		unsigned int flags,
+-		unsigned int irq_offset,
+-		unsigned char *senses,
+-		unsigned int senses_count)
+-{
+-	u32 i, temp = 0;
+-
+-	primary_ipic = &p_ipic;
+-	primary_ipic->regs = ioremap(phys_addr, MPC83xx_IPIC_SIZE);
+-
+-	primary_ipic->irq_offset = irq_offset;
+-
+-	ipic_write(primary_ipic->regs, IPIC_SICNR, 0x0);
+-
+-	/* default priority scheme is grouped. If spread mode is required
+-	 * configure SICFR accordingly */
+-	if (flags & IPIC_SPREADMODE_GRP_A)
+-		temp |= SICFR_IPSA;
+-	if (flags & IPIC_SPREADMODE_GRP_D)
+-		temp |= SICFR_IPSD;
+-	if (flags & IPIC_SPREADMODE_MIX_A)
+-		temp |= SICFR_MPSA;
+-	if (flags & IPIC_SPREADMODE_MIX_B)
+-		temp |= SICFR_MPSB;
+-
+-	ipic_write(primary_ipic->regs, IPIC_SICNR, temp);
+-
+-	/* handle MCP route */
+-	temp = 0;
+-	if (flags & IPIC_DISABLE_MCP_OUT)
+-		temp = SERCR_MCPR;
+-	ipic_write(primary_ipic->regs, IPIC_SERCR, temp);
+-
+-	/* handle routing of IRQ0 to MCP */
+-	temp = ipic_read(primary_ipic->regs, IPIC_SEMSR);
+-
+-	if (flags & IPIC_IRQ0_MCP)
+-		temp |= SEMSR_SIRQ0;
+-	else
+-		temp &= ~SEMSR_SIRQ0;
+-
+-	ipic_write(primary_ipic->regs, IPIC_SEMSR, temp);
+-
+-	for (i = 0 ; i < NR_IPIC_INTS ; i++) {
+-		irq_desc[i+irq_offset].chip = &ipic;
+-		irq_desc[i+irq_offset].status = IRQ_LEVEL;
+-	}
+-
+-	temp = 0;
+-	for (i = 0 ; i < senses_count ; i++) {
+-		if ((senses[i] & IRQ_SENSE_MASK) == IRQ_SENSE_EDGE) {
+-			temp |= 1 << (15 - i);
+-			if (i != 0)
+-				irq_desc[i + irq_offset + MPC83xx_IRQ_EXT1 - 1].status = 0;
+-			else
+-				irq_desc[irq_offset + MPC83xx_IRQ_EXT0].status = 0;
+-		}
+-	}
+-	ipic_write(primary_ipic->regs, IPIC_SECNR, temp);
+-
+-	printk ("IPIC (%d IRQ sources, %d External IRQs) at %p\n", NR_IPIC_INTS,
+-			senses_count, primary_ipic->regs);
+-}
+-
+-int ipic_set_priority(unsigned int irq, unsigned int priority)
+-{
+-	struct ipic *ipic = ipic_from_irq(irq);
+-	unsigned int src = irq - ipic->irq_offset;
+-	u32 temp;
+-
+-	if (priority > 7)
+-		return -EINVAL;
+-	if (src > 127)
+-		return -EINVAL;
+-	if (ipic_info[src].prio == 0)
+-		return -EINVAL;
+-
+-	temp = ipic_read(ipic->regs, ipic_info[src].prio);
+-
+-	if (priority < 4) {
+-		temp &= ~(0x7 << (20 + (3 - priority) * 3));
+-		temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
+-	} else {
+-		temp &= ~(0x7 << (4 + (7 - priority) * 3));
+-		temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
+-	}
+-
+-	ipic_write(ipic->regs, ipic_info[src].prio, temp);
+-
+-	return 0;
+-}
+-
+-void ipic_set_highest_priority(unsigned int irq)
+-{
+-	struct ipic *ipic = ipic_from_irq(irq);
+-	unsigned int src = irq - ipic->irq_offset;
+-	u32 temp;
+-
+-	temp = ipic_read(ipic->regs, IPIC_SICFR);
+-
+-	/* clear and set HPI */
+-	temp &= 0x7f000000;
+-	temp |= (src & 0x7f) << 24;
+-
+-	ipic_write(ipic->regs, IPIC_SICFR, temp);
+-}
+-
+-void ipic_set_default_priority(void)
+-{
+-	ipic_set_priority(MPC83xx_IRQ_TSEC1_TX, 0);
+-	ipic_set_priority(MPC83xx_IRQ_TSEC1_RX, 1);
+-	ipic_set_priority(MPC83xx_IRQ_TSEC1_ERROR, 2);
+-	ipic_set_priority(MPC83xx_IRQ_TSEC2_TX, 3);
+-	ipic_set_priority(MPC83xx_IRQ_TSEC2_RX, 4);
+-	ipic_set_priority(MPC83xx_IRQ_TSEC2_ERROR, 5);
+-	ipic_set_priority(MPC83xx_IRQ_USB2_DR, 6);
+-	ipic_set_priority(MPC83xx_IRQ_USB2_MPH, 7);
+-
+-	ipic_set_priority(MPC83xx_IRQ_UART1, 0);
+-	ipic_set_priority(MPC83xx_IRQ_UART2, 1);
+-	ipic_set_priority(MPC83xx_IRQ_SEC2, 2);
+-	ipic_set_priority(MPC83xx_IRQ_IIC1, 5);
+-	ipic_set_priority(MPC83xx_IRQ_IIC2, 6);
+-	ipic_set_priority(MPC83xx_IRQ_SPI, 7);
+-	ipic_set_priority(MPC83xx_IRQ_RTC_SEC, 0);
+-	ipic_set_priority(MPC83xx_IRQ_PIT, 1);
+-	ipic_set_priority(MPC83xx_IRQ_PCI1, 2);
+-	ipic_set_priority(MPC83xx_IRQ_PCI2, 3);
+-	ipic_set_priority(MPC83xx_IRQ_EXT0, 4);
+-	ipic_set_priority(MPC83xx_IRQ_EXT1, 5);
+-	ipic_set_priority(MPC83xx_IRQ_EXT2, 6);
+-	ipic_set_priority(MPC83xx_IRQ_EXT3, 7);
+-	ipic_set_priority(MPC83xx_IRQ_RTC_ALR, 0);
+-	ipic_set_priority(MPC83xx_IRQ_MU, 1);
+-	ipic_set_priority(MPC83xx_IRQ_SBA, 2);
+-	ipic_set_priority(MPC83xx_IRQ_DMA, 3);
+-	ipic_set_priority(MPC83xx_IRQ_EXT4, 4);
+-	ipic_set_priority(MPC83xx_IRQ_EXT5, 5);
+-	ipic_set_priority(MPC83xx_IRQ_EXT6, 6);
+-	ipic_set_priority(MPC83xx_IRQ_EXT7, 7);
+-}
+-
+-void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
+-{
+-	struct ipic *ipic = primary_ipic;
+-	u32 temp;
+-
+-	temp = ipic_read(ipic->regs, IPIC_SERMR);
+-	temp |= (1 << (31 - mcp_irq));
+-	ipic_write(ipic->regs, IPIC_SERMR, temp);
+-}
+-
+-void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
+-{
+-	struct ipic *ipic = primary_ipic;
+-	u32 temp;
+-
+-	temp = ipic_read(ipic->regs, IPIC_SERMR);
+-	temp &= (1 << (31 - mcp_irq));
+-	ipic_write(ipic->regs, IPIC_SERMR, temp);
+-}
+-
+-u32 ipic_get_mcp_status(void)
+-{
+-	return ipic_read(primary_ipic->regs, IPIC_SERMR);
+-}
+-
+-void ipic_clear_mcp_status(u32 mask)
+-{
+-	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
+-}
+-
+-/* Return an interrupt vector or -1 if no interrupt is pending. */
+-int ipic_get_irq(void)
+-{
+-	int irq;
+-
+-	irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & 0x7f;
+-
+-	if (irq == 0)    /* 0 --> no irq is pending */
+-		irq = -1;
+-
+-	return irq;
+-}
+-
+-static struct sysdev_class ipic_sysclass = {
+-	set_kset_name("ipic"),
+-};
+-
+-static struct sys_device device_ipic = {
+-	.id		= 0,
+-	.cls		= &ipic_sysclass,
+-};
+-
+-static int __init init_ipic_sysfs(void)
+-{
+-	int rc;
+-
+-	if (!primary_ipic->regs)
+-		return -ENODEV;
+-	printk(KERN_DEBUG "Registering ipic with sysfs...\n");
+-
+-	rc = sysdev_class_register(&ipic_sysclass);
+-	if (rc) {
+-		printk(KERN_ERR "Failed registering ipic sys class\n");
+-		return -ENODEV;
+-	}
+-	rc = sysdev_register(&device_ipic);
+-	if (rc) {
+-		printk(KERN_ERR "Failed registering ipic sys device\n");
+-		return -ENODEV;
+-	}
+-	return 0;
+-}
+-
+-subsys_initcall(init_ipic_sysfs);
+diff --git a/arch/ppc/syslib/ipic.h b/arch/ppc/syslib/ipic.h
+deleted file mode 100644
+index a60c9d1..0000000
+--- a/arch/ppc/syslib/ipic.h
++++ /dev/null
+@@ -1,47 +0,0 @@
+-/*
+- * IPIC private definitions and structure.
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor, Inc
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-#ifndef __IPIC_H__
+-#define __IPIC_H__
+-
+-#include <asm/ipic.h>
+-
+-#define MPC83xx_IPIC_SIZE	(0x00100)
+-
+-/* System Global Interrupt Configuration Register */
+-#define	SICFR_IPSA	0x00010000
+-#define	SICFR_IPSD	0x00080000
+-#define	SICFR_MPSA	0x00200000
+-#define	SICFR_MPSB	0x00400000
+-
+-/* System External Interrupt Mask Register */
+-#define	SEMSR_SIRQ0	0x00008000
+-
+-/* System Error Control Register */
+-#define SERCR_MCPR	0x00000001
+-
+-struct ipic {
+-	volatile u32 __iomem	*regs;
+-	unsigned int		irq_offset;
+-};
+-
+-struct ipic_info {
+-	u8	pend;		/* pending register offset from base */
+-	u8	mask;		/* mask register offset from base */
+-	u8	prio;		/* priority register offset from base */
+-	u8	force;		/* force register offset from base */
+-	u8	bit;		/* register bit position (as per doc)
+-				   bit mask = 1 << (31 - bit) */
+-	u8	prio_mask;	/* priority mask value */
+-};
+-
+-#endif /* __IPIC_H__ */
+diff --git a/arch/ppc/syslib/mpc52xx_pic.c b/arch/ppc/syslib/mpc52xx_pic.c
+index af35a31..f58149c 100644
+--- a/arch/ppc/syslib/mpc52xx_pic.c
++++ b/arch/ppc/syslib/mpc52xx_pic.c
+@@ -20,7 +20,6 @@
+ #include <linux/init.h>
+ #include <linux/sched.h>
+ #include <linux/signal.h>
+-#include <linux/stddef.h>
+ #include <linux/delay.h>
+ #include <linux/irq.h>
+ 
+diff --git a/arch/ppc/syslib/mpc52xx_setup.c b/arch/ppc/syslib/mpc52xx_setup.c
+index ecfa2c0..9f504fc 100644
+--- a/arch/ppc/syslib/mpc52xx_setup.c
++++ b/arch/ppc/syslib/mpc52xx_setup.c
+@@ -16,6 +16,7 @@
+  */
+ 
+ 
++#include <linux/spinlock.h>
+ #include <asm/io.h>
+ #include <asm/time.h>
+ #include <asm/mpc52xx.h>
+@@ -275,3 +276,38 @@ int mpc52xx_match_psc_function(int psc_idx, const char *func)
+ 
+ 	return 0;
+ }
++
++int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
++{
++	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
++	struct mpc52xx_cdm __iomem *cdm;
++	unsigned long flags;
++	u16 mclken_div;
++	u16 __iomem *reg;
++	u32 mask;
++
++	cdm = ioremap(MPC52xx_PA(MPC52xx_CDM_OFFSET), MPC52xx_CDM_SIZE);
++	if (!cdm) {
++		printk(KERN_ERR __FILE__ ": Error mapping CDM\n");
++		return -ENODEV;
++	}
++
++	mclken_div = 0x8000 | (clkdiv & 0x1FF);
++	switch (psc_id) {
++	case 1: reg = &cdm->mclken_div_psc1; mask = 0x20; break;
++	case 2: reg = &cdm->mclken_div_psc2; mask = 0x40; break;
++	case 3: reg = &cdm->mclken_div_psc3; mask = 0x80; break;
++	case 6: reg = &cdm->mclken_div_psc6; mask = 0x10; break;
++	default:
++		return -ENODEV;
++	}
++
++	/* Set the rate and enable the clock */
++	spin_lock_irqsave(&lock, flags);
++	out_be16(reg, mclken_div);
++	out_be32(&cdm->clk_enables, in_be32(&cdm->clk_enables) | mask);
++	spin_unlock_irqrestore(&lock, flags);
++
++	iounmap(cdm);
++	return 0;
++}
+diff --git a/arch/ppc/syslib/mpc83xx_devices.c b/arch/ppc/syslib/mpc83xx_devices.c
+deleted file mode 100644
+index 5c4932c..0000000
+--- a/arch/ppc/syslib/mpc83xx_devices.c
++++ /dev/null
+@@ -1,251 +0,0 @@
+-/*
+- * MPC83xx Device descriptions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/device.h>
+-#include <linux/serial_8250.h>
+-#include <linux/fsl_devices.h>
+-#include <asm/mpc83xx.h>
+-#include <asm/irq.h>
+-#include <asm/ppc_sys.h>
+-#include <asm/machdep.h>
+-
+-/* We use offsets for IORESOURCE_MEM since we do not know at compile time
+- * what IMMRBAR is, will get fixed up by mach_mpc83xx_fixup
+- */
+-
+-struct gianfar_mdio_data mpc83xx_mdio_pdata = {
+-};
+-
+-static struct gianfar_platform_data mpc83xx_tsec1_pdata = {
+-	.device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+-	    FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
+-	    FSL_GIANFAR_DEV_HAS_MULTI_INTR,
+-};
+-
+-static struct gianfar_platform_data mpc83xx_tsec2_pdata = {
+-	.device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+-	    FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
+-	    FSL_GIANFAR_DEV_HAS_MULTI_INTR,
+-};
+-
+-static struct fsl_i2c_platform_data mpc83xx_fsl_i2c1_pdata = {
+-	.device_flags = FSL_I2C_DEV_SEPARATE_DFSRR,
+-};
+-
+-static struct fsl_i2c_platform_data mpc83xx_fsl_i2c2_pdata = {
+-	.device_flags = FSL_I2C_DEV_SEPARATE_DFSRR,
+-};
+-
+-static struct plat_serial8250_port serial_platform_data[] = {
+-	[0] = {
+-		.mapbase	= 0x4500,
+-		.irq		= MPC83xx_IRQ_UART1,
+-		.iotype		= UPIO_MEM,
+-		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+-	},
+-	[1] = {
+-		.mapbase	= 0x4600,
+-		.irq		= MPC83xx_IRQ_UART2,
+-		.iotype		= UPIO_MEM,
+-		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+-	},
+-	{ },
+-};
+-
+-struct platform_device ppc_sys_platform_devices[] = {
+-	[MPC83xx_TSEC1] = {
+-		.name = "fsl-gianfar",
+-		.id	= 1,
+-		.dev.platform_data = &mpc83xx_tsec1_pdata,
+-		.num_resources	 = 4,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x24000,
+-				.end	= 0x24fff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "tx",
+-				.start	= MPC83xx_IRQ_TSEC1_TX,
+-				.end	= MPC83xx_IRQ_TSEC1_TX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "rx",
+-				.start	= MPC83xx_IRQ_TSEC1_RX,
+-				.end	= MPC83xx_IRQ_TSEC1_RX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "error",
+-				.start	= MPC83xx_IRQ_TSEC1_ERROR,
+-				.end	= MPC83xx_IRQ_TSEC1_ERROR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC83xx_TSEC2] = {
+-		.name = "fsl-gianfar",
+-		.id	= 2,
+-		.dev.platform_data = &mpc83xx_tsec2_pdata,
+-		.num_resources	 = 4,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x25000,
+-				.end	= 0x25fff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "tx",
+-				.start	= MPC83xx_IRQ_TSEC2_TX,
+-				.end	= MPC83xx_IRQ_TSEC2_TX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "rx",
+-				.start	= MPC83xx_IRQ_TSEC2_RX,
+-				.end	= MPC83xx_IRQ_TSEC2_RX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "error",
+-				.start	= MPC83xx_IRQ_TSEC2_ERROR,
+-				.end	= MPC83xx_IRQ_TSEC2_ERROR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC83xx_IIC1] = {
+-		.name = "fsl-i2c",
+-		.id	= 1,
+-		.dev.platform_data = &mpc83xx_fsl_i2c1_pdata,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x3000,
+-				.end	= 0x30ff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC83xx_IRQ_IIC1,
+-				.end	= MPC83xx_IRQ_IIC1,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC83xx_IIC2] = {
+-		.name = "fsl-i2c",
+-		.id	= 2,
+-		.dev.platform_data = &mpc83xx_fsl_i2c2_pdata,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x3100,
+-				.end	= 0x31ff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC83xx_IRQ_IIC2,
+-				.end	= MPC83xx_IRQ_IIC2,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC83xx_DUART] = {
+-		.name = "serial8250",
+-		.id	= PLAT8250_DEV_PLATFORM,
+-		.dev.platform_data = serial_platform_data,
+-	},
+-	[MPC83xx_SEC2] = {
+-		.name = "fsl-sec2",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x30000,
+-				.end	= 0x3ffff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC83xx_IRQ_SEC2,
+-				.end	= MPC83xx_IRQ_SEC2,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC83xx_USB2_DR] = {
+-		.name = "fsl-ehci",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x23000,
+-				.end	= 0x23fff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC83xx_IRQ_USB2_DR,
+-				.end	= MPC83xx_IRQ_USB2_DR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC83xx_USB2_MPH] = {
+-		.name = "fsl-ehci",
+-		.id	= 2,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x22000,
+-				.end	= 0x22fff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC83xx_IRQ_USB2_MPH,
+-				.end	= MPC83xx_IRQ_USB2_MPH,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC83xx_MDIO] = {
+-		.name = "fsl-gianfar_mdio",
+-		.id = 0,
+-		.dev.platform_data = &mpc83xx_mdio_pdata,
+-		.num_resources = 1,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x24520,
+-				.end	= 0x2453f,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-		},
+-	},
+-};
+-
+-static int __init mach_mpc83xx_fixup(struct platform_device *pdev)
+-{
+-	ppc_sys_fixup_mem_resource(pdev, immrbar);
+-	return 0;
+-}
+-
+-static int __init mach_mpc83xx_init(void)
+-{
+-	if (ppc_md.progress)
+-		ppc_md.progress("mach_mpc83xx_init:enter", 0);
+-	ppc_sys_device_fixup = mach_mpc83xx_fixup;
+-	return 0;
+-}
+-
+-postcore_initcall(mach_mpc83xx_init);
+diff --git a/arch/ppc/syslib/mpc83xx_sys.c b/arch/ppc/syslib/mpc83xx_sys.c
+deleted file mode 100644
+index 0498ae7..0000000
+--- a/arch/ppc/syslib/mpc83xx_sys.c
++++ /dev/null
+@@ -1,122 +0,0 @@
+-/*
+- * MPC83xx System descriptions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/device.h>
+-#include <asm/ppc_sys.h>
+-
+-struct ppc_sys_spec *cur_ppc_sys_spec;
+-struct ppc_sys_spec ppc_sys_specs[] = {
+-	{
+-		.ppc_sys_name	= "8349E",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80500000,
+-		.num_devices	= 9,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
+-			MPC83xx_IIC2, MPC83xx_DUART, MPC83xx_SEC2,
+-			MPC83xx_USB2_DR, MPC83xx_USB2_MPH, MPC83xx_MDIO
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8349",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80510000,
+-		.num_devices	= 8,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
+-			MPC83xx_IIC2, MPC83xx_DUART,
+-			MPC83xx_USB2_DR, MPC83xx_USB2_MPH, MPC83xx_MDIO
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8347E",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80520000,
+-		.num_devices	= 9,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
+-			MPC83xx_IIC2, MPC83xx_DUART, MPC83xx_SEC2,
+-			MPC83xx_USB2_DR, MPC83xx_USB2_MPH, MPC83xx_MDIO
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8347",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80530000,
+-		.num_devices	= 8,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
+-			MPC83xx_IIC2, MPC83xx_DUART,
+-			MPC83xx_USB2_DR, MPC83xx_USB2_MPH, MPC83xx_MDIO
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8347E",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80540000,
+-		.num_devices	= 9,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
+-			MPC83xx_IIC2, MPC83xx_DUART, MPC83xx_SEC2,
+-			MPC83xx_USB2_DR, MPC83xx_USB2_MPH, MPC83xx_MDIO
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8347",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80550000,
+-		.num_devices	= 8,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
+-			MPC83xx_IIC2, MPC83xx_DUART,
+-			MPC83xx_USB2_DR, MPC83xx_USB2_MPH, MPC83xx_MDIO
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8343E",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80560000,
+-		.num_devices	= 8,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
+-			MPC83xx_IIC2, MPC83xx_DUART, MPC83xx_SEC2,
+-			MPC83xx_USB2_DR, MPC83xx_MDIO
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8343",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80570000,
+-		.num_devices	= 7,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
+-			MPC83xx_IIC2, MPC83xx_DUART,
+-			MPC83xx_USB2_DR, MPC83xx_MDIO
+-		},
+-	},
+-	{	/* default match */
+-		.ppc_sys_name	= "",
+-		.mask 		= 0x00000000,
+-		.value 		= 0x00000000,
+-	},
+-};
+diff --git a/arch/ppc/syslib/mpc85xx_devices.c b/arch/ppc/syslib/mpc85xx_devices.c
+deleted file mode 100644
+index 325136e..0000000
+--- a/arch/ppc/syslib/mpc85xx_devices.c
++++ /dev/null
+@@ -1,826 +0,0 @@
+-/*
+- * MPC85xx Device descriptions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/device.h>
+-#include <linux/serial_8250.h>
+-#include <linux/fsl_devices.h>
+-#include <linux/fs_enet_pd.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/irq.h>
+-#include <asm/ppc_sys.h>
+-#include <asm/cpm2.h>
+-
+-/* We use offsets for IORESOURCE_MEM since we do not know at compile time
+- * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup
+- */
+-struct gianfar_mdio_data mpc85xx_mdio_pdata = {
+-};
+-
+-static struct gianfar_platform_data mpc85xx_tsec1_pdata = {
+-	.device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+-	    FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
+-	    FSL_GIANFAR_DEV_HAS_MULTI_INTR,
+-};
+-
+-static struct gianfar_platform_data mpc85xx_tsec2_pdata = {
+-	.device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+-	    FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
+-	    FSL_GIANFAR_DEV_HAS_MULTI_INTR,
+-};
+-
+-static struct gianfar_platform_data mpc85xx_etsec1_pdata = {
+-	.device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+-	    FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
+-	    FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+-	    FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
+-	    FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
+-};
+-
+-static struct gianfar_platform_data mpc85xx_etsec2_pdata = {
+-	.device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+-	    FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
+-	    FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+-	    FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
+-	    FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
+-};
+-
+-static struct gianfar_platform_data mpc85xx_etsec3_pdata = {
+-	.device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+-	    FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
+-	    FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+-	    FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
+-	    FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
+-};
+-
+-static struct gianfar_platform_data mpc85xx_etsec4_pdata = {
+-	.device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+-	    FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
+-	    FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+-	    FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
+-	    FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
+-};
+-
+-static struct gianfar_platform_data mpc85xx_fec_pdata = {
+-	.device_flags = 0,
+-};
+-
+-static struct fsl_i2c_platform_data mpc85xx_fsl_i2c_pdata = {
+-	.device_flags = FSL_I2C_DEV_SEPARATE_DFSRR,
+-};
+-
+-static struct fsl_i2c_platform_data mpc85xx_fsl_i2c2_pdata = {
+-	.device_flags = FSL_I2C_DEV_SEPARATE_DFSRR,
+-};
+-
+-static struct fs_platform_info mpc85xx_fcc1_pdata = {
+-	.fs_no          = fsid_fcc1,
+-	.cp_page        = CPM_CR_FCC1_PAGE,
+-	.cp_block       = CPM_CR_FCC1_SBLOCK,
+-
+-	.rx_ring        = 32,
+-	.tx_ring        = 32,
+-	.rx_copybreak   = 240,
+-	.use_napi       = 0,
+-	.napi_weight    = 17,
+-
+-	.clk_mask	= CMX1_CLK_MASK,
+-	.clk_route	= CMX1_CLK_ROUTE,
+-	.clk_trx	= (PC_F1RXCLK | PC_F1TXCLK),
+-
+-	.mem_offset     = FCC1_MEM_OFFSET,
+-};
+-
+-static struct fs_platform_info mpc85xx_fcc2_pdata = {
+-	.fs_no          = fsid_fcc2,
+-	.cp_page        = CPM_CR_FCC2_PAGE,
+-	.cp_block       = CPM_CR_FCC2_SBLOCK,
+-
+-	.rx_ring        = 32,
+-	.tx_ring        = 32,
+-	.rx_copybreak   = 240,
+-	.use_napi       = 0,
+-	.napi_weight    = 17,
+-
+-	.clk_mask	= CMX2_CLK_MASK,
+-	.clk_route	= CMX2_CLK_ROUTE,
+-	.clk_trx	= (PC_F2RXCLK | PC_F2TXCLK),
+-
+-	.mem_offset     = FCC2_MEM_OFFSET,
+-};
+-
+-static struct fs_platform_info mpc85xx_fcc3_pdata = {
+-	.fs_no          = fsid_fcc3,
+-	.cp_page        = CPM_CR_FCC3_PAGE,
+-	.cp_block       = CPM_CR_FCC3_SBLOCK,
+-
+-	.rx_ring        = 32,
+-	.tx_ring        = 32,
+-	.rx_copybreak   = 240,
+-	.use_napi       = 0,
+-	.napi_weight    = 17,
+-
+-	.clk_mask	= CMX3_CLK_MASK,
+-	.clk_route	= CMX3_CLK_ROUTE,
+-	.clk_trx	= (PC_F3RXCLK | PC_F3TXCLK),
+-
+-	.mem_offset     = FCC3_MEM_OFFSET,
+-};
+-
+-static struct plat_serial8250_port serial_platform_data[] = {
+-	[0] = {
+-		.mapbase	= 0x4500,
+-		.irq		= MPC85xx_IRQ_DUART,
+-		.iotype		= UPIO_MEM,
+-		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ,
+-	},
+-	[1] = {
+-		.mapbase	= 0x4600,
+-		.irq		= MPC85xx_IRQ_DUART,
+-		.iotype		= UPIO_MEM,
+-		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ,
+-	},
+-	{ },
+-};
+-
+-struct platform_device ppc_sys_platform_devices[] = {
+-	[MPC85xx_TSEC1] = {
+-		.name = "fsl-gianfar",
+-		.id	= 1,
+-		.dev.platform_data = &mpc85xx_tsec1_pdata,
+-		.num_resources	 = 4,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_ENET1_OFFSET,
+-				.end	= MPC85xx_ENET1_OFFSET +
+-						MPC85xx_ENET1_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "tx",
+-				.start	= MPC85xx_IRQ_TSEC1_TX,
+-				.end	= MPC85xx_IRQ_TSEC1_TX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "rx",
+-				.start	= MPC85xx_IRQ_TSEC1_RX,
+-				.end	= MPC85xx_IRQ_TSEC1_RX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "error",
+-				.start	= MPC85xx_IRQ_TSEC1_ERROR,
+-				.end	= MPC85xx_IRQ_TSEC1_ERROR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_TSEC2] = {
+-		.name = "fsl-gianfar",
+-		.id	= 2,
+-		.dev.platform_data = &mpc85xx_tsec2_pdata,
+-		.num_resources	 = 4,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_ENET2_OFFSET,
+-				.end	= MPC85xx_ENET2_OFFSET +
+-						MPC85xx_ENET2_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "tx",
+-				.start	= MPC85xx_IRQ_TSEC2_TX,
+-				.end	= MPC85xx_IRQ_TSEC2_TX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "rx",
+-				.start	= MPC85xx_IRQ_TSEC2_RX,
+-				.end	= MPC85xx_IRQ_TSEC2_RX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "error",
+-				.start	= MPC85xx_IRQ_TSEC2_ERROR,
+-				.end	= MPC85xx_IRQ_TSEC2_ERROR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_FEC] =	{
+-		.name = "fsl-gianfar",
+-		.id	= 3,
+-		.dev.platform_data = &mpc85xx_fec_pdata,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_ENET3_OFFSET,
+-				.end	= MPC85xx_ENET3_OFFSET +
+-						MPC85xx_ENET3_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_FEC,
+-				.end	= MPC85xx_IRQ_FEC,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_IIC1] = {
+-		.name = "fsl-i2c",
+-		.id	= 1,
+-		.dev.platform_data = &mpc85xx_fsl_i2c_pdata,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_IIC1_OFFSET,
+-				.end	= MPC85xx_IIC1_OFFSET +
+-						MPC85xx_IIC1_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_IIC1,
+-				.end	= MPC85xx_IRQ_IIC1,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_DMA0] = {
+-		.name = "fsl-dma",
+-		.id	= 0,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_DMA0_OFFSET,
+-				.end	= MPC85xx_DMA0_OFFSET +
+-						MPC85xx_DMA0_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_DMA0,
+-				.end	= MPC85xx_IRQ_DMA0,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_DMA1] = {
+-		.name = "fsl-dma",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_DMA1_OFFSET,
+-				.end	= MPC85xx_DMA1_OFFSET +
+-						MPC85xx_DMA1_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_DMA1,
+-				.end	= MPC85xx_IRQ_DMA1,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_DMA2] = {
+-		.name = "fsl-dma",
+-		.id	= 2,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_DMA2_OFFSET,
+-				.end	= MPC85xx_DMA2_OFFSET +
+-						MPC85xx_DMA2_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_DMA2,
+-				.end	= MPC85xx_IRQ_DMA2,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_DMA3] = {
+-		.name = "fsl-dma",
+-		.id	= 3,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_DMA3_OFFSET,
+-				.end	= MPC85xx_DMA3_OFFSET +
+-						MPC85xx_DMA3_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_DMA3,
+-				.end	= MPC85xx_IRQ_DMA3,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_DUART] = {
+-		.name = "serial8250",
+-		.id	= PLAT8250_DEV_PLATFORM,
+-		.dev.platform_data = serial_platform_data,
+-	},
+-	[MPC85xx_PERFMON] = {
+-		.name = "fsl-perfmon",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_PERFMON_OFFSET,
+-				.end	= MPC85xx_PERFMON_OFFSET +
+-						MPC85xx_PERFMON_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_PERFMON,
+-				.end	= MPC85xx_IRQ_PERFMON,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_SEC2] = {
+-		.name = "fsl-sec2",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_SEC2_OFFSET,
+-				.end	= MPC85xx_SEC2_OFFSET +
+-						MPC85xx_SEC2_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_SEC2,
+-				.end	= MPC85xx_IRQ_SEC2,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_FCC1] = {
+-		.name = "fsl-cpm-fcc",
+-		.id	= 1,
+-		.num_resources	 = 4,
+-		.dev.platform_data = &mpc85xx_fcc1_pdata,
+-		.resource = (struct resource[]) {
+-			{
+-				.name	= "fcc_regs",
+-				.start	= 0x91300,
+-				.end	= 0x9131F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name   = "fcc_regs_c",
+-				.start	= 0x91380,
+-				.end	= 0x9139F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "fcc_pram",
+-				.start	= 0x88400,
+-				.end	= 0x884ff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_FCC1,
+-				.end	= SIU_INT_FCC1,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_FCC2] = {
+-		.name = "fsl-cpm-fcc",
+-		.id	= 2,
+-		.num_resources	 = 4,
+-		.dev.platform_data = &mpc85xx_fcc2_pdata,
+-		.resource = (struct resource[]) {
+-			{
+-				.name	= "fcc_regs",
+-				.start	= 0x91320,
+-				.end	= 0x9133F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name   = "fcc_regs_c",
+-				.start	= 0x913A0,
+-				.end	= 0x913CF,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "fcc_pram",
+-				.start	= 0x88500,
+-				.end	= 0x885ff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_FCC2,
+-				.end	= SIU_INT_FCC2,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_FCC3] = {
+-		.name = "fsl-cpm-fcc",
+-		.id	= 3,
+-		.num_resources	 = 4,
+-		.dev.platform_data = &mpc85xx_fcc3_pdata,
+-		.resource = (struct resource[]) {
+-			{
+-				.name	= "fcc_regs",
+-				.start	= 0x91340,
+-				.end	= 0x9135F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name   = "fcc_regs_c",
+-				.start	= 0x913D0,
+-				.end	= 0x913FF,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "fcc_pram",
+-				.start	= 0x88600,
+-				.end	= 0x886ff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_FCC3,
+-				.end	= SIU_INT_FCC3,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_I2C] = {
+-		.name = "fsl-cpm-i2c",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91860,
+-				.end	= 0x918BF,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_I2C,
+-				.end	= SIU_INT_I2C,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_SCC1] = {
+-		.name = "fsl-cpm-scc",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91A00,
+-				.end	= 0x91A1F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_SCC1,
+-				.end	= SIU_INT_SCC1,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_SCC2] = {
+-		.name = "fsl-cpm-scc",
+-		.id	= 2,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91A20,
+-				.end	= 0x91A3F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_SCC2,
+-				.end	= SIU_INT_SCC2,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_SCC3] = {
+-		.name = "fsl-cpm-scc",
+-		.id	= 3,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91A40,
+-				.end	= 0x91A5F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_SCC3,
+-				.end	= SIU_INT_SCC3,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_SCC4] = {
+-		.name = "fsl-cpm-scc",
+-		.id	= 4,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91A60,
+-				.end	= 0x91A7F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_SCC4,
+-				.end	= SIU_INT_SCC4,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_SPI] = {
+-		.name = "fsl-cpm-spi",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91AA0,
+-				.end	= 0x91AFF,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_SPI,
+-				.end	= SIU_INT_SPI,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_MCC1] = {
+-		.name = "fsl-cpm-mcc",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91B30,
+-				.end	= 0x91B3F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_MCC1,
+-				.end	= SIU_INT_MCC1,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_MCC2] = {
+-		.name = "fsl-cpm-mcc",
+-		.id	= 2,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91B50,
+-				.end	= 0x91B5F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_MCC2,
+-				.end	= SIU_INT_MCC2,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_SMC1] = {
+-		.name = "fsl-cpm-smc",
+-		.id	= 1,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91A80,
+-				.end	= 0x91A8F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_SMC1,
+-				.end	= SIU_INT_SMC1,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_SMC2] = {
+-		.name = "fsl-cpm-smc",
+-		.id	= 2,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91A90,
+-				.end	= 0x91A9F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_SMC2,
+-				.end	= SIU_INT_SMC2,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_CPM_USB] = {
+-		.name = "fsl-cpm-usb",
+-		.id	= 2,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x91B60,
+-				.end	= 0x91B7F,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= SIU_INT_USB,
+-				.end	= SIU_INT_USB,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_eTSEC1] = {
+-		.name = "fsl-gianfar",
+-		.id	= 1,
+-		.dev.platform_data = &mpc85xx_etsec1_pdata,
+-		.num_resources	 = 4,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_ENET1_OFFSET,
+-				.end	= MPC85xx_ENET1_OFFSET +
+-						MPC85xx_ENET1_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "tx",
+-				.start	= MPC85xx_IRQ_TSEC1_TX,
+-				.end	= MPC85xx_IRQ_TSEC1_TX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "rx",
+-				.start	= MPC85xx_IRQ_TSEC1_RX,
+-				.end	= MPC85xx_IRQ_TSEC1_RX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "error",
+-				.start	= MPC85xx_IRQ_TSEC1_ERROR,
+-				.end	= MPC85xx_IRQ_TSEC1_ERROR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_eTSEC2] = {
+-		.name = "fsl-gianfar",
+-		.id	= 2,
+-		.dev.platform_data = &mpc85xx_etsec2_pdata,
+-		.num_resources	 = 4,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_ENET2_OFFSET,
+-				.end	= MPC85xx_ENET2_OFFSET +
+-						MPC85xx_ENET2_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "tx",
+-				.start	= MPC85xx_IRQ_TSEC2_TX,
+-				.end	= MPC85xx_IRQ_TSEC2_TX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "rx",
+-				.start	= MPC85xx_IRQ_TSEC2_RX,
+-				.end	= MPC85xx_IRQ_TSEC2_RX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "error",
+-				.start	= MPC85xx_IRQ_TSEC2_ERROR,
+-				.end	= MPC85xx_IRQ_TSEC2_ERROR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_eTSEC3] = {
+-		.name = "fsl-gianfar",
+-		.id	= 3,
+-		.dev.platform_data = &mpc85xx_etsec3_pdata,
+-		.num_resources	 = 4,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= MPC85xx_ENET3_OFFSET,
+-				.end	= MPC85xx_ENET3_OFFSET +
+-						MPC85xx_ENET3_SIZE - 1,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "tx",
+-				.start	= MPC85xx_IRQ_TSEC3_TX,
+-				.end	= MPC85xx_IRQ_TSEC3_TX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "rx",
+-				.start	= MPC85xx_IRQ_TSEC3_RX,
+-				.end	= MPC85xx_IRQ_TSEC3_RX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "error",
+-				.start	= MPC85xx_IRQ_TSEC3_ERROR,
+-				.end	= MPC85xx_IRQ_TSEC3_ERROR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_eTSEC4] = {
+-		.name = "fsl-gianfar",
+-		.id	= 4,
+-		.dev.platform_data = &mpc85xx_etsec4_pdata,
+-		.num_resources	 = 4,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x27000,
+-				.end	= 0x27fff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.name	= "tx",
+-				.start	= MPC85xx_IRQ_TSEC4_TX,
+-				.end	= MPC85xx_IRQ_TSEC4_TX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "rx",
+-				.start	= MPC85xx_IRQ_TSEC4_RX,
+-				.end	= MPC85xx_IRQ_TSEC4_RX,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-			{
+-				.name	= "error",
+-				.start	= MPC85xx_IRQ_TSEC4_ERROR,
+-				.end	= MPC85xx_IRQ_TSEC4_ERROR,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_IIC2] = {
+-		.name = "fsl-i2c",
+-		.id	= 2,
+-		.dev.platform_data = &mpc85xx_fsl_i2c2_pdata,
+-		.num_resources	 = 2,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x03100,
+-				.end	= 0x031ff,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-			{
+-				.start	= MPC85xx_IRQ_IIC1,
+-				.end	= MPC85xx_IRQ_IIC1,
+-				.flags	= IORESOURCE_IRQ,
+-			},
+-		},
+-	},
+-	[MPC85xx_MDIO] = {
+-		.name = "fsl-gianfar_mdio",
+-		.id = 0,
+-		.dev.platform_data = &mpc85xx_mdio_pdata,
+-		.num_resources = 1,
+-		.resource = (struct resource[]) {
+-			{
+-				.start	= 0x24520,
+-				.end	= 0x2453f,
+-				.flags	= IORESOURCE_MEM,
+-			},
+-		},
+-	},
+-};
+-
+-static int __init mach_mpc85xx_fixup(struct platform_device *pdev)
+-{
+-	ppc_sys_fixup_mem_resource(pdev, CCSRBAR);
+-	return 0;
+-}
+-
+-static int __init mach_mpc85xx_init(void)
+-{
+-	ppc_sys_device_fixup = mach_mpc85xx_fixup;
+-	return 0;
+-}
+-
+-postcore_initcall(mach_mpc85xx_init);
+diff --git a/arch/ppc/syslib/mpc85xx_sys.c b/arch/ppc/syslib/mpc85xx_sys.c
+deleted file mode 100644
+index d96a93d..0000000
+--- a/arch/ppc/syslib/mpc85xx_sys.c
++++ /dev/null
+@@ -1,233 +0,0 @@
+-/*
+- * MPC85xx System descriptions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/device.h>
+-#include <asm/ppc_sys.h>
+-
+-struct ppc_sys_spec *cur_ppc_sys_spec;
+-struct ppc_sys_spec ppc_sys_specs[] = {
+-	{
+-		.ppc_sys_name	= "8540",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80300000,
+-		.num_devices	= 11,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_FEC, MPC85xx_IIC1,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8560",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80700000,
+-		.num_devices	= 20,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON,
+-			MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, MPC85xx_CPM_SCC1,
+-			MPC85xx_CPM_SCC2, MPC85xx_CPM_SCC3, MPC85xx_CPM_SCC4,
+-			MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, MPC85xx_CPM_FCC3,
+-			MPC85xx_CPM_MCC1, MPC85xx_CPM_MCC2, MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8541",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80720000,
+-		.num_devices	= 14,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART,
+-			MPC85xx_CPM_SPI, MPC85xx_CPM_I2C,
+-			MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8541E",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x807A0000,
+-		.num_devices	= 15,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
+-			MPC85xx_CPM_SPI, MPC85xx_CPM_I2C,
+-			MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8555",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80710000,
+-		.num_devices	= 20,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART,
+-			MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, MPC85xx_CPM_SCC1,
+-			MPC85xx_CPM_SCC2, MPC85xx_CPM_SCC3,
+-			MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
+-			MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2,
+-			MPC85xx_CPM_USB,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8555E",
+-		.mask 		= 0xFFFF0000,
+-		.value 		= 0x80790000,
+-		.num_devices	= 21,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
+-			MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, MPC85xx_CPM_SCC1,
+-			MPC85xx_CPM_SCC2, MPC85xx_CPM_SCC3,
+-			MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
+-			MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2,
+-			MPC85xx_CPM_USB,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	/* SVRs on 8548 rev1.0 matches for 8548/8547/8545 */
+-	{
+-		.ppc_sys_name	= "8548E",
+-		.mask 		= 0xFFFF00F0,
+-		.value 		= 0x80390010,
+-		.num_devices	= 14,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
+-			MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8548",
+-		.mask 		= 0xFFFF00F0,
+-		.value 		= 0x80310010,
+-		.num_devices	= 13,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
+-			MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8547E",
+-		.mask 		= 0xFFFF00F0,
+-		.value 		= 0x80390010,
+-		.num_devices	= 14,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
+-			MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8547",
+-		.mask 		= 0xFFFF00F0,
+-		.value 		= 0x80310010,
+-		.num_devices	= 13,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
+-			MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8545E",
+-		.mask 		= 0xFFFF00F0,
+-		.value 		= 0x80390010,
+-		.num_devices	= 12,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_eTSEC1, MPC85xx_eTSEC2,
+-			MPC85xx_IIC1, MPC85xx_IIC2,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8545",
+-		.mask 		= 0xFFFF00F0,
+-		.value 		= 0x80310010,
+-		.num_devices	= 11,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_eTSEC1, MPC85xx_eTSEC2,
+-			MPC85xx_IIC1, MPC85xx_IIC2,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8543E",
+-		.mask 		= 0xFFFF00F0,
+-		.value 		= 0x803A0010,
+-		.num_devices	= 12,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_eTSEC1, MPC85xx_eTSEC2,
+-			MPC85xx_IIC1, MPC85xx_IIC2,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{
+-		.ppc_sys_name	= "8543",
+-		.mask 		= 0xFFFF00F0,
+-		.value 		= 0x80320010,
+-		.num_devices	= 11,
+-		.device_list	= (enum ppc_sys_devices[])
+-		{
+-			MPC85xx_eTSEC1, MPC85xx_eTSEC2,
+-			MPC85xx_IIC1, MPC85xx_IIC2,
+-			MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
+-			MPC85xx_PERFMON, MPC85xx_DUART,
+-			MPC85xx_MDIO,
+-		},
+-	},
+-	{	/* default match */
+-		.ppc_sys_name	= "",
+-		.mask 		= 0x00000000,
+-		.value 		= 0x00000000,
+-	},
+-};
+diff --git a/arch/ppc/syslib/mpc8xx_devices.c b/arch/ppc/syslib/mpc8xx_devices.c
+index c05ac87..80804ee 100644
+--- a/arch/ppc/syslib/mpc8xx_devices.c
++++ b/arch/ppc/syslib/mpc8xx_devices.c
+@@ -16,7 +16,7 @@
+ #include <linux/device.h>
+ #include <linux/serial_8250.h>
+ #include <linux/mii.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #include <asm/mpc8xx.h>
+ #include <asm/irq.h>
+ #include <asm/ppc_sys.h>
+diff --git a/arch/ppc/syslib/mv64360_pic.c b/arch/ppc/syslib/mv64360_pic.c
+index 4b7a333..2dd2dc5 100644
+--- a/arch/ppc/syslib/mv64360_pic.c
++++ b/arch/ppc/syslib/mv64360_pic.c
+@@ -36,7 +36,6 @@
+ #include <linux/init.h>
+ #include <linux/sched.h>
+ #include <linux/signal.h>
+-#include <linux/stddef.h>
+ #include <linux/delay.h>
+ #include <linux/irq.h>
+ #include <linux/interrupt.h>
+diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
+index 2744b8a..90fe904 100644
+--- a/arch/ppc/syslib/mv64x60.c
++++ b/arch/ppc/syslib/mv64x60.c
+@@ -411,7 +411,6 @@ static struct mv64xxx_i2c_pdata mv64xxx_i2c_pdata = {
+ 	.freq_m			= 8,
+ 	.freq_n			= 3,
+ 	.timeout		= 1000, /* Default timeout of 1 second */
+-	.retries		= 1,
+ };
+ 
+ static struct resource mv64xxx_i2c_resources[] = {
+diff --git a/arch/ppc/syslib/ocp.c b/arch/ppc/syslib/ocp.c
+index 3f5be2c..ac80370 100644
+--- a/arch/ppc/syslib/ocp.c
++++ b/arch/ppc/syslib/ocp.c
+@@ -20,7 +20,7 @@
+  *  of peripherals are found on embedded SoC (System On a Chip)
+  *  processors or highly integrated system controllers that have
+  *  a host bridge and many peripherals.  Common examples where
+- *  this is already used include the PPC4xx, PPC85xx, MPC52xx,
++ *  this is already used include the PPC4xx, MPC52xx,
+  *  and MV64xxx parts.
+  *
+  *  This subsystem creates a standard OCP bus type within the
+@@ -376,7 +376,7 @@ ocp_remove_one_device(unsigned int vendor, unsigned int function, int index)
+ 
+ 	down_write(&ocp_devices_sem);
+ 	dev = __ocp_find_device(vendor, function, index);
+-	list_del((struct list_head *)dev);
++	list_del(&dev->link);
+ 	up_write(&ocp_devices_sem);
+ 
+ 	DBG(("ocp: ocp_remove_one_device(vendor: %x, function: %x, index: %d)... done.\n", vendor, function, index));
+diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
+index 18ec947..67dffe2 100644
+--- a/arch/ppc/syslib/open_pic.c
++++ b/arch/ppc/syslib/open_pic.c
+@@ -24,7 +24,7 @@
+ 
+ #include "open_pic_defs.h"
+ 
+-#if defined(CONFIG_PRPMC800) || defined(CONFIG_85xx)
++#if defined(CONFIG_PRPMC800)
+ #define OPENPIC_BIG_ENDIAN
+ #endif
+ 
+@@ -1043,7 +1043,7 @@ int openpic_resume(struct sys_device *sysdev)
+ #endif /* CONFIG_PM */
+ 
+ static struct sysdev_class openpic_sysclass = {
+-	set_kset_name("openpic"),
++	.name = "openpic",
+ };
+ 
+ static struct sys_device device_openpic = {
+diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c
+index d585207..449075a 100644
+--- a/arch/ppc/syslib/open_pic2.c
++++ b/arch/ppc/syslib/open_pic2.c
+@@ -666,7 +666,7 @@ int openpic2_resume(struct sys_device *sysdev)
+ 
+ /* HACK ALERT */
+ static struct sysdev_class openpic2_sysclass = {
+-	set_kset_name("openpic2"),
++	.name = "openpic2",
+ };
+ 
+ static struct sys_device device_openpic2 = {
+diff --git a/arch/ppc/syslib/ppc83xx_pci.h b/arch/ppc/syslib/ppc83xx_pci.h
+deleted file mode 100644
+index ec69164..0000000
+--- a/arch/ppc/syslib/ppc83xx_pci.h
++++ /dev/null
+@@ -1,151 +0,0 @@
+-/* Created by Tony Li <tony.li at freescale.com>
+- * Copyright (c) 2005 freescale semiconductor
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the  GNU General Public License along
+- * with this program; if not, write  to the Free Software Foundation, Inc.,
+- * 675 Mass Ave, Cambridge, MA 02139, USA.
+- */
+-
+-#ifndef __PPC_SYSLIB_PPC83XX_PCI_H
+-#define __PPC_SYSLIB_PPC83XX_PCI_H
+-
+-typedef struct immr_clk {
+-	u32 spmr; /* system PLL mode Register  */
+-	u32 occr; /* output clock control Register  */
+-	u32 sccr; /* system clock control Register  */
+-	u8 res0[0xF4];
+-} immr_clk_t;
+-
+-/*
+- * Sequencer
+- */
+-typedef struct immr_ios {
+-	u32	potar0;
+-	u8	res0[4];
+-	u32	pobar0;
+-	u8	res1[4];
+-	u32	pocmr0;
+-	u8	res2[4];
+-	u32	potar1;
+-	u8	res3[4];
+-	u32	pobar1;
+-	u8	res4[4];
+-	u32	pocmr1;
+-	u8	res5[4];
+-	u32	potar2;
+-	u8	res6[4];
+-	u32	pobar2;
+-	u8	res7[4];
+-	u32	pocmr2;
+-	u8	res8[4];
+-	u32	potar3;
+-	u8	res9[4];
+-	u32	pobar3;
+-	u8	res10[4];
+-	u32	pocmr3;
+-	u8	res11[4];
+-	u32	potar4;
+-	u8	res12[4];
+-	u32	pobar4;
+-	u8	res13[4];
+-	u32	pocmr4;
+-	u8	res14[4];
+-	u32	potar5;
+-	u8	res15[4];
+-	u32	pobar5;
+-	u8	res16[4];
+-	u32	pocmr5;
+-	u8	res17[4];
+-	u8	res18[0x60];
+-	u32	pmcr;
+-	u8	res19[4];
+-	u32	dtcr;
+-	u8	res20[4];
+-} immr_ios_t;
+-#define POTAR_TA_MASK	0x000fffff
+-#define POBAR_BA_MASK	0x000fffff
+-#define POCMR_EN	0x80000000
+-#define POCMR_IO	0x40000000 /* 0--memory space 1--I/O space */
+-#define POCMR_SE	0x20000000 /* streaming enable */
+-#define POCMR_DST	0x10000000 /* 0--PCI1 1--PCI2 */
+-#define POCMR_CM_MASK	0x000fffff
+-
+-/*
+- * PCI Controller Control and Status Registers
+- */
+-typedef struct immr_pcictrl {
+-	u32	esr;
+-	u32	ecdr;
+-	u32	eer;
+-	u32	eatcr;
+-	u32	eacr;
+-	u32	eeacr;
+-	u32	edlcr;
+-	u32	edhcr;
+-	u32	gcr;
+-	u32	ecr;
+-	u32	gsr;
+-	u8	res0[12];
+-	u32	pitar2;
+-	u8	res1[4];
+-	u32	pibar2;
+-	u32	piebar2;
+-	u32	piwar2;
+-	u8	res2[4];
+-	u32	pitar1;
+-	u8	res3[4];
+-	u32	pibar1;
+-	u32	piebar1;
+-	u32	piwar1;
+-	u8	res4[4];
+-	u32	pitar0;
+-	u8	res5[4];
+-	u32	pibar0;
+-	u8	res6[4];
+-	u32	piwar0;
+-	u8	res7[132];
+-} immr_pcictrl_t;
+-#define PITAR_TA_MASK	0x000fffff
+-#define PIBAR_MASK	0xffffffff
+-#define PIEBAR_EBA_MASK	0x000fffff
+-#define PIWAR_EN	0x80000000
+-#define PIWAR_PF	0x20000000
+-#define PIWAR_RTT_MASK	0x000f0000
+-#define PIWAR_RTT_NO_SNOOP	0x00040000
+-#define PIWAR_RTT_SNOOP	0x00050000
+-#define PIWAR_WTT_MASK	0x0000f000
+-#define PIWAR_WTT_NO_SNOOP	0x00004000
+-#define PIWAR_WTT_SNOOP	0x00005000
+-#define PIWAR_IWS_MASK	0x0000003F
+-#define PIWAR_IWS_4K	0x0000000B
+-#define PIWAR_IWS_8K	0x0000000C
+-#define PIWAR_IWS_16K	0x0000000D
+-#define PIWAR_IWS_32K	0x0000000E
+-#define PIWAR_IWS_64K	0x0000000F
+-#define PIWAR_IWS_128K	0x00000010
+-#define PIWAR_IWS_256K	0x00000011
+-#define PIWAR_IWS_512K	0x00000012
+-#define PIWAR_IWS_1M	0x00000013
+-#define PIWAR_IWS_2M	0x00000014
+-#define PIWAR_IWS_4M	0x00000015
+-#define PIWAR_IWS_8M	0x00000016
+-#define PIWAR_IWS_16M	0x00000017
+-#define PIWAR_IWS_32M	0x00000018
+-#define PIWAR_IWS_64M	0x00000019
+-#define PIWAR_IWS_128M	0x0000001A
+-#define PIWAR_IWS_256M	0x0000001B
+-#define PIWAR_IWS_512M	0x0000001C
+-#define PIWAR_IWS_1G	0x0000001D
+-#define PIWAR_IWS_2G	0x0000001E
+-
+-#endif /* __PPC_SYSLIB_PPC83XX_PCI_H */
+diff --git a/arch/ppc/syslib/ppc83xx_setup.c b/arch/ppc/syslib/ppc83xx_setup.c
+deleted file mode 100644
+index ec466db..0000000
+--- a/arch/ppc/syslib/ppc83xx_setup.c
++++ /dev/null
+@@ -1,411 +0,0 @@
+-/*
+- * MPC83XX common board code
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the  GNU General Public License along
+- * with this program; if not, write  to the Free Software Foundation, Inc.,
+- * 675 Mass Ave, Cambridge, MA 02139, USA.
+- *
+- * Added PCI support -- Tony Li <tony.li at freescale.com>
+- */
+-
+-#include <linux/types.h>
+-#include <linux/module.h>
+-#include <linux/init.h>
+-#include <linux/pci.h>
+-#include <linux/serial.h>
+-#include <linux/tty.h>	/* for linux/serial_core.h */
+-#include <linux/serial_core.h>
+-#include <linux/serial_8250.h>
+-
+-#include <asm/time.h>
+-#include <asm/mpc83xx.h>
+-#include <asm/mmu.h>
+-#include <asm/ppc_sys.h>
+-#include <asm/kgdb.h>
+-#include <asm/delay.h>
+-#include <asm/machdep.h>
+-
+-#include <syslib/ppc83xx_setup.h>
+-#if defined(CONFIG_PCI)
+-#include <asm/delay.h>
+-#include <syslib/ppc83xx_pci.h>
+-#endif
+-
+-phys_addr_t immrbar;
+-
+-/* Return the amount of memory */
+-unsigned long __init
+-mpc83xx_find_end_of_memory(void)
+-{
+-        bd_t *binfo;
+-
+-        binfo = (bd_t *) __res;
+-
+-        return binfo->bi_memsize;
+-}
+-
+-long __init
+-mpc83xx_time_init(void)
+-{
+-#define SPCR_OFFS   0x00000110
+-#define SPCR_TBEN   0x00400000
+-
+-	bd_t *binfo = (bd_t *)__res;
+-	u32 *spcr = ioremap(binfo->bi_immr_base + SPCR_OFFS, 4);
+-
+-	*spcr |= SPCR_TBEN;
+-
+-	iounmap(spcr);
+-
+-	return 0;
+-}
+-
+-/* The decrementer counts at the system (internal) clock freq divided by 4 */
+-void __init
+-mpc83xx_calibrate_decr(void)
+-{
+-        bd_t *binfo = (bd_t *) __res;
+-        unsigned int freq, divisor;
+-
+-	freq = binfo->bi_busfreq;
+-	divisor = 4;
+-	tb_ticks_per_jiffy = freq / HZ / divisor;
+-	tb_to_us = mulhwu_scale_factor(freq / divisor, 1000000);
+-}
+-
+-#ifdef CONFIG_SERIAL_8250
+-void __init
+-mpc83xx_early_serial_map(void)
+-{
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+-	struct uart_port serial_req;
+-#endif
+-	struct plat_serial8250_port *pdata;
+-	bd_t *binfo = (bd_t *) __res;
+-	pdata = (struct plat_serial8250_port *) ppc_sys_get_pdata(MPC83xx_DUART);
+-
+-	/* Setup serial port access */
+-	pdata[0].uartclk = binfo->bi_busfreq;
+-	pdata[0].mapbase += binfo->bi_immr_base;
+-	pdata[0].membase = ioremap(pdata[0].mapbase, 0x100);
+-
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+-	memset(&serial_req, 0, sizeof (serial_req));
+-	serial_req.iotype = UPIO_MEM;
+-	serial_req.mapbase = pdata[0].mapbase;
+-	serial_req.membase = pdata[0].membase;
+-	serial_req.regshift = 0;
+-
+-	gen550_init(0, &serial_req);
+-#endif
+-
+-	pdata[1].uartclk = binfo->bi_busfreq;
+-	pdata[1].mapbase += binfo->bi_immr_base;
+-	pdata[1].membase = ioremap(pdata[1].mapbase, 0x100);
+-
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+-	/* Assume gen550_init() doesn't modify serial_req */
+-	serial_req.mapbase = pdata[1].mapbase;
+-	serial_req.membase = pdata[1].membase;
+-
+-	gen550_init(1, &serial_req);
+-#endif
+-}
+-#endif
+-
+-void
+-mpc83xx_restart(char *cmd)
+-{
+-	volatile unsigned char __iomem *reg;
+-	unsigned char tmp;
+-
+-	reg = ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
+-
+-	local_irq_disable();
+-
+-	/*
+-	 * Unlock the BCSR bits so a PRST will update the contents.
+-	 * Otherwise the reset asserts but doesn't clear.
+-	 */
+-	tmp = in_8(reg + BCSR_MISC_REG3_OFF);
+-	tmp |= BCSR_MISC_REG3_CNFLOCK; /* low true, high false */
+-	out_8(reg + BCSR_MISC_REG3_OFF, tmp);
+-
+-	/*
+-	 * Trigger a reset via a low->high transition of the
+-	 * PORESET bit.
+-	 */
+-	tmp = in_8(reg + BCSR_MISC_REG2_OFF);
+-	tmp &= ~BCSR_MISC_REG2_PORESET;
+-	out_8(reg + BCSR_MISC_REG2_OFF, tmp);
+-
+-	udelay(1);
+-
+-	tmp |= BCSR_MISC_REG2_PORESET;
+-	out_8(reg + BCSR_MISC_REG2_OFF, tmp);
+-
+-	for(;;);
+-}
+-
+-void
+-mpc83xx_power_off(void)
+-{
+-	local_irq_disable();
+-	for(;;);
+-}
+-
+-void
+-mpc83xx_halt(void)
+-{
+-	local_irq_disable();
+-	for(;;);
+-}
+-
+-#if defined(CONFIG_PCI)
+-void __init
+-mpc83xx_setup_pci1(struct pci_controller *hose)
+-{
+-	u16 reg16;
+-	volatile immr_pcictrl_t * pci_ctrl;
+-	volatile immr_ios_t * ios;
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	pci_ctrl = ioremap(binfo->bi_immr_base + 0x8500, sizeof(immr_pcictrl_t));
+-	ios = ioremap(binfo->bi_immr_base + 0x8400, sizeof(immr_ios_t));
+-
+-	/*
+-	 * Configure PCI Outbound Translation Windows
+-	 */
+-	ios->potar0 = (MPC83xx_PCI1_LOWER_MEM >> 12) & POTAR_TA_MASK;
+-	ios->pobar0 = (MPC83xx_PCI1_LOWER_MEM >> 12) & POBAR_BA_MASK;
+-	ios->pocmr0 = POCMR_EN |
+-		(((0xffffffff - (MPC83xx_PCI1_UPPER_MEM -
+-				MPC83xx_PCI1_LOWER_MEM)) >> 12) & POCMR_CM_MASK);
+-
+-	/* mapped to PCI1 IO space */
+-	ios->potar1 = (MPC83xx_PCI1_LOWER_IO >> 12) & POTAR_TA_MASK;
+-	ios->pobar1 = (MPC83xx_PCI1_IO_BASE >> 12) & POBAR_BA_MASK;
+-	ios->pocmr1 = POCMR_EN | POCMR_IO |
+-		(((0xffffffff - (MPC83xx_PCI1_UPPER_IO -
+-				MPC83xx_PCI1_LOWER_IO)) >> 12) & POCMR_CM_MASK);
+-
+-	/*
+-	 * Configure PCI Inbound Translation Windows
+-	 */
+-	pci_ctrl->pitar1 = 0x0;
+-	pci_ctrl->pibar1 = 0x0;
+-	pci_ctrl->piebar1 = 0x0;
+-	pci_ctrl->piwar1 = PIWAR_EN | PIWAR_PF | PIWAR_RTT_SNOOP | PIWAR_WTT_SNOOP | PIWAR_IWS_2G;
+-
+-	/*
+-	 * Release PCI RST signal
+-	 */
+-	pci_ctrl->gcr = 0;
+-	udelay(2000);
+-	pci_ctrl->gcr = 1;
+-	udelay(2000);
+-
+-	reg16 = 0xff;
+-	early_read_config_word(hose, hose->first_busno, 0, PCI_COMMAND, &reg16);
+-	reg16 |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+-	early_write_config_word(hose, hose->first_busno, 0, PCI_COMMAND, reg16);
+-
+-	/*
+-	 * Clear non-reserved bits in status register.
+-	 */
+-	early_write_config_word(hose, hose->first_busno, 0, PCI_STATUS, 0xffff);
+-	early_write_config_byte(hose, hose->first_busno, 0, PCI_LATENCY_TIMER, 0x80);
+-
+-	iounmap(pci_ctrl);
+-	iounmap(ios);
+-}
+-
+-void __init
+-mpc83xx_setup_pci2(struct pci_controller *hose)
+-{
+-	u16 reg16;
+-	volatile immr_pcictrl_t * pci_ctrl;
+-	volatile immr_ios_t * ios;
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	pci_ctrl = ioremap(binfo->bi_immr_base + 0x8600, sizeof(immr_pcictrl_t));
+-	ios = ioremap(binfo->bi_immr_base + 0x8400, sizeof(immr_ios_t));
+-
+-	/*
+-	 * Configure PCI Outbound Translation Windows
+-	 */
+-	ios->potar3 = (MPC83xx_PCI2_LOWER_MEM >> 12) & POTAR_TA_MASK;
+-	ios->pobar3 = (MPC83xx_PCI2_LOWER_MEM >> 12) & POBAR_BA_MASK;
+-	ios->pocmr3 = POCMR_EN | POCMR_DST |
+-		(((0xffffffff - (MPC83xx_PCI2_UPPER_MEM -
+-				MPC83xx_PCI2_LOWER_MEM)) >> 12) & POCMR_CM_MASK);
+-
+-	/* mapped to PCI2 IO space */
+-	ios->potar4 = (MPC83xx_PCI2_LOWER_IO >> 12) & POTAR_TA_MASK;
+-	ios->pobar4 = (MPC83xx_PCI2_IO_BASE >> 12) & POBAR_BA_MASK;
+-	ios->pocmr4 = POCMR_EN | POCMR_DST | POCMR_IO |
+-		(((0xffffffff - (MPC83xx_PCI2_UPPER_IO -
+-				MPC83xx_PCI2_LOWER_IO)) >> 12) & POCMR_CM_MASK);
+-
+-	/*
+-	 * Configure PCI Inbound Translation Windows
+-	 */
+-	pci_ctrl->pitar1 = 0x0;
+-	pci_ctrl->pibar1 = 0x0;
+-	pci_ctrl->piebar1 = 0x0;
+-	pci_ctrl->piwar1 = PIWAR_EN | PIWAR_PF | PIWAR_RTT_SNOOP | PIWAR_WTT_SNOOP | PIWAR_IWS_2G;
+-
+-	/*
+-	 * Release PCI RST signal
+-	 */
+-	pci_ctrl->gcr = 0;
+-	udelay(2000);
+-	pci_ctrl->gcr = 1;
+-	udelay(2000);
+-
+-	reg16 = 0xff;
+-	early_read_config_word(hose, hose->first_busno, 0, PCI_COMMAND, &reg16);
+-	reg16 |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+-	early_write_config_word(hose, hose->first_busno, 0, PCI_COMMAND, reg16);
+-
+-	/*
+-	 * Clear non-reserved bits in status register.
+-	 */
+-	early_write_config_word(hose, hose->first_busno, 0, PCI_STATUS, 0xffff);
+-	early_write_config_byte(hose, hose->first_busno, 0, PCI_LATENCY_TIMER, 0x80);
+-
+-	iounmap(pci_ctrl);
+-	iounmap(ios);
+-}
+-
+-/*
+- * PCI buses can be enabled only if SYS board combinates with PIB
+- * (Platform IO Board) board which provide 3 PCI slots. There is 2 PCI buses
+- * and 3 PCI slots, so people must configure the routes between them before
+- * enable PCI bus. This routes are under the control of PCA9555PW device which
+- * can be accessed via I2C bus 2 and are configured by firmware. Refer to
+- * Freescale to get more information about firmware configuration.
+- */
+-
+-extern int mpc83xx_exclude_device(u_char bus, u_char devfn);
+-extern int mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel,
+-		unsigned char pin);
+-void __init
+-mpc83xx_setup_hose(void)
+-{
+-	u32 val32;
+-	volatile immr_clk_t * clk;
+-	struct pci_controller * hose1;
+-#ifdef CONFIG_MPC83xx_PCI2
+-	struct pci_controller * hose2;
+-#endif
+-	bd_t * binfo = (bd_t *)__res;
+-
+-	clk = ioremap(binfo->bi_immr_base + 0xA00,
+-			sizeof(immr_clk_t));
+-
+-	/*
+-	 * Configure PCI controller and PCI_CLK_OUTPUT both in 66M mode
+-	 */
+-	val32 = clk->occr;
+-	udelay(2000);
+-	clk->occr = 0xff000000;
+-	udelay(2000);
+-
+-	iounmap(clk);
+-
+-	hose1 = pcibios_alloc_controller();
+-	if(!hose1)
+-		return;
+-
+-	ppc_md.pci_swizzle = common_swizzle;
+-	ppc_md.pci_map_irq = mpc83xx_map_irq;
+-
+-	hose1->bus_offset = 0;
+-	hose1->first_busno = 0;
+-	hose1->last_busno = 0xff;
+-
+-	setup_indirect_pci(hose1, binfo->bi_immr_base + PCI1_CFG_ADDR_OFFSET,
+-			binfo->bi_immr_base + PCI1_CFG_DATA_OFFSET);
+-	hose1->set_cfg_type = 1;
+-
+-	mpc83xx_setup_pci1(hose1);
+-
+-	hose1->pci_mem_offset = MPC83xx_PCI1_MEM_OFFSET;
+-	hose1->mem_space.start = MPC83xx_PCI1_LOWER_MEM;
+-	hose1->mem_space.end = MPC83xx_PCI1_UPPER_MEM;
+-
+-	hose1->io_base_phys = MPC83xx_PCI1_IO_BASE;
+-	hose1->io_space.start = MPC83xx_PCI1_LOWER_IO;
+-	hose1->io_space.end = MPC83xx_PCI1_UPPER_IO;
+-#ifdef CONFIG_MPC83xx_PCI2
+-	isa_io_base = (unsigned long)ioremap(MPC83xx_PCI1_IO_BASE,
+-			MPC83xx_PCI1_IO_SIZE + MPC83xx_PCI2_IO_SIZE);
+-#else
+-	isa_io_base = (unsigned long)ioremap(MPC83xx_PCI1_IO_BASE,
+-			MPC83xx_PCI1_IO_SIZE);
+-#endif /* CONFIG_MPC83xx_PCI2 */
+-	hose1->io_base_virt = (void *)isa_io_base;
+-	/* setup resources */
+-	pci_init_resource(&hose1->io_resource,
+-			MPC83xx_PCI1_LOWER_IO,
+-			MPC83xx_PCI1_UPPER_IO,
+-			IORESOURCE_IO, "PCI host bridge 1");
+-	pci_init_resource(&hose1->mem_resources[0],
+-			MPC83xx_PCI1_LOWER_MEM,
+-			MPC83xx_PCI1_UPPER_MEM,
+-			IORESOURCE_MEM, "PCI host bridge 1");
+-
+-	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
+-	hose1->last_busno = pciauto_bus_scan(hose1, hose1->first_busno);
+-
+-#ifdef CONFIG_MPC83xx_PCI2
+-	hose2 = pcibios_alloc_controller();
+-	if(!hose2)
+-		return;
+-
+-	hose2->bus_offset = hose1->last_busno + 1;
+-	hose2->first_busno = hose1->last_busno + 1;
+-	hose2->last_busno = 0xff;
+-	setup_indirect_pci(hose2, binfo->bi_immr_base + PCI2_CFG_ADDR_OFFSET,
+-			binfo->bi_immr_base + PCI2_CFG_DATA_OFFSET);
+-	hose2->set_cfg_type = 1;
+-
+-	mpc83xx_setup_pci2(hose2);
+-
+-	hose2->pci_mem_offset = MPC83xx_PCI2_MEM_OFFSET;
+-	hose2->mem_space.start = MPC83xx_PCI2_LOWER_MEM;
+-	hose2->mem_space.end = MPC83xx_PCI2_UPPER_MEM;
+-
+-	hose2->io_base_phys = MPC83xx_PCI2_IO_BASE;
+-	hose2->io_space.start = MPC83xx_PCI2_LOWER_IO;
+-	hose2->io_space.end = MPC83xx_PCI2_UPPER_IO;
+-	hose2->io_base_virt = (void *)(isa_io_base + MPC83xx_PCI1_IO_SIZE);
+-	/* setup resources */
+-	pci_init_resource(&hose2->io_resource,
+-			MPC83xx_PCI2_LOWER_IO,
+-			MPC83xx_PCI2_UPPER_IO,
+-			IORESOURCE_IO, "PCI host bridge 2");
+-	pci_init_resource(&hose2->mem_resources[0],
+-			MPC83xx_PCI2_LOWER_MEM,
+-			MPC83xx_PCI2_UPPER_MEM,
+-			IORESOURCE_MEM, "PCI host bridge 2");
+-
+-	hose2->last_busno = pciauto_bus_scan(hose2, hose2->first_busno);
+-#endif /* CONFIG_MPC83xx_PCI2 */
+-}
+-#endif /*CONFIG_PCI*/
+diff --git a/arch/ppc/syslib/ppc83xx_setup.h b/arch/ppc/syslib/ppc83xx_setup.h
+deleted file mode 100644
+index b918a2d..0000000
+--- a/arch/ppc/syslib/ppc83xx_setup.h
++++ /dev/null
+@@ -1,55 +0,0 @@
+-/*
+- * MPC83XX common board definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the  GNU General Public License along
+- * with this program; if not, write  to the Free Software Foundation, Inc.,
+- * 675 Mass Ave, Cambridge, MA 02139, USA.
+- */
+-
+-#ifndef __PPC_SYSLIB_PPC83XX_SETUP_H
+-#define __PPC_SYSLIB_PPC83XX_SETUP_H
+-
+-#include <linux/init.h>
+-
+-extern unsigned long mpc83xx_find_end_of_memory(void) __init;
+-extern long mpc83xx_time_init(void) __init;
+-extern void mpc83xx_calibrate_decr(void) __init;
+-extern void mpc83xx_early_serial_map(void) __init;
+-extern void mpc83xx_restart(char *cmd);
+-extern void mpc83xx_power_off(void);
+-extern void mpc83xx_halt(void);
+-extern void mpc83xx_setup_hose(void) __init;
+-
+-/* PCI config */
+-#define PCI1_CFG_ADDR_OFFSET (0x8300)
+-#define PCI1_CFG_DATA_OFFSET (0x8304)
+-
+-#define PCI2_CFG_ADDR_OFFSET (0x8380)
+-#define PCI2_CFG_DATA_OFFSET (0x8384)
+-
+-/* Serial Config */
+-#ifdef CONFIG_SERIAL_MANY_PORTS
+-#define RS_TABLE_SIZE  64
+-#else
+-#define RS_TABLE_SIZE  2
+-#endif
+-
+-#ifndef BASE_BAUD
+-#define BASE_BAUD 115200
+-#endif
+-
+-#endif /* __PPC_SYSLIB_PPC83XX_SETUP_H */
+diff --git a/arch/ppc/syslib/ppc85xx_common.c b/arch/ppc/syslib/ppc85xx_common.c
+deleted file mode 100644
+index e5ac699..0000000
+--- a/arch/ppc/syslib/ppc85xx_common.c
++++ /dev/null
+@@ -1,38 +0,0 @@
+-/*
+- * MPC85xx support routines
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/types.h>
+-#include <linux/module.h>
+-#include <linux/init.h>
+-
+-#include <asm/mpc85xx.h>
+-#include <asm/mmu.h>
+-
+-/* ************************************************************************ */
+-/* Return the value of CCSRBAR for the current board */
+-
+-phys_addr_t
+-get_ccsrbar(void)
+-{
+-        return BOARD_CCSRBAR;
+-}
+-
+-EXPORT_SYMBOL(get_ccsrbar);
+-
+-/* For now this is a pass through */
+-phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
+-{
+-	return addr;
+-};
+-EXPORT_SYMBOL(fixup_bigphys_addr);
+-
+diff --git a/arch/ppc/syslib/ppc85xx_common.h b/arch/ppc/syslib/ppc85xx_common.h
+deleted file mode 100644
+index 4fc4054..0000000
+--- a/arch/ppc/syslib/ppc85xx_common.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/*
+- * MPC85xx support routines
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#ifndef __PPC_SYSLIB_PPC85XX_COMMON_H
+-#define __PPC_SYSLIB_PPC85XX_COMMON_H
+-
+-#include <linux/init.h>
+-
+-/* Provide access to ccsrbar for any modules, etc */
+-phys_addr_t get_ccsrbar(void);
+-
+-#endif /* __PPC_SYSLIB_PPC85XX_COMMON_H */
+diff --git a/arch/ppc/syslib/ppc85xx_rio.c b/arch/ppc/syslib/ppc85xx_rio.c
+deleted file mode 100644
+index af2425e..0000000
+--- a/arch/ppc/syslib/ppc85xx_rio.c
++++ /dev/null
+@@ -1,932 +0,0 @@
+-/*
+- * MPC85xx RapidIO support
+- *
+- * Copyright 2005 MontaVista Software, Inc.
+- * Matt Porter <mporter at kernel.crashing.org>
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/interrupt.h>
+-#include <linux/rio.h>
+-#include <linux/rio_drv.h>
+-
+-#include <asm/io.h>
+-
+-#define RIO_REGS_BASE		(CCSRBAR + 0xc0000)
+-#define RIO_ATMU_REGS_OFFSET	0x10c00
+-#define RIO_MSG_REGS_OFFSET	0x11000
+-#define RIO_MAINT_WIN_SIZE	0x400000
+-#define RIO_DBELL_WIN_SIZE	0x1000
+-
+-#define RIO_MSG_OMR_MUI		0x00000002
+-#define RIO_MSG_OSR_TE		0x00000080
+-#define RIO_MSG_OSR_QOI		0x00000020
+-#define RIO_MSG_OSR_QFI		0x00000010
+-#define RIO_MSG_OSR_MUB		0x00000004
+-#define RIO_MSG_OSR_EOMI	0x00000002
+-#define RIO_MSG_OSR_QEI		0x00000001
+-
+-#define RIO_MSG_IMR_MI		0x00000002
+-#define RIO_MSG_ISR_TE		0x00000080
+-#define RIO_MSG_ISR_QFI		0x00000010
+-#define RIO_MSG_ISR_DIQI	0x00000001
+-
+-#define RIO_MSG_DESC_SIZE	32
+-#define RIO_MSG_BUFFER_SIZE	4096
+-#define RIO_MIN_TX_RING_SIZE	2
+-#define RIO_MAX_TX_RING_SIZE	2048
+-#define RIO_MIN_RX_RING_SIZE	2
+-#define RIO_MAX_RX_RING_SIZE	2048
+-
+-#define DOORBELL_DMR_DI		0x00000002
+-#define DOORBELL_DSR_TE		0x00000080
+-#define DOORBELL_DSR_QFI	0x00000010
+-#define DOORBELL_DSR_DIQI	0x00000001
+-#define DOORBELL_TID_OFFSET	0x03
+-#define DOORBELL_SID_OFFSET	0x05
+-#define DOORBELL_INFO_OFFSET	0x06
+-
+-#define DOORBELL_MESSAGE_SIZE	0x08
+-#define DBELL_SID(x)		(*(u8 *)(x + DOORBELL_SID_OFFSET))
+-#define DBELL_TID(x)		(*(u8 *)(x + DOORBELL_TID_OFFSET))
+-#define DBELL_INF(x)		(*(u16 *)(x + DOORBELL_INFO_OFFSET))
+-
+-struct rio_atmu_regs {
+-	u32 rowtar;
+-	u32 pad1;
+-	u32 rowbar;
+-	u32 pad2;
+-	u32 rowar;
+-	u32 pad3[3];
+-};
+-
+-struct rio_msg_regs {
+-	u32 omr;
+-	u32 osr;
+-	u32 pad1;
+-	u32 odqdpar;
+-	u32 pad2;
+-	u32 osar;
+-	u32 odpr;
+-	u32 odatr;
+-	u32 odcr;
+-	u32 pad3;
+-	u32 odqepar;
+-	u32 pad4[13];
+-	u32 imr;
+-	u32 isr;
+-	u32 pad5;
+-	u32 ifqdpar;
+-	u32 pad6;
+-	u32 ifqepar;
+-	u32 pad7[250];
+-	u32 dmr;
+-	u32 dsr;
+-	u32 pad8;
+-	u32 dqdpar;
+-	u32 pad9;
+-	u32 dqepar;
+-	u32 pad10[26];
+-	u32 pwmr;
+-	u32 pwsr;
+-	u32 pad11;
+-	u32 pwqbar;
+-};
+-
+-struct rio_tx_desc {
+-	u32 res1;
+-	u32 saddr;
+-	u32 dport;
+-	u32 dattr;
+-	u32 res2;
+-	u32 res3;
+-	u32 dwcnt;
+-	u32 res4;
+-};
+-
+-static u32 regs_win;
+-static struct rio_atmu_regs *atmu_regs;
+-static struct rio_atmu_regs *maint_atmu_regs;
+-static struct rio_atmu_regs *dbell_atmu_regs;
+-static u32 dbell_win;
+-static u32 maint_win;
+-static struct rio_msg_regs *msg_regs;
+-
+-static struct rio_dbell_ring {
+-	void *virt;
+-	dma_addr_t phys;
+-} dbell_ring;
+-
+-static struct rio_msg_tx_ring {
+-	void *virt;
+-	dma_addr_t phys;
+-	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
+-	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
+-	int tx_slot;
+-	int size;
+-	void *dev_id;
+-} msg_tx_ring;
+-
+-static struct rio_msg_rx_ring {
+-	void *virt;
+-	dma_addr_t phys;
+-	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
+-	int rx_slot;
+-	int size;
+-	void *dev_id;
+-} msg_rx_ring;
+-
+-/**
+- * mpc85xx_rio_doorbell_send - Send a MPC85xx doorbell message
+- * @index: ID of RapidIO interface
+- * @destid: Destination ID of target device
+- * @data: 16-bit info field of RapidIO doorbell message
+- *
+- * Sends a MPC85xx doorbell message. Returns %0 on success or
+- * %-EINVAL on failure.
+- */
+-static int mpc85xx_rio_doorbell_send(int index, u16 destid, u16 data)
+-{
+-	pr_debug("mpc85xx_doorbell_send: index %d destid %4.4x data %4.4x\n",
+-		 index, destid, data);
+-	out_be32((void *)&dbell_atmu_regs->rowtar, destid << 22);
+-	out_be16((void *)(dbell_win), data);
+-
+-	return 0;
+-}
+-
+-/**
+- * mpc85xx_local_config_read - Generate a MPC85xx local config space read
+- * @index: ID of RapdiIO interface
+- * @offset: Offset into configuration space
+- * @len: Length (in bytes) of the maintenance transaction
+- * @data: Value to be read into
+- *
+- * Generates a MPC85xx local configuration space read. Returns %0 on
+- * success or %-EINVAL on failure.
+- */
+-static int mpc85xx_local_config_read(int index, u32 offset, int len, u32 * data)
+-{
+-	pr_debug("mpc85xx_local_config_read: index %d offset %8.8x\n", index,
+-		 offset);
+-	*data = in_be32((void *)(regs_win + offset));
+-
+-	return 0;
+-}
+-
+-/**
+- * mpc85xx_local_config_write - Generate a MPC85xx local config space write
+- * @index: ID of RapdiIO interface
+- * @offset: Offset into configuration space
+- * @len: Length (in bytes) of the maintenance transaction
+- * @data: Value to be written
+- *
+- * Generates a MPC85xx local configuration space write. Returns %0 on
+- * success or %-EINVAL on failure.
+- */
+-static int mpc85xx_local_config_write(int index, u32 offset, int len, u32 data)
+-{
+-	pr_debug
+-	    ("mpc85xx_local_config_write: index %d offset %8.8x data %8.8x\n",
+-	     index, offset, data);
+-	out_be32((void *)(regs_win + offset), data);
+-
+-	return 0;
+-}
+-
+-/**
+- * mpc85xx_rio_config_read - Generate a MPC85xx read maintenance transaction
+- * @index: ID of RapdiIO interface
+- * @destid: Destination ID of transaction
+- * @hopcount: Number of hops to target device
+- * @offset: Offset into configuration space
+- * @len: Length (in bytes) of the maintenance transaction
+- * @val: Location to be read into
+- *
+- * Generates a MPC85xx read maintenance transaction. Returns %0 on
+- * success or %-EINVAL on failure.
+- */
+-static int
+-mpc85xx_rio_config_read(int index, u16 destid, u8 hopcount, u32 offset, int len,
+-			u32 * val)
+-{
+-	u8 *data;
+-
+-	pr_debug
+-	    ("mpc85xx_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
+-	     index, destid, hopcount, offset, len);
+-	out_be32((void *)&maint_atmu_regs->rowtar,
+-		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
+-
+-	data = (u8 *) maint_win + offset;
+-	switch (len) {
+-	case 1:
+-		*val = in_8((u8 *) data);
+-		break;
+-	case 2:
+-		*val = in_be16((u16 *) data);
+-		break;
+-	default:
+-		*val = in_be32((u32 *) data);
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * mpc85xx_rio_config_write - Generate a MPC85xx write maintenance transaction
+- * @index: ID of RapdiIO interface
+- * @destid: Destination ID of transaction
+- * @hopcount: Number of hops to target device
+- * @offset: Offset into configuration space
+- * @len: Length (in bytes) of the maintenance transaction
+- * @val: Value to be written
+- *
+- * Generates an MPC85xx write maintenance transaction. Returns %0 on
+- * success or %-EINVAL on failure.
+- */
+-static int
+-mpc85xx_rio_config_write(int index, u16 destid, u8 hopcount, u32 offset,
+-			 int len, u32 val)
+-{
+-	u8 *data;
+-	pr_debug
+-	    ("mpc85xx_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
+-	     index, destid, hopcount, offset, len, val);
+-	out_be32((void *)&maint_atmu_regs->rowtar,
+-		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
+-
+-	data = (u8 *) maint_win + offset;
+-	switch (len) {
+-	case 1:
+-		out_8((u8 *) data, val);
+-		break;
+-	case 2:
+-		out_be16((u16 *) data, val);
+-		break;
+-	default:
+-		out_be32((u32 *) data, val);
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue
+- * @mport: Master port with outbound message queue
+- * @rdev: Target of outbound message
+- * @mbox: Outbound mailbox
+- * @buffer: Message to add to outbound queue
+- * @len: Length of message
+- *
+- * Adds the @buffer message to the MPC85xx outbound message queue. Returns
+- * %0 on success or %-EINVAL on failure.
+- */
+-int
+-rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
+-			void *buffer, size_t len)
+-{
+-	u32 omr;
+-	struct rio_tx_desc *desc =
+-	    (struct rio_tx_desc *)msg_tx_ring.virt + msg_tx_ring.tx_slot;
+-	int ret = 0;
+-
+-	pr_debug
+-	    ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n",
+-	     rdev->destid, mbox, (int)buffer, len);
+-
+-	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+-	/* Copy and clear rest of buffer */
+-	memcpy(msg_tx_ring.virt_buffer[msg_tx_ring.tx_slot], buffer, len);
+-	if (len < (RIO_MAX_MSG_SIZE - 4))
+-		memset((void *)((u32) msg_tx_ring.
+-				virt_buffer[msg_tx_ring.tx_slot] + len), 0,
+-		       RIO_MAX_MSG_SIZE - len);
+-
+-	/* Set mbox field for message */
+-	desc->dport = mbox & 0x3;
+-
+-	/* Enable EOMI interrupt, set priority, and set destid */
+-	desc->dattr = 0x28000000 | (rdev->destid << 2);
+-
+-	/* Set transfer size aligned to next power of 2 (in double words) */
+-	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
+-
+-	/* Set snooping and source buffer address */
+-	desc->saddr = 0x00000004 | msg_tx_ring.phys_buffer[msg_tx_ring.tx_slot];
+-
+-	/* Increment enqueue pointer */
+-	omr = in_be32((void *)&msg_regs->omr);
+-	out_be32((void *)&msg_regs->omr, omr | RIO_MSG_OMR_MUI);
+-
+-	/* Go to next descriptor */
+-	if (++msg_tx_ring.tx_slot == msg_tx_ring.size)
+-		msg_tx_ring.tx_slot = 0;
+-
+-      out:
+-	return ret;
+-}
+-
+-EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);
+-
+-/**
+- * mpc85xx_rio_tx_handler - MPC85xx outbound message interrupt handler
+- * @irq: Linux interrupt number
+- * @dev_instance: Pointer to interrupt-specific data
+- *
+- * Handles outbound message interrupts. Executes a register outbound
+- * mailbox event handler and acks the interrupt occurrence.
+- */
+-static irqreturn_t
+-mpc85xx_rio_tx_handler(int irq, void *dev_instance)
+-{
+-	int osr;
+-	struct rio_mport *port = (struct rio_mport *)dev_instance;
+-
+-	osr = in_be32((void *)&msg_regs->osr);
+-
+-	if (osr & RIO_MSG_OSR_TE) {
+-		pr_info("RIO: outbound message transmission error\n");
+-		out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_TE);
+-		goto out;
+-	}
+-
+-	if (osr & RIO_MSG_OSR_QOI) {
+-		pr_info("RIO: outbound message queue overflow\n");
+-		out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_QOI);
+-		goto out;
+-	}
+-
+-	if (osr & RIO_MSG_OSR_EOMI) {
+-		u32 dqp = in_be32((void *)&msg_regs->odqdpar);
+-		int slot = (dqp - msg_tx_ring.phys) >> 5;
+-		port->outb_msg[0].mcback(port, msg_tx_ring.dev_id, -1, slot);
+-
+-		/* Ack the end-of-message interrupt */
+-		out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_EOMI);
+-	}
+-
+-      out:
+-	return IRQ_HANDLED;
+-}
+-
+-/**
+- * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox
+- * @mport: Master port implementing the outbound message unit
+- * @dev_id: Device specific pointer to pass on event
+- * @mbox: Mailbox to open
+- * @entries: Number of entries in the outbound mailbox ring
+- *
+- * Initializes buffer ring, request the outbound message interrupt,
+- * and enables the outbound message unit. Returns %0 on success and
+- * %-EINVAL or %-ENOMEM on failure.
+- */
+-int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+-{
+-	int i, j, rc = 0;
+-
+-	if ((entries < RIO_MIN_TX_RING_SIZE) ||
+-	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	/* Initialize shadow copy ring */
+-	msg_tx_ring.dev_id = dev_id;
+-	msg_tx_ring.size = entries;
+-
+-	for (i = 0; i < msg_tx_ring.size; i++) {
+-		if (!
+-		    (msg_tx_ring.virt_buffer[i] =
+-		     dma_alloc_coherent(NULL, RIO_MSG_BUFFER_SIZE,
+-					&msg_tx_ring.phys_buffer[i],
+-					GFP_KERNEL))) {
+-			rc = -ENOMEM;
+-			for (j = 0; j < msg_tx_ring.size; j++)
+-				if (msg_tx_ring.virt_buffer[j])
+-					dma_free_coherent(NULL,
+-							  RIO_MSG_BUFFER_SIZE,
+-							  msg_tx_ring.
+-							  virt_buffer[j],
+-							  msg_tx_ring.
+-							  phys_buffer[j]);
+-			goto out;
+-		}
+-	}
+-
+-	/* Initialize outbound message descriptor ring */
+-	if (!(msg_tx_ring.virt = dma_alloc_coherent(NULL,
+-						    msg_tx_ring.size *
+-						    RIO_MSG_DESC_SIZE,
+-						    &msg_tx_ring.phys,
+-						    GFP_KERNEL))) {
+-		rc = -ENOMEM;
+-		goto out_dma;
+-	}
+-	memset(msg_tx_ring.virt, 0, msg_tx_ring.size * RIO_MSG_DESC_SIZE);
+-	msg_tx_ring.tx_slot = 0;
+-
+-	/* Point dequeue/enqueue pointers at first entry in ring */
+-	out_be32((void *)&msg_regs->odqdpar, msg_tx_ring.phys);
+-	out_be32((void *)&msg_regs->odqepar, msg_tx_ring.phys);
+-
+-	/* Configure for snooping */
+-	out_be32((void *)&msg_regs->osar, 0x00000004);
+-
+-	/* Clear interrupt status */
+-	out_be32((void *)&msg_regs->osr, 0x000000b3);
+-
+-	/* Hook up outbound message handler */
+-	if ((rc =
+-	     request_irq(MPC85xx_IRQ_RIO_TX, mpc85xx_rio_tx_handler, 0,
+-			 "msg_tx", (void *)mport)) < 0)
+-		goto out_irq;
+-
+-	/*
+-	 * Configure outbound message unit
+-	 *      Snooping
+-	 *      Interrupts (all enabled, except QEIE)
+-	 *      Chaining mode
+-	 *      Disable
+-	 */
+-	out_be32((void *)&msg_regs->omr, 0x00100220);
+-
+-	/* Set number of entries */
+-	out_be32((void *)&msg_regs->omr,
+-		 in_be32((void *)&msg_regs->omr) |
+-		 ((get_bitmask_order(entries) - 2) << 12));
+-
+-	/* Now enable the unit */
+-	out_be32((void *)&msg_regs->omr, in_be32((void *)&msg_regs->omr) | 0x1);
+-
+-      out:
+-	return rc;
+-
+-      out_irq:
+-	dma_free_coherent(NULL, msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+-			  msg_tx_ring.virt, msg_tx_ring.phys);
+-
+-      out_dma:
+-	for (i = 0; i < msg_tx_ring.size; i++)
+-		dma_free_coherent(NULL, RIO_MSG_BUFFER_SIZE,
+-				  msg_tx_ring.virt_buffer[i],
+-				  msg_tx_ring.phys_buffer[i]);
+-
+-	return rc;
+-}
+-
+-/**
+- * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox
+- * @mport: Master port implementing the outbound message unit
+- * @mbox: Mailbox to close
+- *
+- * Disables the outbound message unit, free all buffers, and
+- * frees the outbound message interrupt.
+- */
+-void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
+-{
+-	/* Disable inbound message unit */
+-	out_be32((void *)&msg_regs->omr, 0);
+-
+-	/* Free ring */
+-	dma_free_coherent(NULL, msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+-			  msg_tx_ring.virt, msg_tx_ring.phys);
+-
+-	/* Free interrupt */
+-	free_irq(MPC85xx_IRQ_RIO_TX, (void *)mport);
+-}
+-
+-/**
+- * mpc85xx_rio_rx_handler - MPC85xx inbound message interrupt handler
+- * @irq: Linux interrupt number
+- * @dev_instance: Pointer to interrupt-specific data
+- *
+- * Handles inbound message interrupts. Executes a registered inbound
+- * mailbox event handler and acks the interrupt occurrence.
+- */
+-static irqreturn_t
+-mpc85xx_rio_rx_handler(int irq, void *dev_instance)
+-{
+-	int isr;
+-	struct rio_mport *port = (struct rio_mport *)dev_instance;
+-
+-	isr = in_be32((void *)&msg_regs->isr);
+-
+-	if (isr & RIO_MSG_ISR_TE) {
+-		pr_info("RIO: inbound message reception error\n");
+-		out_be32((void *)&msg_regs->isr, RIO_MSG_ISR_TE);
+-		goto out;
+-	}
+-
+-	/* XXX Need to check/dispatch until queue empty */
+-	if (isr & RIO_MSG_ISR_DIQI) {
+-		/*
+-		 * We implement *only* mailbox 0, but can receive messages
+-		 * for any mailbox/letter to that mailbox destination. So,
+-		 * make the callback with an unknown/invalid mailbox number
+-		 * argument.
+-		 */
+-		port->inb_msg[0].mcback(port, msg_rx_ring.dev_id, -1, -1);
+-
+-		/* Ack the queueing interrupt */
+-		out_be32((void *)&msg_regs->isr, RIO_MSG_ISR_DIQI);
+-	}
+-
+-      out:
+-	return IRQ_HANDLED;
+-}
+-
+-/**
+- * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox
+- * @mport: Master port implementing the inbound message unit
+- * @dev_id: Device specific pointer to pass on event
+- * @mbox: Mailbox to open
+- * @entries: Number of entries in the inbound mailbox ring
+- *
+- * Initializes buffer ring, request the inbound message interrupt,
+- * and enables the inbound message unit. Returns %0 on success
+- * and %-EINVAL or %-ENOMEM on failure.
+- */
+-int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+-{
+-	int i, rc = 0;
+-
+-	if ((entries < RIO_MIN_RX_RING_SIZE) ||
+-	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	/* Initialize client buffer ring */
+-	msg_rx_ring.dev_id = dev_id;
+-	msg_rx_ring.size = entries;
+-	msg_rx_ring.rx_slot = 0;
+-	for (i = 0; i < msg_rx_ring.size; i++)
+-		msg_rx_ring.virt_buffer[i] = NULL;
+-
+-	/* Initialize inbound message ring */
+-	if (!(msg_rx_ring.virt = dma_alloc_coherent(NULL,
+-						    msg_rx_ring.size *
+-						    RIO_MAX_MSG_SIZE,
+-						    &msg_rx_ring.phys,
+-						    GFP_KERNEL))) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	/* Point dequeue/enqueue pointers at first entry in ring */
+-	out_be32((void *)&msg_regs->ifqdpar, (u32) msg_rx_ring.phys);
+-	out_be32((void *)&msg_regs->ifqepar, (u32) msg_rx_ring.phys);
+-
+-	/* Clear interrupt status */
+-	out_be32((void *)&msg_regs->isr, 0x00000091);
+-
+-	/* Hook up inbound message handler */
+-	if ((rc =
+-	     request_irq(MPC85xx_IRQ_RIO_RX, mpc85xx_rio_rx_handler, 0,
+-			 "msg_rx", (void *)mport)) < 0) {
+-		dma_free_coherent(NULL, RIO_MSG_BUFFER_SIZE,
+-				  msg_tx_ring.virt_buffer[i],
+-				  msg_tx_ring.phys_buffer[i]);
+-		goto out;
+-	}
+-
+-	/*
+-	 * Configure inbound message unit:
+-	 *      Snooping
+-	 *      4KB max message size
+-	 *      Unmask all interrupt sources
+-	 *      Disable
+-	 */
+-	out_be32((void *)&msg_regs->imr, 0x001b0060);
+-
+-	/* Set number of queue entries */
+-	out_be32((void *)&msg_regs->imr,
+-		 in_be32((void *)&msg_regs->imr) |
+-		 ((get_bitmask_order(entries) - 2) << 12));
+-
+-	/* Now enable the unit */
+-	out_be32((void *)&msg_regs->imr, in_be32((void *)&msg_regs->imr) | 0x1);
+-
+-      out:
+-	return rc;
+-}
+-
+-/**
+- * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox
+- * @mport: Master port implementing the inbound message unit
+- * @mbox: Mailbox to close
+- *
+- * Disables the inbound message unit, free all buffers, and
+- * frees the inbound message interrupt.
+- */
+-void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
+-{
+-	/* Disable inbound message unit */
+-	out_be32((void *)&msg_regs->imr, 0);
+-
+-	/* Free ring */
+-	dma_free_coherent(NULL, msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+-			  msg_rx_ring.virt, msg_rx_ring.phys);
+-
+-	/* Free interrupt */
+-	free_irq(MPC85xx_IRQ_RIO_RX, (void *)mport);
+-}
+-
+-/**
+- * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
+- * @mport: Master port implementing the inbound message unit
+- * @mbox: Inbound mailbox number
+- * @buf: Buffer to add to inbound queue
+- *
+- * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
+- * %0 on success or %-EINVAL on failure.
+- */
+-int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+-{
+-	int rc = 0;
+-
+-	pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
+-		 msg_rx_ring.rx_slot);
+-
+-	if (msg_rx_ring.virt_buffer[msg_rx_ring.rx_slot]) {
+-		printk(KERN_ERR
+-		       "RIO: error adding inbound buffer %d, buffer exists\n",
+-		       msg_rx_ring.rx_slot);
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	msg_rx_ring.virt_buffer[msg_rx_ring.rx_slot] = buf;
+-	if (++msg_rx_ring.rx_slot == msg_rx_ring.size)
+-		msg_rx_ring.rx_slot = 0;
+-
+-      out:
+-	return rc;
+-}
+-
+-EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer);
+-
+-/**
+- * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit
+- * @mport: Master port implementing the inbound message unit
+- * @mbox: Inbound mailbox number
+- *
+- * Gets the next available inbound message from the inbound message queue.
+- * A pointer to the message is returned on success or NULL on failure.
+- */
+-void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
+-{
+-	u32 imr;
+-	u32 phys_buf, virt_buf;
+-	void *buf = NULL;
+-	int buf_idx;
+-
+-	phys_buf = in_be32((void *)&msg_regs->ifqdpar);
+-
+-	/* If no more messages, then bail out */
+-	if (phys_buf == in_be32((void *)&msg_regs->ifqepar))
+-		goto out2;
+-
+-	virt_buf = (u32) msg_rx_ring.virt + (phys_buf - msg_rx_ring.phys);
+-	buf_idx = (phys_buf - msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
+-	buf = msg_rx_ring.virt_buffer[buf_idx];
+-
+-	if (!buf) {
+-		printk(KERN_ERR
+-		       "RIO: inbound message copy failed, no buffers\n");
+-		goto out1;
+-	}
+-
+-	/* Copy max message size, caller is expected to allocate that big */
+-	memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
+-
+-	/* Clear the available buffer */
+-	msg_rx_ring.virt_buffer[buf_idx] = NULL;
+-
+-      out1:
+-	imr = in_be32((void *)&msg_regs->imr);
+-	out_be32((void *)&msg_regs->imr, imr | RIO_MSG_IMR_MI);
+-
+-      out2:
+-	return buf;
+-}
+-
+-EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);
+-
+-/**
+- * mpc85xx_rio_dbell_handler - MPC85xx doorbell interrupt handler
+- * @irq: Linux interrupt number
+- * @dev_instance: Pointer to interrupt-specific data
+- *
+- * Handles doorbell interrupts. Parses a list of registered
+- * doorbell event handlers and executes a matching event handler.
+- */
+-static irqreturn_t
+-mpc85xx_rio_dbell_handler(int irq, void *dev_instance)
+-{
+-	int dsr;
+-	struct rio_mport *port = (struct rio_mport *)dev_instance;
+-
+-	dsr = in_be32((void *)&msg_regs->dsr);
+-
+-	if (dsr & DOORBELL_DSR_TE) {
+-		pr_info("RIO: doorbell reception error\n");
+-		out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_TE);
+-		goto out;
+-	}
+-
+-	if (dsr & DOORBELL_DSR_QFI) {
+-		pr_info("RIO: doorbell queue full\n");
+-		out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_QFI);
+-		goto out;
+-	}
+-
+-	/* XXX Need to check/dispatch until queue empty */
+-	if (dsr & DOORBELL_DSR_DIQI) {
+-		u32 dmsg =
+-		    (u32) dbell_ring.virt +
+-		    (in_be32((void *)&msg_regs->dqdpar) & 0xfff);
+-		u32 dmr;
+-		struct rio_dbell *dbell;
+-		int found = 0;
+-
+-		pr_debug
+-		    ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
+-		     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
+-
+-		list_for_each_entry(dbell, &port->dbells, node) {
+-			if ((dbell->res->start <= DBELL_INF(dmsg)) &&
+-			    (dbell->res->end >= DBELL_INF(dmsg))) {
+-				found = 1;
+-				break;
+-			}
+-		}
+-		if (found) {
+-			dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg),
+-				    DBELL_INF(dmsg));
+-		} else {
+-			pr_debug
+-			    ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
+-			     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
+-		}
+-		dmr = in_be32((void *)&msg_regs->dmr);
+-		out_be32((void *)&msg_regs->dmr, dmr | DOORBELL_DMR_DI);
+-		out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_DIQI);
+-	}
+-
+-      out:
+-	return IRQ_HANDLED;
+-}
+-
+-/**
+- * mpc85xx_rio_doorbell_init - MPC85xx doorbell interface init
+- * @mport: Master port implementing the inbound doorbell unit
+- *
+- * Initializes doorbell unit hardware and inbound DMA buffer
+- * ring. Called from mpc85xx_rio_setup(). Returns %0 on success
+- * or %-ENOMEM on failure.
+- */
+-static int mpc85xx_rio_doorbell_init(struct rio_mport *mport)
+-{
+-	int rc = 0;
+-
+-	/* Map outbound doorbell window immediately after maintenance window */
+-	if (!(dbell_win =
+-	      (u32) ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
+-			    RIO_DBELL_WIN_SIZE))) {
+-		printk(KERN_ERR
+-		       "RIO: unable to map outbound doorbell window\n");
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	/* Initialize inbound doorbells */
+-	if (!(dbell_ring.virt = dma_alloc_coherent(NULL,
+-						   512 * DOORBELL_MESSAGE_SIZE,
+-						   &dbell_ring.phys,
+-						   GFP_KERNEL))) {
+-		printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
+-		rc = -ENOMEM;
+-		iounmap((void *)dbell_win);
+-		goto out;
+-	}
+-
+-	/* Point dequeue/enqueue pointers at first entry in ring */
+-	out_be32((void *)&msg_regs->dqdpar, (u32) dbell_ring.phys);
+-	out_be32((void *)&msg_regs->dqepar, (u32) dbell_ring.phys);
+-
+-	/* Clear interrupt status */
+-	out_be32((void *)&msg_regs->dsr, 0x00000091);
+-
+-	/* Hook up doorbell handler */
+-	if ((rc =
+-	     request_irq(MPC85xx_IRQ_RIO_BELL, mpc85xx_rio_dbell_handler, 0,
+-			 "dbell_rx", (void *)mport) < 0)) {
+-		iounmap((void *)dbell_win);
+-		dma_free_coherent(NULL, 512 * DOORBELL_MESSAGE_SIZE,
+-				  dbell_ring.virt, dbell_ring.phys);
+-		printk(KERN_ERR
+-		       "MPC85xx RIO: unable to request inbound doorbell irq");
+-		goto out;
+-	}
+-
+-	/* Configure doorbells for snooping, 512 entries, and enable */
+-	out_be32((void *)&msg_regs->dmr, 0x00108161);
+-
+-      out:
+-	return rc;
+-}
+-
+-static char *cmdline = NULL;
+-
+-static int mpc85xx_rio_get_hdid(int index)
+-{
+-	/* XXX Need to parse multiple entries in some format */
+-	if (!cmdline)
+-		return -1;
+-
+-	return simple_strtol(cmdline, NULL, 0);
+-}
+-
+-static int mpc85xx_rio_get_cmdline(char *s)
+-{
+-	if (!s)
+-		return 0;
+-
+-	cmdline = s;
+-	return 1;
+-}
+-
+-__setup("riohdid=", mpc85xx_rio_get_cmdline);
+-
+-/**
+- * mpc85xx_rio_setup - Setup MPC85xx RapidIO interface
+- * @law_start: Starting physical address of RapidIO LAW
+- * @law_size: Size of RapidIO LAW
+- *
+- * Initializes MPC85xx RapidIO hardware interface, configures
+- * master port with system-specific info, and registers the
+- * master port with the RapidIO subsystem.
+- */
+-void mpc85xx_rio_setup(int law_start, int law_size)
+-{
+-	struct rio_ops *ops;
+-	struct rio_mport *port;
+-
+-	ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL);
+-	ops->lcread = mpc85xx_local_config_read;
+-	ops->lcwrite = mpc85xx_local_config_write;
+-	ops->cread = mpc85xx_rio_config_read;
+-	ops->cwrite = mpc85xx_rio_config_write;
+-	ops->dsend = mpc85xx_rio_doorbell_send;
+-
+-	port = kmalloc(sizeof(struct rio_mport), GFP_KERNEL);
+-	port->id = 0;
+-	port->index = 0;
+-	INIT_LIST_HEAD(&port->dbells);
+-	port->iores.start = law_start;
+-	port->iores.end = law_start + law_size;
+-	port->iores.flags = IORESOURCE_MEM;
+-
+-	rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
+-	rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
+-	rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+-	strcpy(port->name, "RIO0 mport");
+-
+-	port->ops = ops;
+-	port->host_deviceid = mpc85xx_rio_get_hdid(port->id);
+-
+-	rio_register_mport(port);
+-
+-	regs_win = (u32) ioremap(RIO_REGS_BASE, 0x20000);
+-	atmu_regs = (struct rio_atmu_regs *)(regs_win + RIO_ATMU_REGS_OFFSET);
+-	maint_atmu_regs = atmu_regs + 1;
+-	dbell_atmu_regs = atmu_regs + 2;
+-	msg_regs = (struct rio_msg_regs *)(regs_win + RIO_MSG_REGS_OFFSET);
+-
+-	/* Configure maintenance transaction window */
+-	out_be32((void *)&maint_atmu_regs->rowbar, 0x000c0000);
+-	out_be32((void *)&maint_atmu_regs->rowar, 0x80077015);
+-
+-	maint_win = (u32) ioremap(law_start, RIO_MAINT_WIN_SIZE);
+-
+-	/* Configure outbound doorbell window */
+-	out_be32((void *)&dbell_atmu_regs->rowbar, 0x000c0400);
+-	out_be32((void *)&dbell_atmu_regs->rowar, 0x8004200b);
+-	mpc85xx_rio_doorbell_init(port);
+-}
+diff --git a/arch/ppc/syslib/ppc85xx_rio.h b/arch/ppc/syslib/ppc85xx_rio.h
+deleted file mode 100644
+index 6d3ff30..0000000
+--- a/arch/ppc/syslib/ppc85xx_rio.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * MPC85xx RapidIO definitions
+- *
+- * Copyright 2005 MontaVista Software, Inc.
+- * Matt Porter <mporter at kernel.crashing.org>
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#ifndef __PPC_SYSLIB_PPC85XX_RIO_H
+-#define __PPC_SYSLIB_PPC85XX_RIO_H
+-
+-#include <linux/init.h>
+-
+-extern void mpc85xx_rio_setup(int law_start, int law_size);
+-
+-#endif				/* __PPC_SYSLIB_PPC85XX_RIO_H */
+diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c
+deleted file mode 100644
+index 2475ec6..0000000
+--- a/arch/ppc/syslib/ppc85xx_setup.c
++++ /dev/null
+@@ -1,367 +0,0 @@
+-/*
+- * MPC85XX common board code
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/types.h>
+-#include <linux/module.h>
+-#include <linux/init.h>
+-#include <linux/pci.h>
+-#include <linux/serial.h>
+-#include <linux/tty.h>	/* for linux/serial_core.h */
+-#include <linux/serial_core.h>
+-#include <linux/serial_8250.h>
+-
+-#include <asm/time.h>
+-#include <asm/mpc85xx.h>
+-#include <asm/immap_85xx.h>
+-#include <asm/mmu.h>
+-#include <asm/ppc_sys.h>
+-#include <asm/kgdb.h>
+-#include <asm/machdep.h>
+-
+-#include <syslib/ppc85xx_setup.h>
+-
+-extern void abort(void);
+-
+-/* Return the amount of memory */
+-unsigned long __init
+-mpc85xx_find_end_of_memory(void)
+-{
+-        bd_t *binfo;
+-
+-        binfo = (bd_t *) __res;
+-
+-        return binfo->bi_memsize;
+-}
+-
+-/* The decrementer counts at the system (internal) clock freq divided by 8 */
+-void __init
+-mpc85xx_calibrate_decr(void)
+-{
+-        bd_t *binfo = (bd_t *) __res;
+-        unsigned int freq, divisor;
+-
+-        /* get the core frequency */
+-        freq = binfo->bi_busfreq;
+-
+-        /* The timebase is updated every 8 bus clocks, HID0[SEL_TBCLK] = 0 */
+-        divisor = 8;
+-        tb_ticks_per_jiffy = freq / divisor / HZ;
+-        tb_to_us = mulhwu_scale_factor(freq / divisor, 1000000);
+-
+-	/* Set the time base to zero */
+-	mtspr(SPRN_TBWL, 0);
+-	mtspr(SPRN_TBWU, 0);
+-
+-	/* Clear any pending timer interrupts */
+-	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+-
+-	/* Enable decrementer interrupt */
+-	mtspr(SPRN_TCR, TCR_DIE);
+-}
+-
+-#ifdef CONFIG_SERIAL_8250
+-void __init
+-mpc85xx_early_serial_map(void)
+-{
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+-	struct uart_port serial_req;
+-#endif
+-	struct plat_serial8250_port *pdata;
+-	bd_t *binfo = (bd_t *) __res;
+-	pdata = (struct plat_serial8250_port *) ppc_sys_get_pdata(MPC85xx_DUART);
+-
+-	/* Setup serial port access */
+-	pdata[0].uartclk = binfo->bi_busfreq;
+-	pdata[0].mapbase += binfo->bi_immr_base;
+-	pdata[0].membase = ioremap(pdata[0].mapbase, MPC85xx_UART0_SIZE);
+-
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+-	memset(&serial_req, 0, sizeof (serial_req));
+-	serial_req.iotype = UPIO_MEM;
+-	serial_req.mapbase = pdata[0].mapbase;
+-	serial_req.membase = pdata[0].membase;
+-	serial_req.regshift = 0;
+-
+-	gen550_init(0, &serial_req);
+-#endif
+-
+-	pdata[1].uartclk = binfo->bi_busfreq;
+-	pdata[1].mapbase += binfo->bi_immr_base;
+-	pdata[1].membase = ioremap(pdata[1].mapbase, MPC85xx_UART0_SIZE);
+-
+-#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+-	/* Assume gen550_init() doesn't modify serial_req */
+-	serial_req.mapbase = pdata[1].mapbase;
+-	serial_req.membase = pdata[1].membase;
+-
+-	gen550_init(1, &serial_req);
+-#endif
+-}
+-#endif
+-
+-void
+-mpc85xx_restart(char *cmd)
+-{
+-	local_irq_disable();
+-	abort();
+-}
+-
+-void
+-mpc85xx_power_off(void)
+-{
+-	local_irq_disable();
+-	for(;;);
+-}
+-
+-void
+-mpc85xx_halt(void)
+-{
+-	local_irq_disable();
+-	for(;;);
+-}
+-
+-#ifdef CONFIG_PCI
+-
+-#if defined(CONFIG_MPC8555_CDS) || defined(CONFIG_MPC8548_CDS)
+-extern void mpc85xx_cds_enable_via(struct pci_controller *hose);
+-extern void mpc85xx_cds_fixup_via(struct pci_controller *hose);
+-#endif
+-
+-static void __init
+-mpc85xx_setup_pci1(struct pci_controller *hose)
+-{
+-	volatile struct ccsr_pci *pci;
+-	volatile struct ccsr_guts *guts;
+-	unsigned short temps;
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI1_OFFSET,
+-		    MPC85xx_PCI1_SIZE);
+-
+-	guts = ioremap(binfo->bi_immr_base + MPC85xx_GUTS_OFFSET,
+-		    MPC85xx_GUTS_SIZE);
+-
+-	early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps);
+-	temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+-	early_write_config_word(hose, 0, 0, PCI_COMMAND, temps);
+-
+-#define PORDEVSR_PCI	(0x00800000)	/* PCI Mode */
+-	if (guts->pordevsr & PORDEVSR_PCI) {
+- 		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
+- 	} else {
+-		/* PCI-X init */
+-		temps = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
+-			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
+-		early_write_config_word(hose, 0, 0, PCIX_COMMAND, temps);
+-	}
+-
+-	/* Disable all windows (except powar0 since its ignored) */
+-	pci->powar1 = 0;
+-	pci->powar2 = 0;
+-	pci->powar3 = 0;
+-	pci->powar4 = 0;
+-	pci->piwar1 = 0;
+-	pci->piwar2 = 0;
+-	pci->piwar3 = 0;
+-
+-	/* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI1_LOWER_MEM */
+-	pci->potar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
+-	pci->potear1 = 0x00000000;
+-	pci->powbar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
+-	/* Enable, Mem R/W */
+-	pci->powar1 = 0x80044000 |
+-	   (__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1);
+-
+-	/* Setup outbound IO windows @ MPC85XX_PCI1_IO_BASE */
+-	pci->potar2 = (MPC85XX_PCI1_LOWER_IO >> 12) & 0x000fffff;
+-	pci->potear2 = 0x00000000;
+-	pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff;
+-	/* Enable, IO R/W */
+-	pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI1_IO_SIZE) - 1);
+-
+-	/* Setup 2G inbound Memory Window @ 0 */
+-	pci->pitar1 = 0x00000000;
+-	pci->piwbar1 = 0x00000000;
+-	pci->piwar1 = 0xa0f5501e;	/* Enable, Prefetch, Local
+-					   Mem, Snoop R/W, 2G */
+-}
+-
+-
+-extern int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin);
+-extern int mpc85xx_exclude_device(u_char bus, u_char devfn);
+-
+-#ifdef CONFIG_85xx_PCI2
+-static void __init
+-mpc85xx_setup_pci2(struct pci_controller *hose)
+-{
+-	volatile struct ccsr_pci *pci;
+-	unsigned short temps;
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI2_OFFSET,
+-		    MPC85xx_PCI2_SIZE);
+-
+-	early_read_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, &temps);
+-	temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+-	early_write_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, temps);
+-	early_write_config_byte(hose, hose->bus_offset, 0, PCI_LATENCY_TIMER, 0x80);
+-
+-	/* Disable all windows (except powar0 since its ignored) */
+-	pci->powar1 = 0;
+-	pci->powar2 = 0;
+-	pci->powar3 = 0;
+-	pci->powar4 = 0;
+-	pci->piwar1 = 0;
+-	pci->piwar2 = 0;
+-	pci->piwar3 = 0;
+-
+-	/* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI2_LOWER_MEM */
+-	pci->potar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
+-	pci->potear1 = 0x00000000;
+-	pci->powbar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
+-	/* Enable, Mem R/W */
+-	pci->powar1 = 0x80044000 |
+-	   (__ilog2(MPC85XX_PCI2_UPPER_MEM - MPC85XX_PCI2_LOWER_MEM + 1) - 1);
+-
+-	/* Setup outbound IO windows @ MPC85XX_PCI2_IO_BASE */
+-	pci->potar2 = (MPC85XX_PCI2_LOWER_IO >> 12) & 0x000fffff;
+-	pci->potear2 = 0x00000000;
+-	pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
+-	/* Enable, IO R/W */
+-	pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI2_IO_SIZE) - 1);
+-
+-	/* Setup 2G inbound Memory Window @ 0 */
+-	pci->pitar1 = 0x00000000;
+-	pci->piwbar1 = 0x00000000;
+-	pci->piwar1 = 0xa0f5501e;	/* Enable, Prefetch, Local
+-					   Mem, Snoop R/W, 2G */
+-}
+-#endif /* CONFIG_85xx_PCI2 */
+-
+-int mpc85xx_pci1_last_busno = 0;
+-
+-void __init
+-mpc85xx_setup_hose(void)
+-{
+-	struct pci_controller *hose_a;
+-#ifdef CONFIG_85xx_PCI2
+-	struct pci_controller *hose_b;
+-#endif
+-	bd_t *binfo = (bd_t *) __res;
+-
+-	hose_a = pcibios_alloc_controller();
+-
+-	if (!hose_a)
+-		return;
+-
+-	ppc_md.pci_swizzle = common_swizzle;
+-	ppc_md.pci_map_irq = mpc85xx_map_irq;
+-
+-	hose_a->first_busno = 0;
+-	hose_a->bus_offset = 0;
+-	hose_a->last_busno = 0xff;
+-
+-	setup_indirect_pci(hose_a, binfo->bi_immr_base + PCI1_CFG_ADDR_OFFSET,
+-			   binfo->bi_immr_base + PCI1_CFG_DATA_OFFSET);
+-	hose_a->set_cfg_type = 1;
+-
+-	mpc85xx_setup_pci1(hose_a);
+-
+-	hose_a->pci_mem_offset = MPC85XX_PCI1_MEM_OFFSET;
+-	hose_a->mem_space.start = MPC85XX_PCI1_LOWER_MEM;
+-	hose_a->mem_space.end = MPC85XX_PCI1_UPPER_MEM;
+-
+-	hose_a->io_space.start = MPC85XX_PCI1_LOWER_IO;
+-	hose_a->io_space.end = MPC85XX_PCI1_UPPER_IO;
+-	hose_a->io_base_phys = MPC85XX_PCI1_IO_BASE;
+-#ifdef CONFIG_85xx_PCI2
+-	hose_a->io_base_virt =  ioremap(MPC85XX_PCI1_IO_BASE,
+-					MPC85XX_PCI1_IO_SIZE +
+-					MPC85XX_PCI2_IO_SIZE);
+-#else
+-	hose_a->io_base_virt =  ioremap(MPC85XX_PCI1_IO_BASE,
+-					MPC85XX_PCI1_IO_SIZE);
+-#endif
+-	isa_io_base = (unsigned long)hose_a->io_base_virt;
+-
+-	/* setup resources */
+-	pci_init_resource(&hose_a->mem_resources[0],
+-			MPC85XX_PCI1_LOWER_MEM,
+-			MPC85XX_PCI1_UPPER_MEM,
+-			IORESOURCE_MEM, "PCI1 host bridge");
+-
+-	pci_init_resource(&hose_a->io_resource,
+-			MPC85XX_PCI1_LOWER_IO,
+-			MPC85XX_PCI1_UPPER_IO,
+-			IORESOURCE_IO, "PCI1 host bridge");
+-
+-	ppc_md.pci_exclude_device = mpc85xx_exclude_device;
+-
+-#if defined(CONFIG_MPC8555_CDS) || defined(CONFIG_MPC8548_CDS)
+-	/* Pre pciauto_bus_scan VIA init */
+-	mpc85xx_cds_enable_via(hose_a);
+-#endif
+-
+-	hose_a->last_busno = pciauto_bus_scan(hose_a, hose_a->first_busno);
+-
+-#if defined(CONFIG_MPC8555_CDS) || defined(CONFIG_MPC8548_CDS)
+-	/* Post pciauto_bus_scan VIA fixup */
+-	mpc85xx_cds_fixup_via(hose_a);
+-#endif
+-
+-#ifdef CONFIG_85xx_PCI2
+-	hose_b = pcibios_alloc_controller();
+-
+-	if (!hose_b)
+-		return;
+-
+-	hose_b->bus_offset = hose_a->last_busno + 1;
+-	hose_b->first_busno = hose_a->last_busno + 1;
+-	hose_b->last_busno = 0xff;
+-
+-	setup_indirect_pci(hose_b, binfo->bi_immr_base + PCI2_CFG_ADDR_OFFSET,
+-			   binfo->bi_immr_base + PCI2_CFG_DATA_OFFSET);
+-	hose_b->set_cfg_type = 1;
+-
+-	mpc85xx_setup_pci2(hose_b);
+-
+-	hose_b->pci_mem_offset = MPC85XX_PCI2_MEM_OFFSET;
+-	hose_b->mem_space.start = MPC85XX_PCI2_LOWER_MEM;
+-	hose_b->mem_space.end = MPC85XX_PCI2_UPPER_MEM;
+-
+-	hose_b->io_space.start = MPC85XX_PCI2_LOWER_IO;
+-	hose_b->io_space.end = MPC85XX_PCI2_UPPER_IO;
+-	hose_b->io_base_phys = MPC85XX_PCI2_IO_BASE;
+-	hose_b->io_base_virt = hose_a->io_base_virt + MPC85XX_PCI1_IO_SIZE;
+-	
+-	/* setup resources */
+-	pci_init_resource(&hose_b->mem_resources[0],
+-			MPC85XX_PCI2_LOWER_MEM,
+-			MPC85XX_PCI2_UPPER_MEM,
+-			IORESOURCE_MEM, "PCI2 host bridge");
+-
+-	pci_init_resource(&hose_b->io_resource,
+-			MPC85XX_PCI2_LOWER_IO,
+-			MPC85XX_PCI2_UPPER_IO,
+-			IORESOURCE_IO, "PCI2 host bridge");
+-
+-	hose_b->last_busno = pciauto_bus_scan(hose_b, hose_b->first_busno);
+-
+-	/* let board code know what the last bus number was on PCI1 */
+-	mpc85xx_pci1_last_busno = hose_a->last_busno;
+-#endif
+-	return;
+-}
+-#endif /* CONFIG_PCI */
+-
+-
+diff --git a/arch/ppc/syslib/ppc85xx_setup.h b/arch/ppc/syslib/ppc85xx_setup.h
+deleted file mode 100644
+index 6ff7999..0000000
+--- a/arch/ppc/syslib/ppc85xx_setup.h
++++ /dev/null
+@@ -1,56 +0,0 @@
+-/*
+- * MPC85XX common board definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor Inc.
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifndef __PPC_SYSLIB_PPC85XX_SETUP_H
+-#define __PPC_SYSLIB_PPC85XX_SETUP_H
+-
+-#include <linux/init.h>
+-#include <asm/ppcboot.h>
+-
+-extern unsigned long mpc85xx_find_end_of_memory(void) __init;
+-extern void mpc85xx_calibrate_decr(void) __init;
+-extern void mpc85xx_early_serial_map(void) __init;
+-extern void mpc85xx_restart(char *cmd);
+-extern void mpc85xx_power_off(void);
+-extern void mpc85xx_halt(void);
+-extern void mpc85xx_setup_hose(void) __init;
+-
+-/* PCI config */
+-#define PCI1_CFG_ADDR_OFFSET	(0x8000)
+-#define PCI1_CFG_DATA_OFFSET	(0x8004)
+-
+-#define PCI2_CFG_ADDR_OFFSET	(0x9000)
+-#define PCI2_CFG_DATA_OFFSET	(0x9004)
+-
+-/* Additional register for PCI-X configuration */
+-#define PCIX_NEXT_CAP	0x60
+-#define PCIX_CAP_ID	0x61
+-#define PCIX_COMMAND	0x62
+-#define PCIX_STATUS	0x64
+-
+-/* Serial Config */
+-#ifdef CONFIG_SERIAL_MANY_PORTS
+-#define RS_TABLE_SIZE  64
+-#else
+-#define RS_TABLE_SIZE  2
+-#endif
+-
+-#ifndef BASE_BAUD
+-#define BASE_BAUD 115200
+-#endif
+-
+-/* Offset of CPM register space */
+-#define CPM_MAP_ADDR	(CCSRBAR + MPC85xx_CPM_OFFSET)
+-
+-#endif /* __PPC_SYSLIB_PPC85XX_SETUP_H */
+diff --git a/arch/ppc/syslib/ppc8xx_pic.c b/arch/ppc/syslib/ppc8xx_pic.c
+index e8619c7..bce9a75 100644
+--- a/arch/ppc/syslib/ppc8xx_pic.c
++++ b/arch/ppc/syslib/ppc8xx_pic.c
+@@ -16,7 +16,7 @@ extern int cpm_get_irq(void);
+  * the only interrupt controller.  Some boards, like the MBX and
+  * Sandpoint have the 8259 as a secondary controller.  Depending
+  * upon the processor type, the internal controller can have as
+- * few as 16 interrups or as many as 64.  We could use  the
++ * few as 16 interrupts or as many as 64.  We could use  the
+  * "clear_bit()" and "set_bit()" functions like other platforms,
+  * but they are overkill for us.
+  */
+diff --git a/arch/ppc/syslib/ppc8xx_pic.h b/arch/ppc/syslib/ppc8xx_pic.h
+index d7d9f65..53bcd97 100644
+--- a/arch/ppc/syslib/ppc8xx_pic.h
++++ b/arch/ppc/syslib/ppc8xx_pic.h
+@@ -6,7 +6,6 @@
+ 
+ extern struct hw_interrupt_type ppc8xx_pic;
+ 
+-void m8xx_pic_init(void);
+ void m8xx_do_IRQ(struct pt_regs *regs,
+                  int            cpu);
+ int m8xx_get_irq(struct pt_regs *regs);
+diff --git a/arch/ppc/syslib/ppc_sys.c b/arch/ppc/syslib/ppc_sys.c
+index 2d48018..837183c 100644
+--- a/arch/ppc/syslib/ppc_sys.c
++++ b/arch/ppc/syslib/ppc_sys.c
+@@ -185,7 +185,7 @@ void platform_notify_map(const struct platform_notify_dev_map *map,
+  */
+ 
+ /*
+-   Here we'll replace .name pointers with fixed-lenght strings
++   Here we'll replace .name pointers with fixed-length strings
+    Hereby, this should be called *before* any func stuff triggeded.
+  */
+ void ppc_sys_device_initfunc(void)
+diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c
+index 8f0b953..9056fe5 100644
+--- a/arch/ppc/xmon/start.c
++++ b/arch/ppc/xmon/start.c
+@@ -10,7 +10,6 @@
+ #include <linux/sysrq.h>
+ #include <linux/bitops.h>
+ #include <asm/xmon.h>
+-#include <asm/machdep.h>
+ #include <asm/errno.h>
+ #include <asm/processor.h>
+ #include <asm/delay.h>
+diff --git a/arch/ppc/xmon/start_8xx.c b/arch/ppc/xmon/start_8xx.c
+index a48bd59..3097406 100644
+--- a/arch/ppc/xmon/start_8xx.c
++++ b/arch/ppc/xmon/start_8xx.c
+@@ -14,7 +14,7 @@
+ #include <linux/kernel.h>
+ #include <asm/8xx_immap.h>
+ #include <asm/mpc8xx.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ extern void xmon_printf(const char *fmt, ...);
+ extern int xmon_8xx_write(char *str, int nb);
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 1330061..6ef54d2 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -276,9 +276,6 @@ source "kernel/Kconfig.preempt"
+ 
+ source "mm/Kconfig"
+ 
+-config HOLES_IN_ZONE
+-	def_bool y
+-
+ comment "I/O subsystem configuration"
+ 
+ config MACHCHK_WARNING
+diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
+deleted file mode 100644
+index d1defbb..0000000
+--- a/arch/s390/crypto/Kconfig
++++ /dev/null
+@@ -1,60 +0,0 @@
+-config CRYPTO_SHA1_S390
+-	tristate "SHA1 digest algorithm"
+-	depends on S390
+-	select CRYPTO_ALGAPI
+-	help
+-	  This is the s390 hardware accelerated implementation of the
+-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
+-
+-config CRYPTO_SHA256_S390
+-	tristate "SHA256 digest algorithm"
+-	depends on S390
+-	select CRYPTO_ALGAPI
+-	help
+-	  This is the s390 hardware accelerated implementation of the
+-	  SHA256 secure hash standard (DFIPS 180-2).
+-
+-	  This version of SHA implements a 256 bit hash with 128 bits of
+-	  security against collision attacks.
+-
+-config CRYPTO_DES_S390
+-	tristate "DES and Triple DES cipher algorithms"
+-	depends on S390
+-	select CRYPTO_ALGAPI
+-	select CRYPTO_BLKCIPHER
+-	help
+-	  This us the s390 hardware accelerated implementation of the
+-	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
+-
+-config CRYPTO_AES_S390
+-	tristate "AES cipher algorithms"
+-	depends on S390
+-	select CRYPTO_ALGAPI
+-	select CRYPTO_BLKCIPHER
+-	help
+-	  This is the s390 hardware accelerated implementation of the
+-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
+-	  algorithm.
+-
+-	  Rijndael appears to be consistently a very good performer in
+-	  both hardware and software across a wide range of computing
+-	  environments regardless of its use in feedback or non-feedback
+-	  modes. Its key setup time is excellent, and its key agility is
+-	  good. Rijndael's very low memory requirements make it very well
+-	  suited for restricted-space environments, in which it also
+-	  demonstrates excellent performance. Rijndael's operations are
+-	  among the easiest to defend against power and timing attacks.
+-
+-	  On s390 the System z9-109 currently only supports the key size
+-	  of 128 bit.
+-
+-config S390_PRNG
+-	tristate "Pseudo random number generator device driver"
+-	depends on S390
+-	default "m"
+-	help
+-	  Select this option if you want to use the s390 pseudo random number
+-	  generator. The PRNG is part of the cryptographic processor functions
+-	  and uses triple-DES to generate secure random numbers like the
+-	  ANSI X9.17 standard. The PRNG is usable via the char device
+-	  /dev/prandom.
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 5126696..a3f67f8 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -6,6 +6,7 @@
+  * s390 Version:
+  *   Copyright IBM Corp. 2005,2007
+  *   Author(s): Jan Glauber (jang at de.ibm.com)
++ *		Sebastian Siewior <sebastian at breakpoint.cc> SW-Fallback
+  *
+  * Derived from "crypto/aes_generic.c"
+  *
+@@ -16,17 +17,13 @@
+  *
+  */
+ 
++#include <crypto/aes.h>
+ #include <crypto/algapi.h>
++#include <linux/err.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include "crypt_s390.h"
+ 
+-#define AES_MIN_KEY_SIZE	16
+-#define AES_MAX_KEY_SIZE	32
+-
+-/* data block size for all key lengths */
+-#define AES_BLOCK_SIZE		16
+-
+ #define AES_KEYLEN_128		1
+ #define AES_KEYLEN_192		2
+ #define AES_KEYLEN_256		4
+@@ -39,45 +36,89 @@ struct s390_aes_ctx {
+ 	long enc;
+ 	long dec;
+ 	int key_len;
++	union {
++		struct crypto_blkcipher *blk;
++		struct crypto_cipher *cip;
++	} fallback;
+ };
+ 
+-static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+-		       unsigned int key_len)
++/*
++ * Check if the key_len is supported by the HW.
++ * Returns 0 if it is, a positive number if it is not and software fallback is
++ * required or a negative number in case the key size is not valid
++ */
++static int need_fallback(unsigned int key_len)
+ {
+-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+-	u32 *flags = &tfm->crt_flags;
+-
+ 	switch (key_len) {
+ 	case 16:
+ 		if (!(keylen_flag & AES_KEYLEN_128))
+-			goto fail;
++			return 1;
+ 		break;
+ 	case 24:
+ 		if (!(keylen_flag & AES_KEYLEN_192))
+-			goto fail;
+-
++			return 1;
+ 		break;
+ 	case 32:
+ 		if (!(keylen_flag & AES_KEYLEN_256))
+-			goto fail;
++			return 1;
+ 		break;
+ 	default:
+-		goto fail;
++		return -1;
+ 		break;
+ 	}
++	return 0;
++}
++
++static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
++		unsigned int key_len)
++{
++	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	int ret;
++
++	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
++	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
++			CRYPTO_TFM_REQ_MASK);
++
++	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
++	if (ret) {
++		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
++		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
++				CRYPTO_TFM_RES_MASK);
++	}
++	return ret;
++}
++
++static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
++		       unsigned int key_len)
++{
++	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	u32 *flags = &tfm->crt_flags;
++	int ret;
++
++	ret = need_fallback(key_len);
++	if (ret < 0) {
++		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
++		return -EINVAL;
++	}
+ 
+ 	sctx->key_len = key_len;
+-	memcpy(sctx->key, in_key, key_len);
+-	return 0;
+-fail:
+-	*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+-	return -EINVAL;
++	if (!ret) {
++		memcpy(sctx->key, in_key, key_len);
++		return 0;
++	}
++
++	return setkey_fallback_cip(tfm, in_key, key_len);
+ }
+ 
+ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+ 	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ 
++	if (unlikely(need_fallback(sctx->key_len))) {
++		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
++		return;
++	}
++
+ 	switch (sctx->key_len) {
+ 	case 16:
+ 		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
+@@ -98,6 +139,11 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+ 	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ 
++	if (unlikely(need_fallback(sctx->key_len))) {
++		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
++		return;
++	}
++
+ 	switch (sctx->key_len) {
+ 	case 16:
+ 		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
+@@ -114,6 +160,29 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ 	}
+ }
+ 
++static int fallback_init_cip(struct crypto_tfm *tfm)
++{
++	const char *name = tfm->__crt_alg->cra_name;
++	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++
++	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
++			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
++
++	if (IS_ERR(sctx->fallback.cip)) {
++		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
++		return PTR_ERR(sctx->fallback.blk);
++	}
++
++	return 0;
++}
++
++static void fallback_exit_cip(struct crypto_tfm *tfm)
++{
++	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++
++	crypto_free_cipher(sctx->fallback.cip);
++	sctx->fallback.cip = NULL;
++}
+ 
+ static struct crypto_alg aes_alg = {
+ 	.cra_name		=	"aes",
+@@ -125,6 +194,8 @@ static struct crypto_alg aes_alg = {
+ 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
+ 	.cra_module		=	THIS_MODULE,
+ 	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
++	.cra_init               =       fallback_init_cip,
++	.cra_exit               =       fallback_exit_cip,
+ 	.cra_u			=	{
+ 		.cipher = {
+ 			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+@@ -136,10 +207,70 @@ static struct crypto_alg aes_alg = {
+ 	}
+ };
+ 
++static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
++		unsigned int len)
++{
++	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	unsigned int ret;
++
++	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
++	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
++			CRYPTO_TFM_REQ_MASK);
++
++	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
++	if (ret) {
++		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
++		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
++				CRYPTO_TFM_RES_MASK);
++	}
++	return ret;
++}
++
++static int fallback_blk_dec(struct blkcipher_desc *desc,
++		struct scatterlist *dst, struct scatterlist *src,
++		unsigned int nbytes)
++{
++	unsigned int ret;
++	struct crypto_blkcipher *tfm;
++	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
++
++	tfm = desc->tfm;
++	desc->tfm = sctx->fallback.blk;
++
++	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
++
++	desc->tfm = tfm;
++	return ret;
++}
++
++static int fallback_blk_enc(struct blkcipher_desc *desc,
++		struct scatterlist *dst, struct scatterlist *src,
++		unsigned int nbytes)
++{
++	unsigned int ret;
++	struct crypto_blkcipher *tfm;
++	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
++
++	tfm = desc->tfm;
++	desc->tfm = sctx->fallback.blk;
++
++	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
++
++	desc->tfm = tfm;
++	return ret;
++}
++
+ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ 			   unsigned int key_len)
+ {
+ 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	int ret;
++
++	ret = need_fallback(key_len);
++	if (ret > 0) {
++		sctx->key_len = key_len;
++		return setkey_fallback_blk(tfm, in_key, key_len);
++	}
+ 
+ 	switch (key_len) {
+ 	case 16:
+@@ -188,6 +319,9 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+ 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ 	struct blkcipher_walk walk;
+ 
++	if (unlikely(need_fallback(sctx->key_len)))
++		return fallback_blk_enc(desc, dst, src, nbytes);
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
+ }
+@@ -199,10 +333,37 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+ 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ 	struct blkcipher_walk walk;
+ 
++	if (unlikely(need_fallback(sctx->key_len)))
++		return fallback_blk_dec(desc, dst, src, nbytes);
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
+ }
+ 
++static int fallback_init_blk(struct crypto_tfm *tfm)
++{
++	const char *name = tfm->__crt_alg->cra_name;
++	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++
++	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
++			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
++
++	if (IS_ERR(sctx->fallback.blk)) {
++		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
++		return PTR_ERR(sctx->fallback.blk);
++	}
++
++	return 0;
++}
++
++static void fallback_exit_blk(struct crypto_tfm *tfm)
++{
++	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++
++	crypto_free_blkcipher(sctx->fallback.blk);
++	sctx->fallback.blk = NULL;
++}
++
+ static struct crypto_alg ecb_aes_alg = {
+ 	.cra_name		=	"ecb(aes)",
+ 	.cra_driver_name	=	"ecb-aes-s390",
+@@ -214,6 +375,8 @@ static struct crypto_alg ecb_aes_alg = {
+ 	.cra_type		=	&crypto_blkcipher_type,
+ 	.cra_module		=	THIS_MODULE,
+ 	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
++	.cra_init		=	fallback_init_blk,
++	.cra_exit		=	fallback_exit_blk,
+ 	.cra_u			=	{
+ 		.blkcipher = {
+ 			.min_keysize		=	AES_MIN_KEY_SIZE,
+@@ -229,6 +392,13 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ 			   unsigned int key_len)
+ {
+ 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
++	int ret;
++
++	ret = need_fallback(key_len);
++	if (ret > 0) {
++		sctx->key_len = key_len;
++		return setkey_fallback_blk(tfm, in_key, key_len);
++	}
+ 
+ 	switch (key_len) {
+ 	case 16:
+@@ -283,6 +453,9 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+ 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ 	struct blkcipher_walk walk;
+ 
++	if (unlikely(need_fallback(sctx->key_len)))
++		return fallback_blk_enc(desc, dst, src, nbytes);
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+ }
+@@ -294,6 +467,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+ 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ 	struct blkcipher_walk walk;
+ 
++	if (unlikely(need_fallback(sctx->key_len)))
++		return fallback_blk_dec(desc, dst, src, nbytes);
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+ }
+@@ -309,6 +485,8 @@ static struct crypto_alg cbc_aes_alg = {
+ 	.cra_type		=	&crypto_blkcipher_type,
+ 	.cra_module		=	THIS_MODULE,
+ 	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
++	.cra_init		=	fallback_init_blk,
++	.cra_exit		=	fallback_exit_blk,
+ 	.cra_u			=	{
+ 		.blkcipher = {
+ 			.min_keysize		=	AES_MIN_KEY_SIZE,
+@@ -336,14 +514,10 @@ static int __init aes_init(void)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* z9 109 and z9 BC/EC only support 128 bit key length */
+-	if (keylen_flag == AES_KEYLEN_128) {
+-		aes_alg.cra_u.cipher.cia_max_keysize = AES_MIN_KEY_SIZE;
+-		ecb_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
+-		cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
++	if (keylen_flag == AES_KEYLEN_128)
+ 		printk(KERN_INFO
+-		       "aes_s390: hardware acceleration only available for"
++		       "aes_s390: hardware acceleration only available for "
+ 		       "128 bit keys\n");
+-	}
+ 
+ 	ret = crypto_register_alg(&aes_alg);
+ 	if (ret)
+@@ -382,4 +556,3 @@ MODULE_ALIAS("aes");
+ 
+ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
+ MODULE_LICENSE("GPL");
+-
+diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
+index 8eb3a1a..0cfefdd 100644
+--- a/arch/s390/crypto/prng.c
++++ b/arch/s390/crypto/prng.c
+@@ -90,7 +90,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
+ 	int ret = 0;
+ 	int tmp;
+ 
+-	/* nbytes can be arbitrary long, we spilt it into chunks */
++	/* nbytes can be arbitrary length, we split it into chunks */
+ 	while (nbytes) {
+ 		/* same as in extract_entropy_user in random.c */
+ 		if (need_resched()) {
+@@ -146,7 +146,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
+ 	return ret;
+ }
+ 
+-static struct file_operations prng_fops = {
++static const struct file_operations prng_fops = {
+ 	.owner		= THIS_MODULE,
+ 	.open		= &prng_open,
+ 	.release	= NULL,
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index 5245717..4b010ff 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -490,7 +490,7 @@ static struct super_operations hypfs_s_ops = {
+ 	.show_options	= hypfs_show_options,
+ };
+ 
+-static decl_subsys(s390, NULL, NULL);
++static struct kobject *s390_kobj;
+ 
+ static int __init hypfs_init(void)
+ {
+@@ -506,17 +506,18 @@ static int __init hypfs_init(void)
+ 			goto fail_diag;
+ 		}
+ 	}
+-	kobj_set_kset_s(&s390_subsys, hypervisor_subsys);
+-	rc = subsystem_register(&s390_subsys);
+-	if (rc)
++	s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
++	if (!s390_kobj) {
++		rc = -ENOMEM;
+ 		goto fail_sysfs;
++	}
+ 	rc = register_filesystem(&hypfs_type);
+ 	if (rc)
+ 		goto fail_filesystem;
+ 	return 0;
+ 
+ fail_filesystem:
+-	subsystem_unregister(&s390_subsys);
++	kobject_put(s390_kobj);
+ fail_sysfs:
+ 	if (!MACHINE_IS_VM)
+ 		hypfs_diag_exit();
+@@ -530,7 +531,7 @@ static void __exit hypfs_exit(void)
+ 	if (!MACHINE_IS_VM)
+ 		hypfs_diag_exit();
+ 	unregister_filesystem(&hypfs_type);
+-	subsystem_unregister(&s390_subsys);
++	kobject_put(s390_kobj);
+ }
+ 
+ module_init(hypfs_init)
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index 56cb710..b3b650a 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -31,7 +31,3 @@ S390_KEXEC_OBJS := machine_kexec.o crash.o
+ S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
+ obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
+ 
+-#
+-# This is just to get the dependencies...
+-#
+-binfmt_elf32.o:	$(TOPDIR)/fs/binfmt_elf.c
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 1b3af7d..9f7b73b 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -276,7 +276,7 @@ void __init startup_init(void)
+ 	create_kernel_nss();
+ 	sort_main_extable();
+ 	setup_lowcore_early();
+-	sclp_readinfo_early();
++	sclp_read_info_early();
+ 	sclp_facilities_detect();
+ 	memsize = sclp_memory_detect();
+ #ifndef CONFIG_64BIT
+diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
+index a87b197..79dccd2 100644
+--- a/arch/s390/kernel/head64.S
++++ b/arch/s390/kernel/head64.S
+@@ -157,7 +157,7 @@ startup_continue:
+ 	.long	0xb2b10000		# store facility list
+ 	tm	0xc8,0x08		# check bit for clearing-by-ASCE
+ 	bno	0f-.LPG1(%r13)
+-	lhi	%r1,2094
++	lhi	%r1,2048
+ 	lhi	%r2,0
+ 	.long	0xb98e2001
+ 	oi	7(%r12),0x80		# set IDTE flag
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index ce0856d..db28cca 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -2,7 +2,7 @@
+  *  arch/s390/kernel/ipl.c
+  *    ipl/reipl/dump support for Linux on s390.
+  *
+- *    Copyright (C) IBM Corp. 2005,2006
++ *    Copyright IBM Corp. 2005,2007
+  *    Author(s): Michael Holzheu <holzheu at de.ibm.com>
+  *		 Heiko Carstens <heiko.carstens at de.ibm.com>
+  *		 Volker Sameske <sameske at de.ibm.com>
+@@ -31,6 +31,43 @@
+ #define IPL_FCP_DUMP_STR	"fcp_dump"
+ #define IPL_NSS_STR		"nss"
+ 
++#define DUMP_CCW_STR		"ccw"
++#define DUMP_FCP_STR		"fcp"
++#define DUMP_NONE_STR		"none"
++
++/*
++ * Four shutdown trigger types are supported:
++ * - panic
++ * - halt
++ * - power off
++ * - reipl
++ */
++#define ON_PANIC_STR		"on_panic"
++#define ON_HALT_STR		"on_halt"
++#define ON_POFF_STR		"on_poff"
++#define ON_REIPL_STR		"on_reboot"
++
++struct shutdown_action;
++struct shutdown_trigger {
++	char *name;
++	struct shutdown_action *action;
++};
++
++/*
++ * Five shutdown action types are supported:
++ */
++#define SHUTDOWN_ACTION_IPL_STR		"ipl"
++#define SHUTDOWN_ACTION_REIPL_STR	"reipl"
++#define SHUTDOWN_ACTION_DUMP_STR	"dump"
++#define SHUTDOWN_ACTION_VMCMD_STR	"vmcmd"
++#define SHUTDOWN_ACTION_STOP_STR	"stop"
++
++struct shutdown_action {
++	char *name;
++	void (*fn) (struct shutdown_trigger *trigger);
++	int (*init) (void);
++};
++
+ static char *ipl_type_str(enum ipl_type type)
+ {
+ 	switch (type) {
+@@ -54,10 +91,6 @@ enum dump_type {
+ 	DUMP_TYPE_FCP	= 4,
+ };
+ 
+-#define DUMP_NONE_STR	 "none"
+-#define DUMP_CCW_STR	 "ccw"
+-#define DUMP_FCP_STR	 "fcp"
+-
+ static char *dump_type_str(enum dump_type type)
+ {
+ 	switch (type) {
+@@ -99,30 +132,6 @@ enum dump_method {
+ 	DUMP_METHOD_FCP_DIAG,
+ };
+ 
+-enum shutdown_action {
+-	SHUTDOWN_REIPL,
+-	SHUTDOWN_DUMP,
+-	SHUTDOWN_STOP,
+-};
+-
+-#define SHUTDOWN_REIPL_STR "reipl"
+-#define SHUTDOWN_DUMP_STR  "dump"
+-#define SHUTDOWN_STOP_STR  "stop"
+-
+-static char *shutdown_action_str(enum shutdown_action action)
+-{
+-	switch (action) {
+-	case SHUTDOWN_REIPL:
+-		return SHUTDOWN_REIPL_STR;
+-	case SHUTDOWN_DUMP:
+-		return SHUTDOWN_DUMP_STR;
+-	case SHUTDOWN_STOP:
+-		return SHUTDOWN_STOP_STR;
+-	default:
+-		return NULL;
+-	}
+-}
+-
+ static int diag308_set_works = 0;
+ 
+ static int reipl_capabilities = IPL_TYPE_UNKNOWN;
+@@ -140,8 +149,6 @@ static enum dump_method dump_method = DUMP_METHOD_NONE;
+ static struct ipl_parameter_block *dump_block_fcp;
+ static struct ipl_parameter_block *dump_block_ccw;
+ 
+-static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
+-
+ static struct sclp_ipl_info sclp_ipl_info;
+ 
+ int diag308(unsigned long subcode, void *addr)
+@@ -162,22 +169,25 @@ EXPORT_SYMBOL_GPL(diag308);
+ /* SYSFS */
+ 
+ #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value)		\
+-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
++static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
++		struct kobj_attribute *attr,				\
+ 		char *page)						\
+ {									\
+ 	return sprintf(page, _format, _value);				\
+ }									\
+-static struct subsys_attribute sys_##_prefix##_##_name##_attr =		\
++static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
+ 	__ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
+ 
+ #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)	\
+-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
++static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
++		struct kobj_attribute *attr,				\
+ 		char *page)						\
+ {									\
+ 	return sprintf(page, _fmt_out,					\
+ 			(unsigned long long) _value);			\
+ }									\
+-static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
++static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
++		struct kobj_attribute *attr,				\
+ 		const char *buf, size_t len)				\
+ {									\
+ 	unsigned long long value;					\
+@@ -186,25 +196,27 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
+ 	_value = value;							\
+ 	return len;							\
+ }									\
+-static struct subsys_attribute sys_##_prefix##_##_name##_attr =		\
++static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
+ 	__ATTR(_name,(S_IRUGO | S_IWUSR),				\
+ 			sys_##_prefix##_##_name##_show,			\
+ 			sys_##_prefix##_##_name##_store);
+ 
+ #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
+-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
++static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
++		struct kobj_attribute *attr,				\
+ 		char *page)						\
+ {									\
+ 	return sprintf(page, _fmt_out, _value);				\
+ }									\
+-static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
++static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
++		struct kobj_attribute *attr,				\
+ 		const char *buf, size_t len)				\
+ {									\
+-	if (sscanf(buf, _fmt_in, _value) != 1)				\
+-		return -EINVAL;						\
++	strncpy(_value, buf, sizeof(_value) - 1);			\
++	strstrip(_value);						\
+ 	return len;							\
+ }									\
+-static struct subsys_attribute sys_##_prefix##_##_name##_attr =		\
++static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
+ 	__ATTR(_name,(S_IRUGO | S_IWUSR),				\
+ 			sys_##_prefix##_##_name##_show,			\
+ 			sys_##_prefix##_##_name##_store);
+@@ -240,44 +252,19 @@ static __init enum ipl_type get_ipl_type(void)
+ 	return IPL_TYPE_FCP;
+ }
+ 
+-void __init setup_ipl_info(void)
+-{
+-	ipl_info.type = get_ipl_type();
+-	switch (ipl_info.type) {
+-	case IPL_TYPE_CCW:
+-		ipl_info.data.ccw.dev_id.devno = ipl_devno;
+-		ipl_info.data.ccw.dev_id.ssid = 0;
+-		break;
+-	case IPL_TYPE_FCP:
+-	case IPL_TYPE_FCP_DUMP:
+-		ipl_info.data.fcp.dev_id.devno =
+-			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
+-		ipl_info.data.fcp.dev_id.ssid = 0;
+-		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
+-		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
+-		break;
+-	case IPL_TYPE_NSS:
+-		strncpy(ipl_info.data.nss.name, kernel_nss_name,
+-			sizeof(ipl_info.data.nss.name));
+-		break;
+-	case IPL_TYPE_UNKNOWN:
+-	default:
+-		/* We have no info to copy */
+-		break;
+-	}
+-}
+-
+ struct ipl_info ipl_info;
+ EXPORT_SYMBOL_GPL(ipl_info);
+ 
+-static ssize_t ipl_type_show(struct kset *kset, char *page)
++static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
++			     char *page)
+ {
+ 	return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
+ }
+ 
+-static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
++static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
+ 
+-static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
++static ssize_t sys_ipl_device_show(struct kobject *kobj,
++				   struct kobj_attribute *attr, char *page)
+ {
+ 	struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
+ 
+@@ -292,7 +279,7 @@ static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
+ 	}
+ }
+ 
+-static struct subsys_attribute sys_ipl_device_attr =
++static struct kobj_attribute sys_ipl_device_attr =
+ 	__ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
+ 
+ static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
+@@ -367,7 +354,8 @@ static struct attribute_group ipl_fcp_attr_group = {
+ 
+ /* CCW ipl device attributes */
+ 
+-static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
++static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
++				     struct kobj_attribute *attr, char *page)
+ {
+ 	char loadparm[LOADPARM_LEN + 1] = {};
+ 
+@@ -379,7 +367,7 @@ static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
+ 	return sprintf(page, "%s\n", loadparm);
+ }
+ 
+-static struct subsys_attribute sys_ipl_ccw_loadparm_attr =
++static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
+ 	__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
+ 
+ static struct attribute *ipl_ccw_attrs[] = {
+@@ -418,10 +406,76 @@ static struct attribute_group ipl_unknown_attr_group = {
+ 	.attrs = ipl_unknown_attrs,
+ };
+ 
+-static decl_subsys(ipl, NULL, NULL);
++static struct kset *ipl_kset;
++
++static int __init ipl_register_fcp_files(void)
++{
++	int rc;
++
++	rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
++	if (rc)
++		goto out;
++	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
++	if (rc)
++		goto out_ipl_parm;
++	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
++	if (!rc)
++		goto out;
++
++	sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
++
++out_ipl_parm:
++	sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
++out:
++	return rc;
++}
++
++static void ipl_run(struct shutdown_trigger *trigger)
++{
++	diag308(DIAG308_IPL, NULL);
++	if (MACHINE_IS_VM)
++		__cpcmd("IPL", NULL, 0, NULL);
++	else if (ipl_info.type == IPL_TYPE_CCW)
++		reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
++}
++
++static int ipl_init(void)
++{
++	int rc;
++
++	ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
++	if (!ipl_kset) {
++		rc = -ENOMEM;
++		goto out;
++	}
++	switch (ipl_info.type) {
++	case IPL_TYPE_CCW:
++		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
++		break;
++	case IPL_TYPE_FCP:
++	case IPL_TYPE_FCP_DUMP:
++		rc = ipl_register_fcp_files();
++		break;
++	case IPL_TYPE_NSS:
++		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
++		break;
++	default:
++		rc = sysfs_create_group(&ipl_kset->kobj,
++					&ipl_unknown_attr_group);
++		break;
++	}
++out:
++	if (rc)
++		panic("ipl_init failed: rc = %i\n", rc);
++
++	return 0;
++}
++
++static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run,
++					    ipl_init};
+ 
+ /*
+- * reipl section
++ * reipl shutdown action: Reboot Linux on shutdown.
+  */
+ 
+ /* FCP reipl device attributes */
+@@ -465,7 +519,8 @@ static void reipl_get_ascii_loadparm(char *loadparm)
+ 	strstrip(loadparm);
+ }
+ 
+-static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
++static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
++				       struct kobj_attribute *attr, char *page)
+ {
+ 	char buf[LOADPARM_LEN + 1];
+ 
+@@ -473,7 +528,8 @@ static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
+ 	return sprintf(page, "%s\n", buf);
+ }
+ 
+-static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
++static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
++					struct kobj_attribute *attr,
+ 					const char *buf, size_t len)
+ {
+ 	int i, lp_len;
+@@ -500,7 +556,7 @@ static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
+ 	return len;
+ }
+ 
+-static struct subsys_attribute sys_reipl_ccw_loadparm_attr =
++static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
+ 	__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
+ 	       reipl_ccw_loadparm_store);
+ 
+@@ -539,7 +595,9 @@ static int reipl_set_type(enum ipl_type type)
+ 
+ 	switch(type) {
+ 	case IPL_TYPE_CCW:
+-		if (MACHINE_IS_VM)
++		if (diag308_set_works)
++			reipl_method = REIPL_METHOD_CCW_DIAG;
++		else if (MACHINE_IS_VM)
+ 			reipl_method = REIPL_METHOD_CCW_VM;
+ 		else
+ 			reipl_method = REIPL_METHOD_CCW_CIO;
+@@ -568,13 +626,15 @@ static int reipl_set_type(enum ipl_type type)
+ 	return 0;
+ }
+ 
+-static ssize_t reipl_type_show(struct kset *kset, char *page)
++static ssize_t reipl_type_show(struct kobject *kobj,
++			       struct kobj_attribute *attr, char *page)
+ {
+ 	return sprintf(page, "%s\n", ipl_type_str(reipl_type));
+ }
+ 
+-static ssize_t reipl_type_store(struct kset *kset, const char *buf,
+-				size_t len)
++static ssize_t reipl_type_store(struct kobject *kobj,
++				struct kobj_attribute *attr,
++				const char *buf, size_t len)
+ {
+ 	int rc = -EINVAL;
+ 
+@@ -587,140 +647,12 @@ static ssize_t reipl_type_store(struct kset *kset, const char *buf,
+ 	return (rc != 0) ? rc : len;
+ }
+ 
+-static struct subsys_attribute reipl_type_attr =
+-		__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
+-
+-static decl_subsys(reipl, NULL, NULL);
+-
+-/*
+- * dump section
+- */
+-
+-/* FCP dump device attributes */
+-
+-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+-		   dump_block_fcp->ipl_info.fcp.wwpn);
+-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+-		   dump_block_fcp->ipl_info.fcp.lun);
+-DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
+-		   dump_block_fcp->ipl_info.fcp.bootprog);
+-DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
+-		   dump_block_fcp->ipl_info.fcp.br_lba);
+-DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
+-		   dump_block_fcp->ipl_info.fcp.devno);
+-
+-static struct attribute *dump_fcp_attrs[] = {
+-	&sys_dump_fcp_device_attr.attr,
+-	&sys_dump_fcp_wwpn_attr.attr,
+-	&sys_dump_fcp_lun_attr.attr,
+-	&sys_dump_fcp_bootprog_attr.attr,
+-	&sys_dump_fcp_br_lba_attr.attr,
+-	NULL,
+-};
+-
+-static struct attribute_group dump_fcp_attr_group = {
+-	.name  = IPL_FCP_STR,
+-	.attrs = dump_fcp_attrs,
+-};
+-
+-/* CCW dump device attributes */
+-
+-DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+-		   dump_block_ccw->ipl_info.ccw.devno);
+-
+-static struct attribute *dump_ccw_attrs[] = {
+-	&sys_dump_ccw_device_attr.attr,
+-	NULL,
+-};
+-
+-static struct attribute_group dump_ccw_attr_group = {
+-	.name  = IPL_CCW_STR,
+-	.attrs = dump_ccw_attrs,
+-};
+-
+-/* dump type */
+-
+-static int dump_set_type(enum dump_type type)
+-{
+-	if (!(dump_capabilities & type))
+-		return -EINVAL;
+-	switch(type) {
+-	case DUMP_TYPE_CCW:
+-		if (MACHINE_IS_VM)
+-			dump_method = DUMP_METHOD_CCW_VM;
+-		else if (diag308_set_works)
+-			dump_method = DUMP_METHOD_CCW_DIAG;
+-		else
+-			dump_method = DUMP_METHOD_CCW_CIO;
+-		break;
+-	case DUMP_TYPE_FCP:
+-		dump_method = DUMP_METHOD_FCP_DIAG;
+-		break;
+-	default:
+-		dump_method = DUMP_METHOD_NONE;
+-	}
+-	dump_type = type;
+-	return 0;
+-}
+-
+-static ssize_t dump_type_show(struct kset *kset, char *page)
+-{
+-	return sprintf(page, "%s\n", dump_type_str(dump_type));
+-}
+-
+-static ssize_t dump_type_store(struct kset *kset, const char *buf,
+-			       size_t len)
+-{
+-	int rc = -EINVAL;
+-
+-	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
+-		rc = dump_set_type(DUMP_TYPE_NONE);
+-	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
+-		rc = dump_set_type(DUMP_TYPE_CCW);
+-	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
+-		rc = dump_set_type(DUMP_TYPE_FCP);
+-	return (rc != 0) ? rc : len;
+-}
+-
+-static struct subsys_attribute dump_type_attr =
+-		__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
+-
+-static decl_subsys(dump, NULL, NULL);
+-
+-/*
+- * Shutdown actions section
+- */
+-
+-static decl_subsys(shutdown_actions, NULL, NULL);
+-
+-/* on panic */
+-
+-static ssize_t on_panic_show(struct kset *kset, char *page)
+-{
+-	return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
+-}
+-
+-static ssize_t on_panic_store(struct kset *kset, const char *buf,
+-			      size_t len)
+-{
+-	if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
+-		on_panic_action = SHUTDOWN_REIPL;
+-	else if (strncmp(buf, SHUTDOWN_DUMP_STR,
+-			 strlen(SHUTDOWN_DUMP_STR)) == 0)
+-		on_panic_action = SHUTDOWN_DUMP;
+-	else if (strncmp(buf, SHUTDOWN_STOP_STR,
+-			 strlen(SHUTDOWN_STOP_STR)) == 0)
+-		on_panic_action = SHUTDOWN_STOP;
+-	else
+-		return -EINVAL;
+-
+-	return len;
+-}
++static struct kobj_attribute reipl_type_attr =
++	__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
+ 
+-static struct subsys_attribute on_panic_attr =
+-		__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
++static struct kset *reipl_kset;
+ 
+-void do_reipl(void)
++void reipl_run(struct shutdown_trigger *trigger)
+ {
+ 	struct ccw_dev_id devid;
+ 	static char buf[100];
+@@ -729,8 +661,6 @@ void do_reipl(void)
+ 	switch (reipl_method) {
+ 	case REIPL_METHOD_CCW_CIO:
+ 		devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
+-		if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
+-			diag308(DIAG308_IPL, NULL);
+ 		devid.ssid  = 0;
+ 		reipl_ccw_dev(&devid);
+ 		break;
+@@ -771,98 +701,6 @@ void do_reipl(void)
+ 	default:
+ 		break;
+ 	}
+-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-}
+-
+-static void do_dump(void)
+-{
+-	struct ccw_dev_id devid;
+-	static char buf[100];
+-
+-	switch (dump_method) {
+-	case DUMP_METHOD_CCW_CIO:
+-		smp_send_stop();
+-		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
+-		devid.ssid  = 0;
+-		reipl_ccw_dev(&devid);
+-		break;
+-	case DUMP_METHOD_CCW_VM:
+-		smp_send_stop();
+-		sprintf(buf, "STORE STATUS");
+-		__cpcmd(buf, NULL, 0, NULL);
+-		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
+-		__cpcmd(buf, NULL, 0, NULL);
+-		break;
+-	case DUMP_METHOD_CCW_DIAG:
+-		diag308(DIAG308_SET, dump_block_ccw);
+-		diag308(DIAG308_DUMP, NULL);
+-		break;
+-	case DUMP_METHOD_FCP_DIAG:
+-		diag308(DIAG308_SET, dump_block_fcp);
+-		diag308(DIAG308_DUMP, NULL);
+-		break;
+-	case DUMP_METHOD_NONE:
+-	default:
+-		return;
+-	}
+-	printk(KERN_EMERG "Dump failed!\n");
+-}
+-
+-/* init functions */
+-
+-static int __init ipl_register_fcp_files(void)
+-{
+-	int rc;
+-
+-	rc = sysfs_create_group(&ipl_subsys.kobj,
+-				&ipl_fcp_attr_group);
+-	if (rc)
+-		goto out;
+-	rc = sysfs_create_bin_file(&ipl_subsys.kobj,
+-				   &ipl_parameter_attr);
+-	if (rc)
+-		goto out_ipl_parm;
+-	rc = sysfs_create_bin_file(&ipl_subsys.kobj,
+-				   &ipl_scp_data_attr);
+-	if (!rc)
+-		goto out;
+-
+-	sysfs_remove_bin_file(&ipl_subsys.kobj, &ipl_parameter_attr);
+-
+-out_ipl_parm:
+-	sysfs_remove_group(&ipl_subsys.kobj, &ipl_fcp_attr_group);
+-out:
+-	return rc;
+-}
+-
+-static int __init ipl_init(void)
+-{
+-	int rc;
+-
+-	rc = firmware_register(&ipl_subsys);
+-	if (rc)
+-		return rc;
+-	switch (ipl_info.type) {
+-	case IPL_TYPE_CCW:
+-		rc = sysfs_create_group(&ipl_subsys.kobj,
+-					&ipl_ccw_attr_group);
+-		break;
+-	case IPL_TYPE_FCP:
+-	case IPL_TYPE_FCP_DUMP:
+-		rc = ipl_register_fcp_files();
+-		break;
+-	case IPL_TYPE_NSS:
+-		rc = sysfs_create_group(&ipl_subsys.kobj,
+-					&ipl_nss_attr_group);
+-		break;
+-	default:
+-		rc = sysfs_create_group(&ipl_subsys.kobj,
+-					&ipl_unknown_attr_group);
+-		break;
+-	}
+-	if (rc)
+-		firmware_unregister(&ipl_subsys);
+-	return rc;
+ }
+ 
+ static void __init reipl_probe(void)
+@@ -883,7 +721,7 @@ static int __init reipl_nss_init(void)
+ 
+ 	if (!MACHINE_IS_VM)
+ 		return 0;
+-	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_nss_attr_group);
++	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
+ 	if (rc)
+ 		return rc;
+ 	strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
+@@ -898,7 +736,7 @@ static int __init reipl_ccw_init(void)
+ 	reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
+ 	if (!reipl_block_ccw)
+ 		return -ENOMEM;
+-	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_ccw_attr_group);
++	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group);
+ 	if (rc) {
+ 		free_page((unsigned long)reipl_block_ccw);
+ 		return rc;
+@@ -907,6 +745,7 @@ static int __init reipl_ccw_init(void)
+ 	reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
+ 	reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
+ 	reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
++	reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
+ 	/* check if read scp info worked and set loadparm */
+ 	if (sclp_ipl_info.is_valid)
+ 		memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
+@@ -915,8 +754,7 @@ static int __init reipl_ccw_init(void)
+ 		/* read scp info failed: set empty loadparm (EBCDIC blanks) */
+ 		memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
+ 		       LOADPARM_LEN);
+-	/* FIXME: check for diag308_set_works when enabling diag ccw reipl */
+-	if (!MACHINE_IS_VM)
++	if (!MACHINE_IS_VM && !diag308_set_works)
+ 		sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
+ 	if (ipl_info.type == IPL_TYPE_CCW)
+ 		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
+@@ -936,7 +774,7 @@ static int __init reipl_fcp_init(void)
+ 	reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
+ 	if (!reipl_block_fcp)
+ 		return -ENOMEM;
+-	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_fcp_attr_group);
++	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group);
+ 	if (rc) {
+ 		free_page((unsigned long)reipl_block_fcp);
+ 		return rc;
+@@ -954,16 +792,16 @@ static int __init reipl_fcp_init(void)
+ 	return 0;
+ }
+ 
+-static int __init reipl_init(void)
++static int reipl_init(void)
+ {
+ 	int rc;
+ 
+-	rc = firmware_register(&reipl_subsys);
+-	if (rc)
+-		return rc;
+-	rc = subsys_create_file(&reipl_subsys, &reipl_type_attr);
++	reipl_kset = kset_create_and_add("reipl", NULL, firmware_kobj);
++	if (!reipl_kset)
++		return -ENOMEM;
++	rc = sysfs_create_file(&reipl_kset->kobj, &reipl_type_attr.attr);
+ 	if (rc) {
+-		firmware_unregister(&reipl_subsys);
++		kset_unregister(reipl_kset);
+ 		return rc;
+ 	}
+ 	rc = reipl_ccw_init();
+@@ -981,6 +819,140 @@ static int __init reipl_init(void)
+ 	return 0;
+ }
+ 
++static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR,
++					      reipl_run, reipl_init};
++
++/*
++ * dump shutdown action: Dump Linux on shutdown.
++ */
++
++/* FCP dump device attributes */
++
++DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
++		   dump_block_fcp->ipl_info.fcp.wwpn);
++DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
++		   dump_block_fcp->ipl_info.fcp.lun);
++DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
++		   dump_block_fcp->ipl_info.fcp.bootprog);
++DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
++		   dump_block_fcp->ipl_info.fcp.br_lba);
++DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
++		   dump_block_fcp->ipl_info.fcp.devno);
++
++static struct attribute *dump_fcp_attrs[] = {
++	&sys_dump_fcp_device_attr.attr,
++	&sys_dump_fcp_wwpn_attr.attr,
++	&sys_dump_fcp_lun_attr.attr,
++	&sys_dump_fcp_bootprog_attr.attr,
++	&sys_dump_fcp_br_lba_attr.attr,
++	NULL,
++};
++
++static struct attribute_group dump_fcp_attr_group = {
++	.name  = IPL_FCP_STR,
++	.attrs = dump_fcp_attrs,
++};
++
++/* CCW dump device attributes */
++
++DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
++		   dump_block_ccw->ipl_info.ccw.devno);
++
++static struct attribute *dump_ccw_attrs[] = {
++	&sys_dump_ccw_device_attr.attr,
++	NULL,
++};
++
++static struct attribute_group dump_ccw_attr_group = {
++	.name  = IPL_CCW_STR,
++	.attrs = dump_ccw_attrs,
++};
++
++/* dump type */
++
++static int dump_set_type(enum dump_type type)
++{
++	if (!(dump_capabilities & type))
++		return -EINVAL;
++	switch (type) {
++	case DUMP_TYPE_CCW:
++		if (diag308_set_works)
++			dump_method = DUMP_METHOD_CCW_DIAG;
++		else if (MACHINE_IS_VM)
++			dump_method = DUMP_METHOD_CCW_VM;
++		else
++			dump_method = DUMP_METHOD_CCW_CIO;
++		break;
++	case DUMP_TYPE_FCP:
++		dump_method = DUMP_METHOD_FCP_DIAG;
++		break;
++	default:
++		dump_method = DUMP_METHOD_NONE;
++	}
++	dump_type = type;
++	return 0;
++}
++
++static ssize_t dump_type_show(struct kobject *kobj,
++			      struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", dump_type_str(dump_type));
++}
++
++static ssize_t dump_type_store(struct kobject *kobj,
++			       struct kobj_attribute *attr,
++			       const char *buf, size_t len)
++{
++	int rc = -EINVAL;
++
++	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
++		rc = dump_set_type(DUMP_TYPE_NONE);
++	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
++		rc = dump_set_type(DUMP_TYPE_CCW);
++	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
++		rc = dump_set_type(DUMP_TYPE_FCP);
++	return (rc != 0) ? rc : len;
++}
++
++static struct kobj_attribute dump_type_attr =
++	__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
++
++static struct kset *dump_kset;
++
++static void dump_run(struct shutdown_trigger *trigger)
++{
++	struct ccw_dev_id devid;
++	static char buf[100];
++
++	switch (dump_method) {
++	case DUMP_METHOD_CCW_CIO:
++		smp_send_stop();
++		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
++		devid.ssid  = 0;
++		reipl_ccw_dev(&devid);
++		break;
++	case DUMP_METHOD_CCW_VM:
++		smp_send_stop();
++		sprintf(buf, "STORE STATUS");
++		__cpcmd(buf, NULL, 0, NULL);
++		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
++		__cpcmd(buf, NULL, 0, NULL);
++		break;
++	case DUMP_METHOD_CCW_DIAG:
++		diag308(DIAG308_SET, dump_block_ccw);
++		diag308(DIAG308_DUMP, NULL);
++		break;
++	case DUMP_METHOD_FCP_DIAG:
++		diag308(DIAG308_SET, dump_block_fcp);
++		diag308(DIAG308_DUMP, NULL);
++		break;
++	case DUMP_METHOD_NONE:
++	default:
++		return;
++	}
++	printk(KERN_EMERG "Dump failed!\n");
++}
++
+ static int __init dump_ccw_init(void)
+ {
+ 	int rc;
+@@ -988,7 +960,7 @@ static int __init dump_ccw_init(void)
+ 	dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
+ 	if (!dump_block_ccw)
+ 		return -ENOMEM;
+-	rc = sysfs_create_group(&dump_subsys.kobj, &dump_ccw_attr_group);
++	rc = sysfs_create_group(&dump_kset->kobj, &dump_ccw_attr_group);
+ 	if (rc) {
+ 		free_page((unsigned long)dump_block_ccw);
+ 		return rc;
+@@ -1012,7 +984,7 @@ static int __init dump_fcp_init(void)
+ 	dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
+ 	if (!dump_block_fcp)
+ 		return -ENOMEM;
+-	rc = sysfs_create_group(&dump_subsys.kobj, &dump_fcp_attr_group);
++	rc = sysfs_create_group(&dump_kset->kobj, &dump_fcp_attr_group);
+ 	if (rc) {
+ 		free_page((unsigned long)dump_block_fcp);
+ 		return rc;
+@@ -1026,33 +998,16 @@ static int __init dump_fcp_init(void)
+ 	return 0;
+ }
+ 
+-#define SHUTDOWN_ON_PANIC_PRIO 0
+-
+-static int shutdown_on_panic_notify(struct notifier_block *self,
+-				    unsigned long event, void *data)
+-{
+-	if (on_panic_action == SHUTDOWN_DUMP)
+-		do_dump();
+-	else if (on_panic_action == SHUTDOWN_REIPL)
+-		do_reipl();
+-	return NOTIFY_OK;
+-}
+-
+-static struct notifier_block shutdown_on_panic_nb = {
+-	.notifier_call = shutdown_on_panic_notify,
+-	.priority = SHUTDOWN_ON_PANIC_PRIO
+-};
+-
+-static int __init dump_init(void)
++static int dump_init(void)
+ {
+ 	int rc;
+ 
+-	rc = firmware_register(&dump_subsys);
+-	if (rc)
+-		return rc;
+-	rc = subsys_create_file(&dump_subsys, &dump_type_attr);
++	dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
++	if (!dump_kset)
++		return -ENOMEM;
++	rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
+ 	if (rc) {
+-		firmware_unregister(&dump_subsys);
++		kset_unregister(dump_kset);
+ 		return rc;
+ 	}
+ 	rc = dump_ccw_init();
+@@ -1065,46 +1020,381 @@ static int __init dump_init(void)
+ 	return 0;
+ }
+ 
+-static int __init shutdown_actions_init(void)
++static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR,
++					     dump_run, dump_init};
++
++/*
++ * vmcmd shutdown action: Trigger vm command on shutdown.
++ */
++
++static char vmcmd_on_reboot[128];
++static char vmcmd_on_panic[128];
++static char vmcmd_on_halt[128];
++static char vmcmd_on_poff[128];
++
++DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
++DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
++DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
++DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
++
++static struct attribute *vmcmd_attrs[] = {
++	&sys_vmcmd_on_reboot_attr.attr,
++	&sys_vmcmd_on_panic_attr.attr,
++	&sys_vmcmd_on_halt_attr.attr,
++	&sys_vmcmd_on_poff_attr.attr,
++	NULL,
++};
++
++static struct attribute_group vmcmd_attr_group = {
++	.attrs = vmcmd_attrs,
++};
++
++static struct kset *vmcmd_kset;
++
++static void vmcmd_run(struct shutdown_trigger *trigger)
++{
++	char *cmd, *next_cmd;
++
++	if (strcmp(trigger->name, ON_REIPL_STR) == 0)
++		cmd = vmcmd_on_reboot;
++	else if (strcmp(trigger->name, ON_PANIC_STR) == 0)
++		cmd = vmcmd_on_panic;
++	else if (strcmp(trigger->name, ON_HALT_STR) == 0)
++		cmd = vmcmd_on_halt;
++	else if (strcmp(trigger->name, ON_POFF_STR) == 0)
++		cmd = vmcmd_on_poff;
++	else
++		return;
++
++	if (strlen(cmd) == 0)
++		return;
++	do {
++		next_cmd = strchr(cmd, '\n');
++		if (next_cmd) {
++			next_cmd[0] = 0;
++			next_cmd += 1;
++		}
++		__cpcmd(cmd, NULL, 0, NULL);
++		cmd = next_cmd;
++	} while (cmd != NULL);
++}
++
++static int vmcmd_init(void)
+ {
+-	int rc;
++	if (!MACHINE_IS_VM)
++		return -ENOTSUPP;
++	vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
++	if (!vmcmd_kset)
++		return -ENOMEM;
++	return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group);
++}
+ 
+-	rc = firmware_register(&shutdown_actions_subsys);
+-	if (rc)
+-		return rc;
+-	rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr);
+-	if (rc) {
+-		firmware_unregister(&shutdown_actions_subsys);
+-		return rc;
++static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
++					      vmcmd_run, vmcmd_init};
++
++/*
++ * stop shutdown action: Stop Linux on shutdown.
++ */
++
++static void stop_run(struct shutdown_trigger *trigger)
++{
++	if (strcmp(trigger->name, ON_PANIC_STR) == 0)
++		disabled_wait((unsigned long) __builtin_return_address(0));
++	else {
++		signal_processor(smp_processor_id(), sigp_stop);
++		for (;;);
+ 	}
+-	atomic_notifier_chain_register(&panic_notifier_list,
+-				       &shutdown_on_panic_nb);
+-	return 0;
+ }
+ 
+-static int __init s390_ipl_init(void)
++static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
++					     stop_run, NULL};
++
++/* action list */
++
++static struct shutdown_action *shutdown_actions_list[] = {
++	&ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action};
++#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *))
++
++/*
++ * Trigger section
++ */
++
++static struct kset *shutdown_actions_kset;
++
++static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
++		       size_t len)
+ {
+-	int rc;
++	int i;
++	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
++		if (!shutdown_actions_list[i])
++			continue;
++		if (strncmp(buf, shutdown_actions_list[i]->name,
++			    strlen(shutdown_actions_list[i]->name)) == 0) {
++			trigger->action = shutdown_actions_list[i];
++			return len;
++		}
++	}
++	return -EINVAL;
++}
+ 
+-	sclp_get_ipl_info(&sclp_ipl_info);
++/* on reipl */
++
++static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
++						    &reipl_action};
++
++static ssize_t on_reboot_show(struct kobject *kobj,
++			      struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", on_reboot_trigger.action->name);
++}
++
++static ssize_t on_reboot_store(struct kobject *kobj,
++			       struct kobj_attribute *attr,
++			       const char *buf, size_t len)
++{
++	return set_trigger(buf, &on_reboot_trigger, len);
++}
++
++static struct kobj_attribute on_reboot_attr =
++	__ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
++
++static void do_machine_restart(char *__unused)
++{
++	smp_send_stop();
++	on_reboot_trigger.action->fn(&on_reboot_trigger);
++	reipl_run(NULL);
++}
++void (*_machine_restart)(char *command) = do_machine_restart;
++
++/* on panic */
++
++static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
++
++static ssize_t on_panic_show(struct kobject *kobj,
++			     struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", on_panic_trigger.action->name);
++}
++
++static ssize_t on_panic_store(struct kobject *kobj,
++			      struct kobj_attribute *attr,
++			      const char *buf, size_t len)
++{
++	return set_trigger(buf, &on_panic_trigger, len);
++}
++
++static struct kobj_attribute on_panic_attr =
++	__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
++
++static void do_panic(void)
++{
++	on_panic_trigger.action->fn(&on_panic_trigger);
++	stop_run(&on_panic_trigger);
++}
++
++/* on halt */
++
++static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
++
++static ssize_t on_halt_show(struct kobject *kobj,
++			    struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", on_halt_trigger.action->name);
++}
++
++static ssize_t on_halt_store(struct kobject *kobj,
++			     struct kobj_attribute *attr,
++			     const char *buf, size_t len)
++{
++	return set_trigger(buf, &on_halt_trigger, len);
++}
++
++static struct kobj_attribute on_halt_attr =
++	__ATTR(on_halt, 0644, on_halt_show, on_halt_store);
++
++
++static void do_machine_halt(void)
++{
++	smp_send_stop();
++	on_halt_trigger.action->fn(&on_halt_trigger);
++	stop_run(&on_halt_trigger);
++}
++void (*_machine_halt)(void) = do_machine_halt;
++
++/* on power off */
++
++static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
++
++static ssize_t on_poff_show(struct kobject *kobj,
++			    struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", on_poff_trigger.action->name);
++}
++
++static ssize_t on_poff_store(struct kobject *kobj,
++			     struct kobj_attribute *attr,
++			     const char *buf, size_t len)
++{
++	return set_trigger(buf, &on_poff_trigger, len);
++}
++
++static struct kobj_attribute on_poff_attr =
++	__ATTR(on_poff, 0644, on_poff_show, on_poff_store);
++
++
++static void do_machine_power_off(void)
++{
++	smp_send_stop();
++	on_poff_trigger.action->fn(&on_poff_trigger);
++	stop_run(&on_poff_trigger);
++}
++void (*_machine_power_off)(void) = do_machine_power_off;
++
++static void __init shutdown_triggers_init(void)
++{
++	shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
++						    firmware_kobj);
++	if (!shutdown_actions_kset)
++		goto fail;
++	if (sysfs_create_file(&shutdown_actions_kset->kobj,
++			      &on_reboot_attr.attr))
++		goto fail;
++	if (sysfs_create_file(&shutdown_actions_kset->kobj,
++			      &on_panic_attr.attr))
++		goto fail;
++	if (sysfs_create_file(&shutdown_actions_kset->kobj,
++			      &on_halt_attr.attr))
++		goto fail;
++	if (sysfs_create_file(&shutdown_actions_kset->kobj,
++			      &on_poff_attr.attr))
++		goto fail;
++
++	return;
++fail:
++	panic("shutdown_triggers_init failed\n");
++}
++
++static void __init shutdown_actions_init(void)
++{
++	int i;
++
++	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
++		if (!shutdown_actions_list[i]->init)
++			continue;
++		if (shutdown_actions_list[i]->init())
++			shutdown_actions_list[i] = NULL;
++	}
++}
++
++static int __init s390_ipl_init(void)
++{
+ 	reipl_probe();
+-	rc = ipl_init();
+-	if (rc)
+-		return rc;
+-	rc = reipl_init();
+-	if (rc)
+-		return rc;
+-	rc = dump_init();
+-	if (rc)
+-		return rc;
+-	rc = shutdown_actions_init();
+-	if (rc)
+-		return rc;
++	sclp_get_ipl_info(&sclp_ipl_info);
++	shutdown_actions_init();
++	shutdown_triggers_init();
+ 	return 0;
+ }
+ 
+ __initcall(s390_ipl_init);
+ 
++static void __init strncpy_skip_quote(char *dst, char *src, int n)
++{
++	int sx, dx;
++
++	dx = 0;
++	for (sx = 0; src[sx] != 0; sx++) {
++		if (src[sx] == '"')
++			continue;
++		dst[dx++] = src[sx];
++		if (dx >= n)
++			break;
++	}
++}
++
++static int __init vmcmd_on_reboot_setup(char *str)
++{
++	if (!MACHINE_IS_VM)
++		return 1;
++	strncpy_skip_quote(vmcmd_on_reboot, str, 127);
++	vmcmd_on_reboot[127] = 0;
++	on_reboot_trigger.action = &vmcmd_action;
++	return 1;
++}
++__setup("vmreboot=", vmcmd_on_reboot_setup);
++
++static int __init vmcmd_on_panic_setup(char *str)
++{
++	if (!MACHINE_IS_VM)
++		return 1;
++	strncpy_skip_quote(vmcmd_on_panic, str, 127);
++	vmcmd_on_panic[127] = 0;
++	on_panic_trigger.action = &vmcmd_action;
++	return 1;
++}
++__setup("vmpanic=", vmcmd_on_panic_setup);
++
++static int __init vmcmd_on_halt_setup(char *str)
++{
++	if (!MACHINE_IS_VM)
++		return 1;
++	strncpy_skip_quote(vmcmd_on_halt, str, 127);
++	vmcmd_on_halt[127] = 0;
++	on_halt_trigger.action = &vmcmd_action;
++	return 1;
++}
++__setup("vmhalt=", vmcmd_on_halt_setup);
++
++static int __init vmcmd_on_poff_setup(char *str)
++{
++	if (!MACHINE_IS_VM)
++		return 1;
++	strncpy_skip_quote(vmcmd_on_poff, str, 127);
++	vmcmd_on_poff[127] = 0;
++	on_poff_trigger.action = &vmcmd_action;
++	return 1;
++}
++__setup("vmpoff=", vmcmd_on_poff_setup);
++
++static int on_panic_notify(struct notifier_block *self,
++			   unsigned long event, void *data)
++{
++	do_panic();
++	return NOTIFY_OK;
++}
++
++static struct notifier_block on_panic_nb = {
++	.notifier_call = on_panic_notify,
++	.priority = 0,
++};
++
++void __init setup_ipl(void)
++{
++	ipl_info.type = get_ipl_type();
++	switch (ipl_info.type) {
++	case IPL_TYPE_CCW:
++		ipl_info.data.ccw.dev_id.devno = ipl_devno;
++		ipl_info.data.ccw.dev_id.ssid = 0;
++		break;
++	case IPL_TYPE_FCP:
++	case IPL_TYPE_FCP_DUMP:
++		ipl_info.data.fcp.dev_id.devno =
++			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
++		ipl_info.data.fcp.dev_id.ssid = 0;
++		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
++		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
++		break;
++	case IPL_TYPE_NSS:
++		strncpy(ipl_info.data.nss.name, kernel_nss_name,
++			sizeof(ipl_info.data.nss.name));
++		break;
++	case IPL_TYPE_UNKNOWN:
++	default:
++		/* We have no info to copy */
++		break;
++	}
++	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
++}
++
+ void __init ipl_save_parameters(void)
+ {
+ 	struct cio_iplinfo iplinfo;
+@@ -1185,3 +1475,4 @@ void s390_reset_system(void)
+ 
+ 	do_reset_calls();
+ }
++
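
The ipl.c rework above replaces the single on_panic switch with generic trigger/action tables: each shutdown trigger (on_panic, on_halt, on_poff, on_reboot) points at one shutdown action (ipl, reipl, dump, vmcmd, stop), and writing an action name into the trigger's sysfs file simply repoints it. A small userspace model of that lookup (illustrative only; the names mirror the patch, but the return values and printf bodies are simplifications):

#include <stdio.h>
#include <string.h>

struct shutdown_action {
	const char *name;
	void (*fn)(const char *trigger_name);
};

struct shutdown_trigger {
	const char *name;
	struct shutdown_action *action;
};

static void stop_run(const char *trigger)  { printf("%s: stop\n", trigger); }
static void reipl_run(const char *trigger) { printf("%s: reipl\n", trigger); }

static struct shutdown_action stop_action  = { "stop",  stop_run };
static struct shutdown_action reipl_action = { "reipl", reipl_run };
static struct shutdown_action *actions[]   = { &reipl_action, &stop_action };

/* same matching rule as set_trigger() in the patch: the first action whose
 * name is a prefix of the written string wins */
static int set_trigger(const char *buf, struct shutdown_trigger *trigger)
{
	size_t i;

	for (i = 0; i < sizeof(actions) / sizeof(actions[0]); i++)
		if (strncmp(buf, actions[i]->name,
			    strlen(actions[i]->name)) == 0) {
			trigger->action = actions[i];
			return 0;
		}
	return -1;
}

int main(void)
{
	struct shutdown_trigger on_panic = { "on_panic", &stop_action };

	on_panic.action->fn(on_panic.name);	/* default action: stop      */
	set_trigger("reipl\n", &on_panic);	/* echo reipl > .../on_panic */
	on_panic.action->fn(on_panic.name);	/* now: reipl                */
	return 0;
}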
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 29f7884..0e7aca0 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -36,7 +36,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/notifier.h>
+-
++#include <linux/utsname.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -182,13 +182,15 @@ void cpu_idle(void)
+ 
+ void show_regs(struct pt_regs *regs)
+ {
+-	struct task_struct *tsk = current;
+-
+-        printk("CPU:    %d    %s\n", task_thread_info(tsk)->cpu, print_tainted());
+-        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+-	       current->comm, task_pid_nr(current), (void *) tsk,
+-	       (void *) tsk->thread.ksp);
+-
++	print_modules();
++	printk("CPU: %d %s %s %.*s\n",
++	       task_thread_info(current)->cpu, print_tainted(),
++	       init_utsname()->release,
++	       (int)strcspn(init_utsname()->version, " "),
++	       init_utsname()->version);
++	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
++	       current->comm, current->pid, current,
++	       (void *) current->thread.ksp);
+ 	show_registers(regs);
+ 	/* Show stack backtrace if pt_regs is from kernel mode */
+ 	if (!(regs->psw.mask & PSW_MASK_PSTATE))
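
The show_regs() hunk above now prints the kernel release plus only the first word of init_utsname()->version, by feeding strcspn() into a "%.*s" precision. A self-contained illustration (the version string is an arbitrary example, not taken from the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *version = "#1 SMP Thu Jan 31 09:04:04 UTC 2008";

	/* prints "#1": everything up to the first space */
	printf("%.*s\n", (int) strcspn(version, " "), version);
	return 0;
}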
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 1d81bf9..6e036ba 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -86,13 +86,13 @@ FixPerRegisters(struct task_struct *task)
+ 		per_info->control_regs.bits.storage_alt_space_ctl = 0;
+ }
+ 
+-static void set_single_step(struct task_struct *task)
++void user_enable_single_step(struct task_struct *task)
+ {
+ 	task->thread.per_info.single_step = 1;
+ 	FixPerRegisters(task);
+ }
+ 
+-static void clear_single_step(struct task_struct *task)
++void user_disable_single_step(struct task_struct *task)
+ {
+ 	task->thread.per_info.single_step = 0;
+ 	FixPerRegisters(task);
+@@ -107,7 +107,7 @@ void
+ ptrace_disable(struct task_struct *child)
+ {
+ 	/* make sure the single step bit is not set. */
+-	clear_single_step(child);
++	user_disable_single_step(child);
+ }
+ 
+ #ifndef CONFIG_64BIT
+@@ -651,7 +651,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
+ 			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ 		child->exit_code = data;
+ 		/* make sure the single step bit is not set. */
+-		clear_single_step(child);
++		user_disable_single_step(child);
+ 		wake_up_process(child);
+ 		return 0;
+ 
+@@ -665,7 +665,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
+ 			return 0;
+ 		child->exit_code = SIGKILL;
+ 		/* make sure the single step bit is not set. */
+-		clear_single_step(child);
++		user_disable_single_step(child);
+ 		wake_up_process(child);
+ 		return 0;
+ 
+@@ -675,10 +675,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
+ 			return -EIO;
+ 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ 		child->exit_code = data;
+-		if (data)
+-			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
+-		else
+-			set_single_step(child);
++		user_enable_single_step(child);
+ 		/* give it a chance to run. */
+ 		wake_up_process(child);
+ 		return 0;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 577aa7d..766c783 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -126,75 +126,6 @@ void __cpuinit cpu_init(void)
+ }
+ 
+ /*
+- * VM halt and poweroff setup routines
+- */
+-char vmhalt_cmd[128] = "";
+-char vmpoff_cmd[128] = "";
+-static char vmpanic_cmd[128] = "";
+-
+-static void strncpy_skip_quote(char *dst, char *src, int n)
+-{
+-        int sx, dx;
+-
+-        dx = 0;
+-        for (sx = 0; src[sx] != 0; sx++) {
+-                if (src[sx] == '"') continue;
+-                dst[dx++] = src[sx];
+-                if (dx >= n) break;
+-        }
+-}
+-
+-static int __init vmhalt_setup(char *str)
+-{
+-        strncpy_skip_quote(vmhalt_cmd, str, 127);
+-        vmhalt_cmd[127] = 0;
+-        return 1;
+-}
+-
+-__setup("vmhalt=", vmhalt_setup);
+-
+-static int __init vmpoff_setup(char *str)
+-{
+-        strncpy_skip_quote(vmpoff_cmd, str, 127);
+-        vmpoff_cmd[127] = 0;
+-        return 1;
+-}
+-
+-__setup("vmpoff=", vmpoff_setup);
+-
+-static int vmpanic_notify(struct notifier_block *self, unsigned long event,
+-			  void *data)
+-{
+-	if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
+-		cpcmd(vmpanic_cmd, NULL, 0, NULL);
+-
+-	return NOTIFY_OK;
+-}
+-
+-#define PANIC_PRI_VMPANIC	0
+-
+-static struct notifier_block vmpanic_nb = {
+-	.notifier_call = vmpanic_notify,
+-	.priority = PANIC_PRI_VMPANIC
+-};
+-
+-static int __init vmpanic_setup(char *str)
+-{
+-	static int register_done __initdata = 0;
+-
+-	strncpy_skip_quote(vmpanic_cmd, str, 127);
+-	vmpanic_cmd[127] = 0;
+-	if (!register_done) {
+-		register_done = 1;
+-		atomic_notifier_chain_register(&panic_notifier_list,
+-					       &vmpanic_nb);
+-	}
+-	return 1;
+-}
+-
+-__setup("vmpanic=", vmpanic_setup);
+-
+-/*
+  * condev= and conmode= setup parameter.
+  */
+ 
+@@ -308,38 +239,6 @@ static void __init setup_zfcpdump(unsigned int console_devno)
+ static inline void setup_zfcpdump(unsigned int console_devno) {}
+ #endif /* CONFIG_ZFCPDUMP */
+ 
+-#ifdef CONFIG_SMP
+-void (*_machine_restart)(char *command) = machine_restart_smp;
+-void (*_machine_halt)(void) = machine_halt_smp;
+-void (*_machine_power_off)(void) = machine_power_off_smp;
+-#else
+-/*
+- * Reboot, halt and power_off routines for non SMP.
+- */
+-static void do_machine_restart_nonsmp(char * __unused)
+-{
+-	do_reipl();
+-}
+-
+-static void do_machine_halt_nonsmp(void)
+-{
+-        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+-		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
+-        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-}
+-
+-static void do_machine_power_off_nonsmp(void)
+-{
+-        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+-		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
+-        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-}
+-
+-void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
+-void (*_machine_halt)(void) = do_machine_halt_nonsmp;
+-void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
+-#endif
+-
+  /*
+  * Reboot, halt and power_off stubs. They just call _machine_restart,
+  * _machine_halt or _machine_power_off. 
+@@ -559,7 +458,9 @@ setup_resources(void)
+ 	data_resource.start = (unsigned long) &_etext;
+ 	data_resource.end = (unsigned long) &_edata - 1;
+ 
+-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
++	for (i = 0; i < MEMORY_CHUNKS; i++) {
++		if (!memory_chunk[i].size)
++			continue;
+ 		res = alloc_bootmem_low(sizeof(struct resource));
+ 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+ 		switch (memory_chunk[i].type) {
+@@ -617,7 +518,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
+ static void __init setup_memory_end(void)
+ {
+ 	unsigned long memory_size;
+-	unsigned long max_mem, max_phys;
++	unsigned long max_mem;
+ 	int i;
+ 
+ #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+@@ -625,10 +526,31 @@ static void __init setup_memory_end(void)
+ 		memory_end = ZFCPDUMP_HSA_SIZE;
+ #endif
+ 	memory_size = 0;
+-	max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
+ 	memory_end &= PAGE_MASK;
+ 
+-	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
++	max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
++	memory_end = min(max_mem, memory_end);
++
++	/*
++	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
++	 * extra checks that HOLES_IN_ZONE would require.
++	 */
++	for (i = 0; i < MEMORY_CHUNKS; i++) {
++		unsigned long start, end;
++		struct mem_chunk *chunk;
++		unsigned long align;
++
++		chunk = &memory_chunk[i];
++		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
++		start = (chunk->addr + align - 1) & ~(align - 1);
++		end = (chunk->addr + chunk->size) & ~(align - 1);
++		if (start >= end)
++			memset(chunk, 0, sizeof(*chunk));
++		else {
++			chunk->addr = start;
++			chunk->size = end - start;
++		}
++	}
+ 
+ 	for (i = 0; i < MEMORY_CHUNKS; i++) {
+ 		struct mem_chunk *chunk = &memory_chunk[i];
+@@ -890,7 +812,7 @@ setup_arch(char **cmdline_p)
+ 
+ 	parse_early_param();
+ 
+-	setup_ipl_info();
++	setup_ipl();
+ 	setup_memory_end();
+ 	setup_addressing_mode();
+ 	setup_memory();
+@@ -899,7 +821,6 @@ setup_arch(char **cmdline_p)
+ 
+         cpu_init();
+         __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
+-	smp_setup_cpu_possible_map();
+ 
+ 	/*
+ 	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
+@@ -920,7 +841,7 @@ setup_arch(char **cmdline_p)
+ 
+ void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
+ {
+-   printk("cpu %d "
++   printk(KERN_INFO "cpu %d "
+ #ifdef CONFIG_SMP
+            "phys_idx=%d "
+ #endif
+@@ -996,7 +917,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+ static void c_stop(struct seq_file *m, void *v)
+ {
+ }
+-struct seq_operations cpuinfo_op = {
++const struct seq_operations cpuinfo_op = {
+ 	.start	= c_start,
+ 	.next	= c_next,
+ 	.stop	= c_stop,
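
Earlier in this file's diff, setup_memory_end() now trims every memory chunk inward to a MAX_ORDER boundary so no chunk partially covers a max-order block (avoiding the extra HOLES_IN_ZONE checks). A userspace model of the same rounding arithmetic (illustrative only; PAGE_SHIFT, MAX_ORDER and the sample chunk are assumed values, not taken from the patch):

#include <stdio.h>

#define PAGE_SHIFT	12
#define MAX_ORDER	11

int main(void)
{
	unsigned long addr = 0xa0000UL, size = 0x1000000UL;
	unsigned long align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); /* 4 MiB      */
	unsigned long start = (addr + align - 1) & ~(align - 1);   /* round up   */
	unsigned long end   = (addr + size) & ~(align - 1);        /* round down */

	if (start >= end)
		printf("chunk dropped: smaller than one aligned block\n");
	else
		printf("chunk [0x%lx,0x%lx) trimmed to [0x%lx,0x%lx)\n",
		       addr, addr + size, start, end);
	return 0;
}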
+diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
+index d264671..4449bf3 100644
+--- a/arch/s390/kernel/signal.c
++++ b/arch/s390/kernel/signal.c
+@@ -471,6 +471,7 @@ void do_signal(struct pt_regs *regs)
+ 
+ 	if (signr > 0) {
+ 		/* Whee!  Actually deliver the signal.  */
++		int ret;
+ #ifdef CONFIG_COMPAT
+ 		if (test_thread_flag(TIF_31BIT)) {
+ 			extern int handle_signal32(unsigned long sig,
+@@ -478,15 +479,12 @@ void do_signal(struct pt_regs *regs)
+ 						   siginfo_t *info,
+ 						   sigset_t *oldset,
+ 						   struct pt_regs *regs);
+-			if (handle_signal32(
+-				    signr, &ka, &info, oldset, regs) == 0) {
+-				if (test_thread_flag(TIF_RESTORE_SIGMASK))
+-					clear_thread_flag(TIF_RESTORE_SIGMASK);
+-			}
+-			return;
++			ret = handle_signal32(signr, &ka, &info, oldset, regs);
+ 	        }
++		else
+ #endif
+-		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
++			ret = handle_signal(signr, &ka, &info, oldset, regs);
++		if (!ret) {
+ 			/*
+ 			 * A signal was successfully delivered; the saved
+ 			 * sigmask will have been stored in the signal frame,
+@@ -495,6 +493,14 @@ void do_signal(struct pt_regs *regs)
+ 			 */
+ 			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ 				clear_thread_flag(TIF_RESTORE_SIGMASK);
++
++			/*
++			 * If we would have taken a single-step trap
++			 * for a normal instruction, act like we took
++			 * one for the handler setup.
++			 */
++			if (current->thread.per_info.single_step)
++				set_thread_flag(TIF_SINGLE_STEP);
+ 		}
+ 		return;
+ 	}
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 264ea90..aa37fa1 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -42,6 +42,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/timer.h>
+ #include <asm/lowcore.h>
++#include <asm/sclp.h>
+ #include <asm/cpu.h>
+ 
+ /*
+@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr);
+ cpumask_t cpu_online_map = CPU_MASK_NONE;
+ EXPORT_SYMBOL(cpu_online_map);
+ 
+-cpumask_t cpu_possible_map = CPU_MASK_NONE;
++cpumask_t cpu_possible_map = CPU_MASK_ALL;
+ EXPORT_SYMBOL(cpu_possible_map);
+ 
+ static struct task_struct *current_set[NR_CPUS];
+ 
++static u8 smp_cpu_type;
++static int smp_use_sigp_detection;
++
++enum s390_cpu_state {
++	CPU_STATE_STANDBY,
++	CPU_STATE_CONFIGURED,
++};
++
++#ifdef CONFIG_HOTPLUG_CPU
++static DEFINE_MUTEX(smp_cpu_state_mutex);
++#endif
++static int smp_cpu_state[NR_CPUS];
++
++static DEFINE_PER_CPU(struct cpu, cpu_devices);
++DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
++
+ static void smp_ext_bitcall(int, ec_bit_sig);
+ 
+ /*
+@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ }
+ EXPORT_SYMBOL(smp_call_function_single);
+ 
++/**
++ * smp_call_function_mask(): Run a function on a set of other CPUs.
++ * @mask: The set of cpus to run on.  Must not include the current cpu.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @wait: If true, wait (atomically) until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code.
++ *
++ * If @wait is true, then returns once @func has returned; otherwise
++ * it returns just before the target cpu calls @func.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++int
++smp_call_function_mask(cpumask_t mask,
++			void (*func)(void *), void *info,
++			int wait)
++{
++	preempt_disable();
++	__smp_call_function_map(func, info, 0, wait, mask);
++	preempt_enable();
++	return 0;
++}
++EXPORT_SYMBOL(smp_call_function_mask);
++
+ void smp_send_stop(void)
+ {
+ 	int cpu, rc;
+@@ -217,33 +261,6 @@ void smp_send_stop(void)
+ }
+ 
+ /*
+- * Reboot, halt and power_off routines for SMP.
+- */
+-void machine_restart_smp(char *__unused)
+-{
+-	smp_send_stop();
+-	do_reipl();
+-}
+-
+-void machine_halt_smp(void)
+-{
+-	smp_send_stop();
+-	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+-		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
+-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-	for (;;);
+-}
+-
+-void machine_power_off_smp(void)
+-{
+-	smp_send_stop();
+-	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+-		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
+-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-	for (;;);
+-}
+-
+-/*
+  * This is the main routine where commands issued by other
+  * cpus are handled.
+  */
+@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
+ }
+ EXPORT_SYMBOL(smp_ctl_clear_bit);
+ 
++/*
++ * In early ipl state a temp. logical cpu number is needed, so the sigp
++ * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
++ * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
++ */
++#define CPU_INIT_NO	1
++
+ #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+ 
+ /*
+@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
+ 		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
+ 		return;
+ 	}
+-	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
+-	__cpu_logical_map[1] = (__u16) phy_cpu;
+-	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
++	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
++	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
++	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
++	       sigp_busy)
+ 		cpu_relax();
+ 	memcpy(zfcpdump_save_areas[cpu],
+ 	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
+@@ -397,32 +422,155 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
+ 
+ #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
+ 
+-/*
+- * Lets check how many CPUs we have.
+- */
+-static unsigned int __init smp_count_cpus(void)
++static int cpu_stopped(int cpu)
+ {
+-	unsigned int cpu, num_cpus;
+-	__u16 boot_cpu_addr;
++	__u32 status;
+ 
+-	/*
+-	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+-	 */
++	/* Check for stopped state */
++	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
++	    sigp_status_stored) {
++		if (status & 0x40)
++			return 1;
++	}
++	return 0;
++}
++
++static int cpu_known(int cpu_id)
++{
++	int cpu;
++
++	for_each_present_cpu(cpu) {
++		if (__cpu_logical_map[cpu] == cpu_id)
++			return 1;
++	}
++	return 0;
++}
++
++static int smp_rescan_cpus_sigp(cpumask_t avail)
++{
++	int cpu_id, logical_cpu;
++
++	logical_cpu = first_cpu(avail);
++	if (logical_cpu == NR_CPUS)
++		return 0;
++	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
++		if (cpu_known(cpu_id))
++			continue;
++		__cpu_logical_map[logical_cpu] = cpu_id;
++		if (!cpu_stopped(logical_cpu))
++			continue;
++		cpu_set(logical_cpu, cpu_present_map);
++		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
++		logical_cpu = next_cpu(logical_cpu, avail);
++		if (logical_cpu == NR_CPUS)
++			break;
++	}
++	return 0;
++}
++
++static int smp_rescan_cpus_sclp(cpumask_t avail)
++{
++	struct sclp_cpu_info *info;
++	int cpu_id, logical_cpu, cpu;
++	int rc;
++
++	logical_cpu = first_cpu(avail);
++	if (logical_cpu == NR_CPUS)
++		return 0;
++	info = kmalloc(sizeof(*info), GFP_KERNEL);
++	if (!info)
++		return -ENOMEM;
++	rc = sclp_get_cpu_info(info);
++	if (rc)
++		goto out;
++	for (cpu = 0; cpu < info->combined; cpu++) {
++		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
++			continue;
++		cpu_id = info->cpu[cpu].address;
++		if (cpu_known(cpu_id))
++			continue;
++		__cpu_logical_map[logical_cpu] = cpu_id;
++		cpu_set(logical_cpu, cpu_present_map);
++		if (cpu >= info->configured)
++			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
++		else
++			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
++		logical_cpu = next_cpu(logical_cpu, avail);
++		if (logical_cpu == NR_CPUS)
++			break;
++	}
++out:
++	kfree(info);
++	return rc;
++}
++
++static int smp_rescan_cpus(void)
++{
++	cpumask_t avail;
++
++	cpus_xor(avail, cpu_possible_map, cpu_present_map);
++	if (smp_use_sigp_detection)
++		return smp_rescan_cpus_sigp(avail);
++	else
++		return smp_rescan_cpus_sclp(avail);
++}
++
++static void __init smp_detect_cpus(void)
++{
++	unsigned int cpu, c_cpus, s_cpus;
++	struct sclp_cpu_info *info;
++	u16 boot_cpu_addr, cpu_addr;
++
++	c_cpus = 1;
++	s_cpus = 0;
+ 	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+-	current_thread_info()->cpu = 0;
+-	num_cpus = 1;
+-	for (cpu = 0; cpu <= 65535; cpu++) {
+-		if ((__u16) cpu == boot_cpu_addr)
++	info = kmalloc(sizeof(*info), GFP_KERNEL);
++	if (!info)
++		panic("smp_detect_cpus failed to allocate memory\n");
++	/* Use sigp detection algorithm if sclp doesn't work. */
++	if (sclp_get_cpu_info(info)) {
++		smp_use_sigp_detection = 1;
++		for (cpu = 0; cpu <= 65535; cpu++) {
++			if (cpu == boot_cpu_addr)
++				continue;
++			__cpu_logical_map[CPU_INIT_NO] = cpu;
++			if (!cpu_stopped(CPU_INIT_NO))
++				continue;
++			smp_get_save_area(c_cpus, cpu);
++			c_cpus++;
++		}
++		goto out;
++	}
++
++	if (info->has_cpu_type) {
++		for (cpu = 0; cpu < info->combined; cpu++) {
++			if (info->cpu[cpu].address == boot_cpu_addr) {
++				smp_cpu_type = info->cpu[cpu].type;
++				break;
++			}
++		}
++	}
++
++	for (cpu = 0; cpu < info->combined; cpu++) {
++		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
++			continue;
++		cpu_addr = info->cpu[cpu].address;
++		if (cpu_addr == boot_cpu_addr)
+ 			continue;
+-		__cpu_logical_map[1] = (__u16) cpu;
+-		if (signal_processor(1, sigp_sense) == sigp_not_operational)
++		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
++		if (!cpu_stopped(CPU_INIT_NO)) {
++			s_cpus++;
+ 			continue;
+-		smp_get_save_area(num_cpus, cpu);
+-		num_cpus++;
++		}
++		smp_get_save_area(c_cpus, cpu_addr);
++		c_cpus++;
+ 	}
+-	printk("Detected %d CPU's\n", (int) num_cpus);
+-	printk("Boot cpu address %2X\n", boot_cpu_addr);
+-	return num_cpus;
++out:
++	kfree(info);
++	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
++	get_online_cpus();
++	smp_rescan_cpus();
++	put_online_cpus();
+ }
+ 
+ /*
+@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid)
+ 	return 0;
+ }
+ 
+-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+-
+ static void __init smp_create_idle(unsigned int cpu)
+ {
+ 	struct task_struct *p;
+@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu)
+ 	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
+ }
+ 
+-static int cpu_stopped(int cpu)
++static int __cpuinit smp_alloc_lowcore(int cpu)
+ {
+-	__u32 status;
++	unsigned long async_stack, panic_stack;
++	struct _lowcore *lowcore;
++	int lc_order;
++
++	lc_order = sizeof(long) == 8 ? 1 : 0;
++	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
++	if (!lowcore)
++		return -ENOMEM;
++	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
++	if (!async_stack)
++		goto out_async_stack;
++	panic_stack = __get_free_page(GFP_KERNEL);
++	if (!panic_stack)
++		goto out_panic_stack;
++
++	*lowcore = S390_lowcore;
++	lowcore->async_stack = async_stack + ASYNC_SIZE;
++	lowcore->panic_stack = panic_stack + PAGE_SIZE;
+ 
+-	/* Check for stopped state */
+-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+-	    sigp_status_stored) {
+-		if (status & 0x40)
+-			return 1;
++#ifndef CONFIG_64BIT
++	if (MACHINE_HAS_IEEE) {
++		unsigned long save_area;
++
++		save_area = get_zeroed_page(GFP_KERNEL);
++		if (!save_area)
++			goto out_save_area;
++		lowcore->extended_save_area_addr = (u32) save_area;
+ 	}
++#endif
++	lowcore_ptr[cpu] = lowcore;
+ 	return 0;
++
++#ifndef CONFIG_64BIT
++out_save_area:
++	free_page(panic_stack);
++#endif
++out_panic_stack:
++	free_pages(async_stack, ASYNC_ORDER);
++out_async_stack:
++	free_pages((unsigned long) lowcore, lc_order);
++	return -ENOMEM;
+ }
+ 
+-/* Upping and downing of CPUs */
++#ifdef CONFIG_HOTPLUG_CPU
++static void smp_free_lowcore(int cpu)
++{
++	struct _lowcore *lowcore;
++	int lc_order;
++
++	lc_order = sizeof(long) == 8 ? 1 : 0;
++	lowcore = lowcore_ptr[cpu];
++#ifndef CONFIG_64BIT
++	if (MACHINE_HAS_IEEE)
++		free_page((unsigned long) lowcore->extended_save_area_addr);
++#endif
++	free_page(lowcore->panic_stack - PAGE_SIZE);
++	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
++	free_pages((unsigned long) lowcore, lc_order);
++	lowcore_ptr[cpu] = NULL;
++}
++#endif /* CONFIG_HOTPLUG_CPU */
+ 
+-int __cpu_up(unsigned int cpu)
++/* Upping and downing of CPUs */
++int __cpuinit __cpu_up(unsigned int cpu)
+ {
+ 	struct task_struct *idle;
+ 	struct _lowcore *cpu_lowcore;
+ 	struct stack_frame *sf;
+ 	sigp_ccode ccode;
+-	int curr_cpu;
+ 
+-	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+-		__cpu_logical_map[cpu] = (__u16) curr_cpu;
+-		if (cpu_stopped(cpu))
+-			break;
+-	}
+-
+-	if (!cpu_stopped(cpu))
+-		return -ENODEV;
++	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
++		return -EIO;
++	if (smp_alloc_lowcore(cpu))
++		return -ENOMEM;
+ 
+ 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
+ 				   cpu, sigp_set_prefix);
+@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu)
+ 	cpu_lowcore = lowcore_ptr[cpu];
+ 	cpu_lowcore->kernel_stack = (unsigned long)
+ 		task_stack_page(idle) + THREAD_SIZE;
++	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
+ 	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
+ 				     - sizeof(struct pt_regs)
+ 				     - sizeof(struct stack_frame));
+@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu)
+ 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
+ 	cpu_lowcore->current_task = (unsigned long) idle;
+ 	cpu_lowcore->cpu_data.cpu_nr = cpu;
++	cpu_lowcore->softirq_pending = 0;
++	cpu_lowcore->ext_call_fast = 0;
+ 	eieio();
+ 
+ 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
+@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu)
+ 	return 0;
+ }
+ 
+-static unsigned int __initdata additional_cpus;
+-static unsigned int __initdata possible_cpus;
+-
+-void __init smp_setup_cpu_possible_map(void)
++static int __init setup_possible_cpus(char *s)
+ {
+-	unsigned int phy_cpus, pos_cpus, cpu;
+-
+-	phy_cpus = smp_count_cpus();
+-	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
+-
+-	if (possible_cpus)
+-		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
++	int pcpus, cpu;
+ 
+-	for (cpu = 0; cpu < pos_cpus; cpu++)
++	pcpus = simple_strtoul(s, NULL, 0);
++	cpu_possible_map = cpumask_of_cpu(0);
++	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
+ 		cpu_set(cpu, cpu_possible_map);
+-
+-	phy_cpus = min(phy_cpus, pos_cpus);
+-
+-	for (cpu = 0; cpu < phy_cpus; cpu++)
+-		cpu_set(cpu, cpu_present_map);
+-}
+-
+-#ifdef CONFIG_HOTPLUG_CPU
+-
+-static int __init setup_additional_cpus(char *s)
+-{
+-	additional_cpus = simple_strtoul(s, NULL, 0);
+-	return 0;
+-}
+-early_param("additional_cpus", setup_additional_cpus);
+-
+-static int __init setup_possible_cpus(char *s)
+-{
+-	possible_cpus = simple_strtoul(s, NULL, 0);
+ 	return 0;
+ }
+ early_param("possible_cpus", setup_possible_cpus);
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++
+ int __cpu_disable(void)
+ {
+ 	struct ec_creg_mask_parms cr_parms;
+@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu)
+ 	/* Wait until target cpu is down */
+ 	while (!smp_cpu_not_running(cpu))
+ 		cpu_relax();
+-	printk("Processor %d spun down\n", cpu);
++	smp_free_lowcore(cpu);
++	printk(KERN_INFO "Processor %d spun down\n", cpu);
+ }
+ 
+ void cpu_die(void)
+@@ -625,49 +796,19 @@ void cpu_die(void)
+ 
+ #endif /* CONFIG_HOTPLUG_CPU */
+ 
+-/*
+- *	Cycle through the processors and setup structures.
+- */
+-
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+-	unsigned long stack;
+ 	unsigned int cpu;
+-	int i;
++
++	smp_detect_cpus();
+ 
+ 	/* request the 0x1201 emergency signal external interrupt */
+ 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
+ 		panic("Couldn't request external interrupt 0x1201");
+ 	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
+-	/*
+-	 *  Initialize prefix pages and stacks for all possible cpus
+-	 */
+ 	print_cpu_info(&S390_lowcore.cpu_data);
++	smp_alloc_lowcore(smp_processor_id());
+ 
+-	for_each_possible_cpu(i) {
+-		lowcore_ptr[i] = (struct _lowcore *)
+-			__get_free_pages(GFP_KERNEL | GFP_DMA,
+-					 sizeof(void*) == 8 ? 1 : 0);
+-		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+-		if (!lowcore_ptr[i] || !stack)
+-			panic("smp_boot_cpus failed to allocate memory\n");
+-
+-		*(lowcore_ptr[i]) = S390_lowcore;
+-		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
+-		stack = __get_free_pages(GFP_KERNEL, 0);
+-		if (!stack)
+-			panic("smp_boot_cpus failed to allocate memory\n");
+-		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
+-#ifndef CONFIG_64BIT
+-		if (MACHINE_HAS_IEEE) {
+-			lowcore_ptr[i]->extended_save_area_addr =
+-				(__u32) __get_free_pages(GFP_KERNEL, 0);
+-			if (!lowcore_ptr[i]->extended_save_area_addr)
+-				panic("smp_boot_cpus failed to "
+-				      "allocate memory\n");
+-		}
+-#endif
+-	}
+ #ifndef CONFIG_64BIT
+ 	if (MACHINE_HAS_IEEE)
+ 		ctl_set_bit(14, 29); /* enable extended save area */
+@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void)
+ {
+ 	BUG_ON(smp_processor_id() != 0);
+ 
++	current_thread_info()->cpu = 0;
++	cpu_set(0, cpu_present_map);
+ 	cpu_set(0, cpu_online_map);
+ 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
+ 	current_set[0] = current;
++	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
+ 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
+ }
+ 
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+-	cpu_present_map = cpu_possible_map;
+ }
+ 
+ /*
+@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier)
+ 	return 0;
+ }
+ 
+-static DEFINE_PER_CPU(struct cpu, cpu_devices);
++#ifdef CONFIG_HOTPLUG_CPU
++static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
++{
++	ssize_t count;
++
++	mutex_lock(&smp_cpu_state_mutex);
++	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
++	mutex_unlock(&smp_cpu_state_mutex);
++	return count;
++}
++
++static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
++				   size_t count)
++{
++	int cpu = dev->id;
++	int val, rc;
++	char delim;
++
++	if (sscanf(buf, "%d %c", &val, &delim) != 1)
++		return -EINVAL;
++	if (val != 0 && val != 1)
++		return -EINVAL;
++
++	mutex_lock(&smp_cpu_state_mutex);
++	get_online_cpus();
++	rc = -EBUSY;
++	if (cpu_online(cpu))
++		goto out;
++	rc = 0;
++	switch (val) {
++	case 0:
++		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
++			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
++			if (!rc)
++				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
++		}
++		break;
++	case 1:
++		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
++			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
++			if (!rc)
++				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
++		}
++		break;
++	default:
++		break;
++	}
++out:
++	put_online_cpus();
++	mutex_unlock(&smp_cpu_state_mutex);
++	return rc ? rc : count;
++}
++static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
++{
++	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
++}
++static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
++
++
++static struct attribute *cpu_common_attrs[] = {
++#ifdef CONFIG_HOTPLUG_CPU
++	&attr_configure.attr,
++#endif
++	&attr_address.attr,
++	NULL,
++};
++
++static struct attribute_group cpu_common_attr_group = {
++	.attrs = cpu_common_attrs,
++};
+ 
+ static ssize_t show_capability(struct sys_device *dev, char *buf)
+ {
+@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
+ }
+ static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
+ 
+-static struct attribute *cpu_attrs[] = {
++static struct attribute *cpu_online_attrs[] = {
+ 	&attr_capability.attr,
+ 	&attr_idle_count.attr,
+ 	&attr_idle_time_us.attr,
+ 	NULL,
+ };
+ 
+-static struct attribute_group cpu_attr_group = {
+-	.attrs = cpu_attrs,
++static struct attribute_group cpu_online_attr_group = {
++	.attrs = cpu_online_attrs,
+ };
+ 
+ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+ 		idle->idle_time = 0;
+ 		idle->idle_count = 0;
+ 		spin_unlock_irq(&idle->lock);
+-		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
++		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
+ 			return NOTIFY_BAD;
+ 		break;
+ 	case CPU_DEAD:
+ 	case CPU_DEAD_FROZEN:
+-		sysfs_remove_group(&s->kobj, &cpu_attr_group);
++		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
+ 		break;
+ 	}
+ 	return NOTIFY_OK;
+@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
+ 	.notifier_call = smp_cpu_notify,
+ };
+ 
++static int smp_add_present_cpu(int cpu)
++{
++	struct cpu *c = &per_cpu(cpu_devices, cpu);
++	struct sys_device *s = &c->sysdev;
++	int rc;
++
++	c->hotpluggable = 1;
++	rc = register_cpu(c, cpu);
++	if (rc)
++		goto out;
++	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
++	if (rc)
++		goto out_cpu;
++	if (!cpu_online(cpu))
++		goto out;
++	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
++	if (!rc)
++		return 0;
++	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
++out_cpu:
++#ifdef CONFIG_HOTPLUG_CPU
++	unregister_cpu(c);
++#endif
++out:
++	return rc;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static ssize_t rescan_store(struct sys_device *dev, const char *buf,
++			    size_t count)
++{
++	cpumask_t newcpus;
++	int cpu;
++	int rc;
++
++	mutex_lock(&smp_cpu_state_mutex);
++	get_online_cpus();
++	newcpus = cpu_present_map;
++	rc = smp_rescan_cpus();
++	if (rc)
++		goto out;
++	cpus_andnot(newcpus, cpu_present_map, newcpus);
++	for_each_cpu_mask(cpu, newcpus) {
++		rc = smp_add_present_cpu(cpu);
++		if (rc)
++			cpu_clear(cpu, cpu_present_map);
++	}
++	rc = 0;
++out:
++	put_online_cpus();
++	mutex_unlock(&smp_cpu_state_mutex);
++	return rc ? rc : count;
++}
++static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
++#endif /* CONFIG_HOTPLUG_CPU */
++
+ static int __init topology_init(void)
+ {
+ 	int cpu;
+@@ -800,16 +1071,14 @@ static int __init topology_init(void)
+ 
+ 	register_cpu_notifier(&smp_cpu_nb);
+ 
+-	for_each_possible_cpu(cpu) {
+-		struct cpu *c = &per_cpu(cpu_devices, cpu);
+-		struct sys_device *s = &c->sysdev;
+-
+-		c->hotpluggable = 1;
+-		register_cpu(c, cpu);
+-		if (!cpu_online(cpu))
+-			continue;
+-		s = &c->sysdev;
+-		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
++#ifdef CONFIG_HOTPLUG_CPU
++	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
++			       &attr_rescan.attr);
++	if (rc)
++		return rc;
++#endif
++	for_each_present_cpu(cpu) {
++		rc = smp_add_present_cpu(cpu);
+ 		if (rc)
+ 			return rc;
+ 	}
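For reference, the smp.c changes above expose the new CPU hotplug states through sysfs: a per-CPU "configure" attribute (cpu_configure_show/store, accepting 0 or 1), a read-only "address" attribute, and a class-wide write-only "rescan" attribute that triggers smp_rescan_cpus(). A minimal userspace sketch of driving that interface, assuming the attributes end up under the usual /sys/devices/system/cpu paths (the paths themselves are not spelled out in the patch):

/*
 * Hypothetical example, not part of the patch: rescan for standby CPUs,
 * then move cpu2 from standby to configured via the new attributes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	/* rescan_store(): detect CPUs that became present since boot */
	write_attr("/sys/devices/system/cpu/rescan", "1");
	/* cpu_configure_store(): "1" = configure, "0" = deconfigure */
	write_attr("/sys/devices/system/cpu/cpu2/configure", "1");
	return 0;
}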
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 22b800c..3bbac12 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -1145,7 +1145,7 @@ static void etr_work_fn(struct work_struct *work)
+  * Sysfs interface functions
+  */
+ static struct sysdev_class etr_sysclass = {
+-	set_kset_name("etr")
++	.name	= "etr",
+ };
+ 
+ static struct sys_device etr_port0_dev = {
+diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
+index 8ed16a8..52b8342 100644
+--- a/arch/s390/kernel/traps.c
++++ b/arch/s390/kernel/traps.c
+@@ -31,6 +31,7 @@
+ #include <linux/reboot.h>
+ #include <linux/kprobes.h>
+ #include <linux/bug.h>
++#include <linux/utsname.h>
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -168,9 +169,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
+  */
+ void dump_stack(void)
+ {
++	printk("CPU: %d %s %s %.*s\n",
++	       task_thread_info(current)->cpu, print_tainted(),
++	       init_utsname()->release,
++	       (int)strcspn(init_utsname()->version, " "),
++	       init_utsname()->version);
++	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
++	       current->comm, current->pid, current,
++	       (void *) current->thread.ksp);
+ 	show_stack(NULL, NULL);
+ }
+-
+ EXPORT_SYMBOL(dump_stack);
+ 
+ static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
+@@ -258,8 +266,14 @@ void die(const char * str, struct pt_regs * regs, long err)
+ 	console_verbose();
+ 	spin_lock_irq(&die_lock);
+ 	bust_spinlocks(1);
+-	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+-	print_modules();
++	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++	printk("PREEMPT ");
++#endif
++#ifdef CONFIG_SMP
++	printk("SMP");
++#endif
++	printk("\n");
+ 	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
+ 	show_regs(regs);
+ 	bust_spinlocks(0);
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 849120e..7d43c3c 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -17,6 +17,12 @@ ENTRY(_start)
+ jiffies = jiffies_64;
+ #endif
+ 
++PHDRS {
++	text PT_LOAD FLAGS(5);	/* R_E */
++	data PT_LOAD FLAGS(7);	/* RWE */
++	note PT_NOTE FLAGS(0);	/* ___ */
++}
++
+ SECTIONS
+ {
+ 	. = 0x00000000;
+@@ -33,6 +39,9 @@ SECTIONS
+ 
+ 	_etext = .;		/* End of text section */
+ 
++	NOTES :text :note
++	BUG_TABLE :text
++
+ 	RODATA
+ 
+ #ifdef CONFIG_SHARED_KERNEL
+@@ -49,9 +58,6 @@ SECTIONS
+ 		__stop___ex_table = .;
+ 	}
+ 
+-	NOTES
+-	BUG_TABLE
+-
+ 	.data : {		/* Data */
+ 		DATA_DATA
+ 		CONSTRUCTORS
+@@ -91,7 +97,7 @@ SECTIONS
+ 	__init_begin = .;
+ 	.init.text : {
+ 		_sinittext = .;
+-		*(.init.text)
++		INIT_TEXT
+ 		_einittext = .;
+ 	}
+ 	/*
+@@ -99,11 +105,11 @@ SECTIONS
+ 	 * to deal with references from __bug_table
+ 	*/
+ 	.exit.text : {
+-		*(.exit.text)
++		EXIT_TEXT
+ 	}
+ 
+ 	.init.data : {
+-		*(.init.data)
++		INIT_DATA
+ 	}
+ 	. = ALIGN(0x100);
+ 	.init.setup : {
+@@ -150,7 +156,7 @@ SECTIONS
+ 
+ 	/* Sections to be discarded */
+ 	/DISCARD/ : {
+-		*(.exit.data)
++		EXIT_DATA
+ 		*(.exitcall.exit)
+ 	}
+ 
+diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
+index 8d76403..e41f400 100644
+--- a/arch/s390/lib/spinlock.c
++++ b/arch/s390/lib/spinlock.c
+@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
+ 		_raw_yield();
+ }
+ 
+-void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
++void _raw_spin_lock_wait(raw_spinlock_t *lp)
+ {
+ 	int count = spin_retry;
+ 	unsigned int cpu = ~smp_processor_id();
+@@ -53,15 +53,36 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+ 		}
+ 		if (__raw_spin_is_locked(lp))
+ 			continue;
+-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+-			lp->owner_pc = pc;
++		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+ 			return;
+-		}
+ 	}
+ }
+ EXPORT_SYMBOL(_raw_spin_lock_wait);
+ 
+-int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
++void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
++{
++	int count = spin_retry;
++	unsigned int cpu = ~smp_processor_id();
++
++	local_irq_restore(flags);
++	while (1) {
++		if (count-- <= 0) {
++			unsigned int owner = lp->owner_cpu;
++			if (owner != 0)
++				_raw_yield_cpu(~owner);
++			count = spin_retry;
++		}
++		if (__raw_spin_is_locked(lp))
++			continue;
++		local_irq_disable();
++		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
++			return;
++		local_irq_restore(flags);
++	}
++}
++EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
++
++int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+ {
+ 	unsigned int cpu = ~smp_processor_id();
+ 	int count;
+@@ -69,10 +90,8 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+ 	for (count = spin_retry; count > 0; count--) {
+ 		if (__raw_spin_is_locked(lp))
+ 			continue;
+-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+-			lp->owner_pc = pc;
++		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+ 			return 1;
+-		}
+ 	}
+ 	return 0;
+ }
+diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
+index 394980b..880b0eb 100644
+--- a/arch/s390/mm/extmem.c
++++ b/arch/s390/mm/extmem.c
+@@ -83,7 +83,7 @@ struct dcss_segment {
+ };
+ 
+ static DEFINE_MUTEX(dcss_lock);
+-static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
++static LIST_HEAD(dcss_list);
+ static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
+ 					"EW/EN-MIXED" };
+ 
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index fb9c5a8..79d13a1 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -15,10 +15,6 @@
+ #include <asm/setup.h>
+ #include <asm/tlbflush.h>
+ 
+-unsigned long vmalloc_end;
+-EXPORT_SYMBOL(vmalloc_end);
+-
+-static struct page *vmem_map;
+ static DEFINE_MUTEX(vmem_mutex);
+ 
+ struct memory_segment {
+@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
+ 	pte_t  pte;
+ 	int ret = -ENOMEM;
+ 
+-	map_start = vmem_map + PFN_DOWN(start);
+-	map_end	= vmem_map + PFN_DOWN(start + size);
++	map_start = VMEM_MAP + PFN_DOWN(start);
++	map_end	= VMEM_MAP + PFN_DOWN(start + size);
+ 
+ 	start_addr = (unsigned long) map_start & PAGE_MASK;
+ 	end_addr = PFN_ALIGN((unsigned long) map_end);
+@@ -240,10 +236,10 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
+ {
+ 	int ret;
+ 
+-	ret = vmem_add_range(start, size);
++	ret = vmem_add_mem_map(start, size);
+ 	if (ret)
+ 		return ret;
+-	return vmem_add_mem_map(start, size);
++	return vmem_add_range(start, size);
+ }
+ 
+ /*
+@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
+ {
+ 	struct memory_segment *tmp;
+ 
+-	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
++	if (seg->start + seg->size >= VMALLOC_START ||
+ 	    seg->start + seg->size < seg->start)
+ 		return -ERANGE;
+ 
+@@ -357,17 +353,15 @@ out:
+ 
+ /*
+  * map whole physical memory to virtual memory (identity mapping)
++ * We reserve enough space in the vmalloc area for the vmem_map so that
++ * additional memory segments can be hotplugged later.
+  */
+ void __init vmem_map_init(void)
+ {
+-	unsigned long map_size;
+ 	int i;
+ 
+-	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
+-	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
+-	vmem_map = (struct page *) vmalloc_end;
+-	NODE_DATA(0)->node_mem_map = vmem_map;
+-
++	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
++	NODE_DATA(0)->node_mem_map = VMEM_MAP;
+ 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
+ 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+ }
+@@ -382,7 +376,7 @@ static int __init vmem_convert_memory_chunk(void)
+ 	int i;
+ 
+ 	mutex_lock(&vmem_mutex);
+-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
++	for (i = 0; i < MEMORY_CHUNKS; i++) {
+ 		if (!memory_chunk[i].size)
+ 			continue;
+ 		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
+index 496d635..1cd9c8f 100644
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -6,8 +6,7 @@
+ mainmenu "Linux/SuperH Kernel Configuration"
+ 
+ config SUPERH
+-	bool
+-	default y
++	def_bool y
+ 	select EMBEDDED
+ 	help
+ 	  The SuperH is a RISC processor targeted for use in embedded systems
+@@ -15,36 +14,36 @@ config SUPERH
+ 	  gaming console.  The SuperH port has a home page at
+ 	  <http://www.linux-sh.org/>.
+ 
++config SUPERH32
++	def_bool !SUPERH64
++
++config SUPERH64
++	def_bool y if CPU_SH5
++
+ config RWSEM_GENERIC_SPINLOCK
+-	bool
+-	default y
++	def_bool y
+ 
+ config RWSEM_XCHGADD_ALGORITHM
+ 	bool
+ 
+ config GENERIC_BUG
+ 	def_bool y
+-	depends on BUG
++	depends on BUG && SUPERH32
+ 
+ config GENERIC_FIND_NEXT_BIT
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_HWEIGHT
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_HARDIRQS
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_IRQ_PROBE
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_CALIBRATE_DELAY
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_IOMAP
+ 	bool
+@@ -75,20 +74,16 @@ config ARCH_MAY_HAVE_PC_FDC
+ 	bool
+ 
+ config STACKTRACE_SUPPORT
+-	bool
+-	default y
++	def_bool y
+ 
+ config LOCKDEP_SUPPORT
+-	bool
+-	default y
++	def_bool y
+ 
+ config ARCH_HAS_ILOG2_U32
+-	bool
+-	default n
++	def_bool n
+ 
+ config ARCH_HAS_ILOG2_U64
+-	bool
+-	default n
++	def_bool n
+ 
+ config ARCH_NO_VIRT_TO_BUS
+ 	def_bool y
+@@ -97,110 +92,234 @@ source "init/Kconfig"
+ 
+ menu "System type"
+ 
+-source "arch/sh/mm/Kconfig"
++#
++# Processor families
++#
++config CPU_SH2
++	bool
+ 
+-menu "Processor features"
++config CPU_SH2A
++	bool
++	select CPU_SH2
++
++config CPU_SH3
++	bool
++	select CPU_HAS_INTEVT
++	select CPU_HAS_SR_RB
++
++config CPU_SH4
++	bool
++	select CPU_HAS_INTEVT
++	select CPU_HAS_SR_RB
++	select CPU_HAS_PTEA if !CPU_SH4A || CPU_SHX2
++	select CPU_HAS_FPU if !CPU_SH4AL_DSP
++
++config CPU_SH4A
++	bool
++	select CPU_SH4
++
++config CPU_SH4AL_DSP
++	bool
++	select CPU_SH4A
++	select CPU_HAS_DSP
++
++config CPU_SH5
++	bool
++	select CPU_HAS_FPU
++
++config CPU_SHX2
++	bool
++
++config CPU_SHX3
++	bool
+ 
+ choice
+-	prompt "Endianess selection" 
+-	default CPU_LITTLE_ENDIAN
+-	help
+-	  Some SuperH machines can be configured for either little or big
+-	  endian byte order. These modes require different kernels.
++	prompt "Processor sub-type selection"
+ 
+-config CPU_LITTLE_ENDIAN
+-	bool "Little Endian"
++#
++# Processor subtypes
++#
+ 
+-config CPU_BIG_ENDIAN
+-	bool "Big Endian"
++# SH-2 Processor Support
+ 
+-endchoice
++config CPU_SUBTYPE_SH7619
++	bool "Support SH7619 processor"
++	select CPU_SH2
++
++# SH-2A Processor Support
++
++config CPU_SUBTYPE_SH7203
++	bool "Support SH7203 processor"
++	select CPU_SH2A
++	select CPU_HAS_FPU
++
++config CPU_SUBTYPE_SH7206
++	bool "Support SH7206 processor"
++	select CPU_SH2A
+ 
+-config SH_FPU
+-	bool "FPU support"
+-	depends on CPU_HAS_FPU
+-	default y
++config CPU_SUBTYPE_SH7263
++	bool "Support SH7263 processor"
++	select CPU_SH2A
++	select CPU_HAS_FPU
++
++# SH-3 Processor Support
++
++config CPU_SUBTYPE_SH7705
++	bool "Support SH7705 processor"
++	select CPU_SH3
++
++config CPU_SUBTYPE_SH7706
++	bool "Support SH7706 processor"
++	select CPU_SH3
+ 	help
+-	  Selecting this option will enable support for SH processors that
+-	  have FPU units (ie, SH77xx).
++	  Select SH7706 if you have a 133 MHz SH-3 HD6417706 CPU.
+ 
+-	  This option must be set in order to enable the FPU.
++config CPU_SUBTYPE_SH7707
++	bool "Support SH7707 processor"
++	select CPU_SH3
++	help
++	  Select SH7707 if you have a 60 MHz SH-3 HD6417707 CPU.
+ 
+-config SH_FPU_EMU
+-	bool "FPU emulation support"
+-	depends on !SH_FPU && EXPERIMENTAL
+-	default n
++config CPU_SUBTYPE_SH7708
++	bool "Support SH7708 processor"
++	select CPU_SH3
+ 	help
+-	  Selecting this option will enable support for software FPU emulation.
+-	  Most SH-3 users will want to say Y here, whereas most SH-4 users will
+-	  want to say N.
++	  Select SH7708 if you have a 60 MHz SH-3 HD6417708S or
++	  if you have a 100 MHz SH-3 HD6417708R CPU.
+ 
+-config SH_DSP
+-	bool "DSP support"
+-	depends on CPU_HAS_DSP
+-	default y
++config CPU_SUBTYPE_SH7709
++	bool "Support SH7709 processor"
++	select CPU_SH3
+ 	help
+-	  Selecting this option will enable support for SH processors that
+-	  have DSP units (ie, SH2-DSP, SH3-DSP, and SH4AL-DSP).
++	  Select SH7709 if you have an 80 MHz SH-3 HD6417709 CPU.
+ 
+-	  This option must be set in order to enable the DSP.
++config CPU_SUBTYPE_SH7710
++	bool "Support SH7710 processor"
++	select CPU_SH3
++	select CPU_HAS_DSP
++	help
++	  Select SH7710 if you have a SH3-DSP SH7710 CPU.
+ 
+-config SH_ADC
+-	bool "ADC support"
+-	depends on CPU_SH3
+-	default y
++config CPU_SUBTYPE_SH7712
++	bool "Support SH7712 processor"
++	select CPU_SH3
++	select CPU_HAS_DSP
+ 	help
+-	  Selecting this option will allow the Linux kernel to use SH3 on-chip
+-	  ADC module.
++	  Select SH7712 if you have a SH3-DSP SH7712 CPU.
+ 
+-	  If unsure, say N.
++config CPU_SUBTYPE_SH7720
++	bool "Support SH7720 processor"
++	select CPU_SH3
++	select CPU_HAS_DSP
++	help
++	  Select SH7720 if you have a SH3-DSP SH7720 CPU.
+ 
+-config SH_STORE_QUEUES
+-	bool "Support for Store Queues"
+-	depends on CPU_SH4
++config CPU_SUBTYPE_SH7721
++	bool "Support SH7721 processor"
++	select CPU_SH3
++	select CPU_HAS_DSP
+ 	help
+-	  Selecting this option will enable an in-kernel API for manipulating
+-	  the store queues integrated in the SH-4 processors.
++	  Select SH7721 if you have a SH3-DSP SH7721 CPU.
+ 
+-config SPECULATIVE_EXECUTION
+-	bool "Speculative subroutine return"
+-	depends on CPU_SUBTYPE_SH7780 && EXPERIMENTAL
++# SH-4 Processor Support
++
++config CPU_SUBTYPE_SH7750
++	bool "Support SH7750 processor"
++	select CPU_SH4
+ 	help
+-	  This enables support for a speculative instruction fetch for
+-	  subroutine return. There are various pitfalls associated with
+-	  this, as outlined in the SH7780 hardware manual.
++	  Select SH7750 if you have a 200 MHz SH-4 HD6417750 CPU.
+ 
+-	  If unsure, say N.
++config CPU_SUBTYPE_SH7091
++	bool "Support SH7091 processor"
++	select CPU_SH4
++	help
++	  Select SH7091 if you have an SH-4 based Sega device (such as
++	  the Dreamcast, Naomi, and Naomi 2).
+ 
+-config CPU_HAS_INTEVT
+-	bool
++config CPU_SUBTYPE_SH7750R
++	bool "Support SH7750R processor"
++	select CPU_SH4
+ 
+-config CPU_HAS_MASKREG_IRQ
+-	bool
++config CPU_SUBTYPE_SH7750S
++	bool "Support SH7750S processor"
++	select CPU_SH4
+ 
+-config CPU_HAS_IPR_IRQ
+-	bool
++config CPU_SUBTYPE_SH7751
++	bool "Support SH7751 processor"
++	select CPU_SH4
++	help
++	  Select SH7751 if you have a 166 MHz SH-4 HD6417751 CPU,
++	  or if you have an HD6417751R CPU.
+ 
+-config CPU_HAS_SR_RB
+-	bool
++config CPU_SUBTYPE_SH7751R
++	bool "Support SH7751R processor"
++	select CPU_SH4
++
++config CPU_SUBTYPE_SH7760
++	bool "Support SH7760 processor"
++	select CPU_SH4
++
++config CPU_SUBTYPE_SH4_202
++	bool "Support SH4-202 processor"
++	select CPU_SH4
++
++# SH-4A Processor Support
++
++config CPU_SUBTYPE_SH7763
++	bool "Support SH7763 processor"
++	select CPU_SH4A
+ 	help
+-	  This will enable the use of SR.RB register bank usage. Processors
+-	  that are lacking this bit must have another method in place for
+-	  accomplishing what is taken care of by the banked registers.
++	  Select SH7763 if you have a SH4A SH7763(R5S77631) CPU.
+ 
+-	  See <file:Documentation/sh/register-banks.txt> for further
+-	  information on SR.RB and register banking in the kernel in general.
++config CPU_SUBTYPE_SH7770
++	bool "Support SH7770 processor"
++	select CPU_SH4A
+ 
+-config CPU_HAS_PTEA
+-	bool
++config CPU_SUBTYPE_SH7780
++	bool "Support SH7780 processor"
++	select CPU_SH4A
+ 
+-config CPU_HAS_DSP
+-	bool
++config CPU_SUBTYPE_SH7785
++	bool "Support SH7785 processor"
++	select CPU_SH4A
++	select CPU_SHX2
++	select ARCH_SPARSEMEM_ENABLE
++	select SYS_SUPPORTS_NUMA
+ 
+-config CPU_HAS_FPU
+-	bool
++config CPU_SUBTYPE_SHX3
++	bool "Support SH-X3 processor"
++	select CPU_SH4A
++	select CPU_SHX3
++	select ARCH_SPARSEMEM_ENABLE
++	select SYS_SUPPORTS_NUMA
++	select SYS_SUPPORTS_SMP
+ 
+-endmenu
++# SH4AL-DSP Processor Support
++
++config CPU_SUBTYPE_SH7343
++	bool "Support SH7343 processor"
++	select CPU_SH4AL_DSP
++
++config CPU_SUBTYPE_SH7722
++	bool "Support SH7722 processor"
++	select CPU_SH4AL_DSP
++	select CPU_SHX2
++	select ARCH_SPARSEMEM_ENABLE
++	select SYS_SUPPORTS_NUMA
++
++# SH-5 Processor Support
++
++config CPU_SUBTYPE_SH5_101
++	bool "Support SH5-101 processor"
++	select CPU_SH5
++
++config CPU_SUBTYPE_SH5_103
++	bool "Support SH5-103 processor"
++
++endchoice
++
++source "arch/sh/mm/Kconfig"
++source "arch/sh/Kconfig.cpu"
+ 
+ menu "Board support"
+ 
+@@ -321,13 +440,6 @@ config SH_SECUREEDGE5410
+ 	  This includes both the OEM SecureEdge products as well as the
+ 	  SME product line.
+ 
+-config SH_HS7751RVOIP
+-	bool "HS7751RVOIP"
+-	depends on CPU_SUBTYPE_SH7751R
+-	help
+-	  Select HS7751RVOIP if configuring for a Renesas Technology
+-	  Sales VoIP board.
+-
+ config SH_7710VOIPGW
+ 	bool "SH7710-VOIP-GW"
+ 	depends on CPU_SUBTYPE_SH7710
+@@ -343,6 +455,14 @@ config SH_RTS7751R2D
+ 	  Select RTS7751R2D if configuring for a Renesas Technology
+ 	  Sales SH-Graphics board.
+ 
++config SH_SDK7780
++	bool "SDK7780R3"
++	depends on CPU_SUBTYPE_SH7780
++	select SYS_SUPPORTS_PCI
++	help
++	  Select SDK7780 if configuring for a Renesas SH7780 SDK7780R3
++	  evaluation board.
++
+ config SH_HIGHLANDER
+ 	bool "Highlander"
+ 	depends on CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
+@@ -399,41 +519,47 @@ config SH_MAGIC_PANEL_R2
+ 	help
+ 	  Select Magic Panel R2 if configuring for Magic Panel R2.
+ 
++config SH_CAYMAN
++	bool "Hitachi Cayman"
++	depends on CPU_SUBTYPE_SH5_101 || CPU_SUBTYPE_SH5_103
++	select SYS_SUPPORTS_PCI
++
+ endmenu
+ 
+-source "arch/sh/boards/renesas/hs7751rvoip/Kconfig"
+ source "arch/sh/boards/renesas/rts7751r2d/Kconfig"
+ source "arch/sh/boards/renesas/r7780rp/Kconfig"
++source "arch/sh/boards/renesas/sdk7780/Kconfig"
+ source "arch/sh/boards/magicpanelr2/Kconfig"
+ 
+ menu "Timer and clock configuration"
+ 
+ config SH_TMU
+-	bool "TMU timer support"
++	def_bool y
++	prompt "TMU timer support"
+ 	depends on CPU_SH3 || CPU_SH4
+ 	select GENERIC_TIME
+ 	select GENERIC_CLOCKEVENTS
+-	default y
+ 	help
+ 	  This enables the use of the TMU as the system timer.
+ 
+ config SH_CMT
+-	bool "CMT timer support"
++	def_bool y
++	prompt "CMT timer support"
+ 	depends on CPU_SH2
+-	default y
+ 	help
+ 	  This enables the use of the CMT as the system timer.
+ 
+ config SH_MTU2
+-	bool "MTU2 timer support"
++	def_bool n
++	prompt "MTU2 timer support"
+ 	depends on CPU_SH2A
+-	default n
+ 	help
+ 	  This enables the use of the MTU2 as the system timer.
+ 
+ config SH_TIMER_IRQ
+ 	int
+-	default "28" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
++	default "28" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || \
++			CPU_SUBTYPE_SH7763
+ 	default "86" if CPU_SUBTYPE_SH7619
+ 	default "140" if CPU_SUBTYPE_SH7206
+ 	default "16"
+@@ -445,7 +571,8 @@ config SH_PCLK_FREQ
+ 	default "32000000" if CPU_SUBTYPE_SH7722
+ 	default "33333333" if CPU_SUBTYPE_SH7770 || \
+ 			      CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \
+-			      CPU_SUBTYPE_SH7206
++			      CPU_SUBTYPE_SH7203 || CPU_SUBTYPE_SH7206 || \
++			      CPU_SUBTYPE_SH7263
+ 	default "60000000" if CPU_SUBTYPE_SH7751 || CPU_SUBTYPE_SH7751R
+ 	default "66000000" if CPU_SUBTYPE_SH4_202
+ 	default "50000000"
+@@ -456,7 +583,7 @@ config SH_PCLK_FREQ
+ 
+ config SH_CLK_MD
+ 	int "CPU Mode Pin Setting"
+-	depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206
++	depends on CPU_SH2
+ 	default 6 if CPU_SUBTYPE_SH7206
+ 	default 5 if CPU_SUBTYPE_SH7619
+ 	default 0
+@@ -490,9 +617,8 @@ source "arch/sh/drivers/Kconfig"
+ endmenu
+ 
+ config ISA_DMA_API
+-	bool
++	def_bool y
+ 	depends on SH_MPC1211
+-	default y
+ 
+ menu "Kernel features"
+ 
+@@ -570,7 +696,7 @@ source "kernel/Kconfig.preempt"
+ 
+ config GUSA
+ 	def_bool y
+-	depends on !SMP
++	depends on !SMP && SUPERH32
+ 	help
+ 	  This enables support for gUSA (general UserSpace Atomicity).
+ 	  This is the default implementation for both UP and non-ll/sc
+@@ -582,6 +708,16 @@ config GUSA
+ 	  This should only be disabled for special cases where alternate
+ 	  atomicity implementations exist.
+ 
++config GUSA_RB
++	bool "Implement atomic operations by roll-back (gRB) (EXPERIMENTAL)"
++	depends on GUSA && CPU_SH3 || (CPU_SH4 && !CPU_SH4A)
++	help
++	  Enabling this option will allow the kernel to implement some
++	  atomic operations using a software implementation of load-locked/
++	  store-conditional (LLSC). On machines which do not have hardware
++	  LLSC, this should be more efficient than the alternative of
++	  disabling interrupts around the atomic sequence.
++
+ endmenu
+ 
+ menu "Boot options"
+diff --git a/arch/sh/Kconfig.cpu b/arch/sh/Kconfig.cpu
+new file mode 100644
+index 0000000..d850184
+--- /dev/null
++++ b/arch/sh/Kconfig.cpu
+@@ -0,0 +1,115 @@
++menu "Processor features"
++
++choice
++	prompt "Endianess selection" 
++	default CPU_LITTLE_ENDIAN
++	help
++	  Some SuperH machines can be configured for either little or big
++	  endian byte order. These modes require different kernels.
++
++config CPU_LITTLE_ENDIAN
++	bool "Little Endian"
++
++config CPU_BIG_ENDIAN
++	bool "Big Endian"
++
++endchoice
++
++config SH_FPU
++	def_bool y
++	prompt "FPU support"
++	depends on CPU_HAS_FPU
++	help
++	  Selecting this option will enable support for SH processors that
++	  have FPU units (ie, SH77xx).
++
++	  This option must be set in order to enable the FPU.
++
++config SH64_FPU_DENORM_FLUSH
++	bool "Flush floating point denorms to zero"
++	depends on SH_FPU && SUPERH64
++
++config SH_FPU_EMU
++	def_bool n
++	prompt "FPU emulation support"
++	depends on !SH_FPU && EXPERIMENTAL
++	help
++	  Selecting this option will enable support for software FPU emulation.
++	  Most SH-3 users will want to say Y here, whereas most SH-4 users will
++	  want to say N.
++
++config SH_DSP
++	def_bool y
++	prompt "DSP support"
++	depends on CPU_HAS_DSP
++	help
++	  Selecting this option will enable support for SH processors that
++	  have DSP units (ie, SH2-DSP, SH3-DSP, and SH4AL-DSP).
++
++	  This option must be set in order to enable the DSP.
++
++config SH_ADC
++	def_bool y
++	prompt "ADC support"
++	depends on CPU_SH3
++	help
++	  Selecting this option will allow the Linux kernel to use the SH3
++	  on-chip ADC module.
++
++	  If unsure, say N.
++
++config SH_STORE_QUEUES
++	bool "Support for Store Queues"
++	depends on CPU_SH4
++	help
++	  Selecting this option will enable an in-kernel API for manipulating
++	  the store queues integrated in the SH-4 processors.
++
++config SPECULATIVE_EXECUTION
++	bool "Speculative subroutine return"
++	depends on CPU_SUBTYPE_SH7780 && EXPERIMENTAL
++	help
++	  This enables support for a speculative instruction fetch for
++	  subroutine return. There are various pitfalls associated with
++	  this, as outlined in the SH7780 hardware manual.
++
++	  If unsure, say N.
++
++config SH64_USER_MISALIGNED_FIXUP
++	def_bool y
++	prompt "Fixup misaligned loads/stores occurring in user mode"
++	depends on SUPERH64
++
++config SH64_ID2815_WORKAROUND
++	bool "Include workaround for SH5-101 cut2 silicon defect ID2815"
++	depends on CPU_SUBTYPE_SH5_101
++
++config CPU_HAS_INTEVT
++	bool
++
++config CPU_HAS_MASKREG_IRQ
++	bool
++
++config CPU_HAS_IPR_IRQ
++	bool
++
++config CPU_HAS_SR_RB
++	bool
++	help
++	  This enables the use of the SR.RB register banks. Processors
++	  that lack this bit must have another method in place for
++	  accomplishing what is taken care of by the banked registers.
++
++	  See <file:Documentation/sh/register-banks.txt> for further
++	  information on SR.RB and register banking in the kernel in general.
++
++config CPU_HAS_PTEA
++	bool
++
++config CPU_HAS_DSP
++	bool
++
++config CPU_HAS_FPU
++	bool
++
++endmenu
+diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
+index 722da68..f7c7161 100644
+--- a/arch/sh/Kconfig.debug
++++ b/arch/sh/Kconfig.debug
+@@ -1,8 +1,7 @@
+ menu "Kernel hacking"
+ 
+ config TRACE_IRQFLAGS_SUPPORT
+-	bool
+-	default y
++	def_bool y
+ 
+ source "lib/Kconfig.debug"
+ 
+@@ -30,12 +29,13 @@ config EARLY_SCIF_CONSOLE
+ config EARLY_SCIF_CONSOLE_PORT
+ 	hex
+ 	depends on EARLY_SCIF_CONSOLE
+-	default "0xffe00000" if CPU_SUBTYPE_SH7780
++	default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763
+ 	default "0xffea0000" if CPU_SUBTYPE_SH7785
+-	default "0xfffe9800" if CPU_SUBTYPE_SH7206
++	default "0xfffe8000" if CPU_SUBTYPE_SH7203
++	default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263
+ 	default "0xf8420000" if CPU_SUBTYPE_SH7619
+ 	default "0xa4400000" if CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7705
+-	default "0xa4430000" if CPU_SUBTYPE_SH7720
++	default "0xa4430000" if CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721
+ 	default "0xffc30000" if CPU_SUBTYPE_SHX3
+ 	default "0xffe80000" if CPU_SH4
+ 	default "0x00000000"
+@@ -62,7 +62,7 @@ config DEBUG_BOOTMEM
+ 
+ config DEBUG_STACKOVERFLOW
+ 	bool "Check for stack overflows"
+-	depends on DEBUG_KERNEL
++	depends on DEBUG_KERNEL && SUPERH32
+ 	help
+ 	  This option will cause messages to be printed if free stack space
+ 	  drops below a certain limit.
+@@ -88,7 +88,7 @@ config 4KSTACKS
+ 
+ config IRQSTACKS
+ 	bool "Use separate kernel stacks when processing interrupts"
+-	depends on DEBUG_KERNEL
++	depends on DEBUG_KERNEL && SUPERH32
+ 	help
+ 	  If you say Y here the kernel will use separate kernel stacks
+ 	  for handling hard and soft interrupts.  This can help avoid
+@@ -119,19 +119,19 @@ config COMPILE_OPTIONS
+ 	depends on MORE_COMPILE_OPTIONS
+ 
+ config KGDB_NMI
+-	bool "Enter KGDB on NMI"
+-	default n
++	def_bool n
++	prompt "Enter KGDB on NMI"
+ 
+ config SH_KGDB_CONSOLE
+-	bool "Console messages through GDB"
++	def_bool n
++	prompt "Console messages through GDB"
+ 	depends on !SERIAL_SH_SCI_CONSOLE && SERIAL_SH_SCI=y
+ 	select SERIAL_CORE_CONSOLE
+-	default n
+ 
+ config KGDB_SYSRQ
+-	bool "Allow SysRq 'G' to enter KGDB"
++	def_bool y
++	prompt "Allow SysRq 'G' to enter KGDB"
+ 	depends on MAGIC_SYSRQ
+-	default y
+ 
+ comment "Serial port setup"
+ 
+@@ -174,4 +174,29 @@ endchoice
+ 
+ endmenu
+ 
++if SUPERH64
++
++config SH64_PROC_ASIDS
++	bool "Debug: report ASIDs through /proc/asids"
++	depends on PROC_FS
++
++config SH64_SR_WATCH
++	bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
++
++config POOR_MANS_STRACE
++	bool "Debug: enable rudimentary strace facility"
++	help
++	  This option allows system calls to be traced to the console.  It also
++	  aids in detecting kernel stack underflow.  It is useful for debugging
++	  early-userland problems (e.g. init incurring fatal exceptions).
++
++config SH_ALPHANUMERIC
++	bool "Enable debug outputs to on-board alphanumeric display"
++	depends on SH_CAYMAN
++
++config SH_NO_BSS_INIT
++	bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)"
++
++endif
++
+ endmenu
+diff --git a/arch/sh/Makefile b/arch/sh/Makefile
+index e189fae..17fc361 100644
+--- a/arch/sh/Makefile
++++ b/arch/sh/Makefile
+@@ -1,17 +1,13 @@
+-# $Id: Makefile,v 1.35 2004/04/15 03:39:20 sugioka Exp $
+ #
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
++# arch/sh/Makefile
+ #
+ # Copyright (C) 1999  Kaz Kojima
+ # Copyright (C) 2002, 2003, 2004  Paul Mundt
+ # Copyright (C) 2002  M. R. Brown
+ #
+-# This file is included by the global makefile so that you can add your own
+-# architecture-specific flags and dependencies. Remember to do have actions
+-# for "archclean" and "archdep" for cleaning up and making dependencies for
+-# this architecture
++# This file is subject to the terms and conditions of the GNU General Public
++# License.  See the file "COPYING" in the main directory of this archive
++# for more details.
+ #
+ isa-y					:= any
+ isa-$(CONFIG_SH_DSP)			:= sh
+@@ -21,13 +17,9 @@ isa-$(CONFIG_CPU_SH3)			:= sh3
+ isa-$(CONFIG_CPU_SH4)			:= sh4
+ isa-$(CONFIG_CPU_SH4A)			:= sh4a
+ isa-$(CONFIG_CPU_SH4AL_DSP)		:= sh4al
+-
++isa-$(CONFIG_CPU_SH5)			:= shmedia
+ isa-$(CONFIG_SH_DSP)			:= $(isa-y)-dsp
+ 
+-ifndef CONFIG_MMU
+-isa-y			:= $(isa-y)-nommu
+-endif
+-
+ ifndef CONFIG_SH_DSP
+ ifndef CONFIG_SH_FPU
+ isa-y			:= $(isa-y)-nofpu
+@@ -44,6 +36,7 @@ cflags-$(CONFIG_CPU_SH4)		:= $(call cc-option,-m4,) \
+ 	$(call cc-option,-mno-implicit-fp,-m4-nofpu)
+ cflags-$(CONFIG_CPU_SH4A)		+= $(call cc-option,-m4a,) \
+ 					   $(call cc-option,-m4a-nofpu,)
++cflags-$(CONFIG_CPU_SH5)		:= $(call cc-option,-m5-32media-nofpu,)
+ 
+ cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= -mb
+ cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -ml
+@@ -66,22 +59,27 @@ cflags-y	+= $(isaflags-y) -ffreestanding
+ cflags-$(CONFIG_MORE_COMPILE_OPTIONS)	+= \
+ 	$(shell echo $(CONFIG_COMPILE_OPTIONS) | sed -e 's/"//g')
+ 
+-OBJCOPYFLAGS	:= -O binary -R .note -R .note.gnu.build-id -R .comment -R .stab -R .stabstr -S
++OBJCOPYFLAGS	:= -O binary -R .note -R .note.gnu.build-id -R .comment \
++		   -R .stab -R .stabstr -S
+ 
+-#
+-# arch/sh/defconfig doesn't reflect any real hardware, and as such should
+-# never be used by anyone. Use a board-specific defconfig that has a
+-# reasonable chance of being current instead.
+-#
+-KBUILD_DEFCONFIG := r7780rp_defconfig
++# Give the various platforms the opportunity to set default image types
++defaultimage-$(CONFIG_SUPERH32)	:= zImage
+ 
+-KBUILD_IMAGE	:= arch/sh/boot/zImage
++# Set some sensible Kbuild defaults
++KBUILD_DEFCONFIG	:= r7780mp_defconfig
++KBUILD_IMAGE		:= $(defaultimage-y)
+ 
+ #
+ # Choosing incompatible machines durings configuration will result in
+ # error messages during linking.
+ #
+-LDFLAGS_vmlinux     += -e _stext
++ifdef CONFIG_SUPERH32
++LDFLAGS_vmlinux	+= -e _stext
++else
++LDFLAGS_vmlinux	+= --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
++		   --defsym phys_stext_shmedia=phys_stext+1 \
++		   -e phys_stext_shmedia
++endif
+ 
+ ifdef CONFIG_CPU_LITTLE_ENDIAN
+ LDFLAGS_vmlinux		+= --defsym 'jiffies=jiffies_64'
+@@ -94,7 +92,9 @@ endif
+ KBUILD_CFLAGS		+= -pipe $(cflags-y)
+ KBUILD_AFLAGS		+= $(cflags-y)
+ 
+-head-y := arch/sh/kernel/head.o arch/sh/kernel/init_task.o
++head-y			:= arch/sh/kernel/init_task.o
++head-$(CONFIG_SUPERH32)	+= arch/sh/kernel/head_32.o
++head-$(CONFIG_SUPERH64)	+= arch/sh/kernel/head_64.o
+ 
+ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+ 
+@@ -112,11 +112,11 @@ machdir-$(CONFIG_SH_DREAMCAST)			+= dreamcast
+ machdir-$(CONFIG_SH_MPC1211)			+= mpc1211
+ machdir-$(CONFIG_SH_SH03)			+= sh03
+ machdir-$(CONFIG_SH_SECUREEDGE5410)		+= snapgear
+-machdir-$(CONFIG_SH_HS7751RVOIP)		+= renesas/hs7751rvoip
+ machdir-$(CONFIG_SH_RTS7751R2D)			+= renesas/rts7751r2d
+ machdir-$(CONFIG_SH_7751_SYSTEMH)		+= renesas/systemh
+ machdir-$(CONFIG_SH_EDOSK7705)			+= renesas/edosk7705
+ machdir-$(CONFIG_SH_HIGHLANDER)			+= renesas/r7780rp
++machdir-$(CONFIG_SH_SDK7780)			+= renesas/sdk7780
+ machdir-$(CONFIG_SH_7710VOIPGW)			+= renesas/sh7710voipgw
+ machdir-$(CONFIG_SH_X3PROTO)			+= renesas/x3proto
+ machdir-$(CONFIG_SH_SH4202_MICRODEV)		+= superh/microdev
+@@ -127,6 +127,7 @@ machdir-$(CONFIG_SH_7206_SOLUTION_ENGINE)	+= se/7206
+ machdir-$(CONFIG_SH_7619_SOLUTION_ENGINE)	+= se/7619
+ machdir-$(CONFIG_SH_LBOX_RE2)			+= lboxre2
+ machdir-$(CONFIG_SH_MAGIC_PANEL_R2)		+= magicpanelr2
++machdir-$(CONFIG_SH_CAYMAN)			+= cayman
+ 
+ incdir-y	:= $(notdir $(machdir-y))
+ 
+@@ -137,22 +138,22 @@ endif
+ 
+ # Companion chips
+ core-$(CONFIG_HD6446X_SERIES)	+= arch/sh/cchips/hd6446x/
+-core-$(CONFIG_MFD_SM501)	+= arch/sh/cchips/voyagergx/
+ 
+ cpuincdir-$(CONFIG_CPU_SH2)	:= cpu-sh2
+ cpuincdir-$(CONFIG_CPU_SH2A)	:= cpu-sh2a
+ cpuincdir-$(CONFIG_CPU_SH3)	:= cpu-sh3
+ cpuincdir-$(CONFIG_CPU_SH4)	:= cpu-sh4
++cpuincdir-$(CONFIG_CPU_SH5)	:= cpu-sh5
+ 
+-libs-y				:= arch/sh/lib/	$(libs-y) $(LIBGCC)
++libs-$(CONFIG_SUPERH32)		:= arch/sh/lib/	$(libs-y)
++libs-$(CONFIG_SUPERH64)		:= arch/sh/lib64/ $(libs-y)
++libs-y				+= $(LIBGCC)
+ 
+ drivers-y			+= arch/sh/drivers/
+ drivers-$(CONFIG_OPROFILE)	+= arch/sh/oprofile/
+ 
+ boot := arch/sh/boot
+ 
+-CPPFLAGS_vmlinux.lds := -traditional
+-
+ incdir-prefix	:= $(srctree)/include/asm-sh/
+ 
+ #	Update machine arch and proc symlinks if something which affects
+@@ -196,29 +197,61 @@ include/asm-sh/.mach: $(wildcard include/config/sh/*.h) \
+ 	done
+ 	@touch $@
+ 
+-archprepare: include/asm-sh/.cpu include/asm-sh/.mach maketools
+-
+ PHONY += maketools FORCE
++
+ maketools:  include/linux/version.h FORCE
+ 	$(Q)$(MAKE) $(build)=arch/sh/tools include/asm-sh/machtypes.h
+ 
+-all: zImage
++all: $(KBUILD_IMAGE)
+ 
+ zImage uImage uImage.srec vmlinux.srec: vmlinux
+ 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ 
+ compressed: zImage
+ 
++archprepare: include/asm-sh/.cpu include/asm-sh/.mach maketools \
++	     arch/sh/lib64/syscalltab.h
++
+ archclean:
+ 	$(Q)$(MAKE) $(clean)=$(boot)
+ 
+-CLEAN_FILES += include/asm-sh/machtypes.h \
+-	       include/asm-sh/cpu include/asm-sh/.cpu \
+-	       include/asm-sh/mach include/asm-sh/.mach
+-
+ define archhelp
+ 	@echo '* zImage 	           - Compressed kernel image'
+ 	@echo '  vmlinux.srec	           - Create an ELF S-record'
+ 	@echo '  uImage  	           - Create a bootable image for U-Boot'
+ 	@echo '  uImage.srec  	           - Create an S-record for U-Boot'
+ endef
++
++define filechk_gen-syscalltab
++       (set -e; \
++	echo "/*"; \
++	echo " * DO NOT MODIFY."; \
++	echo " *"; \
++	echo " * This file was generated by arch/sh/Makefile"; \
++	echo " * Any changes will be reverted at build time."; \
++	echo " */"; \
++	echo ""; \
++	echo "#ifndef __SYSCALLTAB_H"; \
++	echo "#define __SYSCALLTAB_H"; \
++	echo ""; \
++	echo "#include <linux/kernel.h>"; \
++	echo ""; \
++	echo "struct syscall_info {"; \
++	echo "	const char *name;"; \
++	echo "} syscall_info_table[] = {"; \
++	sed -e '/^.*\.long /!d;s//	{ "/;s/\(\([^/]*\)\/\)\{1\}.*/\2/; \
++		s/[ \t]*$$//g;s/$$/" },/;s/\("\)sys_/\1/g'; \
++	echo "};"; \
++	echo ""; \
++	echo "#define NUM_SYSCALL_INFO_ENTRIES ARRAY_SIZE(syscall_info_table)";\
++	echo ""; \
++	echo "#endif /* __SYSCALLTAB_H */" )
++endef
++
++arch/sh/lib64/syscalltab.h: arch/sh/kernel/syscalls_64.S
++	$(call filechk,gen-syscalltab)
++
++CLEAN_FILES += arch/sh/lib64/syscalltab.h \
++	       include/asm-sh/machtypes.h \
++	       include/asm-sh/cpu include/asm-sh/.cpu \
++	       include/asm-sh/mach include/asm-sh/.mach
+diff --git a/arch/sh/boards/cayman/Makefile b/arch/sh/boards/cayman/Makefile
+new file mode 100644
+index 0000000..489a8f8
+--- /dev/null
++++ b/arch/sh/boards/cayman/Makefile
+@@ -0,0 +1,5 @@
++#
++# Makefile for the Hitachi Cayman specific parts of the kernel
++#
++obj-y := setup.o irq.o
++obj-$(CONFIG_HEARTBEAT)	+= led.o
+diff --git a/arch/sh/boards/cayman/irq.c b/arch/sh/boards/cayman/irq.c
+new file mode 100644
+index 0000000..30ec7be
+--- /dev/null
++++ b/arch/sh/boards/cayman/irq.c
+@@ -0,0 +1,197 @@
++/*
++ * arch/sh/mach-cayman/irq.c - SH-5 Cayman Interrupt Support
++ *
++ * This file handles the board specific parts of the Cayman interrupt system
++ *
++ * Copyright (C) 2002 Stuart Menefy
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/signal.h>
++#include <asm/cpu/irq.h>
++#include <asm/page.h>
++
++/* Setup for the SMSC FDC37C935 / LAN91C100FD */
++#define SMSC_IRQ         IRQ_IRL1
++
++/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */
++#define PCI2_IRQ         IRQ_IRL3
++
++unsigned long epld_virt;
++
++#define EPLD_BASE        0x04002000
++#define EPLD_STATUS_BASE (epld_virt + 0x10)
++#define EPLD_MASK_BASE   (epld_virt + 0x20)
++
++/* Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto
++   the same SH-5 interrupt */
++
++static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id)
++{
++        printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n");
++	return IRQ_NONE;
++}
++
++static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id)
++{
++        printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq);
++	return IRQ_NONE;
++}
++
++static struct irqaction cayman_action_smsc = {
++	.name		= "Cayman SMSC Mux",
++	.handler	= cayman_interrupt_smsc,
++	.flags		= IRQF_DISABLED,
++};
++
++static struct irqaction cayman_action_pci2 = {
++	.name		= "Cayman PCI2 Mux",
++	.handler	= cayman_interrupt_pci2,
++	.flags		= IRQF_DISABLED,
++};
++
++static void enable_cayman_irq(unsigned int irq)
++{
++	unsigned long flags;
++	unsigned long mask;
++	unsigned int reg;
++	unsigned char bit;
++
++	irq -= START_EXT_IRQS;
++	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
++	bit = 1<<(irq % 8);
++	local_irq_save(flags);
++	mask = ctrl_inl(reg);
++	mask |= bit;
++	ctrl_outl(mask, reg);
++	local_irq_restore(flags);
++}
++
++void disable_cayman_irq(unsigned int irq)
++{
++	unsigned long flags;
++	unsigned long mask;
++	unsigned int reg;
++	unsigned char bit;
++
++	irq -= START_EXT_IRQS;
++	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
++	bit = 1<<(irq % 8);
++	local_irq_save(flags);
++	mask = ctrl_inl(reg);
++	mask &= ~bit;
++	ctrl_outl(mask, reg);
++	local_irq_restore(flags);
++}
++
++static void ack_cayman_irq(unsigned int irq)
++{
++	disable_cayman_irq(irq);
++}
++
++static void end_cayman_irq(unsigned int irq)
++{
++	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
++		enable_cayman_irq(irq);
++}
++
++static unsigned int startup_cayman_irq(unsigned int irq)
++{
++	enable_cayman_irq(irq);
++	return 0; /* never anything pending */
++}
++
++static void shutdown_cayman_irq(unsigned int irq)
++{
++	disable_cayman_irq(irq);
++}
++
++struct hw_interrupt_type cayman_irq_type = {
++	.typename	= "Cayman-IRQ",
++	.startup	= startup_cayman_irq,
++	.shutdown	= shutdown_cayman_irq,
++	.enable		= enable_cayman_irq,
++	.disable	= disable_cayman_irq,
++	.ack		= ack_cayman_irq,
++	.end		= end_cayman_irq,
++};
++
++int cayman_irq_demux(int evt)
++{
++	int irq = intc_evt_to_irq[evt];
++
++	if (irq == SMSC_IRQ) {
++		unsigned long status;
++		int i;
++
++		status = ctrl_inl(EPLD_STATUS_BASE) &
++			 ctrl_inl(EPLD_MASK_BASE) & 0xff;
++		if (status == 0) {
++			irq = -1;
++		} else {
++			for (i=0; i<8; i++) {
++				if (status & (1<<i))
++					break;
++			}
++			irq = START_EXT_IRQS + i;
++		}
++	}
++
++	if (irq == PCI2_IRQ) {
++		unsigned long status;
++		int i;
++
++		status = ctrl_inl(EPLD_STATUS_BASE + 3 * sizeof(u32)) &
++			 ctrl_inl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff;
++		if (status == 0) {
++			irq = -1;
++		} else {
++			for (i=0; i<8; i++) {
++				if (status & (1<<i))
++					break;
++			}
++			irq = START_EXT_IRQS + (3 * 8) + i;
++		}
++	}
++
++	return irq;
++}
++
++#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
++int cayman_irq_describe(char* p, int irq)
++{
++	if (irq < NR_INTC_IRQS) {
++		return intc_irq_describe(p, irq);
++	} else if (irq < NR_INTC_IRQS + 8) {
++		return sprintf(p, "(SMSC %d)", irq - NR_INTC_IRQS);
++	} else if ((irq >= NR_INTC_IRQS + 24) && (irq < NR_INTC_IRQS + 32)) {
++		return sprintf(p, "(PCI2 %d)", irq - (NR_INTC_IRQS + 24));
++	}
++
++	return 0;
++}
++#endif
++
++void init_cayman_irq(void)
++{
++	int i;
++
++	epld_virt = onchip_remap(EPLD_BASE, 1024, "EPLD");
++	if (!epld_virt) {
++		printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n");
++		return;
++	}
++
++	for (i=0; i<NR_EXT_IRQS; i++) {
++		irq_desc[START_EXT_IRQS + i].chip = &cayman_irq_type;
++	}
++
++	/* Setup the SMSC interrupt */
++	setup_irq(SMSC_IRQ, &cayman_action_smsc);
++	setup_irq(PCI2_IRQ, &cayman_action_pci2);
++}
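For reference, cayman_irq_demux() above narrows a muxed IRQ down to the lowest pending source by ANDing the 8-bit EPLD status and mask registers and scanning for the first set bit. A standalone, hypothetical sketch of just that scan (register values here are invented; only the logic mirrors the code above):

#include <stdio.h>

/*
 * Return the index of the lowest set bit in (status & mask & 0xff),
 * or -1 if nothing is pending -- the offset that cayman_irq_demux()
 * adds to START_EXT_IRQS.
 */
static int lowest_pending(unsigned long status, unsigned long mask)
{
	int i;

	status &= mask & 0xff;
	if (!status)
		return -1;
	for (i = 0; i < 8; i++)
		if (status & (1 << i))
			break;
	return i;
}

int main(void)
{
	printf("%d\n", lowest_pending(0x28, 0xff));	/* prints 3 */
	return 0;
}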
+diff --git a/arch/sh/boards/cayman/led.c b/arch/sh/boards/cayman/led.c
+new file mode 100644
+index 0000000..a808eac
+--- /dev/null
++++ b/arch/sh/boards/cayman/led.c
+@@ -0,0 +1,51 @@
++/*
++ * arch/sh/boards/cayman/led.c
++ *
++ * Copyright (C) 2002 Stuart Menefy <stuart.menefy at st.com>
++ *
++ * May be copied or modified under the terms of the GNU General Public
++ * License.  See linux/COPYING for more information.
++ *
++ * Flash the LEDs
++ */
++#include <asm/io.h>
++
++/*
++** These functions are intended for low-level debugging (via the
++** Cayman LEDs), and hence should be available as early as
++** possible.
++** Unfortunately the Cayman LEDs rely on the Cayman EPLD being
++** mapped (this happens when IRQs are initialized... quite late).
++** These tricky dependencies should be removed. For now, it may be
++** enough to make these no-ops until the EPLD is mapped.
++*/
++
++extern unsigned long epld_virt;
++
++#define LED_ADDR      (epld_virt + 0x008)
++#define HDSP2534_ADDR (epld_virt + 0x100)
++
++void mach_led(int position, int value)
++{
++	if (!epld_virt)
++		return;
++
++	if (value)
++		ctrl_outl(0, LED_ADDR);
++	else
++		ctrl_outl(1, LED_ADDR);
++
++}
++
++void mach_alphanum(int position, unsigned char value)
++{
++	if (!epld_virt)
++		return;
++
++	ctrl_outb(value, HDSP2534_ADDR + 0xe0 + (position << 2));
++}
++
++void mach_alphanum_brightness(int setting)
++{
++	ctrl_outb(setting & 7, HDSP2534_ADDR + 0xc0);
++}
+diff --git a/arch/sh/boards/cayman/setup.c b/arch/sh/boards/cayman/setup.c
+new file mode 100644
+index 0000000..8c9fa47
+--- /dev/null
++++ b/arch/sh/boards/cayman/setup.c
+@@ -0,0 +1,187 @@
++/*
++ * arch/sh/mach-cayman/setup.c
++ *
++ * SH5 Cayman support
++ *
++ * Copyright (C) 2002  David J. Mckay & Benedict Gaster
++ * Copyright (C) 2003 - 2007  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <asm/cpu/irq.h>
++
++/*
++ * Platform Dependent Interrupt Priorities.
++ */
++
++/* Using defaults defined in irq.h */
++#define	RES NO_PRIORITY		/* Disabled */
++#define IR0 IRL0_PRIORITY	/* IRLs */
++#define IR1 IRL1_PRIORITY
++#define IR2 IRL2_PRIORITY
++#define IR3 IRL3_PRIORITY
++#define PCA INTA_PRIORITY	/* PCI Ints */
++#define PCB INTB_PRIORITY
++#define PCC INTC_PRIORITY
++#define PCD INTD_PRIORITY
++#define SER TOP_PRIORITY
++#define ERR TOP_PRIORITY
++#define PW0 TOP_PRIORITY
++#define PW1 TOP_PRIORITY
++#define PW2 TOP_PRIORITY
++#define PW3 TOP_PRIORITY
++#define DM0 NO_PRIORITY		/* DMA Ints */
++#define DM1 NO_PRIORITY
++#define DM2 NO_PRIORITY
++#define DM3 NO_PRIORITY
++#define DAE NO_PRIORITY
++#define TU0 TIMER_PRIORITY	/* TMU Ints */
++#define TU1 NO_PRIORITY
++#define TU2 NO_PRIORITY
++#define TI2 NO_PRIORITY
++#define ATI NO_PRIORITY		/* RTC Ints */
++#define PRI NO_PRIORITY
++#define CUI RTC_PRIORITY
++#define ERI SCIF_PRIORITY	/* SCIF Ints */
++#define RXI SCIF_PRIORITY
++#define BRI SCIF_PRIORITY
++#define TXI SCIF_PRIORITY
++#define ITI TOP_PRIORITY	/* WDT Ints */
++
++/* Setup for the SMSC FDC37C935 */
++#define SMSC_SUPERIO_BASE	0x04000000
++#define SMSC_CONFIG_PORT_ADDR	0x3f0
++#define SMSC_INDEX_PORT_ADDR	SMSC_CONFIG_PORT_ADDR
++#define SMSC_DATA_PORT_ADDR	0x3f1
++
++#define SMSC_ENTER_CONFIG_KEY	0x55
++#define SMSC_EXIT_CONFIG_KEY	0xaa
++
++#define SMCS_LOGICAL_DEV_INDEX	0x07
++#define SMSC_DEVICE_ID_INDEX	0x20
++#define SMSC_DEVICE_REV_INDEX	0x21
++#define SMSC_ACTIVATE_INDEX	0x30
++#define SMSC_PRIMARY_BASE_INDEX  0x60
++#define SMSC_SECONDARY_BASE_INDEX 0x62
++#define SMSC_PRIMARY_INT_INDEX	0x70
++#define SMSC_SECONDARY_INT_INDEX 0x72
++
++#define SMSC_IDE1_DEVICE	1
++#define SMSC_KEYBOARD_DEVICE	7
++#define SMSC_CONFIG_REGISTERS	8
++
++#define SMSC_SUPERIO_READ_INDEXED(index) ({ \
++	outb((index), SMSC_INDEX_PORT_ADDR); \
++	inb(SMSC_DATA_PORT_ADDR); })
++#define SMSC_SUPERIO_WRITE_INDEXED(val, index) ({ \
++	outb((index), SMSC_INDEX_PORT_ADDR); \
++	outb((val),   SMSC_DATA_PORT_ADDR); })
++
++#define IDE1_PRIMARY_BASE	0x01f0
++#define IDE1_SECONDARY_BASE	0x03f6
++
++unsigned long smsc_superio_virt;
++
++int platform_int_priority[NR_INTC_IRQS] = {
++	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
++	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
++	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
++	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
++	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
++	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
++	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
++	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
++};
++
++static int __init smsc_superio_setup(void)
++{
++	unsigned char devid, devrev;
++
++	smsc_superio_virt = onchip_remap(SMSC_SUPERIO_BASE, 1024, "SMSC SuperIO");
++	if (!smsc_superio_virt) {
++		panic("Unable to remap SMSC SuperIO\n");
++	}
++
++	/* Initially the chip is in run state */
++	/* Put it into configuration state */
++	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
++	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
++
++	/* Read device ID info */
++	devid = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
++	devrev = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_REV_INDEX);
++	printk("SMSC SuperIO devid %02x rev %02x\n", devid, devrev);
++
++	/* Select the keyboard device */
++	SMSC_SUPERIO_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
++
++	/* enable it */
++	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
++
++	/* Select the interrupts */
++	/* On a PC, the keyboard is IRQ1 and the mouse is IRQ12 */
++	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
++	SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
++
++#ifdef CONFIG_IDE
++	/*
++	 * Only IDE1 exists on the Cayman
++	 */
++
++	/* Power it on */
++	SMSC_SUPERIO_WRITE_INDEXED(1 << SMSC_IDE1_DEVICE, 0x22);
++
++	SMSC_SUPERIO_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
++	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
++
++	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE >> 8,
++				   SMSC_PRIMARY_BASE_INDEX + 0);
++	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE & 0xff,
++				   SMSC_PRIMARY_BASE_INDEX + 1);
++
++	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE >> 8,
++				   SMSC_SECONDARY_BASE_INDEX + 0);
++	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE & 0xff,
++				   SMSC_SECONDARY_BASE_INDEX + 1);
++
++	SMSC_SUPERIO_WRITE_INDEXED(14, SMSC_PRIMARY_INT_INDEX);
++
++	SMSC_SUPERIO_WRITE_INDEXED(SMSC_CONFIG_REGISTERS,
++				   SMCS_LOGICAL_DEV_INDEX);
++
++	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc2); /* GP42 = nIDE1_OE */
++	SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
++	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
++	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
++#endif
++
++	/* Exit the configuration state */
++	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
++
++	return 0;
++}
++__initcall(smsc_superio_setup);
++
++static void __iomem *cayman_ioport_map(unsigned long port, unsigned int len)
++{
++	if (port < 0x400) {
++		extern unsigned long smsc_superio_virt;
++		return (void __iomem *)((port << 2) | smsc_superio_virt);
++	}
++
++	return (void __iomem *)port;
++}
++
++extern void init_cayman_irq(void);
++
++static struct sh_machine_vector mv_cayman __initmv = {
++	.mv_name		= "Hitachi Cayman",
++	.mv_nr_irqs		= 64,
++	.mv_ioport_map		= cayman_ioport_map,
++	.mv_init_irq		= init_cayman_irq,
++};
+diff --git a/arch/sh/boards/dreamcast/irq.c b/arch/sh/boards/dreamcast/irq.c
+index 5bf01f8..9d0673a 100644
+--- a/arch/sh/boards/dreamcast/irq.c
++++ b/arch/sh/boards/dreamcast/irq.c
+@@ -136,7 +136,7 @@ int systemasic_irq_demux(int irq)
+         emr = EMR_BASE + (level << 4) + (level << 2);
+         esr = ESR_BASE + (level << 2);
+ 
+-        /* Mask the ESR to filter any spurious, unwanted interrtupts */
++        /* Mask the ESR to filter any spurious, unwanted interrupts */
+         status = inl(esr);
+         status &= inl(emr);
+ 
+diff --git a/arch/sh/boards/dreamcast/setup.c b/arch/sh/boards/dreamcast/setup.c
+index 8799df6..2581c8c 100644
+--- a/arch/sh/boards/dreamcast/setup.c
++++ b/arch/sh/boards/dreamcast/setup.c
+@@ -33,9 +33,6 @@ extern void aica_time_init(void);
+ extern int gapspci_init(void);
+ extern int systemasic_irq_demux(int);
+ 
+-void *dreamcast_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t);
+-int dreamcast_consistent_free(struct device *, size_t, void *, dma_addr_t);
+-
+ static void __init dreamcast_setup(char **cmdline_p)
+ {
+ 	int i;
+@@ -64,9 +61,4 @@ static struct sh_machine_vector mv_dreamcast __initmv = {
+ 	.mv_name		= "Sega Dreamcast",
+ 	.mv_setup		= dreamcast_setup,
+ 	.mv_irq_demux		= systemasic_irq_demux,
+-
+-#ifdef CONFIG_PCI
+-	.mv_consistent_alloc	= dreamcast_consistent_alloc,
+-	.mv_consistent_free	= dreamcast_consistent_free,
+-#endif
+ };
+diff --git a/arch/sh/boards/landisk/gio.c b/arch/sh/boards/landisk/gio.c
+index a37643d..1702508 100644
+--- a/arch/sh/boards/landisk/gio.c
++++ b/arch/sh/boards/landisk/gio.c
+@@ -121,7 +121,7 @@ static int gio_ioctl(struct inode *inode, struct file *filp,
+ 	return 0;
+ }
+ 
+-static struct file_operations gio_fops = {
++static const struct file_operations gio_fops = {
+ 	.owner = THIS_MODULE,
+ 	.open = gio_open,	/* open */
+ 	.release = gio_close,	/* release */
+diff --git a/arch/sh/boards/renesas/hs7751rvoip/Kconfig b/arch/sh/boards/renesas/hs7751rvoip/Kconfig
+deleted file mode 100644
+index 1743be4..0000000
+--- a/arch/sh/boards/renesas/hs7751rvoip/Kconfig
++++ /dev/null
+@@ -1,12 +0,0 @@
+-if SH_HS7751RVOIP
+-
+-menu "HS7751RVoIP options"
+-
+-config HS7751RVOIP_CODEC
+-	bool "Support VoIP Codec section"
+-	help
+-	  Selecting this option will support CODEC section.
+-
+-endmenu
+-
+-endif
+diff --git a/arch/sh/boards/renesas/hs7751rvoip/Makefile b/arch/sh/boards/renesas/hs7751rvoip/Makefile
+deleted file mode 100644
+index e626377..0000000
+--- a/arch/sh/boards/renesas/hs7751rvoip/Makefile
++++ /dev/null
+@@ -1,8 +0,0 @@
+-#
+-# Makefile for the HS7751RVoIP specific parts of the kernel
+-#
+-
+-obj-y	 := setup.o io.o irq.o
+-
+-obj-$(CONFIG_PCI) += pci.o
+-
+diff --git a/arch/sh/boards/renesas/hs7751rvoip/io.c b/arch/sh/boards/renesas/hs7751rvoip/io.c
+deleted file mode 100644
+index bb9aa0d..0000000
+--- a/arch/sh/boards/renesas/hs7751rvoip/io.c
++++ /dev/null
+@@ -1,283 +0,0 @@
+-/*
+- * linux/arch/sh/boards/renesas/hs7751rvoip/io.c
+- *
+- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
+- * Based largely on io_se.c.
+- *
+- * I/O routine for Renesas Technology sales HS7751RVoIP
+- *
+- * Initial version only to support LAN access; some
+- * placeholder code from io_hs7751rvoip.c left in with the
+- * expectation of later SuperIO and PCMCIA access.
+- */
+-#include <linux/kernel.h>
+-#include <linux/types.h>
+-#include <linux/module.h>
+-#include <linux/pci.h>
+-#include <asm/io.h>
+-#include <asm/hs7751rvoip.h>
+-#include <asm/addrspace.h>
+-
+-extern void *area6_io8_base;	/* Area 6 8bit I/O Base address */
+-extern void *area5_io16_base;	/* Area 5 16bit I/O Base address */
+-
+-/*
+- * The 7751R HS7751RVoIP uses the built-in PCI controller (PCIC)
+- * of the 7751R processor, and has a SuperIO accessible via the PCI.
+- * The board also includes a PCMCIA controller on its memory bus,
+- * like the other Solution Engine boards.
+- */
+-
+-#define CODEC_IO_BASE	0x1000
+-#define CODEC_IOMAP(a)	((unsigned long)area6_io8_base + ((a) - CODEC_IO_BASE))
+-
+-static inline unsigned long port2adr(unsigned int port)
+-{
+-	if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
+-		if (port == 0x3f6)
+-			return ((unsigned long)area5_io16_base + 0x0c);
+-		else
+-			return ((unsigned long)area5_io16_base + 0x800 +
+-				((port-0x1f0) << 1));
+-	else
+-		maybebadio((unsigned long)port);
+-	return port;
+-}
+-
+-/* The 7751R HS7751RVoIP seems to have everything hooked */
+-/* up pretty normally (nothing on high-bytes only...) so this */
+-/* shouldn't be needed */
+-static inline int shifted_port(unsigned long port)
+-{
+-	/* For IDE registers, value is not shifted */
+-	if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
+-		return 0;
+-	else
+-		return 1;
+-}
+-
+-#if defined(CONFIG_HS7751RVOIP_CODEC)
+-#define codec_port(port)	\
+-	((CODEC_IO_BASE <= (port)) && ((port) < (CODEC_IO_BASE + 0x20)))
+-#else
+-#define codec_port(port)	(0)
+-#endif
+-
+-/*
+- * General outline: remap really low stuff [eventually] to SuperIO,
+- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
+- * is mapped through the PCI IO window.  Stuff with high bits (PXSEG)
+- * should be way beyond the window, and is used  w/o translation for
+- * compatibility.
+- */
+-unsigned char hs7751rvoip_inb(unsigned long port)
+-{
+-	if (PXSEG(port))
+-		return ctrl_inb(port);
+-	else if (codec_port(port))
+-		return ctrl_inb(CODEC_IOMAP(port));
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		return ctrl_inb(pci_ioaddr(port));
+-	else
+-		return ctrl_inw(port2adr(port)) & 0xff;
+-}
+-
+-unsigned char hs7751rvoip_inb_p(unsigned long port)
+-{
+-	unsigned char v;
+-
+-        if (PXSEG(port))
+-		v = ctrl_inb(port);
+-	else if (codec_port(port))
+-		v = ctrl_inb(CODEC_IOMAP(port));
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		v = ctrl_inb(pci_ioaddr(port));
+-	else
+-		v = ctrl_inw(port2adr(port)) & 0xff;
+-	ctrl_delay();
+-	return v;
+-}
+-
+-unsigned short hs7751rvoip_inw(unsigned long port)
+-{
+-        if (PXSEG(port))
+-		return ctrl_inw(port);
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		return ctrl_inw(pci_ioaddr(port));
+-	else
+-		maybebadio(port);
+-	return 0;
+-}
+-
+-unsigned int hs7751rvoip_inl(unsigned long port)
+-{
+-        if (PXSEG(port))
+-		return ctrl_inl(port);
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		return ctrl_inl(pci_ioaddr(port));
+-	else
+-		maybebadio(port);
+-	return 0;
+-}
+-
+-void hs7751rvoip_outb(unsigned char value, unsigned long port)
+-{
+-
+-        if (PXSEG(port))
+-		ctrl_outb(value, port);
+-	else if (codec_port(port))
+-		ctrl_outb(value, CODEC_IOMAP(port));
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		ctrl_outb(value, pci_ioaddr(port));
+-	else
+-		ctrl_outb(value, port2adr(port));
+-}
+-
+-void hs7751rvoip_outb_p(unsigned char value, unsigned long port)
+-{
+-        if (PXSEG(port))
+-		ctrl_outb(value, port);
+-	else if (codec_port(port))
+-		ctrl_outb(value, CODEC_IOMAP(port));
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		ctrl_outb(value, pci_ioaddr(port));
+-	else
+-		ctrl_outw(value, port2adr(port));
+-
+-	ctrl_delay();
+-}
+-
+-void hs7751rvoip_outw(unsigned short value, unsigned long port)
+-{
+-        if (PXSEG(port))
+-		ctrl_outw(value, port);
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		ctrl_outw(value, pci_ioaddr(port));
+-	else
+-		maybebadio(port);
+-}
+-
+-void hs7751rvoip_outl(unsigned int value, unsigned long port)
+-{
+-        if (PXSEG(port))
+-		ctrl_outl(value, port);
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		ctrl_outl(value, pci_ioaddr(port));
+-	else
+-		maybebadio(port);
+-}
+-
+-void hs7751rvoip_insb(unsigned long port, void *addr, unsigned long count)
+-{
+-	u8 *buf = addr;
+-
+-	if (PXSEG(port))
+-		while (count--)
+-			*buf++ = ctrl_inb(port);
+-	else if (codec_port(port))
+-		while (count--)
+-			*buf++ = ctrl_inb(CODEC_IOMAP(port));
+-	else if (is_pci_ioaddr(port) || shifted_port(port)) {
+-		volatile u8 *bp = (volatile u8 *)pci_ioaddr(port);
+-
+-		while (count--)
+-			*buf++ = *bp;
+-	} else {
+-		volatile u16 *p = (volatile u16 *)port2adr(port);
+-
+-		while (count--)
+-			*buf++ = *p & 0xff;
+-	}
+-}
+-
+-void hs7751rvoip_insw(unsigned long port, void *addr, unsigned long count)
+-{
+-	volatile u16 *p;
+-	u16 *buf = addr;
+-
+-	if (PXSEG(port))
+-		p = (volatile u16 *)port;
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		p = (volatile u16 *)pci_ioaddr(port);
+-	else
+-		p = (volatile u16 *)port2adr(port);
+-	while (count--)
+-		*buf++ = *p;
+-}
+-
+-void hs7751rvoip_insl(unsigned long port, void *addr, unsigned long count)
+-{
+-
+-	if (is_pci_ioaddr(port) || shifted_port(port)) {
+-		volatile u32 *p = (volatile u32 *)pci_ioaddr(port);
+-		u32 *buf = addr;
+-
+-		while (count--)
+-			*buf++ = *p;
+-	} else
+-		maybebadio(port);
+-}
+-
+-void hs7751rvoip_outsb(unsigned long port, const void *addr, unsigned long count)
+-{
+-	const u8 *buf = addr;
+-
+-	if (PXSEG(port))
+-		while (count--)
+-			ctrl_outb(*buf++, port);
+-	else if (codec_port(port))
+-		while (count--)
+-			ctrl_outb(*buf++, CODEC_IOMAP(port));
+-	else if (is_pci_ioaddr(port) || shifted_port(port)) {
+-		volatile u8 *bp = (volatile u8 *)pci_ioaddr(port);
+-
+-		while (count--)
+-			*bp = *buf++;
+-	} else {
+-		volatile u16 *p = (volatile u16 *)port2adr(port);
+-
+-		while (count--)
+-			*p = *buf++;
+-	}
+-}
+-
+-void hs7751rvoip_outsw(unsigned long port, const void *addr, unsigned long count)
+-{
+-	volatile u16 *p;
+-	const u16 *buf = addr;
+-
+-	if (PXSEG(port))
+-		p = (volatile u16 *)port;
+-	else if (is_pci_ioaddr(port) || shifted_port(port))
+-		p = (volatile u16 *)pci_ioaddr(port);
+-	else
+-		p = (volatile u16 *)port2adr(port);
+-
+-	while (count--)
+-		*p = *buf++;
+-}
+-
+-void hs7751rvoip_outsl(unsigned long port, const void *addr, unsigned long count)
+-{
+-	const u32 *buf = addr;
+-
+-	if (is_pci_ioaddr(port) || shifted_port(port)) {
+-		volatile u32 *p = (volatile u32 *)pci_ioaddr(port);
+-
+-		while (count--)
+-			*p = *buf++;
+-	} else
+-		maybebadio(port);
+-}
+-
+-void __iomem *hs7751rvoip_ioport_map(unsigned long port, unsigned int size)
+-{
+-        if (PXSEG(port))
+-                return (void __iomem *)port;
+-	else if (unlikely(codec_port(port) && (size == 1)))
+-		return (void __iomem *)CODEC_IOMAP(port);
+-        else if (is_pci_ioaddr(port))
+-                return (void __iomem *)pci_ioaddr(port);
+-
+-        return (void __iomem *)port2adr(port);
+-}
+diff --git a/arch/sh/boards/renesas/hs7751rvoip/irq.c b/arch/sh/boards/renesas/hs7751rvoip/irq.c
+deleted file mode 100644
+index e55c668..0000000
+--- a/arch/sh/boards/renesas/hs7751rvoip/irq.c
++++ /dev/null
+@@ -1,116 +0,0 @@
+-/*
+- * linux/arch/sh/boards/renesas/hs7751rvoip/irq.c
+- *
+- * Copyright (C) 2000  Kazumoto Kojima
+- *
+- * Renesas Technology Sales HS7751RVoIP Support.
+- *
+- * Modified for HS7751RVoIP by
+- * Atom Create Engineering Co., Ltd. 2002.
+- * Lineo uSolutions, Inc. 2003.
+- */
+-
+-#include <linux/init.h>
+-#include <linux/irq.h>
+-#include <linux/interrupt.h>
+-#include <asm/io.h>
+-#include <asm/irq.h>
+-#include <asm/hs7751rvoip.h>
+-
+-static int mask_pos[] = {8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7};
+-
+-static void enable_hs7751rvoip_irq(unsigned int irq);
+-static void disable_hs7751rvoip_irq(unsigned int irq);
+-
+-/* shutdown is same as "disable" */
+-#define shutdown_hs7751rvoip_irq disable_hs7751rvoip_irq
+-
+-static void ack_hs7751rvoip_irq(unsigned int irq);
+-static void end_hs7751rvoip_irq(unsigned int irq);
+-
+-static unsigned int startup_hs7751rvoip_irq(unsigned int irq)
+-{
+-	enable_hs7751rvoip_irq(irq);
+-	return 0; /* never anything pending */
+-}
+-
+-static void disable_hs7751rvoip_irq(unsigned int irq)
+-{
+-	unsigned short val;
+-	unsigned short mask = 0xffff ^ (0x0001 << mask_pos[irq]);
+-
+-	/* Set the priority in IPR to 0 */
+-	val = ctrl_inw(IRLCNTR3);
+-	val &= mask;
+-	ctrl_outw(val, IRLCNTR3);
+-}
+-
+-static void enable_hs7751rvoip_irq(unsigned int irq)
+-{
+-	unsigned short val;
+-	unsigned short value = (0x0001 << mask_pos[irq]);
+-
+-	/* Set priority in IPR back to original value */
+-	val = ctrl_inw(IRLCNTR3);
+-	val |= value;
+-	ctrl_outw(val, IRLCNTR3);
+-}
+-
+-static void ack_hs7751rvoip_irq(unsigned int irq)
+-{
+-	disable_hs7751rvoip_irq(irq);
+-}
+-
+-static void end_hs7751rvoip_irq(unsigned int irq)
+-{
+-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+-		enable_hs7751rvoip_irq(irq);
+-}
+-
+-static struct hw_interrupt_type hs7751rvoip_irq_type = {
+-	.typename =  "HS7751RVoIP IRQ",
+-	.startup = startup_hs7751rvoip_irq,
+-	.shutdown = shutdown_hs7751rvoip_irq,
+-	.enable = enable_hs7751rvoip_irq,
+-	.disable = disable_hs7751rvoip_irq,
+-	.ack = ack_hs7751rvoip_irq,
+-	.end = end_hs7751rvoip_irq,
+-};
+-
+-static void make_hs7751rvoip_irq(unsigned int irq)
+-{
+-	disable_irq_nosync(irq);
+-	irq_desc[irq].chip = &hs7751rvoip_irq_type;
+-	disable_hs7751rvoip_irq(irq);
+-}
+-
+-/*
+- * Initialize IRQ setting
+- */
+-void __init init_hs7751rvoip_IRQ(void)
+-{
+-	int i;
+-
+-	/* IRL0=ON HOOK1
+-	 * IRL1=OFF HOOK1
+-	 * IRL2=ON HOOK2
+-	 * IRL3=OFF HOOK2
+-	 * IRL4=Ringing Detection
+-	 * IRL5=CODEC
+-	 * IRL6=Ethernet
+-	 * IRL7=Ethernet Hub
+-	 * IRL8=USB Communication
+-	 * IRL9=USB Connection
+-	 * IRL10=USB DMA
+-	 * IRL11=CF Card
+-	 * IRL12=PCMCIA
+-	 * IRL13=PCI Slot
+-	 */
+-	ctrl_outw(0x9876, IRLCNTR1);
+-	ctrl_outw(0xdcba, IRLCNTR2);
+-	ctrl_outw(0x0050, IRLCNTR4);
+-	ctrl_outw(0x4321, IRLCNTR5);
+-
+-	for (i=0; i<14; i++)
+-		make_hs7751rvoip_irq(i);
+-}
+diff --git a/arch/sh/boards/renesas/hs7751rvoip/pci.c b/arch/sh/boards/renesas/hs7751rvoip/pci.c
+deleted file mode 100644
+index 1c0ddee..0000000
+--- a/arch/sh/boards/renesas/hs7751rvoip/pci.c
++++ /dev/null
+@@ -1,149 +0,0 @@
+-/*
+- * linux/arch/sh/boards/renesas/hs7751rvoip/pci.c
+- *
+- * Author:  Ian DaSilva (idasilva at mvista.com)
+- *
+- * Highly leveraged from pci-bigsur.c, written by Dustin McIntire.
+- *
+- * May be copied or modified under the terms of the GNU General Public
+- * License.  See linux/COPYING for more information.
+- *
+- * PCI initialization for the Renesas SH7751R HS7751RVoIP board
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/types.h>
+-#include <linux/init.h>
+-#include <linux/delay.h>
+-#include <linux/pci.h>
+-#include <linux/module.h>
+-
+-#include <asm/io.h>
+-#include "../../../drivers/pci/pci-sh7751.h"
+-#include <asm/hs7751rvoip/hs7751rvoip.h>
+-
+-#define PCIMCR_MRSET_OFF	0xBFFFFFFF
+-#define PCIMCR_RFSH_OFF		0xFFFFFFFB
+-
+-/*
+- * Only long word accesses of the PCIC's internal local registers and the
+- * configuration registers from the CPU is supported.
+- */
+-#define PCIC_WRITE(x,v) writel((v), PCI_REG(x))
+-#define PCIC_READ(x) readl(PCI_REG(x))
+-
+-/*
+- * Description:  This function sets up and initializes the pcic, sets
+- * up the BARS, maps the DRAM into the address space etc, etc.
+- */
+-int __init pcibios_init_platform(void)
+-{
+-	unsigned long bcr1, wcr1, wcr2, wcr3, mcr;
+-	unsigned short bcr2, bcr3;
+-
+-	/*
+-	 * Initialize the slave bus controller on the pcic.  The values used
+-	 * here should not be hardcoded, but they should be taken from the bsc
+-	 * on the processor, to make this function as generic as possible.
+-	 * (i.e. Another sbc may usr different SDRAM timing settings -- in order
+-	 * for the pcic to work, its settings need to be exactly the same.)
+-	 */
+-	bcr1 = (*(volatile unsigned long *)(SH7751_BCR1));
+-	bcr2 = (*(volatile unsigned short *)(SH7751_BCR2));
+-	bcr3 = (*(volatile unsigned short *)(SH7751_BCR3));
+-	wcr1 = (*(volatile unsigned long *)(SH7751_WCR1));
+-	wcr2 = (*(volatile unsigned long *)(SH7751_WCR2));
+-	wcr3 = (*(volatile unsigned long *)(SH7751_WCR3));
+-	mcr = (*(volatile unsigned long *)(SH7751_MCR));
+-
+-	bcr1 = bcr1 | 0x00080000;  /* Enable Bit 19, BREQEN */
+-	(*(volatile unsigned long *)(SH7751_BCR1)) = bcr1;
+-
+-	bcr1 = bcr1 | 0x40080000;  /* Enable Bit 19 BREQEN, set PCIC to slave */
+-	PCIC_WRITE(SH7751_PCIBCR1, bcr1);	/* PCIC BCR1 */
+-	PCIC_WRITE(SH7751_PCIBCR2, bcr2);	/* PCIC BCR2 */
+-	PCIC_WRITE(SH7751_PCIBCR3, bcr3);	/* PCIC BCR3 */
+-	PCIC_WRITE(SH7751_PCIWCR1, wcr1);	/* PCIC WCR1 */
+-	PCIC_WRITE(SH7751_PCIWCR2, wcr2);	/* PCIC WCR2 */
+-	PCIC_WRITE(SH7751_PCIWCR3, wcr3);	/* PCIC WCR3 */
+-	mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
+-	PCIC_WRITE(SH7751_PCIMCR, mcr);		/* PCIC MCR */
+-
+-	/* Enable all interrupts, so we know what to fix */
+-	PCIC_WRITE(SH7751_PCIINTM, 0x0000c3ff);
+-	PCIC_WRITE(SH7751_PCIAINTM, 0x0000380f);
+-
+-	/* Set up standard PCI config registers */
+-	PCIC_WRITE(SH7751_PCICONF1, 0xFB900047); /* Bus Master, Mem & I/O access */
+-	PCIC_WRITE(SH7751_PCICONF2, 0x00000000); /* PCI Class code & Revision ID */
+-	PCIC_WRITE(SH7751_PCICONF4, 0xab000001); /* PCI I/O address (local regs) */
+-	PCIC_WRITE(SH7751_PCICONF5, 0x0c000000); /* PCI MEM address (local RAM)  */
+-	PCIC_WRITE(SH7751_PCICONF6, 0xd0000000); /* PCI MEM address (unused) */
+-	PCIC_WRITE(SH7751_PCICONF11, 0x35051054); /* PCI Subsystem ID & Vendor ID */
+-	PCIC_WRITE(SH7751_PCILSR0, 0x03f00000);	/* MEM (full 64M exposed) */
+-	PCIC_WRITE(SH7751_PCILSR1, 0x00000000); /* MEM (unused) */
+-	PCIC_WRITE(SH7751_PCILAR0, 0x0c000000); /* MEM (direct map from PCI) */
+-	PCIC_WRITE(SH7751_PCILAR1, 0x00000000); /* MEM (unused) */
+-
+-	/* Now turn it on... */
+-	PCIC_WRITE(SH7751_PCICR, 0xa5000001);
+-
+-	/*
+-	 * Set PCIMBR and PCIIOBR here, assuming a single window
+-	 * (16M MEM, 256K IO) is enough.  If a larger space is
+-	 * needed, the readx/writex and inx/outx functions will
+-	 * have to do more (e.g. setting registers for each call).
+-	 */
+-
+-	/*
+-	 * Set the MBR so PCI address is one-to-one with window,
+-	 * meaning all calls go straight through... use ifdef to
+-	 * catch erroneous assumption.
+-	 */
+-	BUG_ON(PCIBIOS_MIN_MEM != SH7751_PCI_MEMORY_BASE);
+-
+-	PCIC_WRITE(SH7751_PCIMBR, PCIBIOS_MIN_MEM);
+-
+-	/* Set IOBR for window containing area specified in pci.h */
+-	PCIC_WRITE(SH7751_PCIIOBR, (PCIBIOS_MIN_IO & SH7751_PCIIOBR_MASK));
+-
+-	/* All done, may as well say so... */
+-	printk("SH7751R PCI: Finished initialization of the PCI controller\n");
+-
+-	return 1;
+-}
+-
+-int __init pcibios_map_platform_irq(u8 slot, u8 pin)
+-{
+-        switch (slot) {
+-	case 0: return IRQ_PCISLOT;	/* PCI Extend slot */
+-	case 1: return IRQ_PCMCIA;	/* PCI Cardbus Bridge */
+-	case 2: return IRQ_PCIETH;	/* Realtek Ethernet controller */
+-	case 3: return IRQ_PCIHUB;	/* Realtek Ethernet Hub controller */
+-	default:
+-		printk("PCI: Bad IRQ mapping request for slot %d\n", slot);
+-		return -1;
+-	}
+-}
+-
+-static struct resource sh7751_io_resource = {
+-	.name	= "SH7751_IO",
+-	.start	= 0x4000,
+-	.end	= 0x4000 + SH7751_PCI_IO_SIZE - 1,
+-	.flags	= IORESOURCE_IO
+-};
+-
+-static struct resource sh7751_mem_resource = {
+-	.name	= "SH7751_mem",
+-	.start	= SH7751_PCI_MEMORY_BASE,
+-	.end	= SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1,
+-	.flags	= IORESOURCE_MEM
+-};
+-
+-extern struct pci_ops sh7751_pci_ops;
+-
+-struct pci_channel board_pci_channels[] = {
+-	{ &sh7751_pci_ops, &sh7751_io_resource, &sh7751_mem_resource, 0, 0xff },
+-	{ NULL, NULL, NULL, 0, 0 },
+-};
+-EXPORT_SYMBOL(board_pci_channels);
+diff --git a/arch/sh/boards/renesas/hs7751rvoip/setup.c b/arch/sh/boards/renesas/hs7751rvoip/setup.c
+deleted file mode 100644
+index c056259..0000000
+--- a/arch/sh/boards/renesas/hs7751rvoip/setup.c
++++ /dev/null
+@@ -1,105 +0,0 @@
+-/*
+- * Renesas Technology Sales HS7751RVoIP Support.
+- *
+- * Copyright (C) 2000  Kazumoto Kojima
+- *
+- * Modified for HS7751RVoIP by
+- * Atom Create Engineering Co., Ltd. 2002.
+- * Lineo uSolutions, Inc. 2003.
+- */
+-#include <linux/init.h>
+-#include <linux/irq.h>
+-#include <linux/mm.h>
+-#include <linux/pm.h>
+-#include <asm/hs7751rvoip.h>
+-#include <asm/io.h>
+-#include <asm/machvec.h>
+-
+-static void hs7751rvoip_power_off(void)
+-{
+-	ctrl_outw(ctrl_inw(PA_OUTPORTR) & 0xffdf, PA_OUTPORTR);
+-}
+-
+-void *area5_io8_base;
+-void *area6_io8_base;
+-void *area5_io16_base;
+-void *area6_io16_base;
+-
+-static int __init hs7751rvoip_cf_init(void)
+-{
+-	pgprot_t prot;
+-	unsigned long paddrbase;
+-
+-	/* open I/O area window */
+-	paddrbase = virt_to_phys((void *)(PA_AREA5_IO+0x00000800));
+-	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_COM16);
+-	area5_io16_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+-	if (!area5_io16_base) {
+-		printk("allocate_cf_area : can't open CF I/O window!\n");
+-		return -ENOMEM;
+-	}
+-
+-	/* XXX : do we need attribute and common-memory area also? */
+-
+-	paddrbase = virt_to_phys((void *)PA_AREA6_IO);
+-#if defined(CONFIG_HS7751RVOIP_CODEC)
+-	prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_COM8);
+-#else
+-	prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_IO8);
+-#endif
+-	area6_io8_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+-	if (!area6_io8_base) {
+-		printk("allocate_cf_area : can't open CODEC I/O 8bit window!\n");
+-		return -ENOMEM;
+-	}
+-	prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_IO16);
+-	area6_io16_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+-	if (!area6_io16_base) {
+-		printk("allocate_cf_area : can't open CODEC I/O 16bit window!\n");
+-		return -ENOMEM;
+-	}
+-
+-	return 0;
+-}
+-device_initcall(hs7751rvoip_cf_init);
+-
+-/*
+- * Initialize the board
+- */
+-static void __init hs7751rvoip_setup(char **cmdline_p)
+-{
+-	ctrl_outb(0xf0, PA_OUTPORTR);
+-	pm_power_off = hs7751rvoip_power_off;
+-
+-	printk(KERN_INFO "Renesas Technology Sales HS7751RVoIP-2 support.\n");
+-}
+-
+-static struct sh_machine_vector mv_hs7751rvoip __initmv = {
+-	.mv_name		= "HS7751RVoIP",
+-	.mv_setup		= hs7751rvoip_setup,
+-	.mv_nr_irqs		= 72,
+-
+-	.mv_inb			= hs7751rvoip_inb,
+-	.mv_inw			= hs7751rvoip_inw,
+-	.mv_inl			= hs7751rvoip_inl,
+-	.mv_outb		= hs7751rvoip_outb,
+-	.mv_outw		= hs7751rvoip_outw,
+-	.mv_outl		= hs7751rvoip_outl,
+-
+-	.mv_inb_p		= hs7751rvoip_inb_p,
+-	.mv_inw_p		= hs7751rvoip_inw,
+-	.mv_inl_p		= hs7751rvoip_inl,
+-	.mv_outb_p		= hs7751rvoip_outb_p,
+-	.mv_outw_p		= hs7751rvoip_outw,
+-	.mv_outl_p		= hs7751rvoip_outl,
+-
+-	.mv_insb		= hs7751rvoip_insb,
+-	.mv_insw		= hs7751rvoip_insw,
+-	.mv_insl		= hs7751rvoip_insl,
+-	.mv_outsb		= hs7751rvoip_outsb,
+-	.mv_outsw		= hs7751rvoip_outsw,
+-	.mv_outsl		= hs7751rvoip_outsl,
+-
+-	.mv_init_irq		= init_hs7751rvoip_IRQ,
+-	.mv_ioport_map		= hs7751rvoip_ioport_map,
+-};
+diff --git a/arch/sh/boards/renesas/r7780rp/Makefile b/arch/sh/boards/renesas/r7780rp/Makefile
+index dd26182..20a1008 100644
+--- a/arch/sh/boards/renesas/r7780rp/Makefile
++++ b/arch/sh/boards/renesas/r7780rp/Makefile
+@@ -3,7 +3,7 @@
+ #
+ irqinit-$(CONFIG_SH_R7780MP)	:= irq-r7780mp.o
+ irqinit-$(CONFIG_SH_R7785RP)	:= irq-r7785rp.o
+-irqinit-$(CONFIG_SH_R7780RP)	:= irq-r7780rp.o irq.o
++irqinit-$(CONFIG_SH_R7780RP)	:= irq-r7780rp.o
+ obj-y				:= setup.o $(irqinit-y)
+ 
+ ifneq ($(CONFIG_SH_R7785RP),y)
+diff --git a/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c b/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c
+index 59b47fe..1f8f073 100644
+--- a/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c
++++ b/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c
+@@ -47,7 +47,7 @@ static unsigned char irl2irq[HL_NR_IRL] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "r7780mp", vectors,
+-			 NULL, NULL, mask_registers, NULL, NULL);
++			 NULL, mask_registers, NULL, NULL);
+ 
+ unsigned char * __init highlander_init_irq_r7780mp(void)
+ {
+diff --git a/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c b/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c
+index fa4a534..bd34048 100644
+--- a/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c
++++ b/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c
+@@ -3,21 +3,65 @@
+  *
+  * Copyright (C) 2002  Atom Create Engineering Co., Ltd.
+  * Copyright (C) 2006  Paul Mundt
++ * Copyright (C) 2008  Magnus Damm
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+  * License.  See the file "COPYING" in the main directory of this archive
+  * for more details.
+  */
+ #include <linux/init.h>
++#include <linux/irq.h>
+ #include <linux/io.h>
+ #include <asm/r7780rp.h>
+ 
++enum {
++	UNUSED = 0,
++
++	/* board specific interrupt sources */
++
++	AX88796,          /* Ethernet controller */
++	PSW,              /* Push Switch */
++	CF,               /* Compact Flash */
++
++	PCI_A,
++	PCI_B,
++	PCI_C,
++	PCI_D,
++};
++
++static struct intc_vect vectors[] __initdata = {
++	INTC_IRQ(PCI_A, 65), /* dirty: overwrite cpu vectors for pci */
++	INTC_IRQ(PCI_B, 66),
++	INTC_IRQ(PCI_C, 67),
++	INTC_IRQ(PCI_D, 68),
++	INTC_IRQ(CF, IRQ_CF),
++	INTC_IRQ(PSW, IRQ_PSW),
++	INTC_IRQ(AX88796, IRQ_AX88796),
++};
++
++static struct intc_mask_reg mask_registers[] __initdata = {
++	{ 0xa5000000, 0, 16, /* IRLMSK */
++	  { PCI_A, PCI_B, PCI_C, PCI_D, CF, 0, 0, 0,
++	    0, 0, 0, 0, 0, 0, PSW, AX88796 } },
++};
++
++static unsigned char irl2irq[HL_NR_IRL] __initdata = {
++	65, 66, 67, 68,
++	IRQ_CF, 0, 0, 0,
++	0, 0, 0, 0,
++	IRQ_AX88796, IRQ_PSW
++};
++
++static DECLARE_INTC_DESC(intc_desc, "r7780rp", vectors,
++			 NULL, mask_registers, NULL, NULL);
++
+ unsigned char * __init highlander_init_irq_r7780rp(void)
+ {
+-	int i;
+-
+-	for (i = 0; i < 15; i++)
+-		make_r7780rp_irq(i);
++	if (ctrl_inw(0xa5000600)) {
++		printk(KERN_INFO "Using r7780rp interrupt controller.\n");
++		register_intc_controller(&intc_desc);
++		return irl2irq;
++	}
+ 
+ 	return NULL;
+ }
+diff --git a/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c b/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c
+index b2c6a84..bf7ec10 100644
+--- a/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c
++++ b/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c
+@@ -2,7 +2,7 @@
+  * Renesas Solutions Highlander R7785RP Support.
+  *
+  * Copyright (C) 2002  Atom Create Engineering Co., Ltd.
+- * Copyright (C) 2006  Paul Mundt
++ * Copyright (C) 2006 - 2008  Paul Mundt
+  * Copyright (C) 2007  Magnus Damm
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+@@ -17,31 +17,52 @@
+ enum {
+ 	UNUSED = 0,
+ 
+-	/* board specific interrupt sources */
+-	AX88796,          /* Ethernet controller */
+-	CF,               /* Compact Flash */
++	/* FPGA specific interrupt sources */
++	CF,		/* Compact Flash */
++	SMBUS,		/* SMBUS */
++	TP,		/* Touch panel */
++	RTC,		/* RTC Alarm */
++	TH_ALERT,	/* Temperature sensor */
++	AX88796,	/* Ethernet controller */
++
++	/* external bus connector */
++	EXT0, EXT1, EXT2, EXT3, EXT4, EXT5, EXT6, EXT7,
+ };
+ 
+ static struct intc_vect vectors[] __initdata = {
+ 	INTC_IRQ(CF, IRQ_CF),
++	INTC_IRQ(SMBUS, IRQ_SMBUS),
++	INTC_IRQ(TP, IRQ_TP),
++	INTC_IRQ(RTC, IRQ_RTC),
++	INTC_IRQ(TH_ALERT, IRQ_TH_ALERT),
++
++	INTC_IRQ(EXT0, IRQ_EXT0), INTC_IRQ(EXT1, IRQ_EXT1),
++	INTC_IRQ(EXT2, IRQ_EXT2), INTC_IRQ(EXT3, IRQ_EXT3),
++
++	INTC_IRQ(EXT4, IRQ_EXT4), INTC_IRQ(EXT5, IRQ_EXT5),
++	INTC_IRQ(EXT6, IRQ_EXT6), INTC_IRQ(EXT7, IRQ_EXT7),
++
+ 	INTC_IRQ(AX88796, IRQ_AX88796),
+ };
+ 
+ static struct intc_mask_reg mask_registers[] __initdata = {
+ 	{ 0xa4000010, 0, 16, /* IRLMCR1 */
+-	  { 0, 0, 0, 0, CF, AX88796, 0, 0,
+-	    0, 0, 0, 0, 0, 0, 0, 0 } },
++	  { 0, 0, 0, 0, CF, AX88796, SMBUS, TP,
++	    RTC, 0, TH_ALERT, 0, 0, 0, 0, 0 } },
++	{ 0xa4000012, 0, 16, /* IRLMCR2 */
++	  { 0, 0, 0, 0, 0, 0, 0, 0,
++	    EXT7, EXT6, EXT5, EXT4, EXT3, EXT2, EXT1, EXT0 } },
+ };
+ 
+ static unsigned char irl2irq[HL_NR_IRL] __initdata = {
+-	0, IRQ_CF, 0, 0,
+-	0, 0, 0, 0,
+-	0, 0, IRQ_AX88796, 0,
+-	0, 0, 0,
++	0, IRQ_CF, IRQ_EXT4, IRQ_EXT5,
++	IRQ_EXT6, IRQ_EXT7, IRQ_SMBUS, IRQ_TP,
++	IRQ_RTC, IRQ_TH_ALERT, IRQ_AX88796, IRQ_EXT0,
++	IRQ_EXT1, IRQ_EXT2, IRQ_EXT3,
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "r7785rp", vectors,
+-			 NULL, NULL, mask_registers, NULL, NULL);
++			 NULL, mask_registers, NULL, NULL);
+ 
+ unsigned char * __init highlander_init_irq_r7785rp(void)
+ {
+@@ -58,7 +79,7 @@ unsigned char * __init highlander_init_irq_r7785rp(void)
+ 	ctrl_outw(0x7060, PA_IRLPRC);	/* FPGA IRLC */
+ 	ctrl_outw(0x0000, PA_IRLPRD);	/* FPGA IRLD */
+ 	ctrl_outw(0x4321, PA_IRLPRE);	/* FPGA IRLE */
+-	ctrl_outw(0x0000, PA_IRLPRF);	/* FPGA IRLF */
++	ctrl_outw(0xdcba, PA_IRLPRF);	/* FPGA IRLF */
+ 
+ 	register_intc_controller(&intc_desc);
+ 	return irl2irq;
+diff --git a/arch/sh/boards/renesas/r7780rp/irq.c b/arch/sh/boards/renesas/r7780rp/irq.c
+deleted file mode 100644
+index e0b8eb5..0000000
+--- a/arch/sh/boards/renesas/r7780rp/irq.c
++++ /dev/null
+@@ -1,51 +0,0 @@
+-/*
+- * Renesas Solutions Highlander R7780RP-1 Support.
+- *
+- * Copyright (C) 2002  Atom Create Engineering Co., Ltd.
+- * Copyright (C) 2006  Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-#include <linux/init.h>
+-#include <linux/irq.h>
+-#include <linux/interrupt.h>
+-#include <linux/io.h>
+-#include <asm/r7780rp.h>
+-
+-#ifdef CONFIG_SH_R7780RP
+-static int mask_pos[] = {15, 14, 13, 12, 11, 10, 9, 8, 7, 5, 6, 4, 0, 1, 2, 0};
+-#elif defined(CONFIG_SH_R7780MP)
+-static int mask_pos[] = {12, 11, 9, 14, 15, 8, 13, 6, 5, 4, 3, 2, 0, 0, 1, 0};
+-#elif defined(CONFIG_SH_R7785RP)
+-static int mask_pos[] = {2, 11, 2, 2, 2, 2, 9, 8, 7, 5, 10, 2, 2, 2, 2, 2};
+-#endif
+-
+-static void enable_r7780rp_irq(unsigned int irq)
+-{
+-	/* Set priority in IPR back to original value */
+-	ctrl_outw(ctrl_inw(IRLCNTR1) | (1 << mask_pos[irq]), IRLCNTR1);
+-}
+-
+-static void disable_r7780rp_irq(unsigned int irq)
+-{
+-	/* Set the priority in IPR to 0 */
+-	ctrl_outw(ctrl_inw(IRLCNTR1) & (0xffff ^ (1 << mask_pos[irq])),
+-		  IRLCNTR1);
+-}
+-
+-static struct irq_chip r7780rp_irq_chip __read_mostly = {
+-	.name		= "R7780RP",
+-	.mask		= disable_r7780rp_irq,
+-	.unmask		= enable_r7780rp_irq,
+-	.mask_ack	= disable_r7780rp_irq,
+-};
+-
+-void make_r7780rp_irq(unsigned int irq)
+-{
+-	disable_irq_nosync(irq);
+-	set_irq_chip_and_handler_name(irq, &r7780rp_irq_chip,
+-				      handle_level_irq, "level");
+-	enable_r7780rp_irq(irq);
+-}
+diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
+index 0fdc0bc..a43b477 100644
+--- a/arch/sh/boards/renesas/r7780rp/setup.c
++++ b/arch/sh/boards/renesas/r7780rp/setup.c
+@@ -179,9 +179,11 @@ static struct platform_device ax88796_device = {
+ static struct platform_device *r7780rp_devices[] __initdata = {
+ 	&r8a66597_usb_host_device,
+ 	&m66592_usb_peripheral_device,
+-	&cf_ide_device,
+ 	&heartbeat_device,
++#ifndef CONFIG_SH_R7780RP
++	&cf_ide_device,
+ 	&ax88796_device,
++#endif
+ };
+ 
+ static int __init r7780rp_devices_setup(void)
+@@ -316,9 +318,9 @@ void __init highlander_init_irq(void)
+ 			break;
+ #endif
+ #ifdef CONFIG_SH_R7780RP
+-		highlander_init_irq_r7780rp();
+-		ucp = irl2irq;
+-		break;
++		ucp = highlander_init_irq_r7780rp();
++		if (ucp)
++			break;
+ #endif
+ 	} while (0);
+ 
+diff --git a/arch/sh/boards/renesas/rts7751r2d/irq.c b/arch/sh/boards/renesas/rts7751r2d/irq.c
+index 7cc2813..8e49f6e 100644
+--- a/arch/sh/boards/renesas/rts7751r2d/irq.c
++++ b/arch/sh/boards/renesas/rts7751r2d/irq.c
+@@ -13,7 +13,6 @@
+ #include <linux/irq.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+-#include <asm/voyagergx.h>
+ #include <asm/rts7751r2d.h>
+ 
+ #define R2D_NR_IRL 13
+@@ -71,7 +70,7 @@ static unsigned char irl2irq_r2d_1[R2D_NR_IRL] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_r2d_1, "r2d-1", vectors_r2d_1,
+-			 NULL, NULL, mask_registers_r2d_1, NULL, NULL);
++			 NULL, mask_registers_r2d_1, NULL, NULL);
+ 
+ #endif /* CONFIG_RTS7751R2D_1 */
+ 
+@@ -109,7 +108,7 @@ static unsigned char irl2irq_r2d_plus[R2D_NR_IRL] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_r2d_plus, "r2d-plus", vectors_r2d_plus,
+-			 NULL, NULL, mask_registers_r2d_plus, NULL, NULL);
++			 NULL, mask_registers_r2d_plus, NULL, NULL);
+ 
+ #endif /* CONFIG_RTS7751R2D_PLUS */
+ 
+@@ -153,7 +152,4 @@ void __init init_rts7751r2d_IRQ(void)
+ 	}
+ 
+ 	register_intc_controller(d);
+-#ifdef CONFIG_MFD_SM501
+-	setup_voyagergx_irq();
+-#endif
+ }
+diff --git a/arch/sh/boards/renesas/rts7751r2d/setup.c b/arch/sh/boards/renesas/rts7751r2d/setup.c
+index 8125d20..3452b07 100644
+--- a/arch/sh/boards/renesas/rts7751r2d/setup.c
++++ b/arch/sh/boards/renesas/rts7751r2d/setup.c
+@@ -13,34 +13,15 @@
+ #include <linux/pata_platform.h>
+ #include <linux/serial_8250.h>
+ #include <linux/sm501.h>
++#include <linux/sm501-regs.h>
+ #include <linux/pm.h>
++#include <linux/fb.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/spi_bitbang.h>
+ #include <asm/machvec.h>
+ #include <asm/rts7751r2d.h>
+-#include <asm/voyagergx.h>
+ #include <asm/io.h>
+-
+-static void __init voyagergx_serial_init(void)
+-{
+-	unsigned long val;
+-
+-	/*
+-	 * GPIO Control
+-	 */
+-	val = readl((void __iomem *)GPIO_MUX_HIGH);
+-	val |= 0x00001fe0;
+-	writel(val, (void __iomem *)GPIO_MUX_HIGH);
+-
+-	/*
+-	 * Power Mode Gate
+-	 */
+-	val = readl((void __iomem *)POWER_MODE0_GATE);
+-	val |= (POWER_MODE0_GATE_U0 | POWER_MODE0_GATE_U1);
+-	writel(val, (void __iomem *)POWER_MODE0_GATE);
+-
+-	val = readl((void __iomem *)POWER_MODE1_GATE);
+-	val |= (POWER_MODE1_GATE_U0 | POWER_MODE1_GATE_U1);
+-	writel(val, (void __iomem *)POWER_MODE1_GATE);
+-}
++#include <asm/spi.h>
+ 
+ static struct resource cf_ide_resources[] = {
+ 	[0] = {
+@@ -75,6 +56,43 @@ static struct platform_device cf_ide_device  = {
+ 	},
+ };
+ 
++static struct spi_board_info spi_bus[] = {
++	{
++		.modalias	= "rtc-r9701",
++		.max_speed_hz	= 1000000,
++		.mode		= SPI_MODE_3,
++	},
++};
++
++static void r2d_chip_select(struct sh_spi_info *spi, int cs, int state)
++{
++	BUG_ON(cs != 0);  /* Single Epson RTC-9701JE attached on CS0 */
++	ctrl_outw(state == BITBANG_CS_ACTIVE, PA_RTCCE);
++}
++
++static struct sh_spi_info spi_info = {
++	.num_chipselect = 1,
++	.chip_select = r2d_chip_select,
++};
++
++static struct resource spi_sh_sci_resources[] = {
++	{
++		.start	= 0xffe00000,
++		.end	= 0xffe0001f,
++		.flags	= IORESOURCE_MEM,
++	},
++};
++
++static struct platform_device spi_sh_sci_device  = {
++	.name		= "spi_sh_sci",
++	.id		= -1,
++	.num_resources	= ARRAY_SIZE(spi_sh_sci_resources),
++	.resource	= spi_sh_sci_resources,
++	.dev	= {
++		.platform_data	= &spi_info,
++	},
++};
++
+ static struct resource heartbeat_resources[] = {
+ 	[0] = {
+ 		.start	= PA_OUTPORT,
+@@ -93,11 +111,11 @@ static struct platform_device heartbeat_device = {
+ #ifdef CONFIG_MFD_SM501
+ static struct plat_serial8250_port uart_platform_data[] = {
+ 	{
+-		.membase	= (void __iomem *)VOYAGER_UART_BASE,
+-		.mapbase	= VOYAGER_UART_BASE,
++		.membase	= (void __iomem *)0xb3e30000,
++		.mapbase	= 0xb3e30000,
+ 		.iotype		= UPIO_MEM,
+-		.irq		= IRQ_SM501_U0,
+-		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
++		.irq		= IRQ_VOYAGER,
++		.flags		= UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ,
+ 		.regshift	= 2,
+ 		.uartclk	= (9600 * 16),
+ 	},
+@@ -124,14 +142,67 @@ static struct resource sm501_resources[] = {
+ 		.flags	= IORESOURCE_MEM,
+ 	},
+ 	[2]	= {
+-		.start	= IRQ_SM501_CV,
++		.start	= IRQ_VOYAGER,
+ 		.flags	= IORESOURCE_IRQ,
+ 	},
+ };
+ 
++static struct fb_videomode sm501_default_mode = {
++	.pixclock	= 35714,
++	.xres		= 640,
++	.yres		= 480,
++	.left_margin	= 105,
++	.right_margin	= 50,
++	.upper_margin	= 35,
++	.lower_margin	= 0,
++	.hsync_len	= 96,
++	.vsync_len	= 2,
++	.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++};
++
++static struct sm501_platdata_fbsub sm501_pdata_fbsub_pnl = {
++	.def_bpp	= 16,
++	.def_mode	= &sm501_default_mode,
++	.flags		= SM501FB_FLAG_USE_INIT_MODE |
++			  SM501FB_FLAG_USE_HWCURSOR |
++			  SM501FB_FLAG_USE_HWACCEL |
++			  SM501FB_FLAG_DISABLE_AT_EXIT,
++};
++
++static struct sm501_platdata_fbsub sm501_pdata_fbsub_crt = {
++	.flags		= (SM501FB_FLAG_USE_INIT_MODE |
++			   SM501FB_FLAG_USE_HWCURSOR |
++			   SM501FB_FLAG_USE_HWACCEL |
++			   SM501FB_FLAG_DISABLE_AT_EXIT),
++
++};
++
++static struct sm501_platdata_fb sm501_fb_pdata = {
++	.fb_route	= SM501_FB_OWN,
++	.fb_crt		= &sm501_pdata_fbsub_crt,
++	.fb_pnl		= &sm501_pdata_fbsub_pnl,
++	.flags		= SM501_FBPD_SWAP_FB_ENDIAN,
++};
++
++static struct sm501_initdata sm501_initdata = {
++	.gpio_high	= {
++		.set	= 0x00001fe0,
++		.mask	= 0x0,
++	},
++	.devices	= SM501_USE_USB_HOST,
++};
++
++static struct sm501_platdata sm501_platform_data = {
++	.init		= &sm501_initdata,
++	.fb		= &sm501_fb_pdata,
++};
++
+ static struct platform_device sm501_device = {
+ 	.name		= "sm501",
+ 	.id		= -1,
++	.dev		= {
++		.platform_data	= &sm501_platform_data,
++	},
+ 	.num_resources	= ARRAY_SIZE(sm501_resources),
+ 	.resource	= sm501_resources,
+ };
+@@ -145,10 +216,12 @@ static struct platform_device *rts7751r2d_devices[] __initdata = {
+ #endif
+ 	&cf_ide_device,
+ 	&heartbeat_device,
++	&spi_sh_sci_device,
+ };
+ 
+ static int __init rts7751r2d_devices_setup(void)
+ {
++	spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
+ 	return platform_add_devices(rts7751r2d_devices,
+ 				    ARRAY_SIZE(rts7751r2d_devices));
+ }
+@@ -192,6 +265,7 @@ u8 rts7751r2d_readb(void __iomem *addr)
+  */
+ static void __init rts7751r2d_setup(char **cmdline_p)
+ {
++	void __iomem *sm501_reg;
+ 	u16 ver = ctrl_inw(PA_VERREG);
+ 
+ 	printk(KERN_INFO "Renesas Technology Sales RTS7751R2D support.\n");
+@@ -202,7 +276,30 @@ static void __init rts7751r2d_setup(char **cmdline_p)
+ 	ctrl_outw(0x0000, PA_OUTPORT);
+ 	pm_power_off = rts7751r2d_power_off;
+ 
+-	voyagergx_serial_init();
++	/* sm501 dram configuration:
++	 * ColSizeX = 11 - External Memory Column Size: 256 words.
++	 * APX = 1 - External Memory Active to Pre-Charge Delay: 7 clocks.
++	 * RstX = 1 - External Memory Reset: Normal.
++	 * Rfsh = 1 - Local Memory Refresh to Command Delay: 12 clocks.
++	 * BwC =  1 - Local Memory Block Write Cycle Time: 2 clocks.
++	 * BwP =  1 - Local Memory Block Write to Pre-Charge Delay: 1 clock.
++	 * AP = 1 - Internal Memory Active to Pre-Charge Delay: 7 clocks.
++	 * Rst = 1 - Internal Memory Reset: Normal.
++	 * RA = 1 - Internal Memory Remain in Active State: Do not remain.
++	 */
++
++	sm501_reg = (void __iomem *)0xb3e00000 + SM501_DRAM_CONTROL;
++	writel(readl(sm501_reg) | 0x00f107c0, sm501_reg);
++
++	/*
++	 * Power Mode Gate - Enable UART0
++	 */
++
++	sm501_reg = (void __iomem *)0xb3e00000 + SM501_POWER_MODE_0_GATE;
++	writel(readl(sm501_reg) | (1 << SM501_GATE_UART0), sm501_reg);
++
++	sm501_reg = (void __iomem *)0xb3e00000 + SM501_POWER_MODE_1_GATE;
++	writel(readl(sm501_reg) | (1 << SM501_GATE_UART0), sm501_reg);
+ }
+ 
+ /*
+@@ -215,8 +312,4 @@ static struct sh_machine_vector mv_rts7751r2d __initmv = {
+ 	.mv_irq_demux		= rts7751r2d_irq_demux,
+ 	.mv_writeb		= rts7751r2d_writeb,
+ 	.mv_readb		= rts7751r2d_readb,
+-#if defined(CONFIG_MFD_SM501) && defined(CONFIG_USB_OHCI_HCD)
+-	.mv_consistent_alloc	= voyagergx_consistent_alloc,
+-	.mv_consistent_free	= voyagergx_consistent_free,
+-#endif
+ };
+diff --git a/arch/sh/boards/renesas/sdk7780/Kconfig b/arch/sh/boards/renesas/sdk7780/Kconfig
+new file mode 100644
+index 0000000..e4f5b69
+--- /dev/null
++++ b/arch/sh/boards/renesas/sdk7780/Kconfig
+@@ -0,0 +1,23 @@
++if SH_SDK7780
++
++choice
++	prompt "SDK7780 options"
++	default SH_SDK7780_BASE
++
++config SH_SDK7780_STANDALONE
++	bool "SDK7780 board support"
++	depends on CPU_SUBTYPE_SH7780
++	help
++	  Selecting this option will enable support for the
++	  standalone version of the SDK7780. If in doubt, say Y.
++
++config SH_SDK7780_BASE
++	bool "SDK7780 with base-board support"
++	depends on CPU_SUBTYPE_SH7780
++	help
++	  Selecting this option will enable support for the expansion
++	  baseboard devices. If in doubt, say Y.
++
++endchoice
++
++endif
+diff --git a/arch/sh/boards/renesas/sdk7780/Makefile b/arch/sh/boards/renesas/sdk7780/Makefile
+new file mode 100644
+index 0000000..3d8f0be
+--- /dev/null
++++ b/arch/sh/boards/renesas/sdk7780/Makefile
+@@ -0,0 +1,5 @@
++#
++# Makefile for the SDK7780 specific parts of the kernel
++#
++obj-y	 := setup.o irq.o
++
+diff --git a/arch/sh/boards/renesas/sdk7780/irq.c b/arch/sh/boards/renesas/sdk7780/irq.c
+new file mode 100644
+index 0000000..87cdc57
+--- /dev/null
++++ b/arch/sh/boards/renesas/sdk7780/irq.c
+@@ -0,0 +1,46 @@
++/*
++ * linux/arch/sh/boards/renesas/sdk7780/irq.c
++ *
++ * Renesas Technology Europe SDK7780 Support.
++ *
++ * Copyright (C) 2008  Nicholas Beck <nbeck at mpc-data.co.uk>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/init.h>
++#include <linux/irq.h>
++#include <linux/io.h>
++#include <asm/sdk7780.h>
++
++enum {
++	UNUSED = 0,
++	/* board specific interrupt sources */
++	SMC91C111,	/* Ethernet controller */
++};
++
++static struct intc_vect fpga_vectors[] __initdata = {
++	INTC_IRQ(SMC91C111, IRQ_ETHERNET),
++};
++
++static struct intc_mask_reg fpga_mask_registers[] __initdata = {
++	{ 0, FPGA_IRQ0MR, 16,
++	  { 0, 0, 0, 0, 0, 0, 0, 0,
++	    0, 0, 0, SMC91C111, 0, 0, 0, 0 } },
++};
++
++static DECLARE_INTC_DESC(fpga_intc_desc, "sdk7780-irq", fpga_vectors,
++			 NULL, fpga_mask_registers, NULL, NULL);
++
++void __init init_sdk7780_IRQ(void)
++{
++	printk(KERN_INFO "Using SDK7780 interrupt controller.\n");
++
++	ctrl_outw(0xFFFF, FPGA_IRQ0MR);
++	/* Setup IRL 0-3 */
++	ctrl_outw(0x0003, FPGA_IMSR);
++	plat_irq_setup_pins(IRQ_MODE_IRL3210);
++
++	register_intc_controller(&fpga_intc_desc);
++}
+diff --git a/arch/sh/boards/renesas/sdk7780/setup.c b/arch/sh/boards/renesas/sdk7780/setup.c
+new file mode 100644
+index 0000000..5df32f2
+--- /dev/null
++++ b/arch/sh/boards/renesas/sdk7780/setup.c
+@@ -0,0 +1,109 @@
++/*
++ * arch/sh/boards/renesas/sdk7780/setup.c
++ *
++ * Renesas Solutions SH7780 SDK Support
++ * Copyright (C) 2008 Nicholas Beck <nbeck at mpc-data.co.uk>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/platform_device.h>
++#include <linux/pata_platform.h>
++#include <asm/machvec.h>
++#include <asm/sdk7780.h>
++#include <asm/heartbeat.h>
++#include <asm/io.h>
++#include <asm/addrspace.h>
++
++#define GPIO_PECR        0xFFEA0008
++
++/* Heartbeat */
++static struct heartbeat_data heartbeat_data = {
++	.regsize = 16,
++};
++
++static struct resource heartbeat_resources[] = {
++	[0] = {
++		.start  = PA_LED,
++		.end    = PA_LED,
++		.flags  = IORESOURCE_MEM,
++	},
++};
++
++static struct platform_device heartbeat_device = {
++	.name           = "heartbeat",
++	.id             = -1,
++	.dev = {
++		.platform_data = &heartbeat_data,
++	},
++	.num_resources  = ARRAY_SIZE(heartbeat_resources),
++	.resource       = heartbeat_resources,
++};
++
++/* SMC91x */
++static struct resource smc91x_eth_resources[] = {
++	[0] = {
++		.name   = "smc91x-regs" ,
++		.start  = PA_LAN + 0x300,
++		.end    = PA_LAN + 0x300 + 0x10 ,
++		.flags  = IORESOURCE_MEM,
++	},
++	[1] = {
++		.start  = IRQ_ETHERNET,
++		.end    = IRQ_ETHERNET,
++		.flags  = IORESOURCE_IRQ,
++	},
++};
++
++static struct platform_device smc91x_eth_device = {
++	.name           = "smc91x",
++	.id             = 0,
++	.dev = {
++		.dma_mask               = NULL,         /* don't use dma */
++		.coherent_dma_mask      = 0xffffffff,
++	},
++	.num_resources  = ARRAY_SIZE(smc91x_eth_resources),
++	.resource       = smc91x_eth_resources,
++};
++
++static struct platform_device *sdk7780_devices[] __initdata = {
++	&heartbeat_device,
++	&smc91x_eth_device,
++};
++
++static int __init sdk7780_devices_setup(void)
++{
++	return platform_add_devices(sdk7780_devices,
++		ARRAY_SIZE(sdk7780_devices));
++}
++device_initcall(sdk7780_devices_setup);
++
++static void __init sdk7780_setup(char **cmdline_p)
++{
++	u16 ver = ctrl_inw(FPGA_FPVERR);
++	u16 dateStamp = ctrl_inw(FPGA_FPDATER);
++
++	printk(KERN_INFO "Renesas Technology Europe SDK7780 support.\n");
++	printk(KERN_INFO "Board version: %d (revision %d), "
++			 "FPGA version: %d (revision %d), datestamp : %d\n",
++			 (ver >> 12) & 0xf, (ver >> 8) & 0xf,
++			 (ver >>  4) & 0xf, ver & 0xf,
++			 dateStamp);
++
++	/* Setup pin mux'ing for PCIC */
++	ctrl_outw(0x0000, GPIO_PECR);
++}
++
++/*
++ * The Machine Vector
++ */
++static struct sh_machine_vector mv_se7780 __initmv = {
++	.mv_name        = "Renesas SDK7780-R3" ,
++	.mv_setup		= sdk7780_setup,
++	.mv_nr_irqs		= 111,
++	.mv_init_irq	= init_sdk7780_IRQ,
++};
++
+diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
+index 1b0f5be..59f552c 100644
+--- a/arch/sh/boot/Makefile
++++ b/arch/sh/boot/Makefile
+@@ -35,17 +35,28 @@ $(obj)/compressed/vmlinux: FORCE
+ KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%8x" \
+ 		     $$[$(CONFIG_PAGE_OFFSET)  + \
+ 			$(CONFIG_MEMORY_START) + \
++			$(CONFIG_ZERO_PAGE_OFFSET)]')
++
++KERNEL_ENTRY	:= $(shell /bin/bash -c 'printf "0x%8x" \
++		     $$[$(CONFIG_PAGE_OFFSET)  + \
++			$(CONFIG_MEMORY_START) + \
+ 			$(CONFIG_ZERO_PAGE_OFFSET)+0x1000]')
+ 
+ quiet_cmd_uimage = UIMAGE  $@
+       cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sh -O linux -T kernel \
+-		   -C none -a $(KERNEL_LOAD) -e $(KERNEL_LOAD) \
++		   -C none -a $(KERNEL_LOAD) -e $(KERNEL_ENTRY) \
+ 		   -n 'Linux-$(KERNELRELEASE)' -d $< $@
+ 
+-$(obj)/uImage: $(obj)/zImage FORCE
++$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
+ 	$(call if_changed,uimage)
+ 	@echo '  Image $@ is ready'
+ 
++$(obj)/vmlinux.bin: vmlinux FORCE
++	$(call if_changed,objcopy)
++
++$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
++	$(call if_changed,gzip)
++
+ OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
+ $(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+ 	$(call if_changed,objcopy)
+@@ -54,4 +65,5 @@ OBJCOPYFLAGS_uImage.srec := -I binary -O srec
+ $(obj)/uImage.srec: $(obj)/uImage
+ 	$(call if_changed,objcopy)
+ 
+-clean-files	+= uImage uImage.srec vmlinux.srec
++clean-files	+= uImage uImage.srec vmlinux.srec \
++		   vmlinux.bin vmlinux.bin.gz
+diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
+index 906a13f..efb01dc 100644
+--- a/arch/sh/boot/compressed/Makefile
++++ b/arch/sh/boot/compressed/Makefile
+@@ -1,43 +1,5 @@
+-#
+-# linux/arch/sh/boot/compressed/Makefile
+-#
+-# create a compressed vmlinux image from the original vmlinux
+-#
+-
+-targets		:= vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
+-EXTRA_AFLAGS	:= -traditional
+-
+-OBJECTS = $(obj)/head.o $(obj)/misc.o
+-
+-ifdef CONFIG_SH_STANDARD_BIOS
+-OBJECTS += $(obj)/../../kernel/sh_bios.o
++ifeq ($(CONFIG_SUPERH32),y)
++include ${srctree}/arch/sh/boot/compressed/Makefile_32
++else
++include ${srctree}/arch/sh/boot/compressed/Makefile_64
+ endif
+-
+-#
+-# IMAGE_OFFSET is the load offset of the compression loader
+-#
+-IMAGE_OFFSET	:= $(shell /bin/bash -c 'printf "0x%08x" \
+-		     $$[$(CONFIG_PAGE_OFFSET)  + \
+-			$(CONFIG_MEMORY_START) + \
+-			$(CONFIG_BOOT_LINK_OFFSET)]')
+-
+-LIBGCC	:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+-
+-LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds
+-
+-
+-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
+-	$(call if_changed,ld)
+-	@:
+-
+-$(obj)/vmlinux.bin: vmlinux FORCE
+-	$(call if_changed,objcopy)
+-
+-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+-	$(call if_changed,gzip)
+-
+-LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh-linux -T
+-OBJCOPYFLAGS += -R .empty_zero_page
+-
+-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+-	$(call if_changed,ld)
+diff --git a/arch/sh/boot/compressed/Makefile_32 b/arch/sh/boot/compressed/Makefile_32
+new file mode 100644
+index 0000000..6ac8d4a
+--- /dev/null
++++ b/arch/sh/boot/compressed/Makefile_32
+@@ -0,0 +1,43 @@
++#
++# linux/arch/sh/boot/compressed/Makefile
++#
++# create a compressed vmlinux image from the original vmlinux
++#
++
++targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
++		   head_32.o misc_32.o piggy.o
++EXTRA_AFLAGS	:= -traditional
++
++OBJECTS = $(obj)/head_32.o $(obj)/misc_32.o
++
++ifdef CONFIG_SH_STANDARD_BIOS
++OBJECTS += $(obj)/../../kernel/sh_bios.o
++endif
++
++#
++# IMAGE_OFFSET is the load offset of the compression loader
++#
++IMAGE_OFFSET	:= $(shell /bin/bash -c 'printf "0x%08x" \
++		     $$[$(CONFIG_PAGE_OFFSET)  + \
++			$(CONFIG_MEMORY_START) + \
++			$(CONFIG_BOOT_LINK_OFFSET)]')
++
++LIBGCC	:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
++
++LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds
++
++$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
++	$(call if_changed,ld)
++	@:
++
++$(obj)/vmlinux.bin: vmlinux FORCE
++	$(call if_changed,objcopy)
++
++$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
++	$(call if_changed,gzip)
++
++LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh-linux -T
++OBJCOPYFLAGS += -R .empty_zero_page
++
++$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
++	$(call if_changed,ld)
+diff --git a/arch/sh/boot/compressed/Makefile_64 b/arch/sh/boot/compressed/Makefile_64
+new file mode 100644
+index 0000000..4334f2b
+--- /dev/null
++++ b/arch/sh/boot/compressed/Makefile_64
+@@ -0,0 +1,45 @@
++#
++# arch/sh/boot/compressed/Makefile_64
++#
++# create a compressed vmlinux image from the original vmlinux
++#
++# Copyright (C) 2002 Stuart Menefy
++# Copyright (C) 2004 Paul Mundt
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License.  See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++
++targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
++		   head_64.o misc_64.o cache.o piggy.o
++EXTRA_AFLAGS	:= -traditional
++
++OBJECTS		:= $(obj)/vmlinux_64.lds $(obj)/head_64.o $(obj)/misc_64.o \
++		   $(obj)/cache.o
++
++#
++# ZIMAGE_OFFSET is the load offset of the compression loader
++# (4M for the kernel plus 64K for this loader)
++#
++ZIMAGE_OFFSET	:= $(shell /bin/bash -c 'printf "0x%08x" \
++		     $$[$(CONFIG_PAGE_OFFSET)+0x400000+0x10000]')
++
++LDFLAGS_vmlinux := -Ttext $(ZIMAGE_OFFSET) -e startup \
++		    -T $(obj)/../../kernel/vmlinux.lds
++
++$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
++	$(call if_changed,ld)
++	@:
++
++$(obj)/vmlinux.bin: vmlinux FORCE
++	$(call if_changed,objcopy)
++
++$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
++	$(call if_changed,gzip)
++
++LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh64-linux -T
++OBJCOPYFLAGS += -R .empty_zero_page
++
++$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
++	$(call if_changed,ld)
+diff --git a/arch/sh/boot/compressed/cache.c b/arch/sh/boot/compressed/cache.c
+new file mode 100644
+index 0000000..e27fc74
+--- /dev/null
++++ b/arch/sh/boot/compressed/cache.c
+@@ -0,0 +1,12 @@
++int cache_control(unsigned int command)
++{
++	volatile unsigned int *p = (volatile unsigned int *) 0x80000000;
++	int i;
++
++	for (i = 0; i < (32 * 1024); i += 32) {
++		(void)*p;
++		p += (32 / sizeof (int));
++	}
++
++	return 0;
++}
+diff --git a/arch/sh/boot/compressed/head.S b/arch/sh/boot/compressed/head.S
+deleted file mode 100644
+index a8399b0..0000000
+--- a/arch/sh/boot/compressed/head.S
++++ /dev/null
+@@ -1,120 +0,0 @@
+-/*
+- *  linux/arch/sh/boot/compressed/head.S
+- *
+- *  Copyright (C) 1999 Stuart Menefy
+- *  Copyright (C) 2003 SUGIOKA Toshinobu
+- */
+-
+-.text
+-
+-#include <linux/linkage.h>
+-#include <asm/page.h>
+-
+-	.global	startup
+-startup:
+-	/* Load initial status register */
+-	mov.l   init_sr, r1
+-	ldc     r1, sr
+-
+-	/* Move myself to proper location if necessary */
+-	mova	1f, r0
+-	mov.l	1f, r2
+-	cmp/eq	r2, r0
+-	bt	clear_bss
+-	sub	r0, r2
+-	mov.l	bss_start_addr, r0
+-	mov	#0xe0, r1
+-	and	r1, r0			! align cache line
+-	mov.l	text_start_addr, r3
+-	mov	r0, r1
+-	sub	r2, r1
+-3:
+-	mov.l	@r1, r4
+-	mov.l	@(4,r1), r5
+-	mov.l	@(8,r1), r6
+-	mov.l	@(12,r1), r7
+-	mov.l	@(16,r1), r8
+-	mov.l	@(20,r1), r9
+-	mov.l	@(24,r1), r10
+-	mov.l	@(28,r1), r11
+-	mov.l	r4, @r0
+-	mov.l	r5, @(4,r0)
+-	mov.l	r6, @(8,r0)
+-	mov.l	r7, @(12,r0)
+-	mov.l	r8, @(16,r0)
+-	mov.l	r9, @(20,r0)
+-	mov.l	r10, @(24,r0)
+-	mov.l	r11, @(28,r0)
+-#ifdef CONFIG_CPU_SH4
+-	ocbwb	@r0
+-#endif
+-	cmp/hi	r3, r0
+-	add	#-32, r0
+-	bt/s	3b
+-	 add	#-32, r1
+-	mov.l	2f, r0
+-	jmp	@r0
+-	 nop
+-
+-	.align 2
+-1:	.long	1b
+-2:	.long	clear_bss
+-text_start_addr:
+-	.long	startup
+-
+-	/* Clear BSS */
+-clear_bss:
+-	mov.l	end_addr, r1
+-	mov.l	bss_start_addr, r2
+-	mov	#0, r0
+-l1:
+-	mov.l	r0, @-r1
+-	cmp/eq	r1,r2
+-	bf	l1
+-
+-	/* Set the initial pointer. */
+-	mov.l	init_stack_addr, r0
+-	mov.l	@r0, r15
+-
+-	/* Decompress the kernel */
+-	mov.l	decompress_kernel_addr, r0
+-	jsr	@r0
+-	nop
+-
+-	/* Jump to the start of the decompressed kernel */
+-	mov.l	kernel_start_addr, r0
+-	jmp	@r0
+-	nop
+-	
+-	.align	2
+-bss_start_addr:
+-	.long	__bss_start
+-end_addr:
+-	.long	_end
+-init_sr:
+-	.long	0x400000F0	/* Privileged mode, Bank=0, Block=0, IMASK=0xF */
+-init_stack_addr:
+-	.long	stack_start
+-decompress_kernel_addr:
+-	.long	decompress_kernel
+-kernel_start_addr:
+-	.long	_text+PAGE_SIZE
+-
+-	.align	9
+-fake_headers_as_bzImage:
+-	.word	0
+-	.ascii	"HdrS"		! header signature
+-	.word	0x0202		! header version number (>= 0x0105)
+-				! or else old loadlin-1.5 will fail)
+-	.word	0		! default_switch
+-	.word	0		! SETUPSEG
+-	.word	0x1000
+-	.word	0		! pointing to kernel version string
+-	.byte	0		! = 0, old one (LILO, Loadlin,
+-				! 0xTV: T=0 for LILO
+-				!       V = version
+-	.byte	1		! Load flags bzImage=1
+-	.word	0x8000		! size to move, when setup is not
+-	.long	0x100000	! 0x100000 = default for big kernel
+-	.long	0		! address of loaded ramdisk image
+-	.long	0		# its size in bytes
+diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S
+new file mode 100644
+index 0000000..a8399b0
+--- /dev/null
++++ b/arch/sh/boot/compressed/head_32.S
+@@ -0,0 +1,120 @@
++/*
++ *  linux/arch/sh/boot/compressed/head.S
++ *
++ *  Copyright (C) 1999 Stuart Menefy
++ *  Copyright (C) 2003 SUGIOKA Toshinobu
++ */
++
++.text
++
++#include <linux/linkage.h>
++#include <asm/page.h>
++
++	.global	startup
++startup:
++	/* Load initial status register */
++	mov.l   init_sr, r1
++	ldc     r1, sr
++
++	/* Move myself to proper location if necessary */
++	mova	1f, r0
++	mov.l	1f, r2
++	cmp/eq	r2, r0
++	bt	clear_bss
++	sub	r0, r2
++	mov.l	bss_start_addr, r0
++	mov	#0xe0, r1
++	and	r1, r0			! align cache line
++	mov.l	text_start_addr, r3
++	mov	r0, r1
++	sub	r2, r1
++3:
++	mov.l	@r1, r4
++	mov.l	@(4,r1), r5
++	mov.l	@(8,r1), r6
++	mov.l	@(12,r1), r7
++	mov.l	@(16,r1), r8
++	mov.l	@(20,r1), r9
++	mov.l	@(24,r1), r10
++	mov.l	@(28,r1), r11
++	mov.l	r4, @r0
++	mov.l	r5, @(4,r0)
++	mov.l	r6, @(8,r0)
++	mov.l	r7, @(12,r0)
++	mov.l	r8, @(16,r0)
++	mov.l	r9, @(20,r0)
++	mov.l	r10, @(24,r0)
++	mov.l	r11, @(28,r0)
++#ifdef CONFIG_CPU_SH4
++	ocbwb	@r0
++#endif
++	cmp/hi	r3, r0
++	add	#-32, r0
++	bt/s	3b
++	 add	#-32, r1
++	mov.l	2f, r0
++	jmp	@r0
++	 nop
++
++	.align 2
++1:	.long	1b
++2:	.long	clear_bss
++text_start_addr:
++	.long	startup
++
++	/* Clear BSS */
++clear_bss:
++	mov.l	end_addr, r1
++	mov.l	bss_start_addr, r2
++	mov	#0, r0
++l1:
++	mov.l	r0, @-r1
++	cmp/eq	r1,r2
++	bf	l1
++
++	/* Set the initial pointer. */
++	mov.l	init_stack_addr, r0
++	mov.l	@r0, r15
++
++	/* Decompress the kernel */
++	mov.l	decompress_kernel_addr, r0
++	jsr	@r0
++	nop
++
++	/* Jump to the start of the decompressed kernel */
++	mov.l	kernel_start_addr, r0
++	jmp	@r0
++	nop
++	
++	.align	2
++bss_start_addr:
++	.long	__bss_start
++end_addr:
++	.long	_end
++init_sr:
++	.long	0x400000F0	/* Privileged mode, Bank=0, Block=0, IMASK=0xF */
++init_stack_addr:
++	.long	stack_start
++decompress_kernel_addr:
++	.long	decompress_kernel
++kernel_start_addr:
++	.long	_text+PAGE_SIZE
++
++	.align	9
++fake_headers_as_bzImage:
++	.word	0
++	.ascii	"HdrS"		! header signature
++	.word	0x0202		! header version number (>= 0x0105)
++				! or else old loadlin-1.5 will fail)
++	.word	0		! default_switch
++	.word	0		! SETUPSEG
++	.word	0x1000
++	.word	0		! pointing to kernel version string
++	.byte	0		! = 0, old one (LILO, Loadlin,
++				! 0xTV: T=0 for LILO
++				!       V = version
++	.byte	1		! Load flags bzImage=1
++	.word	0x8000		! size to move, when setup is not
++	.long	0x100000	! 0x100000 = default for big kernel
++	.long	0		! address of loaded ramdisk image
++	.long	0		# its size in bytes
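
[Reading aid, not part of the patch: head_32.S above is re-added byte-for-byte from the head.S deleted earlier in this hunk; only the filename changes as part of the 32/64 split. Very roughly, the relocation loop near its top behaves like the following C sketch, assuming dst is the linked address, src the actual load address, and both walk downward from the cache-line-aligned end of .bss to the start of .text:]

static void relocate_down(unsigned char *dst, unsigned char *src,
			  unsigned char *text_start)
{
	int more;

	do {
		__builtin_memcpy(dst, src, 32);	/* the eight mov.l load/store pairs */
		more = dst > text_start;	/* cmp/hi r3, r0 */
		dst -= 32;			/* add #-32, r0 */
		src -= 32;			/* add #-32, r1 (delay slot) */
	} while (more);
}
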
+diff --git a/arch/sh/boot/compressed/head_64.S b/arch/sh/boot/compressed/head_64.S
+new file mode 100644
+index 0000000..1d4ecbf
+--- /dev/null
++++ b/arch/sh/boot/compressed/head_64.S
+@@ -0,0 +1,163 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * arch/shmedia/boot/compressed/head.S
++ *
++ * Copied from
++ *   arch/shmedia/kernel/head.S
++ * which carried the copyright:
++ *   Copyright (C) 2000, 2001  Paolo Alberelli
++ *
++ * Modification for compressed loader:
++ *   Copyright (C) 2002 Stuart Menefy (stuart.menefy@st.com)
++ */
++#include <linux/linkage.h>
++#include <asm/cache.h>
++#include <asm/cpu/mmu_context.h>
++#include <asm/cpu/registers.h>
++
++/*
++ * Fixed TLB entries to identity map the beginning of RAM
++ */
++#define MMUIR_TEXT_H	0x0000000000000003 | CONFIG_MEMORY_START
++			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
++#define MMUIR_TEXT_L	0x000000000000009a | CONFIG_MEMORY_START
++			/* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. */
++
++#define MMUDR_CACHED_H	0x0000000000000003 | CONFIG_MEMORY_START
++			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
++#define MMUDR_CACHED_L	0x000000000000015a | CONFIG_MEMORY_START
++			/* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */
++
++#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
++#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
++
++#if 1
++#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	/* OCE + OCI + WB */
++#else
++#define	OCCR0_INIT_VAL	OCCR0_OFF
++#endif
++#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			/* No locking */
++
++	.text
++
++	.global	startup
++startup:
++	/*
++	 * Prevent speculative fetch on device memory due to
++	 * uninitialized target registers.
++	 * This must be executed before the first branch.
++	 */
++	ptabs/u	r63, tr0
++	ptabs/u	r63, tr1
++	ptabs/u	r63, tr2
++	ptabs/u	r63, tr3
++	ptabs/u	r63, tr4
++	ptabs/u	r63, tr5
++	ptabs/u	r63, tr6
++	ptabs/u	r63, tr7
++	synci
++
++	/*
++	 * Set initial TLB entries for cached and uncached regions.
++	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
++	 */
++	/* Clear ITLBs */
++	pta	1f, tr1
++	movi	ITLB_FIXED, r21
++	movi	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
++1:	putcfg	r21, 0, r63		/* Clear MMUIR[n].PTEH.V */
++	addi	r21, TLB_STEP, r21
++        bne	r21, r22, tr1
++
++	/* Clear DTLBs */
++	pta	1f, tr1
++	movi	DTLB_FIXED, r21
++	movi	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
++1:	putcfg	r21, 0, r63		/* Clear MMUDR[n].PTEH.V */
++	addi	r21, TLB_STEP, r21
++        bne	r21, r22, tr1
++
++	/* Map one big (512Mb) page for ITLB */
++	movi	ITLB_FIXED, r21
++	movi	MMUIR_TEXT_L, r22	/* PTEL first */
++	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
++	movi	MMUIR_TEXT_H, r22	/* PTEH last */
++	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */
++
++	/* Map one big CACHED (512Mb) page for DTLB */
++	movi	DTLB_FIXED, r21
++	movi	MMUDR_CACHED_L, r22	/* PTEL first */
++	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
++	movi	MMUDR_CACHED_H, r22	/* PTEH last */
++	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
++
++	/* ICache */
++	movi	ICCR_BASE, r21
++	movi	ICCR0_INIT_VAL, r22
++	movi	ICCR1_INIT_VAL, r23
++	putcfg	r21, ICCR_REG0, r22
++	putcfg	r21, ICCR_REG1, r23
++	synci
++
++	/* OCache */
++	movi	OCCR_BASE, r21
++	movi	OCCR0_INIT_VAL, r22
++	movi	OCCR1_INIT_VAL, r23
++	putcfg	r21, OCCR_REG0, r22
++	putcfg	r21, OCCR_REG1, r23
++	synco
++
++	/*
++	 * Enable the MMU.
++	 * From here-on code can be non-PIC.
++	 */
++	movi	SR_HARMLESS | SR_ENABLE_MMU, r22
++	putcon	r22, SSR
++	movi	1f, r22
++	putcon	r22, SPC
++	synco
++	rte				/* And now go into the hyperspace ... */
++1:					/* ... that's the next instruction ! */
++
++	/* Set initial stack pointer */
++	movi	datalabel stack_start, r0
++	ld.l	r0, 0, r15
++
++	/*
++	 * Clear bss
++	 */
++	pt	1f, tr1
++	movi	datalabel __bss_start, r22
++	movi	datalabel _end, r23
++1:	st.l	r22, 0, r63
++	addi	r22, 4, r22
++	bne	r22, r23, tr1
++
++	/*
++	 * Decompress the kernel.
++	 */
++	pt	decompress_kernel, tr0
++	blink	tr0, r18
++
++	/*
++	 * Disable the MMU.
++	 */
++	movi	SR_HARMLESS, r22
++	putcon	r22, SSR
++	movi	1f, r22
++	putcon	r22, SPC
++	synco
++	rte				/* And now go into the hyperspace ... */
++1:					/* ... that's the next instruction ! */
++
++	/* Jump into the decompressed kernel */
++	movi	datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19
++	ptabs	r19, tr0
++	blink	tr0, r18
++
++	/* Shouldn't return here, but just in case, loop forever */
++	pt	1f, tr0
++1:	blink	tr0, r63
+diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
+deleted file mode 100644
+index df65e30..0000000
+--- a/arch/sh/boot/compressed/misc.c
++++ /dev/null
+@@ -1,241 +0,0 @@
+-/*
+- * arch/sh/boot/compressed/misc.c
+- *
+- * This is a collection of several routines from gzip-1.0.3
+- * adapted for Linux.
+- *
+- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+- *
+- * Adapted for SH by Stuart Menefy, Aug 1999
+- *
+- * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000
+- */
+-
+-#include <asm/uaccess.h>
+-#include <asm/addrspace.h>
+-#include <asm/page.h>
+-#ifdef CONFIG_SH_STANDARD_BIOS
+-#include <asm/sh_bios.h>
+-#endif
+-
+-/*
+- * gzip declarations
+- */
+-
+-#define OF(args)  args
+-#define STATIC static
+-
+-#undef memset
+-#undef memcpy
+-#define memzero(s, n)     memset ((s), 0, (n))
+-
+-typedef unsigned char  uch;
+-typedef unsigned short ush;
+-typedef unsigned long  ulg;
+-
+-#define WSIZE 0x8000		/* Window size must be at least 32k, */
+-				/* and a power of two */
+-
+-static uch *inbuf;	     /* input buffer */
+-static uch window[WSIZE];    /* Sliding window buffer */
+-
+-static unsigned insize = 0;  /* valid bytes in inbuf */
+-static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
+-static unsigned outcnt = 0;  /* bytes in output buffer */
+-
+-/* gzip flag byte */
+-#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
+-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+-#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
+-#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
+-#define COMMENT      0x10 /* bit 4 set: file comment present */
+-#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
+-#define RESERVED     0xC0 /* bit 6,7:   reserved */
+-
+-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+-
+-/* Diagnostic functions */
+-#ifdef DEBUG
+-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
+-#  define Trace(x) fprintf x
+-#  define Tracev(x) {if (verbose) fprintf x ;}
+-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
+-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+-#else
+-#  define Assert(cond,msg)
+-#  define Trace(x)
+-#  define Tracev(x)
+-#  define Tracevv(x)
+-#  define Tracec(c,x)
+-#  define Tracecv(c,x)
+-#endif
+-
+-static int  fill_inbuf(void);
+-static void flush_window(void);
+-static void error(char *m);
+-static void gzip_mark(void **);
+-static void gzip_release(void **);
+-
+-extern char input_data[];
+-extern int input_len;
+-
+-static long bytes_out = 0;
+-static uch *output_data;
+-static unsigned long output_ptr = 0;
+-
+-static void *malloc(int size);
+-static void free(void *where);
+-static void error(char *m);
+-static void gzip_mark(void **);
+-static void gzip_release(void **);
+-
+-int puts(const char *);
+-
+-extern int _text;		/* Defined in vmlinux.lds.S */
+-extern int _end;
+-static unsigned long free_mem_ptr;
+-static unsigned long free_mem_end_ptr;
+-
+-#define HEAP_SIZE             0x10000
+-
+-#include "../../../../lib/inflate.c"
+-
+-static void *malloc(int size)
+-{
+-	void *p;
+-
+-	if (size <0) error("Malloc error");
+-	if (free_mem_ptr == 0) error("Memory error");
+-
+-	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
+-
+-	p = (void *)free_mem_ptr;
+-	free_mem_ptr += size;
+-
+-	if (free_mem_ptr >= free_mem_end_ptr)
+-		error("Out of memory");
+-
+-	return p;
+-}
+-
+-static void free(void *where)
+-{	/* Don't care */
+-}
+-
+-static void gzip_mark(void **ptr)
+-{
+-	*ptr = (void *) free_mem_ptr;
+-}
+-
+-static void gzip_release(void **ptr)
+-{
+-	free_mem_ptr = (long) *ptr;
+-}
+-
+-#ifdef CONFIG_SH_STANDARD_BIOS
+-size_t strlen(const char *s)
+-{
+-	int i = 0;
+-
+-	while (*s++)
+-		i++;
+-	return i;
+-}
+-
+-int puts(const char *s)
+-{
+-	int len = strlen(s);
+-	sh_bios_console_write(s, len);
+-	return len;
+-}
+-#else
+-int puts(const char *s)
+-{
+-	/* This should be updated to use the sh-sci routines */
+-	return 0;
+-}
+-#endif
+-
+-void* memset(void* s, int c, size_t n)
+-{
+-	int i;
+-	char *ss = (char*)s;
+-
+-	for (i=0;i<n;i++) ss[i] = c;
+-	return s;
+-}
+-
+-void* memcpy(void* __dest, __const void* __src,
+-			    size_t __n)
+-{
+-	int i;
+-	char *d = (char *)__dest, *s = (char *)__src;
+-
+-	for (i=0;i<__n;i++) d[i] = s[i];
+-	return __dest;
+-}
+-
+-/* ===========================================================================
+- * Fill the input buffer. This is called only when the buffer is empty
+- * and at least one byte is really needed.
+- */
+-static int fill_inbuf(void)
+-{
+-	if (insize != 0) {
+-		error("ran out of input data");
+-	}
+-
+-	inbuf = input_data;
+-	insize = input_len;
+-	inptr = 1;
+-	return inbuf[0];
+-}
+-
+-/* ===========================================================================
+- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
+- * (Used for the decompressed data only.)
+- */
+-static void flush_window(void)
+-{
+-    ulg c = crc;         /* temporary variable */
+-    unsigned n;
+-    uch *in, *out, ch;
+-
+-    in = window;
+-    out = &output_data[output_ptr];
+-    for (n = 0; n < outcnt; n++) {
+-	    ch = *out++ = *in++;
+-	    c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+-    }
+-    crc = c;
+-    bytes_out += (ulg)outcnt;
+-    output_ptr += (ulg)outcnt;
+-    outcnt = 0;
+-}
+-
+-static void error(char *x)
+-{
+-	puts("\n\n");
+-	puts(x);
+-	puts("\n\n -- System halted");
+-
+-	while(1);	/* Halt */
+-}
+-
+-#define STACK_SIZE (4096)
+-long user_stack [STACK_SIZE];
+-long* stack_start = &user_stack[STACK_SIZE];
+-
+-void decompress_kernel(void)
+-{
+-	output_data = 0;
+-	output_ptr = P2SEGADDR((unsigned long)&_text+PAGE_SIZE);
+-	free_mem_ptr = (unsigned long)&_end;
+-	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+-
+-	makecrc();
+-	puts("Uncompressing Linux... ");
+-	gunzip();
+-	puts("Ok, booting the kernel.\n");
+-}
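
[Reading aid, not part of the patch: flush_window() in the misc.c deleted above, and in the misc_32.c/misc_64.c re-added below, updates the running checksum one byte at a time with the usual table-driven CRC-32 recurrence from lib/inflate.c. A minimal stand-alone sketch of that single step; the table name crc_table here is hypothetical, the real code uses crc_32_tab from the included inflate.c:]

static unsigned long crc32_step(unsigned long crc, unsigned char ch,
				const unsigned long crc_table[256])
{
	return crc_table[(crc ^ ch) & 0xff] ^ (crc >> 8);
}
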
+diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c
+new file mode 100644
+index 0000000..adcea31
+--- /dev/null
++++ b/arch/sh/boot/compressed/misc_32.c
+@@ -0,0 +1,244 @@
++/*
++ * arch/sh/boot/compressed/misc.c
++ *
++ * This is a collection of several routines from gzip-1.0.3
++ * adapted for Linux.
++ *
++ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
++ *
++ * Adapted for SH by Stuart Menefy, Aug 1999
++ *
++ * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000
++ */
++
++#include <asm/uaccess.h>
++#include <asm/addrspace.h>
++#include <asm/page.h>
++#ifdef CONFIG_SH_STANDARD_BIOS
++#include <asm/sh_bios.h>
++#endif
++
++/*
++ * gzip declarations
++ */
++
++#define OF(args)  args
++#define STATIC static
++
++#undef memset
++#undef memcpy
++#define memzero(s, n)     memset ((s), 0, (n))
++
++typedef unsigned char  uch;
++typedef unsigned short ush;
++typedef unsigned long  ulg;
++
++#define WSIZE 0x8000		/* Window size must be at least 32k, */
++				/* and a power of two */
++
++static uch *inbuf;	     /* input buffer */
++static uch window[WSIZE];    /* Sliding window buffer */
++
++static unsigned insize = 0;  /* valid bytes in inbuf */
++static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
++static unsigned outcnt = 0;  /* bytes in output buffer */
++
++/* gzip flag byte */
++#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
++#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
++#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
++#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
++#define COMMENT      0x10 /* bit 4 set: file comment present */
++#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
++#define RESERVED     0xC0 /* bit 6,7:   reserved */
++
++#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
++
++/* Diagnostic functions */
++#ifdef DEBUG
++#  define Assert(cond,msg) {if(!(cond)) error(msg);}
++#  define Trace(x) fprintf x
++#  define Tracev(x) {if (verbose) fprintf x ;}
++#  define Tracevv(x) {if (verbose>1) fprintf x ;}
++#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
++#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
++#else
++#  define Assert(cond,msg)
++#  define Trace(x)
++#  define Tracev(x)
++#  define Tracevv(x)
++#  define Tracec(c,x)
++#  define Tracecv(c,x)
++#endif
++
++static int  fill_inbuf(void);
++static void flush_window(void);
++static void error(char *m);
++static void gzip_mark(void **);
++static void gzip_release(void **);
++
++extern char input_data[];
++extern int input_len;
++
++static long bytes_out = 0;
++static uch *output_data;
++static unsigned long output_ptr = 0;
++
++static void *malloc(int size);
++static void free(void *where);
++static void error(char *m);
++static void gzip_mark(void **);
++static void gzip_release(void **);
++
++int puts(const char *);
++
++extern int _text;		/* Defined in vmlinux.lds.S */
++extern int _end;
++static unsigned long free_mem_ptr;
++static unsigned long free_mem_end_ptr;
++
++#define HEAP_SIZE             0x10000
++
++#include "../../../../lib/inflate.c"
++
++static void *malloc(int size)
++{
++	void *p;
++
++	if (size <0) error("Malloc error");
++	if (free_mem_ptr == 0) error("Memory error");
++
++	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
++
++	p = (void *)free_mem_ptr;
++	free_mem_ptr += size;
++
++	if (free_mem_ptr >= free_mem_end_ptr)
++		error("Out of memory");
++
++	return p;
++}
++
++static void free(void *where)
++{	/* Don't care */
++}
++
++static void gzip_mark(void **ptr)
++{
++	*ptr = (void *) free_mem_ptr;
++}
++
++static void gzip_release(void **ptr)
++{
++	free_mem_ptr = (long) *ptr;
++}
++
++#ifdef CONFIG_SH_STANDARD_BIOS
++size_t strlen(const char *s)
++{
++	int i = 0;
++
++	while (*s++)
++		i++;
++	return i;
++}
++
++int puts(const char *s)
++{
++	int len = strlen(s);
++	sh_bios_console_write(s, len);
++	return len;
++}
++#else
++int puts(const char *s)
++{
++	/* This should be updated to use the sh-sci routines */
++	return 0;
++}
++#endif
++
++void* memset(void* s, int c, size_t n)
++{
++	int i;
++	char *ss = (char*)s;
++
++	for (i=0;i<n;i++) ss[i] = c;
++	return s;
++}
++
++void* memcpy(void* __dest, __const void* __src,
++			    size_t __n)
++{
++	int i;
++	char *d = (char *)__dest, *s = (char *)__src;
++
++	for (i=0;i<__n;i++) d[i] = s[i];
++	return __dest;
++}
++
++/* ===========================================================================
++ * Fill the input buffer. This is called only when the buffer is empty
++ * and at least one byte is really needed.
++ */
++static int fill_inbuf(void)
++{
++	if (insize != 0) {
++		error("ran out of input data");
++	}
++
++	inbuf = input_data;
++	insize = input_len;
++	inptr = 1;
++	return inbuf[0];
++}
++
++/* ===========================================================================
++ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
++ * (Used for the decompressed data only.)
++ */
++static void flush_window(void)
++{
++    ulg c = crc;         /* temporary variable */
++    unsigned n;
++    uch *in, *out, ch;
++
++    in = window;
++    out = &output_data[output_ptr];
++    for (n = 0; n < outcnt; n++) {
++	    ch = *out++ = *in++;
++	    c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
++    }
++    crc = c;
++    bytes_out += (ulg)outcnt;
++    output_ptr += (ulg)outcnt;
++    outcnt = 0;
++}
++
++static void error(char *x)
++{
++	puts("\n\n");
++	puts(x);
++	puts("\n\n -- System halted");
++
++	while(1);	/* Halt */
++}
++
++#define STACK_SIZE (4096)
++long user_stack [STACK_SIZE];
++long* stack_start = &user_stack[STACK_SIZE];
++
++void decompress_kernel(void)
++{
++	output_data = 0;
++	output_ptr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
++#ifdef CONFIG_29BIT
++	output_ptr |= P2SEG;
++#endif
++	free_mem_ptr = (unsigned long)&_end;
++	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++
++	makecrc();
++	puts("Uncompressing Linux... ");
++	gunzip();
++	puts("Ok, booting the kernel.\n");
++}
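
[Reading aid, not part of the patch: the functional change between the deleted misc.c and the misc_32.c added above is confined to decompress_kernel(): the output address is now taken as a physical address, and the uncached P2 segment base is OR-ed in only when CONFIG_29BIT is set, instead of always going through P2SEGADDR(). A hypothetical model of that computation, assuming the usual 29-bit SH constants (PHYSADDR() masks to the low 29 bits, P2SEG is 0xa0000000):]

static unsigned long output_address(unsigned long text, unsigned long page_size,
				    int is_29bit)
{
	unsigned long p = (text + page_size) & 0x1fffffff;	/* PHYSADDR() */

	if (is_29bit)
		p |= 0xa0000000UL;				/* P2SEG */
	return p;
}
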
+diff --git a/arch/sh/boot/compressed/misc_64.c b/arch/sh/boot/compressed/misc_64.c
+new file mode 100644
+index 0000000..a006ef8
+--- /dev/null
++++ b/arch/sh/boot/compressed/misc_64.c
+@@ -0,0 +1,250 @@
++/*
++ * arch/sh/boot/compressed/misc_64.c
++ *
++ * This is a collection of several routines from gzip-1.0.3
++ * adapted for Linux.
++ *
++ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
++ *
++ * Adapted for SHmedia from sh by Stuart Menefy, May 2002
++ */
++
++#include <asm/uaccess.h>
++
++/* cache.c */
++#define CACHE_ENABLE      0
++#define CACHE_DISABLE     1
++int cache_control(unsigned int command);
++
++/*
++ * gzip declarations
++ */
++
++#define OF(args)  args
++#define STATIC static
++
++#undef memset
++#undef memcpy
++#define memzero(s, n)     memset ((s), 0, (n))
++
++typedef unsigned char uch;
++typedef unsigned short ush;
++typedef unsigned long ulg;
++
++#define WSIZE 0x8000		/* Window size must be at least 32k, */
++				/* and a power of two */
++
++static uch *inbuf;		/* input buffer */
++static uch window[WSIZE];	/* Sliding window buffer */
++
++static unsigned insize = 0;	/* valid bytes in inbuf */
++static unsigned inptr = 0;	/* index of next byte to be processed in inbuf */
++static unsigned outcnt = 0;	/* bytes in output buffer */
++
++/* gzip flag byte */
++#define ASCII_FLAG   0x01	/* bit 0 set: file probably ASCII text */
++#define CONTINUATION 0x02	/* bit 1 set: continuation of multi-part gzip file */
++#define EXTRA_FIELD  0x04	/* bit 2 set: extra field present */
++#define ORIG_NAME    0x08	/* bit 3 set: original file name present */
++#define COMMENT      0x10	/* bit 4 set: file comment present */
++#define ENCRYPTED    0x20	/* bit 5 set: file is encrypted */
++#define RESERVED     0xC0	/* bit 6,7:   reserved */
++
++#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
++
++/* Diagnostic functions */
++#ifdef DEBUG
++#  define Assert(cond,msg) {if(!(cond)) error(msg);}
++#  define Trace(x) fprintf x
++#  define Tracev(x) {if (verbose) fprintf x ;}
++#  define Tracevv(x) {if (verbose>1) fprintf x ;}
++#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
++#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
++#else
++#  define Assert(cond,msg)
++#  define Trace(x)
++#  define Tracev(x)
++#  define Tracevv(x)
++#  define Tracec(c,x)
++#  define Tracecv(c,x)
++#endif
++
++static int fill_inbuf(void);
++static void flush_window(void);
++static void error(char *m);
++static void gzip_mark(void **);
++static void gzip_release(void **);
++
++extern char input_data[];
++extern int input_len;
++
++static long bytes_out = 0;
++static uch *output_data;
++static unsigned long output_ptr = 0;
++
++static void *malloc(int size);
++static void free(void *where);
++static void error(char *m);
++static void gzip_mark(void **);
++static void gzip_release(void **);
++
++static void puts(const char *);
++
++extern int _text;		/* Defined in vmlinux.lds.S */
++extern int _end;
++static unsigned long free_mem_ptr;
++static unsigned long free_mem_end_ptr;
++
++#define HEAP_SIZE             0x10000
++
++#include "../../../../lib/inflate.c"
++
++static void *malloc(int size)
++{
++	void *p;
++
++	if (size < 0)
++		error("Malloc error\n");
++	if (free_mem_ptr == 0)
++		error("Memory error\n");
++
++	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
++
++	p = (void *) free_mem_ptr;
++	free_mem_ptr += size;
++
++	if (free_mem_ptr >= free_mem_end_ptr)
++		error("\nOut of memory\n");
++
++	return p;
++}
++
++static void free(void *where)
++{				/* Don't care */
++}
++
++static void gzip_mark(void **ptr)
++{
++	*ptr = (void *) free_mem_ptr;
++}
++
++static void gzip_release(void **ptr)
++{
++	free_mem_ptr = (long) *ptr;
++}
++
++void puts(const char *s)
++{
++}
++
++void *memset(void *s, int c, size_t n)
++{
++	int i;
++	char *ss = (char *) s;
++
++	for (i = 0; i < n; i++)
++		ss[i] = c;
++	return s;
++}
++
++void *memcpy(void *__dest, __const void *__src, size_t __n)
++{
++	int i;
++	char *d = (char *) __dest, *s = (char *) __src;
++
++	for (i = 0; i < __n; i++)
++		d[i] = s[i];
++	return __dest;
++}
++
++/* ===========================================================================
++ * Fill the input buffer. This is called only when the buffer is empty
++ * and at least one byte is really needed.
++ */
++static int fill_inbuf(void)
++{
++	if (insize != 0) {
++		error("ran out of input data\n");
++	}
++
++	inbuf = input_data;
++	insize = input_len;
++	inptr = 1;
++	return inbuf[0];
++}
++
++/* ===========================================================================
++ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
++ * (Used for the decompressed data only.)
++ */
++static void flush_window(void)
++{
++	ulg c = crc;		/* temporary variable */
++	unsigned n;
++	uch *in, *out, ch;
++
++	in = window;
++	out = &output_data[output_ptr];
++	for (n = 0; n < outcnt; n++) {
++		ch = *out++ = *in++;
++		c = crc_32_tab[((int) c ^ ch) & 0xff] ^ (c >> 8);
++	}
++	crc = c;
++	bytes_out += (ulg) outcnt;
++	output_ptr += (ulg) outcnt;
++	outcnt = 0;
++	puts(".");
++}
++
++static void error(char *x)
++{
++	puts("\n\n");
++	puts(x);
++	puts("\n\n -- System halted");
++
++	while (1) ;		/* Halt */
++}
++
++#define STACK_SIZE (4096)
++long __attribute__ ((aligned(8))) user_stack[STACK_SIZE];
++long *stack_start = &user_stack[STACK_SIZE];
++
++void decompress_kernel(void)
++{
++	output_data = (uch *) (CONFIG_MEMORY_START + 0x2000);
++	free_mem_ptr = (unsigned long) &_end;
++	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++
++	makecrc();
++	puts("Uncompressing Linux... ");
++	cache_control(CACHE_ENABLE);
++	gunzip();
++	puts("\n");
++
++#if 0
++	/* When booting from ROM may want to do something like this if the
++	 * boot loader doesn't.
++	 */
++
++	/* Set up the parameters and command line */
++	{
++		volatile unsigned int *parambase =
++		    (int *) (CONFIG_MEMORY_START + 0x1000);
++
++		parambase[0] = 0x1;	/* MOUNT_ROOT_RDONLY */
++		parambase[1] = 0x0;	/* RAMDISK_FLAGS */
++		parambase[2] = 0x0200;	/* ORIG_ROOT_DEV */
++		parambase[3] = 0x0;	/* LOADER_TYPE */
++		parambase[4] = 0x0;	/* INITRD_START */
++		parambase[5] = 0x0;	/* INITRD_SIZE */
++		parambase[6] = 0;
++
++		strcpy((char *) ((int) parambase + 0x100),
++		       "console=ttySC0,38400");
++	}
++#endif
++
++	puts("Ok, booting the kernel.\n");
++
++	cache_control(CACHE_DISABLE);
++}
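
[Reading aid, not part of the patch: on the SH-5 side, decompress_kernel() in misc_64.c points output_data directly at CONFIG_MEMORY_START + 0x2000 and brackets the decompression with the cache_control() calls from the new cache.c. The call order, pulled out as a hypothetical driver just to make the sequencing explicit; prototypes are assumed here, in the real file they come from cache.c and the included lib/inflate.c:]

#define CACHE_ENABLE	0
#define CACHE_DISABLE	1

int cache_control(unsigned int command);	/* assumed, see cache.c above */
int gunzip(void);				/* assumed, from lib/inflate.c */

static void decompress_with_caches(void)
{
	cache_control(CACHE_ENABLE);	/* enable caches for the bulk copy */
	gunzip();			/* inflate into output_data */
	cache_control(CACHE_DISABLE);	/* flush by displacement before jumping */
}
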
+diff --git a/arch/sh/boot/compressed/vmlinux_64.lds b/arch/sh/boot/compressed/vmlinux_64.lds
+new file mode 100644
+index 0000000..59c2ef4
+--- /dev/null
++++ b/arch/sh/boot/compressed/vmlinux_64.lds
+@@ -0,0 +1,64 @@
++/*
++ * ld script to make compressed SuperH/shmedia Linux kernel+decompression
++ *		bootstrap
++ * Modified by Stuart Menefy from arch/sh/vmlinux.lds.S written by Niibe Yutaka
++ */
++
++
++#ifdef CONFIG_LITTLE_ENDIAN
++/* OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux") */
++#define NOP 0x6ff0fff0
++#else
++/* OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64") */
++#define NOP 0xf0fff06f
++#endif
++
++OUTPUT_FORMAT("elf32-sh64-linux")
++OUTPUT_ARCH(sh)
++ENTRY(_start)
++
++#define ALIGNED_GAP(section, align) (((ADDR(section)+SIZEOF(section)+(align)-1) & ~((align)-1))-ADDR(section))
++#define FOLLOWING(section, align) AT (LOADADDR(section) + ALIGNED_GAP(section,align))
++
++SECTIONS
++{
++  _text = .;			/* Text and read-only data */
++
++  .text : {
++	*(.text)
++	*(.text64)
++	*(.text..SHmedia32)
++	*(.fixup)
++	*(.gnu.warning)
++	} = NOP
++  . = ALIGN(4);
++  .rodata : { *(.rodata) }
++
++  /* There is no 'real' reason for eight byte alignment, four would work
++   * as well, but gdb downloads much (*4) faster with this.
++   */
++  . = ALIGN(8);
++  .image : { *(.image) }
++  . = ALIGN(4);
++  _etext = .;			/* End of text section */
++
++  .data :			/* Data */
++	FOLLOWING(.image, 4)
++	{
++	_data = .;
++	*(.data)
++	}
++  _data_image = LOADADDR(.data);/* Address of data section in ROM */
++
++  _edata = .;			/* End of data section */
++
++  .stack : { stack = .;  _stack = .; }
++
++  . = ALIGN(4);
++  __bss_start = .;		/* BSS */
++  .bss : {
++	*(.bss)
++	}
++  . = ALIGN(4);
++  _end = . ;
++}
+diff --git a/arch/sh/cchips/voyagergx/Makefile b/arch/sh/cchips/voyagergx/Makefile
+deleted file mode 100644
+index f73963c..0000000
+--- a/arch/sh/cchips/voyagergx/Makefile
++++ /dev/null
+@@ -1,9 +0,0 @@
+-#
+-# Makefile for VoyagerGX
+-#
+-
+-obj-y	:= irq.o setup.o
+-
+-obj-$(CONFIG_USB_OHCI_HCD)	+= consistent.o
+-
+-EXTRA_CFLAGS += -Werror
+diff --git a/arch/sh/cchips/voyagergx/consistent.c b/arch/sh/cchips/voyagergx/consistent.c
+deleted file mode 100644
+index 07e8b9c..0000000
+--- a/arch/sh/cchips/voyagergx/consistent.c
++++ /dev/null
+@@ -1,121 +0,0 @@
+-/*
+- * arch/sh/cchips/voyagergx/consistent.c
+- *
+- * Copyright (C) 2004  Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-#include <linux/mm.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/slab.h>
+-#include <linux/list.h>
+-#include <linux/types.h>
+-#include <linux/module.h>
+-#include <linux/device.h>
+-#include <asm/io.h>
+-
+-
+-struct voya_alloc_entry {
+-	struct list_head list;
+-	unsigned long ofs;
+-	unsigned long len;
+-};
+-
+-static DEFINE_SPINLOCK(voya_list_lock);
+-static LIST_HEAD(voya_alloc_list);
+-
+-#define OHCI_SRAM_START	0xb0000000
+-#define OHCI_HCCA_SIZE	0x100
+-#define OHCI_SRAM_SIZE	0x10000
+-
+-#define VOYAGER_OHCI_NAME	"voyager-ohci"
+-
+-void *voyagergx_consistent_alloc(struct device *dev, size_t size,
+-				 dma_addr_t *handle, gfp_t flag)
+-{
+-	struct list_head *list = &voya_alloc_list;
+-	struct voya_alloc_entry *entry;
+-	unsigned long start, end;
+-	unsigned long flags;
+-
+-	/*
+-	 * The SM501 contains an integrated 8051 with its own SRAM.
+-	 * Devices within the cchip can all hook into the 8051 SRAM.
+-	 * We presently use this for the OHCI.
+-	 *
+-	 * Everything else goes through consistent_alloc().
+-	 */
+-	if (!dev || strcmp(dev->driver->name, VOYAGER_OHCI_NAME))
+-		return NULL;
+-
+-	start = OHCI_SRAM_START + OHCI_HCCA_SIZE;
+-
+-	entry = kmalloc(sizeof(struct voya_alloc_entry), GFP_ATOMIC);
+-	if (!entry)
+-		return ERR_PTR(-ENOMEM);
+-
+-	entry->len = (size + 15) & ~15;
+-
+-	/*
+-	 * The basis for this allocator is dwmw2's malloc.. the
+-	 * Matrox allocator :-)
+-	 */
+-	spin_lock_irqsave(&voya_list_lock, flags);
+-	list_for_each(list, &voya_alloc_list) {
+-		struct voya_alloc_entry *p;
+-
+-		p = list_entry(list, struct voya_alloc_entry, list);
+-
+-		if (p->ofs - start >= size)
+-			goto out;
+-
+-		start = p->ofs + p->len;
+-	}
+-
+-	end  = start + (OHCI_SRAM_SIZE  - OHCI_HCCA_SIZE);
+-	list = &voya_alloc_list;
+-
+-	if (end - start >= size) {
+-out:
+-		entry->ofs = start;
+-		list_add_tail(&entry->list, list);
+-		spin_unlock_irqrestore(&voya_list_lock, flags);
+-
+-		*handle = start;
+-		return (void *)start;
+-	}
+-
+-	kfree(entry);
+-	spin_unlock_irqrestore(&voya_list_lock, flags);
+-
+-	return ERR_PTR(-EINVAL);
+-}
+-
+-int voyagergx_consistent_free(struct device *dev, size_t size,
+-			      void *vaddr, dma_addr_t handle)
+-{
+-	struct voya_alloc_entry *entry;
+-	unsigned long flags;
+-
+-	if (!dev || strcmp(dev->driver->name, VOYAGER_OHCI_NAME))
+-		return -EINVAL;
+-
+-	spin_lock_irqsave(&voya_list_lock, flags);
+-	list_for_each_entry(entry, &voya_alloc_list, list) {
+-		if (entry->ofs != handle)
+-			continue;
+-
+-		list_del(&entry->list);
+-		kfree(entry);
+-
+-		break;
+-	}
+-	spin_unlock_irqrestore(&voya_list_lock, flags);
+-
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL(voyagergx_consistent_alloc);
+-EXPORT_SYMBOL(voyagergx_consistent_free);
+diff --git a/arch/sh/cchips/voyagergx/irq.c b/arch/sh/cchips/voyagergx/irq.c
+deleted file mode 100644
+index ade3038..0000000
+--- a/arch/sh/cchips/voyagergx/irq.c
++++ /dev/null
+@@ -1,101 +0,0 @@
+-/* -------------------------------------------------------------------- */
+-/* setup_voyagergx.c:                                                     */
+-/* -------------------------------------------------------------------- */
+-/*  This program is free software; you can redistribute it and/or modify
+-    it under the terms of the GNU General Public License as published by
+-    the Free Software Foundation; either version 2 of the License, or
+-    (at your option) any later version.
+-
+-    This program is distributed in the hope that it will be useful,
+-    but WITHOUT ANY WARRANTY; without even the implied warranty of
+-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-    GNU General Public License for more details.
+-
+-    You should have received a copy of the GNU General Public License
+-    along with this program; if not, write to the Free Software
+-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+-
+-    Copyright 2003 (c) Lineo uSolutions,Inc.
+-*/
+-#include <linux/interrupt.h>
+-#include <linux/init.h>
+-#include <linux/io.h>
+-#include <asm/voyagergx.h>
+-#include <asm/rts7751r2d.h>
+-
+-enum {
+-	UNUSED = 0,
+-
+-	/* voyager specific interrupt sources */
+-	UP, G54, G53, G52, G51, G50, G49, G48,
+-	I2C, PW, DMA, PCI, I2S, AC, US,
+-	U1, U0, CV, MC, S1, S0,
+-	UH, TWOD, ZD, PV, CI,
+-};
+-
+-static struct intc_vect vectors[] __initdata = {
+-	INTC_IRQ(UP, IRQ_SM501_UP), INTC_IRQ(G54, IRQ_SM501_G54),
+-	INTC_IRQ(G53, IRQ_SM501_G53), INTC_IRQ(G52, IRQ_SM501_G52),
+-	INTC_IRQ(G51, IRQ_SM501_G51), INTC_IRQ(G50, IRQ_SM501_G50),
+-	INTC_IRQ(G49, IRQ_SM501_G49), INTC_IRQ(G48, IRQ_SM501_G48),
+-	INTC_IRQ(I2C, IRQ_SM501_I2C), INTC_IRQ(PW, IRQ_SM501_PW),
+-	INTC_IRQ(DMA, IRQ_SM501_DMA), INTC_IRQ(PCI, IRQ_SM501_PCI),
+-	INTC_IRQ(I2S, IRQ_SM501_I2S), INTC_IRQ(AC, IRQ_SM501_AC),
+-	INTC_IRQ(US, IRQ_SM501_US), INTC_IRQ(U1, IRQ_SM501_U1),
+-	INTC_IRQ(U0, IRQ_SM501_U0), INTC_IRQ(CV, IRQ_SM501_CV),
+-	INTC_IRQ(MC, IRQ_SM501_MC), INTC_IRQ(S1, IRQ_SM501_S1),
+-	INTC_IRQ(S0, IRQ_SM501_S0), INTC_IRQ(UH, IRQ_SM501_UH),
+-	INTC_IRQ(TWOD, IRQ_SM501_2D), INTC_IRQ(ZD, IRQ_SM501_ZD),
+-	INTC_IRQ(PV, IRQ_SM501_PV), INTC_IRQ(CI, IRQ_SM501_CI),
+-};
+-
+-static struct intc_mask_reg mask_registers[] __initdata = {
+-	{ VOYAGER_INT_MASK, 0, 32, /* "Interrupt Mask", MMIO_base + 0x30 */
+-	  { UP, G54, G53, G52, G51, G50, G49, G48,
+-	    I2C, PW, 0, DMA, PCI, I2S, AC, US,
+-	    0, 0, U1, U0, CV, MC, S1, S0,
+-	    0, UH, 0, 0, TWOD, ZD, PV, CI } },
+-};
+-
+-static DECLARE_INTC_DESC(intc_desc, "voyagergx", vectors,
+-			 NULL, NULL, mask_registers, NULL, NULL);
+-
+-static unsigned int voyagergx_stat2irq[32] = {
+-	IRQ_SM501_CI, IRQ_SM501_PV, IRQ_SM501_ZD, IRQ_SM501_2D,
+-	0, 0, IRQ_SM501_UH, 0,
+-	IRQ_SM501_S0, IRQ_SM501_S1, IRQ_SM501_MC, IRQ_SM501_CV,
+-	IRQ_SM501_U0, IRQ_SM501_U1, 0, 0,
+-	IRQ_SM501_US, IRQ_SM501_AC, IRQ_SM501_I2S, IRQ_SM501_PCI,
+-	IRQ_SM501_DMA, 0, IRQ_SM501_PW, IRQ_SM501_I2C,
+-	IRQ_SM501_G48, IRQ_SM501_G49, IRQ_SM501_G50, IRQ_SM501_G51,
+-	IRQ_SM501_G52, IRQ_SM501_G53, IRQ_SM501_G54, IRQ_SM501_UP
+-};
+-
+-static void voyagergx_irq_demux(unsigned int irq, struct irq_desc *desc)
+-{
+-	unsigned long intv = ctrl_inl(INT_STATUS);
+-	struct irq_desc *ext_desc;
+-	unsigned int ext_irq;
+-	unsigned int k = 0;
+-
+-	while (intv) {
+-		ext_irq = voyagergx_stat2irq[k];
+-		if (ext_irq && (intv & 1)) {
+-			ext_desc = irq_desc + ext_irq;
+-			handle_level_irq(ext_irq, ext_desc);
+-		}
+-		intv >>= 1;
+-		k++;
+-	}
+-}
+-
+-void __init setup_voyagergx_irq(void)
+-{
+-	printk(KERN_INFO "VoyagerGX on irq %d (mapped into %d to %d)\n",
+-	       IRQ_VOYAGER,
+-	       VOYAGER_IRQ_BASE,
+-	       VOYAGER_IRQ_BASE + VOYAGER_IRQ_NUM - 1);
+-
+-	register_intc_controller(&intc_desc);
+-	set_irq_chained_handler(IRQ_VOYAGER, voyagergx_irq_demux);
+-}
+diff --git a/arch/sh/cchips/voyagergx/setup.c b/arch/sh/cchips/voyagergx/setup.c
+deleted file mode 100644
+index 33f0302..0000000
+--- a/arch/sh/cchips/voyagergx/setup.c
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/*
+- * arch/sh/cchips/voyagergx/setup.c
+- *
+- * Setup routines for VoyagerGX cchip.
+- *
+- * Copyright (C) 2003 Lineo uSolutions, Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the
+- * Free Software Foundation; either version 2 of the License, or (at your
+- * option) any later version.
+- */
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <asm/io.h>
+-#include <asm/voyagergx.h>
+-
+-static int __init setup_voyagergx(void)
+-{
+-	unsigned long val;
+-
+-	val = readl((void __iomem *)DRAM_CTRL);
+-	val |= (DRAM_CTRL_CPU_COLUMN_SIZE_256	|
+-		DRAM_CTRL_CPU_ACTIVE_PRECHARGE	|
+-		DRAM_CTRL_CPU_RESET		|
+-		DRAM_CTRL_REFRESH_COMMAND	|
+-		DRAM_CTRL_BLOCK_WRITE_TIME	|
+-		DRAM_CTRL_BLOCK_WRITE_PRECHARGE	|
+-		DRAM_CTRL_ACTIVE_PRECHARGE	|
+-		DRAM_CTRL_RESET			|
+-		DRAM_CTRL_REMAIN_ACTIVE);
+-	writel(val, (void __iomem *)DRAM_CTRL);
+-
+-	return 0;
+-}
+-
+-module_init(setup_voyagergx);
+diff --git a/arch/sh/configs/cayman_defconfig b/arch/sh/configs/cayman_defconfig
+new file mode 100644
+index 0000000..a05b278
+--- /dev/null
++++ b/arch/sh/configs/cayman_defconfig
+@@ -0,0 +1,1166 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc3
++# Fri Nov 23 14:15:55 2007
++#
++CONFIG_SUPERH=y
++# CONFIG_SUPERH32 is not set
++CONFIG_SUPERH64=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++# CONFIG_GENERIC_TIME is not set
++# CONFIG_GENERIC_CLOCKEVENTS is not set
++CONFIG_SYS_SUPPORTS_PCI=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_LOCKDEP_SUPPORT=y
++# CONFIG_ARCH_HAS_ILOG2_U32 is not set
++# CONFIG_ARCH_HAS_ILOG2_U64 is not set
++CONFIG_ARCH_NO_VIRT_TO_BUS=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++# CONFIG_SYSVIPC is not set
++CONFIG_POSIX_MQUEUE=y
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++# CONFIG_RELAY is not set
++# CONFIG_BLK_DEV_INITRD is not set
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_UID16=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SLOB is not set
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_AS is not set
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
++
++#
++# System type
++#
++CONFIG_CPU_SH5=y
++# CONFIG_CPU_SUBTYPE_SH7619 is not set
++# CONFIG_CPU_SUBTYPE_SH7206 is not set
++# CONFIG_CPU_SUBTYPE_SH7705 is not set
++# CONFIG_CPU_SUBTYPE_SH7706 is not set
++# CONFIG_CPU_SUBTYPE_SH7707 is not set
++# CONFIG_CPU_SUBTYPE_SH7708 is not set
++# CONFIG_CPU_SUBTYPE_SH7709 is not set
++# CONFIG_CPU_SUBTYPE_SH7710 is not set
++# CONFIG_CPU_SUBTYPE_SH7712 is not set
++# CONFIG_CPU_SUBTYPE_SH7720 is not set
++# CONFIG_CPU_SUBTYPE_SH7750 is not set
++# CONFIG_CPU_SUBTYPE_SH7091 is not set
++# CONFIG_CPU_SUBTYPE_SH7750R is not set
++# CONFIG_CPU_SUBTYPE_SH7750S is not set
++# CONFIG_CPU_SUBTYPE_SH7751 is not set
++# CONFIG_CPU_SUBTYPE_SH7751R is not set
++# CONFIG_CPU_SUBTYPE_SH7760 is not set
++# CONFIG_CPU_SUBTYPE_SH4_202 is not set
++# CONFIG_CPU_SUBTYPE_SH7770 is not set
++# CONFIG_CPU_SUBTYPE_SH7780 is not set
++# CONFIG_CPU_SUBTYPE_SH7785 is not set
++# CONFIG_CPU_SUBTYPE_SHX3 is not set
++# CONFIG_CPU_SUBTYPE_SH7343 is not set
++# CONFIG_CPU_SUBTYPE_SH7722 is not set
++CONFIG_CPU_SUBTYPE_SH5_101=y
++# CONFIG_CPU_SUBTYPE_SH5_103 is not set
++
++#
++# Memory management options
++#
++CONFIG_QUICKLIST=y
++CONFIG_MMU=y
++CONFIG_PAGE_OFFSET=0x20000000
++CONFIG_MEMORY_START=0x80000000
++CONFIG_MEMORY_SIZE=0x00400000
++CONFIG_32BIT=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_MAX_ACTIVE_REGIONS=1
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_PAGE_SIZE_4KB=y
++# CONFIG_PAGE_SIZE_8KB is not set
++# CONFIG_PAGE_SIZE_64KB is not set
++CONFIG_HUGETLB_PAGE_SIZE_64K=y
++# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
++# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
++# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
++# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
++# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++CONFIG_SPARSEMEM_STATIC=y
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ZONE_DMA_FLAG=0
++CONFIG_NR_QUICK=2
++
++#
++# Cache configuration
++#
++# CONFIG_SH_DIRECT_MAPPED is not set
++# CONFIG_CACHE_WRITEBACK is not set
++# CONFIG_CACHE_WRITETHROUGH is not set
++CONFIG_CACHE_OFF=y
++
++#
++# Processor features
++#
++CONFIG_CPU_LITTLE_ENDIAN=y
++# CONFIG_CPU_BIG_ENDIAN is not set
++CONFIG_SH_FPU=y
++# CONFIG_SH64_FPU_DENORM_FLUSH is not set
++CONFIG_SH64_USER_MISALIGNED_FIXUP=y
++CONFIG_SH64_ID2815_WORKAROUND=y
++CONFIG_CPU_HAS_FPU=y
++
++#
++# Board support
++#
++CONFIG_SH_CAYMAN=y
++
++#
++# Timer and clock configuration
++#
++CONFIG_SH_TIMER_IRQ=16
++CONFIG_SH_PCLK_FREQ=50000000
++# CONFIG_TICK_ONESHOT is not set
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# DMA support
++#
++
++#
++# Companion Chips
++#
++
++#
++# Additional SuperH Device Drivers
++#
++CONFIG_HEARTBEAT=y
++# CONFIG_PUSH_SWITCH is not set
++
++#
++# Kernel features
++#
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++# CONFIG_KEXEC is not set
++# CONFIG_CRASH_DUMP is not set
++# CONFIG_PREEMPT_NONE is not set
++# CONFIG_PREEMPT_VOLUNTARY is not set
++CONFIG_PREEMPT=y
++CONFIG_PREEMPT_BKL=y
++CONFIG_GUSA=y
++
++#
++# Boot options
++#
++CONFIG_ZERO_PAGE_OFFSET=0x00001000
++CONFIG_BOOT_LINK_OFFSET=0x00800000
++# CONFIG_CMDLINE_BOOL is not set
++
++#
++# Bus options
++#
++CONFIG_PCI=y
++CONFIG_SH_PCIDMA_NONCOHERENT=y
++CONFIG_PCI_AUTO=y
++CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++CONFIG_PCI_LEGACY=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCCARD is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_PHANTOM is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_AIC94XX is not set
++# CONFIG_SCSI_ARCMSR is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_STEX is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_QLA_ISCSI is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_SRP is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_IP1000 is not set
++# CONFIG_ARCNET is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_MII is not set
++# CONFIG_AX88796 is not set
++# CONFIG_STNIC is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++# CONFIG_SMC91X is not set
++# CONFIG_SMC911X is not set
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++CONFIG_NETDEV_1000=y
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2 is not set
++# CONFIG_QLA3XXX is not set
++# CONFIG_ATL1 is not set
++CONFIG_NETDEV_10000=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_NIU is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_TEHUTI is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_SH_SCI is not set
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++CONFIG_I2C=m
++CONFIG_I2C_BOARDINFO=y
++# CONFIG_I2C_CHARDEV is not set
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++
++#
++# PCI-based Watchdog Cards
++#
++# CONFIG_PCIPCWATCHDOG is not set
++# CONFIG_WDTPCI is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++# CONFIG_VIDEO_V4L1 is not set
++# CONFIG_VIDEO_V4L1_COMPAT is not set
++CONFIG_VIDEO_V4L2=y
++CONFIG_VIDEO_CAPTURE_DRIVERS=y
++# CONFIG_VIDEO_ADV_DEBUG is not set
++CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
++# CONFIG_VIDEO_CX23885 is not set
++# CONFIG_VIDEO_CAFE_CCIC is not set
++# CONFIG_RADIO_ADAPTERS is not set
++CONFIG_DVB_CORE=y
++# CONFIG_DVB_CORE_ATTACH is not set
++CONFIG_DVB_CAPTURE_DRIVERS=y
++
++#
++# Supported SAA7146 based PCI Adapters
++#
++
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++# CONFIG_DVB_B2C2_FLEXCOP is not set
++
++#
++# Supported BT878 Adapters
++#
++
++#
++# Supported Pluto2 Adapters
++#
++# CONFIG_DVB_PLUTO2 is not set
++
++#
++# Supported DVB Frontends
++#
++
++#
++# Customise DVB Frontends
++#
++# CONFIG_DVB_FE_CUSTOMISE is not set
++
++#
++# DVB-S (satellite) frontends
++#
++# CONFIG_DVB_STV0299 is not set
++# CONFIG_DVB_CX24110 is not set
++# CONFIG_DVB_CX24123 is not set
++# CONFIG_DVB_TDA8083 is not set
++# CONFIG_DVB_MT312 is not set
++# CONFIG_DVB_VES1X93 is not set
++# CONFIG_DVB_S5H1420 is not set
++# CONFIG_DVB_TDA10086 is not set
++
++#
++# DVB-T (terrestrial) frontends
++#
++# CONFIG_DVB_SP8870 is not set
++# CONFIG_DVB_SP887X is not set
++# CONFIG_DVB_CX22700 is not set
++# CONFIG_DVB_CX22702 is not set
++# CONFIG_DVB_L64781 is not set
++# CONFIG_DVB_TDA1004X is not set
++# CONFIG_DVB_NXT6000 is not set
++# CONFIG_DVB_MT352 is not set
++# CONFIG_DVB_ZL10353 is not set
++# CONFIG_DVB_DIB3000MB is not set
++# CONFIG_DVB_DIB3000MC is not set
++# CONFIG_DVB_DIB7000M is not set
++# CONFIG_DVB_DIB7000P is not set
++
++#
++# DVB-C (cable) frontends
++#
++# CONFIG_DVB_VES1820 is not set
++# CONFIG_DVB_TDA10021 is not set
++# CONFIG_DVB_TDA10023 is not set
++# CONFIG_DVB_STV0297 is not set
++
++#
++# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++#
++# CONFIG_DVB_NXT200X is not set
++# CONFIG_DVB_OR51211 is not set
++# CONFIG_DVB_OR51132 is not set
++# CONFIG_DVB_BCM3510 is not set
++# CONFIG_DVB_LGDT330X is not set
++# CONFIG_DVB_S5H1409 is not set
++
++#
++# Tuners/PLL support
++#
++# CONFIG_DVB_PLL is not set
++# CONFIG_DVB_TDA826X is not set
++# CONFIG_DVB_TDA827X is not set
++# CONFIG_DVB_TUNER_QT1010 is not set
++# CONFIG_DVB_TUNER_MT2060 is not set
++# CONFIG_DVB_TUNER_MT2266 is not set
++# CONFIG_DVB_TUNER_MT2131 is not set
++# CONFIG_DVB_TUNER_DIB0070 is not set
++
++#
++# Miscellaneous devices
++#
++# CONFIG_DVB_LNBP21 is not set
++# CONFIG_DVB_ISL6421 is not set
++# CONFIG_DVB_TUA6100 is not set
++CONFIG_DAB=y
++
++#
++# Graphics support
++#
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEO_OUTPUT_CONTROL=y
++CONFIG_FB=y
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB_DDC is not set
++# CONFIG_FB_CFB_FILLRECT is not set
++# CONFIG_FB_CFB_COPYAREA is not set
++# CONFIG_FB_CFB_IMAGEBLIT is not set
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
++# CONFIG_FB_SYS_FILLRECT is not set
++# CONFIG_FB_SYS_COPYAREA is not set
++# CONFIG_FB_SYS_IMAGEBLIT is not set
++# CONFIG_FB_SYS_FOPS is not set
++CONFIG_FB_DEFERRED_IO=y
++# CONFIG_FB_SVGALIB is not set
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++
++#
++# Frame buffer hardware drivers
++#
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_S3 is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_VT8623 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_ARK is not set
++# CONFIG_FB_PM3 is not set
++# CONFIG_FB_VIRTUAL is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Console display driver support
++#
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++CONFIG_FONTS=y
++# CONFIG_FONT_8x8 is not set
++CONFIG_FONT_8x16=y
++# CONFIG_FONT_6x11 is not set
++# CONFIG_FONT_7x14 is not set
++# CONFIG_FONT_PEARL_8x8 is not set
++# CONFIG_FONT_ACORN_8x8 is not set
++# CONFIG_FONT_MINI_4x6 is not set
++# CONFIG_FONT_SUN8x16 is not set
++# CONFIG_FONT_SUN12x22 is not set
++# CONFIG_FONT_10x18 is not set
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++# CONFIG_LOGO_LINUX_CLUT224 is not set
++# CONFIG_LOGO_SUPERH_MONO is not set
++# CONFIG_LOGO_SUPERH_VGA16 is not set
++CONFIG_LOGO_SUPERH_CLUT224=y
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_RTC_CLASS is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++CONFIG_MINIX_FS=y
++CONFIG_ROMFS_FS=y
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++# CONFIG_MSDOS_FS is not set
++# CONFIG_VFAT_FS is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_NLS is not set
++# CONFIG_DLM is not set
++CONFIG_INSTRUMENTATION=y
++# CONFIG_PROFILING is not set
++# CONFIG_MARKERS is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++CONFIG_DEBUG_FS=y
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++CONFIG_SCHED_DEBUG=y
++CONFIG_SCHEDSTATS=y
++# CONFIG_TIMER_STATS is not set
++# CONFIG_DEBUG_SLAB is not set
++CONFIG_DEBUG_PREEMPT=y
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_LOCK_STAT is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++CONFIG_FRAME_POINTER=y
++CONFIG_FORCED_INLINING=y
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++# CONFIG_SH_STANDARD_BIOS is not set
++# CONFIG_EARLY_SCIF_CONSOLE is not set
++# CONFIG_DEBUG_BOOTMEM is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_SH64_PROC_ASIDS=y
++CONFIG_SH64_SR_WATCH=y
++# CONFIG_POOR_MANS_STRACE is not set
++# CONFIG_SH_ALPHANUMERIC is not set
++# CONFIG_SH_NO_BSS_INIT is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++# CONFIG_CRYPTO is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
+diff --git a/arch/sh/configs/hs7751rvoip_defconfig b/arch/sh/configs/hs7751rvoip_defconfig
+deleted file mode 100644
+index 5d9da5a..0000000
+--- a/arch/sh/configs/hs7751rvoip_defconfig
++++ /dev/null
+@@ -1,908 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.18
+-# Tue Oct  3 13:04:52 2006
+-#
+-CONFIG_SUPERH=y
+-CONFIG_RWSEM_GENERIC_SPINLOCK=y
+-CONFIG_GENERIC_FIND_NEXT_BIT=y
+-CONFIG_GENERIC_HWEIGHT=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_GENERIC_IRQ_PROBE=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_LOCK_KERNEL=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
 -CONFIG_SYSVIPC=y
 -# CONFIG_IPC_NS is not set
 -CONFIG_POSIX_MQUEUE=y
@@ -81463,7 +187120,975 @@
 -# CONFIG_IP_PNP is not set
 -# CONFIG_NET_IPIP is not set
 -# CONFIG_NET_IPGRE is not set
--# CONFIG_IP_MROUTE is not set
+-# CONFIG_IP_MROUTE is not set
+-# CONFIG_ARPD is not set
+-# CONFIG_SYN_COOKIES is not set
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_XFRM_TUNNEL is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_XFRM_MODE_TRANSPORT=y
+-CONFIG_INET_XFRM_MODE_TUNNEL=y
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_CUBIC=y
+-CONFIG_DEFAULT_TCP_CONG="cubic"
+-# CONFIG_IPV6 is not set
+-# CONFIG_INET6_XFRM_TUNNEL is not set
+-# CONFIG_INET6_TUNNEL is not set
+-# CONFIG_NETWORK_SECMARK is not set
+-# CONFIG_NETFILTER is not set
+-
+-#
+-# DCCP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_DCCP is not set
+-
+-#
+-# SCTP Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_IP_SCTP is not set
+-
+-#
+-# TIPC Configuration (EXPERIMENTAL)
+-#
+-# CONFIG_TIPC is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-
+-#
+-# QoS and/or fair queueing
+-#
+-# CONFIG_NET_SCHED is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_IEEE80211 is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-CONFIG_FW_LOADER=m
+-# CONFIG_SYS_HYPERVISOR is not set
+-
+-#
+-# Connector - unified userspace <-> kernelspace linker
+-#
+-# CONFIG_CONNECTOR is not set
+-
+-#
+-# Memory Technology Devices (MTD)
+-#
+-# CONFIG_MTD is not set
+-
+-#
+-# Parallel port support
+-#
+-# CONFIG_PARPORT is not set
+-
+-#
+-# Plug and Play support
+-#
+-
+-#
+-# Block devices
+-#
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-# CONFIG_BLK_DEV_LOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_RAM is not set
+-# CONFIG_BLK_DEV_INITRD is not set
+-# CONFIG_CDROM_PKTCDVD is not set
+-# CONFIG_ATA_OVER_ETH is not set
+-
+-#
+-# ATA/ATAPI/MFM/RLL support
+-#
+-CONFIG_IDE=y
+-CONFIG_IDE_MAX_HWIFS=1
+-CONFIG_BLK_DEV_IDE=y
+-
+-#
+-# Please see Documentation/ide.txt for help/info on IDE drives
+-#
+-# CONFIG_BLK_DEV_IDE_SATA is not set
+-CONFIG_BLK_DEV_IDEDISK=y
+-# CONFIG_IDEDISK_MULTI_MODE is not set
+-# CONFIG_BLK_DEV_IDECD is not set
+-# CONFIG_BLK_DEV_IDETAPE is not set
+-# CONFIG_BLK_DEV_IDEFLOPPY is not set
+-# CONFIG_IDE_TASK_IOCTL is not set
+-
+-#
+-# IDE chipset support/bugfixes
+-#
+-CONFIG_IDE_GENERIC=y
+-# CONFIG_IDE_ARM is not set
+-# CONFIG_BLK_DEV_IDEDMA is not set
+-# CONFIG_IDEDMA_AUTO is not set
+-# CONFIG_BLK_DEV_HD is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-# CONFIG_SCSI is not set
+-# CONFIG_SCSI_NETLINK is not set
+-
+-#
+-# Serial ATA (prod) and Parallel ATA (experimental) drivers
+-#
+-# CONFIG_ATA is not set
+-
+-#
+-# Multi-device support (RAID and LVM)
+-#
+-# CONFIG_MD is not set
+-
+-#
+-# Fusion MPT device support
+-#
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-
+-#
+-# I2O device support
+-#
+-
+-#
+-# Network device support
+-#
+-CONFIG_NETDEVICES=y
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-
+-#
+-# PHY device support
+-#
+-# CONFIG_PHYLIB is not set
+-
+-#
+-# Ethernet (10 or 100Mbit)
+-#
+-CONFIG_NET_ETHERNET=y
+-CONFIG_MII=y
+-# CONFIG_STNIC is not set
+-# CONFIG_SMC91X is not set
+-
+-#
+-# Ethernet (1000 Mbit)
+-#
+-
+-#
+-# Ethernet (10000 Mbit)
+-#
+-
+-#
+-# Token Ring devices
+-#
+-
+-#
+-# Wireless LAN (non-hamradio)
+-#
+-# CONFIG_NET_RADIO is not set
+-
+-#
+-# Wan interfaces
+-#
+-# CONFIG_WAN is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-
+-#
+-# ISDN subsystem
+-#
+-# CONFIG_ISDN is not set
+-
+-#
+-# Telephony Support
+-#
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-# CONFIG_INPUT_FF_MEMLESS is not set
+-
+-#
+-# Userland interfaces
+-#
+-# CONFIG_INPUT_MOUSEDEV is not set
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_TSDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-CONFIG_SERIO=y
+-CONFIG_SERIO_I8042=y
+-CONFIG_SERIO_SERPORT=y
+-# CONFIG_SERIO_LIBPS2 is not set
+-# CONFIG_SERIO_RAW is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-# CONFIG_VT is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-# CONFIG_SERIAL_8250 is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_SH_SCI=y
+-CONFIG_SERIAL_SH_SCI_NR_UARTS=2
+-CONFIG_SERIAL_SH_SCI_CONSOLE=y
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_UNIX98_PTYS=y
+-# CONFIG_LEGACY_PTYS is not set
+-
+-#
+-# IPMI
+-#
+-# CONFIG_IPMI_HANDLER is not set
+-
+-#
+-# Watchdog Cards
+-#
+-# CONFIG_WATCHDOG is not set
+-CONFIG_HW_RANDOM=y
+-# CONFIG_GEN_RTC is not set
+-# CONFIG_DTLK is not set
+-# CONFIG_R3964 is not set
+-
+-#
+-# Ftape, the floppy tape device driver
+-#
+-# CONFIG_RAW_DRIVER is not set
+-
+-#
+-# TPM devices
+-#
+-# CONFIG_TCG_TPM is not set
+-# CONFIG_TELCLOCK is not set
+-
+-#
+-# I2C support
+-#
+-# CONFIG_I2C is not set
+-
+-#
+-# SPI support
+-#
+-# CONFIG_SPI is not set
+-# CONFIG_SPI_MASTER is not set
+-
+-#
+-# Dallas's 1-wire bus
+-#
+-
+-#
+-# Hardware Monitoring support
+-#
+-CONFIG_HWMON=y
+-# CONFIG_HWMON_VID is not set
+-# CONFIG_SENSORS_ABITUGURU is not set
+-# CONFIG_SENSORS_F71805F is not set
+-# CONFIG_SENSORS_VT1211 is not set
+-# CONFIG_HWMON_DEBUG_CHIP is not set
+-
+-#
+-# Misc devices
+-#
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-CONFIG_VIDEO_V4L2=y
+-
+-#
+-# Digital Video Broadcasting Devices
+-#
+-# CONFIG_DVB is not set
+-
+-#
+-# Graphics support
+-#
+-CONFIG_FIRMWARE_EDID=y
+-# CONFIG_FB is not set
+-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-
+-#
+-# USB support
+-#
+-# CONFIG_USB_ARCH_HAS_HCD is not set
+-# CONFIG_USB_ARCH_HAS_OHCI is not set
+-# CONFIG_USB_ARCH_HAS_EHCI is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-
+-#
+-# MMC/SD Card support
+-#
+-# CONFIG_MMC is not set
+-
+-#
+-# LED devices
+-#
+-# CONFIG_NEW_LEDS is not set
+-
+-#
+-# LED drivers
+-#
+-
+-#
+-# LED Triggers
+-#
+-
+-#
+-# InfiniBand support
+-#
+-
+-#
+-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+-#
+-
+-#
+-# Real Time Clock
+-#
+-# CONFIG_RTC_CLASS is not set
+-
+-#
+-# DMA Engine support
+-#
+-# CONFIG_DMA_ENGINE is not set
+-
+-#
+-# DMA Clients
+-#
+-
+-#
+-# DMA Devices
+-#
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-# CONFIG_EXT3_FS is not set
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_OCFS2_FS is not set
+-# CONFIG_MINIX_FS is not set
+-# CONFIG_ROMFS_FS is not set
+-CONFIG_INOTIFY=y
+-CONFIG_INOTIFY_USER=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_PROC_SYSCTL=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_POSIX_ACL is not set
+-# CONFIG_HUGETLBFS is not set
+-# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_RAMFS=y
+-# CONFIG_CONFIGFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Network File Systems
+-#
+-CONFIG_NFS_FS=y
+-CONFIG_NFS_V3=y
+-# CONFIG_NFS_V3_ACL is not set
+-CONFIG_NFS_V4=y
+-CONFIG_NFS_DIRECTIO=y
+-# CONFIG_NFSD is not set
+-CONFIG_LOCKD=y
+-CONFIG_LOCKD_V4=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-CONFIG_SUNRPC_GSS=y
+-CONFIG_RPCSEC_GSS_KRB5=y
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-# CONFIG_9P_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-CONFIG_MSDOS_PARTITION=y
+-# CONFIG_BSD_DISKLABEL is not set
+-# CONFIG_MINIX_SUBPARTITION is not set
+-# CONFIG_SOLARIS_X86_PARTITION is not set
+-# CONFIG_UNIXWARE_DISKLABEL is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_KARMA_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-
+-#
+-# Native Language Support
+-#
+-# CONFIG_NLS is not set
+-
+-#
+-# Profiling support
+-#
+-# CONFIG_PROFILING is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-CONFIG_ENABLE_MUST_CHECK=y
+-# CONFIG_MAGIC_SYSRQ is not set
+-# CONFIG_UNUSED_SYMBOLS is not set
+-# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_DEBUG_BUGVERBOSE is not set
+-# CONFIG_DEBUG_FS is not set
+-# CONFIG_SH_STANDARD_BIOS is not set
+-# CONFIG_EARLY_SCIF_CONSOLE is not set
+-# CONFIG_KGDB is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-
+-#
+-# Cryptographic options
+-#
+-CONFIG_CRYPTO=y
+-CONFIG_CRYPTO_ALGAPI=y
+-CONFIG_CRYPTO_BLKCIPHER=m
+-CONFIG_CRYPTO_MANAGER=m
+-# CONFIG_CRYPTO_HMAC is not set
+-# CONFIG_CRYPTO_NULL is not set
+-# CONFIG_CRYPTO_MD4 is not set
+-CONFIG_CRYPTO_MD5=y
+-# CONFIG_CRYPTO_SHA1 is not set
+-# CONFIG_CRYPTO_SHA256 is not set
+-# CONFIG_CRYPTO_SHA512 is not set
+-# CONFIG_CRYPTO_WP512 is not set
+-# CONFIG_CRYPTO_TGR192 is not set
+-CONFIG_CRYPTO_ECB=m
+-CONFIG_CRYPTO_CBC=m
+-CONFIG_CRYPTO_DES=y
+-# CONFIG_CRYPTO_BLOWFISH is not set
+-# CONFIG_CRYPTO_TWOFISH is not set
+-# CONFIG_CRYPTO_SERPENT is not set
+-# CONFIG_CRYPTO_AES is not set
+-# CONFIG_CRYPTO_CAST5 is not set
+-# CONFIG_CRYPTO_CAST6 is not set
+-# CONFIG_CRYPTO_TEA is not set
+-# CONFIG_CRYPTO_ARC4 is not set
+-# CONFIG_CRYPTO_KHAZAD is not set
+-# CONFIG_CRYPTO_ANUBIS is not set
+-# CONFIG_CRYPTO_DEFLATE is not set
+-# CONFIG_CRYPTO_MICHAEL_MIC is not set
+-# CONFIG_CRYPTO_CRC32C is not set
+-# CONFIG_CRYPTO_TEST is not set
+-
+-#
+-# Hardware crypto devices
+-#
+-
+-#
+-# Library routines
+-#
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-CONFIG_CRC32=y
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_PLIST=y
+diff --git a/arch/sh/configs/r7780rp_defconfig b/arch/sh/configs/r7780rp_defconfig
+deleted file mode 100644
+index 12cc019..0000000
+--- a/arch/sh/configs/r7780rp_defconfig
++++ /dev/null
+@@ -1,1328 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.21-rc7
+-# Tue May  1 12:28:39 2007
+-#
+-CONFIG_SUPERH=y
+-CONFIG_RWSEM_GENERIC_SPINLOCK=y
+-CONFIG_GENERIC_BUG=y
+-CONFIG_GENERIC_FIND_NEXT_BIT=y
+-CONFIG_GENERIC_HWEIGHT=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_GENERIC_IRQ_PROBE=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-# CONFIG_GENERIC_TIME is not set
+-CONFIG_STACKTRACE_SUPPORT=y
+-CONFIG_LOCKDEP_SUPPORT=y
+-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+-
+-#
+-# Code maturity level options
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_LOCK_KERNEL=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-
+-#
+-# General setup
+-#
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-CONFIG_SYSVIPC=y
+-# CONFIG_IPC_NS is not set
+-CONFIG_SYSVIPC_SYSCTL=y
+-# CONFIG_POSIX_MQUEUE is not set
+-CONFIG_BSD_PROCESS_ACCT=y
+-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+-# CONFIG_TASKSTATS is not set
+-# CONFIG_UTS_NS is not set
+-# CONFIG_AUDIT is not set
+-CONFIG_IKCONFIG=y
+-CONFIG_IKCONFIG_PROC=y
+-# CONFIG_SYSFS_DEPRECATED is not set
+-# CONFIG_RELAY is not set
+-# CONFIG_BLK_DEV_INITRD is not set
+-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+-CONFIG_SYSCTL=y
+-CONFIG_EMBEDDED=y
+-CONFIG_UID16=y
+-# CONFIG_SYSCTL_SYSCALL is not set
+-CONFIG_KALLSYMS=y
+-# CONFIG_KALLSYMS_ALL is not set
+-# CONFIG_KALLSYMS_EXTRA_PASS is not set
+-CONFIG_HOTPLUG=y
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_ELF_CORE=y
+-CONFIG_BASE_FULL=y
+-# CONFIG_FUTEX is not set
+-# CONFIG_EPOLL is not set
+-CONFIG_SHMEM=y
+-CONFIG_SLAB=y
+-CONFIG_VM_EVENT_COUNTERS=y
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-# CONFIG_SLOB is not set
+-
+-#
+-# Loadable module support
+-#
+-CONFIG_MODULES=y
+-CONFIG_MODULE_UNLOAD=y
+-# CONFIG_MODULE_FORCE_UNLOAD is not set
+-# CONFIG_MODVERSIONS is not set
+-# CONFIG_MODULE_SRCVERSION_ALL is not set
+-CONFIG_KMOD=y
+-
+-#
+-# Block layer
+-#
+-CONFIG_BLOCK=y
+-# CONFIG_LBD is not set
+-# CONFIG_BLK_DEV_IO_TRACE is not set
+-# CONFIG_LSF is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-# CONFIG_IOSCHED_AS is not set
+-# CONFIG_IOSCHED_DEADLINE is not set
+-# CONFIG_IOSCHED_CFQ is not set
+-# CONFIG_DEFAULT_AS is not set
+-# CONFIG_DEFAULT_DEADLINE is not set
+-# CONFIG_DEFAULT_CFQ is not set
+-CONFIG_DEFAULT_NOOP=y
+-CONFIG_DEFAULT_IOSCHED="noop"
+-
+-#
+-# System type
+-#
+-# CONFIG_SH_SOLUTION_ENGINE is not set
+-# CONFIG_SH_7722_SOLUTION_ENGINE is not set
+-# CONFIG_SH_7751_SOLUTION_ENGINE is not set
+-# CONFIG_SH_7780_SOLUTION_ENGINE is not set
+-# CONFIG_SH_7300_SOLUTION_ENGINE is not set
+-# CONFIG_SH_7343_SOLUTION_ENGINE is not set
+-# CONFIG_SH_73180_SOLUTION_ENGINE is not set
+-# CONFIG_SH_7751_SYSTEMH is not set
+-# CONFIG_SH_HP6XX is not set
+-# CONFIG_SH_SATURN is not set
+-# CONFIG_SH_DREAMCAST is not set
+-# CONFIG_SH_MPC1211 is not set
+-# CONFIG_SH_SH03 is not set
+-# CONFIG_SH_SECUREEDGE5410 is not set
+-# CONFIG_SH_HS7751RVOIP is not set
+-# CONFIG_SH_7710VOIPGW is not set
+-# CONFIG_SH_RTS7751R2D is not set
+-CONFIG_SH_HIGHLANDER=y
+-# CONFIG_SH_EDOSK7705 is not set
+-# CONFIG_SH_SH4202_MICRODEV is not set
+-# CONFIG_SH_LANDISK is not set
+-# CONFIG_SH_TITAN is not set
+-# CONFIG_SH_SHMIN is not set
+-# CONFIG_SH_7206_SOLUTION_ENGINE is not set
+-# CONFIG_SH_7619_SOLUTION_ENGINE is not set
+-# CONFIG_SH_LBOX_RE2 is not set
+-# CONFIG_SH_UNKNOWN is not set
+-CONFIG_SH_R7780RP=y
+-# CONFIG_SH_R7780MP is not set
+-# CONFIG_SH_R7785RP is not set
+-
+-#
+-# Processor selection
+-#
+-CONFIG_CPU_SH4=y
+-CONFIG_CPU_SH4A=y
+-
+-#
+-# SH-2 Processor Support
+-#
+-# CONFIG_CPU_SUBTYPE_SH7604 is not set
+-# CONFIG_CPU_SUBTYPE_SH7619 is not set
+-
+-#
+-# SH-2A Processor Support
+-#
+-# CONFIG_CPU_SUBTYPE_SH7206 is not set
+-
+-#
+-# SH-3 Processor Support
+-#
+-# CONFIG_CPU_SUBTYPE_SH7300 is not set
+-# CONFIG_CPU_SUBTYPE_SH7705 is not set
+-# CONFIG_CPU_SUBTYPE_SH7706 is not set
+-# CONFIG_CPU_SUBTYPE_SH7707 is not set
+-# CONFIG_CPU_SUBTYPE_SH7708 is not set
+-# CONFIG_CPU_SUBTYPE_SH7709 is not set
+-# CONFIG_CPU_SUBTYPE_SH7710 is not set
+-# CONFIG_CPU_SUBTYPE_SH7712 is not set
+-
+-#
+-# SH-4 Processor Support
+-#
+-# CONFIG_CPU_SUBTYPE_SH7750 is not set
+-# CONFIG_CPU_SUBTYPE_SH7091 is not set
+-# CONFIG_CPU_SUBTYPE_SH7750R is not set
+-# CONFIG_CPU_SUBTYPE_SH7750S is not set
+-# CONFIG_CPU_SUBTYPE_SH7751 is not set
+-# CONFIG_CPU_SUBTYPE_SH7751R is not set
+-# CONFIG_CPU_SUBTYPE_SH7760 is not set
+-# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+-
+-#
+-# ST40 Processor Support
+-#
+-# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
+-# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
+-
+-#
+-# SH-4A Processor Support
+-#
+-# CONFIG_CPU_SUBTYPE_SH7770 is not set
+-CONFIG_CPU_SUBTYPE_SH7780=y
+-# CONFIG_CPU_SUBTYPE_SH7785 is not set
+-
+-#
+-# SH4AL-DSP Processor Support
+-#
+-# CONFIG_CPU_SUBTYPE_SH73180 is not set
+-# CONFIG_CPU_SUBTYPE_SH7343 is not set
+-# CONFIG_CPU_SUBTYPE_SH7722 is not set
+-
+-#
+-# Memory management options
+-#
+-CONFIG_MMU=y
+-CONFIG_PAGE_OFFSET=0x80000000
+-CONFIG_MEMORY_START=0x08000000
+-CONFIG_MEMORY_SIZE=0x08000000
+-# CONFIG_32BIT is not set
+-CONFIG_VSYSCALL=y
+-CONFIG_ARCH_FLATMEM_ENABLE=y
+-CONFIG_ARCH_POPULATES_NODE_MAP=y
+-CONFIG_PAGE_SIZE_4KB=y
+-# CONFIG_PAGE_SIZE_8KB is not set
+-# CONFIG_PAGE_SIZE_64KB is not set
+-CONFIG_HUGETLB_PAGE_SIZE_64K=y
+-# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
+-# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+-# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+-# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-# CONFIG_RESOURCES_64BIT is not set
+-CONFIG_ZONE_DMA_FLAG=0
+-
+-#
+-# Cache configuration
+-#
+-# CONFIG_SH_DIRECT_MAPPED is not set
+-# CONFIG_SH_WRITETHROUGH is not set
+-# CONFIG_SH_OCRAM is not set
+-
+-#
+-# Processor features
+-#
+-CONFIG_CPU_LITTLE_ENDIAN=y
+-# CONFIG_CPU_BIG_ENDIAN is not set
+-CONFIG_SH_FPU=y
+-# CONFIG_SH_DSP is not set
+-CONFIG_SH_STORE_QUEUES=y
+-CONFIG_SPECULATIVE_EXECUTION=y
+-CONFIG_CPU_HAS_INTEVT=y
+-CONFIG_CPU_HAS_INTC_IRQ=y
+-CONFIG_CPU_HAS_SR_RB=y
+-
+-#
+-# Timer and clock configuration
+-#
+-CONFIG_SH_TMU=y
+-CONFIG_SH_TIMER_IRQ=28
+-CONFIG_NO_IDLE_HZ=y
+-CONFIG_SH_PCLK_FREQ=32000000
+-
+-#
+-# CPU Frequency scaling
+-#
+-# CONFIG_CPU_FREQ is not set
+-
+-#
+-# DMA support
+-#
+-# CONFIG_SH_DMA is not set
+-
+-#
+-# Companion Chips
+-#
+-# CONFIG_HD6446X_SERIES is not set
+-
+-#
+-# Additional SuperH Device Drivers
+-#
+-# CONFIG_HEARTBEAT is not set
+-CONFIG_PUSH_SWITCH=y
+-
+-#
+-# Kernel features
+-#
+-# CONFIG_HZ_100 is not set
+-CONFIG_HZ_250=y
+-# CONFIG_HZ_300 is not set
+-# CONFIG_HZ_1000 is not set
+-CONFIG_HZ=250
+-CONFIG_KEXEC=y
+-# CONFIG_CRASH_DUMP is not set
+-# CONFIG_SMP is not set
+-# CONFIG_PREEMPT_NONE is not set
+-# CONFIG_PREEMPT_VOLUNTARY is not set
+-CONFIG_PREEMPT=y
+-CONFIG_PREEMPT_BKL=y
+-
+-#
+-# Boot options
+-#
+-CONFIG_ZERO_PAGE_OFFSET=0x00001000
+-CONFIG_BOOT_LINK_OFFSET=0x00800000
+-# CONFIG_UBC_WAKEUP is not set
+-CONFIG_CMDLINE_BOOL=y
+-CONFIG_CMDLINE="mem=128M console=ttySC0,115200 root=/dev/sda1"
+-
+-#
+-# Bus options
+-#
+-CONFIG_PCI=y
+-CONFIG_SH_PCIDMA_NONCOHERENT=y
+-CONFIG_PCI_AUTO=y
+-CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
+-# CONFIG_PCI_DEBUG is not set
+-
+-#
+-# PCCARD (PCMCIA/CardBus) support
+-#
+-# CONFIG_PCCARD is not set
+-
+-#
+-# PCI Hotplug Support
+-#
+-# CONFIG_HOTPLUG_PCI is not set
+-
+-#
+-# Executable file formats
+-#
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_FLAT is not set
+-# CONFIG_BINFMT_MISC is not set
+-
+-#
+-# Power management options (EXPERIMENTAL)
+-#
+-# CONFIG_PM is not set
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-# CONFIG_NETDEBUG is not set
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-CONFIG_XFRM=y
+-# CONFIG_XFRM_USER is not set
+-# CONFIG_XFRM_SUB_POLICY is not set
+-# CONFIG_XFRM_MIGRATE is not set
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-# CONFIG_IP_MULTICAST is not set
+-CONFIG_IP_ADVANCED_ROUTER=y
+-CONFIG_ASK_IP_FIB_HASH=y
+-# CONFIG_IP_FIB_TRIE is not set
+-CONFIG_IP_FIB_HASH=y
+-# CONFIG_IP_MULTIPLE_TABLES is not set
+-# CONFIG_IP_ROUTE_MULTIPATH is not set
+-# CONFIG_IP_ROUTE_VERBOSE is not set
+-CONFIG_IP_PNP=y
+-CONFIG_IP_PNP_DHCP=y
+-# CONFIG_IP_PNP_BOOTP is not set
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
 -# CONFIG_ARPD is not set
 -# CONFIG_SYN_COOKIES is not set
 -# CONFIG_INET_AH is not set
@@ -81473,11 +188098,13 @@
 -# CONFIG_INET_TUNNEL is not set
 -CONFIG_INET_XFRM_MODE_TRANSPORT=y
 -CONFIG_INET_XFRM_MODE_TUNNEL=y
+-CONFIG_INET_XFRM_MODE_BEET=y
 -CONFIG_INET_DIAG=y
 -CONFIG_INET_TCP_DIAG=y
 -# CONFIG_TCP_CONG_ADVANCED is not set
 -CONFIG_TCP_CONG_CUBIC=y
 -CONFIG_DEFAULT_TCP_CONG="cubic"
+-# CONFIG_TCP_MD5SIG is not set
 -# CONFIG_IPV6 is not set
 -# CONFIG_INET6_XFRM_TUNNEL is not set
 -# CONFIG_INET6_TUNNEL is not set
@@ -81499,9 +188126,10 @@
 -#
 -# CONFIG_TIPC is not set
 -# CONFIG_ATM is not set
--# CONFIG_BRIDGE is not set
+-CONFIG_BRIDGE=m
 -# CONFIG_VLAN_8021Q is not set
 -# CONFIG_DECNET is not set
+-CONFIG_LLC=m
 -# CONFIG_LLC2 is not set
 -# CONFIG_IPX is not set
 -# CONFIG_ATALK is not set
@@ -81523,6 +188151,7 @@
 -# CONFIG_IRDA is not set
 -# CONFIG_BT is not set
 -# CONFIG_IEEE80211 is not set
+-CONFIG_WIRELESS_EXT=y
 -
 -#
 -# Device Drivers
@@ -81534,6 +188163,8 @@
 -CONFIG_STANDALONE=y
 -CONFIG_PREVENT_FIRMWARE_BUILD=y
 -CONFIG_FW_LOADER=m
+-# CONFIG_DEBUG_DRIVER is not set
+-# CONFIG_DEBUG_DEVRES is not set
 -# CONFIG_SYS_HYPERVISOR is not set
 -
 -#
@@ -81554,56 +188185,166 @@
 -#
 -# Plug and Play support
 -#
+-# CONFIG_PNPACPI is not set
 -
 -#
 -# Block devices
 -#
+-# CONFIG_BLK_CPQ_DA is not set
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
 -# CONFIG_BLK_DEV_COW_COMMON is not set
 -# CONFIG_BLK_DEV_LOOP is not set
 -# CONFIG_BLK_DEV_NBD is not set
--# CONFIG_BLK_DEV_RAM is not set
--# CONFIG_BLK_DEV_INITRD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=4096
+-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
 -# CONFIG_CDROM_PKTCDVD is not set
 -# CONFIG_ATA_OVER_ETH is not set
 -
 -#
+-# Misc devices
+-#
+-# CONFIG_SGI_IOC4 is not set
+-# CONFIG_TIFM_CORE is not set
+-
+-#
 -# ATA/ATAPI/MFM/RLL support
 -#
--CONFIG_IDE=y
--CONFIG_IDE_MAX_HWIFS=1
--CONFIG_BLK_DEV_IDE=y
+-# CONFIG_IDE is not set
 -
 -#
--# Please see Documentation/ide.txt for help/info on IDE drives
+-# SCSI device support
 -#
--# CONFIG_BLK_DEV_IDE_SATA is not set
--CONFIG_BLK_DEV_IDEDISK=y
--# CONFIG_IDEDISK_MULTI_MODE is not set
--# CONFIG_BLK_DEV_IDECD is not set
--# CONFIG_BLK_DEV_IDETAPE is not set
--# CONFIG_BLK_DEV_IDEFLOPPY is not set
--# CONFIG_IDE_TASK_IOCTL is not set
+-# CONFIG_RAID_ATTRS is not set
+-CONFIG_SCSI=y
+-# CONFIG_SCSI_TGT is not set
+-# CONFIG_SCSI_NETLINK is not set
+-CONFIG_SCSI_PROC_FS=y
 -
 -#
--# IDE chipset support/bugfixes
+-# SCSI support type (disk, tape, CD-ROM)
 -#
--CONFIG_IDE_GENERIC=y
--# CONFIG_IDE_ARM is not set
--# CONFIG_BLK_DEV_IDEDMA is not set
--# CONFIG_IDEDMA_AUTO is not set
--# CONFIG_BLK_DEV_HD is not set
+-CONFIG_BLK_DEV_SD=y
+-# CONFIG_CHR_DEV_ST is not set
+-# CONFIG_CHR_DEV_OSST is not set
+-# CONFIG_BLK_DEV_SR is not set
+-CONFIG_CHR_DEV_SG=m
+-# CONFIG_CHR_DEV_SCH is not set
 -
 -#
--# SCSI device support
+-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
 -#
--# CONFIG_RAID_ATTRS is not set
--# CONFIG_SCSI is not set
--# CONFIG_SCSI_NETLINK is not set
+-# CONFIG_SCSI_MULTI_LUN is not set
+-# CONFIG_SCSI_CONSTANTS is not set
+-# CONFIG_SCSI_LOGGING is not set
+-# CONFIG_SCSI_SCAN_ASYNC is not set
+-
+-#
+-# SCSI Transports
+-#
+-# CONFIG_SCSI_SPI_ATTRS is not set
+-# CONFIG_SCSI_FC_ATTRS is not set
+-# CONFIG_SCSI_ISCSI_ATTRS is not set
+-# CONFIG_SCSI_SAS_ATTRS is not set
+-# CONFIG_SCSI_SAS_LIBSAS is not set
+-
+-#
+-# SCSI low-level drivers
+-#
+-# CONFIG_ISCSI_TCP is not set
+-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+-# CONFIG_SCSI_3W_9XXX is not set
+-# CONFIG_SCSI_ACARD is not set
+-# CONFIG_SCSI_AACRAID is not set
+-# CONFIG_SCSI_AIC7XXX is not set
+-# CONFIG_SCSI_AIC7XXX_OLD is not set
+-# CONFIG_SCSI_AIC79XX is not set
+-# CONFIG_SCSI_AIC94XX is not set
+-# CONFIG_SCSI_DPT_I2O is not set
+-# CONFIG_SCSI_ARCMSR is not set
+-# CONFIG_MEGARAID_NEWGEN is not set
+-# CONFIG_MEGARAID_LEGACY is not set
+-# CONFIG_MEGARAID_SAS is not set
+-# CONFIG_SCSI_HPTIOP is not set
+-# CONFIG_SCSI_DMX3191D is not set
+-# CONFIG_SCSI_FUTURE_DOMAIN is not set
+-# CONFIG_SCSI_IPS is not set
+-# CONFIG_SCSI_INITIO is not set
+-# CONFIG_SCSI_INIA100 is not set
+-# CONFIG_SCSI_STEX is not set
+-# CONFIG_SCSI_SYM53C8XX_2 is not set
+-# CONFIG_SCSI_IPR is not set
+-# CONFIG_SCSI_QLOGIC_1280 is not set
+-# CONFIG_SCSI_QLA_FC is not set
+-# CONFIG_SCSI_QLA_ISCSI is not set
+-# CONFIG_SCSI_LPFC is not set
+-# CONFIG_SCSI_DC395x is not set
+-# CONFIG_SCSI_DC390T is not set
+-# CONFIG_SCSI_NSP32 is not set
+-# CONFIG_SCSI_DEBUG is not set
+-# CONFIG_SCSI_SRP is not set
 -
 -#
 -# Serial ATA (prod) and Parallel ATA (experimental) drivers
 -#
--# CONFIG_ATA is not set
+-CONFIG_ATA=y
+-# CONFIG_ATA_NONSTANDARD is not set
+-# CONFIG_SATA_AHCI is not set
+-# CONFIG_SATA_SVW is not set
+-# CONFIG_ATA_PIIX is not set
+-# CONFIG_SATA_MV is not set
+-# CONFIG_SATA_NV is not set
+-# CONFIG_PDC_ADMA is not set
+-# CONFIG_SATA_QSTOR is not set
+-# CONFIG_SATA_PROMISE is not set
+-# CONFIG_SATA_SX4 is not set
+-CONFIG_SATA_SIL=y
+-# CONFIG_SATA_SIL24 is not set
+-# CONFIG_SATA_SIS is not set
+-# CONFIG_SATA_ULI is not set
+-# CONFIG_SATA_VIA is not set
+-# CONFIG_SATA_VITESSE is not set
+-# CONFIG_SATA_INIC162X is not set
+-# CONFIG_PATA_ALI is not set
+-# CONFIG_PATA_AMD is not set
+-# CONFIG_PATA_ARTOP is not set
+-# CONFIG_PATA_ATIIXP is not set
+-# CONFIG_PATA_CMD64X is not set
+-# CONFIG_PATA_CS5520 is not set
+-# CONFIG_PATA_CS5530 is not set
+-# CONFIG_PATA_CYPRESS is not set
+-# CONFIG_PATA_EFAR is not set
+-# CONFIG_ATA_GENERIC is not set
+-# CONFIG_PATA_HPT366 is not set
+-# CONFIG_PATA_HPT37X is not set
+-# CONFIG_PATA_HPT3X2N is not set
+-# CONFIG_PATA_HPT3X3 is not set
+-# CONFIG_PATA_IT821X is not set
+-# CONFIG_PATA_IT8213 is not set
+-# CONFIG_PATA_JMICRON is not set
+-# CONFIG_PATA_TRIFLEX is not set
+-# CONFIG_PATA_MARVELL is not set
+-# CONFIG_PATA_MPIIX is not set
+-# CONFIG_PATA_OLDPIIX is not set
+-# CONFIG_PATA_NETCELL is not set
+-# CONFIG_PATA_NS87410 is not set
+-# CONFIG_PATA_OPTI is not set
+-# CONFIG_PATA_OPTIDMA is not set
+-# CONFIG_PATA_PDC_OLD is not set
+-# CONFIG_PATA_RADISYS is not set
+-# CONFIG_PATA_RZ1000 is not set
+-# CONFIG_PATA_SC1200 is not set
+-# CONFIG_PATA_SERVERWORKS is not set
+-# CONFIG_PATA_PDC2027X is not set
+-# CONFIG_PATA_SIL680 is not set
+-# CONFIG_PATA_SIS is not set
+-# CONFIG_PATA_VIA is not set
+-# CONFIG_PATA_WINBOND is not set
+-CONFIG_PATA_PLATFORM=y
 -
 -#
 -# Multi-device support (RAID and LVM)
@@ -81614,14 +188355,19 @@
 -# Fusion MPT device support
 -#
 -# CONFIG_FUSION is not set
+-# CONFIG_FUSION_SPI is not set
+-# CONFIG_FUSION_FC is not set
+-# CONFIG_FUSION_SAS is not set
 -
 -#
 -# IEEE 1394 (FireWire) support
 -#
+-# CONFIG_IEEE1394 is not set
 -
 -#
 -# I2O device support
 -#
+-# CONFIG_I2O is not set
 -
 -#
 -# Network device support
@@ -81633,6 +188379,11 @@
 -# CONFIG_TUN is not set
 -
 -#
+-# ARCnet devices
+-#
+-# CONFIG_ARCNET is not set
+-
+-#
 -# PHY device support
 -#
 -# CONFIG_PHYLIB is not set
@@ -81643,31 +188394,122 @@
 -CONFIG_NET_ETHERNET=y
 -CONFIG_MII=y
 -# CONFIG_STNIC is not set
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_CASSINI is not set
+-# CONFIG_NET_VENDOR_3COM is not set
 -# CONFIG_SMC91X is not set
 -
 -#
+-# Tulip family network device support
+-#
+-# CONFIG_NET_TULIP is not set
+-# CONFIG_HP100 is not set
+-CONFIG_NET_PCI=y
+-CONFIG_PCNET32=m
+-# CONFIG_PCNET32_NAPI is not set
+-# CONFIG_AMD8111_ETH is not set
+-# CONFIG_ADAPTEC_STARFIRE is not set
+-# CONFIG_B44 is not set
+-# CONFIG_FORCEDETH is not set
+-# CONFIG_DGRS is not set
+-# CONFIG_EEPRO100 is not set
+-# CONFIG_E100 is not set
+-# CONFIG_FEALNX is not set
+-# CONFIG_NATSEMI is not set
+-# CONFIG_NE2K_PCI is not set
+-CONFIG_8139CP=m
+-CONFIG_8139TOO=m
+-# CONFIG_8139TOO_PIO is not set
+-# CONFIG_8139TOO_TUNE_TWISTER is not set
+-CONFIG_8139TOO_8129=y
+-# CONFIG_8139_OLD_RX_RESET is not set
+-# CONFIG_SIS900 is not set
+-# CONFIG_EPIC100 is not set
+-# CONFIG_SUNDANCE is not set
+-# CONFIG_TLAN is not set
+-CONFIG_VIA_RHINE=m
+-CONFIG_VIA_RHINE_MMIO=y
+-# CONFIG_VIA_RHINE_NAPI is not set
+-# CONFIG_SC92031 is not set
+-
+-#
 -# Ethernet (1000 Mbit)
 -#
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-CONFIG_E1000=m
+-# CONFIG_E1000_NAPI is not set
+-# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-CONFIG_R8169=y
+-# CONFIG_R8169_NAPI is not set
+-# CONFIG_SIS190 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SKY2 is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_VIA_VELOCITY is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-# CONFIG_QLA3XXX is not set
+-# CONFIG_ATL1 is not set
 -
 -#
 -# Ethernet (10000 Mbit)
 -#
+-# CONFIG_CHELSIO_T1 is not set
+-# CONFIG_CHELSIO_T3 is not set
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-# CONFIG_MYRI10GE is not set
+-# CONFIG_NETXEN_NIC is not set
 -
 -#
 -# Token Ring devices
 -#
+-# CONFIG_TR is not set
 -
 -#
 -# Wireless LAN (non-hamradio)
 -#
--# CONFIG_NET_RADIO is not set
+-CONFIG_NET_RADIO=y
+-# CONFIG_NET_WIRELESS_RTNETLINK is not set
+-
+-#
+-# Obsolete Wireless cards support (pre-802.11)
+-#
+-# CONFIG_STRIP is not set
+-
+-#
+-# Wireless 802.11b ISA/PCI cards support
+-#
+-# CONFIG_IPW2100 is not set
+-# CONFIG_IPW2200 is not set
+-CONFIG_HERMES=m
+-# CONFIG_PLX_HERMES is not set
+-# CONFIG_TMD_HERMES is not set
+-# CONFIG_NORTEL_HERMES is not set
+-# CONFIG_PCI_HERMES is not set
+-# CONFIG_ATMEL is not set
+-
+-#
+-# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+-#
+-CONFIG_PRISM54=m
+-# CONFIG_HOSTAP is not set
+-CONFIG_NET_WIRELESS=y
 -
 -#
 -# Wan interfaces
 -#
 -# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
 -# CONFIG_PPP is not set
 -# CONFIG_SLIP is not set
+-# CONFIG_NET_FC is not set
 -# CONFIG_SHAPER is not set
 -# CONFIG_NETCONSOLE is not set
 -# CONFIG_NETPOLL is not set
@@ -81692,7 +188534,10 @@
 -#
 -# Userland interfaces
 -#
--# CONFIG_INPUT_MOUSEDEV is not set
+-CONFIG_INPUT_MOUSEDEV=y
+-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
 -# CONFIG_INPUT_JOYDEV is not set
 -# CONFIG_INPUT_TSDEV is not set
 -# CONFIG_INPUT_EVDEV is not set
@@ -81701,7 +188546,13 @@
 -#
 -# Input Device Drivers
 -#
--# CONFIG_INPUT_KEYBOARD is not set
+-CONFIG_INPUT_KEYBOARD=y
+-CONFIG_KEYBOARD_ATKBD=y
+-# CONFIG_KEYBOARD_SUNKBD is not set
+-# CONFIG_KEYBOARD_LKKBD is not set
+-# CONFIG_KEYBOARD_XTKBD is not set
+-# CONFIG_KEYBOARD_NEWTON is not set
+-# CONFIG_KEYBOARD_STOWAWAY is not set
 -# CONFIG_INPUT_MOUSE is not set
 -# CONFIG_INPUT_JOYSTICK is not set
 -# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -81711,9 +188562,10 @@
 -# Hardware I/O ports
 -#
 -CONFIG_SERIO=y
--CONFIG_SERIO_I8042=y
--CONFIG_SERIO_SERPORT=y
--# CONFIG_SERIO_LIBPS2 is not set
+-# CONFIG_SERIO_I8042 is not set
+-# CONFIG_SERIO_SERPORT is not set
+-# CONFIG_SERIO_PCIPS2 is not set
+-CONFIG_SERIO_LIBPS2=y
 -# CONFIG_SERIO_RAW is not set
 -# CONFIG_GAMEPORT is not set
 -
@@ -81736,8 +188588,10 @@
 -CONFIG_SERIAL_SH_SCI_CONSOLE=y
 -CONFIG_SERIAL_CORE=y
 -CONFIG_SERIAL_CORE_CONSOLE=y
+-# CONFIG_SERIAL_JSM is not set
 -CONFIG_UNIX98_PTYS=y
--# CONFIG_LEGACY_PTYS is not set
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
 -
 -#
 -# IPMI
@@ -81752,17 +188606,14 @@
 -# CONFIG_GEN_RTC is not set
 -# CONFIG_DTLK is not set
 -# CONFIG_R3964 is not set
--
--#
--# Ftape, the floppy tape device driver
--#
+-# CONFIG_APPLICOM is not set
+-# CONFIG_DRM is not set
 -# CONFIG_RAW_DRIVER is not set
 -
 -#
 -# TPM devices
 -#
 -# CONFIG_TCG_TPM is not set
--# CONFIG_TELCLOCK is not set
 -
 -#
 -# I2C support
@@ -81778,6 +188629,7 @@
 -#
 -# Dallas's 1-wire bus
 -#
+-# CONFIG_W1 is not set
 -
 -#
 -# Hardware Monitoring support
@@ -81786,18 +188638,19 @@
 -# CONFIG_HWMON_VID is not set
 -# CONFIG_SENSORS_ABITUGURU is not set
 -# CONFIG_SENSORS_F71805F is not set
+-# CONFIG_SENSORS_PC87427 is not set
 -# CONFIG_SENSORS_VT1211 is not set
 -# CONFIG_HWMON_DEBUG_CHIP is not set
 -
 -#
--# Misc devices
+-# Multifunction device drivers
 -#
+-# CONFIG_MFD_SM501 is not set
 -
 -#
 -# Multimedia devices
 -#
 -# CONFIG_VIDEO_DEV is not set
--CONFIG_VIDEO_V4L2=y
 -
 -#
 -# Digital Video Broadcasting Devices
@@ -81807,21 +188660,44 @@
 -#
 -# Graphics support
 -#
--CONFIG_FIRMWARE_EDID=y
--# CONFIG_FB is not set
 -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-# CONFIG_FB is not set
 -
 -#
 -# Sound
 -#
--# CONFIG_SOUND is not set
+-CONFIG_SOUND=m
+-
+-#
+-# Advanced Linux Sound Architecture
+-#
+-# CONFIG_SND is not set
+-
+-#
+-# Open Sound System
+-#
+-CONFIG_SOUND_PRIME=m
+-# CONFIG_OBSOLETE_OSS is not set
+-# CONFIG_SOUND_BT878 is not set
+-# CONFIG_SOUND_ICH is not set
+-# CONFIG_SOUND_TRIDENT is not set
+-# CONFIG_SOUND_MSNDCLAS is not set
+-# CONFIG_SOUND_MSNDPIN is not set
+-# CONFIG_SOUND_VIA82CXXX is not set
+-
+-#
+-# HID Devices
+-#
+-CONFIG_HID=y
+-# CONFIG_HID_DEBUG is not set
 -
 -#
 -# USB support
 -#
--# CONFIG_USB_ARCH_HAS_HCD is not set
--# CONFIG_USB_ARCH_HAS_OHCI is not set
--# CONFIG_USB_ARCH_HAS_EHCI is not set
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-CONFIG_USB_ARCH_HAS_EHCI=y
+-# CONFIG_USB is not set
 -
 -#
 -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -81853,6 +188729,7 @@
 -#
 -# InfiniBand support
 -#
+-# CONFIG_INFINIBAND is not set
 -
 -#
 -# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
@@ -81861,7 +188738,29 @@
 -#
 -# Real Time Clock
 -#
--# CONFIG_RTC_CLASS is not set
+-CONFIG_RTC_LIB=y
+-CONFIG_RTC_CLASS=y
+-CONFIG_RTC_HCTOSYS=y
+-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+-# CONFIG_RTC_DEBUG is not set
+-
+-#
+-# RTC interfaces
+-#
+-CONFIG_RTC_INTF_SYSFS=y
+-CONFIG_RTC_INTF_PROC=y
+-CONFIG_RTC_INTF_DEV=y
+-# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+-
+-#
+-# RTC drivers
+-#
+-# CONFIG_RTC_DRV_DS1553 is not set
+-# CONFIG_RTC_DRV_DS1742 is not set
+-# CONFIG_RTC_DRV_M48T86 is not set
+-CONFIG_RTC_DRV_SH=y
+-# CONFIG_RTC_DRV_TEST is not set
+-# CONFIG_RTC_DRV_V3020 is not set
 -
 -#
 -# DMA Engine support
@@ -81877,18 +188776,34 @@
 -#
 -
 -#
+-# Auxiliary Display support
+-#
+-
+-#
+-# Virtualization
+-#
+-
+-#
 -# File systems
 -#
 -CONFIG_EXT2_FS=y
 -# CONFIG_EXT2_FS_XATTR is not set
 -# CONFIG_EXT2_FS_XIP is not set
--# CONFIG_EXT3_FS is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-# CONFIG_EXT4DEV_FS is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
 -# CONFIG_REISERFS_FS is not set
 -# CONFIG_JFS_FS is not set
--# CONFIG_FS_POSIX_ACL is not set
+-CONFIG_FS_POSIX_ACL=y
 -# CONFIG_XFS_FS is not set
+-# CONFIG_GFS2_FS is not set
 -# CONFIG_OCFS2_FS is not set
--# CONFIG_MINIX_FS is not set
+-CONFIG_MINIX_FS=y
 -# CONFIG_ROMFS_FS is not set
 -CONFIG_INOTIFY=y
 -CONFIG_INOTIFY_USER=y
@@ -81896,7 +188811,7 @@
 -CONFIG_DNOTIFY=y
 -# CONFIG_AUTOFS_FS is not set
 -# CONFIG_AUTOFS4_FS is not set
--# CONFIG_FUSE_FS is not set
+-CONFIG_FUSE_FS=m
 -
 -#
 -# CD-ROM/DVD Filesystems
@@ -81907,9 +188822,14 @@
 -#
 -# DOS/FAT/NT Filesystems
 -#
--# CONFIG_MSDOS_FS is not set
--# CONFIG_VFAT_FS is not set
--# CONFIG_NTFS_FS is not set
+-CONFIG_FAT_FS=y
+-CONFIG_MSDOS_FS=y
+-CONFIG_VFAT_FS=y
+-CONFIG_FAT_DEFAULT_CODEPAGE=437
+-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+-CONFIG_NTFS_FS=y
+-# CONFIG_NTFS_DEBUG is not set
+-CONFIG_NTFS_RW=y
 -
 -#
 -# Pseudo filesystems
@@ -81920,10 +188840,10 @@
 -CONFIG_SYSFS=y
 -CONFIG_TMPFS=y
 -# CONFIG_TMPFS_POSIX_ACL is not set
--# CONFIG_HUGETLBFS is not set
--# CONFIG_HUGETLB_PAGE is not set
+-CONFIG_HUGETLBFS=y
+-CONFIG_HUGETLB_PAGE=y
 -CONFIG_RAMFS=y
--# CONFIG_CONFIGFS_FS is not set
+-CONFIG_CONFIGFS_FS=m
 -
 -#
 -# Miscellaneous filesystems
@@ -81949,10 +188869,16 @@
 -CONFIG_NFS_V3=y
 -# CONFIG_NFS_V3_ACL is not set
 -CONFIG_NFS_V4=y
--CONFIG_NFS_DIRECTIO=y
--# CONFIG_NFSD is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-CONFIG_NFSD=y
+-CONFIG_NFSD_V3=y
+-# CONFIG_NFSD_V3_ACL is not set
+-CONFIG_NFSD_V4=y
+-CONFIG_NFSD_TCP=y
+-CONFIG_ROOT_NFS=y
 -CONFIG_LOCKD=y
 -CONFIG_LOCKD_V4=y
+-CONFIG_EXPORTFS=y
 -CONFIG_NFS_COMMON=y
 -CONFIG_SUNRPC=y
 -CONFIG_SUNRPC_GSS=y
@@ -81968,48 +188894,104 @@
 -#
 -# Partition Types
 -#
--CONFIG_PARTITION_ADVANCED=y
--# CONFIG_ACORN_PARTITION is not set
--# CONFIG_OSF_PARTITION is not set
--# CONFIG_AMIGA_PARTITION is not set
--# CONFIG_ATARI_PARTITION is not set
--# CONFIG_MAC_PARTITION is not set
+-# CONFIG_PARTITION_ADVANCED is not set
 -CONFIG_MSDOS_PARTITION=y
--# CONFIG_BSD_DISKLABEL is not set
--# CONFIG_MINIX_SUBPARTITION is not set
--# CONFIG_SOLARIS_X86_PARTITION is not set
--# CONFIG_UNIXWARE_DISKLABEL is not set
--# CONFIG_LDM_PARTITION is not set
--# CONFIG_SGI_PARTITION is not set
--# CONFIG_ULTRIX_PARTITION is not set
--# CONFIG_SUN_PARTITION is not set
--# CONFIG_KARMA_PARTITION is not set
--# CONFIG_EFI_PARTITION is not set
 -
 -#
 -# Native Language Support
 -#
--# CONFIG_NLS is not set
+-CONFIG_NLS=y
+-CONFIG_NLS_DEFAULT="iso8859-1"
+-CONFIG_NLS_CODEPAGE_437=y
+-# CONFIG_NLS_CODEPAGE_737 is not set
+-# CONFIG_NLS_CODEPAGE_775 is not set
+-# CONFIG_NLS_CODEPAGE_850 is not set
+-# CONFIG_NLS_CODEPAGE_852 is not set
+-# CONFIG_NLS_CODEPAGE_855 is not set
+-# CONFIG_NLS_CODEPAGE_857 is not set
+-# CONFIG_NLS_CODEPAGE_860 is not set
+-# CONFIG_NLS_CODEPAGE_861 is not set
+-# CONFIG_NLS_CODEPAGE_862 is not set
+-# CONFIG_NLS_CODEPAGE_863 is not set
+-# CONFIG_NLS_CODEPAGE_864 is not set
+-# CONFIG_NLS_CODEPAGE_865 is not set
+-# CONFIG_NLS_CODEPAGE_866 is not set
+-# CONFIG_NLS_CODEPAGE_869 is not set
+-# CONFIG_NLS_CODEPAGE_936 is not set
+-# CONFIG_NLS_CODEPAGE_950 is not set
+-CONFIG_NLS_CODEPAGE_932=y
+-# CONFIG_NLS_CODEPAGE_949 is not set
+-# CONFIG_NLS_CODEPAGE_874 is not set
+-# CONFIG_NLS_ISO8859_8 is not set
+-# CONFIG_NLS_CODEPAGE_1250 is not set
+-# CONFIG_NLS_CODEPAGE_1251 is not set
+-# CONFIG_NLS_ASCII is not set
+-CONFIG_NLS_ISO8859_1=y
+-# CONFIG_NLS_ISO8859_2 is not set
+-# CONFIG_NLS_ISO8859_3 is not set
+-# CONFIG_NLS_ISO8859_4 is not set
+-# CONFIG_NLS_ISO8859_5 is not set
+-# CONFIG_NLS_ISO8859_6 is not set
+-# CONFIG_NLS_ISO8859_7 is not set
+-# CONFIG_NLS_ISO8859_9 is not set
+-# CONFIG_NLS_ISO8859_13 is not set
+-# CONFIG_NLS_ISO8859_14 is not set
+-# CONFIG_NLS_ISO8859_15 is not set
+-# CONFIG_NLS_KOI8_R is not set
+-# CONFIG_NLS_KOI8_U is not set
+-# CONFIG_NLS_UTF8 is not set
+-
+-#
+-# Distributed Lock Manager
+-#
+-# CONFIG_DLM is not set
 -
 -#
 -# Profiling support
 -#
--# CONFIG_PROFILING is not set
+-CONFIG_PROFILING=y
+-CONFIG_OPROFILE=m
 -
 -#
 -# Kernel hacking
 -#
+-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 -# CONFIG_PRINTK_TIME is not set
 -CONFIG_ENABLE_MUST_CHECK=y
--# CONFIG_MAGIC_SYSRQ is not set
+-CONFIG_MAGIC_SYSRQ=y
 -# CONFIG_UNUSED_SYMBOLS is not set
--# CONFIG_DEBUG_KERNEL is not set
+-CONFIG_DEBUG_FS=y
+-# CONFIG_HEADERS_CHECK is not set
+-CONFIG_DEBUG_KERNEL=y
+-# CONFIG_DEBUG_SHIRQ is not set
 -CONFIG_LOG_BUF_SHIFT=14
--# CONFIG_DEBUG_BUGVERBOSE is not set
--# CONFIG_DEBUG_FS is not set
--# CONFIG_SH_STANDARD_BIOS is not set
+-CONFIG_DETECT_SOFTLOCKUP=y
+-# CONFIG_SCHEDSTATS is not set
+-# CONFIG_TIMER_STATS is not set
+-# CONFIG_DEBUG_SLAB is not set
+-# CONFIG_DEBUG_PREEMPT is not set
+-# CONFIG_DEBUG_SPINLOCK is not set
+-# CONFIG_DEBUG_MUTEXES is not set
+-# CONFIG_DEBUG_LOCK_ALLOC is not set
+-# CONFIG_PROVE_LOCKING is not set
+-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+-# CONFIG_DEBUG_KOBJECT is not set
+-CONFIG_DEBUG_BUGVERBOSE=y
+-CONFIG_DEBUG_INFO=y
+-# CONFIG_DEBUG_VM is not set
+-# CONFIG_DEBUG_LIST is not set
+-# CONFIG_FRAME_POINTER is not set
+-CONFIG_FORCED_INLINING=y
+-# CONFIG_RCU_TORTURE_TEST is not set
+-# CONFIG_FAULT_INJECTION is not set
+-CONFIG_SH_STANDARD_BIOS=y
 -# CONFIG_EARLY_SCIF_CONSOLE is not set
--# CONFIG_KGDB is not set
+-CONFIG_EARLY_PRINTK=y
+-CONFIG_DEBUG_STACKOVERFLOW=y
+-# CONFIG_DEBUG_STACK_USAGE is not set
+-# CONFIG_4KSTACKS is not set
+-# CONFIG_SH_KGDB is not set
 -
 -#
 -# Security options
@@ -82022,9 +189004,11 @@
 -#
 -CONFIG_CRYPTO=y
 -CONFIG_CRYPTO_ALGAPI=y
--CONFIG_CRYPTO_BLKCIPHER=m
--CONFIG_CRYPTO_MANAGER=m
--# CONFIG_CRYPTO_HMAC is not set
+-CONFIG_CRYPTO_BLKCIPHER=y
+-CONFIG_CRYPTO_HASH=y
+-CONFIG_CRYPTO_MANAGER=y
+-CONFIG_CRYPTO_HMAC=y
+-# CONFIG_CRYPTO_XCBC is not set
 -# CONFIG_CRYPTO_NULL is not set
 -# CONFIG_CRYPTO_MD4 is not set
 -CONFIG_CRYPTO_MD5=y
@@ -82033,9 +189017,13 @@
 -# CONFIG_CRYPTO_SHA512 is not set
 -# CONFIG_CRYPTO_WP512 is not set
 -# CONFIG_CRYPTO_TGR192 is not set
+-# CONFIG_CRYPTO_GF128MUL is not set
 -CONFIG_CRYPTO_ECB=m
--CONFIG_CRYPTO_CBC=m
+-CONFIG_CRYPTO_CBC=y
+-CONFIG_CRYPTO_PCBC=m
+-# CONFIG_CRYPTO_LRW is not set
 -CONFIG_CRYPTO_DES=y
+-# CONFIG_CRYPTO_FCRYPT is not set
 -# CONFIG_CRYPTO_BLOWFISH is not set
 -# CONFIG_CRYPTO_TWOFISH is not set
 -# CONFIG_CRYPTO_SERPENT is not set
@@ -82049,6 +189037,7 @@
 -# CONFIG_CRYPTO_DEFLATE is not set
 -# CONFIG_CRYPTO_MICHAEL_MIC is not set
 -# CONFIG_CRYPTO_CRC32C is not set
+-# CONFIG_CRYPTO_CAMELLIA is not set
 -# CONFIG_CRYPTO_TEST is not set
 -
 -#
@@ -82058,196506 +189047,229790 @@
 -#
 -# Library routines
 -#
+-CONFIG_BITREVERSE=y
 -# CONFIG_CRC_CCITT is not set
 -# CONFIG_CRC16 is not set
 -CONFIG_CRC32=y
 -# CONFIG_LIBCRC32C is not set
--CONFIG_PLIST=y
-diff --git a/arch/sh/configs/r7780rp_defconfig b/arch/sh/configs/r7780rp_defconfig
+-CONFIG_HAS_IOMEM=y
+-CONFIG_HAS_IOPORT=y
+diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
+index 2e43a2a..0dc1ce7 100644
+--- a/arch/sh/configs/r7785rp_defconfig
++++ b/arch/sh/configs/r7785rp_defconfig
+@@ -1,9 +1,10 @@
+ #
+ # Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc2
+-# Tue Nov 13 20:34:57 2007
++# Linux kernel version: 2.6.24-rc3
++# Fri Nov 23 14:03:57 2007
+ #
+ CONFIG_SUPERH=y
++CONFIG_SUPERH32=y
+ CONFIG_RWSEM_GENERIC_SPINLOCK=y
+ CONFIG_GENERIC_BUG=y
+ CONFIG_GENERIC_FIND_NEXT_BIT=y
+@@ -39,6 +40,7 @@ CONFIG_BSD_PROCESS_ACCT=y
+ # CONFIG_BSD_PROCESS_ACCT_V3 is not set
+ # CONFIG_TASKSTATS is not set
+ # CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
+ # CONFIG_AUDIT is not set
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+@@ -130,6 +132,8 @@ CONFIG_CPU_SUBTYPE_SH7785=y
+ # CONFIG_CPU_SUBTYPE_SHX3 is not set
+ # CONFIG_CPU_SUBTYPE_SH7343 is not set
+ # CONFIG_CPU_SUBTYPE_SH7722 is not set
++# CONFIG_CPU_SUBTYPE_SH5_101 is not set
++# CONFIG_CPU_SUBTYPE_SH5_103 is not set
+ 
+ #
+ # Memory management options
+@@ -139,7 +143,8 @@ CONFIG_MMU=y
+ CONFIG_PAGE_OFFSET=0x80000000
+ CONFIG_MEMORY_START=0x08000000
+ CONFIG_MEMORY_SIZE=0x08000000
+-# CONFIG_32BIT is not set
++CONFIG_29BIT=y
++# CONFIG_PMB is not set
+ # CONFIG_X2TLB is not set
+ CONFIG_VSYSCALL=y
+ # CONFIG_NUMA is not set
+@@ -158,6 +163,7 @@ CONFIG_PAGE_SIZE_4KB=y
+ CONFIG_HUGETLB_PAGE_SIZE_1MB=y
+ # CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+ # CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
++# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
+ CONFIG_SELECT_MEMORY_MODEL=y
+ # CONFIG_FLATMEM_MANUAL is not set
+ # CONFIG_DISCONTIGMEM_MANUAL is not set
+@@ -701,6 +707,7 @@ CONFIG_DEVPORT=y
+ # CONFIG_POWER_SUPPLY is not set
+ CONFIG_HWMON=y
+ # CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_I5K_AMB is not set
+ # CONFIG_SENSORS_F71805F is not set
+ # CONFIG_SENSORS_F71882FG is not set
+ # CONFIG_SENSORS_IT87 is not set
+diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
+new file mode 100644
+index 0000000..bb9bcd6
+--- /dev/null
++++ b/arch/sh/configs/sdk7780_defconfig
+@@ -0,0 +1,1394 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.24-rc7
++# Tue Jan 22 11:34:03 2008
++#
++CONFIG_SUPERH=y
++CONFIG_SUPERH32=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_BUG=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_SYS_SUPPORTS_PCI=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_LOCKDEP_SUPPORT=y
++# CONFIG_ARCH_HAS_ILOG2_U32 is not set
++# CONFIG_ARCH_HAS_ILOG2_U64 is not set
++CONFIG_ARCH_NO_VIRT_TO_BUS=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_LOCALVERSION="_SDK7780"
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++# CONFIG_AUDIT is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=18
++# CONFIG_CGROUPS is not set
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_FAIR_USER_SCHED=y
++# CONFIG_FAIR_CGROUP_SCHED is not set
++CONFIG_SYSFS_DEPRECATED=y
++CONFIG_RELAY=y
++# CONFIG_BLK_DEV_INITRD is not set
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++CONFIG_SYSCTL=y
++CONFIG_EMBEDDED=y
++CONFIG_UID16=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_ANON_INODES=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_BLOCK=y
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_LSF is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# System type
++#
++CONFIG_CPU_SH4=y
++CONFIG_CPU_SH4A=y
++# CONFIG_CPU_SUBTYPE_SH7619 is not set
++# CONFIG_CPU_SUBTYPE_SH7203 is not set
++# CONFIG_CPU_SUBTYPE_SH7206 is not set
++# CONFIG_CPU_SUBTYPE_SH7263 is not set
++# CONFIG_CPU_SUBTYPE_SH7705 is not set
++# CONFIG_CPU_SUBTYPE_SH7706 is not set
++# CONFIG_CPU_SUBTYPE_SH7707 is not set
++# CONFIG_CPU_SUBTYPE_SH7708 is not set
++# CONFIG_CPU_SUBTYPE_SH7709 is not set
++# CONFIG_CPU_SUBTYPE_SH7710 is not set
++# CONFIG_CPU_SUBTYPE_SH7712 is not set
++# CONFIG_CPU_SUBTYPE_SH7720 is not set
++# CONFIG_CPU_SUBTYPE_SH7721 is not set
++# CONFIG_CPU_SUBTYPE_SH7750 is not set
++# CONFIG_CPU_SUBTYPE_SH7091 is not set
++# CONFIG_CPU_SUBTYPE_SH7750R is not set
++# CONFIG_CPU_SUBTYPE_SH7750S is not set
++# CONFIG_CPU_SUBTYPE_SH7751 is not set
++# CONFIG_CPU_SUBTYPE_SH7751R is not set
++# CONFIG_CPU_SUBTYPE_SH7760 is not set
++# CONFIG_CPU_SUBTYPE_SH4_202 is not set
++# CONFIG_CPU_SUBTYPE_SH7763 is not set
++# CONFIG_CPU_SUBTYPE_SH7770 is not set
++CONFIG_CPU_SUBTYPE_SH7780=y
++# CONFIG_CPU_SUBTYPE_SH7785 is not set
++# CONFIG_CPU_SUBTYPE_SHX3 is not set
++# CONFIG_CPU_SUBTYPE_SH7343 is not set
++# CONFIG_CPU_SUBTYPE_SH7722 is not set
++# CONFIG_CPU_SUBTYPE_SH5_101 is not set
++# CONFIG_CPU_SUBTYPE_SH5_103 is not set
++
++#
++# Memory management options
++#
++CONFIG_QUICKLIST=y
++CONFIG_MMU=y
++CONFIG_PAGE_OFFSET=0x80000000
++CONFIG_MEMORY_START=0x08000000
++CONFIG_MEMORY_SIZE=0x08000000
++CONFIG_29BIT=y
++# CONFIG_PMB is not set
++CONFIG_VSYSCALL=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_MAX_ACTIVE_REGIONS=1
++CONFIG_ARCH_POPULATES_NODE_MAP=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_PAGE_SIZE_4KB=y
++# CONFIG_PAGE_SIZE_8KB is not set
++# CONFIG_PAGE_SIZE_64KB is not set
++CONFIG_HUGETLB_PAGE_SIZE_64K=y
++# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
++# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
++# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
++# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
++# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++CONFIG_SPARSEMEM_STATIC=y
++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ZONE_DMA_FLAG=0
++CONFIG_NR_QUICK=2
++
++#
++# Cache configuration
++#
++# CONFIG_SH_DIRECT_MAPPED is not set
++CONFIG_CACHE_WRITEBACK=y
++# CONFIG_CACHE_WRITETHROUGH is not set
++# CONFIG_CACHE_OFF is not set
++
++#
++# Processor features
++#
++CONFIG_CPU_LITTLE_ENDIAN=y
++# CONFIG_CPU_BIG_ENDIAN is not set
++CONFIG_SH_FPU=y
++CONFIG_SH_STORE_QUEUES=y
++# CONFIG_SPECULATIVE_EXECUTION is not set
++CONFIG_CPU_HAS_INTEVT=y
++CONFIG_CPU_HAS_SR_RB=y
++CONFIG_CPU_HAS_FPU=y
++
++#
++# Board support
++#
++# CONFIG_SH_7780_SOLUTION_ENGINE is not set
++CONFIG_SH_SDK7780=y
++# CONFIG_SH_HIGHLANDER is not set
++# CONFIG_SH_SDK7780_STANDALONE is not set
++CONFIG_SH_SDK7780_BASE=y
++
++#
++# Timer and clock configuration
++#
++CONFIG_SH_TMU=y
++CONFIG_SH_TIMER_IRQ=28
++CONFIG_SH_PCLK_FREQ=33333333
++CONFIG_TICK_ONESHOT=y
++# CONFIG_NO_HZ is not set
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# DMA support
++#
++CONFIG_SH_DMA_API=y
++CONFIG_SH_DMA=y
++CONFIG_NR_ONCHIP_DMA_CHANNELS=12
++# CONFIG_NR_DMA_CHANNELS_BOOL is not set
++
++#
++# Companion Chips
++#
++
++#
++# Additional SuperH Device Drivers
++#
++CONFIG_HEARTBEAT=y
++# CONFIG_PUSH_SWITCH is not set
++
++#
++# Kernel features
++#
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++# CONFIG_KEXEC is not set
++# CONFIG_CRASH_DUMP is not set
++# CONFIG_PREEMPT_NONE is not set
++# CONFIG_PREEMPT_VOLUNTARY is not set
++CONFIG_PREEMPT=y
++CONFIG_PREEMPT_BKL=y
++CONFIG_GUSA=y
++
++#
++# Boot options
++#
++CONFIG_ZERO_PAGE_OFFSET=0x00001000
++CONFIG_BOOT_LINK_OFFSET=0x01800000
++CONFIG_CMDLINE_BOOL=y
++CONFIG_CMDLINE="mem=128M console=tty0 console=ttySC0,115200 ip=bootp root=/dev/nfs nfsroot=192.168.0.1:/home/rootfs"
++
++#
++# Bus options
++#
++CONFIG_PCI=y
++CONFIG_SH_PCIDMA_NONCOHERENT=y
++CONFIG_PCI_AUTO=y
++CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCI_LEGACY is not set
++CONFIG_PCI_DEBUG=y
++CONFIG_PCCARD=y
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_PCMCIA=y
++CONFIG_PCMCIA_LOAD_CIS=y
++CONFIG_PCMCIA_IOCTL=y
++CONFIG_CARDBUS=y
++
++#
++# PC-card bridges
++#
++CONFIG_YENTA=y
++CONFIG_YENTA_O2=y
++CONFIG_YENTA_RICOH=y
++CONFIG_YENTA_TI=y
++CONFIG_YENTA_ENE_TUNE=y
++CONFIG_YENTA_TOSHIBA=y
++# CONFIG_PD6729 is not set
++# CONFIG_I82092 is not set
++CONFIG_PCCARD_NONSTATIC=y
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_ASK_IP_FIB_HASH=y
++# CONFIG_IP_FIB_TRIE is not set
++CONFIG_IP_FIB_HASH=y
++# CONFIG_IP_MULTIPLE_TABLES is not set
++# CONFIG_IP_ROUTE_MULTIPATH is not set
++# CONFIG_IP_ROUTE_VERBOSE is not set
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++CONFIG_INET_TUNNEL=y
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++CONFIG_IPV6=y
++# CONFIG_IPV6_PRIVACY is not set
++# CONFIG_IPV6_ROUTER_PREF is not set
++# CONFIG_IPV6_OPTIMISTIC_DAD is not set
++# CONFIG_INET6_AH is not set
++# CONFIG_INET6_ESP is not set
++# CONFIG_INET6_IPCOMP is not set
++# CONFIG_IPV6_MIP6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++CONFIG_INET6_XFRM_MODE_TRANSPORT=y
++CONFIG_INET6_XFRM_MODE_TUNNEL=y
++# CONFIG_INET6_XFRM_MODE_BEET is not set
++# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
++CONFIG_IPV6_SIT=y
++# CONFIG_IPV6_TUNNEL is not set
++# CONFIG_IPV6_MULTIPLE_TABLES is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++CONFIG_NET_SCHED=y
++
++#
++# Queueing/Scheduling
++#
++# CONFIG_NET_SCH_CBQ is not set
++# CONFIG_NET_SCH_HTB is not set
++# CONFIG_NET_SCH_HFSC is not set
++# CONFIG_NET_SCH_PRIO is not set
++# CONFIG_NET_SCH_RR is not set
++# CONFIG_NET_SCH_RED is not set
++# CONFIG_NET_SCH_SFQ is not set
++# CONFIG_NET_SCH_TEQL is not set
++# CONFIG_NET_SCH_TBF is not set
++# CONFIG_NET_SCH_GRED is not set
++# CONFIG_NET_SCH_DSMARK is not set
++# CONFIG_NET_SCH_NETEM is not set
++# CONFIG_NET_SCH_INGRESS is not set
++
++#
++# Classification
++#
++# CONFIG_NET_CLS_BASIC is not set
++# CONFIG_NET_CLS_TCINDEX is not set
++# CONFIG_NET_CLS_ROUTE4 is not set
++# CONFIG_NET_CLS_FW is not set
++# CONFIG_NET_CLS_U32 is not set
++# CONFIG_NET_CLS_RSVP is not set
++# CONFIG_NET_CLS_RSVP6 is not set
++# CONFIG_NET_EMATCH is not set
++# CONFIG_NET_CLS_ACT is not set
++# CONFIG_NET_CLS_POLICE is not set
++CONFIG_NET_SCH_FIFO=y
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++
++#
++# Wireless
++#
++# CONFIG_CFG80211 is not set
++# CONFIG_WIRELESS_EXT is not set
++# CONFIG_MAC80211 is not set
++# CONFIG_IEEE80211 is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++CONFIG_PARPORT=y
++# CONFIG_PARPORT_PC is not set
++# CONFIG_PARPORT_GSC is not set
++# CONFIG_PARPORT_AX88796 is not set
++# CONFIG_PARPORT_1284 is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_MISC_DEVICES is not set
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++CONFIG_IDEDISK_MULTI_MODE=y
++# CONFIG_BLK_DEV_IDECS is not set
++# CONFIG_BLK_DEV_DELKIN is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++CONFIG_IDE_PROC_FS=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++CONFIG_BLK_DEV_PLATFORM=y
++
++#
++# PCI IDE chipsets support
++#
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++CONFIG_IDEPCI_PCIBUS_ORDER=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_JMICRON is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT8213 is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_BLK_DEV_TC86C001 is not set
++# CONFIG_IDE_ARM is not set
++# CONFIG_BLK_DEV_IDEDMA is not set
++# CONFIG_IDE_ARCH_OBSOLETE_INIT is not set
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++CONFIG_SCSI_NETLINK=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++CONFIG_SCSI_SPI_ATTRS=y
++CONFIG_SCSI_FC_ATTRS=y
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_AIC94XX is not set
++# CONFIG_SCSI_ARCMSR is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_STEX is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_QLA_ISCSI is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_SRP is not set
++# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++# CONFIG_SATA_AHCI is not set
++# CONFIG_SATA_SVW is not set
++# CONFIG_ATA_PIIX is not set
++# CONFIG_SATA_MV is not set
++# CONFIG_SATA_NV is not set
++# CONFIG_PDC_ADMA is not set
++# CONFIG_SATA_QSTOR is not set
++# CONFIG_SATA_PROMISE is not set
++# CONFIG_SATA_SX4 is not set
++# CONFIG_SATA_SIL is not set
++# CONFIG_SATA_SIL24 is not set
++# CONFIG_SATA_SIS is not set
++# CONFIG_SATA_ULI is not set
++# CONFIG_SATA_VIA is not set
++# CONFIG_SATA_VITESSE is not set
++# CONFIG_SATA_INIC162X is not set
++# CONFIG_PATA_ALI is not set
++# CONFIG_PATA_AMD is not set
++# CONFIG_PATA_ARTOP is not set
++# CONFIG_PATA_ATIIXP is not set
++# CONFIG_PATA_CMD640_PCI is not set
++# CONFIG_PATA_CMD64X is not set
++# CONFIG_PATA_CS5520 is not set
++# CONFIG_PATA_CS5530 is not set
++# CONFIG_PATA_CYPRESS is not set
++# CONFIG_PATA_EFAR is not set
++# CONFIG_ATA_GENERIC is not set
++# CONFIG_PATA_HPT366 is not set
++# CONFIG_PATA_HPT37X is not set
++# CONFIG_PATA_HPT3X2N is not set
++# CONFIG_PATA_HPT3X3 is not set
++# CONFIG_PATA_IT821X is not set
++# CONFIG_PATA_IT8213 is not set
++# CONFIG_PATA_JMICRON is not set
++# CONFIG_PATA_TRIFLEX is not set
++# CONFIG_PATA_MARVELL is not set
++# CONFIG_PATA_MPIIX is not set
++# CONFIG_PATA_OLDPIIX is not set
++# CONFIG_PATA_NETCELL is not set
++# CONFIG_PATA_NS87410 is not set
++# CONFIG_PATA_NS87415 is not set
++# CONFIG_PATA_OPTI is not set
++# CONFIG_PATA_OPTIDMA is not set
++# CONFIG_PATA_PCMCIA is not set
++# CONFIG_PATA_PDC_OLD is not set
++# CONFIG_PATA_RADISYS is not set
++# CONFIG_PATA_RZ1000 is not set
++# CONFIG_PATA_SC1200 is not set
++# CONFIG_PATA_SERVERWORKS is not set
++# CONFIG_PATA_PDC2027X is not set
++# CONFIG_PATA_SIL680 is not set
++# CONFIG_PATA_SIS is not set
++# CONFIG_PATA_VIA is not set
++# CONFIG_PATA_WINBOND is not set
++# CONFIG_PATA_PLATFORM is not set
++CONFIG_MD=y
++# CONFIG_BLK_DEV_MD is not set
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_DEBUG is not set
++# CONFIG_DM_CRYPT is not set
++# CONFIG_DM_SNAPSHOT is not set
++# CONFIG_DM_MIRROR is not set
++# CONFIG_DM_ZERO is not set
++# CONFIG_DM_MULTIPATH is not set
++# CONFIG_DM_DELAY is not set
++# CONFIG_DM_UEVENT is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_IEEE1394 is not set
++# CONFIG_I2O is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEVICES_MULTIQUEUE is not set
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++# CONFIG_ARCNET is not set
++# CONFIG_PHYLIB is not set
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_AX88796 is not set
++# CONFIG_STNIC is not set
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++CONFIG_SMC91X=y
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_NET_PCI is not set
++# CONFIG_B44 is not set
++# CONFIG_NET_POCKET is not set
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_TR is not set
++
++#
++# Wireless LAN
++#
++# CONFIG_WLAN_PRE80211 is not set
++# CONFIG_WLAN_80211 is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++# CONFIG_NET_PCMCIA is not set
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PLIP is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++CONFIG_NETCONSOLE=y
++# CONFIG_NETCONSOLE_DYNAMIC is not set
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_LIFEBOOK=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_APPLETOUCH is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++# CONFIG_SERIO_I8042 is not set
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PARKBD is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_SH_SCI=y
++CONFIG_SERIAL_SH_SCI_NR_UARTS=2
++CONFIG_SERIAL_SH_SCI_CONSOLE=y
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_PRINTER is not set
++# CONFIG_PPDEV is not set
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# PCMCIA character devices
++#
++# CONFIG_SYNCLINK_CS is not set
++# CONFIG_CARDMAN_4000 is not set
++# CONFIG_CARDMAN_4040 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++# CONFIG_W1 is not set
++CONFIG_POWER_SUPPLY=y
++# CONFIG_POWER_SUPPLY_DEBUG is not set
++# CONFIG_PDA_POWER is not set
++# CONFIG_BATTERY_DS2760 is not set
++# CONFIG_HWMON is not set
++# CONFIG_WATCHDOG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB_POSSIBLE=y
++CONFIG_SSB=y
++CONFIG_SSB_PCIHOST_POSSIBLE=y
++CONFIG_SSB_PCIHOST=y
++CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
++# CONFIG_SSB_PCMCIAHOST is not set
++# CONFIG_SSB_SILENT is not set
++# CONFIG_SSB_DEBUG is not set
++CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
++CONFIG_SSB_DRIVER_PCICORE=y
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_SM501 is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++# CONFIG_DVB_CORE is not set
++# CONFIG_DAB is not set
++
++#
++# Graphics support
++#
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++CONFIG_FB=y
++# CONFIG_FIRMWARE_EDID is not set
++# CONFIG_FB_DDC is not set
++# CONFIG_FB_CFB_FILLRECT is not set
++# CONFIG_FB_CFB_COPYAREA is not set
++# CONFIG_FB_CFB_IMAGEBLIT is not set
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
++# CONFIG_FB_SYS_FILLRECT is not set
++# CONFIG_FB_SYS_COPYAREA is not set
++# CONFIG_FB_SYS_IMAGEBLIT is not set
++# CONFIG_FB_SYS_FOPS is not set
++CONFIG_FB_DEFERRED_IO=y
++# CONFIG_FB_SVGALIB is not set
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++# CONFIG_FB_MODE_HELPERS is not set
++# CONFIG_FB_TILEBLITTING is not set
++
++#
++# Frame buffer hardware drivers
++#
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_S3 is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_VT8623 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_ARK is not set
++# CONFIG_FB_PM3 is not set
++# CONFIG_FB_VIRTUAL is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++CONFIG_DISPLAY_SUPPORT=y
++
++#
++# Display hardware drivers
++#
++
++#
++# Console display driver support
++#
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_LOGO_LINUX_MONO=y
++CONFIG_LOGO_LINUX_VGA16=y
++CONFIG_LOGO_LINUX_CLUT224=y
++CONFIG_LOGO_SUPERH_MONO=y
++CONFIG_LOGO_SUPERH_VGA16=y
++CONFIG_LOGO_SUPERH_CLUT224=y
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++# CONFIG_SND is not set
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=y
++# CONFIG_SOUND_TRIDENT is not set
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HID_DEBUG is not set
++# CONFIG_HIDRAW is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++# CONFIG_USB_HIDDEV is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++CONFIG_USB_DEBUG=y
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++# CONFIG_USB_DEVICE_CLASS is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++# CONFIG_USB_OHCI_HCD is not set
++# CONFIG_USB_UHCI_HCD is not set
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++CONFIG_USB_PRINTER=y
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_ONETOUCH is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++# CONFIG_USB_USS720 is not set
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_BERRY_CHARGE is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGET is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++# CONFIG_MMC is not set
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++# CONFIG_LEDS_TRIGGERS is not set
++# CONFIG_INFINIBAND is not set
++# CONFIG_RTC_CLASS is not set
++# CONFIG_AUXDISPLAY is not set
++
++#
++# Userspace I/O
++#
++# CONFIG_UIO is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++# CONFIG_EXT2_FS_SECURITY is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++# CONFIG_EXT3_FS_SECURITY is not set
++# CONFIG_EXT4DEV_FS is not set
++CONFIG_JBD=y
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++CONFIG_FS_POSIX_ACL=y
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++CONFIG_MINIX_FS=y
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++# CONFIG_AUTOFS_FS is not set
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++CONFIG_GENERIC_ACL=y
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++# CONFIG_JOLIET is not set
++# CONFIG_ZISOFS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=y
++CONFIG_NTFS_DEBUG=y
++CONFIG_NTFS_RW=y
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++# CONFIG_PROC_KCORE is not set
++CONFIG_PROC_SYSCTL=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++# CONFIG_NFSD_V4 is not set
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_SUNRPC_BIND34 is not set
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++CONFIG_NLS_ISO8859_15=y
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++CONFIG_NLS_UTF8=y
++# CONFIG_DLM is not set
++# CONFIG_INSTRUMENTATION is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++# CONFIG_ENABLE_MUST_CHECK is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_SCHEDSTATS is not set
++CONFIG_TIMER_STATS=y
++# CONFIG_SLUB_DEBUG_ON is not set
++CONFIG_DEBUG_PREEMPT=y
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_LOCK_STAT is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_SG is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_FORCED_INLINING is not set
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_SAMPLES is not set
++CONFIG_SH_STANDARD_BIOS=y
++# CONFIG_EARLY_SCIF_CONSOLE is not set
++# CONFIG_EARLY_PRINTK is not set
++# CONFIG_DEBUG_BOOTMEM is not set
++CONFIG_DEBUG_STACKOVERFLOW=y
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_4KSTACKS is not set
++# CONFIG_IRQSTACKS is not set
++# CONFIG_SH_KGDB is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITY_FILE_CAPABILITIES is not set
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_ALGAPI=y
++# CONFIG_CRYPTO_MANAGER is not set
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_GF128MUL is not set
++# CONFIG_CRYPTO_ECB is not set
++# CONFIG_CRYPTO_CBC is not set
++# CONFIG_CRYPTO_PCBC is not set
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_TEST is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++CONFIG_CRYPTO_HW=y
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
+diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
+index a5e37db..240a1ce 100644
+--- a/arch/sh/configs/se7712_defconfig
++++ b/arch/sh/configs/se7712_defconfig
+@@ -237,7 +237,7 @@ CONFIG_CPU_HAS_SR_RB=y
+ CONFIG_SH_TMU=y
+ CONFIG_SH_TIMER_IRQ=16
+ # CONFIG_NO_IDLE_HZ is not set
+-CONFIG_SH_PCLK_FREQ=33333333
++CONFIG_SH_PCLK_FREQ=66666666
+ 
+ #
+ # CPU Frequency scaling
+diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
+index 4e711a0..0193636 100644
+--- a/arch/sh/drivers/dma/Kconfig
++++ b/arch/sh/drivers/dma/Kconfig
+@@ -12,7 +12,7 @@ config SH_DMA
+ config NR_ONCHIP_DMA_CHANNELS
+ 	int
+ 	depends on SH_DMA
+-	default "6" if CPU_SUBTYPE_SH7720
++	default "6" if CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721
+ 	default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R
+ 	default "12" if CPU_SUBTYPE_SH7780
+ 	default "4"
+diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
+index 958bac1..5c33597 100644
+--- a/arch/sh/drivers/dma/dma-sh.c
++++ b/arch/sh/drivers/dma/dma-sh.c
+@@ -25,6 +25,7 @@ static int dmte_irq_map[] = {
+ 	DMTE2_IRQ,
+ 	DMTE3_IRQ,
+ #if defined(CONFIG_CPU_SUBTYPE_SH7720)  ||	\
++    defined(CONFIG_CPU_SUBTYPE_SH7721)  ||	\
+     defined(CONFIG_CPU_SUBTYPE_SH7751R) ||	\
+     defined(CONFIG_CPU_SUBTYPE_SH7760)  ||	\
+     defined(CONFIG_CPU_SUBTYPE_SH7709)  ||	\
+@@ -203,6 +204,7 @@ static int sh_dmac_get_dma_residue(struct dma_channel *chan)
+ }
+ 
+ #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
++    defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+     defined(CONFIG_CPU_SUBTYPE_SH7780)
+ #define dmaor_read_reg()	ctrl_inw(DMAOR)
+ #define dmaor_write_reg(data)	ctrl_outw(data, DMAOR)
+diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
+index eebcd47..51b57c0 100644
+--- a/arch/sh/drivers/dma/dma-sysfs.c
++++ b/arch/sh/drivers/dma/dma-sysfs.c
+@@ -19,7 +19,7 @@
+ #include <asm/dma.h>
+ 
+ static struct sysdev_class dma_sysclass = {
+-	set_kset_name("dma"),
++	.name = "dma",
+ };
+ EXPORT_SYMBOL(dma_sysclass);
+ 
+diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile
+index fba6b5b..0718805 100644
+--- a/arch/sh/drivers/pci/Makefile
++++ b/arch/sh/drivers/pci/Makefile
+@@ -7,16 +7,19 @@ obj-$(CONFIG_PCI_AUTO)			+= pci-auto.o
+ 
+ obj-$(CONFIG_CPU_SUBTYPE_SH7751)	+= pci-sh7751.o ops-sh4.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7751R)	+= pci-sh7751.o ops-sh4.o
++obj-$(CONFIG_CPU_SUBTYPE_SH7763)	+= pci-sh7780.o ops-sh4.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7780)	+= pci-sh7780.o ops-sh4.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7785)	+= pci-sh7780.o ops-sh4.o
++obj-$(CONFIG_CPU_SH5)			+= pci-sh5.o ops-sh5.o
+ 
+-obj-$(CONFIG_SH_DREAMCAST)		+= ops-dreamcast.o fixups-dreamcast.o \
+-					   dma-dreamcast.o
++obj-$(CONFIG_SH_DREAMCAST)		+= ops-dreamcast.o fixups-dreamcast.o
+ obj-$(CONFIG_SH_SECUREEDGE5410)		+= ops-snapgear.o
+ obj-$(CONFIG_SH_RTS7751R2D)		+= ops-rts7751r2d.o fixups-rts7751r2d.o
+ obj-$(CONFIG_SH_SH03)			+= ops-sh03.o fixups-sh03.o
+ obj-$(CONFIG_SH_HIGHLANDER)		+= ops-r7780rp.o fixups-r7780rp.o
++obj-$(CONFIG_SH_SDK7780)		+= ops-sdk7780.o fixups-sdk7780.o
+ obj-$(CONFIG_SH_TITAN)			+= ops-titan.o
+ obj-$(CONFIG_SH_LANDISK)		+= ops-landisk.o
+ obj-$(CONFIG_SH_LBOX_RE2)		+= ops-lboxre2.o fixups-lboxre2.o
+ obj-$(CONFIG_SH_7780_SOLUTION_ENGINE)	+= ops-se7780.o fixups-se7780.o
++obj-$(CONFIG_SH_CAYMAN)			+= ops-cayman.o
+diff --git a/arch/sh/drivers/pci/dma-dreamcast.c b/arch/sh/drivers/pci/dma-dreamcast.c
 deleted file mode 100644
-index 12cc019..0000000
---- a/arch/sh/configs/r7780rp_defconfig
+index 888a340..0000000
+--- a/arch/sh/drivers/pci/dma-dreamcast.c
 +++ /dev/null
-@@ -1,1328 +0,0 @@
--#
--# Automatically generated make config: don't edit
--# Linux kernel version: 2.6.21-rc7
--# Tue May  1 12:28:39 2007
--#
--CONFIG_SUPERH=y
--CONFIG_RWSEM_GENERIC_SPINLOCK=y
--CONFIG_GENERIC_BUG=y
--CONFIG_GENERIC_FIND_NEXT_BIT=y
--CONFIG_GENERIC_HWEIGHT=y
--CONFIG_GENERIC_HARDIRQS=y
--CONFIG_GENERIC_IRQ_PROBE=y
--CONFIG_GENERIC_CALIBRATE_DELAY=y
--# CONFIG_GENERIC_TIME is not set
--CONFIG_STACKTRACE_SUPPORT=y
--CONFIG_LOCKDEP_SUPPORT=y
--# CONFIG_ARCH_HAS_ILOG2_U32 is not set
--# CONFIG_ARCH_HAS_ILOG2_U64 is not set
--CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
--
--#
--# Code maturity level options
--#
--CONFIG_EXPERIMENTAL=y
--CONFIG_BROKEN_ON_SMP=y
--CONFIG_LOCK_KERNEL=y
--CONFIG_INIT_ENV_ARG_LIMIT=32
--
--#
--# General setup
--#
--CONFIG_LOCALVERSION=""
--CONFIG_LOCALVERSION_AUTO=y
--CONFIG_SWAP=y
--CONFIG_SYSVIPC=y
--# CONFIG_IPC_NS is not set
--CONFIG_SYSVIPC_SYSCTL=y
--# CONFIG_POSIX_MQUEUE is not set
--CONFIG_BSD_PROCESS_ACCT=y
--# CONFIG_BSD_PROCESS_ACCT_V3 is not set
--# CONFIG_TASKSTATS is not set
--# CONFIG_UTS_NS is not set
--# CONFIG_AUDIT is not set
--CONFIG_IKCONFIG=y
--CONFIG_IKCONFIG_PROC=y
--# CONFIG_SYSFS_DEPRECATED is not set
--# CONFIG_RELAY is not set
--# CONFIG_BLK_DEV_INITRD is not set
--CONFIG_CC_OPTIMIZE_FOR_SIZE=y
--CONFIG_SYSCTL=y
--CONFIG_EMBEDDED=y
--CONFIG_UID16=y
--# CONFIG_SYSCTL_SYSCALL is not set
--CONFIG_KALLSYMS=y
--# CONFIG_KALLSYMS_ALL is not set
--# CONFIG_KALLSYMS_EXTRA_PASS is not set
--CONFIG_HOTPLUG=y
--CONFIG_PRINTK=y
--CONFIG_BUG=y
--CONFIG_ELF_CORE=y
--CONFIG_BASE_FULL=y
--# CONFIG_FUTEX is not set
--# CONFIG_EPOLL is not set
--CONFIG_SHMEM=y
--CONFIG_SLAB=y
--CONFIG_VM_EVENT_COUNTERS=y
--# CONFIG_TINY_SHMEM is not set
--CONFIG_BASE_SMALL=0
--# CONFIG_SLOB is not set
--
--#
--# Loadable module support
--#
--CONFIG_MODULES=y
--CONFIG_MODULE_UNLOAD=y
--# CONFIG_MODULE_FORCE_UNLOAD is not set
--# CONFIG_MODVERSIONS is not set
--# CONFIG_MODULE_SRCVERSION_ALL is not set
--CONFIG_KMOD=y
--
--#
--# Block layer
--#
--CONFIG_BLOCK=y
--# CONFIG_LBD is not set
--# CONFIG_BLK_DEV_IO_TRACE is not set
--# CONFIG_LSF is not set
--
--#
--# IO Schedulers
--#
--CONFIG_IOSCHED_NOOP=y
--# CONFIG_IOSCHED_AS is not set
--# CONFIG_IOSCHED_DEADLINE is not set
--# CONFIG_IOSCHED_CFQ is not set
--# CONFIG_DEFAULT_AS is not set
--# CONFIG_DEFAULT_DEADLINE is not set
--# CONFIG_DEFAULT_CFQ is not set
--CONFIG_DEFAULT_NOOP=y
--CONFIG_DEFAULT_IOSCHED="noop"
--
--#
--# System type
--#
--# CONFIG_SH_SOLUTION_ENGINE is not set
--# CONFIG_SH_7722_SOLUTION_ENGINE is not set
--# CONFIG_SH_7751_SOLUTION_ENGINE is not set
--# CONFIG_SH_7780_SOLUTION_ENGINE is not set
--# CONFIG_SH_7300_SOLUTION_ENGINE is not set
--# CONFIG_SH_7343_SOLUTION_ENGINE is not set
--# CONFIG_SH_73180_SOLUTION_ENGINE is not set
--# CONFIG_SH_7751_SYSTEMH is not set
--# CONFIG_SH_HP6XX is not set
--# CONFIG_SH_SATURN is not set
--# CONFIG_SH_DREAMCAST is not set
--# CONFIG_SH_MPC1211 is not set
--# CONFIG_SH_SH03 is not set
--# CONFIG_SH_SECUREEDGE5410 is not set
--# CONFIG_SH_HS7751RVOIP is not set
--# CONFIG_SH_7710VOIPGW is not set
--# CONFIG_SH_RTS7751R2D is not set
--CONFIG_SH_HIGHLANDER=y
--# CONFIG_SH_EDOSK7705 is not set
--# CONFIG_SH_SH4202_MICRODEV is not set
--# CONFIG_SH_LANDISK is not set
--# CONFIG_SH_TITAN is not set
--# CONFIG_SH_SHMIN is not set
--# CONFIG_SH_7206_SOLUTION_ENGINE is not set
--# CONFIG_SH_7619_SOLUTION_ENGINE is not set
--# CONFIG_SH_LBOX_RE2 is not set
--# CONFIG_SH_UNKNOWN is not set
--CONFIG_SH_R7780RP=y
--# CONFIG_SH_R7780MP is not set
--# CONFIG_SH_R7785RP is not set
--
--#
--# Processor selection
--#
--CONFIG_CPU_SH4=y
--CONFIG_CPU_SH4A=y
--
--#
--# SH-2 Processor Support
--#
--# CONFIG_CPU_SUBTYPE_SH7604 is not set
--# CONFIG_CPU_SUBTYPE_SH7619 is not set
--
--#
--# SH-2A Processor Support
--#
--# CONFIG_CPU_SUBTYPE_SH7206 is not set
--
--#
--# SH-3 Processor Support
--#
--# CONFIG_CPU_SUBTYPE_SH7300 is not set
--# CONFIG_CPU_SUBTYPE_SH7705 is not set
--# CONFIG_CPU_SUBTYPE_SH7706 is not set
--# CONFIG_CPU_SUBTYPE_SH7707 is not set
--# CONFIG_CPU_SUBTYPE_SH7708 is not set
--# CONFIG_CPU_SUBTYPE_SH7709 is not set
--# CONFIG_CPU_SUBTYPE_SH7710 is not set
--# CONFIG_CPU_SUBTYPE_SH7712 is not set
--
--#
--# SH-4 Processor Support
--#
--# CONFIG_CPU_SUBTYPE_SH7750 is not set
--# CONFIG_CPU_SUBTYPE_SH7091 is not set
--# CONFIG_CPU_SUBTYPE_SH7750R is not set
--# CONFIG_CPU_SUBTYPE_SH7750S is not set
--# CONFIG_CPU_SUBTYPE_SH7751 is not set
--# CONFIG_CPU_SUBTYPE_SH7751R is not set
--# CONFIG_CPU_SUBTYPE_SH7760 is not set
--# CONFIG_CPU_SUBTYPE_SH4_202 is not set
--
--#
--# ST40 Processor Support
--#
--# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
--# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
--
--#
--# SH-4A Processor Support
--#
--# CONFIG_CPU_SUBTYPE_SH7770 is not set
--CONFIG_CPU_SUBTYPE_SH7780=y
--# CONFIG_CPU_SUBTYPE_SH7785 is not set
--
--#
--# SH4AL-DSP Processor Support
--#
--# CONFIG_CPU_SUBTYPE_SH73180 is not set
--# CONFIG_CPU_SUBTYPE_SH7343 is not set
--# CONFIG_CPU_SUBTYPE_SH7722 is not set
--
--#
--# Memory management options
--#
--CONFIG_MMU=y
--CONFIG_PAGE_OFFSET=0x80000000
--CONFIG_MEMORY_START=0x08000000
--CONFIG_MEMORY_SIZE=0x08000000
--# CONFIG_32BIT is not set
--CONFIG_VSYSCALL=y
--CONFIG_ARCH_FLATMEM_ENABLE=y
--CONFIG_ARCH_POPULATES_NODE_MAP=y
--CONFIG_PAGE_SIZE_4KB=y
--# CONFIG_PAGE_SIZE_8KB is not set
--# CONFIG_PAGE_SIZE_64KB is not set
--CONFIG_HUGETLB_PAGE_SIZE_64K=y
--# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
--# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
--# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
--# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
--CONFIG_SELECT_MEMORY_MODEL=y
--CONFIG_FLATMEM_MANUAL=y
--# CONFIG_DISCONTIGMEM_MANUAL is not set
--# CONFIG_SPARSEMEM_MANUAL is not set
--CONFIG_FLATMEM=y
--CONFIG_FLAT_NODE_MEM_MAP=y
--# CONFIG_SPARSEMEM_STATIC is not set
--CONFIG_SPLIT_PTLOCK_CPUS=4
--# CONFIG_RESOURCES_64BIT is not set
--CONFIG_ZONE_DMA_FLAG=0
--
--#
--# Cache configuration
--#
--# CONFIG_SH_DIRECT_MAPPED is not set
--# CONFIG_SH_WRITETHROUGH is not set
--# CONFIG_SH_OCRAM is not set
--
--#
--# Processor features
--#
--CONFIG_CPU_LITTLE_ENDIAN=y
--# CONFIG_CPU_BIG_ENDIAN is not set
--CONFIG_SH_FPU=y
--# CONFIG_SH_DSP is not set
--CONFIG_SH_STORE_QUEUES=y
--CONFIG_SPECULATIVE_EXECUTION=y
--CONFIG_CPU_HAS_INTEVT=y
--CONFIG_CPU_HAS_INTC_IRQ=y
--CONFIG_CPU_HAS_SR_RB=y
--
--#
--# Timer and clock configuration
--#
--CONFIG_SH_TMU=y
--CONFIG_SH_TIMER_IRQ=28
--CONFIG_NO_IDLE_HZ=y
--CONFIG_SH_PCLK_FREQ=32000000
--
--#
--# CPU Frequency scaling
--#
--# CONFIG_CPU_FREQ is not set
--
--#
--# DMA support
--#
--# CONFIG_SH_DMA is not set
--
--#
--# Companion Chips
--#
--# CONFIG_HD6446X_SERIES is not set
--
--#
--# Additional SuperH Device Drivers
--#
--# CONFIG_HEARTBEAT is not set
--CONFIG_PUSH_SWITCH=y
--
--#
--# Kernel features
--#
--# CONFIG_HZ_100 is not set
--CONFIG_HZ_250=y
--# CONFIG_HZ_300 is not set
--# CONFIG_HZ_1000 is not set
--CONFIG_HZ=250
--CONFIG_KEXEC=y
--# CONFIG_CRASH_DUMP is not set
--# CONFIG_SMP is not set
--# CONFIG_PREEMPT_NONE is not set
--# CONFIG_PREEMPT_VOLUNTARY is not set
--CONFIG_PREEMPT=y
--CONFIG_PREEMPT_BKL=y
--
--#
--# Boot options
--#
--CONFIG_ZERO_PAGE_OFFSET=0x00001000
--CONFIG_BOOT_LINK_OFFSET=0x00800000
--# CONFIG_UBC_WAKEUP is not set
--CONFIG_CMDLINE_BOOL=y
--CONFIG_CMDLINE="mem=128M console=ttySC0,115200 root=/dev/sda1"
--
--#
--# Bus options
--#
--CONFIG_PCI=y
--CONFIG_SH_PCIDMA_NONCOHERENT=y
--CONFIG_PCI_AUTO=y
--CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
--# CONFIG_PCI_DEBUG is not set
--
--#
--# PCCARD (PCMCIA/CardBus) support
--#
--# CONFIG_PCCARD is not set
--
--#
--# PCI Hotplug Support
--#
--# CONFIG_HOTPLUG_PCI is not set
--
--#
--# Executable file formats
--#
--CONFIG_BINFMT_ELF=y
--# CONFIG_BINFMT_FLAT is not set
--# CONFIG_BINFMT_MISC is not set
--
--#
--# Power management options (EXPERIMENTAL)
--#
--# CONFIG_PM is not set
--
--#
--# Networking
--#
--CONFIG_NET=y
--
--#
--# Networking options
--#
--# CONFIG_NETDEBUG is not set
--CONFIG_PACKET=y
--# CONFIG_PACKET_MMAP is not set
--CONFIG_UNIX=y
--CONFIG_XFRM=y
--# CONFIG_XFRM_USER is not set
--# CONFIG_XFRM_SUB_POLICY is not set
--# CONFIG_XFRM_MIGRATE is not set
--# CONFIG_NET_KEY is not set
--CONFIG_INET=y
--# CONFIG_IP_MULTICAST is not set
--CONFIG_IP_ADVANCED_ROUTER=y
--CONFIG_ASK_IP_FIB_HASH=y
--# CONFIG_IP_FIB_TRIE is not set
--CONFIG_IP_FIB_HASH=y
--# CONFIG_IP_MULTIPLE_TABLES is not set
--# CONFIG_IP_ROUTE_MULTIPATH is not set
--# CONFIG_IP_ROUTE_VERBOSE is not set
--CONFIG_IP_PNP=y
--CONFIG_IP_PNP_DHCP=y
--# CONFIG_IP_PNP_BOOTP is not set
--# CONFIG_IP_PNP_RARP is not set
--# CONFIG_NET_IPIP is not set
--# CONFIG_NET_IPGRE is not set
--# CONFIG_ARPD is not set
--# CONFIG_SYN_COOKIES is not set
--# CONFIG_INET_AH is not set
--# CONFIG_INET_ESP is not set
--# CONFIG_INET_IPCOMP is not set
--# CONFIG_INET_XFRM_TUNNEL is not set
--# CONFIG_INET_TUNNEL is not set
--CONFIG_INET_XFRM_MODE_TRANSPORT=y
--CONFIG_INET_XFRM_MODE_TUNNEL=y
--CONFIG_INET_XFRM_MODE_BEET=y
--CONFIG_INET_DIAG=y
--CONFIG_INET_TCP_DIAG=y
--# CONFIG_TCP_CONG_ADVANCED is not set
--CONFIG_TCP_CONG_CUBIC=y
--CONFIG_DEFAULT_TCP_CONG="cubic"
--# CONFIG_TCP_MD5SIG is not set
--# CONFIG_IPV6 is not set
--# CONFIG_INET6_XFRM_TUNNEL is not set
--# CONFIG_INET6_TUNNEL is not set
--# CONFIG_NETWORK_SECMARK is not set
--# CONFIG_NETFILTER is not set
--
--#
--# DCCP Configuration (EXPERIMENTAL)
--#
--# CONFIG_IP_DCCP is not set
--
--#
--# SCTP Configuration (EXPERIMENTAL)
--#
--# CONFIG_IP_SCTP is not set
--
--#
--# TIPC Configuration (EXPERIMENTAL)
--#
--# CONFIG_TIPC is not set
--# CONFIG_ATM is not set
--CONFIG_BRIDGE=m
--# CONFIG_VLAN_8021Q is not set
--# CONFIG_DECNET is not set
--CONFIG_LLC=m
--# CONFIG_LLC2 is not set
--# CONFIG_IPX is not set
--# CONFIG_ATALK is not set
--# CONFIG_X25 is not set
--# CONFIG_LAPB is not set
--# CONFIG_ECONET is not set
--# CONFIG_WAN_ROUTER is not set
--
--#
--# QoS and/or fair queueing
--#
--# CONFIG_NET_SCHED is not set
--
--#
--# Network testing
--#
--# CONFIG_NET_PKTGEN is not set
--# CONFIG_HAMRADIO is not set
--# CONFIG_IRDA is not set
--# CONFIG_BT is not set
--# CONFIG_IEEE80211 is not set
--CONFIG_WIRELESS_EXT=y
--
--#
--# Device Drivers
--#
--
--#
--# Generic Driver Options
--#
--CONFIG_STANDALONE=y
--CONFIG_PREVENT_FIRMWARE_BUILD=y
--CONFIG_FW_LOADER=m
--# CONFIG_DEBUG_DRIVER is not set
--# CONFIG_DEBUG_DEVRES is not set
--# CONFIG_SYS_HYPERVISOR is not set
--
--#
--# Connector - unified userspace <-> kernelspace linker
--#
--# CONFIG_CONNECTOR is not set
--
--#
--# Memory Technology Devices (MTD)
--#
--# CONFIG_MTD is not set
--
--#
--# Parallel port support
--#
--# CONFIG_PARPORT is not set
--
--#
--# Plug and Play support
--#
--# CONFIG_PNPACPI is not set
--
--#
--# Block devices
--#
--# CONFIG_BLK_CPQ_DA is not set
--# CONFIG_BLK_CPQ_CISS_DA is not set
--# CONFIG_BLK_DEV_DAC960 is not set
--# CONFIG_BLK_DEV_UMEM is not set
--# CONFIG_BLK_DEV_COW_COMMON is not set
--# CONFIG_BLK_DEV_LOOP is not set
--# CONFIG_BLK_DEV_NBD is not set
--# CONFIG_BLK_DEV_SX8 is not set
--CONFIG_BLK_DEV_RAM=y
--CONFIG_BLK_DEV_RAM_COUNT=16
--CONFIG_BLK_DEV_RAM_SIZE=4096
--CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
--# CONFIG_CDROM_PKTCDVD is not set
--# CONFIG_ATA_OVER_ETH is not set
--
--#
--# Misc devices
--#
--# CONFIG_SGI_IOC4 is not set
--# CONFIG_TIFM_CORE is not set
--
--#
--# ATA/ATAPI/MFM/RLL support
--#
--# CONFIG_IDE is not set
--
--#
--# SCSI device support
--#
--# CONFIG_RAID_ATTRS is not set
--CONFIG_SCSI=y
--# CONFIG_SCSI_TGT is not set
--# CONFIG_SCSI_NETLINK is not set
--CONFIG_SCSI_PROC_FS=y
--
--#
--# SCSI support type (disk, tape, CD-ROM)
--#
--CONFIG_BLK_DEV_SD=y
--# CONFIG_CHR_DEV_ST is not set
--# CONFIG_CHR_DEV_OSST is not set
--# CONFIG_BLK_DEV_SR is not set
--CONFIG_CHR_DEV_SG=m
--# CONFIG_CHR_DEV_SCH is not set
--
--#
--# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
--#
--# CONFIG_SCSI_MULTI_LUN is not set
--# CONFIG_SCSI_CONSTANTS is not set
--# CONFIG_SCSI_LOGGING is not set
--# CONFIG_SCSI_SCAN_ASYNC is not set
--
--#
--# SCSI Transports
--#
--# CONFIG_SCSI_SPI_ATTRS is not set
--# CONFIG_SCSI_FC_ATTRS is not set
--# CONFIG_SCSI_ISCSI_ATTRS is not set
--# CONFIG_SCSI_SAS_ATTRS is not set
--# CONFIG_SCSI_SAS_LIBSAS is not set
--
--#
--# SCSI low-level drivers
--#
--# CONFIG_ISCSI_TCP is not set
--# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
--# CONFIG_SCSI_3W_9XXX is not set
--# CONFIG_SCSI_ACARD is not set
--# CONFIG_SCSI_AACRAID is not set
--# CONFIG_SCSI_AIC7XXX is not set
--# CONFIG_SCSI_AIC7XXX_OLD is not set
--# CONFIG_SCSI_AIC79XX is not set
--# CONFIG_SCSI_AIC94XX is not set
--# CONFIG_SCSI_DPT_I2O is not set
--# CONFIG_SCSI_ARCMSR is not set
--# CONFIG_MEGARAID_NEWGEN is not set
--# CONFIG_MEGARAID_LEGACY is not set
--# CONFIG_MEGARAID_SAS is not set
--# CONFIG_SCSI_HPTIOP is not set
--# CONFIG_SCSI_DMX3191D is not set
--# CONFIG_SCSI_FUTURE_DOMAIN is not set
--# CONFIG_SCSI_IPS is not set
--# CONFIG_SCSI_INITIO is not set
--# CONFIG_SCSI_INIA100 is not set
--# CONFIG_SCSI_STEX is not set
--# CONFIG_SCSI_SYM53C8XX_2 is not set
--# CONFIG_SCSI_IPR is not set
--# CONFIG_SCSI_QLOGIC_1280 is not set
--# CONFIG_SCSI_QLA_FC is not set
--# CONFIG_SCSI_QLA_ISCSI is not set
--# CONFIG_SCSI_LPFC is not set
--# CONFIG_SCSI_DC395x is not set
--# CONFIG_SCSI_DC390T is not set
--# CONFIG_SCSI_NSP32 is not set
--# CONFIG_SCSI_DEBUG is not set
--# CONFIG_SCSI_SRP is not set
--
--#
--# Serial ATA (prod) and Parallel ATA (experimental) drivers
--#
--CONFIG_ATA=y
--# CONFIG_ATA_NONSTANDARD is not set
--# CONFIG_SATA_AHCI is not set
--# CONFIG_SATA_SVW is not set
--# CONFIG_ATA_PIIX is not set
--# CONFIG_SATA_MV is not set
--# CONFIG_SATA_NV is not set
--# CONFIG_PDC_ADMA is not set
--# CONFIG_SATA_QSTOR is not set
--# CONFIG_SATA_PROMISE is not set
--# CONFIG_SATA_SX4 is not set
--CONFIG_SATA_SIL=y
--# CONFIG_SATA_SIL24 is not set
--# CONFIG_SATA_SIS is not set
--# CONFIG_SATA_ULI is not set
--# CONFIG_SATA_VIA is not set
--# CONFIG_SATA_VITESSE is not set
--# CONFIG_SATA_INIC162X is not set
--# CONFIG_PATA_ALI is not set
--# CONFIG_PATA_AMD is not set
--# CONFIG_PATA_ARTOP is not set
--# CONFIG_PATA_ATIIXP is not set
--# CONFIG_PATA_CMD64X is not set
--# CONFIG_PATA_CS5520 is not set
--# CONFIG_PATA_CS5530 is not set
--# CONFIG_PATA_CYPRESS is not set
--# CONFIG_PATA_EFAR is not set
--# CONFIG_ATA_GENERIC is not set
--# CONFIG_PATA_HPT366 is not set
--# CONFIG_PATA_HPT37X is not set
--# CONFIG_PATA_HPT3X2N is not set
--# CONFIG_PATA_HPT3X3 is not set
--# CONFIG_PATA_IT821X is not set
--# CONFIG_PATA_IT8213 is not set
--# CONFIG_PATA_JMICRON is not set
--# CONFIG_PATA_TRIFLEX is not set
--# CONFIG_PATA_MARVELL is not set
--# CONFIG_PATA_MPIIX is not set
--# CONFIG_PATA_OLDPIIX is not set
--# CONFIG_PATA_NETCELL is not set
--# CONFIG_PATA_NS87410 is not set
--# CONFIG_PATA_OPTI is not set
--# CONFIG_PATA_OPTIDMA is not set
--# CONFIG_PATA_PDC_OLD is not set
--# CONFIG_PATA_RADISYS is not set
--# CONFIG_PATA_RZ1000 is not set
--# CONFIG_PATA_SC1200 is not set
--# CONFIG_PATA_SERVERWORKS is not set
--# CONFIG_PATA_PDC2027X is not set
--# CONFIG_PATA_SIL680 is not set
--# CONFIG_PATA_SIS is not set
--# CONFIG_PATA_VIA is not set
--# CONFIG_PATA_WINBOND is not set
--CONFIG_PATA_PLATFORM=y
--
--#
--# Multi-device support (RAID and LVM)
--#
--# CONFIG_MD is not set
--
--#
--# Fusion MPT device support
--#
--# CONFIG_FUSION is not set
--# CONFIG_FUSION_SPI is not set
--# CONFIG_FUSION_FC is not set
--# CONFIG_FUSION_SAS is not set
--
--#
--# IEEE 1394 (FireWire) support
--#
--# CONFIG_IEEE1394 is not set
--
--#
--# I2O device support
--#
--# CONFIG_I2O is not set
--
--#
--# Network device support
--#
--CONFIG_NETDEVICES=y
--# CONFIG_DUMMY is not set
--# CONFIG_BONDING is not set
--# CONFIG_EQUALIZER is not set
--# CONFIG_TUN is not set
--
--#
--# ARCnet devices
--#
--# CONFIG_ARCNET is not set
--
--#
--# PHY device support
--#
--# CONFIG_PHYLIB is not set
--
--#
--# Ethernet (10 or 100Mbit)
--#
--CONFIG_NET_ETHERNET=y
--CONFIG_MII=y
--# CONFIG_STNIC is not set
--# CONFIG_HAPPYMEAL is not set
--# CONFIG_SUNGEM is not set
--# CONFIG_CASSINI is not set
--# CONFIG_NET_VENDOR_3COM is not set
--# CONFIG_SMC91X is not set
--
--#
--# Tulip family network device support
--#
--# CONFIG_NET_TULIP is not set
--# CONFIG_HP100 is not set
--CONFIG_NET_PCI=y
--CONFIG_PCNET32=m
--# CONFIG_PCNET32_NAPI is not set
--# CONFIG_AMD8111_ETH is not set
--# CONFIG_ADAPTEC_STARFIRE is not set
--# CONFIG_B44 is not set
--# CONFIG_FORCEDETH is not set
--# CONFIG_DGRS is not set
--# CONFIG_EEPRO100 is not set
--# CONFIG_E100 is not set
--# CONFIG_FEALNX is not set
--# CONFIG_NATSEMI is not set
--# CONFIG_NE2K_PCI is not set
--CONFIG_8139CP=m
--CONFIG_8139TOO=m
--# CONFIG_8139TOO_PIO is not set
--# CONFIG_8139TOO_TUNE_TWISTER is not set
--CONFIG_8139TOO_8129=y
--# CONFIG_8139_OLD_RX_RESET is not set
--# CONFIG_SIS900 is not set
--# CONFIG_EPIC100 is not set
--# CONFIG_SUNDANCE is not set
--# CONFIG_TLAN is not set
--CONFIG_VIA_RHINE=m
--CONFIG_VIA_RHINE_MMIO=y
--# CONFIG_VIA_RHINE_NAPI is not set
--# CONFIG_SC92031 is not set
--
--#
--# Ethernet (1000 Mbit)
--#
--# CONFIG_ACENIC is not set
--# CONFIG_DL2K is not set
--CONFIG_E1000=m
--# CONFIG_E1000_NAPI is not set
--# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
--# CONFIG_NS83820 is not set
--# CONFIG_HAMACHI is not set
--# CONFIG_YELLOWFIN is not set
--CONFIG_R8169=y
--# CONFIG_R8169_NAPI is not set
--# CONFIG_SIS190 is not set
--# CONFIG_SKGE is not set
--# CONFIG_SKY2 is not set
--# CONFIG_SK98LIN is not set
--# CONFIG_VIA_VELOCITY is not set
--# CONFIG_TIGON3 is not set
--# CONFIG_BNX2 is not set
--# CONFIG_QLA3XXX is not set
--# CONFIG_ATL1 is not set
--
--#
--# Ethernet (10000 Mbit)
--#
--# CONFIG_CHELSIO_T1 is not set
--# CONFIG_CHELSIO_T3 is not set
--# CONFIG_IXGB is not set
--# CONFIG_S2IO is not set
--# CONFIG_MYRI10GE is not set
--# CONFIG_NETXEN_NIC is not set
--
--#
--# Token Ring devices
--#
--# CONFIG_TR is not set
--
--#
--# Wireless LAN (non-hamradio)
--#
--CONFIG_NET_RADIO=y
--# CONFIG_NET_WIRELESS_RTNETLINK is not set
--
--#
--# Obsolete Wireless cards support (pre-802.11)
--#
--# CONFIG_STRIP is not set
--
--#
--# Wireless 802.11b ISA/PCI cards support
--#
--# CONFIG_IPW2100 is not set
--# CONFIG_IPW2200 is not set
--CONFIG_HERMES=m
--# CONFIG_PLX_HERMES is not set
--# CONFIG_TMD_HERMES is not set
--# CONFIG_NORTEL_HERMES is not set
--# CONFIG_PCI_HERMES is not set
--# CONFIG_ATMEL is not set
--
--#
--# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
--#
--CONFIG_PRISM54=m
--# CONFIG_HOSTAP is not set
--CONFIG_NET_WIRELESS=y
--
--#
--# Wan interfaces
--#
--# CONFIG_WAN is not set
--# CONFIG_FDDI is not set
--# CONFIG_HIPPI is not set
--# CONFIG_PPP is not set
--# CONFIG_SLIP is not set
--# CONFIG_NET_FC is not set
--# CONFIG_SHAPER is not set
--# CONFIG_NETCONSOLE is not set
--# CONFIG_NETPOLL is not set
--# CONFIG_NET_POLL_CONTROLLER is not set
--
--#
--# ISDN subsystem
--#
--# CONFIG_ISDN is not set
--
--#
--# Telephony Support
--#
--# CONFIG_PHONE is not set
--
--#
--# Input device support
--#
--CONFIG_INPUT=y
--# CONFIG_INPUT_FF_MEMLESS is not set
--
--#
--# Userland interfaces
--#
--CONFIG_INPUT_MOUSEDEV=y
--# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
--CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
--CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
--# CONFIG_INPUT_JOYDEV is not set
--# CONFIG_INPUT_TSDEV is not set
--# CONFIG_INPUT_EVDEV is not set
--# CONFIG_INPUT_EVBUG is not set
--
--#
--# Input Device Drivers
--#
--CONFIG_INPUT_KEYBOARD=y
--CONFIG_KEYBOARD_ATKBD=y
--# CONFIG_KEYBOARD_SUNKBD is not set
--# CONFIG_KEYBOARD_LKKBD is not set
--# CONFIG_KEYBOARD_XTKBD is not set
--# CONFIG_KEYBOARD_NEWTON is not set
--# CONFIG_KEYBOARD_STOWAWAY is not set
--# CONFIG_INPUT_MOUSE is not set
--# CONFIG_INPUT_JOYSTICK is not set
--# CONFIG_INPUT_TOUCHSCREEN is not set
--# CONFIG_INPUT_MISC is not set
--
--#
--# Hardware I/O ports
--#
--CONFIG_SERIO=y
--# CONFIG_SERIO_I8042 is not set
--# CONFIG_SERIO_SERPORT is not set
--# CONFIG_SERIO_PCIPS2 is not set
--CONFIG_SERIO_LIBPS2=y
--# CONFIG_SERIO_RAW is not set
--# CONFIG_GAMEPORT is not set
+@@ -1,70 +0,0 @@
+-/*
+- * arch/sh/drivers/pci/dma-dreamcast.c
+- *
+- * PCI DMA support for the Sega Dreamcast
+- *
+- * Copyright (C) 2001, 2002  M. R. Brown
+- * Copyright (C) 2002, 2003  Paul Mundt
+- *
+- * This file originally bore the message (with enclosed-$):
+- *	Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp
+- *	Dreamcast PCI: Supports SEGA Broadband Adaptor only.
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
 -
--#
--# Character devices
--#
--# CONFIG_VT is not set
--# CONFIG_SERIAL_NONSTANDARD is not set
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/param.h>
+-#include <linux/interrupt.h>
+-#include <linux/init.h>
+-#include <linux/irq.h>
+-#include <linux/pci.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/device.h>
 -
--#
--# Serial drivers
--#
--# CONFIG_SERIAL_8250 is not set
+-#include <asm/io.h>
+-#include <asm/irq.h>
+-#include <asm/mach/pci.h>
 -
--#
--# Non-8250 serial port support
--#
--CONFIG_SERIAL_SH_SCI=y
--CONFIG_SERIAL_SH_SCI_NR_UARTS=2
--CONFIG_SERIAL_SH_SCI_CONSOLE=y
--CONFIG_SERIAL_CORE=y
--CONFIG_SERIAL_CORE_CONSOLE=y
--# CONFIG_SERIAL_JSM is not set
--CONFIG_UNIX98_PTYS=y
--CONFIG_LEGACY_PTYS=y
--CONFIG_LEGACY_PTY_COUNT=256
+-static int gapspci_dma_used = 0;
 -
--#
--# IPMI
--#
--# CONFIG_IPMI_HANDLER is not set
+-void *dreamcast_consistent_alloc(struct device *dev, size_t size,
+-				 dma_addr_t *dma_handle, gfp_t flag)
+-{
+-	unsigned long buf;
 -
--#
--# Watchdog Cards
--#
--# CONFIG_WATCHDOG is not set
--CONFIG_HW_RANDOM=y
--# CONFIG_GEN_RTC is not set
--# CONFIG_DTLK is not set
--# CONFIG_R3964 is not set
--# CONFIG_APPLICOM is not set
--# CONFIG_DRM is not set
--# CONFIG_RAW_DRIVER is not set
+-	if (dev && dev->bus != &pci_bus_type)
+-		return NULL;
 -
--#
--# TPM devices
--#
--# CONFIG_TCG_TPM is not set
+-	if (gapspci_dma_used + size > GAPSPCI_DMA_SIZE)
+-		return ERR_PTR(-EINVAL);
 -
--#
--# I2C support
--#
--# CONFIG_I2C is not set
+-	buf = GAPSPCI_DMA_BASE + gapspci_dma_used;
 -
--#
--# SPI support
--#
--# CONFIG_SPI is not set
--# CONFIG_SPI_MASTER is not set
+-	gapspci_dma_used = PAGE_ALIGN(gapspci_dma_used+size);
 -
--#
--# Dallas's 1-wire bus
--#
--# CONFIG_W1 is not set
+-	*dma_handle = (dma_addr_t)buf;
 -
--#
--# Hardware Monitoring support
--#
--CONFIG_HWMON=y
--# CONFIG_HWMON_VID is not set
--# CONFIG_SENSORS_ABITUGURU is not set
--# CONFIG_SENSORS_F71805F is not set
--# CONFIG_SENSORS_PC87427 is not set
--# CONFIG_SENSORS_VT1211 is not set
--# CONFIG_HWMON_DEBUG_CHIP is not set
+-	buf = P2SEGADDR(buf);
 -
--#
--# Multifunction device drivers
--#
--# CONFIG_MFD_SM501 is not set
+-	/* Flush the dcache before we hand off the buffer */
+-	__flush_purge_region((void *)buf, size);
 -
--#
--# Multimedia devices
--#
--# CONFIG_VIDEO_DEV is not set
+-	return (void *)buf;
+-}
 -
--#
--# Digital Video Broadcasting Devices
--#
--# CONFIG_DVB is not set
+-int dreamcast_consistent_free(struct device *dev, size_t size,
+-			 void *vaddr, dma_addr_t dma_handle)
+-{
+-	if (dev && dev->bus != &pci_bus_type)
+-		return -EINVAL;
 -
--#
--# Graphics support
--#
--# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
--# CONFIG_FB is not set
+-	/* XXX */
+-	gapspci_dma_used = 0;
 -
--#
--# Sound
--#
--CONFIG_SOUND=m
+-	return 0;
+-}
 -
--#
--# Advanced Linux Sound Architecture
--#
--# CONFIG_SND is not set
+diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c
+index 6f53f82..c446993 100644
+--- a/arch/sh/drivers/pci/fixups-dreamcast.c
++++ b/arch/sh/drivers/pci/fixups-dreamcast.c
+@@ -22,6 +22,7 @@
+ #include <linux/init.h>
+ #include <linux/irq.h>
+ #include <linux/pci.h>
++#include <linux/dma-mapping.h>
+ 
+ #include <asm/io.h>
+ #include <asm/irq.h>
+@@ -40,6 +41,15 @@ static void __init gapspci_fixup_resources(struct pci_dev *dev)
+ 		 */
+ 		dev->resource[1].start	= p->io_resource->start  + 0x100;
+ 		dev->resource[1].end	= dev->resource[1].start + 0x200 - 1;
++		/*
++		 * Redirect dma memory allocations to special memory window.
++		 */
++		BUG_ON(!dma_declare_coherent_memory(&dev->dev,
++						GAPSPCI_DMA_BASE,
++						GAPSPCI_DMA_BASE,
++						GAPSPCI_DMA_SIZE,
++						DMA_MEMORY_MAP |
++						DMA_MEMORY_EXCLUSIVE));
+ 		break;
+ 	default:
+ 		printk("PCI: Failed resource fixup\n");
+diff --git a/arch/sh/drivers/pci/fixups-sdk7780.c b/arch/sh/drivers/pci/fixups-sdk7780.c
+new file mode 100644
+index 0000000..2f88630
+--- /dev/null
++++ b/arch/sh/drivers/pci/fixups-sdk7780.c
+@@ -0,0 +1,59 @@
++/*
++ * arch/sh/drivers/pci/fixups-sdk7780.c
++ *
++ * PCI fixups for the SDK7780SE03
++ *
++ * Copyright (C) 2003  Lineo uSolutions, Inc.
++ * Copyright (C) 2004 - 2006  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/pci.h>
++#include "pci-sh4.h"
++#include <asm/io.h>
++
++int pci_fixup_pcic(void)
++{
++	ctrl_outl(0x00000001, SH7780_PCI_VCR2);
++
++	/* Enable all interrupts, so we know what to fix */
++	pci_write_reg(0x0000C3FF, SH7780_PCIIMR);
++	pci_write_reg(0x0000380F, SH7780_PCIAINTM);
++
++	/* Set up standard PCI config registers */
++	pci_write_reg(0xFB00, SH7780_PCISTATUS);
++	pci_write_reg(0x0047, SH7780_PCICMD);
++	pci_write_reg(0x00, SH7780_PCIPIF);
++	pci_write_reg(0x00, SH7780_PCISUB);
++	pci_write_reg(0x06, SH7780_PCIBCC);
++	pci_write_reg(0x1912, SH7780_PCISVID);
++	pci_write_reg(0x0001, SH7780_PCISID);
++
++	pci_write_reg(0x08000000, SH7780_PCIMBAR0);	/* PCI */
++	pci_write_reg(0x08000000, SH7780_PCILAR0);	/* SHwy */
++	pci_write_reg(0x07F00001, SH7780_PCILSR);	/* size 128M w/ MBAR */
++
++	pci_write_reg(0x00000000, SH7780_PCIMBAR1);
++	pci_write_reg(0x00000000, SH7780_PCILAR1);
++	pci_write_reg(0x00000000, SH7780_PCILSR1);
++
++	pci_write_reg(0xAB000801, SH7780_PCIIBAR);
++
++	/*
++	 * Set the MBR so PCI address is one-to-one with window,
++	 * meaning all calls go straight through... use ifdef to
++	 * catch erroneous assumption.
++	 */
++	pci_write_reg(0xFD000000 , SH7780_PCIMBR0);
++	pci_write_reg(0x00FC0000 , SH7780_PCIMBMR0);	/* 16M */
++
++	/* Set IOBR for window containing area specified in pci.h */
++	pci_write_reg(PCIBIOS_MIN_IO & ~(SH7780_PCI_IO_SIZE-1), SH7780_PCIIOBR);
++	pci_write_reg((SH7780_PCI_IO_SIZE-1) & (7 << 18), SH7780_PCIIOBMR);
++
++	pci_write_reg(0xA5000C01, SH7780_PCICR);
++
++	return 0;
++}
+diff --git a/arch/sh/drivers/pci/ops-cayman.c b/arch/sh/drivers/pci/ops-cayman.c
+new file mode 100644
+index 0000000..980275f
+--- /dev/null
++++ b/arch/sh/drivers/pci/ops-cayman.c
+@@ -0,0 +1,94 @@
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/types.h>
++#include <asm/cpu/irq.h>
++#include "pci-sh5.h"
++
++static inline u8 bridge_swizzle(u8 pin, u8 slot)
++{
++	return (((pin - 1) + slot) % 4) + 1;
++}
++
++int __init pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin)
++{
++	int result = -1;
++
++	/* The complication here is that the PCI IRQ lines from the Cayman's 2
++	   5V slots get into the CPU via a different path from the IRQ lines
++	   from the 3 3.3V slots.  Thus, we have to detect whether the card's
++	   interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling'
++	   at the point where we cross from 5V to 3.3V is not the normal case.
++
++	   The added complication is that we don't know that the 5V slots are
++	   always bus 2, because a card containing a PCI-PCI bridge may be
++	   plugged into a 3.3V slot, and this changes the bus numbering.
++
++	   Also, the Cayman has an intermediate PCI bus that goes a custom
++	   expansion board header (and to the secondary bridge).  This bus has
++	   never been used in practice.
++
++	   The 1ary onboard PCI-PCI bridge is device 3 on bus 0
++	   The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of
++	   the 1ary bridge.
++	   */
++
++	struct slot_pin {
++		int slot;
++		int pin;
++	} path[4];
++	int i=0;
++
++	while (dev->bus->number > 0) {
++
++		slot = path[i].slot = PCI_SLOT(dev->devfn);
++		pin = path[i].pin = bridge_swizzle(pin, slot);
++		dev = dev->bus->self;
++		i++;
++		if (i > 3) panic("PCI path to root bus too long!\n");
++	}
++
++	slot = PCI_SLOT(dev->devfn);
++	/* This is the slot on bus 0 through which the device is eventually
++	   reachable. */
++
++	/* Now work back up. */
++	if ((slot < 3) || (i == 0)) {
++		/* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
++		   swizzle now. */
++		result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
++	} else {
++		i--;
++		slot = path[i].slot;
++		pin  = path[i].pin;
++		if (slot > 0) {
++			panic("PCI expansion bus device found - not handled!\n");
++		} else {
++			if (i > 0) {
++				/* 5V slots */
++				i--;
++				slot = path[i].slot;
++				pin  = path[i].pin;
++				/* 'pin' was swizzled earlier wrt slot, don't do it again. */
++				result = IRQ_P2INTA + (pin - 1);
++			} else {
++				/* IRQ for 2ary PCI-PCI bridge : unused */
++				result = -1;
++			}
++		}
++	}
++
++	return result;
++}
++
++struct pci_channel board_pci_channels[] = {
++	{ &sh5_pci_ops, NULL, NULL, 0, 0xff },
++	{ NULL, NULL, NULL, 0, 0 },
++};
++EXPORT_SYMBOL(board_pci_channels);
++
++int __init pcibios_init_platform(void)
++{
++	return sh5pci_init(__pa(memory_start),
++			   __pa(memory_end) - __pa(memory_start));
++}
+diff --git a/arch/sh/drivers/pci/ops-r7780rp.c b/arch/sh/drivers/pci/ops-r7780rp.c
+index 48fe403..5fdadae 100644
+--- a/arch/sh/drivers/pci/ops-r7780rp.c
++++ b/arch/sh/drivers/pci/ops-r7780rp.c
+@@ -17,25 +17,13 @@
+ #include <asm/io.h>
+ #include "pci-sh4.h"
+ 
+-static char r7780rp_irq_tab[] __initdata = {
+-	0, 1, 2, 3,
+-};
 -
--#
--# Open Sound System
--#
--CONFIG_SOUND_PRIME=m
--# CONFIG_OBSOLETE_OSS is not set
--# CONFIG_SOUND_BT878 is not set
--# CONFIG_SOUND_ICH is not set
--# CONFIG_SOUND_TRIDENT is not set
--# CONFIG_SOUND_MSNDCLAS is not set
--# CONFIG_SOUND_MSNDPIN is not set
--# CONFIG_SOUND_VIA82CXXX is not set
+-static char r7780mp_irq_tab[] __initdata = {
++static char irq_tab[] __initdata = {
+ 	65, 66, 67, 68,
+ };
+ 
+ int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
+ {
+-	if (mach_is_r7780rp())
+-		return r7780rp_irq_tab[slot];
+-	if (mach_is_r7780mp() || mach_is_r7785rp())
+-		return r7780mp_irq_tab[slot];
 -
--#
--# HID Devices
--#
--CONFIG_HID=y
--# CONFIG_HID_DEBUG is not set
+-	printk(KERN_ERR "PCI: Bad IRQ mapping "
+-	       "request for slot %d, func %d\n", slot, pin-1);
 -
+-	return -1;
++	return irq_tab[slot];
+ }
+ 
+ static struct resource sh7780_io_resource = {
+diff --git a/arch/sh/drivers/pci/ops-sdk7780.c b/arch/sh/drivers/pci/ops-sdk7780.c
+new file mode 100644
+index 0000000..66a9b40
+--- /dev/null
++++ b/arch/sh/drivers/pci/ops-sdk7780.c
+@@ -0,0 +1,73 @@
++/*
++ * linux/arch/sh/drivers/pci/ops-sdk7780.c
++ *
++ * Copyright (C) 2006  Nobuhiro Iwamatsu
++ *
++ * PCI initialization for the SDK7780SE03
++ *
++ * May be copied or modified under the terms of the GNU General Public
++ * License.  See linux/COPYING for more information.
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++#include <asm/sdk7780.h>
++#include <asm/io.h>
++#include "pci-sh4.h"
++
++/* IDSEL [16][17][18][19][20][21][22][23][24][25][26][27][28][29][30][31] */
++static char sdk7780_irq_tab[4][16] __initdata = {
++	/* INTA */
++	{ 65, 68, 67, 68, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
++	/* INTB */
++	{ 66, 65, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
++	/* INTC */
++	{ 67, 66, -1, 66, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
++	/* INTD */
++	{ 68, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
++};
++
++int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
++{
++       return sdk7780_irq_tab[pin-1][slot];
++}
++
++static struct resource sdk7780_io_resource = {
++	.name	= "SH7780_IO",
++	.start	= SH7780_PCI_IO_BASE,
++	.end	= SH7780_PCI_IO_BASE + SH7780_PCI_IO_SIZE - 1,
++	.flags	= IORESOURCE_IO
++};
++
++static struct resource sdk7780_mem_resource = {
++	.name	= "SH7780_mem",
++	.start	= SH7780_PCI_MEMORY_BASE,
++	.end	= SH7780_PCI_MEMORY_BASE + SH7780_PCI_MEM_SIZE - 1,
++	.flags	= IORESOURCE_MEM
++};
++
++struct pci_channel board_pci_channels[] = {
++	{ &sh4_pci_ops, &sdk7780_io_resource, &sdk7780_mem_resource, 0, 0xff },
++	{ NULL, NULL, NULL, 0, 0 },
++};
++EXPORT_SYMBOL(board_pci_channels);
++
++static struct sh4_pci_address_map sdk7780_pci_map = {
++	.window0	= {
++		.base	= SH7780_CS2_BASE_ADDR,
++		.size	= 0x04000000,
++	},
++	.window1	= {
++		.base	= SH7780_CS3_BASE_ADDR,
++		.size	= 0x04000000,
++	},
++	.flags	= SH4_PCIC_NO_RESET,
++};
++
++int __init pcibios_init_platform(void)
++{
++	printk(KERN_INFO "SH7780 PCI: Finished initializing PCI controller\n");
++	return sh7780_pcic_init(&sdk7780_pci_map);
++}
+diff --git a/arch/sh/drivers/pci/ops-sh5.c b/arch/sh/drivers/pci/ops-sh5.c
+new file mode 100644
+index 0000000..729e38a
+--- /dev/null
++++ b/arch/sh/drivers/pci/ops-sh5.c
+@@ -0,0 +1,93 @@
++/*
++ * Support functions for the SH5 PCI hardware.
++ *
++ * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
++ * Copyright (C) 2003, 2004 Paul Mundt
++ * Copyright (C) 2004 Richard Curnow
++ *
++ * May be copied or modified under the terms of the GNU General Public
++ * License.  See linux/COPYING for more information.
++ */
++#include <linux/kernel.h>
++#include <linux/rwsem.h>
++#include <linux/smp.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/types.h>
++#include <linux/irq.h>
++#include <asm/pci.h>
++#include <asm/io.h>
++#include "pci-sh5.h"
++
++static void __init pci_fixup_ide_bases(struct pci_dev *d)
++{
++	int i;
++
++	/*
++	 * PCI IDE controllers use non-standard I/O port decoding, respect it.
++	 */
++	if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
++		return;
++	printk("PCI: IDE base address fixup for %s\n", pci_name(d));
++	for(i=0; i<4; i++) {
++		struct resource *r = &d->resource[i];
++		if ((r->start & ~0x80) == 0x374) {
++			r->start |= 2;
++			r->end = r->start;
++		}
++	}
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
++
++char * __devinit pcibios_setup(char *str)
++{
++	return str;
++}
++
++static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
++			int size, u32 *val)
++{
++	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
++
++	switch (size) {
++		case 1:
++			*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
++			break;
++		case 2:
++			*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
++			break;
++		case 4:
++			*val = SH5PCI_READ(PDR);
++			break;
++	}
++
++	return PCIBIOS_SUCCESSFUL;
++}
++
++static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
++			 int size, u32 val)
++{
++	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
++
++	switch (size) {
++		case 1:
++			SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
++			break;
++		case 2:
++			SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
++			break;
++		case 4:
++			SH5PCI_WRITE(PDR, val);
++			break;
++	}
++
++	return PCIBIOS_SUCCESSFUL;
++}
++
++struct pci_ops sh5_pci_ops = {
++	.read		= sh5pci_read,
++	.write		= sh5pci_write,
++};
+diff --git a/arch/sh/drivers/pci/pci-auto.c b/arch/sh/drivers/pci/pci-auto.c
+index 224e007..ea40470 100644
+--- a/arch/sh/drivers/pci/pci-auto.c
++++ b/arch/sh/drivers/pci/pci-auto.c
+@@ -516,10 +516,8 @@ pciauto_bus_scan(struct pci_channel *hose, int top_bus, int current_bus)
+ 					 PCI_COMMAND, cmdstat | PCI_COMMAND_IO |
+ 					 PCI_COMMAND_MEMORY |
+ 					 PCI_COMMAND_MASTER);
+-#if !defined(CONFIG_SH_HS7751RVOIP) && !defined(CONFIG_SH_RTS7751R2D)
+ 		early_write_config_byte(hose, top_bus, current_bus, pci_devfn,
+ 					PCI_LATENCY_TIMER, 0x80);
+-#endif
+ 
+ 		/* Allocate PCI I/O and/or memory space */
+ 		pciauto_setup_bars(hose, top_bus, current_bus, pci_devfn, PCI_BASE_ADDRESS_5);
+diff --git a/arch/sh/drivers/pci/pci-sh4.h b/arch/sh/drivers/pci/pci-sh4.h
+index 1901c33..4925c79 100644
+--- a/arch/sh/drivers/pci/pci-sh4.h
++++ b/arch/sh/drivers/pci/pci-sh4.h
+@@ -1,7 +1,9 @@
+ #ifndef __PCI_SH4_H
+ #define __PCI_SH4_H
+ 
+-#if defined(CONFIG_CPU_SUBTYPE_SH7780) || defined(CONFIG_CPU_SUBTYPE_SH7785)
++#if defined(CONFIG_CPU_SUBTYPE_SH7780) || \
++    defined(CONFIG_CPU_SUBTYPE_SH7785) || \
++    defined(CONFIG_CPU_SUBTYPE_SH7763)
+ #include "pci-sh7780.h"
+ #else
+ #include "pci-sh7751.h"
+diff --git a/arch/sh/drivers/pci/pci-sh5.c b/arch/sh/drivers/pci/pci-sh5.c
+new file mode 100644
+index 0000000..a00a4df
+--- /dev/null
++++ b/arch/sh/drivers/pci/pci-sh5.c
+@@ -0,0 +1,228 @@
++/*
++ * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
++ * Copyright (C) 2003, 2004 Paul Mundt
++ * Copyright (C) 2004 Richard Curnow
++ *
++ * May be copied or modified under the terms of the GNU General Public
++ * License.  See linux/COPYING for more information.
++ *
++ * Support functions for the SH5 PCI hardware.
++ */
++
++#include <linux/kernel.h>
++#include <linux/rwsem.h>
++#include <linux/smp.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/types.h>
++#include <linux/irq.h>
++#include <asm/cpu/irq.h>
++#include <asm/pci.h>
++#include <asm/io.h>
++#include "pci-sh5.h"
++
++unsigned long pcicr_virt;
++unsigned long PCI_IO_AREA;
++
++/* Rounds a number UP to the nearest power of two. Used for
++ * sizing the PCI window.
++ */
++static u32 __init r2p2(u32 num)
++{
++	int i = 31;
++	u32 tmp = num;
++
++	if (num == 0)
++		return 0;
++
++	do {
++		if (tmp & (1 << 31))
++			break;
++		i--;
++		tmp <<= 1;
++	} while (i >= 0);
++
++	tmp = 1 << i;
++	/* If the original number isn't a power of 2, round it up */
++	if (tmp != num)
++		tmp <<= 1;
++
++	return tmp;
++}
++
++static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
++{
++	struct pt_regs *regs = get_irq_regs();
++	unsigned pci_int, pci_air, pci_cir, pci_aint;
++
++	pci_int = SH5PCI_READ(INT);
++	pci_cir = SH5PCI_READ(CIR);
++	pci_air = SH5PCI_READ(AIR);
++
++	if (pci_int) {
++		printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
++		printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
++		printk("PCI AIR -> 0x%x\n", pci_air);
++		printk("PCI CIR -> 0x%x\n", pci_cir);
++		SH5PCI_WRITE(INT, ~0);
++	}
++
++	pci_aint = SH5PCI_READ(AINT);
++	if (pci_aint) {
++		printk("PCI ARB INTERRUPT!\n");
++		printk("PCI AINT -> 0x%x\n", pci_aint);
++		printk("PCI AIR -> 0x%x\n", pci_air);
++		printk("PCI CIR -> 0x%x\n", pci_cir);
++		SH5PCI_WRITE(AINT, ~0);
++	}
++
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
++{
++	printk("SERR IRQ\n");
++
++	return IRQ_NONE;
++}
++
++int __init sh5pci_init(unsigned long memStart, unsigned long memSize)
++{
++	u32 lsr0;
++	u32 uval;
++
++        if (request_irq(IRQ_ERR, pcish5_err_irq,
++                        IRQF_DISABLED, "PCI Error",NULL) < 0) {
++                printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
++                return -EINVAL;
++        }
++
++        if (request_irq(IRQ_SERR, pcish5_serr_irq,
++                        IRQF_DISABLED, "PCI SERR interrupt", NULL) < 0) {
++                printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
++                return -EINVAL;
++        }
++
++	pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR");
++	if (!pcicr_virt) {
++		panic("Unable to remap PCICR\n");
++	}
++
++	PCI_IO_AREA = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO");
++	if (!PCI_IO_AREA) {
++		panic("Unable to remap PCIIO\n");
++	}
++
++	/* Clear snoop registers */
++        SH5PCI_WRITE(CSCR0, 0);
++        SH5PCI_WRITE(CSCR1, 0);
++
++        /* Switch off interrupts */
++        SH5PCI_WRITE(INTM,  0);
++        SH5PCI_WRITE(AINTM, 0);
++        SH5PCI_WRITE(PINTM, 0);
++
++        /* Set bus active, take it out of reset */
++        uval = SH5PCI_READ(CR);
++
++	/* Set command Register */
++        SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE |
++		     CR_PFCS | CR_BMAM);
++
++	uval=SH5PCI_READ(CR);
++
++        /* Allow it to be a master */
++	/* NB - WE DISABLE I/O ACCESS to stop overlap */
++        /* set WAIT bit to enable stepping, an attempt to improve stability */
++	SH5PCI_WRITE_SHORT(CSR_CMD,
++			    PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
++			    PCI_COMMAND_WAIT);
++
++        /*
++        ** Set translation mapping memory in order to convert the address
++        ** used for the main bus, to the PCI internal address.
++        */
++        SH5PCI_WRITE(MBR,0x40000000);
++
++        /* Always set the max size 512M */
++        SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
++
++        /*
++        ** I/O addresses are mapped at internal PCI specific address
++        ** as is described into the configuration bridge table.
++        ** These are changed to 0, to allow cards that have legacy
++        ** io such as vga to function correctly. We set the SH5 IOBAR to
++        ** 256K, which is a bit big as we can only have 64K of address space
++        */
++
++        SH5PCI_WRITE(IOBR,0x0);
++
++        /* Set up a 256K window. Totally pointless waste  of address space */
++        SH5PCI_WRITE(IOBMR,0);
++
++	/* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec.
++	 * Ideally, we would want to map the I/O region somewhere, but it
++	 * is so big this is not that easy!
++         */
++	SH5PCI_WRITE(CSR_IBAR0,~0);
++	/* Set memory size value */
++        memSize = memory_end - memory_start;
++
++	/* Now we set up the mbars so the PCI bus can see the memory of
++	 * the machine */
++	if (memSize < (1024 * 1024)) {
++                printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%lx?\n",
++		       memSize);
++                return -EINVAL;
++        }
++
++        /* Set LSR 0 */
++        lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 :
++		((r2p2(memSize) - 0x100000) | 0x1);
++        SH5PCI_WRITE(LSR0, lsr0);
++
++        /* Set MBAR 0 */
++        SH5PCI_WRITE(CSR_MBAR0, memory_start);
++        SH5PCI_WRITE(LAR0, memory_start);
++
++        SH5PCI_WRITE(CSR_MBAR1,0);
++        SH5PCI_WRITE(LAR1,0);
++        SH5PCI_WRITE(LSR1,0);
++
++        /* Enable the PCI interrupts on the device */
++        SH5PCI_WRITE(INTM,  ~0);
++        SH5PCI_WRITE(AINTM, ~0);
++        SH5PCI_WRITE(PINTM, ~0);
++
++	return 0;
++}
++
++void __devinit pcibios_fixup_bus(struct pci_bus *bus)
++{
++	struct pci_dev *dev = bus->self;
++	int i;
++
++	if (dev) {
++		for (i= 0; i < 3; i++) {
++			bus->resource[i] =
++				&dev->resource[PCI_BRIDGE_RESOURCES+i];
++			bus->resource[i]->name = bus->name;
++		}
++		bus->resource[0]->flags |= IORESOURCE_IO;
++		bus->resource[1]->flags |= IORESOURCE_MEM;
++
++		/* For now, propagate host limits to the bus;
++		 * we'll adjust them later. */
++		bus->resource[0]->end = 64*1024 - 1 ;
++		bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1;
++		bus->resource[0]->start = PCIBIOS_MIN_IO;
++		bus->resource[1]->start = PCIBIOS_MIN_MEM;
++
++		/* Turn off downstream PF memory address range by default */
++		bus->resource[2]->start = 1024*1024;
++		bus->resource[2]->end = bus->resource[2]->start - 1;
++	}
++}
+diff --git a/arch/sh/drivers/pci/pci-sh5.h b/arch/sh/drivers/pci/pci-sh5.h
+new file mode 100644
+index 0000000..7cff3fc
+--- /dev/null
++++ b/arch/sh/drivers/pci/pci-sh5.h
+@@ -0,0 +1,113 @@
++/*
++ * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
++ *
++ * May be copied or modified under the terms of the GNU General Public
++ * License.  See linux/COPYING for more information.
++ *
++ * Definitions for the SH5 PCI hardware.
++ */
++#ifndef __PCI_SH5_H
++#define __PCI_SH5_H
++
++/* Product ID */
++#define PCISH5_PID		0x350d
++
++/* vendor ID */
++#define PCISH5_VID		0x1054
++
++/* Configuration types */
++#define ST_TYPE0                0x00    /* Configuration cycle type 0 */
++#define ST_TYPE1                0x01    /* Configuration cycle type 1 */
++
++/* VCR data */
++#define PCISH5_VCR_STATUS      0x00
++#define PCISH5_VCR_VERSION     0x08
++
++/*
++** ICR register offsets and bits
++*/
++#define PCISH5_ICR_CR          0x100   /* PCI control register values */
++#define CR_PBAM                 (1<<12)
++#define CR_PFCS                 (1<<11)
++#define CR_FTO                  (1<<10)
++#define CR_PFE                  (1<<9)
++#define CR_TBS                  (1<<8)
++#define CR_SPUE                 (1<<7)
++#define CR_BMAM                 (1<<6)
++#define CR_HOST                 (1<<5)
++#define CR_CLKEN                (1<<4)
++#define CR_SOCS                 (1<<3)
++#define CR_IOCS                 (1<<2)
++#define CR_RSTCTL               (1<<1)
++#define CR_CFINT                (1<<0)
++#define CR_LOCK_MASK            0xa5000000
++
++#define PCISH5_ICR_INT         0x114   /* Interrupt registert values     */
++#define INT_MADIM               (1<<2)
++
++#define PCISH5_ICR_LSR0        0X104   /* Local space register values    */
++#define PCISH5_ICR_LSR1        0X108   /* Local space register values    */
++#define PCISH5_ICR_LAR0        0x10c   /* Local address register values  */
++#define PCISH5_ICR_LAR1        0x110   /* Local address register values  */
++#define PCISH5_ICR_INTM        0x118   /* Interrupt mask register values                         */
++#define PCISH5_ICR_AIR         0x11c   /* Interrupt error address information register values    */
++#define PCISH5_ICR_CIR         0x120   /* Interrupt error command information register values    */
++#define PCISH5_ICR_AINT        0x130   /* Interrupt error arbiter interrupt register values      */
++#define PCISH5_ICR_AINTM       0x134   /* Interrupt error arbiter interrupt mask register values */
++#define PCISH5_ICR_BMIR        0x138   /* Interrupt error info register of bus master values     */
++#define PCISH5_ICR_PAR         0x1c0   /* Pio address register values                            */
++#define PCISH5_ICR_MBR         0x1c4   /* Memory space bank register values                      */
++#define PCISH5_ICR_IOBR        0x1c8   /* I/O space bank register values                         */
++#define PCISH5_ICR_PINT        0x1cc   /* power management interrupt register values             */
++#define PCISH5_ICR_PINTM       0x1d0   /* power management interrupt mask register values        */
++#define PCISH5_ICR_MBMR        0x1d8   /* memory space bank mask register values                 */
++#define PCISH5_ICR_IOBMR       0x1dc   /* I/O space bank mask register values                    */
++#define PCISH5_ICR_CSCR0       0x210   /* PCI cache snoop control register 0                     */
++#define PCISH5_ICR_CSCR1       0x214   /* PCI cache snoop control register 1                     */
++#define PCISH5_ICR_PDR         0x220   /* Pio data register values                               */
++
++/* These are configs space registers */
++#define PCISH5_ICR_CSR_VID     0x000	/* Vendor id                           */
++#define PCISH5_ICR_CSR_DID     0x002   /* Device id                           */
++#define PCISH5_ICR_CSR_CMD     0x004   /* Command register                    */
++#define PCISH5_ICR_CSR_STATUS  0x006   /* Stautus                             */
++#define PCISH5_ICR_CSR_IBAR0   0x010   /* I/O base address register           */
++#define PCISH5_ICR_CSR_MBAR0   0x014   /* First  Memory base address register */
++#define PCISH5_ICR_CSR_MBAR1   0x018   /* Second Memory base address register */
++
++/* Base address of registers */
++#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
++#define SH5PCI_IO_BASE  (PHYS_PCI_BLOCK + 0x00800000)
++/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG)    */
++
++extern unsigned long pcicr_virt;
++/* Register selection macro */
++#define PCISH5_ICR_REG(x)                ( pcicr_virt + (PCISH5_ICR_##x))
++/* #define PCISH5_VCR_REG(x)                ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
++
++/* Write I/O functions */
++#define SH5PCI_WRITE(reg,val)        ctrl_outl((u32)(val),PCISH5_ICR_REG(reg))
++#define SH5PCI_WRITE_SHORT(reg,val)  ctrl_outw((u16)(val),PCISH5_ICR_REG(reg))
++#define SH5PCI_WRITE_BYTE(reg,val)   ctrl_outb((u8)(val),PCISH5_ICR_REG(reg))
++
++/* Read I/O functions */
++#define SH5PCI_READ(reg)             ctrl_inl(PCISH5_ICR_REG(reg))
++#define SH5PCI_READ_SHORT(reg)       ctrl_inw(PCISH5_ICR_REG(reg))
++#define SH5PCI_READ_BYTE(reg)        ctrl_inb(PCISH5_ICR_REG(reg))
++
++/* Set PCI config bits */
++#define SET_CONFIG_BITS(bus,devfn,where)  ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
++
++/* Set PCI command register */
++#define CONFIG_CMD(bus, devfn, where)            SET_CONFIG_BITS(bus->number,devfn,where)
++
++/* Size converters */
++#define PCISH5_MEM_SIZCONV(x)		  (((x / 0x40000) - 1) << 18)
++#define PCISH5_IO_SIZCONV(x)		  (((x / 0x40000) - 1) << 18)
++
++extern struct pci_ops sh5_pci_ops;
++
++/* arch/sh/drivers/pci/pci-sh5.c */
++int sh5pci_init(unsigned long memStart, unsigned long memSize);
++
++#endif /* __PCI_SH5_H */
+diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
+index e516087..7d797f4 100644
+--- a/arch/sh/drivers/pci/pci-sh7780.c
++++ b/arch/sh/drivers/pci/pci-sh7780.c
+@@ -58,6 +58,7 @@ static int __init sh7780_pci_init(void)
+ 	id = pci_read_reg(SH7780_PCIVID);
+ 	if ((id & 0xffff) == SH7780_VENDOR_ID) {
+ 		switch ((id >> 16) & 0xffff) {
++		case SH7763_DEVICE_ID:
+ 		case SH7780_DEVICE_ID:
+ 		case SH7781_DEVICE_ID:
+ 		case SH7785_DEVICE_ID:
+diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h
+index 1d069a8..97b2c98 100644
+--- a/arch/sh/drivers/pci/pci-sh7780.h
++++ b/arch/sh/drivers/pci/pci-sh7780.h
+@@ -16,6 +16,7 @@
+ #define SH7780_VENDOR_ID	0x1912
+ #define SH7781_DEVICE_ID	0x0001
+ #define SH7780_DEVICE_ID	0x0002
++#define SH7763_DEVICE_ID	0x0004
+ #define SH7785_DEVICE_ID	0x0007
+ 
+ /* SH7780 Control Registers */
+diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
+index ccaba36..49b435c 100644
+--- a/arch/sh/drivers/pci/pci.c
++++ b/arch/sh/drivers/pci/pci.c
+@@ -71,7 +71,7 @@ subsys_initcall(pcibios_init);
+  *  Called after each bus is probed, but before its children
+  *  are examined.
+  */
+-void __devinit pcibios_fixup_bus(struct pci_bus *bus)
++void __devinit __weak pcibios_fixup_bus(struct pci_bus *bus)
+ {
+ 	pci_read_bridge_bases(bus);
+ }
+diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
+index 4b81d9c..349d833 100644
+--- a/arch/sh/kernel/Makefile
++++ b/arch/sh/kernel/Makefile
+@@ -1,25 +1,5 @@
 -#
--# USB support
+-# Makefile for the Linux/SuperH kernel.
 -#
--CONFIG_USB_ARCH_HAS_HCD=y
--CONFIG_USB_ARCH_HAS_OHCI=y
--CONFIG_USB_ARCH_HAS_EHCI=y
--# CONFIG_USB is not set
 -
--#
--# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
--#
+-extra-y	:= head.o init_task.o vmlinux.lds
 -
--#
--# USB Gadget Support
--#
--# CONFIG_USB_GADGET is not set
+-obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process.o ptrace.o \
+-	   semaphore.o setup.o signal.o sys_sh.o syscalls.o \
+-	   time.o topology.o traps.o
 -
--#
--# MMC/SD Card support
--#
--# CONFIG_MMC is not set
+-obj-y				+= cpu/ timers/
+-obj-$(CONFIG_VSYSCALL)		+= vsyscall/
+-obj-$(CONFIG_SMP)		+= smp.o
+-obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
+-obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
+-obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
+-obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
+-obj-$(CONFIG_MODULES)		+= sh_ksyms.o module.o
+-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+-obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+-obj-$(CONFIG_PM)		+= pm.o
+-obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 -
--#
--# LED devices
--#
--# CONFIG_NEW_LEDS is not set
+-EXTRA_CFLAGS += -Werror
++ifeq ($(CONFIG_SUPERH32),y)
++include ${srctree}/arch/sh/kernel/Makefile_32
++else
++include ${srctree}/arch/sh/kernel/Makefile_64
++endif
+diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
+new file mode 100644
+index 0000000..c892898
+--- /dev/null
++++ b/arch/sh/kernel/Makefile_32
+@@ -0,0 +1,26 @@
++#
++# Makefile for the Linux/SuperH kernel.
++#
++
++extra-y	:= head_32.o init_task.o vmlinux.lds
++
++obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
++	   ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
++	   syscalls_32.o time_32.o topology.o traps.o traps_32.o
++
++obj-y				+= cpu/ timers/
++obj-$(CONFIG_VSYSCALL)		+= vsyscall/
++obj-$(CONFIG_SMP)		+= smp.o
++obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
++obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
++obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
++obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
++obj-$(CONFIG_MODULES)		+= sh_ksyms_32.o module.o
++obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
++obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
++obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
++obj-$(CONFIG_PM)		+= pm.o
++obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
++obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
++
++EXTRA_CFLAGS += -Werror
+diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
+new file mode 100644
+index 0000000..1ef21cc
+--- /dev/null
++++ b/arch/sh/kernel/Makefile_64
+@@ -0,0 +1,22 @@
++extra-y	:= head_64.o init_task.o vmlinux.lds
++
++obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
++	   ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
++	   syscalls_64.o time_64.o topology.o traps.o traps_64.o
++
++obj-y				+= cpu/ timers/
++obj-$(CONFIG_VSYSCALL)		+= vsyscall/
++obj-$(CONFIG_SMP)		+= smp.o
++obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
++obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
++obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
++obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
++obj-$(CONFIG_MODULES)		+= sh_ksyms_64.o module.o
++obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
++obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
++obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
++obj-$(CONFIG_PM)		+= pm.o
++obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
++obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
++
++EXTRA_CFLAGS += -Werror
+diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
+index d055a3e..f471d24 100644
+--- a/arch/sh/kernel/cpu/Makefile
++++ b/arch/sh/kernel/cpu/Makefile
+@@ -6,8 +6,14 @@ obj-$(CONFIG_CPU_SH2)		= sh2/
+ obj-$(CONFIG_CPU_SH2A)		= sh2a/
+ obj-$(CONFIG_CPU_SH3)		= sh3/
+ obj-$(CONFIG_CPU_SH4)		= sh4/
++obj-$(CONFIG_CPU_SH5)		= sh5/
++
++# Special cases for family ancestry.
++
+ obj-$(CONFIG_CPU_SH4A)		+= sh4a/
+ 
++# Common interfaces.
++
+ obj-$(CONFIG_UBC_WAKEUP)	+= ubc.o
+ obj-$(CONFIG_SH_ADC)		+= adc.o
+ 
+diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
+index c217c4b..80a3132 100644
+--- a/arch/sh/kernel/cpu/init.c
++++ b/arch/sh/kernel/cpu/init.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
++#include <linux/log2.h>
+ #include <asm/mmu_context.h>
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
+@@ -20,9 +21,12 @@
+ #include <asm/system.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cache.h>
++#include <asm/elf.h>
+ #include <asm/io.h>
+-#include <asm/ubc.h>
+ #include <asm/smp.h>
++#ifdef CONFIG_SUPERH32
++#include <asm/ubc.h>
++#endif
+ 
+ /*
+  * Generic wrapper for command line arguments to disable on-chip
+@@ -61,25 +65,12 @@ static void __init speculative_execution_init(void)
+ /*
+  * Generic first-level cache init
+  */
+-static void __init cache_init(void)
++#ifdef CONFIG_SUPERH32
++static void __uses_jump_to_uncached cache_init(void)
+ {
+ 	unsigned long ccr, flags;
+ 
+-	/* First setup the rest of the I-cache info */
+-	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
+-				      current_cpu_data.icache.linesz;
 -
--#
--# LED drivers
--#
+-	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
+-				    current_cpu_data.icache.linesz;
 -
--#
--# LED Triggers
--#
+-	/* And the D-cache too */
+-	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
+-				      current_cpu_data.dcache.linesz;
 -
--#
--# InfiniBand support
--#
--# CONFIG_INFINIBAND is not set
+-	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
+-				    current_cpu_data.dcache.linesz;
 -
--#
--# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
--#
+-	jump_to_P2();
++	jump_to_uncached();
+ 	ccr = ctrl_inl(CCR);
+ 
+ 	/*
+@@ -156,7 +147,31 @@ static void __init cache_init(void)
+ #endif
+ 
+ 	ctrl_outl(flags, CCR);
+-	back_to_P1();
++	back_to_cached();
++}
++#else
++#define cache_init()	do { } while (0)
++#endif
++
++#define CSHAPE(totalsize, linesize, assoc) \
++	((totalsize & ~0xff) | (linesize << 4) | assoc)
++
++#define CACHE_DESC_SHAPE(desc)	\
++	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
++
++static void detect_cache_shape(void)
++{
++	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
++
++	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
++		l1i_cache_shape = l1d_cache_shape;
++	else
++		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
++
++	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
++		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
++	else
++		l2_cache_shape = -1; /* No S-cache */
+ }
+ 
+ #ifdef CONFIG_SH_DSP
+@@ -228,14 +243,32 @@ asmlinkage void __cpuinit sh_cpu_init(void)
+ 	if (current_cpu_data.type == CPU_SH_NONE)
+ 		panic("Unknown CPU");
+ 
++	/* First setup the rest of the I-cache info */
++	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
++				      current_cpu_data.icache.linesz;
++
++	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
++				    current_cpu_data.icache.linesz;
++
++	/* And the D-cache too */
++	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
++				      current_cpu_data.dcache.linesz;
++
++	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
++				    current_cpu_data.dcache.linesz;
++
+ 	/* Init the cache */
+ 	cache_init();
+ 
+-	if (raw_smp_processor_id() == 0)
++	if (raw_smp_processor_id() == 0) {
+ 		shm_align_mask = max_t(unsigned long,
+ 				       current_cpu_data.dcache.way_size - 1,
+ 				       PAGE_SIZE - 1);
+ 
++		/* Boot CPU sets the cache shape */
++		detect_cache_shape();
++	}
++
+ 	/* Disable the FPU */
+ 	if (fpu_disabled) {
+ 		printk("FPU Disabled\n");
+@@ -273,7 +306,10 @@ asmlinkage void __cpuinit sh_cpu_init(void)
+ 	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB.  So ..
+ 	 * we wake it up and hope that all is well.
+ 	 */
++#ifdef CONFIG_SUPERH32
+ 	if (raw_smp_processor_id() == 0)
+ 		ubc_wakeup();
++#endif
++
+ 	speculative_execution_init();
+ }
+diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
+index 8da8e17..cc1836e 100644
+--- a/arch/sh/kernel/cpu/irq/Makefile
++++ b/arch/sh/kernel/cpu/irq/Makefile
+@@ -1,7 +1,9 @@
+ #
+ # Makefile for the Linux/SuperH CPU-specifc IRQ handlers.
+ #
+-obj-y	+= imask.o intc.o
++obj-y	+= intc.o
+ 
++obj-$(CONFIG_SUPERH32)			+= imask.o
++obj-$(CONFIG_CPU_SH5)			+= intc-sh5.o
+ obj-$(CONFIG_CPU_HAS_IPR_IRQ)		+= ipr.o
+ obj-$(CONFIG_CPU_HAS_MASKREG_IRQ)	+= maskreg.o
+diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
+new file mode 100644
+index 0000000..43ee7a9
+--- /dev/null
++++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
+@@ -0,0 +1,257 @@
++/*
++ * arch/sh/kernel/cpu/irq/intc-sh5.c
++ *
++ * Interrupt Controller support for SH5 INTC.
++ *
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003  Paul Mundt
++ *
++ * Per-interrupt selective. IRLM=0 (Fixed priority) is not
++ * supported being useless without a cascaded interrupt
++ * controller.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/bitops.h>
++#include <asm/cpu/irq.h>
++#include <asm/page.h>
++
++/*
++ * Maybe the generic Peripheral block could move to a more
++ * generic include file. INTC Block will be defined here
++ * and only here to make INTC self-contained in a single
++ * file.
++ */
++#define	INTC_BLOCK_OFFSET	0x01000000
++
++/* Base */
++#define INTC_BASE		PHYS_PERIPHERAL_BLOCK + \
++				INTC_BLOCK_OFFSET
++
++/* Address */
++#define INTC_ICR_SET		(intc_virt + 0x0)
++#define INTC_ICR_CLEAR		(intc_virt + 0x8)
++#define INTC_INTPRI_0		(intc_virt + 0x10)
++#define INTC_INTSRC_0		(intc_virt + 0x50)
++#define INTC_INTSRC_1		(intc_virt + 0x58)
++#define INTC_INTREQ_0		(intc_virt + 0x60)
++#define INTC_INTREQ_1		(intc_virt + 0x68)
++#define INTC_INTENB_0		(intc_virt + 0x70)
++#define INTC_INTENB_1		(intc_virt + 0x78)
++#define INTC_INTDSB_0		(intc_virt + 0x80)
++#define INTC_INTDSB_1		(intc_virt + 0x88)
++
++#define INTC_ICR_IRLM		0x1
++#define	INTC_INTPRI_PREGS	8		/* 8 Priority Registers */
++#define	INTC_INTPRI_PPREG	8		/* 8 Priorities per Register */
++
++
++/*
++ * Mapper between the vector ordinal and the IRQ number
++ * passed to kernel/device drivers.
++ */
++int intc_evt_to_irq[(0xE20/0x20)+1] = {
++	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x000 - 0x0E0 */
++	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x100 - 0x1E0 */
++	 0,  0,  0,  0,  0,  1,  0,  0,	/* 0x200 - 0x2E0 */
++	 2,  0,  0,  3,  0,  0,  0, -1,	/* 0x300 - 0x3E0 */
++	32, 33, 34, 35, 36, 37, 38, -1,	/* 0x400 - 0x4E0 */
++	-1, -1, -1, 63, -1, -1, -1, -1,	/* 0x500 - 0x5E0 */
++	-1, -1, 18, 19, 20, 21, 22, -1,	/* 0x600 - 0x6E0 */
++	39, 40, 41, 42, -1, -1, -1, -1,	/* 0x700 - 0x7E0 */
++	 4,  5,  6,  7, -1, -1, -1, -1,	/* 0x800 - 0x8E0 */
++	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x900 - 0x9E0 */
++	12, 13, 14, 15, 16, 17, -1, -1,	/* 0xA00 - 0xAE0 */
++	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xB00 - 0xBE0 */
++	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xC00 - 0xCE0 */
++	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xD00 - 0xDE0 */
++	-1, -1				/* 0xE00 - 0xE20 */
++};
++
++/*
++ * Opposite mapper.
++ */
++static int IRQ_to_vectorN[NR_INTC_IRQS] = {
++	0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /*  0- 7 */
++	  -1,   -1,   -1,   -1, 0x50, 0x51, 0x52, 0x53,	/*  8-15 */
++	0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36,   -1, /* 16-23 */
++	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24-31 */
++	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38,	/* 32-39 */
++        0x39, 0x3A, 0x3B,   -1,   -1,   -1,   -1,   -1, /* 40-47 */
++	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48-55 */
++	  -1,   -1,   -1,   -1,   -1,   -1,   -1, 0x2B, /* 56-63 */
++
++};
++
++static unsigned long intc_virt;
++
++static unsigned int startup_intc_irq(unsigned int irq);
++static void shutdown_intc_irq(unsigned int irq);
++static void enable_intc_irq(unsigned int irq);
++static void disable_intc_irq(unsigned int irq);
++static void mask_and_ack_intc(unsigned int);
++static void end_intc_irq(unsigned int irq);
++
++static struct hw_interrupt_type intc_irq_type = {
++	.typename = "INTC",
++	.startup = startup_intc_irq,
++	.shutdown = shutdown_intc_irq,
++	.enable = enable_intc_irq,
++	.disable = disable_intc_irq,
++	.ack = mask_and_ack_intc,
++	.end = end_intc_irq
++};
++
++static int irlm;		/* IRL mode */
++
++static unsigned int startup_intc_irq(unsigned int irq)
++{
++	enable_intc_irq(irq);
++	return 0; /* never anything pending */
++}
++
++static void shutdown_intc_irq(unsigned int irq)
++{
++	disable_intc_irq(irq);
++}
++
++static void enable_intc_irq(unsigned int irq)
++{
++	unsigned long reg;
++	unsigned long bitmask;
++
++	if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
++		printk("Trying to use straight IRL0-3 with an encoding platform.\n");
++
++	if (irq < 32) {
++		reg = INTC_INTENB_0;
++		bitmask = 1 << irq;
++	} else {
++		reg = INTC_INTENB_1;
++		bitmask = 1 << (irq - 32);
++	}
++
++	ctrl_outl(bitmask, reg);
++}
++
++static void disable_intc_irq(unsigned int irq)
++{
++	unsigned long reg;
++	unsigned long bitmask;
++
++	if (irq < 32) {
++		reg = INTC_INTDSB_0;
++		bitmask = 1 << irq;
++	} else {
++		reg = INTC_INTDSB_1;
++		bitmask = 1 << (irq - 32);
++	}
++
++	ctrl_outl(bitmask, reg);
++}
++
++static void mask_and_ack_intc(unsigned int irq)
++{
++	disable_intc_irq(irq);
++}
++
++static void end_intc_irq(unsigned int irq)
++{
++	enable_intc_irq(irq);
++}
++
++/* For future use, if we ever support IRLM=0) */
++void make_intc_irq(unsigned int irq)
++{
++	disable_irq_nosync(irq);
++	irq_desc[irq].chip = &intc_irq_type;
++	disable_intc_irq(irq);
++}
++
++#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
++int intc_irq_describe(char* p, int irq)
++{
++	if (irq < NR_INTC_IRQS)
++		return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
++	else
++		return 0;
++}
++#endif
++
++void __init plat_irq_setup(void)
++{
++        unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
++	unsigned long reg;
++	unsigned long data;
++	int i;
++
++	intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
++	if (!intc_virt) {
++		panic("Unable to remap INTC\n");
++	}
++
++
++	/* Set default: per-line enable/disable, priority driven ack/eoi */
++	for (i = 0; i < NR_INTC_IRQS; i++) {
++		if (platform_int_priority[i] != NO_PRIORITY) {
++			irq_desc[i].chip = &intc_irq_type;
++		}
++	}
++
++
++	/* Disable all interrupts and set all priorities to 0 to avoid trouble */
++	ctrl_outl(-1, INTC_INTDSB_0);
++	ctrl_outl(-1, INTC_INTDSB_1);
++
++	for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
++		ctrl_outl( NO_PRIORITY, reg);
++
++
++	/* Set IRLM */
++	/* If all the priorities are set to 'no priority', then
++	 * assume we are using encoded mode.
++	 */
++	irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
++		platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
++
++	if (irlm == NO_PRIORITY) {
++		/* IRLM = 0 */
++		reg = INTC_ICR_CLEAR;
++		i = IRQ_INTA;
++		printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
++	} else {
++		/* IRLM = 1 */
++		reg = INTC_ICR_SET;
++		i = IRQ_IRL0;
++	}
++	ctrl_outl(INTC_ICR_IRLM, reg);
++
++	/* Set interrupt priorities according to platform description */
++	for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
++		data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
++		if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
++			/* Upon the 7th, set Priority Register */
++			ctrl_outl(data, reg);
++			data = 0;
++			reg += 8;
++		}
++	}
++
++	/*
++	 * And now let interrupts come in.
++	 * sti() is not enough, we need to
++	 * lower priority, too.
++	 */
++        __asm__ __volatile__("getcon    " __SR ", %0\n\t"
++                             "and       %0, %1, %0\n\t"
++                             "putcon    %0, " __SR "\n\t"
++                             : "=&r" (__dummy0)
++                             : "r" (__dummy1));
++}
+diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c
+index 6ac018c..84806b2 100644
+--- a/arch/sh/kernel/cpu/irq/intc.c
++++ b/arch/sh/kernel/cpu/irq/intc.c
+@@ -335,31 +335,6 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc,
+ 	return 0;
+ }
+ 
+-static unsigned int __init intc_prio_value(struct intc_desc *desc,
+-					   intc_enum enum_id, int do_grps)
+-{
+-	struct intc_prio *p = desc->priorities;
+-	unsigned int i;
 -
--#
--# Real Time Clock
--#
--CONFIG_RTC_LIB=y
--CONFIG_RTC_CLASS=y
--CONFIG_RTC_HCTOSYS=y
--CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
--# CONFIG_RTC_DEBUG is not set
+-	for (i = 0; p && enum_id && i < desc->nr_priorities; i++) {
+-		p = desc->priorities + i;
 -
--#
--# RTC interfaces
--#
--CONFIG_RTC_INTF_SYSFS=y
--CONFIG_RTC_INTF_PROC=y
--CONFIG_RTC_INTF_DEV=y
--# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+-		if (p->enum_id != enum_id)
+-			continue;
 -
--#
--# RTC drivers
--#
--# CONFIG_RTC_DRV_DS1553 is not set
--# CONFIG_RTC_DRV_DS1742 is not set
--# CONFIG_RTC_DRV_M48T86 is not set
--CONFIG_RTC_DRV_SH=y
--# CONFIG_RTC_DRV_TEST is not set
--# CONFIG_RTC_DRV_V3020 is not set
+-		return p->priority;
+-	}
 -
--#
--# DMA Engine support
--#
--# CONFIG_DMA_ENGINE is not set
+-	if (do_grps)
+-		return intc_prio_value(desc, intc_grp_id(desc, enum_id), 0);
 -
--#
--# DMA Clients
--#
+-	/* default to the lowest priority possible if no priority is set
+-	 * - this needs to be at least 2 for 5-bit priorities on 7780
+-	 */
 -
--#
--# DMA Devices
--#
+-	return 2;
+-}
 -
--#
--# Auxiliary Display support
--#
+ static unsigned int __init intc_mask_data(struct intc_desc *desc,
+ 					  struct intc_desc_int *d,
+ 					  intc_enum enum_id, int do_grps)
+@@ -518,8 +493,10 @@ static void __init intc_register_irq(struct intc_desc *desc,
+ 				      handle_level_irq, "level");
+ 	set_irq_chip_data(irq, (void *)data[primary]);
+ 
+-	/* record the desired priority level */
+-	intc_prio_level[irq] = intc_prio_value(desc, enum_id, 1);
++	/* set priority level
++	 * - this needs to be at least 2 for 5-bit priorities on 7780
++	 */
++	intc_prio_level[irq] = 2;
+ 
+ 	/* enable secondary masking method if present */
+ 	if (data[!primary])
+diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
+index ee8f1fe..7a26569 100644
+--- a/arch/sh/kernel/cpu/sh2/entry.S
++++ b/arch/sh/kernel/cpu/sh2/entry.S
+@@ -149,6 +149,14 @@ ENTRY(exception_handler)
+ 	mov	#32,r8
+ 	cmp/hs	r8,r9
+ 	bt	trap_entry	! 64 > vec >= 32  is trap
++
++#if defined(CONFIG_SH_FPU)
++	mov     #13,r8
++	cmp/eq  r8,r9
++	bt      10f             ! fpu
++	nop
++#endif
++
+ 	mov.l	4f,r8
+ 	mov	r9,r4
+ 	shll2	r9
+@@ -158,6 +166,10 @@ ENTRY(exception_handler)
+ 	cmp/eq	r9,r8
+ 	bf	3f
+ 	mov.l	8f,r8		! unhandled exception
++#if defined(CONFIG_SH_FPU)
++10:
++	mov.l	9f, r8		! unhandled exception
++#endif
+ 3:
+ 	mov.l	5f,r10
+ 	jmp	@r8
+@@ -177,7 +189,10 @@ interrupt_entry:
+ 6:	.long	ret_from_irq
+ 7:	.long	do_IRQ
+ 8:	.long	do_exception_error
+-	
++#ifdef CONFIG_SH_FPU
++9:	.long	fpu_error_trap_handler
++#endif
++
+ trap_entry:
+ 	mov	#0x30,r8
+ 	cmp/ge	r8,r9		! vector 0x20-0x2f is systemcall
+@@ -250,7 +265,7 @@ ENTRY(sh_bios_handler)
+ 1:	.long	gdb_vbr_vector
+ #endif /* CONFIG_SH_STANDARD_BIOS */
+ 
+-ENTRY(address_error_handler)
++ENTRY(address_error_trap_handler)
+ 	mov	r15,r4				! regs
+ 	add	#4,r4
+ 	mov	#OFF_PC,r0
+diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+index ec6adc3..b230eb2 100644
+--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
++++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+@@ -65,7 +65,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, groups,
+-			 NULL, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ 
+ static struct plat_sci_port sci_platform_data[] = {
+ 	{
+diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
+index 965fa25..b279cdc 100644
+--- a/arch/sh/kernel/cpu/sh2a/Makefile
++++ b/arch/sh/kernel/cpu/sh2a/Makefile
+@@ -6,4 +6,8 @@ obj-y	:= common.o probe.o opcode_helper.o
+ 
+ common-y	+= $(addprefix ../sh2/, ex.o entry.o)
+ 
++obj-$(CONFIG_SH_FPU)	+= fpu.o
++
+ obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
++obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o
++obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o
+diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+new file mode 100644
+index 0000000..3feb95a
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+@@ -0,0 +1,89 @@
++/*
++ * arch/sh/kernel/cpu/sh2a/clock-sh7203.c
++ *
++ * SH7203 support for the clock framework
++ *
++ *  Copyright (C) 2007 Kieran Bingham (MPC-Data Ltd)
++ *
++ * Based on clock-sh7263.c
++ *  Copyright (C) 2006  Yoshinori Sato
++ *
++ * Based on clock-sh4.c
++ *  Copyright (C) 2005  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <asm/clock.h>
++#include <asm/freq.h>
++#include <asm/io.h>
++
++const static int pll1rate[]={8,12,16,0};
++const static int pfc_divisors[]={1,2,3,4,6,8,12};
++#define ifc_divisors pfc_divisors
++
++#if (CONFIG_SH_CLK_MD == 0)
++#define PLL2 (1)
++#elif (CONFIG_SH_CLK_MD == 1)
++#define PLL2 (2)
++#elif (CONFIG_SH_CLK_MD == 2)
++#define PLL2 (4)
++#elif (CONFIG_SH_CLK_MD == 3)
++#define PLL2 (4)
++#else
++#error "Illegal Clock Mode!"
++#endif
++
++static void master_clk_init(struct clk *clk)
++{
++	clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2 ;
++}
++
++static struct clk_ops sh7203_master_clk_ops = {
++	.init		= master_clk_init,
++};
++
++static void module_clk_recalc(struct clk *clk)
++{
++	int idx = (ctrl_inw(FREQCR) & 0x0007);
++	clk->rate = clk->parent->rate / pfc_divisors[idx];
++}
++
++static struct clk_ops sh7203_module_clk_ops = {
++	.recalc		= module_clk_recalc,
++};
++
++static void bus_clk_recalc(struct clk *clk)
++{
++	int idx = (ctrl_inw(FREQCR) & 0x0007);
++	clk->rate = clk->parent->rate / pfc_divisors[idx-2];
++}
++
++static struct clk_ops sh7203_bus_clk_ops = {
++	.recalc		= bus_clk_recalc,
++};
++
++static void cpu_clk_recalc(struct clk *clk)
++{
++	clk->rate = clk->parent->rate;
++}
++
++static struct clk_ops sh7203_cpu_clk_ops = {
++	.recalc		= cpu_clk_recalc,
++};
++
++static struct clk_ops *sh7203_clk_ops[] = {
++	&sh7203_master_clk_ops,
++	&sh7203_module_clk_ops,
++	&sh7203_bus_clk_ops,
++	&sh7203_cpu_clk_ops,
++};
++
++void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
++{
++	if (idx < ARRAY_SIZE(sh7203_clk_ops))
++		*ops = sh7203_clk_ops[idx];
++}
+diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
+new file mode 100644
+index 0000000..ff99562
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh2a/fpu.c
+@@ -0,0 +1,633 @@
++/*
++ * Save/restore floating point context for signal handlers.
++ *
++ * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * FIXME! These routines can be optimized in big endian case.
++ */
++#include <linux/sched.h>
++#include <linux/signal.h>
++#include <asm/processor.h>
++#include <asm/io.h>
++
++/* The PR (precision) bit in the FP Status Register must be clear when
++ * an frchg instruction is executed, otherwise the instruction is undefined.
++ * Executing frchg with PR set causes a trap on some SH4 implementations.
++ */
++
++#define FPSCR_RCHG 0x00000000
++
++
++/*
++ * Save FPU registers onto task structure.
++ * Assume called with FPU enabled (SR.FD=0).
++ */
++void
++save_fpu(struct task_struct *tsk, struct pt_regs *regs)
++{
++	unsigned long dummy;
++
++	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
++	enable_fpu();
++	asm volatile("sts.l	fpul, @-%0\n\t"
++		     "sts.l	fpscr, @-%0\n\t"
++		     "fmov.s	fr15, @-%0\n\t"
++		     "fmov.s	fr14, @-%0\n\t"
++		     "fmov.s	fr13, @-%0\n\t"
++		     "fmov.s	fr12, @-%0\n\t"
++		     "fmov.s	fr11, @-%0\n\t"
++		     "fmov.s	fr10, @-%0\n\t"
++		     "fmov.s	fr9, @-%0\n\t"
++		     "fmov.s	fr8, @-%0\n\t"
++		     "fmov.s	fr7, @-%0\n\t"
++		     "fmov.s	fr6, @-%0\n\t"
++		     "fmov.s	fr5, @-%0\n\t"
++		     "fmov.s	fr4, @-%0\n\t"
++		     "fmov.s	fr3, @-%0\n\t"
++		     "fmov.s	fr2, @-%0\n\t"
++		     "fmov.s	fr1, @-%0\n\t"
++		     "fmov.s	fr0, @-%0\n\t"
++		     "lds	%3, fpscr\n\t"
++		     : "=r" (dummy)
++		     : "0" ((char *)(&tsk->thread.fpu.hard.status)),
++		       "r" (FPSCR_RCHG),
++		       "r" (FPSCR_INIT)
++		     : "memory");
++
++	disable_fpu();
++	release_fpu(regs);
++}
++
++static void
++restore_fpu(struct task_struct *tsk)
++{
++	unsigned long dummy;
++
++	enable_fpu();
++	asm volatile("fmov.s	@%0+, fr0\n\t"
++		     "fmov.s	@%0+, fr1\n\t"
++		     "fmov.s	@%0+, fr2\n\t"
++		     "fmov.s	@%0+, fr3\n\t"
++		     "fmov.s	@%0+, fr4\n\t"
++		     "fmov.s	@%0+, fr5\n\t"
++		     "fmov.s	@%0+, fr6\n\t"
++		     "fmov.s	@%0+, fr7\n\t"
++		     "fmov.s	@%0+, fr8\n\t"
++		     "fmov.s	@%0+, fr9\n\t"
++		     "fmov.s	@%0+, fr10\n\t"
++		     "fmov.s	@%0+, fr11\n\t"
++		     "fmov.s	@%0+, fr12\n\t"
++		     "fmov.s	@%0+, fr13\n\t"
++		     "fmov.s	@%0+, fr14\n\t"
++		     "fmov.s	@%0+, fr15\n\t"
++		     "lds.l	@%0+, fpscr\n\t"
++		     "lds.l	@%0+, fpul\n\t"
++		     : "=r" (dummy)
++		     : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
++		     : "memory");
++	disable_fpu();
++}
++
++/*
++ * Load the FPU with signalling NANS.  This bit pattern we're using
++ * has the property that no matter wether considered as single or as
++ * double precission represents signaling NANS.
++ */
++
++static void
++fpu_init(void)
++{
++	enable_fpu();
++	asm volatile("lds	%0, fpul\n\t"
++		     "fsts	fpul, fr0\n\t"
++		     "fsts	fpul, fr1\n\t"
++		     "fsts	fpul, fr2\n\t"
++		     "fsts	fpul, fr3\n\t"
++		     "fsts	fpul, fr4\n\t"
++		     "fsts	fpul, fr5\n\t"
++		     "fsts	fpul, fr6\n\t"
++		     "fsts	fpul, fr7\n\t"
++		     "fsts	fpul, fr8\n\t"
++		     "fsts	fpul, fr9\n\t"
++		     "fsts	fpul, fr10\n\t"
++		     "fsts	fpul, fr11\n\t"
++		     "fsts	fpul, fr12\n\t"
++		     "fsts	fpul, fr13\n\t"
++		     "fsts	fpul, fr14\n\t"
++		     "fsts	fpul, fr15\n\t"
++		     "lds	%2, fpscr\n\t"
++		     : /* no output */
++		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
++	disable_fpu();
++}
++
++/*
++ *	Emulate arithmetic ops on denormalized number for some FPU insns.
++ */
++
++/* denormalized float * float */
++static int denormal_mulf(int hx, int hy)
++{
++	unsigned int ix, iy;
++	unsigned long long m, n;
++	int exp, w;
++
++	ix = hx & 0x7fffffff;
++	iy = hy & 0x7fffffff;
++	if (iy < 0x00800000 || ix == 0)
++		return ((hx ^ hy) & 0x80000000);
++
++	exp = (iy & 0x7f800000) >> 23;
++	ix &= 0x007fffff;
++	iy = (iy & 0x007fffff) | 0x00800000;
++	m = (unsigned long long)ix * iy;
++	n = m;
++	w = -1;
++	while (n) { n >>= 1; w++; }
++
++	/* FIXME: use guard bits */
++	exp += w - 126 - 46;
++	if (exp > 0)
++		ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);
++	else if (exp + 22 >= 0)
++		ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;
++	else
++		ix = 0;
++
++	ix |= (hx ^ hy) & 0x80000000;
++	return ix;
++}
++
++/* denormalized double * double */
++static void mult64(unsigned long long x, unsigned long long y,
++		unsigned long long *highp, unsigned long long *lowp)
++{
++	unsigned long long sub0, sub1, sub2, sub3;
++	unsigned long long high, low;
++
++	sub0 = (x >> 32) * (unsigned long) (y >> 32);
++	sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32);
++	sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL);
++	sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL);
++	low = sub3;
++	high = 0LL;
++	sub3 += (sub1 << 32);
++	if (low > sub3)
++		high++;
++	low = sub3;
++	sub3 += (sub2 << 32);
++	if (low > sub3)
++		high++;
++	low = sub3;
++	high += (sub1 >> 32) + (sub2 >> 32);
++	high += sub0;
++	*lowp = low;
++	*highp = high;
++}
++
++static inline long long rshift64(unsigned long long mh,
++		unsigned long long ml, int n)
++{
++	if (n >= 64)
++		return mh >> (n - 64);
++	return (mh << (64 - n)) | (ml >> n);
++}
++
++static long long denormal_muld(long long hx, long long hy)
++{
++	unsigned long long ix, iy;
++	unsigned long long mh, ml, nh, nl;
++	int exp, w;
++
++	ix = hx & 0x7fffffffffffffffLL;
++	iy = hy & 0x7fffffffffffffffLL;
++	if (iy < 0x0010000000000000LL || ix == 0)
++		return ((hx ^ hy) & 0x8000000000000000LL);
++
++	exp = (iy & 0x7ff0000000000000LL) >> 52;
++	ix &= 0x000fffffffffffffLL;
++	iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL;
++	mult64(ix, iy, &mh, &ml);
++	nh = mh;
++	nl = ml;
++	w = -1;
++	if (nh) {
++		while (nh) { nh >>= 1; w++;}
++		w += 64;
++	} else
++		while (nl) { nl >>= 1; w++;}
++
++	/* FIXME: use guard bits */
++	exp += w - 1022 - 52 * 2;
++	if (exp > 0)
++		ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL)
++			| ((long long)exp << 52);
++	else if (exp + 51 >= 0)
++		ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL;
++	else
++		ix = 0;
++
++	ix |= (hx ^ hy) & 0x8000000000000000LL;
++	return ix;
++}
++
++/* ix - iy where iy: denormal and ix, iy >= 0 */
++static int denormal_subf1(unsigned int ix, unsigned int iy)
++{
++	int frac;
++	int exp;
++
++	if (ix < 0x00800000)
++		return ix - iy;
++
++	exp = (ix & 0x7f800000) >> 23;
++	if (exp - 1 > 31)
++		return ix;
++	iy >>= exp - 1;
++	if (iy == 0)
++		return ix;
++
++	frac = (ix & 0x007fffff) | 0x00800000;
++	frac -= iy;
++	while (frac < 0x00800000) {
++		if (--exp == 0)
++			return frac;
++		frac <<= 1;
++	}
++
++	return (exp << 23) | (frac & 0x007fffff);
++}
++
++/* ix + iy where iy: denormal and ix, iy >= 0 */
++static int denormal_addf1(unsigned int ix, unsigned int iy)
++{
++	int frac;
++	int exp;
++
++	if (ix < 0x00800000)
++		return ix + iy;
++
++	exp = (ix & 0x7f800000) >> 23;
++	if (exp - 1 > 31)
++		return ix;
++	iy >>= exp - 1;
++	if (iy == 0)
++	  return ix;
++
++	frac = (ix & 0x007fffff) | 0x00800000;
++	frac += iy;
++	if (frac >= 0x01000000) {
++		frac >>= 1;
++		++exp;
++	}
++
++	return (exp << 23) | (frac & 0x007fffff);
++}
++
++static int denormal_addf(int hx, int hy)
++{
++	unsigned int ix, iy;
++	int sign;
++
++	if ((hx ^ hy) & 0x80000000) {
++		sign = hx & 0x80000000;
++		ix = hx & 0x7fffffff;
++		iy = hy & 0x7fffffff;
++		if (iy < 0x00800000) {
++			ix = denormal_subf1(ix, iy);
++			if (ix < 0) {
++				ix = -ix;
++				sign ^= 0x80000000;
++			}
++		} else {
++			ix = denormal_subf1(iy, ix);
++			sign ^= 0x80000000;
++		}
++	} else {
++		sign = hx & 0x80000000;
++		ix = hx & 0x7fffffff;
++		iy = hy & 0x7fffffff;
++		if (iy < 0x00800000)
++			ix = denormal_addf1(ix, iy);
++		else
++			ix = denormal_addf1(iy, ix);
++	}
++
++	return sign | ix;
++}
++
++/* ix - iy where iy: denormal and ix, iy >= 0 */
++static long long denormal_subd1(unsigned long long ix, unsigned long long iy)
++{
++	long long frac;
++	int exp;
++
++	if (ix < 0x0010000000000000LL)
++		return ix - iy;
++
++	exp = (ix & 0x7ff0000000000000LL) >> 52;
++	if (exp - 1 > 63)
++		return ix;
++	iy >>= exp - 1;
++	if (iy == 0)
++		return ix;
++
++	frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
++	frac -= iy;
++	while (frac < 0x0010000000000000LL) {
++		if (--exp == 0)
++			return frac;
++		frac <<= 1;
++	}
++
++	return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL);
++}
++
++/* ix + iy where iy: denormal and ix, iy >= 0 */
++static long long denormal_addd1(unsigned long long ix, unsigned long long iy)
++{
++	long long frac;
++	long long exp;
++
++	if (ix < 0x0010000000000000LL)
++		return ix + iy;
++
++	exp = (ix & 0x7ff0000000000000LL) >> 52;
++	if (exp - 1 > 63)
++		return ix;
++	iy >>= exp - 1;
++	if (iy == 0)
++	  return ix;
++
++	frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
++	frac += iy;
++	if (frac >= 0x0020000000000000LL) {
++		frac >>= 1;
++		++exp;
++	}
++
++	return (exp << 52) | (frac & 0x000fffffffffffffLL);
++}
++
++static long long denormal_addd(long long hx, long long hy)
++{
++	unsigned long long ix, iy;
++	long long sign;
++
++	if ((hx ^ hy) & 0x8000000000000000LL) {
++		sign = hx & 0x8000000000000000LL;
++		ix = hx & 0x7fffffffffffffffLL;
++		iy = hy & 0x7fffffffffffffffLL;
++		if (iy < 0x0010000000000000LL) {
++			ix = denormal_subd1(ix, iy);
++			if (ix < 0) {
++				ix = -ix;
++				sign ^= 0x8000000000000000LL;
++			}
++		} else {
++			ix = denormal_subd1(iy, ix);
++			sign ^= 0x8000000000000000LL;
++		}
++	} else {
++		sign = hx & 0x8000000000000000LL;
++		ix = hx & 0x7fffffffffffffffLL;
++		iy = hy & 0x7fffffffffffffffLL;
++		if (iy < 0x0010000000000000LL)
++			ix = denormal_addd1(ix, iy);
++		else
++			ix = denormal_addd1(iy, ix);
++	}
++
++	return sign | ix;
++}
++
++/**
++ *	denormal_to_double - Given denormalized float number,
++ *	                     store double float
++ *
++ *	@fpu: Pointer to sh_fpu_hard structure
++ *	@n: Index to FP register
++ */
++static void
++denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
++{
++	unsigned long du, dl;
++	unsigned long x = fpu->fpul;
++	int exp = 1023 - 126;
++
++	if (x != 0 && (x & 0x7f800000) == 0) {
++		du = (x & 0x80000000);
++		while ((x & 0x00800000) == 0) {
++			x <<= 1;
++			exp--;
++		}
++		x &= 0x007fffff;
++		du |= (exp << 20) | (x >> 3);
++		dl = x << 29;
++
++		fpu->fp_regs[n] = du;
++		fpu->fp_regs[n+1] = dl;
++	}
++}
++
++/**
++ *	ieee_fpe_handler - Handle denormalized number exception
++ *
++ *	@regs: Pointer to register structure
++ *
++ *	Returns 1 when it's handled (should not cause exception).
++ */
++static int
++ieee_fpe_handler (struct pt_regs *regs)
++{
++	unsigned short insn = *(unsigned short *) regs->pc;
++	unsigned short finsn;
++	unsigned long nextpc;
++	int nib[4] = {
++		(insn >> 12) & 0xf,
++		(insn >> 8) & 0xf,
++		(insn >> 4) & 0xf,
++		insn & 0xf};
++
++	if (nib[0] == 0xb ||
++	    (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
++		regs->pr = regs->pc + 4;
++	if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
++		nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
++		finsn = *(unsigned short *) (regs->pc + 2);
++	} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
++		if (regs->sr & 1)
++			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
++		else
++			nextpc = regs->pc + 4;
++		finsn = *(unsigned short *) (regs->pc + 2);
++	} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
++		if (regs->sr & 1)
++			nextpc = regs->pc + 4;
++		else
++			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
++		finsn = *(unsigned short *) (regs->pc + 2);
++	} else if (nib[0] == 0x4 && nib[3] == 0xb &&
++		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
++		nextpc = regs->regs[nib[1]];
++		finsn = *(unsigned short *) (regs->pc + 2);
++	} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
++		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
++		nextpc = regs->pc + 4 + regs->regs[nib[1]];
++		finsn = *(unsigned short *) (regs->pc + 2);
++	} else if (insn == 0x000b) { /* rts */
++		nextpc = regs->pr;
++		finsn = *(unsigned short *) (regs->pc + 2);
++	} else {
++		nextpc = regs->pc + 2;
++		finsn = insn;
++	}
++
++#define FPSCR_FPU_ERROR (1 << 17)
++
++	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
++		struct task_struct *tsk = current;
++
++		if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
++			/* FPU error */
++			denormal_to_double (&tsk->thread.fpu.hard,
++					    (finsn >> 8) & 0xf);
++		} else
++			return 0;
++
++		regs->pc = nextpc;
++		return 1;
++	} else if ((finsn & 0xf00f) == 0xf002) { /* fmul */
++		struct task_struct *tsk = current;
++		int fpscr;
++		int n, m, prec;
++		unsigned int hx, hy;
++
++		n = (finsn >> 8) & 0xf;
++		m = (finsn >> 4) & 0xf;
++		hx = tsk->thread.fpu.hard.fp_regs[n];
++		hy = tsk->thread.fpu.hard.fp_regs[m];
++		fpscr = tsk->thread.fpu.hard.fpscr;
++		prec = fpscr & (1 << 19);
++
++		if ((fpscr & FPSCR_FPU_ERROR)
++		     && (prec && ((hx & 0x7fffffff) < 0x00100000
++				   || (hy & 0x7fffffff) < 0x00100000))) {
++			long long llx, lly;
++
++			/* FPU error because of denormal */
++			llx = ((long long) hx << 32)
++			       | tsk->thread.fpu.hard.fp_regs[n+1];
++			lly = ((long long) hy << 32)
++			       | tsk->thread.fpu.hard.fp_regs[m+1];
++			if ((hx & 0x7fffffff) >= 0x00100000)
++				llx = denormal_muld(lly, llx);
++			else
++				llx = denormal_muld(llx, lly);
++			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
++			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
++		} else if ((fpscr & FPSCR_FPU_ERROR)
++		     && (!prec && ((hx & 0x7fffffff) < 0x00800000
++				   || (hy & 0x7fffffff) < 0x00800000))) {
++			/* FPU error because of denormal */
++			if ((hx & 0x7fffffff) >= 0x00800000)
++				hx = denormal_mulf(hy, hx);
++			else
++				hx = denormal_mulf(hx, hy);
++			tsk->thread.fpu.hard.fp_regs[n] = hx;
++		} else
++			return 0;
++
++		regs->pc = nextpc;
++		return 1;
++	} else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */
++		struct task_struct *tsk = current;
++		int fpscr;
++		int n, m, prec;
++		unsigned int hx, hy;
++
++		n = (finsn >> 8) & 0xf;
++		m = (finsn >> 4) & 0xf;
++		hx = tsk->thread.fpu.hard.fp_regs[n];
++		hy = tsk->thread.fpu.hard.fp_regs[m];
++		fpscr = tsk->thread.fpu.hard.fpscr;
++		prec = fpscr & (1 << 19);
++
++		if ((fpscr & FPSCR_FPU_ERROR)
++		     && (prec && ((hx & 0x7fffffff) < 0x00100000
++				   || (hy & 0x7fffffff) < 0x00100000))) {
++			long long llx, lly;
++
++			/* FPU error because of denormal */
++			llx = ((long long) hx << 32)
++			       | tsk->thread.fpu.hard.fp_regs[n+1];
++			lly = ((long long) hy << 32)
++			       | tsk->thread.fpu.hard.fp_regs[m+1];
++			if ((finsn & 0xf00f) == 0xf000)
++				llx = denormal_addd(llx, lly);
++			else
++				llx = denormal_addd(llx, lly ^ (1LL << 63));
++			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
++			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
++		} else if ((fpscr & FPSCR_FPU_ERROR)
++		     && (!prec && ((hx & 0x7fffffff) < 0x00800000
++				   || (hy & 0x7fffffff) < 0x00800000))) {
++			/* FPU error because of denormal */
++			if ((finsn & 0xf00f) == 0xf000)
++				hx = denormal_addf(hx, hy);
++			else
++				hx = denormal_addf(hx, hy ^ 0x80000000);
++			tsk->thread.fpu.hard.fp_regs[n] = hx;
++		} else
++			return 0;
++
++		regs->pc = nextpc;
++		return 1;
++	}
++
++	return 0;
++}
++
++BUILD_TRAP_HANDLER(fpu_error)
++{
++	struct task_struct *tsk = current;
++	TRAP_HANDLER_DECL;
++
++	save_fpu(tsk, regs);
++	if (ieee_fpe_handler(regs)) {
++		tsk->thread.fpu.hard.fpscr &=
++			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
++		grab_fpu(regs);
++		restore_fpu(tsk);
++		set_tsk_thread_flag(tsk, TIF_USEDFPU);
++		return;
++	}
++
++	force_sig(SIGFPE, tsk);
++}
++
++BUILD_TRAP_HANDLER(fpu_state_restore)
++{
++	struct task_struct *tsk = current;
++	TRAP_HANDLER_DECL;
++
++	grab_fpu(regs);
++	if (!user_mode(regs)) {
++		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
++		return;
++	}
++
++	if (used_math()) {
++		/* Using the FPU again.  */
++		restore_fpu(tsk);
++	} else	{
++		/* First time FPU user.  */
++		fpu_init();
++		set_used_math();
++	}
++	set_tsk_thread_flag(tsk, TIF_USEDFPU);
++}
+diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
+index 6d02465..6910e26 100644
+--- a/arch/sh/kernel/cpu/sh2a/probe.c
++++ b/arch/sh/kernel/cpu/sh2a/probe.c
+@@ -3,25 +3,36 @@
+  *
+  * CPU Subtype Probing for SH-2A.
+  *
+- * Copyright (C) 2004, 2005 Paul Mundt
++ * Copyright (C) 2004 - 2007  Paul Mundt
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+  * License.  See the file "COPYING" in the main directory of this archive
+  * for more details.
+  */
 -
--#
--# Virtualization
--#
+ #include <linux/init.h>
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+ 
+ int __init detect_cpu_and_cache_system(void)
+ {
+-	/* Just SH7206 for now .. */
+-	boot_cpu_data.type			= CPU_SH7206;
++	/* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
+ 	boot_cpu_data.flags			|= CPU_HAS_OP32;
+ 
++#if defined(CONFIG_CPU_SUBTYPE_SH7203)
++	boot_cpu_data.type			= CPU_SH7203;
++	/* SH7203 has an FPU.. */
++	boot_cpu_data.flags			|= CPU_HAS_FPU;
++#elif defined(CONFIG_CPU_SUBTYPE_SH7263)
++	boot_cpu_data.type			= CPU_SH7263;
++	boot_cpu_data.flags			|= CPU_HAS_FPU;
++#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
++	boot_cpu_data.type			= CPU_SH7206;
++	/* While SH7206 has a DSP.. */
++	boot_cpu_data.flags			|= CPU_HAS_DSP;
++#endif
++
+ 	boot_cpu_data.dcache.ways		= 4;
+-	boot_cpu_data.dcache.way_incr	= (1 << 11);
++	boot_cpu_data.dcache.way_incr		= (1 << 11);
+ 	boot_cpu_data.dcache.sets		= 128;
+ 	boot_cpu_data.dcache.entry_shift	= 4;
+ 	boot_cpu_data.dcache.linesz		= L1_CACHE_BYTES;
+@@ -37,4 +48,3 @@ int __init detect_cpu_and_cache_system(void)
+ 
+ 	return 0;
+ }
 -
--#
--# File systems
--#
--CONFIG_EXT2_FS=y
--# CONFIG_EXT2_FS_XATTR is not set
--# CONFIG_EXT2_FS_XIP is not set
--CONFIG_EXT3_FS=y
--CONFIG_EXT3_FS_XATTR=y
--# CONFIG_EXT3_FS_POSIX_ACL is not set
--# CONFIG_EXT3_FS_SECURITY is not set
--# CONFIG_EXT4DEV_FS is not set
--CONFIG_JBD=y
--# CONFIG_JBD_DEBUG is not set
--CONFIG_FS_MBCACHE=y
--# CONFIG_REISERFS_FS is not set
--# CONFIG_JFS_FS is not set
--CONFIG_FS_POSIX_ACL=y
--# CONFIG_XFS_FS is not set
--# CONFIG_GFS2_FS is not set
--# CONFIG_OCFS2_FS is not set
--CONFIG_MINIX_FS=y
--# CONFIG_ROMFS_FS is not set
--CONFIG_INOTIFY=y
--CONFIG_INOTIFY_USER=y
--# CONFIG_QUOTA is not set
--CONFIG_DNOTIFY=y
--# CONFIG_AUTOFS_FS is not set
--# CONFIG_AUTOFS4_FS is not set
--CONFIG_FUSE_FS=m
+diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+new file mode 100644
+index 0000000..db6ef5c
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+@@ -0,0 +1,319 @@
++/*
++ * SH7203 and SH7263 Setup
++ *
++ *  Copyright (C) 2007  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/platform_device.h>
++#include <linux/init.h>
++#include <linux/serial.h>
++#include <asm/sci.h>
++
++enum {
++	UNUSED = 0,
++
++	/* interrupt sources */
++	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
++	PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
++	DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI,
++	DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI,
++	DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI,
++	DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI,
++	USB, LCDC, CMT0, CMT1, BSC, WDT,
++	MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D,
++	MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F,
++	MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U,
++	MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U,
++	MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V,
++	MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V,
++	ADC_ADI,
++	IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI,
++	IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI,
++	IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI,
++	IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI, IIC33_TEI,
++	SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
++	SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
++	SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
++	SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
++	SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI,
++	SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI,
++	SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
++
++	/* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
++	ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, ROMDEC_IBUF,
++	ROMDEC_IREADY,
++
++	FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
++
++	SDHI3, SDHI0, SDHI1,
++
++	RTC_ARM, RTC_PRD, RTC_CUP,
++	RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, RCAN0_SLE,
++	RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, RCAN1_SLE,
++
++	SRC_OVF, SRC_ODFI, SRC_IDEI, IEBI,
++
++	/* interrupt groups */
++	PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
++	MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
++	MTU3_ABCD, MTU4_ABCD,
++	IIC30, IIC31, IIC32, IIC33, SCIF0, SCIF1, SCIF2, SCIF3,
++	SSU0, SSU1, ROMDEC, SDHI, FLCTL, RTC, RCAN0, RCAN1, SRC
++};
++
++static struct intc_vect vectors[] __initdata = {
++	INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
++	INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
++	INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
++	INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
++	INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
++	INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
++	INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
++	INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
++	INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109),
++	INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113),
++	INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117),
++	INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121),
++	INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125),
++	INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129),
++	INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133),
++	INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137),
++	INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
++	INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
++	INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
++	INTC_IRQ(MTU2_TGI0A, 146), INTC_IRQ(MTU2_TGI0B, 147),
++	INTC_IRQ(MTU2_TGI0C, 148), INTC_IRQ(MTU2_TGI0D, 149),
++	INTC_IRQ(MTU2_TCI0V, 150),
++	INTC_IRQ(MTU2_TGI0E, 151), INTC_IRQ(MTU2_TGI0F, 152),
++	INTC_IRQ(MTU2_TGI1A, 153), INTC_IRQ(MTU2_TGI1B, 154),
++	INTC_IRQ(MTU2_TCI1V, 155), INTC_IRQ(MTU2_TCI1U, 156),
++	INTC_IRQ(MTU2_TGI2A, 157), INTC_IRQ(MTU2_TGI2B, 158),
++	INTC_IRQ(MTU2_TCI2V, 159), INTC_IRQ(MTU2_TCI2U, 160),
++	INTC_IRQ(MTU2_TGI3A, 161), INTC_IRQ(MTU2_TGI3B, 162),
++	INTC_IRQ(MTU2_TGI3C, 163), INTC_IRQ(MTU2_TGI3D, 164),
++	INTC_IRQ(MTU2_TCI3V, 165),
++	INTC_IRQ(MTU2_TGI4A, 166), INTC_IRQ(MTU2_TGI4B, 167),
++	INTC_IRQ(MTU2_TGI4C, 168), INTC_IRQ(MTU2_TGI4D, 169),
++	INTC_IRQ(MTU2_TCI4V, 170),
++	INTC_IRQ(ADC_ADI, 171),
++	INTC_IRQ(IIC30_STPI, 172), INTC_IRQ(IIC30_NAKI, 173),
++	INTC_IRQ(IIC30_RXI, 174), INTC_IRQ(IIC30_TXI, 175),
++	INTC_IRQ(IIC30_TEI, 176),
++	INTC_IRQ(IIC31_STPI, 177), INTC_IRQ(IIC31_NAKI, 178),
++	INTC_IRQ(IIC31_RXI, 179), INTC_IRQ(IIC31_TXI, 180),
++	INTC_IRQ(IIC31_TEI, 181),
++	INTC_IRQ(IIC32_STPI, 182), INTC_IRQ(IIC32_NAKI, 183),
++	INTC_IRQ(IIC32_RXI, 184), INTC_IRQ(IIC32_TXI, 185),
++	INTC_IRQ(IIC32_TEI, 186),
++	INTC_IRQ(IIC33_STPI, 187), INTC_IRQ(IIC33_NAKI, 188),
++	INTC_IRQ(IIC33_RXI, 189), INTC_IRQ(IIC33_TXI, 190),
++	INTC_IRQ(IIC33_TEI, 191),
++	INTC_IRQ(SCIF0_BRI, 192), INTC_IRQ(SCIF0_ERI, 193),
++	INTC_IRQ(SCIF0_RXI, 194), INTC_IRQ(SCIF0_TXI, 195),
++	INTC_IRQ(SCIF1_BRI, 196), INTC_IRQ(SCIF1_ERI, 197),
++	INTC_IRQ(SCIF1_RXI, 198), INTC_IRQ(SCIF1_TXI, 199),
++	INTC_IRQ(SCIF2_BRI, 200), INTC_IRQ(SCIF2_ERI, 201),
++	INTC_IRQ(SCIF2_RXI, 202), INTC_IRQ(SCIF2_TXI, 203),
++	INTC_IRQ(SCIF3_BRI, 204), INTC_IRQ(SCIF3_ERI, 205),
++	INTC_IRQ(SCIF3_RXI, 206), INTC_IRQ(SCIF3_TXI, 207),
++	INTC_IRQ(SSU0_SSERI, 208), INTC_IRQ(SSU0_SSRXI, 209),
++	INTC_IRQ(SSU0_SSTXI, 210),
++	INTC_IRQ(SSU1_SSERI, 211), INTC_IRQ(SSU1_SSRXI, 212),
++	INTC_IRQ(SSU1_SSTXI, 213),
++	INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
++	INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
++	INTC_IRQ(FLCTL_FLSTEI, 224), INTC_IRQ(FLCTL_FLTENDI, 225),
++	INTC_IRQ(FLCTL_FLTREQ0I, 226), INTC_IRQ(FLCTL_FLTREQ1I, 227),
++	INTC_IRQ(RTC_ARM, 231), INTC_IRQ(RTC_PRD, 232),
++	INTC_IRQ(RTC_CUP, 233),
++	INTC_IRQ(RCAN0_ERS, 234), INTC_IRQ(RCAN0_OVR, 235),
++	INTC_IRQ(RCAN0_RM0, 236), INTC_IRQ(RCAN0_RM1, 237),
++	INTC_IRQ(RCAN0_SLE, 238),
++	INTC_IRQ(RCAN1_ERS, 239), INTC_IRQ(RCAN1_OVR, 240),
++	INTC_IRQ(RCAN1_RM0, 241), INTC_IRQ(RCAN1_RM1, 242),
++	INTC_IRQ(RCAN1_SLE, 243),
++
++	/* SH7263-specific trash */
++#ifdef CONFIG_CPU_SUBTYPE_SH7263
++	INTC_IRQ(ROMDEC_ISY, 218), INTC_IRQ(ROMDEC_IERR, 219),
++	INTC_IRQ(ROMDEC_IARG, 220), INTC_IRQ(ROMDEC_ISEC, 221),
++	INTC_IRQ(ROMDEC_IBUF, 222), INTC_IRQ(ROMDEC_IREADY, 223),
++
++	INTC_IRQ(SDHI3, 228), INTC_IRQ(SDHI0, 229), INTC_IRQ(SDHI1, 230),
++
++	INTC_IRQ(SRC_OVF, 244), INTC_IRQ(SRC_ODFI, 245),
++	INTC_IRQ(SRC_IDEI, 246),
++
++	INTC_IRQ(IEBI, 247),
++#endif
++};
++
++static struct intc_group groups[] __initdata = {
++	INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
++		   PINT4, PINT5, PINT6, PINT7),
++	INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI),
++	INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI),
++	INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI),
++	INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI),
++	INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI),
++	INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI),
++	INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI),
++	INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI),
++	INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D),
++	INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F),
++	INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B),
++	INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U),
++	INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B),
++	INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U),
++	INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D),
++	INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D),
++	INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI,
++		   IIC30_TEI),
++	INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI,
++		   IIC31_TEI),
++	INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI,
++		   IIC32_TEI),
++	INTC_GROUP(IIC33, IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI,
++		   IIC33_TEI),
++	INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
++	INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
++	INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
++	INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
++	INTC_GROUP(SSU0, SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI),
++	INTC_GROUP(SSU1, SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI),
++	INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I,
++		   FLCTL_FLTREQ1I),
++	INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP),
++	INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1,
++		   RCAN0_SLE),
++	INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1,
++		   RCAN1_SLE),
++
++#ifdef CONFIG_CPU_SUBTYPE_SH7263
++	INTC_GROUP(ROMDEC, ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG,
++		   ROMDEC_ISEC, ROMDEC_IBUF, ROMDEC_IREADY),
++	INTC_GROUP(SDHI, SDHI3, SDHI0, SDHI1),
++	INTC_GROUP(SRC, SRC_OVF, SRC_ODFI, SRC_IDEI),
++#endif
++};
++
++static struct intc_prio_reg prio_registers[] __initdata = {
++	{ 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
++	{ 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
++	{ 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
++	{ 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
++	{ 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
++	{ 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } },
++	{ 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
++	{ 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB,
++					      MTU2_VU } },
++	{ 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD,
++					      MTU2_TCI4V } },
++	{ 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } },
++	{ 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } },
++	{ 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } },
++#ifdef CONFIG_CPU_SUBTYPE_SH7203
++	{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
++					      SSI3_SSII, 0 } },
++	{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } },
++	{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } },
++#else
++	{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
++					      SSI3_SSII, ROMDEC } },
++	{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } },
++	{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } },
++#endif
++};
++
++static struct intc_mask_reg mask_registers[] __initdata = {
++	{ 0xfffe0808, 0, 16, /* PINTER */
++	  { 0, 0, 0, 0, 0, 0, 0, 0,
++	    PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
++};
++
++static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
++			 mask_registers, prio_registers, NULL);
++
++static struct plat_sci_port sci_platform_data[] = {
++	{
++		.mapbase	= 0xfffe8000,
++		.flags		= UPF_BOOT_AUTOCONF,
++		.type		= PORT_SCIF,
++		.irqs		=  { 193, 194, 195, 192 },
++	}, {
++		.mapbase	= 0xfffe8800,
++		.flags		= UPF_BOOT_AUTOCONF,
++		.type		= PORT_SCIF,
++		.irqs		=  { 197, 198, 199, 196 },
++	}, {
++		.mapbase	= 0xfffe9000,
++		.flags		= UPF_BOOT_AUTOCONF,
++		.type		= PORT_SCIF,
++		.irqs		=  { 201, 202, 203, 200 },
++	}, {
++		.mapbase	= 0xfffe9800,
++		.flags		= UPF_BOOT_AUTOCONF,
++		.type		= PORT_SCIF,
++		.irqs		=  { 205, 206, 207, 204 },
++	}, {
++		.flags = 0,
++	}
++};
++
++static struct platform_device sci_device = {
++	.name		= "sh-sci",
++	.id		= -1,
++	.dev		= {
++		.platform_data	= sci_platform_data,
++	},
++};
++
++static struct resource rtc_resources[] = {
++	[0] = {
++		.start	= 0xffff2000,
++		.end	= 0xffff2000 + 0x58 - 1,
++		.flags	= IORESOURCE_IO,
++	},
++	[1] = {
++		/* Period IRQ */
++		.start	= 232,
++		.flags	= IORESOURCE_IRQ,
++	},
++	[2] = {
++		/* Carry IRQ */
++		.start	= 233,
++		.flags	= IORESOURCE_IRQ,
++	},
++	[3] = {
++		/* Alarm IRQ */
++		.start	= 231,
++		.flags	= IORESOURCE_IRQ,
++	},
++};
++
++static struct platform_device rtc_device = {
++	.name		= "sh-rtc",
++	.id		= -1,
++	.num_resources	= ARRAY_SIZE(rtc_resources),
++	.resource	= rtc_resources,
++};
++
++static struct platform_device *sh7203_devices[] __initdata = {
++	&sci_device,
++	&rtc_device,
++};
++
++static int __init sh7203_devices_setup(void)
++{
++	return platform_add_devices(sh7203_devices,
++				    ARRAY_SIZE(sh7203_devices));
++}
++__initcall(sh7203_devices_setup);
++
++void __init plat_irq_setup(void)
++{
++	register_intc_controller(&intc_desc);
++}
+diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+index bd745aa..a564425 100644
+--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
++++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+@@ -167,7 +167,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
+-			 NULL, mask_registers, prio_registers, NULL);
++			 mask_registers, prio_registers, NULL);
+ 
+ static struct plat_sci_port sci_platform_data[] = {
+ 	{
+diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
+index 646eb69..3ae4d91 100644
+--- a/arch/sh/kernel/cpu/sh3/Makefile
++++ b/arch/sh/kernel/cpu/sh3/Makefile
+@@ -13,6 +13,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7709)	+= setup-sh770x.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7710)	+= setup-sh7710.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7712)	+= setup-sh7710.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7720)	+= setup-sh7720.o
++obj-$(CONFIG_CPU_SUBTYPE_SH7721)	+= setup-sh7720.o
+ 
+ # Primary on-chip clocks (common)
+ clock-$(CONFIG_CPU_SH3)			:= clock-sh3.o
+@@ -21,5 +22,6 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7706)	:= clock-sh7706.o
+ clock-$(CONFIG_CPU_SUBTYPE_SH7709)	:= clock-sh7709.o
+ clock-$(CONFIG_CPU_SUBTYPE_SH7710)	:= clock-sh7710.o
+ clock-$(CONFIG_CPU_SUBTYPE_SH7720)	:= clock-sh7710.o
++clock-$(CONFIG_CPU_SUBTYPE_SH7712)	:= clock-sh7712.o
+ 
+ obj-y	+= $(clock-y)
+diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
+new file mode 100644
+index 0000000..54f54df
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
+@@ -0,0 +1,71 @@
++/*
++ * arch/sh/kernel/cpu/sh3/clock-sh7712.c
++ *
++ * SH7712 support for the clock framework
++ *
++ *  Copyright (C) 2007  Andrew Murray <amurray at mpc-data.co.uk>
++ *
++ * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
++ *  Copyright (C) 2005  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <asm/clock.h>
++#include <asm/freq.h>
++#include <asm/io.h>
++
++static int multipliers[] = { 1, 2, 3 };
++static int divisors[]    = { 1, 2, 3, 4, 6 };
++
++static void master_clk_init(struct clk *clk)
++{
++	int frqcr = ctrl_inw(FRQCR);
++	int idx = (frqcr & 0x0300) >> 8;
++
++	clk->rate *= multipliers[idx];
++}
++
++static struct clk_ops sh7712_master_clk_ops = {
++	.init		= master_clk_init,
++};
++
++static void module_clk_recalc(struct clk *clk)
++{
++	int frqcr = ctrl_inw(FRQCR);
++	int idx = frqcr & 0x0007;
++
++	clk->rate = clk->parent->rate / divisors[idx];
++}
++
++static struct clk_ops sh7712_module_clk_ops = {
++	.recalc		= module_clk_recalc,
++};
++
++static void cpu_clk_recalc(struct clk *clk)
++{
++	int frqcr = ctrl_inw(FRQCR);
++	int idx = (frqcr & 0x0030) >> 4;
++
++	clk->rate = clk->parent->rate / divisors[idx];
++}
++
++static struct clk_ops sh7712_cpu_clk_ops = {
++	.recalc		= cpu_clk_recalc,
++};
++
++static struct clk_ops *sh7712_clk_ops[] = {
++	&sh7712_master_clk_ops,
++	&sh7712_module_clk_ops,
++	&sh7712_cpu_clk_ops,
++};
++
++void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
++{
++	if (idx < ARRAY_SIZE(sh7712_clk_ops))
++		*ops = sh7712_clk_ops[idx];
++}
++
+diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
+index 0d12a12..4004073 100644
+--- a/arch/sh/kernel/cpu/sh3/entry.S
++++ b/arch/sh/kernel/cpu/sh3/entry.S
+@@ -13,8 +13,9 @@
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+-#include <asm/cpu/mmu_context.h>
+ #include <asm/unistd.h>
++#include <asm/cpu/mmu_context.h>
++#include <asm/page.h>
+ 
+ ! NOTE:
+ ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
+@@ -409,6 +410,27 @@ ENTRY(handle_exception)
+ 	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
+ 	! save all registers onto stack.
+ 	!
++
++#ifdef CONFIG_GUSA
++	! Check for roll back gRB (User and Kernel)
++	mov	r15, k0
++	shll	k0
++	bf/s	1f
++	 shll	k0
++	bf/s	1f
++	 stc	spc, k1
++	stc	r0_bank, k0
++	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
++	bt/s	2f
++	 stc	r1_bank, k1
++
++	add	#-2, k0
++	add	r15, k0
++	ldc	k0, spc		! PC = saved r0 + r15 - 2
++2:	mov	k1, r15		! SP = r1
++1:
++#endif
++
+ 	stc	ssr, k0		! Is it from kernel space?
+ 	shll	k0		! Check MD bit (bit30) by shifting it into...
+ 	shll	k0		!       ...the T bit
+diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
+index b6abf38..11b6d9c 100644
+--- a/arch/sh/kernel/cpu/sh3/ex.S
++++ b/arch/sh/kernel/cpu/sh3/ex.S
+@@ -36,7 +36,7 @@ ENTRY(exception_handling_table)
+ 	.long	exception_error	! address error store	/* 100 */
+ #endif
+ #if defined(CONFIG_SH_FPU)
+-	.long	do_fpu_error		/* 120 */
++	.long	fpu_error_trap_handler	/* 120 */
+ #else
+ 	.long	exception_error		/* 120 */
+ #endif
+diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
+index bf579e0..fcc80bb 100644
+--- a/arch/sh/kernel/cpu/sh3/probe.c
++++ b/arch/sh/kernel/cpu/sh3/probe.c
+@@ -16,11 +16,11 @@
+ #include <asm/cache.h>
+ #include <asm/io.h>
+ 
+-int __init detect_cpu_and_cache_system(void)
++int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
+ {
+ 	unsigned long addr0, addr1, data0, data1, data2, data3;
+ 
+-	jump_to_P2();
++	jump_to_uncached();
+ 	/*
+ 	 * Check if the entry shadows or not.
+ 	 * When shadowed, it's 128-entry system.
+@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
+ 	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
+ 	ctrl_outl(data2&~SH_CACHE_VALID, addr1);
+ 
+-	back_to_P1();
++	back_to_cached();
+ 
+ 	boot_cpu_data.dcache.ways		= 4;
+ 	boot_cpu_data.dcache.entry_shift	= 4;
+@@ -84,6 +84,9 @@ int __init detect_cpu_and_cache_system(void)
+ #if defined(CONFIG_CPU_SUBTYPE_SH7720)
+ 		boot_cpu_data.type = CPU_SH7720;
+ #endif
++#if defined(CONFIG_CPU_SUBTYPE_SH7721)
++		boot_cpu_data.type = CPU_SH7721;
++#endif
+ #if defined(CONFIG_CPU_SUBTYPE_SH7705)
+ 		boot_cpu_data.type = CPU_SH7705;
+ 
+diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+index f6c65f2..dd0a20a 100644
+--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
++++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+@@ -66,12 +66,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(DMAC, 7),
+-	INTC_PRIO(SCIF2, 3),
+-	INTC_PRIO(SCIF0, 3),
+-};
 -
--#
--# CD-ROM/DVD Filesystems
--#
--# CONFIG_ISO9660_FS is not set
--# CONFIG_UDF_FS is not set
+ static struct intc_prio_reg prio_registers[] __initdata = {
+ 	{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ 	{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
+@@ -85,7 +79,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ 
+ static struct intc_vect vectors_irq[] __initdata = {
+ 	INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+@@ -93,7 +87,7 @@ static struct intc_vect vectors_irq[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irq, "sh7705-irq", vectors_irq, NULL,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ 
+ static struct plat_sci_port sci_platform_data[] = {
+ 	{
+diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+index 60b04b1..969804b 100644
+--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
++++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+@@ -81,13 +81,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(DMAC, 7),
+-	INTC_PRIO(SCI, 3),
+-	INTC_PRIO(SCIF2, 3),
+-	INTC_PRIO(SCIF0, 3),
+-};
 -
--#
--# DOS/FAT/NT Filesystems
--#
--CONFIG_FAT_FS=y
--CONFIG_MSDOS_FS=y
--CONFIG_VFAT_FS=y
--CONFIG_FAT_DEFAULT_CODEPAGE=437
--CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
--CONFIG_NTFS_FS=y
--# CONFIG_NTFS_DEBUG is not set
--CONFIG_NTFS_RW=y
+ static struct intc_prio_reg prio_registers[] __initdata = {
+ 	{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ 	{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } },
+@@ -109,7 +102,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ 
+ #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+     defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+@@ -120,7 +113,7 @@ static struct intc_vect vectors_irq[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irq, "sh770x-irq", vectors_irq, NULL,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ #endif
+ 
+ static struct resource rtc_resources[] = {
+diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+index 84e5629..0cc0e2b 100644
+--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
++++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+@@ -73,18 +73,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(SIOF1, SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(DMAC1, 7),
+-	INTC_PRIO(DMAC2, 7),
+-	INTC_PRIO(SCIF0, 3),
+-	INTC_PRIO(SCIF1, 3),
+-	INTC_PRIO(SIOF0, 3),
+-	INTC_PRIO(SIOF1, 3),
+-	INTC_PRIO(EDMAC0, 5),
+-	INTC_PRIO(EDMAC1, 5),
+-	INTC_PRIO(EDMAC2, 5),
+-};
 -
--#
--# Pseudo filesystems
--#
--CONFIG_PROC_FS=y
--CONFIG_PROC_KCORE=y
--CONFIG_PROC_SYSCTL=y
--CONFIG_SYSFS=y
--CONFIG_TMPFS=y
--# CONFIG_TMPFS_POSIX_ACL is not set
--CONFIG_HUGETLBFS=y
--CONFIG_HUGETLB_PAGE=y
--CONFIG_RAMFS=y
--CONFIG_CONFIGFS_FS=m
+ static struct intc_prio_reg prio_registers[] __initdata = {
+ 	{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ 	{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
+@@ -101,7 +89,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ 
+ static struct intc_vect vectors_irq[] __initdata = {
+ 	INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+@@ -109,7 +97,7 @@ static struct intc_vect vectors_irq[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irq, "sh7710-irq", vectors_irq, NULL,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ 
+ static struct resource rtc_resources[] = {
+ 	[0] =	{
+diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+index a0929b8..3855ea4 100644
+--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
++++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+@@ -85,9 +85,62 @@ static struct platform_device sci_device = {
+ 	},
+ };
+ 
++static struct resource usb_ohci_resources[] = {
++	[0] = {
++		.start	= 0xA4428000,
++		.end	= 0xA44280FF,
++		.flags	= IORESOURCE_MEM,
++	},
++	[1] = {
++		.start	= 67,
++		.end	= 67,
++		.flags	= IORESOURCE_IRQ,
++	},
++};
++
++static u64 usb_ohci_dma_mask = 0xffffffffUL;
++static struct platform_device usb_ohci_device = {
++	.name		= "sh_ohci",
++	.id		= -1,
++	.dev = {
++		.dma_mask		= &usb_ohci_dma_mask,
++		.coherent_dma_mask	= 0xffffffff,
++	},
++	.num_resources	= ARRAY_SIZE(usb_ohci_resources),
++	.resource	= usb_ohci_resources,
++};
++
++static struct resource usbf_resources[] = {
++	[0] = {
++		.name	= "sh_udc",
++		.start	= 0xA4420000,
++		.end	= 0xA44200FF,
++		.flags	= IORESOURCE_MEM,
++	},
++	[1] = {
++		.name	= "sh_udc",
++		.start	= 65,
++		.end	= 65,
++		.flags	= IORESOURCE_IRQ,
++	},
++};
++
++static struct platform_device usbf_device = {
++	.name		= "sh_udc",
++	.id		= -1,
++	.dev = {
++		.dma_mask		= NULL,
++		.coherent_dma_mask	= 0xffffffff,
++	},
++	.num_resources	= ARRAY_SIZE(usbf_resources),
++	.resource	= usbf_resources,
++};
++
+ static struct platform_device *sh7720_devices[] __initdata = {
+ 	&rtc_device,
+ 	&sci_device,
++	&usb_ohci_device,
++	&usbf_device,
+ };
+ 
+ static int __init sh7720_devices_setup(void)
+@@ -127,8 +180,11 @@ static struct intc_vect vectors[] __initdata = {
+ 	INTC_VECT(USBF_SPD, 0x6e0),   INTC_VECT(DMAC1_DEI0, 0x800),
+ 	INTC_VECT(DMAC1_DEI1, 0x820), INTC_VECT(DMAC1_DEI2, 0x840),
+ 	INTC_VECT(DMAC1_DEI3, 0x860), INTC_VECT(LCDC, 0x900),
+-	INTC_VECT(SSL, 0x980),        INTC_VECT(USBFI0, 0xa20),
+-	INTC_VECT(USBFI1, 0xa40),     INTC_VECT(USBHI, 0xa60),
++#if defined(CONFIG_CPU_SUBTYPE_SH7720)
++	INTC_VECT(SSL, 0x980),
++#endif
++	INTC_VECT(USBFI0, 0xa20),     INTC_VECT(USBFI1, 0xa40),
++	INTC_VECT(USBHI, 0xa60),
+ 	INTC_VECT(DMAC2_DEI4, 0xb80), INTC_VECT(DMAC2_DEI5, 0xba0),
+ 	INTC_VECT(ADC, 0xbe0),        INTC_VECT(SCIF0, 0xc00),
+ 	INTC_VECT(SCIF1, 0xc20),      INTC_VECT(PINT07, 0xc80),
+@@ -153,22 +209,16 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(MMC, MMCI0, MMCI1, MMCI2, MMCI3),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(SCIF0, 2),
+-	INTC_PRIO(SCIF1, 2),
+-	INTC_PRIO(DMAC1, 1),
+-	INTC_PRIO(DMAC2, 1),
+-	INTC_PRIO(RTC, 2),
+-	INTC_PRIO(TMU, 2),
+-	INTC_PRIO(TPU, 2),
+-};
 -
--#
--# Miscellaneous filesystems
--#
--# CONFIG_ADFS_FS is not set
--# CONFIG_AFFS_FS is not set
--# CONFIG_HFS_FS is not set
--# CONFIG_HFSPLUS_FS is not set
--# CONFIG_BEFS_FS is not set
--# CONFIG_BFS_FS is not set
--# CONFIG_EFS_FS is not set
--# CONFIG_CRAMFS is not set
--# CONFIG_VXFS_FS is not set
--# CONFIG_HPFS_FS is not set
--# CONFIG_QNX4FS_FS is not set
--# CONFIG_SYSV_FS is not set
--# CONFIG_UFS_FS is not set
+ static struct intc_prio_reg prio_registers[] __initdata = {
+ 	{ 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ 	{ 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } },
+ 	{ 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
+ 	{ 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } },
++#if defined(CONFIG_CPU_SUBTYPE_SH7720)
+ 	{ 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } },
++#else
++	{ 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, 0 } },
++#endif
+ 	{ 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } },
+ 	{ 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } },
+ 	{ 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } },
+@@ -177,7 +227,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups,
+-		priorities, NULL, prio_registers, NULL);
++		NULL, prio_registers, NULL);
+ 
+ static struct intc_sense_reg sense_registers[] __initdata = {
+ 	{ INTC_ICR1, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
+@@ -190,7 +240,7 @@ static struct intc_vect vectors_irq[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_irq_desc, "sh7720-irq", vectors_irq,
+-		NULL, priorities, NULL, prio_registers, sense_registers);
++		NULL, NULL, prio_registers, sense_registers);
+ 
+ void __init plat_irq_setup_pins(int mode)
+ {
+diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
+index dadd6bf..d608557 100644
+--- a/arch/sh/kernel/cpu/sh4/Makefile
++++ b/arch/sh/kernel/cpu/sh4/Makefile
+@@ -5,7 +5,7 @@
+ obj-y	:= probe.o common.o
+ common-y	+= $(addprefix ../sh3/, entry.o ex.o)
+ 
+-obj-$(CONFIG_SH_FPU)			+= fpu.o
++obj-$(CONFIG_SH_FPU)			+= fpu.o softfloat.o
+ obj-$(CONFIG_SH_STORE_QUEUES)		+= sq.o
+ 
+ # CPU subtype setup
+diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
+index c5a4fc7..817f993 100644
+--- a/arch/sh/kernel/cpu/sh4/fpu.c
++++ b/arch/sh/kernel/cpu/sh4/fpu.c
+@@ -1,7 +1,4 @@
+-/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $
+- *
+- * linux/arch/sh/kernel/fpu.c
+- *
++/*
+  * Save/restore floating point context for signal handlers.
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+@@ -9,15 +6,16 @@
+  * for more details.
+  *
+  * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
++ * Copyright (C) 2006  ST Microelectronics Ltd. (denorm support)
+  *
+- * FIXME! These routines can be optimized in big endian case.
++ * FIXME! These routines have not been tested for big endian case.
+  */
 -
--#
--# Network File Systems
--#
--CONFIG_NFS_FS=y
--CONFIG_NFS_V3=y
--# CONFIG_NFS_V3_ACL is not set
--CONFIG_NFS_V4=y
--# CONFIG_NFS_DIRECTIO is not set
--CONFIG_NFSD=y
--CONFIG_NFSD_V3=y
--# CONFIG_NFSD_V3_ACL is not set
--CONFIG_NFSD_V4=y
--CONFIG_NFSD_TCP=y
--CONFIG_ROOT_NFS=y
--CONFIG_LOCKD=y
--CONFIG_LOCKD_V4=y
--CONFIG_EXPORTFS=y
--CONFIG_NFS_COMMON=y
--CONFIG_SUNRPC=y
--CONFIG_SUNRPC_GSS=y
--CONFIG_RPCSEC_GSS_KRB5=y
--# CONFIG_RPCSEC_GSS_SPKM3 is not set
--# CONFIG_SMB_FS is not set
--# CONFIG_CIFS is not set
--# CONFIG_NCP_FS is not set
--# CONFIG_CODA_FS is not set
--# CONFIG_AFS_FS is not set
--# CONFIG_9P_FS is not set
+ #include <linux/sched.h>
+ #include <linux/signal.h>
++#include <linux/io.h>
++#include <asm/cpu/fpu.h>
+ #include <asm/processor.h>
+ #include <asm/system.h>
+-#include <asm/io.h>
+ 
+ /* The PR (precision) bit in the FP Status Register must be clear when
+  * an frchg instruction is executed, otherwise the instruction is undefined.
+@@ -25,177 +23,184 @@
+  */
+ 
+ #define FPSCR_RCHG 0x00000000
++extern unsigned long long float64_div(unsigned long long a,
++				      unsigned long long b);
++extern unsigned long int float32_div(unsigned long int a, unsigned long int b);
++extern unsigned long long float64_mul(unsigned long long a,
++				      unsigned long long b);
++extern unsigned long int float32_mul(unsigned long int a, unsigned long int b);
++extern unsigned long long float64_add(unsigned long long a,
++				      unsigned long long b);
++extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
++extern unsigned long long float64_sub(unsigned long long a,
++				      unsigned long long b);
++extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
+ 
++static unsigned int fpu_exception_flags;
+ 
+ /*
+  * Save FPU registers onto task structure.
+  * Assume called with FPU enabled (SR.FD=0).
+  */
+-void
+-save_fpu(struct task_struct *tsk, struct pt_regs *regs)
++void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+ {
+ 	unsigned long dummy;
+ 
+ 	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+ 	enable_fpu();
+-	asm volatile("sts.l	fpul, @-%0\n\t"
+-		     "sts.l	fpscr, @-%0\n\t"
+-		     "lds	%2, fpscr\n\t"
+-		     "frchg\n\t"
+-		     "fmov.s	fr15, @-%0\n\t"
+-		     "fmov.s	fr14, @-%0\n\t"
+-		     "fmov.s	fr13, @-%0\n\t"
+-		     "fmov.s	fr12, @-%0\n\t"
+-		     "fmov.s	fr11, @-%0\n\t"
+-		     "fmov.s	fr10, @-%0\n\t"
+-		     "fmov.s	fr9, @-%0\n\t"
+-		     "fmov.s	fr8, @-%0\n\t"
+-		     "fmov.s	fr7, @-%0\n\t"
+-		     "fmov.s	fr6, @-%0\n\t"
+-		     "fmov.s	fr5, @-%0\n\t"
+-		     "fmov.s	fr4, @-%0\n\t"
+-		     "fmov.s	fr3, @-%0\n\t"
+-		     "fmov.s	fr2, @-%0\n\t"
+-		     "fmov.s	fr1, @-%0\n\t"
+-		     "fmov.s	fr0, @-%0\n\t"
+-		     "frchg\n\t"
+-		     "fmov.s	fr15, @-%0\n\t"
+-		     "fmov.s	fr14, @-%0\n\t"
+-		     "fmov.s	fr13, @-%0\n\t"
+-		     "fmov.s	fr12, @-%0\n\t"
+-		     "fmov.s	fr11, @-%0\n\t"
+-		     "fmov.s	fr10, @-%0\n\t"
+-		     "fmov.s	fr9, @-%0\n\t"
+-		     "fmov.s	fr8, @-%0\n\t"
+-		     "fmov.s	fr7, @-%0\n\t"
+-		     "fmov.s	fr6, @-%0\n\t"
+-		     "fmov.s	fr5, @-%0\n\t"
+-		     "fmov.s	fr4, @-%0\n\t"
+-		     "fmov.s	fr3, @-%0\n\t"
+-		     "fmov.s	fr2, @-%0\n\t"
+-		     "fmov.s	fr1, @-%0\n\t"
+-		     "fmov.s	fr0, @-%0\n\t"
+-		     "lds	%3, fpscr\n\t"
+-		     : "=r" (dummy)
+-		     : "0" ((char *)(&tsk->thread.fpu.hard.status)),
+-		       "r" (FPSCR_RCHG),
+-		       "r" (FPSCR_INIT)
+-		     : "memory");
 -
--#
--# Partition Types
--#
--# CONFIG_PARTITION_ADVANCED is not set
--CONFIG_MSDOS_PARTITION=y
+- 	disable_fpu();
+- 	release_fpu(regs);
++	asm volatile ("sts.l	fpul, @-%0\n\t"
++		      "sts.l	fpscr, @-%0\n\t"
++		      "lds	%2, fpscr\n\t"
++		      "frchg\n\t"
++		      "fmov.s	fr15, @-%0\n\t"
++		      "fmov.s	fr14, @-%0\n\t"
++		      "fmov.s	fr13, @-%0\n\t"
++		      "fmov.s	fr12, @-%0\n\t"
++		      "fmov.s	fr11, @-%0\n\t"
++		      "fmov.s	fr10, @-%0\n\t"
++		      "fmov.s	fr9, @-%0\n\t"
++		      "fmov.s	fr8, @-%0\n\t"
++		      "fmov.s	fr7, @-%0\n\t"
++		      "fmov.s	fr6, @-%0\n\t"
++		      "fmov.s	fr5, @-%0\n\t"
++		      "fmov.s	fr4, @-%0\n\t"
++		      "fmov.s	fr3, @-%0\n\t"
++		      "fmov.s	fr2, @-%0\n\t"
++		      "fmov.s	fr1, @-%0\n\t"
++		      "fmov.s	fr0, @-%0\n\t"
++		      "frchg\n\t"
++		      "fmov.s	fr15, @-%0\n\t"
++		      "fmov.s	fr14, @-%0\n\t"
++		      "fmov.s	fr13, @-%0\n\t"
++		      "fmov.s	fr12, @-%0\n\t"
++		      "fmov.s	fr11, @-%0\n\t"
++		      "fmov.s	fr10, @-%0\n\t"
++		      "fmov.s	fr9, @-%0\n\t"
++		      "fmov.s	fr8, @-%0\n\t"
++		      "fmov.s	fr7, @-%0\n\t"
++		      "fmov.s	fr6, @-%0\n\t"
++		      "fmov.s	fr5, @-%0\n\t"
++		      "fmov.s	fr4, @-%0\n\t"
++		      "fmov.s	fr3, @-%0\n\t"
++		      "fmov.s	fr2, @-%0\n\t"
++		      "fmov.s	fr1, @-%0\n\t"
++		      "fmov.s	fr0, @-%0\n\t"
++		      "lds	%3, fpscr\n\t":"=r" (dummy)
++		      :"0"((char *)(&tsk->thread.fpu.hard.status)),
++		      "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
++		      :"memory");
++
++	disable_fpu();
++	release_fpu(regs);
+ }
+ 
+-static void
+-restore_fpu(struct task_struct *tsk)
++static void restore_fpu(struct task_struct *tsk)
+ {
+ 	unsigned long dummy;
+ 
+- 	enable_fpu();
+-	asm volatile("lds	%2, fpscr\n\t"
+-		     "fmov.s	@%0+, fr0\n\t"
+-		     "fmov.s	@%0+, fr1\n\t"
+-		     "fmov.s	@%0+, fr2\n\t"
+-		     "fmov.s	@%0+, fr3\n\t"
+-		     "fmov.s	@%0+, fr4\n\t"
+-		     "fmov.s	@%0+, fr5\n\t"
+-		     "fmov.s	@%0+, fr6\n\t"
+-		     "fmov.s	@%0+, fr7\n\t"
+-		     "fmov.s	@%0+, fr8\n\t"
+-		     "fmov.s	@%0+, fr9\n\t"
+-		     "fmov.s	@%0+, fr10\n\t"
+-		     "fmov.s	@%0+, fr11\n\t"
+-		     "fmov.s	@%0+, fr12\n\t"
+-		     "fmov.s	@%0+, fr13\n\t"
+-		     "fmov.s	@%0+, fr14\n\t"
+-		     "fmov.s	@%0+, fr15\n\t"
+-		     "frchg\n\t"
+-		     "fmov.s	@%0+, fr0\n\t"
+-		     "fmov.s	@%0+, fr1\n\t"
+-		     "fmov.s	@%0+, fr2\n\t"
+-		     "fmov.s	@%0+, fr3\n\t"
+-		     "fmov.s	@%0+, fr4\n\t"
+-		     "fmov.s	@%0+, fr5\n\t"
+-		     "fmov.s	@%0+, fr6\n\t"
+-		     "fmov.s	@%0+, fr7\n\t"
+-		     "fmov.s	@%0+, fr8\n\t"
+-		     "fmov.s	@%0+, fr9\n\t"
+-		     "fmov.s	@%0+, fr10\n\t"
+-		     "fmov.s	@%0+, fr11\n\t"
+-		     "fmov.s	@%0+, fr12\n\t"
+-		     "fmov.s	@%0+, fr13\n\t"
+-		     "fmov.s	@%0+, fr14\n\t"
+-		     "fmov.s	@%0+, fr15\n\t"
+-		     "frchg\n\t"
+-		     "lds.l	@%0+, fpscr\n\t"
+-		     "lds.l	@%0+, fpul\n\t"
+-		     : "=r" (dummy)
+-		     : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
+-		     : "memory");
++	enable_fpu();
++	asm volatile ("lds	%2, fpscr\n\t"
++		      "fmov.s	@%0+, fr0\n\t"
++		      "fmov.s	@%0+, fr1\n\t"
++		      "fmov.s	@%0+, fr2\n\t"
++		      "fmov.s	@%0+, fr3\n\t"
++		      "fmov.s	@%0+, fr4\n\t"
++		      "fmov.s	@%0+, fr5\n\t"
++		      "fmov.s	@%0+, fr6\n\t"
++		      "fmov.s	@%0+, fr7\n\t"
++		      "fmov.s	@%0+, fr8\n\t"
++		      "fmov.s	@%0+, fr9\n\t"
++		      "fmov.s	@%0+, fr10\n\t"
++		      "fmov.s	@%0+, fr11\n\t"
++		      "fmov.s	@%0+, fr12\n\t"
++		      "fmov.s	@%0+, fr13\n\t"
++		      "fmov.s	@%0+, fr14\n\t"
++		      "fmov.s	@%0+, fr15\n\t"
++		      "frchg\n\t"
++		      "fmov.s	@%0+, fr0\n\t"
++		      "fmov.s	@%0+, fr1\n\t"
++		      "fmov.s	@%0+, fr2\n\t"
++		      "fmov.s	@%0+, fr3\n\t"
++		      "fmov.s	@%0+, fr4\n\t"
++		      "fmov.s	@%0+, fr5\n\t"
++		      "fmov.s	@%0+, fr6\n\t"
++		      "fmov.s	@%0+, fr7\n\t"
++		      "fmov.s	@%0+, fr8\n\t"
++		      "fmov.s	@%0+, fr9\n\t"
++		      "fmov.s	@%0+, fr10\n\t"
++		      "fmov.s	@%0+, fr11\n\t"
++		      "fmov.s	@%0+, fr12\n\t"
++		      "fmov.s	@%0+, fr13\n\t"
++		      "fmov.s	@%0+, fr14\n\t"
++		      "fmov.s	@%0+, fr15\n\t"
++		      "frchg\n\t"
++		      "lds.l	@%0+, fpscr\n\t"
++		      "lds.l	@%0+, fpul\n\t"
++		      :"=r" (dummy)
++		      :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
++		      :"memory");
+ 	disable_fpu();
+ }
+ 
+ /*
+  * Load the FPU with signalling NANS.  This bit pattern we're using
+  * has the property that no matter whether considered as single or as
+- * double precision represents signaling NANS.  
++ * double precision represents signaling NANS.
+  */
+ 
+-static void
+-fpu_init(void)
++static void fpu_init(void)
+ {
+ 	enable_fpu();
+-	asm volatile("lds	%0, fpul\n\t"
+-		     "lds	%1, fpscr\n\t"
+-		     "fsts	fpul, fr0\n\t"
+-		     "fsts	fpul, fr1\n\t"
+-		     "fsts	fpul, fr2\n\t"
+-		     "fsts	fpul, fr3\n\t"
+-		     "fsts	fpul, fr4\n\t"
+-		     "fsts	fpul, fr5\n\t"
+-		     "fsts	fpul, fr6\n\t"
+-		     "fsts	fpul, fr7\n\t"
+-		     "fsts	fpul, fr8\n\t"
+-		     "fsts	fpul, fr9\n\t"
+-		     "fsts	fpul, fr10\n\t"
+-		     "fsts	fpul, fr11\n\t"
+-		     "fsts	fpul, fr12\n\t"
+-		     "fsts	fpul, fr13\n\t"
+-		     "fsts	fpul, fr14\n\t"
+-		     "fsts	fpul, fr15\n\t"
+-		     "frchg\n\t"
+-		     "fsts	fpul, fr0\n\t"
+-		     "fsts	fpul, fr1\n\t"
+-		     "fsts	fpul, fr2\n\t"
+-		     "fsts	fpul, fr3\n\t"
+-		     "fsts	fpul, fr4\n\t"
+-		     "fsts	fpul, fr5\n\t"
+-		     "fsts	fpul, fr6\n\t"
+-		     "fsts	fpul, fr7\n\t"
+-		     "fsts	fpul, fr8\n\t"
+-		     "fsts	fpul, fr9\n\t"
+-		     "fsts	fpul, fr10\n\t"
+-		     "fsts	fpul, fr11\n\t"
+-		     "fsts	fpul, fr12\n\t"
+-		     "fsts	fpul, fr13\n\t"
+-		     "fsts	fpul, fr14\n\t"
+-		     "fsts	fpul, fr15\n\t"
+-		     "frchg\n\t"
+-		     "lds	%2, fpscr\n\t"
+-		     : /* no output */
+-		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
+- 	disable_fpu();
++	asm volatile (	"lds	%0, fpul\n\t"
++			"lds	%1, fpscr\n\t"
++			"fsts	fpul, fr0\n\t"
++			"fsts	fpul, fr1\n\t"
++			"fsts	fpul, fr2\n\t"
++			"fsts	fpul, fr3\n\t"
++			"fsts	fpul, fr4\n\t"
++			"fsts	fpul, fr5\n\t"
++			"fsts	fpul, fr6\n\t"
++			"fsts	fpul, fr7\n\t"
++			"fsts	fpul, fr8\n\t"
++			"fsts	fpul, fr9\n\t"
++			"fsts	fpul, fr10\n\t"
++			"fsts	fpul, fr11\n\t"
++			"fsts	fpul, fr12\n\t"
++			"fsts	fpul, fr13\n\t"
++			"fsts	fpul, fr14\n\t"
++			"fsts	fpul, fr15\n\t"
++			"frchg\n\t"
++			"fsts	fpul, fr0\n\t"
++			"fsts	fpul, fr1\n\t"
++			"fsts	fpul, fr2\n\t"
++			"fsts	fpul, fr3\n\t"
++			"fsts	fpul, fr4\n\t"
++			"fsts	fpul, fr5\n\t"
++			"fsts	fpul, fr6\n\t"
++			"fsts	fpul, fr7\n\t"
++			"fsts	fpul, fr8\n\t"
++			"fsts	fpul, fr9\n\t"
++			"fsts	fpul, fr10\n\t"
++			"fsts	fpul, fr11\n\t"
++			"fsts	fpul, fr12\n\t"
++			"fsts	fpul, fr13\n\t"
++			"fsts	fpul, fr14\n\t"
++			"fsts	fpul, fr15\n\t"
++			"frchg\n\t"
++			"lds	%2, fpscr\n\t"
++			:	/* no output */
++			:"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
++	disable_fpu();
+ }
+ 
+ /**
+- *	denormal_to_double - Given denormalized float number,
+- *	                     store double float
++ *      denormal_to_double - Given denormalized float number,
++ *                           store double float
+  *
+- *	@fpu: Pointer to sh_fpu_hard structure
+- *	@n: Index to FP register
++ *      @fpu: Pointer to sh_fpu_hard structure
++ *      @n: Index to FP register
+  */
+-static void
+-denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
++static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
+ {
+ 	unsigned long du, dl;
+ 	unsigned long x = fpu->fpul;
+@@ -212,7 +217,7 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
+ 		dl = x << 29;
+ 
+ 		fpu->fp_regs[n] = du;
+-		fpu->fp_regs[n+1] = dl;
++		fpu->fp_regs[n + 1] = dl;
+ 	}
+ }
+ 
+@@ -223,68 +228,191 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
+  *
+  *	Returns 1 when it's handled (should not cause exception).
+  */
+-static int
+-ieee_fpe_handler (struct pt_regs *regs)
++static int ieee_fpe_handler(struct pt_regs *regs)
+ {
+-	unsigned short insn = *(unsigned short *) regs->pc;
++	unsigned short insn = *(unsigned short *)regs->pc;
+ 	unsigned short finsn;
+ 	unsigned long nextpc;
+ 	int nib[4] = {
+ 		(insn >> 12) & 0xf,
+ 		(insn >> 8) & 0xf,
+ 		(insn >> 4) & 0xf,
+-		insn & 0xf};
 -
--#
--# Native Language Support
--#
--CONFIG_NLS=y
--CONFIG_NLS_DEFAULT="iso8859-1"
--CONFIG_NLS_CODEPAGE_437=y
--# CONFIG_NLS_CODEPAGE_737 is not set
--# CONFIG_NLS_CODEPAGE_775 is not set
--# CONFIG_NLS_CODEPAGE_850 is not set
--# CONFIG_NLS_CODEPAGE_852 is not set
--# CONFIG_NLS_CODEPAGE_855 is not set
--# CONFIG_NLS_CODEPAGE_857 is not set
--# CONFIG_NLS_CODEPAGE_860 is not set
--# CONFIG_NLS_CODEPAGE_861 is not set
--# CONFIG_NLS_CODEPAGE_862 is not set
--# CONFIG_NLS_CODEPAGE_863 is not set
--# CONFIG_NLS_CODEPAGE_864 is not set
--# CONFIG_NLS_CODEPAGE_865 is not set
--# CONFIG_NLS_CODEPAGE_866 is not set
--# CONFIG_NLS_CODEPAGE_869 is not set
--# CONFIG_NLS_CODEPAGE_936 is not set
--# CONFIG_NLS_CODEPAGE_950 is not set
--CONFIG_NLS_CODEPAGE_932=y
--# CONFIG_NLS_CODEPAGE_949 is not set
--# CONFIG_NLS_CODEPAGE_874 is not set
--# CONFIG_NLS_ISO8859_8 is not set
--# CONFIG_NLS_CODEPAGE_1250 is not set
--# CONFIG_NLS_CODEPAGE_1251 is not set
--# CONFIG_NLS_ASCII is not set
--CONFIG_NLS_ISO8859_1=y
--# CONFIG_NLS_ISO8859_2 is not set
--# CONFIG_NLS_ISO8859_3 is not set
--# CONFIG_NLS_ISO8859_4 is not set
--# CONFIG_NLS_ISO8859_5 is not set
--# CONFIG_NLS_ISO8859_6 is not set
--# CONFIG_NLS_ISO8859_7 is not set
--# CONFIG_NLS_ISO8859_9 is not set
--# CONFIG_NLS_ISO8859_13 is not set
--# CONFIG_NLS_ISO8859_14 is not set
--# CONFIG_NLS_ISO8859_15 is not set
--# CONFIG_NLS_KOI8_R is not set
--# CONFIG_NLS_KOI8_U is not set
--# CONFIG_NLS_UTF8 is not set
+-	if (nib[0] == 0xb ||
+-	    (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
+-		regs->pr = regs->pc + 4;
+-  
+-	if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
+-		nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
+-		finsn = *(unsigned short *) (regs->pc + 2);
+-	} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
++		insn & 0xf
++	};
++
++	if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb))
++		regs->pr = regs->pc + 4;  /* bsr & jsr */
++
++	if (nib[0] == 0xa || nib[0] == 0xb) {
++		/* bra & bsr */
++		nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3);
++		finsn = *(unsigned short *)(regs->pc + 2);
++	} else if (nib[0] == 0x8 && nib[1] == 0xd) {
++		/* bt/s */
+ 		if (regs->sr & 1)
+-			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
++			nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
+ 		else
+ 			nextpc = regs->pc + 4;
+-		finsn = *(unsigned short *) (regs->pc + 2);
+-	} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
++		finsn = *(unsigned short *)(regs->pc + 2);
++	} else if (nib[0] == 0x8 && nib[1] == 0xf) {
++		/* bf/s */
+ 		if (regs->sr & 1)
+ 			nextpc = regs->pc + 4;
+ 		else
+-			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+-		finsn = *(unsigned short *) (regs->pc + 2);
++			nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
++		finsn = *(unsigned short *)(regs->pc + 2);
+ 	} else if (nib[0] == 0x4 && nib[3] == 0xb &&
+-		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
++		   (nib[2] == 0x0 || nib[2] == 0x2)) {
++		/* jmp & jsr */
+ 		nextpc = regs->regs[nib[1]];
+-		finsn = *(unsigned short *) (regs->pc + 2);
++		finsn = *(unsigned short *)(regs->pc + 2);
+ 	} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
+-		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
++		   (nib[2] == 0x0 || nib[2] == 0x2)) {
++		/* braf & bsrf */
+ 		nextpc = regs->pc + 4 + regs->regs[nib[1]];
+-		finsn = *(unsigned short *) (regs->pc + 2);
+-	} else if (insn == 0x000b) { /* rts */
++		finsn = *(unsigned short *)(regs->pc + 2);
++	} else if (insn == 0x000b) {
++		/* rts */
+ 		nextpc = regs->pr;
+-		finsn = *(unsigned short *) (regs->pc + 2);
++		finsn = *(unsigned short *)(regs->pc + 2);
+ 	} else {
+ 		nextpc = regs->pc + instruction_size(insn);
+ 		finsn = insn;
+ 	}
+ 
+-	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
++	if ((finsn & 0xf1ff) == 0xf0ad) {
++		/* fcnvsd */
+ 		struct task_struct *tsk = current;
+ 
+ 		save_fpu(tsk, regs);
+-		if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
++		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
+ 			/* FPU error */
+-			denormal_to_double (&tsk->thread.fpu.hard,
+-					    (finsn >> 8) & 0xf);
+-			tsk->thread.fpu.hard.fpscr &=
+-				~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
+-			grab_fpu(regs);
+-			restore_fpu(tsk);
+-			set_tsk_thread_flag(tsk, TIF_USEDFPU);
++			denormal_to_double(&tsk->thread.fpu.hard,
++					   (finsn >> 8) & 0xf);
++		else
++			return 0;
++
++		regs->pc = nextpc;
++		return 1;
++	} else if ((finsn & 0xf00f) == 0xf002) {
++		/* fmul */
++		struct task_struct *tsk = current;
++		int fpscr;
++		int n, m, prec;
++		unsigned int hx, hy;
++
++		n = (finsn >> 8) & 0xf;
++		m = (finsn >> 4) & 0xf;
++		hx = tsk->thread.fpu.hard.fp_regs[n];
++		hy = tsk->thread.fpu.hard.fp_regs[m];
++		fpscr = tsk->thread.fpu.hard.fpscr;
++		prec = fpscr & FPSCR_DBL_PRECISION;
++
++		if ((fpscr & FPSCR_CAUSE_ERROR)
++		    && (prec && ((hx & 0x7fffffff) < 0x00100000
++				 || (hy & 0x7fffffff) < 0x00100000))) {
++			long long llx, lly;
++
++			/* FPU error because of denormal (doubles) */
++			llx = ((long long)hx << 32)
++			    | tsk->thread.fpu.hard.fp_regs[n + 1];
++			lly = ((long long)hy << 32)
++			    | tsk->thread.fpu.hard.fp_regs[m + 1];
++			llx = float64_mul(llx, lly);
++			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
++			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
++		} else if ((fpscr & FPSCR_CAUSE_ERROR)
++			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
++					 || (hy & 0x7fffffff) < 0x00800000))) {
++			/* FPU error because of denormal (floats) */
++			hx = float32_mul(hx, hy);
++			tsk->thread.fpu.hard.fp_regs[n] = hx;
++		} else
++			return 0;
++
++		regs->pc = nextpc;
++		return 1;
++	} else if ((finsn & 0xf00e) == 0xf000) {
++		/* fadd, fsub */
++		struct task_struct *tsk = current;
++		int fpscr;
++		int n, m, prec;
++		unsigned int hx, hy;
++
++		n = (finsn >> 8) & 0xf;
++		m = (finsn >> 4) & 0xf;
++		hx = tsk->thread.fpu.hard.fp_regs[n];
++		hy = tsk->thread.fpu.hard.fp_regs[m];
++		fpscr = tsk->thread.fpu.hard.fpscr;
++		prec = fpscr & FPSCR_DBL_PRECISION;
++
++		if ((fpscr & FPSCR_CAUSE_ERROR)
++		    && (prec && ((hx & 0x7fffffff) < 0x00100000
++				 || (hy & 0x7fffffff) < 0x00100000))) {
++			long long llx, lly;
++
++			/* FPU error because of denormal (doubles) */
++			llx = ((long long)hx << 32)
++			    | tsk->thread.fpu.hard.fp_regs[n + 1];
++			lly = ((long long)hy << 32)
++			    | tsk->thread.fpu.hard.fp_regs[m + 1];
++			if ((finsn & 0xf00f) == 0xf000)
++				llx = float64_add(llx, lly);
++			else
++				llx = float64_sub(llx, lly);
++			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
++			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
++		} else if ((fpscr & FPSCR_CAUSE_ERROR)
++			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
++					 || (hy & 0x7fffffff) < 0x00800000))) {
++			/* FPU error because of denormal (floats) */
++			if ((finsn & 0xf00f) == 0xf000)
++				hx = float32_add(hx, hy);
++			else
++				hx = float32_sub(hx, hy);
++			tsk->thread.fpu.hard.fp_regs[n] = hx;
++		} else
++			return 0;
++
++		regs->pc = nextpc;
++		return 1;
++	} else if ((finsn & 0xf003) == 0xf003) {
++		/* fdiv */
++		struct task_struct *tsk = current;
++		int fpscr;
++		int n, m, prec;
++		unsigned int hx, hy;
++
++		n = (finsn >> 8) & 0xf;
++		m = (finsn >> 4) & 0xf;
++		hx = tsk->thread.fpu.hard.fp_regs[n];
++		hy = tsk->thread.fpu.hard.fp_regs[m];
++		fpscr = tsk->thread.fpu.hard.fpscr;
++		prec = fpscr & FPSCR_DBL_PRECISION;
++
++		if ((fpscr & FPSCR_CAUSE_ERROR)
++		    && (prec && ((hx & 0x7fffffff) < 0x00100000
++				 || (hy & 0x7fffffff) < 0x00100000))) {
++			long long llx, lly;
++
++			/* FPU error because of denormal (doubles) */
++			llx = ((long long)hx << 32)
++			    | tsk->thread.fpu.hard.fp_regs[n + 1];
++			lly = ((long long)hy << 32)
++			    | tsk->thread.fpu.hard.fp_regs[m + 1];
++
++			llx = float64_div(llx, lly);
++
++			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
++			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
++		} else if ((fpscr & FPSCR_CAUSE_ERROR)
++			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
++					 || (hy & 0x7fffffff) < 0x00800000))) {
++			/* FPU error because of denormal (floats) */
++			hx = float32_div(hx, hy);
++			tsk->thread.fpu.hard.fp_regs[n] = hx;
+ 		} else
+-			force_sig(SIGFPE, tsk);
++			return 0;
+ 
+ 		regs->pc = nextpc;
+ 		return 1;
+@@ -293,27 +421,48 @@ ieee_fpe_handler (struct pt_regs *regs)
+ 	return 0;
+ }
+ 
+-asmlinkage void
+-do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6,
+-	     unsigned long r7, struct pt_regs __regs)
++void float_raise(unsigned int flags)
++{
++	fpu_exception_flags |= flags;
++}
++
++int float_rounding_mode(void)
+ {
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ 	struct task_struct *tsk = current;
++	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
++	return roundingMode;
++}
+ 
+-	if (ieee_fpe_handler(regs))
+-		return;
++BUILD_TRAP_HANDLER(fpu_error)
++{
++	struct task_struct *tsk = current;
++	TRAP_HANDLER_DECL;
+ 
+-	regs->pc += 2;
+ 	save_fpu(tsk, regs);
++	fpu_exception_flags = 0;
++	if (ieee_fpe_handler(regs)) {
++		tsk->thread.fpu.hard.fpscr &=
++		    ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
++		tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
++		/* Set the FPSCR flag as well as cause bits - simply
++		 * replicate the cause */
++		tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
++		grab_fpu(regs);
++		restore_fpu(tsk);
++		set_tsk_thread_flag(tsk, TIF_USEDFPU);
++		if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
++		     (fpu_exception_flags >> 2)) == 0) {
++			return;
++		}
++	}
++
+ 	force_sig(SIGFPE, tsk);
+ }
+ 
+-asmlinkage void
+-do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
+-		     unsigned long r7, struct pt_regs __regs)
++BUILD_TRAP_HANDLER(fpu_state_restore)
+ {
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ 	struct task_struct *tsk = current;
++	TRAP_HANDLER_DECL;
+ 
+ 	grab_fpu(regs);
+ 	if (!user_mode(regs)) {
+@@ -324,7 +473,7 @@ do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
+ 	if (used_math()) {
+ 		/* Using the FPU again.  */
+ 		restore_fpu(tsk);
+-	} else	{
++	} else {
+ 		/* First time FPU user.  */
+ 		fpu_init();
+ 		set_used_math();
+diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
+index bc9c28a..f2b9238 100644
+--- a/arch/sh/kernel/cpu/sh4/probe.c
++++ b/arch/sh/kernel/cpu/sh4/probe.c
+@@ -98,6 +98,8 @@ int __init detect_cpu_and_cache_system(void)
+ 	case 0x200A:
+ 		if (prr == 0x61)
+ 			boot_cpu_data.type = CPU_SH7781;
++		else if (prr == 0xa1)
++			boot_cpu_data.type = CPU_SH7763;
+ 		else
+ 			boot_cpu_data.type = CPU_SH7780;
+ 
+diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+index 523f68a..ae3603a 100644
+--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
++++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+@@ -126,12 +126,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(REF, REF_RCMI, REF_ROVI),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(SCIF, 3),
+-	INTC_PRIO(SCI1, 3),
+-	INTC_PRIO(DMAC, 7),
+-};
 -
--#
--# Distributed Lock Manager
--#
--# CONFIG_DLM is not set
+ static struct intc_prio_reg prio_registers[] __initdata = {
+ 	{ 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ 	{ 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } },
+@@ -143,7 +137,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ 
+ /* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
+ #if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+@@ -163,7 +157,7 @@ static struct intc_group groups_dma4[] __initdata = {
+ 
+ static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
+ 			 vectors_dma4, groups_dma4,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ #endif
+ 
+ /* SH7750R and SH7751R both have 8-channel DMA controllers */
+@@ -184,7 +178,7 @@ static struct intc_group groups_dma8[] __initdata = {
+ 
+ static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
+ 			 vectors_dma8, groups_dma8,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ #endif
+ 
+ /* SH7750R, SH7751 and SH7751R all have two extra timer channels */
+@@ -205,7 +199,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34",
+-			 vectors_tmu34, NULL, priorities,
++			 vectors_tmu34, NULL,
+ 			 mask_registers, prio_registers, NULL);
+ #endif
+ 
+@@ -216,7 +210,7 @@ static struct intc_vect vectors_irlm[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL,
+-			 priorities, NULL, prio_registers, NULL);
++			 NULL, prio_registers, NULL);
+ 
+ /* SH7751 and SH7751R both have PCI */
+ #if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
+@@ -233,7 +227,7 @@ static struct intc_group groups_pci[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci,
+-			 priorities, mask_registers, prio_registers, NULL);
++			 mask_registers, prio_registers, NULL);
+ #endif
+ 
+ #if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+index 7a898cb..85f8157 100644
+--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
++++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+@@ -92,15 +92,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(REF, REF_RCMI, REF_ROVI),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(SCIF0, 3),
+-	INTC_PRIO(SCIF1, 3),
+-	INTC_PRIO(SCIF2, 3),
+-	INTC_PRIO(SIM, 3),
+-	INTC_PRIO(DMAC, 7),
+-	INTC_PRIO(DMABRG, 13),
+-};
 -
--#
--# Profiling support
--#
--CONFIG_PROFILING=y
--CONFIG_OPROFILE=m
+ static struct intc_mask_reg mask_registers[] __initdata = {
+ 	{ 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
+ 	  { IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21,
+@@ -132,7 +123,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups,
+-			 priorities, mask_registers, prio_registers, NULL);
++			 mask_registers, prio_registers, NULL);
+ 
+ static struct intc_vect vectors_irq[] __initdata = {
+ 	INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
+@@ -140,7 +131,7 @@ static struct intc_vect vectors_irq[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
+-			 priorities, mask_registers, prio_registers, NULL);
++			 mask_registers, prio_registers, NULL);
+ 
+ static struct plat_sci_port sci_platform_data[] = {
+ 	{
+diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
+new file mode 100644
+index 0000000..7b2d337
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh4/softfloat.c
+@@ -0,0 +1,892 @@
++/*
++ * Floating point emulation support for subnormalised numbers on SH4
++ * architecture. This file is derived from the SoftFloat IEC/IEEE
++ * Floating-point Arithmetic Package, Release 2 the original license of
++ * which is reproduced below.
++ *
++ * ========================================================================
++ *
++ * This C source file is part of the SoftFloat IEC/IEEE Floating-point
++ * Arithmetic Package, Release 2.
++ *
++ * Written by John R. Hauser.  This work was made possible in part by the
++ * International Computer Science Institute, located at Suite 600, 1947 Center
++ * Street, Berkeley, California 94704.  Funding was partially provided by the
++ * National Science Foundation under grant MIP-9311980.  The original version
++ * of this code was written as part of a project to build a fixed-point vector
++ * processor in collaboration with the University of California at Berkeley,
++ * overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
++ * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
++ * arithmetic/softfloat.html'.
++ *
++ * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort
++ * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
++ * TIMES RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO
++ * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
++ * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
++ *
++ * Derivative works are acceptable, even for commercial purposes, so long as
++ * (1) they include prominent notice that the work is derivative, and (2) they
++ * include prominent notice akin to these three paragraphs for those parts of
++ * this code that are retained.
++ *
++ * ========================================================================
++ *
++ * SH4 modifications by Ismail Dhaoui <ismail.dhaoui at st.com>
++ * and Kamel Khelifi <kamel.khelifi at st.com>
++ */
++#include <linux/kernel.h>
++#include <asm/cpu/fpu.h>
++
++#define LIT64( a ) a##LL
++
++typedef char flag;
++typedef unsigned char uint8;
++typedef signed char int8;
++typedef int uint16;
++typedef int int16;
++typedef unsigned int uint32;
++typedef signed int int32;
++
++typedef unsigned long long int bits64;
++typedef signed long long int sbits64;
++
++typedef unsigned char bits8;
++typedef signed char sbits8;
++typedef unsigned short int bits16;
++typedef signed short int sbits16;
++typedef unsigned int bits32;
++typedef signed int sbits32;
++
++typedef unsigned long long int uint64;
++typedef signed long long int int64;
++
++typedef unsigned long int float32;
++typedef unsigned long long float64;
++
++extern void float_raise(unsigned int flags);	/* in fpu.c */
++extern int float_rounding_mode(void);	/* in fpu.c */
++
++inline bits64 extractFloat64Frac(float64 a);
++inline flag extractFloat64Sign(float64 a);
++inline int16 extractFloat64Exp(float64 a);
++inline int16 extractFloat32Exp(float32 a);
++inline flag extractFloat32Sign(float32 a);
++inline bits32 extractFloat32Frac(float32 a);
++inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
++inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
++inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
++inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
++float64 float64_sub(float64 a, float64 b);
++float32 float32_sub(float32 a, float32 b);
++float32 float32_add(float32 a, float32 b);
++float64 float64_add(float64 a, float64 b);
++float64 float64_div(float64 a, float64 b);
++float32 float32_div(float32 a, float32 b);
++float32 float32_mul(float32 a, float32 b);
++float64 float64_mul(float64 a, float64 b);
++inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
++		   bits64 * z1Ptr);
++inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
++		   bits64 * z1Ptr);
++inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
++
++static int8 countLeadingZeros32(bits32 a);
++static int8 countLeadingZeros64(bits64 a);
++static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp,
++					    bits64 zSig);
++static float64 subFloat64Sigs(float64 a, float64 b, flag zSign);
++static float64 addFloat64Sigs(float64 a, float64 b, flag zSign);
++static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig);
++static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp,
++					    bits32 zSig);
++static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
++static float32 subFloat32Sigs(float32 a, float32 b, flag zSign);
++static float32 addFloat32Sigs(float32 a, float32 b, flag zSign);
++static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr,
++				      bits64 * zSigPtr);
++static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
++static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
++				      bits32 * zSigPtr);
++
++inline bits64 extractFloat64Frac(float64 a)
++{
++	return a & LIT64(0x000FFFFFFFFFFFFF);
++}
++
++inline flag extractFloat64Sign(float64 a)
++{
++	return a >> 63;
++}
++
++inline int16 extractFloat64Exp(float64 a)
++{
++	return (a >> 52) & 0x7FF;
++}
++
++inline int16 extractFloat32Exp(float32 a)
++{
++	return (a >> 23) & 0xFF;
++}
++
++inline flag extractFloat32Sign(float32 a)
++{
++	return a >> 31;
++}
++
++inline bits32 extractFloat32Frac(float32 a)
++{
++	return a & 0x007FFFFF;
++}
++
++inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
++{
++	return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
++}
++
++inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
++{
++	bits64 z;
++
++	if (count == 0) {
++		z = a;
++	} else if (count < 64) {
++		z = (a >> count) | ((a << ((-count) & 63)) != 0);
++	} else {
++		z = (a != 0);
++	}
++	*zPtr = z;
++}
++
++static int8 countLeadingZeros32(bits32 a)
++{
++	static const int8 countLeadingZerosHigh[] = {
++		8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
++		3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
++		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
++		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
++		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
++		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
++		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
++		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
++	};
++	int8 shiftCount;
++
++	shiftCount = 0;
++	if (a < 0x10000) {
++		shiftCount += 16;
++		a <<= 16;
++	}
++	if (a < 0x1000000) {
++		shiftCount += 8;
++		a <<= 8;
++	}
++	shiftCount += countLeadingZerosHigh[a >> 24];
++	return shiftCount;
++
++}
++
++static int8 countLeadingZeros64(bits64 a)
++{
++	int8 shiftCount;
++
++	shiftCount = 0;
++	if (a < ((bits64) 1) << 32) {
++		shiftCount += 32;
++	} else {
++		a >>= 32;
++	}
++	shiftCount += countLeadingZeros32(a);
++	return shiftCount;
++
++}
++
++static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
++{
++	int8 shiftCount;
++
++	shiftCount = countLeadingZeros64(zSig) - 1;
++	return roundAndPackFloat64(zSign, zExp - shiftCount,
++				   zSig << shiftCount);
++
++}
++
++static float64 subFloat64Sigs(float64 a, float64 b, flag zSign)
++{
++	int16 aExp, bExp, zExp;
++	bits64 aSig, bSig, zSig;
++	int16 expDiff;
++
++	aSig = extractFloat64Frac(a);
++	aExp = extractFloat64Exp(a);
++	bSig = extractFloat64Frac(b);
++	bExp = extractFloat64Exp(b);
++	expDiff = aExp - bExp;
++	aSig <<= 10;
++	bSig <<= 10;
++	if (0 < expDiff)
++		goto aExpBigger;
++	if (expDiff < 0)
++		goto bExpBigger;
++	if (aExp == 0) {
++		aExp = 1;
++		bExp = 1;
++	}
++	if (bSig < aSig)
++		goto aBigger;
++	if (aSig < bSig)
++		goto bBigger;
++	return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
++      bExpBigger:
++	if (bExp == 0x7FF) {
++		return packFloat64(zSign ^ 1, 0x7FF, 0);
++	}
++	if (aExp == 0) {
++		++expDiff;
++	} else {
++		aSig |= LIT64(0x4000000000000000);
++	}
++	shift64RightJamming(aSig, -expDiff, &aSig);
++	bSig |= LIT64(0x4000000000000000);
++      bBigger:
++	zSig = bSig - aSig;
++	zExp = bExp;
++	zSign ^= 1;
++	goto normalizeRoundAndPack;
++      aExpBigger:
++	if (aExp == 0x7FF) {
++		return a;
++	}
++	if (bExp == 0) {
++		--expDiff;
++	} else {
++		bSig |= LIT64(0x4000000000000000);
++	}
++	shift64RightJamming(bSig, expDiff, &bSig);
++	aSig |= LIT64(0x4000000000000000);
++      aBigger:
++	zSig = aSig - bSig;
++	zExp = aExp;
++      normalizeRoundAndPack:
++	--zExp;
++	return normalizeRoundAndPackFloat64(zSign, zExp, zSig);
++
++}
++static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
++{
++	int16 aExp, bExp, zExp;
++	bits64 aSig, bSig, zSig;
++	int16 expDiff;
++
++	aSig = extractFloat64Frac(a);
++	aExp = extractFloat64Exp(a);
++	bSig = extractFloat64Frac(b);
++	bExp = extractFloat64Exp(b);
++	expDiff = aExp - bExp;
++	aSig <<= 9;
++	bSig <<= 9;
++	if (0 < expDiff) {
++		if (aExp == 0x7FF) {
++			return a;
++		}
++		if (bExp == 0) {
++			--expDiff;
++		} else {
++			bSig |= LIT64(0x2000000000000000);
++		}
++		shift64RightJamming(bSig, expDiff, &bSig);
++		zExp = aExp;
++	} else if (expDiff < 0) {
++		if (bExp == 0x7FF) {
++			return packFloat64(zSign, 0x7FF, 0);
++		}
++		if (aExp == 0) {
++			++expDiff;
++		} else {
++			aSig |= LIT64(0x2000000000000000);
++		}
++		shift64RightJamming(aSig, -expDiff, &aSig);
++		zExp = bExp;
++	} else {
++		if (aExp == 0x7FF) {
++			return a;
++		}
++		if (aExp == 0)
++			return packFloat64(zSign, 0, (aSig + bSig) >> 9);
++		zSig = LIT64(0x4000000000000000) + aSig + bSig;
++		zExp = aExp;
++		goto roundAndPack;
++	}
++	aSig |= LIT64(0x2000000000000000);
++	zSig = (aSig + bSig) << 1;
++	--zExp;
++	if ((sbits64) zSig < 0) {
++		zSig = aSig + bSig;
++		++zExp;
++	}
++      roundAndPack:
++	return roundAndPackFloat64(zSign, zExp, zSig);
++
++}
++
++inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
++{
++	return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
++}
++
++inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
++{
++	bits32 z;
++	if (count == 0) {
++		z = a;
++	} else if (count < 32) {
++		z = (a >> count) | ((a << ((-count) & 31)) != 0);
++	} else {
++		z = (a != 0);
++	}
++	*zPtr = z;
++}
++
++static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
++{
++	flag roundNearestEven;
++	int8 roundIncrement, roundBits;
++	flag isTiny;
++
++	/* SH4 has only 2 rounding modes - round to nearest and round to zero */
++	roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
++	roundIncrement = 0x40;
++	if (!roundNearestEven) {
++		roundIncrement = 0;
++	}
++	roundBits = zSig & 0x7F;
++	if (0xFD <= (bits16) zExp) {
++		if ((0xFD < zExp)
++		    || ((zExp == 0xFD)
++			&& ((sbits32) (zSig + roundIncrement) < 0))
++		    ) {
++			float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
++			return packFloat32(zSign, 0xFF,
++					   0) - (roundIncrement == 0);
++		}
++		if (zExp < 0) {
++			isTiny = (zExp < -1)
++			    || (zSig + roundIncrement < 0x80000000);
++			shift32RightJamming(zSig, -zExp, &zSig);
++			zExp = 0;
++			roundBits = zSig & 0x7F;
++			if (isTiny && roundBits)
++				float_raise(FPSCR_CAUSE_UNDERFLOW);
++		}
++	}
++	if (roundBits)
++		float_raise(FPSCR_CAUSE_INEXACT);
++	zSig = (zSig + roundIncrement) >> 7;
++	zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven);
++	if (zSig == 0)
++		zExp = 0;
++	return packFloat32(zSign, zExp, zSig);
++
++}
++
++static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
++{
++	int8 shiftCount;
++
++	shiftCount = countLeadingZeros32(zSig) - 1;
++	return roundAndPackFloat32(zSign, zExp - shiftCount,
++				   zSig << shiftCount);
++}
++
++static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
++{
++	flag roundNearestEven;
++	int16 roundIncrement, roundBits;
++	flag isTiny;
++
++	/* SH4 has only 2 rounding modes - round to nearest and round to zero */
++	roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
++	roundIncrement = 0x200;
++	if (!roundNearestEven) {
++		roundIncrement = 0;
++	}
++	roundBits = zSig & 0x3FF;
++	if (0x7FD <= (bits16) zExp) {
++		if ((0x7FD < zExp)
++		    || ((zExp == 0x7FD)
++			&& ((sbits64) (zSig + roundIncrement) < 0))
++		    ) {
++			float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
++			return packFloat64(zSign, 0x7FF,
++					   0) - (roundIncrement == 0);
++		}
++		if (zExp < 0) {
++			isTiny = (zExp < -1)
++			    || (zSig + roundIncrement <
++				LIT64(0x8000000000000000));
++			shift64RightJamming(zSig, -zExp, &zSig);
++			zExp = 0;
++			roundBits = zSig & 0x3FF;
++			if (isTiny && roundBits)
++				float_raise(FPSCR_CAUSE_UNDERFLOW);
++		}
++	}
++	if (roundBits)
++		float_raise(FPSCR_CAUSE_INEXACT);
++	zSig = (zSig + roundIncrement) >> 10;
++	zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven);
++	if (zSig == 0)
++		zExp = 0;
++	return packFloat64(zSign, zExp, zSig);
++
++}
++
++static float32 subFloat32Sigs(float32 a, float32 b, flag zSign)
++{
++	int16 aExp, bExp, zExp;
++	bits32 aSig, bSig, zSig;
++	int16 expDiff;
++
++	aSig = extractFloat32Frac(a);
++	aExp = extractFloat32Exp(a);
++	bSig = extractFloat32Frac(b);
++	bExp = extractFloat32Exp(b);
++	expDiff = aExp - bExp;
++	aSig <<= 7;
++	bSig <<= 7;
++	if (0 < expDiff)
++		goto aExpBigger;
++	if (expDiff < 0)
++		goto bExpBigger;
++	if (aExp == 0) {
++		aExp = 1;
++		bExp = 1;
++	}
++	if (bSig < aSig)
++		goto aBigger;
++	if (aSig < bSig)
++		goto bBigger;
++	return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
++      bExpBigger:
++	if (bExp == 0xFF) {
++		return packFloat32(zSign ^ 1, 0xFF, 0);
++	}
++	if (aExp == 0) {
++		++expDiff;
++	} else {
++		aSig |= 0x40000000;
++	}
++	shift32RightJamming(aSig, -expDiff, &aSig);
++	bSig |= 0x40000000;
++      bBigger:
++	zSig = bSig - aSig;
++	zExp = bExp;
++	zSign ^= 1;
++	goto normalizeRoundAndPack;
++      aExpBigger:
++	if (aExp == 0xFF) {
++		return a;
++	}
++	if (bExp == 0) {
++		--expDiff;
++	} else {
++		bSig |= 0x40000000;
++	}
++	shift32RightJamming(bSig, expDiff, &bSig);
++	aSig |= 0x40000000;
++      aBigger:
++	zSig = aSig - bSig;
++	zExp = aExp;
++      normalizeRoundAndPack:
++	--zExp;
++	return normalizeRoundAndPackFloat32(zSign, zExp, zSig);
++
++}
++
++static float32 addFloat32Sigs(float32 a, float32 b, flag zSign)
++{
++	int16 aExp, bExp, zExp;
++	bits32 aSig, bSig, zSig;
++	int16 expDiff;
++
++	aSig = extractFloat32Frac(a);
++	aExp = extractFloat32Exp(a);
++	bSig = extractFloat32Frac(b);
++	bExp = extractFloat32Exp(b);
++	expDiff = aExp - bExp;
++	aSig <<= 6;
++	bSig <<= 6;
++	if (0 < expDiff) {
++		if (aExp == 0xFF) {
++			return a;
++		}
++		if (bExp == 0) {
++			--expDiff;
++		} else {
++			bSig |= 0x20000000;
++		}
++		shift32RightJamming(bSig, expDiff, &bSig);
++		zExp = aExp;
++	} else if (expDiff < 0) {
++		if (bExp == 0xFF) {
++			return packFloat32(zSign, 0xFF, 0);
++		}
++		if (aExp == 0) {
++			++expDiff;
++		} else {
++			aSig |= 0x20000000;
++		}
++		shift32RightJamming(aSig, -expDiff, &aSig);
++		zExp = bExp;
++	} else {
++		if (aExp == 0xFF) {
++			return a;
++		}
++		if (aExp == 0)
++			return packFloat32(zSign, 0, (aSig + bSig) >> 6);
++		zSig = 0x40000000 + aSig + bSig;
++		zExp = aExp;
++		goto roundAndPack;
++	}
++	aSig |= 0x20000000;
++	zSig = (aSig + bSig) << 1;
++	--zExp;
++	if ((sbits32) zSig < 0) {
++		zSig = aSig + bSig;
++		++zExp;
++	}
++      roundAndPack:
++	return roundAndPackFloat32(zSign, zExp, zSig);
++
++}
++
++float64 float64_sub(float64 a, float64 b)
++{
++	flag aSign, bSign;
++
++	aSign = extractFloat64Sign(a);
++	bSign = extractFloat64Sign(b);
++	if (aSign == bSign) {
++		return subFloat64Sigs(a, b, aSign);
++	} else {
++		return addFloat64Sigs(a, b, aSign);
++	}
++
++}
++
++float32 float32_sub(float32 a, float32 b)
++{
++	flag aSign, bSign;
++
++	aSign = extractFloat32Sign(a);
++	bSign = extractFloat32Sign(b);
++	if (aSign == bSign) {
++		return subFloat32Sigs(a, b, aSign);
++	} else {
++		return addFloat32Sigs(a, b, aSign);
++	}
++
++}
++
++float32 float32_add(float32 a, float32 b)
++{
++	flag aSign, bSign;
++
++	aSign = extractFloat32Sign(a);
++	bSign = extractFloat32Sign(b);
++	if (aSign == bSign) {
++		return addFloat32Sigs(a, b, aSign);
++	} else {
++		return subFloat32Sigs(a, b, aSign);
++	}
++
++}
++
++float64 float64_add(float64 a, float64 b)
++{
++	flag aSign, bSign;
++
++	aSign = extractFloat64Sign(a);
++	bSign = extractFloat64Sign(b);
++	if (aSign == bSign) {
++		return addFloat64Sigs(a, b, aSign);
++	} else {
++		return subFloat64Sigs(a, b, aSign);
++	}
++}
++
++static void
++normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
++{
++	int8 shiftCount;
++
++	shiftCount = countLeadingZeros64(aSig) - 11;
++	*zSigPtr = aSig << shiftCount;
++	*zExpPtr = 1 - shiftCount;
++}
++
++inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
++		   bits64 * z1Ptr)
++{
++	bits64 z1;
++
++	z1 = a1 + b1;
++	*z1Ptr = z1;
++	*z0Ptr = a0 + b0 + (z1 < a1);
++}
++
++inline void
++sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
++       bits64 * z1Ptr)
++{
++	*z1Ptr = a1 - b1;
++	*z0Ptr = a0 - b0 - (a1 < b1);
++}
++
++static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
++{
++	bits64 b0, b1;
++	bits64 rem0, rem1, term0, term1;
++	bits64 z;
++	if (b <= a0)
++		return LIT64(0xFFFFFFFFFFFFFFFF);
++	b0 = b >> 32;
++	z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32;
++	mul64To128(b, z, &term0, &term1);
++	sub128(a0, a1, term0, term1, &rem0, &rem1);
++	while (((sbits64) rem0) < 0) {
++		z -= LIT64(0x100000000);
++		b1 = b << 32;
++		add128(rem0, rem1, b0, b1, &rem0, &rem1);
++	}
++	rem0 = (rem0 << 32) | (rem1 >> 32);
++	z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0;
++	return z;
++}
++
++inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
++{
++	bits32 aHigh, aLow, bHigh, bLow;
++	bits64 z0, zMiddleA, zMiddleB, z1;
++
++	aLow = a;
++	aHigh = a >> 32;
++	bLow = b;
++	bHigh = b >> 32;
++	z1 = ((bits64) aLow) * bLow;
++	zMiddleA = ((bits64) aLow) * bHigh;
++	zMiddleB = ((bits64) aHigh) * bLow;
++	z0 = ((bits64) aHigh) * bHigh;
++	zMiddleA += zMiddleB;
++	z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
++	zMiddleA <<= 32;
++	z1 += zMiddleA;
++	z0 += (z1 < zMiddleA);
++	*z1Ptr = z1;
++	*z0Ptr = z0;
++
++}
++
++static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
++				      bits32 * zSigPtr)
++{
++	int8 shiftCount;
++
++	shiftCount = countLeadingZeros32(aSig) - 8;
++	*zSigPtr = aSig << shiftCount;
++	*zExpPtr = 1 - shiftCount;
++
++}
++
++float64 float64_div(float64 a, float64 b)
++{
++	flag aSign, bSign, zSign;
++	int16 aExp, bExp, zExp;
++	bits64 aSig, bSig, zSig;
++	bits64 rem0, rem1;
++	bits64 term0, term1;
++
++	aSig = extractFloat64Frac(a);
++	aExp = extractFloat64Exp(a);
++	aSign = extractFloat64Sign(a);
++	bSig = extractFloat64Frac(b);
++	bExp = extractFloat64Exp(b);
++	bSign = extractFloat64Sign(b);
++	zSign = aSign ^ bSign;
++	if (aExp == 0x7FF) {
++		if (bExp == 0x7FF) {
++		}
++		return packFloat64(zSign, 0x7FF, 0);
++	}
++	if (bExp == 0x7FF) {
++		return packFloat64(zSign, 0, 0);
++	}
++	if (bExp == 0) {
++		if (bSig == 0) {
++			if ((aExp | aSig) == 0) {
++				float_raise(FPSCR_CAUSE_INVALID);
++			}
++			return packFloat64(zSign, 0x7FF, 0);
++		}
++		normalizeFloat64Subnormal(bSig, &bExp, &bSig);
++	}
++	if (aExp == 0) {
++		if (aSig == 0)
++			return packFloat64(zSign, 0, 0);
++		normalizeFloat64Subnormal(aSig, &aExp, &aSig);
++	}
++	zExp = aExp - bExp + 0x3FD;
++	aSig = (aSig | LIT64(0x0010000000000000)) << 10;
++	bSig = (bSig | LIT64(0x0010000000000000)) << 11;
++	if (bSig <= (aSig + aSig)) {
++		aSig >>= 1;
++		++zExp;
++	}
++	zSig = estimateDiv128To64(aSig, 0, bSig);
++	if ((zSig & 0x1FF) <= 2) {
++		mul64To128(bSig, zSig, &term0, &term1);
++		sub128(aSig, 0, term0, term1, &rem0, &rem1);
++		while ((sbits64) rem0 < 0) {
++			--zSig;
++			add128(rem0, rem1, 0, bSig, &rem0, &rem1);
++		}
++		zSig |= (rem1 != 0);
++	}
++	return roundAndPackFloat64(zSign, zExp, zSig);
++
++}
++
++float32 float32_div(float32 a, float32 b)
++{
++	flag aSign, bSign, zSign;
++	int16 aExp, bExp, zExp;
++	bits32 aSig, bSig, zSig;
++
++	aSig = extractFloat32Frac(a);
++	aExp = extractFloat32Exp(a);
++	aSign = extractFloat32Sign(a);
++	bSig = extractFloat32Frac(b);
++	bExp = extractFloat32Exp(b);
++	bSign = extractFloat32Sign(b);
++	zSign = aSign ^ bSign;
++	if (aExp == 0xFF) {
++		if (bExp == 0xFF) {
++		}
++		return packFloat32(zSign, 0xFF, 0);
++	}
++	if (bExp == 0xFF) {
++		return packFloat32(zSign, 0, 0);
++	}
++	if (bExp == 0) {
++		if (bSig == 0) {
++			return packFloat32(zSign, 0xFF, 0);
++		}
++		normalizeFloat32Subnormal(bSig, &bExp, &bSig);
++	}
++	if (aExp == 0) {
++		if (aSig == 0)
++			return packFloat32(zSign, 0, 0);
++		normalizeFloat32Subnormal(aSig, &aExp, &aSig);
++	}
++	zExp = aExp - bExp + 0x7D;
++	aSig = (aSig | 0x00800000) << 7;
++	bSig = (bSig | 0x00800000) << 8;
++	if (bSig <= (aSig + aSig)) {
++		aSig >>= 1;
++		++zExp;
++	}
++	zSig = (((bits64) aSig) << 32) / bSig;
++	if ((zSig & 0x3F) == 0) {
++		zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
++	}
++	return roundAndPackFloat32(zSign, zExp, zSig);
++
++}
++
++float32 float32_mul(float32 a, float32 b)
++{
++	char aSign, bSign, zSign;
++	int aExp, bExp, zExp;
++	unsigned int aSig, bSig;
++	unsigned long long zSig64;
++	unsigned int zSig;
++
++	aSig = extractFloat32Frac(a);
++	aExp = extractFloat32Exp(a);
++	aSign = extractFloat32Sign(a);
++	bSig = extractFloat32Frac(b);
++	bExp = extractFloat32Exp(b);
++	bSign = extractFloat32Sign(b);
++	zSign = aSign ^ bSign;
++	if (aExp == 0) {
++		if (aSig == 0)
++			return packFloat32(zSign, 0, 0);
++		normalizeFloat32Subnormal(aSig, &aExp, &aSig);
++	}
++	if (bExp == 0) {
++		if (bSig == 0)
++			return packFloat32(zSign, 0, 0);
++		normalizeFloat32Subnormal(bSig, &bExp, &bSig);
++	}
++	if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
++		return roundAndPackFloat32(zSign, 0xff, 0);
++
++	zExp = aExp + bExp - 0x7F;
++	aSig = (aSig | 0x00800000) << 7;
++	bSig = (bSig | 0x00800000) << 8;
++	shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
++	zSig = zSig64;
++	if (0 <= (signed int)(zSig << 1)) {
++		zSig <<= 1;
++		--zExp;
++	}
++	return roundAndPackFloat32(zSign, zExp, zSig);
++
++}
++
++float64 float64_mul(float64 a, float64 b)
++{
++	char aSign, bSign, zSign;
++	int aExp, bExp, zExp;
++	unsigned long long int aSig, bSig, zSig0, zSig1;
++
++	aSig = extractFloat64Frac(a);
++	aExp = extractFloat64Exp(a);
++	aSign = extractFloat64Sign(a);
++	bSig = extractFloat64Frac(b);
++	bExp = extractFloat64Exp(b);
++	bSign = extractFloat64Sign(b);
++	zSign = aSign ^ bSign;
++
++	if (aExp == 0) {
++		if (aSig == 0)
++			return packFloat64(zSign, 0, 0);
++		normalizeFloat64Subnormal(aSig, &aExp, &aSig);
++	}
++	if (bExp == 0) {
++		if (bSig == 0)
++			return packFloat64(zSign, 0, 0);
++		normalizeFloat64Subnormal(bSig, &bExp, &bSig);
++	}
++	if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
++		return roundAndPackFloat64(zSign, 0x7ff, 0);
++
++	zExp = aExp + bExp - 0x3FF;
++	aSig = (aSig | 0x0010000000000000LL) << 10;
++	bSig = (bSig | 0x0010000000000000LL) << 11;
++	mul64To128(aSig, bSig, &zSig0, &zSig1);
++	zSig0 |= (zSig1 != 0);
++	if (0 <= (signed long long int)(zSig0 << 1)) {
++		zSig0 <<= 1;
++		--zExp;
++	}
++	return roundAndPackFloat64(zSign, zExp, zSig0);
++}
+diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
+index b22a78c..3008c00 100644
+--- a/arch/sh/kernel/cpu/sh4/sq.c
++++ b/arch/sh/kernel/cpu/sh4/sq.c
+@@ -341,17 +341,18 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev)
+ {
+ 	unsigned int cpu = sysdev->id;
+ 	struct kobject *kobj;
++	int error;
+ 
+ 	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ 	if (unlikely(!sq_kobject[cpu]))
+ 		return -ENOMEM;
+ 
+ 	kobj = sq_kobject[cpu];
+-	kobj->parent = &sysdev->kobj;
+-	kobject_set_name(kobj, "%s", "sq");
+-	kobj->ktype = &ktype_percpu_entry;
 -
--#
--# Kernel hacking
--#
--CONFIG_TRACE_IRQFLAGS_SUPPORT=y
--# CONFIG_PRINTK_TIME is not set
--CONFIG_ENABLE_MUST_CHECK=y
--CONFIG_MAGIC_SYSRQ=y
--# CONFIG_UNUSED_SYMBOLS is not set
--CONFIG_DEBUG_FS=y
--# CONFIG_HEADERS_CHECK is not set
--CONFIG_DEBUG_KERNEL=y
--# CONFIG_DEBUG_SHIRQ is not set
--CONFIG_LOG_BUF_SHIFT=14
--CONFIG_DETECT_SOFTLOCKUP=y
--# CONFIG_SCHEDSTATS is not set
--# CONFIG_TIMER_STATS is not set
--# CONFIG_DEBUG_SLAB is not set
--# CONFIG_DEBUG_PREEMPT is not set
--# CONFIG_DEBUG_SPINLOCK is not set
--# CONFIG_DEBUG_MUTEXES is not set
--# CONFIG_DEBUG_LOCK_ALLOC is not set
--# CONFIG_PROVE_LOCKING is not set
--# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
--# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
--# CONFIG_DEBUG_KOBJECT is not set
--CONFIG_DEBUG_BUGVERBOSE=y
--CONFIG_DEBUG_INFO=y
--# CONFIG_DEBUG_VM is not set
--# CONFIG_DEBUG_LIST is not set
--# CONFIG_FRAME_POINTER is not set
--CONFIG_FORCED_INLINING=y
--# CONFIG_RCU_TORTURE_TEST is not set
--# CONFIG_FAULT_INJECTION is not set
--CONFIG_SH_STANDARD_BIOS=y
--# CONFIG_EARLY_SCIF_CONSOLE is not set
--CONFIG_EARLY_PRINTK=y
--CONFIG_DEBUG_STACKOVERFLOW=y
--# CONFIG_DEBUG_STACK_USAGE is not set
--# CONFIG_4KSTACKS is not set
--# CONFIG_SH_KGDB is not set
+-	return kobject_register(kobj);
++	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
++				     "%s", "sq");
++	if (!error)
++		kobject_uevent(kobj, KOBJ_ADD);
++	return error;
+ }
+ 
+ static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
+@@ -359,7 +360,7 @@ static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
+ 	unsigned int cpu = sysdev->id;
+ 	struct kobject *kobj = sq_kobject[cpu];
+ 
+-	kobject_unregister(kobj);
++	kobject_put(kobj);
+ 	return 0;
+ }
+ 
+diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
+index 2453987..08ac638 100644
+--- a/arch/sh/kernel/cpu/sh4a/Makefile
++++ b/arch/sh/kernel/cpu/sh4a/Makefile
+@@ -3,6 +3,7 @@
+ #
+ 
+ # CPU subtype setup
++obj-$(CONFIG_CPU_SUBTYPE_SH7763)	+= setup-sh7763.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7770)	+= setup-sh7770.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7780)	+= setup-sh7780.o
+ obj-$(CONFIG_CPU_SUBTYPE_SH7785)	+= setup-sh7785.o
+@@ -14,6 +15,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SHX3)		+= setup-shx3.o
+ smp-$(CONFIG_CPU_SUBTYPE_SHX3)		:= smp-shx3.o
+ 
+ # Primary on-chip clocks (common)
++clock-$(CONFIG_CPU_SUBTYPE_SH7763)	:= clock-sh7763.o
+ clock-$(CONFIG_CPU_SUBTYPE_SH7770)	:= clock-sh7770.o
+ clock-$(CONFIG_CPU_SUBTYPE_SH7780)	:= clock-sh7780.o
+ clock-$(CONFIG_CPU_SUBTYPE_SH7785)	:= clock-sh7785.o
+diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+new file mode 100644
+index 0000000..45889d4
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+@@ -0,0 +1,126 @@
++/*
++ * arch/sh/kernel/cpu/sh4a/clock-sh7763.c
++ *
++ * SH7763 support for the clock framework
++ *
++ *  Copyright (C) 2005  Paul Mundt
++ *  Copyright (C) 2007  Yoshihiro Shimoda
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <asm/clock.h>
++#include <asm/freq.h>
++#include <asm/io.h>
++
++static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
++static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
++static int p1fc_divisors[] = { 1, 1, 1, 16, 1, 1, 1, 1 };
++static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
++
++static void master_clk_init(struct clk *clk)
++{
++	clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07];
++}
++
++static struct clk_ops sh7763_master_clk_ops = {
++	.init		= master_clk_init,
++};
++
++static void module_clk_recalc(struct clk *clk)
++{
++	int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07);
++	clk->rate = clk->parent->rate / p0fc_divisors[idx];
++}
++
++static struct clk_ops sh7763_module_clk_ops = {
++	.recalc		= module_clk_recalc,
++};
++
++static void bus_clk_recalc(struct clk *clk)
++{
++	int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07);
++	clk->rate = clk->parent->rate / bfc_divisors[idx];
++}
++
++static struct clk_ops sh7763_bus_clk_ops = {
++	.recalc		= bus_clk_recalc,
++};
++
++static void cpu_clk_recalc(struct clk *clk)
++{
++	clk->rate = clk->parent->rate;
++}
++
++static struct clk_ops sh7763_cpu_clk_ops = {
++	.recalc		= cpu_clk_recalc,
++};
++
++static struct clk_ops *sh7763_clk_ops[] = {
++	&sh7763_master_clk_ops,
++	&sh7763_module_clk_ops,
++	&sh7763_bus_clk_ops,
++	&sh7763_cpu_clk_ops,
++};
++
++void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
++{
++	if (idx < ARRAY_SIZE(sh7763_clk_ops))
++		*ops = sh7763_clk_ops[idx];
++}
++
++static void shyway_clk_recalc(struct clk *clk)
++{
++	int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07);
++	clk->rate = clk->parent->rate / cfc_divisors[idx];
++}
++
++static struct clk_ops sh7763_shyway_clk_ops = {
++	.recalc		= shyway_clk_recalc,
++};
++
++static struct clk sh7763_shyway_clk = {
++	.name		= "shyway_clk",
++	.flags		= CLK_ALWAYS_ENABLED,
++	.ops		= &sh7763_shyway_clk_ops,
++};
++
++/*
++ * Additional SH7763-specific on-chip clocks that aren't already part of the
++ * clock framework
++ */
++static struct clk *sh7763_onchip_clocks[] = {
++	&sh7763_shyway_clk,
++};
++
++static int __init sh7763_clk_init(void)
++{
++	struct clk *clk = clk_get(NULL, "master_clk");
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) {
++		struct clk *clkp = sh7763_onchip_clocks[i];
++
++		clkp->parent = clk;
++		clk_register(clkp);
++		clk_enable(clkp);
++	}
++
++	/*
++	 * Now that we have the rest of the clocks registered, we need to
++	 * force the parent clock to propagate so that these clocks will
++	 * automatically figure out their rate. We cheat by handing the
++	 * parent clock its current rate and forcing child propagation.
++	 */
++	clk_set_rate(clk, clk_get_rate(clk));
++
++	clk_put(clk);
++
++	return 0;
++}
++
++arch_initcall(sh7763_clk_init);
++
+diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+index b9c6547..73c778d 100644
+--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
++++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+@@ -157,14 +157,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(SCIF0, 3),
+-	INTC_PRIO(SCIF1, 3),
+-	INTC_PRIO(SCIF2, 3),
+-	INTC_PRIO(TMU0, 2),
+-	INTC_PRIO(TMU1, 2),
+-};
 -
--#
--# Security options
--#
--# CONFIG_KEYS is not set
--# CONFIG_SECURITY is not set
+ static struct intc_mask_reg mask_registers[] __initdata = {
+ 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+ 	  { } },
+@@ -217,7 +209,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
+ 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+ };
+ 
+-static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups, priorities,
++static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups,
+ 			 mask_registers, prio_registers, sense_registers);
+ 
+ void __init plat_irq_setup(void)
+diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+new file mode 100644
+index 0000000..eabd538
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+@@ -0,0 +1,390 @@
++/*
++ * SH7763 Setup
++ *
++ *  Copyright (C) 2006  Paul Mundt
++ *  Copyright (C) 2007  Yoshihiro Shimoda
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/platform_device.h>
++#include <linux/init.h>
++#include <linux/serial.h>
++#include <linux/io.h>
++#include <asm/sci.h>
++
++static struct resource rtc_resources[] = {
++	[0] = {
++		.start	= 0xffe80000,
++		.end	= 0xffe80000 + 0x58 - 1,
++		.flags	= IORESOURCE_IO,
++	},
++	[1] = {
++		/* Period IRQ */
++		.start	= 21,
++		.flags	= IORESOURCE_IRQ,
++	},
++	[2] = {
++		/* Carry IRQ */
++		.start	= 22,
++		.flags	= IORESOURCE_IRQ,
++	},
++	[3] = {
++		/* Alarm IRQ */
++		.start	= 20,
++		.flags	= IORESOURCE_IRQ,
++	},
++};
++
++static struct platform_device rtc_device = {
++	.name		= "sh-rtc",
++	.id		= -1,
++	.num_resources	= ARRAY_SIZE(rtc_resources),
++	.resource	= rtc_resources,
++};
++
++static struct plat_sci_port sci_platform_data[] = {
++	{
++		.mapbase	= 0xffe00000,
++		.flags		= UPF_BOOT_AUTOCONF,
++		.type		= PORT_SCIF,
++		.irqs		= { 40, 41, 43, 42 },
++	}, {
++		.mapbase	= 0xffe08000,
++		.flags		= UPF_BOOT_AUTOCONF,
++		.type		= PORT_SCIF,
++		.irqs		= { 76, 77, 79, 78 },
++	}, {
++		.flags = 0,
++	}
++};
++
++static struct platform_device sci_device = {
++	.name		= "sh-sci",
++	.id		= -1,
++	.dev		= {
++		.platform_data	= sci_platform_data,
++	},
++};
++
++static struct resource usb_ohci_resources[] = {
++	[0] = {
++		.start	= 0xffec8000,
++		.end	= 0xffec80ff,
++		.flags	= IORESOURCE_MEM,
++	},
++	[1] = {
++		.start	= 83,
++		.end	= 83,
++		.flags	= IORESOURCE_IRQ,
++	},
++};
++
++static u64 usb_ohci_dma_mask = 0xffffffffUL;
++static struct platform_device usb_ohci_device = {
++	.name		= "sh_ohci",
++	.id		= -1,
++	.dev = {
++		.dma_mask		= &usb_ohci_dma_mask,
++		.coherent_dma_mask	= 0xffffffff,
++	},
++	.num_resources	= ARRAY_SIZE(usb_ohci_resources),
++	.resource	= usb_ohci_resources,
++};
++
++static struct resource usbf_resources[] = {
++	[0] = {
++		.start	= 0xffec0000,
++		.end	= 0xffec00ff,
++		.flags	= IORESOURCE_MEM,
++	},
++	[1] = {
++		.start	= 84,
++		.end	= 84,
++		.flags	= IORESOURCE_IRQ,
++	},
++};
++
++static struct platform_device usbf_device = {
++	.name		= "sh_udc",
++	.id		= -1,
++	.dev = {
++		.dma_mask		= NULL,
++		.coherent_dma_mask	= 0xffffffff,
++	},
++	.num_resources	= ARRAY_SIZE(usbf_resources),
++	.resource	= usbf_resources,
++};
++
++static struct platform_device *sh7763_devices[] __initdata = {
++	&rtc_device,
++	&sci_device,
++	&usb_ohci_device,
++	&usbf_device,
++};
++
++static int __init sh7763_devices_setup(void)
++{
++	return platform_add_devices(sh7763_devices,
++				    ARRAY_SIZE(sh7763_devices));
++}
++__initcall(sh7763_devices_setup);
++
++enum {
++	UNUSED = 0,
++
++	/* interrupt sources */
++
++	IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
++	IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
++	IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
++	IRL_HHLL, IRL_HHLH, IRL_HHHL,
++
++	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
++	RTC_ATI, RTC_PRI, RTC_CUI,
++	WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
++	HUDI, LCDC,
++	DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE,
++	SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
++	DMAC0_DMINT4, DMAC0_DMINT5,
++	IIC0, IIC1,
++	CMT,
++	GEINT0, GEINT1, GEINT2,
++	HAC,
++	PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
++	PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
++	STIF0, STIF1,
++	SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
++	SIOF0, SIOF1, SIOF2,
++	USBH, USBFI0, USBFI1,
++	TPU, PCC,
++	MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
++	SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND,
++	TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3,
++	SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
++	GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3,
++
++	/* interrupt groups */
++
++	TMU012, TMU345, RTC, DMAC, SCIF0, GETHER, PCIC5,
++	SCIF1, USBF, MMCIF, SIM, SCIF2, GPIO,
++};
++
++static struct intc_vect vectors[] __initdata = {
++	INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
++	INTC_VECT(RTC_CUI, 0x4c0),
++	INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580),
++	INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0),
++	INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600),
++	INTC_VECT(LCDC, 0x620),
++	INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
++	INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0),
++	INTC_VECT(DMAC0_DMAE, 0x6c0),
++	INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
++	INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
++	INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0),
++	INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0),
++	INTC_VECT(CMT, 0x900), INTC_VECT(GEINT0, 0x920),
++	INTC_VECT(GEINT1, 0x940), INTC_VECT(GEINT2, 0x960),
++	INTC_VECT(HAC, 0x980),
++	INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
++	INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
++	INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
++	INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
++	INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
++	INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60),
++	INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0),
++	INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0),
++	INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20),
++	INTC_VECT(USBH, 0xc60), INTC_VECT(USBFI0, 0xc80),
++	INTC_VECT(USBFI1, 0xca0),
++	INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0),
++	INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
++	INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
++	INTC_VECT(SIM_ERI, 0xd80), INTC_VECT(SIM_RXI, 0xda0),
++	INTC_VECT(SIM_TXI, 0xdc0), INTC_VECT(SIM_TEND, 0xde0),
++	INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
++	INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60),
++	INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
++	INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0),
++	INTC_VECT(SCIF1_ERI, 0xf00), INTC_VECT(SCIF1_RXI, 0xf20),
++	INTC_VECT(SCIF1_BRI, 0xf40), INTC_VECT(SCIF1_TXI, 0xf60),
++	INTC_VECT(GPIO_CH0, 0xf80), INTC_VECT(GPIO_CH1, 0xfa0),
++	INTC_VECT(GPIO_CH2, 0xfc0), INTC_VECT(GPIO_CH3, 0xfe0),
++};
++
++static struct intc_group groups[] __initdata = {
++	INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
++	INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
++	INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
++	INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
++		   DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
++	INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
++	INTC_GROUP(GETHER, GEINT0, GEINT1, GEINT2),
++	INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
++	INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
++	INTC_GROUP(USBF, USBFI0, USBFI1),
++	INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
++	INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND),
++	INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
++	INTC_GROUP(GPIO, GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3),
++};
++
++static struct intc_prio priorities[] __initdata = {
++	INTC_PRIO(SCIF0, 3),
++	INTC_PRIO(SCIF1, 3),
++	INTC_PRIO(SCIF2, 3),
++};
++
++static struct intc_mask_reg mask_registers[] __initdata = {
++	{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
++	  { 0, 0, 0, 0, 0, 0, GPIO, 0,
++	    SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB,
++	    PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC,
++	    HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
++	{ 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
++	  { 0, 0, 0, 0, 0, 0, SCIF2, USBF,
++	    0, 0, STIF1, STIF0, 0, 0, USBH, GETHER,
++	    PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1,
++	    LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } },
++};
++
++static struct intc_prio_reg prio_registers[] __initdata = {
++	{ 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
++						 TMU2, TMU2_TICPI } },
++	{ 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
++	{ 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
++	{ 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } },
++	{ 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
++						 PCISERR, PCIINTA } },
++	{ 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
++						 PCIINTD, PCIC5 } },
++	{ 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } },
++	{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } },
++	{ 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } },
++	{ 0xffd400a4, 0, 32, 8, /* INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } },
++	{ 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } },
++	{ 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } },
++	{ 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } },
++	{ 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } },
++};
++
++static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups, priorities,
++			 mask_registers, prio_registers, NULL);
++
++/* Support for external interrupt pins in IRQ mode */
++
++static struct intc_vect irq_vectors[] __initdata = {
++	INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
++	INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
++	INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
++	INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
++};
++
++static struct intc_mask_reg irq_mask_registers[] __initdata = {
++	{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
++	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
++};
++
++static struct intc_prio_reg irq_prio_registers[] __initdata = {
++	{ 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
++					       IRQ4, IRQ5, IRQ6, IRQ7 } },
++};
++
++static struct intc_sense_reg irq_sense_registers[] __initdata = {
++	{ 0xffd0001c, 32, 2, /* ICR1 */   { IRQ0, IRQ1, IRQ2, IRQ3,
++					    IRQ4, IRQ5, IRQ6, IRQ7 } },
++};
++
++static DECLARE_INTC_DESC(intc_irq_desc, "sh7763-irq", irq_vectors,
++			 NULL, NULL, irq_mask_registers, irq_prio_registers,
++			 irq_sense_registers);
++
++/* External interrupt pins in IRL mode */
++
++static struct intc_vect irl_vectors[] __initdata = {
++	INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
++	INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
++	INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
++	INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
++	INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
++	INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
++	INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
++	INTC_VECT(IRL_HHHL, 0x3c0),
++};
++
++static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
++	{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
++	  { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
++	    IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
++	    IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
++	    IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
++};
++
++static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
++	{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
++	  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	    IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
++	    IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
++	    IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
++	    IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
++};
++
++static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors,
++			 NULL, NULL, irl7654_mask_registers, NULL, NULL);
++
++static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
++			 NULL, NULL, irl3210_mask_registers, NULL, NULL);
++
++#define INTC_ICR0	0xffd00000
++#define INTC_INTMSK0	0xffd00044
++#define INTC_INTMSK1	0xffd00048
++#define INTC_INTMSK2	0xffd40080
++#define INTC_INTMSKCLR1	0xffd00068
++#define INTC_INTMSKCLR2	0xffd40084
++
++void __init plat_irq_setup(void)
++{
++	/* disable IRQ7-0 */
++	ctrl_outl(0xff000000, INTC_INTMSK0);
++
++	/* disable IRL3-0 + IRL7-4 */
++	ctrl_outl(0xc0000000, INTC_INTMSK1);
++	ctrl_outl(0xfffefffe, INTC_INTMSK2);
++
++	register_intc_controller(&intc_desc);
++}
++
++void __init plat_irq_setup_pins(int mode)
++{
++	switch (mode) {
++	case IRQ_MODE_IRQ:
++		/* select IRQ mode for IRL3-0 + IRL7-4 */
++		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
++		register_intc_controller(&intc_irq_desc);
++		break;
++	case IRQ_MODE_IRL7654:
++		/* enable IRL7-4 but don't provide any masking */
++		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
++		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
++		break;
++	case IRQ_MODE_IRL3210:
++		/* enable IRL0-3 but don't provide any masking */
++		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
++		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
++		break;
++	case IRQ_MODE_IRL7654_MASK:
++		/* enable IRL7-4 and mask using cpu intc controller */
++		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
++		register_intc_controller(&intc_irl7654_desc);
++		break;
++	case IRQ_MODE_IRL3210_MASK:
++		/* enable IRL0-3 and mask using cpu intc controller */
++		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
++		register_intc_controller(&intc_irl3210_desc);
++		break;
++	default:
++		BUG();
++	}
++}
+diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+index e8fd33f..293004b 100644
+--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
++++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+@@ -168,11 +168,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(SCIF0, 3),
+-	INTC_PRIO(SCIF1, 3),
+-};
 -
--#
--# Cryptographic options
--#
--CONFIG_CRYPTO=y
--CONFIG_CRYPTO_ALGAPI=y
--CONFIG_CRYPTO_BLKCIPHER=y
--CONFIG_CRYPTO_HASH=y
--CONFIG_CRYPTO_MANAGER=y
--CONFIG_CRYPTO_HMAC=y
--# CONFIG_CRYPTO_XCBC is not set
--# CONFIG_CRYPTO_NULL is not set
--# CONFIG_CRYPTO_MD4 is not set
--CONFIG_CRYPTO_MD5=y
--# CONFIG_CRYPTO_SHA1 is not set
--# CONFIG_CRYPTO_SHA256 is not set
--# CONFIG_CRYPTO_SHA512 is not set
--# CONFIG_CRYPTO_WP512 is not set
--# CONFIG_CRYPTO_TGR192 is not set
--# CONFIG_CRYPTO_GF128MUL is not set
--CONFIG_CRYPTO_ECB=m
--CONFIG_CRYPTO_CBC=y
--CONFIG_CRYPTO_PCBC=m
--# CONFIG_CRYPTO_LRW is not set
--CONFIG_CRYPTO_DES=y
--# CONFIG_CRYPTO_FCRYPT is not set
--# CONFIG_CRYPTO_BLOWFISH is not set
--# CONFIG_CRYPTO_TWOFISH is not set
--# CONFIG_CRYPTO_SERPENT is not set
--# CONFIG_CRYPTO_AES is not set
--# CONFIG_CRYPTO_CAST5 is not set
--# CONFIG_CRYPTO_CAST6 is not set
--# CONFIG_CRYPTO_TEA is not set
--# CONFIG_CRYPTO_ARC4 is not set
--# CONFIG_CRYPTO_KHAZAD is not set
--# CONFIG_CRYPTO_ANUBIS is not set
--# CONFIG_CRYPTO_DEFLATE is not set
--# CONFIG_CRYPTO_MICHAEL_MIC is not set
--# CONFIG_CRYPTO_CRC32C is not set
--# CONFIG_CRYPTO_CAMELLIA is not set
--# CONFIG_CRYPTO_TEST is not set
+ static struct intc_mask_reg mask_registers[] __initdata = {
+ 	{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ 	  { 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
+@@ -195,7 +190,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ 	{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
+ };
+ 
+-static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups, priorities,
++static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups,
+ 			 mask_registers, prio_registers, NULL);
+ 
+ /* Support for external interrupt pins in IRQ mode */
+@@ -223,7 +218,7 @@ static struct intc_sense_reg irq_sense_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_irq_desc, "sh7780-irq", irq_vectors,
+-			 NULL, NULL, irq_mask_registers, irq_prio_registers,
++			 NULL, irq_mask_registers, irq_prio_registers,
+ 			 irq_sense_registers);
+ 
+ /* External interrupt pins in IRL mode */
+@@ -257,10 +252,10 @@ static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
+-			 NULL, NULL, irl7654_mask_registers, NULL, NULL);
++			 NULL, irl7654_mask_registers, NULL, NULL);
+ 
+ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
+-			 NULL, NULL, irl3210_mask_registers, NULL, NULL);
++			 NULL, irl3210_mask_registers, NULL, NULL);
+ 
+ #define INTC_ICR0	0xffd00000
+ #define INTC_INTMSK0	0xffd00044
+diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+index 39b215d..74b60e9 100644
+--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
++++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+@@ -178,15 +178,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(SCIF0, 3),
+-	INTC_PRIO(SCIF1, 3),
+-	INTC_PRIO(SCIF2, 3),
+-	INTC_PRIO(SCIF3, 3),
+-	INTC_PRIO(SCIF4, 3),
+-	INTC_PRIO(SCIF5, 3),
+-};
 -
--#
--# Hardware crypto devices
--#
+ static struct intc_mask_reg mask_registers[] __initdata = {
+ 	{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+@@ -227,7 +218,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ 	{ 0xffd40024, 0, 32, 8, /* INT2PRI9 */ { DU, GDTA, } },
+ };
+ 
+-static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups, priorities,
++static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups,
+ 			 mask_registers, prio_registers, NULL);
+ 
+ /* Support for external interrupt pins in IRQ mode */
+@@ -248,11 +239,11 @@ static struct intc_sense_reg sense_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irq0123, "sh7785-irq0123", vectors_irq0123,
+-			 NULL, NULL, mask_registers, prio_registers,
++			 NULL, mask_registers, prio_registers,
+ 			 sense_registers);
+ 
+ static DECLARE_INTC_DESC(intc_desc_irq4567, "sh7785-irq4567", vectors_irq4567,
+-			 NULL, NULL, mask_registers, prio_registers,
++			 NULL, mask_registers, prio_registers,
+ 			 sense_registers);
+ 
+ /* External interrupt pins in IRL mode */
+@@ -280,10 +271,10 @@ static struct intc_vect vectors_irl4567[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7785-irl0123", vectors_irl0123,
+-			 NULL, NULL, mask_registers, NULL, NULL);
++			 NULL, mask_registers, NULL, NULL);
+ 
+ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
+-			 NULL, NULL, mask_registers, NULL, NULL);
++			 NULL, mask_registers, NULL, NULL);
+ 
+ #define INTC_ICR0	0xffd00000
+ #define INTC_INTMSK0	0xffd00044
+diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+index c6cdd7e..4dc958b 100644
+--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
++++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+@@ -165,13 +165,6 @@ static struct intc_group groups[] __initdata = {
+ 	INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS),
+ };
+ 
+-static struct intc_prio priorities[] __initdata = {
+-	INTC_PRIO(SCIF0, 3),
+-	INTC_PRIO(SCIF1, 3),
+-	INTC_PRIO(SCIF2, 3),
+-	INTC_PRIO(SCIF3, 3),
+-};
 -
--#
--# Library routines
--#
--CONFIG_BITREVERSE=y
--# CONFIG_CRC_CCITT is not set
--# CONFIG_CRC16 is not set
--CONFIG_CRC32=y
--# CONFIG_LIBCRC32C is not set
--CONFIG_HAS_IOMEM=y
--CONFIG_HAS_IOPORT=y
-diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
-index 2e43a2a..0dc1ce7 100644
---- a/arch/sh/configs/r7785rp_defconfig
-+++ b/arch/sh/configs/r7785rp_defconfig
-@@ -1,9 +1,10 @@
- #
- # Automatically generated make config: don't edit
--# Linux kernel version: 2.6.24-rc2
--# Tue Nov 13 20:34:57 2007
-+# Linux kernel version: 2.6.24-rc3
-+# Fri Nov 23 14:03:57 2007
- #
- CONFIG_SUPERH=y
-+CONFIG_SUPERH32=y
- CONFIG_RWSEM_GENERIC_SPINLOCK=y
- CONFIG_GENERIC_BUG=y
- CONFIG_GENERIC_FIND_NEXT_BIT=y
-@@ -39,6 +40,7 @@ CONFIG_BSD_PROCESS_ACCT=y
- # CONFIG_BSD_PROCESS_ACCT_V3 is not set
- # CONFIG_TASKSTATS is not set
- # CONFIG_USER_NS is not set
-+# CONFIG_PID_NS is not set
- # CONFIG_AUDIT is not set
- CONFIG_IKCONFIG=y
- CONFIG_IKCONFIG_PROC=y
-@@ -130,6 +132,8 @@ CONFIG_CPU_SUBTYPE_SH7785=y
- # CONFIG_CPU_SUBTYPE_SHX3 is not set
- # CONFIG_CPU_SUBTYPE_SH7343 is not set
- # CONFIG_CPU_SUBTYPE_SH7722 is not set
-+# CONFIG_CPU_SUBTYPE_SH5_101 is not set
-+# CONFIG_CPU_SUBTYPE_SH5_103 is not set
+ static struct intc_mask_reg mask_registers[] __initdata = {
+ 	{ 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
+ 	  { IRQ0, IRQ1, IRQ2, IRQ3 } },
+@@ -218,7 +211,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
+ 	    INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 4) },
+ };
  
- #
- # Memory management options
-@@ -139,7 +143,8 @@ CONFIG_MMU=y
- CONFIG_PAGE_OFFSET=0x80000000
- CONFIG_MEMORY_START=0x08000000
- CONFIG_MEMORY_SIZE=0x08000000
--# CONFIG_32BIT is not set
-+CONFIG_29BIT=y
-+# CONFIG_PMB is not set
- # CONFIG_X2TLB is not set
- CONFIG_VSYSCALL=y
- # CONFIG_NUMA is not set
-@@ -158,6 +163,7 @@ CONFIG_PAGE_SIZE_4KB=y
- CONFIG_HUGETLB_PAGE_SIZE_1MB=y
- # CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
- # CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
- CONFIG_SELECT_MEMORY_MODEL=y
- # CONFIG_FLATMEM_MANUAL is not set
- # CONFIG_DISCONTIGMEM_MANUAL is not set
-@@ -701,6 +707,7 @@ CONFIG_DEVPORT=y
- # CONFIG_POWER_SUPPLY is not set
- CONFIG_HWMON=y
- # CONFIG_HWMON_VID is not set
-+# CONFIG_SENSORS_I5K_AMB is not set
- # CONFIG_SENSORS_F71805F is not set
- # CONFIG_SENSORS_F71882FG is not set
- # CONFIG_SENSORS_IT87 is not set
-diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
+-static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups, priorities,
++static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups,
+ 			 mask_registers, prio_registers, NULL);
+ 
+ /* Support for external interrupt pins in IRQ mode */
+@@ -232,8 +225,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
+-			 priorities, mask_registers, prio_registers,
+-			 sense_registers);
++			 mask_registers, prio_registers, sense_registers);
+ 
+ /* External interrupt pins in IRL mode */
+ static struct intc_vect vectors_irl[] __initdata = {
+@@ -248,7 +240,7 @@ static struct intc_vect vectors_irl[] __initdata = {
+ };
+ 
+ static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
+-			 priorities, mask_registers, prio_registers, NULL);
++			 mask_registers, prio_registers, NULL);
+ 
+ void __init plat_irq_setup_pins(int mode)
+ {
+diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
 new file mode 100644
-index 0000000..bb9bcd6
+index 0000000..8646363
 --- /dev/null
-+++ b/arch/sh/configs/sdk7780_defconfig
-@@ -0,0 +1,1394 @@
++++ b/arch/sh/kernel/cpu/sh5/Makefile
+@@ -0,0 +1,7 @@
 +#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.24-rc7
-+# Tue Jan 22 11:34:03 2008
++# Makefile for the Linux/SuperH SH-5 backends.
 +#
-+CONFIG_SUPERH=y
-+CONFIG_SUPERH32=y
-+CONFIG_RWSEM_GENERIC_SPINLOCK=y
-+CONFIG_GENERIC_BUG=y
-+CONFIG_GENERIC_FIND_NEXT_BIT=y
-+CONFIG_GENERIC_HWEIGHT=y
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_GENERIC_TIME=y
-+CONFIG_GENERIC_CLOCKEVENTS=y
-+CONFIG_SYS_SUPPORTS_PCI=y
-+CONFIG_STACKTRACE_SUPPORT=y
-+CONFIG_LOCKDEP_SUPPORT=y
-+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-+CONFIG_ARCH_NO_VIRT_TO_BUS=y
-+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++obj-y := entry.o probe.o switchto.o
 +
-+#
-+# General setup
-+#
-+CONFIG_EXPERIMENTAL=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+CONFIG_LOCALVERSION="_SDK7780"
-+CONFIG_LOCALVERSION_AUTO=y
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+CONFIG_SYSVIPC_SYSCTL=y
-+CONFIG_POSIX_MQUEUE=y
-+CONFIG_BSD_PROCESS_ACCT=y
-+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-+# CONFIG_TASKSTATS is not set
-+# CONFIG_USER_NS is not set
-+# CONFIG_PID_NS is not set
-+# CONFIG_AUDIT is not set
-+CONFIG_IKCONFIG=y
-+CONFIG_IKCONFIG_PROC=y
-+CONFIG_LOG_BUF_SHIFT=18
-+# CONFIG_CGROUPS is not set
-+CONFIG_FAIR_GROUP_SCHED=y
-+CONFIG_FAIR_USER_SCHED=y
-+# CONFIG_FAIR_CGROUP_SCHED is not set
-+CONFIG_SYSFS_DEPRECATED=y
-+CONFIG_RELAY=y
-+# CONFIG_BLK_DEV_INITRD is not set
-+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-+CONFIG_SYSCTL=y
-+CONFIG_EMBEDDED=y
-+CONFIG_UID16=y
-+CONFIG_SYSCTL_SYSCALL=y
-+CONFIG_KALLSYMS=y
-+CONFIG_KALLSYMS_ALL=y
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_ELF_CORE=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_ANON_INODES=y
-+CONFIG_EPOLL=y
-+CONFIG_SIGNALFD=y
-+CONFIG_EVENTFD=y
-+CONFIG_SHMEM=y
-+CONFIG_VM_EVENT_COUNTERS=y
-+CONFIG_SLUB_DEBUG=y
-+# CONFIG_SLAB is not set
-+CONFIG_SLUB=y
-+# CONFIG_SLOB is not set
-+CONFIG_SLABINFO=y
-+CONFIG_RT_MUTEXES=y
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+CONFIG_MODULE_FORCE_UNLOAD=y
-+# CONFIG_MODVERSIONS is not set
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+CONFIG_BLOCK=y
-+CONFIG_LBD=y
-+# CONFIG_BLK_DEV_IO_TRACE is not set
-+# CONFIG_LSF is not set
-+# CONFIG_BLK_DEV_BSG is not set
++obj-$(CONFIG_SH_FPU)		+= fpu.o
++obj-$(CONFIG_KALLSYMS)		+= unwind.o
+diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
+new file mode 100644
+index 0000000..ba87501
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh5/entry.S
+@@ -0,0 +1,2101 @@
++/*
++ * arch/sh/kernel/cpu/sh5/entry.S
++ *
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2004 - 2007  Paul Mundt
++ * Copyright (C) 2003, 2004  Richard Curnow
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/errno.h>
++#include <linux/sys.h>
++#include <asm/cpu/registers.h>
++#include <asm/processor.h>
++#include <asm/unistd.h>
++#include <asm/thread_info.h>
++#include <asm/asm-offsets.h>
 +
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+CONFIG_DEFAULT_AS=y
-+# CONFIG_DEFAULT_DEADLINE is not set
-+# CONFIG_DEFAULT_CFQ is not set
-+# CONFIG_DEFAULT_NOOP is not set
-+CONFIG_DEFAULT_IOSCHED="anticipatory"
++/*
++ * SR fields.
++ */
++#define SR_ASID_MASK	0x00ff0000
++#define SR_FD_MASK	0x00008000
++#define SR_SS		0x08000000
++#define SR_BL		0x10000000
++#define SR_MD		0x40000000
 +
-+#
-+# System type
-+#
-+CONFIG_CPU_SH4=y
-+CONFIG_CPU_SH4A=y
-+# CONFIG_CPU_SUBTYPE_SH7619 is not set
-+# CONFIG_CPU_SUBTYPE_SH7203 is not set
-+# CONFIG_CPU_SUBTYPE_SH7206 is not set
-+# CONFIG_CPU_SUBTYPE_SH7263 is not set
-+# CONFIG_CPU_SUBTYPE_SH7705 is not set
-+# CONFIG_CPU_SUBTYPE_SH7706 is not set
-+# CONFIG_CPU_SUBTYPE_SH7707 is not set
-+# CONFIG_CPU_SUBTYPE_SH7708 is not set
-+# CONFIG_CPU_SUBTYPE_SH7709 is not set
-+# CONFIG_CPU_SUBTYPE_SH7710 is not set
-+# CONFIG_CPU_SUBTYPE_SH7712 is not set
-+# CONFIG_CPU_SUBTYPE_SH7720 is not set
-+# CONFIG_CPU_SUBTYPE_SH7721 is not set
-+# CONFIG_CPU_SUBTYPE_SH7750 is not set
-+# CONFIG_CPU_SUBTYPE_SH7091 is not set
-+# CONFIG_CPU_SUBTYPE_SH7750R is not set
-+# CONFIG_CPU_SUBTYPE_SH7750S is not set
-+# CONFIG_CPU_SUBTYPE_SH7751 is not set
-+# CONFIG_CPU_SUBTYPE_SH7751R is not set
-+# CONFIG_CPU_SUBTYPE_SH7760 is not set
-+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
-+# CONFIG_CPU_SUBTYPE_SH7763 is not set
-+# CONFIG_CPU_SUBTYPE_SH7770 is not set
-+CONFIG_CPU_SUBTYPE_SH7780=y
-+# CONFIG_CPU_SUBTYPE_SH7785 is not set
-+# CONFIG_CPU_SUBTYPE_SHX3 is not set
-+# CONFIG_CPU_SUBTYPE_SH7343 is not set
-+# CONFIG_CPU_SUBTYPE_SH7722 is not set
-+# CONFIG_CPU_SUBTYPE_SH5_101 is not set
-+# CONFIG_CPU_SUBTYPE_SH5_103 is not set
++/*
++ * Event code.
++ */
++#define	EVENT_INTERRUPT		0
++#define	EVENT_FAULT_TLB		1
++#define	EVENT_FAULT_NOT_TLB	2
++#define	EVENT_DEBUG		3
 +
-+#
-+# Memory management options
-+#
-+CONFIG_QUICKLIST=y
-+CONFIG_MMU=y
-+CONFIG_PAGE_OFFSET=0x80000000
-+CONFIG_MEMORY_START=0x08000000
-+CONFIG_MEMORY_SIZE=0x08000000
-+CONFIG_29BIT=y
-+# CONFIG_PMB is not set
-+CONFIG_VSYSCALL=y
-+CONFIG_ARCH_FLATMEM_ENABLE=y
-+CONFIG_ARCH_SPARSEMEM_ENABLE=y
-+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
-+CONFIG_MAX_ACTIVE_REGIONS=1
-+CONFIG_ARCH_POPULATES_NODE_MAP=y
-+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-+CONFIG_PAGE_SIZE_4KB=y
-+# CONFIG_PAGE_SIZE_8KB is not set
-+# CONFIG_PAGE_SIZE_64KB is not set
-+CONFIG_HUGETLB_PAGE_SIZE_64K=y
-+# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
-+# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
-+CONFIG_SELECT_MEMORY_MODEL=y
-+CONFIG_FLATMEM_MANUAL=y
-+# CONFIG_DISCONTIGMEM_MANUAL is not set
-+# CONFIG_SPARSEMEM_MANUAL is not set
-+CONFIG_FLATMEM=y
-+CONFIG_FLAT_NODE_MEM_MAP=y
-+CONFIG_SPARSEMEM_STATIC=y
-+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
-+CONFIG_SPLIT_PTLOCK_CPUS=4
-+CONFIG_RESOURCES_64BIT=y
-+CONFIG_ZONE_DMA_FLAG=0
-+CONFIG_NR_QUICK=2
++/* EXPEVT values */
++#define	RESET_CAUSE		0x20
++#define DEBUGSS_CAUSE		0x980
 +
-+#
-+# Cache configuration
-+#
-+# CONFIG_SH_DIRECT_MAPPED is not set
-+CONFIG_CACHE_WRITEBACK=y
-+# CONFIG_CACHE_WRITETHROUGH is not set
-+# CONFIG_CACHE_OFF is not set
++/*
++ * Frame layout. Quad index.
++ */
++#define	FRAME_T(x)	FRAME_TBASE+(x*8)
++#define	FRAME_R(x)	FRAME_RBASE+(x*8)
++#define	FRAME_S(x)	FRAME_SBASE+(x*8)
++#define FSPC		0
++#define FSSR		1
++#define FSYSCALL_ID	2
 +
-+#
-+# Processor features
-+#
-+CONFIG_CPU_LITTLE_ENDIAN=y
-+# CONFIG_CPU_BIG_ENDIAN is not set
-+CONFIG_SH_FPU=y
-+CONFIG_SH_STORE_QUEUES=y
-+# CONFIG_SPECULATIVE_EXECUTION is not set
-+CONFIG_CPU_HAS_INTEVT=y
-+CONFIG_CPU_HAS_SR_RB=y
-+CONFIG_CPU_HAS_FPU=y
++/* Arrange the save frame to be a multiple of 32 bytes long */
++#define FRAME_SBASE	0
++#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
++#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
++#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */
++#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */
 +
-+#
-+# Board support
-+#
-+# CONFIG_SH_7780_SOLUTION_ENGINE is not set
-+CONFIG_SH_SDK7780=y
-+# CONFIG_SH_HIGHLANDER is not set
-+# CONFIG_SH_SDK7780_STANDALONE is not set
-+CONFIG_SH_SDK7780_BASE=y
++#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
++#define FP_FRAME_BASE	0
 +
-+#
-+# Timer and clock configuration
-+#
-+CONFIG_SH_TMU=y
-+CONFIG_SH_TIMER_IRQ=28
-+CONFIG_SH_PCLK_FREQ=33333333
-+CONFIG_TICK_ONESHOT=y
-+# CONFIG_NO_HZ is not set
-+CONFIG_HIGH_RES_TIMERS=y
-+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++#define	SAVED_R2	0*8
++#define	SAVED_R3	1*8
++#define	SAVED_R4	2*8
++#define	SAVED_R5	3*8
++#define	SAVED_R18	4*8
++#define	SAVED_R6	5*8
++#define	SAVED_TR0	6*8
 +
-+#
-+# CPU Frequency scaling
-+#
-+# CONFIG_CPU_FREQ is not set
++/* These are the registers saved in the TLB path that aren't saved in the first
++   level of the normal one. */
++#define	TLB_SAVED_R25	7*8
++#define	TLB_SAVED_TR1	8*8
++#define	TLB_SAVED_TR2	9*8
++#define	TLB_SAVED_TR3	10*8
++#define	TLB_SAVED_TR4	11*8
++/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
++   breakage otherwise. */
++#define	TLB_SAVED_R0	12*8
++#define	TLB_SAVED_R1	13*8
 +
-+#
-+# DMA support
-+#
-+CONFIG_SH_DMA_API=y
-+CONFIG_SH_DMA=y
-+CONFIG_NR_ONCHIP_DMA_CHANNELS=12
-+# CONFIG_NR_DMA_CHANNELS_BOOL is not set
++#define CLI()				\
++	getcon	SR, r6;			\
++	ori	r6, 0xf0, r6;		\
++	putcon	r6, SR;
 +
-+#
-+# Companion Chips
-+#
++#define STI()				\
++	getcon	SR, r6;			\
++	andi	r6, ~0xf0, r6;		\
++	putcon	r6, SR;
 +
-+#
-+# Additional SuperH Device Drivers
-+#
-+CONFIG_HEARTBEAT=y
-+# CONFIG_PUSH_SWITCH is not set
++#ifdef CONFIG_PREEMPT
++#  define preempt_stop()	CLI()
++#else
++#  define preempt_stop()
++#  define resume_kernel		restore_all
++#endif
 +
-+#
-+# Kernel features
-+#
-+# CONFIG_HZ_100 is not set
-+CONFIG_HZ_250=y
-+# CONFIG_HZ_300 is not set
-+# CONFIG_HZ_1000 is not set
-+CONFIG_HZ=250
-+# CONFIG_KEXEC is not set
-+# CONFIG_CRASH_DUMP is not set
-+# CONFIG_PREEMPT_NONE is not set
-+# CONFIG_PREEMPT_VOLUNTARY is not set
-+CONFIG_PREEMPT=y
-+CONFIG_PREEMPT_BKL=y
-+CONFIG_GUSA=y
++	.section	.data, "aw"
 +
-+#
-+# Boot options
-+#
-+CONFIG_ZERO_PAGE_OFFSET=0x00001000
-+CONFIG_BOOT_LINK_OFFSET=0x01800000
-+CONFIG_CMDLINE_BOOL=y
-+CONFIG_CMDLINE="mem=128M console=tty0 console=ttySC0,115200 ip=bootp root=/dev/nfs nfsroot=192.168.0.1:/home/rootfs"
++#define FAST_TLBMISS_STACK_CACHELINES 4
++#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
 +
-+#
-+# Bus options
-+#
-+CONFIG_PCI=y
-+CONFIG_SH_PCIDMA_NONCOHERENT=y
-+CONFIG_PCI_AUTO=y
-+CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
-+# CONFIG_ARCH_SUPPORTS_MSI is not set
-+# CONFIG_PCI_LEGACY is not set
-+CONFIG_PCI_DEBUG=y
-+CONFIG_PCCARD=y
-+# CONFIG_PCMCIA_DEBUG is not set
-+CONFIG_PCMCIA=y
-+CONFIG_PCMCIA_LOAD_CIS=y
-+CONFIG_PCMCIA_IOCTL=y
-+CONFIG_CARDBUS=y
++/* Register back-up area for all exceptions */
++	.balign	32
++	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
++	 * register saves etc. */
++	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
++/* This is 32 byte aligned by construction */
++/* Register back-up area for all exceptions */
++reg_save_area:
++	.quad	0
++	.quad	0
++	.quad	0
++	.quad	0
 +
-+#
-+# PC-card bridges
-+#
-+CONFIG_YENTA=y
-+CONFIG_YENTA_O2=y
-+CONFIG_YENTA_RICOH=y
-+CONFIG_YENTA_TI=y
-+CONFIG_YENTA_ENE_TUNE=y
-+CONFIG_YENTA_TOSHIBA=y
-+# CONFIG_PD6729 is not set
-+# CONFIG_I82092 is not set
-+CONFIG_PCCARD_NONSTATIC=y
-+CONFIG_HOTPLUG_PCI=y
-+# CONFIG_HOTPLUG_PCI_FAKE is not set
-+# CONFIG_HOTPLUG_PCI_CPCI is not set
-+# CONFIG_HOTPLUG_PCI_SHPC is not set
++	.quad	0
++	.quad	0
++	.quad	0
++	.quad	0
 +
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+# CONFIG_BINFMT_MISC is not set
++	.quad	0
++	.quad	0
++	.quad	0
++	.quad	0
 +
-+#
-+# Networking
-+#
-+CONFIG_NET=y
++	.quad	0
++	.quad   0
 +
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+# CONFIG_PACKET_MMAP is not set
-+CONFIG_UNIX=y
-+CONFIG_XFRM=y
-+# CONFIG_XFRM_USER is not set
-+# CONFIG_XFRM_SUB_POLICY is not set
-+# CONFIG_XFRM_MIGRATE is not set
-+# CONFIG_NET_KEY is not set
-+CONFIG_INET=y
-+CONFIG_IP_MULTICAST=y
-+CONFIG_IP_ADVANCED_ROUTER=y
-+CONFIG_ASK_IP_FIB_HASH=y
-+# CONFIG_IP_FIB_TRIE is not set
-+CONFIG_IP_FIB_HASH=y
-+# CONFIG_IP_MULTIPLE_TABLES is not set
-+# CONFIG_IP_ROUTE_MULTIPATH is not set
-+# CONFIG_IP_ROUTE_VERBOSE is not set
-+CONFIG_IP_PNP=y
-+# CONFIG_IP_PNP_DHCP is not set
-+CONFIG_IP_PNP_BOOTP=y
-+# CONFIG_IP_PNP_RARP is not set
-+# CONFIG_NET_IPIP is not set
-+# CONFIG_NET_IPGRE is not set
-+# CONFIG_IP_MROUTE is not set
-+# CONFIG_ARPD is not set
-+# CONFIG_SYN_COOKIES is not set
-+# CONFIG_INET_AH is not set
-+# CONFIG_INET_ESP is not set
-+# CONFIG_INET_IPCOMP is not set
-+# CONFIG_INET_XFRM_TUNNEL is not set
-+CONFIG_INET_TUNNEL=y
-+CONFIG_INET_XFRM_MODE_TRANSPORT=y
-+CONFIG_INET_XFRM_MODE_TUNNEL=y
-+# CONFIG_INET_XFRM_MODE_BEET is not set
-+# CONFIG_INET_LRO is not set
-+CONFIG_INET_DIAG=y
-+CONFIG_INET_TCP_DIAG=y
-+# CONFIG_TCP_CONG_ADVANCED is not set
-+CONFIG_TCP_CONG_CUBIC=y
-+CONFIG_DEFAULT_TCP_CONG="cubic"
-+# CONFIG_TCP_MD5SIG is not set
-+CONFIG_IPV6=y
-+# CONFIG_IPV6_PRIVACY is not set
-+# CONFIG_IPV6_ROUTER_PREF is not set
-+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
-+# CONFIG_INET6_AH is not set
-+# CONFIG_INET6_ESP is not set
-+# CONFIG_INET6_IPCOMP is not set
-+# CONFIG_IPV6_MIP6 is not set
-+# CONFIG_INET6_XFRM_TUNNEL is not set
-+# CONFIG_INET6_TUNNEL is not set
-+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
-+CONFIG_INET6_XFRM_MODE_TUNNEL=y
-+# CONFIG_INET6_XFRM_MODE_BEET is not set
-+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
-+CONFIG_IPV6_SIT=y
-+# CONFIG_IPV6_TUNNEL is not set
-+# CONFIG_IPV6_MULTIPLE_TABLES is not set
-+# CONFIG_NETWORK_SECMARK is not set
-+# CONFIG_NETFILTER is not set
-+# CONFIG_IP_DCCP is not set
-+# CONFIG_IP_SCTP is not set
-+# CONFIG_TIPC is not set
-+# CONFIG_ATM is not set
-+# CONFIG_BRIDGE is not set
-+# CONFIG_VLAN_8021Q is not set
-+# CONFIG_DECNET is not set
-+# CONFIG_LLC2 is not set
-+# CONFIG_IPX is not set
-+# CONFIG_ATALK is not set
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+# CONFIG_ECONET is not set
-+# CONFIG_WAN_ROUTER is not set
-+CONFIG_NET_SCHED=y
++/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
++ * reentrancy. Note this area may be accessed via physical address.
++ * Align so this fits a whole single cache line, for ease of purging.
++ */
++	.balign 32,0,32
++resvec_save_area:
++	.quad	0
++	.quad	0
++	.quad	0
++	.quad	0
++	.quad	0
++	.balign 32,0,32
 +
-+#
-+# Queueing/Scheduling
-+#
-+# CONFIG_NET_SCH_CBQ is not set
-+# CONFIG_NET_SCH_HTB is not set
-+# CONFIG_NET_SCH_HFSC is not set
-+# CONFIG_NET_SCH_PRIO is not set
-+# CONFIG_NET_SCH_RR is not set
-+# CONFIG_NET_SCH_RED is not set
-+# CONFIG_NET_SCH_SFQ is not set
-+# CONFIG_NET_SCH_TEQL is not set
-+# CONFIG_NET_SCH_TBF is not set
-+# CONFIG_NET_SCH_GRED is not set
-+# CONFIG_NET_SCH_DSMARK is not set
-+# CONFIG_NET_SCH_NETEM is not set
-+# CONFIG_NET_SCH_INGRESS is not set
++/* Jump table of 3rd level handlers  */
++trap_jtable:
++	.long	do_exception_error		/* 0x000 */
++	.long	do_exception_error		/* 0x020 */
++	.long	tlb_miss_load				/* 0x040 */
++	.long	tlb_miss_store				/* 0x060 */
++	! ARTIFICIAL pseudo-EXPEVT setting
++	.long	do_debug_interrupt		/* 0x080 */
++	.long	tlb_miss_load				/* 0x0A0 */
++	.long	tlb_miss_store				/* 0x0C0 */
++	.long	do_address_error_load	/* 0x0E0 */
++	.long	do_address_error_store	/* 0x100 */
++#ifdef CONFIG_SH_FPU
++	.long	do_fpu_error		/* 0x120 */
++#else
++	.long	do_exception_error		/* 0x120 */
++#endif
++	.long	do_exception_error		/* 0x140 */
++	.long	system_call				/* 0x160 */
++	.long	do_reserved_inst		/* 0x180 */
++	.long	do_illegal_slot_inst	/* 0x1A0 */
++	.long	do_exception_error		/* 0x1C0 - NMI */
++	.long	do_exception_error		/* 0x1E0 */
++	.rept 15
++		.long do_IRQ		/* 0x200 - 0x3C0 */
++	.endr
++	.long	do_exception_error		/* 0x3E0 */
++	.rept 32
++		.long do_IRQ		/* 0x400 - 0x7E0 */
++	.endr
++	.long	fpu_error_or_IRQA			/* 0x800 */
++	.long	fpu_error_or_IRQB			/* 0x820 */
++	.long	do_IRQ			/* 0x840 */
++	.long	do_IRQ			/* 0x860 */
++	.rept 6
++		.long do_exception_error	/* 0x880 - 0x920 */
++	.endr
++	.long	do_software_break_point	/* 0x940 */
++	.long	do_exception_error		/* 0x960 */
++	.long	do_single_step		/* 0x980 */
 +
-+#
-+# Classification
-+#
-+# CONFIG_NET_CLS_BASIC is not set
-+# CONFIG_NET_CLS_TCINDEX is not set
-+# CONFIG_NET_CLS_ROUTE4 is not set
-+# CONFIG_NET_CLS_FW is not set
-+# CONFIG_NET_CLS_U32 is not set
-+# CONFIG_NET_CLS_RSVP is not set
-+# CONFIG_NET_CLS_RSVP6 is not set
-+# CONFIG_NET_EMATCH is not set
-+# CONFIG_NET_CLS_ACT is not set
-+# CONFIG_NET_CLS_POLICE is not set
-+CONFIG_NET_SCH_FIFO=y
++	.rept 3
++		.long do_exception_error	/* 0x9A0 - 0x9E0 */
++	.endr
++	.long	do_IRQ			/* 0xA00 */
++	.long	do_IRQ			/* 0xA20 */
++	.long	itlb_miss_or_IRQ			/* 0xA40 */
++	.long	do_IRQ			/* 0xA60 */
++	.long	do_IRQ			/* 0xA80 */
++	.long	itlb_miss_or_IRQ			/* 0xAA0 */
++	.long	do_exception_error		/* 0xAC0 */
++	.long	do_address_error_exec	/* 0xAE0 */
++	.rept 8
++		.long do_exception_error	/* 0xB00 - 0xBE0 */
++	.endr
++	.rept 18
++		.long do_IRQ		/* 0xC00 - 0xE20 */
++	.endr
 +
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+# CONFIG_HAMRADIO is not set
-+# CONFIG_IRDA is not set
-+# CONFIG_BT is not set
-+# CONFIG_AF_RXRPC is not set
++	.section	.text64, "ax"
 +
-+#
-+# Wireless
-+#
-+# CONFIG_CFG80211 is not set
-+# CONFIG_WIRELESS_EXT is not set
-+# CONFIG_MAC80211 is not set
-+# CONFIG_IEEE80211 is not set
-+# CONFIG_RFKILL is not set
-+# CONFIG_NET_9P is not set
++/*
++ * --- Exception/Interrupt/Event Handling Section
++ */
 +
-+#
-+# Device Drivers
-+#
++/*
++ * VBR and RESVEC blocks.
++ *
++ * First level handler for VBR-based exceptions.
++ *
++ * To avoid waste of space, align to the maximum text block size.
++ * This is assumed to be at most 128 bytes or 32 instructions.
++ * DO NOT EXCEED 32 instructions on the first level handlers !
++ *
++ * Also note that RESVEC is contained within the VBR block
++ * where the room left (1KB - TEXT_SIZE) allows placing
++ * the RESVEC block (at most 512B + TEXT_SIZE).
++ *
++ * So first (and only) level handler for RESVEC-based exceptions.
++ *
++ * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
++ * and interrupt) we are very tight on register space until
++ * saving onto the stack frame, which is done in handle_exception().
++ *
++ */
 +
-+#
-+# Generic Driver Options
-+#
-+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+CONFIG_FW_LOADER=y
-+# CONFIG_DEBUG_DRIVER is not set
-+# CONFIG_DEBUG_DEVRES is not set
-+# CONFIG_SYS_HYPERVISOR is not set
-+# CONFIG_CONNECTOR is not set
-+# CONFIG_MTD is not set
-+CONFIG_PARPORT=y
-+# CONFIG_PARPORT_PC is not set
-+# CONFIG_PARPORT_GSC is not set
-+# CONFIG_PARPORT_AX88796 is not set
-+# CONFIG_PARPORT_1284 is not set
-+CONFIG_BLK_DEV=y
-+# CONFIG_BLK_CPQ_CISS_DA is not set
-+# CONFIG_BLK_DEV_DAC960 is not set
-+# CONFIG_BLK_DEV_UMEM is not set
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=y
-+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-+# CONFIG_BLK_DEV_NBD is not set
-+# CONFIG_BLK_DEV_SX8 is not set
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=4096
-+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-+# CONFIG_CDROM_PKTCDVD is not set
-+# CONFIG_ATA_OVER_ETH is not set
-+# CONFIG_MISC_DEVICES is not set
-+CONFIG_IDE=y
-+CONFIG_IDE_MAX_HWIFS=4
-+CONFIG_BLK_DEV_IDE=y
++#define	TEXT_SIZE 	128
++#define	BLOCK_SIZE 	1664 		/* Dynamic check, 13*128 */
 +
-+#
-+# Please see Documentation/ide.txt for help/info on IDE drives
-+#
-+# CONFIG_BLK_DEV_IDE_SATA is not set
-+CONFIG_BLK_DEV_IDEDISK=y
-+CONFIG_IDEDISK_MULTI_MODE=y
-+# CONFIG_BLK_DEV_IDECS is not set
-+# CONFIG_BLK_DEV_DELKIN is not set
-+CONFIG_BLK_DEV_IDECD=y
-+# CONFIG_BLK_DEV_IDETAPE is not set
-+# CONFIG_BLK_DEV_IDEFLOPPY is not set
-+# CONFIG_BLK_DEV_IDESCSI is not set
-+# CONFIG_IDE_TASK_IOCTL is not set
-+CONFIG_IDE_PROC_FS=y
++	.balign TEXT_SIZE
++LVBR_block:
++	.space	256, 0			/* Power-on class handler, */
++					/* not required here       */
++not_a_tlb_miss:
++	synco	/* TAKum03020 (but probably a good idea anyway.) */
++	/* Save original stack pointer into KCR1 */
++	putcon	SP, KCR1
 +
-+#
-+# IDE chipset support/bugfixes
-+#
-+CONFIG_IDE_GENERIC=y
-+CONFIG_BLK_DEV_PLATFORM=y
++	/* Save other original registers into reg_save_area */
++        movi  reg_save_area, SP
++	st.q	SP, SAVED_R2, r2
++	st.q	SP, SAVED_R3, r3
++	st.q	SP, SAVED_R4, r4
++	st.q	SP, SAVED_R5, r5
++	st.q	SP, SAVED_R6, r6
++	st.q	SP, SAVED_R18, r18
++	gettr	tr0, r3
++	st.q	SP, SAVED_TR0, r3
 +
-+#
-+# PCI IDE chipsets support
-+#
-+CONFIG_BLK_DEV_IDEPCI=y
-+# CONFIG_IDEPCI_SHARE_IRQ is not set
-+CONFIG_IDEPCI_PCIBUS_ORDER=y
-+# CONFIG_BLK_DEV_OFFBOARD is not set
-+CONFIG_BLK_DEV_GENERIC=y
-+# CONFIG_BLK_DEV_OPTI621 is not set
-+# CONFIG_BLK_DEV_AEC62XX is not set
-+# CONFIG_BLK_DEV_ALI15X3 is not set
-+# CONFIG_BLK_DEV_AMD74XX is not set
-+# CONFIG_BLK_DEV_CMD64X is not set
-+# CONFIG_BLK_DEV_TRIFLEX is not set
-+# CONFIG_BLK_DEV_CY82C693 is not set
-+# CONFIG_BLK_DEV_CS5520 is not set
-+# CONFIG_BLK_DEV_CS5530 is not set
-+# CONFIG_BLK_DEV_HPT34X is not set
-+# CONFIG_BLK_DEV_HPT366 is not set
-+# CONFIG_BLK_DEV_JMICRON is not set
-+# CONFIG_BLK_DEV_SC1200 is not set
-+# CONFIG_BLK_DEV_PIIX is not set
-+# CONFIG_BLK_DEV_IT8213 is not set
-+# CONFIG_BLK_DEV_IT821X is not set
-+# CONFIG_BLK_DEV_NS87415 is not set
-+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
-+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
-+# CONFIG_BLK_DEV_SVWKS is not set
-+# CONFIG_BLK_DEV_SIIMAGE is not set
-+# CONFIG_BLK_DEV_SLC90E66 is not set
-+# CONFIG_BLK_DEV_TRM290 is not set
-+# CONFIG_BLK_DEV_VIA82CXXX is not set
-+# CONFIG_BLK_DEV_TC86C001 is not set
-+# CONFIG_IDE_ARM is not set
-+# CONFIG_BLK_DEV_IDEDMA is not set
-+# CONFIG_IDE_ARCH_OBSOLETE_INIT is not set
-+# CONFIG_BLK_DEV_HD is not set
++	/* Set args for Non-debug, Not a TLB miss class handler */
++	getcon	EXPEVT, r2
++	movi	ret_from_exception, r3
++	ori	r3, 1, r3
++	movi	EVENT_FAULT_NOT_TLB, r4
++	or	SP, ZERO, r5
++	getcon	KCR1, SP
++	pta	handle_exception, tr0
++	blink	tr0, ZERO
 +
-+#
-+# SCSI device support
-+#
-+# CONFIG_RAID_ATTRS is not set
-+CONFIG_SCSI=y
-+CONFIG_SCSI_DMA=y
-+# CONFIG_SCSI_TGT is not set
-+CONFIG_SCSI_NETLINK=y
-+CONFIG_SCSI_PROC_FS=y
++	.balign 256
++	! VBR+0x200
++	nop
++	.balign 256
++	! VBR+0x300
++	nop
++	.balign 256
++	/*
++	 * Instead of the natural .balign 1024 place RESVEC here
++	 * respecting the final 1KB alignment.
++	 */
++	.balign TEXT_SIZE
++	/*
++	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
++	 * block making sure the final alignment is correct.
++	 */
++tlb_miss:
++	synco	/* TAKum03020 (but probably a good idea anyway.) */
++	putcon	SP, KCR1
++	movi	reg_save_area, SP
++	/* SP is guaranteed 32-byte aligned. */
++	st.q	SP, TLB_SAVED_R0 , r0
++	st.q	SP, TLB_SAVED_R1 , r1
++	st.q	SP, SAVED_R2 , r2
++	st.q	SP, SAVED_R3 , r3
++	st.q	SP, SAVED_R4 , r4
++	st.q	SP, SAVED_R5 , r5
++	st.q	SP, SAVED_R6 , r6
++	st.q	SP, SAVED_R18, r18
 +
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=y
-+# CONFIG_CHR_DEV_ST is not set
-+# CONFIG_CHR_DEV_OSST is not set
-+CONFIG_BLK_DEV_SR=y
-+# CONFIG_BLK_DEV_SR_VENDOR is not set
-+CONFIG_CHR_DEV_SG=y
-+# CONFIG_CHR_DEV_SCH is not set
++	/* Save R25 for safety; as/ld may want to use it to achieve the call to
++	 * the code in mm/tlbmiss.c */
++	st.q	SP, TLB_SAVED_R25, r25
++	gettr	tr0, r2
++	gettr	tr1, r3
++	gettr	tr2, r4
++	gettr	tr3, r5
++	gettr	tr4, r18
++	st.q	SP, SAVED_TR0 , r2
++	st.q	SP, TLB_SAVED_TR1 , r3
++	st.q	SP, TLB_SAVED_TR2 , r4
++	st.q	SP, TLB_SAVED_TR3 , r5
++	st.q	SP, TLB_SAVED_TR4 , r18
 +
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+# CONFIG_SCSI_MULTI_LUN is not set
-+# CONFIG_SCSI_CONSTANTS is not set
-+# CONFIG_SCSI_LOGGING is not set
-+# CONFIG_SCSI_SCAN_ASYNC is not set
-+CONFIG_SCSI_WAIT_SCAN=m
++	pt	do_fast_page_fault, tr0
++	getcon	SSR, r2
++	getcon	EXPEVT, r3
++	getcon	TEA, r4
++	shlri	r2, 30, r2
++	andi	r2, 1, r2	/* r2 = SSR.MD */
++	blink 	tr0, LINK
 +
-+#
-+# SCSI Transports
-+#
-+CONFIG_SCSI_SPI_ATTRS=y
-+CONFIG_SCSI_FC_ATTRS=y
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+# CONFIG_SCSI_SAS_LIBSAS is not set
-+# CONFIG_SCSI_SRP_ATTRS is not set
-+CONFIG_SCSI_LOWLEVEL=y
-+# CONFIG_ISCSI_TCP is not set
-+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-+# CONFIG_SCSI_3W_9XXX is not set
-+# CONFIG_SCSI_ACARD is not set
-+# CONFIG_SCSI_AACRAID is not set
-+# CONFIG_SCSI_AIC7XXX is not set
-+# CONFIG_SCSI_AIC7XXX_OLD is not set
-+# CONFIG_SCSI_AIC79XX is not set
-+# CONFIG_SCSI_AIC94XX is not set
-+# CONFIG_SCSI_ARCMSR is not set
-+# CONFIG_MEGARAID_NEWGEN is not set
-+# CONFIG_MEGARAID_LEGACY is not set
-+# CONFIG_MEGARAID_SAS is not set
-+# CONFIG_SCSI_HPTIOP is not set
-+# CONFIG_SCSI_DMX3191D is not set
-+# CONFIG_SCSI_FUTURE_DOMAIN is not set
-+# CONFIG_SCSI_IPS is not set
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+# CONFIG_SCSI_STEX is not set
-+# CONFIG_SCSI_SYM53C8XX_2 is not set
-+# CONFIG_SCSI_IPR is not set
-+# CONFIG_SCSI_QLOGIC_1280 is not set
-+# CONFIG_SCSI_QLA_FC is not set
-+# CONFIG_SCSI_QLA_ISCSI is not set
-+# CONFIG_SCSI_LPFC is not set
-+# CONFIG_SCSI_DC395x is not set
-+# CONFIG_SCSI_DC390T is not set
-+# CONFIG_SCSI_NSP32 is not set
-+# CONFIG_SCSI_DEBUG is not set
-+# CONFIG_SCSI_SRP is not set
-+# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
-+CONFIG_ATA=y
-+# CONFIG_ATA_NONSTANDARD is not set
-+# CONFIG_SATA_AHCI is not set
-+# CONFIG_SATA_SVW is not set
-+# CONFIG_ATA_PIIX is not set
-+# CONFIG_SATA_MV is not set
-+# CONFIG_SATA_NV is not set
-+# CONFIG_PDC_ADMA is not set
-+# CONFIG_SATA_QSTOR is not set
-+# CONFIG_SATA_PROMISE is not set
-+# CONFIG_SATA_SX4 is not set
-+# CONFIG_SATA_SIL is not set
-+# CONFIG_SATA_SIL24 is not set
-+# CONFIG_SATA_SIS is not set
-+# CONFIG_SATA_ULI is not set
-+# CONFIG_SATA_VIA is not set
-+# CONFIG_SATA_VITESSE is not set
-+# CONFIG_SATA_INIC162X is not set
-+# CONFIG_PATA_ALI is not set
-+# CONFIG_PATA_AMD is not set
-+# CONFIG_PATA_ARTOP is not set
-+# CONFIG_PATA_ATIIXP is not set
-+# CONFIG_PATA_CMD640_PCI is not set
-+# CONFIG_PATA_CMD64X is not set
-+# CONFIG_PATA_CS5520 is not set
-+# CONFIG_PATA_CS5530 is not set
-+# CONFIG_PATA_CYPRESS is not set
-+# CONFIG_PATA_EFAR is not set
-+# CONFIG_ATA_GENERIC is not set
-+# CONFIG_PATA_HPT366 is not set
-+# CONFIG_PATA_HPT37X is not set
-+# CONFIG_PATA_HPT3X2N is not set
-+# CONFIG_PATA_HPT3X3 is not set
-+# CONFIG_PATA_IT821X is not set
-+# CONFIG_PATA_IT8213 is not set
-+# CONFIG_PATA_JMICRON is not set
-+# CONFIG_PATA_TRIFLEX is not set
-+# CONFIG_PATA_MARVELL is not set
-+# CONFIG_PATA_MPIIX is not set
-+# CONFIG_PATA_OLDPIIX is not set
-+# CONFIG_PATA_NETCELL is not set
-+# CONFIG_PATA_NS87410 is not set
-+# CONFIG_PATA_NS87415 is not set
-+# CONFIG_PATA_OPTI is not set
-+# CONFIG_PATA_OPTIDMA is not set
-+# CONFIG_PATA_PCMCIA is not set
-+# CONFIG_PATA_PDC_OLD is not set
-+# CONFIG_PATA_RADISYS is not set
-+# CONFIG_PATA_RZ1000 is not set
-+# CONFIG_PATA_SC1200 is not set
-+# CONFIG_PATA_SERVERWORKS is not set
-+# CONFIG_PATA_PDC2027X is not set
-+# CONFIG_PATA_SIL680 is not set
-+# CONFIG_PATA_SIS is not set
-+# CONFIG_PATA_VIA is not set
-+# CONFIG_PATA_WINBOND is not set
-+# CONFIG_PATA_PLATFORM is not set
-+CONFIG_MD=y
-+# CONFIG_BLK_DEV_MD is not set
-+CONFIG_BLK_DEV_DM=y
-+# CONFIG_DM_DEBUG is not set
-+# CONFIG_DM_CRYPT is not set
-+# CONFIG_DM_SNAPSHOT is not set
-+# CONFIG_DM_MIRROR is not set
-+# CONFIG_DM_ZERO is not set
-+# CONFIG_DM_MULTIPATH is not set
-+# CONFIG_DM_DELAY is not set
-+# CONFIG_DM_UEVENT is not set
-+# CONFIG_FUSION is not set
++	pt	fixup_to_invoke_general_handler, tr1
 +
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+# CONFIG_FIREWIRE is not set
-+# CONFIG_IEEE1394 is not set
-+# CONFIG_I2O is not set
-+CONFIG_NETDEVICES=y
-+# CONFIG_NETDEVICES_MULTIQUEUE is not set
-+# CONFIG_DUMMY is not set
-+# CONFIG_BONDING is not set
-+# CONFIG_MACVLAN is not set
-+# CONFIG_EQUALIZER is not set
-+# CONFIG_TUN is not set
-+# CONFIG_VETH is not set
-+# CONFIG_ARCNET is not set
-+# CONFIG_PHYLIB is not set
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=y
-+# CONFIG_AX88796 is not set
-+# CONFIG_STNIC is not set
-+# CONFIG_HAPPYMEAL is not set
-+# CONFIG_SUNGEM is not set
-+# CONFIG_CASSINI is not set
-+# CONFIG_NET_VENDOR_3COM is not set
-+CONFIG_SMC91X=y
-+# CONFIG_NET_TULIP is not set
-+# CONFIG_HP100 is not set
-+# CONFIG_IBM_NEW_EMAC_ZMII is not set
-+# CONFIG_IBM_NEW_EMAC_RGMII is not set
-+# CONFIG_IBM_NEW_EMAC_TAH is not set
-+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-+# CONFIG_NET_PCI is not set
-+# CONFIG_B44 is not set
-+# CONFIG_NET_POCKET is not set
-+# CONFIG_NETDEV_1000 is not set
-+# CONFIG_NETDEV_10000 is not set
-+# CONFIG_TR is not set
++	/* If the fast path handler fixed the fault, just drop through quickly
++	   to the restore code right away to return to the excepting context.
++	   */
++	beqi/u	r2, 0, tr1
 +
-+#
-+# Wireless LAN
-+#
-+# CONFIG_WLAN_PRE80211 is not set
-+# CONFIG_WLAN_80211 is not set
++fast_tlb_miss_restore:
++	ld.q	SP, SAVED_TR0, r2
++	ld.q	SP, TLB_SAVED_TR1, r3
++	ld.q	SP, TLB_SAVED_TR2, r4
 +
-+#
-+# USB Network Adapters
-+#
-+# CONFIG_USB_CATC is not set
-+# CONFIG_USB_KAWETH is not set
-+# CONFIG_USB_PEGASUS is not set
-+# CONFIG_USB_RTL8150 is not set
-+# CONFIG_USB_USBNET is not set
-+# CONFIG_NET_PCMCIA is not set
-+# CONFIG_WAN is not set
-+# CONFIG_FDDI is not set
-+# CONFIG_HIPPI is not set
-+# CONFIG_PLIP is not set
-+# CONFIG_PPP is not set
-+# CONFIG_SLIP is not set
-+# CONFIG_NET_FC is not set
-+# CONFIG_SHAPER is not set
-+CONFIG_NETCONSOLE=y
-+# CONFIG_NETCONSOLE_DYNAMIC is not set
-+CONFIG_NETPOLL=y
-+# CONFIG_NETPOLL_TRAP is not set
-+CONFIG_NET_POLL_CONTROLLER=y
-+# CONFIG_ISDN is not set
-+# CONFIG_PHONE is not set
++	ld.q	SP, TLB_SAVED_TR3, r5
++	ld.q	SP, TLB_SAVED_TR4, r18
 +
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+# CONFIG_INPUT_FF_MEMLESS is not set
-+# CONFIG_INPUT_POLLDEV is not set
++	ptabs	r2, tr0
++	ptabs	r3, tr1
++	ptabs	r4, tr2
++	ptabs	r5, tr3
++	ptabs	r18, tr4
 +
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+CONFIG_INPUT_MOUSEDEV_PSAUX=y
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+# CONFIG_INPUT_JOYDEV is not set
-+CONFIG_INPUT_EVDEV=y
-+# CONFIG_INPUT_EVBUG is not set
++	ld.q	SP, TLB_SAVED_R0, r0
++	ld.q	SP, TLB_SAVED_R1, r1
++	ld.q	SP, SAVED_R2, r2
++	ld.q	SP, SAVED_R3, r3
++	ld.q	SP, SAVED_R4, r4
++	ld.q	SP, SAVED_R5, r5
++	ld.q	SP, SAVED_R6, r6
++	ld.q	SP, SAVED_R18, r18
++	ld.q	SP, TLB_SAVED_R25, r25
 +
-+#
-+# Input Device Drivers
-+#
-+CONFIG_INPUT_KEYBOARD=y
-+CONFIG_KEYBOARD_ATKBD=y
-+# CONFIG_KEYBOARD_SUNKBD is not set
-+# CONFIG_KEYBOARD_LKKBD is not set
-+# CONFIG_KEYBOARD_XTKBD is not set
-+# CONFIG_KEYBOARD_NEWTON is not set
-+# CONFIG_KEYBOARD_STOWAWAY is not set
-+CONFIG_INPUT_MOUSE=y
-+CONFIG_MOUSE_PS2=y
-+CONFIG_MOUSE_PS2_ALPS=y
-+CONFIG_MOUSE_PS2_LOGIPS2PP=y
-+CONFIG_MOUSE_PS2_SYNAPTICS=y
-+CONFIG_MOUSE_PS2_LIFEBOOK=y
-+CONFIG_MOUSE_PS2_TRACKPOINT=y
-+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
-+# CONFIG_MOUSE_SERIAL is not set
-+# CONFIG_MOUSE_APPLETOUCH is not set
-+# CONFIG_MOUSE_VSXXXAA is not set
-+# CONFIG_INPUT_JOYSTICK is not set
-+# CONFIG_INPUT_TABLET is not set
-+# CONFIG_INPUT_TOUCHSCREEN is not set
-+# CONFIG_INPUT_MISC is not set
++	getcon	KCR1, SP
++	rte
++	nop /* for safety, in case the code is run on sh5-101 cut1.x */
 +
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+# CONFIG_SERIO_I8042 is not set
-+# CONFIG_SERIO_SERPORT is not set
-+# CONFIG_SERIO_PARKBD is not set
-+# CONFIG_SERIO_PCIPS2 is not set
-+CONFIG_SERIO_LIBPS2=y
-+# CONFIG_SERIO_RAW is not set
-+# CONFIG_GAMEPORT is not set
++fixup_to_invoke_general_handler:
 +
-+#
-+# Character devices
-+#
-+CONFIG_VT=y
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+# CONFIG_VT_HW_CONSOLE_BINDING is not set
-+# CONFIG_SERIAL_NONSTANDARD is not set
++	/* OK, new method.  Restore stuff that's not expected to get saved into
++	   the 'first-level' reg save area, then just fall through to setting
++	   up the registers and calling the second-level handler. */
 +
-+#
-+# Serial drivers
-+#
-+# CONFIG_SERIAL_8250 is not set
++	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
++	   r25,tr1-4 and save r6 to get into the right state.  */
 +
-+#
-+# Non-8250 serial port support
-+#
-+CONFIG_SERIAL_SH_SCI=y
-+CONFIG_SERIAL_SH_SCI_NR_UARTS=2
-+CONFIG_SERIAL_SH_SCI_CONSOLE=y
-+CONFIG_SERIAL_CORE=y
-+CONFIG_SERIAL_CORE_CONSOLE=y
-+# CONFIG_SERIAL_JSM is not set
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+# CONFIG_PRINTER is not set
-+# CONFIG_PPDEV is not set
-+# CONFIG_IPMI_HANDLER is not set
-+CONFIG_HW_RANDOM=y
-+# CONFIG_R3964 is not set
-+# CONFIG_APPLICOM is not set
++	ld.q	SP, TLB_SAVED_TR1, r3
++	ld.q	SP, TLB_SAVED_TR2, r4
++	ld.q	SP, TLB_SAVED_TR3, r5
++	ld.q	SP, TLB_SAVED_TR4, r18
++	ld.q	SP, TLB_SAVED_R25, r25
++
++	ld.q	SP, TLB_SAVED_R0, r0
++	ld.q	SP, TLB_SAVED_R1, r1
++
++	ptabs/u	r3, tr1
++	ptabs/u	r4, tr2
++	ptabs/u	r5, tr3
++	ptabs/u	r18, tr4
++
++	/* Set args for Non-debug, TLB miss class handler */
++	getcon	EXPEVT, r2
++	movi	ret_from_exception, r3
++	ori	r3, 1, r3
++	movi	EVENT_FAULT_TLB, r4
++	or	SP, ZERO, r5
++	getcon	KCR1, SP
++	pta	handle_exception, tr0
++	blink	tr0, ZERO
++
++/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
++   DOES END UP AT VBR+0x600 */
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++
++	.balign 256
++	/* VBR + 0x600 */
++
++interrupt:
++	synco	/* TAKum03020 (but probably a good idea anyway.) */
++	/* Save original stack pointer into KCR1 */
++	putcon	SP, KCR1
++
++	/* Save other original registers into reg_save_area */
++        movi  reg_save_area, SP
++	st.q	SP, SAVED_R2, r2
++	st.q	SP, SAVED_R3, r3
++	st.q	SP, SAVED_R4, r4
++	st.q	SP, SAVED_R5, r5
++	st.q	SP, SAVED_R6, r6
++	st.q	SP, SAVED_R18, r18
++	gettr	tr0, r3
++	st.q	SP, SAVED_TR0, r3
++
++	/* Set args for interrupt class handler */
++	getcon	INTEVT, r2
++	movi	ret_from_irq, r3
++	ori	r3, 1, r3
++	movi	EVENT_INTERRUPT, r4
++	or	SP, ZERO, r5
++	getcon	KCR1, SP
++	pta	handle_exception, tr0
++	blink	tr0, ZERO
++	.balign	TEXT_SIZE		/* let's waste the bare minimum */
++
++LVBR_block_end:				/* Marker. Used for total checking */
++
++	.balign 256
++LRESVEC_block:
++	/* Panic handler. Called with MMU off. Possible causes/actions:
++	 * - Reset:		Jump to program start.
++	 * - Single Step:	Turn off Single Step & return.
++	 * - Others:		Call panic handler, passing PC as arg.
++	 *			(this may need to be extended...)
++	 */
++reset_or_panic:
++	synco	/* TAKum03020 (but probably a good idea anyway.) */
++	putcon	SP, DCR
++	/* First save r0-1 and tr0, as we need to use these */
++	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
++	st.q	SP, 0, r0
++	st.q	SP, 8, r1
++	gettr	tr0, r0
++	st.q	SP, 32, r0
++
++	/* Check cause */
++	getcon	EXPEVT, r0
++	movi	RESET_CAUSE, r1
++	sub	r1, r0, r1		/* r1=0 if reset */
++	movi	_stext-CONFIG_PAGE_OFFSET, r0
++	ori	r0, 1, r0
++	ptabs	r0, tr0
++	beqi	r1, 0, tr0		/* Jump to start address if reset */
++
++	getcon	EXPEVT, r0
++	movi	DEBUGSS_CAUSE, r1
++	sub	r1, r0, r1		/* r1=0 if single step */
++	pta	single_step_panic, tr0
++	beqi	r1, 0, tr0		/* jump if single step */
++
++	/* Now jump to where we save the registers. */
++	movi	panic_stash_regs-CONFIG_PAGE_OFFSET, r1
++	ptabs	r1, tr0
++	blink	tr0, r63
++
++single_step_panic:
++	/* We are in a handler with Single Step set. We need to resume the
++	 * handler, by turning on MMU & turning off Single Step. */
++	getcon	SSR, r0
++	movi	SR_MMU, r1
++	or	r0, r1, r0
++	movi	~SR_SS, r1
++	and	r0, r1, r0
++	putcon	r0, SSR
++	/* Restore EXPEVT, as the rte won't do this */
++	getcon	PEXPEVT, r0
++	putcon	r0, EXPEVT
++	/* Restore regs */
++	ld.q	SP, 32, r0
++	ptabs	r0, tr0
++	ld.q	SP, 0, r0
++	ld.q	SP, 8, r1
++	getcon	DCR, SP
++	synco
++	rte
++
++
++	.balign	256
++debug_exception:
++	synco	/* TAKum03020 (but probably a good idea anyway.) */
++	/*
++	 * Single step/software_break_point first level handler.
++	 * Called with MMU off, so the first thing we do is enable it
++	 * by doing an rte with appropriate SSR.
++	 */
++	putcon	SP, DCR
++	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
++	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
++
++	/* With the MMU off, we are bypassing the cache, so purge any
++         * data that will be made stale by the following stores.
++         */
++	ocbp	SP, 0
++	synco
++
++	st.q	SP, 0, r0
++	st.q	SP, 8, r1
++	getcon	SPC, r0
++	st.q	SP, 16, r0
++	getcon	SSR, r0
++	st.q	SP, 24, r0
++
++	/* Enable MMU, block exceptions, set priv mode, disable single step */
++	movi	SR_MMU | SR_BL | SR_MD, r1
++	or	r0, r1, r0
++	movi	~SR_SS, r1
++	and	r0, r1, r0
++	putcon	r0, SSR
++	/* Force control to debug_exception_2 when rte is executed */
++	movi	debug_exeception_2, r0
++	ori	r0, 1, r0      /* force SHmedia, just in case */
++	putcon	r0, SPC
++	getcon	DCR, SP
++	synco
++	rte
++debug_exeception_2:
++	/* Restore saved regs */
++	putcon	SP, KCR1
++	movi	resvec_save_area, SP
++	ld.q	SP, 24, r0
++	putcon	r0, SSR
++	ld.q	SP, 16, r0
++	putcon	r0, SPC
++	ld.q	SP, 0, r0
++	ld.q	SP, 8, r1
++
++	/* Save other original registers into reg_save_area */
++        movi  reg_save_area, SP
++	st.q	SP, SAVED_R2, r2
++	st.q	SP, SAVED_R3, r3
++	st.q	SP, SAVED_R4, r4
++	st.q	SP, SAVED_R5, r5
++	st.q	SP, SAVED_R6, r6
++	st.q	SP, SAVED_R18, r18
++	gettr	tr0, r3
++	st.q	SP, SAVED_TR0, r3
 +
-+#
-+# PCMCIA character devices
-+#
-+# CONFIG_SYNCLINK_CS is not set
-+# CONFIG_CARDMAN_4000 is not set
-+# CONFIG_CARDMAN_4040 is not set
-+# CONFIG_RAW_DRIVER is not set
-+# CONFIG_TCG_TPM is not set
-+CONFIG_DEVPORT=y
-+# CONFIG_I2C is not set
++	/* Set args for debug class handler */
++	getcon	EXPEVT, r2
++	movi	ret_from_exception, r3
++	ori	r3, 1, r3
++	movi	EVENT_DEBUG, r4
++	or	SP, ZERO, r5
++	getcon	KCR1, SP
++	pta	handle_exception, tr0
++	blink	tr0, ZERO
 +
-+#
-+# SPI support
-+#
-+# CONFIG_SPI is not set
-+# CONFIG_SPI_MASTER is not set
-+# CONFIG_W1 is not set
-+CONFIG_POWER_SUPPLY=y
-+# CONFIG_POWER_SUPPLY_DEBUG is not set
-+# CONFIG_PDA_POWER is not set
-+# CONFIG_BATTERY_DS2760 is not set
-+# CONFIG_HWMON is not set
-+# CONFIG_WATCHDOG is not set
++	.balign	256
++debug_interrupt:
++	/* !!! WE COME HERE IN REAL MODE !!! */
++	/* Hook-up debug interrupt to allow various debugging options to be
++	 * hooked into its handler. */
++	/* Save original stack pointer into KCR1 */
++	synco
++	putcon	SP, KCR1
++	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
++	ocbp	SP, 0
++	ocbp	SP, 32
++	synco
 +
-+#
-+# Sonics Silicon Backplane
-+#
-+CONFIG_SSB_POSSIBLE=y
-+CONFIG_SSB=y
-+CONFIG_SSB_PCIHOST_POSSIBLE=y
-+CONFIG_SSB_PCIHOST=y
-+CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
-+# CONFIG_SSB_PCMCIAHOST is not set
-+# CONFIG_SSB_SILENT is not set
-+# CONFIG_SSB_DEBUG is not set
-+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
-+CONFIG_SSB_DRIVER_PCICORE=y
++	/* Save other original registers into reg_save_area thru real addresses */
++	st.q	SP, SAVED_R2, r2
++	st.q	SP, SAVED_R3, r3
++	st.q	SP, SAVED_R4, r4
++	st.q	SP, SAVED_R5, r5
++	st.q	SP, SAVED_R6, r6
++	st.q	SP, SAVED_R18, r18
++	gettr	tr0, r3
++	st.q	SP, SAVED_TR0, r3
 +
-+#
-+# Multifunction device drivers
-+#
-+# CONFIG_MFD_SM501 is not set
++	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
++	   them back again, so that they look like the originals
++	   as far as the real handler code is concerned. */
++	getcon	spc, r6
++	putcon	r6, pspc
++	getcon	ssr, r6
++	putcon	r6, pssr
 +
-+#
-+# Multimedia devices
-+#
-+# CONFIG_VIDEO_DEV is not set
-+# CONFIG_DVB_CORE is not set
-+# CONFIG_DAB is not set
++	! construct useful SR for handle_exception
++	movi	3, r6
++	shlli	r6, 30, r6
++	getcon	sr, r18
++	or	r18, r6, r6
++	putcon	r6, ssr
 +
-+#
-+# Graphics support
-+#
-+# CONFIG_DRM is not set
-+# CONFIG_VGASTATE is not set
-+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-+CONFIG_FB=y
-+# CONFIG_FIRMWARE_EDID is not set
-+# CONFIG_FB_DDC is not set
-+# CONFIG_FB_CFB_FILLRECT is not set
-+# CONFIG_FB_CFB_COPYAREA is not set
-+# CONFIG_FB_CFB_IMAGEBLIT is not set
-+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-+# CONFIG_FB_SYS_FILLRECT is not set
-+# CONFIG_FB_SYS_COPYAREA is not set
-+# CONFIG_FB_SYS_IMAGEBLIT is not set
-+# CONFIG_FB_SYS_FOPS is not set
-+CONFIG_FB_DEFERRED_IO=y
-+# CONFIG_FB_SVGALIB is not set
-+# CONFIG_FB_MACMODES is not set
-+# CONFIG_FB_BACKLIGHT is not set
-+# CONFIG_FB_MODE_HELPERS is not set
-+# CONFIG_FB_TILEBLITTING is not set
++	! SSR is now the current SR with the MD and MMU bits set
++	! i.e. the rte will switch back to priv mode and put
++	! the mmu back on
 +
-+#
-+# Frame buffer hardware drivers
-+#
-+# CONFIG_FB_CIRRUS is not set
-+# CONFIG_FB_PM2 is not set
-+# CONFIG_FB_CYBER2000 is not set
-+# CONFIG_FB_ASILIANT is not set
-+# CONFIG_FB_IMSTT is not set
-+# CONFIG_FB_S1D13XXX is not set
-+# CONFIG_FB_NVIDIA is not set
-+# CONFIG_FB_RIVA is not set
-+# CONFIG_FB_MATROX is not set
-+# CONFIG_FB_RADEON is not set
-+# CONFIG_FB_ATY128 is not set
-+# CONFIG_FB_ATY is not set
-+# CONFIG_FB_S3 is not set
-+# CONFIG_FB_SAVAGE is not set
-+# CONFIG_FB_SIS is not set
-+# CONFIG_FB_NEOMAGIC is not set
-+# CONFIG_FB_KYRO is not set
-+# CONFIG_FB_3DFX is not set
-+# CONFIG_FB_VOODOO1 is not set
-+# CONFIG_FB_VT8623 is not set
-+# CONFIG_FB_TRIDENT is not set
-+# CONFIG_FB_ARK is not set
-+# CONFIG_FB_PM3 is not set
-+# CONFIG_FB_VIRTUAL is not set
-+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++	! construct spc
++	movi	handle_exception, r18
++	ori	r18, 1, r18		! for safety (do we need this?)
++	putcon	r18, spc
 +
-+#
-+# Display device support
-+#
-+CONFIG_DISPLAY_SUPPORT=y
++	/* Set args for Non-debug, Not a TLB miss class handler */
 +
-+#
-+# Display hardware drivers
-+#
++	! EXPEVT==0x80 is unused, so 'steal' this value to put the
++	! debug interrupt handler in the vectoring table
++	movi	0x80, r2
++	movi	ret_from_exception, r3
++	ori	r3, 1, r3
++	movi	EVENT_FAULT_NOT_TLB, r4
 +
-+#
-+# Console display driver support
-+#
-+CONFIG_DUMMY_CONSOLE=y
-+CONFIG_FRAMEBUFFER_CONSOLE=y
-+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-+# CONFIG_FONTS is not set
-+CONFIG_FONT_8x8=y
-+CONFIG_FONT_8x16=y
-+CONFIG_LOGO=y
-+CONFIG_LOGO_LINUX_MONO=y
-+CONFIG_LOGO_LINUX_VGA16=y
-+CONFIG_LOGO_LINUX_CLUT224=y
-+CONFIG_LOGO_SUPERH_MONO=y
-+CONFIG_LOGO_SUPERH_VGA16=y
-+CONFIG_LOGO_SUPERH_CLUT224=y
++	or	SP, ZERO, r5
++	movi	CONFIG_PAGE_OFFSET, r6
++	add	r6, r5, r5
++	getcon	KCR1, SP
 +
-+#
-+# Sound
-+#
-+CONFIG_SOUND=y
++	synco	! for safety
++	rte	! -> handle_exception, switch back to priv mode again
 +
-+#
-+# Advanced Linux Sound Architecture
-+#
-+# CONFIG_SND is not set
++LRESVEC_block_end:			/* Marker. Unused. */
 +
-+#
-+# Open Sound System
-+#
-+CONFIG_SOUND_PRIME=y
-+# CONFIG_SOUND_TRIDENT is not set
-+# CONFIG_SOUND_MSNDCLAS is not set
-+# CONFIG_SOUND_MSNDPIN is not set
-+CONFIG_HID_SUPPORT=y
-+CONFIG_HID=y
-+# CONFIG_HID_DEBUG is not set
-+# CONFIG_HIDRAW is not set
++	.balign	TEXT_SIZE
 +
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=y
-+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-+# CONFIG_HID_FF is not set
-+# CONFIG_USB_HIDDEV is not set
-+CONFIG_USB_SUPPORT=y
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB_ARCH_HAS_EHCI=y
-+CONFIG_USB=y
-+CONFIG_USB_DEBUG=y
++/*
++ * Second level handler for VBR-based exceptions. Pre-handler.
++ * Common to all stack-frame-sensitive handlers.
++ *
++ * Inputs:
++ * (KCR0) Current [current task union]
++ * (KCR1) Original SP
++ * (r2)   INTEVT/EXPEVT
++ * (r3)   appropriate return address
++ * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3 = debug)
++ * (r5)   Pointer to reg_save_area
++ * (SP)   Original SP
++ *
++ * Available registers:
++ * (r6)
++ * (r18)
++ * (tr0)
++ *
++ */
++handle_exception:
++	/* Common 2nd level handler. */
 +
-+#
-+# Miscellaneous USB options
-+#
-+CONFIG_USB_DEVICEFS=y
-+# CONFIG_USB_DEVICE_CLASS is not set
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_OTG is not set
++	/* First thing we need an appropriate stack pointer */
++	getcon	SSR, r6
++	shlri	r6, 30, r6
++	andi	r6, 1, r6
++	pta	stack_ok, tr0
++	bne	r6, ZERO, tr0		/* Original stack pointer is fine */
 +
-+#
-+# USB Host Controller Drivers
-+#
-+CONFIG_USB_EHCI_HCD=y
-+# CONFIG_USB_EHCI_SPLIT_ISO is not set
-+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
-+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-+# CONFIG_USB_ISP116X_HCD is not set
-+# CONFIG_USB_OHCI_HCD is not set
-+# CONFIG_USB_UHCI_HCD is not set
-+# CONFIG_USB_SL811_HCD is not set
-+# CONFIG_USB_R8A66597_HCD is not set
++	/* Set stack pointer for user fault */
++	getcon	KCR0, SP
++	movi	THREAD_SIZE, r6		/* Point to the end */
++	add	SP, r6, SP
 +
-+#
-+# USB Device Class drivers
-+#
-+# CONFIG_USB_ACM is not set
-+CONFIG_USB_PRINTER=y
++stack_ok:
 +
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
-+#
++/* DEBUG : check for underflow/overflow of the kernel stack */
++	pta	no_underflow, tr0
++	getcon  KCR0, r6
++	movi	1024, r18
++	add	r6, r18, r6
++	bge	SP, r6, tr0 	! ? below 1k from bottom of stack : danger zone
 +
-+#
-+# may also be needed; see USB_STORAGE Help for more information
-+#
-+CONFIG_USB_STORAGE=y
-+# CONFIG_USB_STORAGE_DEBUG is not set
-+# CONFIG_USB_STORAGE_DATAFAB is not set
-+# CONFIG_USB_STORAGE_FREECOM is not set
-+# CONFIG_USB_STORAGE_ISD200 is not set
-+# CONFIG_USB_STORAGE_DPCM is not set
-+# CONFIG_USB_STORAGE_USBAT is not set
-+# CONFIG_USB_STORAGE_SDDR09 is not set
-+# CONFIG_USB_STORAGE_SDDR55 is not set
-+# CONFIG_USB_STORAGE_JUMPSHOT is not set
-+# CONFIG_USB_STORAGE_ALAUDA is not set
-+# CONFIG_USB_STORAGE_ONETOUCH is not set
-+# CONFIG_USB_STORAGE_KARMA is not set
-+# CONFIG_USB_LIBUSUAL is not set
++/* Just panic to cause a crash. */
++bad_sp:
++	ld.b	r63, 0, r6
++	nop
 +
-+#
-+# USB Imaging devices
-+#
-+# CONFIG_USB_MDC800 is not set
-+# CONFIG_USB_MICROTEK is not set
-+CONFIG_USB_MON=y
++no_underflow:
++	pta	bad_sp, tr0
++	getcon	kcr0, r6
++	movi	THREAD_SIZE, r18
++	add	r18, r6, r6
++	bgt	SP, r6, tr0	! sp above the stack
 +
-+#
-+# USB port drivers
-+#
-+# CONFIG_USB_USS720 is not set
++	/* Make some room for the BASIC frame. */
++	movi	-(FRAME_SIZE), r6
++	add	SP, r6, SP
 +
-+#
-+# USB Serial Converter support
-+#
-+# CONFIG_USB_SERIAL is not set
++/* Could do this with no stalling if we had another spare register, but the
++   code below will be OK. */
++	ld.q	r5, SAVED_R2, r6
++	ld.q	r5, SAVED_R3, r18
++	st.q	SP, FRAME_R(2), r6
++	ld.q	r5, SAVED_R4, r6
++	st.q	SP, FRAME_R(3), r18
++	ld.q	r5, SAVED_R5, r18
++	st.q	SP, FRAME_R(4), r6
++	ld.q	r5, SAVED_R6, r6
++	st.q	SP, FRAME_R(5), r18
++	ld.q	r5, SAVED_R18, r18
++	st.q	SP, FRAME_R(6), r6
++	ld.q	r5, SAVED_TR0, r6
++	st.q	SP, FRAME_R(18), r18
++	st.q	SP, FRAME_T(0), r6
 +
-+#
-+# USB Miscellaneous drivers
-+#
-+# CONFIG_USB_EMI62 is not set
-+# CONFIG_USB_EMI26 is not set
-+# CONFIG_USB_ADUTUX is not set
-+# CONFIG_USB_AUERSWALD is not set
-+# CONFIG_USB_RIO500 is not set
-+# CONFIG_USB_LEGOTOWER is not set
-+# CONFIG_USB_LCD is not set
-+# CONFIG_USB_BERRY_CHARGE is not set
-+# CONFIG_USB_LED is not set
-+# CONFIG_USB_CYPRESS_CY7C63 is not set
-+# CONFIG_USB_CYTHERM is not set
-+# CONFIG_USB_PHIDGET is not set
-+# CONFIG_USB_IDMOUSE is not set
-+# CONFIG_USB_FTDI_ELAN is not set
-+# CONFIG_USB_APPLEDISPLAY is not set
-+# CONFIG_USB_SISUSBVGA is not set
-+# CONFIG_USB_LD is not set
-+# CONFIG_USB_TRANCEVIBRATOR is not set
-+# CONFIG_USB_IOWARRIOR is not set
-+# CONFIG_USB_TEST is not set
++	/* Keep old SP around */
++	getcon	KCR1, r6
 +
-+#
-+# USB DSL modem support
-+#
++	/* Save the rest of the general purpose registers */
++	st.q	SP, FRAME_R(0), r0
++	st.q	SP, FRAME_R(1), r1
++	st.q	SP, FRAME_R(7), r7
++	st.q	SP, FRAME_R(8), r8
++	st.q	SP, FRAME_R(9), r9
++	st.q	SP, FRAME_R(10), r10
++	st.q	SP, FRAME_R(11), r11
++	st.q	SP, FRAME_R(12), r12
++	st.q	SP, FRAME_R(13), r13
++	st.q	SP, FRAME_R(14), r14
 +
-+#
-+# USB Gadget Support
-+#
-+# CONFIG_USB_GADGET is not set
-+# CONFIG_MMC is not set
-+CONFIG_NEW_LEDS=y
-+CONFIG_LEDS_CLASS=y
++	/* SP is somewhere else */
++	st.q	SP, FRAME_R(15), r6
 +
-+#
-+# LED drivers
-+#
++	st.q	SP, FRAME_R(16), r16
++	st.q	SP, FRAME_R(17), r17
++	/* r18 is saved earlier. */
++	st.q	SP, FRAME_R(19), r19
++	st.q	SP, FRAME_R(20), r20
++	st.q	SP, FRAME_R(21), r21
++	st.q	SP, FRAME_R(22), r22
++	st.q	SP, FRAME_R(23), r23
++	st.q	SP, FRAME_R(24), r24
++	st.q	SP, FRAME_R(25), r25
++	st.q	SP, FRAME_R(26), r26
++	st.q	SP, FRAME_R(27), r27
++	st.q	SP, FRAME_R(28), r28
++	st.q	SP, FRAME_R(29), r29
++	st.q	SP, FRAME_R(30), r30
++	st.q	SP, FRAME_R(31), r31
++	st.q	SP, FRAME_R(32), r32
++	st.q	SP, FRAME_R(33), r33
++	st.q	SP, FRAME_R(34), r34
++	st.q	SP, FRAME_R(35), r35
++	st.q	SP, FRAME_R(36), r36
++	st.q	SP, FRAME_R(37), r37
++	st.q	SP, FRAME_R(38), r38
++	st.q	SP, FRAME_R(39), r39
++	st.q	SP, FRAME_R(40), r40
++	st.q	SP, FRAME_R(41), r41
++	st.q	SP, FRAME_R(42), r42
++	st.q	SP, FRAME_R(43), r43
++	st.q	SP, FRAME_R(44), r44
++	st.q	SP, FRAME_R(45), r45
++	st.q	SP, FRAME_R(46), r46
++	st.q	SP, FRAME_R(47), r47
++	st.q	SP, FRAME_R(48), r48
++	st.q	SP, FRAME_R(49), r49
++	st.q	SP, FRAME_R(50), r50
++	st.q	SP, FRAME_R(51), r51
++	st.q	SP, FRAME_R(52), r52
++	st.q	SP, FRAME_R(53), r53
++	st.q	SP, FRAME_R(54), r54
++	st.q	SP, FRAME_R(55), r55
++	st.q	SP, FRAME_R(56), r56
++	st.q	SP, FRAME_R(57), r57
++	st.q	SP, FRAME_R(58), r58
++	st.q	SP, FRAME_R(59), r59
++	st.q	SP, FRAME_R(60), r60
++	st.q	SP, FRAME_R(61), r61
++	st.q	SP, FRAME_R(62), r62
 +
-+#
-+# LED Triggers
-+#
-+# CONFIG_LEDS_TRIGGERS is not set
-+# CONFIG_INFINIBAND is not set
-+# CONFIG_RTC_CLASS is not set
-+# CONFIG_AUXDISPLAY is not set
++	/*
++	 * Save the S* registers.
++	 */
++	getcon	SSR, r61
++	st.q	SP, FRAME_S(FSSR), r61
++	getcon	SPC, r62
++	st.q	SP, FRAME_S(FSPC), r62
++	movi	-1, r62			/* Reset syscall_nr */
++	st.q	SP, FRAME_S(FSYSCALL_ID), r62
 +
-+#
-+# Userspace I/O
-+#
-+# CONFIG_UIO is not set
++	/* Save the rest of the target registers */
++	gettr	tr1, r6
++	st.q	SP, FRAME_T(1), r6
++	gettr	tr2, r6
++	st.q	SP, FRAME_T(2), r6
++	gettr	tr3, r6
++	st.q	SP, FRAME_T(3), r6
++	gettr	tr4, r6
++	st.q	SP, FRAME_T(4), r6
++	gettr	tr5, r6
++	st.q	SP, FRAME_T(5), r6
++	gettr	tr6, r6
++	st.q	SP, FRAME_T(6), r6
++	gettr	tr7, r6
++	st.q	SP, FRAME_T(7), r6
 +
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+CONFIG_EXT2_FS_XATTR=y
-+CONFIG_EXT2_FS_POSIX_ACL=y
-+# CONFIG_EXT2_FS_SECURITY is not set
-+# CONFIG_EXT2_FS_XIP is not set
-+CONFIG_EXT3_FS=y
-+CONFIG_EXT3_FS_XATTR=y
-+CONFIG_EXT3_FS_POSIX_ACL=y
-+# CONFIG_EXT3_FS_SECURITY is not set
-+# CONFIG_EXT4DEV_FS is not set
-+CONFIG_JBD=y
-+CONFIG_FS_MBCACHE=y
-+# CONFIG_REISERFS_FS is not set
-+# CONFIG_JFS_FS is not set
-+CONFIG_FS_POSIX_ACL=y
-+# CONFIG_XFS_FS is not set
-+# CONFIG_GFS2_FS is not set
-+# CONFIG_OCFS2_FS is not set
-+CONFIG_MINIX_FS=y
-+# CONFIG_ROMFS_FS is not set
-+CONFIG_INOTIFY=y
-+CONFIG_INOTIFY_USER=y
-+# CONFIG_QUOTA is not set
-+CONFIG_DNOTIFY=y
-+# CONFIG_AUTOFS_FS is not set
-+CONFIG_AUTOFS4_FS=y
-+# CONFIG_FUSE_FS is not set
-+CONFIG_GENERIC_ACL=y
++	! setup FP so that unwinder can wind back through nested kernel mode
++	! exceptions
++	add	SP, ZERO, r14
 +
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=y
-+# CONFIG_JOLIET is not set
-+# CONFIG_ZISOFS is not set
-+# CONFIG_UDF_FS is not set
++#ifdef CONFIG_POOR_MANS_STRACE
++	/* We've pushed all the registers now, so only r2-r4 hold anything
++	 * useful. Move them into callee save registers */
++	or	r2, ZERO, r28
++	or	r3, ZERO, r29
++	or	r4, ZERO, r30
 +
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=y
-+CONFIG_MSDOS_FS=y
-+CONFIG_VFAT_FS=y
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+CONFIG_NTFS_FS=y
-+CONFIG_NTFS_DEBUG=y
-+CONFIG_NTFS_RW=y
++	/* Preserve r2 as the event code */
++	movi	evt_debug, r3
++	ori	r3, 1, r3
++	ptabs	r3, tr0
 +
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+# CONFIG_PROC_KCORE is not set
-+CONFIG_PROC_SYSCTL=y
-+CONFIG_SYSFS=y
-+CONFIG_TMPFS=y
-+CONFIG_TMPFS_POSIX_ACL=y
-+CONFIG_HUGETLBFS=y
-+CONFIG_HUGETLB_PAGE=y
-+# CONFIG_CONFIGFS_FS is not set
++	or	SP, ZERO, r6
++	getcon	TRA, r5
++	blink	tr0, LINK
 +
-+#
-+# Miscellaneous filesystems
-+#
-+# CONFIG_ADFS_FS is not set
-+# CONFIG_AFFS_FS is not set
-+# CONFIG_HFS_FS is not set
-+# CONFIG_HFSPLUS_FS is not set
-+# CONFIG_BEFS_FS is not set
-+# CONFIG_BFS_FS is not set
-+# CONFIG_EFS_FS is not set
-+# CONFIG_CRAMFS is not set
-+# CONFIG_VXFS_FS is not set
-+# CONFIG_HPFS_FS is not set
-+# CONFIG_QNX4FS_FS is not set
-+# CONFIG_SYSV_FS is not set
-+# CONFIG_UFS_FS is not set
-+CONFIG_NETWORK_FILESYSTEMS=y
-+CONFIG_NFS_FS=y
-+CONFIG_NFS_V3=y
-+# CONFIG_NFS_V3_ACL is not set
-+# CONFIG_NFS_V4 is not set
-+# CONFIG_NFS_DIRECTIO is not set
-+CONFIG_NFSD=y
-+CONFIG_NFSD_V3=y
-+# CONFIG_NFSD_V3_ACL is not set
-+# CONFIG_NFSD_V4 is not set
-+CONFIG_NFSD_TCP=y
-+CONFIG_ROOT_NFS=y
-+CONFIG_LOCKD=y
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=y
-+CONFIG_NFS_COMMON=y
-+CONFIG_SUNRPC=y
-+# CONFIG_SUNRPC_BIND34 is not set
-+# CONFIG_RPCSEC_GSS_KRB5 is not set
-+# CONFIG_RPCSEC_GSS_SPKM3 is not set
-+# CONFIG_SMB_FS is not set
-+# CONFIG_CIFS is not set
-+# CONFIG_NCP_FS is not set
-+# CONFIG_CODA_FS is not set
-+# CONFIG_AFS_FS is not set
++	or	r28, ZERO, r2
++	or	r29, ZERO, r3
++	or	r30, ZERO, r4
++#endif
 +
-+#
-+# Partition Types
-+#
-+# CONFIG_PARTITION_ADVANCED is not set
-+CONFIG_MSDOS_PARTITION=y
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="iso8859-1"
-+CONFIG_NLS_CODEPAGE_437=y
-+# CONFIG_NLS_CODEPAGE_737 is not set
-+# CONFIG_NLS_CODEPAGE_775 is not set
-+# CONFIG_NLS_CODEPAGE_850 is not set
-+# CONFIG_NLS_CODEPAGE_852 is not set
-+# CONFIG_NLS_CODEPAGE_855 is not set
-+# CONFIG_NLS_CODEPAGE_857 is not set
-+# CONFIG_NLS_CODEPAGE_860 is not set
-+# CONFIG_NLS_CODEPAGE_861 is not set
-+# CONFIG_NLS_CODEPAGE_862 is not set
-+# CONFIG_NLS_CODEPAGE_863 is not set
-+# CONFIG_NLS_CODEPAGE_864 is not set
-+# CONFIG_NLS_CODEPAGE_865 is not set
-+# CONFIG_NLS_CODEPAGE_866 is not set
-+# CONFIG_NLS_CODEPAGE_869 is not set
-+# CONFIG_NLS_CODEPAGE_936 is not set
-+# CONFIG_NLS_CODEPAGE_950 is not set
-+# CONFIG_NLS_CODEPAGE_932 is not set
-+# CONFIG_NLS_CODEPAGE_949 is not set
-+# CONFIG_NLS_CODEPAGE_874 is not set
-+# CONFIG_NLS_ISO8859_8 is not set
-+# CONFIG_NLS_CODEPAGE_1250 is not set
-+# CONFIG_NLS_CODEPAGE_1251 is not set
-+CONFIG_NLS_ASCII=y
-+CONFIG_NLS_ISO8859_1=y
-+# CONFIG_NLS_ISO8859_2 is not set
-+# CONFIG_NLS_ISO8859_3 is not set
-+# CONFIG_NLS_ISO8859_4 is not set
-+# CONFIG_NLS_ISO8859_5 is not set
-+# CONFIG_NLS_ISO8859_6 is not set
-+# CONFIG_NLS_ISO8859_7 is not set
-+# CONFIG_NLS_ISO8859_9 is not set
-+# CONFIG_NLS_ISO8859_13 is not set
-+# CONFIG_NLS_ISO8859_14 is not set
-+CONFIG_NLS_ISO8859_15=y
-+# CONFIG_NLS_KOI8_R is not set
-+# CONFIG_NLS_KOI8_U is not set
-+CONFIG_NLS_UTF8=y
-+# CONFIG_DLM is not set
-+# CONFIG_INSTRUMENTATION is not set
++	/* For syscall and debug race condition, get TRA now */
++	getcon	TRA, r5
 +
-+#
-+# Kernel hacking
-+#
-+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_ENABLE_WARN_DEPRECATED=y
-+# CONFIG_ENABLE_MUST_CHECK is not set
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_UNUSED_SYMBOLS=y
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_HEADERS_CHECK is not set
-+CONFIG_DEBUG_KERNEL=y
-+# CONFIG_DEBUG_SHIRQ is not set
-+CONFIG_DETECT_SOFTLOCKUP=y
-+# CONFIG_SCHED_DEBUG is not set
-+# CONFIG_SCHEDSTATS is not set
-+CONFIG_TIMER_STATS=y
-+# CONFIG_SLUB_DEBUG_ON is not set
-+CONFIG_DEBUG_PREEMPT=y
-+# CONFIG_DEBUG_RT_MUTEXES is not set
-+# CONFIG_RT_MUTEX_TESTER is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_MUTEXES is not set
-+# CONFIG_DEBUG_LOCK_ALLOC is not set
-+# CONFIG_PROVE_LOCKING is not set
-+# CONFIG_LOCK_STAT is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+CONFIG_DEBUG_BUGVERBOSE=y
-+CONFIG_DEBUG_INFO=y
-+# CONFIG_DEBUG_VM is not set
-+# CONFIG_DEBUG_LIST is not set
-+# CONFIG_DEBUG_SG is not set
-+# CONFIG_FRAME_POINTER is not set
-+# CONFIG_FORCED_INLINING is not set
-+# CONFIG_BOOT_PRINTK_DELAY is not set
-+# CONFIG_RCU_TORTURE_TEST is not set
-+# CONFIG_FAULT_INJECTION is not set
-+# CONFIG_SAMPLES is not set
-+CONFIG_SH_STANDARD_BIOS=y
-+# CONFIG_EARLY_SCIF_CONSOLE is not set
-+# CONFIG_EARLY_PRINTK is not set
-+# CONFIG_DEBUG_BOOTMEM is not set
-+CONFIG_DEBUG_STACKOVERFLOW=y
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_4KSTACKS is not set
-+# CONFIG_IRQSTACKS is not set
-+# CONFIG_SH_KGDB is not set
++	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
++	 * Also set FD, to catch FPU usage in the kernel.
++	 *
++	 * benedict.gaster at superh.com 29/07/2002
++	 *
++	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
++	 * same time change BL from 1->0, as any pending interrupt of a level
++	 * higher than the previous value of IMASK will leak through and be
++	 * taken unexpectedly.
++	 *
++	 * To avoid this we raise the IMASK and then issue another PUTCON to
++	 * enable interrupts.
++         */
++	getcon	SR, r6
++	movi	SR_IMASK | SR_FD, r7
++	or	r6, r7, r6
++	putcon	r6, SR
++	movi	SR_UNBLOCK_EXC, r7
++	and	r6, r7, r6
++	putcon	r6, SR
 +
-+#
-+# Security options
-+#
-+# CONFIG_KEYS is not set
-+# CONFIG_SECURITY is not set
-+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-+CONFIG_CRYPTO=y
-+CONFIG_CRYPTO_ALGAPI=y
-+# CONFIG_CRYPTO_MANAGER is not set
-+# CONFIG_CRYPTO_HMAC is not set
-+# CONFIG_CRYPTO_XCBC is not set
-+# CONFIG_CRYPTO_NULL is not set
-+# CONFIG_CRYPTO_MD4 is not set
-+CONFIG_CRYPTO_MD5=y
-+# CONFIG_CRYPTO_SHA1 is not set
-+# CONFIG_CRYPTO_SHA256 is not set
-+# CONFIG_CRYPTO_SHA512 is not set
-+# CONFIG_CRYPTO_WP512 is not set
-+# CONFIG_CRYPTO_TGR192 is not set
-+# CONFIG_CRYPTO_GF128MUL is not set
-+# CONFIG_CRYPTO_ECB is not set
-+# CONFIG_CRYPTO_CBC is not set
-+# CONFIG_CRYPTO_PCBC is not set
-+# CONFIG_CRYPTO_LRW is not set
-+# CONFIG_CRYPTO_XTS is not set
-+# CONFIG_CRYPTO_CRYPTD is not set
-+CONFIG_CRYPTO_DES=y
-+# CONFIG_CRYPTO_FCRYPT is not set
-+# CONFIG_CRYPTO_BLOWFISH is not set
-+# CONFIG_CRYPTO_TWOFISH is not set
-+# CONFIG_CRYPTO_SERPENT is not set
-+# CONFIG_CRYPTO_AES is not set
-+# CONFIG_CRYPTO_CAST5 is not set
-+# CONFIG_CRYPTO_CAST6 is not set
-+# CONFIG_CRYPTO_TEA is not set
-+# CONFIG_CRYPTO_ARC4 is not set
-+# CONFIG_CRYPTO_KHAZAD is not set
-+# CONFIG_CRYPTO_ANUBIS is not set
-+# CONFIG_CRYPTO_SEED is not set
-+# CONFIG_CRYPTO_DEFLATE is not set
-+# CONFIG_CRYPTO_MICHAEL_MIC is not set
-+# CONFIG_CRYPTO_CRC32C is not set
-+# CONFIG_CRYPTO_CAMELLIA is not set
-+# CONFIG_CRYPTO_TEST is not set
-+# CONFIG_CRYPTO_AUTHENC is not set
-+CONFIG_CRYPTO_HW=y
 +
-+#
-+# Library routines
-+#
-+CONFIG_BITREVERSE=y
-+# CONFIG_CRC_CCITT is not set
-+# CONFIG_CRC16 is not set
-+# CONFIG_CRC_ITU_T is not set
-+CONFIG_CRC32=y
-+# CONFIG_CRC7 is not set
-+# CONFIG_LIBCRC32C is not set
-+CONFIG_PLIST=y
-+CONFIG_HAS_IOMEM=y
-+CONFIG_HAS_IOPORT=y
-+CONFIG_HAS_DMA=y
-diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
-index a5e37db..240a1ce 100644
---- a/arch/sh/configs/se7712_defconfig
-+++ b/arch/sh/configs/se7712_defconfig
-@@ -237,7 +237,7 @@ CONFIG_CPU_HAS_SR_RB=y
- CONFIG_SH_TMU=y
- CONFIG_SH_TIMER_IRQ=16
- # CONFIG_NO_IDLE_HZ is not set
--CONFIG_SH_PCLK_FREQ=33333333
-+CONFIG_SH_PCLK_FREQ=66666666
- 
- #
- # CPU Frequency scaling
-diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
-index 4e711a0..0193636 100644
---- a/arch/sh/drivers/dma/Kconfig
-+++ b/arch/sh/drivers/dma/Kconfig
-@@ -12,7 +12,7 @@ config SH_DMA
- config NR_ONCHIP_DMA_CHANNELS
- 	int
- 	depends on SH_DMA
--	default "6" if CPU_SUBTYPE_SH7720
-+	default "6" if CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721
- 	default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R
- 	default "12" if CPU_SUBTYPE_SH7780
- 	default "4"
-diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
-index 958bac1..5c33597 100644
---- a/arch/sh/drivers/dma/dma-sh.c
-+++ b/arch/sh/drivers/dma/dma-sh.c
-@@ -25,6 +25,7 @@ static int dmte_irq_map[] = {
- 	DMTE2_IRQ,
- 	DMTE3_IRQ,
- #if defined(CONFIG_CPU_SUBTYPE_SH7720)  ||	\
-+    defined(CONFIG_CPU_SUBTYPE_SH7721)  ||	\
-     defined(CONFIG_CPU_SUBTYPE_SH7751R) ||	\
-     defined(CONFIG_CPU_SUBTYPE_SH7760)  ||	\
-     defined(CONFIG_CPU_SUBTYPE_SH7709)  ||	\
-@@ -203,6 +204,7 @@ static int sh_dmac_get_dma_residue(struct dma_channel *chan)
- }
- 
- #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-+    defined(CONFIG_CPU_SUBTYPE_SH7721) || \
-     defined(CONFIG_CPU_SUBTYPE_SH7780)
- #define dmaor_read_reg()	ctrl_inw(DMAOR)
- #define dmaor_write_reg(data)	ctrl_outw(data, DMAOR)
-diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
-index eebcd47..51b57c0 100644
---- a/arch/sh/drivers/dma/dma-sysfs.c
-+++ b/arch/sh/drivers/dma/dma-sysfs.c
-@@ -19,7 +19,7 @@
- #include <asm/dma.h>
- 
- static struct sysdev_class dma_sysclass = {
--	set_kset_name("dma"),
-+	.name = "dma",
- };
- EXPORT_SYMBOL(dma_sysclass);
- 
-diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile
-index fba6b5b..0718805 100644
---- a/arch/sh/drivers/pci/Makefile
-+++ b/arch/sh/drivers/pci/Makefile
-@@ -7,16 +7,19 @@ obj-$(CONFIG_PCI_AUTO)			+= pci-auto.o
- 
- obj-$(CONFIG_CPU_SUBTYPE_SH7751)	+= pci-sh7751.o ops-sh4.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7751R)	+= pci-sh7751.o ops-sh4.o
-+obj-$(CONFIG_CPU_SUBTYPE_SH7763)	+= pci-sh7780.o ops-sh4.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7780)	+= pci-sh7780.o ops-sh4.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7785)	+= pci-sh7780.o ops-sh4.o
-+obj-$(CONFIG_CPU_SH5)			+= pci-sh5.o ops-sh5.o
- 
--obj-$(CONFIG_SH_DREAMCAST)		+= ops-dreamcast.o fixups-dreamcast.o \
--					   dma-dreamcast.o
-+obj-$(CONFIG_SH_DREAMCAST)		+= ops-dreamcast.o fixups-dreamcast.o
- obj-$(CONFIG_SH_SECUREEDGE5410)		+= ops-snapgear.o
- obj-$(CONFIG_SH_RTS7751R2D)		+= ops-rts7751r2d.o fixups-rts7751r2d.o
- obj-$(CONFIG_SH_SH03)			+= ops-sh03.o fixups-sh03.o
- obj-$(CONFIG_SH_HIGHLANDER)		+= ops-r7780rp.o fixups-r7780rp.o
-+obj-$(CONFIG_SH_SDK7780)		+= ops-sdk7780.o fixups-sdk7780.o
- obj-$(CONFIG_SH_TITAN)			+= ops-titan.o
- obj-$(CONFIG_SH_LANDISK)		+= ops-landisk.o
- obj-$(CONFIG_SH_LBOX_RE2)		+= ops-lboxre2.o fixups-lboxre2.o
- obj-$(CONFIG_SH_7780_SOLUTION_ENGINE)	+= ops-se7780.o fixups-se7780.o
-+obj-$(CONFIG_SH_CAYMAN)			+= ops-cayman.o
-diff --git a/arch/sh/drivers/pci/dma-dreamcast.c b/arch/sh/drivers/pci/dma-dreamcast.c
-deleted file mode 100644
-index 888a340..0000000
---- a/arch/sh/drivers/pci/dma-dreamcast.c
-+++ /dev/null
-@@ -1,70 +0,0 @@
--/*
-- * arch/sh/drivers/pci/dma-dreamcast.c
-- *
-- * PCI DMA support for the Sega Dreamcast
-- *
-- * Copyright (C) 2001, 2002  M. R. Brown
-- * Copyright (C) 2002, 2003  Paul Mundt
-- *
-- * This file originally bore the message (with enclosed-$):
-- *	Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp
-- *	Dreamcast PCI: Supports SEGA Broadband Adaptor only.
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/param.h>
--#include <linux/interrupt.h>
--#include <linux/init.h>
--#include <linux/irq.h>
--#include <linux/pci.h>
--#include <linux/dma-mapping.h>
--#include <linux/device.h>
--
--#include <asm/io.h>
--#include <asm/irq.h>
--#include <asm/mach/pci.h>
--
--static int gapspci_dma_used = 0;
--
--void *dreamcast_consistent_alloc(struct device *dev, size_t size,
--				 dma_addr_t *dma_handle, gfp_t flag)
--{
--	unsigned long buf;
--
--	if (dev && dev->bus != &pci_bus_type)
--		return NULL;
--
--	if (gapspci_dma_used + size > GAPSPCI_DMA_SIZE)
--		return ERR_PTR(-EINVAL);
--
--	buf = GAPSPCI_DMA_BASE + gapspci_dma_used;
--
--	gapspci_dma_used = PAGE_ALIGN(gapspci_dma_used+size);
--
--	*dma_handle = (dma_addr_t)buf;
--
--	buf = P2SEGADDR(buf);
--
--	/* Flush the dcache before we hand off the buffer */
--	__flush_purge_region((void *)buf, size);
--
--	return (void *)buf;
--}
--
--int dreamcast_consistent_free(struct device *dev, size_t size,
--			 void *vaddr, dma_addr_t dma_handle)
--{
--	if (dev && dev->bus != &pci_bus_type)
--		return -EINVAL;
--
--	/* XXX */
--	gapspci_dma_used = 0;
--
--	return 0;
--}
--
-diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c
-index 6f53f82..c446993 100644
---- a/arch/sh/drivers/pci/fixups-dreamcast.c
-+++ b/arch/sh/drivers/pci/fixups-dreamcast.c
-@@ -22,6 +22,7 @@
- #include <linux/init.h>
- #include <linux/irq.h>
- #include <linux/pci.h>
-+#include <linux/dma-mapping.h>
- 
- #include <asm/io.h>
- #include <asm/irq.h>
-@@ -40,6 +41,15 @@ static void __init gapspci_fixup_resources(struct pci_dev *dev)
- 		 */
- 		dev->resource[1].start	= p->io_resource->start  + 0x100;
- 		dev->resource[1].end	= dev->resource[1].start + 0x200 - 1;
-+		/*
-+		 * Redirect dma memory allocations to special memory window.
-+		 */
-+		BUG_ON(!dma_declare_coherent_memory(&dev->dev,
-+						GAPSPCI_DMA_BASE,
-+						GAPSPCI_DMA_BASE,
-+						GAPSPCI_DMA_SIZE,
-+						DMA_MEMORY_MAP |
-+						DMA_MEMORY_EXCLUSIVE));
- 		break;
- 	default:
- 		printk("PCI: Failed resource fixup\n");
-diff --git a/arch/sh/drivers/pci/fixups-sdk7780.c b/arch/sh/drivers/pci/fixups-sdk7780.c
-new file mode 100644
-index 0000000..2f88630
---- /dev/null
-+++ b/arch/sh/drivers/pci/fixups-sdk7780.c
-@@ -0,0 +1,59 @@
++	/* Now call the appropriate 3rd level handler */
++	or	r3, ZERO, LINK
++	movi	trap_jtable, r3
++	shlri	r2, 3, r2
++	ldx.l	r2, r3, r3
++	shlri	r2, 2, r2
++	ptabs	r3, tr0
++	or	SP, ZERO, r3
++	blink	tr0, ZERO
++
 +/*
-+ * arch/sh/drivers/pci/fixups-sdk7780.c
++ * Second level handler for VBR-based exceptions. Post-handlers.
 + *
-+ * PCI fixups for the SDK7780SE03
++ * Post-handlers for interrupts (ret_from_irq), exceptions
++ * (ret_from_exception) and common reentrance doors (restore_all
++ * to get back to the original context, ret_from_syscall loop to
++ * check kernel exiting).
 + *
-+ * Copyright (C) 2003  Lineo uSolutions, Inc.
-+ * Copyright (C) 2004 - 2006  Paul Mundt
++ * ret_with_reschedule and work_notifysig are inner labels of
++ * the ret_from_syscall loop.
++ *
++ * Common to all stack-frame-sensitive handlers.
++ *
++ * Inputs:
++ * (SP)   struct pt_regs *, original register's frame pointer (basic)
 + *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
 + */
-+#include <linux/pci.h>
-+#include "pci-sh4.h"
-+#include <asm/io.h>
++	.global ret_from_irq
++ret_from_irq:
++#ifdef CONFIG_POOR_MANS_STRACE
++	pta	evt_debug_ret_from_irq, tr0
++	ori	SP, 0, r2
++	blink	tr0, LINK
++#endif
++	ld.q	SP, FRAME_S(FSSR), r6
++	shlri	r6, 30, r6
++	andi	r6, 1, r6
++	pta	resume_kernel, tr0
++	bne	r6, ZERO, tr0		/* no further checks */
++	STI()
++	pta	ret_with_reschedule, tr0
++	blink	tr0, ZERO		/* Do not check softirqs */
 +
-+int pci_fixup_pcic(void)
-+{
-+	ctrl_outl(0x00000001, SH7780_PCI_VCR2);
++	.global ret_from_exception
++ret_from_exception:
++	preempt_stop()
 +
-+	/* Enable all interrupts, so we know what to fix */
-+	pci_write_reg(0x0000C3FF, SH7780_PCIIMR);
-+	pci_write_reg(0x0000380F, SH7780_PCIAINTM);
++#ifdef CONFIG_POOR_MANS_STRACE
++	pta	evt_debug_ret_from_exc, tr0
++	ori	SP, 0, r2
++	blink	tr0, LINK
++#endif
 +
-+	/* Set up standard PCI config registers */
-+	pci_write_reg(0xFB00, SH7780_PCISTATUS);
-+	pci_write_reg(0x0047, SH7780_PCICMD);
-+	pci_write_reg(0x00, SH7780_PCIPIF);
-+	pci_write_reg(0x00, SH7780_PCISUB);
-+	pci_write_reg(0x06, SH7780_PCIBCC);
-+	pci_write_reg(0x1912, SH7780_PCISVID);
-+	pci_write_reg(0x0001, SH7780_PCISID);
++	ld.q	SP, FRAME_S(FSSR), r6
++	shlri	r6, 30, r6
++	andi	r6, 1, r6
++	pta	resume_kernel, tr0
++	bne	r6, ZERO, tr0		/* no further checks */
 +
-+	pci_write_reg(0x08000000, SH7780_PCIMBAR0);	/* PCI */
-+	pci_write_reg(0x08000000, SH7780_PCILAR0);	/* SHwy */
-+	pci_write_reg(0x07F00001, SH7780_PCILSR);	/* size 128M w/ MBAR */
++	/* Check softirqs */
 +
-+	pci_write_reg(0x00000000, SH7780_PCIMBAR1);
-+	pci_write_reg(0x00000000, SH7780_PCILAR1);
-+	pci_write_reg(0x00000000, SH7780_PCILSR1);
++#ifdef CONFIG_PREEMPT
++	pta   ret_from_syscall, tr0
++	blink   tr0, ZERO
 +
-+	pci_write_reg(0xAB000801, SH7780_PCIIBAR);
++resume_kernel:
++	pta	restore_all, tr0
 +
-+	/*
-+	 * Set the MBR so PCI address is one-to-one with window,
-+	 * meaning all calls go straight through... use ifdef to
-+	 * catch erroneous assumption.
-+	 */
-+	pci_write_reg(0xFD000000 , SH7780_PCIMBR0);
-+	pci_write_reg(0x00FC0000 , SH7780_PCIMBMR0);	/* 16M */
++	getcon	KCR0, r6
++	ld.l	r6, TI_PRE_COUNT, r7
++	beq/u	r7, ZERO, tr0
 +
-+	/* Set IOBR for window containing area specified in pci.h */
-+	pci_write_reg(PCIBIOS_MIN_IO & ~(SH7780_PCI_IO_SIZE-1), SH7780_PCIIOBR);
-+	pci_write_reg((SH7780_PCI_IO_SIZE-1) & (7 << 18), SH7780_PCIIOBMR);
++need_resched:
++	ld.l	r6, TI_FLAGS, r7
++	movi	(1 << TIF_NEED_RESCHED), r8
++	and	r8, r7, r8
++	bne	r8, ZERO, tr0
 +
-+	pci_write_reg(0xA5000C01, SH7780_PCICR);
++	getcon	SR, r7
++	andi	r7, 0xf0, r7
++	bne	r7, ZERO, tr0
 +
-+	return 0;
-+}
-diff --git a/arch/sh/drivers/pci/ops-cayman.c b/arch/sh/drivers/pci/ops-cayman.c
-new file mode 100644
-index 0000000..980275f
---- /dev/null
-+++ b/arch/sh/drivers/pci/ops-cayman.c
-@@ -0,0 +1,94 @@
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <linux/types.h>
-+#include <asm/cpu/irq.h>
-+#include "pci-sh5.h"
++	movi	((PREEMPT_ACTIVE >> 16) & 65535), r8
++	shori	(PREEMPT_ACTIVE & 65535), r8
++	st.l	r6, TI_PRE_COUNT, r8
 +
-+static inline u8 bridge_swizzle(u8 pin, u8 slot)
-+{
-+	return (((pin - 1) + slot) % 4) + 1;
-+}
++	STI()
++	movi	schedule, r7
++	ori	r7, 1, r7
++	ptabs	r7, tr1
++	blink	tr1, LINK
 +
-+int __init pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin)
-+{
-+	int result = -1;
++	st.l	r6, TI_PRE_COUNT, ZERO
++	CLI()
 +
-+	/* The complication here is that the PCI IRQ lines from the Cayman's 2
-+	   5V slots get into the CPU via a different path from the IRQ lines
-+	   from the 3 3.3V slots.  Thus, we have to detect whether the card's
-+	   interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling'
-+	   at the point where we cross from 5V to 3.3V is not the normal case.
++	pta	need_resched, tr1
++	blink	tr1, ZERO
++#endif
 +
-+	   The added complication is that we don't know that the 5V slots are
-+	   always bus 2, because a card containing a PCI-PCI bridge may be
-+	   plugged into a 3.3V slot, and this changes the bus numbering.
++	.global ret_from_syscall
++ret_from_syscall:
 +
-+	   Also, the Cayman has an intermediate PCI bus that goes a custom
-+	   expansion board header (and to the secondary bridge).  This bus has
-+	   never been used in practice.
++ret_with_reschedule:
++	getcon	KCR0, r6		! r6 contains current_thread_info
++	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags
 +
-+	   The 1ary onboard PCI-PCI bridge is device 3 on bus 0
-+	   The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of
-+	   the 1ary bridge.
-+	   */
++	movi	_TIF_NEED_RESCHED, r8
++	and	r8, r7, r8
++	pta	work_resched, tr0
++	bne	r8, ZERO, tr0
 +
-+	struct slot_pin {
-+		int slot;
-+		int pin;
-+	} path[4];
-+	int i=0;
++	pta	restore_all, tr1
 +
-+	while (dev->bus->number > 0) {
++	movi	(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
++	and	r8, r7, r8
++	pta	work_notifysig, tr0
++	bne	r8, ZERO, tr0
 +
-+		slot = path[i].slot = PCI_SLOT(dev->devfn);
-+		pin = path[i].pin = bridge_swizzle(pin, slot);
-+		dev = dev->bus->self;
-+		i++;
-+		if (i > 3) panic("PCI path to root bus too long!\n");
-+	}
++	blink	tr1, ZERO
 +
-+	slot = PCI_SLOT(dev->devfn);
-+	/* This is the slot on bus 0 through which the device is eventually
-+	   reachable. */
++work_resched:
++	pta	ret_from_syscall, tr0
++	gettr	tr0, LINK
++	movi	schedule, r6
++	ptabs	r6, tr0
++	blink	tr0, ZERO		/* Call schedule(), return on top */
 +
-+	/* Now work back up. */
-+	if ((slot < 3) || (i == 0)) {
-+		/* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
-+		   swizzle now. */
-+		result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
-+	} else {
-+		i--;
-+		slot = path[i].slot;
-+		pin  = path[i].pin;
-+		if (slot > 0) {
-+			panic("PCI expansion bus device found - not handled!\n");
-+		} else {
-+			if (i > 0) {
-+				/* 5V slots */
-+				i--;
-+				slot = path[i].slot;
-+				pin  = path[i].pin;
-+				/* 'pin' was swizzled earlier wrt slot, don't do it again. */
-+				result = IRQ_P2INTA + (pin - 1);
-+			} else {
-+				/* IRQ for 2ary PCI-PCI bridge : unused */
-+				result = -1;
-+			}
-+		}
-+	}
++work_notifysig:
++	gettr	tr1, LINK
 +
-+	return result;
-+}
++	movi	do_signal, r6
++	ptabs	r6, tr0
++	or	SP, ZERO, r2
++	or	ZERO, ZERO, r3
++	blink	tr0, LINK	    /* Call do_signal(regs, 0), return here */
 +
-+struct pci_channel board_pci_channels[] = {
-+	{ &sh5_pci_ops, NULL, NULL, 0, 0xff },
-+	{ NULL, NULL, NULL, 0, 0 },
-+};
-+EXPORT_SYMBOL(board_pci_channels);
++restore_all:
++	/* Do prefetches */
++
++	ld.q	SP, FRAME_T(0), r6
++	ld.q	SP, FRAME_T(1), r7
++	ld.q	SP, FRAME_T(2), r8
++	ld.q	SP, FRAME_T(3), r9
++	ptabs	r6, tr0
++	ptabs	r7, tr1
++	ptabs	r8, tr2
++	ptabs	r9, tr3
++	ld.q	SP, FRAME_T(4), r6
++	ld.q	SP, FRAME_T(5), r7
++	ld.q	SP, FRAME_T(6), r8
++	ld.q	SP, FRAME_T(7), r9
++	ptabs	r6, tr4
++	ptabs	r7, tr5
++	ptabs	r8, tr6
++	ptabs	r9, tr7
++
++	ld.q	SP, FRAME_R(0), r0
++	ld.q	SP, FRAME_R(1), r1
++	ld.q	SP, FRAME_R(2), r2
++	ld.q	SP, FRAME_R(3), r3
++	ld.q	SP, FRAME_R(4), r4
++	ld.q	SP, FRAME_R(5), r5
++	ld.q	SP, FRAME_R(6), r6
++	ld.q	SP, FRAME_R(7), r7
++	ld.q	SP, FRAME_R(8), r8
++	ld.q	SP, FRAME_R(9), r9
++	ld.q	SP, FRAME_R(10), r10
++	ld.q	SP, FRAME_R(11), r11
++	ld.q	SP, FRAME_R(12), r12
++	ld.q	SP, FRAME_R(13), r13
++	ld.q	SP, FRAME_R(14), r14
++
++	ld.q	SP, FRAME_R(16), r16
++	ld.q	SP, FRAME_R(17), r17
++	ld.q	SP, FRAME_R(18), r18
++	ld.q	SP, FRAME_R(19), r19
++	ld.q	SP, FRAME_R(20), r20
++	ld.q	SP, FRAME_R(21), r21
++	ld.q	SP, FRAME_R(22), r22
++	ld.q	SP, FRAME_R(23), r23
++	ld.q	SP, FRAME_R(24), r24
++	ld.q	SP, FRAME_R(25), r25
++	ld.q	SP, FRAME_R(26), r26
++	ld.q	SP, FRAME_R(27), r27
++	ld.q	SP, FRAME_R(28), r28
++	ld.q	SP, FRAME_R(29), r29
++	ld.q	SP, FRAME_R(30), r30
++	ld.q	SP, FRAME_R(31), r31
++	ld.q	SP, FRAME_R(32), r32
++	ld.q	SP, FRAME_R(33), r33
++	ld.q	SP, FRAME_R(34), r34
++	ld.q	SP, FRAME_R(35), r35
++	ld.q	SP, FRAME_R(36), r36
++	ld.q	SP, FRAME_R(37), r37
++	ld.q	SP, FRAME_R(38), r38
++	ld.q	SP, FRAME_R(39), r39
++	ld.q	SP, FRAME_R(40), r40
++	ld.q	SP, FRAME_R(41), r41
++	ld.q	SP, FRAME_R(42), r42
++	ld.q	SP, FRAME_R(43), r43
++	ld.q	SP, FRAME_R(44), r44
++	ld.q	SP, FRAME_R(45), r45
++	ld.q	SP, FRAME_R(46), r46
++	ld.q	SP, FRAME_R(47), r47
++	ld.q	SP, FRAME_R(48), r48
++	ld.q	SP, FRAME_R(49), r49
++	ld.q	SP, FRAME_R(50), r50
++	ld.q	SP, FRAME_R(51), r51
++	ld.q	SP, FRAME_R(52), r52
++	ld.q	SP, FRAME_R(53), r53
++	ld.q	SP, FRAME_R(54), r54
++	ld.q	SP, FRAME_R(55), r55
++	ld.q	SP, FRAME_R(56), r56
++	ld.q	SP, FRAME_R(57), r57
++	ld.q	SP, FRAME_R(58), r58
++
++	getcon	SR, r59
++	movi	SR_BLOCK_EXC, r60
++	or	r59, r60, r59
++	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
++	ld.q	SP, FRAME_S(FSSR), r61
++	ld.q	SP, FRAME_S(FSPC), r62
++	movi	SR_ASID_MASK, r60
++	and	r59, r60, r59
++	andc	r61, r60, r61		/* Clear out older ASID */
++	or	r59, r61, r61		/* Retain current ASID */
++	putcon	r61, SSR
++	putcon	r62, SPC
++
++	/* Ignore FSYSCALL_ID */
++
++	ld.q	SP, FRAME_R(59), r59
++	ld.q	SP, FRAME_R(60), r60
++	ld.q	SP, FRAME_R(61), r61
++	ld.q	SP, FRAME_R(62), r62
++
++	/* Last touch */
++	ld.q	SP, FRAME_R(15), SP
++	rte
++	nop
 +
-+int __init pcibios_init_platform(void)
-+{
-+	return sh5pci_init(__pa(memory_start),
-+			   __pa(memory_end) - __pa(memory_start));
-+}
-diff --git a/arch/sh/drivers/pci/ops-r7780rp.c b/arch/sh/drivers/pci/ops-r7780rp.c
-index 48fe403..5fdadae 100644
---- a/arch/sh/drivers/pci/ops-r7780rp.c
-+++ b/arch/sh/drivers/pci/ops-r7780rp.c
-@@ -17,25 +17,13 @@
- #include <asm/io.h>
- #include "pci-sh4.h"
- 
--static char r7780rp_irq_tab[] __initdata = {
--	0, 1, 2, 3,
--};
--
--static char r7780mp_irq_tab[] __initdata = {
-+static char irq_tab[] __initdata = {
- 	65, 66, 67, 68,
- };
- 
- int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
- {
--	if (mach_is_r7780rp())
--		return r7780rp_irq_tab[slot];
--	if (mach_is_r7780mp() || mach_is_r7785rp())
--		return r7780mp_irq_tab[slot];
--
--	printk(KERN_ERR "PCI: Bad IRQ mapping "
--	       "request for slot %d, func %d\n", slot, pin-1);
--
--	return -1;
-+	return irq_tab[slot];
- }
- 
- static struct resource sh7780_io_resource = {
-diff --git a/arch/sh/drivers/pci/ops-sdk7780.c b/arch/sh/drivers/pci/ops-sdk7780.c
-new file mode 100644
-index 0000000..66a9b40
---- /dev/null
-+++ b/arch/sh/drivers/pci/ops-sdk7780.c
-@@ -0,0 +1,73 @@
 +/*
-+ * linux/arch/sh/drivers/pci/ops-sdk7780.c
++ * Third level handlers for VBR-based exceptions. Adapting args to
++ * and/or deflecting to fourth level handlers.
 + *
-+ * Copyright (C) 2006  Nobuhiro Iwamatsu
++ * Fourth level handlers interface.
++ * Most are C-coded handlers directly pointed by the trap_jtable.
++ * (Third = Fourth level)
++ * Inputs:
++ * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
++ *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
++ * (r3)   struct pt_regs *, original register's frame pointer
++ * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
++ * (r5)   TRA control register (for syscall/debug benefit only)
++ * (LINK) return address
++ * (SP)   = r3
 + *
-+ * PCI initialization for the SDK7780SE03
++ * Kernel TLB fault handlers will get a slightly different interface.
++ * (r2)   struct pt_regs *, original register's frame pointer
++ * (r3)   writeaccess, whether it's a store fault as opposed to load fault
++ * (r4)   execaccess, whether it's a ITLB fault as opposed to DTLB fault
++ * (r5)   Effective Address of fault
++ * (LINK) return address
++ * (SP)   = r2
++ *
++ * fpu_error_or_IRQ? is a helper to deflect to the right cause.
 + *
-+ * May be copied or modified under the terms of the GNU General Public
-+ * License.  See linux/COPYING for more information.
 + */
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/pci.h>
-+#include <asm/sdk7780.h>
-+#include <asm/io.h>
-+#include "pci-sh4.h"
++tlb_miss_load:
++	or	SP, ZERO, r2
++	or	ZERO, ZERO, r3		/* Read */
++	or	ZERO, ZERO, r4		/* Data */
++	getcon	TEA, r5
++	pta	call_do_page_fault, tr0
++	beq	ZERO, ZERO, tr0
 +
-+/* IDSEL [16][17][18][19][20][21][22][23][24][25][26][27][28][29][30][31] */
-+static char sdk7780_irq_tab[4][16] __initdata = {
-+	/* INTA */
-+	{ 65, 68, 67, 68, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
-+	/* INTB */
-+	{ 66, 65, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
-+	/* INTC */
-+	{ 67, 66, -1, 66, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
-+	/* INTD */
-+	{ 68, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
-+};
++tlb_miss_store:
++	or	SP, ZERO, r2
++	movi	1, r3			/* Write */
++	or	ZERO, ZERO, r4		/* Data */
++	getcon	TEA, r5
++	pta	call_do_page_fault, tr0
++	beq	ZERO, ZERO, tr0
 +
-+int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
-+{
-+       return sdk7780_irq_tab[pin-1][slot];
-+}
++itlb_miss_or_IRQ:
++	pta	its_IRQ, tr0
++	beqi/u	r4, EVENT_INTERRUPT, tr0
++	or	SP, ZERO, r2
++	or	ZERO, ZERO, r3		/* Read */
++	movi	1, r4			/* Text */
++	getcon	TEA, r5
++	/* Fall through */
 +
-+static struct resource sdk7780_io_resource = {
-+	.name	= "SH7780_IO",
-+	.start	= SH7780_PCI_IO_BASE,
-+	.end	= SH7780_PCI_IO_BASE + SH7780_PCI_IO_SIZE - 1,
-+	.flags	= IORESOURCE_IO
-+};
++call_do_page_fault:
++	movi	do_page_fault, r6
++        ptabs	r6, tr0
++        blink	tr0, ZERO
 +
-+static struct resource sdk7780_mem_resource = {
-+	.name	= "SH7780_mem",
-+	.start	= SH7780_PCI_MEMORY_BASE,
-+	.end	= SH7780_PCI_MEMORY_BASE + SH7780_PCI_MEM_SIZE - 1,
-+	.flags	= IORESOURCE_MEM
-+};
++fpu_error_or_IRQA:
++	pta	its_IRQ, tr0
++	beqi/l	r4, EVENT_INTERRUPT, tr0
++#ifdef CONFIG_SH_FPU
++	movi	do_fpu_state_restore, r6
++#else
++	movi	do_exception_error, r6
++#endif
++	ptabs	r6, tr0
++	blink	tr0, ZERO
 +
-+struct pci_channel board_pci_channels[] = {
-+	{ &sh4_pci_ops, &sdk7780_io_resource, &sdk7780_mem_resource, 0, 0xff },
-+	{ NULL, NULL, NULL, 0, 0 },
-+};
-+EXPORT_SYMBOL(board_pci_channels);
++fpu_error_or_IRQB:
++	pta	its_IRQ, tr0
++	beqi/l	r4, EVENT_INTERRUPT, tr0
++#ifdef CONFIG_SH_FPU
++	movi	do_fpu_state_restore, r6
++#else
++	movi	do_exception_error, r6
++#endif
++	ptabs	r6, tr0
++	blink	tr0, ZERO
 +
-+static struct sh4_pci_address_map sdk7780_pci_map = {
-+	.window0	= {
-+		.base	= SH7780_CS2_BASE_ADDR,
-+		.size	= 0x04000000,
-+	},
-+	.window1	= {
-+		.base	= SH7780_CS3_BASE_ADDR,
-+		.size	= 0x04000000,
-+	},
-+	.flags	= SH4_PCIC_NO_RESET,
-+};
++its_IRQ:
++	movi	do_IRQ, r6
++	ptabs	r6, tr0
++	blink	tr0, ZERO
 +
-+int __init pcibios_init_platform(void)
-+{
-+	printk(KERN_INFO "SH7780 PCI: Finished initializing PCI controller\n");
-+	return sh7780_pcic_init(&sdk7780_pci_map);
-+}
-diff --git a/arch/sh/drivers/pci/ops-sh5.c b/arch/sh/drivers/pci/ops-sh5.c
-new file mode 100644
-index 0000000..729e38a
---- /dev/null
-+++ b/arch/sh/drivers/pci/ops-sh5.c
-@@ -0,0 +1,93 @@
 +/*
-+ * Support functions for the SH5 PCI hardware.
++ * system_call/unknown_trap third level handler:
 + *
-+ * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
-+ * Copyright (C) 2003, 2004 Paul Mundt
-+ * Copyright (C) 2004 Richard Curnow
++ * Inputs:
++ * (r2)   fault/interrupt code, entry number (TRAP = 11)
++ * (r3)   struct pt_regs *, original register's frame pointer
++ * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
++ * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
++ * (SP)   = r3
++ * (LINK) return address: ret_from_exception
++ * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
++ *
++ * Outputs:
++ * (*r3)  Syscall reply (Saved r2)
++ * (LINK) In case of syscall only it can be scrapped.
++ *        Common second level post handler will be ret_from_syscall.
++ *        Common (non-trace) exit point to that is syscall_ret (saving
++ *        result to r2). Common bad exit point is syscall_bad (returning
++ *        ENOSYS then saved to r2).
 + *
-+ * May be copied or modified under the terms of the GNU General Public
-+ * License.  See linux/COPYING for more information.
 + */
-+#include <linux/kernel.h>
-+#include <linux/rwsem.h>
-+#include <linux/smp.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/errno.h>
-+#include <linux/pci.h>
-+#include <linux/delay.h>
-+#include <linux/types.h>
-+#include <linux/irq.h>
-+#include <asm/pci.h>
-+#include <asm/io.h>
-+#include "pci-sh5.h"
 +
-+static void __init pci_fixup_ide_bases(struct pci_dev *d)
-+{
-+	int i;
++unknown_trap:
++	/* Unknown Trap or User Trace */
++	movi	do_unknown_trapa, r6
++	ptabs	r6, tr0
++        ld.q    r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
++        andi    r2, 0x1ff, r2		/* r2 = syscall # */
++	blink	tr0, LINK
 +
-+	/*
-+	 * PCI IDE controllers use non-standard I/O port decoding, respect it.
-+	 */
-+	if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
-+		return;
-+	printk("PCI: IDE base address fixup for %s\n", pci_name(d));
-+	for(i=0; i<4; i++) {
-+		struct resource *r = &d->resource[i];
-+		if ((r->start & ~0x80) == 0x374) {
-+			r->start |= 2;
-+			r->end = r->start;
-+		}
-+	}
-+}
-+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
++	pta	syscall_ret, tr0
++	blink	tr0, ZERO
 +
-+char * __devinit pcibios_setup(char *str)
-+{
-+	return str;
-+}
++        /* New syscall implementation */
++system_call:
++	pta	unknown_trap, tr0
++        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
++        shlri   r4, 20, r4
++	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */
 +
-+static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
-+			int size, u32 *val)
-+{
-+	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
++        /* It's a system call */
++	st.q    r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
++	andi    r5, 0x1ff, r5			/* syscall # -> r5	  */
 +
-+	switch (size) {
-+		case 1:
-+			*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
-+			break;
-+		case 2:
-+			*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
-+			break;
-+		case 4:
-+			*val = SH5PCI_READ(PDR);
-+			break;
-+	}
++	STI()
 +
-+	return PCIBIOS_SUCCESSFUL;
-+}
++	pta	syscall_allowed, tr0
++	movi	NR_syscalls - 1, r4	/* Last valid */
++	bgeu/l	r4, r5, tr0
 +
-+static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
-+			 int size, u32 val)
-+{
-+	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
++syscall_bad:
++	/* Return ENOSYS ! */
++	movi	-(ENOSYS), r2		/* Fall-through */
 +
-+	switch (size) {
-+		case 1:
-+			SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
-+			break;
-+		case 2:
-+			SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
-+			break;
-+		case 4:
-+			SH5PCI_WRITE(PDR, val);
-+			break;
-+	}
++	.global syscall_ret
++syscall_ret:
++	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
 +
-+	return PCIBIOS_SUCCESSFUL;
-+}
++#ifdef CONFIG_POOR_MANS_STRACE
++	/* nothing useful in registers at this point */
 +
-+struct pci_ops sh5_pci_ops = {
-+	.read		= sh5pci_read,
-+	.write		= sh5pci_write,
-+};
-diff --git a/arch/sh/drivers/pci/pci-auto.c b/arch/sh/drivers/pci/pci-auto.c
-index 224e007..ea40470 100644
---- a/arch/sh/drivers/pci/pci-auto.c
-+++ b/arch/sh/drivers/pci/pci-auto.c
-@@ -516,10 +516,8 @@ pciauto_bus_scan(struct pci_channel *hose, int top_bus, int current_bus)
- 					 PCI_COMMAND, cmdstat | PCI_COMMAND_IO |
- 					 PCI_COMMAND_MEMORY |
- 					 PCI_COMMAND_MASTER);
--#if !defined(CONFIG_SH_HS7751RVOIP) && !defined(CONFIG_SH_RTS7751R2D)
- 		early_write_config_byte(hose, top_bus, current_bus, pci_devfn,
- 					PCI_LATENCY_TIMER, 0x80);
--#endif
- 
- 		/* Allocate PCI I/O and/or memory space */
- 		pciauto_setup_bars(hose, top_bus, current_bus, pci_devfn, PCI_BASE_ADDRESS_5);
-diff --git a/arch/sh/drivers/pci/pci-sh4.h b/arch/sh/drivers/pci/pci-sh4.h
-index 1901c33..4925c79 100644
---- a/arch/sh/drivers/pci/pci-sh4.h
-+++ b/arch/sh/drivers/pci/pci-sh4.h
-@@ -1,7 +1,9 @@
- #ifndef __PCI_SH4_H
- #define __PCI_SH4_H
- 
--#if defined(CONFIG_CPU_SUBTYPE_SH7780) || defined(CONFIG_CPU_SUBTYPE_SH7785)
-+#if defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-+    defined(CONFIG_CPU_SUBTYPE_SH7785) || \
-+    defined(CONFIG_CPU_SUBTYPE_SH7763)
- #include "pci-sh7780.h"
- #else
- #include "pci-sh7751.h"
-diff --git a/arch/sh/drivers/pci/pci-sh5.c b/arch/sh/drivers/pci/pci-sh5.c
-new file mode 100644
-index 0000000..a00a4df
---- /dev/null
-+++ b/arch/sh/drivers/pci/pci-sh5.c
-@@ -0,0 +1,228 @@
-+/*
-+ * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
-+ * Copyright (C) 2003, 2004 Paul Mundt
-+ * Copyright (C) 2004 Richard Curnow
-+ *
-+ * May be copied or modified under the terms of the GNU General Public
-+ * License.  See linux/COPYING for more information.
-+ *
-+ * Support functions for the SH5 PCI hardware.
++	movi	evt_debug2, r5
++	ori	r5, 1, r5
++	ptabs	r5, tr0
++	ld.q	SP, FRAME_R(9), r2
++	or	SP, ZERO, r3
++	blink	tr0, LINK
++#endif
++
++	ld.q	SP, FRAME_S(FSPC), r2
++	addi	r2, 4, r2		/* Move PC, being pre-execution event */
++	st.q	SP, FRAME_S(FSPC), r2
++	pta	ret_from_syscall, tr0
++	blink	tr0, ZERO
++
++
++/*  A different return path for ret_from_fork, because we now need
++ *  to call schedule_tail with the later kernels. Because prev is
++ *  loaded into r2 by switch_to(), we can just call it straight away.
 + */
 +
-+#include <linux/kernel.h>
-+#include <linux/rwsem.h>
-+#include <linux/smp.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/errno.h>
-+#include <linux/pci.h>
-+#include <linux/delay.h>
-+#include <linux/types.h>
-+#include <linux/irq.h>
-+#include <asm/cpu/irq.h>
-+#include <asm/pci.h>
-+#include <asm/io.h>
-+#include "pci-sh5.h"
++.global	ret_from_fork
++ret_from_fork:
 +
-+unsigned long pcicr_virt;
-+unsigned long PCI_IO_AREA;
++	movi	schedule_tail,r5
++	ori	r5, 1, r5
++	ptabs	r5, tr0
++	blink	tr0, LINK
 +
-+/* Rounds a number UP to the nearest power of two. Used for
-+ * sizing the PCI window.
++#ifdef CONFIG_POOR_MANS_STRACE
++	/* nothing useful in registers at this point */
++
++	movi	evt_debug2, r5
++	ori	r5, 1, r5
++	ptabs	r5, tr0
++	ld.q	SP, FRAME_R(9), r2
++	or	SP, ZERO, r3
++	blink	tr0, LINK
++#endif
++
++	ld.q	SP, FRAME_S(FSPC), r2
++	addi	r2, 4, r2		/* Move PC, being pre-execution event */
++	st.q	SP, FRAME_S(FSPC), r2
++	pta	ret_from_syscall, tr0
++	blink	tr0, ZERO
++
++
++
++syscall_allowed:
++	/* Use LINK to deflect the exit point, default is syscall_ret */
++	pta	syscall_ret, tr0
++	gettr	tr0, LINK
++	pta	syscall_notrace, tr0
++
++	getcon	KCR0, r2
++	ld.l	r2, TI_FLAGS, r4
++	movi	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
++	and	r6, r4, r6
++	beq/l	r6, ZERO, tr0
++
++	/* Trace it by calling syscall_trace before and after */
++	movi	syscall_trace, r4
++	or	SP, ZERO, r2
++	or	ZERO, ZERO, r3
++	ptabs	r4, tr0
++	blink	tr0, LINK
++
++	/* Reload syscall number as r5 is trashed by syscall_trace */
++	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
++	andi	r5, 0x1ff, r5
++
++	pta	syscall_ret_trace, tr0
++	gettr	tr0, LINK
++
++syscall_notrace:
++	/* Now point to the appropriate 4th level syscall handler */
++	movi	sys_call_table, r4
++	shlli	r5, 2, r5
++	ldx.l	r4, r5, r5
++	ptabs	r5, tr0
++
++	/* Prepare original args */
++	ld.q	SP, FRAME_R(2), r2
++	ld.q	SP, FRAME_R(3), r3
++	ld.q	SP, FRAME_R(4), r4
++	ld.q	SP, FRAME_R(5), r5
++	ld.q	SP, FRAME_R(6), r6
++	ld.q	SP, FRAME_R(7), r7
++
++	/* And now the trick for those syscalls requiring regs * ! */
++	or	SP, ZERO, r8
++
++	/* Call it */
++	blink	tr0, ZERO	/* LINK is already properly set */
++
++syscall_ret_trace:
++	/* We get back here only if under trace */
++	st.q	SP, FRAME_R(9), r2	/* Save return value */
++
++	movi	syscall_trace, LINK
++	or	SP, ZERO, r2
++	movi	1, r3
++	ptabs	LINK, tr0
++	blink	tr0, LINK
++
++	/* This needs to be done after any syscall tracing */
++	ld.q	SP, FRAME_S(FSPC), r2
++	addi	r2, 4, r2	/* Move PC, being pre-execution event */
++	st.q	SP, FRAME_S(FSPC), r2
++
++	pta	ret_from_syscall, tr0
++	blink	tr0, ZERO		/* Resume normal return sequence */
++
++/*
++ * --- Switch to running under a particular ASID and return the previous ASID value
++ * --- The caller is assumed to have done a cli before calling this.
++ *
++ * Input r2 : new ASID
++ * Output r2 : old ASID
 + */
-+static u32 __init r2p2(u32 num)
-+{
-+	int i = 31;
-+	u32 tmp = num;
 +
-+	if (num == 0)
-+		return 0;
++	.global switch_and_save_asid
++switch_and_save_asid:
++	getcon	sr, r0
++	movi	255, r4
++	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
++	and	r0, r4, r3	/* r3 = shifted old ASID */
++	andi	r2, 255, r2	/* mask down new ASID */
++	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
++	andc	r0, r4, r0	/* efface old ASID from SR */
++	or	r0, r2, r0	/* insert the new ASID */
++	putcon	r0, ssr
++	movi	1f, r0
++	putcon	r0, spc
++	rte
++	nop
++1:
++	ptabs	LINK, tr0
++	shlri	r3, 16, r2	/* r2 = old ASID */
++	blink tr0, r63
 +
-+	do {
-+		if (tmp & (1 << 31))
-+			break;
-+		i--;
-+		tmp <<= 1;
-+	} while (i >= 0);
++	.global	route_to_panic_handler
++route_to_panic_handler:
++	/* Switch to real mode, goto panic_handler, don't return.  Useful for
++	   last-chance debugging, e.g. if no output wants to go to the console.
++	   */
 +
-+	tmp = 1 << i;
-+	/* If the original number isn't a power of 2, round it up */
-+	if (tmp != num)
-+		tmp <<= 1;
++	movi	panic_handler - CONFIG_PAGE_OFFSET, r1
++	ptabs	r1, tr0
++	pta	1f, tr1
++	gettr	tr1, r0
++	putcon	r0, spc
++	getcon	sr, r0
++	movi	1, r1
++	shlli	r1, 31, r1
++	andc	r0, r1, r0
++	putcon	r0, ssr
++	rte
++	nop
++1:	/* Now in real mode */
++	blink tr0, r63
++	nop
 +
-+	return tmp;
-+}
++	.global peek_real_address_q
++peek_real_address_q:
++	/* Two args:
++	   r2 : real mode address to peek
++	   r2(out) : result quadword
 +
-+static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
-+{
-+	struct pt_regs *regs = get_irq_regs();
-+	unsigned pci_int, pci_air, pci_cir, pci_aint;
++	   This is provided as a cheapskate way of manipulating device
++	   registers for debugging (to avoid the need to onchip_remap the debug
++	   module, and to avoid the need to onchip_remap the watchpoint
++	   controller in a way that identity maps sufficient bits to avoid the
++	   SH5-101 cut2 silicon defect).
 +
-+	pci_int = SH5PCI_READ(INT);
-+	pci_cir = SH5PCI_READ(CIR);
-+	pci_air = SH5PCI_READ(AIR);
++	   This code is not performance critical
++	*/
 +
-+	if (pci_int) {
-+		printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
-+		printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
-+		printk("PCI AIR -> 0x%x\n", pci_air);
-+		printk("PCI CIR -> 0x%x\n", pci_cir);
-+		SH5PCI_WRITE(INT, ~0);
-+	}
++	add.l	r2, r63, r2	/* sign extend address */
++	getcon	sr, r0		/* r0 = saved original SR */
++	movi	1, r1
++	shlli	r1, 28, r1
++	or	r0, r1, r1	/* r0 with block bit set */
++	putcon	r1, sr		/* now in critical section */
++	movi	1, r36
++	shlli	r36, 31, r36
++	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
 +
-+	pci_aint = SH5PCI_READ(AINT);
-+	if (pci_aint) {
-+		printk("PCI ARB INTERRUPT!\n");
-+		printk("PCI AINT -> 0x%x\n", pci_aint);
-+		printk("PCI AIR -> 0x%x\n", pci_air);
-+		printk("PCI CIR -> 0x%x\n", pci_cir);
-+		SH5PCI_WRITE(AINT, ~0);
-+	}
++	putcon	r1, ssr
++	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
++	movi	1f, r37		/* virtual mode return addr */
++	putcon	r36, spc
 +
-+	return IRQ_HANDLED;
-+}
++	synco
++	rte
++	nop
 +
-+static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
-+{
-+	printk("SERR IRQ\n");
++.peek0:	/* come here in real mode, don't touch caches!!
++           still in critical section (sr.bl==1) */
++	putcon	r0, ssr
++	putcon	r37, spc
++	/* Here's the actual peek.  If the address is bad, all bets are now off
++	 * as to what will happen (handlers invoked in real-mode = bad news) */
++	ld.q	r2, 0, r2
++	synco
++	rte	/* Back to virtual mode */
++	nop
 +
-+	return IRQ_NONE;
-+}
++1:
++	ptabs	LINK, tr0
++	blink	tr0, r63
 +
-+int __init sh5pci_init(unsigned long memStart, unsigned long memSize)
-+{
-+	u32 lsr0;
-+	u32 uval;
++	.global poke_real_address_q
++poke_real_address_q:
++	/* Two args:
++	   r2 : real mode address to poke
++	   r3 : quadword value to write.
 +
-+        if (request_irq(IRQ_ERR, pcish5_err_irq,
-+                        IRQF_DISABLED, "PCI Error",NULL) < 0) {
-+                printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
-+                return -EINVAL;
-+        }
++	   This is provided as a cheapskate way of manipulating device
++	   registers for debugging (to avoid the need to onchip_remap the debug
++	   module, and to avoid the need to onchip_remap the watchpoint
++	   controller in a way that identity maps sufficient bits to avoid the
++	   SH5-101 cut2 silicon defect).
 +
-+        if (request_irq(IRQ_SERR, pcish5_serr_irq,
-+                        IRQF_DISABLED, "PCI SERR interrupt", NULL) < 0) {
-+                printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
-+                return -EINVAL;
-+        }
++	   This code is not performance critical
++	*/
 +
-+	pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR");
-+	if (!pcicr_virt) {
-+		panic("Unable to remap PCICR\n");
-+	}
++	add.l	r2, r63, r2	/* sign extend address */
++	getcon	sr, r0		/* r0 = saved original SR */
++	movi	1, r1
++	shlli	r1, 28, r1
++	or	r0, r1, r1	/* r0 with block bit set */
++	putcon	r1, sr		/* now in critical section */
++	movi	1, r36
++	shlli	r36, 31, r36
++	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
 +
-+	PCI_IO_AREA = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO");
-+	if (!PCI_IO_AREA) {
-+		panic("Unable to remap PCIIO\n");
-+	}
++	putcon	r1, ssr
++	movi	.poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
++	movi	1f, r37		/* virtual mode return addr */
++	putcon	r36, spc
 +
-+	/* Clear snoop registers */
-+        SH5PCI_WRITE(CSCR0, 0);
-+        SH5PCI_WRITE(CSCR1, 0);
++	synco
++	rte
++	nop
 +
-+        /* Switch off interrupts */
-+        SH5PCI_WRITE(INTM,  0);
-+        SH5PCI_WRITE(AINTM, 0);
-+        SH5PCI_WRITE(PINTM, 0);
++.poke0:	/* come here in real mode, don't touch caches!!
++           still in critical section (sr.bl==1) */
++	putcon	r0, ssr
++	putcon	r37, spc
++	/* Here's the actual poke.  If the address is bad, all bets are now off
++	 * as to what will happen (handlers invoked in real-mode = bad news) */
++	st.q	r2, 0, r3
++	synco
++	rte	/* Back to virtual mode */
++	nop
 +
-+        /* Set bus active, take it out of reset */
-+        uval = SH5PCI_READ(CR);
++1:
++	ptabs	LINK, tr0
++	blink	tr0, r63
 +
-+	/* Set command Register */
-+        SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE |
-+		     CR_PFCS | CR_BMAM);
++/*
++ * --- User Access Handling Section
++ */
++
++/*
++ * User Access support. It has all moved to non-inlined assembler
++ * functions here.
++ *
++ * __kernel_size_t __copy_user(void *__to, const void *__from,
++ *			       __kernel_size_t __n)
++ *
++ * Inputs:
++ * (r2)  target address
++ * (r3)  source address
++ * (r4)  size in bytes
++ *
++ * Outputs:
++ * (*r2) target data
++ * (r2)  non-copied bytes
++ *
++ * If a fault occurs on the user pointer, bail out early and return the
++ * number of bytes not copied in r2.
++ * Strategy : for large blocks, call a real memcpy function which can
++ * move >1 byte at a time using unaligned ld/st instructions, and can
++ * manipulate the cache using prefetch + alloco to improve the speed
++ * further.  If a fault occurs in that function, just revert to the
++ * byte-by-byte approach used for small blocks; this is rare so the
++ * performance hit for that case does not matter.
++ *
++ * For small blocks it's not worth the overhead of setting up and calling
++ * the memcpy routine; do the copy a byte at a time.
++ *
++ */
++	.global	__copy_user
++__copy_user:
++	pta	__copy_user_byte_by_byte, tr1
++	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
++	bge/u	r0, r4, tr1
++	pta copy_user_memcpy, tr0
++	addi	SP, -32, SP
++	/* Save arguments in case we have to fix-up unhandled page fault */
++	st.q	SP, 0, r2
++	st.q	SP, 8, r3
++	st.q	SP, 16, r4
++	st.q	SP, 24, r35 ! r35 is callee-save
++	/* Save LINK in a register to reduce RTS time later (otherwise
++	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
++	ori	LINK, 0, r35
++	blink	tr0, LINK
 +
-+	uval=SH5PCI_READ(CR);
++	/* Copy completed normally if we get back here */
++	ptabs	r35, tr0
++	ld.q	SP, 24, r35
++	/* don't restore r2-r4, pointless */
++	/* set result=r2 to zero as the copy must have succeeded. */
++	or	r63, r63, r2
++	addi	SP, 32, SP
++	blink	tr0, r63 ! RTS
 +
-+        /* Allow it to be a master */
-+	/* NB - WE DISABLE I/O ACCESS to stop overlap */
-+        /* set WAIT bit to enable stepping, an attempt to improve stability */
-+	SH5PCI_WRITE_SHORT(CSR_CMD,
-+			    PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
-+			    PCI_COMMAND_WAIT);
++	.global __copy_user_fixup
++__copy_user_fixup:
++	/* Restore stack frame */
++	ori	r35, 0, LINK
++	ld.q	SP, 24, r35
++	ld.q	SP, 16, r4
++	ld.q	SP,  8, r3
++	ld.q	SP,  0, r2
++	addi	SP, 32, SP
++	/* Fall through to original code, in the 'same' state we entered with */
 +
-+        /*
-+        ** Set translation mapping memory in order to convert the address
-+        ** used for the main bus, to the PCI internal address.
-+        */
-+        SH5PCI_WRITE(MBR,0x40000000);
++/* The slow byte-by-byte method is used if the fast copy traps due to a bad
++   user address.  In that rare case, the speed drop can be tolerated. */
++__copy_user_byte_by_byte:
++	pta	___copy_user_exit, tr1
++	pta	___copy_user1, tr0
++	beq/u	r4, r63, tr1	/* early exit for zero length copy */
++	sub	r2, r3, r0
++	addi	r0, -1, r0
 +
-+        /* Always set the max size 512M */
-+        SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
++___copy_user1:
++	ld.b	r3, 0, r5		/* Fault address 1 */
 +
-+        /*
-+        ** I/O addresses are mapped at internal PCI specific address
-+        ** as is described into the configuration bridge table.
-+        ** These are changed to 0, to allow cards that have legacy
-+        ** io such as vga to function correctly. We set the SH5 IOBAR to
-+        ** 256K, which is a bit big as we can only have 64K of address space
-+        */
++	/* Could rewrite this to use just 1 add, but the second comes 'free'
++	   due to load latency */
++	addi	r3, 1, r3
++	addi	r4, -1, r4		/* No real fixup required */
++___copy_user2:
++	stx.b	r3, r0, r5		/* Fault address 2 */
++	bne     r4, ZERO, tr0
 +
-+        SH5PCI_WRITE(IOBR,0x0);
++___copy_user_exit:
++	or	r4, ZERO, r2
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+        /* Set up a 256K window. Totally pointless waste  of address space */
-+        SH5PCI_WRITE(IOBMR,0);
++/*
++ * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
++ *
++ * Inputs:
++ * (r2)  target address
++ * (r3)  size in bytes
++ *
++ * Outputs:
++ * (*r2) zero-ed target data
++ * (r2)  non-zero-ed bytes
++ */
++	.global	__clear_user
++__clear_user:
++	pta	___clear_user_exit, tr1
++	pta	___clear_user1, tr0
++	beq/u	r3, r63, tr1
 +
-+	/* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec.
-+	 * Ideally, we would want to map the I/O region somewhere, but it
-+	 * is so big this is not that easy!
-+         */
-+	SH5PCI_WRITE(CSR_IBAR0,~0);
-+	/* Set memory size value */
-+        memSize = memory_end - memory_start;
++___clear_user1:
++	st.b	r2, 0, ZERO		/* Fault address */
++	addi	r2, 1, r2
++	addi	r3, -1, r3		/* No real fixup required */
++	bne     r3, ZERO, tr0
 +
-+	/* Now we set up the mbars so the PCI bus can see the memory of
-+	 * the machine */
-+	if (memSize < (1024 * 1024)) {
-+                printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%lx?\n",
-+		       memSize);
-+                return -EINVAL;
-+        }
++___clear_user_exit:
++	or	r3, ZERO, r2
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+        /* Set LSR 0 */
-+        lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 :
-+		((r2p2(memSize) - 0x100000) | 0x1);
-+        SH5PCI_WRITE(LSR0, lsr0);
 +
-+        /* Set MBAR 0 */
-+        SH5PCI_WRITE(CSR_MBAR0, memory_start);
-+        SH5PCI_WRITE(LAR0, memory_start);
++/*
++ * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
++ *			   int __count)
++ *
++ * Inputs:
++ * (r2)  target address
++ * (r3)  source address
++ * (r4)  maximum size in bytes
++ *
++ * Outputs:
++ * (*r2) copied data
++ * (r2)  -EFAULT (in case of faulting)
++ *       copied data (otherwise)
++ */
++	.global	__strncpy_from_user
++__strncpy_from_user:
++	pta	___strncpy_from_user1, tr0
++	pta	___strncpy_from_user_done, tr1
++	or	r4, ZERO, r5		/* r5 = original count */
++	beq/u	r4, r63, tr1		/* early exit if r4==0 */
++	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
++	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
 +
-+        SH5PCI_WRITE(CSR_MBAR1,0);
-+        SH5PCI_WRITE(LAR1,0);
-+        SH5PCI_WRITE(LSR1,0);
++___strncpy_from_user1:
++	ld.b	r3, 0, r7		/* Fault address: only in reading */
++	st.b	r2, 0, r7
++	addi	r2, 1, r2
++	addi	r3, 1, r3
++	beq/u	ZERO, r7, tr1
++	addi	r4, -1, r4		/* return real number of copied bytes */
++	bne/l	ZERO, r4, tr0
 +
-+        /* Enable the PCI interrupts on the device */
-+        SH5PCI_WRITE(INTM,  ~0);
-+        SH5PCI_WRITE(AINTM, ~0);
-+        SH5PCI_WRITE(PINTM, ~0);
++___strncpy_from_user_done:
++	sub	r5, r4, r6		/* If done, return copied */
 +
-+	return 0;
-+}
++___strncpy_from_user_exit:
++	or	r6, ZERO, r2
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
-+{
-+	struct pci_dev *dev = bus->self;
-+	int i;
++/*
++ * extern long __strnlen_user(const char *__s, long __n)
++ *
++ * Inputs:
++ * (r2)  source address
++ * (r3)  source size in bytes
++ *
++ * Outputs:
++ * (r2)  -EFAULT (in case of faulting)
++ *       string length (otherwise)
++ */
++	.global	__strnlen_user
++__strnlen_user:
++	pta	___strnlen_user_set_reply, tr0
++	pta	___strnlen_user1, tr1
++	or	ZERO, ZERO, r5		/* r5 = counter */
++	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
++	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
++	beq	r3, ZERO, tr0
 +
-+	if (dev) {
-+		for (i= 0; i < 3; i++) {
-+			bus->resource[i] =
-+				&dev->resource[PCI_BRIDGE_RESOURCES+i];
-+			bus->resource[i]->name = bus->name;
-+		}
-+		bus->resource[0]->flags |= IORESOURCE_IO;
-+		bus->resource[1]->flags |= IORESOURCE_MEM;
++___strnlen_user1:
++	ldx.b	r2, r5, r7		/* Fault address: only in reading */
++	addi	r3, -1, r3		/* No real fixup */
++	addi	r5, 1, r5
++	beq	r3, ZERO, tr0
++	bne	r7, ZERO, tr1
++! The line below used to be active.  This led to a junk byte lying between each pair
++! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
++! via the argv and envp arguments to main, it meant the 'flat' representation visible through
++! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
++!	addi	r5, 1, r5		/* Include '\0' */
 +
-+		/* For now, propagate host limits to the bus;
-+		 * we'll adjust them later. */
-+		bus->resource[0]->end = 64*1024 - 1 ;
-+		bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1;
-+		bus->resource[0]->start = PCIBIOS_MIN_IO;
-+		bus->resource[1]->start = PCIBIOS_MIN_MEM;
++___strnlen_user_set_reply:
++	or	r5, ZERO, r6		/* If done, return counter */
++
++___strnlen_user_exit:
++	or	r6, ZERO, r2
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+		/* Turn off downstream PF memory address range by default */
-+		bus->resource[2]->start = 1024*1024;
-+		bus->resource[2]->end = bus->resource[2]->start - 1;
-+	}
-+}
-diff --git a/arch/sh/drivers/pci/pci-sh5.h b/arch/sh/drivers/pci/pci-sh5.h
-new file mode 100644
-index 0000000..7cff3fc
---- /dev/null
-+++ b/arch/sh/drivers/pci/pci-sh5.h
-@@ -0,0 +1,113 @@
 +/*
-+ * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
++ * extern long __get_user_asm_?(void *val, long addr)
 + *
-+ * May be copied or modified under the terms of the GNU General Public
-+ * License.  See linux/COPYING for more information.
++ * Inputs:
++ * (r2)  dest address
++ * (r3)  source address (in User Space)
 + *
-+ * Definitions for the SH5 PCI hardware.
++ * Outputs:
++ * (r2)  -EFAULT (faulting)
++ *       0 	 (not faulting)
 + */
-+#ifndef __PCI_SH5_H
-+#define __PCI_SH5_H
++	.global	__get_user_asm_b
++__get_user_asm_b:
++	or	r2, ZERO, r4
++	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 +
-+/* Product ID */
-+#define PCISH5_PID		0x350d
++___get_user_asm_b1:
++	ld.b	r3, 0, r5		/* r5 = data */
++	st.b	r4, 0, r5
++	or	ZERO, ZERO, r2
 +
-+/* vendor ID */
-+#define PCISH5_VID		0x1054
++___get_user_asm_b_exit:
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+/* Configuration types */
-+#define ST_TYPE0                0x00    /* Configuration cycle type 0 */
-+#define ST_TYPE1                0x01    /* Configuration cycle type 1 */
 +
-+/* VCR data */
-+#define PCISH5_VCR_STATUS      0x00
-+#define PCISH5_VCR_VERSION     0x08
++	.global	__get_user_asm_w
++__get_user_asm_w:
++	or	r2, ZERO, r4
++	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 +
-+/*
-+** ICR register offsets and bits
-+*/
-+#define PCISH5_ICR_CR          0x100   /* PCI control register values */
-+#define CR_PBAM                 (1<<12)
-+#define CR_PFCS                 (1<<11)
-+#define CR_FTO                  (1<<10)
-+#define CR_PFE                  (1<<9)
-+#define CR_TBS                  (1<<8)
-+#define CR_SPUE                 (1<<7)
-+#define CR_BMAM                 (1<<6)
-+#define CR_HOST                 (1<<5)
-+#define CR_CLKEN                (1<<4)
-+#define CR_SOCS                 (1<<3)
-+#define CR_IOCS                 (1<<2)
-+#define CR_RSTCTL               (1<<1)
-+#define CR_CFINT                (1<<0)
-+#define CR_LOCK_MASK            0xa5000000
++___get_user_asm_w1:
++	ld.w	r3, 0, r5		/* r5 = data */
++	st.w	r4, 0, r5
++	or	ZERO, ZERO, r2
 +
-+#define PCISH5_ICR_INT         0x114   /* Interrupt registert values     */
-+#define INT_MADIM               (1<<2)
++___get_user_asm_w_exit:
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+#define PCISH5_ICR_LSR0        0X104   /* Local space register values    */
-+#define PCISH5_ICR_LSR1        0X108   /* Local space register values    */
-+#define PCISH5_ICR_LAR0        0x10c   /* Local address register values  */
-+#define PCISH5_ICR_LAR1        0x110   /* Local address register values  */
-+#define PCISH5_ICR_INTM        0x118   /* Interrupt mask register values                         */
-+#define PCISH5_ICR_AIR         0x11c   /* Interrupt error address information register values    */
-+#define PCISH5_ICR_CIR         0x120   /* Interrupt error command information register values    */
-+#define PCISH5_ICR_AINT        0x130   /* Interrupt error arbiter interrupt register values      */
-+#define PCISH5_ICR_AINTM       0x134   /* Interrupt error arbiter interrupt mask register values */
-+#define PCISH5_ICR_BMIR        0x138   /* Interrupt error info register of bus master values     */
-+#define PCISH5_ICR_PAR         0x1c0   /* Pio address register values                            */
-+#define PCISH5_ICR_MBR         0x1c4   /* Memory space bank register values                      */
-+#define PCISH5_ICR_IOBR        0x1c8   /* I/O space bank register values                         */
-+#define PCISH5_ICR_PINT        0x1cc   /* power management interrupt register values             */
-+#define PCISH5_ICR_PINTM       0x1d0   /* power management interrupt mask register values        */
-+#define PCISH5_ICR_MBMR        0x1d8   /* memory space bank mask register values                 */
-+#define PCISH5_ICR_IOBMR       0x1dc   /* I/O space bank mask register values                    */
-+#define PCISH5_ICR_CSCR0       0x210   /* PCI cache snoop control register 0                     */
-+#define PCISH5_ICR_CSCR1       0x214   /* PCI cache snoop control register 1                     */
-+#define PCISH5_ICR_PDR         0x220   /* Pio data register values                               */
 +
-+/* These are configs space registers */
-+#define PCISH5_ICR_CSR_VID     0x000	/* Vendor id                           */
-+#define PCISH5_ICR_CSR_DID     0x002   /* Device id                           */
-+#define PCISH5_ICR_CSR_CMD     0x004   /* Command register                    */
-+#define PCISH5_ICR_CSR_STATUS  0x006   /* Stautus                             */
-+#define PCISH5_ICR_CSR_IBAR0   0x010   /* I/O base address register           */
-+#define PCISH5_ICR_CSR_MBAR0   0x014   /* First  Memory base address register */
-+#define PCISH5_ICR_CSR_MBAR1   0x018   /* Second Memory base address register */
++	.global	__get_user_asm_l
++__get_user_asm_l:
++	or	r2, ZERO, r4
++	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 +
-+/* Base address of registers */
-+#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
-+#define SH5PCI_IO_BASE  (PHYS_PCI_BLOCK + 0x00800000)
-+/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG)    */
++___get_user_asm_l1:
++	ld.l	r3, 0, r5		/* r5 = data */
++	st.l	r4, 0, r5
++	or	ZERO, ZERO, r2
 +
-+extern unsigned long pcicr_virt;
-+/* Register selection macro */
-+#define PCISH5_ICR_REG(x)                ( pcicr_virt + (PCISH5_ICR_##x))
-+/* #define PCISH5_VCR_REG(x)                ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
++___get_user_asm_l_exit:
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+/* Write I/O functions */
-+#define SH5PCI_WRITE(reg,val)        ctrl_outl((u32)(val),PCISH5_ICR_REG(reg))
-+#define SH5PCI_WRITE_SHORT(reg,val)  ctrl_outw((u16)(val),PCISH5_ICR_REG(reg))
-+#define SH5PCI_WRITE_BYTE(reg,val)   ctrl_outb((u8)(val),PCISH5_ICR_REG(reg))
 +
-+/* Read I/O functions */
-+#define SH5PCI_READ(reg)             ctrl_inl(PCISH5_ICR_REG(reg))
-+#define SH5PCI_READ_SHORT(reg)       ctrl_inw(PCISH5_ICR_REG(reg))
-+#define SH5PCI_READ_BYTE(reg)        ctrl_inb(PCISH5_ICR_REG(reg))
++	.global	__get_user_asm_q
++__get_user_asm_q:
++	or	r2, ZERO, r4
++	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 +
-+/* Set PCI config bits */
-+#define SET_CONFIG_BITS(bus,devfn,where)  ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
++___get_user_asm_q1:
++	ld.q	r3, 0, r5		/* r5 = data */
++	st.q	r4, 0, r5
++	or	ZERO, ZERO, r2
 +
-+/* Set PCI command register */
-+#define CONFIG_CMD(bus, devfn, where)            SET_CONFIG_BITS(bus->number,devfn,where)
++___get_user_asm_q_exit:
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+/* Size converters */
-+#define PCISH5_MEM_SIZCONV(x)		  (((x / 0x40000) - 1) << 18)
-+#define PCISH5_IO_SIZCONV(x)		  (((x / 0x40000) - 1) << 18)
++/*
++ * extern long __put_user_asm_?(void *pval, long addr)
++ *
++ * Inputs:
++ * (r2)  kernel pointer to value
++ * (r3)  dest address (in User Space)
++ *
++ * Outputs:
++ * (r2)  -EFAULT (faulting)
++ *       0 	 (not faulting)
++ */
++	.global	__put_user_asm_b
++__put_user_asm_b:
++	ld.b	r2, 0, r4		/* r4 = data */
++	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 +
-+extern struct pci_ops sh5_pci_ops;
++___put_user_asm_b1:
++	st.b	r3, 0, r4
++	or	ZERO, ZERO, r2
 +
-+/* arch/sh/drivers/pci/pci-sh5.c */
-+int sh5pci_init(unsigned long memStart, unsigned long memSize);
++___put_user_asm_b_exit:
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+#endif /* __PCI_SH5_H */
-diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
-index e516087..7d797f4 100644
---- a/arch/sh/drivers/pci/pci-sh7780.c
-+++ b/arch/sh/drivers/pci/pci-sh7780.c
-@@ -58,6 +58,7 @@ static int __init sh7780_pci_init(void)
- 	id = pci_read_reg(SH7780_PCIVID);
- 	if ((id & 0xffff) == SH7780_VENDOR_ID) {
- 		switch ((id >> 16) & 0xffff) {
-+		case SH7763_DEVICE_ID:
- 		case SH7780_DEVICE_ID:
- 		case SH7781_DEVICE_ID:
- 		case SH7785_DEVICE_ID:
-diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h
-index 1d069a8..97b2c98 100644
---- a/arch/sh/drivers/pci/pci-sh7780.h
-+++ b/arch/sh/drivers/pci/pci-sh7780.h
-@@ -16,6 +16,7 @@
- #define SH7780_VENDOR_ID	0x1912
- #define SH7781_DEVICE_ID	0x0001
- #define SH7780_DEVICE_ID	0x0002
-+#define SH7763_DEVICE_ID	0x0004
- #define SH7785_DEVICE_ID	0x0007
- 
- /* SH7780 Control Registers */
-diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
-index ccaba36..49b435c 100644
---- a/arch/sh/drivers/pci/pci.c
-+++ b/arch/sh/drivers/pci/pci.c
-@@ -71,7 +71,7 @@ subsys_initcall(pcibios_init);
-  *  Called after each bus is probed, but before its children
-  *  are examined.
-  */
--void __devinit pcibios_fixup_bus(struct pci_bus *bus)
-+void __devinit __weak pcibios_fixup_bus(struct pci_bus *bus)
- {
- 	pci_read_bridge_bases(bus);
- }
-diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
-index 4b81d9c..349d833 100644
---- a/arch/sh/kernel/Makefile
-+++ b/arch/sh/kernel/Makefile
-@@ -1,25 +1,5 @@
--#
--# Makefile for the Linux/SuperH kernel.
--#
--
--extra-y	:= head.o init_task.o vmlinux.lds
--
--obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process.o ptrace.o \
--	   semaphore.o setup.o signal.o sys_sh.o syscalls.o \
--	   time.o topology.o traps.o
--
--obj-y				+= cpu/ timers/
--obj-$(CONFIG_VSYSCALL)		+= vsyscall/
--obj-$(CONFIG_SMP)		+= smp.o
--obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
--obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
--obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
--obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
--obj-$(CONFIG_MODULES)		+= sh_ksyms.o module.o
--obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
--obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
--obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
--obj-$(CONFIG_PM)		+= pm.o
--obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
--
--EXTRA_CFLAGS += -Werror
-+ifeq ($(CONFIG_SUPERH32),y)
-+include ${srctree}/arch/sh/kernel/Makefile_32
-+else
-+include ${srctree}/arch/sh/kernel/Makefile_64
-+endif
-diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
-new file mode 100644
-index 0000000..c892898
---- /dev/null
-+++ b/arch/sh/kernel/Makefile_32
-@@ -0,0 +1,26 @@
-+#
-+# Makefile for the Linux/SuperH kernel.
-+#
 +
-+extra-y	:= head_32.o init_task.o vmlinux.lds
++	.global	__put_user_asm_w
++__put_user_asm_w:
++	ld.w	r2, 0, r4		/* r4 = data */
++	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 +
-+obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
-+	   ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
-+	   syscalls_32.o time_32.o topology.o traps.o traps_32.o
++___put_user_asm_w1:
++	st.w	r3, 0, r4
++	or	ZERO, ZERO, r2
 +
-+obj-y				+= cpu/ timers/
-+obj-$(CONFIG_VSYSCALL)		+= vsyscall/
-+obj-$(CONFIG_SMP)		+= smp.o
-+obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
-+obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
-+obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
-+obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
-+obj-$(CONFIG_MODULES)		+= sh_ksyms_32.o module.o
-+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
-+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
-+obj-$(CONFIG_PM)		+= pm.o
-+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
-+obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
++___put_user_asm_w_exit:
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+EXTRA_CFLAGS += -Werror
-diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
-new file mode 100644
-index 0000000..1ef21cc
---- /dev/null
-+++ b/arch/sh/kernel/Makefile_64
-@@ -0,0 +1,22 @@
-+extra-y	:= head_64.o init_task.o vmlinux.lds
 +
-+obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
-+	   ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
-+	   syscalls_64.o time_64.o topology.o traps.o traps_64.o
++	.global	__put_user_asm_l
++__put_user_asm_l:
++	ld.l	r2, 0, r4		/* r4 = data */
++	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 +
-+obj-y				+= cpu/ timers/
-+obj-$(CONFIG_VSYSCALL)		+= vsyscall/
-+obj-$(CONFIG_SMP)		+= smp.o
-+obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
-+obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
-+obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
-+obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
-+obj-$(CONFIG_MODULES)		+= sh_ksyms_64.o module.o
-+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
-+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
-+obj-$(CONFIG_PM)		+= pm.o
-+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
-+obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
++___put_user_asm_l1:
++	st.l	r3, 0, r4
++	or	ZERO, ZERO, r2
 +
-+EXTRA_CFLAGS += -Werror
-diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
-index d055a3e..f471d24 100644
---- a/arch/sh/kernel/cpu/Makefile
-+++ b/arch/sh/kernel/cpu/Makefile
-@@ -6,8 +6,14 @@ obj-$(CONFIG_CPU_SH2)		= sh2/
- obj-$(CONFIG_CPU_SH2A)		= sh2a/
- obj-$(CONFIG_CPU_SH3)		= sh3/
- obj-$(CONFIG_CPU_SH4)		= sh4/
-+obj-$(CONFIG_CPU_SH5)		= sh5/
++___put_user_asm_l_exit:
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+# Special cases for family ancestry.
 +
- obj-$(CONFIG_CPU_SH4A)		+= sh4a/
- 
-+# Common interfaces.
++	.global	__put_user_asm_q
++__put_user_asm_q:
++	ld.q	r2, 0, r4		/* r4 = data */
++	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 +
- obj-$(CONFIG_UBC_WAKEUP)	+= ubc.o
- obj-$(CONFIG_SH_ADC)		+= adc.o
- 
-diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
-index c217c4b..80a3132 100644
---- a/arch/sh/kernel/cpu/init.c
-+++ b/arch/sh/kernel/cpu/init.c
-@@ -13,6 +13,7 @@
- #include <linux/init.h>
- #include <linux/kernel.h>
- #include <linux/mm.h>
-+#include <linux/log2.h>
- #include <asm/mmu_context.h>
- #include <asm/processor.h>
- #include <asm/uaccess.h>
-@@ -20,9 +21,12 @@
- #include <asm/system.h>
- #include <asm/cacheflush.h>
- #include <asm/cache.h>
-+#include <asm/elf.h>
- #include <asm/io.h>
--#include <asm/ubc.h>
- #include <asm/smp.h>
-+#ifdef CONFIG_SUPERH32
-+#include <asm/ubc.h>
-+#endif
- 
- /*
-  * Generic wrapper for command line arguments to disable on-chip
-@@ -61,25 +65,12 @@ static void __init speculative_execution_init(void)
- /*
-  * Generic first-level cache init
-  */
--static void __init cache_init(void)
-+#ifdef CONFIG_SUPERH32
-+static void __uses_jump_to_uncached cache_init(void)
- {
- 	unsigned long ccr, flags;
- 
--	/* First setup the rest of the I-cache info */
--	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
--				      current_cpu_data.icache.linesz;
--
--	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
--				    current_cpu_data.icache.linesz;
--
--	/* And the D-cache too */
--	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
--				      current_cpu_data.dcache.linesz;
--
--	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
--				    current_cpu_data.dcache.linesz;
--
--	jump_to_P2();
-+	jump_to_uncached();
- 	ccr = ctrl_inl(CCR);
- 
- 	/*
-@@ -156,7 +147,31 @@ static void __init cache_init(void)
- #endif
- 
- 	ctrl_outl(flags, CCR);
--	back_to_P1();
-+	back_to_cached();
-+}
-+#else
-+#define cache_init()	do { } while (0)
-+#endif
++___put_user_asm_q1:
++	st.q	r3, 0, r4
++	or	ZERO, ZERO, r2
 +
-+#define CSHAPE(totalsize, linesize, assoc) \
-+	((totalsize & ~0xff) | (linesize << 4) | assoc)
++___put_user_asm_q_exit:
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
-+#define CACHE_DESC_SHAPE(desc)	\
-+	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
++panic_stash_regs:
++	/* The idea is: when we get an unhandled panic, we dump the registers
++	   to a known memory location, then just sit in a tight loop.
++	   This allows the human to look at the memory region through the GDB
++	   session (assuming the debug module's SHwy initiator isn't locked up
++	   or anything), to hopefully analyze the cause of the panic. */
 +
-+static void detect_cache_shape(void)
-+{
-+	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
++	/* On entry, former r15 (SP) is in DCR
++	   former r0  is at resvec_saved_area + 0
++	   former r1  is at resvec_saved_area + 8
++	   former tr0 is at resvec_saved_area + 32
++	   DCR is the only register whose value is lost altogether.
++	*/
 +
-+	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
-+		l1i_cache_shape = l1d_cache_shape;
-+	else
-+		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
++	movi	0xffffffff80000000, r0 ! phy of dump area
++	ld.q	SP, 0x000, r1	! former r0
++	st.q	r0,  0x000, r1
++	ld.q	SP, 0x008, r1	! former r1
++	st.q	r0,  0x008, r1
++	st.q	r0,  0x010, r2
++	st.q	r0,  0x018, r3
++	st.q	r0,  0x020, r4
++	st.q	r0,  0x028, r5
++	st.q	r0,  0x030, r6
++	st.q	r0,  0x038, r7
++	st.q	r0,  0x040, r8
++	st.q	r0,  0x048, r9
++	st.q	r0,  0x050, r10
++	st.q	r0,  0x058, r11
++	st.q	r0,  0x060, r12
++	st.q	r0,  0x068, r13
++	st.q	r0,  0x070, r14
++	getcon	dcr, r14
++	st.q	r0,  0x078, r14
++	st.q	r0,  0x080, r16
++	st.q	r0,  0x088, r17
++	st.q	r0,  0x090, r18
++	st.q	r0,  0x098, r19
++	st.q	r0,  0x0a0, r20
++	st.q	r0,  0x0a8, r21
++	st.q	r0,  0x0b0, r22
++	st.q	r0,  0x0b8, r23
++	st.q	r0,  0x0c0, r24
++	st.q	r0,  0x0c8, r25
++	st.q	r0,  0x0d0, r26
++	st.q	r0,  0x0d8, r27
++	st.q	r0,  0x0e0, r28
++	st.q	r0,  0x0e8, r29
++	st.q	r0,  0x0f0, r30
++	st.q	r0,  0x0f8, r31
++	st.q	r0,  0x100, r32
++	st.q	r0,  0x108, r33
++	st.q	r0,  0x110, r34
++	st.q	r0,  0x118, r35
++	st.q	r0,  0x120, r36
++	st.q	r0,  0x128, r37
++	st.q	r0,  0x130, r38
++	st.q	r0,  0x138, r39
++	st.q	r0,  0x140, r40
++	st.q	r0,  0x148, r41
++	st.q	r0,  0x150, r42
++	st.q	r0,  0x158, r43
++	st.q	r0,  0x160, r44
++	st.q	r0,  0x168, r45
++	st.q	r0,  0x170, r46
++	st.q	r0,  0x178, r47
++	st.q	r0,  0x180, r48
++	st.q	r0,  0x188, r49
++	st.q	r0,  0x190, r50
++	st.q	r0,  0x198, r51
++	st.q	r0,  0x1a0, r52
++	st.q	r0,  0x1a8, r53
++	st.q	r0,  0x1b0, r54
++	st.q	r0,  0x1b8, r55
++	st.q	r0,  0x1c0, r56
++	st.q	r0,  0x1c8, r57
++	st.q	r0,  0x1d0, r58
++	st.q	r0,  0x1d8, r59
++	st.q	r0,  0x1e0, r60
++	st.q	r0,  0x1e8, r61
++	st.q	r0,  0x1f0, r62
++	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...
 +
-+	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
-+		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
-+	else
-+		l2_cache_shape = -1; /* No S-cache */
- }
- 
- #ifdef CONFIG_SH_DSP
-@@ -228,14 +243,32 @@ asmlinkage void __cpuinit sh_cpu_init(void)
- 	if (current_cpu_data.type == CPU_SH_NONE)
- 		panic("Unknown CPU");
- 
-+	/* First setup the rest of the I-cache info */
-+	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
-+				      current_cpu_data.icache.linesz;
++	ld.q	SP, 0x020, r1  ! former tr0
++	st.q	r0,  0x200, r1
++	gettr	tr1, r1
++	st.q	r0,  0x208, r1
++	gettr	tr2, r1
++	st.q	r0,  0x210, r1
++	gettr	tr3, r1
++	st.q	r0,  0x218, r1
++	gettr	tr4, r1
++	st.q	r0,  0x220, r1
++	gettr	tr5, r1
++	st.q	r0,  0x228, r1
++	gettr	tr6, r1
++	st.q	r0,  0x230, r1
++	gettr	tr7, r1
++	st.q	r0,  0x238, r1
 +
-+	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
-+				    current_cpu_data.icache.linesz;
++	getcon	sr,  r1
++	getcon	ssr,  r2
++	getcon	pssr,  r3
++	getcon	spc,  r4
++	getcon	pspc,  r5
++	getcon	intevt,  r6
++	getcon	expevt,  r7
++	getcon	pexpevt,  r8
++	getcon	tra,  r9
++	getcon	tea,  r10
++	getcon	kcr0, r11
++	getcon	kcr1, r12
++	getcon	vbr,  r13
++	getcon	resvec,  r14
 +
-+	/* And the D-cache too */
-+	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
-+				      current_cpu_data.dcache.linesz;
++	st.q	r0,  0x240, r1
++	st.q	r0,  0x248, r2
++	st.q	r0,  0x250, r3
++	st.q	r0,  0x258, r4
++	st.q	r0,  0x260, r5
++	st.q	r0,  0x268, r6
++	st.q	r0,  0x270, r7
++	st.q	r0,  0x278, r8
++	st.q	r0,  0x280, r9
++	st.q	r0,  0x288, r10
++	st.q	r0,  0x290, r11
++	st.q	r0,  0x298, r12
++	st.q	r0,  0x2a0, r13
++	st.q	r0,  0x2a8, r14
++
++	getcon	SPC,r2
++	getcon	SSR,r3
++	getcon	EXPEVT,r4
++	/* Prepare to jump to C - physical address */
++	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
++	ori	r1, 1, r1
++	ptabs   r1, tr0
++	getcon	DCR, SP
++	blink	tr0, ZERO
++	nop
++	nop
++	nop
++	nop
 +
-+	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
-+				    current_cpu_data.dcache.linesz;
 +
- 	/* Init the cache */
- 	cache_init();
- 
--	if (raw_smp_processor_id() == 0)
-+	if (raw_smp_processor_id() == 0) {
- 		shm_align_mask = max_t(unsigned long,
- 				       current_cpu_data.dcache.way_size - 1,
- 				       PAGE_SIZE - 1);
- 
-+		/* Boot CPU sets the cache shape */
-+		detect_cache_shape();
-+	}
 +
- 	/* Disable the FPU */
- 	if (fpu_disabled) {
- 		printk("FPU Disabled\n");
-@@ -273,7 +306,10 @@ asmlinkage void __cpuinit sh_cpu_init(void)
- 	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB.  So ..
- 	 * we wake it up and hope that all is well.
- 	 */
-+#ifdef CONFIG_SUPERH32
- 	if (raw_smp_processor_id() == 0)
- 		ubc_wakeup();
-+#endif
 +
- 	speculative_execution_init();
- }
-diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
-index 8da8e17..cc1836e 100644
---- a/arch/sh/kernel/cpu/irq/Makefile
-+++ b/arch/sh/kernel/cpu/irq/Makefile
-@@ -1,7 +1,9 @@
- #
- # Makefile for the Linux/SuperH CPU-specifc IRQ handlers.
- #
--obj-y	+= imask.o intc.o
-+obj-y	+= intc.o
- 
-+obj-$(CONFIG_SUPERH32)			+= imask.o
-+obj-$(CONFIG_CPU_SH5)			+= intc-sh5.o
- obj-$(CONFIG_CPU_HAS_IPR_IRQ)		+= ipr.o
- obj-$(CONFIG_CPU_HAS_MASKREG_IRQ)	+= maskreg.o
-diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
-new file mode 100644
-index 0000000..43ee7a9
---- /dev/null
-+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
-@@ -0,0 +1,257 @@
 +/*
-+ * arch/sh/kernel/cpu/irq/intc-sh5.c
++ * --- Signal Handling Section
++ */
++
++/*
++ * extern long long _sa_default_rt_restorer
++ * extern long long _sa_default_restorer
 + *
-+ * Interrupt Controller support for SH5 INTC.
++ *		 or, better,
 + *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003  Paul Mundt
++ * extern void _sa_default_rt_restorer(void)
++ * extern void _sa_default_restorer(void)
 + *
-+ * Per-interrupt selective. IRLM=0 (Fixed priority) is not
-+ * supported being useless without a cascaded interrupt
-+ * controller.
++ * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
++ * from user space. Copied into user space by signal management.
++ * Both must be quad aligned and 2 quad long (4 instructions).
 + *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/io.h>
-+#include <linux/kernel.h>
-+#include <linux/bitops.h>
-+#include <asm/cpu/irq.h>
-+#include <asm/page.h>
-+
-+/*
-+ * Maybe the generic Peripheral block could move to a more
-+ * generic include file. INTC Block will be defined here
-+ * and only here to make INTC self-contained in a single
-+ * file.
 + */
-+#define	INTC_BLOCK_OFFSET	0x01000000
-+
-+/* Base */
-+#define INTC_BASE		PHYS_PERIPHERAL_BLOCK + \
-+				INTC_BLOCK_OFFSET
-+
-+/* Address */
-+#define INTC_ICR_SET		(intc_virt + 0x0)
-+#define INTC_ICR_CLEAR		(intc_virt + 0x8)
-+#define INTC_INTPRI_0		(intc_virt + 0x10)
-+#define INTC_INTSRC_0		(intc_virt + 0x50)
-+#define INTC_INTSRC_1		(intc_virt + 0x58)
-+#define INTC_INTREQ_0		(intc_virt + 0x60)
-+#define INTC_INTREQ_1		(intc_virt + 0x68)
-+#define INTC_INTENB_0		(intc_virt + 0x70)
-+#define INTC_INTENB_1		(intc_virt + 0x78)
-+#define INTC_INTDSB_0		(intc_virt + 0x80)
-+#define INTC_INTDSB_1		(intc_virt + 0x88)
-+
-+#define INTC_ICR_IRLM		0x1
-+#define	INTC_INTPRI_PREGS	8		/* 8 Priority Registers */
-+#define	INTC_INTPRI_PPREG	8		/* 8 Priorities per Register */
++	.balign 8
++	.global sa_default_rt_restorer
++sa_default_rt_restorer:
++	movi	0x10, r9
++	shori	__NR_rt_sigreturn, r9
++	trapa	r9
++	nop
 +
++	.balign 8
++	.global sa_default_restorer
++sa_default_restorer:
++	movi	0x10, r9
++	shori	__NR_sigreturn, r9
++	trapa	r9
++	nop
 +
 +/*
-+ * Mapper between the vector ordinal and the IRQ number
-+ * passed to kernel/device drivers.
++ * --- __ex_table Section
 + */
-+int intc_evt_to_irq[(0xE20/0x20)+1] = {
-+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x000 - 0x0E0 */
-+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x100 - 0x1E0 */
-+	 0,  0,  0,  0,  0,  1,  0,  0,	/* 0x200 - 0x2E0 */
-+	 2,  0,  0,  3,  0,  0,  0, -1,	/* 0x300 - 0x3E0 */
-+	32, 33, 34, 35, 36, 37, 38, -1,	/* 0x400 - 0x4E0 */
-+	-1, -1, -1, 63, -1, -1, -1, -1,	/* 0x500 - 0x5E0 */
-+	-1, -1, 18, 19, 20, 21, 22, -1,	/* 0x600 - 0x6E0 */
-+	39, 40, 41, 42, -1, -1, -1, -1,	/* 0x700 - 0x7E0 */
-+	 4,  5,  6,  7, -1, -1, -1, -1,	/* 0x800 - 0x8E0 */
-+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x900 - 0x9E0 */
-+	12, 13, 14, 15, 16, 17, -1, -1,	/* 0xA00 - 0xAE0 */
-+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xB00 - 0xBE0 */
-+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xC00 - 0xCE0 */
-+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xD00 - 0xDE0 */
-+	-1, -1				/* 0xE00 - 0xE20 */
-+};
 +
 +/*
-+ * Opposite mapper.
++ * User Access Exception Table.
 + */
-+static int IRQ_to_vectorN[NR_INTC_IRQS] = {
-+	0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /*  0- 7 */
-+	  -1,   -1,   -1,   -1, 0x50, 0x51, 0x52, 0x53,	/*  8-15 */
-+	0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36,   -1, /* 16-23 */
-+	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24-31 */
-+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38,	/* 32-39 */
-+        0x39, 0x3A, 0x3B,   -1,   -1,   -1,   -1,   -1, /* 40-47 */
-+	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48-55 */
-+	  -1,   -1,   -1,   -1,   -1,   -1,   -1, 0x2B, /* 56-63 */
-+
-+};
-+
-+static unsigned long intc_virt;
-+
-+static unsigned int startup_intc_irq(unsigned int irq);
-+static void shutdown_intc_irq(unsigned int irq);
-+static void enable_intc_irq(unsigned int irq);
-+static void disable_intc_irq(unsigned int irq);
-+static void mask_and_ack_intc(unsigned int);
-+static void end_intc_irq(unsigned int irq);
-+
-+static struct hw_interrupt_type intc_irq_type = {
-+	.typename = "INTC",
-+	.startup = startup_intc_irq,
-+	.shutdown = shutdown_intc_irq,
-+	.enable = enable_intc_irq,
-+	.disable = disable_intc_irq,
-+	.ack = mask_and_ack_intc,
-+	.end = end_intc_irq
-+};
-+
-+static int irlm;		/* IRL mode */
-+
-+static unsigned int startup_intc_irq(unsigned int irq)
-+{
-+	enable_intc_irq(irq);
-+	return 0; /* never anything pending */
-+}
-+
-+static void shutdown_intc_irq(unsigned int irq)
-+{
-+	disable_intc_irq(irq);
-+}
-+
-+static void enable_intc_irq(unsigned int irq)
-+{
-+	unsigned long reg;
-+	unsigned long bitmask;
-+
-+	if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
-+		printk("Trying to use straight IRL0-3 with an encoding platform.\n");
-+
-+	if (irq < 32) {
-+		reg = INTC_INTENB_0;
-+		bitmask = 1 << irq;
-+	} else {
-+		reg = INTC_INTENB_1;
-+		bitmask = 1 << (irq - 32);
-+	}
-+
-+	ctrl_outl(bitmask, reg);
-+}
-+
-+static void disable_intc_irq(unsigned int irq)
-+{
-+	unsigned long reg;
-+	unsigned long bitmask;
-+
-+	if (irq < 32) {
-+		reg = INTC_INTDSB_0;
-+		bitmask = 1 << irq;
-+	} else {
-+		reg = INTC_INTDSB_1;
-+		bitmask = 1 << (irq - 32);
-+	}
-+
-+	ctrl_outl(bitmask, reg);
-+}
-+
-+static void mask_and_ack_intc(unsigned int irq)
-+{
-+	disable_intc_irq(irq);
-+}
++	.section	__ex_table,  "a"
 +
-+static void end_intc_irq(unsigned int irq)
-+{
-+	enable_intc_irq(irq);
-+}
++	.global asm_uaccess_start	/* Just a marker */
++asm_uaccess_start:
 +
-+/* For future use, if we ever support IRLM=0) */
-+void make_intc_irq(unsigned int irq)
-+{
-+	disable_irq_nosync(irq);
-+	irq_desc[irq].chip = &intc_irq_type;
-+	disable_intc_irq(irq);
-+}
++	.long	___copy_user1, ___copy_user_exit
++	.long	___copy_user2, ___copy_user_exit
++	.long	___clear_user1, ___clear_user_exit
++	.long	___strncpy_from_user1, ___strncpy_from_user_exit
++	.long	___strnlen_user1, ___strnlen_user_exit
++	.long	___get_user_asm_b1, ___get_user_asm_b_exit
++	.long	___get_user_asm_w1, ___get_user_asm_w_exit
++	.long	___get_user_asm_l1, ___get_user_asm_l_exit
++	.long	___get_user_asm_q1, ___get_user_asm_q_exit
++	.long	___put_user_asm_b1, ___put_user_asm_b_exit
++	.long	___put_user_asm_w1, ___put_user_asm_w_exit
++	.long	___put_user_asm_l1, ___put_user_asm_l_exit
++	.long	___put_user_asm_q1, ___put_user_asm_q_exit
 +
-+#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
-+int intc_irq_describe(char* p, int irq)
-+{
-+	if (irq < NR_INTC_IRQS)
-+		return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
-+	else
-+		return 0;
-+}
-+#endif
++	.global asm_uaccess_end		/* Just a marker */
++asm_uaccess_end:
 +
-+void __init plat_irq_setup(void)
-+{
-+        unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
-+	unsigned long reg;
-+	unsigned long data;
-+	int i;
 +
-+	intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
-+	if (!intc_virt) {
-+		panic("Unable to remap INTC\n");
-+	}
 +
 +
-+	/* Set default: per-line enable/disable, priority driven ack/eoi */
-+	for (i = 0; i < NR_INTC_IRQS; i++) {
-+		if (platform_int_priority[i] != NO_PRIORITY) {
-+			irq_desc[i].chip = &intc_irq_type;
-+		}
-+	}
++/*
++ * --- .text.init Section
++ */
 +
++	.section	.text.init, "ax"
 +
-+	/* Disable all interrupts and set all priorities to 0 to avoid trouble */
-+	ctrl_outl(-1, INTC_INTDSB_0);
-+	ctrl_outl(-1, INTC_INTDSB_1);
++/*
++ * void trap_init (void)
++ *
++ */
++	.global	trap_init
++trap_init:
++	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
++	st.q	SP, 0, r28
++	st.q	SP, 8, r29
++	st.q	SP, 16, r30
 +
-+	for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
-+		ctrl_outl( NO_PRIORITY, reg);
++	/* Set VBR and RESVEC */
++	movi	LVBR_block, r19
++	andi	r19, -4, r19			/* reset MMUOFF + reserved */
++	/* For RESVEC exceptions we force the MMU off, which means we need the
++	   physical address. */
++	movi	LRESVEC_block-CONFIG_PAGE_OFFSET, r20
++	andi	r20, -4, r20			/* reset reserved */
++	ori	r20, 1, r20			/* set MMUOFF */
++	putcon	r19, VBR
++	putcon	r20, RESVEC
 +
++	/* Sanity check */
++	movi	LVBR_block_end, r21
++	andi	r21, -4, r21
++	movi	BLOCK_SIZE, r29			/* r29 = expected size */
++	or	r19, ZERO, r30
++	add	r19, r29, r19
 +
-+	/* Set IRLM */
-+	/* If all the priorities are set to 'no priority', then
-+	 * assume we are using encoded mode.
++	/*
++	 * Ugly, but better to loop forever now than crash afterwards.
++	 * We should print a message, but if we touch LVBR or
++	 * LRESVEC blocks we should not be surprised if we get stuck
++	 * in trap_init().
 +	 */
-+	irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
-+		platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
-+
-+	if (irlm == NO_PRIORITY) {
-+		/* IRLM = 0 */
-+		reg = INTC_ICR_CLEAR;
-+		i = IRQ_INTA;
-+		printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
-+	} else {
-+		/* IRLM = 1 */
-+		reg = INTC_ICR_SET;
-+		i = IRQ_IRL0;
-+	}
-+	ctrl_outl(INTC_ICR_IRLM, reg);
-+
-+	/* Set interrupt priorities according to platform description */
-+	for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
-+		data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
-+		if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
-+			/* Upon the 7th, set Priority Register */
-+			ctrl_outl(data, reg);
-+			data = 0;
-+			reg += 8;
-+		}
-+	}
++	pta	trap_init_loop, tr1
++	gettr	tr1, r28			/* r28 = trap_init_loop */
++	sub	r21, r30, r30			/* r30 = actual size */
 +
 +	/*
-+	 * And now let interrupts come in.
-+	 * sti() is not enough, we need to
-+	 * lower priority, too.
-+	 */
-+        __asm__ __volatile__("getcon    " __SR ", %0\n\t"
-+                             "and       %0, %1, %0\n\t"
-+                             "putcon    %0, " __SR "\n\t"
-+                             : "=&r" (__dummy0)
-+                             : "r" (__dummy1));
-+}
-diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c
-index 6ac018c..84806b2 100644
---- a/arch/sh/kernel/cpu/irq/intc.c
-+++ b/arch/sh/kernel/cpu/irq/intc.c
-@@ -335,31 +335,6 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc,
- 	return 0;
- }
- 
--static unsigned int __init intc_prio_value(struct intc_desc *desc,
--					   intc_enum enum_id, int do_grps)
--{
--	struct intc_prio *p = desc->priorities;
--	unsigned int i;
--
--	for (i = 0; p && enum_id && i < desc->nr_priorities; i++) {
--		p = desc->priorities + i;
--
--		if (p->enum_id != enum_id)
--			continue;
--
--		return p->priority;
--	}
--
--	if (do_grps)
--		return intc_prio_value(desc, intc_grp_id(desc, enum_id), 0);
--
--	/* default to the lowest priority possible if no priority is set
--	 * - this needs to be at least 2 for 5-bit priorities on 7780
--	 */
--
--	return 2;
--}
--
- static unsigned int __init intc_mask_data(struct intc_desc *desc,
- 					  struct intc_desc_int *d,
- 					  intc_enum enum_id, int do_grps)
-@@ -518,8 +493,10 @@ static void __init intc_register_irq(struct intc_desc *desc,
- 				      handle_level_irq, "level");
- 	set_irq_chip_data(irq, (void *)data[primary]);
- 
--	/* record the desired priority level */
--	intc_prio_level[irq] = intc_prio_value(desc, enum_id, 1);
-+	/* set priority level
-+	 * - this needs to be at least 2 for 5-bit priorities on 7780
++	 * VBR/RESVEC handlers overlap by being bigger than
++	 * allowed. Very bad. Just loop forever.
++	 * (r28) panic/loop address
++	 * (r29) expected size
++	 * (r30) actual size
 +	 */
-+	intc_prio_level[irq] = 2;
- 
- 	/* enable secondary masking method if present */
- 	if (data[!primary])
-diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
-index ee8f1fe..7a26569 100644
---- a/arch/sh/kernel/cpu/sh2/entry.S
-+++ b/arch/sh/kernel/cpu/sh2/entry.S
-@@ -149,6 +149,14 @@ ENTRY(exception_handler)
- 	mov	#32,r8
- 	cmp/hs	r8,r9
- 	bt	trap_entry	! 64 > vec >= 32  is trap
-+
-+#if defined(CONFIG_SH_FPU)
-+	mov     #13,r8
-+	cmp/eq  r8,r9
-+	bt      10f             ! fpu
-+	nop
-+#endif
++trap_init_loop:
++	bne	r19, r21, tr1
 +
- 	mov.l	4f,r8
- 	mov	r9,r4
- 	shll2	r9
-@@ -158,6 +166,10 @@ ENTRY(exception_handler)
- 	cmp/eq	r9,r8
- 	bf	3f
- 	mov.l	8f,r8		! unhandled exception
-+#if defined(CONFIG_SH_FPU)
-+10:
-+	mov.l	9f, r8		! unhandled exception
-+#endif
- 3:
- 	mov.l	5f,r10
- 	jmp	@r8
-@@ -177,7 +189,10 @@ interrupt_entry:
- 6:	.long	ret_from_irq
- 7:	.long	do_IRQ
- 8:	.long	do_exception_error
--	
-+#ifdef CONFIG_SH_FPU
-+9:	.long	fpu_error_trap_handler
-+#endif
++	/* Now that exception vectors are set up reset SR.BL */
++	getcon 	SR, r22
++	movi	SR_UNBLOCK_EXC, r23
++	and	r22, r23, r22
++	putcon	r22, SR
 +
- trap_entry:
- 	mov	#0x30,r8
- 	cmp/ge	r8,r9		! vector 0x20-0x2f is systemcall
-@@ -250,7 +265,7 @@ ENTRY(sh_bios_handler)
- 1:	.long	gdb_vbr_vector
- #endif /* CONFIG_SH_STANDARD_BIOS */
- 
--ENTRY(address_error_handler)
-+ENTRY(address_error_trap_handler)
- 	mov	r15,r4				! regs
- 	add	#4,r4
- 	mov	#OFF_PC,r0
-diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
-index ec6adc3..b230eb2 100644
---- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
-+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
-@@ -65,7 +65,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, groups,
--			 NULL, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- 
- static struct plat_sci_port sci_platform_data[] = {
- 	{
-diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
-index 965fa25..b279cdc 100644
---- a/arch/sh/kernel/cpu/sh2a/Makefile
-+++ b/arch/sh/kernel/cpu/sh2a/Makefile
-@@ -6,4 +6,8 @@ obj-y	:= common.o probe.o opcode_helper.o
- 
- common-y	+= $(addprefix ../sh2/, ex.o entry.o)
- 
-+obj-$(CONFIG_SH_FPU)	+= fpu.o
++	addi	SP, 24, SP
++	ptabs	LINK, tr0
++	blink	tr0, ZERO
 +
- obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
-+obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o
-+obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o
-diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
 new file mode 100644
-index 0000000..3feb95a
+index 0000000..30b76a9
 --- /dev/null
-+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
-@@ -0,0 +1,89 @@
++++ b/arch/sh/kernel/cpu/sh5/fpu.c
+@@ -0,0 +1,166 @@
 +/*
-+ * arch/sh/kernel/cpu/sh2a/clock-sh7203.c
-+ *
-+ * SH7203 support for the clock framework
-+ *
-+ *  Copyright (C) 2007 Kieran Bingham (MPC-Data Ltd)
++ * arch/sh/kernel/cpu/sh5/fpu.c
 + *
-+ * Based on clock-sh7263.c
-+ *  Copyright (C) 2006  Yoshinori Sato
++ * Copyright (C) 2001  Manuela Cirronis, Paolo Alberelli
++ * Copyright (C) 2002  STMicroelectronics Limited
++ *   Author : Stuart Menefy
 + *
-+ * Based on clock-sh4.c
-+ *  Copyright (C) 2005  Paul Mundt
++ * Started from SH4 version:
++ *   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + */
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <asm/clock.h>
-+#include <asm/freq.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#include <asm/processor.h>
++#include <asm/user.h>
 +#include <asm/io.h>
 +
-+const static int pll1rate[]={8,12,16,0};
-+const static int pfc_divisors[]={1,2,3,4,6,8,12};
-+#define ifc_divisors pfc_divisors
++/*
++ * Initially load the FPU with signalling NANS.  This bit pattern
++ * has the property that no matter whether considered as single or as
++ * double precision, it still represents a signalling NAN.
++ */
++#define sNAN64		0xFFFFFFFFFFFFFFFFULL
++#define sNAN32		0xFFFFFFFFUL
 +
-+#if (CONFIG_SH_CLK_MD == 0)
-+#define PLL2 (1)
-+#elif (CONFIG_SH_CLK_MD == 1)
-+#define PLL2 (2)
-+#elif (CONFIG_SH_CLK_MD == 2)
-+#define PLL2 (4)
-+#elif (CONFIG_SH_CLK_MD == 3)
-+#define PLL2 (4)
-+#else
-+#error "Illegal Clock Mode!"
-+#endif
++static union sh_fpu_union init_fpuregs = {
++	.hard = {
++		.fp_regs = { [0 ... 63] = sNAN32 },
++		.fpscr = FPSCR_INIT
++	}
++};
 +
-+static void master_clk_init(struct clk *clk)
++void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
 +{
-+	clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2 ;
-+}
++	asm volatile("fst.p     %0, (0*8), fp0\n\t"
++		     "fst.p     %0, (1*8), fp2\n\t"
++		     "fst.p     %0, (2*8), fp4\n\t"
++		     "fst.p     %0, (3*8), fp6\n\t"
++		     "fst.p     %0, (4*8), fp8\n\t"
++		     "fst.p     %0, (5*8), fp10\n\t"
++		     "fst.p     %0, (6*8), fp12\n\t"
++		     "fst.p     %0, (7*8), fp14\n\t"
++		     "fst.p     %0, (8*8), fp16\n\t"
++		     "fst.p     %0, (9*8), fp18\n\t"
++		     "fst.p     %0, (10*8), fp20\n\t"
++		     "fst.p     %0, (11*8), fp22\n\t"
++		     "fst.p     %0, (12*8), fp24\n\t"
++		     "fst.p     %0, (13*8), fp26\n\t"
++		     "fst.p     %0, (14*8), fp28\n\t"
++		     "fst.p     %0, (15*8), fp30\n\t"
++		     "fst.p     %0, (16*8), fp32\n\t"
++		     "fst.p     %0, (17*8), fp34\n\t"
++		     "fst.p     %0, (18*8), fp36\n\t"
++		     "fst.p     %0, (19*8), fp38\n\t"
++		     "fst.p     %0, (20*8), fp40\n\t"
++		     "fst.p     %0, (21*8), fp42\n\t"
++		     "fst.p     %0, (22*8), fp44\n\t"
++		     "fst.p     %0, (23*8), fp46\n\t"
++		     "fst.p     %0, (24*8), fp48\n\t"
++		     "fst.p     %0, (25*8), fp50\n\t"
++		     "fst.p     %0, (26*8), fp52\n\t"
++		     "fst.p     %0, (27*8), fp54\n\t"
++		     "fst.p     %0, (28*8), fp56\n\t"
++		     "fst.p     %0, (29*8), fp58\n\t"
++		     "fst.p     %0, (30*8), fp60\n\t"
++		     "fst.p     %0, (31*8), fp62\n\t"
 +
-+static struct clk_ops sh7203_master_clk_ops = {
-+	.init		= master_clk_init,
-+};
++		     "fgetscr   fr63\n\t"
++		     "fst.s     %0, (32*8), fr63\n\t"
++		: /* no output */
++		: "r" (&tsk->thread.fpu.hard)
++		: "memory");
++}
 +
-+static void module_clk_recalc(struct clk *clk)
++static inline void
++fpload(struct sh_fpu_hard_struct *fpregs)
 +{
-+	int idx = (ctrl_inw(FREQCR) & 0x0007);
-+	clk->rate = clk->parent->rate / pfc_divisors[idx];
++	asm volatile("fld.p     %0, (0*8), fp0\n\t"
++		     "fld.p     %0, (1*8), fp2\n\t"
++		     "fld.p     %0, (2*8), fp4\n\t"
++		     "fld.p     %0, (3*8), fp6\n\t"
++		     "fld.p     %0, (4*8), fp8\n\t"
++		     "fld.p     %0, (5*8), fp10\n\t"
++		     "fld.p     %0, (6*8), fp12\n\t"
++		     "fld.p     %0, (7*8), fp14\n\t"
++		     "fld.p     %0, (8*8), fp16\n\t"
++		     "fld.p     %0, (9*8), fp18\n\t"
++		     "fld.p     %0, (10*8), fp20\n\t"
++		     "fld.p     %0, (11*8), fp22\n\t"
++		     "fld.p     %0, (12*8), fp24\n\t"
++		     "fld.p     %0, (13*8), fp26\n\t"
++		     "fld.p     %0, (14*8), fp28\n\t"
++		     "fld.p     %0, (15*8), fp30\n\t"
++		     "fld.p     %0, (16*8), fp32\n\t"
++		     "fld.p     %0, (17*8), fp34\n\t"
++		     "fld.p     %0, (18*8), fp36\n\t"
++		     "fld.p     %0, (19*8), fp38\n\t"
++		     "fld.p     %0, (20*8), fp40\n\t"
++		     "fld.p     %0, (21*8), fp42\n\t"
++		     "fld.p     %0, (22*8), fp44\n\t"
++		     "fld.p     %0, (23*8), fp46\n\t"
++		     "fld.p     %0, (24*8), fp48\n\t"
++		     "fld.p     %0, (25*8), fp50\n\t"
++		     "fld.p     %0, (26*8), fp52\n\t"
++		     "fld.p     %0, (27*8), fp54\n\t"
++		     "fld.p     %0, (28*8), fp56\n\t"
++		     "fld.p     %0, (29*8), fp58\n\t"
++		     "fld.p     %0, (30*8), fp60\n\t"
++
++		     "fld.s     %0, (32*8), fr63\n\t"
++		     "fputscr   fr63\n\t"
++
++		     "fld.p     %0, (31*8), fp62\n\t"
++		: /* no output */
++		: "r" (fpregs) );
 +}
 +
-+static struct clk_ops sh7203_module_clk_ops = {
-+	.recalc		= module_clk_recalc,
-+};
++void fpinit(struct sh_fpu_hard_struct *fpregs)
++{
++	*fpregs = init_fpuregs.hard;
++}
 +
-+static void bus_clk_recalc(struct clk *clk)
++asmlinkage void
++do_fpu_error(unsigned long ex, struct pt_regs *regs)
 +{
-+	int idx = (ctrl_inw(FREQCR) & 0x0007);
-+	clk->rate = clk->parent->rate / pfc_divisors[idx-2];
++	struct task_struct *tsk = current;
++
++	regs->pc += 4;
++
++	tsk->thread.trap_no = 11;
++	tsk->thread.error_code = 0;
++	force_sig(SIGFPE, tsk);
 +}
 +
-+static struct clk_ops sh7203_bus_clk_ops = {
-+	.recalc		= bus_clk_recalc,
-+};
 +
-+static void cpu_clk_recalc(struct clk *clk)
++asmlinkage void
++do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
 +{
-+	clk->rate = clk->parent->rate;
-+}
++	void die(const char *str, struct pt_regs *regs, long err);
 +
-+static struct clk_ops sh7203_cpu_clk_ops = {
-+	.recalc		= cpu_clk_recalc,
-+};
++	if (! user_mode(regs))
++		die("FPU used in kernel", regs, ex);
 +
-+static struct clk_ops *sh7203_clk_ops[] = {
-+	&sh7203_master_clk_ops,
-+	&sh7203_module_clk_ops,
-+	&sh7203_bus_clk_ops,
-+	&sh7203_cpu_clk_ops,
-+};
++	regs->sr &= ~SR_FD;
 +
-+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
-+{
-+	if (idx < ARRAY_SIZE(sh7203_clk_ops))
-+		*ops = sh7203_clk_ops[idx];
++	if (last_task_used_math == current)
++		return;
++
++	enable_fpu();
++	if (last_task_used_math != NULL)
++		/* Other processes fpu state, save away */
++		save_fpu(last_task_used_math, regs);
++
++        last_task_used_math = current;
++        if (used_math()) {
++                fpload(&current->thread.fpu.hard);
++        } else {
++		/* First time FPU user.  */
++		fpload(&init_fpuregs.hard);
++                set_used_math();
++        }
++	disable_fpu();
 +}
-diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
+diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
 new file mode 100644
-index 0000000..ff99562
+index 0000000..15d167f
 --- /dev/null
-+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
-@@ -0,0 +1,633 @@
++++ b/arch/sh/kernel/cpu/sh5/probe.c
+@@ -0,0 +1,76 @@
 +/*
-+ * Save/restore floating point context for signal handlers.
++ * arch/sh/kernel/cpu/sh5/probe.c
 + *
-+ * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
++ * CPU Subtype Probing for SH-5.
++ *
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003 - 2007  Paul Mundt
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
-+ *
-+ * FIXME! These routines can be optimized in big endian case.
 + */
-+#include <linux/sched.h>
-+#include <linux/signal.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/string.h>
 +#include <asm/processor.h>
-+#include <asm/io.h>
-+
-+/* The PR (precision) bit in the FP Status Register must be clear when
-+ * an frchg instruction is executed, otherwise the instruction is undefined.
-+ * Executing frchg with PR set causes a trap on some SH4 implementations.
-+ */
++#include <asm/cache.h>
 +
-+#define FPSCR_RCHG 0x00000000
++int __init detect_cpu_and_cache_system(void)
++{
++	unsigned long long cir;
 +
++	/* Do peeks in real mode to avoid having to set up a mapping for the
++	   WPC registers. On SH5-101 cut2, such a mapping would be exposed to
++	   an address translation erratum which would make it hard to set up
++	   correctly. */
++	cir = peek_real_address_q(0x0d000008);
++	if ((cir & 0xffff) == 0x5103) {
++		boot_cpu_data.type = CPU_SH5_103;
++	} else if (((cir >> 32) & 0xffff) == 0x51e2) {
++		/* CPU.VCR aliased at CIR address on SH5-101 */
++		boot_cpu_data.type = CPU_SH5_101;
++	} else {
++		boot_cpu_data.type = CPU_SH_NONE;
++	}
 +
-+/*
-+ * Save FPU registers onto task structure.
-+ * Assume called with FPU enabled (SR.FD=0).
-+ */
-+void
-+save_fpu(struct task_struct *tsk, struct pt_regs *regs)
-+{
-+	unsigned long dummy;
++	/*
++	 * First, setup some sane values for the I-cache.
++	 */
++	boot_cpu_data.icache.ways		= 4;
++	boot_cpu_data.icache.sets		= 256;
++	boot_cpu_data.icache.linesz		= L1_CACHE_BYTES;
 +
-+	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
-+	enable_fpu();
-+	asm volatile("sts.l	fpul, @-%0\n\t"
-+		     "sts.l	fpscr, @-%0\n\t"
-+		     "fmov.s	fr15, @-%0\n\t"
-+		     "fmov.s	fr14, @-%0\n\t"
-+		     "fmov.s	fr13, @-%0\n\t"
-+		     "fmov.s	fr12, @-%0\n\t"
-+		     "fmov.s	fr11, @-%0\n\t"
-+		     "fmov.s	fr10, @-%0\n\t"
-+		     "fmov.s	fr9, @-%0\n\t"
-+		     "fmov.s	fr8, @-%0\n\t"
-+		     "fmov.s	fr7, @-%0\n\t"
-+		     "fmov.s	fr6, @-%0\n\t"
-+		     "fmov.s	fr5, @-%0\n\t"
-+		     "fmov.s	fr4, @-%0\n\t"
-+		     "fmov.s	fr3, @-%0\n\t"
-+		     "fmov.s	fr2, @-%0\n\t"
-+		     "fmov.s	fr1, @-%0\n\t"
-+		     "fmov.s	fr0, @-%0\n\t"
-+		     "lds	%3, fpscr\n\t"
-+		     : "=r" (dummy)
-+		     : "0" ((char *)(&tsk->thread.fpu.hard.status)),
-+		       "r" (FPSCR_RCHG),
-+		       "r" (FPSCR_INIT)
-+		     : "memory");
++#if 0
++	/*
++	 * FIXME: This can probably be cleaned up a bit as well.. for example,
++	 * do we really need the way shift _and_ the way_step_shift ?? Judging
++	 * by the existing code, I would guess no.. is there any valid reason
++	 * why we need to be tracking this around?
++	 */
++	boot_cpu_data.icache.way_shift		= 13;
++	boot_cpu_data.icache.entry_shift	= 5;
++	boot_cpu_data.icache.set_shift		= 4;
++	boot_cpu_data.icache.way_step_shift	= 16;
++	boot_cpu_data.icache.asid_shift		= 2;
 +
-+	disable_fpu();
-+	release_fpu(regs);
-+}
++	/*
++	 * way offset = cache size / associativity, so just don't factor in
++	 * associativity in the first place..
++	 */
++	boot_cpu_data.icache.way_ofs	= boot_cpu_data.icache.sets *
++					  boot_cpu_data.icache.linesz;
 +
-+static void
-+restore_fpu(struct task_struct *tsk)
-+{
-+	unsigned long dummy;
++	boot_cpu_data.icache.asid_mask		= 0x3fc;
++	boot_cpu_data.icache.idx_mask		= 0x1fe0;
++	boot_cpu_data.icache.epn_mask		= 0xffffe000;
++#endif
 +
-+	enable_fpu();
-+	asm volatile("fmov.s	@%0+, fr0\n\t"
-+		     "fmov.s	@%0+, fr1\n\t"
-+		     "fmov.s	@%0+, fr2\n\t"
-+		     "fmov.s	@%0+, fr3\n\t"
-+		     "fmov.s	@%0+, fr4\n\t"
-+		     "fmov.s	@%0+, fr5\n\t"
-+		     "fmov.s	@%0+, fr6\n\t"
-+		     "fmov.s	@%0+, fr7\n\t"
-+		     "fmov.s	@%0+, fr8\n\t"
-+		     "fmov.s	@%0+, fr9\n\t"
-+		     "fmov.s	@%0+, fr10\n\t"
-+		     "fmov.s	@%0+, fr11\n\t"
-+		     "fmov.s	@%0+, fr12\n\t"
-+		     "fmov.s	@%0+, fr13\n\t"
-+		     "fmov.s	@%0+, fr14\n\t"
-+		     "fmov.s	@%0+, fr15\n\t"
-+		     "lds.l	@%0+, fpscr\n\t"
-+		     "lds.l	@%0+, fpul\n\t"
-+		     : "=r" (dummy)
-+		     : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
-+		     : "memory");
-+	disable_fpu();
-+}
++	boot_cpu_data.icache.flags		= 0;
 +
-+/*
-+ * Load the FPU with signalling NANS.  This bit pattern we're using
-+ * has the property that no matter wether considered as single or as
-+ * double precission represents signaling NANS.
-+ */
++	/* A trivial starting point.. */
++	memcpy(&boot_cpu_data.dcache,
++	       &boot_cpu_data.icache, sizeof(struct cache_info));
 +
-+static void
-+fpu_init(void)
-+{
-+	enable_fpu();
-+	asm volatile("lds	%0, fpul\n\t"
-+		     "fsts	fpul, fr0\n\t"
-+		     "fsts	fpul, fr1\n\t"
-+		     "fsts	fpul, fr2\n\t"
-+		     "fsts	fpul, fr3\n\t"
-+		     "fsts	fpul, fr4\n\t"
-+		     "fsts	fpul, fr5\n\t"
-+		     "fsts	fpul, fr6\n\t"
-+		     "fsts	fpul, fr7\n\t"
-+		     "fsts	fpul, fr8\n\t"
-+		     "fsts	fpul, fr9\n\t"
-+		     "fsts	fpul, fr10\n\t"
-+		     "fsts	fpul, fr11\n\t"
-+		     "fsts	fpul, fr12\n\t"
-+		     "fsts	fpul, fr13\n\t"
-+		     "fsts	fpul, fr14\n\t"
-+		     "fsts	fpul, fr15\n\t"
-+		     "lds	%2, fpscr\n\t"
-+		     : /* no output */
-+		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
-+	disable_fpu();
++	return 0;
 +}
-+
+diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
+new file mode 100644
+index 0000000..45c351b
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh5/switchto.S
+@@ -0,0 +1,198 @@
 +/*
-+ *	Emulate arithmetic ops on denormalized number for some FPU insns.
-+ */
++ * arch/sh/kernel/cpu/sh5/switchto.S
++ *
++ * sh64 context switch
++ *
++ * Copyright (C) 2004  Richard Curnow
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++*/
 +
-+/* denormalized float * float */
-+static int denormal_mulf(int hx, int hy)
-+{
-+	unsigned int ix, iy;
-+	unsigned long long m, n;
-+	int exp, w;
++	.section .text..SHmedia32,"ax"
++	.little
 +
-+	ix = hx & 0x7fffffff;
-+	iy = hy & 0x7fffffff;
-+	if (iy < 0x00800000 || ix == 0)
-+		return ((hx ^ hy) & 0x80000000);
++	.balign 32
 +
-+	exp = (iy & 0x7f800000) >> 23;
-+	ix &= 0x007fffff;
-+	iy = (iy & 0x007fffff) | 0x00800000;
-+	m = (unsigned long long)ix * iy;
-+	n = m;
-+	w = -1;
-+	while (n) { n >>= 1; w++; }
++	.type sh64_switch_to, at function
++	.global sh64_switch_to
++	.global __sh64_switch_to_end
++sh64_switch_to:
 +
-+	/* FIXME: use guard bits */
-+	exp += w - 126 - 46;
-+	if (exp > 0)
-+		ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);
-+	else if (exp + 22 >= 0)
-+		ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;
-+	else
-+		ix = 0;
++/* Incoming args
++   r2 - prev
++   r3 - &prev->thread
++   r4 - next
++   r5 - &next->thread
 +
-+	ix |= (hx ^ hy) & 0x80000000;
-+	return ix;
-+}
++   Outgoing results
++   r2 - last (=prev) : this just stays in r2 throughout
 +
-+/* denormalized double * double */
-+static void mult64(unsigned long long x, unsigned long long y,
-+		unsigned long long *highp, unsigned long long *lowp)
-+{
-+	unsigned long long sub0, sub1, sub2, sub3;
-+	unsigned long long high, low;
++   Want to create a full (struct pt_regs) on the stack to allow backtracing
++   functions to work.  However, we only need to populate the callee-save
++   register slots in this structure; since we're a function our ancestors must
++   have themselves preserved all caller saved state in the stack.  This saves
++   some wasted effort since we won't need to look at the values.
 +
-+	sub0 = (x >> 32) * (unsigned long) (y >> 32);
-+	sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32);
-+	sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL);
-+	sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL);
-+	low = sub3;
-+	high = 0LL;
-+	sub3 += (sub1 << 32);
-+	if (low > sub3)
-+		high++;
-+	low = sub3;
-+	sub3 += (sub2 << 32);
-+	if (low > sub3)
-+		high++;
-+	low = sub3;
-+	high += (sub1 >> 32) + (sub2 >> 32);
-+	high += sub0;
-+	*lowp = low;
-+	*highp = high;
-+}
++   In particular, all caller-save registers are immediately available for
++   scratch use.
 +
-+static inline long long rshift64(unsigned long long mh,
-+		unsigned long long ml, int n)
-+{
-+	if (n >= 64)
-+		return mh >> (n - 64);
-+	return (mh << (64 - n)) | (ml >> n);
-+}
++*/
 +
-+static long long denormal_muld(long long hx, long long hy)
-+{
-+	unsigned long long ix, iy;
-+	unsigned long long mh, ml, nh, nl;
-+	int exp, w;
++#define FRAME_SIZE (76*8 + 8)
 +
-+	ix = hx & 0x7fffffffffffffffLL;
-+	iy = hy & 0x7fffffffffffffffLL;
-+	if (iy < 0x0010000000000000LL || ix == 0)
-+		return ((hx ^ hy) & 0x8000000000000000LL);
++	movi	FRAME_SIZE, r0
++	sub.l	r15, r0, r15
++	! Do normal-style register save to support backtrace
 +
-+	exp = (iy & 0x7ff0000000000000LL) >> 52;
-+	ix &= 0x000fffffffffffffLL;
-+	iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL;
-+	mult64(ix, iy, &mh, &ml);
-+	nh = mh;
-+	nl = ml;
-+	w = -1;
-+	if (nh) {
-+		while (nh) { nh >>= 1; w++;}
-+		w += 64;
-+	} else
-+		while (nl) { nl >>= 1; w++;}
++	st.l	r15,   0, r18	! save link reg
++	st.l	r15,   4, r14	! save fp
++	add.l	r15, r63, r14	! setup frame pointer
 +
-+	/* FIXME: use guard bits */
-+	exp += w - 1022 - 52 * 2;
-+	if (exp > 0)
-+		ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL)
-+			| ((long long)exp << 52);
-+	else if (exp + 51 >= 0)
-+		ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL;
-+	else
-+		ix = 0;
++	! hopefully this looks normal to the backtrace now.
 +
-+	ix |= (hx ^ hy) & 0x8000000000000000LL;
-+	return ix;
-+}
++	addi.l	r15,   8, r1    ! base of pt_regs
++	addi.l	r1,   24, r0    ! base of pt_regs.regs
++	addi.l	r0, (63*8), r8	! base of pt_regs.trregs
 +
-+/* ix - iy where iy: denormal and ix, iy >= 0 */
-+static int denormal_subf1(unsigned int ix, unsigned int iy)
-+{
-+	int frac;
-+	int exp;
++	/* Note : to be fixed?
++	   struct pt_regs is really designed for holding the state on entry
++	   to an exception, i.e. pc,sr,regs etc.  However, for the context
++	   switch state, some of this is not required.  But the unwinder takes
++	   struct pt_regs * as an arg so we have to build this structure
++	   to allow unwinding switched tasks in show_state() */
 +
-+	if (ix < 0x00800000)
-+		return ix - iy;
++	st.q	r0, ( 9*8), r9
++	st.q	r0, (10*8), r10
++	st.q	r0, (11*8), r11
++	st.q	r0, (12*8), r12
++	st.q	r0, (13*8), r13
++	st.q	r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
++	! the point where the process is left in suspended animation, i.e. current
++	! fp here, not the saved one.
++	st.q	r0, (16*8), r16
 +
-+	exp = (ix & 0x7f800000) >> 23;
-+	if (exp - 1 > 31)
-+		return ix;
-+	iy >>= exp - 1;
-+	if (iy == 0)
-+		return ix;
++	st.q	r0, (24*8), r24
++	st.q	r0, (25*8), r25
++	st.q	r0, (26*8), r26
++	st.q	r0, (27*8), r27
++	st.q	r0, (28*8), r28
++	st.q	r0, (29*8), r29
++	st.q	r0, (30*8), r30
++	st.q	r0, (31*8), r31
++	st.q	r0, (32*8), r32
++	st.q	r0, (33*8), r33
++	st.q	r0, (34*8), r34
++	st.q	r0, (35*8), r35
 +
-+	frac = (ix & 0x007fffff) | 0x00800000;
-+	frac -= iy;
-+	while (frac < 0x00800000) {
-+		if (--exp == 0)
-+			return frac;
-+		frac <<= 1;
-+	}
++	st.q	r0, (44*8), r44
++	st.q	r0, (45*8), r45
++	st.q	r0, (46*8), r46
++	st.q	r0, (47*8), r47
++	st.q	r0, (48*8), r48
++	st.q	r0, (49*8), r49
++	st.q	r0, (50*8), r50
++	st.q	r0, (51*8), r51
++	st.q	r0, (52*8), r52
++	st.q	r0, (53*8), r53
++	st.q	r0, (54*8), r54
++	st.q	r0, (55*8), r55
++	st.q	r0, (56*8), r56
++	st.q	r0, (57*8), r57
++	st.q	r0, (58*8), r58
++	st.q	r0, (59*8), r59
 +
-+	return (exp << 23) | (frac & 0x007fffff);
-+}
++	! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
++	! Use a local label to avoid creating a symbol that will confuse the
++	! backtrace
++	pta	.Lsave_pc, tr0
 +
-+/* ix + iy where iy: denormal and ix, iy >= 0 */
-+static int denormal_addf1(unsigned int ix, unsigned int iy)
-+{
-+	int frac;
-+	int exp;
++	gettr	tr5, r45
++	gettr	tr6, r46
++	gettr	tr7, r47
++	st.q	r8, (5*8), r45
++	st.q	r8, (6*8), r46
++	st.q	r8, (7*8), r47
 +
-+	if (ix < 0x00800000)
-+		return ix + iy;
++	! Now switch context
++	gettr	tr0, r9
++	st.l	r3, 0, r15	! prev->thread.sp
++	st.l	r3, 8, r1	! prev->thread.kregs
++	st.l	r3, 4, r9	! prev->thread.pc
++	st.q	r1, 0, r9	! save prev->thread.pc into pt_regs->pc
 +
-+	exp = (ix & 0x7f800000) >> 23;
-+	if (exp - 1 > 31)
-+		return ix;
-+	iy >>= exp - 1;
-+	if (iy == 0)
-+	  return ix;
++	! Load PC for next task (init value or save_pc later)
++	ld.l	r5, 4, r18	! next->thread.pc
++	! Switch stacks
++	ld.l	r5, 0, r15	! next->thread.sp
++	ptabs	r18, tr0
 +
-+	frac = (ix & 0x007fffff) | 0x00800000;
-+	frac += iy;
-+	if (frac >= 0x01000000) {
-+		frac >>= 1;
-+		++exp;
-+	}
++	! Update current
++	ld.l	r4, 4, r9	! next->thread_info (2nd element of next task_struct)
++	putcon	r9, kcr0	! current = next->thread_info
 +
-+	return (exp << 23) | (frac & 0x007fffff);
-+}
++	! go to save_pc for a reschedule, or the initial thread.pc for a new process
++	blink	tr0, r63
 +
-+static int denormal_addf(int hx, int hy)
-+{
-+	unsigned int ix, iy;
-+	int sign;
++	! Restore (when we come back to a previously saved task)
++.Lsave_pc:
++	addi.l	r15, 32, r0	! r0 = next's regs
++	addi.l	r0, (63*8), r8	! r8 = next's tr_regs
 +
-+	if ((hx ^ hy) & 0x80000000) {
-+		sign = hx & 0x80000000;
-+		ix = hx & 0x7fffffff;
-+		iy = hy & 0x7fffffff;
-+		if (iy < 0x00800000) {
-+			ix = denormal_subf1(ix, iy);
-+			if (ix < 0) {
-+				ix = -ix;
-+				sign ^= 0x80000000;
-+			}
-+		} else {
-+			ix = denormal_subf1(iy, ix);
-+			sign ^= 0x80000000;
-+		}
-+	} else {
-+		sign = hx & 0x80000000;
-+		ix = hx & 0x7fffffff;
-+		iy = hy & 0x7fffffff;
-+		if (iy < 0x00800000)
-+			ix = denormal_addf1(ix, iy);
-+		else
-+			ix = denormal_addf1(iy, ix);
-+	}
++	ld.q	r8, (5*8), r45
++	ld.q	r8, (6*8), r46
++	ld.q	r8, (7*8), r47
++	ptabs	r45, tr5
++	ptabs	r46, tr6
++	ptabs	r47, tr7
 +
-+	return sign | ix;
-+}
++	ld.q	r0, ( 9*8), r9
++	ld.q	r0, (10*8), r10
++	ld.q	r0, (11*8), r11
++	ld.q	r0, (12*8), r12
++	ld.q	r0, (13*8), r13
++	ld.q	r0, (14*8), r14
++	ld.q	r0, (16*8), r16
 +
-+/* ix - iy where iy: denormal and ix, iy >= 0 */
-+static long long denormal_subd1(unsigned long long ix, unsigned long long iy)
-+{
-+	long long frac;
-+	int exp;
++	ld.q	r0, (24*8), r24
++	ld.q	r0, (25*8), r25
++	ld.q	r0, (26*8), r26
++	ld.q	r0, (27*8), r27
++	ld.q	r0, (28*8), r28
++	ld.q	r0, (29*8), r29
++	ld.q	r0, (30*8), r30
++	ld.q	r0, (31*8), r31
++	ld.q	r0, (32*8), r32
++	ld.q	r0, (33*8), r33
++	ld.q	r0, (34*8), r34
++	ld.q	r0, (35*8), r35
 +
-+	if (ix < 0x0010000000000000LL)
-+		return ix - iy;
++	ld.q	r0, (44*8), r44
++	ld.q	r0, (45*8), r45
++	ld.q	r0, (46*8), r46
++	ld.q	r0, (47*8), r47
++	ld.q	r0, (48*8), r48
++	ld.q	r0, (49*8), r49
++	ld.q	r0, (50*8), r50
++	ld.q	r0, (51*8), r51
++	ld.q	r0, (52*8), r52
++	ld.q	r0, (53*8), r53
++	ld.q	r0, (54*8), r54
++	ld.q	r0, (55*8), r55
++	ld.q	r0, (56*8), r56
++	ld.q	r0, (57*8), r57
++	ld.q	r0, (58*8), r58
++	ld.q	r0, (59*8), r59
 +
-+	exp = (ix & 0x7ff0000000000000LL) >> 52;
-+	if (exp - 1 > 63)
-+		return ix;
-+	iy >>= exp - 1;
-+	if (iy == 0)
-+		return ix;
++	! epilogue
++	ld.l	r15, 0, r18
++	ld.l	r15, 4, r14
++	ptabs	r18, tr0
++	movi	FRAME_SIZE, r0
++	add	r15, r0, r15
++	blink	tr0, r63
++__sh64_switch_to_end:
++.LFE1:
++	.size	sh64_switch_to,.LFE1-sh64_switch_to
 +
-+	frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
-+	frac -= iy;
-+	while (frac < 0x0010000000000000LL) {
-+		if (--exp == 0)
-+			return frac;
-+		frac <<= 1;
-+	}
+diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
+new file mode 100644
+index 0000000..119c20a
+--- /dev/null
++++ b/arch/sh/kernel/cpu/sh5/unwind.c
+@@ -0,0 +1,326 @@
++/*
++ * arch/sh/kernel/cpu/sh5/unwind.c
++ *
++ * Copyright (C) 2004  Paul Mundt
++ * Copyright (C) 2004  Richard Curnow
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/kallsyms.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <asm/page.h>
++#include <asm/ptrace.h>
++#include <asm/processor.h>
++#include <asm/io.h>
 +
-+	return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL);
-+}
++static u8 regcache[63];
 +
-+/* ix + iy where iy: denormal and ix, iy >= 0 */
-+static long long denormal_addd1(unsigned long long ix, unsigned long long iy)
++/*
++ * Finding the previous stack frame isn't horribly straightforward as it is
++ * on some other platforms. In the sh64 case, we don't have "linked" stack
++ * frames, so we need to do a bit of work to determine the previous frame,
++ * and in turn, the previous r14/r18 pair.
++ *
++ * There are generally a few cases which determine where we can find out
++ * the r14/r18 values. In the general case, this can be determined by poking
++ * around the prologue of the symbol PC is in (note that we absolutely must
++ * have frame pointer support as well as the kernel symbol table mapped,
++ * otherwise we can't even get this far).
++ *
++ * In other cases, such as the interrupt/exception path, we can poke around
++ * the sp/fp.
++ *
++ * Notably, this entire approach is somewhat error prone, and in the event
++ * that the previous frame cannot be determined, that's all we can do.
++ * Either way, this still leaves us with a more correct backtrace than what
++ * we would be able to come up with by walking the stack (which is garbage
++ * for anything beyond the first frame).
++ *						-- PFM.
++ */
++static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
++		      unsigned long *pprev_fp, unsigned long *pprev_pc,
++		      struct pt_regs *regs)
 +{
-+	long long frac;
-+	long long exp;
++	const char *sym;
++	char namebuf[128];
++	unsigned long offset;
++	unsigned long prologue = 0;
++	unsigned long fp_displacement = 0;
++	unsigned long fp_prev = 0;
++	unsigned long offset_r14 = 0, offset_r18 = 0;
++	int i, found_prologue_end = 0;
 +
-+	if (ix < 0x0010000000000000LL)
-+		return ix + iy;
++	sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
++	if (!sym)
++		return -EINVAL;
 +
-+	exp = (ix & 0x7ff0000000000000LL) >> 52;
-+	if (exp - 1 > 63)
-+		return ix;
-+	iy >>= exp - 1;
-+	if (iy == 0)
-+	  return ix;
++	prologue = pc - offset;
++	if (!prologue)
++		return -EINVAL;
 +
-+	frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
-+	frac += iy;
-+	if (frac >= 0x0020000000000000LL) {
-+		frac >>= 1;
-+		++exp;
++	/* Validate fp, to avoid risk of dereferencing a bad pointer later.
++	   Assume 128Mb since that's the amount of RAM on a Cayman.  Modify
++	   when there is an SH-5 board with more. */
++	if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
++	    (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
++	    ((fp & 7) != 0)) {
++		return -EINVAL;
 +	}
 +
-+	return (exp << 52) | (frac & 0x000fffffffffffffLL);
-+}
++	/*
++	 * Depth to walk, depth is completely arbitrary.
++	 */
++	for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
++		unsigned long op;
++		u8 major, minor;
++		u8 src, dest, disp;
 +
-+static long long denormal_addd(long long hx, long long hy)
-+{
-+	unsigned long long ix, iy;
-+	long long sign;
++		op = *(unsigned long *)prologue;
 +
-+	if ((hx ^ hy) & 0x8000000000000000LL) {
-+		sign = hx & 0x8000000000000000LL;
-+		ix = hx & 0x7fffffffffffffffLL;
-+		iy = hy & 0x7fffffffffffffffLL;
-+		if (iy < 0x0010000000000000LL) {
-+			ix = denormal_subd1(ix, iy);
-+			if (ix < 0) {
-+				ix = -ix;
-+				sign ^= 0x8000000000000000LL;
-+			}
-+		} else {
-+			ix = denormal_subd1(iy, ix);
-+			sign ^= 0x8000000000000000LL;
-+		}
-+	} else {
-+		sign = hx & 0x8000000000000000LL;
-+		ix = hx & 0x7fffffffffffffffLL;
-+		iy = hy & 0x7fffffffffffffffLL;
-+		if (iy < 0x0010000000000000LL)
-+			ix = denormal_addd1(ix, iy);
-+		else
-+			ix = denormal_addd1(iy, ix);
-+	}
++		major = (op >> 26) & 0x3f;
++		src   = (op >> 20) & 0x3f;
++		minor = (op >> 16) & 0xf;
++		disp  = (op >> 10) & 0x3f;
++		dest  = (op >>  4) & 0x3f;
 +
-+	return sign | ix;
-+}
++		/*
++		 * Stack frame creation happens in a number of ways.. in the
++		 * general case when the stack frame is less than 511 bytes,
++		 * it's generally created by an addi or addi.l:
++		 *
++		 *	addi/addi.l r15, -FRAME_SIZE, r15
++		 *
++		 * in the event that the frame size is bigger than this, it's
++		 * typically created using a movi/sub pair as follows:
++		 *
++		 *	movi	FRAME_SIZE, rX
++		 *	sub	r15, rX, r15
++		 */
 +
-+/**
-+ *	denormal_to_double - Given denormalized float number,
-+ *	                     store double float
-+ *
-+ *	@fpu: Pointer to sh_fpu_hard structure
-+ *	@n: Index to FP register
-+ */
-+static void
-+denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
-+{
-+	unsigned long du, dl;
-+	unsigned long x = fpu->fpul;
-+	int exp = 1023 - 126;
++		switch (major) {
++		case (0x00 >> 2):
++			switch (minor) {
++			case 0x8: /* add.l */
++			case 0x9: /* add */
++				/* Look for r15, r63, r14 */
++				if (src == 15 && disp == 63 && dest == 14)
++					found_prologue_end = 1;
 +
-+	if (x != 0 && (x & 0x7f800000) == 0) {
-+		du = (x & 0x80000000);
-+		while ((x & 0x00800000) == 0) {
-+			x <<= 1;
-+			exp--;
-+		}
-+		x &= 0x007fffff;
-+		du |= (exp << 20) | (x >> 3);
-+		dl = x << 29;
++				break;
++			case 0xa: /* sub.l */
++			case 0xb: /* sub */
++				if (src != 15 || dest != 15)
++					continue;
 +
-+		fpu->fp_regs[n] = du;
-+		fpu->fp_regs[n+1] = dl;
-+	}
-+}
++				fp_displacement -= regcache[disp];
++				fp_prev = fp - fp_displacement;
++				break;
++			}
++			break;
++		case (0xa8 >> 2): /* st.l */
++			if (src != 15)
++				continue;
 +
-+/**
-+ *	ieee_fpe_handler - Handle denormalized number exception
-+ *
-+ *	@regs: Pointer to register structure
-+ *
-+ *	Returns 1 when it's handled (should not cause exception).
-+ */
-+static int
-+ieee_fpe_handler (struct pt_regs *regs)
-+{
-+	unsigned short insn = *(unsigned short *) regs->pc;
-+	unsigned short finsn;
-+	unsigned long nextpc;
-+	int nib[4] = {
-+		(insn >> 12) & 0xf,
-+		(insn >> 8) & 0xf,
-+		(insn >> 4) & 0xf,
-+		insn & 0xf};
++			switch (dest) {
++			case 14:
++				if (offset_r14 || fp_displacement == 0)
++					continue;
 +
-+	if (nib[0] == 0xb ||
-+	    (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
-+		regs->pr = regs->pc + 4;
-+	if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
-+		nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
-+		finsn = *(unsigned short *) (regs->pc + 2);
-+	} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
-+		if (regs->sr & 1)
-+			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
-+		else
-+			nextpc = regs->pc + 4;
-+		finsn = *(unsigned short *) (regs->pc + 2);
-+	} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
-+		if (regs->sr & 1)
-+			nextpc = regs->pc + 4;
-+		else
-+			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
-+		finsn = *(unsigned short *) (regs->pc + 2);
-+	} else if (nib[0] == 0x4 && nib[3] == 0xb &&
-+		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
-+		nextpc = regs->regs[nib[1]];
-+		finsn = *(unsigned short *) (regs->pc + 2);
-+	} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
-+		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
-+		nextpc = regs->pc + 4 + regs->regs[nib[1]];
-+		finsn = *(unsigned short *) (regs->pc + 2);
-+	} else if (insn == 0x000b) { /* rts */
-+		nextpc = regs->pr;
-+		finsn = *(unsigned short *) (regs->pc + 2);
-+	} else {
-+		nextpc = regs->pc + 2;
-+		finsn = insn;
-+	}
++				offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
++				offset_r14 *= sizeof(unsigned long);
++				offset_r14 += fp_displacement;
++				break;
++			case 18:
++				if (offset_r18 || fp_displacement == 0)
++					continue;
 +
-+#define FPSCR_FPU_ERROR (1 << 17)
++				offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
++				offset_r18 *= sizeof(unsigned long);
++				offset_r18 += fp_displacement;
++				break;
++			}
 +
-+	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
-+		struct task_struct *tsk = current;
++			break;
++		case (0xcc >> 2): /* movi */
++			if (dest >= 63) {
++				printk(KERN_NOTICE "%s: Invalid dest reg %d "
++				       "specified in movi handler. Failed "
++				       "opcode was 0x%lx: ", __FUNCTION__,
++				       dest, op);
 +
-+		if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
-+			/* FPU error */
-+			denormal_to_double (&tsk->thread.fpu.hard,
-+					    (finsn >> 8) & 0xf);
-+		} else
-+			return 0;
++				continue;
++			}
 +
-+		regs->pc = nextpc;
-+		return 1;
-+	} else if ((finsn & 0xf00f) == 0xf002) { /* fmul */
-+		struct task_struct *tsk = current;
-+		int fpscr;
-+		int n, m, prec;
-+		unsigned int hx, hy;
++			/* Sign extend */
++			regcache[dest] =
++				((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
++			break;
++		case (0xd0 >> 2): /* addi */
++		case (0xd4 >> 2): /* addi.l */
++			/* Look for r15, -FRAME_SIZE, r15 */
++			if (src != 15 || dest != 15)
++				continue;
 +
-+		n = (finsn >> 8) & 0xf;
-+		m = (finsn >> 4) & 0xf;
-+		hx = tsk->thread.fpu.hard.fp_regs[n];
-+		hy = tsk->thread.fpu.hard.fp_regs[m];
-+		fpscr = tsk->thread.fpu.hard.fpscr;
-+		prec = fpscr & (1 << 19);
++			/* Sign extended frame size.. */
++			fp_displacement +=
++				(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
++			fp_prev = fp - fp_displacement;
++			break;
++		}
 +
-+		if ((fpscr & FPSCR_FPU_ERROR)
-+		     && (prec && ((hx & 0x7fffffff) < 0x00100000
-+				   || (hy & 0x7fffffff) < 0x00100000))) {
-+			long long llx, lly;
++		if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
++			break;
++	}
 +
-+			/* FPU error because of denormal */
-+			llx = ((long long) hx << 32)
-+			       | tsk->thread.fpu.hard.fp_regs[n+1];
-+			lly = ((long long) hy << 32)
-+			       | tsk->thread.fpu.hard.fp_regs[m+1];
-+			if ((hx & 0x7fffffff) >= 0x00100000)
-+				llx = denormal_muld(lly, llx);
-+			else
-+				llx = denormal_muld(llx, lly);
-+			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-+			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
-+		} else if ((fpscr & FPSCR_FPU_ERROR)
-+		     && (!prec && ((hx & 0x7fffffff) < 0x00800000
-+				   || (hy & 0x7fffffff) < 0x00800000))) {
-+			/* FPU error because of denormal */
-+			if ((hx & 0x7fffffff) >= 0x00800000)
-+				hx = denormal_mulf(hy, hx);
-+			else
-+				hx = denormal_mulf(hx, hy);
-+			tsk->thread.fpu.hard.fp_regs[n] = hx;
-+		} else
-+			return 0;
++	if (offset_r14 == 0 || fp_prev == 0) {
++		if (!offset_r14)
++			pr_debug("Unable to find r14 offset\n");
++		if (!fp_prev)
++			pr_debug("Unable to find previous fp\n");
 +
-+		regs->pc = nextpc;
-+		return 1;
-+	} else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */
-+		struct task_struct *tsk = current;
-+		int fpscr;
-+		int n, m, prec;
-+		unsigned int hx, hy;
++		return -EINVAL;
++	}
 +
-+		n = (finsn >> 8) & 0xf;
-+		m = (finsn >> 4) & 0xf;
-+		hx = tsk->thread.fpu.hard.fp_regs[n];
-+		hy = tsk->thread.fpu.hard.fp_regs[m];
-+		fpscr = tsk->thread.fpu.hard.fpscr;
-+		prec = fpscr & (1 << 19);
++	/* For innermost leaf function, there might not be an offset_r18 */
++	if (!*pprev_pc && (offset_r18 == 0))
++		return -EINVAL;
 +
-+		if ((fpscr & FPSCR_FPU_ERROR)
-+		     && (prec && ((hx & 0x7fffffff) < 0x00100000
-+				   || (hy & 0x7fffffff) < 0x00100000))) {
-+			long long llx, lly;
++	*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
 +
-+			/* FPU error because of denormal */
-+			llx = ((long long) hx << 32)
-+			       | tsk->thread.fpu.hard.fp_regs[n+1];
-+			lly = ((long long) hy << 32)
-+			       | tsk->thread.fpu.hard.fp_regs[m+1];
-+			if ((finsn & 0xf00f) == 0xf000)
-+				llx = denormal_addd(llx, lly);
-+			else
-+				llx = denormal_addd(llx, lly ^ (1LL << 63));
-+			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-+			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
-+		} else if ((fpscr & FPSCR_FPU_ERROR)
-+		     && (!prec && ((hx & 0x7fffffff) < 0x00800000
-+				   || (hy & 0x7fffffff) < 0x00800000))) {
-+			/* FPU error because of denormal */
-+			if ((finsn & 0xf00f) == 0xf000)
-+				hx = denormal_addf(hx, hy);
-+			else
-+				hx = denormal_addf(hx, hy ^ 0x80000000);
-+			tsk->thread.fpu.hard.fp_regs[n] = hx;
-+		} else
-+			return 0;
++	if (offset_r18)
++		*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
 +
-+		regs->pc = nextpc;
-+		return 1;
-+	}
++	*pprev_pc &= ~1;
 +
 +	return 0;
 +}
 +
-+BUILD_TRAP_HANDLER(fpu_error)
-+{
-+	struct task_struct *tsk = current;
-+	TRAP_HANDLER_DECL;
++/* Don't put this on the stack since we'll want to call sh64_unwind
++ * when we're close to underflowing the stack anyway. */
++static struct pt_regs here_regs;
 +
-+	save_fpu(tsk, regs);
-+	if (ieee_fpe_handler(regs)) {
-+		tsk->thread.fpu.hard.fpscr &=
-+			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
-+		grab_fpu(regs);
-+		restore_fpu(tsk);
-+		set_tsk_thread_flag(tsk, TIF_USEDFPU);
-+		return;
-+	}
++extern const char syscall_ret;
++extern const char ret_from_syscall;
++extern const char ret_from_exception;
++extern const char ret_from_irq;
 +
-+	force_sig(SIGFPE, tsk);
-+}
++static void sh64_unwind_inner(struct pt_regs *regs);
 +
-+BUILD_TRAP_HANDLER(fpu_state_restore)
++static void unwind_nested (unsigned long pc, unsigned long fp)
 +{
-+	struct task_struct *tsk = current;
-+	TRAP_HANDLER_DECL;
-+
-+	grab_fpu(regs);
-+	if (!user_mode(regs)) {
-+		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
-+		return;
-+	}
-+
-+	if (used_math()) {
-+		/* Using the FPU again.  */
-+		restore_fpu(tsk);
-+	} else	{
-+		/* First time FPU user.  */
-+		fpu_init();
-+		set_used_math();
++	if ((fp >= __MEMORY_START) &&
++	    ((fp & 7) == 0)) {
++		sh64_unwind_inner((struct pt_regs *) fp);
 +	}
-+	set_tsk_thread_flag(tsk, TIF_USEDFPU);
 +}
-diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
-index 6d02465..6910e26 100644
---- a/arch/sh/kernel/cpu/sh2a/probe.c
-+++ b/arch/sh/kernel/cpu/sh2a/probe.c
-@@ -3,25 +3,36 @@
-  *
-  * CPU Subtype Probing for SH-2A.
-  *
-- * Copyright (C) 2004, 2005 Paul Mundt
-+ * Copyright (C) 2004 - 2007  Paul Mundt
-  *
-  * This file is subject to the terms and conditions of the GNU General Public
-  * License.  See the file "COPYING" in the main directory of this archive
-  * for more details.
-  */
--
- #include <linux/init.h>
- #include <asm/processor.h>
- #include <asm/cache.h>
- 
- int __init detect_cpu_and_cache_system(void)
- {
--	/* Just SH7206 for now .. */
--	boot_cpu_data.type			= CPU_SH7206;
-+	/* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
- 	boot_cpu_data.flags			|= CPU_HAS_OP32;
- 
-+#if defined(CONFIG_CPU_SUBTYPE_SH7203)
-+	boot_cpu_data.type			= CPU_SH7203;
-+	/* SH7203 has an FPU.. */
-+	boot_cpu_data.flags			|= CPU_HAS_FPU;
-+#elif defined(CONFIG_CPU_SUBTYPE_SH7263)
-+	boot_cpu_data.type			= CPU_SH7263;
-+	boot_cpu_data.flags			|= CPU_HAS_FPU;
-+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
-+	boot_cpu_data.type			= CPU_SH7206;
-+	/* While SH7206 has a DSP.. */
-+	boot_cpu_data.flags			|= CPU_HAS_DSP;
-+#endif
-+
- 	boot_cpu_data.dcache.ways		= 4;
--	boot_cpu_data.dcache.way_incr	= (1 << 11);
-+	boot_cpu_data.dcache.way_incr		= (1 << 11);
- 	boot_cpu_data.dcache.sets		= 128;
- 	boot_cpu_data.dcache.entry_shift	= 4;
- 	boot_cpu_data.dcache.linesz		= L1_CACHE_BYTES;
-@@ -37,4 +48,3 @@ int __init detect_cpu_and_cache_system(void)
- 
- 	return 0;
- }
--
-diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
-new file mode 100644
-index 0000000..db6ef5c
---- /dev/null
-+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
-@@ -0,0 +1,319 @@
-+/*
-+ * SH7203 and SH7263 Setup
-+ *
-+ *  Copyright (C) 2007  Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/platform_device.h>
-+#include <linux/init.h>
-+#include <linux/serial.h>
-+#include <asm/sci.h>
-+
-+enum {
-+	UNUSED = 0,
 +
-+	/* interrupt sources */
-+	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
-+	PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
-+	DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI,
-+	DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI,
-+	DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI,
-+	DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI,
-+	USB, LCDC, CMT0, CMT1, BSC, WDT,
-+	MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D,
-+	MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F,
-+	MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U,
-+	MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U,
-+	MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V,
-+	MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V,
-+	ADC_ADI,
-+	IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI,
-+	IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI,
-+	IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI,
-+	IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI, IIC33_TEI,
-+	SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
-+	SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
-+	SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
-+	SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
-+	SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI,
-+	SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI,
-+	SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
++static void sh64_unwind_inner(struct pt_regs *regs)
++{
++	unsigned long pc, fp;
++	int ofs = 0;
++	int first_pass;
 +
-+	/* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
-+	ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, ROMDEC_IBUF,
-+	ROMDEC_IREADY,
++	pc = regs->pc & ~1;
++	fp = regs->regs[14];
 +
-+	FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
++	first_pass = 1;
++	for (;;) {
++		int cond;
++		unsigned long next_fp, next_pc;
 +
-+	SDHI3, SDHI0, SDHI1,
++		if (pc == ((unsigned long) &syscall_ret & ~1)) {
++			printk("SYSCALL\n");
++			unwind_nested(pc,fp);
++			return;
++		}
 +
-+	RTC_ARM, RTC_PRD, RTC_CUP,
-+	RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, RCAN0_SLE,
-+	RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, RCAN1_SLE,
++		if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
++			printk("SYSCALL (PREEMPTED)\n");
++			unwind_nested(pc,fp);
++			return;
++		}
 +
-+	SRC_OVF, SRC_ODFI, SRC_IDEI, IEBI,
++		/* In this case, the PC is discovered by lookup_prev_stack_frame but
++		   it has 4 taken off it to look like the 'caller' */
++		if (pc == ((unsigned long) &ret_from_exception & ~1)) {
++			printk("EXCEPTION\n");
++			unwind_nested(pc,fp);
++			return;
++		}
 +
-+	/* interrupt groups */
-+	PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
-+	MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
-+	MTU3_ABCD, MTU4_ABCD,
-+	IIC30, IIC31, IIC32, IIC33, SCIF0, SCIF1, SCIF2, SCIF3,
-+	SSU0, SSU1, ROMDEC, SDHI, FLCTL, RTC, RCAN0, RCAN1, SRC
-+};
++		if (pc == ((unsigned long) &ret_from_irq & ~1)) {
++			printk("IRQ\n");
++			unwind_nested(pc,fp);
++			return;
++		}
 +
-+static struct intc_vect vectors[] __initdata = {
-+	INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
-+	INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
-+	INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
-+	INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
-+	INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
-+	INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
-+	INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
-+	INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
-+	INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109),
-+	INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113),
-+	INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117),
-+	INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121),
-+	INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125),
-+	INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129),
-+	INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133),
-+	INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137),
-+	INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
-+	INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
-+	INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
-+	INTC_IRQ(MTU2_TGI0A, 146), INTC_IRQ(MTU2_TGI0B, 147),
-+	INTC_IRQ(MTU2_TGI0C, 148), INTC_IRQ(MTU2_TGI0D, 149),
-+	INTC_IRQ(MTU2_TCI0V, 150),
-+	INTC_IRQ(MTU2_TGI0E, 151), INTC_IRQ(MTU2_TGI0F, 152),
-+	INTC_IRQ(MTU2_TGI1A, 153), INTC_IRQ(MTU2_TGI1B, 154),
-+	INTC_IRQ(MTU2_TCI1V, 155), INTC_IRQ(MTU2_TCI1U, 156),
-+	INTC_IRQ(MTU2_TGI2A, 157), INTC_IRQ(MTU2_TGI2B, 158),
-+	INTC_IRQ(MTU2_TCI2V, 159), INTC_IRQ(MTU2_TCI2U, 160),
-+	INTC_IRQ(MTU2_TGI3A, 161), INTC_IRQ(MTU2_TGI3B, 162),
-+	INTC_IRQ(MTU2_TGI3C, 163), INTC_IRQ(MTU2_TGI3D, 164),
-+	INTC_IRQ(MTU2_TCI3V, 165),
-+	INTC_IRQ(MTU2_TGI4A, 166), INTC_IRQ(MTU2_TGI4B, 167),
-+	INTC_IRQ(MTU2_TGI4C, 168), INTC_IRQ(MTU2_TGI4D, 169),
-+	INTC_IRQ(MTU2_TCI4V, 170),
-+	INTC_IRQ(ADC_ADI, 171),
-+	INTC_IRQ(IIC30_STPI, 172), INTC_IRQ(IIC30_NAKI, 173),
-+	INTC_IRQ(IIC30_RXI, 174), INTC_IRQ(IIC30_TXI, 175),
-+	INTC_IRQ(IIC30_TEI, 176),
-+	INTC_IRQ(IIC31_STPI, 177), INTC_IRQ(IIC31_NAKI, 178),
-+	INTC_IRQ(IIC31_RXI, 179), INTC_IRQ(IIC31_TXI, 180),
-+	INTC_IRQ(IIC31_TEI, 181),
-+	INTC_IRQ(IIC32_STPI, 182), INTC_IRQ(IIC32_NAKI, 183),
-+	INTC_IRQ(IIC32_RXI, 184), INTC_IRQ(IIC32_TXI, 185),
-+	INTC_IRQ(IIC32_TEI, 186),
-+	INTC_IRQ(IIC33_STPI, 187), INTC_IRQ(IIC33_NAKI, 188),
-+	INTC_IRQ(IIC33_RXI, 189), INTC_IRQ(IIC33_TXI, 190),
-+	INTC_IRQ(IIC33_TEI, 191),
-+	INTC_IRQ(SCIF0_BRI, 192), INTC_IRQ(SCIF0_ERI, 193),
-+	INTC_IRQ(SCIF0_RXI, 194), INTC_IRQ(SCIF0_TXI, 195),
-+	INTC_IRQ(SCIF1_BRI, 196), INTC_IRQ(SCIF1_ERI, 197),
-+	INTC_IRQ(SCIF1_RXI, 198), INTC_IRQ(SCIF1_TXI, 199),
-+	INTC_IRQ(SCIF2_BRI, 200), INTC_IRQ(SCIF2_ERI, 201),
-+	INTC_IRQ(SCIF2_RXI, 202), INTC_IRQ(SCIF2_TXI, 203),
-+	INTC_IRQ(SCIF3_BRI, 204), INTC_IRQ(SCIF3_ERI, 205),
-+	INTC_IRQ(SCIF3_RXI, 206), INTC_IRQ(SCIF3_TXI, 207),
-+	INTC_IRQ(SSU0_SSERI, 208), INTC_IRQ(SSU0_SSRXI, 209),
-+	INTC_IRQ(SSU0_SSTXI, 210),
-+	INTC_IRQ(SSU1_SSERI, 211), INTC_IRQ(SSU1_SSRXI, 212),
-+	INTC_IRQ(SSU1_SSTXI, 213),
-+	INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
-+	INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
-+	INTC_IRQ(FLCTL_FLSTEI, 224), INTC_IRQ(FLCTL_FLTENDI, 225),
-+	INTC_IRQ(FLCTL_FLTREQ0I, 226), INTC_IRQ(FLCTL_FLTREQ1I, 227),
-+	INTC_IRQ(RTC_ARM, 231), INTC_IRQ(RTC_PRD, 232),
-+	INTC_IRQ(RTC_CUP, 233),
-+	INTC_IRQ(RCAN0_ERS, 234), INTC_IRQ(RCAN0_OVR, 235),
-+	INTC_IRQ(RCAN0_RM0, 236), INTC_IRQ(RCAN0_RM1, 237),
-+	INTC_IRQ(RCAN0_SLE, 238),
-+	INTC_IRQ(RCAN1_ERS, 239), INTC_IRQ(RCAN1_OVR, 240),
-+	INTC_IRQ(RCAN1_RM0, 241), INTC_IRQ(RCAN1_RM1, 242),
-+	INTC_IRQ(RCAN1_SLE, 243),
++		cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
++			((pc & 3) == 0) && ((fp & 7) == 0));
 +
-+	/* SH7263-specific trash */
-+#ifdef CONFIG_CPU_SUBTYPE_SH7263
-+	INTC_IRQ(ROMDEC_ISY, 218), INTC_IRQ(ROMDEC_IERR, 219),
-+	INTC_IRQ(ROMDEC_IARG, 220), INTC_IRQ(ROMDEC_ISEC, 221),
-+	INTC_IRQ(ROMDEC_IBUF, 222), INTC_IRQ(ROMDEC_IREADY, 223),
++		pc -= ofs;
 +
-+	INTC_IRQ(SDHI3, 228), INTC_IRQ(SDHI0, 229), INTC_IRQ(SDHI1, 230),
++		printk("[<%08lx>] ", pc);
++		print_symbol("%s\n", pc);
 +
-+	INTC_IRQ(SRC_OVF, 244), INTC_IRQ(SRC_ODFI, 245),
-+	INTC_IRQ(SRC_IDEI, 246),
++		if (first_pass) {
++			/* If the innermost frame is a leaf function, it's
++			 * possible that r18 is never saved out to the stack.
++			 */
++			next_pc = regs->regs[18];
++		} else {
++			next_pc = 0;
++		}
 +
-+	INTC_IRQ(IEBI, 247),
-+#endif
-+};
++		if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
++			ofs = sizeof(unsigned long);
++			pc = next_pc & ~1;
++			fp = next_fp;
++		} else {
++			printk("Unable to lookup previous stack frame\n");
++			break;
++		}
++		first_pass = 0;
++	}
 +
-+static struct intc_group groups[] __initdata = {
-+	INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
-+		   PINT4, PINT5, PINT6, PINT7),
-+	INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI),
-+	INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI),
-+	INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI),
-+	INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI),
-+	INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI),
-+	INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI),
-+	INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI),
-+	INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI),
-+	INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D),
-+	INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F),
-+	INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B),
-+	INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U),
-+	INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B),
-+	INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U),
-+	INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D),
-+	INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D),
-+	INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI,
-+		   IIC30_TEI),
-+	INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI,
-+		   IIC31_TEI),
-+	INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI,
-+		   IIC32_TEI),
-+	INTC_GROUP(IIC33, IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI,
-+		   IIC33_TEI),
-+	INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
-+	INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
-+	INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
-+	INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
-+	INTC_GROUP(SSU0, SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI),
-+	INTC_GROUP(SSU1, SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI),
-+	INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I,
-+		   FLCTL_FLTREQ1I),
-+	INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP),
-+	INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1,
-+		   RCAN0_SLE),
-+	INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1,
-+		   RCAN1_SLE),
++	printk("\n");
 +
-+#ifdef CONFIG_CPU_SUBTYPE_SH7263
-+	INTC_GROUP(ROMDEC, ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG,
-+		   ROMDEC_ISEC, ROMDEC_IBUF, ROMDEC_IREADY),
-+	INTC_GROUP(SDHI, SDHI3, SDHI0, SDHI1),
-+	INTC_GROUP(SRC, SRC_OVF, SRC_ODFI, SRC_IDEI),
-+#endif
-+};
++}
 +
-+static struct intc_prio_reg prio_registers[] __initdata = {
-+	{ 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
-+	{ 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
-+	{ 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
-+	{ 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
-+	{ 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
-+	{ 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } },
-+	{ 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
-+	{ 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB,
-+					      MTU2_VU } },
-+	{ 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD,
-+					      MTU2_TCI4V } },
-+	{ 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } },
-+	{ 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } },
-+	{ 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } },
-+#ifdef CONFIG_CPU_SUBTYPE_SH7203
-+	{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
-+					      SSI3_SSII, 0 } },
-+	{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } },
-+	{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } },
-+#else
-+	{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
-+					      SSI3_SSII, ROMDEC } },
-+	{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } },
-+	{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } },
-+#endif
-+};
++void sh64_unwind(struct pt_regs *regs)
++{
++	if (!regs) {
++		/*
++		 * Fetch current regs if we have no other saved state to back
++		 * trace from.
++		 */
++		regs = &here_regs;
 +
-+static struct intc_mask_reg mask_registers[] __initdata = {
-+	{ 0xfffe0808, 0, 16, /* PINTER */
-+	  { 0, 0, 0, 0, 0, 0, 0, 0,
-+	    PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
-+};
++		__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
++		__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
++		__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
 +
-+static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
-+			 mask_registers, prio_registers, NULL);
++		__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
++		__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
++		__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
++		__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
++		__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
++		__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
++		__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
++		__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
 +
-+static struct plat_sci_port sci_platform_data[] = {
-+	{
-+		.mapbase	= 0xfffe8000,
-+		.flags		= UPF_BOOT_AUTOCONF,
-+		.type		= PORT_SCIF,
-+		.irqs		=  { 193, 194, 195, 192 },
-+	}, {
-+		.mapbase	= 0xfffe8800,
-+		.flags		= UPF_BOOT_AUTOCONF,
-+		.type		= PORT_SCIF,
-+		.irqs		=  { 197, 198, 199, 196 },
-+	}, {
-+		.mapbase	= 0xfffe9000,
-+		.flags		= UPF_BOOT_AUTOCONF,
-+		.type		= PORT_SCIF,
-+		.irqs		=  { 201, 202, 203, 200 },
-+	}, {
-+		.mapbase	= 0xfffe9800,
-+		.flags		= UPF_BOOT_AUTOCONF,
-+		.type		= PORT_SCIF,
-+		.irqs		=  { 205, 206, 207, 204 },
-+	}, {
-+		.flags = 0,
++		__asm__ __volatile__ (
++			"pta 0f, tr0\n\t"
++			"blink tr0, %0\n\t"
++			"0: nop"
++			: "=r" (regs->pc)
++		);
 +	}
-+};
 +
-+static struct platform_device sci_device = {
-+	.name		= "sh-sci",
-+	.id		= -1,
-+	.dev		= {
-+		.platform_data	= sci_platform_data,
-+	},
-+};
++	printk("\nCall Trace:\n");
++	sh64_unwind_inner(regs);
++}
 +
-+static struct resource rtc_resources[] = {
-+	[0] = {
-+		.start	= 0xffff2000,
-+		.end	= 0xffff2000 + 0x58 - 1,
-+		.flags	= IORESOURCE_IO,
-+	},
-+	[1] = {
-+		/* Period IRQ */
-+		.start	= 232,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+	[2] = {
-+		/* Carry IRQ */
-+		.start	= 233,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+	[3] = {
-+		/* Alarm IRQ */
-+		.start	= 231,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+};
+diff --git a/arch/sh/kernel/dump_task.c b/arch/sh/kernel/dump_task.c
+new file mode 100644
+index 0000000..4a8a408
+--- /dev/null
++++ b/arch/sh/kernel/dump_task.c
+@@ -0,0 +1,31 @@
++#include <linux/elfcore.h>
++#include <linux/sched.h>
 +
-+static struct platform_device rtc_device = {
-+	.name		= "sh-rtc",
-+	.id		= -1,
-+	.num_resources	= ARRAY_SIZE(rtc_resources),
-+	.resource	= rtc_resources,
-+};
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++	struct pt_regs ptregs;
 +
-+static struct platform_device *sh7203_devices[] __initdata = {
-+	&sci_device,
-+	&rtc_device,
-+};
++	ptregs = *task_pt_regs(tsk);
++	elf_core_copy_regs(regs, &ptregs);
 +
-+static int __init sh7203_devices_setup(void)
-+{
-+	return platform_add_devices(sh7203_devices,
-+				    ARRAY_SIZE(sh7203_devices));
++	return 1;
 +}
-+__initcall(sh7203_devices_setup);
 +
-+void __init plat_irq_setup(void)
++int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
 +{
-+	register_intc_controller(&intc_desc);
++	int fpvalid = 0;
++
++#if defined(CONFIG_SH_FPU)
++	fpvalid = !!tsk_used_math(tsk);
++	if (fpvalid) {
++		unlazy_fpu(tsk, task_pt_regs(tsk));
++		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
++	}
++#endif
++
++	return fpvalid;
 +}
-diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
-index bd745aa..a564425 100644
---- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
-+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
-@@ -167,7 +167,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
- };
++
+diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
+index 2f30977..957f256 100644
+--- a/arch/sh/kernel/early_printk.c
++++ b/arch/sh/kernel/early_printk.c
+@@ -63,7 +63,8 @@ static struct console bios_console = {
+ #include <linux/serial_core.h>
+ #include "../../../drivers/serial/sh-sci.h"
  
- static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
--			 NULL, mask_registers, prio_registers, NULL);
-+			 mask_registers, prio_registers, NULL);
+-#if defined(CONFIG_CPU_SUBTYPE_SH7720)
++#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
++    defined(CONFIG_CPU_SUBTYPE_SH7721)
+ #define EPK_SCSMR_VALUE 0x000
+ #define EPK_SCBRR_VALUE 0x00C
+ #define EPK_FIFO_SIZE 64
+@@ -117,7 +118,8 @@ static struct console scif_console = {
+ };
  
- static struct plat_sci_port sci_platform_data[] = {
- 	{
-diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
-index 646eb69..3ae4d91 100644
---- a/arch/sh/kernel/cpu/sh3/Makefile
-+++ b/arch/sh/kernel/cpu/sh3/Makefile
-@@ -13,6 +13,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7709)	+= setup-sh770x.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7710)	+= setup-sh7710.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7712)	+= setup-sh7710.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7720)	+= setup-sh7720.o
-+obj-$(CONFIG_CPU_SUBTYPE_SH7721)	+= setup-sh7720.o
+ #if !defined(CONFIG_SH_STANDARD_BIOS)
+-#if defined(CONFIG_CPU_SUBTYPE_SH7720)
++#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
++    defined(CONFIG_CPU_SUBTYPE_SH7721)
+ static void scif_sercon_init(char *s)
+ {
+ 	sci_out(&scif_port, SCSCR, 0x0000);	/* clear TE and RE */
+@@ -208,10 +210,12 @@ static int __init setup_early_printk(char *buf)
+ 	if (!strncmp(buf, "serial", 6)) {
+ 		early_console = &scif_console;
  
- # Primary on-chip clocks (common)
- clock-$(CONFIG_CPU_SH3)			:= clock-sh3.o
-@@ -21,5 +22,6 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7706)	:= clock-sh7706.o
- clock-$(CONFIG_CPU_SUBTYPE_SH7709)	:= clock-sh7709.o
- clock-$(CONFIG_CPU_SUBTYPE_SH7710)	:= clock-sh7710.o
- clock-$(CONFIG_CPU_SUBTYPE_SH7720)	:= clock-sh7710.o
-+clock-$(CONFIG_CPU_SUBTYPE_SH7712)	:= clock-sh7712.o
+-#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720)) && \
+-    !defined(CONFIG_SH_STANDARD_BIOS)
++#if !defined(CONFIG_SH_STANDARD_BIOS)
++#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \
++    defined(CONFIG_CPU_SUBTYPE_SH7721)
+ 		scif_sercon_init(buf + 6);
+ #endif
++#endif
+ 	}
+ #endif
  
- obj-y	+= $(clock-y)
-diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
+diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
+index e0317ed..926b2e7 100644
+--- a/arch/sh/kernel/entry-common.S
++++ b/arch/sh/kernel/entry-common.S
+@@ -176,25 +176,6 @@ work_notifysig:
+ 	jmp	@r1
+ 	 lds	r0, pr
+ work_resched:
+-#if defined(CONFIG_GUSA) && !defined(CONFIG_PREEMPT)
+-	! gUSA handling
+-	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
+-	mov	r0, r1
+-	shll	r0
+-	bf/s	1f
+-	 shll	r0
+-	bf/s	1f
+-	 mov	#OFF_PC, r0
+-	! 				  SP >= 0xc0000000 : gUSA mark
+-	mov.l	@(r0,r15), r2		! get user space PC (program counter)
+-	mov.l	@(OFF_R0,r15), r3	! end point
+-	cmp/hs	r3, r2			! r2 >= r3? 
+-	bt	1f
+-	add	r3, r1			! rewind point #2
+-	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
+-	!
+-1:
+-#endif
+ 	mov.l	1f, r1
+ 	jsr	@r1				! schedule
+ 	 nop
+@@ -224,7 +205,7 @@ work_resched:
+ syscall_exit_work:
+ 	! r0: current_thread_info->flags
+ 	! r8: current_thread_info
+-	tst	#_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP, r0
++	tst	#_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT, r0
+ 	bt/s	work_pending
+ 	 tst	#_TIF_NEED_RESCHED, r0
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -234,6 +215,8 @@ syscall_exit_work:
+ #endif
+ 	sti
+ 	! XXX setup arguments...
++	mov	r15, r4
++	mov	#1, r5
+ 	mov.l	4f, r0			! do_syscall_trace
+ 	jsr	@r0
+ 	 nop
+@@ -244,6 +227,8 @@ syscall_exit_work:
+ syscall_trace_entry:
+ 	!                     	Yes it is traced.
+ 	! XXX setup arguments...
++	mov     r15, r4
++	mov     #0, r5
+ 	mov.l	4f, r11		! Call do_syscall_trace which notifies
+ 	jsr	@r11	    	! superior (will chomp R[0-7])
+ 	 nop
+@@ -366,7 +351,7 @@ ENTRY(system_call)
+ 	!
+ 	get_current_thread_info r8, r10
+ 	mov.l	@(TI_FLAGS,r8), r8
+-	mov	#_TIF_SYSCALL_TRACE, r10
++	mov	#(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT), r10
+ 	tst	r10, r8
+ 	bf	syscall_trace_entry
+ 	!
+diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
+deleted file mode 100644
+index 3338239..0000000
+--- a/arch/sh/kernel/head.S
++++ /dev/null
+@@ -1,120 +0,0 @@
+-/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
+- *
+- *  arch/sh/kernel/head.S
+- *
+- *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * Head.S contains the SH exception handlers and startup code.
+- */
+-#include <linux/linkage.h>
+-#include <asm/thread_info.h>
+-
+-#ifdef CONFIG_CPU_SH4A
+-#define SYNCO()		synco
+-
+-#define PREFI(label, reg)	\
+-	mov.l	label, reg;	\
+-	prefi	@reg
+-#else
+-#define SYNCO()
+-#define PREFI(label, reg)
+-#endif
+-
+-	.section	.empty_zero_page, "aw"
+-ENTRY(empty_zero_page)
+-	.long	1		/* MOUNT_ROOT_RDONLY */
+-	.long	0		/* RAMDISK_FLAGS */
+-	.long	0x0200		/* ORIG_ROOT_DEV */
+-	.long	1		/* LOADER_TYPE */
+-	.long	0x00360000	/* INITRD_START */
+-	.long	0x000a0000	/* INITRD_SIZE */
+-	.long	0
+-1:
+-	.skip	PAGE_SIZE - empty_zero_page - 1b
+-
+-	.section	.text.head, "ax"
+-
+-/*
+- * Condition at the entry of _stext:
+- *
+- *   BSC has already been initialized.
+- *   INTC may or may not be initialized.
+- *   VBR may or may not be initialized.
+- *   MMU may or may not be initialized.
+- *   Cache may or may not be initialized.
+- *   Hardware (including on-chip modules) may or may not be initialized. 
+- *
+- */
+-ENTRY(_stext)
+-	!			Initialize Status Register
+-	mov.l	1f, r0		! MD=1, RB=0, BL=0, IMASK=0xF
+-	ldc	r0, sr
+-	!			Initialize global interrupt mask
+-#ifdef CONFIG_CPU_HAS_SR_RB
+-	mov	#0, r0
+-	ldc	r0, r6_bank
+-#endif
+-	
+-	/*
+-	 * Prefetch if possible to reduce cache miss penalty.
+-	 *
+-	 * We do this early on for SH-4A as a micro-optimization,
+-	 * as later on we will have speculative execution enabled
+-	 * and this will become less of an issue.
+-	 */
+-	PREFI(5f, r0)
+-	PREFI(6f, r0)
+-
+-	!
+-	mov.l	2f, r0
+-	mov	r0, r15		! Set initial r15 (stack pointer)
+-#ifdef CONFIG_CPU_HAS_SR_RB
+-	mov.l	7f, r0
+-	ldc	r0, r7_bank	! ... and initial thread_info
+-#endif
+-	
+-	!			Clear BSS area
+-#ifdef CONFIG_SMP	
+-	mov.l	3f, r0
+-	cmp/eq	#0, r0		! skip clear if set to zero
+-	bt	10f
+-#endif
+-	
+-	mov.l	3f, r1
+-	add	#4, r1
+-	mov.l	4f, r2
+-	mov	#0, r0
+-9:	cmp/hs	r2, r1
+-	bf/s	9b		! while (r1 < r2)
+-	 mov.l	r0,@-r2
+-
+-10:		
+-	!			Additional CPU initialization
+-	mov.l	6f, r0
+-	jsr	@r0
+-	 nop
+-
+-	SYNCO()			! Wait for pending instructions..
+-	
+-	!			Start kernel
+-	mov.l	5f, r0
+-	jmp	@r0
+-	 nop
+-
+-	.balign 4
+-#if defined(CONFIG_CPU_SH2)
+-1:	.long	0x000000F0		! IMASK=0xF
+-#else
+-1:	.long	0x400080F0		! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
+-#endif
+-ENTRY(stack_start)
+-2:	.long	init_thread_union+THREAD_SIZE
+-3:	.long	__bss_start
+-4:	.long	_end
+-5:	.long	start_kernel
+-6:	.long	sh_cpu_init
+-7:	.long	init_thread_union
+diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
 new file mode 100644
-index 0000000..54f54df
+index 0000000..d67d7ed
 --- /dev/null
-+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
-@@ -0,0 +1,71 @@
-+/*
-+ * arch/sh/kernel/cpu/sh3/clock-sh7712.c
-+ *
-+ * SH7712 support for the clock framework
++++ b/arch/sh/kernel/head_32.S
+@@ -0,0 +1,124 @@
++/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 + *
++ *  Copyright (C) 2007  Andrew Murray <amurray@mpc-data.co.uk>
++ *  arch/sh/kernel/head.S
 + *
-+ * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
-+ *  Copyright (C) 2005  Paul Mundt
++ *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
++ *
++ * Head.S contains the SH exception handlers and startup code.
 + */
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <asm/clock.h>
-+#include <asm/freq.h>
-+#include <asm/io.h>
-+
-+static int multipliers[] = { 1, 2, 3 };
-+static int divisors[]    = { 1, 2, 3, 4, 6 };
-+
-+static void master_clk_init(struct clk *clk)
-+{
-+	int frqcr = ctrl_inw(FRQCR);
-+	int idx = (frqcr & 0x0300) >> 8;
-+
-+	clk->rate *= multipliers[idx];
-+}
-+
-+static struct clk_ops sh7712_master_clk_ops = {
-+	.init		= master_clk_init,
-+};
-+
-+static void module_clk_recalc(struct clk *clk)
-+{
-+	int frqcr = ctrl_inw(FRQCR);
-+	int idx = frqcr & 0x0007;
-+
-+	clk->rate = clk->parent->rate / divisors[idx];
-+}
-+
-+static struct clk_ops sh7712_module_clk_ops = {
-+	.recalc		= module_clk_recalc,
-+};
-+
-+static void cpu_clk_recalc(struct clk *clk)
-+{
-+	int frqcr = ctrl_inw(FRQCR);
-+	int idx = (frqcr & 0x0030) >> 4;
-+
-+	clk->rate = clk->parent->rate / divisors[idx];
-+}
-+
-+static struct clk_ops sh7712_cpu_clk_ops = {
-+	.recalc		= cpu_clk_recalc,
-+};
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
 +
-+static struct clk_ops *sh7712_clk_ops[] = {
-+	&sh7712_master_clk_ops,
-+	&sh7712_module_clk_ops,
-+	&sh7712_cpu_clk_ops,
-+};
++#ifdef CONFIG_CPU_SH4A
++#define SYNCO()		synco
 +
-+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
-+{
-+	if (idx < ARRAY_SIZE(sh7712_clk_ops))
-+		*ops = sh7712_clk_ops[idx];
-+}
++#define PREFI(label, reg)	\
++	mov.l	label, reg;	\
++	prefi	@reg
++#else
++#define SYNCO()
++#define PREFI(label, reg)
++#endif
 +
-diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
-index 0d12a12..4004073 100644
---- a/arch/sh/kernel/cpu/sh3/entry.S
-+++ b/arch/sh/kernel/cpu/sh3/entry.S
-@@ -13,8 +13,9 @@
- #include <linux/linkage.h>
- #include <asm/asm-offsets.h>
- #include <asm/thread_info.h>
--#include <asm/cpu/mmu_context.h>
- #include <asm/unistd.h>
-+#include <asm/cpu/mmu_context.h>
-+#include <asm/page.h>
- 
- ! NOTE:
- ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
-@@ -409,6 +410,27 @@ ENTRY(handle_exception)
- 	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
- 	! save all registers onto stack.
- 	!
++	.section	.empty_zero_page, "aw"
++ENTRY(empty_zero_page)
++	.long	1		/* MOUNT_ROOT_RDONLY */
++	.long	0		/* RAMDISK_FLAGS */
++	.long	0x0200		/* ORIG_ROOT_DEV */
++	.long	1		/* LOADER_TYPE */
++	.long	0x00360000	/* INITRD_START */
++	.long	0x000a0000	/* INITRD_SIZE */
++#ifdef CONFIG_32BIT
++	.long	0x53453f00 + 32	/* "SE?" = 32 bit */
++#else
++	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
++#endif
++1:
++	.skip	PAGE_SIZE - empty_zero_page - 1b
 +
-+#ifdef CONFIG_GUSA
-+	! Check for roll back gRB (User and Kernel)
-+	mov	r15, k0
-+	shll	k0
-+	bf/s	1f
-+	 shll	k0
-+	bf/s	1f
-+	 stc	spc, k1
-+	stc	r0_bank, k0
-+	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
-+	bt/s	2f
-+	 stc	r1_bank, k1
++	.section	.text.head, "ax"
 +
-+	add	#-2, k0
-+	add	r15, k0
-+	ldc	k0, spc		! PC = saved r0 + r15 - 2
-+2:	mov	k1, r15		! SP = r1
-+1:
++/*
++ * Condition at the entry of _stext:
++ *
++ *   BSC has already been initialized.
++ *   INTC may or may not be initialized.
++ *   VBR may or may not be initialized.
++ *   MMU may or may not be initialized.
++ *   Cache may or may not be initialized.
++ *   Hardware (including on-chip modules) may or may not be initialized. 
++ *
++ */
++ENTRY(_stext)
++	!			Initialize Status Register
++	mov.l	1f, r0		! MD=1, RB=0, BL=0, IMASK=0xF
++	ldc	r0, sr
++	!			Initialize global interrupt mask
++#ifdef CONFIG_CPU_HAS_SR_RB
++	mov	#0, r0
++	ldc	r0, r6_bank
 +#endif
++	
++	/*
++	 * Prefetch if possible to reduce cache miss penalty.
++	 *
++	 * We do this early on for SH-4A as a micro-optimization,
++	 * as later on we will have speculative execution enabled
++	 * and this will become less of an issue.
++	 */
++	PREFI(5f, r0)
++	PREFI(6f, r0)
 +
- 	stc	ssr, k0		! Is it from kernel space?
- 	shll	k0		! Check MD bit (bit30) by shifting it into...
- 	shll	k0		!       ...the T bit
-diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
-index b6abf38..11b6d9c 100644
---- a/arch/sh/kernel/cpu/sh3/ex.S
-+++ b/arch/sh/kernel/cpu/sh3/ex.S
-@@ -36,7 +36,7 @@ ENTRY(exception_handling_table)
- 	.long	exception_error	! address error store	/* 100 */
- #endif
- #if defined(CONFIG_SH_FPU)
--	.long	do_fpu_error		/* 120 */
-+	.long	fpu_error_trap_handler	/* 120 */
- #else
- 	.long	exception_error		/* 120 */
- #endif
-diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
-index bf579e0..fcc80bb 100644
---- a/arch/sh/kernel/cpu/sh3/probe.c
-+++ b/arch/sh/kernel/cpu/sh3/probe.c
-@@ -16,11 +16,11 @@
- #include <asm/cache.h>
- #include <asm/io.h>
- 
--int __init detect_cpu_and_cache_system(void)
-+int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
- {
- 	unsigned long addr0, addr1, data0, data1, data2, data3;
- 
--	jump_to_P2();
-+	jump_to_uncached();
- 	/*
- 	 * Check if the entry shadows or not.
- 	 * When shadowed, it's 128-entry system.
-@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
- 	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
- 	ctrl_outl(data2&~SH_CACHE_VALID, addr1);
- 
--	back_to_P1();
-+	back_to_cached();
- 
- 	boot_cpu_data.dcache.ways		= 4;
- 	boot_cpu_data.dcache.entry_shift	= 4;
-@@ -84,6 +84,9 @@ int __init detect_cpu_and_cache_system(void)
- #if defined(CONFIG_CPU_SUBTYPE_SH7720)
- 		boot_cpu_data.type = CPU_SH7720;
- #endif
-+#if defined(CONFIG_CPU_SUBTYPE_SH7721)
-+		boot_cpu_data.type = CPU_SH7721;
++	!
++	mov.l	2f, r0
++	mov	r0, r15		! Set initial r15 (stack pointer)
++#ifdef CONFIG_CPU_HAS_SR_RB
++	mov.l	7f, r0
++	ldc	r0, r7_bank	! ... and initial thread_info
 +#endif
- #if defined(CONFIG_CPU_SUBTYPE_SH7705)
- 		boot_cpu_data.type = CPU_SH7705;
- 
-diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
-index f6c65f2..dd0a20a 100644
---- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
-+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
-@@ -66,12 +66,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(DMAC, 7),
--	INTC_PRIO(SCIF2, 3),
--	INTC_PRIO(SCIF0, 3),
--};
--
- static struct intc_prio_reg prio_registers[] __initdata = {
- 	{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
- 	{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
-@@ -85,7 +79,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- 
- static struct intc_vect vectors_irq[] __initdata = {
- 	INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
-@@ -93,7 +87,7 @@ static struct intc_vect vectors_irq[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irq, "sh7705-irq", vectors_irq, NULL,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- 
- static struct plat_sci_port sci_platform_data[] = {
- 	{
-diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
-index 60b04b1..969804b 100644
---- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
-+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
-@@ -81,13 +81,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(DMAC, 7),
--	INTC_PRIO(SCI, 3),
--	INTC_PRIO(SCIF2, 3),
--	INTC_PRIO(SCIF0, 3),
--};
--
- static struct intc_prio_reg prio_registers[] __initdata = {
- 	{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
- 	{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } },
-@@ -109,7 +102,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- 
- #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-     defined(CONFIG_CPU_SUBTYPE_SH7707) || \
-@@ -120,7 +113,7 @@ static struct intc_vect vectors_irq[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irq, "sh770x-irq", vectors_irq, NULL,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- #endif
- 
- static struct resource rtc_resources[] = {
-diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
-index 84e5629..0cc0e2b 100644
---- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
-+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
-@@ -73,18 +73,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(SIOF1, SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(DMAC1, 7),
--	INTC_PRIO(DMAC2, 7),
--	INTC_PRIO(SCIF0, 3),
--	INTC_PRIO(SCIF1, 3),
--	INTC_PRIO(SIOF0, 3),
--	INTC_PRIO(SIOF1, 3),
--	INTC_PRIO(EDMAC0, 5),
--	INTC_PRIO(EDMAC1, 5),
--	INTC_PRIO(EDMAC2, 5),
--};
--
- static struct intc_prio_reg prio_registers[] __initdata = {
- 	{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
- 	{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
-@@ -101,7 +89,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- 
- static struct intc_vect vectors_irq[] __initdata = {
- 	INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
-@@ -109,7 +97,7 @@ static struct intc_vect vectors_irq[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irq, "sh7710-irq", vectors_irq, NULL,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- 
- static struct resource rtc_resources[] = {
- 	[0] =	{
-diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
-index a0929b8..3855ea4 100644
---- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
-+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
-@@ -85,9 +85,62 @@ static struct platform_device sci_device = {
- 	},
- };
- 
-+static struct resource usb_ohci_resources[] = {
-+	[0] = {
-+		.start	= 0xA4428000,
-+		.end	= 0xA44280FF,
-+		.flags	= IORESOURCE_MEM,
-+	},
-+	[1] = {
-+		.start	= 67,
-+		.end	= 67,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+};
-+
-+static u64 usb_ohci_dma_mask = 0xffffffffUL;
-+static struct platform_device usb_ohci_device = {
-+	.name		= "sh_ohci",
-+	.id		= -1,
-+	.dev = {
-+		.dma_mask		= &usb_ohci_dma_mask,
-+		.coherent_dma_mask	= 0xffffffff,
-+	},
-+	.num_resources	= ARRAY_SIZE(usb_ohci_resources),
-+	.resource	= usb_ohci_resources,
-+};
++	
++	!			Clear BSS area
++#ifdef CONFIG_SMP	
++	mov.l	3f, r0
++	cmp/eq	#0, r0		! skip clear if set to zero
++	bt	10f
++#endif
++	
++	mov.l	3f, r1
++	add	#4, r1
++	mov.l	4f, r2
++	mov	#0, r0
++9:	cmp/hs	r2, r1
++	bf/s	9b		! while (r1 < r2)
++	 mov.l	r0,@-r2
 +
-+static struct resource usbf_resources[] = {
-+	[0] = {
-+		.name	= "sh_udc",
-+		.start	= 0xA4420000,
-+		.end	= 0xA44200FF,
-+		.flags	= IORESOURCE_MEM,
-+	},
-+	[1] = {
-+		.name	= "sh_udc",
-+		.start	= 65,
-+		.end	= 65,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+};
++10:		
++	!			Additional CPU initialization
++	mov.l	6f, r0
++	jsr	@r0
++	 nop
 +
-+static struct platform_device usbf_device = {
-+	.name		= "sh_udc",
-+	.id		= -1,
-+	.dev = {
-+		.dma_mask		= NULL,
-+		.coherent_dma_mask	= 0xffffffff,
-+	},
-+	.num_resources	= ARRAY_SIZE(usbf_resources),
-+	.resource	= usbf_resources,
-+};
++	SYNCO()			! Wait for pending instructions..
++	
++	!			Start kernel
++	mov.l	5f, r0
++	jmp	@r0
++	 nop
 +
- static struct platform_device *sh7720_devices[] __initdata = {
- 	&rtc_device,
- 	&sci_device,
-+	&usb_ohci_device,
-+	&usbf_device,
- };
- 
- static int __init sh7720_devices_setup(void)
-@@ -127,8 +180,11 @@ static struct intc_vect vectors[] __initdata = {
- 	INTC_VECT(USBF_SPD, 0x6e0),   INTC_VECT(DMAC1_DEI0, 0x800),
- 	INTC_VECT(DMAC1_DEI1, 0x820), INTC_VECT(DMAC1_DEI2, 0x840),
- 	INTC_VECT(DMAC1_DEI3, 0x860), INTC_VECT(LCDC, 0x900),
--	INTC_VECT(SSL, 0x980),        INTC_VECT(USBFI0, 0xa20),
--	INTC_VECT(USBFI1, 0xa40),     INTC_VECT(USBHI, 0xa60),
-+#if defined(CONFIG_CPU_SUBTYPE_SH7720)
-+	INTC_VECT(SSL, 0x980),
-+#endif
-+	INTC_VECT(USBFI0, 0xa20),     INTC_VECT(USBFI1, 0xa40),
-+	INTC_VECT(USBHI, 0xa60),
- 	INTC_VECT(DMAC2_DEI4, 0xb80), INTC_VECT(DMAC2_DEI5, 0xba0),
- 	INTC_VECT(ADC, 0xbe0),        INTC_VECT(SCIF0, 0xc00),
- 	INTC_VECT(SCIF1, 0xc20),      INTC_VECT(PINT07, 0xc80),
-@@ -153,22 +209,16 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(MMC, MMCI0, MMCI1, MMCI2, MMCI3),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(SCIF0, 2),
--	INTC_PRIO(SCIF1, 2),
--	INTC_PRIO(DMAC1, 1),
--	INTC_PRIO(DMAC2, 1),
--	INTC_PRIO(RTC, 2),
--	INTC_PRIO(TMU, 2),
--	INTC_PRIO(TPU, 2),
--};
--
- static struct intc_prio_reg prio_registers[] __initdata = {
- 	{ 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
- 	{ 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } },
- 	{ 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
- 	{ 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } },
-+#if defined(CONFIG_CPU_SUBTYPE_SH7720)
- 	{ 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } },
++	.balign 4
++#if defined(CONFIG_CPU_SH2)
++1:	.long	0x000000F0		! IMASK=0xF
 +#else
-+	{ 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, 0 } },
++1:	.long	0x400080F0		! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
 +#endif
- 	{ 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } },
- 	{ 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } },
- 	{ 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } },
-@@ -177,7 +227,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups,
--		priorities, NULL, prio_registers, NULL);
-+		NULL, prio_registers, NULL);
- 
- static struct intc_sense_reg sense_registers[] __initdata = {
- 	{ INTC_ICR1, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
-@@ -190,7 +240,7 @@ static struct intc_vect vectors_irq[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_irq_desc, "sh7720-irq", vectors_irq,
--		NULL, priorities, NULL, prio_registers, sense_registers);
-+		NULL, NULL, prio_registers, sense_registers);
- 
- void __init plat_irq_setup_pins(int mode)
- {
-diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
-index dadd6bf..d608557 100644
---- a/arch/sh/kernel/cpu/sh4/Makefile
-+++ b/arch/sh/kernel/cpu/sh4/Makefile
-@@ -5,7 +5,7 @@
- obj-y	:= probe.o common.o
- common-y	+= $(addprefix ../sh3/, entry.o ex.o)
- 
--obj-$(CONFIG_SH_FPU)			+= fpu.o
-+obj-$(CONFIG_SH_FPU)			+= fpu.o softfloat.o
- obj-$(CONFIG_SH_STORE_QUEUES)		+= sq.o
- 
- # CPU subtype setup
-diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
-index c5a4fc7..817f993 100644
---- a/arch/sh/kernel/cpu/sh4/fpu.c
-+++ b/arch/sh/kernel/cpu/sh4/fpu.c
-@@ -1,7 +1,4 @@
--/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $
-- *
-- * linux/arch/sh/kernel/fpu.c
-- *
++ENTRY(stack_start)
++2:	.long	init_thread_union+THREAD_SIZE
++3:	.long	__bss_start
++4:	.long	_end
++5:	.long	start_kernel
++6:	.long	sh_cpu_init
++7:	.long	init_thread_union
+diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
+new file mode 100644
+index 0000000..f42d4c0
+--- /dev/null
++++ b/arch/sh/kernel/head_64.S
+@@ -0,0 +1,356 @@
 +/*
-  * Save/restore floating point context for signal handlers.
-  *
-  * This file is subject to the terms and conditions of the GNU General Public
-@@ -9,15 +6,16 @@
-  * for more details.
-  *
-  * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
-+ * Copyright (C) 2006  ST Microelectronics Ltd. (denorm support)
-  *
-- * FIXME! These routines can be optimized in big endian case.
-+ * FIXME! These routines have not been tested for big endian case.
-  */
--
- #include <linux/sched.h>
- #include <linux/signal.h>
-+#include <linux/io.h>
-+#include <asm/cpu/fpu.h>
- #include <asm/processor.h>
- #include <asm/system.h>
--#include <asm/io.h>
- 
- /* The PR (precision) bit in the FP Status Register must be clear when
-  * an frchg instruction is executed, otherwise the instruction is undefined.
-@@ -25,177 +23,184 @@
-  */
- 
- #define FPSCR_RCHG 0x00000000
-+extern unsigned long long float64_div(unsigned long long a,
-+				      unsigned long long b);
-+extern unsigned long int float32_div(unsigned long int a, unsigned long int b);
-+extern unsigned long long float64_mul(unsigned long long a,
-+				      unsigned long long b);
-+extern unsigned long int float32_mul(unsigned long int a, unsigned long int b);
-+extern unsigned long long float64_add(unsigned long long a,
-+				      unsigned long long b);
-+extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
-+extern unsigned long long float64_sub(unsigned long long a,
-+				      unsigned long long b);
-+extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
- 
-+static unsigned int fpu_exception_flags;
- 
- /*
-  * Save FPU registers onto task structure.
-  * Assume called with FPU enabled (SR.FD=0).
-  */
--void
--save_fpu(struct task_struct *tsk, struct pt_regs *regs)
-+void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
- {
- 	unsigned long dummy;
- 
- 	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
- 	enable_fpu();
--	asm volatile("sts.l	fpul, @-%0\n\t"
--		     "sts.l	fpscr, @-%0\n\t"
--		     "lds	%2, fpscr\n\t"
--		     "frchg\n\t"
--		     "fmov.s	fr15, @-%0\n\t"
--		     "fmov.s	fr14, @-%0\n\t"
--		     "fmov.s	fr13, @-%0\n\t"
--		     "fmov.s	fr12, @-%0\n\t"
--		     "fmov.s	fr11, @-%0\n\t"
--		     "fmov.s	fr10, @-%0\n\t"
--		     "fmov.s	fr9, @-%0\n\t"
--		     "fmov.s	fr8, @-%0\n\t"
--		     "fmov.s	fr7, @-%0\n\t"
--		     "fmov.s	fr6, @-%0\n\t"
--		     "fmov.s	fr5, @-%0\n\t"
--		     "fmov.s	fr4, @-%0\n\t"
--		     "fmov.s	fr3, @-%0\n\t"
--		     "fmov.s	fr2, @-%0\n\t"
--		     "fmov.s	fr1, @-%0\n\t"
--		     "fmov.s	fr0, @-%0\n\t"
--		     "frchg\n\t"
--		     "fmov.s	fr15, @-%0\n\t"
--		     "fmov.s	fr14, @-%0\n\t"
--		     "fmov.s	fr13, @-%0\n\t"
--		     "fmov.s	fr12, @-%0\n\t"
--		     "fmov.s	fr11, @-%0\n\t"
--		     "fmov.s	fr10, @-%0\n\t"
--		     "fmov.s	fr9, @-%0\n\t"
--		     "fmov.s	fr8, @-%0\n\t"
--		     "fmov.s	fr7, @-%0\n\t"
--		     "fmov.s	fr6, @-%0\n\t"
--		     "fmov.s	fr5, @-%0\n\t"
--		     "fmov.s	fr4, @-%0\n\t"
--		     "fmov.s	fr3, @-%0\n\t"
--		     "fmov.s	fr2, @-%0\n\t"
--		     "fmov.s	fr1, @-%0\n\t"
--		     "fmov.s	fr0, @-%0\n\t"
--		     "lds	%3, fpscr\n\t"
--		     : "=r" (dummy)
--		     : "0" ((char *)(&tsk->thread.fpu.hard.status)),
--		       "r" (FPSCR_RCHG),
--		       "r" (FPSCR_INIT)
--		     : "memory");
--
-- 	disable_fpu();
-- 	release_fpu(regs);
-+	asm volatile ("sts.l	fpul, @-%0\n\t"
-+		      "sts.l	fpscr, @-%0\n\t"
-+		      "lds	%2, fpscr\n\t"
-+		      "frchg\n\t"
-+		      "fmov.s	fr15, @-%0\n\t"
-+		      "fmov.s	fr14, @-%0\n\t"
-+		      "fmov.s	fr13, @-%0\n\t"
-+		      "fmov.s	fr12, @-%0\n\t"
-+		      "fmov.s	fr11, @-%0\n\t"
-+		      "fmov.s	fr10, @-%0\n\t"
-+		      "fmov.s	fr9, @-%0\n\t"
-+		      "fmov.s	fr8, @-%0\n\t"
-+		      "fmov.s	fr7, @-%0\n\t"
-+		      "fmov.s	fr6, @-%0\n\t"
-+		      "fmov.s	fr5, @-%0\n\t"
-+		      "fmov.s	fr4, @-%0\n\t"
-+		      "fmov.s	fr3, @-%0\n\t"
-+		      "fmov.s	fr2, @-%0\n\t"
-+		      "fmov.s	fr1, @-%0\n\t"
-+		      "fmov.s	fr0, @-%0\n\t"
-+		      "frchg\n\t"
-+		      "fmov.s	fr15, @-%0\n\t"
-+		      "fmov.s	fr14, @-%0\n\t"
-+		      "fmov.s	fr13, @-%0\n\t"
-+		      "fmov.s	fr12, @-%0\n\t"
-+		      "fmov.s	fr11, @-%0\n\t"
-+		      "fmov.s	fr10, @-%0\n\t"
-+		      "fmov.s	fr9, @-%0\n\t"
-+		      "fmov.s	fr8, @-%0\n\t"
-+		      "fmov.s	fr7, @-%0\n\t"
-+		      "fmov.s	fr6, @-%0\n\t"
-+		      "fmov.s	fr5, @-%0\n\t"
-+		      "fmov.s	fr4, @-%0\n\t"
-+		      "fmov.s	fr3, @-%0\n\t"
-+		      "fmov.s	fr2, @-%0\n\t"
-+		      "fmov.s	fr1, @-%0\n\t"
-+		      "fmov.s	fr0, @-%0\n\t"
-+		      "lds	%3, fpscr\n\t":"=r" (dummy)
-+		      :"0"((char *)(&tsk->thread.fpu.hard.status)),
-+		      "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
-+		      :"memory");
-+
-+	disable_fpu();
-+	release_fpu(regs);
- }
- 
--static void
--restore_fpu(struct task_struct *tsk)
-+static void restore_fpu(struct task_struct *tsk)
- {
- 	unsigned long dummy;
- 
-- 	enable_fpu();
--	asm volatile("lds	%2, fpscr\n\t"
--		     "fmov.s	@%0+, fr0\n\t"
--		     "fmov.s	@%0+, fr1\n\t"
--		     "fmov.s	@%0+, fr2\n\t"
--		     "fmov.s	@%0+, fr3\n\t"
--		     "fmov.s	@%0+, fr4\n\t"
--		     "fmov.s	@%0+, fr5\n\t"
--		     "fmov.s	@%0+, fr6\n\t"
--		     "fmov.s	@%0+, fr7\n\t"
--		     "fmov.s	@%0+, fr8\n\t"
--		     "fmov.s	@%0+, fr9\n\t"
--		     "fmov.s	@%0+, fr10\n\t"
--		     "fmov.s	@%0+, fr11\n\t"
--		     "fmov.s	@%0+, fr12\n\t"
--		     "fmov.s	@%0+, fr13\n\t"
--		     "fmov.s	@%0+, fr14\n\t"
--		     "fmov.s	@%0+, fr15\n\t"
--		     "frchg\n\t"
--		     "fmov.s	@%0+, fr0\n\t"
--		     "fmov.s	@%0+, fr1\n\t"
--		     "fmov.s	@%0+, fr2\n\t"
--		     "fmov.s	@%0+, fr3\n\t"
--		     "fmov.s	@%0+, fr4\n\t"
--		     "fmov.s	@%0+, fr5\n\t"
--		     "fmov.s	@%0+, fr6\n\t"
--		     "fmov.s	@%0+, fr7\n\t"
--		     "fmov.s	@%0+, fr8\n\t"
--		     "fmov.s	@%0+, fr9\n\t"
--		     "fmov.s	@%0+, fr10\n\t"
--		     "fmov.s	@%0+, fr11\n\t"
--		     "fmov.s	@%0+, fr12\n\t"
--		     "fmov.s	@%0+, fr13\n\t"
--		     "fmov.s	@%0+, fr14\n\t"
--		     "fmov.s	@%0+, fr15\n\t"
--		     "frchg\n\t"
--		     "lds.l	@%0+, fpscr\n\t"
--		     "lds.l	@%0+, fpul\n\t"
--		     : "=r" (dummy)
--		     : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
--		     : "memory");
-+	enable_fpu();
-+	asm volatile ("lds	%2, fpscr\n\t"
-+		      "fmov.s	@%0+, fr0\n\t"
-+		      "fmov.s	@%0+, fr1\n\t"
-+		      "fmov.s	@%0+, fr2\n\t"
-+		      "fmov.s	@%0+, fr3\n\t"
-+		      "fmov.s	@%0+, fr4\n\t"
-+		      "fmov.s	@%0+, fr5\n\t"
-+		      "fmov.s	@%0+, fr6\n\t"
-+		      "fmov.s	@%0+, fr7\n\t"
-+		      "fmov.s	@%0+, fr8\n\t"
-+		      "fmov.s	@%0+, fr9\n\t"
-+		      "fmov.s	@%0+, fr10\n\t"
-+		      "fmov.s	@%0+, fr11\n\t"
-+		      "fmov.s	@%0+, fr12\n\t"
-+		      "fmov.s	@%0+, fr13\n\t"
-+		      "fmov.s	@%0+, fr14\n\t"
-+		      "fmov.s	@%0+, fr15\n\t"
-+		      "frchg\n\t"
-+		      "fmov.s	@%0+, fr0\n\t"
-+		      "fmov.s	@%0+, fr1\n\t"
-+		      "fmov.s	@%0+, fr2\n\t"
-+		      "fmov.s	@%0+, fr3\n\t"
-+		      "fmov.s	@%0+, fr4\n\t"
-+		      "fmov.s	@%0+, fr5\n\t"
-+		      "fmov.s	@%0+, fr6\n\t"
-+		      "fmov.s	@%0+, fr7\n\t"
-+		      "fmov.s	@%0+, fr8\n\t"
-+		      "fmov.s	@%0+, fr9\n\t"
-+		      "fmov.s	@%0+, fr10\n\t"
-+		      "fmov.s	@%0+, fr11\n\t"
-+		      "fmov.s	@%0+, fr12\n\t"
-+		      "fmov.s	@%0+, fr13\n\t"
-+		      "fmov.s	@%0+, fr14\n\t"
-+		      "fmov.s	@%0+, fr15\n\t"
-+		      "frchg\n\t"
-+		      "lds.l	@%0+, fpscr\n\t"
-+		      "lds.l	@%0+, fpul\n\t"
-+		      :"=r" (dummy)
-+		      :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
-+		      :"memory");
- 	disable_fpu();
- }
- 
- /*
-  * Load the FPU with signalling NANS.  This bit pattern we're using
-  * has the property that no matter wether considered as single or as
-- * double precision represents signaling NANS.  
-+ * double precision represents signaling NANS.
-  */
- 
--static void
--fpu_init(void)
-+static void fpu_init(void)
- {
- 	enable_fpu();
--	asm volatile("lds	%0, fpul\n\t"
--		     "lds	%1, fpscr\n\t"
--		     "fsts	fpul, fr0\n\t"
--		     "fsts	fpul, fr1\n\t"
--		     "fsts	fpul, fr2\n\t"
--		     "fsts	fpul, fr3\n\t"
--		     "fsts	fpul, fr4\n\t"
--		     "fsts	fpul, fr5\n\t"
--		     "fsts	fpul, fr6\n\t"
--		     "fsts	fpul, fr7\n\t"
--		     "fsts	fpul, fr8\n\t"
--		     "fsts	fpul, fr9\n\t"
--		     "fsts	fpul, fr10\n\t"
--		     "fsts	fpul, fr11\n\t"
--		     "fsts	fpul, fr12\n\t"
--		     "fsts	fpul, fr13\n\t"
--		     "fsts	fpul, fr14\n\t"
--		     "fsts	fpul, fr15\n\t"
--		     "frchg\n\t"
--		     "fsts	fpul, fr0\n\t"
--		     "fsts	fpul, fr1\n\t"
--		     "fsts	fpul, fr2\n\t"
--		     "fsts	fpul, fr3\n\t"
--		     "fsts	fpul, fr4\n\t"
--		     "fsts	fpul, fr5\n\t"
--		     "fsts	fpul, fr6\n\t"
--		     "fsts	fpul, fr7\n\t"
--		     "fsts	fpul, fr8\n\t"
--		     "fsts	fpul, fr9\n\t"
--		     "fsts	fpul, fr10\n\t"
--		     "fsts	fpul, fr11\n\t"
--		     "fsts	fpul, fr12\n\t"
--		     "fsts	fpul, fr13\n\t"
--		     "fsts	fpul, fr14\n\t"
--		     "fsts	fpul, fr15\n\t"
--		     "frchg\n\t"
--		     "lds	%2, fpscr\n\t"
--		     : /* no output */
--		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
-- 	disable_fpu();
-+	asm volatile (	"lds	%0, fpul\n\t"
-+			"lds	%1, fpscr\n\t"
-+			"fsts	fpul, fr0\n\t"
-+			"fsts	fpul, fr1\n\t"
-+			"fsts	fpul, fr2\n\t"
-+			"fsts	fpul, fr3\n\t"
-+			"fsts	fpul, fr4\n\t"
-+			"fsts	fpul, fr5\n\t"
-+			"fsts	fpul, fr6\n\t"
-+			"fsts	fpul, fr7\n\t"
-+			"fsts	fpul, fr8\n\t"
-+			"fsts	fpul, fr9\n\t"
-+			"fsts	fpul, fr10\n\t"
-+			"fsts	fpul, fr11\n\t"
-+			"fsts	fpul, fr12\n\t"
-+			"fsts	fpul, fr13\n\t"
-+			"fsts	fpul, fr14\n\t"
-+			"fsts	fpul, fr15\n\t"
-+			"frchg\n\t"
-+			"fsts	fpul, fr0\n\t"
-+			"fsts	fpul, fr1\n\t"
-+			"fsts	fpul, fr2\n\t"
-+			"fsts	fpul, fr3\n\t"
-+			"fsts	fpul, fr4\n\t"
-+			"fsts	fpul, fr5\n\t"
-+			"fsts	fpul, fr6\n\t"
-+			"fsts	fpul, fr7\n\t"
-+			"fsts	fpul, fr8\n\t"
-+			"fsts	fpul, fr9\n\t"
-+			"fsts	fpul, fr10\n\t"
-+			"fsts	fpul, fr11\n\t"
-+			"fsts	fpul, fr12\n\t"
-+			"fsts	fpul, fr13\n\t"
-+			"fsts	fpul, fr14\n\t"
-+			"fsts	fpul, fr15\n\t"
-+			"frchg\n\t"
-+			"lds	%2, fpscr\n\t"
-+			:	/* no output */
-+			:"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
-+	disable_fpu();
- }
- 
- /**
-- *	denormal_to_double - Given denormalized float number,
-- *	                     store double float
-+ *      denormal_to_double - Given denormalized float number,
-+ *                           store double float
-  *
-- *	@fpu: Pointer to sh_fpu_hard structure
-- *	@n: Index to FP register
-+ *      @fpu: Pointer to sh_fpu_hard structure
-+ *      @n: Index to FP register
-  */
--static void
--denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
-+static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
- {
- 	unsigned long du, dl;
- 	unsigned long x = fpu->fpul;
-@@ -212,7 +217,7 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
- 		dl = x << 29;
- 
- 		fpu->fp_regs[n] = du;
--		fpu->fp_regs[n+1] = dl;
-+		fpu->fp_regs[n + 1] = dl;
- 	}
- }
- 
-@@ -223,68 +228,191 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
-  *
-  *	Returns 1 when it's handled (should not cause exception).
-  */
--static int
--ieee_fpe_handler (struct pt_regs *regs)
-+static int ieee_fpe_handler(struct pt_regs *regs)
- {
--	unsigned short insn = *(unsigned short *) regs->pc;
-+	unsigned short insn = *(unsigned short *)regs->pc;
- 	unsigned short finsn;
- 	unsigned long nextpc;
- 	int nib[4] = {
- 		(insn >> 12) & 0xf,
- 		(insn >> 8) & 0xf,
- 		(insn >> 4) & 0xf,
--		insn & 0xf};
--
--	if (nib[0] == 0xb ||
--	    (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
--		regs->pr = regs->pc + 4;
--  
--	if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
--		nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
--		finsn = *(unsigned short *) (regs->pc + 2);
--	} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
-+		insn & 0xf
-+	};
++ * arch/sh/kernel/head_64.S
++ *
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003, 2004  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <asm/page.h>
++#include <asm/cache.h>
++#include <asm/tlb.h>
++#include <asm/cpu/registers.h>
++#include <asm/cpu/mmu_context.h>
++#include <asm/thread_info.h>
 +
-+	if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb))
-+		regs->pr = regs->pc + 4;  /* bsr & jsr */
++/*
++ * MMU defines: TLB boundaries.
++ */
 +
-+	if (nib[0] == 0xa || nib[0] == 0xb) {
-+		/* bra & bsr */
-+		nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3);
-+		finsn = *(unsigned short *)(regs->pc + 2);
-+	} else if (nib[0] == 0x8 && nib[1] == 0xd) {
-+		/* bt/s */
- 		if (regs->sr & 1)
--			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
-+			nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
- 		else
- 			nextpc = regs->pc + 4;
--		finsn = *(unsigned short *) (regs->pc + 2);
--	} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
-+		finsn = *(unsigned short *)(regs->pc + 2);
-+	} else if (nib[0] == 0x8 && nib[1] == 0xf) {
-+		/* bf/s */
- 		if (regs->sr & 1)
- 			nextpc = regs->pc + 4;
- 		else
--			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
--		finsn = *(unsigned short *) (regs->pc + 2);
-+			nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
-+		finsn = *(unsigned short *)(regs->pc + 2);
- 	} else if (nib[0] == 0x4 && nib[3] == 0xb &&
--		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
-+		   (nib[2] == 0x0 || nib[2] == 0x2)) {
-+		/* jmp & jsr */
- 		nextpc = regs->regs[nib[1]];
--		finsn = *(unsigned short *) (regs->pc + 2);
-+		finsn = *(unsigned short *)(regs->pc + 2);
- 	} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
--		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
-+		   (nib[2] == 0x0 || nib[2] == 0x2)) {
-+		/* braf & bsrf */
- 		nextpc = regs->pc + 4 + regs->regs[nib[1]];
--		finsn = *(unsigned short *) (regs->pc + 2);
--	} else if (insn == 0x000b) { /* rts */
-+		finsn = *(unsigned short *)(regs->pc + 2);
-+	} else if (insn == 0x000b) {
-+		/* rts */
- 		nextpc = regs->pr;
--		finsn = *(unsigned short *) (regs->pc + 2);
-+		finsn = *(unsigned short *)(regs->pc + 2);
- 	} else {
- 		nextpc = regs->pc + instruction_size(insn);
- 		finsn = insn;
- 	}
- 
--	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
-+	if ((finsn & 0xf1ff) == 0xf0ad) {
-+		/* fcnvsd */
- 		struct task_struct *tsk = current;
- 
- 		save_fpu(tsk, regs);
--		if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
-+		if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
- 			/* FPU error */
--			denormal_to_double (&tsk->thread.fpu.hard,
--					    (finsn >> 8) & 0xf);
--			tsk->thread.fpu.hard.fpscr &=
--				~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
--			grab_fpu(regs);
--			restore_fpu(tsk);
--			set_tsk_thread_flag(tsk, TIF_USEDFPU);
-+			denormal_to_double(&tsk->thread.fpu.hard,
-+					   (finsn >> 8) & 0xf);
-+		else
-+			return 0;
++#define MMUIR_FIRST	ITLB_FIXED
++#define MMUIR_END	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
++#define MMUIR_STEP	TLB_STEP
 +
-+		regs->pc = nextpc;
-+		return 1;
-+	} else if ((finsn & 0xf00f) == 0xf002) {
-+		/* fmul */
-+		struct task_struct *tsk = current;
-+		int fpscr;
-+		int n, m, prec;
-+		unsigned int hx, hy;
++#define MMUDR_FIRST	DTLB_FIXED
++#define MMUDR_END	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
++#define MMUDR_STEP	TLB_STEP
 +
-+		n = (finsn >> 8) & 0xf;
-+		m = (finsn >> 4) & 0xf;
-+		hx = tsk->thread.fpu.hard.fp_regs[n];
-+		hy = tsk->thread.fpu.hard.fp_regs[m];
-+		fpscr = tsk->thread.fpu.hard.fpscr;
-+		prec = fpscr & FPSCR_DBL_PRECISION;
++/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
++#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
++#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
++#endif
 +
-+		if ((fpscr & FPSCR_CAUSE_ERROR)
-+		    && (prec && ((hx & 0x7fffffff) < 0x00100000
-+				 || (hy & 0x7fffffff) < 0x00100000))) {
-+			long long llx, lly;
++/*
++ * MMU defines: Fixed TLBs.
++ */
++/* Deal safely with the case where the base of RAM is not 512Mb aligned */
 +
-+			/* FPU error because of denormal (doubles) */
-+			llx = ((long long)hx << 32)
-+			    | tsk->thread.fpu.hard.fp_regs[n + 1];
-+			lly = ((long long)hy << 32)
-+			    | tsk->thread.fpu.hard.fp_regs[m + 1];
-+			llx = float64_mul(llx, lly);
-+			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-+			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
-+		} else if ((fpscr & FPSCR_CAUSE_ERROR)
-+			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
-+					 || (hy & 0x7fffffff) < 0x00800000))) {
-+			/* FPU error because of denormal (floats) */
-+			hx = float32_mul(hx, hy);
-+			tsk->thread.fpu.hard.fp_regs[n] = hx;
-+		} else
-+			return 0;
++#define ALIGN_512M_MASK (0xffffffffe0000000)
++#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
++#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
 +
-+		regs->pc = nextpc;
-+		return 1;
-+	} else if ((finsn & 0xf00e) == 0xf000) {
-+		/* fadd, fsub */
-+		struct task_struct *tsk = current;
-+		int fpscr;
-+		int n, m, prec;
-+		unsigned int hx, hy;
++#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
++			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
 +
-+		n = (finsn >> 8) & 0xf;
-+		m = (finsn >> 4) & 0xf;
-+		hx = tsk->thread.fpu.hard.fp_regs[n];
-+		hy = tsk->thread.fpu.hard.fp_regs[m];
-+		fpscr = tsk->thread.fpu.hard.fpscr;
-+		prec = fpscr & FPSCR_DBL_PRECISION;
++#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
++			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
 +
-+		if ((fpscr & FPSCR_CAUSE_ERROR)
-+		    && (prec && ((hx & 0x7fffffff) < 0x00100000
-+				 || (hy & 0x7fffffff) < 0x00100000))) {
-+			long long llx, lly;
++#define MMUDR_CACHED_H	0x0000000000000003 | ALIGNED_EFFECTIVE
++			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
++#define MMUDR_CACHED_L	0x000000000000015a | ALIGNED_PHYSICAL
++			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
 +
-+			/* FPU error because of denormal (doubles) */
-+			llx = ((long long)hx << 32)
-+			    | tsk->thread.fpu.hard.fp_regs[n + 1];
-+			lly = ((long long)hy << 32)
-+			    | tsk->thread.fpu.hard.fp_regs[m + 1];
-+			if ((finsn & 0xf00f) == 0xf000)
-+				llx = float64_add(llx, lly);
-+			else
-+				llx = float64_sub(llx, lly);
-+			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-+			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
-+		} else if ((fpscr & FPSCR_CAUSE_ERROR)
-+			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
-+					 || (hy & 0x7fffffff) < 0x00800000))) {
-+			/* FPU error because of denormal (floats) */
-+			if ((finsn & 0xf00f) == 0xf000)
-+				hx = float32_add(hx, hy);
-+			else
-+				hx = float32_sub(hx, hy);
-+			tsk->thread.fpu.hard.fp_regs[n] = hx;
-+		} else
-+			return 0;
++#ifdef CONFIG_CACHE_OFF
++#define	ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
++#else
++#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
++#endif
++#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
 +
-+		regs->pc = nextpc;
-+		return 1;
-+	} else if ((finsn & 0xf003) == 0xf003) {
-+		/* fdiv */
-+		struct task_struct *tsk = current;
-+		int fpscr;
-+		int n, m, prec;
-+		unsigned int hx, hy;
++#if defined (CONFIG_CACHE_OFF)
++#define	OCCR0_INIT_VAL	OCCR0_OFF			   /* D-cache: off  */
++#elif defined (CONFIG_CACHE_WRITETHROUGH)
++#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WT	   /* D-cache: on,   */
++							   /* WT, invalidate */
++#elif defined (CONFIG_CACHE_WRITEBACK)
++#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	   /* D-cache: on,   */
++							   /* WB, invalidate */
++#else
++#error preprocessor flag CONFIG_CACHE_... not recognized!
++#endif
 +
-+		n = (finsn >> 8) & 0xf;
-+		m = (finsn >> 4) & 0xf;
-+		hx = tsk->thread.fpu.hard.fp_regs[n];
-+		hy = tsk->thread.fpu.hard.fp_regs[m];
-+		fpscr = tsk->thread.fpu.hard.fpscr;
-+		prec = fpscr & FPSCR_DBL_PRECISION;
++#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			   /* No locking     */
 +
-+		if ((fpscr & FPSCR_CAUSE_ERROR)
-+		    && (prec && ((hx & 0x7fffffff) < 0x00100000
-+				 || (hy & 0x7fffffff) < 0x00100000))) {
-+			long long llx, lly;
++	.section	.empty_zero_page, "aw"
++	.global empty_zero_page
 +
-+			/* FPU error because of denormal (doubles) */
-+			llx = ((long long)hx << 32)
-+			    | tsk->thread.fpu.hard.fp_regs[n + 1];
-+			lly = ((long long)hy << 32)
-+			    | tsk->thread.fpu.hard.fp_regs[m + 1];
++empty_zero_page:
++	.long	1		/* MOUNT_ROOT_RDONLY */
++	.long	0		/* RAMDISK_FLAGS */
++	.long	0x0200		/* ORIG_ROOT_DEV */
++	.long	1		/* LOADER_TYPE */
++	.long	0x00800000	/* INITRD_START */
++	.long	0x00800000	/* INITRD_SIZE */
++	.long	0
 +
-+			llx = float64_div(llx, lly);
++	.text
++	.balign 4096,0,4096
 +
-+			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-+			tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
-+		} else if ((fpscr & FPSCR_CAUSE_ERROR)
-+			   && (!prec && ((hx & 0x7fffffff) < 0x00800000
-+					 || (hy & 0x7fffffff) < 0x00800000))) {
-+			/* FPU error because of denormal (floats) */
-+			hx = float32_div(hx, hy);
-+			tsk->thread.fpu.hard.fp_regs[n] = hx;
- 		} else
--			force_sig(SIGFPE, tsk);
-+			return 0;
- 
- 		regs->pc = nextpc;
- 		return 1;
-@@ -293,27 +421,48 @@ ieee_fpe_handler (struct pt_regs *regs)
- 	return 0;
- }
- 
--asmlinkage void
--do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6,
--	     unsigned long r7, struct pt_regs __regs)
-+void float_raise(unsigned int flags)
-+{
-+	fpu_exception_flags |= flags;
-+}
++	.section	.data, "aw"
++	.balign	PAGE_SIZE
 +
-+int float_rounding_mode(void)
- {
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- 	struct task_struct *tsk = current;
-+	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
-+	return roundingMode;
-+}
- 
--	if (ieee_fpe_handler(regs))
--		return;
-+BUILD_TRAP_HANDLER(fpu_error)
-+{
-+	struct task_struct *tsk = current;
-+	TRAP_HANDLER_DECL;
- 
--	regs->pc += 2;
- 	save_fpu(tsk, regs);
-+	fpu_exception_flags = 0;
-+	if (ieee_fpe_handler(regs)) {
-+		tsk->thread.fpu.hard.fpscr &=
-+		    ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
-+		tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
-+		/* Set the FPSCR flag as well as cause bits - simply
-+		 * replicate the cause */
-+		tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
-+		grab_fpu(regs);
-+		restore_fpu(tsk);
-+		set_tsk_thread_flag(tsk, TIF_USEDFPU);
-+		if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
-+		     (fpu_exception_flags >> 2)) == 0) {
-+			return;
-+		}
-+	}
++	.section	.data, "aw"
++	.balign	PAGE_SIZE
 +
- 	force_sig(SIGFPE, tsk);
- }
- 
--asmlinkage void
--do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
--		     unsigned long r7, struct pt_regs __regs)
-+BUILD_TRAP_HANDLER(fpu_state_restore)
- {
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- 	struct task_struct *tsk = current;
-+	TRAP_HANDLER_DECL;
- 
- 	grab_fpu(regs);
- 	if (!user_mode(regs)) {
-@@ -324,7 +473,7 @@ do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
- 	if (used_math()) {
- 		/* Using the FPU again.  */
- 		restore_fpu(tsk);
--	} else	{
-+	} else {
- 		/* First time FPU user.  */
- 		fpu_init();
- 		set_used_math();
-diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
-index bc9c28a..f2b9238 100644
---- a/arch/sh/kernel/cpu/sh4/probe.c
-+++ b/arch/sh/kernel/cpu/sh4/probe.c
-@@ -98,6 +98,8 @@ int __init detect_cpu_and_cache_system(void)
- 	case 0x200A:
- 		if (prr == 0x61)
- 			boot_cpu_data.type = CPU_SH7781;
-+		else if (prr == 0xa1)
-+			boot_cpu_data.type = CPU_SH7763;
- 		else
- 			boot_cpu_data.type = CPU_SH7780;
- 
-diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
-index 523f68a..ae3603a 100644
---- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
-+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
-@@ -126,12 +126,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(REF, REF_RCMI, REF_ROVI),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(SCIF, 3),
--	INTC_PRIO(SCI1, 3),
--	INTC_PRIO(DMAC, 7),
--};
--
- static struct intc_prio_reg prio_registers[] __initdata = {
- 	{ 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
- 	{ 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } },
-@@ -143,7 +137,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- 
- /* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
- #if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
-@@ -163,7 +157,7 @@ static struct intc_group groups_dma4[] __initdata = {
- 
- static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
- 			 vectors_dma4, groups_dma4,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- #endif
- 
- /* SH7750R and SH7751R both have 8-channel DMA controllers */
-@@ -184,7 +178,7 @@ static struct intc_group groups_dma8[] __initdata = {
- 
- static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
- 			 vectors_dma8, groups_dma8,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- #endif
- 
- /* SH7750R, SH7751 and SH7751R all have two extra timer channels */
-@@ -205,7 +199,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34",
--			 vectors_tmu34, NULL, priorities,
-+			 vectors_tmu34, NULL,
- 			 mask_registers, prio_registers, NULL);
- #endif
- 
-@@ -216,7 +210,7 @@ static struct intc_vect vectors_irlm[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL,
--			 priorities, NULL, prio_registers, NULL);
-+			 NULL, prio_registers, NULL);
- 
- /* SH7751 and SH7751R both have PCI */
- #if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
-@@ -233,7 +227,7 @@ static struct intc_group groups_pci[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci,
--			 priorities, mask_registers, prio_registers, NULL);
-+			 mask_registers, prio_registers, NULL);
- #endif
- 
- #if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
-diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
-index 7a898cb..85f8157 100644
---- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
-+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
-@@ -92,15 +92,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(REF, REF_RCMI, REF_ROVI),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(SCIF0, 3),
--	INTC_PRIO(SCIF1, 3),
--	INTC_PRIO(SCIF2, 3),
--	INTC_PRIO(SIM, 3),
--	INTC_PRIO(DMAC, 7),
--	INTC_PRIO(DMABRG, 13),
--};
--
- static struct intc_mask_reg mask_registers[] __initdata = {
- 	{ 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
- 	  { IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21,
-@@ -132,7 +123,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups,
--			 priorities, mask_registers, prio_registers, NULL);
-+			 mask_registers, prio_registers, NULL);
- 
- static struct intc_vect vectors_irq[] __initdata = {
- 	INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
-@@ -140,7 +131,7 @@ static struct intc_vect vectors_irq[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
--			 priorities, mask_registers, prio_registers, NULL);
-+			 mask_registers, prio_registers, NULL);
- 
- static struct plat_sci_port sci_platform_data[] = {
- 	{
-diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
-new file mode 100644
-index 0000000..7b2d337
---- /dev/null
-+++ b/arch/sh/kernel/cpu/sh4/softfloat.c
-@@ -0,0 +1,892 @@
++	.global mmu_pdtp_cache
++mmu_pdtp_cache:
++	.space PAGE_SIZE, 0
++
++	.global empty_bad_page
++empty_bad_page:
++	.space PAGE_SIZE, 0
++
++	.global empty_bad_pte_table
++empty_bad_pte_table:
++	.space PAGE_SIZE, 0
++
++	.global	fpu_in_use
++fpu_in_use:	.quad	0
++
++
++	.section	.text.head, "ax"
++	.balign L1_CACHE_BYTES
 +/*
-+ * Floating point emulation support for subnormalised numbers on SH4
-+ * architecture This file is derived from the SoftFloat IEC/IEEE
-+ * Floating-point Arithmetic Package, Release 2 the original license of
-+ * which is reproduced below.
-+ *
-+ * ========================================================================
-+ *
-+ * This C source file is part of the SoftFloat IEC/IEEE Floating-point
-+ * Arithmetic Package, Release 2.
-+ *
-+ * Written by John R. Hauser.  This work was made possible in part by the
-+ * International Computer Science Institute, located at Suite 600, 1947 Center
-+ * Street, Berkeley, California 94704.  Funding was partially provided by the
-+ * National Science Foundation under grant MIP-9311980.  The original version
-+ * of this code was written as part of a project to build a fixed-point vector
-+ * processor in collaboration with the University of California at Berkeley,
-+ * overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
-+ * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
-+ * arithmetic/softfloat.html'.
-+ *
-+ * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort
-+ * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
-+ * TIMES RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO
-+ * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
-+ * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
-+ *
-+ * Derivative works are acceptable, even for commercial purposes, so long as
-+ * (1) they include prominent notice that the work is derivative, and (2) they
-+ * include prominent notice akin to these three paragraphs for those parts of
-+ * this code that are retained.
++ * Condition at the entry of __stext:
++ * . Reset state:
++ *   . SR.FD    = 1		(FPU disabled)
++ *   . SR.BL    = 1		(Exceptions disabled)
++ *   . SR.MD    = 1		(Privileged Mode)
++ *   . SR.MMU   = 0		(MMU Disabled)
++ *   . SR.CD    = 0		(CTC User Visible)
++ *   . SR.IMASK = Undefined	(Interrupt Mask)
 + *
-+ * ========================================================================
++ * Operations supposed to be performed by __stext:
++ * . prevent speculative fetch onto device memory while MMU is off
++ * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
++ * . first, save CPU state and set it to something harmless
++ * . any CPU detection and/or endianness settings (?)
++ * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
++ * . set initial TLB entries for cached and uncached regions
++ *   (no fine granularity paging)
++ * . set initial cache state
++ * . enable MMU and caches
++ * . set CPU to a consistent state
++ *   . registers (including stack pointer and current/KCR0)
++ *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
++ *     at this stage. This is all to later Linux initialization steps.
++ *   . initialize FPU
++ * . clear BSS
++ * . jump into start_kernel()
++ * . be prepared to hopeless start_kernel() returns.
 + *
-+ * SH4 modifications by Ismail Dhaoui <ismail.dhaoui@st.com>
-+ * and Kamel Khelifi <kamel.khelifi@st.com>
 + */
-+#include <linux/kernel.h>
-+#include <asm/cpu/fpu.h>
-+
-+#define LIT64( a ) a##LL
++	.global _stext
++_stext:
++	/*
++	 * Prevent speculative fetch on device memory due to
++	 * uninitialized target registers.
++	 */
++	ptabs/u	ZERO, tr0
++	ptabs/u	ZERO, tr1
++	ptabs/u	ZERO, tr2
++	ptabs/u	ZERO, tr3
++	ptabs/u	ZERO, tr4
++	ptabs/u	ZERO, tr5
++	ptabs/u	ZERO, tr6
++	ptabs/u	ZERO, tr7
++	synci
 +
-+typedef char flag;
-+typedef unsigned char uint8;
-+typedef signed char int8;
-+typedef int uint16;
-+typedef int int16;
-+typedef unsigned int uint32;
-+typedef signed int int32;
++	/*
++	 * Read/Set CPU state. After this block:
++	 * r29 = Initial SR
++	 */
++	getcon	SR, r29
++	movi	SR_HARMLESS, r20
++	putcon	r20, SR
 +
-+typedef unsigned long long int bits64;
-+typedef signed long long int sbits64;
++	/*
++	 * Initialize EMI/LMI. To Be Done.
++	 */
 +
-+typedef unsigned char bits8;
-+typedef signed char sbits8;
-+typedef unsigned short int bits16;
-+typedef signed short int sbits16;
-+typedef unsigned int bits32;
-+typedef signed int sbits32;
++	/*
++	 * CPU detection and/or endianness settings (?). To Be Done.
++	 * Pure PIC code here, please ! Just save state into r30.
++         * After this block:
++	 * r30 = CPU type/Platform Endianness
++	 */
 +
-+typedef unsigned long long int uint64;
-+typedef signed long long int int64;
++	/*
++	 * Set initial TLB entries for cached and uncached regions.
++	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
++	 */
++	/* Clear ITLBs */
++	pta	clear_ITLB, tr1
++	movi	MMUIR_FIRST, r21
++	movi	MMUIR_END, r22
++clear_ITLB:
++	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
++	addi	r21, MMUIR_STEP, r21
++        bne	r21, r22, tr1
 +
-+typedef unsigned long int float32;
-+typedef unsigned long long float64;
++	/* Clear DTLBs */
++	pta	clear_DTLB, tr1
++	movi	MMUDR_FIRST, r21
++	movi	MMUDR_END, r22
++clear_DTLB:
++	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
++	addi	r21, MMUDR_STEP, r21
++        bne	r21, r22, tr1
 +
-+extern void float_raise(unsigned int flags);	/* in fpu.c */
-+extern int float_rounding_mode(void);	/* in fpu.c */
++	/* Map one big (512Mb) page for ITLB */
++	movi	MMUIR_FIRST, r21
++	movi	MMUIR_TEXT_L, r22	/* PTEL first */
++	add.l	r22, r63, r22		/* Sign extend */
++	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
++	movi	MMUIR_TEXT_H, r22	/* PTEH last */
++	add.l	r22, r63, r22		/* Sign extend */
++	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */
 +
-+inline bits64 extractFloat64Frac(float64 a);
-+inline flag extractFloat64Sign(float64 a);
-+inline int16 extractFloat64Exp(float64 a);
-+inline int16 extractFloat32Exp(float32 a);
-+inline flag extractFloat32Sign(float32 a);
-+inline bits32 extractFloat32Frac(float32 a);
-+inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
-+inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
-+inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
-+inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
-+float64 float64_sub(float64 a, float64 b);
-+float32 float32_sub(float32 a, float32 b);
-+float32 float32_add(float32 a, float32 b);
-+float64 float64_add(float64 a, float64 b);
-+float64 float64_div(float64 a, float64 b);
-+float32 float32_div(float32 a, float32 b);
-+float32 float32_mul(float32 a, float32 b);
-+float64 float64_mul(float64 a, float64 b);
-+inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
-+		   bits64 * z1Ptr);
-+inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
-+		   bits64 * z1Ptr);
-+inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
++	/* Map one big CACHED (512Mb) page for DTLB */
++	movi	MMUDR_FIRST, r21
++	movi	MMUDR_CACHED_L, r22	/* PTEL first */
++	add.l	r22, r63, r22		/* Sign extend */
++	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
++	movi	MMUDR_CACHED_H, r22	/* PTEH last */
++	add.l	r22, r63, r22		/* Sign extend */
++	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
 +
-+static int8 countLeadingZeros32(bits32 a);
-+static int8 countLeadingZeros64(bits64 a);
-+static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp,
-+					    bits64 zSig);
-+static float64 subFloat64Sigs(float64 a, float64 b, flag zSign);
-+static float64 addFloat64Sigs(float64 a, float64 b, flag zSign);
-+static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig);
-+static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp,
-+					    bits32 zSig);
-+static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
-+static float32 subFloat32Sigs(float32 a, float32 b, flag zSign);
-+static float32 addFloat32Sigs(float32 a, float32 b, flag zSign);
-+static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr,
-+				      bits64 * zSigPtr);
-+static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
-+static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
-+				      bits32 * zSigPtr);
++#ifdef CONFIG_EARLY_PRINTK
++	/*
++	 * Setup a DTLB translation for SCIF phys.
++	 */
++	addi    r21, MMUDR_STEP, r21
++	movi    0x0a03, r22	/* SCIF phys */
++	shori   0x0148, r22
++	putcfg  r21, 1, r22	/* PTEL first */
++	movi    0xfa03, r22	/* 0xfa030000, fixed SCIF virt */
++	shori   0x0003, r22
++	putcfg  r21, 0, r22	/* PTEH last */
++#endif
 +
-+inline bits64 extractFloat64Frac(float64 a)
-+{
-+	return a & LIT64(0x000FFFFFFFFFFFFF);
-+}
++	/*
++	 * Set cache behaviours.
++	 */
++	/* ICache */
++	movi	ICCR_BASE, r21
++	movi	ICCR0_INIT_VAL, r22
++	movi	ICCR1_INIT_VAL, r23
++	putcfg	r21, ICCR_REG0, r22
++	putcfg	r21, ICCR_REG1, r23
 +
-+inline flag extractFloat64Sign(float64 a)
-+{
-+	return a >> 63;
-+}
++	/* OCache */
++	movi	OCCR_BASE, r21
++	movi	OCCR0_INIT_VAL, r22
++	movi	OCCR1_INIT_VAL, r23
++	putcfg	r21, OCCR_REG0, r22
++	putcfg	r21, OCCR_REG1, r23
 +
-+inline int16 extractFloat64Exp(float64 a)
-+{
-+	return (a >> 52) & 0x7FF;
-+}
 +
-+inline int16 extractFloat32Exp(float32 a)
-+{
-+	return (a >> 23) & 0xFF;
-+}
++	/*
++	 * Enable Caches and MMU. Do the first non-PIC jump.
++         * Now head.S global variables, constants and externs
++	 * can be used.
++	 */
++	getcon	SR, r21
++	movi	SR_ENABLE_MMU, r22
++	or	r21, r22, r21
++	putcon	r21, SSR
++	movi	hyperspace, r22
++	ori	r22, 1, r22	    /* Make it SHmedia, not required but..*/
++	putcon	r22, SPC
++	synco
++	rte			    /* And now go into the hyperspace ... */
++hyperspace:			    /* ... that's the next instruction !  */
 +
-+inline flag extractFloat32Sign(float32 a)
-+{
-+	return a >> 31;
-+}
++	/*
++	 * Set CPU to a consistent state.
++	 * r31 = FPU support flag
++	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
++	 */
++	movi	start_kernel, r32
++	ori	r32, 1, r32
 +
-+inline bits32 extractFloat32Frac(float32 a)
-+{
-+	return a & 0x007FFFFF;
-+}
++	ptabs	r32, tr0		    /* r32 = _start_kernel address        */
++	pta/u	hopeless, tr1
++	pta/u	hopeless, tr2
++	pta/u	hopeless, tr3
++	pta/u	hopeless, tr4
++	pta/u	hopeless, tr5
++	pta/u	hopeless, tr6
++	pta/u	hopeless, tr7
++	gettr	tr1, r28			/* r28 = hopeless address */
 +
-+inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
-+{
-+	return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
-+}
++	/* Set initial stack pointer */
++	movi	init_thread_union, SP
++	putcon	SP, KCR0		/* Set current to init_task */
++	movi	THREAD_SIZE, r22	/* Point to the end */
++	add	SP, r22, SP
 +
-+inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
-+{
-+	bits64 z;
++	/*
++	 * Initialize FPU.
++	 * Keep FPU flag in r31. After this block:
++	 * r31 = FPU flag
++	 */
++	movi fpu_in_use, r31	/* Temporary */
 +
-+	if (count == 0) {
-+		z = a;
-+	} else if (count < 64) {
-+		z = (a >> count) | ((a << ((-count) & 63)) != 0);
-+	} else {
-+		z = (a != 0);
-+	}
-+	*zPtr = z;
-+}
++#ifdef CONFIG_SH_FPU
++	getcon	SR, r21
++	movi	SR_ENABLE_FPU, r22
++	and	r21, r22, r22
++	putcon	r22, SR			/* Try to enable */
++	getcon	SR, r22
++	xor	r21, r22, r21
++	shlri	r21, 15, r21		/* Supposedly 0/1 */
++	st.q	r31, 0 , r21		/* Set fpu_in_use */
++#else
++	movi	0, r21
++	st.q	r31, 0 , r21		/* Set fpu_in_use */
++#endif
++	or	r21, ZERO, r31		/* Set FPU flag at last */
 +
-+static int8 countLeadingZeros32(bits32 a)
-+{
-+	static const int8 countLeadingZerosHigh[] = {
-+		8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
-+		3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-+		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-+		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-+		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-+		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-+		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-+		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-+	};
-+	int8 shiftCount;
++#ifndef CONFIG_SH_NO_BSS_INIT
++/* Don't clear BSS if running on slow platforms such as an RTL simulation,
++   remote memory via SHdebug link, etc.  For these the memory can be guaranteed
++   to be all zero on boot anyway. */
++	/*
++	 * Clear bss
++	 */
++	pta	clear_quad, tr1
++	movi	__bss_start, r22
++	movi	_end, r23
++clear_quad:
++	st.q	r22, 0, ZERO
++	addi	r22, 8, r22
++	bne	r22, r23, tr1		/* Both quad aligned, see vmlinux.lds.S */
++#endif
++	pta/u	hopeless, tr1
 +
-+	shiftCount = 0;
-+	if (a < 0x10000) {
-+		shiftCount += 16;
-+		a <<= 16;
-+	}
-+	if (a < 0x1000000) {
-+		shiftCount += 8;
-+		a <<= 8;
-+	}
-+	shiftCount += countLeadingZerosHigh[a >> 24];
-+	return shiftCount;
++	/* Say bye to head.S but be prepared to wrongly get back ... */
++	blink	tr0, LINK
 +
-+}
++	/* If we ever get back here through LINK/tr1-tr7 */
++	pta/u	hopeless, tr7
 +
-+static int8 countLeadingZeros64(bits64 a)
-+{
-+	int8 shiftCount;
++hopeless:
++	/*
++	 * Something's badly wrong here. Loop endlessly,
++         * there's nothing more we can do about it.
++	 *
++	 * Note on hopeless: it can be jumped into invariably
++	 * before or after jumping into hyperspace. The only
++	 * requirement is to be PIC called (PTA) before and
++	 * any way (PTA/PTABS) after. According to Virtual
++	 * to Physical mapping a simulator/emulator can easily
++	 * tell where we came here from just looking at hopeless
++	 * (PC) address.
++	 *
++	 * For debugging purposes:
++	 * (r28) hopeless/loop address
++	 * (r29) Original SR
++	 * (r30) CPU type/Platform endianness
++	 * (r31) FPU Support
++	 * (r32) _start_kernel address
++	 */
++	blink	tr7, ZERO
+diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
+index 4b449c4..f9bcc60 100644
+--- a/arch/sh/kernel/init_task.c
++++ b/arch/sh/kernel/init_task.c
+@@ -11,8 +11,8 @@ static struct fs_struct init_fs = INIT_FS;
+ static struct files_struct init_files = INIT_FILES;
+ static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++struct pt_regs fake_swapper_regs;
+ struct mm_struct init_mm = INIT_MM(init_mm);
+-
+ EXPORT_SYMBOL(init_mm);
+ 
+ /*
+@@ -22,7 +22,7 @@ EXPORT_SYMBOL(init_mm);
+  * way process stacks are handled. This is done by having a special
+  * "init_task" linker map entry..
+  */
+-union thread_union init_thread_union 
++union thread_union init_thread_union
+ 	__attribute__((__section__(".data.init_task"))) =
+ 		{ INIT_THREAD_INFO(init_task) };
+ 
+diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
+index 501fe03..71c9fde 100644
+--- a/arch/sh/kernel/io.c
++++ b/arch/sh/kernel/io.c
+@@ -61,73 +61,6 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
+ }
+ EXPORT_SYMBOL(memset_io);
+ 
+-void __raw_readsl(unsigned long addr, void *datap, int len)
+-{
+-	u32 *data;
+-
+-	for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
+-		*data++ = ctrl_inl(addr);
+-
+-	if (likely(len >= (0x20 >> 2))) {
+-		int tmp2, tmp3, tmp4, tmp5, tmp6;
+-
+-		__asm__ __volatile__(
+-			"1:			\n\t"
+-			"mov.l	@%7, r0		\n\t"
+-			"mov.l	@%7, %2		\n\t"
+-#ifdef CONFIG_CPU_SH4
+-			"movca.l r0, @%0	\n\t"
+-#else
+-			"mov.l	r0, @%0		\n\t"
+-#endif
+-			"mov.l	@%7, %3		\n\t"
+-			"mov.l	@%7, %4		\n\t"
+-			"mov.l	@%7, %5		\n\t"
+-			"mov.l	@%7, %6		\n\t"
+-			"mov.l	@%7, r7		\n\t"
+-			"mov.l	@%7, r0		\n\t"
+-			"mov.l	%2, @(0x04,%0)	\n\t"
+-			"mov	#0x20>>2, %2	\n\t"
+-			"mov.l	%3, @(0x08,%0)	\n\t"
+-			"sub	%2, %1		\n\t"
+-			"mov.l	%4, @(0x0c,%0)	\n\t"
+-			"cmp/hi	%1, %2		! T if 32 > len	\n\t"
+-			"mov.l	%5, @(0x10,%0)	\n\t"
+-			"mov.l	%6, @(0x14,%0)	\n\t"
+-			"mov.l	r7, @(0x18,%0)	\n\t"
+-			"mov.l	r0, @(0x1c,%0)	\n\t"
+-			"bf.s	1b		\n\t"
+-			" add	#0x20, %0	\n\t"
+-			: "=&r" (data), "=&r" (len),
+-			  "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
+-			  "=&r" (tmp5), "=&r" (tmp6)
+-			: "r"(addr), "0" (data), "1" (len)
+-			: "r0", "r7", "t", "memory");
+-	}
+-
+-	for (; len != 0; len--)
+-		*data++ = ctrl_inl(addr);
+-}
+-EXPORT_SYMBOL(__raw_readsl);
+-
+-void __raw_writesl(unsigned long addr, const void *data, int len)
+-{
+-	if (likely(len != 0)) {
+-		int tmp1;
+-
+-		__asm__ __volatile__ (
+-			"1:				\n\t"
+-			"mov.l	@%0+, %1	\n\t"
+-			"dt		%3		\n\t"
+-			"bf.s		1b		\n\t"
+-			" mov.l	%1, @%4		\n\t"
+-			: "=&r" (data), "=&r" (tmp1)
+-			: "0" (data), "r" (len), "r"(addr)
+-			: "t", "memory");
+-	}
+-}
+-EXPORT_SYMBOL(__raw_writesl);
+-
+ void __iomem *ioport_map(unsigned long port, unsigned int nr)
+ {
+ 	return sh_mv.mv_ioport_map(port, nr);
+diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
+index 142a4e5..b3d0a03 100644
+--- a/arch/sh/kernel/module.c
++++ b/arch/sh/kernel/module.c
+@@ -1,5 +1,15 @@
+ /*  Kernel module help for SH.
+ 
++    SHcompact version by Kaz Kojima and Paul Mundt.
 +
-+	shiftCount = 0;
-+	if (a < ((bits64) 1) << 32) {
-+		shiftCount += 32;
-+	} else {
-+		a >>= 32;
-+	}
-+	shiftCount += countLeadingZeros32(a);
-+	return shiftCount;
++    SHmedia bits:
 +
-+}
++	Copyright 2004 SuperH (UK) Ltd
++	Author: Richard Curnow
 +
-+static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
-+{
-+	int8 shiftCount;
++	Based on the sh version, and on code from the sh64-specific parts of
++	modutils, originally written by Richard Curnow and Ben Gaster.
 +
-+	shiftCount = countLeadingZeros64(zSig) - 1;
-+	return roundAndPackFloat64(zSign, zExp - shiftCount,
-+				   zSig << shiftCount);
+     This program is free software; you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation; either version 2 of the License, or
+@@ -21,12 +31,6 @@
+ #include <linux/string.h>
+ #include <linux/kernel.h>
+ 
+-#if 0
+-#define DEBUGP printk
+-#else
+-#define DEBUGP(fmt...)
+-#endif
+-
+ void *module_alloc(unsigned long size)
+ {
+ 	if (size == 0)
+@@ -52,6 +56,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_SUPERH32
+ #define COPY_UNALIGNED_WORD(sw, tw, align) \
+ { \
+ 	void *__s = &(sw), *__t = &(tw); \
+@@ -74,6 +79,10 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
+ 		break; \
+ 	} \
+ }
++#else
++/* One thing SHmedia doesn't screw up! */
++#define COPY_UNALIGNED_WORD(sw, tw, align)	{ (tw) = (sw); }
++#endif
+ 
+ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ 		   const char *strtab,
+@@ -89,8 +98,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ 	uint32_t value;
+ 	int align;
+ 
+-	DEBUGP("Applying relocate section %u to %u\n", relsec,
+-	       sechdrs[relsec].sh_info);
++	pr_debug("Applying relocate section %u to %u\n", relsec,
++		 sechdrs[relsec].sh_info);
+ 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ 		/* This is where to make the change */
+ 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+@@ -102,17 +111,44 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ 		relocation = sym->st_value + rel[i].r_addend;
+ 		align = (int)location & 3;
+ 
++#ifdef CONFIG_SUPERH64
++		/* For text addresses, bit2 of the st_other field indicates
++		 * whether the symbol is SHmedia (1) or SHcompact (0).  If
++		 * SHmedia, the LSB of the symbol needs to be asserted
++		 * for the CPU to be in SHmedia mode when it starts executing
++		 * the branch target. */
++		relocation |= (sym->st_other & 4);
++#endif
 +
-+}
+ 		switch (ELF32_R_TYPE(rel[i].r_info)) {
+ 		case R_SH_DIR32:
+-	    		COPY_UNALIGNED_WORD (*location, value, align);
++			COPY_UNALIGNED_WORD (*location, value, align);
+ 			value += relocation;
+-	    		COPY_UNALIGNED_WORD (value, *location, align);
++			COPY_UNALIGNED_WORD (value, *location, align);
+ 			break;
+ 		case R_SH_REL32:
+-	  		relocation = (relocation - (Elf32_Addr) location);
+-	    		COPY_UNALIGNED_WORD (*location, value, align);
++			relocation = (relocation - (Elf32_Addr) location);
++			COPY_UNALIGNED_WORD (*location, value, align);
+ 			value += relocation;
+-	    		COPY_UNALIGNED_WORD (value, *location, align);
++			COPY_UNALIGNED_WORD (value, *location, align);
++			break;
++		case R_SH_IMM_LOW16:
++			*location = (*location & ~0x3fffc00) |
++				((relocation & 0xffff) << 10);
++			break;
++		case R_SH_IMM_MEDLOW16:
++			*location = (*location & ~0x3fffc00) |
++				(((relocation >> 16) & 0xffff) << 10);
++			break;
++		case R_SH_IMM_LOW16_PCREL:
++			relocation -= (Elf32_Addr) location;
++			*location = (*location & ~0x3fffc00) |
++				((relocation & 0xffff) << 10);
++			break;
++		case R_SH_IMM_MEDLOW16_PCREL:
++			relocation -= (Elf32_Addr) location;
++			*location = (*location & ~0x3fffc00) |
++				(((relocation >> 16) & 0xffff) << 10);
+ 			break;
+ 		default:
+ 			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
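[Aside, not part of the patch text:] the four new R_SH_IMM_*16 relocation cases in the module.c hunk above all rewrite the same field of an SHmedia instruction: bits 10..25 hold a 16-bit immediate, so the handler masks with ~0x3fffc00 and shifts the (possibly PC-relative, possibly >>16) value into place. A minimal stand-alone sketch of that packing follows; the helper names are invented for illustration and do not exist in the kernel tree.

	/* Illustrative only -- mirrors the bit manipulation in the
	 * apply_relocate_add() hunk above. */
	#include <stdint.h>

	/* 0x3fffc00 == 0xffff << 10: the 16-bit immediate field of an
	 * SHmedia instruction word. */
	static uint32_t sh64_put_imm16(uint32_t insn, uint32_t value)
	{
		return (insn & ~0x3fffc00u) | ((value & 0xffffu) << 10);
	}

	/* R_SH_IMM_LOW16 takes bits 0..15 of the relocated value,
	 * R_SH_IMM_MEDLOW16 takes bits 16..31; the *_PCREL variants
	 * subtract the address of the instruction being patched first. */
	static uint32_t sh64_reloc(uint32_t insn, uint32_t value,
				   uint32_t place, int medlow, int pcrel)
	{
		if (pcrel)
			value -= place;
		if (medlow)
			value >>= 16;
		return sh64_put_imm16(insn, value);
	}
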
+diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
+deleted file mode 100644
+index 6d7f2b0..0000000
+--- a/arch/sh/kernel/process.c
++++ /dev/null
+@@ -1,558 +0,0 @@
+-/*
+- * arch/sh/kernel/process.c
+- *
+- * This file handles the architecture-dependent parts of process handling..
+- *
+- *  Copyright (C) 1995  Linus Torvalds
+- *
+- *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+- *		     Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
+- *		     Copyright (C) 2002 - 2007  Paul Mundt
+- */
+-#include <linux/module.h>
+-#include <linux/mm.h>
+-#include <linux/elfcore.h>
+-#include <linux/pm.h>
+-#include <linux/kallsyms.h>
+-#include <linux/kexec.h>
+-#include <linux/kdebug.h>
+-#include <linux/tick.h>
+-#include <linux/reboot.h>
+-#include <linux/fs.h>
+-#include <linux/preempt.h>
+-#include <asm/uaccess.h>
+-#include <asm/mmu_context.h>
+-#include <asm/pgalloc.h>
+-#include <asm/system.h>
+-#include <asm/ubc.h>
+-
+-static int hlt_counter;
+-int ubc_usercnt = 0;
+-
+-void (*pm_idle)(void);
+-void (*pm_power_off)(void);
+-EXPORT_SYMBOL(pm_power_off);
+-
+-void disable_hlt(void)
+-{
+-	hlt_counter++;
+-}
+-EXPORT_SYMBOL(disable_hlt);
+-
+-void enable_hlt(void)
+-{
+-	hlt_counter--;
+-}
+-EXPORT_SYMBOL(enable_hlt);
+-
+-static int __init nohlt_setup(char *__unused)
+-{
+-	hlt_counter = 1;
+-	return 1;
+-}
+-__setup("nohlt", nohlt_setup);
+-
+-static int __init hlt_setup(char *__unused)
+-{
+-	hlt_counter = 0;
+-	return 1;
+-}
+-__setup("hlt", hlt_setup);
+-
+-void default_idle(void)
+-{
+-	if (!hlt_counter) {
+-		clear_thread_flag(TIF_POLLING_NRFLAG);
+-		smp_mb__after_clear_bit();
+-		set_bl_bit();
+-		while (!need_resched())
+-			cpu_sleep();
+-		clear_bl_bit();
+-		set_thread_flag(TIF_POLLING_NRFLAG);
+-	} else
+-		while (!need_resched())
+-			cpu_relax();
+-}
+-
+-void cpu_idle(void)
+-{
+-	set_thread_flag(TIF_POLLING_NRFLAG);
+-
+-	/* endless idle loop with no priority at all */
+-	while (1) {
+-		void (*idle)(void) = pm_idle;
+-
+-		if (!idle)
+-			idle = default_idle;
+-
+-		tick_nohz_stop_sched_tick();
+-		while (!need_resched())
+-			idle();
+-		tick_nohz_restart_sched_tick();
+-
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
+-		check_pgt_cache();
+-	}
+-}
+-
+-void machine_restart(char * __unused)
+-{
+-	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
+-	asm volatile("ldc %0, sr\n\t"
+-		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
+-}
+-
+-void machine_halt(void)
+-{
+-	local_irq_disable();
+-
+-	while (1)
+-		cpu_sleep();
+-}
+-
+-void machine_power_off(void)
+-{
+-	if (pm_power_off)
+-		pm_power_off();
+-}
+-
+-void show_regs(struct pt_regs * regs)
+-{
+-	printk("\n");
+-	printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
+-	print_symbol("PC is at %s\n", instruction_pointer(regs));
+-	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
+-	       regs->pc, regs->regs[15], regs->sr);
+-#ifdef CONFIG_MMU
+-	printk("TEA : %08x    ", ctrl_inl(MMU_TEA));
+-#else
+-	printk("                  ");
+-#endif
+-	printk("%s\n", print_tainted());
+-
+-	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
+-	       regs->regs[0],regs->regs[1],
+-	       regs->regs[2],regs->regs[3]);
+-	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
+-	       regs->regs[4],regs->regs[5],
+-	       regs->regs[6],regs->regs[7]);
+-	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
+-	       regs->regs[8],regs->regs[9],
+-	       regs->regs[10],regs->regs[11]);
+-	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
+-	       regs->regs[12],regs->regs[13],
+-	       regs->regs[14]);
+-	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
+-	       regs->mach, regs->macl, regs->gbr, regs->pr);
+-
+-	show_trace(NULL, (unsigned long *)regs->regs[15], regs);
+-}
+-
+-/*
+- * Create a kernel thread
+- */
+-
+-/*
+- * This is the mechanism for creating a new kernel thread.
+- *
+- */
+-extern void kernel_thread_helper(void);
+-__asm__(".align 5\n"
+-	"kernel_thread_helper:\n\t"
+-	"jsr	@r5\n\t"
+-	" nop\n\t"
+-	"mov.l	1f, r1\n\t"
+-	"jsr	@r1\n\t"
+-	" mov	r0, r4\n\t"
+-	".align 2\n\t"
+-	"1:.long do_exit");
+-
+-/* Don't use this in BL=1(cli).  Or else, CPU resets! */
+-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+-{
+-	struct pt_regs regs;
+-
+-	memset(&regs, 0, sizeof(regs));
+-	regs.regs[4] = (unsigned long)arg;
+-	regs.regs[5] = (unsigned long)fn;
+-
+-	regs.pc = (unsigned long)kernel_thread_helper;
+-	regs.sr = (1 << 30);
+-
+-	/* Ok, create the new process.. */
+-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+-		       &regs, 0, NULL, NULL);
+-}
+-
+-/*
+- * Free current thread data structures etc..
+- */
+-void exit_thread(void)
+-{
+-	if (current->thread.ubc_pc) {
+-		current->thread.ubc_pc = 0;
+-		ubc_usercnt -= 1;
+-	}
+-}
+-
+-void flush_thread(void)
+-{
+-#if defined(CONFIG_SH_FPU)
+-	struct task_struct *tsk = current;
+-	/* Forget lazy FPU state */
+-	clear_fpu(tsk, task_pt_regs(tsk));
+-	clear_used_math();
+-#endif
+-}
+-
+-void release_thread(struct task_struct *dead_task)
+-{
+-	/* do nothing */
+-}
+-
+-/* Fill in the fpu structure for a core dump.. */
+-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+-{
+-	int fpvalid = 0;
+-
+-#if defined(CONFIG_SH_FPU)
+-	struct task_struct *tsk = current;
+-
+-	fpvalid = !!tsk_used_math(tsk);
+-	if (fpvalid) {
+-		unlazy_fpu(tsk, regs);
+-		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+-	}
+-#endif
+-
+-	return fpvalid;
+-}
+-
+-/*
+- * Capture the user space registers if the task is not running (in user space)
+- */
+-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+-{
+-	struct pt_regs ptregs;
+-
+-	ptregs = *task_pt_regs(tsk);
+-	elf_core_copy_regs(regs, &ptregs);
+-
+-	return 1;
+-}
+-
+-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
+-{
+-	int fpvalid = 0;
+-
+-#if defined(CONFIG_SH_FPU)
+-	fpvalid = !!tsk_used_math(tsk);
+-	if (fpvalid) {
+-		unlazy_fpu(tsk, task_pt_regs(tsk));
+-		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+-	}
+-#endif
+-
+-	return fpvalid;
+-}
+-
+-asmlinkage void ret_from_fork(void);
+-
+-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+-		unsigned long unused,
+-		struct task_struct *p, struct pt_regs *regs)
+-{
+-	struct thread_info *ti = task_thread_info(p);
+-	struct pt_regs *childregs;
+-#if defined(CONFIG_SH_FPU)
+-	struct task_struct *tsk = current;
+-
+-	unlazy_fpu(tsk, regs);
+-	p->thread.fpu = tsk->thread.fpu;
+-	copy_to_stopped_child_used_math(p);
+-#endif
+-
+-	childregs = task_pt_regs(p);
+-	*childregs = *regs;
+-
+-	if (user_mode(regs)) {
+-		childregs->regs[15] = usp;
+-		ti->addr_limit = USER_DS;
+-	} else {
+-		childregs->regs[15] = (unsigned long)childregs;
+-		ti->addr_limit = KERNEL_DS;
+-	}
+-
+-	if (clone_flags & CLONE_SETTLS)
+-		childregs->gbr = childregs->regs[0];
+-
+-	childregs->regs[0] = 0; /* Set return value for child */
+-
+-	p->thread.sp = (unsigned long) childregs;
+-	p->thread.pc = (unsigned long) ret_from_fork;
+-
+-	p->thread.ubc_pc = 0;
+-
+-	return 0;
+-}
+-
+-/* Tracing by user break controller.  */
+-static void ubc_set_tracing(int asid, unsigned long pc)
+-{
+-#if defined(CONFIG_CPU_SH4A)
+-	unsigned long val;
+-
+-	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
+-	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
+-
+-	ctrl_outl(val, UBC_CBR0);
+-	ctrl_outl(pc,  UBC_CAR0);
+-	ctrl_outl(0x0, UBC_CAMR0);
+-	ctrl_outl(0x0, UBC_CBCR);
+-
+-	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
+-	ctrl_outl(val, UBC_CRR0);
+-
+-	/* Read UBC register that we wrote last, for checking update */
+-	val = ctrl_inl(UBC_CRR0);
+-
+-#else	/* CONFIG_CPU_SH4A */
+-	ctrl_outl(pc, UBC_BARA);
+-
+-#ifdef CONFIG_MMU
+-	ctrl_outb(asid, UBC_BASRA);
+-#endif
+-
+-	ctrl_outl(0, UBC_BAMRA);
+-
+-	if (current_cpu_data.type == CPU_SH7729 ||
+-	    current_cpu_data.type == CPU_SH7710 ||
+-	    current_cpu_data.type == CPU_SH7712) {
+-		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
+-		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
+-	} else {
+-		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
+-		ctrl_outw(BRCR_PCBA, UBC_BRCR);
+-	}
+-#endif	/* CONFIG_CPU_SH4A */
+-}
+-
+-/*
+- *	switch_to(x,y) should switch tasks from x to y.
+- *
+- */
+-struct task_struct *__switch_to(struct task_struct *prev,
+-				struct task_struct *next)
+-{
+-#if defined(CONFIG_SH_FPU)
+-	unlazy_fpu(prev, task_pt_regs(prev));
+-#endif
+-
+-#if defined(CONFIG_GUSA) && defined(CONFIG_PREEMPT)
+-	{
+-		struct pt_regs *regs;
+-
+-		preempt_disable();
+-		regs = task_pt_regs(prev);
+-		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
+-			int offset = (int)regs->regs[15];
+-
+-			/* Reset stack pointer: clear critical region mark */
+-			regs->regs[15] = regs->regs[1];
+-			if (regs->pc < regs->regs[0])
+-				/* Go to rewind point */
+-				regs->pc = regs->regs[0] + offset;
+-		}
+-		preempt_enable_no_resched();
+-	}
+-#endif
+-
+-#ifdef CONFIG_MMU
+-	/*
+-	 * Restore the kernel mode register
+-	 *	k7 (r7_bank1)
+-	 */
+-	asm volatile("ldc	%0, r7_bank"
+-		     : /* no output */
+-		     : "r" (task_thread_info(next)));
+-#endif
+-
+-	/* If no tasks are using the UBC, we're done */
+-	if (ubc_usercnt == 0)
+-		/* If no tasks are using the UBC, we're done */;
+-	else if (next->thread.ubc_pc && next->mm) {
+-		int asid = 0;
+-#ifdef CONFIG_MMU
+-		asid |= cpu_asid(smp_processor_id(), next->mm);
+-#endif
+-		ubc_set_tracing(asid, next->thread.ubc_pc);
+-	} else {
+-#if defined(CONFIG_CPU_SH4A)
+-		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
+-		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
+-#else
+-		ctrl_outw(0, UBC_BBRA);
+-		ctrl_outw(0, UBC_BBRB);
+-#endif
+-	}
+-
+-	return prev;
+-}
+-
+-asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
+-			unsigned long r6, unsigned long r7,
+-			struct pt_regs __regs)
+-{
+-#ifdef CONFIG_MMU
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
+-#else
+-	/* fork almost works, enough to trick you into looking elsewhere :-( */
+-	return -EINVAL;
+-#endif
+-}
+-
+-asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+-			 unsigned long parent_tidptr,
+-			 unsigned long child_tidptr,
+-			 struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	if (!newsp)
+-		newsp = regs->regs[15];
+-	return do_fork(clone_flags, newsp, regs, 0,
+-			(int __user *)parent_tidptr,
+-			(int __user *)child_tidptr);
+-}
+-
+-/*
+- * This is trivial, and on the face of it looks like it
+- * could equally well be done in user mode.
+- *
+- * Not so, for quite unobvious reasons - register pressure.
+- * In user mode vfork() cannot have a stack frame, and if
+- * done by calling the "clone()" system call directly, you
+- * do not have enough call-clobbered registers to hold all
+- * the information you need.
+- */
+-asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
+-			 unsigned long r6, unsigned long r7,
+-			 struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
+-		       0, NULL, NULL);
+-}
+-
+-/*
+- * sys_execve() executes a new program.
+- */
+-asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
+-			  char __user * __user *uenvp, unsigned long r7,
+-			  struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	int error;
+-	char *filename;
+-
+-	filename = getname(ufilename);
+-	error = PTR_ERR(filename);
+-	if (IS_ERR(filename))
+-		goto out;
+-
+-	error = do_execve(filename, uargv, uenvp, regs);
+-	if (error == 0) {
+-		task_lock(current);
+-		current->ptrace &= ~PT_DTRACE;
+-		task_unlock(current);
+-	}
+-	putname(filename);
+-out:
+-	return error;
+-}
+-
+-unsigned long get_wchan(struct task_struct *p)
+-{
+-	unsigned long pc;
+-
+-	if (!p || p == current || p->state == TASK_RUNNING)
+-		return 0;
+-
+-	/*
+-	 * The same comment as on the Alpha applies here, too ...
+-	 */
+-	pc = thread_saved_pc(p);
+-
+-#ifdef CONFIG_FRAME_POINTER
+-	if (in_sched_functions(pc)) {
+-		unsigned long schedule_frame = (unsigned long)p->thread.sp;
+-		return ((unsigned long *)schedule_frame)[21];
+-	}
+-#endif
+-
+-	return pc;
+-}
+-
+-asmlinkage void break_point_trap(void)
+-{
+-	/* Clear tracing.  */
+-#if defined(CONFIG_CPU_SH4A)
+-	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
+-	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
+-#else
+-	ctrl_outw(0, UBC_BBRA);
+-	ctrl_outw(0, UBC_BBRB);
+-#endif
+-	current->thread.ubc_pc = 0;
+-	ubc_usercnt -= 1;
+-
+-	force_sig(SIGTRAP, current);
+-}
+-
+-/*
+- * Generic trap handler.
+- */
+-asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
+-				   unsigned long r6, unsigned long r7,
+-				   struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-
+-	/* Rewind */
+-	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+-
+-	if (notify_die(DIE_TRAP, "debug trap", regs, 0, regs->tra & 0xff,
+-		       SIGTRAP) == NOTIFY_STOP)
+-		return;
+-
+-	force_sig(SIGTRAP, current);
+-}
+-
+-/*
+- * Special handler for BUG() traps.
+- */
+-asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
+-				 unsigned long r6, unsigned long r7,
+-				 struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-
+-	/* Rewind */
+-	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+-
+-	if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
+-		       SIGTRAP) == NOTIFY_STOP)
+-		return;
+-
+-#ifdef CONFIG_BUG
+-	if (__kernel_text_address(instruction_pointer(regs))) {
+-		u16 insn = *(u16 *)instruction_pointer(regs);
+-		if (insn == TRAPA_BUG_OPCODE)
+-			handle_BUG(regs);
+-	}
+-#endif
+-
+-	force_sig(SIGTRAP, current);
+-}
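[Aside, not part of the patch text:] arch/sh/kernel/process.c is deleted here and split into process_32.c (next hunk) and process_64.c (further down). One convention worth noting, visible both in the deleted file and in process_32.c, is how kernel_thread() parks the function pointer and its argument in pt_regs so the child starts inside kernel_thread_helper. A toy restatement follows; "struct toy_pt_regs" and toy_helper are stand-ins for the kernel's types and are not real kernel symbols.

	/* Illustrative only: restates the SH-32 kernel_thread() register
	 * setup shown in the patch, outside the kernel tree. */
	#include <string.h>

	struct toy_pt_regs {
		unsigned long regs[16];
		unsigned long pc;
		unsigned long sr;
	};

	static void toy_kernel_thread_setup(struct toy_pt_regs *regs,
					    int (*fn)(void *), void *arg,
					    unsigned long toy_helper)
	{
		memset(regs, 0, sizeof(*regs));
		regs->regs[4] = (unsigned long)arg;   /* r4 carries fn's argument */
		regs->regs[5] = (unsigned long)fn;    /* r5 is jumped through ("jsr @r5") */
		regs->pc = toy_helper;                /* child resumes in the helper */
		regs->sr = 1 << 30;                   /* SR value copied verbatim from the patch */
	}

The helper trampoline itself (the __asm__ block in the patch) calls fn(arg) through r5/r4 and hands the return value to do_exit(); the pt_regs prepared this way are what do_fork() copies into the child.
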
+diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
+new file mode 100644
+index 0000000..9ab1926
+--- /dev/null
++++ b/arch/sh/kernel/process_32.c
+@@ -0,0 +1,465 @@
++/*
++ * arch/sh/kernel/process.c
++ *
++ * This file handles the architecture-dependent parts of process handling..
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
++ *		     Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
++ *		     Copyright (C) 2002 - 2007  Paul Mundt
++ */
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/pm.h>
++#include <linux/kallsyms.h>
++#include <linux/kexec.h>
++#include <linux/kdebug.h>
++#include <linux/tick.h>
++#include <linux/reboot.h>
++#include <linux/fs.h>
++#include <linux/preempt.h>
++#include <asm/uaccess.h>
++#include <asm/mmu_context.h>
++#include <asm/pgalloc.h>
++#include <asm/system.h>
++#include <asm/ubc.h>
 +
-+static float64 subFloat64Sigs(float64 a, float64 b, flag zSign)
-+{
-+	int16 aExp, bExp, zExp;
-+	bits64 aSig, bSig, zSig;
-+	int16 expDiff;
++static int hlt_counter;
++int ubc_usercnt = 0;
 +
-+	aSig = extractFloat64Frac(a);
-+	aExp = extractFloat64Exp(a);
-+	bSig = extractFloat64Frac(b);
-+	bExp = extractFloat64Exp(b);
-+	expDiff = aExp - bExp;
-+	aSig <<= 10;
-+	bSig <<= 10;
-+	if (0 < expDiff)
-+		goto aExpBigger;
-+	if (expDiff < 0)
-+		goto bExpBigger;
-+	if (aExp == 0) {
-+		aExp = 1;
-+		bExp = 1;
-+	}
-+	if (bSig < aSig)
-+		goto aBigger;
-+	if (aSig < bSig)
-+		goto bBigger;
-+	return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
-+      bExpBigger:
-+	if (bExp == 0x7FF) {
-+		return packFloat64(zSign ^ 1, 0x7FF, 0);
-+	}
-+	if (aExp == 0) {
-+		++expDiff;
-+	} else {
-+		aSig |= LIT64(0x4000000000000000);
-+	}
-+	shift64RightJamming(aSig, -expDiff, &aSig);
-+	bSig |= LIT64(0x4000000000000000);
-+      bBigger:
-+	zSig = bSig - aSig;
-+	zExp = bExp;
-+	zSign ^= 1;
-+	goto normalizeRoundAndPack;
-+      aExpBigger:
-+	if (aExp == 0x7FF) {
-+		return a;
-+	}
-+	if (bExp == 0) {
-+		--expDiff;
-+	} else {
-+		bSig |= LIT64(0x4000000000000000);
-+	}
-+	shift64RightJamming(bSig, expDiff, &bSig);
-+	aSig |= LIT64(0x4000000000000000);
-+      aBigger:
-+	zSig = aSig - bSig;
-+	zExp = aExp;
-+      normalizeRoundAndPack:
-+	--zExp;
-+	return normalizeRoundAndPackFloat64(zSign, zExp, zSig);
++void (*pm_idle)(void);
++void (*pm_power_off)(void);
++EXPORT_SYMBOL(pm_power_off);
 +
-+}
-+static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
++void disable_hlt(void)
 +{
-+	int16 aExp, bExp, zExp;
-+	bits64 aSig, bSig, zSig;
-+	int16 expDiff;
-+
-+	aSig = extractFloat64Frac(a);
-+	aExp = extractFloat64Exp(a);
-+	bSig = extractFloat64Frac(b);
-+	bExp = extractFloat64Exp(b);
-+	expDiff = aExp - bExp;
-+	aSig <<= 9;
-+	bSig <<= 9;
-+	if (0 < expDiff) {
-+		if (aExp == 0x7FF) {
-+			return a;
-+		}
-+		if (bExp == 0) {
-+			--expDiff;
-+		} else {
-+			bSig |= LIT64(0x2000000000000000);
-+		}
-+		shift64RightJamming(bSig, expDiff, &bSig);
-+		zExp = aExp;
-+	} else if (expDiff < 0) {
-+		if (bExp == 0x7FF) {
-+			return packFloat64(zSign, 0x7FF, 0);
-+		}
-+		if (aExp == 0) {
-+			++expDiff;
-+		} else {
-+			aSig |= LIT64(0x2000000000000000);
-+		}
-+		shift64RightJamming(aSig, -expDiff, &aSig);
-+		zExp = bExp;
-+	} else {
-+		if (aExp == 0x7FF) {
-+			return a;
-+		}
-+		if (aExp == 0)
-+			return packFloat64(zSign, 0, (aSig + bSig) >> 9);
-+		zSig = LIT64(0x4000000000000000) + aSig + bSig;
-+		zExp = aExp;
-+		goto roundAndPack;
-+	}
-+	aSig |= LIT64(0x2000000000000000);
-+	zSig = (aSig + bSig) << 1;
-+	--zExp;
-+	if ((sbits64) zSig < 0) {
-+		zSig = aSig + bSig;
-+		++zExp;
-+	}
-+      roundAndPack:
-+	return roundAndPackFloat64(zSign, zExp, zSig);
-+
++	hlt_counter++;
 +}
++EXPORT_SYMBOL(disable_hlt);
 +
-+inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
++void enable_hlt(void)
 +{
-+	return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
++	hlt_counter--;
 +}
++EXPORT_SYMBOL(enable_hlt);
 +
-+inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
++static int __init nohlt_setup(char *__unused)
 +{
-+	bits32 z;
-+	if (count == 0) {
-+		z = a;
-+	} else if (count < 32) {
-+		z = (a >> count) | ((a << ((-count) & 31)) != 0);
-+	} else {
-+		z = (a != 0);
-+	}
-+	*zPtr = z;
++	hlt_counter = 1;
++	return 1;
 +}
++__setup("nohlt", nohlt_setup);
 +
-+static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
++static int __init hlt_setup(char *__unused)
 +{
-+	flag roundNearestEven;
-+	int8 roundIncrement, roundBits;
-+	flag isTiny;
-+
-+	/* SH4 has only 2 rounding modes - round to nearest and round to zero */
-+	roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
-+	roundIncrement = 0x40;
-+	if (!roundNearestEven) {
-+		roundIncrement = 0;
-+	}
-+	roundBits = zSig & 0x7F;
-+	if (0xFD <= (bits16) zExp) {
-+		if ((0xFD < zExp)
-+		    || ((zExp == 0xFD)
-+			&& ((sbits32) (zSig + roundIncrement) < 0))
-+		    ) {
-+			float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
-+			return packFloat32(zSign, 0xFF,
-+					   0) - (roundIncrement == 0);
-+		}
-+		if (zExp < 0) {
-+			isTiny = (zExp < -1)
-+			    || (zSig + roundIncrement < 0x80000000);
-+			shift32RightJamming(zSig, -zExp, &zSig);
-+			zExp = 0;
-+			roundBits = zSig & 0x7F;
-+			if (isTiny && roundBits)
-+				float_raise(FPSCR_CAUSE_UNDERFLOW);
-+		}
-+	}
-+	if (roundBits)
-+		float_raise(FPSCR_CAUSE_INEXACT);
-+	zSig = (zSig + roundIncrement) >> 7;
-+	zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven);
-+	if (zSig == 0)
-+		zExp = 0;
-+	return packFloat32(zSign, zExp, zSig);
-+
++	hlt_counter = 0;
++	return 1;
 +}
++__setup("hlt", hlt_setup);
 +
-+static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
++void default_idle(void)
 +{
-+	int8 shiftCount;
-+
-+	shiftCount = countLeadingZeros32(zSig) - 1;
-+	return roundAndPackFloat32(zSign, zExp - shiftCount,
-+				   zSig << shiftCount);
++	if (!hlt_counter) {
++		clear_thread_flag(TIF_POLLING_NRFLAG);
++		smp_mb__after_clear_bit();
++		set_bl_bit();
++		while (!need_resched())
++			cpu_sleep();
++		clear_bl_bit();
++		set_thread_flag(TIF_POLLING_NRFLAG);
++	} else
++		while (!need_resched())
++			cpu_relax();
 +}
 +
-+static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
++void cpu_idle(void)
 +{
-+	flag roundNearestEven;
-+	int16 roundIncrement, roundBits;
-+	flag isTiny;
++	set_thread_flag(TIF_POLLING_NRFLAG);
 +
-+	/* SH4 has only 2 rounding modes - round to nearest and round to zero */
-+	roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
-+	roundIncrement = 0x200;
-+	if (!roundNearestEven) {
-+		roundIncrement = 0;
-+	}
-+	roundBits = zSig & 0x3FF;
-+	if (0x7FD <= (bits16) zExp) {
-+		if ((0x7FD < zExp)
-+		    || ((zExp == 0x7FD)
-+			&& ((sbits64) (zSig + roundIncrement) < 0))
-+		    ) {
-+			float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
-+			return packFloat64(zSign, 0x7FF,
-+					   0) - (roundIncrement == 0);
-+		}
-+		if (zExp < 0) {
-+			isTiny = (zExp < -1)
-+			    || (zSig + roundIncrement <
-+				LIT64(0x8000000000000000));
-+			shift64RightJamming(zSig, -zExp, &zSig);
-+			zExp = 0;
-+			roundBits = zSig & 0x3FF;
-+			if (isTiny && roundBits)
-+				float_raise(FPSCR_CAUSE_UNDERFLOW);
-+		}
-+	}
-+	if (roundBits)
-+		float_raise(FPSCR_CAUSE_INEXACT);
-+	zSig = (zSig + roundIncrement) >> 10;
-+	zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven);
-+	if (zSig == 0)
-+		zExp = 0;
-+	return packFloat64(zSign, zExp, zSig);
++	/* endless idle loop with no priority at all */
++	while (1) {
++		void (*idle)(void) = pm_idle;
 +
-+}
++		if (!idle)
++			idle = default_idle;
 +
-+static float32 subFloat32Sigs(float32 a, float32 b, flag zSign)
-+{
-+	int16 aExp, bExp, zExp;
-+	bits32 aSig, bSig, zSig;
-+	int16 expDiff;
++		tick_nohz_stop_sched_tick();
++		while (!need_resched())
++			idle();
++		tick_nohz_restart_sched_tick();
 +
-+	aSig = extractFloat32Frac(a);
-+	aExp = extractFloat32Exp(a);
-+	bSig = extractFloat32Frac(b);
-+	bExp = extractFloat32Exp(b);
-+	expDiff = aExp - bExp;
-+	aSig <<= 7;
-+	bSig <<= 7;
-+	if (0 < expDiff)
-+		goto aExpBigger;
-+	if (expDiff < 0)
-+		goto bExpBigger;
-+	if (aExp == 0) {
-+		aExp = 1;
-+		bExp = 1;
-+	}
-+	if (bSig < aSig)
-+		goto aBigger;
-+	if (aSig < bSig)
-+		goto bBigger;
-+	return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
-+      bExpBigger:
-+	if (bExp == 0xFF) {
-+		return packFloat32(zSign ^ 1, 0xFF, 0);
-+	}
-+	if (aExp == 0) {
-+		++expDiff;
-+	} else {
-+		aSig |= 0x40000000;
-+	}
-+	shift32RightJamming(aSig, -expDiff, &aSig);
-+	bSig |= 0x40000000;
-+      bBigger:
-+	zSig = bSig - aSig;
-+	zExp = bExp;
-+	zSign ^= 1;
-+	goto normalizeRoundAndPack;
-+      aExpBigger:
-+	if (aExp == 0xFF) {
-+		return a;
-+	}
-+	if (bExp == 0) {
-+		--expDiff;
-+	} else {
-+		bSig |= 0x40000000;
++		preempt_enable_no_resched();
++		schedule();
++		preempt_disable();
++		check_pgt_cache();
 +	}
-+	shift32RightJamming(bSig, expDiff, &bSig);
-+	aSig |= 0x40000000;
-+      aBigger:
-+	zSig = aSig - bSig;
-+	zExp = aExp;
-+      normalizeRoundAndPack:
-+	--zExp;
-+	return normalizeRoundAndPackFloat32(zSign, zExp, zSig);
-+
 +}
 +
-+static float32 addFloat32Sigs(float32 a, float32 b, flag zSign)
++void machine_restart(char * __unused)
 +{
-+	int16 aExp, bExp, zExp;
-+	bits32 aSig, bSig, zSig;
-+	int16 expDiff;
-+
-+	aSig = extractFloat32Frac(a);
-+	aExp = extractFloat32Exp(a);
-+	bSig = extractFloat32Frac(b);
-+	bExp = extractFloat32Exp(b);
-+	expDiff = aExp - bExp;
-+	aSig <<= 6;
-+	bSig <<= 6;
-+	if (0 < expDiff) {
-+		if (aExp == 0xFF) {
-+			return a;
-+		}
-+		if (bExp == 0) {
-+			--expDiff;
-+		} else {
-+			bSig |= 0x20000000;
-+		}
-+		shift32RightJamming(bSig, expDiff, &bSig);
-+		zExp = aExp;
-+	} else if (expDiff < 0) {
-+		if (bExp == 0xFF) {
-+			return packFloat32(zSign, 0xFF, 0);
-+		}
-+		if (aExp == 0) {
-+			++expDiff;
-+		} else {
-+			aSig |= 0x20000000;
-+		}
-+		shift32RightJamming(aSig, -expDiff, &aSig);
-+		zExp = bExp;
-+	} else {
-+		if (aExp == 0xFF) {
-+			return a;
-+		}
-+		if (aExp == 0)
-+			return packFloat32(zSign, 0, (aSig + bSig) >> 6);
-+		zSig = 0x40000000 + aSig + bSig;
-+		zExp = aExp;
-+		goto roundAndPack;
-+	}
-+	aSig |= 0x20000000;
-+	zSig = (aSig + bSig) << 1;
-+	--zExp;
-+	if ((sbits32) zSig < 0) {
-+		zSig = aSig + bSig;
-+		++zExp;
-+	}
-+      roundAndPack:
-+	return roundAndPackFloat32(zSign, zExp, zSig);
-+
++	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
++	asm volatile("ldc %0, sr\n\t"
++		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
 +}
 +
-+float64 float64_sub(float64 a, float64 b)
++void machine_halt(void)
 +{
-+	flag aSign, bSign;
++	local_irq_disable();
 +
-+	aSign = extractFloat64Sign(a);
-+	bSign = extractFloat64Sign(b);
-+	if (aSign == bSign) {
-+		return subFloat64Sigs(a, b, aSign);
-+	} else {
-+		return addFloat64Sigs(a, b, aSign);
-+	}
++	while (1)
++		cpu_sleep();
++}
 +
++void machine_power_off(void)
++{
++	if (pm_power_off)
++		pm_power_off();
 +}
 +
-+float32 float32_sub(float32 a, float32 b)
++void show_regs(struct pt_regs * regs)
 +{
-+	flag aSign, bSign;
++	printk("\n");
++	printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
++	print_symbol("PC is at %s\n", instruction_pointer(regs));
++	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
++	       regs->pc, regs->regs[15], regs->sr);
++#ifdef CONFIG_MMU
++	printk("TEA : %08x    ", ctrl_inl(MMU_TEA));
++#else
++	printk("                  ");
++#endif
++	printk("%s\n", print_tainted());
 +
-+	aSign = extractFloat32Sign(a);
-+	bSign = extractFloat32Sign(b);
-+	if (aSign == bSign) {
-+		return subFloat32Sigs(a, b, aSign);
-+	} else {
-+		return addFloat32Sigs(a, b, aSign);
-+	}
++	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
++	       regs->regs[0],regs->regs[1],
++	       regs->regs[2],regs->regs[3]);
++	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
++	       regs->regs[4],regs->regs[5],
++	       regs->regs[6],regs->regs[7]);
++	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
++	       regs->regs[8],regs->regs[9],
++	       regs->regs[10],regs->regs[11]);
++	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
++	       regs->regs[12],regs->regs[13],
++	       regs->regs[14]);
++	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
++	       regs->mach, regs->macl, regs->gbr, regs->pr);
 +
++	show_trace(NULL, (unsigned long *)regs->regs[15], regs);
 +}
 +
-+float32 float32_add(float32 a, float32 b)
++/*
++ * Create a kernel thread
++ */
++
++/*
++ * This is the mechanism for creating a new kernel thread.
++ *
++ */
++extern void kernel_thread_helper(void);
++__asm__(".align 5\n"
++	"kernel_thread_helper:\n\t"
++	"jsr	@r5\n\t"
++	" nop\n\t"
++	"mov.l	1f, r1\n\t"
++	"jsr	@r1\n\t"
++	" mov	r0, r4\n\t"
++	".align 2\n\t"
++	"1:.long do_exit");
++
++/* Don't use this in BL=1(cli).  Or else, CPU resets! */
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 +{
-+	flag aSign, bSign;
++	struct pt_regs regs;
 +
-+	aSign = extractFloat32Sign(a);
-+	bSign = extractFloat32Sign(b);
-+	if (aSign == bSign) {
-+		return addFloat32Sigs(a, b, aSign);
-+	} else {
-+		return subFloat32Sigs(a, b, aSign);
-+	}
++	memset(&regs, 0, sizeof(regs));
++	regs.regs[4] = (unsigned long)arg;
++	regs.regs[5] = (unsigned long)fn;
++
++	regs.pc = (unsigned long)kernel_thread_helper;
++	regs.sr = (1 << 30);
 +
++	/* Ok, create the new process.. */
++	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
++		       &regs, 0, NULL, NULL);
 +}
 +
-+float64 float64_add(float64 a, float64 b)
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
 +{
-+	flag aSign, bSign;
-+
-+	aSign = extractFloat64Sign(a);
-+	bSign = extractFloat64Sign(b);
-+	if (aSign == bSign) {
-+		return addFloat64Sigs(a, b, aSign);
-+	} else {
-+		return subFloat64Sigs(a, b, aSign);
++	if (current->thread.ubc_pc) {
++		current->thread.ubc_pc = 0;
++		ubc_usercnt -= 1;
 +	}
 +}
 +
-+static void
-+normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
++void flush_thread(void)
 +{
-+	int8 shiftCount;
-+
-+	shiftCount = countLeadingZeros64(aSig) - 11;
-+	*zSigPtr = aSig << shiftCount;
-+	*zExpPtr = 1 - shiftCount;
++#if defined(CONFIG_SH_FPU)
++	struct task_struct *tsk = current;
++	/* Forget lazy FPU state */
++	clear_fpu(tsk, task_pt_regs(tsk));
++	clear_used_math();
++#endif
 +}
 +
-+inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
-+		   bits64 * z1Ptr)
++void release_thread(struct task_struct *dead_task)
 +{
-+	bits64 z1;
-+
-+	z1 = a1 + b1;
-+	*z1Ptr = z1;
-+	*z0Ptr = a0 + b0 + (z1 < a1);
++	/* do nothing */
 +}
 +
-+inline void
-+sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
-+       bits64 * z1Ptr)
++/* Fill in the fpu structure for a core dump.. */
++int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 +{
-+	*z1Ptr = a1 - b1;
-+	*z0Ptr = a0 - b0 - (a1 < b1);
-+}
++	int fpvalid = 0;
 +
-+static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
-+{
-+	bits64 b0, b1;
-+	bits64 rem0, rem1, term0, term1;
-+	bits64 z;
-+	if (b <= a0)
-+		return LIT64(0xFFFFFFFFFFFFFFFF);
-+	b0 = b >> 32;
-+	z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32;
-+	mul64To128(b, z, &term0, &term1);
-+	sub128(a0, a1, term0, term1, &rem0, &rem1);
-+	while (((sbits64) rem0) < 0) {
-+		z -= LIT64(0x100000000);
-+		b1 = b << 32;
-+		add128(rem0, rem1, b0, b1, &rem0, &rem1);
++#if defined(CONFIG_SH_FPU)
++	struct task_struct *tsk = current;
++
++	fpvalid = !!tsk_used_math(tsk);
++	if (fpvalid) {
++		unlazy_fpu(tsk, regs);
++		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
 +	}
-+	rem0 = (rem0 << 32) | (rem1 >> 32);
-+	z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0;
-+	return z;
++#endif
++
++	return fpvalid;
 +}
 +
-+inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
++asmlinkage void ret_from_fork(void);
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
++		unsigned long unused,
++		struct task_struct *p, struct pt_regs *regs)
 +{
-+	bits32 aHigh, aLow, bHigh, bLow;
-+	bits64 z0, zMiddleA, zMiddleB, z1;
++	struct thread_info *ti = task_thread_info(p);
++	struct pt_regs *childregs;
++#if defined(CONFIG_SH_FPU)
++	struct task_struct *tsk = current;
 +
-+	aLow = a;
-+	aHigh = a >> 32;
-+	bLow = b;
-+	bHigh = b >> 32;
-+	z1 = ((bits64) aLow) * bLow;
-+	zMiddleA = ((bits64) aLow) * bHigh;
-+	zMiddleB = ((bits64) aHigh) * bLow;
-+	z0 = ((bits64) aHigh) * bHigh;
-+	zMiddleA += zMiddleB;
-+	z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
-+	zMiddleA <<= 32;
-+	z1 += zMiddleA;
-+	z0 += (z1 < zMiddleA);
-+	*z1Ptr = z1;
-+	*z0Ptr = z0;
++	unlazy_fpu(tsk, regs);
++	p->thread.fpu = tsk->thread.fpu;
++	copy_to_stopped_child_used_math(p);
++#endif
++
++	childregs = task_pt_regs(p);
++	*childregs = *regs;
++
++	if (user_mode(regs)) {
++		childregs->regs[15] = usp;
++		ti->addr_limit = USER_DS;
++	} else {
++		childregs->regs[15] = (unsigned long)childregs;
++		ti->addr_limit = KERNEL_DS;
++	}
++
++	if (clone_flags & CLONE_SETTLS)
++		childregs->gbr = childregs->regs[0];
++
++	childregs->regs[0] = 0; /* Set return value for child */
++
++	p->thread.sp = (unsigned long) childregs;
++	p->thread.pc = (unsigned long) ret_from_fork;
++
++	p->thread.ubc_pc = 0;
 +
++	return 0;
 +}
 +
-+static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
-+				      bits32 * zSigPtr)
++/* Tracing by user break controller.  */
++static void ubc_set_tracing(int asid, unsigned long pc)
 +{
-+	int8 shiftCount;
++#if defined(CONFIG_CPU_SH4A)
++	unsigned long val;
 +
-+	shiftCount = countLeadingZeros32(aSig) - 8;
-+	*zSigPtr = aSig << shiftCount;
-+	*zExpPtr = 1 - shiftCount;
++	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
++	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
++
++	ctrl_outl(val, UBC_CBR0);
++	ctrl_outl(pc,  UBC_CAR0);
++	ctrl_outl(0x0, UBC_CAMR0);
++	ctrl_outl(0x0, UBC_CBCR);
++
++	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
++	ctrl_outl(val, UBC_CRR0);
++
++	/* Read UBC register that we wrote last, for checking update */
++	val = ctrl_inl(UBC_CRR0);
++
++#else	/* CONFIG_CPU_SH4A */
++	ctrl_outl(pc, UBC_BARA);
++
++#ifdef CONFIG_MMU
++	ctrl_outb(asid, UBC_BASRA);
++#endif
 +
++	ctrl_outl(0, UBC_BAMRA);
++
++	if (current_cpu_data.type == CPU_SH7729 ||
++	    current_cpu_data.type == CPU_SH7710 ||
++	    current_cpu_data.type == CPU_SH7712) {
++		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
++		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
++	} else {
++		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
++		ctrl_outw(BRCR_PCBA, UBC_BRCR);
++	}
++#endif	/* CONFIG_CPU_SH4A */
 +}
 +
-+float64 float64_div(float64 a, float64 b)
++/*
++ *	switch_to(x,y) should switch tasks from x to y.
++ *
++ */
++struct task_struct *__switch_to(struct task_struct *prev,
++				struct task_struct *next)
 +{
-+	flag aSign, bSign, zSign;
-+	int16 aExp, bExp, zExp;
-+	bits64 aSig, bSig, zSig;
-+	bits64 rem0, rem1;
-+	bits64 term0, term1;
++#if defined(CONFIG_SH_FPU)
++	unlazy_fpu(prev, task_pt_regs(prev));
++#endif
 +
-+	aSig = extractFloat64Frac(a);
-+	aExp = extractFloat64Exp(a);
-+	aSign = extractFloat64Sign(a);
-+	bSig = extractFloat64Frac(b);
-+	bExp = extractFloat64Exp(b);
-+	bSign = extractFloat64Sign(b);
-+	zSign = aSign ^ bSign;
-+	if (aExp == 0x7FF) {
-+		if (bExp == 0x7FF) {
-+		}
-+		return packFloat64(zSign, 0x7FF, 0);
-+	}
-+	if (bExp == 0x7FF) {
-+		return packFloat64(zSign, 0, 0);
-+	}
-+	if (bExp == 0) {
-+		if (bSig == 0) {
-+			if ((aExp | aSig) == 0) {
-+				float_raise(FPSCR_CAUSE_INVALID);
-+			}
-+			return packFloat64(zSign, 0x7FF, 0);
-+		}
-+		normalizeFloat64Subnormal(bSig, &bExp, &bSig);
-+	}
-+	if (aExp == 0) {
-+		if (aSig == 0)
-+			return packFloat64(zSign, 0, 0);
-+		normalizeFloat64Subnormal(aSig, &aExp, &aSig);
-+	}
-+	zExp = aExp - bExp + 0x3FD;
-+	aSig = (aSig | LIT64(0x0010000000000000)) << 10;
-+	bSig = (bSig | LIT64(0x0010000000000000)) << 11;
-+	if (bSig <= (aSig + aSig)) {
-+		aSig >>= 1;
-+		++zExp;
-+	}
-+	zSig = estimateDiv128To64(aSig, 0, bSig);
-+	if ((zSig & 0x1FF) <= 2) {
-+		mul64To128(bSig, zSig, &term0, &term1);
-+		sub128(aSig, 0, term0, term1, &rem0, &rem1);
-+		while ((sbits64) rem0 < 0) {
-+			--zSig;
-+			add128(rem0, rem1, 0, bSig, &rem0, &rem1);
-+		}
-+		zSig |= (rem1 != 0);
++#ifdef CONFIG_MMU
++	/*
++	 * Restore the kernel mode register
++	 *	k7 (r7_bank1)
++	 */
++	asm volatile("ldc	%0, r7_bank"
++		     : /* no output */
++		     : "r" (task_thread_info(next)));
++#endif
++
++	/* If no tasks are using the UBC, we're done */
++	if (ubc_usercnt == 0)
++		/* If no tasks are using the UBC, we're done */;
++	else if (next->thread.ubc_pc && next->mm) {
++		int asid = 0;
++#ifdef CONFIG_MMU
++		asid |= cpu_asid(smp_processor_id(), next->mm);
++#endif
++		ubc_set_tracing(asid, next->thread.ubc_pc);
++	} else {
++#if defined(CONFIG_CPU_SH4A)
++		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
++		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
++#else
++		ctrl_outw(0, UBC_BBRA);
++		ctrl_outw(0, UBC_BBRB);
++#endif
 +	}
-+	return roundAndPackFloat64(zSign, zExp, zSig);
 +
++	return prev;
 +}
 +
-+float32 float32_div(float32 a, float32 b)
++asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
++			unsigned long r6, unsigned long r7,
++			struct pt_regs __regs)
 +{
-+	flag aSign, bSign, zSign;
-+	int16 aExp, bExp, zExp;
-+	bits32 aSig, bSig, zSig;
++#ifdef CONFIG_MMU
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
++#else
++	/* fork almost works, enough to trick you into looking elsewhere :-( */
++	return -EINVAL;
++#endif
++}
 +
-+	aSig = extractFloat32Frac(a);
-+	aExp = extractFloat32Exp(a);
-+	aSign = extractFloat32Sign(a);
-+	bSig = extractFloat32Frac(b);
-+	bExp = extractFloat32Exp(b);
-+	bSign = extractFloat32Sign(b);
-+	zSign = aSign ^ bSign;
-+	if (aExp == 0xFF) {
-+		if (bExp == 0xFF) {
-+		}
-+		return packFloat32(zSign, 0xFF, 0);
-+	}
-+	if (bExp == 0xFF) {
-+		return packFloat32(zSign, 0, 0);
-+	}
-+	if (bExp == 0) {
-+		if (bSig == 0) {
-+			return packFloat32(zSign, 0xFF, 0);
-+		}
-+		normalizeFloat32Subnormal(bSig, &bExp, &bSig);
-+	}
-+	if (aExp == 0) {
-+		if (aSig == 0)
-+			return packFloat32(zSign, 0, 0);
-+		normalizeFloat32Subnormal(aSig, &aExp, &aSig);
-+	}
-+	zExp = aExp - bExp + 0x7D;
-+	aSig = (aSig | 0x00800000) << 7;
-+	bSig = (bSig | 0x00800000) << 8;
-+	if (bSig <= (aSig + aSig)) {
-+		aSig >>= 1;
-+		++zExp;
-+	}
-+	zSig = (((bits64) aSig) << 32) / bSig;
-+	if ((zSig & 0x3F) == 0) {
-+		zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
-+	}
-+	return roundAndPackFloat32(zSign, zExp, zSig);
++asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
++			 unsigned long parent_tidptr,
++			 unsigned long child_tidptr,
++			 struct pt_regs __regs)
++{
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	if (!newsp)
++		newsp = regs->regs[15];
++	return do_fork(clone_flags, newsp, regs, 0,
++			(int __user *)parent_tidptr,
++			(int __user *)child_tidptr);
++}
 +
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
++			 unsigned long r6, unsigned long r7,
++			 struct pt_regs __regs)
++{
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
++		       0, NULL, NULL);
 +}
 +
-+float32 float32_mul(float32 a, float32 b)
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
++			  char __user * __user *uenvp, unsigned long r7,
++			  struct pt_regs __regs)
 +{
-+	char aSign, bSign, zSign;
-+	int aExp, bExp, zExp;
-+	unsigned int aSig, bSig;
-+	unsigned long long zSig64;
-+	unsigned int zSig;
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	int error;
++	char *filename;
 +
-+	aSig = extractFloat32Frac(a);
-+	aExp = extractFloat32Exp(a);
-+	aSign = extractFloat32Sign(a);
-+	bSig = extractFloat32Frac(b);
-+	bExp = extractFloat32Exp(b);
-+	bSign = extractFloat32Sign(b);
-+	zSign = aSign ^ bSign;
-+	if (aExp == 0) {
-+		if (aSig == 0)
-+			return packFloat32(zSign, 0, 0);
-+		normalizeFloat32Subnormal(aSig, &aExp, &aSig);
-+	}
-+	if (bExp == 0) {
-+		if (bSig == 0)
-+			return packFloat32(zSign, 0, 0);
-+		normalizeFloat32Subnormal(bSig, &bExp, &bSig);
-+	}
-+	if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
-+		return roundAndPackFloat32(zSign, 0xff, 0);
++	filename = getname(ufilename);
++	error = PTR_ERR(filename);
++	if (IS_ERR(filename))
++		goto out;
 +
-+	zExp = aExp + bExp - 0x7F;
-+	aSig = (aSig | 0x00800000) << 7;
-+	bSig = (bSig | 0x00800000) << 8;
-+	shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
-+	zSig = zSig64;
-+	if (0 <= (signed int)(zSig << 1)) {
-+		zSig <<= 1;
-+		--zExp;
++	error = do_execve(filename, uargv, uenvp, regs);
++	if (error == 0) {
++		task_lock(current);
++		current->ptrace &= ~PT_DTRACE;
++		task_unlock(current);
 +	}
-+	return roundAndPackFloat32(zSign, zExp, zSig);
-+
++	putname(filename);
++out:
++	return error;
 +}
 +
-+float64 float64_mul(float64 a, float64 b)
++unsigned long get_wchan(struct task_struct *p)
 +{
-+	char aSign, bSign, zSign;
-+	int aExp, bExp, zExp;
-+	unsigned long long int aSig, bSig, zSig0, zSig1;
++	unsigned long pc;
 +
-+	aSig = extractFloat64Frac(a);
-+	aExp = extractFloat64Exp(a);
-+	aSign = extractFloat64Sign(a);
-+	bSig = extractFloat64Frac(b);
-+	bExp = extractFloat64Exp(b);
-+	bSign = extractFloat64Sign(b);
-+	zSign = aSign ^ bSign;
++	if (!p || p == current || p->state == TASK_RUNNING)
++		return 0;
 +
-+	if (aExp == 0) {
-+		if (aSig == 0)
-+			return packFloat64(zSign, 0, 0);
-+		normalizeFloat64Subnormal(aSig, &aExp, &aSig);
-+	}
-+	if (bExp == 0) {
-+		if (bSig == 0)
-+			return packFloat64(zSign, 0, 0);
-+		normalizeFloat64Subnormal(bSig, &bExp, &bSig);
-+	}
-+	if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
-+		return roundAndPackFloat64(zSign, 0x7ff, 0);
++	/*
++	 * The same comment as on the Alpha applies here, too ...
++	 */
++	pc = thread_saved_pc(p);
 +
-+	zExp = aExp + bExp - 0x3FF;
-+	aSig = (aSig | 0x0010000000000000LL) << 10;
-+	bSig = (bSig | 0x0010000000000000LL) << 11;
-+	mul64To128(aSig, bSig, &zSig0, &zSig1);
-+	zSig0 |= (zSig1 != 0);
-+	if (0 <= (signed long long int)(zSig0 << 1)) {
-+		zSig0 <<= 1;
-+		--zExp;
++#ifdef CONFIG_FRAME_POINTER
++	if (in_sched_functions(pc)) {
++		unsigned long schedule_frame = (unsigned long)p->thread.sp;
++		return ((unsigned long *)schedule_frame)[21];
 +	}
-+	return roundAndPackFloat64(zSign, zExp, zSig0);
++#endif
++
++	return pc;
 +}
-diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
-index b22a78c..3008c00 100644
---- a/arch/sh/kernel/cpu/sh4/sq.c
-+++ b/arch/sh/kernel/cpu/sh4/sq.c
-@@ -341,17 +341,18 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev)
- {
- 	unsigned int cpu = sysdev->id;
- 	struct kobject *kobj;
-+	int error;
- 
- 	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
- 	if (unlikely(!sq_kobject[cpu]))
- 		return -ENOMEM;
- 
- 	kobj = sq_kobject[cpu];
--	kobj->parent = &sysdev->kobj;
--	kobject_set_name(kobj, "%s", "sq");
--	kobj->ktype = &ktype_percpu_entry;
--
--	return kobject_register(kobj);
-+	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
-+				     "%s", "sq");
-+	if (!error)
-+		kobject_uevent(kobj, KOBJ_ADD);
-+	return error;
- }
- 
- static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
-@@ -359,7 +360,7 @@ static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
- 	unsigned int cpu = sysdev->id;
- 	struct kobject *kobj = sq_kobject[cpu];
- 
--	kobject_unregister(kobj);
-+	kobject_put(kobj);
- 	return 0;
- }
- 
-diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
-index 2453987..08ac638 100644
---- a/arch/sh/kernel/cpu/sh4a/Makefile
-+++ b/arch/sh/kernel/cpu/sh4a/Makefile
-@@ -3,6 +3,7 @@
- #
- 
- # CPU subtype setup
-+obj-$(CONFIG_CPU_SUBTYPE_SH7763)	+= setup-sh7763.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7770)	+= setup-sh7770.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7780)	+= setup-sh7780.o
- obj-$(CONFIG_CPU_SUBTYPE_SH7785)	+= setup-sh7785.o
-@@ -14,6 +15,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SHX3)		+= setup-shx3.o
- smp-$(CONFIG_CPU_SUBTYPE_SHX3)		:= smp-shx3.o
- 
- # Primary on-chip clocks (common)
-+clock-$(CONFIG_CPU_SUBTYPE_SH7763)	:= clock-sh7763.o
- clock-$(CONFIG_CPU_SUBTYPE_SH7770)	:= clock-sh7770.o
- clock-$(CONFIG_CPU_SUBTYPE_SH7780)	:= clock-sh7780.o
- clock-$(CONFIG_CPU_SUBTYPE_SH7785)	:= clock-sh7785.o
-diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
++
++asmlinkage void break_point_trap(void)
++{
++	/* Clear tracing.  */
++#if defined(CONFIG_CPU_SH4A)
++	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
++	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
++#else
++	ctrl_outw(0, UBC_BBRA);
++	ctrl_outw(0, UBC_BBRB);
++#endif
++	current->thread.ubc_pc = 0;
++	ubc_usercnt -= 1;
++
++	force_sig(SIGTRAP, current);
++}
+diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
 new file mode 100644
-index 0000000..45889d4
+index 0000000..cff3b7d
 --- /dev/null
-+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
-@@ -0,0 +1,126 @@
++++ b/arch/sh/kernel/process_64.c
+@@ -0,0 +1,701 @@
 +/*
-+ * arch/sh/kernel/cpu/sh4a/clock-sh7763.c
++ * arch/sh/kernel/process_64.c
 + *
-+ * SH7763 support for the clock framework
++ * This file handles the architecture-dependent parts of process handling..
 + *
-+ *  Copyright (C) 2005  Paul Mundt
-+ *  Copyright (C) 2007  Yoshihiro Shimoda
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003 - 2007  Paul Mundt
++ * Copyright (C) 2003, 2004 Richard Curnow
++ *
++ * Started from SH3/4 version:
++ *   Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
++ *
++ *   In turn started from i386 version:
++ *     Copyright (C) 1995  Linus Torvalds
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + */
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/ptrace.h>
++#include <linux/reboot.h>
 +#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <asm/clock.h>
-+#include <asm/freq.h>
-+#include <asm/io.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/io.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/mmu_context.h>
 +
-+static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
-+static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
-+static int p1fc_divisors[] = { 1, 1, 1, 16, 1, 1, 1, 1 };
-+static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
++struct task_struct *last_task_used_math = NULL;
 +
-+static void master_clk_init(struct clk *clk)
++static int hlt_counter = 1;
++
++#define HARD_IDLE_TIMEOUT (HZ / 3)
++
++void disable_hlt(void)
 +{
-+	clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07];
++	hlt_counter++;
 +}
 +
-+static struct clk_ops sh7763_master_clk_ops = {
-+	.init		= master_clk_init,
-+};
-+
-+static void module_clk_recalc(struct clk *clk)
++void enable_hlt(void)
 +{
-+	int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07);
-+	clk->rate = clk->parent->rate / p0fc_divisors[idx];
++	hlt_counter--;
 +}
 +
-+static struct clk_ops sh7763_module_clk_ops = {
-+	.recalc		= module_clk_recalc,
-+};
++static int __init nohlt_setup(char *__unused)
++{
++	hlt_counter = 1;
++	return 1;
++}
 +
-+static void bus_clk_recalc(struct clk *clk)
++static int __init hlt_setup(char *__unused)
 +{
-+	int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07);
-+	clk->rate = clk->parent->rate / bfc_divisors[idx];
++	hlt_counter = 0;
++	return 1;
 +}
 +
-+static struct clk_ops sh7763_bus_clk_ops = {
-+	.recalc		= bus_clk_recalc,
-+};
++__setup("nohlt", nohlt_setup);
++__setup("hlt", hlt_setup);
 +
-+static void cpu_clk_recalc(struct clk *clk)
++static inline void hlt(void)
 +{
-+	clk->rate = clk->parent->rate;
++	__asm__ __volatile__ ("sleep" : : : "memory");
 +}
 +
-+static struct clk_ops sh7763_cpu_clk_ops = {
-+	.recalc		= cpu_clk_recalc,
-+};
++/*
++ * The idle loop on a uniprocessor SH..
++ */
++void cpu_idle(void)
++{
++	/* endless idle loop with no priority at all */
++	while (1) {
++		if (hlt_counter) {
++			while (!need_resched())
++				cpu_relax();
++		} else {
++			local_irq_disable();
++			while (!need_resched()) {
++				local_irq_enable();
++				hlt();
++				local_irq_disable();
++			}
++			local_irq_enable();
++		}
++		preempt_enable_no_resched();
++		schedule();
++		preempt_disable();
++	}
 +
-+static struct clk_ops *sh7763_clk_ops[] = {
-+	&sh7763_master_clk_ops,
-+	&sh7763_module_clk_ops,
-+	&sh7763_bus_clk_ops,
-+	&sh7763_cpu_clk_ops,
-+};
++}
 +
-+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
++void machine_restart(char * __unused)
 +{
-+	if (idx < ARRAY_SIZE(sh7763_clk_ops))
-+		*ops = sh7763_clk_ops[idx];
++	extern void phys_stext(void);
++
++	phys_stext();
 +}
 +
-+static void shyway_clk_recalc(struct clk *clk)
++void machine_halt(void)
 +{
-+	int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07);
-+	clk->rate = clk->parent->rate / cfc_divisors[idx];
++	for (;;);
 +}
 +
-+static struct clk_ops sh7763_shyway_clk_ops = {
-+	.recalc		= shyway_clk_recalc,
-+};
++void machine_power_off(void)
++{
++#if 0
++	/* Disable watchdog timer */
++	ctrl_outl(0xa5000000, WTCSR);
++	/* Configure deep standby on sleep */
++	ctrl_outl(0x03, STBCR);
++#endif
 +
-+static struct clk sh7763_shyway_clk = {
-+	.name		= "shyway_clk",
-+	.flags		= CLK_ALWAYS_ENABLED,
-+	.ops		= &sh7763_shyway_clk_ops,
-+};
++	__asm__ __volatile__ (
++		"sleep\n\t"
++		"synci\n\t"
++		"nop;nop;nop;nop\n\t"
++	);
 +
-+/*
-+ * Additional SH7763-specific on-chip clocks that aren't already part of the
-+ * clock framework
-+ */
-+static struct clk *sh7763_onchip_clocks[] = {
-+	&sh7763_shyway_clk,
-+};
++	panic("Unexpected wakeup!\n");
++}
 +
-+static int __init sh7763_clk_init(void)
++void (*pm_power_off)(void) = machine_power_off;
++EXPORT_SYMBOL(pm_power_off);
++
++void show_regs(struct pt_regs * regs)
 +{
-+	struct clk *clk = clk_get(NULL, "master_clk");
-+	int i;
++	unsigned long long ah, al, bh, bl, ch, cl;
 +
-+	for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) {
-+		struct clk *clkp = sh7763_onchip_clocks[i];
++	printk("\n");
 +
-+		clkp->parent = clk;
-+		clk_register(clkp);
-+		clk_enable(clkp);
-+	}
++	ah = (regs->pc) >> 32;
++	al = (regs->pc) & 0xffffffff;
++	bh = (regs->regs[18]) >> 32;
++	bl = (regs->regs[18]) & 0xffffffff;
++	ch = (regs->regs[15]) >> 32;
++	cl = (regs->regs[15]) & 0xffffffff;
++	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->sr) >> 32;
++	al = (regs->sr) & 0xffffffff;
++        asm volatile ("getcon   " __TEA ", %0" : "=r" (bh));
++        asm volatile ("getcon   " __TEA ", %0" : "=r" (bl));
++	bh = (bh) >> 32;
++	bl = (bl) & 0xffffffff;
++        asm volatile ("getcon   " __KCR0 ", %0" : "=r" (ch));
++        asm volatile ("getcon   " __KCR0 ", %0" : "=r" (cl));
++	ch = (ch) >> 32;
++	cl = (cl) & 0xffffffff;
++	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[0]) >> 32;
++	al = (regs->regs[0]) & 0xffffffff;
++	bh = (regs->regs[1]) >> 32;
++	bl = (regs->regs[1]) & 0xffffffff;
++	ch = (regs->regs[2]) >> 32;
++	cl = (regs->regs[2]) & 0xffffffff;
++	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[3]) >> 32;
++	al = (regs->regs[3]) & 0xffffffff;
++	bh = (regs->regs[4]) >> 32;
++	bl = (regs->regs[4]) & 0xffffffff;
++	ch = (regs->regs[5]) >> 32;
++	cl = (regs->regs[5]) & 0xffffffff;
++	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[6]) >> 32;
++	al = (regs->regs[6]) & 0xffffffff;
++	bh = (regs->regs[7]) >> 32;
++	bl = (regs->regs[7]) & 0xffffffff;
++	ch = (regs->regs[8]) >> 32;
++	cl = (regs->regs[8]) & 0xffffffff;
++	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[9]) >> 32;
++	al = (regs->regs[9]) & 0xffffffff;
++	bh = (regs->regs[10]) >> 32;
++	bl = (regs->regs[10]) & 0xffffffff;
++	ch = (regs->regs[11]) >> 32;
++	cl = (regs->regs[11]) & 0xffffffff;
++	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[12]) >> 32;
++	al = (regs->regs[12]) & 0xffffffff;
++	bh = (regs->regs[13]) >> 32;
++	bl = (regs->regs[13]) & 0xffffffff;
++	ch = (regs->regs[14]) >> 32;
++	cl = (regs->regs[14]) & 0xffffffff;
++	printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[16]) >> 32;
++	al = (regs->regs[16]) & 0xffffffff;
++	bh = (regs->regs[17]) >> 32;
++	bl = (regs->regs[17]) & 0xffffffff;
++	ch = (regs->regs[19]) >> 32;
++	cl = (regs->regs[19]) & 0xffffffff;
++	printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[20]) >> 32;
++	al = (regs->regs[20]) & 0xffffffff;
++	bh = (regs->regs[21]) >> 32;
++	bl = (regs->regs[21]) & 0xffffffff;
++	ch = (regs->regs[22]) >> 32;
++	cl = (regs->regs[22]) & 0xffffffff;
++	printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[23]) >> 32;
++	al = (regs->regs[23]) & 0xffffffff;
++	bh = (regs->regs[24]) >> 32;
++	bl = (regs->regs[24]) & 0xffffffff;
++	ch = (regs->regs[25]) >> 32;
++	cl = (regs->regs[25]) & 0xffffffff;
++	printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[26]) >> 32;
++	al = (regs->regs[26]) & 0xffffffff;
++	bh = (regs->regs[27]) >> 32;
++	bl = (regs->regs[27]) & 0xffffffff;
++	ch = (regs->regs[28]) >> 32;
++	cl = (regs->regs[28]) & 0xffffffff;
++	printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[29]) >> 32;
++	al = (regs->regs[29]) & 0xffffffff;
++	bh = (regs->regs[30]) >> 32;
++	bl = (regs->regs[30]) & 0xffffffff;
++	ch = (regs->regs[31]) >> 32;
++	cl = (regs->regs[31]) & 0xffffffff;
++	printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[32]) >> 32;
++	al = (regs->regs[32]) & 0xffffffff;
++	bh = (regs->regs[33]) >> 32;
++	bl = (regs->regs[33]) & 0xffffffff;
++	ch = (regs->regs[34]) >> 32;
++	cl = (regs->regs[34]) & 0xffffffff;
++	printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[35]) >> 32;
++	al = (regs->regs[35]) & 0xffffffff;
++	bh = (regs->regs[36]) >> 32;
++	bl = (regs->regs[36]) & 0xffffffff;
++	ch = (regs->regs[37]) >> 32;
++	cl = (regs->regs[37]) & 0xffffffff;
++	printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[38]) >> 32;
++	al = (regs->regs[38]) & 0xffffffff;
++	bh = (regs->regs[39]) >> 32;
++	bl = (regs->regs[39]) & 0xffffffff;
++	ch = (regs->regs[40]) >> 32;
++	cl = (regs->regs[40]) & 0xffffffff;
++	printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[41]) >> 32;
++	al = (regs->regs[41]) & 0xffffffff;
++	bh = (regs->regs[42]) >> 32;
++	bl = (regs->regs[42]) & 0xffffffff;
++	ch = (regs->regs[43]) >> 32;
++	cl = (regs->regs[43]) & 0xffffffff;
++	printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[44]) >> 32;
++	al = (regs->regs[44]) & 0xffffffff;
++	bh = (regs->regs[45]) >> 32;
++	bl = (regs->regs[45]) & 0xffffffff;
++	ch = (regs->regs[46]) >> 32;
++	cl = (regs->regs[46]) & 0xffffffff;
++	printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[47]) >> 32;
++	al = (regs->regs[47]) & 0xffffffff;
++	bh = (regs->regs[48]) >> 32;
++	bl = (regs->regs[48]) & 0xffffffff;
++	ch = (regs->regs[49]) >> 32;
++	cl = (regs->regs[49]) & 0xffffffff;
++	printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[50]) >> 32;
++	al = (regs->regs[50]) & 0xffffffff;
++	bh = (regs->regs[51]) >> 32;
++	bl = (regs->regs[51]) & 0xffffffff;
++	ch = (regs->regs[52]) >> 32;
++	cl = (regs->regs[52]) & 0xffffffff;
++	printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[53]) >> 32;
++	al = (regs->regs[53]) & 0xffffffff;
++	bh = (regs->regs[54]) >> 32;
++	bl = (regs->regs[54]) & 0xffffffff;
++	ch = (regs->regs[55]) >> 32;
++	cl = (regs->regs[55]) & 0xffffffff;
++	printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[56]) >> 32;
++	al = (regs->regs[56]) & 0xffffffff;
++	bh = (regs->regs[57]) >> 32;
++	bl = (regs->regs[57]) & 0xffffffff;
++	ch = (regs->regs[58]) >> 32;
++	cl = (regs->regs[58]) & 0xffffffff;
++	printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[59]) >> 32;
++	al = (regs->regs[59]) & 0xffffffff;
++	bh = (regs->regs[60]) >> 32;
++	bl = (regs->regs[60]) & 0xffffffff;
++	ch = (regs->regs[61]) >> 32;
++	cl = (regs->regs[61]) & 0xffffffff;
++	printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->regs[62]) >> 32;
++	al = (regs->regs[62]) & 0xffffffff;
++	bh = (regs->tregs[0]) >> 32;
++	bl = (regs->tregs[0]) & 0xffffffff;
++	ch = (regs->tregs[1]) >> 32;
++	cl = (regs->tregs[1]) & 0xffffffff;
++	printk("R62 : %08Lx%08Lx T0  : %08Lx%08Lx T1  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->tregs[2]) >> 32;
++	al = (regs->tregs[2]) & 0xffffffff;
++	bh = (regs->tregs[3]) >> 32;
++	bl = (regs->tregs[3]) & 0xffffffff;
++	ch = (regs->tregs[4]) >> 32;
++	cl = (regs->tregs[4]) & 0xffffffff;
++	printk("T2  : %08Lx%08Lx T3  : %08Lx%08Lx T4  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++
++	ah = (regs->tregs[5]) >> 32;
++	al = (regs->tregs[5]) & 0xffffffff;
++	bh = (regs->tregs[6]) >> 32;
++	bl = (regs->tregs[6]) & 0xffffffff;
++	ch = (regs->tregs[7]) >> 32;
++	cl = (regs->tregs[7]) & 0xffffffff;
++	printk("T5  : %08Lx%08Lx T6  : %08Lx%08Lx T7  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
 +
 +	/*
-+	 * Now that we have the rest of the clocks registered, we need to
-+	 * force the parent clock to propagate so that these clocks will
-+	 * automatically figure out their rate. We cheat by handing the
-+	 * parent clock its current rate and forcing child propagation.
++	 * If we're in kernel mode, dump the stack too..
 +	 */
-+	clk_set_rate(clk, clk_get_rate(clk));
++	if (!user_mode(regs)) {
++		void show_stack(struct task_struct *tsk, unsigned long *sp);
++		unsigned long sp = regs->regs[15] & 0xffffffff;
++		struct task_struct *tsk = get_current();
 +
-+	clk_put(clk);
++		tsk->thread.kregs = regs;
 +
-+	return 0;
++		show_stack(tsk, (unsigned long *)sp);
++	}
 +}
 +
-+arch_initcall(sh7763_clk_init);
++struct task_struct * alloc_task_struct(void)
++{
++	/* Get task descriptor pages */
++	return (struct task_struct *)
++		__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
++}
++
++void free_task_struct(struct task_struct *p)
++{
++	free_pages((unsigned long) p, get_order(THREAD_SIZE));
++}
 +
-diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
-index b9c6547..73c778d 100644
---- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
-+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
-@@ -157,14 +157,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(SCIF0, 3),
--	INTC_PRIO(SCIF1, 3),
--	INTC_PRIO(SCIF2, 3),
--	INTC_PRIO(TMU0, 2),
--	INTC_PRIO(TMU1, 2),
--};
--
- static struct intc_mask_reg mask_registers[] __initdata = {
- 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
- 	  { } },
-@@ -217,7 +209,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
- 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
- };
- 
--static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups, priorities,
-+static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups,
- 			 mask_registers, prio_registers, sense_registers);
- 
- void __init plat_irq_setup(void)
-diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
-new file mode 100644
-index 0000000..eabd538
---- /dev/null
-+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
-@@ -0,0 +1,390 @@
 +/*
-+ * SH7763 Setup
-+ *
-+ *  Copyright (C) 2006  Paul Mundt
-+ *  Copyright (C) 2007  Yoshihiro Shimoda
++ * Create a kernel thread
++ */
++ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
++{
++	do_exit(fn(arg));
++}
++
++/*
++ * This is the mechanism for creating a new kernel thread.
 + *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
++ * NOTE! Only a kernel-only process(ie the swapper or direct descendants
++ * who haven't done an "execve()") should use this: it will work within
++ * a system call from a "real" process, but the process memory space will
++ * not be freed until both the parent and the child have exited.
 + */
-+#include <linux/platform_device.h>
-+#include <linux/init.h>
-+#include <linux/serial.h>
-+#include <linux/io.h>
-+#include <asm/sci.h>
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++{
++	struct pt_regs regs;
 +
-+static struct resource rtc_resources[] = {
-+	[0] = {
-+		.start	= 0xffe80000,
-+		.end	= 0xffe80000 + 0x58 - 1,
-+		.flags	= IORESOURCE_IO,
-+	},
-+	[1] = {
-+		/* Period IRQ */
-+		.start	= 21,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+	[2] = {
-+		/* Carry IRQ */
-+		.start	= 22,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+	[3] = {
-+		/* Alarm IRQ */
-+		.start	= 20,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+};
++	memset(&regs, 0, sizeof(regs));
++	regs.regs[2] = (unsigned long)arg;
++	regs.regs[3] = (unsigned long)fn;
 +
-+static struct platform_device rtc_device = {
-+	.name		= "sh-rtc",
-+	.id		= -1,
-+	.num_resources	= ARRAY_SIZE(rtc_resources),
-+	.resource	= rtc_resources,
-+};
++	regs.pc = (unsigned long)kernel_thread_helper;
++	regs.sr = (1 << 30);
 +
-+static struct plat_sci_port sci_platform_data[] = {
-+	{
-+		.mapbase	= 0xffe00000,
-+		.flags		= UPF_BOOT_AUTOCONF,
-+		.type		= PORT_SCIF,
-+		.irqs		= { 40, 41, 43, 42 },
-+	}, {
-+		.mapbase	= 0xffe08000,
-+		.flags		= UPF_BOOT_AUTOCONF,
-+		.type		= PORT_SCIF,
-+		.irqs		= { 76, 77, 79, 78 },
-+	}, {
-+		.flags = 0,
++	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
++		       &regs, 0, NULL, NULL);
++}
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++	/*
++	 * See arch/sparc/kernel/process.c for the precedent for doing
++	 * this -- RPC.
++	 *
++	 * The SH-5 FPU save/restore approach relies on
++	 * last_task_used_math pointing to a live task_struct.  When
++	 * another task tries to use the FPU for the 1st time, the FPUDIS
++	 * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
++	 * existing FPU state to the FP regs field within
++	 * last_task_used_math before re-loading the new task's FPU state
++	 * (or initialising it if the FPU has been used before).  So if
++	 * last_task_used_math is stale, and its page has already been
++	 * re-allocated for another use, the consequences are rather
++	 * grim. Unless we null it here, there is no other path through
++	 * which it would get safely nulled.
++	 */
++#ifdef CONFIG_SH_FPU
++	if (last_task_used_math == current) {
++		last_task_used_math = NULL;
 +	}
-+};
++#endif
++}
 +
-+static struct platform_device sci_device = {
-+	.name		= "sh-sci",
-+	.id		= -1,
-+	.dev		= {
-+		.platform_data	= sci_platform_data,
-+	},
-+};
++void flush_thread(void)
++{
 +
-+static struct resource usb_ohci_resources[] = {
-+	[0] = {
-+		.start	= 0xffec8000,
-+		.end	= 0xffec80ff,
-+		.flags	= IORESOURCE_MEM,
-+	},
-+	[1] = {
-+		.start	= 83,
-+		.end	= 83,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+};
++	/* Called by fs/exec.c (flush_old_exec) to remove traces of a
++	 * previously running executable. */
++#ifdef CONFIG_SH_FPU
++	if (last_task_used_math == current) {
++		last_task_used_math = NULL;
++	}
++	/* Force FPU state to be reinitialised after exec */
++	clear_used_math();
++#endif
 +
-+static u64 usb_ohci_dma_mask = 0xffffffffUL;
-+static struct platform_device usb_ohci_device = {
-+	.name		= "sh_ohci",
-+	.id		= -1,
-+	.dev = {
-+		.dma_mask		= &usb_ohci_dma_mask,
-+		.coherent_dma_mask	= 0xffffffff,
-+	},
-+	.num_resources	= ARRAY_SIZE(usb_ohci_resources),
-+	.resource	= usb_ohci_resources,
-+};
++	/* if we are a kernel thread, about to change to user thread,
++         * update kreg
++         */
++	if(current->thread.kregs==&fake_swapper_regs) {
++          current->thread.kregs =
++             ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
++	  current->thread.uregs = current->thread.kregs;
++	}
++}
 +
-+static struct resource usbf_resources[] = {
-+	[0] = {
-+		.start	= 0xffec0000,
-+		.end	= 0xffec00ff,
-+		.flags	= IORESOURCE_MEM,
-+	},
-+	[1] = {
-+		.start	= 84,
-+		.end	= 84,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+};
++void release_thread(struct task_struct *dead_task)
++{
++	/* do nothing */
++}
 +
-+static struct platform_device usbf_device = {
-+	.name		= "sh_udc",
-+	.id		= -1,
-+	.dev = {
-+		.dma_mask		= NULL,
-+		.coherent_dma_mask	= 0xffffffff,
-+	},
-+	.num_resources	= ARRAY_SIZE(usbf_resources),
-+	.resource	= usbf_resources,
-+};
++/* Fill in the fpu structure for a core dump.. */
++int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
++{
++#ifdef CONFIG_SH_FPU
++	int fpvalid;
++	struct task_struct *tsk = current;
 +
-+static struct platform_device *sh7763_devices[] __initdata = {
-+	&rtc_device,
-+	&sci_device,
-+	&usb_ohci_device,
-+	&usbf_device,
-+};
++	fpvalid = !!tsk_used_math(tsk);
++	if (fpvalid) {
++		if (current == last_task_used_math) {
++			enable_fpu();
++			save_fpu(tsk, regs);
++			disable_fpu();
++			last_task_used_math = 0;
++			regs->sr |= SR_FD;
++		}
 +
-+static int __init sh7763_devices_setup(void)
-+{
-+	return platform_add_devices(sh7763_devices,
-+				    ARRAY_SIZE(sh7763_devices));
++		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
++	}
++
++	return fpvalid;
++#else
++	return 0; /* Task didn't use the fpu at all. */
++#endif
 +}
-+__initcall(sh7763_devices_setup);
 +
-+enum {
-+	UNUSED = 0,
++asmlinkage void ret_from_fork(void);
 +
-+	/* interrupt sources */
++int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
++		unsigned long unused,
++		struct task_struct *p, struct pt_regs *regs)
++{
++	struct pt_regs *childregs;
++	unsigned long long se;			/* Sign extension */
 +
-+	IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
-+	IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
-+	IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
-+	IRL_HHLL, IRL_HHLH, IRL_HHHL,
++#ifdef CONFIG_SH_FPU
++	if(last_task_used_math == current) {
++		enable_fpu();
++		save_fpu(current, regs);
++		disable_fpu();
++		last_task_used_math = NULL;
++		regs->sr |= SR_FD;
++	}
++#endif
++	/* Copy from sh version */
++	childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
 +
-+	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
-+	RTC_ATI, RTC_PRI, RTC_CUI,
-+	WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
-+	HUDI, LCDC,
-+	DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE,
-+	SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
-+	DMAC0_DMINT4, DMAC0_DMINT5,
-+	IIC0, IIC1,
-+	CMT,
-+	GEINT0, GEINT1, GEINT2,
-+	HAC,
-+	PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
-+	PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
-+	STIF0, STIF1,
-+	SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
-+	SIOF0, SIOF1, SIOF2,
-+	USBH, USBFI0, USBFI1,
-+	TPU, PCC,
-+	MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
-+	SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND,
-+	TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3,
-+	SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
-+	GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3,
++	*childregs = *regs;
 +
-+	/* interrupt groups */
++	if (user_mode(regs)) {
++		childregs->regs[15] = usp;
++		p->thread.uregs = childregs;
++	} else {
++		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
++	}
 +
-+	TMU012, TMU345, RTC, DMAC, SCIF0, GETHER, PCIC5,
-+	SCIF1, USBF, MMCIF, SIM, SCIF2, GPIO,
-+};
++	childregs->regs[9] = 0; /* Set return value for child */
++	childregs->sr |= SR_FD; /* Invalidate FPU flag */
 +
-+static struct intc_vect vectors[] __initdata = {
-+	INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
-+	INTC_VECT(RTC_CUI, 0x4c0),
-+	INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580),
-+	INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0),
-+	INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600),
-+	INTC_VECT(LCDC, 0x620),
-+	INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
-+	INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0),
-+	INTC_VECT(DMAC0_DMAE, 0x6c0),
-+	INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
-+	INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
-+	INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0),
-+	INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0),
-+	INTC_VECT(CMT, 0x900), INTC_VECT(GEINT0, 0x920),
-+	INTC_VECT(GEINT1, 0x940), INTC_VECT(GEINT2, 0x960),
-+	INTC_VECT(HAC, 0x980),
-+	INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
-+	INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
-+	INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
-+	INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
-+	INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
-+	INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60),
-+	INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0),
-+	INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0),
-+	INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20),
-+	INTC_VECT(USBH, 0xc60), INTC_VECT(USBFI0, 0xc80),
-+	INTC_VECT(USBFI1, 0xca0),
-+	INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0),
-+	INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
-+	INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
-+	INTC_VECT(SIM_ERI, 0xd80), INTC_VECT(SIM_RXI, 0xda0),
-+	INTC_VECT(SIM_TXI, 0xdc0), INTC_VECT(SIM_TEND, 0xde0),
-+	INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
-+	INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60),
-+	INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
-+	INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0),
-+	INTC_VECT(SCIF1_ERI, 0xf00), INTC_VECT(SCIF1_RXI, 0xf20),
-+	INTC_VECT(SCIF1_BRI, 0xf40), INTC_VECT(SCIF1_TXI, 0xf60),
-+	INTC_VECT(GPIO_CH0, 0xf80), INTC_VECT(GPIO_CH1, 0xfa0),
-+	INTC_VECT(GPIO_CH2, 0xfc0), INTC_VECT(GPIO_CH3, 0xfe0),
-+};
++	p->thread.sp = (unsigned long) childregs;
++	p->thread.pc = (unsigned long) ret_from_fork;
 +
-+static struct intc_group groups[] __initdata = {
-+	INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
-+	INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
-+	INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
-+	INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
-+		   DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
-+	INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
-+	INTC_GROUP(GETHER, GEINT0, GEINT1, GEINT2),
-+	INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
-+	INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
-+	INTC_GROUP(USBF, USBFI0, USBFI1),
-+	INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
-+	INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND),
-+	INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
-+	INTC_GROUP(GPIO, GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3),
-+};
++	/*
++	 * Sign extend the edited stack.
++         * Note that thread.pc and thread.pc will stay
++	 * 32-bit wide and context switch must take care
++	 * of NEFF sign extension.
++	 */
 +
-+static struct intc_prio priorities[] __initdata = {
-+	INTC_PRIO(SCIF0, 3),
-+	INTC_PRIO(SCIF1, 3),
-+	INTC_PRIO(SCIF2, 3),
-+};
++	se = childregs->regs[15];
++	se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
++	childregs->regs[15] = se;
 +
-+static struct intc_mask_reg mask_registers[] __initdata = {
-+	{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
-+	  { 0, 0, 0, 0, 0, 0, GPIO, 0,
-+	    SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB,
-+	    PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC,
-+	    HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
-+	{ 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
-+	  { 0, 0, 0, 0, 0, 0, SCIF2, USBF,
-+	    0, 0, STIF1, STIF0, 0, 0, USBH, GETHER,
-+	    PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1,
-+	    LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } },
-+};
++	return 0;
++}
 +
-+static struct intc_prio_reg prio_registers[] __initdata = {
-+	{ 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
-+						 TMU2, TMU2_TICPI } },
-+	{ 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
-+	{ 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
-+	{ 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } },
-+	{ 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
-+						 PCISERR, PCIINTA } },
-+	{ 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
-+						 PCIINTD, PCIC5 } },
-+	{ 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } },
-+	{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } },
-+	{ 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } },
-+	{ 0xffd400a4, 0, 32, 8, /* INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } },
-+	{ 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } },
-+	{ 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } },
-+	{ 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } },
-+	{ 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } },
-+};
++asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
++			unsigned long r4, unsigned long r5,
++			unsigned long r6, unsigned long r7,
++			struct pt_regs *pregs)
++{
++	return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
++}
 +
-+static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups, priorities,
-+			 mask_registers, prio_registers, NULL);
++asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
++			 unsigned long r4, unsigned long r5,
++			 unsigned long r6, unsigned long r7,
++			 struct pt_regs *pregs)
++{
++	if (!newsp)
++		newsp = pregs->regs[15];
++	return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
++}
 +
-+/* Support for external interrupt pins in IRQ mode */
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
++			 unsigned long r4, unsigned long r5,
++			 unsigned long r6, unsigned long r7,
++			 struct pt_regs *pregs)
++{
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
++}
 +
-+static struct intc_vect irq_vectors[] __initdata = {
-+	INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
-+	INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
-+	INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
-+	INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
-+};
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage int sys_execve(char *ufilename, char **uargv,
++			  char **uenvp, unsigned long r5,
++			  unsigned long r6, unsigned long r7,
++			  struct pt_regs *pregs)
++{
++	int error;
++	char *filename;
 +
-+static struct intc_mask_reg irq_mask_registers[] __initdata = {
-+	{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
-+	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
-+};
++	lock_kernel();
++	filename = getname((char __user *)ufilename);
++	error = PTR_ERR(filename);
++	if (IS_ERR(filename))
++		goto out;
 +
-+static struct intc_prio_reg irq_prio_registers[] __initdata = {
-+	{ 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
-+					       IRQ4, IRQ5, IRQ6, IRQ7 } },
-+};
++	error = do_execve(filename,
++			  (char __user * __user *)uargv,
++			  (char __user * __user *)uenvp,
++			  pregs);
++	if (error == 0) {
++		task_lock(current);
++		current->ptrace &= ~PT_DTRACE;
++		task_unlock(current);
++	}
++	putname(filename);
++out:
++	unlock_kernel();
++	return error;
++}
 +
-+static struct intc_sense_reg irq_sense_registers[] __initdata = {
-+	{ 0xffd0001c, 32, 2, /* ICR1 */   { IRQ0, IRQ1, IRQ2, IRQ3,
-+					    IRQ4, IRQ5, IRQ6, IRQ7 } },
-+};
++/*
++ * These bracket the sleeping functions..
++ */
++extern void interruptible_sleep_on(wait_queue_head_t *q);
 +
-+static DECLARE_INTC_DESC(intc_irq_desc, "sh7763-irq", irq_vectors,
-+			 NULL, NULL, irq_mask_registers, irq_prio_registers,
-+			 irq_sense_registers);
++#define mid_sched	((unsigned long) interruptible_sleep_on)
 +
-+/* External interrupt pins in IRL mode */
++static int in_sh64_switch_to(unsigned long pc)
++{
++	extern char __sh64_switch_to_end;
++	/* For a sleeping task, the PC is somewhere in the middle of the function,
++	   so we don't have to worry about masking the LSB off */
++	return (pc >= (unsigned long) sh64_switch_to) &&
++	       (pc < (unsigned long) &__sh64_switch_to_end);
++}
 +
-+static struct intc_vect irl_vectors[] __initdata = {
-+	INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
-+	INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
-+	INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
-+	INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
-+	INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
-+	INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
-+	INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
-+	INTC_VECT(IRL_HHHL, 0x3c0),
-+};
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long schedule_fp;
++	unsigned long sh64_switch_to_fp;
++	unsigned long schedule_caller_pc;
++	unsigned long pc;
 +
-+static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
-+	{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
-+	  { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
-+	    IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
-+	    IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
-+	    IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
-+};
++	if (!p || p == current || p->state == TASK_RUNNING)
++		return 0;
 +
-+static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
-+	{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
-+	  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+	    IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
-+	    IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
-+	    IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
-+	    IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
-+};
++	/*
++	 * The same comment as on the Alpha applies here, too ...
++	 */
++	pc = thread_saved_pc(p);
 +
-+static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors,
-+			 NULL, NULL, irl7654_mask_registers, NULL, NULL);
++#ifdef CONFIG_FRAME_POINTER
++	if (in_sh64_switch_to(pc)) {
++		sh64_switch_to_fp = (long) p->thread.sp;
++		/* r14 is saved at offset 4 in the sh64_switch_to frame */
++		schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
 +
-+static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
-+			 NULL, NULL, irl3210_mask_registers, NULL, NULL);
++		/* and the caller of 'schedule' is (currently!) saved at offset 24
++		   in the frame of schedule (from disasm) */
++		schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
++		return schedule_caller_pc;
++	}
++#endif
++	return pc;
++}
 +
-+#define INTC_ICR0	0xffd00000
-+#define INTC_INTMSK0	0xffd00044
-+#define INTC_INTMSK1	0xffd00048
-+#define INTC_INTMSK2	0xffd40080
-+#define INTC_INTMSKCLR1	0xffd00068
-+#define INTC_INTMSKCLR2	0xffd40084
++/* Provide a /proc/asids file that lists out the
++   ASIDs currently associated with the processes.  (If the DM.PC register is
++   examined through the debug link, this shows ASID + PC.  To make use of this,
++   the PID->ASID relationship needs to be known.  This is primarily for
++   debugging.)
++   */
 +
-+void __init plat_irq_setup(void)
++#if defined(CONFIG_SH64_PROC_ASIDS)
++static int
++asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
 +{
-+	/* disable IRQ7-0 */
-+	ctrl_outl(0xff000000, INTC_INTMSK0);
-+
-+	/* disable IRL3-0 + IRL7-4 */
-+	ctrl_outl(0xc0000000, INTC_INTMSK1);
-+	ctrl_outl(0xfffefffe, INTC_INTMSK2);
++	int len=0;
++	struct task_struct *p;
++	read_lock(&tasklist_lock);
++	for_each_process(p) {
++		int pid = p->pid;
 +
-+	register_intc_controller(&intc_desc);
++		if (!pid)
++			continue;
++		if (p->mm)
++			len += sprintf(buf+len, "%5d : %02lx\n", pid,
++				       asid_cache(smp_processor_id()));
++		else
++			len += sprintf(buf+len, "%5d : (none)\n", pid);
++	}
++	read_unlock(&tasklist_lock);
++	*eof = 1;
++	return len;
 +}
 +
-+void __init plat_irq_setup_pins(int mode)
++static int __init register_proc_asids(void)
 +{
-+	switch (mode) {
-+	case IRQ_MODE_IRQ:
-+		/* select IRQ mode for IRL3-0 + IRL7-4 */
-+		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
-+		register_intc_controller(&intc_irq_desc);
-+		break;
-+	case IRQ_MODE_IRL7654:
-+		/* enable IRL7-4 but don't provide any masking */
-+		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
-+		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
-+		break;
-+	case IRQ_MODE_IRL3210:
-+		/* enable IRL0-3 but don't provide any masking */
-+		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
-+		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
-+		break;
-+	case IRQ_MODE_IRL7654_MASK:
-+		/* enable IRL7-4 and mask using cpu intc controller */
-+		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
-+		register_intc_controller(&intc_irl7654_desc);
-+		break;
-+	case IRQ_MODE_IRL3210_MASK:
-+		/* enable IRL0-3 and mask using cpu intc controller */
-+		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
-+		register_intc_controller(&intc_irl3210_desc);
-+		break;
-+	default:
-+		BUG();
-+	}
++	create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
++	return 0;
 +}
-diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
-index e8fd33f..293004b 100644
---- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
-+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
-@@ -168,11 +168,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(SCIF0, 3),
--	INTC_PRIO(SCIF1, 3),
--};
++__initcall(register_proc_asids);
++#endif
+diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
+deleted file mode 100644
+index ac725f0..0000000
+--- a/arch/sh/kernel/ptrace.c
++++ /dev/null
+@@ -1,274 +0,0 @@
+-/*
+- * linux/arch/sh/kernel/ptrace.c
+- *
+- * Original x86 implementation:
+- *	By Ross Biro 1/23/92
+- *	edited by Linus Torvalds
+- *
+- * SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
+- *
+- */
+-#include <linux/kernel.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/errno.h>
+-#include <linux/ptrace.h>
+-#include <linux/user.h>
+-#include <linux/slab.h>
+-#include <linux/security.h>
+-#include <linux/signal.h>
+-#include <linux/io.h>
+-#include <asm/uaccess.h>
+-#include <asm/pgtable.h>
+-#include <asm/system.h>
+-#include <asm/processor.h>
+-#include <asm/mmu_context.h>
 -
- static struct intc_mask_reg mask_registers[] __initdata = {
- 	{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
- 	  { 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
-@@ -195,7 +190,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- 	{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
- };
- 
--static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups, priorities,
-+static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups,
- 			 mask_registers, prio_registers, NULL);
- 
- /* Support for external interrupt pins in IRQ mode */
-@@ -223,7 +218,7 @@ static struct intc_sense_reg irq_sense_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_irq_desc, "sh7780-irq", irq_vectors,
--			 NULL, NULL, irq_mask_registers, irq_prio_registers,
-+			 NULL, irq_mask_registers, irq_prio_registers,
- 			 irq_sense_registers);
- 
- /* External interrupt pins in IRL mode */
-@@ -257,10 +252,10 @@ static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
--			 NULL, NULL, irl7654_mask_registers, NULL, NULL);
-+			 NULL, irl7654_mask_registers, NULL, NULL);
- 
- static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
--			 NULL, NULL, irl3210_mask_registers, NULL, NULL);
-+			 NULL, irl3210_mask_registers, NULL, NULL);
- 
- #define INTC_ICR0	0xffd00000
- #define INTC_INTMSK0	0xffd00044
-diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
-index 39b215d..74b60e9 100644
---- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
-+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
-@@ -178,15 +178,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(SCIF0, 3),
--	INTC_PRIO(SCIF1, 3),
--	INTC_PRIO(SCIF2, 3),
--	INTC_PRIO(SCIF3, 3),
--	INTC_PRIO(SCIF4, 3),
--	INTC_PRIO(SCIF5, 3),
--};
+-/*
+- * does not yet catch signals sent when the child dies.
+- * in exit.c or in signal.c.
+- */
 -
- static struct intc_mask_reg mask_registers[] __initdata = {
- 	{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
- 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
-@@ -227,7 +218,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- 	{ 0xffd40024, 0, 32, 8, /* INT2PRI9 */ { DU, GDTA, } },
- };
- 
--static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups, priorities,
-+static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups,
- 			 mask_registers, prio_registers, NULL);
- 
- /* Support for external interrupt pins in IRQ mode */
-@@ -248,11 +239,11 @@ static struct intc_sense_reg sense_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irq0123, "sh7785-irq0123", vectors_irq0123,
--			 NULL, NULL, mask_registers, prio_registers,
-+			 NULL, mask_registers, prio_registers,
- 			 sense_registers);
- 
- static DECLARE_INTC_DESC(intc_desc_irq4567, "sh7785-irq4567", vectors_irq4567,
--			 NULL, NULL, mask_registers, prio_registers,
-+			 NULL, mask_registers, prio_registers,
- 			 sense_registers);
- 
- /* External interrupt pins in IRL mode */
-@@ -280,10 +271,10 @@ static struct intc_vect vectors_irl4567[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7785-irl0123", vectors_irl0123,
--			 NULL, NULL, mask_registers, NULL, NULL);
-+			 NULL, mask_registers, NULL, NULL);
- 
- static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
--			 NULL, NULL, mask_registers, NULL, NULL);
-+			 NULL, mask_registers, NULL, NULL);
- 
- #define INTC_ICR0	0xffd00000
- #define INTC_INTMSK0	0xffd00044
-diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
-index c6cdd7e..4dc958b 100644
---- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
-+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
-@@ -165,13 +165,6 @@ static struct intc_group groups[] __initdata = {
- 	INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS),
- };
- 
--static struct intc_prio priorities[] __initdata = {
--	INTC_PRIO(SCIF0, 3),
--	INTC_PRIO(SCIF1, 3),
--	INTC_PRIO(SCIF2, 3),
--	INTC_PRIO(SCIF3, 3),
--};
+-/*
+- * This routine will get a word off of the process kernel stack.
+- */
+-static inline int get_stack_long(struct task_struct *task, int offset)
+-{
+-	unsigned char *stack;
+-
+-	stack = (unsigned char *)task_pt_regs(task);
+-	stack += offset;
+-	return (*((int *)stack));
+-}
+-
+-/*
+- * This routine will put a word on the process kernel stack.
+- */
+-static inline int put_stack_long(struct task_struct *task, int offset,
+-				 unsigned long data)
+-{
+-	unsigned char *stack;
+-
+-	stack = (unsigned char *)task_pt_regs(task);
+-	stack += offset;
+-	*(unsigned long *) stack = data;
+-	return 0;
+-}
+-
+-static void ptrace_disable_singlestep(struct task_struct *child)
+-{
+-	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+-
+-	/*
+-	 * Ensure the UBC is not programmed at the next context switch.
+-	 *
+-	 * Normally this is not needed but there are sequences such as
+-	 * singlestep, signal delivery, and continue that leave the
+-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
+-	 */
+-	if (child->thread.ubc_pc != 0) {
+-		ubc_usercnt -= 1;
+-		child->thread.ubc_pc = 0;
+-	}
+-}
+-
+-/*
+- * Called by kernel/ptrace.c when detaching..
+- *
+- * Make sure single step bits etc are not set.
+- */
+-void ptrace_disable(struct task_struct *child)
+-{
+-	ptrace_disable_singlestep(child);
+-}
+-
+-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+-{
+-	struct user * dummy = NULL;
+-	int ret;
+-
+-	switch (request) {
+-	/* when I and D space are separate, these will need to be fixed. */
+-	case PTRACE_PEEKTEXT: /* read word at location addr. */
+-	case PTRACE_PEEKDATA:
+-		ret = generic_ptrace_peekdata(child, addr, data);
+-		break;
+-
+-	/* read the word at location addr in the USER area. */
+-	case PTRACE_PEEKUSR: {
+-		unsigned long tmp;
+-
+-		ret = -EIO;
+-		if ((addr & 3) || addr < 0 ||
+-		    addr > sizeof(struct user) - 3)
+-			break;
+-
+-		if (addr < sizeof(struct pt_regs))
+-			tmp = get_stack_long(child, addr);
+-		else if (addr >= (long) &dummy->fpu &&
+-			 addr < (long) &dummy->u_fpvalid) {
+-			if (!tsk_used_math(child)) {
+-				if (addr == (long)&dummy->fpu.fpscr)
+-					tmp = FPSCR_INIT;
+-				else
+-					tmp = 0;
+-			} else
+-				tmp = ((long *)&child->thread.fpu)
+-					[(addr - (long)&dummy->fpu) >> 2];
+-		} else if (addr == (long) &dummy->u_fpvalid)
+-			tmp = !!tsk_used_math(child);
+-		else
+-			tmp = 0;
+-		ret = put_user(tmp, (unsigned long __user *)data);
+-		break;
+-	}
+-
+-	/* when I and D space are separate, this will have to be fixed. */
+-	case PTRACE_POKETEXT: /* write the word at location addr. */
+-	case PTRACE_POKEDATA:
+-		ret = generic_ptrace_pokedata(child, addr, data);
+-		break;
+-
+-	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+-		ret = -EIO;
+-		if ((addr & 3) || addr < 0 ||
+-		    addr > sizeof(struct user) - 3)
+-			break;
+-
+-		if (addr < sizeof(struct pt_regs))
+-			ret = put_stack_long(child, addr, data);
+-		else if (addr >= (long) &dummy->fpu &&
+-			 addr < (long) &dummy->u_fpvalid) {
+-			set_stopped_child_used_math(child);
+-			((long *)&child->thread.fpu)
+-				[(addr - (long)&dummy->fpu) >> 2] = data;
+-			ret = 0;
+-		} else if (addr == (long) &dummy->u_fpvalid) {
+-			conditional_stopped_child_used_math(data, child);
+-			ret = 0;
+-		}
+-		break;
+-
+-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+-	case PTRACE_CONT: { /* restart after signal. */
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		if (request == PTRACE_SYSCALL)
+-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		else
+-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-
+-		ptrace_disable_singlestep(child);
+-
+-		child->exit_code = data;
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
+-	}
+-
+-/*
+- * make the child exit.  Best I can do is send it a sigkill.
+- * perhaps it should be put in the status that it wants to
+- * exit.
+- */
+-	case PTRACE_KILL: {
+-		ret = 0;
+-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
+-			break;
+-		ptrace_disable_singlestep(child);
+-		child->exit_code = SIGKILL;
+-		wake_up_process(child);
+-		break;
+-	}
+-
+-	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
+-		long pc;
+-		struct pt_regs *regs = NULL;
+-
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		if ((child->ptrace & PT_DTRACE) == 0) {
+-			/* Spurious delayed TF traps may occur */
+-			child->ptrace |= PT_DTRACE;
+-		}
+-
+-		pc = get_stack_long(child, (long)&regs->pc);
+-
+-		/* Next scheduling will set up UBC */
+-		if (child->thread.ubc_pc == 0)
+-			ubc_usercnt += 1;
+-		child->thread.ubc_pc = pc;
+-
+-		set_tsk_thread_flag(child, TIF_SINGLESTEP);
+-		child->exit_code = data;
+-		/* give it a chance to run. */
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
+-	}
+-
+-#ifdef CONFIG_SH_DSP
+-	case PTRACE_GETDSPREGS: {
+-		unsigned long dp;
+-
+-		ret = -EIO;
+-		dp = ((unsigned long) child) + THREAD_SIZE -
+-			 sizeof(struct pt_dspregs);
+-		if (*((int *) (dp - 4)) == SR_FD) {
+-			copy_to_user(addr, (void *) dp,
+-				sizeof(struct pt_dspregs));
+-			ret = 0;
+-		}
+-		break;
+-	}
+-
+-	case PTRACE_SETDSPREGS: {
+-		unsigned long dp;
+-
+-		ret = -EIO;
+-		dp = ((unsigned long) child) + THREAD_SIZE -
+-			 sizeof(struct pt_dspregs);
+-		if (*((int *) (dp - 4)) == SR_FD) {
+-			copy_from_user((void *) dp, addr,
+-				sizeof(struct pt_dspregs));
+-			ret = 0;
+-		}
+-		break;
+-	}
+-#endif
+-	default:
+-		ret = ptrace_request(child, request, addr, data);
+-		break;
+-	}
 -
- static struct intc_mask_reg mask_registers[] __initdata = {
- 	{ 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
- 	  { IRQ0, IRQ1, IRQ2, IRQ3 } },
-@@ -218,7 +211,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
- 	    INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 4) },
- };
- 
--static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups, priorities,
-+static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups,
- 			 mask_registers, prio_registers, NULL);
- 
- /* Support for external interrupt pins in IRQ mode */
-@@ -232,8 +225,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
--			 priorities, mask_registers, prio_registers,
--			 sense_registers);
-+			 mask_registers, prio_registers, sense_registers);
- 
- /* External interrupt pins in IRL mode */
- static struct intc_vect vectors_irl[] __initdata = {
-@@ -248,7 +240,7 @@ static struct intc_vect vectors_irl[] __initdata = {
- };
- 
- static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
--			 priorities, mask_registers, prio_registers, NULL);
-+			 mask_registers, prio_registers, NULL);
- 
- void __init plat_irq_setup_pins(int mode)
- {
-diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
-new file mode 100644
-index 0000000..8646363
---- /dev/null
-+++ b/arch/sh/kernel/cpu/sh5/Makefile
-@@ -0,0 +1,7 @@
-+#
-+# Makefile for the Linux/SuperH SH-5 backends.
-+#
-+obj-y := entry.o probe.o switchto.o
-+
-+obj-$(CONFIG_SH_FPU)		+= fpu.o
-+obj-$(CONFIG_KALLSYMS)		+= unwind.o
-diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
+-	return ret;
+-}
+-
+-asmlinkage void do_syscall_trace(void)
+-{
+-	struct task_struct *tsk = current;
+-
+-	if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
+-	    !test_thread_flag(TIF_SINGLESTEP))
+-		return;
+-	if (!(tsk->ptrace & PT_PTRACED))
+-		return;
+-	/* the 0x80 provides a way for the tracing parent to distinguish
+-	   between a syscall stop and SIGTRAP delivery */
+-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
+-				 !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
+-
+-	/*
+-	 * this isn't the same as continuing with a signal, but it will do
+-	 * for normal use.  strace only continues with a signal if the
+-	 * stopping signal is not SIGTRAP.  -brl
+-	 */
+-	if (tsk->exit_code) {
+-		send_sig(tsk->exit_code, tsk, 1);
+-		tsk->exit_code = 0;
+-	}
+-}
+diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
 new file mode 100644
-index 0000000..ba87501
+index 0000000..ce0664a
 --- /dev/null
-+++ b/arch/sh/kernel/cpu/sh5/entry.S
-@@ -0,0 +1,2101 @@
++++ b/arch/sh/kernel/ptrace_32.c
+@@ -0,0 +1,287 @@
 +/*
-+ * arch/sh/kernel/cpu/sh5/entry.S
++ * linux/arch/sh/kernel/ptrace.c
 + *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2004 - 2007  Paul Mundt
-+ * Copyright (C) 2003, 2004  Richard Curnow
++ * Original x86 implementation:
++ *	By Ross Biro 1/23/92
++ *	edited by Linus Torvalds
 + *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
++ * SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
++ * Audit support: Yuichi Nakamura <ynakam at hitachisoft.jp>
 + */
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
 +#include <linux/errno.h>
-+#include <linux/sys.h>
-+#include <asm/cpu/registers.h>
++#include <linux/ptrace.h>
++#include <linux/user.h>
++#include <linux/slab.h>
++#include <linux/security.h>
++#include <linux/signal.h>
++#include <linux/io.h>
++#include <linux/audit.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
 +#include <asm/processor.h>
-+#include <asm/unistd.h>
-+#include <asm/thread_info.h>
-+#include <asm/asm-offsets.h>
++#include <asm/mmu_context.h>
 +
 +/*
-+ * SR fields.
++ * does not yet catch signals sent when the child dies.
++ * in exit.c or in signal.c.
 + */
-+#define SR_ASID_MASK	0x00ff0000
-+#define SR_FD_MASK	0x00008000
-+#define SR_SS		0x08000000
-+#define SR_BL		0x10000000
-+#define SR_MD		0x40000000
 +
 +/*
-+ * Event code.
++ * This routine will get a word off of the process kernel stack.
 + */
-+#define	EVENT_INTERRUPT		0
-+#define	EVENT_FAULT_TLB		1
-+#define	EVENT_FAULT_NOT_TLB	2
-+#define	EVENT_DEBUG		3
++static inline int get_stack_long(struct task_struct *task, int offset)
++{
++	unsigned char *stack;
 +
-+/* EXPEVT values */
-+#define	RESET_CAUSE		0x20
-+#define DEBUGSS_CAUSE		0x980
++	stack = (unsigned char *)task_pt_regs(task);
++	stack += offset;
++	return (*((int *)stack));
++}
 +
 +/*
-+ * Frame layout. Quad index.
-+ */
-+#define	FRAME_T(x)	FRAME_TBASE+(x*8)
-+#define	FRAME_R(x)	FRAME_RBASE+(x*8)
-+#define	FRAME_S(x)	FRAME_SBASE+(x*8)
-+#define FSPC		0
-+#define FSSR		1
-+#define FSYSCALL_ID	2
-+
-+/* Arrange the save frame to be a multiple of 32 bytes long */
-+#define FRAME_SBASE	0
-+#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
-+#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
-+#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */
-+#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */
-+
-+#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
-+#define FP_FRAME_BASE	0
-+
-+#define	SAVED_R2	0*8
-+#define	SAVED_R3	1*8
-+#define	SAVED_R4	2*8
-+#define	SAVED_R5	3*8
-+#define	SAVED_R18	4*8
-+#define	SAVED_R6	5*8
-+#define	SAVED_TR0	6*8
-+
-+/* These are the registers saved in the TLB path that aren't saved in the first
-+   level of the normal one. */
-+#define	TLB_SAVED_R25	7*8
-+#define	TLB_SAVED_TR1	8*8
-+#define	TLB_SAVED_TR2	9*8
-+#define	TLB_SAVED_TR3	10*8
-+#define	TLB_SAVED_TR4	11*8
-+/* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
-+   breakage otherwise. */
-+#define	TLB_SAVED_R0	12*8
-+#define	TLB_SAVED_R1	13*8
-+
-+#define CLI()				\
-+	getcon	SR, r6;			\
-+	ori	r6, 0xf0, r6;		\
-+	putcon	r6, SR;
-+
-+#define STI()				\
-+	getcon	SR, r6;			\
-+	andi	r6, ~0xf0, r6;		\
-+	putcon	r6, SR;
-+
-+#ifdef CONFIG_PREEMPT
-+#  define preempt_stop()	CLI()
-+#else
-+#  define preempt_stop()
-+#  define resume_kernel		restore_all
-+#endif
-+
-+	.section	.data, "aw"
-+
-+#define FAST_TLBMISS_STACK_CACHELINES 4
-+#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
-+
-+/* Register back-up area for all exceptions */
-+	.balign	32
-+	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
-+	 * register saves etc. */
-+	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
-+/* This is 32 byte aligned by construction */
-+/* Register back-up area for all exceptions */
-+reg_save_area:
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+
-+	.quad	0
-+	.quad   0
-+
-+/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
-+ * reentrancy. Note this area may be accessed via physical address.
-+ * Align so this fits a whole single cache line, for ease of purging.
++ * This routine will put a word on the process kernel stack.
 + */
-+	.balign 32,0,32
-+resvec_save_area:
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+	.quad	0
-+	.balign 32,0,32
-+
-+/* Jump table of 3rd level handlers  */
-+trap_jtable:
-+	.long	do_exception_error		/* 0x000 */
-+	.long	do_exception_error		/* 0x020 */
-+	.long	tlb_miss_load				/* 0x040 */
-+	.long	tlb_miss_store				/* 0x060 */
-+	! ARTIFICIAL pseudo-EXPEVT setting
-+	.long	do_debug_interrupt		/* 0x080 */
-+	.long	tlb_miss_load				/* 0x0A0 */
-+	.long	tlb_miss_store				/* 0x0C0 */
-+	.long	do_address_error_load	/* 0x0E0 */
-+	.long	do_address_error_store	/* 0x100 */
-+#ifdef CONFIG_SH_FPU
-+	.long	do_fpu_error		/* 0x120 */
-+#else
-+	.long	do_exception_error		/* 0x120 */
-+#endif
-+	.long	do_exception_error		/* 0x140 */
-+	.long	system_call				/* 0x160 */
-+	.long	do_reserved_inst		/* 0x180 */
-+	.long	do_illegal_slot_inst	/* 0x1A0 */
-+	.long	do_exception_error		/* 0x1C0 - NMI */
-+	.long	do_exception_error		/* 0x1E0 */
-+	.rept 15
-+		.long do_IRQ		/* 0x200 - 0x3C0 */
-+	.endr
-+	.long	do_exception_error		/* 0x3E0 */
-+	.rept 32
-+		.long do_IRQ		/* 0x400 - 0x7E0 */
-+	.endr
-+	.long	fpu_error_or_IRQA			/* 0x800 */
-+	.long	fpu_error_or_IRQB			/* 0x820 */
-+	.long	do_IRQ			/* 0x840 */
-+	.long	do_IRQ			/* 0x860 */
-+	.rept 6
-+		.long do_exception_error	/* 0x880 - 0x920 */
-+	.endr
-+	.long	do_software_break_point	/* 0x940 */
-+	.long	do_exception_error		/* 0x960 */
-+	.long	do_single_step		/* 0x980 */
++static inline int put_stack_long(struct task_struct *task, int offset,
++				 unsigned long data)
++{
++	unsigned char *stack;
 +
-+	.rept 3
-+		.long do_exception_error	/* 0x9A0 - 0x9E0 */
-+	.endr
-+	.long	do_IRQ			/* 0xA00 */
-+	.long	do_IRQ			/* 0xA20 */
-+	.long	itlb_miss_or_IRQ			/* 0xA40 */
-+	.long	do_IRQ			/* 0xA60 */
-+	.long	do_IRQ			/* 0xA80 */
-+	.long	itlb_miss_or_IRQ			/* 0xAA0 */
-+	.long	do_exception_error		/* 0xAC0 */
-+	.long	do_address_error_exec	/* 0xAE0 */
-+	.rept 8
-+		.long do_exception_error	/* 0xB00 - 0xBE0 */
-+	.endr
-+	.rept 18
-+		.long do_IRQ		/* 0xC00 - 0xE20 */
-+	.endr
++	stack = (unsigned char *)task_pt_regs(task);
++	stack += offset;
++	*(unsigned long *) stack = data;
++	return 0;
++}
 +
-+	.section	.text64, "ax"
++static void ptrace_disable_singlestep(struct task_struct *child)
++{
++	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 +
-+/*
-+ * --- Exception/Interrupt/Event Handling Section
-+ */
++	/*
++	 * Ensure the UBC is not programmed at the next context switch.
++	 *
++	 * Normally this is not needed but there are sequences such as
++	 * singlestep, signal delivery, and continue that leave the
++	 * ubc_pc non-zero leading to spurious SIGTRAPs.
++	 */
++	if (child->thread.ubc_pc != 0) {
++		ubc_usercnt -= 1;
++		child->thread.ubc_pc = 0;
++	}
++}
 +
 +/*
-+ * VBR and RESVEC blocks.
-+ *
-+ * First level handler for VBR-based exceptions.
-+ *
-+ * To avoid waste of space, align to the maximum text block size.
-+ * This is assumed to be at most 128 bytes or 32 instructions.
-+ * DO NOT EXCEED 32 instructions on the first level handlers !
-+ *
-+ * Also note that RESVEC is contained within the VBR block
-+ * where the room left (1KB - TEXT_SIZE) allows placing
-+ * the RESVEC block (at most 512B + TEXT_SIZE).
-+ *
-+ * So first (and only) level handler for RESVEC-based exceptions.
-+ *
-+ * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
-+ * and interrupt) we are a lot tight with register space until
-+ * saving onto the stack frame, which is done in handle_exception().
++ * Called by kernel/ptrace.c when detaching..
 + *
++ * Make sure single step bits etc are not set.
 + */
++void ptrace_disable(struct task_struct *child)
++{
++	ptrace_disable_singlestep(child);
++}
 +
-+#define	TEXT_SIZE 	128
-+#define	BLOCK_SIZE 	1664 		/* Dynamic check, 13*128 */
-+
-+	.balign TEXT_SIZE
-+LVBR_block:
-+	.space	256, 0			/* Power-on class handler, */
-+					/* not required here       */
-+not_a_tlb_miss:
-+	synco	/* TAKum03020 (but probably a good idea anyway.) */
-+	/* Save original stack pointer into KCR1 */
-+	putcon	SP, KCR1
-+
-+	/* Save other original registers into reg_save_area */
-+        movi  reg_save_area, SP
-+	st.q	SP, SAVED_R2, r2
-+	st.q	SP, SAVED_R3, r3
-+	st.q	SP, SAVED_R4, r4
-+	st.q	SP, SAVED_R5, r5
-+	st.q	SP, SAVED_R6, r6
-+	st.q	SP, SAVED_R18, r18
-+	gettr	tr0, r3
-+	st.q	SP, SAVED_TR0, r3
-+
-+	/* Set args for Non-debug, Not a TLB miss class handler */
-+	getcon	EXPEVT, r2
-+	movi	ret_from_exception, r3
-+	ori	r3, 1, r3
-+	movi	EVENT_FAULT_NOT_TLB, r4
-+	or	SP, ZERO, r5
-+	getcon	KCR1, SP
-+	pta	handle_exception, tr0
-+	blink	tr0, ZERO
-+
-+	.balign 256
-+	! VBR+0x200
-+	nop
-+	.balign 256
-+	! VBR+0x300
-+	nop
-+	.balign 256
-+	/*
-+	 * Instead of the natural .balign 1024 place RESVEC here
-+	 * respecting the final 1KB alignment.
-+	 */
-+	.balign TEXT_SIZE
-+	/*
-+	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
-+	 * block making sure the final alignment is correct.
-+	 */
-+tlb_miss:
-+	synco	/* TAKum03020 (but probably a good idea anyway.) */
-+	putcon	SP, KCR1
-+	movi	reg_save_area, SP
-+	/* SP is guaranteed 32-byte aligned. */
-+	st.q	SP, TLB_SAVED_R0 , r0
-+	st.q	SP, TLB_SAVED_R1 , r1
-+	st.q	SP, SAVED_R2 , r2
-+	st.q	SP, SAVED_R3 , r3
-+	st.q	SP, SAVED_R4 , r4
-+	st.q	SP, SAVED_R5 , r5
-+	st.q	SP, SAVED_R6 , r6
-+	st.q	SP, SAVED_R18, r18
-+
-+	/* Save R25 for safety; as/ld may want to use it to achieve the call to
-+	 * the code in mm/tlbmiss.c */
-+	st.q	SP, TLB_SAVED_R25, r25
-+	gettr	tr0, r2
-+	gettr	tr1, r3
-+	gettr	tr2, r4
-+	gettr	tr3, r5
-+	gettr	tr4, r18
-+	st.q	SP, SAVED_TR0 , r2
-+	st.q	SP, TLB_SAVED_TR1 , r3
-+	st.q	SP, TLB_SAVED_TR2 , r4
-+	st.q	SP, TLB_SAVED_TR3 , r5
-+	st.q	SP, TLB_SAVED_TR4 , r18
-+
-+	pt	do_fast_page_fault, tr0
-+	getcon	SSR, r2
-+	getcon	EXPEVT, r3
-+	getcon	TEA, r4
-+	shlri	r2, 30, r2
-+	andi	r2, 1, r2	/* r2 = SSR.MD */
-+	blink 	tr0, LINK
-+
-+	pt	fixup_to_invoke_general_handler, tr1
++long arch_ptrace(struct task_struct *child, long request, long addr, long data)
++{
++	struct user * dummy = NULL;
++	int ret;
 +
-+	/* If the fast path handler fixed the fault, just drop through quickly
-+	   to the restore code right away to return to the excepting context.
-+	   */
-+	beqi/u	r2, 0, tr1
++	switch (request) {
++	/* when I and D space are separate, these will need to be fixed. */
++	case PTRACE_PEEKTEXT: /* read word at location addr. */
++	case PTRACE_PEEKDATA:
++		ret = generic_ptrace_peekdata(child, addr, data);
++		break;
 +
-+fast_tlb_miss_restore:
-+	ld.q	SP, SAVED_TR0, r2
-+	ld.q	SP, TLB_SAVED_TR1, r3
-+	ld.q	SP, TLB_SAVED_TR2, r4
++	/* read the word at location addr in the USER area. */
++	case PTRACE_PEEKUSR: {
++		unsigned long tmp;
 +
-+	ld.q	SP, TLB_SAVED_TR3, r5
-+	ld.q	SP, TLB_SAVED_TR4, r18
++		ret = -EIO;
++		if ((addr & 3) || addr < 0 ||
++		    addr > sizeof(struct user) - 3)
++			break;
 +
-+	ptabs	r2, tr0
-+	ptabs	r3, tr1
-+	ptabs	r4, tr2
-+	ptabs	r5, tr3
-+	ptabs	r18, tr4
++		if (addr < sizeof(struct pt_regs))
++			tmp = get_stack_long(child, addr);
++		else if (addr >= (long) &dummy->fpu &&
++			 addr < (long) &dummy->u_fpvalid) {
++			if (!tsk_used_math(child)) {
++				if (addr == (long)&dummy->fpu.fpscr)
++					tmp = FPSCR_INIT;
++				else
++					tmp = 0;
++			} else
++				tmp = ((long *)&child->thread.fpu)
++					[(addr - (long)&dummy->fpu) >> 2];
++		} else if (addr == (long) &dummy->u_fpvalid)
++			tmp = !!tsk_used_math(child);
++		else
++			tmp = 0;
++		ret = put_user(tmp, (unsigned long __user *)data);
++		break;
++	}
 +
-+	ld.q	SP, TLB_SAVED_R0, r0
-+	ld.q	SP, TLB_SAVED_R1, r1
-+	ld.q	SP, SAVED_R2, r2
-+	ld.q	SP, SAVED_R3, r3
-+	ld.q	SP, SAVED_R4, r4
-+	ld.q	SP, SAVED_R5, r5
-+	ld.q	SP, SAVED_R6, r6
-+	ld.q	SP, SAVED_R18, r18
-+	ld.q	SP, TLB_SAVED_R25, r25
++	/* when I and D space are separate, this will have to be fixed. */
++	case PTRACE_POKETEXT: /* write the word at location addr. */
++	case PTRACE_POKEDATA:
++		ret = generic_ptrace_pokedata(child, addr, data);
++		break;
 +
-+	getcon	KCR1, SP
-+	rte
-+	nop /* for safety, in case the code is run on sh5-101 cut1.x */
++	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
++		ret = -EIO;
++		if ((addr & 3) || addr < 0 ||
++		    addr > sizeof(struct user) - 3)
++			break;
 +
-+fixup_to_invoke_general_handler:
++		if (addr < sizeof(struct pt_regs))
++			ret = put_stack_long(child, addr, data);
++		else if (addr >= (long) &dummy->fpu &&
++			 addr < (long) &dummy->u_fpvalid) {
++			set_stopped_child_used_math(child);
++			((long *)&child->thread.fpu)
++				[(addr - (long)&dummy->fpu) >> 2] = data;
++			ret = 0;
++		} else if (addr == (long) &dummy->u_fpvalid) {
++			conditional_stopped_child_used_math(data, child);
++			ret = 0;
++		}
++		break;
 +
-+	/* OK, new method.  Restore stuff that's not expected to get saved into
-+	   the 'first-level' reg save area, then just fall through to setting
-+	   up the registers and calling the second-level handler. */
++	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
++	case PTRACE_CONT: { /* restart after signal. */
++		ret = -EIO;
++		if (!valid_signal(data))
++			break;
++		if (request == PTRACE_SYSCALL)
++			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
++		else
++			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 +
-+	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
-+	   r25,tr1-4 and save r6 to get into the right state.  */
++		ptrace_disable_singlestep(child);
 +
-+	ld.q	SP, TLB_SAVED_TR1, r3
-+	ld.q	SP, TLB_SAVED_TR2, r4
-+	ld.q	SP, TLB_SAVED_TR3, r5
-+	ld.q	SP, TLB_SAVED_TR4, r18
-+	ld.q	SP, TLB_SAVED_R25, r25
++		child->exit_code = data;
++		wake_up_process(child);
++		ret = 0;
++		break;
++	}
 +
-+	ld.q	SP, TLB_SAVED_R0, r0
-+	ld.q	SP, TLB_SAVED_R1, r1
++/*
++ * make the child exit.  Best I can do is send it a sigkill.
++ * perhaps it should be put in the status that it wants to
++ * exit.
++ */
++	case PTRACE_KILL: {
++		ret = 0;
++		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
++			break;
++		ptrace_disable_singlestep(child);
++		child->exit_code = SIGKILL;
++		wake_up_process(child);
++		break;
++	}
 +
-+	ptabs/u	r3, tr1
-+	ptabs/u	r4, tr2
-+	ptabs/u	r5, tr3
-+	ptabs/u	r18, tr4
++	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
++		long pc;
++		struct pt_regs *regs = NULL;
 +
-+	/* Set args for Non-debug, TLB miss class handler */
-+	getcon	EXPEVT, r2
-+	movi	ret_from_exception, r3
-+	ori	r3, 1, r3
-+	movi	EVENT_FAULT_TLB, r4
-+	or	SP, ZERO, r5
-+	getcon	KCR1, SP
-+	pta	handle_exception, tr0
-+	blink	tr0, ZERO
++		ret = -EIO;
++		if (!valid_signal(data))
++			break;
++		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
++		if ((child->ptrace & PT_DTRACE) == 0) {
++			/* Spurious delayed TF traps may occur */
++			child->ptrace |= PT_DTRACE;
++		}
 +
-+/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
-+   DOES END UP AT VBR+0x600 */
-+	nop
-+	nop
-+	nop
-+	nop
-+	nop
-+	nop
++		pc = get_stack_long(child, (long)&regs->pc);
 +
-+	.balign 256
-+	/* VBR + 0x600 */
++		/* Next scheduling will set up UBC */
++		if (child->thread.ubc_pc == 0)
++			ubc_usercnt += 1;
++		child->thread.ubc_pc = pc;
 +
-+interrupt:
-+	synco	/* TAKum03020 (but probably a good idea anyway.) */
-+	/* Save original stack pointer into KCR1 */
-+	putcon	SP, KCR1
++		set_tsk_thread_flag(child, TIF_SINGLESTEP);
++		child->exit_code = data;
++		/* give it a chance to run. */
++		wake_up_process(child);
++		ret = 0;
++		break;
++	}
 +
-+	/* Save other original registers into reg_save_area */
-+        movi  reg_save_area, SP
-+	st.q	SP, SAVED_R2, r2
-+	st.q	SP, SAVED_R3, r3
-+	st.q	SP, SAVED_R4, r4
-+	st.q	SP, SAVED_R5, r5
-+	st.q	SP, SAVED_R6, r6
-+	st.q	SP, SAVED_R18, r18
-+	gettr	tr0, r3
-+	st.q	SP, SAVED_TR0, r3
++#ifdef CONFIG_SH_DSP
++	case PTRACE_GETDSPREGS: {
++		unsigned long dp;
 +
-+	/* Set args for interrupt class handler */
-+	getcon	INTEVT, r2
-+	movi	ret_from_irq, r3
-+	ori	r3, 1, r3
-+	movi	EVENT_INTERRUPT, r4
-+	or	SP, ZERO, r5
-+	getcon	KCR1, SP
-+	pta	handle_exception, tr0
-+	blink	tr0, ZERO
-+	.balign	TEXT_SIZE		/* let's waste the bare minimum */
++		ret = -EIO;
++		dp = ((unsigned long) child) + THREAD_SIZE -
++			 sizeof(struct pt_dspregs);
++		if (*((int *) (dp - 4)) == SR_FD) {
++			copy_to_user(addr, (void *) dp,
++				sizeof(struct pt_dspregs));
++			ret = 0;
++		}
++		break;
++	}
 +
-+LVBR_block_end:				/* Marker. Used for total checking */
++	case PTRACE_SETDSPREGS: {
++		unsigned long dp;
 +
-+	.balign 256
-+LRESVEC_block:
-+	/* Panic handler. Called with MMU off. Possible causes/actions:
-+	 * - Reset:		Jump to program start.
-+	 * - Single Step:	Turn off Single Step & return.
-+	 * - Others:		Call panic handler, passing PC as arg.
-+	 *			(this may need to be extended...)
-+	 */
-+reset_or_panic:
-+	synco	/* TAKum03020 (but probably a good idea anyway.) */
-+	putcon	SP, DCR
-+	/* First save r0-1 and tr0, as we need to use these */
-+	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
-+	st.q	SP, 0, r0
-+	st.q	SP, 8, r1
-+	gettr	tr0, r0
-+	st.q	SP, 32, r0
++		ret = -EIO;
++		dp = ((unsigned long) child) + THREAD_SIZE -
++			 sizeof(struct pt_dspregs);
++		if (*((int *) (dp - 4)) == SR_FD) {
++			copy_from_user((void *) dp, addr,
++				sizeof(struct pt_dspregs));
++			ret = 0;
++		}
++		break;
++	}
++#endif
++	default:
++		ret = ptrace_request(child, request, addr, data);
++		break;
++	}
 +
-+	/* Check cause */
-+	getcon	EXPEVT, r0
-+	movi	RESET_CAUSE, r1
-+	sub	r1, r0, r1		/* r1=0 if reset */
-+	movi	_stext-CONFIG_PAGE_OFFSET, r0
-+	ori	r0, 1, r0
-+	ptabs	r0, tr0
-+	beqi	r1, 0, tr0		/* Jump to start address if reset */
++	return ret;
++}
 +
-+	getcon	EXPEVT, r0
-+	movi	DEBUGSS_CAUSE, r1
-+	sub	r1, r0, r1		/* r1=0 if single step */
-+	pta	single_step_panic, tr0
-+	beqi	r1, 0, tr0		/* jump if single step */
++asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
++{
++	struct task_struct *tsk = current;
 +
-+	/* Now jump to where we save the registers. */
-+	movi	panic_stash_regs-CONFIG_PAGE_OFFSET, r1
-+	ptabs	r1, tr0
-+	blink	tr0, r63
++	if (unlikely(current->audit_context) && entryexit)
++		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
++				   regs->regs[0]);
 +
-+single_step_panic:
-+	/* We are in a handler with Single Step set. We need to resume the
-+	 * handler, by turning on MMU & turning off Single Step. */
-+	getcon	SSR, r0
-+	movi	SR_MMU, r1
-+	or	r0, r1, r0
-+	movi	~SR_SS, r1
-+	and	r0, r1, r0
-+	putcon	r0, SSR
-+	/* Restore EXPEVT, as the rte won't do this */
-+	getcon	PEXPEVT, r0
-+	putcon	r0, EXPEVT
-+	/* Restore regs */
-+	ld.q	SP, 32, r0
-+	ptabs	r0, tr0
-+	ld.q	SP, 0, r0
-+	ld.q	SP, 8, r1
-+	getcon	DCR, SP
-+	synco
-+	rte
++	if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
++	    !test_thread_flag(TIF_SINGLESTEP))
++		goto out;
++	if (!(tsk->ptrace & PT_PTRACED))
++		goto out;
 +
++	/* the 0x80 provides a way for the tracing parent to distinguish
++	   between a syscall stop and SIGTRAP delivery */
++	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
++				 !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
 +
-+	.balign	256
-+debug_exception:
-+	synco	/* TAKum03020 (but probably a good idea anyway.) */
 +	/*
-+	 * Single step/software_break_point first level handler.
-+	 * Called with MMU off, so the first thing we do is enable it
-+	 * by doing an rte with appropriate SSR.
++	 * this isn't the same as continuing with a signal, but it will do
++	 * for normal use.  strace only continues with a signal if the
++	 * stopping signal is not SIGTRAP.  -brl
 +	 */
-+	putcon	SP, DCR
-+	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
-+	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
-+
-+	/* With the MMU off, we are bypassing the cache, so purge any
-+         * data that will be made stale by the following stores.
-+         */
-+	ocbp	SP, 0
-+	synco
-+
-+	st.q	SP, 0, r0
-+	st.q	SP, 8, r1
-+	getcon	SPC, r0
-+	st.q	SP, 16, r0
-+	getcon	SSR, r0
-+	st.q	SP, 24, r0
-+
-+	/* Enable MMU, block exceptions, set priv mode, disable single step */
-+	movi	SR_MMU | SR_BL | SR_MD, r1
-+	or	r0, r1, r0
-+	movi	~SR_SS, r1
-+	and	r0, r1, r0
-+	putcon	r0, SSR
-+	/* Force control to debug_exception_2 when rte is executed */
-+	movi	debug_exeception_2, r0
-+	ori	r0, 1, r0      /* force SHmedia, just in case */
-+	putcon	r0, SPC
-+	getcon	DCR, SP
-+	synco
-+	rte
-+debug_exeception_2:
-+	/* Restore saved regs */
-+	putcon	SP, KCR1
-+	movi	resvec_save_area, SP
-+	ld.q	SP, 24, r0
-+	putcon	r0, SSR
-+	ld.q	SP, 16, r0
-+	putcon	r0, SPC
-+	ld.q	SP, 0, r0
-+	ld.q	SP, 8, r1
-+
-+	/* Save other original registers into reg_save_area */
-+        movi  reg_save_area, SP
-+	st.q	SP, SAVED_R2, r2
-+	st.q	SP, SAVED_R3, r3
-+	st.q	SP, SAVED_R4, r4
-+	st.q	SP, SAVED_R5, r5
-+	st.q	SP, SAVED_R6, r6
-+	st.q	SP, SAVED_R18, r18
-+	gettr	tr0, r3
-+	st.q	SP, SAVED_TR0, r3
-+
-+	/* Set args for debug class handler */
-+	getcon	EXPEVT, r2
-+	movi	ret_from_exception, r3
-+	ori	r3, 1, r3
-+	movi	EVENT_DEBUG, r4
-+	or	SP, ZERO, r5
-+	getcon	KCR1, SP
-+	pta	handle_exception, tr0
-+	blink	tr0, ZERO
-+
-+	.balign	256
-+debug_interrupt:
-+	/* !!! WE COME HERE IN REAL MODE !!! */
-+	/* Hook-up debug interrupt to allow various debugging options to be
-+	 * hooked into its handler. */
-+	/* Save original stack pointer into KCR1 */
-+	synco
-+	putcon	SP, KCR1
-+	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
-+	ocbp	SP, 0
-+	ocbp	SP, 32
-+	synco
-+
-+	/* Save other original registers into reg_save_area thru real addresses */
-+	st.q	SP, SAVED_R2, r2
-+	st.q	SP, SAVED_R3, r3
-+	st.q	SP, SAVED_R4, r4
-+	st.q	SP, SAVED_R5, r5
-+	st.q	SP, SAVED_R6, r6
-+	st.q	SP, SAVED_R18, r18
-+	gettr	tr0, r3
-+	st.q	SP, SAVED_TR0, r3
-+
-+	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
-+	   them back again, so that they look like the originals
-+	   as far as the real handler code is concerned. */
-+	getcon	spc, r6
-+	putcon	r6, pspc
-+	getcon	ssr, r6
-+	putcon	r6, pssr
-+
-+	! construct useful SR for handle_exception
-+	movi	3, r6
-+	shlli	r6, 30, r6
-+	getcon	sr, r18
-+	or	r18, r6, r6
-+	putcon	r6, ssr
-+
-+	! SSR is now the current SR with the MD and MMU bits set
-+	! i.e. the rte will switch back to priv mode and put
-+	! the mmu back on
-+
-+	! construct spc
-+	movi	handle_exception, r18
-+	ori	r18, 1, r18		! for safety (do we need this?)
-+	putcon	r18, spc
-+
-+	/* Set args for Non-debug, Not a TLB miss class handler */
-+
-+	! EXPEVT==0x80 is unused, so 'steal' this value to put the
-+	! debug interrupt handler in the vectoring table
-+	movi	0x80, r2
-+	movi	ret_from_exception, r3
-+	ori	r3, 1, r3
-+	movi	EVENT_FAULT_NOT_TLB, r4
-+
-+	or	SP, ZERO, r5
-+	movi	CONFIG_PAGE_OFFSET, r6
-+	add	r6, r5, r5
-+	getcon	KCR1, SP
-+
-+	synco	! for safety
-+	rte	! -> handle_exception, switch back to priv mode again
-+
-+LRESVEC_block_end:			/* Marker. Unused. */
++	if (tsk->exit_code) {
++		send_sig(tsk->exit_code, tsk, 1);
++		tsk->exit_code = 0;
++	}
 +
-+	.balign	TEXT_SIZE
++out:
++	if (unlikely(current->audit_context) && !entryexit)
++		audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[3],
++				    regs->regs[4], regs->regs[5],
++				    regs->regs[6], regs->regs[7]);
 +
++}
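The do_syscall_trace() added above reports syscall stops as SIGTRAP with the
0x80 bit set whenever the tracing parent asked for PT_TRACESYSGOOD.  As an
illustration only (not part of the patch), a minimal user-space tracer that
consumes this convention could look roughly like the sketch below; it assumes
an ordinary Linux host where the glibc <sys/ptrace.h> wrapper exposes
PTRACE_O_TRACESYSGOOD, which maps onto the kernel's PT_TRACESYSGOOD flag.

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();
	int status;

	if (child == 0) {
		/* Tracee: allow tracing, stop, then run something. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		execlp("true", "true", (char *)NULL);
		_exit(1);
	}

	waitpid(child, &status, 0);
	/* Sets PT_TRACESYSGOOD on the child, as tested above. */
	ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)PTRACE_O_TRACESYSGOOD);

	for (;;) {
		ptrace(PTRACE_SYSCALL, child, NULL, NULL);
		if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
			break;
		/* do_syscall_trace() ORed 0x80 into SIGTRAP for this stop. */
		if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
			printf("syscall entry/exit stop\n");
	}
	return 0;
}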
+diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
+new file mode 100644
+index 0000000..f6fbdfa
+--- /dev/null
++++ b/arch/sh/kernel/ptrace_64.c
+@@ -0,0 +1,341 @@
 +/*
-+ * Second level handler for VBR-based exceptions. Pre-handler.
-+ * In common to all stack-frame sensitive handlers.
++ * arch/sh/kernel/ptrace_64.c
 + *
-+ * Inputs:
-+ * (KCR0) Current [current task union]
-+ * (KCR1) Original SP
-+ * (r2)   INTEVT/EXPEVT
-+ * (r3)   appropriate return address
-+ * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
-+ * (r5)   Pointer to reg_save_area
-+ * (SP)   Original SP
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003 - 2007  Paul Mundt
 + *
-+ * Available registers:
-+ * (r6)
-+ * (r18)
-+ * (tr0)
++ * Started from SH3/4 version:
++ *   SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
++ *
++ *   Original x86 implementation:
++ *	By Ross Biro 1/23/92
++ *	edited by Linus Torvalds
 + *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
 + */
-+handle_exception:
-+	/* Common 2nd level handler. */
++#include <linux/kernel.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/errno.h>
++#include <linux/ptrace.h>
++#include <linux/user.h>
++#include <linux/signal.h>
++#include <linux/syscalls.h>
++#include <linux/audit.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/processor.h>
++#include <asm/mmu_context.h>
 +
-+	/* First thing we need an appropriate stack pointer */
-+	getcon	SSR, r6
-+	shlri	r6, 30, r6
-+	andi	r6, 1, r6
-+	pta	stack_ok, tr0
-+	bne	r6, ZERO, tr0		/* Original stack pointer is fine */
++/* This mask defines the bits of the SR which the user is not allowed to
++   change, which are everything except S, Q, M, PR, SZ, FR. */
++#define SR_MASK      (0xffff8cfd)
 +
-+	/* Set stack pointer for user fault */
-+	getcon	KCR0, SP
-+	movi	THREAD_SIZE, r6		/* Point to the end */
-+	add	SP, r6, SP
++/*
++ * This does not yet catch signals sent when the child dies;
++ * that case is handled in exit.c or in signal.c.
++ */
 +
-+stack_ok:
++/*
++ * This routine will get a word from the user area in the process kernel stack.
++ */
++static inline int get_stack_long(struct task_struct *task, int offset)
++{
++	unsigned char *stack;
 +
-+/* DEBUG : check for underflow/overflow of the kernel stack */
-+	pta	no_underflow, tr0
-+	getcon  KCR0, r6
-+	movi	1024, r18
-+	add	r6, r18, r6
-+	bge	SP, r6, tr0 	! ? below 1k from bottom of stack : danger zone
++	stack = (unsigned char *)(task->thread.uregs);
++	stack += offset;
++	return (*((int *)stack));
++}
 +
-+/* Just panic to cause a crash. */
-+bad_sp:
-+	ld.b	r63, 0, r6
-+	nop
++static inline unsigned long
++get_fpu_long(struct task_struct *task, unsigned long addr)
++{
++	unsigned long tmp;
++	struct pt_regs *regs;
++	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
 +
-+no_underflow:
-+	pta	bad_sp, tr0
-+	getcon	kcr0, r6
-+	movi	THREAD_SIZE, r18
-+	add	r18, r6, r6
-+	bgt	SP, r6, tr0	! sp above the stack
++	if (!tsk_used_math(task)) {
++		if (addr == offsetof(struct user_fpu_struct, fpscr)) {
++			tmp = FPSCR_INIT;
++		} else {
++			tmp = 0xffffffffUL; /* matches initial value in fpu.c */
++		}
++		return tmp;
++	}
 +
-+	/* Make some room for the BASIC frame. */
-+	movi	-(FRAME_SIZE), r6
-+	add	SP, r6, SP
++	if (last_task_used_math == task) {
++		enable_fpu();
++		save_fpu(task, regs);
++		disable_fpu();
++		last_task_used_math = 0;
++		regs->sr |= SR_FD;
++	}
 +
-+/* Could do this with no stalling if we had another spare register, but the
-+   code below will be OK. */
-+	ld.q	r5, SAVED_R2, r6
-+	ld.q	r5, SAVED_R3, r18
-+	st.q	SP, FRAME_R(2), r6
-+	ld.q	r5, SAVED_R4, r6
-+	st.q	SP, FRAME_R(3), r18
-+	ld.q	r5, SAVED_R5, r18
-+	st.q	SP, FRAME_R(4), r6
-+	ld.q	r5, SAVED_R6, r6
-+	st.q	SP, FRAME_R(5), r18
-+	ld.q	r5, SAVED_R18, r18
-+	st.q	SP, FRAME_R(6), r6
-+	ld.q	r5, SAVED_TR0, r6
-+	st.q	SP, FRAME_R(18), r18
-+	st.q	SP, FRAME_T(0), r6
++	tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
++	return tmp;
++}
 +
-+	/* Keep old SP around */
-+	getcon	KCR1, r6
++/*
++ * This routine will put a word into the user area in the process kernel stack.
++ */
++static inline int put_stack_long(struct task_struct *task, int offset,
++				 unsigned long data)
++{
++	unsigned char *stack;
 +
-+	/* Save the rest of the general purpose registers */
-+	st.q	SP, FRAME_R(0), r0
-+	st.q	SP, FRAME_R(1), r1
-+	st.q	SP, FRAME_R(7), r7
-+	st.q	SP, FRAME_R(8), r8
-+	st.q	SP, FRAME_R(9), r9
-+	st.q	SP, FRAME_R(10), r10
-+	st.q	SP, FRAME_R(11), r11
-+	st.q	SP, FRAME_R(12), r12
-+	st.q	SP, FRAME_R(13), r13
-+	st.q	SP, FRAME_R(14), r14
++	stack = (unsigned char *)(task->thread.uregs);
++	stack += offset;
++	*(unsigned long *) stack = data;
++	return 0;
++}
 +
-+	/* SP is somewhere else */
-+	st.q	SP, FRAME_R(15), r6
++static inline int
++put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
++{
++	struct pt_regs *regs;
 +
-+	st.q	SP, FRAME_R(16), r16
-+	st.q	SP, FRAME_R(17), r17
-+	/* r18 is saved earlier. */
-+	st.q	SP, FRAME_R(19), r19
-+	st.q	SP, FRAME_R(20), r20
-+	st.q	SP, FRAME_R(21), r21
-+	st.q	SP, FRAME_R(22), r22
-+	st.q	SP, FRAME_R(23), r23
-+	st.q	SP, FRAME_R(24), r24
-+	st.q	SP, FRAME_R(25), r25
-+	st.q	SP, FRAME_R(26), r26
-+	st.q	SP, FRAME_R(27), r27
-+	st.q	SP, FRAME_R(28), r28
-+	st.q	SP, FRAME_R(29), r29
-+	st.q	SP, FRAME_R(30), r30
-+	st.q	SP, FRAME_R(31), r31
-+	st.q	SP, FRAME_R(32), r32
-+	st.q	SP, FRAME_R(33), r33
-+	st.q	SP, FRAME_R(34), r34
-+	st.q	SP, FRAME_R(35), r35
-+	st.q	SP, FRAME_R(36), r36
-+	st.q	SP, FRAME_R(37), r37
-+	st.q	SP, FRAME_R(38), r38
-+	st.q	SP, FRAME_R(39), r39
-+	st.q	SP, FRAME_R(40), r40
-+	st.q	SP, FRAME_R(41), r41
-+	st.q	SP, FRAME_R(42), r42
-+	st.q	SP, FRAME_R(43), r43
-+	st.q	SP, FRAME_R(44), r44
-+	st.q	SP, FRAME_R(45), r45
-+	st.q	SP, FRAME_R(46), r46
-+	st.q	SP, FRAME_R(47), r47
-+	st.q	SP, FRAME_R(48), r48
-+	st.q	SP, FRAME_R(49), r49
-+	st.q	SP, FRAME_R(50), r50
-+	st.q	SP, FRAME_R(51), r51
-+	st.q	SP, FRAME_R(52), r52
-+	st.q	SP, FRAME_R(53), r53
-+	st.q	SP, FRAME_R(54), r54
-+	st.q	SP, FRAME_R(55), r55
-+	st.q	SP, FRAME_R(56), r56
-+	st.q	SP, FRAME_R(57), r57
-+	st.q	SP, FRAME_R(58), r58
-+	st.q	SP, FRAME_R(59), r59
-+	st.q	SP, FRAME_R(60), r60
-+	st.q	SP, FRAME_R(61), r61
-+	st.q	SP, FRAME_R(62), r62
++	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
 +
-+	/*
-+	 * Save the S* registers.
-+	 */
-+	getcon	SSR, r61
-+	st.q	SP, FRAME_S(FSSR), r61
-+	getcon	SPC, r62
-+	st.q	SP, FRAME_S(FSPC), r62
-+	movi	-1, r62			/* Reset syscall_nr */
-+	st.q	SP, FRAME_S(FSYSCALL_ID), r62
++	if (!tsk_used_math(task)) {
++		fpinit(&task->thread.fpu.hard);
++		set_stopped_child_used_math(task);
++	} else if (last_task_used_math == task) {
++		enable_fpu();
++		save_fpu(task, regs);
++		disable_fpu();
++		last_task_used_math = 0;
++		regs->sr |= SR_FD;
++	}
 +
-+	/* Save the rest of the target registers */
-+	gettr	tr1, r6
-+	st.q	SP, FRAME_T(1), r6
-+	gettr	tr2, r6
-+	st.q	SP, FRAME_T(2), r6
-+	gettr	tr3, r6
-+	st.q	SP, FRAME_T(3), r6
-+	gettr	tr4, r6
-+	st.q	SP, FRAME_T(4), r6
-+	gettr	tr5, r6
-+	st.q	SP, FRAME_T(5), r6
-+	gettr	tr6, r6
-+	st.q	SP, FRAME_T(6), r6
-+	gettr	tr7, r6
-+	st.q	SP, FRAME_T(7), r6
++	((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
++	return 0;
++}
 +
-+	! setup FP so that unwinder can wind back through nested kernel mode
-+	! exceptions
-+	add	SP, ZERO, r14
 +
-+#ifdef CONFIG_POOR_MANS_STRACE
-+	/* We've pushed all the registers now, so only r2-r4 hold anything
-+	 * useful. Move them into callee save registers */
-+	or	r2, ZERO, r28
-+	or	r3, ZERO, r29
-+	or	r4, ZERO, r30
++long arch_ptrace(struct task_struct *child, long request, long addr, long data)
++{
++	int ret;
 +
-+	/* Preserve r2 as the event code */
-+	movi	evt_debug, r3
-+	ori	r3, 1, r3
-+	ptabs	r3, tr0
++	switch (request) {
++	/* when I and D space are separate, these will need to be fixed. */
++	case PTRACE_PEEKTEXT: /* read word at location addr. */
++	case PTRACE_PEEKDATA:
++		ret = generic_ptrace_peekdata(child, addr, data);
++		break;
 +
-+	or	SP, ZERO, r6
-+	getcon	TRA, r5
-+	blink	tr0, LINK
++	/* read the word at location addr in the USER area. */
++	case PTRACE_PEEKUSR: {
++		unsigned long tmp;
 +
-+	or	r28, ZERO, r2
-+	or	r29, ZERO, r3
-+	or	r30, ZERO, r4
-+#endif
++		ret = -EIO;
++		if ((addr & 3) || addr < 0)
++			break;
 +
-+	/* For syscall and debug race condition, get TRA now */
-+	getcon	TRA, r5
++		if (addr < sizeof(struct pt_regs))
++			tmp = get_stack_long(child, addr);
++		else if ((addr >= offsetof(struct user, fpu)) &&
++			 (addr <  offsetof(struct user, u_fpvalid))) {
++			tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
++		} else if (addr == offsetof(struct user, u_fpvalid)) {
++			tmp = !!tsk_used_math(child);
++		} else {
++			break;
++		}
++		ret = put_user(tmp, (unsigned long *)data);
++		break;
++	}
 +
-+	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
-+	 * Also set FD, to catch FPU usage in the kernel.
-+	 *
-+	 * benedict.gaster at superh.com 29/07/2002
-+	 *
-+	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
-+	 * same time change BL from 1->0, as any pending interrupt of a level
-+	 * higher than he previous value of IMASK will leak through and be
-+	 * taken unexpectedly.
-+	 *
-+	 * To avoid this we raise the IMASK and then issue another PUTCON to
-+	 * enable interrupts.
-+         */
-+	getcon	SR, r6
-+	movi	SR_IMASK | SR_FD, r7
-+	or	r6, r7, r6
-+	putcon	r6, SR
-+	movi	SR_UNBLOCK_EXC, r7
-+	and	r6, r7, r6
-+	putcon	r6, SR
++	/* when I and D space are separate, this will have to be fixed. */
++	case PTRACE_POKETEXT: /* write the word at location addr. */
++	case PTRACE_POKEDATA:
++		ret = generic_ptrace_pokedata(child, addr, data);
++		break;
++
++	case PTRACE_POKEUSR:
++                /* write the word at location addr in the USER area. We must
++                   disallow any changes to certain SR bits or u_fpvalid, since
++                   this could crash the kernel or result in a security
++                   loophole. */
++		ret = -EIO;
++		if ((addr & 3) || addr < 0)
++			break;
 +
++		if (addr < sizeof(struct pt_regs)) {
++			/* Ignore change of top 32 bits of SR */
++			if (addr == offsetof (struct pt_regs, sr)+4)
++			{
++				ret = 0;
++				break;
++			}
++			/* If lower 32 bits of SR, ignore non-user bits */
++			if (addr == offsetof (struct pt_regs, sr))
++			{
++				long cursr = get_stack_long(child, addr);
++				data &= ~(SR_MASK);
++				data |= (cursr & SR_MASK);
++			}
++			ret = put_stack_long(child, addr, data);
++		}
++		else if ((addr >= offsetof(struct user, fpu)) &&
++			 (addr <  offsetof(struct user, u_fpvalid))) {
++			ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
++		}
++		break;
 +
-+	/* Now call the appropriate 3rd level handler */
-+	or	r3, ZERO, LINK
-+	movi	trap_jtable, r3
-+	shlri	r2, 3, r2
-+	ldx.l	r2, r3, r3
-+	shlri	r2, 2, r2
-+	ptabs	r3, tr0
-+	or	SP, ZERO, r3
-+	blink	tr0, ZERO
++	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
++	case PTRACE_CONT: { /* restart after signal. */
++		ret = -EIO;
++		if (!valid_signal(data))
++			break;
++		if (request == PTRACE_SYSCALL)
++			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
++		else
++			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
++		child->exit_code = data;
++		wake_up_process(child);
++		ret = 0;
++		break;
++	}
 +
 +/*
-+ * Second level handler for VBR-based exceptions. Post-handlers.
-+ *
-+ * Post-handlers for interrupts (ret_from_irq), exceptions
-+ * (ret_from_exception) and common reentrance doors (restore_all
-+ * to get back to the original context, ret_from_syscall loop to
-+ * check kernel exiting).
-+ *
-+ * ret_with_reschedule and work_notifysig are an inner lables of
-+ * the ret_from_syscall loop.
-+ *
-+ * In common to all stack-frame sensitive handlers.
-+ *
-+ * Inputs:
-+ * (SP)   struct pt_regs *, original register's frame pointer (basic)
-+ *
++ * make the child exit.  Best I can do is send it a sigkill.
++ * perhaps it should be put in the status that it wants to
++ * exit.
 + */
-+	.global ret_from_irq
-+ret_from_irq:
-+#ifdef CONFIG_POOR_MANS_STRACE
-+	pta	evt_debug_ret_from_irq, tr0
-+	ori	SP, 0, r2
-+	blink	tr0, LINK
-+#endif
-+	ld.q	SP, FRAME_S(FSSR), r6
-+	shlri	r6, 30, r6
-+	andi	r6, 1, r6
-+	pta	resume_kernel, tr0
-+	bne	r6, ZERO, tr0		/* no further checks */
-+	STI()
-+	pta	ret_with_reschedule, tr0
-+	blink	tr0, ZERO		/* Do not check softirqs */
-+
-+	.global ret_from_exception
-+ret_from_exception:
-+	preempt_stop()
-+
-+#ifdef CONFIG_POOR_MANS_STRACE
-+	pta	evt_debug_ret_from_exc, tr0
-+	ori	SP, 0, r2
-+	blink	tr0, LINK
-+#endif
-+
-+	ld.q	SP, FRAME_S(FSSR), r6
-+	shlri	r6, 30, r6
-+	andi	r6, 1, r6
-+	pta	resume_kernel, tr0
-+	bne	r6, ZERO, tr0		/* no further checks */
-+
-+	/* Check softirqs */
-+
-+#ifdef CONFIG_PREEMPT
-+	pta   ret_from_syscall, tr0
-+	blink   tr0, ZERO
-+
-+resume_kernel:
-+	pta	restore_all, tr0
-+
-+	getcon	KCR0, r6
-+	ld.l	r6, TI_PRE_COUNT, r7
-+	beq/u	r7, ZERO, tr0
-+
-+need_resched:
-+	ld.l	r6, TI_FLAGS, r7
-+	movi	(1 << TIF_NEED_RESCHED), r8
-+	and	r8, r7, r8
-+	bne	r8, ZERO, tr0
-+
-+	getcon	SR, r7
-+	andi	r7, 0xf0, r7
-+	bne	r7, ZERO, tr0
-+
-+	movi	((PREEMPT_ACTIVE >> 16) & 65535), r8
-+	shori	(PREEMPT_ACTIVE & 65535), r8
-+	st.l	r6, TI_PRE_COUNT, r8
-+
-+	STI()
-+	movi	schedule, r7
-+	ori	r7, 1, r7
-+	ptabs	r7, tr1
-+	blink	tr1, LINK
-+
-+	st.l	r6, TI_PRE_COUNT, ZERO
-+	CLI()
-+
-+	pta	need_resched, tr1
-+	blink	tr1, ZERO
-+#endif
++	case PTRACE_KILL: {
++		ret = 0;
++		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
++			break;
++		child->exit_code = SIGKILL;
++		wake_up_process(child);
++		break;
++	}
 +
-+	.global ret_from_syscall
-+ret_from_syscall:
++	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
++		struct pt_regs *regs;
 +
-+ret_with_reschedule:
-+	getcon	KCR0, r6		! r6 contains current_thread_info
-+	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags
++		ret = -EIO;
++		if (!valid_signal(data))
++			break;
++		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
++		if ((child->ptrace & PT_DTRACE) == 0) {
++			/* Spurious delayed TF traps may occur */
++			child->ptrace |= PT_DTRACE;
++		}
 +
-+	movi	_TIF_NEED_RESCHED, r8
-+	and	r8, r7, r8
-+	pta	work_resched, tr0
-+	bne	r8, ZERO, tr0
++		regs = child->thread.uregs;
 +
-+	pta	restore_all, tr1
++		regs->sr |= SR_SSTEP;	/* auto-resetting upon exception */
 +
-+	movi	(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
-+	and	r8, r7, r8
-+	pta	work_notifysig, tr0
-+	bne	r8, ZERO, tr0
++		child->exit_code = data;
++		/* give it a chance to run. */
++		wake_up_process(child);
++		ret = 0;
++		break;
++	}
 +
-+	blink	tr1, ZERO
++	default:
++		ret = ptrace_request(child, request, addr, data);
++		break;
++	}
++	return ret;
++}
 +
-+work_resched:
-+	pta	ret_from_syscall, tr0
-+	gettr	tr0, LINK
-+	movi	schedule, r6
-+	ptabs	r6, tr0
-+	blink	tr0, ZERO		/* Call schedule(), return on top */
++asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
++{
++#define WPC_DBRMODE 0x0d104008
++	static int first_call = 1;
 +
-+work_notifysig:
-+	gettr	tr1, LINK
++	lock_kernel();
++	if (first_call) {
++		/* Set WPC.DBRMODE to 0.  This makes all debug events get
++		 * delivered through RESVEC, i.e. into the handlers in entry.S.
++		 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
++		 * would normally be left set to 1, which makes debug events get
++		 * delivered through DBRVEC, i.e. into the remote gdb's
++		 * handlers.  This prevents ptrace getting them, and confuses
++		 * the remote gdb.) */
++		printk("DBRMODE set to 0 to permit native debugging\n");
++		poke_real_address_q(WPC_DBRMODE, 0);
++		first_call = 0;
++	}
++	unlock_kernel();
 +
-+	movi	do_signal, r6
-+	ptabs	r6, tr0
-+	or	SP, ZERO, r2
-+	or	ZERO, ZERO, r3
-+	blink	tr0, LINK	    /* Call do_signal(regs, 0), return here */
++	return sys_ptrace(request, pid, addr, data);
++}
 +
-+restore_all:
-+	/* Do prefetches */
++asmlinkage void syscall_trace(struct pt_regs *regs, int entryexit)
++{
++	struct task_struct *tsk = current;
 +
-+	ld.q	SP, FRAME_T(0), r6
-+	ld.q	SP, FRAME_T(1), r7
-+	ld.q	SP, FRAME_T(2), r8
-+	ld.q	SP, FRAME_T(3), r9
-+	ptabs	r6, tr0
-+	ptabs	r7, tr1
-+	ptabs	r8, tr2
-+	ptabs	r9, tr3
-+	ld.q	SP, FRAME_T(4), r6
-+	ld.q	SP, FRAME_T(5), r7
-+	ld.q	SP, FRAME_T(6), r8
-+	ld.q	SP, FRAME_T(7), r9
-+	ptabs	r6, tr4
-+	ptabs	r7, tr5
-+	ptabs	r8, tr6
-+	ptabs	r9, tr7
++	if (unlikely(current->audit_context) && entryexit)
++		audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
++				   regs->regs[9]);
 +
-+	ld.q	SP, FRAME_R(0), r0
-+	ld.q	SP, FRAME_R(1), r1
-+	ld.q	SP, FRAME_R(2), r2
-+	ld.q	SP, FRAME_R(3), r3
-+	ld.q	SP, FRAME_R(4), r4
-+	ld.q	SP, FRAME_R(5), r5
-+	ld.q	SP, FRAME_R(6), r6
-+	ld.q	SP, FRAME_R(7), r7
-+	ld.q	SP, FRAME_R(8), r8
-+	ld.q	SP, FRAME_R(9), r9
-+	ld.q	SP, FRAME_R(10), r10
-+	ld.q	SP, FRAME_R(11), r11
-+	ld.q	SP, FRAME_R(12), r12
-+	ld.q	SP, FRAME_R(13), r13
-+	ld.q	SP, FRAME_R(14), r14
++	if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
++	    !test_thread_flag(TIF_SINGLESTEP))
++		goto out;
++	if (!(tsk->ptrace & PT_PTRACED))
++		goto out;
 +
-+	ld.q	SP, FRAME_R(16), r16
-+	ld.q	SP, FRAME_R(17), r17
-+	ld.q	SP, FRAME_R(18), r18
-+	ld.q	SP, FRAME_R(19), r19
-+	ld.q	SP, FRAME_R(20), r20
-+	ld.q	SP, FRAME_R(21), r21
-+	ld.q	SP, FRAME_R(22), r22
-+	ld.q	SP, FRAME_R(23), r23
-+	ld.q	SP, FRAME_R(24), r24
-+	ld.q	SP, FRAME_R(25), r25
-+	ld.q	SP, FRAME_R(26), r26
-+	ld.q	SP, FRAME_R(27), r27
-+	ld.q	SP, FRAME_R(28), r28
-+	ld.q	SP, FRAME_R(29), r29
-+	ld.q	SP, FRAME_R(30), r30
-+	ld.q	SP, FRAME_R(31), r31
-+	ld.q	SP, FRAME_R(32), r32
-+	ld.q	SP, FRAME_R(33), r33
-+	ld.q	SP, FRAME_R(34), r34
-+	ld.q	SP, FRAME_R(35), r35
-+	ld.q	SP, FRAME_R(36), r36
-+	ld.q	SP, FRAME_R(37), r37
-+	ld.q	SP, FRAME_R(38), r38
-+	ld.q	SP, FRAME_R(39), r39
-+	ld.q	SP, FRAME_R(40), r40
-+	ld.q	SP, FRAME_R(41), r41
-+	ld.q	SP, FRAME_R(42), r42
-+	ld.q	SP, FRAME_R(43), r43
-+	ld.q	SP, FRAME_R(44), r44
-+	ld.q	SP, FRAME_R(45), r45
-+	ld.q	SP, FRAME_R(46), r46
-+	ld.q	SP, FRAME_R(47), r47
-+	ld.q	SP, FRAME_R(48), r48
-+	ld.q	SP, FRAME_R(49), r49
-+	ld.q	SP, FRAME_R(50), r50
-+	ld.q	SP, FRAME_R(51), r51
-+	ld.q	SP, FRAME_R(52), r52
-+	ld.q	SP, FRAME_R(53), r53
-+	ld.q	SP, FRAME_R(54), r54
-+	ld.q	SP, FRAME_R(55), r55
-+	ld.q	SP, FRAME_R(56), r56
-+	ld.q	SP, FRAME_R(57), r57
-+	ld.q	SP, FRAME_R(58), r58
++	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
++				!test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
 +
-+	getcon	SR, r59
-+	movi	SR_BLOCK_EXC, r60
-+	or	r59, r60, r59
-+	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
-+	ld.q	SP, FRAME_S(FSSR), r61
-+	ld.q	SP, FRAME_S(FSPC), r62
-+	movi	SR_ASID_MASK, r60
-+	and	r59, r60, r59
-+	andc	r61, r60, r61		/* Clear out older ASID */
-+	or	r59, r61, r61		/* Retain current ASID */
-+	putcon	r61, SSR
-+	putcon	r62, SPC
++	/*
++	 * this isn't the same as continuing with a signal, but it will do
++	 * for normal use.  strace only continues with a signal if the
++	 * stopping signal is not SIGTRAP.  -brl
++	 */
++	if (tsk->exit_code) {
++		send_sig(tsk->exit_code, tsk, 1);
++		tsk->exit_code = 0;
++	}
 +
-+	/* Ignore FSYSCALL_ID */
++out:
++	if (unlikely(current->audit_context) && !entryexit)
++		audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[1],
++				    regs->regs[2], regs->regs[3],
++				    regs->regs[4], regs->regs[5]);
++}
 +
-+	ld.q	SP, FRAME_R(59), r59
-+	ld.q	SP, FRAME_R(60), r60
-+	ld.q	SP, FRAME_R(61), r61
-+	ld.q	SP, FRAME_R(62), r62
++/* Called with interrupts disabled */
++asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
++{
++	/* This is called after a single step exception (DEBUGSS).
++	   There is no need to change the PC: this is a post-execution
++	   exception, and entry.S does not touch the PC for DEBUGSS.
++	   We need to clear the Single Step setting in SR to avoid
++	   continually stepping. */
++	local_irq_enable();
++	regs->sr &= ~SR_SSTEP;
++	force_sig(SIGTRAP, current);
++}
 +
-+	/* Last touch */
-+	ld.q	SP, FRAME_R(15), SP
-+	rte
-+	nop
++/* Called with interrupts disabled */
++asmlinkage void do_software_break_point(unsigned long long vec,
++					struct pt_regs *regs)
++{
++	/* We need to forward step the PC, to counteract the backstep done
++	   in signal.c. */
++	local_irq_enable();
++	force_sig(SIGTRAP, current);
++	regs->pc += 4;
++}
 +
 +/*
-+ * Third level handlers for VBR-based exceptions. Adapting args to
-+ * and/or deflecting to fourth level handlers.
-+ *
-+ * Fourth level handlers interface.
-+ * Most are C-coded handlers directly pointed by the trap_jtable.
-+ * (Third = Fourth level)
-+ * Inputs:
-+ * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
-+ *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
-+ * (r3)   struct pt_regs *, original register's frame pointer
-+ * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
-+ * (r5)   TRA control register (for syscall/debug benefit only)
-+ * (LINK) return address
-+ * (SP)   = r3
-+ *
-+ * Kernel TLB fault handlers will get a slightly different interface.
-+ * (r2)   struct pt_regs *, original register's frame pointer
-+ * (r3)   writeaccess, whether it's a store fault as opposed to load fault
-+ * (r4)   execaccess, whether it's a ITLB fault as opposed to DTLB fault
-+ * (r5)   Effective Address of fault
-+ * (LINK) return address
-+ * (SP)   = r2
-+ *
-+ * fpu_error_or_IRQ? is a helper to deflect to the right cause.
++ * Called by kernel/ptrace.c when detaching..
 + *
++ * Make sure single step bits etc are not set.
 + */
-+tlb_miss_load:
-+	or	SP, ZERO, r2
-+	or	ZERO, ZERO, r3		/* Read */
-+	or	ZERO, ZERO, r4		/* Data */
-+	getcon	TEA, r5
-+	pta	call_do_page_fault, tr0
-+	beq	ZERO, ZERO, tr0
-+
-+tlb_miss_store:
-+	or	SP, ZERO, r2
-+	movi	1, r3			/* Write */
-+	or	ZERO, ZERO, r4		/* Data */
-+	getcon	TEA, r5
-+	pta	call_do_page_fault, tr0
-+	beq	ZERO, ZERO, tr0
++void ptrace_disable(struct task_struct *child)
++{
++        /* nothing to do.. */
++}
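For the sh64 arch_ptrace() above, PTRACE_PEEKUSR/PTRACE_POKEUSR addresses
below sizeof(struct pt_regs) are plain byte offsets into the saved register
frame (child->thread.uregs), and unaligned or negative offsets fail with
-EIO.  A rough user-space sketch of reading one register slot that way is
shown below; it is illustrative only, assumes an SH target whose
<asm/ptrace.h> exposes the same struct pt_regs layout, and the helper name
peek_reg is made up.

#include <errno.h>
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_regs layout on the target */

/* Read one word of a stopped tracee's register frame ("user area"). */
long peek_reg(pid_t pid, size_t byte_offset)
{
	errno = 0;
	/* glibc spells the request PTRACE_PEEKUSER; the kernel side is the
	 * PTRACE_PEEKUSR case above, indexing thread.uregs at byte_offset. */
	return ptrace(PTRACE_PEEKUSER, pid, (void *)byte_offset, NULL);
}

/*
 * Example: peek_reg(pid, offsetof(struct pt_regs, pc)) fetches the tracee's
 * PC, while an offset with (offset & 3) != 0 fails with errno == EIO,
 * matching the "(addr & 3) || addr < 0" check above.
 */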
+diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
+index 4156aac..855cdf9 100644
+--- a/arch/sh/kernel/setup.c
++++ b/arch/sh/kernel/setup.c
+@@ -26,6 +26,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+ #include <asm/page.h>
++#include <asm/elf.h>
+ #include <asm/sections.h>
+ #include <asm/irq.h>
+ #include <asm/setup.h>
+@@ -78,12 +79,25 @@ EXPORT_SYMBOL(memory_start);
+ unsigned long memory_end = 0;
+ EXPORT_SYMBOL(memory_end);
+ 
++int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
 +
-+itlb_miss_or_IRQ:
-+	pta	its_IRQ, tr0
-+	beqi/u	r4, EVENT_INTERRUPT, tr0
-+	or	SP, ZERO, r2
-+	or	ZERO, ZERO, r3		/* Read */
-+	movi	1, r4			/* Text */
-+	getcon	TEA, r5
-+	/* Fall through */
+ static int __init early_parse_mem(char *p)
+ {
+ 	unsigned long size;
+ 
+-	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
++	memory_start = (unsigned long)__va(__MEMORY_START);
+ 	size = memparse(p, &p);
 +
-+call_do_page_fault:
-+	movi	do_page_fault, r6
-+        ptabs	r6, tr0
-+        blink	tr0, ZERO
++	if (size > __MEMORY_SIZE) {
++		static char msg[] __initdata = KERN_ERR
++			"Using mem= to increase the size of kernel memory "
++			"is not allowed.\n"
++			"  Recompile the kernel with the correct value for "
++			"CONFIG_MEMORY_SIZE.\n";
++		printk(msg);
++		return 0;
++	}
 +
-+fpu_error_or_IRQA:
-+	pta	its_IRQ, tr0
-+	beqi/l	r4, EVENT_INTERRUPT, tr0
-+#ifdef CONFIG_SH_FPU
-+	movi	do_fpu_state_restore, r6
-+#else
-+	movi	do_exception_error, r6
-+#endif
-+	ptabs	r6, tr0
-+	blink	tr0, ZERO
+ 	memory_end = memory_start + size;
+ 
+ 	return 0;
+@@ -243,7 +257,7 @@ void __init setup_arch(char **cmdline_p)
+ 	data_resource.start = virt_to_phys(_etext);
+ 	data_resource.end = virt_to_phys(_edata)-1;
+ 
+-	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
++	memory_start = (unsigned long)__va(__MEMORY_START);
+ 	if (!memory_end)
+ 		memory_end = memory_start + __MEMORY_SIZE;
+ 
+@@ -294,20 +308,23 @@ void __init setup_arch(char **cmdline_p)
+ }
+ 
+ static const char *cpu_name[] = {
++	[CPU_SH7203]	= "SH7203",	[CPU_SH7263]	= "SH7263",
+ 	[CPU_SH7206]	= "SH7206",	[CPU_SH7619]	= "SH7619",
+ 	[CPU_SH7705]	= "SH7705",	[CPU_SH7706]	= "SH7706",
+ 	[CPU_SH7707]	= "SH7707",	[CPU_SH7708]	= "SH7708",
+ 	[CPU_SH7709]	= "SH7709",	[CPU_SH7710]	= "SH7710",
+ 	[CPU_SH7712]	= "SH7712",	[CPU_SH7720]	= "SH7720",
+-	[CPU_SH7729]	= "SH7729",	[CPU_SH7750]	= "SH7750",
+-	[CPU_SH7750S]	= "SH7750S",	[CPU_SH7750R]	= "SH7750R",
+-	[CPU_SH7751]	= "SH7751",	[CPU_SH7751R]	= "SH7751R",
+-	[CPU_SH7760]	= "SH7760",
++	[CPU_SH7721]	= "SH7721",	[CPU_SH7729]	= "SH7729",
++	[CPU_SH7750]	= "SH7750",	[CPU_SH7750S]	= "SH7750S",
++	[CPU_SH7750R]	= "SH7750R",	[CPU_SH7751]	= "SH7751",
++	[CPU_SH7751R]	= "SH7751R",	[CPU_SH7760]	= "SH7760",
+ 	[CPU_SH4_202]	= "SH4-202",	[CPU_SH4_501]	= "SH4-501",
+-	[CPU_SH7770]	= "SH7770",	[CPU_SH7780]	= "SH7780",
+-	[CPU_SH7781]	= "SH7781",	[CPU_SH7343]	= "SH7343",
+-	[CPU_SH7785]	= "SH7785",	[CPU_SH7722]	= "SH7722",
+-	[CPU_SHX3]	= "SH-X3",	[CPU_SH_NONE]	= "Unknown"
++	[CPU_SH7763]	= "SH7763",	[CPU_SH7770]	= "SH7770",
++	[CPU_SH7780]	= "SH7780",	[CPU_SH7781]	= "SH7781",
++	[CPU_SH7343]	= "SH7343",	[CPU_SH7785]	= "SH7785",
++	[CPU_SH7722]	= "SH7722",	[CPU_SHX3]	= "SH-X3",
++	[CPU_SH5_101]	= "SH5-101",	[CPU_SH5_103]	= "SH5-103",
++	[CPU_SH_NONE]	= "Unknown"
+ };
+ 
+ const char *get_cpu_subtype(struct sh_cpuinfo *c)
+@@ -410,7 +427,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+ static void c_stop(struct seq_file *m, void *v)
+ {
+ }
+-struct seq_operations cpuinfo_op = {
++const struct seq_operations cpuinfo_op = {
+ 	.start	= c_start,
+ 	.next	= c_next,
+ 	.stop	= c_stop,
+diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
+deleted file mode 100644
+index e1a6de9..0000000
+--- a/arch/sh/kernel/sh_ksyms.c
++++ /dev/null
+@@ -1,150 +0,0 @@
+-#include <linux/module.h>
+-#include <linux/smp.h>
+-#include <linux/user.h>
+-#include <linux/elfcore.h>
+-#include <linux/sched.h>
+-#include <linux/in6.h>
+-#include <linux/interrupt.h>
+-#include <linux/vmalloc.h>
+-#include <linux/pci.h>
+-#include <linux/irq.h>
+-#include <asm/sections.h>
+-#include <asm/semaphore.h>
+-#include <asm/processor.h>
+-#include <asm/uaccess.h>
+-#include <asm/checksum.h>
+-#include <asm/io.h>
+-#include <asm/delay.h>
+-#include <asm/tlbflush.h>
+-#include <asm/cacheflush.h>
+-
+-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
+-extern struct hw_interrupt_type no_irq_type;
+-
+-EXPORT_SYMBOL(sh_mv);
+-
+-/* platform dependent support */
+-EXPORT_SYMBOL(dump_fpu);
+-EXPORT_SYMBOL(kernel_thread);
+-EXPORT_SYMBOL(irq_desc);
+-EXPORT_SYMBOL(no_irq_type);
+-
+-EXPORT_SYMBOL(strlen);
+-
+-/* PCI exports */
+-#ifdef CONFIG_PCI
+-EXPORT_SYMBOL(pci_alloc_consistent);
+-EXPORT_SYMBOL(pci_free_consistent);
+-#endif
+-
+-/* mem exports */
+-EXPORT_SYMBOL(memchr);
+-EXPORT_SYMBOL(memcpy);
+-EXPORT_SYMBOL(memset);
+-EXPORT_SYMBOL(memmove);
+-EXPORT_SYMBOL(__copy_user);
+-
+-#ifdef CONFIG_MMU
+-EXPORT_SYMBOL(get_vm_area);
+-#endif
+-
+-/* semaphore exports */
+-EXPORT_SYMBOL(__up);
+-EXPORT_SYMBOL(__down);
+-EXPORT_SYMBOL(__down_interruptible);
+-EXPORT_SYMBOL(__down_trylock);
+-
+-EXPORT_SYMBOL(__udelay);
+-EXPORT_SYMBOL(__ndelay);
+-EXPORT_SYMBOL(__const_udelay);
+-
+-#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
+-
+-/* These symbols are generated by the compiler itself */
+-DECLARE_EXPORT(__udivsi3);
+-DECLARE_EXPORT(__sdivsi3);
+-DECLARE_EXPORT(__ashrsi3);
+-DECLARE_EXPORT(__ashlsi3);
+-DECLARE_EXPORT(__ashrdi3);
+-DECLARE_EXPORT(__ashldi3);
+-DECLARE_EXPORT(__ashiftrt_r4_6);
+-DECLARE_EXPORT(__ashiftrt_r4_7);
+-DECLARE_EXPORT(__ashiftrt_r4_8);
+-DECLARE_EXPORT(__ashiftrt_r4_9);
+-DECLARE_EXPORT(__ashiftrt_r4_10);
+-DECLARE_EXPORT(__ashiftrt_r4_11);
+-DECLARE_EXPORT(__ashiftrt_r4_12);
+-DECLARE_EXPORT(__ashiftrt_r4_13);
+-DECLARE_EXPORT(__ashiftrt_r4_14);
+-DECLARE_EXPORT(__ashiftrt_r4_15);
+-DECLARE_EXPORT(__ashiftrt_r4_20);
+-DECLARE_EXPORT(__ashiftrt_r4_21);
+-DECLARE_EXPORT(__ashiftrt_r4_22);
+-DECLARE_EXPORT(__ashiftrt_r4_23);
+-DECLARE_EXPORT(__ashiftrt_r4_24);
+-DECLARE_EXPORT(__ashiftrt_r4_27);
+-DECLARE_EXPORT(__ashiftrt_r4_30);
+-DECLARE_EXPORT(__lshrsi3);
+-DECLARE_EXPORT(__lshrdi3);
+-DECLARE_EXPORT(__movstrSI8);
+-DECLARE_EXPORT(__movstrSI12);
+-DECLARE_EXPORT(__movstrSI16);
+-DECLARE_EXPORT(__movstrSI20);
+-DECLARE_EXPORT(__movstrSI24);
+-DECLARE_EXPORT(__movstrSI28);
+-DECLARE_EXPORT(__movstrSI32);
+-DECLARE_EXPORT(__movstrSI36);
+-DECLARE_EXPORT(__movstrSI40);
+-DECLARE_EXPORT(__movstrSI44);
+-DECLARE_EXPORT(__movstrSI48);
+-DECLARE_EXPORT(__movstrSI52);
+-DECLARE_EXPORT(__movstrSI56);
+-DECLARE_EXPORT(__movstrSI60);
+-#if __GNUC__ == 4
+-DECLARE_EXPORT(__movmem);
+-#else
+-DECLARE_EXPORT(__movstr);
+-#endif
+-
+-#if __GNUC__ == 4
+-DECLARE_EXPORT(__movmem_i4_even);
+-DECLARE_EXPORT(__movmem_i4_odd);
+-DECLARE_EXPORT(__movmemSI12_i4);
+-
+-#if (__GNUC_MINOR__ == 2 || defined(__GNUC_STM_RELEASE__))
+-/*
+- * GCC 4.2 emits these for division, as do GCC 4.1.x versions of the ST
+- * compiler which include backported patches.
+- */
+-DECLARE_EXPORT(__sdivsi3_i4i);
+-DECLARE_EXPORT(__udiv_qrnnd_16);
+-DECLARE_EXPORT(__udivsi3_i4i);
+-#endif
+-#else /* GCC 3.x */
+-DECLARE_EXPORT(__movstr_i4_even);
+-DECLARE_EXPORT(__movstr_i4_odd);
+-DECLARE_EXPORT(__movstrSI12_i4);
+-#endif /* __GNUC__ == 4 */
+-
+-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
+-	defined(CONFIG_SH7705_CACHE_32KB))
+-/* needed by some modules */
+-EXPORT_SYMBOL(flush_cache_all);
+-EXPORT_SYMBOL(flush_cache_range);
+-EXPORT_SYMBOL(flush_dcache_page);
+-EXPORT_SYMBOL(__flush_purge_region);
+-#endif
+-
+-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
+-	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
+-EXPORT_SYMBOL(clear_user_page);
+-#endif
+-
+-EXPORT_SYMBOL(csum_partial);
+-EXPORT_SYMBOL(csum_partial_copy_generic);
+-#ifdef CONFIG_IPV6
+-EXPORT_SYMBOL(csum_ipv6_magic);
+-#endif
+-EXPORT_SYMBOL(clear_page);
+-EXPORT_SYMBOL(__clear_user);
+-EXPORT_SYMBOL(_ebss);
+diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
+new file mode 100644
+index 0000000..e1a6de9
+--- /dev/null
++++ b/arch/sh/kernel/sh_ksyms_32.c
+@@ -0,0 +1,150 @@
++#include <linux/module.h>
++#include <linux/smp.h>
++#include <linux/user.h>
++#include <linux/elfcore.h>
++#include <linux/sched.h>
++#include <linux/in6.h>
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <linux/pci.h>
++#include <linux/irq.h>
++#include <asm/sections.h>
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/uaccess.h>
++#include <asm/checksum.h>
++#include <asm/io.h>
++#include <asm/delay.h>
++#include <asm/tlbflush.h>
++#include <asm/cacheflush.h>
 +
-+fpu_error_or_IRQB:
-+	pta	its_IRQ, tr0
-+	beqi/l	r4, EVENT_INTERRUPT, tr0
-+#ifdef CONFIG_SH_FPU
-+	movi	do_fpu_state_restore, r6
-+#else
-+	movi	do_exception_error, r6
-+#endif
-+	ptabs	r6, tr0
-+	blink	tr0, ZERO
++extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
++extern struct hw_interrupt_type no_irq_type;
 +
-+its_IRQ:
-+	movi	do_IRQ, r6
-+	ptabs	r6, tr0
-+	blink	tr0, ZERO
++EXPORT_SYMBOL(sh_mv);
 +
-+/*
-+ * system_call/unknown_trap third level handler:
-+ *
-+ * Inputs:
-+ * (r2)   fault/interrupt code, entry number (TRAP = 11)
-+ * (r3)   struct pt_regs *, original register's frame pointer
-+ * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
-+ * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
-+ * (SP)   = r3
-+ * (LINK) return address: ret_from_exception
-+ * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
-+ *
-+ * Outputs:
-+ * (*r3)  Syscall reply (Saved r2)
-+ * (LINK) In case of syscall only it can be scrapped.
-+ *        Common second level post handler will be ret_from_syscall.
-+ *        Common (non-trace) exit point to that is syscall_ret (saving
-+ *        result to r2). Common bad exit point is syscall_bad (returning
-+ *        ENOSYS then saved to r2).
-+ *
-+ */
++/* platform dependent support */
++EXPORT_SYMBOL(dump_fpu);
++EXPORT_SYMBOL(kernel_thread);
++EXPORT_SYMBOL(irq_desc);
++EXPORT_SYMBOL(no_irq_type);
 +
-+unknown_trap:
-+	/* Unknown Trap or User Trace */
-+	movi	do_unknown_trapa, r6
-+	ptabs	r6, tr0
-+        ld.q    r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
-+        andi    r2, 0x1ff, r2		/* r2 = syscall # */
-+	blink	tr0, LINK
++EXPORT_SYMBOL(strlen);
 +
-+	pta	syscall_ret, tr0
-+	blink	tr0, ZERO
++/* PCI exports */
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_alloc_consistent);
++EXPORT_SYMBOL(pci_free_consistent);
++#endif
 +
-+        /* New syscall implementation*/
-+system_call:
-+	pta	unknown_trap, tr0
-+        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
-+        shlri   r4, 20, r4
-+	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */
++/* mem exports */
++EXPORT_SYMBOL(memchr);
++EXPORT_SYMBOL(memcpy);
++EXPORT_SYMBOL(memset);
++EXPORT_SYMBOL(memmove);
++EXPORT_SYMBOL(__copy_user);
 +
-+        /* It's a system call */
-+	st.q    r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
-+	andi    r5, 0x1ff, r5			/* syscall # -> r5	  */
++#ifdef CONFIG_MMU
++EXPORT_SYMBOL(get_vm_area);
++#endif
 +
-+	STI()
++/* semaphore exports */
++EXPORT_SYMBOL(__up);
++EXPORT_SYMBOL(__down);
++EXPORT_SYMBOL(__down_interruptible);
++EXPORT_SYMBOL(__down_trylock);
 +
-+	pta	syscall_allowed, tr0
-+	movi	NR_syscalls - 1, r4	/* Last valid */
-+	bgeu/l	r4, r5, tr0
++EXPORT_SYMBOL(__udelay);
++EXPORT_SYMBOL(__ndelay);
++EXPORT_SYMBOL(__const_udelay);
 +
-+syscall_bad:
-+	/* Return ENOSYS ! */
-+	movi	-(ENOSYS), r2		/* Fall-through */
++#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
 +
-+	.global syscall_ret
-+syscall_ret:
-+	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
++/* These symbols are generated by the compiler itself */
++DECLARE_EXPORT(__udivsi3);
++DECLARE_EXPORT(__sdivsi3);
++DECLARE_EXPORT(__ashrsi3);
++DECLARE_EXPORT(__ashlsi3);
++DECLARE_EXPORT(__ashrdi3);
++DECLARE_EXPORT(__ashldi3);
++DECLARE_EXPORT(__ashiftrt_r4_6);
++DECLARE_EXPORT(__ashiftrt_r4_7);
++DECLARE_EXPORT(__ashiftrt_r4_8);
++DECLARE_EXPORT(__ashiftrt_r4_9);
++DECLARE_EXPORT(__ashiftrt_r4_10);
++DECLARE_EXPORT(__ashiftrt_r4_11);
++DECLARE_EXPORT(__ashiftrt_r4_12);
++DECLARE_EXPORT(__ashiftrt_r4_13);
++DECLARE_EXPORT(__ashiftrt_r4_14);
++DECLARE_EXPORT(__ashiftrt_r4_15);
++DECLARE_EXPORT(__ashiftrt_r4_20);
++DECLARE_EXPORT(__ashiftrt_r4_21);
++DECLARE_EXPORT(__ashiftrt_r4_22);
++DECLARE_EXPORT(__ashiftrt_r4_23);
++DECLARE_EXPORT(__ashiftrt_r4_24);
++DECLARE_EXPORT(__ashiftrt_r4_27);
++DECLARE_EXPORT(__ashiftrt_r4_30);
++DECLARE_EXPORT(__lshrsi3);
++DECLARE_EXPORT(__lshrdi3);
++DECLARE_EXPORT(__movstrSI8);
++DECLARE_EXPORT(__movstrSI12);
++DECLARE_EXPORT(__movstrSI16);
++DECLARE_EXPORT(__movstrSI20);
++DECLARE_EXPORT(__movstrSI24);
++DECLARE_EXPORT(__movstrSI28);
++DECLARE_EXPORT(__movstrSI32);
++DECLARE_EXPORT(__movstrSI36);
++DECLARE_EXPORT(__movstrSI40);
++DECLARE_EXPORT(__movstrSI44);
++DECLARE_EXPORT(__movstrSI48);
++DECLARE_EXPORT(__movstrSI52);
++DECLARE_EXPORT(__movstrSI56);
++DECLARE_EXPORT(__movstrSI60);
++#if __GNUC__ == 4
++DECLARE_EXPORT(__movmem);
++#else
++DECLARE_EXPORT(__movstr);
++#endif
 +
-+#ifdef CONFIG_POOR_MANS_STRACE
-+	/* nothing useful in registers at this point */
++#if __GNUC__ == 4
++DECLARE_EXPORT(__movmem_i4_even);
++DECLARE_EXPORT(__movmem_i4_odd);
++DECLARE_EXPORT(__movmemSI12_i4);
 +
-+	movi	evt_debug2, r5
-+	ori	r5, 1, r5
-+	ptabs	r5, tr0
-+	ld.q	SP, FRAME_R(9), r2
-+	or	SP, ZERO, r3
-+	blink	tr0, LINK
++#if (__GNUC_MINOR__ == 2 || defined(__GNUC_STM_RELEASE__))
++/*
++ * GCC 4.2 emits these for division, as do GCC 4.1.x versions of the ST
++ * compiler which include backported patches.
++ */
++DECLARE_EXPORT(__sdivsi3_i4i);
++DECLARE_EXPORT(__udiv_qrnnd_16);
++DECLARE_EXPORT(__udivsi3_i4i);
 +#endif
++#else /* GCC 3.x */
++DECLARE_EXPORT(__movstr_i4_even);
++DECLARE_EXPORT(__movstr_i4_odd);
++DECLARE_EXPORT(__movstrSI12_i4);
++#endif /* __GNUC__ == 4 */
 +
-+	ld.q	SP, FRAME_S(FSPC), r2
-+	addi	r2, 4, r2		/* Move PC, being pre-execution event */
-+	st.q	SP, FRAME_S(FSPC), r2
-+	pta	ret_from_syscall, tr0
-+	blink	tr0, ZERO
++#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
++	defined(CONFIG_SH7705_CACHE_32KB))
++/* needed by some modules */
++EXPORT_SYMBOL(flush_cache_all);
++EXPORT_SYMBOL(flush_cache_range);
++EXPORT_SYMBOL(flush_dcache_page);
++EXPORT_SYMBOL(__flush_purge_region);
++#endif
 +
++#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
++	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
++EXPORT_SYMBOL(clear_user_page);
++#endif
 +
-+/*  A different return path for ret_from_fork, because we now need
-+ *  to call schedule_tail with the later kernels. Because prev is
-+ *  loaded into r2 by switch_to() means we can just call it straight  away
++EXPORT_SYMBOL(csum_partial);
++EXPORT_SYMBOL(csum_partial_copy_generic);
++#ifdef CONFIG_IPV6
++EXPORT_SYMBOL(csum_ipv6_magic);
++#endif
++EXPORT_SYMBOL(clear_page);
++EXPORT_SYMBOL(__clear_user);
++EXPORT_SYMBOL(_ebss);
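Most of what sh_ksyms_32.c above exports via DECLARE_EXPORT() are libgcc
helpers, which matters for modules: on SH, gcc lowers ordinary integer
division to a call such as __sdivsi3 (or __sdivsi3_i4i on newer compilers),
and the module loader can only resolve that call because the symbol is
exported here.  A hypothetical out-of-tree module illustrating this follows;
it is a sketch only and every name in it is made up.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

static int divisor = 7;
module_param(divisor, int, 0444);

static int __init divdemo_init(void)
{
	int x = 123456;

	/* A runtime divisor keeps gcc from folding the division at compile
	 * time, so on SH the '/' below becomes a call into libgcc, resolved
	 * against the DECLARE_EXPORT()ed helper. */
	if (divisor)
		printk(KERN_INFO "divdemo: %d / %d = %d\n",
		       x, divisor, x / divisor);
	return 0;
}

static void __exit divdemo_exit(void)
{
}

module_init(divdemo_init);
module_exit(divdemo_exit);
MODULE_LICENSE("GPL");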
+diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
+new file mode 100644
+index 0000000..8004c38
+--- /dev/null
++++ b/arch/sh/kernel/sh_ksyms_64.c
+@@ -0,0 +1,55 @@
++/*
++ * arch/sh/kernel/sh_ksyms_64.c
++ *
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
 + */
++#include <linux/rwsem.h>
++#include <linux/module.h>
++#include <linux/smp.h>
++#include <linux/user.h>
++#include <linux/elfcore.h>
++#include <linux/sched.h>
++#include <linux/in6.h>
++#include <linux/interrupt.h>
++#include <linux/screen_info.h>
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/uaccess.h>
++#include <asm/checksum.h>
++#include <asm/io.h>
++#include <asm/delay.h>
++#include <asm/irq.h>
 +
-+.global	ret_from_fork
-+ret_from_fork:
++extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 +
-+	movi	schedule_tail,r5
-+	ori	r5, 1, r5
-+	ptabs	r5, tr0
-+	blink	tr0, LINK
++/* platform dependent support */
++EXPORT_SYMBOL(dump_fpu);
++EXPORT_SYMBOL(kernel_thread);
 +
-+#ifdef CONFIG_POOR_MANS_STRACE
-+	/* nothing useful in registers at this point */
++/* Networking helper routines. */
++EXPORT_SYMBOL(csum_partial_copy_nocheck);
 +
-+	movi	evt_debug2, r5
-+	ori	r5, 1, r5
-+	ptabs	r5, tr0
-+	ld.q	SP, FRAME_R(9), r2
-+	or	SP, ZERO, r3
-+	blink	tr0, LINK
++#ifdef CONFIG_VT
++EXPORT_SYMBOL(screen_info);
 +#endif
 +
-+	ld.q	SP, FRAME_S(FSPC), r2
-+	addi	r2, 4, r2		/* Move PC, being pre-execution event */
-+	st.q	SP, FRAME_S(FSPC), r2
-+	pta	ret_from_syscall, tr0
-+	blink	tr0, ZERO
-+
-+
++EXPORT_SYMBOL(__down);
++EXPORT_SYMBOL(__down_trylock);
++EXPORT_SYMBOL(__up);
++EXPORT_SYMBOL(__put_user_asm_l);
++EXPORT_SYMBOL(__get_user_asm_l);
++EXPORT_SYMBOL(__copy_user);
++EXPORT_SYMBOL(memcpy);
++EXPORT_SYMBOL(__udelay);
++EXPORT_SYMBOL(__ndelay);
 +
-+syscall_allowed:
-+	/* Use LINK to deflect the exit point, default is syscall_ret */
-+	pta	syscall_ret, tr0
-+	gettr	tr0, LINK
-+	pta	syscall_notrace, tr0
++/* Ugh.  These come in from libgcc.a at link time. */
++#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
 +
-+	getcon	KCR0, r2
-+	ld.l	r2, TI_FLAGS, r4
-+	movi	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
-+	and	r6, r4, r6
-+	beq/l	r6, ZERO, tr0
++DECLARE_EXPORT(__sdivsi3);
++DECLARE_EXPORT(__muldi3);
++DECLARE_EXPORT(__udivsi3);
+diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
+deleted file mode 100644
+index ca754fd..0000000
+--- a/arch/sh/kernel/signal.c
++++ /dev/null
+@@ -1,629 +0,0 @@
+-/*
+- *  linux/arch/sh/kernel/signal.c
+- *
+- *  Copyright (C) 1991, 1992  Linus Torvalds
+- *
+- *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+- *
+- *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+- *
+- */
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/kernel.h>
+-#include <linux/signal.h>
+-#include <linux/errno.h>
+-#include <linux/wait.h>
+-#include <linux/ptrace.h>
+-#include <linux/unistd.h>
+-#include <linux/stddef.h>
+-#include <linux/tty.h>
+-#include <linux/elf.h>
+-#include <linux/personality.h>
+-#include <linux/binfmts.h>
+-#include <linux/freezer.h>
+-#include <linux/io.h>
+-#include <asm/system.h>
+-#include <asm/ucontext.h>
+-#include <asm/uaccess.h>
+-#include <asm/pgtable.h>
+-#include <asm/cacheflush.h>
+-
+-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+-
+-/*
+- * Atomically swap in the new signal mask, and wait for a signal.
+- */
+-asmlinkage int
+-sys_sigsuspend(old_sigset_t mask,
+-	       unsigned long r5, unsigned long r6, unsigned long r7,
+-	       struct pt_regs __regs)
+-{
+-	mask &= _BLOCKABLE;
+-	spin_lock_irq(&current->sighand->siglock);
+-	current->saved_sigmask = current->blocked;
+-	siginitset(&current->blocked, mask);
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
+-
+-	current->state = TASK_INTERRUPTIBLE;
+-	schedule();
+-	set_thread_flag(TIF_RESTORE_SIGMASK);
+-	return -ERESTARTNOHAND;
+-}
+-
+-asmlinkage int
+-sys_sigaction(int sig, const struct old_sigaction __user *act,
+-	      struct old_sigaction __user *oact)
+-{
+-	struct k_sigaction new_ka, old_ka;
+-	int ret;
+-
+-	if (act) {
+-		old_sigset_t mask;
+-		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+-		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+-			return -EFAULT;
+-		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
+-		__get_user(mask, &act->sa_mask);
+-		siginitset(&new_ka.sa.sa_mask, mask);
+-	}
+-
+-	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+-
+-	if (!ret && oact) {
+-		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+-		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+-			return -EFAULT;
+-		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+-	}
+-
+-	return ret;
+-}
+-
+-asmlinkage int
+-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+-		unsigned long r6, unsigned long r7,
+-		struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-
+-	return do_sigaltstack(uss, uoss, regs->regs[15]);
+-}
+-
+-
+-/*
+- * Do a signal return; undo the signal stack.
+- */
+-
+-#define MOVW(n)	 (0x9300|((n)-2))	/* Move mem word at PC+n to R3 */
+-#if defined(CONFIG_CPU_SH2)
+-#define TRAP_NOARG 0xc320		/* Syscall w/no args (NR in R3) */
+-#else
+-#define TRAP_NOARG 0xc310		/* Syscall w/no args (NR in R3) */
+-#endif
+-#define OR_R0_R0 0x200b			/* or r0,r0 (insert to avoid hardware bug) */
+-
+-struct sigframe
+-{
+-	struct sigcontext sc;
+-	unsigned long extramask[_NSIG_WORDS-1];
+-	u16 retcode[8];
+-};
+-
+-struct rt_sigframe
+-{
+-	struct siginfo info;
+-	struct ucontext uc;
+-	u16 retcode[8];
+-};
+-
+-#ifdef CONFIG_SH_FPU
+-static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
+-{
+-	struct task_struct *tsk = current;
+-
+-	if (!(current_cpu_data.flags & CPU_HAS_FPU))
+-		return 0;
+-
+-	set_used_math();
+-	return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
+-				sizeof(long)*(16*2+2));
+-}
+-
+-static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
+-				      struct pt_regs *regs)
+-{
+-	struct task_struct *tsk = current;
+-
+-	if (!(current_cpu_data.flags & CPU_HAS_FPU))
+-		return 0;
+-
+-	if (!used_math()) {
+-		__put_user(0, &sc->sc_ownedfp);
+-		return 0;
+-	}
+-
+-	__put_user(1, &sc->sc_ownedfp);
+-
+-	/* This will cause a "finit" to be triggered by the next
+-	   attempted FPU operation by the 'current' process.
+-	   */
+-	clear_used_math();
+-
+-	unlazy_fpu(tsk, regs);
+-	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
+-			      sizeof(long)*(16*2+2));
+-}
+-#endif /* CONFIG_SH_FPU */
+-
+-static int
+-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
+-{
+-	unsigned int err = 0;
+-
+-#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
+-			COPY(regs[1]);
+-	COPY(regs[2]);	COPY(regs[3]);
+-	COPY(regs[4]);	COPY(regs[5]);
+-	COPY(regs[6]);	COPY(regs[7]);
+-	COPY(regs[8]);	COPY(regs[9]);
+-	COPY(regs[10]);	COPY(regs[11]);
+-	COPY(regs[12]);	COPY(regs[13]);
+-	COPY(regs[14]);	COPY(regs[15]);
+-	COPY(gbr);	COPY(mach);
+-	COPY(macl);	COPY(pr);
+-	COPY(sr);	COPY(pc);
+-#undef COPY
+-
+-#ifdef CONFIG_SH_FPU
+-	if (current_cpu_data.flags & CPU_HAS_FPU) {
+-		int owned_fp;
+-		struct task_struct *tsk = current;
+-
+-		regs->sr |= SR_FD; /* Release FPU */
+-		clear_fpu(tsk, regs);
+-		clear_used_math();
+-		__get_user (owned_fp, &sc->sc_ownedfp);
+-		if (owned_fp)
+-			err |= restore_sigcontext_fpu(sc);
+-	}
+-#endif
+-
+-	regs->tra = -1;		/* disable syscall checks */
+-	err |= __get_user(*r0_p, &sc->sc_regs[0]);
+-	return err;
+-}
+-
+-asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
+-			     unsigned long r6, unsigned long r7,
+-			     struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
+-	sigset_t set;
+-	int r0;
+-
+-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+-		goto badframe;
+-
+-	if (__get_user(set.sig[0], &frame->sc.oldmask)
+-	    || (_NSIG_WORDS > 1
+-		&& __copy_from_user(&set.sig[1], &frame->extramask,
+-				    sizeof(frame->extramask))))
+-		goto badframe;
+-
+-	sigdelsetmask(&set, ~_BLOCKABLE);
+-
+-	spin_lock_irq(&current->sighand->siglock);
+-	current->blocked = set;
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
+-
+-	if (restore_sigcontext(regs, &frame->sc, &r0))
+-		goto badframe;
+-	return r0;
+-
+-badframe:
+-	force_sig(SIGSEGV, current);
+-	return 0;
+-}
+-
+-asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
+-				unsigned long r6, unsigned long r7,
+-				struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
+-	sigset_t set;
+-	stack_t st;
+-	int r0;
+-
+-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+-		goto badframe;
+-
+-	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+-		goto badframe;
+-
+-	sigdelsetmask(&set, ~_BLOCKABLE);
+-	spin_lock_irq(&current->sighand->siglock);
+-	current->blocked = set;
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
+-
+-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+-		goto badframe;
+-
+-	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+-		goto badframe;
+-	/* It is more difficult to avoid calling this function than to
+-	   call it and ignore errors.  */
+-	do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
+-
+-	return r0;
+-
+-badframe:
+-	force_sig(SIGSEGV, current);
+-	return 0;
+-}
+-
+-/*
+- * Set up a signal frame.
+- */
+-
+-static int
+-setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+-		 unsigned long mask)
+-{
+-	int err = 0;
+-
+-#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
+-	COPY(regs[0]);	COPY(regs[1]);
+-	COPY(regs[2]);	COPY(regs[3]);
+-	COPY(regs[4]);	COPY(regs[5]);
+-	COPY(regs[6]);	COPY(regs[7]);
+-	COPY(regs[8]);	COPY(regs[9]);
+-	COPY(regs[10]);	COPY(regs[11]);
+-	COPY(regs[12]);	COPY(regs[13]);
+-	COPY(regs[14]);	COPY(regs[15]);
+-	COPY(gbr);	COPY(mach);
+-	COPY(macl);	COPY(pr);
+-	COPY(sr);	COPY(pc);
+-#undef COPY
+-
+-#ifdef CONFIG_SH_FPU
+-	err |= save_sigcontext_fpu(sc, regs);
+-#endif
+-
+-	/* non-iBCS2 extensions.. */
+-	err |= __put_user(mask, &sc->oldmask);
+-
+-	return err;
+-}
+-
+-/*
+- * Determine which stack to use..
+- */
+-static inline void __user *
+-get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+-{
+-	if (ka->sa.sa_flags & SA_ONSTACK) {
+-		if (sas_ss_flags(sp) == 0)
+-			sp = current->sas_ss_sp + current->sas_ss_size;
+-	}
+-
+-	return (void __user *)((sp - frame_size) & -8ul);
+-}
+-
+-/* These symbols are defined with the addresses in the vsyscall page.
+-   See vsyscall-trapa.S.  */
+-extern void __user __kernel_sigreturn;
+-extern void __user __kernel_rt_sigreturn;
+-
+-static int setup_frame(int sig, struct k_sigaction *ka,
+-			sigset_t *set, struct pt_regs *regs)
+-{
+-	struct sigframe __user *frame;
+-	int err = 0;
+-	int signal;
+-
+-	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
+-
+-	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+-		goto give_sigsegv;
+-
+-	signal = current_thread_info()->exec_domain
+-		&& current_thread_info()->exec_domain->signal_invmap
+-		&& sig < 32
+-		? current_thread_info()->exec_domain->signal_invmap[sig]
+-		: sig;
+-
+-	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+-
+-	if (_NSIG_WORDS > 1)
+-		err |= __copy_to_user(frame->extramask, &set->sig[1],
+-				      sizeof(frame->extramask));
+-
+-	/* Set up to return from userspace.  If provided, use a stub
+-	   already in userspace.  */
+-	if (ka->sa.sa_flags & SA_RESTORER) {
+-		regs->pr = (unsigned long) ka->sa.sa_restorer;
+-#ifdef CONFIG_VSYSCALL
+-	} else if (likely(current->mm->context.vdso)) {
+-		regs->pr = VDSO_SYM(&__kernel_sigreturn);
+-#endif
+-	} else {
+-		/* Generate return code (system call to sigreturn) */
+-		err |= __put_user(MOVW(7), &frame->retcode[0]);
+-		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
+-		err |= __put_user((__NR_sigreturn), &frame->retcode[7]);
+-		regs->pr = (unsigned long) frame->retcode;
+-	}
+-
+-	if (err)
+-		goto give_sigsegv;
+-
+-	/* Set up registers for signal handler */
+-	regs->regs[15] = (unsigned long) frame;
+-	regs->regs[4] = signal; /* Arg for signal handler */
+-	regs->regs[5] = 0;
+-	regs->regs[6] = (unsigned long) &frame->sc;
+-	regs->pc = (unsigned long) ka->sa.sa_handler;
+-
+-	set_fs(USER_DS);
+-
+-	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+-		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
+-
+-	flush_cache_sigtramp(regs->pr);
+-
+-	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
+-		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
+-
+-	return 0;
+-
+-give_sigsegv:
+-	force_sigsegv(sig, current);
+-	return -EFAULT;
+-}
+-
+-static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+-			   sigset_t *set, struct pt_regs *regs)
+-{
+-	struct rt_sigframe __user *frame;
+-	int err = 0;
+-	int signal;
+-
+-	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
+-
+-	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+-		goto give_sigsegv;
+-
+-	signal = current_thread_info()->exec_domain
+-		&& current_thread_info()->exec_domain->signal_invmap
+-		&& sig < 32
+-		? current_thread_info()->exec_domain->signal_invmap[sig]
+-		: sig;
+-
+-	err |= copy_siginfo_to_user(&frame->info, info);
+-
+-	/* Create the ucontext.  */
+-	err |= __put_user(0, &frame->uc.uc_flags);
+-	err |= __put_user(0, &frame->uc.uc_link);
+-	err |= __put_user((void *)current->sas_ss_sp,
+-			  &frame->uc.uc_stack.ss_sp);
+-	err |= __put_user(sas_ss_flags(regs->regs[15]),
+-			  &frame->uc.uc_stack.ss_flags);
+-	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+-	err |= setup_sigcontext(&frame->uc.uc_mcontext,
+-			        regs, set->sig[0]);
+-	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+-
+-	/* Set up to return from userspace.  If provided, use a stub
+-	   already in userspace.  */
+-	if (ka->sa.sa_flags & SA_RESTORER) {
+-		regs->pr = (unsigned long) ka->sa.sa_restorer;
+-#ifdef CONFIG_VSYSCALL
+-	} else if (likely(current->mm->context.vdso)) {
+-		regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
+-#endif
+-	} else {
+-		/* Generate return code (system call to rt_sigreturn) */
+-		err |= __put_user(MOVW(7), &frame->retcode[0]);
+-		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
+-		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
+-		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
+-		regs->pr = (unsigned long) frame->retcode;
+-	}
+-
+-	if (err)
+-		goto give_sigsegv;
+-
+-	/* Set up registers for signal handler */
+-	regs->regs[15] = (unsigned long) frame;
+-	regs->regs[4] = signal; /* Arg for signal handler */
+-	regs->regs[5] = (unsigned long) &frame->info;
+-	regs->regs[6] = (unsigned long) &frame->uc;
+-	regs->pc = (unsigned long) ka->sa.sa_handler;
+-
+-	set_fs(USER_DS);
+-
+-	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+-		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
+-
+-	flush_cache_sigtramp(regs->pr);
+-
+-	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
+-		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
+-
+-	return 0;
+-
+-give_sigsegv:
+-	force_sigsegv(sig, current);
+-	return -EFAULT;
+-}
+-
+-/*
+- * OK, we're invoking a handler
+- */
+-
+-static int
+-handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+-	      sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
+-{
+-	int ret;
+-
+-	/* Are we from a system call? */
+-	if (regs->tra >= 0) {
+-		/* If so, check system call restarting.. */
+-		switch (regs->regs[0]) {
+-			case -ERESTART_RESTARTBLOCK:
+-			case -ERESTARTNOHAND:
+-				regs->regs[0] = -EINTR;
+-				break;
+-
+-			case -ERESTARTSYS:
+-				if (!(ka->sa.sa_flags & SA_RESTART)) {
+-					regs->regs[0] = -EINTR;
+-					break;
+-				}
+-			/* fallthrough */
+-			case -ERESTARTNOINTR:
+-				regs->regs[0] = save_r0;
+-				regs->pc -= instruction_size(
+-						ctrl_inw(regs->pc - 4));
+-				break;
+-		}
+-#ifdef CONFIG_GUSA
+-	} else {
+-		/* gUSA handling */
+-		preempt_disable();
+-
+-		if (regs->regs[15] >= 0xc0000000) {
+-			int offset = (int)regs->regs[15];
+-
+-			/* Reset stack pointer: clear critical region mark */
+-			regs->regs[15] = regs->regs[1];
+-			if (regs->pc < regs->regs[0])
+-				/* Go to rewind point #1 */
+-				regs->pc = regs->regs[0] + offset -
+-					instruction_size(ctrl_inw(regs->pc-4));
+-		}
+-
+-		preempt_enable_no_resched();
+-#endif
+-	}
+-
+-	/* Set up the stack frame */
+-	if (ka->sa.sa_flags & SA_SIGINFO)
+-		ret = setup_rt_frame(sig, ka, info, oldset, regs);
+-	else
+-		ret = setup_frame(sig, ka, oldset, regs);
+-
+-	if (ka->sa.sa_flags & SA_ONESHOT)
+-		ka->sa.sa_handler = SIG_DFL;
+-
+-	if (ret == 0) {
+-		spin_lock_irq(&current->sighand->siglock);
+-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+-		if (!(ka->sa.sa_flags & SA_NODEFER))
+-			sigaddset(&current->blocked,sig);
+-		recalc_sigpending();
+-		spin_unlock_irq(&current->sighand->siglock);
+-	}
+-
+-	return ret;
+-}
+-
+-/*
+- * Note that 'init' is a special process: it doesn't get signals it doesn't
+- * want to handle. Thus you cannot kill init even with a SIGKILL even by
+- * mistake.
+- *
+- * Note that we go through the signals twice: once to check the signals that
+- * the kernel can handle, and then we build all the user-level signal handling
+- * stack-frames in one go after that.
+- */
+-static void do_signal(struct pt_regs *regs, unsigned int save_r0)
+-{
+-	siginfo_t info;
+-	int signr;
+-	struct k_sigaction ka;
+-	sigset_t *oldset;
+-
+-	/*
+-	 * We want the common case to go fast, which
+-	 * is why we may in certain cases get here from
+-	 * kernel mode. Just return without doing anything
+-	 * if so.
+-	 */
+-	if (!user_mode(regs))
+-		return;
+-
+-	if (try_to_freeze())
+-		goto no_signal;
+-
+-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+-		oldset = &current->saved_sigmask;
+-	else
+-		oldset = &current->blocked;
+-
+-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+-	if (signr > 0) {
+-		/* Whee!  Actually deliver the signal.  */
+-		if (handle_signal(signr, &ka, &info, oldset,
+-				  regs, save_r0) == 0) {
+-			/* a signal was successfully delivered; the saved
+-			 * sigmask will have been stored in the signal frame,
+-			 * and will be restored by sigreturn, so we can simply
+-			 * clear the TIF_RESTORE_SIGMASK flag */
+-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+-				clear_thread_flag(TIF_RESTORE_SIGMASK);
+-		}
+-
+-		return;
+-	}
+-
+- no_signal:
+-	/* Did we come from a system call? */
+-	if (regs->tra >= 0) {
+-		/* Restart the system call - no handlers present */
+-		if (regs->regs[0] == -ERESTARTNOHAND ||
+-		    regs->regs[0] == -ERESTARTSYS ||
+-		    regs->regs[0] == -ERESTARTNOINTR) {
+-			regs->regs[0] = save_r0;
+-			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+-		} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
+-			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+-			regs->regs[3] = __NR_restart_syscall;
+-		}
+-	}
+-
+-	/* if there's no signal to deliver, we just put the saved sigmask
+-	 * back */
+-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+-		clear_thread_flag(TIF_RESTORE_SIGMASK);
+-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+-	}
+-}
+-
+-asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
+-				 __u32 thread_info_flags)
+-{
+-	/* deal with pending signal delivery */
+-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+-		do_signal(regs, save_r0);
+-}
+diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
+new file mode 100644
+index 0000000..f6b5fbf
+--- /dev/null
++++ b/arch/sh/kernel/signal_32.c
+@@ -0,0 +1,611 @@
++/*
++ *  linux/arch/sh/kernel/signal.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *
++ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
++ *
++ *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
++ *
++ */
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/kernel.h>
++#include <linux/signal.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/ptrace.h>
++#include <linux/unistd.h>
++#include <linux/stddef.h>
++#include <linux/tty.h>
++#include <linux/elf.h>
++#include <linux/personality.h>
++#include <linux/binfmts.h>
++#include <linux/freezer.h>
++#include <linux/io.h>
++#include <asm/system.h>
++#include <asm/ucontext.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/cacheflush.h>
 +
-+	/* Trace it by calling syscall_trace before and after */
-+	movi	syscall_trace, r4
-+	or	SP, ZERO, r2
-+	or	ZERO, ZERO, r3
-+	ptabs	r4, tr0
-+	blink	tr0, LINK
++#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 +
-+	/* Reload syscall number as r5 is trashed by syscall_trace */
-+	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
-+	andi	r5, 0x1ff, r5
++/*
++ * Atomically swap in the new signal mask, and wait for a signal.
++ */
++asmlinkage int
++sys_sigsuspend(old_sigset_t mask,
++	       unsigned long r5, unsigned long r6, unsigned long r7,
++	       struct pt_regs __regs)
++{
++	mask &= _BLOCKABLE;
++	spin_lock_irq(&current->sighand->siglock);
++	current->saved_sigmask = current->blocked;
++	siginitset(&current->blocked, mask);
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
 +
-+	pta	syscall_ret_trace, tr0
-+	gettr	tr0, LINK
++	current->state = TASK_INTERRUPTIBLE;
++	schedule();
++	set_thread_flag(TIF_RESTORE_SIGMASK);
++	return -ERESTARTNOHAND;
++}
 +
-+syscall_notrace:
-+	/* Now point to the appropriate 4th level syscall handler */
-+	movi	sys_call_table, r4
-+	shlli	r5, 2, r5
-+	ldx.l	r4, r5, r5
-+	ptabs	r5, tr0
++asmlinkage int
++sys_sigaction(int sig, const struct old_sigaction __user *act,
++	      struct old_sigaction __user *oact)
++{
++	struct k_sigaction new_ka, old_ka;
++	int ret;
 +
-+	/* Prepare original args */
-+	ld.q	SP, FRAME_R(2), r2
-+	ld.q	SP, FRAME_R(3), r3
-+	ld.q	SP, FRAME_R(4), r4
-+	ld.q	SP, FRAME_R(5), r5
-+	ld.q	SP, FRAME_R(6), r6
-+	ld.q	SP, FRAME_R(7), r7
++	if (act) {
++		old_sigset_t mask;
++		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
++		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
++		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
++			return -EFAULT;
++		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
++		__get_user(mask, &act->sa_mask);
++		siginitset(&new_ka.sa.sa_mask, mask);
++	}
 +
-+	/* And now the trick for those syscalls requiring regs * ! */
-+	or	SP, ZERO, r8
++	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 +
-+	/* Call it */
-+	blink	tr0, ZERO	/* LINK is already properly set */
++	if (!ret && oact) {
++		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
++		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
++		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
++			return -EFAULT;
++		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
++		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
++	}
 +
-+syscall_ret_trace:
-+	/* We get back here only if under trace */
-+	st.q	SP, FRAME_R(9), r2	/* Save return value */
++	return ret;
++}
 +
-+	movi	syscall_trace, LINK
-+	or	SP, ZERO, r2
-+	movi	1, r3
-+	ptabs	LINK, tr0
-+	blink	tr0, LINK
++asmlinkage int
++sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
++		unsigned long r6, unsigned long r7,
++		struct pt_regs __regs)
++{
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 +
-+	/* This needs to be done after any syscall tracing */
-+	ld.q	SP, FRAME_S(FSPC), r2
-+	addi	r2, 4, r2	/* Move PC, being pre-execution event */
-+	st.q	SP, FRAME_S(FSPC), r2
++	return do_sigaltstack(uss, uoss, regs->regs[15]);
++}
 +
-+	pta	ret_from_syscall, tr0
-+	blink	tr0, ZERO		/* Resume normal return sequence */
 +
 +/*
-+ * --- Switch to running under a particular ASID and return the previous ASID value
-+ * --- The caller is assumed to have done a cli before calling this.
-+ *
-+ * Input r2 : new ASID
-+ * Output r2 : old ASID
++ * Do a signal return; undo the signal stack.
 + */
 +
-+	.global switch_and_save_asid
-+switch_and_save_asid:
-+	getcon	sr, r0
-+	movi	255, r4
-+	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
-+	and	r0, r4, r3	/* r3 = shifted old ASID */
-+	andi	r2, 255, r2	/* mask down new ASID */
-+	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
-+	andc	r0, r4, r0	/* efface old ASID from SR */
-+	or	r0, r2, r0	/* insert the new ASID */
-+	putcon	r0, ssr
-+	movi	1f, r0
-+	putcon	r0, spc
-+	rte
-+	nop
-+1:
-+	ptabs	LINK, tr0
-+	shlri	r3, 16, r2	/* r2 = old ASID */
-+	blink tr0, r63
++#define MOVW(n)	 (0x9300|((n)-2))	/* Move mem word at PC+n to R3 */
++#if defined(CONFIG_CPU_SH2)
++#define TRAP_NOARG 0xc320		/* Syscall w/no args (NR in R3) */
++#else
++#define TRAP_NOARG 0xc310		/* Syscall w/no args (NR in R3) */
++#endif
++#define OR_R0_R0 0x200b			/* or r0,r0 (insert to avoid hardware bug) */
 +
-+	.global	route_to_panic_handler
-+route_to_panic_handler:
-+	/* Switch to real mode, goto panic_handler, don't return.  Useful for
-+	   last-chance debugging, e.g. if no output wants to go to the console.
-+	   */
++struct sigframe
++{
++	struct sigcontext sc;
++	unsigned long extramask[_NSIG_WORDS-1];
++	u16 retcode[8];
++};
 +
-+	movi	panic_handler - CONFIG_PAGE_OFFSET, r1
-+	ptabs	r1, tr0
-+	pta	1f, tr1
-+	gettr	tr1, r0
-+	putcon	r0, spc
-+	getcon	sr, r0
-+	movi	1, r1
-+	shlli	r1, 31, r1
-+	andc	r0, r1, r0
-+	putcon	r0, ssr
-+	rte
-+	nop
-+1:	/* Now in real mode */
-+	blink tr0, r63
-+	nop
++struct rt_sigframe
++{
++	struct siginfo info;
++	struct ucontext uc;
++	u16 retcode[8];
++};
 +
-+	.global peek_real_address_q
-+peek_real_address_q:
-+	/* Two args:
-+	   r2 : real mode address to peek
-+	   r2(out) : result quadword
++#ifdef CONFIG_SH_FPU
++static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
++{
++	struct task_struct *tsk = current;
 +
-+	   This is provided as a cheapskate way of manipulating device
-+	   registers for debugging (to avoid the need to onchip_remap the debug
-+	   module, and to avoid the need to onchip_remap the watchpoint
-+	   controller in a way that identity maps sufficient bits to avoid the
-+	   SH5-101 cut2 silicon defect).
++	if (!(current_cpu_data.flags & CPU_HAS_FPU))
++		return 0;
 +
-+	   This code is not performance critical
-+	*/
++	set_used_math();
++	return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
++				sizeof(long)*(16*2+2));
++}
 +
-+	add.l	r2, r63, r2	/* sign extend address */
-+	getcon	sr, r0		/* r0 = saved original SR */
-+	movi	1, r1
-+	shlli	r1, 28, r1
-+	or	r0, r1, r1	/* r0 with block bit set */
-+	putcon	r1, sr		/* now in critical section */
-+	movi	1, r36
-+	shlli	r36, 31, r36
-+	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
++static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
++				      struct pt_regs *regs)
++{
++	struct task_struct *tsk = current;
 +
-+	putcon	r1, ssr
-+	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
-+	movi	1f, r37		/* virtual mode return addr */
-+	putcon	r36, spc
++	if (!(current_cpu_data.flags & CPU_HAS_FPU))
++		return 0;
 +
-+	synco
-+	rte
-+	nop
++	if (!used_math()) {
++		__put_user(0, &sc->sc_ownedfp);
++		return 0;
++	}
 +
-+.peek0:	/* come here in real mode, don't touch caches!!
-+           still in critical section (sr.bl==1) */
-+	putcon	r0, ssr
-+	putcon	r37, spc
-+	/* Here's the actual peek.  If the address is bad, all bets are now off
-+	 * what will happen (handlers invoked in real-mode = bad news) */
-+	ld.q	r2, 0, r2
-+	synco
-+	rte	/* Back to virtual mode */
-+	nop
++	__put_user(1, &sc->sc_ownedfp);
 +
-+1:
-+	ptabs	LINK, tr0
-+	blink	tr0, r63
++	/* This will cause a "finit" to be triggered by the next
++	   attempted FPU operation by the 'current' process.
++	   */
++	clear_used_math();
 +
-+	.global poke_real_address_q
-+poke_real_address_q:
-+	/* Two args:
-+	   r2 : real mode address to poke
-+	   r3 : quadword value to write.
++	unlazy_fpu(tsk, regs);
++	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
++			      sizeof(long)*(16*2+2));
++}
++#endif /* CONFIG_SH_FPU */
 +
-+	   This is provided as a cheapskate way of manipulating device
-+	   registers for debugging (to avoid the need to onchip_remap the debug
-+	   module, and to avoid the need to onchip_remap the watchpoint
-+	   controller in a way that identity maps sufficient bits to avoid the
-+	   SH5-101 cut2 silicon defect).
++static int
++restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
++{
++	unsigned int err = 0;
 +
-+	   This code is not performance critical
-+	*/
++#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
++			COPY(regs[1]);
++	COPY(regs[2]);	COPY(regs[3]);
++	COPY(regs[4]);	COPY(regs[5]);
++	COPY(regs[6]);	COPY(regs[7]);
++	COPY(regs[8]);	COPY(regs[9]);
++	COPY(regs[10]);	COPY(regs[11]);
++	COPY(regs[12]);	COPY(regs[13]);
++	COPY(regs[14]);	COPY(regs[15]);
++	COPY(gbr);	COPY(mach);
++	COPY(macl);	COPY(pr);
++	COPY(sr);	COPY(pc);
++#undef COPY
 +
-+	add.l	r2, r63, r2	/* sign extend address */
-+	getcon	sr, r0		/* r0 = saved original SR */
-+	movi	1, r1
-+	shlli	r1, 28, r1
-+	or	r0, r1, r1	/* r0 with block bit set */
-+	putcon	r1, sr		/* now in critical section */
-+	movi	1, r36
-+	shlli	r36, 31, r36
-+	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
++#ifdef CONFIG_SH_FPU
++	if (current_cpu_data.flags & CPU_HAS_FPU) {
++		int owned_fp;
++		struct task_struct *tsk = current;
 +
-+	putcon	r1, ssr
-+	movi	.poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
-+	movi	1f, r37		/* virtual mode return addr */
-+	putcon	r36, spc
++		regs->sr |= SR_FD; /* Release FPU */
++		clear_fpu(tsk, regs);
++		clear_used_math();
++		__get_user (owned_fp, &sc->sc_ownedfp);
++		if (owned_fp)
++			err |= restore_sigcontext_fpu(sc);
++	}
++#endif
 +
-+	synco
-+	rte
-+	nop
++	regs->tra = -1;		/* disable syscall checks */
++	err |= __get_user(*r0_p, &sc->sc_regs[0]);
++	return err;
++}
 +
-+.poke0:	/* come here in real mode, don't touch caches!!
-+           still in critical section (sr.bl==1) */
-+	putcon	r0, ssr
-+	putcon	r37, spc
-+	/* Here's the actual poke.  If the address is bad, all bets are now off
-+	 * what will happen (handlers invoked in real-mode = bad news) */
-+	st.q	r2, 0, r3
-+	synco
-+	rte	/* Back to virtual mode */
-+	nop
++asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
++			     unsigned long r6, unsigned long r7,
++			     struct pt_regs __regs)
++{
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
++	sigset_t set;
++	int r0;
 +
-+1:
-+	ptabs	LINK, tr0
-+	blink	tr0, r63
++	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
++		goto badframe;
 +
-+/*
-+ * --- User Access Handling Section
-+ */
++	if (__get_user(set.sig[0], &frame->sc.oldmask)
++	    || (_NSIG_WORDS > 1
++		&& __copy_from_user(&set.sig[1], &frame->extramask,
++				    sizeof(frame->extramask))))
++		goto badframe;
 +
-+/*
-+ * User Access support. It all moved to non inlined Assembler
-+ * functions in here.
-+ *
-+ * __kernel_size_t __copy_user(void *__to, const void *__from,
-+ *			       __kernel_size_t __n)
-+ *
-+ * Inputs:
-+ * (r2)  target address
-+ * (r3)  source address
-+ * (r4)  size in bytes
-+ *
-+ * Ouputs:
-+ * (*r2) target data
-+ * (r2)  non-copied bytes
-+ *
-+ * If a fault occurs on the user pointer, bail out early and return the
-+ * number of bytes not copied in r2.
-+ * Strategy : for large blocks, call a real memcpy function which can
-+ * move >1 byte at a time using unaligned ld/st instructions, and can
-+ * manipulate the cache using prefetch + alloco to improve the speed
-+ * further.  If a fault occurs in that function, just revert to the
-+ * byte-by-byte approach used for small blocks; this is rare so the
-+ * performance hit for that case does not matter.
-+ *
-+ * For small blocks it's not worth the overhead of setting up and calling
-+ * the memcpy routine; do the copy a byte at a time.
-+ *
-+ */
-+	.global	__copy_user
-+__copy_user:
-+	pta	__copy_user_byte_by_byte, tr1
-+	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
-+	bge/u	r0, r4, tr1
-+	pta copy_user_memcpy, tr0
-+	addi	SP, -32, SP
-+	/* Save arguments in case we have to fix-up unhandled page fault */
-+	st.q	SP, 0, r2
-+	st.q	SP, 8, r3
-+	st.q	SP, 16, r4
-+	st.q	SP, 24, r35 ! r35 is callee-save
-+	/* Save LINK in a register to reduce RTS time later (otherwise
-+	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
-+	ori	LINK, 0, r35
-+	blink	tr0, LINK
++	sigdelsetmask(&set, ~_BLOCKABLE);
 +
-+	/* Copy completed normally if we get back here */
-+	ptabs	r35, tr0
-+	ld.q	SP, 24, r35
-+	/* don't restore r2-r4, pointless */
-+	/* set result=r2 to zero as the copy must have succeeded. */
-+	or	r63, r63, r2
-+	addi	SP, 32, SP
-+	blink	tr0, r63 ! RTS
++	spin_lock_irq(&current->sighand->siglock);
++	current->blocked = set;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
 +
-+	.global __copy_user_fixup
-+__copy_user_fixup:
-+	/* Restore stack frame */
-+	ori	r35, 0, LINK
-+	ld.q	SP, 24, r35
-+	ld.q	SP, 16, r4
-+	ld.q	SP,  8, r3
-+	ld.q	SP,  0, r2
-+	addi	SP, 32, SP
-+	/* Fall through to original code, in the 'same' state we entered with */
++	if (restore_sigcontext(regs, &frame->sc, &r0))
++		goto badframe;
++	return r0;
 +
-+/* The slow byte-by-byte method is used if the fast copy traps due to a bad
-+   user address.  In that rare case, the speed drop can be tolerated. */
-+__copy_user_byte_by_byte:
-+	pta	___copy_user_exit, tr1
-+	pta	___copy_user1, tr0
-+	beq/u	r4, r63, tr1	/* early exit for zero length copy */
-+	sub	r2, r3, r0
-+	addi	r0, -1, r0
++badframe:
++	force_sig(SIGSEGV, current);
++	return 0;
++}
 +
-+___copy_user1:
-+	ld.b	r3, 0, r5		/* Fault address 1 */
++asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
++				unsigned long r6, unsigned long r7,
++				struct pt_regs __regs)
++{
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
++	sigset_t set;
++	stack_t st;
++	int r0;
 +
-+	/* Could rewrite this to use just 1 add, but the second comes 'free'
-+	   due to load latency */
-+	addi	r3, 1, r3
-+	addi	r4, -1, r4		/* No real fixup required */
-+___copy_user2:
-+	stx.b	r3, r0, r5		/* Fault address 2 */
-+	bne     r4, ZERO, tr0
++	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
++		goto badframe;
 +
-+___copy_user_exit:
-+	or	r4, ZERO, r2
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
++		goto badframe;
 +
-+/*
-+ * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
-+ *
-+ * Inputs:
-+ * (r2)  target address
-+ * (r3)  size in bytes
-+ *
-+ * Ouputs:
-+ * (*r2) zero-ed target data
-+ * (r2)  non-zero-ed bytes
-+ */
-+	.global	__clear_user
-+__clear_user:
-+	pta	___clear_user_exit, tr1
-+	pta	___clear_user1, tr0
-+	beq/u	r3, r63, tr1
++	sigdelsetmask(&set, ~_BLOCKABLE);
++	spin_lock_irq(&current->sighand->siglock);
++	current->blocked = set;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
 +
-+___clear_user1:
-+	st.b	r2, 0, ZERO		/* Fault address */
-+	addi	r2, 1, r2
-+	addi	r3, -1, r3		/* No real fixup required */
-+	bne     r3, ZERO, tr0
++	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
++		goto badframe;
 +
-+___clear_user_exit:
-+	or	r3, ZERO, r2
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
++		goto badframe;
++	/* It is more difficult to avoid calling this function than to
++	   call it and ignore errors.  */
++	do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
++
++	return r0;
 +
++badframe:
++	force_sig(SIGSEGV, current);
++	return 0;
++}
 +
 +/*
-+ * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
-+ *			   int __count)
-+ *
-+ * Inputs:
-+ * (r2)  target address
-+ * (r3)  source address
-+ * (r4)  maximum size in bytes
-+ *
-+ * Ouputs:
-+ * (*r2) copied data
-+ * (r2)  -EFAULT (in case of faulting)
-+ *       copied data (otherwise)
++ * Set up a signal frame.
 + */
-+	.global	__strncpy_from_user
-+__strncpy_from_user:
-+	pta	___strncpy_from_user1, tr0
-+	pta	___strncpy_from_user_done, tr1
-+	or	r4, ZERO, r5		/* r5 = original count */
-+	beq/u	r4, r63, tr1		/* early exit if r4==0 */
-+	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
-+	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
-+
-+___strncpy_from_user1:
-+	ld.b	r3, 0, r7		/* Fault address: only in reading */
-+	st.b	r2, 0, r7
-+	addi	r2, 1, r2
-+	addi	r3, 1, r3
-+	beq/u	ZERO, r7, tr1
-+	addi	r4, -1, r4		/* return real number of copied bytes */
-+	bne/l	ZERO, r4, tr0
-+
-+___strncpy_from_user_done:
-+	sub	r5, r4, r6		/* If done, return copied */
 +
-+___strncpy_from_user_exit:
-+	or	r6, ZERO, r2
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++static int
++setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
++		 unsigned long mask)
++{
++	int err = 0;
 +
-+/*
-+ * extern long __strnlen_user(const char *__s, long __n)
-+ *
-+ * Inputs:
-+ * (r2)  source address
-+ * (r3)  source size in bytes
-+ *
-+ * Ouputs:
-+ * (r2)  -EFAULT (in case of faulting)
-+ *       string length (otherwise)
-+ */
-+	.global	__strnlen_user
-+__strnlen_user:
-+	pta	___strnlen_user_set_reply, tr0
-+	pta	___strnlen_user1, tr1
-+	or	ZERO, ZERO, r5		/* r5 = counter */
-+	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
-+	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
-+	beq	r3, ZERO, tr0
++#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
++	COPY(regs[0]);	COPY(regs[1]);
++	COPY(regs[2]);	COPY(regs[3]);
++	COPY(regs[4]);	COPY(regs[5]);
++	COPY(regs[6]);	COPY(regs[7]);
++	COPY(regs[8]);	COPY(regs[9]);
++	COPY(regs[10]);	COPY(regs[11]);
++	COPY(regs[12]);	COPY(regs[13]);
++	COPY(regs[14]);	COPY(regs[15]);
++	COPY(gbr);	COPY(mach);
++	COPY(macl);	COPY(pr);
++	COPY(sr);	COPY(pc);
++#undef COPY
 +
-+___strnlen_user1:
-+	ldx.b	r2, r5, r7		/* Fault address: only in reading */
-+	addi	r3, -1, r3		/* No real fixup */
-+	addi	r5, 1, r5
-+	beq	r3, ZERO, tr0
-+	bne	r7, ZERO, tr1
-+! The line below used to be active.  This meant led to a junk byte lying between each pair
-+! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
-+! via the argv and envp arguments to main, it meant the 'flat' representation visible through
-+! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
-+!	addi	r5, 1, r5		/* Include '\0' */
++#ifdef CONFIG_SH_FPU
++	err |= save_sigcontext_fpu(sc, regs);
++#endif
 +
-+___strnlen_user_set_reply:
-+	or	r5, ZERO, r6		/* If done, return counter */
++	/* non-iBCS2 extensions.. */
++	err |= __put_user(mask, &sc->oldmask);
 +
-+___strnlen_user_exit:
-+	or	r6, ZERO, r2
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++	return err;
++}
 +
 +/*
-+ * extern long __get_user_asm_?(void *val, long addr)
-+ *
-+ * Inputs:
-+ * (r2)  dest address
-+ * (r3)  source address (in User Space)
-+ *
-+ * Ouputs:
-+ * (r2)  -EFAULT (faulting)
-+ *       0 	 (not faulting)
++ * Determine which stack to use..
 + */
-+	.global	__get_user_asm_b
-+__get_user_asm_b:
-+	or	r2, ZERO, r4
-+	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
-+
-+___get_user_asm_b1:
-+	ld.b	r3, 0, r5		/* r5 = data */
-+	st.b	r4, 0, r5
-+	or	ZERO, ZERO, r2
-+
-+___get_user_asm_b_exit:
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
-+
-+
-+	.global	__get_user_asm_w
-+__get_user_asm_w:
-+	or	r2, ZERO, r4
-+	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
-+
-+___get_user_asm_w1:
-+	ld.w	r3, 0, r5		/* r5 = data */
-+	st.w	r4, 0, r5
-+	or	ZERO, ZERO, r2
-+
-+___get_user_asm_w_exit:
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
-+
-+
-+	.global	__get_user_asm_l
-+__get_user_asm_l:
-+	or	r2, ZERO, r4
-+	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
-+
-+___get_user_asm_l1:
-+	ld.l	r3, 0, r5		/* r5 = data */
-+	st.l	r4, 0, r5
-+	or	ZERO, ZERO, r2
-+
-+___get_user_asm_l_exit:
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
-+
++static inline void __user *
++get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
++{
++	if (ka->sa.sa_flags & SA_ONSTACK) {
++		if (sas_ss_flags(sp) == 0)
++			sp = current->sas_ss_sp + current->sas_ss_size;
++	}
 +
-+	.global	__get_user_asm_q
-+__get_user_asm_q:
-+	or	r2, ZERO, r4
-+	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
++	return (void __user *)((sp - frame_size) & -8ul);
++}
 +
-+___get_user_asm_q1:
-+	ld.q	r3, 0, r5		/* r5 = data */
-+	st.q	r4, 0, r5
-+	or	ZERO, ZERO, r2
++/* These symbols are defined with the addresses in the vsyscall page.
++   See vsyscall-trapa.S.  */
++extern void __user __kernel_sigreturn;
++extern void __user __kernel_rt_sigreturn;
 +
-+___get_user_asm_q_exit:
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++static int setup_frame(int sig, struct k_sigaction *ka,
++			sigset_t *set, struct pt_regs *regs)
++{
++	struct sigframe __user *frame;
++	int err = 0;
++	int signal;
 +
-+/*
-+ * extern long __put_user_asm_?(void *pval, long addr)
-+ *
-+ * Inputs:
-+ * (r2)  kernel pointer to value
-+ * (r3)  dest address (in User Space)
-+ *
-+ * Ouputs:
-+ * (r2)  -EFAULT (faulting)
-+ *       0 	 (not faulting)
-+ */
-+	.global	__put_user_asm_b
-+__put_user_asm_b:
-+	ld.b	r2, 0, r4		/* r4 = data */
-+	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
++	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
 +
-+___put_user_asm_b1:
-+	st.b	r3, 0, r4
-+	or	ZERO, ZERO, r2
++	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
++		goto give_sigsegv;
 +
-+___put_user_asm_b_exit:
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++	signal = current_thread_info()->exec_domain
++		&& current_thread_info()->exec_domain->signal_invmap
++		&& sig < 32
++		? current_thread_info()->exec_domain->signal_invmap[sig]
++		: sig;
 +
++	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
 +
-+	.global	__put_user_asm_w
-+__put_user_asm_w:
-+	ld.w	r2, 0, r4		/* r4 = data */
-+	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
++	if (_NSIG_WORDS > 1)
++		err |= __copy_to_user(frame->extramask, &set->sig[1],
++				      sizeof(frame->extramask));
 +
-+___put_user_asm_w1:
-+	st.w	r3, 0, r4
-+	or	ZERO, ZERO, r2
++	/* Set up to return from userspace.  If provided, use a stub
++	   already in userspace.  */
++	if (ka->sa.sa_flags & SA_RESTORER) {
++		regs->pr = (unsigned long) ka->sa.sa_restorer;
++#ifdef CONFIG_VSYSCALL
++	} else if (likely(current->mm->context.vdso)) {
++		regs->pr = VDSO_SYM(&__kernel_sigreturn);
++#endif
++	} else {
++		/* Generate return code (system call to sigreturn) */
++		err |= __put_user(MOVW(7), &frame->retcode[0]);
++		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
++		err |= __put_user((__NR_sigreturn), &frame->retcode[7]);
++		regs->pr = (unsigned long) frame->retcode;
++	}
 +
-+___put_user_asm_w_exit:
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++	if (err)
++		goto give_sigsegv;
 +
++	/* Set up registers for signal handler */
++	regs->regs[15] = (unsigned long) frame;
++	regs->regs[4] = signal; /* Arg for signal handler */
++	regs->regs[5] = 0;
++	regs->regs[6] = (unsigned long) &frame->sc;
++	regs->pc = (unsigned long) ka->sa.sa_handler;
 +
-+	.global	__put_user_asm_l
-+__put_user_asm_l:
-+	ld.l	r2, 0, r4		/* r4 = data */
-+	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
++	set_fs(USER_DS);
 +
-+___put_user_asm_l1:
-+	st.l	r3, 0, r4
-+	or	ZERO, ZERO, r2
++	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
++		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 +
-+___put_user_asm_l_exit:
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++	flush_cache_sigtramp(regs->pr);
 +
++	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
++		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
 +
-+	.global	__put_user_asm_q
-+__put_user_asm_q:
-+	ld.q	r2, 0, r4		/* r4 = data */
-+	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
++	return 0;
 +
-+___put_user_asm_q1:
-+	st.q	r3, 0, r4
-+	or	ZERO, ZERO, r2
++give_sigsegv:
++	force_sigsegv(sig, current);
++	return -EFAULT;
++}
 +
-+___put_user_asm_q_exit:
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
++			   sigset_t *set, struct pt_regs *regs)
++{
++	struct rt_sigframe __user *frame;
++	int err = 0;
++	int signal;
 +
-+panic_stash_regs:
-+	/* The idea is : when we get an unhandled panic, we dump the registers
-+	   to a known memory location, the just sit in a tight loop.
-+	   This allows the human to look at the memory region through the GDB
-+	   session (assuming the debug module's SHwy initiator isn't locked up
-+	   or anything), to hopefully analyze the cause of the panic. */
++	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
 +
-+	/* On entry, former r15 (SP) is in DCR
-+	   former r0  is at resvec_saved_area + 0
-+	   former r1  is at resvec_saved_area + 8
-+	   former tr0 is at resvec_saved_area + 32
-+	   DCR is the only register whose value is lost altogether.
-+	*/
++	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
++		goto give_sigsegv;
 +
-+	movi	0xffffffff80000000, r0 ! phy of dump area
-+	ld.q	SP, 0x000, r1	! former r0
-+	st.q	r0,  0x000, r1
-+	ld.q	SP, 0x008, r1	! former r1
-+	st.q	r0,  0x008, r1
-+	st.q	r0,  0x010, r2
-+	st.q	r0,  0x018, r3
-+	st.q	r0,  0x020, r4
-+	st.q	r0,  0x028, r5
-+	st.q	r0,  0x030, r6
-+	st.q	r0,  0x038, r7
-+	st.q	r0,  0x040, r8
-+	st.q	r0,  0x048, r9
-+	st.q	r0,  0x050, r10
-+	st.q	r0,  0x058, r11
-+	st.q	r0,  0x060, r12
-+	st.q	r0,  0x068, r13
-+	st.q	r0,  0x070, r14
-+	getcon	dcr, r14
-+	st.q	r0,  0x078, r14
-+	st.q	r0,  0x080, r16
-+	st.q	r0,  0x088, r17
-+	st.q	r0,  0x090, r18
-+	st.q	r0,  0x098, r19
-+	st.q	r0,  0x0a0, r20
-+	st.q	r0,  0x0a8, r21
-+	st.q	r0,  0x0b0, r22
-+	st.q	r0,  0x0b8, r23
-+	st.q	r0,  0x0c0, r24
-+	st.q	r0,  0x0c8, r25
-+	st.q	r0,  0x0d0, r26
-+	st.q	r0,  0x0d8, r27
-+	st.q	r0,  0x0e0, r28
-+	st.q	r0,  0x0e8, r29
-+	st.q	r0,  0x0f0, r30
-+	st.q	r0,  0x0f8, r31
-+	st.q	r0,  0x100, r32
-+	st.q	r0,  0x108, r33
-+	st.q	r0,  0x110, r34
-+	st.q	r0,  0x118, r35
-+	st.q	r0,  0x120, r36
-+	st.q	r0,  0x128, r37
-+	st.q	r0,  0x130, r38
-+	st.q	r0,  0x138, r39
-+	st.q	r0,  0x140, r40
-+	st.q	r0,  0x148, r41
-+	st.q	r0,  0x150, r42
-+	st.q	r0,  0x158, r43
-+	st.q	r0,  0x160, r44
-+	st.q	r0,  0x168, r45
-+	st.q	r0,  0x170, r46
-+	st.q	r0,  0x178, r47
-+	st.q	r0,  0x180, r48
-+	st.q	r0,  0x188, r49
-+	st.q	r0,  0x190, r50
-+	st.q	r0,  0x198, r51
-+	st.q	r0,  0x1a0, r52
-+	st.q	r0,  0x1a8, r53
-+	st.q	r0,  0x1b0, r54
-+	st.q	r0,  0x1b8, r55
-+	st.q	r0,  0x1c0, r56
-+	st.q	r0,  0x1c8, r57
-+	st.q	r0,  0x1d0, r58
-+	st.q	r0,  0x1d8, r59
-+	st.q	r0,  0x1e0, r60
-+	st.q	r0,  0x1e8, r61
-+	st.q	r0,  0x1f0, r62
-+	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...
++	signal = current_thread_info()->exec_domain
++		&& current_thread_info()->exec_domain->signal_invmap
++		&& sig < 32
++		? current_thread_info()->exec_domain->signal_invmap[sig]
++		: sig;
 +
-+	ld.q	SP, 0x020, r1  ! former tr0
-+	st.q	r0,  0x200, r1
-+	gettr	tr1, r1
-+	st.q	r0,  0x208, r1
-+	gettr	tr2, r1
-+	st.q	r0,  0x210, r1
-+	gettr	tr3, r1
-+	st.q	r0,  0x218, r1
-+	gettr	tr4, r1
-+	st.q	r0,  0x220, r1
-+	gettr	tr5, r1
-+	st.q	r0,  0x228, r1
-+	gettr	tr6, r1
-+	st.q	r0,  0x230, r1
-+	gettr	tr7, r1
-+	st.q	r0,  0x238, r1
++	err |= copy_siginfo_to_user(&frame->info, info);
 +
-+	getcon	sr,  r1
-+	getcon	ssr,  r2
-+	getcon	pssr,  r3
-+	getcon	spc,  r4
-+	getcon	pspc,  r5
-+	getcon	intevt,  r6
-+	getcon	expevt,  r7
-+	getcon	pexpevt,  r8
-+	getcon	tra,  r9
-+	getcon	tea,  r10
-+	getcon	kcr0, r11
-+	getcon	kcr1, r12
-+	getcon	vbr,  r13
-+	getcon	resvec,  r14
++	/* Create the ucontext.  */
++	err |= __put_user(0, &frame->uc.uc_flags);
++	err |= __put_user(0, &frame->uc.uc_link);
++	err |= __put_user((void *)current->sas_ss_sp,
++			  &frame->uc.uc_stack.ss_sp);
++	err |= __put_user(sas_ss_flags(regs->regs[15]),
++			  &frame->uc.uc_stack.ss_flags);
++	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
++	err |= setup_sigcontext(&frame->uc.uc_mcontext,
++			        regs, set->sig[0]);
++	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 +
-+	st.q	r0,  0x240, r1
-+	st.q	r0,  0x248, r2
-+	st.q	r0,  0x250, r3
-+	st.q	r0,  0x258, r4
-+	st.q	r0,  0x260, r5
-+	st.q	r0,  0x268, r6
-+	st.q	r0,  0x270, r7
-+	st.q	r0,  0x278, r8
-+	st.q	r0,  0x280, r9
-+	st.q	r0,  0x288, r10
-+	st.q	r0,  0x290, r11
-+	st.q	r0,  0x298, r12
-+	st.q	r0,  0x2a0, r13
-+	st.q	r0,  0x2a8, r14
++	/* Set up to return from userspace.  If provided, use a stub
++	   already in userspace.  */
++	if (ka->sa.sa_flags & SA_RESTORER) {
++		regs->pr = (unsigned long) ka->sa.sa_restorer;
++#ifdef CONFIG_VSYSCALL
++	} else if (likely(current->mm->context.vdso)) {
++		regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
++#endif
++	} else {
++		/* Generate return code (system call to rt_sigreturn) */
++		err |= __put_user(MOVW(7), &frame->retcode[0]);
++		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
++		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
++		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
++		regs->pr = (unsigned long) frame->retcode;
++	}
 +
-+	getcon	SPC,r2
-+	getcon	SSR,r3
-+	getcon	EXPEVT,r4
-+	/* Prepare to jump to C - physical address */
-+	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
-+	ori	r1, 1, r1
-+	ptabs   r1, tr0
-+	getcon	DCR, SP
-+	blink	tr0, ZERO
-+	nop
-+	nop
-+	nop
-+	nop
++	if (err)
++		goto give_sigsegv;
 +
++	/* Set up registers for signal handler */
++	regs->regs[15] = (unsigned long) frame;
++	regs->regs[4] = signal; /* Arg for signal handler */
++	regs->regs[5] = (unsigned long) &frame->info;
++	regs->regs[6] = (unsigned long) &frame->uc;
++	regs->pc = (unsigned long) ka->sa.sa_handler;
 +
++	set_fs(USER_DS);
 +
++	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
++		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 +
-+/*
-+ * --- Signal Handling Section
-+ */
++	flush_cache_sigtramp(regs->pr);
 +
-+/*
-+ * extern long long _sa_default_rt_restorer
-+ * extern long long _sa_default_restorer
-+ *
-+ *		 or, better,
-+ *
-+ * extern void _sa_default_rt_restorer(void)
-+ * extern void _sa_default_restorer(void)
-+ *
-+ * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
-+ * from user space. Copied into user space by signal management.
-+ * Both must be quad aligned and 2 quad long (4 instructions).
-+ *
-+ */
-+	.balign 8
-+	.global sa_default_rt_restorer
-+sa_default_rt_restorer:
-+	movi	0x10, r9
-+	shori	__NR_rt_sigreturn, r9
-+	trapa	r9
-+	nop
++	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
++		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
 +
-+	.balign 8
-+	.global sa_default_restorer
-+sa_default_restorer:
-+	movi	0x10, r9
-+	shori	__NR_sigreturn, r9
-+	trapa	r9
-+	nop
++	return 0;
 +
-+/*
-+ * --- __ex_table Section
-+ */
++give_sigsegv:
++	force_sigsegv(sig, current);
++	return -EFAULT;
++}
 +
 +/*
-+ * User Access Exception Table.
++ * OK, we're invoking a handler
 + */
-+	.section	__ex_table,  "a"
 +
-+	.global asm_uaccess_start	/* Just a marker */
-+asm_uaccess_start:
-+
-+	.long	___copy_user1, ___copy_user_exit
-+	.long	___copy_user2, ___copy_user_exit
-+	.long	___clear_user1, ___clear_user_exit
-+	.long	___strncpy_from_user1, ___strncpy_from_user_exit
-+	.long	___strnlen_user1, ___strnlen_user_exit
-+	.long	___get_user_asm_b1, ___get_user_asm_b_exit
-+	.long	___get_user_asm_w1, ___get_user_asm_w_exit
-+	.long	___get_user_asm_l1, ___get_user_asm_l_exit
-+	.long	___get_user_asm_q1, ___get_user_asm_q_exit
-+	.long	___put_user_asm_b1, ___put_user_asm_b_exit
-+	.long	___put_user_asm_w1, ___put_user_asm_w_exit
-+	.long	___put_user_asm_l1, ___put_user_asm_l_exit
-+	.long	___put_user_asm_q1, ___put_user_asm_q_exit
++static int
++handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
++	      sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
++{
++	int ret;
 +
-+	.global asm_uaccess_end		/* Just a marker */
-+asm_uaccess_end:
++	/* Are we from a system call? */
++	if (regs->tra >= 0) {
++		/* If so, check system call restarting.. */
++		switch (regs->regs[0]) {
++			case -ERESTART_RESTARTBLOCK:
++			case -ERESTARTNOHAND:
++				regs->regs[0] = -EINTR;
++				break;
 +
++			case -ERESTARTSYS:
++				if (!(ka->sa.sa_flags & SA_RESTART)) {
++					regs->regs[0] = -EINTR;
++					break;
++				}
++			/* fallthrough */
++			case -ERESTARTNOINTR:
++				regs->regs[0] = save_r0;
++				regs->pc -= instruction_size(
++						ctrl_inw(regs->pc - 4));
++				break;
++		}
++	}
 +
++	/* Set up the stack frame */
++	if (ka->sa.sa_flags & SA_SIGINFO)
++		ret = setup_rt_frame(sig, ka, info, oldset, regs);
++	else
++		ret = setup_frame(sig, ka, oldset, regs);
 +
++	if (ka->sa.sa_flags & SA_ONESHOT)
++		ka->sa.sa_handler = SIG_DFL;
 +
-+/*
-+ * --- .text.init Section
-+ */
++	if (ret == 0) {
++		spin_lock_irq(&current->sighand->siglock);
++		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
++		if (!(ka->sa.sa_flags & SA_NODEFER))
++			sigaddset(&current->blocked,sig);
++		recalc_sigpending();
++		spin_unlock_irq(&current->sighand->siglock);
++	}
 +
-+	.section	.text.init, "ax"
++	return ret;
++}
 +
 +/*
-+ * void trap_init (void)
++ * Note that 'init' is a special process: it doesn't get signals it doesn't
++ * want to handle. Thus you cannot kill init even with a SIGKILL even by
++ * mistake.
 + *
++ * Note that we go through the signals twice: once to check the signals that
++ * the kernel can handle, and then we build all the user-level signal handling
++ * stack-frames in one go after that.
 + */
-+	.global	trap_init
-+trap_init:
-+	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
-+	st.q	SP, 0, r28
-+	st.q	SP, 8, r29
-+	st.q	SP, 16, r30
-+
-+	/* Set VBR and RESVEC */
-+	movi	LVBR_block, r19
-+	andi	r19, -4, r19			/* reset MMUOFF + reserved */
-+	/* For RESVEC exceptions we force the MMU off, which means we need the
-+	   physical address. */
-+	movi	LRESVEC_block-CONFIG_PAGE_OFFSET, r20
-+	andi	r20, -4, r20			/* reset reserved */
-+	ori	r20, 1, r20			/* set MMUOFF */
-+	putcon	r19, VBR
-+	putcon	r20, RESVEC
-+
-+	/* Sanity check */
-+	movi	LVBR_block_end, r21
-+	andi	r21, -4, r21
-+	movi	BLOCK_SIZE, r29			/* r29 = expected size */
-+	or	r19, ZERO, r30
-+	add	r19, r29, r19
++static void do_signal(struct pt_regs *regs, unsigned int save_r0)
++{
++	siginfo_t info;
++	int signr;
++	struct k_sigaction ka;
++	sigset_t *oldset;
 +
 +	/*
-+	 * Ugly, but better loop forever now than crash afterwards.
-+	 * We should print a message, but if we touch LVBR or
-+	 * LRESVEC blocks we should not be surprised if we get stuck
-+	 * in trap_init().
++	 * We want the common case to go fast, which
++	 * is why we may in certain cases get here from
++	 * kernel mode. Just return without doing anything
++	 * if so.
 +	 */
-+	pta	trap_init_loop, tr1
-+	gettr	tr1, r28			/* r28 = trap_init_loop */
-+	sub	r21, r30, r30			/* r30 = actual size */
++	if (!user_mode(regs))
++		return;
 +
-+	/*
-+	 * VBR/RESVEC handlers overlap by being bigger than
-+	 * allowed. Very bad. Just loop forever.
-+	 * (r28) panic/loop address
-+	 * (r29) expected size
-+	 * (r30) actual size
-+	 */
-+trap_init_loop:
-+	bne	r19, r21, tr1
++	if (try_to_freeze())
++		goto no_signal;
 +
-+	/* Now that exception vectors are set up reset SR.BL */
-+	getcon 	SR, r22
-+	movi	SR_UNBLOCK_EXC, r23
-+	and	r22, r23, r22
-+	putcon	r22, SR
++	if (test_thread_flag(TIF_RESTORE_SIGMASK))
++		oldset = &current->saved_sigmask;
++	else
++		oldset = &current->blocked;
 +
-+	addi	SP, 24, SP
-+	ptabs	LINK, tr0
-+	blink	tr0, ZERO
++	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
++	if (signr > 0) {
++		/* Whee!  Actually deliver the signal.  */
++		if (handle_signal(signr, &ka, &info, oldset,
++				  regs, save_r0) == 0) {
++			/* a signal was successfully delivered; the saved
++			 * sigmask will have been stored in the signal frame,
++			 * and will be restored by sigreturn, so we can simply
++			 * clear the TIF_RESTORE_SIGMASK flag */
++			if (test_thread_flag(TIF_RESTORE_SIGMASK))
++				clear_thread_flag(TIF_RESTORE_SIGMASK);
++		}
 +
-diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
++		return;
++	}
++
++ no_signal:
++	/* Did we come from a system call? */
++	if (regs->tra >= 0) {
++		/* Restart the system call - no handlers present */
++		if (regs->regs[0] == -ERESTARTNOHAND ||
++		    regs->regs[0] == -ERESTARTSYS ||
++		    regs->regs[0] == -ERESTARTNOINTR) {
++			regs->regs[0] = save_r0;
++			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
++		} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
++			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
++			regs->regs[3] = __NR_restart_syscall;
++		}
++	}
++
++	/* if there's no signal to deliver, we just put the saved sigmask
++	 * back */
++	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
++		clear_thread_flag(TIF_RESTORE_SIGMASK);
++		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
++	}
++}
++
++asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
++				 __u32 thread_info_flags)
++{
++	/* deal with pending signal delivery */
++	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
++		do_signal(regs, save_r0);
++}
+diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
 new file mode 100644
-index 0000000..30b76a9
+index 0000000..80bde19
 --- /dev/null
-+++ b/arch/sh/kernel/cpu/sh5/fpu.c
-@@ -0,0 +1,166 @@
++++ b/arch/sh/kernel/signal_64.c
+@@ -0,0 +1,751 @@
 +/*
-+ * arch/sh/kernel/cpu/sh5/fpu.c
-+ *
-+ * Copyright (C) 2001  Manuela Cirronis, Paolo Alberelli
-+ * Copyright (C) 2002  STMicroelectronics Limited
-+ *   Author : Stuart Menefy
++ * arch/sh/kernel/signal_64.c
 + *
-+ * Started from SH4 version:
-+ *   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003  Paul Mundt
++ * Copyright (C) 2004  Richard Curnow
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + */
++#include <linux/rwsem.h>
 +#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/kernel.h>
 +#include <linux/signal.h>
-+#include <asm/processor.h>
-+#include <asm/user.h>
-+#include <asm/io.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/personality.h>
++#include <linux/freezer.h>
++#include <linux/ptrace.h>
++#include <linux/unistd.h>
++#include <linux/stddef.h>
++#include <asm/ucontext.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/cacheflush.h>
++
++#define REG_RET 9
++#define REG_ARG1 2
++#define REG_ARG2 3
++#define REG_ARG3 4
++#define REG_SP 15
++#define REG_PR 18
++#define REF_REG_RET regs->regs[REG_RET]
++#define REF_REG_SP regs->regs[REG_SP]
++#define DEREF_REG_PR regs->regs[REG_PR]
++
++#define DEBUG_SIG 0
++
++#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
++
++asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
 +
 +/*
-+ * Initially load the FPU with signalling NANS.  This bit pattern
-+ * has the property that no matter whether considered as single or as
-+ * double precision, it still represents a signalling NAN.
++ * Atomically swap in the new signal mask, and wait for a signal.
 + */
-+#define sNAN64		0xFFFFFFFFFFFFFFFFULL
-+#define sNAN32		0xFFFFFFFFUL
-+
-+static union sh_fpu_union init_fpuregs = {
-+	.hard = {
-+		.fp_regs = { [0 ... 63] = sNAN32 },
-+		.fpscr = FPSCR_INIT
-+	}
-+};
 +
-+void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
++asmlinkage int
++sys_sigsuspend(old_sigset_t mask,
++	       unsigned long r3, unsigned long r4, unsigned long r5,
++	       unsigned long r6, unsigned long r7,
++	       struct pt_regs * regs)
 +{
-+	asm volatile("fst.p     %0, (0*8), fp0\n\t"
-+		     "fst.p     %0, (1*8), fp2\n\t"
-+		     "fst.p     %0, (2*8), fp4\n\t"
-+		     "fst.p     %0, (3*8), fp6\n\t"
-+		     "fst.p     %0, (4*8), fp8\n\t"
-+		     "fst.p     %0, (5*8), fp10\n\t"
-+		     "fst.p     %0, (6*8), fp12\n\t"
-+		     "fst.p     %0, (7*8), fp14\n\t"
-+		     "fst.p     %0, (8*8), fp16\n\t"
-+		     "fst.p     %0, (9*8), fp18\n\t"
-+		     "fst.p     %0, (10*8), fp20\n\t"
-+		     "fst.p     %0, (11*8), fp22\n\t"
-+		     "fst.p     %0, (12*8), fp24\n\t"
-+		     "fst.p     %0, (13*8), fp26\n\t"
-+		     "fst.p     %0, (14*8), fp28\n\t"
-+		     "fst.p     %0, (15*8), fp30\n\t"
-+		     "fst.p     %0, (16*8), fp32\n\t"
-+		     "fst.p     %0, (17*8), fp34\n\t"
-+		     "fst.p     %0, (18*8), fp36\n\t"
-+		     "fst.p     %0, (19*8), fp38\n\t"
-+		     "fst.p     %0, (20*8), fp40\n\t"
-+		     "fst.p     %0, (21*8), fp42\n\t"
-+		     "fst.p     %0, (22*8), fp44\n\t"
-+		     "fst.p     %0, (23*8), fp46\n\t"
-+		     "fst.p     %0, (24*8), fp48\n\t"
-+		     "fst.p     %0, (25*8), fp50\n\t"
-+		     "fst.p     %0, (26*8), fp52\n\t"
-+		     "fst.p     %0, (27*8), fp54\n\t"
-+		     "fst.p     %0, (28*8), fp56\n\t"
-+		     "fst.p     %0, (29*8), fp58\n\t"
-+		     "fst.p     %0, (30*8), fp60\n\t"
-+		     "fst.p     %0, (31*8), fp62\n\t"
++	sigset_t saveset;
 +
-+		     "fgetscr   fr63\n\t"
-+		     "fst.s     %0, (32*8), fr63\n\t"
-+		: /* no output */
-+		: "r" (&tsk->thread.fpu.hard)
-+		: "memory");
++	mask &= _BLOCKABLE;
++	spin_lock_irq(&current->sighand->siglock);
++	saveset = current->blocked;
++	siginitset(&current->blocked, mask);
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++
++	REF_REG_RET = -EINTR;
++	while (1) {
++		current->state = TASK_INTERRUPTIBLE;
++		schedule();
++		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
++		if (do_signal(regs, &saveset)) {
++			/* pc now points at signal handler. Need to decrement
++			   it because entry.S will increment it. */
++			regs->pc -= 4;
++			return -EINTR;
++		}
++	}
 +}
 +
-+static inline void
-+fpload(struct sh_fpu_hard_struct *fpregs)
++asmlinkage int
++sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
++	          unsigned long r4, unsigned long r5, unsigned long r6,
++	          unsigned long r7,
++	          struct pt_regs * regs)
 +{
-+	asm volatile("fld.p     %0, (0*8), fp0\n\t"
-+		     "fld.p     %0, (1*8), fp2\n\t"
-+		     "fld.p     %0, (2*8), fp4\n\t"
-+		     "fld.p     %0, (3*8), fp6\n\t"
-+		     "fld.p     %0, (4*8), fp8\n\t"
-+		     "fld.p     %0, (5*8), fp10\n\t"
-+		     "fld.p     %0, (6*8), fp12\n\t"
-+		     "fld.p     %0, (7*8), fp14\n\t"
-+		     "fld.p     %0, (8*8), fp16\n\t"
-+		     "fld.p     %0, (9*8), fp18\n\t"
-+		     "fld.p     %0, (10*8), fp20\n\t"
-+		     "fld.p     %0, (11*8), fp22\n\t"
-+		     "fld.p     %0, (12*8), fp24\n\t"
-+		     "fld.p     %0, (13*8), fp26\n\t"
-+		     "fld.p     %0, (14*8), fp28\n\t"
-+		     "fld.p     %0, (15*8), fp30\n\t"
-+		     "fld.p     %0, (16*8), fp32\n\t"
-+		     "fld.p     %0, (17*8), fp34\n\t"
-+		     "fld.p     %0, (18*8), fp36\n\t"
-+		     "fld.p     %0, (19*8), fp38\n\t"
-+		     "fld.p     %0, (20*8), fp40\n\t"
-+		     "fld.p     %0, (21*8), fp42\n\t"
-+		     "fld.p     %0, (22*8), fp44\n\t"
-+		     "fld.p     %0, (23*8), fp46\n\t"
-+		     "fld.p     %0, (24*8), fp48\n\t"
-+		     "fld.p     %0, (25*8), fp50\n\t"
-+		     "fld.p     %0, (26*8), fp52\n\t"
-+		     "fld.p     %0, (27*8), fp54\n\t"
-+		     "fld.p     %0, (28*8), fp56\n\t"
-+		     "fld.p     %0, (29*8), fp58\n\t"
-+		     "fld.p     %0, (30*8), fp60\n\t"
++	sigset_t saveset, newset;
 +
-+		     "fld.s     %0, (32*8), fr63\n\t"
-+		     "fputscr   fr63\n\t"
++	/* XXX: Don't preclude handling different sized sigset_t's.  */
++	if (sigsetsize != sizeof(sigset_t))
++		return -EINVAL;
 +
-+		     "fld.p     %0, (31*8), fp62\n\t"
-+		: /* no output */
-+		: "r" (fpregs) );
++	if (copy_from_user(&newset, unewset, sizeof(newset)))
++		return -EFAULT;
++	sigdelsetmask(&newset, ~_BLOCKABLE);
++	spin_lock_irq(&current->sighand->siglock);
++	saveset = current->blocked;
++	current->blocked = newset;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++
++	REF_REG_RET = -EINTR;
++	while (1) {
++		current->state = TASK_INTERRUPTIBLE;
++		schedule();
++		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
++		if (do_signal(regs, &saveset)) {
++			/* pc now points at signal handler. Need to decrement
++			   it because entry.S will increment it. */
++			regs->pc -= 4;
++			return -EINTR;
++		}
++	}
 +}
 +
-+void fpinit(struct sh_fpu_hard_struct *fpregs)
++asmlinkage int
++sys_sigaction(int sig, const struct old_sigaction __user *act,
++	      struct old_sigaction __user *oact)
 +{
-+	*fpregs = init_fpuregs.hard;
++	struct k_sigaction new_ka, old_ka;
++	int ret;
++
++	if (act) {
++		old_sigset_t mask;
++		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
++		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
++		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
++			return -EFAULT;
++		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
++		__get_user(mask, &act->sa_mask);
++		siginitset(&new_ka.sa.sa_mask, mask);
++	}
++
++	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
++
++	if (!ret && oact) {
++		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
++		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
++		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
++			return -EFAULT;
++		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
++		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
++	}
++
++	return ret;
 +}
 +
-+asmlinkage void
-+do_fpu_error(unsigned long ex, struct pt_regs *regs)
++asmlinkage int
++sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
++	        unsigned long r4, unsigned long r5, unsigned long r6,
++	        unsigned long r7,
++	        struct pt_regs * regs)
 +{
-+	struct task_struct *tsk = current;
++	return do_sigaltstack(uss, uoss, REF_REG_SP);
++}
 +
-+	regs->pc += 4;
 +
-+	tsk->thread.trap_no = 11;
-+	tsk->thread.error_code = 0;
-+	force_sig(SIGFPE, tsk);
-+}
++/*
++ * Do a signal return; undo the signal stack.
++ */
 +
++struct sigframe
++{
++	struct sigcontext sc;
++	unsigned long extramask[_NSIG_WORDS-1];
++	long long retcode[2];
++};
 +
-+asmlinkage void
-+do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
++struct rt_sigframe
 +{
-+	void die(const char *str, struct pt_regs *regs, long err);
++	struct siginfo __user *pinfo;
++	void *puc;
++	struct siginfo info;
++	struct ucontext uc;
++	long long retcode[2];
++};
 +
-+	if (! user_mode(regs))
-+		die("FPU used in kernel", regs, ex);
++#ifdef CONFIG_SH_FPU
++static inline int
++restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
++{
++	int err = 0;
++	int fpvalid;
 +
-+	regs->sr &= ~SR_FD;
++	err |= __get_user (fpvalid, &sc->sc_fpvalid);
++	conditional_used_math(fpvalid);
++	if (! fpvalid)
++		return err;
 +
-+	if (last_task_used_math == current)
-+		return;
++	if (current == last_task_used_math) {
++		last_task_used_math = NULL;
++		regs->sr |= SR_FD;
++	}
 +
-+	enable_fpu();
-+	if (last_task_used_math != NULL)
-+		/* Other processes fpu state, save away */
-+		save_fpu(last_task_used_math, regs);
++	err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
++				(sizeof(long long) * 32) + (sizeof(int) * 1));
 +
-+        last_task_used_math = current;
-+        if (used_math()) {
-+                fpload(&current->thread.fpu.hard);
-+        } else {
-+		/* First time FPU user.  */
-+		fpload(&init_fpuregs.hard);
-+                set_used_math();
-+        }
-+	disable_fpu();
++	return err;
 +}
-diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
-new file mode 100644
-index 0000000..15d167f
---- /dev/null
-+++ b/arch/sh/kernel/cpu/sh5/probe.c
-@@ -0,0 +1,76 @@
-+/*
-+ * arch/sh/kernel/cpu/sh5/probe.c
-+ *
-+ * CPU Subtype Probing for SH-5.
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003 - 2007  Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/init.h>
-+#include <linux/io.h>
-+#include <linux/string.h>
-+#include <asm/processor.h>
-+#include <asm/cache.h>
 +
-+int __init detect_cpu_and_cache_system(void)
++static inline int
++setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
 +{
-+	unsigned long long cir;
-+
-+	/* Do peeks in real mode to avoid having to set up a mapping for the
-+	   WPC registers. On SH5-101 cut2, such a mapping would be exposed to
-+	   an address translation erratum which would make it hard to set up
-+	   correctly. */
-+	cir = peek_real_address_q(0x0d000008);
-+	if ((cir & 0xffff) == 0x5103) {
-+		boot_cpu_data.type = CPU_SH5_103;
-+	} else if (((cir >> 32) & 0xffff) == 0x51e2) {
-+		/* CPU.VCR aliased at CIR address on SH5-101 */
-+		boot_cpu_data.type = CPU_SH5_101;
-+	} else {
-+		boot_cpu_data.type = CPU_SH_NONE;
-+	}
++	int err = 0;
++	int fpvalid;
 +
-+	/*
-+	 * First, setup some sane values for the I-cache.
-+	 */
-+	boot_cpu_data.icache.ways		= 4;
-+	boot_cpu_data.icache.sets		= 256;
-+	boot_cpu_data.icache.linesz		= L1_CACHE_BYTES;
++	fpvalid = !!used_math();
++	err |= __put_user(fpvalid, &sc->sc_fpvalid);
++	if (! fpvalid)
++		return err;
 +
-+#if 0
-+	/*
-+	 * FIXME: This can probably be cleaned up a bit as well.. for example,
-+	 * do we really need the way shift _and_ the way_step_shift ?? Judging
-+	 * by the existing code, I would guess no.. is there any valid reason
-+	 * why we need to be tracking this around?
-+	 */
-+	boot_cpu_data.icache.way_shift		= 13;
-+	boot_cpu_data.icache.entry_shift	= 5;
-+	boot_cpu_data.icache.set_shift		= 4;
-+	boot_cpu_data.icache.way_step_shift	= 16;
-+	boot_cpu_data.icache.asid_shift		= 2;
++	if (current == last_task_used_math) {
++		enable_fpu();
++		save_fpu(current, regs);
++		disable_fpu();
++		last_task_used_math = NULL;
++		regs->sr |= SR_FD;
++	}
 +
-+	/*
-+	 * way offset = cache size / associativity, so just don't factor in
-+	 * associativity in the first place..
-+	 */
-+	boot_cpu_data.icache.way_ofs	= boot_cpu_data.icache.sets *
-+					  boot_cpu_data.icache.linesz;
++	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
++			      (sizeof(long long) * 32) + (sizeof(int) * 1));
++	clear_used_math();
 +
-+	boot_cpu_data.icache.asid_mask		= 0x3fc;
-+	boot_cpu_data.icache.idx_mask		= 0x1fe0;
-+	boot_cpu_data.icache.epn_mask		= 0xffffe000;
++	return err;
++}
++#else
++static inline int
++restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
++{
++	return 0;
++}
++static inline int
++setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
++{
++	return 0;
++}
 +#endif
 +
-+	boot_cpu_data.icache.flags		= 0;
++static int
++restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
++{
++	unsigned int err = 0;
++        unsigned long long current_sr, new_sr;
++#define SR_MASK 0xffff8cfd
 +
-+	/* A trivial starting point.. */
-+	memcpy(&boot_cpu_data.dcache,
-+	       &boot_cpu_data.icache, sizeof(struct cache_info));
++#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
 +
-+	return 0;
-+}
-diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
-new file mode 100644
-index 0000000..45c351b
---- /dev/null
-+++ b/arch/sh/kernel/cpu/sh5/switchto.S
-@@ -0,0 +1,198 @@
-+/*
-+ * arch/sh/kernel/cpu/sh5/switchto.S
-+ *
-+ * sh64 context switch
-+ *
-+ * Copyright (C) 2004  Richard Curnow
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+*/
++	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
++	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
++	COPY(regs[8]);	COPY(regs[9]);  COPY(regs[10]);	COPY(regs[11]);
++	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
++	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
++	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
++	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
++	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
++	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
++	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
++	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
++	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
++	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
++	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
++	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
++	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
++	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
++	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
 +
-+	.section .text..SHmedia32,"ax"
-+	.little
++        /* Prevent the signal handler manipulating SR in a way that can
++           crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
++           modified */
++        current_sr = regs->sr;
++        err |= __get_user(new_sr, &sc->sc_sr);
++        regs->sr &= SR_MASK;
++        regs->sr |= (new_sr & ~SR_MASK);
 +
-+	.balign 32
++	COPY(pc);
 +
-+	.type sh64_switch_to,@function
-+	.global sh64_switch_to
-+	.global __sh64_switch_to_end
-+sh64_switch_to:
++#undef COPY
 +
-+/* Incoming args
-+   r2 - prev
-+   r3 - &prev->thread
-+   r4 - next
-+   r5 - &next->thread
++	/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
++	 * has been restored above.) */
++	err |= restore_sigcontext_fpu(regs, sc);
 +
-+   Outgoing results
-+   r2 - last (=prev) : this just stays in r2 throughout
++	regs->syscall_nr = -1;		/* disable syscall checks */
++	err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
++	return err;
++}
 +
-+   Want to create a full (struct pt_regs) on the stack to allow backtracing
-+   functions to work.  However, we only need to populate the callee-save
-+   register slots in this structure; since we're a function our ancestors must
-+   have themselves preserved all caller saved state in the stack.  This saves
-+   some wasted effort since we won't need to look at the values.
++asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
++				   unsigned long r4, unsigned long r5,
++				   unsigned long r6, unsigned long r7,
++				   struct pt_regs * regs)
++{
++	struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
++	sigset_t set;
++	long long ret;
 +
-+   In particular, all caller-save registers are immediately available for
-+   scratch use.
++	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
++		goto badframe;
 +
-+*/
++	if (__get_user(set.sig[0], &frame->sc.oldmask)
++	    || (_NSIG_WORDS > 1
++		&& __copy_from_user(&set.sig[1], &frame->extramask,
++				    sizeof(frame->extramask))))
++		goto badframe;
 +
-+#define FRAME_SIZE (76*8 + 8)
++	sigdelsetmask(&set, ~_BLOCKABLE);
 +
-+	movi	FRAME_SIZE, r0
-+	sub.l	r15, r0, r15
-+	! Do normal-style register save to support backtrace
++	spin_lock_irq(&current->sighand->siglock);
++	current->blocked = set;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
 +
-+	st.l	r15,   0, r18	! save link reg
-+	st.l	r15,   4, r14	! save fp
-+	add.l	r15, r63, r14	! setup frame pointer
++	if (restore_sigcontext(regs, &frame->sc, &ret))
++		goto badframe;
++	regs->pc -= 4;
 +
-+	! hopefully this looks normal to the backtrace now.
++	return (int) ret;
 +
-+	addi.l	r15,   8, r1    ! base of pt_regs
-+	addi.l	r1,   24, r0    ! base of pt_regs.regs
-+	addi.l	r0, (63*8), r8	! base of pt_regs.trregs
++badframe:
++	force_sig(SIGSEGV, current);
++	return 0;
++}
 +
-+	/* Note : to be fixed?
-+	   struct pt_regs is really designed for holding the state on entry
-+	   to an exception, i.e. pc,sr,regs etc.  However, for the context
-+	   switch state, some of this is not required.  But the unwinder takes
-+	   struct pt_regs * as an arg so we have to build this structure
-+	   to allow unwinding switched tasks in show_state() */
++asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
++				unsigned long r4, unsigned long r5,
++				unsigned long r6, unsigned long r7,
++				struct pt_regs * regs)
++{
++	struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
++	sigset_t set;
++	stack_t __user st;
++	long long ret;
 +
-+	st.q	r0, ( 9*8), r9
-+	st.q	r0, (10*8), r10
-+	st.q	r0, (11*8), r11
-+	st.q	r0, (12*8), r12
-+	st.q	r0, (13*8), r13
-+	st.q	r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
-+	! the point where the process is left in suspended animation, i.e. current
-+	! fp here, not the saved one.
-+	st.q	r0, (16*8), r16
++	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
++		goto badframe;
 +
-+	st.q	r0, (24*8), r24
-+	st.q	r0, (25*8), r25
-+	st.q	r0, (26*8), r26
-+	st.q	r0, (27*8), r27
-+	st.q	r0, (28*8), r28
-+	st.q	r0, (29*8), r29
-+	st.q	r0, (30*8), r30
-+	st.q	r0, (31*8), r31
-+	st.q	r0, (32*8), r32
-+	st.q	r0, (33*8), r33
-+	st.q	r0, (34*8), r34
-+	st.q	r0, (35*8), r35
++	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
++		goto badframe;
 +
-+	st.q	r0, (44*8), r44
-+	st.q	r0, (45*8), r45
-+	st.q	r0, (46*8), r46
-+	st.q	r0, (47*8), r47
-+	st.q	r0, (48*8), r48
-+	st.q	r0, (49*8), r49
-+	st.q	r0, (50*8), r50
-+	st.q	r0, (51*8), r51
-+	st.q	r0, (52*8), r52
-+	st.q	r0, (53*8), r53
-+	st.q	r0, (54*8), r54
-+	st.q	r0, (55*8), r55
-+	st.q	r0, (56*8), r56
-+	st.q	r0, (57*8), r57
-+	st.q	r0, (58*8), r58
-+	st.q	r0, (59*8), r59
++	sigdelsetmask(&set, ~_BLOCKABLE);
++	spin_lock_irq(&current->sighand->siglock);
++	current->blocked = set;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
 +
-+	! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
-+	! Use a local label to avoid creating a symbol that will confuse the !
-+	! backtrace
-+	pta	.Lsave_pc, tr0
++	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
++		goto badframe;
++	regs->pc -= 4;
 +
-+	gettr	tr5, r45
-+	gettr	tr6, r46
-+	gettr	tr7, r47
-+	st.q	r8, (5*8), r45
-+	st.q	r8, (6*8), r46
-+	st.q	r8, (7*8), r47
++	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
++		goto badframe;
++	/* It is more difficult to avoid calling this function than to
++	   call it and ignore errors.  */
++	do_sigaltstack(&st, NULL, REF_REG_SP);
 +
-+	! Now switch context
-+	gettr	tr0, r9
-+	st.l	r3, 0, r15	! prev->thread.sp
-+	st.l	r3, 8, r1	! prev->thread.kregs
-+	st.l	r3, 4, r9	! prev->thread.pc
-+	st.q	r1, 0, r9	! save prev->thread.pc into pt_regs->pc
++	return (int) ret;
 +
-+	! Load PC for next task (init value or save_pc later)
-+	ld.l	r5, 4, r18	! next->thread.pc
-+	! Switch stacks
-+	ld.l	r5, 0, r15	! next->thread.sp
-+	ptabs	r18, tr0
++badframe:
++	force_sig(SIGSEGV, current);
++	return 0;
++}
 +
-+	! Update current
-+	ld.l	r4, 4, r9	! next->thread_info (2nd element of next task_struct)
-+	putcon	r9, kcr0	! current = next->thread_info
++/*
++ * Set up a signal frame.
++ */
 +
-+	! go to save_pc for a reschedule, or the initial thread.pc for a new process
-+	blink	tr0, r63
++static int
++setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
++		 unsigned long mask)
++{
++	int err = 0;
 +
-+	! Restore (when we come back to a previously saved task)
-+.Lsave_pc:
-+	addi.l	r15, 32, r0	! r0 = next's regs
-+	addi.l	r0, (63*8), r8	! r8 = next's tr_regs
++	/* Do this first, otherwise is this sets sr->fd, that value isn't preserved. */
++	err |= setup_sigcontext_fpu(regs, sc);
 +
-+	ld.q	r8, (5*8), r45
-+	ld.q	r8, (6*8), r46
-+	ld.q	r8, (7*8), r47
-+	ptabs	r45, tr5
-+	ptabs	r46, tr6
-+	ptabs	r47, tr7
++#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
 +
-+	ld.q	r0, ( 9*8), r9
-+	ld.q	r0, (10*8), r10
-+	ld.q	r0, (11*8), r11
-+	ld.q	r0, (12*8), r12
-+	ld.q	r0, (13*8), r13
-+	ld.q	r0, (14*8), r14
-+	ld.q	r0, (16*8), r16
++	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
++	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
++	COPY(regs[8]);	COPY(regs[9]);	COPY(regs[10]);	COPY(regs[11]);
++	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
++	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
++	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
++	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
++	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
++	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
++	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
++	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
++	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
++	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
++	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
++	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
++	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
++	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
++	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
++	COPY(sr);	COPY(pc);
 +
-+	ld.q	r0, (24*8), r24
-+	ld.q	r0, (25*8), r25
-+	ld.q	r0, (26*8), r26
-+	ld.q	r0, (27*8), r27
-+	ld.q	r0, (28*8), r28
-+	ld.q	r0, (29*8), r29
-+	ld.q	r0, (30*8), r30
-+	ld.q	r0, (31*8), r31
-+	ld.q	r0, (32*8), r32
-+	ld.q	r0, (33*8), r33
-+	ld.q	r0, (34*8), r34
-+	ld.q	r0, (35*8), r35
++#undef COPY
 +
-+	ld.q	r0, (44*8), r44
-+	ld.q	r0, (45*8), r45
-+	ld.q	r0, (46*8), r46
-+	ld.q	r0, (47*8), r47
-+	ld.q	r0, (48*8), r48
-+	ld.q	r0, (49*8), r49
-+	ld.q	r0, (50*8), r50
-+	ld.q	r0, (51*8), r51
-+	ld.q	r0, (52*8), r52
-+	ld.q	r0, (53*8), r53
-+	ld.q	r0, (54*8), r54
-+	ld.q	r0, (55*8), r55
-+	ld.q	r0, (56*8), r56
-+	ld.q	r0, (57*8), r57
-+	ld.q	r0, (58*8), r58
-+	ld.q	r0, (59*8), r59
++	err |= __put_user(mask, &sc->oldmask);
 +
-+	! epilogue
-+	ld.l	r15, 0, r18
-+	ld.l	r15, 4, r14
-+	ptabs	r18, tr0
-+	movi	FRAME_SIZE, r0
-+	add	r15, r0, r15
-+	blink	tr0, r63
-+__sh64_switch_to_end:
-+.LFE1:
-+	.size	sh64_switch_to,.LFE1-sh64_switch_to
++	return err;
++}
 +
-diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
-new file mode 100644
-index 0000000..119c20a
---- /dev/null
-+++ b/arch/sh/kernel/cpu/sh5/unwind.c
-@@ -0,0 +1,326 @@
 +/*
-+ * arch/sh/kernel/cpu/sh5/unwind.c
-+ *
-+ * Copyright (C) 2004  Paul Mundt
-+ * Copyright (C) 2004  Richard Curnow
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
++ * Determine which stack to use..
 + */
-+#include <linux/kallsyms.h>
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/errno.h>
-+#include <asm/page.h>
-+#include <asm/ptrace.h>
-+#include <asm/processor.h>
-+#include <asm/io.h>
++static inline void __user *
++get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
++{
++	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
++		sp = current->sas_ss_sp + current->sas_ss_size;
 +
-+static u8 regcache[63];
++	return (void __user *)((sp - frame_size) & -8ul);
++}
 +
-+/*
-+ * Finding the previous stack frame isn't horribly straightforward as it is
-+ * on some other platforms. In the sh64 case, we don't have "linked" stack
-+ * frames, so we need to do a bit of work to determine the previous frame,
-+ * and in turn, the previous r14/r18 pair.
-+ *
-+ * There are generally a few cases which determine where we can find out
-+ * the r14/r18 values. In the general case, this can be determined by poking
-+ * around the prologue of the symbol PC is in (note that we absolutely must
-+ * have frame pointer support as well as the kernel symbol table mapped,
-+ * otherwise we can't even get this far).
-+ *
-+ * In other cases, such as the interrupt/exception path, we can poke around
-+ * the sp/fp.
-+ *
-+ * Notably, this entire approach is somewhat error prone, and in the event
-+ * that the previous frame cannot be determined, that's all we can do.
-+ * Either way, this still leaves us with a more correct backtrace then what
-+ * we would be able to come up with by walking the stack (which is garbage
-+ * for anything beyond the first frame).
-+ *						-- PFM.
-+ */
-+static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
-+		      unsigned long *pprev_fp, unsigned long *pprev_pc,
-+		      struct pt_regs *regs)
++void sa_default_restorer(void);		/* See comments below */
++void sa_default_rt_restorer(void);	/* See comments below */
++
++static void setup_frame(int sig, struct k_sigaction *ka,
++			sigset_t *set, struct pt_regs *regs)
 +{
-+	const char *sym;
-+	char namebuf[128];
-+	unsigned long offset;
-+	unsigned long prologue = 0;
-+	unsigned long fp_displacement = 0;
-+	unsigned long fp_prev = 0;
-+	unsigned long offset_r14 = 0, offset_r18 = 0;
-+	int i, found_prologue_end = 0;
++	struct sigframe __user *frame;
++	int err = 0;
++	int signal;
 +
-+	sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
-+	if (!sym)
-+		return -EINVAL;
++	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
 +
-+	prologue = pc - offset;
-+	if (!prologue)
-+		return -EINVAL;
++	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
++		goto give_sigsegv;
 +
-+	/* Validate fp, to avoid risk of dereferencing a bad pointer later.
-+	   Assume 128Mb since that's the amount of RAM on a Cayman.  Modify
-+	   when there is an SH-5 board with more. */
-+	if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
-+	    (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
-+	    ((fp & 7) != 0)) {
-+		return -EINVAL;
-+	}
++	signal = current_thread_info()->exec_domain
++		&& current_thread_info()->exec_domain->signal_invmap
++		&& sig < 32
++		? current_thread_info()->exec_domain->signal_invmap[sig]
++		: sig;
 +
-+	/*
-+	 * Depth to walk, depth is completely arbitrary.
-+	 */
-+	for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
-+		unsigned long op;
-+		u8 major, minor;
-+		u8 src, dest, disp;
++	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
 +
-+		op = *(unsigned long *)prologue;
++	/* Give up earlier as i386, in case */
++	if (err)
++		goto give_sigsegv;
 +
-+		major = (op >> 26) & 0x3f;
-+		src   = (op >> 20) & 0x3f;
-+		minor = (op >> 16) & 0xf;
-+		disp  = (op >> 10) & 0x3f;
-+		dest  = (op >>  4) & 0x3f;
++	if (_NSIG_WORDS > 1) {
++		err |= __copy_to_user(frame->extramask, &set->sig[1],
++				      sizeof(frame->extramask)); }
++
++	/* Give up earlier as i386, in case */
++	if (err)
++		goto give_sigsegv;
++
++	/* Set up to return from userspace.  If provided, use a stub
++	   already in userspace.  */
++	if (ka->sa.sa_flags & SA_RESTORER) {
++		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
 +
 +		/*
-+		 * Stack frame creation happens in a number of ways.. in the
-+		 * general case when the stack frame is less than 511 bytes,
-+		 * it's generally created by an addi or addi.l:
-+		 *
-+		 *	addi/addi.l r15, -FRAME_SIZE, r15
-+		 *
-+		 * in the event that the frame size is bigger than this, it's
-+		 * typically created using a movi/sub pair as follows:
-+		 *
-+		 *	movi	FRAME_SIZE, rX
-+		 *	sub	r15, rX, r15
++		 * On SH5 all edited pointers are subject to NEFF
++		 */
++		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
++        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
++	} else {
++		/*
++		 * Different approach on SH5.
++	         * . Endianness independent asm code gets placed in entry.S .
++		 *   This is limited to four ASM instructions corresponding
++		 *   to two long longs in size.
++		 * . err checking is done on the else branch only
++		 * . flush_icache_range() is called upon __put_user() only
++		 * . all edited pointers are subject to NEFF
++		 * . being code, linker turns ShMedia bit on, always
++		 *   dereference index -1.
 +		 */
++		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
++		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
++        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
 +
-+		switch (major) {
-+		case (0x00 >> 2):
-+			switch (minor) {
-+			case 0x8: /* add.l */
-+			case 0x9: /* add */
-+				/* Look for r15, r63, r14 */
-+				if (src == 15 && disp == 63 && dest == 14)
-+					found_prologue_end = 1;
++		if (__copy_to_user(frame->retcode,
++			(unsigned long long)sa_default_restorer & (~1), 16) != 0)
++			goto give_sigsegv;
 +
-+				break;
-+			case 0xa: /* sub.l */
-+			case 0xb: /* sub */
-+				if (src != 15 || dest != 15)
-+					continue;
++		/* Cohere the trampoline with the I-cache. */
++		flush_cache_sigtramp(DEREF_REG_PR-1);
++	}
 +
-+				fp_displacement -= regcache[disp];
-+				fp_prev = fp - fp_displacement;
-+				break;
-+			}
-+			break;
-+		case (0xa8 >> 2): /* st.l */
-+			if (src != 15)
-+				continue;
++	/*
++	 * Set up registers for signal handler.
++	 * All edited pointers are subject to NEFF.
++	 */
++	regs->regs[REG_SP] = (unsigned long) frame;
++	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
++        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
++	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
 +
-+			switch (dest) {
-+			case 14:
-+				if (offset_r14 || fp_displacement == 0)
-+					continue;
++        /* FIXME:
++           The glibc profiling support for SH-5 needs to be passed a sigcontext
++           so it can retrieve the PC.  At some point during 2003 the glibc
++           support was changed to receive the sigcontext through the 2nd
++           argument, but there are still versions of libc.so in use that use
++           the 3rd argument.  Until libc.so is stabilised, pass the sigcontext
++           through both 2nd and 3rd arguments.
++        */
 +
-+				offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
-+				offset_r14 *= sizeof(unsigned long);
-+				offset_r14 += fp_displacement;
-+				break;
-+			case 18:
-+				if (offset_r18 || fp_displacement == 0)
-+					continue;
++	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
++	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
 +
-+				offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
-+				offset_r18 *= sizeof(unsigned long);
-+				offset_r18 += fp_displacement;
-+				break;
-+			}
++	regs->pc = (unsigned long) ka->sa.sa_handler;
++	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
 +
-+			break;
-+		case (0xcc >> 2): /* movi */
-+			if (dest >= 63) {
-+				printk(KERN_NOTICE "%s: Invalid dest reg %d "
-+				       "specified in movi handler. Failed "
-+				       "opcode was 0x%lx: ", __FUNCTION__,
-+				       dest, op);
++	set_fs(USER_DS);
 +
-+				continue;
-+			}
++#if DEBUG_SIG
++	/* Broken %016Lx */
++	printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
++		signal,
++		current->comm, current->pid, frame,
++		regs->pc >> 32, regs->pc & 0xffffffff,
++		DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
++#endif
 +
-+			/* Sign extend */
-+			regcache[dest] =
-+				((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
-+			break;
-+		case (0xd0 >> 2): /* addi */
-+		case (0xd4 >> 2): /* addi.l */
-+			/* Look for r15, -FRAME_SIZE, r15 */
-+			if (src != 15 || dest != 15)
-+				continue;
++	return;
 +
-+			/* Sign extended frame size.. */
-+			fp_displacement +=
-+				(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
-+			fp_prev = fp - fp_displacement;
-+			break;
-+		}
++give_sigsegv:
++	force_sigsegv(sig, current);
++}
 +
-+		if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
-+			break;
-+	}
++static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
++			   sigset_t *set, struct pt_regs *regs)
++{
++	struct rt_sigframe __user *frame;
++	int err = 0;
++	int signal;
 +
-+	if (offset_r14 == 0 || fp_prev == 0) {
-+		if (!offset_r14)
-+			pr_debug("Unable to find r14 offset\n");
-+		if (!fp_prev)
-+			pr_debug("Unable to find previous fp\n");
++	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
 +
-+		return -EINVAL;
-+	}
++	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
++		goto give_sigsegv;
 +
-+	/* For innermost leaf function, there might not be a offset_r18 */
-+	if (!*pprev_pc && (offset_r18 == 0))
-+		return -EINVAL;
++	signal = current_thread_info()->exec_domain
++		&& current_thread_info()->exec_domain->signal_invmap
++		&& sig < 32
++		? current_thread_info()->exec_domain->signal_invmap[sig]
++		: sig;
 +
-+	*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
++	err |= __put_user(&frame->info, &frame->pinfo);
++	err |= __put_user(&frame->uc, &frame->puc);
++	err |= copy_siginfo_to_user(&frame->info, info);
 +
-+	if (offset_r18)
-+		*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
++	/* Give up earlier as i386, in case */
++	if (err)
++		goto give_sigsegv;
 +
-+	*pprev_pc &= ~1;
++	/* Create the ucontext.  */
++	err |= __put_user(0, &frame->uc.uc_flags);
++	err |= __put_user(0, &frame->uc.uc_link);
++	err |= __put_user((void *)current->sas_ss_sp,
++			  &frame->uc.uc_stack.ss_sp);
++	err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
++			  &frame->uc.uc_stack.ss_flags);
++	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
++	err |= setup_sigcontext(&frame->uc.uc_mcontext,
++			        regs, set->sig[0]);
++	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 +
-+	return 0;
-+}
++	/* Give up earlier as i386, in case */
++	if (err)
++		goto give_sigsegv;
 +
-+/* Don't put this on the stack since we'll want to call sh64_unwind
-+ * when we're close to underflowing the stack anyway. */
-+static struct pt_regs here_regs;
++	/* Set up to return from userspace.  If provided, use a stub
++	   already in userspace.  */
++	if (ka->sa.sa_flags & SA_RESTORER) {
++		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
 +
-+extern const char syscall_ret;
-+extern const char ret_from_syscall;
-+extern const char ret_from_exception;
-+extern const char ret_from_irq;
++		/*
++		 * On SH5 all edited pointers are subject to NEFF
++		 */
++		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
++        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
++	} else {
++		/*
++		 * Different approach on SH5.
++	         * . Endianness independent asm code gets placed in entry.S .
++		 *   This is limited to four ASM instructions corresponding
++		 *   to two long longs in size.
++		 * . err checking is done on the else branch only
++		 * . flush_icache_range() is called upon __put_user() only
++		 * . all edited pointers are subject to NEFF
++		 * . being code, linker turns ShMedia bit on, always
++		 *   dereference index -1.
++		 */
 +
-+static void sh64_unwind_inner(struct pt_regs *regs);
++		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
++		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
++        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
 +
-+static void unwind_nested (unsigned long pc, unsigned long fp)
-+{
-+	if ((fp >= __MEMORY_START) &&
-+	    ((fp & 7) == 0)) {
-+		sh64_unwind_inner((struct pt_regs *) fp);
++		if (__copy_to_user(frame->retcode,
++			(unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
++			goto give_sigsegv;
++
++		flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
 +	}
-+}
 +
-+static void sh64_unwind_inner(struct pt_regs *regs)
-+{
-+	unsigned long pc, fp;
-+	int ofs = 0;
-+	int first_pass;
++	/*
++	 * Set up registers for signal handler.
++	 * All edited pointers are subject to NEFF.
++	 */
++	regs->regs[REG_SP] = (unsigned long) frame;
++	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
++        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
++	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
++	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
++	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
++	regs->pc = (unsigned long) ka->sa.sa_handler;
++	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
 +
-+	pc = regs->pc & ~1;
-+	fp = regs->regs[14];
++	set_fs(USER_DS);
 +
-+	first_pass = 1;
-+	for (;;) {
-+		int cond;
-+		unsigned long next_fp, next_pc;
++#if DEBUG_SIG
++	/* Broken %016Lx */
++	printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
++		signal,
++		current->comm, current->pid, frame,
++		regs->pc >> 32, regs->pc & 0xffffffff,
++		DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
++#endif
 +
-+		if (pc == ((unsigned long) &syscall_ret & ~1)) {
-+			printk("SYSCALL\n");
-+			unwind_nested(pc,fp);
-+			return;
-+		}
++	return;
 +
-+		if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
-+			printk("SYSCALL (PREEMPTED)\n");
-+			unwind_nested(pc,fp);
-+			return;
-+		}
++give_sigsegv:
++	force_sigsegv(sig, current);
++}
 +
-+		/* In this case, the PC is discovered by lookup_prev_stack_frame but
-+		   it has 4 taken off it to look like the 'caller' */
-+		if (pc == ((unsigned long) &ret_from_exception & ~1)) {
-+			printk("EXCEPTION\n");
-+			unwind_nested(pc,fp);
-+			return;
-+		}
++/*
++ * OK, we're invoking a handler
++ */
 +
-+		if (pc == ((unsigned long) &ret_from_irq & ~1)) {
-+			printk("IRQ\n");
-+			unwind_nested(pc,fp);
-+			return;
++static void
++handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
++		sigset_t *oldset, struct pt_regs * regs)
++{
++	/* Are we from a system call? */
++	if (regs->syscall_nr >= 0) {
++		/* If so, check system call restarting.. */
++		switch (regs->regs[REG_RET]) {
++			case -ERESTART_RESTARTBLOCK:
++			case -ERESTARTNOHAND:
++				regs->regs[REG_RET] = -EINTR;
++				break;
++
++			case -ERESTARTSYS:
++				if (!(ka->sa.sa_flags & SA_RESTART)) {
++					regs->regs[REG_RET] = -EINTR;
++					break;
++				}
++			/* fallthrough */
++			case -ERESTARTNOINTR:
++				/* Decode syscall # */
++				regs->regs[REG_RET] = regs->syscall_nr;
++				regs->pc -= 4;
 +		}
++	}
 +
-+		cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
-+			((pc & 3) == 0) && ((fp & 7) == 0));
++	/* Set up the stack frame */
++	if (ka->sa.sa_flags & SA_SIGINFO)
++		setup_rt_frame(sig, ka, info, oldset, regs);
++	else
++		setup_frame(sig, ka, oldset, regs);
 +
-+		pc -= ofs;
++	spin_lock_irq(&current->sighand->siglock);
++	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
++	if (!(ka->sa.sa_flags & SA_NODEFER))
++		sigaddset(&current->blocked,sig);
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++}
 +
-+		printk("[<%08lx>] ", pc);
-+		print_symbol("%s\n", pc);
++/*
++ * Note that 'init' is a special process: it doesn't get signals it doesn't
++ * want to handle. Thus you cannot kill init even with a SIGKILL even by
++ * mistake.
++ *
++ * Note that we go through the signals twice: once to check the signals that
++ * the kernel can handle, and then we build all the user-level signal handling
++ * stack-frames in one go after that.
++ */
++int do_signal(struct pt_regs *regs, sigset_t *oldset)
++{
++	siginfo_t info;
++	int signr;
++	struct k_sigaction ka;
 +
-+		if (first_pass) {
-+			/* If the innermost frame is a leaf function, it's
-+			 * possible that r18 is never saved out to the stack.
-+			 */
-+			next_pc = regs->regs[18];
-+		} else {
-+			next_pc = 0;
-+		}
++	/*
++	 * We want the common case to go fast, which
++	 * is why we may in certain cases get here from
++	 * kernel mode. Just return without doing anything
++	 * if so.
++	 */
++	if (!user_mode(regs))
++		return 1;
 +
-+		if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
-+			ofs = sizeof(unsigned long);
-+			pc = next_pc & ~1;
-+			fp = next_fp;
-+		} else {
-+			printk("Unable to lookup previous stack frame\n");
-+			break;
-+		}
-+		first_pass = 0;
-+	}
++	if (try_to_freeze())
++		goto no_signal;
 +
-+	printk("\n");
++	if (test_thread_flag(TIF_RESTORE_SIGMASK))
++		oldset = &current->saved_sigmask;
++	else if (!oldset)
++		oldset = &current->blocked;
 +
-+}
++	signr = get_signal_to_deliver(&info, &ka, regs, 0);
++
++	if (signr > 0) {
++		/* Whee!  Actually deliver the signal.  */
++		handle_signal(signr, &info, &ka, oldset, regs);
 +
-+void sh64_unwind(struct pt_regs *regs)
-+{
-+	if (!regs) {
 +		/*
-+		 * Fetch current regs if we have no other saved state to back
-+		 * trace from.
++		 * If a signal was successfully delivered, the saved sigmask
++		 * is in its frame, and we can clear the TIF_RESTORE_SIGMASK
++		 * flag.
 +		 */
-+		regs = &here_regs;
++		if (test_thread_flag(TIF_RESTORE_SIGMASK))
++			clear_thread_flag(TIF_RESTORE_SIGMASK);
 +
-+		__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
-+		__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
-+		__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
++		return 1;
++	}
 +
-+		__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
-+		__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
-+		__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
-+		__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
-+		__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
-+		__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
-+		__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
-+		__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
++no_signal:
++	/* Did we come from a system call? */
++	if (regs->syscall_nr >= 0) {
++		/* Restart the system call - no handlers present */
++		switch (regs->regs[REG_RET]) {
++		case -ERESTARTNOHAND:
++		case -ERESTARTSYS:
++		case -ERESTARTNOINTR:
++			/* Decode Syscall # */
++			regs->regs[REG_RET] = regs->syscall_nr;
++			regs->pc -= 4;
++			break;
 +
-+		__asm__ __volatile__ (
-+			"pta 0f, tr0\n\t"
-+			"blink tr0, %0\n\t"
-+			"0: nop"
-+			: "=r" (regs->pc)
-+		);
++		case -ERESTART_RESTARTBLOCK:
++			regs->regs[REG_RET] = __NR_restart_syscall;
++			regs->pc -= 4;
++			break;
++		}
 +	}
 +
-+	printk("\nCall Trace:\n");
-+	sh64_unwind_inner(regs);
++	/* No signal to deliver -- put the saved sigmask back */
++	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
++		clear_thread_flag(TIF_RESTORE_SIGMASK);
++		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
++	}
++
++	return 0;
 +}
+diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
+index d545a68..59cd285 100644
+--- a/arch/sh/kernel/sys_sh.c
++++ b/arch/sh/kernel/sys_sh.c
+@@ -7,7 +7,6 @@
+  *
+  * Taken from i386 version.
+  */
+-
+ #include <linux/errno.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
+@@ -27,28 +26,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+ 
+-/*
+- * sys_pipe() is the normal C calling standard for creating
+- * a pipe. It's not the way Unix traditionally does this, though.
+- */
+-asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
+-	unsigned long r6, unsigned long r7,
+-	struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	int fd[2];
+-	int error;
+-
+-	error = do_pipe(fd);
+-	if (!error) {
+-		regs->regs[1] = fd[1];
+-		return fd[0];
+-	}
+-	return error;
+-}
+-
+ unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
+-
+ EXPORT_SYMBOL(shm_align_mask);
+ 
+ #ifdef CONFIG_MMU
+@@ -140,7 +118,7 @@ full_search:
+ #endif /* CONFIG_MMU */
+ 
+ static inline long
+-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 
++do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
+ 	 unsigned long flags, int fd, unsigned long pgoff)
+ {
+ 	int error = -EBADF;
+@@ -195,12 +173,13 @@ asmlinkage int sys_ipc(uint call, int first, int second,
+ 	if (call <= SEMCTL)
+ 		switch (call) {
+ 		case SEMOP:
+-			return sys_semtimedop(first, (struct sembuf __user *)ptr,
++			return sys_semtimedop(first,
++					      (struct sembuf __user *)ptr,
+ 					      second, NULL);
+ 		case SEMTIMEDOP:
+-			return sys_semtimedop(first, (struct sembuf __user *)ptr,
+-					      second,
+-					      (const struct timespec __user *)fifth);
++			return sys_semtimedop(first,
++				(struct sembuf __user *)ptr, second,
++			        (const struct timespec __user *)fifth);
+ 		case SEMGET:
+ 			return sys_semget (first, second, third);
+ 		case SEMCTL: {
+@@ -215,25 +194,28 @@ asmlinkage int sys_ipc(uint call, int first, int second,
+ 			return -EINVAL;
+ 		}
+ 
+-	if (call <= MSGCTL) 
++	if (call <= MSGCTL)
+ 		switch (call) {
+ 		case MSGSND:
+-			return sys_msgsnd (first, (struct msgbuf __user *) ptr, 
++			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
+ 					  second, third);
+ 		case MSGRCV:
+ 			switch (version) {
+-			case 0: {
++			case 0:
++			{
+ 				struct ipc_kludge tmp;
 +
-diff --git a/arch/sh/kernel/dump_task.c b/arch/sh/kernel/dump_task.c
+ 				if (!ptr)
+ 					return -EINVAL;
+-				
++
+ 				if (copy_from_user(&tmp,
+-						   (struct ipc_kludge __user *) ptr, 
++					(struct ipc_kludge __user *) ptr,
+ 						   sizeof (tmp)))
+ 					return -EFAULT;
++
+ 				return sys_msgrcv (first, tmp.msgp, second,
+ 						   tmp.msgtyp, third);
+-				}
++			}
+ 			default:
+ 				return sys_msgrcv (first,
+ 						   (struct msgbuf __user *) ptr,
+@@ -247,7 +229,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
+ 		default:
+ 			return -EINVAL;
+ 		}
+-	if (call <= SHMCTL) 
++	if (call <= SHMCTL)
+ 		switch (call) {
+ 		case SHMAT:
+ 			switch (version) {
+@@ -265,7 +247,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
+ 				return do_shmat (first, (char __user *) ptr,
+ 						  second, (ulong *) third);
+ 			}
+-		case SHMDT: 
++		case SHMDT:
+ 			return sys_shmdt ((char __user *)ptr);
+ 		case SHMGET:
+ 			return sys_shmget (first, second, third);
+@@ -275,7 +257,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
+ 		default:
+ 			return -EINVAL;
+ 		}
+-	
++
+ 	return -EINVAL;
+ }
+ 
+@@ -289,49 +271,3 @@ asmlinkage int sys_uname(struct old_utsname * name)
+ 	up_read(&uts_sem);
+ 	return err?-EFAULT:0;
+ }
+-
+-asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
+-			     size_t count, long dummy, loff_t pos)
+-{
+-	return sys_pread64(fd, buf, count, pos);
+-}
+-
+-asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
+-			      size_t count, long dummy, loff_t pos)
+-{
+-	return sys_pwrite64(fd, buf, count, pos);
+-}
+-
+-asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
+-				u32 len0, u32 len1, int advice)
+-{
+-#ifdef  __LITTLE_ENDIAN__
+-	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
+-				(u64)len1 << 32 | len0,	advice);
+-#else
+-	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
+-				(u64)len0 << 32 | len1,	advice);
+-#endif
+-}
+-
+-#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
+-#define SYSCALL_ARG3	"trapa #0x23"
+-#else
+-#define SYSCALL_ARG3	"trapa #0x13"
+-#endif
+-
+-/*
+- * Do a system call from kernel instead of calling sys_execve so we
+- * end up with proper pt_regs.
+- */
+-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+-{
+-	register long __sc0 __asm__ ("r3") = __NR_execve;
+-	register long __sc4 __asm__ ("r4") = (long) filename;
+-	register long __sc5 __asm__ ("r5") = (long) argv;
+-	register long __sc6 __asm__ ("r6") = (long) envp;
+-	__asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)	
+-			: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
+-			: "memory");
+-	return __sc0;
+-}
+diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
 new file mode 100644
-index 0000000..4a8a408
+index 0000000..125e493
 --- /dev/null
-+++ b/arch/sh/kernel/dump_task.c
-@@ -0,0 +1,31 @@
-+#include <linux/elfcore.h>
++++ b/arch/sh/kernel/sys_sh32.c
+@@ -0,0 +1,84 @@
++#include <linux/errno.h>
 +#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/sem.h>
++#include <linux/msg.h>
++#include <linux/shm.h>
++#include <linux/stat.h>
++#include <linux/syscalls.h>
++#include <linux/mman.h>
++#include <linux/file.h>
++#include <linux/utsname.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/ipc.h>
++#include <asm/cacheflush.h>
++#include <asm/uaccess.h>
++#include <asm/unistd.h>
 +
 +/*
-+ * Capture the user space registers if the task is not running (in user space)
++ * sys_pipe() is the normal C calling standard for creating
++ * a pipe. It's not the way Unix traditionally does this, though.
 + */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
++	unsigned long r6, unsigned long r7,
++	struct pt_regs __regs)
 +{
-+	struct pt_regs ptregs;
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	int fd[2];
++	int error;
 +
-+	ptregs = *task_pt_regs(tsk);
-+	elf_core_copy_regs(regs, &ptregs);
++	error = do_pipe(fd);
++	if (!error) {
++		regs->regs[1] = fd[1];
++		return fd[0];
++	}
++	return error;
++}
 +
-+	return 1;
++asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
++			     size_t count, long dummy, loff_t pos)
++{
++	return sys_pread64(fd, buf, count, pos);
 +}
 +
-+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
++asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
++			      size_t count, long dummy, loff_t pos)
 +{
-+	int fpvalid = 0;
++	return sys_pwrite64(fd, buf, count, pos);
++}
 +
-+#if defined(CONFIG_SH_FPU)
-+	fpvalid = !!tsk_used_math(tsk);
-+	if (fpvalid) {
-+		unlazy_fpu(tsk, task_pt_regs(tsk));
-+		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
-+	}
++asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
++				u32 len0, u32 len1, int advice)
++{
++#ifdef  __LITTLE_ENDIAN__
++	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
++				(u64)len1 << 32 | len0,	advice);
++#else
++	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
++				(u64)len0 << 32 | len1,	advice);
 +#endif
-+
-+	return fpvalid;
 +}
 +
-diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
-index 2f30977..957f256 100644
---- a/arch/sh/kernel/early_printk.c
-+++ b/arch/sh/kernel/early_printk.c
-@@ -63,7 +63,8 @@ static struct console bios_console = {
- #include <linux/serial_core.h>
- #include "../../../drivers/serial/sh-sci.h"
- 
--#if defined(CONFIG_CPU_SUBTYPE_SH7720)
-+#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-+    defined(CONFIG_CPU_SUBTYPE_SH7721)
- #define EPK_SCSMR_VALUE 0x000
- #define EPK_SCBRR_VALUE 0x00C
- #define EPK_FIFO_SIZE 64
-@@ -117,7 +118,8 @@ static struct console scif_console = {
- };
- 
- #if !defined(CONFIG_SH_STANDARD_BIOS)
--#if defined(CONFIG_CPU_SUBTYPE_SH7720)
-+#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-+    defined(CONFIG_CPU_SUBTYPE_SH7721)
- static void scif_sercon_init(char *s)
- {
- 	sci_out(&scif_port, SCSCR, 0x0000);	/* clear TE and RE */
-@@ -208,10 +210,12 @@ static int __init setup_early_printk(char *buf)
- 	if (!strncmp(buf, "serial", 6)) {
- 		early_console = &scif_console;
- 
--#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720)) && \
--    !defined(CONFIG_SH_STANDARD_BIOS)
-+#if !defined(CONFIG_SH_STANDARD_BIOS)
-+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-+    defined(CONFIG_CPU_SUBTYPE_SH7721)
- 		scif_sercon_init(buf + 6);
- #endif
++#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
++#define SYSCALL_ARG3	"trapa #0x23"
++#else
++#define SYSCALL_ARG3	"trapa #0x13"
 +#endif
- 	}
- #endif
- 
-diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
-index e0317ed..926b2e7 100644
---- a/arch/sh/kernel/entry-common.S
-+++ b/arch/sh/kernel/entry-common.S
-@@ -176,25 +176,6 @@ work_notifysig:
- 	jmp	@r1
- 	 lds	r0, pr
- work_resched:
--#if defined(CONFIG_GUSA) && !defined(CONFIG_PREEMPT)
--	! gUSA handling
--	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
--	mov	r0, r1
--	shll	r0
--	bf/s	1f
--	 shll	r0
--	bf/s	1f
--	 mov	#OFF_PC, r0
--	! 				  SP >= 0xc0000000 : gUSA mark
--	mov.l	@(r0,r15), r2		! get user space PC (program counter)
--	mov.l	@(OFF_R0,r15), r3	! end point
--	cmp/hs	r3, r2			! r2 >= r3? 
--	bt	1f
--	add	r3, r1			! rewind point #2
--	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
--	!
--1:
--#endif
- 	mov.l	1f, r1
- 	jsr	@r1				! schedule
- 	 nop
-@@ -224,7 +205,7 @@ work_resched:
- syscall_exit_work:
- 	! r0: current_thread_info->flags
- 	! r8: current_thread_info
--	tst	#_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP, r0
-+	tst	#_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT, r0
- 	bt/s	work_pending
- 	 tst	#_TIF_NEED_RESCHED, r0
- #ifdef CONFIG_TRACE_IRQFLAGS
-@@ -234,6 +215,8 @@ syscall_exit_work:
- #endif
- 	sti
- 	! XXX setup arguments...
-+	mov	r15, r4
-+	mov	#1, r5
- 	mov.l	4f, r0			! do_syscall_trace
- 	jsr	@r0
- 	 nop
-@@ -244,6 +227,8 @@ syscall_exit_work:
- syscall_trace_entry:
- 	!                     	Yes it is traced.
- 	! XXX setup arguments...
-+	mov     r15, r4
-+	mov     #0, r5
- 	mov.l	4f, r11		! Call do_syscall_trace which notifies
- 	jsr	@r11	    	! superior (will chomp R[0-7])
- 	 nop
-@@ -366,7 +351,7 @@ ENTRY(system_call)
- 	!
- 	get_current_thread_info r8, r10
- 	mov.l	@(TI_FLAGS,r8), r8
--	mov	#_TIF_SYSCALL_TRACE, r10
-+	mov	#(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT), r10
- 	tst	r10, r8
- 	bf	syscall_trace_entry
- 	!
-diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
++
++/*
++ * Do a system call from kernel instead of calling sys_execve so we
++ * end up with proper pt_regs.
++ */
++int kernel_execve(const char *filename, char *const argv[], char *const envp[])
++{
++	register long __sc0 __asm__ ("r3") = __NR_execve;
++	register long __sc4 __asm__ ("r4") = (long) filename;
++	register long __sc5 __asm__ ("r5") = (long) argv;
++	register long __sc6 __asm__ ("r6") = (long) envp;
++	__asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
++			: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
++			: "memory");
++	return __sc0;
++}
+diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
+new file mode 100644
+index 0000000..578004d
+--- /dev/null
++++ b/arch/sh/kernel/sys_sh64.c
+@@ -0,0 +1,66 @@
++/*
++ * arch/sh/kernel/sys_sh64.c
++ *
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ *
++ * This file contains various random system calls that
++ * have a non-standard calling sequence on the Linux/SH5
++ * platform.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/errno.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/smp.h>
++#include <linux/sem.h>
++#include <linux/msg.h>
++#include <linux/shm.h>
++#include <linux/stat.h>
++#include <linux/mman.h>
++#include <linux/file.h>
++#include <linux/utsname.h>
++#include <linux/syscalls.h>
++#include <linux/ipc.h>
++#include <asm/uaccess.h>
++#include <asm/ptrace.h>
++#include <asm/unistd.h>
++
++/*
++ * sys_pipe() is the normal C calling standard for creating
++ * a pipe. It's not the way Unix traditionally does this, though.
++ */
++asmlinkage int sys_pipe(unsigned long * fildes)
++{
++        int fd[2];
++        int error;
++
++        error = do_pipe(fd);
++        if (!error) {
++                if (copy_to_user(fildes, fd, 2*sizeof(int)))
++                        error = -EFAULT;
++        }
++        return error;
++}
++
++/*
++ * Do a system call from kernel instead of calling sys_execve so we
++ * end up with proper pt_regs.
++ */
++int kernel_execve(const char *filename, char *const argv[], char *const envp[])
++{
++	register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
++	register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
++	register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
++	register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
++	__asm__ __volatile__ ("trapa	%1 !\t\t\t execve(%2,%3,%4)"
++	: "=r" (__sc0)
++	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
++	__asm__ __volatile__ ("!dummy	%0 %1 %2 %3"
++	: : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
++	return __sc0;
++}
+diff --git a/arch/sh/kernel/syscalls.S b/arch/sh/kernel/syscalls.S
 deleted file mode 100644
-index 3338239..0000000
---- a/arch/sh/kernel/head.S
+index 10bec45..0000000
+--- a/arch/sh/kernel/syscalls.S
 +++ /dev/null
-@@ -1,120 +0,0 @@
--/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
+@@ -1,343 +0,0 @@
+-/*
+- * arch/sh/kernel/syscalls.S
 - *
-- *  arch/sh/kernel/head.S
+- * System call table for SuperH
 - *
-- *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+- *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+- *  Copyright (C) 2003  Paul Mundt
 - *
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * Head.S contains the SH exception handlers and startup code.
 - */
+-#include <linux/sys.h>
 -#include <linux/linkage.h>
--#include <asm/thread_info.h>
--
--#ifdef CONFIG_CPU_SH4A
--#define SYNCO()		synco
--
--#define PREFI(label, reg)	\
--	mov.l	label, reg;	\
--	prefi	@reg
--#else
--#define SYNCO()
--#define PREFI(label, reg)
--#endif
--
--	.section	.empty_zero_page, "aw"
--ENTRY(empty_zero_page)
--	.long	1		/* MOUNT_ROOT_RDONLY */
--	.long	0		/* RAMDISK_FLAGS */
--	.long	0x0200		/* ORIG_ROOT_DEV */
--	.long	1		/* LOADER_TYPE */
--	.long	0x00360000	/* INITRD_START */
--	.long	0x000a0000	/* INITRD_SIZE */
--	.long	0
--1:
--	.skip	PAGE_SIZE - empty_zero_page - 1b
--
--	.section	.text.head, "ax"
--
--/*
-- * Condition at the entry of _stext:
-- *
-- *   BSC has already been initialized.
-- *   INTC may or may not be initialized.
-- *   VBR may or may not be initialized.
-- *   MMU may or may not be initialized.
-- *   Cache may or may not be initialized.
-- *   Hardware (including on-chip modules) may or may not be initialized. 
-- *
-- */
--ENTRY(_stext)
--	!			Initialize Status Register
--	mov.l	1f, r0		! MD=1, RB=0, BL=0, IMASK=0xF
--	ldc	r0, sr
--	!			Initialize global interrupt mask
--#ifdef CONFIG_CPU_HAS_SR_RB
--	mov	#0, r0
--	ldc	r0, r6_bank
--#endif
--	
--	/*
--	 * Prefetch if possible to reduce cache miss penalty.
--	 *
--	 * We do this early on for SH-4A as a micro-optimization,
--	 * as later on we will have speculative execution enabled
--	 * and this will become less of an issue.
--	 */
--	PREFI(5f, r0)
--	PREFI(6f, r0)
--
--	!
--	mov.l	2f, r0
--	mov	r0, r15		! Set initial r15 (stack pointer)
--#ifdef CONFIG_CPU_HAS_SR_RB
--	mov.l	7f, r0
--	ldc	r0, r7_bank	! ... and initial thread_info
--#endif
--	
--	!			Clear BSS area
--#ifdef CONFIG_SMP	
--	mov.l	3f, r0
--	cmp/eq	#0, r0		! skip clear if set to zero
--	bt	10f
--#endif
--	
--	mov.l	3f, r1
--	add	#4, r1
--	mov.l	4f, r2
--	mov	#0, r0
--9:	cmp/hs	r2, r1
--	bf/s	9b		! while (r1 < r2)
--	 mov.l	r0,@-r2
 -
--10:		
--	!			Additional CPU initialization
--	mov.l	6f, r0
--	jsr	@r0
--	 nop
--
--	SYNCO()			! Wait for pending instructions..
--	
--	!			Start kernel
--	mov.l	5f, r0
--	jmp	@r0
--	 nop
--
--	.balign 4
--#if defined(CONFIG_CPU_SH2)
--1:	.long	0x000000F0		! IMASK=0xF
--#else
--1:	.long	0x400080F0		! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
--#endif
--ENTRY(stack_start)
--2:	.long	init_thread_union+THREAD_SIZE
--3:	.long	__bss_start
--4:	.long	_end
--5:	.long	start_kernel
--6:	.long	sh_cpu_init
--7:	.long	init_thread_union
-diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
+-	.data
+-ENTRY(sys_call_table)
+-	.long sys_restart_syscall	/* 0  -  old "setup()" system call*/
+-	.long sys_exit
+-	.long sys_fork
+-	.long sys_read
+-	.long sys_write
+-	.long sys_open		/* 5 */
+-	.long sys_close
+-	.long sys_waitpid
+-	.long sys_creat
+-	.long sys_link
+-	.long sys_unlink		/* 10 */
+-	.long sys_execve
+-	.long sys_chdir
+-	.long sys_time
+-	.long sys_mknod
+-	.long sys_chmod		/* 15 */
+-	.long sys_lchown16
+-	.long sys_ni_syscall	/* old break syscall holder */
+-	.long sys_stat
+-	.long sys_lseek
+-	.long sys_getpid		/* 20 */
+-	.long sys_mount
+-	.long sys_oldumount
+-	.long sys_setuid16
+-	.long sys_getuid16
+-	.long sys_stime		/* 25 */
+-	.long sys_ptrace
+-	.long sys_alarm
+-	.long sys_fstat
+-	.long sys_pause
+-	.long sys_utime		/* 30 */
+-	.long sys_ni_syscall	/* old stty syscall holder */
+-	.long sys_ni_syscall	/* old gtty syscall holder */
+-	.long sys_access
+-	.long sys_nice
+-	.long sys_ni_syscall	/* 35 */		/* old ftime syscall holder */
+-	.long sys_sync
+-	.long sys_kill
+-	.long sys_rename
+-	.long sys_mkdir
+-	.long sys_rmdir		/* 40 */
+-	.long sys_dup
+-	.long sys_pipe
+-	.long sys_times
+-	.long sys_ni_syscall	/* old prof syscall holder */
+-	.long sys_brk		/* 45 */
+-	.long sys_setgid16
+-	.long sys_getgid16
+-	.long sys_signal
+-	.long sys_geteuid16
+-	.long sys_getegid16	/* 50 */
+-	.long sys_acct
+-	.long sys_umount		/* recycled never used phys() */
+-	.long sys_ni_syscall	/* old lock syscall holder */
+-	.long sys_ioctl
+-	.long sys_fcntl		/* 55 */
+-	.long sys_ni_syscall	/* old mpx syscall holder */
+-	.long sys_setpgid
+-	.long sys_ni_syscall	/* old ulimit syscall holder */
+-	.long sys_ni_syscall	/* sys_olduname */
+-	.long sys_umask		/* 60 */
+-	.long sys_chroot
+-	.long sys_ustat
+-	.long sys_dup2
+-	.long sys_getppid
+-	.long sys_getpgrp		/* 65 */
+-	.long sys_setsid
+-	.long sys_sigaction
+-	.long sys_sgetmask
+-	.long sys_ssetmask
+-	.long sys_setreuid16	/* 70 */
+-	.long sys_setregid16
+-	.long sys_sigsuspend
+-	.long sys_sigpending
+-	.long sys_sethostname
+-	.long sys_setrlimit	/* 75 */
+-	.long sys_old_getrlimit
+-	.long sys_getrusage
+-	.long sys_gettimeofday
+-	.long sys_settimeofday
+-	.long sys_getgroups16	/* 80 */
+-	.long sys_setgroups16
+-	.long sys_ni_syscall	/* sys_oldselect */
+-	.long sys_symlink
+-	.long sys_lstat
+-	.long sys_readlink		/* 85 */
+-	.long sys_uselib
+-	.long sys_swapon
+-	.long sys_reboot
+-	.long old_readdir
+-	.long old_mmap		/* 90 */
+-	.long sys_munmap
+-	.long sys_truncate
+-	.long sys_ftruncate
+-	.long sys_fchmod
+-	.long sys_fchown16		/* 95 */
+-	.long sys_getpriority
+-	.long sys_setpriority
+-	.long sys_ni_syscall	/* old profil syscall holder */
+-	.long sys_statfs
+-	.long sys_fstatfs		/* 100 */
+-	.long sys_ni_syscall	/* ioperm */
+-	.long sys_socketcall
+-	.long sys_syslog
+-	.long sys_setitimer
+-	.long sys_getitimer	/* 105 */
+-	.long sys_newstat
+-	.long sys_newlstat
+-	.long sys_newfstat
+-	.long sys_uname
+-	.long sys_ni_syscall	/* 110 */ /* iopl */
+-	.long sys_vhangup
+-	.long sys_ni_syscall	/* idle */
+-	.long sys_ni_syscall	/* vm86old */
+-	.long sys_wait4
+-	.long sys_swapoff		/* 115 */
+-	.long sys_sysinfo
+-	.long sys_ipc
+-	.long sys_fsync
+-	.long sys_sigreturn
+-	.long sys_clone		/* 120 */
+-	.long sys_setdomainname
+-	.long sys_newuname
+-	.long sys_ni_syscall	/* sys_modify_ldt */
+-	.long sys_adjtimex
+-	.long sys_mprotect		/* 125 */
+-	.long sys_sigprocmask
+-	.long sys_ni_syscall	/* old "create_module" */
+-	.long sys_init_module
+-	.long sys_delete_module
+-	.long sys_ni_syscall	/* 130: old "get_kernel_syms" */
+-	.long sys_quotactl
+-	.long sys_getpgid
+-	.long sys_fchdir
+-	.long sys_bdflush
+-	.long sys_sysfs		/* 135 */
+-	.long sys_personality
+-	.long sys_ni_syscall	/* for afs_syscall */
+-	.long sys_setfsuid16
+-	.long sys_setfsgid16
+-	.long sys_llseek		/* 140 */
+-	.long sys_getdents
+-	.long sys_select
+-	.long sys_flock
+-	.long sys_msync
+-	.long sys_readv		/* 145 */
+-	.long sys_writev
+-	.long sys_getsid
+-	.long sys_fdatasync
+-	.long sys_sysctl
+-	.long sys_mlock		/* 150 */
+-	.long sys_munlock
+-	.long sys_mlockall
+-	.long sys_munlockall
+-	.long sys_sched_setparam
+-	.long sys_sched_getparam   /* 155 */
+-	.long sys_sched_setscheduler
+-	.long sys_sched_getscheduler
+-	.long sys_sched_yield
+-	.long sys_sched_get_priority_max
+-	.long sys_sched_get_priority_min  /* 160 */
+-	.long sys_sched_rr_get_interval
+-	.long sys_nanosleep
+-	.long sys_mremap
+-	.long sys_setresuid16
+-	.long sys_getresuid16	/* 165 */
+-	.long sys_ni_syscall	/* vm86 */
+-	.long sys_ni_syscall	/* old "query_module" */
+-	.long sys_poll
+-	.long sys_nfsservctl
+-	.long sys_setresgid16	/* 170 */
+-	.long sys_getresgid16
+-	.long sys_prctl
+-	.long sys_rt_sigreturn
+-	.long sys_rt_sigaction
+-	.long sys_rt_sigprocmask	/* 175 */
+-	.long sys_rt_sigpending
+-	.long sys_rt_sigtimedwait
+-	.long sys_rt_sigqueueinfo
+-	.long sys_rt_sigsuspend
+-	.long sys_pread_wrapper	   /* 180 */
+-	.long sys_pwrite_wrapper
+-	.long sys_chown16
+-	.long sys_getcwd
+-	.long sys_capget
+-	.long sys_capset           /* 185 */
+-	.long sys_sigaltstack
+-	.long sys_sendfile
+-	.long sys_ni_syscall	/* streams1 */
+-	.long sys_ni_syscall	/* streams2 */
+-	.long sys_vfork            /* 190 */
+-	.long sys_getrlimit
+-	.long sys_mmap2
+-	.long sys_truncate64
+-	.long sys_ftruncate64
+-	.long sys_stat64		/* 195 */
+-	.long sys_lstat64
+-	.long sys_fstat64
+-	.long sys_lchown
+-	.long sys_getuid
+-	.long sys_getgid		/* 200 */
+-	.long sys_geteuid
+-	.long sys_getegid
+-	.long sys_setreuid
+-	.long sys_setregid
+-	.long sys_getgroups	/* 205 */
+-	.long sys_setgroups
+-	.long sys_fchown
+-	.long sys_setresuid
+-	.long sys_getresuid
+-	.long sys_setresgid	/* 210 */
+-	.long sys_getresgid
+-	.long sys_chown
+-	.long sys_setuid
+-	.long sys_setgid
+-	.long sys_setfsuid		/* 215 */
+-	.long sys_setfsgid
+-	.long sys_pivot_root
+-	.long sys_mincore
+-	.long sys_madvise
+-	.long sys_getdents64	/* 220 */
+-	.long sys_fcntl64
+-	.long sys_ni_syscall	/* reserved for TUX */
+-	.long sys_ni_syscall	/* Reserved for Security */
+-	.long sys_gettid
+-	.long sys_readahead	/* 225 */
+-	.long sys_setxattr
+-	.long sys_lsetxattr
+-	.long sys_fsetxattr
+-	.long sys_getxattr
+-	.long sys_lgetxattr	/* 230 */
+-	.long sys_fgetxattr
+-	.long sys_listxattr
+-	.long sys_llistxattr
+-	.long sys_flistxattr
+-	.long sys_removexattr	/* 235 */
+-	.long sys_lremovexattr
+-	.long sys_fremovexattr
+-	.long sys_tkill
+-	.long sys_sendfile64
+-	.long sys_futex		/* 240 */
+-	.long sys_sched_setaffinity
+-	.long sys_sched_getaffinity
+-	.long sys_ni_syscall
+-	.long sys_ni_syscall
+-	.long sys_io_setup	/* 245 */
+-	.long sys_io_destroy
+-	.long sys_io_getevents
+-	.long sys_io_submit
+-	.long sys_io_cancel
+-	.long sys_fadvise64	/* 250 */
+-	.long sys_ni_syscall
+-	.long sys_exit_group
+-	.long sys_lookup_dcookie
+-	.long sys_epoll_create
+-	.long sys_epoll_ctl	/* 255 */
+-	.long sys_epoll_wait
+- 	.long sys_remap_file_pages
+- 	.long sys_set_tid_address
+- 	.long sys_timer_create
+- 	.long sys_timer_settime		/* 260 */
+- 	.long sys_timer_gettime
+- 	.long sys_timer_getoverrun
+- 	.long sys_timer_delete
+- 	.long sys_clock_settime
+- 	.long sys_clock_gettime		/* 265 */
+- 	.long sys_clock_getres
+- 	.long sys_clock_nanosleep
+-	.long sys_statfs64
+-	.long sys_fstatfs64
+-	.long sys_tgkill		/* 270 */
+-	.long sys_utimes
+- 	.long sys_fadvise64_64_wrapper
+-	.long sys_ni_syscall	/* Reserved for vserver */
+-	.long sys_mbind
+-	.long sys_get_mempolicy		/* 275 */
+-	.long sys_set_mempolicy
+-	.long sys_mq_open
+-	.long sys_mq_unlink
+-	.long sys_mq_timedsend
+-	.long sys_mq_timedreceive       /* 280 */
+-	.long sys_mq_notify
+-	.long sys_mq_getsetattr
+-	.long sys_kexec_load
+-	.long sys_waitid
+-	.long sys_add_key		/* 285 */
+-	.long sys_request_key
+-	.long sys_keyctl
+-	.long sys_ioprio_set
+-	.long sys_ioprio_get
+-	.long sys_inotify_init		/* 290 */
+-	.long sys_inotify_add_watch
+-	.long sys_inotify_rm_watch
+-	.long sys_ni_syscall
+-	.long sys_migrate_pages
+-	.long sys_openat		/* 295 */
+-	.long sys_mkdirat
+-	.long sys_mknodat
+-	.long sys_fchownat
+-	.long sys_futimesat
+-	.long sys_fstatat64		/* 300 */
+-	.long sys_unlinkat
+-	.long sys_renameat
+-	.long sys_linkat
+-	.long sys_symlinkat
+-	.long sys_readlinkat		/* 305 */
+-	.long sys_fchmodat
+-	.long sys_faccessat
+-	.long sys_pselect6
+-	.long sys_ppoll
+-	.long sys_unshare		/* 310 */
+-	.long sys_set_robust_list
+-	.long sys_get_robust_list
+-	.long sys_splice
+-	.long sys_sync_file_range
+-	.long sys_tee			/* 315 */
+-	.long sys_vmsplice
+-	.long sys_move_pages
+-	.long sys_getcpu
+-	.long sys_epoll_pwait
+-	.long sys_utimensat		/* 320 */
+-	.long sys_signalfd
+-	.long sys_timerfd
+-	.long sys_eventfd
+-	.long sys_fallocate
+diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
 new file mode 100644
-index 0000000..d67d7ed
+index 0000000..10bec45
 --- /dev/null
-+++ b/arch/sh/kernel/head_32.S
-@@ -0,0 +1,124 @@
-+/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
++++ b/arch/sh/kernel/syscalls_32.S
+@@ -0,0 +1,343 @@
++/*
++ * arch/sh/kernel/syscalls.S
 + *
-+ *  arch/sh/kernel/head.S
++ * System call table for SuperH
 + *
-+ *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
++ *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
++ *  Copyright (C) 2003  Paul Mundt
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + *
-+ * Head.S contains the SH exception handlers and startup code.
 + */
++#include <linux/sys.h>
 +#include <linux/linkage.h>
-+#include <asm/thread_info.h>
-+
-+#ifdef CONFIG_CPU_SH4A
-+#define SYNCO()		synco
-+
-+#define PREFI(label, reg)	\
-+	mov.l	label, reg;	\
-+	prefi	@reg
-+#else
-+#define SYNCO()
-+#define PREFI(label, reg)
-+#endif
-+
-+	.section	.empty_zero_page, "aw"
-+ENTRY(empty_zero_page)
-+	.long	1		/* MOUNT_ROOT_RDONLY */
-+	.long	0		/* RAMDISK_FLAGS */
-+	.long	0x0200		/* ORIG_ROOT_DEV */
-+	.long	1		/* LOADER_TYPE */
-+	.long	0x00360000	/* INITRD_START */
-+	.long	0x000a0000	/* INITRD_SIZE */
-+#ifdef CONFIG_32BIT
-+	.long	0x53453f00 + 32	/* "SE?" = 32 bit */
-+#else
-+	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
-+#endif
-+1:
-+	.skip	PAGE_SIZE - empty_zero_page - 1b
-+
-+	.section	.text.head, "ax"
-+
-+/*
-+ * Condition at the entry of _stext:
-+ *
-+ *   BSC has already been initialized.
-+ *   INTC may or may not be initialized.
-+ *   VBR may or may not be initialized.
-+ *   MMU may or may not be initialized.
-+ *   Cache may or may not be initialized.
-+ *   Hardware (including on-chip modules) may or may not be initialized. 
-+ *
-+ */
-+ENTRY(_stext)
-+	!			Initialize Status Register
-+	mov.l	1f, r0		! MD=1, RB=0, BL=0, IMASK=0xF
-+	ldc	r0, sr
-+	!			Initialize global interrupt mask
-+#ifdef CONFIG_CPU_HAS_SR_RB
-+	mov	#0, r0
-+	ldc	r0, r6_bank
-+#endif
-+	
-+	/*
-+	 * Prefetch if possible to reduce cache miss penalty.
-+	 *
-+	 * We do this early on for SH-4A as a micro-optimization,
-+	 * as later on we will have speculative execution enabled
-+	 * and this will become less of an issue.
-+	 */
-+	PREFI(5f, r0)
-+	PREFI(6f, r0)
-+
-+	!
-+	mov.l	2f, r0
-+	mov	r0, r15		! Set initial r15 (stack pointer)
-+#ifdef CONFIG_CPU_HAS_SR_RB
-+	mov.l	7f, r0
-+	ldc	r0, r7_bank	! ... and initial thread_info
-+#endif
-+	
-+	!			Clear BSS area
-+#ifdef CONFIG_SMP	
-+	mov.l	3f, r0
-+	cmp/eq	#0, r0		! skip clear if set to zero
-+	bt	10f
-+#endif
-+	
-+	mov.l	3f, r1
-+	add	#4, r1
-+	mov.l	4f, r2
-+	mov	#0, r0
-+9:	cmp/hs	r2, r1
-+	bf/s	9b		! while (r1 < r2)
-+	 mov.l	r0,@-r2
-+
-+10:		
-+	!			Additional CPU initialization
-+	mov.l	6f, r0
-+	jsr	@r0
-+	 nop
-+
-+	SYNCO()			! Wait for pending instructions..
-+	
-+	!			Start kernel
-+	mov.l	5f, r0
-+	jmp	@r0
-+	 nop
 +
-+	.balign 4
-+#if defined(CONFIG_CPU_SH2)
-+1:	.long	0x000000F0		! IMASK=0xF
-+#else
-+1:	.long	0x400080F0		! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
-+#endif
-+ENTRY(stack_start)
-+2:	.long	init_thread_union+THREAD_SIZE
-+3:	.long	__bss_start
-+4:	.long	_end
-+5:	.long	start_kernel
-+6:	.long	sh_cpu_init
-+7:	.long	init_thread_union
-diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
++	.data
++ENTRY(sys_call_table)
++	.long sys_restart_syscall	/* 0  -  old "setup()" system call*/
++	.long sys_exit
++	.long sys_fork
++	.long sys_read
++	.long sys_write
++	.long sys_open		/* 5 */
++	.long sys_close
++	.long sys_waitpid
++	.long sys_creat
++	.long sys_link
++	.long sys_unlink		/* 10 */
++	.long sys_execve
++	.long sys_chdir
++	.long sys_time
++	.long sys_mknod
++	.long sys_chmod		/* 15 */
++	.long sys_lchown16
++	.long sys_ni_syscall	/* old break syscall holder */
++	.long sys_stat
++	.long sys_lseek
++	.long sys_getpid		/* 20 */
++	.long sys_mount
++	.long sys_oldumount
++	.long sys_setuid16
++	.long sys_getuid16
++	.long sys_stime		/* 25 */
++	.long sys_ptrace
++	.long sys_alarm
++	.long sys_fstat
++	.long sys_pause
++	.long sys_utime		/* 30 */
++	.long sys_ni_syscall	/* old stty syscall holder */
++	.long sys_ni_syscall	/* old gtty syscall holder */
++	.long sys_access
++	.long sys_nice
++	.long sys_ni_syscall	/* 35 */		/* old ftime syscall holder */
++	.long sys_sync
++	.long sys_kill
++	.long sys_rename
++	.long sys_mkdir
++	.long sys_rmdir		/* 40 */
++	.long sys_dup
++	.long sys_pipe
++	.long sys_times
++	.long sys_ni_syscall	/* old prof syscall holder */
++	.long sys_brk		/* 45 */
++	.long sys_setgid16
++	.long sys_getgid16
++	.long sys_signal
++	.long sys_geteuid16
++	.long sys_getegid16	/* 50 */
++	.long sys_acct
++	.long sys_umount		/* recycled never used phys() */
++	.long sys_ni_syscall	/* old lock syscall holder */
++	.long sys_ioctl
++	.long sys_fcntl		/* 55 */
++	.long sys_ni_syscall	/* old mpx syscall holder */
++	.long sys_setpgid
++	.long sys_ni_syscall	/* old ulimit syscall holder */
++	.long sys_ni_syscall	/* sys_olduname */
++	.long sys_umask		/* 60 */
++	.long sys_chroot
++	.long sys_ustat
++	.long sys_dup2
++	.long sys_getppid
++	.long sys_getpgrp		/* 65 */
++	.long sys_setsid
++	.long sys_sigaction
++	.long sys_sgetmask
++	.long sys_ssetmask
++	.long sys_setreuid16	/* 70 */
++	.long sys_setregid16
++	.long sys_sigsuspend
++	.long sys_sigpending
++	.long sys_sethostname
++	.long sys_setrlimit	/* 75 */
++	.long sys_old_getrlimit
++	.long sys_getrusage
++	.long sys_gettimeofday
++	.long sys_settimeofday
++	.long sys_getgroups16	/* 80 */
++	.long sys_setgroups16
++	.long sys_ni_syscall	/* sys_oldselect */
++	.long sys_symlink
++	.long sys_lstat
++	.long sys_readlink		/* 85 */
++	.long sys_uselib
++	.long sys_swapon
++	.long sys_reboot
++	.long old_readdir
++	.long old_mmap		/* 90 */
++	.long sys_munmap
++	.long sys_truncate
++	.long sys_ftruncate
++	.long sys_fchmod
++	.long sys_fchown16		/* 95 */
++	.long sys_getpriority
++	.long sys_setpriority
++	.long sys_ni_syscall	/* old profil syscall holder */
++	.long sys_statfs
++	.long sys_fstatfs		/* 100 */
++	.long sys_ni_syscall	/* ioperm */
++	.long sys_socketcall
++	.long sys_syslog
++	.long sys_setitimer
++	.long sys_getitimer	/* 105 */
++	.long sys_newstat
++	.long sys_newlstat
++	.long sys_newfstat
++	.long sys_uname
++	.long sys_ni_syscall	/* 110 */ /* iopl */
++	.long sys_vhangup
++	.long sys_ni_syscall	/* idle */
++	.long sys_ni_syscall	/* vm86old */
++	.long sys_wait4
++	.long sys_swapoff		/* 115 */
++	.long sys_sysinfo
++	.long sys_ipc
++	.long sys_fsync
++	.long sys_sigreturn
++	.long sys_clone		/* 120 */
++	.long sys_setdomainname
++	.long sys_newuname
++	.long sys_ni_syscall	/* sys_modify_ldt */
++	.long sys_adjtimex
++	.long sys_mprotect		/* 125 */
++	.long sys_sigprocmask
++	.long sys_ni_syscall	/* old "create_module" */
++	.long sys_init_module
++	.long sys_delete_module
++	.long sys_ni_syscall	/* 130: old "get_kernel_syms" */
++	.long sys_quotactl
++	.long sys_getpgid
++	.long sys_fchdir
++	.long sys_bdflush
++	.long sys_sysfs		/* 135 */
++	.long sys_personality
++	.long sys_ni_syscall	/* for afs_syscall */
++	.long sys_setfsuid16
++	.long sys_setfsgid16
++	.long sys_llseek		/* 140 */
++	.long sys_getdents
++	.long sys_select
++	.long sys_flock
++	.long sys_msync
++	.long sys_readv		/* 145 */
++	.long sys_writev
++	.long sys_getsid
++	.long sys_fdatasync
++	.long sys_sysctl
++	.long sys_mlock		/* 150 */
++	.long sys_munlock
++	.long sys_mlockall
++	.long sys_munlockall
++	.long sys_sched_setparam
++	.long sys_sched_getparam   /* 155 */
++	.long sys_sched_setscheduler
++	.long sys_sched_getscheduler
++	.long sys_sched_yield
++	.long sys_sched_get_priority_max
++	.long sys_sched_get_priority_min  /* 160 */
++	.long sys_sched_rr_get_interval
++	.long sys_nanosleep
++	.long sys_mremap
++	.long sys_setresuid16
++	.long sys_getresuid16	/* 165 */
++	.long sys_ni_syscall	/* vm86 */
++	.long sys_ni_syscall	/* old "query_module" */
++	.long sys_poll
++	.long sys_nfsservctl
++	.long sys_setresgid16	/* 170 */
++	.long sys_getresgid16
++	.long sys_prctl
++	.long sys_rt_sigreturn
++	.long sys_rt_sigaction
++	.long sys_rt_sigprocmask	/* 175 */
++	.long sys_rt_sigpending
++	.long sys_rt_sigtimedwait
++	.long sys_rt_sigqueueinfo
++	.long sys_rt_sigsuspend
++	.long sys_pread_wrapper	   /* 180 */
++	.long sys_pwrite_wrapper
++	.long sys_chown16
++	.long sys_getcwd
++	.long sys_capget
++	.long sys_capset           /* 185 */
++	.long sys_sigaltstack
++	.long sys_sendfile
++	.long sys_ni_syscall	/* streams1 */
++	.long sys_ni_syscall	/* streams2 */
++	.long sys_vfork            /* 190 */
++	.long sys_getrlimit
++	.long sys_mmap2
++	.long sys_truncate64
++	.long sys_ftruncate64
++	.long sys_stat64		/* 195 */
++	.long sys_lstat64
++	.long sys_fstat64
++	.long sys_lchown
++	.long sys_getuid
++	.long sys_getgid		/* 200 */
++	.long sys_geteuid
++	.long sys_getegid
++	.long sys_setreuid
++	.long sys_setregid
++	.long sys_getgroups	/* 205 */
++	.long sys_setgroups
++	.long sys_fchown
++	.long sys_setresuid
++	.long sys_getresuid
++	.long sys_setresgid	/* 210 */
++	.long sys_getresgid
++	.long sys_chown
++	.long sys_setuid
++	.long sys_setgid
++	.long sys_setfsuid		/* 215 */
++	.long sys_setfsgid
++	.long sys_pivot_root
++	.long sys_mincore
++	.long sys_madvise
++	.long sys_getdents64	/* 220 */
++	.long sys_fcntl64
++	.long sys_ni_syscall	/* reserved for TUX */
++	.long sys_ni_syscall	/* Reserved for Security */
++	.long sys_gettid
++	.long sys_readahead	/* 225 */
++	.long sys_setxattr
++	.long sys_lsetxattr
++	.long sys_fsetxattr
++	.long sys_getxattr
++	.long sys_lgetxattr	/* 230 */
++	.long sys_fgetxattr
++	.long sys_listxattr
++	.long sys_llistxattr
++	.long sys_flistxattr
++	.long sys_removexattr	/* 235 */
++	.long sys_lremovexattr
++	.long sys_fremovexattr
++	.long sys_tkill
++	.long sys_sendfile64
++	.long sys_futex		/* 240 */
++	.long sys_sched_setaffinity
++	.long sys_sched_getaffinity
++	.long sys_ni_syscall
++	.long sys_ni_syscall
++	.long sys_io_setup	/* 245 */
++	.long sys_io_destroy
++	.long sys_io_getevents
++	.long sys_io_submit
++	.long sys_io_cancel
++	.long sys_fadvise64	/* 250 */
++	.long sys_ni_syscall
++	.long sys_exit_group
++	.long sys_lookup_dcookie
++	.long sys_epoll_create
++	.long sys_epoll_ctl	/* 255 */
++	.long sys_epoll_wait
++ 	.long sys_remap_file_pages
++ 	.long sys_set_tid_address
++ 	.long sys_timer_create
++ 	.long sys_timer_settime		/* 260 */
++ 	.long sys_timer_gettime
++ 	.long sys_timer_getoverrun
++ 	.long sys_timer_delete
++ 	.long sys_clock_settime
++ 	.long sys_clock_gettime		/* 265 */
++ 	.long sys_clock_getres
++ 	.long sys_clock_nanosleep
++	.long sys_statfs64
++	.long sys_fstatfs64
++	.long sys_tgkill		/* 270 */
++	.long sys_utimes
++ 	.long sys_fadvise64_64_wrapper
++	.long sys_ni_syscall	/* Reserved for vserver */
++	.long sys_mbind
++	.long sys_get_mempolicy		/* 275 */
++	.long sys_set_mempolicy
++	.long sys_mq_open
++	.long sys_mq_unlink
++	.long sys_mq_timedsend
++	.long sys_mq_timedreceive       /* 280 */
++	.long sys_mq_notify
++	.long sys_mq_getsetattr
++	.long sys_kexec_load
++	.long sys_waitid
++	.long sys_add_key		/* 285 */
++	.long sys_request_key
++	.long sys_keyctl
++	.long sys_ioprio_set
++	.long sys_ioprio_get
++	.long sys_inotify_init		/* 290 */
++	.long sys_inotify_add_watch
++	.long sys_inotify_rm_watch
++	.long sys_ni_syscall
++	.long sys_migrate_pages
++	.long sys_openat		/* 295 */
++	.long sys_mkdirat
++	.long sys_mknodat
++	.long sys_fchownat
++	.long sys_futimesat
++	.long sys_fstatat64		/* 300 */
++	.long sys_unlinkat
++	.long sys_renameat
++	.long sys_linkat
++	.long sys_symlinkat
++	.long sys_readlinkat		/* 305 */
++	.long sys_fchmodat
++	.long sys_faccessat
++	.long sys_pselect6
++	.long sys_ppoll
++	.long sys_unshare		/* 310 */
++	.long sys_set_robust_list
++	.long sys_get_robust_list
++	.long sys_splice
++	.long sys_sync_file_range
++	.long sys_tee			/* 315 */
++	.long sys_vmsplice
++	.long sys_move_pages
++	.long sys_getcpu
++	.long sys_epoll_pwait
++	.long sys_utimensat		/* 320 */
++	.long sys_signalfd
++	.long sys_timerfd
++	.long sys_eventfd
++	.long sys_fallocate
+diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
 new file mode 100644
-index 0000000..f42d4c0
+index 0000000..98a93ef
 --- /dev/null
-+++ b/arch/sh/kernel/head_64.S
-@@ -0,0 +1,356 @@
++++ b/arch/sh/kernel/syscalls_64.S
+@@ -0,0 +1,381 @@
 +/*
-+ * arch/sh/kernel/head_64.S
++ * arch/sh/kernel/syscalls_64.S
 + *
 + * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003, 2004  Paul Mundt
++ * Copyright (C) 2004 - 2007  Paul Mundt
++ * Copyright (C) 2003, 2004 Richard Curnow
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + */
-+#include <asm/page.h>
-+#include <asm/cache.h>
-+#include <asm/tlb.h>
-+#include <asm/cpu/registers.h>
-+#include <asm/cpu/mmu_context.h>
-+#include <asm/thread_info.h>
-+
-+/*
-+ * MMU defines: TLB boundaries.
-+ */
-+
-+#define MMUIR_FIRST	ITLB_FIXED
-+#define MMUIR_END	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
-+#define MMUIR_STEP	TLB_STEP
-+
-+#define MMUDR_FIRST	DTLB_FIXED
-+#define MMUDR_END	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
-+#define MMUDR_STEP	TLB_STEP
-+
-+/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
-+#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
-+#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
-+#endif
-+
-+/*
-+ * MMU defines: Fixed TLBs.
-+ */
-+/* Deal safely with the case where the base of RAM is not 512Mb aligned */
-+
-+#define ALIGN_512M_MASK (0xffffffffe0000000)
-+#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
-+#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
-+
-+#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
-+			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-+
-+#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
-+			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
-+
-+#define MMUDR_CACHED_H	0x0000000000000003 | ALIGNED_EFFECTIVE
-+			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-+#define MMUDR_CACHED_L	0x000000000000015a | ALIGNED_PHYSICAL
-+			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
 +
-+#ifdef CONFIG_CACHE_OFF
-+#define	ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
-+#else
-+#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
-+#endif
-+#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
-+
-+#if defined (CONFIG_CACHE_OFF)
-+#define	OCCR0_INIT_VAL	OCCR0_OFF			   /* D-cache: off  */
-+#elif defined (CONFIG_CACHE_WRITETHROUGH)
-+#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WT	   /* D-cache: on,   */
-+							   /* WT, invalidate */
-+#elif defined (CONFIG_CACHE_WRITEBACK)
-+#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	   /* D-cache: on,   */
-+							   /* WB, invalidate */
-+#else
-+#error preprocessor flag CONFIG_CACHE_... not recognized!
-+#endif
-+
-+#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			   /* No locking     */
-+
-+	.section	.empty_zero_page, "aw"
-+	.global empty_zero_page
-+
-+empty_zero_page:
-+	.long	1		/* MOUNT_ROOT_RDONLY */
-+	.long	0		/* RAMDISK_FLAGS */
-+	.long	0x0200		/* ORIG_ROOT_DEV */
-+	.long	1		/* LOADER_TYPE */
-+	.long	0x00800000	/* INITRD_START */
-+	.long	0x00800000	/* INITRD_SIZE */
-+	.long	0
-+
-+	.text
-+	.balign 4096,0,4096
-+
-+	.section	.data, "aw"
-+	.balign	PAGE_SIZE
-+
-+	.section	.data, "aw"
-+	.balign	PAGE_SIZE
-+
-+	.global mmu_pdtp_cache
-+mmu_pdtp_cache:
-+	.space PAGE_SIZE, 0
-+
-+	.global empty_bad_page
-+empty_bad_page:
-+	.space PAGE_SIZE, 0
-+
-+	.global empty_bad_pte_table
-+empty_bad_pte_table:
-+	.space PAGE_SIZE, 0
-+
-+	.global	fpu_in_use
-+fpu_in_use:	.quad	0
++#include <linux/sys.h>
 +
++	.section .data, "aw"
++	.balign 32
 +
-+	.section	.text.head, "ax"
-+	.balign L1_CACHE_BYTES
 +/*
-+ * Condition at the entry of __stext:
-+ * . Reset state:
-+ *   . SR.FD    = 1		(FPU disabled)
-+ *   . SR.BL    = 1		(Exceptions disabled)
-+ *   . SR.MD    = 1		(Privileged Mode)
-+ *   . SR.MMU   = 0		(MMU Disabled)
-+ *   . SR.CD    = 0		(CTC User Visible)
-+ *   . SR.IMASK = Undefined	(Interrupt Mask)
-+ *
-+ * Operations supposed to be performed by __stext:
-+ * . prevent speculative fetch onto device memory while MMU is off
-+ * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
-+ * . first, save CPU state and set it to something harmless
-+ * . any CPU detection and/or endianness settings (?)
-+ * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
-+ * . set initial TLB entries for cached and uncached regions
-+ *   (no fine granularity paging)
-+ * . set initial cache state
-+ * . enable MMU and caches
-+ * . set CPU to a consistent state
-+ *   . registers (including stack pointer and current/KCR0)
-+ *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
-+ *     at this stage. This is all to later Linux initialization steps.
-+ *   . initialize FPU
-+ * . clear BSS
-+ * . jump into start_kernel()
-+ * . be prepared to hopeless start_kernel() returns.
-+ *
++ * System calls jump table
 + */
-+	.global _stext
-+_stext:
-+	/*
-+	 * Prevent speculative fetch on device memory due to
-+	 * uninitialized target registers.
-+	 */
-+	ptabs/u	ZERO, tr0
-+	ptabs/u	ZERO, tr1
-+	ptabs/u	ZERO, tr2
-+	ptabs/u	ZERO, tr3
-+	ptabs/u	ZERO, tr4
-+	ptabs/u	ZERO, tr5
-+	ptabs/u	ZERO, tr6
-+	ptabs/u	ZERO, tr7
-+	synci
-+
-+	/*
-+	 * Read/Set CPU state. After this block:
-+	 * r29 = Initial SR
-+	 */
-+	getcon	SR, r29
-+	movi	SR_HARMLESS, r20
-+	putcon	r20, SR
-+
-+	/*
-+	 * Initialize EMI/LMI. To Be Done.
-+	 */
-+
-+	/*
-+	 * CPU detection and/or endianness settings (?). To Be Done.
-+	 * Pure PIC code here, please ! Just save state into r30.
-+         * After this block:
-+	 * r30 = CPU type/Platform Endianness
-+	 */
-+
-+	/*
-+	 * Set initial TLB entries for cached and uncached regions.
-+	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
-+	 */
-+	/* Clear ITLBs */
-+	pta	clear_ITLB, tr1
-+	movi	MMUIR_FIRST, r21
-+	movi	MMUIR_END, r22
-+clear_ITLB:
-+	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
-+	addi	r21, MMUIR_STEP, r21
-+        bne	r21, r22, tr1
-+
-+	/* Clear DTLBs */
-+	pta	clear_DTLB, tr1
-+	movi	MMUDR_FIRST, r21
-+	movi	MMUDR_END, r22
-+clear_DTLB:
-+	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
-+	addi	r21, MMUDR_STEP, r21
-+        bne	r21, r22, tr1
-+
-+	/* Map one big (512Mb) page for ITLB */
-+	movi	MMUIR_FIRST, r21
-+	movi	MMUIR_TEXT_L, r22	/* PTEL first */
-+	add.l	r22, r63, r22		/* Sign extend */
-+	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
-+	movi	MMUIR_TEXT_H, r22	/* PTEH last */
-+	add.l	r22, r63, r22		/* Sign extend */
-+	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */
-+
-+	/* Map one big CACHED (512Mb) page for DTLB */
-+	movi	MMUDR_FIRST, r21
-+	movi	MMUDR_CACHED_L, r22	/* PTEL first */
-+	add.l	r22, r63, r22		/* Sign extend */
-+	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
-+	movi	MMUDR_CACHED_H, r22	/* PTEH last */
-+	add.l	r22, r63, r22		/* Sign extend */
-+	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
-+
-+#ifdef CONFIG_EARLY_PRINTK
-+	/*
-+	 * Setup a DTLB translation for SCIF phys.
-+	 */
-+	addi    r21, MMUDR_STEP, r21
-+	movi    0x0a03, r22	/* SCIF phys */
-+	shori   0x0148, r22
-+	putcfg  r21, 1, r22	/* PTEL first */
-+	movi    0xfa03, r22	/* 0xfa030000, fixed SCIF virt */
-+	shori   0x0003, r22
-+	putcfg  r21, 0, r22	/* PTEH last */
-+#endif
-+
-+	/*
-+	 * Set cache behaviours.
-+	 */
-+	/* ICache */
-+	movi	ICCR_BASE, r21
-+	movi	ICCR0_INIT_VAL, r22
-+	movi	ICCR1_INIT_VAL, r23
-+	putcfg	r21, ICCR_REG0, r22
-+	putcfg	r21, ICCR_REG1, r23
-+
-+	/* OCache */
-+	movi	OCCR_BASE, r21
-+	movi	OCCR0_INIT_VAL, r22
-+	movi	OCCR1_INIT_VAL, r23
-+	putcfg	r21, OCCR_REG0, r22
-+	putcfg	r21, OCCR_REG1, r23
-+
-+
-+	/*
-+	 * Enable Caches and MMU. Do the first non-PIC jump.
-+         * Now head.S global variables, constants and externs
-+	 * can be used.
-+	 */
-+	getcon	SR, r21
-+	movi	SR_ENABLE_MMU, r22
-+	or	r21, r22, r21
-+	putcon	r21, SSR
-+	movi	hyperspace, r22
-+	ori	r22, 1, r22	    /* Make it SHmedia, not required but..*/
-+	putcon	r22, SPC
-+	synco
-+	rte			    /* And now go into the hyperspace ... */
-+hyperspace:			    /* ... that's the next instruction !  */
-+
-+	/*
-+	 * Set CPU to a consistent state.
-+	 * r31 = FPU support flag
-+	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
-+	 */
-+	movi	start_kernel, r32
-+	ori	r32, 1, r32
-+
-+	ptabs	r32, tr0		    /* r32 = _start_kernel address        */
-+	pta/u	hopeless, tr1
-+	pta/u	hopeless, tr2
-+	pta/u	hopeless, tr3
-+	pta/u	hopeless, tr4
-+	pta/u	hopeless, tr5
-+	pta/u	hopeless, tr6
-+	pta/u	hopeless, tr7
-+	gettr	tr1, r28			/* r28 = hopeless address */
-+
-+	/* Set initial stack pointer */
-+	movi	init_thread_union, SP
-+	putcon	SP, KCR0		/* Set current to init_task */
-+	movi	THREAD_SIZE, r22	/* Point to the end */
-+	add	SP, r22, SP
-+
-+	/*
-+	 * Initialize FPU.
-+	 * Keep FPU flag in r31. After this block:
-+	 * r31 = FPU flag
-+	 */
-+	movi fpu_in_use, r31	/* Temporary */
-+
-+#ifdef CONFIG_SH_FPU
-+	getcon	SR, r21
-+	movi	SR_ENABLE_FPU, r22
-+	and	r21, r22, r22
-+	putcon	r22, SR			/* Try to enable */
-+	getcon	SR, r22
-+	xor	r21, r22, r21
-+	shlri	r21, 15, r21		/* Supposedly 0/1 */
-+	st.q	r31, 0 , r21		/* Set fpu_in_use */
-+#else
-+	movi	0, r21
-+	st.q	r31, 0 , r21		/* Set fpu_in_use */
-+#endif
-+	or	r21, ZERO, r31		/* Set FPU flag at last */
-+
-+#ifndef CONFIG_SH_NO_BSS_INIT
-+/* Don't clear BSS if running on slow platforms such as an RTL simulation,
-+   remote memory via SHdebug link, etc.  For these the memory can be guaranteed
-+   to be all zero on boot anyway. */
-+	/*
-+	 * Clear bss
-+	 */
-+	pta	clear_quad, tr1
-+	movi	__bss_start, r22
-+	movi	_end, r23
-+clear_quad:
-+	st.q	r22, 0, ZERO
-+	addi	r22, 8, r22
-+	bne	r22, r23, tr1		/* Both quad aligned, see vmlinux.lds.S */
-+#endif
-+	pta/u	hopeless, tr1
-+
-+	/* Say bye to head.S but be prepared to wrongly get back ... */
-+	blink	tr0, LINK
-+
-+	/* If we ever get back here through LINK/tr1-tr7 */
-+	pta/u	hopeless, tr7
-+
-+hopeless:
-+	/*
-+	 * Something's badly wrong here. Loop endlessly,
-+         * there's nothing more we can do about it.
-+	 *
-+	 * Note on hopeless: it can be jumped into invariably
-+	 * before or after jumping into hyperspace. The only
-+	 * requirement is to be PIC called (PTA) before and
-+	 * any way (PTA/PTABS) after. According to Virtual
-+	 * to Physical mapping a simulator/emulator can easily
-+	 * tell where we came here from just looking at hopeless
-+	 * (PC) address.
-+	 *
-+	 * For debugging purposes:
-+	 * (r28) hopeless/loop address
-+	 * (r29) Original SR
-+	 * (r30) CPU type/Platform endianness
-+	 * (r31) FPU Support
-+	 * (r32) _start_kernel address
-+	 */
-+	blink	tr7, ZERO
-diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
-index 4b449c4..f9bcc60 100644
---- a/arch/sh/kernel/init_task.c
-+++ b/arch/sh/kernel/init_task.c
-@@ -11,8 +11,8 @@ static struct fs_struct init_fs = INIT_FS;
- static struct files_struct init_files = INIT_FILES;
- static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
- static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-+struct pt_regs fake_swapper_regs;
- struct mm_struct init_mm = INIT_MM(init_mm);
--
- EXPORT_SYMBOL(init_mm);
- 
- /*
-@@ -22,7 +22,7 @@ EXPORT_SYMBOL(init_mm);
-  * way process stacks are handled. This is done by having a special
-  * "init_task" linker map entry..
-  */
--union thread_union init_thread_union 
-+union thread_union init_thread_union
- 	__attribute__((__section__(".data.init_task"))) =
- 		{ INIT_THREAD_INFO(init_task) };
- 
-diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
-index 501fe03..71c9fde 100644
---- a/arch/sh/kernel/io.c
-+++ b/arch/sh/kernel/io.c
-@@ -61,73 +61,6 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
- }
- EXPORT_SYMBOL(memset_io);
- 
--void __raw_readsl(unsigned long addr, void *datap, int len)
--{
--	u32 *data;
--
--	for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
--		*data++ = ctrl_inl(addr);
--
--	if (likely(len >= (0x20 >> 2))) {
--		int tmp2, tmp3, tmp4, tmp5, tmp6;
--
--		__asm__ __volatile__(
--			"1:			\n\t"
--			"mov.l	@%7, r0		\n\t"
--			"mov.l	@%7, %2		\n\t"
--#ifdef CONFIG_CPU_SH4
--			"movca.l r0, @%0	\n\t"
--#else
--			"mov.l	r0, @%0		\n\t"
--#endif
--			"mov.l	@%7, %3		\n\t"
--			"mov.l	@%7, %4		\n\t"
--			"mov.l	@%7, %5		\n\t"
--			"mov.l	@%7, %6		\n\t"
--			"mov.l	@%7, r7		\n\t"
--			"mov.l	@%7, r0		\n\t"
--			"mov.l	%2, @(0x04,%0)	\n\t"
--			"mov	#0x20>>2, %2	\n\t"
--			"mov.l	%3, @(0x08,%0)	\n\t"
--			"sub	%2, %1		\n\t"
--			"mov.l	%4, @(0x0c,%0)	\n\t"
--			"cmp/hi	%1, %2		! T if 32 > len	\n\t"
--			"mov.l	%5, @(0x10,%0)	\n\t"
--			"mov.l	%6, @(0x14,%0)	\n\t"
--			"mov.l	r7, @(0x18,%0)	\n\t"
--			"mov.l	r0, @(0x1c,%0)	\n\t"
--			"bf.s	1b		\n\t"
--			" add	#0x20, %0	\n\t"
--			: "=&r" (data), "=&r" (len),
--			  "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
--			  "=&r" (tmp5), "=&r" (tmp6)
--			: "r"(addr), "0" (data), "1" (len)
--			: "r0", "r7", "t", "memory");
--	}
--
--	for (; len != 0; len--)
--		*data++ = ctrl_inl(addr);
--}
--EXPORT_SYMBOL(__raw_readsl);
--
--void __raw_writesl(unsigned long addr, const void *data, int len)
--{
--	if (likely(len != 0)) {
--		int tmp1;
--
--		__asm__ __volatile__ (
--			"1:				\n\t"
--			"mov.l	@%0+, %1	\n\t"
--			"dt		%3		\n\t"
--			"bf.s		1b		\n\t"
--			" mov.l	%1, @%4		\n\t"
--			: "=&r" (data), "=&r" (tmp1)
--			: "0" (data), "r" (len), "r"(addr)
--			: "t", "memory");
--	}
--}
--EXPORT_SYMBOL(__raw_writesl);
--
- void __iomem *ioport_map(unsigned long port, unsigned int nr)
- {
- 	return sh_mv.mv_ioport_map(port, nr);
-diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
-index 142a4e5..b3d0a03 100644
---- a/arch/sh/kernel/module.c
-+++ b/arch/sh/kernel/module.c
-@@ -1,5 +1,15 @@
- /*  Kernel module help for SH.
- 
-+    SHcompact version by Kaz Kojima and Paul Mundt.
-+
-+    SHmedia bits:
-+
-+	Copyright 2004 SuperH (UK) Ltd
-+	Author: Richard Curnow
-+
-+	Based on the sh version, and on code from the sh64-specific parts of
-+	modutils, originally written by Richard Curnow and Ben Gaster.
-+
-     This program is free software; you can redistribute it and/or modify
-     it under the terms of the GNU General Public License as published by
-     the Free Software Foundation; either version 2 of the License, or
-@@ -21,12 +31,6 @@
- #include <linux/string.h>
- #include <linux/kernel.h>
- 
--#if 0
--#define DEBUGP printk
--#else
--#define DEBUGP(fmt...)
--#endif
--
- void *module_alloc(unsigned long size)
- {
- 	if (size == 0)
-@@ -52,6 +56,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
- 	return 0;
- }
- 
-+#ifdef CONFIG_SUPERH32
- #define COPY_UNALIGNED_WORD(sw, tw, align) \
- { \
- 	void *__s = &(sw), *__t = &(tw); \
-@@ -74,6 +79,10 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
- 		break; \
- 	} \
- }
-+#else
-+/* One thing SHmedia doesn't screw up! */
-+#define COPY_UNALIGNED_WORD(sw, tw, align)	{ (tw) = (sw); }
-+#endif
- 
- int apply_relocate_add(Elf32_Shdr *sechdrs,
- 		   const char *strtab,
-@@ -89,8 +98,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
- 	uint32_t value;
- 	int align;
- 
--	DEBUGP("Applying relocate section %u to %u\n", relsec,
--	       sechdrs[relsec].sh_info);
-+	pr_debug("Applying relocate section %u to %u\n", relsec,
-+		 sechdrs[relsec].sh_info);
- 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- 		/* This is where to make the change */
- 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
-@@ -102,17 +111,44 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
- 		relocation = sym->st_value + rel[i].r_addend;
- 		align = (int)location & 3;
- 
-+#ifdef CONFIG_SUPERH64
-+		/* For text addresses, bit2 of the st_other field indicates
-+		 * whether the symbol is SHmedia (1) or SHcompact (0).  If
-+		 * SHmedia, the LSB of the symbol needs to be asserted
-+		 * for the CPU to be in SHmedia mode when it starts executing
-+		 * the branch target. */
-+		relocation |= (sym->st_other & 4);
-+#endif
-+
- 		switch (ELF32_R_TYPE(rel[i].r_info)) {
- 		case R_SH_DIR32:
--	    		COPY_UNALIGNED_WORD (*location, value, align);
-+			COPY_UNALIGNED_WORD (*location, value, align);
- 			value += relocation;
--	    		COPY_UNALIGNED_WORD (value, *location, align);
-+			COPY_UNALIGNED_WORD (value, *location, align);
- 			break;
- 		case R_SH_REL32:
--	  		relocation = (relocation - (Elf32_Addr) location);
--	    		COPY_UNALIGNED_WORD (*location, value, align);
-+			relocation = (relocation - (Elf32_Addr) location);
-+			COPY_UNALIGNED_WORD (*location, value, align);
- 			value += relocation;
--	    		COPY_UNALIGNED_WORD (value, *location, align);
-+			COPY_UNALIGNED_WORD (value, *location, align);
-+			break;
-+		case R_SH_IMM_LOW16:
-+			*location = (*location & ~0x3fffc00) |
-+				((relocation & 0xffff) << 10);
-+			break;
-+		case R_SH_IMM_MEDLOW16:
-+			*location = (*location & ~0x3fffc00) |
-+				(((relocation >> 16) & 0xffff) << 10);
-+			break;
-+		case R_SH_IMM_LOW16_PCREL:
-+			relocation -= (Elf32_Addr) location;
-+			*location = (*location & ~0x3fffc00) |
-+				((relocation & 0xffff) << 10);
-+			break;
-+		case R_SH_IMM_MEDLOW16_PCREL:
-+			relocation -= (Elf32_Addr) location;
-+			*location = (*location & ~0x3fffc00) |
-+				(((relocation >> 16) & 0xffff) << 10);
- 			break;
- 		default:
- 			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
-diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
++	.globl  sys_call_table
++sys_call_table:
++	.long sys_restart_syscall	/* 0  -  old "setup()" system call  */
++	.long sys_exit
++	.long sys_fork
++	.long sys_read
++	.long sys_write
++	.long sys_open			/* 5 */
++	.long sys_close
++	.long sys_waitpid
++	.long sys_creat
++	.long sys_link
++	.long sys_unlink		/* 10 */
++	.long sys_execve
++	.long sys_chdir
++	.long sys_time
++	.long sys_mknod
++	.long sys_chmod			/* 15 */
++	.long sys_lchown16
++	.long sys_ni_syscall	/* old break syscall holder */
++	.long sys_stat
++	.long sys_lseek
++	.long sys_getpid		/* 20 */
++	.long sys_mount
++	.long sys_oldumount
++	.long sys_setuid16
++	.long sys_getuid16
++	.long sys_stime			/* 25 */
++	.long sh64_ptrace
++	.long sys_alarm
++	.long sys_fstat
++	.long sys_pause
++	.long sys_utime			/* 30 */
++	.long sys_ni_syscall	/* old stty syscall holder */
++	.long sys_ni_syscall	/* old gtty syscall holder */
++	.long sys_access
++	.long sys_nice
++	.long sys_ni_syscall		/* 35 */ /* old ftime syscall holder */
++	.long sys_sync
++	.long sys_kill
++	.long sys_rename
++	.long sys_mkdir
++	.long sys_rmdir			/* 40 */
++	.long sys_dup
++	.long sys_pipe
++	.long sys_times
++	.long sys_ni_syscall	/* old prof syscall holder */
++	.long sys_brk			/* 45 */
++	.long sys_setgid16
++	.long sys_getgid16
++	.long sys_signal
++	.long sys_geteuid16
++	.long sys_getegid16		/* 50 */
++	.long sys_acct
++	.long sys_umount		/* recycled never used phys( */
++	.long sys_ni_syscall	/* old lock syscall holder */
++	.long sys_ioctl
++	.long sys_fcntl			/* 55 */
++	.long sys_ni_syscall	/* old mpx syscall holder */
++	.long sys_setpgid
++	.long sys_ni_syscall	/* old ulimit syscall holder */
++	.long sys_ni_syscall	/* sys_olduname */
++	.long sys_umask			/* 60 */
++	.long sys_chroot
++	.long sys_ustat
++	.long sys_dup2
++	.long sys_getppid
++	.long sys_getpgrp		/* 65 */
++	.long sys_setsid
++	.long sys_sigaction
++	.long sys_sgetmask
++	.long sys_ssetmask
++	.long sys_setreuid16		/* 70 */
++	.long sys_setregid16
++	.long sys_sigsuspend
++	.long sys_sigpending
++	.long sys_sethostname
++	.long sys_setrlimit		/* 75 */
++	.long sys_old_getrlimit
++	.long sys_getrusage
++	.long sys_gettimeofday
++	.long sys_settimeofday
++	.long sys_getgroups16		/* 80 */
++	.long sys_setgroups16
++	.long sys_ni_syscall	/* sys_oldselect */
++	.long sys_symlink
++	.long sys_lstat
++	.long sys_readlink		/* 85 */
++	.long sys_uselib
++	.long sys_swapon
++	.long sys_reboot
++	.long old_readdir
++	.long old_mmap			/* 90 */
++	.long sys_munmap
++	.long sys_truncate
++	.long sys_ftruncate
++	.long sys_fchmod
++	.long sys_fchown16		/* 95 */
++	.long sys_getpriority
++	.long sys_setpriority
++	.long sys_ni_syscall	/* old profil syscall holder */
++	.long sys_statfs
++	.long sys_fstatfs		/* 100 */
++	.long sys_ni_syscall	/* ioperm */
++	.long sys_socketcall	/* Obsolete implementation of socket syscall */
++	.long sys_syslog
++	.long sys_setitimer
++	.long sys_getitimer		/* 105 */
++	.long sys_newstat
++	.long sys_newlstat
++	.long sys_newfstat
++	.long sys_uname
++	.long sys_ni_syscall		/* 110 */ /* iopl */
++	.long sys_vhangup
++	.long sys_ni_syscall	/* idle */
++	.long sys_ni_syscall	/* vm86old */
++	.long sys_wait4
++	.long sys_swapoff		/* 115 */
++	.long sys_sysinfo
++	.long sys_ipc		/* Obsolete ipc syscall implementation */
++	.long sys_fsync
++	.long sys_sigreturn
++	.long sys_clone			/* 120 */
++	.long sys_setdomainname
++	.long sys_newuname
++	.long sys_ni_syscall	/* sys_modify_ldt */
++	.long sys_adjtimex
++	.long sys_mprotect		/* 125 */
++	.long sys_sigprocmask
++	.long sys_ni_syscall		/* old "create_module" */
++	.long sys_init_module
++	.long sys_delete_module
++	.long sys_ni_syscall		/* 130: old "get_kernel_syms" */
++	.long sys_quotactl
++	.long sys_getpgid
++	.long sys_fchdir
++	.long sys_bdflush
++	.long sys_sysfs			/* 135 */
++	.long sys_personality
++	.long sys_ni_syscall	/* for afs_syscall */
++	.long sys_setfsuid16
++	.long sys_setfsgid16
++	.long sys_llseek		/* 140 */
++	.long sys_getdents
++	.long sys_select
++	.long sys_flock
++	.long sys_msync
++	.long sys_readv			/* 145 */
++	.long sys_writev
++	.long sys_getsid
++	.long sys_fdatasync
++	.long sys_sysctl
++	.long sys_mlock			/* 150 */
++	.long sys_munlock
++	.long sys_mlockall
++	.long sys_munlockall
++	.long sys_sched_setparam
++	.long sys_sched_getparam	/* 155 */
++	.long sys_sched_setscheduler
++	.long sys_sched_getscheduler
++	.long sys_sched_yield
++	.long sys_sched_get_priority_max
++	.long sys_sched_get_priority_min  /* 160 */
++	.long sys_sched_rr_get_interval
++	.long sys_nanosleep
++	.long sys_mremap
++	.long sys_setresuid16
++	.long sys_getresuid16		/* 165 */
++	.long sys_ni_syscall	/* vm86 */
++	.long sys_ni_syscall	/* old "query_module" */
++	.long sys_poll
++	.long sys_nfsservctl
++	.long sys_setresgid16		/* 170 */
++	.long sys_getresgid16
++	.long sys_prctl
++	.long sys_rt_sigreturn
++	.long sys_rt_sigaction
++	.long sys_rt_sigprocmask	/* 175 */
++	.long sys_rt_sigpending
++	.long sys_rt_sigtimedwait
++	.long sys_rt_sigqueueinfo
++	.long sys_rt_sigsuspend
++	.long sys_pread64		/* 180 */
++	.long sys_pwrite64
++	.long sys_chown16
++	.long sys_getcwd
++	.long sys_capget
++	.long sys_capset		/* 185 */
++	.long sys_sigaltstack
++	.long sys_sendfile
++	.long sys_ni_syscall	/* streams1 */
++	.long sys_ni_syscall	/* streams2 */
++	.long sys_vfork			/* 190 */
++	.long sys_getrlimit
++	.long sys_mmap2
++	.long sys_truncate64
++	.long sys_ftruncate64
++	.long sys_stat64		/* 195 */
++	.long sys_lstat64
++	.long sys_fstat64
++	.long sys_lchown
++	.long sys_getuid
++	.long sys_getgid		/* 200 */
++	.long sys_geteuid
++	.long sys_getegid
++	.long sys_setreuid
++	.long sys_setregid
++	.long sys_getgroups		/* 205 */
++	.long sys_setgroups
++	.long sys_fchown
++	.long sys_setresuid
++	.long sys_getresuid
++	.long sys_setresgid		/* 210 */
++	.long sys_getresgid
++	.long sys_chown
++	.long sys_setuid
++	.long sys_setgid
++	.long sys_setfsuid		/* 215 */
++	.long sys_setfsgid
++	.long sys_pivot_root
++	.long sys_mincore
++	.long sys_madvise
++	/* Broken-out socket family (maintain backwards compatibility in syscall
++	   numbering with 2.4) */
++	.long sys_socket		/* 220 */
++	.long sys_bind
++	.long sys_connect
++	.long sys_listen
++	.long sys_accept
++	.long sys_getsockname		/* 225 */
++	.long sys_getpeername
++	.long sys_socketpair
++	.long sys_send
++	.long sys_sendto
++	.long sys_recv			/* 230*/
++	.long sys_recvfrom
++	.long sys_shutdown
++	.long sys_setsockopt
++	.long sys_getsockopt
++	.long sys_sendmsg		/* 235 */
++	.long sys_recvmsg
++	/* Broken-out IPC family (maintain backwards compatibility in syscall
++	   numbering with 2.4) */
++	.long sys_semop
++	.long sys_semget
++	.long sys_semctl
++	.long sys_msgsnd		/* 240 */
++	.long sys_msgrcv
++	.long sys_msgget
++	.long sys_msgctl
++	.long sys_shmat
++	.long sys_shmdt			/* 245 */
++	.long sys_shmget
++	.long sys_shmctl
++	/* Rest of syscalls listed in 2.4 i386 unistd.h */
++	.long sys_getdents64
++	.long sys_fcntl64
++	.long sys_ni_syscall		/* 250 reserved for TUX */
++	.long sys_ni_syscall		/* Reserved for Security */
++	.long sys_gettid
++	.long sys_readahead
++	.long sys_setxattr
++	.long sys_lsetxattr		/* 255 */
++	.long sys_fsetxattr
++	.long sys_getxattr
++	.long sys_lgetxattr
++	.long sys_fgetxattr
++	.long sys_listxattr		/* 260 */
++	.long sys_llistxattr
++	.long sys_flistxattr
++	.long sys_removexattr
++	.long sys_lremovexattr
++	.long sys_fremovexattr  	/* 265 */
++	.long sys_tkill
++	.long sys_sendfile64
++	.long sys_futex
++	.long sys_sched_setaffinity
++	.long sys_sched_getaffinity	/* 270 */
++	.long sys_ni_syscall
++	.long sys_ni_syscall
++	.long sys_io_setup
++	.long sys_io_destroy
++	.long sys_io_getevents		/* 275 */
++	.long sys_io_submit
++	.long sys_io_cancel
++	.long sys_fadvise64
++	.long sys_ni_syscall
++	.long sys_exit_group		/* 280 */
++	/* Rest of new 2.6 syscalls */
++	.long sys_lookup_dcookie
++	.long sys_epoll_create
++	.long sys_epoll_ctl
++	.long sys_epoll_wait
++ 	.long sys_remap_file_pages	/* 285 */
++ 	.long sys_set_tid_address
++ 	.long sys_timer_create
++ 	.long sys_timer_settime
++ 	.long sys_timer_gettime
++ 	.long sys_timer_getoverrun	/* 290 */
++ 	.long sys_timer_delete
++ 	.long sys_clock_settime
++ 	.long sys_clock_gettime
++ 	.long sys_clock_getres
++ 	.long sys_clock_nanosleep	/* 295 */
++	.long sys_statfs64
++	.long sys_fstatfs64
++	.long sys_tgkill
++	.long sys_utimes
++ 	.long sys_fadvise64_64		/* 300 */
++	.long sys_ni_syscall	/* Reserved for vserver */
++	.long sys_ni_syscall	/* Reserved for mbind */
++	.long sys_ni_syscall	/* get_mempolicy */
++	.long sys_ni_syscall	/* set_mempolicy */
++	.long sys_mq_open		/* 305 */
++	.long sys_mq_unlink
++	.long sys_mq_timedsend
++	.long sys_mq_timedreceive
++	.long sys_mq_notify
++	.long sys_mq_getsetattr		/* 310 */
++	.long sys_ni_syscall	/* Reserved for kexec */
++	.long sys_waitid
++	.long sys_add_key
++	.long sys_request_key
++	.long sys_keyctl		/* 315 */
++	.long sys_ioprio_set
++	.long sys_ioprio_get
++	.long sys_inotify_init
++	.long sys_inotify_add_watch
++	.long sys_inotify_rm_watch	/* 320 */
++	.long sys_ni_syscall
++	.long sys_migrate_pages
++	.long sys_openat
++	.long sys_mkdirat
++	.long sys_mknodat		/* 325 */
++	.long sys_fchownat
++	.long sys_futimesat
++	.long sys_fstatat64
++	.long sys_unlinkat
++	.long sys_renameat		/* 330 */
++	.long sys_linkat
++	.long sys_symlinkat
++	.long sys_readlinkat
++	.long sys_fchmodat
++	.long sys_faccessat		/* 335 */
++	.long sys_pselect6
++	.long sys_ppoll
++	.long sys_unshare
++	.long sys_set_robust_list
++	.long sys_get_robust_list	/* 340 */
++	.long sys_splice
++	.long sys_sync_file_range
++	.long sys_tee
++	.long sys_vmsplice
++	.long sys_move_pages		/* 345 */
++	.long sys_getcpu
++	.long sys_epoll_pwait
++	.long sys_utimensat
++	.long sys_signalfd
++	.long sys_timerfd		/* 350 */
++	.long sys_eventfd
++	.long sys_fallocate
+diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
 deleted file mode 100644
-index 6d7f2b0..0000000
---- a/arch/sh/kernel/process.c
+index a3a67d1..0000000
+--- a/arch/sh/kernel/time.c
 +++ /dev/null
-@@ -1,558 +0,0 @@
+@@ -1,269 +0,0 @@
 -/*
-- * arch/sh/kernel/process.c
-- *
-- * This file handles the architecture-dependent parts of process handling..
+- *  arch/sh/kernel/time.c
 - *
-- *  Copyright (C) 1995  Linus Torvalds
+- *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
+- *  Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
+- *  Copyright (C) 2002 - 2007  Paul Mundt
+- *  Copyright (C) 2002  M. R. Brown  <mrbrown@linux-sh.org>
 - *
-- *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
-- *		     Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
-- *		     Copyright (C) 2002 - 2007  Paul Mundt
+- *  Some code taken from i386 version.
+- *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 - */
+-#include <linux/kernel.h>
 -#include <linux/module.h>
--#include <linux/mm.h>
--#include <linux/elfcore.h>
--#include <linux/pm.h>
--#include <linux/kallsyms.h>
--#include <linux/kexec.h>
--#include <linux/kdebug.h>
--#include <linux/tick.h>
--#include <linux/reboot.h>
--#include <linux/fs.h>
--#include <linux/preempt.h>
--#include <asm/uaccess.h>
--#include <asm/mmu_context.h>
--#include <asm/pgalloc.h>
--#include <asm/system.h>
--#include <asm/ubc.h>
--
--static int hlt_counter;
--int ubc_usercnt = 0;
+-#include <linux/init.h>
+-#include <linux/profile.h>
+-#include <linux/timex.h>
+-#include <linux/sched.h>
+-#include <linux/clockchips.h>
+-#include <asm/clock.h>
+-#include <asm/rtc.h>
+-#include <asm/timer.h>
+-#include <asm/kgdb.h>
 -
--void (*pm_idle)(void);
--void (*pm_power_off)(void);
--EXPORT_SYMBOL(pm_power_off);
+-struct sys_timer *sys_timer;
 -
--void disable_hlt(void)
--{
--	hlt_counter++;
--}
--EXPORT_SYMBOL(disable_hlt);
+-/* Move this somewhere more sensible.. */
+-DEFINE_SPINLOCK(rtc_lock);
+-EXPORT_SYMBOL(rtc_lock);
 -
--void enable_hlt(void)
+-/* Dummy RTC ops */
+-static void null_rtc_get_time(struct timespec *tv)
 -{
--	hlt_counter--;
+-	tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
+-	tv->tv_nsec = 0;
 -}
--EXPORT_SYMBOL(enable_hlt);
 -
--static int __init nohlt_setup(char *__unused)
+-static int null_rtc_set_time(const time_t secs)
 -{
--	hlt_counter = 1;
--	return 1;
+-	return 0;
 -}
--__setup("nohlt", nohlt_setup);
 -
--static int __init hlt_setup(char *__unused)
+-/*
+- * Null high precision timer functions for systems lacking one.
+- */
+-static cycle_t null_hpt_read(void)
 -{
--	hlt_counter = 0;
--	return 1;
+-	return 0;
 -}
--__setup("hlt", hlt_setup);
 -
--void default_idle(void)
--{
--	if (!hlt_counter) {
--		clear_thread_flag(TIF_POLLING_NRFLAG);
--		smp_mb__after_clear_bit();
--		set_bl_bit();
--		while (!need_resched())
--			cpu_sleep();
--		clear_bl_bit();
--		set_thread_flag(TIF_POLLING_NRFLAG);
--	} else
--		while (!need_resched())
--			cpu_relax();
--}
+-void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
+-int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
 -
--void cpu_idle(void)
+-#ifndef CONFIG_GENERIC_TIME
+-void do_gettimeofday(struct timeval *tv)
 -{
--	set_thread_flag(TIF_POLLING_NRFLAG);
--
--	/* endless idle loop with no priority at all */
--	while (1) {
--		void (*idle)(void) = pm_idle;
--
--		if (!idle)
--			idle = default_idle;
+-	unsigned long flags;
+-	unsigned long seq;
+-	unsigned long usec, sec;
 -
--		tick_nohz_stop_sched_tick();
--		while (!need_resched())
--			idle();
--		tick_nohz_restart_sched_tick();
+-	do {
+-		/*
+-		 * Turn off IRQs when grabbing xtime_lock, so that
+-		 * the sys_timer get_offset code doesn't have to handle it.
+-		 */
+-		seq = read_seqbegin_irqsave(&xtime_lock, flags);
+-		usec = get_timer_offset();
+-		sec = xtime.tv_sec;
+-		usec += xtime.tv_nsec / NSEC_PER_USEC;
+-	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 -
--		preempt_enable_no_resched();
--		schedule();
--		preempt_disable();
--		check_pgt_cache();
+-	while (usec >= 1000000) {
+-		usec -= 1000000;
+-		sec++;
 -	}
--}
--
--void machine_restart(char * __unused)
--{
--	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
--	asm volatile("ldc %0, sr\n\t"
--		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
--}
--
--void machine_halt(void)
--{
--	local_irq_disable();
 -
--	while (1)
--		cpu_sleep();
--}
--
--void machine_power_off(void)
--{
--	if (pm_power_off)
--		pm_power_off();
+-	tv->tv_sec = sec;
+-	tv->tv_usec = usec;
 -}
+-EXPORT_SYMBOL(do_gettimeofday);
 -
--void show_regs(struct pt_regs * regs)
+-int do_settimeofday(struct timespec *tv)
 -{
--	printk("\n");
--	printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
--	print_symbol("PC is at %s\n", instruction_pointer(regs));
--	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
--	       regs->pc, regs->regs[15], regs->sr);
--#ifdef CONFIG_MMU
--	printk("TEA : %08x    ", ctrl_inl(MMU_TEA));
--#else
--	printk("                  ");
--#endif
--	printk("%s\n", print_tainted());
--
--	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
--	       regs->regs[0],regs->regs[1],
--	       regs->regs[2],regs->regs[3]);
--	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
--	       regs->regs[4],regs->regs[5],
--	       regs->regs[6],regs->regs[7]);
--	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
--	       regs->regs[8],regs->regs[9],
--	       regs->regs[10],regs->regs[11]);
--	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
--	       regs->regs[12],regs->regs[13],
--	       regs->regs[14]);
--	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
--	       regs->mach, regs->macl, regs->gbr, regs->pr);
--
--	show_trace(NULL, (unsigned long *)regs->regs[15], regs);
--}
+-	time_t wtm_sec, sec = tv->tv_sec;
+-	long wtm_nsec, nsec = tv->tv_nsec;
 -
--/*
-- * Create a kernel thread
-- */
+-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+-		return -EINVAL;
 -
--/*
-- * This is the mechanism for creating a new kernel thread.
-- *
-- */
--extern void kernel_thread_helper(void);
--__asm__(".align 5\n"
--	"kernel_thread_helper:\n\t"
--	"jsr	@r5\n\t"
--	" nop\n\t"
--	"mov.l	1f, r1\n\t"
--	"jsr	@r1\n\t"
--	" mov	r0, r4\n\t"
--	".align 2\n\t"
--	"1:.long do_exit");
+-	write_seqlock_irq(&xtime_lock);
+-	/*
+-	 * This is revolting. We need to set "xtime" correctly. However, the
+-	 * value in this location is the value at the most recent update of
+-	 * wall time.  Discover what correction gettimeofday() would have
+-	 * made, and then undo it!
+-	 */
+-	nsec -= get_timer_offset() * NSEC_PER_USEC;
 -
--/* Don't use this in BL=1(cli).  Or else, CPU resets! */
--int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
--{
--	struct pt_regs regs;
+-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
 -
--	memset(&regs, 0, sizeof(regs));
--	regs.regs[4] = (unsigned long)arg;
--	regs.regs[5] = (unsigned long)fn;
+-	set_normalized_timespec(&xtime, sec, nsec);
+-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 -
--	regs.pc = (unsigned long)kernel_thread_helper;
--	regs.sr = (1 << 30);
+-	ntp_clear();
+-	write_sequnlock_irq(&xtime_lock);
+-	clock_was_set();
 -
--	/* Ok, create the new process.. */
--	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
--		       &regs, 0, NULL, NULL);
+-	return 0;
 -}
+-EXPORT_SYMBOL(do_settimeofday);
+-#endif /* !CONFIG_GENERIC_TIME */
+-
+-#ifndef CONFIG_GENERIC_CLOCKEVENTS
+-/* last time the RTC clock got updated */
+-static long last_rtc_update;
 -
 -/*
-- * Free current thread data structures etc..
+- * handle_timer_tick() needs to keep up the real-time clock,
+- * as well as call the "do_timer()" routine every clocktick
 - */
--void exit_thread(void)
--{
--	if (current->thread.ubc_pc) {
--		current->thread.ubc_pc = 0;
--		ubc_usercnt -= 1;
--	}
--}
--
--void flush_thread(void)
+-void handle_timer_tick(void)
 -{
--#if defined(CONFIG_SH_FPU)
--	struct task_struct *tsk = current;
--	/* Forget lazy FPU state */
--	clear_fpu(tsk, task_pt_regs(tsk));
--	clear_used_math();
+-	do_timer(1);
+-#ifndef CONFIG_SMP
+-	update_process_times(user_mode(get_irq_regs()));
 -#endif
--}
--
--void release_thread(struct task_struct *dead_task)
--{
--	/* do nothing */
--}
--
--/* Fill in the fpu structure for a core dump.. */
--int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
--{
--	int fpvalid = 0;
--
--#if defined(CONFIG_SH_FPU)
--	struct task_struct *tsk = current;
+-	if (current->pid)
+-		profile_tick(CPU_PROFILING);
 -
--	fpvalid = !!tsk_used_math(tsk);
--	if (fpvalid) {
--		unlazy_fpu(tsk, regs);
--		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
--	}
+-#ifdef CONFIG_HEARTBEAT
+-	if (sh_mv.mv_heartbeat != NULL)
+-		sh_mv.mv_heartbeat();
 -#endif
 -
--	return fpvalid;
--}
--
--/*
-- * Capture the user space registers if the task is not running (in user space)
-- */
--int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
--{
--	struct pt_regs ptregs;
--
--	ptregs = *task_pt_regs(tsk);
--	elf_core_copy_regs(regs, &ptregs);
--
--	return 1;
--}
--
--int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
--{
--	int fpvalid = 0;
--
--#if defined(CONFIG_SH_FPU)
--	fpvalid = !!tsk_used_math(tsk);
--	if (fpvalid) {
--		unlazy_fpu(tsk, task_pt_regs(tsk));
--		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+-	/*
+-	 * If we have an externally synchronized Linux clock, then update
+-	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+-	 * called as close as possible to 500 ms before the new second starts.
+-	 */
+-	if (ntp_synced() &&
+-	    xtime.tv_sec > last_rtc_update + 660 &&
+-	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
+-	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+-		if (rtc_sh_set_time(xtime.tv_sec) == 0)
+-			last_rtc_update = xtime.tv_sec;
+-		else
+-			/* do it again in 60s */
+-			last_rtc_update = xtime.tv_sec - 600;
 -	}
--#endif
--
--	return fpvalid;
 -}
+-#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 -
--asmlinkage void ret_from_fork(void);
--
--int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
--		unsigned long unused,
--		struct task_struct *p, struct pt_regs *regs)
+-#ifdef CONFIG_PM
+-int timer_suspend(struct sys_device *dev, pm_message_t state)
 -{
--	struct thread_info *ti = task_thread_info(p);
--	struct pt_regs *childregs;
--#if defined(CONFIG_SH_FPU)
--	struct task_struct *tsk = current;
--
--	unlazy_fpu(tsk, regs);
--	p->thread.fpu = tsk->thread.fpu;
--	copy_to_stopped_child_used_math(p);
--#endif
--
--	childregs = task_pt_regs(p);
--	*childregs = *regs;
--
--	if (user_mode(regs)) {
--		childregs->regs[15] = usp;
--		ti->addr_limit = USER_DS;
--	} else {
--		childregs->regs[15] = (unsigned long)childregs;
--		ti->addr_limit = KERNEL_DS;
--	}
--
--	if (clone_flags & CLONE_SETTLS)
--		childregs->gbr = childregs->regs[0];
--
--	childregs->regs[0] = 0; /* Set return value for child */
--
--	p->thread.sp = (unsigned long) childregs;
--	p->thread.pc = (unsigned long) ret_from_fork;
+-	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
 -
--	p->thread.ubc_pc = 0;
+-	sys_timer->ops->stop();
 -
 -	return 0;
 -}
 -
--/* Tracing by user break controller.  */
--static void ubc_set_tracing(int asid, unsigned long pc)
+-int timer_resume(struct sys_device *dev)
 -{
--#if defined(CONFIG_CPU_SH4A)
--	unsigned long val;
--
--	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
--	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
--
--	ctrl_outl(val, UBC_CBR0);
--	ctrl_outl(pc,  UBC_CAR0);
--	ctrl_outl(0x0, UBC_CAMR0);
--	ctrl_outl(0x0, UBC_CBCR);
--
--	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
--	ctrl_outl(val, UBC_CRR0);
--
--	/* Read UBC register that we wrote last, for checking update */
--	val = ctrl_inl(UBC_CRR0);
--
--#else	/* CONFIG_CPU_SH4A */
--	ctrl_outl(pc, UBC_BARA);
--
--#ifdef CONFIG_MMU
--	ctrl_outb(asid, UBC_BASRA);
--#endif
+-	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
 -
--	ctrl_outl(0, UBC_BAMRA);
+-	sys_timer->ops->start();
 -
--	if (current_cpu_data.type == CPU_SH7729 ||
--	    current_cpu_data.type == CPU_SH7710 ||
--	    current_cpu_data.type == CPU_SH7712) {
--		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
--		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
--	} else {
--		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
--		ctrl_outw(BRCR_PCBA, UBC_BRCR);
--	}
--#endif	/* CONFIG_CPU_SH4A */
+-	return 0;
 -}
--
--/*
-- *	switch_to(x,y) should switch tasks from x to y.
-- *
-- */
--struct task_struct *__switch_to(struct task_struct *prev,
--				struct task_struct *next)
--{
--#if defined(CONFIG_SH_FPU)
--	unlazy_fpu(prev, task_pt_regs(prev));
--#endif
--
--#if defined(CONFIG_GUSA) && defined(CONFIG_PREEMPT)
--	{
--		struct pt_regs *regs;
--
--		preempt_disable();
--		regs = task_pt_regs(prev);
--		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
--			int offset = (int)regs->regs[15];
--
--			/* Reset stack pointer: clear critical region mark */
--			regs->regs[15] = regs->regs[1];
--			if (regs->pc < regs->regs[0])
--				/* Go to rewind point */
--				regs->pc = regs->regs[0] + offset;
--		}
--		preempt_enable_no_resched();
--	}
--#endif
--
--#ifdef CONFIG_MMU
--	/*
--	 * Restore the kernel mode register
--	 *	k7 (r7_bank1)
--	 */
--	asm volatile("ldc	%0, r7_bank"
--		     : /* no output */
--		     : "r" (task_thread_info(next)));
--#endif
--
--	/* If no tasks are using the UBC, we're done */
--	if (ubc_usercnt == 0)
--		/* If no tasks are using the UBC, we're done */;
--	else if (next->thread.ubc_pc && next->mm) {
--		int asid = 0;
--#ifdef CONFIG_MMU
--		asid |= cpu_asid(smp_processor_id(), next->mm);
--#endif
--		ubc_set_tracing(asid, next->thread.ubc_pc);
--	} else {
--#if defined(CONFIG_CPU_SH4A)
--		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
--		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
 -#else
--		ctrl_outw(0, UBC_BBRA);
--		ctrl_outw(0, UBC_BBRB);
+-#define timer_suspend NULL
+-#define timer_resume NULL
 -#endif
--	}
 -
--	return prev;
--}
+-static struct sysdev_class timer_sysclass = {
+-	set_kset_name("timer"),
+-	.suspend = timer_suspend,
+-	.resume	 = timer_resume,
+-};
 -
--asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
--			unsigned long r6, unsigned long r7,
--			struct pt_regs __regs)
+-static int __init timer_init_sysfs(void)
 -{
--#ifdef CONFIG_MMU
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
--#else
--	/* fork almost works, enough to trick you into looking elsewhere :-( */
--	return -EINVAL;
--#endif
--}
+-	int ret = sysdev_class_register(&timer_sysclass);
+-	if (ret != 0)
+-		return ret;
 -
--asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
--			 unsigned long parent_tidptr,
--			 unsigned long child_tidptr,
--			 struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	if (!newsp)
--		newsp = regs->regs[15];
--	return do_fork(clone_flags, newsp, regs, 0,
--			(int __user *)parent_tidptr,
--			(int __user *)child_tidptr);
+-	sys_timer->dev.cls = &timer_sysclass;
+-	return sysdev_register(&sys_timer->dev);
 -}
+-device_initcall(timer_init_sysfs);
 -
--/*
-- * This is trivial, and on the face of it looks like it
-- * could equally well be done in user mode.
-- *
-- * Not so, for quite unobvious reasons - register pressure.
-- * In user mode vfork() cannot have a stack frame, and if
-- * done by calling the "clone()" system call directly, you
-- * do not have enough call-clobbered registers to hold all
-- * the information you need.
-- */
--asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
--			 unsigned long r6, unsigned long r7,
--			 struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
--		       0, NULL, NULL);
--}
+-void (*board_time_init)(void);
 -
 -/*
-- * sys_execve() executes a new program.
+- * Shamelessly based on the MIPS and Sparc64 work.
 - */
--asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
--			  char __user * __user *uenvp, unsigned long r7,
--			  struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	int error;
--	char *filename;
+-static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
+-unsigned long sh_hpt_frequency = 0;
 -
--	filename = getname(ufilename);
--	error = PTR_ERR(filename);
--	if (IS_ERR(filename))
--		goto out;
+-#define NSEC_PER_CYC_SHIFT	10
 -
--	error = do_execve(filename, uargv, uenvp, regs);
--	if (error == 0) {
--		task_lock(current);
--		current->ptrace &= ~PT_DTRACE;
--		task_unlock(current);
--	}
--	putname(filename);
--out:
--	return error;
--}
+-struct clocksource clocksource_sh = {
+-	.name		= "SuperH",
+-	.rating		= 200,
+-	.mask		= CLOCKSOURCE_MASK(32),
+-	.read		= null_hpt_read,
+-	.shift		= 16,
+-	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+-};
 -
--unsigned long get_wchan(struct task_struct *p)
+-static void __init init_sh_clocksource(void)
 -{
--	unsigned long pc;
--
--	if (!p || p == current || p->state == TASK_RUNNING)
--		return 0;
--
--	/*
--	 * The same comment as on the Alpha applies here, too ...
--	 */
--	pc = thread_saved_pc(p);
+-	if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
+-		return;
 -
--#ifdef CONFIG_FRAME_POINTER
--	if (in_sched_functions(pc)) {
--		unsigned long schedule_frame = (unsigned long)p->thread.sp;
--		return ((unsigned long *)schedule_frame)[21];
--	}
--#endif
+-	clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
+-						  clocksource_sh.shift);
 -
--	return pc;
--}
+-	timer_ticks_per_nsec_quotient =
+-		clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
 -
--asmlinkage void break_point_trap(void)
--{
--	/* Clear tracing.  */
--#if defined(CONFIG_CPU_SH4A)
--	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
--	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
--#else
--	ctrl_outw(0, UBC_BBRA);
--	ctrl_outw(0, UBC_BBRB);
--#endif
--	current->thread.ubc_pc = 0;
--	ubc_usercnt -= 1;
+-	clocksource_register(&clocksource_sh);
+-}
 -
--	force_sig(SIGTRAP, current);
+-#ifdef CONFIG_GENERIC_TIME
+-unsigned long long sched_clock(void)
+-{
+-	unsigned long long ticks = clocksource_sh.read();
+-	return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
 -}
+-#endif
 -
--/*
-- * Generic trap handler.
-- */
--asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
--				   unsigned long r6, unsigned long r7,
--				   struct pt_regs __regs)
+-void __init time_init(void)
 -{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	if (board_time_init)
+-		board_time_init();
 -
--	/* Rewind */
--	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+-	clk_init();
 -
--	if (notify_die(DIE_TRAP, "debug trap", regs, 0, regs->tra & 0xff,
--		       SIGTRAP) == NOTIFY_STOP)
--		return;
+-	rtc_sh_get_time(&xtime);
+-	set_normalized_timespec(&wall_to_monotonic,
+-				-xtime.tv_sec, -xtime.tv_nsec);
 -
--	force_sig(SIGTRAP, current);
--}
+-	/*
+-	 * Find the timer to use as the system timer, it will be
+-	 * initialized for us.
+-	 */
+-	sys_timer = get_sys_timer();
+-	printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
 -
--/*
-- * Special handler for BUG() traps.
-- */
--asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
--				 unsigned long r6, unsigned long r7,
--				 struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	if (sys_timer->ops->read)
+-		clocksource_sh.read = sys_timer->ops->read;
 -
--	/* Rewind */
--	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+-	init_sh_clocksource();
 -
--	if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
--		       SIGTRAP) == NOTIFY_STOP)
--		return;
+-	if (sh_hpt_frequency)
+-		printk("Using %lu.%03lu MHz high precision timer.\n",
+-		       ((sh_hpt_frequency + 500) / 1000) / 1000,
+-		       ((sh_hpt_frequency + 500) / 1000) % 1000);
 -
--#ifdef CONFIG_BUG
--	if (__kernel_text_address(instruction_pointer(regs))) {
--		u16 insn = *(u16 *)instruction_pointer(regs);
--		if (insn == TRAPA_BUG_OPCODE)
--			handle_BUG(regs);
--	}
+-#if defined(CONFIG_SH_KGDB)
+-	/*
+-	 * Set up kgdb as requested. We do it here because the serial
+-	 * init uses the timer vars we just set up for figuring baud.
+-	 */
+-	kgdb_init();
 -#endif
--
--	force_sig(SIGTRAP, current);
 -}
-diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
+diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c
 new file mode 100644
-index 0000000..9ab1926
+index 0000000..2bc04bf
 --- /dev/null
-+++ b/arch/sh/kernel/process_32.c
-@@ -0,0 +1,465 @@
++++ b/arch/sh/kernel/time_32.c
+@@ -0,0 +1,269 @@
 +/*
-+ * arch/sh/kernel/process.c
-+ *
-+ * This file handles the architecture-dependent parts of process handling..
++ *  arch/sh/kernel/time.c
 + *
-+ *  Copyright (C) 1995  Linus Torvalds
++ *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
++ *  Copyright (C) 2000  Philipp Rumpf <prumpf at tux.org>
++ *  Copyright (C) 2002 - 2007  Paul Mundt
++ *  Copyright (C) 2002  M. R. Brown  <mrbrown at linux-sh.org>
 + *
-+ *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
-+ *		     Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
-+ *		     Copyright (C) 2002 - 2007  Paul Mundt
++ *  Some code taken from i386 version.
++ *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 + */
++#include <linux/kernel.h>
 +#include <linux/module.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/pm.h>
-+#include <linux/kallsyms.h>
-+#include <linux/kexec.h>
-+#include <linux/kdebug.h>
-+#include <linux/tick.h>
-+#include <linux/reboot.h>
-+#include <linux/fs.h>
-+#include <linux/preempt.h>
-+#include <asm/uaccess.h>
-+#include <asm/mmu_context.h>
-+#include <asm/pgalloc.h>
-+#include <asm/system.h>
-+#include <asm/ubc.h>
-+
-+static int hlt_counter;
-+int ubc_usercnt = 0;
++#include <linux/init.h>
++#include <linux/profile.h>
++#include <linux/timex.h>
++#include <linux/sched.h>
++#include <linux/clockchips.h>
++#include <asm/clock.h>
++#include <asm/rtc.h>
++#include <asm/timer.h>
++#include <asm/kgdb.h>
 +
-+void (*pm_idle)(void);
-+void (*pm_power_off)(void);
-+EXPORT_SYMBOL(pm_power_off);
++struct sys_timer *sys_timer;
 +
-+void disable_hlt(void)
-+{
-+	hlt_counter++;
-+}
-+EXPORT_SYMBOL(disable_hlt);
++/* Move this somewhere more sensible.. */
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
 +
-+void enable_hlt(void)
++/* Dummy RTC ops */
++static void null_rtc_get_time(struct timespec *tv)
 +{
-+	hlt_counter--;
++	tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
++	tv->tv_nsec = 0;
 +}
-+EXPORT_SYMBOL(enable_hlt);
 +
-+static int __init nohlt_setup(char *__unused)
++static int null_rtc_set_time(const time_t secs)
 +{
-+	hlt_counter = 1;
-+	return 1;
++	return 0;
 +}
-+__setup("nohlt", nohlt_setup);
 +
-+static int __init hlt_setup(char *__unused)
++/*
++ * Null high precision timer functions for systems lacking one.
++ */
++static cycle_t null_hpt_read(void)
 +{
-+	hlt_counter = 0;
-+	return 1;
++	return 0;
 +}
-+__setup("hlt", hlt_setup);
 +
-+void default_idle(void)
-+{
-+	if (!hlt_counter) {
-+		clear_thread_flag(TIF_POLLING_NRFLAG);
-+		smp_mb__after_clear_bit();
-+		set_bl_bit();
-+		while (!need_resched())
-+			cpu_sleep();
-+		clear_bl_bit();
-+		set_thread_flag(TIF_POLLING_NRFLAG);
-+	} else
-+		while (!need_resched())
-+			cpu_relax();
-+}
++void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
++int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
 +
-+void cpu_idle(void)
++#ifndef CONFIG_GENERIC_TIME
++void do_gettimeofday(struct timeval *tv)
 +{
-+	set_thread_flag(TIF_POLLING_NRFLAG);
-+
-+	/* endless idle loop with no priority at all */
-+	while (1) {
-+		void (*idle)(void) = pm_idle;
-+
-+		if (!idle)
-+			idle = default_idle;
++	unsigned long flags;
++	unsigned long seq;
++	unsigned long usec, sec;
 +
-+		tick_nohz_stop_sched_tick();
-+		while (!need_resched())
-+			idle();
-+		tick_nohz_restart_sched_tick();
++	do {
++		/*
++		 * Turn off IRQs when grabbing xtime_lock, so that
++		 * the sys_timer get_offset code doesn't have to handle it.
++		 */
++		seq = read_seqbegin_irqsave(&xtime_lock, flags);
++		usec = get_timer_offset();
++		sec = xtime.tv_sec;
++		usec += xtime.tv_nsec / NSEC_PER_USEC;
++	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 +
-+		preempt_enable_no_resched();
-+		schedule();
-+		preempt_disable();
-+		check_pgt_cache();
++	while (usec >= 1000000) {
++		usec -= 1000000;
++		sec++;
 +	}
-+}
-+
-+void machine_restart(char * __unused)
-+{
-+	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
-+	asm volatile("ldc %0, sr\n\t"
-+		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
-+}
-+
-+void machine_halt(void)
-+{
-+	local_irq_disable();
-+
-+	while (1)
-+		cpu_sleep();
-+}
 +
-+void machine_power_off(void)
-+{
-+	if (pm_power_off)
-+		pm_power_off();
++	tv->tv_sec = sec;
++	tv->tv_usec = usec;
 +}
++EXPORT_SYMBOL(do_gettimeofday);
 +
-+void show_regs(struct pt_regs * regs)
++int do_settimeofday(struct timespec *tv)
 +{
-+	printk("\n");
-+	printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
-+	print_symbol("PC is at %s\n", instruction_pointer(regs));
-+	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
-+	       regs->pc, regs->regs[15], regs->sr);
-+#ifdef CONFIG_MMU
-+	printk("TEA : %08x    ", ctrl_inl(MMU_TEA));
-+#else
-+	printk("                  ");
-+#endif
-+	printk("%s\n", print_tainted());
-+
-+	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
-+	       regs->regs[0],regs->regs[1],
-+	       regs->regs[2],regs->regs[3]);
-+	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
-+	       regs->regs[4],regs->regs[5],
-+	       regs->regs[6],regs->regs[7]);
-+	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
-+	       regs->regs[8],regs->regs[9],
-+	       regs->regs[10],regs->regs[11]);
-+	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
-+	       regs->regs[12],regs->regs[13],
-+	       regs->regs[14]);
-+	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
-+	       regs->mach, regs->macl, regs->gbr, regs->pr);
-+
-+	show_trace(NULL, (unsigned long *)regs->regs[15], regs);
-+}
++	time_t wtm_sec, sec = tv->tv_sec;
++	long wtm_nsec, nsec = tv->tv_nsec;
 +
-+/*
-+ * Create a kernel thread
-+ */
++	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++		return -EINVAL;
 +
-+/*
-+ * This is the mechanism for creating a new kernel thread.
-+ *
-+ */
-+extern void kernel_thread_helper(void);
-+__asm__(".align 5\n"
-+	"kernel_thread_helper:\n\t"
-+	"jsr	@r5\n\t"
-+	" nop\n\t"
-+	"mov.l	1f, r1\n\t"
-+	"jsr	@r1\n\t"
-+	" mov	r0, r4\n\t"
-+	".align 2\n\t"
-+	"1:.long do_exit");
++	write_seqlock_irq(&xtime_lock);
++	/*
++	 * This is revolting. We need to set "xtime" correctly. However, the
++	 * value in this location is the value at the most recent update of
++	 * wall time.  Discover what correction gettimeofday() would have
++	 * made, and then undo it!
++	 */
++	nsec -= get_timer_offset() * NSEC_PER_USEC;
 +
-+/* Don't use this in BL=1(cli).  Or else, CPU resets! */
-+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-+{
-+	struct pt_regs regs;
++	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
++	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
 +
-+	memset(&regs, 0, sizeof(regs));
-+	regs.regs[4] = (unsigned long)arg;
-+	regs.regs[5] = (unsigned long)fn;
++	set_normalized_timespec(&xtime, sec, nsec);
++	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 +
-+	regs.pc = (unsigned long)kernel_thread_helper;
-+	regs.sr = (1 << 30);
++	ntp_clear();
++	write_sequnlock_irq(&xtime_lock);
++	clock_was_set();
 +
-+	/* Ok, create the new process.. */
-+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
-+		       &regs, 0, NULL, NULL);
++	return 0;
 +}
++EXPORT_SYMBOL(do_settimeofday);
++#endif /* !CONFIG_GENERIC_TIME */
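As a standalone illustration (not part of the patch): the do_gettimeofday()/do_settimeofday() pair above interpolates within the current tick via get_timer_offset() and later has to undo that interpolation when the wall clock is set. The sketch below shows only the arithmetic; read_time(), offset_us and the sample values are made up for the example.

/* Sketch of the tick-interpolation arithmetic used by the
 * non-GENERIC_TIME do_gettimeofday() path above. Hypothetical
 * helper; offset_us stands in for get_timer_offset(). */
#include <stdio.h>

struct tval { long sec; long usec; };

/* Add the intra-tick offset and carry microsecond overflow into
 * seconds, as the while (usec >= 1000000) loop above does. */
static struct tval read_time(long xtime_sec, long xtime_nsec, long offset_us)
{
	struct tval tv;
	long usec = offset_us + xtime_nsec / 1000;

	tv.sec = xtime_sec;
	while (usec >= 1000000) {
		usec -= 1000000;
		tv.sec++;
	}
	tv.usec = usec;
	return tv;
}

int main(void)
{
	/* 0.9995 s into the current second, 800 us since the last tick */
	struct tval tv = read_time(1000, 999500000, 800);
	printf("%ld.%06ld\n", tv.sec, tv.usec);	/* prints 1001.000300 */
	return 0;
}

do_settimeofday() runs the same offset in reverse: it subtracts the elapsed intra-tick correction from the requested time so that the next interpolated read lands on the value the caller asked for.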
++
++#ifndef CONFIG_GENERIC_CLOCKEVENTS
++/* last time the RTC clock got updated */
++static long last_rtc_update;
 +
 +/*
-+ * Free current thread data structures etc..
++ * handle_timer_tick() needs to keep up the real-time clock,
++ * as well as call the "do_timer()" routine every clocktick
 + */
-+void exit_thread(void)
++void handle_timer_tick(void)
 +{
-+	if (current->thread.ubc_pc) {
-+		current->thread.ubc_pc = 0;
-+		ubc_usercnt -= 1;
-+	}
-+}
++	do_timer(1);
++#ifndef CONFIG_SMP
++	update_process_times(user_mode(get_irq_regs()));
++#endif
++	if (current->pid)
++		profile_tick(CPU_PROFILING);
 +
-+void flush_thread(void)
-+{
-+#if defined(CONFIG_SH_FPU)
-+	struct task_struct *tsk = current;
-+	/* Forget lazy FPU state */
-+	clear_fpu(tsk, task_pt_regs(tsk));
-+	clear_used_math();
++#ifdef CONFIG_HEARTBEAT
++	if (sh_mv.mv_heartbeat != NULL)
++		sh_mv.mv_heartbeat();
 +#endif
-+}
 +
-+void release_thread(struct task_struct *dead_task)
-+{
-+	/* do nothing */
++	/*
++	 * If we have an externally synchronized Linux clock, then update
++	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++	 * called as close as possible to 500 ms before the new second starts.
++	 */
++	if (ntp_synced() &&
++	    xtime.tv_sec > last_rtc_update + 660 &&
++	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
++	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
++		if (rtc_sh_set_time(xtime.tv_sec) == 0)
++			last_rtc_update = xtime.tv_sec;
++		else
++			/* do it again in 60s */
++			last_rtc_update = xtime.tv_sec - 600;
++	}
 +}
++#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
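For illustration only (not part of the patch): handle_timer_tick() above writes the wall time back to the RTC only when NTP is synchronized, at least 660 s have passed since the last successful write, and the current time sits within half a tick of 500 ms before the next second boundary. A minimal sketch of that window test; should_update_rtc(), TICK_US and the sample values are invented for the example.

/* Sketch of the "sync the RTC every ~11 minutes" window test used in
 * handle_timer_tick() above. TICK_US stands in for TICK_SIZE
 * (one jiffy in microseconds, here assuming HZ=100). */
#include <stdio.h>

#define TICK_US 10000

static int should_update_rtc(int ntp_synced, long now_sec, long now_nsec,
			     long last_update_sec)
{
	long usec_into_second = now_nsec / 1000;

	return ntp_synced &&
	       now_sec > last_update_sec + 660 &&
	       usec_into_second >= 500000 - TICK_US / 2 &&
	       usec_into_second <= 500000 + TICK_US / 2;
}

int main(void)
{
	/* 11 minutes after the last write, 499.5 ms into the second: update */
	printf("%d\n", should_update_rtc(1, 661, 499500000, 0));
	/* too early in the second: skip */
	printf("%d\n", should_update_rtc(1, 661, 100000000, 0));
	return 0;
}

If rtc_sh_set_time() fails, last_rtc_update is backed off by 600 s, so the write is retried roughly a minute later instead of waiting another eleven minutes.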
 +
-+/* Fill in the fpu structure for a core dump.. */
-+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
++#ifdef CONFIG_PM
++int timer_suspend(struct sys_device *dev, pm_message_t state)
 +{
-+	int fpvalid = 0;
-+
-+#if defined(CONFIG_SH_FPU)
-+	struct task_struct *tsk = current;
++	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
 +
-+	fpvalid = !!tsk_used_math(tsk);
-+	if (fpvalid) {
-+		unlazy_fpu(tsk, regs);
-+		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
-+	}
-+#endif
++	sys_timer->ops->stop();
 +
-+	return fpvalid;
++	return 0;
 +}
 +
-+asmlinkage void ret_from_fork(void);
-+
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
-+		unsigned long unused,
-+		struct task_struct *p, struct pt_regs *regs)
++int timer_resume(struct sys_device *dev)
 +{
-+	struct thread_info *ti = task_thread_info(p);
-+	struct pt_regs *childregs;
-+#if defined(CONFIG_SH_FPU)
-+	struct task_struct *tsk = current;
-+
-+	unlazy_fpu(tsk, regs);
-+	p->thread.fpu = tsk->thread.fpu;
-+	copy_to_stopped_child_used_math(p);
-+#endif
-+
-+	childregs = task_pt_regs(p);
-+	*childregs = *regs;
-+
-+	if (user_mode(regs)) {
-+		childregs->regs[15] = usp;
-+		ti->addr_limit = USER_DS;
-+	} else {
-+		childregs->regs[15] = (unsigned long)childregs;
-+		ti->addr_limit = KERNEL_DS;
-+	}
-+
-+	if (clone_flags & CLONE_SETTLS)
-+		childregs->gbr = childregs->regs[0];
-+
-+	childregs->regs[0] = 0; /* Set return value for child */
-+
-+	p->thread.sp = (unsigned long) childregs;
-+	p->thread.pc = (unsigned long) ret_from_fork;
++	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
 +
-+	p->thread.ubc_pc = 0;
++	sys_timer->ops->start();
 +
 +	return 0;
 +}
++#else
++#define timer_suspend NULL
++#define timer_resume NULL
++#endif
 +
-+/* Tracing by user break controller.  */
-+static void ubc_set_tracing(int asid, unsigned long pc)
-+{
-+#if defined(CONFIG_CPU_SH4A)
-+	unsigned long val;
-+
-+	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
-+	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-+
-+	ctrl_outl(val, UBC_CBR0);
-+	ctrl_outl(pc,  UBC_CAR0);
-+	ctrl_outl(0x0, UBC_CAMR0);
-+	ctrl_outl(0x0, UBC_CBCR);
++static struct sysdev_class timer_sysclass = {
++	.name	 = "timer",
++	.suspend = timer_suspend,
++	.resume	 = timer_resume,
++};
 +
-+	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
-+	ctrl_outl(val, UBC_CRR0);
++static int __init timer_init_sysfs(void)
++{
++	int ret = sysdev_class_register(&timer_sysclass);
++	if (ret != 0)
++		return ret;
 +
-+	/* Read UBC register that we wrote last, for checking update */
-+	val = ctrl_inl(UBC_CRR0);
++	sys_timer->dev.cls = &timer_sysclass;
++	return sysdev_register(&sys_timer->dev);
++}
++device_initcall(timer_init_sysfs);
 +
-+#else	/* CONFIG_CPU_SH4A */
-+	ctrl_outl(pc, UBC_BARA);
++void (*board_time_init)(void);
 +
-+#ifdef CONFIG_MMU
-+	ctrl_outb(asid, UBC_BASRA);
-+#endif
++/*
++ * Shamelessly based on the MIPS and Sparc64 work.
++ */
++static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
++unsigned long sh_hpt_frequency = 0;
 +
-+	ctrl_outl(0, UBC_BAMRA);
++#define NSEC_PER_CYC_SHIFT	10
 +
-+	if (current_cpu_data.type == CPU_SH7729 ||
-+	    current_cpu_data.type == CPU_SH7710 ||
-+	    current_cpu_data.type == CPU_SH7712) {
-+		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
-+		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
-+	} else {
-+		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
-+		ctrl_outw(BRCR_PCBA, UBC_BRCR);
-+	}
-+#endif	/* CONFIG_CPU_SH4A */
-+}
++struct clocksource clocksource_sh = {
++	.name		= "SuperH",
++	.rating		= 200,
++	.mask		= CLOCKSOURCE_MASK(32),
++	.read		= null_hpt_read,
++	.shift		= 16,
++	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
++};
 +
-+/*
-+ *	switch_to(x,y) should switch tasks from x to y.
-+ *
-+ */
-+struct task_struct *__switch_to(struct task_struct *prev,
-+				struct task_struct *next)
++static void __init init_sh_clocksource(void)
 +{
-+#if defined(CONFIG_SH_FPU)
-+	unlazy_fpu(prev, task_pt_regs(prev));
-+#endif
++	if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
++		return;
 +
-+#ifdef CONFIG_MMU
-+	/*
-+	 * Restore the kernel mode register
-+	 *	k7 (r7_bank1)
-+	 */
-+	asm volatile("ldc	%0, r7_bank"
-+		     : /* no output */
-+		     : "r" (task_thread_info(next)));
-+#endif
++	clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
++						  clocksource_sh.shift);
 +
-+	/* If no tasks are using the UBC, we're done */
-+	if (ubc_usercnt == 0)
-+		/* If no tasks are using the UBC, we're done */;
-+	else if (next->thread.ubc_pc && next->mm) {
-+		int asid = 0;
-+#ifdef CONFIG_MMU
-+		asid |= cpu_asid(smp_processor_id(), next->mm);
-+#endif
-+		ubc_set_tracing(asid, next->thread.ubc_pc);
-+	} else {
-+#if defined(CONFIG_CPU_SH4A)
-+		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-+		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-+#else
-+		ctrl_outw(0, UBC_BBRA);
-+		ctrl_outw(0, UBC_BBRB);
-+#endif
-+	}
++	timer_ticks_per_nsec_quotient =
++		clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
 +
-+	return prev;
++	clocksource_register(&clocksource_sh);
 +}
 +
-+asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
-+			unsigned long r6, unsigned long r7,
-+			struct pt_regs __regs)
++#ifdef CONFIG_GENERIC_TIME
++unsigned long long sched_clock(void)
 +{
-+#ifdef CONFIG_MMU
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
-+#else
-+	/* fork almost works, enough to trick you into looking elsewhere :-( */
-+	return -EINVAL;
-+#endif
++	unsigned long long ticks = clocksource_sh.read();
++	return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
 +}
++#endif
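As an aside (not part of the patch): both timer_ticks_per_nsec_quotient and the clocksource mult above come from clocksource_hz2mult()-style fixed point, so sched_clock() is just cycles * ((NSEC_PER_SEC << shift) / hz) >> shift. A hedged standalone sketch; hz2mult() mirrors what clocksource_hz2mult() computes, and the frequency and tick count are illustrative values only.

/* Sketch of the fixed-point cycles->nanoseconds conversion behind
 * timer_ticks_per_nsec_quotient and sched_clock() above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hz2mult(uint32_t hz, int shift)
{
	/* mult = (NSEC_PER_SEC << shift) / hz, rounded to nearest */
	uint64_t tmp = ((uint64_t)1000000000 << shift) + hz / 2;
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t hz = 33333333;		/* assumed timer input clock */
	int shift = 10;			/* NSEC_PER_CYC_SHIFT */
	uint32_t quot = hz2mult(hz, shift);
	uint64_t ticks = 123456;

	/* same form as sched_clock(): (ticks * quotient) >> shift */
	printf("approx: %llu ns\n",
	       (unsigned long long)((ticks * quot) >> shift));
	printf("exact:  %llu ns\n",
	       (unsigned long long)(ticks * 1000000000ULL / hz));
	return 0;
}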
 +
-+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
-+			 unsigned long parent_tidptr,
-+			 unsigned long child_tidptr,
-+			 struct pt_regs __regs)
++void __init time_init(void)
 +{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	if (!newsp)
-+		newsp = regs->regs[15];
-+	return do_fork(clone_flags, newsp, regs, 0,
-+			(int __user *)parent_tidptr,
-+			(int __user *)child_tidptr);
-+}
++	if (board_time_init)
++		board_time_init();
 +
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
-+			 unsigned long r6, unsigned long r7,
-+			 struct pt_regs __regs)
-+{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
-+		       0, NULL, NULL);
-+}
++	clk_init();
 +
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
-+			  char __user * __user *uenvp, unsigned long r7,
-+			  struct pt_regs __regs)
-+{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	int error;
-+	char *filename;
++	rtc_sh_get_time(&xtime);
++	set_normalized_timespec(&wall_to_monotonic,
++				-xtime.tv_sec, -xtime.tv_nsec);
 +
-+	filename = getname(ufilename);
-+	error = PTR_ERR(filename);
-+	if (IS_ERR(filename))
-+		goto out;
++	/*
++	 * Find the timer to use as the system timer, it will be
++	 * initialized for us.
++	 */
++	sys_timer = get_sys_timer();
++	printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
 +
-+	error = do_execve(filename, uargv, uenvp, regs);
-+	if (error == 0) {
-+		task_lock(current);
-+		current->ptrace &= ~PT_DTRACE;
-+		task_unlock(current);
-+	}
-+	putname(filename);
-+out:
-+	return error;
-+}
++	if (sys_timer->ops->read)
++		clocksource_sh.read = sys_timer->ops->read;
 +
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+	unsigned long pc;
++	init_sh_clocksource();
 +
-+	if (!p || p == current || p->state == TASK_RUNNING)
-+		return 0;
++	if (sh_hpt_frequency)
++		printk("Using %lu.%03lu MHz high precision timer.\n",
++		       ((sh_hpt_frequency + 500) / 1000) / 1000,
++		       ((sh_hpt_frequency + 500) / 1000) % 1000);
 +
++#if defined(CONFIG_SH_KGDB)
 +	/*
-+	 * The same comment as on the Alpha applies here, too ...
++	 * Set up kgdb as requested. We do it here because the serial
++	 * init uses the timer vars we just set up for figuring baud.
 +	 */
-+	pc = thread_saved_pc(p);
-+
-+#ifdef CONFIG_FRAME_POINTER
-+	if (in_sched_functions(pc)) {
-+		unsigned long schedule_frame = (unsigned long)p->thread.sp;
-+		return ((unsigned long *)schedule_frame)[21];
-+	}
-+#endif
-+
-+	return pc;
-+}
-+
-+asmlinkage void break_point_trap(void)
-+{
-+	/* Clear tracing.  */
-+#if defined(CONFIG_CPU_SH4A)
-+	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-+	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-+#else
-+	ctrl_outw(0, UBC_BBRA);
-+	ctrl_outw(0, UBC_BBRB);
++	kgdb_init();
 +#endif
-+	current->thread.ubc_pc = 0;
-+	ubc_usercnt -= 1;
-+
-+	force_sig(SIGTRAP, current);
 +}
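One small illustration (not part of the patch) of the formatting used by the "high precision timer" printk in time_init() above: the measured frequency is rounded to the nearest kHz, then split into MHz and thousandths. The frequency below is made up.

/* Sketch of the rounding in the printk above. */
#include <stdio.h>

int main(void)
{
	unsigned long hz = 33332999;		/* illustrative frequency */
	unsigned long khz = (hz + 500) / 1000;	/* 33333 kHz, rounded */

	printf("Using %lu.%03lu MHz high precision timer.\n",
	       khz / 1000, khz % 1000);		/* "33.333 MHz" */
	return 0;
}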
-diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
+diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c
 new file mode 100644
-index 0000000..cff3b7d
+index 0000000..f819ba3
 --- /dev/null
-+++ b/arch/sh/kernel/process_64.c
-@@ -0,0 +1,701 @@
++++ b/arch/sh/kernel/time_64.c
+@@ -0,0 +1,519 @@
 +/*
-+ * arch/sh/kernel/process_64.c
-+ *
-+ * This file handles the architecture-dependent parts of process handling..
++ * arch/sh/kernel/time_64.c
 + *
 + * Copyright (C) 2000, 2001  Paolo Alberelli
 + * Copyright (C) 2003 - 2007  Paul Mundt
-+ * Copyright (C) 2003, 2004 Richard Curnow
-+ *
-+ * Started from SH3/4 version:
-+ *   Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
++ * Copyright (C) 2003  Richard Curnow
 + *
-+ *   In turn started from i386 version:
-+ *     Copyright (C) 1995  Linus Torvalds
++ *    Original TMU/RTC code taken from sh version.
++ *    Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
++ *      Some code taken from i386 version.
++ *      Copyright (C) 1991, 1992, 1995  Linus Torvalds
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + */
++#include <linux/errno.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
 +#include <linux/mm.h>
-+#include <linux/fs.h>
-+#include <linux/ptrace.h>
-+#include <linux/reboot.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
 +#include <linux/init.h>
++#include <linux/profile.h>
++#include <linux/smp.h>
 +#include <linux/module.h>
-+#include <linux/proc_fs.h>
++#include <linux/bcd.h>
++#include <linux/timex.h>
++#include <linux/irq.h>
 +#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <asm/cpu/registers.h>	 /* required by inline __asm__ stmt. */
++#include <asm/cpu/irq.h>
++#include <asm/addrspace.h>
++#include <asm/processor.h>
 +#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/mmu_context.h>
-+
-+struct task_struct *last_task_used_math = NULL;
++#include <asm/delay.h>
 +
-+static int hlt_counter = 1;
++#define TMU_TOCR_INIT	0x00
++#define TMU0_TCR_INIT	0x0020
++#define TMU_TSTR_INIT	1
++#define TMU_TSTR_OFF	0
 +
-+#define HARD_IDLE_TIMEOUT (HZ / 3)
++/* Real Time Clock */
++#define	RTC_BLOCK_OFF	0x01040000
++#define RTC_BASE	PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
++#define RTC_RCR1_CIE	0x10	/* Carry Interrupt Enable */
++#define RTC_RCR1	(rtc_base + 0x38)
 +
-+void disable_hlt(void)
-+{
-+	hlt_counter++;
-+}
++/* Clock, Power and Reset Controller */
++#define	CPRC_BLOCK_OFF	0x01010000
++#define CPRC_BASE	PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
 +
-+void enable_hlt(void)
-+{
-+	hlt_counter--;
-+}
++#define FRQCR		(cprc_base+0x0)
++#define WTCSR		(cprc_base+0x0018)
++#define STBCR		(cprc_base+0x0030)
 +
-+static int __init nohlt_setup(char *__unused)
-+{
-+	hlt_counter = 1;
-+	return 1;
-+}
++/* Time Management Unit */
++#define	TMU_BLOCK_OFF	0x01020000
++#define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
++#define TMU0_BASE	tmu_base + 0x8 + (0xc * 0x0)
++#define TMU1_BASE	tmu_base + 0x8 + (0xc * 0x1)
++#define TMU2_BASE	tmu_base + 0x8 + (0xc * 0x2)
 +
-+static int __init hlt_setup(char *__unused)
-+{
-+	hlt_counter = 0;
-+	return 1;
-+}
++#define TMU_TOCR	tmu_base+0x0	/* Byte access */
++#define TMU_TSTR	tmu_base+0x4	/* Byte access */
 +
-+__setup("nohlt", nohlt_setup);
-+__setup("hlt", hlt_setup);
++#define TMU0_TCOR	TMU0_BASE+0x0	/* Long access */
++#define TMU0_TCNT	TMU0_BASE+0x4	/* Long access */
++#define TMU0_TCR	TMU0_BASE+0x8	/* Word access */
 +
-+static inline void hlt(void)
-+{
-+	__asm__ __volatile__ ("sleep" : : : "memory");
-+}
++#define TICK_SIZE (tick_nsec / 1000)
 +
-+/*
-+ * The idle loop on a uniprocessor SH..
-+ */
-+void cpu_idle(void)
-+{
-+	/* endless idle loop with no priority at all */
-+	while (1) {
-+		if (hlt_counter) {
-+			while (!need_resched())
-+				cpu_relax();
-+		} else {
-+			local_irq_disable();
-+			while (!need_resched()) {
-+				local_irq_enable();
-+				hlt();
-+				local_irq_disable();
-+			}
-+			local_irq_enable();
-+		}
-+		preempt_enable_no_resched();
-+		schedule();
-+		preempt_disable();
-+	}
++static unsigned long tmu_base, rtc_base;
++unsigned long cprc_base;
 +
-+}
++/* Variables to allow interpolation of time of day to resolution better than a
++ * jiffy. */
 +
-+void machine_restart(char * __unused)
-+{
-+	extern void phys_stext(void);
++/* This is effectively protected by xtime_lock */
++static unsigned long ctc_last_interrupt;
++static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
 +
-+	phys_stext();
-+}
++#define CTC_JIFFY_SCALE_SHIFT 40
 +
-+void machine_halt(void)
-+{
-+	for (;;);
-+}
++/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
++static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
 +
-+void machine_power_off(void)
-+{
-+#if 0
-+	/* Disable watchdog timer */
-+	ctrl_outl(0xa5000000, WTCSR);
-+	/* Configure deep standby on sleep */
-+	ctrl_outl(0x03, STBCR);
-+#endif
++/* Estimate number of microseconds that have elapsed since the last timer tick,
++   by scaling the delta that has occurred in the CTC register.
 +
-+	__asm__ __volatile__ (
-+		"sleep\n\t"
-+		"synci\n\t"
-+		"nop;nop;nop;nop\n\t"
-+	);
++   WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
++   the CPU clock rate.  If the CPU sleeps, the CTC stops counting.  Bear this
++   in mind if enabling SLEEP_WORKS in process.c.  In that case, this algorithm
++   probably needs to use TMU.TCNT0 instead.  This will work even if the CPU is
++   sleeping, though will be coarser.
 +
-+	panic("Unexpected wakeup!\n");
-+}
++   FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
++   is running or if the freq or tick arguments of adjtimex are modified after
++   we have calibrated the scaling factor?  This will result in either a jump at
++   the end of a tick period, or a wrap backwards at the start of the next one,
++   if the application is reading the time of day often enough.  I think we
++   ought to do better than this.  For this reason, usecs_per_jiffy is left
++   separated out in the calculation below.  This allows some future hook into
++   the adjtime-related stuff in kernel/timer.c to remove this hazard.
 +
-+void (*pm_power_off)(void) = machine_power_off;
-+EXPORT_SYMBOL(pm_power_off);
++*/
 +
-+void show_regs(struct pt_regs * regs)
++static unsigned long usecs_since_tick(void)
 +{
-+	unsigned long long ah, al, bh, bl, ch, cl;
-+
-+	printk("\n");
-+
-+	ah = (regs->pc) >> 32;
-+	al = (regs->pc) & 0xffffffff;
-+	bh = (regs->regs[18]) >> 32;
-+	bl = (regs->regs[18]) & 0xffffffff;
-+	ch = (regs->regs[15]) >> 32;
-+	cl = (regs->regs[15]) & 0xffffffff;
-+	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->sr) >> 32;
-+	al = (regs->sr) & 0xffffffff;
-+        asm volatile ("getcon   " __TEA ", %0" : "=r" (bh));
-+        asm volatile ("getcon   " __TEA ", %0" : "=r" (bl));
-+	bh = (bh) >> 32;
-+	bl = (bl) & 0xffffffff;
-+        asm volatile ("getcon   " __KCR0 ", %0" : "=r" (ch));
-+        asm volatile ("getcon   " __KCR0 ", %0" : "=r" (cl));
-+	ch = (ch) >> 32;
-+	cl = (cl) & 0xffffffff;
-+	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[0]) >> 32;
-+	al = (regs->regs[0]) & 0xffffffff;
-+	bh = (regs->regs[1]) >> 32;
-+	bl = (regs->regs[1]) & 0xffffffff;
-+	ch = (regs->regs[2]) >> 32;
-+	cl = (regs->regs[2]) & 0xffffffff;
-+	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[3]) >> 32;
-+	al = (regs->regs[3]) & 0xffffffff;
-+	bh = (regs->regs[4]) >> 32;
-+	bl = (regs->regs[4]) & 0xffffffff;
-+	ch = (regs->regs[5]) >> 32;
-+	cl = (regs->regs[5]) & 0xffffffff;
-+	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[6]) >> 32;
-+	al = (regs->regs[6]) & 0xffffffff;
-+	bh = (regs->regs[7]) >> 32;
-+	bl = (regs->regs[7]) & 0xffffffff;
-+	ch = (regs->regs[8]) >> 32;
-+	cl = (regs->regs[8]) & 0xffffffff;
-+	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[9]) >> 32;
-+	al = (regs->regs[9]) & 0xffffffff;
-+	bh = (regs->regs[10]) >> 32;
-+	bl = (regs->regs[10]) & 0xffffffff;
-+	ch = (regs->regs[11]) >> 32;
-+	cl = (regs->regs[11]) & 0xffffffff;
-+	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[12]) >> 32;
-+	al = (regs->regs[12]) & 0xffffffff;
-+	bh = (regs->regs[13]) >> 32;
-+	bl = (regs->regs[13]) & 0xffffffff;
-+	ch = (regs->regs[14]) >> 32;
-+	cl = (regs->regs[14]) & 0xffffffff;
-+	printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[16]) >> 32;
-+	al = (regs->regs[16]) & 0xffffffff;
-+	bh = (regs->regs[17]) >> 32;
-+	bl = (regs->regs[17]) & 0xffffffff;
-+	ch = (regs->regs[19]) >> 32;
-+	cl = (regs->regs[19]) & 0xffffffff;
-+	printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[20]) >> 32;
-+	al = (regs->regs[20]) & 0xffffffff;
-+	bh = (regs->regs[21]) >> 32;
-+	bl = (regs->regs[21]) & 0xffffffff;
-+	ch = (regs->regs[22]) >> 32;
-+	cl = (regs->regs[22]) & 0xffffffff;
-+	printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[23]) >> 32;
-+	al = (regs->regs[23]) & 0xffffffff;
-+	bh = (regs->regs[24]) >> 32;
-+	bl = (regs->regs[24]) & 0xffffffff;
-+	ch = (regs->regs[25]) >> 32;
-+	cl = (regs->regs[25]) & 0xffffffff;
-+	printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+
-+	ah = (regs->regs[26]) >> 32;
-+	al = (regs->regs[26]) & 0xffffffff;
-+	bh = (regs->regs[27]) >> 32;
-+	bl = (regs->regs[27]) & 0xffffffff;
-+	ch = (regs->regs[28]) >> 32;
-+	cl = (regs->regs[28]) & 0xffffffff;
-+	printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	unsigned long long current_ctc;
++	long ctc_ticks_since_interrupt;
++	unsigned long long ull_ctc_ticks_since_interrupt;
++	unsigned long result;
 +
-+	ah = (regs->regs[29]) >> 32;
-+	al = (regs->regs[29]) & 0xffffffff;
-+	bh = (regs->regs[30]) >> 32;
-+	bl = (regs->regs[30]) & 0xffffffff;
-+	ch = (regs->regs[31]) >> 32;
-+	cl = (regs->regs[31]) & 0xffffffff;
-+	printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	unsigned long long mul1_out;
++	unsigned long long mul1_out_high;
++	unsigned long long mul2_out_low, mul2_out_high;
 +
-+	ah = (regs->regs[32]) >> 32;
-+	al = (regs->regs[32]) & 0xffffffff;
-+	bh = (regs->regs[33]) >> 32;
-+	bl = (regs->regs[33]) & 0xffffffff;
-+	ch = (regs->regs[34]) >> 32;
-+	cl = (regs->regs[34]) & 0xffffffff;
-+	printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	/* Read CTC register */
++	asm ("getcon cr62, %0" : "=r" (current_ctc));
++	/* Note, the CTC counts down on each CPU clock, not up.
++	   Note(2), use long type to get correct wraparound arithmetic when
++	   the counter crosses zero. */
++	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
++	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
 +
-+	ah = (regs->regs[35]) >> 32;
-+	al = (regs->regs[35]) & 0xffffffff;
-+	bh = (regs->regs[36]) >> 32;
-+	bl = (regs->regs[36]) & 0xffffffff;
-+	ch = (regs->regs[37]) >> 32;
-+	cl = (regs->regs[37]) & 0xffffffff;
-+	printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	/* Inline assembly to do 32x32x32->64 multiplier */
++	asm volatile ("mulu.l %1, %2, %0" :
++	     "=r" (mul1_out) :
++	     "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
 +
-+	ah = (regs->regs[38]) >> 32;
-+	al = (regs->regs[38]) & 0xffffffff;
-+	bh = (regs->regs[39]) >> 32;
-+	bl = (regs->regs[39]) & 0xffffffff;
-+	ch = (regs->regs[40]) >> 32;
-+	cl = (regs->regs[40]) & 0xffffffff;
-+	printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	mul1_out_high = mul1_out >> 32;
 +
-+	ah = (regs->regs[41]) >> 32;
-+	al = (regs->regs[41]) & 0xffffffff;
-+	bh = (regs->regs[42]) >> 32;
-+	bl = (regs->regs[42]) & 0xffffffff;
-+	ch = (regs->regs[43]) >> 32;
-+	cl = (regs->regs[43]) & 0xffffffff;
-+	printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	asm volatile ("mulu.l %1, %2, %0" :
++	     "=r" (mul2_out_low) :
++	     "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
 +
-+	ah = (regs->regs[44]) >> 32;
-+	al = (regs->regs[44]) & 0xffffffff;
-+	bh = (regs->regs[45]) >> 32;
-+	bl = (regs->regs[45]) & 0xffffffff;
-+	ch = (regs->regs[46]) >> 32;
-+	cl = (regs->regs[46]) & 0xffffffff;
-+	printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++#if 1
++	asm volatile ("mulu.l %1, %2, %0" :
++	     "=r" (mul2_out_high) :
++	     "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
++#endif
 +
-+	ah = (regs->regs[47]) >> 32;
-+	al = (regs->regs[47]) & 0xffffffff;
-+	bh = (regs->regs[48]) >> 32;
-+	bl = (regs->regs[48]) & 0xffffffff;
-+	ch = (regs->regs[49]) >> 32;
-+	cl = (regs->regs[49]) & 0xffffffff;
-+	printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
 +
-+	ah = (regs->regs[50]) >> 32;
-+	al = (regs->regs[50]) & 0xffffffff;
-+	bh = (regs->regs[51]) >> 32;
-+	bl = (regs->regs[51]) & 0xffffffff;
-+	ch = (regs->regs[52]) >> 32;
-+	cl = (regs->regs[52]) & 0xffffffff;
-+	printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	return result;
++}
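As a standalone illustration (not part of the patch): usecs_since_tick() above computes ctc_delta * usecs_per_jiffy * (2^40 / ctc_ticks_per_jiffy) >> 40, split into 32x32->64 multiplies because SH-5 has no full 64x64 multiply. In portable C with 64-bit arithmetic the same calculation can be written in one expression; the clock rate and delta below are assumed values for the example.

/* Sketch of the CTC-based microsecond interpolation above:
 * usec = ctc_delta * usecs_per_jiffy * (2^40 / ctc_per_jiffy) >> 40. */
#include <stdio.h>
#include <stdint.h>

#define CTC_JIFFY_SCALE_SHIFT 40

int main(void)
{
	uint64_t cpu_hz = 400000000;		/* illustrative CPU clock */
	uint64_t hz = 100;			/* HZ */
	uint64_t usecs_per_jiffy = 1000000 / hz;
	uint64_t ctc_per_jiffy = cpu_hz / hz;	/* CTC decrements per tick */
	uint64_t scaled_recip =
		((uint64_t)1 << CTC_JIFFY_SCALE_SHIFT) / ctc_per_jiffy;

	uint64_t ctc_delta = 1000000;		/* cycles since the tick */
	uint64_t usec = (ctc_delta * usecs_per_jiffy * scaled_recip)
				>> CTC_JIFFY_SCALE_SHIFT;

	printf("%llu us since the tick (expected ~2500)\n",
	       (unsigned long long)usec);
	return 0;
}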
 +
-+	ah = (regs->regs[53]) >> 32;
-+	al = (regs->regs[53]) & 0xffffffff;
-+	bh = (regs->regs[54]) >> 32;
-+	bl = (regs->regs[54]) & 0xffffffff;
-+	ch = (regs->regs[55]) >> 32;
-+	cl = (regs->regs[55]) & 0xffffffff;
-+	printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++void do_gettimeofday(struct timeval *tv)
++{
++	unsigned long flags;
++	unsigned long seq;
++	unsigned long usec, sec;
 +
-+	ah = (regs->regs[56]) >> 32;
-+	al = (regs->regs[56]) & 0xffffffff;
-+	bh = (regs->regs[57]) >> 32;
-+	bl = (regs->regs[57]) & 0xffffffff;
-+	ch = (regs->regs[58]) >> 32;
-+	cl = (regs->regs[58]) & 0xffffffff;
-+	printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	do {
++		seq = read_seqbegin_irqsave(&xtime_lock, flags);
++		usec = usecs_since_tick();
++		sec = xtime.tv_sec;
++		usec += xtime.tv_nsec / 1000;
++	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 +
-+	ah = (regs->regs[59]) >> 32;
-+	al = (regs->regs[59]) & 0xffffffff;
-+	bh = (regs->regs[60]) >> 32;
-+	bl = (regs->regs[60]) & 0xffffffff;
-+	ch = (regs->regs[61]) >> 32;
-+	cl = (regs->regs[61]) & 0xffffffff;
-+	printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	while (usec >= 1000000) {
++		usec -= 1000000;
++		sec++;
++	}
 +
-+	ah = (regs->regs[62]) >> 32;
-+	al = (regs->regs[62]) & 0xffffffff;
-+	bh = (regs->tregs[0]) >> 32;
-+	bl = (regs->tregs[0]) & 0xffffffff;
-+	ch = (regs->tregs[1]) >> 32;
-+	cl = (regs->tregs[1]) & 0xffffffff;
-+	printk("R62 : %08Lx%08Lx T0  : %08Lx%08Lx T1  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	tv->tv_sec = sec;
++	tv->tv_usec = usec;
++}
 +
-+	ah = (regs->tregs[2]) >> 32;
-+	al = (regs->tregs[2]) & 0xffffffff;
-+	bh = (regs->tregs[3]) >> 32;
-+	bl = (regs->tregs[3]) & 0xffffffff;
-+	ch = (regs->tregs[4]) >> 32;
-+	cl = (regs->tregs[4]) & 0xffffffff;
-+	printk("T2  : %08Lx%08Lx T3  : %08Lx%08Lx T4  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++int do_settimeofday(struct timespec *tv)
++{
++	time_t wtm_sec, sec = tv->tv_sec;
++	long wtm_nsec, nsec = tv->tv_nsec;
 +
-+	ah = (regs->tregs[5]) >> 32;
-+	al = (regs->tregs[5]) & 0xffffffff;
-+	bh = (regs->tregs[6]) >> 32;
-+	bl = (regs->tregs[6]) & 0xffffffff;
-+	ch = (regs->tregs[7]) >> 32;
-+	cl = (regs->tregs[7]) & 0xffffffff;
-+	printk("T5  : %08Lx%08Lx T6  : %08Lx%08Lx T7  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++		return -EINVAL;
 +
++	write_seqlock_irq(&xtime_lock);
 +	/*
-+	 * If we're in kernel mode, dump the stack too..
++	 * This is revolting. We need to set "xtime" correctly. However, the
++	 * value in this location is the value at the most recent update of
++	 * wall time.  Discover what correction gettimeofday() would have
++	 * made, and then undo it!
 +	 */
-+	if (!user_mode(regs)) {
-+		void show_stack(struct task_struct *tsk, unsigned long *sp);
-+		unsigned long sp = regs->regs[15] & 0xffffffff;
-+		struct task_struct *tsk = get_current();
++	nsec -= 1000 * usecs_since_tick();
 +
-+		tsk->thread.kregs = regs;
++	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
++	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
 +
-+		show_stack(tsk, (unsigned long *)sp);
-+	}
-+}
++	set_normalized_timespec(&xtime, sec, nsec);
++	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 +
-+struct task_struct * alloc_task_struct(void)
-+{
-+	/* Get task descriptor pages */
-+	return (struct task_struct *)
-+		__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
-+}
++	ntp_clear();
++	write_sequnlock_irq(&xtime_lock);
++	clock_was_set();
 +
-+void free_task_struct(struct task_struct *p)
-+{
-+	free_pages((unsigned long) p, get_order(THREAD_SIZE));
++	return 0;
 +}
++EXPORT_SYMBOL(do_settimeofday);
 +
-+/*
-+ * Create a kernel thread
-+ */
-+ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
++/* Dummy RTC ops */
++static void null_rtc_get_time(struct timespec *tv)
 +{
-+	do_exit(fn(arg));
++	tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
++	tv->tv_nsec = 0;
 +}
 +
-+/*
-+ * This is the mechanism for creating a new kernel thread.
-+ *
-+ * NOTE! Only a kernel-only process(ie the swapper or direct descendants
-+ * who haven't done an "execve()") should use this: it will work within
-+ * a system call from a "real" process, but the process memory space will
-+ * not be freed until both the parent and the child have exited.
-+ */
-+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++static int null_rtc_set_time(const time_t secs)
 +{
-+	struct pt_regs regs;
-+
-+	memset(&regs, 0, sizeof(regs));
-+	regs.regs[2] = (unsigned long)arg;
-+	regs.regs[3] = (unsigned long)fn;
++	return 0;
++}
 +
-+	regs.pc = (unsigned long)kernel_thread_helper;
-+	regs.sr = (1 << 30);
++void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
++int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
 +
-+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
-+		       &regs, 0, NULL, NULL);
-+}
++/* last time the RTC clock got updated */
++static long last_rtc_update;
 +
 +/*
-+ * Free current thread data structures etc..
++ * timer_interrupt() needs to keep up the real-time clock,
++ * as well as call the "do_timer()" routine every clocktick
 + */
-+void exit_thread(void)
++static inline void do_timer_interrupt(void)
 +{
-+	/*
-+	 * See arch/sparc/kernel/process.c for the precedent for doing
-+	 * this -- RPC.
-+	 *
-+	 * The SH-5 FPU save/restore approach relies on
-+	 * last_task_used_math pointing to a live task_struct.  When
-+	 * another task tries to use the FPU for the 1st time, the FPUDIS
-+	 * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
-+	 * existing FPU state to the FP regs field within
-+	 * last_task_used_math before re-loading the new task's FPU state
-+	 * (or initialising it if the FPU has been used before).  So if
-+	 * last_task_used_math is stale, and its page has already been
-+	 * re-allocated for another use, the consequences are rather
-+	 * grim. Unless we null it here, there is no other path through
-+	 * which it would get safely nulled.
-+	 */
-+#ifdef CONFIG_SH_FPU
-+	if (last_task_used_math == current) {
-+		last_task_used_math = NULL;
-+	}
-+#endif
-+}
++	unsigned long long current_ctc;
++	asm ("getcon cr62, %0" : "=r" (current_ctc));
++	ctc_last_interrupt = (unsigned long) current_ctc;
 +
-+void flush_thread(void)
-+{
++	do_timer(1);
++#ifndef CONFIG_SMP
++	update_process_times(user_mode(get_irq_regs()));
++#endif
++	if (current->pid)
++		profile_tick(CPU_PROFILING);
 +
-+	/* Called by fs/exec.c (flush_old_exec) to remove traces of a
-+	 * previously running executable. */
-+#ifdef CONFIG_SH_FPU
-+	if (last_task_used_math == current) {
-+		last_task_used_math = NULL;
-+	}
-+	/* Force FPU state to be reinitialised after exec */
-+	clear_used_math();
++#ifdef CONFIG_HEARTBEAT
++	if (sh_mv.mv_heartbeat != NULL)
++		sh_mv.mv_heartbeat();
 +#endif
 +
-+	/* if we are a kernel thread, about to change to user thread,
-+         * update kreg
-+         */
-+	if(current->thread.kregs==&fake_swapper_regs) {
-+          current->thread.kregs =
-+             ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
-+	  current->thread.uregs = current->thread.kregs;
++	/*
++	 * If we have an externally synchronized Linux clock, then update
++	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++	 * called as close as possible to 500 ms before the new second starts.
++	 */
++	if (ntp_synced() &&
++	    xtime.tv_sec > last_rtc_update + 660 &&
++	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
++	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
++		if (rtc_sh_set_time(xtime.tv_sec) == 0)
++			last_rtc_update = xtime.tv_sec;
++		else
++			/* do it again in 60 s */
++			last_rtc_update = xtime.tv_sec - 600;
 +	}
 +}
 +
-+void release_thread(struct task_struct *dead_task)
-+{
-+	/* do nothing */
-+}
-+
-+/* Fill in the fpu structure for a core dump.. */
-+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
++/*
++ * This is the same as the above, except we _also_ save the current
++ * Time Stamp Counter value at the time of the timer interrupt, so that
++ * we later on can estimate the time of day more exactly.
++ */
++static irqreturn_t timer_interrupt(int irq, void *dev_id)
 +{
-+#ifdef CONFIG_SH_FPU
-+	int fpvalid;
-+	struct task_struct *tsk = current;
++	unsigned long timer_status;
 +
-+	fpvalid = !!tsk_used_math(tsk);
-+	if (fpvalid) {
-+		if (current == last_task_used_math) {
-+			enable_fpu();
-+			save_fpu(tsk, regs);
-+			disable_fpu();
-+			last_task_used_math = 0;
-+			regs->sr |= SR_FD;
-+		}
++	/* Clear UNF bit */
++	timer_status = ctrl_inw(TMU0_TCR);
++	timer_status &= ~0x100;
++	ctrl_outw(timer_status, TMU0_TCR);
 +
-+		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
-+	}
++	/*
++	 * Here we are in the timer irq handler. We just have irqs locally
++	 * disabled but we don't know if the timer_bh is running on the other
++	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
++	 * the irq version of write_lock because, as just said, we have irqs
++	 * locally disabled. -arca
++	 */
++	write_lock(&xtime_lock);
++	do_timer_interrupt();
++	write_unlock(&xtime_lock);
 +
-+	return fpvalid;
-+#else
-+	return 0; /* Task didn't use the fpu at all. */
-+#endif
++	return IRQ_HANDLED;
 +}
 +
-+asmlinkage void ret_from_fork(void);
 +
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
-+		unsigned long unused,
-+		struct task_struct *p, struct pt_regs *regs)
++static __init unsigned int get_cpu_hz(void)
 +{
-+	struct pt_regs *childregs;
-+	unsigned long long se;			/* Sign extension */
-+
-+#ifdef CONFIG_SH_FPU
-+	if(last_task_used_math == current) {
-+		enable_fpu();
-+		save_fpu(current, regs);
-+		disable_fpu();
-+		last_task_used_math = NULL;
-+		regs->sr |= SR_FD;
-+	}
-+#endif
-+	/* Copy from sh version */
-+	childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
-+
-+	*childregs = *regs;
++	unsigned int count;
++	unsigned long __dummy;
++	unsigned long ctc_val_init, ctc_val;
 +
-+	if (user_mode(regs)) {
-+		childregs->regs[15] = usp;
-+		p->thread.uregs = childregs;
-+	} else {
-+		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
-+	}
++	/*
++	** Regardless of the toolchain, force the compiler to use the
++	** arbitrary register r3 as a clock tick counter.
++	** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
++	*/
++	register unsigned long long  __rtc_irq_flag __asm__ ("r3");
 +
-+	childregs->regs[9] = 0; /* Set return value for child */
-+	childregs->sr |= SR_FD; /* Invalidate FPU flag */
++	local_irq_enable();
++	do {} while (ctrl_inb(rtc_base) != 0);
++	ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */
 +
-+	p->thread.sp = (unsigned long) childregs;
-+	p->thread.pc = (unsigned long) ret_from_fork;
++	/*
++	 * r3 is arbitrary. CDC does not support "=z".
++	 */
++	ctc_val_init = 0xffffffff;
++	ctc_val = ctc_val_init;
 +
++	asm volatile("gettr	tr0, %1\n\t"
++		     "putcon	%0, " __CTC "\n\t"
++		     "and	%2, r63, %2\n\t"
++		     "pta	$+4, tr0\n\t"
++		     "beq/l	%2, r63, tr0\n\t"
++		     "ptabs	%1, tr0\n\t"
++		     "getcon	" __CTC ", %0\n\t"
++		: "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
++		: "0" (0));
++	local_irq_disable();
 +	/*
-+	 * Sign extend the edited stack.
-+         * Note that thread.pc and thread.pc will stay
-+	 * 32-bit wide and context switch must take care
-+	 * of NEFF sign extension.
++	 * SH-3:
++	 * CPU clock = 4 stages * loop
++	 * tst    rm,rm      if id ex
++	 * bt/s   1b            if id ex
++	 * add    #1,rd            if id ex
++         *                            (if) pipe line stole
++	 * tst    rm,rm                  if id ex
++         * ....
++	 *
++	 *
++	 * SH-4:
++	 * CPU clock = 6 stages * loop
++	 * I don't know why.
++         * ....
++	 *
++	 * SH-5:
++	 * Use CTC register to count.  This approach returns the right value
++	 * even if the I-cache is disabled (e.g. whilst debugging.)
++	 *
 +	 */
 +
-+	se = childregs->regs[15];
-+	se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
-+	childregs->regs[15] = se;
++	count = ctc_val_init - ctc_val; /* CTC counts down */
 +
-+	return 0;
++	/*
++	 * This really is count by the number of clock cycles
++         * by the ratio between a complete R64CNT
++         * wrap-around (128) and CUI interrupt being raised (64).
++	 */
++	return count*2;
 +}
 +
-+asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
-+			unsigned long r4, unsigned long r5,
-+			unsigned long r6, unsigned long r7,
-+			struct pt_regs *pregs)
++static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
 +{
-+	return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
-+}
++	struct pt_regs *regs = get_irq_regs();
 +
-+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
-+			 unsigned long r4, unsigned long r5,
-+			 unsigned long r6, unsigned long r7,
-+			 struct pt_regs *pregs)
-+{
-+	if (!newsp)
-+		newsp = pregs->regs[15];
-+	return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
-+}
++	ctrl_outb(0, RTC_RCR1);	/* Disable Carry Interrupts */
++	regs->regs[3] = 1;	/* Using r3 */
 +
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
-+			 unsigned long r4, unsigned long r5,
-+			 unsigned long r6, unsigned long r7,
-+			 struct pt_regs *pregs)
-+{
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
++	return IRQ_HANDLED;
 +}
 +
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage int sys_execve(char *ufilename, char **uargv,
-+			  char **uenvp, unsigned long r5,
-+			  unsigned long r6, unsigned long r7,
-+			  struct pt_regs *pregs)
-+{
-+	int error;
-+	char *filename;
++static struct irqaction irq0  = {
++	.handler = timer_interrupt,
++	.flags = IRQF_DISABLED,
++	.mask = CPU_MASK_NONE,
++	.name = "timer",
++};
++static struct irqaction irq1  = {
++	.handler = sh64_rtc_interrupt,
++	.flags = IRQF_DISABLED,
++	.mask = CPU_MASK_NONE,
++	.name = "rtc",
++};
 +
-+	lock_kernel();
-+	filename = getname((char __user *)ufilename);
-+	error = PTR_ERR(filename);
-+	if (IS_ERR(filename))
-+		goto out;
++void __init time_init(void)
++{
++	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
++	unsigned long interval;
++	unsigned long frqcr, ifc, pfc;
++	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
++#define bfc_table ifc_table	/* Same */
++#define pfc_table ifc_table	/* Same */
 +
-+	error = do_execve(filename,
-+			  (char __user * __user *)uargv,
-+			  (char __user * __user *)uenvp,
-+			  pregs);
-+	if (error == 0) {
-+		task_lock(current);
-+		current->ptrace &= ~PT_DTRACE;
-+		task_unlock(current);
++	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
++	if (!tmu_base) {
++		panic("Unable to remap TMU\n");
 +	}
-+	putname(filename);
-+out:
-+	unlock_kernel();
-+	return error;
-+}
 +
-+/*
-+ * These bracket the sleeping functions..
-+ */
-+extern void interruptible_sleep_on(wait_queue_head_t *q);
++	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
++	if (!rtc_base) {
++		panic("Unable to remap RTC\n");
++	}
 +
-+#define mid_sched	((unsigned long) interruptible_sleep_on)
++	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
++	if (!cprc_base) {
++		panic("Unable to remap CPRC\n");
++	}
 +
-+static int in_sh64_switch_to(unsigned long pc)
-+{
-+	extern char __sh64_switch_to_end;
-+	/* For a sleeping task, the PC is somewhere in the middle of the function,
-+	   so we don't have to worry about masking the LSB off */
-+	return (pc >= (unsigned long) sh64_switch_to) &&
-+	       (pc < (unsigned long) &__sh64_switch_to_end);
-+}
++	rtc_sh_get_time(&xtime);
 +
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+	unsigned long schedule_fp;
-+	unsigned long sh64_switch_to_fp;
-+	unsigned long schedule_caller_pc;
-+	unsigned long pc;
++	setup_irq(TIMER_IRQ, &irq0);
++	setup_irq(RTC_IRQ, &irq1);
 +
-+	if (!p || p == current || p->state == TASK_RUNNING)
-+		return 0;
++	/* Check how fast it is.. */
++	cpu_clock = get_cpu_hz();
 +
-+	/*
-+	 * The same comment as on the Alpha applies here, too ...
-+	 */
-+	pc = thread_saved_pc(p);
++	/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
++	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
 +
-+#ifdef CONFIG_FRAME_POINTER
-+	if (in_sh64_switch_to(pc)) {
-+		sh64_switch_to_fp = (long) p->thread.sp;
-+		/* r14 is saved at offset 4 in the sh64_switch_to frame */
-+		schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
++	free_irq(RTC_IRQ, NULL);
 +
-+		/* and the caller of 'schedule' is (currently!) saved at offset 24
-+		   in the frame of schedule (from disasm) */
-+		schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
-+		return schedule_caller_pc;
++	printk("CPU clock: %d.%02dMHz\n",
++	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
++	{
++		unsigned short bfc;
++		frqcr = ctrl_inl(FRQCR);
++		ifc  = ifc_table[(frqcr>> 6) & 0x0007];
++		bfc  = bfc_table[(frqcr>> 3) & 0x0007];
++		pfc  = pfc_table[(frqcr>> 12) & 0x0007];
++		master_clock = cpu_clock * ifc;
++		bus_clock = master_clock/bfc;
 +	}
-+#endif
-+	return pc;
-+}
 +
-+/* Provide a /proc/asids file that lists out the
-+   ASIDs currently associated with the processes.  (If the DM.PC register is
-+   examined through the debug link, this shows ASID + PC.  To make use of this,
-+   the PID->ASID relationship needs to be known.  This is primarily for
-+   debugging.)
-+   */
++	printk("Bus clock: %d.%02dMHz\n",
++	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
++	module_clock = master_clock/pfc;
++	printk("Module clock: %d.%02dMHz\n",
++	       (module_clock/1000000), (module_clock % 1000000)/10000);
++	interval = (module_clock/(HZ*4));
 +
-+#if defined(CONFIG_SH64_PROC_ASIDS)
-+static int
-+asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
++	printk("Interval = %ld\n", interval);
++
++	current_cpu_data.cpu_clock    = cpu_clock;
++	current_cpu_data.master_clock = master_clock;
++	current_cpu_data.bus_clock    = bus_clock;
++	current_cpu_data.module_clock = module_clock;
++
++	/* Start TMU0 */
++	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
++	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
++	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
++	ctrl_outl(interval, TMU0_TCOR);
++	ctrl_outl(interval, TMU0_TCNT);
++	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
++}
++
++void enter_deep_standby(void)
 +{
-+	int len=0;
-+	struct task_struct *p;
-+	read_lock(&tasklist_lock);
-+	for_each_process(p) {
-+		int pid = p->pid;
++	/* Disable watchdog timer */
++	ctrl_outl(0xa5000000, WTCSR);
++	/* Configure deep standby on sleep */
++	ctrl_outl(0x03, STBCR);
 +
-+		if (!pid)
-+			continue;
-+		if (p->mm)
-+			len += sprintf(buf+len, "%5d : %02lx\n", pid,
-+				       asid_cache(smp_processor_id()));
-+		else
-+			len += sprintf(buf+len, "%5d : (none)\n", pid);
++#ifdef CONFIG_SH_ALPHANUMERIC
++	{
++		extern void mach_alphanum(int position, unsigned char value);
++		extern void mach_alphanum_brightness(int setting);
++		char halted[] = "Halted. ";
++		int i;
++		mach_alphanum_brightness(6); /* dimmest setting above off */
++		for (i=0; i<8; i++) {
++			mach_alphanum(i, halted[i]);
++		}
++		asm __volatile__ ("synco");
 +	}
-+	read_unlock(&tasklist_lock);
-+	*eof = 1;
-+	return len;
++#endif
++
++	asm __volatile__ ("sleep");
++	asm __volatile__ ("synci");
++	asm __volatile__ ("nop");
++	asm __volatile__ ("nop");
++	asm __volatile__ ("nop");
++	asm __volatile__ ("nop");
++	panic("Unexpected wakeup!\n");
 +}
 +
-+static int __init register_proc_asids(void)
++static struct resource rtc_resources[] = {
++	[0] = {
++		/* RTC base, filled in by rtc_init */
++		.flags	= IORESOURCE_IO,
++	},
++	[1] = {
++		/* Period IRQ */
++		.start	= IRQ_PRI,
++		.flags	= IORESOURCE_IRQ,
++	},
++	[2] = {
++		/* Carry IRQ */
++		.start	= IRQ_CUI,
++		.flags	= IORESOURCE_IRQ,
++	},
++	[3] = {
++		/* Alarm IRQ */
++		.start	= IRQ_ATI,
++		.flags	= IORESOURCE_IRQ,
++	},
++};
++
++static struct platform_device rtc_device = {
++	.name		= "sh-rtc",
++	.id		= -1,
++	.num_resources	= ARRAY_SIZE(rtc_resources),
++	.resource	= rtc_resources,
++};
++
++static int __init rtc_init(void)
 +{
-+	create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
-+	return 0;
++	rtc_resources[0].start	= rtc_base;
++	rtc_resources[0].end	= rtc_resources[0].start + 0x58 - 1;
++
++	return platform_device_register(&rtc_device);
 +}
-+__initcall(register_proc_asids);
-+#endif
-diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
-deleted file mode 100644
-index ac725f0..0000000
---- a/arch/sh/kernel/ptrace.c
-+++ /dev/null
-@@ -1,274 +0,0 @@
++device_initcall(rtc_init);
+diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
+index 82de689..499e07b 100644
+--- a/arch/sh/kernel/timers/timer-cmt.c
++++ b/arch/sh/kernel/timers/timer-cmt.c
+@@ -31,7 +31,9 @@
+ #define cmt_clock_enable() do {	ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
+ #define CMT_CMCSR_INIT	0x0040
+ #define CMT_CMCSR_CALIB	0x0000
+-#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
++#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \
++      defined(CONFIG_CPU_SUBTYPE_SH7206) || \
++      defined(CONFIG_CPU_SUBTYPE_SH7263)
+ #define CMT_CMSTR	0xfffec000
+ #define CMT_CMCSR_0	0xfffec002
+ #define CMT_CMCNT_0	0xfffec004
+diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
+index 628ec9a..8935570 100644
+--- a/arch/sh/kernel/timers/timer-tmu.c
++++ b/arch/sh/kernel/timers/timer-tmu.c
+@@ -174,6 +174,7 @@ static int tmu_timer_init(void)
+ 	tmu_timer_stop();
+ 
+ #if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
++    !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
+     !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
+     !defined(CONFIG_CPU_SUBTYPE_SH7785) && \
+     !defined(CONFIG_CPU_SUBTYPE_SHX3)
+diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
+index cf99111..a3bdc68 100644
+--- a/arch/sh/kernel/traps.c
++++ b/arch/sh/kernel/traps.c
+@@ -1,947 +1,68 @@
 -/*
-- * linux/arch/sh/kernel/ptrace.c
-- *
-- * Original x86 implementation:
-- *	By Ross Biro 1/23/92
-- *	edited by Linus Torvalds
+- * 'traps.c' handles hardware traps and faults after we have saved some
+- * state in 'entry.S'.
 - *
-- * SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
+- *  SuperH version: Copyright (C) 1999 Niibe Yutaka
+- *                  Copyright (C) 2000 Philipp Rumpf
+- *                  Copyright (C) 2000 David Howells
+- *                  Copyright (C) 2002 - 2007 Paul Mundt
 - *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
 - */
 -#include <linux/kernel.h>
--#include <linux/sched.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/errno.h>
 -#include <linux/ptrace.h>
--#include <linux/user.h>
--#include <linux/slab.h>
--#include <linux/security.h>
--#include <linux/signal.h>
+-#include <linux/init.h>
+-#include <linux/spinlock.h>
+-#include <linux/module.h>
+-#include <linux/kallsyms.h>
 -#include <linux/io.h>
+ #include <linux/bug.h>
+-#include <linux/debug_locks.h>
++#include <linux/io.h>
++#include <linux/types.h>
+ #include <linux/kdebug.h>
+-#include <linux/kexec.h>
+-#include <linux/limits.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
+ #include <asm/system.h>
 -#include <asm/uaccess.h>
--#include <asm/pgtable.h>
--#include <asm/system.h>
--#include <asm/processor.h>
--#include <asm/mmu_context.h>
+-
+-#ifdef CONFIG_SH_KGDB
+-#include <asm/kgdb.h>
+-#define CHK_REMOTE_DEBUG(regs)			\
+-{						\
+-	if (kgdb_debug_hook && !user_mode(regs))\
+-		(*kgdb_debug_hook)(regs);       \
+-}
+-#else
+-#define CHK_REMOTE_DEBUG(regs)
+-#endif
+-
+-#ifdef CONFIG_CPU_SH2
+-# define TRAP_RESERVED_INST	4
+-# define TRAP_ILLEGAL_SLOT_INST	6
+-# define TRAP_ADDRESS_ERROR	9
+-# ifdef CONFIG_CPU_SH2A
+-#  define TRAP_DIVZERO_ERROR	17
+-#  define TRAP_DIVOVF_ERROR	18
+-# endif
+-#else
+-#define TRAP_RESERVED_INST	12
+-#define TRAP_ILLEGAL_SLOT_INST	13
+-#endif
+-
+-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+-{
+-	unsigned long p;
+-	int i;
+-
+-	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+-
+-	for (p = bottom & ~31; p < top; ) {
+-		printk("%04lx: ", p & 0xffff);
+-
+-		for (i = 0; i < 8; i++, p += 4) {
+-			unsigned int val;
+-
+-			if (p < bottom || p >= top)
+-				printk("         ");
+-			else {
+-				if (__get_user(val, (unsigned int __user *)p)) {
+-					printk("\n");
+-					return;
+-				}
+-				printk("%08x ", val);
+-			}
+-		}
+-		printk("\n");
+-	}
+-}
+-
+-static DEFINE_SPINLOCK(die_lock);
+-
+-void die(const char * str, struct pt_regs * regs, long err)
+-{
+-	static int die_counter;
+-
+-	oops_enter();
+-
+-	console_verbose();
+-	spin_lock_irq(&die_lock);
+-	bust_spinlocks(1);
+-
+-	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+-
+-	CHK_REMOTE_DEBUG(regs);
+-	print_modules();
+-	show_regs(regs);
+-
+-	printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
+-			task_pid_nr(current), task_stack_page(current) + 1);
+-
+-	if (!user_mode(regs) || in_interrupt())
+-		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
+-			 (unsigned long)task_stack_page(current));
+-
+-	bust_spinlocks(0);
+-	add_taint(TAINT_DIE);
+-	spin_unlock_irq(&die_lock);
+-
+-	if (kexec_should_crash(current))
+-		crash_kexec(regs);
+-
+-	if (in_interrupt())
+-		panic("Fatal exception in interrupt");
+-
+-	if (panic_on_oops)
+-		panic("Fatal exception");
+-
+-	oops_exit();
+-	do_exit(SIGSEGV);
+-}
+-
+-static inline void die_if_kernel(const char *str, struct pt_regs *regs,
+-				 long err)
+-{
+-	if (!user_mode(regs))
+-		die(str, regs, err);
+-}
 -
 -/*
-- * does not yet catch signals sent when the child dies.
-- * in exit.c or in signal.c.
+- * try and fix up kernelspace address errors
+- * - userspace errors just cause EFAULT to be returned, resulting in SEGV
+- * - kernel/userspace interfaces cause a jump to an appropriate handler
+- * - other kernel errors are bad
+- * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
+- */
+-static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+-{
+-	if (!user_mode(regs)) {
+-		const struct exception_table_entry *fixup;
+-		fixup = search_exception_tables(regs->pc);
+-		if (fixup) {
+-			regs->pc = fixup->fixup;
+-			return 0;
+-		}
+-		die(str, regs, err);
+-	}
+-	return -EFAULT;
+-}
+-
+-/*
+- * handle an instruction that does an unaligned memory access by emulating the
+- * desired behaviour
+- * - note that PC _may not_ point to the faulting instruction
+- *   (if that instruction is in a branch delay slot)
+- * - return 0 if emulation okay, -EFAULT on existential error
+- */
+-static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
+-{
+-	int ret, index, count;
+-	unsigned long *rm, *rn;
+-	unsigned char *src, *dst;
+-
+-	index = (instruction>>8)&15;	/* 0x0F00 */
+-	rn = &regs->regs[index];
+-
+-	index = (instruction>>4)&15;	/* 0x00F0 */
+-	rm = &regs->regs[index];
+-
+-	count = 1<<(instruction&3);
+-
+-	ret = -EFAULT;
+-	switch (instruction>>12) {
+-	case 0: /* mov.[bwl] to/from memory via r0+rn */
+-		if (instruction & 8) {
+-			/* from memory */
+-			src = (unsigned char*) *rm;
+-			src += regs->regs[0];
+-			dst = (unsigned char*) rn;
+-			*(unsigned long*)dst = 0;
+-
+-#ifdef __LITTLE_ENDIAN__
+-			if (copy_from_user(dst, src, count))
+-				goto fetch_fault;
+-
+-			if ((count == 2) && dst[1] & 0x80) {
+-				dst[2] = 0xff;
+-				dst[3] = 0xff;
+-			}
+-#else
+-			dst += 4-count;
+-
+-			if (__copy_user(dst, src, count))
+-				goto fetch_fault;
+-
+-			if ((count == 2) && dst[2] & 0x80) {
+-				dst[0] = 0xff;
+-				dst[1] = 0xff;
+-			}
+-#endif
+-		} else {
+-			/* to memory */
+-			src = (unsigned char*) rm;
+-#if !defined(__LITTLE_ENDIAN__)
+-			src += 4-count;
+-#endif
+-			dst = (unsigned char*) *rn;
+-			dst += regs->regs[0];
+-
+-			if (copy_to_user(dst, src, count))
+-				goto fetch_fault;
+-		}
+-		ret = 0;
+-		break;
+-
+-	case 1: /* mov.l Rm,@(disp,Rn) */
+-		src = (unsigned char*) rm;
+-		dst = (unsigned char*) *rn;
+-		dst += (instruction&0x000F)<<2;
+-
+-		if (copy_to_user(dst,src,4))
+-			goto fetch_fault;
+-		ret = 0;
+-		break;
+-
+-	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
+-		if (instruction & 4)
+-			*rn -= count;
+-		src = (unsigned char*) rm;
+-		dst = (unsigned char*) *rn;
+-#if !defined(__LITTLE_ENDIAN__)
+-		src += 4-count;
+-#endif
+-		if (copy_to_user(dst, src, count))
+-			goto fetch_fault;
+-		ret = 0;
+-		break;
+-
+-	case 5: /* mov.l @(disp,Rm),Rn */
+-		src = (unsigned char*) *rm;
+-		src += (instruction&0x000F)<<2;
+-		dst = (unsigned char*) rn;
+-		*(unsigned long*)dst = 0;
+-
+-		if (copy_from_user(dst,src,4))
+-			goto fetch_fault;
+-		ret = 0;
+-		break;
+ 
+-	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
+-		src = (unsigned char*) *rm;
+-		if (instruction & 4)
+-			*rm += count;
+-		dst = (unsigned char*) rn;
+-		*(unsigned long*)dst = 0;
+-
+-#ifdef __LITTLE_ENDIAN__
+-		if (copy_from_user(dst, src, count))
+-			goto fetch_fault;
+-
+-		if ((count == 2) && dst[1] & 0x80) {
+-			dst[2] = 0xff;
+-			dst[3] = 0xff;
+-		}
+-#else
+-		dst += 4-count;
+-
+-		if (copy_from_user(dst, src, count))
+-			goto fetch_fault;
+-
+-		if ((count == 2) && dst[2] & 0x80) {
+-			dst[0] = 0xff;
+-			dst[1] = 0xff;
+-		}
+-#endif
+-		ret = 0;
+-		break;
+-
+-	case 8:
+-		switch ((instruction&0xFF00)>>8) {
+-		case 0x81: /* mov.w R0,@(disp,Rn) */
+-			src = (unsigned char*) &regs->regs[0];
+-#if !defined(__LITTLE_ENDIAN__)
+-			src += 2;
+-#endif
+-			dst = (unsigned char*) *rm; /* called Rn in the spec */
+-			dst += (instruction&0x000F)<<1;
+-
+-			if (copy_to_user(dst, src, 2))
+-				goto fetch_fault;
+-			ret = 0;
+-			break;
+-
+-		case 0x85: /* mov.w @(disp,Rm),R0 */
+-			src = (unsigned char*) *rm;
+-			src += (instruction&0x000F)<<1;
+-			dst = (unsigned char*) &regs->regs[0];
+-			*(unsigned long*)dst = 0;
+-
+-#if !defined(__LITTLE_ENDIAN__)
+-			dst += 2;
+-#endif
+-
+-			if (copy_from_user(dst, src, 2))
+-				goto fetch_fault;
+-
+-#ifdef __LITTLE_ENDIAN__
+-			if (dst[1] & 0x80) {
+-				dst[2] = 0xff;
+-				dst[3] = 0xff;
+-			}
+-#else
+-			if (dst[2] & 0x80) {
+-				dst[0] = 0xff;
+-				dst[1] = 0xff;
+-			}
+-#endif
+-			ret = 0;
+-			break;
+-		}
+-		break;
+-	}
+-	return ret;
+-
+- fetch_fault:
+-	/* Argh. Address not only misaligned but also non-existent.
+-	 * Raise an EFAULT and see if it's trapped
+-	 */
+-	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
+-}
+-
+-/*
+- * emulate the instruction in the delay slot
+- * - fetches the instruction from PC+2
 - */
+-static inline int handle_unaligned_delayslot(struct pt_regs *regs)
++#ifdef CONFIG_BUG
++static void handle_BUG(struct pt_regs *regs)
+ {
+-	u16 instruction;
 -
+-	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
+-		/* the instruction-fetch faulted */
+-		if (user_mode(regs))
+-			return -EFAULT;
+-
+-		/* kernel */
+-		die("delay-slot-insn faulting in handle_unaligned_delayslot",
+-		    regs, 0);
++	enum bug_trap_type tt;
++	tt = report_bug(regs->pc, regs);
++	if (tt == BUG_TRAP_TYPE_WARN) {
++		regs->pc += instruction_size(regs->pc);
++		return;
+ 	}
+ 
+-	return handle_unaligned_ins(instruction,regs);
++	die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
+ }
+ 
 -/*
-- * This routine will get a word off of the process kernel stack.
+- * handle an instruction that does an unaligned memory access
+- * - have to be careful of branch delay-slot instructions that fault
+- *  SH3:
+- *   - if the branch would be taken PC points to the branch
+- *   - if the branch would not be taken, PC points to delay-slot
+- *  SH4:
+- *   - PC always points to delayed branch
+- * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 - */
--static inline int get_stack_long(struct task_struct *task, int offset)
--{
--	unsigned char *stack;
 -
--	stack = (unsigned char *)task_pt_regs(task);
--	stack += offset;
--	return (*((int *)stack));
--}
+-/* Macros to determine offset from current PC for branch instructions */
+-/* Explicit type coercion is used to force sign extension where needed */
+-#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
+-#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
 -
 -/*
-- * This routine will put a word on the process kernel stack.
+- * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+- * opcodes..
 - */
--static inline int put_stack_long(struct task_struct *task, int offset,
--				 unsigned long data)
--{
--	unsigned char *stack;
+-#ifndef CONFIG_CPU_SH2A
+-static int handle_unaligned_notify_count = 10;
 -
--	stack = (unsigned char *)task_pt_regs(task);
--	stack += offset;
--	*(unsigned long *) stack = data;
--	return 0;
--}
+-static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
++int is_valid_bugaddr(unsigned long addr)
+ {
+-	u_int rm;
+-	int ret, index;
 -
--static void ptrace_disable_singlestep(struct task_struct *child)
--{
--	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+-	index = (instruction>>8)&15;	/* 0x0F00 */
+-	rm = regs->regs[index];
 -
--	/*
--	 * Ensure the UBC is not programmed at the next context switch.
--	 *
--	 * Normally this is not needed but there are sequences such as
--	 * singlestep, signal delivery, and continue that leave the
--	 * ubc_pc non-zero leading to spurious SIGTRAPs.
--	 */
--	if (child->thread.ubc_pc != 0) {
--		ubc_usercnt -= 1;
--		child->thread.ubc_pc = 0;
+-	/* shout about the first ten userspace fixups */
+-	if (user_mode(regs) && handle_unaligned_notify_count>0) {
+-		handle_unaligned_notify_count--;
+-
+-		printk(KERN_NOTICE "Fixing up unaligned userspace access "
+-		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+-		       current->comm, task_pid_nr(current),
+-		       (u16 *)regs->pc, instruction);
 -	}
--}
 -
--/*
-- * Called by kernel/ptrace.c when detaching..
-- *
-- * Make sure single step bits etc are not set.
-- */
--void ptrace_disable(struct task_struct *child)
--{
--	ptrace_disable_singlestep(child);
--}
+-	ret = -EFAULT;
+-	switch (instruction&0xF000) {
+-	case 0x0000:
+-		if (instruction==0x000B) {
+-			/* rts */
+-			ret = handle_unaligned_delayslot(regs);
+-			if (ret==0)
+-				regs->pc = regs->pr;
+-		}
+-		else if ((instruction&0x00FF)==0x0023) {
+-			/* braf @Rm */
+-			ret = handle_unaligned_delayslot(regs);
+-			if (ret==0)
+-				regs->pc += rm + 4;
+-		}
+-		else if ((instruction&0x00FF)==0x0003) {
+-			/* bsrf @Rm */
+-			ret = handle_unaligned_delayslot(regs);
+-			if (ret==0) {
+-				regs->pr = regs->pc + 4;
+-				regs->pc += rm + 4;
+-			}
+-		}
+-		else {
+-			/* mov.[bwl] to/from memory via r0+rn */
+-			goto simple;
+-		}
+-		break;
 -
--long arch_ptrace(struct task_struct *child, long request, long addr, long data)
--{
--	struct user * dummy = NULL;
--	int ret;
+-	case 0x1000: /* mov.l Rm,@(disp,Rn) */
+-		goto simple;
 -
--	switch (request) {
--	/* when I and D space are separate, these will need to be fixed. */
--	case PTRACE_PEEKTEXT: /* read word at location addr. */
--	case PTRACE_PEEKDATA:
--		ret = generic_ptrace_peekdata(child, addr, data);
+-	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
+-		goto simple;
+-
+-	case 0x4000:
+-		if ((instruction&0x00FF)==0x002B) {
+-			/* jmp @Rm */
+-			ret = handle_unaligned_delayslot(regs);
+-			if (ret==0)
+-				regs->pc = rm;
+-		}
+-		else if ((instruction&0x00FF)==0x000B) {
+-			/* jsr @Rm */
+-			ret = handle_unaligned_delayslot(regs);
+-			if (ret==0) {
+-				regs->pr = regs->pc + 4;
+-				regs->pc = rm;
+-			}
+-		}
+-		else {
+-			/* mov.[bwl] to/from memory via r0+rn */
+-			goto simple;
+-		}
 -		break;
 -
--	/* read the word at location addr in the USER area. */
--	case PTRACE_PEEKUSR: {
--		unsigned long tmp;
+-	case 0x5000: /* mov.l @(disp,Rm),Rn */
+-		goto simple;
 -
--		ret = -EIO;
--		if ((addr & 3) || addr < 0 ||
--		    addr > sizeof(struct user) - 3)
--			break;
+-	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
+-		goto simple;
 -
--		if (addr < sizeof(struct pt_regs))
--			tmp = get_stack_long(child, addr);
--		else if (addr >= (long) &dummy->fpu &&
--			 addr < (long) &dummy->u_fpvalid) {
--			if (!tsk_used_math(child)) {
--				if (addr == (long)&dummy->fpu.fpscr)
--					tmp = FPSCR_INIT;
+-	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
+-		switch (instruction&0x0F00) {
+-		case 0x0100: /* mov.w R0,@(disp,Rm) */
+-			goto simple;
+-		case 0x0500: /* mov.w @(disp,Rm),R0 */
+-			goto simple;
+-		case 0x0B00: /* bf   lab - no delayslot*/
+-			break;
+-		case 0x0F00: /* bf/s lab */
+-			ret = handle_unaligned_delayslot(regs);
+-			if (ret==0) {
+-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+-				if ((regs->sr & 0x00000001) != 0)
+-					regs->pc += 4; /* next after slot */
 -				else
--					tmp = 0;
--			} else
--				tmp = ((long *)&child->thread.fpu)
--					[(addr - (long)&dummy->fpu) >> 2];
--		} else if (addr == (long) &dummy->u_fpvalid)
--			tmp = !!tsk_used_math(child);
--		else
--			tmp = 0;
--		ret = put_user(tmp, (unsigned long __user *)data);
+-#endif
+-					regs->pc += SH_PC_8BIT_OFFSET(instruction);
+-			}
+-			break;
+-		case 0x0900: /* bt   lab - no delayslot */
+-			break;
+-		case 0x0D00: /* bt/s lab */
+-			ret = handle_unaligned_delayslot(regs);
+-			if (ret==0) {
+-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+-				if ((regs->sr & 0x00000001) == 0)
+-					regs->pc += 4; /* next after slot */
+-				else
+-#endif
+-					regs->pc += SH_PC_8BIT_OFFSET(instruction);
+-			}
+-			break;
+-		}
 -		break;
--	}
 -
--	/* when I and D space are separate, this will have to be fixed. */
--	case PTRACE_POKETEXT: /* write the word at location addr. */
--	case PTRACE_POKEDATA:
--		ret = generic_ptrace_pokedata(child, addr, data);
+-	case 0xA000: /* bra label */
+-		ret = handle_unaligned_delayslot(regs);
+-		if (ret==0)
+-			regs->pc += SH_PC_12BIT_OFFSET(instruction);
 -		break;
 -
--	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
--		ret = -EIO;
--		if ((addr & 3) || addr < 0 ||
--		    addr > sizeof(struct user) - 3)
--			break;
--
--		if (addr < sizeof(struct pt_regs))
--			ret = put_stack_long(child, addr, data);
--		else if (addr >= (long) &dummy->fpu &&
--			 addr < (long) &dummy->u_fpvalid) {
--			set_stopped_child_used_math(child);
--			((long *)&child->thread.fpu)
--				[(addr - (long)&dummy->fpu) >> 2] = data;
--			ret = 0;
--		} else if (addr == (long) &dummy->u_fpvalid) {
--			conditional_stopped_child_used_math(data, child);
--			ret = 0;
+-	case 0xB000: /* bsr label */
+-		ret = handle_unaligned_delayslot(regs);
+-		if (ret==0) {
+-			regs->pr = regs->pc + 4;
+-			regs->pc += SH_PC_12BIT_OFFSET(instruction);
 -		}
 -		break;
+-	}
+-	return ret;
 -
--	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
--	case PTRACE_CONT: { /* restart after signal. */
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		if (request == PTRACE_SYSCALL)
--			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		else
--			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-	/* handle non-delay-slot instruction */
+- simple:
+-	ret = handle_unaligned_ins(instruction,regs);
+-	if (ret==0)
+-		regs->pc += instruction_size(instruction);
+-	return ret;
++	return addr >= PAGE_OFFSET;
+ }
+-#endif /* CONFIG_CPU_SH2A */
 -
--		ptrace_disable_singlestep(child);
+-#ifdef CONFIG_CPU_HAS_SR_RB
+-#define lookup_exception_vector(x)	\
+-	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+-#else
+-#define lookup_exception_vector(x)	\
+-	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+ #endif
+ 
+ /*
+- * Handle various address error exceptions:
+- *  - instruction address error:
+- *       misaligned PC
+- *       PC >= 0x80000000 in user mode
+- *  - data address error (read and write)
+- *       misaligned data access
+- *       access to >= 0x80000000 is user mode
+- * Unfortuntaly we can't distinguish between instruction address error
+- * and data address errors caused by read accesses.
++ * Generic trap handler.
+  */
+-asmlinkage void do_address_error(struct pt_regs *regs,
+-				 unsigned long writeaccess,
+-				 unsigned long address)
++BUILD_TRAP_HANDLER(debug)
+ {
+-	unsigned long error_code = 0;
+-	mm_segment_t oldfs;
+-	siginfo_t info;
+-#ifndef CONFIG_CPU_SH2A
+-	u16 instruction;
+-	int tmp;
+-#endif
 -
--		child->exit_code = data;
--		wake_up_process(child);
--		ret = 0;
--		break;
--	}
+-	/* Intentional ifdef */
+-#ifdef CONFIG_CPU_HAS_SR_RB
+-	lookup_exception_vector(error_code);
+-#endif
 -
--/*
-- * make the child exit.  Best I can do is send it a sigkill.
-- * perhaps it should be put in the status that it wants to
-- * exit.
-- */
--	case PTRACE_KILL: {
--		ret = 0;
--		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
--			break;
--		ptrace_disable_singlestep(child);
--		child->exit_code = SIGKILL;
--		wake_up_process(child);
--		break;
--	}
+-	oldfs = get_fs();
 -
--	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
--		long pc;
--		struct pt_regs *regs = NULL;
+-	if (user_mode(regs)) {
+-		int si_code = BUS_ADRERR;
 -
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		if ((child->ptrace & PT_DTRACE) == 0) {
--			/* Spurious delayed TF traps may occur */
--			child->ptrace |= PT_DTRACE;
+-		local_irq_enable();
++	TRAP_HANDLER_DECL;
+ 
+-		/* bad PC is not something we can fix */
+-		if (regs->pc & 1) {
+-			si_code = BUS_ADRALN;
+-			goto uspace_segv;
+-		}
++	/* Rewind */
++	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ 
+-#ifndef CONFIG_CPU_SH2A
+-		set_fs(USER_DS);
+-		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+-			/* Argh. Fault on the instruction itself.
+-			   This should never happen non-SMP
+-			*/
+-			set_fs(oldfs);
+-			goto uspace_segv;
 -		}
 -
--		pc = get_stack_long(child, (long)&regs->pc);
+-		tmp = handle_unaligned_access(instruction, regs);
+-		set_fs(oldfs);
 -
--		/* Next scheduling will set up UBC */
--		if (child->thread.ubc_pc == 0)
--			ubc_usercnt += 1;
--		child->thread.ubc_pc = pc;
+-		if (tmp==0)
+-			return; /* sorted */
+-#endif
 -
--		set_tsk_thread_flag(child, TIF_SINGLESTEP);
--		child->exit_code = data;
--		/* give it a chance to run. */
--		wake_up_process(child);
--		ret = 0;
--		break;
--	}
+-uspace_segv:
+-		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+-		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+-		       regs->pr);
 -
--#ifdef CONFIG_SH_DSP
--	case PTRACE_GETDSPREGS: {
--		unsigned long dp;
+-		info.si_signo = SIGBUS;
+-		info.si_errno = 0;
+-		info.si_code = si_code;
+-		info.si_addr = (void __user *)address;
+-		force_sig_info(SIGBUS, &info, current);
+-	} else {
+-		if (regs->pc & 1)
+-			die("unaligned program counter", regs, error_code);
 -
--		ret = -EIO;
--		dp = ((unsigned long) child) + THREAD_SIZE -
--			 sizeof(struct pt_dspregs);
--		if (*((int *) (dp - 4)) == SR_FD) {
--			copy_to_user(addr, (void *) dp,
--				sizeof(struct pt_dspregs));
--			ret = 0;
+-#ifndef CONFIG_CPU_SH2A
+-		set_fs(KERNEL_DS);
+-		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+-			/* Argh. Fault on the instruction itself.
+-			   This should never happen non-SMP
+-			*/
+-			set_fs(oldfs);
+-			die("insn faulting in do_address_error", regs, 0);
 -		}
--		break;
+-
+-		handle_unaligned_access(instruction, regs);
+-		set_fs(oldfs);
+-#else
+-		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+-		       "access\n", current->comm);
++	if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
++		       SIGTRAP) == NOTIFY_STOP)
++		return;
+ 
+-		force_sig(SIGSEGV, current);
+-#endif
 -	}
++	force_sig(SIGTRAP, current);
+ }
+ 
+-#ifdef CONFIG_SH_DSP
+ /*
+- *	SH-DSP support gerg at snapgear.com.
++ * Special handler for BUG() traps.
+  */
+-int is_dsp_inst(struct pt_regs *regs)
++BUILD_TRAP_HANDLER(bug)
+ {
+-	unsigned short inst = 0;
 -
--	case PTRACE_SETDSPREGS: {
--		unsigned long dp;
+-	/*
+-	 * Safe guard if DSP mode is already enabled or we're lacking
+-	 * the DSP altogether.
+-	 */
+-	if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
+-		return 0;
 -
--		ret = -EIO;
--		dp = ((unsigned long) child) + THREAD_SIZE -
--			 sizeof(struct pt_dspregs);
--		if (*((int *) (dp - 4)) == SR_FD) {
--			copy_from_user((void *) dp, addr,
--				sizeof(struct pt_dspregs));
--			ret = 0;
--		}
+-	get_user(inst, ((unsigned short *) regs->pc));
+-
+-	inst &= 0xf000;
+-
+-	/* Check for any type of DSP or support instruction */
+-	if ((inst == 0xf000) || (inst == 0x4000))
+-		return 1;
+-
+-	return 0;
+-}
+-#else
+-#define is_dsp_inst(regs)	(0)
+-#endif /* CONFIG_SH_DSP */
++	TRAP_HANDLER_DECL;
+ 
+-#ifdef CONFIG_CPU_SH2A
+-asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+-				unsigned long r6, unsigned long r7,
+-				struct pt_regs __regs)
+-{
+-	siginfo_t info;
+-
+-	switch (r4) {
+-	case TRAP_DIVZERO_ERROR:
+-		info.si_code = FPE_INTDIV;
 -		break;
--	}
--#endif
--	default:
--		ret = ptrace_request(child, request, addr, data);
+-	case TRAP_DIVOVF_ERROR:
+-		info.si_code = FPE_INTOVF;
 -		break;
 -	}
 -
--	return ret;
+-	force_sig_info(SIGFPE, &info, current);
 -}
+-#endif
 -
--asmlinkage void do_syscall_trace(void)
+-/* arch/sh/kernel/cpu/sh4/fpu.c */
+-extern int do_fpu_inst(unsigned short, struct pt_regs *);
+-extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
+-		unsigned long r6, unsigned long r7, struct pt_regs __regs);
+-
+-asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
+-				unsigned long r6, unsigned long r7,
+-				struct pt_regs __regs)
 -{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	unsigned long error_code;
 -	struct task_struct *tsk = current;
 -
--	if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
--	    !test_thread_flag(TIF_SINGLESTEP))
--		return;
--	if (!(tsk->ptrace & PT_PTRACED))
+-#ifdef CONFIG_SH_FPU_EMU
+-	unsigned short inst = 0;
+-	int err;
+-
+-	get_user(inst, (unsigned short*)regs->pc);
+-
+-	err = do_fpu_inst(inst, regs);
+-	if (!err) {
+-		regs->pc += instruction_size(inst);
 -		return;
--	/* the 0x80 provides a way for the tracing parent to distinguish
--	   between a syscall stop and SIGTRAP delivery */
--	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
--				 !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
+-	}
+-	/* not a FPU inst. */
+-#endif
++	/* Rewind */
++	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ 
+-#ifdef CONFIG_SH_DSP
+-	/* Check if it's a DSP instruction */
+-	if (is_dsp_inst(regs)) {
+-		/* Enable DSP mode, and restart instruction. */
+-		regs->sr |= SR_DSP;
++	if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
++		       SIGTRAP) == NOTIFY_STOP)
+ 		return;
+-	}
+-#endif
+-
+-	lookup_exception_vector(error_code);
+-
+-	local_irq_enable();
+-	CHK_REMOTE_DEBUG(regs);
+-	force_sig(SIGILL, tsk);
+-	die_if_no_fixup("reserved instruction", regs, error_code);
+-}
 -
+-#ifdef CONFIG_SH_FPU_EMU
+-static int emulate_branch(unsigned short inst, struct pt_regs* regs)
+-{
 -	/*
--	 * this isn't the same as continuing with a signal, but it will do
--	 * for normal use.  strace only continues with a signal if the
--	 * stopping signal is not SIGTRAP.  -brl
+-	 * bfs: 8fxx: PC+=d*2+4;
+-	 * bts: 8dxx: PC+=d*2+4;
+-	 * bra: axxx: PC+=D*2+4;
+-	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
+-	 * braf:0x23: PC+=Rn*2+4;
+-	 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
+-	 * jmp: 4x2b: PC=Rn;
+-	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
+-	 * rts: 000b: PC=PR;
 -	 */
--	if (tsk->exit_code) {
--		send_sig(tsk->exit_code, tsk, 1);
--		tsk->exit_code = 0;
+-	if ((inst & 0xfd00) == 0x8d00) {
+-		regs->pc += SH_PC_8BIT_OFFSET(inst);
+-		return 0;
+-	}
+-
+-	if ((inst & 0xe000) == 0xa000) {
+-		regs->pc += SH_PC_12BIT_OFFSET(inst);
+-		return 0;
+-	}
+-
+-	if ((inst & 0xf0df) == 0x0003) {
+-		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
+-		return 0;
+-	}
+-
+-	if ((inst & 0xf0df) == 0x400b) {
+-		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
+-		return 0;
 -	}
+-
+-	if ((inst & 0xffff) == 0x000b) {
+-		regs->pc = regs->pr;
+-		return 0;
+-	}
+-
+-	return 1;
 -}
-diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
+-#endif
+-
+-asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
+-				unsigned long r6, unsigned long r7,
+-				struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	unsigned long error_code;
+-	struct task_struct *tsk = current;
+-#ifdef CONFIG_SH_FPU_EMU
+-	unsigned short inst = 0;
+-
+-	get_user(inst, (unsigned short *)regs->pc + 1);
+-	if (!do_fpu_inst(inst, regs)) {
+-		get_user(inst, (unsigned short *)regs->pc);
+-		if (!emulate_branch(inst, regs))
+-			return;
+-		/* fault in branch.*/
+-	}
+-	/* not a FPU inst. */
+-#endif
+-
+-	lookup_exception_vector(error_code);
+-
+-	local_irq_enable();
+-	CHK_REMOTE_DEBUG(regs);
+-	force_sig(SIGILL, tsk);
+-	die_if_no_fixup("illegal slot instruction", regs, error_code);
+-}
+-
+-asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
+-				   unsigned long r6, unsigned long r7,
+-				   struct pt_regs __regs)
+-{
+-	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+-	long ex;
+-
+-	lookup_exception_vector(ex);
+-	die_if_kernel("exception", regs, ex);
+-}
+-
+-#if defined(CONFIG_SH_STANDARD_BIOS)
+-void *gdb_vbr_vector;
+-
+-static inline void __init gdb_vbr_init(void)
+-{
+-	register unsigned long vbr;
+-
+-	/*
+-	 * Read the old value of the VBR register to initialise
+-	 * the vector through which debug and BIOS traps are
+-	 * delegated by the Linux trap handler.
+-	 */
+-	asm volatile("stc vbr, %0" : "=r" (vbr));
+-
+-	gdb_vbr_vector = (void *)(vbr + 0x100);
+-	printk("Setting GDB trap vector to 0x%08lx\n",
+-	       (unsigned long)gdb_vbr_vector);
+-}
+-#endif
+-
+-void __cpuinit per_cpu_trap_init(void)
+-{
+-	extern void *vbr_base;
+-
+-#ifdef CONFIG_SH_STANDARD_BIOS
+-	if (raw_smp_processor_id() == 0)
+-		gdb_vbr_init();
+-#endif
+-
+-	/* NOTE: The VBR value should be at P1
+-	   (or P2, virtural "fixed" address space).
+-	   It's definitely should not in physical address.  */
+-
+-	asm volatile("ldc	%0, vbr"
+-		     : /* no output */
+-		     : "r" (&vbr_base)
+-		     : "memory");
+-}
+-
+-void *set_exception_table_vec(unsigned int vec, void *handler)
+-{
+-	extern void *exception_handling_table[];
+-	void *old_handler;
+-
+-	old_handler = exception_handling_table[vec];
+-	exception_handling_table[vec] = handler;
+-	return old_handler;
+-}
+-
+-extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+-					     unsigned long r6, unsigned long r7,
+-					     struct pt_regs __regs);
+-
+-void __init trap_init(void)
+-{
+-	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
+-	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
+-
+-#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
+-    defined(CONFIG_SH_FPU_EMU)
+-	/*
+-	 * For SH-4 lacking an FPU, treat floating point instructions as
+-	 * reserved. They'll be handled in the math-emu case, or faulted on
+-	 * otherwise.
+-	 */
+-	set_exception_table_evt(0x800, do_reserved_inst);
+-	set_exception_table_evt(0x820, do_illegal_slot_inst);
+-#elif defined(CONFIG_SH_FPU)
+-#ifdef CONFIG_CPU_SUBTYPE_SHX3
+-	set_exception_table_evt(0xd80, do_fpu_state_restore);
+-	set_exception_table_evt(0xda0, do_fpu_state_restore);
+-#else
+-	set_exception_table_evt(0x800, do_fpu_state_restore);
+-	set_exception_table_evt(0x820, do_fpu_state_restore);
+-#endif
+-#endif
+-
+-#ifdef CONFIG_CPU_SH2
+-	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+-#endif
+-#ifdef CONFIG_CPU_SH2A
+-	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+-	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+-#endif
+-
+-	/* Setup VBR for boot cpu */
+-	per_cpu_trap_init();
+-}
+ 
+ #ifdef CONFIG_BUG
+-void handle_BUG(struct pt_regs *regs)
+-{
+-	enum bug_trap_type tt;
+-	tt = report_bug(regs->pc, regs);
+-	if (tt == BUG_TRAP_TYPE_WARN) {
+-		regs->pc += 2;
+-		return;
++	if (__kernel_text_address(instruction_pointer(regs))) {
++		opcode_t insn = *(opcode_t *)instruction_pointer(regs);
++		if (insn == TRAPA_BUG_OPCODE)
++			handle_BUG(regs);
+ 	}
+-
+-	die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
+-}
+-
+-int is_valid_bugaddr(unsigned long addr)
+-{
+-	return addr >= PAGE_OFFSET;
+-}
+-#endif
+-
+-void show_trace(struct task_struct *tsk, unsigned long *sp,
+-		struct pt_regs *regs)
+-{
+-	unsigned long addr;
+-
+-	if (regs && user_mode(regs))
+-		return;
+-
+-	printk("\nCall trace: ");
+-#ifdef CONFIG_KALLSYMS
+-	printk("\n");
+ #endif
+ 
+-	while (!kstack_end(sp)) {
+-		addr = *sp++;
+-		if (kernel_text_address(addr))
+-			print_ip_sym(addr);
+-	}
+-
+-	printk("\n");
+-
+-	if (!tsk)
+-		tsk = current;
+-
+-	debug_show_held_locks(tsk);
+-}
+-
+-void show_stack(struct task_struct *tsk, unsigned long *sp)
+-{
+-	unsigned long stack;
+-
+-	if (!tsk)
+-		tsk = current;
+-	if (tsk == current)
+-		sp = (unsigned long *)current_stack_pointer;
+-	else
+-		sp = (unsigned long *)tsk->thread.sp;
+-
+-	stack = (unsigned long)sp;
+-	dump_mem("Stack: ", stack, THREAD_SIZE +
+-		 (unsigned long)task_stack_page(tsk));
+-	show_trace(tsk, sp, NULL);
+-}
+-
+-void dump_stack(void)
+-{
+-	show_stack(NULL, NULL);
++	force_sig(SIGTRAP, current);
+ }
+-EXPORT_SYMBOL(dump_stack);
+diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
 new file mode 100644
-index 0000000..ce0664a
+index 0000000..2e58f7a
 --- /dev/null
-+++ b/arch/sh/kernel/ptrace_32.c
-@@ -0,0 +1,287 @@
++++ b/arch/sh/kernel/traps_32.c
+@@ -0,0 +1,919 @@
 +/*
-+ * linux/arch/sh/kernel/ptrace.c
++ * 'traps.c' handles hardware traps and faults after we have saved some
++ * state in 'entry.S'.
 + *
-+ * Original x86 implementation:
-+ *	By Ross Biro 1/23/92
-+ *	edited by Linus Torvalds
++ *  SuperH version: Copyright (C) 1999 Niibe Yutaka
++ *                  Copyright (C) 2000 Philipp Rumpf
++ *                  Copyright (C) 2000 David Howells
++ *                  Copyright (C) 2002 - 2007 Paul Mundt
 + *
-+ * SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
-+ * Audit support: Yuichi Nakamura <ynakam at hitachisoft.jp>
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
 + */
 +#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/errno.h>
 +#include <linux/ptrace.h>
-+#include <linux/user.h>
-+#include <linux/slab.h>
-+#include <linux/security.h>
-+#include <linux/signal.h>
++#include <linux/init.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
 +#include <linux/io.h>
-+#include <linux/audit.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
++#include <linux/bug.h>
++#include <linux/debug_locks.h>
++#include <linux/kdebug.h>
++#include <linux/kexec.h>
++#include <linux/limits.h>
 +#include <asm/system.h>
-+#include <asm/processor.h>
-+#include <asm/mmu_context.h>
++#include <asm/uaccess.h>
 +
-+/*
-+ * does not yet catch signals sent when the child dies.
-+ * in exit.c or in signal.c.
-+ */
++#ifdef CONFIG_SH_KGDB
++#include <asm/kgdb.h>
++#define CHK_REMOTE_DEBUG(regs)			\
++{						\
++	if (kgdb_debug_hook && !user_mode(regs))\
++		(*kgdb_debug_hook)(regs);       \
++}
++#else
++#define CHK_REMOTE_DEBUG(regs)
++#endif
 +
-+/*
-+ * This routine will get a word off of the process kernel stack.
-+ */
-+static inline int get_stack_long(struct task_struct *task, int offset)
++#ifdef CONFIG_CPU_SH2
++# define TRAP_RESERVED_INST	4
++# define TRAP_ILLEGAL_SLOT_INST	6
++# define TRAP_ADDRESS_ERROR	9
++# ifdef CONFIG_CPU_SH2A
++#  define TRAP_DIVZERO_ERROR	17
++#  define TRAP_DIVOVF_ERROR	18
++# endif
++#else
++#define TRAP_RESERVED_INST	12
++#define TRAP_ILLEGAL_SLOT_INST	13
++#endif
++
++static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
 +{
-+	unsigned char *stack;
++	unsigned long p;
++	int i;
 +
-+	stack = (unsigned char *)task_pt_regs(task);
-+	stack += offset;
-+	return (*((int *)stack));
++	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
++
++	for (p = bottom & ~31; p < top; ) {
++		printk("%04lx: ", p & 0xffff);
++
++		for (i = 0; i < 8; i++, p += 4) {
++			unsigned int val;
++
++			if (p < bottom || p >= top)
++				printk("         ");
++			else {
++				if (__get_user(val, (unsigned int __user *)p)) {
++					printk("\n");
++					return;
++				}
++				printk("%08x ", val);
++			}
++		}
++		printk("\n");
++	}
 +}
 +
-+/*
-+ * This routine will put a word on the process kernel stack.
-+ */
-+static inline int put_stack_long(struct task_struct *task, int offset,
-+				 unsigned long data)
++static DEFINE_SPINLOCK(die_lock);
++
++void die(const char * str, struct pt_regs * regs, long err)
 +{
-+	unsigned char *stack;
++	static int die_counter;
 +
-+	stack = (unsigned char *)task_pt_regs(task);
-+	stack += offset;
-+	*(unsigned long *) stack = data;
-+	return 0;
++	oops_enter();
++
++	console_verbose();
++	spin_lock_irq(&die_lock);
++	bust_spinlocks(1);
++
++	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++
++	CHK_REMOTE_DEBUG(regs);
++	print_modules();
++	show_regs(regs);
++
++	printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
++			task_pid_nr(current), task_stack_page(current) + 1);
++
++	if (!user_mode(regs) || in_interrupt())
++		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
++			 (unsigned long)task_stack_page(current));
++
++	bust_spinlocks(0);
++	add_taint(TAINT_DIE);
++	spin_unlock_irq(&die_lock);
++
++	if (kexec_should_crash(current))
++		crash_kexec(regs);
++
++	if (in_interrupt())
++		panic("Fatal exception in interrupt");
++
++	if (panic_on_oops)
++		panic("Fatal exception");
++
++	oops_exit();
++	do_exit(SIGSEGV);
 +}
 +
-+static void ptrace_disable_singlestep(struct task_struct *child)
++static inline void die_if_kernel(const char *str, struct pt_regs *regs,
++				 long err)
 +{
-+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-+
-+	/*
-+	 * Ensure the UBC is not programmed at the next context switch.
-+	 *
-+	 * Normally this is not needed but there are sequences such as
-+	 * singlestep, signal delivery, and continue that leave the
-+	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-+	 */
-+	if (child->thread.ubc_pc != 0) {
-+		ubc_usercnt -= 1;
-+		child->thread.ubc_pc = 0;
-+	}
++	if (!user_mode(regs))
++		die(str, regs, err);
 +}
 +
 +/*
-+ * Called by kernel/ptrace.c when detaching..
-+ *
-+ * Make sure single step bits etc are not set.
++ * try and fix up kernelspace address errors
++ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
++ * - kernel/userspace interfaces cause a jump to an appropriate handler
++ * - other kernel errors are bad
++ * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
 + */
-+void ptrace_disable(struct task_struct *child)
++static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
 +{
-+	ptrace_disable_singlestep(child);
++	if (!user_mode(regs)) {
++		const struct exception_table_entry *fixup;
++		fixup = search_exception_tables(regs->pc);
++		if (fixup) {
++			regs->pc = fixup->fixup;
++			return 0;
++		}
++		die(str, regs, err);
++	}
++	return -EFAULT;
 +}
 +
-+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
++/*
++ * handle an instruction that does an unaligned memory access by emulating the
++ * desired behaviour
++ * - note that PC _may not_ point to the faulting instruction
++ *   (if that instruction is in a branch delay slot)
++ * - return 0 if emulation okay, -EFAULT on existential error
++ */
++static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
 +{
-+	struct user * dummy = NULL;
-+	int ret;
++	int ret, index, count;
++	unsigned long *rm, *rn;
++	unsigned char *src, *dst;
 +
-+	switch (request) {
-+	/* when I and D space are separate, these will need to be fixed. */
-+	case PTRACE_PEEKTEXT: /* read word at location addr. */
-+	case PTRACE_PEEKDATA:
-+		ret = generic_ptrace_peekdata(child, addr, data);
-+		break;
++	index = (instruction>>8)&15;	/* 0x0F00 */
++	rn = &regs->regs[index];
 +
-+	/* read the word at location addr in the USER area. */
-+	case PTRACE_PEEKUSR: {
-+		unsigned long tmp;
++	index = (instruction>>4)&15;	/* 0x00F0 */
++	rm = &regs->regs[index];
 +
-+		ret = -EIO;
-+		if ((addr & 3) || addr < 0 ||
-+		    addr > sizeof(struct user) - 3)
-+			break;
++	count = 1<<(instruction&3);
 +
-+		if (addr < sizeof(struct pt_regs))
-+			tmp = get_stack_long(child, addr);
-+		else if (addr >= (long) &dummy->fpu &&
-+			 addr < (long) &dummy->u_fpvalid) {
-+			if (!tsk_used_math(child)) {
-+				if (addr == (long)&dummy->fpu.fpscr)
-+					tmp = FPSCR_INIT;
-+				else
-+					tmp = 0;
-+			} else
-+				tmp = ((long *)&child->thread.fpu)
-+					[(addr - (long)&dummy->fpu) >> 2];
-+		} else if (addr == (long) &dummy->u_fpvalid)
-+			tmp = !!tsk_used_math(child);
-+		else
-+			tmp = 0;
-+		ret = put_user(tmp, (unsigned long __user *)data);
-+		break;
-+	}
++	ret = -EFAULT;
++	switch (instruction>>12) {
++	case 0: /* mov.[bwl] to/from memory via r0+rn */
++		if (instruction & 8) {
++			/* from memory */
++			src = (unsigned char*) *rm;
++			src += regs->regs[0];
++			dst = (unsigned char*) rn;
++			*(unsigned long*)dst = 0;
 +
-+	/* when I and D space are separate, this will have to be fixed. */
-+	case PTRACE_POKETEXT: /* write the word at location addr. */
-+	case PTRACE_POKEDATA:
-+		ret = generic_ptrace_pokedata(child, addr, data);
-+		break;
++#ifdef __LITTLE_ENDIAN__
++			if (copy_from_user(dst, src, count))
++				goto fetch_fault;
 +
-+	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
-+		ret = -EIO;
-+		if ((addr & 3) || addr < 0 ||
-+		    addr > sizeof(struct user) - 3)
-+			break;
++			if ((count == 2) && dst[1] & 0x80) {
++				dst[2] = 0xff;
++				dst[3] = 0xff;
++			}
++#else
++			dst += 4-count;
 +
-+		if (addr < sizeof(struct pt_regs))
-+			ret = put_stack_long(child, addr, data);
-+		else if (addr >= (long) &dummy->fpu &&
-+			 addr < (long) &dummy->u_fpvalid) {
-+			set_stopped_child_used_math(child);
-+			((long *)&child->thread.fpu)
-+				[(addr - (long)&dummy->fpu) >> 2] = data;
-+			ret = 0;
-+		} else if (addr == (long) &dummy->u_fpvalid) {
-+			conditional_stopped_child_used_math(data, child);
-+			ret = 0;
++			if (__copy_user(dst, src, count))
++				goto fetch_fault;
++
++			if ((count == 2) && dst[2] & 0x80) {
++				dst[0] = 0xff;
++				dst[1] = 0xff;
++			}
++#endif
++		} else {
++			/* to memory */
++			src = (unsigned char*) rm;
++#if !defined(__LITTLE_ENDIAN__)
++			src += 4-count;
++#endif
++			dst = (unsigned char*) *rn;
++			dst += regs->regs[0];
++
++			if (copy_to_user(dst, src, count))
++				goto fetch_fault;
 +		}
++		ret = 0;
 +		break;
 +
-+	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-+	case PTRACE_CONT: { /* restart after signal. */
-+		ret = -EIO;
-+		if (!valid_signal(data))
-+			break;
-+		if (request == PTRACE_SYSCALL)
-+			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-+		else
-+			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-+
-+		ptrace_disable_singlestep(child);
++	case 1: /* mov.l Rm,@(disp,Rn) */
++		src = (unsigned char*) rm;
++		dst = (unsigned char*) *rn;
++		dst += (instruction&0x000F)<<2;
 +
-+		child->exit_code = data;
-+		wake_up_process(child);
++		if (copy_to_user(dst,src,4))
++			goto fetch_fault;
 +		ret = 0;
 +		break;
-+	}
 +
-+/*
-+ * make the child exit.  Best I can do is send it a sigkill.
-+ * perhaps it should be put in the status that it wants to
-+ * exit.
-+ */
-+	case PTRACE_KILL: {
++	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
++		if (instruction & 4)
++			*rn -= count;
++		src = (unsigned char*) rm;
++		dst = (unsigned char*) *rn;
++#if !defined(__LITTLE_ENDIAN__)
++		src += 4-count;
++#endif
++		if (copy_to_user(dst, src, count))
++			goto fetch_fault;
 +		ret = 0;
-+		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-+			break;
-+		ptrace_disable_singlestep(child);
-+		child->exit_code = SIGKILL;
-+		wake_up_process(child);
 +		break;
-+	}
-+
-+	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
-+		long pc;
-+		struct pt_regs *regs = NULL;
-+
-+		ret = -EIO;
-+		if (!valid_signal(data))
-+			break;
-+		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-+		if ((child->ptrace & PT_DTRACE) == 0) {
-+			/* Spurious delayed TF traps may occur */
-+			child->ptrace |= PT_DTRACE;
-+		}
-+
-+		pc = get_stack_long(child, (long)&regs->pc);
 +
-+		/* Next scheduling will set up UBC */
-+		if (child->thread.ubc_pc == 0)
-+			ubc_usercnt += 1;
-+		child->thread.ubc_pc = pc;
++	case 5: /* mov.l @(disp,Rm),Rn */
++		src = (unsigned char*) *rm;
++		src += (instruction&0x000F)<<2;
++		dst = (unsigned char*) rn;
++		*(unsigned long*)dst = 0;
 +
-+		set_tsk_thread_flag(child, TIF_SINGLESTEP);
-+		child->exit_code = data;
-+		/* give it a chance to run. */
-+		wake_up_process(child);
++		if (copy_from_user(dst,src,4))
++			goto fetch_fault;
 +		ret = 0;
 +		break;
-+	}
 +
-+#ifdef CONFIG_SH_DSP
-+	case PTRACE_GETDSPREGS: {
-+		unsigned long dp;
++	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
++		src = (unsigned char*) *rm;
++		if (instruction & 4)
++			*rm += count;
++		dst = (unsigned char*) rn;
++		*(unsigned long*)dst = 0;
 +
-+		ret = -EIO;
-+		dp = ((unsigned long) child) + THREAD_SIZE -
-+			 sizeof(struct pt_dspregs);
-+		if (*((int *) (dp - 4)) == SR_FD) {
-+			copy_to_user(addr, (void *) dp,
-+				sizeof(struct pt_dspregs));
-+			ret = 0;
++#ifdef __LITTLE_ENDIAN__
++		if (copy_from_user(dst, src, count))
++			goto fetch_fault;
++
++		if ((count == 2) && dst[1] & 0x80) {
++			dst[2] = 0xff;
++			dst[3] = 0xff;
 +		}
-+		break;
-+	}
++#else
++		dst += 4-count;
 +
-+	case PTRACE_SETDSPREGS: {
-+		unsigned long dp;
++		if (copy_from_user(dst, src, count))
++			goto fetch_fault;
 +
-+		ret = -EIO;
-+		dp = ((unsigned long) child) + THREAD_SIZE -
-+			 sizeof(struct pt_dspregs);
-+		if (*((int *) (dp - 4)) == SR_FD) {
-+			copy_from_user((void *) dp, addr,
-+				sizeof(struct pt_dspregs));
-+			ret = 0;
++		if ((count == 2) && dst[2] & 0x80) {
++			dst[0] = 0xff;
++			dst[1] = 0xff;
 +		}
-+		break;
-+	}
 +#endif
-+	default:
-+		ret = ptrace_request(child, request, addr, data);
++		ret = 0;
 +		break;
-+	}
 +
-+	return ret;
-+}
++	case 8:
++		switch ((instruction&0xFF00)>>8) {
++		case 0x81: /* mov.w R0,@(disp,Rn) */
++			src = (unsigned char*) &regs->regs[0];
++#if !defined(__LITTLE_ENDIAN__)
++			src += 2;
++#endif
++			dst = (unsigned char*) *rm; /* called Rn in the spec */
++			dst += (instruction&0x000F)<<1;
 +
-+asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
-+{
-+	struct task_struct *tsk = current;
++			if (copy_to_user(dst, src, 2))
++				goto fetch_fault;
++			ret = 0;
++			break;
 +
-+	if (unlikely(current->audit_context) && entryexit)
-+		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
-+				   regs->regs[0]);
++		case 0x85: /* mov.w @(disp,Rm),R0 */
++			src = (unsigned char*) *rm;
++			src += (instruction&0x000F)<<1;
++			dst = (unsigned char*) &regs->regs[0];
++			*(unsigned long*)dst = 0;
 +
-+	if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
-+	    !test_thread_flag(TIF_SINGLESTEP))
-+		goto out;
-+	if (!(tsk->ptrace & PT_PTRACED))
-+		goto out;
++#if !defined(__LITTLE_ENDIAN__)
++			dst += 2;
++#endif
 +
-+	/* the 0x80 provides a way for the tracing parent to distinguish
-+	   between a syscall stop and SIGTRAP delivery */
-+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
-+				 !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
++			if (copy_from_user(dst, src, 2))
++				goto fetch_fault;
 +
-+	/*
-+	 * this isn't the same as continuing with a signal, but it will do
-+	 * for normal use.  strace only continues with a signal if the
-+	 * stopping signal is not SIGTRAP.  -brl
-+	 */
-+	if (tsk->exit_code) {
-+		send_sig(tsk->exit_code, tsk, 1);
-+		tsk->exit_code = 0;
++#ifdef __LITTLE_ENDIAN__
++			if (dst[1] & 0x80) {
++				dst[2] = 0xff;
++				dst[3] = 0xff;
++			}
++#else
++			if (dst[2] & 0x80) {
++				dst[0] = 0xff;
++				dst[1] = 0xff;
++			}
++#endif
++			ret = 0;
++			break;
++		}
++		break;
 +	}
++	return ret;
 +
-+out:
-+	if (unlikely(current->audit_context) && !entryexit)
-+		audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[3],
-+				    regs->regs[4], regs->regs[5],
-+				    regs->regs[6], regs->regs[7]);
-+
++ fetch_fault:
++	/* Argh. Address not only misaligned but also non-existent.
++	 * Raise an EFAULT and see if it's trapped
++	 */
++	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
 +}
-diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
-new file mode 100644
-index 0000000..f6fbdfa
---- /dev/null
-+++ b/arch/sh/kernel/ptrace_64.c
-@@ -0,0 +1,341 @@
-+/*
-+ * arch/sh/kernel/ptrace_64.c
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003 - 2007  Paul Mundt
-+ *
-+ * Started from SH3/4 version:
-+ *   SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
-+ *
-+ *   Original x86 implementation:
-+ *	By Ross Biro 1/23/92
-+ *	edited by Linus Torvalds
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/rwsem.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/errno.h>
-+#include <linux/ptrace.h>
-+#include <linux/user.h>
-+#include <linux/signal.h>
-+#include <linux/syscalls.h>
-+#include <linux/audit.h>
-+#include <asm/io.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/processor.h>
-+#include <asm/mmu_context.h>
-+
-+/* This mask defines the bits of the SR which the user is not allowed to
-+   change, which are everything except S, Q, M, PR, SZ, FR. */
-+#define SR_MASK      (0xffff8cfd)
-+
-+/*
-+ * does not yet catch signals sent when the child dies.
-+ * in exit.c or in signal.c.
-+ */
 +
 +/*
-+ * This routine will get a word from the user area in the process kernel stack.
++ * emulate the instruction in the delay slot
++ * - fetches the instruction from PC+2
 + */
-+static inline int get_stack_long(struct task_struct *task, int offset)
-+{
-+	unsigned char *stack;
-+
-+	stack = (unsigned char *)(task->thread.uregs);
-+	stack += offset;
-+	return (*((int *)stack));
-+}
-+
-+static inline unsigned long
-+get_fpu_long(struct task_struct *task, unsigned long addr)
++static inline int handle_unaligned_delayslot(struct pt_regs *regs)
 +{
-+	unsigned long tmp;
-+	struct pt_regs *regs;
-+	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
++	u16 instruction;
 +
-+	if (!tsk_used_math(task)) {
-+		if (addr == offsetof(struct user_fpu_struct, fpscr)) {
-+			tmp = FPSCR_INIT;
-+		} else {
-+			tmp = 0xffffffffUL; /* matches initial value in fpu.c */
-+		}
-+		return tmp;
-+	}
++	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
++		/* the instruction-fetch faulted */
++		if (user_mode(regs))
++			return -EFAULT;
 +
-+	if (last_task_used_math == task) {
-+		enable_fpu();
-+		save_fpu(task, regs);
-+		disable_fpu();
-+		last_task_used_math = 0;
-+		regs->sr |= SR_FD;
++		/* kernel */
++		die("delay-slot-insn faulting in handle_unaligned_delayslot",
++		    regs, 0);
 +	}
 +
-+	tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
-+	return tmp;
++	return handle_unaligned_ins(instruction,regs);
 +}
 +
 +/*
-+ * This routine will put a word into the user area in the process kernel stack.
++ * handle an instruction that does an unaligned memory access
++ * - have to be careful of branch delay-slot instructions that fault
++ *  SH3:
++ *   - if the branch would be taken PC points to the branch
++ *   - if the branch would not be taken, PC points to delay-slot
++ *  SH4:
++ *   - PC always points to delayed branch
++ * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 + */
-+static inline int put_stack_long(struct task_struct *task, int offset,
-+				 unsigned long data)
-+{
-+	unsigned char *stack;
-+
-+	stack = (unsigned char *)(task->thread.uregs);
-+	stack += offset;
-+	*(unsigned long *) stack = data;
-+	return 0;
-+}
-+
-+static inline int
-+put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
-+{
-+	struct pt_regs *regs;
-+
-+	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
-+
-+	if (!tsk_used_math(task)) {
-+		fpinit(&task->thread.fpu.hard);
-+		set_stopped_child_used_math(task);
-+	} else if (last_task_used_math == task) {
-+		enable_fpu();
-+		save_fpu(task, regs);
-+		disable_fpu();
-+		last_task_used_math = 0;
-+		regs->sr |= SR_FD;
-+	}
 +
-+	((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
-+	return 0;
-+}
++/* Macros to determine offset from current PC for branch instructions */
++/* Explicit type coercion is used to force sign extension where needed */
++#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
++#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
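[Editorial note, not part of the patch: the two macros above lean on C integer conversions to sign-extend the branch displacement encoded in the opcode, double it (SH instructions are two bytes) and bias it by 4. A quick host-side check of that arithmetic, assuming the usual two's-complement / arithmetic-shift behaviour the kernel already relies on, could look like this:

	#include <assert.h>

	#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
	#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)

	int main(void)
	{
		/* bt 0x7F:  8-bit displacement +127 -> PC advances by 127*2 + 4 */
		assert(SH_PC_8BIT_OFFSET(0x897F) == 258);
		/* bf 0xFC:  8-bit displacement -4   -> PC moves back by 4       */
		assert(SH_PC_8BIT_OFFSET(0x8BFC) == -4);
		/* bra 0xFFE: 12-bit displacement -2 -> net PC change of 0       */
		assert(SH_PC_12BIT_OFFSET(0xAFFE) == 0);
		return 0;
	}
]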
 +
++/*
++ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
++ * opcodes..
++ */
++#ifndef CONFIG_CPU_SH2A
++static int handle_unaligned_notify_count = 10;
 +
-+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
++static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 +{
-+	int ret;
++	u_int rm;
++	int ret, index;
 +
-+	switch (request) {
-+	/* when I and D space are separate, these will need to be fixed. */
-+	case PTRACE_PEEKTEXT: /* read word at location addr. */
-+	case PTRACE_PEEKDATA:
-+		ret = generic_ptrace_peekdata(child, addr, data);
-+		break;
++	index = (instruction>>8)&15;	/* 0x0F00 */
++	rm = regs->regs[index];
 +
-+	/* read the word at location addr in the USER area. */
-+	case PTRACE_PEEKUSR: {
-+		unsigned long tmp;
++	/* shout about the first ten userspace fixups */
++	if (user_mode(regs) && handle_unaligned_notify_count>0) {
++		handle_unaligned_notify_count--;
 +
-+		ret = -EIO;
-+		if ((addr & 3) || addr < 0)
-+			break;
++		printk(KERN_NOTICE "Fixing up unaligned userspace access "
++		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
++		       current->comm, task_pid_nr(current),
++		       (u16 *)regs->pc, instruction);
++	}
 +
-+		if (addr < sizeof(struct pt_regs))
-+			tmp = get_stack_long(child, addr);
-+		else if ((addr >= offsetof(struct user, fpu)) &&
-+			 (addr <  offsetof(struct user, u_fpvalid))) {
-+			tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
-+		} else if (addr == offsetof(struct user, u_fpvalid)) {
-+			tmp = !!tsk_used_math(child);
-+		} else {
-+			break;
++	ret = -EFAULT;
++	switch (instruction&0xF000) {
++	case 0x0000:
++		if (instruction==0x000B) {
++			/* rts */
++			ret = handle_unaligned_delayslot(regs);
++			if (ret==0)
++				regs->pc = regs->pr;
++		}
++		else if ((instruction&0x00FF)==0x0023) {
++			/* braf @Rm */
++			ret = handle_unaligned_delayslot(regs);
++			if (ret==0)
++				regs->pc += rm + 4;
++		}
++		else if ((instruction&0x00FF)==0x0003) {
++			/* bsrf @Rm */
++			ret = handle_unaligned_delayslot(regs);
++			if (ret==0) {
++				regs->pr = regs->pc + 4;
++				regs->pc += rm + 4;
++			}
++		}
++		else {
++			/* mov.[bwl] to/from memory via r0+rn */
++			goto simple;
 +		}
-+		ret = put_user(tmp, (unsigned long *)data);
 +		break;
-+	}
 +
-+	/* when I and D space are separate, this will have to be fixed. */
-+	case PTRACE_POKETEXT: /* write the word at location addr. */
-+	case PTRACE_POKEDATA:
-+		ret = generic_ptrace_pokedata(child, addr, data);
-+		break;
++	case 0x1000: /* mov.l Rm,@(disp,Rn) */
++		goto simple;
 +
-+	case PTRACE_POKEUSR:
-+                /* write the word at location addr in the USER area. We must
-+                   disallow any changes to certain SR bits or u_fpvalid, since
-+                   this could crash the kernel or result in a security
-+                   loophole. */
-+		ret = -EIO;
-+		if ((addr & 3) || addr < 0)
-+			break;
++	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
++		goto simple;
 +
-+		if (addr < sizeof(struct pt_regs)) {
-+			/* Ignore change of top 32 bits of SR */
-+			if (addr == offsetof (struct pt_regs, sr)+4)
-+			{
-+				ret = 0;
-+				break;
-+			}
-+			/* If lower 32 bits of SR, ignore non-user bits */
-+			if (addr == offsetof (struct pt_regs, sr))
-+			{
-+				long cursr = get_stack_long(child, addr);
-+				data &= ~(SR_MASK);
-+				data |= (cursr & SR_MASK);
++	case 0x4000:
++		if ((instruction&0x00FF)==0x002B) {
++			/* jmp @Rm */
++			ret = handle_unaligned_delayslot(regs);
++			if (ret==0)
++				regs->pc = rm;
++		}
++		else if ((instruction&0x00FF)==0x000B) {
++			/* jsr @Rm */
++			ret = handle_unaligned_delayslot(regs);
++			if (ret==0) {
++				regs->pr = regs->pc + 4;
++				regs->pc = rm;
 +			}
-+			ret = put_stack_long(child, addr, data);
 +		}
-+		else if ((addr >= offsetof(struct user, fpu)) &&
-+			 (addr <  offsetof(struct user, u_fpvalid))) {
-+			ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
++		else {
++			/* mov.[bwl] to/from memory via r0+rn */
++			goto simple;
 +		}
 +		break;
 +
-+	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-+	case PTRACE_CONT: { /* restart after signal. */
-+		ret = -EIO;
-+		if (!valid_signal(data))
-+			break;
-+		if (request == PTRACE_SYSCALL)
-+			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-+		else
-+			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-+		child->exit_code = data;
-+		wake_up_process(child);
-+		ret = 0;
-+		break;
-+	}
-+
-+/*
-+ * make the child exit.  Best I can do is send it a sigkill.
-+ * perhaps it should be put in the status that it wants to
-+ * exit.
-+ */
-+	case PTRACE_KILL: {
-+		ret = 0;
-+		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-+			break;
-+		child->exit_code = SIGKILL;
-+		wake_up_process(child);
-+		break;
-+	}
++	case 0x5000: /* mov.l @(disp,Rm),Rn */
++		goto simple;
 +
-+	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
-+		struct pt_regs *regs;
++	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
++		goto simple;
 +
-+		ret = -EIO;
-+		if (!valid_signal(data))
++	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
++		switch (instruction&0x0F00) {
++		case 0x0100: /* mov.w R0,@(disp,Rm) */
++			goto simple;
++		case 0x0500: /* mov.w @(disp,Rm),R0 */
++			goto simple;
++		case 0x0B00: /* bf   lab - no delayslot*/
++			break;
++		case 0x0F00: /* bf/s lab */
++			ret = handle_unaligned_delayslot(regs);
++			if (ret==0) {
++#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
++				if ((regs->sr & 0x00000001) != 0)
++					regs->pc += 4; /* next after slot */
++				else
++#endif
++					regs->pc += SH_PC_8BIT_OFFSET(instruction);
++			}
++			break;
++		case 0x0900: /* bt   lab - no delayslot */
++			break;
++		case 0x0D00: /* bt/s lab */
++			ret = handle_unaligned_delayslot(regs);
++			if (ret==0) {
++#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
++				if ((regs->sr & 0x00000001) == 0)
++					regs->pc += 4; /* next after slot */
++				else
++#endif
++					regs->pc += SH_PC_8BIT_OFFSET(instruction);
++			}
 +			break;
-+		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-+		if ((child->ptrace & PT_DTRACE) == 0) {
-+			/* Spurious delayed TF traps may occur */
-+			child->ptrace |= PT_DTRACE;
 +		}
++		break;
 +
-+		regs = child->thread.uregs;
-+
-+		regs->sr |= SR_SSTEP;	/* auto-resetting upon exception */
-+
-+		child->exit_code = data;
-+		/* give it a chance to run. */
-+		wake_up_process(child);
-+		ret = 0;
++	case 0xA000: /* bra label */
++		ret = handle_unaligned_delayslot(regs);
++		if (ret==0)
++			regs->pc += SH_PC_12BIT_OFFSET(instruction);
 +		break;
-+	}
 +
-+	default:
-+		ret = ptrace_request(child, request, addr, data);
++	case 0xB000: /* bsr label */
++		ret = handle_unaligned_delayslot(regs);
++		if (ret==0) {
++			regs->pr = regs->pc + 4;
++			regs->pc += SH_PC_12BIT_OFFSET(instruction);
++		}
 +		break;
 +	}
 +	return ret;
++
++	/* handle non-delay-slot instruction */
++ simple:
++	ret = handle_unaligned_ins(instruction,regs);
++	if (ret==0)
++		regs->pc += instruction_size(instruction);
++	return ret;
 +}
++#endif /* CONFIG_CPU_SH2A */
 +
-+asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
++#ifdef CONFIG_CPU_HAS_SR_RB
++#define lookup_exception_vector(x)	\
++	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
++#else
++#define lookup_exception_vector(x)	\
++	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
++#endif
++
++/*
++ * Handle various address error exceptions:
++ *  - instruction address error:
++ *       misaligned PC
++ *       PC >= 0x80000000 in user mode
++ *  - data address error (read and write)
++ *       misaligned data access
++ *       access to >= 0x80000000 in user mode
++ * Unfortunately we can't distinguish between instruction address error
++ * and data address errors caused by read accesses.
++ */
++asmlinkage void do_address_error(struct pt_regs *regs,
++				 unsigned long writeaccess,
++				 unsigned long address)
 +{
-+#define WPC_DBRMODE 0x0d104008
-+	static int first_call = 1;
++	unsigned long error_code = 0;
++	mm_segment_t oldfs;
++	siginfo_t info;
++#ifndef CONFIG_CPU_SH2A
++	u16 instruction;
++	int tmp;
++#endif
 +
-+	lock_kernel();
-+	if (first_call) {
-+		/* Set WPC.DBRMODE to 0.  This makes all debug events get
-+		 * delivered through RESVEC, i.e. into the handlers in entry.S.
-+		 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
-+		 * would normally be left set to 1, which makes debug events get
-+		 * delivered through DBRVEC, i.e. into the remote gdb's
-+		 * handlers.  This prevents ptrace getting them, and confuses
-+		 * the remote gdb.) */
-+		printk("DBRMODE set to 0 to permit native debugging\n");
-+		poke_real_address_q(WPC_DBRMODE, 0);
-+		first_call = 0;
-+	}
-+	unlock_kernel();
++	/* Intentional ifdef */
++#ifdef CONFIG_CPU_HAS_SR_RB
++	lookup_exception_vector(error_code);
++#endif
 +
-+	return sys_ptrace(request, pid, addr, data);
-+}
++	oldfs = get_fs();
 +
-+asmlinkage void syscall_trace(struct pt_regs *regs, int entryexit)
-+{
-+	struct task_struct *tsk = current;
++	if (user_mode(regs)) {
++		int si_code = BUS_ADRERR;
 +
-+	if (unlikely(current->audit_context) && entryexit)
-+		audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
-+				   regs->regs[9]);
++		local_irq_enable();
 +
-+	if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
-+	    !test_thread_flag(TIF_SINGLESTEP))
-+		goto out;
-+	if (!(tsk->ptrace & PT_PTRACED))
-+		goto out;
++		/* bad PC is not something we can fix */
++		if (regs->pc & 1) {
++			si_code = BUS_ADRALN;
++			goto uspace_segv;
++		}
 +
-+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
-+				!test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
++#ifndef CONFIG_CPU_SH2A
++		set_fs(USER_DS);
++		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
++			/* Argh. Fault on the instruction itself.
++			   This should never happen on non-SMP
++			*/
++			set_fs(oldfs);
++			goto uspace_segv;
++		}
 +
-+	/*
-+	 * this isn't the same as continuing with a signal, but it will do
-+	 * for normal use.  strace only continues with a signal if the
-+	 * stopping signal is not SIGTRAP.  -brl
-+	 */
-+	if (tsk->exit_code) {
-+		send_sig(tsk->exit_code, tsk, 1);
-+		tsk->exit_code = 0;
-+	}
++		tmp = handle_unaligned_access(instruction, regs);
++		set_fs(oldfs);
 +
-+out:
-+	if (unlikely(current->audit_context) && !entryexit)
-+		audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[1],
-+				    regs->regs[2], regs->regs[3],
-+				    regs->regs[4], regs->regs[5]);
-+}
++		if (tmp==0)
++			return; /* sorted */
++#endif
 +
-+/* Called with interrupts disabled */
-+asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
-+{
-+	/* This is called after a single step exception (DEBUGSS).
-+	   There is no need to change the PC, as it is a post-execution
-+	   exception, as entry.S does not do anything to the PC for DEBUGSS.
-+	   We need to clear the Single Step setting in SR to avoid
-+	   continually stepping. */
-+	local_irq_enable();
-+	regs->sr &= ~SR_SSTEP;
-+	force_sig(SIGTRAP, current);
-+}
++uspace_segv:
++		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
++		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
++		       regs->pr);
 +
-+/* Called with interrupts disabled */
-+asmlinkage void do_software_break_point(unsigned long long vec,
-+					struct pt_regs *regs)
-+{
-+	/* We need to forward step the PC, to counteract the backstep done
-+	   in signal.c. */
-+	local_irq_enable();
-+	force_sig(SIGTRAP, current);
-+	regs->pc += 4;
++		info.si_signo = SIGBUS;
++		info.si_errno = 0;
++		info.si_code = si_code;
++		info.si_addr = (void __user *)address;
++		force_sig_info(SIGBUS, &info, current);
++	} else {
++		if (regs->pc & 1)
++			die("unaligned program counter", regs, error_code);
++
++#ifndef CONFIG_CPU_SH2A
++		set_fs(KERNEL_DS);
++		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
++			/* Argh. Fault on the instruction itself.
++			   This should never happen on non-SMP
++			*/
++			set_fs(oldfs);
++			die("insn faulting in do_address_error", regs, 0);
++		}
++
++		handle_unaligned_access(instruction, regs);
++		set_fs(oldfs);
++#else
++		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
++		       "access\n", current->comm);
++
++		force_sig(SIGSEGV, current);
++#endif
++	}
 +}
 +
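[Editorial note, not part of the patch: the user-mode branch of do_address_error() above is what an ordinary program's misaligned access lands in. A hypothetical userspace snippet that would exercise it on SH might look like the following sketch:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
		volatile uint32_t *p = (volatile uint32_t *)(buf + 1);	/* misaligned */

		/* On SH this load raises an address error; the handler above either
		 * fixes it up (printing the "Fixing up unaligned userspace access"
		 * notice for the first ten occurrences) or delivers SIGBUS with
		 * si_code BUS_ADRERR, or BUS_ADRALN for a misaligned PC. */
		printf("read 0x%08x\n", (unsigned int)*p);
		return 0;
	}
]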
++#ifdef CONFIG_SH_DSP
 +/*
-+ * Called by kernel/ptrace.c when detaching..
-+ *
-+ * Make sure single step bits etc are not set.
++ *	SH-DSP support gerg at snapgear.com.
 + */
-+void ptrace_disable(struct task_struct *child)
++int is_dsp_inst(struct pt_regs *regs)
 +{
-+        /* nothing to do.. */
-+}
-diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
-index 4156aac..855cdf9 100644
---- a/arch/sh/kernel/setup.c
-+++ b/arch/sh/kernel/setup.c
-@@ -26,6 +26,7 @@
- #include <asm/uaccess.h>
- #include <asm/io.h>
- #include <asm/page.h>
-+#include <asm/elf.h>
- #include <asm/sections.h>
- #include <asm/irq.h>
- #include <asm/setup.h>
-@@ -78,12 +79,25 @@ EXPORT_SYMBOL(memory_start);
- unsigned long memory_end = 0;
- EXPORT_SYMBOL(memory_end);
- 
-+int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
-+
- static int __init early_parse_mem(char *p)
- {
- 	unsigned long size;
- 
--	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
-+	memory_start = (unsigned long)__va(__MEMORY_START);
- 	size = memparse(p, &p);
-+
-+	if (size > __MEMORY_SIZE) {
-+		static char msg[] __initdata = KERN_ERR
-+			"Using mem= to increase the size of kernel memory "
-+			"is not allowed.\n"
-+			"  Recompile the kernel with the correct value for "
-+			"CONFIG_MEMORY_SIZE.\n";
-+		printk(msg);
-+		return 0;
-+	}
++	unsigned short inst = 0;
 +
- 	memory_end = memory_start + size;
- 
- 	return 0;
-@@ -243,7 +257,7 @@ void __init setup_arch(char **cmdline_p)
- 	data_resource.start = virt_to_phys(_etext);
- 	data_resource.end = virt_to_phys(_edata)-1;
- 
--	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
-+	memory_start = (unsigned long)__va(__MEMORY_START);
- 	if (!memory_end)
- 		memory_end = memory_start + __MEMORY_SIZE;
- 
-@@ -294,20 +308,23 @@ void __init setup_arch(char **cmdline_p)
- }
- 
- static const char *cpu_name[] = {
-+	[CPU_SH7203]	= "SH7203",	[CPU_SH7263]	= "SH7263",
- 	[CPU_SH7206]	= "SH7206",	[CPU_SH7619]	= "SH7619",
- 	[CPU_SH7705]	= "SH7705",	[CPU_SH7706]	= "SH7706",
- 	[CPU_SH7707]	= "SH7707",	[CPU_SH7708]	= "SH7708",
- 	[CPU_SH7709]	= "SH7709",	[CPU_SH7710]	= "SH7710",
- 	[CPU_SH7712]	= "SH7712",	[CPU_SH7720]	= "SH7720",
--	[CPU_SH7729]	= "SH7729",	[CPU_SH7750]	= "SH7750",
--	[CPU_SH7750S]	= "SH7750S",	[CPU_SH7750R]	= "SH7750R",
--	[CPU_SH7751]	= "SH7751",	[CPU_SH7751R]	= "SH7751R",
--	[CPU_SH7760]	= "SH7760",
-+	[CPU_SH7721]	= "SH7721",	[CPU_SH7729]	= "SH7729",
-+	[CPU_SH7750]	= "SH7750",	[CPU_SH7750S]	= "SH7750S",
-+	[CPU_SH7750R]	= "SH7750R",	[CPU_SH7751]	= "SH7751",
-+	[CPU_SH7751R]	= "SH7751R",	[CPU_SH7760]	= "SH7760",
- 	[CPU_SH4_202]	= "SH4-202",	[CPU_SH4_501]	= "SH4-501",
--	[CPU_SH7770]	= "SH7770",	[CPU_SH7780]	= "SH7780",
--	[CPU_SH7781]	= "SH7781",	[CPU_SH7343]	= "SH7343",
--	[CPU_SH7785]	= "SH7785",	[CPU_SH7722]	= "SH7722",
--	[CPU_SHX3]	= "SH-X3",	[CPU_SH_NONE]	= "Unknown"
-+	[CPU_SH7763]	= "SH7763",	[CPU_SH7770]	= "SH7770",
-+	[CPU_SH7780]	= "SH7780",	[CPU_SH7781]	= "SH7781",
-+	[CPU_SH7343]	= "SH7343",	[CPU_SH7785]	= "SH7785",
-+	[CPU_SH7722]	= "SH7722",	[CPU_SHX3]	= "SH-X3",
-+	[CPU_SH5_101]	= "SH5-101",	[CPU_SH5_103]	= "SH5-103",
-+	[CPU_SH_NONE]	= "Unknown"
- };
- 
- const char *get_cpu_subtype(struct sh_cpuinfo *c)
-@@ -410,7 +427,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
- static void c_stop(struct seq_file *m, void *v)
- {
- }
--struct seq_operations cpuinfo_op = {
-+const struct seq_operations cpuinfo_op = {
- 	.start	= c_start,
- 	.next	= c_next,
- 	.stop	= c_stop,
-diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
-deleted file mode 100644
-index e1a6de9..0000000
---- a/arch/sh/kernel/sh_ksyms.c
-+++ /dev/null
-@@ -1,150 +0,0 @@
--#include <linux/module.h>
--#include <linux/smp.h>
--#include <linux/user.h>
--#include <linux/elfcore.h>
--#include <linux/sched.h>
--#include <linux/in6.h>
--#include <linux/interrupt.h>
--#include <linux/vmalloc.h>
--#include <linux/pci.h>
--#include <linux/irq.h>
--#include <asm/sections.h>
--#include <asm/semaphore.h>
--#include <asm/processor.h>
--#include <asm/uaccess.h>
--#include <asm/checksum.h>
--#include <asm/io.h>
--#include <asm/delay.h>
--#include <asm/tlbflush.h>
--#include <asm/cacheflush.h>
--
--extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
--extern struct hw_interrupt_type no_irq_type;
--
--EXPORT_SYMBOL(sh_mv);
--
--/* platform dependent support */
--EXPORT_SYMBOL(dump_fpu);
--EXPORT_SYMBOL(kernel_thread);
--EXPORT_SYMBOL(irq_desc);
--EXPORT_SYMBOL(no_irq_type);
--
--EXPORT_SYMBOL(strlen);
--
--/* PCI exports */
--#ifdef CONFIG_PCI
--EXPORT_SYMBOL(pci_alloc_consistent);
--EXPORT_SYMBOL(pci_free_consistent);
--#endif
--
--/* mem exports */
--EXPORT_SYMBOL(memchr);
--EXPORT_SYMBOL(memcpy);
--EXPORT_SYMBOL(memset);
--EXPORT_SYMBOL(memmove);
--EXPORT_SYMBOL(__copy_user);
--
--#ifdef CONFIG_MMU
--EXPORT_SYMBOL(get_vm_area);
--#endif
--
--/* semaphore exports */
--EXPORT_SYMBOL(__up);
--EXPORT_SYMBOL(__down);
--EXPORT_SYMBOL(__down_interruptible);
--EXPORT_SYMBOL(__down_trylock);
--
--EXPORT_SYMBOL(__udelay);
--EXPORT_SYMBOL(__ndelay);
--EXPORT_SYMBOL(__const_udelay);
--
--#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
--
--/* These symbols are generated by the compiler itself */
--DECLARE_EXPORT(__udivsi3);
--DECLARE_EXPORT(__sdivsi3);
--DECLARE_EXPORT(__ashrsi3);
--DECLARE_EXPORT(__ashlsi3);
--DECLARE_EXPORT(__ashrdi3);
--DECLARE_EXPORT(__ashldi3);
--DECLARE_EXPORT(__ashiftrt_r4_6);
--DECLARE_EXPORT(__ashiftrt_r4_7);
--DECLARE_EXPORT(__ashiftrt_r4_8);
--DECLARE_EXPORT(__ashiftrt_r4_9);
--DECLARE_EXPORT(__ashiftrt_r4_10);
--DECLARE_EXPORT(__ashiftrt_r4_11);
--DECLARE_EXPORT(__ashiftrt_r4_12);
--DECLARE_EXPORT(__ashiftrt_r4_13);
--DECLARE_EXPORT(__ashiftrt_r4_14);
--DECLARE_EXPORT(__ashiftrt_r4_15);
--DECLARE_EXPORT(__ashiftrt_r4_20);
--DECLARE_EXPORT(__ashiftrt_r4_21);
--DECLARE_EXPORT(__ashiftrt_r4_22);
--DECLARE_EXPORT(__ashiftrt_r4_23);
--DECLARE_EXPORT(__ashiftrt_r4_24);
--DECLARE_EXPORT(__ashiftrt_r4_27);
--DECLARE_EXPORT(__ashiftrt_r4_30);
--DECLARE_EXPORT(__lshrsi3);
--DECLARE_EXPORT(__lshrdi3);
--DECLARE_EXPORT(__movstrSI8);
--DECLARE_EXPORT(__movstrSI12);
--DECLARE_EXPORT(__movstrSI16);
--DECLARE_EXPORT(__movstrSI20);
--DECLARE_EXPORT(__movstrSI24);
--DECLARE_EXPORT(__movstrSI28);
--DECLARE_EXPORT(__movstrSI32);
--DECLARE_EXPORT(__movstrSI36);
--DECLARE_EXPORT(__movstrSI40);
--DECLARE_EXPORT(__movstrSI44);
--DECLARE_EXPORT(__movstrSI48);
--DECLARE_EXPORT(__movstrSI52);
--DECLARE_EXPORT(__movstrSI56);
--DECLARE_EXPORT(__movstrSI60);
--#if __GNUC__ == 4
--DECLARE_EXPORT(__movmem);
--#else
--DECLARE_EXPORT(__movstr);
--#endif
--
--#if __GNUC__ == 4
--DECLARE_EXPORT(__movmem_i4_even);
--DECLARE_EXPORT(__movmem_i4_odd);
--DECLARE_EXPORT(__movmemSI12_i4);
--
--#if (__GNUC_MINOR__ == 2 || defined(__GNUC_STM_RELEASE__))
--/*
-- * GCC 4.2 emits these for division, as do GCC 4.1.x versions of the ST
-- * compiler which include backported patches.
-- */
--DECLARE_EXPORT(__sdivsi3_i4i);
--DECLARE_EXPORT(__udiv_qrnnd_16);
--DECLARE_EXPORT(__udivsi3_i4i);
--#endif
--#else /* GCC 3.x */
--DECLARE_EXPORT(__movstr_i4_even);
--DECLARE_EXPORT(__movstr_i4_odd);
--DECLARE_EXPORT(__movstrSI12_i4);
--#endif /* __GNUC__ == 4 */
--
--#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
--	defined(CONFIG_SH7705_CACHE_32KB))
--/* needed by some modules */
--EXPORT_SYMBOL(flush_cache_all);
--EXPORT_SYMBOL(flush_cache_range);
--EXPORT_SYMBOL(flush_dcache_page);
--EXPORT_SYMBOL(__flush_purge_region);
--#endif
--
--#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
--	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
--EXPORT_SYMBOL(clear_user_page);
--#endif
--
--EXPORT_SYMBOL(csum_partial);
--EXPORT_SYMBOL(csum_partial_copy_generic);
--#ifdef CONFIG_IPV6
--EXPORT_SYMBOL(csum_ipv6_magic);
--#endif
--EXPORT_SYMBOL(clear_page);
--EXPORT_SYMBOL(__clear_user);
--EXPORT_SYMBOL(_ebss);
-diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
-new file mode 100644
-index 0000000..e1a6de9
---- /dev/null
-+++ b/arch/sh/kernel/sh_ksyms_32.c
-@@ -0,0 +1,150 @@
-+#include <linux/module.h>
-+#include <linux/smp.h>
-+#include <linux/user.h>
-+#include <linux/elfcore.h>
-+#include <linux/sched.h>
-+#include <linux/in6.h>
-+#include <linux/interrupt.h>
-+#include <linux/vmalloc.h>
-+#include <linux/pci.h>
-+#include <linux/irq.h>
-+#include <asm/sections.h>
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/uaccess.h>
-+#include <asm/checksum.h>
-+#include <asm/io.h>
-+#include <asm/delay.h>
-+#include <asm/tlbflush.h>
-+#include <asm/cacheflush.h>
++	/*
++	 * Safeguard if DSP mode is already enabled or we're lacking
++	 * the DSP altogether.
++	 */
++	if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
++		return 0;
 +
-+extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-+extern struct hw_interrupt_type no_irq_type;
++	get_user(inst, ((unsigned short *) regs->pc));
 +
-+EXPORT_SYMBOL(sh_mv);
++	inst &= 0xf000;
 +
-+/* platform dependent support */
-+EXPORT_SYMBOL(dump_fpu);
-+EXPORT_SYMBOL(kernel_thread);
-+EXPORT_SYMBOL(irq_desc);
-+EXPORT_SYMBOL(no_irq_type);
++	/* Check for any type of DSP or support instruction */
++	if ((inst == 0xf000) || (inst == 0x4000))
++		return 1;
 +
-+EXPORT_SYMBOL(strlen);
++	return 0;
++}
++#else
++#define is_dsp_inst(regs)	(0)
++#endif /* CONFIG_SH_DSP */
 +
-+/* PCI exports */
-+#ifdef CONFIG_PCI
-+EXPORT_SYMBOL(pci_alloc_consistent);
-+EXPORT_SYMBOL(pci_free_consistent);
++#ifdef CONFIG_CPU_SH2A
++asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
++				unsigned long r6, unsigned long r7,
++				struct pt_regs __regs)
++{
++	siginfo_t info;
++
++	switch (r4) {
++	case TRAP_DIVZERO_ERROR:
++		info.si_code = FPE_INTDIV;
++		break;
++	case TRAP_DIVOVF_ERROR:
++		info.si_code = FPE_INTOVF;
++		break;
++	}
++
++	force_sig_info(SIGFPE, &info, current);
++}
 +#endif
 +
-+/* mem exports */
-+EXPORT_SYMBOL(memchr);
-+EXPORT_SYMBOL(memcpy);
-+EXPORT_SYMBOL(memset);
-+EXPORT_SYMBOL(memmove);
-+EXPORT_SYMBOL(__copy_user);
++asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
++				unsigned long r6, unsigned long r7,
++				struct pt_regs __regs)
++{
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	unsigned long error_code;
++	struct task_struct *tsk = current;
 +
-+#ifdef CONFIG_MMU
-+EXPORT_SYMBOL(get_vm_area);
++#ifdef CONFIG_SH_FPU_EMU
++	unsigned short inst = 0;
++	int err;
++
++	get_user(inst, (unsigned short*)regs->pc);
++
++	err = do_fpu_inst(inst, regs);
++	if (!err) {
++		regs->pc += instruction_size(inst);
++		return;
++	}
++	/* not a FPU inst. */
 +#endif
 +
-+/* semaphore exports */
-+EXPORT_SYMBOL(__up);
-+EXPORT_SYMBOL(__down);
-+EXPORT_SYMBOL(__down_interruptible);
-+EXPORT_SYMBOL(__down_trylock);
++#ifdef CONFIG_SH_DSP
++	/* Check if it's a DSP instruction */
++	if (is_dsp_inst(regs)) {
++		/* Enable DSP mode, and restart instruction. */
++		regs->sr |= SR_DSP;
++		return;
++	}
++#endif
 +
-+EXPORT_SYMBOL(__udelay);
-+EXPORT_SYMBOL(__ndelay);
-+EXPORT_SYMBOL(__const_udelay);
++	lookup_exception_vector(error_code);
 +
-+#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
++	local_irq_enable();
++	CHK_REMOTE_DEBUG(regs);
++	force_sig(SIGILL, tsk);
++	die_if_no_fixup("reserved instruction", regs, error_code);
++}
 +
-+/* These symbols are generated by the compiler itself */
-+DECLARE_EXPORT(__udivsi3);
-+DECLARE_EXPORT(__sdivsi3);
-+DECLARE_EXPORT(__ashrsi3);
-+DECLARE_EXPORT(__ashlsi3);
-+DECLARE_EXPORT(__ashrdi3);
-+DECLARE_EXPORT(__ashldi3);
-+DECLARE_EXPORT(__ashiftrt_r4_6);
-+DECLARE_EXPORT(__ashiftrt_r4_7);
-+DECLARE_EXPORT(__ashiftrt_r4_8);
-+DECLARE_EXPORT(__ashiftrt_r4_9);
-+DECLARE_EXPORT(__ashiftrt_r4_10);
-+DECLARE_EXPORT(__ashiftrt_r4_11);
-+DECLARE_EXPORT(__ashiftrt_r4_12);
-+DECLARE_EXPORT(__ashiftrt_r4_13);
-+DECLARE_EXPORT(__ashiftrt_r4_14);
-+DECLARE_EXPORT(__ashiftrt_r4_15);
-+DECLARE_EXPORT(__ashiftrt_r4_20);
-+DECLARE_EXPORT(__ashiftrt_r4_21);
-+DECLARE_EXPORT(__ashiftrt_r4_22);
-+DECLARE_EXPORT(__ashiftrt_r4_23);
-+DECLARE_EXPORT(__ashiftrt_r4_24);
-+DECLARE_EXPORT(__ashiftrt_r4_27);
-+DECLARE_EXPORT(__ashiftrt_r4_30);
-+DECLARE_EXPORT(__lshrsi3);
-+DECLARE_EXPORT(__lshrdi3);
-+DECLARE_EXPORT(__movstrSI8);
-+DECLARE_EXPORT(__movstrSI12);
-+DECLARE_EXPORT(__movstrSI16);
-+DECLARE_EXPORT(__movstrSI20);
-+DECLARE_EXPORT(__movstrSI24);
-+DECLARE_EXPORT(__movstrSI28);
-+DECLARE_EXPORT(__movstrSI32);
-+DECLARE_EXPORT(__movstrSI36);
-+DECLARE_EXPORT(__movstrSI40);
-+DECLARE_EXPORT(__movstrSI44);
-+DECLARE_EXPORT(__movstrSI48);
-+DECLARE_EXPORT(__movstrSI52);
-+DECLARE_EXPORT(__movstrSI56);
-+DECLARE_EXPORT(__movstrSI60);
-+#if __GNUC__ == 4
-+DECLARE_EXPORT(__movmem);
-+#else
-+DECLARE_EXPORT(__movstr);
++#ifdef CONFIG_SH_FPU_EMU
++static int emulate_branch(unsigned short inst, struct pt_regs* regs)
++{
++	/*
++	 * bfs: 8fxx: PC+=d*2+4;
++	 * bts: 8dxx: PC+=d*2+4;
++	 * bra: axxx: PC+=D*2+4;
++	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
++	 * braf:0x23: PC+=Rn*2+4;
++	 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
++	 * jmp: 4x2b: PC=Rn;
++	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
++	 * rts: 000b: PC=PR;
++	 */
++	if ((inst & 0xfd00) == 0x8d00) {
++		regs->pc += SH_PC_8BIT_OFFSET(inst);
++		return 0;
++	}
++
++	if ((inst & 0xe000) == 0xa000) {
++		regs->pc += SH_PC_12BIT_OFFSET(inst);
++		return 0;
++	}
++
++	if ((inst & 0xf0df) == 0x0003) {
++		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
++		return 0;
++	}
++
++	if ((inst & 0xf0df) == 0x400b) {
++		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
++		return 0;
++	}
++
++	if ((inst & 0xffff) == 0x000b) {
++		regs->pc = regs->pr;
++		return 0;
++	}
++
++	return 1;
++}
 +#endif
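[Editorial note, not part of the patch: each mask/compare test in emulate_branch() covers a pair of encodings from the comment above (bt/s with bf/s, braf with bsrf, jmp with jsr), because the two opcodes in each pair differ only in bits the mask clears. A tiny standalone check of those masks:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t bts = 0x8d10, bfs = 0x8f10;	/* bt/s, bf/s with disp 0x10 */
		uint16_t braf = 0x0323, bsrf = 0x0303;	/* braf r3, bsrf r3          */
		uint16_t jmp = 0x442b, jsr = 0x440b;	/* jmp @r4, jsr @r4          */

		assert((bts  & 0xfd00) == 0x8d00 && (bfs  & 0xfd00) == 0x8d00);
		assert((braf & 0xf0df) == 0x0003 && (bsrf & 0xf0df) == 0x0003);
		assert((jmp  & 0xf0df) == 0x400b && (jsr  & 0xf0df) == 0x400b);
		return 0;
	}
]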
 +
-+#if __GNUC__ == 4
-+DECLARE_EXPORT(__movmem_i4_even);
-+DECLARE_EXPORT(__movmem_i4_odd);
-+DECLARE_EXPORT(__movmemSI12_i4);
++asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
++				unsigned long r6, unsigned long r7,
++				struct pt_regs __regs)
++{
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	unsigned long error_code;
++	struct task_struct *tsk = current;
++#ifdef CONFIG_SH_FPU_EMU
++	unsigned short inst = 0;
 +
-+#if (__GNUC_MINOR__ == 2 || defined(__GNUC_STM_RELEASE__))
-+/*
-+ * GCC 4.2 emits these for division, as do GCC 4.1.x versions of the ST
-+ * compiler which include backported patches.
-+ */
-+DECLARE_EXPORT(__sdivsi3_i4i);
-+DECLARE_EXPORT(__udiv_qrnnd_16);
-+DECLARE_EXPORT(__udivsi3_i4i);
++	get_user(inst, (unsigned short *)regs->pc + 1);
++	if (!do_fpu_inst(inst, regs)) {
++		get_user(inst, (unsigned short *)regs->pc);
++		if (!emulate_branch(inst, regs))
++			return;
++		/* fault in branch.*/
++	}
++	/* not a FPU inst. */
 +#endif
-+#else /* GCC 3.x */
-+DECLARE_EXPORT(__movstr_i4_even);
-+DECLARE_EXPORT(__movstr_i4_odd);
-+DECLARE_EXPORT(__movstrSI12_i4);
-+#endif /* __GNUC__ == 4 */
 +
-+#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
-+	defined(CONFIG_SH7705_CACHE_32KB))
-+/* needed by some modules */
-+EXPORT_SYMBOL(flush_cache_all);
-+EXPORT_SYMBOL(flush_cache_range);
-+EXPORT_SYMBOL(flush_dcache_page);
-+EXPORT_SYMBOL(__flush_purge_region);
++	lookup_exception_vector(error_code);
++
++	local_irq_enable();
++	CHK_REMOTE_DEBUG(regs);
++	force_sig(SIGILL, tsk);
++	die_if_no_fixup("illegal slot instruction", regs, error_code);
++}
++
++asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
++				   unsigned long r6, unsigned long r7,
++				   struct pt_regs __regs)
++{
++	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	long ex;
++
++	lookup_exception_vector(ex);
++	die_if_kernel("exception", regs, ex);
++}
++
++#if defined(CONFIG_SH_STANDARD_BIOS)
++void *gdb_vbr_vector;
++
++static inline void __init gdb_vbr_init(void)
++{
++	register unsigned long vbr;
++
++	/*
++	 * Read the old value of the VBR register to initialise
++	 * the vector through which debug and BIOS traps are
++	 * delegated by the Linux trap handler.
++	 */
++	asm volatile("stc vbr, %0" : "=r" (vbr));
++
++	gdb_vbr_vector = (void *)(vbr + 0x100);
++	printk("Setting GDB trap vector to 0x%08lx\n",
++	       (unsigned long)gdb_vbr_vector);
++}
 +#endif
 +
-+#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
-+	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
-+EXPORT_SYMBOL(clear_user_page);
++void __cpuinit per_cpu_trap_init(void)
++{
++	extern void *vbr_base;
++
++#ifdef CONFIG_SH_STANDARD_BIOS
++	if (raw_smp_processor_id() == 0)
++		gdb_vbr_init();
 +#endif
 +
-+EXPORT_SYMBOL(csum_partial);
-+EXPORT_SYMBOL(csum_partial_copy_generic);
-+#ifdef CONFIG_IPV6
-+EXPORT_SYMBOL(csum_ipv6_magic);
++	/* NOTE: The VBR value should be at P1
++	   (or P2, virtual "fixed" address space).
++	   It definitely should not be a physical address.  */
++
++	asm volatile("ldc	%0, vbr"
++		     : /* no output */
++		     : "r" (&vbr_base)
++		     : "memory");
++}
++
++void *set_exception_table_vec(unsigned int vec, void *handler)
++{
++	extern void *exception_handling_table[];
++	void *old_handler;
++
++	old_handler = exception_handling_table[vec];
++	exception_handling_table[vec] = handler;
++	return old_handler;
++}
++
++void __init trap_init(void)
++{
++	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
++	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
++
++#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
++    defined(CONFIG_SH_FPU_EMU)
++	/*
++	 * For SH-4 lacking an FPU, treat floating point instructions as
++	 * reserved. They'll be handled in the math-emu case, or faulted on
++	 * otherwise.
++	 */
++	set_exception_table_evt(0x800, do_reserved_inst);
++	set_exception_table_evt(0x820, do_illegal_slot_inst);
++#elif defined(CONFIG_SH_FPU)
++#ifdef CONFIG_CPU_SUBTYPE_SHX3
++	set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
++	set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
++#else
++	set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
++	set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
++#endif
 +#endif
-+EXPORT_SYMBOL(clear_page);
-+EXPORT_SYMBOL(__clear_user);
-+EXPORT_SYMBOL(_ebss);
-diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
-new file mode 100644
-index 0000000..8004c38
---- /dev/null
-+++ b/arch/sh/kernel/sh_ksyms_64.c
-@@ -0,0 +1,55 @@
-+/*
-+ * arch/sh/kernel/sh_ksyms_64.c
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/rwsem.h>
-+#include <linux/module.h>
-+#include <linux/smp.h>
-+#include <linux/user.h>
-+#include <linux/elfcore.h>
-+#include <linux/sched.h>
-+#include <linux/in6.h>
-+#include <linux/interrupt.h>
-+#include <linux/screen_info.h>
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/uaccess.h>
-+#include <asm/checksum.h>
-+#include <asm/io.h>
-+#include <asm/delay.h>
-+#include <asm/irq.h>
 +
-+extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
++#ifdef CONFIG_CPU_SH2
++	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
++#endif
++#ifdef CONFIG_CPU_SH2A
++	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
++	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
++#endif
 +
-+/* platform dependent support */
-+EXPORT_SYMBOL(dump_fpu);
-+EXPORT_SYMBOL(kernel_thread);
++	/* Setup VBR for boot cpu */
++	per_cpu_trap_init();
++}
 +
-+/* Networking helper routines. */
-+EXPORT_SYMBOL(csum_partial_copy_nocheck);
++void show_trace(struct task_struct *tsk, unsigned long *sp,
++		struct pt_regs *regs)
++{
++	unsigned long addr;
 +
-+#ifdef CONFIG_VT
-+EXPORT_SYMBOL(screen_info);
++	if (regs && user_mode(regs))
++		return;
++
++	printk("\nCall trace: ");
++#ifdef CONFIG_KALLSYMS
++	printk("\n");
 +#endif
 +
-+EXPORT_SYMBOL(__down);
-+EXPORT_SYMBOL(__down_trylock);
-+EXPORT_SYMBOL(__up);
-+EXPORT_SYMBOL(__put_user_asm_l);
-+EXPORT_SYMBOL(__get_user_asm_l);
-+EXPORT_SYMBOL(__copy_user);
-+EXPORT_SYMBOL(memcpy);
-+EXPORT_SYMBOL(__udelay);
-+EXPORT_SYMBOL(__ndelay);
++	while (!kstack_end(sp)) {
++		addr = *sp++;
++		if (kernel_text_address(addr))
++			print_ip_sym(addr);
++	}
 +
-+/* Ugh.  These come in from libgcc.a at link time. */
-+#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
++	printk("\n");
 +
-+DECLARE_EXPORT(__sdivsi3);
-+DECLARE_EXPORT(__muldi3);
-+DECLARE_EXPORT(__udivsi3);
-diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
-deleted file mode 100644
-index ca754fd..0000000
---- a/arch/sh/kernel/signal.c
-+++ /dev/null
-@@ -1,629 +0,0 @@
--/*
-- *  linux/arch/sh/kernel/signal.c
-- *
-- *  Copyright (C) 1991, 1992  Linus Torvalds
-- *
-- *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
-- *
-- *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
-- *
-- */
--#include <linux/sched.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/kernel.h>
--#include <linux/signal.h>
--#include <linux/errno.h>
--#include <linux/wait.h>
--#include <linux/ptrace.h>
--#include <linux/unistd.h>
--#include <linux/stddef.h>
--#include <linux/tty.h>
--#include <linux/elf.h>
--#include <linux/personality.h>
--#include <linux/binfmts.h>
--#include <linux/freezer.h>
--#include <linux/io.h>
--#include <asm/system.h>
--#include <asm/ucontext.h>
--#include <asm/uaccess.h>
--#include <asm/pgtable.h>
--#include <asm/cacheflush.h>
--
--#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
--
--/*
-- * Atomically swap in the new signal mask, and wait for a signal.
-- */
--asmlinkage int
--sys_sigsuspend(old_sigset_t mask,
--	       unsigned long r5, unsigned long r6, unsigned long r7,
--	       struct pt_regs __regs)
--{
--	mask &= _BLOCKABLE;
--	spin_lock_irq(&current->sighand->siglock);
--	current->saved_sigmask = current->blocked;
--	siginitset(&current->blocked, mask);
--	recalc_sigpending();
--	spin_unlock_irq(&current->sighand->siglock);
--
--	current->state = TASK_INTERRUPTIBLE;
--	schedule();
--	set_thread_flag(TIF_RESTORE_SIGMASK);
--	return -ERESTARTNOHAND;
--}
--
--asmlinkage int
--sys_sigaction(int sig, const struct old_sigaction __user *act,
--	      struct old_sigaction __user *oact)
--{
--	struct k_sigaction new_ka, old_ka;
--	int ret;
--
--	if (act) {
--		old_sigset_t mask;
--		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
--		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
--		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
--			return -EFAULT;
--		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
--		__get_user(mask, &act->sa_mask);
--		siginitset(&new_ka.sa.sa_mask, mask);
--	}
--
--	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
--
--	if (!ret && oact) {
--		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
--		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
--		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
--			return -EFAULT;
--		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
--		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
--	}
--
--	return ret;
--}
--
--asmlinkage int
--sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
--		unsigned long r6, unsigned long r7,
--		struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--
--	return do_sigaltstack(uss, uoss, regs->regs[15]);
--}
--
--
--/*
-- * Do a signal return; undo the signal stack.
-- */
--
--#define MOVW(n)	 (0x9300|((n)-2))	/* Move mem word at PC+n to R3 */
--#if defined(CONFIG_CPU_SH2)
--#define TRAP_NOARG 0xc320		/* Syscall w/no args (NR in R3) */
--#else
--#define TRAP_NOARG 0xc310		/* Syscall w/no args (NR in R3) */
--#endif
--#define OR_R0_R0 0x200b			/* or r0,r0 (insert to avoid hardware bug) */
--
--struct sigframe
--{
--	struct sigcontext sc;
--	unsigned long extramask[_NSIG_WORDS-1];
--	u16 retcode[8];
--};
--
--struct rt_sigframe
--{
--	struct siginfo info;
--	struct ucontext uc;
--	u16 retcode[8];
--};
--
--#ifdef CONFIG_SH_FPU
--static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
--{
--	struct task_struct *tsk = current;
--
--	if (!(current_cpu_data.flags & CPU_HAS_FPU))
--		return 0;
--
--	set_used_math();
--	return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
--				sizeof(long)*(16*2+2));
--}
--
--static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
--				      struct pt_regs *regs)
--{
--	struct task_struct *tsk = current;
--
--	if (!(current_cpu_data.flags & CPU_HAS_FPU))
--		return 0;
--
--	if (!used_math()) {
--		__put_user(0, &sc->sc_ownedfp);
--		return 0;
--	}
--
--	__put_user(1, &sc->sc_ownedfp);
--
--	/* This will cause a "finit" to be triggered by the next
--	   attempted FPU operation by the 'current' process.
--	   */
--	clear_used_math();
--
--	unlazy_fpu(tsk, regs);
--	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
--			      sizeof(long)*(16*2+2));
--}
--#endif /* CONFIG_SH_FPU */
--
--static int
--restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
--{
--	unsigned int err = 0;
--
--#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
--			COPY(regs[1]);
--	COPY(regs[2]);	COPY(regs[3]);
--	COPY(regs[4]);	COPY(regs[5]);
--	COPY(regs[6]);	COPY(regs[7]);
--	COPY(regs[8]);	COPY(regs[9]);
--	COPY(regs[10]);	COPY(regs[11]);
--	COPY(regs[12]);	COPY(regs[13]);
--	COPY(regs[14]);	COPY(regs[15]);
--	COPY(gbr);	COPY(mach);
--	COPY(macl);	COPY(pr);
--	COPY(sr);	COPY(pc);
--#undef COPY
--
--#ifdef CONFIG_SH_FPU
--	if (current_cpu_data.flags & CPU_HAS_FPU) {
--		int owned_fp;
--		struct task_struct *tsk = current;
--
--		regs->sr |= SR_FD; /* Release FPU */
--		clear_fpu(tsk, regs);
--		clear_used_math();
--		__get_user (owned_fp, &sc->sc_ownedfp);
--		if (owned_fp)
--			err |= restore_sigcontext_fpu(sc);
--	}
--#endif
--
--	regs->tra = -1;		/* disable syscall checks */
--	err |= __get_user(*r0_p, &sc->sc_regs[0]);
--	return err;
--}
--
--asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
--			     unsigned long r6, unsigned long r7,
--			     struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
--	sigset_t set;
--	int r0;
--
--	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
--		goto badframe;
--
--	if (__get_user(set.sig[0], &frame->sc.oldmask)
--	    || (_NSIG_WORDS > 1
--		&& __copy_from_user(&set.sig[1], &frame->extramask,
--				    sizeof(frame->extramask))))
--		goto badframe;
--
--	sigdelsetmask(&set, ~_BLOCKABLE);
--
--	spin_lock_irq(&current->sighand->siglock);
--	current->blocked = set;
--	recalc_sigpending();
--	spin_unlock_irq(&current->sighand->siglock);
--
--	if (restore_sigcontext(regs, &frame->sc, &r0))
--		goto badframe;
--	return r0;
--
--badframe:
--	force_sig(SIGSEGV, current);
--	return 0;
--}
--
--asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
--				unsigned long r6, unsigned long r7,
--				struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
--	sigset_t set;
--	stack_t st;
--	int r0;
--
--	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
--		goto badframe;
--
--	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
--		goto badframe;
--
--	sigdelsetmask(&set, ~_BLOCKABLE);
--	spin_lock_irq(&current->sighand->siglock);
--	current->blocked = set;
--	recalc_sigpending();
--	spin_unlock_irq(&current->sighand->siglock);
--
--	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
--		goto badframe;
--
--	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
--		goto badframe;
--	/* It is more difficult to avoid calling this function than to
--	   call it and ignore errors.  */
--	do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
--
--	return r0;
--
--badframe:
--	force_sig(SIGSEGV, current);
--	return 0;
--}
--
--/*
-- * Set up a signal frame.
-- */
--
--static int
--setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
--		 unsigned long mask)
--{
--	int err = 0;
--
--#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
--	COPY(regs[0]);	COPY(regs[1]);
--	COPY(regs[2]);	COPY(regs[3]);
--	COPY(regs[4]);	COPY(regs[5]);
--	COPY(regs[6]);	COPY(regs[7]);
--	COPY(regs[8]);	COPY(regs[9]);
--	COPY(regs[10]);	COPY(regs[11]);
--	COPY(regs[12]);	COPY(regs[13]);
--	COPY(regs[14]);	COPY(regs[15]);
--	COPY(gbr);	COPY(mach);
--	COPY(macl);	COPY(pr);
--	COPY(sr);	COPY(pc);
--#undef COPY
--
--#ifdef CONFIG_SH_FPU
--	err |= save_sigcontext_fpu(sc, regs);
--#endif
--
--	/* non-iBCS2 extensions.. */
--	err |= __put_user(mask, &sc->oldmask);
--
--	return err;
--}
--
--/*
-- * Determine which stack to use..
-- */
--static inline void __user *
--get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
--{
--	if (ka->sa.sa_flags & SA_ONSTACK) {
--		if (sas_ss_flags(sp) == 0)
--			sp = current->sas_ss_sp + current->sas_ss_size;
--	}
--
--	return (void __user *)((sp - frame_size) & -8ul);
--}
--
--/* These symbols are defined with the addresses in the vsyscall page.
--   See vsyscall-trapa.S.  */
--extern void __user __kernel_sigreturn;
--extern void __user __kernel_rt_sigreturn;
--
--static int setup_frame(int sig, struct k_sigaction *ka,
--			sigset_t *set, struct pt_regs *regs)
--{
--	struct sigframe __user *frame;
--	int err = 0;
--	int signal;
--
--	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
--
--	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
--		goto give_sigsegv;
--
--	signal = current_thread_info()->exec_domain
--		&& current_thread_info()->exec_domain->signal_invmap
--		&& sig < 32
--		? current_thread_info()->exec_domain->signal_invmap[sig]
--		: sig;
--
--	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
--
--	if (_NSIG_WORDS > 1)
--		err |= __copy_to_user(frame->extramask, &set->sig[1],
--				      sizeof(frame->extramask));
--
--	/* Set up to return from userspace.  If provided, use a stub
--	   already in userspace.  */
--	if (ka->sa.sa_flags & SA_RESTORER) {
--		regs->pr = (unsigned long) ka->sa.sa_restorer;
--#ifdef CONFIG_VSYSCALL
--	} else if (likely(current->mm->context.vdso)) {
--		regs->pr = VDSO_SYM(&__kernel_sigreturn);
--#endif
--	} else {
--		/* Generate return code (system call to sigreturn) */
--		err |= __put_user(MOVW(7), &frame->retcode[0]);
--		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
--		err |= __put_user((__NR_sigreturn), &frame->retcode[7]);
--		regs->pr = (unsigned long) frame->retcode;
--	}
--
--	if (err)
--		goto give_sigsegv;
--
--	/* Set up registers for signal handler */
--	regs->regs[15] = (unsigned long) frame;
--	regs->regs[4] = signal; /* Arg for signal handler */
--	regs->regs[5] = 0;
--	regs->regs[6] = (unsigned long) &frame->sc;
--	regs->pc = (unsigned long) ka->sa.sa_handler;
--
--	set_fs(USER_DS);
--
--	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
--		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
--
--	flush_cache_sigtramp(regs->pr);
--
--	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
--		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
--
--	return 0;
--
--give_sigsegv:
--	force_sigsegv(sig, current);
--	return -EFAULT;
--}
--
--static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
--			   sigset_t *set, struct pt_regs *regs)
--{
--	struct rt_sigframe __user *frame;
--	int err = 0;
--	int signal;
--
--	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
--
--	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
--		goto give_sigsegv;
--
--	signal = current_thread_info()->exec_domain
--		&& current_thread_info()->exec_domain->signal_invmap
--		&& sig < 32
--		? current_thread_info()->exec_domain->signal_invmap[sig]
--		: sig;
--
--	err |= copy_siginfo_to_user(&frame->info, info);
--
--	/* Create the ucontext.  */
--	err |= __put_user(0, &frame->uc.uc_flags);
--	err |= __put_user(0, &frame->uc.uc_link);
--	err |= __put_user((void *)current->sas_ss_sp,
--			  &frame->uc.uc_stack.ss_sp);
--	err |= __put_user(sas_ss_flags(regs->regs[15]),
--			  &frame->uc.uc_stack.ss_flags);
--	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
--	err |= setup_sigcontext(&frame->uc.uc_mcontext,
--			        regs, set->sig[0]);
--	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
--
--	/* Set up to return from userspace.  If provided, use a stub
--	   already in userspace.  */
--	if (ka->sa.sa_flags & SA_RESTORER) {
--		regs->pr = (unsigned long) ka->sa.sa_restorer;
--#ifdef CONFIG_VSYSCALL
--	} else if (likely(current->mm->context.vdso)) {
--		regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
--#endif
--	} else {
--		/* Generate return code (system call to rt_sigreturn) */
--		err |= __put_user(MOVW(7), &frame->retcode[0]);
--		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
--		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
--		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
--		regs->pr = (unsigned long) frame->retcode;
--	}
--
--	if (err)
--		goto give_sigsegv;
--
--	/* Set up registers for signal handler */
--	regs->regs[15] = (unsigned long) frame;
--	regs->regs[4] = signal; /* Arg for signal handler */
--	regs->regs[5] = (unsigned long) &frame->info;
--	regs->regs[6] = (unsigned long) &frame->uc;
--	regs->pc = (unsigned long) ka->sa.sa_handler;
--
--	set_fs(USER_DS);
--
--	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
--		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
--
--	flush_cache_sigtramp(regs->pr);
--
--	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
--		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
--
--	return 0;
--
--give_sigsegv:
--	force_sigsegv(sig, current);
--	return -EFAULT;
--}
--
--/*
-- * OK, we're invoking a handler
-- */
--
--static int
--handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
--	      sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
--{
--	int ret;
--
--	/* Are we from a system call? */
--	if (regs->tra >= 0) {
--		/* If so, check system call restarting.. */
--		switch (regs->regs[0]) {
--			case -ERESTART_RESTARTBLOCK:
--			case -ERESTARTNOHAND:
--				regs->regs[0] = -EINTR;
--				break;
--
--			case -ERESTARTSYS:
--				if (!(ka->sa.sa_flags & SA_RESTART)) {
--					regs->regs[0] = -EINTR;
--					break;
--				}
--			/* fallthrough */
--			case -ERESTARTNOINTR:
--				regs->regs[0] = save_r0;
--				regs->pc -= instruction_size(
--						ctrl_inw(regs->pc - 4));
--				break;
--		}
--#ifdef CONFIG_GUSA
--	} else {
--		/* gUSA handling */
--		preempt_disable();
--
--		if (regs->regs[15] >= 0xc0000000) {
--			int offset = (int)regs->regs[15];
--
--			/* Reset stack pointer: clear critical region mark */
--			regs->regs[15] = regs->regs[1];
--			if (regs->pc < regs->regs[0])
--				/* Go to rewind point #1 */
--				regs->pc = regs->regs[0] + offset -
--					instruction_size(ctrl_inw(regs->pc-4));
--		}
--
--		preempt_enable_no_resched();
--#endif
--	}
--
--	/* Set up the stack frame */
--	if (ka->sa.sa_flags & SA_SIGINFO)
--		ret = setup_rt_frame(sig, ka, info, oldset, regs);
--	else
--		ret = setup_frame(sig, ka, oldset, regs);
--
--	if (ka->sa.sa_flags & SA_ONESHOT)
--		ka->sa.sa_handler = SIG_DFL;
--
--	if (ret == 0) {
--		spin_lock_irq(&current->sighand->siglock);
--		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
--		if (!(ka->sa.sa_flags & SA_NODEFER))
--			sigaddset(&current->blocked,sig);
--		recalc_sigpending();
--		spin_unlock_irq(&current->sighand->siglock);
--	}
--
--	return ret;
--}
--
--/*
-- * Note that 'init' is a special process: it doesn't get signals it doesn't
-- * want to handle. Thus you cannot kill init even with a SIGKILL even by
-- * mistake.
-- *
-- * Note that we go through the signals twice: once to check the signals that
-- * the kernel can handle, and then we build all the user-level signal handling
-- * stack-frames in one go after that.
-- */
--static void do_signal(struct pt_regs *regs, unsigned int save_r0)
--{
--	siginfo_t info;
--	int signr;
--	struct k_sigaction ka;
--	sigset_t *oldset;
--
--	/*
--	 * We want the common case to go fast, which
--	 * is why we may in certain cases get here from
--	 * kernel mode. Just return without doing anything
--	 * if so.
--	 */
--	if (!user_mode(regs))
--		return;
--
--	if (try_to_freeze())
--		goto no_signal;
--
--	if (test_thread_flag(TIF_RESTORE_SIGMASK))
--		oldset = &current->saved_sigmask;
--	else
--		oldset = &current->blocked;
--
--	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
--	if (signr > 0) {
--		/* Whee!  Actually deliver the signal.  */
--		if (handle_signal(signr, &ka, &info, oldset,
--				  regs, save_r0) == 0) {
--			/* a signal was successfully delivered; the saved
--			 * sigmask will have been stored in the signal frame,
--			 * and will be restored by sigreturn, so we can simply
--			 * clear the TIF_RESTORE_SIGMASK flag */
--			if (test_thread_flag(TIF_RESTORE_SIGMASK))
--				clear_thread_flag(TIF_RESTORE_SIGMASK);
--		}
--
--		return;
--	}
--
-- no_signal:
--	/* Did we come from a system call? */
--	if (regs->tra >= 0) {
--		/* Restart the system call - no handlers present */
--		if (regs->regs[0] == -ERESTARTNOHAND ||
--		    regs->regs[0] == -ERESTARTSYS ||
--		    regs->regs[0] == -ERESTARTNOINTR) {
--			regs->regs[0] = save_r0;
--			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
--		} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
--			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
--			regs->regs[3] = __NR_restart_syscall;
--		}
--	}
--
--	/* if there's no signal to deliver, we just put the saved sigmask
--	 * back */
--	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
--		clear_thread_flag(TIF_RESTORE_SIGMASK);
--		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
--	}
--}
--
--asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
--				 __u32 thread_info_flags)
--{
--	/* deal with pending signal delivery */
--	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
--		do_signal(regs, save_r0);
--}
-diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
++	if (!tsk)
++		tsk = current;
++
++	debug_show_held_locks(tsk);
++}
++
++void show_stack(struct task_struct *tsk, unsigned long *sp)
++{
++	unsigned long stack;
++
++	if (!tsk)
++		tsk = current;
++	if (tsk == current)
++		sp = (unsigned long *)current_stack_pointer;
++	else
++		sp = (unsigned long *)tsk->thread.sp;
++
++	stack = (unsigned long)sp;
++	dump_mem("Stack: ", stack, THREAD_SIZE +
++		 (unsigned long)task_stack_page(tsk));
++	show_trace(tsk, sp, NULL);
++}
++
++void dump_stack(void)
++{
++	show_stack(NULL, NULL);
++}
++EXPORT_SYMBOL(dump_stack);
+diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
 new file mode 100644
-index 0000000..f6b5fbf
+index 0000000..c0b3c6f
 --- /dev/null
-+++ b/arch/sh/kernel/signal_32.c
-@@ -0,0 +1,611 @@
++++ b/arch/sh/kernel/traps_64.c
+@@ -0,0 +1,975 @@
 +/*
-+ *  linux/arch/sh/kernel/signal.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *
-+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
++ * arch/sh/kernel/traps_64.c
 + *
-+ *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003, 2004  Paul Mundt
++ * Copyright (C) 2003, 2004  Richard Curnow
 + *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
 + */
 +#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
 +#include <linux/kernel.h>
-+#include <linux/signal.h>
++#include <linux/string.h>
 +#include <linux/errno.h>
-+#include <linux/wait.h>
 +#include <linux/ptrace.h>
-+#include <linux/unistd.h>
-+#include <linux/stddef.h>
-+#include <linux/tty.h>
-+#include <linux/elf.h>
-+#include <linux/personality.h>
-+#include <linux/binfmts.h>
-+#include <linux/freezer.h>
-+#include <linux/io.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/kallsyms.h>
++#include <linux/interrupt.h>
++#include <linux/sysctl.h>
++#include <linux/module.h>
 +#include <asm/system.h>
-+#include <asm/ucontext.h>
 +#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/processor.h>
 +#include <asm/pgtable.h>
-+#include <asm/cacheflush.h>
 +
-+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
++#undef DEBUG_EXCEPTION
++#ifdef DEBUG_EXCEPTION
++/* implemented in ../lib/dbg.c */
++extern void show_excp_regs(char *fname, int trapnr, int signr,
++			   struct pt_regs *regs);
++#else
++#define show_excp_regs(a, b, c, d)
++#endif
 +
-+/*
-+ * Atomically swap in the new signal mask, and wait for a signal.
-+ */
-+asmlinkage int
-+sys_sigsuspend(old_sigset_t mask,
-+	       unsigned long r5, unsigned long r6, unsigned long r7,
-+	       struct pt_regs __regs)
++static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
++		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
++
++#define DO_ERROR(trapnr, signr, str, name, tsk) \
++asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
++{ \
++	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
++}
++
++spinlock_t die_lock;
++
++void die(const char * str, struct pt_regs * regs, long err)
 +{
-+	mask &= _BLOCKABLE;
-+	spin_lock_irq(&current->sighand->siglock);
-+	current->saved_sigmask = current->blocked;
-+	siginitset(&current->blocked, mask);
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
++	console_verbose();
++	spin_lock_irq(&die_lock);
++	printk("%s: %lx\n", str, (err & 0xffffff));
++	show_regs(regs);
++	spin_unlock_irq(&die_lock);
++	do_exit(SIGSEGV);
++}
 +
-+	current->state = TASK_INTERRUPTIBLE;
-+	schedule();
-+	set_thread_flag(TIF_RESTORE_SIGMASK);
-+	return -ERESTARTNOHAND;
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++{
++	if (!user_mode(regs))
++		die(str, regs, err);
 +}
 +
-+asmlinkage int
-+sys_sigaction(int sig, const struct old_sigaction __user *act,
-+	      struct old_sigaction __user *oact)
++static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
 +{
-+	struct k_sigaction new_ka, old_ka;
-+	int ret;
++	if (!user_mode(regs)) {
++		const struct exception_table_entry *fixup;
++		fixup = search_exception_tables(regs->pc);
++		if (fixup) {
++			regs->pc = fixup->fixup;
++			return;
++		}
++		die(str, regs, err);
++	}
++}
 +
-+	if (act) {
-+		old_sigset_t mask;
-+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
-+			return -EFAULT;
-+		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-+		__get_user(mask, &act->sa_mask);
-+		siginitset(&new_ka.sa.sa_mask, mask);
++DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
++DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
++
++
++/* Implement misaligned load/store handling for kernel (and optionally for user
++   mode too).  Limitation : only SHmedia mode code is handled - there is no
++   handling at all for misaligned accesses occurring in SHcompact code yet. */
++
++static int misaligned_fixup(struct pt_regs *regs);
++
++asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
++{
++	if (misaligned_fixup(regs) < 0) {
++		do_unhandled_exception(7, SIGSEGV, "address error(load)",
++				"do_address_error_load",
++				error_code, regs, current);
 +	}
++	return;
++}
 +
-+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
++asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
++{
++	if (misaligned_fixup(regs) < 0) {
++		do_unhandled_exception(8, SIGSEGV, "address error(store)",
++				"do_address_error_store",
++				error_code, regs, current);
++	}
++	return;
++}
 +
-+	if (!ret && oact) {
-+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
-+			return -EFAULT;
-+		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
++#if defined(CONFIG_SH64_ID2815_WORKAROUND)
++
++#define OPCODE_INVALID      0
++#define OPCODE_USER_VALID   1
++#define OPCODE_PRIV_VALID   2
++
++/* getcon/putcon - requires checking which control register is referenced. */
++#define OPCODE_CTRL_REG     3
++
++/* Table of valid opcodes for SHmedia mode.
++   Form a 10-bit value by concatenating the major/minor opcodes i.e.
++   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
++   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
++   LSBs==4'b0000 etc). */
++static unsigned long shmedia_opcode_table[64] = {
++	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
++	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
++	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
++	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
++	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
++	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
++	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
++	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
++};
++
++void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
++{
++	/* Workaround SH5-101 cut2 silicon defect #2815 :
++	   in some situations, inter-mode branches from SHcompact -> SHmedia
++	   which should take ITLBMISS or EXECPROT exceptions at the target
++	   falsely take RESINST at the target instead. */
++
++	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
++	unsigned long pc, aligned_pc;
++	int get_user_error;
++	int trapnr = 12;
++	int signr = SIGILL;
++	char *exception_name = "reserved_instruction";
++
++	pc = regs->pc;
++	if ((pc & 3) == 1) {
++		/* SHmedia : check for defect.  This requires executable vmas
++		   to be readable too. */
++		aligned_pc = pc & ~3;
++		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
++			get_user_error = -EFAULT;
++		} else {
++			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
++		}
++		if (get_user_error >= 0) {
++			unsigned long index, shift;
++			unsigned long major, minor, combined;
++			unsigned long reserved_field;
++			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
++			major = (opcode >> 26) & 0x3f;
++			minor = (opcode >> 16) & 0xf;
++			combined = (major << 4) | minor;
++			index = major;
++			shift = minor << 1;
++			if (reserved_field == 0) {
++				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
++				switch (opcode_state) {
++					case OPCODE_INVALID:
++						/* Trap. */
++						break;
++					case OPCODE_USER_VALID:
++						/* Restart the instruction : the branch to the instruction will now be from an RTE
++						   not from SHcompact so the silicon defect won't be triggered. */
++						return;
++					case OPCODE_PRIV_VALID:
++						if (!user_mode(regs)) {
++							/* Should only ever get here if a module has
++							   SHcompact code inside it.  If so, the same fix up is needed. */
++							return; /* same reason */
++						}
++						/* Otherwise, user mode trying to execute a privileged instruction -
++						   fall through to trap. */
++						break;
++					case OPCODE_CTRL_REG:
++						/* If in privileged mode, return as above. */
++						if (!user_mode(regs)) return;
++						/* In user mode ... */
++						if (combined == 0x9f) { /* GETCON */
++							unsigned long regno = (opcode >> 20) & 0x3f;
++							if (regno >= 62) {
++								return;
++							}
++							/* Otherwise, reserved or privileged control register, => trap */
++						} else if (combined == 0x1bf) { /* PUTCON */
++							unsigned long regno = (opcode >> 4) & 0x3f;
++							if (regno >= 62) {
++								return;
++							}
++							/* Otherwise, reserved or privileged control register, => trap */
++						} else {
++							/* Trap */
++						}
++						break;
++					default:
++						/* Fall through to trap. */
++						break;
++				}
++			}
++			/* fall through to normal resinst processing */
++		} else {
++			/* Error trying to read opcode.  This typically means a
++			   real fault, not a RESINST any more.  So change the
++			   codes. */
++			trapnr = 87;
++			exception_name = "address error (exec)";
++			signr = SIGSEGV;
++		}
 +	}
 +
-+	return ret;
++	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
 +}
 +
-+asmlinkage int
-+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
-+		unsigned long r6, unsigned long r7,
-+		struct pt_regs __regs)
++#else /* CONFIG_SH64_ID2815_WORKAROUND */
++
++/* If the workaround isn't needed, this is just a straightforward reserved
++   instruction */
++DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)
++
++#endif /* CONFIG_SH64_ID2815_WORKAROUND */
++
++/* Called with interrupts disabled */
++asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
 +{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
++	show_excp_regs(__FUNCTION__, -1, -1, regs);
++	die_if_kernel("exception", regs, ex);
++}
 +
-+	return do_sigaltstack(uss, uoss, regs->regs[15]);
++int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
++{
++	/* Syscall debug */
++        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);
++
++	die_if_kernel("unknown trapa", regs, scId);
++
++	return -ENOSYS;
 +}
 +
++void show_stack(struct task_struct *tsk, unsigned long *sp)
++{
++#ifdef CONFIG_KALLSYMS
++	extern void sh64_unwind(struct pt_regs *regs);
++	struct pt_regs *regs;
 +
-+/*
-+ * Do a signal return; undo the signal stack.
-+ */
++	regs = tsk ? tsk->thread.kregs : NULL;
 +
-+#define MOVW(n)	 (0x9300|((n)-2))	/* Move mem word at PC+n to R3 */
-+#if defined(CONFIG_CPU_SH2)
-+#define TRAP_NOARG 0xc320		/* Syscall w/no args (NR in R3) */
++	sh64_unwind(regs);
 +#else
-+#define TRAP_NOARG 0xc310		/* Syscall w/no args (NR in R3) */
++	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
 +#endif
-+#define OR_R0_R0 0x200b			/* or r0,r0 (insert to avoid hardware bug) */
++}
 +
-+struct sigframe
++void show_task(unsigned long *sp)
 +{
-+	struct sigcontext sc;
-+	unsigned long extramask[_NSIG_WORDS-1];
-+	u16 retcode[8];
-+};
++	show_stack(NULL, sp);
++}
 +
-+struct rt_sigframe
++void dump_stack(void)
 +{
-+	struct siginfo info;
-+	struct ucontext uc;
-+	u16 retcode[8];
-+};
++	show_task(NULL);
++}
++/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
++EXPORT_SYMBOL(dump_stack);
 +
-+#ifdef CONFIG_SH_FPU
-+static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
++static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
++		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
 +{
-+	struct task_struct *tsk = current;
++	show_excp_regs(fn_name, trapnr, signr, regs);
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = trapnr;
 +
-+	if (!(current_cpu_data.flags & CPU_HAS_FPU))
-+		return 0;
++	if (user_mode(regs))
++		force_sig(signr, tsk);
 +
-+	set_used_math();
-+	return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
-+				sizeof(long)*(16*2+2));
++	die_if_no_fixup(str, regs, error_code);
 +}
 +
-+static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
-+				      struct pt_regs *regs)
++static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
 +{
-+	struct task_struct *tsk = current;
++	int get_user_error;
++	unsigned long aligned_pc;
++	unsigned long opcode;
 +
-+	if (!(current_cpu_data.flags & CPU_HAS_FPU))
-+		return 0;
++	if ((pc & 3) == 1) {
++		/* SHmedia */
++		aligned_pc = pc & ~3;
++		if (from_user_mode) {
++			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
++				get_user_error = -EFAULT;
++			} else {
++				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
++				*result_opcode = opcode;
++			}
++			return get_user_error;
++		} else {
++			/* If the fault was in the kernel, we can either read
++			 * this directly, or if not, we fault.
++			*/
++			*result_opcode = *(unsigned long *) aligned_pc;
++			return 0;
++		}
++	} else if ((pc & 1) == 0) {
++		/* SHcompact */
++		/* TODO : provide handling for this.  We don't really support
++		   user-mode SHcompact yet, and for a kernel fault, this would
++		   have to come from a module built for SHcompact.  */
++		return -EFAULT;
++	} else {
++		/* misaligned */
++		return -EFAULT;
++	}
++}
 +
-+	if (!used_math()) {
-+		__put_user(0, &sc->sc_ownedfp);
-+		return 0;
++static int address_is_sign_extended(__u64 a)
++{
++	__u64 b;
++#if (NEFF == 32)
++	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
++	return (b == a) ? 1 : 0;
++#else
++#error "Sign extend check only works for NEFF==32"
++#endif
++}
++
++static int generate_and_check_address(struct pt_regs *regs,
++				      __u32 opcode,
++				      int displacement_not_indexed,
++				      int width_shift,
++				      __u64 *address)
++{
++	/* return -1 for fault, 0 for OK */
++
++	__u64 base_address, addr;
++	int basereg;
++
++	basereg = (opcode >> 20) & 0x3f;
++	base_address = regs->regs[basereg];
++	if (displacement_not_indexed) {
++		__s64 displacement;
++		displacement = (opcode >> 10) & 0x3ff;
++		displacement = ((displacement << 54) >> 54); /* sign extend */
++		addr = (__u64)((__s64)base_address + (displacement << width_shift));
++	} else {
++		__u64 offset;
++		int offsetreg;
++		offsetreg = (opcode >> 10) & 0x3f;
++		offset = regs->regs[offsetreg];
++		addr = base_address + offset;
 +	}
 +
-+	__put_user(1, &sc->sc_ownedfp);
++	/* Check sign extended */
++	if (!address_is_sign_extended(addr)) {
++		return -1;
++	}
 +
-+	/* This will cause a "finit" to be triggered by the next
-+	   attempted FPU operation by the 'current' process.
-+	   */
-+	clear_used_math();
++#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++	/* Check accessible.  For misaligned access in the kernel, assume the
++	   address is always accessible (and if not, just fault when the
++	   load/store gets done.) */
++	if (user_mode(regs)) {
++		if (addr >= TASK_SIZE) {
++			return -1;
++		}
++		/* Do access_ok check later - it depends on whether it's a load or a store. */
++	}
++#endif
 +
-+	unlazy_fpu(tsk, regs);
-+	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
-+			      sizeof(long)*(16*2+2));
++	*address = addr;
++	return 0;
 +}
-+#endif /* CONFIG_SH_FPU */
 +
-+static int
-+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
++/* Default value as for sh */
++#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++static int user_mode_unaligned_fixup_count = 10;
++static int user_mode_unaligned_fixup_enable = 1;
++#endif
++
++static int kernel_mode_unaligned_fixup_count = 32;
++
++static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
 +{
-+	unsigned int err = 0;
++	unsigned short x;
++	unsigned char *p, *q;
++	p = (unsigned char *) (int) address;
++	q = (unsigned char *) &x;
++	q[0] = p[0];
++	q[1] = p[1];
 +
-+#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
-+			COPY(regs[1]);
-+	COPY(regs[2]);	COPY(regs[3]);
-+	COPY(regs[4]);	COPY(regs[5]);
-+	COPY(regs[6]);	COPY(regs[7]);
-+	COPY(regs[8]);	COPY(regs[9]);
-+	COPY(regs[10]);	COPY(regs[11]);
-+	COPY(regs[12]);	COPY(regs[13]);
-+	COPY(regs[14]);	COPY(regs[15]);
-+	COPY(gbr);	COPY(mach);
-+	COPY(macl);	COPY(pr);
-+	COPY(sr);	COPY(pc);
-+#undef COPY
++	if (do_sign_extend) {
++		*result = (__u64)(__s64) *(short *) &x;
++	} else {
++		*result = (__u64) x;
++	}
++}
 +
-+#ifdef CONFIG_SH_FPU
-+	if (current_cpu_data.flags & CPU_HAS_FPU) {
-+		int owned_fp;
-+		struct task_struct *tsk = current;
++static void misaligned_kernel_word_store(__u64 address, __u64 value)
++{
++	unsigned short x;
++	unsigned char *p, *q;
++	p = (unsigned char *) (int) address;
++	q = (unsigned char *) &x;
 +
-+		regs->sr |= SR_FD; /* Release FPU */
-+		clear_fpu(tsk, regs);
-+		clear_used_math();
-+		__get_user (owned_fp, &sc->sc_ownedfp);
-+		if (owned_fp)
-+			err |= restore_sigcontext_fpu(sc);
++	x = (__u16) value;
++	p[0] = q[0];
++	p[1] = q[1];
++}
++
++static int misaligned_load(struct pt_regs *regs,
++			   __u32 opcode,
++			   int displacement_not_indexed,
++			   int width_shift,
++			   int do_sign_extend)
++{
++	/* Return -1 for a fault, 0 for OK */
++	int error;
++	int destreg;
++	__u64 address;
++
++	error = generate_and_check_address(regs, opcode,
++			displacement_not_indexed, width_shift, &address);
++	if (error < 0) {
++		return error;
 +	}
++
++	destreg = (opcode >> 4) & 0x3f;
++#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++	if (user_mode(regs)) {
++		__u64 buffer;
++
++		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
++			return -1;
++		}
++
++		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
++			return -1; /* fault */
++		}
++		switch (width_shift) {
++		case 1:
++			if (do_sign_extend) {
++				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
++			} else {
++				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
++			}
++			break;
++		case 2:
++			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
++			break;
++		case 3:
++			regs->regs[destreg] = buffer;
++			break;
++		default:
++			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
++				width_shift, (unsigned long) regs->pc);
++			break;
++		}
++	} else
 +#endif
++	{
++		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
++		__u64 lo, hi;
++
++		switch (width_shift) {
++		case 1:
++			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
++			break;
++		case 2:
++			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
++			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
++			regs->regs[destreg] = lo | hi;
++			break;
++		case 3:
++			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
++			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
++			regs->regs[destreg] = lo | hi;
++			break;
++
++		default:
++			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
++				width_shift, (unsigned long) regs->pc);
++			break;
++		}
++	}
++
++	return 0;
 +
-+	regs->tra = -1;		/* disable syscall checks */
-+	err |= __get_user(*r0_p, &sc->sc_regs[0]);
-+	return err;
 +}
 +
-+asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
-+			     unsigned long r6, unsigned long r7,
-+			     struct pt_regs __regs)
++static int misaligned_store(struct pt_regs *regs,
++			    __u32 opcode,
++			    int displacement_not_indexed,
++			    int width_shift)
 +{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
-+	sigset_t set;
-+	int r0;
++	/* Return -1 for a fault, 0 for OK */
++	int error;
++	int srcreg;
++	__u64 address;
 +
-+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-+		goto badframe;
++	error = generate_and_check_address(regs, opcode,
++			displacement_not_indexed, width_shift, &address);
++	if (error < 0) {
++		return error;
++	}
 +
-+	if (__get_user(set.sig[0], &frame->sc.oldmask)
-+	    || (_NSIG_WORDS > 1
-+		&& __copy_from_user(&set.sig[1], &frame->extramask,
-+				    sizeof(frame->extramask))))
-+		goto badframe;
++	srcreg = (opcode >> 4) & 0x3f;
++#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++	if (user_mode(regs)) {
++		__u64 buffer;
 +
-+	sigdelsetmask(&set, ~_BLOCKABLE);
++		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
++			return -1;
++		}
 +
-+	spin_lock_irq(&current->sighand->siglock);
-+	current->blocked = set;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
++		switch (width_shift) {
++		case 1:
++			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
++			break;
++		case 2:
++			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
++			break;
++		case 3:
++			buffer = regs->regs[srcreg];
++			break;
++		default:
++			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
++				width_shift, (unsigned long) regs->pc);
++			break;
++		}
 +
-+	if (restore_sigcontext(regs, &frame->sc, &r0))
-+		goto badframe;
-+	return r0;
++		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
++			return -1; /* fault */
++		}
++	} else
++#endif
++	{
++		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
++		__u64 val = regs->regs[srcreg];
++
++		switch (width_shift) {
++		case 1:
++			misaligned_kernel_word_store(address, val);
++			break;
++		case 2:
++			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
++			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
++			break;
++		case 3:
++			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
++			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
++			break;
++
++		default:
++			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
++				width_shift, (unsigned long) regs->pc);
++			break;
++		}
++	}
 +
-+badframe:
-+	force_sig(SIGSEGV, current);
 +	return 0;
++
 +}
 +
-+asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
-+				unsigned long r6, unsigned long r7,
-+				struct pt_regs __regs)
++#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
++   error. */
++static int misaligned_fpu_load(struct pt_regs *regs,
++			   __u32 opcode,
++			   int displacement_not_indexed,
++			   int width_shift,
++			   int do_paired_load)
 +{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
-+	sigset_t set;
-+	stack_t st;
-+	int r0;
++	/* Return -1 for a fault, 0 for OK */
++	int error;
++	int destreg;
++	__u64 address;
 +
-+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-+		goto badframe;
++	error = generate_and_check_address(regs, opcode,
++			displacement_not_indexed, width_shift, &address);
++	if (error < 0) {
++		return error;
++	}
 +
-+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-+		goto badframe;
++	destreg = (opcode >> 4) & 0x3f;
++	if (user_mode(regs)) {
++		__u64 buffer;
++		__u32 buflo, bufhi;
 +
-+	sigdelsetmask(&set, ~_BLOCKABLE);
-+	spin_lock_irq(&current->sighand->siglock);
-+	current->blocked = set;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
++		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
++			return -1;
++		}
 +
-+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
-+		goto badframe;
++		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
++			return -1; /* fault */
++		}
++		/* 'current' may be the current owner of the FPU state, so
++		   context switch the registers into memory so they can be
++		   indexed by register number. */
++		if (last_task_used_math == current) {
++			enable_fpu();
++			save_fpu(current, regs);
++			disable_fpu();
++			last_task_used_math = NULL;
++			regs->sr |= SR_FD;
++		}
 +
-+	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
-+		goto badframe;
-+	/* It is more difficult to avoid calling this function than to
-+	   call it and ignore errors.  */
-+	do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
++		buflo = *(__u32*) &buffer;
++		bufhi = *(1 + (__u32*) &buffer);
++
++		switch (width_shift) {
++		case 2:
++			current->thread.fpu.hard.fp_regs[destreg] = buflo;
++			break;
++		case 3:
++			if (do_paired_load) {
++				current->thread.fpu.hard.fp_regs[destreg] = buflo;
++				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
++			} else {
++#if defined(CONFIG_LITTLE_ENDIAN)
++				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
++				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
++#else
++				current->thread.fpu.hard.fp_regs[destreg] = buflo;
++				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
++#endif
++			}
++			break;
++		default:
++			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
++				width_shift, (unsigned long) regs->pc);
++			break;
++		}
++		return 0;
++	} else {
++		die ("Misaligned FPU load inside kernel", regs, 0);
++		return -1;
++	}
 +
-+	return r0;
 +
-+badframe:
-+	force_sig(SIGSEGV, current);
-+	return 0;
 +}
 +
-+/*
-+ * Set up a signal frame.
-+ */
++static int misaligned_fpu_store(struct pt_regs *regs,
++			   __u32 opcode,
++			   int displacement_not_indexed,
++			   int width_shift,
++			   int do_paired_load)
++{
++	/* Return -1 for a fault, 0 for OK */
++	int error;
++	int srcreg;
++	__u64 address;
 +
-+static int
-+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
-+		 unsigned long mask)
++	error = generate_and_check_address(regs, opcode,
++			displacement_not_indexed, width_shift, &address);
++	if (error < 0) {
++		return error;
++	}
++
++	srcreg = (opcode >> 4) & 0x3f;
++	if (user_mode(regs)) {
++		__u64 buffer;
++		/* Initialise these to NaNs. */
++		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
++
++		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
++			return -1;
++		}
++
++		/* 'current' may be the current owner of the FPU state, so
++		   context switch the registers into memory so they can be
++		   indexed by register number. */
++		if (last_task_used_math == current) {
++			enable_fpu();
++			save_fpu(current, regs);
++			disable_fpu();
++			last_task_used_math = NULL;
++			regs->sr |= SR_FD;
++		}
++
++		switch (width_shift) {
++		case 2:
++			buflo = current->thread.fpu.hard.fp_regs[srcreg];
++			break;
++		case 3:
++			if (do_paired_load) {
++				buflo = current->thread.fpu.hard.fp_regs[srcreg];
++				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
++			} else {
++#if defined(CONFIG_LITTLE_ENDIAN)
++				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
++				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
++#else
++				buflo = current->thread.fpu.hard.fp_regs[srcreg];
++				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
++#endif
++			}
++			break;
++		default:
++			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
++				width_shift, (unsigned long) regs->pc);
++			break;
++		}
++
++		*(__u32*) &buffer = buflo;
++		*(1 + (__u32*) &buffer) = bufhi;
++		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
++			return -1; /* fault */
++		}
++		return 0;
++	} else {
++		die ("Misaligned FPU store inside kernel", regs, 0);
++		return -1;
++	}
++}
++#endif
++
++static int misaligned_fixup(struct pt_regs *regs)
 +{
-+	int err = 0;
++	unsigned long opcode;
++	int error;
++	int major, minor;
 +
-+#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
-+	COPY(regs[0]);	COPY(regs[1]);
-+	COPY(regs[2]);	COPY(regs[3]);
-+	COPY(regs[4]);	COPY(regs[5]);
-+	COPY(regs[6]);	COPY(regs[7]);
-+	COPY(regs[8]);	COPY(regs[9]);
-+	COPY(regs[10]);	COPY(regs[11]);
-+	COPY(regs[12]);	COPY(regs[13]);
-+	COPY(regs[14]);	COPY(regs[15]);
-+	COPY(gbr);	COPY(mach);
-+	COPY(macl);	COPY(pr);
-+	COPY(sr);	COPY(pc);
-+#undef COPY
++#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++	/* Never fixup user mode misaligned accesses without this option enabled. */
++	return -1;
++#else
++	if (!user_mode_unaligned_fixup_enable) return -1;
++#endif
 +
-+#ifdef CONFIG_SH_FPU
-+	err |= save_sigcontext_fpu(sc, regs);
++	error = read_opcode(regs->pc, &opcode, user_mode(regs));
++	if (error < 0) {
++		return error;
++	}
++	major = (opcode >> 26) & 0x3f;
++	minor = (opcode >> 16) & 0xf;
++
++#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
++		--user_mode_unaligned_fixup_count;
++		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
++		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
++		       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
++	} else
++#endif
++	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
++		--kernel_mode_unaligned_fixup_count;
++		if (in_interrupt()) {
++			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
++			       (__u32)regs->pc, opcode);
++		} else {
++			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
++			       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
++		}
++	}
++
++
++	switch (major) {
++		case (0x84>>2): /* LD.W */
++			error = misaligned_load(regs, opcode, 1, 1, 1);
++			break;
++		case (0xb0>>2): /* LD.UW */
++			error = misaligned_load(regs, opcode, 1, 1, 0);
++			break;
++		case (0x88>>2): /* LD.L */
++			error = misaligned_load(regs, opcode, 1, 2, 1);
++			break;
++		case (0x8c>>2): /* LD.Q */
++			error = misaligned_load(regs, opcode, 1, 3, 0);
++			break;
++
++		case (0xa4>>2): /* ST.W */
++			error = misaligned_store(regs, opcode, 1, 1);
++			break;
++		case (0xa8>>2): /* ST.L */
++			error = misaligned_store(regs, opcode, 1, 2);
++			break;
++		case (0xac>>2): /* ST.Q */
++			error = misaligned_store(regs, opcode, 1, 3);
++			break;
++
++		case (0x40>>2): /* indexed loads */
++			switch (minor) {
++				case 0x1: /* LDX.W */
++					error = misaligned_load(regs, opcode, 0, 1, 1);
++					break;
++				case 0x5: /* LDX.UW */
++					error = misaligned_load(regs, opcode, 0, 1, 0);
++					break;
++				case 0x2: /* LDX.L */
++					error = misaligned_load(regs, opcode, 0, 2, 1);
++					break;
++				case 0x3: /* LDX.Q */
++					error = misaligned_load(regs, opcode, 0, 3, 0);
++					break;
++				default:
++					error = -1;
++					break;
++			}
++			break;
++
++		case (0x60>>2): /* indexed stores */
++			switch (minor) {
++				case 0x1: /* STX.W */
++					error = misaligned_store(regs, opcode, 0, 1);
++					break;
++				case 0x2: /* STX.L */
++					error = misaligned_store(regs, opcode, 0, 2);
++					break;
++				case 0x3: /* STX.Q */
++					error = misaligned_store(regs, opcode, 0, 3);
++					break;
++				default:
++					error = -1;
++					break;
++			}
++			break;
++
++#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++		case (0x94>>2): /* FLD.S */
++			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
++			break;
++		case (0x98>>2): /* FLD.P */
++			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
++			break;
++		case (0x9c>>2): /* FLD.D */
++			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
++			break;
++		case (0x1c>>2): /* floating indexed loads */
++			switch (minor) {
++			case 0x8: /* FLDX.S */
++				error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
++				break;
++			case 0xd: /* FLDX.P */
++				error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
++				break;
++			case 0x9: /* FLDX.D */
++				error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
++				break;
++			default:
++				error = -1;
++				break;
++			}
++			break;
++		case (0xb4>>2): /* FST.S */
++			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
++			break;
++		case (0xb8>>2): /* FST.P */
++			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
++			break;
++		case (0xbc>>2): /* FST.D */
++			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
++			break;
++		case (0x3c>>2): /* floating indexed stores */
++			switch (minor) {
++			case 0x8: /* FSTX.S */
++				error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
++				break;
++			case 0xd: /* FSTX.P */
++				error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
++				break;
++			case 0x9: /* FSTX.D */
++				error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
++				break;
++			default:
++				error = -1;
++				break;
++			}
++			break;
 +#endif
 +
-+	/* non-iBCS2 extensions.. */
-+	err |= __put_user(mask, &sc->oldmask);
-+
-+	return err;
-+}
-+
-+/*
-+ * Determine which stack to use..
-+ */
-+static inline void __user *
-+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
-+{
-+	if (ka->sa.sa_flags & SA_ONSTACK) {
-+		if (sas_ss_flags(sp) == 0)
-+			sp = current->sas_ss_sp + current->sas_ss_size;
++		default:
++			/* Fault */
++			error = -1;
++			break;
 +	}
 +
-+	return (void __user *)((sp - frame_size) & -8ul);
-+}
-+
-+/* These symbols are defined with the addresses in the vsyscall page.
-+   See vsyscall-trapa.S.  */
-+extern void __user __kernel_sigreturn;
-+extern void __user __kernel_rt_sigreturn;
-+
-+static int setup_frame(int sig, struct k_sigaction *ka,
-+			sigset_t *set, struct pt_regs *regs)
-+{
-+	struct sigframe __user *frame;
-+	int err = 0;
-+	int signal;
-+
-+	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
-+
-+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-+		goto give_sigsegv;
-+
-+	signal = current_thread_info()->exec_domain
-+		&& current_thread_info()->exec_domain->signal_invmap
-+		&& sig < 32
-+		? current_thread_info()->exec_domain->signal_invmap[sig]
-+		: sig;
-+
-+	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
-+
-+	if (_NSIG_WORDS > 1)
-+		err |= __copy_to_user(frame->extramask, &set->sig[1],
-+				      sizeof(frame->extramask));
-+
-+	/* Set up to return from userspace.  If provided, use a stub
-+	   already in userspace.  */
-+	if (ka->sa.sa_flags & SA_RESTORER) {
-+		regs->pr = (unsigned long) ka->sa.sa_restorer;
-+#ifdef CONFIG_VSYSCALL
-+	} else if (likely(current->mm->context.vdso)) {
-+		regs->pr = VDSO_SYM(&__kernel_sigreturn);
-+#endif
++	if (error < 0) {
++		return error;
 +	} else {
-+		/* Generate return code (system call to sigreturn) */
-+		err |= __put_user(MOVW(7), &frame->retcode[0]);
-+		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
-+		err |= __put_user((__NR_sigreturn), &frame->retcode[7]);
-+		regs->pr = (unsigned long) frame->retcode;
++		regs->pc += 4; /* Skip the instruction that's just been emulated */
++		return 0;
 +	}
 +
-+	if (err)
-+		goto give_sigsegv;
-+
-+	/* Set up registers for signal handler */
-+	regs->regs[15] = (unsigned long) frame;
-+	regs->regs[4] = signal; /* Arg for signal handler */
-+	regs->regs[5] = 0;
-+	regs->regs[6] = (unsigned long) &frame->sc;
-+	regs->pc = (unsigned long) ka->sa.sa_handler;
++}
 +
-+	set_fs(USER_DS);
++static ctl_table unaligned_table[] = {
++	{
++		.ctl_name	= CTL_UNNUMBERED,
++		.procname	= "kernel_reports",
++		.data		= &kernel_mode_unaligned_fixup_count,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec
++	},
++#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
++	{
++		.ctl_name	= CTL_UNNUMBERED,
++		.procname	= "user_reports",
++		.data		= &user_mode_unaligned_fixup_count,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec
++	},
++	{
++		.ctl_name	= CTL_UNNUMBERED,
++		.procname	= "user_enable",
++		.data		= &user_mode_unaligned_fixup_enable,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec},
++#endif
++	{}
++};
 +
-+	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
-+		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
++static ctl_table unaligned_root[] = {
++	{
++		.ctl_name	= CTL_UNNUMBERED,
++		.procname	= "unaligned_fixup",
++		.mode		= 0555,
++		.child		= unaligned_table
++	},
++	{}
++};
 +
-+	flush_cache_sigtramp(regs->pr);
++static ctl_table sh64_root[] = {
++	{
++		.ctl_name	= CTL_UNNUMBERED,
++		.procname	= "sh64",
++		.mode		= 0555,
++		.child		= unaligned_root
++	},
++	{}
++};
++static struct ctl_table_header *sysctl_header;
++static int __init init_sysctl(void)
++{
++	sysctl_header = register_sysctl_table(sh64_root);
++	return 0;
++}
 +
-+	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
-+		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
++__initcall(init_sysctl);
 +
-+	return 0;
 +
-+give_sigsegv:
-+	force_sigsegv(sig, current);
-+	return -EFAULT;
++asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
++{
++	u64 peek_real_address_q(u64 addr);
++	u64 poke_real_address_q(u64 addr, u64 val);
++	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
++	unsigned long long exp_cause;
++	/* It's not worth ioremapping the debug module registers for the amount
++	   of access we make to them - just go direct to their physical
++	   addresses. */
++	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
++	if (exp_cause & ~4) {
++		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
++			(unsigned long)(exp_cause & 0xffffffff));
++	}
++	show_state();
++	/* Clear all DEBUGINT causes */
++	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
 +}
+diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
+index 0956fb3..d7d4991 100644
+--- a/arch/sh/kernel/vmlinux.lds.S
++++ b/arch/sh/kernel/vmlinux.lds.S
+@@ -1,138 +1,5 @@
+-/*
+- * ld script to make SuperH Linux kernel
+- * Written by Niibe Yutaka
+- */
+-#include <asm/thread_info.h>
+-#include <asm/cache.h>
+-#include <asm-generic/vmlinux.lds.h>
+-
+-#ifdef CONFIG_CPU_LITTLE_ENDIAN
+-OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
++#ifdef CONFIG_SUPERH32
++# include "vmlinux_32.lds.S"
+ #else
+-OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
++# include "vmlinux_64.lds.S"
+ #endif
+-OUTPUT_ARCH(sh)
+-ENTRY(_start)
+-SECTIONS
+-{
+-	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
+-	_text = .;			/* Text and read-only data */
+-
+-	.empty_zero_page : {
+-		*(.empty_zero_page)
+-	} = 0
+-
+-	.text : {
+-		*(.text.head)
+-		TEXT_TEXT
+-		SCHED_TEXT
+-		LOCK_TEXT
+-		KPROBES_TEXT
+-		*(.fixup)
+-		*(.gnu.warning)
+-	} = 0x0009
+-
+-	. = ALIGN(16);		/* Exception table */
+-	__start___ex_table = .;
+-	__ex_table : { *(__ex_table) }
+-	__stop___ex_table = .;
+-
+-	_etext = .;			/* End of text section */
+-
+-	BUG_TABLE
+-	NOTES
+-	RO_DATA(PAGE_SIZE)
+-
+-	. = ALIGN(THREAD_SIZE);
+-	.data : {			/* Data */
+-		*(.data.init_task)
+-
+-		. = ALIGN(L1_CACHE_BYTES);
+-		*(.data.cacheline_aligned)
+-
+-		. = ALIGN(L1_CACHE_BYTES);
+-		*(.data.read_mostly)
+-
+-		. = ALIGN(PAGE_SIZE);
+-		*(.data.page_aligned)
+-
+-		__nosave_begin = .;
+-		*(.data.nosave)
+-		. = ALIGN(PAGE_SIZE);
+-		__nosave_end = .;
+-
+-		DATA_DATA
+-		CONSTRUCTORS
+-	}
+-
+-	_edata = .;			/* End of data section */
+-
+-	. = ALIGN(PAGE_SIZE);		/* Init code and data */
+-	__init_begin = .;
+-	_sinittext = .;
+-	.init.text : { *(.init.text) }
+-	_einittext = .;
+-	.init.data : { *(.init.data) }
+-
+-	. = ALIGN(16);
+-	__setup_start = .;
+-	.init.setup : { *(.init.setup) }
+-	__setup_end = .;
+-
+-	__initcall_start = .;
+-	.initcall.init : {
+-		INITCALLS
+-	}
+-	__initcall_end = .;
+-	__con_initcall_start = .;
+-	.con_initcall.init : { *(.con_initcall.init) }
+-	__con_initcall_end = .;
+-
+-	SECURITY_INIT
+-
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	. = ALIGN(PAGE_SIZE);
+-	__initramfs_start = .;
+-	.init.ramfs : { *(.init.ramfs) }
+-	__initramfs_end = .;
+-#endif
+-
+-	. = ALIGN(4);
+-	__machvec_start = .;
+-	.machvec.init : { *(.machvec.init) }
+-	__machvec_end = .;
+-
+-	PERCPU(PAGE_SIZE)
+-
+-	/*
+-	 * .exit.text is discarded at runtime, not link time, to deal with
+-	 * references from __bug_table
+-	 */
+-	.exit.text : { *(.exit.text) }
+-	.exit.data : { *(.exit.data) }
+-
+-	. = ALIGN(PAGE_SIZE);
+-	.bss : {
+-		__init_end = .;
+-		__bss_start = .;		/* BSS */
+-		*(.bss.page_aligned)
+-		*(.bss)
+-		*(COMMON)
+-		. = ALIGN(4);
+-		_ebss = .;			/* uClinux MTD sucks */
+-		_end = . ;
+-	}
+-
+-	/*
+-	 * When something in the kernel is NOT compiled as a module, the
+-	 * module cleanup code and data are put into these segments. Both
+-	 * can then be thrown away, as cleanup code is never called unless
+-	 * it's a module.
+-	 */
+-	/DISCARD/ : {
+-		*(.exitcall.exit)
+-	}
+-
+-	STABS_DEBUG
+-	DWARF_DEBUG
+-}
+diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
+new file mode 100644
+index 0000000..c711378
+--- /dev/null
++++ b/arch/sh/kernel/vmlinux_32.lds.S
+@@ -0,0 +1,152 @@
++/*
++ * ld script to make SuperH Linux kernel
++ * Written by Niibe Yutaka
++ */
++#include <asm/thread_info.h>
++#include <asm/cache.h>
++#include <asm-generic/vmlinux.lds.h>
 +
-+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-+			   sigset_t *set, struct pt_regs *regs)
++#ifdef CONFIG_CPU_LITTLE_ENDIAN
++OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
++#else
++OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
++#endif
++OUTPUT_ARCH(sh)
++ENTRY(_start)
++SECTIONS
 +{
-+	struct rt_sigframe __user *frame;
-+	int err = 0;
-+	int signal;
-+
-+	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
++#ifdef CONFIG_32BIT
++	. = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
++#else
++	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
++#endif
 +
-+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-+		goto give_sigsegv;
++	_text = .;			/* Text and read-only data */
 +
-+	signal = current_thread_info()->exec_domain
-+		&& current_thread_info()->exec_domain->signal_invmap
-+		&& sig < 32
-+		? current_thread_info()->exec_domain->signal_invmap[sig]
-+		: sig;
++	.empty_zero_page : {
++		*(.empty_zero_page)
++	} = 0
 +
-+	err |= copy_siginfo_to_user(&frame->info, info);
++	.text : {
++		*(.text.head)
++		TEXT_TEXT
++		SCHED_TEXT
++		LOCK_TEXT
++		KPROBES_TEXT
++		*(.fixup)
++		*(.gnu.warning)
++	} = 0x0009
 +
-+	/* Create the ucontext.  */
-+	err |= __put_user(0, &frame->uc.uc_flags);
-+	err |= __put_user(0, &frame->uc.uc_link);
-+	err |= __put_user((void *)current->sas_ss_sp,
-+			  &frame->uc.uc_stack.ss_sp);
-+	err |= __put_user(sas_ss_flags(regs->regs[15]),
-+			  &frame->uc.uc_stack.ss_flags);
-+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-+	err |= setup_sigcontext(&frame->uc.uc_mcontext,
-+			        regs, set->sig[0]);
-+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
++	. = ALIGN(16);		/* Exception table */
++	__start___ex_table = .;
++	__ex_table : { *(__ex_table) }
++	__stop___ex_table = .;
 +
-+	/* Set up to return from userspace.  If provided, use a stub
-+	   already in userspace.  */
-+	if (ka->sa.sa_flags & SA_RESTORER) {
-+		regs->pr = (unsigned long) ka->sa.sa_restorer;
-+#ifdef CONFIG_VSYSCALL
-+	} else if (likely(current->mm->context.vdso)) {
-+		regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
-+#endif
-+	} else {
-+		/* Generate return code (system call to rt_sigreturn) */
-+		err |= __put_user(MOVW(7), &frame->retcode[0]);
-+		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
-+		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
-+		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
-+		regs->pr = (unsigned long) frame->retcode;
-+	}
++	_etext = .;			/* End of text section */
 +
-+	if (err)
-+		goto give_sigsegv;
++	BUG_TABLE
++	NOTES
++	RO_DATA(PAGE_SIZE)
 +
-+	/* Set up registers for signal handler */
-+	regs->regs[15] = (unsigned long) frame;
-+	regs->regs[4] = signal; /* Arg for signal handler */
-+	regs->regs[5] = (unsigned long) &frame->info;
-+	regs->regs[6] = (unsigned long) &frame->uc;
-+	regs->pc = (unsigned long) ka->sa.sa_handler;
++	/*
++	 * Code which must be executed uncached and the associated data
++	 */
++	. = ALIGN(PAGE_SIZE);
++	__uncached_start = .;
++	.uncached.text : { *(.uncached.text) }
++	.uncached.data : { *(.uncached.data) }
++	__uncached_end = .;
 +
-+	set_fs(USER_DS);
++	. = ALIGN(THREAD_SIZE);
++	.data : {			/* Data */
++		*(.data.init_task)
 +
-+	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
-+		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
++		. = ALIGN(L1_CACHE_BYTES);
++		*(.data.cacheline_aligned)
 +
-+	flush_cache_sigtramp(regs->pr);
++		. = ALIGN(L1_CACHE_BYTES);
++		*(.data.read_mostly)
 +
-+	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
-+		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
++		. = ALIGN(PAGE_SIZE);
++		*(.data.page_aligned)
 +
-+	return 0;
++		__nosave_begin = .;
++		*(.data.nosave)
++		. = ALIGN(PAGE_SIZE);
++		__nosave_end = .;
 +
-+give_sigsegv:
-+	force_sigsegv(sig, current);
-+	return -EFAULT;
-+}
++		DATA_DATA
++		CONSTRUCTORS
++	}
 +
-+/*
-+ * OK, we're invoking a handler
-+ */
++	_edata = .;			/* End of data section */
 +
-+static int
-+handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-+	      sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
-+{
-+	int ret;
++	. = ALIGN(PAGE_SIZE);		/* Init code and data */
++	__init_begin = .;
++	_sinittext = .;
++	.init.text : { INIT_TEXT }
++	_einittext = .;
++	.init.data : { INIT_DATA }
 +
-+	/* Are we from a system call? */
-+	if (regs->tra >= 0) {
-+		/* If so, check system call restarting.. */
-+		switch (regs->regs[0]) {
-+			case -ERESTART_RESTARTBLOCK:
-+			case -ERESTARTNOHAND:
-+				regs->regs[0] = -EINTR;
-+				break;
++	. = ALIGN(16);
++	__setup_start = .;
++	.init.setup : { *(.init.setup) }
++	__setup_end = .;
 +
-+			case -ERESTARTSYS:
-+				if (!(ka->sa.sa_flags & SA_RESTART)) {
-+					regs->regs[0] = -EINTR;
-+					break;
-+				}
-+			/* fallthrough */
-+			case -ERESTARTNOINTR:
-+				regs->regs[0] = save_r0;
-+				regs->pc -= instruction_size(
-+						ctrl_inw(regs->pc - 4));
-+				break;
-+		}
++	__initcall_start = .;
++	.initcall.init : {
++		INITCALLS
 +	}
++	__initcall_end = .;
++	__con_initcall_start = .;
++	.con_initcall.init : { *(.con_initcall.init) }
++	__con_initcall_end = .;
 +
-+	/* Set up the stack frame */
-+	if (ka->sa.sa_flags & SA_SIGINFO)
-+		ret = setup_rt_frame(sig, ka, info, oldset, regs);
-+	else
-+		ret = setup_frame(sig, ka, oldset, regs);
-+
-+	if (ka->sa.sa_flags & SA_ONESHOT)
-+		ka->sa.sa_handler = SIG_DFL;
++	SECURITY_INIT
 +
-+	if (ret == 0) {
-+		spin_lock_irq(&current->sighand->siglock);
-+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-+		if (!(ka->sa.sa_flags & SA_NODEFER))
-+			sigaddset(&current->blocked,sig);
-+		recalc_sigpending();
-+		spin_unlock_irq(&current->sighand->siglock);
-+	}
++#ifdef CONFIG_BLK_DEV_INITRD
++	. = ALIGN(PAGE_SIZE);
++	__initramfs_start = .;
++	.init.ramfs : { *(.init.ramfs) }
++	__initramfs_end = .;
++#endif
 +
-+	return ret;
-+}
++	. = ALIGN(4);
++	__machvec_start = .;
++	.machvec.init : { *(.machvec.init) }
++	__machvec_end = .;
 +
-+/*
-+ * Note that 'init' is a special process: it doesn't get signals it doesn't
-+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
-+ * mistake.
-+ *
-+ * Note that we go through the signals twice: once to check the signals that
-+ * the kernel can handle, and then we build all the user-level signal handling
-+ * stack-frames in one go after that.
-+ */
-+static void do_signal(struct pt_regs *regs, unsigned int save_r0)
-+{
-+	siginfo_t info;
-+	int signr;
-+	struct k_sigaction ka;
-+	sigset_t *oldset;
++	PERCPU(PAGE_SIZE)
 +
 +	/*
-+	 * We want the common case to go fast, which
-+	 * is why we may in certain cases get here from
-+	 * kernel mode. Just return without doing anything
-+	 * if so.
++	 * .exit.text is discarded at runtime, not link time, to deal with
++	 * references from __bug_table
 +	 */
-+	if (!user_mode(regs))
-+		return;
-+
-+	if (try_to_freeze())
-+		goto no_signal;
-+
-+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-+		oldset = &current->saved_sigmask;
-+	else
-+		oldset = &current->blocked;
-+
-+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-+	if (signr > 0) {
-+		/* Whee!  Actually deliver the signal.  */
-+		if (handle_signal(signr, &ka, &info, oldset,
-+				  regs, save_r0) == 0) {
-+			/* a signal was successfully delivered; the saved
-+			 * sigmask will have been stored in the signal frame,
-+			 * and will be restored by sigreturn, so we can simply
-+			 * clear the TIF_RESTORE_SIGMASK flag */
-+			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-+				clear_thread_flag(TIF_RESTORE_SIGMASK);
-+		}
-+
-+		return;
-+	}
++	.exit.text : { EXIT_TEXT }
++	.exit.data : { EXIT_DATA }
 +
-+ no_signal:
-+	/* Did we come from a system call? */
-+	if (regs->tra >= 0) {
-+		/* Restart the system call - no handlers present */
-+		if (regs->regs[0] == -ERESTARTNOHAND ||
-+		    regs->regs[0] == -ERESTARTSYS ||
-+		    regs->regs[0] == -ERESTARTNOINTR) {
-+			regs->regs[0] = save_r0;
-+			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
-+		} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
-+			regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
-+			regs->regs[3] = __NR_restart_syscall;
-+		}
++	. = ALIGN(PAGE_SIZE);
++	.bss : {
++		__init_end = .;
++		__bss_start = .;		/* BSS */
++		*(.bss.page_aligned)
++		*(.bss)
++		*(COMMON)
++		. = ALIGN(4);
++		_ebss = .;			/* uClinux MTD sucks */
++		_end = . ;
 +	}
 +
-+	/* if there's no signal to deliver, we just put the saved sigmask
-+	 * back */
-+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-+		clear_thread_flag(TIF_RESTORE_SIGMASK);
-+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
++	/*
++	 * When something in the kernel is NOT compiled as a module, the
++	 * module cleanup code and data are put into these segments. Both
++	 * can then be thrown away, as cleanup code is never called unless
++	 * it's a module.
++	 */
++	/DISCARD/ : {
++		*(.exitcall.exit)
 +	}
-+}
 +
-+asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
-+				 __u32 thread_info_flags)
-+{
-+	/* deal with pending signal delivery */
-+	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
-+		do_signal(regs, save_r0);
++	STABS_DEBUG
++	DWARF_DEBUG
 +}
-diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
+diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
 new file mode 100644
-index 0000000..80bde19
+index 0000000..3f1bd63
 --- /dev/null
-+++ b/arch/sh/kernel/signal_64.c
-@@ -0,0 +1,751 @@
++++ b/arch/sh/kernel/vmlinux_64.lds.S
+@@ -0,0 +1,164 @@
 +/*
-+ * arch/sh/kernel/signal_64.c
++ * ld script to make SH64 Linux kernel
 + *
 + * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003  Paul Mundt
-+ * Copyright (C) 2004  Richard Curnow
++ *
++ * benedict.gaster at superh.com:	 2nd May 2002
++ *    Add definition of empty_zero_page to be the first page of kernel image.
++ *
++ * benedict.gaster at superh.com:	 3rd May 2002
++ *    Added support for ramdisk, removing statically linked romfs at the
++ *    same time.
++ *
++ * lethal at linux-sh.org:          9th May 2003
++ *    Kill off GLOBAL_NAME() usage and other CDC-isms.
++ *
++ * lethal at linux-sh.org:         19th May 2003
++ *    Remove support for ancient toolchains.
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + */
-+#include <linux/rwsem.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/kernel.h>
-+#include <linux/signal.h>
-+#include <linux/errno.h>
-+#include <linux/wait.h>
-+#include <linux/personality.h>
-+#include <linux/freezer.h>
-+#include <linux/ptrace.h>
-+#include <linux/unistd.h>
-+#include <linux/stddef.h>
-+#include <asm/ucontext.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/cacheflush.h>
-+
-+#define REG_RET 9
-+#define REG_ARG1 2
-+#define REG_ARG2 3
-+#define REG_ARG3 4
-+#define REG_SP 15
-+#define REG_PR 18
-+#define REF_REG_RET regs->regs[REG_RET]
-+#define REF_REG_SP regs->regs[REG_SP]
-+#define DEREF_REG_PR regs->regs[REG_PR]
-+
-+#define DEBUG_SIG 0
-+
-+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-+
-+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
-+
-+/*
-+ * Atomically swap in the new signal mask, and wait for a signal.
-+ */
-+
-+asmlinkage int
-+sys_sigsuspend(old_sigset_t mask,
-+	       unsigned long r3, unsigned long r4, unsigned long r5,
-+	       unsigned long r6, unsigned long r7,
-+	       struct pt_regs * regs)
-+{
-+	sigset_t saveset;
-+
-+	mask &= _BLOCKABLE;
-+	spin_lock_irq(&current->sighand->siglock);
-+	saveset = current->blocked;
-+	siginitset(&current->blocked, mask);
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
-+
-+	REF_REG_RET = -EINTR;
-+	while (1) {
-+		current->state = TASK_INTERRUPTIBLE;
-+		schedule();
-+		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
-+		if (do_signal(regs, &saveset)) {
-+			/* pc now points at signal handler. Need to decrement
-+			   it because entry.S will increment it. */
-+			regs->pc -= 4;
-+			return -EINTR;
-+		}
-+	}
-+}
-+
-+asmlinkage int
-+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
-+	          unsigned long r4, unsigned long r5, unsigned long r6,
-+	          unsigned long r7,
-+	          struct pt_regs * regs)
-+{
-+	sigset_t saveset, newset;
++#include <asm/page.h>
++#include <asm/cache.h>
++#include <asm/thread_info.h>
 +
-+	/* XXX: Don't preclude handling different sized sigset_t's.  */
-+	if (sigsetsize != sizeof(sigset_t))
-+		return -EINVAL;
++#define LOAD_OFFSET	CONFIG_PAGE_OFFSET
++#include <asm-generic/vmlinux.lds.h>
 +
-+	if (copy_from_user(&newset, unewset, sizeof(newset)))
-+		return -EFAULT;
-+	sigdelsetmask(&newset, ~_BLOCKABLE);
-+	spin_lock_irq(&current->sighand->siglock);
-+	saveset = current->blocked;
-+	current->blocked = newset;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
++OUTPUT_ARCH(sh:sh5)
 +
-+	REF_REG_RET = -EINTR;
-+	while (1) {
-+		current->state = TASK_INTERRUPTIBLE;
-+		schedule();
-+		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
-+		if (do_signal(regs, &saveset)) {
-+			/* pc now points at signal handler. Need to decrement
-+			   it because entry.S will increment it. */
-+			regs->pc -= 4;
-+			return -EINTR;
-+		}
-+	}
-+}
++#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
 +
-+asmlinkage int
-+sys_sigaction(int sig, const struct old_sigaction __user *act,
-+	      struct old_sigaction __user *oact)
++ENTRY(__start)
++SECTIONS
 +{
-+	struct k_sigaction new_ka, old_ka;
-+	int ret;
-+
-+	if (act) {
-+		old_sigset_t mask;
-+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
-+			return -EFAULT;
-+		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-+		__get_user(mask, &act->sa_mask);
-+		siginitset(&new_ka.sa.sa_mask, mask);
-+	}
++	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
++	_text = .;			/* Text and read-only data */
 +
-+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
++	.empty_zero_page : C_PHYS(.empty_zero_page) {
++		*(.empty_zero_page)
++	} = 0
 +
-+	if (!ret && oact) {
-+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
-+			return -EFAULT;
-+		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
-+	}
++	.text : C_PHYS(.text) {
++		*(.text.head)
++		TEXT_TEXT
++		*(.text64)
++		*(.text..SHmedia32)
++		SCHED_TEXT
++		LOCK_TEXT
++		KPROBES_TEXT
++		*(.fixup)
++		*(.gnu.warning)
++#ifdef CONFIG_LITTLE_ENDIAN
++	} = 0x6ff0fff0
++#else
++	} = 0xf0fff06f
++#endif
 +
-+	return ret;
-+}
++	/* We likely want __ex_table to be Cache Line aligned */
++	. = ALIGN(L1_CACHE_BYTES);		/* Exception table */
++	__start___ex_table = .;
++	__ex_table : C_PHYS(__ex_table) { *(__ex_table) }
++	__stop___ex_table = .;
 +
-+asmlinkage int
-+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
-+	        unsigned long r4, unsigned long r5, unsigned long r6,
-+	        unsigned long r7,
-+	        struct pt_regs * regs)
-+{
-+	return do_sigaltstack(uss, uoss, REF_REG_SP);
-+}
++	_etext = .;			/* End of text section */
 +
++	BUG_TABLE
++	NOTES 
++	RO_DATA(PAGE_SIZE)
 +
-+/*
-+ * Do a signal return; undo the signal stack.
-+ */
++	. = ALIGN(THREAD_SIZE);
++	.data : C_PHYS(.data) {			/* Data */
++		*(.data.init_task)
 +
-+struct sigframe
-+{
-+	struct sigcontext sc;
-+	unsigned long extramask[_NSIG_WORDS-1];
-+	long long retcode[2];
-+};
++		. = ALIGN(L1_CACHE_BYTES);
++		*(.data.cacheline_aligned)
 +
-+struct rt_sigframe
-+{
-+	struct siginfo __user *pinfo;
-+	void *puc;
-+	struct siginfo info;
-+	struct ucontext uc;
-+	long long retcode[2];
-+};
++		. = ALIGN(L1_CACHE_BYTES);
++		*(.data.read_mostly)
 +
-+#ifdef CONFIG_SH_FPU
-+static inline int
-+restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-+{
-+	int err = 0;
-+	int fpvalid;
++		. = ALIGN(PAGE_SIZE);
++		*(.data.page_aligned)
 +
-+	err |= __get_user (fpvalid, &sc->sc_fpvalid);
-+	conditional_used_math(fpvalid);
-+	if (! fpvalid)
-+		return err;
++		__nosave_begin = .;
++		*(.data.nosave)
++		. = ALIGN(PAGE_SIZE);
++		__nosave_end = .;
 +
-+	if (current == last_task_used_math) {
-+		last_task_used_math = NULL;
-+		regs->sr |= SR_FD;
++		DATA_DATA
++		CONSTRUCTORS
 +	}
 +
-+	err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
-+				(sizeof(long long) * 32) + (sizeof(int) * 1));
-+
-+	return err;
-+}
-+
-+static inline int
-+setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-+{
-+	int err = 0;
-+	int fpvalid;
-+
-+	fpvalid = !!used_math();
-+	err |= __put_user(fpvalid, &sc->sc_fpvalid);
-+	if (! fpvalid)
-+		return err;
++	_edata = .;			/* End of data section */
 +
-+	if (current == last_task_used_math) {
-+		enable_fpu();
-+		save_fpu(current, regs);
-+		disable_fpu();
-+		last_task_used_math = NULL;
-+		regs->sr |= SR_FD;
++	. = ALIGN(PAGE_SIZE);		/* Init code and data */
++	__init_begin = .;
++	_sinittext = .;
++	.init.text : C_PHYS(.init.text) { INIT_TEXT }
++	_einittext = .;
++	.init.data : C_PHYS(.init.data) { INIT_DATA }
++	. = ALIGN(L1_CACHE_BYTES);	/* Better if Cache Line aligned */
++	__setup_start = .;
++	.init.setup : C_PHYS(.init.setup) { *(.init.setup) }
++	__setup_end = .;
++	__initcall_start = .;
++	.initcall.init : C_PHYS(.initcall.init) {
++		INITCALLS
++	}
++	__initcall_end = .;
++	__con_initcall_start = .;
++	.con_initcall.init : C_PHYS(.con_initcall.init) {
++		*(.con_initcall.init)
 +	}
++	__con_initcall_end = .;
 +
-+	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
-+			      (sizeof(long long) * 32) + (sizeof(int) * 1));
-+	clear_used_math();
++	SECURITY_INIT
 +
-+	return err;
-+}
-+#else
-+static inline int
-+restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-+{
-+	return 0;
-+}
-+static inline int
-+setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-+{
-+	return 0;
-+}
++#ifdef CONFIG_BLK_DEV_INITRD
++	. = ALIGN(PAGE_SIZE);
++	__initramfs_start = .;
++	.init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
++	__initramfs_end = .;
 +#endif
 +
-+static int
-+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
-+{
-+	unsigned int err = 0;
-+        unsigned long long current_sr, new_sr;
-+#define SR_MASK 0xffff8cfd
-+
-+#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
-+
-+	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
-+	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
-+	COPY(regs[8]);	COPY(regs[9]);  COPY(regs[10]);	COPY(regs[11]);
-+	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
-+	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
-+	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
-+	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
-+	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
-+	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
-+	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
-+	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
-+	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
-+	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
-+	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
-+	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
-+	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
-+	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
-+	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
-+
-+        /* Prevent the signal handler manipulating SR in a way that can
-+           crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
-+           modified */
-+        current_sr = regs->sr;
-+        err |= __get_user(new_sr, &sc->sc_sr);
-+        regs->sr &= SR_MASK;
-+        regs->sr |= (new_sr & ~SR_MASK);
-+
-+	COPY(pc);
-+
-+#undef COPY
-+
-+	/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
-+	 * has been restored above.) */
-+	err |= restore_sigcontext_fpu(regs, sc);
-+
-+	regs->syscall_nr = -1;		/* disable syscall checks */
-+	err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
-+	return err;
-+}
-+
-+asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
-+				   unsigned long r4, unsigned long r5,
-+				   unsigned long r6, unsigned long r7,
-+				   struct pt_regs * regs)
-+{
-+	struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
-+	sigset_t set;
-+	long long ret;
-+
-+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-+		goto badframe;
-+
-+	if (__get_user(set.sig[0], &frame->sc.oldmask)
-+	    || (_NSIG_WORDS > 1
-+		&& __copy_from_user(&set.sig[1], &frame->extramask,
-+				    sizeof(frame->extramask))))
-+		goto badframe;
++	. = ALIGN(8);
++	__machvec_start = .;
++	.machvec.init : C_PHYS(.machvec.init) { *(.machvec.init) }
++	__machvec_end = .;
 +
-+	sigdelsetmask(&set, ~_BLOCKABLE);
++	PERCPU(PAGE_SIZE)
 +
-+	spin_lock_irq(&current->sighand->siglock);
-+	current->blocked = set;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
++	/*
++	 * .exit.text is discarded at runtime, not link time, to deal with
++	 * references from __bug_table
++	 */
++	.exit.text : C_PHYS(.exit.text) { EXIT_TEXT }
++	.exit.data : C_PHYS(.exit.data) { EXIT_DATA }
 +
-+	if (restore_sigcontext(regs, &frame->sc, &ret))
-+		goto badframe;
-+	regs->pc -= 4;
++	. = ALIGN(PAGE_SIZE);
++	.bss : C_PHYS(.bss) {
++		__init_end = .;
++		__bss_start = .;		/* BSS */
++		*(.bss.page_aligned)
++		*(.bss)
++		*(COMMON)
++		. = ALIGN(4);
++		_ebss = .;			/* uClinux MTD sucks */
++		_end = . ;
++	}
 +
-+	return (int) ret;
++	/*
++	 * When something in the kernel is NOT compiled as a module, the
++	 * module cleanup code and data are put into these segments. Both
++	 * can then be thrown away, as cleanup code is never called unless
++	 * it's a module.
++	 */
++	/DISCARD/ : {
++		*(.exitcall.exit)
++	}
 +
-+badframe:
-+	force_sig(SIGSEGV, current);
-+	return 0;
++	STABS_DEBUG
++	DWARF_DEBUG
 +}
+diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
+index 9dc7b69..ebb55d1 100644
+--- a/arch/sh/lib/Makefile
++++ b/arch/sh/lib/Makefile
+@@ -2,12 +2,13 @@
+ # Makefile for SuperH-specific library files..
+ #
+ 
+-lib-y  = delay.o memset.o memmove.o memchr.o \
++lib-y  = delay.o io.o memset.o memmove.o memchr.o \
+ 	 checksum.o strlen.o div64.o div64-generic.o
+ 
+ memcpy-y			:= memcpy.o
+ memcpy-$(CONFIG_CPU_SH4)	:= memcpy-sh4.o
+ 
+-lib-y	+= $(memcpy-y)
++lib-$(CONFIG_MMU)		+= copy_page.o clear_page.o
++lib-y				+= $(memcpy-y)
+ 
+ EXTRA_CFLAGS += -Werror
+diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/clear_page.S
+new file mode 100644
+index 0000000..3539123
+--- /dev/null
++++ b/arch/sh/lib/clear_page.S
+@@ -0,0 +1,154 @@
++/*
++ * __clear_user_page, __clear_user, clear_page implementation of SuperH
++ *
++ * Copyright (C) 2001  Kaz Kojima
++ * Copyright (C) 2001, 2002  Niibe Yutaka
++ * Copyright (C) 2006  Paul Mundt
++ */
++#include <linux/linkage.h>
++#include <asm/page.h>
 +
-+asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
-+				unsigned long r4, unsigned long r5,
-+				unsigned long r6, unsigned long r7,
-+				struct pt_regs * regs)
-+{
-+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
-+	sigset_t set;
-+	stack_t __user st;
-+	long long ret;
-+
-+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-+		goto badframe;
-+
-+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-+		goto badframe;
++/*
++ * clear_page
++ * @to: P1 address
++ *
++ * void clear_page(void *to)
++ */
 +
-+	sigdelsetmask(&set, ~_BLOCKABLE);
-+	spin_lock_irq(&current->sighand->siglock);
-+	current->blocked = set;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
++/*
++ * r0 --- scratch
++ * r4 --- to
++ * r5 --- to + PAGE_SIZE
++ */
++ENTRY(clear_page)
++	mov	r4,r5
++	mov.l	.Llimit,r0
++	add	r0,r5
++	mov	#0,r0
++	!
++1:
++#if defined(CONFIG_CPU_SH3)
++	mov.l	r0,@r4
++#elif defined(CONFIG_CPU_SH4)
++	movca.l	r0,@r4
++	mov	r4,r1
++#endif
++	add	#32,r4
++	mov.l	r0,@-r4
++	mov.l	r0,@-r4
++	mov.l	r0,@-r4
++	mov.l	r0,@-r4
++	mov.l	r0,@-r4
++	mov.l	r0,@-r4
++	mov.l	r0,@-r4
++#if defined(CONFIG_CPU_SH4)
++	ocbwb	@r1
++#endif
++	cmp/eq	r5,r4
++	bf/s	1b
++	 add	#28,r4
++	!
++	rts
++	 nop
 +
-+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
-+		goto badframe;
-+	regs->pc -= 4;
++	.balign 4
++.Llimit:	.long	(PAGE_SIZE-28)
 +
-+	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
-+		goto badframe;
-+	/* It is more difficult to avoid calling this function than to
-+	   call it and ignore errors.  */
-+	do_sigaltstack(&st, NULL, REF_REG_SP);
++ENTRY(__clear_user)
++	!
++	mov	#0, r0
++	mov	#0xe0, r1	! 0xffffffe0
++	!
++	! r4..(r4+31)&~32 	   -------- not aligned	[ Area 0 ]
++	! (r4+31)&~32..(r4+r5)&~32 -------- aligned	[ Area 1 ]
++	! (r4+r5)&~32..r4+r5       -------- not aligned	[ Area 2 ]
++	!
++	! Clear area 0
++	mov	r4, r2
++	!
++	tst	r1, r5		! length < 32
++	bt	.Larea2		! skip to remainder
++	!
++	add	#31, r2
++	and	r1, r2
++	cmp/eq	r4, r2
++	bt	.Larea1
++	mov	r2, r3
++	sub	r4, r3
++	mov	r3, r7
++	mov	r4, r2
++	!
++.L0:	dt	r3
++0:	mov.b	r0, @r2
++	bf/s	.L0
++	 add	#1, r2
++	!
++	sub	r7, r5
++	mov	r2, r4
++.Larea1:
++	mov	r4, r3
++	add	r5, r3
++	and	r1, r3
++	cmp/hi	r2, r3
++	bf	.Larea2
++	!
++	! Clear area 1
++#if defined(CONFIG_CPU_SH4)
++1:	movca.l	r0, @r2
++#else
++1:	mov.l	r0, @r2
++#endif
++	add	#4, r2
++2:	mov.l	r0, @r2
++	add	#4, r2
++3:	mov.l	r0, @r2
++	add	#4, r2
++4:	mov.l	r0, @r2
++	add	#4, r2
++5:	mov.l	r0, @r2
++	add	#4, r2
++6:	mov.l	r0, @r2
++	add	#4, r2
++7:	mov.l	r0, @r2
++	add	#4, r2
++8:	mov.l	r0, @r2
++	add	#4, r2
++	cmp/hi	r2, r3
++	bt/s	1b
++	 nop
++	!
++	! Clear area 2
++.Larea2:
++	mov	r4, r3
++	add	r5, r3
++	cmp/hs	r3, r2
++	bt/s	.Ldone
++	 sub	r2, r3
++.L2:	dt	r3
++9:	mov.b	r0, @r2
++	bf/s	.L2
++	 add	#1, r2
++	!
++.Ldone:	rts
++	 mov	#0, r0	! return 0 as normal return
 +
-+	return (int) ret;
++	! return the number of bytes remaining
++.Lbad_clear_user:
++	mov	r4, r0
++	add	r5, r0
++	rts
++	 sub	r2, r0
 +
-+badframe:
-+	force_sig(SIGSEGV, current);
-+	return 0;
-+}
++.section __ex_table,"a"
++	.align 2
++	.long	0b, .Lbad_clear_user
++	.long	1b, .Lbad_clear_user
++	.long	2b, .Lbad_clear_user
++	.long	3b, .Lbad_clear_user
++	.long	4b, .Lbad_clear_user
++	.long	5b, .Lbad_clear_user
++	.long	6b, .Lbad_clear_user
++	.long	7b, .Lbad_clear_user
++	.long	8b, .Lbad_clear_user
++	.long	9b, .Lbad_clear_user
++.previous
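
Every numbered label (0: through 9:) in __clear_user above is a store that may
fault on a bad user pointer; the __ex_table entries just emitted point each of
them at .Lbad_clear_user, which returns how many bytes were left uncleared.
Callers therefore treat a non-zero return as a fault, along these lines (a
sketch, assuming the usual clear_user() wrapper that checks access_ok() first):

	if (clear_user(ubuf, len))	/* non-zero: that many bytes untouched */
		return -EFAULT;
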
+diff --git a/arch/sh/lib/copy_page.S b/arch/sh/lib/copy_page.S
+new file mode 100644
+index 0000000..e002b91
+--- /dev/null
++++ b/arch/sh/lib/copy_page.S
+@@ -0,0 +1,389 @@
++/*
++ * copy_page, __copy_user_page, __copy_user implementation of SuperH
++ *
++ * Copyright (C) 2001  Niibe Yutaka & Kaz Kojima
++ * Copyright (C) 2002  Toshinobu Sugioka
++ * Copyright (C) 2006  Paul Mundt
++ */
++#include <linux/linkage.h>
++#include <asm/page.h>
 +
 +/*
-+ * Set up a signal frame.
++ * copy_page
++ * @to: P1 address
++ * @from: P1 address
++ *
++ * void copy_page(void *to, void *from)
 + */
 +
-+static int
-+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
-+		 unsigned long mask)
-+{
-+	int err = 0;
++/*
++ * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
++ * r8 --- from + PAGE_SIZE
++ * r9 --- not used
++ * r10 --- to
++ * r11 --- from
++ */
++ENTRY(copy_page)
++	mov.l	r8,@-r15
++	mov.l	r10,@-r15
++	mov.l	r11,@-r15
++	mov	r4,r10
++	mov	r5,r11
++	mov	r5,r8
++	mov.l	.Lpsz,r0
++	add	r0,r8
++	!
++1:	mov.l	@r11+,r0
++	mov.l	@r11+,r1
++	mov.l	@r11+,r2
++	mov.l	@r11+,r3
++	mov.l	@r11+,r4
++	mov.l	@r11+,r5
++	mov.l	@r11+,r6
++	mov.l	@r11+,r7
++#if defined(CONFIG_CPU_SH3)
++	mov.l	r0,@r10
++#elif defined(CONFIG_CPU_SH4)
++	movca.l	r0,@r10
++	mov	r10,r0
++#endif
++	add	#32,r10
++	mov.l	r7,@-r10
++	mov.l	r6,@-r10
++	mov.l	r5,@-r10
++	mov.l	r4,@-r10
++	mov.l	r3,@-r10
++	mov.l	r2,@-r10
++	mov.l	r1,@-r10
++#if defined(CONFIG_CPU_SH4)
++	ocbwb	@r0
++#endif
++	cmp/eq	r11,r8
++	bf/s	1b
++	 add	#28,r10
++	!
++	mov.l	@r15+,r11
++	mov.l	@r15+,r10
++	mov.l	@r15+,r8
++	rts
++	 nop
 +
-+	/* Do this first, otherwise is this sets sr->fd, that value isn't preserved. */
-+	err |= setup_sigcontext_fpu(regs, sc);
++	.balign 4
++.Lpsz:	.long	PAGE_SIZE
 +
-+#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
++/*
++ * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
++ * Return the number of bytes NOT copied
++ */
++#define EX(...)			\
++	9999: __VA_ARGS__ ;		\
++	.section __ex_table, "a";	\
++	.long 9999b, 6000f	;	\
++	.previous
++ENTRY(__copy_user)
++	! Check if small number of bytes
++	mov	#11,r0
++	mov	r4,r3
++	cmp/gt	r0,r6		! r6 (len) > r0 (11)
++	bf/s	.L_cleanup_loop_no_pop
++	 add	r6,r3		! last destination address
 +
-+	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
-+	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
-+	COPY(regs[8]);	COPY(regs[9]);	COPY(regs[10]);	COPY(regs[11]);
-+	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
-+	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
-+	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
-+	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
-+	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
-+	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
-+	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
-+	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
-+	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
-+	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
-+	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
-+	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
-+	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
-+	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
-+	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
-+	COPY(sr);	COPY(pc);
++	! Calculate bytes needed to align to src
++	mov.l	r11,@-r15
++	neg	r5,r0
++	mov.l	r10,@-r15
++	add	#4,r0
++	mov.l	r9,@-r15
++	and	#3,r0
++	mov.l	r8,@-r15
++	tst	r0,r0
++	bt	2f
 +
-+#undef COPY
++1:
++	! Copy bytes to long word align src
++EX(	mov.b	@r5+,r1		)
++	dt	r0
++	add	#-1,r6
++EX(	mov.b	r1,@r4		)
++	bf/s	1b
++	 add	#1,r4
 +
-+	err |= __put_user(mask, &sc->oldmask);
++	! Jump to appropriate routine depending on dest
++2:	mov	#3,r1
++	mov	r6, r2
++	and	r4,r1
++	shlr2	r2
++	shll2	r1
++	mova	.L_jump_tbl,r0
++	mov.l	@(r0,r1),r1
++	jmp	@r1
++	 nop
 +
-+	return err;
-+}
++	.align 2
++.L_jump_tbl:
++	.long	.L_dest00
++	.long	.L_dest01
++	.long	.L_dest10
++	.long	.L_dest11
 +
 +/*
-+ * Determine which stack to use..
++ * Come here if there are less than 12 bytes to copy
++ *
++ * Keep the branch target close, so the bf/s callee doesn't overflow
++ * and result in a more expensive branch being inserted. This is the
++ * fast-path for small copies, the jump via the jump table will hit the
++ * default slow-path cleanup. -PFM.
 + */
-+static inline void __user *
-+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
-+{
-+	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
-+		sp = current->sas_ss_sp + current->sas_ss_size;
-+
-+	return (void __user *)((sp - frame_size) & -8ul);
-+}
-+
-+void sa_default_restorer(void);		/* See comments below */
-+void sa_default_rt_restorer(void);	/* See comments below */
-+
-+static void setup_frame(int sig, struct k_sigaction *ka,
-+			sigset_t *set, struct pt_regs *regs)
-+{
-+	struct sigframe __user *frame;
-+	int err = 0;
-+	int signal;
++.L_cleanup_loop_no_pop:
++	tst	r6,r6		! Check explicitly for zero
++	bt	1f
 +
-+	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
++2:
++EX(	mov.b	@r5+,r0		)
++	dt	r6
++EX(	mov.b	r0,@r4		)
++	bf/s	2b
++	 add	#1,r4
 +
-+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-+		goto give_sigsegv;
++1:	mov	#0,r0		! normal return
++5000:
 +
-+	signal = current_thread_info()->exec_domain
-+		&& current_thread_info()->exec_domain->signal_invmap
-+		&& sig < 32
-+		? current_thread_info()->exec_domain->signal_invmap[sig]
-+		: sig;
++# Exception handler:
++.section .fixup, "ax"
++6000:
++	mov.l	8000f,r1
++	mov	r3,r0
++	jmp	@r1
++	 sub	r4,r0
++	.align	2
++8000:	.long	5000b
 +
-+	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
++.previous
++	rts
++	 nop
 +
-+	/* Give up earlier as i386, in case */
-+	if (err)
-+		goto give_sigsegv;
++! Destination = 00
 +
-+	if (_NSIG_WORDS > 1) {
-+		err |= __copy_to_user(frame->extramask, &set->sig[1],
-+				      sizeof(frame->extramask)); }
++.L_dest00:
++	! Skip the large copy for small transfers
++	mov	#(32+32-4), r0
++	cmp/gt	r6, r0		! r0 (60) > r6 (len)
++	bt	1f
 +
-+	/* Give up earlier as i386, in case */
-+	if (err)
-+		goto give_sigsegv;
++	! Align dest to a 32 byte boundary
++	neg	r4,r0
++	add	#0x20, r0
++	and	#0x1f, r0
++	tst	r0, r0
++	bt	2f
 +
-+	/* Set up to return from userspace.  If provided, use a stub
-+	   already in userspace.  */
-+	if (ka->sa.sa_flags & SA_RESTORER) {
-+		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
++	sub	r0, r6
++	shlr2	r0
++3:
++EX(	mov.l	@r5+,r1		)
++	dt	r0
++EX(	mov.l	r1,@r4		)
++	bf/s	3b
++	 add	#4,r4
 +
-+		/*
-+		 * On SH5 all edited pointers are subject to NEFF
-+		 */
-+		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-+        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
-+	} else {
-+		/*
-+		 * Different approach on SH5.
-+	         * . Endianness independent asm code gets placed in entry.S .
-+		 *   This is limited to four ASM instructions corresponding
-+		 *   to two long longs in size.
-+		 * . err checking is done on the else branch only
-+		 * . flush_icache_range() is called upon __put_user() only
-+		 * . all edited pointers are subject to NEFF
-+		 * . being code, linker turns ShMedia bit on, always
-+		 *   dereference index -1.
-+		 */
-+		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
-+		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-+        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
++2:
++EX(	mov.l	@r5+,r0		)
++EX(	mov.l	@r5+,r1		)
++EX(	mov.l	@r5+,r2		)
++EX(	mov.l	@r5+,r7		)
++EX(	mov.l	@r5+,r8		)
++EX(	mov.l	@r5+,r9		)
++EX(	mov.l	@r5+,r10	)
++EX(	mov.l	@r5+,r11	)
++#ifdef CONFIG_CPU_SH4
++EX(	movca.l	r0,@r4		)
++#else
++EX(	mov.l	r0,@r4		)
++#endif
++	add	#-32, r6
++EX(	mov.l	r1,@(4,r4)	)
++	mov	#32, r0
++EX(	mov.l	r2,@(8,r4)	)
++	cmp/gt	r6, r0		! r0 (32) > r6 (len)
++EX(	mov.l	r7,@(12,r4)	)
++EX(	mov.l	r8,@(16,r4)	)
++EX(	mov.l	r9,@(20,r4)	)
++EX(	mov.l	r10,@(24,r4)	)
++EX(	mov.l	r11,@(28,r4)	)
++	bf/s	2b
++	 add	#32,r4
 +
-+		if (__copy_to_user(frame->retcode,
-+			(unsigned long long)sa_default_restorer & (~1), 16) != 0)
-+			goto give_sigsegv;
++1:	mov	r6, r0
++	shlr2	r0
++	tst	r0, r0
++	bt	.L_cleanup
++1:
++EX(	mov.l	@r5+,r1		)
++	dt	r0
++EX(	mov.l	r1,@r4		)
++	bf/s	1b
++	 add	#4,r4
 +
-+		/* Cohere the trampoline with the I-cache. */
-+		flush_cache_sigtramp(DEREF_REG_PR-1);
-+	}
++	bra	.L_cleanup
++	 nop
 +
-+	/*
-+	 * Set up registers for signal handler.
-+	 * All edited pointers are subject to NEFF.
-+	 */
-+	regs->regs[REG_SP] = (unsigned long) frame;
-+	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
-+        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
-+	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
++! Destination = 10
 +
-+        /* FIXME:
-+           The glibc profiling support for SH-5 needs to be passed a sigcontext
-+           so it can retrieve the PC.  At some point during 2003 the glibc
-+           support was changed to receive the sigcontext through the 2nd
-+           argument, but there are still versions of libc.so in use that use
-+           the 3rd argument.  Until libc.so is stabilised, pass the sigcontext
-+           through both 2nd and 3rd arguments.
-+        */
++.L_dest10:
++	mov	r2,r7
++	shlr2	r7
++	shlr	r7
++	tst	r7,r7
++	mov	#7,r0
++	bt/s	1f
++	 and	r0,r2
++2:
++	dt	r7
++#ifdef CONFIG_CPU_LITTLE_ENDIAN
++EX(	mov.l	@r5+,r0		)
++EX(	mov.l	@r5+,r1		)
++EX(	mov.l	@r5+,r8		)
++EX(	mov.l	@r5+,r9		)
++EX(	mov.l	@r5+,r10	)
++EX(	mov.w	r0,@r4		)
++	add	#2,r4
++	xtrct	r1,r0
++	xtrct	r8,r1
++	xtrct	r9,r8
++	xtrct	r10,r9
 +
-+	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
-+	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
++EX(	mov.l	r0,@r4		)
++EX(	mov.l	r1,@(4,r4)	)
++EX(	mov.l	r8,@(8,r4)	)
++EX(	mov.l	r9,@(12,r4)	)
 +
-+	regs->pc = (unsigned long) ka->sa.sa_handler;
-+	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
++EX(	mov.l	@r5+,r1		)
++EX(	mov.l	@r5+,r8		)
++EX(	mov.l	@r5+,r0		)
++	xtrct	r1,r10
++	xtrct	r8,r1
++	xtrct	r0,r8
++	shlr16	r0
++EX(	mov.l	r10,@(16,r4)	)
++EX(	mov.l	r1,@(20,r4)	)
++EX(	mov.l	r8,@(24,r4)	)
++EX(	mov.w	r0,@(28,r4)	)
++	bf/s	2b
++	 add	#30,r4
++#else
++EX(	mov.l	@(28,r5),r0	)
++EX(	mov.l	@(24,r5),r8	)
++EX(	mov.l	@(20,r5),r9	)
++EX(	mov.l	@(16,r5),r10	)
++EX(	mov.w	r0,@(30,r4)	)
++	add	#-2,r4
++	xtrct	r8,r0
++	xtrct	r9,r8
++	xtrct	r10,r9
++EX(	mov.l	r0,@(28,r4)	)
++EX(	mov.l	r8,@(24,r4)	)
++EX(	mov.l	r9,@(20,r4)	)
 +
-+	set_fs(USER_DS);
++EX(	mov.l	@(12,r5),r0	)
++EX(	mov.l	@(8,r5),r8	)
++	xtrct	r0,r10
++EX(	mov.l	@(4,r5),r9	)
++	mov.l	r10,@(16,r4)
++EX(	mov.l	@r5,r10		)
++	xtrct	r8,r0
++	xtrct	r9,r8
++	xtrct	r10,r9
++EX(	mov.l	r0,@(12,r4)	)
++EX(	mov.l	r8,@(8,r4)	)
++	swap.w	r10,r0
++EX(	mov.l	r9,@(4,r4)	)
++EX(	mov.w	r0,@(2,r4)	)
 +
-+#if DEBUG_SIG
-+	/* Broken %016Lx */
-+	printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
-+		signal,
-+		current->comm, current->pid, frame,
-+		regs->pc >> 32, regs->pc & 0xffffffff,
-+		DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
++	add	#32,r5
++	bf/s	2b
++	 add	#34,r4
 +#endif
++	tst	r2,r2
++	bt	.L_cleanup
 +
-+	return;
-+
-+give_sigsegv:
-+	force_sigsegv(sig, current);
-+}
-+
-+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-+			   sigset_t *set, struct pt_regs *regs)
-+{
-+	struct rt_sigframe __user *frame;
-+	int err = 0;
-+	int signal;
-+
-+	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
-+
-+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-+		goto give_sigsegv;
++1:	! Read longword, write two words per iteration
++EX(	mov.l	@r5+,r0		)
++	dt	r2
++#ifdef CONFIG_CPU_LITTLE_ENDIAN
++EX(	mov.w	r0,@r4		)
++	shlr16	r0
++EX(	mov.w 	r0,@(2,r4)	)
++#else
++EX(	mov.w	r0,@(2,r4)	)
++	shlr16	r0
++EX(	mov.w	r0,@r4		)
++#endif
++	bf/s	1b
++	 add	#4,r4
 +
-+	signal = current_thread_info()->exec_domain
-+		&& current_thread_info()->exec_domain->signal_invmap
-+		&& sig < 32
-+		? current_thread_info()->exec_domain->signal_invmap[sig]
-+		: sig;
++	bra	.L_cleanup
++	 nop
 +
-+	err |= __put_user(&frame->info, &frame->pinfo);
-+	err |= __put_user(&frame->uc, &frame->puc);
-+	err |= copy_siginfo_to_user(&frame->info, info);
++! Destination = 01 or 11
 +
-+	/* Give up earlier as i386, in case */
-+	if (err)
-+		goto give_sigsegv;
++.L_dest01:
++.L_dest11:
++	! Read longword, write byte, word, byte per iteration
++EX(	mov.l	@r5+,r0		)
++	dt	r2
++#ifdef CONFIG_CPU_LITTLE_ENDIAN
++EX(	mov.b	r0,@r4		)
++	shlr8	r0
++	add	#1,r4
++EX(	mov.w	r0,@r4		)
++	shlr16	r0
++EX(	mov.b	r0,@(2,r4)	)
++	bf/s	.L_dest01
++	 add	#3,r4
++#else
++EX(	mov.b	r0,@(3,r4)	)
++	shlr8	r0
++	swap.w	r0,r7
++EX(	mov.b	r7,@r4		)
++	add	#1,r4
++EX(	mov.w	r0,@r4		)
++	bf/s	.L_dest01
++	 add	#3,r4
++#endif
 +
-+	/* Create the ucontext.  */
-+	err |= __put_user(0, &frame->uc.uc_flags);
-+	err |= __put_user(0, &frame->uc.uc_link);
-+	err |= __put_user((void *)current->sas_ss_sp,
-+			  &frame->uc.uc_stack.ss_sp);
-+	err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
-+			  &frame->uc.uc_stack.ss_flags);
-+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-+	err |= setup_sigcontext(&frame->uc.uc_mcontext,
-+			        regs, set->sig[0]);
-+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
++! Cleanup last few bytes
++.L_cleanup:
++	mov	r6,r0
++	and	#3,r0
++	tst	r0,r0
++	bt	.L_exit
++	mov	r0,r6
 +
-+	/* Give up earlier as i386, in case */
-+	if (err)
-+		goto give_sigsegv;
++.L_cleanup_loop:
++EX(	mov.b	@r5+,r0		)
++	dt	r6
++EX(	mov.b	r0,@r4		)
++	bf/s	.L_cleanup_loop
++	 add	#1,r4
 +
-+	/* Set up to return from userspace.  If provided, use a stub
-+	   already in userspace.  */
-+	if (ka->sa.sa_flags & SA_RESTORER) {
-+		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
++.L_exit:
++	mov	#0,r0		! normal return
 +
-+		/*
-+		 * On SH5 all edited pointers are subject to NEFF
-+		 */
-+		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-+        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
-+	} else {
-+		/*
-+		 * Different approach on SH5.
-+	         * . Endianness independent asm code gets placed in entry.S .
-+		 *   This is limited to four ASM instructions corresponding
-+		 *   to two long longs in size.
-+		 * . err checking is done on the else branch only
-+		 * . flush_icache_range() is called upon __put_user() only
-+		 * . all edited pointers are subject to NEFF
-+		 * . being code, linker turns ShMedia bit on, always
-+		 *   dereference index -1.
-+		 */
++5000:
 +
-+		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
-+		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-+        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
++# Exception handler:
++.section .fixup, "ax"
++6000:
++	mov.l	8000f,r1
++	mov	r3,r0
++	jmp	@r1
++	 sub	r4,r0
++	.align	2
++8000:	.long	5000b
 +
-+		if (__copy_to_user(frame->retcode,
-+			(unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
-+			goto give_sigsegv;
++.previous
++	mov.l	@r15+,r8
++	mov.l	@r15+,r9
++	mov.l	@r15+,r10
++	rts
++	 mov.l	@r15+,r11
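
The EX() macro used throughout __copy_user records each user-memory access in
__ex_table, so a fault jumps to the local 6000: fixup stub, which computes the
return value as the end-of-destination address (r3) minus the current
destination pointer (r4), i.e. the number of bytes not copied, matching the
comment above ENTRY(__copy_user). A caller typically interprets that like this
(sketch only, using the generic copy_from_user() wrapper built on routines
such as this one):

	size_t left = copy_from_user(kbuf, ubuf, len);	/* 0 on success */
	if (left)
		return -EFAULT;		/* only len - left bytes arrived */
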
+diff --git a/arch/sh/lib/io.c b/arch/sh/lib/io.c
+new file mode 100644
+index 0000000..4f54ec4
+--- /dev/null
++++ b/arch/sh/lib/io.c
+@@ -0,0 +1,82 @@
++/*
++ * arch/sh/lib/io.c - SH32 optimized I/O routines
++ *
++ * Copyright (C) 2000  Stuart Menefy
++ * Copyright (C) 2005  Paul Mundt
++ *
++ * Provide real functions which expand to whatever the header file defined.
++ * Also definitions of machine independent IO functions.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/module.h>
++#include <linux/io.h>
 +
-+		flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
-+	}
++void __raw_readsl(unsigned long addr, void *datap, int len)
++{
++	u32 *data;
 +
-+	/*
-+	 * Set up registers for signal handler.
-+	 * All edited pointers are subject to NEFF.
-+	 */
-+	regs->regs[REG_SP] = (unsigned long) frame;
-+	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
-+        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
-+	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
-+	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
-+	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
-+	regs->pc = (unsigned long) ka->sa.sa_handler;
-+	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
++	for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
++		*data++ = ctrl_inl(addr);
 +
-+	set_fs(USER_DS);
++	if (likely(len >= (0x20 >> 2))) {
++		int tmp2, tmp3, tmp4, tmp5, tmp6;
 +
-+#if DEBUG_SIG
-+	/* Broken %016Lx */
-+	printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
-+		signal,
-+		current->comm, current->pid, frame,
-+		regs->pc >> 32, regs->pc & 0xffffffff,
-+		DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
++		__asm__ __volatile__(
++			"1:			\n\t"
++			"mov.l	@%7, r0		\n\t"
++			"mov.l	@%7, %2		\n\t"
++#ifdef CONFIG_CPU_SH4
++			"movca.l r0, @%0	\n\t"
++#else
++			"mov.l	r0, @%0		\n\t"
 +#endif
++			"mov.l	@%7, %3		\n\t"
++			"mov.l	@%7, %4		\n\t"
++			"mov.l	@%7, %5		\n\t"
++			"mov.l	@%7, %6		\n\t"
++			"mov.l	@%7, r7		\n\t"
++			"mov.l	@%7, r0		\n\t"
++			"mov.l	%2, @(0x04,%0)	\n\t"
++			"mov	#0x20>>2, %2	\n\t"
++			"mov.l	%3, @(0x08,%0)	\n\t"
++			"sub	%2, %1		\n\t"
++			"mov.l	%4, @(0x0c,%0)	\n\t"
++			"cmp/hi	%1, %2		! T if 32 > len	\n\t"
++			"mov.l	%5, @(0x10,%0)	\n\t"
++			"mov.l	%6, @(0x14,%0)	\n\t"
++			"mov.l	r7, @(0x18,%0)	\n\t"
++			"mov.l	r0, @(0x1c,%0)	\n\t"
++			"bf.s	1b		\n\t"
++			" add	#0x20, %0	\n\t"
++			: "=&r" (data), "=&r" (len),
++			  "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
++			  "=&r" (tmp5), "=&r" (tmp6)
++			: "r"(addr), "0" (data), "1" (len)
++			: "r0", "r7", "t", "memory");
++	}
 +
-+	return;
-+
-+give_sigsegv:
-+	force_sigsegv(sig, current);
++	for (; len != 0; len--)
++		*data++ = ctrl_inl(addr);
 +}
++EXPORT_SYMBOL(__raw_readsl);
 +
-+/*
-+ * OK, we're invoking a handler
-+ */
-+
-+static void
-+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-+		sigset_t *oldset, struct pt_regs * regs)
++void __raw_writesl(unsigned long addr, const void *data, int len)
 +{
-+	/* Are we from a system call? */
-+	if (regs->syscall_nr >= 0) {
-+		/* If so, check system call restarting.. */
-+		switch (regs->regs[REG_RET]) {
-+			case -ERESTART_RESTARTBLOCK:
-+			case -ERESTARTNOHAND:
-+				regs->regs[REG_RET] = -EINTR;
-+				break;
++	if (likely(len != 0)) {
++		int tmp1;
 +
-+			case -ERESTARTSYS:
-+				if (!(ka->sa.sa_flags & SA_RESTART)) {
-+					regs->regs[REG_RET] = -EINTR;
-+					break;
-+				}
-+			/* fallthrough */
-+			case -ERESTARTNOINTR:
-+				/* Decode syscall # */
-+				regs->regs[REG_RET] = regs->syscall_nr;
-+				regs->pc -= 4;
-+		}
++		__asm__ __volatile__ (
++			"1:				\n\t"
++			"mov.l	@%0+, %1	\n\t"
++			"dt		%3		\n\t"
++			"bf.s		1b		\n\t"
++			" mov.l	%1, @%4		\n\t"
++			: "=&r" (data), "=&r" (tmp1)
++			: "0" (data), "r" (len), "r"(addr)
++			: "t", "memory");
 +	}
-+
-+	/* Set up the stack frame */
-+	if (ka->sa.sa_flags & SA_SIGINFO)
-+		setup_rt_frame(sig, ka, info, oldset, regs);
-+	else
-+		setup_frame(sig, ka, oldset, regs);
-+
-+	spin_lock_irq(&current->sighand->siglock);
-+	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-+	if (!(ka->sa.sa_flags & SA_NODEFER))
-+		sigaddset(&current->blocked,sig);
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
 +}
++EXPORT_SYMBOL(__raw_writesl);
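
These two routines back the readsl()/writesl() style string-I/O helpers: they
stream a whole buffer through a single register address (typically a device
FIFO), the read side unrolled to 32 bytes per loop. A usage sketch follows;
the driver and fifo_reg (an ioremap()'d register address) are hypothetical:

	u32 buf[64];

	readsl(fifo_reg, buf, ARRAY_SIZE(buf));		/* 64 longwords in from one address */
	writesl(fifo_reg, buf, ARRAY_SIZE(buf));	/* and back out again */
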
+diff --git a/arch/sh/lib64/.gitignore b/arch/sh/lib64/.gitignore
+new file mode 100644
+index 0000000..3508c2c
+--- /dev/null
++++ b/arch/sh/lib64/.gitignore
+@@ -0,0 +1 @@
++syscalltab.h
+diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile
+new file mode 100644
+index 0000000..9950966
+--- /dev/null
++++ b/arch/sh/lib64/Makefile
+@@ -0,0 +1,15 @@
++#
++# Makefile for the SH-5 specific library files..
++#
++# Copyright (C) 2000, 2001  Paolo Alberelli
++# Copyright (C) 2003  Paul Mundt
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License.  See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++
++# Panic should really be compiled as PIC
++lib-y  := udelay.o c-checksum.o dbg.o panic.o memcpy.o copy_user_memcpy.o \
++		copy_page.o clear_page.o
 +
+diff --git a/arch/sh/lib64/c-checksum.c b/arch/sh/lib64/c-checksum.c
+new file mode 100644
+index 0000000..5dfbd8b
+--- /dev/null
++++ b/arch/sh/lib64/c-checksum.c
+@@ -0,0 +1,214 @@
 +/*
-+ * Note that 'init' is a special process: it doesn't get signals it doesn't
-+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
-+ * mistake.
++ * arch/sh/lib64/c-checksum.c
 + *
-+ * Note that we go through the signals twice: once to check the signals that
-+ * the kernel can handle, and then we build all the user-level signal handling
-+ * stack-frames in one go after that.
++ * This file contains network checksum routines that are better done
++ * in an architecture-specific manner due to speed..
 + */
-+int do_signal(struct pt_regs *regs, sigset_t *oldset)
-+{
-+	siginfo_t info;
-+	int signr;
-+	struct k_sigaction ka;
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <asm/byteorder.h>
++#include <asm/uaccess.h>
 +
-+	/*
-+	 * We want the common case to go fast, which
-+	 * is why we may in certain cases get here from
-+	 * kernel mode. Just return without doing anything
-+	 * if so.
-+	 */
-+	if (!user_mode(regs))
-+		return 1;
++static inline unsigned short from64to16(unsigned long long x)
++{
++	/* add up 32-bit words for 33 bits */
++	x = (x & 0xffffffff) + (x >> 32);
++	/* add up 16-bit and 17-bit words for 17+c bits */
++	x = (x & 0xffff) + (x >> 16);
++	/* add up 16-bit and 2-bit for 16+c bit */
++	x = (x & 0xffff) + (x >> 16);
++	/* add up carry.. */
++	x = (x & 0xffff) + (x >> 16);
++	return x;
++}
 +
-+	if (try_to_freeze())
-+		goto no_signal;
++static inline unsigned short foldto16(unsigned long x)
++{
++	/* add up 16-bit for 17 bits */
++	x = (x & 0xffff) + (x >> 16);
++	/* add up carry.. */
++	x = (x & 0xffff) + (x >> 16);
++	return x;
++}
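
The fold helpers above squeeze accumulated carries back into a 16-bit
one's-complement sum by adding the high part into the low part until nothing
overflows. A worked pass through foldto16(), with the values spelled out:

	unsigned long x = 0x2fffe;	/* running 32-bit sum with carries piled up */

	x = (x & 0xffff) + (x >> 16);	/* 0xfffe + 0x2 = 0x10000 */
	x = (x & 0xffff) + (x >> 16);	/* 0x0000 + 0x1 = 0x0001, the folded sum */
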
 +
-+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-+		oldset = &current->saved_sigmask;
-+	else if (!oldset)
-+		oldset = &current->blocked;
++static inline unsigned short myfoldto16(unsigned long long x)
++{
++	/* Fold down to 32-bits so we don't lose in the typedef-less
++	   network stack.  */
++	/* 64 to 33 */
++	x = (x & 0xffffffff) + (x >> 32);
++	/* 33 to 32 */
++	x = (x & 0xffffffff) + (x >> 32);
 +
-+	signr = get_signal_to_deliver(&info, &ka, regs, 0);
++	/* add up 16-bit for 17 bits */
++	x = (x & 0xffff) + (x >> 16);
++	/* add up carry.. */
++	x = (x & 0xffff) + (x >> 16);
++	return x;
++}
 +
-+	if (signr > 0) {
-+		/* Whee!  Actually deliver the signal.  */
-+		handle_signal(signr, &info, &ka, oldset, regs);
++#define odd(x) ((x)&1)
++#define U16(x) ntohs(x)
 +
-+		/*
-+		 * If a signal was successfully delivered, the saved sigmask
-+		 * is in its frame, and we can clear the TIF_RESTORE_SIGMASK
-+		 * flag.
-+		 */
-+		if (test_thread_flag(TIF_RESTORE_SIGMASK))
-+			clear_thread_flag(TIF_RESTORE_SIGMASK);
++static unsigned long do_csum(const unsigned char *buff, int len)
++{
++	int odd, count;
++	unsigned long result = 0;
 +
-+		return 1;
++	pr_debug("do_csum buff %p, len %d (0x%x)\n", buff, len, len);
++#ifdef DEBUG
++	for (i = 0; i < len; i++) {
++		if ((i % 26) == 0)
++			printk("\n");
++		printk("%02X ", buff[i]);
 +	}
++#endif
 +
-+no_signal:
-+	/* Did we come from a system call? */
-+	if (regs->syscall_nr >= 0) {
-+		/* Restart the system call - no handlers present */
-+		switch (regs->regs[REG_RET]) {
-+		case -ERESTARTNOHAND:
-+		case -ERESTARTSYS:
-+		case -ERESTARTNOINTR:
-+			/* Decode Syscall # */
-+			regs->regs[REG_RET] = regs->syscall_nr;
-+			regs->pc -= 4;
-+			break;
++	if (len <= 0)
++		goto out;
 +
-+		case -ERESTART_RESTARTBLOCK:
-+			regs->regs[REG_RET] = __NR_restart_syscall;
-+			regs->pc -= 4;
-+			break;
++	odd = 1 & (unsigned long) buff;
++	if (odd) {
++		result = *buff << 8;
++		len--;
++		buff++;
++	}
++	count = len >> 1;	/* nr of 16-bit words.. */
++	if (count) {
++		if (2 & (unsigned long) buff) {
++			result += *(unsigned short *) buff;
++			count--;
++			len -= 2;
++			buff += 2;
++		}
++		count >>= 1;	/* nr of 32-bit words.. */
++		if (count) {
++			unsigned long carry = 0;
++			do {
++				unsigned long w = *(unsigned long *) buff;
++				buff += 4;
++				count--;
++				result += carry;
++				result += w;
++				carry = (w > result);
++			} while (count);
++			result += carry;
++			result = (result & 0xffff) + (result >> 16);
++		}
++		if (len & 2) {
++			result += *(unsigned short *) buff;
++			buff += 2;
 +		}
 +	}
++	if (len & 1)
++		result += *buff;
++	result = foldto16(result);
++	if (odd)
++		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
 +
-+	/* No signal to deliver -- put the saved sigmask back */
-+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-+		clear_thread_flag(TIF_RESTORE_SIGMASK);
-+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-+	}
++	pr_debug("\nCHECKSUM is 0x%lx\n", result);
 +
-+	return 0;
++      out:
++	return result;
 +}
-diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
-index d545a68..59cd285 100644
---- a/arch/sh/kernel/sys_sh.c
-+++ b/arch/sh/kernel/sys_sh.c
-@@ -7,7 +7,6 @@
-  *
-  * Taken from i386 version.
-  */
--
- #include <linux/errno.h>
- #include <linux/sched.h>
- #include <linux/mm.h>
-@@ -27,28 +26,7 @@
- #include <asm/uaccess.h>
- #include <asm/unistd.h>
- 
--/*
-- * sys_pipe() is the normal C calling standard for creating
-- * a pipe. It's not the way Unix traditionally does this, though.
-- */
--asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
--	unsigned long r6, unsigned long r7,
--	struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	int fd[2];
--	int error;
--
--	error = do_pipe(fd);
--	if (!error) {
--		regs->regs[1] = fd[1];
--		return fd[0];
--	}
--	return error;
--}
--
- unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
--
- EXPORT_SYMBOL(shm_align_mask);
- 
- #ifdef CONFIG_MMU
-@@ -140,7 +118,7 @@ full_search:
- #endif /* CONFIG_MMU */
- 
- static inline long
--do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 
-+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- 	 unsigned long flags, int fd, unsigned long pgoff)
- {
- 	int error = -EBADF;
-@@ -195,12 +173,13 @@ asmlinkage int sys_ipc(uint call, int first, int second,
- 	if (call <= SEMCTL)
- 		switch (call) {
- 		case SEMOP:
--			return sys_semtimedop(first, (struct sembuf __user *)ptr,
-+			return sys_semtimedop(first,
-+					      (struct sembuf __user *)ptr,
- 					      second, NULL);
- 		case SEMTIMEDOP:
--			return sys_semtimedop(first, (struct sembuf __user *)ptr,
--					      second,
--					      (const struct timespec __user *)fifth);
-+			return sys_semtimedop(first,
-+				(struct sembuf __user *)ptr, second,
-+			        (const struct timespec __user *)fifth);
- 		case SEMGET:
- 			return sys_semget (first, second, third);
- 		case SEMCTL: {
-@@ -215,25 +194,28 @@ asmlinkage int sys_ipc(uint call, int first, int second,
- 			return -EINVAL;
- 		}
- 
--	if (call <= MSGCTL) 
-+	if (call <= MSGCTL)
- 		switch (call) {
- 		case MSGSND:
--			return sys_msgsnd (first, (struct msgbuf __user *) ptr, 
-+			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
- 					  second, third);
- 		case MSGRCV:
- 			switch (version) {
--			case 0: {
-+			case 0:
-+			{
- 				struct ipc_kludge tmp;
 +
- 				if (!ptr)
- 					return -EINVAL;
--				
++/* computes the checksum of a memory block at buff, length len,
++   and adds in "sum" (32-bit)  */
++__wsum csum_partial(const void *buff, int len, __wsum sum)
++{
++	unsigned long long result = do_csum(buff, len);
 +
- 				if (copy_from_user(&tmp,
--						   (struct ipc_kludge __user *) ptr, 
-+					(struct ipc_kludge __user *) ptr,
- 						   sizeof (tmp)))
- 					return -EFAULT;
++	/* add in old sum, and carry.. */
++	result += (__force u32)sum;
++	/* 32+c bits -> 32 bits */
++	result = (result & 0xffffffff) + (result >> 32);
 +
- 				return sys_msgrcv (first, tmp.msgp, second,
- 						   tmp.msgtyp, third);
--				}
-+			}
- 			default:
- 				return sys_msgrcv (first,
- 						   (struct msgbuf __user *) ptr,
-@@ -247,7 +229,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
- 		default:
- 			return -EINVAL;
- 		}
--	if (call <= SHMCTL) 
-+	if (call <= SHMCTL)
- 		switch (call) {
- 		case SHMAT:
- 			switch (version) {
-@@ -265,7 +247,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
- 				return do_shmat (first, (char __user *) ptr,
- 						  second, (ulong *) third);
- 			}
--		case SHMDT: 
-+		case SHMDT:
- 			return sys_shmdt ((char __user *)ptr);
- 		case SHMGET:
- 			return sys_shmget (first, second, third);
-@@ -275,7 +257,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
- 		default:
- 			return -EINVAL;
- 		}
--	
++	pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n",
++		buff, len, sum, result);
 +
- 	return -EINVAL;
- }
- 
-@@ -289,49 +271,3 @@ asmlinkage int sys_uname(struct old_utsname * name)
- 	up_read(&uts_sem);
- 	return err?-EFAULT:0;
- }
--
--asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
--			     size_t count, long dummy, loff_t pos)
--{
--	return sys_pread64(fd, buf, count, pos);
--}
--
--asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
--			      size_t count, long dummy, loff_t pos)
--{
--	return sys_pwrite64(fd, buf, count, pos);
--}
--
--asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
--				u32 len0, u32 len1, int advice)
--{
--#ifdef  __LITTLE_ENDIAN__
--	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
--				(u64)len1 << 32 | len0,	advice);
--#else
--	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
--				(u64)len0 << 32 | len1,	advice);
--#endif
--}
--
--#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
--#define SYSCALL_ARG3	"trapa #0x23"
--#else
--#define SYSCALL_ARG3	"trapa #0x13"
--#endif
--
--/*
-- * Do a system call from kernel instead of calling sys_execve so we
-- * end up with proper pt_regs.
-- */
--int kernel_execve(const char *filename, char *const argv[], char *const envp[])
--{
--	register long __sc0 __asm__ ("r3") = __NR_execve;
--	register long __sc4 __asm__ ("r4") = (long) filename;
--	register long __sc5 __asm__ ("r5") = (long) argv;
--	register long __sc6 __asm__ ("r6") = (long) envp;
--	__asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)	
--			: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
--			: "memory");
--	return __sc0;
--}
-diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
-new file mode 100644
-index 0000000..125e493
---- /dev/null
-+++ b/arch/sh/kernel/sys_sh32.c
-@@ -0,0 +1,84 @@
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/sem.h>
-+#include <linux/msg.h>
-+#include <linux/shm.h>
-+#include <linux/stat.h>
-+#include <linux/syscalls.h>
-+#include <linux/mman.h>
-+#include <linux/file.h>
-+#include <linux/utsname.h>
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/ipc.h>
-+#include <asm/cacheflush.h>
-+#include <asm/uaccess.h>
-+#include <asm/unistd.h>
++	return (__force __wsum)result;
++}
 +
-+/*
-+ * sys_pipe() is the normal C calling standard for creating
-+ * a pipe. It's not the way Unix traditionally does this, though.
-+ */
-+asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
-+	unsigned long r6, unsigned long r7,
-+	struct pt_regs __regs)
++/* Copy while checksumming, otherwise like csum_partial.  */
++__wsum
++csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 +{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	int fd[2];
-+	int error;
++	sum = csum_partial(src, len, sum);
++	memcpy(dst, src, len);
 +
-+	error = do_pipe(fd);
-+	if (!error) {
-+		regs->regs[1] = fd[1];
-+		return fd[0];
-+	}
-+	return error;
++	return sum;
 +}
 +
-+asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
-+			     size_t count, long dummy, loff_t pos)
++/* Copy from userspace and compute checksum.  If we catch an exception
++   then zero the rest of the buffer.  */
++__wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len,
++			    __wsum sum, int *err_ptr)
 +{
-+	return sys_pread64(fd, buf, count, pos);
-+}
++	int missing;
 +
-+asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
-+			      size_t count, long dummy, loff_t pos)
-+{
-+	return sys_pwrite64(fd, buf, count, pos);
++	pr_debug
++	    ("csum_partial_copy_from_user src %p, dest %p, len %d, sum %08x, err_ptr %p\n",
++	     src, dst, len, sum, err_ptr);
++	missing = copy_from_user(dst, src, len);
++	pr_debug("  access_ok %d\n", __access_ok((unsigned long) src, len));
++	pr_debug("  missing %d\n", missing);
++	if (missing) {
++		memset(dst + len - missing, 0, missing);
++		*err_ptr = -EFAULT;
++	}
++
++	return csum_partial(dst, len, sum);
 +}
 +
-+asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
-+				u32 len0, u32 len1, int advice)
++/* Copy to userspace and compute checksum.  */
++__wsum
++csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len,
++			  __wsum sum, int *err_ptr)
 +{
-+#ifdef  __LITTLE_ENDIAN__
-+	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
-+				(u64)len1 << 32 | len0,	advice);
-+#else
-+	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
-+				(u64)len0 << 32 | len1,	advice);
-+#endif
-+}
++	sum = csum_partial(src, len, sum);
 +
-+#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
-+#define SYSCALL_ARG3	"trapa #0x23"
-+#else
-+#define SYSCALL_ARG3	"trapa #0x13"
-+#endif
++	if (copy_to_user(dst, src, len))
++		*err_ptr = -EFAULT;
 +
-+/*
-+ * Do a system call from kernel instead of calling sys_execve so we
-+ * end up with proper pt_regs.
-+ */
-+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-+{
-+	register long __sc0 __asm__ ("r3") = __NR_execve;
-+	register long __sc4 __asm__ ("r4") = (long) filename;
-+	register long __sc5 __asm__ ("r5") = (long) argv;
-+	register long __sc6 __asm__ ("r6") = (long) envp;
-+	__asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
-+			: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
-+			: "memory");
-+	return __sc0;
++	return sum;
 +}
-diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
-new file mode 100644
-index 0000000..578004d
---- /dev/null
-+++ b/arch/sh/kernel/sys_sh64.c
-@@ -0,0 +1,66 @@
-+/*
-+ * arch/sh/kernel/sys_sh64.c
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ *
-+ * This file contains various random system calls that
-+ * have a non-standard calling sequence on the Linux/SH5
-+ * platform.
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/errno.h>
-+#include <linux/rwsem.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/fs.h>
-+#include <linux/smp.h>
-+#include <linux/sem.h>
-+#include <linux/msg.h>
-+#include <linux/shm.h>
-+#include <linux/stat.h>
-+#include <linux/mman.h>
-+#include <linux/file.h>
-+#include <linux/utsname.h>
-+#include <linux/syscalls.h>
-+#include <linux/ipc.h>
-+#include <asm/uaccess.h>
-+#include <asm/ptrace.h>
-+#include <asm/unistd.h>
 +
 +/*
-+ * sys_pipe() is the normal C calling standard for creating
-+ * a pipe. It's not the way Unix traditionally does this, though.
++ *	This is a version of ip_compute_csum() optimized for IP headers,
++ *	which always checksum on 4 octet boundaries.
 + */
-+asmlinkage int sys_pipe(unsigned long * fildes)
++__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 +{
-+        int fd[2];
-+        int error;
++	pr_debug("ip_fast_csum %p,%d\n", iph, ihl);
 +
-+        error = do_pipe(fd);
-+        if (!error) {
-+                if (copy_to_user(fildes, fd, 2*sizeof(int)))
-+                        error = -EFAULT;
-+        }
-+        return error;
++	return (__force __sum16)~do_csum(iph, ihl * 4);
 +}
 +
-+/*
-+ * Do a system call from kernel instead of calling sys_execve so we
-+ * end up with proper pt_regs.
-+ */
-+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
++__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
++				unsigned short len,
++				unsigned short proto, __wsum sum)
 +{
-+	register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
-+	register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
-+	register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
-+	register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
-+	__asm__ __volatile__ ("trapa	%1 !\t\t\t execve(%2,%3,%4)"
-+	: "=r" (__sc0)
-+	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
-+	__asm__ __volatile__ ("!dummy	%0 %1 %2 %3"
-+	: : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
-+	return __sc0;
++	unsigned long long result;
++
++	pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead));
++	pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead));
++
++	result = (__force u64) saddr + (__force u64) daddr +
++		 (__force u64) sum + ((len + proto) << 8);
++
++	/* Fold down to 32-bits so we don't lose in the typedef-less
++	   network stack.  */
++	/* 64 to 33 */
++	result = (result & 0xffffffff) + (result >> 32);
++	/* 33 to 32 */
++	result = (result & 0xffffffff) + (result >> 32);
++
++	pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n",
++		__FUNCTION__, saddr, daddr, len, proto, sum, result);
++
++	return (__wsum)result;
 +}
-diff --git a/arch/sh/kernel/syscalls.S b/arch/sh/kernel/syscalls.S
-deleted file mode 100644
-index 10bec45..0000000
---- a/arch/sh/kernel/syscalls.S
-+++ /dev/null
-@@ -1,343 +0,0 @@
--/*
-- * arch/sh/kernel/syscalls.S
-- *
-- * System call table for SuperH
-- *
-- *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
-- *  Copyright (C) 2003  Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- */
--#include <linux/sys.h>
--#include <linux/linkage.h>
--
--	.data
--ENTRY(sys_call_table)
--	.long sys_restart_syscall	/* 0  -  old "setup()" system call*/
--	.long sys_exit
--	.long sys_fork
--	.long sys_read
--	.long sys_write
--	.long sys_open		/* 5 */
--	.long sys_close
--	.long sys_waitpid
--	.long sys_creat
--	.long sys_link
--	.long sys_unlink		/* 10 */
--	.long sys_execve
--	.long sys_chdir
--	.long sys_time
--	.long sys_mknod
--	.long sys_chmod		/* 15 */
--	.long sys_lchown16
--	.long sys_ni_syscall	/* old break syscall holder */
--	.long sys_stat
--	.long sys_lseek
--	.long sys_getpid		/* 20 */
--	.long sys_mount
--	.long sys_oldumount
--	.long sys_setuid16
--	.long sys_getuid16
--	.long sys_stime		/* 25 */
--	.long sys_ptrace
--	.long sys_alarm
--	.long sys_fstat
--	.long sys_pause
--	.long sys_utime		/* 30 */
--	.long sys_ni_syscall	/* old stty syscall holder */
--	.long sys_ni_syscall	/* old gtty syscall holder */
--	.long sys_access
--	.long sys_nice
--	.long sys_ni_syscall	/* 35 */		/* old ftime syscall holder */
--	.long sys_sync
--	.long sys_kill
--	.long sys_rename
--	.long sys_mkdir
--	.long sys_rmdir		/* 40 */
--	.long sys_dup
--	.long sys_pipe
--	.long sys_times
--	.long sys_ni_syscall	/* old prof syscall holder */
--	.long sys_brk		/* 45 */
--	.long sys_setgid16
--	.long sys_getgid16
--	.long sys_signal
--	.long sys_geteuid16
--	.long sys_getegid16	/* 50 */
--	.long sys_acct
--	.long sys_umount		/* recycled never used phys() */
--	.long sys_ni_syscall	/* old lock syscall holder */
--	.long sys_ioctl
--	.long sys_fcntl		/* 55 */
--	.long sys_ni_syscall	/* old mpx syscall holder */
--	.long sys_setpgid
--	.long sys_ni_syscall	/* old ulimit syscall holder */
--	.long sys_ni_syscall	/* sys_olduname */
--	.long sys_umask		/* 60 */
--	.long sys_chroot
--	.long sys_ustat
--	.long sys_dup2
--	.long sys_getppid
--	.long sys_getpgrp		/* 65 */
--	.long sys_setsid
--	.long sys_sigaction
--	.long sys_sgetmask
--	.long sys_ssetmask
--	.long sys_setreuid16	/* 70 */
--	.long sys_setregid16
--	.long sys_sigsuspend
--	.long sys_sigpending
--	.long sys_sethostname
--	.long sys_setrlimit	/* 75 */
--	.long sys_old_getrlimit
--	.long sys_getrusage
--	.long sys_gettimeofday
--	.long sys_settimeofday
--	.long sys_getgroups16	/* 80 */
--	.long sys_setgroups16
--	.long sys_ni_syscall	/* sys_oldselect */
--	.long sys_symlink
--	.long sys_lstat
--	.long sys_readlink		/* 85 */
--	.long sys_uselib
--	.long sys_swapon
--	.long sys_reboot
--	.long old_readdir
--	.long old_mmap		/* 90 */
--	.long sys_munmap
--	.long sys_truncate
--	.long sys_ftruncate
--	.long sys_fchmod
--	.long sys_fchown16		/* 95 */
--	.long sys_getpriority
--	.long sys_setpriority
--	.long sys_ni_syscall	/* old profil syscall holder */
--	.long sys_statfs
--	.long sys_fstatfs		/* 100 */
--	.long sys_ni_syscall	/* ioperm */
--	.long sys_socketcall
--	.long sys_syslog
--	.long sys_setitimer
--	.long sys_getitimer	/* 105 */
--	.long sys_newstat
--	.long sys_newlstat
--	.long sys_newfstat
--	.long sys_uname
--	.long sys_ni_syscall	/* 110 */ /* iopl */
--	.long sys_vhangup
--	.long sys_ni_syscall	/* idle */
--	.long sys_ni_syscall	/* vm86old */
--	.long sys_wait4
--	.long sys_swapoff		/* 115 */
--	.long sys_sysinfo
--	.long sys_ipc
--	.long sys_fsync
--	.long sys_sigreturn
--	.long sys_clone		/* 120 */
--	.long sys_setdomainname
--	.long sys_newuname
--	.long sys_ni_syscall	/* sys_modify_ldt */
--	.long sys_adjtimex
--	.long sys_mprotect		/* 125 */
--	.long sys_sigprocmask
--	.long sys_ni_syscall	/* old "create_module" */
--	.long sys_init_module
--	.long sys_delete_module
--	.long sys_ni_syscall	/* 130: old "get_kernel_syms" */
--	.long sys_quotactl
--	.long sys_getpgid
--	.long sys_fchdir
--	.long sys_bdflush
--	.long sys_sysfs		/* 135 */
--	.long sys_personality
--	.long sys_ni_syscall	/* for afs_syscall */
--	.long sys_setfsuid16
--	.long sys_setfsgid16
--	.long sys_llseek		/* 140 */
--	.long sys_getdents
--	.long sys_select
--	.long sys_flock
--	.long sys_msync
--	.long sys_readv		/* 145 */
--	.long sys_writev
--	.long sys_getsid
--	.long sys_fdatasync
--	.long sys_sysctl
--	.long sys_mlock		/* 150 */
--	.long sys_munlock
--	.long sys_mlockall
--	.long sys_munlockall
--	.long sys_sched_setparam
--	.long sys_sched_getparam   /* 155 */
--	.long sys_sched_setscheduler
--	.long sys_sched_getscheduler
--	.long sys_sched_yield
--	.long sys_sched_get_priority_max
--	.long sys_sched_get_priority_min  /* 160 */
--	.long sys_sched_rr_get_interval
--	.long sys_nanosleep
--	.long sys_mremap
--	.long sys_setresuid16
--	.long sys_getresuid16	/* 165 */
--	.long sys_ni_syscall	/* vm86 */
--	.long sys_ni_syscall	/* old "query_module" */
--	.long sys_poll
--	.long sys_nfsservctl
--	.long sys_setresgid16	/* 170 */
--	.long sys_getresgid16
--	.long sys_prctl
--	.long sys_rt_sigreturn
--	.long sys_rt_sigaction
--	.long sys_rt_sigprocmask	/* 175 */
--	.long sys_rt_sigpending
--	.long sys_rt_sigtimedwait
--	.long sys_rt_sigqueueinfo
--	.long sys_rt_sigsuspend
--	.long sys_pread_wrapper	   /* 180 */
--	.long sys_pwrite_wrapper
--	.long sys_chown16
--	.long sys_getcwd
--	.long sys_capget
--	.long sys_capset           /* 185 */
--	.long sys_sigaltstack
--	.long sys_sendfile
--	.long sys_ni_syscall	/* streams1 */
--	.long sys_ni_syscall	/* streams2 */
--	.long sys_vfork            /* 190 */
--	.long sys_getrlimit
--	.long sys_mmap2
--	.long sys_truncate64
--	.long sys_ftruncate64
--	.long sys_stat64		/* 195 */
--	.long sys_lstat64
--	.long sys_fstat64
--	.long sys_lchown
--	.long sys_getuid
--	.long sys_getgid		/* 200 */
--	.long sys_geteuid
--	.long sys_getegid
--	.long sys_setreuid
--	.long sys_setregid
--	.long sys_getgroups	/* 205 */
--	.long sys_setgroups
--	.long sys_fchown
--	.long sys_setresuid
--	.long sys_getresuid
--	.long sys_setresgid	/* 210 */
--	.long sys_getresgid
--	.long sys_chown
--	.long sys_setuid
--	.long sys_setgid
--	.long sys_setfsuid		/* 215 */
--	.long sys_setfsgid
--	.long sys_pivot_root
--	.long sys_mincore
--	.long sys_madvise
--	.long sys_getdents64	/* 220 */
--	.long sys_fcntl64
--	.long sys_ni_syscall	/* reserved for TUX */
--	.long sys_ni_syscall	/* Reserved for Security */
--	.long sys_gettid
--	.long sys_readahead	/* 225 */
--	.long sys_setxattr
--	.long sys_lsetxattr
--	.long sys_fsetxattr
--	.long sys_getxattr
--	.long sys_lgetxattr	/* 230 */
--	.long sys_fgetxattr
--	.long sys_listxattr
--	.long sys_llistxattr
--	.long sys_flistxattr
--	.long sys_removexattr	/* 235 */
--	.long sys_lremovexattr
--	.long sys_fremovexattr
--	.long sys_tkill
--	.long sys_sendfile64
--	.long sys_futex		/* 240 */
--	.long sys_sched_setaffinity
--	.long sys_sched_getaffinity
--	.long sys_ni_syscall
--	.long sys_ni_syscall
--	.long sys_io_setup	/* 245 */
--	.long sys_io_destroy
--	.long sys_io_getevents
--	.long sys_io_submit
--	.long sys_io_cancel
--	.long sys_fadvise64	/* 250 */
--	.long sys_ni_syscall
--	.long sys_exit_group
--	.long sys_lookup_dcookie
--	.long sys_epoll_create
--	.long sys_epoll_ctl	/* 255 */
--	.long sys_epoll_wait
-- 	.long sys_remap_file_pages
-- 	.long sys_set_tid_address
-- 	.long sys_timer_create
-- 	.long sys_timer_settime		/* 260 */
-- 	.long sys_timer_gettime
-- 	.long sys_timer_getoverrun
-- 	.long sys_timer_delete
-- 	.long sys_clock_settime
-- 	.long sys_clock_gettime		/* 265 */
-- 	.long sys_clock_getres
-- 	.long sys_clock_nanosleep
--	.long sys_statfs64
--	.long sys_fstatfs64
--	.long sys_tgkill		/* 270 */
--	.long sys_utimes
-- 	.long sys_fadvise64_64_wrapper
--	.long sys_ni_syscall	/* Reserved for vserver */
--	.long sys_mbind
--	.long sys_get_mempolicy		/* 275 */
--	.long sys_set_mempolicy
--	.long sys_mq_open
--	.long sys_mq_unlink
--	.long sys_mq_timedsend
--	.long sys_mq_timedreceive       /* 280 */
--	.long sys_mq_notify
--	.long sys_mq_getsetattr
--	.long sys_kexec_load
--	.long sys_waitid
--	.long sys_add_key		/* 285 */
--	.long sys_request_key
--	.long sys_keyctl
--	.long sys_ioprio_set
--	.long sys_ioprio_get
--	.long sys_inotify_init		/* 290 */
--	.long sys_inotify_add_watch
--	.long sys_inotify_rm_watch
--	.long sys_ni_syscall
--	.long sys_migrate_pages
--	.long sys_openat		/* 295 */
--	.long sys_mkdirat
--	.long sys_mknodat
--	.long sys_fchownat
--	.long sys_futimesat
--	.long sys_fstatat64		/* 300 */
--	.long sys_unlinkat
--	.long sys_renameat
--	.long sys_linkat
--	.long sys_symlinkat
--	.long sys_readlinkat		/* 305 */
--	.long sys_fchmodat
--	.long sys_faccessat
--	.long sys_pselect6
--	.long sys_ppoll
--	.long sys_unshare		/* 310 */
--	.long sys_set_robust_list
--	.long sys_get_robust_list
--	.long sys_splice
--	.long sys_sync_file_range
--	.long sys_tee			/* 315 */
--	.long sys_vmsplice
--	.long sys_move_pages
--	.long sys_getcpu
--	.long sys_epoll_pwait
--	.long sys_utimensat		/* 320 */
--	.long sys_signalfd
--	.long sys_timerfd
--	.long sys_eventfd
--	.long sys_fallocate
-diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
++EXPORT_SYMBOL(csum_tcpudp_nofold);
+diff --git a/arch/sh/lib64/clear_page.S b/arch/sh/lib64/clear_page.S
 new file mode 100644
-index 0000000..10bec45
+index 0000000..007ab48
 --- /dev/null
-+++ b/arch/sh/kernel/syscalls_32.S
-@@ -0,0 +1,343 @@
++++ b/arch/sh/lib64/clear_page.S
+@@ -0,0 +1,54 @@
 +/*
-+ * arch/sh/kernel/syscalls.S
-+ *
-+ * System call table for SuperH
-+ *
-+ *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
-+ *  Copyright (C) 2003  Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ */
-+#include <linux/sys.h>
-+#include <linux/linkage.h>
++   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
 +
-+	.data
-+ENTRY(sys_call_table)
-+	.long sys_restart_syscall	/* 0  -  old "setup()" system call*/
-+	.long sys_exit
-+	.long sys_fork
-+	.long sys_read
-+	.long sys_write
-+	.long sys_open		/* 5 */
-+	.long sys_close
-+	.long sys_waitpid
-+	.long sys_creat
-+	.long sys_link
-+	.long sys_unlink		/* 10 */
-+	.long sys_execve
-+	.long sys_chdir
-+	.long sys_time
-+	.long sys_mknod
-+	.long sys_chmod		/* 15 */
-+	.long sys_lchown16
-+	.long sys_ni_syscall	/* old break syscall holder */
-+	.long sys_stat
-+	.long sys_lseek
-+	.long sys_getpid		/* 20 */
-+	.long sys_mount
-+	.long sys_oldumount
-+	.long sys_setuid16
-+	.long sys_getuid16
-+	.long sys_stime		/* 25 */
-+	.long sys_ptrace
-+	.long sys_alarm
-+	.long sys_fstat
-+	.long sys_pause
-+	.long sys_utime		/* 30 */
-+	.long sys_ni_syscall	/* old stty syscall holder */
-+	.long sys_ni_syscall	/* old gtty syscall holder */
-+	.long sys_access
-+	.long sys_nice
-+	.long sys_ni_syscall	/* 35 */		/* old ftime syscall holder */
-+	.long sys_sync
-+	.long sys_kill
-+	.long sys_rename
-+	.long sys_mkdir
-+	.long sys_rmdir		/* 40 */
-+	.long sys_dup
-+	.long sys_pipe
-+	.long sys_times
-+	.long sys_ni_syscall	/* old prof syscall holder */
-+	.long sys_brk		/* 45 */
-+	.long sys_setgid16
-+	.long sys_getgid16
-+	.long sys_signal
-+	.long sys_geteuid16
-+	.long sys_getegid16	/* 50 */
-+	.long sys_acct
-+	.long sys_umount		/* recycled never used phys() */
-+	.long sys_ni_syscall	/* old lock syscall holder */
-+	.long sys_ioctl
-+	.long sys_fcntl		/* 55 */
-+	.long sys_ni_syscall	/* old mpx syscall holder */
-+	.long sys_setpgid
-+	.long sys_ni_syscall	/* old ulimit syscall holder */
-+	.long sys_ni_syscall	/* sys_olduname */
-+	.long sys_umask		/* 60 */
-+	.long sys_chroot
-+	.long sys_ustat
-+	.long sys_dup2
-+	.long sys_getppid
-+	.long sys_getpgrp		/* 65 */
-+	.long sys_setsid
-+	.long sys_sigaction
-+	.long sys_sgetmask
-+	.long sys_ssetmask
-+	.long sys_setreuid16	/* 70 */
-+	.long sys_setregid16
-+	.long sys_sigsuspend
-+	.long sys_sigpending
-+	.long sys_sethostname
-+	.long sys_setrlimit	/* 75 */
-+	.long sys_old_getrlimit
-+	.long sys_getrusage
-+	.long sys_gettimeofday
-+	.long sys_settimeofday
-+	.long sys_getgroups16	/* 80 */
-+	.long sys_setgroups16
-+	.long sys_ni_syscall	/* sys_oldselect */
-+	.long sys_symlink
-+	.long sys_lstat
-+	.long sys_readlink		/* 85 */
-+	.long sys_uselib
-+	.long sys_swapon
-+	.long sys_reboot
-+	.long old_readdir
-+	.long old_mmap		/* 90 */
-+	.long sys_munmap
-+	.long sys_truncate
-+	.long sys_ftruncate
-+	.long sys_fchmod
-+	.long sys_fchown16		/* 95 */
-+	.long sys_getpriority
-+	.long sys_setpriority
-+	.long sys_ni_syscall	/* old profil syscall holder */
-+	.long sys_statfs
-+	.long sys_fstatfs		/* 100 */
-+	.long sys_ni_syscall	/* ioperm */
-+	.long sys_socketcall
-+	.long sys_syslog
-+	.long sys_setitimer
-+	.long sys_getitimer	/* 105 */
-+	.long sys_newstat
-+	.long sys_newlstat
-+	.long sys_newfstat
-+	.long sys_uname
-+	.long sys_ni_syscall	/* 110 */ /* iopl */
-+	.long sys_vhangup
-+	.long sys_ni_syscall	/* idle */
-+	.long sys_ni_syscall	/* vm86old */
-+	.long sys_wait4
-+	.long sys_swapoff		/* 115 */
-+	.long sys_sysinfo
-+	.long sys_ipc
-+	.long sys_fsync
-+	.long sys_sigreturn
-+	.long sys_clone		/* 120 */
-+	.long sys_setdomainname
-+	.long sys_newuname
-+	.long sys_ni_syscall	/* sys_modify_ldt */
-+	.long sys_adjtimex
-+	.long sys_mprotect		/* 125 */
-+	.long sys_sigprocmask
-+	.long sys_ni_syscall	/* old "create_module" */
-+	.long sys_init_module
-+	.long sys_delete_module
-+	.long sys_ni_syscall	/* 130: old "get_kernel_syms" */
-+	.long sys_quotactl
-+	.long sys_getpgid
-+	.long sys_fchdir
-+	.long sys_bdflush
-+	.long sys_sysfs		/* 135 */
-+	.long sys_personality
-+	.long sys_ni_syscall	/* for afs_syscall */
-+	.long sys_setfsuid16
-+	.long sys_setfsgid16
-+	.long sys_llseek		/* 140 */
-+	.long sys_getdents
-+	.long sys_select
-+	.long sys_flock
-+	.long sys_msync
-+	.long sys_readv		/* 145 */
-+	.long sys_writev
-+	.long sys_getsid
-+	.long sys_fdatasync
-+	.long sys_sysctl
-+	.long sys_mlock		/* 150 */
-+	.long sys_munlock
-+	.long sys_mlockall
-+	.long sys_munlockall
-+	.long sys_sched_setparam
-+	.long sys_sched_getparam   /* 155 */
-+	.long sys_sched_setscheduler
-+	.long sys_sched_getscheduler
-+	.long sys_sched_yield
-+	.long sys_sched_get_priority_max
-+	.long sys_sched_get_priority_min  /* 160 */
-+	.long sys_sched_rr_get_interval
-+	.long sys_nanosleep
-+	.long sys_mremap
-+	.long sys_setresuid16
-+	.long sys_getresuid16	/* 165 */
-+	.long sys_ni_syscall	/* vm86 */
-+	.long sys_ni_syscall	/* old "query_module" */
-+	.long sys_poll
-+	.long sys_nfsservctl
-+	.long sys_setresgid16	/* 170 */
-+	.long sys_getresgid16
-+	.long sys_prctl
-+	.long sys_rt_sigreturn
-+	.long sys_rt_sigaction
-+	.long sys_rt_sigprocmask	/* 175 */
-+	.long sys_rt_sigpending
-+	.long sys_rt_sigtimedwait
-+	.long sys_rt_sigqueueinfo
-+	.long sys_rt_sigsuspend
-+	.long sys_pread_wrapper	   /* 180 */
-+	.long sys_pwrite_wrapper
-+	.long sys_chown16
-+	.long sys_getcwd
-+	.long sys_capget
-+	.long sys_capset           /* 185 */
-+	.long sys_sigaltstack
-+	.long sys_sendfile
-+	.long sys_ni_syscall	/* streams1 */
-+	.long sys_ni_syscall	/* streams2 */
-+	.long sys_vfork            /* 190 */
-+	.long sys_getrlimit
-+	.long sys_mmap2
-+	.long sys_truncate64
-+	.long sys_ftruncate64
-+	.long sys_stat64		/* 195 */
-+	.long sys_lstat64
-+	.long sys_fstat64
-+	.long sys_lchown
-+	.long sys_getuid
-+	.long sys_getgid		/* 200 */
-+	.long sys_geteuid
-+	.long sys_getegid
-+	.long sys_setreuid
-+	.long sys_setregid
-+	.long sys_getgroups	/* 205 */
-+	.long sys_setgroups
-+	.long sys_fchown
-+	.long sys_setresuid
-+	.long sys_getresuid
-+	.long sys_setresgid	/* 210 */
-+	.long sys_getresgid
-+	.long sys_chown
-+	.long sys_setuid
-+	.long sys_setgid
-+	.long sys_setfsuid		/* 215 */
-+	.long sys_setfsgid
-+	.long sys_pivot_root
-+	.long sys_mincore
-+	.long sys_madvise
-+	.long sys_getdents64	/* 220 */
-+	.long sys_fcntl64
-+	.long sys_ni_syscall	/* reserved for TUX */
-+	.long sys_ni_syscall	/* Reserved for Security */
-+	.long sys_gettid
-+	.long sys_readahead	/* 225 */
-+	.long sys_setxattr
-+	.long sys_lsetxattr
-+	.long sys_fsetxattr
-+	.long sys_getxattr
-+	.long sys_lgetxattr	/* 230 */
-+	.long sys_fgetxattr
-+	.long sys_listxattr
-+	.long sys_llistxattr
-+	.long sys_flistxattr
-+	.long sys_removexattr	/* 235 */
-+	.long sys_lremovexattr
-+	.long sys_fremovexattr
-+	.long sys_tkill
-+	.long sys_sendfile64
-+	.long sys_futex		/* 240 */
-+	.long sys_sched_setaffinity
-+	.long sys_sched_getaffinity
-+	.long sys_ni_syscall
-+	.long sys_ni_syscall
-+	.long sys_io_setup	/* 245 */
-+	.long sys_io_destroy
-+	.long sys_io_getevents
-+	.long sys_io_submit
-+	.long sys_io_cancel
-+	.long sys_fadvise64	/* 250 */
-+	.long sys_ni_syscall
-+	.long sys_exit_group
-+	.long sys_lookup_dcookie
-+	.long sys_epoll_create
-+	.long sys_epoll_ctl	/* 255 */
-+	.long sys_epoll_wait
-+ 	.long sys_remap_file_pages
-+ 	.long sys_set_tid_address
-+ 	.long sys_timer_create
-+ 	.long sys_timer_settime		/* 260 */
-+ 	.long sys_timer_gettime
-+ 	.long sys_timer_getoverrun
-+ 	.long sys_timer_delete
-+ 	.long sys_clock_settime
-+ 	.long sys_clock_gettime		/* 265 */
-+ 	.long sys_clock_getres
-+ 	.long sys_clock_nanosleep
-+	.long sys_statfs64
-+	.long sys_fstatfs64
-+	.long sys_tgkill		/* 270 */
-+	.long sys_utimes
-+ 	.long sys_fadvise64_64_wrapper
-+	.long sys_ni_syscall	/* Reserved for vserver */
-+	.long sys_mbind
-+	.long sys_get_mempolicy		/* 275 */
-+	.long sys_set_mempolicy
-+	.long sys_mq_open
-+	.long sys_mq_unlink
-+	.long sys_mq_timedsend
-+	.long sys_mq_timedreceive       /* 280 */
-+	.long sys_mq_notify
-+	.long sys_mq_getsetattr
-+	.long sys_kexec_load
-+	.long sys_waitid
-+	.long sys_add_key		/* 285 */
-+	.long sys_request_key
-+	.long sys_keyctl
-+	.long sys_ioprio_set
-+	.long sys_ioprio_get
-+	.long sys_inotify_init		/* 290 */
-+	.long sys_inotify_add_watch
-+	.long sys_inotify_rm_watch
-+	.long sys_ni_syscall
-+	.long sys_migrate_pages
-+	.long sys_openat		/* 295 */
-+	.long sys_mkdirat
-+	.long sys_mknodat
-+	.long sys_fchownat
-+	.long sys_futimesat
-+	.long sys_fstatat64		/* 300 */
-+	.long sys_unlinkat
-+	.long sys_renameat
-+	.long sys_linkat
-+	.long sys_symlinkat
-+	.long sys_readlinkat		/* 305 */
-+	.long sys_fchmodat
-+	.long sys_faccessat
-+	.long sys_pselect6
-+	.long sys_ppoll
-+	.long sys_unshare		/* 310 */
-+	.long sys_set_robust_list
-+	.long sys_get_robust_list
-+	.long sys_splice
-+	.long sys_sync_file_range
-+	.long sys_tee			/* 315 */
-+	.long sys_vmsplice
-+	.long sys_move_pages
-+	.long sys_getcpu
-+	.long sys_epoll_pwait
-+	.long sys_utimensat		/* 320 */
-+	.long sys_signalfd
-+	.long sys_timerfd
-+	.long sys_eventfd
-+	.long sys_fallocate
-diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
-new file mode 100644
-index 0000000..98a93ef
---- /dev/null
-+++ b/arch/sh/kernel/syscalls_64.S
-@@ -0,0 +1,381 @@
-+/*
-+ * arch/sh/kernel/syscalls_64.S
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2004 - 2007  Paul Mundt
-+ * Copyright (C) 2003, 2004 Richard Curnow
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
++   This file is subject to the terms and conditions of the GNU General Public
++   License.  See the file "COPYING" in the main directory of this archive
++   for more details.
 +
-+#include <linux/sys.h>
++   Tight version of memset for the case of just clearing a page.  It turns out
++   that having the alloco's spaced out slightly due to the increment/branch
++   pair causes them to contend less for access to the cache.  Similarly,
++   keeping the stores apart from the allocos causes less contention.  => Do two
++   separate loops.  Do multiple stores per loop to amortise the
++   increment/branch cost a little.
 +
-+	.section .data, "aw"
-+	.balign 32
++   Parameters:
++   r2 : source effective address (start of page)
 +
-+/*
-+ * System calls jump table
-+ */
-+	.globl  sys_call_table
-+sys_call_table:
-+	.long sys_restart_syscall	/* 0  -  old "setup()" system call  */
-+	.long sys_exit
-+	.long sys_fork
-+	.long sys_read
-+	.long sys_write
-+	.long sys_open			/* 5 */
-+	.long sys_close
-+	.long sys_waitpid
-+	.long sys_creat
-+	.long sys_link
-+	.long sys_unlink		/* 10 */
-+	.long sys_execve
-+	.long sys_chdir
-+	.long sys_time
-+	.long sys_mknod
-+	.long sys_chmod			/* 15 */
-+	.long sys_lchown16
-+	.long sys_ni_syscall	/* old break syscall holder */
-+	.long sys_stat
-+	.long sys_lseek
-+	.long sys_getpid		/* 20 */
-+	.long sys_mount
-+	.long sys_oldumount
-+	.long sys_setuid16
-+	.long sys_getuid16
-+	.long sys_stime			/* 25 */
-+	.long sh64_ptrace
-+	.long sys_alarm
-+	.long sys_fstat
-+	.long sys_pause
-+	.long sys_utime			/* 30 */
-+	.long sys_ni_syscall	/* old stty syscall holder */
-+	.long sys_ni_syscall	/* old gtty syscall holder */
-+	.long sys_access
-+	.long sys_nice
-+	.long sys_ni_syscall		/* 35 */ /* old ftime syscall holder */
-+	.long sys_sync
-+	.long sys_kill
-+	.long sys_rename
-+	.long sys_mkdir
-+	.long sys_rmdir			/* 40 */
-+	.long sys_dup
-+	.long sys_pipe
-+	.long sys_times
-+	.long sys_ni_syscall	/* old prof syscall holder */
-+	.long sys_brk			/* 45 */
-+	.long sys_setgid16
-+	.long sys_getgid16
-+	.long sys_signal
-+	.long sys_geteuid16
-+	.long sys_getegid16		/* 50 */
-+	.long sys_acct
-+	.long sys_umount		/* recycled never used phys( */
-+	.long sys_ni_syscall	/* old lock syscall holder */
-+	.long sys_ioctl
-+	.long sys_fcntl			/* 55 */
-+	.long sys_ni_syscall	/* old mpx syscall holder */
-+	.long sys_setpgid
-+	.long sys_ni_syscall	/* old ulimit syscall holder */
-+	.long sys_ni_syscall	/* sys_olduname */
-+	.long sys_umask			/* 60 */
-+	.long sys_chroot
-+	.long sys_ustat
-+	.long sys_dup2
-+	.long sys_getppid
-+	.long sys_getpgrp		/* 65 */
-+	.long sys_setsid
-+	.long sys_sigaction
-+	.long sys_sgetmask
-+	.long sys_ssetmask
-+	.long sys_setreuid16		/* 70 */
-+	.long sys_setregid16
-+	.long sys_sigsuspend
-+	.long sys_sigpending
-+	.long sys_sethostname
-+	.long sys_setrlimit		/* 75 */
-+	.long sys_old_getrlimit
-+	.long sys_getrusage
-+	.long sys_gettimeofday
-+	.long sys_settimeofday
-+	.long sys_getgroups16		/* 80 */
-+	.long sys_setgroups16
-+	.long sys_ni_syscall	/* sys_oldselect */
-+	.long sys_symlink
-+	.long sys_lstat
-+	.long sys_readlink		/* 85 */
-+	.long sys_uselib
-+	.long sys_swapon
-+	.long sys_reboot
-+	.long old_readdir
-+	.long old_mmap			/* 90 */
-+	.long sys_munmap
-+	.long sys_truncate
-+	.long sys_ftruncate
-+	.long sys_fchmod
-+	.long sys_fchown16		/* 95 */
-+	.long sys_getpriority
-+	.long sys_setpriority
-+	.long sys_ni_syscall	/* old profil syscall holder */
-+	.long sys_statfs
-+	.long sys_fstatfs		/* 100 */
-+	.long sys_ni_syscall	/* ioperm */
-+	.long sys_socketcall	/* Obsolete implementation of socket syscall */
-+	.long sys_syslog
-+	.long sys_setitimer
-+	.long sys_getitimer		/* 105 */
-+	.long sys_newstat
-+	.long sys_newlstat
-+	.long sys_newfstat
-+	.long sys_uname
-+	.long sys_ni_syscall		/* 110 */ /* iopl */
-+	.long sys_vhangup
-+	.long sys_ni_syscall	/* idle */
-+	.long sys_ni_syscall	/* vm86old */
-+	.long sys_wait4
-+	.long sys_swapoff		/* 115 */
-+	.long sys_sysinfo
-+	.long sys_ipc		/* Obsolete ipc syscall implementation */
-+	.long sys_fsync
-+	.long sys_sigreturn
-+	.long sys_clone			/* 120 */
-+	.long sys_setdomainname
-+	.long sys_newuname
-+	.long sys_ni_syscall	/* sys_modify_ldt */
-+	.long sys_adjtimex
-+	.long sys_mprotect		/* 125 */
-+	.long sys_sigprocmask
-+	.long sys_ni_syscall		/* old "create_module" */
-+	.long sys_init_module
-+	.long sys_delete_module
-+	.long sys_ni_syscall		/* 130: old "get_kernel_syms" */
-+	.long sys_quotactl
-+	.long sys_getpgid
-+	.long sys_fchdir
-+	.long sys_bdflush
-+	.long sys_sysfs			/* 135 */
-+	.long sys_personality
-+	.long sys_ni_syscall	/* for afs_syscall */
-+	.long sys_setfsuid16
-+	.long sys_setfsgid16
-+	.long sys_llseek		/* 140 */
-+	.long sys_getdents
-+	.long sys_select
-+	.long sys_flock
-+	.long sys_msync
-+	.long sys_readv			/* 145 */
-+	.long sys_writev
-+	.long sys_getsid
-+	.long sys_fdatasync
-+	.long sys_sysctl
-+	.long sys_mlock			/* 150 */
-+	.long sys_munlock
-+	.long sys_mlockall
-+	.long sys_munlockall
-+	.long sys_sched_setparam
-+	.long sys_sched_getparam	/* 155 */
-+	.long sys_sched_setscheduler
-+	.long sys_sched_getscheduler
-+	.long sys_sched_yield
-+	.long sys_sched_get_priority_max
-+	.long sys_sched_get_priority_min  /* 160 */
-+	.long sys_sched_rr_get_interval
-+	.long sys_nanosleep
-+	.long sys_mremap
-+	.long sys_setresuid16
-+	.long sys_getresuid16		/* 165 */
-+	.long sys_ni_syscall	/* vm86 */
-+	.long sys_ni_syscall	/* old "query_module" */
-+	.long sys_poll
-+	.long sys_nfsservctl
-+	.long sys_setresgid16		/* 170 */
-+	.long sys_getresgid16
-+	.long sys_prctl
-+	.long sys_rt_sigreturn
-+	.long sys_rt_sigaction
-+	.long sys_rt_sigprocmask	/* 175 */
-+	.long sys_rt_sigpending
-+	.long sys_rt_sigtimedwait
-+	.long sys_rt_sigqueueinfo
-+	.long sys_rt_sigsuspend
-+	.long sys_pread64		/* 180 */
-+	.long sys_pwrite64
-+	.long sys_chown16
-+	.long sys_getcwd
-+	.long sys_capget
-+	.long sys_capset		/* 185 */
-+	.long sys_sigaltstack
-+	.long sys_sendfile
-+	.long sys_ni_syscall	/* streams1 */
-+	.long sys_ni_syscall	/* streams2 */
-+	.long sys_vfork			/* 190 */
-+	.long sys_getrlimit
-+	.long sys_mmap2
-+	.long sys_truncate64
-+	.long sys_ftruncate64
-+	.long sys_stat64		/* 195 */
-+	.long sys_lstat64
-+	.long sys_fstat64
-+	.long sys_lchown
-+	.long sys_getuid
-+	.long sys_getgid		/* 200 */
-+	.long sys_geteuid
-+	.long sys_getegid
-+	.long sys_setreuid
-+	.long sys_setregid
-+	.long sys_getgroups		/* 205 */
-+	.long sys_setgroups
-+	.long sys_fchown
-+	.long sys_setresuid
-+	.long sys_getresuid
-+	.long sys_setresgid		/* 210 */
-+	.long sys_getresgid
-+	.long sys_chown
-+	.long sys_setuid
-+	.long sys_setgid
-+	.long sys_setfsuid		/* 215 */
-+	.long sys_setfsgid
-+	.long sys_pivot_root
-+	.long sys_mincore
-+	.long sys_madvise
-+	/* Broken-out socket family (maintain backwards compatibility in syscall
-+	   numbering with 2.4) */
-+	.long sys_socket		/* 220 */
-+	.long sys_bind
-+	.long sys_connect
-+	.long sys_listen
-+	.long sys_accept
-+	.long sys_getsockname		/* 225 */
-+	.long sys_getpeername
-+	.long sys_socketpair
-+	.long sys_send
-+	.long sys_sendto
-+	.long sys_recv			/* 230*/
-+	.long sys_recvfrom
-+	.long sys_shutdown
-+	.long sys_setsockopt
-+	.long sys_getsockopt
-+	.long sys_sendmsg		/* 235 */
-+	.long sys_recvmsg
-+	/* Broken-out IPC family (maintain backwards compatibility in syscall
-+	   numbering with 2.4) */
-+	.long sys_semop
-+	.long sys_semget
-+	.long sys_semctl
-+	.long sys_msgsnd		/* 240 */
-+	.long sys_msgrcv
-+	.long sys_msgget
-+	.long sys_msgctl
-+	.long sys_shmat
-+	.long sys_shmdt			/* 245 */
-+	.long sys_shmget
-+	.long sys_shmctl
-+	/* Rest of syscalls listed in 2.4 i386 unistd.h */
-+	.long sys_getdents64
-+	.long sys_fcntl64
-+	.long sys_ni_syscall		/* 250 reserved for TUX */
-+	.long sys_ni_syscall		/* Reserved for Security */
-+	.long sys_gettid
-+	.long sys_readahead
-+	.long sys_setxattr
-+	.long sys_lsetxattr		/* 255 */
-+	.long sys_fsetxattr
-+	.long sys_getxattr
-+	.long sys_lgetxattr
-+	.long sys_fgetxattr
-+	.long sys_listxattr		/* 260 */
-+	.long sys_llistxattr
-+	.long sys_flistxattr
-+	.long sys_removexattr
-+	.long sys_lremovexattr
-+	.long sys_fremovexattr  	/* 265 */
-+	.long sys_tkill
-+	.long sys_sendfile64
-+	.long sys_futex
-+	.long sys_sched_setaffinity
-+	.long sys_sched_getaffinity	/* 270 */
-+	.long sys_ni_syscall
-+	.long sys_ni_syscall
-+	.long sys_io_setup
-+	.long sys_io_destroy
-+	.long sys_io_getevents		/* 275 */
-+	.long sys_io_submit
-+	.long sys_io_cancel
-+	.long sys_fadvise64
-+	.long sys_ni_syscall
-+	.long sys_exit_group		/* 280 */
-+	/* Rest of new 2.6 syscalls */
-+	.long sys_lookup_dcookie
-+	.long sys_epoll_create
-+	.long sys_epoll_ctl
-+	.long sys_epoll_wait
-+ 	.long sys_remap_file_pages	/* 285 */
-+ 	.long sys_set_tid_address
-+ 	.long sys_timer_create
-+ 	.long sys_timer_settime
-+ 	.long sys_timer_gettime
-+ 	.long sys_timer_getoverrun	/* 290 */
-+ 	.long sys_timer_delete
-+ 	.long sys_clock_settime
-+ 	.long sys_clock_gettime
-+ 	.long sys_clock_getres
-+ 	.long sys_clock_nanosleep	/* 295 */
-+	.long sys_statfs64
-+	.long sys_fstatfs64
-+	.long sys_tgkill
-+	.long sys_utimes
-+ 	.long sys_fadvise64_64		/* 300 */
-+	.long sys_ni_syscall	/* Reserved for vserver */
-+	.long sys_ni_syscall	/* Reserved for mbind */
-+	.long sys_ni_syscall	/* get_mempolicy */
-+	.long sys_ni_syscall	/* set_mempolicy */
-+	.long sys_mq_open		/* 305 */
-+	.long sys_mq_unlink
-+	.long sys_mq_timedsend
-+	.long sys_mq_timedreceive
-+	.long sys_mq_notify
-+	.long sys_mq_getsetattr		/* 310 */
-+	.long sys_ni_syscall	/* Reserved for kexec */
-+	.long sys_waitid
-+	.long sys_add_key
-+	.long sys_request_key
-+	.long sys_keyctl		/* 315 */
-+	.long sys_ioprio_set
-+	.long sys_ioprio_get
-+	.long sys_inotify_init
-+	.long sys_inotify_add_watch
-+	.long sys_inotify_rm_watch	/* 320 */
-+	.long sys_ni_syscall
-+	.long sys_migrate_pages
-+	.long sys_openat
-+	.long sys_mkdirat
-+	.long sys_mknodat		/* 325 */
-+	.long sys_fchownat
-+	.long sys_futimesat
-+	.long sys_fstatat64
-+	.long sys_unlinkat
-+	.long sys_renameat		/* 330 */
-+	.long sys_linkat
-+	.long sys_symlinkat
-+	.long sys_readlinkat
-+	.long sys_fchmodat
-+	.long sys_faccessat		/* 335 */
-+	.long sys_pselect6
-+	.long sys_ppoll
-+	.long sys_unshare
-+	.long sys_set_robust_list
-+	.long sys_get_robust_list	/* 340 */
-+	.long sys_splice
-+	.long sys_sync_file_range
-+	.long sys_tee
-+	.long sys_vmsplice
-+	.long sys_move_pages		/* 345 */
-+	.long sys_getcpu
-+	.long sys_epoll_pwait
-+	.long sys_utimensat
-+	.long sys_signalfd
-+	.long sys_timerfd		/* 350 */
-+	.long sys_eventfd
-+	.long sys_fallocate
-diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
-deleted file mode 100644
-index a3a67d1..0000000
---- a/arch/sh/kernel/time.c
-+++ /dev/null
-@@ -1,269 +0,0 @@
--/*
-- *  arch/sh/kernel/time.c
-- *
-- *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
-- *  Copyright (C) 2000  Philipp Rumpf <prumpf at tux.org>
-- *  Copyright (C) 2002 - 2007  Paul Mundt
-- *  Copyright (C) 2002  M. R. Brown  <mrbrown at linux-sh.org>
-- *
-- *  Some code taken from i386 version.
-- *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
-- */
--#include <linux/kernel.h>
--#include <linux/module.h>
--#include <linux/init.h>
--#include <linux/profile.h>
--#include <linux/timex.h>
--#include <linux/sched.h>
--#include <linux/clockchips.h>
--#include <asm/clock.h>
--#include <asm/rtc.h>
--#include <asm/timer.h>
--#include <asm/kgdb.h>
--
--struct sys_timer *sys_timer;
--
--/* Move this somewhere more sensible.. */
--DEFINE_SPINLOCK(rtc_lock);
--EXPORT_SYMBOL(rtc_lock);
--
--/* Dummy RTC ops */
--static void null_rtc_get_time(struct timespec *tv)
--{
--	tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
--	tv->tv_nsec = 0;
--}
--
--static int null_rtc_set_time(const time_t secs)
--{
--	return 0;
--}
--
--/*
-- * Null high precision timer functions for systems lacking one.
-- */
--static cycle_t null_hpt_read(void)
--{
--	return 0;
--}
--
--void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
--int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
--
--#ifndef CONFIG_GENERIC_TIME
--void do_gettimeofday(struct timeval *tv)
--{
--	unsigned long flags;
--	unsigned long seq;
--	unsigned long usec, sec;
--
--	do {
--		/*
--		 * Turn off IRQs when grabbing xtime_lock, so that
--		 * the sys_timer get_offset code doesn't have to handle it.
--		 */
--		seq = read_seqbegin_irqsave(&xtime_lock, flags);
--		usec = get_timer_offset();
--		sec = xtime.tv_sec;
--		usec += xtime.tv_nsec / NSEC_PER_USEC;
--	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
--
--	while (usec >= 1000000) {
--		usec -= 1000000;
--		sec++;
--	}
--
--	tv->tv_sec = sec;
--	tv->tv_usec = usec;
--}
--EXPORT_SYMBOL(do_gettimeofday);
--
--int do_settimeofday(struct timespec *tv)
--{
--	time_t wtm_sec, sec = tv->tv_sec;
--	long wtm_nsec, nsec = tv->tv_nsec;
--
--	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
--		return -EINVAL;
--
--	write_seqlock_irq(&xtime_lock);
--	/*
--	 * This is revolting. We need to set "xtime" correctly. However, the
--	 * value in this location is the value at the most recent update of
--	 * wall time.  Discover what correction gettimeofday() would have
--	 * made, and then undo it!
--	 */
--	nsec -= get_timer_offset() * NSEC_PER_USEC;
--
--	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
--	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
--
--	set_normalized_timespec(&xtime, sec, nsec);
--	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
--
--	ntp_clear();
--	write_sequnlock_irq(&xtime_lock);
--	clock_was_set();
--
--	return 0;
--}
--EXPORT_SYMBOL(do_settimeofday);
--#endif /* !CONFIG_GENERIC_TIME */
--
--#ifndef CONFIG_GENERIC_CLOCKEVENTS
--/* last time the RTC clock got updated */
--static long last_rtc_update;
--
--/*
-- * handle_timer_tick() needs to keep up the real-time clock,
-- * as well as call the "do_timer()" routine every clocktick
-- */
--void handle_timer_tick(void)
--{
--	do_timer(1);
--#ifndef CONFIG_SMP
--	update_process_times(user_mode(get_irq_regs()));
--#endif
--	if (current->pid)
--		profile_tick(CPU_PROFILING);
--
--#ifdef CONFIG_HEARTBEAT
--	if (sh_mv.mv_heartbeat != NULL)
--		sh_mv.mv_heartbeat();
--#endif
--
--	/*
--	 * If we have an externally synchronized Linux clock, then update
--	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
--	 * called as close as possible to 500 ms before the new second starts.
--	 */
--	if (ntp_synced() &&
--	    xtime.tv_sec > last_rtc_update + 660 &&
--	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
--	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
--		if (rtc_sh_set_time(xtime.tv_sec) == 0)
--			last_rtc_update = xtime.tv_sec;
--		else
--			/* do it again in 60s */
--			last_rtc_update = xtime.tv_sec - 600;
--	}
--}
--#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
--
--#ifdef CONFIG_PM
--int timer_suspend(struct sys_device *dev, pm_message_t state)
--{
--	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
--
--	sys_timer->ops->stop();
--
--	return 0;
--}
--
--int timer_resume(struct sys_device *dev)
--{
--	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
--
--	sys_timer->ops->start();
--
--	return 0;
--}
--#else
--#define timer_suspend NULL
--#define timer_resume NULL
--#endif
--
--static struct sysdev_class timer_sysclass = {
--	set_kset_name("timer"),
--	.suspend = timer_suspend,
--	.resume	 = timer_resume,
--};
--
--static int __init timer_init_sysfs(void)
--{
--	int ret = sysdev_class_register(&timer_sysclass);
--	if (ret != 0)
--		return ret;
--
--	sys_timer->dev.cls = &timer_sysclass;
--	return sysdev_register(&sys_timer->dev);
--}
--device_initcall(timer_init_sysfs);
--
--void (*board_time_init)(void);
--
--/*
-- * Shamelessly based on the MIPS and Sparc64 work.
-- */
--static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
--unsigned long sh_hpt_frequency = 0;
--
--#define NSEC_PER_CYC_SHIFT	10
--
--struct clocksource clocksource_sh = {
--	.name		= "SuperH",
--	.rating		= 200,
--	.mask		= CLOCKSOURCE_MASK(32),
--	.read		= null_hpt_read,
--	.shift		= 16,
--	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
--};
--
--static void __init init_sh_clocksource(void)
--{
--	if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
--		return;
--
--	clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
--						  clocksource_sh.shift);
--
--	timer_ticks_per_nsec_quotient =
--		clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
--
--	clocksource_register(&clocksource_sh);
--}
--
--#ifdef CONFIG_GENERIC_TIME
--unsigned long long sched_clock(void)
--{
--	unsigned long long ticks = clocksource_sh.read();
--	return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
--}
--#endif
--
--void __init time_init(void)
--{
--	if (board_time_init)
--		board_time_init();
--
--	clk_init();
--
--	rtc_sh_get_time(&xtime);
--	set_normalized_timespec(&wall_to_monotonic,
--				-xtime.tv_sec, -xtime.tv_nsec);
--
--	/*
--	 * Find the timer to use as the system timer, it will be
--	 * initialized for us.
--	 */
--	sys_timer = get_sys_timer();
--	printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
--
--	if (sys_timer->ops->read)
--		clocksource_sh.read = sys_timer->ops->read;
--
--	init_sh_clocksource();
--
--	if (sh_hpt_frequency)
--		printk("Using %lu.%03lu MHz high precision timer.\n",
--		       ((sh_hpt_frequency + 500) / 1000) / 1000,
--		       ((sh_hpt_frequency + 500) / 1000) % 1000);
--
--#if defined(CONFIG_SH_KGDB)
--	/*
--	 * Set up kgdb as requested. We do it here because the serial
--	 * init uses the timer vars we just set up for figuring baud.
--	 */
--	kgdb_init();
--#endif
--}
-diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c
++   Always clears 4096 bytes.
++
++   Note : alloco guarded by synco to avoid TAKum03020 erratum
++
++*/
++
++	.section .text..SHmedia32,"ax"
++	.little
++
++	.balign 8
++	.global clear_page
++clear_page:
++	pta/l 1f, tr1
++	pta/l 2f, tr2
++	ptabs/l r18, tr0
++
++	movi 4096, r7
++	add  r2, r7, r7
++	add  r2, r63, r6
++1:
++	alloco r6, 0
++	synco	! TAKum03020
++	addi	r6, 32, r6
++	bgt/l	r7, r6, tr1
++
++	add  r2, r63, r6
++2:
++	st.q  r6,   0, r63
++	st.q  r6,   8, r63
++	st.q  r6,  16, r63
++	st.q  r6,  24, r63
++	addi r6, 32, r6
++	bgt/l r7, r6, tr2
++
++	blink tr0, r63
++
++
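The header comment above describes the two-loop strategy: allocate the destination cache lines in one pass, then store the zeros in a second pass, so the allocos and the stores do not fight over the same cache sets. A rough C model of that idea (purely illustrative; clear_page_model, prepare_line, PAGE_SIZE and CACHE_LINE are editorial names, with the 4096-byte page and 32-byte line taken from the assembly):

#include <string.h>
#include <stdint.h>

#define PAGE_SIZE  4096
#define CACHE_LINE 32

/* Hypothetical stand-in for the SH-5 "alloco r6, 0; synco" pair: it only
 * marks a destination cache line as allocated, no data is moved. */
static inline void prepare_line(void *p) { (void)p; }

/* Rough model of clear_page above: one loop to allocate the cache lines,
 * a second loop to write the zeros, so the allocations and the stores do
 * not contend for the same cache set in the same iteration. */
void clear_page_model(void *page)
{
	uint8_t *p = (uint8_t *)page;
	size_t off;

	for (off = 0; off < PAGE_SIZE; off += CACHE_LINE)	/* pass 1 */
		prepare_line(p + off);

	for (off = 0; off < PAGE_SIZE; off += CACHE_LINE)	/* pass 2 */
		memset(p + off, 0, CACHE_LINE);	/* four st.q per iteration in the asm */
}

In the assembly, pass 1 is the alloco/synco loop at label 1: and pass 2 is the group of four st.q stores at label 2:.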
+diff --git a/arch/sh/lib64/copy_page.S b/arch/sh/lib64/copy_page.S
 new file mode 100644
-index 0000000..2bc04bf
+index 0000000..0ec6fca
 --- /dev/null
-+++ b/arch/sh/kernel/time_32.c
-@@ -0,0 +1,269 @@
++++ b/arch/sh/lib64/copy_page.S
+@@ -0,0 +1,89 @@
 +/*
-+ *  arch/sh/kernel/time.c
-+ *
-+ *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
-+ *  Copyright (C) 2000  Philipp Rumpf <prumpf at tux.org>
-+ *  Copyright (C) 2002 - 2007  Paul Mundt
-+ *  Copyright (C) 2002  M. R. Brown  <mrbrown at linux-sh.org>
-+ *
-+ *  Some code taken from i386 version.
-+ *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/profile.h>
-+#include <linux/timex.h>
-+#include <linux/sched.h>
-+#include <linux/clockchips.h>
-+#include <asm/clock.h>
-+#include <asm/rtc.h>
-+#include <asm/timer.h>
-+#include <asm/kgdb.h>
++   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
 +
-+struct sys_timer *sys_timer;
++   This file is subject to the terms and conditions of the GNU General Public
++   License.  See the file "COPYING" in the main directory of this archive
++   for more details.
 +
-+/* Move this somewhere more sensible.. */
-+DEFINE_SPINLOCK(rtc_lock);
-+EXPORT_SYMBOL(rtc_lock);
++   Tight version of memcpy for the case of just copying a page.
++   Prefetch strategy empirically optimised against RTL simulations
++   of SH5-101 cut2 eval chip with Cayman board DDR memory.
++
++   Parameters:
++   r2 : destination effective address (start of page)
++   r3 : source effective address (start of page)
++
++   Always copies 4096 bytes.
++
++   Points to review.
++   * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
++     It seems like the prefetch needs to be at least 4 lines ahead to get
++     the data into the cache in time, and the allocos contend with outstanding
++     prefetches for the same cache set, so it's better to have the numbers
++     different.
++   */
++
++	.section .text..SHmedia32,"ax"
++	.little
++
++	.balign 8
++	.global copy_page
++copy_page:
++
++	/* Copy 4096 bytes worth of data from r3 to r2.
++	   Do prefetches 4 lines ahead.
++	   Do alloco 2 lines ahead */
++
++	pta 1f, tr1
++	pta 2f, tr2
++	pta 3f, tr3
++	ptabs r18, tr0
++
++#if 0
++	/* TAKum03020 */
++	ld.q r3, 0x00, r63
++	ld.q r3, 0x20, r63
++	ld.q r3, 0x40, r63
++	ld.q r3, 0x60, r63
++#endif
++	alloco r2, 0x00
++	synco		! TAKum03020
++	alloco r2, 0x20
++	synco		! TAKum03020
++
++	movi 3968, r6
++	add  r2, r6, r6
++	addi r6, 64, r7
++	addi r7, 64, r8
++	sub r3, r2, r60
++	addi r60, 8, r61
++	addi r61, 8, r62
++	addi r62, 8, r23
++	addi r60, 0x80, r22
++
++/* Minimal code size.  The extra branches inside the loop don't cost much
++   because they overlap with the time spent waiting for prefetches to
++   complete. */
++1:
++#if 0
++	/* TAKum03020 */
++	bge/u r2, r6, tr2  ! skip prefetch for last 4 lines
++	ldx.q r2, r22, r63 ! prefetch 4 lines hence
++#endif
++2:
++	bge/u r2, r7, tr3  ! skip alloco for last 2 lines
++	alloco r2, 0x40    ! alloc destination line 2 lines ahead
++	synco		! TAKum03020
++3:
++	ldx.q r2, r60, r36
++	ldx.q r2, r61, r37
++	ldx.q r2, r62, r38
++	ldx.q r2, r23, r39
++	st.q  r2,   0, r36
++	st.q  r2,   8, r37
++	st.q  r2,  16, r38
++	st.q  r2,  24, r39
++	addi r2, 32, r2
++	bgt/l r8, r2, tr1
++
++	blink tr0, r63	   ! return
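The copy_page header above tunes two distances: prefetch the source four lines ahead of the stores and alloco the destination two lines ahead, so the prefetches and the allocos do not compete for the same cache set. A rough, purely illustrative C sketch of that schedule follows (copy_page_model, allocate_line and the *_AHEAD constants are editorial names; __builtin_prefetch merely stands in for the ldx.q prefetch, which in this version of the file is actually disabled because of the TAKum03020 erratum):

#include <string.h>
#include <stdint.h>

#define PAGE_SIZE      4096
#define CACHE_LINE     32
#define PREFETCH_AHEAD (4 * CACHE_LINE)	/* read side:  4 lines ahead */
#define ALLOC_AHEAD    (2 * CACHE_LINE)	/* write side: 2 lines ahead */

/* Hypothetical stand-in for "alloco dst, 0x40; synco". */
static inline void allocate_line(void *p) { (void)p; }

/* Rough model of copy_page above: prefetch and allocate ahead of the
 * line currently being copied, skipping both near the end of the page. */
void copy_page_model(void *dst, const void *src)
{
	uint8_t *d = (uint8_t *)dst;
	const uint8_t *s = (const uint8_t *)src;
	size_t off;

	for (off = 0; off < PAGE_SIZE; off += CACHE_LINE) {
		if (off + PREFETCH_AHEAD < PAGE_SIZE)
			__builtin_prefetch(s + off + PREFETCH_AHEAD, 0);
		if (off + ALLOC_AHEAD < PAGE_SIZE)
			allocate_line(d + off + ALLOC_AHEAD);
		memcpy(d + off, s + off, CACHE_LINE);	/* 4 ldx.q / 4 st.q in the asm */
	}
}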
+diff --git a/arch/sh/lib64/copy_user_memcpy.S b/arch/sh/lib64/copy_user_memcpy.S
+new file mode 100644
+index 0000000..2a62816
+--- /dev/null
++++ b/arch/sh/lib64/copy_user_memcpy.S
+@@ -0,0 +1,217 @@
++!
++! Fast SH memcpy
++!
++! by Toshiyasu Morita (tm at netcom.com)
++! hacked by J"orn Rernnecke (joern.rennecke at superh.com) ("o for o-umlaut)
++! SH5 code Copyright 2002 SuperH Ltd.
++!
++! Entry: ARG0: destination pointer
++!        ARG1: source pointer
++!        ARG2: byte count
++!
++! Exit:  RESULT: destination pointer
++!        any other registers in the range r0-r7: trashed
++!
++! Notes: Usually one wants to do small reads and write a longword, but
++!        unfortunately it is difficult in some cases to concatenate bytes
++!        into a longword on the SH, so this does a longword read and small
++!        writes.
++!
++! This implementation makes two assumptions about how it is called:
++!
++! 1.: If the byte count is nonzero, the address of the last byte to be
++!     copied is unsigned greater than the address of the first byte to
++!     be copied.  This could be easily swapped for a signed comparison,
++!     but the algorithm used needs some comparison.
++!
++! 2.: When there are two or three bytes in the last word of an 11-or-more
++!     bytes memory chunk to be copied, the rest of the word can be read
++!     without side effects.
++!     This could be easily changed by increasing the minimum size of
++!     a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
++!     however, this would cost a few extra cycles on average.
++!     For SHmedia, the assumption is that any quadword can be read in its
++!     entirety if at least one byte is included in the copy.
 +
-+/* Dummy RTC ops */
-+static void null_rtc_get_time(struct timespec *tv)
-+{
-+	tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
-+	tv->tv_nsec = 0;
-+}
++/* Imported into Linux kernel by Richard Curnow.  This is used to implement the
++   __copy_user function in the general case, so it has to be a distinct
++   function from intra-kernel memcpy to allow for exception fix-ups in the
++   event that the user pointer is bad somewhere in the copy (e.g. due to
++   running off the end of the vma).
 +
-+static int null_rtc_set_time(const time_t secs)
-+{
-+	return 0;
-+}
++   Note, this algorithm will be slightly wasteful in the case where the source
++   and destination pointers are equally aligned, because the stlo/sthi pairs
++   could then be merged back into single stores.  If there are a lot of cache
++   misses, this is probably offset by the stall lengths on the preloads.
 +
-+/*
-+ * Null high precision timer functions for systems lacking one.
-+ */
-+static cycle_t null_hpt_read(void)
-+{
-+	return 0;
-+}
++*/
 +
-+void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
-+int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
++/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
++ * erratum.  The first two prefetches are nop-ed out to avoid upsetting the
++ * instruction counts used in the jump address calculation.
++ * */
 +
-+#ifndef CONFIG_GENERIC_TIME
-+void do_gettimeofday(struct timeval *tv)
-+{
-+	unsigned long flags;
-+	unsigned long seq;
-+	unsigned long usec, sec;
++	.section .text..SHmedia32,"ax"
++	.little
++	.balign 32
++	.global copy_user_memcpy
++	.global copy_user_memcpy_end
++copy_user_memcpy:
 +
-+	do {
-+		/*
-+		 * Turn off IRQs when grabbing xtime_lock, so that
-+		 * the sys_timer get_offset code doesn't have to handle it.
-+		 */
-+		seq = read_seqbegin_irqsave(&xtime_lock, flags);
-+		usec = get_timer_offset();
-+		sec = xtime.tv_sec;
-+		usec += xtime.tv_nsec / NSEC_PER_USEC;
-+	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
++#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
++#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
++#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
++#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
 +
-+	while (usec >= 1000000) {
-+		usec -= 1000000;
-+		sec++;
-+	}
++	nop ! ld.b r3,0,r63 ! TAKum03020
++	pta/l Large,tr0
++	movi 25,r0
++	bgeu/u r4,r0,tr0
++	nsb r4,r0
++	shlli r0,5,r0
++	movi (L1-L0+63*32 + 1) & 0xffff,r1
++	sub r1, r0, r0
++L0:	ptrel r0,tr0
++	add r2,r4,r5
++	ptabs r18,tr1
++	add r3,r4,r6
++	blink tr0,r63
 +
-+	tv->tv_sec = sec;
-+	tv->tv_usec = usec;
-+}
-+EXPORT_SYMBOL(do_gettimeofday);
++/* Rearranged to make cut2 safe */
++	.balign 8
++L4_7:	/* 4..7 byte memcpy cntd. */
++	stlo.l r2, 0, r0
++	or r6, r7, r6
++	sthi.l r5, -1, r6
++	stlo.l r5, -4, r6
++	blink tr1,r63
 +
-+int do_settimeofday(struct timespec *tv)
-+{
-+	time_t wtm_sec, sec = tv->tv_sec;
-+	long wtm_nsec, nsec = tv->tv_nsec;
++	.balign 8
++L1:	/* 0 byte memcpy */
++	nop
++	blink tr1,r63
++	nop
++	nop
++	nop
++	nop
 +
-+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-+		return -EINVAL;
++L2_3:	/* 2 or 3 byte memcpy cntd. */
++	st.b r5,-1,r6
++	blink tr1,r63
 +
-+	write_seqlock_irq(&xtime_lock);
-+	/*
-+	 * This is revolting. We need to set "xtime" correctly. However, the
-+	 * value in this location is the value at the most recent update of
-+	 * wall time.  Discover what correction gettimeofday() would have
-+	 * made, and then undo it!
-+	 */
-+	nsec -= get_timer_offset() * NSEC_PER_USEC;
++	/* 1 byte memcpy */
++	ld.b r3,0,r0
++	st.b r2,0,r0
++	blink tr1,r63
 +
-+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
++L8_15:	/* 8..15 byte memcpy cntd. */
++	stlo.q r2, 0, r0
++	or r6, r7, r6
++	sthi.q r5, -1, r6
++	stlo.q r5, -8, r6
++	blink tr1,r63
 +
-+	set_normalized_timespec(&xtime, sec, nsec);
-+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++	/* 2 or 3 byte memcpy */
++	ld.b r3,0,r0
++	nop ! ld.b r2,0,r63 ! TAKum03020
++	ld.b r3,1,r1
++	st.b r2,0,r0
++	pta/l L2_3,tr0
++	ld.b r6,-1,r6
++	st.b r2,1,r1
++	blink tr0, r63
 +
-+	ntp_clear();
-+	write_sequnlock_irq(&xtime_lock);
-+	clock_was_set();
++	/* 4 .. 7 byte memcpy */
++	LDUAL (r3, 0, r0, r1)
++	pta L4_7, tr0
++	ldlo.l r6, -4, r7
++	or r0, r1, r0
++	sthi.l r2, 3, r0
++	ldhi.l r6, -1, r6
++	blink tr0, r63
 +
-+	return 0;
-+}
-+EXPORT_SYMBOL(do_settimeofday);
-+#endif /* !CONFIG_GENERIC_TIME */
++	/* 8 .. 15 byte memcpy */
++	LDUAQ (r3, 0, r0, r1)
++	pta L8_15, tr0
++	ldlo.q r6, -8, r7
++	or r0, r1, r0
++	sthi.q r2, 7, r0
++	ldhi.q r6, -1, r6
++	blink tr0, r63
 +
-+#ifndef CONFIG_GENERIC_CLOCKEVENTS
-+/* last time the RTC clock got updated */
-+static long last_rtc_update;
++	/* 16 .. 24 byte memcpy */
++	LDUAQ (r3, 0, r0, r1)
++	LDUAQ (r3, 8, r8, r9)
++	or r0, r1, r0
++	sthi.q r2, 7, r0
++	or r8, r9, r8
++	sthi.q r2, 15, r8
++	ldlo.q r6, -8, r7
++	ldhi.q r6, -1, r6
++	stlo.q r2, 8, r8
++	stlo.q r2, 0, r0
++	or r6, r7, r6
++	sthi.q r5, -1, r6
++	stlo.q r5, -8, r6
++	blink tr1,r63
 +
-+/*
-+ * handle_timer_tick() needs to keep up the real-time clock,
-+ * as well as call the "do_timer()" routine every clocktick
-+ */
-+void handle_timer_tick(void)
-+{
-+	do_timer(1);
-+#ifndef CONFIG_SMP
-+	update_process_times(user_mode(get_irq_regs()));
-+#endif
-+	if (current->pid)
-+		profile_tick(CPU_PROFILING);
++Large:
++	! ld.b r2, 0, r63 ! TAKum03020
++	pta/l  Loop_ua, tr1
++	ori r3, -8, r7
++	sub r2, r7, r22
++	sub r3, r2, r6
++	add r2, r4, r5
++	ldlo.q r3, 0, r0
++	addi r5, -16, r5
++	movi 64+8, r27 ! could subtract r7 from that.
++	stlo.q r2, 0, r0
++	sthi.q r2, 7, r0
++	ldx.q r22, r6, r0
++	bgtu/l r27, r4, tr1
 +
-+#ifdef CONFIG_HEARTBEAT
-+	if (sh_mv.mv_heartbeat != NULL)
-+		sh_mv.mv_heartbeat();
-+#endif
++	addi r5, -48, r27
++	pta/l Loop_line, tr0
++	addi r6, 64, r36
++	addi r6, -24, r19
++	addi r6, -16, r20
++	addi r6, -8, r21
 +
-+	/*
-+	 * If we have an externally synchronized Linux clock, then update
-+	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-+	 * called as close as possible to 500 ms before the new second starts.
-+	 */
-+	if (ntp_synced() &&
-+	    xtime.tv_sec > last_rtc_update + 660 &&
-+	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
-+	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
-+		if (rtc_sh_set_time(xtime.tv_sec) == 0)
-+			last_rtc_update = xtime.tv_sec;
-+		else
-+			/* do it again in 60s */
-+			last_rtc_update = xtime.tv_sec - 600;
-+	}
-+}
-+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
++Loop_line:
++	! ldx.q r22, r36, r63 ! TAKum03020
++	alloco r22, 32
++	synco
++	addi r22, 32, r22
++	ldx.q r22, r19, r23
++	sthi.q r22, -25, r0
++	ldx.q r22, r20, r24
++	ldx.q r22, r21, r25
++	stlo.q r22, -32, r0
++	ldx.q r22, r6,  r0
++	sthi.q r22, -17, r23
++	sthi.q r22,  -9, r24
++	sthi.q r22,  -1, r25
++	stlo.q r22, -24, r23
++	stlo.q r22, -16, r24
++	stlo.q r22,  -8, r25
++	bgeu r27, r22, tr0
 +
-+#ifdef CONFIG_PM
-+int timer_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
++Loop_ua:
++	addi r22, 8, r22
++	sthi.q r22, -1, r0
++	stlo.q r22, -8, r0
++	ldx.q r22, r6, r0
++	bgtu/l r5, r22, tr1
 +
-+	sys_timer->ops->stop();
++	add r3, r4, r7
++	ldlo.q r7, -8, r1
++	sthi.q r22, 7, r0
++	ldhi.q r7, -1, r7
++	ptabs r18,tr1
++	stlo.q r22, 0, r0
++	or r1, r7, r1
++	sthi.q r5, 15, r1
++	stlo.q r5, 8, r1
++	blink tr1, r63
++copy_user_memcpy_end:
++	nop
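The entry sequence above (movi 25, bgeu to Large, then nsb/ptrel) sends copies of 25 bytes or more to the Large path and dispatches shorter copies, by the bit-length of the byte count, to small fixed stubs such as L1, L2_3, L4_7 and L8_15. A rough, purely illustrative C reading of that dispatch (copy_small_model is an editorial name; memcpy only stands in for the stub bodies and the Large loop):

#include <string.h>
#include <stddef.h>

/* Rough model of the small-copy dispatch in copy_user_memcpy above. */
void *copy_small_model(void *dst, const void *src, size_t n)
{
	if (n >= 25)			/* "movi 25, r0; bgeu ... Large" */
		return memcpy(dst, src, n);	/* stands in for the Large loop */

	/* The assembly picks one of the stubs with a computed branch
	 * (nsb/ptrel into 32-byte-spaced code); here the size classes
	 * are simply spelled out. */
	if (n == 0)			/* L1 */
		return dst;
	if (n == 1) {			/* single-byte stub */
		*(unsigned char *)dst = *(const unsigned char *)src;
		return dst;
	}
	/* n in 2..3   -> L2_3  (byte head plus byte tail)
	 * n in 4..7   -> L4_7  (two possibly overlapping misaligned longwords)
	 * n in 8..15  -> L8_15 (two possibly overlapping misaligned quadwords)
	 * n in 16..24 ->        head/middle quadwords plus an overlapping tail */
	return memcpy(dst, src, n);
}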
+diff --git a/arch/sh/lib64/dbg.c b/arch/sh/lib64/dbg.c
+new file mode 100644
+index 0000000..75825ef
+--- /dev/null
++++ b/arch/sh/lib64/dbg.c
+@@ -0,0 +1,430 @@
++/*--------------------------------------------------------------------------
++--
++-- Identity : Linux50 Debug Functions
++--
++-- File     : arch/sh/lib64/dbg.c
++--
++-- Copyright 2000, 2001 STMicroelectronics Limited.
++-- Copyright 2004 Richard Curnow (evt_debug etc)
++--
++--------------------------------------------------------------------------*/
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <asm/mmu_context.h>
 +
-+	return 0;
-+}
++typedef u64 regType_t;
 +
-+int timer_resume(struct sys_device *dev)
++static regType_t getConfigReg(u64 id)
 +{
-+	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
-+
-+	sys_timer->ops->start();
-+
-+	return 0;
++	register u64 reg __asm__("r2");
++	asm volatile ("getcfg   %1, 0, %0":"=r" (reg):"r"(id));
++	return (reg);
 +}
-+#else
-+#define timer_suspend NULL
-+#define timer_resume NULL
-+#endif
 +
-+static struct sysdev_class timer_sysclass = {
-+	.name	 = "timer",
-+	.suspend = timer_suspend,
-+	.resume	 = timer_resume,
-+};
++/* ======================================================================= */
 +
-+static int __init timer_init_sysfs(void)
-+{
-+	int ret = sysdev_class_register(&timer_sysclass);
-+	if (ret != 0)
-+		return ret;
++static char *szTab[] = { "4k", "64k", "1M", "512M" };
++static char *protTab[] = { "----",
++	"---R",
++	"--X-",
++	"--XR",
++	"-W--",
++	"-W-R",
++	"-WX-",
++	"-WXR",
++	"U---",
++	"U--R",
++	"U-X-",
++	"U-XR",
++	"UW--",
++	"UW-R",
++	"UWX-",
++	"UWXR"
++};
++#define  ITLB_BASE	0x00000000
++#define  DTLB_BASE	0x00800000
++#define  MAX_TLBs		64
++/* PTE High */
++#define  GET_VALID(pte)        ((pte) & 0x1)
++#define  GET_SHARED(pte)       ((pte) & 0x2)
++#define  GET_ASID(pte)         ((pte >> 2) & 0x0ff)
++#define  GET_EPN(pte)          ((pte) & 0xfffff000)
 +
-+	sys_timer->dev.cls = &timer_sysclass;
-+	return sysdev_register(&sys_timer->dev);
-+}
-+device_initcall(timer_init_sysfs);
++/* PTE Low */
++#define  GET_CBEHAVIOR(pte)    ((pte) & 0x3)
++#define  GET_PAGE_SIZE(pte)    szTab[((pte >> 3) & 0x3)]
++#define  GET_PROTECTION(pte)   protTab[((pte >> 6) & 0xf)]
++#define  GET_PPN(pte)          ((pte) & 0xfffff000)
 +
-+void (*board_time_init)(void);
++#define PAGE_1K_MASK           0x00000000
++#define PAGE_4K_MASK           0x00000010
++#define PAGE_64K_MASK          0x00000080
++#define MMU_PAGESIZE_MASK      (PAGE_64K_MASK | PAGE_4K_MASK)
++#define PAGE_1MB_MASK          MMU_PAGESIZE_MASK
++#define PAGE_1K                (1024)
++#define PAGE_4K                (1024 * 4)
++#define PAGE_64K               (1024 * 64)
++#define PAGE_1MB               (1024 * 1024)
 +
-+/*
-+ * Shamelessly based on the MIPS and Sparc64 work.
-+ */
-+static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
-+unsigned long sh_hpt_frequency = 0;
++#define HOW_TO_READ_TLB_CONTENT  \
++       "[ ID]  PPN         EPN        ASID  Share  CB  P.Size   PROT.\n"
 +
-+#define NSEC_PER_CYC_SHIFT	10
++void print_single_tlb(unsigned long tlb, int single_print)
++{
++	regType_t pteH;
++	regType_t pteL;
++	unsigned int valid, shared, asid, epn, cb, ppn;
++	char *pSize;
++	char *pProt;
 +
-+struct clocksource clocksource_sh = {
-+	.name		= "SuperH",
-+	.rating		= 200,
-+	.mask		= CLOCKSOURCE_MASK(32),
-+	.read		= null_hpt_read,
-+	.shift		= 16,
-+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-+};
++	/*
++	   ** when <single_print> is true (printing a single TLB), this implies:
++	   **   1) print the TLB entry even if it is NOT VALID
++	   **   2) print out the header
++	 */
 +
-+static void __init init_sh_clocksource(void)
-+{
-+	if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
++	pteH = getConfigReg(tlb);
++	valid = GET_VALID(pteH);
++	if (single_print)
++		printk(HOW_TO_READ_TLB_CONTENT);
++	else if (!valid)
 +		return;
 +
-+	clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
-+						  clocksource_sh.shift);
-+
-+	timer_ticks_per_nsec_quotient =
-+		clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
++	pteL = getConfigReg(tlb + 1);
 +
-+	clocksource_register(&clocksource_sh);
++	shared = GET_SHARED(pteH);
++	asid = GET_ASID(pteH);
++	epn = GET_EPN(pteH);
++	cb = GET_CBEHAVIOR(pteL);
++	pSize = GET_PAGE_SIZE(pteL);
++	pProt = GET_PROTECTION(pteL);
++	ppn = GET_PPN(pteL);
++	printk("[%c%2ld]  0x%08x  0x%08x  %03d   %02x    %02x   %4s    %s\n",
++	       ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP),
++	       ppn, epn, asid, shared, cb, pSize, pProt);
 +}
 +
-+#ifdef CONFIG_GENERIC_TIME
-+unsigned long long sched_clock(void)
++void print_dtlb(void)
 +{
-+	unsigned long long ticks = clocksource_sh.read();
-+	return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
++	int count;
++	unsigned long tlb;
++
++	printk(" ================= SH-5 D-TLBs Status ===================\n");
++	printk(HOW_TO_READ_TLB_CONTENT);
++	tlb = DTLB_BASE;
++	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
++		print_single_tlb(tlb, 0);
++	printk
++	    (" =============================================================\n");
 +}
-+#endif
 +
-+void __init time_init(void)
++void print_itlb(void)
 +{
-+	if (board_time_init)
-+		board_time_init();
-+
-+	clk_init();
-+
-+	rtc_sh_get_time(&xtime);
-+	set_normalized_timespec(&wall_to_monotonic,
-+				-xtime.tv_sec, -xtime.tv_nsec);
++	int count;
++	unsigned long tlb;
 +
-+	/*
-+	 * Find the timer to use as the system timer, it will be
-+	 * initialized for us.
-+	 */
-+	sys_timer = get_sys_timer();
-+	printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
++	printk(" ================= SH-5 I-TLBs Status ===================\n");
++	printk(HOW_TO_READ_TLB_CONTENT);
++	tlb = ITLB_BASE;
++	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
++		print_single_tlb(tlb, 0);
++	printk
++	    (" =============================================================\n");
++}
 +
-+	if (sys_timer->ops->read)
-+		clocksource_sh.read = sys_timer->ops->read;
++/* ======================================================================= */
 +
-+	init_sh_clocksource();
++#ifdef CONFIG_POOR_MANS_STRACE
 +
-+	if (sh_hpt_frequency)
-+		printk("Using %lu.%03lu MHz high precision timer.\n",
-+		       ((sh_hpt_frequency + 500) / 1000) / 1000,
-+		       ((sh_hpt_frequency + 500) / 1000) % 1000);
++#include "syscalltab.h"
 +
-+#if defined(CONFIG_SH_KGDB)
-+	/*
-+	 * Set up kgdb as requested. We do it here because the serial
-+	 * init uses the timer vars we just set up for figuring baud.
-+	 */
-+	kgdb_init();
-+#endif
-+}
-diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c
-new file mode 100644
-index 0000000..f819ba3
---- /dev/null
-+++ b/arch/sh/kernel/time_64.c
-@@ -0,0 +1,519 @@
-+/*
-+ * arch/sh/kernel/time_64.c
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003 - 2007  Paul Mundt
-+ * Copyright (C) 2003  Richard Curnow
-+ *
-+ *    Original TMU/RTC code taken from sh version.
-+ *    Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
-+ *      Some code taken from i386 version.
-+ *      Copyright (C) 1991, 1992, 1995  Linus Torvalds
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/errno.h>
-+#include <linux/rwsem.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/param.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/time.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/profile.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/bcd.h>
-+#include <linux/timex.h>
-+#include <linux/irq.h>
-+#include <linux/io.h>
-+#include <linux/platform_device.h>
-+#include <asm/cpu/registers.h>	 /* required by inline __asm__ stmt. */
-+#include <asm/cpu/irq.h>
-+#include <asm/addrspace.h>
-+#include <asm/processor.h>
-+#include <asm/uaccess.h>
-+#include <asm/delay.h>
++struct ring_node {
++	int evt;
++	int ret_addr;
++	int event;
++	int tra;
++	int pid;
++	unsigned long sp;
++	unsigned long pc;
++};
 +
-+#define TMU_TOCR_INIT	0x00
-+#define TMU0_TCR_INIT	0x0020
-+#define TMU_TSTR_INIT	1
-+#define TMU_TSTR_OFF	0
++static struct ring_node event_ring[16];
++static int event_ptr = 0;
 +
-+/* Real Time Clock */
-+#define	RTC_BLOCK_OFF	0x01040000
-+#define RTC_BASE	PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
-+#define RTC_RCR1_CIE	0x10	/* Carry Interrupt Enable */
-+#define RTC_RCR1	(rtc_base + 0x38)
++struct stored_syscall_data {
++	int pid;
++	int syscall_number;
++};
 +
-+/* Clock, Power and Reset Controller */
-+#define	CPRC_BLOCK_OFF	0x01010000
-+#define CPRC_BASE	PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
++#define N_STORED_SYSCALLS 16
 +
-+#define FRQCR		(cprc_base+0x0)
-+#define WTCSR		(cprc_base+0x0018)
-+#define STBCR		(cprc_base+0x0030)
++static struct stored_syscall_data stored_syscalls[N_STORED_SYSCALLS];
++static int syscall_next=0;
++static int syscall_next_print=0;
 +
-+/* Time Management Unit */
-+#define	TMU_BLOCK_OFF	0x01020000
-+#define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
-+#define TMU0_BASE	tmu_base + 0x8 + (0xc * 0x0)
-+#define TMU1_BASE	tmu_base + 0x8 + (0xc * 0x1)
-+#define TMU2_BASE	tmu_base + 0x8 + (0xc * 0x2)
++void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs)
++{
++	int syscallno = tra & 0xff;
++	unsigned long sp;
++	unsigned long stack_bottom;
++	int pid;
++	struct ring_node *rr;
 +
-+#define TMU_TOCR	tmu_base+0x0	/* Byte access */
-+#define TMU_TSTR	tmu_base+0x4	/* Byte access */
++	pid = current->pid;
++	stack_bottom = (unsigned long) task_stack_page(current);
++	asm volatile("ori r15, 0, %0" : "=r" (sp));
++	rr = event_ring + event_ptr;
++	rr->evt = evt;
++	rr->ret_addr = ret_addr;
++	rr->event = event;
++	rr->tra = tra;
++	rr->pid = pid;
++	rr->sp = sp;
++	rr->pc = regs->pc;
 +
-+#define TMU0_TCOR	TMU0_BASE+0x0	/* Long access */
-+#define TMU0_TCNT	TMU0_BASE+0x4	/* Long access */
-+#define TMU0_TCR	TMU0_BASE+0x8	/* Word access */
++	if (sp < stack_bottom + 3092) {
++		printk("evt_debug : stack underflow report\n");
++		int i, j;
++		printk("evt_debug : stack underflow report\n");
++			rr = event_ring + i;
++			printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n",
++				rr->evt, rr->event, rr->tra, rr->pid, rr->sp, rr->pc);
++			i--;
++			i &= 15;
++		}
++		panic("STACK UNDERFLOW\n");
++	}
 +
-+#define TICK_SIZE (tick_nsec / 1000)
++	event_ptr = (event_ptr + 1) & 15;
 +
-+static unsigned long tmu_base, rtc_base;
-+unsigned long cprc_base;
++	if ((event == 2) && (evt == 0x160)) {
++		if (syscallno < NUM_SYSCALL_INFO_ENTRIES) {
++			/* Store the syscall information to print later.  We
++			 * can't print this now - currently we're running with
++			 * SR.BL=1, so we can't take a tlbmiss (which could occur
++			 * in the console drivers under printk).
++			 *
++			 * Just overwrite old entries on ring overflow - this
++			 * is only for last-hope debugging. */
++			stored_syscalls[syscall_next].pid = current->pid;
++			stored_syscalls[syscall_next].syscall_number = syscallno;
++			syscall_next++;
++			syscall_next &= (N_STORED_SYSCALLS - 1);
++		}
++	}
++}
 +
-+/* Variables to allow interpolation of time of day to resolution better than a
-+ * jiffy. */
++static void drain_syscalls(void) {
++	while (syscall_next_print != syscall_next) {
++		printk("Task %d: %s()\n",
++			stored_syscalls[syscall_next_print].pid,
++			syscall_info_table[stored_syscalls[syscall_next_print].syscall_number].name);
++			syscall_next_print++;
++			syscall_next_print &= (N_STORED_SYSCALLS - 1);
++	}
++}
 +
-+/* This is effectively protected by xtime_lock */
-+static unsigned long ctc_last_interrupt;
-+static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
++void evt_debug2(unsigned int ret)
++{
++	drain_syscalls();
++	printk("Task %d: syscall returns %08x\n", current->pid, ret);
++}
 +
-+#define CTC_JIFFY_SCALE_SHIFT 40
++void evt_debug_ret_from_irq(struct pt_regs *regs)
++{
++	int pid;
++	struct ring_node *rr;
 +
-+/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
-+static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
++	pid = current->pid;
++	rr = event_ring + event_ptr;
++	rr->evt = 0xffff;
++	rr->ret_addr = 0;
++	rr->event = 0;
++	rr->tra = 0;
++	rr->pid = pid;
++	rr->pc = regs->pc;
++	event_ptr = (event_ptr + 1) & 15;
++}
 +
-+/* Estimate number of microseconds that have elapsed since the last timer tick,
-+   by scaling the delta that has occurred in the CTC register.
++void evt_debug_ret_from_exc(struct pt_regs *regs)
++{
++	int pid;
++	struct ring_node *rr;
 +
-+   WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
-+   the CPU clock rate.  If the CPU sleeps, the CTC stops counting.  Bear this
-+   in mind if enabling SLEEP_WORKS in process.c.  In that case, this algorithm
-+   probably needs to use TMU.TCNT0 instead.  This will work even if the CPU is
-+   sleeping, though will be coarser.
++	pid = current->pid;
++	rr = event_ring + event_ptr;
++	rr->evt = 0xfffe;
++	rr->ret_addr = 0;
++	rr->event = 0;
++	rr->tra = 0;
++	rr->pid = pid;
++	rr->pc = regs->pc;
++	event_ptr = (event_ptr + 1) & 15;
++}
 +
-+   FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
-+   is running or if the freq or tick arguments of adjtimex are modified after
-+   we have calibrated the scaling factor?  This will result in either a jump at
-+   the end of a tick period, or a wrap backwards at the start of the next one,
-+   if the application is reading the time of day often enough.  I think we
-+   ought to do better than this.  For this reason, usecs_per_jiffy is left
-+   separated out in the calculation below.  This allows some future hook into
-+   the adjtime-related stuff in kernel/timer.c to remove this hazard.
++#endif /* CONFIG_POOR_MANS_STRACE */
 +
-+*/
++/* ======================================================================= */
 +
-+static unsigned long usecs_since_tick(void)
++void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs)
 +{
-+	unsigned long long current_ctc;
-+	long ctc_ticks_since_interrupt;
-+	unsigned long long ull_ctc_ticks_since_interrupt;
-+	unsigned long result;
 +
-+	unsigned long long mul1_out;
-+	unsigned long long mul1_out_high;
-+	unsigned long long mul2_out_low, mul2_out_high;
++	unsigned long long ah, al, bh, bl, ch, cl;
 +
-+	/* Read CTC register */
-+	asm ("getcon cr62, %0" : "=r" (current_ctc));
-+	/* Note, the CTC counts down on each CPU clock, not up.
-+	   Note(2), use long type to get correct wraparound arithmetic when
-+	   the counter crosses zero. */
-+	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
-+	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
++	printk("\n");
++	printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n",
++	       ((from) ? from : "???"), current->pid, trapnr, signr);
 +
-+	/* Inline assembly to do 32x32x32->64 multiplier */
-+	asm volatile ("mulu.l %1, %2, %0" :
-+	     "=r" (mul1_out) :
-+	     "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
++	asm volatile ("getcon   " __EXPEVT ", %0":"=r"(ah));
++	asm volatile ("getcon   " __EXPEVT ", %0":"=r"(al));
++	ah = (ah) >> 32;
++	al = (al) & 0xffffffff;
++	asm volatile ("getcon   " __KCR1 ", %0":"=r"(bh));
++	asm volatile ("getcon   " __KCR1 ", %0":"=r"(bl));
++	bh = (bh) >> 32;
++	bl = (bl) & 0xffffffff;
++	asm volatile ("getcon   " __INTEVT ", %0":"=r"(ch));
++	asm volatile ("getcon   " __INTEVT ", %0":"=r"(cl));
++	ch = (ch) >> 32;
++	cl = (cl) & 0xffffffff;
++	printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
 +
-+	mul1_out_high = mul1_out >> 32;
++	asm volatile ("getcon   " __PEXPEVT ", %0":"=r"(ah));
++	asm volatile ("getcon   " __PEXPEVT ", %0":"=r"(al));
++	ah = (ah) >> 32;
++	al = (al) & 0xffffffff;
++	asm volatile ("getcon   " __PSPC ", %0":"=r"(bh));
++	asm volatile ("getcon   " __PSPC ", %0":"=r"(bl));
++	bh = (bh) >> 32;
++	bl = (bl) & 0xffffffff;
++	asm volatile ("getcon   " __PSSR ", %0":"=r"(ch));
++	asm volatile ("getcon   " __PSSR ", %0":"=r"(cl));
++	ch = (ch) >> 32;
++	cl = (cl) & 0xffffffff;
++	printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
 +
-+	asm volatile ("mulu.l %1, %2, %0" :
-+	     "=r" (mul2_out_low) :
-+	     "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
++	ah = (regs->pc) >> 32;
++	al = (regs->pc) & 0xffffffff;
++	bh = (regs->regs[18]) >> 32;
++	bl = (regs->regs[18]) & 0xffffffff;
++	ch = (regs->regs[15]) >> 32;
++	cl = (regs->regs[15]) & 0xffffffff;
++	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
 +
-+#if 1
-+	asm volatile ("mulu.l %1, %2, %0" :
-+	     "=r" (mul2_out_high) :
-+	     "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
-+#endif
++	ah = (regs->sr) >> 32;
++	al = (regs->sr) & 0xffffffff;
++	asm volatile ("getcon   " __TEA ", %0":"=r"(bh));
++	asm volatile ("getcon   " __TEA ", %0":"=r"(bl));
++	bh = (bh) >> 32;
++	bl = (bl) & 0xffffffff;
++	asm volatile ("getcon   " __KCR0 ", %0":"=r"(ch));
++	asm volatile ("getcon   " __KCR0 ", %0":"=r"(cl));
++	ch = (ch) >> 32;
++	cl = (cl) & 0xffffffff;
++	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
 +
-+	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
++	ah = (regs->regs[0]) >> 32;
++	al = (regs->regs[0]) & 0xffffffff;
++	bh = (regs->regs[1]) >> 32;
++	bl = (regs->regs[1]) & 0xffffffff;
++	ch = (regs->regs[2]) >> 32;
++	cl = (regs->regs[2]) & 0xffffffff;
++	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
 +
-+	return result;
-+}
++	ah = (regs->regs[3]) >> 32;
++	al = (regs->regs[3]) & 0xffffffff;
++	bh = (regs->regs[4]) >> 32;
++	bl = (regs->regs[4]) & 0xffffffff;
++	ch = (regs->regs[5]) >> 32;
++	cl = (regs->regs[5]) & 0xffffffff;
++	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
 +
-+void do_gettimeofday(struct timeval *tv)
-+{
-+	unsigned long flags;
-+	unsigned long seq;
-+	unsigned long usec, sec;
++	ah = (regs->regs[6]) >> 32;
++	al = (regs->regs[6]) & 0xffffffff;
++	bh = (regs->regs[7]) >> 32;
++	bl = (regs->regs[7]) & 0xffffffff;
++	ch = (regs->regs[8]) >> 32;
++	cl = (regs->regs[8]) & 0xffffffff;
++	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
 +
-+	do {
-+		seq = read_seqbegin_irqsave(&xtime_lock, flags);
-+		usec = usecs_since_tick();
-+		sec = xtime.tv_sec;
-+		usec += xtime.tv_nsec / 1000;
-+	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
++	ah = (regs->regs[9]) >> 32;
++	al = (regs->regs[9]) & 0xffffffff;
++	bh = (regs->regs[10]) >> 32;
++	bl = (regs->regs[10]) & 0xffffffff;
++	ch = (regs->regs[11]) >> 32;
++	cl = (regs->regs[11]) & 0xffffffff;
++	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++	printk("....\n");
 +
-+	while (usec >= 1000000) {
-+		usec -= 1000000;
-+		sec++;
-+	}
++	ah = (regs->tregs[0]) >> 32;
++	al = (regs->tregs[0]) & 0xffffffff;
++	bh = (regs->tregs[1]) >> 32;
++	bl = (regs->tregs[1]) & 0xffffffff;
++	ch = (regs->tregs[2]) >> 32;
++	cl = (regs->tregs[2]) & 0xffffffff;
++	printk("T0  : %08Lx%08Lx T1  : %08Lx%08Lx T2  : %08Lx%08Lx\n",
++	       ah, al, bh, bl, ch, cl);
++	printk("....\n");
 +
-+	tv->tv_sec = sec;
-+	tv->tv_usec = usec;
++	print_dtlb();
++	print_itlb();
 +}
 +
-+int do_settimeofday(struct timespec *tv)
-+{
-+	time_t wtm_sec, sec = tv->tv_sec;
-+	long wtm_nsec, nsec = tv->tv_nsec;
-+
-+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-+		return -EINVAL;
-+
-+	write_seqlock_irq(&xtime_lock);
-+	/*
-+	 * This is revolting. We need to set "xtime" correctly. However, the
-+	 * value in this location is the value at the most recent update of
-+	 * wall time.  Discover what correction gettimeofday() would have
-+	 * made, and then undo it!
-+	 */
-+	nsec -= 1000 * usecs_since_tick();
-+
-+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
++/* ======================================================================= */
 +
-+	set_normalized_timespec(&xtime, sec, nsec);
-+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++/*
++** Depending on <base> scan the MMU, Data or Instruction side
++** looking for a valid mapping matching Eaddr & asid.
++** Return -1 if not found or the TLB id entry otherwise.
++** Note: it works only for 4k pages!
++*/
++static unsigned long
++lookup_mmu_side(unsigned long base, unsigned long Eaddr, unsigned long asid)
++{
++	regType_t pteH;
++	unsigned long epn;
++	int count;
 +
-+	ntp_clear();
-+	write_sequnlock_irq(&xtime_lock);
-+	clock_was_set();
++	epn = Eaddr & 0xfffff000;
 +
-+	return 0;
++	for (count = 0; count < MAX_TLBs; count++, base += TLB_STEP) {
++		pteH = getConfigReg(base);
++		if (GET_VALID(pteH))
++			if ((unsigned long) GET_EPN(pteH) == epn)
++				if ((unsigned long) GET_ASID(pteH) == asid)
++					break;
++	}
++	return ((unsigned long) ((count < MAX_TLBs) ? base : -1));
 +}
-+EXPORT_SYMBOL(do_settimeofday);
 +
-+/* Dummy RTC ops */
-+static void null_rtc_get_time(struct timespec *tv)
++unsigned long lookup_dtlb(unsigned long Eaddr)
 +{
-+	tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
-+	tv->tv_nsec = 0;
++	unsigned long asid = get_asid();
++	return (lookup_mmu_side((u64) DTLB_BASE, Eaddr, asid));
 +}
 +
-+static int null_rtc_set_time(const time_t secs)
++unsigned long lookup_itlb(unsigned long Eaddr)
 +{
-+	return 0;
++	unsigned long asid = get_asid();
++	return (lookup_mmu_side((u64) ITLB_BASE, Eaddr, asid));
 +}
 +
-+void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
-+int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
-+
-+/* last time the RTC clock got updated */
-+static long last_rtc_update;
-+
-+/*
-+ * timer_interrupt() needs to keep up the real-time clock,
-+ * as well as call the "do_timer()" routine every clocktick
-+ */
-+static inline void do_timer_interrupt(void)
++void print_page(struct page *page)
 +{
-+	unsigned long long current_ctc;
-+	asm ("getcon cr62, %0" : "=r" (current_ctc));
-+	ctc_last_interrupt = (unsigned long) current_ctc;
-+
-+	do_timer(1);
-+#ifndef CONFIG_SMP
-+	update_process_times(user_mode(get_irq_regs()));
-+#endif
-+	if (current->pid)
-+		profile_tick(CPU_PROFILING);
-+
-+#ifdef CONFIG_HEARTBEAT
-+	if (sh_mv.mv_heartbeat != NULL)
-+		sh_mv.mv_heartbeat();
-+#endif
++	printk("  page[%p] -> index 0x%lx,  count 0x%x,  flags 0x%lx\n",
++	       page, page->index, page_count(page), page->flags);
++	printk("       address_space = %p, pages =%ld\n", page->mapping,
++	       page->mapping->nrpages);
 +
-+	/*
-+	 * If we have an externally synchronized Linux clock, then update
-+	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-+	 * called as close as possible to 500 ms before the new second starts.
-+	 */
-+	if (ntp_synced() &&
-+	    xtime.tv_sec > last_rtc_update + 660 &&
-+	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
-+	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
-+		if (rtc_sh_set_time(xtime.tv_sec) == 0)
-+			last_rtc_update = xtime.tv_sec;
-+		else
-+			/* do it again in 60 s */
-+			last_rtc_update = xtime.tv_sec - 600;
-+	}
 +}
-+
+diff --git a/arch/sh/lib64/memcpy.c b/arch/sh/lib64/memcpy.c
+new file mode 100644
+index 0000000..fba436a
+--- /dev/null
++++ b/arch/sh/lib64/memcpy.c
+@@ -0,0 +1,81 @@
 +/*
-+ * This is the same as the above, except we _also_ save the current
-+ * Time Stamp Counter value at the time of the timer interrupt, so that
-+ * we later on can estimate the time of day more exactly.
++ * Copyright (C) 2002 Mark Debbage (Mark.Debbage at superh.com)
++ *
++ * May be copied or modified under the terms of the GNU General Public
++ * License.  See linux/COPYING for more information.
++ *
 + */
-+static irqreturn_t timer_interrupt(int irq, void *dev_id)
-+{
-+	unsigned long timer_status;
-+
-+	/* Clear UNF bit */
-+	timer_status = ctrl_inw(TMU0_TCR);
-+	timer_status &= ~0x100;
-+	ctrl_outw(timer_status, TMU0_TCR);
-+
-+	/*
-+	 * Here we are in the timer irq handler. We just have irqs locally
-+	 * disabled but we don't know if the timer_bh is running on the other
-+	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
-+	 * the irq version of write_lock because as just said we have irq
-+	 * locally disabled. -arca
-+	 */
-+	write_lock(&xtime_lock);
-+	do_timer_interrupt();
-+	write_unlock(&xtime_lock);
 +
-+	return IRQ_HANDLED;
-+}
++#include <linux/types.h>
++#include <asm/string.h>
 +
++// This is a simplistic optimization of memcpy to increase the
++// granularity of access beyond one byte using aligned
++// loads and stores. This is not an optimal implementation
++// for SH-5 (especially with regard to prefetching and the cache),
++// and a better version should be provided later ...
 +
-+static __init unsigned int get_cpu_hz(void)
++void *memcpy(void *dest, const void *src, size_t count)
 +{
-+	unsigned int count;
-+	unsigned long __dummy;
-+	unsigned long ctc_val_init, ctc_val;
++	char *d = (char *) dest, *s = (char *) src;
 +
-+	/*
-+	** Regardless the toolchain, force the compiler to use the
-+	** arbitrary register r3 as a clock tick counter.
-+	** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
-+	*/
-+	register unsigned long long  __rtc_irq_flag __asm__ ("r3");
++	if (count >= 32) {
++		int i = 8 - (((unsigned long) d) & 0x7);
 +
-+	local_irq_enable();
-+	do {} while (ctrl_inb(rtc_base) != 0);
-+	ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */
++		if (i != 8)
++			while (i-- && count--) {
++				*d++ = *s++;
++			}
 +
-+	/*
-+	 * r3 is arbitrary. CDC does not support "=z".
-+	 */
-+	ctc_val_init = 0xffffffff;
-+	ctc_val = ctc_val_init;
++		if (((((unsigned long) d) & 0x7) == 0) &&
++		    ((((unsigned long) s) & 0x7) == 0)) {
++			while (count >= 32) {
++				unsigned long long t1, t2, t3, t4;
++				t1 = *(unsigned long long *) (s);
++				t2 = *(unsigned long long *) (s + 8);
++				t3 = *(unsigned long long *) (s + 16);
++				t4 = *(unsigned long long *) (s + 24);
++				*(unsigned long long *) (d) = t1;
++				*(unsigned long long *) (d + 8) = t2;
++				*(unsigned long long *) (d + 16) = t3;
++				*(unsigned long long *) (d + 24) = t4;
++				d += 32;
++				s += 32;
++				count -= 32;
++			}
++			while (count >= 8) {
++				*(unsigned long long *) d =
++				    *(unsigned long long *) s;
++				d += 8;
++				s += 8;
++				count -= 8;
++			}
++		}
 +
-+	asm volatile("gettr	tr0, %1\n\t"
-+		     "putcon	%0, " __CTC "\n\t"
-+		     "and	%2, r63, %2\n\t"
-+		     "pta	$+4, tr0\n\t"
-+		     "beq/l	%2, r63, tr0\n\t"
-+		     "ptabs	%1, tr0\n\t"
-+		     "getcon	" __CTC ", %0\n\t"
-+		: "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
-+		: "0" (0));
-+	local_irq_disable();
-+	/*
-+	 * SH-3:
-+	 * CPU clock = 4 stages * loop
-+	 * tst    rm,rm      if id ex
-+	 * bt/s   1b            if id ex
-+	 * add    #1,rd            if id ex
-+         *                            (if) pipe line stole
-+	 * tst    rm,rm                  if id ex
-+         * ....
-+	 *
-+	 *
-+	 * SH-4:
-+	 * CPU clock = 6 stages * loop
-+	 * I don't know why.
-+         * ....
-+	 *
-+	 * SH-5:
-+	 * Use CTC register to count.  This approach returns the right value
-+	 * even if the I-cache is disabled (e.g. whilst debugging.)
-+	 *
-+	 */
++		if (((((unsigned long) d) & 0x3) == 0) &&
++		    ((((unsigned long) s) & 0x3) == 0)) {
++			while (count >= 4) {
++				*(unsigned long *) d = *(unsigned long *) s;
++				d += 4;
++				s += 4;
++				count -= 4;
++			}
++		}
 +
-+	count = ctc_val_init - ctc_val; /* CTC counts down */
++		if (((((unsigned long) d) & 0x1) == 0) &&
++		    ((((unsigned long) s) & 0x1) == 0)) {
++			while (count >= 2) {
++				*(unsigned short *) d = *(unsigned short *) s;
++				d += 2;
++				s += 2;
++				count -= 2;
++			}
++		}
++	}
 +
-+	/*
-+	 * This really is count by the number of clock cycles
-+         * by the ratio between a complete R64CNT
-+         * wrap-around (128) and CUI interrupt being raised (64).
-+	 */
-+	return count*2;
-+}
++	while (count--) {
++		*d++ = *s++;
++	}
 +
-+static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
-+{
-+	struct pt_regs *regs = get_irq_regs();
++	return d;
++}
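
For reference, the copy strategy in the new arch/sh/lib64/memcpy.c above (byte-copy the head until the destination is 8-byte aligned, then move 8-byte words while both pointers stay aligned, then finish the tail byte by byte) can be restated and sanity-checked on a host with a small sketch like the following. This is illustrative only; it is not part of the patch, and copy_aligned() is a made-up stand-in, not the kernel routine.

/* Simplified restatement of the alignment strategy used above. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void *copy_aligned(void *dest, const void *src, size_t count)
{
	char *d = dest;
	const char *s = src;

	/* Head: copy bytes until the destination is 8-byte aligned. */
	while (count && ((uintptr_t)d & 7)) {
		*d++ = *s++;
		count--;
	}
	/* Body: 8 bytes at a time, but only if the source is aligned too. */
	if (((uintptr_t)s & 7) == 0) {
		while (count >= 8) {
			*(uint64_t *)d = *(const uint64_t *)s;
			d += 8;
			s += 8;
			count -= 8;
		}
	}
	/* Tail: whatever is left, byte by byte. */
	while (count--)
		*d++ = *s++;
	return dest;
}

int main(void)
{
	unsigned char src[64], dst[64];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (unsigned char)i;
	memset(dst, 0, sizeof(dst));

	/* Deliberately misaligned source and destination. */
	copy_aligned(dst + 3, src + 5, 40);
	printf("%s\n", memcmp(dst + 3, src + 5, 40) ? "mismatch" : "ok");
	return 0;
}
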
+diff --git a/arch/sh/lib64/panic.c b/arch/sh/lib64/panic.c
+new file mode 100644
+index 0000000..ff559e2
+--- /dev/null
++++ b/arch/sh/lib64/panic.c
+@@ -0,0 +1,58 @@
++/*
++ * Copyright (C) 2003  Richard Curnow, SuperH UK Limited
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
 +
-+	ctrl_outb(0, RTC_RCR1);	/* Disable Carry Interrupts */
-+	regs->regs[3] = 1;	/* Using r3 */
++#include <linux/kernel.h>
++#include <asm/io.h>
++#include <asm/cpu/registers.h>
 +
-+	return IRQ_HANDLED;
-+}
++/* THIS IS A PHYSICAL ADDRESS */
++#define HDSP2534_ADDR (0x04002100)
 +
-+static struct irqaction irq0  = {
-+	.handler = timer_interrupt,
-+	.flags = IRQF_DISABLED,
-+	.mask = CPU_MASK_NONE,
-+	.name = "timer",
-+};
-+static struct irqaction irq1  = {
-+	.handler = sh64_rtc_interrupt,
-+	.flags = IRQF_DISABLED,
-+	.mask = CPU_MASK_NONE,
-+	.name = "rtc",
-+};
++#ifdef CONFIG_SH_CAYMAN
 +
-+void __init time_init(void)
++static void poor_mans_delay(void)
 +{
-+	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
-+	unsigned long interval;
-+	unsigned long frqcr, ifc, pfc;
-+	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
-+#define bfc_table ifc_table	/* Same */
-+#define pfc_table ifc_table	/* Same */
-+
-+	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
-+	if (!tmu_base) {
-+		panic("Unable to remap TMU\n");
-+	}
++	int i;
++	for (i = 0; i < 2500000; i++) {
++	}		/* poor man's delay */
++}
 +
-+	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
-+	if (!rtc_base) {
-+		panic("Unable to remap RTC\n");
-+	}
++static void show_value(unsigned long x)
++{
++	int i;
++	unsigned nibble;
++	for (i = 0; i < 8; i++) {
++		nibble = ((x >> (i * 4)) & 0xf);
 +
-+	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
-+	if (!cprc_base) {
-+		panic("Unable to remap CPRC\n");
++		ctrl_outb(nibble + ((nibble > 9) ? 55 : 48),
++			  HDSP2534_ADDR + 0xe0 + ((7 - i) << 2));
 +	}
++}
 +
-+	rtc_sh_get_time(&xtime);
-+
-+	setup_irq(TIMER_IRQ, &irq0);
-+	setup_irq(RTC_IRQ, &irq1);
-+
-+	/* Check how fast it is.. */
-+	cpu_clock = get_cpu_hz();
-+
-+	/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
-+	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
-+
-+	free_irq(RTC_IRQ, NULL);
++#endif
 +
-+	printk("CPU clock: %d.%02dMHz\n",
-+	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
-+	{
-+		unsigned short bfc;
-+		frqcr = ctrl_inl(FRQCR);
-+		ifc  = ifc_table[(frqcr>> 6) & 0x0007];
-+		bfc  = bfc_table[(frqcr>> 3) & 0x0007];
-+		pfc  = pfc_table[(frqcr>> 12) & 0x0007];
-+		master_clock = cpu_clock * ifc;
-+		bus_clock = master_clock/bfc;
++void
++panic_handler(unsigned long panicPC, unsigned long panicSSR,
++	      unsigned long panicEXPEVT)
++{
++#ifdef CONFIG_SH_CAYMAN
++	while (1) {
++		/* This piece of code displays the PC on the LED display */
++		show_value(panicPC);
++		poor_mans_delay();
++		show_value(panicSSR);
++		poor_mans_delay();
++		show_value(panicEXPEVT);
++		poor_mans_delay();
 +	}
++#endif
 +
-+	printk("Bus clock: %d.%02dMHz\n",
-+	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
-+	module_clock = master_clock/pfc;
-+	printk("Module clock: %d.%02dMHz\n",
-+	       (module_clock/1000000), (module_clock % 1000000)/10000);
-+	interval = (module_clock/(HZ*4));
++	/* Never return from the panic handler */
++	for (;;) ;
 +
-+	printk("Interval = %ld\n", interval);
++}
+diff --git a/arch/sh/lib64/udelay.c b/arch/sh/lib64/udelay.c
+new file mode 100644
+index 0000000..23c7d17
+--- /dev/null
++++ b/arch/sh/lib64/udelay.c
+@@ -0,0 +1,56 @@
++/*
++ * arch/sh/lib64/udelay.c
++ *
++ * Delay routines, using a pre-computed "loops_per_jiffy" value.
++ *
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003, 2004  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/sched.h>
++#include <asm/param.h>
 +
-+	current_cpu_data.cpu_clock    = cpu_clock;
-+	current_cpu_data.master_clock = master_clock;
-+	current_cpu_data.bus_clock    = bus_clock;
-+	current_cpu_data.module_clock = module_clock;
++/*
++ * Use only for very small delays (< 1 msec).
++ *
++ * The active part of our cycle counter is only 32-bits wide, and
++ * we're treating the difference between two marks as signed.  On
++ * a 1GHz box, that's about 2 seconds.
++ */
 +
-+	/* Start TMU0 */
-+	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
-+	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
-+	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
-+	ctrl_outl(interval, TMU0_TCOR);
-+	ctrl_outl(interval, TMU0_TCNT);
-+	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
++void __delay(int loops)
++{
++	long long dummy;
++	__asm__ __volatile__("gettr	tr0, %1\n\t"
++			     "pta	$+4, tr0\n\t"
++			     "addi	%0, -1, %0\n\t"
++			     "bne	%0, r63, tr0\n\t"
++			     "ptabs	%1, tr0\n\t":"=r"(loops),
++			     "=r"(dummy)
++			     :"0"(loops));
 +}
 +
-+void enter_deep_standby(void)
++void __udelay(unsigned long long usecs, unsigned long lpj)
 +{
-+	/* Disable watchdog timer */
-+	ctrl_outl(0xa5000000, WTCSR);
-+	/* Configure deep standby on sleep */
-+	ctrl_outl(0x03, STBCR);
-+
-+#ifdef CONFIG_SH_ALPHANUMERIC
-+	{
-+		extern void mach_alphanum(int position, unsigned char value);
-+		extern void mach_alphanum_brightness(int setting);
-+		char halted[] = "Halted. ";
-+		int i;
-+		mach_alphanum_brightness(6); /* dimmest setting above off */
-+		for (i=0; i<8; i++) {
-+			mach_alphanum(i, halted[i]);
-+		}
-+		asm __volatile__ ("synco");
-+	}
-+#endif
-+
-+	asm __volatile__ ("sleep");
-+	asm __volatile__ ("synci");
-+	asm __volatile__ ("nop");
-+	asm __volatile__ ("nop");
-+	asm __volatile__ ("nop");
-+	asm __volatile__ ("nop");
-+	panic("Unexpected wakeup!\n");
++	usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj;
++	__delay((long long) usecs >> 32);
 +}
 +
-+static struct resource rtc_resources[] = {
-+	[0] = {
-+		/* RTC base, filled in by rtc_init */
-+		.flags	= IORESOURCE_IO,
-+	},
-+	[1] = {
-+		/* Period IRQ */
-+		.start	= IRQ_PRI,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+	[2] = {
-+		/* Carry IRQ */
-+		.start	= IRQ_CUI,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+	[3] = {
-+		/* Alarm IRQ */
-+		.start	= IRQ_ATI,
-+		.flags	= IORESOURCE_IRQ,
-+	},
-+};
-+
-+static struct platform_device rtc_device = {
-+	.name		= "sh-rtc",
-+	.id		= -1,
-+	.num_resources	= ARRAY_SIZE(rtc_resources),
-+	.resource	= rtc_resources,
-+};
++void __ndelay(unsigned long long nsecs, unsigned long lpj)
++{
++	nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj;
++	__delay((long long) nsecs >> 32);
++}
 +
-+static int __init rtc_init(void)
++void udelay(unsigned long usecs)
 +{
-+	rtc_resources[0].start	= rtc_base;
-+	rtc_resources[0].end	= rtc_resources[0].start + 0x58 - 1;
++	__udelay(usecs, cpu_data[raw_smp_processor_id()].loops_per_jiffy);
++}
 +
-+	return platform_device_register(&rtc_device);
++void ndelay(unsigned long nsecs)
++{
++	__ndelay(nsecs, cpu_data[raw_smp_processor_id()].loops_per_jiffy);
 +}
-+device_initcall(rtc_init);
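
The scaling in __udelay() above is 32.32 fixed point: loops = usecs * lpj * HZ / 1000000, computed as usecs * ((HZ << 32) / 1000000) * lpj and then shifted right by 32, which is why the comment restricts it to very small delays before the 64-bit intermediate overflows. A host-side sketch of the arithmetic, with assumed example values for HZ and loops_per_jiffy (the real ones come from the kernel configuration and calibration):

#include <stdio.h>

#define HZ 100ULL		/* assumed tick rate for illustration */

int main(void)
{
	unsigned long long lpj = 1250000;	/* example loops_per_jiffy */
	unsigned long long usecs = 50;		/* requested delay in us */

	/* Same computation as __udelay(): scale by (HZ << 32) / 10^6,
	 * multiply by lpj, then drop the 32 fractional bits. */
	unsigned long long scaled = usecs * ((HZ << 32) / 1000000) * lpj;
	unsigned long long loops = scaled >> 32;

	/* Exact reference; the fixed-point result rounds down slightly
	 * because (HZ << 32) / 1000000 truncates. */
	double exact = (double) usecs * lpj * HZ / 1000000.0;

	printf("fixed-point loops = %llu, exact = %.1f\n", loops, exact);
	return 0;
}
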
-diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
-index 82de689..499e07b 100644
---- a/arch/sh/kernel/timers/timer-cmt.c
-+++ b/arch/sh/kernel/timers/timer-cmt.c
-@@ -31,7 +31,9 @@
- #define cmt_clock_enable() do {	ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
- #define CMT_CMCSR_INIT	0x0040
- #define CMT_CMCSR_CALIB	0x0000
--#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
-+#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \
-+      defined(CONFIG_CPU_SUBTYPE_SH7206) || \
-+      defined(CONFIG_CPU_SUBTYPE_SH7263)
- #define CMT_CMSTR	0xfffec000
- #define CMT_CMCSR_0	0xfffec002
- #define CMT_CMCNT_0	0xfffec004
-diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
-index 628ec9a..8935570 100644
---- a/arch/sh/kernel/timers/timer-tmu.c
-+++ b/arch/sh/kernel/timers/timer-tmu.c
-@@ -174,6 +174,7 @@ static int tmu_timer_init(void)
- 	tmu_timer_stop();
- 
- #if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
-+    !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
-     !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
-     !defined(CONFIG_CPU_SUBTYPE_SH7785) && \
-     !defined(CONFIG_CPU_SUBTYPE_SHX3)
-diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
-index cf99111..a3bdc68 100644
---- a/arch/sh/kernel/traps.c
-+++ b/arch/sh/kernel/traps.c
-@@ -1,947 +1,68 @@
--/*
-- * 'traps.c' handles hardware traps and faults after we have saved some
-- * state in 'entry.S'.
-- *
-- *  SuperH version: Copyright (C) 1999 Niibe Yutaka
-- *                  Copyright (C) 2000 Philipp Rumpf
-- *                  Copyright (C) 2000 David Howells
-- *                  Copyright (C) 2002 - 2007 Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/kernel.h>
--#include <linux/ptrace.h>
--#include <linux/init.h>
--#include <linux/spinlock.h>
--#include <linux/module.h>
--#include <linux/kallsyms.h>
--#include <linux/io.h>
- #include <linux/bug.h>
--#include <linux/debug_locks.h>
-+#include <linux/io.h>
-+#include <linux/types.h>
- #include <linux/kdebug.h>
--#include <linux/kexec.h>
--#include <linux/limits.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
- #include <asm/system.h>
--#include <asm/uaccess.h>
--
--#ifdef CONFIG_SH_KGDB
--#include <asm/kgdb.h>
--#define CHK_REMOTE_DEBUG(regs)			\
--{						\
--	if (kgdb_debug_hook && !user_mode(regs))\
--		(*kgdb_debug_hook)(regs);       \
--}
--#else
--#define CHK_REMOTE_DEBUG(regs)
--#endif
--
--#ifdef CONFIG_CPU_SH2
--# define TRAP_RESERVED_INST	4
--# define TRAP_ILLEGAL_SLOT_INST	6
--# define TRAP_ADDRESS_ERROR	9
--# ifdef CONFIG_CPU_SH2A
--#  define TRAP_DIVZERO_ERROR	17
--#  define TRAP_DIVOVF_ERROR	18
--# endif
--#else
--#define TRAP_RESERVED_INST	12
--#define TRAP_ILLEGAL_SLOT_INST	13
--#endif
--
--static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
--{
--	unsigned long p;
--	int i;
--
--	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
--
--	for (p = bottom & ~31; p < top; ) {
--		printk("%04lx: ", p & 0xffff);
--
--		for (i = 0; i < 8; i++, p += 4) {
--			unsigned int val;
--
--			if (p < bottom || p >= top)
--				printk("         ");
--			else {
--				if (__get_user(val, (unsigned int __user *)p)) {
--					printk("\n");
--					return;
--				}
--				printk("%08x ", val);
--			}
--		}
--		printk("\n");
--	}
--}
--
--static DEFINE_SPINLOCK(die_lock);
--
--void die(const char * str, struct pt_regs * regs, long err)
--{
--	static int die_counter;
--
--	oops_enter();
--
--	console_verbose();
--	spin_lock_irq(&die_lock);
--	bust_spinlocks(1);
--
--	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
--
--	CHK_REMOTE_DEBUG(regs);
--	print_modules();
--	show_regs(regs);
--
--	printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
--			task_pid_nr(current), task_stack_page(current) + 1);
--
--	if (!user_mode(regs) || in_interrupt())
--		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
--			 (unsigned long)task_stack_page(current));
--
--	bust_spinlocks(0);
--	add_taint(TAINT_DIE);
--	spin_unlock_irq(&die_lock);
--
--	if (kexec_should_crash(current))
--		crash_kexec(regs);
--
--	if (in_interrupt())
--		panic("Fatal exception in interrupt");
--
--	if (panic_on_oops)
--		panic("Fatal exception");
--
--	oops_exit();
--	do_exit(SIGSEGV);
--}
--
--static inline void die_if_kernel(const char *str, struct pt_regs *regs,
--				 long err)
--{
--	if (!user_mode(regs))
--		die(str, regs, err);
--}
--
--/*
-- * try and fix up kernelspace address errors
-- * - userspace errors just cause EFAULT to be returned, resulting in SEGV
-- * - kernel/userspace interfaces cause a jump to an appropriate handler
-- * - other kernel errors are bad
-- * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
-- */
--static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
--{
--	if (!user_mode(regs)) {
--		const struct exception_table_entry *fixup;
--		fixup = search_exception_tables(regs->pc);
--		if (fixup) {
--			regs->pc = fixup->fixup;
--			return 0;
--		}
--		die(str, regs, err);
--	}
--	return -EFAULT;
--}
--
--/*
-- * handle an instruction that does an unaligned memory access by emulating the
-- * desired behaviour
-- * - note that PC _may not_ point to the faulting instruction
-- *   (if that instruction is in a branch delay slot)
-- * - return 0 if emulation okay, -EFAULT on existential error
-- */
--static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
--{
--	int ret, index, count;
--	unsigned long *rm, *rn;
--	unsigned char *src, *dst;
--
--	index = (instruction>>8)&15;	/* 0x0F00 */
--	rn = &regs->regs[index];
--
--	index = (instruction>>4)&15;	/* 0x00F0 */
--	rm = &regs->regs[index];
--
--	count = 1<<(instruction&3);
--
--	ret = -EFAULT;
--	switch (instruction>>12) {
--	case 0: /* mov.[bwl] to/from memory via r0+rn */
--		if (instruction & 8) {
--			/* from memory */
--			src = (unsigned char*) *rm;
--			src += regs->regs[0];
--			dst = (unsigned char*) rn;
--			*(unsigned long*)dst = 0;
--
--#ifdef __LITTLE_ENDIAN__
--			if (copy_from_user(dst, src, count))
--				goto fetch_fault;
--
--			if ((count == 2) && dst[1] & 0x80) {
--				dst[2] = 0xff;
--				dst[3] = 0xff;
--			}
--#else
--			dst += 4-count;
--
--			if (__copy_user(dst, src, count))
--				goto fetch_fault;
--
--			if ((count == 2) && dst[2] & 0x80) {
--				dst[0] = 0xff;
--				dst[1] = 0xff;
--			}
--#endif
--		} else {
--			/* to memory */
--			src = (unsigned char*) rm;
--#if !defined(__LITTLE_ENDIAN__)
--			src += 4-count;
--#endif
--			dst = (unsigned char*) *rn;
--			dst += regs->regs[0];
--
--			if (copy_to_user(dst, src, count))
--				goto fetch_fault;
--		}
--		ret = 0;
--		break;
--
--	case 1: /* mov.l Rm,@(disp,Rn) */
--		src = (unsigned char*) rm;
--		dst = (unsigned char*) *rn;
--		dst += (instruction&0x000F)<<2;
--
--		if (copy_to_user(dst,src,4))
--			goto fetch_fault;
--		ret = 0;
--		break;
--
--	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
--		if (instruction & 4)
--			*rn -= count;
--		src = (unsigned char*) rm;
--		dst = (unsigned char*) *rn;
--#if !defined(__LITTLE_ENDIAN__)
--		src += 4-count;
--#endif
--		if (copy_to_user(dst, src, count))
--			goto fetch_fault;
--		ret = 0;
--		break;
--
--	case 5: /* mov.l @(disp,Rm),Rn */
--		src = (unsigned char*) *rm;
--		src += (instruction&0x000F)<<2;
--		dst = (unsigned char*) rn;
--		*(unsigned long*)dst = 0;
--
--		if (copy_from_user(dst,src,4))
--			goto fetch_fault;
--		ret = 0;
--		break;
- 
--	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
--		src = (unsigned char*) *rm;
--		if (instruction & 4)
--			*rm += count;
--		dst = (unsigned char*) rn;
--		*(unsigned long*)dst = 0;
--
--#ifdef __LITTLE_ENDIAN__
--		if (copy_from_user(dst, src, count))
--			goto fetch_fault;
--
--		if ((count == 2) && dst[1] & 0x80) {
--			dst[2] = 0xff;
--			dst[3] = 0xff;
--		}
--#else
--		dst += 4-count;
--
--		if (copy_from_user(dst, src, count))
--			goto fetch_fault;
--
--		if ((count == 2) && dst[2] & 0x80) {
--			dst[0] = 0xff;
--			dst[1] = 0xff;
--		}
--#endif
--		ret = 0;
--		break;
--
--	case 8:
--		switch ((instruction&0xFF00)>>8) {
--		case 0x81: /* mov.w R0,@(disp,Rn) */
--			src = (unsigned char*) &regs->regs[0];
--#if !defined(__LITTLE_ENDIAN__)
--			src += 2;
--#endif
--			dst = (unsigned char*) *rm; /* called Rn in the spec */
--			dst += (instruction&0x000F)<<1;
--
--			if (copy_to_user(dst, src, 2))
--				goto fetch_fault;
--			ret = 0;
--			break;
--
--		case 0x85: /* mov.w @(disp,Rm),R0 */
--			src = (unsigned char*) *rm;
--			src += (instruction&0x000F)<<1;
--			dst = (unsigned char*) &regs->regs[0];
--			*(unsigned long*)dst = 0;
--
--#if !defined(__LITTLE_ENDIAN__)
--			dst += 2;
--#endif
--
--			if (copy_from_user(dst, src, 2))
--				goto fetch_fault;
--
--#ifdef __LITTLE_ENDIAN__
--			if (dst[1] & 0x80) {
--				dst[2] = 0xff;
--				dst[3] = 0xff;
--			}
--#else
--			if (dst[2] & 0x80) {
--				dst[0] = 0xff;
--				dst[1] = 0xff;
--			}
--#endif
--			ret = 0;
--			break;
--		}
--		break;
--	}
--	return ret;
--
-- fetch_fault:
--	/* Argh. Address not only misaligned but also non-existent.
--	 * Raise an EFAULT and see if it's trapped
--	 */
--	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
--}
--
--/*
-- * emulate the instruction in the delay slot
-- * - fetches the instruction from PC+2
-- */
--static inline int handle_unaligned_delayslot(struct pt_regs *regs)
-+#ifdef CONFIG_BUG
-+static void handle_BUG(struct pt_regs *regs)
- {
--	u16 instruction;
--
--	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
--		/* the instruction-fetch faulted */
--		if (user_mode(regs))
--			return -EFAULT;
--
--		/* kernel */
--		die("delay-slot-insn faulting in handle_unaligned_delayslot",
--		    regs, 0);
-+	enum bug_trap_type tt;
-+	tt = report_bug(regs->pc, regs);
-+	if (tt == BUG_TRAP_TYPE_WARN) {
-+		regs->pc += instruction_size(regs->pc);
-+		return;
- 	}
- 
--	return handle_unaligned_ins(instruction,regs);
-+	die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
- }
- 
--/*
-- * handle an instruction that does an unaligned memory access
-- * - have to be careful of branch delay-slot instructions that fault
-- *  SH3:
-- *   - if the branch would be taken PC points to the branch
-- *   - if the branch would not be taken, PC points to delay-slot
-- *  SH4:
-- *   - PC always points to delayed branch
-- * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
-- */
--
--/* Macros to determine offset from current PC for branch instructions */
--/* Explicit type coercion is used to force sign extension where needed */
--#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
--#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
--
--/*
-- * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
-- * opcodes..
-- */
--#ifndef CONFIG_CPU_SH2A
--static int handle_unaligned_notify_count = 10;
--
--static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
-+int is_valid_bugaddr(unsigned long addr)
- {
--	u_int rm;
--	int ret, index;
--
--	index = (instruction>>8)&15;	/* 0x0F00 */
--	rm = regs->regs[index];
--
--	/* shout about the first ten userspace fixups */
--	if (user_mode(regs) && handle_unaligned_notify_count>0) {
--		handle_unaligned_notify_count--;
--
--		printk(KERN_NOTICE "Fixing up unaligned userspace access "
--		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
--		       current->comm, task_pid_nr(current),
--		       (u16 *)regs->pc, instruction);
--	}
--
--	ret = -EFAULT;
--	switch (instruction&0xF000) {
--	case 0x0000:
--		if (instruction==0x000B) {
--			/* rts */
--			ret = handle_unaligned_delayslot(regs);
--			if (ret==0)
--				regs->pc = regs->pr;
--		}
--		else if ((instruction&0x00FF)==0x0023) {
--			/* braf @Rm */
--			ret = handle_unaligned_delayslot(regs);
--			if (ret==0)
--				regs->pc += rm + 4;
--		}
--		else if ((instruction&0x00FF)==0x0003) {
--			/* bsrf @Rm */
--			ret = handle_unaligned_delayslot(regs);
--			if (ret==0) {
--				regs->pr = regs->pc + 4;
--				regs->pc += rm + 4;
--			}
--		}
--		else {
--			/* mov.[bwl] to/from memory via r0+rn */
--			goto simple;
--		}
--		break;
--
--	case 0x1000: /* mov.l Rm,@(disp,Rn) */
--		goto simple;
--
--	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
--		goto simple;
--
--	case 0x4000:
--		if ((instruction&0x00FF)==0x002B) {
--			/* jmp @Rm */
--			ret = handle_unaligned_delayslot(regs);
--			if (ret==0)
--				regs->pc = rm;
--		}
--		else if ((instruction&0x00FF)==0x000B) {
--			/* jsr @Rm */
--			ret = handle_unaligned_delayslot(regs);
--			if (ret==0) {
--				regs->pr = regs->pc + 4;
--				regs->pc = rm;
--			}
--		}
--		else {
--			/* mov.[bwl] to/from memory via r0+rn */
--			goto simple;
--		}
--		break;
--
--	case 0x5000: /* mov.l @(disp,Rm),Rn */
--		goto simple;
--
--	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
--		goto simple;
--
--	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
--		switch (instruction&0x0F00) {
--		case 0x0100: /* mov.w R0,@(disp,Rm) */
--			goto simple;
--		case 0x0500: /* mov.w @(disp,Rm),R0 */
--			goto simple;
--		case 0x0B00: /* bf   lab - no delayslot*/
--			break;
--		case 0x0F00: /* bf/s lab */
--			ret = handle_unaligned_delayslot(regs);
--			if (ret==0) {
--#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
--				if ((regs->sr & 0x00000001) != 0)
--					regs->pc += 4; /* next after slot */
--				else
--#endif
--					regs->pc += SH_PC_8BIT_OFFSET(instruction);
--			}
--			break;
--		case 0x0900: /* bt   lab - no delayslot */
--			break;
--		case 0x0D00: /* bt/s lab */
--			ret = handle_unaligned_delayslot(regs);
--			if (ret==0) {
--#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
--				if ((regs->sr & 0x00000001) == 0)
--					regs->pc += 4; /* next after slot */
--				else
--#endif
--					regs->pc += SH_PC_8BIT_OFFSET(instruction);
--			}
--			break;
--		}
--		break;
--
--	case 0xA000: /* bra label */
--		ret = handle_unaligned_delayslot(regs);
--		if (ret==0)
--			regs->pc += SH_PC_12BIT_OFFSET(instruction);
--		break;
--
--	case 0xB000: /* bsr label */
--		ret = handle_unaligned_delayslot(regs);
--		if (ret==0) {
--			regs->pr = regs->pc + 4;
--			regs->pc += SH_PC_12BIT_OFFSET(instruction);
--		}
--		break;
--	}
--	return ret;
--
--	/* handle non-delay-slot instruction */
-- simple:
--	ret = handle_unaligned_ins(instruction,regs);
--	if (ret==0)
--		regs->pc += instruction_size(instruction);
--	return ret;
-+	return addr >= PAGE_OFFSET;
- }
--#endif /* CONFIG_CPU_SH2A */
--
--#ifdef CONFIG_CPU_HAS_SR_RB
--#define lookup_exception_vector(x)	\
--	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
--#else
--#define lookup_exception_vector(x)	\
--	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
- #endif
- 
- /*
-- * Handle various address error exceptions:
-- *  - instruction address error:
-- *       misaligned PC
-- *       PC >= 0x80000000 in user mode
-- *  - data address error (read and write)
-- *       misaligned data access
-- *       access to >= 0x80000000 is user mode
-- * Unfortuntaly we can't distinguish between instruction address error
-- * and data address errors caused by read accesses.
-+ * Generic trap handler.
-  */
--asmlinkage void do_address_error(struct pt_regs *regs,
--				 unsigned long writeaccess,
--				 unsigned long address)
-+BUILD_TRAP_HANDLER(debug)
- {
--	unsigned long error_code = 0;
--	mm_segment_t oldfs;
--	siginfo_t info;
--#ifndef CONFIG_CPU_SH2A
--	u16 instruction;
--	int tmp;
--#endif
--
--	/* Intentional ifdef */
--#ifdef CONFIG_CPU_HAS_SR_RB
--	lookup_exception_vector(error_code);
--#endif
--
--	oldfs = get_fs();
--
--	if (user_mode(regs)) {
--		int si_code = BUS_ADRERR;
--
--		local_irq_enable();
-+	TRAP_HANDLER_DECL;
- 
--		/* bad PC is not something we can fix */
--		if (regs->pc & 1) {
--			si_code = BUS_ADRALN;
--			goto uspace_segv;
--		}
-+	/* Rewind */
-+	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
- 
--#ifndef CONFIG_CPU_SH2A
--		set_fs(USER_DS);
--		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
--			/* Argh. Fault on the instruction itself.
--			   This should never happen non-SMP
--			*/
--			set_fs(oldfs);
--			goto uspace_segv;
--		}
--
--		tmp = handle_unaligned_access(instruction, regs);
--		set_fs(oldfs);
--
--		if (tmp==0)
--			return; /* sorted */
--#endif
--
--uspace_segv:
--		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
--		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
--		       regs->pr);
--
--		info.si_signo = SIGBUS;
--		info.si_errno = 0;
--		info.si_code = si_code;
--		info.si_addr = (void __user *)address;
--		force_sig_info(SIGBUS, &info, current);
--	} else {
--		if (regs->pc & 1)
--			die("unaligned program counter", regs, error_code);
--
--#ifndef CONFIG_CPU_SH2A
--		set_fs(KERNEL_DS);
--		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
--			/* Argh. Fault on the instruction itself.
--			   This should never happen non-SMP
--			*/
--			set_fs(oldfs);
--			die("insn faulting in do_address_error", regs, 0);
--		}
--
--		handle_unaligned_access(instruction, regs);
--		set_fs(oldfs);
--#else
--		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
--		       "access\n", current->comm);
-+	if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
-+		       SIGTRAP) == NOTIFY_STOP)
-+		return;
- 
--		force_sig(SIGSEGV, current);
--#endif
--	}
-+	force_sig(SIGTRAP, current);
- }
- 
--#ifdef CONFIG_SH_DSP
- /*
-- *	SH-DSP support gerg at snapgear.com.
-+ * Special handler for BUG() traps.
-  */
--int is_dsp_inst(struct pt_regs *regs)
-+BUILD_TRAP_HANDLER(bug)
- {
--	unsigned short inst = 0;
--
--	/*
--	 * Safe guard if DSP mode is already enabled or we're lacking
--	 * the DSP altogether.
--	 */
--	if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
--		return 0;
--
--	get_user(inst, ((unsigned short *) regs->pc));
--
--	inst &= 0xf000;
--
--	/* Check for any type of DSP or support instruction */
--	if ((inst == 0xf000) || (inst == 0x4000))
--		return 1;
--
--	return 0;
--}
--#else
--#define is_dsp_inst(regs)	(0)
--#endif /* CONFIG_SH_DSP */
-+	TRAP_HANDLER_DECL;
- 
--#ifdef CONFIG_CPU_SH2A
--asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
--				unsigned long r6, unsigned long r7,
--				struct pt_regs __regs)
--{
--	siginfo_t info;
+diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
+index 1265f20..f549b8c 100644
+--- a/arch/sh/mm/Kconfig
++++ b/arch/sh/mm/Kconfig
+@@ -1,193 +1,3 @@
+-#
+-# Processor families
+-#
+-config CPU_SH2
+-	bool
 -
--	switch (r4) {
--	case TRAP_DIVZERO_ERROR:
--		info.si_code = FPE_INTDIV;
--		break;
--	case TRAP_DIVOVF_ERROR:
--		info.si_code = FPE_INTOVF;
--		break;
--	}
+-config CPU_SH2A
+-	bool
+-	select CPU_SH2
 -
--	force_sig_info(SIGFPE, &info, current);
--}
--#endif
+-config CPU_SH3
+-	bool
+-	select CPU_HAS_INTEVT
+-	select CPU_HAS_SR_RB
 -
--/* arch/sh/kernel/cpu/sh4/fpu.c */
--extern int do_fpu_inst(unsigned short, struct pt_regs *);
--extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
--		unsigned long r6, unsigned long r7, struct pt_regs __regs);
+-config CPU_SH4
+-	bool
+-	select CPU_HAS_INTEVT
+-	select CPU_HAS_SR_RB
+-	select CPU_HAS_PTEA if !CPU_SH4A || CPU_SHX2
+-	select CPU_HAS_FPU if !CPU_SH4AL_DSP
 -
--asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
--				unsigned long r6, unsigned long r7,
--				struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	unsigned long error_code;
--	struct task_struct *tsk = current;
+-config CPU_SH4A
+-	bool
+-	select CPU_SH4
 -
--#ifdef CONFIG_SH_FPU_EMU
--	unsigned short inst = 0;
--	int err;
+-config CPU_SH4AL_DSP
+-	bool
+-	select CPU_SH4A
+-	select CPU_HAS_DSP
 -
--	get_user(inst, (unsigned short*)regs->pc);
+-config CPU_SHX2
+-	bool
 -
--	err = do_fpu_inst(inst, regs);
--	if (!err) {
--		regs->pc += instruction_size(inst);
--		return;
--	}
--	/* not a FPU inst. */
--#endif
-+	/* Rewind */
-+	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
- 
--#ifdef CONFIG_SH_DSP
--	/* Check if it's a DSP instruction */
--	if (is_dsp_inst(regs)) {
--		/* Enable DSP mode, and restart instruction. */
--		regs->sr |= SR_DSP;
-+	if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
-+		       SIGTRAP) == NOTIFY_STOP)
- 		return;
--	}
--#endif
+-config CPU_SHX3
+-	bool
 -
--	lookup_exception_vector(error_code);
+-choice
+-	prompt "Processor sub-type selection"
 -
--	local_irq_enable();
--	CHK_REMOTE_DEBUG(regs);
--	force_sig(SIGILL, tsk);
--	die_if_no_fixup("reserved instruction", regs, error_code);
--}
+-#
+-# Processor subtypes
+-#
 -
--#ifdef CONFIG_SH_FPU_EMU
--static int emulate_branch(unsigned short inst, struct pt_regs* regs)
--{
--	/*
--	 * bfs: 8fxx: PC+=d*2+4;
--	 * bts: 8dxx: PC+=d*2+4;
--	 * bra: axxx: PC+=D*2+4;
--	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
--	 * braf:0x23: PC+=Rn*2+4;
--	 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
--	 * jmp: 4x2b: PC=Rn;
--	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
--	 * rts: 000b: PC=PR;
--	 */
--	if ((inst & 0xfd00) == 0x8d00) {
--		regs->pc += SH_PC_8BIT_OFFSET(inst);
--		return 0;
--	}
+-# SH-2 Processor Support
 -
--	if ((inst & 0xe000) == 0xa000) {
--		regs->pc += SH_PC_12BIT_OFFSET(inst);
--		return 0;
--	}
+-config CPU_SUBTYPE_SH7619
+-	bool "Support SH7619 processor"
+-	select CPU_SH2
 -
--	if ((inst & 0xf0df) == 0x0003) {
--		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
--		return 0;
--	}
+-# SH-2A Processor Support
 -
--	if ((inst & 0xf0df) == 0x400b) {
--		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
--		return 0;
--	}
+-config CPU_SUBTYPE_SH7206
+-	bool "Support SH7206 processor"
+-	select CPU_SH2A
 -
--	if ((inst & 0xffff) == 0x000b) {
--		regs->pc = regs->pr;
--		return 0;
--	}
+-# SH-3 Processor Support
 -
--	return 1;
--}
--#endif
+-config CPU_SUBTYPE_SH7705
+-	bool "Support SH7705 processor"
+-	select CPU_SH3
 -
--asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
--				unsigned long r6, unsigned long r7,
--				struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	unsigned long error_code;
--	struct task_struct *tsk = current;
--#ifdef CONFIG_SH_FPU_EMU
--	unsigned short inst = 0;
+-config CPU_SUBTYPE_SH7706
+-	bool "Support SH7706 processor"
+-	select CPU_SH3
+-	help
+-	  Select SH7706 if you have a 133 Mhz SH-3 HD6417706 CPU.
 -
--	get_user(inst, (unsigned short *)regs->pc + 1);
--	if (!do_fpu_inst(inst, regs)) {
--		get_user(inst, (unsigned short *)regs->pc);
--		if (!emulate_branch(inst, regs))
--			return;
--		/* fault in branch.*/
--	}
--	/* not a FPU inst. */
--#endif
+-config CPU_SUBTYPE_SH7707
+-	bool "Support SH7707 processor"
+-	select CPU_SH3
+-	help
+-	  Select SH7707 if you have a  60 Mhz SH-3 HD6417707 CPU.
 -
--	lookup_exception_vector(error_code);
+-config CPU_SUBTYPE_SH7708
+-	bool "Support SH7708 processor"
+-	select CPU_SH3
+-	help
+-	  Select SH7708 if you have a  60 Mhz SH-3 HD6417708S or
+-	  if you have a 100 Mhz SH-3 HD6417708R CPU.
 -
--	local_irq_enable();
--	CHK_REMOTE_DEBUG(regs);
--	force_sig(SIGILL, tsk);
--	die_if_no_fixup("illegal slot instruction", regs, error_code);
--}
+-config CPU_SUBTYPE_SH7709
+-	bool "Support SH7709 processor"
+-	select CPU_SH3
+-	help
+-	  Select SH7709 if you have a  80 Mhz SH-3 HD6417709 CPU.
 -
--asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
--				   unsigned long r6, unsigned long r7,
--				   struct pt_regs __regs)
--{
--	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
--	long ex;
+-config CPU_SUBTYPE_SH7710
+-	bool "Support SH7710 processor"
+-	select CPU_SH3
+-	select CPU_HAS_DSP
+-	help
+-	  Select SH7710 if you have a SH3-DSP SH7710 CPU.
 -
--	lookup_exception_vector(ex);
--	die_if_kernel("exception", regs, ex);
--}
+-config CPU_SUBTYPE_SH7712
+-	bool "Support SH7712 processor"
+-	select CPU_SH3
+-	select CPU_HAS_DSP
+-	help
+-	  Select SH7712 if you have a SH3-DSP SH7712 CPU.
 -
--#if defined(CONFIG_SH_STANDARD_BIOS)
--void *gdb_vbr_vector;
+-config CPU_SUBTYPE_SH7720
+-	bool "Support SH7720 processor"
+-	select CPU_SH3
+-	select CPU_HAS_DSP
+-	help
+-	  Select SH7720 if you have a SH3-DSP SH7720 CPU.
 -
--static inline void __init gdb_vbr_init(void)
--{
--	register unsigned long vbr;
+-# SH-4 Processor Support
 -
--	/*
--	 * Read the old value of the VBR register to initialise
--	 * the vector through which debug and BIOS traps are
--	 * delegated by the Linux trap handler.
--	 */
--	asm volatile("stc vbr, %0" : "=r" (vbr));
+-config CPU_SUBTYPE_SH7750
+-	bool "Support SH7750 processor"
+-	select CPU_SH4
+-	help
+-	  Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
 -
--	gdb_vbr_vector = (void *)(vbr + 0x100);
--	printk("Setting GDB trap vector to 0x%08lx\n",
--	       (unsigned long)gdb_vbr_vector);
--}
--#endif
+-config CPU_SUBTYPE_SH7091
+-	bool "Support SH7091 processor"
+-	select CPU_SH4
+-	help
+-	  Select SH7091 if you have an SH-4 based Sega device (such as
+-	  the Dreamcast, Naomi, and Naomi 2).
 -
--void __cpuinit per_cpu_trap_init(void)
--{
--	extern void *vbr_base;
+-config CPU_SUBTYPE_SH7750R
+-	bool "Support SH7750R processor"
+-	select CPU_SH4
 -
--#ifdef CONFIG_SH_STANDARD_BIOS
--	if (raw_smp_processor_id() == 0)
--		gdb_vbr_init();
--#endif
+-config CPU_SUBTYPE_SH7750S
+-	bool "Support SH7750S processor"
+-	select CPU_SH4
 -
--	/* NOTE: The VBR value should be at P1
--	   (or P2, virtural "fixed" address space).
--	   It's definitely should not in physical address.  */
+-config CPU_SUBTYPE_SH7751
+-	bool "Support SH7751 processor"
+-	select CPU_SH4
+-	help
+-	  Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
+-	  or if you have a HD6417751R CPU.
 -
--	asm volatile("ldc	%0, vbr"
--		     : /* no output */
--		     : "r" (&vbr_base)
--		     : "memory");
--}
+-config CPU_SUBTYPE_SH7751R
+-	bool "Support SH7751R processor"
+-	select CPU_SH4
 -
--void *set_exception_table_vec(unsigned int vec, void *handler)
--{
--	extern void *exception_handling_table[];
--	void *old_handler;
+-config CPU_SUBTYPE_SH7760
+-	bool "Support SH7760 processor"
+-	select CPU_SH4
 -
--	old_handler = exception_handling_table[vec];
--	exception_handling_table[vec] = handler;
--	return old_handler;
--}
+-config CPU_SUBTYPE_SH4_202
+-	bool "Support SH4-202 processor"
+-	select CPU_SH4
 -
--extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
--					     unsigned long r6, unsigned long r7,
--					     struct pt_regs __regs);
+-# SH-4A Processor Support
 -
--void __init trap_init(void)
--{
--	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
--	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
+-config CPU_SUBTYPE_SH7770
+-	bool "Support SH7770 processor"
+-	select CPU_SH4A
 -
--#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
--    defined(CONFIG_SH_FPU_EMU)
--	/*
--	 * For SH-4 lacking an FPU, treat floating point instructions as
--	 * reserved. They'll be handled in the math-emu case, or faulted on
--	 * otherwise.
--	 */
--	set_exception_table_evt(0x800, do_reserved_inst);
--	set_exception_table_evt(0x820, do_illegal_slot_inst);
--#elif defined(CONFIG_SH_FPU)
--#ifdef CONFIG_CPU_SUBTYPE_SHX3
--	set_exception_table_evt(0xd80, do_fpu_state_restore);
--	set_exception_table_evt(0xda0, do_fpu_state_restore);
--#else
--	set_exception_table_evt(0x800, do_fpu_state_restore);
--	set_exception_table_evt(0x820, do_fpu_state_restore);
--#endif
--#endif
+-config CPU_SUBTYPE_SH7780
+-	bool "Support SH7780 processor"
+-	select CPU_SH4A
 -
--#ifdef CONFIG_CPU_SH2
--	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
--#endif
--#ifdef CONFIG_CPU_SH2A
--	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
--	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
--#endif
+-config CPU_SUBTYPE_SH7785
+-	bool "Support SH7785 processor"
+-	select CPU_SH4A
+-	select CPU_SHX2
+-	select ARCH_SPARSEMEM_ENABLE
+-	select SYS_SUPPORTS_NUMA
 -
--	/* Setup VBR for boot cpu */
--	per_cpu_trap_init();
--}
- 
- #ifdef CONFIG_BUG
--void handle_BUG(struct pt_regs *regs)
--{
--	enum bug_trap_type tt;
--	tt = report_bug(regs->pc, regs);
--	if (tt == BUG_TRAP_TYPE_WARN) {
--		regs->pc += 2;
--		return;
-+	if (__kernel_text_address(instruction_pointer(regs))) {
-+		opcode_t insn = *(opcode_t *)instruction_pointer(regs);
-+		if (insn == TRAPA_BUG_OPCODE)
-+			handle_BUG(regs);
- 	}
+-config CPU_SUBTYPE_SHX3
+-	bool "Support SH-X3 processor"
+-	select CPU_SH4A
+-	select CPU_SHX3
+-	select ARCH_SPARSEMEM_ENABLE
+-	select SYS_SUPPORTS_NUMA
+-	select SYS_SUPPORTS_SMP
 -
--	die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
--}
+-# SH4AL-DSP Processor Support
 -
--int is_valid_bugaddr(unsigned long addr)
--{
--	return addr >= PAGE_OFFSET;
--}
--#endif
+-config CPU_SUBTYPE_SH7343
+-	bool "Support SH7343 processor"
+-	select CPU_SH4AL_DSP
 -
--void show_trace(struct task_struct *tsk, unsigned long *sp,
--		struct pt_regs *regs)
--{
--	unsigned long addr;
+-config CPU_SUBTYPE_SH7722
+-	bool "Support SH7722 processor"
+-	select CPU_SH4AL_DSP
+-	select CPU_SHX2
+-	select ARCH_SPARSEMEM_ENABLE
+-	select SYS_SUPPORTS_NUMA
 -
--	if (regs && user_mode(regs))
--		return;
+-endchoice
 -
--	printk("\nCall trace: ");
--#ifdef CONFIG_KALLSYMS
--	printk("\n");
- #endif
+ menu "Memory management options"
  
--	while (!kstack_end(sp)) {
--		addr = *sp++;
--		if (kernel_text_address(addr))
--			print_ip_sym(addr);
--	}
+ config QUICKLIST
+@@ -207,7 +17,8 @@ config MMU
+ 
+ config PAGE_OFFSET
+ 	hex
+-	default "0x80000000" if MMU
++	default "0x80000000" if MMU && SUPERH32
++	default "0x20000000" if MMU && SUPERH64
+ 	default "0x00000000"
+ 
+ config MEMORY_START
+@@ -228,17 +39,28 @@ config MEMORY_START
+ 
+ config MEMORY_SIZE
+ 	hex "Physical memory size"
+-	default "0x00400000"
++	default "0x04000000"
+ 	help
+ 	  This sets the default memory size assumed by your SH kernel. It can
+ 	  be overridden as normal by the 'mem=' argument on the kernel command
+ 	  line. If unsure, consult your board specifications or just leave it
+-	  as 0x00400000 which was the default value before this became
++	  as 0x04000000 which was the default value before this became
+ 	  configurable.
+ 
++# Physical addressing modes
++
++config 29BIT
++	def_bool !32BIT
++	depends on SUPERH32
++
+ config 32BIT
++	bool
++	default y if CPU_SH5
++
++config PMB
+ 	bool "Support 32-bit physical addressing through PMB"
+ 	depends on MMU && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
++	select 32BIT
+ 	default y
+ 	help
+ 	  If you say Y here, physical addressing will be extended to
+@@ -256,7 +78,7 @@ config X2TLB
+ 
+ config VSYSCALL
+ 	bool "Support vsyscall page"
+-	depends on MMU
++	depends on MMU && (CPU_SH3 || CPU_SH4)
+ 	default y
+ 	help
+ 	  This will enable support for the kernel mapping a vDSO page
+@@ -335,7 +157,7 @@ config PAGE_SIZE_8KB
+ 
+ config PAGE_SIZE_64KB
+ 	bool "64kB"
+-	depends on CPU_SH4
++	depends on CPU_SH4 || CPU_SH5
+ 	help
+ 	  This enables support for 64kB pages, possible on all SH-4
+ 	  CPUs and later.
+@@ -344,7 +166,7 @@ endchoice
+ 
+ choice
+ 	prompt "HugeTLB page size"
+-	depends on HUGETLB_PAGE && CPU_SH4 && MMU
++	depends on HUGETLB_PAGE && (CPU_SH4 || CPU_SH5) && MMU
+ 	default HUGETLB_PAGE_SIZE_64K
+ 
+ config HUGETLB_PAGE_SIZE_64K
+@@ -365,6 +187,10 @@ config HUGETLB_PAGE_SIZE_64MB
+ 	bool "64MB"
+ 	depends on X2TLB
+ 
++config HUGETLB_PAGE_SIZE_512MB
++	bool "512MB"
++	depends on CPU_SH5
++
+ endchoice
+ 
+ source "mm/Kconfig"
+@@ -392,12 +218,12 @@ config SH_DIRECT_MAPPED
+ 
+ choice
+ 	prompt "Cache mode"
+-	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
++	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
+ 	default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
+ 
+ config CACHE_WRITEBACK
+ 	bool "Write-back"
+-	depends on CPU_SH2A || CPU_SH3 || CPU_SH4
++	depends on CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
+ 
+ config CACHE_WRITETHROUGH
+ 	bool "Write-through"
+diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
+index aa44607..9f4bc3d 100644
+--- a/arch/sh/mm/Makefile
++++ b/arch/sh/mm/Makefile
+@@ -1,37 +1,5 @@
+-#
+-# Makefile for the Linux SuperH-specific parts of the memory manager.
+-#
 -
--	printk("\n");
+-obj-y			:= init.o extable.o consistent.o
 -
--	if (!tsk)
--		tsk = current;
+-ifndef CONFIG_CACHE_OFF
+-obj-$(CONFIG_CPU_SH2)		+= cache-sh2.o
+-obj-$(CONFIG_CPU_SH3)		+= cache-sh3.o
+-obj-$(CONFIG_CPU_SH4)		+= cache-sh4.o
+-obj-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
++ifeq ($(CONFIG_SUPERH32),y)
++include ${srctree}/arch/sh/mm/Makefile_32
++else
++include ${srctree}/arch/sh/mm/Makefile_64
+ endif
 -
--	debug_show_held_locks(tsk);
--}
+-mmu-y			:= tlb-nommu.o pg-nommu.o
+-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o	\
+-			   ioremap.o
 -
--void show_stack(struct task_struct *tsk, unsigned long *sp)
--{
--	unsigned long stack;
+-obj-y			+= $(mmu-y)
 -
--	if (!tsk)
--		tsk = current;
--	if (tsk == current)
--		sp = (unsigned long *)current_stack_pointer;
--	else
--		sp = (unsigned long *)tsk->thread.sp;
+-ifdef CONFIG_DEBUG_FS
+-obj-$(CONFIG_CPU_SH4)	+= cache-debugfs.o
+-endif
 -
--	stack = (unsigned long)sp;
--	dump_mem("Stack: ", stack, THREAD_SIZE +
--		 (unsigned long)task_stack_page(tsk));
--	show_trace(tsk, sp, NULL);
--}
+-ifdef CONFIG_MMU
+-obj-$(CONFIG_CPU_SH3)	+= tlb-sh3.o
+-obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o
+-ifndef CONFIG_CACHE_OFF
+-obj-$(CONFIG_CPU_SH4)		+= pg-sh4.o
+-obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh7705.o
+-endif
+-endif
 -
--void dump_stack(void)
--{
--	show_stack(NULL, NULL);
-+	force_sig(SIGTRAP, current);
- }
--EXPORT_SYMBOL(dump_stack);
-diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
+-obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
+-obj-$(CONFIG_32BIT)		+= pmb.o
+-obj-$(CONFIG_NUMA)		+= numa.o
+-
+-EXTRA_CFLAGS += -Werror
+diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
 new file mode 100644
-index 0000000..2e58f7a
+index 0000000..e295db6
 --- /dev/null
-+++ b/arch/sh/kernel/traps_32.c
-@@ -0,0 +1,919 @@
-+/*
-+ * 'traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'entry.S'.
-+ *
-+ *  SuperH version: Copyright (C) 1999 Niibe Yutaka
-+ *                  Copyright (C) 2000 Philipp Rumpf
-+ *                  Copyright (C) 2000 David Howells
-+ *                  Copyright (C) 2002 - 2007 Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/ptrace.h>
-+#include <linux/init.h>
-+#include <linux/spinlock.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#include <linux/io.h>
-+#include <linux/bug.h>
-+#include <linux/debug_locks.h>
-+#include <linux/kdebug.h>
-+#include <linux/kexec.h>
-+#include <linux/limits.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+
-+#ifdef CONFIG_SH_KGDB
-+#include <asm/kgdb.h>
-+#define CHK_REMOTE_DEBUG(regs)			\
-+{						\
-+	if (kgdb_debug_hook && !user_mode(regs))\
-+		(*kgdb_debug_hook)(regs);       \
-+}
-+#else
-+#define CHK_REMOTE_DEBUG(regs)
-+#endif
-+
-+#ifdef CONFIG_CPU_SH2
-+# define TRAP_RESERVED_INST	4
-+# define TRAP_ILLEGAL_SLOT_INST	6
-+# define TRAP_ADDRESS_ERROR	9
-+# ifdef CONFIG_CPU_SH2A
-+#  define TRAP_DIVZERO_ERROR	17
-+#  define TRAP_DIVOVF_ERROR	18
-+# endif
-+#else
-+#define TRAP_RESERVED_INST	12
-+#define TRAP_ILLEGAL_SLOT_INST	13
-+#endif
-+
-+static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
-+{
-+	unsigned long p;
-+	int i;
++++ b/arch/sh/mm/Makefile_32
+@@ -0,0 +1,36 @@
++#
++# Makefile for the Linux SuperH-specific parts of the memory manager.
++#
 +
-+	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
++obj-y			:= init.o extable_32.o consistent.o
 +
-+	for (p = bottom & ~31; p < top; ) {
-+		printk("%04lx: ", p & 0xffff);
++ifndef CONFIG_CACHE_OFF
++obj-$(CONFIG_CPU_SH2)		+= cache-sh2.o
++obj-$(CONFIG_CPU_SH3)		+= cache-sh3.o
++obj-$(CONFIG_CPU_SH4)		+= cache-sh4.o
++obj-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
++endif
 +
-+		for (i = 0; i < 8; i++, p += 4) {
-+			unsigned int val;
++mmu-y			:= tlb-nommu.o pg-nommu.o
++mmu-$(CONFIG_MMU)	:= fault_32.o tlbflush_32.o ioremap_32.o
 +
-+			if (p < bottom || p >= top)
-+				printk("         ");
-+			else {
-+				if (__get_user(val, (unsigned int __user *)p)) {
-+					printk("\n");
-+					return;
-+				}
-+				printk("%08x ", val);
-+			}
-+		}
-+		printk("\n");
-+	}
-+}
++obj-y			+= $(mmu-y)
 +
-+static DEFINE_SPINLOCK(die_lock);
++ifdef CONFIG_DEBUG_FS
++obj-$(CONFIG_CPU_SH4)	+= cache-debugfs.o
++endif
 +
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+	static int die_counter;
++ifdef CONFIG_MMU
++obj-$(CONFIG_CPU_SH3)	+= tlb-sh3.o
++obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o
++ifndef CONFIG_CACHE_OFF
++obj-$(CONFIG_CPU_SH4)		+= pg-sh4.o
++obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh7705.o
++endif
++endif
 +
-+	oops_enter();
++obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
++obj-$(CONFIG_PMB)		+= pmb.o
++obj-$(CONFIG_NUMA)		+= numa.o
 +
-+	console_verbose();
-+	spin_lock_irq(&die_lock);
-+	bust_spinlocks(1);
++EXTRA_CFLAGS += -Werror
+diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64
+new file mode 100644
+index 0000000..cbd6aa3
+--- /dev/null
++++ b/arch/sh/mm/Makefile_64
+@@ -0,0 +1,44 @@
++#
++# Makefile for the Linux SuperH-specific parts of the memory manager.
++#
 +
-+	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++obj-y			:= init.o extable_64.o consistent.o
 +
-+	CHK_REMOTE_DEBUG(regs);
-+	print_modules();
-+	show_regs(regs);
++mmu-y			:= tlb-nommu.o pg-nommu.o
++mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o
 +
-+	printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
-+			task_pid_nr(current), task_stack_page(current) + 1);
++ifndef CONFIG_CACHE_OFF
++obj-y			+= cache-sh5.o
++endif
 +
-+	if (!user_mode(regs) || in_interrupt())
-+		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
-+			 (unsigned long)task_stack_page(current));
++obj-y			+= $(mmu-y)
 +
-+	bust_spinlocks(0);
-+	add_taint(TAINT_DIE);
-+	spin_unlock_irq(&die_lock);
++obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
++obj-$(CONFIG_NUMA)		+= numa.o
 +
-+	if (kexec_should_crash(current))
-+		crash_kexec(regs);
++EXTRA_CFLAGS += -Werror
 +
-+	if (in_interrupt())
-+		panic("Fatal exception in interrupt");
++# Special flags for fault_64.o.  This puts restrictions on the number of
++# caller-save registers that the compiler can target when building this file.
++# This is required because the code is called from a context in entry.S where
++# very few registers have been saved in the exception handler (for speed
++# reasons).
++# The caller save registers that have been saved and which can be used are
++# r2,r3,r4,r5 : argument passing
++# r15, r18 : SP and LINK
++# tr0-4 : allow all caller-save TR's.  The compiler seems to be able to make
++#         use of them, so it's probably beneficial to performance to save them
++#         and have them available for it.
++#
++# The resources not listed below are callee save, i.e. the compiler is free to
++# use any of them and will spill them to the stack itself.
 +
-+	if (panic_on_oops)
-+		panic("Fatal exception");
++CFLAGS_fault_64.o += -ffixed-r7 \
++	-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
++	-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
++	-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
++	-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
++	-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
++	-ffixed-r41 -ffixed-r42 -ffixed-r43  \
++	-ffixed-r60 -ffixed-r61 -ffixed-r62 \
++	-fomit-frame-pointer
+diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
+index de6d2c9..db6d950 100644
+--- a/arch/sh/mm/cache-debugfs.c
++++ b/arch/sh/mm/cache-debugfs.c
+@@ -22,7 +22,8 @@ enum cache_type {
+ 	CACHE_TYPE_UNIFIED,
+ };
+ 
+-static int cache_seq_show(struct seq_file *file, void *iter)
++static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
++						  void *iter)
+ {
+ 	unsigned int cache_type = (unsigned int)file->private;
+ 	struct cache_info *cache;
+@@ -34,11 +35,11 @@ static int cache_seq_show(struct seq_file *file, void *iter)
+ 	 * Go uncached immediately so we don't skew the results any
+ 	 * more than we already are..
+ 	 */
+-	jump_to_P2();
++	jump_to_uncached();
+ 
+ 	ccr = ctrl_inl(CCR);
+ 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
+-		back_to_P1();
++		back_to_cached();
+ 
+ 		seq_printf(file, "disabled\n");
+ 		return 0;
+@@ -104,7 +105,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
+ 		addrstart += cache->way_incr;
+ 	}
+ 
+-	back_to_P1();
++	back_to_cached();
+ 
+ 	return 0;
+ }
+diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
+index 226b190..43d7ff6 100644
+--- a/arch/sh/mm/cache-sh4.c
++++ b/arch/sh/mm/cache-sh4.c
+@@ -190,7 +190,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
+  * .. which happens to be the same behavior as flush_icache_range().
+  * So, we simply flush out a line.
+  */
+-void flush_cache_sigtramp(unsigned long addr)
++void __uses_jump_to_uncached flush_cache_sigtramp(unsigned long addr)
+ {
+ 	unsigned long v, index;
+ 	unsigned long flags;
+@@ -205,13 +205,13 @@ void flush_cache_sigtramp(unsigned long addr)
+ 			(v & boot_cpu_data.icache.entry_mask);
+ 
+ 	local_irq_save(flags);
+-	jump_to_P2();
++	jump_to_uncached();
+ 
+ 	for (i = 0; i < boot_cpu_data.icache.ways;
+ 	     i++, index += boot_cpu_data.icache.way_incr)
+ 		ctrl_outl(0, index);	/* Clear out Valid-bit */
+ 
+-	back_to_P1();
++	back_to_cached();
+ 	wmb();
+ 	local_irq_restore(flags);
+ }
+@@ -256,12 +256,12 @@ void flush_dcache_page(struct page *page)
+ }
+ 
+ /* TODO: Selective icache invalidation through IC address array.. */
+-static inline void flush_icache_all(void)
++static inline void __uses_jump_to_uncached flush_icache_all(void)
+ {
+ 	unsigned long flags, ccr;
+ 
+ 	local_irq_save(flags);
+-	jump_to_P2();
++	jump_to_uncached();
+ 
+ 	/* Flush I-cache */
+ 	ccr = ctrl_inl(CCR);
+@@ -269,11 +269,11 @@ static inline void flush_icache_all(void)
+ 	ctrl_outl(ccr, CCR);
+ 
+ 	/*
+-	 * back_to_P1() will take care of the barrier for us, don't add
++	 * back_to_cached() will take care of the barrier for us, don't add
+ 	 * another one!
+ 	 */
+ 
+-	back_to_P1();
++	back_to_cached();
+ 	local_irq_restore(flags);
+ }
+ 
+diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
+new file mode 100644
+index 0000000..4617e3a
+--- /dev/null
++++ b/arch/sh/mm/cache-sh5.c
+@@ -0,0 +1,1029 @@
++/*
++ * arch/sh/mm/cache-sh5.c
++ *
++ * Original version Copyright (C) 2000, 2001  Paolo Alberelli
++ * Second version Copyright (C) benedict.gaster at superh.com 2002
++ * Third version Copyright Richard.Curnow at superh.com 2003
++ * Hacks to third version Copyright (C) 2003 Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/init.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/threads.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/cache.h>
++#include <asm/tlb.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <asm/mmu_context.h>
++#include <asm/pgalloc.h> /* for flush_itlb_range */
 +
-+	oops_exit();
-+	do_exit(SIGSEGV);
-+}
++#include <linux/proc_fs.h>
 +
-+static inline void die_if_kernel(const char *str, struct pt_regs *regs,
-+				 long err)
-+{
-+	if (!user_mode(regs))
-+		die(str, regs, err);
-+}
++/* This function is in entry.S */
++extern unsigned long switch_and_save_asid(unsigned long new_asid);
 +
-+/*
-+ * try and fix up kernelspace address errors
-+ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
-+ * - kernel/userspace interfaces cause a jump to an appropriate handler
-+ * - other kernel errors are bad
-+ * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
-+ */
-+static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-+{
-+	if (!user_mode(regs)) {
-+		const struct exception_table_entry *fixup;
-+		fixup = search_exception_tables(regs->pc);
-+		if (fixup) {
-+			regs->pc = fixup->fixup;
-+			return 0;
-+		}
-+		die(str, regs, err);
-+	}
-+	return -EFAULT;
-+}
++/* Wired TLB entry for the D-cache */
++static unsigned long long dtlb_cache_slot;
 +
-+/*
-+ * handle an instruction that does an unaligned memory access by emulating the
-+ * desired behaviour
-+ * - note that PC _may not_ point to the faulting instruction
-+ *   (if that instruction is in a branch delay slot)
-+ * - return 0 if emulation okay, -EFAULT on existential error
++/**
++ * sh64_cache_init()
++ *
++ * This is pretty much just a straightforward clone of the SH
++ * detect_cpu_and_cache_system().
++ *
++ * This function is responsible for setting up all of the cache
++ * info dynamically as well as taking care of CPU probing and
++ * setting up the relevant subtype data.
++ *
++ * FIXME: For the time being, we only really support the SH5-101
++ * out of the box, and don't support dynamic probing for things
++ * like the SH5-103 or even cut2 of the SH5-101. Implement this
++ * later!
 + */
-+static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
++int __init sh64_cache_init(void)
 +{
-+	int ret, index, count;
-+	unsigned long *rm, *rn;
-+	unsigned char *src, *dst;
++	/*
++	 * First, setup some sane values for the I-cache.
++	 */
++	cpu_data->icache.ways		= 4;
++	cpu_data->icache.sets		= 256;
++	cpu_data->icache.linesz		= L1_CACHE_BYTES;
 +
-+	index = (instruction>>8)&15;	/* 0x0F00 */
-+	rn = &regs->regs[index];
++	/*
++	 * FIXME: This can probably be cleaned up a bit as well.. for example,
++	 * do we really need the way shift _and_ the way_step_shift ?? Judging
++	 * by the existing code, I would guess no.. is there any valid reason
++	 * why we need to be tracking this around?
++	 */
++	cpu_data->icache.way_shift	= 13;
++	cpu_data->icache.entry_shift	= 5;
++	cpu_data->icache.set_shift	= 4;
++	cpu_data->icache.way_step_shift	= 16;
++	cpu_data->icache.asid_shift	= 2;
 +
-+	index = (instruction>>4)&15;	/* 0x00F0 */
-+	rm = &regs->regs[index];
++	/*
++	 * way offset = cache size / associativity, so just don't factor in
++	 * associativity in the first place..
++	 */
++	cpu_data->icache.way_ofs	= cpu_data->icache.sets *
++					  cpu_data->icache.linesz;
 +
-+	count = 1<<(instruction&3);
++	cpu_data->icache.asid_mask	= 0x3fc;
++	cpu_data->icache.idx_mask	= 0x1fe0;
++	cpu_data->icache.epn_mask	= 0xffffe000;
++	cpu_data->icache.flags		= 0;
 +
-+	ret = -EFAULT;
-+	switch (instruction>>12) {
-+	case 0: /* mov.[bwl] to/from memory via r0+rn */
-+		if (instruction & 8) {
-+			/* from memory */
-+			src = (unsigned char*) *rm;
-+			src += regs->regs[0];
-+			dst = (unsigned char*) rn;
-+			*(unsigned long*)dst = 0;
++	/*
++	 * Next, setup some sane values for the D-cache.
++	 *
++	 * On the SH5, these are pretty consistent with the I-cache settings,
++	 * so we just copy over the existing definitions.. these can be fixed
++	 * up later, especially if we add runtime CPU probing.
++	 *
++	 * Though in the meantime it saves us from having to duplicate all of
++	 * the above definitions..
++	 */
++	cpu_data->dcache		= cpu_data->icache;
 +
-+#ifdef __LITTLE_ENDIAN__
-+			if (copy_from_user(dst, src, count))
-+				goto fetch_fault;
++	/*
++	 * Setup any cache-related flags here
++	 */
++#if defined(CONFIG_DCACHE_WRITE_THROUGH)
++	set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags));
++#elif defined(CONFIG_DCACHE_WRITE_BACK)
++	set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags));
++#endif
 +
-+			if ((count == 2) && dst[1] & 0x80) {
-+				dst[2] = 0xff;
-+				dst[3] = 0xff;
-+			}
-+#else
-+			dst += 4-count;
++	/*
++	 * We also need to reserve a slot for the D-cache in the DTLB, so we
++	 * do this now ..
++	 */
++	dtlb_cache_slot			= sh64_get_wired_dtlb_entry();
 +
-+			if (__copy_user(dst, src, count))
-+				goto fetch_fault;
++	return 0;
++}
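
A stand-alone user-space sketch of how the geometry above turns an effective address into a D-cache set index: the constants mirror sh64_cache_init() (4 ways, 256 sets, idx_mask 0x1fe0, entry_shift 5, which implies 32-byte lines), while the sample address and macro names are made up for illustration.

	#include <stdio.h>

	#define LINESZ       32                     /* implied by entry_shift == 5 */
	#define SETS         256
	#define ENTRY_SHIFT  5
	#define IDX_MASK     0x1fe0                 /* (SETS - 1) << ENTRY_SHIFT */
	#define WAY_OFS      (SETS * LINESZ)        /* 8192 bytes between ways */

	int main(void)
	{
		unsigned long eaddr = 0x0040a6c4;   /* arbitrary example address */
		unsigned long set = (eaddr & IDX_MASK) >> ENTRY_SHIFT;

		/* the purge code walks one line per way at this set offset */
		printf("0x%08lx maps to set %lu; way stride %d bytes\n",
		       eaddr, set, WAY_OFS);
		return 0;
	}
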
 +
-+			if ((count == 2) && dst[2] & 0x80) {
-+				dst[0] = 0xff;
-+				dst[1] = 0xff;
-+			}
-+#endif
-+		} else {
-+			/* to memory */
-+			src = (unsigned char*) rm;
-+#if !defined(__LITTLE_ENDIAN__)
-+			src += 4-count;
++#ifdef CONFIG_DCACHE_DISABLED
++#define sh64_dcache_purge_all()					do { } while (0)
++#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
++#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
++#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
++#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
++#define sh64_dcache_purge_kernel_range(start, end)		do { } while (0)
++#define sh64_dcache_wback_current_user_range(start, end)	do { } while (0)
 +#endif
-+			dst = (unsigned char*) *rn;
-+			dst += regs->regs[0];
 +
-+			if (copy_to_user(dst, src, count))
-+				goto fetch_fault;
-+		}
-+		ret = 0;
-+		break;
++/*##########################################################################*/
 +
-+	case 1: /* mov.l Rm,@(disp,Rn) */
-+		src = (unsigned char*) rm;
-+		dst = (unsigned char*) *rn;
-+		dst += (instruction&0x000F)<<2;
++/* From here onwards, a rewrite of the implementation,
++   by Richard.Curnow at superh.com.
 +
-+		if (copy_to_user(dst,src,4))
-+			goto fetch_fault;
-+		ret = 0;
-+		break;
++   The major changes in this compared to the old version are;
++   1. use more selective purging through OCBP instead of using ALLOCO to purge
++      by natural replacement.  This avoids purging out unrelated cache lines
++      that happen to be in the same set.
++   2. exploit the APIs copy_user_page and clear_user_page better
++   3. be more selective about I-cache purging, in particular use invalidate_all
++      more sparingly.
 +
-+	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
-+		if (instruction & 4)
-+			*rn -= count;
-+		src = (unsigned char*) rm;
-+		dst = (unsigned char*) *rn;
-+#if !defined(__LITTLE_ENDIAN__)
-+		src += 4-count;
-+#endif
-+		if (copy_to_user(dst, src, count))
-+			goto fetch_fault;
-+		ret = 0;
-+		break;
++   */
 +
-+	case 5: /* mov.l @(disp,Rm),Rn */
-+		src = (unsigned char*) *rm;
-+		src += (instruction&0x000F)<<2;
-+		dst = (unsigned char*) rn;
-+		*(unsigned long*)dst = 0;
++/*##########################################################################
++			       SUPPORT FUNCTIONS
++  ##########################################################################*/
 +
-+		if (copy_from_user(dst,src,4))
-+			goto fetch_fault;
-+		ret = 0;
-+		break;
++/****************************************************************************/
++/* The following group of functions deal with mapping and unmapping a temporary
++   page into the DTLB slot that have been set aside for our exclusive use. */
++/* In order to accomplish this, we use the generic interface for adding and
++   removing a wired slot entry as defined in arch/sh/mm/tlb-sh5.c */
++/****************************************************************************/
 +
-+	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
-+		src = (unsigned char*) *rm;
-+		if (instruction & 4)
-+			*rm += count;
-+		dst = (unsigned char*) rn;
-+		*(unsigned long*)dst = 0;
++static unsigned long slot_own_flags;
 +
-+#ifdef __LITTLE_ENDIAN__
-+		if (copy_from_user(dst, src, count))
-+			goto fetch_fault;
++static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr)
++{
++	local_irq_save(slot_own_flags);
++	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
++}
 +
-+		if ((count == 2) && dst[1] & 0x80) {
-+			dst[2] = 0xff;
-+			dst[3] = 0xff;
-+		}
-+#else
-+		dst += 4-count;
++static inline void sh64_teardown_dtlb_cache_slot(void)
++{
++	sh64_teardown_tlb_slot(dtlb_cache_slot);
++	local_irq_restore(slot_own_flags);
++}
 +
-+		if (copy_from_user(dst, src, count))
-+			goto fetch_fault;
++/****************************************************************************/
 +
-+		if ((count == 2) && dst[2] & 0x80) {
-+			dst[0] = 0xff;
-+			dst[1] = 0xff;
-+		}
-+#endif
-+		ret = 0;
-+		break;
++#ifndef CONFIG_ICACHE_DISABLED
 +
-+	case 8:
-+		switch ((instruction&0xFF00)>>8) {
-+		case 0x81: /* mov.w R0,@(disp,Rn) */
-+			src = (unsigned char*) &regs->regs[0];
-+#if !defined(__LITTLE_ENDIAN__)
-+			src += 2;
-+#endif
-+			dst = (unsigned char*) *rm; /* called Rn in the spec */
-+			dst += (instruction&0x000F)<<1;
++static void __inline__ sh64_icache_inv_all(void)
++{
++	unsigned long long addr, flag, data;
++	unsigned int flags;
 +
-+			if (copy_to_user(dst, src, 2))
-+				goto fetch_fault;
-+			ret = 0;
-+			break;
++	addr=ICCR0;
++	flag=ICCR0_ICI;
++	data=0;
 +
-+		case 0x85: /* mov.w @(disp,Rm),R0 */
-+			src = (unsigned char*) *rm;
-+			src += (instruction&0x000F)<<1;
-+			dst = (unsigned char*) &regs->regs[0];
-+			*(unsigned long*)dst = 0;
++	/* Make this a critical section for safety (probably not strictly necessary.) */
++	local_irq_save(flags);
 +
-+#if !defined(__LITTLE_ENDIAN__)
-+			dst += 2;
-+#endif
++	/* Without %1 it gets inexplicably wrong */
++	asm volatile("getcfg	%3, 0, %0\n\t"
++			"or	%0, %2, %0\n\t"
++			"putcfg	%3, 0, %0\n\t"
++			"synci"
++			: "=&r" (data)
++			: "0" (data), "r" (flag), "r" (addr));
 +
-+			if (copy_from_user(dst, src, 2))
-+				goto fetch_fault;
++	local_irq_restore(flags);
++}
 +
-+#ifdef __LITTLE_ENDIAN__
-+			if (dst[1] & 0x80) {
-+				dst[2] = 0xff;
-+				dst[3] = 0xff;
-+			}
++static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
++{
++	/* Invalidate range of addresses [start,end] from the I-cache, where
++	 * the addresses lie in the kernel superpage. */
++
++	unsigned long long ullend, addr, aligned_start;
++#if (NEFF == 32)
++	aligned_start = (unsigned long long)(signed long long)(signed long) start;
 +#else
-+			if (dst[2] & 0x80) {
-+				dst[0] = 0xff;
-+				dst[1] = 0xff;
-+			}
++#error "NEFF != 32"
 +#endif
-+			ret = 0;
-+			break;
-+		}
-+		break;
++	aligned_start &= L1_CACHE_ALIGN_MASK;
++	addr = aligned_start;
++#if (NEFF == 32)
++	ullend = (unsigned long long) (signed long long) (signed long) end;
++#else
++#error "NEFF != 32"
++#endif
++	while (addr <= ullend) {
++		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
++		addr += L1_CACHE_BYTES;
 +	}
-+	return ret;
-+
-+ fetch_fault:
-+	/* Argh. Address not only misaligned but also non-existent.
-+	 * Raise an EFAULT and see if it's trapped
-+	 */
-+	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
 +}
 +
-+/*
-+ * emulate the instruction in the delay slot
-+ * - fetches the instruction from PC+2
-+ */
-+static inline int handle_unaligned_delayslot(struct pt_regs *regs)
++static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
 +{
-+	u16 instruction;
-+
-+	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
-+		/* the instruction-fetch faulted */
-+		if (user_mode(regs))
-+			return -EFAULT;
++	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
++	   Also, eaddr is page-aligned. */
 +
-+		/* kernel */
-+		die("delay-slot-insn faulting in handle_unaligned_delayslot",
-+		    regs, 0);
-+	}
++	unsigned long long addr, end_addr;
++	unsigned long flags = 0;
++	unsigned long running_asid, vma_asid;
++	addr = eaddr;
++	end_addr = addr + PAGE_SIZE;
 +
-+	return handle_unaligned_ins(instruction,regs);
-+}
++	/* Check whether we can use the current ASID for the I-cache
++	   invalidation.  For example, if we're called via
++	   access_process_vm->flush_cache_page->here, (e.g. when reading from
++	   /proc), 'running_asid' will be that of the reader, not of the
++	   victim.
 +
-+/*
-+ * handle an instruction that does an unaligned memory access
-+ * - have to be careful of branch delay-slot instructions that fault
-+ *  SH3:
-+ *   - if the branch would be taken PC points to the branch
-+ *   - if the branch would not be taken, PC points to delay-slot
-+ *  SH4:
-+ *   - PC always points to delayed branch
-+ * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
-+ */
++	   Also, note the risk that we might get pre-empted between the ASID
++	   compare and blocking IRQs, and before we regain control, the
++	   pid->ASID mapping changes.  However, the whole cache will get
++	   invalidated when the mapping is renewed, so the worst that can
++	   happen is that the loop below ends up invalidating somebody else's
++	   cache entries.
++	*/
 +
-+/* Macros to determine offset from current PC for branch instructions */
-+/* Explicit type coercion is used to force sign extension where needed */
-+#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
-+#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
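
The two macros above lean on C casts to sign-extend the branch displacement. A quick stand-alone check of that arithmetic with made-up but valid encodings (bf with displacement +5, bra with displacement -2); it assumes an arithmetic right shift of negative values, as the kernel does on the compilers it supports.

	#include <assert.h>
	#include <stdio.h>

	#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
	#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)

	int main(void)
	{
		unsigned short bf_plus5   = 0x8b05;  /* bf with 8-bit disp +5   */
		unsigned short bra_minus2 = 0xaffe;  /* bra with 12-bit disp -2 */

		/* target = PC + disp*2 + 4 */
		assert(SH_PC_8BIT_OFFSET(bf_plus5) == 14);
		assert(SH_PC_12BIT_OFFSET(bra_minus2) == 0);
		printf("%d %d\n", SH_PC_8BIT_OFFSET(bf_plus5),
		       SH_PC_12BIT_OFFSET(bra_minus2));
		return 0;
	}
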
++	running_asid = get_asid();
++	vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
++	if (running_asid != vma_asid) {
++		local_irq_save(flags);
++		switch_and_save_asid(vma_asid);
++	}
++	while (addr < end_addr) {
++		/* Worth unrolling a little */
++		asm __volatile__("icbi %0,  0" : : "r" (addr));
++		asm __volatile__("icbi %0, 32" : : "r" (addr));
++		asm __volatile__("icbi %0, 64" : : "r" (addr));
++		asm __volatile__("icbi %0, 96" : : "r" (addr));
++		addr += 128;
++	}
++	if (running_asid != vma_asid) {
++		switch_and_save_asid(running_asid);
++		local_irq_restore(flags);
++	}
++}
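
A small sanity sketch of the unrolled loop above: with the 32-byte I-cache lines configured in sh64_cache_init() and an assumed 4KB page (the page size here is an assumption for the example), four icbi's per 128-byte step touch every line of the page exactly once.

	#include <assert.h>
	#include <stdio.h>

	#define L1_CACHE_BYTES 32
	#define PAGE_SIZE_     4096UL       /* assumed page size for the example */

	int main(void)
	{
		unsigned long addr, lines = 0;

		for (addr = 0; addr < PAGE_SIZE_; addr += 4 * L1_CACHE_BYTES)
			lines += 4;  /* icbi at +0, +32, +64, +96 in the real loop */

		assert(lines == PAGE_SIZE_ / L1_CACHE_BYTES);
		printf("%lu lines per page\n", lines);
		return 0;
	}
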
 +
-+/*
-+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
-+ * opcodes..
-+ */
-+#ifndef CONFIG_CPU_SH2A
-+static int handle_unaligned_notify_count = 10;
++/****************************************************************************/
 +
-+static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
++static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
++			  unsigned long start, unsigned long end)
 +{
-+	u_int rm;
-+	int ret, index;
-+
-+	index = (instruction>>8)&15;	/* 0x0F00 */
-+	rm = regs->regs[index];
++	/* Used for invalidating big chunks of I-cache, i.e. assume the range
++	   is whole pages.  If 'start' or 'end' is not page aligned, the code
++	   is conservative and invalidates to the ends of the enclosing pages.
++	   This is functionally OK, just a performance loss. */
 +
-+	/* shout about the first ten userspace fixups */
-+	if (user_mode(regs) && handle_unaligned_notify_count>0) {
-+		handle_unaligned_notify_count--;
++	/* See the comments below in sh64_dcache_purge_user_range() regarding
++	   the choice of algorithm.  However, for the I-cache option (2) isn't
++	   available because there are no physical tags so aliases can't be
++	   resolved.  The icbi instruction has to be used through the user
++	   mapping.   Because icbi is cheaper than ocbp on a cache hit, it
++	   would be cheaper to use the selective code for a large range than is
++	   possible with the D-cache.  Just assume 64 for now as a working
++	   figure.
++	   */
 +
-+		printk(KERN_NOTICE "Fixing up unaligned userspace access "
-+		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
-+		       current->comm, task_pid_nr(current),
-+		       (u16 *)regs->pc, instruction);
-+	}
++	int n_pages;
 +
-+	ret = -EFAULT;
-+	switch (instruction&0xF000) {
-+	case 0x0000:
-+		if (instruction==0x000B) {
-+			/* rts */
-+			ret = handle_unaligned_delayslot(regs);
-+			if (ret==0)
-+				regs->pc = regs->pr;
-+		}
-+		else if ((instruction&0x00FF)==0x0023) {
-+			/* braf @Rm */
-+			ret = handle_unaligned_delayslot(regs);
-+			if (ret==0)
-+				regs->pc += rm + 4;
-+		}
-+		else if ((instruction&0x00FF)==0x0003) {
-+			/* bsrf @Rm */
-+			ret = handle_unaligned_delayslot(regs);
-+			if (ret==0) {
-+				regs->pr = regs->pc + 4;
-+				regs->pc += rm + 4;
-+			}
-+		}
-+		else {
-+			/* mov.[bwl] to/from memory via r0+rn */
-+			goto simple;
-+		}
-+		break;
++	if (!mm) return;
 +
-+	case 0x1000: /* mov.l Rm,@(disp,Rn) */
-+		goto simple;
++	n_pages = ((end - start) >> PAGE_SHIFT);
++	if (n_pages >= 64) {
++		sh64_icache_inv_all();
++	} else {
++		unsigned long aligned_start;
++		unsigned long eaddr;
++		unsigned long after_last_page_start;
++		unsigned long mm_asid, current_asid;
++		unsigned long long flags = 0ULL;
 +
-+	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
-+		goto simple;
++		mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
++		current_asid = get_asid();
 +
-+	case 0x4000:
-+		if ((instruction&0x00FF)==0x002B) {
-+			/* jmp @Rm */
-+			ret = handle_unaligned_delayslot(regs);
-+			if (ret==0)
-+				regs->pc = rm;
-+		}
-+		else if ((instruction&0x00FF)==0x000B) {
-+			/* jsr @Rm */
-+			ret = handle_unaligned_delayslot(regs);
-+			if (ret==0) {
-+				regs->pr = regs->pc + 4;
-+				regs->pc = rm;
-+			}
-+		}
-+		else {
-+			/* mov.[bwl] to/from memory via r0+rn */
-+			goto simple;
++		if (mm_asid != current_asid) {
++			/* Switch ASID and run the invalidate loop under cli */
++			local_irq_save(flags);
++			switch_and_save_asid(mm_asid);
 +		}
-+		break;
-+
-+	case 0x5000: /* mov.l @(disp,Rm),Rn */
-+		goto simple;
 +
-+	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
-+		goto simple;
++		aligned_start = start & PAGE_MASK;
++		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
 +
-+	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
-+		switch (instruction&0x0F00) {
-+		case 0x0100: /* mov.w R0,@(disp,Rm) */
-+			goto simple;
-+		case 0x0500: /* mov.w @(disp,Rm),R0 */
-+			goto simple;
-+		case 0x0B00: /* bf   lab - no delayslot*/
-+			break;
-+		case 0x0F00: /* bf/s lab */
-+			ret = handle_unaligned_delayslot(regs);
-+			if (ret==0) {
-+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
-+				if ((regs->sr & 0x00000001) != 0)
-+					regs->pc += 4; /* next after slot */
-+				else
-+#endif
-+					regs->pc += SH_PC_8BIT_OFFSET(instruction);
++		while (aligned_start < after_last_page_start) {
++			struct vm_area_struct *vma;
++			unsigned long vma_end;
++			vma = find_vma(mm, aligned_start);
++			if (!vma || (aligned_start <= vma->vm_end)) {
++				/* Avoid getting stuck in an error condition */
++				aligned_start += PAGE_SIZE;
++				continue;
 +			}
-+			break;
-+		case 0x0900: /* bt   lab - no delayslot */
-+			break;
-+		case 0x0D00: /* bt/s lab */
-+			ret = handle_unaligned_delayslot(regs);
-+			if (ret==0) {
-+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
-+				if ((regs->sr & 0x00000001) == 0)
-+					regs->pc += 4; /* next after slot */
-+				else
-+#endif
-+					regs->pc += SH_PC_8BIT_OFFSET(instruction);
++			vma_end = vma->vm_end;
++			if (vma->vm_flags & VM_EXEC) {
++				/* Executable */
++				eaddr = aligned_start;
++				while (eaddr < vma_end) {
++					sh64_icache_inv_user_page(vma, eaddr);
++					eaddr += PAGE_SIZE;
++				}
 +			}
-+			break;
++			aligned_start = vma->vm_end; /* Skip to start of next region */
 +		}
-+		break;
-+
-+	case 0xA000: /* bra label */
-+		ret = handle_unaligned_delayslot(regs);
-+		if (ret==0)
-+			regs->pc += SH_PC_12BIT_OFFSET(instruction);
-+		break;
-+
-+	case 0xB000: /* bsr label */
-+		ret = handle_unaligned_delayslot(regs);
-+		if (ret==0) {
-+			regs->pr = regs->pc + 4;
-+			regs->pc += SH_PC_12BIT_OFFSET(instruction);
++		if (mm_asid != current_asid) {
++			switch_and_save_asid(current_asid);
++			local_irq_restore(flags);
 +		}
-+		break;
 +	}
-+	return ret;
-+
-+	/* handle non-delay-slot instruction */
-+ simple:
-+	ret = handle_unaligned_ins(instruction,regs);
-+	if (ret==0)
-+		regs->pc += instruction_size(instruction);
-+	return ret;
 +}
-+#endif /* CONFIG_CPU_SH2A */
-+
-+#ifdef CONFIG_CPU_HAS_SR_RB
-+#define lookup_exception_vector(x)	\
-+	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
-+#else
-+#define lookup_exception_vector(x)	\
-+	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
-+#endif
 +
-+/*
-+ * Handle various address error exceptions:
-+ *  - instruction address error:
-+ *       misaligned PC
-+ *       PC >= 0x80000000 in user mode
-+ *  - data address error (read and write)
-+ *       misaligned data access
-+ *       access to >= 0x80000000 is user mode
-+ * Unfortunately we can't distinguish between instruction address error
-+ * and data address errors caused by read accesses.
-+ */
-+asmlinkage void do_address_error(struct pt_regs *regs,
-+				 unsigned long writeaccess,
-+				 unsigned long address)
++static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
++						unsigned long start, int len)
 +{
-+	unsigned long error_code = 0;
-+	mm_segment_t oldfs;
-+	siginfo_t info;
-+#ifndef CONFIG_CPU_SH2A
-+	u16 instruction;
-+	int tmp;
-+#endif
-+
-+	/* Intentional ifdef */
-+#ifdef CONFIG_CPU_HAS_SR_RB
-+	lookup_exception_vector(error_code);
-+#endif
-+
-+	oldfs = get_fs();
-+
-+	if (user_mode(regs)) {
-+		int si_code = BUS_ADRERR;
-+
-+		local_irq_enable();
 +
-+		/* bad PC is not something we can fix */
-+		if (regs->pc & 1) {
-+			si_code = BUS_ADRALN;
-+			goto uspace_segv;
-+		}
-+
-+#ifndef CONFIG_CPU_SH2A
-+		set_fs(USER_DS);
-+		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
-+			/* Argh. Fault on the instruction itself.
-+			   This should never happen non-SMP
-+			*/
-+			set_fs(oldfs);
-+			goto uspace_segv;
-+		}
++	/* Invalidate a small range of user context I-cache, not necessarily
++	   page (or even cache-line) aligned. */
 +
-+		tmp = handle_unaligned_access(instruction, regs);
-+		set_fs(oldfs);
++	unsigned long long eaddr = start;
++	unsigned long long eaddr_end = start + len;
++	unsigned long current_asid, mm_asid;
++	unsigned long long flags;
++	unsigned long long epage_start;
 +
-+		if (tmp==0)
-+			return; /* sorted */
-+#endif
++	/* Since this is used inside ptrace, the ASID in the mm context
++	   typically won't match current_asid.  We'll have to switch ASID to do
++	   this.  For safety, and given that the range will be small, do all
++	   this under cli.
 +
-+uspace_segv:
-+		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
-+		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
-+		       regs->pr);
++	   Note, there is a hazard that the ASID in mm->context is no longer
++	   actually associated with mm, i.e. if the mm->context has started a
++	   new cycle since mm was last active.  However, this is just a
++	   performance issue: all that happens is that we invalidate lines
++	   belonging to another mm, so the owning process has to refill them
++	   when that mm goes live again.  mm itself can't have any cache
++	   entries because there will have been a flush_cache_all when the new
++	   mm->context cycle started. */
 +
-+		info.si_signo = SIGBUS;
-+		info.si_errno = 0;
-+		info.si_code = si_code;
-+		info.si_addr = (void __user *)address;
-+		force_sig_info(SIGBUS, &info, current);
-+	} else {
-+		if (regs->pc & 1)
-+			die("unaligned program counter", regs, error_code);
++	/* Align to start of cache line.  Otherwise, suppose len==8 and start
++	   was at 32N+28 : the last 4 bytes wouldn't get invalidated. */
++	eaddr = start & L1_CACHE_ALIGN_MASK;
++	eaddr_end = start + len;
 +
-+#ifndef CONFIG_CPU_SH2A
-+		set_fs(KERNEL_DS);
-+		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
-+			/* Argh. Fault on the instruction itself.
-+			   This should never happen non-SMP
-+			*/
-+			set_fs(oldfs);
-+			die("insn faulting in do_address_error", regs, 0);
-+		}
++	local_irq_save(flags);
++	mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
++	current_asid = switch_and_save_asid(mm_asid);
 +
-+		handle_unaligned_access(instruction, regs);
-+		set_fs(oldfs);
-+#else
-+		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
-+		       "access\n", current->comm);
++	epage_start = eaddr & PAGE_MASK;
 +
-+		force_sig(SIGSEGV, current);
-+#endif
++	while (eaddr < eaddr_end)
++	{
++		asm __volatile__("icbi %0, 0" : : "r" (eaddr));
++		eaddr += L1_CACHE_BYTES;
 +	}
++	switch_and_save_asid(current_asid);
++	local_irq_restore(flags);
 +}
 +
-+#ifdef CONFIG_SH_DSP
-+/*
-+ *	SH-DSP support gerg at snapgear.com.
-+ */
-+int is_dsp_inst(struct pt_regs *regs)
++static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
 +{
-+	unsigned short inst = 0;
-+
-+	/*
-+	 * Safe guard if DSP mode is already enabled or we're lacking
-+	 * the DSP altogether.
-+	 */
-+	if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
-+		return 0;
-+
-+	get_user(inst, ((unsigned short *) regs->pc));
-+
-+	inst &= 0xf000;
-+
-+	/* Check for any type of DSP or support instruction */
-+	if ((inst == 0xf000) || (inst == 0x4000))
-+		return 1;
++	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
++	   cache hit on the virtual tag the instruction ends there, without a
++	   TLB lookup. */
 +
-+	return 0;
-+}
-+#else
-+#define is_dsp_inst(regs)	(0)
-+#endif /* CONFIG_SH_DSP */
++	unsigned long long aligned_start;
++	unsigned long long ull_end;
++	unsigned long long addr;
 +
-+#ifdef CONFIG_CPU_SH2A
-+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
-+				unsigned long r6, unsigned long r7,
-+				struct pt_regs __regs)
-+{
-+	siginfo_t info;
++	ull_end = end;
 +
-+	switch (r4) {
-+	case TRAP_DIVZERO_ERROR:
-+		info.si_code = FPE_INTDIV;
-+		break;
-+	case TRAP_DIVOVF_ERROR:
-+		info.si_code = FPE_INTOVF;
-+		break;
++	/* Just invalidate over the range using the natural addresses.  TLB
++	   miss handling will be OK (TBC).  Since it's for the current process,
++	   either we're already in the right ASID context, or the ASIDs have
++	   been recycled since we were last active in which case we might just
++	   invalidate another process's I-cache entries : no worries, just a
++	   performance drop for him. */
++	aligned_start = start & L1_CACHE_ALIGN_MASK;
++	addr = aligned_start;
++	while (addr < ull_end) {
++		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
++		asm __volatile__ ("nop");
++		asm __volatile__ ("nop");
++		addr += L1_CACHE_BYTES;
 +	}
-+
-+	force_sig_info(SIGFPE, &info, current);
 +}
-+#endif
 +
-+asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
-+				unsigned long r6, unsigned long r7,
-+				struct pt_regs __regs)
-+{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	unsigned long error_code;
-+	struct task_struct *tsk = current;
++#endif /* !CONFIG_ICACHE_DISABLED */
 +
-+#ifdef CONFIG_SH_FPU_EMU
-+	unsigned short inst = 0;
-+	int err;
++/****************************************************************************/
 +
-+	get_user(inst, (unsigned short*)regs->pc);
++#ifndef CONFIG_DCACHE_DISABLED
 +
-+	err = do_fpu_inst(inst, regs);
-+	if (!err) {
-+		regs->pc += instruction_size(inst);
-+		return;
-+	}
-+	/* not a FPU inst. */
-+#endif
++/* Buffer used as the target of alloco instructions to purge data from cache
++   sets by natural eviction. -- RPC */
++#define DUMMY_ALLOCO_AREA_SIZE L1_CACHE_SIZE_BYTES + (1024 * 4)
++static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
 +
-+#ifdef CONFIG_SH_DSP
-+	/* Check if it's a DSP instruction */
-+	if (is_dsp_inst(regs)) {
-+		/* Enable DSP mode, and restart instruction. */
-+		regs->sr |= SR_DSP;
-+		return;
-+	}
-+#endif
++/****************************************************************************/
 +
-+	lookup_exception_vector(error_code);
++static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
++{
++	/* Purge all ways in a particular block of sets, specified by the base
++	   set number and number of sets.  Can handle wrap-around, if that's
++	   needed.  */
 +
-+	local_irq_enable();
-+	CHK_REMOTE_DEBUG(regs);
-+	force_sig(SIGILL, tsk);
-+	die_if_no_fixup("reserved instruction", regs, error_code);
-+}
++	int dummy_buffer_base_set;
++	unsigned long long eaddr, eaddr0, eaddr1;
++	int j;
++	int set_offset;
 +
-+#ifdef CONFIG_SH_FPU_EMU
-+static int emulate_branch(unsigned short inst, struct pt_regs* regs)
-+{
-+	/*
-+	 * bfs: 8fxx: PC+=d*2+4;
-+	 * bts: 8dxx: PC+=d*2+4;
-+	 * bra: axxx: PC+=D*2+4;
-+	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
-+	 * braf:0x23: PC+=Rn*2+4;
-+	 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
-+	 * jmp: 4x2b: PC=Rn;
-+	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
-+	 * rts: 000b: PC=PR;
-+	 */
-+	if ((inst & 0xfd00) == 0x8d00) {
-+		regs->pc += SH_PC_8BIT_OFFSET(inst);
-+		return 0;
-+	}
++	dummy_buffer_base_set = ((int)&dummy_alloco_area & cpu_data->dcache.idx_mask) >> cpu_data->dcache.entry_shift;
++	set_offset = sets_to_purge_base - dummy_buffer_base_set;
 +
-+	if ((inst & 0xe000) == 0xa000) {
-+		regs->pc += SH_PC_12BIT_OFFSET(inst);
-+		return 0;
-+	}
++	for (j=0; j<n_sets; j++, set_offset++) {
++		set_offset &= (cpu_data->dcache.sets - 1);
++		eaddr0 = (unsigned long long)dummy_alloco_area + (set_offset << cpu_data->dcache.entry_shift);
 +
-+	if ((inst & 0xf0df) == 0x0003) {
-+		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
-+		return 0;
-+	}
++		/* Do one alloco which hits the required set per cache way.  For
++		   write-back mode, this will purge the #ways resident lines.   There's
++		   little point unrolling this loop because the allocos stall more if
++		   they're too close together. */
++		eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
++		for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
++			asm __volatile__ ("alloco %0, 0" : : "r" (eaddr));
++			asm __volatile__ ("synco"); /* TAKum03020 */
++		}
 +
-+	if ((inst & 0xf0df) == 0x400b) {
-+		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
-+		return 0;
++		eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
++		for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
++			/* Load from each address.  Required because alloco is a NOP if
++			   the cache is write-through.  Write-through is a config option. */
++			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
++				*(volatile unsigned char *)(int)eaddr;
++		}
 +	}
 +
-+	if ((inst & 0xffff) == 0x000b) {
-+		regs->pc = regs->pr;
-+		return 0;
-+	}
++	/* Don't use OCBI to invalidate the lines.  That costs cycles directly.
++	   If the dummy block is just left resident, it will naturally get
++	   evicted as required.  */
 +
-+	return 1;
++	return;
 +}
-+#endif
 +
-+asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
-+				unsigned long r6, unsigned long r7,
-+				struct pt_regs __regs)
++/****************************************************************************/
++
++static void sh64_dcache_purge_all(void)
 +{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	unsigned long error_code;
-+	struct task_struct *tsk = current;
-+#ifdef CONFIG_SH_FPU_EMU
-+	unsigned short inst = 0;
++	/* Purge the entire contents of the dcache.  The most efficient way to
++	   achieve this is to use alloco instructions on a region of unused
++	   memory equal in size to the cache, thereby causing the current
++	   contents to be discarded by natural eviction.  The alternative,
++	   namely reading every tag, setting up a mapping for the corresponding
++	   page and doing an OCBP for the line, would be much more expensive.
++	   */
 +
-+	get_user(inst, (unsigned short *)regs->pc + 1);
-+	if (!do_fpu_inst(inst, regs)) {
-+		get_user(inst, (unsigned short *)regs->pc);
-+		if (!emulate_branch(inst, regs))
-+			return;
-+		/* fault in branch.*/
-+	}
-+	/* not a FPU inst. */
-+#endif
++	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
 +
-+	lookup_exception_vector(error_code);
++	return;
 +
-+	local_irq_enable();
-+	CHK_REMOTE_DEBUG(regs);
-+	force_sig(SIGILL, tsk);
-+	die_if_no_fixup("illegal slot instruction", regs, error_code);
 +}
 +
-+asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
-+				   unsigned long r6, unsigned long r7,
-+				   struct pt_regs __regs)
-+{
-+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-+	long ex;
++/****************************************************************************/
 +
-+	lookup_exception_vector(ex);
-+	die_if_kernel("exception", regs, ex);
++static void sh64_dcache_purge_kernel_range(unsigned long start, unsigned long end)
++{
++	/* Purge the range of addresses [start,end] from the D-cache.  The
++	   addresses lie in the superpage mapping.  There's no harm if we
++	   overpurge at either end - just a small performance loss. */
++	unsigned long long ullend, addr, aligned_start;
++#if (NEFF == 32)
++	aligned_start = (unsigned long long)(signed long long)(signed long) start;
++#else
++#error "NEFF != 32"
++#endif
++	aligned_start &= L1_CACHE_ALIGN_MASK;
++	addr = aligned_start;
++#if (NEFF == 32)
++	ullend = (unsigned long long) (signed long long) (signed long) end;
++#else
++#error "NEFF != 32"
++#endif
++	while (addr <= ullend) {
++		asm __volatile__ ("ocbp %0, 0" : : "r" (addr));
++		addr += L1_CACHE_BYTES;
++	}
++	return;
 +}
 +
-+#if defined(CONFIG_SH_STANDARD_BIOS)
-+void *gdb_vbr_vector;
++/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
++   anything else in the kernel */
++#define MAGIC_PAGE0_START 0xffffffffec000000ULL
 +
-+static inline void __init gdb_vbr_init(void)
++static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned long eaddr)
 +{
-+	register unsigned long vbr;
++	/* Purge the physical page 'paddr' from the cache.  It's known that any
++	   cache lines requiring attention have the same page colour as the
++	   address 'eaddr'.
 +
-+	/*
-+	 * Read the old value of the VBR register to initialise
-+	 * the vector through which debug and BIOS traps are
-+	 * delegated by the Linux trap handler.
-+	 */
-+	asm volatile("stc vbr, %0" : "=r" (vbr));
++	   This relies on the fact that the D-cache matches on physical tags
++	   when no virtual tag matches.  So we create an alias for the original
++	   page and purge through that.  (Alternatively, we could have done
++	   this by switching ASID to match the original mapping and purged
++	   through that, but that involves ASID switching cost + probably a
++	   TLBMISS + refill anyway.)
++	   */
 +
-+	gdb_vbr_vector = (void *)(vbr + 0x100);
-+	printk("Setting GDB trap vector to 0x%08lx\n",
-+	       (unsigned long)gdb_vbr_vector);
-+}
-+#endif
++	unsigned long long magic_page_start;
++	unsigned long long magic_eaddr, magic_eaddr_end;
 +
-+void __cpuinit per_cpu_trap_init(void)
-+{
-+	extern void *vbr_base;
++	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
 +
-+#ifdef CONFIG_SH_STANDARD_BIOS
-+	if (raw_smp_processor_id() == 0)
-+		gdb_vbr_init();
-+#endif
++	/* As long as the kernel is not pre-emptible, this doesn't need to be
++	   under cli/sti. */
 +
-+	/* NOTE: The VBR value should be at P1
-+	   (or P2, virtual "fixed" address space).
-+	   It should definitely not be in physical address space.  */
++	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
 +
-+	asm volatile("ldc	%0, vbr"
-+		     : /* no output */
-+		     : "r" (&vbr_base)
-+		     : "memory");
++	magic_eaddr = magic_page_start;
++	magic_eaddr_end = magic_eaddr + PAGE_SIZE;
++	while (magic_eaddr < magic_eaddr_end) {
++		/* Little point in unrolling this loop - the OCBPs are blocking
++		   and won't go any quicker (i.e. the loop overhead is parallel
++		   to part of the OCBP execution.) */
++		asm __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
++		magic_eaddr += L1_CACHE_BYTES;
++	}
++
++	sh64_teardown_dtlb_cache_slot();
 +}
 +
-+void *set_exception_table_vec(unsigned int vec, void *handler)
++/****************************************************************************/
++
++static void sh64_dcache_purge_phy_page(unsigned long paddr)
 +{
-+	extern void *exception_handling_table[];
-+	void *old_handler;
++	/* Purge a page given its physical start address, by creating a
++	   temporary 1 page mapping and purging across that.  Even if we know
++	   the virtual address (& vma or mm) of the page, the method here is
++	   more elegant because it avoids issues of coping with page faults on
++	   the purge instructions (i.e. no special-case code required in the
++	   critical path in the TLB miss handling). */
 +
-+	old_handler = exception_handling_table[vec];
-+	exception_handling_table[vec] = handler;
-+	return old_handler;
-+}
++	unsigned long long eaddr_start, eaddr, eaddr_end;
++	int i;
 +
-+void __init trap_init(void)
-+{
-+	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
-+	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
++	/* As long as the kernel is not pre-emptible, this doesn't need to be
++	   under cli/sti. */
 +
-+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
-+    defined(CONFIG_SH_FPU_EMU)
-+	/*
-+	 * For SH-4 lacking an FPU, treat floating point instructions as
-+	 * reserved. They'll be handled in the math-emu case, or faulted on
-+	 * otherwise.
-+	 */
-+	set_exception_table_evt(0x800, do_reserved_inst);
-+	set_exception_table_evt(0x820, do_illegal_slot_inst);
-+#elif defined(CONFIG_SH_FPU)
-+#ifdef CONFIG_CPU_SUBTYPE_SHX3
-+	set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
-+	set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
-+#else
-+	set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
-+	set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
-+#endif
-+#endif
++	eaddr_start = MAGIC_PAGE0_START;
++	for (i=0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
++		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
 +
-+#ifdef CONFIG_CPU_SH2
-+	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
-+#endif
-+#ifdef CONFIG_CPU_SH2A
-+	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
-+	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
-+#endif
++		eaddr = eaddr_start;
++		eaddr_end = eaddr + PAGE_SIZE;
++		while (eaddr < eaddr_end) {
++			asm __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
++			eaddr += L1_CACHE_BYTES;
++		}
 +
-+	/* Setup VBR for boot cpu */
-+	per_cpu_trap_init();
++		sh64_teardown_dtlb_cache_slot();
++		eaddr_start += PAGE_SIZE;
++	}
 +}
 +
-+void show_trace(struct task_struct *tsk, unsigned long *sp,
-+		struct pt_regs *regs)
++static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
++				unsigned long addr, unsigned long end)
 +{
-+	unsigned long addr;
-+
-+	if (regs && user_mode(regs))
-+		return;
-+
-+	printk("\nCall trace: ");
-+#ifdef CONFIG_KALLSYMS
-+	printk("\n");
-+#endif
++	pgd_t *pgd;
++	pmd_t *pmd;
++	pte_t *pte;
++	pte_t entry;
++	spinlock_t *ptl;
++	unsigned long paddr;
 +
-+	while (!kstack_end(sp)) {
-+		addr = *sp++;
-+		if (kernel_text_address(addr))
-+			print_ip_sym(addr);
-+	}
++	if (!mm)
++		return; /* No way to find physical address of page */
 +
-+	printk("\n");
++	pgd = pgd_offset(mm, addr);
++	if (pgd_bad(*pgd))
++		return;
 +
-+	if (!tsk)
-+		tsk = current;
++	pmd = pmd_offset(pgd, addr);
++	if (pmd_none(*pmd) || pmd_bad(*pmd))
++		return;
 +
-+	debug_show_held_locks(tsk);
++	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++	do {
++		entry = *pte;
++		if (pte_none(entry) || !pte_present(entry))
++			continue;
++		paddr = pte_val(entry) & PAGE_MASK;
++		sh64_dcache_purge_coloured_phy_page(paddr, addr);
++	} while (pte++, addr += PAGE_SIZE, addr != end);
++	pte_unmap_unlock(pte - 1, ptl);
 +}
++/****************************************************************************/
 +
-+void show_stack(struct task_struct *tsk, unsigned long *sp)
++static void sh64_dcache_purge_user_range(struct mm_struct *mm,
++			  unsigned long start, unsigned long end)
 +{
-+	unsigned long stack;
-+
-+	if (!tsk)
-+		tsk = current;
-+	if (tsk == current)
-+		sp = (unsigned long *)current_stack_pointer;
-+	else
-+		sp = (unsigned long *)tsk->thread.sp;
-+
-+	stack = (unsigned long)sp;
-+	dump_mem("Stack: ", stack, THREAD_SIZE +
-+		 (unsigned long)task_stack_page(tsk));
-+	show_trace(tsk, sp, NULL);
-+}
++	/* There are at least 5 choices for the implementation of this, with
++	   pros (+), cons(-), comments(*):
 +
-+void dump_stack(void)
-+{
-+	show_stack(NULL, NULL);
-+}
-+EXPORT_SYMBOL(dump_stack);
-diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
-new file mode 100644
-index 0000000..c0b3c6f
---- /dev/null
-+++ b/arch/sh/kernel/traps_64.c
-@@ -0,0 +1,975 @@
-+/*
-+ * arch/sh/kernel/traps_64.c
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003, 2004  Paul Mundt
-+ * Copyright (C) 2003, 2004  Richard Curnow
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/ptrace.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/kallsyms.h>
-+#include <linux/interrupt.h>
-+#include <linux/sysctl.h>
-+#include <linux/module.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/processor.h>
-+#include <asm/pgtable.h>
++	   1. ocbp each line in the range through the original user's ASID
++	      + no lines spuriously evicted
++	      - tlbmiss handling (must either handle faults on demand => extra
++		special-case code in tlbmiss critical path), or map the page in
++		advance (=> flush_tlb_range in advance to avoid multiple hits)
++	      - ASID switching
++	      - expensive for large ranges
 +
-+#undef DEBUG_EXCEPTION
-+#ifdef DEBUG_EXCEPTION
-+/* implemented in ../lib/dbg.c */
-+extern void show_excp_regs(char *fname, int trapnr, int signr,
-+			   struct pt_regs *regs);
-+#else
-+#define show_excp_regs(a, b, c, d)
-+#endif
++	   2. temporarily map each page in the range to a special effective
++	      address and ocbp through the temporary mapping; relies on the
++	      fact that SH-5 OCB* always do TLB lookup and match on ptags (they
++	      never look at the etags)
++	      + no spurious evictions
++	      - expensive for large ranges
++	      * surely cheaper than (1)
 +
-+static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
-+		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
++	   3. walk all the lines in the cache, check the tags, if a match
++	      occurs create a page mapping to ocbp the line through
++	      + no spurious evictions
++	      - tag inspection overhead
++	      - (especially for small ranges)
++	      - potential cost of setting up/tearing down page mapping for
++		every line that matches the range
++	      * cost partly independent of range size
 +
-+#define DO_ERROR(trapnr, signr, str, name, tsk) \
-+asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
-+{ \
-+	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
-+}
++	   4. walk all the lines in the cache, check the tags, if a match
++	      occurs use 4 * alloco to purge the line (+3 other probably
++	      innocent victims) by natural eviction
++	      + no tlb mapping overheads
++	      - spurious evictions
++	      - tag inspection overhead
 +
-+spinlock_t die_lock;
++	   5. implement like flush_cache_all
++	      + no tag inspection overhead
++	      - spurious evictions
++	      - bad for small ranges
 +
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+	console_verbose();
-+	spin_lock_irq(&die_lock);
-+	printk("%s: %lx\n", str, (err & 0xffffff));
-+	show_regs(regs);
-+	spin_unlock_irq(&die_lock);
-+	do_exit(SIGSEGV);
-+}
++	   (1) can be ruled out as more expensive than (2).  (2) appears best
++	   for small ranges.  The choice between (3), (4) and (5) for large
++	   ranges and the range size for the large/small boundary need
++	   benchmarking to determine.
 +
-+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-+{
-+	if (!user_mode(regs))
-+		die(str, regs, err);
-+}
++	   For now use approach (2) for small ranges and (5) for large ones.
 +
-+static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-+{
-+	if (!user_mode(regs)) {
-+		const struct exception_table_entry *fixup;
-+		fixup = search_exception_tables(regs->pc);
-+		if (fixup) {
-+			regs->pc = fixup->fixup;
-+			return;
-+		}
-+		die(str, regs, err);
-+	}
-+}
++	   */
 +
-+DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
-+DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
++	int n_pages;
 +
++	n_pages = ((end - start) >> PAGE_SHIFT);
++	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
++#if 1
++		sh64_dcache_purge_all();
++#else
++		unsigned long long set, way;
++		unsigned long mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
++		for (set = 0; set < cpu_data->dcache.sets; set++) {
++			unsigned long long set_base_config_addr = CACHE_OC_ADDRESS_ARRAY + (set << cpu_data->dcache.set_shift);
++			for (way = 0; way < cpu_data->dcache.ways; way++) {
++				unsigned long long config_addr = set_base_config_addr + (way << cpu_data->dcache.way_step_shift);
++				unsigned long long tag0;
++				unsigned long line_valid;
 +
-+/* Implement misaligned load/store handling for kernel (and optionally for user
-+   mode too).  Limitation : only SHmedia mode code is handled - there is no
-+   handling at all for misaligned accesses occurring in SHcompact code yet. */
++				asm __volatile__("getcfg %1, 0, %0" : "=r" (tag0) : "r" (config_addr));
++				line_valid = tag0 & SH_CACHE_VALID;
++				if (line_valid) {
++					unsigned long cache_asid;
++					unsigned long epn;
 +
-+static int misaligned_fixup(struct pt_regs *regs);
++					cache_asid = (tag0 & cpu_data->dcache.asid_mask) >> cpu_data->dcache.asid_shift;
++					/* The next line needs some
++					   explanation.  The virtual tags
++					   encode bits [31:13] of the virtual
++					   address, bit [12] of the 'tag' being
++					   implied by the cache set index. */
++					epn = (tag0 & cpu_data->dcache.epn_mask) | ((set & 0x80) << cpu_data->dcache.entry_shift);
 +
-+asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
-+{
-+	if (misaligned_fixup(regs) < 0) {
-+		do_unhandled_exception(7, SIGSEGV, "address error(load)",
-+				"do_address_error_load",
-+				error_code, regs, current);
++					if ((cache_asid == mm_asid) && (start <= epn) && (epn < end)) {
++						/* TODO : could optimise this
++						   call by batching multiple
++						   adjacent sets together. */
++						sh64_dcache_purge_sets(set, 1);
++						break; /* Don't waste time inspecting other ways for this set */
++					}
++				}
++			}
++		}
++#endif
++	} else {
++		/* Small range, covered by a single page table page */
++		start &= PAGE_MASK;	/* should already be so */
++		end = PAGE_ALIGN(end);	/* should already be so */
++		sh64_dcache_purge_user_pages(mm, start, end);
 +	}
 +	return;
 +}
 +
-+asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
++static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end)
 +{
-+	if (misaligned_fixup(regs) < 0) {
-+		do_unhandled_exception(8, SIGSEGV, "address error(store)",
-+				"do_address_error_store",
-+				error_code, regs, current);
-+	}
-+	return;
-+}
++	unsigned long long aligned_start;
++	unsigned long long ull_end;
++	unsigned long long addr;
 +
-+#if defined(CONFIG_SH64_ID2815_WORKAROUND)
++	ull_end = end;
 +
-+#define OPCODE_INVALID      0
-+#define OPCODE_USER_VALID   1
-+#define OPCODE_PRIV_VALID   2
++	/* Just wback over the range using the natural addresses.  TLB miss
++	   handling will be OK (TBC) : the range has just been written to by
++	   the signal frame setup code, so the PTEs must exist.
 +
-+/* getcon/putcon - requires checking which control register is referenced. */
-+#define OPCODE_CTRL_REG     3
++	   Note, if we have CONFIG_PREEMPT and get preempted inside this loop,
++	   it doesn't matter, even if the pid->ASID mapping changes whilst
++	   we're away.  In that case the cache will have been flushed when the
++	   mapping was renewed.  So the writebacks below will be nugatory (and
++	   we'll doubtless have to fault the TLB entry/ies in again with the
++	   new ASID), but it's a rare case.
++	   */
++	aligned_start = start & L1_CACHE_ALIGN_MASK;
++	addr = aligned_start;
++	while (addr < ull_end) {
++		asm __volatile__ ("ocbwb %0, 0" : : "r" (addr));
++		addr += L1_CACHE_BYTES;
++	}
++}
 +
-+/* Table of valid opcodes for SHmedia mode.
-+   Form a 10-bit value by concatenating the major/minor opcodes i.e.
-+   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
-+   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
-+   LSBs==4'b0000 etc). */
-+static unsigned long shmedia_opcode_table[64] = {
-+	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
-+	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
-+	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
-+	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
-+	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
-+	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
-+	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
-+	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
-+};
++/****************************************************************************/
 +
-+void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
++/* These *MUST* lie in an area of virtual address space that's otherwise unused. */
++#define UNIQUE_EADDR_START 0xe0000000UL
++#define UNIQUE_EADDR_END   0xe8000000UL
++
++static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr)
 +{
-+	/* Workaround SH5-101 cut2 silicon defect #2815 :
-+	   in some situations, inter-mode branches from SHcompact -> SHmedia
-+	   which should take ITLBMISS or EXECPROT exceptions at the target
-+	   falsely take RESINST at the target instead. */
++	/* Given a physical address paddr, and a user virtual address
++	   user_eaddr which will eventually be mapped to it, create a one-off
++	   kernel-private eaddr mapped to the same paddr.  This is used for
++	   creating special destination pages for copy_user_page and
++	   clear_user_page */
 +
-+	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
-+	unsigned long pc, aligned_pc;
-+	int get_user_error;
-+	int trapnr = 12;
-+	int signr = SIGILL;
-+	char *exception_name = "reserved_instruction";
++	static unsigned long current_pointer = UNIQUE_EADDR_START;
++	unsigned long coloured_pointer;
 +
-+	pc = regs->pc;
-+	if ((pc & 3) == 1) {
-+		/* SHmedia : check for defect.  This requires executable vmas
-+		   to be readable too. */
-+		aligned_pc = pc & ~3;
-+		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
-+			get_user_error = -EFAULT;
-+		} else {
-+			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
-+		}
-+		if (get_user_error >= 0) {
-+			unsigned long index, shift;
-+			unsigned long major, minor, combined;
-+			unsigned long reserved_field;
-+			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
-+			major = (opcode >> 26) & 0x3f;
-+			minor = (opcode >> 16) & 0xf;
-+			combined = (major << 4) | minor;
-+			index = major;
-+			shift = minor << 1;
-+			if (reserved_field == 0) {
-+				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
-+				switch (opcode_state) {
-+					case OPCODE_INVALID:
-+						/* Trap. */
-+						break;
-+					case OPCODE_USER_VALID:
-+						/* Restart the instruction : the branch to the instruction will now be from an RTE
-+						   not from SHcompact so the silicon defect won't be triggered. */
-+						return;
-+					case OPCODE_PRIV_VALID:
-+						if (!user_mode(regs)) {
-+							/* Should only ever get here if a module has
-+							   SHcompact code inside it.  If so, the same fix up is needed. */
-+							return; /* same reason */
-+						}
-+						/* Otherwise, user mode trying to execute a privileged instruction -
-+						   fall through to trap. */
-+						break;
-+					case OPCODE_CTRL_REG:
-+						/* If in privileged mode, return as above. */
-+						if (!user_mode(regs)) return;
-+						/* In user mode ... */
-+						if (combined == 0x9f) { /* GETCON */
-+							unsigned long regno = (opcode >> 20) & 0x3f;
-+							if (regno >= 62) {
-+								return;
-+							}
-+							/* Otherwise, reserved or privileged control register, => trap */
-+						} else if (combined == 0x1bf) { /* PUTCON */
-+							unsigned long regno = (opcode >> 4) & 0x3f;
-+							if (regno >= 62) {
-+								return;
-+							}
-+							/* Otherwise, reserved or privileged control register, => trap */
-+						} else {
-+							/* Trap */
-+						}
-+						break;
-+					default:
-+						/* Fall through to trap. */
-+						break;
-+				}
-+			}
-+			/* fall through to normal resinst processing */
-+		} else {
-+			/* Error trying to read opcode.  This typically means a
-+			   real fault, not a RESINST any more.  So change the
-+			   codes. */
-+			trapnr = 87;
-+			exception_name = "address error (exec)";
-+			signr = SIGSEGV;
-+		}
++	if (current_pointer == UNIQUE_EADDR_END) {
++		sh64_dcache_purge_all();
++		current_pointer = UNIQUE_EADDR_START;
 +	}
 +
-+	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
-+}
++	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | (user_eaddr & CACHE_OC_SYN_MASK);
++	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
 +
-+#else /* CONFIG_SH64_ID2815_WORKAROUND */
++	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
 +
-+/* If the workaround isn't needed, this is just a straightforward reserved
-+   instruction */
-+DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)
++	return coloured_pointer;
++}
 +
-+#endif /* CONFIG_SH64_ID2815_WORKAROUND */
++/****************************************************************************/
 +
-+/* Called with interrupts disabled */
-+asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
++static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address)
 +{
-+	show_excp_regs(__FUNCTION__, -1, -1, regs);
-+	die_if_kernel("exception", regs, ex);
-+}
++	void *coloured_to;
 +
-+int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
-+{
-+	/* Syscall debug */
-+        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);
++	/* Discard any existing cache entries of the wrong colour.  These are
++	   present quite often, if the kernel has recently used the page
++	   internally, then given it up, then it's been allocated to the user.
++	   */
++	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
 +
-+	die_if_kernel("unknown trapa", regs, scId);
++	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
++	sh64_page_copy(from, coloured_to);
 +
-+	return -ENOSYS;
++	sh64_teardown_dtlb_cache_slot();
 +}
 +
-+void show_stack(struct task_struct *tsk, unsigned long *sp)
++static void sh64_clear_user_page_coloured(void *to, unsigned long address)
 +{
-+#ifdef CONFIG_KALLSYMS
-+	extern void sh64_unwind(struct pt_regs *regs);
-+	struct pt_regs *regs;
++	void *coloured_to;
 +
-+	regs = tsk ? tsk->thread.kregs : NULL;
++	/* Discard any existing kernel-originated lines of the wrong colour (as
++	   above) */
++	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
 +
-+	sh64_unwind(regs);
-+#else
-+	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
-+#endif
-+}
++	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
++	sh64_page_clear(coloured_to);
 +
-+void show_task(unsigned long *sp)
-+{
-+	show_stack(NULL, sp);
++	sh64_teardown_dtlb_cache_slot();
 +}
 +
-+void dump_stack(void)
-+{
-+	show_task(NULL);
-+}
-+/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
-+EXPORT_SYMBOL(dump_stack);
++#endif /* !CONFIG_DCACHE_DISABLED */
 +
-+static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
-+		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
-+{
-+	show_excp_regs(fn_name, trapnr, signr, regs);
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = trapnr;
++/****************************************************************************/
 +
-+	if (user_mode(regs))
-+		force_sig(signr, tsk);
++/*##########################################################################
++			    EXTERNALLY CALLABLE API.
++  ##########################################################################*/
 +
-+	die_if_no_fixup(str, regs, error_code);
-+}
++/* These functions are described in Documentation/cachetlb.txt.
++   Each one of these functions varies in behaviour depending on whether the
++   I-cache and/or D-cache are configured out.
 +
-+static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
-+{
-+	int get_user_error;
-+	unsigned long aligned_pc;
-+	unsigned long opcode;
++   Note that the Linux term 'flush' corresponds to what is termed 'purge' in
++   the sh/sh64 jargon for the D-cache, i.e. write back dirty data then
++   invalidate the cache lines, and 'invalidate' for the I-cache.
++   */
 +
-+	if ((pc & 3) == 1) {
-+		/* SHmedia */
-+		aligned_pc = pc & ~3;
-+		if (from_user_mode) {
-+			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
-+				get_user_error = -EFAULT;
-+			} else {
-+				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
-+				*result_opcode = opcode;
-+			}
-+			return get_user_error;
-+		} else {
-+			/* If the fault was in the kernel, we can either read
-+			 * this directly, or if not, we fault.
-+			*/
-+			*result_opcode = *(unsigned long *) aligned_pc;
-+			return 0;
-+		}
-+	} else if ((pc & 1) == 0) {
-+		/* SHcompact */
-+		/* TODO : provide handling for this.  We don't really support
-+		   user-mode SHcompact yet, and for a kernel fault, this would
-+		   have to come from a module built for SHcompact.  */
-+		return -EFAULT;
-+	} else {
-+		/* misaligned */
-+		return -EFAULT;
-+	}
-+}
++#undef FLUSH_TRACE
 +
-+static int address_is_sign_extended(__u64 a)
++void flush_cache_all(void)
 +{
-+	__u64 b;
-+#if (NEFF == 32)
-+	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
-+	return (b == a) ? 1 : 0;
-+#else
-+#error "Sign extend check only works for NEFF==32"
-+#endif
++	/* Invalidate the entire contents of both caches, after writing back to
++	   memory any dirty data from the D-cache. */
++	sh64_dcache_purge_all();
++	sh64_icache_inv_all();
 +}
 +
-+static int generate_and_check_address(struct pt_regs *regs,
-+				      __u32 opcode,
-+				      int displacement_not_indexed,
-+				      int width_shift,
-+				      __u64 *address)
-+{
-+	/* return -1 for fault, 0 for OK */
++/****************************************************************************/
 +
-+	__u64 base_address, addr;
-+	int basereg;
++void flush_cache_mm(struct mm_struct *mm)
++{
++	/* Invalidate an entire user-address space from both caches, after
++	   writing back dirty data (e.g. for shared mmap etc). */
 +
-+	basereg = (opcode >> 20) & 0x3f;
-+	base_address = regs->regs[basereg];
-+	if (displacement_not_indexed) {
-+		__s64 displacement;
-+		displacement = (opcode >> 10) & 0x3ff;
-+		displacement = ((displacement << 54) >> 54); /* sign extend */
-+		addr = (__u64)((__s64)base_address + (displacement << width_shift));
-+	} else {
-+		__u64 offset;
-+		int offsetreg;
-+		offsetreg = (opcode >> 10) & 0x3f;
-+		offset = regs->regs[offsetreg];
-+		addr = base_address + offset;
-+	}
++	/* This could be coded selectively by inspecting all the tags then
++	   doing 4*alloco on any set containing a match (as for
++	   flush_cache_range), but fork/exit/execve (where this is called from)
++	   are expensive anyway. */
 +
-+	/* Check sign extended */
-+	if (!address_is_sign_extended(addr)) {
-+		return -1;
-+	}
++	/* Have to do a purge here, despite the comments re I-cache below.
++	   There could be odd-coloured dirty data associated with the mm still
++	   in the cache - if this gets written out through natural eviction
++	   after the kernel has reused the page there will be chaos.
++	   */
 +
-+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+	/* Check accessible.  For misaligned access in the kernel, assume the
-+	   address is always accessible (and if not, just fault when the
-+	   load/store gets done.) */
-+	if (user_mode(regs)) {
-+		if (addr >= TASK_SIZE) {
-+			return -1;
-+		}
-+		/* Do access_ok check later - it depends on whether it's a load or a store. */
-+	}
-+#endif
++	sh64_dcache_purge_all();
 +
-+	*address = addr;
-+	return 0;
++	/* The mm being torn down won't ever be active again, so any Icache
++	   lines tagged with its ASID won't be visible for the rest of the
++	   lifetime of this ASID cycle.  Before the ASID gets reused, there
++	   will be a flush_cache_all.  Hence we don't need to touch the
++	   I-cache.  This is similar to the lack of action needed in
++	   flush_tlb_mm - see fault.c. */
 +}
 +
-+/* Default value as for sh */
-+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+static int user_mode_unaligned_fixup_count = 10;
-+static int user_mode_unaligned_fixup_enable = 1;
-+#endif
-+
-+static int kernel_mode_unaligned_fixup_count = 32;
++/****************************************************************************/
 +
-+static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
++void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
++		       unsigned long end)
 +{
-+	unsigned short x;
-+	unsigned char *p, *q;
-+	p = (unsigned char *) (int) address;
-+	q = (unsigned char *) &x;
-+	q[0] = p[0];
-+	q[1] = p[1];
++	struct mm_struct *mm = vma->vm_mm;
 +
-+	if (do_sign_extend) {
-+		*result = (__u64)(__s64) *(short *) &x;
-+	} else {
-+		*result = (__u64) x;
-+	}
-+}
++	/* Invalidate (from both caches) the range [start,end) of virtual
++	   addresses from the user address space specified by mm, after writing
++	   back any dirty data.
 +
-+static void misaligned_kernel_word_store(__u64 address, __u64 value)
-+{
-+	unsigned short x;
-+	unsigned char *p, *q;
-+	p = (unsigned char *) (int) address;
-+	q = (unsigned char *) &x;
++	   Note, 'end' is 1 byte beyond the end of the range to flush. */
 +
-+	x = (__u16) value;
-+	p[0] = q[0];
-+	p[1] = q[1];
++	sh64_dcache_purge_user_range(mm, start, end);
++	sh64_icache_inv_user_page_range(mm, start, end);
 +}
 +
-+static int misaligned_load(struct pt_regs *regs,
-+			   __u32 opcode,
-+			   int displacement_not_indexed,
-+			   int width_shift,
-+			   int do_sign_extend)
-+{
-+	/* Return -1 for a fault, 0 for OK */
-+	int error;
-+	int destreg;
-+	__u64 address;
-+
-+	error = generate_and_check_address(regs, opcode,
-+			displacement_not_indexed, width_shift, &address);
-+	if (error < 0) {
-+		return error;
-+	}
-+
-+	destreg = (opcode >> 4) & 0x3f;
-+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+	if (user_mode(regs)) {
-+		__u64 buffer;
++/****************************************************************************/
 +
-+		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
-+			return -1;
-+		}
++void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
++{
++	/* Invalidate any entries in either cache for the vma within the user
++	   address space vma->vm_mm for the page starting at virtual address
++	   'eaddr'.   This seems to be used primarily in breaking COW.  Note,
++	   the I-cache must be searched too in case the page in question is
++	   both writable and being executed from (e.g. stack trampolines.)
 +
-+		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
-+			return -1; /* fault */
-+		}
-+		switch (width_shift) {
-+		case 1:
-+			if (do_sign_extend) {
-+				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
-+			} else {
-+				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
-+			}
-+			break;
-+		case 2:
-+			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
-+			break;
-+		case 3:
-+			regs->regs[destreg] = buffer;
-+			break;
-+		default:
-+			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
-+				width_shift, (unsigned long) regs->pc);
-+			break;
-+		}
-+	} else
-+#endif
-+	{
-+		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
-+		__u64 lo, hi;
++	   Note, this is called with pte lock held.
++	   */
 +
-+		switch (width_shift) {
-+		case 1:
-+			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
-+			break;
-+		case 2:
-+			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
-+			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
-+			regs->regs[destreg] = lo | hi;
-+			break;
-+		case 3:
-+			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
-+			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
-+			regs->regs[destreg] = lo | hi;
-+			break;
++	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 +
-+		default:
-+			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
-+				width_shift, (unsigned long) regs->pc);
-+			break;
-+		}
++	if (vma->vm_flags & VM_EXEC) {
++		sh64_icache_inv_user_page(vma, eaddr);
 +	}
-+
-+	return 0;
-+
 +}
 +
-+static int misaligned_store(struct pt_regs *regs,
-+			    __u32 opcode,
-+			    int displacement_not_indexed,
-+			    int width_shift)
-+{
-+	/* Return -1 for a fault, 0 for OK */
-+	int error;
-+	int srcreg;
-+	__u64 address;
-+
-+	error = generate_and_check_address(regs, opcode,
-+			displacement_not_indexed, width_shift, &address);
-+	if (error < 0) {
-+		return error;
-+	}
++/****************************************************************************/
 +
-+	srcreg = (opcode >> 4) & 0x3f;
-+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+	if (user_mode(regs)) {
-+		__u64 buffer;
++#ifndef CONFIG_DCACHE_DISABLED
 +
-+		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
-+			return -1;
-+		}
++void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
++{
++	/* 'from' and 'to' are kernel virtual addresses (within the superpage
++	   mapping of the physical RAM).  'address' is the user virtual address
++	   where the copy 'to' will be mapped after.  This allows a custom
++	   mapping to be used to ensure that the new copy is placed in the
++	   right cache sets for the user to see it without having to bounce it
++	   out via memory.  Note however : the call to flush_page_to_ram in
++	   (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
++	   very important case!
 +
-+		switch (width_shift) {
-+		case 1:
-+			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
-+			break;
-+		case 2:
-+			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
-+			break;
-+		case 3:
-+			buffer = regs->regs[srcreg];
-+			break;
-+		default:
-+			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
-+				width_shift, (unsigned long) regs->pc);
-+			break;
-+		}
++	   TBD : can we guarantee that on every call, any cache entries for
++	   'from' are in the same colour sets as 'address' also?  i.e. is this
++	   always used just to deal with COW?  (I suspect not). */
 +
-+		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
-+			return -1; /* fault */
-+		}
-+	} else
-+#endif
-+	{
-+		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
-+		__u64 val = regs->regs[srcreg];
++	/* There are two possibilities here for when the page 'from' was last accessed:
++	   * by the kernel : this is OK, no purge required.
++	   * by the/a user (e.g. for break_COW) : need to purge.
 +
-+		switch (width_shift) {
-+		case 1:
-+			misaligned_kernel_word_store(address, val);
-+			break;
-+		case 2:
-+			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
-+			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
-+			break;
-+		case 3:
-+			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
-+			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
-+			break;
++	   If the potential user mapping at 'address' is the same colour as
++	   'from' there is no need to purge any cache lines from the 'from'
++	   page mapped into cache sets of colour 'address'.  (The copy will be
++	   accessing the page through 'from').
++	   */
 +
-+		default:
-+			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
-+				width_shift, (unsigned long) regs->pc);
-+			break;
-+		}
++	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) {
++		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
 +	}
 +
-+	return 0;
++	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
++		/* No synonym problem on destination */
++		sh64_page_copy(from, to);
++	} else {
++		sh64_copy_user_page_coloured(to, from, address);
++	}
 +
++	/* Note, don't need to flush 'from' page from the cache again - it's
++	   done anyway by the generic code */
 +}
 +
-+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
-+   error. */
-+static int misaligned_fpu_load(struct pt_regs *regs,
-+			   __u32 opcode,
-+			   int displacement_not_indexed,
-+			   int width_shift,
-+			   int do_paired_load)
++void clear_user_page(void *to, unsigned long address, struct page *page)
 +{
-+	/* Return -1 for a fault, 0 for OK */
-+	int error;
-+	int destreg;
-+	__u64 address;
++	/* 'to' is a kernel virtual address (within the superpage
++	   mapping of the physical RAM).  'address' is the user virtual address
++	   where the 'to' page will be mapped after.  This allows a custom
++	   mapping to be used to ensure that the new copy is placed in the
++	   right cache sets for the user to see it without having to bounce it
++	   out via memory.
++	*/
 +
-+	error = generate_and_check_address(regs, opcode,
-+			displacement_not_indexed, width_shift, &address);
-+	if (error < 0) {
-+		return error;
++	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
++		/* No synonym problem on destination */
++		sh64_page_clear(to);
++	} else {
++		sh64_clear_user_page_coloured(to, address);
 +	}
++}
 +
-+	destreg = (opcode >> 4) & 0x3f;
-+	if (user_mode(regs)) {
-+		__u64 buffer;
-+		__u32 buflo, bufhi;
++#endif /* !CONFIG_DCACHE_DISABLED */
 +
-+		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
-+			return -1;
-+		}
++/****************************************************************************/
 +
-+		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
-+			return -1; /* fault */
-+		}
-+		/* 'current' may be the current owner of the FPU state, so
-+		   context switch the registers into memory so they can be
-+		   indexed by register number. */
-+		if (last_task_used_math == current) {
-+			enable_fpu();
-+			save_fpu(current, regs);
-+			disable_fpu();
-+			last_task_used_math = NULL;
-+			regs->sr |= SR_FD;
-+		}
++void flush_dcache_page(struct page *page)
++{
++	sh64_dcache_purge_phy_page(page_to_phys(page));
++	wmb();
++}
 +
-+		buflo = *(__u32*) &buffer;
-+		bufhi = *(1 + (__u32*) &buffer);
++/****************************************************************************/
 +
-+		switch (width_shift) {
-+		case 2:
-+			current->thread.fpu.hard.fp_regs[destreg] = buflo;
-+			break;
-+		case 3:
-+			if (do_paired_load) {
-+				current->thread.fpu.hard.fp_regs[destreg] = buflo;
-+				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
-+			} else {
-+#if defined(CONFIG_LITTLE_ENDIAN)
-+				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
-+				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
-+#else
-+				current->thread.fpu.hard.fp_regs[destreg] = buflo;
-+				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
-+#endif
-+			}
-+			break;
-+		default:
-+			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
-+				width_shift, (unsigned long) regs->pc);
-+			break;
-+		}
-+		return 0;
-+	} else {
-+		die ("Misaligned FPU load inside kernel", regs, 0);
-+		return -1;
-+	}
++void flush_icache_range(unsigned long start, unsigned long end)
++{
++	/* Flush the range [start,end] of kernel virtual address space from
++	   the I-cache.  The corresponding range must be purged from the
++	   D-cache also because the SH-5 doesn't have cache snooping between
++	   the caches.  The addresses will be visible through the superpage
++	   mapping, therefore it's guaranteed that there are no cache entries for
++	   the range in cache sets of the wrong colour.
 +
++	   Primarily used for cohering the I-cache after a module has
++	   been loaded.  */
++
++	/* We also make sure to purge the same range from the D-cache since
++	   flush_page_to_ram() won't be doing this for us! */
 +
++	sh64_dcache_purge_kernel_range(start, end);
++	wmb();
++	sh64_icache_inv_kernel_range(start, end);
 +}
 +
-+static int misaligned_fpu_store(struct pt_regs *regs,
-+			   __u32 opcode,
-+			   int displacement_not_indexed,
-+			   int width_shift,
-+			   int do_paired_load)
++/****************************************************************************/
++
++void flush_icache_user_range(struct vm_area_struct *vma,
++			struct page *page, unsigned long addr, int len)
 +{
-+	/* Return -1 for a fault, 0 for OK */
-+	int error;
-+	int srcreg;
-+	__u64 address;
++	/* Flush the range of user (defined by vma->vm_mm) address space
++	   starting at 'addr' for 'len' bytes from the cache.  The range does
++	   not straddle a page boundary, the unique physical page containing
++	   the range is 'page'.  This seems to be used mainly for invalidating
++	   an address range following a poke into the program text through the
++	   ptrace() call from another process (e.g. for BRK instruction
++	   insertion). */
 +
-+	error = generate_and_check_address(regs, opcode,
-+			displacement_not_indexed, width_shift, &address);
-+	if (error < 0) {
-+		return error;
++	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
++	mb();
++
++	if (vma->vm_flags & VM_EXEC) {
++		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
 +	}
++}
 +
-+	srcreg = (opcode >> 4) & 0x3f;
-+	if (user_mode(regs)) {
-+		__u64 buffer;
-+		/* Initialise these to NaNs. */
-+		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
++/*##########################################################################
++			ARCH/SH64 PRIVATE CALLABLE API.
++  ##########################################################################*/
 +
-+		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
-+			return -1;
-+		}
++void flush_cache_sigtramp(unsigned long start, unsigned long end)
++{
++	/* For the address range [start,end), write back the data from the
++	   D-cache and invalidate the corresponding region of the I-cache for
++	   the current process.  Used to flush signal trampolines on the stack
++	   to make them executable. */
 +
-+		/* 'current' may be the current owner of the FPU state, so
-+		   context switch the registers into memory so they can be
-+		   indexed by register number. */
-+		if (last_task_used_math == current) {
-+			enable_fpu();
-+			save_fpu(current, regs);
-+			disable_fpu();
-+			last_task_used_math = NULL;
-+			regs->sr |= SR_FD;
-+		}
++	sh64_dcache_wback_current_user_range(start, end);
++	wmb();
++	sh64_icache_inv_current_user_range(start, end);
++}
 +
-+		switch (width_shift) {
-+		case 2:
-+			buflo = current->thread.fpu.hard.fp_regs[srcreg];
-+			break;
-+		case 3:
-+			if (do_paired_load) {
-+				buflo = current->thread.fpu.hard.fp_regs[srcreg];
-+				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
-+			} else {
-+#if defined(CONFIG_LITTLE_ENDIAN)
-+				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
-+				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
-+#else
-+				buflo = current->thread.fpu.hard.fp_regs[srcreg];
-+				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
-+#endif
-+			}
-+			break;
-+		default:
-+			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
-+				width_shift, (unsigned long) regs->pc);
-+			break;
-+		}
+diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
+index 4896d73..22dacc7 100644
+--- a/arch/sh/mm/cache-sh7705.c
++++ b/arch/sh/mm/cache-sh7705.c
+@@ -71,7 +71,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
+ /*
+  * Writeback&Invalidate the D-cache of the page
+  */
+-static void __flush_dcache_page(unsigned long phys)
++static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
+ {
+ 	unsigned long ways, waysize, addrstart;
+ 	unsigned long flags;
+@@ -92,7 +92,7 @@ static void __flush_dcache_page(unsigned long phys)
+ 	 * possible.
+ 	 */
+ 	local_irq_save(flags);
+-	jump_to_P2();
++	jump_to_uncached();
+ 
+ 	ways = current_cpu_data.dcache.ways;
+ 	waysize = current_cpu_data.dcache.sets;
+@@ -118,7 +118,7 @@ static void __flush_dcache_page(unsigned long phys)
+ 		addrstart += current_cpu_data.dcache.way_incr;
+ 	} while (--ways);
+ 
+-	back_to_P1();
++	back_to_cached();
+ 	local_irq_restore(flags);
+ }
+ 
+@@ -132,15 +132,15 @@ void flush_dcache_page(struct page *page)
+ 		__flush_dcache_page(PHYSADDR(page_address(page)));
+ }
+ 
+-void flush_cache_all(void)
++void __uses_jump_to_uncached flush_cache_all(void)
+ {
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	jump_to_P2();
++	jump_to_uncached();
+ 
+ 	cache_wback_all();
+-	back_to_P1();
++	back_to_cached();
+ 	local_irq_restore(flags);
+ }
+ 
+diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
+deleted file mode 100644
+index 7a7c81e..0000000
+--- a/arch/sh/mm/clear_page.S
++++ /dev/null
+@@ -1,152 +0,0 @@
+-/*
+- * __clear_user_page, __clear_user, clear_page implementation of SuperH
+- *
+- * Copyright (C) 2001  Kaz Kojima
+- * Copyright (C) 2001, 2002  Niibe Yutaka
+- * Copyright (C) 2006  Paul Mundt
+- */
+-#include <linux/linkage.h>
+-#include <asm/page.h>
+-
+-/*
+- * clear_page_slow
+- * @to: P1 address
+- *
+- * void clear_page_slow(void *to)
+- */
+-
+-/*
+- * r0 --- scratch
+- * r4 --- to
+- * r5 --- to + PAGE_SIZE
+- */
+-ENTRY(clear_page_slow)
+-	mov	r4,r5
+-	mov.l	.Llimit,r0
+-	add	r0,r5
+-	mov	#0,r0
+-	!
+-1:
+-#if defined(CONFIG_CPU_SH3)
+-	mov.l	r0,@r4
+-#elif defined(CONFIG_CPU_SH4)
+-	movca.l	r0,@r4
+-	mov	r4,r1
+-#endif
+-	add	#32,r4
+-	mov.l	r0,@-r4
+-	mov.l	r0,@-r4
+-	mov.l	r0,@-r4
+-	mov.l	r0,@-r4
+-	mov.l	r0,@-r4
+-	mov.l	r0,@-r4
+-	mov.l	r0,@-r4
+-#if defined(CONFIG_CPU_SH4)
+-	ocbwb	@r1
+-#endif
+-	cmp/eq	r5,r4
+-	bf/s	1b
+-	 add	#28,r4
+-	!
+-	rts
+-	 nop
+-.Llimit:	.long	(PAGE_SIZE-28)
+-
+-ENTRY(__clear_user)
+-	!
+-	mov	#0, r0
+-	mov	#0xe0, r1	! 0xffffffe0
+-	!
+-	! r4..(r4+31)&~32 	   -------- not aligned	[ Area 0 ]
+-	! (r4+31)&~32..(r4+r5)&~32 -------- aligned	[ Area 1 ]
+-	! (r4+r5)&~32..r4+r5       -------- not aligned	[ Area 2 ]
+-	!
+-	! Clear area 0
+-	mov	r4, r2
+-	!
+-	tst	r1, r5		! length < 32
+-	bt	.Larea2		! skip to remainder
+-	!
+-	add	#31, r2
+-	and	r1, r2
+-	cmp/eq	r4, r2
+-	bt	.Larea1
+-	mov	r2, r3
+-	sub	r4, r3
+-	mov	r3, r7
+-	mov	r4, r2
+-	!
+-.L0:	dt	r3
+-0:	mov.b	r0, @r2
+-	bf/s	.L0
+-	 add	#1, r2
+-	!
+-	sub	r7, r5
+-	mov	r2, r4
+-.Larea1:
+-	mov	r4, r3
+-	add	r5, r3
+-	and	r1, r3
+-	cmp/hi	r2, r3
+-	bf	.Larea2
+-	!
+-	! Clear area 1
+-#if defined(CONFIG_CPU_SH4)
+-1:	movca.l	r0, @r2
+-#else
+-1:	mov.l	r0, @r2
+-#endif
+-	add	#4, r2
+-2:	mov.l	r0, @r2
+-	add	#4, r2
+-3:	mov.l	r0, @r2
+-	add	#4, r2
+-4:	mov.l	r0, @r2
+-	add	#4, r2
+-5:	mov.l	r0, @r2
+-	add	#4, r2
+-6:	mov.l	r0, @r2
+-	add	#4, r2
+-7:	mov.l	r0, @r2
+-	add	#4, r2
+-8:	mov.l	r0, @r2
+-	add	#4, r2
+-	cmp/hi	r2, r3
+-	bt/s	1b
+-	 nop
+-	!
+-	! Clear area 2
+-.Larea2:
+-	mov	r4, r3
+-	add	r5, r3
+-	cmp/hs	r3, r2
+-	bt/s	.Ldone
+-	 sub	r2, r3
+-.L2:	dt	r3
+-9:	mov.b	r0, @r2
+-	bf/s	.L2
+-	 add	#1, r2
+-	!
+-.Ldone:	rts
+-	 mov	#0, r0	! return 0 as normal return
+-
+-	! return the number of bytes remained
+-.Lbad_clear_user:
+-	mov	r4, r0
+-	add	r5, r0
+-	rts
+-	 sub	r2, r0
+-
+-.section __ex_table,"a"
+-	.align 2
+-	.long	0b, .Lbad_clear_user
+-	.long	1b, .Lbad_clear_user
+-	.long	2b, .Lbad_clear_user
+-	.long	3b, .Lbad_clear_user
+-	.long	4b, .Lbad_clear_user
+-	.long	5b, .Lbad_clear_user
+-	.long	6b, .Lbad_clear_user
+-	.long	7b, .Lbad_clear_user
+-	.long	8b, .Lbad_clear_user
+-	.long	9b, .Lbad_clear_user
+-.previous
+diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
+index e220c29..7b2131c 100644
+--- a/arch/sh/mm/consistent.c
++++ b/arch/sh/mm/consistent.c
+@@ -1,7 +1,9 @@
+ /*
+  * arch/sh/mm/consistent.c
+  *
+- * Copyright (C) 2004  Paul Mundt
++ * Copyright (C) 2004 - 2007  Paul Mundt
++ *
++ * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+  * License.  See the file "COPYING" in the main directory of this archive
+@@ -13,58 +15,152 @@
+ #include <asm/addrspace.h>
+ #include <asm/io.h>
+ 
+-void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
++struct dma_coherent_mem {
++	void		*virt_base;
++	u32		device_base;
++	int		size;
++	int		flags;
++	unsigned long	*bitmap;
++};
 +
-+		*(__u32*) &buffer = buflo;
-+		*(1 + (__u32*) &buffer) = bufhi;
-+		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
-+			return -1; /* fault */
++void *dma_alloc_coherent(struct device *dev, size_t size,
++			   dma_addr_t *dma_handle, gfp_t gfp)
+ {
+-	struct page *page, *end, *free;
+ 	void *ret;
+-	int order;
++	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++	int order = get_order(size);
+ 
+-	size = PAGE_ALIGN(size);
+-	order = get_order(size);
++	if (mem) {
++		int page = bitmap_find_free_region(mem->bitmap, mem->size,
++						     order);
++		if (page >= 0) {
++			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
++			ret = mem->virt_base + (page << PAGE_SHIFT);
++			memset(ret, 0, size);
++			return ret;
 +		}
-+		return 0;
-+	} else {
-+		die ("Misaligned FPU load inside kernel", regs, 0);
-+		return -1;
++		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
++			return NULL;
++	}
+ 
+-	page = alloc_pages(gfp, order);
+-	if (!page)
+-		return NULL;
+-	split_page(page, order);
++	ret = (void *)__get_free_pages(gfp, order);
+ 
+-	ret = page_address(page);
+-	memset(ret, 0, size);
+-	*handle = virt_to_phys(ret);
++	if (ret != NULL) {
++		memset(ret, 0, size);
++		/*
++		 * Pages from the page allocator may have data present in
++		 * cache. So flush the cache before using uncached memory.
++		 */
++		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
++		*dma_handle = virt_to_phys(ret);
 +	}
++	return ret;
 +}
-+#endif
++EXPORT_SYMBOL(dma_alloc_coherent);
+ 
+-	/*
+-	 * We must flush the cache before we pass it on to the device
+-	 */
+-	__flush_purge_region(ret, size);
++void dma_free_coherent(struct device *dev, size_t size,
++			 void *vaddr, dma_addr_t dma_handle)
++{
++	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++	int order = get_order(size);
+ 
+-	page = virt_to_page(ret);
+-	free = page + (size >> PAGE_SHIFT);
+-	end  = page + (1 << order);
++	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+ 
+-	while (++page < end) {
+-		/* Free any unused pages */
+-		if (page >= free) {
+-			__free_page(page);
+-		}
++		bitmap_release_region(mem->bitmap, page, order);
++	} else {
++		WARN_ON(irqs_disabled());	/* for portability */
++		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
++		free_pages((unsigned long)vaddr, order);
+ 	}
++}
++EXPORT_SYMBOL(dma_free_coherent);
 +
-+static int misaligned_fixup(struct pt_regs *regs)
++int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++				dma_addr_t device_addr, size_t size, int flags)
 +{
-+	unsigned long opcode;
-+	int error;
-+	int major, minor;
++	void __iomem *mem_base = NULL;
++	int pages = size >> PAGE_SHIFT;
++	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
 +
-+#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+	/* Never fixup user mode misaligned accesses without this option enabled. */
-+	return -1;
-+#else
-+	if (!user_mode_unaligned_fixup_enable) return -1;
-+#endif
++	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
++		goto out;
++	if (!size)
++		goto out;
++	if (dev->dma_mem)
++		goto out;
 +
-+	error = read_opcode(regs->pc, &opcode, user_mode(regs));
-+	if (error < 0) {
-+		return error;
-+	}
-+	major = (opcode >> 26) & 0x3f;
-+	minor = (opcode >> 16) & 0xf;
++	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+ 
+-	return P2SEGADDR(ret);
++	mem_base = ioremap_nocache(bus_addr, size);
++	if (!mem_base)
++		goto out;
 +
-+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
-+		--user_mode_unaligned_fixup_count;
-+		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
-+		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
-+		       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
-+	} else
-+#endif
-+	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
-+		--kernel_mode_unaligned_fixup_count;
-+		if (in_interrupt()) {
-+			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
-+			       (__u32)regs->pc, opcode);
-+		} else {
-+			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
-+			       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
-+		}
-+	}
++	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++	if (!dev->dma_mem)
++		goto out;
++	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++	if (!dev->dma_mem->bitmap)
++		goto free1_out;
 +
++	dev->dma_mem->virt_base = mem_base;
++	dev->dma_mem->device_base = device_addr;
++	dev->dma_mem->size = pages;
++	dev->dma_mem->flags = flags;
 +
-+	switch (major) {
-+		case (0x84>>2): /* LD.W */
-+			error = misaligned_load(regs, opcode, 1, 1, 1);
-+			break;
-+		case (0xb0>>2): /* LD.UW */
-+			error = misaligned_load(regs, opcode, 1, 1, 0);
-+			break;
-+		case (0x88>>2): /* LD.L */
-+			error = misaligned_load(regs, opcode, 1, 2, 1);
-+			break;
-+		case (0x8c>>2): /* LD.Q */
-+			error = misaligned_load(regs, opcode, 1, 3, 0);
-+			break;
++	if (flags & DMA_MEMORY_MAP)
++		return DMA_MEMORY_MAP;
 +
-+		case (0xa4>>2): /* ST.W */
-+			error = misaligned_store(regs, opcode, 1, 1);
-+			break;
-+		case (0xa8>>2): /* ST.L */
-+			error = misaligned_store(regs, opcode, 1, 2);
-+			break;
-+		case (0xac>>2): /* ST.Q */
-+			error = misaligned_store(regs, opcode, 1, 3);
-+			break;
++	return DMA_MEMORY_IO;
 +
-+		case (0x40>>2): /* indexed loads */
-+			switch (minor) {
-+				case 0x1: /* LDX.W */
-+					error = misaligned_load(regs, opcode, 0, 1, 1);
-+					break;
-+				case 0x5: /* LDX.UW */
-+					error = misaligned_load(regs, opcode, 0, 1, 0);
-+					break;
-+				case 0x2: /* LDX.L */
-+					error = misaligned_load(regs, opcode, 0, 2, 1);
-+					break;
-+				case 0x3: /* LDX.Q */
-+					error = misaligned_load(regs, opcode, 0, 3, 0);
-+					break;
-+				default:
-+					error = -1;
-+					break;
-+			}
-+			break;
++ free1_out:
++	kfree(dev->dma_mem);
++ out:
++	if (mem_base)
++		iounmap(mem_base);
++	return 0;
+ }
++EXPORT_SYMBOL(dma_declare_coherent_memory);
+ 
+-void consistent_free(void *vaddr, size_t size)
++void dma_release_declared_memory(struct device *dev)
+ {
+-	unsigned long addr = P1SEGADDR((unsigned long)vaddr);
+-	struct page *page=virt_to_page(addr);
+-	int num_pages=(size+PAGE_SIZE-1) >> PAGE_SHIFT;
+-	int i;
++	struct dma_coherent_mem *mem = dev->dma_mem;
+ 
+-	for(i=0;i<num_pages;i++) {
+-		__free_page((page+i));
+-	}
++	if (!mem)
++		return;
++	dev->dma_mem = NULL;
++	iounmap(mem->virt_base);
++	kfree(mem->bitmap);
++	kfree(mem);
+ }
++EXPORT_SYMBOL(dma_release_declared_memory);
+ 
+-void consistent_sync(void *vaddr, size_t size, int direction)
++void *dma_mark_declared_memory_occupied(struct device *dev,
++					dma_addr_t device_addr, size_t size)
+ {
+-	void * p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
++	struct dma_coherent_mem *mem = dev->dma_mem;
++	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	int pos, err;
 +
-+		case (0x60>>2): /* indexed stores */
-+			switch (minor) {
-+				case 0x1: /* STX.W */
-+					error = misaligned_store(regs, opcode, 0, 1);
-+					break;
-+				case 0x2: /* STX.L */
-+					error = misaligned_store(regs, opcode, 0, 2);
-+					break;
-+				case 0x3: /* STX.Q */
-+					error = misaligned_store(regs, opcode, 0, 3);
-+					break;
-+				default:
-+					error = -1;
-+					break;
-+			}
-+			break;
++	if (!mem)
++		return ERR_PTR(-EINVAL);
 +
-+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+		case (0x94>>2): /* FLD.S */
-+			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
-+			break;
-+		case (0x98>>2): /* FLD.P */
-+			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
-+			break;
-+		case (0x9c>>2): /* FLD.D */
-+			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
-+			break;
-+		case (0x1c>>2): /* floating indexed loads */
-+			switch (minor) {
-+			case 0x8: /* FLDX.S */
-+				error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
-+				break;
-+			case 0xd: /* FLDX.P */
-+				error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
-+				break;
-+			case 0x9: /* FLDX.D */
-+				error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
-+				break;
-+			default:
-+				error = -1;
-+				break;
-+			}
-+			break;
-+		case (0xb4>>2): /* FLD.S */
-+			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
-+			break;
-+		case (0xb8>>2): /* FLD.P */
-+			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
-+			break;
-+		case (0xbc>>2): /* FLD.D */
-+			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
-+			break;
-+		case (0x3c>>2): /* floating indexed stores */
-+			switch (minor) {
-+			case 0x8: /* FSTX.S */
-+				error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
-+				break;
-+			case 0xd: /* FSTX.P */
-+				error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
-+				break;
-+			case 0x9: /* FSTX.D */
-+				error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
-+				break;
-+			default:
-+				error = -1;
-+				break;
-+			}
-+			break;
++	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
++	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
++	if (err != 0)
++		return ERR_PTR(err);
++	return mem->virt_base + (pos << PAGE_SHIFT);
++}
++EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
++
++void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
++		    enum dma_data_direction direction)
++{
++#ifdef CONFIG_CPU_SH5
++	void *p1addr = vaddr;
++#else
++	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
 +#endif
+ 
+ 	switch (direction) {
+ 	case DMA_FROM_DEVICE:		/* invalidate only */
+@@ -80,8 +176,4 @@ void consistent_sync(void *vaddr, size_t size, int direction)
+ 		BUG();
+ 	}
+ }
+-
+-EXPORT_SYMBOL(consistent_alloc);
+-EXPORT_SYMBOL(consistent_free);
+-EXPORT_SYMBOL(consistent_sync);
+-
++EXPORT_SYMBOL(dma_cache_sync);
+diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
+deleted file mode 100644
+index 4068501..0000000
+--- a/arch/sh/mm/copy_page.S
++++ /dev/null
+@@ -1,388 +0,0 @@
+-/*
+- * copy_page, __copy_user_page, __copy_user implementation of SuperH
+- *
+- * Copyright (C) 2001  Niibe Yutaka & Kaz Kojima
+- * Copyright (C) 2002  Toshinobu Sugioka
+- * Copyright (C) 2006  Paul Mundt
+- */
+-#include <linux/linkage.h>
+-#include <asm/page.h>
+-
+-/*
+- * copy_page_slow
+- * @to: P1 address
+- * @from: P1 address
+- *
+- * void copy_page_slow(void *to, void *from)
+- */
+-
+-/*
+- * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
+- * r8 --- from + PAGE_SIZE
+- * r9 --- not used
+- * r10 --- to
+- * r11 --- from
+- */
+-ENTRY(copy_page_slow)
+-	mov.l	r8,@-r15
+-	mov.l	r10,@-r15
+-	mov.l	r11,@-r15
+-	mov	r4,r10
+-	mov	r5,r11
+-	mov	r5,r8
+-	mov.l	.Lpsz,r0
+-	add	r0,r8
+-	!
+-1:	mov.l	@r11+,r0
+-	mov.l	@r11+,r1
+-	mov.l	@r11+,r2
+-	mov.l	@r11+,r3
+-	mov.l	@r11+,r4
+-	mov.l	@r11+,r5
+-	mov.l	@r11+,r6
+-	mov.l	@r11+,r7
+-#if defined(CONFIG_CPU_SH3)
+-	mov.l	r0,@r10
+-#elif defined(CONFIG_CPU_SH4)
+-	movca.l	r0,@r10
+-	mov	r10,r0
+-#endif
+-	add	#32,r10
+-	mov.l	r7,@-r10
+-	mov.l	r6,@-r10
+-	mov.l	r5,@-r10
+-	mov.l	r4,@-r10
+-	mov.l	r3,@-r10
+-	mov.l	r2,@-r10
+-	mov.l	r1,@-r10
+-#if defined(CONFIG_CPU_SH4)
+-	ocbwb	@r0
+-#endif
+-	cmp/eq	r11,r8
+-	bf/s	1b
+-	 add	#28,r10
+-	!
+-	mov.l	@r15+,r11
+-	mov.l	@r15+,r10
+-	mov.l	@r15+,r8
+-	rts
+-	 nop
+-
+-	.align 2
+-.Lpsz:	.long	PAGE_SIZE
+-/*
+- * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
+- * Return the number of bytes NOT copied
+- */
+-#define EX(...)			\
+-	9999: __VA_ARGS__ ;		\
+-	.section __ex_table, "a";	\
+-	.long 9999b, 6000f	;	\
+-	.previous
+-ENTRY(__copy_user)
+-	! Check if small number of bytes
+-	mov	#11,r0
+-	mov	r4,r3
+-	cmp/gt	r0,r6		! r6 (len) > r0 (11)
+-	bf/s	.L_cleanup_loop_no_pop
+-	 add	r6,r3		! last destination address
+-
+-	! Calculate bytes needed to align to src
+-	mov.l	r11,@-r15
+-	neg	r5,r0
+-	mov.l	r10,@-r15
+-	add	#4,r0
+-	mov.l	r9,@-r15
+-	and	#3,r0
+-	mov.l	r8,@-r15
+-	tst	r0,r0
+-	bt	2f
+-
+-1:
+-	! Copy bytes to long word align src
+-EX(	mov.b	@r5+,r1		)
+-	dt	r0
+-	add	#-1,r6
+-EX(	mov.b	r1,@r4		)
+-	bf/s	1b
+-	 add	#1,r4
+-
+-	! Jump to appropriate routine depending on dest
+-2:	mov	#3,r1
+-	mov	r6, r2
+-	and	r4,r1
+-	shlr2	r2
+-	shll2	r1
+-	mova	.L_jump_tbl,r0
+-	mov.l	@(r0,r1),r1
+-	jmp	@r1
+-	 nop
+-
+-	.align 2
+-.L_jump_tbl:
+-	.long	.L_dest00
+-	.long	.L_dest01
+-	.long	.L_dest10
+-	.long	.L_dest11
+-
+-/*
+- * Come here if there are less than 12 bytes to copy
+- *
+- * Keep the branch target close, so the bf/s callee doesn't overflow
+- * and result in a more expensive branch being inserted. This is the
+- * fast-path for small copies, the jump via the jump table will hit the
+- * default slow-path cleanup. -PFM.
+- */
+-.L_cleanup_loop_no_pop:
+-	tst	r6,r6		! Check explicitly for zero
+-	bt	1f
+-
+-2:
+-EX(	mov.b	@r5+,r0		)
+-	dt	r6
+-EX(	mov.b	r0,@r4		)
+-	bf/s	2b
+-	 add	#1,r4
+-
+-1:	mov	#0,r0		! normal return
+-5000:
+-
+-# Exception handler:
+-.section .fixup, "ax"
+-6000:
+-	mov.l	8000f,r1
+-	mov	r3,r0
+-	jmp	@r1
+-	 sub	r4,r0
+-	.align	2
+-8000:	.long	5000b
+-
+-.previous
+-	rts
+-	 nop
+-
+-! Destination = 00
+-
+-.L_dest00:
+-	! Skip the large copy for small transfers
+-	mov	#(32+32-4), r0
+-	cmp/gt	r6, r0		! r0 (60) > r6 (len)
+-	bt	1f
+-
+-	! Align dest to a 32 byte boundary
+-	neg	r4,r0
+-	add	#0x20, r0
+-	and	#0x1f, r0
+-	tst	r0, r0
+-	bt	2f
+-
+-	sub	r0, r6
+-	shlr2	r0
+-3:
+-EX(	mov.l	@r5+,r1		)
+-	dt	r0
+-EX(	mov.l	r1,@r4		)
+-	bf/s	3b
+-	 add	#4,r4
+-
+-2:
+-EX(	mov.l	@r5+,r0		)
+-EX(	mov.l	@r5+,r1		)
+-EX(	mov.l	@r5+,r2		)
+-EX(	mov.l	@r5+,r7		)
+-EX(	mov.l	@r5+,r8		)
+-EX(	mov.l	@r5+,r9		)
+-EX(	mov.l	@r5+,r10	)
+-EX(	mov.l	@r5+,r11	)
+-#ifdef CONFIG_CPU_SH4
+-EX(	movca.l	r0,@r4		)
+-#else
+-EX(	mov.l	r0,@r4		)
+-#endif
+-	add	#-32, r6
+-EX(	mov.l	r1,@(4,r4)	)
+-	mov	#32, r0
+-EX(	mov.l	r2,@(8,r4)	)
+-	cmp/gt	r6, r0		! r0 (32) > r6 (len)
+-EX(	mov.l	r7,@(12,r4)	)
+-EX(	mov.l	r8,@(16,r4)	)
+-EX(	mov.l	r9,@(20,r4)	)
+-EX(	mov.l	r10,@(24,r4)	)
+-EX(	mov.l	r11,@(28,r4)	)
+-	bf/s	2b
+-	 add	#32,r4
+-
+-1:	mov	r6, r0
+-	shlr2	r0
+-	tst	r0, r0
+-	bt	.L_cleanup
+-1:
+-EX(	mov.l	@r5+,r1		)
+-	dt	r0
+-EX(	mov.l	r1,@r4		)
+-	bf/s	1b
+-	 add	#4,r4
+-
+-	bra	.L_cleanup
+-	 nop
+-
+-! Destination = 10
+-
+-.L_dest10:
+-	mov	r2,r7
+-	shlr2	r7
+-	shlr	r7
+-	tst	r7,r7
+-	mov	#7,r0
+-	bt/s	1f
+-	 and	r0,r2
+-2:
+-	dt	r7
+-#ifdef CONFIG_CPU_LITTLE_ENDIAN
+-EX(	mov.l	@r5+,r0		)
+-EX(	mov.l	@r5+,r1		)
+-EX(	mov.l	@r5+,r8		)
+-EX(	mov.l	@r5+,r9		)
+-EX(	mov.l	@r5+,r10	)
+-EX(	mov.w	r0,@r4		)
+-	add	#2,r4
+-	xtrct	r1,r0
+-	xtrct	r8,r1
+-	xtrct	r9,r8
+-	xtrct	r10,r9
+-
+-EX(	mov.l	r0,@r4		)
+-EX(	mov.l	r1,@(4,r4)	)
+-EX(	mov.l	r8,@(8,r4)	)
+-EX(	mov.l	r9,@(12,r4)	)
+-
+-EX(	mov.l	@r5+,r1		)
+-EX(	mov.l	@r5+,r8		)
+-EX(	mov.l	@r5+,r0		)
+-	xtrct	r1,r10
+-	xtrct	r8,r1
+-	xtrct	r0,r8
+-	shlr16	r0
+-EX(	mov.l	r10,@(16,r4)	)
+-EX(	mov.l	r1,@(20,r4)	)
+-EX(	mov.l	r8,@(24,r4)	)
+-EX(	mov.w	r0,@(28,r4)	)
+-	bf/s	2b
+-	 add	#30,r4
+-#else
+-EX(	mov.l	@(28,r5),r0	)
+-EX(	mov.l	@(24,r5),r8	)
+-EX(	mov.l	@(20,r5),r9	)
+-EX(	mov.l	@(16,r5),r10	)
+-EX(	mov.w	r0,@(30,r4)	)
+-	add	#-2,r4
+-	xtrct	r8,r0
+-	xtrct	r9,r8
+-	xtrct	r10,r9
+-EX(	mov.l	r0,@(28,r4)	)
+-EX(	mov.l	r8,@(24,r4)	)
+-EX(	mov.l	r9,@(20,r4)	)
+-
+-EX(	mov.l	@(12,r5),r0	)
+-EX(	mov.l	@(8,r5),r8	)
+-	xtrct	r0,r10
+-EX(	mov.l	@(4,r5),r9	)
+-	mov.l	r10,@(16,r4)
+-EX(	mov.l	@r5,r10		)
+-	xtrct	r8,r0
+-	xtrct	r9,r8
+-	xtrct	r10,r9
+-EX(	mov.l	r0,@(12,r4)	)
+-EX(	mov.l	r8,@(8,r4)	)
+-	swap.w	r10,r0
+-EX(	mov.l	r9,@(4,r4)	)
+-EX(	mov.w	r0,@(2,r4)	)
+-
+-	add	#32,r5
+-	bf/s	2b
+-	 add	#34,r4
+-#endif
+-	tst	r2,r2
+-	bt	.L_cleanup
+-
+-1:	! Read longword, write two words per iteration
+-EX(	mov.l	@r5+,r0		)
+-	dt	r2
+-#ifdef CONFIG_CPU_LITTLE_ENDIAN
+-EX(	mov.w	r0,@r4		)
+-	shlr16	r0
+-EX(	mov.w 	r0,@(2,r4)	)
+-#else
+-EX(	mov.w	r0,@(2,r4)	)
+-	shlr16	r0
+-EX(	mov.w	r0,@r4		)
+-#endif
+-	bf/s	1b
+-	 add	#4,r4
+-
+-	bra	.L_cleanup
+-	 nop
+-
+-! Destination = 01 or 11
+-
+-.L_dest01:
+-.L_dest11:
+-	! Read longword, write byte, word, byte per iteration
+-EX(	mov.l	@r5+,r0		)
+-	dt	r2
+-#ifdef CONFIG_CPU_LITTLE_ENDIAN
+-EX(	mov.b	r0,@r4		)
+-	shlr8	r0
+-	add	#1,r4
+-EX(	mov.w	r0,@r4		)
+-	shlr16	r0
+-EX(	mov.b	r0,@(2,r4)	)
+-	bf/s	.L_dest01
+-	 add	#3,r4
+-#else
+-EX(	mov.b	r0,@(3,r4)	)
+-	shlr8	r0
+-	swap.w	r0,r7
+-EX(	mov.b	r7,@r4		)
+-	add	#1,r4
+-EX(	mov.w	r0,@r4		)
+-	bf/s	.L_dest01
+-	 add	#3,r4
+-#endif
+-
+-! Cleanup last few bytes
+-.L_cleanup:
+-	mov	r6,r0
+-	and	#3,r0
+-	tst	r0,r0
+-	bt	.L_exit
+-	mov	r0,r6
+-
+-.L_cleanup_loop:
+-EX(	mov.b	@r5+,r0		)
+-	dt	r6
+-EX(	mov.b	r0,@r4		)
+-	bf/s	.L_cleanup_loop
+-	 add	#1,r4
+-
+-.L_exit:
+-	mov	#0,r0		! normal return
+-
+-5000:
+-
+-# Exception handler:
+-.section .fixup, "ax"
+-6000:
+-	mov.l	8000f,r1
+-	mov	r3,r0
+-	jmp	@r1
+-	 sub	r4,r0
+-	.align	2
+-8000:	.long	5000b
+-
+-.previous
+-	mov.l	@r15+,r8
+-	mov.l	@r15+,r9
+-	mov.l	@r15+,r10
+-	rts
+-	 mov.l	@r15+,r11
+diff --git a/arch/sh/mm/extable.c b/arch/sh/mm/extable.c
+deleted file mode 100644
+index c1cf446..0000000
+--- a/arch/sh/mm/extable.c
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/*
+- * linux/arch/sh/mm/extable.c
+- *  Taken from:
+- *   linux/arch/i386/mm/extable.c
+- */
+-
+-#include <linux/module.h>
+-#include <asm/uaccess.h>
+-
+-int fixup_exception(struct pt_regs *regs)
+-{
+-	const struct exception_table_entry *fixup;
+-
+-	fixup = search_exception_tables(regs->pc);
+-	if (fixup) {
+-		regs->pc = fixup->fixup;
+-		return 1;
+-	}
+-
+-	return 0;
+-}
+diff --git a/arch/sh/mm/extable_32.c b/arch/sh/mm/extable_32.c
+new file mode 100644
+index 0000000..c1cf446
+--- /dev/null
++++ b/arch/sh/mm/extable_32.c
+@@ -0,0 +1,21 @@
++/*
++ * linux/arch/sh/mm/extable.c
++ *  Taken from:
++ *   linux/arch/i386/mm/extable.c
++ */
 +
-+		default:
-+			/* Fault */
-+			error = -1;
-+			break;
-+	}
++#include <linux/module.h>
++#include <asm/uaccess.h>
 +
-+	if (error < 0) {
-+		return error;
-+	} else {
-+		regs->pc += 4; /* Skip the instruction that's just been emulated */
-+		return 0;
++int fixup_exception(struct pt_regs *regs)
++{
++	const struct exception_table_entry *fixup;
++
++	fixup = search_exception_tables(regs->pc);
++	if (fixup) {
++		regs->pc = fixup->fixup;
++		return 1;
 +	}
 +
++	return 0;
 +}
+diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c
+new file mode 100644
+index 0000000..f054996
+--- /dev/null
++++ b/arch/sh/mm/extable_64.c
+@@ -0,0 +1,82 @@
++/*
++ * arch/sh/mm/extable_64.c
++ *
++ * Copyright (C) 2003 Richard Curnow
++ * Copyright (C) 2003, 2004  Paul Mundt
++ *
++ * Cloned from the 2.5 SH version..
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/rwsem.h>
++#include <linux/module.h>
++#include <asm/uaccess.h>
 +
-+static ctl_table unaligned_table[] = {
-+	{
-+		.ctl_name	= CTL_UNNUMBERED,
-+		.procname	= "kernel_reports",
-+		.data		= &kernel_mode_unaligned_fixup_count,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec
-+	},
-+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-+	{
-+		.ctl_name	= CTL_UNNUMBERED,
-+		.procname	= "user_reports",
-+		.data		= &user_mode_unaligned_fixup_count,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec
-+	},
-+	{
-+		.ctl_name	= CTL_UNNUMBERED,
-+		.procname	= "user_enable",
-+		.data		= &user_mode_unaligned_fixup_enable,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec},
-+#endif
-+	{}
-+};
++extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
++extern void __copy_user_fixup(void);
 +
-+static ctl_table unaligned_root[] = {
-+	{
-+		.ctl_name	= CTL_UNNUMBERED,
-+		.procname	= "unaligned_fixup",
-+		.mode		= 0555,
-+		unaligned_table
-+	},
-+	{}
++static const struct exception_table_entry __copy_user_fixup_ex = {
++	.fixup = (unsigned long)&__copy_user_fixup,
 +};
 +
-+static ctl_table sh64_root[] = {
-+	{
-+		.ctl_name	= CTL_UNNUMBERED,
-+		.procname	= "sh64",
-+		.mode		= 0555,
-+		.child		= unaligned_root
-+	},
-+	{}
-+};
-+static struct ctl_table_header *sysctl_header;
-+static int __init init_sysctl(void)
++/*
++ * Some functions that may trap due to a bad user-mode address have too
++ * many loads and stores in them to make it at all practical to label
++ * each one and put them all in the main exception table.
++ *
++ * In particular, the fast memcpy routine is like this.  Its fix-up is
++ * just to fall back to a slow byte-at-a-time copy, which is handled the
++ * conventional way.  So it's functionally OK to just handle any trap
++ * occurring in the fast memcpy with that fixup.
++ */
++static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
 +{
-+	sysctl_header = register_sysctl_table(sh64_root);
-+	return 0;
++	if ((addr >= (unsigned long)&copy_user_memcpy) &&
++	    (addr <= (unsigned long)&copy_user_memcpy_end))
++		return &__copy_user_fixup_ex;
++
++	return NULL;
 +}
 +
-+__initcall(init_sysctl);
++/* Simple binary search */
++const struct exception_table_entry *
++search_extable(const struct exception_table_entry *first,
++		 const struct exception_table_entry *last,
++		 unsigned long value)
++{
++	const struct exception_table_entry *mid;
++
++	mid = check_exception_ranges(value);
++	if (mid)
++		return mid;
 +
++        while (first <= last) {
++		long diff;
 +
-+asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
++		mid = (last - first) / 2 + first;
++		diff = mid->insn - value;
++                if (diff == 0)
++                        return mid;
++                else if (diff < 0)
++                        first = mid+1;
++                else
++                        last = mid-1;
++        }
++
++        return NULL;
++}
++
++int fixup_exception(struct pt_regs *regs)
 +{
-+	u64 peek_real_address_q(u64 addr);
-+	u64 poke_real_address_q(u64 addr, u64 val);
-+	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
-+	unsigned long long exp_cause;
-+	/* It's not worth ioremapping the debug module registers for the amount
-+	   of access we make to them - just go direct to their physical
-+	   addresses. */
-+	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
-+	if (exp_cause & ~4) {
-+		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
-+			(unsigned long)(exp_cause & 0xffffffff));
++	const struct exception_table_entry *fixup;
++
++	fixup = search_exception_tables(regs->pc);
++	if (fixup) {
++		regs->pc = fixup->fixup;
++		return 1;
 +	}
-+	show_state();
-+	/* Clear all DEBUGINT causes */
-+	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
++
++	return 0;
 +}
-diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
-index 0956fb3..d7d4991 100644
---- a/arch/sh/kernel/vmlinux.lds.S
-+++ b/arch/sh/kernel/vmlinux.lds.S
-@@ -1,138 +1,5 @@
+diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
+deleted file mode 100644
+index 60d74f7..0000000
+--- a/arch/sh/mm/fault.c
++++ /dev/null
+@@ -1,303 +0,0 @@
 -/*
-- * ld script to make SuperH Linux kernel
-- * Written by Niibe Yutaka
+- * Page fault handler for SH with an MMU.
+- *
+- *  Copyright (C) 1999  Niibe Yutaka
+- *  Copyright (C) 2003 - 2007  Paul Mundt
+- *
+- *  Based on linux/arch/i386/mm/fault.c:
+- *   Copyright (C) 1995  Linus Torvalds
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/hardirq.h>
+-#include <linux/kprobes.h>
+-#include <asm/system.h>
+-#include <asm/mmu_context.h>
+-#include <asm/tlbflush.h>
+-#include <asm/kgdb.h>
+-
+-/*
+- * This routine handles page faults.  It determines the address,
+- * and the problem, and then passes it off to one of the appropriate
+- * routines.
 - */
--#include <asm/thread_info.h>
--#include <asm/cache.h>
--#include <asm-generic/vmlinux.lds.h>
--
--#ifdef CONFIG_CPU_LITTLE_ENDIAN
--OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
-+#ifdef CONFIG_SUPERH32
-+# include "vmlinux_32.lds.S"
- #else
--OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
-+# include "vmlinux_64.lds.S"
- #endif
--OUTPUT_ARCH(sh)
--ENTRY(_start)
--SECTIONS
+-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+-					unsigned long writeaccess,
+-					unsigned long address)
 -{
--	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
--	_text = .;			/* Text and read-only data */
+-	struct task_struct *tsk;
+-	struct mm_struct *mm;
+-	struct vm_area_struct * vma;
+-	int si_code;
+-	int fault;
+-	siginfo_t info;
 -
--	.empty_zero_page : {
--		*(.empty_zero_page)
--	} = 0
+-	trace_hardirqs_on();
+-	local_irq_enable();
 -
--	.text : {
--		*(.text.head)
--		TEXT_TEXT
--		SCHED_TEXT
--		LOCK_TEXT
--		KPROBES_TEXT
--		*(.fixup)
--		*(.gnu.warning)
--	} = 0x0009
+-#ifdef CONFIG_SH_KGDB
+-	if (kgdb_nofault && kgdb_bus_err_hook)
+-		kgdb_bus_err_hook();
+-#endif
 -
--	. = ALIGN(16);		/* Exception table */
--	__start___ex_table = .;
--	__ex_table : { *(__ex_table) }
--	__stop___ex_table = .;
+-	tsk = current;
+-	mm = tsk->mm;
+-	si_code = SEGV_MAPERR;
 -
--	_etext = .;			/* End of text section */
+-	if (unlikely(address >= TASK_SIZE)) {
+-		/*
+-		 * Synchronize this task's top level page-table
+-		 * with the 'reference' page table.
+-		 *
+-		 * Do _not_ use "tsk" here. We might be inside
+-		 * an interrupt in the middle of a task switch..
+-		 */
+-		int offset = pgd_index(address);
+-		pgd_t *pgd, *pgd_k;
+-		pud_t *pud, *pud_k;
+-		pmd_t *pmd, *pmd_k;
 -
--	BUG_TABLE
--	NOTES
--	RO_DATA(PAGE_SIZE)
+-		pgd = get_TTB() + offset;
+-		pgd_k = swapper_pg_dir + offset;
 -
--	. = ALIGN(THREAD_SIZE);
--	.data : {			/* Data */
--		*(.data.init_task)
+-		/* This will never happen with the folded page table. */
+-		if (!pgd_present(*pgd)) {
+-			if (!pgd_present(*pgd_k))
+-				goto bad_area_nosemaphore;
+-			set_pgd(pgd, *pgd_k);
+-			return;
+-		}
 -
--		. = ALIGN(L1_CACHE_BYTES);
--		*(.data.cacheline_aligned)
+-		pud = pud_offset(pgd, address);
+-		pud_k = pud_offset(pgd_k, address);
+-		if (pud_present(*pud) || !pud_present(*pud_k))
+-			goto bad_area_nosemaphore;
+-		set_pud(pud, *pud_k);
 -
--		. = ALIGN(L1_CACHE_BYTES);
--		*(.data.read_mostly)
+-		pmd = pmd_offset(pud, address);
+-		pmd_k = pmd_offset(pud_k, address);
+-		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+-			goto bad_area_nosemaphore;
+-		set_pmd(pmd, *pmd_k);
 -
--		. = ALIGN(PAGE_SIZE);
--		*(.data.page_aligned)
+-		return;
+-	}
 -
--		__nosave_begin = .;
--		*(.data.nosave)
--		. = ALIGN(PAGE_SIZE);
--		__nosave_end = .;
+-	/*
+-	 * If we're in an interrupt or have no user
+-	 * context, we must not take the fault..
+-	 */
+-	if (in_atomic() || !mm)
+-		goto no_context;
 -
--		DATA_DATA
--		CONSTRUCTORS
+-	down_read(&mm->mmap_sem);
+-
+-	vma = find_vma(mm, address);
+-	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (expand_stack(vma, address))
+-		goto bad_area;
+-/*
+- * Ok, we have a good vm_area for this memory access, so
+- * we can handle it..
+- */
+-good_area:
+-	si_code = SEGV_ACCERR;
+-	if (writeaccess) {
+-		if (!(vma->vm_flags & VM_WRITE))
+-			goto bad_area;
+-	} else {
+-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+-			goto bad_area;
 -	}
 -
--	_edata = .;			/* End of data section */
+-	/*
+-	 * If for any reason at all we couldn't handle the fault,
+-	 * make sure we exit gracefully rather than endlessly redo
+-	 * the fault.
+-	 */
+-survive:
+-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+-	if (unlikely(fault & VM_FAULT_ERROR)) {
+-		if (fault & VM_FAULT_OOM)
+-			goto out_of_memory;
+-		else if (fault & VM_FAULT_SIGBUS)
+-			goto do_sigbus;
+-		BUG();
+-	}
+-	if (fault & VM_FAULT_MAJOR)
+-		tsk->maj_flt++;
+-	else
+-		tsk->min_flt++;
 -
--	. = ALIGN(PAGE_SIZE);		/* Init code and data */
--	__init_begin = .;
--	_sinittext = .;
--	.init.text : { *(.init.text) }
--	_einittext = .;
--	.init.data : { *(.init.data) }
+-	up_read(&mm->mmap_sem);
+-	return;
 -
--	. = ALIGN(16);
--	__setup_start = .;
--	.init.setup : { *(.init.setup) }
--	__setup_end = .;
+-/*
+- * Something tried to access memory that isn't in our memory map..
+- * Fix it, but check if it's kernel or user first..
+- */
+-bad_area:
+-	up_read(&mm->mmap_sem);
 -
--	__initcall_start = .;
--	.initcall.init : {
--		INITCALLS
+-bad_area_nosemaphore:
+-	if (user_mode(regs)) {
+-		info.si_signo = SIGSEGV;
+-		info.si_errno = 0;
+-		info.si_code = si_code;
+-		info.si_addr = (void *) address;
+-		force_sig_info(SIGSEGV, &info, tsk);
+-		return;
 -	}
--	__initcall_end = .;
--	__con_initcall_start = .;
--	.con_initcall.init : { *(.con_initcall.init) }
--	__con_initcall_end = .;
 -
--	SECURITY_INIT
+-no_context:
+-	/* Are we prepared to handle this kernel fault?  */
+-	if (fixup_exception(regs))
+-		return;
 -
--#ifdef CONFIG_BLK_DEV_INITRD
--	. = ALIGN(PAGE_SIZE);
--	__initramfs_start = .;
--	.init.ramfs : { *(.init.ramfs) }
--	__initramfs_end = .;
--#endif
+-/*
+- * Oops. The kernel tried to access some bad page. We'll have to
+- * terminate things with extreme prejudice.
+- *
+- */
 -
--	. = ALIGN(4);
--	__machvec_start = .;
--	.machvec.init : { *(.machvec.init) }
--	__machvec_end = .;
+-	bust_spinlocks(1);
 -
--	PERCPU(PAGE_SIZE)
+-	if (oops_may_print()) {
+-		__typeof__(pte_val(__pte(0))) page;
+-
+-		if (address < PAGE_SIZE)
+-			printk(KERN_ALERT "Unable to handle kernel NULL "
+-					  "pointer dereference");
+-		else
+-			printk(KERN_ALERT "Unable to handle kernel paging "
+-					  "request");
+-		printk(" at virtual address %08lx\n", address);
+-		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
+-		page = (unsigned long)get_TTB();
+-		if (page) {
+-			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
+-			printk(KERN_ALERT "*pde = %08lx\n", page);
+-			if (page & _PAGE_PRESENT) {
+-				page &= PAGE_MASK;
+-				address &= 0x003ff000;
+-				page = ((__typeof__(page) *)
+-						__va(page))[address >>
+-							    PAGE_SHIFT];
+-				printk(KERN_ALERT "*pte = %08lx\n", page);
+-			}
+-		}
+-	}
+-
+-	die("Oops", regs, writeaccess);
+-	bust_spinlocks(0);
+-	do_exit(SIGKILL);
+-
+-/*
+- * We ran out of memory, or some other thing happened to us that made
+- * us unable to handle the page fault gracefully.
+- */
+-out_of_memory:
+-	up_read(&mm->mmap_sem);
+-	if (is_global_init(current)) {
+-		yield();
+-		down_read(&mm->mmap_sem);
+-		goto survive;
+-	}
+-	printk("VM: killing process %s\n", tsk->comm);
+-	if (user_mode(regs))
+-		do_group_exit(SIGKILL);
+-	goto no_context;
+-
+-do_sigbus:
+-	up_read(&mm->mmap_sem);
 -
 -	/*
--	 * .exit.text is discarded at runtime, not link time, to deal with
--	 * references from __bug_table
+-	 * Send a sigbus, regardless of whether we were in kernel
+-	 * or user mode.
 -	 */
--	.exit.text : { *(.exit.text) }
--	.exit.data : { *(.exit.data) }
+-	info.si_signo = SIGBUS;
+-	info.si_errno = 0;
+-	info.si_code = BUS_ADRERR;
+-	info.si_addr = (void *)address;
+-	force_sig_info(SIGBUS, &info, tsk);
 -
--	. = ALIGN(PAGE_SIZE);
--	.bss : {
--		__init_end = .;
--		__bss_start = .;		/* BSS */
--		*(.bss.page_aligned)
--		*(.bss)
--		*(COMMON)
--		. = ALIGN(4);
--		_ebss = .;			/* uClinux MTD sucks */
--		_end = . ;
--	}
+-	/* Kernel mode? Handle exceptions or die */
+-	if (!user_mode(regs))
+-		goto no_context;
+-}
+-
+-#ifdef CONFIG_SH_STORE_QUEUES
+-/*
+- * This is a special case for the SH-4 store queues, as pages for this
+- * space still need to be faulted in before it's possible to flush the
+- * store queue cache for writeout to the remapped region.
+- */
+-#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+-#else
+-#define P3_ADDR_MAX		P4SEG
+-#endif
+-
+-/*
+- * Called with interrupts disabled.
+- */
+-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
+-					 unsigned long writeaccess,
+-					 unsigned long address)
+-{
+-	pgd_t *pgd;
+-	pud_t *pud;
+-	pmd_t *pmd;
+-	pte_t *pte;
+-	pte_t entry;
+-
+-#ifdef CONFIG_SH_KGDB
+-	if (kgdb_nofault && kgdb_bus_err_hook)
+-		kgdb_bus_err_hook();
+-#endif
 -
 -	/*
--	 * When something in the kernel is NOT compiled as a module, the
--	 * module cleanup code and data are put into these segments. Both
--	 * can then be thrown away, as cleanup code is never called unless
--	 * it's a module.
+-	 * We don't take page faults for P1, P2, and parts of P4, these
+-	 * are always mapped, whether it be due to legacy behaviour in
+-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
 -	 */
--	/DISCARD/ : {
--		*(.exitcall.exit)
+-	if (address >= P3SEG && address < P3_ADDR_MAX) {
+-		pgd = pgd_offset_k(address);
+-	} else {
+-		if (unlikely(address >= TASK_SIZE || !current->mm))
+-			return 1;
+-
+-		pgd = pgd_offset(current->mm, address);
 -	}
 -
--	STABS_DEBUG
--	DWARF_DEBUG
+-	pud = pud_offset(pgd, address);
+-	if (pud_none_or_clear_bad(pud))
+-		return 1;
+-	pmd = pmd_offset(pud, address);
+-	if (pmd_none_or_clear_bad(pmd))
+-		return 1;
+-
+-	pte = pte_offset_kernel(pmd, address);
+-	entry = *pte;
+-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+-		return 1;
+-	if (unlikely(writeaccess && !pte_write(entry)))
+-		return 1;
+-
+-	if (writeaccess)
+-		entry = pte_mkdirty(entry);
+-	entry = pte_mkyoung(entry);
+-
+-	set_pte(pte, entry);
+-	update_mmu_cache(NULL, address, entry);
+-
+-	return 0;
 -}
-diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
+diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
 new file mode 100644
-index 0000000..c711378
+index 0000000..33b43d2
 --- /dev/null
-+++ b/arch/sh/kernel/vmlinux_32.lds.S
-@@ -0,0 +1,152 @@
++++ b/arch/sh/mm/fault_32.c
+@@ -0,0 +1,303 @@
 +/*
-+ * ld script to make SuperH Linux kernel
-+ * Written by Niibe Yutaka
++ * Page fault handler for SH with an MMU.
++ *
++ *  Copyright (C) 1999  Niibe Yutaka
++ *  Copyright (C) 2003 - 2007  Paul Mundt
++ *
++ *  Based on linux/arch/i386/mm/fault.c:
++ *   Copyright (C) 1995  Linus Torvalds
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
 + */
-+#include <asm/thread_info.h>
-+#include <asm/cache.h>
-+#include <asm-generic/vmlinux.lds.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/hardirq.h>
++#include <linux/kprobes.h>
++#include <asm/system.h>
++#include <asm/mmu_context.h>
++#include <asm/tlbflush.h>
++#include <asm/kgdb.h>
 +
-+#ifdef CONFIG_CPU_LITTLE_ENDIAN
-+OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
-+#else
-+OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
-+#endif
-+OUTPUT_ARCH(sh)
-+ENTRY(_start)
-+SECTIONS
++/*
++ * This routine handles page faults.  It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ */
++asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
++					unsigned long writeaccess,
++					unsigned long address)
 +{
-+#ifdef CONFIG_32BIT
-+	. = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
-+#else
-+	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	struct vm_area_struct * vma;
++	int si_code;
++	int fault;
++	siginfo_t info;
++
++	trace_hardirqs_on();
++	local_irq_enable();
++
++#ifdef CONFIG_SH_KGDB
++	if (kgdb_nofault && kgdb_bus_err_hook)
++		kgdb_bus_err_hook();
 +#endif
 +
-+	_text = .;			/* Text and read-only data */
++	tsk = current;
++	mm = tsk->mm;
++	si_code = SEGV_MAPERR;
 +
-+	.empty_zero_page : {
-+		*(.empty_zero_page)
-+	} = 0
++	if (unlikely(address >= TASK_SIZE)) {
++		/*
++		 * Synchronize this task's top level page-table
++		 * with the 'reference' page table.
++		 *
++		 * Do _not_ use "tsk" here. We might be inside
++		 * an interrupt in the middle of a task switch..
++		 */
++		int offset = pgd_index(address);
++		pgd_t *pgd, *pgd_k;
++		pud_t *pud, *pud_k;
++		pmd_t *pmd, *pmd_k;
 +
-+	.text : {
-+		*(.text.head)
-+		TEXT_TEXT
-+		SCHED_TEXT
-+		LOCK_TEXT
-+		KPROBES_TEXT
-+		*(.fixup)
-+		*(.gnu.warning)
-+	} = 0x0009
++		pgd = get_TTB() + offset;
++		pgd_k = swapper_pg_dir + offset;
 +
-+	. = ALIGN(16);		/* Exception table */
-+	__start___ex_table = .;
-+	__ex_table : { *(__ex_table) }
-+	__stop___ex_table = .;
++		/* This will never happen with the folded page table. */
++		if (!pgd_present(*pgd)) {
++			if (!pgd_present(*pgd_k))
++				goto bad_area_nosemaphore;
++			set_pgd(pgd, *pgd_k);
++			return;
++		}
 +
-+	_etext = .;			/* End of text section */
++		pud = pud_offset(pgd, address);
++		pud_k = pud_offset(pgd_k, address);
++		if (pud_present(*pud) || !pud_present(*pud_k))
++			goto bad_area_nosemaphore;
++		set_pud(pud, *pud_k);
 +
-+	BUG_TABLE
-+	NOTES
-+	RO_DATA(PAGE_SIZE)
++		pmd = pmd_offset(pud, address);
++		pmd_k = pmd_offset(pud_k, address);
++		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
++			goto bad_area_nosemaphore;
++		set_pmd(pmd, *pmd_k);
++
++		return;
++	}
 +
 +	/*
-+	 * Code which must be executed uncached and the associated data
++	 * If we're in an interrupt or have no user
++	 * context, we must not take the fault..
 +	 */
-+	. = ALIGN(PAGE_SIZE);
-+	__uncached_start = .;
-+	.uncached.text : { *(.uncached.text) }
-+	.uncached.data : { *(.uncached.data) }
-+	__uncached_end = .;
++	if (in_atomic() || !mm)
++		goto no_context;
 +
-+	. = ALIGN(THREAD_SIZE);
-+	.data : {			/* Data */
-+		*(.data.init_task)
++	down_read(&mm->mmap_sem);
 +
-+		. = ALIGN(L1_CACHE_BYTES);
-+		*(.data.cacheline_aligned)
++	vma = find_vma(mm, address);
++	if (!vma)
++		goto bad_area;
++	if (vma->vm_start <= address)
++		goto good_area;
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		goto bad_area;
++	if (expand_stack(vma, address))
++		goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++	si_code = SEGV_ACCERR;
++	if (writeaccess) {
++		if (!(vma->vm_flags & VM_WRITE))
++			goto bad_area;
++	} else {
++		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
++			goto bad_area;
++	}
 +
-+		. = ALIGN(L1_CACHE_BYTES);
-+		*(.data.read_mostly)
++	/*
++	 * If for any reason at all we couldn't handle the fault,
++	 * make sure we exit gracefully rather than endlessly redo
++	 * the fault.
++	 */
++survive:
++	fault = handle_mm_fault(mm, vma, address, writeaccess);
++	if (unlikely(fault & VM_FAULT_ERROR)) {
++		if (fault & VM_FAULT_OOM)
++			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGBUS)
++			goto do_sigbus;
++		BUG();
++	}
++	if (fault & VM_FAULT_MAJOR)
++		tsk->maj_flt++;
++	else
++		tsk->min_flt++;
 +
-+		. = ALIGN(PAGE_SIZE);
-+		*(.data.page_aligned)
++	up_read(&mm->mmap_sem);
++	return;
 +
-+		__nosave_begin = .;
-+		*(.data.nosave)
-+		. = ALIGN(PAGE_SIZE);
-+		__nosave_end = .;
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++	up_read(&mm->mmap_sem);
 +
-+		DATA_DATA
-+		CONSTRUCTORS
++bad_area_nosemaphore:
++	if (user_mode(regs)) {
++		info.si_signo = SIGSEGV;
++		info.si_errno = 0;
++		info.si_code = si_code;
++		info.si_addr = (void *) address;
++		force_sig_info(SIGSEGV, &info, tsk);
++		return;
 +	}
 +
-+	_edata = .;			/* End of data section */
++no_context:
++	/* Are we prepared to handle this kernel fault?  */
++	if (fixup_exception(regs))
++		return;
 +
-+	. = ALIGN(PAGE_SIZE);		/* Init code and data */
-+	__init_begin = .;
-+	_sinittext = .;
-+	.init.text : { INIT_TEXT }
-+	_einittext = .;
-+	.init.data : { INIT_DATA }
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ *
++ */
 +
-+	. = ALIGN(16);
-+	__setup_start = .;
-+	.init.setup : { *(.init.setup) }
-+	__setup_end = .;
++	bust_spinlocks(1);
 +
-+	__initcall_start = .;
-+	.initcall.init : {
-+		INITCALLS
-+	}
-+	__initcall_end = .;
-+	__con_initcall_start = .;
-+	.con_initcall.init : { *(.con_initcall.init) }
-+	__con_initcall_end = .;
++	if (oops_may_print()) {
++		unsigned long page;
 +
-+	SECURITY_INIT
++		if (address < PAGE_SIZE)
++			printk(KERN_ALERT "Unable to handle kernel NULL "
++					  "pointer dereference");
++		else
++			printk(KERN_ALERT "Unable to handle kernel paging "
++					  "request");
++		printk(" at virtual address %08lx\n", address);
++		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
++		page = (unsigned long)get_TTB();
++		if (page) {
++			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
++			printk(KERN_ALERT "*pde = %08lx\n", page);
++			if (page & _PAGE_PRESENT) {
++				page &= PAGE_MASK;
++				address &= 0x003ff000;
++				page = ((__typeof__(page) *)
++						__va(page))[address >>
++							    PAGE_SHIFT];
++				printk(KERN_ALERT "*pte = %08lx\n", page);
++			}
++		}
++	}
 +
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	. = ALIGN(PAGE_SIZE);
-+	__initramfs_start = .;
-+	.init.ramfs : { *(.init.ramfs) }
-+	__initramfs_end = .;
-+#endif
++	die("Oops", regs, writeaccess);
++	bust_spinlocks(0);
++	do_exit(SIGKILL);
 +
-+	. = ALIGN(4);
-+	__machvec_start = .;
-+	.machvec.init : { *(.machvec.init) }
-+	__machvec_end = .;
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++	up_read(&mm->mmap_sem);
++	if (is_global_init(current)) {
++		yield();
++		down_read(&mm->mmap_sem);
++		goto survive;
++	}
++	printk("VM: killing process %s\n", tsk->comm);
++	if (user_mode(regs))
++		do_group_exit(SIGKILL);
++	goto no_context;
 +
-+	PERCPU(PAGE_SIZE)
++do_sigbus:
++	up_read(&mm->mmap_sem);
 +
 +	/*
-+	 * .exit.text is discarded at runtime, not link time, to deal with
-+	 * references from __bug_table
++	 * Send a sigbus, regardless of whether we were in kernel
++	 * or user mode.
 +	 */
-+	.exit.text : { EXIT_TEXT }
-+	.exit.data : { EXIT_DATA }
++	info.si_signo = SIGBUS;
++	info.si_errno = 0;
++	info.si_code = BUS_ADRERR;
++	info.si_addr = (void *)address;
++	force_sig_info(SIGBUS, &info, tsk);
 +
-+	. = ALIGN(PAGE_SIZE);
-+	.bss : {
-+		__init_end = .;
-+		__bss_start = .;		/* BSS */
-+		*(.bss.page_aligned)
-+		*(.bss)
-+		*(COMMON)
-+		. = ALIGN(4);
-+		_ebss = .;			/* uClinux MTD sucks */
-+		_end = . ;
-+	}
++	/* Kernel mode? Handle exceptions or die */
++	if (!user_mode(regs))
++		goto no_context;
++}
++
++#ifdef CONFIG_SH_STORE_QUEUES
++/*
++ * This is a special case for the SH-4 store queues, as pages for this
++ * space still need to be faulted in before it's possible to flush the
++ * store queue cache for writeout to the remapped region.
++ */
++#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
++#else
++#define P3_ADDR_MAX		P4SEG
++#endif
++
++/*
++ * Called with interrupts disabled.
++ */
++asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
++					 unsigned long writeaccess,
++					 unsigned long address)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	pte_t entry;
++
++#ifdef CONFIG_SH_KGDB
++	if (kgdb_nofault && kgdb_bus_err_hook)
++		kgdb_bus_err_hook();
++#endif
 +
 +	/*
-+	 * When something in the kernel is NOT compiled as a module, the
-+	 * module cleanup code and data are put into these segments. Both
-+	 * can then be thrown away, as cleanup code is never called unless
-+	 * it's a module.
++	 * We don't take page faults for P1, P2, and parts of P4, these
++	 * are always mapped, whether it be due to legacy behaviour in
++	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
 +	 */
-+	/DISCARD/ : {
-+		*(.exitcall.exit)
++	if (address >= P3SEG && address < P3_ADDR_MAX) {
++		pgd = pgd_offset_k(address);
++	} else {
++		if (unlikely(address >= TASK_SIZE || !current->mm))
++			return 1;
++
++		pgd = pgd_offset(current->mm, address);
 +	}
 +
-+	STABS_DEBUG
-+	DWARF_DEBUG
++	pud = pud_offset(pgd, address);
++	if (pud_none_or_clear_bad(pud))
++		return 1;
++	pmd = pmd_offset(pud, address);
++	if (pmd_none_or_clear_bad(pmd))
++		return 1;
++
++	pte = pte_offset_kernel(pmd, address);
++	entry = *pte;
++	if (unlikely(pte_none(entry) || pte_not_present(entry)))
++		return 1;
++	if (unlikely(writeaccess && !pte_write(entry)))
++		return 1;
++
++	if (writeaccess)
++		entry = pte_mkdirty(entry);
++	entry = pte_mkyoung(entry);
++
++	set_pte(pte, entry);
++	update_mmu_cache(NULL, address, entry);
++
++	return 0;
 +}
-diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
+diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c
 new file mode 100644
-index 0000000..3f1bd63
+index 0000000..399d537
 --- /dev/null
-+++ b/arch/sh/kernel/vmlinux_64.lds.S
-@@ -0,0 +1,164 @@
++++ b/arch/sh/mm/fault_64.c
+@@ -0,0 +1,275 @@
 +/*
-+ * ld script to make SH64 Linux kernel
++ * The SH64 TLB miss.
 + *
++ * Original code from fault.c
 + * Copyright (C) 2000, 2001  Paolo Alberelli
 + *
-+ * benedict.gaster at superh.com:	 2nd May 2002
-+ *    Add definition of empty_zero_page to be the first page of kernel image.
-+ *
-+ * benedict.gaster at superh.com:	 3rd May 2002
-+ *    Added support for ramdisk, removing statically linked romfs at the
-+ *    same time.
-+ *
-+ * lethal at linux-sh.org:          9th May 2003
-+ *    Kill off GLOBAL_NAME() usage and other CDC-isms.
++ * Fast PTE->TLB refill path
++ * Copyright (C) 2003 Richard.Curnow at superh.com
 + *
-+ * lethal at linux-sh.org:         19th May 2003
-+ *    Remove support for ancient toolchains.
++ * IMPORTANT NOTES :
++ * The do_fast_page_fault function is called from a context in entry.S
++ * where very few registers have been saved.  In particular, the code in
++ * this file must be compiled not to use ANY caller-save registers that
++ * are not part of the restricted save set.  Also, it means that code in
++ * this file must not make calls to functions elsewhere in the kernel, or
++ * else the excepting context will see corruption in its caller-save
++ * registers.  Plus, the entry.S save area is non-reentrant, so this code
++ * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
++ * on any exception.
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + */
-+#include <asm/page.h>
-+#include <asm/cache.h>
-+#include <asm/thread_info.h>
-+
-+#define LOAD_OFFSET	CONFIG_PAGE_OFFSET
-+#include <asm-generic/vmlinux.lds.h>
-+
-+OUTPUT_ARCH(sh:sh5)
-+
-+#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/interrupt.h>
++#include <asm/system.h>
++#include <asm/tlb.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <asm/pgalloc.h>
++#include <asm/mmu_context.h>
++#include <asm/cpu/registers.h>
 +
-+ENTRY(__start)
-+SECTIONS
++/* Callable from fault.c, so not static */
++inline void __do_tlb_refill(unsigned long address,
++                            unsigned long long is_text_not_data, pte_t *pte)
 +{
-+	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
-+	_text = .;			/* Text and read-only data */
++	unsigned long long ptel;
++	unsigned long long pteh=0;
++	struct tlb_info *tlbp;
++	unsigned long long next;
 +
-+	.empty_zero_page : C_PHYS(.empty_zero_page) {
-+		*(.empty_zero_page)
-+	} = 0
++	/* Get PTEL first */
++	ptel = pte_val(*pte);
 +
-+	.text : C_PHYS(.text) {
-+		*(.text.head)
-+		TEXT_TEXT
-+		*(.text64)
-+		*(.text..SHmedia32)
-+		SCHED_TEXT
-+		LOCK_TEXT
-+		KPROBES_TEXT
-+		*(.fixup)
-+		*(.gnu.warning)
-+#ifdef CONFIG_LITTLE_ENDIAN
-+	} = 0x6ff0fff0
++	/*
++	 * Set PTEH register
++	 */
++	pteh = address & MMU_VPN_MASK;
++
++	/* Sign extend based on neff. */
++#if (NEFF == 32)
++	/* Faster sign extension */
++	pteh = (unsigned long long)(signed long long)(signed long)pteh;
 +#else
-+	} = 0xf0fff06f
++	/* General case */
++	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
 +#endif
 +
-+	/* We likely want __ex_table to be Cache Line aligned */
-+	. = ALIGN(L1_CACHE_BYTES);		/* Exception table */
-+	__start___ex_table = .;
-+	__ex_table : C_PHYS(__ex_table) { *(__ex_table) }
-+	__stop___ex_table = .;
++	/* Set the ASID. */
++	pteh |= get_asid() << PTEH_ASID_SHIFT;
++	pteh |= PTEH_VALID;
 +
-+	_etext = .;			/* End of text section */
++	/* Set PTEL register, set_pte has performed the sign extension */
++	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
 +
-+	BUG_TABLE
-+	NOTES 
-+	RO_DATA(PAGE_SIZE)
++	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
++	next = tlbp->next;
++	__flush_tlb_slot(next);
++	asm volatile ("putcfg %0,1,%2\n\n\t"
++		      "putcfg %0,0,%1\n"
++		      :  : "r" (next), "r" (pteh), "r" (ptel) );
 +
-+	. = ALIGN(THREAD_SIZE);
-+	.data : C_PHYS(.data) {			/* Data */
-+		*(.data.init_task)
++	next += TLB_STEP;
++	if (next > tlbp->last) next = tlbp->first;
++	tlbp->next = next;
 +
-+		. = ALIGN(L1_CACHE_BYTES);
-+		*(.data.cacheline_aligned)
++}
 +
-+		. = ALIGN(L1_CACHE_BYTES);
-+		*(.data.read_mostly)
++static int handle_vmalloc_fault(struct mm_struct *mm,
++				unsigned long protection_flags,
++                                unsigned long long textaccess,
++				unsigned long address)
++{
++	pgd_t *dir;
++	pud_t *pud;
++	pmd_t *pmd;
++	static pte_t *pte;
++	pte_t entry;
 +
-+		. = ALIGN(PAGE_SIZE);
-+		*(.data.page_aligned)
++	dir = pgd_offset_k(address);
 +
-+		__nosave_begin = .;
-+		*(.data.nosave)
-+		. = ALIGN(PAGE_SIZE);
-+		__nosave_end = .;
++	pud = pud_offset(dir, address);
++	if (pud_none_or_clear_bad(pud))
++		return 0;
 +
-+		DATA_DATA
-+		CONSTRUCTORS
-+	}
++	pmd = pmd_offset(pud, address);
++	if (pmd_none_or_clear_bad(pmd))
++		return 0;
 +
-+	_edata = .;			/* End of data section */
++	pte = pte_offset_kernel(pmd, address);
++	entry = *pte;
 +
-+	. = ALIGN(PAGE_SIZE);		/* Init code and data */
-+	__init_begin = .;
-+	_sinittext = .;
-+	.init.text : C_PHYS(.init.text) { INIT_TEXT }
-+	_einittext = .;
-+	.init.data : C_PHYS(.init.data) { INIT_DATA }
-+	. = ALIGN(L1_CACHE_BYTES);	/* Better if Cache Line aligned */
-+	__setup_start = .;
-+	.init.setup : C_PHYS(.init.setup) { *(.init.setup) }
-+	__setup_end = .;
-+	__initcall_start = .;
-+	.initcall.init : C_PHYS(.initcall.init) {
-+		INITCALLS
-+	}
-+	__initcall_end = .;
-+	__con_initcall_start = .;
-+	.con_initcall.init : C_PHYS(.con_initcall.init) {
-+		*(.con_initcall.init)
-+	}
-+	__con_initcall_end = .;
++	if (pte_none(entry) || !pte_present(entry))
++		return 0;
++	if ((pte_val(entry) & protection_flags) != protection_flags)
++		return 0;
 +
-+	SECURITY_INIT
++        __do_tlb_refill(address, textaccess, pte);
 +
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	. = ALIGN(PAGE_SIZE);
-+	__initramfs_start = .;
-+	.init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
-+	__initramfs_end = .;
-+#endif
++	return 1;
++}
 +
-+	. = ALIGN(8);
-+	__machvec_start = .;
-+	.machvec.init : C_PHYS(.machvec.init) { *(.machvec.init) }
-+	__machvec_end = .;
++static int handle_tlbmiss(struct mm_struct *mm,
++			  unsigned long long protection_flags,
++			  unsigned long long textaccess,
++			  unsigned long address)
++{
++	pgd_t *dir;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	pte_t entry;
 +
-+	PERCPU(PAGE_SIZE)
++	/* NB. The PGD currently only contains a single entry - there is no
++	   page table tree stored for the top half of the address space since
++	   virtual pages in that region should never be mapped in user mode.
++	   (In kernel mode, the only things in that region are the 512Mb super
++	   page (locked in), and vmalloc (modules) +  I/O device pages (handled
++	   by handle_vmalloc_fault), so no PGD for the upper half is required
++	   by kernel mode either).
 +
-+	/*
-+	 * .exit.text is discarded at runtime, not link time, to deal with
-+	 * references from __bug_table
-+	 */
-+	.exit.text : C_PHYS(.exit.text) { EXIT_TEXT }
-+	.exit.data : C_PHYS(.exit.data) { EXIT_DATA }
++	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
++	   the next test is necessary.  - RPC */
++	if (address >= (unsigned long) TASK_SIZE)
++		/* upper half - never has page table entries. */
++		return 0;
 +
-+	. = ALIGN(PAGE_SIZE);
-+	.bss : C_PHYS(.bss) {
-+		__init_end = .;
-+		__bss_start = .;		/* BSS */
-+		*(.bss.page_aligned)
-+		*(.bss)
-+		*(COMMON)
-+		. = ALIGN(4);
-+		_ebss = .;			/* uClinux MTD sucks */
-+		_end = . ;
-+	}
++	dir = pgd_offset(mm, address);
++	if (pgd_none(*dir) || !pgd_present(*dir))
++		return 0;
++	if (!pgd_present(*dir))
++		return 0;
 +
-+	/*
-+	 * When something in the kernel is NOT compiled as a module, the
-+	 * module cleanup code and data are put into these segments. Both
-+	 * can then be thrown away, as cleanup code is never called unless
-+	 * it's a module.
-+	 */
-+	/DISCARD/ : {
-+		*(.exitcall.exit)
-+	}
++	pud = pud_offset(dir, address);
++	if (pud_none(*pud) || !pud_present(*pud))
++		return 0;
 +
-+	STABS_DEBUG
-+	DWARF_DEBUG
-+}
-diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
-index 9dc7b69..ebb55d1 100644
---- a/arch/sh/lib/Makefile
-+++ b/arch/sh/lib/Makefile
-@@ -2,12 +2,13 @@
- # Makefile for SuperH-specific library files..
- #
- 
--lib-y  = delay.o memset.o memmove.o memchr.o \
-+lib-y  = delay.o io.o memset.o memmove.o memchr.o \
- 	 checksum.o strlen.o div64.o div64-generic.o
- 
- memcpy-y			:= memcpy.o
- memcpy-$(CONFIG_CPU_SH4)	:= memcpy-sh4.o
- 
--lib-y	+= $(memcpy-y)
-+lib-$(CONFIG_MMU)		+= copy_page.o clear_page.o
-+lib-y				+= $(memcpy-y)
- 
- EXTRA_CFLAGS += -Werror
-diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/clear_page.S
-new file mode 100644
-index 0000000..3539123
---- /dev/null
-+++ b/arch/sh/lib/clear_page.S
-@@ -0,0 +1,154 @@
-+/*
-+ * __clear_user_page, __clear_user, clear_page implementation of SuperH
-+ *
-+ * Copyright (C) 2001  Kaz Kojima
-+ * Copyright (C) 2001, 2002  Niibe Yutaka
-+ * Copyright (C) 2006  Paul Mundt
-+ */
-+#include <linux/linkage.h>
-+#include <asm/page.h>
++	pmd = pmd_offset(pud, address);
++	if (pmd_none(*pmd) || !pmd_present(*pmd))
++		return 0;
 +
-+/*
-+ * clear_page
-+ * @to: P1 address
-+ *
-+ * void clear_page(void *to)
-+ */
++	pte = pte_offset_kernel(pmd, address);
++	entry = *pte;
 +
-+/*
-+ * r0 --- scratch
-+ * r4 --- to
-+ * r5 --- to + PAGE_SIZE
-+ */
-+ENTRY(clear_page)
-+	mov	r4,r5
-+	mov.l	.Llimit,r0
-+	add	r0,r5
-+	mov	#0,r0
-+	!
-+1:
-+#if defined(CONFIG_CPU_SH3)
-+	mov.l	r0,@r4
-+#elif defined(CONFIG_CPU_SH4)
-+	movca.l	r0,@r4
-+	mov	r4,r1
-+#endif
-+	add	#32,r4
-+	mov.l	r0,@-r4
-+	mov.l	r0,@-r4
-+	mov.l	r0,@-r4
-+	mov.l	r0,@-r4
-+	mov.l	r0,@-r4
-+	mov.l	r0,@-r4
-+	mov.l	r0,@-r4
-+#if defined(CONFIG_CPU_SH4)
-+	ocbwb	@r1
-+#endif
-+	cmp/eq	r5,r4
-+	bf/s	1b
-+	 add	#28,r4
-+	!
-+	rts
-+	 nop
++	if (pte_none(entry) || !pte_present(entry))
++		return 0;
 +
-+	.balign 4
-+.Llimit:	.long	(PAGE_SIZE-28)
++	/*
++	 * If the page doesn't have sufficient protection bits set to
++	 * service the kind of fault being handled, there's not much
++	 * point doing the TLB refill.  Punt the fault to the general
++	 * handler.
++	 */
++	if ((pte_val(entry) & protection_flags) != protection_flags)
++		return 0;
 +
-+ENTRY(__clear_user)
-+	!
-+	mov	#0, r0
-+	mov	#0xe0, r1	! 0xffffffe0
-+	!
-+	! r4..(r4+31)&~32 	   -------- not aligned	[ Area 0 ]
-+	! (r4+31)&~32..(r4+r5)&~32 -------- aligned	[ Area 1 ]
-+	! (r4+r5)&~32..r4+r5       -------- not aligned	[ Area 2 ]
-+	!
-+	! Clear area 0
-+	mov	r4, r2
-+	!
-+	tst	r1, r5		! length < 32
-+	bt	.Larea2		! skip to remainder
-+	!
-+	add	#31, r2
-+	and	r1, r2
-+	cmp/eq	r4, r2
-+	bt	.Larea1
-+	mov	r2, r3
-+	sub	r4, r3
-+	mov	r3, r7
-+	mov	r4, r2
-+	!
-+.L0:	dt	r3
-+0:	mov.b	r0, @r2
-+	bf/s	.L0
-+	 add	#1, r2
-+	!
-+	sub	r7, r5
-+	mov	r2, r4
-+.Larea1:
-+	mov	r4, r3
-+	add	r5, r3
-+	and	r1, r3
-+	cmp/hi	r2, r3
-+	bf	.Larea2
-+	!
-+	! Clear area 1
-+#if defined(CONFIG_CPU_SH4)
-+1:	movca.l	r0, @r2
-+#else
-+1:	mov.l	r0, @r2
-+#endif
-+	add	#4, r2
-+2:	mov.l	r0, @r2
-+	add	#4, r2
-+3:	mov.l	r0, @r2
-+	add	#4, r2
-+4:	mov.l	r0, @r2
-+	add	#4, r2
-+5:	mov.l	r0, @r2
-+	add	#4, r2
-+6:	mov.l	r0, @r2
-+	add	#4, r2
-+7:	mov.l	r0, @r2
-+	add	#4, r2
-+8:	mov.l	r0, @r2
-+	add	#4, r2
-+	cmp/hi	r2, r3
-+	bt/s	1b
-+	 nop
-+	!
-+	! Clear area 2
-+.Larea2:
-+	mov	r4, r3
-+	add	r5, r3
-+	cmp/hs	r3, r2
-+	bt/s	.Ldone
-+	 sub	r2, r3
-+.L2:	dt	r3
-+9:	mov.b	r0, @r2
-+	bf/s	.L2
-+	 add	#1, r2
-+	!
-+.Ldone:	rts
-+	 mov	#0, r0	! return 0 as normal return
++        __do_tlb_refill(address, textaccess, pte);
 +
-+	! return the number of bytes remained
-+.Lbad_clear_user:
-+	mov	r4, r0
-+	add	r5, r0
-+	rts
-+	 sub	r2, r0
++	return 1;
++}
 +
-+.section __ex_table,"a"
-+	.align 2
-+	.long	0b, .Lbad_clear_user
-+	.long	1b, .Lbad_clear_user
-+	.long	2b, .Lbad_clear_user
-+	.long	3b, .Lbad_clear_user
-+	.long	4b, .Lbad_clear_user
-+	.long	5b, .Lbad_clear_user
-+	.long	6b, .Lbad_clear_user
-+	.long	7b, .Lbad_clear_user
-+	.long	8b, .Lbad_clear_user
-+	.long	9b, .Lbad_clear_user
-+.previous
-diff --git a/arch/sh/lib/copy_page.S b/arch/sh/lib/copy_page.S
-new file mode 100644
-index 0000000..e002b91
---- /dev/null
-+++ b/arch/sh/lib/copy_page.S
-@@ -0,0 +1,389 @@
 +/*
-+ * copy_page, __copy_user_page, __copy_user implementation of SuperH
-+ *
-+ * Copyright (C) 2001  Niibe Yutaka & Kaz Kojima
-+ * Copyright (C) 2002  Toshinobu Sugioka
-+ * Copyright (C) 2006  Paul Mundt
++ * Put all this information into one structure so that everything is just
++ * arithmetic relative to a single base address.  This reduces the number
++ * of movi/shori pairs needed just to load addresses of static data.
 + */
-+#include <linux/linkage.h>
-+#include <asm/page.h>
++struct expevt_lookup {
++	unsigned short protection_flags[8];
++	unsigned char  is_text_access[8];
++	unsigned char  is_write_access[8];
++};
 +
-+/*
-+ * copy_page
-+ * @to: P1 address
-+ * @from: P1 address
-+ *
-+ * void copy_page(void *to, void *from)
-+ */
++#define PRU (1<<9)
++#define PRW (1<<8)
++#define PRX (1<<7)
++#define PRR (1<<6)
 +
-+/*
-+ * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
-+ * r8 --- from + PAGE_SIZE
-+ * r9 --- not used
-+ * r10 --- to
-+ * r11 --- from
-+ */
-+ENTRY(copy_page)
-+	mov.l	r8,@-r15
-+	mov.l	r10,@-r15
-+	mov.l	r11,@-r15
-+	mov	r4,r10
-+	mov	r5,r11
-+	mov	r5,r8
-+	mov.l	.Lpsz,r0
-+	add	r0,r8
-+	!
-+1:	mov.l	@r11+,r0
-+	mov.l	@r11+,r1
-+	mov.l	@r11+,r2
-+	mov.l	@r11+,r3
-+	mov.l	@r11+,r4
-+	mov.l	@r11+,r5
-+	mov.l	@r11+,r6
-+	mov.l	@r11+,r7
-+#if defined(CONFIG_CPU_SH3)
-+	mov.l	r0,@r10
-+#elif defined(CONFIG_CPU_SH4)
-+	movca.l	r0,@r10
-+	mov	r10,r0
-+#endif
-+	add	#32,r10
-+	mov.l	r7,@-r10
-+	mov.l	r6,@-r10
-+	mov.l	r5,@-r10
-+	mov.l	r4,@-r10
-+	mov.l	r3,@-r10
-+	mov.l	r2,@-r10
-+	mov.l	r1,@-r10
-+#if defined(CONFIG_CPU_SH4)
-+	ocbwb	@r0
-+#endif
-+	cmp/eq	r11,r8
-+	bf/s	1b
-+	 add	#28,r10
-+	!
-+	mov.l	@r15+,r11
-+	mov.l	@r15+,r10
-+	mov.l	@r15+,r8
-+	rts
-+	 nop
++#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
++#define YOUNG (_PAGE_ACCESSED)
 +
-+	.balign 4
-+.Lpsz:	.long	PAGE_SIZE
++/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
++   the fault happened in user mode or privileged mode. */
++static struct expevt_lookup expevt_lookup_table = {
++	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
++	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
++};
 +
 +/*
-+ * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
-+ * Return the number of bytes NOT copied
++   This routine handles page faults that can be serviced just by refilling a
++   TLB entry from an existing page table entry.  (This case represents a very
++   large majority of page faults.) Return 1 if the fault was successfully
++   handled.  Return 0 if the fault could not be handled.  (This leads into the
++   general fault handling in fault.c which deals with mapping file-backed
++   pages, stack growth, segmentation faults, swapping etc etc)
 + */
-+#define EX(...)			\
-+	9999: __VA_ARGS__ ;		\
-+	.section __ex_table, "a";	\
-+	.long 9999b, 6000f	;	\
-+	.previous
-+ENTRY(__copy_user)
-+	! Check if small number of bytes
-+	mov	#11,r0
-+	mov	r4,r3
-+	cmp/gt	r0,r6		! r6 (len) > r0 (11)
-+	bf/s	.L_cleanup_loop_no_pop
-+	 add	r6,r3		! last destination address
-+
-+	! Calculate bytes needed to align to src
-+	mov.l	r11,@-r15
-+	neg	r5,r0
-+	mov.l	r10,@-r15
-+	add	#4,r0
-+	mov.l	r9,@-r15
-+	and	#3,r0
-+	mov.l	r8,@-r15
-+	tst	r0,r0
-+	bt	2f
++asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
++				  unsigned long long expevt,
++			          unsigned long address)
++{
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	unsigned long long textaccess;
++	unsigned long long protection_flags;
++	unsigned long long index;
++	unsigned long long expevt4;
 +
-+1:
-+	! Copy bytes to long word align src
-+EX(	mov.b	@r5+,r1		)
-+	dt	r0
-+	add	#-1,r6
-+EX(	mov.b	r1,@r4		)
-+	bf/s	1b
-+	 add	#1,r4
++	/* The next few lines implement a way of hashing EXPEVT into a
++	 * small array index which can be used to lookup parameters
++	 * specific to the type of TLBMISS being handled.
++	 *
++	 * Note:
++	 *	ITLBMISS has EXPEVT==0xa40
++	 *	RTLBMISS has EXPEVT==0x040
++	 *	WTLBMISS has EXPEVT==0x060
++	 */
++	expevt4 = (expevt >> 4);
++	/* TODO : xor ssr_md into this expression too. Then we can check
++	 * that PRU is set when it needs to be. */
++	index = expevt4 ^ (expevt4 >> 5);
++	index &= 7;
++	protection_flags = expevt_lookup_table.protection_flags[index];
++	textaccess       = expevt_lookup_table.is_text_access[index];
 +
-+	! Jump to appropriate routine depending on dest
-+2:	mov	#3,r1
-+	mov	r6, r2
-+	and	r4,r1
-+	shlr2	r2
-+	shll2	r1
-+	mova	.L_jump_tbl,r0
-+	mov.l	@(r0,r1),r1
-+	jmp	@r1
-+	 nop
++	/* SIM
++	 * Note this is now called with interrupts still disabled
++	 * This is to cope with being called for a missing IO port
++	 * address with interrupts disabled. This should be fixed as
++	 * soon as we have a better 'fast path' miss handler.
++	 *
++	 * Plus take care how you try and debug this stuff.
++	 * For example, writing debug data to a port which you
++	 * have just faulted on is not going to work.
++	 */
 +
-+	.align 2
-+.L_jump_tbl:
-+	.long	.L_dest00
-+	.long	.L_dest01
-+	.long	.L_dest10
-+	.long	.L_dest11
++	tsk = current;
++	mm = tsk->mm;
 +
-+/*
-+ * Come here if there are less than 12 bytes to copy
-+ *
-+ * Keep the branch target close, so the bf/s callee doesn't overflow
-+ * and result in a more expensive branch being inserted. This is the
-+ * fast-path for small copies, the jump via the jump table will hit the
-+ * default slow-path cleanup. -PFM.
-+ */
-+.L_cleanup_loop_no_pop:
-+	tst	r6,r6		! Check explicitly for zero
-+	bt	1f
++	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
++	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
++		if (ssr_md)
++			/*
++			 * Process-contexts can never have this address
++			 * range mapped
++			 */
++			if (handle_vmalloc_fault(mm, protection_flags,
++						 textaccess, address))
++				return 1;
++	} else if (!in_interrupt() && mm) {
++		if (handle_tlbmiss(mm, protection_flags, textaccess, address))
++			return 1;
++	}
 +
-+2:
-+EX(	mov.b	@r5+,r0		)
-+	dt	r6
-+EX(	mov.b	r0,@r4		)
-+	bf/s	2b
-+	 add	#1,r4
++	return 0;
++}
+diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
+index d5e160d..2918c6b 100644
+--- a/arch/sh/mm/init.c
++++ b/arch/sh/mm/init.c
+@@ -23,9 +23,7 @@
+ 
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+ pgd_t swapper_pg_dir[PTRS_PER_PGD];
+-
+-void (*copy_page)(void *from, void *to);
+-void (*clear_page)(void *to);
++unsigned long cached_to_uncached = 0;
+ 
+ void show_mem(void)
+ {
+@@ -102,7 +100,8 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+ 
+ 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
+ 
+-	flush_tlb_one(get_asid(), addr);
++	if (cached_to_uncached)
++		flush_tlb_one(get_asid(), addr);
+ }
+ 
+ /*
+@@ -131,6 +130,37 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+ 
+ 	set_pte_phys(address, phys, prot);
+ }
 +
-+1:	mov	#0,r0		! normal return
-+5000:
++void __init page_table_range_init(unsigned long start, unsigned long end,
++					 pgd_t *pgd_base)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	int pgd_idx;
++	unsigned long vaddr;
 +
-+# Exception handler:
-+.section .fixup, "ax"
-+6000:
-+	mov.l	8000f,r1
-+	mov	r3,r0
-+	jmp	@r1
-+	 sub	r4,r0
-+	.align	2
-+8000:	.long	5000b
++	vaddr = start & PMD_MASK;
++	end = (end + PMD_SIZE - 1) & PMD_MASK;
++	pgd_idx = pgd_index(vaddr);
++	pgd = pgd_base + pgd_idx;
 +
-+.previous
-+	rts
-+	 nop
++	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++		BUG_ON(pgd_none(*pgd));
++		pud = pud_offset(pgd, 0);
++		BUG_ON(pud_none(*pud));
++		pmd = pmd_offset(pud, 0);
 +
-+! Destination = 00
++		if (!pmd_present(*pmd)) {
++			pte_t *pte_table;
++			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
++			memset(pte_table, 0, PAGE_SIZE);
++			pmd_populate_kernel(&init_mm, pmd, pte_table);
++		}
 +
-+.L_dest00:
-+	! Skip the large copy for small transfers
-+	mov	#(32+32-4), r0
-+	cmp/gt	r6, r0		! r0 (60) > r6 (len)
-+	bt	1f
++		vaddr += PMD_SIZE;
++	}
++}
+ #endif	/* CONFIG_MMU */
+ 
+ /*
+@@ -150,6 +180,11 @@ void __init paging_init(void)
+ 	 * check for a null value. */
+ 	set_TTB(swapper_pg_dir);
+ 
++	/* Populate the relevant portions of swapper_pg_dir so that
++	 * we can use the fixmap entries without calling kmalloc.
++	 * pte's will be filled in by __set_fixmap(). */
++	page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);
 +
-+	! Align dest to a 32 byte boundary
-+	neg	r4,r0
-+	add	#0x20, r0
-+	and	#0x1f, r0
-+	tst	r0, r0
-+	bt	2f
+ 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+ 
+ 	for_each_online_node(nid) {
+@@ -167,9 +202,22 @@ void __init paging_init(void)
+ 	}
+ 
+ 	free_area_init_nodes(max_zone_pfns);
 +
-+	sub	r0, r6
-+	shlr2	r0
-+3:
-+EX(	mov.l	@r5+,r1		)
-+	dt	r0
-+EX(	mov.l	r1,@r4		)
-+	bf/s	3b
-+	 add	#4,r4
++	/* Set up the uncached fixmap */
++	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
 +
-+2:
-+EX(	mov.l	@r5+,r0		)
-+EX(	mov.l	@r5+,r1		)
-+EX(	mov.l	@r5+,r2		)
-+EX(	mov.l	@r5+,r7		)
-+EX(	mov.l	@r5+,r8		)
-+EX(	mov.l	@r5+,r9		)
-+EX(	mov.l	@r5+,r10	)
-+EX(	mov.l	@r5+,r11	)
-+#ifdef CONFIG_CPU_SH4
-+EX(	movca.l	r0, at r4		)
-+#else
-+EX(	mov.l	r0, at r4		)
++#ifdef CONFIG_29BIT
++	/*
++	 * Handle trivial transitions between cached and uncached
++	 * segments, making use of the 1:1 mapping relationship in
++	 * 512MB lowmem.
++	 */
++	cached_to_uncached = P2SEG - P1SEG;
 +#endif
-+	add	#-32, r6
-+EX(	mov.l	r1,@(4,r4)	)
-+	mov	#32, r0
-+EX(	mov.l	r2,@(8,r4)	)
-+	cmp/gt	r6, r0		! r0 (32) > r6 (len)
-+EX(	mov.l	r7,@(12,r4)	)
-+EX(	mov.l	r8,@(16,r4)	)
-+EX(	mov.l	r9,@(20,r4)	)
-+EX(	mov.l	r10,@(24,r4)	)
-+EX(	mov.l	r11,@(28,r4)	)
-+	bf/s	2b
-+	 add	#32,r4
-+
-+1:	mov	r6, r0
-+	shlr2	r0
-+	tst	r0, r0
-+	bt	.L_cleanup
-+1:
-+EX(	mov.l	@r5+,r1		)
-+	dt	r0
-+EX(	mov.l	r1,@r4		)
-+	bf/s	1b
-+	 add	#4,r4
+ }
+ 
+ static struct kcore_list kcore_mem, kcore_vmalloc;
++int after_bootmem = 0;
+ 
+ void __init mem_init(void)
+ {
+@@ -202,17 +250,7 @@ void __init mem_init(void)
+ 	memset(empty_zero_page, 0, PAGE_SIZE);
+ 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
+ 
+-	/*
+-	 * Setup wrappers for copy/clear_page(), these will get overridden
+-	 * later in the boot process if a better method is available.
+-	 */
+-#ifdef CONFIG_MMU
+-	copy_page = copy_page_slow;
+-	clear_page = clear_page_slow;
+-#else
+-	copy_page = copy_page_nommu;
+-	clear_page = clear_page_nommu;
+-#endif
++	after_bootmem = 1;
+ 
+ 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
+ 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
+diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
+deleted file mode 100644
+index 0c7b7e3..0000000
+--- a/arch/sh/mm/ioremap.c
++++ /dev/null
+@@ -1,150 +0,0 @@
+-/*
+- * arch/sh/mm/ioremap.c
+- *
+- * Re-map IO memory to kernel address space so that we can access it.
+- * This is needed for high PCI addresses that aren't mapped in the
+- * 640k-1MB IO memory area on PC's
+- *
+- * (C) Copyright 1995 1996 Linus Torvalds
+- * (C) Copyright 2005, 2006 Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General
+- * Public License. See the file "COPYING" in the main directory of this
+- * archive for more details.
+- */
+-#include <linux/vmalloc.h>
+-#include <linux/module.h>
+-#include <linux/mm.h>
+-#include <linux/pci.h>
+-#include <linux/io.h>
+-#include <asm/page.h>
+-#include <asm/pgalloc.h>
+-#include <asm/addrspace.h>
+-#include <asm/cacheflush.h>
+-#include <asm/tlbflush.h>
+-#include <asm/mmu.h>
+-
+-/*
+- * Remap an arbitrary physical address space into the kernel virtual
+- * address space. Needed when the kernel wants to access high addresses
+- * directly.
+- *
+- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+- * have to convert them into an offset in a page-aligned mapping, but the
+- * caller shouldn't need to know that small detail.
+- */
+-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+-			unsigned long flags)
+-{
+-	struct vm_struct * area;
+-	unsigned long offset, last_addr, addr, orig_addr;
+-	pgprot_t pgprot;
+-
+-	/* Don't allow wraparound or zero size */
+-	last_addr = phys_addr + size - 1;
+-	if (!size || last_addr < phys_addr)
+-		return NULL;
+-
+-	/*
+-	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
+-	 * mapped at the end of the address space (typically 0xfd000000)
+-	 * in a non-translatable area, so mapping through page tables for
+-	 * this area is not only pointless, but also fundamentally
+-	 * broken. Just return the physical address instead.
+-	 *
+-	 * For boards that map a small PCI memory aperture somewhere in
+-	 * P1/P2 space, ioremap() will already do the right thing,
+-	 * and we'll never get this far.
+-	 */
+-	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
+-		return (void __iomem *)phys_addr;
+-
+-	/*
+-	 * Don't allow anybody to remap normal RAM that we're using..
+-	 */
+-	if (phys_addr < virt_to_phys(high_memory))
+-		return NULL;
+-
+-	/*
+-	 * Mappings have to be page-aligned
+-	 */
+-	offset = phys_addr & ~PAGE_MASK;
+-	phys_addr &= PAGE_MASK;
+-	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+-
+-	/*
+-	 * Ok, go for it..
+-	 */
+-	area = get_vm_area(size, VM_IOREMAP);
+-	if (!area)
+-		return NULL;
+-	area->phys_addr = phys_addr;
+-	orig_addr = addr = (unsigned long)area->addr;
+-
+-#ifdef CONFIG_32BIT
+-	/*
+-	 * First try to remap through the PMB once a valid VMA has been
+-	 * established. Smaller allocations (or the rest of the size
+-	 * remaining after a PMB mapping due to the size not being
+-	 * perfectly aligned on a PMB size boundary) are then mapped
+-	 * through the UTLB using conventional page tables.
+-	 *
+-	 * PMB entries are all pre-faulted.
+-	 */
+-	if (unlikely(size >= 0x1000000)) {
+-		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+-
+-		if (likely(mapped)) {
+-			addr		+= mapped;
+-			phys_addr	+= mapped;
+-			size		-= mapped;
+-		}
+-	}
+-#endif
+-
+-	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+-	if (likely(size))
+-		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+-			vunmap((void *)orig_addr);
+-			return NULL;
+-		}
+-
+-	return (void __iomem *)(offset + (char *)orig_addr);
+-}
+-EXPORT_SYMBOL(__ioremap);
+-
+-void __iounmap(void __iomem *addr)
+-{
+-	unsigned long vaddr = (unsigned long __force)addr;
+-	struct vm_struct *p;
+-
+-	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
+-		return;
+-
+-#ifdef CONFIG_32BIT
+-	/*
+-	 * Purge any PMB entries that may have been established for this
+-	 * mapping, then proceed with conventional VMA teardown.
+-	 *
+-	 * XXX: Note that due to the way that remove_vm_area() does
+-	 * matching of the resultant VMA, we aren't able to fast-forward
+-	 * the address past the PMB space until the end of the VMA where
+-	 * the page tables reside. As such, unmap_vm_area() will be
+-	 * forced to linearly scan over the area until it finds the page
+-	 * tables where PTEs that need to be unmapped actually reside,
+-	 * which is far from optimal. Perhaps we need to use a separate
+-	 * VMA for the PMB mappings?
+-	 *					-- PFM.
+-	 */
+-	pmb_unmap(vaddr);
+-#endif
+-
+-	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
+-	if (!p) {
+-		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
+-		return;
+-	}
+-
+-	kfree(p);
+-}
+-EXPORT_SYMBOL(__iounmap);
+diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
+new file mode 100644
+index 0000000..0c7b7e3
+--- /dev/null
++++ b/arch/sh/mm/ioremap_32.c
+@@ -0,0 +1,150 @@
++/*
++ * arch/sh/mm/ioremap.c
++ *
++ * Re-map IO memory to kernel address space so that we can access it.
++ * This is needed for high PCI addresses that aren't mapped in the
++ * 640k-1MB IO memory area on PC's
++ *
++ * (C) Copyright 1995 1996 Linus Torvalds
++ * (C) Copyright 2005, 2006 Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ */
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <linux/pci.h>
++#include <linux/io.h>
++#include <asm/page.h>
++#include <asm/pgalloc.h>
++#include <asm/addrspace.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/mmu.h>
 +
-+	bra	.L_cleanup
-+	 nop
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
++			unsigned long flags)
++{
++	struct vm_struct * area;
++	unsigned long offset, last_addr, addr, orig_addr;
++	pgprot_t pgprot;
 +
-+! Destination = 10
++	/* Don't allow wraparound or zero size */
++	last_addr = phys_addr + size - 1;
++	if (!size || last_addr < phys_addr)
++		return NULL;
 +
-+.L_dest10:
-+	mov	r2,r7
-+	shlr2	r7
-+	shlr	r7
-+	tst	r7,r7
-+	mov	#7,r0
-+	bt/s	1f
-+	 and	r0,r2
-+2:
-+	dt	r7
-+#ifdef CONFIG_CPU_LITTLE_ENDIAN
-+EX(	mov.l	@r5+,r0		)
-+EX(	mov.l	@r5+,r1		)
-+EX(	mov.l	@r5+,r8		)
-+EX(	mov.l	@r5+,r9		)
-+EX(	mov.l	@r5+,r10	)
-+EX(	mov.w	r0,@r4		)
-+	add	#2,r4
-+	xtrct	r1,r0
-+	xtrct	r8,r1
-+	xtrct	r9,r8
-+	xtrct	r10,r9
++	/*
++	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
++	 * mapped at the end of the address space (typically 0xfd000000)
++	 * in a non-translatable area, so mapping through page tables for
++	 * this area is not only pointless, but also fundamentally
++	 * broken. Just return the physical address instead.
++	 *
++	 * For boards that map a small PCI memory aperture somewhere in
++	 * P1/P2 space, ioremap() will already do the right thing,
++	 * and we'll never get this far.
++	 */
++	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
++		return (void __iomem *)phys_addr;
 +
-+EX(	mov.l	r0, at r4		)
-+EX(	mov.l	r1,@(4,r4)	)
-+EX(	mov.l	r8,@(8,r4)	)
-+EX(	mov.l	r9,@(12,r4)	)
++	/*
++	 * Don't allow anybody to remap normal RAM that we're using..
++	 */
++	if (phys_addr < virt_to_phys(high_memory))
++		return NULL;
 +
-+EX(	mov.l	@r5+,r1		)
-+EX(	mov.l	@r5+,r8		)
-+EX(	mov.l	@r5+,r0		)
-+	xtrct	r1,r10
-+	xtrct	r8,r1
-+	xtrct	r0,r8
-+	shlr16	r0
-+EX(	mov.l	r10,@(16,r4)	)
-+EX(	mov.l	r1,@(20,r4)	)
-+EX(	mov.l	r8,@(24,r4)	)
-+EX(	mov.w	r0,@(28,r4)	)
-+	bf/s	2b
-+	 add	#30,r4
-+#else
-+EX(	mov.l	@(28,r5),r0	)
-+EX(	mov.l	@(24,r5),r8	)
-+EX(	mov.l	@(20,r5),r9	)
-+EX(	mov.l	@(16,r5),r10	)
-+EX(	mov.w	r0,@(30,r4)	)
-+	add	#-2,r4
-+	xtrct	r8,r0
-+	xtrct	r9,r8
-+	xtrct	r10,r9
-+EX(	mov.l	r0,@(28,r4)	)
-+EX(	mov.l	r8,@(24,r4)	)
-+EX(	mov.l	r9,@(20,r4)	)
++	/*
++	 * Mappings have to be page-aligned
++	 */
++	offset = phys_addr & ~PAGE_MASK;
++	phys_addr &= PAGE_MASK;
++	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 +
-+EX(	mov.l	@(12,r5),r0	)
-+EX(	mov.l	@(8,r5),r8	)
-+	xtrct	r0,r10
-+EX(	mov.l	@(4,r5),r9	)
-+	mov.l	r10,@(16,r4)
-+EX(	mov.l	@r5,r10		)
-+	xtrct	r8,r0
-+	xtrct	r9,r8
-+	xtrct	r10,r9
-+EX(	mov.l	r0,@(12,r4)	)
-+EX(	mov.l	r8,@(8,r4)	)
-+	swap.w	r10,r0
-+EX(	mov.l	r9,@(4,r4)	)
-+EX(	mov.w	r0,@(2,r4)	)
++	/*
++	 * Ok, go for it..
++	 */
++	area = get_vm_area(size, VM_IOREMAP);
++	if (!area)
++		return NULL;
++	area->phys_addr = phys_addr;
++	orig_addr = addr = (unsigned long)area->addr;
 +
-+	add	#32,r5
-+	bf/s	2b
-+	 add	#34,r4
-+#endif
-+	tst	r2,r2
-+	bt	.L_cleanup
++#ifdef CONFIG_32BIT
++	/*
++	 * First try to remap through the PMB once a valid VMA has been
++	 * established. Smaller allocations (or the rest of the size
++	 * remaining after a PMB mapping due to the size not being
++	 * perfectly aligned on a PMB size boundary) are then mapped
++	 * through the UTLB using conventional page tables.
++	 *
++	 * PMB entries are all pre-faulted.
++	 */
++	if (unlikely(size >= 0x1000000)) {
++		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
 +
-+1:	! Read longword, write two words per iteration
-+EX(	mov.l	@r5+,r0		)
-+	dt	r2
-+#ifdef CONFIG_CPU_LITTLE_ENDIAN
-+EX(	mov.w	r0,@r4		)
-+	shlr16	r0
-+EX(	mov.w 	r0,@(2,r4)	)
-+#else
-+EX(	mov.w	r0,@(2,r4)	)
-+	shlr16	r0
-+EX(	mov.w	r0,@r4		)
++		if (likely(mapped)) {
++			addr		+= mapped;
++			phys_addr	+= mapped;
++			size		-= mapped;
++		}
++	}
 +#endif
-+	bf/s	1b
-+	 add	#4,r4
-+
-+	bra	.L_cleanup
-+	 nop
-+
-+! Destination = 01 or 11
 +
-+.L_dest01:
-+.L_dest11:
-+	! Read longword, write byte, word, byte per iteration
-+EX(	mov.l	@r5+,r0		)
-+	dt	r2
-+#ifdef CONFIG_CPU_LITTLE_ENDIAN
-+EX(	mov.b	r0,@r4		)
-+	shlr8	r0
-+	add	#1,r4
-+EX(	mov.w	r0,@r4		)
-+	shlr16	r0
-+EX(	mov.b	r0,@(2,r4)	)
-+	bf/s	.L_dest01
-+	 add	#3,r4
-+#else
-+EX(	mov.b	r0,@(3,r4)	)
-+	shlr8	r0
-+	swap.w	r0,r7
-+EX(	mov.b	r7,@r4		)
-+	add	#1,r4
-+EX(	mov.w	r0,@r4		)
-+	bf/s	.L_dest01
-+	 add	#3,r4
-+#endif
++	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
++	if (likely(size))
++		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
++			vunmap((void *)orig_addr);
++			return NULL;
++		}
 +
-+! Cleanup last few bytes
-+.L_cleanup:
-+	mov	r6,r0
-+	and	#3,r0
-+	tst	r0,r0
-+	bt	.L_exit
-+	mov	r0,r6
++	return (void __iomem *)(offset + (char *)orig_addr);
++}
++EXPORT_SYMBOL(__ioremap);
 +
-+.L_cleanup_loop:
-+EX(	mov.b	@r5+,r0		)
-+	dt	r6
-+EX(	mov.b	r0,@r4		)
-+	bf/s	.L_cleanup_loop
-+	 add	#1,r4
++void __iounmap(void __iomem *addr)
++{
++	unsigned long vaddr = (unsigned long __force)addr;
++	struct vm_struct *p;
 +
-+.L_exit:
-+	mov	#0,r0		! normal return
++	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
++		return;
 +
-+5000:
++#ifdef CONFIG_32BIT
++	/*
++	 * Purge any PMB entries that may have been established for this
++	 * mapping, then proceed with conventional VMA teardown.
++	 *
++	 * XXX: Note that due to the way that remove_vm_area() does
++	 * matching of the resultant VMA, we aren't able to fast-forward
++	 * the address past the PMB space until the end of the VMA where
++	 * the page tables reside. As such, unmap_vm_area() will be
++	 * forced to linearly scan over the area until it finds the page
++	 * tables where PTEs that need to be unmapped actually reside,
++	 * which is far from optimal. Perhaps we need to use a separate
++	 * VMA for the PMB mappings?
++	 *					-- PFM.
++	 */
++	pmb_unmap(vaddr);
++#endif
 +
-+# Exception handler:
-+.section .fixup, "ax"
-+6000:
-+	mov.l	8000f,r1
-+	mov	r3,r0
-+	jmp	@r1
-+	 sub	r4,r0
-+	.align	2
-+8000:	.long	5000b
++	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
++	if (!p) {
++		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
++		return;
++	}
 +
-+.previous
-+	mov.l	@r15+,r8
-+	mov.l	@r15+,r9
-+	mov.l	@r15+,r10
-+	rts
-+	 mov.l	@r15+,r11
-diff --git a/arch/sh/lib/io.c b/arch/sh/lib/io.c
++	kfree(p);
++}
++EXPORT_SYMBOL(__iounmap);
+diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
 new file mode 100644
-index 0000000..4f54ec4
+index 0000000..e27d165
 --- /dev/null
-+++ b/arch/sh/lib/io.c
-@@ -0,0 +1,82 @@
++++ b/arch/sh/mm/ioremap_64.c
+@@ -0,0 +1,404 @@
 +/*
-+ * arch/sh/lib/io.c - SH32 optimized I/O routines
++ * arch/sh/mm/ioremap_64.c
 + *
-+ * Copyright (C) 2000  Stuart Menefy
-+ * Copyright (C) 2005  Paul Mundt
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003 - 2007  Paul Mundt
 + *
-+ * Provide real functions which expand to whatever the header file defined.
-+ * Also definitions of machine independent IO functions.
++ * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
++ * derived from arch/i386/mm/ioremap.c .
++ *
++ *   (C) Copyright 1995 1996 Linus Torvalds
 + *
 + * This file is subject to the terms and conditions of the GNU General Public
 + * License.  See the file "COPYING" in the main directory of this archive
 + * for more details.
 + */
++#include <linux/vmalloc.h>
++#include <linux/ioport.h>
 +#include <linux/module.h>
++#include <linux/mm.h>
 +#include <linux/io.h>
++#include <linux/bootmem.h>
++#include <linux/proc_fs.h>
++#include <asm/page.h>
++#include <asm/pgalloc.h>
++#include <asm/addrspace.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/mmu.h>
 +
-+void __raw_readsl(unsigned long addr, void *datap, int len)
++static void shmedia_mapioaddr(unsigned long, unsigned long);
++static unsigned long shmedia_ioremap(struct resource *, u32, int);
++
++/*
++ * Generic mapping function (not visible outside):
++ */
++
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++void *__ioremap(unsigned long phys_addr, unsigned long size,
++		unsigned long flags)
 +{
-+	u32 *data;
++	void * addr;
++	struct vm_struct * area;
++	unsigned long offset, last_addr;
++	pgprot_t pgprot;
 +
-+	for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
-+		*data++ = ctrl_inl(addr);
++	/* Don't allow wraparound or zero size */
++	last_addr = phys_addr + size - 1;
++	if (!size || last_addr < phys_addr)
++		return NULL;
 +
-+	if (likely(len >= (0x20 >> 2))) {
-+		int tmp2, tmp3, tmp4, tmp5, tmp6;
++	pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ   |
++			  _PAGE_WRITE    | _PAGE_DIRTY  |
++			  _PAGE_ACCESSED | _PAGE_SHARED | flags);
 +
-+		__asm__ __volatile__(
-+			"1:			\n\t"
-+			"mov.l	@%7, r0		\n\t"
-+			"mov.l	@%7, %2		\n\t"
-+#ifdef CONFIG_CPU_SH4
-+			"movca.l r0, @%0	\n\t"
-+#else
-+			"mov.l	r0, @%0		\n\t"
-+#endif
-+			"mov.l	@%7, %3		\n\t"
-+			"mov.l	@%7, %4		\n\t"
-+			"mov.l	@%7, %5		\n\t"
-+			"mov.l	@%7, %6		\n\t"
-+			"mov.l	@%7, r7		\n\t"
-+			"mov.l	@%7, r0		\n\t"
-+			"mov.l	%2, @(0x04,%0)	\n\t"
-+			"mov	#0x20>>2, %2	\n\t"
-+			"mov.l	%3, @(0x08,%0)	\n\t"
-+			"sub	%2, %1		\n\t"
-+			"mov.l	%4, @(0x0c,%0)	\n\t"
-+			"cmp/hi	%1, %2		! T if 32 > len	\n\t"
-+			"mov.l	%5, @(0x10,%0)	\n\t"
-+			"mov.l	%6, @(0x14,%0)	\n\t"
-+			"mov.l	r7, @(0x18,%0)	\n\t"
-+			"mov.l	r0, @(0x1c,%0)	\n\t"
-+			"bf.s	1b		\n\t"
-+			" add	#0x20, %0	\n\t"
-+			: "=&r" (data), "=&r" (len),
-+			  "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
-+			  "=&r" (tmp5), "=&r" (tmp6)
-+			: "r"(addr), "0" (data), "1" (len)
-+			: "r0", "r7", "t", "memory");
-+	}
++	/*
++	 * Mappings have to be page-aligned
++	 */
++	offset = phys_addr & ~PAGE_MASK;
++	phys_addr &= PAGE_MASK;
++	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 +
-+	for (; len != 0; len--)
-+		*data++ = ctrl_inl(addr);
++	/*
++	 * Ok, go for it..
++	 */
++	area = get_vm_area(size, VM_IOREMAP);
++	pr_debug("Get vm_area returns %p addr %p\n",area,area->addr);
++	if (!area)
++		return NULL;
++	area->phys_addr = phys_addr;
++	addr = area->addr;
++	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
++			       phys_addr, pgprot)) {
++		vunmap(addr);
++		return NULL;
++	}
++	return (void *) (offset + (char *)addr);
 +}
-+EXPORT_SYMBOL(__raw_readsl);
++EXPORT_SYMBOL(__ioremap);
 +
-+void __raw_writesl(unsigned long addr, const void *data, int len)
++void __iounmap(void *addr)
 +{
-+	if (likely(len != 0)) {
-+		int tmp1;
++	struct vm_struct *area;
 +
-+		__asm__ __volatile__ (
-+			"1:				\n\t"
-+			"mov.l	@%0+, %1	\n\t"
-+			"dt		%3		\n\t"
-+			"bf.s		1b		\n\t"
-+			" mov.l	%1, @%4		\n\t"
-+			: "=&r" (data), "=&r" (tmp1)
-+			: "0" (data), "r" (len), "r"(addr)
-+			: "t", "memory");
++	vfree((void *) (PAGE_MASK & (unsigned long) addr));
++	area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
++	if (!area) {
++		printk(KERN_ERR "iounmap: bad address %p\n", addr);
++		return;
 +	}
++
++	kfree(area);
 +}
-+EXPORT_SYMBOL(__raw_writesl);
-diff --git a/arch/sh/lib64/.gitignore b/arch/sh/lib64/.gitignore
-new file mode 100644
-index 0000000..3508c2c
---- /dev/null
-+++ b/arch/sh/lib64/.gitignore
-@@ -0,0 +1 @@
-+syscalltab.h
-diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile
-new file mode 100644
-index 0000000..9950966
---- /dev/null
-+++ b/arch/sh/lib64/Makefile
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the SH-5 specific library files..
-+#
-+# Copyright (C) 2000, 2001  Paolo Alberelli
-+# Copyright (C) 2003  Paul Mundt
-+#
-+# This file is subject to the terms and conditions of the GNU General Public
-+# License.  See the file "COPYING" in the main directory of this archive
-+# for more details.
-+#
++EXPORT_SYMBOL(__iounmap);
 +
-+# Panic should really be compiled as PIC
-+lib-y  := udelay.o c-checksum.o dbg.o panic.o memcpy.o copy_user_memcpy.o \
-+		copy_page.o clear_page.o
++static struct resource shmedia_iomap = {
++	.name	= "shmedia_iomap",
++	.start	= IOBASE_VADDR + PAGE_SIZE,
++	.end	= IOBASE_END - 1,
++};
++
++static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
++static void shmedia_unmapioaddr(unsigned long vaddr);
++static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
 +
-diff --git a/arch/sh/lib64/c-checksum.c b/arch/sh/lib64/c-checksum.c
-new file mode 100644
-index 0000000..5dfbd8b
---- /dev/null
-+++ b/arch/sh/lib64/c-checksum.c
-@@ -0,0 +1,214 @@
 +/*
-+ * arch/sh/lib64/c-checksum.c
-+ *
-+ * This file contains network checksum routines that are better done
-+ * in an architecture-specific manner due to speed..
++ * We have the same problem as the SPARC, so lets have the same comment:
++ * Our mini-allocator...
++ * Boy this is gross! We need it because we must map I/O for
++ * timers and interrupt controller before the kmalloc is available.
 + */
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <asm/byteorder.h>
-+#include <asm/uaccess.h>
 +
-+static inline unsigned short from64to16(unsigned long long x)
++#define XNMLN  15
++#define XNRES  10
++
++struct xresource {
++	struct resource xres;   /* Must be first */
++	int xflag;              /* 1 == used */
++	char xname[XNMLN+1];
++};
++
++static struct xresource xresv[XNRES];
++
++static struct xresource *xres_alloc(void)
 +{
-+	/* add up 32-bit words for 33 bits */
-+	x = (x & 0xffffffff) + (x >> 32);
-+	/* add up 16-bit and 17-bit words for 17+c bits */
-+	x = (x & 0xffff) + (x >> 16);
-+	/* add up 16-bit and 2-bit for 16+c bit */
-+	x = (x & 0xffff) + (x >> 16);
-+	/* add up carry.. */
-+	x = (x & 0xffff) + (x >> 16);
-+	return x;
++        struct xresource *xrp;
++        int n;
++
++        xrp = xresv;
++        for (n = 0; n < XNRES; n++) {
++                if (xrp->xflag == 0) {
++                        xrp->xflag = 1;
++                        return xrp;
++                }
++                xrp++;
++        }
++        return NULL;
 +}
 +
-+static inline unsigned short foldto16(unsigned long x)
++static void xres_free(struct xresource *xrp)
 +{
-+	/* add up 16-bit for 17 bits */
-+	x = (x & 0xffff) + (x >> 16);
-+	/* add up carry.. */
-+	x = (x & 0xffff) + (x >> 16);
-+	return x;
++	xrp->xflag = 0;
 +}
 +
-+static inline unsigned short myfoldto16(unsigned long long x)
++static struct resource *shmedia_find_resource(struct resource *root,
++					      unsigned long vaddr)
 +{
-+	/* Fold down to 32-bits so we don't loose in the typedef-less
-+	   network stack.  */
-+	/* 64 to 33 */
-+	x = (x & 0xffffffff) + (x >> 32);
-+	/* 33 to 32 */
-+	x = (x & 0xffffffff) + (x >> 32);
++	struct resource *res;
 +
-+	/* add up 16-bit for 17 bits */
-+	x = (x & 0xffff) + (x >> 16);
-+	/* add up carry.. */
-+	x = (x & 0xffff) + (x >> 16);
-+	return x;
-+}
++	for (res = root->child; res; res = res->sibling)
++		if (res->start <= vaddr && res->end >= vaddr)
++			return res;
 +
-+#define odd(x) ((x)&1)
-+#define U16(x) ntohs(x)
++	return NULL;
++}
 +
-+static unsigned long do_csum(const unsigned char *buff, int len)
++static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
++				      const char *name)
 +{
-+	int odd, count;
-+	unsigned long result = 0;
-+
-+	pr_debug("do_csum buff %p, len %d (0x%x)\n", buff, len, len);
-+#ifdef DEBUG
-+	for (i = 0; i < len; i++) {
-+		if ((i % 26) == 0)
-+			printk("\n");
-+		printk("%02X ", buff[i]);
-+	}
-+#endif
++        static int printed_full = 0;
++        struct xresource *xres;
++        struct resource *res;
++        char *tack;
++        int tlen;
 +
-+	if (len <= 0)
-+		goto out;
++        if (name == NULL) name = "???";
 +
-+	odd = 1 & (unsigned long) buff;
-+	if (odd) {
-+		result = *buff << 8;
-+		len--;
-+		buff++;
-+	}
-+	count = len >> 1;	/* nr of 16-bit words.. */
-+	if (count) {
-+		if (2 & (unsigned long) buff) {
-+			result += *(unsigned short *) buff;
-+			count--;
-+			len -= 2;
-+			buff += 2;
-+		}
-+		count >>= 1;	/* nr of 32-bit words.. */
-+		if (count) {
-+			unsigned long carry = 0;
-+			do {
-+				unsigned long w = *(unsigned long *) buff;
-+				buff += 4;
-+				count--;
-+				result += carry;
-+				result += w;
-+				carry = (w > result);
-+			} while (count);
-+			result += carry;
-+			result = (result & 0xffff) + (result >> 16);
-+		}
-+		if (len & 2) {
-+			result += *(unsigned short *) buff;
-+			buff += 2;
-+		}
-+	}
-+	if (len & 1)
-+		result += *buff;
-+	result = foldto16(result);
-+	if (odd)
-+		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
++        if ((xres = xres_alloc()) != 0) {
++                tack = xres->xname;
++                res = &xres->xres;
++        } else {
++                if (!printed_full) {
++                        printk("%s: done with statics, switching to kmalloc\n",
++			       __FUNCTION__);
++                        printed_full = 1;
++                }
++                tlen = strlen(name);
++                tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
++                if (!tack)
++			return -ENOMEM;
++                memset(tack, 0, sizeof(struct resource));
++                res = (struct resource *) tack;
++                tack += sizeof (struct resource);
++        }
 +
-+	pr_debug("\nCHECKSUM is 0x%lx\n", result);
++        strncpy(tack, name, XNMLN);
++        tack[XNMLN] = 0;
++        res->name = tack;
 +
-+      out:
-+	return result;
++        return shmedia_ioremap(res, phys, size);
 +}
 +
-+/* computes the checksum of a memory block at buff, length len,
-+   and adds in "sum" (32-bit)  */
-+__wsum csum_partial(const void *buff, int len, __wsum sum)
++static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
 +{
-+	unsigned long long result = do_csum(buff, len);
++        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
++	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
++        unsigned long va;
++        unsigned int psz;
 +
-+	/* add in old sum, and carry.. */
-+	result += (__force u32)sum;
-+	/* 32+c bits -> 32 bits */
-+	result = (result & 0xffffffff) + (result >> 32);
++        if (allocate_resource(&shmedia_iomap, res, round_sz,
++			      shmedia_iomap.start, shmedia_iomap.end,
++			      PAGE_SIZE, NULL, NULL) != 0) {
++                panic("alloc_io_res(%s): cannot occupy\n",
++                    (res->name != NULL)? res->name: "???");
++        }
 +
-+	pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n",
-+		buff, len, sum, result);
++        va = res->start;
++        pa &= PAGE_MASK;
 +
-+	return (__force __wsum)result;
-+}
++	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
 +
-+/* Copy while checksumming, otherwise like csum_partial.  */
-+__wsum
-+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-+{
-+	sum = csum_partial(src, len, sum);
-+	memcpy(dst, src, len);
++	/* log at boot time ... */
++	printk("mapioaddr: %6s  [%2d page%s]  va 0x%08lx   pa 0x%08x\n",
++	       ((res->name != NULL) ? res->name : "???"),
++	       psz, psz == 1 ? " " : "s", va, pa);
 +
-+	return sum;
++        for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
++                shmedia_mapioaddr(pa, va);
++                va += PAGE_SIZE;
++                pa += PAGE_SIZE;
++        }
++
++        res->start += offset;
++        res->end = res->start + sz - 1;         /* not strictly necessary.. */
++
++        return res->start;
 +}
 +
-+/* Copy from userspace and compute checksum.  If we catch an exception
-+   then zero the rest of the buffer.  */
-+__wsum
-+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
-+			    __wsum sum, int *err_ptr)
++static void shmedia_free_io(struct resource *res)
 +{
-+	int missing;
++	unsigned long len = res->end - res->start + 1;
 +
-+	pr_debug
-+	    ("csum_partial_copy_from_user src %p, dest %p, len %d, sum %08x, err_ptr %p\n",
-+	     src, dst, len, sum, err_ptr);
-+	missing = copy_from_user(dst, src, len);
-+	pr_debug("  access_ok %d\n", __access_ok((unsigned long) src, len));
-+	pr_debug("  missing %d\n", missing);
-+	if (missing) {
-+		memset(dst + len - missing, 0, missing);
-+		*err_ptr = -EFAULT;
++	BUG_ON((len & (PAGE_SIZE - 1)) != 0);
++
++	while (len) {
++		len -= PAGE_SIZE;
++		shmedia_unmapioaddr(res->start + len);
 +	}
 +
-+	return csum_partial(dst, len, sum);
++	release_resource(res);
 +}
 +
-+/* Copy to userspace and compute checksum.  */
-+__wsum
-+csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len,
-+			  __wsum sum, int *err_ptr)
++static __init_refok void *sh64_get_page(void)
 +{
-+	sum = csum_partial(src, len, sum);
-+
-+	if (copy_to_user(dst, src, len))
-+		*err_ptr = -EFAULT;
++	extern int after_bootmem;
++	void *page;
 +
-+	return sum;
-+}
++	if (after_bootmem) {
++		page = (void *)get_zeroed_page(GFP_ATOMIC);
++	} else {
++		page = alloc_bootmem_pages(PAGE_SIZE);
++	}
 +
-+/*
-+ *	This is a version of ip_compute_csum() optimized for IP headers,
-+ *	which always checksum on 4 octet boundaries.
-+ */
-+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-+{
-+	pr_debug("ip_fast_csum %p,%d\n", iph, ihl);
++	if (!page || ((unsigned long)page & ~PAGE_MASK))
++		panic("sh64_get_page: Out of memory already?\n");
 +
-+	return (__force __sum16)~do_csum(iph, ihl * 4);
++	return page;
 +}
 +
-+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-+				unsigned short len,
-+				unsigned short proto, __wsum sum)
++static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
 +{
-+	unsigned long long result;
++	pgd_t *pgdp;
++	pud_t *pudp;
++	pmd_t *pmdp;
++	pte_t *ptep, pte;
++	pgprot_t prot;
++	unsigned long flags = 1; /* 1 = CB0-1 device */
 +
-+	pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead));
-+	pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead));
++	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);
 +
-+	result = (__force u64) saddr + (__force u64) daddr +
-+		 (__force u64) sum + ((len + proto) << 8);
++	pgdp = pgd_offset_k(va);
++	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
++		pudp = (pud_t *)sh64_get_page();
++		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
++	}
 +
-+	/* Fold down to 32-bits so we don't loose in the typedef-less
-+	   network stack.  */
-+	/* 64 to 33 */
-+	result = (result & 0xffffffff) + (result >> 32);
-+	/* 33 to 32 */
-+	result = (result & 0xffffffff) + (result >> 32);
++	pudp = pud_offset(pgdp, va);
++	if (pud_none(*pudp) || !pud_present(*pudp)) {
++		pmdp = (pmd_t *)sh64_get_page();
++		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
++	}
 +
-+	pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n",
-+		__FUNCTION__, saddr, daddr, len, proto, sum, result);
++	pmdp = pmd_offset(pudp, va);
++	if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
++		ptep = (pte_t *)sh64_get_page();
++		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
++	}
 +
-+	return (__wsum)result;
-+}
-+EXPORT_SYMBOL(csum_tcpudp_nofold);
-diff --git a/arch/sh/lib64/clear_page.S b/arch/sh/lib64/clear_page.S
-new file mode 100644
-index 0000000..007ab48
---- /dev/null
-+++ b/arch/sh/lib64/clear_page.S
-@@ -0,0 +1,54 @@
-+/*
-+   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
++	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
++			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);
 +
-+   This file is subject to the terms and conditions of the GNU General Public
-+   License.  See the file "COPYING" in the main directory of this archive
-+   for more details.
++	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
++	ptep = pte_offset_kernel(pmdp, va);
 +
-+   Tight version of memset for the case of just clearing a page.  It turns out
-+   that having the alloco's spaced out slightly due to the increment/branch
-+   pair causes them to contend less for access to the cache.  Similarly,
-+   keeping the stores apart from the allocos causes less contention.  => Do two
-+   separate loops.  Do multiple stores per loop to amortise the
-+   increment/branch cost a little.
++	if (!pte_none(*ptep) &&
++	    pte_val(*ptep) != pte_val(pte))
++		pte_ERROR(*ptep);
 +
-+   Parameters:
-+   r2 : source effective address (start of page)
++	set_pte(ptep, pte);
 +
-+   Always clears 4096 bytes.
++	flush_tlb_kernel_range(va, PAGE_SIZE);
++}
 +
-+   Note : alloco guarded by synco to avoid TAKum03020 erratum
++static void shmedia_unmapioaddr(unsigned long vaddr)
++{
++	pgd_t *pgdp;
++	pud_t *pudp;
++	pmd_t *pmdp;
++	pte_t *ptep;
 +
-+*/
++	pgdp = pgd_offset_k(vaddr);
++	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
++		return;
 +
-+	.section .text..SHmedia32,"ax"
-+	.little
++	pudp = pud_offset(pgdp, vaddr);
++	if (pud_none(*pudp) || pud_bad(*pudp))
++		return;
 +
-+	.balign 8
-+	.global clear_page
-+clear_page:
-+	pta/l 1f, tr1
-+	pta/l 2f, tr2
-+	ptabs/l r18, tr0
++	pmdp = pmd_offset(pudp, vaddr);
++	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
++		return;
 +
-+	movi 4096, r7
-+	add  r2, r7, r7
-+	add  r2, r63, r6
-+1:
-+	alloco r6, 0
-+	synco	! TAKum03020
-+	addi	r6, 32, r6
-+	bgt/l	r7, r6, tr1
++	ptep = pte_offset_kernel(pmdp, vaddr);
 +
-+	add  r2, r63, r6
-+2:
-+	st.q  r6,   0, r63
-+	st.q  r6,   8, r63
-+	st.q  r6,  16, r63
-+	st.q  r6,  24, r63
-+	addi r6, 32, r6
-+	bgt/l r7, r6, tr2
++	if (pte_none(*ptep) || !pte_present(*ptep))
++		return;
 +
-+	blink tr0, r63
++	clear_page((void *)ptep);
++	pte_clear(&init_mm, vaddr, ptep);
++}
 +
++unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
++{
++	if (size < PAGE_SIZE)
++		size = PAGE_SIZE;
 +
-diff --git a/arch/sh/lib64/copy_page.S b/arch/sh/lib64/copy_page.S
-new file mode 100644
-index 0000000..0ec6fca
---- /dev/null
-+++ b/arch/sh/lib64/copy_page.S
-@@ -0,0 +1,89 @@
-+/*
-+   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
++	return shmedia_alloc_io(phys, size, name);
++}
 +
-+   This file is subject to the terms and conditions of the GNU General Public
-+   License.  See the file "COPYING" in the main directory of this archive
-+   for more details.
++void onchip_unmap(unsigned long vaddr)
++{
++	struct resource *res;
++	unsigned int psz;
 +
-+   Tight version of mempy for the case of just copying a page.
-+   Prefetch strategy empirically optimised against RTL simulations
-+   of SH5-101 cut2 eval chip with Cayman board DDR memory.
++	res = shmedia_find_resource(&shmedia_iomap, vaddr);
++	if (!res) {
++		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
++		       __FUNCTION__, vaddr);
++		return;
++	}
 +
-+   Parameters:
-+   r2 : destination effective address (start of page)
-+   r3 : source effective address (start of page)
++        psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
 +
-+   Always copies 4096 bytes.
++        printk(KERN_DEBUG "unmapioaddr: %6s  [%2d page%s] freed\n",
++	       res->name, psz, psz == 1 ? " " : "s");
 +
-+   Points to review.
-+   * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
-+     It seems like the prefetch needs to be at at least 4 lines ahead to get
-+     the data into the cache in time, and the allocos contend with outstanding
-+     prefetches for the same cache set, so it's better to have the numbers
-+     different.
-+   */
++	shmedia_free_io(res);
 +
-+	.section .text..SHmedia32,"ax"
-+	.little
++	if ((char *)res >= (char *)xresv &&
++	    (char *)res <  (char *)&xresv[XNRES]) {
++		xres_free((struct xresource *)res);
++	} else {
++		kfree(res);
++	}
++}
 +
-+	.balign 8
-+	.global copy_page
-+copy_page:
++#ifdef CONFIG_PROC_FS
++static int
++ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
++		  void *data)
++{
++	char *p = buf, *e = buf + length;
++	struct resource *r;
++	const char *nm;
 +
-+	/* Copy 4096 bytes worth of data from r3 to r2.
-+	   Do prefetches 4 lines ahead.
-+	   Do alloco 2 lines ahead */
++	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
++		if (p + 32 >= e)        /* Better than nothing */
++			break;
++		if ((nm = r->name) == 0) nm = "???";
++		p += sprintf(p, "%08lx-%08lx: %s\n",
++			     (unsigned long)r->start,
++			     (unsigned long)r->end, nm);
++	}
 +
-+	pta 1f, tr1
-+	pta 2f, tr2
-+	pta 3f, tr3
-+	ptabs r18, tr0
++	return p-buf;
++}
++#endif /* CONFIG_PROC_FS */
 +
-+#if 0
-+	/* TAKum03020 */
-+	ld.q r3, 0x00, r63
-+	ld.q r3, 0x20, r63
-+	ld.q r3, 0x40, r63
-+	ld.q r3, 0x60, r63
++static int __init register_proc_onchip(void)
++{
++#ifdef CONFIG_PROC_FS
++	create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
 +#endif
-+	alloco r2, 0x00
-+	synco		! TAKum03020
-+	alloco r2, 0x20
-+	synco		! TAKum03020
++	return 0;
++}
 +
-+	movi 3968, r6
-+	add  r2, r6, r6
-+	addi r6, 64, r7
-+	addi r7, 64, r8
-+	sub r3, r2, r60
-+	addi r60, 8, r61
-+	addi r61, 8, r62
-+	addi r62, 8, r23
-+	addi r60, 0x80, r22
++__initcall(register_proc_onchip);
+diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c
+index d15221b..677dd57 100644
+--- a/arch/sh/mm/pg-nommu.c
++++ b/arch/sh/mm/pg-nommu.c
+@@ -14,12 +14,12 @@
+ #include <linux/string.h>
+ #include <asm/page.h>
+ 
+-void copy_page_nommu(void *to, void *from)
++void copy_page(void *to, void *from)
+ {
+ 	memcpy(to, from, PAGE_SIZE);
+ }
+ 
+-void clear_page_nommu(void *to)
++void clear_page(void *to)
+ {
+ 	memset(to, 0, PAGE_SIZE);
+ }
+diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
+index 1d45b82..ab81c60 100644
+--- a/arch/sh/mm/pmb.c
++++ b/arch/sh/mm/pmb.c
+@@ -27,6 +27,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/mmu.h>
+ #include <asm/io.h>
++#include <asm/mmu_context.h>
+ 
+ #define NR_PMB_ENTRIES	16
+ 
+@@ -162,18 +163,18 @@ repeat:
+ 	return 0;
+ }
+ 
+-int set_pmb_entry(struct pmb_entry *pmbe)
++int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
+ {
+ 	int ret;
+ 
+-	jump_to_P2();
++	jump_to_uncached();
+ 	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
+-	back_to_P1();
++	back_to_cached();
+ 
+ 	return ret;
+ }
+ 
+-void clear_pmb_entry(struct pmb_entry *pmbe)
++void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
+ {
+ 	unsigned int entry = pmbe->entry;
+ 	unsigned long addr;
+@@ -187,7 +188,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
+ 		     entry >= NR_PMB_ENTRIES))
+ 		return;
+ 
+-	jump_to_P2();
++	jump_to_uncached();
+ 
+ 	/* Clear V-bit */
+ 	addr = mk_pmb_addr(entry);
+@@ -196,7 +197,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
+ 	addr = mk_pmb_data(entry);
+ 	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
+ 
+-	back_to_P1();
++	back_to_cached();
+ 
+ 	clear_bit(entry, &pmb_map);
+ }
+@@ -301,17 +302,17 @@ static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
+ 	pmbe->entry = PMB_NO_ENTRY;
+ }
+ 
+-static int __init pmb_init(void)
++static int __uses_jump_to_uncached pmb_init(void)
+ {
+ 	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
+-	unsigned int entry;
++	unsigned int entry, i;
+ 
+ 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
+ 
+ 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
+ 				      SLAB_PANIC, pmb_cache_ctor);
+ 
+-	jump_to_P2();
++	jump_to_uncached();
+ 
+ 	/*
+ 	 * Ordering is important, P2 must be mapped in the PMB before we
+@@ -329,7 +330,12 @@ static int __init pmb_init(void)
+ 	/* PMB.SE and UB[7] */
+ 	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
+ 
+-	back_to_P1();
++	/* Flush out the TLB */
++	i =  ctrl_inl(MMUCR);
++	i |= MMUCR_TI;
++	ctrl_outl(i, MMUCR);
 +
-+/* Minimal code size.  The extra branches inside the loop don't cost much
-+   because they overlap with the time spent waiting for prefetches to
-+   complete. */
-+1:
-+#if 0
-+	/* TAKum03020 */
-+	bge/u r2, r6, tr2  ! skip prefetch for last 4 lines
-+	ldx.q r2, r22, r63 ! prefetch 4 lines hence
-+#endif
-+2:
-+	bge/u r2, r7, tr3  ! skip alloco for last 2 lines
-+	alloco r2, 0x40    ! alloc destination line 2 lines ahead
-+	synco		! TAKum03020
-+3:
-+	ldx.q r2, r60, r36
-+	ldx.q r2, r61, r37
-+	ldx.q r2, r62, r38
-+	ldx.q r2, r23, r39
-+	st.q  r2,   0, r36
-+	st.q  r2,   8, r37
-+	st.q  r2,  16, r38
-+	st.q  r2,  24, r39
-+	addi r2, 32, r2
-+	bgt/l r8, r2, tr1
++	back_to_cached();
+ 
+ 	return 0;
+ }
+diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
+deleted file mode 100644
+index 6f45c1f..0000000
+--- a/arch/sh/mm/tlb-flush.c
++++ /dev/null
+@@ -1,140 +0,0 @@
+-/*
+- * TLB flushing operations for SH with an MMU.
+- *
+- *  Copyright (C) 1999  Niibe Yutaka
+- *  Copyright (C) 2003  Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-#include <linux/mm.h>
+-#include <asm/mmu_context.h>
+-#include <asm/tlbflush.h>
+-
+-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+-{
+-	unsigned int cpu = smp_processor_id();
+-
+-	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
+-		unsigned long flags;
+-		unsigned long asid;
+-		unsigned long saved_asid = MMU_NO_ASID;
+-
+-		asid = cpu_asid(cpu, vma->vm_mm);
+-		page &= PAGE_MASK;
+-
+-		local_irq_save(flags);
+-		if (vma->vm_mm != current->mm) {
+-			saved_asid = get_asid();
+-			set_asid(asid);
+-		}
+-		local_flush_tlb_one(asid, page);
+-		if (saved_asid != MMU_NO_ASID)
+-			set_asid(saved_asid);
+-		local_irq_restore(flags);
+-	}
+-}
+-
+-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+-			   unsigned long end)
+-{
+-	struct mm_struct *mm = vma->vm_mm;
+-	unsigned int cpu = smp_processor_id();
+-
+-	if (cpu_context(cpu, mm) != NO_CONTEXT) {
+-		unsigned long flags;
+-		int size;
+-
+-		local_irq_save(flags);
+-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+-		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+-			cpu_context(cpu, mm) = NO_CONTEXT;
+-			if (mm == current->mm)
+-				activate_context(mm, cpu);
+-		} else {
+-			unsigned long asid;
+-			unsigned long saved_asid = MMU_NO_ASID;
+-
+-			asid = cpu_asid(cpu, mm);
+-			start &= PAGE_MASK;
+-			end += (PAGE_SIZE - 1);
+-			end &= PAGE_MASK;
+-			if (mm != current->mm) {
+-				saved_asid = get_asid();
+-				set_asid(asid);
+-			}
+-			while (start < end) {
+-				local_flush_tlb_one(asid, start);
+-				start += PAGE_SIZE;
+-			}
+-			if (saved_asid != MMU_NO_ASID)
+-				set_asid(saved_asid);
+-		}
+-		local_irq_restore(flags);
+-	}
+-}
+-
+-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+-{
+-	unsigned int cpu = smp_processor_id();
+-	unsigned long flags;
+-	int size;
+-
+-	local_irq_save(flags);
+-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+-	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+-		local_flush_tlb_all();
+-	} else {
+-		unsigned long asid;
+-		unsigned long saved_asid = get_asid();
+-
+-		asid = cpu_asid(cpu, &init_mm);
+-		start &= PAGE_MASK;
+-		end += (PAGE_SIZE - 1);
+-		end &= PAGE_MASK;
+-		set_asid(asid);
+-		while (start < end) {
+-			local_flush_tlb_one(asid, start);
+-			start += PAGE_SIZE;
+-		}
+-		set_asid(saved_asid);
+-	}
+-	local_irq_restore(flags);
+-}
+-
+-void local_flush_tlb_mm(struct mm_struct *mm)
+-{
+-	unsigned int cpu = smp_processor_id();
+-
+-	/* Invalidate all TLB of this process. */
+-	/* Instead of invalidating each TLB, we get new MMU context. */
+-	if (cpu_context(cpu, mm) != NO_CONTEXT) {
+-		unsigned long flags;
+-
+-		local_irq_save(flags);
+-		cpu_context(cpu, mm) = NO_CONTEXT;
+-		if (mm == current->mm)
+-			activate_context(mm, cpu);
+-		local_irq_restore(flags);
+-	}
+-}
+-
+-void local_flush_tlb_all(void)
+-{
+-	unsigned long flags, status;
+-
+-	/*
+-	 * Flush all the TLB.
+-	 *
+-	 * Write to the MMU control register's bit:
+-	 *	TF-bit for SH-3, TI-bit for SH-4.
+-	 *      It's same position, bit #2.
+-	 */
+-	local_irq_save(flags);
+-	status = ctrl_inl(MMUCR);
+-	status |= 0x04;
+-	ctrl_outl(status, MMUCR);
+-	ctrl_barrier();
+-	local_irq_restore(flags);
+-}
+diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
+index 1ccca7c..15111bc 100644
+--- a/arch/sh/mm/tlb-nommu.c
++++ b/arch/sh/mm/tlb-nommu.c
+@@ -9,6 +9,7 @@
+  */
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
++#include <asm/pgtable.h>
+ 
+ /*
+  * Nothing too terribly exciting here ..
+@@ -49,3 +50,12 @@ void update_mmu_cache(struct vm_area_struct * vma,
+ {
+ 	BUG();
+ }
 +
-+	blink tr0, r63	   ! return
-diff --git a/arch/sh/lib64/copy_user_memcpy.S b/arch/sh/lib64/copy_user_memcpy.S
++void __init page_table_range_init(unsigned long start, unsigned long end,
++				  pgd_t *pgd_base)
++{
++}
++
++void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++{
++}
+diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
+index 2d1dd60..f0c7b73 100644
+--- a/arch/sh/mm/tlb-sh4.c
++++ b/arch/sh/mm/tlb-sh4.c
+@@ -79,7 +79,8 @@ void update_mmu_cache(struct vm_area_struct * vma,
+ 	local_irq_restore(flags);
+ }
+ 
+-void local_flush_tlb_one(unsigned long asid, unsigned long page)
++void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
++						 unsigned long page)
+ {
+ 	unsigned long addr, data;
+ 
+@@ -91,7 +92,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
+ 	 */
+ 	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
+ 	data = page | asid; /* VALID bit is off */
+-	jump_to_P2();
++	jump_to_uncached();
+ 	ctrl_outl(data, addr);
+-	back_to_P1();
++	back_to_cached();
+ }
+diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
 new file mode 100644
-index 0000000..2a62816
+index 0000000..f34274a
 --- /dev/null
-+++ b/arch/sh/lib64/copy_user_memcpy.S
-@@ -0,0 +1,217 @@
-+!
-+! Fast SH memcpy
-+!
-+! by Toshiyasu Morita (tm@netcom.com)
-+! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
-+! SH5 code Copyright 2002 SuperH Ltd.
-+!
-+! Entry: ARG0: destination pointer
-+!        ARG1: source pointer
-+!        ARG2: byte count
-+!
-+! Exit:  RESULT: destination pointer
-+!        any other registers in the range r0-r7: trashed
-+!
-+! Notes: Usually one wants to do small reads and write a longword, but
-+!        unfortunately it is difficult in some cases to concatanate bytes
-+!        into a longword on the SH, so this does a longword read and small
-+!        writes.
-+!
-+! This implementation makes two assumptions about how it is called:
-+!
-+! 1.: If the byte count is nonzero, the address of the last byte to be
-+!     copied is unsigned greater than the address of the first byte to
-+!     be copied.  This could be easily swapped for a signed comparison,
-+!     but the algorithm used needs some comparison.
-+!
-+! 2.: When there are two or three bytes in the last word of an 11-or-more
-+!     bytes memory chunk to b copied, the rest of the word can be read
-+!     without side effects.
-+!     This could be easily changed by increasing the minumum size of
-+!     a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
-+!     however, this would cost a few extra cyles on average.
-+!     For SHmedia, the assumption is that any quadword can be read in its
-+!     enirety if at least one byte is included in the copy.
-+
-+/* Imported into Linux kernel by Richard Curnow.  This is used to implement the
-+   __copy_user function in the general case, so it has to be a distinct
-+   function from intra-kernel memcpy to allow for exception fix-ups in the
-+   event that the user pointer is bad somewhere in the copy (e.g. due to
-+   running off the end of the vma).
++++ b/arch/sh/mm/tlb-sh5.c
+@@ -0,0 +1,164 @@
++/*
++ * arch/sh/mm/tlb-sh5.c
++ *
++ * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
++ * Copyright (C) 2003  Richard Curnow <richard.curnow@superh.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <asm/page.h>
++#include <asm/tlb.h>
++#include <asm/mmu_context.h>
 +
-+   Note, this algorithm will be slightly wasteful in the case where the source
-+   and destination pointers are equally aligned, because the stlo/sthi pairs
-+   could then be merged back into single stores.  If there are a lot of cache
-+   misses, this is probably offset by the stall lengths on the preloads.
++/**
++ * sh64_tlb_init
++ *
++ * Perform initial setup for the DTLB and ITLB.
++ */
++int __init sh64_tlb_init(void)
++{
++	/* Assign some sane DTLB defaults */
++	cpu_data->dtlb.entries	= 64;
++	cpu_data->dtlb.step	= 0x10;
 +
-+*/
++	cpu_data->dtlb.first	= DTLB_FIXED | cpu_data->dtlb.step;
++	cpu_data->dtlb.next	= cpu_data->dtlb.first;
 +
-+/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
-+ * erratum.  The first two prefetches are nop-ed out to avoid upsetting the
-+ * instruction counts used in the jump address calculation.
-+ * */
++	cpu_data->dtlb.last	= DTLB_FIXED |
++				  ((cpu_data->dtlb.entries - 1) *
++				   cpu_data->dtlb.step);
 +
-+	.section .text..SHmedia32,"ax"
-+	.little
-+	.balign 32
-+	.global copy_user_memcpy
-+	.global copy_user_memcpy_end
-+copy_user_memcpy:
++	/* And again for the ITLB */
++	cpu_data->itlb.entries	= 64;
++	cpu_data->itlb.step	= 0x10;
 +
-+#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
-+#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
-+#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
-+#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
++	cpu_data->itlb.first	= ITLB_FIXED | cpu_data->itlb.step;
++	cpu_data->itlb.next	= cpu_data->itlb.first;
++	cpu_data->itlb.last	= ITLB_FIXED |
++				  ((cpu_data->itlb.entries - 1) *
++				   cpu_data->itlb.step);
 +
-+	nop ! ld.b r3,0,r63 ! TAKum03020
-+	pta/l Large,tr0
-+	movi 25,r0
-+	bgeu/u r4,r0,tr0
-+	nsb r4,r0
-+	shlli r0,5,r0
-+	movi (L1-L0+63*32 + 1) & 0xffff,r1
-+	sub r1, r0, r0
-+L0:	ptrel r0,tr0
-+	add r2,r4,r5
-+	ptabs r18,tr1
-+	add r3,r4,r6
-+	blink tr0,r63
++	return 0;
++}
 +
-+/* Rearranged to make cut2 safe */
-+	.balign 8
-+L4_7:	/* 4..7 byte memcpy cntd. */
-+	stlo.l r2, 0, r0
-+	or r6, r7, r6
-+	sthi.l r5, -1, r6
-+	stlo.l r5, -4, r6
-+	blink tr1,r63
++/**
++ * sh64_next_free_dtlb_entry
++ *
++ * Find the next available DTLB entry
++ */
++unsigned long long sh64_next_free_dtlb_entry(void)
++{
++	return cpu_data->dtlb.next;
++}
 +
-+	.balign 8
-+L1:	/* 0 byte memcpy */
-+	nop
-+	blink tr1,r63
-+	nop
-+	nop
-+	nop
-+	nop
++/**
++ * sh64_get_wired_dtlb_entry
++ *
++ * Allocate a wired (locked-in) entry in the DTLB
++ */
++unsigned long long sh64_get_wired_dtlb_entry(void)
++{
++	unsigned long long entry = sh64_next_free_dtlb_entry();
 +
-+L2_3:	/* 2 or 3 byte memcpy cntd. */
-+	st.b r5,-1,r6
-+	blink tr1,r63
++	cpu_data->dtlb.first += cpu_data->dtlb.step;
++	cpu_data->dtlb.next  += cpu_data->dtlb.step;
 +
-+	/* 1 byte memcpy */
-+	ld.b r3,0,r0
-+	st.b r2,0,r0
-+	blink tr1,r63
++	return entry;
++}
 +
-+L8_15:	/* 8..15 byte memcpy cntd. */
-+	stlo.q r2, 0, r0
-+	or r6, r7, r6
-+	sthi.q r5, -1, r6
-+	stlo.q r5, -8, r6
-+	blink tr1,r63
++/**
++ * sh64_put_wired_dtlb_entry
++ *
++ * @entry:	Address of TLB slot.
++ *
++ * Free a wired (locked-in) entry in the DTLB.
++ *
++ * Works like a stack, last one to allocate must be first one to free.
++ */
++int sh64_put_wired_dtlb_entry(unsigned long long entry)
++{
++	__flush_tlb_slot(entry);
 +
-+	/* 2 or 3 byte memcpy */
-+	ld.b r3,0,r0
-+	nop ! ld.b r2,0,r63 ! TAKum03020
-+	ld.b r3,1,r1
-+	st.b r2,0,r0
-+	pta/l L2_3,tr0
-+	ld.b r6,-1,r6
-+	st.b r2,1,r1
-+	blink tr0, r63
++	/*
++	 * We don't do any particularly useful tracking of wired entries,
++	 * so this approach works like a stack .. last one to be allocated
++	 * has to be the first one to be freed.
++	 *
++	 * We could potentially load wired entries into a list and work on
++	 * rebalancing the list periodically (which also entails moving the
++	 * contents of a TLB entry) .. though I have a feeling that this is
++	 * more trouble than it's worth.
++	 */
 +
-+	/* 4 .. 7 byte memcpy */
-+	LDUAL (r3, 0, r0, r1)
-+	pta L4_7, tr0
-+	ldlo.l r6, -4, r7
-+	or r0, r1, r0
-+	sthi.l r2, 3, r0
-+	ldhi.l r6, -1, r6
-+	blink tr0, r63
++	/*
++	 * Entry must be valid .. we don't want any ITLB addresses!
++	 */
++	if (entry <= DTLB_FIXED)
++		return -EINVAL;
 +
-+	/* 8 .. 15 byte memcpy */
-+	LDUAQ (r3, 0, r0, r1)
-+	pta L8_15, tr0
-+	ldlo.q r6, -8, r7
-+	or r0, r1, r0
-+	sthi.q r2, 7, r0
-+	ldhi.q r6, -1, r6
-+	blink tr0, r63
++	/*
++	 * Next, check if we're within range to be freed. (ie, must be the
++	 * entry beneath the first 'free' entry!
++	 */
++	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
++		return -EINVAL;
 +
-+	/* 16 .. 24 byte memcpy */
-+	LDUAQ (r3, 0, r0, r1)
-+	LDUAQ (r3, 8, r8, r9)
-+	or r0, r1, r0
-+	sthi.q r2, 7, r0
-+	or r8, r9, r8
-+	sthi.q r2, 15, r8
-+	ldlo.q r6, -8, r7
-+	ldhi.q r6, -1, r6
-+	stlo.q r2, 8, r8
-+	stlo.q r2, 0, r0
-+	or r6, r7, r6
-+	sthi.q r5, -1, r6
-+	stlo.q r5, -8, r6
-+	blink tr1,r63
++	/* If we are, then bring this entry back into the list */
++	cpu_data->dtlb.first	-= cpu_data->dtlb.step;
++	cpu_data->dtlb.next	= entry;
 +
-+Large:
-+	! ld.b r2, 0, r63 ! TAKum03020
-+	pta/l  Loop_ua, tr1
-+	ori r3, -8, r7
-+	sub r2, r7, r22
-+	sub r3, r2, r6
-+	add r2, r4, r5
-+	ldlo.q r3, 0, r0
-+	addi r5, -16, r5
-+	movi 64+8, r27 ! could subtract r7 from that.
-+	stlo.q r2, 0, r0
-+	sthi.q r2, 7, r0
-+	ldx.q r22, r6, r0
-+	bgtu/l r27, r4, tr1
++	return 0;
++}
 +
-+	addi r5, -48, r27
-+	pta/l Loop_line, tr0
-+	addi r6, 64, r36
-+	addi r6, -24, r19
-+	addi r6, -16, r20
-+	addi r6, -8, r21
++/**
++ * sh64_setup_tlb_slot
++ *
++ * @config_addr:	Address of TLB slot.
++ * @eaddr:		Virtual address.
++ * @asid:		Address Space Identifier.
++ * @paddr:		Physical address.
++ *
++ * Load up a virtual<->physical translation for @eaddr<->@paddr in the
++ * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
++ */
++inline void sh64_setup_tlb_slot(unsigned long long config_addr,
++				unsigned long eaddr,
++				unsigned long asid,
++				unsigned long paddr)
++{
++	unsigned long long pteh, ptel;
 +
-+Loop_line:
-+	! ldx.q r22, r36, r63 ! TAKum03020
-+	alloco r22, 32
-+	synco
-+	addi r22, 32, r22
-+	ldx.q r22, r19, r23
-+	sthi.q r22, -25, r0
-+	ldx.q r22, r20, r24
-+	ldx.q r22, r21, r25
-+	stlo.q r22, -32, r0
-+	ldx.q r22, r6,  r0
-+	sthi.q r22, -17, r23
-+	sthi.q r22,  -9, r24
-+	sthi.q r22,  -1, r25
-+	stlo.q r22, -24, r23
-+	stlo.q r22, -16, r24
-+	stlo.q r22,  -8, r25
-+	bgeu r27, r22, tr0
++	/* Sign extension */
++#if (NEFF == 32)
++	pteh = (unsigned long long)(signed long long)(signed long) eaddr;
++#else
++#error "Can't sign extend more than 32 bits yet"
++#endif
++	pteh &= PAGE_MASK;
++	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
++#if (NEFF == 32)
++	ptel = (unsigned long long)(signed long long)(signed long) paddr;
++#else
++#error "Can't sign extend more than 32 bits yet"
++#endif
++	ptel &= PAGE_MASK;
++	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
 +
-+Loop_ua:
-+	addi r22, 8, r22
-+	sthi.q r22, -1, r0
-+	stlo.q r22, -8, r0
-+	ldx.q r22, r6, r0
-+	bgtu/l r5, r22, tr1
++	asm volatile("putcfg %0, 1, %1\n\t"
++			"putcfg %0, 0, %2\n"
++			: : "r" (config_addr), "r" (ptel), "r" (pteh));
++}
 +
-+	add r3, r4, r7
-+	ldlo.q r7, -8, r1
-+	sthi.q r22, 7, r0
-+	ldhi.q r7, -1, r7
-+	ptabs r18,tr1
-+	stlo.q r22, 0, r0
-+	or r1, r7, r1
-+	sthi.q r5, 15, r1
-+	stlo.q r5, 8, r1
-+	blink tr1, r63
-+copy_user_memcpy_end:
-+	nop
-diff --git a/arch/sh/lib64/dbg.c b/arch/sh/lib64/dbg.c
++/**
++ * sh64_teardown_tlb_slot
++ *
++ * @config_addr:	Address of TLB slot.
++ *
++ * Teardown any existing mapping in the TLB slot @config_addr.
++ */
++inline void sh64_teardown_tlb_slot(unsigned long long config_addr)
++	__attribute__ ((alias("__flush_tlb_slot")));
+diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
 new file mode 100644
-index 0000000..75825ef
+index 0000000..6f45c1f
 --- /dev/null
-+++ b/arch/sh/lib64/dbg.c
-@@ -0,0 +1,430 @@
-+/*--------------------------------------------------------------------------
-+--
-+-- Identity : Linux50 Debug Funcions
-+--
-+-- File     : arch/sh/lib64/dbg.c
-+--
-+-- Copyright 2000, 2001 STMicroelectronics Limited.
-+-- Copyright 2004 Richard Curnow (evt_debug etc)
-+--
-+--------------------------------------------------------------------------*/
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
++++ b/arch/sh/mm/tlbflush_32.c
+@@ -0,0 +1,140 @@
++/*
++ * TLB flushing operations for SH with an MMU.
++ *
++ *  Copyright (C) 1999  Niibe Yutaka
++ *  Copyright (C) 2003  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
 +#include <linux/mm.h>
-+#include <linux/fs.h>
 +#include <asm/mmu_context.h>
++#include <asm/tlbflush.h>
 +
-+typedef u64 regType_t;
-+
-+static regType_t getConfigReg(u64 id)
++void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 +{
-+	register u64 reg __asm__("r2");
-+	asm volatile ("getcfg   %1, 0, %0":"=r" (reg):"r"(id));
-+	return (reg);
-+}
-+
-+/* ======================================================================= */
-+
-+static char *szTab[] = { "4k", "64k", "1M", "512M" };
-+static char *protTab[] = { "----",
-+	"---R",
-+	"--X-",
-+	"--XR",
-+	"-W--",
-+	"-W-R",
-+	"-WX-",
-+	"-WXR",
-+	"U---",
-+	"U--R",
-+	"U-X-",
-+	"U-XR",
-+	"UW--",
-+	"UW-R",
-+	"UWX-",
-+	"UWXR"
-+};
-+#define  ITLB_BASE	0x00000000
-+#define  DTLB_BASE	0x00800000
-+#define  MAX_TLBs		64
-+/* PTE High */
-+#define  GET_VALID(pte)        ((pte) & 0x1)
-+#define  GET_SHARED(pte)       ((pte) & 0x2)
-+#define  GET_ASID(pte)         ((pte >> 2) & 0x0ff)
-+#define  GET_EPN(pte)          ((pte) & 0xfffff000)
++	unsigned int cpu = smp_processor_id();
 +
-+/* PTE Low */
-+#define  GET_CBEHAVIOR(pte)    ((pte) & 0x3)
-+#define  GET_PAGE_SIZE(pte)    szTab[((pte >> 3) & 0x3)]
-+#define  GET_PROTECTION(pte)   protTab[((pte >> 6) & 0xf)]
-+#define  GET_PPN(pte)          ((pte) & 0xfffff000)
++	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
++		unsigned long flags;
++		unsigned long asid;
++		unsigned long saved_asid = MMU_NO_ASID;
 +
-+#define PAGE_1K_MASK           0x00000000
-+#define PAGE_4K_MASK           0x00000010
-+#define PAGE_64K_MASK          0x00000080
-+#define MMU_PAGESIZE_MASK      (PAGE_64K_MASK | PAGE_4K_MASK)
-+#define PAGE_1MB_MASK          MMU_PAGESIZE_MASK
-+#define PAGE_1K                (1024)
-+#define PAGE_4K                (1024 * 4)
-+#define PAGE_64K               (1024 * 64)
-+#define PAGE_1MB               (1024 * 1024)
++		asid = cpu_asid(cpu, vma->vm_mm);
++		page &= PAGE_MASK;
 +
-+#define HOW_TO_READ_TLB_CONTENT  \
-+       "[ ID]  PPN         EPN        ASID  Share  CB  P.Size   PROT.\n"
++		local_irq_save(flags);
++		if (vma->vm_mm != current->mm) {
++			saved_asid = get_asid();
++			set_asid(asid);
++		}
++		local_flush_tlb_one(asid, page);
++		if (saved_asid != MMU_NO_ASID)
++			set_asid(saved_asid);
++		local_irq_restore(flags);
++	}
++}
 +
-+void print_single_tlb(unsigned long tlb, int single_print)
++void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
++			   unsigned long end)
 +{
-+	regType_t pteH;
-+	regType_t pteL;
-+	unsigned int valid, shared, asid, epn, cb, ppn;
-+	char *pSize;
-+	char *pProt;
-+
-+	/*
-+	   ** in case of single print <single_print> is true, this implies:
-+	   **   1) print the TLB in any case also if NOT VALID
-+	   **   2) print out the header
-+	 */
++	struct mm_struct *mm = vma->vm_mm;
++	unsigned int cpu = smp_processor_id();
 +
-+	pteH = getConfigReg(tlb);
-+	valid = GET_VALID(pteH);
-+	if (single_print)
-+		printk(HOW_TO_READ_TLB_CONTENT);
-+	else if (!valid)
-+		return;
++	if (cpu_context(cpu, mm) != NO_CONTEXT) {
++		unsigned long flags;
++		int size;
 +
-+	pteL = getConfigReg(tlb + 1);
++		local_irq_save(flags);
++		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
++		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
++			cpu_context(cpu, mm) = NO_CONTEXT;
++			if (mm == current->mm)
++				activate_context(mm, cpu);
++		} else {
++			unsigned long asid;
++			unsigned long saved_asid = MMU_NO_ASID;
 +
-+	shared = GET_SHARED(pteH);
-+	asid = GET_ASID(pteH);
-+	epn = GET_EPN(pteH);
-+	cb = GET_CBEHAVIOR(pteL);
-+	pSize = GET_PAGE_SIZE(pteL);
-+	pProt = GET_PROTECTION(pteL);
-+	ppn = GET_PPN(pteL);
-+	printk("[%c%2ld]  0x%08x  0x%08x  %03d   %02x    %02x   %4s    %s\n",
-+	       ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP),
-+	       ppn, epn, asid, shared, cb, pSize, pProt);
++			asid = cpu_asid(cpu, mm);
++			start &= PAGE_MASK;
++			end += (PAGE_SIZE - 1);
++			end &= PAGE_MASK;
++			if (mm != current->mm) {
++				saved_asid = get_asid();
++				set_asid(asid);
++			}
++			while (start < end) {
++				local_flush_tlb_one(asid, start);
++				start += PAGE_SIZE;
++			}
++			if (saved_asid != MMU_NO_ASID)
++				set_asid(saved_asid);
++		}
++		local_irq_restore(flags);
++	}
 +}
 +
-+void print_dtlb(void)
++void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 +{
-+	int count;
-+	unsigned long tlb;
-+
-+	printk(" ================= SH-5 D-TLBs Status ===================\n");
-+	printk(HOW_TO_READ_TLB_CONTENT);
-+	tlb = DTLB_BASE;
-+	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
-+		print_single_tlb(tlb, 0);
-+	printk
-+	    (" =============================================================\n");
-+}
++	unsigned int cpu = smp_processor_id();
++	unsigned long flags;
++	int size;
 +
-+void print_itlb(void)
-+{
-+	int count;
-+	unsigned long tlb;
++	local_irq_save(flags);
++	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
++	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
++		local_flush_tlb_all();
++	} else {
++		unsigned long asid;
++		unsigned long saved_asid = get_asid();
 +
-+	printk(" ================= SH-5 I-TLBs Status ===================\n");
-+	printk(HOW_TO_READ_TLB_CONTENT);
-+	tlb = ITLB_BASE;
-+	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
-+		print_single_tlb(tlb, 0);
-+	printk
-+	    (" =============================================================\n");
++		asid = cpu_asid(cpu, &init_mm);
++		start &= PAGE_MASK;
++		end += (PAGE_SIZE - 1);
++		end &= PAGE_MASK;
++		set_asid(asid);
++		while (start < end) {
++			local_flush_tlb_one(asid, start);
++			start += PAGE_SIZE;
++		}
++		set_asid(saved_asid);
++	}
++	local_irq_restore(flags);
 +}
 +
-+/* ======================================================================= */
-+
-+#ifdef CONFIG_POOR_MANS_STRACE
++void local_flush_tlb_mm(struct mm_struct *mm)
++{
++	unsigned int cpu = smp_processor_id();
 +
-+#include "syscalltab.h"
++	/* Invalidate all TLB of this process. */
++	/* Instead of invalidating each TLB, we get new MMU context. */
++	if (cpu_context(cpu, mm) != NO_CONTEXT) {
++		unsigned long flags;
 +
-+struct ring_node {
-+	int evt;
-+	int ret_addr;
-+	int event;
-+	int tra;
-+	int pid;
-+	unsigned long sp;
-+	unsigned long pc;
-+};
++		local_irq_save(flags);
++		cpu_context(cpu, mm) = NO_CONTEXT;
++		if (mm == current->mm)
++			activate_context(mm, cpu);
++		local_irq_restore(flags);
++	}
++}
 +
-+static struct ring_node event_ring[16];
-+static int event_ptr = 0;
++void local_flush_tlb_all(void)
++{
++	unsigned long flags, status;
 +
-+struct stored_syscall_data {
-+	int pid;
-+	int syscall_number;
-+};
++	/*
++	 * Flush all the TLB.
++	 *
++	 * Write to the MMU control register's bit:
++	 *	TF-bit for SH-3, TI-bit for SH-4.
++	 *      It's same position, bit #2.
++	 */
++	local_irq_save(flags);
++	status = ctrl_inl(MMUCR);
++	status |= 0x04;
++	ctrl_outl(status, MMUCR);
++	ctrl_barrier();
++	local_irq_restore(flags);
++}
+diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
+new file mode 100644
+index 0000000..2a98c9e
+--- /dev/null
++++ b/arch/sh/mm/tlbflush_64.c
+@@ -0,0 +1,475 @@
++/*
++ * arch/sh/mm/tlb-flush_64.c
++ *
++ * Copyright (C) 2000, 2001  Paolo Alberelli
++ * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
++ * Copyright (C) 2003  Paul Mundt
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++#include <linux/signal.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/interrupt.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/tlb.h>
++#include <asm/uaccess.h>
++#include <asm/pgalloc.h>
++#include <asm/mmu_context.h>
 +
-+#define N_STORED_SYSCALLS 16
++extern void die(const char *,struct pt_regs *,long);
 +
-+static struct stored_syscall_data stored_syscalls[N_STORED_SYSCALLS];
-+static int syscall_next=0;
-+static int syscall_next_print=0;
++#define PFLAG(val,flag)   (( (val) & (flag) ) ? #flag : "" )
++#define PPROT(flag) PFLAG(pgprot_val(prot),flag)
 +
-+void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs)
++static inline void print_prots(pgprot_t prot)
 +{
-+	int syscallno = tra & 0xff;
-+	unsigned long sp;
-+	unsigned long stack_bottom;
-+	int pid;
-+	struct ring_node *rr;
-+
-+	pid = current->pid;
-+	stack_bottom = (unsigned long) task_stack_page(current);
-+	asm volatile("ori r15, 0, %0" : "=r" (sp));
-+	rr = event_ring + event_ptr;
-+	rr->evt = evt;
-+	rr->ret_addr = ret_addr;
-+	rr->event = event;
-+	rr->tra = tra;
-+	rr->pid = pid;
-+	rr->sp = sp;
-+	rr->pc = regs->pc;
-+
-+	if (sp < stack_bottom + 3092) {
-+		printk("evt_debug : stack underflow report\n");
-+		int i, j;
-+		for (j=0, i = event_ptr; j<16; j++) {
-+			rr = event_ring + i;
-+			printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n",
-+				rr->evt, rr->event, rr->tra, rr->pid, rr->sp, rr->pc);
-+			i--;
-+			i &= 15;
-+		}
-+		panic("STACK UNDERFLOW\n");
-+	}
-+
-+	event_ptr = (event_ptr + 1) & 15;
++	printk("prot is 0x%08lx\n",pgprot_val(prot));
 +
-+	if ((event == 2) && (evt == 0x160)) {
-+		if (syscallno < NUM_SYSCALL_INFO_ENTRIES) {
-+			/* Store the syscall information to print later.  We
-+			 * can't print this now - currently we're running with
-+			 * SR.BL=1, so we can't take a tlbmiss (which could occur
-+			 * in the console drivers under printk).
-+			 *
-+			 * Just overwrite old entries on ring overflow - this
-+			 * is only for last-hope debugging. */
-+			stored_syscalls[syscall_next].pid = current->pid;
-+			stored_syscalls[syscall_next].syscall_number = syscallno;
-+			syscall_next++;
-+			syscall_next &= (N_STORED_SYSCALLS - 1);
-+		}
-+	}
++	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
++	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
 +}
 +
-+static void drain_syscalls(void) {
-+	while (syscall_next_print != syscall_next) {
-+		printk("Task %d: %s()\n",
-+			stored_syscalls[syscall_next_print].pid,
-+			syscall_info_table[stored_syscalls[syscall_next_print].syscall_number].name);
-+			syscall_next_print++;
-+			syscall_next_print &= (N_STORED_SYSCALLS - 1);
-+	}
++static inline void print_vma(struct vm_area_struct *vma)
++{
++	printk("vma start 0x%08lx\n", vma->vm_start);
++	printk("vma end   0x%08lx\n", vma->vm_end);
++
++	print_prots(vma->vm_page_prot);
++	printk("vm_flags 0x%08lx\n", vma->vm_flags);
 +}
 +
-+void evt_debug2(unsigned int ret)
++static inline void print_task(struct task_struct *tsk)
 +{
-+	drain_syscalls();
-+	printk("Task %d: syscall returns %08x\n", current->pid, ret);
++	printk("Task pid %d\n", task_pid_nr(tsk));
 +}
 +
-+void evt_debug_ret_from_irq(struct pt_regs *regs)
++static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
 +{
-+	int pid;
-+	struct ring_node *rr;
++	pgd_t *dir;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	pte_t entry;
 +
-+	pid = current->pid;
-+	rr = event_ring + event_ptr;
-+	rr->evt = 0xffff;
-+	rr->ret_addr = 0;
-+	rr->event = 0;
-+	rr->tra = 0;
-+	rr->pid = pid;
-+	rr->pc = regs->pc;
-+	event_ptr = (event_ptr + 1) & 15;
-+}
++	dir = pgd_offset(mm, address);
++	if (pgd_none(*dir))
++		return NULL;
 +
-+void evt_debug_ret_from_exc(struct pt_regs *regs)
-+{
-+	int pid;
-+	struct ring_node *rr;
++	pud = pud_offset(dir, address);
++	if (pud_none(*pud))
++		return NULL;
 +
-+	pid = current->pid;
-+	rr = event_ring + event_ptr;
-+	rr->evt = 0xfffe;
-+	rr->ret_addr = 0;
-+	rr->event = 0;
-+	rr->tra = 0;
-+	rr->pid = pid;
-+	rr->pc = regs->pc;
-+	event_ptr = (event_ptr + 1) & 15;
-+}
++	pmd = pmd_offset(pud, address);
++	if (pmd_none(*pmd))
++		return NULL;
 +
-+#endif /* CONFIG_POOR_MANS_STRACE */
++	pte = pte_offset_kernel(pmd, address);
++	entry = *pte;
++	if (pte_none(entry) || !pte_present(entry))
++		return NULL;
 +
-+/* ======================================================================= */
++	return pte;
++}
 +
-+void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs)
++/*
++ * This routine handles page faults.  It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ */
++asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
++			      unsigned long textaccess, unsigned long address)
 +{
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	struct vm_area_struct * vma;
++	const struct exception_table_entry *fixup;
++	pte_t *pte;
++	int fault;
 +
-+	unsigned long long ah, al, bh, bl, ch, cl;
++	/* SIM
++	 * Note this is now called with interrupts still disabled
++	 * This is to cope with being called for a missing IO port
++	 * address with interrupts disabled. This should be fixed as
++	 * soon as we have a better 'fast path' miss handler.
++	 *
++	 * Plus take care how you try and debug this stuff.
++	 * For example, writing debug data to a port which you
++	 * have just faulted on is not going to work.
++	 */
 +
-+	printk("\n");
-+	printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n",
-+	       ((from) ? from : "???"), current->pid, trapnr, signr);
++	tsk = current;
++	mm = tsk->mm;
 +
-+	asm volatile ("getcon   " __EXPEVT ", %0":"=r"(ah));
-+	asm volatile ("getcon   " __EXPEVT ", %0":"=r"(al));
-+	ah = (ah) >> 32;
-+	al = (al) & 0xffffffff;
-+	asm volatile ("getcon   " __KCR1 ", %0":"=r"(bh));
-+	asm volatile ("getcon   " __KCR1 ", %0":"=r"(bl));
-+	bh = (bh) >> 32;
-+	bl = (bl) & 0xffffffff;
-+	asm volatile ("getcon   " __INTEVT ", %0":"=r"(ch));
-+	asm volatile ("getcon   " __INTEVT ", %0":"=r"(cl));
-+	ch = (ch) >> 32;
-+	cl = (cl) & 0xffffffff;
-+	printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	/* Not an IO address, so reenable interrupts */
++	local_irq_enable();
 +
-+	asm volatile ("getcon   " __PEXPEVT ", %0":"=r"(ah));
-+	asm volatile ("getcon   " __PEXPEVT ", %0":"=r"(al));
-+	ah = (ah) >> 32;
-+	al = (al) & 0xffffffff;
-+	asm volatile ("getcon   " __PSPC ", %0":"=r"(bh));
-+	asm volatile ("getcon   " __PSPC ", %0":"=r"(bl));
-+	bh = (bh) >> 32;
-+	bl = (bl) & 0xffffffff;
-+	asm volatile ("getcon   " __PSSR ", %0":"=r"(ch));
-+	asm volatile ("getcon   " __PSSR ", %0":"=r"(cl));
-+	ch = (ch) >> 32;
-+	cl = (cl) & 0xffffffff;
-+	printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	/*
++	 * If we're in an interrupt or have no user
++	 * context, we must not take the fault..
++	 */
++	if (in_atomic() || !mm)
++		goto no_context;
 +
-+	ah = (regs->pc) >> 32;
-+	al = (regs->pc) & 0xffffffff;
-+	bh = (regs->regs[18]) >> 32;
-+	bl = (regs->regs[18]) & 0xffffffff;
-+	ch = (regs->regs[15]) >> 32;
-+	cl = (regs->regs[15]) & 0xffffffff;
-+	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	/* TLB misses upon some cache flushes get done under cli() */
++	down_read(&mm->mmap_sem);
 +
-+	ah = (regs->sr) >> 32;
-+	al = (regs->sr) & 0xffffffff;
-+	asm volatile ("getcon   " __TEA ", %0":"=r"(bh));
-+	asm volatile ("getcon   " __TEA ", %0":"=r"(bl));
-+	bh = (bh) >> 32;
-+	bl = (bl) & 0xffffffff;
-+	asm volatile ("getcon   " __KCR0 ", %0":"=r"(ch));
-+	asm volatile ("getcon   " __KCR0 ", %0":"=r"(cl));
-+	ch = (ch) >> 32;
-+	cl = (cl) & 0xffffffff;
-+	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	vma = find_vma(mm, address);
 +
-+	ah = (regs->regs[0]) >> 32;
-+	al = (regs->regs[0]) & 0xffffffff;
-+	bh = (regs->regs[1]) >> 32;
-+	bl = (regs->regs[1]) & 0xffffffff;
-+	ch = (regs->regs[2]) >> 32;
-+	cl = (regs->regs[2]) & 0xffffffff;
-+	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	if (!vma) {
++#ifdef DEBUG_FAULT
++		print_task(tsk);
++		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
++		       __FUNCTION__,__LINE__,
++		       address,regs->pc,textaccess,writeaccess);
++		show_regs(regs);
++#endif
++		goto bad_area;
++	}
++	if (vma->vm_start <= address) {
++		goto good_area;
++	}
 +
-+	ah = (regs->regs[3]) >> 32;
-+	al = (regs->regs[3]) & 0xffffffff;
-+	bh = (regs->regs[4]) >> 32;
-+	bl = (regs->regs[4]) & 0xffffffff;
-+	ch = (regs->regs[5]) >> 32;
-+	cl = (regs->regs[5]) & 0xffffffff;
-+	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++	if (!(vma->vm_flags & VM_GROWSDOWN)) {
++#ifdef DEBUG_FAULT
++		print_task(tsk);
++		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
++		       __FUNCTION__,__LINE__,
++		       address,regs->pc,textaccess,writeaccess);
++		show_regs(regs);
 +
-+	ah = (regs->regs[6]) >> 32;
-+	al = (regs->regs[6]) & 0xffffffff;
-+	bh = (regs->regs[7]) >> 32;
-+	bl = (regs->regs[7]) & 0xffffffff;
-+	ch = (regs->regs[8]) >> 32;
-+	cl = (regs->regs[8]) & 0xffffffff;
-+	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
++		print_vma(vma);
++#endif
++		goto bad_area;
++	}
++	if (expand_stack(vma, address)) {
++#ifdef DEBUG_FAULT
++		print_task(tsk);
++		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
++		       __FUNCTION__,__LINE__,
++		       address,regs->pc,textaccess,writeaccess);
++		show_regs(regs);
++#endif
++		goto bad_area;
++	}
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++	if (textaccess) {
++		if (!(vma->vm_flags & VM_EXEC))
++			goto bad_area;
++	} else {
++		if (writeaccess) {
++			if (!(vma->vm_flags & VM_WRITE))
++				goto bad_area;
++		} else {
++			if (!(vma->vm_flags & VM_READ))
++				goto bad_area;
++		}
++	}
 +
-+	ah = (regs->regs[9]) >> 32;
-+	al = (regs->regs[9]) & 0xffffffff;
-+	bh = (regs->regs[10]) >> 32;
-+	bl = (regs->regs[10]) & 0xffffffff;
-+	ch = (regs->regs[11]) >> 32;
-+	cl = (regs->regs[11]) & 0xffffffff;
-+	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+	printk("....\n");
++	/*
++	 * If for any reason at all we couldn't handle the fault,
++	 * make sure we exit gracefully rather than endlessly redo
++	 * the fault.
++	 */
++survive:
++	fault = handle_mm_fault(mm, vma, address, writeaccess);
++	if (unlikely(fault & VM_FAULT_ERROR)) {
++		if (fault & VM_FAULT_OOM)
++			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGBUS)
++			goto do_sigbus;
++		BUG();
++	}
++	if (fault & VM_FAULT_MAJOR)
++		tsk->maj_flt++;
++	else
++		tsk->min_flt++;
 +
-+	ah = (regs->tregs[0]) >> 32;
-+	al = (regs->tregs[0]) & 0xffffffff;
-+	bh = (regs->tregs[1]) >> 32;
-+	bl = (regs->tregs[1]) & 0xffffffff;
-+	ch = (regs->tregs[2]) >> 32;
-+	cl = (regs->tregs[2]) & 0xffffffff;
-+	printk("T0  : %08Lx%08Lx T1  : %08Lx%08Lx T2  : %08Lx%08Lx\n",
-+	       ah, al, bh, bl, ch, cl);
-+	printk("....\n");
++	/* If we get here, the page fault has been handled.  Do the TLB refill
++	   now from the newly-setup PTE, to avoid having to fault again right
++	   away on the same instruction. */
++	pte = lookup_pte (mm, address);
++	if (!pte) {
++		/* From empirical evidence, we can get here, due to
++		   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
++		   is swapped back out again before the process that wanted it
++		   gets rescheduled?) */
++		goto no_pte;
++	}
 +
-+	print_dtlb();
-+	print_itlb();
-+}
++	__do_tlb_refill(address, textaccess, pte);
 +
-+/* ======================================================================= */
++no_pte:
++
++	up_read(&mm->mmap_sem);
++	return;
 +
 +/*
-+** Depending on <base> scan the MMU, Data or Instruction side
-+** looking for a valid mapping matching Eaddr & asid.
-+** Return -1 if not found or the TLB id entry otherwise.
-+** Note: it works only for 4k pages!
-+*/
-+static unsigned long
-+lookup_mmu_side(unsigned long base, unsigned long Eaddr, unsigned long asid)
-+{
-+	regType_t pteH;
-+	unsigned long epn;
-+	int count;
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++#ifdef DEBUG_FAULT
++	printk("fault:bad area\n");
++#endif
++	up_read(&mm->mmap_sem);
 +
-+	epn = Eaddr & 0xfffff000;
++	if (user_mode(regs)) {
++		static int count=0;
++		siginfo_t info;
++		if (count < 4) {
++			/* This is really to help debug faults when starting
++			 * usermode, so only need a few */
++			count++;
++			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
++				address, task_pid_nr(current), current->comm,
++				(unsigned long) regs->pc);
++#if 0
++			show_regs(regs);
++#endif
++		}
++		if (is_global_init(tsk)) {
++			panic("INIT had user mode bad_area\n");
++		}
++		tsk->thread.address = address;
++		tsk->thread.error_code = writeaccess;
++		info.si_signo = SIGSEGV;
++		info.si_errno = 0;
++		info.si_addr = (void *) address;
++		force_sig_info(SIGSEGV, &info, tsk);
++		return;
++	}
++
++no_context:
++#ifdef DEBUG_FAULT
++	printk("fault:No context\n");
++#endif
++	/* Are we prepared to handle this kernel fault?  */
++	fixup = search_exception_tables(regs->pc);
++	if (fixup) {
++		regs->pc = fixup->fixup;
++		return;
++	}
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ *
++ */
++	if (address < PAGE_SIZE)
++		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++	else
++		printk(KERN_ALERT "Unable to handle kernel paging request");
++	printk(" at virtual address %08lx\n", address);
++	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
++	die("Oops", regs, writeaccess);
++	do_exit(SIGKILL);
 +
-+	for (count = 0; count < MAX_TLBs; count++, base += TLB_STEP) {
-+		pteH = getConfigReg(base);
-+		if (GET_VALID(pteH))
-+			if ((unsigned long) GET_EPN(pteH) == epn)
-+				if ((unsigned long) GET_ASID(pteH) == asid)
-+					break;
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++	if (is_global_init(current)) {
++		panic("INIT out of memory\n");
++		yield();
++		goto survive;
 +	}
-+	return ((unsigned long) ((count < MAX_TLBs) ? base : -1));
-+}
++	printk("fault:Out of memory\n");
++	up_read(&mm->mmap_sem);
++	if (is_global_init(current)) {
++		yield();
++		down_read(&mm->mmap_sem);
++		goto survive;
++	}
++	printk("VM: killing process %s\n", tsk->comm);
++	if (user_mode(regs))
++		do_group_exit(SIGKILL);
++	goto no_context;
 +
-+unsigned long lookup_dtlb(unsigned long Eaddr)
-+{
-+	unsigned long asid = get_asid();
-+	return (lookup_mmu_side((u64) DTLB_BASE, Eaddr, asid));
-+}
++do_sigbus:
++	printk("fault:Do sigbus\n");
++	up_read(&mm->mmap_sem);
 +
-+unsigned long lookup_itlb(unsigned long Eaddr)
-+{
-+	unsigned long asid = get_asid();
-+	return (lookup_mmu_side((u64) ITLB_BASE, Eaddr, asid));
++	/*
++	 * Send a sigbus, regardless of whether we were in kernel
++	 * or user mode.
++	 */
++	tsk->thread.address = address;
++	tsk->thread.error_code = writeaccess;
++	tsk->thread.trap_no = 14;
++	force_sig(SIGBUS, tsk);
++
++	/* Kernel mode? Handle exceptions or die */
++	if (!user_mode(regs))
++		goto no_context;
 +}
 +
-+void print_page(struct page *page)
++void update_mmu_cache(struct vm_area_struct * vma,
++			unsigned long address, pte_t pte)
 +{
-+	printk("  page[%p] -> index 0x%lx,  count 0x%x,  flags 0x%lx\n",
-+	       page, page->index, page_count(page), page->flags);
-+	printk("       address_space = %p, pages =%ld\n", page->mapping,
-+	       page->mapping->nrpages);
-+
++	/*
++	 * This appears to get called once for every pte entry that gets
++	 * established => I don't think it's efficient to try refilling the
++	 * TLBs with the pages - some may not get accessed even.  Also, for
++	 * executable pages, it is impossible to determine reliably here which
++	 * TLB they should be mapped into (or both even).
++	 *
++	 * So, just do nothing here and handle faults on demand.  In the
++	 * TLBMISS handling case, the refill is now done anyway after the pte
++	 * has been fixed up, so that deals with most useful cases.
++	 */
 +}
-diff --git a/arch/sh/lib64/memcpy.c b/arch/sh/lib64/memcpy.c
-new file mode 100644
-index 0000000..fba436a
---- /dev/null
-+++ b/arch/sh/lib64/memcpy.c
-@@ -0,0 +1,81 @@
-+/*
-+ * Copyright (C) 2002 Mark Debbage (Mark.Debbage at superh.com)
-+ *
-+ * May be copied or modified under the terms of the GNU General Public
-+ * License.  See linux/COPYING for more information.
-+ *
-+ */
-+
-+#include <linux/types.h>
-+#include <asm/string.h>
 +
-+// This is a simplistic optimization of memcpy to increase the
-+// granularity of access beyond one byte using aligned
-+// loads and stores. This is not an optimal implementation
-+// for SH-5 (especially with regard to prefetching and the cache),
-+// and a better version should be provided later ...
-+
-+void *memcpy(void *dest, const void *src, size_t count)
++void local_flush_tlb_one(unsigned long asid, unsigned long page)
 +{
-+	char *d = (char *) dest, *s = (char *) src;
++	unsigned long long match, pteh=0, lpage;
++	unsigned long tlb;
 +
-+	if (count >= 32) {
-+		int i = 8 - (((unsigned long) d) & 0x7);
++	/*
++	 * Sign-extend based on neff.
++	 */
++	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
++	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
++	match |= lpage;
 +
-+		if (i != 8)
-+			while (i-- && count--) {
-+				*d++ = *s++;
-+			}
++	for_each_itlb_entry(tlb) {
++		asm volatile ("getcfg	%1, 0, %0"
++			      : "=r" (pteh)
++			      : "r" (tlb) );
 +
-+		if (((((unsigned long) d) & 0x7) == 0) &&
-+		    ((((unsigned long) s) & 0x7) == 0)) {
-+			while (count >= 32) {
-+				unsigned long long t1, t2, t3, t4;
-+				t1 = *(unsigned long long *) (s);
-+				t2 = *(unsigned long long *) (s + 8);
-+				t3 = *(unsigned long long *) (s + 16);
-+				t4 = *(unsigned long long *) (s + 24);
-+				*(unsigned long long *) (d) = t1;
-+				*(unsigned long long *) (d + 8) = t2;
-+				*(unsigned long long *) (d + 16) = t3;
-+				*(unsigned long long *) (d + 24) = t4;
-+				d += 32;
-+				s += 32;
-+				count -= 32;
-+			}
-+			while (count >= 8) {
-+				*(unsigned long long *) d =
-+				    *(unsigned long long *) s;
-+				d += 8;
-+				s += 8;
-+				count -= 8;
-+			}
++		if (pteh == match) {
++			__flush_tlb_slot(tlb);
++			break;
 +		}
++	}
 +
-+		if (((((unsigned long) d) & 0x3) == 0) &&
-+		    ((((unsigned long) s) & 0x3) == 0)) {
-+			while (count >= 4) {
-+				*(unsigned long *) d = *(unsigned long *) s;
-+				d += 4;
-+				s += 4;
-+				count -= 4;
-+			}
-+		}
++	for_each_dtlb_entry(tlb) {
++		asm volatile ("getcfg	%1, 0, %0"
++			      : "=r" (pteh)
++			      : "r" (tlb) );
 +
-+		if (((((unsigned long) d) & 0x1) == 0) &&
-+		    ((((unsigned long) s) & 0x1) == 0)) {
-+			while (count >= 2) {
-+				*(unsigned short *) d = *(unsigned short *) s;
-+				d += 2;
-+				s += 2;
-+				count -= 2;
-+			}
++		if (pteh == match) {
++			__flush_tlb_slot(tlb);
++			break;
 +		}
-+	}
 +
-+	while (count--) {
-+		*d++ = *s++;
 +	}
++}
 +
-+	return d;
++void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
++{
++	unsigned long flags;
++
++	if (vma->vm_mm) {
++		page &= PAGE_MASK;
++		local_irq_save(flags);
++		local_flush_tlb_one(get_asid(), page);
++		local_irq_restore(flags);
++	}
 +}
-diff --git a/arch/sh/lib64/panic.c b/arch/sh/lib64/panic.c
-new file mode 100644
-index 0000000..ff559e2
---- /dev/null
-+++ b/arch/sh/lib64/panic.c
-@@ -0,0 +1,58 @@
-+/*
-+ * Copyright (C) 2003  Richard Curnow, SuperH UK Limited
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
 +
-+#include <linux/kernel.h>
-+#include <asm/io.h>
-+#include <asm/cpu/registers.h>
++void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
++			   unsigned long end)
++{
++	unsigned long flags;
++	unsigned long long match, pteh=0, pteh_epn, pteh_low;
++	unsigned long tlb;
++	unsigned int cpu = smp_processor_id();
++	struct mm_struct *mm;
 +
-+/* THIS IS A PHYSICAL ADDRESS */
-+#define HDSP2534_ADDR (0x04002100)
++	mm = vma->vm_mm;
++	if (cpu_context(cpu, mm) == NO_CONTEXT)
++		return;
 +
-+#ifdef CONFIG_SH_CAYMAN
++	local_irq_save(flags);
 +
-+static void poor_mans_delay(void)
-+{
-+	int i;
-+	for (i = 0; i < 2500000; i++) {
-+	}		/* poor man's delay */
-+}
++	start &= PAGE_MASK;
++	end &= PAGE_MASK;
 +
-+static void show_value(unsigned long x)
-+{
-+	int i;
-+	unsigned nibble;
-+	for (i = 0; i < 8; i++) {
-+		nibble = ((x >> (i * 4)) & 0xf);
++	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
 +
-+		ctrl_outb(nibble + ((nibble > 9) ? 55 : 48),
-+			  HDSP2534_ADDR + 0xe0 + ((7 - i) << 2));
-+	}
-+}
++	/* Flush ITLB */
++	for_each_itlb_entry(tlb) {
++		asm volatile ("getcfg	%1, 0, %0"
++			      : "=r" (pteh)
++			      : "r" (tlb) );
 +
-+#endif
++		pteh_epn = pteh & PAGE_MASK;
++		pteh_low = pteh & ~PAGE_MASK;
 +
-+void
-+panic_handler(unsigned long panicPC, unsigned long panicSSR,
-+	      unsigned long panicEXPEVT)
-+{
-+#ifdef CONFIG_SH_CAYMAN
-+	while (1) {
-+		/* This piece of code displays the PC on the LED display */
-+		show_value(panicPC);
-+		poor_mans_delay();
-+		show_value(panicSSR);
-+		poor_mans_delay();
-+		show_value(panicEXPEVT);
-+		poor_mans_delay();
++		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
++			__flush_tlb_slot(tlb);
 +	}
-+#endif
 +
-+	/* Never return from the panic handler */
-+	for (;;) ;
++	/* Flush DTLB */
++	for_each_dtlb_entry(tlb) {
++		asm volatile ("getcfg	%1, 0, %0"
++			      : "=r" (pteh)
++			      : "r" (tlb) );
 +
-+}
-diff --git a/arch/sh/lib64/udelay.c b/arch/sh/lib64/udelay.c
-new file mode 100644
-index 0000000..23c7d17
---- /dev/null
-+++ b/arch/sh/lib64/udelay.c
-@@ -0,0 +1,56 @@
-+/*
-+ * arch/sh/lib64/udelay.c
-+ *
-+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003, 2004  Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/sched.h>
-+#include <asm/param.h>
++		pteh_epn = pteh & PAGE_MASK;
++		pteh_low = pteh & ~PAGE_MASK;
 +
-+/*
-+ * Use only for very small delays (< 1 msec).
-+ *
-+ * The active part of our cycle counter is only 32-bits wide, and
-+ * we're treating the difference between two marks as signed.  On
-+ * a 1GHz box, that's about 2 seconds.
-+ */
++		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
++			__flush_tlb_slot(tlb);
++	}
 +
-+void __delay(int loops)
-+{
-+	long long dummy;
-+	__asm__ __volatile__("gettr	tr0, %1\n\t"
-+			     "pta	$+4, tr0\n\t"
-+			     "addi	%0, -1, %0\n\t"
-+			     "bne	%0, r63, tr0\n\t"
-+			     "ptabs	%1, tr0\n\t":"=r"(loops),
-+			     "=r"(dummy)
-+			     :"0"(loops));
++	local_irq_restore(flags);
 +}
 +
-+void __udelay(unsigned long long usecs, unsigned long lpj)
++void local_flush_tlb_mm(struct mm_struct *mm)
 +{
-+	usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj;
-+	__delay((long long) usecs >> 32);
-+}
++	unsigned long flags;
++	unsigned int cpu = smp_processor_id();
 +
-+void __ndelay(unsigned long long nsecs, unsigned long lpj)
-+{
-+	nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj;
-+	__delay((long long) nsecs >> 32);
++	if (cpu_context(cpu, mm) == NO_CONTEXT)
++		return;
++
++	local_irq_save(flags);
++
++	cpu_context(cpu, mm) = NO_CONTEXT;
++	if (mm == current->mm)
++		activate_context(mm, cpu);
++
++	local_irq_restore(flags);
 +}
 +
-+void udelay(unsigned long usecs)
++void local_flush_tlb_all(void)
 +{
-+	__udelay(usecs, cpu_data[raw_smp_processor_id()].loops_per_jiffy);
++	/* Invalidate all, including shared pages, excluding fixed TLBs */
++	unsigned long flags, tlb;
++
++	local_irq_save(flags);
++
++	/* Flush each ITLB entry */
++	for_each_itlb_entry(tlb)
++		__flush_tlb_slot(tlb);
++
++	/* Flush each DTLB entry */
++	for_each_dtlb_entry(tlb)
++		__flush_tlb_slot(tlb);
++
++	local_irq_restore(flags);
 +}
 +
-+void ndelay(unsigned long nsecs)
++void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 +{
-+	__ndelay(nsecs, cpu_data[raw_smp_processor_id()].loops_per_jiffy);
++        /* FIXME: Optimize this later.. */
++        flush_tlb_all();
 +}
-diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
-index 1265f20..f549b8c 100644
---- a/arch/sh/mm/Kconfig
-+++ b/arch/sh/mm/Kconfig
-@@ -1,193 +1,3 @@
+diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
+index ff07169..2581067 100644
+--- a/arch/sh/tools/mach-types
++++ b/arch/sh/tools/mach-types
+@@ -29,7 +29,6 @@ HP6XX			SH_HP6XX
+ DREAMCAST		SH_DREAMCAST
+ MPC1211			SH_MPC1211
+ SNAPGEAR		SH_SECUREEDGE5410
+-HS7751RVOIP		SH_HS7751RVOIP
+ EDOSK7705		SH_EDOSK7705
+ SH4202_MICRODEV		SH_SH4202_MICRODEV
+ SH03			SH_SH03
+@@ -45,3 +44,4 @@ X3PROTO			SH_X3PROTO
+ MAGICPANELR2		SH_MAGIC_PANEL_R2
+ R2D_PLUS		RTS7751R2D_PLUS
+ R2D_1			RTS7751R2D_1
++CAYMAN			SH_CAYMAN
+diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig
+deleted file mode 100644
+index 6884d5a..0000000
+--- a/arch/sh64/Kconfig
++++ /dev/null
+@@ -1,295 +0,0 @@
+-#
+-# For a description of the syntax of this configuration file,
+-# see Documentation/kbuild/kconfig-language.txt.
+-#
+-
+-mainmenu "Linux/SH64 Kernel Configuration"
+-
+-config SUPERH
+-	bool
+-	default y
+-
+-config SUPERH64
+-	bool
+-	default y
+-
+-config MMU
+-	bool
+-	default y
+-
+-config QUICKLIST
+-	def_bool y
+-
+-config RWSEM_GENERIC_SPINLOCK
+-	bool
+-	default y
+-
+-config GENERIC_FIND_NEXT_BIT
+-	bool
+-	default y
+-
+-config GENERIC_HWEIGHT
+-	bool
+-	default y
+-
+-config GENERIC_CALIBRATE_DELAY
+-	bool
+-	default y
+-
+-config GENERIC_HARDIRQS
+-	bool
+-	default y
+-
+-config GENERIC_IRQ_PROBE
+-	bool
+-	default y
+-
+-config RWSEM_XCHGADD_ALGORITHM
+-	bool
+-
+-config ARCH_HAS_ILOG2_U32
+-	bool
+-	default n
+-
+-config ARCH_HAS_ILOG2_U64
+-	bool
+-	default n
+-
+-config ARCH_NO_VIRT_TO_BUS
+-	def_bool y
+-
+-source init/Kconfig
+-
+-menu "System type"
+-
+-choice
+-	prompt "SuperH system type"
+-	default SH_SIMULATOR
+-
+-config SH_SIMULATOR
+-	bool "Simulator"
+-
+-config SH_CAYMAN
+-	bool "Cayman"
+-
+-config SH_HARP
+-	bool "ST50-Harp"
+-
+-endchoice
+-
+-choice
+-	prompt "Processor family"
+-	default CPU_SH5
+-
+-config CPU_SH5
+-	bool "SH-5"
+-
+-endchoice
+-
+-choice
+-	prompt "Processor type"
+-
+-config CPU_SUBTYPE_SH5_101
+-	bool "SH5-101"
+-	depends on CPU_SH5
+-
+-config CPU_SUBTYPE_SH5_103
+-	bool "SH5-103"
+-	depends on CPU_SH5
+-
+-endchoice
+-
+-choice
+-	prompt "Endianness"
+-	default LITTLE_ENDIAN
+-
+-config LITTLE_ENDIAN
+-	bool "Little-Endian"
+-
+-config BIG_ENDIAN
+-	bool "Big-Endian"
+-
+-endchoice
+-
+-config SH_FPU
+-	bool "FPU support"
+-	default y
+-
+-config SH64_FPU_DENORM_FLUSH
+-	depends on SH_FPU
+-	bool "Flush floating point denorms to zero"
+-
+-choice
+-	prompt "Page table levels"
+-	default SH64_PGTABLE_2_LEVEL
+-
+-config SH64_PGTABLE_2_LEVEL
+-	bool "2"
+-
+-config SH64_PGTABLE_3_LEVEL
+-	bool "3"
+-
+-endchoice
+-
+-choice
+-	prompt "HugeTLB page size"
+-	depends on HUGETLB_PAGE && MMU
+-	default HUGETLB_PAGE_SIZE_64K
+-
+-config HUGETLB_PAGE_SIZE_64K
+-	bool "64K"
+-
+-config HUGETLB_PAGE_SIZE_1MB
+-	bool "1MB"
+-
+-config HUGETLB_PAGE_SIZE_512MB
+-	bool "512MB"
+-
+-endchoice
+-
+-config SH64_USER_MISALIGNED_FIXUP
+-	bool "Fixup misaligned loads/stores occurring in user mode"
+-
+-comment "Memory options"
+-
+-config CACHED_MEMORY_OFFSET
+-	hex "Cached Area Offset"
+-	default "20000000"
+-
+-config MEMORY_START
+-	hex "Physical memory start address"
+-	default "80000000"
+-
+-config MEMORY_SIZE_IN_MB
+-	int "Memory size (in MB)"
+-	default "8" if SH_SIMULATOR
+-	default "64"
+-
+-comment "Cache options"
+-
+-choice
+-	prompt "DCache mode"
+-	default DCACHE_DISABLED if SH_SIMULATOR
+-	default DCACHE_WRITE_BACK
+-
+-config DCACHE_WRITE_BACK
+-	bool "Write-back"
+-	depends on !SH_SIMULATOR
+-
+-config DCACHE_WRITE_THROUGH
+-	bool "Write-through"
+-	depends on !SH_SIMULATOR
+-
+-config DCACHE_DISABLED
+-	bool "Disabled"
+-
+-endchoice
+-
+-config ICACHE_DISABLED
+-	bool "ICache Disabling"
+-
+-config PCIDEVICE_MEMORY_START
+-	hex
+-	default "C0000000"
+-
+-config DEVICE_MEMORY_START
+-	hex
+-	default "E0000000"
+-
+-config FLASH_MEMORY_START
+-	hex "Flash memory/on-chip devices start address"
+-	default "00000000"
+-
+-config PCI_BLOCK_START
+-	hex "PCI block start address"
+-	default "40000000"
+-
+-comment "CPU Subtype specific options"
+-
+-config SH64_ID2815_WORKAROUND
+-	bool "Include workaround for SH5-101 cut2 silicon defect ID2815"
+-
+-comment "Misc options"
+-
+-config HEARTBEAT
+-	bool "Heartbeat LED"
+-	depends on SH_CAYMAN
+-
+-config HDSP253_LED
+-	bool "Support for HDSP-253 LED"
+-	depends on SH_CAYMAN
+-
+-config SH_DMA
+-	tristate "DMA controller (DMAC) support"
+-
+-config PREEMPT
+-	bool "Preemptible Kernel (EXPERIMENTAL)"
+-	depends on EXPERIMENTAL
+-
+-source "mm/Kconfig"
+-
+-endmenu
+-
+-menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
+-
+-config ISA
+-	bool
+-
+-config SBUS
+-	bool
+-
+-config PCI
+-	bool "PCI support"
+-	depends on SH_CAYMAN
+-	help
+-	  Find out whether you have a PCI motherboard. PCI is the name of a
+-	  bus system, i.e. the way the CPU talks to the other stuff inside
+-	  your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+-	  VESA. If you have PCI, say Y, otherwise N.
+-
+-	  The PCI-HOWTO, available from
+-	  <http://www.tldp.org/docs.html#howto>, contains valuable
+-	  information about which PCI hardware does work under Linux and which
+-	  doesn't.
+-
+-config SH_PCIDMA_NONCOHERENT
+-	bool "Cache and PCI noncoherent"
+-	depends on PCI
+-	default y
+-	help
+-	  Enable this option if your platform does not have a CPU cache which
+-	  remains coherent with PCI DMA. It is safest to say 'Y', although you
+-	  will see better performance if you can say 'N', because the PCI DMA
+-	  code will not have to flush the CPU's caches. If you have a PCI host
+-	  bridge integrated with your SH CPU, refer carefully to the chip specs
+-	  to see if you can say 'N' here. Otherwise, leave it as 'Y'.
+-
+-source "drivers/pci/Kconfig"
+-
+-source "drivers/pcmcia/Kconfig"
+-
+-source "drivers/pci/hotplug/Kconfig"
+-
+-endmenu
+-
+-menu "Executable file formats"
+-
+-source "fs/Kconfig.binfmt"
+-
+-endmenu
+-
+-source "net/Kconfig"
+-
+-source "drivers/Kconfig"
+-
+-source "fs/Kconfig"
+-
+-source "kernel/Kconfig.instrumentation"
+-
+-source "arch/sh64/Kconfig.debug"
+-
+-source "security/Kconfig"
+-
+-source "crypto/Kconfig"
+-
+-source "lib/Kconfig"
+diff --git a/arch/sh64/Kconfig.debug b/arch/sh64/Kconfig.debug
+deleted file mode 100644
+index 05c07c4..0000000
+--- a/arch/sh64/Kconfig.debug
++++ /dev/null
+@@ -1,33 +0,0 @@
+-menu "Kernel hacking"
+-
+-source "lib/Kconfig.debug"
+-
+-config EARLY_PRINTK
+-	bool "Early SCIF console support"
+-
+-config SH64_PROC_TLB
+-	bool "Debug: report TLB fill/purge activity through /proc/tlb"
+-	depends on PROC_FS
+-
+-config SH64_PROC_ASIDS
+-	bool "Debug: report ASIDs through /proc/asids"
+-	depends on PROC_FS
+-
+-config SH64_SR_WATCH
+-	bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
+-
+-config POOR_MANS_STRACE
+-	bool "Debug: enable rudimentary strace facility"
+-	help
+-	  This option allows system calls to be traced to the console.  It also
+-	  aids in detecting kernel stack underflow.  It is useful for debugging
+-	  early-userland problems (e.g. init incurring fatal exceptions.)
+-
+-config SH_ALPHANUMERIC
+-	bool "Enable debug outputs to on-board alphanumeric display"
+-	depends on SH_CAYMAN
+-
+-config SH_NO_BSS_INIT
+-	bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)"
+-
+-endmenu
+diff --git a/arch/sh64/Makefile b/arch/sh64/Makefile
+deleted file mode 100644
+index 8dac7e1..0000000
+--- a/arch/sh64/Makefile
++++ /dev/null
+@@ -1,111 +0,0 @@
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 2000, 2001  Paolo Alberelli
+-# Copyright (C) 2003, 2004  Paul Mundt
+-#
+-# This file is included by the global makefile so that you can add your own
+-# architecture-specific flags and dependencies. Remember to do have actions
+-# for "archclean" and "archdep" for cleaning up and making dependencies for
+-# this architecture
+-#
+-
+-cpu-y				:= -mb
+-cpu-$(CONFIG_LITTLE_ENDIAN)	:= -ml
+-
+-cpu-$(CONFIG_CPU_SH5)		+= -m5-32media-nofpu
+-
+-ifdef CONFIG_LITTLE_ENDIAN
+-LDFLAGS_vmlinux		+= --defsym 'jiffies=jiffies_64'
+-LDFLAGS			+= -EL  -mshlelf32_linux
+-else
+-LDFLAGS_vmlinux		+= --defsym 'jiffies=jiffies_64+4'
+-LDFLAGS			+= -EB  -mshelf32_linux
+-endif
+-
+-# No requirements for endianess support from AFLAGS, 'as' always run through gcc
+-KBUILD_CFLAGS		+= $(cpu-y)
+-
+-LDFLAGS_vmlinux	+= --defsym phys_stext=_stext-$(CONFIG_CACHED_MEMORY_OFFSET) \
+-		  --defsym phys_stext_shmedia=phys_stext+1 \
+-		  -e phys_stext_shmedia
+-
+-OBJCOPYFLAGS	:= -O binary -R .note -R .comment -R .stab -R .stabstr -S
+-
+-#
+-# arch/sh64/defconfig never had any hope of being
+-# frequently updated, so use one that does
+-#
+-KBUILD_DEFCONFIG	:= cayman_defconfig
+-
+-KBUILD_IMAGE		:= arch/$(ARCH)/boot/zImage
+-
+-ifdef LOADADDR
+-LINKFLAGS     += -Ttext $(word 1,$(LOADADDR))
+-endif
+-
+-machine-$(CONFIG_SH_CAYMAN)	:= cayman
+-machine-$(CONFIG_SH_SIMULATOR)	:= sim
+-machine-$(CONFIG_SH_HARP)	:= harp
+-
+-head-y := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o
+-
+-core-y	+= arch/sh64/kernel/ arch/sh64/mm/
+-
+-ifneq ($(machine-y),)
+-core-y	+= arch/sh64/mach-$(machine-y)/
+-endif
+-
+-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+-libs-y	+= arch/$(ARCH)/lib/ $(LIBGCC)
+-
+-drivers-$(CONFIG_OPROFILE)	+= arch/sh64/oprofile/
+-
+-boot := arch/$(ARCH)/boot
+-
+-zImage: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+-
+-compressed: zImage
+-
+-archclean:
+-	$(Q)$(MAKE) $(clean)=$(boot)
+-
+-archprepare: arch/$(ARCH)/lib/syscalltab.h
+-
+-define filechk_gen-syscalltab
+-       (set -e; \
+-	echo "/*"; \
+-	echo " * DO NOT MODIFY."; \
+-	echo " *"; \
+-	echo " * This file was generated by arch/$(ARCH)/Makefile"; \
+-	echo " * Any changes will be reverted at build time."; \
+-	echo " */"; \
+-	echo ""; \
+-	echo "#ifndef __SYSCALLTAB_H"; \
+-	echo "#define __SYSCALLTAB_H"; \
+-	echo ""; \
+-	echo "#include <linux/kernel.h>"; \
+-	echo ""; \
+-	echo "struct syscall_info {"; \
+-	echo "	const char *name;"; \
+-	echo "} syscall_info_table[] = {"; \
+-	sed -e '/^.*\.long /!d;s//    { "/;s/\(\([^/]*\)\/\)\{1\}.*/\2/; \
+-		s/[ \t]*$$//g;s/$$/" },/;s/\("\)sys_/\1/g'; \
+-	echo "};"; \
+-	echo ""; \
+-	echo "#define NUM_SYSCALL_INFO_ENTRIES	ARRAY_SIZE(syscall_info_table)"; \
+-	echo ""; \
+-	echo "#endif /* __SYSCALLTAB_H */" )
+-endef
+-
+-arch/$(ARCH)/lib/syscalltab.h: arch/sh64/kernel/syscalls.S
+-	$(call filechk,gen-syscalltab)
+-
+-CLEAN_FILES += arch/$(ARCH)/lib/syscalltab.h
+-
+-define archhelp
+-	@echo '* zImage 	           - Compressed kernel image'
+-endef
+diff --git a/arch/sh64/boot/Makefile b/arch/sh64/boot/Makefile
+deleted file mode 100644
+index fb71087..0000000
+--- a/arch/sh64/boot/Makefile
++++ /dev/null
+@@ -1,20 +0,0 @@
+-#
+-# arch/sh64/boot/Makefile
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 2002 Stuart Menefy
+-#
+-
+-targets := zImage
+-subdir- := compressed
+-
+-$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
+-	$(call if_changed,objcopy)
+-	@echo 'Kernel: $@ is ready'
+-
+-$(obj)/compressed/vmlinux: FORCE
+-	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
+-
+diff --git a/arch/sh64/boot/compressed/Makefile b/arch/sh64/boot/compressed/Makefile
+deleted file mode 100644
+index 9cd2167..0000000
+--- a/arch/sh64/boot/compressed/Makefile
++++ /dev/null
+@@ -1,46 +0,0 @@
+-#
+-# linux/arch/sh64/boot/compressed/Makefile
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 2002 Stuart Menefy
+-# Copyright (C) 2004 Paul Mundt
+-#
+-# create a compressed vmlinux image from the original vmlinux
+-#
+-
+-targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
+-		   head.o misc.o cache.o piggy.o vmlinux.lds
+-
+-EXTRA_AFLAGS	:= -traditional
+-
+-OBJECTS		:= $(obj)/head.o $(obj)/misc.o $(obj)/cache.o
+-
+-#
+-# ZIMAGE_OFFSET is the load offset of the compression loader
+-# (4M for the kernel plus 64K for this loader)
+-#
+-ZIMAGE_OFFSET = $(shell printf "0x%8x" $$[$(CONFIG_MEMORY_START)+0x400000+0x10000])
+-
+-LDFLAGS_vmlinux := -Ttext $(ZIMAGE_OFFSET) -e startup \
+-		    -T $(obj)/../../kernel/vmlinux.lds \
+-		    --no-warn-mismatch
+-
+-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
+-	$(call if_changed,ld)
+-	@:
+-
+-$(obj)/vmlinux.bin: vmlinux FORCE
+-	$(call if_changed,objcopy)
+-
+-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+-	$(call if_changed,gzip)
+-
+-LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh64-linux -T
+-OBJCOPYFLAGS += -R .empty_zero_page
+-
+-$(obj)/piggy.o: $(obj)/vmlinux.lds $(obj)/vmlinux.bin.gz FORCE
+-	$(call if_changed,ld)
+-
+diff --git a/arch/sh64/boot/compressed/cache.c b/arch/sh64/boot/compressed/cache.c
+deleted file mode 100644
+index 7087073..0000000
+--- a/arch/sh64/boot/compressed/cache.c
++++ /dev/null
+@@ -1,39 +0,0 @@
+-/*
+- * arch/shmedia/boot/compressed/cache.c -- simple cache management functions
+- *
+- * Code extracted from sh-ipl+g, sh-stub.c, which has the copyright:
+- *
+- *   This is originally based on an m68k software stub written by Glenn
+- *   Engel at HP, but has changed quite a bit.
+- *
+- *   Modifications for the SH by Ben Lee and Steve Chamberlain
+- *
+-****************************************************************************
+-
+-		THIS SOFTWARE IS NOT COPYRIGHTED
+-
+-   HP offers the following for use in the public domain.  HP makes no
+-   warranty with regard to the software or it's performance and the
+-   user accepts the software "AS IS" with all faults.
+-
+-   HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
+-   TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+-   OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+-
+-****************************************************************************/
+-
+-#define CACHE_ENABLE      0
+-#define CACHE_DISABLE     1
+-
+-int cache_control(unsigned int command)
+-{
+-	volatile unsigned int *p = (volatile unsigned int *) 0x80000000;
+-	int i;
+-
+-	for (i = 0; i < (32 * 1024); i += 32) {
+-		(void *) *p;
+-		p += (32 / sizeof (int));
+-	}
+-
+-	return 0;
+-}
+diff --git a/arch/sh64/boot/compressed/head.S b/arch/sh64/boot/compressed/head.S
+deleted file mode 100644
+index 82040b1..0000000
+--- a/arch/sh64/boot/compressed/head.S
++++ /dev/null
+@@ -1,164 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/shmedia/boot/compressed/head.S
+- *
+- * Copied from
+- *   arch/shmedia/kernel/head.S
+- * which carried the copyright:
+- *   Copyright (C) 2000, 2001  Paolo Alberelli
+- *
+- * Modification for compressed loader:
+- *   Copyright (C) 2002 Stuart Menefy (stuart.menefy at st.com)
+- */
+-
+-#include <linux/linkage.h>
+-#include <asm/registers.h>
+-#include <asm/cache.h>
+-#include <asm/mmu_context.h>
+-
+-/*
+- * Fixed TLB entries to identity map the beginning of RAM
+- */
+-#define MMUIR_TEXT_H	0x0000000000000003 | CONFIG_MEMORY_START
+-			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+-#define MMUIR_TEXT_L	0x000000000000009a | CONFIG_MEMORY_START
+-			/* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. */
+-
+-#define MMUDR_CACHED_H	0x0000000000000003 | CONFIG_MEMORY_START
+-			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+-#define MMUDR_CACHED_L	0x000000000000015a | CONFIG_MEMORY_START
+-			/* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */
+-
+-#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
+-#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
+-
+-#if 1
+-#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	/* OCE + OCI + WB */
+-#else
+-#define	OCCR0_INIT_VAL	OCCR0_OFF
+-#endif
+-#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			/* No locking */
+-
+-	.text
+-
+-	.global	startup
+-startup:
+-	/*
+-	 * Prevent speculative fetch on device memory due to
+-	 * uninitialized target registers.
+-	 * This must be executed before the first branch.
+-	 */
+-	ptabs/u	ZERO, tr0
+-	ptabs/u	ZERO, tr1
+-	ptabs/u	ZERO, tr2
+-	ptabs/u	ZERO, tr3
+-	ptabs/u	ZERO, tr4
+-	ptabs/u	ZERO, tr5
+-	ptabs/u	ZERO, tr6
+-	ptabs/u	ZERO, tr7
+-	synci
+-
+-	/*
+-	 * Set initial TLB entries for cached and uncached regions.
+-	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
+-	 */
+-	/* Clear ITLBs */
+-	pta	1f, tr1
+-	movi	ITLB_FIXED, r21
+-	movi	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
+-1:	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
+-	addi	r21, TLB_STEP, r21
+-        bne	r21, r22, tr1
+-
+-	/* Clear DTLBs */
+-	pta	1f, tr1
+-	movi	DTLB_FIXED, r21
+-	movi	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
+-1:	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
+-	addi	r21, TLB_STEP, r21
+-        bne	r21, r22, tr1
+-
+-	/* Map one big (512Mb) page for ITLB */
+-	movi	ITLB_FIXED, r21
+-	movi	MMUIR_TEXT_L, r22	/* PTEL first */
+-	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
+-	movi	MMUIR_TEXT_H, r22	/* PTEH last */
+-	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */
+-
+-	/* Map one big CACHED (512Mb) page for DTLB */
+-	movi	DTLB_FIXED, r21
+-	movi	MMUDR_CACHED_L, r22	/* PTEL first */
+-	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
+-	movi	MMUDR_CACHED_H, r22	/* PTEH last */
+-	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
+-
+-	/* ICache */
+-	movi	ICCR_BASE, r21
+-	movi	ICCR0_INIT_VAL, r22
+-	movi	ICCR1_INIT_VAL, r23
+-	putcfg	r21, ICCR_REG0, r22
+-	putcfg	r21, ICCR_REG1, r23
+-	synci
+-
+-	/* OCache */
+-	movi	OCCR_BASE, r21
+-	movi	OCCR0_INIT_VAL, r22
+-	movi	OCCR1_INIT_VAL, r23
+-	putcfg	r21, OCCR_REG0, r22
+-	putcfg	r21, OCCR_REG1, r23
+-	synco
+-
+-	/*
+-	 * Enable the MMU.
+-	 * From here-on code can be non-PIC.
+-	 */
+-	movi	SR_HARMLESS | SR_ENABLE_MMU, r22
+-	putcon	r22, SSR
+-	movi	1f, r22
+-	putcon	r22, SPC
+-	synco
+-	rte				/* And now go into the hyperspace ... */
+-1:					/* ... that's the next instruction ! */
+-
+-	/* Set initial stack pointer */
+-	movi	datalabel stack_start, r0
+-	ld.l	r0, 0, r15
+-
+-	/*
+-	 * Clear bss
+-	 */
+-	pt	1f, tr1
+-	movi	datalabel __bss_start, r22
+-	movi	datalabel _end, r23
+-1:	st.l	r22, 0, ZERO
+-	addi	r22, 4, r22
+-	bne	r22, r23, tr1
+-
+-	/*
+-	 * Decompress the kernel.
+-	 */
+-	pt	decompress_kernel, tr0
+-	blink	tr0, r18
+-
+-	/*
+-	 * Disable the MMU.
+-	 */
+-	movi	SR_HARMLESS, r22
+-	putcon	r22, SSR
+-	movi	1f, r22
+-	putcon	r22, SPC
+-	synco
+-	rte				/* And now go into the hyperspace ... */
+-1:					/* ... that's the next instruction ! */
+-
+-	/* Jump into the decompressed kernel */
+-	movi	datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19
+-	ptabs	r19, tr0
+-	blink	tr0, r18
+-
+-	/* Shouldn't return here, but just in case, loop forever */
+-	pt	1f, tr0
+-1:	blink	tr0, ZERO
+diff --git a/arch/sh64/boot/compressed/install.sh b/arch/sh64/boot/compressed/install.sh
+deleted file mode 100644
+index 90589f0..0000000
+--- a/arch/sh64/boot/compressed/install.sh
++++ /dev/null
+@@ -1,56 +0,0 @@
+-#!/bin/sh
+-#
+-# arch/sh/boot/install.sh
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 1995 by Linus Torvalds
+-#
+-# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+-# Adapted from code in arch/i386/boot/install.sh by Russell King
+-# Adapted from code in arch/arm/boot/install.sh by Stuart Menefy
+-#
+-# "make install" script for sh architecture
+-#
+-# Arguments:
+-#   $1 - kernel version
+-#   $2 - kernel image file
+-#   $3 - kernel map file
+-#   $4 - default install path (blank if root directory)
+-#
+-
+-# User may have a custom install script
+-
+-if [ -x /sbin/installkernel ]; then
+-  exec /sbin/installkernel "$@"
+-fi
+-
+-if [ "$2" = "zImage" ]; then
+-# Compressed install
+-  echo "Installing compressed kernel"
+-  if [ -f $4/vmlinuz-$1 ]; then
+-    mv $4/vmlinuz-$1 $4/vmlinuz.old
+-  fi
+-
+-  if [ -f $4/System.map-$1 ]; then
+-    mv $4/System.map-$1 $4/System.old
+-  fi
+-
+-  cat $2 > $4/vmlinuz-$1
+-  cp $3 $4/System.map-$1
+-else
+-# Normal install
+-  echo "Installing normal kernel"
+-  if [ -f $4/vmlinux-$1 ]; then
+-    mv $4/vmlinux-$1 $4/vmlinux.old
+-  fi
+-
+-  if [ -f $4/System.map ]; then
+-    mv $4/System.map $4/System.old
+-  fi
+-
+-  cat $2 > $4/vmlinux-$1
+-  cp $3 $4/System.map
+-fi
+diff --git a/arch/sh64/boot/compressed/misc.c b/arch/sh64/boot/compressed/misc.c
+deleted file mode 100644
+index aea00c5..0000000
+--- a/arch/sh64/boot/compressed/misc.c
++++ /dev/null
+@@ -1,250 +0,0 @@
+-/*
+- * arch/sh64/boot/compressed/misc.c
+- *
+- * This is a collection of several routines from gzip-1.0.3
+- * adapted for Linux.
+- *
+- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+- *
+- * Adapted for SHmedia from sh by Stuart Menefy, May 2002
+- */
+-
+-#include <asm/uaccess.h>
+-
+-/* cache.c */
+-#define CACHE_ENABLE      0
+-#define CACHE_DISABLE     1
+-int cache_control(unsigned int command);
+-
+-/*
+- * gzip declarations
+- */
+-
+-#define OF(args)  args
+-#define STATIC static
+-
+-#undef memset
+-#undef memcpy
+-#define memzero(s, n)     memset ((s), 0, (n))
+-
+-typedef unsigned char uch;
+-typedef unsigned short ush;
+-typedef unsigned long ulg;
+-
+-#define WSIZE 0x8000		/* Window size must be at least 32k, */
+-				/* and a power of two */
+-
+-static uch *inbuf;		/* input buffer */
+-static uch window[WSIZE];	/* Sliding window buffer */
+-
+-static unsigned insize = 0;	/* valid bytes in inbuf */
+-static unsigned inptr = 0;	/* index of next byte to be processed in inbuf */
+-static unsigned outcnt = 0;	/* bytes in output buffer */
+-
+-/* gzip flag byte */
+-#define ASCII_FLAG   0x01	/* bit 0 set: file probably ASCII text */
+-#define CONTINUATION 0x02	/* bit 1 set: continuation of multi-part gzip file */
+-#define EXTRA_FIELD  0x04	/* bit 2 set: extra field present */
+-#define ORIG_NAME    0x08	/* bit 3 set: original file name present */
+-#define COMMENT      0x10	/* bit 4 set: file comment present */
+-#define ENCRYPTED    0x20	/* bit 5 set: file is encrypted */
+-#define RESERVED     0xC0	/* bit 6,7:   reserved */
+-
+-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+-
+-/* Diagnostic functions */
+-#ifdef DEBUG
+-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
+-#  define Trace(x) fprintf x
+-#  define Tracev(x) {if (verbose) fprintf x ;}
+-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
+-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+-#else
+-#  define Assert(cond,msg)
+-#  define Trace(x)
+-#  define Tracev(x)
+-#  define Tracevv(x)
+-#  define Tracec(c,x)
+-#  define Tracecv(c,x)
+-#endif
+-
+-static int fill_inbuf(void);
+-static void flush_window(void);
+-static void error(char *m);
+-static void gzip_mark(void **);
+-static void gzip_release(void **);
+-
+-extern char input_data[];
+-extern int input_len;
+-
+-static long bytes_out = 0;
+-static uch *output_data;
+-static unsigned long output_ptr = 0;
+-
+-static void *malloc(int size);
+-static void free(void *where);
+-static void error(char *m);
+-static void gzip_mark(void **);
+-static void gzip_release(void **);
+-
+-static void puts(const char *);
+-
+-extern int _text;		/* Defined in vmlinux.lds.S */
+-extern int _end;
+-static unsigned long free_mem_ptr;
+-static unsigned long free_mem_end_ptr;
+-
+-#define HEAP_SIZE             0x10000
+-
+-#include "../../../../lib/inflate.c"
+-
+-static void *malloc(int size)
+-{
+-	void *p;
+-
+-	if (size < 0)
+-		error("Malloc error\n");
+-	if (free_mem_ptr == 0)
+-		error("Memory error\n");
+-
+-	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
+-
+-	p = (void *) free_mem_ptr;
+-	free_mem_ptr += size;
+-
+-	if (free_mem_ptr >= free_mem_end_ptr)
+-		error("\nOut of memory\n");
+-
+-	return p;
+-}
+-
+-static void free(void *where)
+-{				/* Don't care */
+-}
+-
+-static void gzip_mark(void **ptr)
+-{
+-	*ptr = (void *) free_mem_ptr;
+-}
+-
+-static void gzip_release(void **ptr)
+-{
+-	free_mem_ptr = (long) *ptr;
+-}
+-
+-void puts(const char *s)
+-{
+-}
+-
+-void *memset(void *s, int c, size_t n)
+-{
+-	int i;
+-	char *ss = (char *) s;
+-
+-	for (i = 0; i < n; i++)
+-		ss[i] = c;
+-	return s;
+-}
+-
+-void *memcpy(void *__dest, __const void *__src, size_t __n)
+-{
+-	int i;
+-	char *d = (char *) __dest, *s = (char *) __src;
+-
+-	for (i = 0; i < __n; i++)
+-		d[i] = s[i];
+-	return __dest;
+-}
+-
+-/* ===========================================================================
+- * Fill the input buffer. This is called only when the buffer is empty
+- * and at least one byte is really needed.
+- */
+-static int fill_inbuf(void)
+-{
+-	if (insize != 0) {
+-		error("ran out of input data\n");
+-	}
+-
+-	inbuf = input_data;
+-	insize = input_len;
+-	inptr = 1;
+-	return inbuf[0];
+-}
+-
+-/* ===========================================================================
+- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
+- * (Used for the decompressed data only.)
+- */
+-static void flush_window(void)
+-{
+-	ulg c = crc;		/* temporary variable */
+-	unsigned n;
+-	uch *in, *out, ch;
+-
+-	in = window;
+-	out = &output_data[output_ptr];
+-	for (n = 0; n < outcnt; n++) {
+-		ch = *out++ = *in++;
+-		c = crc_32_tab[((int) c ^ ch) & 0xff] ^ (c >> 8);
+-	}
+-	crc = c;
+-	bytes_out += (ulg) outcnt;
+-	output_ptr += (ulg) outcnt;
+-	outcnt = 0;
+-	puts(".");
+-}
+-
+-static void error(char *x)
+-{
+-	puts("\n\n");
+-	puts(x);
+-	puts("\n\n -- System halted");
+-
+-	while (1) ;		/* Halt */
+-}
+-
+-#define STACK_SIZE (4096)
+-long __attribute__ ((aligned(8))) user_stack[STACK_SIZE];
+-long *stack_start = &user_stack[STACK_SIZE];
+-
+-void decompress_kernel(void)
+-{
+-	output_data = (uch *) (CONFIG_MEMORY_START + 0x2000);
+-	free_mem_ptr = (unsigned long) &_end;
+-	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+-
+-	makecrc();
+-	puts("Uncompressing Linux... ");
+-	cache_control(CACHE_ENABLE);
+-	gunzip();
+-	puts("\n");
+-
+-#if 0
+-	/* When booting from ROM may want to do something like this if the
+-	 * boot loader doesn't.
+-	 */
+-
+-	/* Set up the parameters and command line */
+-	{
+-		volatile unsigned int *parambase =
+-		    (int *) (CONFIG_MEMORY_START + 0x1000);
+-
+-		parambase[0] = 0x1;	/* MOUNT_ROOT_RDONLY */
+-		parambase[1] = 0x0;	/* RAMDISK_FLAGS */
+-		parambase[2] = 0x0200;	/* ORIG_ROOT_DEV */
+-		parambase[3] = 0x0;	/* LOADER_TYPE */
+-		parambase[4] = 0x0;	/* INITRD_START */
+-		parambase[5] = 0x0;	/* INITRD_SIZE */
+-		parambase[6] = 0;
+-
+-		strcpy((char *) ((int) parambase + 0x100),
+-		       "console=ttySC0,38400");
+-	}
+-#endif
+-
+-	puts("Ok, booting the kernel.\n");
+-
+-	cache_control(CACHE_DISABLE);
+-}
+diff --git a/arch/sh64/boot/compressed/vmlinux.lds.S b/arch/sh64/boot/compressed/vmlinux.lds.S
+deleted file mode 100644
+index 59c2ef4..0000000
+--- a/arch/sh64/boot/compressed/vmlinux.lds.S
++++ /dev/null
+@@ -1,64 +0,0 @@
+-/*
+- * ld script to make compressed SuperH/shmedia Linux kernel+decompression
+- *		bootstrap
+- * Modified by Stuart Menefy from arch/sh/vmlinux.lds.S written by Niibe Yutaka
+- */
+-
+-
+-#ifdef CONFIG_LITTLE_ENDIAN
+-/* OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux") */
+-#define NOP 0x6ff0fff0
+-#else
+-/* OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64") */
+-#define NOP 0xf0fff06f
+-#endif
+-
+-OUTPUT_FORMAT("elf32-sh64-linux")
+-OUTPUT_ARCH(sh)
+-ENTRY(_start)
+-
+-#define ALIGNED_GAP(section, align) (((ADDR(section)+SIZEOF(section)+(align)-1) & ~((align)-1))-ADDR(section))
+-#define FOLLOWING(section, align) AT (LOADADDR(section) + ALIGNED_GAP(section,align))
+-
+-SECTIONS
+-{
+-  _text = .;			/* Text and read-only data */
+-
+-  .text : {
+-	*(.text)
+-	*(.text64)
+-	*(.text..SHmedia32)
+-	*(.fixup)
+-	*(.gnu.warning)
+-	} = NOP
+-  . = ALIGN(4);
+-  .rodata : { *(.rodata) }
+-
+-  /* There is no 'real' reason for eight byte alignment, four would work
+-   * as well, but gdb downloads much (*4) faster with this.
+-   */
+-  . = ALIGN(8);
+-  .image : { *(.image) }
+-  . = ALIGN(4);
+-  _etext = .;			/* End of text section */
+-
+-  .data :			/* Data */
+-	FOLLOWING(.image, 4)
+-	{
+-	_data = .;
+-	*(.data)
+-	}
+-  _data_image = LOADADDR(.data);/* Address of data section in ROM */
+-
+-  _edata = .;			/* End of data section */
+-
+-  .stack : { stack = .;  _stack = .; }
+-
+-  . = ALIGN(4);
+-  __bss_start = .;		/* BSS */
+-  .bss : {
+-	*(.bss)
+-	}
+-  . = ALIGN(4);
+-  _end = . ;
+-}
+diff --git a/arch/sh64/configs/cayman_defconfig b/arch/sh64/configs/cayman_defconfig
+deleted file mode 100644
+index 75552bb..0000000
+--- a/arch/sh64/configs/cayman_defconfig
++++ /dev/null
+@@ -1,1126 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc1
+-# Fri Nov  2 14:35:27 2007
+-#
+-CONFIG_SUPERH=y
+-CONFIG_SUPERH64=y
+-CONFIG_MMU=y
+-CONFIG_QUICKLIST=y
+-CONFIG_RWSEM_GENERIC_SPINLOCK=y
+-CONFIG_GENERIC_FIND_NEXT_BIT=y
+-CONFIG_GENERIC_HWEIGHT=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_GENERIC_IRQ_PROBE=y
+-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+-CONFIG_ARCH_NO_VIRT_TO_BUS=y
+-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+-
+-#
+-# General setup
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_LOCK_KERNEL=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-# CONFIG_SYSVIPC is not set
+-CONFIG_POSIX_MQUEUE=y
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-# CONFIG_TASKSTATS is not set
+-# CONFIG_USER_NS is not set
+-# CONFIG_AUDIT is not set
+-# CONFIG_IKCONFIG is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_CGROUPS is not set
+-CONFIG_FAIR_GROUP_SCHED=y
+-CONFIG_FAIR_USER_SCHED=y
+-# CONFIG_FAIR_CGROUP_SCHED is not set
+-CONFIG_SYSFS_DEPRECATED=y
+-# CONFIG_RELAY is not set
+-# CONFIG_BLK_DEV_INITRD is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_EMBEDDED is not set
+-CONFIG_UID16=y
+-CONFIG_SYSCTL_SYSCALL=y
+-CONFIG_KALLSYMS=y
+-# CONFIG_KALLSYMS_ALL is not set
+-# CONFIG_KALLSYMS_EXTRA_PASS is not set
+-CONFIG_HOTPLUG=y
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_ELF_CORE=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-CONFIG_ANON_INODES=y
+-CONFIG_EPOLL=y
+-CONFIG_SIGNALFD=y
+-CONFIG_EVENTFD=y
+-CONFIG_SHMEM=y
+-CONFIG_VM_EVENT_COUNTERS=y
+-CONFIG_SLAB=y
+-# CONFIG_SLUB is not set
+-# CONFIG_SLOB is not set
+-CONFIG_RT_MUTEXES=y
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-CONFIG_MODULES=y
+-CONFIG_MODULE_UNLOAD=y
+-# CONFIG_MODULE_FORCE_UNLOAD is not set
+-# CONFIG_MODVERSIONS is not set
+-# CONFIG_MODULE_SRCVERSION_ALL is not set
+-CONFIG_KMOD=y
+-CONFIG_BLOCK=y
+-# CONFIG_LBD is not set
+-# CONFIG_BLK_DEV_IO_TRACE is not set
+-# CONFIG_LSF is not set
+-# CONFIG_BLK_DEV_BSG is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-# CONFIG_DEFAULT_AS is not set
+-# CONFIG_DEFAULT_DEADLINE is not set
+-CONFIG_DEFAULT_CFQ=y
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="cfq"
+-
+-#
+-# System type
+-#
+-# CONFIG_SH_SIMULATOR is not set
+-CONFIG_SH_CAYMAN=y
+-# CONFIG_SH_HARP is not set
+-CONFIG_CPU_SH5=y
+-CONFIG_CPU_SUBTYPE_SH5_101=y
+-# CONFIG_CPU_SUBTYPE_SH5_103 is not set
+-CONFIG_LITTLE_ENDIAN=y
+-# CONFIG_BIG_ENDIAN is not set
+-CONFIG_SH_FPU=y
+-# CONFIG_SH64_FPU_DENORM_FLUSH is not set
+-CONFIG_SH64_PGTABLE_2_LEVEL=y
+-# CONFIG_SH64_PGTABLE_3_LEVEL is not set
+-CONFIG_HUGETLB_PAGE_SIZE_64K=y
+-# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+-# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
+-CONFIG_SH64_USER_MISALIGNED_FIXUP=y
+-
+-#
+-# Memory options
+-#
+-CONFIG_CACHED_MEMORY_OFFSET=0x20000000
+-CONFIG_MEMORY_START=0x80000000
+-CONFIG_MEMORY_SIZE_IN_MB=128
+-
+-#
+-# Cache options
+-#
+-CONFIG_DCACHE_WRITE_BACK=y
+-# CONFIG_DCACHE_WRITE_THROUGH is not set
+-# CONFIG_DCACHE_DISABLED is not set
+-# CONFIG_ICACHE_DISABLED is not set
+-CONFIG_PCIDEVICE_MEMORY_START=C0000000
+-CONFIG_DEVICE_MEMORY_START=E0000000
+-CONFIG_FLASH_MEMORY_START=0x00000000
+-CONFIG_PCI_BLOCK_START=0x40000000
+-
+-#
+-# CPU Subtype specific options
+-#
+-CONFIG_SH64_ID2815_WORKAROUND=y
+-
+-#
+-# Misc options
+-#
+-CONFIG_HEARTBEAT=y
+-CONFIG_HDSP253_LED=y
+-# CONFIG_SH_DMA is not set
+-CONFIG_PREEMPT=y
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-# CONFIG_RESOURCES_64BIT is not set
+-CONFIG_ZONE_DMA_FLAG=0
+-CONFIG_NR_QUICK=1
+-
+-#
+-# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+-#
+-CONFIG_PCI=y
+-CONFIG_SH_PCIDMA_NONCOHERENT=y
+-# CONFIG_ARCH_SUPPORTS_MSI is not set
+-# CONFIG_PCI_DEBUG is not set
+-# CONFIG_PCCARD is not set
+-# CONFIG_HOTPLUG_PCI is not set
+-
+-#
+-# Executable file formats
+-#
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-CONFIG_XFRM=y
+-# CONFIG_XFRM_USER is not set
+-# CONFIG_XFRM_SUB_POLICY is not set
+-# CONFIG_XFRM_MIGRATE is not set
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-# CONFIG_IP_MULTICAST is not set
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-# CONFIG_IP_PNP_DHCP is not set
+-# CONFIG_IP_PNP_BOOTP is not set
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_ARPD is not set
+-# CONFIG_SYN_COOKIES is not set
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_XFRM_TUNNEL is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_XFRM_MODE_TRANSPORT=y
+-CONFIG_INET_XFRM_MODE_TUNNEL=y
+-CONFIG_INET_XFRM_MODE_BEET=y
+-# CONFIG_INET_LRO is not set
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_CUBIC=y
+-CONFIG_DEFAULT_TCP_CONG="cubic"
+-# CONFIG_TCP_MD5SIG is not set
+-# CONFIG_IPV6 is not set
+-# CONFIG_INET6_XFRM_TUNNEL is not set
+-# CONFIG_INET6_TUNNEL is not set
+-# CONFIG_NETWORK_SECMARK is not set
+-# CONFIG_NETFILTER is not set
+-# CONFIG_IP_DCCP is not set
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_TIPC is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-# CONFIG_NET_SCHED is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_AF_RXRPC is not set
+-
+-#
+-# Wireless
+-#
+-# CONFIG_CFG80211 is not set
+-# CONFIG_WIRELESS_EXT is not set
+-# CONFIG_MAC80211 is not set
+-# CONFIG_IEEE80211 is not set
+-# CONFIG_RFKILL is not set
+-# CONFIG_NET_9P is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-# CONFIG_DEBUG_DRIVER is not set
+-# CONFIG_DEBUG_DEVRES is not set
+-# CONFIG_SYS_HYPERVISOR is not set
+-# CONFIG_CONNECTOR is not set
+-# CONFIG_MTD is not set
+-# CONFIG_PARPORT is not set
+-CONFIG_BLK_DEV=y
+-# CONFIG_BLK_CPQ_CISS_DA is not set
+-# CONFIG_BLK_DEV_DAC960 is not set
+-# CONFIG_BLK_DEV_UMEM is not set
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-# CONFIG_BLK_DEV_SX8 is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=4096
+-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+-# CONFIG_CDROM_PKTCDVD is not set
+-# CONFIG_ATA_OVER_ETH is not set
+-CONFIG_MISC_DEVICES=y
+-# CONFIG_PHANTOM is not set
+-# CONFIG_EEPROM_93CX6 is not set
+-# CONFIG_SGI_IOC4 is not set
+-# CONFIG_TIFM_CORE is not set
+-# CONFIG_IDE is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-CONFIG_SCSI=y
+-CONFIG_SCSI_DMA=y
+-# CONFIG_SCSI_TGT is not set
+-# CONFIG_SCSI_NETLINK is not set
+-CONFIG_SCSI_PROC_FS=y
+-
+-#
+-# SCSI support type (disk, tape, CD-ROM)
+-#
+-CONFIG_BLK_DEV_SD=y
+-# CONFIG_CHR_DEV_ST is not set
+-# CONFIG_CHR_DEV_OSST is not set
+-# CONFIG_BLK_DEV_SR is not set
+-# CONFIG_CHR_DEV_SG is not set
+-# CONFIG_CHR_DEV_SCH is not set
+-
+-#
+-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+-#
+-CONFIG_SCSI_MULTI_LUN=y
+-# CONFIG_SCSI_CONSTANTS is not set
+-# CONFIG_SCSI_LOGGING is not set
+-# CONFIG_SCSI_SCAN_ASYNC is not set
+-CONFIG_SCSI_WAIT_SCAN=m
+-
+-#
+-# SCSI Transports
+-#
+-CONFIG_SCSI_SPI_ATTRS=y
+-# CONFIG_SCSI_FC_ATTRS is not set
+-# CONFIG_SCSI_ISCSI_ATTRS is not set
+-# CONFIG_SCSI_SAS_LIBSAS is not set
+-# CONFIG_SCSI_SRP_ATTRS is not set
+-CONFIG_SCSI_LOWLEVEL=y
+-# CONFIG_ISCSI_TCP is not set
+-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+-# CONFIG_SCSI_3W_9XXX is not set
+-# CONFIG_SCSI_ACARD is not set
+-# CONFIG_SCSI_AACRAID is not set
+-# CONFIG_SCSI_AIC7XXX is not set
+-# CONFIG_SCSI_AIC7XXX_OLD is not set
+-# CONFIG_SCSI_AIC79XX is not set
+-# CONFIG_SCSI_AIC94XX is not set
+-# CONFIG_SCSI_ARCMSR is not set
+-# CONFIG_MEGARAID_NEWGEN is not set
+-# CONFIG_MEGARAID_LEGACY is not set
+-# CONFIG_MEGARAID_SAS is not set
+-# CONFIG_SCSI_HPTIOP is not set
+-# CONFIG_SCSI_DMX3191D is not set
+-# CONFIG_SCSI_FUTURE_DOMAIN is not set
+-# CONFIG_SCSI_IPS is not set
+-# CONFIG_SCSI_INITIO is not set
+-# CONFIG_SCSI_INIA100 is not set
+-# CONFIG_SCSI_STEX is not set
+-CONFIG_SCSI_SYM53C8XX_2=y
+-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+-CONFIG_SCSI_SYM53C8XX_MMIO=y
+-# CONFIG_SCSI_QLOGIC_1280 is not set
+-# CONFIG_SCSI_QLA_FC is not set
+-# CONFIG_SCSI_QLA_ISCSI is not set
+-# CONFIG_SCSI_LPFC is not set
+-# CONFIG_SCSI_DC395x is not set
+-# CONFIG_SCSI_DC390T is not set
+-# CONFIG_SCSI_NSP32 is not set
+-# CONFIG_SCSI_DEBUG is not set
+-# CONFIG_SCSI_SRP is not set
+-# CONFIG_ATA is not set
+-# CONFIG_MD is not set
+-# CONFIG_FUSION is not set
+-
+-#
+-# IEEE 1394 (FireWire) support
+-#
+-# CONFIG_FIREWIRE is not set
+-# CONFIG_IEEE1394 is not set
+-# CONFIG_I2O is not set
+-CONFIG_NETDEVICES=y
+-# CONFIG_NETDEVICES_MULTIQUEUE is not set
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_MACVLAN is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-# CONFIG_VETH is not set
+-# CONFIG_IP1000 is not set
+-# CONFIG_ARCNET is not set
+-# CONFIG_PHYLIB is not set
+-CONFIG_NET_ETHERNET=y
+-# CONFIG_MII is not set
+-# CONFIG_STNIC is not set
+-# CONFIG_HAPPYMEAL is not set
+-# CONFIG_SUNGEM is not set
+-# CONFIG_CASSINI is not set
+-# CONFIG_NET_VENDOR_3COM is not set
+-# CONFIG_SMC91X is not set
+-# CONFIG_SMC911X is not set
+-CONFIG_NET_TULIP=y
+-# CONFIG_DE2104X is not set
+-CONFIG_TULIP=y
+-# CONFIG_TULIP_MWI is not set
+-# CONFIG_TULIP_MMIO is not set
+-# CONFIG_TULIP_NAPI is not set
+-# CONFIG_DE4X5 is not set
+-# CONFIG_WINBOND_840 is not set
+-# CONFIG_DM9102 is not set
+-# CONFIG_ULI526X is not set
+-# CONFIG_HP100 is not set
+-# CONFIG_IBM_NEW_EMAC_ZMII is not set
+-# CONFIG_IBM_NEW_EMAC_RGMII is not set
+-# CONFIG_IBM_NEW_EMAC_TAH is not set
+-# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+-CONFIG_NET_PCI=y
+-# CONFIG_PCNET32 is not set
+-# CONFIG_AMD8111_ETH is not set
+-# CONFIG_ADAPTEC_STARFIRE is not set
+-# CONFIG_B44 is not set
+-# CONFIG_FORCEDETH is not set
+-# CONFIG_EEPRO100 is not set
+-# CONFIG_E100 is not set
+-# CONFIG_FEALNX is not set
+-# CONFIG_NATSEMI is not set
+-# CONFIG_NE2K_PCI is not set
+-# CONFIG_8139CP is not set
+-# CONFIG_8139TOO is not set
+-# CONFIG_SIS900 is not set
+-# CONFIG_EPIC100 is not set
+-# CONFIG_SUNDANCE is not set
+-# CONFIG_TLAN is not set
+-# CONFIG_VIA_RHINE is not set
+-# CONFIG_SC92031 is not set
+-CONFIG_NETDEV_1000=y
+-# CONFIG_ACENIC is not set
+-# CONFIG_DL2K is not set
+-# CONFIG_E1000 is not set
+-# CONFIG_E1000E is not set
+-# CONFIG_NS83820 is not set
+-# CONFIG_HAMACHI is not set
+-# CONFIG_YELLOWFIN is not set
+-# CONFIG_R8169 is not set
+-# CONFIG_SIS190 is not set
+-# CONFIG_SKGE is not set
+-# CONFIG_SKY2 is not set
+-# CONFIG_SK98LIN is not set
+-# CONFIG_VIA_VELOCITY is not set
+-# CONFIG_TIGON3 is not set
+-# CONFIG_BNX2 is not set
+-# CONFIG_QLA3XXX is not set
+-# CONFIG_ATL1 is not set
+-CONFIG_NETDEV_10000=y
+-# CONFIG_CHELSIO_T1 is not set
+-# CONFIG_CHELSIO_T3 is not set
+-# CONFIG_IXGBE is not set
+-# CONFIG_IXGB is not set
+-# CONFIG_S2IO is not set
+-# CONFIG_MYRI10GE is not set
+-# CONFIG_NETXEN_NIC is not set
+-# CONFIG_NIU is not set
+-# CONFIG_MLX4_CORE is not set
+-# CONFIG_TEHUTI is not set
+-# CONFIG_TR is not set
+-
+-#
+-# Wireless LAN
+-#
+-# CONFIG_WLAN_PRE80211 is not set
+-# CONFIG_WLAN_80211 is not set
+-# CONFIG_WAN is not set
+-# CONFIG_FDDI is not set
+-# CONFIG_HIPPI is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_NET_FC is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-# CONFIG_ISDN is not set
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-# CONFIG_INPUT_FF_MEMLESS is not set
+-# CONFIG_INPUT_POLLDEV is not set
+-
+-#
+-# Userland interfaces
+-#
+-CONFIG_INPUT_MOUSEDEV=y
+-CONFIG_INPUT_MOUSEDEV_PSAUX=y
+-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-CONFIG_INPUT_KEYBOARD=y
+-CONFIG_KEYBOARD_ATKBD=y
+-# CONFIG_KEYBOARD_SUNKBD is not set
+-# CONFIG_KEYBOARD_LKKBD is not set
+-# CONFIG_KEYBOARD_XTKBD is not set
+-# CONFIG_KEYBOARD_NEWTON is not set
+-# CONFIG_KEYBOARD_STOWAWAY is not set
+-CONFIG_INPUT_MOUSE=y
+-CONFIG_MOUSE_PS2=y
+-CONFIG_MOUSE_PS2_ALPS=y
+-CONFIG_MOUSE_PS2_LOGIPS2PP=y
+-CONFIG_MOUSE_PS2_SYNAPTICS=y
+-CONFIG_MOUSE_PS2_LIFEBOOK=y
+-CONFIG_MOUSE_PS2_TRACKPOINT=y
+-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+-# CONFIG_MOUSE_SERIAL is not set
+-# CONFIG_MOUSE_APPLETOUCH is not set
+-# CONFIG_MOUSE_VSXXXAA is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TABLET is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-CONFIG_SERIO=y
+-CONFIG_SERIO_I8042=y
+-CONFIG_SERIO_SERPORT=y
+-# CONFIG_SERIO_PCIPS2 is not set
+-CONFIG_SERIO_LIBPS2=y
+-# CONFIG_SERIO_RAW is not set
+-# CONFIG_GAMEPORT is not set
+-
 -#
--# Processor families
+-# Character devices
 -#
--config CPU_SH2
--	bool
+-CONFIG_VT=y
+-CONFIG_VT_CONSOLE=y
+-CONFIG_HW_CONSOLE=y
+-# CONFIG_VT_HW_CONSOLE_BINDING is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-# CONFIG_SERIAL_8250 is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_SH_SCI=y
+-CONFIG_SERIAL_SH_SCI_NR_UARTS=2
+-CONFIG_SERIAL_SH_SCI_CONSOLE=y
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-# CONFIG_SERIAL_JSM is not set
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-# CONFIG_IPMI_HANDLER is not set
+-CONFIG_HW_RANDOM=y
+-# CONFIG_R3964 is not set
+-# CONFIG_APPLICOM is not set
+-# CONFIG_RAW_DRIVER is not set
+-# CONFIG_TCG_TPM is not set
+-CONFIG_DEVPORT=y
+-CONFIG_I2C=m
+-CONFIG_I2C_BOARDINFO=y
+-# CONFIG_I2C_CHARDEV is not set
+-
+-#
+-# I2C Algorithms
+-#
+-# CONFIG_I2C_ALGOBIT is not set
+-# CONFIG_I2C_ALGOPCF is not set
+-# CONFIG_I2C_ALGOPCA is not set
+-
+-#
+-# I2C Hardware Bus support
+-#
+-# CONFIG_I2C_ALI1535 is not set
+-# CONFIG_I2C_ALI1563 is not set
+-# CONFIG_I2C_ALI15X3 is not set
+-# CONFIG_I2C_AMD756 is not set
+-# CONFIG_I2C_AMD8111 is not set
+-# CONFIG_I2C_I801 is not set
+-# CONFIG_I2C_I810 is not set
+-# CONFIG_I2C_PIIX4 is not set
+-# CONFIG_I2C_NFORCE2 is not set
+-# CONFIG_I2C_OCORES is not set
+-# CONFIG_I2C_PARPORT_LIGHT is not set
+-# CONFIG_I2C_PROSAVAGE is not set
+-# CONFIG_I2C_SAVAGE4 is not set
+-# CONFIG_I2C_SIMTEC is not set
+-# CONFIG_I2C_SIS5595 is not set
+-# CONFIG_I2C_SIS630 is not set
+-# CONFIG_I2C_SIS96X is not set
+-# CONFIG_I2C_TAOS_EVM is not set
+-# CONFIG_I2C_STUB is not set
+-# CONFIG_I2C_VIA is not set
+-# CONFIG_I2C_VIAPRO is not set
+-# CONFIG_I2C_VOODOO3 is not set
+-
+-#
+-# Miscellaneous I2C Chip support
+-#
+-# CONFIG_SENSORS_DS1337 is not set
+-# CONFIG_SENSORS_DS1374 is not set
+-# CONFIG_DS1682 is not set
+-# CONFIG_SENSORS_EEPROM is not set
+-# CONFIG_SENSORS_PCF8574 is not set
+-# CONFIG_SENSORS_PCA9539 is not set
+-# CONFIG_SENSORS_PCF8591 is not set
+-# CONFIG_SENSORS_MAX6875 is not set
+-# CONFIG_SENSORS_TSL2550 is not set
+-# CONFIG_I2C_DEBUG_CORE is not set
+-# CONFIG_I2C_DEBUG_ALGO is not set
+-# CONFIG_I2C_DEBUG_BUS is not set
+-# CONFIG_I2C_DEBUG_CHIP is not set
+-
+-#
+-# SPI support
+-#
+-# CONFIG_SPI is not set
+-# CONFIG_SPI_MASTER is not set
+-# CONFIG_W1 is not set
+-# CONFIG_POWER_SUPPLY is not set
+-CONFIG_HWMON=y
+-# CONFIG_HWMON_VID is not set
+-# CONFIG_SENSORS_AD7418 is not set
+-# CONFIG_SENSORS_ADM1021 is not set
+-# CONFIG_SENSORS_ADM1025 is not set
+-# CONFIG_SENSORS_ADM1026 is not set
+-# CONFIG_SENSORS_ADM1029 is not set
+-# CONFIG_SENSORS_ADM1031 is not set
+-# CONFIG_SENSORS_ADM9240 is not set
+-# CONFIG_SENSORS_ADT7470 is not set
+-# CONFIG_SENSORS_ATXP1 is not set
+-# CONFIG_SENSORS_DS1621 is not set
+-# CONFIG_SENSORS_F71805F is not set
+-# CONFIG_SENSORS_F71882FG is not set
+-# CONFIG_SENSORS_F75375S is not set
+-# CONFIG_SENSORS_GL518SM is not set
+-# CONFIG_SENSORS_GL520SM is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_LM63 is not set
+-# CONFIG_SENSORS_LM75 is not set
+-# CONFIG_SENSORS_LM77 is not set
+-# CONFIG_SENSORS_LM78 is not set
+-# CONFIG_SENSORS_LM80 is not set
+-# CONFIG_SENSORS_LM83 is not set
+-# CONFIG_SENSORS_LM85 is not set
+-# CONFIG_SENSORS_LM87 is not set
+-# CONFIG_SENSORS_LM90 is not set
+-# CONFIG_SENSORS_LM92 is not set
+-# CONFIG_SENSORS_LM93 is not set
+-# CONFIG_SENSORS_MAX1619 is not set
+-# CONFIG_SENSORS_MAX6650 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_PC87427 is not set
+-# CONFIG_SENSORS_SIS5595 is not set
+-# CONFIG_SENSORS_DME1737 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_SMSC47M192 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_THMC50 is not set
+-# CONFIG_SENSORS_VIA686A is not set
+-# CONFIG_SENSORS_VT1211 is not set
+-# CONFIG_SENSORS_VT8231 is not set
+-# CONFIG_SENSORS_W83781D is not set
+-# CONFIG_SENSORS_W83791D is not set
+-# CONFIG_SENSORS_W83792D is not set
+-# CONFIG_SENSORS_W83793 is not set
+-# CONFIG_SENSORS_W83L785TS is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-# CONFIG_SENSORS_W83627EHF is not set
+-# CONFIG_HWMON_DEBUG_CHIP is not set
+-CONFIG_WATCHDOG=y
+-# CONFIG_WATCHDOG_NOWAYOUT is not set
+-
+-#
+-# Watchdog Device Drivers
+-#
+-# CONFIG_SOFT_WATCHDOG is not set
+-
+-#
+-# PCI-based Watchdog Cards
+-#
+-# CONFIG_PCIPCWATCHDOG is not set
+-# CONFIG_WDTPCI is not set
+-
+-#
+-# Sonics Silicon Backplane
+-#
+-CONFIG_SSB_POSSIBLE=y
+-# CONFIG_SSB is not set
+-
+-#
+-# Multifunction device drivers
+-#
+-# CONFIG_MFD_SM501 is not set
+-
+-#
+-# Multimedia devices
+-#
+-CONFIG_VIDEO_DEV=m
+-# CONFIG_VIDEO_V4L1 is not set
+-# CONFIG_VIDEO_V4L1_COMPAT is not set
+-CONFIG_VIDEO_V4L2=y
+-CONFIG_VIDEO_CAPTURE_DRIVERS=y
+-# CONFIG_VIDEO_ADV_DEBUG is not set
+-CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+-# CONFIG_VIDEO_VIVI is not set
+-# CONFIG_VIDEO_SAA5246A is not set
+-# CONFIG_VIDEO_SAA5249 is not set
+-# CONFIG_VIDEO_SAA7134 is not set
+-# CONFIG_VIDEO_HEXIUM_ORION is not set
+-# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+-# CONFIG_VIDEO_CX88 is not set
+-# CONFIG_VIDEO_CX23885 is not set
+-# CONFIG_VIDEO_CAFE_CCIC is not set
+-# CONFIG_RADIO_ADAPTERS is not set
+-CONFIG_DVB_CORE=y
+-# CONFIG_DVB_CORE_ATTACH is not set
+-CONFIG_DVB_CAPTURE_DRIVERS=y
+-
+-#
+-# Supported SAA7146 based PCI Adapters
+-#
+-
+-#
+-# Supported FlexCopII (B2C2) Adapters
+-#
+-# CONFIG_DVB_B2C2_FLEXCOP is not set
+-
+-#
+-# Supported BT878 Adapters
+-#
+-
+-#
+-# Supported Pluto2 Adapters
+-#
+-# CONFIG_DVB_PLUTO2 is not set
+-
+-#
+-# Supported DVB Frontends
+-#
+-
+-#
+-# Customise DVB Frontends
+-#
+-# CONFIG_DVB_FE_CUSTOMISE is not set
+-
+-#
+-# DVB-S (satellite) frontends
+-#
+-# CONFIG_DVB_STV0299 is not set
+-# CONFIG_DVB_CX24110 is not set
+-# CONFIG_DVB_CX24123 is not set
+-# CONFIG_DVB_TDA8083 is not set
+-# CONFIG_DVB_MT312 is not set
+-# CONFIG_DVB_VES1X93 is not set
+-# CONFIG_DVB_S5H1420 is not set
+-# CONFIG_DVB_TDA10086 is not set
+-
+-#
+-# DVB-T (terrestrial) frontends
+-#
+-# CONFIG_DVB_SP8870 is not set
+-# CONFIG_DVB_SP887X is not set
+-# CONFIG_DVB_CX22700 is not set
+-# CONFIG_DVB_CX22702 is not set
+-# CONFIG_DVB_L64781 is not set
+-# CONFIG_DVB_TDA1004X is not set
+-# CONFIG_DVB_NXT6000 is not set
+-# CONFIG_DVB_MT352 is not set
+-# CONFIG_DVB_ZL10353 is not set
+-# CONFIG_DVB_DIB3000MB is not set
+-# CONFIG_DVB_DIB3000MC is not set
+-# CONFIG_DVB_DIB7000M is not set
+-# CONFIG_DVB_DIB7000P is not set
+-
+-#
+-# DVB-C (cable) frontends
+-#
+-# CONFIG_DVB_VES1820 is not set
+-# CONFIG_DVB_TDA10021 is not set
+-# CONFIG_DVB_TDA10023 is not set
+-# CONFIG_DVB_STV0297 is not set
+-
+-#
+-# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+-#
+-# CONFIG_DVB_NXT200X is not set
+-# CONFIG_DVB_OR51211 is not set
+-# CONFIG_DVB_OR51132 is not set
+-# CONFIG_DVB_BCM3510 is not set
+-# CONFIG_DVB_LGDT330X is not set
+-# CONFIG_DVB_S5H1409 is not set
+-
+-#
+-# Tuners/PLL support
+-#
+-# CONFIG_DVB_PLL is not set
+-# CONFIG_DVB_TDA826X is not set
+-# CONFIG_DVB_TDA827X is not set
+-# CONFIG_DVB_TUNER_QT1010 is not set
+-# CONFIG_DVB_TUNER_MT2060 is not set
+-# CONFIG_DVB_TUNER_MT2266 is not set
+-# CONFIG_DVB_TUNER_MT2131 is not set
+-# CONFIG_DVB_TUNER_DIB0070 is not set
+-
+-#
+-# Miscellaneous devices
+-#
+-# CONFIG_DVB_LNBP21 is not set
+-# CONFIG_DVB_ISL6421 is not set
+-# CONFIG_DVB_TUA6100 is not set
+-CONFIG_DAB=y
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_DRM is not set
+-# CONFIG_VGASTATE is not set
+-CONFIG_VIDEO_OUTPUT_CONTROL=y
+-CONFIG_FB=y
+-CONFIG_FIRMWARE_EDID=y
+-# CONFIG_FB_DDC is not set
+-CONFIG_FB_CFB_FILLRECT=y
+-CONFIG_FB_CFB_COPYAREA=y
+-CONFIG_FB_CFB_IMAGEBLIT=y
+-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+-# CONFIG_FB_SYS_FILLRECT is not set
+-# CONFIG_FB_SYS_COPYAREA is not set
+-# CONFIG_FB_SYS_IMAGEBLIT is not set
+-# CONFIG_FB_SYS_FOPS is not set
+-CONFIG_FB_DEFERRED_IO=y
+-# CONFIG_FB_SVGALIB is not set
+-# CONFIG_FB_MACMODES is not set
+-# CONFIG_FB_BACKLIGHT is not set
+-CONFIG_FB_MODE_HELPERS=y
+-# CONFIG_FB_TILEBLITTING is not set
+-
+-#
+-# Frame buffer hardware drivers
+-#
+-# CONFIG_FB_CIRRUS is not set
+-# CONFIG_FB_PM2 is not set
+-# CONFIG_FB_CYBER2000 is not set
+-# CONFIG_FB_ASILIANT is not set
+-# CONFIG_FB_IMSTT is not set
+-# CONFIG_FB_S1D13XXX is not set
+-# CONFIG_FB_NVIDIA is not set
+-# CONFIG_FB_RIVA is not set
+-# CONFIG_FB_MATROX is not set
+-# CONFIG_FB_RADEON is not set
+-# CONFIG_FB_ATY128 is not set
+-# CONFIG_FB_ATY is not set
+-# CONFIG_FB_S3 is not set
+-# CONFIG_FB_SAVAGE is not set
+-# CONFIG_FB_SIS is not set
+-# CONFIG_FB_NEOMAGIC is not set
+-CONFIG_FB_KYRO=y
+-# CONFIG_FB_3DFX is not set
+-# CONFIG_FB_VOODOO1 is not set
+-# CONFIG_FB_VT8623 is not set
+-# CONFIG_FB_TRIDENT is not set
+-# CONFIG_FB_ARK is not set
+-# CONFIG_FB_PM3 is not set
+-# CONFIG_FB_VIRTUAL is not set
+-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-
+-#
+-# Display device support
+-#
+-# CONFIG_DISPLAY_SUPPORT is not set
+-
+-#
+-# Console display driver support
+-#
+-CONFIG_DUMMY_CONSOLE=y
+-CONFIG_FRAMEBUFFER_CONSOLE=y
+-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+-CONFIG_FONTS=y
+-# CONFIG_FONT_8x8 is not set
+-CONFIG_FONT_8x16=y
+-# CONFIG_FONT_6x11 is not set
+-# CONFIG_FONT_7x14 is not set
+-# CONFIG_FONT_PEARL_8x8 is not set
+-# CONFIG_FONT_ACORN_8x8 is not set
+-# CONFIG_FONT_MINI_4x6 is not set
+-# CONFIG_FONT_SUN8x16 is not set
+-# CONFIG_FONT_SUN12x22 is not set
+-# CONFIG_FONT_10x18 is not set
+-CONFIG_LOGO=y
+-# CONFIG_LOGO_LINUX_MONO is not set
+-# CONFIG_LOGO_LINUX_VGA16 is not set
+-# CONFIG_LOGO_LINUX_CLUT224 is not set
+-# CONFIG_LOGO_SUPERH_MONO is not set
+-# CONFIG_LOGO_SUPERH_VGA16 is not set
+-CONFIG_LOGO_SUPERH_CLUT224=y
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-CONFIG_HID_SUPPORT=y
+-CONFIG_HID=y
+-# CONFIG_HID_DEBUG is not set
+-# CONFIG_HIDRAW is not set
+-CONFIG_USB_SUPPORT=y
+-CONFIG_USB_ARCH_HAS_HCD=y
+-CONFIG_USB_ARCH_HAS_OHCI=y
+-CONFIG_USB_ARCH_HAS_EHCI=y
+-# CONFIG_USB is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-# CONFIG_MMC is not set
+-# CONFIG_NEW_LEDS is not set
+-# CONFIG_INFINIBAND is not set
+-# CONFIG_RTC_CLASS is not set
+-
+-#
+-# Userspace I/O
+-#
+-# CONFIG_UIO is not set
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-# CONFIG_EXT4DEV_FS is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_GFS2_FS is not set
+-# CONFIG_OCFS2_FS is not set
+-CONFIG_MINIX_FS=y
+-CONFIG_ROMFS_FS=y
+-CONFIG_INOTIFY=y
+-CONFIG_INOTIFY_USER=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_PROC_SYSCTL=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_POSIX_ACL is not set
+-CONFIG_HUGETLBFS=y
+-CONFIG_HUGETLB_PAGE=y
+-# CONFIG_CONFIGFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-CONFIG_NETWORK_FILESYSTEMS=y
+-CONFIG_NFS_FS=y
+-CONFIG_NFS_V3=y
+-# CONFIG_NFS_V3_ACL is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_LOCKD_V4=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-# CONFIG_SUNRPC_BIND34 is not set
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-CONFIG_MSDOS_PARTITION=y
+-# CONFIG_BSD_DISKLABEL is not set
+-# CONFIG_MINIX_SUBPARTITION is not set
+-# CONFIG_SOLARIS_X86_PARTITION is not set
+-# CONFIG_UNIXWARE_DISKLABEL is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_KARMA_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-# CONFIG_SYSV68_PARTITION is not set
+-# CONFIG_NLS is not set
+-# CONFIG_DLM is not set
+-CONFIG_INSTRUMENTATION=y
+-# CONFIG_PROFILING is not set
+-# CONFIG_MARKERS is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-CONFIG_ENABLE_WARN_DEPRECATED=y
+-CONFIG_ENABLE_MUST_CHECK=y
+-CONFIG_MAGIC_SYSRQ=y
+-# CONFIG_UNUSED_SYMBOLS is not set
+-CONFIG_DEBUG_FS=y
+-# CONFIG_HEADERS_CHECK is not set
+-CONFIG_DEBUG_KERNEL=y
+-# CONFIG_DEBUG_SHIRQ is not set
+-CONFIG_DETECT_SOFTLOCKUP=y
+-CONFIG_SCHED_DEBUG=y
+-CONFIG_SCHEDSTATS=y
+-# CONFIG_TIMER_STATS is not set
+-# CONFIG_DEBUG_SLAB is not set
+-# CONFIG_DEBUG_RT_MUTEXES is not set
+-# CONFIG_RT_MUTEX_TESTER is not set
+-# CONFIG_DEBUG_SPINLOCK is not set
+-# CONFIG_DEBUG_MUTEXES is not set
+-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+-# CONFIG_DEBUG_KOBJECT is not set
+-CONFIG_DEBUG_BUGVERBOSE=y
+-# CONFIG_DEBUG_INFO is not set
+-# CONFIG_DEBUG_VM is not set
+-# CONFIG_DEBUG_LIST is not set
+-# CONFIG_DEBUG_SG is not set
+-CONFIG_FRAME_POINTER=y
+-CONFIG_FORCED_INLINING=y
+-# CONFIG_BOOT_PRINTK_DELAY is not set
+-# CONFIG_RCU_TORTURE_TEST is not set
+-# CONFIG_FAULT_INJECTION is not set
+-# CONFIG_SAMPLES is not set
+-# CONFIG_EARLY_PRINTK is not set
+-CONFIG_SH64_PROC_TLB=y
+-CONFIG_SH64_PROC_ASIDS=y
+-CONFIG_SH64_SR_WATCH=y
+-# CONFIG_POOR_MANS_STRACE is not set
+-# CONFIG_SH_ALPHANUMERIC is not set
+-# CONFIG_SH_NO_BSS_INIT is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Library routines
+-#
+-CONFIG_BITREVERSE=y
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-# CONFIG_CRC_ITU_T is not set
+-CONFIG_CRC32=y
+-# CONFIG_CRC7 is not set
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_PLIST=y
+-CONFIG_HAS_IOMEM=y
+-CONFIG_HAS_IOPORT=y
+-CONFIG_HAS_DMA=y
+diff --git a/arch/sh64/configs/harp_defconfig b/arch/sh64/configs/harp_defconfig
+deleted file mode 100644
+index ba302cd..0000000
+--- a/arch/sh64/configs/harp_defconfig
++++ /dev/null
+@@ -1,745 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc1
+-# Fri Nov  2 14:35:57 2007
+-#
+-CONFIG_SUPERH=y
+-CONFIG_SUPERH64=y
+-CONFIG_MMU=y
+-CONFIG_QUICKLIST=y
+-CONFIG_RWSEM_GENERIC_SPINLOCK=y
+-CONFIG_GENERIC_FIND_NEXT_BIT=y
+-CONFIG_GENERIC_HWEIGHT=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_GENERIC_IRQ_PROBE=y
+-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+-CONFIG_ARCH_NO_VIRT_TO_BUS=y
+-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+-
+-#
+-# General setup
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_LOCK_KERNEL=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-# CONFIG_SYSVIPC is not set
+-CONFIG_POSIX_MQUEUE=y
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-# CONFIG_TASKSTATS is not set
+-# CONFIG_USER_NS is not set
+-# CONFIG_AUDIT is not set
+-# CONFIG_IKCONFIG is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_CGROUPS is not set
+-CONFIG_FAIR_GROUP_SCHED=y
+-CONFIG_FAIR_USER_SCHED=y
+-# CONFIG_FAIR_CGROUP_SCHED is not set
+-CONFIG_SYSFS_DEPRECATED=y
+-# CONFIG_RELAY is not set
+-# CONFIG_BLK_DEV_INITRD is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_EMBEDDED is not set
+-CONFIG_UID16=y
+-CONFIG_SYSCTL_SYSCALL=y
+-CONFIG_KALLSYMS=y
+-# CONFIG_KALLSYMS_ALL is not set
+-# CONFIG_KALLSYMS_EXTRA_PASS is not set
+-CONFIG_HOTPLUG=y
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_ELF_CORE=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-CONFIG_ANON_INODES=y
+-CONFIG_EPOLL=y
+-CONFIG_SIGNALFD=y
+-CONFIG_EVENTFD=y
+-CONFIG_SHMEM=y
+-CONFIG_VM_EVENT_COUNTERS=y
+-CONFIG_SLAB=y
+-# CONFIG_SLUB is not set
+-# CONFIG_SLOB is not set
+-CONFIG_RT_MUTEXES=y
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-# CONFIG_MODULES is not set
+-CONFIG_BLOCK=y
+-# CONFIG_LBD is not set
+-# CONFIG_BLK_DEV_IO_TRACE is not set
+-# CONFIG_LSF is not set
+-# CONFIG_BLK_DEV_BSG is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-# CONFIG_DEFAULT_AS is not set
+-# CONFIG_DEFAULT_DEADLINE is not set
+-CONFIG_DEFAULT_CFQ=y
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="cfq"
+-
+-#
+-# System type
+-#
+-# CONFIG_SH_SIMULATOR is not set
+-# CONFIG_SH_CAYMAN is not set
+-CONFIG_SH_HARP=y
+-CONFIG_CPU_SH5=y
+-CONFIG_CPU_SUBTYPE_SH5_101=y
+-# CONFIG_CPU_SUBTYPE_SH5_103 is not set
+-CONFIG_LITTLE_ENDIAN=y
+-# CONFIG_BIG_ENDIAN is not set
+-CONFIG_SH_FPU=y
+-# CONFIG_SH64_FPU_DENORM_FLUSH is not set
+-CONFIG_SH64_PGTABLE_2_LEVEL=y
+-# CONFIG_SH64_PGTABLE_3_LEVEL is not set
+-CONFIG_HUGETLB_PAGE_SIZE_64K=y
+-# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+-# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
+-CONFIG_SH64_USER_MISALIGNED_FIXUP=y
+-
+-#
+-# Memory options
+-#
+-CONFIG_CACHED_MEMORY_OFFSET=0x20000000
+-CONFIG_MEMORY_START=0x80000000
+-CONFIG_MEMORY_SIZE_IN_MB=128
+-
+-#
+-# Cache options
+-#
+-CONFIG_DCACHE_WRITE_BACK=y
+-# CONFIG_DCACHE_WRITE_THROUGH is not set
+-# CONFIG_DCACHE_DISABLED is not set
+-# CONFIG_ICACHE_DISABLED is not set
+-CONFIG_PCIDEVICE_MEMORY_START=C0000000
+-CONFIG_DEVICE_MEMORY_START=E0000000
+-CONFIG_FLASH_MEMORY_START=0x00000000
+-CONFIG_PCI_BLOCK_START=0x40000000
+-
+-#
+-# CPU Subtype specific options
+-#
+-CONFIG_SH64_ID2815_WORKAROUND=y
+-
+-#
+-# Misc options
+-#
+-# CONFIG_SH_DMA is not set
+-CONFIG_PREEMPT=y
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-# CONFIG_RESOURCES_64BIT is not set
+-CONFIG_ZONE_DMA_FLAG=0
+-CONFIG_NR_QUICK=1
+-
+-#
+-# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+-#
+-# CONFIG_ARCH_SUPPORTS_MSI is not set
+-# CONFIG_PCCARD is not set
+-
+-#
+-# Executable file formats
+-#
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-
+-#
+-# Networking
+-#
+-CONFIG_NET=y
+-
+-#
+-# Networking options
+-#
+-CONFIG_PACKET=y
+-# CONFIG_PACKET_MMAP is not set
+-CONFIG_UNIX=y
+-CONFIG_XFRM=y
+-# CONFIG_XFRM_USER is not set
+-# CONFIG_XFRM_SUB_POLICY is not set
+-# CONFIG_XFRM_MIGRATE is not set
+-# CONFIG_NET_KEY is not set
+-CONFIG_INET=y
+-# CONFIG_IP_MULTICAST is not set
+-# CONFIG_IP_ADVANCED_ROUTER is not set
+-CONFIG_IP_FIB_HASH=y
+-CONFIG_IP_PNP=y
+-# CONFIG_IP_PNP_DHCP is not set
+-# CONFIG_IP_PNP_BOOTP is not set
+-# CONFIG_IP_PNP_RARP is not set
+-# CONFIG_NET_IPIP is not set
+-# CONFIG_NET_IPGRE is not set
+-# CONFIG_ARPD is not set
+-# CONFIG_SYN_COOKIES is not set
+-# CONFIG_INET_AH is not set
+-# CONFIG_INET_ESP is not set
+-# CONFIG_INET_IPCOMP is not set
+-# CONFIG_INET_XFRM_TUNNEL is not set
+-# CONFIG_INET_TUNNEL is not set
+-CONFIG_INET_XFRM_MODE_TRANSPORT=y
+-CONFIG_INET_XFRM_MODE_TUNNEL=y
+-CONFIG_INET_XFRM_MODE_BEET=y
+-# CONFIG_INET_LRO is not set
+-CONFIG_INET_DIAG=y
+-CONFIG_INET_TCP_DIAG=y
+-# CONFIG_TCP_CONG_ADVANCED is not set
+-CONFIG_TCP_CONG_CUBIC=y
+-CONFIG_DEFAULT_TCP_CONG="cubic"
+-# CONFIG_TCP_MD5SIG is not set
+-# CONFIG_IPV6 is not set
+-# CONFIG_INET6_XFRM_TUNNEL is not set
+-# CONFIG_INET6_TUNNEL is not set
+-# CONFIG_NETWORK_SECMARK is not set
+-# CONFIG_NETFILTER is not set
+-# CONFIG_IP_DCCP is not set
+-# CONFIG_IP_SCTP is not set
+-# CONFIG_TIPC is not set
+-# CONFIG_ATM is not set
+-# CONFIG_BRIDGE is not set
+-# CONFIG_VLAN_8021Q is not set
+-# CONFIG_DECNET is not set
+-# CONFIG_LLC2 is not set
+-# CONFIG_IPX is not set
+-# CONFIG_ATALK is not set
+-# CONFIG_X25 is not set
+-# CONFIG_LAPB is not set
+-# CONFIG_ECONET is not set
+-# CONFIG_WAN_ROUTER is not set
+-# CONFIG_NET_SCHED is not set
+-
+-#
+-# Network testing
+-#
+-# CONFIG_NET_PKTGEN is not set
+-# CONFIG_HAMRADIO is not set
+-# CONFIG_IRDA is not set
+-# CONFIG_BT is not set
+-# CONFIG_AF_RXRPC is not set
+-
+-#
+-# Wireless
+-#
+-# CONFIG_CFG80211 is not set
+-# CONFIG_WIRELESS_EXT is not set
+-# CONFIG_MAC80211 is not set
+-# CONFIG_IEEE80211 is not set
+-# CONFIG_RFKILL is not set
+-# CONFIG_NET_9P is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-# CONFIG_DEBUG_DRIVER is not set
+-# CONFIG_DEBUG_DEVRES is not set
+-# CONFIG_SYS_HYPERVISOR is not set
+-# CONFIG_CONNECTOR is not set
+-# CONFIG_MTD is not set
+-# CONFIG_PARPORT is not set
+-CONFIG_BLK_DEV=y
+-# CONFIG_BLK_DEV_COW_COMMON is not set
+-CONFIG_BLK_DEV_LOOP=y
+-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+-# CONFIG_BLK_DEV_NBD is not set
+-CONFIG_BLK_DEV_RAM=y
+-CONFIG_BLK_DEV_RAM_COUNT=16
+-CONFIG_BLK_DEV_RAM_SIZE=4096
+-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+-# CONFIG_CDROM_PKTCDVD is not set
+-# CONFIG_ATA_OVER_ETH is not set
+-CONFIG_MISC_DEVICES=y
+-# CONFIG_EEPROM_93CX6 is not set
+-# CONFIG_IDE is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-CONFIG_SCSI=y
+-CONFIG_SCSI_DMA=y
+-# CONFIG_SCSI_TGT is not set
+-# CONFIG_SCSI_NETLINK is not set
+-CONFIG_SCSI_PROC_FS=y
+-
+-#
+-# SCSI support type (disk, tape, CD-ROM)
+-#
+-CONFIG_BLK_DEV_SD=y
+-# CONFIG_CHR_DEV_ST is not set
+-# CONFIG_CHR_DEV_OSST is not set
+-# CONFIG_BLK_DEV_SR is not set
+-# CONFIG_CHR_DEV_SG is not set
+-# CONFIG_CHR_DEV_SCH is not set
+-
+-#
+-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+-#
+-CONFIG_SCSI_MULTI_LUN=y
+-# CONFIG_SCSI_CONSTANTS is not set
+-# CONFIG_SCSI_LOGGING is not set
+-# CONFIG_SCSI_SCAN_ASYNC is not set
+-
+-#
+-# SCSI Transports
+-#
+-CONFIG_SCSI_SPI_ATTRS=y
+-# CONFIG_SCSI_FC_ATTRS is not set
+-# CONFIG_SCSI_ISCSI_ATTRS is not set
+-# CONFIG_SCSI_SAS_LIBSAS is not set
+-# CONFIG_SCSI_SRP_ATTRS is not set
+-CONFIG_SCSI_LOWLEVEL=y
+-# CONFIG_ISCSI_TCP is not set
+-# CONFIG_SCSI_DEBUG is not set
+-# CONFIG_ATA is not set
+-# CONFIG_MD is not set
+-CONFIG_NETDEVICES=y
+-# CONFIG_NETDEVICES_MULTIQUEUE is not set
+-# CONFIG_DUMMY is not set
+-# CONFIG_BONDING is not set
+-# CONFIG_MACVLAN is not set
+-# CONFIG_EQUALIZER is not set
+-# CONFIG_TUN is not set
+-# CONFIG_VETH is not set
+-# CONFIG_PHYLIB is not set
+-CONFIG_NET_ETHERNET=y
+-# CONFIG_MII is not set
+-# CONFIG_STNIC is not set
+-# CONFIG_SMC91X is not set
+-# CONFIG_SMC911X is not set
+-# CONFIG_IBM_NEW_EMAC_ZMII is not set
+-# CONFIG_IBM_NEW_EMAC_RGMII is not set
+-# CONFIG_IBM_NEW_EMAC_TAH is not set
+-# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+-# CONFIG_B44 is not set
+-CONFIG_NETDEV_1000=y
+-CONFIG_NETDEV_10000=y
+-
+-#
+-# Wireless LAN
+-#
+-# CONFIG_WLAN_PRE80211 is not set
+-# CONFIG_WLAN_80211 is not set
+-# CONFIG_WAN is not set
+-# CONFIG_PPP is not set
+-# CONFIG_SLIP is not set
+-# CONFIG_SHAPER is not set
+-# CONFIG_NETCONSOLE is not set
+-# CONFIG_NETPOLL is not set
+-# CONFIG_NET_POLL_CONTROLLER is not set
+-# CONFIG_ISDN is not set
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-# CONFIG_INPUT_FF_MEMLESS is not set
+-# CONFIG_INPUT_POLLDEV is not set
+-
+-#
+-# Userland interfaces
+-#
+-CONFIG_INPUT_MOUSEDEV=y
+-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TABLET is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-CONFIG_VT=y
+-CONFIG_VT_CONSOLE=y
+-CONFIG_HW_CONSOLE=y
+-# CONFIG_VT_HW_CONSOLE_BINDING is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-# CONFIG_SERIAL_8250 is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_SH_SCI=y
+-CONFIG_SERIAL_SH_SCI_NR_UARTS=2
+-CONFIG_SERIAL_SH_SCI_CONSOLE=y
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_UNIX98_PTYS=y
+-CONFIG_LEGACY_PTYS=y
+-CONFIG_LEGACY_PTY_COUNT=256
+-# CONFIG_IPMI_HANDLER is not set
+-CONFIG_HW_RANDOM=y
+-# CONFIG_R3964 is not set
+-# CONFIG_RAW_DRIVER is not set
+-# CONFIG_TCG_TPM is not set
+-# CONFIG_I2C is not set
+-
+-#
+-# SPI support
+-#
+-# CONFIG_SPI is not set
+-# CONFIG_SPI_MASTER is not set
+-# CONFIG_W1 is not set
+-# CONFIG_POWER_SUPPLY is not set
+-CONFIG_HWMON=y
+-# CONFIG_HWMON_VID is not set
+-# CONFIG_SENSORS_F71805F is not set
+-# CONFIG_SENSORS_F71882FG is not set
+-# CONFIG_SENSORS_IT87 is not set
+-# CONFIG_SENSORS_PC87360 is not set
+-# CONFIG_SENSORS_PC87427 is not set
+-# CONFIG_SENSORS_SMSC47M1 is not set
+-# CONFIG_SENSORS_SMSC47B397 is not set
+-# CONFIG_SENSORS_VT1211 is not set
+-# CONFIG_SENSORS_W83627HF is not set
+-# CONFIG_SENSORS_W83627EHF is not set
+-# CONFIG_HWMON_DEBUG_CHIP is not set
+-CONFIG_WATCHDOG=y
+-# CONFIG_WATCHDOG_NOWAYOUT is not set
+-
+-#
+-# Watchdog Device Drivers
+-#
+-# CONFIG_SOFT_WATCHDOG is not set
+-
+-#
+-# Sonics Silicon Backplane
+-#
+-CONFIG_SSB_POSSIBLE=y
+-# CONFIG_SSB is not set
+-
+-#
+-# Multifunction device drivers
+-#
+-# CONFIG_MFD_SM501 is not set
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-# CONFIG_DVB_CORE is not set
+-CONFIG_DAB=y
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_VGASTATE is not set
+-CONFIG_VIDEO_OUTPUT_CONTROL=y
+-CONFIG_FB=y
+-CONFIG_FIRMWARE_EDID=y
+-# CONFIG_FB_DDC is not set
+-# CONFIG_FB_CFB_FILLRECT is not set
+-# CONFIG_FB_CFB_COPYAREA is not set
+-# CONFIG_FB_CFB_IMAGEBLIT is not set
+-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+-# CONFIG_FB_SYS_FILLRECT is not set
+-# CONFIG_FB_SYS_COPYAREA is not set
+-# CONFIG_FB_SYS_IMAGEBLIT is not set
+-# CONFIG_FB_SYS_FOPS is not set
+-CONFIG_FB_DEFERRED_IO=y
+-# CONFIG_FB_SVGALIB is not set
+-# CONFIG_FB_MACMODES is not set
+-# CONFIG_FB_BACKLIGHT is not set
+-CONFIG_FB_MODE_HELPERS=y
+-# CONFIG_FB_TILEBLITTING is not set
+-
+-#
+-# Frame buffer hardware drivers
+-#
+-# CONFIG_FB_S1D13XXX is not set
+-# CONFIG_FB_VIRTUAL is not set
+-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-
+-#
+-# Display device support
+-#
+-# CONFIG_DISPLAY_SUPPORT is not set
+-
+-#
+-# Console display driver support
+-#
+-CONFIG_DUMMY_CONSOLE=y
+-CONFIG_FRAMEBUFFER_CONSOLE=y
+-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+-CONFIG_FONTS=y
+-# CONFIG_FONT_8x8 is not set
+-CONFIG_FONT_8x16=y
+-# CONFIG_FONT_6x11 is not set
+-# CONFIG_FONT_7x14 is not set
+-# CONFIG_FONT_PEARL_8x8 is not set
+-# CONFIG_FONT_ACORN_8x8 is not set
+-# CONFIG_FONT_MINI_4x6 is not set
+-# CONFIG_FONT_SUN8x16 is not set
+-# CONFIG_FONT_SUN12x22 is not set
+-# CONFIG_FONT_10x18 is not set
+-CONFIG_LOGO=y
+-# CONFIG_LOGO_LINUX_MONO is not set
+-# CONFIG_LOGO_LINUX_VGA16 is not set
+-# CONFIG_LOGO_LINUX_CLUT224 is not set
+-# CONFIG_LOGO_SUPERH_MONO is not set
+-# CONFIG_LOGO_SUPERH_VGA16 is not set
+-CONFIG_LOGO_SUPERH_CLUT224=y
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-CONFIG_HID_SUPPORT=y
+-CONFIG_HID=y
+-# CONFIG_HID_DEBUG is not set
+-# CONFIG_HIDRAW is not set
+-CONFIG_USB_SUPPORT=y
+-CONFIG_USB_ARCH_HAS_HCD=y
+-# CONFIG_USB_ARCH_HAS_OHCI is not set
+-# CONFIG_USB_ARCH_HAS_EHCI is not set
+-# CONFIG_USB is not set
+-
+-#
+-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+-#
+-
+-#
+-# USB Gadget Support
+-#
+-# CONFIG_USB_GADGET is not set
+-# CONFIG_MMC is not set
+-# CONFIG_NEW_LEDS is not set
+-# CONFIG_RTC_CLASS is not set
+-
+-#
+-# Userspace I/O
+-#
+-# CONFIG_UIO is not set
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-# CONFIG_EXT4DEV_FS is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_GFS2_FS is not set
+-# CONFIG_OCFS2_FS is not set
+-CONFIG_MINIX_FS=y
+-CONFIG_ROMFS_FS=y
+-CONFIG_INOTIFY=y
+-CONFIG_INOTIFY_USER=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_PROC_SYSCTL=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_POSIX_ACL is not set
+-CONFIG_HUGETLBFS=y
+-CONFIG_HUGETLB_PAGE=y
+-# CONFIG_CONFIGFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-CONFIG_NETWORK_FILESYSTEMS=y
+-CONFIG_NFS_FS=y
+-CONFIG_NFS_V3=y
+-# CONFIG_NFS_V3_ACL is not set
+-# CONFIG_NFS_V4 is not set
+-# CONFIG_NFS_DIRECTIO is not set
+-# CONFIG_NFSD is not set
+-CONFIG_ROOT_NFS=y
+-CONFIG_LOCKD=y
+-CONFIG_LOCKD_V4=y
+-CONFIG_NFS_COMMON=y
+-CONFIG_SUNRPC=y
+-# CONFIG_SUNRPC_BIND34 is not set
+-# CONFIG_RPCSEC_GSS_KRB5 is not set
+-# CONFIG_RPCSEC_GSS_SPKM3 is not set
+-# CONFIG_SMB_FS is not set
+-# CONFIG_CIFS is not set
+-# CONFIG_NCP_FS is not set
+-# CONFIG_CODA_FS is not set
+-# CONFIG_AFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-CONFIG_MSDOS_PARTITION=y
+-# CONFIG_BSD_DISKLABEL is not set
+-# CONFIG_MINIX_SUBPARTITION is not set
+-# CONFIG_SOLARIS_X86_PARTITION is not set
+-# CONFIG_UNIXWARE_DISKLABEL is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_KARMA_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-# CONFIG_SYSV68_PARTITION is not set
+-# CONFIG_NLS is not set
+-# CONFIG_DLM is not set
+-CONFIG_INSTRUMENTATION=y
+-# CONFIG_PROFILING is not set
+-# CONFIG_MARKERS is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-CONFIG_ENABLE_WARN_DEPRECATED=y
+-CONFIG_ENABLE_MUST_CHECK=y
+-CONFIG_MAGIC_SYSRQ=y
+-# CONFIG_UNUSED_SYMBOLS is not set
+-CONFIG_DEBUG_FS=y
+-# CONFIG_HEADERS_CHECK is not set
+-CONFIG_DEBUG_KERNEL=y
+-# CONFIG_DEBUG_SHIRQ is not set
+-CONFIG_DETECT_SOFTLOCKUP=y
+-CONFIG_SCHED_DEBUG=y
+-CONFIG_SCHEDSTATS=y
+-# CONFIG_TIMER_STATS is not set
+-# CONFIG_DEBUG_SLAB is not set
+-# CONFIG_DEBUG_RT_MUTEXES is not set
+-# CONFIG_RT_MUTEX_TESTER is not set
+-# CONFIG_DEBUG_SPINLOCK is not set
+-# CONFIG_DEBUG_MUTEXES is not set
+-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+-# CONFIG_DEBUG_KOBJECT is not set
+-CONFIG_DEBUG_BUGVERBOSE=y
+-# CONFIG_DEBUG_INFO is not set
+-# CONFIG_DEBUG_VM is not set
+-# CONFIG_DEBUG_LIST is not set
+-# CONFIG_DEBUG_SG is not set
+-CONFIG_FRAME_POINTER=y
+-CONFIG_FORCED_INLINING=y
+-# CONFIG_BOOT_PRINTK_DELAY is not set
+-# CONFIG_FAULT_INJECTION is not set
+-# CONFIG_SAMPLES is not set
+-# CONFIG_EARLY_PRINTK is not set
+-CONFIG_SH64_PROC_TLB=y
+-CONFIG_SH64_PROC_ASIDS=y
+-CONFIG_SH64_SR_WATCH=y
+-# CONFIG_POOR_MANS_STRACE is not set
+-# CONFIG_SH_NO_BSS_INIT is not set
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Library routines
+-#
+-CONFIG_BITREVERSE=y
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-# CONFIG_CRC_ITU_T is not set
+-CONFIG_CRC32=y
+-# CONFIG_CRC7 is not set
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_PLIST=y
+-CONFIG_HAS_IOMEM=y
+-CONFIG_HAS_IOPORT=y
+-CONFIG_HAS_DMA=y
+diff --git a/arch/sh64/configs/sim_defconfig b/arch/sh64/configs/sim_defconfig
+deleted file mode 100644
+index 18476cc..0000000
+--- a/arch/sh64/configs/sim_defconfig
++++ /dev/null
+@@ -1,558 +0,0 @@
+-#
+-# Automatically generated make config: don't edit
+-# Linux kernel version: 2.6.24-rc1
+-# Fri Nov  2 14:36:08 2007
+-#
+-CONFIG_SUPERH=y
+-CONFIG_SUPERH64=y
+-CONFIG_MMU=y
+-CONFIG_QUICKLIST=y
+-CONFIG_RWSEM_GENERIC_SPINLOCK=y
+-CONFIG_GENERIC_FIND_NEXT_BIT=y
+-CONFIG_GENERIC_HWEIGHT=y
+-CONFIG_GENERIC_CALIBRATE_DELAY=y
+-CONFIG_GENERIC_HARDIRQS=y
+-CONFIG_GENERIC_IRQ_PROBE=y
+-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+-CONFIG_ARCH_NO_VIRT_TO_BUS=y
+-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+-
+-#
+-# General setup
+-#
+-CONFIG_EXPERIMENTAL=y
+-CONFIG_BROKEN_ON_SMP=y
+-CONFIG_LOCK_KERNEL=y
+-CONFIG_INIT_ENV_ARG_LIMIT=32
+-CONFIG_LOCALVERSION=""
+-CONFIG_LOCALVERSION_AUTO=y
+-CONFIG_SWAP=y
+-# CONFIG_SYSVIPC is not set
+-# CONFIG_BSD_PROCESS_ACCT is not set
+-# CONFIG_USER_NS is not set
+-# CONFIG_IKCONFIG is not set
+-CONFIG_LOG_BUF_SHIFT=14
+-# CONFIG_CGROUPS is not set
+-CONFIG_FAIR_GROUP_SCHED=y
+-CONFIG_FAIR_USER_SCHED=y
+-# CONFIG_FAIR_CGROUP_SCHED is not set
+-CONFIG_SYSFS_DEPRECATED=y
+-# CONFIG_RELAY is not set
+-# CONFIG_BLK_DEV_INITRD is not set
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_SYSCTL=y
+-# CONFIG_EMBEDDED is not set
+-CONFIG_UID16=y
+-CONFIG_SYSCTL_SYSCALL=y
+-CONFIG_KALLSYMS=y
+-# CONFIG_KALLSYMS_ALL is not set
+-# CONFIG_KALLSYMS_EXTRA_PASS is not set
+-CONFIG_HOTPLUG=y
+-CONFIG_PRINTK=y
+-CONFIG_BUG=y
+-CONFIG_ELF_CORE=y
+-CONFIG_BASE_FULL=y
+-CONFIG_FUTEX=y
+-CONFIG_ANON_INODES=y
+-CONFIG_EPOLL=y
+-CONFIG_SIGNALFD=y
+-CONFIG_EVENTFD=y
+-CONFIG_SHMEM=y
+-CONFIG_VM_EVENT_COUNTERS=y
+-CONFIG_SLAB=y
+-# CONFIG_SLUB is not set
+-# CONFIG_SLOB is not set
+-CONFIG_RT_MUTEXES=y
+-# CONFIG_TINY_SHMEM is not set
+-CONFIG_BASE_SMALL=0
+-# CONFIG_MODULES is not set
+-CONFIG_BLOCK=y
+-# CONFIG_LBD is not set
+-# CONFIG_BLK_DEV_IO_TRACE is not set
+-# CONFIG_LSF is not set
+-# CONFIG_BLK_DEV_BSG is not set
+-
+-#
+-# IO Schedulers
+-#
+-CONFIG_IOSCHED_NOOP=y
+-CONFIG_IOSCHED_AS=y
+-CONFIG_IOSCHED_DEADLINE=y
+-CONFIG_IOSCHED_CFQ=y
+-# CONFIG_DEFAULT_AS is not set
+-# CONFIG_DEFAULT_DEADLINE is not set
+-CONFIG_DEFAULT_CFQ=y
+-# CONFIG_DEFAULT_NOOP is not set
+-CONFIG_DEFAULT_IOSCHED="cfq"
+-
+-#
+-# System type
+-#
+-CONFIG_SH_SIMULATOR=y
+-# CONFIG_SH_CAYMAN is not set
+-# CONFIG_SH_HARP is not set
+-CONFIG_CPU_SH5=y
+-CONFIG_CPU_SUBTYPE_SH5_101=y
+-# CONFIG_CPU_SUBTYPE_SH5_103 is not set
+-CONFIG_LITTLE_ENDIAN=y
+-# CONFIG_BIG_ENDIAN is not set
+-CONFIG_SH_FPU=y
+-# CONFIG_SH64_FPU_DENORM_FLUSH is not set
+-CONFIG_SH64_PGTABLE_2_LEVEL=y
+-# CONFIG_SH64_PGTABLE_3_LEVEL is not set
+-CONFIG_HUGETLB_PAGE_SIZE_64K=y
+-# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+-# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
+-CONFIG_SH64_USER_MISALIGNED_FIXUP=y
+-
+-#
+-# Memory options
+-#
+-CONFIG_CACHED_MEMORY_OFFSET=0x20000000
+-CONFIG_MEMORY_START=0x80000000
+-CONFIG_MEMORY_SIZE_IN_MB=128
+-
+-#
+-# Cache options
+-#
+-# CONFIG_DCACHE_WRITE_BACK is not set
+-# CONFIG_DCACHE_WRITE_THROUGH is not set
+-CONFIG_DCACHE_DISABLED=y
+-# CONFIG_ICACHE_DISABLED is not set
+-CONFIG_PCIDEVICE_MEMORY_START=C0000000
+-CONFIG_DEVICE_MEMORY_START=E0000000
+-CONFIG_FLASH_MEMORY_START=0x00000000
+-CONFIG_PCI_BLOCK_START=0x40000000
+-
+-#
+-# CPU Subtype specific options
+-#
+-CONFIG_SH64_ID2815_WORKAROUND=y
+-
+-#
+-# Misc options
+-#
+-# CONFIG_SH_DMA is not set
+-CONFIG_PREEMPT=y
+-CONFIG_SELECT_MEMORY_MODEL=y
+-CONFIG_FLATMEM_MANUAL=y
+-# CONFIG_DISCONTIGMEM_MANUAL is not set
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_FLATMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+-# CONFIG_SPARSEMEM_STATIC is not set
+-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+-CONFIG_SPLIT_PTLOCK_CPUS=4
+-# CONFIG_RESOURCES_64BIT is not set
+-CONFIG_ZONE_DMA_FLAG=0
+-CONFIG_NR_QUICK=1
+-
+-#
+-# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+-#
+-# CONFIG_ARCH_SUPPORTS_MSI is not set
+-# CONFIG_PCCARD is not set
+-
+-#
+-# Executable file formats
+-#
+-CONFIG_BINFMT_ELF=y
+-# CONFIG_BINFMT_MISC is not set
+-
+-#
+-# Networking
+-#
+-# CONFIG_NET is not set
+-
+-#
+-# Device Drivers
+-#
+-
+-#
+-# Generic Driver Options
+-#
+-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+-CONFIG_STANDALONE=y
+-CONFIG_PREVENT_FIRMWARE_BUILD=y
+-# CONFIG_FW_LOADER is not set
+-# CONFIG_DEBUG_DRIVER is not set
+-# CONFIG_DEBUG_DEVRES is not set
+-# CONFIG_SYS_HYPERVISOR is not set
+-# CONFIG_MTD is not set
+-# CONFIG_PARPORT is not set
+-# CONFIG_BLK_DEV is not set
+-# CONFIG_MISC_DEVICES is not set
+-# CONFIG_IDE is not set
+-
+-#
+-# SCSI device support
+-#
+-# CONFIG_RAID_ATTRS is not set
+-CONFIG_SCSI=y
+-CONFIG_SCSI_DMA=y
+-# CONFIG_SCSI_TGT is not set
+-# CONFIG_SCSI_NETLINK is not set
+-CONFIG_SCSI_PROC_FS=y
+-
+-#
+-# SCSI support type (disk, tape, CD-ROM)
+-#
+-CONFIG_BLK_DEV_SD=y
+-# CONFIG_CHR_DEV_ST is not set
+-# CONFIG_CHR_DEV_OSST is not set
+-# CONFIG_BLK_DEV_SR is not set
+-# CONFIG_CHR_DEV_SG is not set
+-# CONFIG_CHR_DEV_SCH is not set
+-
+-#
+-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+-#
+-CONFIG_SCSI_MULTI_LUN=y
+-# CONFIG_SCSI_CONSTANTS is not set
+-# CONFIG_SCSI_LOGGING is not set
+-# CONFIG_SCSI_SCAN_ASYNC is not set
+-
+-#
+-# SCSI Transports
+-#
+-CONFIG_SCSI_SPI_ATTRS=y
+-# CONFIG_SCSI_FC_ATTRS is not set
+-# CONFIG_SCSI_SAS_LIBSAS is not set
+-# CONFIG_SCSI_SRP_ATTRS is not set
+-CONFIG_SCSI_LOWLEVEL=y
+-# CONFIG_SCSI_DEBUG is not set
+-# CONFIG_ATA is not set
+-# CONFIG_MD is not set
+-# CONFIG_PHONE is not set
+-
+-#
+-# Input device support
+-#
+-CONFIG_INPUT=y
+-# CONFIG_INPUT_FF_MEMLESS is not set
+-# CONFIG_INPUT_POLLDEV is not set
+-
+-#
+-# Userland interfaces
+-#
+-CONFIG_INPUT_MOUSEDEV=y
+-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+-# CONFIG_INPUT_JOYDEV is not set
+-# CONFIG_INPUT_EVDEV is not set
+-# CONFIG_INPUT_EVBUG is not set
+-
+-#
+-# Input Device Drivers
+-#
+-# CONFIG_INPUT_KEYBOARD is not set
+-# CONFIG_INPUT_MOUSE is not set
+-# CONFIG_INPUT_JOYSTICK is not set
+-# CONFIG_INPUT_TABLET is not set
+-# CONFIG_INPUT_TOUCHSCREEN is not set
+-# CONFIG_INPUT_MISC is not set
+-
+-#
+-# Hardware I/O ports
+-#
+-# CONFIG_SERIO is not set
+-# CONFIG_GAMEPORT is not set
+-
+-#
+-# Character devices
+-#
+-CONFIG_VT=y
+-CONFIG_VT_CONSOLE=y
+-CONFIG_HW_CONSOLE=y
+-# CONFIG_VT_HW_CONSOLE_BINDING is not set
+-# CONFIG_SERIAL_NONSTANDARD is not set
+-
+-#
+-# Serial drivers
+-#
+-# CONFIG_SERIAL_8250 is not set
+-
+-#
+-# Non-8250 serial port support
+-#
+-CONFIG_SERIAL_SH_SCI=y
+-CONFIG_SERIAL_SH_SCI_NR_UARTS=2
+-CONFIG_SERIAL_SH_SCI_CONSOLE=y
+-CONFIG_SERIAL_CORE=y
+-CONFIG_SERIAL_CORE_CONSOLE=y
+-CONFIG_UNIX98_PTYS=y
+-# CONFIG_LEGACY_PTYS is not set
+-# CONFIG_IPMI_HANDLER is not set
+-# CONFIG_HW_RANDOM is not set
+-# CONFIG_R3964 is not set
+-# CONFIG_RAW_DRIVER is not set
+-# CONFIG_TCG_TPM is not set
+-# CONFIG_I2C is not set
+-
+-#
+-# SPI support
+-#
+-# CONFIG_SPI is not set
+-# CONFIG_SPI_MASTER is not set
+-# CONFIG_W1 is not set
+-# CONFIG_POWER_SUPPLY is not set
+-# CONFIG_HWMON is not set
+-# CONFIG_WATCHDOG is not set
+-
+-#
+-# Sonics Silicon Backplane
+-#
+-CONFIG_SSB_POSSIBLE=y
+-# CONFIG_SSB is not set
+-
+-#
+-# Multifunction device drivers
+-#
+-# CONFIG_MFD_SM501 is not set
+-
+-#
+-# Multimedia devices
+-#
+-# CONFIG_VIDEO_DEV is not set
+-CONFIG_DAB=y
+-
+-#
+-# Graphics support
+-#
+-# CONFIG_VGASTATE is not set
+-CONFIG_VIDEO_OUTPUT_CONTROL=y
+-CONFIG_FB=y
+-CONFIG_FIRMWARE_EDID=y
+-# CONFIG_FB_DDC is not set
+-# CONFIG_FB_CFB_FILLRECT is not set
+-# CONFIG_FB_CFB_COPYAREA is not set
+-# CONFIG_FB_CFB_IMAGEBLIT is not set
+-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+-# CONFIG_FB_SYS_FILLRECT is not set
+-# CONFIG_FB_SYS_COPYAREA is not set
+-# CONFIG_FB_SYS_IMAGEBLIT is not set
+-# CONFIG_FB_SYS_FOPS is not set
+-CONFIG_FB_DEFERRED_IO=y
+-# CONFIG_FB_SVGALIB is not set
+-# CONFIG_FB_MACMODES is not set
+-# CONFIG_FB_BACKLIGHT is not set
+-CONFIG_FB_MODE_HELPERS=y
+-# CONFIG_FB_TILEBLITTING is not set
+-
+-#
+-# Frame buffer hardware drivers
+-#
+-# CONFIG_FB_S1D13XXX is not set
+-# CONFIG_FB_VIRTUAL is not set
+-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-
+-#
+-# Display device support
+-#
+-# CONFIG_DISPLAY_SUPPORT is not set
+-
+-#
+-# Console display driver support
+-#
+-CONFIG_DUMMY_CONSOLE=y
+-CONFIG_FRAMEBUFFER_CONSOLE=y
+-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+-CONFIG_FONTS=y
+-# CONFIG_FONT_8x8 is not set
+-CONFIG_FONT_8x16=y
+-# CONFIG_FONT_6x11 is not set
+-# CONFIG_FONT_7x14 is not set
+-# CONFIG_FONT_PEARL_8x8 is not set
+-# CONFIG_FONT_ACORN_8x8 is not set
+-# CONFIG_FONT_MINI_4x6 is not set
+-# CONFIG_FONT_SUN8x16 is not set
+-# CONFIG_FONT_SUN12x22 is not set
+-# CONFIG_FONT_10x18 is not set
+-CONFIG_LOGO=y
+-# CONFIG_LOGO_LINUX_MONO is not set
+-# CONFIG_LOGO_LINUX_VGA16 is not set
+-# CONFIG_LOGO_LINUX_CLUT224 is not set
+-# CONFIG_LOGO_SUPERH_MONO is not set
+-# CONFIG_LOGO_SUPERH_VGA16 is not set
+-CONFIG_LOGO_SUPERH_CLUT224=y
+-
+-#
+-# Sound
+-#
+-# CONFIG_SOUND is not set
+-# CONFIG_HID_SUPPORT is not set
+-# CONFIG_USB_SUPPORT is not set
+-# CONFIG_MMC is not set
+-# CONFIG_NEW_LEDS is not set
+-# CONFIG_RTC_CLASS is not set
+-
+-#
+-# Userspace I/O
+-#
+-# CONFIG_UIO is not set
+-
+-#
+-# File systems
+-#
+-CONFIG_EXT2_FS=y
+-# CONFIG_EXT2_FS_XATTR is not set
+-# CONFIG_EXT2_FS_XIP is not set
+-CONFIG_EXT3_FS=y
+-CONFIG_EXT3_FS_XATTR=y
+-# CONFIG_EXT3_FS_POSIX_ACL is not set
+-# CONFIG_EXT3_FS_SECURITY is not set
+-# CONFIG_EXT4DEV_FS is not set
+-CONFIG_JBD=y
+-# CONFIG_JBD_DEBUG is not set
+-CONFIG_FS_MBCACHE=y
+-# CONFIG_REISERFS_FS is not set
+-# CONFIG_JFS_FS is not set
+-# CONFIG_FS_POSIX_ACL is not set
+-# CONFIG_XFS_FS is not set
+-# CONFIG_GFS2_FS is not set
+-CONFIG_MINIX_FS=y
+-CONFIG_ROMFS_FS=y
+-CONFIG_INOTIFY=y
+-CONFIG_INOTIFY_USER=y
+-# CONFIG_QUOTA is not set
+-CONFIG_DNOTIFY=y
+-# CONFIG_AUTOFS_FS is not set
+-# CONFIG_AUTOFS4_FS is not set
+-# CONFIG_FUSE_FS is not set
+-
+-#
+-# CD-ROM/DVD Filesystems
+-#
+-# CONFIG_ISO9660_FS is not set
+-# CONFIG_UDF_FS is not set
+-
+-#
+-# DOS/FAT/NT Filesystems
+-#
+-# CONFIG_MSDOS_FS is not set
+-# CONFIG_VFAT_FS is not set
+-# CONFIG_NTFS_FS is not set
+-
+-#
+-# Pseudo filesystems
+-#
+-CONFIG_PROC_FS=y
+-CONFIG_PROC_KCORE=y
+-CONFIG_PROC_SYSCTL=y
+-CONFIG_SYSFS=y
+-CONFIG_TMPFS=y
+-# CONFIG_TMPFS_POSIX_ACL is not set
+-CONFIG_HUGETLBFS=y
+-CONFIG_HUGETLB_PAGE=y
+-# CONFIG_CONFIGFS_FS is not set
+-
+-#
+-# Miscellaneous filesystems
+-#
+-# CONFIG_ADFS_FS is not set
+-# CONFIG_AFFS_FS is not set
+-# CONFIG_HFS_FS is not set
+-# CONFIG_HFSPLUS_FS is not set
+-# CONFIG_BEFS_FS is not set
+-# CONFIG_BFS_FS is not set
+-# CONFIG_EFS_FS is not set
+-# CONFIG_CRAMFS is not set
+-# CONFIG_VXFS_FS is not set
+-# CONFIG_HPFS_FS is not set
+-# CONFIG_QNX4FS_FS is not set
+-# CONFIG_SYSV_FS is not set
+-# CONFIG_UFS_FS is not set
+-
+-#
+-# Partition Types
+-#
+-CONFIG_PARTITION_ADVANCED=y
+-# CONFIG_ACORN_PARTITION is not set
+-# CONFIG_OSF_PARTITION is not set
+-# CONFIG_AMIGA_PARTITION is not set
+-# CONFIG_ATARI_PARTITION is not set
+-# CONFIG_MAC_PARTITION is not set
+-CONFIG_MSDOS_PARTITION=y
+-# CONFIG_BSD_DISKLABEL is not set
+-# CONFIG_MINIX_SUBPARTITION is not set
+-# CONFIG_SOLARIS_X86_PARTITION is not set
+-# CONFIG_UNIXWARE_DISKLABEL is not set
+-# CONFIG_LDM_PARTITION is not set
+-# CONFIG_SGI_PARTITION is not set
+-# CONFIG_ULTRIX_PARTITION is not set
+-# CONFIG_SUN_PARTITION is not set
+-# CONFIG_KARMA_PARTITION is not set
+-# CONFIG_EFI_PARTITION is not set
+-# CONFIG_SYSV68_PARTITION is not set
+-# CONFIG_NLS is not set
+-CONFIG_INSTRUMENTATION=y
+-CONFIG_PROFILING=y
+-# CONFIG_OPROFILE is not set
+-# CONFIG_MARKERS is not set
+-
+-#
+-# Kernel hacking
+-#
+-# CONFIG_PRINTK_TIME is not set
+-CONFIG_ENABLE_WARN_DEPRECATED=y
+-CONFIG_ENABLE_MUST_CHECK=y
+-CONFIG_MAGIC_SYSRQ=y
+-# CONFIG_UNUSED_SYMBOLS is not set
+-CONFIG_DEBUG_FS=y
+-# CONFIG_HEADERS_CHECK is not set
+-CONFIG_DEBUG_KERNEL=y
+-# CONFIG_DEBUG_SHIRQ is not set
+-CONFIG_DETECT_SOFTLOCKUP=y
+-CONFIG_SCHED_DEBUG=y
+-CONFIG_SCHEDSTATS=y
+-# CONFIG_TIMER_STATS is not set
+-# CONFIG_DEBUG_SLAB is not set
+-# CONFIG_DEBUG_RT_MUTEXES is not set
+-# CONFIG_RT_MUTEX_TESTER is not set
+-# CONFIG_DEBUG_SPINLOCK is not set
+-# CONFIG_DEBUG_MUTEXES is not set
+-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+-# CONFIG_DEBUG_KOBJECT is not set
+-CONFIG_DEBUG_BUGVERBOSE=y
+-# CONFIG_DEBUG_INFO is not set
+-# CONFIG_DEBUG_VM is not set
+-# CONFIG_DEBUG_LIST is not set
+-# CONFIG_DEBUG_SG is not set
+-CONFIG_FRAME_POINTER=y
+-CONFIG_FORCED_INLINING=y
+-# CONFIG_BOOT_PRINTK_DELAY is not set
+-# CONFIG_FAULT_INJECTION is not set
+-# CONFIG_SAMPLES is not set
+-# CONFIG_EARLY_PRINTK is not set
+-CONFIG_SH64_PROC_TLB=y
+-CONFIG_SH64_PROC_ASIDS=y
+-CONFIG_SH64_SR_WATCH=y
+-# CONFIG_POOR_MANS_STRACE is not set
+-CONFIG_SH_NO_BSS_INIT=y
+-
+-#
+-# Security options
+-#
+-# CONFIG_KEYS is not set
+-# CONFIG_SECURITY is not set
+-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+-# CONFIG_CRYPTO is not set
+-
+-#
+-# Library routines
+-#
+-CONFIG_BITREVERSE=y
+-# CONFIG_CRC_CCITT is not set
+-# CONFIG_CRC16 is not set
+-# CONFIG_CRC_ITU_T is not set
+-CONFIG_CRC32=y
+-# CONFIG_CRC7 is not set
+-# CONFIG_LIBCRC32C is not set
+-CONFIG_PLIST=y
+-CONFIG_HAS_IOMEM=y
+-CONFIG_HAS_IOPORT=y
+-CONFIG_HAS_DMA=y
+diff --git a/arch/sh64/kernel/Makefile b/arch/sh64/kernel/Makefile
+deleted file mode 100644
+index e3467bd..0000000
+--- a/arch/sh64/kernel/Makefile
++++ /dev/null
+@@ -1,36 +0,0 @@
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 2000, 2001  Paolo Alberelli
+-# Copyright (C) 2003  Paul Mundt
+-#
+-# Makefile for the Linux sh64 kernel.
+-#
+-# Note! Dependencies are done automagically by 'make dep', which also
+-# removes any old dependencies. DON'T put your own dependencies here
+-# unless it's something special (ie not a .c file).
+-#
+-
+-extra-y	:= head.o init_task.o vmlinux.lds
+-
+-obj-y	:= process.o signal.o entry.o traps.o irq.o irq_intc.o \
+-	   ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \
+-	   switchto.o syscalls.o
+-
+-obj-$(CONFIG_HEARTBEAT)		+= led.o
+-obj-$(CONFIG_SH_ALPHANUMERIC)	+= alphanum.o
+-obj-$(CONFIG_SH_DMA)		+= dma.o
+-obj-$(CONFIG_SH_FPU)		+= fpu.o
+-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+-obj-$(CONFIG_KALLSYMS)		+= unwind.o
+-obj-$(CONFIG_PCI)		+= pcibios.o
+-obj-$(CONFIG_MODULES)		+= module.o
+-
+-ifeq ($(CONFIG_PCI),y)
+-obj-$(CONFIG_CPU_SH5)		+= pci_sh5.o
+-endif
+-
+-USE_STANDARD_AS_RULE := true
+-
+diff --git a/arch/sh64/kernel/alphanum.c b/arch/sh64/kernel/alphanum.c
+deleted file mode 100644
+index d1619d9..0000000
+--- a/arch/sh64/kernel/alphanum.c
++++ /dev/null
+@@ -1,43 +0,0 @@
+-/*
+- * arch/sh64/kernel/alphanum.c
+- *
+- * Copyright (C) 2002 Stuart Menefy <stuart.menefy at st.com>
+- *
+- * May be copied or modified under the terms of the GNU General Public
+- * License.  See linux/COPYING for more information.
+- *
+- * Machine-independent functions for handling 8-digit alphanumeric display
+- * (e.g. Agilent HDSP-253x)
+- */
+-#include <linux/stddef.h>
+-#include <linux/sched.h>
+-
+-void mach_alphanum(int pos, unsigned char val);
+-
+-void print_seg(char *file, int line)
+-{
+-	int i;
+-	unsigned int nibble;
+-
+-	for (i = 0; i < 5; i++) {
+-		mach_alphanum(i, file[i]);
+-	}
+-
+-	for (i = 0; i < 3; i++) {
+-		nibble = ((line >> (i * 4)) & 0xf);
+-		mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
+-	}
+-}
+-
+-void print_seg_num(unsigned num)
+-{
+-	int i;
+-	unsigned int nibble;
+-
+-	for (i = 0; i < 8; i++) {
+-		nibble = ((num >> (i * 4)) & 0xf);
+-
+-		mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
+-	}
+-}
+-
+diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c
+deleted file mode 100644
+index ca76537..0000000
+--- a/arch/sh64/kernel/asm-offsets.c
++++ /dev/null
+@@ -1,33 +0,0 @@
+-/*
+- * This program is used to generate definitions needed by
+- * assembly language modules.
+- *
+- * We use the technique used in the OSF Mach kernel code:
+- * generate asm statements containing #defines,
+- * compile this file to assembler, and then extract the
+- * #defines from the assembly-language output.
+- */
+-
+-#include <linux/stddef.h>
+-#include <linux/types.h>
+-#include <linux/mm.h>
+-#include <asm/thread_info.h>
+-
+-#define DEFINE(sym, val) \
+-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+-
+-#define BLANK() asm volatile("\n->" : : )
+-
+-int main(void)
+-{
+-	/* offsets into the thread_info struct */
+-	DEFINE(TI_TASK,		offsetof(struct thread_info, task));
+-	DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
+-	DEFINE(TI_FLAGS,	offsetof(struct thread_info, flags));
+-	DEFINE(TI_PRE_COUNT,	offsetof(struct thread_info, preempt_count));
+-	DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
+-	DEFINE(TI_ADDR_LIMIT,	offsetof(struct thread_info, addr_limit));
+-	DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
+-
+-	return 0;
+-}
+diff --git a/arch/sh64/kernel/dma.c b/arch/sh64/kernel/dma.c
+deleted file mode 100644
+index 32c6f05..0000000
+--- a/arch/sh64/kernel/dma.c
++++ /dev/null
+@@ -1,297 +0,0 @@
+-/*
+- * arch/sh64/kernel/dma.c
+- *
+- * DMA routines for the SH-5 DMAC.
+- *
+- * Copyright (C) 2003  Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/interrupt.h>
+-#include <linux/types.h>
+-#include <linux/irq.h>
+-#include <linux/spinlock.h>
+-#include <linux/mm.h>
+-#include <asm/hardware.h>
+-#include <asm/dma.h>
+-#include <asm/signal.h>
+-#include <asm/errno.h>
+-#include <asm/io.h>
+-
+-typedef struct {
+-	unsigned long dev_addr;
+-	unsigned long mem_addr;
+-
+-	unsigned int mode;
+-	unsigned int count;
+-} dma_info_t;
+-
+-static dma_info_t dma_info[MAX_DMA_CHANNELS];
+-static DEFINE_SPINLOCK(dma_spin_lock);
+-
+-/* arch/sh64/kernel/irq_intc.c */
+-extern void make_intc_irq(unsigned int irq);
+-
+-/* DMAC Interrupts */
+-#define DMA_IRQ_DMTE0	18
+-#define DMA_IRQ_DERR	22
+-
+-#define DMAC_COMMON_BASE	(dmac_base + 0x08)
+-#define DMAC_SAR_BASE		(dmac_base + 0x10)
+-#define DMAC_DAR_BASE		(dmac_base + 0x18)
+-#define DMAC_COUNT_BASE		(dmac_base + 0x20)
+-#define DMAC_CTRL_BASE		(dmac_base + 0x28)
+-#define DMAC_STATUS_BASE	(dmac_base + 0x30)
+-
+-#define DMAC_SAR(n)	(DMAC_SAR_BASE    + ((n) * 0x28))
+-#define DMAC_DAR(n)	(DMAC_DAR_BASE    + ((n) * 0x28))
+-#define DMAC_COUNT(n)	(DMAC_COUNT_BASE  + ((n) * 0x28))
+-#define DMAC_CTRL(n)	(DMAC_CTRL_BASE   + ((n) * 0x28))
+-#define DMAC_STATUS(n)	(DMAC_STATUS_BASE + ((n) * 0x28))
+-
+-/* DMAC.COMMON Bit Definitions */
+-#define DMAC_COMMON_PR	0x00000001	/* Priority */
+-					/* Bits 1-2 Reserved */
+-#define DMAC_COMMON_ME	0x00000008	/* Master Enable */
+-#define DMAC_COMMON_NMI	0x00000010	/* NMI Flag */
+-					/* Bits 5-6 Reserved */
+-#define DMAC_COMMON_ER	0x00000780	/* Error Response */
+-#define DMAC_COMMON_AAE	0x00007800	/* Address Alignment Error */
+-					/* Bits 15-63 Reserved */
+-
+-/* DMAC.SAR Bit Definitions */
+-#define DMAC_SAR_ADDR	0xffffffff	/* Source Address */
+-
+-/* DMAC.DAR Bit Definitions */
+-#define DMAC_DAR_ADDR	0xffffffff	/* Destination Address */
+-
+-/* DMAC.COUNT Bit Definitions */
+-#define DMAC_COUNT_CNT	0xffffffff	/* Transfer Count */
+-
+-/* DMAC.CTRL Bit Definitions */
+-#define DMAC_CTRL_TS	0x00000007	/* Transfer Size */
+-#define DMAC_CTRL_SI	0x00000018	/* Source Increment */
+-#define DMAC_CTRL_DI	0x00000060	/* Destination Increment */
+-#define DMAC_CTRL_RS	0x00000780	/* Resource Select */
+-#define DMAC_CTRL_IE	0x00000800	/* Interrupt Enable */
+-#define DMAC_CTRL_TE	0x00001000	/* Transfer Enable */
+-					/* Bits 15-63 Reserved */
+-
+-/* DMAC.STATUS Bit Definitions */
+-#define DMAC_STATUS_TE	0x00000001	/* Transfer End */
+-#define DMAC_STATUS_AAE	0x00000002	/* Address Alignment Error */
+-					/* Bits 2-63 Reserved */
+-
+-static unsigned long dmac_base;
+-
+-void set_dma_count(unsigned int chan, unsigned int count);
+-void set_dma_addr(unsigned int chan, unsigned int addr);
+-
+-static irqreturn_t dma_mte(int irq, void *dev_id, struct pt_regs *regs)
+-{
+-	unsigned int chan = irq - DMA_IRQ_DMTE0;
+-	dma_info_t *info = dma_info + chan;
+-	u64 status;
+-
+-	if (info->mode & DMA_MODE_WRITE) {
+-		sh64_out64(info->mem_addr & DMAC_SAR_ADDR, DMAC_SAR(chan));
+-	} else {
+-		sh64_out64(info->mem_addr & DMAC_DAR_ADDR, DMAC_DAR(chan));
+-	}
+-
+-	set_dma_count(chan, info->count);
+-
+-	/* Clear the TE bit */
+-	status = sh64_in64(DMAC_STATUS(chan));
+-	status &= ~DMAC_STATUS_TE;
+-	sh64_out64(status, DMAC_STATUS(chan));
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static struct irqaction irq_dmte = {
+-	.handler	= dma_mte,
+-	.flags		= IRQF_DISABLED,
+-	.name		= "DMA MTE",
+-};
+-
+-static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
+-{
+-	u64 tmp;
+-	u8 chan;
+-
+-	printk(KERN_NOTICE "DMAC: Got a DMA Error!\n");
+-
+-	tmp = sh64_in64(DMAC_COMMON_BASE);
+-
+-	/* Check for the type of error */
+-	if ((chan = tmp & DMAC_COMMON_AAE)) {
+-		/* It's an address alignment error.. */
+-		printk(KERN_NOTICE "DMAC: Alignment error on channel %d, ", chan);
+-
+-		printk(KERN_NOTICE "SAR: 0x%08llx, DAR: 0x%08llx, COUNT: %lld\n",
+-		       (sh64_in64(DMAC_SAR(chan)) & DMAC_SAR_ADDR),
+-		       (sh64_in64(DMAC_DAR(chan)) & DMAC_DAR_ADDR),
+-		       (sh64_in64(DMAC_COUNT(chan)) & DMAC_COUNT_CNT));
+-
+-	} else if ((chan = tmp & DMAC_COMMON_ER)) {
+-		/* Something else went wrong.. */
+-		printk(KERN_NOTICE "DMAC: Error on channel %d\n", chan);
+-	}
+-
+-	/* Reset the ME bit to clear the interrupt */
+-	tmp |= DMAC_COMMON_ME;
+-	sh64_out64(tmp, DMAC_COMMON_BASE);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static struct irqaction irq_derr = {
+-	.handler	= dma_err,
+-	.flags		= IRQF_DISABLED,
+-	.name		= "DMA Error",
+-};
+-
+-static inline unsigned long calc_xmit_shift(unsigned int chan)
+-{
+-	return sh64_in64(DMAC_CTRL(chan)) & 0x03;
+-}
+-
+-void setup_dma(unsigned int chan, dma_info_t *info)
+-{
+-	unsigned int irq = DMA_IRQ_DMTE0 + chan;
+-	dma_info_t *dma = dma_info + chan;
+-
+-	make_intc_irq(irq);
+-	setup_irq(irq, &irq_dmte);
+-	dma = info;
+-}
+-
+-void enable_dma(unsigned int chan)
+-{
+-	u64 ctrl;
+-
+-	ctrl = sh64_in64(DMAC_CTRL(chan));
+-	ctrl |= DMAC_CTRL_TE;
+-	sh64_out64(ctrl, DMAC_CTRL(chan));
+-}
+-
+-void disable_dma(unsigned int chan)
+-{
+-	u64 ctrl;
+-
+-	ctrl = sh64_in64(DMAC_CTRL(chan));
+-	ctrl &= ~DMAC_CTRL_TE;
+-	sh64_out64(ctrl, DMAC_CTRL(chan));
+-}
+-
+-void set_dma_mode(unsigned int chan, char mode)
+-{
+-	dma_info_t *info = dma_info + chan;
+-
+-	info->mode = mode;
+-
+-	set_dma_addr(chan, info->mem_addr);
+-	set_dma_count(chan, info->count);
+-}
+-
+-void set_dma_addr(unsigned int chan, unsigned int addr)
+-{
+-	dma_info_t *info = dma_info + chan;
+-	unsigned long sar, dar;
+-
+-	info->mem_addr = addr;
+-	sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr;
+-	dar = (info->mode & DMA_MODE_WRITE) ? info->dev_addr : info->mem_addr;
+-
+-	sh64_out64(sar & DMAC_SAR_ADDR, DMAC_SAR(chan));
+-	sh64_out64(dar & DMAC_SAR_ADDR, DMAC_DAR(chan));
+-}
+-
+-void set_dma_count(unsigned int chan, unsigned int count)
+-{
+-	dma_info_t *info = dma_info + chan;
+-	u64 tmp;
+-
+-	info->count = count;
+-
+-	tmp = (info->count >> calc_xmit_shift(chan)) & DMAC_COUNT_CNT;
+-
+-	sh64_out64(tmp, DMAC_COUNT(chan));
+-}
+-
+-unsigned long claim_dma_lock(void)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dma_spin_lock, flags);
+-
+-	return flags;
+-}
+-
+-void release_dma_lock(unsigned long flags)
+-{
+-	spin_unlock_irqrestore(&dma_spin_lock, flags);
+-}
+-
+-int get_dma_residue(unsigned int chan)
+-{
+-	return sh64_in64(DMAC_COUNT(chan) << calc_xmit_shift(chan));
+-}
+-
+-int __init init_dma(void)
+-{
+-	struct vcr_info vcr;
+-	u64 tmp;
+-
+-	/* Remap the DMAC */
+-	dmac_base = onchip_remap(PHYS_DMAC_BLOCK, 1024, "DMAC");
+-	if (!dmac_base) {
+-		printk(KERN_ERR "Unable to remap DMAC\n");
+-		return -ENOMEM;
+-	}
+-
+-	/* Report DMAC.VCR Info */
+-	vcr = sh64_get_vcr_info(dmac_base);
+-	printk("DMAC: Module ID: 0x%04x, Module version: 0x%04x\n",
+-	       vcr.mod_id, vcr.mod_vers);
+-
+-	/* Set the ME bit */
+-	tmp = sh64_in64(DMAC_COMMON_BASE);
+-	tmp |= DMAC_COMMON_ME;
+-	sh64_out64(tmp, DMAC_COMMON_BASE);
+-
+-	/* Enable the DMAC Error Interrupt */
+-	make_intc_irq(DMA_IRQ_DERR);
+-	setup_irq(DMA_IRQ_DERR, &irq_derr);
+-
+-	return 0;
+-}
+-
+-static void __exit exit_dma(void)
+-{
+-	onchip_unmap(dmac_base);
+-	free_irq(DMA_IRQ_DERR, 0);
+-}
+-
+-module_init(init_dma);
+-module_exit(exit_dma);
+-
+-MODULE_AUTHOR("Paul Mundt");
+-MODULE_DESCRIPTION("DMA API for SH-5 DMAC");
+-MODULE_LICENSE("GPL");
+-
+-EXPORT_SYMBOL(setup_dma);
+-EXPORT_SYMBOL(claim_dma_lock);
+-EXPORT_SYMBOL(release_dma_lock);
+-EXPORT_SYMBOL(enable_dma);
+-EXPORT_SYMBOL(disable_dma);
+-EXPORT_SYMBOL(set_dma_mode);
+-EXPORT_SYMBOL(set_dma_addr);
+-EXPORT_SYMBOL(set_dma_count);
+-EXPORT_SYMBOL(get_dma_residue);
+-
+diff --git a/arch/sh64/kernel/early_printk.c b/arch/sh64/kernel/early_printk.c
+deleted file mode 100644
+index 4f91311..0000000
+--- a/arch/sh64/kernel/early_printk.c
++++ /dev/null
+@@ -1,99 +0,0 @@
+-/*
+- * arch/sh64/kernel/early_printk.c
+- *
+- * SH-5 Early SCIF console (cloned and hacked from sh implementation)
+- *
+- * Copyright (C) 2003, 2004  Paul Mundt <lethal at linux-sh.org>
+- * Copyright (C) 2002  M. R. Brown <mrbrown at 0xd6.org>
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-#include <linux/console.h>
+-#include <linux/tty.h>
+-#include <linux/init.h>
+-#include <asm/io.h>
+-#include <asm/hardware.h>
+-
+-#define SCIF_BASE_ADDR	0x01030000
+-#define SCIF_ADDR_SH5	PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR
+-
+-/*
+- * Fixed virtual address where SCIF is mapped (should already be done
+- * in arch/sh64/kernel/head.S!).
+- */
+-#define SCIF_REG	0xfa030000
+-
+-enum {
+-	SCIF_SCSMR2	= SCIF_REG + 0x00,
+-	SCIF_SCBRR2	= SCIF_REG + 0x04,
+-	SCIF_SCSCR2	= SCIF_REG + 0x08,
+-	SCIF_SCFTDR2	= SCIF_REG + 0x0c,
+-	SCIF_SCFSR2	= SCIF_REG + 0x10,
+-	SCIF_SCFRDR2	= SCIF_REG + 0x14,
+-	SCIF_SCFCR2	= SCIF_REG + 0x18,
+-	SCIF_SCFDR2	= SCIF_REG + 0x1c,
+-	SCIF_SCSPTR2	= SCIF_REG + 0x20,
+-	SCIF_SCLSR2	= SCIF_REG + 0x24,
+-};
+-
+-static void sh_console_putc(int c)
+-{
+-	while (!(ctrl_inw(SCIF_SCFSR2) & 0x20))
+-		cpu_relax();
+-
+-	ctrl_outb(c, SCIF_SCFTDR2);
+-	ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2);
+-
+-	if (c == '\n')
+-		sh_console_putc('\r');
+-}
+-
+-static void sh_console_flush(void)
+-{
+-	ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
+-
+-	while (!(ctrl_inw(SCIF_SCFSR2) & 0x40))
+-		cpu_relax();
+-
+-	ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
+-}
+-
+-static void sh_console_write(struct console *con, const char *s, unsigned count)
+-{
+-	while (count-- > 0)
+-		sh_console_putc(*s++);
+-
+-	sh_console_flush();
+-}
+-
+-static int __init sh_console_setup(struct console *con, char *options)
+-{
+-	con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8;
+-
+-	return 0;
+-}
+-
+-static struct console sh_console = {
+-	.name		= "scifcon",
+-	.write		= sh_console_write,
+-	.setup		= sh_console_setup,
+-	.flags		= CON_PRINTBUFFER | CON_BOOT,
+-	.index		= -1,
+-};
+-
+-void __init enable_early_printk(void)
+-{
+-	ctrl_outb(0x2a, SCIF_SCBRR2);	/* 19200bps */
+-
+-	ctrl_outw(0x04, SCIF_SCFCR2);	/* Reset TFRST */
+-	ctrl_outw(0x10, SCIF_SCFCR2);	/* TTRG0=1 */
+-
+-	ctrl_outw(0, SCIF_SCSPTR2);
+-	ctrl_outw(0x60, SCIF_SCFSR2);
+-	ctrl_outw(0, SCIF_SCLSR2);
+-	ctrl_outw(0x30, SCIF_SCSCR2);
+-
+-	register_console(&sh_console);
+-}
+diff --git a/arch/sh64/kernel/entry.S b/arch/sh64/kernel/entry.S
+deleted file mode 100644
+index 7013fcb..0000000
+--- a/arch/sh64/kernel/entry.S
++++ /dev/null
+@@ -1,2102 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/entry.S
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2004, 2005  Paul Mundt
+- * Copyright (C) 2003, 2004 Richard Curnow
+- *
+- */
+-
+-#include <linux/errno.h>
+-#include <linux/sys.h>
+-
+-#include <asm/processor.h>
+-#include <asm/registers.h>
+-#include <asm/unistd.h>
+-#include <asm/thread_info.h>
+-#include <asm/asm-offsets.h>
+-
+-/*
+- * SR fields.
+- */
+-#define SR_ASID_MASK	0x00ff0000
+-#define SR_FD_MASK	0x00008000
+-#define SR_SS		0x08000000
+-#define SR_BL		0x10000000
+-#define SR_MD		0x40000000
+-
+-/*
+- * Event code.
+- */
+-#define	EVENT_INTERRUPT		0
+-#define	EVENT_FAULT_TLB		1
+-#define	EVENT_FAULT_NOT_TLB	2
+-#define	EVENT_DEBUG		3
+-
+-/* EXPEVT values */
+-#define	RESET_CAUSE		0x20
+-#define DEBUGSS_CAUSE		0x980
+-
+-/*
+- * Frame layout. Quad index.
+- */
+-#define	FRAME_T(x)	FRAME_TBASE+(x*8)
+-#define	FRAME_R(x)	FRAME_RBASE+(x*8)
+-#define	FRAME_S(x)	FRAME_SBASE+(x*8)
+-#define FSPC		0
+-#define FSSR		1
+-#define FSYSCALL_ID	2
+-
+-/* Arrange the save frame to be a multiple of 32 bytes long */
+-#define FRAME_SBASE	0
+-#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
+-#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
+-#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */
+-#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */
+-
+-#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
+-#define FP_FRAME_BASE	0
+-
+-#define	SAVED_R2	0*8
+-#define	SAVED_R3	1*8
+-#define	SAVED_R4	2*8
+-#define	SAVED_R5	3*8
+-#define	SAVED_R18	4*8
+-#define	SAVED_R6	5*8
+-#define	SAVED_TR0	6*8
+-
+-/* These are the registers saved in the TLB path that aren't saved in the first
+-   level of the normal one. */
+-#define	TLB_SAVED_R25	7*8
+-#define	TLB_SAVED_TR1	8*8
+-#define	TLB_SAVED_TR2	9*8
+-#define	TLB_SAVED_TR3	10*8
+-#define	TLB_SAVED_TR4	11*8
+-/* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
+-   breakage otherwise. */
+-#define	TLB_SAVED_R0	12*8
+-#define	TLB_SAVED_R1	13*8
+-
+-#define CLI()				\
+-	getcon	SR, r6;			\
+-	ori	r6, 0xf0, r6;		\
+-	putcon	r6, SR;
+-
+-#define STI()				\
+-	getcon	SR, r6;			\
+-	andi	r6, ~0xf0, r6;		\
+-	putcon	r6, SR;
+-
+-#ifdef CONFIG_PREEMPT
+-#  define preempt_stop()	CLI()
+-#else
+-#  define preempt_stop()
+-#  define resume_kernel		restore_all
+-#endif
+-
+-	.section	.data, "aw"
+-
+-#define FAST_TLBMISS_STACK_CACHELINES 4
+-#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
+-
+-/* Register back-up area for all exceptions */
+-	.balign	32
+-	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
+-	 * register saves etc. */
+-	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
+-/* This is 32 byte aligned by construction */
+-/* Register back-up area for all exceptions */
+-reg_save_area:
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-
+-	.quad	0
+-	.quad   0
+-
+-/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
+- * reentrancy. Note this area may be accessed via physical address.
+- * Align so this fits a whole single cache line, for ease of purging.
+- */
+-	.balign 32,0,32
+-resvec_save_area:
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-	.quad	0
+-	.balign 32,0,32
+-
+-/* Jump table of 3rd level handlers  */
+-trap_jtable:
+-	.long	do_exception_error		/* 0x000 */
+-	.long	do_exception_error		/* 0x020 */
+-	.long	tlb_miss_load				/* 0x040 */
+-	.long	tlb_miss_store				/* 0x060 */
+-	! ARTIFICIAL pseudo-EXPEVT setting
+-	.long	do_debug_interrupt		/* 0x080 */
+-	.long	tlb_miss_load				/* 0x0A0 */
+-	.long	tlb_miss_store				/* 0x0C0 */
+-	.long	do_address_error_load	/* 0x0E0 */
+-	.long	do_address_error_store	/* 0x100 */
+-#ifdef CONFIG_SH_FPU
+-	.long	do_fpu_error		/* 0x120 */
+-#else
+-	.long	do_exception_error		/* 0x120 */
+-#endif
+-	.long	do_exception_error		/* 0x140 */
+-	.long	system_call				/* 0x160 */
+-	.long	do_reserved_inst		/* 0x180 */
+-	.long	do_illegal_slot_inst	/* 0x1A0 */
+-	.long	do_NMI			/* 0x1C0 */
+-	.long	do_exception_error		/* 0x1E0 */
+-	.rept 15
+-		.long do_IRQ		/* 0x200 - 0x3C0 */
+-	.endr
+-	.long	do_exception_error		/* 0x3E0 */
+-	.rept 32
+-		.long do_IRQ		/* 0x400 - 0x7E0 */
+-	.endr
+-	.long	fpu_error_or_IRQA			/* 0x800 */
+-	.long	fpu_error_or_IRQB			/* 0x820 */
+-	.long	do_IRQ			/* 0x840 */
+-	.long	do_IRQ			/* 0x860 */
+-	.rept 6
+-		.long do_exception_error	/* 0x880 - 0x920 */
+-	.endr
+-	.long	do_software_break_point	/* 0x940 */
+-	.long	do_exception_error		/* 0x960 */
+-	.long	do_single_step		/* 0x980 */
+-
+-	.rept 3
+-		.long do_exception_error	/* 0x9A0 - 0x9E0 */
+-	.endr
+-	.long	do_IRQ			/* 0xA00 */
+-	.long	do_IRQ			/* 0xA20 */
+-	.long	itlb_miss_or_IRQ			/* 0xA40 */
+-	.long	do_IRQ			/* 0xA60 */
+-	.long	do_IRQ			/* 0xA80 */
+-	.long	itlb_miss_or_IRQ			/* 0xAA0 */
+-	.long	do_exception_error		/* 0xAC0 */
+-	.long	do_address_error_exec	/* 0xAE0 */
+-	.rept 8
+-		.long do_exception_error	/* 0xB00 - 0xBE0 */
+-	.endr
+-	.rept 18
+-		.long do_IRQ		/* 0xC00 - 0xE20 */
+-	.endr
+-
+-	.section	.text64, "ax"
+-
+-/*
+- * --- Exception/Interrupt/Event Handling Section
+- */
+-
+-/*
+- * VBR and RESVEC blocks.
+- *
+- * First level handler for VBR-based exceptions.
+- *
+- * To avoid waste of space, align to the maximum text block size.
+- * This is assumed to be at most 128 bytes or 32 instructions.
+- * DO NOT EXCEED 32 instructions on the first level handlers !
+- *
+- * Also note that RESVEC is contained within the VBR block
+- * where the room left (1KB - TEXT_SIZE) allows placing
+- * the RESVEC block (at most 512B + TEXT_SIZE).
+- *
+- * So first (and only) level handler for RESVEC-based exceptions.
+- *
+- * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
+- * and interrupt) we are a lot tight with register space until
+- * saving onto the stack frame, which is done in handle_exception().
+- *
+- */
+-
+-#define	TEXT_SIZE 	128
+-#define	BLOCK_SIZE 	1664 		/* Dynamic check, 13*128 */
+-
+-	.balign TEXT_SIZE
+-LVBR_block:
+-	.space	256, 0			/* Power-on class handler, */
+-					/* not required here       */
+-not_a_tlb_miss:
+-	synco	/* TAKum03020 (but probably a good idea anyway.) */
+-	/* Save original stack pointer into KCR1 */
+-	putcon	SP, KCR1
+-
+-	/* Save other original registers into reg_save_area */
+-        movi  reg_save_area, SP
+-	st.q	SP, SAVED_R2, r2
+-	st.q	SP, SAVED_R3, r3
+-	st.q	SP, SAVED_R4, r4
+-	st.q	SP, SAVED_R5, r5
+-	st.q	SP, SAVED_R6, r6
+-	st.q	SP, SAVED_R18, r18
+-	gettr	tr0, r3
+-	st.q	SP, SAVED_TR0, r3
+-
+-	/* Set args for Non-debug, Not a TLB miss class handler */
+-	getcon	EXPEVT, r2
+-	movi	ret_from_exception, r3
+-	ori	r3, 1, r3
+-	movi	EVENT_FAULT_NOT_TLB, r4
+-	or	SP, ZERO, r5
+-	getcon	KCR1, SP
+-	pta	handle_exception, tr0
+-	blink	tr0, ZERO
+-
+-	.balign 256
+-	! VBR+0x200
+-	nop
+-	.balign 256
+-	! VBR+0x300
+-	nop
+-	.balign 256
+-	/*
+-	 * Instead of the natural .balign 1024 place RESVEC here
+-	 * respecting the final 1KB alignment.
+-	 */
+-	.balign TEXT_SIZE
+-	/*
+-	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
+-	 * block making sure the final alignment is correct.
+-	 */
+-tlb_miss:
+-	synco	/* TAKum03020 (but probably a good idea anyway.) */
+-	putcon	SP, KCR1
+-	movi	reg_save_area, SP
+-	/* SP is guaranteed 32-byte aligned. */
+-	st.q	SP, TLB_SAVED_R0 , r0
+-	st.q	SP, TLB_SAVED_R1 , r1
+-	st.q	SP, SAVED_R2 , r2
+-	st.q	SP, SAVED_R3 , r3
+-	st.q	SP, SAVED_R4 , r4
+-	st.q	SP, SAVED_R5 , r5
+-	st.q	SP, SAVED_R6 , r6
+-	st.q	SP, SAVED_R18, r18
+-
+-	/* Save R25 for safety; as/ld may want to use it to achieve the call to
+-	 * the code in mm/tlbmiss.c */
+-	st.q	SP, TLB_SAVED_R25, r25
+-	gettr	tr0, r2
+-	gettr	tr1, r3
+-	gettr	tr2, r4
+-	gettr	tr3, r5
+-	gettr	tr4, r18
+-	st.q	SP, SAVED_TR0 , r2
+-	st.q	SP, TLB_SAVED_TR1 , r3
+-	st.q	SP, TLB_SAVED_TR2 , r4
+-	st.q	SP, TLB_SAVED_TR3 , r5
+-	st.q	SP, TLB_SAVED_TR4 , r18
+-
+-	pt	do_fast_page_fault, tr0
+-	getcon	SSR, r2
+-	getcon	EXPEVT, r3
+-	getcon	TEA, r4
+-	shlri	r2, 30, r2
+-	andi	r2, 1, r2	/* r2 = SSR.MD */
+-	blink 	tr0, LINK
+-
+-	pt	fixup_to_invoke_general_handler, tr1
+-
+-	/* If the fast path handler fixed the fault, just drop through quickly
+-	   to the restore code right away to return to the excepting context.
+-	   */
+-	beqi/u	r2, 0, tr1
+-
+-fast_tlb_miss_restore:
+-	ld.q	SP, SAVED_TR0, r2
+-	ld.q	SP, TLB_SAVED_TR1, r3
+-	ld.q	SP, TLB_SAVED_TR2, r4
+-
+-	ld.q	SP, TLB_SAVED_TR3, r5
+-	ld.q	SP, TLB_SAVED_TR4, r18
+-
+-	ptabs	r2, tr0
+-	ptabs	r3, tr1
+-	ptabs	r4, tr2
+-	ptabs	r5, tr3
+-	ptabs	r18, tr4
+-
+-	ld.q	SP, TLB_SAVED_R0, r0
+-	ld.q	SP, TLB_SAVED_R1, r1
+-	ld.q	SP, SAVED_R2, r2
+-	ld.q	SP, SAVED_R3, r3
+-	ld.q	SP, SAVED_R4, r4
+-	ld.q	SP, SAVED_R5, r5
+-	ld.q	SP, SAVED_R6, r6
+-	ld.q	SP, SAVED_R18, r18
+-	ld.q	SP, TLB_SAVED_R25, r25
+-
+-	getcon	KCR1, SP
+-	rte
+-	nop /* for safety, in case the code is run on sh5-101 cut1.x */
+-
+-fixup_to_invoke_general_handler:
+-
+-	/* OK, new method.  Restore stuff that's not expected to get saved into
+-	   the 'first-level' reg save area, then just fall through to setting
+-	   up the registers and calling the second-level handler. */
+-
+-	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
+-	   r25,tr1-4 and save r6 to get into the right state.  */
+-
+-	ld.q	SP, TLB_SAVED_TR1, r3
+-	ld.q	SP, TLB_SAVED_TR2, r4
+-	ld.q	SP, TLB_SAVED_TR3, r5
+-	ld.q	SP, TLB_SAVED_TR4, r18
+-	ld.q	SP, TLB_SAVED_R25, r25
+-
+-	ld.q	SP, TLB_SAVED_R0, r0
+-	ld.q	SP, TLB_SAVED_R1, r1
+-
+-	ptabs/u	r3, tr1
+-	ptabs/u	r4, tr2
+-	ptabs/u	r5, tr3
+-	ptabs/u	r18, tr4
+-
+-	/* Set args for Non-debug, TLB miss class handler */
+-	getcon	EXPEVT, r2
+-	movi	ret_from_exception, r3
+-	ori	r3, 1, r3
+-	movi	EVENT_FAULT_TLB, r4
+-	or	SP, ZERO, r5
+-	getcon	KCR1, SP
+-	pta	handle_exception, tr0
+-	blink	tr0, ZERO
+-
+-/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
+-   DOES END UP AT VBR+0x600 */
+-	nop
+-	nop
+-	nop
+-	nop
+-	nop
+-	nop
+-
+-	.balign 256
+-	/* VBR + 0x600 */
+-
+-interrupt:
+-	synco	/* TAKum03020 (but probably a good idea anyway.) */
+-	/* Save original stack pointer into KCR1 */
+-	putcon	SP, KCR1
+-
+-	/* Save other original registers into reg_save_area */
+-        movi  reg_save_area, SP
+-	st.q	SP, SAVED_R2, r2
+-	st.q	SP, SAVED_R3, r3
+-	st.q	SP, SAVED_R4, r4
+-	st.q	SP, SAVED_R5, r5
+-	st.q	SP, SAVED_R6, r6
+-	st.q	SP, SAVED_R18, r18
+-	gettr	tr0, r3
+-	st.q	SP, SAVED_TR0, r3
+-
+-	/* Set args for interrupt class handler */
+-	getcon	INTEVT, r2
+-	movi	ret_from_irq, r3
+-	ori	r3, 1, r3
+-	movi	EVENT_INTERRUPT, r4
+-	or	SP, ZERO, r5
+-	getcon	KCR1, SP
+-	pta	handle_exception, tr0
+-	blink	tr0, ZERO
+-	.balign	TEXT_SIZE		/* let's waste the bare minimum */
+-
+-LVBR_block_end:				/* Marker. Used for total checking */
+-
+-	.balign 256
+-LRESVEC_block:
+-	/* Panic handler. Called with MMU off. Possible causes/actions:
+-	 * - Reset:		Jump to program start.
+-	 * - Single Step:	Turn off Single Step & return.
+-	 * - Others:		Call panic handler, passing PC as arg.
+-	 *			(this may need to be extended...)
+-	 */
+-reset_or_panic:
+-	synco	/* TAKum03020 (but probably a good idea anyway.) */
+-	putcon	SP, DCR
+-	/* First save r0-1 and tr0, as we need to use these */
+-	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
+-	st.q	SP, 0, r0
+-	st.q	SP, 8, r1
+-	gettr	tr0, r0
+-	st.q	SP, 32, r0
+-
+-	/* Check cause */
+-	getcon	EXPEVT, r0
+-	movi	RESET_CAUSE, r1
+-	sub	r1, r0, r1		/* r1=0 if reset */
+-	movi	_stext-CONFIG_CACHED_MEMORY_OFFSET, r0
+-	ori	r0, 1, r0
+-	ptabs	r0, tr0
+-	beqi	r1, 0, tr0		/* Jump to start address if reset */
+-
+-	getcon	EXPEVT, r0
+-	movi	DEBUGSS_CAUSE, r1
+-	sub	r1, r0, r1		/* r1=0 if single step */
+-	pta	single_step_panic, tr0
+-	beqi	r1, 0, tr0		/* jump if single step */
+-
+-	/* Now jump to where we save the registers. */
+-	movi	panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1
+-	ptabs	r1, tr0
+-	blink	tr0, r63
+-
+-single_step_panic:
+-	/* We are in a handler with Single Step set. We need to resume the
+-	 * handler, by turning on MMU & turning off Single Step. */
+-	getcon	SSR, r0
+-	movi	SR_MMU, r1
+-	or	r0, r1, r0
+-	movi	~SR_SS, r1
+-	and	r0, r1, r0
+-	putcon	r0, SSR
+-	/* Restore EXPEVT, as the rte won't do this */
+-	getcon	PEXPEVT, r0
+-	putcon	r0, EXPEVT
+-	/* Restore regs */
+-	ld.q	SP, 32, r0
+-	ptabs	r0, tr0
+-	ld.q	SP, 0, r0
+-	ld.q	SP, 8, r1
+-	getcon	DCR, SP
+-	synco
+-	rte
+-
+-
+-	.balign	256
+-debug_exception:
+-	synco	/* TAKum03020 (but probably a good idea anyway.) */
+-	/*
+-	 * Single step/software_break_point first level handler.
+-	 * Called with MMU off, so the first thing we do is enable it
+-	 * by doing an rte with appropriate SSR.
+-	 */
+-	putcon	SP, DCR
+-	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
+-	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
+-
+-	/* With the MMU off, we are bypassing the cache, so purge any
+-         * data that will be made stale by the following stores.
+-         */
+-	ocbp	SP, 0
+-	synco
+-
+-	st.q	SP, 0, r0
+-	st.q	SP, 8, r1
+-	getcon	SPC, r0
+-	st.q	SP, 16, r0
+-	getcon	SSR, r0
+-	st.q	SP, 24, r0
+-
+-	/* Enable MMU, block exceptions, set priv mode, disable single step */
+-	movi	SR_MMU | SR_BL | SR_MD, r1
+-	or	r0, r1, r0
+-	movi	~SR_SS, r1
+-	and	r0, r1, r0
+-	putcon	r0, SSR
+-	/* Force control to debug_exception_2 when rte is executed */
+-	movi	debug_exeception_2, r0
+-	ori	r0, 1, r0      /* force SHmedia, just in case */
+-	putcon	r0, SPC
+-	getcon	DCR, SP
+-	synco
+-	rte
+-debug_exeception_2:
+-	/* Restore saved regs */
+-	putcon	SP, KCR1
+-	movi	resvec_save_area, SP
+-	ld.q	SP, 24, r0
+-	putcon	r0, SSR
+-	ld.q	SP, 16, r0
+-	putcon	r0, SPC
+-	ld.q	SP, 0, r0
+-	ld.q	SP, 8, r1
+-
+-	/* Save other original registers into reg_save_area */
+-        movi  reg_save_area, SP
+-	st.q	SP, SAVED_R2, r2
+-	st.q	SP, SAVED_R3, r3
+-	st.q	SP, SAVED_R4, r4
+-	st.q	SP, SAVED_R5, r5
+-	st.q	SP, SAVED_R6, r6
+-	st.q	SP, SAVED_R18, r18
+-	gettr	tr0, r3
+-	st.q	SP, SAVED_TR0, r3
+-
+-	/* Set args for debug class handler */
+-	getcon	EXPEVT, r2
+-	movi	ret_from_exception, r3
+-	ori	r3, 1, r3
+-	movi	EVENT_DEBUG, r4
+-	or	SP, ZERO, r5
+-	getcon	KCR1, SP
+-	pta	handle_exception, tr0
+-	blink	tr0, ZERO
+-
+-	.balign	256
+-debug_interrupt:
+-	/* !!! WE COME HERE IN REAL MODE !!! */
+-	/* Hook-up debug interrupt to allow various debugging options to be
+-	 * hooked into its handler. */
+-	/* Save original stack pointer into KCR1 */
+-	synco
+-	putcon	SP, KCR1
+-	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
+-	ocbp	SP, 0
+-	ocbp	SP, 32
+-	synco
+-
+-	/* Save other original registers into reg_save_area thru real addresses */
+-	st.q	SP, SAVED_R2, r2
+-	st.q	SP, SAVED_R3, r3
+-	st.q	SP, SAVED_R4, r4
+-	st.q	SP, SAVED_R5, r5
+-	st.q	SP, SAVED_R6, r6
+-	st.q	SP, SAVED_R18, r18
+-	gettr	tr0, r3
+-	st.q	SP, SAVED_TR0, r3
+-
+-	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
+-	   them back again, so that they look like the originals
+-	   as far as the real handler code is concerned. */
+-	getcon	spc, r6
+-	putcon	r6, pspc
+-	getcon	ssr, r6
+-	putcon	r6, pssr
+-
+-	! construct useful SR for handle_exception
+-	movi	3, r6
+-	shlli	r6, 30, r6
+-	getcon	sr, r18
+-	or	r18, r6, r6
+-	putcon	r6, ssr
+-
+-	! SSR is now the current SR with the MD and MMU bits set
+-	! i.e. the rte will switch back to priv mode and put
+-	! the mmu back on
+-
+-	! construct spc
+-	movi	handle_exception, r18
+-	ori	r18, 1, r18		! for safety (do we need this?)
+-	putcon	r18, spc
+-
+-	/* Set args for Non-debug, Not a TLB miss class handler */
+-
+-	! EXPEVT==0x80 is unused, so 'steal' this value to put the
+-	! debug interrupt handler in the vectoring table
+-	movi	0x80, r2
+-	movi	ret_from_exception, r3
+-	ori	r3, 1, r3
+-	movi	EVENT_FAULT_NOT_TLB, r4
+-
+-	or	SP, ZERO, r5
+-	movi	CONFIG_CACHED_MEMORY_OFFSET, r6
+-	add	r6, r5, r5
+-	getcon	KCR1, SP
+-
+-	synco	! for safety
+-	rte	! -> handle_exception, switch back to priv mode again
+-
+-LRESVEC_block_end:			/* Marker. Unused. */
+-
+-	.balign	TEXT_SIZE
+-
+-/*
+- * Second level handler for VBR-based exceptions. Pre-handler.
+- * In common to all stack-frame sensitive handlers.
+- *
+- * Inputs:
+- * (KCR0) Current [current task union]
+- * (KCR1) Original SP
+- * (r2)   INTEVT/EXPEVT
+- * (r3)   appropriate return address
+- * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
+- * (r5)   Pointer to reg_save_area
+- * (SP)   Original SP
+- *
+- * Available registers:
+- * (r6)
+- * (r18)
+- * (tr0)
+- *
+- */
+-handle_exception:
+-	/* Common 2nd level handler. */
+-
+-	/* First thing we need an appropriate stack pointer */
+-	getcon	SSR, r6
+-	shlri	r6, 30, r6
+-	andi	r6, 1, r6
+-	pta	stack_ok, tr0
+-	bne	r6, ZERO, tr0		/* Original stack pointer is fine */
+-
+-	/* Set stack pointer for user fault */
+-	getcon	KCR0, SP
+-	movi	THREAD_SIZE, r6		/* Point to the end */
+-	add	SP, r6, SP
+-
+-stack_ok:
+-
+-/* DEBUG : check for underflow/overflow of the kernel stack */
+-	pta	no_underflow, tr0
+-	getcon  KCR0, r6
+-	movi	1024, r18
+-	add	r6, r18, r6
+-	bge	SP, r6, tr0 	! ? below 1k from bottom of stack : danger zone
+-
+-/* Just panic to cause a crash. */
+-bad_sp:
+-	ld.b	r63, 0, r6
+-	nop
+-
+-no_underflow:
+-	pta	bad_sp, tr0
+-	getcon	kcr0, r6
+-	movi	THREAD_SIZE, r18
+-	add	r18, r6, r6
+-	bgt	SP, r6, tr0	! sp above the stack
+-
+-	/* Make some room for the BASIC frame. */
+-	movi	-(FRAME_SIZE), r6
+-	add	SP, r6, SP
+-
+-/* Could do this with no stalling if we had another spare register, but the
+-   code below will be OK. */
+-	ld.q	r5, SAVED_R2, r6
+-	ld.q	r5, SAVED_R3, r18
+-	st.q	SP, FRAME_R(2), r6
+-	ld.q	r5, SAVED_R4, r6
+-	st.q	SP, FRAME_R(3), r18
+-	ld.q	r5, SAVED_R5, r18
+-	st.q	SP, FRAME_R(4), r6
+-	ld.q	r5, SAVED_R6, r6
+-	st.q	SP, FRAME_R(5), r18
+-	ld.q	r5, SAVED_R18, r18
+-	st.q	SP, FRAME_R(6), r6
+-	ld.q	r5, SAVED_TR0, r6
+-	st.q	SP, FRAME_R(18), r18
+-	st.q	SP, FRAME_T(0), r6
+-
+-	/* Keep old SP around */
+-	getcon	KCR1, r6
+-
+-	/* Save the rest of the general purpose registers */
+-	st.q	SP, FRAME_R(0), r0
+-	st.q	SP, FRAME_R(1), r1
+-	st.q	SP, FRAME_R(7), r7
+-	st.q	SP, FRAME_R(8), r8
+-	st.q	SP, FRAME_R(9), r9
+-	st.q	SP, FRAME_R(10), r10
+-	st.q	SP, FRAME_R(11), r11
+-	st.q	SP, FRAME_R(12), r12
+-	st.q	SP, FRAME_R(13), r13
+-	st.q	SP, FRAME_R(14), r14
+-
+-	/* SP is somewhere else */
+-	st.q	SP, FRAME_R(15), r6
+-
+-	st.q	SP, FRAME_R(16), r16
+-	st.q	SP, FRAME_R(17), r17
+-	/* r18 is saved earlier. */
+-	st.q	SP, FRAME_R(19), r19
+-	st.q	SP, FRAME_R(20), r20
+-	st.q	SP, FRAME_R(21), r21
+-	st.q	SP, FRAME_R(22), r22
+-	st.q	SP, FRAME_R(23), r23
+-	st.q	SP, FRAME_R(24), r24
+-	st.q	SP, FRAME_R(25), r25
+-	st.q	SP, FRAME_R(26), r26
+-	st.q	SP, FRAME_R(27), r27
+-	st.q	SP, FRAME_R(28), r28
+-	st.q	SP, FRAME_R(29), r29
+-	st.q	SP, FRAME_R(30), r30
+-	st.q	SP, FRAME_R(31), r31
+-	st.q	SP, FRAME_R(32), r32
+-	st.q	SP, FRAME_R(33), r33
+-	st.q	SP, FRAME_R(34), r34
+-	st.q	SP, FRAME_R(35), r35
+-	st.q	SP, FRAME_R(36), r36
+-	st.q	SP, FRAME_R(37), r37
+-	st.q	SP, FRAME_R(38), r38
+-	st.q	SP, FRAME_R(39), r39
+-	st.q	SP, FRAME_R(40), r40
+-	st.q	SP, FRAME_R(41), r41
+-	st.q	SP, FRAME_R(42), r42
+-	st.q	SP, FRAME_R(43), r43
+-	st.q	SP, FRAME_R(44), r44
+-	st.q	SP, FRAME_R(45), r45
+-	st.q	SP, FRAME_R(46), r46
+-	st.q	SP, FRAME_R(47), r47
+-	st.q	SP, FRAME_R(48), r48
+-	st.q	SP, FRAME_R(49), r49
+-	st.q	SP, FRAME_R(50), r50
+-	st.q	SP, FRAME_R(51), r51
+-	st.q	SP, FRAME_R(52), r52
+-	st.q	SP, FRAME_R(53), r53
+-	st.q	SP, FRAME_R(54), r54
+-	st.q	SP, FRAME_R(55), r55
+-	st.q	SP, FRAME_R(56), r56
+-	st.q	SP, FRAME_R(57), r57
+-	st.q	SP, FRAME_R(58), r58
+-	st.q	SP, FRAME_R(59), r59
+-	st.q	SP, FRAME_R(60), r60
+-	st.q	SP, FRAME_R(61), r61
+-	st.q	SP, FRAME_R(62), r62
+-
+-	/*
+-	 * Save the S* registers.
+-	 */
+-	getcon	SSR, r61
+-	st.q	SP, FRAME_S(FSSR), r61
+-	getcon	SPC, r62
+-	st.q	SP, FRAME_S(FSPC), r62
+-	movi	-1, r62			/* Reset syscall_nr */
+-	st.q	SP, FRAME_S(FSYSCALL_ID), r62
+-
+-	/* Save the rest of the target registers */
+-	gettr	tr1, r6
+-	st.q	SP, FRAME_T(1), r6
+-	gettr	tr2, r6
+-	st.q	SP, FRAME_T(2), r6
+-	gettr	tr3, r6
+-	st.q	SP, FRAME_T(3), r6
+-	gettr	tr4, r6
+-	st.q	SP, FRAME_T(4), r6
+-	gettr	tr5, r6
+-	st.q	SP, FRAME_T(5), r6
+-	gettr	tr6, r6
+-	st.q	SP, FRAME_T(6), r6
+-	gettr	tr7, r6
+-	st.q	SP, FRAME_T(7), r6
+-
+-	! setup FP so that unwinder can wind back through nested kernel mode
+-	! exceptions
+-	add	SP, ZERO, r14
+-
+-#ifdef CONFIG_POOR_MANS_STRACE
+-	/* We've pushed all the registers now, so only r2-r4 hold anything
+-	 * useful. Move them into callee save registers */
+-	or	r2, ZERO, r28
+-	or	r3, ZERO, r29
+-	or	r4, ZERO, r30
+-
+-	/* Preserve r2 as the event code */
+-	movi	evt_debug, r3
+-	ori	r3, 1, r3
+-	ptabs	r3, tr0
+-
+-	or	SP, ZERO, r6
+-	getcon	TRA, r5
+-	blink	tr0, LINK
+-
+-	or	r28, ZERO, r2
+-	or	r29, ZERO, r3
+-	or	r30, ZERO, r4
+-#endif
+-
+-	/* For syscall and debug race condition, get TRA now */
+-	getcon	TRA, r5
+-
+-	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
+-	 * Also set FD, to catch FPU usage in the kernel.
+-	 *
+-	 * benedict.gaster at superh.com 29/07/2002
+-	 *
+-	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
+-	 * same time change BL from 1->0, as any pending interrupt of a level
+-	 * higher than he previous value of IMASK will leak through and be
+-	 * taken unexpectedly.
+-	 *
+-	 * To avoid this we raise the IMASK and then issue another PUTCON to
+-	 * enable interrupts.
+-         */
+-	getcon	SR, r6
+-	movi	SR_IMASK | SR_FD, r7
+-	or	r6, r7, r6
+-	putcon	r6, SR
+-	movi	SR_UNBLOCK_EXC, r7
+-	and	r6, r7, r6
+-	putcon	r6, SR
+-
+-
+-	/* Now call the appropriate 3rd level handler */
+-	or	r3, ZERO, LINK
+-	movi	trap_jtable, r3
+-	shlri	r2, 3, r2
+-	ldx.l	r2, r3, r3
+-	shlri	r2, 2, r2
+-	ptabs	r3, tr0
+-	or	SP, ZERO, r3
+-	blink	tr0, ZERO
+-
+-/*
+- * Second level handler for VBR-based exceptions. Post-handlers.
+- *
+- * Post-handlers for interrupts (ret_from_irq), exceptions
+- * (ret_from_exception) and common reentrance doors (restore_all
+- * to get back to the original context, ret_from_syscall loop to
+- * check kernel exiting).
+- *
+- * ret_with_reschedule and work_notifysig are an inner lables of
+- * the ret_from_syscall loop.
+- *
+- * In common to all stack-frame sensitive handlers.
+- *
+- * Inputs:
+- * (SP)   struct pt_regs *, original register's frame pointer (basic)
+- *
+- */
+-	.global ret_from_irq
+-ret_from_irq:
+-#ifdef CONFIG_POOR_MANS_STRACE
+-	pta	evt_debug_ret_from_irq, tr0
+-	ori	SP, 0, r2
+-	blink	tr0, LINK
+-#endif
+-	ld.q	SP, FRAME_S(FSSR), r6
+-	shlri	r6, 30, r6
+-	andi	r6, 1, r6
+-	pta	resume_kernel, tr0
+-	bne	r6, ZERO, tr0		/* no further checks */
+-	STI()
+-	pta	ret_with_reschedule, tr0
+-	blink	tr0, ZERO		/* Do not check softirqs */
+-
+-	.global ret_from_exception
+-ret_from_exception:
+-	preempt_stop()
+-
+-#ifdef CONFIG_POOR_MANS_STRACE
+-	pta	evt_debug_ret_from_exc, tr0
+-	ori	SP, 0, r2
+-	blink	tr0, LINK
+-#endif
+-
+-	ld.q	SP, FRAME_S(FSSR), r6
+-	shlri	r6, 30, r6
+-	andi	r6, 1, r6
+-	pta	resume_kernel, tr0
+-	bne	r6, ZERO, tr0		/* no further checks */
+-
+-	/* Check softirqs */
+-
+-#ifdef CONFIG_PREEMPT
+-	pta   ret_from_syscall, tr0
+-	blink   tr0, ZERO
+-
+-resume_kernel:
+-	pta	restore_all, tr0
+-
+-	getcon	KCR0, r6
+-	ld.l	r6, TI_PRE_COUNT, r7
+-	beq/u	r7, ZERO, tr0
+-
+-need_resched:
+-	ld.l	r6, TI_FLAGS, r7
+-	movi	(1 << TIF_NEED_RESCHED), r8
+-	and	r8, r7, r8
+-	bne	r8, ZERO, tr0
+-
+-	getcon	SR, r7
+-	andi	r7, 0xf0, r7
+-	bne	r7, ZERO, tr0
+-
+-	movi	((PREEMPT_ACTIVE >> 16) & 65535), r8
+-	shori	(PREEMPT_ACTIVE & 65535), r8
+-	st.l	r6, TI_PRE_COUNT, r8
+-
+-	STI()
+-	movi	schedule, r7
+-	ori	r7, 1, r7
+-	ptabs	r7, tr1
+-	blink	tr1, LINK
+-
+-	st.l	r6, TI_PRE_COUNT, ZERO
+-	CLI()
+-
+-	pta	need_resched, tr1
+-	blink	tr1, ZERO
+-#endif
+-
+-	.global ret_from_syscall
+-ret_from_syscall:
+-
+-ret_with_reschedule:
+-	getcon	KCR0, r6		! r6 contains current_thread_info
+-	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags
+-
+-	! FIXME:!!!
+-	! no handling of TIF_SYSCALL_TRACE yet!!
+-
+-	movi	_TIF_NEED_RESCHED, r8
+-	and	r8, r7, r8
+-	pta	work_resched, tr0
+-	bne	r8, ZERO, tr0
+-
+-	pta	restore_all, tr1
+-
+-	movi	(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+-	and	r8, r7, r8
+-	pta	work_notifysig, tr0
+-	bne	r8, ZERO, tr0
+-
+-	blink	tr1, ZERO
+-
+-work_resched:
+-	pta	ret_from_syscall, tr0
+-	gettr	tr0, LINK
+-	movi	schedule, r6
+-	ptabs	r6, tr0
+-	blink	tr0, ZERO		/* Call schedule(), return on top */
+-
+-work_notifysig:
+-	gettr	tr1, LINK
+-
+-	movi	do_signal, r6
+-	ptabs	r6, tr0
+-	or	SP, ZERO, r2
+-	or	ZERO, ZERO, r3
+-	blink	tr0, LINK	    /* Call do_signal(regs, 0), return here */
+-
+-restore_all:
+-	/* Do prefetches */
+-
+-	ld.q	SP, FRAME_T(0), r6
+-	ld.q	SP, FRAME_T(1), r7
+-	ld.q	SP, FRAME_T(2), r8
+-	ld.q	SP, FRAME_T(3), r9
+-	ptabs	r6, tr0
+-	ptabs	r7, tr1
+-	ptabs	r8, tr2
+-	ptabs	r9, tr3
+-	ld.q	SP, FRAME_T(4), r6
+-	ld.q	SP, FRAME_T(5), r7
+-	ld.q	SP, FRAME_T(6), r8
+-	ld.q	SP, FRAME_T(7), r9
+-	ptabs	r6, tr4
+-	ptabs	r7, tr5
+-	ptabs	r8, tr6
+-	ptabs	r9, tr7
+-
+-	ld.q	SP, FRAME_R(0), r0
+-	ld.q	SP, FRAME_R(1), r1
+-	ld.q	SP, FRAME_R(2), r2
+-	ld.q	SP, FRAME_R(3), r3
+-	ld.q	SP, FRAME_R(4), r4
+-	ld.q	SP, FRAME_R(5), r5
+-	ld.q	SP, FRAME_R(6), r6
+-	ld.q	SP, FRAME_R(7), r7
+-	ld.q	SP, FRAME_R(8), r8
+-	ld.q	SP, FRAME_R(9), r9
+-	ld.q	SP, FRAME_R(10), r10
+-	ld.q	SP, FRAME_R(11), r11
+-	ld.q	SP, FRAME_R(12), r12
+-	ld.q	SP, FRAME_R(13), r13
+-	ld.q	SP, FRAME_R(14), r14
+-
+-	ld.q	SP, FRAME_R(16), r16
+-	ld.q	SP, FRAME_R(17), r17
+-	ld.q	SP, FRAME_R(18), r18
+-	ld.q	SP, FRAME_R(19), r19
+-	ld.q	SP, FRAME_R(20), r20
+-	ld.q	SP, FRAME_R(21), r21
+-	ld.q	SP, FRAME_R(22), r22
+-	ld.q	SP, FRAME_R(23), r23
+-	ld.q	SP, FRAME_R(24), r24
+-	ld.q	SP, FRAME_R(25), r25
+-	ld.q	SP, FRAME_R(26), r26
+-	ld.q	SP, FRAME_R(27), r27
+-	ld.q	SP, FRAME_R(28), r28
+-	ld.q	SP, FRAME_R(29), r29
+-	ld.q	SP, FRAME_R(30), r30
+-	ld.q	SP, FRAME_R(31), r31
+-	ld.q	SP, FRAME_R(32), r32
+-	ld.q	SP, FRAME_R(33), r33
+-	ld.q	SP, FRAME_R(34), r34
+-	ld.q	SP, FRAME_R(35), r35
+-	ld.q	SP, FRAME_R(36), r36
+-	ld.q	SP, FRAME_R(37), r37
+-	ld.q	SP, FRAME_R(38), r38
+-	ld.q	SP, FRAME_R(39), r39
+-	ld.q	SP, FRAME_R(40), r40
+-	ld.q	SP, FRAME_R(41), r41
+-	ld.q	SP, FRAME_R(42), r42
+-	ld.q	SP, FRAME_R(43), r43
+-	ld.q	SP, FRAME_R(44), r44
+-	ld.q	SP, FRAME_R(45), r45
+-	ld.q	SP, FRAME_R(46), r46
+-	ld.q	SP, FRAME_R(47), r47
+-	ld.q	SP, FRAME_R(48), r48
+-	ld.q	SP, FRAME_R(49), r49
+-	ld.q	SP, FRAME_R(50), r50
+-	ld.q	SP, FRAME_R(51), r51
+-	ld.q	SP, FRAME_R(52), r52
+-	ld.q	SP, FRAME_R(53), r53
+-	ld.q	SP, FRAME_R(54), r54
+-	ld.q	SP, FRAME_R(55), r55
+-	ld.q	SP, FRAME_R(56), r56
+-	ld.q	SP, FRAME_R(57), r57
+-	ld.q	SP, FRAME_R(58), r58
+-
+-	getcon	SR, r59
+-	movi	SR_BLOCK_EXC, r60
+-	or	r59, r60, r59
+-	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
+-	ld.q	SP, FRAME_S(FSSR), r61
+-	ld.q	SP, FRAME_S(FSPC), r62
+-	movi	SR_ASID_MASK, r60
+-	and	r59, r60, r59
+-	andc	r61, r60, r61		/* Clear out older ASID */
+-	or	r59, r61, r61		/* Retain current ASID */
+-	putcon	r61, SSR
+-	putcon	r62, SPC
+-
+-	/* Ignore FSYSCALL_ID */
+-
+-	ld.q	SP, FRAME_R(59), r59
+-	ld.q	SP, FRAME_R(60), r60
+-	ld.q	SP, FRAME_R(61), r61
+-	ld.q	SP, FRAME_R(62), r62
+-
+-	/* Last touch */
+-	ld.q	SP, FRAME_R(15), SP
+-	rte
+-	nop
+-
+-/*
+- * Third level handlers for VBR-based exceptions. Adapting args to
+- * and/or deflecting to fourth level handlers.
+- *
+- * Fourth level handlers interface.
+- * Most are C-coded handlers directly pointed by the trap_jtable.
+- * (Third = Fourth level)
+- * Inputs:
+- * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
+- *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
+- * (r3)   struct pt_regs *, original register's frame pointer
+- * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
+- * (r5)   TRA control register (for syscall/debug benefit only)
+- * (LINK) return address
+- * (SP)   = r3
+- *
+- * Kernel TLB fault handlers will get a slightly different interface.
+- * (r2)   struct pt_regs *, original register's frame pointer
+- * (r3)   writeaccess, whether it's a store fault as opposed to load fault
+- * (r4)   execaccess, whether it's a ITLB fault as opposed to DTLB fault
+- * (r5)   Effective Address of fault
+- * (LINK) return address
+- * (SP)   = r2
+- *
+- * fpu_error_or_IRQ? is a helper to deflect to the right cause.
+- *
+- */
+-tlb_miss_load:
+-	or	SP, ZERO, r2
+-	or	ZERO, ZERO, r3		/* Read */
+-	or	ZERO, ZERO, r4		/* Data */
+-	getcon	TEA, r5
+-	pta	call_do_page_fault, tr0
+-	beq	ZERO, ZERO, tr0
+-
+-tlb_miss_store:
+-	or	SP, ZERO, r2
+-	movi	1, r3			/* Write */
+-	or	ZERO, ZERO, r4		/* Data */
+-	getcon	TEA, r5
+-	pta	call_do_page_fault, tr0
+-	beq	ZERO, ZERO, tr0
+-
+-itlb_miss_or_IRQ:
+-	pta	its_IRQ, tr0
+-	beqi/u	r4, EVENT_INTERRUPT, tr0
+-	or	SP, ZERO, r2
+-	or	ZERO, ZERO, r3		/* Read */
+-	movi	1, r4			/* Text */
+-	getcon	TEA, r5
+-	/* Fall through */
+-
+-call_do_page_fault:
+-	movi	do_page_fault, r6
+-        ptabs	r6, tr0
+-        blink	tr0, ZERO
+-
+-fpu_error_or_IRQA:
+-	pta	its_IRQ, tr0
+-	beqi/l	r4, EVENT_INTERRUPT, tr0
+-#ifdef CONFIG_SH_FPU
+-	movi	do_fpu_state_restore, r6
+-#else
+-	movi	do_exception_error, r6
+-#endif
+-	ptabs	r6, tr0
+-	blink	tr0, ZERO
+-
+-fpu_error_or_IRQB:
+-	pta	its_IRQ, tr0
+-	beqi/l	r4, EVENT_INTERRUPT, tr0
+-#ifdef CONFIG_SH_FPU
+-	movi	do_fpu_state_restore, r6
+-#else
+-	movi	do_exception_error, r6
+-#endif
+-	ptabs	r6, tr0
+-	blink	tr0, ZERO
+-
+-its_IRQ:
+-	movi	do_IRQ, r6
+-	ptabs	r6, tr0
+-	blink	tr0, ZERO
+-
+-/*
+- * system_call/unknown_trap third level handler:
+- *
+- * Inputs:
+- * (r2)   fault/interrupt code, entry number (TRAP = 11)
+- * (r3)   struct pt_regs *, original register's frame pointer
+- * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
+- * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
+- * (SP)   = r3
+- * (LINK) return address: ret_from_exception
+- * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
+- *
+- * Outputs:
+- * (*r3)  Syscall reply (Saved r2)
+- * (LINK) In case of syscall only it can be scrapped.
+- *        Common second level post handler will be ret_from_syscall.
+- *        Common (non-trace) exit point to that is syscall_ret (saving
+- *        result to r2). Common bad exit point is syscall_bad (returning
+- *        ENOSYS then saved to r2).
+- *
+- */
+-
+-unknown_trap:
+-	/* Unknown Trap or User Trace */
+-	movi	do_unknown_trapa, r6
+-	ptabs	r6, tr0
+-        ld.q    r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
+-        andi    r2, 0x1ff, r2		/* r2 = syscall # */
+-	blink	tr0, LINK
+-
+-	pta	syscall_ret, tr0
+-	blink	tr0, ZERO
+-
+-        /* New syscall implementation*/
+-system_call:
+-	pta	unknown_trap, tr0
+-        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
+-        shlri   r4, 20, r4
+-	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */
+-
+-        /* It's a system call */
+-	st.q    r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
+-	andi    r5, 0x1ff, r5			/* syscall # -> r5	  */
+-
+-	STI()
+-
+-	pta	syscall_allowed, tr0
+-	movi	NR_syscalls - 1, r4	/* Last valid */
+-	bgeu/l	r4, r5, tr0
+-
+-syscall_bad:
+-	/* Return ENOSYS ! */
+-	movi	-(ENOSYS), r2		/* Fall-through */
+-
+-	.global syscall_ret
+-syscall_ret:
+-	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
+-
+-#ifdef CONFIG_POOR_MANS_STRACE
+-	/* nothing useful in registers at this point */
+-
+-	movi	evt_debug2, r5
+-	ori	r5, 1, r5
+-	ptabs	r5, tr0
+-	ld.q	SP, FRAME_R(9), r2
+-	or	SP, ZERO, r3
+-	blink	tr0, LINK
+-#endif
+-
+-	ld.q	SP, FRAME_S(FSPC), r2
+-	addi	r2, 4, r2		/* Move PC, being pre-execution event */
+-	st.q	SP, FRAME_S(FSPC), r2
+-	pta	ret_from_syscall, tr0
+-	blink	tr0, ZERO
+-
+-
+-/*  A different return path for ret_from_fork, because we now need
+- *  to call schedule_tail with the later kernels. Because prev is
+- *  loaded into r2 by switch_to() means we can just call it straight  away
+- */
+-
+-.global	ret_from_fork
+-ret_from_fork:
+-
+-	movi	schedule_tail,r5
+-	ori	r5, 1, r5
+-	ptabs	r5, tr0
+-	blink	tr0, LINK
+-
+-#ifdef CONFIG_POOR_MANS_STRACE
+-	/* nothing useful in registers at this point */
+-
+-	movi	evt_debug2, r5
+-	ori	r5, 1, r5
+-	ptabs	r5, tr0
+-	ld.q	SP, FRAME_R(9), r2
+-	or	SP, ZERO, r3
+-	blink	tr0, LINK
+-#endif
+-
+-	ld.q	SP, FRAME_S(FSPC), r2
+-	addi	r2, 4, r2		/* Move PC, being pre-execution event */
+-	st.q	SP, FRAME_S(FSPC), r2
+-	pta	ret_from_syscall, tr0
+-	blink	tr0, ZERO
+-
+-
+-
+-syscall_allowed:
+-	/* Use LINK to deflect the exit point, default is syscall_ret */
+-	pta	syscall_ret, tr0
+-	gettr	tr0, LINK
+-	pta	syscall_notrace, tr0
+-
+-	getcon	KCR0, r2
+-	ld.l	r2, TI_FLAGS, r4
+-	movi	(1 << TIF_SYSCALL_TRACE), r6
+-	and	r6, r4, r6
+-	beq/l	r6, ZERO, tr0
+-
+-	/* Trace it by calling syscall_trace before and after */
+-	movi	syscall_trace, r4
+-	ptabs	r4, tr0
+-	blink	tr0, LINK
+-	/* Reload syscall number as r5 is trashed by syscall_trace */
+-	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
+-	andi	r5, 0x1ff, r5
+-
+-	pta	syscall_ret_trace, tr0
+-	gettr	tr0, LINK
+-
+-syscall_notrace:
+-	/* Now point to the appropriate 4th level syscall handler */
+-	movi	sys_call_table, r4
+-	shlli	r5, 2, r5
+-	ldx.l	r4, r5, r5
+-	ptabs	r5, tr0
+-
+-	/* Prepare original args */
+-	ld.q	SP, FRAME_R(2), r2
+-	ld.q	SP, FRAME_R(3), r3
+-	ld.q	SP, FRAME_R(4), r4
+-	ld.q	SP, FRAME_R(5), r5
+-	ld.q	SP, FRAME_R(6), r6
+-	ld.q	SP, FRAME_R(7), r7
+-
+-	/* And now the trick for those syscalls requiring regs * ! */
+-	or	SP, ZERO, r8
+-
+-	/* Call it */
+-	blink	tr0, ZERO	/* LINK is already properly set */
+-
+-syscall_ret_trace:
+-	/* We get back here only if under trace */
+-	st.q	SP, FRAME_R(9), r2	/* Save return value */
+-
+-	movi	syscall_trace, LINK
+-	ptabs	LINK, tr0
+-	blink	tr0, LINK
+-
+-	/* This needs to be done after any syscall tracing */
+-	ld.q	SP, FRAME_S(FSPC), r2
+-	addi	r2, 4, r2	/* Move PC, being pre-execution event */
+-	st.q	SP, FRAME_S(FSPC), r2
+-
+-	pta	ret_from_syscall, tr0
+-	blink	tr0, ZERO		/* Resume normal return sequence */
+-
+-/*
+- * --- Switch to running under a particular ASID and return the previous ASID value
+- * --- The caller is assumed to have done a cli before calling this.
+- *
+- * Input r2 : new ASID
+- * Output r2 : old ASID
+- */
+-
+-	.global switch_and_save_asid
+-switch_and_save_asid:
+-	getcon	sr, r0
+-	movi	255, r4
+-	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
+-	and	r0, r4, r3	/* r3 = shifted old ASID */
+-	andi	r2, 255, r2	/* mask down new ASID */
+-	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
+-	andc	r0, r4, r0	/* efface old ASID from SR */
+-	or	r0, r2, r0	/* insert the new ASID */
+-	putcon	r0, ssr
+-	movi	1f, r0
+-	putcon	r0, spc
+-	rte
+-	nop
+-1:
+-	ptabs	LINK, tr0
+-	shlri	r3, 16, r2	/* r2 = old ASID */
+-	blink tr0, r63
+-
+-	.global	route_to_panic_handler
+-route_to_panic_handler:
+-	/* Switch to real mode, goto panic_handler, don't return.  Useful for
+-	   last-chance debugging, e.g. if no output wants to go to the console.
+-	   */
+-
+-	movi	panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1
+-	ptabs	r1, tr0
+-	pta	1f, tr1
+-	gettr	tr1, r0
+-	putcon	r0, spc
+-	getcon	sr, r0
+-	movi	1, r1
+-	shlli	r1, 31, r1
+-	andc	r0, r1, r0
+-	putcon	r0, ssr
+-	rte
+-	nop
+-1:	/* Now in real mode */
+-	blink tr0, r63
+-	nop
+-
+-	.global peek_real_address_q
+-peek_real_address_q:
+-	/* Two args:
+-	   r2 : real mode address to peek
+-	   r2(out) : result quadword
+-
+-	   This is provided as a cheapskate way of manipulating device
+-	   registers for debugging (to avoid the need to onchip_remap the debug
+-	   module, and to avoid the need to onchip_remap the watchpoint
+-	   controller in a way that identity maps sufficient bits to avoid the
+-	   SH5-101 cut2 silicon defect).
+-
+-	   This code is not performance critical
+-	*/
+-
+-	add.l	r2, r63, r2	/* sign extend address */
+-	getcon	sr, r0		/* r0 = saved original SR */
+-	movi	1, r1
+-	shlli	r1, 28, r1
+-	or	r0, r1, r1	/* r0 with block bit set */
+-	putcon	r1, sr		/* now in critical section */
+-	movi	1, r36
+-	shlli	r36, 31, r36
+-	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
+-
+-	putcon	r1, ssr
+-	movi	.peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
+-	movi	1f, r37		/* virtual mode return addr */
+-	putcon	r36, spc
+-
+-	synco
+-	rte
+-	nop
+-
+-.peek0:	/* come here in real mode, don't touch caches!!
+-           still in critical section (sr.bl==1) */
+-	putcon	r0, ssr
+-	putcon	r37, spc
+-	/* Here's the actual peek.  If the address is bad, all bets are now off
+-	 * what will happen (handlers invoked in real-mode = bad news) */
+-	ld.q	r2, 0, r2
+-	synco
+-	rte	/* Back to virtual mode */
+-	nop
+-
+-1:
+-	ptabs	LINK, tr0
+-	blink	tr0, r63
+-
+-	.global poke_real_address_q
+-poke_real_address_q:
+-	/* Two args:
+-	   r2 : real mode address to poke
+-	   r3 : quadword value to write.
+-
+-	   This is provided as a cheapskate way of manipulating device
+-	   registers for debugging (to avoid the need to onchip_remap the debug
+-	   module, and to avoid the need to onchip_remap the watchpoint
+-	   controller in a way that identity maps sufficient bits to avoid the
+-	   SH5-101 cut2 silicon defect).
+-
+-	   This code is not performance critical
+-	*/
+-
+-	add.l	r2, r63, r2	/* sign extend address */
+-	getcon	sr, r0		/* r0 = saved original SR */
+-	movi	1, r1
+-	shlli	r1, 28, r1
+-	or	r0, r1, r1	/* r0 with block bit set */
+-	putcon	r1, sr		/* now in critical section */
+-	movi	1, r36
+-	shlli	r36, 31, r36
+-	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
+-
+-	putcon	r1, ssr
+-	movi	.poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
+-	movi	1f, r37		/* virtual mode return addr */
+-	putcon	r36, spc
+-
+-	synco
+-	rte
+-	nop
+-
+-.poke0:	/* come here in real mode, don't touch caches!!
+-           still in critical section (sr.bl==1) */
+-	putcon	r0, ssr
+-	putcon	r37, spc
+-	/* Here's the actual poke.  If the address is bad, all bets are now off
+-	 * what will happen (handlers invoked in real-mode = bad news) */
+-	st.q	r2, 0, r3
+-	synco
+-	rte	/* Back to virtual mode */
+-	nop
+-
+-1:
+-	ptabs	LINK, tr0
+-	blink	tr0, r63
+-
+-/*
+- * --- User Access Handling Section
+- */
+-
+-/*
+- * User Access support. It all moved to non inlined Assembler
+- * functions in here.
+- *
+- * __kernel_size_t __copy_user(void *__to, const void *__from,
+- *			       __kernel_size_t __n)
+- *
+- * Inputs:
+- * (r2)  target address
+- * (r3)  source address
+- * (r4)  size in bytes
+- *
+- * Ouputs:
+- * (*r2) target data
+- * (r2)  non-copied bytes
+- *
+- * If a fault occurs on the user pointer, bail out early and return the
+- * number of bytes not copied in r2.
+- * Strategy : for large blocks, call a real memcpy function which can
+- * move >1 byte at a time using unaligned ld/st instructions, and can
+- * manipulate the cache using prefetch + alloco to improve the speed
+- * further.  If a fault occurs in that function, just revert to the
+- * byte-by-byte approach used for small blocks; this is rare so the
+- * performance hit for that case does not matter.
+- *
+- * For small blocks it's not worth the overhead of setting up and calling
+- * the memcpy routine; do the copy a byte at a time.
+- *
+- */
+-	.global	__copy_user
+-__copy_user:
+-	pta	__copy_user_byte_by_byte, tr1
+-	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
+-	bge/u	r0, r4, tr1
+-	pta copy_user_memcpy, tr0
+-	addi	SP, -32, SP
+-	/* Save arguments in case we have to fix-up unhandled page fault */
+-	st.q	SP, 0, r2
+-	st.q	SP, 8, r3
+-	st.q	SP, 16, r4
+-	st.q	SP, 24, r35 ! r35 is callee-save
+-	/* Save LINK in a register to reduce RTS time later (otherwise
+-	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
+-	ori	LINK, 0, r35
+-	blink	tr0, LINK
+-
+-	/* Copy completed normally if we get back here */
+-	ptabs	r35, tr0
+-	ld.q	SP, 24, r35
+-	/* don't restore r2-r4, pointless */
+-	/* set result=r2 to zero as the copy must have succeeded. */
+-	or	r63, r63, r2
+-	addi	SP, 32, SP
+-	blink	tr0, r63 ! RTS
+-
+-	.global __copy_user_fixup
+-__copy_user_fixup:
+-	/* Restore stack frame */
+-	ori	r35, 0, LINK
+-	ld.q	SP, 24, r35
+-	ld.q	SP, 16, r4
+-	ld.q	SP,  8, r3
+-	ld.q	SP,  0, r2
+-	addi	SP, 32, SP
+-	/* Fall through to original code, in the 'same' state we entered with */
+-
+-/* The slow byte-by-byte method is used if the fast copy traps due to a bad
+-   user address.  In that rare case, the speed drop can be tolerated. */
+-__copy_user_byte_by_byte:
+-	pta	___copy_user_exit, tr1
+-	pta	___copy_user1, tr0
+-	beq/u	r4, r63, tr1	/* early exit for zero length copy */
+-	sub	r2, r3, r0
+-	addi	r0, -1, r0
+-
+-___copy_user1:
+-	ld.b	r3, 0, r5		/* Fault address 1 */
+-
+-	/* Could rewrite this to use just 1 add, but the second comes 'free'
+-	   due to load latency */
+-	addi	r3, 1, r3
+-	addi	r4, -1, r4		/* No real fixup required */
+-___copy_user2:
+-	stx.b	r3, r0, r5		/* Fault address 2 */
+-	bne     r4, ZERO, tr0
+-
+-___copy_user_exit:
+-	or	r4, ZERO, r2
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
+-
+-/*
+- * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
+- *
+- * Inputs:
+- * (r2)  target address
+- * (r3)  size in bytes
+- *
+- * Ouputs:
+- * (*r2) zero-ed target data
+- * (r2)  non-zero-ed bytes
+- */
+-	.global	__clear_user
+-__clear_user:
+-	pta	___clear_user_exit, tr1
+-	pta	___clear_user1, tr0
+-	beq/u	r3, r63, tr1
+-
+-___clear_user1:
+-	st.b	r2, 0, ZERO		/* Fault address */
+-	addi	r2, 1, r2
+-	addi	r3, -1, r3		/* No real fixup required */
+-	bne     r3, ZERO, tr0
+-
+-___clear_user_exit:
+-	or	r3, ZERO, r2
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
+-
+-
+-/*
+- * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
+- *			   int __count)
+- *
+- * Inputs:
+- * (r2)  target address
+- * (r3)  source address
+- * (r4)  maximum size in bytes
+- *
+- * Ouputs:
+- * (*r2) copied data
+- * (r2)  -EFAULT (in case of faulting)
+- *       copied data (otherwise)
+- */
+-	.global	__strncpy_from_user
+-__strncpy_from_user:
+-	pta	___strncpy_from_user1, tr0
+-	pta	___strncpy_from_user_done, tr1
+-	or	r4, ZERO, r5		/* r5 = original count */
+-	beq/u	r4, r63, tr1		/* early exit if r4==0 */
+-	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
+-	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
+-
+-___strncpy_from_user1:
+-	ld.b	r3, 0, r7		/* Fault address: only in reading */
+-	st.b	r2, 0, r7
+-	addi	r2, 1, r2
+-	addi	r3, 1, r3
+-	beq/u	ZERO, r7, tr1
+-	addi	r4, -1, r4		/* return real number of copied bytes */
+-	bne/l	ZERO, r4, tr0
+-
+-___strncpy_from_user_done:
+-	sub	r5, r4, r6		/* If done, return copied */
+-
+-___strncpy_from_user_exit:
+-	or	r6, ZERO, r2
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--config CPU_SH2A
--	bool
--	select CPU_SH2
+-/*
+- * extern long __strnlen_user(const char *__s, long __n)
+- *
+- * Inputs:
+- * (r2)  source address
+- * (r3)  source size in bytes
+- *
+- * Ouputs:
+- * (r2)  -EFAULT (in case of faulting)
+- *       string length (otherwise)
+- */
+-	.global	__strnlen_user
+-__strnlen_user:
+-	pta	___strnlen_user_set_reply, tr0
+-	pta	___strnlen_user1, tr1
+-	or	ZERO, ZERO, r5		/* r5 = counter */
+-	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
+-	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
+-	beq	r3, ZERO, tr0
 -
--config CPU_SH3
--	bool
--	select CPU_HAS_INTEVT
--	select CPU_HAS_SR_RB
+-___strnlen_user1:
+-	ldx.b	r2, r5, r7		/* Fault address: only in reading */
+-	addi	r3, -1, r3		/* No real fixup */
+-	addi	r5, 1, r5
+-	beq	r3, ZERO, tr0
+-	bne	r7, ZERO, tr1
+-! The line below used to be active.  This meant led to a junk byte lying between each pair
+-! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
+-! via the argv and envp arguments to main, it meant the 'flat' representation visible through
+-! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
+-!	addi	r5, 1, r5		/* Include '\0' */
 -
--config CPU_SH4
--	bool
--	select CPU_HAS_INTEVT
--	select CPU_HAS_SR_RB
--	select CPU_HAS_PTEA if !CPU_SH4A || CPU_SHX2
--	select CPU_HAS_FPU if !CPU_SH4AL_DSP
+-___strnlen_user_set_reply:
+-	or	r5, ZERO, r6		/* If done, return counter */
 -
--config CPU_SH4A
--	bool
--	select CPU_SH4
+-___strnlen_user_exit:
+-	or	r6, ZERO, r2
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--config CPU_SH4AL_DSP
--	bool
--	select CPU_SH4A
--	select CPU_HAS_DSP
+-/*
+- * extern long __get_user_asm_?(void *val, long addr)
+- *
+- * Inputs:
+- * (r2)  dest address
+- * (r3)  source address (in User Space)
+- *
+- * Ouputs:
+- * (r2)  -EFAULT (faulting)
+- *       0 	 (not faulting)
+- */
+-	.global	__get_user_asm_b
+-__get_user_asm_b:
+-	or	r2, ZERO, r4
+-	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--config CPU_SHX2
--	bool
+-___get_user_asm_b1:
+-	ld.b	r3, 0, r5		/* r5 = data */
+-	st.b	r4, 0, r5
+-	or	ZERO, ZERO, r2
 -
--config CPU_SHX3
--	bool
+-___get_user_asm_b_exit:
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--choice
--	prompt "Processor sub-type selection"
 -
--#
--# Processor subtypes
--#
+-	.global	__get_user_asm_w
+-__get_user_asm_w:
+-	or	r2, ZERO, r4
+-	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--# SH-2 Processor Support
+-___get_user_asm_w1:
+-	ld.w	r3, 0, r5		/* r5 = data */
+-	st.w	r4, 0, r5
+-	or	ZERO, ZERO, r2
 -
--config CPU_SUBTYPE_SH7619
--	bool "Support SH7619 processor"
--	select CPU_SH2
+-___get_user_asm_w_exit:
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--# SH-2A Processor Support
 -
--config CPU_SUBTYPE_SH7206
--	bool "Support SH7206 processor"
--	select CPU_SH2A
+-	.global	__get_user_asm_l
+-__get_user_asm_l:
+-	or	r2, ZERO, r4
+-	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--# SH-3 Processor Support
+-___get_user_asm_l1:
+-	ld.l	r3, 0, r5		/* r5 = data */
+-	st.l	r4, 0, r5
+-	or	ZERO, ZERO, r2
 -
--config CPU_SUBTYPE_SH7705
--	bool "Support SH7705 processor"
--	select CPU_SH3
+-___get_user_asm_l_exit:
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--config CPU_SUBTYPE_SH7706
--	bool "Support SH7706 processor"
--	select CPU_SH3
--	help
--	  Select SH7706 if you have a 133 Mhz SH-3 HD6417706 CPU.
 -
--config CPU_SUBTYPE_SH7707
--	bool "Support SH7707 processor"
--	select CPU_SH3
--	help
--	  Select SH7707 if you have a  60 Mhz SH-3 HD6417707 CPU.
+-	.global	__get_user_asm_q
+-__get_user_asm_q:
+-	or	r2, ZERO, r4
+-	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--config CPU_SUBTYPE_SH7708
--	bool "Support SH7708 processor"
--	select CPU_SH3
--	help
--	  Select SH7708 if you have a  60 Mhz SH-3 HD6417708S or
--	  if you have a 100 Mhz SH-3 HD6417708R CPU.
+-___get_user_asm_q1:
+-	ld.q	r3, 0, r5		/* r5 = data */
+-	st.q	r4, 0, r5
+-	or	ZERO, ZERO, r2
 -
--config CPU_SUBTYPE_SH7709
--	bool "Support SH7709 processor"
--	select CPU_SH3
--	help
--	  Select SH7709 if you have a  80 Mhz SH-3 HD6417709 CPU.
+-___get_user_asm_q_exit:
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--config CPU_SUBTYPE_SH7710
--	bool "Support SH7710 processor"
--	select CPU_SH3
--	select CPU_HAS_DSP
--	help
--	  Select SH7710 if you have a SH3-DSP SH7710 CPU.
+-/*
+- * extern long __put_user_asm_?(void *pval, long addr)
+- *
+- * Inputs:
+- * (r2)  kernel pointer to value
+- * (r3)  dest address (in User Space)
+- *
+- * Outputs:
+- * (r2)  -EFAULT (faulting)
+- *       0 	 (not faulting)
+- */
+-	.global	__put_user_asm_b
+-__put_user_asm_b:
+-	ld.b	r2, 0, r4		/* r4 = data */
+-	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--config CPU_SUBTYPE_SH7712
--	bool "Support SH7712 processor"
--	select CPU_SH3
--	select CPU_HAS_DSP
--	help
--	  Select SH7712 if you have a SH3-DSP SH7712 CPU.
+-___put_user_asm_b1:
+-	st.b	r3, 0, r4
+-	or	ZERO, ZERO, r2
 -
--config CPU_SUBTYPE_SH7720
--	bool "Support SH7720 processor"
--	select CPU_SH3
--	select CPU_HAS_DSP
--	help
--	  Select SH7720 if you have a SH3-DSP SH7720 CPU.
+-___put_user_asm_b_exit:
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--# SH-4 Processor Support
 -
--config CPU_SUBTYPE_SH7750
--	bool "Support SH7750 processor"
--	select CPU_SH4
--	help
--	  Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
+-	.global	__put_user_asm_w
+-__put_user_asm_w:
+-	ld.w	r2, 0, r4		/* r4 = data */
+-	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--config CPU_SUBTYPE_SH7091
--	bool "Support SH7091 processor"
--	select CPU_SH4
--	help
--	  Select SH7091 if you have an SH-4 based Sega device (such as
--	  the Dreamcast, Naomi, and Naomi 2).
+-___put_user_asm_w1:
+-	st.w	r3, 0, r4
+-	or	ZERO, ZERO, r2
 -
--config CPU_SUBTYPE_SH7750R
--	bool "Support SH7750R processor"
--	select CPU_SH4
+-___put_user_asm_w_exit:
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--config CPU_SUBTYPE_SH7750S
--	bool "Support SH7750S processor"
--	select CPU_SH4
 -
--config CPU_SUBTYPE_SH7751
--	bool "Support SH7751 processor"
--	select CPU_SH4
--	help
--	  Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
--	  or if you have a HD6417751R CPU.
+-	.global	__put_user_asm_l
+-__put_user_asm_l:
+-	ld.l	r2, 0, r4		/* r4 = data */
+-	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--config CPU_SUBTYPE_SH7751R
--	bool "Support SH7751R processor"
--	select CPU_SH4
+-___put_user_asm_l1:
+-	st.l	r3, 0, r4
+-	or	ZERO, ZERO, r2
 -
--config CPU_SUBTYPE_SH7760
--	bool "Support SH7760 processor"
--	select CPU_SH4
+-___put_user_asm_l_exit:
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--config CPU_SUBTYPE_SH4_202
--	bool "Support SH4-202 processor"
--	select CPU_SH4
 -
--# SH-4A Processor Support
+-	.global	__put_user_asm_q
+-__put_user_asm_q:
+-	ld.q	r2, 0, r4		/* r4 = data */
+-	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--config CPU_SUBTYPE_SH7770
--	bool "Support SH7770 processor"
--	select CPU_SH4A
+-___put_user_asm_q1:
+-	st.q	r3, 0, r4
+-	or	ZERO, ZERO, r2
 -
--config CPU_SUBTYPE_SH7780
--	bool "Support SH7780 processor"
--	select CPU_SH4A
+-___put_user_asm_q_exit:
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
 -
--config CPU_SUBTYPE_SH7785
--	bool "Support SH7785 processor"
--	select CPU_SH4A
--	select CPU_SHX2
--	select ARCH_SPARSEMEM_ENABLE
--	select SYS_SUPPORTS_NUMA
+-panic_stash_regs:
+-	/* The idea is : when we get an unhandled panic, we dump the registers
+-	   to a known memory location, then just sit in a tight loop.
+-	   This allows the human to look at the memory region through the GDB
+-	   session (assuming the debug module's SHwy initiator isn't locked up
+-	   or anything), to hopefully analyze the cause of the panic. */
 -
--config CPU_SUBTYPE_SHX3
--	bool "Support SH-X3 processor"
--	select CPU_SH4A
--	select CPU_SHX3
--	select ARCH_SPARSEMEM_ENABLE
--	select SYS_SUPPORTS_NUMA
--	select SYS_SUPPORTS_SMP
+-	/* On entry, former r15 (SP) is in DCR
+-	   former r0  is at resvec_saved_area + 0
+-	   former r1  is at resvec_saved_area + 8
+-	   former tr0 is at resvec_saved_area + 32
+-	   DCR is the only register whose value is lost altogether.
+-	*/
 -
--# SH4AL-DSP Processor Support
+-	movi	0xffffffff80000000, r0 ! phy of dump area
+-	ld.q	SP, 0x000, r1	! former r0
+-	st.q	r0,  0x000, r1
+-	ld.q	SP, 0x008, r1	! former r1
+-	st.q	r0,  0x008, r1
+-	st.q	r0,  0x010, r2
+-	st.q	r0,  0x018, r3
+-	st.q	r0,  0x020, r4
+-	st.q	r0,  0x028, r5
+-	st.q	r0,  0x030, r6
+-	st.q	r0,  0x038, r7
+-	st.q	r0,  0x040, r8
+-	st.q	r0,  0x048, r9
+-	st.q	r0,  0x050, r10
+-	st.q	r0,  0x058, r11
+-	st.q	r0,  0x060, r12
+-	st.q	r0,  0x068, r13
+-	st.q	r0,  0x070, r14
+-	getcon	dcr, r14
+-	st.q	r0,  0x078, r14
+-	st.q	r0,  0x080, r16
+-	st.q	r0,  0x088, r17
+-	st.q	r0,  0x090, r18
+-	st.q	r0,  0x098, r19
+-	st.q	r0,  0x0a0, r20
+-	st.q	r0,  0x0a8, r21
+-	st.q	r0,  0x0b0, r22
+-	st.q	r0,  0x0b8, r23
+-	st.q	r0,  0x0c0, r24
+-	st.q	r0,  0x0c8, r25
+-	st.q	r0,  0x0d0, r26
+-	st.q	r0,  0x0d8, r27
+-	st.q	r0,  0x0e0, r28
+-	st.q	r0,  0x0e8, r29
+-	st.q	r0,  0x0f0, r30
+-	st.q	r0,  0x0f8, r31
+-	st.q	r0,  0x100, r32
+-	st.q	r0,  0x108, r33
+-	st.q	r0,  0x110, r34
+-	st.q	r0,  0x118, r35
+-	st.q	r0,  0x120, r36
+-	st.q	r0,  0x128, r37
+-	st.q	r0,  0x130, r38
+-	st.q	r0,  0x138, r39
+-	st.q	r0,  0x140, r40
+-	st.q	r0,  0x148, r41
+-	st.q	r0,  0x150, r42
+-	st.q	r0,  0x158, r43
+-	st.q	r0,  0x160, r44
+-	st.q	r0,  0x168, r45
+-	st.q	r0,  0x170, r46
+-	st.q	r0,  0x178, r47
+-	st.q	r0,  0x180, r48
+-	st.q	r0,  0x188, r49
+-	st.q	r0,  0x190, r50
+-	st.q	r0,  0x198, r51
+-	st.q	r0,  0x1a0, r52
+-	st.q	r0,  0x1a8, r53
+-	st.q	r0,  0x1b0, r54
+-	st.q	r0,  0x1b8, r55
+-	st.q	r0,  0x1c0, r56
+-	st.q	r0,  0x1c8, r57
+-	st.q	r0,  0x1d0, r58
+-	st.q	r0,  0x1d8, r59
+-	st.q	r0,  0x1e0, r60
+-	st.q	r0,  0x1e8, r61
+-	st.q	r0,  0x1f0, r62
+-	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...
 -
--config CPU_SUBTYPE_SH7343
--	bool "Support SH7343 processor"
--	select CPU_SH4AL_DSP
+-	ld.q	SP, 0x020, r1  ! former tr0
+-	st.q	r0,  0x200, r1
+-	gettr	tr1, r1
+-	st.q	r0,  0x208, r1
+-	gettr	tr2, r1
+-	st.q	r0,  0x210, r1
+-	gettr	tr3, r1
+-	st.q	r0,  0x218, r1
+-	gettr	tr4, r1
+-	st.q	r0,  0x220, r1
+-	gettr	tr5, r1
+-	st.q	r0,  0x228, r1
+-	gettr	tr6, r1
+-	st.q	r0,  0x230, r1
+-	gettr	tr7, r1
+-	st.q	r0,  0x238, r1
 -
--config CPU_SUBTYPE_SH7722
--	bool "Support SH7722 processor"
--	select CPU_SH4AL_DSP
--	select CPU_SHX2
--	select ARCH_SPARSEMEM_ENABLE
--	select SYS_SUPPORTS_NUMA
+-	getcon	sr,  r1
+-	getcon	ssr,  r2
+-	getcon	pssr,  r3
+-	getcon	spc,  r4
+-	getcon	pspc,  r5
+-	getcon	intevt,  r6
+-	getcon	expevt,  r7
+-	getcon	pexpevt,  r8
+-	getcon	tra,  r9
+-	getcon	tea,  r10
+-	getcon	kcr0, r11
+-	getcon	kcr1, r12
+-	getcon	vbr,  r13
+-	getcon	resvec,  r14
 -
--endchoice
+-	st.q	r0,  0x240, r1
+-	st.q	r0,  0x248, r2
+-	st.q	r0,  0x250, r3
+-	st.q	r0,  0x258, r4
+-	st.q	r0,  0x260, r5
+-	st.q	r0,  0x268, r6
+-	st.q	r0,  0x270, r7
+-	st.q	r0,  0x278, r8
+-	st.q	r0,  0x280, r9
+-	st.q	r0,  0x288, r10
+-	st.q	r0,  0x290, r11
+-	st.q	r0,  0x298, r12
+-	st.q	r0,  0x2a0, r13
+-	st.q	r0,  0x2a8, r14
 -
- menu "Memory management options"
- 
- config QUICKLIST
-@@ -207,7 +17,8 @@ config MMU
- 
- config PAGE_OFFSET
- 	hex
--	default "0x80000000" if MMU
-+	default "0x80000000" if MMU && SUPERH32
-+	default "0x20000000" if MMU && SUPERH64
- 	default "0x00000000"
- 
- config MEMORY_START
-@@ -228,17 +39,28 @@ config MEMORY_START
- 
- config MEMORY_SIZE
- 	hex "Physical memory size"
--	default "0x00400000"
-+	default "0x04000000"
- 	help
- 	  This sets the default memory size assumed by your SH kernel. It can
- 	  be overridden as normal by the 'mem=' argument on the kernel command
- 	  line. If unsure, consult your board specifications or just leave it
--	  as 0x00400000 which was the default value before this became
-+	  as 0x04000000 which was the default value before this became
- 	  configurable.
- 
-+# Physical addressing modes
-+
-+config 29BIT
-+	def_bool !32BIT
-+	depends on SUPERH32
-+
- config 32BIT
-+	bool
-+	default y if CPU_SH5
-+
-+config PMB
- 	bool "Support 32-bit physical addressing through PMB"
- 	depends on MMU && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
-+	select 32BIT
- 	default y
- 	help
- 	  If you say Y here, physical addressing will be extended to
-@@ -256,7 +78,7 @@ config X2TLB
- 
- config VSYSCALL
- 	bool "Support vsyscall page"
--	depends on MMU
-+	depends on MMU && (CPU_SH3 || CPU_SH4)
- 	default y
- 	help
- 	  This will enable support for the kernel mapping a vDSO page
-@@ -335,7 +157,7 @@ config PAGE_SIZE_8KB
- 
- config PAGE_SIZE_64KB
- 	bool "64kB"
--	depends on CPU_SH4
-+	depends on CPU_SH4 || CPU_SH5
- 	help
- 	  This enables support for 64kB pages, possible on all SH-4
- 	  CPUs and later.
-@@ -344,7 +166,7 @@ endchoice
- 
- choice
- 	prompt "HugeTLB page size"
--	depends on HUGETLB_PAGE && CPU_SH4 && MMU
-+	depends on HUGETLB_PAGE && (CPU_SH4 || CPU_SH5) && MMU
- 	default HUGETLB_PAGE_SIZE_64K
- 
- config HUGETLB_PAGE_SIZE_64K
-@@ -365,6 +187,10 @@ config HUGETLB_PAGE_SIZE_64MB
- 	bool "64MB"
- 	depends on X2TLB
- 
-+config HUGETLB_PAGE_SIZE_512MB
-+	bool "512MB"
-+	depends on CPU_SH5
-+
- endchoice
- 
- source "mm/Kconfig"
-@@ -392,12 +218,12 @@ config SH_DIRECT_MAPPED
- 
- choice
- 	prompt "Cache mode"
--	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
-+	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
- 	default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
- 
- config CACHE_WRITEBACK
- 	bool "Write-back"
--	depends on CPU_SH2A || CPU_SH3 || CPU_SH4
-+	depends on CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
- 
- config CACHE_WRITETHROUGH
- 	bool "Write-through"
-diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
-index aa44607..9f4bc3d 100644
---- a/arch/sh/mm/Makefile
-+++ b/arch/sh/mm/Makefile
-@@ -1,37 +1,5 @@
--#
--# Makefile for the Linux SuperH-specific parts of the memory manager.
--#
+-	getcon	SPC,r2
+-	getcon	SSR,r3
+-	getcon	EXPEVT,r4
+-	/* Prepare to jump to C - physical address */
+-	movi	panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
+-	ori	r1, 1, r1
+-	ptabs   r1, tr0
+-	getcon	DCR, SP
+-	blink	tr0, ZERO
+-	nop
+-	nop
+-	nop
+-	nop
 -
--obj-y			:= init.o extable.o consistent.o
 -
--ifndef CONFIG_CACHE_OFF
--obj-$(CONFIG_CPU_SH2)		+= cache-sh2.o
--obj-$(CONFIG_CPU_SH3)		+= cache-sh3.o
--obj-$(CONFIG_CPU_SH4)		+= cache-sh4.o
--obj-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
-+ifeq ($(CONFIG_SUPERH32),y)
-+include ${srctree}/arch/sh/mm/Makefile_32
-+else
-+include ${srctree}/arch/sh/mm/Makefile_64
- endif
 -
--mmu-y			:= tlb-nommu.o pg-nommu.o
--mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o	\
--			   ioremap.o
 -
--obj-y			+= $(mmu-y)
+-/*
+- * --- Signal Handling Section
+- */
 -
--ifdef CONFIG_DEBUG_FS
--obj-$(CONFIG_CPU_SH4)	+= cache-debugfs.o
--endif
+-/*
+- * extern long long _sa_default_rt_restorer
+- * extern long long _sa_default_restorer
+- *
+- *		 or, better,
+- *
+- * extern void _sa_default_rt_restorer(void)
+- * extern void _sa_default_restorer(void)
+- *
+- * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
+- * from user space. Copied into user space by signal management.
+- * Both must be quad aligned and 2 quad long (4 instructions).
+- *
+- */
+-	.balign 8
+-	.global sa_default_rt_restorer
+-sa_default_rt_restorer:
+-	movi	0x10, r9
+-	shori	__NR_rt_sigreturn, r9
+-	trapa	r9
+-	nop
 -
--ifdef CONFIG_MMU
--obj-$(CONFIG_CPU_SH3)	+= tlb-sh3.o
--obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o
--ifndef CONFIG_CACHE_OFF
--obj-$(CONFIG_CPU_SH4)		+= pg-sh4.o
--obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh7705.o
--endif
--endif
+-	.balign 8
+-	.global sa_default_restorer
+-sa_default_restorer:
+-	movi	0x10, r9
+-	shori	__NR_sigreturn, r9
+-	trapa	r9
+-	nop
 -
--obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
--obj-$(CONFIG_32BIT)		+= pmb.o
--obj-$(CONFIG_NUMA)		+= numa.o
+-/*
+- * --- __ex_table Section
+- */
 -
--EXTRA_CFLAGS += -Werror
-diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
-new file mode 100644
-index 0000000..e295db6
---- /dev/null
-+++ b/arch/sh/mm/Makefile_32
-@@ -0,0 +1,36 @@
-+#
-+# Makefile for the Linux SuperH-specific parts of the memory manager.
-+#
-+
-+obj-y			:= init.o extable_32.o consistent.o
-+
-+ifndef CONFIG_CACHE_OFF
-+obj-$(CONFIG_CPU_SH2)		+= cache-sh2.o
-+obj-$(CONFIG_CPU_SH3)		+= cache-sh3.o
-+obj-$(CONFIG_CPU_SH4)		+= cache-sh4.o
-+obj-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
-+endif
-+
-+mmu-y			:= tlb-nommu.o pg-nommu.o
-+mmu-$(CONFIG_MMU)	:= fault_32.o tlbflush_32.o ioremap_32.o
-+
-+obj-y			+= $(mmu-y)
-+
-+ifdef CONFIG_DEBUG_FS
-+obj-$(CONFIG_CPU_SH4)	+= cache-debugfs.o
-+endif
-+
-+ifdef CONFIG_MMU
-+obj-$(CONFIG_CPU_SH3)	+= tlb-sh3.o
-+obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o
-+ifndef CONFIG_CACHE_OFF
-+obj-$(CONFIG_CPU_SH4)		+= pg-sh4.o
-+obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh7705.o
-+endif
-+endif
-+
-+obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-+obj-$(CONFIG_PMB)		+= pmb.o
-+obj-$(CONFIG_NUMA)		+= numa.o
-+
-+EXTRA_CFLAGS += -Werror
-diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64
-new file mode 100644
-index 0000000..cbd6aa3
---- /dev/null
-+++ b/arch/sh/mm/Makefile_64
-@@ -0,0 +1,44 @@
-+#
-+# Makefile for the Linux SuperH-specific parts of the memory manager.
-+#
-+
-+obj-y			:= init.o extable_64.o consistent.o
-+
-+mmu-y			:= tlb-nommu.o pg-nommu.o
-+mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o
-+
-+ifndef CONFIG_CACHE_OFF
-+obj-y			+= cache-sh5.o
-+endif
-+
-+obj-y			+= $(mmu-y)
-+
-+obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-+obj-$(CONFIG_NUMA)		+= numa.o
-+
-+EXTRA_CFLAGS += -Werror
-+
-+# Special flags for fault_64.o.  This puts restrictions on the number of
-+# caller-save registers that the compiler can target when building this file.
-+# This is required because the code is called from a context in entry.S where
-+# very few registers have been saved in the exception handler (for speed
-+# reasons).
-+# The caller save registers that have been saved and which can be used are
-+# r2,r3,r4,r5 : argument passing
-+# r15, r18 : SP and LINK
-+# tr0-4 : allow all caller-save TR's.  The compiler seems to be able to make
-+#         use of them, so it's probably beneficial to performance to save them
-+#         and have them available for it.
-+#
-+# The resources not listed below are callee save, i.e. the compiler is free to
-+# use any of them and will spill them to the stack itself.
-+
-+CFLAGS_fault_64.o += -ffixed-r7 \
-+	-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
-+	-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
-+	-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
-+	-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
-+	-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
-+	-ffixed-r41 -ffixed-r42 -ffixed-r43  \
-+	-ffixed-r60 -ffixed-r61 -ffixed-r62 \
-+	-fomit-frame-pointer
-diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
-index de6d2c9..db6d950 100644
---- a/arch/sh/mm/cache-debugfs.c
-+++ b/arch/sh/mm/cache-debugfs.c
-@@ -22,7 +22,8 @@ enum cache_type {
- 	CACHE_TYPE_UNIFIED,
- };
- 
--static int cache_seq_show(struct seq_file *file, void *iter)
-+static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
-+						  void *iter)
- {
- 	unsigned int cache_type = (unsigned int)file->private;
- 	struct cache_info *cache;
-@@ -34,11 +35,11 @@ static int cache_seq_show(struct seq_file *file, void *iter)
- 	 * Go uncached immediately so we don't skew the results any
- 	 * more than we already are..
- 	 */
--	jump_to_P2();
-+	jump_to_uncached();
- 
- 	ccr = ctrl_inl(CCR);
- 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
--		back_to_P1();
-+		back_to_cached();
- 
- 		seq_printf(file, "disabled\n");
- 		return 0;
-@@ -104,7 +105,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
- 		addrstart += cache->way_incr;
- 	}
- 
--	back_to_P1();
-+	back_to_cached();
- 
- 	return 0;
- }
-diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
-index 226b190..43d7ff6 100644
---- a/arch/sh/mm/cache-sh4.c
-+++ b/arch/sh/mm/cache-sh4.c
-@@ -190,7 +190,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
-  * .. which happens to be the same behavior as flush_icache_range().
-  * So, we simply flush out a line.
-  */
--void flush_cache_sigtramp(unsigned long addr)
-+void __uses_jump_to_uncached flush_cache_sigtramp(unsigned long addr)
- {
- 	unsigned long v, index;
- 	unsigned long flags;
-@@ -205,13 +205,13 @@ void flush_cache_sigtramp(unsigned long addr)
- 			(v & boot_cpu_data.icache.entry_mask);
- 
- 	local_irq_save(flags);
--	jump_to_P2();
-+	jump_to_uncached();
- 
- 	for (i = 0; i < boot_cpu_data.icache.ways;
- 	     i++, index += boot_cpu_data.icache.way_incr)
- 		ctrl_outl(0, index);	/* Clear out Valid-bit */
- 
--	back_to_P1();
-+	back_to_cached();
- 	wmb();
- 	local_irq_restore(flags);
- }
-@@ -256,12 +256,12 @@ void flush_dcache_page(struct page *page)
- }
- 
- /* TODO: Selective icache invalidation through IC address array.. */
--static inline void flush_icache_all(void)
-+static inline void __uses_jump_to_uncached flush_icache_all(void)
- {
- 	unsigned long flags, ccr;
- 
- 	local_irq_save(flags);
--	jump_to_P2();
-+	jump_to_uncached();
- 
- 	/* Flush I-cache */
- 	ccr = ctrl_inl(CCR);
-@@ -269,11 +269,11 @@ static inline void flush_icache_all(void)
- 	ctrl_outl(ccr, CCR);
- 
- 	/*
--	 * back_to_P1() will take care of the barrier for us, don't add
-+	 * back_to_cached() will take care of the barrier for us, don't add
- 	 * another one!
- 	 */
- 
--	back_to_P1();
-+	back_to_cached();
- 	local_irq_restore(flags);
- }
- 
-diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
-new file mode 100644
-index 0000000..4617e3a
---- /dev/null
-+++ b/arch/sh/mm/cache-sh5.c
-@@ -0,0 +1,1029 @@
-+/*
-+ * arch/sh/mm/cache-sh5.c
-+ *
-+ * Original version Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Second version Copyright (C) benedict.gaster at superh.com 2002
-+ * Third version Copyright Richard.Curnow at superh.com 2003
-+ * Hacks to third version Copyright (C) 2003 Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/init.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/threads.h>
-+#include <asm/page.h>
-+#include <asm/pgtable.h>
-+#include <asm/processor.h>
-+#include <asm/cache.h>
-+#include <asm/tlb.h>
-+#include <asm/io.h>
-+#include <asm/uaccess.h>
-+#include <asm/mmu_context.h>
-+#include <asm/pgalloc.h> /* for flush_itlb_range */
-+
-+#include <linux/proc_fs.h>
-+
-+/* This function is in entry.S */
-+extern unsigned long switch_and_save_asid(unsigned long new_asid);
-+
-+/* Wired TLB entry for the D-cache */
-+static unsigned long long dtlb_cache_slot;
-+
-+/**
-+ * sh64_cache_init()
-+ *
-+ * This is pretty much just a straightforward clone of the SH
-+ * detect_cpu_and_cache_system().
-+ *
-+ * This function is responsible for setting up all of the cache
-+ * info dynamically as well as taking care of CPU probing and
-+ * setting up the relevant subtype data.
-+ *
-+ * FIXME: For the time being, we only really support the SH5-101
-+ * out of the box, and don't support dynamic probing for things
-+ * like the SH5-103 or even cut2 of the SH5-101. Implement this
-+ * later!
-+ */
-+int __init sh64_cache_init(void)
-+{
-+	/*
-+	 * First, setup some sane values for the I-cache.
-+	 */
-+	cpu_data->icache.ways		= 4;
-+	cpu_data->icache.sets		= 256;
-+	cpu_data->icache.linesz		= L1_CACHE_BYTES;
-+
-+	/*
-+	 * FIXME: This can probably be cleaned up a bit as well.. for example,
-+	 * do we really need the way shift _and_ the way_step_shift ?? Judging
-+	 * by the existing code, I would guess no.. is there any valid reason
-+	 * why we need to be tracking this around?
-+	 */
-+	cpu_data->icache.way_shift	= 13;
-+	cpu_data->icache.entry_shift	= 5;
-+	cpu_data->icache.set_shift	= 4;
-+	cpu_data->icache.way_step_shift	= 16;
-+	cpu_data->icache.asid_shift	= 2;
-+
-+	/*
-+	 * way offset = cache size / associativity, so just don't factor in
-+	 * associativity in the first place..
-+	 */
-+	cpu_data->icache.way_ofs	= cpu_data->icache.sets *
-+					  cpu_data->icache.linesz;
-+
-+	cpu_data->icache.asid_mask	= 0x3fc;
-+	cpu_data->icache.idx_mask	= 0x1fe0;
-+	cpu_data->icache.epn_mask	= 0xffffe000;
-+	cpu_data->icache.flags		= 0;
-+
-+	/*
-+	 * Next, setup some sane values for the D-cache.
-+	 *
-+	 * On the SH5, these are pretty consistent with the I-cache settings,
-+	 * so we just copy over the existing definitions.. these can be fixed
-+	 * up later, especially if we add runtime CPU probing.
-+	 *
-+	 * Though in the meantime it saves us from having to duplicate all of
-+	 * the above definitions..
-+	 */
-+	cpu_data->dcache		= cpu_data->icache;
-+
-+	/*
-+	 * Setup any cache-related flags here
-+	 */
-+#if defined(CONFIG_DCACHE_WRITE_THROUGH)
-+	set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags));
-+#elif defined(CONFIG_DCACHE_WRITE_BACK)
-+	set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags));
-+#endif
-+
-+	/*
-+	 * We also need to reserve a slot for the D-cache in the DTLB, so we
-+	 * do this now ..
-+	 */
-+	dtlb_cache_slot			= sh64_get_wired_dtlb_entry();
-+
-+	return 0;
-+}
-+
-+#ifdef CONFIG_DCACHE_DISABLED
-+#define sh64_dcache_purge_all()					do { } while (0)
-+#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
-+#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
-+#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
-+#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
-+#define sh64_dcache_purge_kernel_range(start, end)		do { } while (0)
-+#define sh64_dcache_wback_current_user_range(start, end)	do { } while (0)
-+#endif
-+
-+/*##########################################################################*/
-+
-+/* From here onwards, a rewrite of the implementation,
-+   by Richard.Curnow at superh.com.
-+
-+   The major changes in this compared to the old version are:
-+   1. use more selective purging through OCBP instead of using ALLOCO to purge
-+      by natural replacement.  This avoids purging out unrelated cache lines
-+      that happen to be in the same set.
-+   2. exploit the APIs copy_user_page and clear_user_page better
-+   3. be more selective about I-cache purging, in particular use invalidate_all
-+      more sparingly.
-+
-+   */
-+
-+/*##########################################################################
-+			       SUPPORT FUNCTIONS
-+  ##########################################################################*/
-+
-+/****************************************************************************/
-+/* The following group of functions deal with mapping and unmapping a temporary
-+   page into the DTLB slot that has been set aside for our exclusive use. */
-+/* In order to accomplish this, we use the generic interface for adding and
-+   removing a wired slot entry as defined in arch/sh/mm/tlb-sh5.c */
-+/****************************************************************************/
-+
-+static unsigned long slot_own_flags;
-+
-+static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr)
-+{
-+	local_irq_save(slot_own_flags);
-+	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
-+}
-+
-+static inline void sh64_teardown_dtlb_cache_slot(void)
-+{
-+	sh64_teardown_tlb_slot(dtlb_cache_slot);
-+	local_irq_restore(slot_own_flags);
-+}
-+
-+/****************************************************************************/
-+
-+#ifndef CONFIG_ICACHE_DISABLED
-+
-+static void __inline__ sh64_icache_inv_all(void)
-+{
-+	unsigned long long addr, flag, data;
-+	unsigned int flags;
-+
-+	addr=ICCR0;
-+	flag=ICCR0_ICI;
-+	data=0;
-+
-+	/* Make this a critical section for safety (probably not strictly necessary.) */
-+	local_irq_save(flags);
-+
-+	/* Without %1 it gets inexplicably wrong */
-+	asm volatile("getcfg	%3, 0, %0\n\t"
-+			"or	%0, %2, %0\n\t"
-+			"putcfg	%3, 0, %0\n\t"
-+			"synci"
-+			: "=&r" (data)
-+			: "0" (data), "r" (flag), "r" (addr));
-+
-+	local_irq_restore(flags);
-+}
-+
-+static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
-+{
-+	/* Invalidate range of addresses [start,end] from the I-cache, where
-+	 * the addresses lie in the kernel superpage. */
-+
-+	unsigned long long ullend, addr, aligned_start;
-+#if (NEFF == 32)
-+	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-+#else
-+#error "NEFF != 32"
-+#endif
-+	aligned_start &= L1_CACHE_ALIGN_MASK;
-+	addr = aligned_start;
-+#if (NEFF == 32)
-+	ullend = (unsigned long long) (signed long long) (signed long) end;
-+#else
-+#error "NEFF != 32"
-+#endif
-+	while (addr <= ullend) {
-+		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
-+		addr += L1_CACHE_BYTES;
-+	}
-+}
-+
-+static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
-+{
-+	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
-+	   Also, eaddr is page-aligned. */
-+
-+	unsigned long long addr, end_addr;
-+	unsigned long flags = 0;
-+	unsigned long running_asid, vma_asid;
-+	addr = eaddr;
-+	end_addr = addr + PAGE_SIZE;
-+
-+	/* Check whether we can use the current ASID for the I-cache
-+	   invalidation.  For example, if we're called via
-+	   access_process_vm->flush_cache_page->here, (e.g. when reading from
-+	   /proc), 'running_asid' will be that of the reader, not of the
-+	   victim.
-+
-+	   Also, note the risk that we might get pre-empted between the ASID
-+	   compare and blocking IRQs, and before we regain control, the
-+	   pid->ASID mapping changes.  However, the whole cache will get
-+	   invalidated when the mapping is renewed, so the worst that can
-+	   happen is that the loop below ends up invalidating somebody else's
-+	   cache entries.
-+	*/
-+
-+	running_asid = get_asid();
-+	vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
-+	if (running_asid != vma_asid) {
-+		local_irq_save(flags);
-+		switch_and_save_asid(vma_asid);
-+	}
-+	while (addr < end_addr) {
-+		/* Worth unrolling a little */
-+		asm __volatile__("icbi %0,  0" : : "r" (addr));
-+		asm __volatile__("icbi %0, 32" : : "r" (addr));
-+		asm __volatile__("icbi %0, 64" : : "r" (addr));
-+		asm __volatile__("icbi %0, 96" : : "r" (addr));
-+		addr += 128;
-+	}
-+	if (running_asid != vma_asid) {
-+		switch_and_save_asid(running_asid);
-+		local_irq_restore(flags);
-+	}
-+}
-+
-+/****************************************************************************/
-+
-+static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
-+			  unsigned long start, unsigned long end)
-+{
-+	/* Used for invalidating big chunks of I-cache, i.e. assume the range
-+	   is whole pages.  If 'start' or 'end' is not page aligned, the code
-+	   is conservative and invalidates to the ends of the enclosing pages.
-+	   This is functionally OK, just a performance loss. */
-+
-+	/* See the comments below in sh64_dcache_purge_user_range() regarding
-+	   the choice of algorithm.  However, for the I-cache option (2) isn't
-+	   available because there are no physical tags so aliases can't be
-+	   resolved.  The icbi instruction has to be used through the user
-+	   mapping.   Because icbi is cheaper than ocbp on a cache hit, it
-+	   would be cheaper to use the selective code for a large range than is
-+	   possible with the D-cache.  Just assume 64 for now as a working
-+	   figure.
-+	   */
-+
-+	int n_pages;
-+
-+	if (!mm) return;
-+
-+	n_pages = ((end - start) >> PAGE_SHIFT);
-+	if (n_pages >= 64) {
-+		sh64_icache_inv_all();
-+	} else {
-+		unsigned long aligned_start;
-+		unsigned long eaddr;
-+		unsigned long after_last_page_start;
-+		unsigned long mm_asid, current_asid;
-+		unsigned long long flags = 0ULL;
-+
-+		mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
-+		current_asid = get_asid();
-+
-+		if (mm_asid != current_asid) {
-+			/* Switch ASID and run the invalidate loop under cli */
-+			local_irq_save(flags);
-+			switch_and_save_asid(mm_asid);
-+		}
-+
-+		aligned_start = start & PAGE_MASK;
-+		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
-+
-+		while (aligned_start < after_last_page_start) {
-+			struct vm_area_struct *vma;
-+			unsigned long vma_end;
-+			vma = find_vma(mm, aligned_start);
-+			if (!vma || (aligned_start <= vma->vm_end)) {
-+				/* Avoid getting stuck in an error condition */
-+				aligned_start += PAGE_SIZE;
-+				continue;
-+			}
-+			vma_end = vma->vm_end;
-+			if (vma->vm_flags & VM_EXEC) {
-+				/* Executable */
-+				eaddr = aligned_start;
-+				while (eaddr < vma_end) {
-+					sh64_icache_inv_user_page(vma, eaddr);
-+					eaddr += PAGE_SIZE;
-+				}
-+			}
-+			aligned_start = vma->vm_end; /* Skip to start of next region */
-+		}
-+		if (mm_asid != current_asid) {
-+			switch_and_save_asid(current_asid);
-+			local_irq_restore(flags);
-+		}
-+	}
-+}
-+
-+static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
-+						unsigned long start, int len)
-+{
-+
-+	/* Invalidate a small range of user context I-cache, not necessarily
-+	   page (or even cache-line) aligned. */
-+
-+	unsigned long long eaddr = start;
-+	unsigned long long eaddr_end = start + len;
-+	unsigned long current_asid, mm_asid;
-+	unsigned long long flags;
-+	unsigned long long epage_start;
-+
-+	/* Since this is used inside ptrace, the ASID in the mm context
-+	   typically won't match current_asid.  We'll have to switch ASID to do
-+	   this.  For safety, and given that the range will be small, do all
-+	   this under cli.
-+
-+	   Note, there is a hazard that the ASID in mm->context is no longer
-+	   actually associated with mm, i.e. if the mm->context has started a
-+	   new cycle since mm was last active.  However, this is just a
-+	   performance issue: all that happens is that we invalidate lines
-+	   belonging to another mm, so the owning process has to refill them
-+	   when that mm goes live again.  mm itself can't have any cache
-+	   entries because there will have been a flush_cache_all when the new
-+	   mm->context cycle started. */
-+
-+	/* Align to start of cache line.  Otherwise, suppose len==8 and start
-+	   was at 32N+28 : the last 4 bytes wouldn't get invalidated. */
-+	eaddr = start & L1_CACHE_ALIGN_MASK;
-+	eaddr_end = start + len;
-+
-+	local_irq_save(flags);
-+	mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
-+	current_asid = switch_and_save_asid(mm_asid);
-+
-+	epage_start = eaddr & PAGE_MASK;
-+
-+	while (eaddr < eaddr_end)
-+	{
-+		asm __volatile__("icbi %0, 0" : : "r" (eaddr));
-+		eaddr += L1_CACHE_BYTES;
-+	}
-+	switch_and_save_asid(current_asid);
-+	local_irq_restore(flags);
-+}
-+
-+static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
-+{
-+	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
-+	   cache hit on the virtual tag the instruction ends there, without a
-+	   TLB lookup. */
-+
-+	unsigned long long aligned_start;
-+	unsigned long long ull_end;
-+	unsigned long long addr;
-+
-+	ull_end = end;
-+
-+	/* Just invalidate over the range using the natural addresses.  TLB
-+	   miss handling will be OK (TBC).  Since it's for the current process,
-+	   either we're already in the right ASID context, or the ASIDs have
-+	   been recycled since we were last active in which case we might just
-+	   invalidate another processes I-cache entries : no worries, just a
-+	   performance drop for him. */
-+	aligned_start = start & L1_CACHE_ALIGN_MASK;
-+	addr = aligned_start;
-+	while (addr < ull_end) {
-+		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
-+		asm __volatile__ ("nop");
-+		asm __volatile__ ("nop");
-+		addr += L1_CACHE_BYTES;
-+	}
-+}
-+
-+#endif /* !CONFIG_ICACHE_DISABLED */
-+
-+/****************************************************************************/
-+
-+#ifndef CONFIG_DCACHE_DISABLED
-+
-+/* Buffer used as the target of alloco instructions to purge data from cache
-+   sets by natural eviction. -- RPC */
-+#define DUMMY_ALLOCO_AREA_SIZE L1_CACHE_SIZE_BYTES + (1024 * 4)
-+static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
-+
-+/****************************************************************************/
-+
-+static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
-+{
-+	/* Purge all ways in a particular block of sets, specified by the base
-+	   set number and number of sets.  Can handle wrap-around, if that's
-+	   needed.  */
-+
-+	int dummy_buffer_base_set;
-+	unsigned long long eaddr, eaddr0, eaddr1;
-+	int j;
-+	int set_offset;
-+
-+	dummy_buffer_base_set = ((int)&dummy_alloco_area & cpu_data->dcache.idx_mask) >> cpu_data->dcache.entry_shift;
-+	set_offset = sets_to_purge_base - dummy_buffer_base_set;
-+
-+	for (j=0; j<n_sets; j++, set_offset++) {
-+		set_offset &= (cpu_data->dcache.sets - 1);
-+		eaddr0 = (unsigned long long)dummy_alloco_area + (set_offset << cpu_data->dcache.entry_shift);
-+
-+		/* Do one alloco which hits the required set per cache way.  For
-+		   write-back mode, this will purge the #ways resident lines.   There's
-+		   little point unrolling this loop because the allocos stall more if
-+		   they're too close together. */
-+		eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
-+		for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
-+			asm __volatile__ ("alloco %0, 0" : : "r" (eaddr));
-+			asm __volatile__ ("synco"); /* TAKum03020 */
-+		}
-+
-+		eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
-+		for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
-+			/* Load from each address.  Required because alloco is a NOP if
-+			   the cache is write-through.  Write-through is a config option. */
-+			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
-+				*(volatile unsigned char *)(int)eaddr;
-+		}
-+	}
-+
-+	/* Don't use OCBI to invalidate the lines.  That costs cycles directly.
-+	   If the dummy block is just left resident, it will naturally get
-+	   evicted as required.  */
-+
-+	return;
-+}
-+
-+/****************************************************************************/
-+
-+static void sh64_dcache_purge_all(void)
-+{
-+	/* Purge the entire contents of the dcache.  The most efficient way to
-+	   achieve this is to use alloco instructions on a region of unused
-+	   memory equal in size to the cache, thereby causing the current
-+	   contents to be discarded by natural eviction.  The alternative,
-+	   namely reading every tag, setting up a mapping for the corresponding
-+	   page and doing an OCBP for the line, would be much more expensive.
-+	   */
-+
-+	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
-+
-+	return;
-+
-+}
-+
-+/****************************************************************************/
-+
-+static void sh64_dcache_purge_kernel_range(unsigned long start, unsigned long end)
-+{
-+	/* Purge the range of addresses [start,end] from the D-cache.  The
-+	   addresses lie in the superpage mapping.  There's no harm if we
-+	   overpurge at either end - just a small performance loss. */
-+	unsigned long long ullend, addr, aligned_start;
-+#if (NEFF == 32)
-+	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-+#else
-+#error "NEFF != 32"
-+#endif
-+	aligned_start &= L1_CACHE_ALIGN_MASK;
-+	addr = aligned_start;
-+#if (NEFF == 32)
-+	ullend = (unsigned long long) (signed long long) (signed long) end;
-+#else
-+#error "NEFF != 32"
-+#endif
-+	while (addr <= ullend) {
-+		asm __volatile__ ("ocbp %0, 0" : : "r" (addr));
-+		addr += L1_CACHE_BYTES;
-+	}
-+	return;
-+}
-+
-+/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
-+   anything else in the kernel */
-+#define MAGIC_PAGE0_START 0xffffffffec000000ULL
-+
-+static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned long eaddr)
-+{
-+	/* Purge the physical page 'paddr' from the cache.  It's known that any
-+	   cache lines requiring attention have the same page colour as the
-+	   address 'eaddr'.
-+
-+	   This relies on the fact that the D-cache matches on physical tags
-+	   when no virtual tag matches.  So we create an alias for the original
-+	   page and purge through that.  (Alternatively, we could have done
-+	   this by switching ASID to match the original mapping and purged
-+	   through that, but that involves ASID switching cost + probably a
-+	   TLBMISS + refill anyway.)
-+	   */
-+
-+	unsigned long long magic_page_start;
-+	unsigned long long magic_eaddr, magic_eaddr_end;
-+
-+	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
-+
-+	/* As long as the kernel is not pre-emptible, this doesn't need to be
-+	   under cli/sti. */
-+
-+	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
-+
-+	magic_eaddr = magic_page_start;
-+	magic_eaddr_end = magic_eaddr + PAGE_SIZE;
-+	while (magic_eaddr < magic_eaddr_end) {
-+		/* Little point in unrolling this loop - the OCBPs are blocking
-+		   and won't go any quicker (i.e. the loop overhead is parallel
-+		   to part of the OCBP execution.) */
-+		asm __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
-+		magic_eaddr += L1_CACHE_BYTES;
-+	}
-+
-+	sh64_teardown_dtlb_cache_slot();
-+}
-+
-+/****************************************************************************/
-+
-+static void sh64_dcache_purge_phy_page(unsigned long paddr)
-+{
-+	/* Purge a page given its physical start address, by creating a
-+	   temporary 1 page mapping and purging across that.  Even if we know
-+	   the virtual address (& vma or mm) of the page, the method here is
-+	   more elegant because it avoids issues of coping with page faults on
-+	   the purge instructions (i.e. no special-case code required in the
-+	   critical path in the TLB miss handling). */
-+
-+	unsigned long long eaddr_start, eaddr, eaddr_end;
-+	int i;
-+
-+	/* As long as the kernel is not pre-emptible, this doesn't need to be
-+	   under cli/sti. */
-+
-+	eaddr_start = MAGIC_PAGE0_START;
-+	for (i=0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
-+		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
-+
-+		eaddr = eaddr_start;
-+		eaddr_end = eaddr + PAGE_SIZE;
-+		while (eaddr < eaddr_end) {
-+			asm __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
-+			eaddr += L1_CACHE_BYTES;
-+		}
-+
-+		sh64_teardown_dtlb_cache_slot();
-+		eaddr_start += PAGE_SIZE;
-+	}
-+}
-+
-+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
-+				unsigned long addr, unsigned long end)
-+{
-+	pgd_t *pgd;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	pte_t entry;
-+	spinlock_t *ptl;
-+	unsigned long paddr;
-+
-+	if (!mm)
-+		return; /* No way to find physical address of page */
-+
-+	pgd = pgd_offset(mm, addr);
-+	if (pgd_bad(*pgd))
-+		return;
-+
-+	pmd = pmd_offset(pgd, addr);
-+	if (pmd_none(*pmd) || pmd_bad(*pmd))
-+		return;
-+
-+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-+	do {
-+		entry = *pte;
-+		if (pte_none(entry) || !pte_present(entry))
-+			continue;
-+		paddr = pte_val(entry) & PAGE_MASK;
-+		sh64_dcache_purge_coloured_phy_page(paddr, addr);
-+	} while (pte++, addr += PAGE_SIZE, addr != end);
-+	pte_unmap_unlock(pte - 1, ptl);
-+}
-+/****************************************************************************/
-+
-+static void sh64_dcache_purge_user_range(struct mm_struct *mm,
-+			  unsigned long start, unsigned long end)
-+{
-+	/* There are at least 5 choices for the implementation of this, with
-+	   pros (+), cons(-), comments(*):
-+
-+	   1. ocbp each line in the range through the original user's ASID
-+	      + no lines spuriously evicted
-+	      - tlbmiss handling (must either handle faults on demand => extra
-+		special-case code in tlbmiss critical path), or map the page in
-+		advance (=> flush_tlb_range in advance to avoid multiple hits)
-+	      - ASID switching
-+	      - expensive for large ranges
-+
-+	   2. temporarily map each page in the range to a special effective
-+	      address and ocbp through the temporary mapping; relies on the
-+	      fact that SH-5 OCB* always do TLB lookup and match on ptags (they
-+	      never look at the etags)
-+	      + no spurious evictions
-+	      - expensive for large ranges
-+	      * surely cheaper than (1)
-+
-+	   3. walk all the lines in the cache, check the tags, if a match
-+	      occurs create a page mapping to ocbp the line through
-+	      + no spurious evictions
-+	      - tag inspection overhead
-+	      - (especially for small ranges)
-+	      - potential cost of setting up/tearing down page mapping for
-+		every line that matches the range
-+	      * cost partly independent of range size
-+
-+	   4. walk all the lines in the cache, check the tags, if a match
-+	      occurs use 4 * alloco to purge the line (+3 other probably
-+	      innocent victims) by natural eviction
-+	      + no tlb mapping overheads
-+	      - spurious evictions
-+	      - tag inspection overhead
-+
-+	   5. implement like flush_cache_all
-+	      + no tag inspection overhead
-+	      - spurious evictions
-+	      - bad for small ranges
-+
-+	   (1) can be ruled out as more expensive than (2).  (2) appears best
-+	   for small ranges.  The choice between (3), (4) and (5) for large
-+	   ranges and the range size for the large/small boundary need
-+	   benchmarking to determine.
-+
-+	   For now use approach (2) for small ranges and (5) for large ones.
-+
-+	   */
-+
-+	int n_pages;
-+
-+	n_pages = ((end - start) >> PAGE_SHIFT);
-+	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
-+#if 1
-+		sh64_dcache_purge_all();
-+#else
-+		unsigned long long set, way;
-+		unsigned long mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
-+		for (set = 0; set < cpu_data->dcache.sets; set++) {
-+			unsigned long long set_base_config_addr = CACHE_OC_ADDRESS_ARRAY + (set << cpu_data->dcache.set_shift);
-+			for (way = 0; way < cpu_data->dcache.ways; way++) {
-+				unsigned long long config_addr = set_base_config_addr + (way << cpu_data->dcache.way_step_shift);
-+				unsigned long long tag0;
-+				unsigned long line_valid;
-+
-+				asm __volatile__("getcfg %1, 0, %0" : "=r" (tag0) : "r" (config_addr));
-+				line_valid = tag0 & SH_CACHE_VALID;
-+				if (line_valid) {
-+					unsigned long cache_asid;
-+					unsigned long epn;
-+
-+					cache_asid = (tag0 & cpu_data->dcache.asid_mask) >> cpu_data->dcache.asid_shift;
-+					/* The next line needs some
-+					   explanation.  The virtual tags
-+					   encode bits [31:13] of the virtual
-+					   address, bit [12] of the 'tag' being
-+					   implied by the cache set index. */
-+					epn = (tag0 & cpu_data->dcache.epn_mask) | ((set & 0x80) << cpu_data->dcache.entry_shift);
-+
-+					if ((cache_asid == mm_asid) && (start <= epn) && (epn < end)) {
-+						/* TODO : could optimise this
-+						   call by batching multiple
-+						   adjacent sets together. */
-+						sh64_dcache_purge_sets(set, 1);
-+						break; /* Don't waste time inspecting other ways for this set */
-+					}
-+				}
-+			}
-+		}
-+#endif
-+	} else {
-+		/* Small range, covered by a single page table page */
-+		start &= PAGE_MASK;	/* should already be so */
-+		end = PAGE_ALIGN(end);	/* should already be so */
-+		sh64_dcache_purge_user_pages(mm, start, end);
-+	}
-+	return;
-+}
-+
-+static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end)
-+{
-+	unsigned long long aligned_start;
-+	unsigned long long ull_end;
-+	unsigned long long addr;
-+
-+	ull_end = end;
-+
-+	/* Just wback over the range using the natural addresses.  TLB miss
-+	   handling will be OK (TBC) : the range has just been written to by
-+	   the signal frame setup code, so the PTEs must exist.
-+
-+	   Note, if we have CONFIG_PREEMPT and get preempted inside this loop,
-+	   it doesn't matter, even if the pid->ASID mapping changes whilst
-+	   we're away.  In that case the cache will have been flushed when the
-+	   mapping was renewed.  So the writebacks below will be nugatory (and
-+	   we'll doubtless have to fault the TLB entry/ies in again with the
-+	   new ASID), but it's a rare case.
-+	   */
-+	aligned_start = start & L1_CACHE_ALIGN_MASK;
-+	addr = aligned_start;
-+	while (addr < ull_end) {
-+		asm __volatile__ ("ocbwb %0, 0" : : "r" (addr));
-+		addr += L1_CACHE_BYTES;
-+	}
-+}
-+
-+/****************************************************************************/
-+
-+/* These *MUST* lie in an area of virtual address space that's otherwise unused. */
-+#define UNIQUE_EADDR_START 0xe0000000UL
-+#define UNIQUE_EADDR_END   0xe8000000UL
-+
-+static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr)
-+{
-+	/* Given a physical address paddr, and a user virtual address
-+	   user_eaddr which will eventually be mapped to it, create a one-off
-+	   kernel-private eaddr mapped to the same paddr.  This is used for
-+	   creating special destination pages for copy_user_page and
-+	   clear_user_page */
-+
-+	static unsigned long current_pointer = UNIQUE_EADDR_START;
-+	unsigned long coloured_pointer;
-+
-+	if (current_pointer == UNIQUE_EADDR_END) {
-+		sh64_dcache_purge_all();
-+		current_pointer = UNIQUE_EADDR_START;
-+	}
-+
-+	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | (user_eaddr & CACHE_OC_SYN_MASK);
-+	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
-+
-+	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
-+
-+	return coloured_pointer;
-+}
-+
-+/****************************************************************************/
-+
-+static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address)
-+{
-+	void *coloured_to;
-+
-+	/* Discard any existing cache entries of the wrong colour.  These are
-+	   present quite often, if the kernel has recently used the page
-+	   internally, then given it up, then it's been allocated to the user.
-+	   */
-+	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
-+
-+	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
-+	sh64_page_copy(from, coloured_to);
-+
-+	sh64_teardown_dtlb_cache_slot();
-+}
-+
-+static void sh64_clear_user_page_coloured(void *to, unsigned long address)
-+{
-+	void *coloured_to;
-+
-+	/* Discard any existing kernel-originated lines of the wrong colour (as
-+	   above) */
-+	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
-+
-+	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
-+	sh64_page_clear(coloured_to);
-+
-+	sh64_teardown_dtlb_cache_slot();
-+}
-+
-+#endif /* !CONFIG_DCACHE_DISABLED */
-+
-+/****************************************************************************/
-+
-+/*##########################################################################
-+			    EXTERNALLY CALLABLE API.
-+  ##########################################################################*/
-+
-+/* These functions are described in Documentation/cachetlb.txt.
-+   Each one of these functions varies in behaviour depending on whether the
-+   I-cache and/or D-cache are configured out.
-+
-+   Note that the Linux term 'flush' corresponds to what is termed 'purge' in
-+   the sh/sh64 jargon for the D-cache, i.e. write back dirty data then
-+   invalidate the cache lines, and 'invalidate' for the I-cache.
-+   */
-+
-+#undef FLUSH_TRACE
-+
-+void flush_cache_all(void)
-+{
-+	/* Invalidate the entire contents of both caches, after writing back to
-+	   memory any dirty data from the D-cache. */
-+	sh64_dcache_purge_all();
-+	sh64_icache_inv_all();
-+}
-+
-+/****************************************************************************/
-+
-+void flush_cache_mm(struct mm_struct *mm)
-+{
-+	/* Invalidate an entire user-address space from both caches, after
-+	   writing back dirty data (e.g. for shared mmap etc). */
-+
-+	/* This could be coded selectively by inspecting all the tags then
-+	   doing 4*alloco on any set containing a match (as for
-+	   flush_cache_range), but fork/exit/execve (where this is called from)
-+	   are expensive anyway. */
-+
-+	/* Have to do a purge here, despite the comments re I-cache below.
-+	   There could be odd-coloured dirty data associated with the mm still
-+	   in the cache - if this gets written out through natural eviction
-+	   after the kernel has reused the page there will be chaos.
-+	   */
-+
-+	sh64_dcache_purge_all();
-+
-+	/* The mm being torn down won't ever be active again, so any Icache
-+	   lines tagged with its ASID won't be visible for the rest of the
-+	   lifetime of this ASID cycle.  Before the ASID gets reused, there
-+	   will be a flush_cache_all.  Hence we don't need to touch the
-+	   I-cache.  This is similar to the lack of action needed in
-+	   flush_tlb_mm - see fault.c. */
-+}
-+
-+/****************************************************************************/
-+
-+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-+		       unsigned long end)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+
-+	/* Invalidate (from both caches) the range [start,end) of virtual
-+	   addresses from the user address space specified by mm, after writing
-+	   back any dirty data.
-+
-+	   Note, 'end' is 1 byte beyond the end of the range to flush. */
-+
-+	sh64_dcache_purge_user_range(mm, start, end);
-+	sh64_icache_inv_user_page_range(mm, start, end);
-+}
-+
-+/****************************************************************************/
-+
-+void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
-+{
-+	/* Invalidate any entries in either cache for the vma within the user
-+	   address space vma->vm_mm for the page starting at virtual address
-+	   'eaddr'.   This seems to be used primarily in breaking COW.  Note,
-+	   the I-cache must be searched too in case the page in question is
-+	   both writable and being executed from (e.g. stack trampolines.)
-+
-+	   Note, this is called with pte lock held.
-+	   */
-+
-+	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
-+
-+	if (vma->vm_flags & VM_EXEC) {
-+		sh64_icache_inv_user_page(vma, eaddr);
-+	}
-+}
-+
-+/****************************************************************************/
-+
-+#ifndef CONFIG_DCACHE_DISABLED
-+
-+void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
-+{
-+	/* 'from' and 'to' are kernel virtual addresses (within the superpage
-+	   mapping of the physical RAM).  'address' is the user virtual address
-+	   where the copy 'to' will be mapped after.  This allows a custom
-+	   mapping to be used to ensure that the new copy is placed in the
-+	   right cache sets for the user to see it without having to bounce it
-+	   out via memory.  Note however : the call to flush_page_to_ram in
-+	   (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
-+	   very important case!
-+
-+	   TBD : can we guarantee that on every call, any cache entries for
-+	   'from' are in the same colour sets as 'address' also?  i.e. is this
-+	   always used just to deal with COW?  (I suspect not). */
-+
-+	/* There are two possibilities here for when the page 'from' was last accessed:
-+	   * by the kernel : this is OK, no purge required.
-+	   * by the/a user (e.g. for break_COW) : need to purge.
-+
-+	   If the potential user mapping at 'address' is the same colour as
-+	   'from' there is no need to purge any cache lines from the 'from'
-+	   page mapped into cache sets of colour 'address'.  (The copy will be
-+	   accessing the page through 'from').
-+	   */
-+
-+	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) {
-+		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
-+	}
-+
-+	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
-+		/* No synonym problem on destination */
-+		sh64_page_copy(from, to);
-+	} else {
-+		sh64_copy_user_page_coloured(to, from, address);
-+	}
-+
-+	/* Note, don't need to flush 'from' page from the cache again - it's
-+	   done anyway by the generic code */
-+}
-+
-+void clear_user_page(void *to, unsigned long address, struct page *page)
-+{
-+	/* 'to' is a kernel virtual address (within the superpage
-+	   mapping of the physical RAM).  'address' is the user virtual address
-+	   where the 'to' page will be mapped after.  This allows a custom
-+	   mapping to be used to ensure that the new copy is placed in the
-+	   right cache sets for the user to see it without having to bounce it
-+	   out via memory.
-+	*/
-+
-+	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
-+		/* No synonym problem on destination */
-+		sh64_page_clear(to);
-+	} else {
-+		sh64_clear_user_page_coloured(to, address);
-+	}
-+}
-+
-+#endif /* !CONFIG_DCACHE_DISABLED */
-+
-+/****************************************************************************/
-+
-+void flush_dcache_page(struct page *page)
-+{
-+	sh64_dcache_purge_phy_page(page_to_phys(page));
-+	wmb();
-+}
-+
-+/****************************************************************************/
-+
-+void flush_icache_range(unsigned long start, unsigned long end)
-+{
-+	/* Flush the range [start,end] of kernel virtual address space from
-+	   the I-cache.  The corresponding range must be purged from the
-+	   D-cache also because the SH-5 doesn't have cache snooping between
-+	   the caches.  The addresses will be visible through the superpage
-+	   mapping, therefore it's guaranteed that there are no cache entries for
-+	   the range in cache sets of the wrong colour.
-+
-+	   Primarily used for cohering the I-cache after a module has
-+	   been loaded.  */
-+
-+	/* We also make sure to purge the same range from the D-cache since
-+	   flush_page_to_ram() won't be doing this for us! */
-+
-+	sh64_dcache_purge_kernel_range(start, end);
-+	wmb();
-+	sh64_icache_inv_kernel_range(start, end);
-+}
-+
-+/****************************************************************************/
-+
-+void flush_icache_user_range(struct vm_area_struct *vma,
-+			struct page *page, unsigned long addr, int len)
-+{
-+	/* Flush the range of user (defined by vma->vm_mm) address space
-+	   starting at 'addr' for 'len' bytes from the cache.  The range does
-+	   not straddle a page boundary, the unique physical page containing
-+	   the range is 'page'.  This seems to be used mainly for invalidating
-+	   an address range following a poke into the program text through the
-+	   ptrace() call from another process (e.g. for BRK instruction
-+	   insertion). */
-+
-+	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
-+	mb();
-+
-+	if (vma->vm_flags & VM_EXEC) {
-+		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
-+	}
-+}
-+
-+/*##########################################################################
-+			ARCH/SH64 PRIVATE CALLABLE API.
-+  ##########################################################################*/
-+
-+void flush_cache_sigtramp(unsigned long start, unsigned long end)
-+{
-+	/* For the address range [start,end), write back the data from the
-+	   D-cache and invalidate the corresponding region of the I-cache for
-+	   the current process.  Used to flush signal trampolines on the stack
-+	   to make them executable. */
-+
-+	sh64_dcache_wback_current_user_range(start, end);
-+	wmb();
-+	sh64_icache_inv_current_user_range(start, end);
-+}
-+
-diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
-index 4896d73..22dacc7 100644
---- a/arch/sh/mm/cache-sh7705.c
-+++ b/arch/sh/mm/cache-sh7705.c
-@@ -71,7 +71,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
- /*
-  * Writeback&Invalidate the D-cache of the page
-  */
--static void __flush_dcache_page(unsigned long phys)
-+static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
- {
- 	unsigned long ways, waysize, addrstart;
- 	unsigned long flags;
-@@ -92,7 +92,7 @@ static void __flush_dcache_page(unsigned long phys)
- 	 * possible.
- 	 */
- 	local_irq_save(flags);
--	jump_to_P2();
-+	jump_to_uncached();
- 
- 	ways = current_cpu_data.dcache.ways;
- 	waysize = current_cpu_data.dcache.sets;
-@@ -118,7 +118,7 @@ static void __flush_dcache_page(unsigned long phys)
- 		addrstart += current_cpu_data.dcache.way_incr;
- 	} while (--ways);
- 
--	back_to_P1();
-+	back_to_cached();
- 	local_irq_restore(flags);
- }
- 
-@@ -132,15 +132,15 @@ void flush_dcache_page(struct page *page)
- 		__flush_dcache_page(PHYSADDR(page_address(page)));
- }
- 
--void flush_cache_all(void)
-+void __uses_jump_to_uncached flush_cache_all(void)
- {
- 	unsigned long flags;
- 
- 	local_irq_save(flags);
--	jump_to_P2();
-+	jump_to_uncached();
- 
- 	cache_wback_all();
--	back_to_P1();
-+	back_to_cached();
- 	local_irq_restore(flags);
- }
- 
-diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
+-/*
+- * User Access Exception Table.
+- */
+-	.section	__ex_table,  "a"
+-
+-	.global asm_uaccess_start	/* Just a marker */
+-asm_uaccess_start:
+-
+-	.long	___copy_user1, ___copy_user_exit
+-	.long	___copy_user2, ___copy_user_exit
+-	.long	___clear_user1, ___clear_user_exit
+-	.long	___strncpy_from_user1, ___strncpy_from_user_exit
+-	.long	___strnlen_user1, ___strnlen_user_exit
+-	.long	___get_user_asm_b1, ___get_user_asm_b_exit
+-	.long	___get_user_asm_w1, ___get_user_asm_w_exit
+-	.long	___get_user_asm_l1, ___get_user_asm_l_exit
+-	.long	___get_user_asm_q1, ___get_user_asm_q_exit
+-	.long	___put_user_asm_b1, ___put_user_asm_b_exit
+-	.long	___put_user_asm_w1, ___put_user_asm_w_exit
+-	.long	___put_user_asm_l1, ___put_user_asm_l_exit
+-	.long	___put_user_asm_q1, ___put_user_asm_q_exit
+-
+-	.global asm_uaccess_end		/* Just a marker */
+-asm_uaccess_end:
+-
+-
+-
+-
+-/*
+- * --- .text.init Section
+- */
+-
+-	.section	.text.init, "ax"
+-
+-/*
+- * void trap_init (void)
+- *
+- */
+-	.global	trap_init
+-trap_init:
+-	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
+-	st.q	SP, 0, r28
+-	st.q	SP, 8, r29
+-	st.q	SP, 16, r30
+-
+-	/* Set VBR and RESVEC */
+-	movi	LVBR_block, r19
+-	andi	r19, -4, r19			/* reset MMUOFF + reserved */
+-	/* For RESVEC exceptions we force the MMU off, which means we need the
+-	   physical address. */
+-	movi	LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
+-	andi	r20, -4, r20			/* reset reserved */
+-	ori	r20, 1, r20			/* set MMUOFF */
+-	putcon	r19, VBR
+-	putcon	r20, RESVEC
+-
+-	/* Sanity check */
+-	movi	LVBR_block_end, r21
+-	andi	r21, -4, r21
+-	movi	BLOCK_SIZE, r29			/* r29 = expected size */
+-	or	r19, ZERO, r30
+-	add	r19, r29, r19
+-
+-	/*
+-	 * Ugly, but better loop forever now than crash afterwards.
+-	 * We should print a message, but if we touch LVBR or
+-	 * LRESVEC blocks we should not be surprised if we get stuck
+-	 * in trap_init().
+-	 */
+-	pta	trap_init_loop, tr1
+-	gettr	tr1, r28			/* r28 = trap_init_loop */
+-	sub	r21, r30, r30			/* r30 = actual size */
+-
+-	/*
+-	 * VBR/RESVEC handlers overlap by being bigger than
+-	 * allowed. Very bad. Just loop forever.
+-	 * (r28) panic/loop address
+-	 * (r29) expected size
+-	 * (r30) actual size
+-	 */
+-trap_init_loop:
+-	bne	r19, r21, tr1
+-
+-	/* Now that exception vectors are set up reset SR.BL */
+-	getcon 	SR, r22
+-	movi	SR_UNBLOCK_EXC, r23
+-	and	r22, r23, r22
+-	putcon	r22, SR
+-
+-	addi	SP, 24, SP
+-	ptabs	LINK, tr0
+-	blink	tr0, ZERO
+-
+diff --git a/arch/sh64/kernel/fpu.c b/arch/sh64/kernel/fpu.c
+deleted file mode 100644
+index 8ad4ed6..0000000
+--- a/arch/sh64/kernel/fpu.c
++++ /dev/null
+@@ -1,170 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/fpu.c
+- *
+- * Copyright (C) 2001  Manuela Cirronis, Paolo Alberelli
+- * Copyright (C) 2002  STMicroelectronics Limited
+- *   Author : Stuart Menefy
+- *
+- * Started from SH4 version:
+- *   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
+- *
+- */
+-
+-#include <linux/sched.h>
+-#include <linux/signal.h>
+-#include <asm/processor.h>
+-#include <asm/user.h>
+-#include <asm/io.h>
+-
+-/*
+- * Initially load the FPU with signalling NANS.  This bit pattern
+- * has the property that no matter whether considered as single or as
+- * double precision, it still represents a signalling NAN.
+- */
+-#define sNAN64		0xFFFFFFFFFFFFFFFFULL
+-#define sNAN32		0xFFFFFFFFUL
+-
+-static union sh_fpu_union init_fpuregs = {
+-	.hard = {
+-	  .fp_regs = { [0 ... 63] = sNAN32 },
+-	  .fpscr = FPSCR_INIT
+-	}
+-};
+-
+-inline void fpsave(struct sh_fpu_hard_struct *fpregs)
+-{
+-	asm volatile("fst.p     %0, (0*8), fp0\n\t"
+-		     "fst.p     %0, (1*8), fp2\n\t"
+-		     "fst.p     %0, (2*8), fp4\n\t"
+-		     "fst.p     %0, (3*8), fp6\n\t"
+-		     "fst.p     %0, (4*8), fp8\n\t"
+-		     "fst.p     %0, (5*8), fp10\n\t"
+-		     "fst.p     %0, (6*8), fp12\n\t"
+-		     "fst.p     %0, (7*8), fp14\n\t"
+-		     "fst.p     %0, (8*8), fp16\n\t"
+-		     "fst.p     %0, (9*8), fp18\n\t"
+-		     "fst.p     %0, (10*8), fp20\n\t"
+-		     "fst.p     %0, (11*8), fp22\n\t"
+-		     "fst.p     %0, (12*8), fp24\n\t"
+-		     "fst.p     %0, (13*8), fp26\n\t"
+-		     "fst.p     %0, (14*8), fp28\n\t"
+-		     "fst.p     %0, (15*8), fp30\n\t"
+-		     "fst.p     %0, (16*8), fp32\n\t"
+-		     "fst.p     %0, (17*8), fp34\n\t"
+-		     "fst.p     %0, (18*8), fp36\n\t"
+-		     "fst.p     %0, (19*8), fp38\n\t"
+-		     "fst.p     %0, (20*8), fp40\n\t"
+-		     "fst.p     %0, (21*8), fp42\n\t"
+-		     "fst.p     %0, (22*8), fp44\n\t"
+-		     "fst.p     %0, (23*8), fp46\n\t"
+-		     "fst.p     %0, (24*8), fp48\n\t"
+-		     "fst.p     %0, (25*8), fp50\n\t"
+-		     "fst.p     %0, (26*8), fp52\n\t"
+-		     "fst.p     %0, (27*8), fp54\n\t"
+-		     "fst.p     %0, (28*8), fp56\n\t"
+-		     "fst.p     %0, (29*8), fp58\n\t"
+-		     "fst.p     %0, (30*8), fp60\n\t"
+-		     "fst.p     %0, (31*8), fp62\n\t"
+-
+-		     "fgetscr   fr63\n\t"
+-		     "fst.s     %0, (32*8), fr63\n\t"
+-		: /* no output */
+-		: "r" (fpregs)
+-		: "memory");
+-}
+-
+-
+-static inline void
+-fpload(struct sh_fpu_hard_struct *fpregs)
+-{
+-	asm volatile("fld.p     %0, (0*8), fp0\n\t"
+-		     "fld.p     %0, (1*8), fp2\n\t"
+-		     "fld.p     %0, (2*8), fp4\n\t"
+-		     "fld.p     %0, (3*8), fp6\n\t"
+-		     "fld.p     %0, (4*8), fp8\n\t"
+-		     "fld.p     %0, (5*8), fp10\n\t"
+-		     "fld.p     %0, (6*8), fp12\n\t"
+-		     "fld.p     %0, (7*8), fp14\n\t"
+-		     "fld.p     %0, (8*8), fp16\n\t"
+-		     "fld.p     %0, (9*8), fp18\n\t"
+-		     "fld.p     %0, (10*8), fp20\n\t"
+-		     "fld.p     %0, (11*8), fp22\n\t"
+-		     "fld.p     %0, (12*8), fp24\n\t"
+-		     "fld.p     %0, (13*8), fp26\n\t"
+-		     "fld.p     %0, (14*8), fp28\n\t"
+-		     "fld.p     %0, (15*8), fp30\n\t"
+-		     "fld.p     %0, (16*8), fp32\n\t"
+-		     "fld.p     %0, (17*8), fp34\n\t"
+-		     "fld.p     %0, (18*8), fp36\n\t"
+-		     "fld.p     %0, (19*8), fp38\n\t"
+-		     "fld.p     %0, (20*8), fp40\n\t"
+-		     "fld.p     %0, (21*8), fp42\n\t"
+-		     "fld.p     %0, (22*8), fp44\n\t"
+-		     "fld.p     %0, (23*8), fp46\n\t"
+-		     "fld.p     %0, (24*8), fp48\n\t"
+-		     "fld.p     %0, (25*8), fp50\n\t"
+-		     "fld.p     %0, (26*8), fp52\n\t"
+-		     "fld.p     %0, (27*8), fp54\n\t"
+-		     "fld.p     %0, (28*8), fp56\n\t"
+-		     "fld.p     %0, (29*8), fp58\n\t"
+-		     "fld.p     %0, (30*8), fp60\n\t"
+-
+-		     "fld.s     %0, (32*8), fr63\n\t"
+-		     "fputscr   fr63\n\t"
+-
+-		     "fld.p     %0, (31*8), fp62\n\t"
+-		: /* no output */
+-		: "r" (fpregs) );
+-}
+-
+-void fpinit(struct sh_fpu_hard_struct *fpregs)
+-{
+-	*fpregs = init_fpuregs.hard;
+-}
+-
+-asmlinkage void
+-do_fpu_error(unsigned long ex, struct pt_regs *regs)
+-{
+-	struct task_struct *tsk = current;
+-
+-	regs->pc += 4;
+-
+-	tsk->thread.trap_no = 11;
+-	tsk->thread.error_code = 0;
+-	force_sig(SIGFPE, tsk);
+-}
+-
+-
+-asmlinkage void
+-do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
+-{
+-	void die(const char *str, struct pt_regs *regs, long err);
+-
+-	if (! user_mode(regs))
+-		die("FPU used in kernel", regs, ex);
+-
+-	regs->sr &= ~SR_FD;
+-
+-	if (last_task_used_math == current)
+-		return;
+-
+-	grab_fpu();
+-	if (last_task_used_math != NULL) {
+-		/* Other processes fpu state, save away */
+-		fpsave(&last_task_used_math->thread.fpu.hard);
+-        }
+-        last_task_used_math = current;
+-        if (used_math()) {
+-                fpload(&current->thread.fpu.hard);
+-        } else {
+-		/* First time FPU user.  */
+-		fpload(&init_fpuregs.hard);
+-                set_used_math();
+-        }
+-	release_fpu();
+-}
+-
+diff --git a/arch/sh64/kernel/head.S b/arch/sh64/kernel/head.S
 deleted file mode 100644
-index 7a7c81e..0000000
---- a/arch/sh/mm/clear_page.S
+index 186406d..0000000
+--- a/arch/sh64/kernel/head.S
 +++ /dev/null
-@@ -1,152 +0,0 @@
+@@ -1,372 +0,0 @@
 -/*
-- * __clear_user_page, __clear_user, clear_page implementation of SuperH
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
 - *
-- * Copyright (C) 2001  Kaz Kojima
-- * Copyright (C) 2001, 2002  Niibe Yutaka
-- * Copyright (C) 2006  Paul Mundt
+- * arch/sh64/kernel/head.S
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003, 2004  Paul Mundt
+- *
+- *
+- * benedict.gaster@superh.com:	 2nd May 2002
+- *    Moved definition of empty_zero_page to its own section allowing
+- *    it to be placed at an absolute address known at load time.
+- *
+- * lethal@linux-sh.org:          9th May 2003
+- *    Kill off GLOBAL_NAME() usage.
+- *
+- * lethal@linux-sh.org:          8th May 2004
+- *    Add early SCIF console DTLB mapping.
 - */
--#include <linux/linkage.h>
+-
+-
 -#include <asm/page.h>
+-#include <asm/mmu_context.h>
+-#include <asm/cache.h>
+-#include <asm/tlb.h>
+-#include <asm/processor.h>
+-#include <asm/registers.h>
+-#include <asm/thread_info.h>
 -
 -/*
-- * clear_page_slow
-- * @to: P1 address
-- *
-- * void clear_page_slow(void *to)
+- * MMU defines: TLB boundaries.
 - */
 -
+-#define MMUIR_FIRST	ITLB_FIXED
+-#define MMUIR_END	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
+-#define MMUIR_STEP	TLB_STEP
+-
+-#define MMUDR_FIRST	DTLB_FIXED
+-#define MMUDR_END	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
+-#define MMUDR_STEP	TLB_STEP
+-
+-/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
+-#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
+-#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
+-#endif
+-
 -/*
-- * r0 --- scratch
-- * r4 --- to
-- * r5 --- to + PAGE_SIZE
+- * MMU defines: Fixed TLBs.
 - */
--ENTRY(clear_page_slow)
--	mov	r4,r5
--	mov.l	.Llimit,r0
--	add	r0,r5
--	mov	#0,r0
--	!
--1:
--#if defined(CONFIG_CPU_SH3)
--	mov.l	r0,@r4
--#elif defined(CONFIG_CPU_SH4)
--	movca.l	r0,@r4
--	mov	r4,r1
+-/* Deal safely with the case where the base of RAM is not 512Mb aligned */
+-
+-#define ALIGN_512M_MASK (0xffffffffe0000000)
+-#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
+-#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
+-
+-#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
+-			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+-
+-#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
+-			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
+-
+-#define MMUDR_CACHED_H	0x0000000000000003 | ALIGNED_EFFECTIVE
+-			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+-#define MMUDR_CACHED_L	0x000000000000015a | ALIGNED_PHYSICAL
+-			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
+-
+-#ifdef CONFIG_ICACHE_DISABLED
+-#define	ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
+-#else
+-#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
 -#endif
--	add	#32,r4
--	mov.l	r0,@-r4
--	mov.l	r0,@-r4
--	mov.l	r0,@-r4
--	mov.l	r0,@-r4
--	mov.l	r0,@-r4
--	mov.l	r0,@-r4
--	mov.l	r0,@-r4
--#if defined(CONFIG_CPU_SH4)
--	ocbwb	@r1
+-#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
+-
+-#if defined (CONFIG_DCACHE_DISABLED)
+-#define	OCCR0_INIT_VAL	OCCR0_OFF			   /* D-cache: off  */
+-#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
+-#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WT	   /* D-cache: on,   */
+-							   /* WT, invalidate */
+-#elif defined (CONFIG_DCACHE_WRITE_BACK)
+-#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	   /* D-cache: on,   */
+-							   /* WB, invalidate */
+-#else
+-#error preprocessor flag CONFIG_DCACHE_... not recognized!
 -#endif
--	cmp/eq	r5,r4
--	bf/s	1b
--	 add	#28,r4
--	!
--	rts
--	 nop
--.Llimit:	.long	(PAGE_SIZE-28)
 -
--ENTRY(__clear_user)
--	!
--	mov	#0, r0
--	mov	#0xe0, r1	! 0xffffffe0
--	!
--	! r4..(r4+31)&~32 	   -------- not aligned	[ Area 0 ]
--	! (r4+31)&~32..(r4+r5)&~32 -------- aligned	[ Area 1 ]
--	! (r4+r5)&~32..r4+r5       -------- not aligned	[ Area 2 ]
--	!
--	! Clear area 0
--	mov	r4, r2
--	!
--	tst	r1, r5		! length < 32
--	bt	.Larea2		! skip to remainder
--	!
--	add	#31, r2
--	and	r1, r2
--	cmp/eq	r4, r2
--	bt	.Larea1
--	mov	r2, r3
--	sub	r4, r3
--	mov	r3, r7
--	mov	r4, r2
--	!
--.L0:	dt	r3
--0:	mov.b	r0, @r2
--	bf/s	.L0
--	 add	#1, r2
--	!
--	sub	r7, r5
--	mov	r2, r4
--.Larea1:
--	mov	r4, r3
--	add	r5, r3
--	and	r1, r3
--	cmp/hi	r2, r3
--	bf	.Larea2
--	!
--	! Clear area 1
--#if defined(CONFIG_CPU_SH4)
--1:	movca.l	r0, @r2
+-#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			   /* No locking     */
+-
+-	.section	.empty_zero_page, "aw"
+-	.global empty_zero_page
+-
+-empty_zero_page:
+-	.long	1		/* MOUNT_ROOT_RDONLY */
+-	.long	0		/* RAMDISK_FLAGS */
+-	.long	0x0200		/* ORIG_ROOT_DEV */
+-	.long	1		/* LOADER_TYPE */
+-	.long	0x00800000	/* INITRD_START */
+-	.long	0x00800000	/* INITRD_SIZE */
+-	.long	0
+-
+-	.text
+-	.balign 4096,0,4096
+-
+-	.section	.data, "aw"
+-	.balign	PAGE_SIZE
+-
+-	.section	.data, "aw"
+-	.balign	PAGE_SIZE
+-
+-	.global swapper_pg_dir
+-swapper_pg_dir:
+-	.space PAGE_SIZE, 0
+-
+-	.global empty_bad_page
+-empty_bad_page:
+-	.space PAGE_SIZE, 0
+-
+-	.global empty_bad_pte_table
+-empty_bad_pte_table:
+-	.space PAGE_SIZE, 0
+-
+-	.global	fpu_in_use
+-fpu_in_use:	.quad	0
+-
+-
+-	.section	.text.head, "ax"
+-	.balign L1_CACHE_BYTES
+-/*
+- * Condition at the entry of __stext:
+- * . Reset state:
+- *   . SR.FD    = 1		(FPU disabled)
+- *   . SR.BL    = 1		(Exceptions disabled)
+- *   . SR.MD    = 1		(Privileged Mode)
+- *   . SR.MMU   = 0		(MMU Disabled)
+- *   . SR.CD    = 0		(CTC User Visible)
+- *   . SR.IMASK = Undefined	(Interrupt Mask)
+- *
+- * Operations supposed to be performed by __stext:
+- * . prevent speculative fetch onto device memory while MMU is off
+- * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
+- * . first, save CPU state and set it to something harmless
+- * . any CPU detection and/or endianness settings (?)
+- * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
+- * . set initial TLB entries for cached and uncached regions
+- *   (no fine granularity paging)
+- * . set initial cache state
+- * . enable MMU and caches
+- * . set CPU to a consistent state
+- *   . registers (including stack pointer and current/KCR0)
+- *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
+- *     at this stage. This is all to later Linux initialization steps.
+- *   . initialize FPU
+- * . clear BSS
+- * . jump into start_kernel()
+- * . be prepared to hopeless start_kernel() returns.
+- *
+- */
+-	.global _stext
+-_stext:
+-	/*
+-	 * Prevent speculative fetch on device memory due to
+-	 * uninitialized target registers.
+-	 */
+-	ptabs/u	ZERO, tr0
+-	ptabs/u	ZERO, tr1
+-	ptabs/u	ZERO, tr2
+-	ptabs/u	ZERO, tr3
+-	ptabs/u	ZERO, tr4
+-	ptabs/u	ZERO, tr5
+-	ptabs/u	ZERO, tr6
+-	ptabs/u	ZERO, tr7
+-	synci
+-
+-	/*
+-	 * Read/Set CPU state. After this block:
+-	 * r29 = Initial SR
+-	 */
+-	getcon	SR, r29
+-	movi	SR_HARMLESS, r20
+-	putcon	r20, SR
+-
+-	/*
+-	 * Initialize EMI/LMI. To Be Done.
+-	 */
+-
+-	/*
+-	 * CPU detection and/or endianness settings (?). To Be Done.
+-	 * Pure PIC code here, please ! Just save state into r30.
+-         * After this block:
+-	 * r30 = CPU type/Platform Endianness
+-	 */
+-
+-	/*
+-	 * Set initial TLB entries for cached and uncached regions.
+-	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
+-	 */
+-	/* Clear ITLBs */
+-	pta	clear_ITLB, tr1
+-	movi	MMUIR_FIRST, r21
+-	movi	MMUIR_END, r22
+-clear_ITLB:
+-	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
+-	addi	r21, MMUIR_STEP, r21
+-        bne	r21, r22, tr1
+-
+-	/* Clear DTLBs */
+-	pta	clear_DTLB, tr1
+-	movi	MMUDR_FIRST, r21
+-	movi	MMUDR_END, r22
+-clear_DTLB:
+-	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
+-	addi	r21, MMUDR_STEP, r21
+-        bne	r21, r22, tr1
+-
+-	/* Map one big (512Mb) page for ITLB */
+-	movi	MMUIR_FIRST, r21
+-	movi	MMUIR_TEXT_L, r22	/* PTEL first */
+-	add.l	r22, r63, r22		/* Sign extend */
+-	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
+-	movi	MMUIR_TEXT_H, r22	/* PTEH last */
+-	add.l	r22, r63, r22		/* Sign extend */
+-	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */
+-
+-	/* Map one big CACHED (512Mb) page for DTLB */
+-	movi	MMUDR_FIRST, r21
+-	movi	MMUDR_CACHED_L, r22	/* PTEL first */
+-	add.l	r22, r63, r22		/* Sign extend */
+-	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
+-	movi	MMUDR_CACHED_H, r22	/* PTEH last */
+-	add.l	r22, r63, r22		/* Sign extend */
+-	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
+-
+-#ifdef CONFIG_EARLY_PRINTK
+-	/*
+-	 * Setup a DTLB translation for SCIF phys.
+-	 */
+-	addi    r21, MMUDR_STEP, r21
+-	movi    0x0a03, r22	/* SCIF phys */
+-	shori   0x0148, r22
+-	putcfg  r21, 1, r22	/* PTEL first */
+-	movi    0xfa03, r22	/* 0xfa030000, fixed SCIF virt */
+-	shori   0x0003, r22
+-	putcfg  r21, 0, r22	/* PTEH last */
+-#endif
+-
+-	/*
+-	 * Set cache behaviours.
+-	 */
+-	/* ICache */
+-	movi	ICCR_BASE, r21
+-	movi	ICCR0_INIT_VAL, r22
+-	movi	ICCR1_INIT_VAL, r23
+-	putcfg	r21, ICCR_REG0, r22
+-	putcfg	r21, ICCR_REG1, r23
+-
+-	/* OCache */
+-	movi	OCCR_BASE, r21
+-	movi	OCCR0_INIT_VAL, r22
+-	movi	OCCR1_INIT_VAL, r23
+-	putcfg	r21, OCCR_REG0, r22
+-	putcfg	r21, OCCR_REG1, r23
+-
+-
+-	/*
+-	 * Enable Caches and MMU. Do the first non-PIC jump.
+-         * Now head.S global variables, constants and externs
+-	 * can be used.
+-	 */
+-	getcon	SR, r21
+-	movi	SR_ENABLE_MMU, r22
+-	or	r21, r22, r21
+-	putcon	r21, SSR
+-	movi	hyperspace, r22
+-	ori	r22, 1, r22	    /* Make it SHmedia, not required but..*/
+-	putcon	r22, SPC
+-	synco
+-	rte			    /* And now go into the hyperspace ... */
+-hyperspace:			    /* ... that's the next instruction !  */
+-
+-	/*
+-	 * Set CPU to a consistent state.
+-	 * r31 = FPU support flag
+-	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
+-	 */
+-	movi	start_kernel, r32
+-	ori	r32, 1, r32
+-
+-	ptabs	r32, tr0		    /* r32 = _start_kernel address        */
+-	pta/u	hopeless, tr1
+-	pta/u	hopeless, tr2
+-	pta/u	hopeless, tr3
+-	pta/u	hopeless, tr4
+-	pta/u	hopeless, tr5
+-	pta/u	hopeless, tr6
+-	pta/u	hopeless, tr7
+-	gettr	tr1, r28			/* r28 = hopeless address */
+-
+-	/* Set initial stack pointer */
+-	movi	init_thread_union, SP
+-	putcon	SP, KCR0		/* Set current to init_task */
+-	movi	THREAD_SIZE, r22	/* Point to the end */
+-	add	SP, r22, SP
+-
+-	/*
+-	 * Initialize FPU.
+-	 * Keep FPU flag in r31. After this block:
+-	 * r31 = FPU flag
+-	 */
+-	movi fpu_in_use, r31	/* Temporary */
+-
+-#ifdef CONFIG_SH_FPU
+-	getcon	SR, r21
+-	movi	SR_ENABLE_FPU, r22
+-	and	r21, r22, r22
+-	putcon	r22, SR			/* Try to enable */
+-	getcon	SR, r22
+-	xor	r21, r22, r21
+-	shlri	r21, 15, r21		/* Supposedly 0/1 */
+-	st.q	r31, 0 , r21		/* Set fpu_in_use */
 -#else
--1:	mov.l	r0, @r2
+-	movi	0, r21
+-	st.q	r31, 0 , r21		/* Set fpu_in_use */
 -#endif
--	add	#4, r2
--2:	mov.l	r0, @r2
--	add	#4, r2
--3:	mov.l	r0, @r2
--	add	#4, r2
--4:	mov.l	r0, @r2
--	add	#4, r2
--5:	mov.l	r0, @r2
--	add	#4, r2
--6:	mov.l	r0, @r2
--	add	#4, r2
--7:	mov.l	r0, @r2
--	add	#4, r2
--8:	mov.l	r0, @r2
--	add	#4, r2
--	cmp/hi	r2, r3
--	bt/s	1b
--	 nop
--	!
--	! Clear area 2
--.Larea2:
--	mov	r4, r3
--	add	r5, r3
--	cmp/hs	r3, r2
--	bt/s	.Ldone
--	 sub	r2, r3
--.L2:	dt	r3
--9:	mov.b	r0, @r2
--	bf/s	.L2
--	 add	#1, r2
--	!
--.Ldone:	rts
--	 mov	#0, r0	! return 0 as normal return
+-	or	r21, ZERO, r31		/* Set FPU flag at last */
 -
--	! return the number of bytes remained
--.Lbad_clear_user:
--	mov	r4, r0
--	add	r5, r0
--	rts
--	 sub	r2, r0
+-#ifndef CONFIG_SH_NO_BSS_INIT
+-/* Don't clear BSS if running on slow platforms such as an RTL simulation,
+-   remote memory via SHdebug link, etc.  For these the memory can be guaranteed
+-   to be all zero on boot anyway. */
+-	/*
+-	 * Clear bss
+-	 */
+-	pta	clear_quad, tr1
+-	movi	__bss_start, r22
+-	movi	_end, r23
+-clear_quad:
+-	st.q	r22, 0, ZERO
+-	addi	r22, 8, r22
+-	bne	r22, r23, tr1		/* Both quad aligned, see vmlinux.lds.S */
+-#endif
+-	pta/u	hopeless, tr1
 -
--.section __ex_table,"a"
--	.align 2
--	.long	0b, .Lbad_clear_user
--	.long	1b, .Lbad_clear_user
--	.long	2b, .Lbad_clear_user
--	.long	3b, .Lbad_clear_user
--	.long	4b, .Lbad_clear_user
--	.long	5b, .Lbad_clear_user
--	.long	6b, .Lbad_clear_user
--	.long	7b, .Lbad_clear_user
--	.long	8b, .Lbad_clear_user
--	.long	9b, .Lbad_clear_user
--.previous
-diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
-index e220c29..7b2131c 100644
---- a/arch/sh/mm/consistent.c
-+++ b/arch/sh/mm/consistent.c
-@@ -1,7 +1,9 @@
- /*
-  * arch/sh/mm/consistent.c
-  *
-- * Copyright (C) 2004  Paul Mundt
-+ * Copyright (C) 2004 - 2007  Paul Mundt
-+ *
-+ * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
-  *
-  * This file is subject to the terms and conditions of the GNU General Public
-  * License.  See the file "COPYING" in the main directory of this archive
-@@ -13,58 +15,152 @@
- #include <asm/addrspace.h>
- #include <asm/io.h>
- 
--void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
-+struct dma_coherent_mem {
-+	void		*virt_base;
-+	u32		device_base;
-+	int		size;
-+	int		flags;
-+	unsigned long	*bitmap;
-+};
-+
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+			   dma_addr_t *dma_handle, gfp_t gfp)
- {
--	struct page *page, *end, *free;
- 	void *ret;
--	int order;
-+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+	int order = get_order(size);
- 
--	size = PAGE_ALIGN(size);
--	order = get_order(size);
-+	if (mem) {
-+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
-+						     order);
-+		if (page >= 0) {
-+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
-+			ret = mem->virt_base + (page << PAGE_SHIFT);
-+			memset(ret, 0, size);
-+			return ret;
-+		}
-+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-+			return NULL;
-+	}
- 
--	page = alloc_pages(gfp, order);
--	if (!page)
--		return NULL;
--	split_page(page, order);
-+	ret = (void *)__get_free_pages(gfp, order);
- 
--	ret = page_address(page);
--	memset(ret, 0, size);
--	*handle = virt_to_phys(ret);
-+	if (ret != NULL) {
-+		memset(ret, 0, size);
-+		/*
-+		 * Pages from the page allocator may have data present in
-+		 * cache. So flush the cache before using uncached memory.
-+		 */
-+		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
-+		*dma_handle = virt_to_phys(ret);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(dma_alloc_coherent);
- 
+-	/* Say bye to head.S but be prepared to wrongly get back ... */
+-	blink	tr0, LINK
+-
+-	/* If we ever get back here through LINK/tr1-tr7 */
+-	pta/u	hopeless, tr7
+-
+-hopeless:
 -	/*
--	 * We must flush the cache before we pass it on to the device
+-	 * Something's badly wrong here. Loop endlessly,
+-         * there's nothing more we can do about it.
+-	 *
+-	 * Note on hopeless: it can be jumped into invariably
+-	 * before or after jumping into hyperspace. The only
+-	 * requirement is to be PIC called (PTA) before and
+-	 * any way (PTA/PTABS) after. According to Virtual
+-	 * to Physical mapping a simulator/emulator can easily
+-	 * tell where we came here from just looking at hopeless
+-	 * (PC) address.
+-	 *
+-	 * For debugging purposes:
+-	 * (r28) hopeless/loop address
+-	 * (r29) Original SR
+-	 * (r30) CPU type/Platform endianness
+-	 * (r31) FPU Support
+-	 * (r32) _start_kernel address
 -	 */
--	__flush_purge_region(ret, size);
-+void dma_free_coherent(struct device *dev, size_t size,
-+			 void *vaddr, dma_addr_t dma_handle)
-+{
-+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+	int order = get_order(size);
- 
--	page = virt_to_page(ret);
--	free = page + (size >> PAGE_SHIFT);
--	end  = page + (1 << order);
-+	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
- 
--	while (++page < end) {
--		/* Free any unused pages */
--		if (page >= free) {
--			__free_page(page);
--		}
-+		bitmap_release_region(mem->bitmap, page, order);
-+	} else {
-+		WARN_ON(irqs_disabled());	/* for portability */
-+		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
-+		free_pages((unsigned long)vaddr, order);
- 	}
-+}
-+EXPORT_SYMBOL(dma_free_coherent);
-+
-+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+				dma_addr_t device_addr, size_t size, int flags)
-+{
-+	void __iomem *mem_base = NULL;
-+	int pages = size >> PAGE_SHIFT;
-+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-+
-+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-+		goto out;
-+	if (!size)
-+		goto out;
-+	if (dev->dma_mem)
-+		goto out;
-+
-+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
- 
--	return P2SEGADDR(ret);
-+	mem_base = ioremap_nocache(bus_addr, size);
-+	if (!mem_base)
-+		goto out;
-+
-+	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-+	if (!dev->dma_mem)
-+		goto out;
-+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-+	if (!dev->dma_mem->bitmap)
-+		goto free1_out;
-+
-+	dev->dma_mem->virt_base = mem_base;
-+	dev->dma_mem->device_base = device_addr;
-+	dev->dma_mem->size = pages;
-+	dev->dma_mem->flags = flags;
-+
-+	if (flags & DMA_MEMORY_MAP)
-+		return DMA_MEMORY_MAP;
-+
-+	return DMA_MEMORY_IO;
-+
-+ free1_out:
-+	kfree(dev->dma_mem);
-+ out:
-+	if (mem_base)
-+		iounmap(mem_base);
-+	return 0;
- }
-+EXPORT_SYMBOL(dma_declare_coherent_memory);
- 
--void consistent_free(void *vaddr, size_t size)
-+void dma_release_declared_memory(struct device *dev)
- {
--	unsigned long addr = P1SEGADDR((unsigned long)vaddr);
--	struct page *page=virt_to_page(addr);
--	int num_pages=(size+PAGE_SIZE-1) >> PAGE_SHIFT;
--	int i;
-+	struct dma_coherent_mem *mem = dev->dma_mem;
- 
--	for(i=0;i<num_pages;i++) {
--		__free_page((page+i));
--	}
-+	if (!mem)
-+		return;
-+	dev->dma_mem = NULL;
-+	iounmap(mem->virt_base);
-+	kfree(mem->bitmap);
-+	kfree(mem);
- }
-+EXPORT_SYMBOL(dma_release_declared_memory);
- 
--void consistent_sync(void *vaddr, size_t size, int direction)
-+void *dma_mark_declared_memory_occupied(struct device *dev,
-+					dma_addr_t device_addr, size_t size)
- {
--	void * p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
-+	struct dma_coherent_mem *mem = dev->dma_mem;
-+	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+	int pos, err;
-+
-+	if (!mem)
-+		return ERR_PTR(-EINVAL);
-+
-+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-+	if (err != 0)
-+		return ERR_PTR(err);
-+	return mem->virt_base + (pos << PAGE_SHIFT);
-+}
-+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-+
-+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-+		    enum dma_data_direction direction)
-+{
-+#ifdef CONFIG_CPU_SH5
-+	void *p1addr = vaddr;
-+#else
-+	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
-+#endif
- 
- 	switch (direction) {
- 	case DMA_FROM_DEVICE:		/* invalidate only */
-@@ -80,8 +176,4 @@ void consistent_sync(void *vaddr, size_t size, int direction)
- 		BUG();
- 	}
- }
+-	blink	tr7, ZERO
 -
--EXPORT_SYMBOL(consistent_alloc);
--EXPORT_SYMBOL(consistent_free);
--EXPORT_SYMBOL(consistent_sync);
 -
-+EXPORT_SYMBOL(dma_cache_sync);
-diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
+diff --git a/arch/sh64/kernel/init_task.c b/arch/sh64/kernel/init_task.c
 deleted file mode 100644
-index 4068501..0000000
---- a/arch/sh/mm/copy_page.S
+index deee8bf..0000000
+--- a/arch/sh64/kernel/init_task.c
 +++ /dev/null
-@@ -1,388 +0,0 @@
+@@ -1,46 +0,0 @@
 -/*
-- * copy_page, __copy_user_page, __copy_user implementation of SuperH
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/init_task.c
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003  Paul Mundt
 - *
-- * Copyright (C) 2001  Niibe Yutaka & Kaz Kojima
-- * Copyright (C) 2002  Toshinobu Sugioka
-- * Copyright (C) 2006  Paul Mundt
 - */
--#include <linux/linkage.h>
--#include <asm/page.h>
+-#include <linux/rwsem.h>
+-#include <linux/mm.h>
+-#include <linux/sched.h>
+-#include <linux/init_task.h>
+-#include <linux/mqueue.h>
+-#include <linux/fs.h>
+-#include <asm/uaccess.h>
+-#include <asm/pgtable.h>
+-
+-static struct fs_struct init_fs = INIT_FS;
+-static struct files_struct init_files = INIT_FILES;
+-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+-struct mm_struct init_mm = INIT_MM(init_mm);
+-
+-struct pt_regs fake_swapper_regs;
 -
 -/*
-- * copy_page_slow
-- * @to: P1 address
-- * @from: P1 address
+- * Initial thread structure.
 - *
-- * void copy_page_slow(void *to, void *from)
+- * We need to make sure that this is THREAD_SIZE-byte aligned due
+- * to the way process stacks are handled. This is done by having a
+- * special "init_task" linker map entry..
 - */
+-union thread_union init_thread_union
+-	__attribute__((__section__(".data.init_task"))) =
+-		{ INIT_THREAD_INFO(init_task) };
 -
 -/*
-- * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
-- * r8 --- from + PAGE_SIZE
-- * r9 --- not used
-- * r10 --- to
-- * r11 --- from
+- * Initial task structure.
+- *
+- * All other task structs will be allocated on slabs in fork.c
 - */
--ENTRY(copy_page_slow)
--	mov.l	r8,@-r15
--	mov.l	r10,@-r15
--	mov.l	r11,@-r15
--	mov	r4,r10
--	mov	r5,r11
--	mov	r5,r8
--	mov.l	.Lpsz,r0
--	add	r0,r8
--	!
--1:	mov.l	@r11+,r0
--	mov.l	@r11+,r1
--	mov.l	@r11+,r2
--	mov.l	@r11+,r3
--	mov.l	@r11+,r4
--	mov.l	@r11+,r5
--	mov.l	@r11+,r6
--	mov.l	@r11+,r7
--#if defined(CONFIG_CPU_SH3)
--	mov.l	r0,@r10
--#elif defined(CONFIG_CPU_SH4)
--	movca.l	r0,@r10
--	mov	r10,r0
--#endif
--	add	#32,r10
--	mov.l	r7,@-r10
--	mov.l	r6,@-r10
--	mov.l	r5,@-r10
--	mov.l	r4,@-r10
--	mov.l	r3,@-r10
--	mov.l	r2,@-r10
--	mov.l	r1,@-r10
--#if defined(CONFIG_CPU_SH4)
--	ocbwb	@r0
+-struct task_struct init_task = INIT_TASK(init_task);
+-
+diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
+deleted file mode 100644
+index 9412b71..0000000
+--- a/arch/sh64/kernel/irq.c
++++ /dev/null
+@@ -1,115 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/irq.c
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003  Paul Mundt
+- *
+- */
+-
+-/*
+- * IRQs are in fact implemented a bit like signal handlers for the kernel.
+- * Naturally it's not a 1:1 relation, but there are similarities.
+- */
+-
+-#include <linux/errno.h>
+-#include <linux/kernel_stat.h>
+-#include <linux/signal.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/ioport.h>
+-#include <linux/interrupt.h>
+-#include <linux/timex.h>
+-#include <linux/slab.h>
+-#include <linux/random.h>
+-#include <linux/smp.h>
+-#include <linux/init.h>
+-#include <linux/seq_file.h>
+-#include <linux/bitops.h>
+-#include <asm/system.h>
+-#include <asm/io.h>
+-#include <asm/smp.h>
+-#include <asm/pgalloc.h>
+-#include <asm/delay.h>
+-#include <asm/irq.h>
+-#include <linux/irq.h>
+-
+-void ack_bad_irq(unsigned int irq)
+-{
+-	printk("unexpected IRQ trap at irq %02x\n", irq);
+-}
+-
+-#if defined(CONFIG_PROC_FS)
+-int show_interrupts(struct seq_file *p, void *v)
+-{
+-	int i = *(loff_t *) v, j;
+-	struct irqaction * action;
+-	unsigned long flags;
+-
+-	if (i == 0) {
+-		seq_puts(p, "           ");
+-		for_each_online_cpu(j)
+-			seq_printf(p, "CPU%d       ",j);
+-		seq_putc(p, '\n');
+-	}
+-
+-	if (i < NR_IRQS) {
+-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+-		action = irq_desc[i].action;
+-		if (!action)
+-			goto unlock;
+-		seq_printf(p, "%3d: ",i);
+-		seq_printf(p, "%10u ", kstat_irqs(i));
+-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+-		seq_printf(p, "  %s", action->name);
+-
+-		for (action=action->next; action; action = action->next)
+-			seq_printf(p, ", %s", action->name);
+-		seq_putc(p, '\n');
+-unlock:
+-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+-	}
+-	return 0;
+-}
 -#endif
--	cmp/eq	r11,r8
--	bf/s	1b
--	 add	#28,r10
--	!
--	mov.l	@r15+,r11
--	mov.l	@r15+,r10
--	mov.l	@r15+,r8
--	rts
--	 nop
 -
--	.align 2
--.Lpsz:	.long	PAGE_SIZE
 -/*
-- * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
-- * Return the number of bytes NOT copied
+- * do_NMI handles all Non-Maskable Interrupts.
 - */
--#define EX(...)			\
--	9999: __VA_ARGS__ ;		\
--	.section __ex_table, "a";	\
--	.long 9999b, 6000f	;	\
--	.previous
--ENTRY(__copy_user)
--	! Check if small number of bytes
--	mov	#11,r0
--	mov	r4,r3
--	cmp/gt	r0,r6		! r6 (len) > r0 (11)
--	bf/s	.L_cleanup_loop_no_pop
--	 add	r6,r3		! last destination address
+-asmlinkage void do_NMI(unsigned long vector_num, struct pt_regs * regs)
+-{
+-	if (regs->sr & 0x40000000)
+-		printk("unexpected NMI trap in system mode\n");
+-	else
+-		printk("unexpected NMI trap in user mode\n");
 -
--	! Calculate bytes needed to align to src
--	mov.l	r11,@-r15
--	neg	r5,r0
--	mov.l	r10,@-r15
--	add	#4,r0
--	mov.l	r9,@-r15
--	and	#3,r0
--	mov.l	r8,@-r15
--	tst	r0,r0
--	bt	2f
+-	/* No statistics */
+-}
 -
--1:
--	! Copy bytes to long word align src
--EX(	mov.b	@r5+,r1		)
--	dt	r0
--	add	#-1,r6
--EX(	mov.b	r1,@r4		)
--	bf/s	1b
--	 add	#1,r4
+-/*
+- * do_IRQ handles all normal device IRQ's.
+- */
+-asmlinkage int do_IRQ(unsigned long vector_num, struct pt_regs * regs)
+-{
+-	struct pt_regs *old_regs = set_irq_regs(regs);
+-	int irq;
 -
--	! Jump to appropriate routine depending on dest
--2:	mov	#3,r1
--	mov	r6, r2
--	and	r4,r1
--	shlr2	r2
--	shll2	r1
--	mova	.L_jump_tbl,r0
--	mov.l	@(r0,r1),r1
--	jmp	@r1
--	 nop
+-	irq_enter();
 -
--	.align 2
--.L_jump_tbl:
--	.long	.L_dest00
--	.long	.L_dest01
--	.long	.L_dest10
--	.long	.L_dest11
+-	irq = irq_demux(vector_num);
+-
+-	if (irq >= 0) {
+-		__do_IRQ(irq);
+-	} else {
+-		printk("unexpected IRQ trap at vector %03lx\n", vector_num);
+-	}
+-
+-	irq_exit();
+-
+-	set_irq_regs(old_regs);
+-	return 1;
+-}
 -
+diff --git a/arch/sh64/kernel/irq_intc.c b/arch/sh64/kernel/irq_intc.c
+deleted file mode 100644
+index 3b63a93..0000000
+--- a/arch/sh64/kernel/irq_intc.c
++++ /dev/null
+@@ -1,272 +0,0 @@
 -/*
-- * Come here if there are less than 12 bytes to copy
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/irq_intc.c
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003  Paul Mundt
+- *
+- * Interrupt Controller support for SH5 INTC.
+- * Per-interrupt selective. IRLM=0 (Fixed priority) is not
+- * supported being useless without a cascaded interrupt
+- * controller.
 - *
-- * Keep the branch target close, so the bf/s callee doesn't overflow
-- * and result in a more expensive branch being inserted. This is the
-- * fast-path for small copies, the jump via the jump table will hit the
-- * default slow-path cleanup. -PFM.
 - */
--.L_cleanup_loop_no_pop:
--	tst	r6,r6		! Check explicitly for zero
--	bt	1f
 -
--2:
--EX(	mov.b	@r5+,r0		)
--	dt	r6
--EX(	mov.b	r0,@r4		)
--	bf/s	2b
--	 add	#1,r4
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+-#include <linux/irq.h>
+-#include <linux/kernel.h>
+-#include <linux/stddef.h>
+-#include <linux/bitops.h>       /* this includes also <asm/registers.h */
+-                                /* which is required to remap register */
+-                                /* names used into __asm__ blocks...   */
 -
--1:	mov	#0,r0		! normal return
--5000:
+-#include <asm/hardware.h>
+-#include <asm/platform.h>
+-#include <asm/page.h>
+-#include <asm/io.h>
+-#include <asm/irq.h>
 -
--# Exception handler:
--.section .fixup, "ax"
--6000:
--	mov.l	8000f,r1
--	mov	r3,r0
--	jmp	@r1
--	 sub	r4,r0
--	.align	2
--8000:	.long	5000b
+-/*
+- * Maybe the generic Peripheral block could move to a more
+- * generic include file. INTC Block will be defined here
+- * and only here to make INTC self-contained in a single
+- * file.
+- */
+-#define	INTC_BLOCK_OFFSET	0x01000000
 -
--.previous
--	rts
--	 nop
+-/* Base */
+-#define INTC_BASE		PHYS_PERIPHERAL_BLOCK + \
+-				INTC_BLOCK_OFFSET
 -
--! Destination = 00
+-/* Address */
+-#define INTC_ICR_SET		(intc_virt + 0x0)
+-#define INTC_ICR_CLEAR		(intc_virt + 0x8)
+-#define INTC_INTPRI_0		(intc_virt + 0x10)
+-#define INTC_INTSRC_0		(intc_virt + 0x50)
+-#define INTC_INTSRC_1		(intc_virt + 0x58)
+-#define INTC_INTREQ_0		(intc_virt + 0x60)
+-#define INTC_INTREQ_1		(intc_virt + 0x68)
+-#define INTC_INTENB_0		(intc_virt + 0x70)
+-#define INTC_INTENB_1		(intc_virt + 0x78)
+-#define INTC_INTDSB_0		(intc_virt + 0x80)
+-#define INTC_INTDSB_1		(intc_virt + 0x88)
 -
--.L_dest00:
--	! Skip the large copy for small transfers
--	mov	#(32+32-4), r0
--	cmp/gt	r6, r0		! r0 (60) > r6 (len)
--	bt	1f
+-#define INTC_ICR_IRLM		0x1
+-#define	INTC_INTPRI_PREGS	8		/* 8 Priority Registers */
+-#define	INTC_INTPRI_PPREG	8		/* 8 Priorities per Register */
 -
--	! Align dest to a 32 byte boundary
--	neg	r4,r0
--	add	#0x20, r0
--	and	#0x1f, r0
--	tst	r0, r0
--	bt	2f
 -
--	sub	r0, r6
--	shlr2	r0
--3:
--EX(	mov.l	@r5+,r1		)
--	dt	r0
--EX(	mov.l	r1, at r4		)
--	bf/s	3b
--	 add	#4,r4
+-/*
+- * Mapper between the vector ordinal and the IRQ number
+- * passed to kernel/device drivers.
+- */
+-int intc_evt_to_irq[(0xE20/0x20)+1] = {
+-	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x000 - 0x0E0 */
+-	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x100 - 0x1E0 */
+-	 0,  0,  0,  0,  0,  1,  0,  0,	/* 0x200 - 0x2E0 */
+-	 2,  0,  0,  3,  0,  0,  0, -1,	/* 0x300 - 0x3E0 */
+-	32, 33, 34, 35, 36, 37, 38, -1,	/* 0x400 - 0x4E0 */
+-	-1, -1, -1, 63, -1, -1, -1, -1,	/* 0x500 - 0x5E0 */
+-	-1, -1, 18, 19, 20, 21, 22, -1,	/* 0x600 - 0x6E0 */
+-	39, 40, 41, 42, -1, -1, -1, -1,	/* 0x700 - 0x7E0 */
+-	 4,  5,  6,  7, -1, -1, -1, -1,	/* 0x800 - 0x8E0 */
+-	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x900 - 0x9E0 */
+-	12, 13, 14, 15, 16, 17, -1, -1,	/* 0xA00 - 0xAE0 */
+-	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xB00 - 0xBE0 */
+-	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xC00 - 0xCE0 */
+-	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xD00 - 0xDE0 */
+-	-1, -1				/* 0xE00 - 0xE20 */
+-};
 -
--2:
--EX(	mov.l	@r5+,r0		)
--EX(	mov.l	@r5+,r1		)
--EX(	mov.l	@r5+,r2		)
--EX(	mov.l	@r5+,r7		)
--EX(	mov.l	@r5+,r8		)
--EX(	mov.l	@r5+,r9		)
--EX(	mov.l	@r5+,r10	)
--EX(	mov.l	@r5+,r11	)
--#ifdef CONFIG_CPU_SH4
--EX(	movca.l	r0,@r4		)
--#else
--EX(	mov.l	r0,@r4		)
--#endif
--	add	#-32, r6
--EX(	mov.l	r1,@(4,r4)	)
--	mov	#32, r0
--EX(	mov.l	r2,@(8,r4)	)
--	cmp/gt	r6, r0		! r0 (32) > r6 (len)
--EX(	mov.l	r7,@(12,r4)	)
--EX(	mov.l	r8,@(16,r4)	)
--EX(	mov.l	r9,@(20,r4)	)
--EX(	mov.l	r10,@(24,r4)	)
--EX(	mov.l	r11,@(28,r4)	)
--	bf/s	2b
--	 add	#32,r4
+-/*
+- * Opposite mapper.
+- */
+-static int IRQ_to_vectorN[NR_INTC_IRQS] = {
+-	0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /*  0- 7 */
+-	  -1,   -1,   -1,   -1, 0x50, 0x51, 0x52, 0x53,	/*  8-15 */
+-	0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36,   -1, /* 16-23 */
+-	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24-31 */
+-	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38,	/* 32-39 */
+-        0x39, 0x3A, 0x3B,   -1,   -1,   -1,   -1,   -1, /* 40-47 */
+-	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48-55 */
+-	  -1,   -1,   -1,   -1,   -1,   -1,   -1, 0x2B, /* 56-63 */
 -
--1:	mov	r6, r0
--	shlr2	r0
--	tst	r0, r0
--	bt	.L_cleanup
--1:
--EX(	mov.l	@r5+,r1		)
--	dt	r0
--EX(	mov.l	r1,@r4		)
--	bf/s	1b
--	 add	#4,r4
+-};
 -
--	bra	.L_cleanup
--	 nop
+-static unsigned long intc_virt;
 -
--! Destination = 10
+-static unsigned int startup_intc_irq(unsigned int irq);
+-static void shutdown_intc_irq(unsigned int irq);
+-static void enable_intc_irq(unsigned int irq);
+-static void disable_intc_irq(unsigned int irq);
+-static void mask_and_ack_intc(unsigned int);
+-static void end_intc_irq(unsigned int irq);
 -
--.L_dest10:
--	mov	r2,r7
--	shlr2	r7
--	shlr	r7
--	tst	r7,r7
--	mov	#7,r0
--	bt/s	1f
--	 and	r0,r2
--2:
--	dt	r7
--#ifdef CONFIG_CPU_LITTLE_ENDIAN
--EX(	mov.l	@r5+,r0		)
--EX(	mov.l	@r5+,r1		)
--EX(	mov.l	@r5+,r8		)
--EX(	mov.l	@r5+,r9		)
--EX(	mov.l	@r5+,r10	)
--EX(	mov.w	r0,@r4		)
--	add	#2,r4
--	xtrct	r1,r0
--	xtrct	r8,r1
--	xtrct	r9,r8
--	xtrct	r10,r9
+-static struct hw_interrupt_type intc_irq_type = {
+-	.typename = "INTC",
+-	.startup = startup_intc_irq,
+-	.shutdown = shutdown_intc_irq,
+-	.enable = enable_intc_irq,
+-	.disable = disable_intc_irq,
+-	.ack = mask_and_ack_intc,
+-	.end = end_intc_irq
+-};
 -
--EX(	mov.l	r0, at r4		)
--EX(	mov.l	r1,@(4,r4)	)
--EX(	mov.l	r8,@(8,r4)	)
--EX(	mov.l	r9,@(12,r4)	)
+-static int irlm;		/* IRL mode */
 -
--EX(	mov.l	@r5+,r1		)
--EX(	mov.l	@r5+,r8		)
--EX(	mov.l	@r5+,r0		)
--	xtrct	r1,r10
--	xtrct	r8,r1
--	xtrct	r0,r8
--	shlr16	r0
--EX(	mov.l	r10,@(16,r4)	)
--EX(	mov.l	r1,@(20,r4)	)
--EX(	mov.l	r8,@(24,r4)	)
--EX(	mov.w	r0,@(28,r4)	)
--	bf/s	2b
--	 add	#30,r4
--#else
--EX(	mov.l	@(28,r5),r0	)
--EX(	mov.l	@(24,r5),r8	)
--EX(	mov.l	@(20,r5),r9	)
--EX(	mov.l	@(16,r5),r10	)
--EX(	mov.w	r0,@(30,r4)	)
--	add	#-2,r4
--	xtrct	r8,r0
--	xtrct	r9,r8
--	xtrct	r10,r9
--EX(	mov.l	r0,@(28,r4)	)
--EX(	mov.l	r8,@(24,r4)	)
--EX(	mov.l	r9,@(20,r4)	)
+-static unsigned int startup_intc_irq(unsigned int irq)
+-{
+-	enable_intc_irq(irq);
+-	return 0; /* never anything pending */
+-}
 -
--EX(	mov.l	@(12,r5),r0	)
--EX(	mov.l	@(8,r5),r8	)
--	xtrct	r0,r10
--EX(	mov.l	@(4,r5),r9	)
--	mov.l	r10,@(16,r4)
--EX(	mov.l	@r5,r10		)
--	xtrct	r8,r0
--	xtrct	r9,r8
--	xtrct	r10,r9
--EX(	mov.l	r0,@(12,r4)	)
--EX(	mov.l	r8,@(8,r4)	)
--	swap.w	r10,r0
--EX(	mov.l	r9,@(4,r4)	)
--EX(	mov.w	r0,@(2,r4)	)
+-static void shutdown_intc_irq(unsigned int irq)
+-{
+-	disable_intc_irq(irq);
+-}
 -
--	add	#32,r5
--	bf/s	2b
--	 add	#34,r4
--#endif
--	tst	r2,r2
--	bt	.L_cleanup
+-static void enable_intc_irq(unsigned int irq)
+-{
+-	unsigned long reg;
+-	unsigned long bitmask;
 -
--1:	! Read longword, write two words per iteration
--EX(	mov.l	@r5+,r0		)
--	dt	r2
--#ifdef CONFIG_CPU_LITTLE_ENDIAN
--EX(	mov.w	r0,@r4		)
--	shlr16	r0
--EX(	mov.w 	r0,@(2,r4)	)
--#else
--EX(	mov.w	r0,@(2,r4)	)
--	shlr16	r0
--EX(	mov.w	r0,@r4		)
--#endif
--	bf/s	1b
--	 add	#4,r4
+-	if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
+-		printk("Trying to use straight IRL0-3 with an encoding platform.\n");
 -
--	bra	.L_cleanup
--	 nop
+-	if (irq < 32) {
+-		reg = INTC_INTENB_0;
+-		bitmask = 1 << irq;
+-	} else {
+-		reg = INTC_INTENB_1;
+-		bitmask = 1 << (irq - 32);
+-	}
 -
--! Destination = 01 or 11
+-	ctrl_outl(bitmask, reg);
+-}
 -
--.L_dest01:
--.L_dest11:
--	! Read longword, write byte, word, byte per iteration
--EX(	mov.l	@r5+,r0		)
--	dt	r2
--#ifdef CONFIG_CPU_LITTLE_ENDIAN
--EX(	mov.b	r0,@r4		)
--	shlr8	r0
--	add	#1,r4
--EX(	mov.w	r0,@r4		)
--	shlr16	r0
--EX(	mov.b	r0,@(2,r4)	)
--	bf/s	.L_dest01
--	 add	#3,r4
--#else
--EX(	mov.b	r0,@(3,r4)	)
--	shlr8	r0
--	swap.w	r0,r7
--EX(	mov.b	r7,@r4		)
--	add	#1,r4
--EX(	mov.w	r0,@r4		)
--	bf/s	.L_dest01
--	 add	#3,r4
+-static void disable_intc_irq(unsigned int irq)
+-{
+-	unsigned long reg;
+-	unsigned long bitmask;
+-
+-	if (irq < 32) {
+-		reg = INTC_INTDSB_0;
+-		bitmask = 1 << irq;
+-	} else {
+-		reg = INTC_INTDSB_1;
+-		bitmask = 1 << (irq - 32);
+-	}
+-
+-	ctrl_outl(bitmask, reg);
+-}
+-
+-static void mask_and_ack_intc(unsigned int irq)
+-{
+-	disable_intc_irq(irq);
+-}
+-
+-static void end_intc_irq(unsigned int irq)
+-{
+-	enable_intc_irq(irq);
+-}
+-
+-/* For future use, if we ever support IRLM=0) */
+-void make_intc_irq(unsigned int irq)
+-{
+-	disable_irq_nosync(irq);
+-	irq_desc[irq].chip = &intc_irq_type;
+-	disable_intc_irq(irq);
+-}
+-
+-#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
+-int intc_irq_describe(char* p, int irq)
+-{
+-	if (irq < NR_INTC_IRQS)
+-		return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
+-	else
+-		return 0;
+-}
 -#endif
 -
--! Cleanup last few bytes
--.L_cleanup:
--	mov	r6,r0
--	and	#3,r0
--	tst	r0,r0
--	bt	.L_exit
--	mov	r0,r6
+-void __init init_IRQ(void)
+-{
+-        unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
+-	unsigned long reg;
+-	unsigned long data;
+-	int i;
 -
--.L_cleanup_loop:
--EX(	mov.b	@r5+,r0		)
--	dt	r6
--EX(	mov.b	r0,@r4		)
--	bf/s	.L_cleanup_loop
--	 add	#1,r4
+-	intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
+-	if (!intc_virt) {
+-		panic("Unable to remap INTC\n");
+-	}
 -
--.L_exit:
--	mov	#0,r0		! normal return
 -
--5000:
+-	/* Set default: per-line enable/disable, priority driven ack/eoi */
+-	for (i = 0; i < NR_INTC_IRQS; i++) {
+-		if (platform_int_priority[i] != NO_PRIORITY) {
+-			irq_desc[i].chip = &intc_irq_type;
+-		}
+-	}
 -
--# Exception handler:
--.section .fixup, "ax"
--6000:
--	mov.l	8000f,r1
--	mov	r3,r0
--	jmp	@r1
--	 sub	r4,r0
--	.align	2
--8000:	.long	5000b
 -
--.previous
--	mov.l	@r15+,r8
--	mov.l	@r15+,r9
--	mov.l	@r15+,r10
--	rts
--	 mov.l	@r15+,r11
-diff --git a/arch/sh/mm/extable.c b/arch/sh/mm/extable.c
+-	/* Disable all interrupts and set all priorities to 0 to avoid trouble */
+-	ctrl_outl(-1, INTC_INTDSB_0);
+-	ctrl_outl(-1, INTC_INTDSB_1);
+-
+-	for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
+-		ctrl_outl( NO_PRIORITY, reg);
+-
+-
+-	/* Set IRLM */
+-	/* If all the priorities are set to 'no priority', then
+-	 * assume we are using encoded mode.
+-	 */
+-	irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
+-		platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
+-
+-	if (irlm == NO_PRIORITY) {
+-		/* IRLM = 0 */
+-		reg = INTC_ICR_CLEAR;
+-		i = IRQ_INTA;
+-		printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
+-	} else {
+-		/* IRLM = 1 */
+-		reg = INTC_ICR_SET;
+-		i = IRQ_IRL0;
+-	}
+-	ctrl_outl(INTC_ICR_IRLM, reg);
+-
+-	/* Set interrupt priorities according to platform description */
+-	for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
+-		data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
+-		if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
+-			/* Upon the 7th, set Priority Register */
+-			ctrl_outl(data, reg);
+-			data = 0;
+-			reg += 8;
+-		}
+-	}
+-
+-#ifdef CONFIG_SH_CAYMAN
+-	{
+-		extern void init_cayman_irq(void);
+-
+-		init_cayman_irq();
+-	}
+-#endif
+-
+-	/*
+-	 * And now let interrupts come in.
+-	 * sti() is not enough, we need to
+-	 * lower priority, too.
+-	 */
+-        __asm__ __volatile__("getcon    " __SR ", %0\n\t"
+-                             "and       %0, %1, %0\n\t"
+-                             "putcon    %0, " __SR "\n\t"
+-                             : "=&r" (__dummy0)
+-                             : "r" (__dummy1));
+-}
+diff --git a/arch/sh64/kernel/led.c b/arch/sh64/kernel/led.c
 deleted file mode 100644
-index c1cf446..0000000
---- a/arch/sh/mm/extable.c
+index e35d3f6..0000000
+--- a/arch/sh64/kernel/led.c
 +++ /dev/null
-@@ -1,21 +0,0 @@
+@@ -1,40 +0,0 @@
 -/*
-- * linux/arch/sh/mm/extable.c
-- *  Taken from:
-- *   linux/arch/i386/mm/extable.c
+- * arch/sh64/kernel/led.c
+- *
+- * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
+- *
+- * May be copied or modified under the terms of the GNU General Public
+- * License.  See linux/COPYING for more information.
+- *
+- * Flash the LEDs
 - */
+-#include <linux/stddef.h>
+-#include <linux/sched.h>
 -
--#include <linux/module.h>
--#include <asm/uaccess.h>
+-void mach_led(int pos, int val);
 -
--int fixup_exception(struct pt_regs *regs)
+-/* acts like an actual heart beat -- ie thump-thump-pause... */
+-void heartbeat(void)
 -{
--	const struct exception_table_entry *fixup;
+-	static unsigned int cnt = 0, period = 0, dist = 0;
 -
--	fixup = search_exception_tables(regs->pc);
--	if (fixup) {
--		regs->pc = fixup->fixup;
--		return 1;
+-	if (cnt == 0 || cnt == dist) {
+-		mach_led(-1, 1);
+-	} else if (cnt == 7 || cnt == dist + 7) {
+-		mach_led(-1, 0);
+-	}
+-
+-	if (++cnt > period) {
+-		cnt = 0;
+-
+-		/*
+-		 * The hyperbolic function below modifies the heartbeat period
+-		 * length in dependency of the current (5min) load. It goes
+-		 * through the points f(0)=126, f(1)=86, f(5)=51, f(inf)->30.
+-		 */
+-		period = ((672 << FSHIFT) / (5 * avenrun[0] +
+-					    (7 << FSHIFT))) + 30;
+-		dist = period / 4;
 -	}
+-}
+-
+diff --git a/arch/sh64/kernel/module.c b/arch/sh64/kernel/module.c
+deleted file mode 100644
+index 2598f6b..0000000
+--- a/arch/sh64/kernel/module.c
++++ /dev/null
+@@ -1,161 +0,0 @@
+-/*  Kernel module help for sh64.
+-
+-    This program is free software; you can redistribute it and/or modify
+-    it under the terms of the GNU General Public License as published by
+-    the Free Software Foundation; either version 2 of the License, or
+-    (at your option) any later version.
+-
+-    This program is distributed in the hope that it will be useful,
+-    but WITHOUT ANY WARRANTY; without even the implied warranty of
+-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-    GNU General Public License for more details.
+-
+-    You should have received a copy of the GNU General Public License
+-    along with this program; if not, write to the Free Software
+-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+-
+-    Copyright 2004 SuperH (UK) Ltd
+-    Author: Richard Curnow
+-
+-    Based on the sh version, and on code from the sh64-specific parts of
+-    modutils, originally written by Richard Curnow and Ben Gaster.
+-
+-*/
+-#include <linux/moduleloader.h>
+-#include <linux/elf.h>
+-#include <linux/vmalloc.h>
+-#include <linux/fs.h>
+-#include <linux/string.h>
+-#include <linux/kernel.h>
+-
+-#if 0
+-#define DEBUGP printk
+-#else
+-#define DEBUGP(fmt...)
+-#endif
+-
+-void *module_alloc(unsigned long size)
+-{
+-	if (size == 0)
+-		return NULL;
+-	return vmalloc(size);
+-}
+-
+-
+-/* Free memory returned from module_alloc */
+-void module_free(struct module *mod, void *module_region)
+-{
+-	vfree(module_region);
+-	/* FIXME: If module_region == mod->init_region, trim exception
+-           table entries. */
+-}
 -
+-/* We don't need anything special. */
+-int module_frob_arch_sections(Elf_Ehdr *hdr,
+-			      Elf_Shdr *sechdrs,
+-			      char *secstrings,
+-			      struct module *mod)
+-{
 -	return 0;
 -}
-diff --git a/arch/sh/mm/extable_32.c b/arch/sh/mm/extable_32.c
-new file mode 100644
-index 0000000..c1cf446
---- /dev/null
-+++ b/arch/sh/mm/extable_32.c
-@@ -0,0 +1,21 @@
-+/*
-+ * linux/arch/sh/mm/extable.c
-+ *  Taken from:
-+ *   linux/arch/i386/mm/extable.c
-+ */
-+
-+#include <linux/module.h>
-+#include <asm/uaccess.h>
-+
-+int fixup_exception(struct pt_regs *regs)
-+{
-+	const struct exception_table_entry *fixup;
-+
-+	fixup = search_exception_tables(regs->pc);
-+	if (fixup) {
-+		regs->pc = fixup->fixup;
-+		return 1;
-+	}
-+
-+	return 0;
-+}
-diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c
-new file mode 100644
-index 0000000..f054996
---- /dev/null
-+++ b/arch/sh/mm/extable_64.c
-@@ -0,0 +1,82 @@
-+/*
-+ * arch/sh/mm/extable_64.c
-+ *
-+ * Copyright (C) 2003 Richard Curnow
-+ * Copyright (C) 2003, 2004  Paul Mundt
-+ *
-+ * Cloned from the 2.5 SH version..
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/rwsem.h>
-+#include <linux/module.h>
-+#include <asm/uaccess.h>
-+
-+extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
-+extern void __copy_user_fixup(void);
-+
-+static const struct exception_table_entry __copy_user_fixup_ex = {
-+	.fixup = (unsigned long)&__copy_user_fixup,
-+};
-+
-+/*
-+ * Some functions that may trap due to a bad user-mode address have too
-+ * many loads and stores in them to make it at all practical to label
-+ * each one and put them all in the main exception table.
-+ *
-+ * In particular, the fast memcpy routine is like this.  It's fix-up is
-+ * just to fall back to a slow byte-at-a-time copy, which is handled the
-+ * conventional way.  So it's functionally OK to just handle any trap
-+ * occurring in the fast memcpy with that fixup.
-+ */
-+static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
-+{
-+	if ((addr >= (unsigned long)&copy_user_memcpy) &&
-+	    (addr <= (unsigned long)&copy_user_memcpy_end))
-+		return &__copy_user_fixup_ex;
-+
-+	return NULL;
-+}
-+
-+/* Simple binary search */
-+const struct exception_table_entry *
-+search_extable(const struct exception_table_entry *first,
-+		 const struct exception_table_entry *last,
-+		 unsigned long value)
-+{
-+	const struct exception_table_entry *mid;
-+
-+	mid = check_exception_ranges(value);
-+	if (mid)
-+		return mid;
-+
-+        while (first <= last) {
-+		long diff;
-+
-+		mid = (last - first) / 2 + first;
-+		diff = mid->insn - value;
-+                if (diff == 0)
-+                        return mid;
-+                else if (diff < 0)
-+                        first = mid+1;
-+                else
-+                        last = mid-1;
-+        }
-+
-+        return NULL;
-+}
-+
-+int fixup_exception(struct pt_regs *regs)
-+{
-+	const struct exception_table_entry *fixup;
-+
-+	fixup = search_exception_tables(regs->pc);
-+	if (fixup) {
-+		regs->pc = fixup->fixup;
-+		return 1;
-+	}
-+
-+	return 0;
-+}
-diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
+-
+-int apply_relocate_add(Elf32_Shdr *sechdrs,
+-		   const char *strtab,
+-		   unsigned int symindex,
+-		   unsigned int relsec,
+-		   struct module *me)
+-{
+-	unsigned int i;
+-	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+-	Elf32_Sym *sym;
+-	Elf32_Addr relocation;
+-	uint32_t *location;
+-	int align;
+-	int is_shmedia;
+-
+-	DEBUGP("Applying relocate section %u to %u\n", relsec,
+-	       sechdrs[relsec].sh_info);
+-	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+-		/* This is where to make the change */
+-		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+-			+ rel[i].r_offset;
+-		/* This is the symbol it is referring to.  Note that all
+-		   undefined symbols have been resolved.  */
+-		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+-			+ ELF32_R_SYM(rel[i].r_info);
+-		relocation = sym->st_value + rel[i].r_addend;
+-		align = (int)location & 3;
+-
+-		/* For text addresses, bit2 of the st_other field indicates
+-		 * whether the symbol is SHmedia (1) or SHcompact (0).  If
+-		 * SHmedia, the LSB of the symbol needs to be asserted
+-		 * for the CPU to be in SHmedia mode when it starts executing
+-		 * the branch target. */
+-		is_shmedia = (sym->st_other & 4) ? 1 : 0;
+-		if (is_shmedia) {
+-			relocation |= 1;
+-		}
+-
+-		switch (ELF32_R_TYPE(rel[i].r_info)) {
+-		case R_SH_DIR32:
+-			DEBUGP("R_SH_DIR32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
+-			*location += relocation;
+-			break;
+-		case R_SH_REL32:
+-			DEBUGP("R_SH_REL32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
+-			relocation -= (Elf32_Addr) location;
+-			*location += relocation;
+-			break;
+-		case R_SH_IMM_LOW16:
+-			DEBUGP("R_SH_IMM_LOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
+-			*location = (*location & ~0x3fffc00) |
+-				((relocation & 0xffff) << 10);
+-			break;
+-		case R_SH_IMM_MEDLOW16:
+-			DEBUGP("R_SH_IMM_MEDLOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
+-			*location = (*location & ~0x3fffc00) |
+-				(((relocation >> 16) & 0xffff) << 10);
+-			break;
+-		case R_SH_IMM_LOW16_PCREL:
+-			DEBUGP("R_SH_IMM_LOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
+-			relocation -= (Elf32_Addr) location;
+-			*location = (*location & ~0x3fffc00) |
+-				((relocation & 0xffff) << 10);
+-			break;
+-		case R_SH_IMM_MEDLOW16_PCREL:
+-			DEBUGP("R_SH_IMM_MEDLOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
+-			relocation -= (Elf32_Addr) location;
+-			*location = (*location & ~0x3fffc00) |
+-				(((relocation >> 16) & 0xffff) << 10);
+-			break;
+-		default:
+-			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+-			       me->name, ELF32_R_TYPE(rel[i].r_info));
+-			return -ENOEXEC;
+-		}
+-	}
+-	return 0;
+-}
+-
+-int apply_relocate(Elf32_Shdr *sechdrs,
+-		       const char *strtab,
+-		       unsigned int symindex,
+-		       unsigned int relsec,
+-		       struct module *me)
+-{
+-	printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
+-	       me->name);
+-	return -ENOEXEC;
+-}
+-
+-int module_finalize(const Elf_Ehdr *hdr,
+-		    const Elf_Shdr *sechdrs,
+-		    struct module *me)
+-{
+-	return 0;
+-}
+-
+-void module_arch_cleanup(struct module *mod)
+-{
+-}
+-
+diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c
 deleted file mode 100644
-index 60d74f7..0000000
---- a/arch/sh/mm/fault.c
+index b4d9534..0000000
+--- a/arch/sh64/kernel/pci_sh5.c
 +++ /dev/null
-@@ -1,303 +0,0 @@
+@@ -1,536 +0,0 @@
 -/*
-- * Page fault handler for SH with an MMU.
-- *
-- *  Copyright (C) 1999  Niibe Yutaka
-- *  Copyright (C) 2003 - 2007  Paul Mundt
+- * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
+- * Copyright (C) 2003, 2004 Paul Mundt
+- * Copyright (C) 2004 Richard Curnow
 - *
-- *  Based on linux/arch/i386/mm/fault.c:
-- *   Copyright (C) 1995  Linus Torvalds
+- * May be copied or modified under the terms of the GNU General Public
+- * License.  See linux/COPYING for more information.
 - *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
+- * Support functions for the SH5 PCI hardware.
 - */
+-
 -#include <linux/kernel.h>
--#include <linux/mm.h>
--#include <linux/hardirq.h>
--#include <linux/kprobes.h>
--#include <asm/system.h>
--#include <asm/mmu_context.h>
--#include <asm/tlbflush.h>
--#include <asm/kgdb.h>
+-#include <linux/rwsem.h>
+-#include <linux/smp.h>
+-#include <linux/interrupt.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/pci.h>
+-#include <linux/delay.h>
+-#include <linux/types.h>
+-#include <asm/pci.h>
+-#include <linux/irq.h>
 -
--/*
-- * This routine handles page faults.  It determines the address,
-- * and the problem, and then passes it off to one of the appropriate
-- * routines.
+-#include <asm/io.h>
+-#include <asm/hardware.h>
+-#include "pci_sh5.h"
+-
+-static unsigned long pcicr_virt;
+-unsigned long pciio_virt;
+-
+-static void __init pci_fixup_ide_bases(struct pci_dev *d)
+-{
+-	int i;
+-
+-	/*
+-	 * PCI IDE controllers use non-standard I/O port decoding, respect it.
+-	 */
+-	if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
+-		return;
+-	printk("PCI: IDE base address fixup for %s\n", pci_name(d));
+-	for(i=0; i<4; i++) {
+-		struct resource *r = &d->resource[i];
+-		if ((r->start & ~0x80) == 0x374) {
+-			r->start |= 2;
+-			r->end = r->start;
+-		}
+-	}
+-}
+-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
+-
+-char * __devinit pcibios_setup(char *str)
+-{
+-	return str;
+-}
+-
+-/* Rounds a number UP to the nearest power of two. Used for
+- * sizing the PCI window.
 - */
--asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
--					unsigned long writeaccess,
--					unsigned long address)
+-static u32 __init r2p2(u32 num)
 -{
--	struct task_struct *tsk;
--	struct mm_struct *mm;
--	struct vm_area_struct * vma;
--	int si_code;
--	int fault;
--	siginfo_t info;
+-	int i = 31;
+-	u32 tmp = num;
+-
+-	if (num == 0)
+-		return 0;
+-
+-	do {
+-		if (tmp & (1 << 31))
+-			break;
+-		i--;
+-		tmp <<= 1;
+-	} while (i >= 0);
+-
+-	tmp = 1 << i;
+-	/* If the original number isn't a power of 2, round it up */
+-	if (tmp != num)
+-		tmp <<= 1;
+-
+-	return tmp;
+-}
+-
+-extern unsigned long long memory_start, memory_end;
+-
+-int __init sh5pci_init(unsigned memStart, unsigned memSize)
+-{
+-	u32 lsr0;
+-	u32 uval;
+-
+-	pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR");
+-	if (!pcicr_virt) {
+-		panic("Unable to remap PCICR\n");
+-	}
+-
+-	pciio_virt = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO");
+-	if (!pciio_virt) {
+-		panic("Unable to remap PCIIO\n");
+-	}
+-
+-	pr_debug("Register base addres is 0x%08lx\n", pcicr_virt);
+-
+-	/* Clear snoop registers */
+-        SH5PCI_WRITE(CSCR0, 0);
+-        SH5PCI_WRITE(CSCR1, 0);
+-
+-	pr_debug("Wrote to reg\n");
+-
+-        /* Switch off interrupts */
+-        SH5PCI_WRITE(INTM,  0);
+-        SH5PCI_WRITE(AINTM, 0);
+-        SH5PCI_WRITE(PINTM, 0);
+-
+-        /* Set bus active, take it out of reset */
+-        uval = SH5PCI_READ(CR);
+-
+-	/* Set command Register */
+-        SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE | CR_PFCS | CR_BMAM);
+-
+-	uval=SH5PCI_READ(CR);
+-        pr_debug("CR is actually 0x%08x\n",uval);
+-
+-        /* Allow it to be a master */
+-	/* NB - WE DISABLE I/O ACCESS to stop overlap */
+-        /* set WAIT bit to enable stepping, an attempt to improve stability */
+-	SH5PCI_WRITE_SHORT(CSR_CMD,
+-			    PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_WAIT);
+-
+-        /*
+-        ** Set translation mapping memory in order to convert the address
+-        ** used for the main bus, to the PCI internal address.
+-        */
+-        SH5PCI_WRITE(MBR,0x40000000);
+-
+-        /* Always set the max size 512M */
+-        SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
+-
+-        /*
+-        ** I/O addresses are mapped at internal PCI specific address
+-        ** as is described into the configuration bridge table.
+-        ** These are changed to 0, to allow cards that have legacy
+-        ** io such as vga to function correctly. We set the SH5 IOBAR to
+-        ** 256K, which is a bit big as we can only have 64K of address space
+-        */
+-
+-        SH5PCI_WRITE(IOBR,0x0);
+-
+-	pr_debug("PCI:Writing 0x%08x to IOBR\n",0);
+-
+-        /* Set up a 256K window. Totally pointless waste  of address space */
+-        SH5PCI_WRITE(IOBMR,0);
+-	pr_debug("PCI:Writing 0x%08x to IOBMR\n",0);
+-
+-	/* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec. Ideally,
+-         * we would want to map the I/O region somewhere, but it is so big this is not
+-         * that easy!
+-         */
+-	SH5PCI_WRITE(CSR_IBAR0,~0);
+-	/* Set memory size value */
+-        memSize = memory_end - memory_start;
 -
--	trace_hardirqs_on();
--	local_irq_enable();
+-        /* Now we set up the mbars so the PCI bus can see the memory of the machine */
+-        if (memSize < (1024 * 1024)) {
+-                printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%x?\n", memSize);
+-                return -EINVAL;
+-        }
 -
--#ifdef CONFIG_SH_KGDB
--	if (kgdb_nofault && kgdb_bus_err_hook)
--		kgdb_bus_err_hook();
--#endif
+-        /* Set LSR 0 */
+-        lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 : ((r2p2(memSize) - 0x100000) | 0x1);
+-        SH5PCI_WRITE(LSR0, lsr0);
 -
--	tsk = current;
--	mm = tsk->mm;
--	si_code = SEGV_MAPERR;
+-	pr_debug("PCI:Writing 0x%08x to LSR0\n",lsr0);
 -
--	if (unlikely(address >= TASK_SIZE)) {
--		/*
--		 * Synchronize this task's top level page-table
--		 * with the 'reference' page table.
--		 *
--		 * Do _not_ use "tsk" here. We might be inside
--		 * an interrupt in the middle of a task switch..
--		 */
--		int offset = pgd_index(address);
--		pgd_t *pgd, *pgd_k;
--		pud_t *pud, *pud_k;
--		pmd_t *pmd, *pmd_k;
+-        /* Set MBAR 0 */
+-        SH5PCI_WRITE(CSR_MBAR0, memory_start);
+-        SH5PCI_WRITE(LAR0, memory_start);
 -
--		pgd = get_TTB() + offset;
--		pgd_k = swapper_pg_dir + offset;
+-        SH5PCI_WRITE(CSR_MBAR1,0);
+-        SH5PCI_WRITE(LAR1,0);
+-        SH5PCI_WRITE(LSR1,0);
 -
--		/* This will never happen with the folded page table. */
--		if (!pgd_present(*pgd)) {
--			if (!pgd_present(*pgd_k))
--				goto bad_area_nosemaphore;
--			set_pgd(pgd, *pgd_k);
--			return;
--		}
+-	pr_debug("PCI:Writing 0x%08llx to CSR_MBAR0\n",memory_start);
+-	pr_debug("PCI:Writing 0x%08llx to LAR0\n",memory_start);
 -
--		pud = pud_offset(pgd, address);
--		pud_k = pud_offset(pgd_k, address);
--		if (pud_present(*pud) || !pud_present(*pud_k))
--			goto bad_area_nosemaphore;
--		set_pud(pud, *pud_k);
+-        /* Enable the PCI interrupts on the device */
+-        SH5PCI_WRITE(INTM,  ~0);
+-        SH5PCI_WRITE(AINTM, ~0);
+-        SH5PCI_WRITE(PINTM, ~0);
 -
--		pmd = pmd_offset(pud, address);
--		pmd_k = pmd_offset(pud_k, address);
--		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
--			goto bad_area_nosemaphore;
--		set_pmd(pmd, *pmd_k);
+-	pr_debug("Switching on all error interrupts\n");
 -
--		return;
+-        return(0);
+-}
+-
+-static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+-			int size, u32 *val)
+-{
+-	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
+-
+-	switch (size) {
+-		case 1:
+-			*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
+-			break;
+-		case 2:
+-			*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
+-			break;
+-		case 4:
+-			*val = SH5PCI_READ(PDR);
+-			break;
 -	}
 -
--	/*
--	 * If we're in an interrupt or have no user
--	 * context, we must not take the fault..
--	 */
--	if (in_atomic() || !mm)
--		goto no_context;
+-	return PCIBIOS_SUCCESSFUL;
+-}
 -
--	down_read(&mm->mmap_sem);
+-static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+-			 int size, u32 val)
+-{
+-	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
 -
--	vma = find_vma(mm, address);
--	if (!vma)
--		goto bad_area;
--	if (vma->vm_start <= address)
--		goto good_area;
--	if (!(vma->vm_flags & VM_GROWSDOWN))
--		goto bad_area;
--	if (expand_stack(vma, address))
--		goto bad_area;
--/*
-- * Ok, we have a good vm_area for this memory access, so
-- * we can handle it..
-- */
--good_area:
--	si_code = SEGV_ACCERR;
--	if (writeaccess) {
--		if (!(vma->vm_flags & VM_WRITE))
--			goto bad_area;
--	} else {
--		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
--			goto bad_area;
+-	switch (size) {
+-		case 1:
+-			SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
+-			break;
+-		case 2:
+-			SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
+-			break;
+-		case 4:
+-			SH5PCI_WRITE(PDR, val);
+-			break;
 -	}
 -
--	/*
--	 * If for any reason at all we couldn't handle the fault,
--	 * make sure we exit gracefully rather than endlessly redo
--	 * the fault.
--	 */
--survive:
--	fault = handle_mm_fault(mm, vma, address, writeaccess);
--	if (unlikely(fault & VM_FAULT_ERROR)) {
--		if (fault & VM_FAULT_OOM)
--			goto out_of_memory;
--		else if (fault & VM_FAULT_SIGBUS)
--			goto do_sigbus;
--		BUG();
--	}
--	if (fault & VM_FAULT_MAJOR)
--		tsk->maj_flt++;
--	else
--		tsk->min_flt++;
+-	return PCIBIOS_SUCCESSFUL;
+-}
 -
--	up_read(&mm->mmap_sem);
--	return;
+-static struct pci_ops pci_config_ops = {
+-	.read =		sh5pci_read,
+-	.write =	sh5pci_write,
+-};
 -
--/*
-- * Something tried to access memory that isn't in our memory map..
-- * Fix it, but check if it's kernel or user first..
-- */
--bad_area:
--	up_read(&mm->mmap_sem);
+-/* Everything hangs off this */
+-static struct pci_bus *pci_root_bus;
 -
--bad_area_nosemaphore:
--	if (user_mode(regs)) {
--		info.si_signo = SIGSEGV;
--		info.si_errno = 0;
--		info.si_code = si_code;
--		info.si_addr = (void *) address;
--		force_sig_info(SIGSEGV, &info, tsk);
--		return;
+-
+-static u8 __init no_swizzle(struct pci_dev *dev, u8 * pin)
+-{
+-	pr_debug("swizzle for dev %d on bus %d slot %d pin is %d\n",
+-	         dev->devfn,dev->bus->number, PCI_SLOT(dev->devfn),*pin);
+-	return PCI_SLOT(dev->devfn);
+-}
+-
+-static inline u8 bridge_swizzle(u8 pin, u8 slot)
+-{
+-	return (((pin-1) + slot) % 4) + 1;
+-}
+-
+-u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp)
+-{
+-	if (dev->bus->number != 0) {
+-		u8 pin = *pinp;
+-		do {
+-			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+-			/* Move up the chain of bridges. */
+-			dev = dev->bus->self;
+-		} while (dev->bus->self);
+-		*pinp = pin;
+-
+-		/* The slot is the slot of the last bridge. */
 -	}
 -
--no_context:
--	/* Are we prepared to handle this kernel fault?  */
--	if (fixup_exception(regs))
--		return;
+-	return PCI_SLOT(dev->devfn);
+-}
 -
--/*
-- * Oops. The kernel tried to access some bad page. We'll have to
-- * terminate things with extreme prejudice.
-- *
-- */
+-/* This needs to be shunted out of here into the board specific bit */
 -
--	bust_spinlocks(1);
+-static int __init map_cayman_irq(struct pci_dev *dev, u8 slot, u8 pin)
+-{
+-	int result = -1;
 -
--	if (oops_may_print()) {
--		__typeof__(pte_val(__pte(0))) page;
+-	/* The complication here is that the PCI IRQ lines from the Cayman's 2
+-	   5V slots get into the CPU via a different path from the IRQ lines
+-	   from the 3 3.3V slots.  Thus, we have to detect whether the card's
+-	   interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling'
+-	   at the point where we cross from 5V to 3.3V is not the normal case.
 -
--		if (address < PAGE_SIZE)
--			printk(KERN_ALERT "Unable to handle kernel NULL "
--					  "pointer dereference");
--		else
--			printk(KERN_ALERT "Unable to handle kernel paging "
--					  "request");
--		printk(" at virtual address %08lx\n", address);
--		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
--		page = (unsigned long)get_TTB();
--		if (page) {
--			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
--			printk(KERN_ALERT "*pde = %08lx\n", page);
--			if (page & _PAGE_PRESENT) {
--				page &= PAGE_MASK;
--				address &= 0x003ff000;
--				page = ((__typeof__(page) *)
--						__va(page))[address >>
--							    PAGE_SHIFT];
--				printk(KERN_ALERT "*pte = %08lx\n", page);
+-	   The added complication is that we don't know that the 5V slots are
+-	   always bus 2, because a card containing a PCI-PCI bridge may be
+-	   plugged into a 3.3V slot, and this changes the bus numbering.
+-
+-	   Also, the Cayman has an intermediate PCI bus that goes a custom
+-	   expansion board header (and to the secondary bridge).  This bus has
+-	   never been used in practice.
+-
+-	   The 1ary onboard PCI-PCI bridge is device 3 on bus 0
+-	   The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of the 1ary bridge.
+-	   */
+-
+-	struct slot_pin {
+-		int slot;
+-		int pin;
+-	} path[4];
+-	int i=0;
+-
+-	while (dev->bus->number > 0) {
+-
+-		slot = path[i].slot = PCI_SLOT(dev->devfn);
+-		pin = path[i].pin = bridge_swizzle(pin, slot);
+-		dev = dev->bus->self;
+-		i++;
+-		if (i > 3) panic("PCI path to root bus too long!\n");
+-	}
+-
+-	slot = PCI_SLOT(dev->devfn);
+-	/* This is the slot on bus 0 through which the device is eventually
+-	   reachable. */
+-
+-	/* Now work back up. */
+-	if ((slot < 3) || (i == 0)) {
+-		/* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
+-		   swizzle now. */
+-		result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
+-	} else {
+-		i--;
+-		slot = path[i].slot;
+-		pin  = path[i].pin;
+-		if (slot > 0) {
+-			panic("PCI expansion bus device found - not handled!\n");
+-		} else {
+-			if (i > 0) {
+-				/* 5V slots */
+-				i--;
+-				slot = path[i].slot;
+-				pin  = path[i].pin;
+-				/* 'pin' was swizzled earlier wrt slot, don't do it again. */
+-				result = IRQ_P2INTA + (pin - 1);
+-			} else {
+-				/* IRQ for 2ary PCI-PCI bridge : unused */
+-				result = -1;
 -			}
 -		}
 -	}
 -
--	die("Oops", regs, writeaccess);
--	bust_spinlocks(0);
--	do_exit(SIGKILL);
+-	return result;
+-}
 -
--/*
-- * We ran out of memory, or some other thing happened to us that made
-- * us unable to handle the page fault gracefully.
-- */
--out_of_memory:
--	up_read(&mm->mmap_sem);
--	if (is_global_init(current)) {
--		yield();
--		down_read(&mm->mmap_sem);
--		goto survive;
--	}
--	printk("VM: killing process %s\n", tsk->comm);
--	if (user_mode(regs))
--		do_group_exit(SIGKILL);
--	goto no_context;
+-static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
+-{
+-	struct pt_regs *regs = get_irq_regs();
+-	unsigned pci_int, pci_air, pci_cir, pci_aint;
 -
--do_sigbus:
--	up_read(&mm->mmap_sem);
+-	pci_int = SH5PCI_READ(INT);
+-	pci_cir = SH5PCI_READ(CIR);
+-	pci_air = SH5PCI_READ(AIR);
 -
--	/*
--	 * Send a sigbus, regardless of whether we were in kernel
--	 * or user mode.
--	 */
--	info.si_signo = SIGBUS;
--	info.si_errno = 0;
--	info.si_code = BUS_ADRERR;
--	info.si_addr = (void *)address;
--	force_sig_info(SIGBUS, &info, tsk);
+-	if (pci_int) {
+-		printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
+-		printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
+-		printk("PCI AIR -> 0x%x\n", pci_air);
+-		printk("PCI CIR -> 0x%x\n", pci_cir);
+-		SH5PCI_WRITE(INT, ~0);
+-	}
 -
--	/* Kernel mode? Handle exceptions or die */
--	if (!user_mode(regs))
--		goto no_context;
--}
+-	pci_aint = SH5PCI_READ(AINT);
+-	if (pci_aint) {
+-		printk("PCI ARB INTERRUPT!\n");
+-		printk("PCI AINT -> 0x%x\n", pci_aint);
+-		printk("PCI AIR -> 0x%x\n", pci_air);
+-		printk("PCI CIR -> 0x%x\n", pci_cir);
+-		SH5PCI_WRITE(AINT, ~0);
+-	}
 -
--#ifdef CONFIG_SH_STORE_QUEUES
--/*
-- * This is a special case for the SH-4 store queues, as pages for this
-- * space still need to be faulted in before it's possible to flush the
-- * store queue cache for writeout to the remapped region.
-- */
--#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
--#else
--#define P3_ADDR_MAX		P4SEG
--#endif
+-	return IRQ_HANDLED;
+-}
 -
--/*
-- * Called with interrupts disabled.
-- */
--asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
--					 unsigned long writeaccess,
--					 unsigned long address)
+-static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
 -{
--	pgd_t *pgd;
--	pud_t *pud;
--	pmd_t *pmd;
--	pte_t *pte;
--	pte_t entry;
+-	printk("SERR IRQ\n");
 -
--#ifdef CONFIG_SH_KGDB
--	if (kgdb_nofault && kgdb_bus_err_hook)
--		kgdb_bus_err_hook();
--#endif
+-	return IRQ_NONE;
+-}
 -
--	/*
--	 * We don't take page faults for P1, P2, and parts of P4, these
--	 * are always mapped, whether it be due to legacy behaviour in
--	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
--	 */
--	if (address >= P3SEG && address < P3_ADDR_MAX) {
--		pgd = pgd_offset_k(address);
--	} else {
--		if (unlikely(address >= TASK_SIZE || !current->mm))
--			return 1;
+-static void __init
+-pcibios_size_bridge(struct pci_bus *bus, struct resource *ior,
+-		    struct resource *memr)
+-{
+-	struct resource io_res, mem_res;
+-	struct pci_dev *dev;
+-	struct pci_dev *bridge = bus->self;
+-	struct list_head *ln;
 -
--		pgd = pgd_offset(current->mm, address);
--	}
+-	if (!bridge)
+-		return;	/* host bridge, nothing to do */
 -
--	pud = pud_offset(pgd, address);
--	if (pud_none_or_clear_bad(pud))
--		return 1;
--	pmd = pmd_offset(pud, address);
--	if (pmd_none_or_clear_bad(pmd))
--		return 1;
+-	/* set reasonable default locations for pcibios_align_resource */
+-	io_res.start = PCIBIOS_MIN_IO;
+-	mem_res.start = PCIBIOS_MIN_MEM;
 -
--	pte = pte_offset_kernel(pmd, address);
--	entry = *pte;
--	if (unlikely(pte_none(entry) || pte_not_present(entry)))
--		return 1;
--	if (unlikely(writeaccess && !pte_write(entry)))
--		return 1;
+-	io_res.end = io_res.start;
+-	mem_res.end = mem_res.start;
 -
--	if (writeaccess)
--		entry = pte_mkdirty(entry);
--	entry = pte_mkyoung(entry);
+-	/* Collect information about how our direct children are layed out. */
+-	for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
+-		int i;
+-		dev = pci_dev_b(ln);
 -
--	set_pte(pte, entry);
--	update_mmu_cache(NULL, address, entry);
+-		/* Skip bridges for now */
+-		if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+-			continue;
 -
--	return 0;
--}
-diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
-new file mode 100644
-index 0000000..33b43d2
---- /dev/null
-+++ b/arch/sh/mm/fault_32.c
-@@ -0,0 +1,303 @@
-+/*
-+ * Page fault handler for SH with an MMU.
-+ *
-+ *  Copyright (C) 1999  Niibe Yutaka
-+ *  Copyright (C) 2003 - 2007  Paul Mundt
-+ *
-+ *  Based on linux/arch/i386/mm/fault.c:
-+ *   Copyright (C) 1995  Linus Torvalds
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/hardirq.h>
-+#include <linux/kprobes.h>
-+#include <asm/system.h>
-+#include <asm/mmu_context.h>
-+#include <asm/tlbflush.h>
-+#include <asm/kgdb.h>
-+
-+/*
-+ * This routine handles page faults.  It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ */
-+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
-+					unsigned long writeaccess,
-+					unsigned long address)
-+{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	struct vm_area_struct * vma;
-+	int si_code;
-+	int fault;
-+	siginfo_t info;
-+
-+	trace_hardirqs_on();
-+	local_irq_enable();
-+
-+#ifdef CONFIG_SH_KGDB
-+	if (kgdb_nofault && kgdb_bus_err_hook)
-+		kgdb_bus_err_hook();
-+#endif
-+
-+	tsk = current;
-+	mm = tsk->mm;
-+	si_code = SEGV_MAPERR;
-+
-+	if (unlikely(address >= TASK_SIZE)) {
-+		/*
-+		 * Synchronize this task's top level page-table
-+		 * with the 'reference' page table.
-+		 *
-+		 * Do _not_ use "tsk" here. We might be inside
-+		 * an interrupt in the middle of a task switch..
-+		 */
-+		int offset = pgd_index(address);
-+		pgd_t *pgd, *pgd_k;
-+		pud_t *pud, *pud_k;
-+		pmd_t *pmd, *pmd_k;
-+
-+		pgd = get_TTB() + offset;
-+		pgd_k = swapper_pg_dir + offset;
-+
-+		/* This will never happen with the folded page table. */
-+		if (!pgd_present(*pgd)) {
-+			if (!pgd_present(*pgd_k))
-+				goto bad_area_nosemaphore;
-+			set_pgd(pgd, *pgd_k);
-+			return;
-+		}
-+
-+		pud = pud_offset(pgd, address);
-+		pud_k = pud_offset(pgd_k, address);
-+		if (pud_present(*pud) || !pud_present(*pud_k))
-+			goto bad_area_nosemaphore;
-+		set_pud(pud, *pud_k);
-+
-+		pmd = pmd_offset(pud, address);
-+		pmd_k = pmd_offset(pud_k, address);
-+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
-+			goto bad_area_nosemaphore;
-+		set_pmd(pmd, *pmd_k);
-+
-+		return;
-+	}
-+
-+	/*
-+	 * If we're in an interrupt or have no user
-+	 * context, we must not take the fault..
-+	 */
-+	if (in_atomic() || !mm)
-+		goto no_context;
-+
-+	down_read(&mm->mmap_sem);
-+
-+	vma = find_vma(mm, address);
-+	if (!vma)
-+		goto bad_area;
-+	if (vma->vm_start <= address)
-+		goto good_area;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		goto bad_area;
-+	if (expand_stack(vma, address))
-+		goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+	si_code = SEGV_ACCERR;
-+	if (writeaccess) {
-+		if (!(vma->vm_flags & VM_WRITE))
-+			goto bad_area;
-+	} else {
-+		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-+			goto bad_area;
-+	}
-+
-+	/*
-+	 * If for any reason at all we couldn't handle the fault,
-+	 * make sure we exit gracefully rather than endlessly redo
-+	 * the fault.
-+	 */
-+survive:
-+	fault = handle_mm_fault(mm, vma, address, writeaccess);
-+	if (unlikely(fault & VM_FAULT_ERROR)) {
-+		if (fault & VM_FAULT_OOM)
-+			goto out_of_memory;
-+		else if (fault & VM_FAULT_SIGBUS)
-+			goto do_sigbus;
-+		BUG();
-+	}
-+	if (fault & VM_FAULT_MAJOR)
-+		tsk->maj_flt++;
-+	else
-+		tsk->min_flt++;
-+
-+	up_read(&mm->mmap_sem);
-+	return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+	up_read(&mm->mmap_sem);
-+
-+bad_area_nosemaphore:
-+	if (user_mode(regs)) {
-+		info.si_signo = SIGSEGV;
-+		info.si_errno = 0;
-+		info.si_code = si_code;
-+		info.si_addr = (void *) address;
-+		force_sig_info(SIGSEGV, &info, tsk);
-+		return;
-+	}
-+
-+no_context:
-+	/* Are we prepared to handle this kernel fault?  */
-+	if (fixup_exception(regs))
-+		return;
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ *
-+ */
-+
-+	bust_spinlocks(1);
-+
-+	if (oops_may_print()) {
-+		unsigned long page;
-+
-+		if (address < PAGE_SIZE)
-+			printk(KERN_ALERT "Unable to handle kernel NULL "
-+					  "pointer dereference");
-+		else
-+			printk(KERN_ALERT "Unable to handle kernel paging "
-+					  "request");
-+		printk(" at virtual address %08lx\n", address);
-+		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-+		page = (unsigned long)get_TTB();
-+		if (page) {
-+			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
-+			printk(KERN_ALERT "*pde = %08lx\n", page);
-+			if (page & _PAGE_PRESENT) {
-+				page &= PAGE_MASK;
-+				address &= 0x003ff000;
-+				page = ((__typeof__(page) *)
-+						__va(page))[address >>
-+							    PAGE_SHIFT];
-+				printk(KERN_ALERT "*pte = %08lx\n", page);
-+			}
-+		}
-+	}
-+
-+	die("Oops", regs, writeaccess);
-+	bust_spinlocks(0);
-+	do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+	up_read(&mm->mmap_sem);
-+	if (is_global_init(current)) {
-+		yield();
-+		down_read(&mm->mmap_sem);
-+		goto survive;
-+	}
-+	printk("VM: killing process %s\n", tsk->comm);
-+	if (user_mode(regs))
-+		do_group_exit(SIGKILL);
-+	goto no_context;
-+
-+do_sigbus:
-+	up_read(&mm->mmap_sem);
-+
-+	/*
-+	 * Send a sigbus, regardless of whether we were in kernel
-+	 * or user mode.
-+	 */
-+	info.si_signo = SIGBUS;
-+	info.si_errno = 0;
-+	info.si_code = BUS_ADRERR;
-+	info.si_addr = (void *)address;
-+	force_sig_info(SIGBUS, &info, tsk);
-+
-+	/* Kernel mode? Handle exceptions or die */
-+	if (!user_mode(regs))
-+		goto no_context;
-+}
-+
-+#ifdef CONFIG_SH_STORE_QUEUES
-+/*
-+ * This is a special case for the SH-4 store queues, as pages for this
-+ * space still need to be faulted in before it's possible to flush the
-+ * store queue cache for writeout to the remapped region.
-+ */
-+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
-+#else
-+#define P3_ADDR_MAX		P4SEG
-+#endif
-+
-+/*
-+ * Called with interrupts disabled.
-+ */
-+asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-+					 unsigned long writeaccess,
-+					 unsigned long address)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	pte_t entry;
-+
-+#ifdef CONFIG_SH_KGDB
-+	if (kgdb_nofault && kgdb_bus_err_hook)
-+		kgdb_bus_err_hook();
-+#endif
-+
-+	/*
-+	 * We don't take page faults for P1, P2, and parts of P4, these
-+	 * are always mapped, whether it be due to legacy behaviour in
-+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-+	 */
-+	if (address >= P3SEG && address < P3_ADDR_MAX) {
-+		pgd = pgd_offset_k(address);
-+	} else {
-+		if (unlikely(address >= TASK_SIZE || !current->mm))
-+			return 1;
-+
-+		pgd = pgd_offset(current->mm, address);
-+	}
-+
-+	pud = pud_offset(pgd, address);
-+	if (pud_none_or_clear_bad(pud))
-+		return 1;
-+	pmd = pmd_offset(pud, address);
-+	if (pmd_none_or_clear_bad(pmd))
-+		return 1;
-+
-+	pte = pte_offset_kernel(pmd, address);
-+	entry = *pte;
-+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-+		return 1;
-+	if (unlikely(writeaccess && !pte_write(entry)))
-+		return 1;
-+
-+	if (writeaccess)
-+		entry = pte_mkdirty(entry);
-+	entry = pte_mkyoung(entry);
-+
-+	set_pte(pte, entry);
-+	update_mmu_cache(NULL, address, entry);
-+
-+	return 0;
-+}
-diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c
-new file mode 100644
-index 0000000..399d537
---- /dev/null
-+++ b/arch/sh/mm/fault_64.c
-@@ -0,0 +1,275 @@
-+/*
-+ * The SH64 TLB miss.
-+ *
-+ * Original code from fault.c
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ *
-+ * Fast PTE->TLB refill path
-+ * Copyright (C) 2003 Richard.Curnow at superh.com
-+ *
-+ * IMPORTANT NOTES :
-+ * The do_fast_page_fault function is called from a context in entry.S
-+ * where very few registers have been saved.  In particular, the code in
-+ * this file must be compiled not to use ANY caller-save registers that
-+ * are not part of the restricted save set.  Also, it means that code in
-+ * this file must not make calls to functions elsewhere in the kernel, or
-+ * else the excepting context will see corruption in its caller-save
-+ * registers.  Plus, the entry.S save area is non-reentrant, so this code
-+ * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
-+ * on any exception.
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/interrupt.h>
-+#include <asm/system.h>
-+#include <asm/tlb.h>
-+#include <asm/io.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgalloc.h>
-+#include <asm/mmu_context.h>
-+#include <asm/cpu/registers.h>
-+
-+/* Callable from fault.c, so not static */
-+inline void __do_tlb_refill(unsigned long address,
-+                            unsigned long long is_text_not_data, pte_t *pte)
-+{
-+	unsigned long long ptel;
-+	unsigned long long pteh=0;
-+	struct tlb_info *tlbp;
-+	unsigned long long next;
-+
-+	/* Get PTEL first */
-+	ptel = pte_val(*pte);
-+
-+	/*
-+	 * Set PTEH register
-+	 */
-+	pteh = address & MMU_VPN_MASK;
-+
-+	/* Sign extend based on neff. */
-+#if (NEFF == 32)
-+	/* Faster sign extension */
-+	pteh = (unsigned long long)(signed long long)(signed long)pteh;
-+#else
-+	/* General case */
-+	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
-+#endif
-+
-+	/* Set the ASID. */
-+	pteh |= get_asid() << PTEH_ASID_SHIFT;
-+	pteh |= PTEH_VALID;
-+
-+	/* Set PTEL register, set_pte has performed the sign extension */
-+	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-+
-+	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
-+	next = tlbp->next;
-+	__flush_tlb_slot(next);
-+	asm volatile ("putcfg %0,1,%2\n\n\t"
-+		      "putcfg %0,0,%1\n"
-+		      :  : "r" (next), "r" (pteh), "r" (ptel) );
-+
-+	next += TLB_STEP;
-+	if (next > tlbp->last) next = tlbp->first;
-+	tlbp->next = next;
-+
-+}
-+
-+static int handle_vmalloc_fault(struct mm_struct *mm,
-+				unsigned long protection_flags,
-+                                unsigned long long textaccess,
-+				unsigned long address)
-+{
-+	pgd_t *dir;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	static pte_t *pte;
-+	pte_t entry;
-+
-+	dir = pgd_offset_k(address);
-+
-+	pud = pud_offset(dir, address);
-+	if (pud_none_or_clear_bad(pud))
-+		return 0;
-+
-+	pmd = pmd_offset(pud, address);
-+	if (pmd_none_or_clear_bad(pmd))
-+		return 0;
-+
-+	pte = pte_offset_kernel(pmd, address);
-+	entry = *pte;
-+
-+	if (pte_none(entry) || !pte_present(entry))
-+		return 0;
-+	if ((pte_val(entry) & protection_flags) != protection_flags)
-+		return 0;
-+
-+        __do_tlb_refill(address, textaccess, pte);
-+
-+	return 1;
-+}
-+
-+static int handle_tlbmiss(struct mm_struct *mm,
-+			  unsigned long long protection_flags,
-+			  unsigned long long textaccess,
-+			  unsigned long address)
-+{
-+	pgd_t *dir;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	pte_t entry;
-+
-+	/* NB. The PGD currently only contains a single entry - there is no
-+	   page table tree stored for the top half of the address space since
-+	   virtual pages in that region should never be mapped in user mode.
-+	   (In kernel mode, the only things in that region are the 512Mb super
-+	   page (locked in), and vmalloc (modules) +  I/O device pages (handled
-+	   by handle_vmalloc_fault), so no PGD for the upper half is required
-+	   by kernel mode either).
-+
-+	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
-+	   the next test is necessary.  - RPC */
-+	if (address >= (unsigned long) TASK_SIZE)
-+		/* upper half - never has page table entries. */
-+		return 0;
-+
-+	dir = pgd_offset(mm, address);
-+	if (pgd_none(*dir) || !pgd_present(*dir))
-+		return 0;
-+	if (!pgd_present(*dir))
-+		return 0;
-+
-+	pud = pud_offset(dir, address);
-+	if (pud_none(*pud) || !pud_present(*pud))
-+		return 0;
-+
-+	pmd = pmd_offset(pud, address);
-+	if (pmd_none(*pmd) || !pmd_present(*pmd))
-+		return 0;
-+
-+	pte = pte_offset_kernel(pmd, address);
-+	entry = *pte;
-+
-+	if (pte_none(entry) || !pte_present(entry))
-+		return 0;
-+
-+	/*
-+	 * If the page doesn't have sufficient protection bits set to
-+	 * service the kind of fault being handled, there's not much
-+	 * point doing the TLB refill.  Punt the fault to the general
-+	 * handler.
-+	 */
-+	if ((pte_val(entry) & protection_flags) != protection_flags)
-+		return 0;
-+
-+        __do_tlb_refill(address, textaccess, pte);
-+
-+	return 1;
-+}
-+
-+/*
-+ * Put all this information into one structure so that everything is just
-+ * arithmetic relative to a single base address.  This reduces the number
-+ * of movi/shori pairs needed just to load addresses of static data.
-+ */
-+struct expevt_lookup {
-+	unsigned short protection_flags[8];
-+	unsigned char  is_text_access[8];
-+	unsigned char  is_write_access[8];
-+};
-+
-+#define PRU (1<<9)
-+#define PRW (1<<8)
-+#define PRX (1<<7)
-+#define PRR (1<<6)
-+
-+#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
-+#define YOUNG (_PAGE_ACCESSED)
-+
-+/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
-+   the fault happened in user mode or privileged mode. */
-+static struct expevt_lookup expevt_lookup_table = {
-+	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
-+	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
-+};
-+
-+/*
-+   This routine handles page faults that can be serviced just by refilling a
-+   TLB entry from an existing page table entry.  (This case represents a very
-+   large majority of page faults.) Return 1 if the fault was successfully
-+   handled.  Return 0 if the fault could not be handled.  (This leads into the
-+   general fault handling in fault.c which deals with mapping file-backed
-+   pages, stack growth, segmentation faults, swapping etc etc)
-+ */
-+asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
-+				  unsigned long long expevt,
-+			          unsigned long address)
-+{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	unsigned long long textaccess;
-+	unsigned long long protection_flags;
-+	unsigned long long index;
-+	unsigned long long expevt4;
-+
-+	/* The next few lines implement a way of hashing EXPEVT into a
-+	 * small array index which can be used to lookup parameters
-+	 * specific to the type of TLBMISS being handled.
-+	 *
-+	 * Note:
-+	 *	ITLBMISS has EXPEVT==0xa40
-+	 *	RTLBMISS has EXPEVT==0x040
-+	 *	WTLBMISS has EXPEVT==0x060
-+	 */
-+	expevt4 = (expevt >> 4);
-+	/* TODO : xor ssr_md into this expression too. Then we can check
-+	 * that PRU is set when it needs to be. */
-+	index = expevt4 ^ (expevt4 >> 5);
-+	index &= 7;
-+	protection_flags = expevt_lookup_table.protection_flags[index];
-+	textaccess       = expevt_lookup_table.is_text_access[index];
-+
-+	/* SIM
-+	 * Note this is now called with interrupts still disabled
-+	 * This is to cope with being called for a missing IO port
-+	 * address with interrupts disabled. This should be fixed as
-+	 * soon as we have a better 'fast path' miss handler.
-+	 *
-+	 * Plus take care how you try and debug this stuff.
-+	 * For example, writing debug data to a port which you
-+	 * have just faulted on is not going to work.
-+	 */
-+
-+	tsk = current;
-+	mm = tsk->mm;
-+
-+	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
-+	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
-+		if (ssr_md)
-+			/*
-+			 * Process-contexts can never have this address
-+			 * range mapped
-+			 */
-+			if (handle_vmalloc_fault(mm, protection_flags,
-+						 textaccess, address))
-+				return 1;
-+	} else if (!in_interrupt() && mm) {
-+		if (handle_tlbmiss(mm, protection_flags, textaccess, address))
-+			return 1;
-+	}
-+
-+	return 0;
-+}
-diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
-index d5e160d..2918c6b 100644
---- a/arch/sh/mm/init.c
-+++ b/arch/sh/mm/init.c
-@@ -23,9 +23,7 @@
- 
- DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
- pgd_t swapper_pg_dir[PTRS_PER_PGD];
+-		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+-			struct resource res;
+-			unsigned long size;
 -
--void (*copy_page)(void *from, void *to);
--void (*clear_page)(void *to);
-+unsigned long cached_to_uncached = 0;
- 
- void show_mem(void)
- {
-@@ -102,7 +100,8 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
- 
- 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
- 
--	flush_tlb_one(get_asid(), addr);
-+	if (cached_to_uncached)
-+		flush_tlb_one(get_asid(), addr);
- }
- 
- /*
-@@ -131,6 +130,37 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
- 
- 	set_pte_phys(address, phys, prot);
- }
-+
-+void __init page_table_range_init(unsigned long start, unsigned long end,
-+					 pgd_t *pgd_base)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	int pgd_idx;
-+	unsigned long vaddr;
-+
-+	vaddr = start & PMD_MASK;
-+	end = (end + PMD_SIZE - 1) & PMD_MASK;
-+	pgd_idx = pgd_index(vaddr);
-+	pgd = pgd_base + pgd_idx;
-+
-+	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-+		BUG_ON(pgd_none(*pgd));
-+		pud = pud_offset(pgd, 0);
-+		BUG_ON(pud_none(*pud));
-+		pmd = pmd_offset(pud, 0);
-+
-+		if (!pmd_present(*pmd)) {
-+			pte_t *pte_table;
-+			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-+			memset(pte_table, 0, PAGE_SIZE);
-+			pmd_populate_kernel(&init_mm, pmd, pte_table);
-+		}
-+
-+		vaddr += PMD_SIZE;
-+	}
-+}
- #endif	/* CONFIG_MMU */
- 
- /*
-@@ -150,6 +180,11 @@ void __init paging_init(void)
- 	 * check for a null value. */
- 	set_TTB(swapper_pg_dir);
- 
-+	/* Populate the relevant portions of swapper_pg_dir so that
-+	 * we can use the fixmap entries without calling kmalloc.
-+	 * pte's will be filled in by __set_fixmap(). */
-+	page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);
-+
- 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- 
- 	for_each_online_node(nid) {
-@@ -167,9 +202,22 @@ void __init paging_init(void)
- 	}
- 
- 	free_area_init_nodes(max_zone_pfns);
-+
-+	/* Set up the uncached fixmap */
-+	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
-+
-+#ifdef CONFIG_29BIT
-+	/*
-+	 * Handle trivial transitions between cached and uncached
-+	 * segments, making use of the 1:1 mapping relationship in
-+	 * 512MB lowmem.
-+	 */
-+	cached_to_uncached = P2SEG - P1SEG;
-+#endif
- }
- 
- static struct kcore_list kcore_mem, kcore_vmalloc;
-+int after_bootmem = 0;
- 
- void __init mem_init(void)
- {
-@@ -202,17 +250,7 @@ void __init mem_init(void)
- 	memset(empty_zero_page, 0, PAGE_SIZE);
- 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
- 
--	/*
--	 * Setup wrappers for copy/clear_page(), these will get overridden
--	 * later in the boot process if a better method is available.
--	 */
--#ifdef CONFIG_MMU
--	copy_page = copy_page_slow;
--	clear_page = clear_page_slow;
--#else
--	copy_page = copy_page_nommu;
--	clear_page = clear_page_nommu;
--#endif
-+	after_bootmem = 1;
- 
- 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
- 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
-deleted file mode 100644
-index 0c7b7e3..0000000
---- a/arch/sh/mm/ioremap.c
-+++ /dev/null
-@@ -1,150 +0,0 @@
--/*
-- * arch/sh/mm/ioremap.c
-- *
-- * Re-map IO memory to kernel address space so that we can access it.
-- * This is needed for high PCI addresses that aren't mapped in the
-- * 640k-1MB IO memory area on PC's
-- *
-- * (C) Copyright 1995 1996 Linus Torvalds
-- * (C) Copyright 2005, 2006 Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General
-- * Public License. See the file "COPYING" in the main directory of this
-- * archive for more details.
-- */
--#include <linux/vmalloc.h>
--#include <linux/module.h>
--#include <linux/mm.h>
--#include <linux/pci.h>
--#include <linux/io.h>
--#include <asm/page.h>
--#include <asm/pgalloc.h>
--#include <asm/addrspace.h>
--#include <asm/cacheflush.h>
--#include <asm/tlbflush.h>
--#include <asm/mmu.h>
+-			memcpy(&res, &dev->resource[i], sizeof(res));
+-			size = res.end - res.start + 1;
 -
--/*
-- * Remap an arbitrary physical address space into the kernel virtual
-- * address space. Needed when the kernel wants to access high addresses
-- * directly.
-- *
-- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-- * have to convert them into an offset in a page-aligned mapping, but the
-- * caller shouldn't need to know that small detail.
-- */
--void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
--			unsigned long flags)
--{
--	struct vm_struct * area;
--	unsigned long offset, last_addr, addr, orig_addr;
--	pgprot_t pgprot;
+-			if (res.flags & IORESOURCE_IO) {
+-				res.start = io_res.end;
+-				pcibios_align_resource(dev, &res, size, 0);
+-				io_res.end = res.start + size;
+-			} else if (res.flags & IORESOURCE_MEM) {
+-				res.start = mem_res.end;
+-				pcibios_align_resource(dev, &res, size, 0);
+-				mem_res.end = res.start + size;
+-			}
+-		}
+-	}
 -
--	/* Don't allow wraparound or zero size */
--	last_addr = phys_addr + size - 1;
--	if (!size || last_addr < phys_addr)
--		return NULL;
+-	/* And for all of the subordinate busses. */
+-	for (ln=bus->children.next; ln != &bus->children; ln=ln->next)
+-		pcibios_size_bridge(pci_bus_b(ln), &io_res, &mem_res);
 -
--	/*
--	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
--	 * mapped at the end of the address space (typically 0xfd000000)
--	 * in a non-translatable area, so mapping through page tables for
--	 * this area is not only pointless, but also fundamentally
--	 * broken. Just return the physical address instead.
--	 *
--	 * For boards that map a small PCI memory aperture somewhere in
--	 * P1/P2 space, ioremap() will already do the right thing,
--	 * and we'll never get this far.
--	 */
--	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
--		return (void __iomem *)phys_addr;
+-	/* turn the ending locations into sizes (subtract start) */
+-	io_res.end -= io_res.start;
+-	mem_res.end -= mem_res.start;
 -
--	/*
--	 * Don't allow anybody to remap normal RAM that we're using..
--	 */
--	if (phys_addr < virt_to_phys(high_memory))
--		return NULL;
+-	/* Align the sizes up by bridge rules */
+-	io_res.end = ALIGN(io_res.end, 4*1024) - 1;
+-	mem_res.end = ALIGN(mem_res.end, 1*1024*1024) - 1;
 -
--	/*
--	 * Mappings have to be page-aligned
--	 */
--	offset = phys_addr & ~PAGE_MASK;
--	phys_addr &= PAGE_MASK;
--	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+-	/* Adjust the bridge's allocation requirements */
+-	bridge->resource[0].end = bridge->resource[0].start + io_res.end;
+-	bridge->resource[1].end = bridge->resource[1].start + mem_res.end;
 -
--	/*
--	 * Ok, go for it..
--	 */
--	area = get_vm_area(size, VM_IOREMAP);
--	if (!area)
--		return NULL;
--	area->phys_addr = phys_addr;
--	orig_addr = addr = (unsigned long)area->addr;
+-	bridge->resource[PCI_BRIDGE_RESOURCES].end =
+-	    bridge->resource[PCI_BRIDGE_RESOURCES].start + io_res.end;
+-	bridge->resource[PCI_BRIDGE_RESOURCES+1].end =
+-	    bridge->resource[PCI_BRIDGE_RESOURCES+1].start + mem_res.end;
 -
--#ifdef CONFIG_32BIT
--	/*
--	 * First try to remap through the PMB once a valid VMA has been
--	 * established. Smaller allocations (or the rest of the size
--	 * remaining after a PMB mapping due to the size not being
--	 * perfectly aligned on a PMB size boundary) are then mapped
--	 * through the UTLB using conventional page tables.
--	 *
--	 * PMB entries are all pre-faulted.
--	 */
--	if (unlikely(size >= 0x1000000)) {
--		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+-	/* adjust parent's resource requirements */
+-	if (ior) {
+-		ior->end = ALIGN(ior->end, 4*1024);
+-		ior->end += io_res.end;
+-	}
 -
--		if (likely(mapped)) {
--			addr		+= mapped;
--			phys_addr	+= mapped;
--			size		-= mapped;
--		}
+-	if (memr) {
+-		memr->end = ALIGN(memr->end, 1*1024*1024);
+-		memr->end += mem_res.end;
 -	}
--#endif
+-}
 -
--	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
--	if (likely(size))
--		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
--			vunmap((void *)orig_addr);
--			return NULL;
--		}
+-static void __init pcibios_size_bridges(void)
+-{
+-	struct resource io_res, mem_res;
 -
--	return (void __iomem *)(offset + (char *)orig_addr);
+-	memset(&io_res, 0, sizeof(io_res));
+-	memset(&mem_res, 0, sizeof(mem_res));
+-
+-	pcibios_size_bridge(pci_root_bus, &io_res, &mem_res);
 -}
--EXPORT_SYMBOL(__ioremap);
 -
--void __iounmap(void __iomem *addr)
+-static int __init pcibios_init(void)
 -{
--	unsigned long vaddr = (unsigned long __force)addr;
--	struct vm_struct *p;
+-        if (request_irq(IRQ_ERR, pcish5_err_irq,
+-                        IRQF_DISABLED, "PCI Error",NULL) < 0) {
+-                printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
+-                return -EINVAL;
+-        }
 -
--	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
--		return;
+-        if (request_irq(IRQ_SERR, pcish5_serr_irq,
+-                        IRQF_DISABLED, "PCI SERR interrupt", NULL) < 0) {
+-                printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
+-                return -EINVAL;
+-        }
 -
--#ifdef CONFIG_32BIT
--	/*
--	 * Purge any PMB entries that may have been established for this
--	 * mapping, then proceed with conventional VMA teardown.
--	 *
--	 * XXX: Note that due to the way that remove_vm_area() does
--	 * matching of the resultant VMA, we aren't able to fast-forward
--	 * the address past the PMB space until the end of the VMA where
--	 * the page tables reside. As such, unmap_vm_area() will be
--	 * forced to linearly scan over the area until it finds the page
--	 * tables where PTEs that need to be unmapped actually reside,
--	 * which is far from optimal. Perhaps we need to use a separate
--	 * VMA for the PMB mappings?
--	 *					-- PFM.
+-	/* The pci subsystem needs to know where memory is and how much
+-	 * of it there is. I've simply made these globals. A better mechanism
+-	 * is probably needed.
 -	 */
--	pmb_unmap(vaddr);
--#endif
+-	sh5pci_init(__pa(memory_start),
+-		     __pa(memory_end) - __pa(memory_start));
 -
--	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
--	if (!p) {
--		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
--		return;
--	}
+-	pci_root_bus = pci_scan_bus(0, &pci_config_ops, NULL);
+-	pcibios_size_bridges();
+-	pci_assign_unassigned_resources();
+-	pci_fixup_irqs(no_swizzle, map_cayman_irq);
 -
--	kfree(p);
+-	return 0;
 -}
--EXPORT_SYMBOL(__iounmap);
-diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
-new file mode 100644
-index 0000000..0c7b7e3
---- /dev/null
-+++ b/arch/sh/mm/ioremap_32.c
-@@ -0,0 +1,150 @@
-+/*
-+ * arch/sh/mm/ioremap.c
-+ *
-+ * Re-map IO memory to kernel address space so that we can access it.
-+ * This is needed for high PCI addresses that aren't mapped in the
-+ * 640k-1MB IO memory area on PC's
-+ *
-+ * (C) Copyright 1995 1996 Linus Torvalds
-+ * (C) Copyright 2005, 2006 Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General
-+ * Public License. See the file "COPYING" in the main directory of this
-+ * archive for more details.
-+ */
-+#include <linux/vmalloc.h>
-+#include <linux/module.h>
-+#include <linux/mm.h>
-+#include <linux/pci.h>
-+#include <linux/io.h>
-+#include <asm/page.h>
-+#include <asm/pgalloc.h>
-+#include <asm/addrspace.h>
-+#include <asm/cacheflush.h>
-+#include <asm/tlbflush.h>
-+#include <asm/mmu.h>
-+
-+/*
-+ * Remap an arbitrary physical address space into the kernel virtual
-+ * address space. Needed when the kernel wants to access high addresses
-+ * directly.
-+ *
-+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-+ * have to convert them into an offset in a page-aligned mapping, but the
-+ * caller shouldn't need to know that small detail.
-+ */
-+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-+			unsigned long flags)
-+{
-+	struct vm_struct * area;
-+	unsigned long offset, last_addr, addr, orig_addr;
-+	pgprot_t pgprot;
-+
-+	/* Don't allow wraparound or zero size */
-+	last_addr = phys_addr + size - 1;
-+	if (!size || last_addr < phys_addr)
-+		return NULL;
-+
-+	/*
-+	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
-+	 * mapped at the end of the address space (typically 0xfd000000)
-+	 * in a non-translatable area, so mapping through page tables for
-+	 * this area is not only pointless, but also fundamentally
-+	 * broken. Just return the physical address instead.
-+	 *
-+	 * For boards that map a small PCI memory aperture somewhere in
-+	 * P1/P2 space, ioremap() will already do the right thing,
-+	 * and we'll never get this far.
-+	 */
-+	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
-+		return (void __iomem *)phys_addr;
-+
-+	/*
-+	 * Don't allow anybody to remap normal RAM that we're using..
-+	 */
-+	if (phys_addr < virt_to_phys(high_memory))
-+		return NULL;
-+
-+	/*
-+	 * Mappings have to be page-aligned
-+	 */
-+	offset = phys_addr & ~PAGE_MASK;
-+	phys_addr &= PAGE_MASK;
-+	size = PAGE_ALIGN(last_addr+1) - phys_addr;
-+
-+	/*
-+	 * Ok, go for it..
-+	 */
-+	area = get_vm_area(size, VM_IOREMAP);
-+	if (!area)
-+		return NULL;
-+	area->phys_addr = phys_addr;
-+	orig_addr = addr = (unsigned long)area->addr;
-+
-+#ifdef CONFIG_32BIT
-+	/*
-+	 * First try to remap through the PMB once a valid VMA has been
-+	 * established. Smaller allocations (or the rest of the size
-+	 * remaining after a PMB mapping due to the size not being
-+	 * perfectly aligned on a PMB size boundary) are then mapped
-+	 * through the UTLB using conventional page tables.
-+	 *
-+	 * PMB entries are all pre-faulted.
-+	 */
-+	if (unlikely(size >= 0x1000000)) {
-+		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
-+
-+		if (likely(mapped)) {
-+			addr		+= mapped;
-+			phys_addr	+= mapped;
-+			size		-= mapped;
-+		}
-+	}
-+#endif
-+
-+	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
-+	if (likely(size))
-+		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
-+			vunmap((void *)orig_addr);
-+			return NULL;
-+		}
-+
-+	return (void __iomem *)(offset + (char *)orig_addr);
-+}
-+EXPORT_SYMBOL(__ioremap);
-+
-+void __iounmap(void __iomem *addr)
-+{
-+	unsigned long vaddr = (unsigned long __force)addr;
-+	struct vm_struct *p;
-+
-+	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
-+		return;
-+
-+#ifdef CONFIG_32BIT
-+	/*
-+	 * Purge any PMB entries that may have been established for this
-+	 * mapping, then proceed with conventional VMA teardown.
-+	 *
-+	 * XXX: Note that due to the way that remove_vm_area() does
-+	 * matching of the resultant VMA, we aren't able to fast-forward
-+	 * the address past the PMB space until the end of the VMA where
-+	 * the page tables reside. As such, unmap_vm_area() will be
-+	 * forced to linearly scan over the area until it finds the page
-+	 * tables where PTEs that need to be unmapped actually reside,
-+	 * which is far from optimal. Perhaps we need to use a separate
-+	 * VMA for the PMB mappings?
-+	 *					-- PFM.
-+	 */
-+	pmb_unmap(vaddr);
-+#endif
-+
-+	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
-+	if (!p) {
-+		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
-+		return;
-+	}
-+
-+	kfree(p);
-+}
-+EXPORT_SYMBOL(__iounmap);
-diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
-new file mode 100644
-index 0000000..e27d165
---- /dev/null
-+++ b/arch/sh/mm/ioremap_64.c
-@@ -0,0 +1,404 @@
-+/*
-+ * arch/sh/mm/ioremap_64.c
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003 - 2007  Paul Mundt
-+ *
-+ * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
-+ * derived from arch/i386/mm/ioremap.c .
-+ *
-+ *   (C) Copyright 1995 1996 Linus Torvalds
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/vmalloc.h>
-+#include <linux/ioport.h>
-+#include <linux/module.h>
-+#include <linux/mm.h>
-+#include <linux/io.h>
-+#include <linux/bootmem.h>
-+#include <linux/proc_fs.h>
-+#include <asm/page.h>
-+#include <asm/pgalloc.h>
-+#include <asm/addrspace.h>
-+#include <asm/cacheflush.h>
-+#include <asm/tlbflush.h>
-+#include <asm/mmu.h>
-+
-+static void shmedia_mapioaddr(unsigned long, unsigned long);
-+static unsigned long shmedia_ioremap(struct resource *, u32, int);
-+
-+/*
-+ * Generic mapping function (not visible outside):
-+ */
-+
-+/*
-+ * Remap an arbitrary physical address space into the kernel virtual
-+ * address space. Needed when the kernel wants to access high addresses
-+ * directly.
-+ *
-+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-+ * have to convert them into an offset in a page-aligned mapping, but the
-+ * caller shouldn't need to know that small detail.
-+ */
-+void *__ioremap(unsigned long phys_addr, unsigned long size,
-+		unsigned long flags)
-+{
-+	void * addr;
-+	struct vm_struct * area;
-+	unsigned long offset, last_addr;
-+	pgprot_t pgprot;
-+
-+	/* Don't allow wraparound or zero size */
-+	last_addr = phys_addr + size - 1;
-+	if (!size || last_addr < phys_addr)
-+		return NULL;
-+
-+	pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ   |
-+			  _PAGE_WRITE    | _PAGE_DIRTY  |
-+			  _PAGE_ACCESSED | _PAGE_SHARED | flags);
-+
-+	/*
-+	 * Mappings have to be page-aligned
-+	 */
-+	offset = phys_addr & ~PAGE_MASK;
-+	phys_addr &= PAGE_MASK;
-+	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-+
-+	/*
-+	 * Ok, go for it..
-+	 */
-+	area = get_vm_area(size, VM_IOREMAP);
-+	pr_debug("Get vm_area returns %p addr %p\n",area,area->addr);
-+	if (!area)
-+		return NULL;
-+	area->phys_addr = phys_addr;
-+	addr = area->addr;
-+	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-+			       phys_addr, pgprot)) {
-+		vunmap(addr);
-+		return NULL;
-+	}
-+	return (void *) (offset + (char *)addr);
-+}
-+EXPORT_SYMBOL(__ioremap);
-+
-+void __iounmap(void *addr)
-+{
-+	struct vm_struct *area;
-+
-+	vfree((void *) (PAGE_MASK & (unsigned long) addr));
-+	area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
-+	if (!area) {
-+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
-+		return;
-+	}
-+
-+	kfree(area);
-+}
-+EXPORT_SYMBOL(__iounmap);
-+
-+static struct resource shmedia_iomap = {
-+	.name	= "shmedia_iomap",
-+	.start	= IOBASE_VADDR + PAGE_SIZE,
-+	.end	= IOBASE_END - 1,
-+};
-+
-+static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
-+static void shmedia_unmapioaddr(unsigned long vaddr);
-+static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
-+
-+/*
-+ * We have the same problem as the SPARC, so lets have the same comment:
-+ * Our mini-allocator...
-+ * Boy this is gross! We need it because we must map I/O for
-+ * timers and interrupt controller before the kmalloc is available.
-+ */
-+
-+#define XNMLN  15
-+#define XNRES  10
-+
-+struct xresource {
-+	struct resource xres;   /* Must be first */
-+	int xflag;              /* 1 == used */
-+	char xname[XNMLN+1];
-+};
-+
-+static struct xresource xresv[XNRES];
-+
-+static struct xresource *xres_alloc(void)
-+{
-+        struct xresource *xrp;
-+        int n;
-+
-+        xrp = xresv;
-+        for (n = 0; n < XNRES; n++) {
-+                if (xrp->xflag == 0) {
-+                        xrp->xflag = 1;
-+                        return xrp;
-+                }
-+                xrp++;
-+        }
-+        return NULL;
-+}
-+
-+static void xres_free(struct xresource *xrp)
-+{
-+	xrp->xflag = 0;
-+}
-+
-+static struct resource *shmedia_find_resource(struct resource *root,
-+					      unsigned long vaddr)
-+{
-+	struct resource *res;
-+
-+	for (res = root->child; res; res = res->sibling)
-+		if (res->start <= vaddr && res->end >= vaddr)
-+			return res;
-+
-+	return NULL;
-+}
-+
-+static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
-+				      const char *name)
-+{
-+        static int printed_full = 0;
-+        struct xresource *xres;
-+        struct resource *res;
-+        char *tack;
-+        int tlen;
-+
-+        if (name == NULL) name = "???";
-+
-+        if ((xres = xres_alloc()) != 0) {
-+                tack = xres->xname;
-+                res = &xres->xres;
-+        } else {
-+                if (!printed_full) {
-+                        printk("%s: done with statics, switching to kmalloc\n",
-+			       __FUNCTION__);
-+                        printed_full = 1;
-+                }
-+                tlen = strlen(name);
-+                tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
-+                if (!tack)
-+			return -ENOMEM;
-+                memset(tack, 0, sizeof(struct resource));
-+                res = (struct resource *) tack;
-+                tack += sizeof (struct resource);
-+        }
-+
-+        strncpy(tack, name, XNMLN);
-+        tack[XNMLN] = 0;
-+        res->name = tack;
-+
-+        return shmedia_ioremap(res, phys, size);
-+}
-+
-+static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
-+{
-+        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
-+	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
-+        unsigned long va;
-+        unsigned int psz;
-+
-+        if (allocate_resource(&shmedia_iomap, res, round_sz,
-+			      shmedia_iomap.start, shmedia_iomap.end,
-+			      PAGE_SIZE, NULL, NULL) != 0) {
-+                panic("alloc_io_res(%s): cannot occupy\n",
-+                    (res->name != NULL)? res->name: "???");
-+        }
-+
-+        va = res->start;
-+        pa &= PAGE_MASK;
-+
-+	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-+
-+	/* log at boot time ... */
-+	printk("mapioaddr: %6s  [%2d page%s]  va 0x%08lx   pa 0x%08x\n",
-+	       ((res->name != NULL) ? res->name : "???"),
-+	       psz, psz == 1 ? " " : "s", va, pa);
-+
-+        for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
-+                shmedia_mapioaddr(pa, va);
-+                va += PAGE_SIZE;
-+                pa += PAGE_SIZE;
-+        }
-+
-+        res->start += offset;
-+        res->end = res->start + sz - 1;         /* not strictly necessary.. */
-+
-+        return res->start;
-+}
-+
-+static void shmedia_free_io(struct resource *res)
-+{
-+	unsigned long len = res->end - res->start + 1;
-+
-+	BUG_ON((len & (PAGE_SIZE - 1)) != 0);
-+
-+	while (len) {
-+		len -= PAGE_SIZE;
-+		shmedia_unmapioaddr(res->start + len);
-+	}
-+
-+	release_resource(res);
-+}
-+
-+static __init_refok void *sh64_get_page(void)
-+{
-+	extern int after_bootmem;
-+	void *page;
-+
-+	if (after_bootmem) {
-+		page = (void *)get_zeroed_page(GFP_ATOMIC);
-+	} else {
-+		page = alloc_bootmem_pages(PAGE_SIZE);
-+	}
-+
-+	if (!page || ((unsigned long)page & ~PAGE_MASK))
-+		panic("sh64_get_page: Out of memory already?\n");
-+
-+	return page;
-+}
-+
-+static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
-+{
-+	pgd_t *pgdp;
-+	pud_t *pudp;
-+	pmd_t *pmdp;
-+	pte_t *ptep, pte;
-+	pgprot_t prot;
-+	unsigned long flags = 1; /* 1 = CB0-1 device */
-+
-+	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);
-+
-+	pgdp = pgd_offset_k(va);
-+	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
-+		pudp = (pud_t *)sh64_get_page();
-+		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
-+	}
-+
-+	pudp = pud_offset(pgdp, va);
-+	if (pud_none(*pudp) || !pud_present(*pudp)) {
-+		pmdp = (pmd_t *)sh64_get_page();
-+		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
-+	}
-+
-+	pmdp = pmd_offset(pudp, va);
-+	if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
-+		ptep = (pte_t *)sh64_get_page();
-+		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
-+	}
-+
-+	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
-+			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);
-+
-+	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
-+	ptep = pte_offset_kernel(pmdp, va);
-+
-+	if (!pte_none(*ptep) &&
-+	    pte_val(*ptep) != pte_val(pte))
-+		pte_ERROR(*ptep);
-+
-+	set_pte(ptep, pte);
-+
-+	flush_tlb_kernel_range(va, PAGE_SIZE);
-+}
-+
-+static void shmedia_unmapioaddr(unsigned long vaddr)
-+{
-+	pgd_t *pgdp;
-+	pud_t *pudp;
-+	pmd_t *pmdp;
-+	pte_t *ptep;
-+
-+	pgdp = pgd_offset_k(vaddr);
-+	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
-+		return;
-+
-+	pudp = pud_offset(pgdp, vaddr);
-+	if (pud_none(*pudp) || pud_bad(*pudp))
-+		return;
-+
-+	pmdp = pmd_offset(pudp, vaddr);
-+	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
-+		return;
-+
-+	ptep = pte_offset_kernel(pmdp, vaddr);
-+
-+	if (pte_none(*ptep) || !pte_present(*ptep))
-+		return;
-+
-+	clear_page((void *)ptep);
-+	pte_clear(&init_mm, vaddr, ptep);
-+}
-+
-+unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
-+{
-+	if (size < PAGE_SIZE)
-+		size = PAGE_SIZE;
-+
-+	return shmedia_alloc_io(phys, size, name);
-+}
-+
-+void onchip_unmap(unsigned long vaddr)
-+{
-+	struct resource *res;
-+	unsigned int psz;
-+
-+	res = shmedia_find_resource(&shmedia_iomap, vaddr);
-+	if (!res) {
-+		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
-+		       __FUNCTION__, vaddr);
-+		return;
-+	}
-+
-+        psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-+
-+        printk(KERN_DEBUG "unmapioaddr: %6s  [%2d page%s] freed\n",
-+	       res->name, psz, psz == 1 ? " " : "s");
-+
-+	shmedia_free_io(res);
-+
-+	if ((char *)res >= (char *)xresv &&
-+	    (char *)res <  (char *)&xresv[XNRES]) {
-+		xres_free((struct xresource *)res);
-+	} else {
-+		kfree(res);
-+	}
-+}
-+
-+#ifdef CONFIG_PROC_FS
-+static int
-+ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
-+		  void *data)
-+{
-+	char *p = buf, *e = buf + length;
-+	struct resource *r;
-+	const char *nm;
-+
-+	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
-+		if (p + 32 >= e)        /* Better than nothing */
-+			break;
-+		if ((nm = r->name) == 0) nm = "???";
-+		p += sprintf(p, "%08lx-%08lx: %s\n",
-+			     (unsigned long)r->start,
-+			     (unsigned long)r->end, nm);
-+	}
-+
-+	return p-buf;
-+}
-+#endif /* CONFIG_PROC_FS */
-+
-+static int __init register_proc_onchip(void)
-+{
-+#ifdef CONFIG_PROC_FS
-+	create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
-+#endif
-+	return 0;
-+}
-+
-+__initcall(register_proc_onchip);
-diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c
-index d15221b..677dd57 100644
---- a/arch/sh/mm/pg-nommu.c
-+++ b/arch/sh/mm/pg-nommu.c
-@@ -14,12 +14,12 @@
- #include <linux/string.h>
- #include <asm/page.h>
- 
--void copy_page_nommu(void *to, void *from)
-+void copy_page(void *to, void *from)
- {
- 	memcpy(to, from, PAGE_SIZE);
- }
- 
--void clear_page_nommu(void *to)
-+void clear_page(void *to)
- {
- 	memset(to, 0, PAGE_SIZE);
- }
-diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
-index 1d45b82..ab81c60 100644
---- a/arch/sh/mm/pmb.c
-+++ b/arch/sh/mm/pmb.c
-@@ -27,6 +27,7 @@
- #include <asm/pgtable.h>
- #include <asm/mmu.h>
- #include <asm/io.h>
-+#include <asm/mmu_context.h>
- 
- #define NR_PMB_ENTRIES	16
- 
-@@ -162,18 +163,18 @@ repeat:
- 	return 0;
- }
- 
--int set_pmb_entry(struct pmb_entry *pmbe)
-+int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
- {
- 	int ret;
- 
--	jump_to_P2();
-+	jump_to_uncached();
- 	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
--	back_to_P1();
-+	back_to_cached();
- 
- 	return ret;
- }
- 
--void clear_pmb_entry(struct pmb_entry *pmbe)
-+void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
- {
- 	unsigned int entry = pmbe->entry;
- 	unsigned long addr;
-@@ -187,7 +188,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
- 		     entry >= NR_PMB_ENTRIES))
- 		return;
- 
--	jump_to_P2();
-+	jump_to_uncached();
- 
- 	/* Clear V-bit */
- 	addr = mk_pmb_addr(entry);
-@@ -196,7 +197,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
- 	addr = mk_pmb_data(entry);
- 	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
- 
--	back_to_P1();
-+	back_to_cached();
- 
- 	clear_bit(entry, &pmb_map);
- }
-@@ -301,17 +302,17 @@ static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
- 	pmbe->entry = PMB_NO_ENTRY;
- }
- 
--static int __init pmb_init(void)
-+static int __uses_jump_to_uncached pmb_init(void)
- {
- 	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
--	unsigned int entry;
-+	unsigned int entry, i;
- 
- 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
- 
- 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
- 				      SLAB_PANIC, pmb_cache_ctor);
- 
--	jump_to_P2();
-+	jump_to_uncached();
- 
- 	/*
- 	 * Ordering is important, P2 must be mapped in the PMB before we
-@@ -329,7 +330,12 @@ static int __init pmb_init(void)
- 	/* PMB.SE and UB[7] */
- 	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
- 
--	back_to_P1();
-+	/* Flush out the TLB */
-+	i =  ctrl_inl(MMUCR);
-+	i |= MMUCR_TI;
-+	ctrl_outl(i, MMUCR);
-+
-+	back_to_cached();
- 
- 	return 0;
- }
-diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
+-
+-subsys_initcall(pcibios_init);
+-
+-void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+-{
+-	struct pci_dev *dev = bus->self;
+-	int i;
+-
+-#if 1
+-	if(dev) {
+-		for(i=0; i<3; i++) {
+-			bus->resource[i] =
+-				&dev->resource[PCI_BRIDGE_RESOURCES+i];
+-			bus->resource[i]->name = bus->name;
+-		}
+-		bus->resource[0]->flags |= IORESOURCE_IO;
+-		bus->resource[1]->flags |= IORESOURCE_MEM;
+-
+-		/* For now, propagate host limits to the bus;
+-		 * we'll adjust them later. */
+-
+-#if 1
+-		bus->resource[0]->end = 64*1024 - 1 ;
+-		bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1;
+-		bus->resource[0]->start = PCIBIOS_MIN_IO;
+-		bus->resource[1]->start = PCIBIOS_MIN_MEM;
+-#else
+-		bus->resource[0]->end = 0;
+-		bus->resource[1]->end = 0;
+-		bus->resource[0]->start =0;
+-		bus->resource[1]->start = 0;
+-#endif
+-		/* Turn off downstream PF memory address range by default */
+-		bus->resource[2]->start = 1024*1024;
+-		bus->resource[2]->end = bus->resource[2]->start - 1;
+-	}
+-#endif
+-
+-}
+-
+diff --git a/arch/sh64/kernel/pci_sh5.h b/arch/sh64/kernel/pci_sh5.h
 deleted file mode 100644
-index 6f45c1f..0000000
---- a/arch/sh/mm/tlb-flush.c
+index c71159d..0000000
+--- a/arch/sh64/kernel/pci_sh5.h
 +++ /dev/null
-@@ -1,140 +0,0 @@
+@@ -1,107 +0,0 @@
 -/*
-- * TLB flushing operations for SH with an MMU.
+- * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
 - *
-- *  Copyright (C) 1999  Niibe Yutaka
-- *  Copyright (C) 2003  Paul Mundt
+- * May be copied or modified under the terms of the GNU General Public
+- * License.  See linux/COPYING for more information.
+- *
+- * Definitions for the SH5 PCI hardware.
+- */
+-
+-/* Product ID */
+-#define PCISH5_PID		0x350d
+-
+-/* vendor ID */
+-#define PCISH5_VID		0x1054
+-
+-/* Configuration types */
+-#define ST_TYPE0                0x00    /* Configuration cycle type 0 */
+-#define ST_TYPE1                0x01    /* Configuration cycle type 1 */
+-
+-/* VCR data */
+-#define PCISH5_VCR_STATUS      0x00
+-#define PCISH5_VCR_VERSION     0x08
+-
+-/*
+-** ICR register offsets and bits
+-*/
+-#define PCISH5_ICR_CR          0x100   /* PCI control register values */
+-#define CR_PBAM                 (1<<12)
+-#define CR_PFCS                 (1<<11)
+-#define CR_FTO                  (1<<10)
+-#define CR_PFE                  (1<<9)
+-#define CR_TBS                  (1<<8)
+-#define CR_SPUE                 (1<<7)
+-#define CR_BMAM                 (1<<6)
+-#define CR_HOST                 (1<<5)
+-#define CR_CLKEN                (1<<4)
+-#define CR_SOCS                 (1<<3)
+-#define CR_IOCS                 (1<<2)
+-#define CR_RSTCTL               (1<<1)
+-#define CR_CFINT                (1<<0)
+-#define CR_LOCK_MASK            0xa5000000
+-
+-#define PCISH5_ICR_INT         0x114   /* Interrupt registert values     */
+-#define INT_MADIM               (1<<2)
+-
+-#define PCISH5_ICR_LSR0        0X104   /* Local space register values    */
+-#define PCISH5_ICR_LSR1        0X108   /* Local space register values    */
+-#define PCISH5_ICR_LAR0        0x10c   /* Local address register values  */
+-#define PCISH5_ICR_LAR1        0x110   /* Local address register values  */
+-#define PCISH5_ICR_INTM        0x118   /* Interrupt mask register values                         */
+-#define PCISH5_ICR_AIR         0x11c   /* Interrupt error address information register values    */
+-#define PCISH5_ICR_CIR         0x120   /* Interrupt error command information register values    */
+-#define PCISH5_ICR_AINT        0x130   /* Interrupt error arbiter interrupt register values      */
+-#define PCISH5_ICR_AINTM       0x134   /* Interrupt error arbiter interrupt mask register values */
+-#define PCISH5_ICR_BMIR        0x138   /* Interrupt error info register of bus master values     */
+-#define PCISH5_ICR_PAR         0x1c0   /* Pio address register values                            */
+-#define PCISH5_ICR_MBR         0x1c4   /* Memory space bank register values                      */
+-#define PCISH5_ICR_IOBR        0x1c8   /* I/O space bank register values                         */
+-#define PCISH5_ICR_PINT        0x1cc   /* power management interrupt register values             */
+-#define PCISH5_ICR_PINTM       0x1d0   /* power management interrupt mask register values        */
+-#define PCISH5_ICR_MBMR        0x1d8   /* memory space bank mask register values                 */
+-#define PCISH5_ICR_IOBMR       0x1dc   /* I/O space bank mask register values                    */
+-#define PCISH5_ICR_CSCR0       0x210   /* PCI cache snoop control register 0                     */
+-#define PCISH5_ICR_CSCR1       0x214   /* PCI cache snoop control register 1                     */
+-#define PCISH5_ICR_PDR         0x220   /* Pio data register values                               */
+-
+-/* These are configs space registers */
+-#define PCISH5_ICR_CSR_VID     0x000	/* Vendor id                           */
+-#define PCISH5_ICR_CSR_DID     0x002   /* Device id                           */
+-#define PCISH5_ICR_CSR_CMD     0x004   /* Command register                    */
+-#define PCISH5_ICR_CSR_STATUS  0x006   /* Stautus                             */
+-#define PCISH5_ICR_CSR_IBAR0   0x010   /* I/O base address register           */
+-#define PCISH5_ICR_CSR_MBAR0   0x014   /* First  Memory base address register */
+-#define PCISH5_ICR_CSR_MBAR1   0x018   /* Second Memory base address register */
+-
+-
+-
+-/* Base address of registers */
+-#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
+-#define SH5PCI_IO_BASE  (PHYS_PCI_BLOCK + 0x00800000)
+-/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG)    */
+-
+-/* Register selection macro */
+-#define PCISH5_ICR_REG(x)                ( pcicr_virt + (PCISH5_ICR_##x))
+-/* #define PCISH5_VCR_REG(x)                ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
+-
+-/* Write I/O functions */
+-#define SH5PCI_WRITE(reg,val)        ctrl_outl((u32)(val),PCISH5_ICR_REG(reg))
+-#define SH5PCI_WRITE_SHORT(reg,val)  ctrl_outw((u16)(val),PCISH5_ICR_REG(reg))
+-#define SH5PCI_WRITE_BYTE(reg,val)   ctrl_outb((u8)(val),PCISH5_ICR_REG(reg))
+-
+-/* Read I/O functions */
+-#define SH5PCI_READ(reg)             ctrl_inl(PCISH5_ICR_REG(reg))
+-#define SH5PCI_READ_SHORT(reg)       ctrl_inw(PCISH5_ICR_REG(reg))
+-#define SH5PCI_READ_BYTE(reg)        ctrl_inb(PCISH5_ICR_REG(reg))
+-
+-/* Set PCI config bits */
+-#define SET_CONFIG_BITS(bus,devfn,where)  ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
+-
+-/* Set PCI command register */
+-#define CONFIG_CMD(bus, devfn, where)            SET_CONFIG_BITS(bus->number,devfn,where)
+-
+-/* Size converters */
+-#define PCISH5_MEM_SIZCONV(x)		  (((x / 0x40000) - 1) << 18)
+-#define PCISH5_IO_SIZCONV(x)		  (((x / 0x40000) - 1) << 18)
+-
+-
+diff --git a/arch/sh64/kernel/pcibios.c b/arch/sh64/kernel/pcibios.c
+deleted file mode 100644
+index 945920b..0000000
+--- a/arch/sh64/kernel/pcibios.c
++++ /dev/null
+@@ -1,168 +0,0 @@
+-/*
+- * $Id: pcibios.c,v 1.1 2001/08/24 12:38:19 dwmw2 Exp $
+- *
+- * arch/sh/kernel/pcibios.c
+- *
+- * Copyright (C) 2002 STMicroelectronics Limited
+- *   Author : David J. McKay
+- *
+- * Copyright (C) 2004 Richard Curnow, SuperH UK Limited
 - *
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
+- * This is GPL'd.
+- *
+- * Provided here are generic versions of:
+- *	pcibios_update_resource()
+- *	pcibios_align_resource()
+- *	pcibios_enable_device()
+- *	pcibios_set_master()
+- *	pcibios_update_irq()
+- *
+- * These functions are collected here to reduce duplication of common
+- * code amongst the many platform-specific PCI support code files.
+- *
+- * Platform-specific files are expected to provide:
+- *	pcibios_fixup_bus()
+- *	pcibios_init()
+- *	pcibios_setup()
+- *	pcibios_fixup_pbus_ranges()
 - */
--#include <linux/mm.h>
--#include <asm/mmu_context.h>
--#include <asm/tlbflush.h>
 -
--void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+-#include <linux/kernel.h>
+-#include <linux/pci.h>
+-#include <linux/init.h>
+-
+-void
+-pcibios_update_resource(struct pci_dev *dev, struct resource *root,
+-			struct resource *res, int resource)
 -{
--	unsigned int cpu = smp_processor_id();
+-	u32 new, check;
+-	int reg;
 -
--	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
--		unsigned long flags;
--		unsigned long asid;
--		unsigned long saved_asid = MMU_NO_ASID;
+-	new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
+-	if (resource < 6) {
+-		reg = PCI_BASE_ADDRESS_0 + 4*resource;
+-	} else if (resource == PCI_ROM_RESOURCE) {
+-		res->flags |= IORESOURCE_ROM_ENABLE;
+-		new |= PCI_ROM_ADDRESS_ENABLE;
+-		reg = dev->rom_base_reg;
+-	} else {
+-		/* Somebody might have asked allocation of a non-standard resource */
+-		return;
+-	}
 -
--		asid = cpu_asid(cpu, vma->vm_mm);
--		page &= PAGE_MASK;
+-	pci_write_config_dword(dev, reg, new);
+-	pci_read_config_dword(dev, reg, &check);
+-	if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
+-		printk(KERN_ERR "PCI: Error while updating region "
+-		       "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
+-		       new, check);
+-	}
+-}
 -
--		local_irq_save(flags);
--		if (vma->vm_mm != current->mm) {
--			saved_asid = get_asid();
--			set_asid(asid);
+-/*
+- * We need to avoid collisions with `mirrored' VGA ports
+- * and other strange ISA hardware, so we always want the
+- * addresses to be allocated in the 0x000-0x0ff region
+- * modulo 0x400.
+- */
+-void pcibios_align_resource(void *data, struct resource *res,
+-			    resource_size_t size, resource_size_t align)
+-{
+-	if (res->flags & IORESOURCE_IO) {
+-		resource_size_t start = res->start;
+-
+-		if (start & 0x300) {
+-			start = (start + 0x3ff) & ~0x3ff;
+-			res->start = start;
 -		}
--		local_flush_tlb_one(asid, page);
--		if (saved_asid != MMU_NO_ASID)
--			set_asid(saved_asid);
--		local_irq_restore(flags);
 -	}
 -}
 -
--void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
--			   unsigned long end)
+-static void pcibios_enable_bridge(struct pci_dev *dev)
 -{
--	struct mm_struct *mm = vma->vm_mm;
--	unsigned int cpu = smp_processor_id();
+-	struct pci_bus *bus = dev->subordinate;
+-	u16 cmd, old_cmd;
 -
--	if (cpu_context(cpu, mm) != NO_CONTEXT) {
--		unsigned long flags;
--		int size;
+-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+-	old_cmd = cmd;
 -
--		local_irq_save(flags);
--		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
--		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
--			cpu_context(cpu, mm) = NO_CONTEXT;
--			if (mm == current->mm)
--				activate_context(mm, cpu);
--		} else {
--			unsigned long asid;
--			unsigned long saved_asid = MMU_NO_ASID;
+-	if (bus->resource[0]->flags & IORESOURCE_IO) {
+-		cmd |= PCI_COMMAND_IO;
+-	}
+-	if ((bus->resource[1]->flags & IORESOURCE_MEM) ||
+-	    (bus->resource[2]->flags & IORESOURCE_PREFETCH)) {
+-		cmd |= PCI_COMMAND_MEMORY;
+-	}
 -
--			asid = cpu_asid(cpu, mm);
--			start &= PAGE_MASK;
--			end += (PAGE_SIZE - 1);
--			end &= PAGE_MASK;
--			if (mm != current->mm) {
--				saved_asid = get_asid();
--				set_asid(asid);
--			}
--			while (start < end) {
--				local_flush_tlb_one(asid, start);
--				start += PAGE_SIZE;
--			}
--			if (saved_asid != MMU_NO_ASID)
--				set_asid(saved_asid);
--		}
--		local_irq_restore(flags);
+-	if (cmd != old_cmd) {
+-		pci_write_config_word(dev, PCI_COMMAND, cmd);
 -	}
+-
+-	printk("PCI bridge %s, command register -> %04x\n",
+-		pci_name(dev), cmd);
+-
 -}
 -
--void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+-
+-
+-int pcibios_enable_device(struct pci_dev *dev, int mask)
 -{
--	unsigned int cpu = smp_processor_id();
--	unsigned long flags;
--	int size;
+-	u16 cmd, old_cmd;
+-	int idx;
+-	struct resource *r;
 -
--	local_irq_save(flags);
--	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
--	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
--		local_flush_tlb_all();
--	} else {
--		unsigned long asid;
--		unsigned long saved_asid = get_asid();
+-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+-		pcibios_enable_bridge(dev);
+-	}
 -
--		asid = cpu_asid(cpu, &init_mm);
--		start &= PAGE_MASK;
--		end += (PAGE_SIZE - 1);
--		end &= PAGE_MASK;
--		set_asid(asid);
--		while (start < end) {
--			local_flush_tlb_one(asid, start);
--			start += PAGE_SIZE;
+-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+-	old_cmd = cmd;
+-	for(idx=0; idx<6; idx++) {
+-		if (!(mask & (1 << idx)))
+-			continue;
+-		r = &dev->resource[idx];
+-		if (!r->start && r->end) {
+-			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
+-			return -EINVAL;
 -		}
--		set_asid(saved_asid);
+-		if (r->flags & IORESOURCE_IO)
+-			cmd |= PCI_COMMAND_IO;
+-		if (r->flags & IORESOURCE_MEM)
+-			cmd |= PCI_COMMAND_MEMORY;
 -	}
--	local_irq_restore(flags);
+-	if (dev->resource[PCI_ROM_RESOURCE].start)
+-		cmd |= PCI_COMMAND_MEMORY;
+-	if (cmd != old_cmd) {
+-		printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
+-		pci_write_config_word(dev, PCI_COMMAND, cmd);
+-	}
+-	return 0;
 -}
 -
--void local_flush_tlb_mm(struct mm_struct *mm)
+-/*
+- *  If we set up a device for bus mastering, we need to check and set
+- *  the latency timer as it may not be properly set.
+- */
+-unsigned int pcibios_max_latency = 255;
+-
+-void pcibios_set_master(struct pci_dev *dev)
 -{
--	unsigned int cpu = smp_processor_id();
+-	u8 lat;
+-	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+-	if (lat < 16)
+-		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
+-	else if (lat > pcibios_max_latency)
+-		lat = pcibios_max_latency;
+-	else
+-		return;
+-	printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
+-	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+-}
 -
--	/* Invalidate all TLB of this process. */
--	/* Instead of invalidating each TLB, we get new MMU context. */
--	if (cpu_context(cpu, mm) != NO_CONTEXT) {
--		unsigned long flags;
+-void __init pcibios_update_irq(struct pci_dev *dev, int irq)
+-{
+-	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+-}
+diff --git a/arch/sh64/kernel/process.c b/arch/sh64/kernel/process.c
+deleted file mode 100644
+index 0761af4..0000000
+--- a/arch/sh64/kernel/process.c
++++ /dev/null
+@@ -1,691 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/process.c
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003  Paul Mundt
+- * Copyright (C) 2003, 2004 Richard Curnow
+- *
+- * Started from SH3/4 version:
+- *   Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+- *
+- *   In turn started from i386 version:
+- *     Copyright (C) 1995  Linus Torvalds
+- *
+- */
 -
--		local_irq_save(flags);
--		cpu_context(cpu, mm) = NO_CONTEXT;
--		if (mm == current->mm)
--			activate_context(mm, cpu);
--		local_irq_restore(flags);
+-/*
+- * This file handles the architecture-dependent parts of process handling..
+- */
+-#include <linux/mm.h>
+-#include <linux/fs.h>
+-#include <linux/ptrace.h>
+-#include <linux/reboot.h>
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/proc_fs.h>
+-#include <asm/uaccess.h>
+-#include <asm/pgtable.h>
+-
+-struct task_struct *last_task_used_math = NULL;
+-
+-static int hlt_counter = 1;
+-
+-#define HARD_IDLE_TIMEOUT (HZ / 3)
+-
+-void disable_hlt(void)
+-{
+-	hlt_counter++;
+-}
+-
+-void enable_hlt(void)
+-{
+-	hlt_counter--;
+-}
+-
+-static int __init nohlt_setup(char *__unused)
+-{
+-	hlt_counter = 1;
+-	return 1;
+-}
+-
+-static int __init hlt_setup(char *__unused)
+-{
+-	hlt_counter = 0;
+-	return 1;
+-}
+-
+-__setup("nohlt", nohlt_setup);
+-__setup("hlt", hlt_setup);
+-
+-static inline void hlt(void)
+-{
+-	__asm__ __volatile__ ("sleep" : : : "memory");
+-}
+-
+-/*
+- * The idle loop on a uniprocessor SH..
+- */
+-void cpu_idle(void)
+-{
+-	/* endless idle loop with no priority at all */
+-	while (1) {
+-		if (hlt_counter) {
+-			while (!need_resched())
+-				cpu_relax();
+-		} else {
+-			local_irq_disable();
+-			while (!need_resched()) {
+-				local_irq_enable();
+-				hlt();
+-				local_irq_disable();
+-			}
+-			local_irq_enable();
+-		}
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
 -	}
+-
 -}
 -
--void local_flush_tlb_all(void)
+-void machine_restart(char * __unused)
 -{
--	unsigned long flags, status;
+-	extern void phys_stext(void);
+-
+-	phys_stext();
+-}
+-
+-void machine_halt(void)
+-{
+-	for (;;);
+-}
+-
+-void machine_power_off(void)
+-{
+-	extern void enter_deep_standby(void);
+-
+-	enter_deep_standby();
+-}
+-
+-void (*pm_power_off)(void) = machine_power_off;
+-EXPORT_SYMBOL(pm_power_off);
+-
+-void show_regs(struct pt_regs * regs)
+-{
+-	unsigned long long ah, al, bh, bl, ch, cl;
+-
+-	printk("\n");
+-
+-	ah = (regs->pc) >> 32;
+-	al = (regs->pc) & 0xffffffff;
+-	bh = (regs->regs[18]) >> 32;
+-	bl = (regs->regs[18]) & 0xffffffff;
+-	ch = (regs->regs[15]) >> 32;
+-	cl = (regs->regs[15]) & 0xffffffff;
+-	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->sr) >> 32;
+-	al = (regs->sr) & 0xffffffff;
+-        asm volatile ("getcon   " __TEA ", %0" : "=r" (bh));
+-        asm volatile ("getcon   " __TEA ", %0" : "=r" (bl));
+-	bh = (bh) >> 32;
+-	bl = (bl) & 0xffffffff;
+-        asm volatile ("getcon   " __KCR0 ", %0" : "=r" (ch));
+-        asm volatile ("getcon   " __KCR0 ", %0" : "=r" (cl));
+-	ch = (ch) >> 32;
+-	cl = (cl) & 0xffffffff;
+-	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[0]) >> 32;
+-	al = (regs->regs[0]) & 0xffffffff;
+-	bh = (regs->regs[1]) >> 32;
+-	bl = (regs->regs[1]) & 0xffffffff;
+-	ch = (regs->regs[2]) >> 32;
+-	cl = (regs->regs[2]) & 0xffffffff;
+-	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[3]) >> 32;
+-	al = (regs->regs[3]) & 0xffffffff;
+-	bh = (regs->regs[4]) >> 32;
+-	bl = (regs->regs[4]) & 0xffffffff;
+-	ch = (regs->regs[5]) >> 32;
+-	cl = (regs->regs[5]) & 0xffffffff;
+-	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[6]) >> 32;
+-	al = (regs->regs[6]) & 0xffffffff;
+-	bh = (regs->regs[7]) >> 32;
+-	bl = (regs->regs[7]) & 0xffffffff;
+-	ch = (regs->regs[8]) >> 32;
+-	cl = (regs->regs[8]) & 0xffffffff;
+-	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[9]) >> 32;
+-	al = (regs->regs[9]) & 0xffffffff;
+-	bh = (regs->regs[10]) >> 32;
+-	bl = (regs->regs[10]) & 0xffffffff;
+-	ch = (regs->regs[11]) >> 32;
+-	cl = (regs->regs[11]) & 0xffffffff;
+-	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[12]) >> 32;
+-	al = (regs->regs[12]) & 0xffffffff;
+-	bh = (regs->regs[13]) >> 32;
+-	bl = (regs->regs[13]) & 0xffffffff;
+-	ch = (regs->regs[14]) >> 32;
+-	cl = (regs->regs[14]) & 0xffffffff;
+-	printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[16]) >> 32;
+-	al = (regs->regs[16]) & 0xffffffff;
+-	bh = (regs->regs[17]) >> 32;
+-	bl = (regs->regs[17]) & 0xffffffff;
+-	ch = (regs->regs[19]) >> 32;
+-	cl = (regs->regs[19]) & 0xffffffff;
+-	printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[20]) >> 32;
+-	al = (regs->regs[20]) & 0xffffffff;
+-	bh = (regs->regs[21]) >> 32;
+-	bl = (regs->regs[21]) & 0xffffffff;
+-	ch = (regs->regs[22]) >> 32;
+-	cl = (regs->regs[22]) & 0xffffffff;
+-	printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[23]) >> 32;
+-	al = (regs->regs[23]) & 0xffffffff;
+-	bh = (regs->regs[24]) >> 32;
+-	bl = (regs->regs[24]) & 0xffffffff;
+-	ch = (regs->regs[25]) >> 32;
+-	cl = (regs->regs[25]) & 0xffffffff;
+-	printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[26]) >> 32;
+-	al = (regs->regs[26]) & 0xffffffff;
+-	bh = (regs->regs[27]) >> 32;
+-	bl = (regs->regs[27]) & 0xffffffff;
+-	ch = (regs->regs[28]) >> 32;
+-	cl = (regs->regs[28]) & 0xffffffff;
+-	printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[29]) >> 32;
+-	al = (regs->regs[29]) & 0xffffffff;
+-	bh = (regs->regs[30]) >> 32;
+-	bl = (regs->regs[30]) & 0xffffffff;
+-	ch = (regs->regs[31]) >> 32;
+-	cl = (regs->regs[31]) & 0xffffffff;
+-	printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[32]) >> 32;
+-	al = (regs->regs[32]) & 0xffffffff;
+-	bh = (regs->regs[33]) >> 32;
+-	bl = (regs->regs[33]) & 0xffffffff;
+-	ch = (regs->regs[34]) >> 32;
+-	cl = (regs->regs[34]) & 0xffffffff;
+-	printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[35]) >> 32;
+-	al = (regs->regs[35]) & 0xffffffff;
+-	bh = (regs->regs[36]) >> 32;
+-	bl = (regs->regs[36]) & 0xffffffff;
+-	ch = (regs->regs[37]) >> 32;
+-	cl = (regs->regs[37]) & 0xffffffff;
+-	printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[38]) >> 32;
+-	al = (regs->regs[38]) & 0xffffffff;
+-	bh = (regs->regs[39]) >> 32;
+-	bl = (regs->regs[39]) & 0xffffffff;
+-	ch = (regs->regs[40]) >> 32;
+-	cl = (regs->regs[40]) & 0xffffffff;
+-	printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[41]) >> 32;
+-	al = (regs->regs[41]) & 0xffffffff;
+-	bh = (regs->regs[42]) >> 32;
+-	bl = (regs->regs[42]) & 0xffffffff;
+-	ch = (regs->regs[43]) >> 32;
+-	cl = (regs->regs[43]) & 0xffffffff;
+-	printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[44]) >> 32;
+-	al = (regs->regs[44]) & 0xffffffff;
+-	bh = (regs->regs[45]) >> 32;
+-	bl = (regs->regs[45]) & 0xffffffff;
+-	ch = (regs->regs[46]) >> 32;
+-	cl = (regs->regs[46]) & 0xffffffff;
+-	printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[47]) >> 32;
+-	al = (regs->regs[47]) & 0xffffffff;
+-	bh = (regs->regs[48]) >> 32;
+-	bl = (regs->regs[48]) & 0xffffffff;
+-	ch = (regs->regs[49]) >> 32;
+-	cl = (regs->regs[49]) & 0xffffffff;
+-	printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[50]) >> 32;
+-	al = (regs->regs[50]) & 0xffffffff;
+-	bh = (regs->regs[51]) >> 32;
+-	bl = (regs->regs[51]) & 0xffffffff;
+-	ch = (regs->regs[52]) >> 32;
+-	cl = (regs->regs[52]) & 0xffffffff;
+-	printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[53]) >> 32;
+-	al = (regs->regs[53]) & 0xffffffff;
+-	bh = (regs->regs[54]) >> 32;
+-	bl = (regs->regs[54]) & 0xffffffff;
+-	ch = (regs->regs[55]) >> 32;
+-	cl = (regs->regs[55]) & 0xffffffff;
+-	printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[56]) >> 32;
+-	al = (regs->regs[56]) & 0xffffffff;
+-	bh = (regs->regs[57]) >> 32;
+-	bl = (regs->regs[57]) & 0xffffffff;
+-	ch = (regs->regs[58]) >> 32;
+-	cl = (regs->regs[58]) & 0xffffffff;
+-	printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[59]) >> 32;
+-	al = (regs->regs[59]) & 0xffffffff;
+-	bh = (regs->regs[60]) >> 32;
+-	bl = (regs->regs[60]) & 0xffffffff;
+-	ch = (regs->regs[61]) >> 32;
+-	cl = (regs->regs[61]) & 0xffffffff;
+-	printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->regs[62]) >> 32;
+-	al = (regs->regs[62]) & 0xffffffff;
+-	bh = (regs->tregs[0]) >> 32;
+-	bl = (regs->tregs[0]) & 0xffffffff;
+-	ch = (regs->tregs[1]) >> 32;
+-	cl = (regs->tregs[1]) & 0xffffffff;
+-	printk("R62 : %08Lx%08Lx T0  : %08Lx%08Lx T1  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->tregs[2]) >> 32;
+-	al = (regs->tregs[2]) & 0xffffffff;
+-	bh = (regs->tregs[3]) >> 32;
+-	bl = (regs->tregs[3]) & 0xffffffff;
+-	ch = (regs->tregs[4]) >> 32;
+-	cl = (regs->tregs[4]) & 0xffffffff;
+-	printk("T2  : %08Lx%08Lx T3  : %08Lx%08Lx T4  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-
+-	ah = (regs->tregs[5]) >> 32;
+-	al = (regs->tregs[5]) & 0xffffffff;
+-	bh = (regs->tregs[6]) >> 32;
+-	bl = (regs->tregs[6]) & 0xffffffff;
+-	ch = (regs->tregs[7]) >> 32;
+-	cl = (regs->tregs[7]) & 0xffffffff;
+-	printk("T5  : %08Lx%08Lx T6  : %08Lx%08Lx T7  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
 -
 -	/*
--	 * Flush all the TLB.
--	 *
--	 * Write to the MMU control register's bit:
--	 *	TF-bit for SH-3, TI-bit for SH-4.
--	 *      It's same position, bit #2.
+-	 * If we're in kernel mode, dump the stack too..
 -	 */
--	local_irq_save(flags);
--	status = ctrl_inl(MMUCR);
--	status |= 0x04;
--	ctrl_outl(status, MMUCR);
--	ctrl_barrier();
--	local_irq_restore(flags);
+-	if (!user_mode(regs)) {
+-		void show_stack(struct task_struct *tsk, unsigned long *sp);
+-		unsigned long sp = regs->regs[15] & 0xffffffff;
+-		struct task_struct *tsk = get_current();
+-
+-		tsk->thread.kregs = regs;
+-
+-		show_stack(tsk, (unsigned long *)sp);
+-	}
 -}
-diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
-index 1ccca7c..15111bc 100644
---- a/arch/sh/mm/tlb-nommu.c
-+++ b/arch/sh/mm/tlb-nommu.c
-@@ -9,6 +9,7 @@
-  */
- #include <linux/kernel.h>
- #include <linux/mm.h>
-+#include <asm/pgtable.h>
- 
- /*
-  * Nothing too terribly exciting here ..
-@@ -49,3 +50,12 @@ void update_mmu_cache(struct vm_area_struct * vma,
- {
- 	BUG();
- }
-+
-+void __init page_table_range_init(unsigned long start, unsigned long end,
-+				  pgd_t *pgd_base)
-+{
-+}
-+
-+void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-+{
-+}
-diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
-index 2d1dd60..f0c7b73 100644
---- a/arch/sh/mm/tlb-sh4.c
-+++ b/arch/sh/mm/tlb-sh4.c
-@@ -79,7 +79,8 @@ void update_mmu_cache(struct vm_area_struct * vma,
- 	local_irq_restore(flags);
- }
- 
--void local_flush_tlb_one(unsigned long asid, unsigned long page)
-+void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
-+						 unsigned long page)
- {
- 	unsigned long addr, data;
- 
-@@ -91,7 +92,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
- 	 */
- 	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
- 	data = page | asid; /* VALID bit is off */
--	jump_to_P2();
-+	jump_to_uncached();
- 	ctrl_outl(data, addr);
--	back_to_P1();
-+	back_to_cached();
- }
-diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
-new file mode 100644
-index 0000000..f34274a
---- /dev/null
-+++ b/arch/sh/mm/tlb-sh5.c
-@@ -0,0 +1,164 @@
-+/*
-+ * arch/sh/mm/tlb-sh5.c
-+ *
-+ * Copyright (C) 2003  Paul Mundt <lethal at linux-sh.org>
-+ * Copyright (C) 2003  Richard Curnow <richard.curnow at superh.com>
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <asm/page.h>
-+#include <asm/tlb.h>
-+#include <asm/mmu_context.h>
-+
-+/**
-+ * sh64_tlb_init
-+ *
-+ * Perform initial setup for the DTLB and ITLB.
-+ */
-+int __init sh64_tlb_init(void)
-+{
-+	/* Assign some sane DTLB defaults */
-+	cpu_data->dtlb.entries	= 64;
-+	cpu_data->dtlb.step	= 0x10;
-+
-+	cpu_data->dtlb.first	= DTLB_FIXED | cpu_data->dtlb.step;
-+	cpu_data->dtlb.next	= cpu_data->dtlb.first;
-+
-+	cpu_data->dtlb.last	= DTLB_FIXED |
-+				  ((cpu_data->dtlb.entries - 1) *
-+				   cpu_data->dtlb.step);
-+
-+	/* And again for the ITLB */
-+	cpu_data->itlb.entries	= 64;
-+	cpu_data->itlb.step	= 0x10;
-+
-+	cpu_data->itlb.first	= ITLB_FIXED | cpu_data->itlb.step;
-+	cpu_data->itlb.next	= cpu_data->itlb.first;
-+	cpu_data->itlb.last	= ITLB_FIXED |
-+				  ((cpu_data->itlb.entries - 1) *
-+				   cpu_data->itlb.step);
-+
-+	return 0;
-+}
-+
-+/**
-+ * sh64_next_free_dtlb_entry
-+ *
-+ * Find the next available DTLB entry
-+ */
-+unsigned long long sh64_next_free_dtlb_entry(void)
-+{
-+	return cpu_data->dtlb.next;
-+}
-+
-+/**
-+ * sh64_get_wired_dtlb_entry
-+ *
-+ * Allocate a wired (locked-in) entry in the DTLB
-+ */
-+unsigned long long sh64_get_wired_dtlb_entry(void)
-+{
-+	unsigned long long entry = sh64_next_free_dtlb_entry();
-+
-+	cpu_data->dtlb.first += cpu_data->dtlb.step;
-+	cpu_data->dtlb.next  += cpu_data->dtlb.step;
-+
-+	return entry;
-+}
-+
-+/**
-+ * sh64_put_wired_dtlb_entry
-+ *
-+ * @entry:	Address of TLB slot.
-+ *
-+ * Free a wired (locked-in) entry in the DTLB.
-+ *
-+ * Works like a stack, last one to allocate must be first one to free.
-+ */
-+int sh64_put_wired_dtlb_entry(unsigned long long entry)
-+{
-+	__flush_tlb_slot(entry);
-+
-+	/*
-+	 * We don't do any particularly useful tracking of wired entries,
-+	 * so this approach works like a stack .. last one to be allocated
-+	 * has to be the first one to be freed.
-+	 *
-+	 * We could potentially load wired entries into a list and work on
-+	 * rebalancing the list periodically (which also entails moving the
-+	 * contents of a TLB entry) .. though I have a feeling that this is
-+	 * more trouble than it's worth.
-+	 */
-+
-+	/*
-+	 * Entry must be valid .. we don't want any ITLB addresses!
-+	 */
-+	if (entry <= DTLB_FIXED)
-+		return -EINVAL;
-+
-+	/*
-+	 * Next, check if we're within range to be freed. (ie, must be the
-+	 * entry beneath the first 'free' entry!
-+	 */
-+	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
-+		return -EINVAL;
-+
-+	/* If we are, then bring this entry back into the list */
-+	cpu_data->dtlb.first	-= cpu_data->dtlb.step;
-+	cpu_data->dtlb.next	= entry;
-+
-+	return 0;
-+}
-+
-+/**
-+ * sh64_setup_tlb_slot
-+ *
-+ * @config_addr:	Address of TLB slot.
-+ * @eaddr:		Virtual address.
-+ * @asid:		Address Space Identifier.
-+ * @paddr:		Physical address.
-+ *
-+ * Load up a virtual<->physical translation for @eaddr<->@paddr in the
-+ * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
-+ */
-+inline void sh64_setup_tlb_slot(unsigned long long config_addr,
-+				unsigned long eaddr,
-+				unsigned long asid,
-+				unsigned long paddr)
-+{
-+	unsigned long long pteh, ptel;
-+
-+	/* Sign extension */
-+#if (NEFF == 32)
-+	pteh = (unsigned long long)(signed long long)(signed long) eaddr;
-+#else
-+#error "Can't sign extend more than 32 bits yet"
-+#endif
-+	pteh &= PAGE_MASK;
-+	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
-+#if (NEFF == 32)
-+	ptel = (unsigned long long)(signed long long)(signed long) paddr;
-+#else
-+#error "Can't sign extend more than 32 bits yet"
-+#endif
-+	ptel &= PAGE_MASK;
-+	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
-+
-+	asm volatile("putcfg %0, 1, %1\n\t"
-+			"putcfg %0, 0, %2\n"
-+			: : "r" (config_addr), "r" (ptel), "r" (pteh));
-+}
-+
-+/**
-+ * sh64_teardown_tlb_slot
-+ *
-+ * @config_addr:	Address of TLB slot.
-+ *
-+ * Teardown any existing mapping in the TLB slot @config_addr.
-+ */
-+inline void sh64_teardown_tlb_slot(unsigned long long config_addr)
-+	__attribute__ ((alias("__flush_tlb_slot")));
-diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
-new file mode 100644
-index 0000000..6f45c1f
---- /dev/null
-+++ b/arch/sh/mm/tlbflush_32.c
-@@ -0,0 +1,140 @@
-+/*
-+ * TLB flushing operations for SH with an MMU.
-+ *
-+ *  Copyright (C) 1999  Niibe Yutaka
-+ *  Copyright (C) 2003  Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/mm.h>
-+#include <asm/mmu_context.h>
-+#include <asm/tlbflush.h>
-+
-+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-+{
-+	unsigned int cpu = smp_processor_id();
-+
-+	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
-+		unsigned long flags;
-+		unsigned long asid;
-+		unsigned long saved_asid = MMU_NO_ASID;
-+
-+		asid = cpu_asid(cpu, vma->vm_mm);
-+		page &= PAGE_MASK;
-+
-+		local_irq_save(flags);
-+		if (vma->vm_mm != current->mm) {
-+			saved_asid = get_asid();
-+			set_asid(asid);
-+		}
-+		local_flush_tlb_one(asid, page);
-+		if (saved_asid != MMU_NO_ASID)
-+			set_asid(saved_asid);
-+		local_irq_restore(flags);
-+	}
-+}
-+
-+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-+			   unsigned long end)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+	unsigned int cpu = smp_processor_id();
-+
-+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
-+		unsigned long flags;
-+		int size;
-+
-+		local_irq_save(flags);
-+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-+			cpu_context(cpu, mm) = NO_CONTEXT;
-+			if (mm == current->mm)
-+				activate_context(mm, cpu);
-+		} else {
-+			unsigned long asid;
-+			unsigned long saved_asid = MMU_NO_ASID;
-+
-+			asid = cpu_asid(cpu, mm);
-+			start &= PAGE_MASK;
-+			end += (PAGE_SIZE - 1);
-+			end &= PAGE_MASK;
-+			if (mm != current->mm) {
-+				saved_asid = get_asid();
-+				set_asid(asid);
-+			}
-+			while (start < end) {
-+				local_flush_tlb_one(asid, start);
-+				start += PAGE_SIZE;
-+			}
-+			if (saved_asid != MMU_NO_ASID)
-+				set_asid(saved_asid);
-+		}
-+		local_irq_restore(flags);
-+	}
-+}
-+
-+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-+{
-+	unsigned int cpu = smp_processor_id();
-+	unsigned long flags;
-+	int size;
-+
-+	local_irq_save(flags);
-+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-+	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-+		local_flush_tlb_all();
-+	} else {
-+		unsigned long asid;
-+		unsigned long saved_asid = get_asid();
-+
-+		asid = cpu_asid(cpu, &init_mm);
-+		start &= PAGE_MASK;
-+		end += (PAGE_SIZE - 1);
-+		end &= PAGE_MASK;
-+		set_asid(asid);
-+		while (start < end) {
-+			local_flush_tlb_one(asid, start);
-+			start += PAGE_SIZE;
-+		}
-+		set_asid(saved_asid);
-+	}
-+	local_irq_restore(flags);
-+}
-+
-+void local_flush_tlb_mm(struct mm_struct *mm)
-+{
-+	unsigned int cpu = smp_processor_id();
-+
-+	/* Invalidate all TLB of this process. */
-+	/* Instead of invalidating each TLB, we get new MMU context. */
-+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
-+		unsigned long flags;
-+
-+		local_irq_save(flags);
-+		cpu_context(cpu, mm) = NO_CONTEXT;
-+		if (mm == current->mm)
-+			activate_context(mm, cpu);
-+		local_irq_restore(flags);
-+	}
-+}
-+
-+void local_flush_tlb_all(void)
-+{
-+	unsigned long flags, status;
-+
-+	/*
-+	 * Flush all the TLB.
-+	 *
-+	 * Write to the MMU control register's bit:
-+	 *	TF-bit for SH-3, TI-bit for SH-4.
-+	 *      It's same position, bit #2.
-+	 */
-+	local_irq_save(flags);
-+	status = ctrl_inl(MMUCR);
-+	status |= 0x04;
-+	ctrl_outl(status, MMUCR);
-+	ctrl_barrier();
-+	local_irq_restore(flags);
-+}
-diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
-new file mode 100644
-index 0000000..2a98c9e
---- /dev/null
-+++ b/arch/sh/mm/tlbflush_64.c
-@@ -0,0 +1,475 @@
-+/*
-+ * arch/sh/mm/tlb-flush_64.c
-+ *
-+ * Copyright (C) 2000, 2001  Paolo Alberelli
-+ * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
-+ * Copyright (C) 2003  Paul Mundt
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ */
-+#include <linux/signal.h>
-+#include <linux/rwsem.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/interrupt.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/tlb.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgalloc.h>
-+#include <asm/mmu_context.h>
-+
-+extern void die(const char *,struct pt_regs *,long);
-+
-+#define PFLAG(val,flag)   (( (val) & (flag) ) ? #flag : "" )
-+#define PPROT(flag) PFLAG(pgprot_val(prot),flag)
-+
-+static inline void print_prots(pgprot_t prot)
-+{
-+	printk("prot is 0x%08lx\n",pgprot_val(prot));
-+
-+	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
-+	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
-+}
-+
-+static inline void print_vma(struct vm_area_struct *vma)
-+{
-+	printk("vma start 0x%08lx\n", vma->vm_start);
-+	printk("vma end   0x%08lx\n", vma->vm_end);
-+
-+	print_prots(vma->vm_page_prot);
-+	printk("vm_flags 0x%08lx\n", vma->vm_flags);
-+}
-+
-+static inline void print_task(struct task_struct *tsk)
-+{
-+	printk("Task pid %d\n", task_pid_nr(tsk));
-+}
-+
-+static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
-+{
-+	pgd_t *dir;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	pte_t entry;
-+
-+	dir = pgd_offset(mm, address);
-+	if (pgd_none(*dir))
-+		return NULL;
-+
-+	pud = pud_offset(dir, address);
-+	if (pud_none(*pud))
-+		return NULL;
-+
-+	pmd = pmd_offset(pud, address);
-+	if (pmd_none(*pmd))
-+		return NULL;
-+
-+	pte = pte_offset_kernel(pmd, address);
-+	entry = *pte;
-+	if (pte_none(entry) || !pte_present(entry))
-+		return NULL;
-+
-+	return pte;
-+}
-+
-+/*
-+ * This routine handles page faults.  It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ */
-+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-+			      unsigned long textaccess, unsigned long address)
-+{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	struct vm_area_struct * vma;
-+	const struct exception_table_entry *fixup;
-+	pte_t *pte;
-+	int fault;
-+
-+	/* SIM
-+	 * Note this is now called with interrupts still disabled
-+	 * This is to cope with being called for a missing IO port
-+	 * address with interrupts disabled. This should be fixed as
-+	 * soon as we have a better 'fast path' miss handler.
-+	 *
-+	 * Plus take care how you try and debug this stuff.
-+	 * For example, writing debug data to a port which you
-+	 * have just faulted on is not going to work.
-+	 */
-+
-+	tsk = current;
-+	mm = tsk->mm;
-+
-+	/* Not an IO address, so reenable interrupts */
-+	local_irq_enable();
-+
-+	/*
-+	 * If we're in an interrupt or have no user
-+	 * context, we must not take the fault..
-+	 */
-+	if (in_atomic() || !mm)
-+		goto no_context;
-+
-+	/* TLB misses upon some cache flushes get done under cli() */
-+	down_read(&mm->mmap_sem);
-+
-+	vma = find_vma(mm, address);
-+
-+	if (!vma) {
-+#ifdef DEBUG_FAULT
-+		print_task(tsk);
-+		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
-+		       __FUNCTION__,__LINE__,
-+		       address,regs->pc,textaccess,writeaccess);
-+		show_regs(regs);
-+#endif
-+		goto bad_area;
-+	}
-+	if (vma->vm_start <= address) {
-+		goto good_area;
-+	}
-+
-+	if (!(vma->vm_flags & VM_GROWSDOWN)) {
-+#ifdef DEBUG_FAULT
-+		print_task(tsk);
-+		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
-+		       __FUNCTION__,__LINE__,
-+		       address,regs->pc,textaccess,writeaccess);
-+		show_regs(regs);
-+
-+		print_vma(vma);
-+#endif
-+		goto bad_area;
-+	}
-+	if (expand_stack(vma, address)) {
-+#ifdef DEBUG_FAULT
-+		print_task(tsk);
-+		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
-+		       __FUNCTION__,__LINE__,
-+		       address,regs->pc,textaccess,writeaccess);
-+		show_regs(regs);
-+#endif
-+		goto bad_area;
-+	}
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+	if (textaccess) {
-+		if (!(vma->vm_flags & VM_EXEC))
-+			goto bad_area;
-+	} else {
-+		if (writeaccess) {
-+			if (!(vma->vm_flags & VM_WRITE))
-+				goto bad_area;
-+		} else {
-+			if (!(vma->vm_flags & VM_READ))
-+				goto bad_area;
-+		}
-+	}
-+
-+	/*
-+	 * If for any reason at all we couldn't handle the fault,
-+	 * make sure we exit gracefully rather than endlessly redo
-+	 * the fault.
-+	 */
-+survive:
-+	fault = handle_mm_fault(mm, vma, address, writeaccess);
-+	if (unlikely(fault & VM_FAULT_ERROR)) {
-+		if (fault & VM_FAULT_OOM)
-+			goto out_of_memory;
-+		else if (fault & VM_FAULT_SIGBUS)
-+			goto do_sigbus;
-+		BUG();
-+	}
-+	if (fault & VM_FAULT_MAJOR)
-+		tsk->maj_flt++;
-+	else
-+		tsk->min_flt++;
-+
-+	/* If we get here, the page fault has been handled.  Do the TLB refill
-+	   now from the newly-setup PTE, to avoid having to fault again right
-+	   away on the same instruction. */
-+	pte = lookup_pte (mm, address);
-+	if (!pte) {
-+		/* From empirical evidence, we can get here, due to
-+		   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
-+		   is swapped back out again before the process that wanted it
-+		   gets rescheduled?) */
-+		goto no_pte;
-+	}
-+
-+	__do_tlb_refill(address, textaccess, pte);
-+
-+no_pte:
-+
-+	up_read(&mm->mmap_sem);
-+	return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+#ifdef DEBUG_FAULT
-+	printk("fault:bad area\n");
-+#endif
-+	up_read(&mm->mmap_sem);
-+
-+	if (user_mode(regs)) {
-+		static int count=0;
-+		siginfo_t info;
-+		if (count < 4) {
-+			/* This is really to help debug faults when starting
-+			 * usermode, so only need a few */
-+			count++;
-+			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
-+				address, task_pid_nr(current), current->comm,
-+				(unsigned long) regs->pc);
-+#if 0
-+			show_regs(regs);
-+#endif
-+		}
-+		if (is_global_init(tsk)) {
-+			panic("INIT had user mode bad_area\n");
-+		}
-+		tsk->thread.address = address;
-+		tsk->thread.error_code = writeaccess;
-+		info.si_signo = SIGSEGV;
-+		info.si_errno = 0;
-+		info.si_addr = (void *) address;
-+		force_sig_info(SIGSEGV, &info, tsk);
-+		return;
-+	}
-+
-+no_context:
-+#ifdef DEBUG_FAULT
-+	printk("fault:No context\n");
-+#endif
-+	/* Are we prepared to handle this kernel fault?  */
-+	fixup = search_exception_tables(regs->pc);
-+	if (fixup) {
-+		regs->pc = fixup->fixup;
-+		return;
-+	}
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ *
-+ */
-+	if (address < PAGE_SIZE)
-+		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-+	else
-+		printk(KERN_ALERT "Unable to handle kernel paging request");
-+	printk(" at virtual address %08lx\n", address);
-+	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
-+	die("Oops", regs, writeaccess);
-+	do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+	if (is_global_init(current)) {
-+		panic("INIT out of memory\n");
-+		yield();
-+		goto survive;
-+	}
-+	printk("fault:Out of memory\n");
-+	up_read(&mm->mmap_sem);
-+	if (is_global_init(current)) {
-+		yield();
-+		down_read(&mm->mmap_sem);
-+		goto survive;
-+	}
-+	printk("VM: killing process %s\n", tsk->comm);
-+	if (user_mode(regs))
-+		do_group_exit(SIGKILL);
-+	goto no_context;
-+
-+do_sigbus:
-+	printk("fault:Do sigbus\n");
-+	up_read(&mm->mmap_sem);
-+
-+	/*
-+	 * Send a sigbus, regardless of whether we were in kernel
-+	 * or user mode.
-+	 */
-+	tsk->thread.address = address;
-+	tsk->thread.error_code = writeaccess;
-+	tsk->thread.trap_no = 14;
-+	force_sig(SIGBUS, tsk);
-+
-+	/* Kernel mode? Handle exceptions or die */
-+	if (!user_mode(regs))
-+		goto no_context;
-+}
-+
-+void update_mmu_cache(struct vm_area_struct * vma,
-+			unsigned long address, pte_t pte)
-+{
-+	/*
-+	 * This appears to get called once for every pte entry that gets
-+	 * established => I don't think it's efficient to try refilling the
-+	 * TLBs with the pages - some may not get accessed even.  Also, for
-+	 * executable pages, it is impossible to determine reliably here which
-+	 * TLB they should be mapped into (or both even).
-+	 *
-+	 * So, just do nothing here and handle faults on demand.  In the
-+	 * TLBMISS handling case, the refill is now done anyway after the pte
-+	 * has been fixed up, so that deals with most useful cases.
-+	 */
-+}
-+
-+void local_flush_tlb_one(unsigned long asid, unsigned long page)
-+{
-+	unsigned long long match, pteh=0, lpage;
-+	unsigned long tlb;
-+
-+	/*
-+	 * Sign-extend based on neff.
-+	 */
-+	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
-+	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
-+	match |= lpage;
-+
-+	for_each_itlb_entry(tlb) {
-+		asm volatile ("getcfg	%1, 0, %0"
-+			      : "=r" (pteh)
-+			      : "r" (tlb) );
-+
-+		if (pteh == match) {
-+			__flush_tlb_slot(tlb);
-+			break;
-+		}
-+	}
-+
-+	for_each_dtlb_entry(tlb) {
-+		asm volatile ("getcfg	%1, 0, %0"
-+			      : "=r" (pteh)
-+			      : "r" (tlb) );
-+
-+		if (pteh == match) {
-+			__flush_tlb_slot(tlb);
-+			break;
-+		}
-+
-+	}
-+}
-+
-+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-+{
-+	unsigned long flags;
-+
-+	if (vma->vm_mm) {
-+		page &= PAGE_MASK;
-+		local_irq_save(flags);
-+		local_flush_tlb_one(get_asid(), page);
-+		local_irq_restore(flags);
-+	}
-+}
-+
-+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-+			   unsigned long end)
-+{
-+	unsigned long flags;
-+	unsigned long long match, pteh=0, pteh_epn, pteh_low;
-+	unsigned long tlb;
-+	unsigned int cpu = smp_processor_id();
-+	struct mm_struct *mm;
-+
-+	mm = vma->vm_mm;
-+	if (cpu_context(cpu, mm) == NO_CONTEXT)
-+		return;
-+
-+	local_irq_save(flags);
-+
-+	start &= PAGE_MASK;
-+	end &= PAGE_MASK;
-+
-+	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
-+
-+	/* Flush ITLB */
-+	for_each_itlb_entry(tlb) {
-+		asm volatile ("getcfg	%1, 0, %0"
-+			      : "=r" (pteh)
-+			      : "r" (tlb) );
-+
-+		pteh_epn = pteh & PAGE_MASK;
-+		pteh_low = pteh & ~PAGE_MASK;
-+
-+		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
-+			__flush_tlb_slot(tlb);
-+	}
-+
-+	/* Flush DTLB */
-+	for_each_dtlb_entry(tlb) {
-+		asm volatile ("getcfg	%1, 0, %0"
-+			      : "=r" (pteh)
-+			      : "r" (tlb) );
-+
-+		pteh_epn = pteh & PAGE_MASK;
-+		pteh_low = pteh & ~PAGE_MASK;
-+
-+		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
-+			__flush_tlb_slot(tlb);
-+	}
-+
-+	local_irq_restore(flags);
-+}
-+
-+void local_flush_tlb_mm(struct mm_struct *mm)
-+{
-+	unsigned long flags;
-+	unsigned int cpu = smp_processor_id();
-+
-+	if (cpu_context(cpu, mm) == NO_CONTEXT)
-+		return;
-+
-+	local_irq_save(flags);
-+
-+	cpu_context(cpu, mm) = NO_CONTEXT;
-+	if (mm == current->mm)
-+		activate_context(mm, cpu);
-+
-+	local_irq_restore(flags);
-+}
-+
-+void local_flush_tlb_all(void)
-+{
-+	/* Invalidate all, including shared pages, excluding fixed TLBs */
-+	unsigned long flags, tlb;
-+
-+	local_irq_save(flags);
-+
-+	/* Flush each ITLB entry */
-+	for_each_itlb_entry(tlb)
-+		__flush_tlb_slot(tlb);
-+
-+	/* Flush each DTLB entry */
-+	for_each_dtlb_entry(tlb)
-+		__flush_tlb_slot(tlb);
-+
-+	local_irq_restore(flags);
-+}
-+
-+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-+{
-+        /* FIXME: Optimize this later.. */
-+        flush_tlb_all();
-+}
-diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
-index ff07169..2581067 100644
---- a/arch/sh/tools/mach-types
-+++ b/arch/sh/tools/mach-types
-@@ -29,7 +29,6 @@ HP6XX			SH_HP6XX
- DREAMCAST		SH_DREAMCAST
- MPC1211			SH_MPC1211
- SNAPGEAR		SH_SECUREEDGE5410
--HS7751RVOIP		SH_HS7751RVOIP
- EDOSK7705		SH_EDOSK7705
- SH4202_MICRODEV		SH_SH4202_MICRODEV
- SH03			SH_SH03
-@@ -45,3 +44,4 @@ X3PROTO			SH_X3PROTO
- MAGICPANELR2		SH_MAGIC_PANEL_R2
- R2D_PLUS		RTS7751R2D_PLUS
- R2D_1			RTS7751R2D_1
-+CAYMAN			SH_CAYMAN
-diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig
-deleted file mode 100644
-index 6884d5a..0000000
---- a/arch/sh64/Kconfig
-+++ /dev/null
-@@ -1,295 +0,0 @@
--#
--# For a description of the syntax of this configuration file,
--# see Documentation/kbuild/kconfig-language.txt.
--#
 -
--mainmenu "Linux/SH64 Kernel Configuration"
+-struct task_struct * alloc_task_struct(void)
+-{
+-	/* Get task descriptor pages */
+-	return (struct task_struct *)
+-		__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
+-}
 -
--config SUPERH
--	bool
--	default y
+-void free_task_struct(struct task_struct *p)
+-{
+-	free_pages((unsigned long) p, get_order(THREAD_SIZE));
+-}
 -
--config SUPERH64
--	bool
--	default y
+-/*
+- * Create a kernel thread
+- */
+-ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
+-{
+-	do_exit(fn(arg));
+-}
 -
--config MMU
--	bool
--	default y
+-/*
+- * This is the mechanism for creating a new kernel thread.
+- *
+- * NOTE! Only a kernel-only process(ie the swapper or direct descendants
+- * who haven't done an "execve()") should use this: it will work within
+- * a system call from a "real" process, but the process memory space will
+- * not be freed until both the parent and the child have exited.
+- */
+-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+-{
+-	struct pt_regs regs;
 -
--config QUICKLIST
--	def_bool y
+-	memset(&regs, 0, sizeof(regs));
+-	regs.regs[2] = (unsigned long)arg;
+-	regs.regs[3] = (unsigned long)fn;
 -
--config RWSEM_GENERIC_SPINLOCK
--	bool
--	default y
+-	regs.pc = (unsigned long)kernel_thread_helper;
+-	regs.sr = (1 << 30);
 -
--config GENERIC_FIND_NEXT_BIT
--	bool
--	default y
+-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+-		       &regs, 0, NULL, NULL);
+-}
 -
--config GENERIC_HWEIGHT
--	bool
--	default y
+-/*
+- * Free current thread data structures etc..
+- */
+-void exit_thread(void)
+-{
+-	/* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.
 -
--config GENERIC_CALIBRATE_DELAY
--	bool
--	default y
+-	   The SH-5 FPU save/restore approach relies on last_task_used_math
+-	   pointing to a live task_struct.  When another task tries to use the
+-	   FPU for the 1st time, the FPUDIS trap handling (see
+-	   arch/sh64/kernel/fpu.c) will save the existing FPU state to the
+-	   FP regs field within last_task_used_math before re-loading the new
+-	   task's FPU state (or initialising it if the FPU has been used
+-	   before).  So if last_task_used_math is stale, and its page has already been
+-	   re-allocated for another use, the consequences are rather grim. Unless we
+-	   null it here, there is no other path through which it would get safely
+-	   nulled. */
 -
--config GENERIC_HARDIRQS
--	bool
--	default y
+-#ifdef CONFIG_SH_FPU
+-	if (last_task_used_math == current) {
+-		last_task_used_math = NULL;
+-	}
+-#endif
+-}
 -
--config GENERIC_IRQ_PROBE
--	bool
--	default y
+-void flush_thread(void)
+-{
 -
--config RWSEM_XCHGADD_ALGORITHM
--	bool
+-	/* Called by fs/exec.c (flush_old_exec) to remove traces of a
+-	 * previously running executable. */
+-#ifdef CONFIG_SH_FPU
+-	if (last_task_used_math == current) {
+-		last_task_used_math = NULL;
+-	}
+-	/* Force FPU state to be reinitialised after exec */
+-	clear_used_math();
+-#endif
 -
--config ARCH_HAS_ILOG2_U32
--	bool
--	default n
+-	/* if we are a kernel thread, about to change to user thread,
+-         * update kreg
+-         */
+-	if(current->thread.kregs==&fake_swapper_regs) {
+-          current->thread.kregs =
+-             ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
+-	  current->thread.uregs = current->thread.kregs;
+-	}
+-}
 -
--config ARCH_HAS_ILOG2_U64
--	bool
--	default n
+-void release_thread(struct task_struct *dead_task)
+-{
+-	/* do nothing */
+-}
 -
--config ARCH_NO_VIRT_TO_BUS
--	def_bool y
+-/* Fill in the fpu structure for a core dump.. */
+-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+-{
+-#ifdef CONFIG_SH_FPU
+-	int fpvalid;
+-	struct task_struct *tsk = current;
 -
--source init/Kconfig
+-	fpvalid = !!tsk_used_math(tsk);
+-	if (fpvalid) {
+-		if (current == last_task_used_math) {
+-			grab_fpu();
+-			fpsave(&tsk->thread.fpu.hard);
+-			release_fpu();
+-			last_task_used_math = 0;
+-			regs->sr |= SR_FD;
+-		}
 -
--menu "System type"
+-		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+-	}
 -
--choice
--	prompt "SuperH system type"
--	default SH_SIMULATOR
+-	return fpvalid;
+-#else
+-	return 0; /* Task didn't use the fpu at all. */
+-#endif
+-}
 -
--config SH_SIMULATOR
--	bool "Simulator"
+-asmlinkage void ret_from_fork(void);
 -
--config SH_CAYMAN
--	bool "Cayman"
+-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+-		unsigned long unused,
+-		struct task_struct *p, struct pt_regs *regs)
+-{
+-	struct pt_regs *childregs;
+-	unsigned long long se;			/* Sign extension */
 -
--config SH_HARP
--	bool "ST50-Harp"
+-#ifdef CONFIG_SH_FPU
+-	if(last_task_used_math == current) {
+-		grab_fpu();
+-		fpsave(&current->thread.fpu.hard);
+-		release_fpu();
+-		last_task_used_math = NULL;
+-		regs->sr |= SR_FD;
+-	}
+-#endif
+-	/* Copy from sh version */
+-	childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
 -
--endchoice
+-	*childregs = *regs;
 -
--choice
--	prompt "Processor family"
--	default CPU_SH5
+-	if (user_mode(regs)) {
+-		childregs->regs[15] = usp;
+-		p->thread.uregs = childregs;
+-	} else {
+-		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+-	}
 -
--config CPU_SH5
--	bool "SH-5"
+-	childregs->regs[9] = 0; /* Set return value for child */
+-	childregs->sr |= SR_FD; /* Invalidate FPU flag */
 -
--endchoice
+-	p->thread.sp = (unsigned long) childregs;
+-	p->thread.pc = (unsigned long) ret_from_fork;
 -
--choice
--	prompt "Processor type"
+-	/*
+-	 * Sign extend the edited stack.
+-         * Note that thread.pc and thread.pc will stay
+-	 * 32-bit wide and context switch must take care
+-	 * of NEFF sign extension.
+-	 */
 -
--config CPU_SUBTYPE_SH5_101
--	bool "SH5-101"
--	depends on CPU_SH5
+-	se = childregs->regs[15];
+-	se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
+-	childregs->regs[15] = se;
 -
--config CPU_SUBTYPE_SH5_103
--	bool "SH5-103"
--	depends on CPU_SH5
+-	return 0;
+-}
 -
--endchoice
+-asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
+-			unsigned long r4, unsigned long r5,
+-			unsigned long r6, unsigned long r7,
+-			struct pt_regs *pregs)
+-{
+-	return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
+-}
 -
--choice
--	prompt "Endianness"
--	default LITTLE_ENDIAN
+-asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+-			 unsigned long r4, unsigned long r5,
+-			 unsigned long r6, unsigned long r7,
+-			 struct pt_regs *pregs)
+-{
+-	if (!newsp)
+-		newsp = pregs->regs[15];
+-	return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
+-}
 -
--config LITTLE_ENDIAN
--	bool "Little-Endian"
+-/*
+- * This is trivial, and on the face of it looks like it
+- * could equally well be done in user mode.
+- *
+- * Not so, for quite unobvious reasons - register pressure.
+- * In user mode vfork() cannot have a stack frame, and if
+- * done by calling the "clone()" system call directly, you
+- * do not have enough call-clobbered registers to hold all
+- * the information you need.
+- */
+-asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
+-			 unsigned long r4, unsigned long r5,
+-			 unsigned long r6, unsigned long r7,
+-			 struct pt_regs *pregs)
+-{
+-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
+-}
 -
--config BIG_ENDIAN
--	bool "Big-Endian"
+-/*
+- * sys_execve() executes a new program.
+- */
+-asmlinkage int sys_execve(char *ufilename, char **uargv,
+-			  char **uenvp, unsigned long r5,
+-			  unsigned long r6, unsigned long r7,
+-			  struct pt_regs *pregs)
+-{
+-	int error;
+-	char *filename;
 -
--endchoice
+-	lock_kernel();
+-	filename = getname((char __user *)ufilename);
+-	error = PTR_ERR(filename);
+-	if (IS_ERR(filename))
+-		goto out;
 -
--config SH_FPU
--	bool "FPU support"
--	default y
+-	error = do_execve(filename,
+-			  (char __user * __user *)uargv,
+-			  (char __user * __user *)uenvp,
+-			  pregs);
+-	if (error == 0) {
+-		task_lock(current);
+-		current->ptrace &= ~PT_DTRACE;
+-		task_unlock(current);
+-	}
+-	putname(filename);
+-out:
+-	unlock_kernel();
+-	return error;
+-}
 -
--config SH64_FPU_DENORM_FLUSH
--	depends on SH_FPU
--	bool "Flush floating point denorms to zero"
+-/*
+- * These bracket the sleeping functions..
+- */
+-extern void interruptible_sleep_on(wait_queue_head_t *q);
+-
+-#define mid_sched	((unsigned long) interruptible_sleep_on)
+-
+-static int in_sh64_switch_to(unsigned long pc)
+-{
+-	extern char __sh64_switch_to_end;
+-	/* For a sleeping task, the PC is somewhere in the middle of the function,
+-	   so we don't have to worry about masking the LSB off */
+-	return (pc >= (unsigned long) sh64_switch_to) &&
+-	       (pc < (unsigned long) &__sh64_switch_to_end);
+-}
+-
+-unsigned long get_wchan(struct task_struct *p)
+-{
+-	unsigned long schedule_fp;
+-	unsigned long sh64_switch_to_fp;
+-	unsigned long schedule_caller_pc;
+-	unsigned long pc;
+-
+-	if (!p || p == current || p->state == TASK_RUNNING)
+-		return 0;
+-
+-	/*
+-	 * The same comment as on the Alpha applies here, too ...
+-	 */
+-	pc = thread_saved_pc(p);
+-
+-#ifdef CONFIG_FRAME_POINTER
+-	if (in_sh64_switch_to(pc)) {
+-		sh64_switch_to_fp = (long) p->thread.sp;
+-		/* r14 is saved at offset 4 in the sh64_switch_to frame */
+-		schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
+-
+-		/* and the caller of 'schedule' is (currently!) saved at offset 24
+-		   in the frame of schedule (from disasm) */
+-		schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
+-		return schedule_caller_pc;
+-	}
+-#endif
+-	return pc;
+-}
+-
+-/* Provide a /proc/asids file that lists out the
+-   ASIDs currently associated with the processes.  (If the DM.PC register is
+-   examined through the debug link, this shows ASID + PC.  To make use of this,
+-   the PID->ASID relationship needs to be known.  This is primarily for
+-   debugging.)
+-   */
+-
+-#if defined(CONFIG_SH64_PROC_ASIDS)
+-static int
+-asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
+-{
+-	int len=0;
+-	struct task_struct *p;
+-	read_lock(&tasklist_lock);
+-	for_each_process(p) {
+-		int pid = p->pid;
+-		struct mm_struct *mm;
+-		if (!pid) continue;
+-		mm = p->mm;
+-		if (mm) {
+-			unsigned long asid, context;
+-			context = mm->context;
+-			asid = (context & 0xff);
+-			len += sprintf(buf+len, "%5d : %02lx\n", pid, asid);
+-		} else {
+-			len += sprintf(buf+len, "%5d : (none)\n", pid);
+-		}
+-	}
+-	read_unlock(&tasklist_lock);
+-	*eof = 1;
+-	return len;
+-}
+-
+-static int __init register_proc_asids(void)
+-{
+-	create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
+-	return 0;
+-}
+-__initcall(register_proc_asids);
+-#endif
+diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
+deleted file mode 100644
+index 8a2d339..0000000
+--- a/arch/sh64/kernel/ptrace.c
++++ /dev/null
+@@ -1,332 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/ptrace.c
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003  Paul Mundt
+- *
+- * Started from SH3/4 version:
+- *   SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
+- *
+- *   Original x86 implementation:
+- *	By Ross Biro 1/23/92
+- *	edited by Linus Torvalds
+- *
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/smp_lock.h>
+-#include <linux/errno.h>
+-#include <linux/ptrace.h>
+-#include <linux/user.h>
+-#include <linux/signal.h>
+-#include <linux/syscalls.h>
+-
+-#include <asm/io.h>
+-#include <asm/uaccess.h>
+-#include <asm/pgtable.h>
+-#include <asm/system.h>
+-#include <asm/processor.h>
+-#include <asm/mmu_context.h>
+-
+-/* This mask defines the bits of the SR which the user is not allowed to
+-   change, which are everything except S, Q, M, PR, SZ, FR. */
+-#define SR_MASK      (0xffff8cfd)
+-
+-/*
+- * does not yet catch signals sent when the child dies.
+- * in exit.c or in signal.c.
+- */
+-
+-/*
+- * This routine will get a word from the user area in the process kernel stack.
+- */
+-static inline int get_stack_long(struct task_struct *task, int offset)
+-{
+-	unsigned char *stack;
+-
+-	stack = (unsigned char *)(task->thread.uregs);
+-	stack += offset;
+-	return (*((int *)stack));
+-}
+-
+-static inline unsigned long
+-get_fpu_long(struct task_struct *task, unsigned long addr)
+-{
+-	unsigned long tmp;
+-	struct pt_regs *regs;
+-	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
+-
+-	if (!tsk_used_math(task)) {
+-		if (addr == offsetof(struct user_fpu_struct, fpscr)) {
+-			tmp = FPSCR_INIT;
+-		} else {
+-			tmp = 0xffffffffUL; /* matches initial value in fpu.c */
+-		}
+-		return tmp;
+-	}
 -
--choice
--	prompt "Page table levels"
--	default SH64_PGTABLE_2_LEVEL
+-	if (last_task_used_math == task) {
+-		grab_fpu();
+-		fpsave(&task->thread.fpu.hard);
+-		release_fpu();
+-		last_task_used_math = 0;
+-		regs->sr |= SR_FD;
+-	}
 -
--config SH64_PGTABLE_2_LEVEL
--	bool "2"
+-	tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
+-	return tmp;
+-}
 -
--config SH64_PGTABLE_3_LEVEL
--	bool "3"
+-/*
+- * This routine will put a word into the user area in the process kernel stack.
+- */
+-static inline int put_stack_long(struct task_struct *task, int offset,
+-				 unsigned long data)
+-{
+-	unsigned char *stack;
 -
--endchoice
+-	stack = (unsigned char *)(task->thread.uregs);
+-	stack += offset;
+-	*(unsigned long *) stack = data;
+-	return 0;
+-}
 -
--choice
--	prompt "HugeTLB page size"
--	depends on HUGETLB_PAGE && MMU
--	default HUGETLB_PAGE_SIZE_64K
+-static inline int
+-put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
+-{
+-	struct pt_regs *regs;
 -
--config HUGETLB_PAGE_SIZE_64K
--	bool "64K"
+-	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
 -
--config HUGETLB_PAGE_SIZE_1MB
--	bool "1MB"
+-	if (!tsk_used_math(task)) {
+-		fpinit(&task->thread.fpu.hard);
+-		set_stopped_child_used_math(task);
+-	} else if (last_task_used_math == task) {
+-		grab_fpu();
+-		fpsave(&task->thread.fpu.hard);
+-		release_fpu();
+-		last_task_used_math = 0;
+-		regs->sr |= SR_FD;
+-	}
 -
--config HUGETLB_PAGE_SIZE_512MB
--	bool "512MB"
+-	((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
+-	return 0;
+-}
 -
--endchoice
 -
--config SH64_USER_MISALIGNED_FIXUP
--	bool "Fixup misaligned loads/stores occurring in user mode"
+-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+-{
+-	int ret;
 -
--comment "Memory options"
+-	switch (request) {
+-	/* when I and D space are separate, these will need to be fixed. */
+-	case PTRACE_PEEKTEXT: /* read word at location addr. */
+-	case PTRACE_PEEKDATA:
+-		ret = generic_ptrace_peekdata(child, addr, data);
+-		break;
 -
--config CACHED_MEMORY_OFFSET
--	hex "Cached Area Offset"
--	default "20000000"
+-	/* read the word at location addr in the USER area. */
+-	case PTRACE_PEEKUSR: {
+-		unsigned long tmp;
 -
--config MEMORY_START
--	hex "Physical memory start address"
--	default "80000000"
+-		ret = -EIO;
+-		if ((addr & 3) || addr < 0)
+-			break;
 -
--config MEMORY_SIZE_IN_MB
--	int "Memory size (in MB)"
--	default "8" if SH_SIMULATOR
--	default "64"
+-		if (addr < sizeof(struct pt_regs))
+-			tmp = get_stack_long(child, addr);
+-		else if ((addr >= offsetof(struct user, fpu)) &&
+-			 (addr <  offsetof(struct user, u_fpvalid))) {
+-			tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+-		} else if (addr == offsetof(struct user, u_fpvalid)) {
+-			tmp = !!tsk_used_math(child);
+-		} else {
+-			break;
+-		}
+-		ret = put_user(tmp, (unsigned long *)data);
+-		break;
+-	}
 -
--comment "Cache options"
+-	/* when I and D space are separate, this will have to be fixed. */
+-	case PTRACE_POKETEXT: /* write the word at location addr. */
+-	case PTRACE_POKEDATA:
+-		ret = generic_ptrace_pokedata(child, addr, data);
+-		break;
 -
--choice
--	prompt "DCache mode"
--	default DCACHE_DISABLED if SH_SIMULATOR
--	default DCACHE_WRITE_BACK
+-	case PTRACE_POKEUSR:
+-                /* write the word at location addr in the USER area. We must
+-                   disallow any changes to certain SR bits or u_fpvalid, since
+-                   this could crash the kernel or result in a security
+-                   loophole. */
+-		ret = -EIO;
+-		if ((addr & 3) || addr < 0)
+-			break;
 -
--config DCACHE_WRITE_BACK
--	bool "Write-back"
--	depends on !SH_SIMULATOR
+-		if (addr < sizeof(struct pt_regs)) {
+-			/* Ignore change of top 32 bits of SR */
+-			if (addr == offsetof (struct pt_regs, sr)+4)
+-			{
+-				ret = 0;
+-				break;
+-			}
+-			/* If lower 32 bits of SR, ignore non-user bits */
+-			if (addr == offsetof (struct pt_regs, sr))
+-			{
+-				long cursr = get_stack_long(child, addr);
+-				data &= ~(SR_MASK);
+-				data |= (cursr & SR_MASK);
+-			}
+-			ret = put_stack_long(child, addr, data);
+-		}
+-		else if ((addr >= offsetof(struct user, fpu)) &&
+-			 (addr <  offsetof(struct user, u_fpvalid))) {
+-			ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
+-		}
+-		break;
 -
--config DCACHE_WRITE_THROUGH
--	bool "Write-through"
--	depends on !SH_SIMULATOR
+-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+-	case PTRACE_CONT: { /* restart after signal. */
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		if (request == PTRACE_SYSCALL)
+-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		else
+-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		child->exit_code = data;
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
+-	}
 -
--config DCACHE_DISABLED
--	bool "Disabled"
+-/*
+- * make the child exit.  Best I can do is send it a sigkill.
+- * perhaps it should be put in the status that it wants to
+- * exit.
+- */
+-	case PTRACE_KILL: {
+-		ret = 0;
+-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
+-			break;
+-		child->exit_code = SIGKILL;
+-		wake_up_process(child);
+-		break;
+-	}
 -
--endchoice
+-	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
+-		struct pt_regs *regs;
 -
--config ICACHE_DISABLED
--	bool "ICache Disabling"
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		if ((child->ptrace & PT_DTRACE) == 0) {
+-			/* Spurious delayed TF traps may occur */
+-			child->ptrace |= PT_DTRACE;
+-		}
 -
--config PCIDEVICE_MEMORY_START
--	hex
--	default "C0000000"
+-		regs = child->thread.uregs;
 -
--config DEVICE_MEMORY_START
--	hex
--	default "E0000000"
+-		regs->sr |= SR_SSTEP;	/* auto-resetting upon exception */
 -
--config FLASH_MEMORY_START
--	hex "Flash memory/on-chip devices start address"
--	default "00000000"
+-		child->exit_code = data;
+-		/* give it a chance to run. */
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
+-	}
 -
--config PCI_BLOCK_START
--	hex "PCI block start address"
--	default "40000000"
+-	default:
+-		ret = ptrace_request(child, request, addr, data);
+-		break;
+-	}
+-	return ret;
+-}
 -
--comment "CPU Subtype specific options"
+-asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+-{
+-	extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
+-#define WPC_DBRMODE 0x0d104008
+-	static int first_call = 1;
 -
--config SH64_ID2815_WORKAROUND
--	bool "Include workaround for SH5-101 cut2 silicon defect ID2815"
+-	lock_kernel();
+-	if (first_call) {
+-		/* Set WPC.DBRMODE to 0.  This makes all debug events get
+-		 * delivered through RESVEC, i.e. into the handlers in entry.S.
+-		 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
+-		 * would normally be left set to 1, which makes debug events get
+-		 * delivered through DBRVEC, i.e. into the remote gdb's
+-		 * handlers.  This prevents ptrace getting them, and confuses
+-		 * the remote gdb.) */
+-		printk("DBRMODE set to 0 to permit native debugging\n");
+-		poke_real_address_q(WPC_DBRMODE, 0);
+-		first_call = 0;
+-	}
+-	unlock_kernel();
 -
--comment "Misc options"
+-	return sys_ptrace(request, pid, addr, data);
+-}
 -
--config HEARTBEAT
--	bool "Heartbeat LED"
--	depends on SH_CAYMAN
+-asmlinkage void syscall_trace(void)
+-{
+-	struct task_struct *tsk = current;
 -
--config HDSP253_LED
--	bool "Support for HDSP-253 LED"
--	depends on SH_CAYMAN
+-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
+-		return;
+-	if (!(tsk->ptrace & PT_PTRACED))
+-		return;
 -
--config SH_DMA
--	tristate "DMA controller (DMAC) support"
+-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+-				 ? 0x80 : 0));
+-	/*
+-	 * this isn't the same as continuing with a signal, but it will do
+-	 * for normal use.  strace only continues with a signal if the
+-	 * stopping signal is not SIGTRAP.  -brl
+-	 */
+-	if (tsk->exit_code) {
+-		send_sig(tsk->exit_code, tsk, 1);
+-		tsk->exit_code = 0;
+-	}
+-}
 -
--config PREEMPT
--	bool "Preemptible Kernel (EXPERIMENTAL)"
--	depends on EXPERIMENTAL
+-/* Called with interrupts disabled */
+-asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
+-{
+-	/* This is called after a single step exception (DEBUGSS).
+-	   There is no need to change the PC, as it is a post-execution
+-	   exception, as entry.S does not do anything to the PC for DEBUGSS.
+-	   We need to clear the Single Step setting in SR to avoid
+-	   continually stepping. */
+-	local_irq_enable();
+-	regs->sr &= ~SR_SSTEP;
+-	force_sig(SIGTRAP, current);
+-}
 -
--source "mm/Kconfig"
+-/* Called with interrupts disabled */
+-asmlinkage void do_software_break_point(unsigned long long vec,
+-					struct pt_regs *regs)
+-{
+-	/* We need to forward step the PC, to counteract the backstep done
+-	   in signal.c. */
+-	local_irq_enable();
+-	force_sig(SIGTRAP, current);
+-	regs->pc += 4;
+-}
 -
--endmenu
+-/*
+- * Called by kernel/ptrace.c when detaching..
+- *
+- * Make sure single step bits etc are not set.
+- */
+-void ptrace_disable(struct task_struct *child)
+-{
+-        /* nothing to do.. */
+-}
+diff --git a/arch/sh64/kernel/semaphore.c b/arch/sh64/kernel/semaphore.c
+deleted file mode 100644
+index 72c1653..0000000
+--- a/arch/sh64/kernel/semaphore.c
++++ /dev/null
+@@ -1,140 +0,0 @@
+-/*
+- * Just taken from alpha implementation.
+- * This can't work well, perhaps.
+- */
+-/*
+- *  Generic semaphore code. Buyer beware. Do your own
+- * specific changes in <asm/semaphore-helper.h>
+- */
 -
--menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
+-#include <linux/errno.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/wait.h>
+-#include <linux/init.h>
+-#include <asm/semaphore.h>
+-#include <asm/semaphore-helper.h>
 -
--config ISA
--	bool
+-spinlock_t semaphore_wake_lock;
 -
--config SBUS
--	bool
+-/*
+- * Semaphores are implemented using a two-way counter:
+- * The "count" variable is decremented for each process
+- * that tries to sleep, while the "waking" variable is
+- * incremented when the "up()" code goes to wake up waiting
+- * processes.
+- *
+- * Notably, the inline "up()" and "down()" functions can
+- * efficiently test if they need to do any extra work (up
+- * needs to do something only if count was negative before
+- * the increment operation.
+- *
+- * waking_non_zero() (from asm/semaphore.h) must execute
+- * atomically.
+- *
+- * When __up() is called, the count was negative before
+- * incrementing it, and we need to wake up somebody.
+- *
+- * This routine adds one to the count of processes that need to
+- * wake up and exit.  ALL waiting processes actually wake up but
+- * only the one that gets to the "waking" field first will gate
+- * through and acquire the semaphore.  The others will go back
+- * to sleep.
+- *
+- * Note that these functions are only called when there is
+- * contention on the lock, and as such all this is the
+- * "non-critical" part of the whole semaphore business. The
+- * critical part is the inline stuff in <asm/semaphore.h>
+- * where we want to avoid any extra jumps and calls.
+- */
+-void __up(struct semaphore *sem)
+-{
+-	wake_one_more(sem);
+-	wake_up(&sem->wait);
+-}
 -
--config PCI
--	bool "PCI support"
--	depends on SH_CAYMAN
--	help
--	  Find out whether you have a PCI motherboard. PCI is the name of a
--	  bus system, i.e. the way the CPU talks to the other stuff inside
--	  your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
--	  VESA. If you have PCI, say Y, otherwise N.
+-/*
+- * Perform the "down" function.  Return zero for semaphore acquired,
+- * return negative for signalled out of the function.
+- *
+- * If called from __down, the return is ignored and the wait loop is
+- * not interruptible.  This means that a task waiting on a semaphore
+- * using "down()" cannot be killed until someone does an "up()" on
+- * the semaphore.
+- *
+- * If called from __down_interruptible, the return value gets checked
+- * upon return.  If the return value is negative then the task continues
+- * with the negative value in the return register (it can be tested by
+- * the caller).
+- *
+- * Either form may be used in conjunction with "up()".
+- *
+- */
 -
--	  The PCI-HOWTO, available from
--	  <http://www.tldp.org/docs.html#howto>, contains valuable
--	  information about which PCI hardware does work under Linux and which
--	  doesn't.
+-#define DOWN_VAR				\
+-	struct task_struct *tsk = current;	\
+-	wait_queue_t wait;			\
+-	init_waitqueue_entry(&wait, tsk);
 -
--config SH_PCIDMA_NONCOHERENT
--	bool "Cache and PCI noncoherent"
--	depends on PCI
--	default y
--	help
--	  Enable this option if your platform does not have a CPU cache which
--	  remains coherent with PCI DMA. It is safest to say 'Y', although you
--	  will see better performance if you can say 'N', because the PCI DMA
--	  code will not have to flush the CPU's caches. If you have a PCI host
--	  bridge integrated with your SH CPU, refer carefully to the chip specs
--	  to see if you can say 'N' here. Otherwise, leave it as 'Y'.
+-#define DOWN_HEAD(task_state)						\
+-									\
+-									\
+-	tsk->state = (task_state);					\
+-	add_wait_queue(&sem->wait, &wait);				\
+-									\
+-	/*								\
+-	 * Ok, we're set up.  sem->count is known to be less than zero	\
+-	 * so we must wait.						\
+-	 *								\
+-	 * We can let go the lock for purposes of waiting.		\
+-	 * We re-acquire it after awaking so as to protect		\
+-	 * all semaphore operations.					\
+-	 *								\
+-	 * If "up()" is called before we call waking_non_zero() then	\
+-	 * we will catch it right away.  If it is called later then	\
+-	 * we will have to go through a wakeup cycle to catch it.	\
+-	 *								\
+-	 * Multiple waiters contend for the semaphore lock to see	\
+-	 * who gets to gate through and who has to wait some more.	\
+-	 */								\
+-	for (;;) {
 -
--source "drivers/pci/Kconfig"
+-#define DOWN_TAIL(task_state)			\
+-		tsk->state = (task_state);	\
+-	}					\
+-	tsk->state = TASK_RUNNING;		\
+-	remove_wait_queue(&sem->wait, &wait);
 -
--source "drivers/pcmcia/Kconfig"
+-void __sched __down(struct semaphore * sem)
+-{
+-	DOWN_VAR
+-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+-	if (waking_non_zero(sem))
+-		break;
+-	schedule();
+-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+-}
 -
--source "drivers/pci/hotplug/Kconfig"
+-int __sched __down_interruptible(struct semaphore * sem)
+-{
+-	int ret = 0;
+-	DOWN_VAR
+-	DOWN_HEAD(TASK_INTERRUPTIBLE)
 -
--endmenu
+-	ret = waking_non_zero_interruptible(sem, tsk);
+-	if (ret)
+-	{
+-		if (ret == 1)
+-			/* ret != 0 only if we get interrupted -arca */
+-			ret = 0;
+-		break;
+-	}
+-	schedule();
+-	DOWN_TAIL(TASK_INTERRUPTIBLE)
+-	return ret;
+-}
 -
--menu "Executable file formats"
+-int __down_trylock(struct semaphore * sem)
+-{
+-	return waking_non_zero_trylock(sem);
+-}
+diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
+deleted file mode 100644
+index 2b7264c..0000000
+--- a/arch/sh64/kernel/setup.c
++++ /dev/null
+@@ -1,379 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/setup.c
+- *
+- * sh64 Arch Support
+- *
+- * This file handles the architecture-dependent parts of initialization
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003, 2004  Paul Mundt
+- *
+- * benedict.gaster at superh.com:   2nd May 2002
+- *    Modified to use the empty_zero_page to pass command line arguments.
+- *
+- * benedict.gaster at superh.com:	 3rd May 2002
+- *    Added support for ramdisk, removing statically linked romfs at the same time.
+- *
+- * lethal at linux-sh.org:          15th May 2003
+- *    Added generic procfs cpuinfo reporting. Make boards just export their name.
+- *
+- * lethal at linux-sh.org:          25th May 2003
+- *    Added generic get_cpu_subtype() for subtype reporting from cpu_data->type.
+- *
+- */
+-#include <linux/errno.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/stddef.h>
+-#include <linux/unistd.h>
+-#include <linux/ptrace.h>
+-#include <linux/slab.h>
+-#include <linux/user.h>
+-#include <linux/a.out.h>
+-#include <linux/screen_info.h>
+-#include <linux/ioport.h>
+-#include <linux/delay.h>
+-#include <linux/init.h>
+-#include <linux/seq_file.h>
+-#include <linux/blkdev.h>
+-#include <linux/bootmem.h>
+-#include <linux/console.h>
+-#include <linux/root_dev.h>
+-#include <linux/cpu.h>
+-#include <linux/initrd.h>
+-#include <linux/pfn.h>
+-#include <asm/processor.h>
+-#include <asm/page.h>
+-#include <asm/pgtable.h>
+-#include <asm/platform.h>
+-#include <asm/uaccess.h>
+-#include <asm/system.h>
+-#include <asm/io.h>
+-#include <asm/sections.h>
+-#include <asm/setup.h>
+-#include <asm/smp.h>
 -
--source "fs/Kconfig.binfmt"
+-struct screen_info screen_info;
 -
--endmenu
+-#ifdef CONFIG_BLK_DEV_RAM
+-extern int rd_doload;		/* 1 = load ramdisk, 0 = don't load */
+-extern int rd_prompt;		/* 1 = prompt for ramdisk, 0 = don't prompt */
+-extern int rd_image_start;	/* starting block # of image */
+-#endif
 -
--source "net/Kconfig"
+-extern int root_mountflags;
+-extern char *get_system_type(void);
+-extern void platform_setup(void);
+-extern void platform_monitor(void);
+-extern void platform_reserve(void);
+-extern int sh64_cache_init(void);
+-extern int sh64_tlb_init(void);
 -
--source "drivers/Kconfig"
+-#define RAMDISK_IMAGE_START_MASK	0x07FF
+-#define RAMDISK_PROMPT_FLAG		0x8000
+-#define RAMDISK_LOAD_FLAG		0x4000
 -
--source "fs/Kconfig"
+-static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };
+-unsigned long long memory_start = CONFIG_MEMORY_START;
+-unsigned long long memory_end = CONFIG_MEMORY_START + (CONFIG_MEMORY_SIZE_IN_MB * 1024 * 1024);
 -
--source "kernel/Kconfig.instrumentation"
+-struct sh_cpuinfo boot_cpu_data;
 -
--source "arch/sh64/Kconfig.debug"
+-static inline void parse_mem_cmdline (char ** cmdline_p)
+-{
+-        char c = ' ', *to = command_line, *from = COMMAND_LINE;
+-	int len = 0;
 -
--source "security/Kconfig"
+-	/* Save unparsed command line copy for /proc/cmdline */
+-	memcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+-	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
 -
--source "crypto/Kconfig"
+-	for (;;) {
+-	  /*
+-	   * "mem=XXX[kKmM]" defines a size of memory.
+-	   */
+-	        if (c == ' ' && !memcmp(from, "mem=", 4)) {
+-		      if (to != command_line)
+-			to--;
+-		      {
+-			unsigned long mem_size;
 -
--source "lib/Kconfig"
-diff --git a/arch/sh64/Kconfig.debug b/arch/sh64/Kconfig.debug
-deleted file mode 100644
-index 05c07c4..0000000
---- a/arch/sh64/Kconfig.debug
-+++ /dev/null
-@@ -1,33 +0,0 @@
--menu "Kernel hacking"
+-			mem_size = memparse(from+4, &from);
+-			memory_end = memory_start + mem_size;
+-		      }
+-		}
+-		c = *(from++);
+-		if (!c)
+-		  break;
+-		if (COMMAND_LINE_SIZE <= ++len)
+-		  break;
+-		*(to++) = c;
+-	}
+-	*to = '\0';
 -
--source "lib/Kconfig.debug"
+-	*cmdline_p = command_line;
+-}
 -
--config EARLY_PRINTK
--	bool "Early SCIF console support"
+-static void __init sh64_cpu_type_detect(void)
+-{
+-	extern unsigned long long peek_real_address_q(unsigned long long addr);
+-	unsigned long long cir;
+-	/* Do peeks in real mode to avoid having to set up a mapping for the
+-	   WPC registers.  On SH5-101 cut2, such a mapping would be exposed to
+-	   an address translation erratum which would make it hard to set up
+-	   correctly. */
+-	cir = peek_real_address_q(0x0d000008);
 -
--config SH64_PROC_TLB
--	bool "Debug: report TLB fill/purge activity through /proc/tlb"
--	depends on PROC_FS
+-	if ((cir & 0xffff) == 0x5103) {
+-		boot_cpu_data.type = CPU_SH5_103;
+-	} else if (((cir >> 32) & 0xffff) == 0x51e2) {
+-		/* CPU.VCR aliased at CIR address on SH5-101 */
+-		boot_cpu_data.type = CPU_SH5_101;
+-	} else {
+-		boot_cpu_data.type = CPU_SH_NONE;
+-	}
+-}
 -
--config SH64_PROC_ASIDS
--	bool "Debug: report ASIDs through /proc/asids"
--	depends on PROC_FS
+-void __init setup_arch(char **cmdline_p)
+-{
+-	unsigned long bootmap_size, i;
+-	unsigned long first_pfn, start_pfn, last_pfn, pages;
 -
--config SH64_SR_WATCH
--	bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
+-#ifdef CONFIG_EARLY_PRINTK
+-	extern void enable_early_printk(void);
 -
--config POOR_MANS_STRACE
--	bool "Debug: enable rudimentary strace facility"
--	help
--	  This option allows system calls to be traced to the console.  It also
--	  aids in detecting kernel stack underflow.  It is useful for debugging
--	  early-userland problems (e.g. init incurring fatal exceptions.)
+-	/*
+-	 * Setup Early SCIF console
+-	 */
+-	enable_early_printk();
+-#endif
 -
--config SH_ALPHANUMERIC
--	bool "Enable debug outputs to on-board alphanumeric display"
--	depends on SH_CAYMAN
+-	/*
+-	 * Setup TLB mappings
+-	 */
+-	sh64_tlb_init();
 -
--config SH_NO_BSS_INIT
--	bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)"
+-	/*
+-	 * Caches are already initialized by the time we get here, so we just
+-	 * fill in cpu_data info for the caches.
+-	 */
+-	sh64_cache_init();
 -
--endmenu
-diff --git a/arch/sh64/Makefile b/arch/sh64/Makefile
-deleted file mode 100644
-index 8dac7e1..0000000
---- a/arch/sh64/Makefile
-+++ /dev/null
-@@ -1,111 +0,0 @@
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 2000, 2001  Paolo Alberelli
--# Copyright (C) 2003, 2004  Paul Mundt
--#
--# This file is included by the global makefile so that you can add your own
--# architecture-specific flags and dependencies. Remember to do have actions
--# for "archclean" and "archdep" for cleaning up and making dependencies for
--# this architecture
--#
+-	platform_setup();
+-	platform_monitor();
 -
--cpu-y				:= -mb
--cpu-$(CONFIG_LITTLE_ENDIAN)	:= -ml
+-	sh64_cpu_type_detect();
 -
--cpu-$(CONFIG_CPU_SH5)		+= -m5-32media-nofpu
+-	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
 -
--ifdef CONFIG_LITTLE_ENDIAN
--LDFLAGS_vmlinux		+= --defsym 'jiffies=jiffies_64'
--LDFLAGS			+= -EL  -mshlelf32_linux
--else
--LDFLAGS_vmlinux		+= --defsym 'jiffies=jiffies_64+4'
--LDFLAGS			+= -EB  -mshelf32_linux
--endif
+-#ifdef CONFIG_BLK_DEV_RAM
+-	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+-	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+-	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+-#endif
 -
--# No requirements for endianess support from AFLAGS, 'as' always run through gcc
--KBUILD_CFLAGS		+= $(cpu-y)
+-	if (!MOUNT_ROOT_RDONLY)
+-		root_mountflags &= ~MS_RDONLY;
+-	init_mm.start_code = (unsigned long) _text;
+-	init_mm.end_code = (unsigned long) _etext;
+-	init_mm.end_data = (unsigned long) _edata;
+-	init_mm.brk = (unsigned long) _end;
 -
--LDFLAGS_vmlinux	+= --defsym phys_stext=_stext-$(CONFIG_CACHED_MEMORY_OFFSET) \
--		  --defsym phys_stext_shmedia=phys_stext+1 \
--		  -e phys_stext_shmedia
+-	code_resource.start = __pa(_text);
+-	code_resource.end = __pa(_etext)-1;
+-	data_resource.start = __pa(_etext);
+-	data_resource.end = __pa(_edata)-1;
 -
--OBJCOPYFLAGS	:= -O binary -R .note -R .comment -R .stab -R .stabstr -S
+-	parse_mem_cmdline(cmdline_p);
 -
--#
--# arch/sh64/defconfig never had any hope of being
--# frequently updated, so use one that does
--#
--KBUILD_DEFCONFIG	:= cayman_defconfig
+-	/*
+-	 * Find the lowest and highest page frame numbers we have available
+-	 */
+-	first_pfn = PFN_DOWN(memory_start);
+-	last_pfn = PFN_DOWN(memory_end);
+-	pages = last_pfn - first_pfn;
 -
--KBUILD_IMAGE		:= arch/$(ARCH)/boot/zImage
+-	/*
+-	 * Partially used pages are not usable - thus
+-	 * we are rounding upwards:
+-	 */
+-	start_pfn = PFN_UP(__pa(_end));
 -
--ifdef LOADADDR
--LINKFLAGS     += -Ttext $(word 1,$(LOADADDR))
--endif
+-	/*
+-	 * Find a proper area for the bootmem bitmap. After this
+-	 * bootstrap step all allocations (until the page allocator
+-	 * is intact) must be done via bootmem_alloc().
+-	 */
+-	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
+-					 first_pfn,
+-					 last_pfn);
+-        /*
+-         * Round it up.
+-         */
+-        bootmap_size = PFN_PHYS(PFN_UP(bootmap_size));
 -
--machine-$(CONFIG_SH_CAYMAN)	:= cayman
--machine-$(CONFIG_SH_SIMULATOR)	:= sim
--machine-$(CONFIG_SH_HARP)	:= harp
+-	/*
+-	 * Register fully available RAM pages with the bootmem allocator.
+-	 */
+-	free_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn), PFN_PHYS(pages));
 -
--head-y := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o
+-	/*
+-	 * Reserve all kernel sections + bootmem bitmap + a guard page.
+-	 */
+-	reserve_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn),
+-		        (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE) - PFN_PHYS(first_pfn));
 -
--core-y	+= arch/sh64/kernel/ arch/sh64/mm/
+-	/*
+-	 * Reserve platform dependent sections
+-	 */
+-	platform_reserve();
 -
--ifneq ($(machine-y),)
--core-y	+= arch/sh64/mach-$(machine-y)/
--endif
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (LOADER_TYPE && INITRD_START) {
+-		if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
+-		        reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
 -
--LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
--libs-y	+= arch/$(ARCH)/lib/ $(LIBGCC)
+-			initrd_start = (long) INITRD_START + PAGE_OFFSET + __MEMORY_START;
+-			initrd_end = initrd_start + INITRD_SIZE;
+-		} else {
+-			printk("initrd extends beyond end of memory "
+-			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+-				    (long) INITRD_START + INITRD_SIZE,
+-				    PFN_PHYS(last_pfn));
+-			initrd_start = 0;
+-		}
+-	}
+-#endif
 -
--drivers-$(CONFIG_OPROFILE)	+= arch/sh64/oprofile/
+-	/*
+-	 * Claim all RAM, ROM, and I/O resources.
+-	 */
 -
--boot := arch/$(ARCH)/boot
+-	/* Kernel RAM */
+-	request_resource(&iomem_resource, &code_resource);
+-	request_resource(&iomem_resource, &data_resource);
 -
--zImage: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+-	/* Other KRAM space */
+-	for (i = 0; i < STANDARD_KRAM_RESOURCES - 2; i++)
+-		request_resource(&iomem_resource,
+-				 &platform_parms.kram_res_p[i]);
 -
--compressed: zImage
+-	/* XRAM space */
+-	for (i = 0; i < STANDARD_XRAM_RESOURCES; i++)
+-		request_resource(&iomem_resource,
+-				 &platform_parms.xram_res_p[i]);
 -
--archclean:
--	$(Q)$(MAKE) $(clean)=$(boot)
+-	/* ROM space */
+-	for (i = 0; i < STANDARD_ROM_RESOURCES; i++)
+-		request_resource(&iomem_resource,
+-				 &platform_parms.rom_res_p[i]);
 -
--archprepare: arch/$(ARCH)/lib/syscalltab.h
+-	/* I/O space */
+-	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+-		request_resource(&ioport_resource,
+-				 &platform_parms.io_res_p[i]);
 -
--define filechk_gen-syscalltab
--       (set -e; \
--	echo "/*"; \
--	echo " * DO NOT MODIFY."; \
--	echo " *"; \
--	echo " * This file was generated by arch/$(ARCH)/Makefile"; \
--	echo " * Any changes will be reverted at build time."; \
--	echo " */"; \
--	echo ""; \
--	echo "#ifndef __SYSCALLTAB_H"; \
--	echo "#define __SYSCALLTAB_H"; \
--	echo ""; \
--	echo "#include <linux/kernel.h>"; \
--	echo ""; \
--	echo "struct syscall_info {"; \
--	echo "	const char *name;"; \
--	echo "} syscall_info_table[] = {"; \
--	sed -e '/^.*\.long /!d;s//    { "/;s/\(\([^/]*\)\/\)\{1\}.*/\2/; \
--		s/[ \t]*$$//g;s/$$/" },/;s/\("\)sys_/\1/g'; \
--	echo "};"; \
--	echo ""; \
--	echo "#define NUM_SYSCALL_INFO_ENTRIES	ARRAY_SIZE(syscall_info_table)"; \
--	echo ""; \
--	echo "#endif /* __SYSCALLTAB_H */" )
--endef
 -
--arch/$(ARCH)/lib/syscalltab.h: arch/sh64/kernel/syscalls.S
--	$(call filechk,gen-syscalltab)
+-#ifdef CONFIG_VT
+-#if defined(CONFIG_VGA_CONSOLE)
+-	conswitchp = &vga_con;
+-#elif defined(CONFIG_DUMMY_CONSOLE)
+-	conswitchp = &dummy_con;
+-#endif
+-#endif
 -
--CLEAN_FILES += arch/$(ARCH)/lib/syscalltab.h
+-	printk("Hardware FPU: %s\n", fpu_in_use ? "enabled" : "disabled");
 -
--define archhelp
--	@echo '* zImage 	           - Compressed kernel image'
--endef
-diff --git a/arch/sh64/boot/Makefile b/arch/sh64/boot/Makefile
-deleted file mode 100644
-index fb71087..0000000
---- a/arch/sh64/boot/Makefile
-+++ /dev/null
-@@ -1,20 +0,0 @@
--#
--# arch/sh64/boot/Makefile
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 2002 Stuart Menefy
--#
+-	paging_init();
+-}
 -
--targets := zImage
--subdir- := compressed
+-void __xchg_called_with_bad_pointer(void)
+-{
+-	printk(KERN_EMERG "xchg() called with bad pointer !\n");
+-}
 -
--$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
--	$(call if_changed,objcopy)
--	@echo 'Kernel: $@ is ready'
+-static struct cpu cpu[1];
 -
--$(obj)/compressed/vmlinux: FORCE
--	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
+-static int __init topology_init(void)
+-{
+-	return register_cpu(cpu, 0);
+-}
 -
-diff --git a/arch/sh64/boot/compressed/Makefile b/arch/sh64/boot/compressed/Makefile
-deleted file mode 100644
-index 9cd2167..0000000
---- a/arch/sh64/boot/compressed/Makefile
-+++ /dev/null
-@@ -1,46 +0,0 @@
--#
--# linux/arch/sh64/boot/compressed/Makefile
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 2002 Stuart Menefy
--# Copyright (C) 2004 Paul Mundt
--#
--# create a compressed vmlinux image from the original vmlinux
--#
+-subsys_initcall(topology_init);
 -
--targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
--		   head.o misc.o cache.o piggy.o vmlinux.lds
+-/*
+- *	Get CPU information
+- */
+-static const char *cpu_name[] = {
+-	[CPU_SH5_101]	= "SH5-101",
+-	[CPU_SH5_103]	= "SH5-103",
+-	[CPU_SH_NONE]	= "Unknown",
+-};
 -
--EXTRA_AFLAGS	:= -traditional
+-const char *get_cpu_subtype(void)
+-{
+-	return cpu_name[boot_cpu_data.type];
+-}
 -
--OBJECTS		:= $(obj)/head.o $(obj)/misc.o $(obj)/cache.o
+-#ifdef CONFIG_PROC_FS
+-static int show_cpuinfo(struct seq_file *m,void *v)
+-{
+-	unsigned int cpu = smp_processor_id();
 -
--#
--# ZIMAGE_OFFSET is the load offset of the compression loader
--# (4M for the kernel plus 64K for this loader)
--#
--ZIMAGE_OFFSET = $(shell printf "0x%8x" $$[$(CONFIG_MEMORY_START)+0x400000+0x10000])
+-	if (!cpu)
+-		seq_printf(m, "machine\t\t: %s\n", get_system_type());
 -
--LDFLAGS_vmlinux := -Ttext $(ZIMAGE_OFFSET) -e startup \
--		    -T $(obj)/../../kernel/vmlinux.lds \
--		    --no-warn-mismatch
+-	seq_printf(m, "processor\t: %d\n", cpu);
+-	seq_printf(m, "cpu family\t: SH-5\n");
+-	seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype());
 -
--$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
--	$(call if_changed,ld)
--	@:
+-	seq_printf(m, "icache size\t: %dK-bytes\n",
+-		   (boot_cpu_data.icache.ways *
+-		    boot_cpu_data.icache.sets *
+-		    boot_cpu_data.icache.linesz) >> 10);
+-	seq_printf(m, "dcache size\t: %dK-bytes\n",
+-		   (boot_cpu_data.dcache.ways *
+-		    boot_cpu_data.dcache.sets *
+-		    boot_cpu_data.dcache.linesz) >> 10);
+-	seq_printf(m, "itlb entries\t: %d\n", boot_cpu_data.itlb.entries);
+-	seq_printf(m, "dtlb entries\t: %d\n", boot_cpu_data.dtlb.entries);
 -
--$(obj)/vmlinux.bin: vmlinux FORCE
--	$(call if_changed,objcopy)
+-#define PRINT_CLOCK(name, value) \
+-	seq_printf(m, name " clock\t: %d.%02dMHz\n", \
+-		     ((value) / 1000000), ((value) % 1000000)/10000)
 -
--$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
--	$(call if_changed,gzip)
+-	PRINT_CLOCK("cpu", boot_cpu_data.cpu_clock);
+-	PRINT_CLOCK("bus", boot_cpu_data.bus_clock);
+-	PRINT_CLOCK("module", boot_cpu_data.module_clock);
 -
--LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh64-linux -T
--OBJCOPYFLAGS += -R .empty_zero_page
+-        seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
+-		     (loops_per_jiffy*HZ+2500)/500000,
+-		     ((loops_per_jiffy*HZ+2500)/5000) % 100);
 -
--$(obj)/piggy.o: $(obj)/vmlinux.lds $(obj)/vmlinux.bin.gz FORCE
--	$(call if_changed,ld)
+-	return 0;
+-}
 -
-diff --git a/arch/sh64/boot/compressed/cache.c b/arch/sh64/boot/compressed/cache.c
+-static void *c_start(struct seq_file *m, loff_t *pos)
+-{
+-	return (void*)(*pos == 0);
+-}
+-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+-{
+-	return NULL;
+-}
+-static void c_stop(struct seq_file *m, void *v)
+-{
+-}
+-struct seq_operations cpuinfo_op = {
+-	.start	= c_start,
+-	.next	= c_next,
+-	.stop	= c_stop,
+-	.show	= show_cpuinfo,
+-};
+-#endif /* CONFIG_PROC_FS */
+diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c
 deleted file mode 100644
-index 7087073..0000000
---- a/arch/sh64/boot/compressed/cache.c
+index b1705ac..0000000
+--- a/arch/sh64/kernel/sh_ksyms.c
 +++ /dev/null
-@@ -1,39 +0,0 @@
+@@ -1,62 +0,0 @@
 -/*
-- * arch/shmedia/boot/compressed/cache.c -- simple cache management functions
-- *
-- * Code extracted from sh-ipl+g, sh-stub.c, which has the copyright:
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
 - *
-- *   This is originally based on an m68k software stub written by Glenn
-- *   Engel at HP, but has changed quite a bit.
+- * arch/sh64/kernel/sh_ksyms.c
 - *
-- *   Modifications for the SH by Ben Lee and Steve Chamberlain
+- * Copyright (C) 2000, 2001  Paolo Alberelli
 - *
--****************************************************************************
+- */
 -
--		THIS SOFTWARE IS NOT COPYRIGHTED
+-#include <linux/rwsem.h>
+-#include <linux/module.h>
+-#include <linux/smp.h>
+-#include <linux/user.h>
+-#include <linux/elfcore.h>
+-#include <linux/sched.h>
+-#include <linux/in6.h>
+-#include <linux/interrupt.h>
+-#include <linux/screen_info.h>
 -
--   HP offers the following for use in the public domain.  HP makes no
--   warranty with regard to the software or it's performance and the
--   user accepts the software "AS IS" with all faults.
+-#include <asm/semaphore.h>
+-#include <asm/processor.h>
+-#include <asm/uaccess.h>
+-#include <asm/checksum.h>
+-#include <asm/io.h>
+-#include <asm/delay.h>
+-#include <asm/irq.h>
 -
--   HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
--   TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
--   OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 -
--****************************************************************************/
+-/* platform dependent support */
+-EXPORT_SYMBOL(dump_fpu);
+-EXPORT_SYMBOL(kernel_thread);
 -
--#define CACHE_ENABLE      0
--#define CACHE_DISABLE     1
+-/* Networking helper routines. */
+-EXPORT_SYMBOL(csum_partial_copy_nocheck);
 -
--int cache_control(unsigned int command)
--{
--	volatile unsigned int *p = (volatile unsigned int *) 0x80000000;
--	int i;
+-#ifdef CONFIG_VT
+-EXPORT_SYMBOL(screen_info);
+-#endif
 -
--	for (i = 0; i < (32 * 1024); i += 32) {
--		(void *) *p;
--		p += (32 / sizeof (int));
--	}
+-EXPORT_SYMBOL(__down);
+-EXPORT_SYMBOL(__down_trylock);
+-EXPORT_SYMBOL(__up);
+-EXPORT_SYMBOL(__put_user_asm_l);
+-EXPORT_SYMBOL(__get_user_asm_l);
+-EXPORT_SYMBOL(__copy_user);
+-EXPORT_SYMBOL(memcpy);
+-EXPORT_SYMBOL(udelay);
+-EXPORT_SYMBOL(__udelay);
+-EXPORT_SYMBOL(ndelay);
+-EXPORT_SYMBOL(__ndelay);
+-EXPORT_SYMBOL(flush_dcache_page);
+-EXPORT_SYMBOL(sh64_page_clear);
 -
--	return 0;
--}
-diff --git a/arch/sh64/boot/compressed/head.S b/arch/sh64/boot/compressed/head.S
+-/* Ugh.  These come in from libgcc.a at link time. */
+-#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
+-
+-DECLARE_EXPORT(__sdivsi3);
+-DECLARE_EXPORT(__muldi3);
+-DECLARE_EXPORT(__udivsi3);
+diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
 deleted file mode 100644
-index 82040b1..0000000
---- a/arch/sh64/boot/compressed/head.S
+index 79fc48c..0000000
+--- a/arch/sh64/kernel/signal.c
 +++ /dev/null
-@@ -1,164 +0,0 @@
+@@ -1,750 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/shmedia/boot/compressed/head.S
+- * arch/sh64/kernel/signal.c
 - *
-- * Copied from
-- *   arch/shmedia/kernel/head.S
-- * which carried the copyright:
-- *   Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003  Paul Mundt
+- * Copyright (C) 2004  Richard Curnow
+- *
+- * Started from sh version.
 - *
-- * Modification for compressed loader:
-- *   Copyright (C) 2002 Stuart Menefy (stuart.menefy at st.com)
 - */
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/kernel.h>
+-#include <linux/signal.h>
+-#include <linux/errno.h>
+-#include <linux/wait.h>
+-#include <linux/personality.h>
+-#include <linux/freezer.h>
+-#include <linux/ptrace.h>
+-#include <linux/unistd.h>
+-#include <linux/stddef.h>
+-#include <asm/ucontext.h>
+-#include <asm/uaccess.h>
+-#include <asm/pgtable.h>
 -
--#include <linux/linkage.h>
--#include <asm/registers.h>
--#include <asm/cache.h>
--#include <asm/mmu_context.h>
+-
+-#define REG_RET 9
+-#define REG_ARG1 2
+-#define REG_ARG2 3
+-#define REG_ARG3 4
+-#define REG_SP 15
+-#define REG_PR 18
+-#define REF_REG_RET regs->regs[REG_RET]
+-#define REF_REG_SP regs->regs[REG_SP]
+-#define DEREF_REG_PR regs->regs[REG_PR]
+-
+-#define DEBUG_SIG 0
+-
+-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+-
+-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
 -
 -/*
-- * Fixed TLB entries to identity map the beginning of RAM
+- * Atomically swap in the new signal mask, and wait for a signal.
 - */
--#define MMUIR_TEXT_H	0x0000000000000003 | CONFIG_MEMORY_START
--			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
--#define MMUIR_TEXT_L	0x000000000000009a | CONFIG_MEMORY_START
--			/* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. */
 -
--#define MMUDR_CACHED_H	0x0000000000000003 | CONFIG_MEMORY_START
--			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
--#define MMUDR_CACHED_L	0x000000000000015a | CONFIG_MEMORY_START
--			/* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */
+-asmlinkage int
+-sys_sigsuspend(old_sigset_t mask,
+-	       unsigned long r3, unsigned long r4, unsigned long r5,
+-	       unsigned long r6, unsigned long r7,
+-	       struct pt_regs * regs)
+-{
+-	sigset_t saveset;
 -
--#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
--#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
+-	mask &= _BLOCKABLE;
+-	spin_lock_irq(&current->sighand->siglock);
+-	saveset = current->blocked;
+-	siginitset(&current->blocked, mask);
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
 -
--#if 1
--#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	/* OCE + OCI + WB */
--#else
--#define	OCCR0_INIT_VAL	OCCR0_OFF
--#endif
--#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			/* No locking */
+-	REF_REG_RET = -EINTR;
+-	while (1) {
+-		current->state = TASK_INTERRUPTIBLE;
+-		schedule();
+-		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
+-		if (do_signal(regs, &saveset)) {
+-			/* pc now points at signal handler. Need to decrement
+-			   it because entry.S will increment it. */
+-			regs->pc -= 4;
+-			return -EINTR;
+-		}
+-	}
+-}
 -
--	.text
+-asmlinkage int
+-sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
+-	          unsigned long r4, unsigned long r5, unsigned long r6,
+-	          unsigned long r7,
+-	          struct pt_regs * regs)
+-{
+-	sigset_t saveset, newset;
 -
--	.global	startup
--startup:
--	/*
--	 * Prevent speculative fetch on device memory due to
--	 * uninitialized target registers.
--	 * This must be executed before the first branch.
--	 */
--	ptabs/u	ZERO, tr0
--	ptabs/u	ZERO, tr1
--	ptabs/u	ZERO, tr2
--	ptabs/u	ZERO, tr3
--	ptabs/u	ZERO, tr4
--	ptabs/u	ZERO, tr5
--	ptabs/u	ZERO, tr6
--	ptabs/u	ZERO, tr7
--	synci
+-	/* XXX: Don't preclude handling different sized sigset_t's.  */
+-	if (sigsetsize != sizeof(sigset_t))
+-		return -EINVAL;
 -
--	/*
--	 * Set initial TLB entries for cached and uncached regions.
--	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
--	 */
--	/* Clear ITLBs */
--	pta	1f, tr1
--	movi	ITLB_FIXED, r21
--	movi	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
--1:	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
--	addi	r21, TLB_STEP, r21
--        bne	r21, r22, tr1
+-	if (copy_from_user(&newset, unewset, sizeof(newset)))
+-		return -EFAULT;
+-	sigdelsetmask(&newset, ~_BLOCKABLE);
+-	spin_lock_irq(&current->sighand->siglock);
+-	saveset = current->blocked;
+-	current->blocked = newset;
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
 -
--	/* Clear DTLBs */
--	pta	1f, tr1
--	movi	DTLB_FIXED, r21
--	movi	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
--1:	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
--	addi	r21, TLB_STEP, r21
--        bne	r21, r22, tr1
+-	REF_REG_RET = -EINTR;
+-	while (1) {
+-		current->state = TASK_INTERRUPTIBLE;
+-		schedule();
+-		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
+-		if (do_signal(regs, &saveset)) {
+-			/* pc now points at signal handler. Need to decrement
+-			   it because entry.S will increment it. */
+-			regs->pc -= 4;
+-			return -EINTR;
+-		}
+-	}
+-}
 -
--	/* Map one big (512Mb) page for ITLB */
--	movi	ITLB_FIXED, r21
--	movi	MMUIR_TEXT_L, r22	/* PTEL first */
--	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
--	movi	MMUIR_TEXT_H, r22	/* PTEH last */
--	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */
+-asmlinkage int
+-sys_sigaction(int sig, const struct old_sigaction __user *act,
+-	      struct old_sigaction __user *oact)
+-{
+-	struct k_sigaction new_ka, old_ka;
+-	int ret;
 -
--	/* Map one big CACHED (512Mb) page for DTLB */
--	movi	DTLB_FIXED, r21
--	movi	MMUDR_CACHED_L, r22	/* PTEL first */
--	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
--	movi	MMUDR_CACHED_H, r22	/* PTEH last */
--	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
+-	if (act) {
+-		old_sigset_t mask;
+-		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+-		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+-			return -EFAULT;
+-		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
+-		__get_user(mask, &act->sa_mask);
+-		siginitset(&new_ka.sa.sa_mask, mask);
+-	}
 -
--	/* ICache */
--	movi	ICCR_BASE, r21
--	movi	ICCR0_INIT_VAL, r22
--	movi	ICCR1_INIT_VAL, r23
--	putcfg	r21, ICCR_REG0, r22
--	putcfg	r21, ICCR_REG1, r23
--	synci
+-	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 -
--	/* OCache */
--	movi	OCCR_BASE, r21
--	movi	OCCR0_INIT_VAL, r22
--	movi	OCCR1_INIT_VAL, r23
--	putcfg	r21, OCCR_REG0, r22
--	putcfg	r21, OCCR_REG1, r23
--	synco
+-	if (!ret && oact) {
+-		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+-		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+-			return -EFAULT;
+-		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+-	}
 -
--	/*
--	 * Enable the MMU.
--	 * From here-on code can be non-PIC.
--	 */
--	movi	SR_HARMLESS | SR_ENABLE_MMU, r22
--	putcon	r22, SSR
--	movi	1f, r22
--	putcon	r22, SPC
--	synco
--	rte				/* And now go into the hyperspace ... */
--1:					/* ... that's the next instruction ! */
+-	return ret;
+-}
 -
--	/* Set initial stack pointer */
--	movi	datalabel stack_start, r0
--	ld.l	r0, 0, r15
+-asmlinkage int
+-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+-	        unsigned long r4, unsigned long r5, unsigned long r6,
+-	        unsigned long r7,
+-	        struct pt_regs * regs)
+-{
+-	return do_sigaltstack(uss, uoss, REF_REG_SP);
+-}
 -
--	/*
--	 * Clear bss
--	 */
--	pt	1f, tr1
--	movi	datalabel __bss_start, r22
--	movi	datalabel _end, r23
--1:	st.l	r22, 0, ZERO
--	addi	r22, 4, r22
--	bne	r22, r23, tr1
 -
--	/*
--	 * Decompress the kernel.
--	 */
--	pt	decompress_kernel, tr0
--	blink	tr0, r18
+-/*
+- * Do a signal return; undo the signal stack.
+- */
 -
--	/*
--	 * Disable the MMU.
--	 */
--	movi	SR_HARMLESS, r22
--	putcon	r22, SSR
--	movi	1f, r22
--	putcon	r22, SPC
--	synco
--	rte				/* And now go into the hyperspace ... */
--1:					/* ... that's the next instruction ! */
+-struct sigframe
+-{
+-	struct sigcontext sc;
+-	unsigned long extramask[_NSIG_WORDS-1];
+-	long long retcode[2];
+-};
 -
--	/* Jump into the decompressed kernel */
--	movi	datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19
--	ptabs	r19, tr0
--	blink	tr0, r18
+-struct rt_sigframe
+-{
+-	struct siginfo __user *pinfo;
+-	void *puc;
+-	struct siginfo info;
+-	struct ucontext uc;
+-	long long retcode[2];
+-};
 -
--	/* Shouldn't return here, but just in case, loop forever */
--	pt	1f, tr0
--1:	blink	tr0, ZERO
-diff --git a/arch/sh64/boot/compressed/install.sh b/arch/sh64/boot/compressed/install.sh
-deleted file mode 100644
-index 90589f0..0000000
---- a/arch/sh64/boot/compressed/install.sh
-+++ /dev/null
-@@ -1,56 +0,0 @@
--#!/bin/sh
--#
--# arch/sh/boot/install.sh
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 1995 by Linus Torvalds
--#
--# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
--# Adapted from code in arch/i386/boot/install.sh by Russell King
--# Adapted from code in arch/arm/boot/install.sh by Stuart Menefy
--#
--# "make install" script for sh architecture
--#
--# Arguments:
--#   $1 - kernel version
--#   $2 - kernel image file
--#   $3 - kernel map file
--#   $4 - default install path (blank if root directory)
--#
+-#ifdef CONFIG_SH_FPU
+-static inline int
+-restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+-{
+-	int err = 0;
+-	int fpvalid;
 -
--# User may have a custom install script
+-	err |= __get_user (fpvalid, &sc->sc_fpvalid);
+-	conditional_used_math(fpvalid);
+-	if (! fpvalid)
+-		return err;
 -
--if [ -x /sbin/installkernel ]; then
--  exec /sbin/installkernel "$@"
--fi
+-	if (current == last_task_used_math) {
+-		last_task_used_math = NULL;
+-		regs->sr |= SR_FD;
+-	}
 -
--if [ "$2" = "zImage" ]; then
--# Compressed install
--  echo "Installing compressed kernel"
--  if [ -f $4/vmlinuz-$1 ]; then
--    mv $4/vmlinuz-$1 $4/vmlinuz.old
--  fi
+-	err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
+-				(sizeof(long long) * 32) + (sizeof(int) * 1));
 -
--  if [ -f $4/System.map-$1 ]; then
--    mv $4/System.map-$1 $4/System.old
--  fi
+-	return err;
+-}
 -
--  cat $2 > $4/vmlinuz-$1
--  cp $3 $4/System.map-$1
--else
--# Normal install
--  echo "Installing normal kernel"
--  if [ -f $4/vmlinux-$1 ]; then
--    mv $4/vmlinux-$1 $4/vmlinux.old
--  fi
+-static inline int
+-setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+-{
+-	int err = 0;
+-	int fpvalid;
 -
--  if [ -f $4/System.map ]; then
--    mv $4/System.map $4/System.old
--  fi
+-	fpvalid = !!used_math();
+-	err |= __put_user(fpvalid, &sc->sc_fpvalid);
+-	if (! fpvalid)
+-		return err;
 -
--  cat $2 > $4/vmlinux-$1
--  cp $3 $4/System.map
--fi
-diff --git a/arch/sh64/boot/compressed/misc.c b/arch/sh64/boot/compressed/misc.c
-deleted file mode 100644
-index aea00c5..0000000
---- a/arch/sh64/boot/compressed/misc.c
-+++ /dev/null
-@@ -1,250 +0,0 @@
--/*
-- * arch/sh64/boot/compressed/misc.c
-- *
-- * This is a collection of several routines from gzip-1.0.3
-- * adapted for Linux.
-- *
-- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
-- *
-- * Adapted for SHmedia from sh by Stuart Menefy, May 2002
-- */
+-	if (current == last_task_used_math) {
+-		grab_fpu();
+-		fpsave(&current->thread.fpu.hard);
+-		release_fpu();
+-		last_task_used_math = NULL;
+-		regs->sr |= SR_FD;
+-	}
 -
--#include <asm/uaccess.h>
+-	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
+-			      (sizeof(long long) * 32) + (sizeof(int) * 1));
+-	clear_used_math();
 -
--/* cache.c */
--#define CACHE_ENABLE      0
--#define CACHE_DISABLE     1
--int cache_control(unsigned int command);
+-	return err;
+-}
+-#else
+-static inline int
+-restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+-{}
+-static inline int
+-setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+-{}
+-#endif
 -
--/*
-- * gzip declarations
-- */
+-static int
+-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
+-{
+-	unsigned int err = 0;
+-        unsigned long long current_sr, new_sr;
+-#define SR_MASK 0xffff8cfd
 -
--#define OF(args)  args
--#define STATIC static
+-#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
 -
--#undef memset
--#undef memcpy
--#define memzero(s, n)     memset ((s), 0, (n))
+-	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
+-	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
+-	COPY(regs[8]);	COPY(regs[9]);  COPY(regs[10]);	COPY(regs[11]);
+-	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
+-	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
+-	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
+-	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
+-	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
+-	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
+-	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
+-	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
+-	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
+-	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
+-	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
+-	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
+-	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
+-	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
+-	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
 -
--typedef unsigned char uch;
--typedef unsigned short ush;
--typedef unsigned long ulg;
+-        /* Prevent the signal handler manipulating SR in a way that can
+-           crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
+-           modified */
+-        current_sr = regs->sr;
+-        err |= __get_user(new_sr, &sc->sc_sr);
+-        regs->sr &= SR_MASK;
+-        regs->sr |= (new_sr & ~SR_MASK);
 -
--#define WSIZE 0x8000		/* Window size must be at least 32k, */
--				/* and a power of two */
+-	COPY(pc);
 -
--static uch *inbuf;		/* input buffer */
--static uch window[WSIZE];	/* Sliding window buffer */
+-#undef COPY
 -
--static unsigned insize = 0;	/* valid bytes in inbuf */
--static unsigned inptr = 0;	/* index of next byte to be processed in inbuf */
--static unsigned outcnt = 0;	/* bytes in output buffer */
+-	/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
+-	 * has been restored above.) */
+-	err |= restore_sigcontext_fpu(regs, sc);
 -
--/* gzip flag byte */
--#define ASCII_FLAG   0x01	/* bit 0 set: file probably ASCII text */
--#define CONTINUATION 0x02	/* bit 1 set: continuation of multi-part gzip file */
--#define EXTRA_FIELD  0x04	/* bit 2 set: extra field present */
--#define ORIG_NAME    0x08	/* bit 3 set: original file name present */
--#define COMMENT      0x10	/* bit 4 set: file comment present */
--#define ENCRYPTED    0x20	/* bit 5 set: file is encrypted */
--#define RESERVED     0xC0	/* bit 6,7:   reserved */
+-	regs->syscall_nr = -1;		/* disable syscall checks */
+-	err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
+-	return err;
+-}
 -
--#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+-asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
+-				   unsigned long r4, unsigned long r5,
+-				   unsigned long r6, unsigned long r7,
+-				   struct pt_regs * regs)
+-{
+-	struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
+-	sigset_t set;
+-	long long ret;
 -
--/* Diagnostic functions */
--#ifdef DEBUG
--#  define Assert(cond,msg) {if(!(cond)) error(msg);}
--#  define Trace(x) fprintf x
--#  define Tracev(x) {if (verbose) fprintf x ;}
--#  define Tracevv(x) {if (verbose>1) fprintf x ;}
--#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
--#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
--#else
--#  define Assert(cond,msg)
--#  define Trace(x)
--#  define Tracev(x)
--#  define Tracevv(x)
--#  define Tracec(c,x)
--#  define Tracecv(c,x)
--#endif
+-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+-		goto badframe;
 -
--static int fill_inbuf(void);
--static void flush_window(void);
--static void error(char *m);
--static void gzip_mark(void **);
--static void gzip_release(void **);
+-	if (__get_user(set.sig[0], &frame->sc.oldmask)
+-	    || (_NSIG_WORDS > 1
+-		&& __copy_from_user(&set.sig[1], &frame->extramask,
+-				    sizeof(frame->extramask))))
+-		goto badframe;
 -
--extern char input_data[];
--extern int input_len;
+-	sigdelsetmask(&set, ~_BLOCKABLE);
 -
--static long bytes_out = 0;
--static uch *output_data;
--static unsigned long output_ptr = 0;
+-	spin_lock_irq(&current->sighand->siglock);
+-	current->blocked = set;
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
 -
--static void *malloc(int size);
--static void free(void *where);
--static void error(char *m);
--static void gzip_mark(void **);
--static void gzip_release(void **);
+-	if (restore_sigcontext(regs, &frame->sc, &ret))
+-		goto badframe;
+-	regs->pc -= 4;
 -
--static void puts(const char *);
+-	return (int) ret;
 -
--extern int _text;		/* Defined in vmlinux.lds.S */
--extern int _end;
--static unsigned long free_mem_ptr;
--static unsigned long free_mem_end_ptr;
+-badframe:
+-	force_sig(SIGSEGV, current);
+-	return 0;
+-}
 -
--#define HEAP_SIZE             0x10000
+-asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
+-				unsigned long r4, unsigned long r5,
+-				unsigned long r6, unsigned long r7,
+-				struct pt_regs * regs)
+-{
+-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
+-	sigset_t set;
+-	stack_t __user st;
+-	long long ret;
 -
--#include "../../../../lib/inflate.c"
+-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+-		goto badframe;
 -
--static void *malloc(int size)
--{
--	void *p;
+-	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+-		goto badframe;
 -
--	if (size < 0)
--		error("Malloc error\n");
--	if (free_mem_ptr == 0)
--		error("Memory error\n");
+-	sigdelsetmask(&set, ~_BLOCKABLE);
+-	spin_lock_irq(&current->sighand->siglock);
+-	current->blocked = set;
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
 -
--	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
+-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
+-		goto badframe;
+-	regs->pc -= 4;
 -
--	p = (void *) free_mem_ptr;
--	free_mem_ptr += size;
+-	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+-		goto badframe;
+-	/* It is more difficult to avoid calling this function than to
+-	   call it and ignore errors.  */
+-	do_sigaltstack(&st, NULL, REF_REG_SP);
 -
--	if (free_mem_ptr >= free_mem_end_ptr)
--		error("\nOut of memory\n");
+-	return (int) ret;
 -
--	return p;
+-badframe:
+-	force_sig(SIGSEGV, current);
+-	return 0;
 -}
 -
--static void free(void *where)
--{				/* Don't care */
--}
+-/*
+- * Set up a signal frame.
+- */
 -
--static void gzip_mark(void **ptr)
+-static int
+-setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+-		 unsigned long mask)
 -{
--	*ptr = (void *) free_mem_ptr;
--}
+-	int err = 0;
 -
--static void gzip_release(void **ptr)
--{
--	free_mem_ptr = (long) *ptr;
--}
+-	/* Do this first, otherwise is this sets sr->fd, that value isn't preserved. */
+-	err |= setup_sigcontext_fpu(regs, sc);
 -
--void puts(const char *s)
--{
--}
+-#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
 -
--void *memset(void *s, int c, size_t n)
--{
--	int i;
--	char *ss = (char *) s;
+-	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
+-	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
+-	COPY(regs[8]);	COPY(regs[9]);	COPY(regs[10]);	COPY(regs[11]);
+-	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
+-	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
+-	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
+-	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
+-	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
+-	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
+-	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
+-	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
+-	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
+-	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
+-	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
+-	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
+-	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
+-	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
+-	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
+-	COPY(sr);	COPY(pc);
 -
--	for (i = 0; i < n; i++)
--		ss[i] = c;
--	return s;
--}
+-#undef COPY
 -
--void *memcpy(void *__dest, __const void *__src, size_t __n)
--{
--	int i;
--	char *d = (char *) __dest, *s = (char *) __src;
+-	err |= __put_user(mask, &sc->oldmask);
 -
--	for (i = 0; i < __n; i++)
--		d[i] = s[i];
--	return __dest;
+-	return err;
 -}
 -
--/* ===========================================================================
-- * Fill the input buffer. This is called only when the buffer is empty
-- * and at least one byte is really needed.
+-/*
+- * Determine which stack to use..
 - */
--static int fill_inbuf(void)
+-static inline void __user *
+-get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
 -{
--	if (insize != 0) {
--		error("ran out of input data\n");
--	}
+-	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
+-		sp = current->sas_ss_sp + current->sas_ss_size;
 -
--	inbuf = input_data;
--	insize = input_len;
--	inptr = 1;
--	return inbuf[0];
+-	return (void __user *)((sp - frame_size) & -8ul);
 -}
 -
--/* ===========================================================================
-- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
-- * (Used for the decompressed data only.)
-- */
--static void flush_window(void)
+-void sa_default_restorer(void);		/* See comments below */
+-void sa_default_rt_restorer(void);	/* See comments below */
+-
+-static void setup_frame(int sig, struct k_sigaction *ka,
+-			sigset_t *set, struct pt_regs *regs)
 -{
--	ulg c = crc;		/* temporary variable */
--	unsigned n;
--	uch *in, *out, ch;
+-	struct sigframe __user *frame;
+-	int err = 0;
+-	int signal;
 -
--	in = window;
--	out = &output_data[output_ptr];
--	for (n = 0; n < outcnt; n++) {
--		ch = *out++ = *in++;
--		c = crc_32_tab[((int) c ^ ch) & 0xff] ^ (c >> 8);
+-	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
+-
+-	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+-		goto give_sigsegv;
+-
+-	signal = current_thread_info()->exec_domain
+-		&& current_thread_info()->exec_domain->signal_invmap
+-		&& sig < 32
+-		? current_thread_info()->exec_domain->signal_invmap[sig]
+-		: sig;
+-
+-	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+-
+-	/* Give up earlier as i386, in case */
+-	if (err)
+-		goto give_sigsegv;
+-
+-	if (_NSIG_WORDS > 1) {
+-		err |= __copy_to_user(frame->extramask, &set->sig[1],
+-				      sizeof(frame->extramask)); }
+-
+-	/* Give up earlier as i386, in case */
+-	if (err)
+-		goto give_sigsegv;
+-
+-	/* Set up to return from userspace.  If provided, use a stub
+-	   already in userspace.  */
+-	if (ka->sa.sa_flags & SA_RESTORER) {
+-		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
+-
+-		/*
+-		 * On SH5 all edited pointers are subject to NEFF
+-		 */
+-		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
+-        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+-	} else {
+-		/*
+-		 * Different approach on SH5.
+-	         * . Endianness independent asm code gets placed in entry.S .
+-		 *   This is limited to four ASM instructions corresponding
+-		 *   to two long longs in size.
+-		 * . err checking is done on the else branch only
+-		 * . flush_icache_range() is called upon __put_user() only
+-		 * . all edited pointers are subject to NEFF
+-		 * . being code, linker turns ShMedia bit on, always
+-		 *   dereference index -1.
+-		 */
+-		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
+-		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
+-        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+-
+-		if (__copy_to_user(frame->retcode,
+-			(unsigned long long)sa_default_restorer & (~1), 16) != 0)
+-			goto give_sigsegv;
+-
+-		/* Cohere the trampoline with the I-cache. */
+-		flush_cache_sigtramp(DEREF_REG_PR-1, DEREF_REG_PR-1+16);
 -	}
--	crc = c;
--	bytes_out += (ulg) outcnt;
--	output_ptr += (ulg) outcnt;
--	outcnt = 0;
--	puts(".");
+-
+-	/*
+-	 * Set up registers for signal handler.
+-	 * All edited pointers are subject to NEFF.
+-	 */
+-	regs->regs[REG_SP] = (unsigned long) frame;
+-	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
+-        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+-	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+-
+-        /* FIXME:
+-           The glibc profiling support for SH-5 needs to be passed a sigcontext
+-           so it can retrieve the PC.  At some point during 2003 the glibc
+-           support was changed to receive the sigcontext through the 2nd
+-           argument, but there are still versions of libc.so in use that use
+-           the 3rd argument.  Until libc.so is stabilised, pass the sigcontext
+-           through both 2nd and 3rd arguments.
+-        */
+-
+-	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
+-	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
+-
+-	regs->pc = (unsigned long) ka->sa.sa_handler;
+-	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+-
+-	set_fs(USER_DS);
+-
+-#if DEBUG_SIG
+-	/* Broken %016Lx */
+-	printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
+-		signal,
+-		current->comm, current->pid, frame,
+-		regs->pc >> 32, regs->pc & 0xffffffff,
+-		DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
+-#endif
+-
+-	return;
+-
+-give_sigsegv:
+-	force_sigsegv(sig, current);
 -}
 -
--static void error(char *x)
+-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+-			   sigset_t *set, struct pt_regs *regs)
 -{
--	puts("\n\n");
--	puts(x);
--	puts("\n\n -- System halted");
+-	struct rt_sigframe __user *frame;
+-	int err = 0;
+-	int signal;
 -
--	while (1) ;		/* Halt */
--}
+-	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
 -
--#define STACK_SIZE (4096)
--long __attribute__ ((aligned(8))) user_stack[STACK_SIZE];
--long *stack_start = &user_stack[STACK_SIZE];
+-	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+-		goto give_sigsegv;
 -
--void decompress_kernel(void)
--{
--	output_data = (uch *) (CONFIG_MEMORY_START + 0x2000);
--	free_mem_ptr = (unsigned long) &_end;
--	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+-	signal = current_thread_info()->exec_domain
+-		&& current_thread_info()->exec_domain->signal_invmap
+-		&& sig < 32
+-		? current_thread_info()->exec_domain->signal_invmap[sig]
+-		: sig;
 -
--	makecrc();
--	puts("Uncompressing Linux... ");
--	cache_control(CACHE_ENABLE);
--	gunzip();
--	puts("\n");
+-	err |= __put_user(&frame->info, &frame->pinfo);
+-	err |= __put_user(&frame->uc, &frame->puc);
+-	err |= copy_siginfo_to_user(&frame->info, info);
 -
--#if 0
--	/* When booting from ROM may want to do something like this if the
--	 * boot loader doesn't.
--	 */
+-	/* Give up earlier as i386, in case */
+-	if (err)
+-		goto give_sigsegv;
 -
--	/* Set up the parameters and command line */
--	{
--		volatile unsigned int *parambase =
--		    (int *) (CONFIG_MEMORY_START + 0x1000);
+-	/* Create the ucontext.  */
+-	err |= __put_user(0, &frame->uc.uc_flags);
+-	err |= __put_user(0, &frame->uc.uc_link);
+-	err |= __put_user((void *)current->sas_ss_sp,
+-			  &frame->uc.uc_stack.ss_sp);
+-	err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
+-			  &frame->uc.uc_stack.ss_flags);
+-	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+-	err |= setup_sigcontext(&frame->uc.uc_mcontext,
+-			        regs, set->sig[0]);
+-	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 -
--		parambase[0] = 0x1;	/* MOUNT_ROOT_RDONLY */
--		parambase[1] = 0x0;	/* RAMDISK_FLAGS */
--		parambase[2] = 0x0200;	/* ORIG_ROOT_DEV */
--		parambase[3] = 0x0;	/* LOADER_TYPE */
--		parambase[4] = 0x0;	/* INITRD_START */
--		parambase[5] = 0x0;	/* INITRD_SIZE */
--		parambase[6] = 0;
+-	/* Give up earlier as i386, in case */
+-	if (err)
+-		goto give_sigsegv;
 -
--		strcpy((char *) ((int) parambase + 0x100),
--		       "console=ttySC0,38400");
+-	/* Set up to return from userspace.  If provided, use a stub
+-	   already in userspace.  */
+-	if (ka->sa.sa_flags & SA_RESTORER) {
+-		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
+-
+-		/*
+-		 * On SH5 all edited pointers are subject to NEFF
+-		 */
+-		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
+-        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+-	} else {
+-		/*
+-		 * Different approach on SH5.
+-	         * . Endianness independent asm code gets placed in entry.S .
+-		 *   This is limited to four ASM instructions corresponding
+-		 *   to two long longs in size.
+-		 * . err checking is done on the else branch only
+-		 * . flush_icache_range() is called upon __put_user() only
+-		 * . all edited pointers are subject to NEFF
+-		 * . being code, linker turns ShMedia bit on, always
+-		 *   dereference index -1.
+-		 */
+-
+-		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
+-		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
+-        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+-
+-		if (__copy_to_user(frame->retcode,
+-			(unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
+-			goto give_sigsegv;
+-
+-		flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
 -	}
+-
+-	/*
+-	 * Set up registers for signal handler.
+-	 * All edited pointers are subject to NEFF.
+-	 */
+-	regs->regs[REG_SP] = (unsigned long) frame;
+-	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
+-        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+-	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+-	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
+-	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
+-	regs->pc = (unsigned long) ka->sa.sa_handler;
+-	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+-
+-	set_fs(USER_DS);
+-
+-#if DEBUG_SIG
+-	/* Broken %016Lx */
+-	printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
+-		signal,
+-		current->comm, current->pid, frame,
+-		regs->pc >> 32, regs->pc & 0xffffffff,
+-		DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
 -#endif
 -
--	puts("Ok, booting the kernel.\n");
+-	return;
 -
--	cache_control(CACHE_DISABLE);
+-give_sigsegv:
+-	force_sigsegv(sig, current);
 -}
-diff --git a/arch/sh64/boot/compressed/vmlinux.lds.S b/arch/sh64/boot/compressed/vmlinux.lds.S
-deleted file mode 100644
-index 59c2ef4..0000000
---- a/arch/sh64/boot/compressed/vmlinux.lds.S
-+++ /dev/null
-@@ -1,64 +0,0 @@
+-
+-/*
+- * OK, we're invoking a handler
+- */
+-
+-static void
+-handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+-		sigset_t *oldset, struct pt_regs * regs)
+-{
+-	/* Are we from a system call? */
+-	if (regs->syscall_nr >= 0) {
+-		/* If so, check system call restarting.. */
+-		switch (regs->regs[REG_RET]) {
+-			case -ERESTART_RESTARTBLOCK:
+-			case -ERESTARTNOHAND:
+-				regs->regs[REG_RET] = -EINTR;
+-				break;
+-
+-			case -ERESTARTSYS:
+-				if (!(ka->sa.sa_flags & SA_RESTART)) {
+-					regs->regs[REG_RET] = -EINTR;
+-					break;
+-				}
+-			/* fallthrough */
+-			case -ERESTARTNOINTR:
+-				/* Decode syscall # */
+-				regs->regs[REG_RET] = regs->syscall_nr;
+-				regs->pc -= 4;
+-		}
+-	}
+-
+-	/* Set up the stack frame */
+-	if (ka->sa.sa_flags & SA_SIGINFO)
+-		setup_rt_frame(sig, ka, info, oldset, regs);
+-	else
+-		setup_frame(sig, ka, oldset, regs);
+-
+-	spin_lock_irq(&current->sighand->siglock);
+-	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+-	if (!(ka->sa.sa_flags & SA_NODEFER))
+-		sigaddset(&current->blocked,sig);
+-	recalc_sigpending();
+-	spin_unlock_irq(&current->sighand->siglock);
+-}
+-
 -/*
-- * ld script to make compressed SuperH/shmedia Linux kernel+decompression
-- *		bootstrap
-- * Modified by Stuart Menefy from arch/sh/vmlinux.lds.S written by Niibe Yutaka
+- * Note that 'init' is a special process: it doesn't get signals it doesn't
+- * want to handle. Thus you cannot kill init even with a SIGKILL even by
+- * mistake.
+- *
+- * Note that we go through the signals twice: once to check the signals that
+- * the kernel can handle, and then we build all the user-level signal handling
+- * stack-frames in one go after that.
 - */
+-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+-{
+-	siginfo_t info;
+-	int signr;
+-	struct k_sigaction ka;
 -
+-	/*
+-	 * We want the common case to go fast, which
+-	 * is why we may in certain cases get here from
+-	 * kernel mode. Just return without doing anything
+-	 * if so.
+-	 */
+-	if (!user_mode(regs))
+-		return 1;
 -
--#ifdef CONFIG_LITTLE_ENDIAN
--/* OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux") */
--#define NOP 0x6ff0fff0
--#else
--/* OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64") */
--#define NOP 0xf0fff06f
--#endif
--
--OUTPUT_FORMAT("elf32-sh64-linux")
--OUTPUT_ARCH(sh)
--ENTRY(_start)
+-	if (try_to_freeze())
+-		goto no_signal;
 -
--#define ALIGNED_GAP(section, align) (((ADDR(section)+SIZEOF(section)+(align)-1) & ~((align)-1))-ADDR(section))
--#define FOLLOWING(section, align) AT (LOADADDR(section) + ALIGNED_GAP(section,align))
+-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+-		oldset = &current->saved_sigmask;
+-	else if (!oldset)
+-		oldset = &current->blocked;
 -
--SECTIONS
--{
--  _text = .;			/* Text and read-only data */
+-	signr = get_signal_to_deliver(&info, &ka, regs, 0);
 -
--  .text : {
--	*(.text)
--	*(.text64)
--	*(.text..SHmedia32)
--	*(.fixup)
--	*(.gnu.warning)
--	} = NOP
--  . = ALIGN(4);
--  .rodata : { *(.rodata) }
+-	if (signr > 0) {
+-		/* Whee!  Actually deliver the signal.  */
+-		handle_signal(signr, &info, &ka, oldset, regs);
 -
--  /* There is no 'real' reason for eight byte alignment, four would work
--   * as well, but gdb downloads much (*4) faster with this.
--   */
--  . = ALIGN(8);
--  .image : { *(.image) }
--  . = ALIGN(4);
--  _etext = .;			/* End of text section */
+-		/*
+-		 * If a signal was successfully delivered, the saved sigmask
+-		 * is in its frame, and we can clear the TIF_RESTORE_SIGMASK
+-		 * flag.
+-		 */
+-		if (test_thread_flag(TIF_RESTORE_SIGMASK))
+-			clear_thread_flag(TIF_RESTORE_SIGMASK);
 -
--  .data :			/* Data */
--	FOLLOWING(.image, 4)
--	{
--	_data = .;
--	*(.data)
+-		return 1;
 -	}
--  _data_image = LOADADDR(.data);/* Address of data section in ROM */
 -
--  _edata = .;			/* End of data section */
+-no_signal:
+-	/* Did we come from a system call? */
+-	if (regs->syscall_nr >= 0) {
+-		/* Restart the system call - no handlers present */
+-		switch (regs->regs[REG_RET]) {
+-		case -ERESTARTNOHAND:
+-		case -ERESTARTSYS:
+-		case -ERESTARTNOINTR:
+-			/* Decode Syscall # */
+-			regs->regs[REG_RET] = regs->syscall_nr;
+-			regs->pc -= 4;
+-			break;
 -
--  .stack : { stack = .;  _stack = .; }
+-		case -ERESTART_RESTARTBLOCK:
+-			regs->regs[REG_RET] = __NR_restart_syscall;
+-			regs->pc -= 4;
+-			break;
+-		}
+-	}
 -
--  . = ALIGN(4);
--  __bss_start = .;		/* BSS */
--  .bss : {
--	*(.bss)
+-	/* No signal to deliver -- put the saved sigmask back */
+-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+-		clear_thread_flag(TIF_RESTORE_SIGMASK);
+-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 -	}
--  . = ALIGN(4);
--  _end = . ;
+-
+-	return 0;
 -}
-diff --git a/arch/sh64/configs/cayman_defconfig b/arch/sh64/configs/cayman_defconfig
+diff --git a/arch/sh64/kernel/switchto.S b/arch/sh64/kernel/switchto.S
 deleted file mode 100644
-index 75552bb..0000000
---- a/arch/sh64/configs/cayman_defconfig
+index 45b2d90..0000000
+--- a/arch/sh64/kernel/switchto.S
 +++ /dev/null
-@@ -1,1126 +0,0 @@
--#
--# Automatically generated make config: don't edit
--# Linux kernel version: 2.6.24-rc1
--# Fri Nov  2 14:35:27 2007
--#
--CONFIG_SUPERH=y
--CONFIG_SUPERH64=y
--CONFIG_MMU=y
--CONFIG_QUICKLIST=y
--CONFIG_RWSEM_GENERIC_SPINLOCK=y
--CONFIG_GENERIC_FIND_NEXT_BIT=y
--CONFIG_GENERIC_HWEIGHT=y
--CONFIG_GENERIC_CALIBRATE_DELAY=y
--CONFIG_GENERIC_HARDIRQS=y
--CONFIG_GENERIC_IRQ_PROBE=y
--# CONFIG_ARCH_HAS_ILOG2_U32 is not set
--# CONFIG_ARCH_HAS_ILOG2_U64 is not set
--CONFIG_ARCH_NO_VIRT_TO_BUS=y
--CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
--
--#
--# General setup
--#
--CONFIG_EXPERIMENTAL=y
--CONFIG_BROKEN_ON_SMP=y
--CONFIG_LOCK_KERNEL=y
--CONFIG_INIT_ENV_ARG_LIMIT=32
--CONFIG_LOCALVERSION=""
--CONFIG_LOCALVERSION_AUTO=y
--CONFIG_SWAP=y
--# CONFIG_SYSVIPC is not set
--CONFIG_POSIX_MQUEUE=y
--# CONFIG_BSD_PROCESS_ACCT is not set
--# CONFIG_TASKSTATS is not set
--# CONFIG_USER_NS is not set
--# CONFIG_AUDIT is not set
--# CONFIG_IKCONFIG is not set
--CONFIG_LOG_BUF_SHIFT=14
--# CONFIG_CGROUPS is not set
--CONFIG_FAIR_GROUP_SCHED=y
--CONFIG_FAIR_USER_SCHED=y
--# CONFIG_FAIR_CGROUP_SCHED is not set
--CONFIG_SYSFS_DEPRECATED=y
--# CONFIG_RELAY is not set
--# CONFIG_BLK_DEV_INITRD is not set
--# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
--CONFIG_SYSCTL=y
--# CONFIG_EMBEDDED is not set
--CONFIG_UID16=y
--CONFIG_SYSCTL_SYSCALL=y
--CONFIG_KALLSYMS=y
--# CONFIG_KALLSYMS_ALL is not set
--# CONFIG_KALLSYMS_EXTRA_PASS is not set
--CONFIG_HOTPLUG=y
--CONFIG_PRINTK=y
--CONFIG_BUG=y
--CONFIG_ELF_CORE=y
--CONFIG_BASE_FULL=y
--CONFIG_FUTEX=y
--CONFIG_ANON_INODES=y
--CONFIG_EPOLL=y
--CONFIG_SIGNALFD=y
--CONFIG_EVENTFD=y
--CONFIG_SHMEM=y
--CONFIG_VM_EVENT_COUNTERS=y
--CONFIG_SLAB=y
--# CONFIG_SLUB is not set
--# CONFIG_SLOB is not set
--CONFIG_RT_MUTEXES=y
--# CONFIG_TINY_SHMEM is not set
--CONFIG_BASE_SMALL=0
--CONFIG_MODULES=y
--CONFIG_MODULE_UNLOAD=y
--# CONFIG_MODULE_FORCE_UNLOAD is not set
--# CONFIG_MODVERSIONS is not set
--# CONFIG_MODULE_SRCVERSION_ALL is not set
--CONFIG_KMOD=y
--CONFIG_BLOCK=y
--# CONFIG_LBD is not set
--# CONFIG_BLK_DEV_IO_TRACE is not set
--# CONFIG_LSF is not set
--# CONFIG_BLK_DEV_BSG is not set
--
--#
--# IO Schedulers
--#
--CONFIG_IOSCHED_NOOP=y
--CONFIG_IOSCHED_AS=y
--CONFIG_IOSCHED_DEADLINE=y
--CONFIG_IOSCHED_CFQ=y
--# CONFIG_DEFAULT_AS is not set
--# CONFIG_DEFAULT_DEADLINE is not set
--CONFIG_DEFAULT_CFQ=y
--# CONFIG_DEFAULT_NOOP is not set
--CONFIG_DEFAULT_IOSCHED="cfq"
--
--#
--# System type
--#
--# CONFIG_SH_SIMULATOR is not set
--CONFIG_SH_CAYMAN=y
--# CONFIG_SH_HARP is not set
--CONFIG_CPU_SH5=y
--CONFIG_CPU_SUBTYPE_SH5_101=y
--# CONFIG_CPU_SUBTYPE_SH5_103 is not set
--CONFIG_LITTLE_ENDIAN=y
--# CONFIG_BIG_ENDIAN is not set
--CONFIG_SH_FPU=y
--# CONFIG_SH64_FPU_DENORM_FLUSH is not set
--CONFIG_SH64_PGTABLE_2_LEVEL=y
--# CONFIG_SH64_PGTABLE_3_LEVEL is not set
--CONFIG_HUGETLB_PAGE_SIZE_64K=y
--# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
--# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
--CONFIG_SH64_USER_MISALIGNED_FIXUP=y
--
--#
--# Memory options
--#
--CONFIG_CACHED_MEMORY_OFFSET=0x20000000
--CONFIG_MEMORY_START=0x80000000
--CONFIG_MEMORY_SIZE_IN_MB=128
--
--#
--# Cache options
--#
--CONFIG_DCACHE_WRITE_BACK=y
--# CONFIG_DCACHE_WRITE_THROUGH is not set
--# CONFIG_DCACHE_DISABLED is not set
--# CONFIG_ICACHE_DISABLED is not set
--CONFIG_PCIDEVICE_MEMORY_START=C0000000
--CONFIG_DEVICE_MEMORY_START=E0000000
--CONFIG_FLASH_MEMORY_START=0x00000000
--CONFIG_PCI_BLOCK_START=0x40000000
--
--#
--# CPU Subtype specific options
--#
--CONFIG_SH64_ID2815_WORKAROUND=y
+@@ -1,198 +0,0 @@
+-/*
+- * arch/sh64/kernel/switchto.S
+- *
+- * sh64 context switch
+- *
+- * Copyright (C) 2004  Richard Curnow
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+-*/
 -
--#
--# Misc options
--#
--CONFIG_HEARTBEAT=y
--CONFIG_HDSP253_LED=y
--# CONFIG_SH_DMA is not set
--CONFIG_PREEMPT=y
--CONFIG_SELECT_MEMORY_MODEL=y
--CONFIG_FLATMEM_MANUAL=y
--# CONFIG_DISCONTIGMEM_MANUAL is not set
--# CONFIG_SPARSEMEM_MANUAL is not set
--CONFIG_FLATMEM=y
--CONFIG_FLAT_NODE_MEM_MAP=y
--# CONFIG_SPARSEMEM_STATIC is not set
--# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
--CONFIG_SPLIT_PTLOCK_CPUS=4
--# CONFIG_RESOURCES_64BIT is not set
--CONFIG_ZONE_DMA_FLAG=0
--CONFIG_NR_QUICK=1
+-	.section .text..SHmedia32,"ax"
+-	.little
 -
--#
--# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
--#
--CONFIG_PCI=y
--CONFIG_SH_PCIDMA_NONCOHERENT=y
--# CONFIG_ARCH_SUPPORTS_MSI is not set
--# CONFIG_PCI_DEBUG is not set
--# CONFIG_PCCARD is not set
--# CONFIG_HOTPLUG_PCI is not set
+-	.balign 32
 -
--#
--# Executable file formats
--#
--CONFIG_BINFMT_ELF=y
--# CONFIG_BINFMT_MISC is not set
+-	.type sh64_switch_to,@function
+-	.global sh64_switch_to
+-	.global __sh64_switch_to_end
+-sh64_switch_to:
 -
--#
--# Networking
--#
--CONFIG_NET=y
+-/* Incoming args
+-   r2 - prev
+-   r3 - &prev->thread
+-   r4 - next
+-   r5 - &next->thread
 -
--#
--# Networking options
--#
--CONFIG_PACKET=y
--# CONFIG_PACKET_MMAP is not set
--CONFIG_UNIX=y
--CONFIG_XFRM=y
--# CONFIG_XFRM_USER is not set
--# CONFIG_XFRM_SUB_POLICY is not set
--# CONFIG_XFRM_MIGRATE is not set
--# CONFIG_NET_KEY is not set
--CONFIG_INET=y
--# CONFIG_IP_MULTICAST is not set
--# CONFIG_IP_ADVANCED_ROUTER is not set
--CONFIG_IP_FIB_HASH=y
--CONFIG_IP_PNP=y
--# CONFIG_IP_PNP_DHCP is not set
--# CONFIG_IP_PNP_BOOTP is not set
--# CONFIG_IP_PNP_RARP is not set
--# CONFIG_NET_IPIP is not set
--# CONFIG_NET_IPGRE is not set
--# CONFIG_ARPD is not set
--# CONFIG_SYN_COOKIES is not set
--# CONFIG_INET_AH is not set
--# CONFIG_INET_ESP is not set
--# CONFIG_INET_IPCOMP is not set
--# CONFIG_INET_XFRM_TUNNEL is not set
--# CONFIG_INET_TUNNEL is not set
--CONFIG_INET_XFRM_MODE_TRANSPORT=y
--CONFIG_INET_XFRM_MODE_TUNNEL=y
--CONFIG_INET_XFRM_MODE_BEET=y
--# CONFIG_INET_LRO is not set
--CONFIG_INET_DIAG=y
--CONFIG_INET_TCP_DIAG=y
--# CONFIG_TCP_CONG_ADVANCED is not set
--CONFIG_TCP_CONG_CUBIC=y
--CONFIG_DEFAULT_TCP_CONG="cubic"
--# CONFIG_TCP_MD5SIG is not set
--# CONFIG_IPV6 is not set
--# CONFIG_INET6_XFRM_TUNNEL is not set
--# CONFIG_INET6_TUNNEL is not set
--# CONFIG_NETWORK_SECMARK is not set
--# CONFIG_NETFILTER is not set
--# CONFIG_IP_DCCP is not set
--# CONFIG_IP_SCTP is not set
--# CONFIG_TIPC is not set
--# CONFIG_ATM is not set
--# CONFIG_BRIDGE is not set
--# CONFIG_VLAN_8021Q is not set
--# CONFIG_DECNET is not set
--# CONFIG_LLC2 is not set
--# CONFIG_IPX is not set
--# CONFIG_ATALK is not set
--# CONFIG_X25 is not set
--# CONFIG_LAPB is not set
--# CONFIG_ECONET is not set
--# CONFIG_WAN_ROUTER is not set
--# CONFIG_NET_SCHED is not set
+-   Outgoing results
+-   r2 - last (=prev) : this just stays in r2 throughout
 -
--#
--# Network testing
--#
--# CONFIG_NET_PKTGEN is not set
--# CONFIG_HAMRADIO is not set
--# CONFIG_IRDA is not set
--# CONFIG_BT is not set
--# CONFIG_AF_RXRPC is not set
+-   Want to create a full (struct pt_regs) on the stack to allow backtracing
+-   functions to work.  However, we only need to populate the callee-save
+-   register slots in this structure; since we're a function our ancestors must
+-   have themselves preserved all caller saved state in the stack.  This saves
+-   some wasted effort since we won't need to look at the values.
 -
--#
--# Wireless
--#
--# CONFIG_CFG80211 is not set
--# CONFIG_WIRELESS_EXT is not set
--# CONFIG_MAC80211 is not set
--# CONFIG_IEEE80211 is not set
--# CONFIG_RFKILL is not set
--# CONFIG_NET_9P is not set
+-   In particular, all caller-save registers are immediately available for
+-   scratch use.
 -
--#
--# Device Drivers
--#
+-*/
 -
--#
--# Generic Driver Options
--#
--CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
--CONFIG_STANDALONE=y
--CONFIG_PREVENT_FIRMWARE_BUILD=y
--# CONFIG_FW_LOADER is not set
--# CONFIG_DEBUG_DRIVER is not set
--# CONFIG_DEBUG_DEVRES is not set
--# CONFIG_SYS_HYPERVISOR is not set
--# CONFIG_CONNECTOR is not set
--# CONFIG_MTD is not set
--# CONFIG_PARPORT is not set
--CONFIG_BLK_DEV=y
--# CONFIG_BLK_CPQ_CISS_DA is not set
--# CONFIG_BLK_DEV_DAC960 is not set
--# CONFIG_BLK_DEV_UMEM is not set
--# CONFIG_BLK_DEV_COW_COMMON is not set
--CONFIG_BLK_DEV_LOOP=y
--# CONFIG_BLK_DEV_CRYPTOLOOP is not set
--# CONFIG_BLK_DEV_NBD is not set
--# CONFIG_BLK_DEV_SX8 is not set
--CONFIG_BLK_DEV_RAM=y
--CONFIG_BLK_DEV_RAM_COUNT=16
--CONFIG_BLK_DEV_RAM_SIZE=4096
--CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
--# CONFIG_CDROM_PKTCDVD is not set
--# CONFIG_ATA_OVER_ETH is not set
--CONFIG_MISC_DEVICES=y
--# CONFIG_PHANTOM is not set
--# CONFIG_EEPROM_93CX6 is not set
--# CONFIG_SGI_IOC4 is not set
--# CONFIG_TIFM_CORE is not set
--# CONFIG_IDE is not set
+-#define FRAME_SIZE (76*8 + 8)
 -
--#
--# SCSI device support
--#
--# CONFIG_RAID_ATTRS is not set
--CONFIG_SCSI=y
--CONFIG_SCSI_DMA=y
--# CONFIG_SCSI_TGT is not set
--# CONFIG_SCSI_NETLINK is not set
--CONFIG_SCSI_PROC_FS=y
+-	movi	FRAME_SIZE, r0
+-	sub.l	r15, r0, r15
+-	! Do normal-style register save to support backtrace
 -
--#
--# SCSI support type (disk, tape, CD-ROM)
--#
--CONFIG_BLK_DEV_SD=y
--# CONFIG_CHR_DEV_ST is not set
--# CONFIG_CHR_DEV_OSST is not set
--# CONFIG_BLK_DEV_SR is not set
--# CONFIG_CHR_DEV_SG is not set
--# CONFIG_CHR_DEV_SCH is not set
+-	st.l	r15,   0, r18	! save link reg
+-	st.l	r15,   4, r14	! save fp
+-	add.l	r15, r63, r14	! setup frame pointer
 -
--#
--# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
--#
--CONFIG_SCSI_MULTI_LUN=y
--# CONFIG_SCSI_CONSTANTS is not set
--# CONFIG_SCSI_LOGGING is not set
--# CONFIG_SCSI_SCAN_ASYNC is not set
--CONFIG_SCSI_WAIT_SCAN=m
+-	! hopefully this looks normal to the backtrace now.
 -
--#
--# SCSI Transports
--#
--CONFIG_SCSI_SPI_ATTRS=y
--# CONFIG_SCSI_FC_ATTRS is not set
--# CONFIG_SCSI_ISCSI_ATTRS is not set
--# CONFIG_SCSI_SAS_LIBSAS is not set
--# CONFIG_SCSI_SRP_ATTRS is not set
--CONFIG_SCSI_LOWLEVEL=y
--# CONFIG_ISCSI_TCP is not set
--# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
--# CONFIG_SCSI_3W_9XXX is not set
--# CONFIG_SCSI_ACARD is not set
--# CONFIG_SCSI_AACRAID is not set
--# CONFIG_SCSI_AIC7XXX is not set
--# CONFIG_SCSI_AIC7XXX_OLD is not set
--# CONFIG_SCSI_AIC79XX is not set
--# CONFIG_SCSI_AIC94XX is not set
--# CONFIG_SCSI_ARCMSR is not set
--# CONFIG_MEGARAID_NEWGEN is not set
--# CONFIG_MEGARAID_LEGACY is not set
--# CONFIG_MEGARAID_SAS is not set
--# CONFIG_SCSI_HPTIOP is not set
--# CONFIG_SCSI_DMX3191D is not set
--# CONFIG_SCSI_FUTURE_DOMAIN is not set
--# CONFIG_SCSI_IPS is not set
--# CONFIG_SCSI_INITIO is not set
--# CONFIG_SCSI_INIA100 is not set
--# CONFIG_SCSI_STEX is not set
--CONFIG_SCSI_SYM53C8XX_2=y
--CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
--CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
--CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
--CONFIG_SCSI_SYM53C8XX_MMIO=y
--# CONFIG_SCSI_QLOGIC_1280 is not set
--# CONFIG_SCSI_QLA_FC is not set
--# CONFIG_SCSI_QLA_ISCSI is not set
--# CONFIG_SCSI_LPFC is not set
--# CONFIG_SCSI_DC395x is not set
--# CONFIG_SCSI_DC390T is not set
--# CONFIG_SCSI_NSP32 is not set
--# CONFIG_SCSI_DEBUG is not set
--# CONFIG_SCSI_SRP is not set
--# CONFIG_ATA is not set
--# CONFIG_MD is not set
--# CONFIG_FUSION is not set
+-	addi.l	r15,   8, r1    ! base of pt_regs
+-	addi.l	r1,   24, r0    ! base of pt_regs.regs
+-	addi.l	r0, (63*8), r8	! base of pt_regs.trregs
 -
--#
--# IEEE 1394 (FireWire) support
--#
--# CONFIG_FIREWIRE is not set
--# CONFIG_IEEE1394 is not set
--# CONFIG_I2O is not set
--CONFIG_NETDEVICES=y
--# CONFIG_NETDEVICES_MULTIQUEUE is not set
--# CONFIG_DUMMY is not set
--# CONFIG_BONDING is not set
--# CONFIG_MACVLAN is not set
--# CONFIG_EQUALIZER is not set
--# CONFIG_TUN is not set
--# CONFIG_VETH is not set
--# CONFIG_IP1000 is not set
--# CONFIG_ARCNET is not set
--# CONFIG_PHYLIB is not set
--CONFIG_NET_ETHERNET=y
--# CONFIG_MII is not set
--# CONFIG_STNIC is not set
--# CONFIG_HAPPYMEAL is not set
--# CONFIG_SUNGEM is not set
--# CONFIG_CASSINI is not set
--# CONFIG_NET_VENDOR_3COM is not set
--# CONFIG_SMC91X is not set
--# CONFIG_SMC911X is not set
--CONFIG_NET_TULIP=y
--# CONFIG_DE2104X is not set
--CONFIG_TULIP=y
--# CONFIG_TULIP_MWI is not set
--# CONFIG_TULIP_MMIO is not set
--# CONFIG_TULIP_NAPI is not set
--# CONFIG_DE4X5 is not set
--# CONFIG_WINBOND_840 is not set
--# CONFIG_DM9102 is not set
--# CONFIG_ULI526X is not set
--# CONFIG_HP100 is not set
--# CONFIG_IBM_NEW_EMAC_ZMII is not set
--# CONFIG_IBM_NEW_EMAC_RGMII is not set
--# CONFIG_IBM_NEW_EMAC_TAH is not set
--# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
--CONFIG_NET_PCI=y
--# CONFIG_PCNET32 is not set
--# CONFIG_AMD8111_ETH is not set
--# CONFIG_ADAPTEC_STARFIRE is not set
--# CONFIG_B44 is not set
--# CONFIG_FORCEDETH is not set
--# CONFIG_EEPRO100 is not set
--# CONFIG_E100 is not set
--# CONFIG_FEALNX is not set
--# CONFIG_NATSEMI is not set
--# CONFIG_NE2K_PCI is not set
--# CONFIG_8139CP is not set
--# CONFIG_8139TOO is not set
--# CONFIG_SIS900 is not set
--# CONFIG_EPIC100 is not set
--# CONFIG_SUNDANCE is not set
--# CONFIG_TLAN is not set
--# CONFIG_VIA_RHINE is not set
--# CONFIG_SC92031 is not set
--CONFIG_NETDEV_1000=y
--# CONFIG_ACENIC is not set
--# CONFIG_DL2K is not set
--# CONFIG_E1000 is not set
--# CONFIG_E1000E is not set
--# CONFIG_NS83820 is not set
--# CONFIG_HAMACHI is not set
--# CONFIG_YELLOWFIN is not set
--# CONFIG_R8169 is not set
--# CONFIG_SIS190 is not set
--# CONFIG_SKGE is not set
--# CONFIG_SKY2 is not set
--# CONFIG_SK98LIN is not set
--# CONFIG_VIA_VELOCITY is not set
--# CONFIG_TIGON3 is not set
--# CONFIG_BNX2 is not set
--# CONFIG_QLA3XXX is not set
--# CONFIG_ATL1 is not set
--CONFIG_NETDEV_10000=y
--# CONFIG_CHELSIO_T1 is not set
--# CONFIG_CHELSIO_T3 is not set
--# CONFIG_IXGBE is not set
--# CONFIG_IXGB is not set
--# CONFIG_S2IO is not set
--# CONFIG_MYRI10GE is not set
--# CONFIG_NETXEN_NIC is not set
--# CONFIG_NIU is not set
--# CONFIG_MLX4_CORE is not set
--# CONFIG_TEHUTI is not set
--# CONFIG_TR is not set
+-	/* Note : to be fixed?
+-	   struct pt_regs is really designed for holding the state on entry
+-	   to an exception, i.e. pc,sr,regs etc.  However, for the context
+-	   switch state, some of this is not required.  But the unwinder takes
+-	   struct pt_regs * as an arg so we have to build this structure
+-	   to allow unwinding switched tasks in show_state() */
 -
--#
--# Wireless LAN
--#
--# CONFIG_WLAN_PRE80211 is not set
--# CONFIG_WLAN_80211 is not set
--# CONFIG_WAN is not set
--# CONFIG_FDDI is not set
--# CONFIG_HIPPI is not set
--# CONFIG_PPP is not set
--# CONFIG_SLIP is not set
--# CONFIG_NET_FC is not set
--# CONFIG_SHAPER is not set
--# CONFIG_NETCONSOLE is not set
--# CONFIG_NETPOLL is not set
--# CONFIG_NET_POLL_CONTROLLER is not set
--# CONFIG_ISDN is not set
--# CONFIG_PHONE is not set
+-	st.q	r0, ( 9*8), r9
+-	st.q	r0, (10*8), r10
+-	st.q	r0, (11*8), r11
+-	st.q	r0, (12*8), r12
+-	st.q	r0, (13*8), r13
+-	st.q	r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
+-	! the point where the process is left in suspended animation, i.e. current
+-	! fp here, not the saved one.
+-	st.q	r0, (16*8), r16
 -
--#
--# Input device support
--#
--CONFIG_INPUT=y
--# CONFIG_INPUT_FF_MEMLESS is not set
--# CONFIG_INPUT_POLLDEV is not set
+-	st.q	r0, (24*8), r24
+-	st.q	r0, (25*8), r25
+-	st.q	r0, (26*8), r26
+-	st.q	r0, (27*8), r27
+-	st.q	r0, (28*8), r28
+-	st.q	r0, (29*8), r29
+-	st.q	r0, (30*8), r30
+-	st.q	r0, (31*8), r31
+-	st.q	r0, (32*8), r32
+-	st.q	r0, (33*8), r33
+-	st.q	r0, (34*8), r34
+-	st.q	r0, (35*8), r35
 -
--#
--# Userland interfaces
--#
--CONFIG_INPUT_MOUSEDEV=y
--CONFIG_INPUT_MOUSEDEV_PSAUX=y
--CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
--CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
--# CONFIG_INPUT_JOYDEV is not set
--# CONFIG_INPUT_EVDEV is not set
--# CONFIG_INPUT_EVBUG is not set
+-	st.q	r0, (44*8), r44
+-	st.q	r0, (45*8), r45
+-	st.q	r0, (46*8), r46
+-	st.q	r0, (47*8), r47
+-	st.q	r0, (48*8), r48
+-	st.q	r0, (49*8), r49
+-	st.q	r0, (50*8), r50
+-	st.q	r0, (51*8), r51
+-	st.q	r0, (52*8), r52
+-	st.q	r0, (53*8), r53
+-	st.q	r0, (54*8), r54
+-	st.q	r0, (55*8), r55
+-	st.q	r0, (56*8), r56
+-	st.q	r0, (57*8), r57
+-	st.q	r0, (58*8), r58
+-	st.q	r0, (59*8), r59
 -
--#
--# Input Device Drivers
--#
--CONFIG_INPUT_KEYBOARD=y
--CONFIG_KEYBOARD_ATKBD=y
--# CONFIG_KEYBOARD_SUNKBD is not set
--# CONFIG_KEYBOARD_LKKBD is not set
--# CONFIG_KEYBOARD_XTKBD is not set
--# CONFIG_KEYBOARD_NEWTON is not set
--# CONFIG_KEYBOARD_STOWAWAY is not set
--CONFIG_INPUT_MOUSE=y
--CONFIG_MOUSE_PS2=y
--CONFIG_MOUSE_PS2_ALPS=y
--CONFIG_MOUSE_PS2_LOGIPS2PP=y
--CONFIG_MOUSE_PS2_SYNAPTICS=y
--CONFIG_MOUSE_PS2_LIFEBOOK=y
--CONFIG_MOUSE_PS2_TRACKPOINT=y
--# CONFIG_MOUSE_PS2_TOUCHKIT is not set
--# CONFIG_MOUSE_SERIAL is not set
--# CONFIG_MOUSE_APPLETOUCH is not set
--# CONFIG_MOUSE_VSXXXAA is not set
--# CONFIG_INPUT_JOYSTICK is not set
--# CONFIG_INPUT_TABLET is not set
--# CONFIG_INPUT_TOUCHSCREEN is not set
--# CONFIG_INPUT_MISC is not set
+-	! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
+-	! Use a local label to avoid creating a symbol that will confuse the !
+-	! backtrace
+-	pta	.Lsave_pc, tr0
 -
--#
--# Hardware I/O ports
--#
--CONFIG_SERIO=y
--CONFIG_SERIO_I8042=y
--CONFIG_SERIO_SERPORT=y
--# CONFIG_SERIO_PCIPS2 is not set
--CONFIG_SERIO_LIBPS2=y
--# CONFIG_SERIO_RAW is not set
--# CONFIG_GAMEPORT is not set
+-	gettr	tr5, r45
+-	gettr	tr6, r46
+-	gettr	tr7, r47
+-	st.q	r8, (5*8), r45
+-	st.q	r8, (6*8), r46
+-	st.q	r8, (7*8), r47
 -
--#
--# Character devices
--#
--CONFIG_VT=y
--CONFIG_VT_CONSOLE=y
--CONFIG_HW_CONSOLE=y
--# CONFIG_VT_HW_CONSOLE_BINDING is not set
--# CONFIG_SERIAL_NONSTANDARD is not set
+-	! Now switch context
+-	gettr	tr0, r9
+-	st.l	r3, 0, r15	! prev->thread.sp
+-	st.l	r3, 8, r1	! prev->thread.kregs
+-	st.l	r3, 4, r9	! prev->thread.pc
+-	st.q	r1, 0, r9	! save prev->thread.pc into pt_regs->pc
 -
--#
--# Serial drivers
--#
--# CONFIG_SERIAL_8250 is not set
+-	! Load PC for next task (init value or save_pc later)
+-	ld.l	r5, 4, r18	! next->thread.pc
+-	! Switch stacks
+-	ld.l	r5, 0, r15	! next->thread.sp
+-	ptabs	r18, tr0
 -
--#
--# Non-8250 serial port support
--#
--CONFIG_SERIAL_SH_SCI=y
--CONFIG_SERIAL_SH_SCI_NR_UARTS=2
--CONFIG_SERIAL_SH_SCI_CONSOLE=y
--CONFIG_SERIAL_CORE=y
--CONFIG_SERIAL_CORE_CONSOLE=y
--# CONFIG_SERIAL_JSM is not set
--CONFIG_UNIX98_PTYS=y
--CONFIG_LEGACY_PTYS=y
--CONFIG_LEGACY_PTY_COUNT=256
--# CONFIG_IPMI_HANDLER is not set
--CONFIG_HW_RANDOM=y
--# CONFIG_R3964 is not set
--# CONFIG_APPLICOM is not set
--# CONFIG_RAW_DRIVER is not set
--# CONFIG_TCG_TPM is not set
--CONFIG_DEVPORT=y
--CONFIG_I2C=m
--CONFIG_I2C_BOARDINFO=y
--# CONFIG_I2C_CHARDEV is not set
+-	! Update current
+-	ld.l	r4, 4, r9	! next->thread_info (2nd element of next task_struct)
+-	putcon	r9, kcr0	! current = next->thread_info
 -
--#
--# I2C Algorithms
--#
--# CONFIG_I2C_ALGOBIT is not set
--# CONFIG_I2C_ALGOPCF is not set
--# CONFIG_I2C_ALGOPCA is not set
+-	! go to save_pc for a reschedule, or the initial thread.pc for a new process
+-	blink	tr0, r63
 -
--#
--# I2C Hardware Bus support
--#
--# CONFIG_I2C_ALI1535 is not set
--# CONFIG_I2C_ALI1563 is not set
--# CONFIG_I2C_ALI15X3 is not set
--# CONFIG_I2C_AMD756 is not set
--# CONFIG_I2C_AMD8111 is not set
--# CONFIG_I2C_I801 is not set
--# CONFIG_I2C_I810 is not set
--# CONFIG_I2C_PIIX4 is not set
--# CONFIG_I2C_NFORCE2 is not set
--# CONFIG_I2C_OCORES is not set
--# CONFIG_I2C_PARPORT_LIGHT is not set
--# CONFIG_I2C_PROSAVAGE is not set
--# CONFIG_I2C_SAVAGE4 is not set
--# CONFIG_I2C_SIMTEC is not set
--# CONFIG_I2C_SIS5595 is not set
--# CONFIG_I2C_SIS630 is not set
--# CONFIG_I2C_SIS96X is not set
--# CONFIG_I2C_TAOS_EVM is not set
--# CONFIG_I2C_STUB is not set
--# CONFIG_I2C_VIA is not set
--# CONFIG_I2C_VIAPRO is not set
--# CONFIG_I2C_VOODOO3 is not set
+-	! Restore (when we come back to a previously saved task)
+-.Lsave_pc:
+-	addi.l	r15, 32, r0	! r0 = next's regs
+-	addi.l	r0, (63*8), r8	! r8 = next's tr_regs
 -
--#
--# Miscellaneous I2C Chip support
--#
--# CONFIG_SENSORS_DS1337 is not set
--# CONFIG_SENSORS_DS1374 is not set
--# CONFIG_DS1682 is not set
--# CONFIG_SENSORS_EEPROM is not set
--# CONFIG_SENSORS_PCF8574 is not set
--# CONFIG_SENSORS_PCA9539 is not set
--# CONFIG_SENSORS_PCF8591 is not set
--# CONFIG_SENSORS_MAX6875 is not set
--# CONFIG_SENSORS_TSL2550 is not set
--# CONFIG_I2C_DEBUG_CORE is not set
--# CONFIG_I2C_DEBUG_ALGO is not set
--# CONFIG_I2C_DEBUG_BUS is not set
--# CONFIG_I2C_DEBUG_CHIP is not set
+-	ld.q	r8, (5*8), r45
+-	ld.q	r8, (6*8), r46
+-	ld.q	r8, (7*8), r47
+-	ptabs	r45, tr5
+-	ptabs	r46, tr6
+-	ptabs	r47, tr7
 -
--#
--# SPI support
--#
--# CONFIG_SPI is not set
--# CONFIG_SPI_MASTER is not set
--# CONFIG_W1 is not set
--# CONFIG_POWER_SUPPLY is not set
--CONFIG_HWMON=y
--# CONFIG_HWMON_VID is not set
--# CONFIG_SENSORS_AD7418 is not set
--# CONFIG_SENSORS_ADM1021 is not set
--# CONFIG_SENSORS_ADM1025 is not set
--# CONFIG_SENSORS_ADM1026 is not set
--# CONFIG_SENSORS_ADM1029 is not set
--# CONFIG_SENSORS_ADM1031 is not set
--# CONFIG_SENSORS_ADM9240 is not set
--# CONFIG_SENSORS_ADT7470 is not set
--# CONFIG_SENSORS_ATXP1 is not set
--# CONFIG_SENSORS_DS1621 is not set
--# CONFIG_SENSORS_F71805F is not set
--# CONFIG_SENSORS_F71882FG is not set
--# CONFIG_SENSORS_F75375S is not set
--# CONFIG_SENSORS_GL518SM is not set
--# CONFIG_SENSORS_GL520SM is not set
--# CONFIG_SENSORS_IT87 is not set
--# CONFIG_SENSORS_LM63 is not set
--# CONFIG_SENSORS_LM75 is not set
--# CONFIG_SENSORS_LM77 is not set
--# CONFIG_SENSORS_LM78 is not set
--# CONFIG_SENSORS_LM80 is not set
--# CONFIG_SENSORS_LM83 is not set
--# CONFIG_SENSORS_LM85 is not set
--# CONFIG_SENSORS_LM87 is not set
--# CONFIG_SENSORS_LM90 is not set
--# CONFIG_SENSORS_LM92 is not set
--# CONFIG_SENSORS_LM93 is not set
--# CONFIG_SENSORS_MAX1619 is not set
--# CONFIG_SENSORS_MAX6650 is not set
--# CONFIG_SENSORS_PC87360 is not set
--# CONFIG_SENSORS_PC87427 is not set
--# CONFIG_SENSORS_SIS5595 is not set
--# CONFIG_SENSORS_DME1737 is not set
--# CONFIG_SENSORS_SMSC47M1 is not set
--# CONFIG_SENSORS_SMSC47M192 is not set
--# CONFIG_SENSORS_SMSC47B397 is not set
--# CONFIG_SENSORS_THMC50 is not set
--# CONFIG_SENSORS_VIA686A is not set
--# CONFIG_SENSORS_VT1211 is not set
--# CONFIG_SENSORS_VT8231 is not set
--# CONFIG_SENSORS_W83781D is not set
--# CONFIG_SENSORS_W83791D is not set
--# CONFIG_SENSORS_W83792D is not set
--# CONFIG_SENSORS_W83793 is not set
--# CONFIG_SENSORS_W83L785TS is not set
--# CONFIG_SENSORS_W83627HF is not set
--# CONFIG_SENSORS_W83627EHF is not set
--# CONFIG_HWMON_DEBUG_CHIP is not set
--CONFIG_WATCHDOG=y
--# CONFIG_WATCHDOG_NOWAYOUT is not set
+-	ld.q	r0, ( 9*8), r9
+-	ld.q	r0, (10*8), r10
+-	ld.q	r0, (11*8), r11
+-	ld.q	r0, (12*8), r12
+-	ld.q	r0, (13*8), r13
+-	ld.q	r0, (14*8), r14
+-	ld.q	r0, (16*8), r16
 -
--#
--# Watchdog Device Drivers
--#
--# CONFIG_SOFT_WATCHDOG is not set
+-	ld.q	r0, (24*8), r24
+-	ld.q	r0, (25*8), r25
+-	ld.q	r0, (26*8), r26
+-	ld.q	r0, (27*8), r27
+-	ld.q	r0, (28*8), r28
+-	ld.q	r0, (29*8), r29
+-	ld.q	r0, (30*8), r30
+-	ld.q	r0, (31*8), r31
+-	ld.q	r0, (32*8), r32
+-	ld.q	r0, (33*8), r33
+-	ld.q	r0, (34*8), r34
+-	ld.q	r0, (35*8), r35
 -
--#
--# PCI-based Watchdog Cards
--#
--# CONFIG_PCIPCWATCHDOG is not set
--# CONFIG_WDTPCI is not set
+-	ld.q	r0, (44*8), r44
+-	ld.q	r0, (45*8), r45
+-	ld.q	r0, (46*8), r46
+-	ld.q	r0, (47*8), r47
+-	ld.q	r0, (48*8), r48
+-	ld.q	r0, (49*8), r49
+-	ld.q	r0, (50*8), r50
+-	ld.q	r0, (51*8), r51
+-	ld.q	r0, (52*8), r52
+-	ld.q	r0, (53*8), r53
+-	ld.q	r0, (54*8), r54
+-	ld.q	r0, (55*8), r55
+-	ld.q	r0, (56*8), r56
+-	ld.q	r0, (57*8), r57
+-	ld.q	r0, (58*8), r58
+-	ld.q	r0, (59*8), r59
 -
--#
--# Sonics Silicon Backplane
--#
--CONFIG_SSB_POSSIBLE=y
--# CONFIG_SSB is not set
+-	! epilogue
+-	ld.l	r15, 0, r18
+-	ld.l	r15, 4, r14
+-	ptabs	r18, tr0
+-	movi	FRAME_SIZE, r0
+-	add	r15, r0, r15
+-	blink	tr0, r63
+-__sh64_switch_to_end:
+-.LFE1:
+-	.size	sh64_switch_to,.LFE1-sh64_switch_to
 -
--#
--# Multifunction device drivers
--#
--# CONFIG_MFD_SM501 is not set
+diff --git a/arch/sh64/kernel/sys_sh64.c b/arch/sh64/kernel/sys_sh64.c
+deleted file mode 100644
+index de0a303..0000000
+--- a/arch/sh64/kernel/sys_sh64.c
++++ /dev/null
+@@ -1,304 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/sys_sh64.c
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- *
+- * This file contains various random system calls that
+- * have a non-standard calling sequence on the Linux/SH5
+- * platform.
+- *
+- * Mostly taken from i386 version.
+- *
+- */
 -
--#
--# Multimedia devices
--#
--CONFIG_VIDEO_DEV=m
--# CONFIG_VIDEO_V4L1 is not set
--# CONFIG_VIDEO_V4L1_COMPAT is not set
--CONFIG_VIDEO_V4L2=y
--CONFIG_VIDEO_CAPTURE_DRIVERS=y
--# CONFIG_VIDEO_ADV_DEBUG is not set
--CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
--# CONFIG_VIDEO_VIVI is not set
--# CONFIG_VIDEO_SAA5246A is not set
--# CONFIG_VIDEO_SAA5249 is not set
--# CONFIG_VIDEO_SAA7134 is not set
--# CONFIG_VIDEO_HEXIUM_ORION is not set
--# CONFIG_VIDEO_HEXIUM_GEMINI is not set
--# CONFIG_VIDEO_CX88 is not set
--# CONFIG_VIDEO_CX23885 is not set
--# CONFIG_VIDEO_CAFE_CCIC is not set
--# CONFIG_RADIO_ADAPTERS is not set
--CONFIG_DVB_CORE=y
--# CONFIG_DVB_CORE_ATTACH is not set
--CONFIG_DVB_CAPTURE_DRIVERS=y
+-#include <linux/errno.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/fs.h>
+-#include <linux/smp.h>
+-#include <linux/sem.h>
+-#include <linux/msg.h>
+-#include <linux/shm.h>
+-#include <linux/stat.h>
+-#include <linux/mman.h>
+-#include <linux/file.h>
+-#include <linux/utsname.h>
+-#include <linux/syscalls.h>
+-#include <linux/ipc.h>
+-#include <asm/uaccess.h>
+-#include <asm/ptrace.h>
+-#include <asm/unistd.h>
 -
--#
--# Supported SAA7146 based PCI Adapters
--#
+-#define REG_3	3
 -
--#
--# Supported FlexCopII (B2C2) Adapters
--#
--# CONFIG_DVB_B2C2_FLEXCOP is not set
+-/*
+- * sys_pipe() is the normal C calling standard for creating
+- * a pipe. It's not the way Unix traditionally does this, though.
+- */
+-#ifdef NEW_PIPE_IMPLEMENTATION
+-asmlinkage int sys_pipe(unsigned long * fildes,
+-			unsigned long   dummy_r3,
+-			unsigned long   dummy_r4,
+-			unsigned long   dummy_r5,
+-			unsigned long   dummy_r6,
+-			unsigned long   dummy_r7,
+-			struct pt_regs * regs)	   /* r8 = pt_regs  forced by entry.S */
+-{
+-	int fd[2];
+-	int ret;
 -
--#
--# Supported BT878 Adapters
--#
+-	ret = do_pipe(fd);
+-	if (ret == 0)
+-		/*
+-		 ***********************************************************************
+-		 *   To avoid the copy_to_user we prefer to break the ABIs convention, *
+-		 *   packing the valid pair of file IDs into a single register (r3);   *
+-		 *   while r2 is the return code as defined by the sh5-ABIs.	       *
+-		 *   BE CAREFUL: pipe stub, into glibc, must be aware of this solution *
+-		 ***********************************************************************
 -
--#
--# Supported Pluto2 Adapters
--#
--# CONFIG_DVB_PLUTO2 is not set
+-#ifdef __LITTLE_ENDIAN__
+-		regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]);
+-#else
+-		regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]);
+-#endif
 -
--#
--# Supported DVB Frontends
--#
+-		*/
+-	       /* although not very clever this is endianess independent */
+-		regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd);
 -
--#
--# Customise DVB Frontends
--#
--# CONFIG_DVB_FE_CUSTOMISE is not set
+-	return ret;
+-}
 -
--#
--# DVB-S (satellite) frontends
--#
--# CONFIG_DVB_STV0299 is not set
--# CONFIG_DVB_CX24110 is not set
--# CONFIG_DVB_CX24123 is not set
--# CONFIG_DVB_TDA8083 is not set
--# CONFIG_DVB_MT312 is not set
--# CONFIG_DVB_VES1X93 is not set
--# CONFIG_DVB_S5H1420 is not set
--# CONFIG_DVB_TDA10086 is not set
+-#else
+-asmlinkage int sys_pipe(unsigned long * fildes)
+-{
+-        int fd[2];
+-        int error;
 -
--#
--# DVB-T (terrestrial) frontends
--#
--# CONFIG_DVB_SP8870 is not set
--# CONFIG_DVB_SP887X is not set
--# CONFIG_DVB_CX22700 is not set
--# CONFIG_DVB_CX22702 is not set
--# CONFIG_DVB_L64781 is not set
--# CONFIG_DVB_TDA1004X is not set
--# CONFIG_DVB_NXT6000 is not set
--# CONFIG_DVB_MT352 is not set
--# CONFIG_DVB_ZL10353 is not set
--# CONFIG_DVB_DIB3000MB is not set
--# CONFIG_DVB_DIB3000MC is not set
--# CONFIG_DVB_DIB7000M is not set
--# CONFIG_DVB_DIB7000P is not set
+-        error = do_pipe(fd);
+-        if (!error) {
+-                if (copy_to_user(fildes, fd, 2*sizeof(int)))
+-                        error = -EFAULT;
+-        }
+-        return error;
+-}
 -
--#
--# DVB-C (cable) frontends
--#
--# CONFIG_DVB_VES1820 is not set
--# CONFIG_DVB_TDA10021 is not set
--# CONFIG_DVB_TDA10023 is not set
--# CONFIG_DVB_STV0297 is not set
+-#endif
 -
--#
--# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
--#
--# CONFIG_DVB_NXT200X is not set
--# CONFIG_DVB_OR51211 is not set
--# CONFIG_DVB_OR51132 is not set
--# CONFIG_DVB_BCM3510 is not set
--# CONFIG_DVB_LGDT330X is not set
--# CONFIG_DVB_S5H1409 is not set
+-/*
+- * To avoid cache alias, we map the shard page with same color.
+- */
+-#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))
 -
--#
--# Tuners/PLL support
--#
--# CONFIG_DVB_PLL is not set
--# CONFIG_DVB_TDA826X is not set
--# CONFIG_DVB_TDA827X is not set
--# CONFIG_DVB_TUNER_QT1010 is not set
--# CONFIG_DVB_TUNER_MT2060 is not set
--# CONFIG_DVB_TUNER_MT2266 is not set
--# CONFIG_DVB_TUNER_MT2131 is not set
--# CONFIG_DVB_TUNER_DIB0070 is not set
+-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+-	unsigned long len, unsigned long pgoff, unsigned long flags)
+-{
+-	struct vm_area_struct *vma;
 -
--#
--# Miscellaneous devices
--#
--# CONFIG_DVB_LNBP21 is not set
--# CONFIG_DVB_ISL6421 is not set
--# CONFIG_DVB_TUA6100 is not set
--CONFIG_DAB=y
+-	if (flags & MAP_FIXED) {
+-		/* We do not accept a shared mapping if it would violate
+-		 * cache aliasing constraints.
+-		 */
+-		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
+-			return -EINVAL;
+-		return addr;
+-	}
 -
--#
--# Graphics support
--#
--# CONFIG_DRM is not set
--# CONFIG_VGASTATE is not set
--CONFIG_VIDEO_OUTPUT_CONTROL=y
--CONFIG_FB=y
--CONFIG_FIRMWARE_EDID=y
--# CONFIG_FB_DDC is not set
--CONFIG_FB_CFB_FILLRECT=y
--CONFIG_FB_CFB_COPYAREA=y
--CONFIG_FB_CFB_IMAGEBLIT=y
--# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
--# CONFIG_FB_SYS_FILLRECT is not set
--# CONFIG_FB_SYS_COPYAREA is not set
--# CONFIG_FB_SYS_IMAGEBLIT is not set
--# CONFIG_FB_SYS_FOPS is not set
--CONFIG_FB_DEFERRED_IO=y
--# CONFIG_FB_SVGALIB is not set
--# CONFIG_FB_MACMODES is not set
--# CONFIG_FB_BACKLIGHT is not set
--CONFIG_FB_MODE_HELPERS=y
--# CONFIG_FB_TILEBLITTING is not set
+-	if (len > TASK_SIZE)
+-		return -ENOMEM;
+-	if (!addr)
+-		addr = TASK_UNMAPPED_BASE;
 -
--#
--# Frame buffer hardware drivers
--#
--# CONFIG_FB_CIRRUS is not set
--# CONFIG_FB_PM2 is not set
--# CONFIG_FB_CYBER2000 is not set
--# CONFIG_FB_ASILIANT is not set
--# CONFIG_FB_IMSTT is not set
--# CONFIG_FB_S1D13XXX is not set
--# CONFIG_FB_NVIDIA is not set
--# CONFIG_FB_RIVA is not set
--# CONFIG_FB_MATROX is not set
--# CONFIG_FB_RADEON is not set
--# CONFIG_FB_ATY128 is not set
--# CONFIG_FB_ATY is not set
--# CONFIG_FB_S3 is not set
--# CONFIG_FB_SAVAGE is not set
--# CONFIG_FB_SIS is not set
--# CONFIG_FB_NEOMAGIC is not set
--CONFIG_FB_KYRO=y
--# CONFIG_FB_3DFX is not set
--# CONFIG_FB_VOODOO1 is not set
--# CONFIG_FB_VT8623 is not set
--# CONFIG_FB_TRIDENT is not set
--# CONFIG_FB_ARK is not set
--# CONFIG_FB_PM3 is not set
--# CONFIG_FB_VIRTUAL is not set
--# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-	if (flags & MAP_PRIVATE)
+-		addr = PAGE_ALIGN(addr);
+-	else
+-		addr = COLOUR_ALIGN(addr);
 -
--#
--# Display device support
--#
--# CONFIG_DISPLAY_SUPPORT is not set
+-	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+-		/* At this point:  (!vma || addr < vma->vm_end). */
+-		if (TASK_SIZE - len < addr)
+-			return -ENOMEM;
+-		if (!vma || addr + len <= vma->vm_start)
+-			return addr;
+-		addr = vma->vm_end;
+-		if (!(flags & MAP_PRIVATE))
+-			addr = COLOUR_ALIGN(addr);
+-	}
+-}
 -
--#
--# Console display driver support
--#
--CONFIG_DUMMY_CONSOLE=y
--CONFIG_FRAMEBUFFER_CONSOLE=y
--# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
--# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
--CONFIG_FONTS=y
--# CONFIG_FONT_8x8 is not set
--CONFIG_FONT_8x16=y
--# CONFIG_FONT_6x11 is not set
--# CONFIG_FONT_7x14 is not set
--# CONFIG_FONT_PEARL_8x8 is not set
--# CONFIG_FONT_ACORN_8x8 is not set
--# CONFIG_FONT_MINI_4x6 is not set
--# CONFIG_FONT_SUN8x16 is not set
--# CONFIG_FONT_SUN12x22 is not set
--# CONFIG_FONT_10x18 is not set
--CONFIG_LOGO=y
--# CONFIG_LOGO_LINUX_MONO is not set
--# CONFIG_LOGO_LINUX_VGA16 is not set
--# CONFIG_LOGO_LINUX_CLUT224 is not set
--# CONFIG_LOGO_SUPERH_MONO is not set
--# CONFIG_LOGO_SUPERH_VGA16 is not set
--CONFIG_LOGO_SUPERH_CLUT224=y
+-/* common code for old and new mmaps */
+-static inline long do_mmap2(
+-	unsigned long addr, unsigned long len,
+-	unsigned long prot, unsigned long flags,
+-	unsigned long fd, unsigned long pgoff)
+-{
+-	int error = -EBADF;
+-	struct file * file = NULL;
 -
--#
--# Sound
--#
--# CONFIG_SOUND is not set
--CONFIG_HID_SUPPORT=y
--CONFIG_HID=y
--# CONFIG_HID_DEBUG is not set
--# CONFIG_HIDRAW is not set
--CONFIG_USB_SUPPORT=y
--CONFIG_USB_ARCH_HAS_HCD=y
--CONFIG_USB_ARCH_HAS_OHCI=y
--CONFIG_USB_ARCH_HAS_EHCI=y
--# CONFIG_USB is not set
+-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+-	if (!(flags & MAP_ANONYMOUS)) {
+-		file = fget(fd);
+-		if (!file)
+-			goto out;
+-	}
 -
--#
--# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
--#
+-	down_write(&current->mm->mmap_sem);
+-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+-	up_write(&current->mm->mmap_sem);
 -
--#
--# USB Gadget Support
--#
--# CONFIG_USB_GADGET is not set
--# CONFIG_MMC is not set
--# CONFIG_NEW_LEDS is not set
--# CONFIG_INFINIBAND is not set
--# CONFIG_RTC_CLASS is not set
+-	if (file)
+-		fput(file);
+-out:
+-	return error;
+-}
 -
--#
--# Userspace I/O
--#
--# CONFIG_UIO is not set
+-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+-	unsigned long prot, unsigned long flags,
+-	unsigned long fd, unsigned long pgoff)
+-{
+-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
+-}
 -
--#
--# File systems
--#
--CONFIG_EXT2_FS=y
--# CONFIG_EXT2_FS_XATTR is not set
--# CONFIG_EXT2_FS_XIP is not set
--CONFIG_EXT3_FS=y
--CONFIG_EXT3_FS_XATTR=y
--# CONFIG_EXT3_FS_POSIX_ACL is not set
--# CONFIG_EXT3_FS_SECURITY is not set
--# CONFIG_EXT4DEV_FS is not set
--CONFIG_JBD=y
--# CONFIG_JBD_DEBUG is not set
--CONFIG_FS_MBCACHE=y
--# CONFIG_REISERFS_FS is not set
--# CONFIG_JFS_FS is not set
--# CONFIG_FS_POSIX_ACL is not set
--# CONFIG_XFS_FS is not set
--# CONFIG_GFS2_FS is not set
--# CONFIG_OCFS2_FS is not set
--CONFIG_MINIX_FS=y
--CONFIG_ROMFS_FS=y
--CONFIG_INOTIFY=y
--CONFIG_INOTIFY_USER=y
--# CONFIG_QUOTA is not set
--CONFIG_DNOTIFY=y
--# CONFIG_AUTOFS_FS is not set
--# CONFIG_AUTOFS4_FS is not set
--# CONFIG_FUSE_FS is not set
+-asmlinkage int old_mmap(unsigned long addr, unsigned long len,
+-	unsigned long prot, unsigned long flags,
+-	int fd, unsigned long off)
+-{
+-	if (off & ~PAGE_MASK)
+-		return -EINVAL;
+-	return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
+-}
 -
--#
--# CD-ROM/DVD Filesystems
--#
--# CONFIG_ISO9660_FS is not set
--# CONFIG_UDF_FS is not set
+-/*
+- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+- *
+- * This is really horribly ugly.
+- */
+-asmlinkage int sys_ipc(uint call, int first, int second,
+-		       int third, void __user *ptr, long fifth)
+-{
+-	int version, ret;
 -
--#
--# DOS/FAT/NT Filesystems
--#
--# CONFIG_MSDOS_FS is not set
--# CONFIG_VFAT_FS is not set
--# CONFIG_NTFS_FS is not set
+-	version = call >> 16; /* hack for backward compatibility */
+-	call &= 0xffff;
 -
--#
--# Pseudo filesystems
--#
--CONFIG_PROC_FS=y
--CONFIG_PROC_KCORE=y
--CONFIG_PROC_SYSCTL=y
--CONFIG_SYSFS=y
--CONFIG_TMPFS=y
--# CONFIG_TMPFS_POSIX_ACL is not set
--CONFIG_HUGETLBFS=y
--CONFIG_HUGETLB_PAGE=y
--# CONFIG_CONFIGFS_FS is not set
+-	if (call <= SEMCTL)
+-		switch (call) {
+-		case SEMOP:
+-			return sys_semtimedop(first, (struct sembuf __user *)ptr,
+-					      second, NULL);
+-		case SEMTIMEDOP:
+-			return sys_semtimedop(first, (struct sembuf __user *)ptr,
+-					      second,
+-					      (const struct timespec __user *)fifth);
+-		case SEMGET:
+-			return sys_semget (first, second, third);
+-		case SEMCTL: {
+-			union semun fourth;
+-			if (!ptr)
+-				return -EINVAL;
+-			if (get_user(fourth.__pad, (void * __user *) ptr))
+-				return -EFAULT;
+-			return sys_semctl (first, second, third, fourth);
+-			}
+-		default:
+-			return -EINVAL;
+-		}
 -
--#
--# Miscellaneous filesystems
--#
--# CONFIG_ADFS_FS is not set
--# CONFIG_AFFS_FS is not set
--# CONFIG_HFS_FS is not set
--# CONFIG_HFSPLUS_FS is not set
--# CONFIG_BEFS_FS is not set
--# CONFIG_BFS_FS is not set
--# CONFIG_EFS_FS is not set
--# CONFIG_CRAMFS is not set
--# CONFIG_VXFS_FS is not set
--# CONFIG_HPFS_FS is not set
--# CONFIG_QNX4FS_FS is not set
--# CONFIG_SYSV_FS is not set
--# CONFIG_UFS_FS is not set
--CONFIG_NETWORK_FILESYSTEMS=y
--CONFIG_NFS_FS=y
--CONFIG_NFS_V3=y
--# CONFIG_NFS_V3_ACL is not set
--# CONFIG_NFS_V4 is not set
--# CONFIG_NFS_DIRECTIO is not set
--# CONFIG_NFSD is not set
--CONFIG_ROOT_NFS=y
--CONFIG_LOCKD=y
--CONFIG_LOCKD_V4=y
--CONFIG_NFS_COMMON=y
--CONFIG_SUNRPC=y
--# CONFIG_SUNRPC_BIND34 is not set
--# CONFIG_RPCSEC_GSS_KRB5 is not set
--# CONFIG_RPCSEC_GSS_SPKM3 is not set
--# CONFIG_SMB_FS is not set
--# CONFIG_CIFS is not set
--# CONFIG_NCP_FS is not set
--# CONFIG_CODA_FS is not set
--# CONFIG_AFS_FS is not set
+-	if (call <= MSGCTL)
+-		switch (call) {
+-		case MSGSND:
+-			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
+-					  second, third);
+-		case MSGRCV:
+-			switch (version) {
+-			case 0: {
+-				struct ipc_kludge tmp;
+-				if (!ptr)
+-					return -EINVAL;
 -
--#
--# Partition Types
--#
--CONFIG_PARTITION_ADVANCED=y
--# CONFIG_ACORN_PARTITION is not set
--# CONFIG_OSF_PARTITION is not set
--# CONFIG_AMIGA_PARTITION is not set
--# CONFIG_ATARI_PARTITION is not set
--# CONFIG_MAC_PARTITION is not set
--CONFIG_MSDOS_PARTITION=y
--# CONFIG_BSD_DISKLABEL is not set
--# CONFIG_MINIX_SUBPARTITION is not set
--# CONFIG_SOLARIS_X86_PARTITION is not set
--# CONFIG_UNIXWARE_DISKLABEL is not set
--# CONFIG_LDM_PARTITION is not set
--# CONFIG_SGI_PARTITION is not set
--# CONFIG_ULTRIX_PARTITION is not set
--# CONFIG_SUN_PARTITION is not set
--# CONFIG_KARMA_PARTITION is not set
--# CONFIG_EFI_PARTITION is not set
--# CONFIG_SYSV68_PARTITION is not set
--# CONFIG_NLS is not set
--# CONFIG_DLM is not set
--CONFIG_INSTRUMENTATION=y
--# CONFIG_PROFILING is not set
--# CONFIG_MARKERS is not set
+-				if (copy_from_user(&tmp,
+-						   (struct ipc_kludge __user *) ptr,
+-						   sizeof (tmp)))
+-					return -EFAULT;
+-				return sys_msgrcv (first, tmp.msgp, second,
+-						   tmp.msgtyp, third);
+-				}
+-			default:
+-				return sys_msgrcv (first,
+-						   (struct msgbuf __user *) ptr,
+-						   second, fifth, third);
+-			}
+-		case MSGGET:
+-			return sys_msgget ((key_t) first, second);
+-		case MSGCTL:
+-			return sys_msgctl (first, second,
+-					   (struct msqid_ds __user *) ptr);
+-		default:
+-			return -EINVAL;
+-		}
+-	if (call <= SHMCTL)
+-		switch (call) {
+-		case SHMAT:
+-			switch (version) {
+-			default: {
+-				ulong raddr;
+-				ret = do_shmat (first, (char __user *) ptr,
+-						 second, &raddr);
+-				if (ret)
+-					return ret;
+-				return put_user (raddr, (ulong __user *) third);
+-			}
+-			case 1:	/* iBCS2 emulator entry point */
+-				if (!segment_eq(get_fs(), get_ds()))
+-					return -EINVAL;
+-				return do_shmat (first, (char __user *) ptr,
+-						  second, (ulong *) third);
+-			}
+-		case SHMDT:
+-			return sys_shmdt ((char __user *)ptr);
+-		case SHMGET:
+-			return sys_shmget (first, second, third);
+-		case SHMCTL:
+-			return sys_shmctl (first, second,
+-					   (struct shmid_ds __user *) ptr);
+-		default:
+-			return -EINVAL;
+-		}
 -
--#
--# Kernel hacking
--#
--# CONFIG_PRINTK_TIME is not set
--CONFIG_ENABLE_WARN_DEPRECATED=y
--CONFIG_ENABLE_MUST_CHECK=y
--CONFIG_MAGIC_SYSRQ=y
--# CONFIG_UNUSED_SYMBOLS is not set
--CONFIG_DEBUG_FS=y
--# CONFIG_HEADERS_CHECK is not set
--CONFIG_DEBUG_KERNEL=y
--# CONFIG_DEBUG_SHIRQ is not set
--CONFIG_DETECT_SOFTLOCKUP=y
--CONFIG_SCHED_DEBUG=y
--CONFIG_SCHEDSTATS=y
--# CONFIG_TIMER_STATS is not set
--# CONFIG_DEBUG_SLAB is not set
--# CONFIG_DEBUG_RT_MUTEXES is not set
--# CONFIG_RT_MUTEX_TESTER is not set
--# CONFIG_DEBUG_SPINLOCK is not set
--# CONFIG_DEBUG_MUTEXES is not set
--# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
--# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
--# CONFIG_DEBUG_KOBJECT is not set
--CONFIG_DEBUG_BUGVERBOSE=y
--# CONFIG_DEBUG_INFO is not set
--# CONFIG_DEBUG_VM is not set
--# CONFIG_DEBUG_LIST is not set
--# CONFIG_DEBUG_SG is not set
--CONFIG_FRAME_POINTER=y
--CONFIG_FORCED_INLINING=y
--# CONFIG_BOOT_PRINTK_DELAY is not set
--# CONFIG_RCU_TORTURE_TEST is not set
--# CONFIG_FAULT_INJECTION is not set
--# CONFIG_SAMPLES is not set
--# CONFIG_EARLY_PRINTK is not set
--CONFIG_SH64_PROC_TLB=y
--CONFIG_SH64_PROC_ASIDS=y
--CONFIG_SH64_SR_WATCH=y
--# CONFIG_POOR_MANS_STRACE is not set
--# CONFIG_SH_ALPHANUMERIC is not set
--# CONFIG_SH_NO_BSS_INIT is not set
+-	return -EINVAL;
+-}
 -
--#
--# Security options
--#
--# CONFIG_KEYS is not set
--# CONFIG_SECURITY is not set
--# CONFIG_SECURITY_FILE_CAPABILITIES is not set
--# CONFIG_CRYPTO is not set
+-asmlinkage int sys_uname(struct old_utsname * name)
+-{
+-	int err;
+-	if (!name)
+-		return -EFAULT;
+-	down_read(&uts_sem);
+-	err = copy_to_user(name, utsname(), sizeof (*name));
+-	up_read(&uts_sem);
+-	return err?-EFAULT:0;
+-}
 -
--#
--# Library routines
--#
--CONFIG_BITREVERSE=y
--# CONFIG_CRC_CCITT is not set
--# CONFIG_CRC16 is not set
--# CONFIG_CRC_ITU_T is not set
--CONFIG_CRC32=y
--# CONFIG_CRC7 is not set
--# CONFIG_LIBCRC32C is not set
--CONFIG_PLIST=y
--CONFIG_HAS_IOMEM=y
--CONFIG_HAS_IOPORT=y
--CONFIG_HAS_DMA=y
-diff --git a/arch/sh64/configs/harp_defconfig b/arch/sh64/configs/harp_defconfig
+-/*
+- * Do a system call from kernel instead of calling sys_execve so we
+- * end up with proper pt_regs.
+- */
+-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+-{
+-	register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
+-	register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
+-	register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
+-	register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
+-	__asm__ __volatile__ ("trapa	%1 !\t\t\t execve(%2,%3,%4)"
+-	: "=r" (__sc0)
+-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
+-	__asm__ __volatile__ ("!dummy	%0 %1 %2 %3"
+-	: : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
+-	return __sc0;
+-}
+diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S
 deleted file mode 100644
-index ba302cd..0000000
---- a/arch/sh64/configs/harp_defconfig
+index abb94c0..0000000
+--- a/arch/sh64/kernel/syscalls.S
 +++ /dev/null
-@@ -1,745 +0,0 @@
--#
--# Automatically generated make config: don't edit
--# Linux kernel version: 2.6.24-rc1
--# Fri Nov  2 14:35:57 2007
--#
--CONFIG_SUPERH=y
--CONFIG_SUPERH64=y
--CONFIG_MMU=y
--CONFIG_QUICKLIST=y
--CONFIG_RWSEM_GENERIC_SPINLOCK=y
--CONFIG_GENERIC_FIND_NEXT_BIT=y
--CONFIG_GENERIC_HWEIGHT=y
--CONFIG_GENERIC_CALIBRATE_DELAY=y
--CONFIG_GENERIC_HARDIRQS=y
--CONFIG_GENERIC_IRQ_PROBE=y
--# CONFIG_ARCH_HAS_ILOG2_U32 is not set
--# CONFIG_ARCH_HAS_ILOG2_U64 is not set
--CONFIG_ARCH_NO_VIRT_TO_BUS=y
--CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+@@ -1,381 +0,0 @@
+-/*
+- * arch/sh64/kernel/syscalls.S
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2004 - 2007  Paul Mundt
+- * Copyright (C) 2003, 2004 Richard Curnow
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
 -
--#
--# General setup
--#
--CONFIG_EXPERIMENTAL=y
--CONFIG_BROKEN_ON_SMP=y
--CONFIG_LOCK_KERNEL=y
--CONFIG_INIT_ENV_ARG_LIMIT=32
--CONFIG_LOCALVERSION=""
--CONFIG_LOCALVERSION_AUTO=y
--CONFIG_SWAP=y
--# CONFIG_SYSVIPC is not set
--CONFIG_POSIX_MQUEUE=y
--# CONFIG_BSD_PROCESS_ACCT is not set
--# CONFIG_TASKSTATS is not set
--# CONFIG_USER_NS is not set
--# CONFIG_AUDIT is not set
--# CONFIG_IKCONFIG is not set
--CONFIG_LOG_BUF_SHIFT=14
--# CONFIG_CGROUPS is not set
--CONFIG_FAIR_GROUP_SCHED=y
--CONFIG_FAIR_USER_SCHED=y
--# CONFIG_FAIR_CGROUP_SCHED is not set
--CONFIG_SYSFS_DEPRECATED=y
--# CONFIG_RELAY is not set
--# CONFIG_BLK_DEV_INITRD is not set
--# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
--CONFIG_SYSCTL=y
--# CONFIG_EMBEDDED is not set
--CONFIG_UID16=y
--CONFIG_SYSCTL_SYSCALL=y
--CONFIG_KALLSYMS=y
--# CONFIG_KALLSYMS_ALL is not set
--# CONFIG_KALLSYMS_EXTRA_PASS is not set
--CONFIG_HOTPLUG=y
--CONFIG_PRINTK=y
--CONFIG_BUG=y
--CONFIG_ELF_CORE=y
--CONFIG_BASE_FULL=y
--CONFIG_FUTEX=y
--CONFIG_ANON_INODES=y
--CONFIG_EPOLL=y
--CONFIG_SIGNALFD=y
--CONFIG_EVENTFD=y
--CONFIG_SHMEM=y
--CONFIG_VM_EVENT_COUNTERS=y
--CONFIG_SLAB=y
--# CONFIG_SLUB is not set
--# CONFIG_SLOB is not set
--CONFIG_RT_MUTEXES=y
--# CONFIG_TINY_SHMEM is not set
--CONFIG_BASE_SMALL=0
--# CONFIG_MODULES is not set
--CONFIG_BLOCK=y
--# CONFIG_LBD is not set
--# CONFIG_BLK_DEV_IO_TRACE is not set
--# CONFIG_LSF is not set
--# CONFIG_BLK_DEV_BSG is not set
+-#include <linux/sys.h>
 -
--#
--# IO Schedulers
--#
--CONFIG_IOSCHED_NOOP=y
--CONFIG_IOSCHED_AS=y
--CONFIG_IOSCHED_DEADLINE=y
--CONFIG_IOSCHED_CFQ=y
--# CONFIG_DEFAULT_AS is not set
--# CONFIG_DEFAULT_DEADLINE is not set
--CONFIG_DEFAULT_CFQ=y
--# CONFIG_DEFAULT_NOOP is not set
--CONFIG_DEFAULT_IOSCHED="cfq"
+-	.section .data, "aw"
+-	.balign 32
 -
--#
--# System type
--#
--# CONFIG_SH_SIMULATOR is not set
--# CONFIG_SH_CAYMAN is not set
--CONFIG_SH_HARP=y
--CONFIG_CPU_SH5=y
--CONFIG_CPU_SUBTYPE_SH5_101=y
--# CONFIG_CPU_SUBTYPE_SH5_103 is not set
--CONFIG_LITTLE_ENDIAN=y
--# CONFIG_BIG_ENDIAN is not set
--CONFIG_SH_FPU=y
--# CONFIG_SH64_FPU_DENORM_FLUSH is not set
--CONFIG_SH64_PGTABLE_2_LEVEL=y
--# CONFIG_SH64_PGTABLE_3_LEVEL is not set
--CONFIG_HUGETLB_PAGE_SIZE_64K=y
--# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
--# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
--CONFIG_SH64_USER_MISALIGNED_FIXUP=y
+-/*
+- * System calls jump table
+- */
+-	.globl  sys_call_table
+-sys_call_table:
+-	.long sys_restart_syscall	/* 0  -  old "setup()" system call  */
+-	.long sys_exit
+-	.long sys_fork
+-	.long sys_read
+-	.long sys_write
+-	.long sys_open			/* 5 */
+-	.long sys_close
+-	.long sys_waitpid
+-	.long sys_creat
+-	.long sys_link
+-	.long sys_unlink		/* 10 */
+-	.long sys_execve
+-	.long sys_chdir
+-	.long sys_time
+-	.long sys_mknod
+-	.long sys_chmod			/* 15 */
+-	.long sys_lchown16
+-	.long sys_ni_syscall	/* old break syscall holder */
+-	.long sys_stat
+-	.long sys_lseek
+-	.long sys_getpid		/* 20 */
+-	.long sys_mount
+-	.long sys_oldumount
+-	.long sys_setuid16
+-	.long sys_getuid16
+-	.long sys_stime			/* 25 */
+-	.long sh64_ptrace
+-	.long sys_alarm
+-	.long sys_fstat
+-	.long sys_pause
+-	.long sys_utime			/* 30 */
+-	.long sys_ni_syscall	/* old stty syscall holder */
+-	.long sys_ni_syscall	/* old gtty syscall holder */
+-	.long sys_access
+-	.long sys_nice
+-	.long sys_ni_syscall		/* 35 */ /* old ftime syscall holder */
+-	.long sys_sync
+-	.long sys_kill
+-	.long sys_rename
+-	.long sys_mkdir
+-	.long sys_rmdir			/* 40 */
+-	.long sys_dup
+-	.long sys_pipe
+-	.long sys_times
+-	.long sys_ni_syscall	/* old prof syscall holder */
+-	.long sys_brk			/* 45 */
+-	.long sys_setgid16
+-	.long sys_getgid16
+-	.long sys_signal
+-	.long sys_geteuid16
+-	.long sys_getegid16		/* 50 */
+-	.long sys_acct
+-	.long sys_umount		/* recycled never used phys( */
+-	.long sys_ni_syscall	/* old lock syscall holder */
+-	.long sys_ioctl
+-	.long sys_fcntl			/* 55 */
+-	.long sys_ni_syscall	/* old mpx syscall holder */
+-	.long sys_setpgid
+-	.long sys_ni_syscall	/* old ulimit syscall holder */
+-	.long sys_ni_syscall	/* sys_olduname */
+-	.long sys_umask			/* 60 */
+-	.long sys_chroot
+-	.long sys_ustat
+-	.long sys_dup2
+-	.long sys_getppid
+-	.long sys_getpgrp		/* 65 */
+-	.long sys_setsid
+-	.long sys_sigaction
+-	.long sys_sgetmask
+-	.long sys_ssetmask
+-	.long sys_setreuid16		/* 70 */
+-	.long sys_setregid16
+-	.long sys_sigsuspend
+-	.long sys_sigpending
+-	.long sys_sethostname
+-	.long sys_setrlimit		/* 75 */
+-	.long sys_old_getrlimit
+-	.long sys_getrusage
+-	.long sys_gettimeofday
+-	.long sys_settimeofday
+-	.long sys_getgroups16		/* 80 */
+-	.long sys_setgroups16
+-	.long sys_ni_syscall	/* sys_oldselect */
+-	.long sys_symlink
+-	.long sys_lstat
+-	.long sys_readlink		/* 85 */
+-	.long sys_uselib
+-	.long sys_swapon
+-	.long sys_reboot
+-	.long old_readdir
+-	.long old_mmap			/* 90 */
+-	.long sys_munmap
+-	.long sys_truncate
+-	.long sys_ftruncate
+-	.long sys_fchmod
+-	.long sys_fchown16		/* 95 */
+-	.long sys_getpriority
+-	.long sys_setpriority
+-	.long sys_ni_syscall	/* old profil syscall holder */
+-	.long sys_statfs
+-	.long sys_fstatfs		/* 100 */
+-	.long sys_ni_syscall	/* ioperm */
+-	.long sys_socketcall	/* Obsolete implementation of socket syscall */
+-	.long sys_syslog
+-	.long sys_setitimer
+-	.long sys_getitimer		/* 105 */
+-	.long sys_newstat
+-	.long sys_newlstat
+-	.long sys_newfstat
+-	.long sys_uname
+-	.long sys_ni_syscall		/* 110 */ /* iopl */
+-	.long sys_vhangup
+-	.long sys_ni_syscall	/* idle */
+-	.long sys_ni_syscall	/* vm86old */
+-	.long sys_wait4
+-	.long sys_swapoff		/* 115 */
+-	.long sys_sysinfo
+-	.long sys_ipc		/* Obsolete ipc syscall implementation */
+-	.long sys_fsync
+-	.long sys_sigreturn
+-	.long sys_clone			/* 120 */
+-	.long sys_setdomainname
+-	.long sys_newuname
+-	.long sys_ni_syscall	/* sys_modify_ldt */
+-	.long sys_adjtimex
+-	.long sys_mprotect		/* 125 */
+-	.long sys_sigprocmask
+-	.long sys_ni_syscall		/* old "create_module" */
+-	.long sys_init_module
+-	.long sys_delete_module
+-	.long sys_ni_syscall		/* 130: old "get_kernel_syms" */
+-	.long sys_quotactl
+-	.long sys_getpgid
+-	.long sys_fchdir
+-	.long sys_bdflush
+-	.long sys_sysfs			/* 135 */
+-	.long sys_personality
+-	.long sys_ni_syscall	/* for afs_syscall */
+-	.long sys_setfsuid16
+-	.long sys_setfsgid16
+-	.long sys_llseek		/* 140 */
+-	.long sys_getdents
+-	.long sys_select
+-	.long sys_flock
+-	.long sys_msync
+-	.long sys_readv			/* 145 */
+-	.long sys_writev
+-	.long sys_getsid
+-	.long sys_fdatasync
+-	.long sys_sysctl
+-	.long sys_mlock			/* 150 */
+-	.long sys_munlock
+-	.long sys_mlockall
+-	.long sys_munlockall
+-	.long sys_sched_setparam
+-	.long sys_sched_getparam	/* 155 */
+-	.long sys_sched_setscheduler
+-	.long sys_sched_getscheduler
+-	.long sys_sched_yield
+-	.long sys_sched_get_priority_max
+-	.long sys_sched_get_priority_min  /* 160 */
+-	.long sys_sched_rr_get_interval
+-	.long sys_nanosleep
+-	.long sys_mremap
+-	.long sys_setresuid16
+-	.long sys_getresuid16		/* 165 */
+-	.long sys_ni_syscall	/* vm86 */
+-	.long sys_ni_syscall	/* old "query_module" */
+-	.long sys_poll
+-	.long sys_nfsservctl
+-	.long sys_setresgid16		/* 170 */
+-	.long sys_getresgid16
+-	.long sys_prctl
+-	.long sys_rt_sigreturn
+-	.long sys_rt_sigaction
+-	.long sys_rt_sigprocmask	/* 175 */
+-	.long sys_rt_sigpending
+-	.long sys_rt_sigtimedwait
+-	.long sys_rt_sigqueueinfo
+-	.long sys_rt_sigsuspend
+-	.long sys_pread64		/* 180 */
+-	.long sys_pwrite64
+-	.long sys_chown16
+-	.long sys_getcwd
+-	.long sys_capget
+-	.long sys_capset		/* 185 */
+-	.long sys_sigaltstack
+-	.long sys_sendfile
+-	.long sys_ni_syscall	/* streams1 */
+-	.long sys_ni_syscall	/* streams2 */
+-	.long sys_vfork			/* 190 */
+-	.long sys_getrlimit
+-	.long sys_mmap2
+-	.long sys_truncate64
+-	.long sys_ftruncate64
+-	.long sys_stat64		/* 195 */
+-	.long sys_lstat64
+-	.long sys_fstat64
+-	.long sys_lchown
+-	.long sys_getuid
+-	.long sys_getgid		/* 200 */
+-	.long sys_geteuid
+-	.long sys_getegid
+-	.long sys_setreuid
+-	.long sys_setregid
+-	.long sys_getgroups		/* 205 */
+-	.long sys_setgroups
+-	.long sys_fchown
+-	.long sys_setresuid
+-	.long sys_getresuid
+-	.long sys_setresgid		/* 210 */
+-	.long sys_getresgid
+-	.long sys_chown
+-	.long sys_setuid
+-	.long sys_setgid
+-	.long sys_setfsuid		/* 215 */
+-	.long sys_setfsgid
+-	.long sys_pivot_root
+-	.long sys_mincore
+-	.long sys_madvise
+-	/* Broken-out socket family (maintain backwards compatibility in syscall
+-	   numbering with 2.4) */
+-	.long sys_socket		/* 220 */
+-	.long sys_bind
+-	.long sys_connect
+-	.long sys_listen
+-	.long sys_accept
+-	.long sys_getsockname		/* 225 */
+-	.long sys_getpeername
+-	.long sys_socketpair
+-	.long sys_send
+-	.long sys_sendto
+-	.long sys_recv			/* 230*/
+-	.long sys_recvfrom
+-	.long sys_shutdown
+-	.long sys_setsockopt
+-	.long sys_getsockopt
+-	.long sys_sendmsg		/* 235 */
+-	.long sys_recvmsg
+-	/* Broken-out IPC family (maintain backwards compatibility in syscall
+-	   numbering with 2.4) */
+-	.long sys_semop
+-	.long sys_semget
+-	.long sys_semctl
+-	.long sys_msgsnd		/* 240 */
+-	.long sys_msgrcv
+-	.long sys_msgget
+-	.long sys_msgctl
+-	.long sys_shmat
+-	.long sys_shmdt			/* 245 */
+-	.long sys_shmget
+-	.long sys_shmctl
+-	/* Rest of syscalls listed in 2.4 i386 unistd.h */
+-	.long sys_getdents64
+-	.long sys_fcntl64
+-	.long sys_ni_syscall		/* 250 reserved for TUX */
+-	.long sys_ni_syscall		/* Reserved for Security */
+-	.long sys_gettid
+-	.long sys_readahead
+-	.long sys_setxattr
+-	.long sys_lsetxattr		/* 255 */
+-	.long sys_fsetxattr
+-	.long sys_getxattr
+-	.long sys_lgetxattr
+-	.long sys_fgetxattr
+-	.long sys_listxattr		/* 260 */
+-	.long sys_llistxattr
+-	.long sys_flistxattr
+-	.long sys_removexattr
+-	.long sys_lremovexattr
+-	.long sys_fremovexattr  	/* 265 */
+-	.long sys_tkill
+-	.long sys_sendfile64
+-	.long sys_futex
+-	.long sys_sched_setaffinity
+-	.long sys_sched_getaffinity	/* 270 */
+-	.long sys_ni_syscall
+-	.long sys_ni_syscall
+-	.long sys_io_setup
+-	.long sys_io_destroy
+-	.long sys_io_getevents		/* 275 */
+-	.long sys_io_submit
+-	.long sys_io_cancel
+-	.long sys_fadvise64
+-	.long sys_ni_syscall
+-	.long sys_exit_group		/* 280 */
+-	/* Rest of new 2.6 syscalls */
+-	.long sys_lookup_dcookie
+-	.long sys_epoll_create
+-	.long sys_epoll_ctl
+-	.long sys_epoll_wait
+- 	.long sys_remap_file_pages	/* 285 */
+- 	.long sys_set_tid_address
+- 	.long sys_timer_create
+- 	.long sys_timer_settime
+- 	.long sys_timer_gettime
+- 	.long sys_timer_getoverrun	/* 290 */
+- 	.long sys_timer_delete
+- 	.long sys_clock_settime
+- 	.long sys_clock_gettime
+- 	.long sys_clock_getres
+- 	.long sys_clock_nanosleep	/* 295 */
+-	.long sys_statfs64
+-	.long sys_fstatfs64
+-	.long sys_tgkill
+-	.long sys_utimes
+- 	.long sys_fadvise64_64		/* 300 */
+-	.long sys_ni_syscall	/* Reserved for vserver */
+-	.long sys_ni_syscall	/* Reserved for mbind */
+-	.long sys_ni_syscall	/* get_mempolicy */
+-	.long sys_ni_syscall	/* set_mempolicy */
+-	.long sys_mq_open		/* 305 */
+-	.long sys_mq_unlink
+-	.long sys_mq_timedsend
+-	.long sys_mq_timedreceive
+-	.long sys_mq_notify
+-	.long sys_mq_getsetattr		/* 310 */
+-	.long sys_ni_syscall	/* Reserved for kexec */
+-	.long sys_waitid
+-	.long sys_add_key
+-	.long sys_request_key
+-	.long sys_keyctl		/* 315 */
+-	.long sys_ioprio_set
+-	.long sys_ioprio_get
+-	.long sys_inotify_init
+-	.long sys_inotify_add_watch
+-	.long sys_inotify_rm_watch	/* 320 */
+-	.long sys_ni_syscall
+-	.long sys_migrate_pages
+-	.long sys_openat
+-	.long sys_mkdirat
+-	.long sys_mknodat		/* 325 */
+-	.long sys_fchownat
+-	.long sys_futimesat
+-	.long sys_fstatat64
+-	.long sys_unlinkat
+-	.long sys_renameat		/* 330 */
+-	.long sys_linkat
+-	.long sys_symlinkat
+-	.long sys_readlinkat
+-	.long sys_fchmodat
+-	.long sys_faccessat		/* 335 */
+-	.long sys_pselect6
+-	.long sys_ppoll
+-	.long sys_unshare
+-	.long sys_set_robust_list
+-	.long sys_get_robust_list	/* 340 */
+-	.long sys_splice
+-	.long sys_sync_file_range
+-	.long sys_tee
+-	.long sys_vmsplice
+-	.long sys_move_pages		/* 345 */
+-	.long sys_getcpu
+-	.long sys_epoll_pwait
+-	.long sys_utimensat
+-	.long sys_signalfd
+-	.long sys_timerfd		/* 350 */
+-	.long sys_eventfd
+-	.long sys_fallocate
+diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
+deleted file mode 100644
+index 06f3c17..0000000
+--- a/arch/sh64/kernel/time.c
++++ /dev/null
+@@ -1,593 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/kernel/time.c
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003, 2004  Paul Mundt
+- * Copyright (C) 2003  Richard Curnow
+- *
+- *    Original TMU/RTC code taken from sh version.
+- *    Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
+- *      Some code taken from i386 version.
+- *      Copyright (C) 1991, 1992, 1995  Linus Torvalds
+- */
 -
--#
--# Memory options
--#
--CONFIG_CACHED_MEMORY_OFFSET=0x20000000
--CONFIG_MEMORY_START=0x80000000
--CONFIG_MEMORY_SIZE_IN_MB=128
+-#include <linux/errno.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/param.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
+-#include <linux/interrupt.h>
+-#include <linux/time.h>
+-#include <linux/delay.h>
+-#include <linux/init.h>
+-#include <linux/profile.h>
+-#include <linux/smp.h>
+-#include <linux/module.h>
+-#include <linux/bcd.h>
 -
--#
--# Cache options
--#
--CONFIG_DCACHE_WRITE_BACK=y
--# CONFIG_DCACHE_WRITE_THROUGH is not set
--# CONFIG_DCACHE_DISABLED is not set
--# CONFIG_ICACHE_DISABLED is not set
--CONFIG_PCIDEVICE_MEMORY_START=C0000000
--CONFIG_DEVICE_MEMORY_START=E0000000
--CONFIG_FLASH_MEMORY_START=0x00000000
--CONFIG_PCI_BLOCK_START=0x40000000
+-#include <asm/registers.h>	 /* required by inline __asm__ stmt. */
 -
--#
--# CPU Subtype specific options
--#
--CONFIG_SH64_ID2815_WORKAROUND=y
+-#include <asm/processor.h>
+-#include <asm/uaccess.h>
+-#include <asm/io.h>
+-#include <asm/irq.h>
+-#include <asm/delay.h>
 -
--#
--# Misc options
--#
--# CONFIG_SH_DMA is not set
--CONFIG_PREEMPT=y
--CONFIG_SELECT_MEMORY_MODEL=y
--CONFIG_FLATMEM_MANUAL=y
--# CONFIG_DISCONTIGMEM_MANUAL is not set
--# CONFIG_SPARSEMEM_MANUAL is not set
--CONFIG_FLATMEM=y
--CONFIG_FLAT_NODE_MEM_MAP=y
--# CONFIG_SPARSEMEM_STATIC is not set
--# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
--CONFIG_SPLIT_PTLOCK_CPUS=4
--# CONFIG_RESOURCES_64BIT is not set
--CONFIG_ZONE_DMA_FLAG=0
--CONFIG_NR_QUICK=1
+-#include <linux/timex.h>
+-#include <linux/irq.h>
+-#include <asm/hardware.h>
 -
--#
--# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
--#
--# CONFIG_ARCH_SUPPORTS_MSI is not set
--# CONFIG_PCCARD is not set
+-#define TMU_TOCR_INIT	0x00
+-#define TMU0_TCR_INIT	0x0020
+-#define TMU_TSTR_INIT	1
+-#define TMU_TSTR_OFF	0
 -
--#
--# Executable file formats
--#
--CONFIG_BINFMT_ELF=y
--# CONFIG_BINFMT_MISC is not set
+-/* RCR1 Bits */
+-#define RCR1_CF		0x80	/* Carry Flag             */
+-#define RCR1_CIE	0x10	/* Carry Interrupt Enable */
+-#define RCR1_AIE	0x08	/* Alarm Interrupt Enable */
+-#define RCR1_AF		0x01	/* Alarm Flag             */
 -
--#
--# Networking
--#
--CONFIG_NET=y
+-/* RCR2 Bits */
+-#define RCR2_PEF	0x80	/* PEriodic interrupt Flag */
+-#define RCR2_PESMASK	0x70	/* Periodic interrupt Set  */
+-#define RCR2_RTCEN	0x08	/* ENable RTC              */
+-#define RCR2_ADJ	0x04	/* ADJustment (30-second)  */
+-#define RCR2_RESET	0x02	/* Reset bit               */
+-#define RCR2_START	0x01	/* Start bit               */
 -
--#
--# Networking options
--#
--CONFIG_PACKET=y
--# CONFIG_PACKET_MMAP is not set
--CONFIG_UNIX=y
--CONFIG_XFRM=y
--# CONFIG_XFRM_USER is not set
--# CONFIG_XFRM_SUB_POLICY is not set
--# CONFIG_XFRM_MIGRATE is not set
--# CONFIG_NET_KEY is not set
--CONFIG_INET=y
--# CONFIG_IP_MULTICAST is not set
--# CONFIG_IP_ADVANCED_ROUTER is not set
--CONFIG_IP_FIB_HASH=y
--CONFIG_IP_PNP=y
--# CONFIG_IP_PNP_DHCP is not set
--# CONFIG_IP_PNP_BOOTP is not set
--# CONFIG_IP_PNP_RARP is not set
--# CONFIG_NET_IPIP is not set
--# CONFIG_NET_IPGRE is not set
--# CONFIG_ARPD is not set
--# CONFIG_SYN_COOKIES is not set
--# CONFIG_INET_AH is not set
--# CONFIG_INET_ESP is not set
--# CONFIG_INET_IPCOMP is not set
--# CONFIG_INET_XFRM_TUNNEL is not set
--# CONFIG_INET_TUNNEL is not set
--CONFIG_INET_XFRM_MODE_TRANSPORT=y
--CONFIG_INET_XFRM_MODE_TUNNEL=y
--CONFIG_INET_XFRM_MODE_BEET=y
--# CONFIG_INET_LRO is not set
--CONFIG_INET_DIAG=y
--CONFIG_INET_TCP_DIAG=y
--# CONFIG_TCP_CONG_ADVANCED is not set
--CONFIG_TCP_CONG_CUBIC=y
--CONFIG_DEFAULT_TCP_CONG="cubic"
--# CONFIG_TCP_MD5SIG is not set
--# CONFIG_IPV6 is not set
--# CONFIG_INET6_XFRM_TUNNEL is not set
--# CONFIG_INET6_TUNNEL is not set
--# CONFIG_NETWORK_SECMARK is not set
--# CONFIG_NETFILTER is not set
--# CONFIG_IP_DCCP is not set
--# CONFIG_IP_SCTP is not set
--# CONFIG_TIPC is not set
--# CONFIG_ATM is not set
--# CONFIG_BRIDGE is not set
--# CONFIG_VLAN_8021Q is not set
--# CONFIG_DECNET is not set
--# CONFIG_LLC2 is not set
--# CONFIG_IPX is not set
--# CONFIG_ATALK is not set
--# CONFIG_X25 is not set
--# CONFIG_LAPB is not set
--# CONFIG_ECONET is not set
--# CONFIG_WAN_ROUTER is not set
--# CONFIG_NET_SCHED is not set
+-/* Clock, Power and Reset Controller */
+-#define	CPRC_BLOCK_OFF	0x01010000
+-#define CPRC_BASE	PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
 -
--#
--# Network testing
--#
--# CONFIG_NET_PKTGEN is not set
--# CONFIG_HAMRADIO is not set
--# CONFIG_IRDA is not set
--# CONFIG_BT is not set
--# CONFIG_AF_RXRPC is not set
+-#define FRQCR		(cprc_base+0x0)
+-#define WTCSR		(cprc_base+0x0018)
+-#define STBCR		(cprc_base+0x0030)
 -
--#
--# Wireless
--#
--# CONFIG_CFG80211 is not set
--# CONFIG_WIRELESS_EXT is not set
--# CONFIG_MAC80211 is not set
--# CONFIG_IEEE80211 is not set
--# CONFIG_RFKILL is not set
--# CONFIG_NET_9P is not set
+-/* Time Management Unit */
+-#define	TMU_BLOCK_OFF	0x01020000
+-#define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
+-#define TMU0_BASE	tmu_base + 0x8 + (0xc * 0x0)
+-#define TMU1_BASE	tmu_base + 0x8 + (0xc * 0x1)
+-#define TMU2_BASE	tmu_base + 0x8 + (0xc * 0x2)
 -
--#
--# Device Drivers
--#
+-#define TMU_TOCR	tmu_base+0x0	/* Byte access */
+-#define TMU_TSTR	tmu_base+0x4	/* Byte access */
 -
--#
--# Generic Driver Options
--#
--CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
--CONFIG_STANDALONE=y
--CONFIG_PREVENT_FIRMWARE_BUILD=y
--# CONFIG_FW_LOADER is not set
--# CONFIG_DEBUG_DRIVER is not set
--# CONFIG_DEBUG_DEVRES is not set
--# CONFIG_SYS_HYPERVISOR is not set
--# CONFIG_CONNECTOR is not set
--# CONFIG_MTD is not set
--# CONFIG_PARPORT is not set
--CONFIG_BLK_DEV=y
--# CONFIG_BLK_DEV_COW_COMMON is not set
--CONFIG_BLK_DEV_LOOP=y
--# CONFIG_BLK_DEV_CRYPTOLOOP is not set
--# CONFIG_BLK_DEV_NBD is not set
--CONFIG_BLK_DEV_RAM=y
--CONFIG_BLK_DEV_RAM_COUNT=16
--CONFIG_BLK_DEV_RAM_SIZE=4096
--CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
--# CONFIG_CDROM_PKTCDVD is not set
--# CONFIG_ATA_OVER_ETH is not set
--CONFIG_MISC_DEVICES=y
--# CONFIG_EEPROM_93CX6 is not set
--# CONFIG_IDE is not set
+-#define TMU0_TCOR	TMU0_BASE+0x0	/* Long access */
+-#define TMU0_TCNT	TMU0_BASE+0x4	/* Long access */
+-#define TMU0_TCR	TMU0_BASE+0x8	/* Word access */
 -
--#
--# SCSI device support
--#
--# CONFIG_RAID_ATTRS is not set
--CONFIG_SCSI=y
--CONFIG_SCSI_DMA=y
--# CONFIG_SCSI_TGT is not set
--# CONFIG_SCSI_NETLINK is not set
--CONFIG_SCSI_PROC_FS=y
+-/* Real Time Clock */
+-#define	RTC_BLOCK_OFF	0x01040000
+-#define RTC_BASE	PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
 -
--#
--# SCSI support type (disk, tape, CD-ROM)
--#
--CONFIG_BLK_DEV_SD=y
--# CONFIG_CHR_DEV_ST is not set
--# CONFIG_CHR_DEV_OSST is not set
--# CONFIG_BLK_DEV_SR is not set
--# CONFIG_CHR_DEV_SG is not set
--# CONFIG_CHR_DEV_SCH is not set
+-#define R64CNT  	rtc_base+0x00
+-#define RSECCNT 	rtc_base+0x04
+-#define RMINCNT 	rtc_base+0x08
+-#define RHRCNT  	rtc_base+0x0c
+-#define RWKCNT  	rtc_base+0x10
+-#define RDAYCNT 	rtc_base+0x14
+-#define RMONCNT 	rtc_base+0x18
+-#define RYRCNT  	rtc_base+0x1c	/* 16bit */
+-#define RSECAR  	rtc_base+0x20
+-#define RMINAR  	rtc_base+0x24
+-#define RHRAR   	rtc_base+0x28
+-#define RWKAR   	rtc_base+0x2c
+-#define RDAYAR  	rtc_base+0x30
+-#define RMONAR  	rtc_base+0x34
+-#define RCR1    	rtc_base+0x38
+-#define RCR2    	rtc_base+0x3c
 -
--#
--# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
--#
--CONFIG_SCSI_MULTI_LUN=y
--# CONFIG_SCSI_CONSTANTS is not set
--# CONFIG_SCSI_LOGGING is not set
--# CONFIG_SCSI_SCAN_ASYNC is not set
+-#define TICK_SIZE (tick_nsec / 1000)
 -
--#
--# SCSI Transports
--#
--CONFIG_SCSI_SPI_ATTRS=y
--# CONFIG_SCSI_FC_ATTRS is not set
--# CONFIG_SCSI_ISCSI_ATTRS is not set
--# CONFIG_SCSI_SAS_LIBSAS is not set
--# CONFIG_SCSI_SRP_ATTRS is not set
--CONFIG_SCSI_LOWLEVEL=y
--# CONFIG_ISCSI_TCP is not set
--# CONFIG_SCSI_DEBUG is not set
--# CONFIG_ATA is not set
--# CONFIG_MD is not set
--CONFIG_NETDEVICES=y
--# CONFIG_NETDEVICES_MULTIQUEUE is not set
--# CONFIG_DUMMY is not set
--# CONFIG_BONDING is not set
--# CONFIG_MACVLAN is not set
--# CONFIG_EQUALIZER is not set
--# CONFIG_TUN is not set
--# CONFIG_VETH is not set
--# CONFIG_PHYLIB is not set
--CONFIG_NET_ETHERNET=y
--# CONFIG_MII is not set
--# CONFIG_STNIC is not set
--# CONFIG_SMC91X is not set
--# CONFIG_SMC911X is not set
--# CONFIG_IBM_NEW_EMAC_ZMII is not set
--# CONFIG_IBM_NEW_EMAC_RGMII is not set
--# CONFIG_IBM_NEW_EMAC_TAH is not set
--# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
--# CONFIG_B44 is not set
--CONFIG_NETDEV_1000=y
--CONFIG_NETDEV_10000=y
+-static unsigned long tmu_base, rtc_base;
+-unsigned long cprc_base;
 -
--#
--# Wireless LAN
--#
--# CONFIG_WLAN_PRE80211 is not set
--# CONFIG_WLAN_80211 is not set
--# CONFIG_WAN is not set
--# CONFIG_PPP is not set
--# CONFIG_SLIP is not set
--# CONFIG_SHAPER is not set
--# CONFIG_NETCONSOLE is not set
--# CONFIG_NETPOLL is not set
--# CONFIG_NET_POLL_CONTROLLER is not set
--# CONFIG_ISDN is not set
--# CONFIG_PHONE is not set
+-/* Variables to allow interpolation of time of day to resolution better than a
+- * jiffy. */
 -
--#
--# Input device support
--#
--CONFIG_INPUT=y
--# CONFIG_INPUT_FF_MEMLESS is not set
--# CONFIG_INPUT_POLLDEV is not set
+-/* This is effectively protected by xtime_lock */
+-static unsigned long ctc_last_interrupt;
+-static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
 -
--#
--# Userland interfaces
--#
--CONFIG_INPUT_MOUSEDEV=y
--# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
--CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
--CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
--# CONFIG_INPUT_JOYDEV is not set
--# CONFIG_INPUT_EVDEV is not set
--# CONFIG_INPUT_EVBUG is not set
+-#define CTC_JIFFY_SCALE_SHIFT 40
 -
--#
--# Input Device Drivers
--#
--# CONFIG_INPUT_KEYBOARD is not set
--# CONFIG_INPUT_MOUSE is not set
--# CONFIG_INPUT_JOYSTICK is not set
--# CONFIG_INPUT_TABLET is not set
--# CONFIG_INPUT_TOUCHSCREEN is not set
--# CONFIG_INPUT_MISC is not set
+-/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
+-static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
 -
--#
--# Hardware I/O ports
--#
--# CONFIG_SERIO is not set
--# CONFIG_GAMEPORT is not set
+-/* Estimate number of microseconds that have elapsed since the last timer tick,
+-   by scaling the delta that has occurred in the CTC register.
 -
--#
--# Character devices
--#
--CONFIG_VT=y
--CONFIG_VT_CONSOLE=y
--CONFIG_HW_CONSOLE=y
--# CONFIG_VT_HW_CONSOLE_BINDING is not set
--# CONFIG_SERIAL_NONSTANDARD is not set
+-   WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
+-   the CPU clock rate.  If the CPU sleeps, the CTC stops counting.  Bear this
+-   in mind if enabling SLEEP_WORKS in process.c.  In that case, this algorithm
+-   probably needs to use TMU.TCNT0 instead.  This will work even if the CPU is
+-   sleeping, though will be coarser.
 -
--#
--# Serial drivers
--#
--# CONFIG_SERIAL_8250 is not set
+-   FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
+-   is running or if the freq or tick arguments of adjtimex are modified after
+-   we have calibrated the scaling factor?  This will result in either a jump at
+-   the end of a tick period, or a wrap backwards at the start of the next one,
+-   if the application is reading the time of day often enough.  I think we
+-   ought to do better than this.  For this reason, usecs_per_jiffy is left
+-   separated out in the calculation below.  This allows some future hook into
+-   the adjtime-related stuff in kernel/timer.c to remove this hazard.
 -
--#
--# Non-8250 serial port support
--#
--CONFIG_SERIAL_SH_SCI=y
--CONFIG_SERIAL_SH_SCI_NR_UARTS=2
--CONFIG_SERIAL_SH_SCI_CONSOLE=y
--CONFIG_SERIAL_CORE=y
--CONFIG_SERIAL_CORE_CONSOLE=y
--CONFIG_UNIX98_PTYS=y
--CONFIG_LEGACY_PTYS=y
--CONFIG_LEGACY_PTY_COUNT=256
--# CONFIG_IPMI_HANDLER is not set
--CONFIG_HW_RANDOM=y
--# CONFIG_R3964 is not set
--# CONFIG_RAW_DRIVER is not set
--# CONFIG_TCG_TPM is not set
--# CONFIG_I2C is not set
+-*/
 -
--#
--# SPI support
--#
--# CONFIG_SPI is not set
--# CONFIG_SPI_MASTER is not set
--# CONFIG_W1 is not set
--# CONFIG_POWER_SUPPLY is not set
--CONFIG_HWMON=y
--# CONFIG_HWMON_VID is not set
--# CONFIG_SENSORS_F71805F is not set
--# CONFIG_SENSORS_F71882FG is not set
--# CONFIG_SENSORS_IT87 is not set
--# CONFIG_SENSORS_PC87360 is not set
--# CONFIG_SENSORS_PC87427 is not set
--# CONFIG_SENSORS_SMSC47M1 is not set
--# CONFIG_SENSORS_SMSC47B397 is not set
--# CONFIG_SENSORS_VT1211 is not set
--# CONFIG_SENSORS_W83627HF is not set
--# CONFIG_SENSORS_W83627EHF is not set
--# CONFIG_HWMON_DEBUG_CHIP is not set
--CONFIG_WATCHDOG=y
--# CONFIG_WATCHDOG_NOWAYOUT is not set
+-static unsigned long usecs_since_tick(void)
+-{
+-	unsigned long long current_ctc;
+-	long ctc_ticks_since_interrupt;
+-	unsigned long long ull_ctc_ticks_since_interrupt;
+-	unsigned long result;
 -
--#
--# Watchdog Device Drivers
--#
--# CONFIG_SOFT_WATCHDOG is not set
+-	unsigned long long mul1_out;
+-	unsigned long long mul1_out_high;
+-	unsigned long long mul2_out_low, mul2_out_high;
 -
--#
--# Sonics Silicon Backplane
--#
--CONFIG_SSB_POSSIBLE=y
--# CONFIG_SSB is not set
+-	/* Read CTC register */
+-	asm ("getcon cr62, %0" : "=r" (current_ctc));
+-	/* Note, the CTC counts down on each CPU clock, not up.
+-	   Note(2), use long type to get correct wraparound arithmetic when
+-	   the counter crosses zero. */
+-	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
+-	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
 -
--#
--# Multifunction device drivers
--#
--# CONFIG_MFD_SM501 is not set
+-	/* Inline assembly to do 32x32x32->64 multiplier */
+-	asm volatile ("mulu.l %1, %2, %0" :
+-	     "=r" (mul1_out) :
+-	     "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
 -
--#
--# Multimedia devices
--#
--# CONFIG_VIDEO_DEV is not set
--# CONFIG_DVB_CORE is not set
--CONFIG_DAB=y
+-	mul1_out_high = mul1_out >> 32;
 -
--#
--# Graphics support
--#
--# CONFIG_VGASTATE is not set
--CONFIG_VIDEO_OUTPUT_CONTROL=y
--CONFIG_FB=y
--CONFIG_FIRMWARE_EDID=y
--# CONFIG_FB_DDC is not set
--# CONFIG_FB_CFB_FILLRECT is not set
--# CONFIG_FB_CFB_COPYAREA is not set
--# CONFIG_FB_CFB_IMAGEBLIT is not set
--# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
--# CONFIG_FB_SYS_FILLRECT is not set
--# CONFIG_FB_SYS_COPYAREA is not set
--# CONFIG_FB_SYS_IMAGEBLIT is not set
--# CONFIG_FB_SYS_FOPS is not set
--CONFIG_FB_DEFERRED_IO=y
--# CONFIG_FB_SVGALIB is not set
--# CONFIG_FB_MACMODES is not set
--# CONFIG_FB_BACKLIGHT is not set
--CONFIG_FB_MODE_HELPERS=y
--# CONFIG_FB_TILEBLITTING is not set
+-	asm volatile ("mulu.l %1, %2, %0" :
+-	     "=r" (mul2_out_low) :
+-	     "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
 -
--#
--# Frame buffer hardware drivers
--#
--# CONFIG_FB_S1D13XXX is not set
--# CONFIG_FB_VIRTUAL is not set
--# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-#if 1
+-	asm volatile ("mulu.l %1, %2, %0" :
+-	     "=r" (mul2_out_high) :
+-	     "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
+-#endif
 -
--#
--# Display device support
--#
--# CONFIG_DISPLAY_SUPPORT is not set
+-	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
 -
--#
--# Console display driver support
--#
--CONFIG_DUMMY_CONSOLE=y
--CONFIG_FRAMEBUFFER_CONSOLE=y
--# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
--# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
--CONFIG_FONTS=y
--# CONFIG_FONT_8x8 is not set
--CONFIG_FONT_8x16=y
--# CONFIG_FONT_6x11 is not set
--# CONFIG_FONT_7x14 is not set
--# CONFIG_FONT_PEARL_8x8 is not set
--# CONFIG_FONT_ACORN_8x8 is not set
--# CONFIG_FONT_MINI_4x6 is not set
--# CONFIG_FONT_SUN8x16 is not set
--# CONFIG_FONT_SUN12x22 is not set
--# CONFIG_FONT_10x18 is not set
--CONFIG_LOGO=y
--# CONFIG_LOGO_LINUX_MONO is not set
--# CONFIG_LOGO_LINUX_VGA16 is not set
--# CONFIG_LOGO_LINUX_CLUT224 is not set
--# CONFIG_LOGO_SUPERH_MONO is not set
--# CONFIG_LOGO_SUPERH_VGA16 is not set
--CONFIG_LOGO_SUPERH_CLUT224=y
+-	return result;
+-}
 -
--#
--# Sound
--#
--# CONFIG_SOUND is not set
--CONFIG_HID_SUPPORT=y
--CONFIG_HID=y
--# CONFIG_HID_DEBUG is not set
--# CONFIG_HIDRAW is not set
--CONFIG_USB_SUPPORT=y
--CONFIG_USB_ARCH_HAS_HCD=y
--# CONFIG_USB_ARCH_HAS_OHCI is not set
--# CONFIG_USB_ARCH_HAS_EHCI is not set
--# CONFIG_USB is not set
+-void do_gettimeofday(struct timeval *tv)
+-{
+-	unsigned long flags;
+-	unsigned long seq;
+-	unsigned long usec, sec;
 -
--#
--# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
--#
+-	do {
+-		seq = read_seqbegin_irqsave(&xtime_lock, flags);
+-		usec = usecs_since_tick();
+-		sec = xtime.tv_sec;
+-		usec += xtime.tv_nsec / 1000;
+-	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 -
--#
--# USB Gadget Support
--#
--# CONFIG_USB_GADGET is not set
--# CONFIG_MMC is not set
--# CONFIG_NEW_LEDS is not set
--# CONFIG_RTC_CLASS is not set
+-	while (usec >= 1000000) {
+-		usec -= 1000000;
+-		sec++;
+-	}
 -
--#
--# Userspace I/O
--#
--# CONFIG_UIO is not set
+-	tv->tv_sec = sec;
+-	tv->tv_usec = usec;
+-}
 -
--#
--# File systems
--#
--CONFIG_EXT2_FS=y
--# CONFIG_EXT2_FS_XATTR is not set
--# CONFIG_EXT2_FS_XIP is not set
--CONFIG_EXT3_FS=y
--CONFIG_EXT3_FS_XATTR=y
--# CONFIG_EXT3_FS_POSIX_ACL is not set
--# CONFIG_EXT3_FS_SECURITY is not set
--# CONFIG_EXT4DEV_FS is not set
--CONFIG_JBD=y
--# CONFIG_JBD_DEBUG is not set
--CONFIG_FS_MBCACHE=y
--# CONFIG_REISERFS_FS is not set
--# CONFIG_JFS_FS is not set
--# CONFIG_FS_POSIX_ACL is not set
--# CONFIG_XFS_FS is not set
--# CONFIG_GFS2_FS is not set
--# CONFIG_OCFS2_FS is not set
--CONFIG_MINIX_FS=y
--CONFIG_ROMFS_FS=y
--CONFIG_INOTIFY=y
--CONFIG_INOTIFY_USER=y
--# CONFIG_QUOTA is not set
--CONFIG_DNOTIFY=y
--# CONFIG_AUTOFS_FS is not set
--# CONFIG_AUTOFS4_FS is not set
--# CONFIG_FUSE_FS is not set
+-int do_settimeofday(struct timespec *tv)
+-{
+-	time_t wtm_sec, sec = tv->tv_sec;
+-	long wtm_nsec, nsec = tv->tv_nsec;
 -
--#
--# CD-ROM/DVD Filesystems
--#
--# CONFIG_ISO9660_FS is not set
--# CONFIG_UDF_FS is not set
+-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+-		return -EINVAL;
 -
--#
--# DOS/FAT/NT Filesystems
--#
--# CONFIG_MSDOS_FS is not set
--# CONFIG_VFAT_FS is not set
--# CONFIG_NTFS_FS is not set
+-	write_seqlock_irq(&xtime_lock);
+-	/*
+-	 * This is revolting. We need to set "xtime" correctly. However, the
+-	 * value in this location is the value at the most recent update of
+-	 * wall time.  Discover what correction gettimeofday() would have
+-	 * made, and then undo it!
+-	 */
+-	nsec -= 1000 * usecs_since_tick();
 -
--#
--# Pseudo filesystems
--#
--CONFIG_PROC_FS=y
--CONFIG_PROC_KCORE=y
--CONFIG_PROC_SYSCTL=y
--CONFIG_SYSFS=y
--CONFIG_TMPFS=y
--# CONFIG_TMPFS_POSIX_ACL is not set
--CONFIG_HUGETLBFS=y
--CONFIG_HUGETLB_PAGE=y
--# CONFIG_CONFIGFS_FS is not set
+-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
 -
--#
--# Miscellaneous filesystems
--#
--# CONFIG_ADFS_FS is not set
--# CONFIG_AFFS_FS is not set
--# CONFIG_HFS_FS is not set
--# CONFIG_HFSPLUS_FS is not set
--# CONFIG_BEFS_FS is not set
--# CONFIG_BFS_FS is not set
--# CONFIG_EFS_FS is not set
--# CONFIG_CRAMFS is not set
--# CONFIG_VXFS_FS is not set
--# CONFIG_HPFS_FS is not set
--# CONFIG_QNX4FS_FS is not set
--# CONFIG_SYSV_FS is not set
--# CONFIG_UFS_FS is not set
--CONFIG_NETWORK_FILESYSTEMS=y
--CONFIG_NFS_FS=y
--CONFIG_NFS_V3=y
--# CONFIG_NFS_V3_ACL is not set
--# CONFIG_NFS_V4 is not set
--# CONFIG_NFS_DIRECTIO is not set
--# CONFIG_NFSD is not set
--CONFIG_ROOT_NFS=y
--CONFIG_LOCKD=y
--CONFIG_LOCKD_V4=y
--CONFIG_NFS_COMMON=y
--CONFIG_SUNRPC=y
--# CONFIG_SUNRPC_BIND34 is not set
--# CONFIG_RPCSEC_GSS_KRB5 is not set
--# CONFIG_RPCSEC_GSS_SPKM3 is not set
--# CONFIG_SMB_FS is not set
--# CONFIG_CIFS is not set
--# CONFIG_NCP_FS is not set
--# CONFIG_CODA_FS is not set
--# CONFIG_AFS_FS is not set
+-	set_normalized_timespec(&xtime, sec, nsec);
+-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 -
--#
--# Partition Types
--#
--CONFIG_PARTITION_ADVANCED=y
--# CONFIG_ACORN_PARTITION is not set
--# CONFIG_OSF_PARTITION is not set
--# CONFIG_AMIGA_PARTITION is not set
--# CONFIG_ATARI_PARTITION is not set
--# CONFIG_MAC_PARTITION is not set
--CONFIG_MSDOS_PARTITION=y
--# CONFIG_BSD_DISKLABEL is not set
--# CONFIG_MINIX_SUBPARTITION is not set
--# CONFIG_SOLARIS_X86_PARTITION is not set
--# CONFIG_UNIXWARE_DISKLABEL is not set
--# CONFIG_LDM_PARTITION is not set
--# CONFIG_SGI_PARTITION is not set
--# CONFIG_ULTRIX_PARTITION is not set
--# CONFIG_SUN_PARTITION is not set
--# CONFIG_KARMA_PARTITION is not set
--# CONFIG_EFI_PARTITION is not set
--# CONFIG_SYSV68_PARTITION is not set
--# CONFIG_NLS is not set
--# CONFIG_DLM is not set
--CONFIG_INSTRUMENTATION=y
--# CONFIG_PROFILING is not set
--# CONFIG_MARKERS is not set
+-	ntp_clear();
+-	write_sequnlock_irq(&xtime_lock);
+-	clock_was_set();
 -
--#
--# Kernel hacking
--#
--# CONFIG_PRINTK_TIME is not set
--CONFIG_ENABLE_WARN_DEPRECATED=y
--CONFIG_ENABLE_MUST_CHECK=y
--CONFIG_MAGIC_SYSRQ=y
--# CONFIG_UNUSED_SYMBOLS is not set
--CONFIG_DEBUG_FS=y
--# CONFIG_HEADERS_CHECK is not set
--CONFIG_DEBUG_KERNEL=y
--# CONFIG_DEBUG_SHIRQ is not set
--CONFIG_DETECT_SOFTLOCKUP=y
--CONFIG_SCHED_DEBUG=y
--CONFIG_SCHEDSTATS=y
--# CONFIG_TIMER_STATS is not set
--# CONFIG_DEBUG_SLAB is not set
--# CONFIG_DEBUG_RT_MUTEXES is not set
--# CONFIG_RT_MUTEX_TESTER is not set
--# CONFIG_DEBUG_SPINLOCK is not set
--# CONFIG_DEBUG_MUTEXES is not set
--# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
--# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
--# CONFIG_DEBUG_KOBJECT is not set
--CONFIG_DEBUG_BUGVERBOSE=y
--# CONFIG_DEBUG_INFO is not set
--# CONFIG_DEBUG_VM is not set
--# CONFIG_DEBUG_LIST is not set
--# CONFIG_DEBUG_SG is not set
--CONFIG_FRAME_POINTER=y
--CONFIG_FORCED_INLINING=y
--# CONFIG_BOOT_PRINTK_DELAY is not set
--# CONFIG_FAULT_INJECTION is not set
--# CONFIG_SAMPLES is not set
--# CONFIG_EARLY_PRINTK is not set
--CONFIG_SH64_PROC_TLB=y
--CONFIG_SH64_PROC_ASIDS=y
--CONFIG_SH64_SR_WATCH=y
--# CONFIG_POOR_MANS_STRACE is not set
--# CONFIG_SH_NO_BSS_INIT is not set
+-	return 0;
+-}
+-EXPORT_SYMBOL(do_settimeofday);
 -
--#
--# Security options
--#
--# CONFIG_KEYS is not set
--# CONFIG_SECURITY is not set
--# CONFIG_SECURITY_FILE_CAPABILITIES is not set
--# CONFIG_CRYPTO is not set
+-static int set_rtc_time(unsigned long nowtime)
+-{
+-	int retval = 0;
+-	int real_seconds, real_minutes, cmos_minutes;
 -
--#
--# Library routines
--#
--CONFIG_BITREVERSE=y
--# CONFIG_CRC_CCITT is not set
--# CONFIG_CRC16 is not set
--# CONFIG_CRC_ITU_T is not set
--CONFIG_CRC32=y
--# CONFIG_CRC7 is not set
--# CONFIG_LIBCRC32C is not set
--CONFIG_PLIST=y
--CONFIG_HAS_IOMEM=y
--CONFIG_HAS_IOPORT=y
--CONFIG_HAS_DMA=y
-diff --git a/arch/sh64/configs/sim_defconfig b/arch/sh64/configs/sim_defconfig
-deleted file mode 100644
-index 18476cc..0000000
---- a/arch/sh64/configs/sim_defconfig
-+++ /dev/null
-@@ -1,558 +0,0 @@
--#
--# Automatically generated make config: don't edit
--# Linux kernel version: 2.6.24-rc1
--# Fri Nov  2 14:36:08 2007
--#
--CONFIG_SUPERH=y
--CONFIG_SUPERH64=y
--CONFIG_MMU=y
--CONFIG_QUICKLIST=y
--CONFIG_RWSEM_GENERIC_SPINLOCK=y
--CONFIG_GENERIC_FIND_NEXT_BIT=y
--CONFIG_GENERIC_HWEIGHT=y
--CONFIG_GENERIC_CALIBRATE_DELAY=y
--CONFIG_GENERIC_HARDIRQS=y
--CONFIG_GENERIC_IRQ_PROBE=y
--# CONFIG_ARCH_HAS_ILOG2_U32 is not set
--# CONFIG_ARCH_HAS_ILOG2_U64 is not set
--CONFIG_ARCH_NO_VIRT_TO_BUS=y
--CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+-	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */
 -
--#
--# General setup
--#
--CONFIG_EXPERIMENTAL=y
--CONFIG_BROKEN_ON_SMP=y
--CONFIG_LOCK_KERNEL=y
--CONFIG_INIT_ENV_ARG_LIMIT=32
--CONFIG_LOCALVERSION=""
--CONFIG_LOCALVERSION_AUTO=y
--CONFIG_SWAP=y
--# CONFIG_SYSVIPC is not set
--# CONFIG_BSD_PROCESS_ACCT is not set
--# CONFIG_USER_NS is not set
--# CONFIG_IKCONFIG is not set
--CONFIG_LOG_BUF_SHIFT=14
--# CONFIG_CGROUPS is not set
--CONFIG_FAIR_GROUP_SCHED=y
--CONFIG_FAIR_USER_SCHED=y
--# CONFIG_FAIR_CGROUP_SCHED is not set
--CONFIG_SYSFS_DEPRECATED=y
--# CONFIG_RELAY is not set
--# CONFIG_BLK_DEV_INITRD is not set
--# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
--CONFIG_SYSCTL=y
--# CONFIG_EMBEDDED is not set
--CONFIG_UID16=y
--CONFIG_SYSCTL_SYSCALL=y
--CONFIG_KALLSYMS=y
--# CONFIG_KALLSYMS_ALL is not set
--# CONFIG_KALLSYMS_EXTRA_PASS is not set
--CONFIG_HOTPLUG=y
--CONFIG_PRINTK=y
--CONFIG_BUG=y
--CONFIG_ELF_CORE=y
--CONFIG_BASE_FULL=y
--CONFIG_FUTEX=y
--CONFIG_ANON_INODES=y
--CONFIG_EPOLL=y
--CONFIG_SIGNALFD=y
--CONFIG_EVENTFD=y
--CONFIG_SHMEM=y
--CONFIG_VM_EVENT_COUNTERS=y
--CONFIG_SLAB=y
--# CONFIG_SLUB is not set
--# CONFIG_SLOB is not set
--CONFIG_RT_MUTEXES=y
--# CONFIG_TINY_SHMEM is not set
--CONFIG_BASE_SMALL=0
--# CONFIG_MODULES is not set
--CONFIG_BLOCK=y
--# CONFIG_LBD is not set
--# CONFIG_BLK_DEV_IO_TRACE is not set
--# CONFIG_LSF is not set
--# CONFIG_BLK_DEV_BSG is not set
+-	cmos_minutes = ctrl_inb(RMINCNT);
+-	BCD_TO_BIN(cmos_minutes);
 -
--#
--# IO Schedulers
--#
--CONFIG_IOSCHED_NOOP=y
--CONFIG_IOSCHED_AS=y
--CONFIG_IOSCHED_DEADLINE=y
--CONFIG_IOSCHED_CFQ=y
--# CONFIG_DEFAULT_AS is not set
--# CONFIG_DEFAULT_DEADLINE is not set
--CONFIG_DEFAULT_CFQ=y
--# CONFIG_DEFAULT_NOOP is not set
--CONFIG_DEFAULT_IOSCHED="cfq"
+-	/*
+-	 * since we're only adjusting minutes and seconds,
+-	 * don't interfere with hour overflow. This avoids
+-	 * messing with unknown time zones but requires your
+-	 * RTC not to be off by more than 15 minutes
+-	 */
+-	real_seconds = nowtime % 60;
+-	real_minutes = nowtime / 60;
+-	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+-		real_minutes += 30;	/* correct for half hour time zone */
+-	real_minutes %= 60;
 -
--#
--# System type
--#
--CONFIG_SH_SIMULATOR=y
--# CONFIG_SH_CAYMAN is not set
--# CONFIG_SH_HARP is not set
--CONFIG_CPU_SH5=y
--CONFIG_CPU_SUBTYPE_SH5_101=y
--# CONFIG_CPU_SUBTYPE_SH5_103 is not set
--CONFIG_LITTLE_ENDIAN=y
--# CONFIG_BIG_ENDIAN is not set
--CONFIG_SH_FPU=y
--# CONFIG_SH64_FPU_DENORM_FLUSH is not set
--CONFIG_SH64_PGTABLE_2_LEVEL=y
--# CONFIG_SH64_PGTABLE_3_LEVEL is not set
--CONFIG_HUGETLB_PAGE_SIZE_64K=y
--# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
--# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
--CONFIG_SH64_USER_MISALIGNED_FIXUP=y
+-	if (abs(real_minutes - cmos_minutes) < 30) {
+-		BIN_TO_BCD(real_seconds);
+-		BIN_TO_BCD(real_minutes);
+-		ctrl_outb(real_seconds, RSECCNT);
+-		ctrl_outb(real_minutes, RMINCNT);
+-	} else {
+-		printk(KERN_WARNING
+-		       "set_rtc_time: can't update from %d to %d\n",
+-		       cmos_minutes, real_minutes);
+-		retval = -1;
+-	}
 -
--#
--# Memory options
--#
--CONFIG_CACHED_MEMORY_OFFSET=0x20000000
--CONFIG_MEMORY_START=0x80000000
--CONFIG_MEMORY_SIZE_IN_MB=128
+-	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */
 -
--#
--# Cache options
--#
--# CONFIG_DCACHE_WRITE_BACK is not set
--# CONFIG_DCACHE_WRITE_THROUGH is not set
--CONFIG_DCACHE_DISABLED=y
--# CONFIG_ICACHE_DISABLED is not set
--CONFIG_PCIDEVICE_MEMORY_START=C0000000
--CONFIG_DEVICE_MEMORY_START=E0000000
--CONFIG_FLASH_MEMORY_START=0x00000000
--CONFIG_PCI_BLOCK_START=0x40000000
+-	return retval;
+-}
 -
--#
--# CPU Subtype specific options
--#
--CONFIG_SH64_ID2815_WORKAROUND=y
+-/* last time the RTC clock got updated */
+-static long last_rtc_update = 0;
 -
--#
--# Misc options
--#
--# CONFIG_SH_DMA is not set
--CONFIG_PREEMPT=y
--CONFIG_SELECT_MEMORY_MODEL=y
--CONFIG_FLATMEM_MANUAL=y
--# CONFIG_DISCONTIGMEM_MANUAL is not set
--# CONFIG_SPARSEMEM_MANUAL is not set
--CONFIG_FLATMEM=y
--CONFIG_FLAT_NODE_MEM_MAP=y
--# CONFIG_SPARSEMEM_STATIC is not set
--# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
--CONFIG_SPLIT_PTLOCK_CPUS=4
--# CONFIG_RESOURCES_64BIT is not set
--CONFIG_ZONE_DMA_FLAG=0
--CONFIG_NR_QUICK=1
+-/*
+- * timer_interrupt() needs to keep up the real-time clock,
+- * as well as call the "do_timer()" routine every clocktick
+- */
+-static inline void do_timer_interrupt(void)
+-{
+-	unsigned long long current_ctc;
+-	asm ("getcon cr62, %0" : "=r" (current_ctc));
+-	ctc_last_interrupt = (unsigned long) current_ctc;
 -
--#
--# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
--#
--# CONFIG_ARCH_SUPPORTS_MSI is not set
--# CONFIG_PCCARD is not set
+-	do_timer(1);
+-#ifndef CONFIG_SMP
+-	update_process_times(user_mode(get_irq_regs()));
+-#endif
+-	if (current->pid)
+-		profile_tick(CPU_PROFILING);
 -
--#
--# Executable file formats
--#
--CONFIG_BINFMT_ELF=y
--# CONFIG_BINFMT_MISC is not set
+-#ifdef CONFIG_HEARTBEAT
+-	{
+-		extern void heartbeat(void);
 -
--#
--# Networking
--#
--# CONFIG_NET is not set
+-		heartbeat();
+-	}
+-#endif
 -
--#
--# Device Drivers
--#
+-	/*
+-	 * If we have an externally synchronized Linux clock, then update
+-	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+-	 * called as close as possible to 500 ms before the new second starts.
+-	 */
+-	if (ntp_synced() &&
+-	    xtime.tv_sec > last_rtc_update + 660 &&
+-	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
+-	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+-		if (set_rtc_time(xtime.tv_sec) == 0)
+-			last_rtc_update = xtime.tv_sec;
+-		else
+-			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+-	}
+-}
 -
--#
--# Generic Driver Options
--#
--CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
--CONFIG_STANDALONE=y
--CONFIG_PREVENT_FIRMWARE_BUILD=y
--# CONFIG_FW_LOADER is not set
--# CONFIG_DEBUG_DRIVER is not set
--# CONFIG_DEBUG_DEVRES is not set
--# CONFIG_SYS_HYPERVISOR is not set
--# CONFIG_MTD is not set
--# CONFIG_PARPORT is not set
--# CONFIG_BLK_DEV is not set
--# CONFIG_MISC_DEVICES is not set
--# CONFIG_IDE is not set
+-/*
+- * This is the same as the above, except we _also_ save the current
+- * Time Stamp Counter value at the time of the timer interrupt, so that
+- * we later on can estimate the time of day more exactly.
+- */
+-static irqreturn_t timer_interrupt(int irq, void *dev_id)
+-{
+-	unsigned long timer_status;
 -
--#
--# SCSI device support
--#
--# CONFIG_RAID_ATTRS is not set
--CONFIG_SCSI=y
--CONFIG_SCSI_DMA=y
--# CONFIG_SCSI_TGT is not set
--# CONFIG_SCSI_NETLINK is not set
--CONFIG_SCSI_PROC_FS=y
+-	/* Clear UNF bit */
+-	timer_status = ctrl_inw(TMU0_TCR);
+-	timer_status &= ~0x100;
+-	ctrl_outw(timer_status, TMU0_TCR);
 -
--#
--# SCSI support type (disk, tape, CD-ROM)
--#
--CONFIG_BLK_DEV_SD=y
--# CONFIG_CHR_DEV_ST is not set
--# CONFIG_CHR_DEV_OSST is not set
--# CONFIG_BLK_DEV_SR is not set
--# CONFIG_CHR_DEV_SG is not set
--# CONFIG_CHR_DEV_SCH is not set
+-	/*
+-	 * Here we are in the timer irq handler. We just have irqs locally
+-	 * disabled but we don't know if the timer_bh is running on the other
+-	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
+-	 * the irq version of write_lock because as just said we have irq
+-	 * locally disabled. -arca
+-	 */
+-	write_lock(&xtime_lock);
+-	do_timer_interrupt();
+-	write_unlock(&xtime_lock);
 -
--#
--# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
--#
--CONFIG_SCSI_MULTI_LUN=y
--# CONFIG_SCSI_CONSTANTS is not set
--# CONFIG_SCSI_LOGGING is not set
--# CONFIG_SCSI_SCAN_ASYNC is not set
+-	return IRQ_HANDLED;
+-}
 -
--#
--# SCSI Transports
--#
--CONFIG_SCSI_SPI_ATTRS=y
--# CONFIG_SCSI_FC_ATTRS is not set
--# CONFIG_SCSI_SAS_LIBSAS is not set
--# CONFIG_SCSI_SRP_ATTRS is not set
--CONFIG_SCSI_LOWLEVEL=y
--# CONFIG_SCSI_DEBUG is not set
--# CONFIG_ATA is not set
--# CONFIG_MD is not set
--# CONFIG_PHONE is not set
+-static unsigned long get_rtc_time(void)
+-{
+-	unsigned int sec, min, hr, wk, day, mon, yr, yr100;
 -
--#
--# Input device support
--#
--CONFIG_INPUT=y
--# CONFIG_INPUT_FF_MEMLESS is not set
--# CONFIG_INPUT_POLLDEV is not set
+- again:
+-	do {
+-		ctrl_outb(0, RCR1);  /* Clear CF-bit */
+-		sec = ctrl_inb(RSECCNT);
+-		min = ctrl_inb(RMINCNT);
+-		hr  = ctrl_inb(RHRCNT);
+-		wk  = ctrl_inb(RWKCNT);
+-		day = ctrl_inb(RDAYCNT);
+-		mon = ctrl_inb(RMONCNT);
+-		yr  = ctrl_inw(RYRCNT);
+-		yr100 = (yr >> 8);
+-		yr &= 0xff;
+-	} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);
 -
--#
--# Userland interfaces
--#
--CONFIG_INPUT_MOUSEDEV=y
--# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
--CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
--CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
--# CONFIG_INPUT_JOYDEV is not set
--# CONFIG_INPUT_EVDEV is not set
--# CONFIG_INPUT_EVBUG is not set
+-	BCD_TO_BIN(yr100);
+-	BCD_TO_BIN(yr);
+-	BCD_TO_BIN(mon);
+-	BCD_TO_BIN(day);
+-	BCD_TO_BIN(hr);
+-	BCD_TO_BIN(min);
+-	BCD_TO_BIN(sec);
 -
--#
--# Input Device Drivers
--#
--# CONFIG_INPUT_KEYBOARD is not set
--# CONFIG_INPUT_MOUSE is not set
--# CONFIG_INPUT_JOYSTICK is not set
--# CONFIG_INPUT_TABLET is not set
--# CONFIG_INPUT_TOUCHSCREEN is not set
--# CONFIG_INPUT_MISC is not set
+-	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
+-	    hr > 23 || min > 59 || sec > 59) {
+-		printk(KERN_ERR
+-		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
+-		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
+-		ctrl_outb(0, RSECCNT);
+-		ctrl_outb(0, RMINCNT);
+-		ctrl_outb(0, RHRCNT);
+-		ctrl_outb(6, RWKCNT);
+-		ctrl_outb(1, RDAYCNT);
+-		ctrl_outb(1, RMONCNT);
+-		ctrl_outw(0x2000, RYRCNT);
+-		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */
+-		goto again;
+-	}
 -
--#
--# Hardware I/O ports
--#
--# CONFIG_SERIO is not set
--# CONFIG_GAMEPORT is not set
+-	return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
+-}
 -
--#
--# Character devices
--#
--CONFIG_VT=y
--CONFIG_VT_CONSOLE=y
--CONFIG_HW_CONSOLE=y
--# CONFIG_VT_HW_CONSOLE_BINDING is not set
--# CONFIG_SERIAL_NONSTANDARD is not set
+-static __init unsigned int get_cpu_hz(void)
+-{
+-	unsigned int count;
+-	unsigned long __dummy;
+-	unsigned long ctc_val_init, ctc_val;
 -
--#
--# Serial drivers
--#
--# CONFIG_SERIAL_8250 is not set
+-	/*
+-	** Regardless the toolchain, force the compiler to use the
+-	** arbitrary register r3 as a clock tick counter.
+-	** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
+-	*/
+-	register unsigned long long  __rtc_irq_flag __asm__ ("r3");
 -
--#
--# Non-8250 serial port support
--#
--CONFIG_SERIAL_SH_SCI=y
--CONFIG_SERIAL_SH_SCI_NR_UARTS=2
--CONFIG_SERIAL_SH_SCI_CONSOLE=y
--CONFIG_SERIAL_CORE=y
--CONFIG_SERIAL_CORE_CONSOLE=y
--CONFIG_UNIX98_PTYS=y
--# CONFIG_LEGACY_PTYS is not set
--# CONFIG_IPMI_HANDLER is not set
--# CONFIG_HW_RANDOM is not set
--# CONFIG_R3964 is not set
--# CONFIG_RAW_DRIVER is not set
--# CONFIG_TCG_TPM is not set
--# CONFIG_I2C is not set
+-	local_irq_enable();
+-	do {} while (ctrl_inb(R64CNT) != 0);
+-	ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */
 -
--#
--# SPI support
--#
--# CONFIG_SPI is not set
--# CONFIG_SPI_MASTER is not set
--# CONFIG_W1 is not set
--# CONFIG_POWER_SUPPLY is not set
--# CONFIG_HWMON is not set
--# CONFIG_WATCHDOG is not set
+-	/*
+-	 * r3 is arbitrary. CDC does not support "=z".
+-	 */
+-	ctc_val_init = 0xffffffff;
+-	ctc_val = ctc_val_init;
 -
--#
--# Sonics Silicon Backplane
--#
--CONFIG_SSB_POSSIBLE=y
--# CONFIG_SSB is not set
+-	asm volatile("gettr	tr0, %1\n\t"
+-		     "putcon	%0, " __CTC "\n\t"
+-		     "and	%2, r63, %2\n\t"
+-		     "pta	$+4, tr0\n\t"
+-		     "beq/l	%2, r63, tr0\n\t"
+-		     "ptabs	%1, tr0\n\t"
+-		     "getcon	" __CTC ", %0\n\t"
+-		: "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
+-		: "0" (0));
+-	local_irq_disable();
+-	/*
+-	 * SH-3:
+-	 * CPU clock = 4 stages * loop
+-	 * tst    rm,rm      if id ex
+-	 * bt/s   1b            if id ex
+-	 * add    #1,rd            if id ex
+-         *                            (if) pipe line stole
+-	 * tst    rm,rm                  if id ex
+-         * ....
+-	 *
+-	 *
+-	 * SH-4:
+-	 * CPU clock = 6 stages * loop
+-	 * I don't know why.
+-         * ....
+-	 *
+-	 * SH-5:
+-	 * Use CTC register to count.  This approach returns the right value
+-	 * even if the I-cache is disabled (e.g. whilst debugging.)
+-	 *
+-	 */
 -
--#
--# Multifunction device drivers
--#
--# CONFIG_MFD_SM501 is not set
+-	count = ctc_val_init - ctc_val; /* CTC counts down */
 -
--#
--# Multimedia devices
--#
--# CONFIG_VIDEO_DEV is not set
--CONFIG_DAB=y
+-#if defined (CONFIG_SH_SIMULATOR)
+-	/*
+-	 * Let's pretend we are a 5MHz SH-5 to avoid a too
+-	 * little timer interval. Also to keep delay
+-	 * calibration within a reasonable time.
+-	 */
+-	return 5000000;
+-#else
+-	/*
+-	 * This really is count by the number of clock cycles
+-         * by the ratio between a complete R64CNT
+-         * wrap-around (128) and CUI interrupt being raised (64).
+-	 */
+-	return count*2;
+-#endif
+-}
 -
--#
--# Graphics support
--#
--# CONFIG_VGASTATE is not set
--CONFIG_VIDEO_OUTPUT_CONTROL=y
--CONFIG_FB=y
--CONFIG_FIRMWARE_EDID=y
--# CONFIG_FB_DDC is not set
--# CONFIG_FB_CFB_FILLRECT is not set
--# CONFIG_FB_CFB_COPYAREA is not set
--# CONFIG_FB_CFB_IMAGEBLIT is not set
--# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
--# CONFIG_FB_SYS_FILLRECT is not set
--# CONFIG_FB_SYS_COPYAREA is not set
--# CONFIG_FB_SYS_IMAGEBLIT is not set
--# CONFIG_FB_SYS_FOPS is not set
--CONFIG_FB_DEFERRED_IO=y
--# CONFIG_FB_SVGALIB is not set
--# CONFIG_FB_MACMODES is not set
--# CONFIG_FB_BACKLIGHT is not set
--CONFIG_FB_MODE_HELPERS=y
--# CONFIG_FB_TILEBLITTING is not set
+-static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
+-{
+-	struct pt_regs *regs = get_irq_regs();
 -
--#
--# Frame buffer hardware drivers
--#
--# CONFIG_FB_S1D13XXX is not set
--# CONFIG_FB_VIRTUAL is not set
--# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+-	ctrl_outb(0, RCR1);	/* Disable Carry Interrupts */
+-	regs->regs[3] = 1;	/* Using r3 */
 -
--#
--# Display device support
--#
--# CONFIG_DISPLAY_SUPPORT is not set
+-	return IRQ_HANDLED;
+-}
 -
--#
--# Console display driver support
--#
--CONFIG_DUMMY_CONSOLE=y
--CONFIG_FRAMEBUFFER_CONSOLE=y
--# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
--# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
--CONFIG_FONTS=y
--# CONFIG_FONT_8x8 is not set
--CONFIG_FONT_8x16=y
--# CONFIG_FONT_6x11 is not set
--# CONFIG_FONT_7x14 is not set
--# CONFIG_FONT_PEARL_8x8 is not set
--# CONFIG_FONT_ACORN_8x8 is not set
--# CONFIG_FONT_MINI_4x6 is not set
--# CONFIG_FONT_SUN8x16 is not set
--# CONFIG_FONT_SUN12x22 is not set
--# CONFIG_FONT_10x18 is not set
--CONFIG_LOGO=y
--# CONFIG_LOGO_LINUX_MONO is not set
--# CONFIG_LOGO_LINUX_VGA16 is not set
--# CONFIG_LOGO_LINUX_CLUT224 is not set
--# CONFIG_LOGO_SUPERH_MONO is not set
--# CONFIG_LOGO_SUPERH_VGA16 is not set
--CONFIG_LOGO_SUPERH_CLUT224=y
+-static struct irqaction irq0  = {
+-	.handler = timer_interrupt,
+-	.flags = IRQF_DISABLED,
+-	.mask = CPU_MASK_NONE,
+-	.name = "timer",
+-};
+-static struct irqaction irq1  = {
+-	.handler = sh64_rtc_interrupt,
+-	.flags = IRQF_DISABLED,
+-	.mask = CPU_MASK_NONE,
+-	.name = "rtc",
+-};
 -
--#
--# Sound
--#
--# CONFIG_SOUND is not set
--# CONFIG_HID_SUPPORT is not set
--# CONFIG_USB_SUPPORT is not set
--# CONFIG_MMC is not set
--# CONFIG_NEW_LEDS is not set
--# CONFIG_RTC_CLASS is not set
+-void __init time_init(void)
+-{
+-	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
+-	unsigned long interval;
+-	unsigned long frqcr, ifc, pfc;
+-	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
+-#define bfc_table ifc_table	/* Same */
+-#define pfc_table ifc_table	/* Same */
 -
--#
--# Userspace I/O
--#
--# CONFIG_UIO is not set
+-	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
+-	if (!tmu_base) {
+-		panic("Unable to remap TMU\n");
+-	}
 -
--#
--# File systems
--#
--CONFIG_EXT2_FS=y
--# CONFIG_EXT2_FS_XATTR is not set
--# CONFIG_EXT2_FS_XIP is not set
--CONFIG_EXT3_FS=y
--CONFIG_EXT3_FS_XATTR=y
--# CONFIG_EXT3_FS_POSIX_ACL is not set
--# CONFIG_EXT3_FS_SECURITY is not set
--# CONFIG_EXT4DEV_FS is not set
--CONFIG_JBD=y
--# CONFIG_JBD_DEBUG is not set
--CONFIG_FS_MBCACHE=y
--# CONFIG_REISERFS_FS is not set
--# CONFIG_JFS_FS is not set
--# CONFIG_FS_POSIX_ACL is not set
--# CONFIG_XFS_FS is not set
--# CONFIG_GFS2_FS is not set
--CONFIG_MINIX_FS=y
--CONFIG_ROMFS_FS=y
--CONFIG_INOTIFY=y
--CONFIG_INOTIFY_USER=y
--# CONFIG_QUOTA is not set
--CONFIG_DNOTIFY=y
--# CONFIG_AUTOFS_FS is not set
--# CONFIG_AUTOFS4_FS is not set
--# CONFIG_FUSE_FS is not set
+-	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
+-	if (!rtc_base) {
+-		panic("Unable to remap RTC\n");
+-	}
 -
--#
--# CD-ROM/DVD Filesystems
--#
--# CONFIG_ISO9660_FS is not set
--# CONFIG_UDF_FS is not set
+-	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
+-	if (!cprc_base) {
+-		panic("Unable to remap CPRC\n");
+-	}
 -
--#
--# DOS/FAT/NT Filesystems
--#
--# CONFIG_MSDOS_FS is not set
--# CONFIG_VFAT_FS is not set
--# CONFIG_NTFS_FS is not set
+-	xtime.tv_sec = get_rtc_time();
+-	xtime.tv_nsec = 0;
 -
--#
--# Pseudo filesystems
--#
--CONFIG_PROC_FS=y
--CONFIG_PROC_KCORE=y
--CONFIG_PROC_SYSCTL=y
--CONFIG_SYSFS=y
--CONFIG_TMPFS=y
--# CONFIG_TMPFS_POSIX_ACL is not set
--CONFIG_HUGETLBFS=y
--CONFIG_HUGETLB_PAGE=y
--# CONFIG_CONFIGFS_FS is not set
+-	setup_irq(TIMER_IRQ, &irq0);
+-	setup_irq(RTC_IRQ, &irq1);
 -
--#
--# Miscellaneous filesystems
--#
--# CONFIG_ADFS_FS is not set
--# CONFIG_AFFS_FS is not set
--# CONFIG_HFS_FS is not set
--# CONFIG_HFSPLUS_FS is not set
--# CONFIG_BEFS_FS is not set
--# CONFIG_BFS_FS is not set
--# CONFIG_EFS_FS is not set
--# CONFIG_CRAMFS is not set
--# CONFIG_VXFS_FS is not set
--# CONFIG_HPFS_FS is not set
--# CONFIG_QNX4FS_FS is not set
--# CONFIG_SYSV_FS is not set
--# CONFIG_UFS_FS is not set
+-	/* Check how fast it is.. */
+-	cpu_clock = get_cpu_hz();
 -
--#
--# Partition Types
--#
--CONFIG_PARTITION_ADVANCED=y
--# CONFIG_ACORN_PARTITION is not set
--# CONFIG_OSF_PARTITION is not set
--# CONFIG_AMIGA_PARTITION is not set
--# CONFIG_ATARI_PARTITION is not set
--# CONFIG_MAC_PARTITION is not set
--CONFIG_MSDOS_PARTITION=y
--# CONFIG_BSD_DISKLABEL is not set
--# CONFIG_MINIX_SUBPARTITION is not set
--# CONFIG_SOLARIS_X86_PARTITION is not set
--# CONFIG_UNIXWARE_DISKLABEL is not set
--# CONFIG_LDM_PARTITION is not set
--# CONFIG_SGI_PARTITION is not set
--# CONFIG_ULTRIX_PARTITION is not set
--# CONFIG_SUN_PARTITION is not set
--# CONFIG_KARMA_PARTITION is not set
--# CONFIG_EFI_PARTITION is not set
--# CONFIG_SYSV68_PARTITION is not set
--# CONFIG_NLS is not set
--CONFIG_INSTRUMENTATION=y
--CONFIG_PROFILING=y
--# CONFIG_OPROFILE is not set
--# CONFIG_MARKERS is not set
+-	/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
+-	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
 -
--#
--# Kernel hacking
--#
--# CONFIG_PRINTK_TIME is not set
--CONFIG_ENABLE_WARN_DEPRECATED=y
--CONFIG_ENABLE_MUST_CHECK=y
--CONFIG_MAGIC_SYSRQ=y
--# CONFIG_UNUSED_SYMBOLS is not set
--CONFIG_DEBUG_FS=y
--# CONFIG_HEADERS_CHECK is not set
--CONFIG_DEBUG_KERNEL=y
--# CONFIG_DEBUG_SHIRQ is not set
--CONFIG_DETECT_SOFTLOCKUP=y
--CONFIG_SCHED_DEBUG=y
--CONFIG_SCHEDSTATS=y
--# CONFIG_TIMER_STATS is not set
--# CONFIG_DEBUG_SLAB is not set
--# CONFIG_DEBUG_RT_MUTEXES is not set
--# CONFIG_RT_MUTEX_TESTER is not set
--# CONFIG_DEBUG_SPINLOCK is not set
--# CONFIG_DEBUG_MUTEXES is not set
--# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
--# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
--# CONFIG_DEBUG_KOBJECT is not set
--CONFIG_DEBUG_BUGVERBOSE=y
--# CONFIG_DEBUG_INFO is not set
--# CONFIG_DEBUG_VM is not set
--# CONFIG_DEBUG_LIST is not set
--# CONFIG_DEBUG_SG is not set
--CONFIG_FRAME_POINTER=y
--CONFIG_FORCED_INLINING=y
--# CONFIG_BOOT_PRINTK_DELAY is not set
--# CONFIG_FAULT_INJECTION is not set
--# CONFIG_SAMPLES is not set
--# CONFIG_EARLY_PRINTK is not set
--CONFIG_SH64_PROC_TLB=y
--CONFIG_SH64_PROC_ASIDS=y
--CONFIG_SH64_SR_WATCH=y
--# CONFIG_POOR_MANS_STRACE is not set
--CONFIG_SH_NO_BSS_INIT=y
+-	disable_irq(RTC_IRQ);
 -
--#
--# Security options
--#
--# CONFIG_KEYS is not set
--# CONFIG_SECURITY is not set
--# CONFIG_SECURITY_FILE_CAPABILITIES is not set
--# CONFIG_CRYPTO is not set
+-	printk("CPU clock: %d.%02dMHz\n",
+-	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
+-	{
+-		unsigned short bfc;
+-		frqcr = ctrl_inl(FRQCR);
+-		ifc  = ifc_table[(frqcr>> 6) & 0x0007];
+-		bfc  = bfc_table[(frqcr>> 3) & 0x0007];
+-		pfc  = pfc_table[(frqcr>> 12) & 0x0007];
+-		master_clock = cpu_clock * ifc;
+-		bus_clock = master_clock/bfc;
+-	}
 -
--#
--# Library routines
--#
--CONFIG_BITREVERSE=y
--# CONFIG_CRC_CCITT is not set
--# CONFIG_CRC16 is not set
--# CONFIG_CRC_ITU_T is not set
--CONFIG_CRC32=y
--# CONFIG_CRC7 is not set
--# CONFIG_LIBCRC32C is not set
--CONFIG_PLIST=y
--CONFIG_HAS_IOMEM=y
--CONFIG_HAS_IOPORT=y
--CONFIG_HAS_DMA=y
-diff --git a/arch/sh64/kernel/Makefile b/arch/sh64/kernel/Makefile
-deleted file mode 100644
-index e3467bd..0000000
---- a/arch/sh64/kernel/Makefile
-+++ /dev/null
-@@ -1,36 +0,0 @@
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 2000, 2001  Paolo Alberelli
--# Copyright (C) 2003  Paul Mundt
--#
--# Makefile for the Linux sh64 kernel.
--#
--# Note! Dependencies are done automagically by 'make dep', which also
--# removes any old dependencies. DON'T put your own dependencies here
--# unless it's something special (ie not a .c file).
--#
+-	printk("Bus clock: %d.%02dMHz\n",
+-	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
+-	module_clock = master_clock/pfc;
+-	printk("Module clock: %d.%02dMHz\n",
+-	       (module_clock/1000000), (module_clock % 1000000)/10000);
+-	interval = (module_clock/(HZ*4));
 -
--extra-y	:= head.o init_task.o vmlinux.lds
+-	printk("Interval = %ld\n", interval);
 -
--obj-y	:= process.o signal.o entry.o traps.o irq.o irq_intc.o \
--	   ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \
--	   switchto.o syscalls.o
+-	current_cpu_data.cpu_clock    = cpu_clock;
+-	current_cpu_data.master_clock = master_clock;
+-	current_cpu_data.bus_clock    = bus_clock;
+-	current_cpu_data.module_clock = module_clock;
 -
--obj-$(CONFIG_HEARTBEAT)		+= led.o
--obj-$(CONFIG_SH_ALPHANUMERIC)	+= alphanum.o
--obj-$(CONFIG_SH_DMA)		+= dma.o
--obj-$(CONFIG_SH_FPU)		+= fpu.o
--obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
--obj-$(CONFIG_KALLSYMS)		+= unwind.o
--obj-$(CONFIG_PCI)		+= pcibios.o
--obj-$(CONFIG_MODULES)		+= module.o
+-	/* Start TMU0 */
+-	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
+-	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
+-	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
+-	ctrl_outl(interval, TMU0_TCOR);
+-	ctrl_outl(interval, TMU0_TCNT);
+-	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
+-}
 -
--ifeq ($(CONFIG_PCI),y)
--obj-$(CONFIG_CPU_SH5)		+= pci_sh5.o
--endif
+-void enter_deep_standby(void)
+-{
+-	/* Disable watchdog timer */
+-	ctrl_outl(0xa5000000, WTCSR);
+-	/* Configure deep standby on sleep */
+-	ctrl_outl(0x03, STBCR);
 -
--USE_STANDARD_AS_RULE := true
+-#ifdef CONFIG_SH_ALPHANUMERIC
+-	{
+-		extern void mach_alphanum(int position, unsigned char value);
+-		extern void mach_alphanum_brightness(int setting);
+-		char halted[] = "Halted. ";
+-		int i;
+-		mach_alphanum_brightness(6); /* dimmest setting above off */
+-		for (i=0; i<8; i++) {
+-			mach_alphanum(i, halted[i]);
+-		}
+-		asm __volatile__ ("synco");
+-	}
+-#endif
 -
-diff --git a/arch/sh64/kernel/alphanum.c b/arch/sh64/kernel/alphanum.c
+-	asm __volatile__ ("sleep");
+-	asm __volatile__ ("synci");
+-	asm __volatile__ ("nop");
+-	asm __volatile__ ("nop");
+-	asm __volatile__ ("nop");
+-	asm __volatile__ ("nop");
+-	panic("Unexpected wakeup!\n");
+-}
+diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c
 deleted file mode 100644
-index d1619d9..0000000
---- a/arch/sh64/kernel/alphanum.c
+index f32df38..0000000
+--- a/arch/sh64/kernel/traps.c
 +++ /dev/null
-@@ -1,43 +0,0 @@
+@@ -1,982 +0,0 @@
 -/*
-- * arch/sh64/kernel/alphanum.c
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
 - *
-- * Copyright (C) 2002 Stuart Menefy <stuart.menefy at st.com>
+- * arch/sh64/kernel/traps.c
 - *
-- * May be copied or modified under the terms of the GNU General Public
-- * License.  See linux/COPYING for more information.
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003, 2004  Paul Mundt
+- * Copyright (C) 2003, 2004  Richard Curnow
 - *
-- * Machine-independent functions for handling 8-digit alphanumeric display
-- * (e.g. Agilent HDSP-253x)
 - */
--#include <linux/stddef.h>
--#include <linux/sched.h>
 -
--void mach_alphanum(int pos, unsigned char val);
+-/*
+- * 'Traps.c' handles hardware traps and faults after we have saved some
+- * state in 'entry.S'.
+- */
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/ptrace.h>
+-#include <linux/timer.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/init.h>
+-#include <linux/delay.h>
+-#include <linux/spinlock.h>
+-#include <linux/kallsyms.h>
+-#include <linux/interrupt.h>
+-#include <linux/sysctl.h>
+-#include <linux/module.h>
+-#include <asm/system.h>
+-#include <asm/uaccess.h>
+-#include <asm/io.h>
+-#include <asm/atomic.h>
+-#include <asm/processor.h>
+-#include <asm/pgtable.h>
 -
--void print_seg(char *file, int line)
--{
--	int i;
--	unsigned int nibble;
+-#undef DEBUG_EXCEPTION
+-#ifdef DEBUG_EXCEPTION
+-/* implemented in ../lib/dbg.c */
+-extern void show_excp_regs(char *fname, int trapnr, int signr,
+-			   struct pt_regs *regs);
+-#else
+-#define show_excp_regs(a, b, c, d)
+-#endif
 -
--	for (i = 0; i < 5; i++) {
--		mach_alphanum(i, file[i]);
--	}
+-static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
+-		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
 -
--	for (i = 0; i < 3; i++) {
--		nibble = ((line >> (i * 4)) & 0xf);
--		mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
--	}
+-#define DO_ERROR(trapnr, signr, str, name, tsk) \
+-asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
+-{ \
+-	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
 -}
 -
--void print_seg_num(unsigned num)
+-spinlock_t die_lock;
+-
+-void die(const char * str, struct pt_regs * regs, long err)
 -{
--	int i;
--	unsigned int nibble;
+-	console_verbose();
+-	spin_lock_irq(&die_lock);
+-	printk("%s: %lx\n", str, (err & 0xffffff));
+-	show_regs(regs);
+-	spin_unlock_irq(&die_lock);
+-	do_exit(SIGSEGV);
+-}
 -
--	for (i = 0; i < 8; i++) {
--		nibble = ((num >> (i * 4)) & 0xf);
+-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+-{
+-	if (!user_mode(regs))
+-		die(str, regs, err);
+-}
 -
--		mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
+-static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+-{
+-	if (!user_mode(regs)) {
+-		const struct exception_table_entry *fixup;
+-		fixup = search_exception_tables(regs->pc);
+-		if (fixup) {
+-			regs->pc = fixup->fixup;
+-			return;
+-		}
+-		die(str, regs, err);
 -	}
 -}
 -
-diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c
-deleted file mode 100644
-index ca76537..0000000
---- a/arch/sh64/kernel/asm-offsets.c
-+++ /dev/null
-@@ -1,33 +0,0 @@
--/*
-- * This program is used to generate definitions needed by
-- * assembly language modules.
-- *
-- * We use the technique used in the OSF Mach kernel code:
-- * generate asm statements containing #defines,
-- * compile this file to assembler, and then extract the
-- * #defines from the assembly-language output.
-- */
+-DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
+-DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
 -
--#include <linux/stddef.h>
--#include <linux/types.h>
--#include <linux/mm.h>
--#include <asm/thread_info.h>
 -
--#define DEFINE(sym, val) \
--        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+-/* Implement misaligned load/store handling for kernel (and optionally for user
+-   mode too).  Limitation : only SHmedia mode code is handled - there is no
+-   handling at all for misaligned accesses occurring in SHcompact code yet. */
 -
--#define BLANK() asm volatile("\n->" : : )
+-static int misaligned_fixup(struct pt_regs *regs);
 -
--int main(void)
+-asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
 -{
--	/* offsets into the thread_info struct */
--	DEFINE(TI_TASK,		offsetof(struct thread_info, task));
--	DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
--	DEFINE(TI_FLAGS,	offsetof(struct thread_info, flags));
--	DEFINE(TI_PRE_COUNT,	offsetof(struct thread_info, preempt_count));
--	DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
--	DEFINE(TI_ADDR_LIMIT,	offsetof(struct thread_info, addr_limit));
--	DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
+-	if (misaligned_fixup(regs) < 0) {
+-		do_unhandled_exception(7, SIGSEGV, "address error(load)",
+-				"do_address_error_load",
+-				error_code, regs, current);
+-	}
+-	return;
+-}
 -
--	return 0;
+-asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
+-{
+-	if (misaligned_fixup(regs) < 0) {
+-		do_unhandled_exception(8, SIGSEGV, "address error(store)",
+-				"do_address_error_store",
+-				error_code, regs, current);
+-	}
+-	return;
 -}
-diff --git a/arch/sh64/kernel/dma.c b/arch/sh64/kernel/dma.c
-deleted file mode 100644
-index 32c6f05..0000000
---- a/arch/sh64/kernel/dma.c
-+++ /dev/null
-@@ -1,297 +0,0 @@
--/*
-- * arch/sh64/kernel/dma.c
-- *
-- * DMA routines for the SH-5 DMAC.
-- *
-- * Copyright (C) 2003  Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/init.h>
--#include <linux/module.h>
--#include <linux/interrupt.h>
--#include <linux/types.h>
--#include <linux/irq.h>
--#include <linux/spinlock.h>
--#include <linux/mm.h>
--#include <asm/hardware.h>
--#include <asm/dma.h>
--#include <asm/signal.h>
--#include <asm/errno.h>
--#include <asm/io.h>
 -
--typedef struct {
--	unsigned long dev_addr;
--	unsigned long mem_addr;
+-#if defined(CONFIG_SH64_ID2815_WORKAROUND)
 -
--	unsigned int mode;
--	unsigned int count;
--} dma_info_t;
+-#define OPCODE_INVALID      0
+-#define OPCODE_USER_VALID   1
+-#define OPCODE_PRIV_VALID   2
 -
--static dma_info_t dma_info[MAX_DMA_CHANNELS];
--static DEFINE_SPINLOCK(dma_spin_lock);
+-/* getcon/putcon - requires checking which control register is referenced. */
+-#define OPCODE_CTRL_REG     3
 -
--/* arch/sh64/kernel/irq_intc.c */
--extern void make_intc_irq(unsigned int irq);
+-/* Table of valid opcodes for SHmedia mode.
+-   Form a 10-bit value by concatenating the major/minor opcodes i.e.
+-   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
+-   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
+-   LSBs==4'b0000 etc). */
+-static unsigned long shmedia_opcode_table[64] = {
+-	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
+-	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
+-	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
+-	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
+-	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+-	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+-	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+-	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
+-};
 -
--/* DMAC Interrupts */
--#define DMA_IRQ_DMTE0	18
--#define DMA_IRQ_DERR	22
+-void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
+-{
+-	/* Workaround SH5-101 cut2 silicon defect #2815 :
+-	   in some situations, inter-mode branches from SHcompact -> SHmedia
+-	   which should take ITLBMISS or EXECPROT exceptions at the target
+-	   falsely take RESINST at the target instead. */
 -
--#define DMAC_COMMON_BASE	(dmac_base + 0x08)
--#define DMAC_SAR_BASE		(dmac_base + 0x10)
--#define DMAC_DAR_BASE		(dmac_base + 0x18)
--#define DMAC_COUNT_BASE		(dmac_base + 0x20)
--#define DMAC_CTRL_BASE		(dmac_base + 0x28)
--#define DMAC_STATUS_BASE	(dmac_base + 0x30)
+-	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
+-	unsigned long pc, aligned_pc;
+-	int get_user_error;
+-	int trapnr = 12;
+-	int signr = SIGILL;
+-	char *exception_name = "reserved_instruction";
 -
--#define DMAC_SAR(n)	(DMAC_SAR_BASE    + ((n) * 0x28))
--#define DMAC_DAR(n)	(DMAC_DAR_BASE    + ((n) * 0x28))
--#define DMAC_COUNT(n)	(DMAC_COUNT_BASE  + ((n) * 0x28))
--#define DMAC_CTRL(n)	(DMAC_CTRL_BASE   + ((n) * 0x28))
--#define DMAC_STATUS(n)	(DMAC_STATUS_BASE + ((n) * 0x28))
+-	pc = regs->pc;
+-	if ((pc & 3) == 1) {
+-		/* SHmedia : check for defect.  This requires executable vmas
+-		   to be readable too. */
+-		aligned_pc = pc & ~3;
+-		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+-			get_user_error = -EFAULT;
+-		} else {
+-			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+-		}
+-		if (get_user_error >= 0) {
+-			unsigned long index, shift;
+-			unsigned long major, minor, combined;
+-			unsigned long reserved_field;
+-			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
+-			major = (opcode >> 26) & 0x3f;
+-			minor = (opcode >> 16) & 0xf;
+-			combined = (major << 4) | minor;
+-			index = major;
+-			shift = minor << 1;
+-			if (reserved_field == 0) {
+-				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
+-				switch (opcode_state) {
+-					case OPCODE_INVALID:
+-						/* Trap. */
+-						break;
+-					case OPCODE_USER_VALID:
+-						/* Restart the instruction : the branch to the instruction will now be from an RTE
+-						   not from SHcompact so the silicon defect won't be triggered. */
+-						return;
+-					case OPCODE_PRIV_VALID:
+-						if (!user_mode(regs)) {
+-							/* Should only ever get here if a module has
+-							   SHcompact code inside it.  If so, the same fix up is needed. */
+-							return; /* same reason */
+-						}
+-						/* Otherwise, user mode trying to execute a privileged instruction -
+-						   fall through to trap. */
+-						break;
+-					case OPCODE_CTRL_REG:
+-						/* If in privileged mode, return as above. */
+-						if (!user_mode(regs)) return;
+-						/* In user mode ... */
+-						if (combined == 0x9f) { /* GETCON */
+-							unsigned long regno = (opcode >> 20) & 0x3f;
+-							if (regno >= 62) {
+-								return;
+-							}
+-							/* Otherwise, reserved or privileged control register, => trap */
+-						} else if (combined == 0x1bf) { /* PUTCON */
+-							unsigned long regno = (opcode >> 4) & 0x3f;
+-							if (regno >= 62) {
+-								return;
+-							}
+-							/* Otherwise, reserved or privileged control register, => trap */
+-						} else {
+-							/* Trap */
+-						}
+-						break;
+-					default:
+-						/* Fall through to trap. */
+-						break;
+-				}
+-			}
+-			/* fall through to normal resinst processing */
+-		} else {
+-			/* Error trying to read opcode.  This typically means a
+-			   real fault, not a RESINST any more.  So change the
+-			   codes. */
+-			trapnr = 87;
+-			exception_name = "address error (exec)";
+-			signr = SIGSEGV;
+-		}
+-	}
 -
--/* DMAC.COMMON Bit Definitions */
--#define DMAC_COMMON_PR	0x00000001	/* Priority */
--					/* Bits 1-2 Reserved */
--#define DMAC_COMMON_ME	0x00000008	/* Master Enable */
--#define DMAC_COMMON_NMI	0x00000010	/* NMI Flag */
--					/* Bits 5-6 Reserved */
--#define DMAC_COMMON_ER	0x00000780	/* Error Response */
--#define DMAC_COMMON_AAE	0x00007800	/* Address Alignment Error */
--					/* Bits 15-63 Reserved */
+-	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
+-}
 -
--/* DMAC.SAR Bit Definitions */
--#define DMAC_SAR_ADDR	0xffffffff	/* Source Address */
+-#else /* CONFIG_SH64_ID2815_WORKAROUND */
 -
--/* DMAC.DAR Bit Definitions */
--#define DMAC_DAR_ADDR	0xffffffff	/* Destination Address */
+-/* If the workaround isn't needed, this is just a straightforward reserved
+-   instruction */
+-DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)
 -
--/* DMAC.COUNT Bit Definitions */
--#define DMAC_COUNT_CNT	0xffffffff	/* Transfer Count */
+-#endif /* CONFIG_SH64_ID2815_WORKAROUND */
 -
--/* DMAC.CTRL Bit Definitions */
--#define DMAC_CTRL_TS	0x00000007	/* Transfer Size */
--#define DMAC_CTRL_SI	0x00000018	/* Source Increment */
--#define DMAC_CTRL_DI	0x00000060	/* Destination Increment */
--#define DMAC_CTRL_RS	0x00000780	/* Resource Select */
--#define DMAC_CTRL_IE	0x00000800	/* Interrupt Enable */
--#define DMAC_CTRL_TE	0x00001000	/* Transfer Enable */
--					/* Bits 15-63 Reserved */
+-/* Called with interrupts disabled */
+-asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
+-{
+-	PLS();
+-	show_excp_regs(__FUNCTION__, -1, -1, regs);
+-	die_if_kernel("exception", regs, ex);
+-}
 -
--/* DMAC.STATUS Bit Definitions */
--#define DMAC_STATUS_TE	0x00000001	/* Transfer End */
--#define DMAC_STATUS_AAE	0x00000002	/* Address Alignment Error */
--					/* Bits 2-63 Reserved */
+-int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
+-{
+-	/* Syscall debug */
+-        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);
 -
--static unsigned long dmac_base;
+-	die_if_kernel("unknown trapa", regs, scId);
 -
--void set_dma_count(unsigned int chan, unsigned int count);
--void set_dma_addr(unsigned int chan, unsigned int addr);
+-	return -ENOSYS;
+-}
 -
--static irqreturn_t dma_mte(int irq, void *dev_id, struct pt_regs *regs)
+-void show_stack(struct task_struct *tsk, unsigned long *sp)
 -{
--	unsigned int chan = irq - DMA_IRQ_DMTE0;
--	dma_info_t *info = dma_info + chan;
--	u64 status;
+-#ifdef CONFIG_KALLSYMS
+-	extern void sh64_unwind(struct pt_regs *regs);
+-	struct pt_regs *regs;
 -
--	if (info->mode & DMA_MODE_WRITE) {
--		sh64_out64(info->mem_addr & DMAC_SAR_ADDR, DMAC_SAR(chan));
--	} else {
--		sh64_out64(info->mem_addr & DMAC_DAR_ADDR, DMAC_DAR(chan));
--	}
+-	regs = tsk ? tsk->thread.kregs : NULL;
 -
--	set_dma_count(chan, info->count);
+-	sh64_unwind(regs);
+-#else
+-	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
+-#endif
+-}
 -
--	/* Clear the TE bit */
--	status = sh64_in64(DMAC_STATUS(chan));
--	status &= ~DMAC_STATUS_TE;
--	sh64_out64(status, DMAC_STATUS(chan));
+-void show_task(unsigned long *sp)
+-{
+-	show_stack(NULL, sp);
+-}
 -
--	return IRQ_HANDLED;
+-void dump_stack(void)
+-{
+-	show_task(NULL);
 -}
+-/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
+-EXPORT_SYMBOL(dump_stack);
 -
--static struct irqaction irq_dmte = {
--	.handler	= dma_mte,
--	.flags		= IRQF_DISABLED,
--	.name		= "DMA MTE",
--};
+-static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
+-		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
+-{
+-	show_excp_regs(fn_name, trapnr, signr, regs);
+-	tsk->thread.error_code = error_code;
+-	tsk->thread.trap_no = trapnr;
 -
--static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
+-	if (user_mode(regs))
+-		force_sig(signr, tsk);
+-
+-	die_if_no_fixup(str, regs, error_code);
+-}
+-
+-static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
 -{
--	u64 tmp;
--	u8 chan;
+-	int get_user_error;
+-	unsigned long aligned_pc;
+-	unsigned long opcode;
 -
--	printk(KERN_NOTICE "DMAC: Got a DMA Error!\n");
+-	if ((pc & 3) == 1) {
+-		/* SHmedia */
+-		aligned_pc = pc & ~3;
+-		if (from_user_mode) {
+-			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+-				get_user_error = -EFAULT;
+-			} else {
+-				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+-				*result_opcode = opcode;
+-			}
+-			return get_user_error;
+-		} else {
+-			/* If the fault was in the kernel, we can either read
+-			 * this directly, or if not, we fault.
+-			*/
+-			*result_opcode = *(unsigned long *) aligned_pc;
+-			return 0;
+-		}
+-	} else if ((pc & 1) == 0) {
+-		/* SHcompact */
+-		/* TODO : provide handling for this.  We don't really support
+-		   user-mode SHcompact yet, and for a kernel fault, this would
+-		   have to come from a module built for SHcompact.  */
+-		return -EFAULT;
+-	} else {
+-		/* misaligned */
+-		return -EFAULT;
+-	}
+-}
 -
--	tmp = sh64_in64(DMAC_COMMON_BASE);
+-static int address_is_sign_extended(__u64 a)
+-{
+-	__u64 b;
+-#if (NEFF == 32)
+-	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
+-	return (b == a) ? 1 : 0;
+-#else
+-#error "Sign extend check only works for NEFF==32"
+-#endif
+-}
 -
--	/* Check for the type of error */
--	if ((chan = tmp & DMAC_COMMON_AAE)) {
--		/* It's an address alignment error.. */
--		printk(KERN_NOTICE "DMAC: Alignment error on channel %d, ", chan);
+-static int generate_and_check_address(struct pt_regs *regs,
+-				      __u32 opcode,
+-				      int displacement_not_indexed,
+-				      int width_shift,
+-				      __u64 *address)
+-{
+-	/* return -1 for fault, 0 for OK */
 -
--		printk(KERN_NOTICE "SAR: 0x%08llx, DAR: 0x%08llx, COUNT: %lld\n",
--		       (sh64_in64(DMAC_SAR(chan)) & DMAC_SAR_ADDR),
--		       (sh64_in64(DMAC_DAR(chan)) & DMAC_DAR_ADDR),
--		       (sh64_in64(DMAC_COUNT(chan)) & DMAC_COUNT_CNT));
+-	__u64 base_address, addr;
+-	int basereg;
 -
--	} else if ((chan = tmp & DMAC_COMMON_ER)) {
--		/* Something else went wrong.. */
--		printk(KERN_NOTICE "DMAC: Error on channel %d\n", chan);
+-	basereg = (opcode >> 20) & 0x3f;
+-	base_address = regs->regs[basereg];
+-	if (displacement_not_indexed) {
+-		__s64 displacement;
+-		displacement = (opcode >> 10) & 0x3ff;
+-		displacement = ((displacement << 54) >> 54); /* sign extend */
+-		addr = (__u64)((__s64)base_address + (displacement << width_shift));
+-	} else {
+-		__u64 offset;
+-		int offsetreg;
+-		offsetreg = (opcode >> 10) & 0x3f;
+-		offset = regs->regs[offsetreg];
+-		addr = base_address + offset;
 -	}
 -
--	/* Reset the ME bit to clear the interrupt */
--	tmp |= DMAC_COMMON_ME;
--	sh64_out64(tmp, DMAC_COMMON_BASE);
+-	/* Check sign extended */
+-	if (!address_is_sign_extended(addr)) {
+-		return -1;
+-	}
 -
--	return IRQ_HANDLED;
+-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-	/* Check accessible.  For misaligned access in the kernel, assume the
+-	   address is always accessible (and if not, just fault when the
+-	   load/store gets done.) */
+-	if (user_mode(regs)) {
+-		if (addr >= TASK_SIZE) {
+-			return -1;
+-		}
+-		/* Do access_ok check later - it depends on whether it's a load or a store. */
+-	}
+-#endif
+-
+-	*address = addr;
+-	return 0;
 -}
 -
--static struct irqaction irq_derr = {
--	.handler	= dma_err,
--	.flags		= IRQF_DISABLED,
--	.name		= "DMA Error",
--};
+-/* Default value as for sh */
+-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-static int user_mode_unaligned_fixup_count = 10;
+-static int user_mode_unaligned_fixup_enable = 1;
+-#endif
 -
--static inline unsigned long calc_xmit_shift(unsigned int chan)
--{
--	return sh64_in64(DMAC_CTRL(chan)) & 0x03;
--}
+-static int kernel_mode_unaligned_fixup_count = 32;
 -
--void setup_dma(unsigned int chan, dma_info_t *info)
+-static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
 -{
--	unsigned int irq = DMA_IRQ_DMTE0 + chan;
--	dma_info_t *dma = dma_info + chan;
+-	unsigned short x;
+-	unsigned char *p, *q;
+-	p = (unsigned char *) (int) address;
+-	q = (unsigned char *) &x;
+-	q[0] = p[0];
+-	q[1] = p[1];
 -
--	make_intc_irq(irq);
--	setup_irq(irq, &irq_dmte);
--	dma = info;
+-	if (do_sign_extend) {
+-		*result = (__u64)(__s64) *(short *) &x;
+-	} else {
+-		*result = (__u64) x;
+-	}
 -}
 -
--void enable_dma(unsigned int chan)
+-static void misaligned_kernel_word_store(__u64 address, __u64 value)
 -{
--	u64 ctrl;
+-	unsigned short x;
+-	unsigned char *p, *q;
+-	p = (unsigned char *) (int) address;
+-	q = (unsigned char *) &x;
 -
--	ctrl = sh64_in64(DMAC_CTRL(chan));
--	ctrl |= DMAC_CTRL_TE;
--	sh64_out64(ctrl, DMAC_CTRL(chan));
+-	x = (__u16) value;
+-	p[0] = q[0];
+-	p[1] = q[1];
 -}
 -
--void disable_dma(unsigned int chan)
+-static int misaligned_load(struct pt_regs *regs,
+-			   __u32 opcode,
+-			   int displacement_not_indexed,
+-			   int width_shift,
+-			   int do_sign_extend)
 -{
--	u64 ctrl;
+-	/* Return -1 for a fault, 0 for OK */
+-	int error;
+-	int destreg;
+-	__u64 address;
 -
--	ctrl = sh64_in64(DMAC_CTRL(chan));
--	ctrl &= ~DMAC_CTRL_TE;
--	sh64_out64(ctrl, DMAC_CTRL(chan));
--}
+-	error = generate_and_check_address(regs, opcode,
+-			displacement_not_indexed, width_shift, &address);
+-	if (error < 0) {
+-		return error;
+-	}
 -
--void set_dma_mode(unsigned int chan, char mode)
--{
--	dma_info_t *info = dma_info + chan;
+-	destreg = (opcode >> 4) & 0x3f;
+-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-	if (user_mode(regs)) {
+-		__u64 buffer;
 -
--	info->mode = mode;
+-		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
+-			return -1;
+-		}
 -
--	set_dma_addr(chan, info->mem_addr);
--	set_dma_count(chan, info->count);
--}
+-		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
+-			return -1; /* fault */
+-		}
+-		switch (width_shift) {
+-		case 1:
+-			if (do_sign_extend) {
+-				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
+-			} else {
+-				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
+-			}
+-			break;
+-		case 2:
+-			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
+-			break;
+-		case 3:
+-			regs->regs[destreg] = buffer;
+-			break;
+-		default:
+-			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
+-				width_shift, (unsigned long) regs->pc);
+-			break;
+-		}
+-	} else
+-#endif
+-	{
+-		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
+-		__u64 lo, hi;
 -
--void set_dma_addr(unsigned int chan, unsigned int addr)
--{
--	dma_info_t *info = dma_info + chan;
--	unsigned long sar, dar;
+-		switch (width_shift) {
+-		case 1:
+-			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
+-			break;
+-		case 2:
+-			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
+-			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
+-			regs->regs[destreg] = lo | hi;
+-			break;
+-		case 3:
+-			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
+-			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
+-			regs->regs[destreg] = lo | hi;
+-			break;
 -
--	info->mem_addr = addr;
--	sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr;
--	dar = (info->mode & DMA_MODE_WRITE) ? info->dev_addr : info->mem_addr;
+-		default:
+-			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
+-				width_shift, (unsigned long) regs->pc);
+-			break;
+-		}
+-	}
+-
+-	return 0;
 -
--	sh64_out64(sar & DMAC_SAR_ADDR, DMAC_SAR(chan));
--	sh64_out64(dar & DMAC_SAR_ADDR, DMAC_DAR(chan));
 -}
 -
--void set_dma_count(unsigned int chan, unsigned int count)
+-static int misaligned_store(struct pt_regs *regs,
+-			    __u32 opcode,
+-			    int displacement_not_indexed,
+-			    int width_shift)
 -{
--	dma_info_t *info = dma_info + chan;
--	u64 tmp;
+-	/* Return -1 for a fault, 0 for OK */
+-	int error;
+-	int srcreg;
+-	__u64 address;
 -
--	info->count = count;
+-	error = generate_and_check_address(regs, opcode,
+-			displacement_not_indexed, width_shift, &address);
+-	if (error < 0) {
+-		return error;
+-	}
 -
--	tmp = (info->count >> calc_xmit_shift(chan)) & DMAC_COUNT_CNT;
+-	srcreg = (opcode >> 4) & 0x3f;
+-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-	if (user_mode(regs)) {
+-		__u64 buffer;
 -
--	sh64_out64(tmp, DMAC_COUNT(chan));
--}
+-		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
+-			return -1;
+-		}
 -
--unsigned long claim_dma_lock(void)
--{
--	unsigned long flags;
+-		switch (width_shift) {
+-		case 1:
+-			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
+-			break;
+-		case 2:
+-			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
+-			break;
+-		case 3:
+-			buffer = regs->regs[srcreg];
+-			break;
+-		default:
+-			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
+-				width_shift, (unsigned long) regs->pc);
+-			break;
+-		}
 -
--	spin_lock_irqsave(&dma_spin_lock, flags);
+-		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
+-			return -1; /* fault */
+-		}
+-	} else
+-#endif
+-	{
+-		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
+-		__u64 val = regs->regs[srcreg];
 -
--	return flags;
--}
+-		switch (width_shift) {
+-		case 1:
+-			misaligned_kernel_word_store(address, val);
+-			break;
+-		case 2:
+-			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
+-			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
+-			break;
+-		case 3:
+-			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
+-			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
+-			break;
 -
--void release_dma_lock(unsigned long flags)
--{
--	spin_unlock_irqrestore(&dma_spin_lock, flags);
--}
+-		default:
+-			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
+-				width_shift, (unsigned long) regs->pc);
+-			break;
+-		}
+-	}
+-
+-	return 0;
 -
--int get_dma_residue(unsigned int chan)
--{
--	return sh64_in64(DMAC_COUNT(chan) << calc_xmit_shift(chan));
 -}
 -
--int __init init_dma(void)
+-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
+-   error. */
+-static int misaligned_fpu_load(struct pt_regs *regs,
+-			   __u32 opcode,
+-			   int displacement_not_indexed,
+-			   int width_shift,
+-			   int do_paired_load)
 -{
--	struct vcr_info vcr;
--	u64 tmp;
+-	/* Return -1 for a fault, 0 for OK */
+-	int error;
+-	int destreg;
+-	__u64 address;
 -
--	/* Remap the DMAC */
--	dmac_base = onchip_remap(PHYS_DMAC_BLOCK, 1024, "DMAC");
--	if (!dmac_base) {
--		printk(KERN_ERR "Unable to remap DMAC\n");
--		return -ENOMEM;
+-	error = generate_and_check_address(regs, opcode,
+-			displacement_not_indexed, width_shift, &address);
+-	if (error < 0) {
+-		return error;
 -	}
 -
--	/* Report DMAC.VCR Info */
--	vcr = sh64_get_vcr_info(dmac_base);
--	printk("DMAC: Module ID: 0x%04x, Module version: 0x%04x\n",
--	       vcr.mod_id, vcr.mod_vers);
+-	destreg = (opcode >> 4) & 0x3f;
+-	if (user_mode(regs)) {
+-		__u64 buffer;
+-		__u32 buflo, bufhi;
 -
--	/* Set the ME bit */
--	tmp = sh64_in64(DMAC_COMMON_BASE);
--	tmp |= DMAC_COMMON_ME;
--	sh64_out64(tmp, DMAC_COMMON_BASE);
+-		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
+-			return -1;
+-		}
 -
--	/* Enable the DMAC Error Interrupt */
--	make_intc_irq(DMA_IRQ_DERR);
--	setup_irq(DMA_IRQ_DERR, &irq_derr);
+-		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
+-			return -1; /* fault */
+-		}
+-		/* 'current' may be the current owner of the FPU state, so
+-		   context switch the registers into memory so they can be
+-		   indexed by register number. */
+-		if (last_task_used_math == current) {
+-			grab_fpu();
+-			fpsave(&current->thread.fpu.hard);
+-			release_fpu();
+-			last_task_used_math = NULL;
+-			regs->sr |= SR_FD;
+-		}
+-
+-		buflo = *(__u32*) &buffer;
+-		bufhi = *(1 + (__u32*) &buffer);
+-
+-		switch (width_shift) {
+-		case 2:
+-			current->thread.fpu.hard.fp_regs[destreg] = buflo;
+-			break;
+-		case 3:
+-			if (do_paired_load) {
+-				current->thread.fpu.hard.fp_regs[destreg] = buflo;
+-				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+-			} else {
+-#if defined(CONFIG_LITTLE_ENDIAN)
+-				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
+-				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+-#else
+-				current->thread.fpu.hard.fp_regs[destreg] = buflo;
+-				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+-#endif
+-			}
+-			break;
+-		default:
+-			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
+-				width_shift, (unsigned long) regs->pc);
+-			break;
+-		}
+-		return 0;
+-	} else {
+-		die ("Misaligned FPU load inside kernel", regs, 0);
+-		return -1;
+-	}
 -
--	return 0;
--}
 -
--static void __exit exit_dma(void)
--{
--	onchip_unmap(dmac_base);
--	free_irq(DMA_IRQ_DERR, 0);
 -}
 -
--module_init(init_dma);
--module_exit(exit_dma);
+-static int misaligned_fpu_store(struct pt_regs *regs,
+-			   __u32 opcode,
+-			   int displacement_not_indexed,
+-			   int width_shift,
+-			   int do_paired_load)
+-{
+-	/* Return -1 for a fault, 0 for OK */
+-	int error;
+-	int srcreg;
+-	__u64 address;
 -
--MODULE_AUTHOR("Paul Mundt");
--MODULE_DESCRIPTION("DMA API for SH-5 DMAC");
--MODULE_LICENSE("GPL");
+-	error = generate_and_check_address(regs, opcode,
+-			displacement_not_indexed, width_shift, &address);
+-	if (error < 0) {
+-		return error;
+-	}
 -
--EXPORT_SYMBOL(setup_dma);
--EXPORT_SYMBOL(claim_dma_lock);
--EXPORT_SYMBOL(release_dma_lock);
--EXPORT_SYMBOL(enable_dma);
--EXPORT_SYMBOL(disable_dma);
--EXPORT_SYMBOL(set_dma_mode);
--EXPORT_SYMBOL(set_dma_addr);
--EXPORT_SYMBOL(set_dma_count);
--EXPORT_SYMBOL(get_dma_residue);
+-	srcreg = (opcode >> 4) & 0x3f;
+-	if (user_mode(regs)) {
+-		__u64 buffer;
+-		/* Initialise these to NaNs. */
+-		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
 -
-diff --git a/arch/sh64/kernel/early_printk.c b/arch/sh64/kernel/early_printk.c
-deleted file mode 100644
-index 4f91311..0000000
---- a/arch/sh64/kernel/early_printk.c
-+++ /dev/null
-@@ -1,99 +0,0 @@
--/*
-- * arch/sh64/kernel/early_printk.c
-- *
-- * SH-5 Early SCIF console (cloned and hacked from sh implementation)
-- *
-- * Copyright (C) 2003, 2004  Paul Mundt <lethal at linux-sh.org>
-- * Copyright (C) 2002  M. R. Brown <mrbrown at 0xd6.org>
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/console.h>
--#include <linux/tty.h>
--#include <linux/init.h>
--#include <asm/io.h>
--#include <asm/hardware.h>
+-		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
+-			return -1;
+-		}
 -
--#define SCIF_BASE_ADDR	0x01030000
--#define SCIF_ADDR_SH5	PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR
+-		/* 'current' may be the current owner of the FPU state, so
+-		   context switch the registers into memory so they can be
+-		   indexed by register number. */
+-		if (last_task_used_math == current) {
+-			grab_fpu();
+-			fpsave(&current->thread.fpu.hard);
+-			release_fpu();
+-			last_task_used_math = NULL;
+-			regs->sr |= SR_FD;
+-		}
 -
--/*
-- * Fixed virtual address where SCIF is mapped (should already be done
-- * in arch/sh64/kernel/head.S!).
-- */
--#define SCIF_REG	0xfa030000
+-		switch (width_shift) {
+-		case 2:
+-			buflo = current->thread.fpu.hard.fp_regs[srcreg];
+-			break;
+-		case 3:
+-			if (do_paired_load) {
+-				buflo = current->thread.fpu.hard.fp_regs[srcreg];
+-				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+-			} else {
+-#if defined(CONFIG_LITTLE_ENDIAN)
+-				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
+-				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+-#else
+-				buflo = current->thread.fpu.hard.fp_regs[srcreg];
+-				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+-#endif
+-			}
+-			break;
+-		default:
+-			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
+-				width_shift, (unsigned long) regs->pc);
+-			break;
+-		}
 -
--enum {
--	SCIF_SCSMR2	= SCIF_REG + 0x00,
--	SCIF_SCBRR2	= SCIF_REG + 0x04,
--	SCIF_SCSCR2	= SCIF_REG + 0x08,
--	SCIF_SCFTDR2	= SCIF_REG + 0x0c,
--	SCIF_SCFSR2	= SCIF_REG + 0x10,
--	SCIF_SCFRDR2	= SCIF_REG + 0x14,
--	SCIF_SCFCR2	= SCIF_REG + 0x18,
--	SCIF_SCFDR2	= SCIF_REG + 0x1c,
--	SCIF_SCSPTR2	= SCIF_REG + 0x20,
--	SCIF_SCLSR2	= SCIF_REG + 0x24,
--};
+-		*(__u32*) &buffer = buflo;
+-		*(1 + (__u32*) &buffer) = bufhi;
+-		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
+-			return -1; /* fault */
+-		}
+-		return 0;
+-	} else {
+-		die ("Misaligned FPU load inside kernel", regs, 0);
+-		return -1;
+-	}
+-}
+-#endif
 -
--static void sh_console_putc(int c)
+-static int misaligned_fixup(struct pt_regs *regs)
 -{
--	while (!(ctrl_inw(SCIF_SCFSR2) & 0x20))
--		cpu_relax();
+-	unsigned long opcode;
+-	int error;
+-	int major, minor;
 -
--	ctrl_outb(c, SCIF_SCFTDR2);
--	ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2);
+-#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-	/* Never fixup user mode misaligned accesses without this option enabled. */
+-	return -1;
+-#else
+-	if (!user_mode_unaligned_fixup_enable) return -1;
+-#endif
 -
--	if (c == '\n')
--		sh_console_putc('\r');
--}
+-	error = read_opcode(regs->pc, &opcode, user_mode(regs));
+-	if (error < 0) {
+-		return error;
+-	}
+-	major = (opcode >> 26) & 0x3f;
+-	minor = (opcode >> 16) & 0xf;
 -
--static void sh_console_flush(void)
--{
--	ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
+-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
+-		--user_mode_unaligned_fixup_count;
+-		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
+-		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
+-		       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+-	} else
+-#endif
+-	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
+-		--kernel_mode_unaligned_fixup_count;
+-		if (in_interrupt()) {
+-			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
+-			       (__u32)regs->pc, opcode);
+-		} else {
+-			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
+-			       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+-		}
+-	}
 -
--	while (!(ctrl_inw(SCIF_SCFSR2) & 0x40))
--		cpu_relax();
 -
--	ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
--}
+-	switch (major) {
+-		case (0x84>>2): /* LD.W */
+-			error = misaligned_load(regs, opcode, 1, 1, 1);
+-			break;
+-		case (0xb0>>2): /* LD.UW */
+-			error = misaligned_load(regs, opcode, 1, 1, 0);
+-			break;
+-		case (0x88>>2): /* LD.L */
+-			error = misaligned_load(regs, opcode, 1, 2, 1);
+-			break;
+-		case (0x8c>>2): /* LD.Q */
+-			error = misaligned_load(regs, opcode, 1, 3, 0);
+-			break;
 -
--static void sh_console_write(struct console *con, const char *s, unsigned count)
--{
--	while (count-- > 0)
--		sh_console_putc(*s++);
+-		case (0xa4>>2): /* ST.W */
+-			error = misaligned_store(regs, opcode, 1, 1);
+-			break;
+-		case (0xa8>>2): /* ST.L */
+-			error = misaligned_store(regs, opcode, 1, 2);
+-			break;
+-		case (0xac>>2): /* ST.Q */
+-			error = misaligned_store(regs, opcode, 1, 3);
+-			break;
 -
--	sh_console_flush();
--}
+-		case (0x40>>2): /* indexed loads */
+-			switch (minor) {
+-				case 0x1: /* LDX.W */
+-					error = misaligned_load(regs, opcode, 0, 1, 1);
+-					break;
+-				case 0x5: /* LDX.UW */
+-					error = misaligned_load(regs, opcode, 0, 1, 0);
+-					break;
+-				case 0x2: /* LDX.L */
+-					error = misaligned_load(regs, opcode, 0, 2, 1);
+-					break;
+-				case 0x3: /* LDX.Q */
+-					error = misaligned_load(regs, opcode, 0, 3, 0);
+-					break;
+-				default:
+-					error = -1;
+-					break;
+-			}
+-			break;
 -
--static int __init sh_console_setup(struct console *con, char *options)
--{
--	con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8;
+-		case (0x60>>2): /* indexed stores */
+-			switch (minor) {
+-				case 0x1: /* STX.W */
+-					error = misaligned_store(regs, opcode, 0, 1);
+-					break;
+-				case 0x2: /* STX.L */
+-					error = misaligned_store(regs, opcode, 0, 2);
+-					break;
+-				case 0x3: /* STX.Q */
+-					error = misaligned_store(regs, opcode, 0, 3);
+-					break;
+-				default:
+-					error = -1;
+-					break;
+-			}
+-			break;
+-
+-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-		case (0x94>>2): /* FLD.S */
+-			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
+-			break;
+-		case (0x98>>2): /* FLD.P */
+-			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
+-			break;
+-		case (0x9c>>2): /* FLD.D */
+-			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
+-			break;
+-		case (0x1c>>2): /* floating indexed loads */
+-			switch (minor) {
+-			case 0x8: /* FLDX.S */
+-				error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
+-				break;
+-			case 0xd: /* FLDX.P */
+-				error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
+-				break;
+-			case 0x9: /* FLDX.D */
+-				error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
+-				break;
+-			default:
+-				error = -1;
+-				break;
+-			}
+-			break;
+-		case (0xb4>>2): /* FLD.S */
+-			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
+-			break;
+-		case (0xb8>>2): /* FLD.P */
+-			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
+-			break;
+-		case (0xbc>>2): /* FLD.D */
+-			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
+-			break;
+-		case (0x3c>>2): /* floating indexed stores */
+-			switch (minor) {
+-			case 0x8: /* FSTX.S */
+-				error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
+-				break;
+-			case 0xd: /* FSTX.P */
+-				error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
+-				break;
+-			case 0x9: /* FSTX.D */
+-				error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
+-				break;
+-			default:
+-				error = -1;
+-				break;
+-			}
+-			break;
+-#endif
+-
+-		default:
+-			/* Fault */
+-			error = -1;
+-			break;
+-	}
+-
+-	if (error < 0) {
+-		return error;
+-	} else {
+-		regs->pc += 4; /* Skip the instruction that's just been emulated */
+-		return 0;
+-	}
 -
--	return 0;
 -}
 -
--static struct console sh_console = {
--	.name		= "scifcon",
--	.write		= sh_console_write,
--	.setup		= sh_console_setup,
--	.flags		= CON_PRINTBUFFER | CON_BOOT,
--	.index		= -1,
+-static ctl_table unaligned_table[] = {
+-	{
+-		.ctl_name	= CTL_UNNUMBERED,
+-		.procname	= "kernel_reports",
+-		.data		= &kernel_mode_unaligned_fixup_count,
+-		.maxlen		= sizeof(int),
+-		.mode		= 0644,
+-		.proc_handler	= &proc_dointvec
+-	},
+-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+-	{
+-		.ctl_name	= CTL_UNNUMBERED,
+-		.procname	= "user_reports",
+-		.data		= &user_mode_unaligned_fixup_count,
+-		.maxlen		= sizeof(int),
+-		.mode		= 0644,
+-		.proc_handler	= &proc_dointvec
+-	},
+-	{
+-		.ctl_name	= CTL_UNNUMBERED,
+-		.procname	= "user_enable",
+-		.data		= &user_mode_unaligned_fixup_enable,
+-		.maxlen		= sizeof(int),
+-		.mode		= 0644,
+-		.proc_handler	= &proc_dointvec},
+-#endif
+-	{}
 -};
 -
--void __init enable_early_printk(void)
+-static ctl_table unaligned_root[] = {
+-	{
+-		.ctl_name	= CTL_UNNUMBERED,
+-		.procname	= "unaligned_fixup",
+-		.mode		= 0555,
+-		unaligned_table
+-	},
+-	{}
+-};
+-
+-static ctl_table sh64_root[] = {
+-	{
+-		.ctl_name	= CTL_UNNUMBERED,
+-		.procname	= "sh64",
+-		.mode		= 0555,
+-		.child		= unaligned_root
+-	},
+-	{}
+-};
+-static struct ctl_table_header *sysctl_header;
+-static int __init init_sysctl(void)
 -{
--	ctrl_outb(0x2a, SCIF_SCBRR2);	/* 19200bps */
+-	sysctl_header = register_sysctl_table(sh64_root);
+-	return 0;
+-}
 -
--	ctrl_outw(0x04, SCIF_SCFCR2);	/* Reset TFRST */
--	ctrl_outw(0x10, SCIF_SCFCR2);	/* TTRG0=1 */
+-__initcall(init_sysctl);
 -
--	ctrl_outw(0, SCIF_SCSPTR2);
--	ctrl_outw(0x60, SCIF_SCFSR2);
--	ctrl_outw(0, SCIF_SCLSR2);
--	ctrl_outw(0x30, SCIF_SCSCR2);
 -
--	register_console(&sh_console);
+-asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
+-{
+-	u64 peek_real_address_q(u64 addr);
+-	u64 poke_real_address_q(u64 addr, u64 val);
+-	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
+-	unsigned long long exp_cause;
+-	/* It's not worth ioremapping the debug module registers for the amount
+-	   of access we make to them - just go direct to their physical
+-	   addresses. */
+-	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
+-	if (exp_cause & ~4) {
+-		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
+-			(unsigned long)(exp_cause & 0xffffffff));
+-	}
+-	show_state();
+-	/* Clear all DEBUGINT causes */
+-	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
 -}
-diff --git a/arch/sh64/kernel/entry.S b/arch/sh64/kernel/entry.S
+diff --git a/arch/sh64/kernel/unwind.c b/arch/sh64/kernel/unwind.c
 deleted file mode 100644
-index 7013fcb..0000000
---- a/arch/sh64/kernel/entry.S
+index 1214c78..0000000
+--- a/arch/sh64/kernel/unwind.c
 +++ /dev/null
-@@ -1,2102 +0,0 @@
+@@ -1,326 +0,0 @@
 -/*
+- * arch/sh64/kernel/unwind.c
+- *
+- * Copyright (C) 2004  Paul Mundt
+- * Copyright (C) 2004  Richard Curnow
+- *
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
-- *
-- * arch/sh64/kernel/entry.S
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2004, 2005  Paul Mundt
-- * Copyright (C) 2003, 2004 Richard Curnow
-- *
 - */
--
+-#include <linux/kallsyms.h>
+-#include <linux/kernel.h>
+-#include <linux/types.h>
 -#include <linux/errno.h>
--#include <linux/sys.h>
--
+-#include <asm/page.h>
+-#include <asm/ptrace.h>
 -#include <asm/processor.h>
--#include <asm/registers.h>
--#include <asm/unistd.h>
--#include <asm/thread_info.h>
--#include <asm/asm-offsets.h>
+-#include <asm/io.h>
 -
--/*
-- * SR fields.
-- */
--#define SR_ASID_MASK	0x00ff0000
--#define SR_FD_MASK	0x00008000
--#define SR_SS		0x08000000
--#define SR_BL		0x10000000
--#define SR_MD		0x40000000
+-static u8 regcache[63];
 -
 -/*
-- * Event code.
+- * Finding the previous stack frame isn't horribly straightforward as it is
+- * on some other platforms. In the sh64 case, we don't have "linked" stack
+- * frames, so we need to do a bit of work to determine the previous frame,
+- * and in turn, the previous r14/r18 pair.
+- *
+- * There are generally a few cases which determine where we can find out
+- * the r14/r18 values. In the general case, this can be determined by poking
+- * around the prologue of the symbol PC is in (note that we absolutely must
+- * have frame pointer support as well as the kernel symbol table mapped,
+- * otherwise we can't even get this far).
+- *
+- * In other cases, such as the interrupt/exception path, we can poke around
+- * the sp/fp.
+- *
+- * Notably, this entire approach is somewhat error prone, and in the event
+- * that the previous frame cannot be determined, that's all we can do.
+- * Either way, this still leaves us with a more correct backtrace then what
+- * we would be able to come up with by walking the stack (which is garbage
+- * for anything beyond the first frame).
+- *						-- PFM.
 - */
--#define	EVENT_INTERRUPT		0
--#define	EVENT_FAULT_TLB		1
--#define	EVENT_FAULT_NOT_TLB	2
--#define	EVENT_DEBUG		3
+-static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
+-		      unsigned long *pprev_fp, unsigned long *pprev_pc,
+-		      struct pt_regs *regs)
+-{
+-	const char *sym;
+-	char namebuf[128];
+-	unsigned long offset;
+-	unsigned long prologue = 0;
+-	unsigned long fp_displacement = 0;
+-	unsigned long fp_prev = 0;
+-	unsigned long offset_r14 = 0, offset_r18 = 0;
+-	int i, found_prologue_end = 0;
 -
--/* EXPEVT values */
--#define	RESET_CAUSE		0x20
--#define DEBUGSS_CAUSE		0x980
+-	sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
+-	if (!sym)
+-		return -EINVAL;
 -
--/*
-- * Frame layout. Quad index.
-- */
--#define	FRAME_T(x)	FRAME_TBASE+(x*8)
--#define	FRAME_R(x)	FRAME_RBASE+(x*8)
--#define	FRAME_S(x)	FRAME_SBASE+(x*8)
--#define FSPC		0
--#define FSSR		1
--#define FSYSCALL_ID	2
+-	prologue = pc - offset;
+-	if (!prologue)
+-		return -EINVAL;
 -
--/* Arrange the save frame to be a multiple of 32 bytes long */
--#define FRAME_SBASE	0
--#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
--#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
--#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */
--#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */
+-	/* Validate fp, to avoid risk of dereferencing a bad pointer later.
+-	   Assume 128Mb since that's the amount of RAM on a Cayman.  Modify
+-	   when there is an SH-5 board with more. */
+-	if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
+-	    (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
+-	    ((fp & 7) != 0)) {
+-		return -EINVAL;
+-	}
 -
--#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
--#define FP_FRAME_BASE	0
+-	/*
+-	 * Depth to walk, depth is completely arbitrary.
+-	 */
+-	for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
+-		unsigned long op;
+-		u8 major, minor;
+-		u8 src, dest, disp;
 -
--#define	SAVED_R2	0*8
--#define	SAVED_R3	1*8
--#define	SAVED_R4	2*8
--#define	SAVED_R5	3*8
--#define	SAVED_R18	4*8
--#define	SAVED_R6	5*8
--#define	SAVED_TR0	6*8
+-		op = *(unsigned long *)prologue;
 -
--/* These are the registers saved in the TLB path that aren't saved in the first
--   level of the normal one. */
--#define	TLB_SAVED_R25	7*8
--#define	TLB_SAVED_TR1	8*8
--#define	TLB_SAVED_TR2	9*8
--#define	TLB_SAVED_TR3	10*8
--#define	TLB_SAVED_TR4	11*8
--/* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
--   breakage otherwise. */
--#define	TLB_SAVED_R0	12*8
--#define	TLB_SAVED_R1	13*8
+-		major = (op >> 26) & 0x3f;
+-		src   = (op >> 20) & 0x3f;
+-		minor = (op >> 16) & 0xf;
+-		disp  = (op >> 10) & 0x3f;
+-		dest  = (op >>  4) & 0x3f;
 -
--#define CLI()				\
--	getcon	SR, r6;			\
--	ori	r6, 0xf0, r6;		\
--	putcon	r6, SR;
+-		/*
+-		 * Stack frame creation happens in a number of ways.. in the
+-		 * general case when the stack frame is less than 511 bytes,
+-		 * it's generally created by an addi or addi.l:
+-		 *
+-		 *	addi/addi.l r15, -FRAME_SIZE, r15
+-		 *
+-		 * in the event that the frame size is bigger than this, it's
+-		 * typically created using a movi/sub pair as follows:
+-		 *
+-		 *	movi	FRAME_SIZE, rX
+-		 *	sub	r15, rX, r15
+-		 */
 -
--#define STI()				\
--	getcon	SR, r6;			\
--	andi	r6, ~0xf0, r6;		\
--	putcon	r6, SR;
+-		switch (major) {
+-		case (0x00 >> 2):
+-			switch (minor) {
+-			case 0x8: /* add.l */
+-			case 0x9: /* add */
+-				/* Look for r15, r63, r14 */
+-				if (src == 15 && disp == 63 && dest == 14)
+-					found_prologue_end = 1;
 -
--#ifdef CONFIG_PREEMPT
--#  define preempt_stop()	CLI()
--#else
--#  define preempt_stop()
--#  define resume_kernel		restore_all
--#endif
+-				break;
+-			case 0xa: /* sub.l */
+-			case 0xb: /* sub */
+-				if (src != 15 || dest != 15)
+-					continue;
 -
--	.section	.data, "aw"
+-				fp_displacement -= regcache[disp];
+-				fp_prev = fp - fp_displacement;
+-				break;
+-			}
+-			break;
+-		case (0xa8 >> 2): /* st.l */
+-			if (src != 15)
+-				continue;
 -
--#define FAST_TLBMISS_STACK_CACHELINES 4
--#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
+-			switch (dest) {
+-			case 14:
+-				if (offset_r14 || fp_displacement == 0)
+-					continue;
 -
--/* Register back-up area for all exceptions */
--	.balign	32
--	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
--	 * register saves etc. */
--	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
--/* This is 32 byte aligned by construction */
--/* Register back-up area for all exceptions */
--reg_save_area:
--	.quad	0
--	.quad	0
--	.quad	0
--	.quad	0
+-				offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+-				offset_r14 *= sizeof(unsigned long);
+-				offset_r14 += fp_displacement;
+-				break;
+-			case 18:
+-				if (offset_r18 || fp_displacement == 0)
+-					continue;
 -
--	.quad	0
--	.quad	0
--	.quad	0
--	.quad	0
+-				offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+-				offset_r18 *= sizeof(unsigned long);
+-				offset_r18 += fp_displacement;
+-				break;
+-			}
 -
--	.quad	0
--	.quad	0
--	.quad	0
--	.quad	0
+-			break;
+-		case (0xcc >> 2): /* movi */
+-			if (dest >= 63) {
+-				printk(KERN_NOTICE "%s: Invalid dest reg %d "
+-				       "specified in movi handler. Failed "
+-				       "opcode was 0x%lx: ", __FUNCTION__,
+-				       dest, op);
 -
--	.quad	0
--	.quad   0
+-				continue;
+-			}
 -
--/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
-- * reentrancy. Note this area may be accessed via physical address.
-- * Align so this fits a whole single cache line, for ease of purging.
-- */
--	.balign 32,0,32
--resvec_save_area:
--	.quad	0
--	.quad	0
--	.quad	0
--	.quad	0
--	.quad	0
--	.balign 32,0,32
+-			/* Sign extend */
+-			regcache[dest] =
+-				((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
+-			break;
+-		case (0xd0 >> 2): /* addi */
+-		case (0xd4 >> 2): /* addi.l */
+-			/* Look for r15, -FRAME_SIZE, r15 */
+-			if (src != 15 || dest != 15)
+-				continue;
 -
--/* Jump table of 3rd level handlers  */
--trap_jtable:
--	.long	do_exception_error		/* 0x000 */
--	.long	do_exception_error		/* 0x020 */
--	.long	tlb_miss_load				/* 0x040 */
--	.long	tlb_miss_store				/* 0x060 */
--	! ARTIFICIAL pseudo-EXPEVT setting
--	.long	do_debug_interrupt		/* 0x080 */
--	.long	tlb_miss_load				/* 0x0A0 */
--	.long	tlb_miss_store				/* 0x0C0 */
--	.long	do_address_error_load	/* 0x0E0 */
--	.long	do_address_error_store	/* 0x100 */
--#ifdef CONFIG_SH_FPU
--	.long	do_fpu_error		/* 0x120 */
--#else
--	.long	do_exception_error		/* 0x120 */
--#endif
--	.long	do_exception_error		/* 0x140 */
--	.long	system_call				/* 0x160 */
--	.long	do_reserved_inst		/* 0x180 */
--	.long	do_illegal_slot_inst	/* 0x1A0 */
--	.long	do_NMI			/* 0x1C0 */
--	.long	do_exception_error		/* 0x1E0 */
--	.rept 15
--		.long do_IRQ		/* 0x200 - 0x3C0 */
--	.endr
--	.long	do_exception_error		/* 0x3E0 */
--	.rept 32
--		.long do_IRQ		/* 0x400 - 0x7E0 */
--	.endr
--	.long	fpu_error_or_IRQA			/* 0x800 */
--	.long	fpu_error_or_IRQB			/* 0x820 */
--	.long	do_IRQ			/* 0x840 */
--	.long	do_IRQ			/* 0x860 */
--	.rept 6
--		.long do_exception_error	/* 0x880 - 0x920 */
--	.endr
--	.long	do_software_break_point	/* 0x940 */
--	.long	do_exception_error		/* 0x960 */
--	.long	do_single_step		/* 0x980 */
+-			/* Sign extended frame size.. */
+-			fp_displacement +=
+-				(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+-			fp_prev = fp - fp_displacement;
+-			break;
+-		}
 -
--	.rept 3
--		.long do_exception_error	/* 0x9A0 - 0x9E0 */
--	.endr
--	.long	do_IRQ			/* 0xA00 */
--	.long	do_IRQ			/* 0xA20 */
--	.long	itlb_miss_or_IRQ			/* 0xA40 */
--	.long	do_IRQ			/* 0xA60 */
--	.long	do_IRQ			/* 0xA80 */
--	.long	itlb_miss_or_IRQ			/* 0xAA0 */
--	.long	do_exception_error		/* 0xAC0 */
--	.long	do_address_error_exec	/* 0xAE0 */
--	.rept 8
--		.long do_exception_error	/* 0xB00 - 0xBE0 */
--	.endr
--	.rept 18
--		.long do_IRQ		/* 0xC00 - 0xE20 */
--	.endr
+-		if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
+-			break;
+-	}
 -
--	.section	.text64, "ax"
+-	if (offset_r14 == 0 || fp_prev == 0) {
+-		if (!offset_r14)
+-			pr_debug("Unable to find r14 offset\n");
+-		if (!fp_prev)
+-			pr_debug("Unable to find previous fp\n");
 -
--/*
-- * --- Exception/Interrupt/Event Handling Section
-- */
+-		return -EINVAL;
+-	}
 -
--/*
-- * VBR and RESVEC blocks.
-- *
-- * First level handler for VBR-based exceptions.
-- *
-- * To avoid waste of space, align to the maximum text block size.
-- * This is assumed to be at most 128 bytes or 32 instructions.
-- * DO NOT EXCEED 32 instructions on the first level handlers !
-- *
-- * Also note that RESVEC is contained within the VBR block
-- * where the room left (1KB - TEXT_SIZE) allows placing
-- * the RESVEC block (at most 512B + TEXT_SIZE).
-- *
-- * So first (and only) level handler for RESVEC-based exceptions.
-- *
-- * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
-- * and interrupt) we are a lot tight with register space until
-- * saving onto the stack frame, which is done in handle_exception().
-- *
-- */
+-	/* For innermost leaf function, there might not be a offset_r18 */
+-	if (!*pprev_pc && (offset_r18 == 0))
+-		return -EINVAL;
 -
--#define	TEXT_SIZE 	128
--#define	BLOCK_SIZE 	1664 		/* Dynamic check, 13*128 */
+-	*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
 -
--	.balign TEXT_SIZE
--LVBR_block:
--	.space	256, 0			/* Power-on class handler, */
--					/* not required here       */
--not_a_tlb_miss:
--	synco	/* TAKum03020 (but probably a good idea anyway.) */
--	/* Save original stack pointer into KCR1 */
--	putcon	SP, KCR1
+-	if (offset_r18)
+-		*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
 -
--	/* Save other original registers into reg_save_area */
--        movi  reg_save_area, SP
--	st.q	SP, SAVED_R2, r2
--	st.q	SP, SAVED_R3, r3
--	st.q	SP, SAVED_R4, r4
--	st.q	SP, SAVED_R5, r5
--	st.q	SP, SAVED_R6, r6
--	st.q	SP, SAVED_R18, r18
--	gettr	tr0, r3
--	st.q	SP, SAVED_TR0, r3
+-	*pprev_pc &= ~1;
 -
--	/* Set args for Non-debug, Not a TLB miss class handler */
--	getcon	EXPEVT, r2
--	movi	ret_from_exception, r3
--	ori	r3, 1, r3
--	movi	EVENT_FAULT_NOT_TLB, r4
--	or	SP, ZERO, r5
--	getcon	KCR1, SP
--	pta	handle_exception, tr0
--	blink	tr0, ZERO
+-	return 0;
+-}
 -
--	.balign 256
--	! VBR+0x200
--	nop
--	.balign 256
--	! VBR+0x300
--	nop
--	.balign 256
--	/*
--	 * Instead of the natural .balign 1024 place RESVEC here
--	 * respecting the final 1KB alignment.
--	 */
--	.balign TEXT_SIZE
--	/*
--	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
--	 * block making sure the final alignment is correct.
--	 */
--tlb_miss:
--	synco	/* TAKum03020 (but probably a good idea anyway.) */
--	putcon	SP, KCR1
--	movi	reg_save_area, SP
--	/* SP is guaranteed 32-byte aligned. */
--	st.q	SP, TLB_SAVED_R0 , r0
--	st.q	SP, TLB_SAVED_R1 , r1
--	st.q	SP, SAVED_R2 , r2
--	st.q	SP, SAVED_R3 , r3
--	st.q	SP, SAVED_R4 , r4
--	st.q	SP, SAVED_R5 , r5
--	st.q	SP, SAVED_R6 , r6
--	st.q	SP, SAVED_R18, r18
+-/* Don't put this on the stack since we'll want to call sh64_unwind
+- * when we're close to underflowing the stack anyway. */
+-static struct pt_regs here_regs;
 -
--	/* Save R25 for safety; as/ld may want to use it to achieve the call to
--	 * the code in mm/tlbmiss.c */
--	st.q	SP, TLB_SAVED_R25, r25
--	gettr	tr0, r2
--	gettr	tr1, r3
--	gettr	tr2, r4
--	gettr	tr3, r5
--	gettr	tr4, r18
--	st.q	SP, SAVED_TR0 , r2
--	st.q	SP, TLB_SAVED_TR1 , r3
--	st.q	SP, TLB_SAVED_TR2 , r4
--	st.q	SP, TLB_SAVED_TR3 , r5
--	st.q	SP, TLB_SAVED_TR4 , r18
+-extern const char syscall_ret;
+-extern const char ret_from_syscall;
+-extern const char ret_from_exception;
+-extern const char ret_from_irq;
 -
--	pt	do_fast_page_fault, tr0
--	getcon	SSR, r2
--	getcon	EXPEVT, r3
--	getcon	TEA, r4
--	shlri	r2, 30, r2
--	andi	r2, 1, r2	/* r2 = SSR.MD */
--	blink 	tr0, LINK
+-static void sh64_unwind_inner(struct pt_regs *regs);
 -
--	pt	fixup_to_invoke_general_handler, tr1
+-static void unwind_nested (unsigned long pc, unsigned long fp)
+-{
+-	if ((fp >= __MEMORY_START) &&
+-	    ((fp & 7) == 0)) {
+-		sh64_unwind_inner((struct pt_regs *) fp);
+-	}
+-}
 -
--	/* If the fast path handler fixed the fault, just drop through quickly
--	   to the restore code right away to return to the excepting context.
--	   */
--	beqi/u	r2, 0, tr1
+-static void sh64_unwind_inner(struct pt_regs *regs)
+-{
+-	unsigned long pc, fp;
+-	int ofs = 0;
+-	int first_pass;
 -
--fast_tlb_miss_restore:
--	ld.q	SP, SAVED_TR0, r2
--	ld.q	SP, TLB_SAVED_TR1, r3
--	ld.q	SP, TLB_SAVED_TR2, r4
+-	pc = regs->pc & ~1;
+-	fp = regs->regs[14];
 -
--	ld.q	SP, TLB_SAVED_TR3, r5
--	ld.q	SP, TLB_SAVED_TR4, r18
+-	first_pass = 1;
+-	for (;;) {
+-		int cond;
+-		unsigned long next_fp, next_pc;
 -
--	ptabs	r2, tr0
--	ptabs	r3, tr1
--	ptabs	r4, tr2
--	ptabs	r5, tr3
--	ptabs	r18, tr4
+-		if (pc == ((unsigned long) &syscall_ret & ~1)) {
+-			printk("SYSCALL\n");
+-			unwind_nested(pc,fp);
+-			return;
+-		}
 -
--	ld.q	SP, TLB_SAVED_R0, r0
--	ld.q	SP, TLB_SAVED_R1, r1
--	ld.q	SP, SAVED_R2, r2
--	ld.q	SP, SAVED_R3, r3
--	ld.q	SP, SAVED_R4, r4
--	ld.q	SP, SAVED_R5, r5
--	ld.q	SP, SAVED_R6, r6
--	ld.q	SP, SAVED_R18, r18
--	ld.q	SP, TLB_SAVED_R25, r25
+-		if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
+-			printk("SYSCALL (PREEMPTED)\n");
+-			unwind_nested(pc,fp);
+-			return;
+-		}
 -
--	getcon	KCR1, SP
--	rte
--	nop /* for safety, in case the code is run on sh5-101 cut1.x */
+-		/* In this case, the PC is discovered by lookup_prev_stack_frame but
+-		   it has 4 taken off it to look like the 'caller' */
+-		if (pc == ((unsigned long) &ret_from_exception & ~1)) {
+-			printk("EXCEPTION\n");
+-			unwind_nested(pc,fp);
+-			return;
+-		}
 -
--fixup_to_invoke_general_handler:
+-		if (pc == ((unsigned long) &ret_from_irq & ~1)) {
+-			printk("IRQ\n");
+-			unwind_nested(pc,fp);
+-			return;
+-		}
 -
--	/* OK, new method.  Restore stuff that's not expected to get saved into
--	   the 'first-level' reg save area, then just fall through to setting
--	   up the registers and calling the second-level handler. */
+-		cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
+-			((pc & 3) == 0) && ((fp & 7) == 0));
 -
--	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
--	   r25,tr1-4 and save r6 to get into the right state.  */
+-		pc -= ofs;
 -
--	ld.q	SP, TLB_SAVED_TR1, r3
--	ld.q	SP, TLB_SAVED_TR2, r4
--	ld.q	SP, TLB_SAVED_TR3, r5
--	ld.q	SP, TLB_SAVED_TR4, r18
--	ld.q	SP, TLB_SAVED_R25, r25
+-		printk("[<%08lx>] ", pc);
+-		print_symbol("%s\n", pc);
 -
--	ld.q	SP, TLB_SAVED_R0, r0
--	ld.q	SP, TLB_SAVED_R1, r1
+-		if (first_pass) {
+-			/* If the innermost frame is a leaf function, it's
+-			 * possible that r18 is never saved out to the stack.
+-			 */
+-			next_pc = regs->regs[18];
+-		} else {
+-			next_pc = 0;
+-		}
 -
--	ptabs/u	r3, tr1
--	ptabs/u	r4, tr2
--	ptabs/u	r5, tr3
--	ptabs/u	r18, tr4
+-		if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
+-			ofs = sizeof(unsigned long);
+-			pc = next_pc & ~1;
+-			fp = next_fp;
+-		} else {
+-			printk("Unable to lookup previous stack frame\n");
+-			break;
+-		}
+-		first_pass = 0;
+-	}
 -
--	/* Set args for Non-debug, TLB miss class handler */
--	getcon	EXPEVT, r2
--	movi	ret_from_exception, r3
--	ori	r3, 1, r3
--	movi	EVENT_FAULT_TLB, r4
--	or	SP, ZERO, r5
--	getcon	KCR1, SP
--	pta	handle_exception, tr0
--	blink	tr0, ZERO
+-	printk("\n");
 -
--/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
--   DOES END UP AT VBR+0x600 */
--	nop
--	nop
--	nop
--	nop
--	nop
--	nop
+-}
 -
--	.balign 256
--	/* VBR + 0x600 */
+-void sh64_unwind(struct pt_regs *regs)
+-{
+-	if (!regs) {
+-		/*
+-		 * Fetch current regs if we have no other saved state to back
+-		 * trace from.
+-		 */
+-		regs = &here_regs;
 -
--interrupt:
--	synco	/* TAKum03020 (but probably a good idea anyway.) */
--	/* Save original stack pointer into KCR1 */
--	putcon	SP, KCR1
+-		__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
+-		__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
+-		__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
 -
--	/* Save other original registers into reg_save_area */
--        movi  reg_save_area, SP
--	st.q	SP, SAVED_R2, r2
--	st.q	SP, SAVED_R3, r3
--	st.q	SP, SAVED_R4, r4
--	st.q	SP, SAVED_R5, r5
--	st.q	SP, SAVED_R6, r6
--	st.q	SP, SAVED_R18, r18
--	gettr	tr0, r3
--	st.q	SP, SAVED_TR0, r3
+-		__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
+-		__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
+-		__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
+-		__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
+-		__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
+-		__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
+-		__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
+-		__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
 -
--	/* Set args for interrupt class handler */
--	getcon	INTEVT, r2
--	movi	ret_from_irq, r3
--	ori	r3, 1, r3
--	movi	EVENT_INTERRUPT, r4
--	or	SP, ZERO, r5
--	getcon	KCR1, SP
--	pta	handle_exception, tr0
--	blink	tr0, ZERO
--	.balign	TEXT_SIZE		/* let's waste the bare minimum */
+-		__asm__ __volatile__ (
+-			"pta 0f, tr0\n\t"
+-			"blink tr0, %0\n\t"
+-			"0: nop"
+-			: "=r" (regs->pc)
+-		);
+-	}
 -
--LVBR_block_end:				/* Marker. Used for total checking */
+-	printk("\nCall Trace:\n");
+-	sh64_unwind_inner(regs);
+-}
 -
--	.balign 256
--LRESVEC_block:
--	/* Panic handler. Called with MMU off. Possible causes/actions:
--	 * - Reset:		Jump to program start.
--	 * - Single Step:	Turn off Single Step & return.
--	 * - Others:		Call panic handler, passing PC as arg.
--	 *			(this may need to be extended...)
--	 */
--reset_or_panic:
--	synco	/* TAKum03020 (but probably a good idea anyway.) */
--	putcon	SP, DCR
--	/* First save r0-1 and tr0, as we need to use these */
--	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
--	st.q	SP, 0, r0
--	st.q	SP, 8, r1
--	gettr	tr0, r0
--	st.q	SP, 32, r0
+diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S
+deleted file mode 100644
+index f533a06..0000000
+--- a/arch/sh64/kernel/vmlinux.lds.S
++++ /dev/null
+@@ -1,140 +0,0 @@
+-/*
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh5/vmlinux.lds.S
+- *
+- * ld script to make ST50 Linux kernel
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- *
+- * benedict.gaster at superh.com:	 2nd May 2002
+- *    Add definition of empty_zero_page to be the first page of kernel image.
+- *
+- * benedict.gaster at superh.com:	 3rd May 2002
+- *    Added support for ramdisk, removing statically linked romfs at the same time.
+- *
+- * lethal at linux-sh.org:          9th May 2003
+- *    Kill off GLOBAL_NAME() usage and other CDC-isms.
+- *
+- * lethal at linux-sh.org:         19th May 2003
+- *    Remove support for ancient toolchains.
+- */
 -
--	/* Check cause */
--	getcon	EXPEVT, r0
--	movi	RESET_CAUSE, r1
--	sub	r1, r0, r1		/* r1=0 if reset */
--	movi	_stext-CONFIG_CACHED_MEMORY_OFFSET, r0
--	ori	r0, 1, r0
--	ptabs	r0, tr0
--	beqi	r1, 0, tr0		/* Jump to start address if reset */
+-#include <asm/page.h>
+-#include <asm/cache.h>
+-#include <asm/processor.h>
+-#include <asm/thread_info.h>
 -
--	getcon	EXPEVT, r0
--	movi	DEBUGSS_CAUSE, r1
--	sub	r1, r0, r1		/* r1=0 if single step */
--	pta	single_step_panic, tr0
--	beqi	r1, 0, tr0		/* jump if single step */
+-#define LOAD_OFFSET	CONFIG_CACHED_MEMORY_OFFSET
+-#include <asm-generic/vmlinux.lds.h>
 -
--	/* Now jump to where we save the registers. */
--	movi	panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1
--	ptabs	r1, tr0
--	blink	tr0, r63
+-OUTPUT_ARCH(sh:sh5)
 -
--single_step_panic:
--	/* We are in a handler with Single Step set. We need to resume the
--	 * handler, by turning on MMU & turning off Single Step. */
--	getcon	SSR, r0
--	movi	SR_MMU, r1
--	or	r0, r1, r0
--	movi	~SR_SS, r1
--	and	r0, r1, r0
--	putcon	r0, SSR
--	/* Restore EXPEVT, as the rte won't do this */
--	getcon	PEXPEVT, r0
--	putcon	r0, EXPEVT
--	/* Restore regs */
--	ld.q	SP, 32, r0
--	ptabs	r0, tr0
--	ld.q	SP, 0, r0
--	ld.q	SP, 8, r1
--	getcon	DCR, SP
--	synco
--	rte
+-#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
 -
+-ENTRY(__start)
+-SECTIONS
+-{
+-  . = CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
+-  _text = .;			/* Text and read-only data */
+-  text = .;			/* Text and read-only data */
 -
--	.balign	256
--debug_exception:
--	synco	/* TAKum03020 (but probably a good idea anyway.) */
--	/*
--	 * Single step/software_break_point first level handler.
--	 * Called with MMU off, so the first thing we do is enable it
--	 * by doing an rte with appropriate SSR.
--	 */
--	putcon	SP, DCR
--	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
--	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
+-  .empty_zero_page : C_PHYS(.empty_zero_page) {
+-	*(.empty_zero_page)
+-	} = 0
 -
--	/* With the MMU off, we are bypassing the cache, so purge any
--         * data that will be made stale by the following stores.
--         */
--	ocbp	SP, 0
--	synco
+-  .text : C_PHYS(.text) {
+-  	*(.text.head)
+-	TEXT_TEXT
+-	*(.text64)
+-        *(.text..SHmedia32)
+-	SCHED_TEXT
+-	LOCK_TEXT
+-	*(.fixup)
+-	*(.gnu.warning)
+-#ifdef CONFIG_LITTLE_ENDIAN
+-	} = 0x6ff0fff0
+-#else
+-	} = 0xf0fff06f
+-#endif
 -
--	st.q	SP, 0, r0
--	st.q	SP, 8, r1
--	getcon	SPC, r0
--	st.q	SP, 16, r0
--	getcon	SSR, r0
--	st.q	SP, 24, r0
+-  /* We likely want __ex_table to be Cache Line aligned */
+-  . = ALIGN(L1_CACHE_BYTES);		/* Exception table */
+-  __start___ex_table = .;
+-  __ex_table : C_PHYS(__ex_table) { *(__ex_table) }
+-  __stop___ex_table = .;
 -
--	/* Enable MMU, block exceptions, set priv mode, disable single step */
--	movi	SR_MMU | SR_BL | SR_MD, r1
--	or	r0, r1, r0
--	movi	~SR_SS, r1
--	and	r0, r1, r0
--	putcon	r0, SSR
--	/* Force control to debug_exception_2 when rte is executed */
--	movi	debug_exeception_2, r0
--	ori	r0, 1, r0      /* force SHmedia, just in case */
--	putcon	r0, SPC
--	getcon	DCR, SP
--	synco
--	rte
--debug_exeception_2:
--	/* Restore saved regs */
--	putcon	SP, KCR1
--	movi	resvec_save_area, SP
--	ld.q	SP, 24, r0
--	putcon	r0, SSR
--	ld.q	SP, 16, r0
--	putcon	r0, SPC
--	ld.q	SP, 0, r0
--	ld.q	SP, 8, r1
+-  _etext = .;			/* End of text section */
 -
--	/* Save other original registers into reg_save_area */
--        movi  reg_save_area, SP
--	st.q	SP, SAVED_R2, r2
--	st.q	SP, SAVED_R3, r3
--	st.q	SP, SAVED_R4, r4
--	st.q	SP, SAVED_R5, r5
--	st.q	SP, SAVED_R6, r6
--	st.q	SP, SAVED_R18, r18
--	gettr	tr0, r3
--	st.q	SP, SAVED_TR0, r3
+-  NOTES 
 -
--	/* Set args for debug class handler */
--	getcon	EXPEVT, r2
--	movi	ret_from_exception, r3
--	ori	r3, 1, r3
--	movi	EVENT_DEBUG, r4
--	or	SP, ZERO, r5
--	getcon	KCR1, SP
--	pta	handle_exception, tr0
--	blink	tr0, ZERO
+-  RODATA
 -
--	.balign	256
--debug_interrupt:
--	/* !!! WE COME HERE IN REAL MODE !!! */
--	/* Hook-up debug interrupt to allow various debugging options to be
--	 * hooked into its handler. */
--	/* Save original stack pointer into KCR1 */
--	synco
--	putcon	SP, KCR1
--	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
--	ocbp	SP, 0
--	ocbp	SP, 32
--	synco
+-  .data : C_PHYS(.data) {			/* Data */
+-	DATA_DATA
+-	CONSTRUCTORS
+-	}
 -
--	/* Save other original registers into reg_save_area thru real addresses */
--	st.q	SP, SAVED_R2, r2
--	st.q	SP, SAVED_R3, r3
--	st.q	SP, SAVED_R4, r4
--	st.q	SP, SAVED_R5, r5
--	st.q	SP, SAVED_R6, r6
--	st.q	SP, SAVED_R18, r18
--	gettr	tr0, r3
--	st.q	SP, SAVED_TR0, r3
+-  . = ALIGN(PAGE_SIZE);
+-  .data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) }
 -
--	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
--	   them back again, so that they look like the originals
--	   as far as the real handler code is concerned. */
--	getcon	spc, r6
--	putcon	r6, pspc
--	getcon	ssr, r6
--	putcon	r6, pssr
+-  PERCPU(PAGE_SIZE)
 -
--	! construct useful SR for handle_exception
--	movi	3, r6
--	shlli	r6, 30, r6
--	getcon	sr, r18
--	or	r18, r6, r6
--	putcon	r6, ssr
+-  . = ALIGN(L1_CACHE_BYTES);
+-  .data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) }
 -
--	! SSR is now the current SR with the MD and MMU bits set
--	! i.e. the rte will switch back to priv mode and put
--	! the mmu back on
+-  _edata = .;			/* End of data section */
 -
--	! construct spc
--	movi	handle_exception, r18
--	ori	r18, 1, r18		! for safety (do we need this?)
--	putcon	r18, spc
+-  . = ALIGN(THREAD_SIZE);	/* init_task: structure size aligned */
+-  .data.init_task : C_PHYS(.data.init_task) { *(.data.init_task) }
 -
--	/* Set args for Non-debug, Not a TLB miss class handler */
+-  . = ALIGN(PAGE_SIZE);		/* Init code and data */
+-  __init_begin = .;
+-  _sinittext = .;
+-  .init.text : C_PHYS(.init.text) { *(.init.text) }
+-  _einittext = .;
+-  .init.data : C_PHYS(.init.data) { *(.init.data) }
+-  . = ALIGN(L1_CACHE_BYTES);	/* Better if Cache Line aligned */
+-  __setup_start = .;
+-  .init.setup : C_PHYS(.init.setup) { *(.init.setup) }
+-  __setup_end = .;
+-  __initcall_start = .;
+-  .initcall.init : C_PHYS(.initcall.init) {
+-	INITCALLS
+-  }
+-  __initcall_end = .;
+-  __con_initcall_start = .;
+-  .con_initcall.init : C_PHYS(.con_initcall.init) { *(.con_initcall.init) }
+-  __con_initcall_end = .;
+-  SECURITY_INIT
 -
--	! EXPEVT==0x80 is unused, so 'steal' this value to put the
--	! debug interrupt handler in the vectoring table
--	movi	0x80, r2
--	movi	ret_from_exception, r3
--	ori	r3, 1, r3
--	movi	EVENT_FAULT_NOT_TLB, r4
+-#ifdef CONFIG_BLK_DEV_INITRD
+-  __initramfs_start = .;
+-  .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
+-  __initramfs_end = .;
+-#endif
 -
--	or	SP, ZERO, r5
--	movi	CONFIG_CACHED_MEMORY_OFFSET, r6
--	add	r6, r5, r5
--	getcon	KCR1, SP
+-  . = ALIGN(PAGE_SIZE);
+-  __init_end = .;
 -
--	synco	! for safety
--	rte	! -> handle_exception, switch back to priv mode again
+-  /* Align to the biggest single data representation, head and tail */
+-  . = ALIGN(8);
+-  __bss_start = .;		/* BSS */
+-  .bss : C_PHYS(.bss) {
+-	*(.bss)
+-	}
+-  . = ALIGN(8);
+-  _end = . ;
 -
--LRESVEC_block_end:			/* Marker. Unused. */
+-  /* Sections to be discarded */
+-  /DISCARD/ : {
+-	*(.exit.text)
+-	*(.exit.data)
+-	*(.exitcall.exit)
+-	}
 -
--	.balign	TEXT_SIZE
+-  STABS_DEBUG
+-  DWARF_DEBUG
+-}
+diff --git a/arch/sh64/lib/.gitignore b/arch/sh64/lib/.gitignore
+deleted file mode 100644
+index 3508c2c..0000000
+--- a/arch/sh64/lib/.gitignore
++++ /dev/null
+@@ -1 +0,0 @@
+-syscalltab.h
+diff --git a/arch/sh64/lib/Makefile b/arch/sh64/lib/Makefile
+deleted file mode 100644
+index 6a4cc3f..0000000
+--- a/arch/sh64/lib/Makefile
++++ /dev/null
+@@ -1,19 +0,0 @@
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 2000, 2001  Paolo Alberelli
+-# Coprygith (C) 2003  Paul Mundt
+-#
+-# Makefile for the SH-5 specific library files..
+-#
+-# Note! Dependencies are done automagically by 'make dep', which also
+-# removes any old dependencies. DON'T put your own dependencies here
+-# unless it's something special (ie not a .c file).
+-#
+-
+-# Panic should really be compiled as PIC
+-lib-y  := udelay.o c-checksum.o dbg.o io.o panic.o memcpy.o copy_user_memcpy.o \
+-		page_copy.o page_clear.o iomap.o
 -
+diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c
+deleted file mode 100644
+index 053137a..0000000
+--- a/arch/sh64/lib/c-checksum.c
++++ /dev/null
+@@ -1,217 +0,0 @@
 -/*
-- * Second level handler for VBR-based exceptions. Pre-handler.
-- * In common to all stack-frame sensitive handlers.
-- *
-- * Inputs:
-- * (KCR0) Current [current task union]
-- * (KCR1) Original SP
-- * (r2)   INTEVT/EXPEVT
-- * (r3)   appropriate return address
-- * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
-- * (r5)   Pointer to reg_save_area
-- * (SP)   Original SP
-- *
-- * Available registers:
-- * (r6)
-- * (r18)
-- * (tr0)
+- * arch/sh64/lib/c-checksum.c
 - *
+- * This file contains network checksum routines that are better done
+- * in an architecture-specific manner due to speed..
 - */
--handle_exception:
--	/* Common 2nd level handler. */
--
--	/* First thing we need an appropriate stack pointer */
--	getcon	SSR, r6
--	shlri	r6, 30, r6
--	andi	r6, 1, r6
--	pta	stack_ok, tr0
--	bne	r6, ZERO, tr0		/* Original stack pointer is fine */
--
--	/* Set stack pointer for user fault */
--	getcon	KCR0, SP
--	movi	THREAD_SIZE, r6		/* Point to the end */
--	add	SP, r6, SP
--
--stack_ok:
--
--/* DEBUG : check for underflow/overflow of the kernel stack */
--	pta	no_underflow, tr0
--	getcon  KCR0, r6
--	movi	1024, r18
--	add	r6, r18, r6
--	bge	SP, r6, tr0 	! ? below 1k from bottom of stack : danger zone
--
--/* Just panic to cause a crash. */
--bad_sp:
--	ld.b	r63, 0, r6
--	nop
--
--no_underflow:
--	pta	bad_sp, tr0
--	getcon	kcr0, r6
--	movi	THREAD_SIZE, r18
--	add	r18, r6, r6
--	bgt	SP, r6, tr0	! sp above the stack
--
--	/* Make some room for the BASIC frame. */
--	movi	-(FRAME_SIZE), r6
--	add	SP, r6, SP
 -
--/* Could do this with no stalling if we had another spare register, but the
--   code below will be OK. */
--	ld.q	r5, SAVED_R2, r6
--	ld.q	r5, SAVED_R3, r18
--	st.q	SP, FRAME_R(2), r6
--	ld.q	r5, SAVED_R4, r6
--	st.q	SP, FRAME_R(3), r18
--	ld.q	r5, SAVED_R5, r18
--	st.q	SP, FRAME_R(4), r6
--	ld.q	r5, SAVED_R6, r6
--	st.q	SP, FRAME_R(5), r18
--	ld.q	r5, SAVED_R18, r18
--	st.q	SP, FRAME_R(6), r6
--	ld.q	r5, SAVED_TR0, r6
--	st.q	SP, FRAME_R(18), r18
--	st.q	SP, FRAME_T(0), r6
+-#undef DEBUG
 -
--	/* Keep old SP around */
--	getcon	KCR1, r6
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <asm/byteorder.h>
+-#include <asm/uaccess.h>
 -
--	/* Save the rest of the general purpose registers */
--	st.q	SP, FRAME_R(0), r0
--	st.q	SP, FRAME_R(1), r1
--	st.q	SP, FRAME_R(7), r7
--	st.q	SP, FRAME_R(8), r8
--	st.q	SP, FRAME_R(9), r9
--	st.q	SP, FRAME_R(10), r10
--	st.q	SP, FRAME_R(11), r11
--	st.q	SP, FRAME_R(12), r12
--	st.q	SP, FRAME_R(13), r13
--	st.q	SP, FRAME_R(14), r14
+-static inline unsigned short from64to16(unsigned long long x)
+-{
+-	/* add up 32-bit words for 33 bits */
+-	x = (x & 0xffffffff) + (x >> 32);
+-	/* add up 16-bit and 17-bit words for 17+c bits */
+-	x = (x & 0xffff) + (x >> 16);
+-	/* add up 16-bit and 2-bit for 16+c bit */
+-	x = (x & 0xffff) + (x >> 16);
+-	/* add up carry.. */
+-	x = (x & 0xffff) + (x >> 16);
+-	return x;
+-}
 -
--	/* SP is somewhere else */
--	st.q	SP, FRAME_R(15), r6
+-static inline unsigned short foldto16(unsigned long x)
+-{
+-	/* add up 16-bit for 17 bits */
+-	x = (x & 0xffff) + (x >> 16);
+-	/* add up carry.. */
+-	x = (x & 0xffff) + (x >> 16);
+-	return x;
+-}
 -
--	st.q	SP, FRAME_R(16), r16
--	st.q	SP, FRAME_R(17), r17
--	/* r18 is saved earlier. */
--	st.q	SP, FRAME_R(19), r19
--	st.q	SP, FRAME_R(20), r20
--	st.q	SP, FRAME_R(21), r21
--	st.q	SP, FRAME_R(22), r22
--	st.q	SP, FRAME_R(23), r23
--	st.q	SP, FRAME_R(24), r24
--	st.q	SP, FRAME_R(25), r25
--	st.q	SP, FRAME_R(26), r26
--	st.q	SP, FRAME_R(27), r27
--	st.q	SP, FRAME_R(28), r28
--	st.q	SP, FRAME_R(29), r29
--	st.q	SP, FRAME_R(30), r30
--	st.q	SP, FRAME_R(31), r31
--	st.q	SP, FRAME_R(32), r32
--	st.q	SP, FRAME_R(33), r33
--	st.q	SP, FRAME_R(34), r34
--	st.q	SP, FRAME_R(35), r35
--	st.q	SP, FRAME_R(36), r36
--	st.q	SP, FRAME_R(37), r37
--	st.q	SP, FRAME_R(38), r38
--	st.q	SP, FRAME_R(39), r39
--	st.q	SP, FRAME_R(40), r40
--	st.q	SP, FRAME_R(41), r41
--	st.q	SP, FRAME_R(42), r42
--	st.q	SP, FRAME_R(43), r43
--	st.q	SP, FRAME_R(44), r44
--	st.q	SP, FRAME_R(45), r45
--	st.q	SP, FRAME_R(46), r46
--	st.q	SP, FRAME_R(47), r47
--	st.q	SP, FRAME_R(48), r48
--	st.q	SP, FRAME_R(49), r49
--	st.q	SP, FRAME_R(50), r50
--	st.q	SP, FRAME_R(51), r51
--	st.q	SP, FRAME_R(52), r52
--	st.q	SP, FRAME_R(53), r53
--	st.q	SP, FRAME_R(54), r54
--	st.q	SP, FRAME_R(55), r55
--	st.q	SP, FRAME_R(56), r56
--	st.q	SP, FRAME_R(57), r57
--	st.q	SP, FRAME_R(58), r58
--	st.q	SP, FRAME_R(59), r59
--	st.q	SP, FRAME_R(60), r60
--	st.q	SP, FRAME_R(61), r61
--	st.q	SP, FRAME_R(62), r62
+-static inline unsigned short myfoldto16(unsigned long long x)
+-{
+-	/* Fold down to 32-bits so we don't loose in the typedef-less
+-	   network stack.  */
+-	/* 64 to 33 */
+-	x = (x & 0xffffffff) + (x >> 32);
+-	/* 33 to 32 */
+-	x = (x & 0xffffffff) + (x >> 32);
 -
--	/*
--	 * Save the S* registers.
--	 */
--	getcon	SSR, r61
--	st.q	SP, FRAME_S(FSSR), r61
--	getcon	SPC, r62
--	st.q	SP, FRAME_S(FSPC), r62
--	movi	-1, r62			/* Reset syscall_nr */
--	st.q	SP, FRAME_S(FSYSCALL_ID), r62
+-	/* add up 16-bit for 17 bits */
+-	x = (x & 0xffff) + (x >> 16);
+-	/* add up carry.. */
+-	x = (x & 0xffff) + (x >> 16);
+-	return x;
+-}
 -
--	/* Save the rest of the target registers */
--	gettr	tr1, r6
--	st.q	SP, FRAME_T(1), r6
--	gettr	tr2, r6
--	st.q	SP, FRAME_T(2), r6
--	gettr	tr3, r6
--	st.q	SP, FRAME_T(3), r6
--	gettr	tr4, r6
--	st.q	SP, FRAME_T(4), r6
--	gettr	tr5, r6
--	st.q	SP, FRAME_T(5), r6
--	gettr	tr6, r6
--	st.q	SP, FRAME_T(6), r6
--	gettr	tr7, r6
--	st.q	SP, FRAME_T(7), r6
+-#define odd(x) ((x)&1)
+-#define U16(x) ntohs(x)
 -
--	! setup FP so that unwinder can wind back through nested kernel mode
--	! exceptions
--	add	SP, ZERO, r14
+-static unsigned long do_csum(const unsigned char *buff, int len)
+-{
+-	int odd, count;
+-	unsigned long result = 0;
 -
--#ifdef CONFIG_POOR_MANS_STRACE
--	/* We've pushed all the registers now, so only r2-r4 hold anything
--	 * useful. Move them into callee save registers */
--	or	r2, ZERO, r28
--	or	r3, ZERO, r29
--	or	r4, ZERO, r30
+-	pr_debug("do_csum buff %p, len %d (0x%x)\n", buff, len, len);
+-#ifdef DEBUG
+-	for (i = 0; i < len; i++) {
+-		if ((i % 26) == 0)
+-			printk("\n");
+-		printk("%02X ", buff[i]);
+-	}
+-#endif
 -
--	/* Preserve r2 as the event code */
--	movi	evt_debug, r3
--	ori	r3, 1, r3
--	ptabs	r3, tr0
+-	if (len <= 0)
+-		goto out;
 -
--	or	SP, ZERO, r6
--	getcon	TRA, r5
--	blink	tr0, LINK
+-	odd = 1 & (unsigned long) buff;
+-	if (odd) {
+-		result = *buff << 8;
+-		len--;
+-		buff++;
+-	}
+-	count = len >> 1;	/* nr of 16-bit words.. */
+-	if (count) {
+-		if (2 & (unsigned long) buff) {
+-			result += *(unsigned short *) buff;
+-			count--;
+-			len -= 2;
+-			buff += 2;
+-		}
+-		count >>= 1;	/* nr of 32-bit words.. */
+-		if (count) {
+-			unsigned long carry = 0;
+-			do {
+-				unsigned long w = *(unsigned long *) buff;
+-				buff += 4;
+-				count--;
+-				result += carry;
+-				result += w;
+-				carry = (w > result);
+-			} while (count);
+-			result += carry;
+-			result = (result & 0xffff) + (result >> 16);
+-		}
+-		if (len & 2) {
+-			result += *(unsigned short *) buff;
+-			buff += 2;
+-		}
+-	}
+-	if (len & 1)
+-		result += *buff;
+-	result = foldto16(result);
+-	if (odd)
+-		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
 -
--	or	r28, ZERO, r2
--	or	r29, ZERO, r3
--	or	r30, ZERO, r4
--#endif
+-	pr_debug("\nCHECKSUM is 0x%lx\n", result);
 -
--	/* For syscall and debug race condition, get TRA now */
--	getcon	TRA, r5
+-      out:
+-	return result;
+-}
 -
--	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
--	 * Also set FD, to catch FPU usage in the kernel.
--	 *
--	 * benedict.gaster at superh.com 29/07/2002
--	 *
--	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
--	 * same time change BL from 1->0, as any pending interrupt of a level
--	 * higher than he previous value of IMASK will leak through and be
--	 * taken unexpectedly.
--	 *
--	 * To avoid this we raise the IMASK and then issue another PUTCON to
--	 * enable interrupts.
--         */
--	getcon	SR, r6
--	movi	SR_IMASK | SR_FD, r7
--	or	r6, r7, r6
--	putcon	r6, SR
--	movi	SR_UNBLOCK_EXC, r7
--	and	r6, r7, r6
--	putcon	r6, SR
+-/* computes the checksum of a memory block at buff, length len,
+-   and adds in "sum" (32-bit)  */
+-__wsum csum_partial(const void *buff, int len, __wsum sum)
+-{
+-	unsigned long long result = do_csum(buff, len);
 -
+-	/* add in old sum, and carry.. */
+-	result += (__force u32)sum;
+-	/* 32+c bits -> 32 bits */
+-	result = (result & 0xffffffff) + (result >> 32);
 -
--	/* Now call the appropriate 3rd level handler */
--	or	r3, ZERO, LINK
--	movi	trap_jtable, r3
--	shlri	r2, 3, r2
--	ldx.l	r2, r3, r3
--	shlri	r2, 2, r2
--	ptabs	r3, tr0
--	or	SP, ZERO, r3
--	blink	tr0, ZERO
+-	pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n",
+-		buff, len, sum, result);
 -
--/*
-- * Second level handler for VBR-based exceptions. Post-handlers.
-- *
-- * Post-handlers for interrupts (ret_from_irq), exceptions
-- * (ret_from_exception) and common reentrance doors (restore_all
-- * to get back to the original context, ret_from_syscall loop to
-- * check kernel exiting).
-- *
-- * ret_with_reschedule and work_notifysig are an inner lables of
-- * the ret_from_syscall loop.
-- *
-- * In common to all stack-frame sensitive handlers.
-- *
-- * Inputs:
-- * (SP)   struct pt_regs *, original register's frame pointer (basic)
-- *
-- */
--	.global ret_from_irq
--ret_from_irq:
--#ifdef CONFIG_POOR_MANS_STRACE
--	pta	evt_debug_ret_from_irq, tr0
--	ori	SP, 0, r2
--	blink	tr0, LINK
--#endif
--	ld.q	SP, FRAME_S(FSSR), r6
--	shlri	r6, 30, r6
--	andi	r6, 1, r6
--	pta	resume_kernel, tr0
--	bne	r6, ZERO, tr0		/* no further checks */
--	STI()
--	pta	ret_with_reschedule, tr0
--	blink	tr0, ZERO		/* Do not check softirqs */
+-	return (__force __wsum)result;
+-}
 -
--	.global ret_from_exception
--ret_from_exception:
--	preempt_stop()
+-/* Copy while checksumming, otherwise like csum_partial.  */
+-__wsum
+-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+-{
+-	sum = csum_partial(src, len, sum);
+-	memcpy(dst, src, len);
 -
--#ifdef CONFIG_POOR_MANS_STRACE
--	pta	evt_debug_ret_from_exc, tr0
--	ori	SP, 0, r2
--	blink	tr0, LINK
--#endif
+-	return sum;
+-}
 -
--	ld.q	SP, FRAME_S(FSSR), r6
--	shlri	r6, 30, r6
--	andi	r6, 1, r6
--	pta	resume_kernel, tr0
--	bne	r6, ZERO, tr0		/* no further checks */
+-/* Copy from userspace and compute checksum.  If we catch an exception
+-   then zero the rest of the buffer.  */
+-__wsum
+-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+-			    __wsum sum, int *err_ptr)
+-{
+-	int missing;
 -
--	/* Check softirqs */
+-	pr_debug
+-	    ("csum_partial_copy_from_user src %p, dest %p, len %d, sum %08x, err_ptr %p\n",
+-	     src, dst, len, sum, err_ptr);
+-	missing = copy_from_user(dst, src, len);
+-	pr_debug("  access_ok %d\n", __access_ok((unsigned long) src, len));
+-	pr_debug("  missing %d\n", missing);
+-	if (missing) {
+-		memset(dst + len - missing, 0, missing);
+-		*err_ptr = -EFAULT;
+-	}
 -
--#ifdef CONFIG_PREEMPT
--	pta   ret_from_syscall, tr0
--	blink   tr0, ZERO
+-	return csum_partial(dst, len, sum);
+-}
 -
--resume_kernel:
--	pta	restore_all, tr0
+-/* Copy to userspace and compute checksum.  */
+-__wsum
+-csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len,
+-			  __wsum sum, int *err_ptr)
+-{
+-	sum = csum_partial(src, len, sum);
 -
--	getcon	KCR0, r6
--	ld.l	r6, TI_PRE_COUNT, r7
--	beq/u	r7, ZERO, tr0
+-	if (copy_to_user(dst, src, len))
+-		*err_ptr = -EFAULT;
 -
--need_resched:
--	ld.l	r6, TI_FLAGS, r7
--	movi	(1 << TIF_NEED_RESCHED), r8
--	and	r8, r7, r8
--	bne	r8, ZERO, tr0
+-	return sum;
+-}
 -
--	getcon	SR, r7
--	andi	r7, 0xf0, r7
--	bne	r7, ZERO, tr0
+-/*
+- *	This is a version of ip_compute_csum() optimized for IP headers,
+- *	which always checksum on 4 octet boundaries.
+- */
+-__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+-{
+-	pr_debug("ip_fast_csum %p,%d\n", iph, ihl);
 -
--	movi	((PREEMPT_ACTIVE >> 16) & 65535), r8
--	shori	(PREEMPT_ACTIVE & 65535), r8
--	st.l	r6, TI_PRE_COUNT, r8
+-	return (__force __sum16)~do_csum(iph, ihl * 4);
+-}
 -
--	STI()
--	movi	schedule, r7
--	ori	r7, 1, r7
--	ptabs	r7, tr1
--	blink	tr1, LINK
+-__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+-				unsigned short len,
+-				unsigned short proto, __wsum sum)
+-{
+-	unsigned long long result;
 -
--	st.l	r6, TI_PRE_COUNT, ZERO
--	CLI()
+-	pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead));
+-	pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead));
 -
--	pta	need_resched, tr1
--	blink	tr1, ZERO
--#endif
+-	result = (__force u64) saddr + (__force u64) daddr +
+-		 (__force u64) sum + ((len + proto) << 8);
 -
--	.global ret_from_syscall
--ret_from_syscall:
+-	/* Fold down to 32-bits so we don't loose in the typedef-less
+-	   network stack.  */
+-	/* 64 to 33 */
+-	result = (result & 0xffffffff) + (result >> 32);
+-	/* 33 to 32 */
+-	result = (result & 0xffffffff) + (result >> 32);
 -
--ret_with_reschedule:
--	getcon	KCR0, r6		! r6 contains current_thread_info
--	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags
+-	pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n",
+-		__FUNCTION__, saddr, daddr, len, proto, sum, result);
 -
--	! FIXME:!!!
--	! no handling of TIF_SYSCALL_TRACE yet!!
+-	return (__wsum)result;
+-}
+-EXPORT_SYMBOL(csum_tcpudp_nofold);
+diff --git a/arch/sh64/lib/copy_user_memcpy.S b/arch/sh64/lib/copy_user_memcpy.S
+deleted file mode 100644
+index 2a62816..0000000
+--- a/arch/sh64/lib/copy_user_memcpy.S
++++ /dev/null
+@@ -1,217 +0,0 @@
+-!
+-! Fast SH memcpy
+-!
+-! by Toshiyasu Morita (tm at netcom.com)
+-! hacked by J"orn Rernnecke (joern.rennecke at superh.com) ("o for o-umlaut)
+-! SH5 code Copyright 2002 SuperH Ltd.
+-!
+-! Entry: ARG0: destination pointer
+-!        ARG1: source pointer
+-!        ARG2: byte count
+-!
+-! Exit:  RESULT: destination pointer
+-!        any other registers in the range r0-r7: trashed
+-!
+-! Notes: Usually one wants to do small reads and write a longword, but
+-!        unfortunately it is difficult in some cases to concatanate bytes
+-!        into a longword on the SH, so this does a longword read and small
+-!        writes.
+-!
+-! This implementation makes two assumptions about how it is called:
+-!
+-! 1.: If the byte count is nonzero, the address of the last byte to be
+-!     copied is unsigned greater than the address of the first byte to
+-!     be copied.  This could be easily swapped for a signed comparison,
+-!     but the algorithm used needs some comparison.
+-!
+-! 2.: When there are two or three bytes in the last word of an 11-or-more
+-!     bytes memory chunk to b copied, the rest of the word can be read
+-!     without side effects.
+-!     This could be easily changed by increasing the minumum size of
+-!     a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
+-!     however, this would cost a few extra cyles on average.
+-!     For SHmedia, the assumption is that any quadword can be read in its
+-!     enirety if at least one byte is included in the copy.
 -
--	movi	_TIF_NEED_RESCHED, r8
--	and	r8, r7, r8
--	pta	work_resched, tr0
--	bne	r8, ZERO, tr0
+-/* Imported into Linux kernel by Richard Curnow.  This is used to implement the
+-   __copy_user function in the general case, so it has to be a distinct
+-   function from intra-kernel memcpy to allow for exception fix-ups in the
+-   event that the user pointer is bad somewhere in the copy (e.g. due to
+-   running off the end of the vma).
 -
--	pta	restore_all, tr1
+-   Note, this algorithm will be slightly wasteful in the case where the source
+-   and destination pointers are equally aligned, because the stlo/sthi pairs
+-   could then be merged back into single stores.  If there are a lot of cache
+-   misses, this is probably offset by the stall lengths on the preloads.
 -
--	movi	(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
--	and	r8, r7, r8
--	pta	work_notifysig, tr0
--	bne	r8, ZERO, tr0
+-*/
 -
--	blink	tr1, ZERO
+-/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
+- * erratum.  The first two prefetches are nop-ed out to avoid upsetting the
+- * instruction counts used in the jump address calculation.
+- * */
 -
--work_resched:
--	pta	ret_from_syscall, tr0
--	gettr	tr0, LINK
--	movi	schedule, r6
--	ptabs	r6, tr0
--	blink	tr0, ZERO		/* Call schedule(), return on top */
+-	.section .text..SHmedia32,"ax"
+-	.little
+-	.balign 32
+-	.global copy_user_memcpy
+-	.global copy_user_memcpy_end
+-copy_user_memcpy:
 -
--work_notifysig:
--	gettr	tr1, LINK
+-#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
+-#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
+-#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
+-#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
 -
--	movi	do_signal, r6
--	ptabs	r6, tr0
--	or	SP, ZERO, r2
--	or	ZERO, ZERO, r3
--	blink	tr0, LINK	    /* Call do_signal(regs, 0), return here */
+-	nop ! ld.b r3,0,r63 ! TAKum03020
+-	pta/l Large,tr0
+-	movi 25,r0
+-	bgeu/u r4,r0,tr0
+-	nsb r4,r0
+-	shlli r0,5,r0
+-	movi (L1-L0+63*32 + 1) & 0xffff,r1
+-	sub r1, r0, r0
+-L0:	ptrel r0,tr0
+-	add r2,r4,r5
+-	ptabs r18,tr1
+-	add r3,r4,r6
+-	blink tr0,r63
 -
--restore_all:
--	/* Do prefetches */
+-/* Rearranged to make cut2 safe */
+-	.balign 8
+-L4_7:	/* 4..7 byte memcpy cntd. */
+-	stlo.l r2, 0, r0
+-	or r6, r7, r6
+-	sthi.l r5, -1, r6
+-	stlo.l r5, -4, r6
+-	blink tr1,r63
 -
--	ld.q	SP, FRAME_T(0), r6
--	ld.q	SP, FRAME_T(1), r7
--	ld.q	SP, FRAME_T(2), r8
--	ld.q	SP, FRAME_T(3), r9
--	ptabs	r6, tr0
--	ptabs	r7, tr1
--	ptabs	r8, tr2
--	ptabs	r9, tr3
--	ld.q	SP, FRAME_T(4), r6
--	ld.q	SP, FRAME_T(5), r7
--	ld.q	SP, FRAME_T(6), r8
--	ld.q	SP, FRAME_T(7), r9
--	ptabs	r6, tr4
--	ptabs	r7, tr5
--	ptabs	r8, tr6
--	ptabs	r9, tr7
+-	.balign 8
+-L1:	/* 0 byte memcpy */
+-	nop
+-	blink tr1,r63
+-	nop
+-	nop
+-	nop
+-	nop
 -
--	ld.q	SP, FRAME_R(0), r0
--	ld.q	SP, FRAME_R(1), r1
--	ld.q	SP, FRAME_R(2), r2
--	ld.q	SP, FRAME_R(3), r3
--	ld.q	SP, FRAME_R(4), r4
--	ld.q	SP, FRAME_R(5), r5
--	ld.q	SP, FRAME_R(6), r6
--	ld.q	SP, FRAME_R(7), r7
--	ld.q	SP, FRAME_R(8), r8
--	ld.q	SP, FRAME_R(9), r9
--	ld.q	SP, FRAME_R(10), r10
--	ld.q	SP, FRAME_R(11), r11
--	ld.q	SP, FRAME_R(12), r12
--	ld.q	SP, FRAME_R(13), r13
--	ld.q	SP, FRAME_R(14), r14
+-L2_3:	/* 2 or 3 byte memcpy cntd. */
+-	st.b r5,-1,r6
+-	blink tr1,r63
 -
--	ld.q	SP, FRAME_R(16), r16
--	ld.q	SP, FRAME_R(17), r17
--	ld.q	SP, FRAME_R(18), r18
--	ld.q	SP, FRAME_R(19), r19
--	ld.q	SP, FRAME_R(20), r20
--	ld.q	SP, FRAME_R(21), r21
--	ld.q	SP, FRAME_R(22), r22
--	ld.q	SP, FRAME_R(23), r23
--	ld.q	SP, FRAME_R(24), r24
--	ld.q	SP, FRAME_R(25), r25
--	ld.q	SP, FRAME_R(26), r26
--	ld.q	SP, FRAME_R(27), r27
--	ld.q	SP, FRAME_R(28), r28
--	ld.q	SP, FRAME_R(29), r29
--	ld.q	SP, FRAME_R(30), r30
--	ld.q	SP, FRAME_R(31), r31
--	ld.q	SP, FRAME_R(32), r32
--	ld.q	SP, FRAME_R(33), r33
--	ld.q	SP, FRAME_R(34), r34
--	ld.q	SP, FRAME_R(35), r35
--	ld.q	SP, FRAME_R(36), r36
--	ld.q	SP, FRAME_R(37), r37
--	ld.q	SP, FRAME_R(38), r38
--	ld.q	SP, FRAME_R(39), r39
--	ld.q	SP, FRAME_R(40), r40
--	ld.q	SP, FRAME_R(41), r41
--	ld.q	SP, FRAME_R(42), r42
--	ld.q	SP, FRAME_R(43), r43
--	ld.q	SP, FRAME_R(44), r44
--	ld.q	SP, FRAME_R(45), r45
--	ld.q	SP, FRAME_R(46), r46
--	ld.q	SP, FRAME_R(47), r47
--	ld.q	SP, FRAME_R(48), r48
--	ld.q	SP, FRAME_R(49), r49
--	ld.q	SP, FRAME_R(50), r50
--	ld.q	SP, FRAME_R(51), r51
--	ld.q	SP, FRAME_R(52), r52
--	ld.q	SP, FRAME_R(53), r53
--	ld.q	SP, FRAME_R(54), r54
--	ld.q	SP, FRAME_R(55), r55
--	ld.q	SP, FRAME_R(56), r56
--	ld.q	SP, FRAME_R(57), r57
--	ld.q	SP, FRAME_R(58), r58
+-	/* 1 byte memcpy */
+-	ld.b r3,0,r0
+-	st.b r2,0,r0
+-	blink tr1,r63
 -
--	getcon	SR, r59
--	movi	SR_BLOCK_EXC, r60
--	or	r59, r60, r59
--	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
--	ld.q	SP, FRAME_S(FSSR), r61
--	ld.q	SP, FRAME_S(FSPC), r62
--	movi	SR_ASID_MASK, r60
--	and	r59, r60, r59
--	andc	r61, r60, r61		/* Clear out older ASID */
--	or	r59, r61, r61		/* Retain current ASID */
--	putcon	r61, SSR
--	putcon	r62, SPC
+-L8_15:	/* 8..15 byte memcpy cntd. */
+-	stlo.q r2, 0, r0
+-	or r6, r7, r6
+-	sthi.q r5, -1, r6
+-	stlo.q r5, -8, r6
+-	blink tr1,r63
 -
--	/* Ignore FSYSCALL_ID */
+-	/* 2 or 3 byte memcpy */
+-	ld.b r3,0,r0
+-	nop ! ld.b r2,0,r63 ! TAKum03020
+-	ld.b r3,1,r1
+-	st.b r2,0,r0
+-	pta/l L2_3,tr0
+-	ld.b r6,-1,r6
+-	st.b r2,1,r1
+-	blink tr0, r63
 -
--	ld.q	SP, FRAME_R(59), r59
--	ld.q	SP, FRAME_R(60), r60
--	ld.q	SP, FRAME_R(61), r61
--	ld.q	SP, FRAME_R(62), r62
+-	/* 4 .. 7 byte memcpy */
+-	LDUAL (r3, 0, r0, r1)
+-	pta L4_7, tr0
+-	ldlo.l r6, -4, r7
+-	or r0, r1, r0
+-	sthi.l r2, 3, r0
+-	ldhi.l r6, -1, r6
+-	blink tr0, r63
 -
--	/* Last touch */
--	ld.q	SP, FRAME_R(15), SP
--	rte
--	nop
+-	/* 8 .. 15 byte memcpy */
+-	LDUAQ (r3, 0, r0, r1)
+-	pta L8_15, tr0
+-	ldlo.q r6, -8, r7
+-	or r0, r1, r0
+-	sthi.q r2, 7, r0
+-	ldhi.q r6, -1, r6
+-	blink tr0, r63
 -
--/*
-- * Third level handlers for VBR-based exceptions. Adapting args to
-- * and/or deflecting to fourth level handlers.
-- *
-- * Fourth level handlers interface.
-- * Most are C-coded handlers directly pointed by the trap_jtable.
-- * (Third = Fourth level)
-- * Inputs:
-- * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
-- *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
-- * (r3)   struct pt_regs *, original register's frame pointer
-- * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
-- * (r5)   TRA control register (for syscall/debug benefit only)
-- * (LINK) return address
-- * (SP)   = r3
-- *
-- * Kernel TLB fault handlers will get a slightly different interface.
-- * (r2)   struct pt_regs *, original register's frame pointer
-- * (r3)   writeaccess, whether it's a store fault as opposed to load fault
-- * (r4)   execaccess, whether it's a ITLB fault as opposed to DTLB fault
-- * (r5)   Effective Address of fault
-- * (LINK) return address
-- * (SP)   = r2
-- *
-- * fpu_error_or_IRQ? is a helper to deflect to the right cause.
-- *
-- */
--tlb_miss_load:
--	or	SP, ZERO, r2
--	or	ZERO, ZERO, r3		/* Read */
--	or	ZERO, ZERO, r4		/* Data */
--	getcon	TEA, r5
--	pta	call_do_page_fault, tr0
--	beq	ZERO, ZERO, tr0
+-	/* 16 .. 24 byte memcpy */
+-	LDUAQ (r3, 0, r0, r1)
+-	LDUAQ (r3, 8, r8, r9)
+-	or r0, r1, r0
+-	sthi.q r2, 7, r0
+-	or r8, r9, r8
+-	sthi.q r2, 15, r8
+-	ldlo.q r6, -8, r7
+-	ldhi.q r6, -1, r6
+-	stlo.q r2, 8, r8
+-	stlo.q r2, 0, r0
+-	or r6, r7, r6
+-	sthi.q r5, -1, r6
+-	stlo.q r5, -8, r6
+-	blink tr1,r63
 -
--tlb_miss_store:
--	or	SP, ZERO, r2
--	movi	1, r3			/* Write */
--	or	ZERO, ZERO, r4		/* Data */
--	getcon	TEA, r5
--	pta	call_do_page_fault, tr0
--	beq	ZERO, ZERO, tr0
+-Large:
+-	! ld.b r2, 0, r63 ! TAKum03020
+-	pta/l  Loop_ua, tr1
+-	ori r3, -8, r7
+-	sub r2, r7, r22
+-	sub r3, r2, r6
+-	add r2, r4, r5
+-	ldlo.q r3, 0, r0
+-	addi r5, -16, r5
+-	movi 64+8, r27 ! could subtract r7 from that.
+-	stlo.q r2, 0, r0
+-	sthi.q r2, 7, r0
+-	ldx.q r22, r6, r0
+-	bgtu/l r27, r4, tr1
 -
--itlb_miss_or_IRQ:
--	pta	its_IRQ, tr0
--	beqi/u	r4, EVENT_INTERRUPT, tr0
--	or	SP, ZERO, r2
--	or	ZERO, ZERO, r3		/* Read */
--	movi	1, r4			/* Text */
--	getcon	TEA, r5
--	/* Fall through */
+-	addi r5, -48, r27
+-	pta/l Loop_line, tr0
+-	addi r6, 64, r36
+-	addi r6, -24, r19
+-	addi r6, -16, r20
+-	addi r6, -8, r21
 -
--call_do_page_fault:
--	movi	do_page_fault, r6
--        ptabs	r6, tr0
--        blink	tr0, ZERO
+-Loop_line:
+-	! ldx.q r22, r36, r63 ! TAKum03020
+-	alloco r22, 32
+-	synco
+-	addi r22, 32, r22
+-	ldx.q r22, r19, r23
+-	sthi.q r22, -25, r0
+-	ldx.q r22, r20, r24
+-	ldx.q r22, r21, r25
+-	stlo.q r22, -32, r0
+-	ldx.q r22, r6,  r0
+-	sthi.q r22, -17, r23
+-	sthi.q r22,  -9, r24
+-	sthi.q r22,  -1, r25
+-	stlo.q r22, -24, r23
+-	stlo.q r22, -16, r24
+-	stlo.q r22,  -8, r25
+-	bgeu r27, r22, tr0
 -
--fpu_error_or_IRQA:
--	pta	its_IRQ, tr0
--	beqi/l	r4, EVENT_INTERRUPT, tr0
--#ifdef CONFIG_SH_FPU
--	movi	do_fpu_state_restore, r6
--#else
--	movi	do_exception_error, r6
--#endif
--	ptabs	r6, tr0
--	blink	tr0, ZERO
+-Loop_ua:
+-	addi r22, 8, r22
+-	sthi.q r22, -1, r0
+-	stlo.q r22, -8, r0
+-	ldx.q r22, r6, r0
+-	bgtu/l r5, r22, tr1
 -
--fpu_error_or_IRQB:
--	pta	its_IRQ, tr0
--	beqi/l	r4, EVENT_INTERRUPT, tr0
--#ifdef CONFIG_SH_FPU
--	movi	do_fpu_state_restore, r6
--#else
--	movi	do_exception_error, r6
--#endif
--	ptabs	r6, tr0
--	blink	tr0, ZERO
+-	add r3, r4, r7
+-	ldlo.q r7, -8, r1
+-	sthi.q r22, 7, r0
+-	ldhi.q r7, -1, r7
+-	ptabs r18,tr1
+-	stlo.q r22, 0, r0
+-	or r1, r7, r1
+-	sthi.q r5, 15, r1
+-	stlo.q r5, 8, r1
+-	blink tr1, r63
+-copy_user_memcpy_end:
+-	nop
+diff --git a/arch/sh64/lib/dbg.c b/arch/sh64/lib/dbg.c
+deleted file mode 100644
+index 97816e0..0000000
+--- a/arch/sh64/lib/dbg.c
++++ /dev/null
+@@ -1,430 +0,0 @@
+-/*--------------------------------------------------------------------------
+---
+--- Identity : Linux50 Debug Functions
+---
+--- File     : arch/sh64/lib/dbg.C
+---
+--- Copyright 2000, 2001 STMicroelectronics Limited.
+--- Copyright 2004 Richard Curnow (evt_debug etc)
+---
+---------------------------------------------------------------------------*/
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/fs.h>
+-#include <asm/mmu_context.h>
 -
--its_IRQ:
--	movi	do_IRQ, r6
--	ptabs	r6, tr0
--	blink	tr0, ZERO
+-typedef u64 regType_t;
 -
--/*
-- * system_call/unknown_trap third level handler:
-- *
-- * Inputs:
-- * (r2)   fault/interrupt code, entry number (TRAP = 11)
-- * (r3)   struct pt_regs *, original register's frame pointer
-- * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
-- * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
-- * (SP)   = r3
-- * (LINK) return address: ret_from_exception
-- * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
-- *
-- * Outputs:
-- * (*r3)  Syscall reply (Saved r2)
-- * (LINK) In case of syscall only it can be scrapped.
-- *        Common second level post handler will be ret_from_syscall.
-- *        Common (non-trace) exit point to that is syscall_ret (saving
-- *        result to r2). Common bad exit point is syscall_bad (returning
-- *        ENOSYS then saved to r2).
-- *
-- */
+-static regType_t getConfigReg(u64 id)
+-{
+-	register u64 reg __asm__("r2");
+-	asm volatile ("getcfg   %1, 0, %0":"=r" (reg):"r"(id));
+-	return (reg);
+-}
 -
--unknown_trap:
--	/* Unknown Trap or User Trace */
--	movi	do_unknown_trapa, r6
--	ptabs	r6, tr0
--        ld.q    r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
--        andi    r2, 0x1ff, r2		/* r2 = syscall # */
--	blink	tr0, LINK
+-/* ======================================================================= */
 -
--	pta	syscall_ret, tr0
--	blink	tr0, ZERO
+-static char *szTab[] = { "4k", "64k", "1M", "512M" };
+-static char *protTab[] = { "----",
+-	"---R",
+-	"--X-",
+-	"--XR",
+-	"-W--",
+-	"-W-R",
+-	"-WX-",
+-	"-WXR",
+-	"U---",
+-	"U--R",
+-	"U-X-",
+-	"U-XR",
+-	"UW--",
+-	"UW-R",
+-	"UWX-",
+-	"UWXR"
+-};
+-#define  ITLB_BASE	0x00000000
+-#define  DTLB_BASE	0x00800000
+-#define  MAX_TLBs		64
+-/* PTE High */
+-#define  GET_VALID(pte)        ((pte) & 0x1)
+-#define  GET_SHARED(pte)       ((pte) & 0x2)
+-#define  GET_ASID(pte)         ((pte >> 2) & 0x0ff)
+-#define  GET_EPN(pte)          ((pte) & 0xfffff000)
 -
--        /* New syscall implementation*/
--system_call:
--	pta	unknown_trap, tr0
--        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
--        shlri   r4, 20, r4
--	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */
+-/* PTE Low */
+-#define  GET_CBEHAVIOR(pte)    ((pte) & 0x3)
+-#define  GET_PAGE_SIZE(pte)    szTab[((pte >> 3) & 0x3)]
+-#define  GET_PROTECTION(pte)   protTab[((pte >> 6) & 0xf)]
+-#define  GET_PPN(pte)          ((pte) & 0xfffff000)
 -
--        /* It's a system call */
--	st.q    r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
--	andi    r5, 0x1ff, r5			/* syscall # -> r5	  */
+-#define PAGE_1K_MASK           0x00000000
+-#define PAGE_4K_MASK           0x00000010
+-#define PAGE_64K_MASK          0x00000080
+-#define MMU_PAGESIZE_MASK      (PAGE_64K_MASK | PAGE_4K_MASK)
+-#define PAGE_1MB_MASK          MMU_PAGESIZE_MASK
+-#define PAGE_1K                (1024)
+-#define PAGE_4K                (1024 * 4)
+-#define PAGE_64K               (1024 * 64)
+-#define PAGE_1MB               (1024 * 1024)
 -
--	STI()
+-#define HOW_TO_READ_TLB_CONTENT  \
+-       "[ ID]  PPN         EPN        ASID  Share  CB  P.Size   PROT.\n"
 -
--	pta	syscall_allowed, tr0
--	movi	NR_syscalls - 1, r4	/* Last valid */
--	bgeu/l	r4, r5, tr0
+-void print_single_tlb(unsigned long tlb, int single_print)
+-{
+-	regType_t pteH;
+-	regType_t pteL;
+-	unsigned int valid, shared, asid, epn, cb, ppn;
+-	char *pSize;
+-	char *pProt;
 -
--syscall_bad:
--	/* Return ENOSYS ! */
--	movi	-(ENOSYS), r2		/* Fall-through */
+-	/*
+-	   ** in case of single print <single_print> is true, this implies:
+-	   **   1) print the TLB in any case also if NOT VALID
+-	   **   2) print out the header
+-	 */
 -
--	.global syscall_ret
--syscall_ret:
--	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
+-	pteH = getConfigReg(tlb);
+-	valid = GET_VALID(pteH);
+-	if (single_print)
+-		printk(HOW_TO_READ_TLB_CONTENT);
+-	else if (!valid)
+-		return;
 -
--#ifdef CONFIG_POOR_MANS_STRACE
--	/* nothing useful in registers at this point */
+-	pteL = getConfigReg(tlb + 1);
 -
--	movi	evt_debug2, r5
--	ori	r5, 1, r5
--	ptabs	r5, tr0
--	ld.q	SP, FRAME_R(9), r2
--	or	SP, ZERO, r3
--	blink	tr0, LINK
--#endif
+-	shared = GET_SHARED(pteH);
+-	asid = GET_ASID(pteH);
+-	epn = GET_EPN(pteH);
+-	cb = GET_CBEHAVIOR(pteL);
+-	pSize = GET_PAGE_SIZE(pteL);
+-	pProt = GET_PROTECTION(pteL);
+-	ppn = GET_PPN(pteL);
+-	printk("[%c%2ld]  0x%08x  0x%08x  %03d   %02x    %02x   %4s    %s\n",
+-	       ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP),
+-	       ppn, epn, asid, shared, cb, pSize, pProt);
+-}
 -
--	ld.q	SP, FRAME_S(FSPC), r2
--	addi	r2, 4, r2		/* Move PC, being pre-execution event */
--	st.q	SP, FRAME_S(FSPC), r2
--	pta	ret_from_syscall, tr0
--	blink	tr0, ZERO
+-void print_dtlb(void)
+-{
+-	int count;
+-	unsigned long tlb;
 -
+-	printk(" ================= SH-5 D-TLBs Status ===================\n");
+-	printk(HOW_TO_READ_TLB_CONTENT);
+-	tlb = DTLB_BASE;
+-	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
+-		print_single_tlb(tlb, 0);
+-	printk
+-	    (" =============================================================\n");
+-}
 -
--/*  A different return path for ret_from_fork, because we now need
-- *  to call schedule_tail with the later kernels. Because prev is
-- *  loaded into r2 by switch_to(), we can just call it straight away
-- */
+-void print_itlb(void)
+-{
+-	int count;
+-	unsigned long tlb;
 -
--.global	ret_from_fork
--ret_from_fork:
+-	printk(" ================= SH-5 I-TLBs Status ===================\n");
+-	printk(HOW_TO_READ_TLB_CONTENT);
+-	tlb = ITLB_BASE;
+-	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
+-		print_single_tlb(tlb, 0);
+-	printk
+-	    (" =============================================================\n");
+-}
 -
--	movi	schedule_tail,r5
--	ori	r5, 1, r5
--	ptabs	r5, tr0
--	blink	tr0, LINK
+-/* ======================================================================= */
 -
 -#ifdef CONFIG_POOR_MANS_STRACE
--	/* nothing useful in registers at this point */
 -
--	movi	evt_debug2, r5
--	ori	r5, 1, r5
--	ptabs	r5, tr0
--	ld.q	SP, FRAME_R(9), r2
--	or	SP, ZERO, r3
--	blink	tr0, LINK
--#endif
+-#include "syscalltab.h"
 -
--	ld.q	SP, FRAME_S(FSPC), r2
--	addi	r2, 4, r2		/* Move PC, being pre-execution event */
--	st.q	SP, FRAME_S(FSPC), r2
--	pta	ret_from_syscall, tr0
--	blink	tr0, ZERO
+-struct ring_node {
+-	int evt;
+-	int ret_addr;
+-	int event;
+-	int tra;
+-	int pid;
+-	unsigned long sp;
+-	unsigned long pc;
+-};
 -
+-static struct ring_node event_ring[16];
+-static int event_ptr = 0;
 -
+-struct stored_syscall_data {
+-	int pid;
+-	int syscall_number;
+-};
 -
--syscall_allowed:
--	/* Use LINK to deflect the exit point, default is syscall_ret */
--	pta	syscall_ret, tr0
--	gettr	tr0, LINK
--	pta	syscall_notrace, tr0
+-#define N_STORED_SYSCALLS 16
 -
--	getcon	KCR0, r2
--	ld.l	r2, TI_FLAGS, r4
--	movi	(1 << TIF_SYSCALL_TRACE), r6
--	and	r6, r4, r6
--	beq/l	r6, ZERO, tr0
+-static struct stored_syscall_data stored_syscalls[N_STORED_SYSCALLS];
+-static int syscall_next=0;
+-static int syscall_next_print=0;
 -
--	/* Trace it by calling syscall_trace before and after */
--	movi	syscall_trace, r4
--	ptabs	r4, tr0
--	blink	tr0, LINK
--	/* Reload syscall number as r5 is trashed by syscall_trace */
--	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
--	andi	r5, 0x1ff, r5
+-void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs)
+-{
+-	int syscallno = tra & 0xff;
+-	unsigned long sp;
+-	unsigned long stack_bottom;
+-	int pid;
+-	struct ring_node *rr;
 -
--	pta	syscall_ret_trace, tr0
--	gettr	tr0, LINK
+-	pid = current->pid;
+-	stack_bottom = (unsigned long) task_stack_page(current);
+-	asm volatile("ori r15, 0, %0" : "=r" (sp));
+-	rr = event_ring + event_ptr;
+-	rr->evt = evt;
+-	rr->ret_addr = ret_addr;
+-	rr->event = event;
+-	rr->tra = tra;
+-	rr->pid = pid;
+-	rr->sp = sp;
+-	rr->pc = regs->pc;
 -
--syscall_notrace:
--	/* Now point to the appropriate 4th level syscall handler */
--	movi	sys_call_table, r4
--	shlli	r5, 2, r5
--	ldx.l	r4, r5, r5
--	ptabs	r5, tr0
+-	if (sp < stack_bottom + 3092) {
+-		printk("evt_debug : stack underflow report\n");
+-		int i, j;
+-		for (j=0, i = event_ptr; j<16; j++) {
+-			rr = event_ring + i;
+-			printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n",
+-				rr->evt, rr->event, rr->tra, rr->pid, rr->sp, rr->pc);
+-			i--;
+-			i &= 15;
+-		}
+-		panic("STACK UNDERFLOW\n");
+-	}
 -
--	/* Prepare original args */
--	ld.q	SP, FRAME_R(2), r2
--	ld.q	SP, FRAME_R(3), r3
--	ld.q	SP, FRAME_R(4), r4
--	ld.q	SP, FRAME_R(5), r5
--	ld.q	SP, FRAME_R(6), r6
--	ld.q	SP, FRAME_R(7), r7
+-	event_ptr = (event_ptr + 1) & 15;
 -
--	/* And now the trick for those syscalls requiring regs * ! */
--	or	SP, ZERO, r8
+-	if ((event == 2) && (evt == 0x160)) {
+-		if (syscallno < NUM_SYSCALL_INFO_ENTRIES) {
+-			/* Store the syscall information to print later.  We
+-			 * can't print this now - currently we're running with
+-			 * SR.BL=1, so we can't take a tlbmiss (which could occur
+-			 * in the console drivers under printk).
+-			 *
+-			 * Just overwrite old entries on ring overflow - this
+-			 * is only for last-hope debugging. */
+-			stored_syscalls[syscall_next].pid = current->pid;
+-			stored_syscalls[syscall_next].syscall_number = syscallno;
+-			syscall_next++;
+-			syscall_next &= (N_STORED_SYSCALLS - 1);
+-		}
+-	}
+-}
 -
--	/* Call it */
--	blink	tr0, ZERO	/* LINK is already properly set */
+-static void drain_syscalls(void) {
+-	while (syscall_next_print != syscall_next) {
+-		printk("Task %d: %s()\n",
+-			stored_syscalls[syscall_next_print].pid,
+-			syscall_info_table[stored_syscalls[syscall_next_print].syscall_number].name);
+-			syscall_next_print++;
+-			syscall_next_print &= (N_STORED_SYSCALLS - 1);
+-	}
+-}
 -
--syscall_ret_trace:
--	/* We get back here only if under trace */
--	st.q	SP, FRAME_R(9), r2	/* Save return value */
+-void evt_debug2(unsigned int ret)
+-{
+-	drain_syscalls();
+-	printk("Task %d: syscall returns %08x\n", current->pid, ret);
+-}
 -
--	movi	syscall_trace, LINK
--	ptabs	LINK, tr0
--	blink	tr0, LINK
+-void evt_debug_ret_from_irq(struct pt_regs *regs)
+-{
+-	int pid;
+-	struct ring_node *rr;
 -
--	/* This needs to be done after any syscall tracing */
--	ld.q	SP, FRAME_S(FSPC), r2
--	addi	r2, 4, r2	/* Move PC, being pre-execution event */
--	st.q	SP, FRAME_S(FSPC), r2
+-	pid = current->pid;
+-	rr = event_ring + event_ptr;
+-	rr->evt = 0xffff;
+-	rr->ret_addr = 0;
+-	rr->event = 0;
+-	rr->tra = 0;
+-	rr->pid = pid;
+-	rr->pc = regs->pc;
+-	event_ptr = (event_ptr + 1) & 15;
+-}
 -
--	pta	ret_from_syscall, tr0
--	blink	tr0, ZERO		/* Resume normal return sequence */
+-void evt_debug_ret_from_exc(struct pt_regs *regs)
+-{
+-	int pid;
+-	struct ring_node *rr;
 -
--/*
-- * --- Switch to running under a particular ASID and return the previous ASID value
-- * --- The caller is assumed to have done a cli before calling this.
-- *
-- * Input r2 : new ASID
-- * Output r2 : old ASID
-- */
+-	pid = current->pid;
+-	rr = event_ring + event_ptr;
+-	rr->evt = 0xfffe;
+-	rr->ret_addr = 0;
+-	rr->event = 0;
+-	rr->tra = 0;
+-	rr->pid = pid;
+-	rr->pc = regs->pc;
+-	event_ptr = (event_ptr + 1) & 15;
+-}
 -
--	.global switch_and_save_asid
--switch_and_save_asid:
--	getcon	sr, r0
--	movi	255, r4
--	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
--	and	r0, r4, r3	/* r3 = shifted old ASID */
--	andi	r2, 255, r2	/* mask down new ASID */
--	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
--	andc	r0, r4, r0	/* efface old ASID from SR */
--	or	r0, r2, r0	/* insert the new ASID */
--	putcon	r0, ssr
--	movi	1f, r0
--	putcon	r0, spc
--	rte
--	nop
--1:
--	ptabs	LINK, tr0
--	shlri	r3, 16, r2	/* r2 = old ASID */
--	blink tr0, r63
+-#endif /* CONFIG_POOR_MANS_STRACE */
 -
--	.global	route_to_panic_handler
--route_to_panic_handler:
--	/* Switch to real mode, goto panic_handler, don't return.  Useful for
--	   last-chance debugging, e.g. if no output wants to go to the console.
--	   */
+-/* ======================================================================= */
 -
--	movi	panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1
--	ptabs	r1, tr0
--	pta	1f, tr1
--	gettr	tr1, r0
--	putcon	r0, spc
--	getcon	sr, r0
--	movi	1, r1
--	shlli	r1, 31, r1
--	andc	r0, r1, r0
--	putcon	r0, ssr
--	rte
--	nop
--1:	/* Now in real mode */
--	blink tr0, r63
--	nop
+-void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs)
+-{
 -
--	.global peek_real_address_q
--peek_real_address_q:
--	/* Two args:
--	   r2 : real mode address to peek
--	   r2(out) : result quadword
+-	unsigned long long ah, al, bh, bl, ch, cl;
 -
--	   This is provided as a cheapskate way of manipulating device
--	   registers for debugging (to avoid the need to onchip_remap the debug
--	   module, and to avoid the need to onchip_remap the watchpoint
--	   controller in a way that identity maps sufficient bits to avoid the
--	   SH5-101 cut2 silicon defect).
+-	printk("\n");
+-	printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n",
+-	       ((from) ? from : "???"), current->pid, trapnr, signr);
 -
--	   This code is not performance critical
--	*/
+-	asm volatile ("getcon   " __EXPEVT ", %0":"=r"(ah));
+-	asm volatile ("getcon   " __EXPEVT ", %0":"=r"(al));
+-	ah = (ah) >> 32;
+-	al = (al) & 0xffffffff;
+-	asm volatile ("getcon   " __KCR1 ", %0":"=r"(bh));
+-	asm volatile ("getcon   " __KCR1 ", %0":"=r"(bl));
+-	bh = (bh) >> 32;
+-	bl = (bl) & 0xffffffff;
+-	asm volatile ("getcon   " __INTEVT ", %0":"=r"(ch));
+-	asm volatile ("getcon   " __INTEVT ", %0":"=r"(cl));
+-	ch = (ch) >> 32;
+-	cl = (cl) & 0xffffffff;
+-	printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
 -
--	add.l	r2, r63, r2	/* sign extend address */
--	getcon	sr, r0		/* r0 = saved original SR */
--	movi	1, r1
--	shlli	r1, 28, r1
--	or	r0, r1, r1	/* r0 with block bit set */
--	putcon	r1, sr		/* now in critical section */
--	movi	1, r36
--	shlli	r36, 31, r36
--	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
+-	asm volatile ("getcon   " __PEXPEVT ", %0":"=r"(ah));
+-	asm volatile ("getcon   " __PEXPEVT ", %0":"=r"(al));
+-	ah = (ah) >> 32;
+-	al = (al) & 0xffffffff;
+-	asm volatile ("getcon   " __PSPC ", %0":"=r"(bh));
+-	asm volatile ("getcon   " __PSPC ", %0":"=r"(bl));
+-	bh = (bh) >> 32;
+-	bl = (bl) & 0xffffffff;
+-	asm volatile ("getcon   " __PSSR ", %0":"=r"(ch));
+-	asm volatile ("getcon   " __PSSR ", %0":"=r"(cl));
+-	ch = (ch) >> 32;
+-	cl = (cl) & 0xffffffff;
+-	printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
 -
--	putcon	r1, ssr
--	movi	.peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
--	movi	1f, r37		/* virtual mode return addr */
--	putcon	r36, spc
+-	ah = (regs->pc) >> 32;
+-	al = (regs->pc) & 0xffffffff;
+-	bh = (regs->regs[18]) >> 32;
+-	bl = (regs->regs[18]) & 0xffffffff;
+-	ch = (regs->regs[15]) >> 32;
+-	cl = (regs->regs[15]) & 0xffffffff;
+-	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
 -
--	synco
--	rte
--	nop
+-	ah = (regs->sr) >> 32;
+-	al = (regs->sr) & 0xffffffff;
+-	asm volatile ("getcon   " __TEA ", %0":"=r"(bh));
+-	asm volatile ("getcon   " __TEA ", %0":"=r"(bl));
+-	bh = (bh) >> 32;
+-	bl = (bl) & 0xffffffff;
+-	asm volatile ("getcon   " __KCR0 ", %0":"=r"(ch));
+-	asm volatile ("getcon   " __KCR0 ", %0":"=r"(cl));
+-	ch = (ch) >> 32;
+-	cl = (cl) & 0xffffffff;
+-	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
 -
--.peek0:	/* come here in real mode, don't touch caches!!
--           still in critical section (sr.bl==1) */
--	putcon	r0, ssr
--	putcon	r37, spc
--	/* Here's the actual peek.  If the address is bad, all bets are now off
--	 * as to what will happen (handlers invoked in real-mode = bad news) */
--	ld.q	r2, 0, r2
--	synco
--	rte	/* Back to virtual mode */
--	nop
+-	ah = (regs->regs[0]) >> 32;
+-	al = (regs->regs[0]) & 0xffffffff;
+-	bh = (regs->regs[1]) >> 32;
+-	bl = (regs->regs[1]) & 0xffffffff;
+-	ch = (regs->regs[2]) >> 32;
+-	cl = (regs->regs[2]) & 0xffffffff;
+-	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
 -
--1:
--	ptabs	LINK, tr0
--	blink	tr0, r63
+-	ah = (regs->regs[3]) >> 32;
+-	al = (regs->regs[3]) & 0xffffffff;
+-	bh = (regs->regs[4]) >> 32;
+-	bl = (regs->regs[4]) & 0xffffffff;
+-	ch = (regs->regs[5]) >> 32;
+-	cl = (regs->regs[5]) & 0xffffffff;
+-	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
 -
--	.global poke_real_address_q
--poke_real_address_q:
--	/* Two args:
--	   r2 : real mode address to poke
--	   r3 : quadword value to write.
+-	ah = (regs->regs[6]) >> 32;
+-	al = (regs->regs[6]) & 0xffffffff;
+-	bh = (regs->regs[7]) >> 32;
+-	bl = (regs->regs[7]) & 0xffffffff;
+-	ch = (regs->regs[8]) >> 32;
+-	cl = (regs->regs[8]) & 0xffffffff;
+-	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
 -
--	   This is provided as a cheapskate way of manipulating device
--	   registers for debugging (to avoid the need to onchip_remap the debug
--	   module, and to avoid the need to onchip_remap the watchpoint
--	   controller in a way that identity maps sufficient bits to avoid the
--	   SH5-101 cut2 silicon defect).
+-	ah = (regs->regs[9]) >> 32;
+-	al = (regs->regs[9]) & 0xffffffff;
+-	bh = (regs->regs[10]) >> 32;
+-	bl = (regs->regs[10]) & 0xffffffff;
+-	ch = (regs->regs[11]) >> 32;
+-	cl = (regs->regs[11]) & 0xffffffff;
+-	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-	printk("....\n");
 -
--	   This code is not performance critical
--	*/
+-	ah = (regs->tregs[0]) >> 32;
+-	al = (regs->tregs[0]) & 0xffffffff;
+-	bh = (regs->tregs[1]) >> 32;
+-	bl = (regs->tregs[1]) & 0xffffffff;
+-	ch = (regs->tregs[2]) >> 32;
+-	cl = (regs->tregs[2]) & 0xffffffff;
+-	printk("T0  : %08Lx%08Lx T1  : %08Lx%08Lx T2  : %08Lx%08Lx\n",
+-	       ah, al, bh, bl, ch, cl);
+-	printk("....\n");
 -
--	add.l	r2, r63, r2	/* sign extend address */
--	getcon	sr, r0		/* r0 = saved original SR */
--	movi	1, r1
--	shlli	r1, 28, r1
--	or	r0, r1, r1	/* r0 with block bit set */
--	putcon	r1, sr		/* now in critical section */
--	movi	1, r36
--	shlli	r36, 31, r36
--	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
+-	print_dtlb();
+-	print_itlb();
+-}
 -
--	putcon	r1, ssr
--	movi	.poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
--	movi	1f, r37		/* virtual mode return addr */
--	putcon	r36, spc
+-/* ======================================================================= */
 -
--	synco
--	rte
--	nop
+-/*
+-** Depending on <base> scan the MMU, Data or Instruction side
+-** looking for a valid mapping matching Eaddr & asid.
+-** Return -1 if not found or the TLB id entry otherwise.
+-** Note: it works only for 4k pages!
+-*/
+-static unsigned long
+-lookup_mmu_side(unsigned long base, unsigned long Eaddr, unsigned long asid)
+-{
+-	regType_t pteH;
+-	unsigned long epn;
+-	int count;
 -
--.poke0:	/* come here in real mode, don't touch caches!!
--           still in critical section (sr.bl==1) */
--	putcon	r0, ssr
--	putcon	r37, spc
--	/* Here's the actual poke.  If the address is bad, all bets are now off
--	 * as to what will happen (handlers invoked in real-mode = bad news) */
--	st.q	r2, 0, r3
--	synco
--	rte	/* Back to virtual mode */
--	nop
+-	epn = Eaddr & 0xfffff000;
 -
--1:
--	ptabs	LINK, tr0
--	blink	tr0, r63
+-	for (count = 0; count < MAX_TLBs; count++, base += TLB_STEP) {
+-		pteH = getConfigReg(base);
+-		if (GET_VALID(pteH))
+-			if ((unsigned long) GET_EPN(pteH) == epn)
+-				if ((unsigned long) GET_ASID(pteH) == asid)
+-					break;
+-	}
+-	return ((unsigned long) ((count < MAX_TLBs) ? base : -1));
+-}
 -
--/*
-- * --- User Access Handling Section
-- */
+-unsigned long lookup_dtlb(unsigned long Eaddr)
+-{
+-	unsigned long asid = get_asid();
+-	return (lookup_mmu_side((u64) DTLB_BASE, Eaddr, asid));
+-}
+-
+-unsigned long lookup_itlb(unsigned long Eaddr)
+-{
+-	unsigned long asid = get_asid();
+-	return (lookup_mmu_side((u64) ITLB_BASE, Eaddr, asid));
+-}
+-
+-void print_page(struct page *page)
+-{
+-	printk("  page[%p] -> index 0x%lx,  count 0x%x,  flags 0x%lx\n",
+-	       page, page->index, page_count(page), page->flags);
+-	printk("       address_space = %p, pages =%ld\n", page->mapping,
+-	       page->mapping->nrpages);
 -
+-}
+diff --git a/arch/sh64/lib/io.c b/arch/sh64/lib/io.c
+deleted file mode 100644
+index a3f3a2b..0000000
+--- a/arch/sh64/lib/io.c
++++ /dev/null
+@@ -1,128 +0,0 @@
 -/*
-- * User Access support. It all moved to non inlined Assembler
-- * functions in here.
-- *
-- * __kernel_size_t __copy_user(void *__to, const void *__from,
-- *			       __kernel_size_t __n)
-- *
-- * Inputs:
-- * (r2)  target address
-- * (r3)  source address
-- * (r4)  size in bytes
-- *
-- * Outputs:
-- * (*r2) target data
-- * (r2)  non-copied bytes
+- * Copyright (C) 2000 David J. Mckay (david.mckay at st.com)
 - *
-- * If a fault occurs on the user pointer, bail out early and return the
-- * number of bytes not copied in r2.
-- * Strategy : for large blocks, call a real memcpy function which can
-- * move >1 byte at a time using unaligned ld/st instructions, and can
-- * manipulate the cache using prefetch + alloco to improve the speed
-- * further.  If a fault occurs in that function, just revert to the
-- * byte-by-byte approach used for small blocks; this is rare so the
-- * performance hit for that case does not matter.
+- * May be copied or modified under the terms of the GNU General Public
+- * License.  See linux/COPYING for more information.
 - *
-- * For small blocks it's not worth the overhead of setting up and calling
-- * the memcpy routine; do the copy a byte at a time.
+- * This file contains the I/O routines for use on the overdrive board
 - *
 - */
--	.global	__copy_user
--__copy_user:
--	pta	__copy_user_byte_by_byte, tr1
--	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
--	bge/u	r0, r4, tr1
--	pta copy_user_memcpy, tr0
--	addi	SP, -32, SP
--	/* Save arguments in case we have to fix-up unhandled page fault */
--	st.q	SP, 0, r2
--	st.q	SP, 8, r3
--	st.q	SP, 16, r4
--	st.q	SP, 24, r35 ! r35 is callee-save
--	/* Save LINK in a register to reduce RTS time later (otherwise
--	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
--	ori	LINK, 0, r35
--	blink	tr0, LINK
--
--	/* Copy completed normally if we get back here */
--	ptabs	r35, tr0
--	ld.q	SP, 24, r35
--	/* don't restore r2-r4, pointless */
--	/* set result=r2 to zero as the copy must have succeeded. */
--	or	r63, r63, r2
--	addi	SP, 32, SP
--	blink	tr0, r63 ! RTS
 -
--	.global __copy_user_fixup
--__copy_user_fixup:
--	/* Restore stack frame */
--	ori	r35, 0, LINK
--	ld.q	SP, 24, r35
--	ld.q	SP, 16, r4
--	ld.q	SP,  8, r3
--	ld.q	SP,  0, r2
--	addi	SP, 32, SP
--	/* Fall through to original code, in the 'same' state we entered with */
+-#include <linux/kernel.h>
+-#include <linux/types.h>
+-#include <linux/delay.h>
+-#include <linux/module.h>
+-#include <asm/system.h>
+-#include <asm/processor.h>
+-#include <asm/io.h>
 -
--/* The slow byte-by-byte method is used if the fast copy traps due to a bad
--   user address.  In that rare case, the speed drop can be tolerated. */
--__copy_user_byte_by_byte:
--	pta	___copy_user_exit, tr1
--	pta	___copy_user1, tr0
--	beq/u	r4, r63, tr1	/* early exit for zero length copy */
--	sub	r2, r3, r0
--	addi	r0, -1, r0
+-/*  Now for the string version of these functions */
+-void outsb(unsigned long port, const void *addr, unsigned long count)
+-{
+-	int i;
+-	unsigned char *p = (unsigned char *) addr;
 -
--___copy_user1:
--	ld.b	r3, 0, r5		/* Fault address 1 */
+-	for (i = 0; i < count; i++, p++) {
+-		outb(*p, port);
+-	}
+-}
+-EXPORT_SYMBOL(outsb);
 -
--	/* Could rewrite this to use just 1 add, but the second comes 'free'
--	   due to load latency */
--	addi	r3, 1, r3
--	addi	r4, -1, r4		/* No real fixup required */
--___copy_user2:
--	stx.b	r3, r0, r5		/* Fault address 2 */
--	bne     r4, ZERO, tr0
+-void insb(unsigned long port, void *addr, unsigned long count)
+-{
+-	int i;
+-	unsigned char *p = (unsigned char *) addr;
 -
--___copy_user_exit:
--	or	r4, ZERO, r2
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-	for (i = 0; i < count; i++, p++) {
+-		*p = inb(port);
+-	}
+-}
+-EXPORT_SYMBOL(insb);
 -
--/*
-- * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
-- *
-- * Inputs:
-- * (r2)  target address
-- * (r3)  size in bytes
-- *
-- * Outputs:
-- * (*r2) zero-ed target data
-- * (r2)  non-zero-ed bytes
+-/* For the 16 and 32 bit string functions, we have to worry about alignment.
+- * The SH does not do unaligned accesses, so we have to read as bytes and
+- * then write as a word or dword.
+- * This can be optimised a lot more, especially in the case where the data
+- * is aligned
 - */
--	.global	__clear_user
--__clear_user:
--	pta	___clear_user_exit, tr1
--	pta	___clear_user1, tr0
--	beq/u	r3, r63, tr1
 -
--___clear_user1:
--	st.b	r2, 0, ZERO		/* Fault address */
--	addi	r2, 1, r2
--	addi	r3, -1, r3		/* No real fixup required */
--	bne     r3, ZERO, tr0
+-void outsw(unsigned long port, const void *addr, unsigned long count)
+-{
+-	int i;
+-	unsigned short tmp;
+-	unsigned char *p = (unsigned char *) addr;
 -
--___clear_user_exit:
--	or	r3, ZERO, r2
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-	for (i = 0; i < count; i++, p += 2) {
+-		tmp = (*p) | ((*(p + 1)) << 8);
+-		outw(tmp, port);
+-	}
+-}
+-EXPORT_SYMBOL(outsw);
 -
+-void insw(unsigned long port, void *addr, unsigned long count)
+-{
+-	int i;
+-	unsigned short tmp;
+-	unsigned char *p = (unsigned char *) addr;
 -
--/*
-- * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
-- *			   int __count)
-- *
-- * Inputs:
-- * (r2)  target address
-- * (r3)  source address
-- * (r4)  maximum size in bytes
-- *
-- * Outputs:
-- * (*r2) copied data
-- * (r2)  -EFAULT (in case of faulting)
-- *       copied data (otherwise)
-- */
--	.global	__strncpy_from_user
--__strncpy_from_user:
--	pta	___strncpy_from_user1, tr0
--	pta	___strncpy_from_user_done, tr1
--	or	r4, ZERO, r5		/* r5 = original count */
--	beq/u	r4, r63, tr1		/* early exit if r4==0 */
--	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
--	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
+-	for (i = 0; i < count; i++, p += 2) {
+-		tmp = inw(port);
+-		p[0] = tmp & 0xff;
+-		p[1] = (tmp >> 8) & 0xff;
+-	}
+-}
+-EXPORT_SYMBOL(insw);
 -
--___strncpy_from_user1:
--	ld.b	r3, 0, r7		/* Fault address: only in reading */
--	st.b	r2, 0, r7
--	addi	r2, 1, r2
--	addi	r3, 1, r3
--	beq/u	ZERO, r7, tr1
--	addi	r4, -1, r4		/* return real number of copied bytes */
--	bne/l	ZERO, r4, tr0
+-void outsl(unsigned long port, const void *addr, unsigned long count)
+-{
+-	int i;
+-	unsigned tmp;
+-	unsigned char *p = (unsigned char *) addr;
 -
--___strncpy_from_user_done:
--	sub	r5, r4, r6		/* If done, return copied */
+-	for (i = 0; i < count; i++, p += 4) {
+-		tmp = (*p) | ((*(p + 1)) << 8) | ((*(p + 2)) << 16) |
+-		    ((*(p + 3)) << 24);
+-		outl(tmp, port);
+-	}
+-}
+-EXPORT_SYMBOL(outsl);
 -
--___strncpy_from_user_exit:
--	or	r6, ZERO, r2
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-void insl(unsigned long port, void *addr, unsigned long count)
+-{
+-	int i;
+-	unsigned tmp;
+-	unsigned char *p = (unsigned char *) addr;
 -
--/*
-- * extern long __strnlen_user(const char *__s, long __n)
-- *
-- * Inputs:
-- * (r2)  source address
-- * (r3)  source size in bytes
-- *
-- * Outputs:
-- * (r2)  -EFAULT (in case of faulting)
-- *       string length (otherwise)
-- */
--	.global	__strnlen_user
--__strnlen_user:
--	pta	___strnlen_user_set_reply, tr0
--	pta	___strnlen_user1, tr1
--	or	ZERO, ZERO, r5		/* r5 = counter */
--	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
--	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
--	beq	r3, ZERO, tr0
+-	for (i = 0; i < count; i++, p += 4) {
+-		tmp = inl(port);
+-		p[0] = tmp & 0xff;
+-		p[1] = (tmp >> 8) & 0xff;
+-		p[2] = (tmp >> 16) & 0xff;
+-		p[3] = (tmp >> 24) & 0xff;
 -
--___strnlen_user1:
--	ldx.b	r2, r5, r7		/* Fault address: only in reading */
--	addi	r3, -1, r3		/* No real fixup */
--	addi	r5, 1, r5
--	beq	r3, ZERO, tr0
--	bne	r7, ZERO, tr1
--! The line below used to be active.  This led to a junk byte lying between each pair
--! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
--! via the argv and envp arguments to main, it meant the 'flat' representation visible through
--! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
--!	addi	r5, 1, r5		/* Include '\0' */
+-	}
+-}
+-EXPORT_SYMBOL(insl);
 -
--___strnlen_user_set_reply:
--	or	r5, ZERO, r6		/* If done, return counter */
+-void memcpy_toio(void __iomem *to, const void *from, long count)
+-{
+-	unsigned char *p = (unsigned char *) from;
 -
--___strnlen_user_exit:
--	or	r6, ZERO, r2
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-	while (count) {
+-		count--;
+-		writeb(*p++, to++);
+-	}
+-}
+-EXPORT_SYMBOL(memcpy_toio);
+-
+-void memcpy_fromio(void *to, void __iomem *from, long count)
+-{
+-	int i;
+-	unsigned char *p = (unsigned char *) to;
 -
+-	for (i = 0; i < count; i++) {
+-		p[i] = readb(from);
+-		from++;
+-	}
+-}
+-EXPORT_SYMBOL(memcpy_fromio);
+diff --git a/arch/sh64/lib/iomap.c b/arch/sh64/lib/iomap.c
+deleted file mode 100644
+index 253d1e3..0000000
+--- a/arch/sh64/lib/iomap.c
++++ /dev/null
+@@ -1,54 +0,0 @@
 -/*
-- * extern long __get_user_asm_?(void *val, long addr)
+- * arch/sh64/lib/iomap.c
 - *
-- * Inputs:
-- * (r2)  dest address
-- * (r3)  source address (in User Space)
+- * Generic sh64 iomap interface
 - *
-- * Outputs:
-- * (r2)  -EFAULT (faulting)
-- *       0 	 (not faulting)
+- * Copyright (C) 2004  Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
 - */
--	.global	__get_user_asm_b
--__get_user_asm_b:
--	or	r2, ZERO, r4
--	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
+-#include <linux/pci.h>
+-#include <asm/io.h>
 -
--___get_user_asm_b1:
--	ld.b	r3, 0, r5		/* r5 = data */
--	st.b	r4, 0, r5
--	or	ZERO, ZERO, r2
+-void __iomem *__attribute__ ((weak))
+-ioport_map(unsigned long port, unsigned int len)
+-{
+-	return (void __iomem *)port;
+-}
+-EXPORT_SYMBOL(ioport_map);
 -
--___get_user_asm_b_exit:
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-void ioport_unmap(void __iomem *addr)
+-{
+-	/* Nothing .. */
+-}
+-EXPORT_SYMBOL(ioport_unmap);
 -
+-#ifdef CONFIG_PCI
+-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+-{
+-	unsigned long start = pci_resource_start(dev, bar);
+-	unsigned long len = pci_resource_len(dev, bar);
+-	unsigned long flags = pci_resource_flags(dev, bar);
 -
--	.global	__get_user_asm_w
--__get_user_asm_w:
--	or	r2, ZERO, r4
--	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
+-	if (!len)
+-		return NULL;
+-	if (max && len > max)
+-		len = max;
+-	if (flags & IORESOURCE_IO)
+-		return ioport_map(start + pciio_virt, len);
+-	if (flags & IORESOURCE_MEM)
+-		return (void __iomem *)start;
 -
--___get_user_asm_w1:
--	ld.w	r3, 0, r5		/* r5 = data */
--	st.w	r4, 0, r5
--	or	ZERO, ZERO, r2
+-	/* What? */
+-	return NULL;
+-}
+-EXPORT_SYMBOL(pci_iomap);
 -
--___get_user_asm_w_exit:
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+-{
+-	/* Nothing .. */
+-}
+-EXPORT_SYMBOL(pci_iounmap);
+-#endif
+diff --git a/arch/sh64/lib/memcpy.c b/arch/sh64/lib/memcpy.c
+deleted file mode 100644
+index fba436a..0000000
+--- a/arch/sh64/lib/memcpy.c
++++ /dev/null
+@@ -1,81 +0,0 @@
+-/*
+- * Copyright (C) 2002 Mark Debbage (Mark.Debbage at superh.com)
+- *
+- * May be copied or modified under the terms of the GNU General Public
+- * License.  See linux/COPYING for more information.
+- *
+- */
 -
+-#include <linux/types.h>
+-#include <asm/string.h>
 -
--	.global	__get_user_asm_l
--__get_user_asm_l:
--	or	r2, ZERO, r4
--	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
+-// This is a simplistic optimization of memcpy to increase the
+-// granularity of access beyond one byte using aligned
+-// loads and stores. This is not an optimal implementation
+-// for SH-5 (especially with regard to prefetching and the cache),
+-// and a better version should be provided later ...
 -
--___get_user_asm_l1:
--	ld.l	r3, 0, r5		/* r5 = data */
--	st.l	r4, 0, r5
--	or	ZERO, ZERO, r2
+-void *memcpy(void *dest, const void *src, size_t count)
+-{
+-	char *d = (char *) dest, *s = (char *) src;
 -
--___get_user_asm_l_exit:
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-	if (count >= 32) {
+-		int i = 8 - (((unsigned long) d) & 0x7);
 -
+-		if (i != 8)
+-			while (i-- && count--) {
+-				*d++ = *s++;
+-			}
 -
--	.global	__get_user_asm_q
--__get_user_asm_q:
--	or	r2, ZERO, r4
--	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
+-		if (((((unsigned long) d) & 0x7) == 0) &&
+-		    ((((unsigned long) s) & 0x7) == 0)) {
+-			while (count >= 32) {
+-				unsigned long long t1, t2, t3, t4;
+-				t1 = *(unsigned long long *) (s);
+-				t2 = *(unsigned long long *) (s + 8);
+-				t3 = *(unsigned long long *) (s + 16);
+-				t4 = *(unsigned long long *) (s + 24);
+-				*(unsigned long long *) (d) = t1;
+-				*(unsigned long long *) (d + 8) = t2;
+-				*(unsigned long long *) (d + 16) = t3;
+-				*(unsigned long long *) (d + 24) = t4;
+-				d += 32;
+-				s += 32;
+-				count -= 32;
+-			}
+-			while (count >= 8) {
+-				*(unsigned long long *) d =
+-				    *(unsigned long long *) s;
+-				d += 8;
+-				s += 8;
+-				count -= 8;
+-			}
+-		}
 -
--___get_user_asm_q1:
--	ld.q	r3, 0, r5		/* r5 = data */
--	st.q	r4, 0, r5
--	or	ZERO, ZERO, r2
+-		if (((((unsigned long) d) & 0x3) == 0) &&
+-		    ((((unsigned long) s) & 0x3) == 0)) {
+-			while (count >= 4) {
+-				*(unsigned long *) d = *(unsigned long *) s;
+-				d += 4;
+-				s += 4;
+-				count -= 4;
+-			}
+-		}
 -
--___get_user_asm_q_exit:
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-		if (((((unsigned long) d) & 0x1) == 0) &&
+-		    ((((unsigned long) s) & 0x1) == 0)) {
+-			while (count >= 2) {
+-				*(unsigned short *) d = *(unsigned short *) s;
+-				d += 2;
+-				s += 2;
+-				count -= 2;
+-			}
+-		}
+-	}
+-
+-	while (count--) {
+-		*d++ = *s++;
+-	}
 -
+-	return d;
+-}
+diff --git a/arch/sh64/lib/page_clear.S b/arch/sh64/lib/page_clear.S
+deleted file mode 100644
+index ac0111d..0000000
+--- a/arch/sh64/lib/page_clear.S
++++ /dev/null
+@@ -1,54 +0,0 @@
 -/*
-- * extern long __put_user_asm_?(void *pval, long addr)
-- *
-- * Inputs:
-- * (r2)  kernel pointer to value
-- * (r3)  dest address (in User Space)
-- *
-- * Outputs:
-- * (r2)  -EFAULT (faulting)
-- *       0 	 (not faulting)
-- */
--	.global	__put_user_asm_b
--__put_user_asm_b:
--	ld.b	r2, 0, r4		/* r4 = data */
--	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
+-   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
 -
--___put_user_asm_b1:
--	st.b	r3, 0, r4
--	or	ZERO, ZERO, r2
+-   This file is subject to the terms and conditions of the GNU General Public
+-   License.  See the file "COPYING" in the main directory of this archive
+-   for more details.
 -
--___put_user_asm_b_exit:
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-   Tight version of memset for the case of just clearing a page.  It turns out
+-   that having the alloco's spaced out slightly due to the increment/branch
+-   pair causes them to contend less for access to the cache.  Similarly,
+-   keeping the stores apart from the allocos causes less contention.  => Do two
+-   separate loops.  Do multiple stores per loop to amortise the
+-   increment/branch cost a little.
 -
+-   Parameters:
+-   r2 : source effective address (start of page)
 -
--	.global	__put_user_asm_w
--__put_user_asm_w:
--	ld.w	r2, 0, r4		/* r4 = data */
--	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
+-   Always clears 4096 bytes.
 -
--___put_user_asm_w1:
--	st.w	r3, 0, r4
--	or	ZERO, ZERO, r2
+-   Note : alloco guarded by synco to avoid TAKum03020 erratum
 -
--___put_user_asm_w_exit:
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-*/
 -
+-	.section .text..SHmedia32,"ax"
+-	.little
 -
--	.global	__put_user_asm_l
--__put_user_asm_l:
--	ld.l	r2, 0, r4		/* r4 = data */
--	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
+-	.balign 8
+-	.global sh64_page_clear
+-sh64_page_clear:
+-	pta/l 1f, tr1
+-	pta/l 2f, tr2
+-	ptabs/l r18, tr0
 -
--___put_user_asm_l1:
--	st.l	r3, 0, r4
--	or	ZERO, ZERO, r2
+-	movi 4096, r7
+-	add  r2, r7, r7
+-	add  r2, r63, r6
+-1:
+-	alloco r6, 0
+-	synco	! TAKum03020
+-	addi	r6, 32, r6
+-	bgt/l	r7, r6, tr1
 -
--___put_user_asm_l_exit:
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-	add  r2, r63, r6
+-2:
+-	st.q  r6,   0, r63
+-	st.q  r6,   8, r63
+-	st.q  r6,  16, r63
+-	st.q  r6,  24, r63
+-	addi r6, 32, r6
+-	bgt/l r7, r6, tr2
 -
+-	blink tr0, r63
 -
--	.global	__put_user_asm_q
--__put_user_asm_q:
--	ld.q	r2, 0, r4		/* r4 = data */
--	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
 -
--___put_user_asm_q1:
--	st.q	r3, 0, r4
--	or	ZERO, ZERO, r2
+diff --git a/arch/sh64/lib/page_copy.S b/arch/sh64/lib/page_copy.S
+deleted file mode 100644
+index e159c3c..0000000
+--- a/arch/sh64/lib/page_copy.S
++++ /dev/null
+@@ -1,91 +0,0 @@
+-/*
+-   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
 -
--___put_user_asm_q_exit:
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+-   This file is subject to the terms and conditions of the GNU General Public
+-   License.  See the file "COPYING" in the main directory of this archive
+-   for more details.
 -
--panic_stash_regs:
--	/* The idea is : when we get an unhandled panic, we dump the registers
--	   to a known memory location, then just sit in a tight loop.
--	   This allows the human to look at the memory region through the GDB
--	   session (assuming the debug module's SHwy initiator isn't locked up
--	   or anything), to hopefully analyze the cause of the panic. */
+-   Tight version of memcpy for the case of just copying a page.
+-   Prefetch strategy empirically optimised against RTL simulations
+-   of SH5-101 cut2 eval chip with Cayman board DDR memory.
 -
--	/* On entry, former r15 (SP) is in DCR
--	   former r0  is at resvec_saved_area + 0
--	   former r1  is at resvec_saved_area + 8
--	   former tr0 is at resvec_saved_area + 32
--	   DCR is the only register whose value is lost altogether.
--	*/
+-   Parameters:
+-   r2 : source effective address (start of page)
+-   r3 : destination effective address (start of page)
 -
--	movi	0xffffffff80000000, r0 ! phy of dump area
--	ld.q	SP, 0x000, r1	! former r0
--	st.q	r0,  0x000, r1
--	ld.q	SP, 0x008, r1	! former r1
--	st.q	r0,  0x008, r1
--	st.q	r0,  0x010, r2
--	st.q	r0,  0x018, r3
--	st.q	r0,  0x020, r4
--	st.q	r0,  0x028, r5
--	st.q	r0,  0x030, r6
--	st.q	r0,  0x038, r7
--	st.q	r0,  0x040, r8
--	st.q	r0,  0x048, r9
--	st.q	r0,  0x050, r10
--	st.q	r0,  0x058, r11
--	st.q	r0,  0x060, r12
--	st.q	r0,  0x068, r13
--	st.q	r0,  0x070, r14
--	getcon	dcr, r14
--	st.q	r0,  0x078, r14
--	st.q	r0,  0x080, r16
--	st.q	r0,  0x088, r17
--	st.q	r0,  0x090, r18
--	st.q	r0,  0x098, r19
--	st.q	r0,  0x0a0, r20
--	st.q	r0,  0x0a8, r21
--	st.q	r0,  0x0b0, r22
--	st.q	r0,  0x0b8, r23
--	st.q	r0,  0x0c0, r24
--	st.q	r0,  0x0c8, r25
--	st.q	r0,  0x0d0, r26
--	st.q	r0,  0x0d8, r27
--	st.q	r0,  0x0e0, r28
--	st.q	r0,  0x0e8, r29
--	st.q	r0,  0x0f0, r30
--	st.q	r0,  0x0f8, r31
--	st.q	r0,  0x100, r32
--	st.q	r0,  0x108, r33
--	st.q	r0,  0x110, r34
--	st.q	r0,  0x118, r35
--	st.q	r0,  0x120, r36
--	st.q	r0,  0x128, r37
--	st.q	r0,  0x130, r38
--	st.q	r0,  0x138, r39
--	st.q	r0,  0x140, r40
--	st.q	r0,  0x148, r41
--	st.q	r0,  0x150, r42
--	st.q	r0,  0x158, r43
--	st.q	r0,  0x160, r44
--	st.q	r0,  0x168, r45
--	st.q	r0,  0x170, r46
--	st.q	r0,  0x178, r47
--	st.q	r0,  0x180, r48
--	st.q	r0,  0x188, r49
--	st.q	r0,  0x190, r50
--	st.q	r0,  0x198, r51
--	st.q	r0,  0x1a0, r52
--	st.q	r0,  0x1a8, r53
--	st.q	r0,  0x1b0, r54
--	st.q	r0,  0x1b8, r55
--	st.q	r0,  0x1c0, r56
--	st.q	r0,  0x1c8, r57
--	st.q	r0,  0x1d0, r58
--	st.q	r0,  0x1d8, r59
--	st.q	r0,  0x1e0, r60
--	st.q	r0,  0x1e8, r61
--	st.q	r0,  0x1f0, r62
--	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...
+-   Always copies 4096 bytes.
 -
--	ld.q	SP, 0x020, r1  ! former tr0
--	st.q	r0,  0x200, r1
--	gettr	tr1, r1
--	st.q	r0,  0x208, r1
--	gettr	tr2, r1
--	st.q	r0,  0x210, r1
--	gettr	tr3, r1
--	st.q	r0,  0x218, r1
--	gettr	tr4, r1
--	st.q	r0,  0x220, r1
--	gettr	tr5, r1
--	st.q	r0,  0x228, r1
--	gettr	tr6, r1
--	st.q	r0,  0x230, r1
--	gettr	tr7, r1
--	st.q	r0,  0x238, r1
+-   Points to review.
+-   * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
+-     It seems like the prefetch needs to be at least 4 lines ahead to get
+-     the data into the cache in time, and the allocos contend with outstanding
+-     prefetches for the same cache set, so it's better to have the numbers
+-     different.
+-   */
 -
--	getcon	sr,  r1
--	getcon	ssr,  r2
--	getcon	pssr,  r3
--	getcon	spc,  r4
--	getcon	pspc,  r5
--	getcon	intevt,  r6
--	getcon	expevt,  r7
--	getcon	pexpevt,  r8
--	getcon	tra,  r9
--	getcon	tea,  r10
--	getcon	kcr0, r11
--	getcon	kcr1, r12
--	getcon	vbr,  r13
--	getcon	resvec,  r14
+-	.section .text..SHmedia32,"ax"
+-	.little
 -
--	st.q	r0,  0x240, r1
--	st.q	r0,  0x248, r2
--	st.q	r0,  0x250, r3
--	st.q	r0,  0x258, r4
--	st.q	r0,  0x260, r5
--	st.q	r0,  0x268, r6
--	st.q	r0,  0x270, r7
--	st.q	r0,  0x278, r8
--	st.q	r0,  0x280, r9
--	st.q	r0,  0x288, r10
--	st.q	r0,  0x290, r11
--	st.q	r0,  0x298, r12
--	st.q	r0,  0x2a0, r13
--	st.q	r0,  0x2a8, r14
+-	.balign 8
+-	.global sh64_page_copy
+-sh64_page_copy:
 -
--	getcon	SPC,r2
--	getcon	SSR,r3
--	getcon	EXPEVT,r4
--	/* Prepare to jump to C - physical address */
--	movi	panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
--	ori	r1, 1, r1
--	ptabs   r1, tr0
--	getcon	DCR, SP
--	blink	tr0, ZERO
--	nop
--	nop
--	nop
--	nop
+-	/* Copy 4096 bytes worth of data from r2 to r3.
+-	   Do prefetches 4 lines ahead.
+-	   Do alloco 2 lines ahead */
 -
+-	pta 1f, tr1
+-	pta 2f, tr2
+-	pta 3f, tr3
+-	ptabs r18, tr0
 -
+-#if 0
+-	/* TAKum03020 */
+-	ld.q r2, 0x00, r63
+-	ld.q r2, 0x20, r63
+-	ld.q r2, 0x40, r63
+-	ld.q r2, 0x60, r63
+-#endif
+-	alloco r3, 0x00
+-	synco		! TAKum03020
+-	alloco r3, 0x20
+-	synco		! TAKum03020
 -
+-	movi 3968, r6
+-	add  r3, r6, r6
+-	addi r6, 64, r7
+-	addi r7, 64, r8
+-	sub r2, r3, r60
+-	addi r60, 8, r61
+-	addi r61, 8, r62
+-	addi r62, 8, r23
+-	addi r60, 0x80, r22
 -
--/*
-- * --- Signal Handling Section
-- */
+-/* Minimal code size.  The extra branches inside the loop don't cost much
+-   because they overlap with the time spent waiting for prefetches to
+-   complete. */
+-1:
+-#if 0
+-	/* TAKum03020 */
+-	bge/u r3, r6, tr2  ! skip prefetch for last 4 lines
+-	ldx.q r3, r22, r63 ! prefetch 4 lines hence
+-#endif
+-2:
+-	bge/u r3, r7, tr3  ! skip alloco for last 2 lines
+-	alloco r3, 0x40    ! alloc destination line 2 lines ahead
+-	synco		! TAKum03020
+-3:
+-	ldx.q r3, r60, r36
+-	ldx.q r3, r61, r37
+-	ldx.q r3, r62, r38
+-	ldx.q r3, r23, r39
+-	st.q  r3,   0, r36
+-	st.q  r3,   8, r37
+-	st.q  r3,  16, r38
+-	st.q  r3,  24, r39
+-	addi r3, 32, r3
+-	bgt/l r8, r3, tr1
 -
+-	blink tr0, r63	   ! return
+-
+-
+diff --git a/arch/sh64/lib/panic.c b/arch/sh64/lib/panic.c
+deleted file mode 100644
+index c9eb1cb..0000000
+--- a/arch/sh64/lib/panic.c
++++ /dev/null
+@@ -1,58 +0,0 @@
 -/*
-- * extern long long _sa_default_rt_restorer
-- * extern long long _sa_default_restorer
-- *
-- *		 or, better,
-- *
-- * extern void _sa_default_rt_restorer(void)
-- * extern void _sa_default_restorer(void)
-- *
-- * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
-- * from user space. Copied into user space by signal management.
-- * Both must be quad aligned and 2 quad long (4 instructions).
+- * Copyright (C) 2003  Richard Curnow, SuperH UK Limited
 - *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
 - */
--	.balign 8
--	.global sa_default_rt_restorer
--sa_default_rt_restorer:
--	movi	0x10, r9
--	shori	__NR_rt_sigreturn, r9
--	trapa	r9
--	nop
 -
--	.balign 8
--	.global sa_default_restorer
--sa_default_restorer:
--	movi	0x10, r9
--	shori	__NR_sigreturn, r9
--	trapa	r9
--	nop
+-#include <linux/kernel.h>
+-#include <asm/io.h>
+-#include <asm/registers.h>
 -
--/*
-- * --- __ex_table Section
-- */
+-/* THIS IS A PHYSICAL ADDRESS */
+-#define HDSP2534_ADDR (0x04002100)
 -
--/*
-- * User Access Exception Table.
-- */
--	.section	__ex_table,  "a"
+-#ifdef CONFIG_SH_CAYMAN
 -
--	.global asm_uaccess_start	/* Just a marker */
--asm_uaccess_start:
+-static void poor_mans_delay(void)
+-{
+-	int i;
+-	for (i = 0; i < 2500000; i++) {
+-	}		/* poor man's delay */
+-}
 -
--	.long	___copy_user1, ___copy_user_exit
--	.long	___copy_user2, ___copy_user_exit
--	.long	___clear_user1, ___clear_user_exit
--	.long	___strncpy_from_user1, ___strncpy_from_user_exit
--	.long	___strnlen_user1, ___strnlen_user_exit
--	.long	___get_user_asm_b1, ___get_user_asm_b_exit
--	.long	___get_user_asm_w1, ___get_user_asm_w_exit
--	.long	___get_user_asm_l1, ___get_user_asm_l_exit
--	.long	___get_user_asm_q1, ___get_user_asm_q_exit
--	.long	___put_user_asm_b1, ___put_user_asm_b_exit
--	.long	___put_user_asm_w1, ___put_user_asm_w_exit
--	.long	___put_user_asm_l1, ___put_user_asm_l_exit
--	.long	___put_user_asm_q1, ___put_user_asm_q_exit
+-static void show_value(unsigned long x)
+-{
+-	int i;
+-	unsigned nibble;
+-	for (i = 0; i < 8; i++) {
+-		nibble = ((x >> (i * 4)) & 0xf);
 -
--	.global asm_uaccess_end		/* Just a marker */
--asm_uaccess_end:
+-		ctrl_outb(nibble + ((nibble > 9) ? 55 : 48),
+-			  HDSP2534_ADDR + 0xe0 + ((7 - i) << 2));
+-	}
+-}
 -
+-#endif
 -
+-void
+-panic_handler(unsigned long panicPC, unsigned long panicSSR,
+-	      unsigned long panicEXPEVT)
+-{
+-#ifdef CONFIG_SH_CAYMAN
+-	while (1) {
+-		/* This piece of code displays the PC on the LED display */
+-		show_value(panicPC);
+-		poor_mans_delay();
+-		show_value(panicSSR);
+-		poor_mans_delay();
+-		show_value(panicEXPEVT);
+-		poor_mans_delay();
+-	}
+-#endif
 -
+-	/* Never return from the panic handler */
+-	for (;;) ;
 -
+-}
+diff --git a/arch/sh64/lib/udelay.c b/arch/sh64/lib/udelay.c
+deleted file mode 100644
+index 3276539..0000000
+--- a/arch/sh64/lib/udelay.c
++++ /dev/null
+@@ -1,59 +0,0 @@
 -/*
-- * --- .text.init Section
+- * arch/sh64/lib/udelay.c
+- *
+- * Delay routines, using a pre-computed "loops_per_jiffy" value.
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003, 2004  Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
 - */
+-#include <linux/sched.h>
+-#include <asm/param.h>
 -
--	.section	.text.init, "ax"
+-extern unsigned long loops_per_jiffy;
 -
 -/*
-- * void trap_init (void)
+- * Use only for very small delays (< 1 msec).
 - *
+- * The active part of our cycle counter is only 32-bits wide, and
+- * we're treating the difference between two marks as signed.  On
+- * a 1GHz box, that's about 2 seconds.
 - */
--	.global	trap_init
--trap_init:
--	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
--	st.q	SP, 0, r28
--	st.q	SP, 8, r29
--	st.q	SP, 16, r30
 -
--	/* Set VBR and RESVEC */
--	movi	LVBR_block, r19
--	andi	r19, -4, r19			/* reset MMUOFF + reserved */
--	/* For RESVEC exceptions we force the MMU off, which means we need the
--	   physical address. */
--	movi	LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
--	andi	r20, -4, r20			/* reset reserved */
--	ori	r20, 1, r20			/* set MMUOFF */
--	putcon	r19, VBR
--	putcon	r20, RESVEC
+-void __delay(int loops)
+-{
+-	long long dummy;
+-	__asm__ __volatile__("gettr	tr0, %1\n\t"
+-			     "pta	$+4, tr0\n\t"
+-			     "addi	%0, -1, %0\n\t"
+-			     "bne	%0, r63, tr0\n\t"
+-			     "ptabs	%1, tr0\n\t":"=r"(loops),
+-			     "=r"(dummy)
+-			     :"0"(loops));
+-}
 -
--	/* Sanity check */
--	movi	LVBR_block_end, r21
--	andi	r21, -4, r21
--	movi	BLOCK_SIZE, r29			/* r29 = expected size */
--	or	r19, ZERO, r30
--	add	r19, r29, r19
+-void __udelay(unsigned long long usecs, unsigned long lpj)
+-{
+-	usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj;
+-	__delay((long long) usecs >> 32);
+-}
 -
--	/*
--	 * Ugly, but better loop forever now than crash afterwards.
--	 * We should print a message, but if we touch LVBR or
--	 * LRESVEC blocks we should not be surprised if we get stuck
--	 * in trap_init().
--	 */
--	pta	trap_init_loop, tr1
--	gettr	tr1, r28			/* r28 = trap_init_loop */
--	sub	r21, r30, r30			/* r30 = actual size */
+-void __ndelay(unsigned long long nsecs, unsigned long lpj)
+-{
+-	nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj;
+-	__delay((long long) nsecs >> 32);
+-}
 -
--	/*
--	 * VBR/RESVEC handlers overlap by being bigger than
--	 * allowed. Very bad. Just loop forever.
--	 * (r28) panic/loop address
--	 * (r29) expected size
--	 * (r30) actual size
--	 */
--trap_init_loop:
--	bne	r19, r21, tr1
+-void udelay(unsigned long usecs)
+-{
+-	__udelay(usecs, loops_per_jiffy);
+-}
 -
--	/* Now that exception vectors are set up reset SR.BL */
--	getcon 	SR, r22
--	movi	SR_UNBLOCK_EXC, r23
--	and	r22, r23, r22
--	putcon	r22, SR
+-void ndelay(unsigned long nsecs)
+-{
+-	__ndelay(nsecs, loops_per_jiffy);
+-}
 -
--	addi	SP, 24, SP
--	ptabs	LINK, tr0
--	blink	tr0, ZERO
+diff --git a/arch/sh64/mach-cayman/Makefile b/arch/sh64/mach-cayman/Makefile
+deleted file mode 100644
+index 67a2258..0000000
+--- a/arch/sh64/mach-cayman/Makefile
++++ /dev/null
+@@ -1,11 +0,0 @@
+-#
+-# Makefile for the Hitachi Cayman specific parts of the kernel
+-#
+-# Note! Dependencies are done automagically by 'make dep', which also
+-# removes any old dependencies. DON'T put your own dependencies here
+-# unless it's something special (ie not a .c file).
+-#
 -
-diff --git a/arch/sh64/kernel/fpu.c b/arch/sh64/kernel/fpu.c
+-obj-y := setup.o irq.o iomap.o
+-obj-$(CONFIG_HEARTBEAT)	+= led.o
+-
+diff --git a/arch/sh64/mach-cayman/iomap.c b/arch/sh64/mach-cayman/iomap.c
 deleted file mode 100644
-index 8ad4ed6..0000000
---- a/arch/sh64/kernel/fpu.c
+index a5c645f..0000000
+--- a/arch/sh64/mach-cayman/iomap.c
 +++ /dev/null
-@@ -1,170 +0,0 @@
+@@ -1,22 +0,0 @@
+-/*
+- * arch/sh64/mach-cayman/iomap.c
+- *
+- * Cayman iomap interface
+- *
+- * Copyright (C) 2004  Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-#include <asm/io.h>
+-#include <asm/cayman.h>
+-
+-void __iomem *ioport_map(unsigned long port, unsigned int len)
+-{
+-	if (port < 0x400)
+-		return (void __iomem *)((port << 2) | smsc_superio_virt);
+-
+-	return (void __iomem *)port;
+-}
+-
+diff --git a/arch/sh64/mach-cayman/irq.c b/arch/sh64/mach-cayman/irq.c
+deleted file mode 100644
+index aaad36d..0000000
+--- a/arch/sh64/mach-cayman/irq.c
++++ /dev/null
+@@ -1,195 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/fpu.c
+- * arch/sh64/kernel/irq_cayman.c
 - *
-- * Copyright (C) 2001  Manuela Cirronis, Paolo Alberelli
-- * Copyright (C) 2002  STMicroelectronics Limited
-- *   Author : Stuart Menefy
+- * SH-5 Cayman Interrupt Support
 - *
-- * Started from SH4 version:
-- *   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
+- * This file handles the board specific parts of the Cayman interrupt system
 - *
+- * Copyright (C) 2002 Stuart Menefy
 - */
 -
--#include <linux/sched.h>
--#include <linux/signal.h>
--#include <asm/processor.h>
--#include <asm/user.h>
+-#include <asm/irq.h>
+-#include <asm/page.h>
 -#include <asm/io.h>
+-#include <linux/irq.h>
+-#include <linux/interrupt.h>
+-#include <linux/signal.h>
+-#include <asm/cayman.h>
 -
--/*
-- * Initially load the FPU with signalling NANS.  This bit pattern
-- * has the property that no matter whether considered as single or as
-- * double precision, it still represents a signalling NAN.
-- */
--#define sNAN64		0xFFFFFFFFFFFFFFFFULL
--#define sNAN32		0xFFFFFFFFUL
+-unsigned long epld_virt;
 -
--static union sh_fpu_union init_fpuregs = {
--	.hard = {
--	  .fp_regs = { [0 ... 63] = sNAN32 },
--	  .fpscr = FPSCR_INIT
--	}
+-#define EPLD_BASE        0x04002000
+-#define EPLD_STATUS_BASE (epld_virt + 0x10)
+-#define EPLD_MASK_BASE   (epld_virt + 0x20)
+-
+-/* Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto
+-   the same SH-5 interrupt */
+-
+-static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id)
+-{
+-        printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n");
+-	return IRQ_NONE;
+-}
+-
+-static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id)
+-{
+-        printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq);
+-	return IRQ_NONE;
+-}
+-
+-static struct irqaction cayman_action_smsc = {
+-	.name		= "Cayman SMSC Mux",
+-	.handler	= cayman_interrupt_smsc,
+-	.flags		= IRQF_DISABLED,
 -};
 -
--inline void fpsave(struct sh_fpu_hard_struct *fpregs)
+-static struct irqaction cayman_action_pci2 = {
+-	.name		= "Cayman PCI2 Mux",
+-	.handler	= cayman_interrupt_pci2,
+-	.flags		= IRQF_DISABLED,
+-};
+-
+-static void enable_cayman_irq(unsigned int irq)
 -{
--	asm volatile("fst.p     %0, (0*8), fp0\n\t"
--		     "fst.p     %0, (1*8), fp2\n\t"
--		     "fst.p     %0, (2*8), fp4\n\t"
--		     "fst.p     %0, (3*8), fp6\n\t"
--		     "fst.p     %0, (4*8), fp8\n\t"
--		     "fst.p     %0, (5*8), fp10\n\t"
--		     "fst.p     %0, (6*8), fp12\n\t"
--		     "fst.p     %0, (7*8), fp14\n\t"
--		     "fst.p     %0, (8*8), fp16\n\t"
--		     "fst.p     %0, (9*8), fp18\n\t"
--		     "fst.p     %0, (10*8), fp20\n\t"
--		     "fst.p     %0, (11*8), fp22\n\t"
--		     "fst.p     %0, (12*8), fp24\n\t"
--		     "fst.p     %0, (13*8), fp26\n\t"
--		     "fst.p     %0, (14*8), fp28\n\t"
--		     "fst.p     %0, (15*8), fp30\n\t"
--		     "fst.p     %0, (16*8), fp32\n\t"
--		     "fst.p     %0, (17*8), fp34\n\t"
--		     "fst.p     %0, (18*8), fp36\n\t"
--		     "fst.p     %0, (19*8), fp38\n\t"
--		     "fst.p     %0, (20*8), fp40\n\t"
--		     "fst.p     %0, (21*8), fp42\n\t"
--		     "fst.p     %0, (22*8), fp44\n\t"
--		     "fst.p     %0, (23*8), fp46\n\t"
--		     "fst.p     %0, (24*8), fp48\n\t"
--		     "fst.p     %0, (25*8), fp50\n\t"
--		     "fst.p     %0, (26*8), fp52\n\t"
--		     "fst.p     %0, (27*8), fp54\n\t"
--		     "fst.p     %0, (28*8), fp56\n\t"
--		     "fst.p     %0, (29*8), fp58\n\t"
--		     "fst.p     %0, (30*8), fp60\n\t"
--		     "fst.p     %0, (31*8), fp62\n\t"
+-	unsigned long flags;
+-	unsigned long mask;
+-	unsigned int reg;
+-	unsigned char bit;
 -
--		     "fgetscr   fr63\n\t"
--		     "fst.s     %0, (32*8), fr63\n\t"
--		: /* no output */
--		: "r" (fpregs)
--		: "memory");
+-	irq -= START_EXT_IRQS;
+-	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
+-	bit = 1<<(irq % 8);
+-	local_irq_save(flags);
+-	mask = ctrl_inl(reg);
+-	mask |= bit;
+-	ctrl_outl(mask, reg);
+-	local_irq_restore(flags);
 -}
 -
+-void disable_cayman_irq(unsigned int irq)
+-{
+-	unsigned long flags;
+-	unsigned long mask;
+-	unsigned int reg;
+-	unsigned char bit;
 -
--static inline void
--fpload(struct sh_fpu_hard_struct *fpregs)
+-	irq -= START_EXT_IRQS;
+-	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
+-	bit = 1<<(irq % 8);
+-	local_irq_save(flags);
+-	mask = ctrl_inl(reg);
+-	mask &= ~bit;
+-	ctrl_outl(mask, reg);
+-	local_irq_restore(flags);
+-}
+-
+-static void ack_cayman_irq(unsigned int irq)
 -{
--	asm volatile("fld.p     %0, (0*8), fp0\n\t"
--		     "fld.p     %0, (1*8), fp2\n\t"
--		     "fld.p     %0, (2*8), fp4\n\t"
--		     "fld.p     %0, (3*8), fp6\n\t"
--		     "fld.p     %0, (4*8), fp8\n\t"
--		     "fld.p     %0, (5*8), fp10\n\t"
--		     "fld.p     %0, (6*8), fp12\n\t"
--		     "fld.p     %0, (7*8), fp14\n\t"
--		     "fld.p     %0, (8*8), fp16\n\t"
--		     "fld.p     %0, (9*8), fp18\n\t"
--		     "fld.p     %0, (10*8), fp20\n\t"
--		     "fld.p     %0, (11*8), fp22\n\t"
--		     "fld.p     %0, (12*8), fp24\n\t"
--		     "fld.p     %0, (13*8), fp26\n\t"
--		     "fld.p     %0, (14*8), fp28\n\t"
--		     "fld.p     %0, (15*8), fp30\n\t"
--		     "fld.p     %0, (16*8), fp32\n\t"
--		     "fld.p     %0, (17*8), fp34\n\t"
--		     "fld.p     %0, (18*8), fp36\n\t"
--		     "fld.p     %0, (19*8), fp38\n\t"
--		     "fld.p     %0, (20*8), fp40\n\t"
--		     "fld.p     %0, (21*8), fp42\n\t"
--		     "fld.p     %0, (22*8), fp44\n\t"
--		     "fld.p     %0, (23*8), fp46\n\t"
--		     "fld.p     %0, (24*8), fp48\n\t"
--		     "fld.p     %0, (25*8), fp50\n\t"
--		     "fld.p     %0, (26*8), fp52\n\t"
--		     "fld.p     %0, (27*8), fp54\n\t"
--		     "fld.p     %0, (28*8), fp56\n\t"
--		     "fld.p     %0, (29*8), fp58\n\t"
--		     "fld.p     %0, (30*8), fp60\n\t"
+-	disable_cayman_irq(irq);
+-}
 -
--		     "fld.s     %0, (32*8), fr63\n\t"
--		     "fputscr   fr63\n\t"
+-static void end_cayman_irq(unsigned int irq)
+-{
+-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+-		enable_cayman_irq(irq);
+-}
 -
--		     "fld.p     %0, (31*8), fp62\n\t"
--		: /* no output */
--		: "r" (fpregs) );
+-static unsigned int startup_cayman_irq(unsigned int irq)
+-{
+-	enable_cayman_irq(irq);
+-	return 0; /* never anything pending */
 -}
 -
--void fpinit(struct sh_fpu_hard_struct *fpregs)
+-static void shutdown_cayman_irq(unsigned int irq)
 -{
--	*fpregs = init_fpuregs.hard;
+-	disable_cayman_irq(irq);
 -}
 -
--asmlinkage void
--do_fpu_error(unsigned long ex, struct pt_regs *regs)
+-struct hw_interrupt_type cayman_irq_type = {
+-	.typename	= "Cayman-IRQ",
+-	.startup	= startup_cayman_irq,
+-	.shutdown	= shutdown_cayman_irq,
+-	.enable		= enable_cayman_irq,
+-	.disable	= disable_cayman_irq,
+-	.ack		= ack_cayman_irq,
+-	.end		= end_cayman_irq,
+-};
+-
+-int cayman_irq_demux(int evt)
 -{
--	struct task_struct *tsk = current;
+-	int irq = intc_evt_to_irq[evt];
 -
--	regs->pc += 4;
+-	if (irq == SMSC_IRQ) {
+-		unsigned long status;
+-		int i;
 -
--	tsk->thread.trap_no = 11;
--	tsk->thread.error_code = 0;
--	force_sig(SIGFPE, tsk);
+-		status = ctrl_inl(EPLD_STATUS_BASE) &
+-			 ctrl_inl(EPLD_MASK_BASE) & 0xff;
+-		if (status == 0) {
+-			irq = -1;
+-		} else {
+-			for (i=0; i<8; i++) {
+-				if (status & (1<<i))
+-					break;
+-			}
+-			irq = START_EXT_IRQS + i;
+-		}
+-	}
+-
+-	if (irq == PCI2_IRQ) {
+-		unsigned long status;
+-		int i;
+-
+-		status = ctrl_inl(EPLD_STATUS_BASE + 3 * sizeof(u32)) &
+-			 ctrl_inl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff;
+-		if (status == 0) {
+-			irq = -1;
+-		} else {
+-			for (i=0; i<8; i++) {
+-				if (status & (1<<i))
+-					break;
+-			}
+-			irq = START_EXT_IRQS + (3 * 8) + i;
+-		}
+-	}
+-
+-	return irq;
 -}
 -
+-#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
+-int cayman_irq_describe(char* p, int irq)
+-{
+-	if (irq < NR_INTC_IRQS) {
+-		return intc_irq_describe(p, irq);
+-	} else if (irq < NR_INTC_IRQS + 8) {
+-		return sprintf(p, "(SMSC %d)", irq - NR_INTC_IRQS);
+-	} else if ((irq >= NR_INTC_IRQS + 24) && (irq < NR_INTC_IRQS + 32)) {
+-		return sprintf(p, "(PCI2 %d)", irq - (NR_INTC_IRQS + 24));
+-	}
 -
--asmlinkage void
--do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
+-	return 0;
+-}
+-#endif
+-
+-void init_cayman_irq(void)
 -{
--	void die(const char *str, struct pt_regs *regs, long err);
+-	int i;
 -
--	if (! user_mode(regs))
--		die("FPU used in kernel", regs, ex);
+-	epld_virt = onchip_remap(EPLD_BASE, 1024, "EPLD");
+-	if (!epld_virt) {
+-		printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n");
+-		return;
+-	}
 -
--	regs->sr &= ~SR_FD;
+-	for (i=0; i<NR_EXT_IRQS; i++) {
+-		irq_desc[START_EXT_IRQS + i].chip = &cayman_irq_type;
+-	}
 -
--	if (last_task_used_math == current)
+-	/* Setup the SMSC interrupt */
+-	setup_irq(SMSC_IRQ, &cayman_action_smsc);
+-	setup_irq(PCI2_IRQ, &cayman_action_pci2);
+-}
+diff --git a/arch/sh64/mach-cayman/led.c b/arch/sh64/mach-cayman/led.c
+deleted file mode 100644
+index b4e122f..0000000
+--- a/arch/sh64/mach-cayman/led.c
++++ /dev/null
+@@ -1,51 +0,0 @@
+-/*
+- * arch/sh64/mach-cayman/led.c
+- *
+- * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
+- *
+- * May be copied or modified under the terms of the GNU General Public
+- * License.  See linux/COPYING for more information.
+- *
+- * Flash the LEDs
+- */
+-#include <asm/io.h>
+-
+-/*
+-** It is supposed these functions to be used for a low level
+-** debugging (via Cayman LEDs), hence to be available as soon
+-** as possible.
+-** Unfortunately Cayman LEDs relies on Cayman EPLD to be mapped
+-** (this happen when IRQ are initialized... quite late).
+-** These triky dependencies should be removed. Temporary, it
+-** may be enough to NOP until EPLD is mapped.
+-*/
+-
+-extern unsigned long epld_virt;
+-
+-#define LED_ADDR      (epld_virt + 0x008)
+-#define HDSP2534_ADDR (epld_virt + 0x100)
+-
+-void mach_led(int position, int value)
+-{
+-	if (!epld_virt)
 -		return;
 -
--	grab_fpu();
--	if (last_task_used_math != NULL) {
--		/* Other processes fpu state, save away */
--		fpsave(&last_task_used_math->thread.fpu.hard);
--        }
--        last_task_used_math = current;
--        if (used_math()) {
--                fpload(&current->thread.fpu.hard);
--        } else {
--		/* First time FPU user.  */
--		fpload(&init_fpuregs.hard);
--                set_used_math();
--        }
--	release_fpu();
+-	if (value)
+-		ctrl_outl(0, LED_ADDR);
+-	else
+-		ctrl_outl(1, LED_ADDR);
+-
 -}
 -
-diff --git a/arch/sh64/kernel/head.S b/arch/sh64/kernel/head.S
+-void mach_alphanum(int position, unsigned char value)
+-{
+-	if (!epld_virt)
+-		return;
+-
+-	ctrl_outb(value, HDSP2534_ADDR + 0xe0 + (position << 2));
+-}
+-
+-void mach_alphanum_brightness(int setting)
+-{
+-	ctrl_outb(setting & 7, HDSP2534_ADDR + 0xc0);
+-}
+diff --git a/arch/sh64/mach-cayman/setup.c b/arch/sh64/mach-cayman/setup.c
 deleted file mode 100644
-index 186406d..0000000
---- a/arch/sh64/kernel/head.S
+index 726c520..0000000
+--- a/arch/sh64/mach-cayman/setup.c
 +++ /dev/null
-@@ -1,372 +0,0 @@
+@@ -1,239 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/head.S
+- * arch/sh64/mach-cayman/setup.c
 - *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003, 2004  Paul Mundt
+- * SH5 Cayman support
 - *
+- * This file handles the architecture-dependent parts of initialization
 - *
-- * benedict.gaster@superh.com:	 2nd May 2002
-- *    Moved definition of empty_zero_page to its own section allowing
-- *    it to be placed at an absolute address known at load time.
+- * Copyright David J. Mckay.
+- * Needs major work!
 - *
-- * lethal@linux-sh.org:          9th May 2003
-- *    Kill off GLOBAL_NAME() usage.
+- * benedict.gaster@superh.com:	 3rd May 2002
+- *    Added support for ramdisk, removing statically linked romfs at the same time.
 - *
-- * lethal@linux-sh.org:          8th May 2004
-- *    Add early SCIF console DTLB mapping.
+- * lethal@linux-sh.org:          15th May 2003
+- *    Use the generic procfs cpuinfo interface, just return a valid board name.
 - */
--
--
--#include <asm/page.h>
--#include <asm/mmu_context.h>
--#include <asm/cache.h>
--#include <asm/tlb.h>
--#include <asm/processor.h>
--#include <asm/registers.h>
--#include <asm/thread_info.h>
+-#include <linux/init.h>
+-#include <linux/kernel.h>
+-#include <asm/platform.h>
+-#include <asm/irq.h>
+-#include <asm/io.h>
 -
 -/*
-- * MMU defines: TLB boundaries.
+- * Platform Dependent Interrupt Priorities.
 - */
 -
--#define MMUIR_FIRST	ITLB_FIXED
--#define MMUIR_END	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
--#define MMUIR_STEP	TLB_STEP
--
--#define MMUDR_FIRST	DTLB_FIXED
--#define MMUDR_END	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
--#define MMUDR_STEP	TLB_STEP
+-/* Using defaults defined in irq.h */
+-#define	RES NO_PRIORITY		/* Disabled */
+-#define IR0 IRL0_PRIORITY	/* IRLs */
+-#define IR1 IRL1_PRIORITY
+-#define IR2 IRL2_PRIORITY
+-#define IR3 IRL3_PRIORITY
+-#define PCA INTA_PRIORITY	/* PCI Ints */
+-#define PCB INTB_PRIORITY
+-#define PCC INTC_PRIORITY
+-#define PCD INTD_PRIORITY
+-#define SER TOP_PRIORITY
+-#define ERR TOP_PRIORITY
+-#define PW0 TOP_PRIORITY
+-#define PW1 TOP_PRIORITY
+-#define PW2 TOP_PRIORITY
+-#define PW3 TOP_PRIORITY
+-#define DM0 NO_PRIORITY		/* DMA Ints */
+-#define DM1 NO_PRIORITY
+-#define DM2 NO_PRIORITY
+-#define DM3 NO_PRIORITY
+-#define DAE NO_PRIORITY
+-#define TU0 TIMER_PRIORITY	/* TMU Ints */
+-#define TU1 NO_PRIORITY
+-#define TU2 NO_PRIORITY
+-#define TI2 NO_PRIORITY
+-#define ATI NO_PRIORITY		/* RTC Ints */
+-#define PRI NO_PRIORITY
+-#define CUI RTC_PRIORITY
+-#define ERI SCIF_PRIORITY	/* SCIF Ints */
+-#define RXI SCIF_PRIORITY
+-#define BRI SCIF_PRIORITY
+-#define TXI SCIF_PRIORITY
+-#define ITI TOP_PRIORITY	/* WDT Ints */
 -
--/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
--#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
--#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
--#endif
+-/* Setup for the SMSC FDC37C935 */
+-#define SMSC_SUPERIO_BASE	0x04000000
+-#define SMSC_CONFIG_PORT_ADDR	0x3f0
+-#define SMSC_INDEX_PORT_ADDR	SMSC_CONFIG_PORT_ADDR
+-#define SMSC_DATA_PORT_ADDR	0x3f1
 -
--/*
-- * MMU defines: Fixed TLBs.
-- */
--/* Deal safely with the case where the base of RAM is not 512Mb aligned */
+-#define SMSC_ENTER_CONFIG_KEY	0x55
+-#define SMSC_EXIT_CONFIG_KEY	0xaa
 -
--#define ALIGN_512M_MASK (0xffffffffe0000000)
--#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
--#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
+-#define SMCS_LOGICAL_DEV_INDEX	0x07
+-#define SMSC_DEVICE_ID_INDEX	0x20
+-#define SMSC_DEVICE_REV_INDEX	0x21
+-#define SMSC_ACTIVATE_INDEX	0x30
+-#define SMSC_PRIMARY_BASE_INDEX  0x60
+-#define SMSC_SECONDARY_BASE_INDEX 0x62
+-#define SMSC_PRIMARY_INT_INDEX	0x70
+-#define SMSC_SECONDARY_INT_INDEX 0x72
 -
--#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
--			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+-#define SMSC_IDE1_DEVICE	1
+-#define SMSC_KEYBOARD_DEVICE	7
+-#define SMSC_CONFIG_REGISTERS	8
 -
--#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
--			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
+-#define SMSC_SUPERIO_READ_INDEXED(index) ({ \
+-	outb((index), SMSC_INDEX_PORT_ADDR); \
+-	inb(SMSC_DATA_PORT_ADDR); })
+-#define SMSC_SUPERIO_WRITE_INDEXED(val, index) ({ \
+-	outb((index), SMSC_INDEX_PORT_ADDR); \
+-	outb((val),   SMSC_DATA_PORT_ADDR); })
 -
--#define MMUDR_CACHED_H	0x0000000000000003 | ALIGNED_EFFECTIVE
--			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
--#define MMUDR_CACHED_L	0x000000000000015a | ALIGNED_PHYSICAL
--			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
+-#define IDE1_PRIMARY_BASE	0x01f0
+-#define IDE1_SECONDARY_BASE	0x03f6
 -
--#ifdef CONFIG_ICACHE_DISABLED
--#define	ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
--#else
--#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
--#endif
--#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
+-unsigned long smsc_superio_virt;
 -
--#if defined (CONFIG_DCACHE_DISABLED)
--#define	OCCR0_INIT_VAL	OCCR0_OFF			   /* D-cache: off  */
--#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
--#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WT	   /* D-cache: on,   */
--							   /* WT, invalidate */
--#elif defined (CONFIG_DCACHE_WRITE_BACK)
--#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	   /* D-cache: on,   */
--							   /* WB, invalidate */
--#else
--#error preprocessor flag CONFIG_DCACHE_... not recognized!
--#endif
+-/*
+- * Platform dependent structures: maps and parms block.
+- */
+-struct resource io_resources[] = {
+-	/* To be updated with external devices */
+-};
 -
--#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			   /* No locking     */
+-struct resource kram_resources[] = {
+-	/* These must be last in the array */
+-	{ .name = "Kernel code", .start = 0, .end = 0 },
+-	/* These must be last in the array */
+-	{ .name = "Kernel data", .start = 0, .end = 0 }
+-};
 -
--	.section	.empty_zero_page, "aw"
--	.global empty_zero_page
+-struct resource xram_resources[] = {
+-	/* To be updated with external devices */
+-};
 -
--empty_zero_page:
--	.long	1		/* MOUNT_ROOT_RDONLY */
--	.long	0		/* RAMDISK_FLAGS */
--	.long	0x0200		/* ORIG_ROOT_DEV */
--	.long	1		/* LOADER_TYPE */
--	.long	0x00800000	/* INITRD_START */
--	.long	0x00800000	/* INITRD_SIZE */
--	.long	0
+-struct resource rom_resources[] = {
+-	/* To be updated with external devices */
+-};
 -
--	.text
--	.balign 4096,0,4096
+-struct sh64_platform platform_parms = {
+-	.readonly_rootfs =	1,
+-	.initial_root_dev =	0x0100,
+-	.loader_type =		1,
+-	.io_res_p =		io_resources,
+-	.io_res_count =		ARRAY_SIZE(io_resources),
+-	.kram_res_p =		kram_resources,
+-	.kram_res_count =	ARRAY_SIZE(kram_resources),
+-	.xram_res_p =		xram_resources,
+-	.xram_res_count =	ARRAY_SIZE(xram_resources),
+-	.rom_res_p =		rom_resources,
+-	.rom_res_count =	ARRAY_SIZE(rom_resources),
+-};
 -
--	.section	.data, "aw"
--	.balign	PAGE_SIZE
+-int platform_int_priority[NR_INTC_IRQS] = {
+-	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
+-	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
+-	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
+-	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
+-	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
+-	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
+-	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
+-	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
+-};
 -
--	.section	.data, "aw"
--	.balign	PAGE_SIZE
+-static int __init smsc_superio_setup(void)
+-{
+-	unsigned char devid, devrev;
 -
--	.global swapper_pg_dir
--swapper_pg_dir:
--	.space PAGE_SIZE, 0
+-	smsc_superio_virt = onchip_remap(SMSC_SUPERIO_BASE, 1024, "SMSC SuperIO");
+-	if (!smsc_superio_virt) {
+-		panic("Unable to remap SMSC SuperIO\n");
+-	}
 -
--	.global empty_bad_page
--empty_bad_page:
--	.space PAGE_SIZE, 0
+-	/* Initially the chip is in run state */
+-	/* Put it into configuration state */
+-	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
+-	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
 -
--	.global empty_bad_pte_table
--empty_bad_pte_table:
--	.space PAGE_SIZE, 0
+-	/* Read device ID info */
+-	devid = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
+-	devrev = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_REV_INDEX);
+-	printk("SMSC SuperIO devid %02x rev %02x\n", devid, devrev);
 -
--	.global	fpu_in_use
--fpu_in_use:	.quad	0
+-	/* Select the keyboard device */
+-	SMSC_SUPERIO_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
 -
+-	/* enable it */
+-	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
 -
--	.section	.text.head, "ax"
--	.balign L1_CACHE_BYTES
--/*
-- * Condition at the entry of __stext:
-- * . Reset state:
-- *   . SR.FD    = 1		(FPU disabled)
-- *   . SR.BL    = 1		(Exceptions disabled)
-- *   . SR.MD    = 1		(Privileged Mode)
-- *   . SR.MMU   = 0		(MMU Disabled)
-- *   . SR.CD    = 0		(CTC User Visible)
-- *   . SR.IMASK = Undefined	(Interrupt Mask)
-- *
-- * Operations supposed to be performed by __stext:
-- * . prevent speculative fetch onto device memory while MMU is off
-- * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
-- * . first, save CPU state and set it to something harmless
-- * . any CPU detection and/or endianness settings (?)
-- * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
-- * . set initial TLB entries for cached and uncached regions
-- *   (no fine granularity paging)
-- * . set initial cache state
-- * . enable MMU and caches
-- * . set CPU to a consistent state
-- *   . registers (including stack pointer and current/KCR0)
-- *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
-- *     at this stage. This is all to later Linux initialization steps.
-- *   . initialize FPU
-- * . clear BSS
-- * . jump into start_kernel()
-- * . be prepared to hopeless start_kernel() returns.
-- *
-- */
--	.global _stext
--_stext:
--	/*
--	 * Prevent speculative fetch on device memory due to
--	 * uninitialized target registers.
--	 */
--	ptabs/u	ZERO, tr0
--	ptabs/u	ZERO, tr1
--	ptabs/u	ZERO, tr2
--	ptabs/u	ZERO, tr3
--	ptabs/u	ZERO, tr4
--	ptabs/u	ZERO, tr5
--	ptabs/u	ZERO, tr6
--	ptabs/u	ZERO, tr7
--	synci
+-	/* Select the interrupts */
+-	/* On a PC keyboard is IRQ1, mouse is IRQ12 */
+-	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
+-	SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
 -
+-#ifdef CONFIG_IDE
 -	/*
--	 * Read/Set CPU state. After this block:
--	 * r29 = Initial SR
+-	 * Only IDE1 exists on the Cayman
 -	 */
--	getcon	SR, r29
--	movi	SR_HARMLESS, r20
--	putcon	r20, SR
 -
--	/*
--	 * Initialize EMI/LMI. To Be Done.
--	 */
+-	/* Power it on */
+-	SMSC_SUPERIO_WRITE_INDEXED(1 << SMSC_IDE1_DEVICE, 0x22);
 -
--	/*
--	 * CPU detection and/or endianness settings (?). To Be Done.
--	 * Pure PIC code here, please ! Just save state into r30.
--         * After this block:
--	 * r30 = CPU type/Platform Endianness
--	 */
+-	SMSC_SUPERIO_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
+-	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
 -
--	/*
--	 * Set initial TLB entries for cached and uncached regions.
--	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
--	 */
--	/* Clear ITLBs */
--	pta	clear_ITLB, tr1
--	movi	MMUIR_FIRST, r21
--	movi	MMUIR_END, r22
--clear_ITLB:
--	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
--	addi	r21, MMUIR_STEP, r21
--        bne	r21, r22, tr1
+-	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE >> 8,
+-				   SMSC_PRIMARY_BASE_INDEX + 0);
+-	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE & 0xff,
+-				   SMSC_PRIMARY_BASE_INDEX + 1);
 -
--	/* Clear DTLBs */
--	pta	clear_DTLB, tr1
--	movi	MMUDR_FIRST, r21
--	movi	MMUDR_END, r22
--clear_DTLB:
--	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
--	addi	r21, MMUDR_STEP, r21
--        bne	r21, r22, tr1
+-	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE >> 8,
+-				   SMSC_SECONDARY_BASE_INDEX + 0);
+-	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE & 0xff,
+-				   SMSC_SECONDARY_BASE_INDEX + 1);
 -
--	/* Map one big (512Mb) page for ITLB */
--	movi	MMUIR_FIRST, r21
--	movi	MMUIR_TEXT_L, r22	/* PTEL first */
--	add.l	r22, r63, r22		/* Sign extend */
--	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
--	movi	MMUIR_TEXT_H, r22	/* PTEH last */
--	add.l	r22, r63, r22		/* Sign extend */
--	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */
+-	SMSC_SUPERIO_WRITE_INDEXED(14, SMSC_PRIMARY_INT_INDEX);
 -
--	/* Map one big CACHED (512Mb) page for DTLB */
--	movi	MMUDR_FIRST, r21
--	movi	MMUDR_CACHED_L, r22	/* PTEL first */
--	add.l	r22, r63, r22		/* Sign extend */
--	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
--	movi	MMUDR_CACHED_H, r22	/* PTEH last */
--	add.l	r22, r63, r22		/* Sign extend */
--	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */
+-	SMSC_SUPERIO_WRITE_INDEXED(SMSC_CONFIG_REGISTERS,
+-				   SMCS_LOGICAL_DEV_INDEX);
 -
--#ifdef CONFIG_EARLY_PRINTK
--	/*
--	 * Setup a DTLB translation for SCIF phys.
--	 */
--	addi    r21, MMUDR_STEP, r21
--	movi    0x0a03, r22	/* SCIF phys */
--	shori   0x0148, r22
--	putcfg  r21, 1, r22	/* PTEL first */
--	movi    0xfa03, r22	/* 0xfa030000, fixed SCIF virt */
--	shori   0x0003, r22
--	putcfg  r21, 0, r22	/* PTEH last */
+-	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc2); /* GP42 = nIDE1_OE */
+-	SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
+-	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
+-	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
 -#endif
 -
--	/*
--	 * Set cache behaviours.
--	 */
--	/* ICache */
--	movi	ICCR_BASE, r21
--	movi	ICCR0_INIT_VAL, r22
--	movi	ICCR1_INIT_VAL, r23
--	putcfg	r21, ICCR_REG0, r22
--	putcfg	r21, ICCR_REG1, r23
--
--	/* OCache */
--	movi	OCCR_BASE, r21
--	movi	OCCR0_INIT_VAL, r22
--	movi	OCCR1_INIT_VAL, r23
--	putcfg	r21, OCCR_REG0, r22
--	putcfg	r21, OCCR_REG1, r23
--
--
--	/*
--	 * Enable Caches and MMU. Do the first non-PIC jump.
--         * Now head.S global variables, constants and externs
--	 * can be used.
--	 */
--	getcon	SR, r21
--	movi	SR_ENABLE_MMU, r22
--	or	r21, r22, r21
--	putcon	r21, SSR
--	movi	hyperspace, r22
--	ori	r22, 1, r22	    /* Make it SHmedia, not required but..*/
--	putcon	r22, SPC
--	synco
--	rte			    /* And now go into the hyperspace ... */
--hyperspace:			    /* ... that's the next instruction !  */
--
--	/*
--	 * Set CPU to a consistent state.
--	 * r31 = FPU support flag
--	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
--	 */
--	movi	start_kernel, r32
--	ori	r32, 1, r32
--
--	ptabs	r32, tr0		    /* r32 = _start_kernel address        */
--	pta/u	hopeless, tr1
--	pta/u	hopeless, tr2
--	pta/u	hopeless, tr3
--	pta/u	hopeless, tr4
--	pta/u	hopeless, tr5
--	pta/u	hopeless, tr6
--	pta/u	hopeless, tr7
--	gettr	tr1, r28			/* r28 = hopeless address */
--
--	/* Set initial stack pointer */
--	movi	init_thread_union, SP
--	putcon	SP, KCR0		/* Set current to init_task */
--	movi	THREAD_SIZE, r22	/* Point to the end */
--	add	SP, r22, SP
--
--	/*
--	 * Initialize FPU.
--	 * Keep FPU flag in r31. After this block:
--	 * r31 = FPU flag
--	 */
--	movi fpu_in_use, r31	/* Temporary */
+-	/* Exit the configuration state */
+-	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
 -
--#ifdef CONFIG_SH_FPU
--	getcon	SR, r21
--	movi	SR_ENABLE_FPU, r22
--	and	r21, r22, r22
--	putcon	r22, SR			/* Try to enable */
--	getcon	SR, r22
--	xor	r21, r22, r21
--	shlri	r21, 15, r21		/* Supposedly 0/1 */
--	st.q	r31, 0 , r21		/* Set fpu_in_use */
--#else
--	movi	0, r21
--	st.q	r31, 0 , r21		/* Set fpu_in_use */
--#endif
--	or	r21, ZERO, r31		/* Set FPU flag at last */
+-	return 0;
+-}
 -
--#ifndef CONFIG_SH_NO_BSS_INIT
--/* Don't clear BSS if running on slow platforms such as an RTL simulation,
--   remote memory via SHdebug link, etc.  For these the memory can be guaranteed
--   to be all zero on boot anyway. */
--	/*
--	 * Clear bss
--	 */
--	pta	clear_quad, tr1
--	movi	__bss_start, r22
--	movi	_end, r23
--clear_quad:
--	st.q	r22, 0, ZERO
--	addi	r22, 8, r22
--	bne	r22, r23, tr1		/* Both quad aligned, see vmlinux.lds.S */
--#endif
--	pta/u	hopeless, tr1
+-/* This is grotty, but, because kernel is always referenced on the link line
+- * before any devices, this is safe.
+- */
+-__initcall(smsc_superio_setup);
 -
--	/* Say bye to head.S but be prepared to wrongly get back ... */
--	blink	tr0, LINK
+-void __init platform_setup(void)
+-{
+-	/* Cayman platform leaves the decision to head.S, for now */
+-	platform_parms.fpu_flags = fpu_in_use;
+-}
 -
--	/* If we ever get back here through LINK/tr1-tr7 */
--	pta/u	hopeless, tr7
+-void __init platform_monitor(void)
+-{
+-	/* Nothing yet .. */
+-}
 -
--hopeless:
--	/*
--	 * Something's badly wrong here. Loop endlessly,
--         * there's nothing more we can do about it.
--	 *
--	 * Note on hopeless: it can be jumped into invariably
--	 * before or after jumping into hyperspace. The only
--	 * requirement is to be PIC called (PTA) before and
--	 * any way (PTA/PTABS) after. According to Virtual
--	 * to Physical mapping a simulator/emulator can easily
--	 * tell where we came here from just looking at hopeless
--	 * (PC) address.
--	 *
--	 * For debugging purposes:
--	 * (r28) hopeless/loop address
--	 * (r29) Original SR
--	 * (r30) CPU type/Platform endianness
--	 * (r31) FPU Support
--	 * (r32) _start_kernel address
--	 */
--	blink	tr7, ZERO
+-void __init platform_reserve(void)
+-{
+-	/* Nothing yet .. */
+-}
 -
+-const char *get_system_type(void)
+-{
+-	return "Hitachi Cayman";
+-}
 -
-diff --git a/arch/sh64/kernel/init_task.c b/arch/sh64/kernel/init_task.c
+diff --git a/arch/sh64/mach-harp/Makefile b/arch/sh64/mach-harp/Makefile
 deleted file mode 100644
-index deee8bf..0000000
---- a/arch/sh64/kernel/init_task.c
+index 2f2963f..0000000
+--- a/arch/sh64/mach-harp/Makefile
 +++ /dev/null
-@@ -1,46 +0,0 @@
+@@ -1 +0,0 @@
+-obj-y := setup.o
+diff --git a/arch/sh64/mach-harp/setup.c b/arch/sh64/mach-harp/setup.c
+deleted file mode 100644
+index 05011cb..0000000
+--- a/arch/sh64/mach-harp/setup.c
++++ /dev/null
+@@ -1,129 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/init_task.c
+- * arch/sh64/mach-harp/setup.c
+- *
+- * SH-5 Simulator Platform Support
+- *
+- * This file handles the architecture-dependent parts of initialization
 - *
 - * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003  Paul Mundt
 - *
+- * benedict.gaster@superh.com:	 3rd May 2002
+- *    Added support for ramdisk, removing statically linked romfs at the same time. *
+- *
+- * lethal@linux-sh.org:          15th May 2003
+- *    Use the generic procfs cpuinfo interface, just return a valid board name.
 - */
--#include <linux/rwsem.h>
--#include <linux/mm.h>
--#include <linux/sched.h>
--#include <linux/init_task.h>
--#include <linux/mqueue.h>
--#include <linux/fs.h>
--#include <asm/uaccess.h>
--#include <asm/pgtable.h>
--
--static struct fs_struct init_fs = INIT_FS;
--static struct files_struct init_files = INIT_FILES;
--static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
--static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
--struct mm_struct init_mm = INIT_MM(init_mm);
--
--struct pt_regs fake_swapper_regs;
+-#include <linux/init.h>
+-#include <linux/kernel.h>
+-#include <asm/platform.h>
+-#include <asm/irq.h>
 -
 -/*
-- * Initial thread structure.
-- *
-- * We need to make sure that this is THREAD_SIZE-byte aligned due
-- * to the way process stacks are handled. This is done by having a
-- * special "init_task" linker map entry..
+- * Platform Dependent Interrupt Priorities.
 - */
--union thread_union init_thread_union
--	__attribute__((__section__(".data.init_task"))) =
--		{ INIT_THREAD_INFO(init_task) };
+-
+-/* Using defaults defined in irq.h */
+-#define	RES NO_PRIORITY		/* Disabled */
+-#define IR0 IRL0_PRIORITY	/* IRLs */
+-#define IR1 IRL1_PRIORITY
+-#define IR2 IRL2_PRIORITY
+-#define IR3 IRL3_PRIORITY
+-#define PCA INTA_PRIORITY	/* PCI Ints */
+-#define PCB INTB_PRIORITY
+-#define PCC INTC_PRIORITY
+-#define PCD INTD_PRIORITY
+-#define SER TOP_PRIORITY
+-#define ERR TOP_PRIORITY
+-#define PW0 TOP_PRIORITY
+-#define PW1 TOP_PRIORITY
+-#define PW2 TOP_PRIORITY
+-#define PW3 TOP_PRIORITY
+-#define DM0 NO_PRIORITY		/* DMA Ints */
+-#define DM1 NO_PRIORITY
+-#define DM2 NO_PRIORITY
+-#define DM3 NO_PRIORITY
+-#define DAE NO_PRIORITY
+-#define TU0 TIMER_PRIORITY	/* TMU Ints */
+-#define TU1 NO_PRIORITY
+-#define TU2 NO_PRIORITY
+-#define TI2 NO_PRIORITY
+-#define ATI NO_PRIORITY		/* RTC Ints */
+-#define PRI NO_PRIORITY
+-#define CUI RTC_PRIORITY
+-#define ERI SCIF_PRIORITY	/* SCIF Ints */
+-#define RXI SCIF_PRIORITY
+-#define BRI SCIF_PRIORITY
+-#define TXI SCIF_PRIORITY
+-#define ITI TOP_PRIORITY	/* WDT Ints */
 -
 -/*
-- * Initial task structure.
-- *
-- * All other task structs will be allocated on slabs in fork.c
+- * Platform dependent structures: maps and parms block.
 - */
--struct task_struct init_task = INIT_TASK(init_task);
+-struct resource io_resources[] = {
+-	/* To be updated with external devices */
+-};
 -
-diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
+-struct resource kram_resources[] = {
+-	/* These must be last in the array */
+-	{ .name = "Kernel code", .start = 0, .end = 0 },
+-	/* These must be last in the array */
+-	{ .name = "Kernel data", .start = 0, .end = 0 }
+-};
+-
+-struct resource xram_resources[] = {
+-	/* To be updated with external devices */
+-};
+-
+-struct resource rom_resources[] = {
+-	/* To be updated with external devices */
+-};
+-
+-struct sh64_platform platform_parms = {
+-	.readonly_rootfs =	1,
+-	.initial_root_dev =	0x0100,
+-	.loader_type =		1,
+-	.io_res_p =		io_resources,
+-	.io_res_count =		ARRAY_SIZE(io_resources),
+-	.kram_res_p =		kram_resources,
+-	.kram_res_count =	ARRAY_SIZE(kram_resources),
+-	.xram_res_p =		xram_resources,
+-	.xram_res_count =	ARRAY_SIZE(xram_resources),
+-	.rom_res_p =		rom_resources,
+-	.rom_res_count =	ARRAY_SIZE(rom_resources),
+-};
+-
+-int platform_int_priority[NR_INTC_IRQS] = {
+-	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
+-	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
+-	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
+-	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
+-	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
+-	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
+-	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
+-	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
+-};
+-
+-void __init platform_setup(void)
+-{
+-	/* Harp platform leaves the decision to head.S, for now */
+-	platform_parms.fpu_flags = fpu_in_use;
+-}
+-
+-void __init platform_monitor(void)
+-{
+-	/* Nothing yet .. */
+-}
+-
+-void __init platform_reserve(void)
+-{
+-	/* Nothing yet .. */
+-}
+-
+-const char *get_system_type(void)
+-{
+-	return "ST50 Harp";
+-}
+diff --git a/arch/sh64/mach-sim/Makefile b/arch/sh64/mach-sim/Makefile
 deleted file mode 100644
-index 9412b71..0000000
---- a/arch/sh64/kernel/irq.c
+index 2f2963f..0000000
+--- a/arch/sh64/mach-sim/Makefile
 +++ /dev/null
-@@ -1,115 +0,0 @@
+@@ -1 +0,0 @@
+-obj-y := setup.o
+diff --git a/arch/sh64/mach-sim/setup.c b/arch/sh64/mach-sim/setup.c
+deleted file mode 100644
+index e3386ec..0000000
+--- a/arch/sh64/mach-sim/setup.c
++++ /dev/null
+@@ -1,126 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/irq.c
+- * arch/sh64/mach-sim/setup.c
+- *
+- * ST50 Simulator Platform Support
+- *
+- * This file handles the architecture-dependent parts of initialization
 - *
 - * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003  Paul Mundt
 - *
+- * lethal@linux-sh.org:          15th May 2003
+- *    Use the generic procfs cpuinfo interface, just return a valid board name.
 - */
+-#include <linux/init.h>
+-#include <linux/kernel.h>
+-#include <asm/platform.h>
+-#include <asm/irq.h>
 -
 -/*
-- * IRQs are in fact implemented a bit like signal handlers for the kernel.
-- * Naturally it's not a 1:1 relation, but there are similarities.
+- * Platform Dependent Interrupt Priorities.
 - */
 -
--#include <linux/errno.h>
--#include <linux/kernel_stat.h>
--#include <linux/signal.h>
--#include <linux/rwsem.h>
--#include <linux/sched.h>
--#include <linux/ioport.h>
--#include <linux/interrupt.h>
--#include <linux/timex.h>
--#include <linux/slab.h>
--#include <linux/random.h>
--#include <linux/smp.h>
--#include <linux/init.h>
--#include <linux/seq_file.h>
--#include <linux/bitops.h>
--#include <asm/system.h>
--#include <asm/io.h>
--#include <asm/smp.h>
--#include <asm/pgalloc.h>
--#include <asm/delay.h>
--#include <asm/irq.h>
--#include <linux/irq.h>
+-/* Using defaults defined in irq.h */
+-#define	RES NO_PRIORITY		/* Disabled */
+-#define IR0 IRL0_PRIORITY	/* IRLs */
+-#define IR1 IRL1_PRIORITY
+-#define IR2 IRL2_PRIORITY
+-#define IR3 IRL3_PRIORITY
+-#define PCA INTA_PRIORITY	/* PCI Ints */
+-#define PCB INTB_PRIORITY
+-#define PCC INTC_PRIORITY
+-#define PCD INTD_PRIORITY
+-#define SER TOP_PRIORITY
+-#define ERR TOP_PRIORITY
+-#define PW0 TOP_PRIORITY
+-#define PW1 TOP_PRIORITY
+-#define PW2 TOP_PRIORITY
+-#define PW3 TOP_PRIORITY
+-#define DM0 NO_PRIORITY		/* DMA Ints */
+-#define DM1 NO_PRIORITY
+-#define DM2 NO_PRIORITY
+-#define DM3 NO_PRIORITY
+-#define DAE NO_PRIORITY
+-#define TU0 TIMER_PRIORITY	/* TMU Ints */
+-#define TU1 NO_PRIORITY
+-#define TU2 NO_PRIORITY
+-#define TI2 NO_PRIORITY
+-#define ATI NO_PRIORITY		/* RTC Ints */
+-#define PRI NO_PRIORITY
+-#define CUI RTC_PRIORITY
+-#define ERI SCIF_PRIORITY	/* SCIF Ints */
+-#define RXI SCIF_PRIORITY
+-#define BRI SCIF_PRIORITY
+-#define TXI SCIF_PRIORITY
+-#define ITI TOP_PRIORITY	/* WDT Ints */
 -
--void ack_bad_irq(unsigned int irq)
--{
--	printk("unexpected IRQ trap at irq %02x\n", irq);
--}
+-/*
+- * Platform dependent structures: maps and parms block.
+- */
+-struct resource io_resources[] = {
+-	/* Nothing yet .. */
+-};
 -
--#if defined(CONFIG_PROC_FS)
--int show_interrupts(struct seq_file *p, void *v)
--{
--	int i = *(loff_t *) v, j;
--	struct irqaction * action;
--	unsigned long flags;
+-struct resource kram_resources[] = {
+-	/* These must be last in the array */
+-	{ .name = "Kernel code", .start = 0, .end = 0 },
+-	/* These must be last in the array */
+-	{ .name = "Kernel data", .start = 0, .end = 0 }
+-};
 -
--	if (i == 0) {
--		seq_puts(p, "           ");
--		for_each_online_cpu(j)
--			seq_printf(p, "CPU%d       ",j);
--		seq_putc(p, '\n');
--	}
+-struct resource xram_resources[] = {
+-	/* Nothing yet .. */
+-};
 -
--	if (i < NR_IRQS) {
--		spin_lock_irqsave(&irq_desc[i].lock, flags);
--		action = irq_desc[i].action;
--		if (!action)
--			goto unlock;
--		seq_printf(p, "%3d: ",i);
--		seq_printf(p, "%10u ", kstat_irqs(i));
--		seq_printf(p, " %14s", irq_desc[i].chip->typename);
--		seq_printf(p, "  %s", action->name);
+-struct resource rom_resources[] = {
+-	/* Nothing yet .. */
+-};
 -
--		for (action=action->next; action; action = action->next)
--			seq_printf(p, ", %s", action->name);
--		seq_putc(p, '\n');
--unlock:
--		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
--	}
--	return 0;
--}
--#endif
+-struct sh64_platform platform_parms = {
+-	.readonly_rootfs =	1,
+-	.initial_root_dev =	0x0100,
+-	.loader_type =		1,
+-	.io_res_p =		io_resources,
+-	.io_res_count =		ARRAY_SIZE(io_resources),
+-	.kram_res_p =		kram_resources,
+-	.kram_res_count =	ARRAY_SIZE(kram_resources),
+-	.xram_res_p =		xram_resources,
+-	.xram_res_count =	ARRAY_SIZE(xram_resources),
+-	.rom_res_p =		rom_resources,
+-	.rom_res_count =	ARRAY_SIZE(rom_resources),
+-};
 -
--/*
-- * do_NMI handles all Non-Maskable Interrupts.
-- */
--asmlinkage void do_NMI(unsigned long vector_num, struct pt_regs * regs)
--{
--	if (regs->sr & 0x40000000)
--		printk("unexpected NMI trap in system mode\n");
--	else
--		printk("unexpected NMI trap in user mode\n");
+-int platform_int_priority[NR_IRQS] = {
+-	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
+-	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
+-	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
+-	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
+-	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
+-	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
+-	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
+-	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
+-};
 -
--	/* No statistics */
+-void __init platform_setup(void)
+-{
+-	/* Simulator platform leaves the decision to head.S */
+-	platform_parms.fpu_flags = fpu_in_use;
 -}
 -
--/*
-- * do_IRQ handles all normal device IRQ's.
-- */
--asmlinkage int do_IRQ(unsigned long vector_num, struct pt_regs * regs)
+-void __init platform_monitor(void)
 -{
--	struct pt_regs *old_regs = set_irq_regs(regs);
--	int irq;
+-	/* Nothing yet .. */
+-}
 -
--	irq_enter();
+-void __init platform_reserve(void)
+-{
+-	/* Nothing yet .. */
+-}
 -
--	irq = irq_demux(vector_num);
+-const char *get_system_type(void)
+-{
+-	return "SH-5 Simulator";
+-}
+diff --git a/arch/sh64/mm/Makefile b/arch/sh64/mm/Makefile
+deleted file mode 100644
+index d0e8136..0000000
+--- a/arch/sh64/mm/Makefile
++++ /dev/null
+@@ -1,44 +0,0 @@
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 2000, 2001  Paolo Alberelli
+-# Copyright (C) 2003, 2004  Paul Mundt
+-#
+-# Makefile for the sh64-specific parts of the Linux memory manager.
+-#
+-# Note! Dependencies are done automagically by 'make dep', which also
+-# removes any old dependencies. DON'T put your own dependencies here
+-# unless it's something special (ie not a .c file).
+-#
 -
--	if (irq >= 0) {
--		__do_IRQ(irq);
--	} else {
--		printk("unexpected IRQ trap at vector %03lx\n", vector_num);
--	}
+-obj-y := cache.o consistent.o extable.o fault.o init.o ioremap.o \
+-	 tlbmiss.o tlb.o
 -
--	irq_exit();
+-obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 -
--	set_irq_regs(old_regs);
--	return 1;
--}
+-# Special flags for tlbmiss.o.  This puts restrictions on the number of
+-# caller-save registers that the compiler can target when building this file.
+-# This is required because the code is called from a context in entry.S where
+-# very few registers have been saved in the exception handler (for speed
+-# reasons).
+-# The caller save registers that have been saved and which can be used are
+-# r2,r3,r4,r5 : argument passing
+-# r15, r18 : SP and LINK
+-# tr0-4 : allow all caller-save TR's.  The compiler seems to be able to make
+-#         use of them, so it's probably beneficial to performance to save them
+-#         and have them available for it.
+-#
+-# The resources not listed below are callee save, i.e. the compiler is free to
+-# use any of them and will spill them to the stack itself.
 -
-diff --git a/arch/sh64/kernel/irq_intc.c b/arch/sh64/kernel/irq_intc.c
+-CFLAGS_tlbmiss.o += -ffixed-r7 \
+-	-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
+-	-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
+-	-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
+-	-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
+-	-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
+-	-ffixed-r41 -ffixed-r42 -ffixed-r43  \
+-	-ffixed-r60 -ffixed-r61 -ffixed-r62 \
+-	-fomit-frame-pointer
+diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c
 deleted file mode 100644
-index 3b63a93..0000000
---- a/arch/sh64/kernel/irq_intc.c
+index 421487c..0000000
+--- a/arch/sh64/mm/cache.c
 +++ /dev/null
-@@ -1,272 +0,0 @@
+@@ -1,1032 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/irq_intc.c
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003  Paul Mundt
-- *
-- * Interrupt Controller support for SH5 INTC.
-- * Per-interrupt selective. IRLM=0 (Fixed priority) is not
-- * supported being useless without a cascaded interrupt
-- * controller.
+- * arch/sh64/mm/cache.c
 - *
+- * Original version Copyright (C) 2000, 2001  Paolo Alberelli
+- * Second version Copyright (C) benedict.gaster@superh.com 2002
+- * Third version Copyright Richard.Curnow@superh.com 2003
+- * Hacks to third version Copyright (C) 2003 Paul Mundt
 - */
 -
--#include <linux/init.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
--#include <linux/kernel.h>
--#include <linux/stddef.h>
--#include <linux/bitops.h>       /* this includes also <asm/registers.h */
--                                /* which is required to remap register */
--                                /* names used into __asm__ blocks...   */
+-/****************************************************************************/
 -
--#include <asm/hardware.h>
--#include <asm/platform.h>
+-#include <linux/init.h>
+-#include <linux/mman.h>
+-#include <linux/mm.h>
+-#include <linux/threads.h>
 -#include <asm/page.h>
+-#include <asm/pgtable.h>
+-#include <asm/processor.h>
+-#include <asm/cache.h>
+-#include <asm/tlb.h>
 -#include <asm/io.h>
--#include <asm/irq.h>
--
--/*
-- * Maybe the generic Peripheral block could move to a more
-- * generic include file. INTC Block will be defined here
-- * and only here to make INTC self-contained in a single
-- * file.
-- */
--#define	INTC_BLOCK_OFFSET	0x01000000
--
--/* Base */
--#define INTC_BASE		PHYS_PERIPHERAL_BLOCK + \
--				INTC_BLOCK_OFFSET
+-#include <asm/uaccess.h>
+-#include <asm/mmu_context.h>
+-#include <asm/pgalloc.h> /* for flush_itlb_range */
 -
--/* Address */
--#define INTC_ICR_SET		(intc_virt + 0x0)
--#define INTC_ICR_CLEAR		(intc_virt + 0x8)
--#define INTC_INTPRI_0		(intc_virt + 0x10)
--#define INTC_INTSRC_0		(intc_virt + 0x50)
--#define INTC_INTSRC_1		(intc_virt + 0x58)
--#define INTC_INTREQ_0		(intc_virt + 0x60)
--#define INTC_INTREQ_1		(intc_virt + 0x68)
--#define INTC_INTENB_0		(intc_virt + 0x70)
--#define INTC_INTENB_1		(intc_virt + 0x78)
--#define INTC_INTDSB_0		(intc_virt + 0x80)
--#define INTC_INTDSB_1		(intc_virt + 0x88)
+-#include <linux/proc_fs.h>
 -
--#define INTC_ICR_IRLM		0x1
--#define	INTC_INTPRI_PREGS	8		/* 8 Priority Registers */
--#define	INTC_INTPRI_PPREG	8		/* 8 Priorities per Register */
+-/* This function is in entry.S */
+-extern unsigned long switch_and_save_asid(unsigned long new_asid);
 -
+-/* Wired TLB entry for the D-cache */
+-static unsigned long long dtlb_cache_slot;
 -
--/*
-- * Mapper between the vector ordinal and the IRQ number
-- * passed to kernel/device drivers.
+-/**
+- * sh64_cache_init()
+- *
+- * This is pretty much just a straightforward clone of the SH
+- * detect_cpu_and_cache_system().
+- *
+- * This function is responsible for setting up all of the cache
+- * info dynamically as well as taking care of CPU probing and
+- * setting up the relevant subtype data.
+- *
+- * FIXME: For the time being, we only really support the SH5-101
+- * out of the box, and don't support dynamic probing for things
+- * like the SH5-103 or even cut2 of the SH5-101. Implement this
+- * later!
 - */
--int intc_evt_to_irq[(0xE20/0x20)+1] = {
--	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x000 - 0x0E0 */
--	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x100 - 0x1E0 */
--	 0,  0,  0,  0,  0,  1,  0,  0,	/* 0x200 - 0x2E0 */
--	 2,  0,  0,  3,  0,  0,  0, -1,	/* 0x300 - 0x3E0 */
--	32, 33, 34, 35, 36, 37, 38, -1,	/* 0x400 - 0x4E0 */
--	-1, -1, -1, 63, -1, -1, -1, -1,	/* 0x500 - 0x5E0 */
--	-1, -1, 18, 19, 20, 21, 22, -1,	/* 0x600 - 0x6E0 */
--	39, 40, 41, 42, -1, -1, -1, -1,	/* 0x700 - 0x7E0 */
--	 4,  5,  6,  7, -1, -1, -1, -1,	/* 0x800 - 0x8E0 */
--	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x900 - 0x9E0 */
--	12, 13, 14, 15, 16, 17, -1, -1,	/* 0xA00 - 0xAE0 */
--	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xB00 - 0xBE0 */
--	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xC00 - 0xCE0 */
--	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xD00 - 0xDE0 */
--	-1, -1				/* 0xE00 - 0xE20 */
--};
+-int __init sh64_cache_init(void)
+-{
+-	/*
+-	 * First, setup some sane values for the I-cache.
+-	 */
+-	cpu_data->icache.ways		= 4;
+-	cpu_data->icache.sets		= 256;
+-	cpu_data->icache.linesz		= L1_CACHE_BYTES;
 -
--/*
-- * Opposite mapper.
-- */
--static int IRQ_to_vectorN[NR_INTC_IRQS] = {
--	0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /*  0- 7 */
--	  -1,   -1,   -1,   -1, 0x50, 0x51, 0x52, 0x53,	/*  8-15 */
--	0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36,   -1, /* 16-23 */
--	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24-31 */
--	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38,	/* 32-39 */
--        0x39, 0x3A, 0x3B,   -1,   -1,   -1,   -1,   -1, /* 40-47 */
--	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48-55 */
--	  -1,   -1,   -1,   -1,   -1,   -1,   -1, 0x2B, /* 56-63 */
+-	/*
+-	 * FIXME: This can probably be cleaned up a bit as well.. for example,
+-	 * do we really need the way shift _and_ the way_step_shift ?? Judging
+-	 * by the existing code, I would guess no.. is there any valid reason
+-	 * why we need to be tracking this around?
+-	 */
+-	cpu_data->icache.way_shift	= 13;
+-	cpu_data->icache.entry_shift	= 5;
+-	cpu_data->icache.set_shift	= 4;
+-	cpu_data->icache.way_step_shift	= 16;
+-	cpu_data->icache.asid_shift	= 2;
 -
--};
+-	/*
+-	 * way offset = cache size / associativity, so just don't factor in
+-	 * associativity in the first place..
+-	 */
+-	cpu_data->icache.way_ofs	= cpu_data->icache.sets *
+-					  cpu_data->icache.linesz;
 -
--static unsigned long intc_virt;
+-	cpu_data->icache.asid_mask	= 0x3fc;
+-	cpu_data->icache.idx_mask	= 0x1fe0;
+-	cpu_data->icache.epn_mask	= 0xffffe000;
+-	cpu_data->icache.flags		= 0;
 -
--static unsigned int startup_intc_irq(unsigned int irq);
--static void shutdown_intc_irq(unsigned int irq);
--static void enable_intc_irq(unsigned int irq);
--static void disable_intc_irq(unsigned int irq);
--static void mask_and_ack_intc(unsigned int);
--static void end_intc_irq(unsigned int irq);
+-	/*
+-	 * Next, setup some sane values for the D-cache.
+-	 *
+-	 * On the SH5, these are pretty consistent with the I-cache settings,
+-	 * so we just copy over the existing definitions.. these can be fixed
+-	 * up later, especially if we add runtime CPU probing.
+-	 *
+-	 * Though in the meantime it saves us from having to duplicate all of
+-	 * the above definitions..
+-	 */
+-	cpu_data->dcache		= cpu_data->icache;
 -
--static struct hw_interrupt_type intc_irq_type = {
--	.typename = "INTC",
--	.startup = startup_intc_irq,
--	.shutdown = shutdown_intc_irq,
--	.enable = enable_intc_irq,
--	.disable = disable_intc_irq,
--	.ack = mask_and_ack_intc,
--	.end = end_intc_irq
--};
+-	/*
+-	 * Setup any cache-related flags here
+-	 */
+-#if defined(CONFIG_DCACHE_WRITE_THROUGH)
+-	set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags));
+-#elif defined(CONFIG_DCACHE_WRITE_BACK)
+-	set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags));
+-#endif
 -
--static int irlm;		/* IRL mode */
+-	/*
+-	 * We also need to reserve a slot for the D-cache in the DTLB, so we
+-	 * do this now ..
+-	 */
+-	dtlb_cache_slot			= sh64_get_wired_dtlb_entry();
 -
--static unsigned int startup_intc_irq(unsigned int irq)
--{
--	enable_intc_irq(irq);
--	return 0; /* never anything pending */
+-	return 0;
 -}
 -
--static void shutdown_intc_irq(unsigned int irq)
--{
--	disable_intc_irq(irq);
--}
+-#ifdef CONFIG_DCACHE_DISABLED
+-#define sh64_dcache_purge_all()					do { } while (0)
+-#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
+-#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
+-#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
+-#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
+-#define sh64_dcache_purge_kernel_range(start, end)		do { } while (0)
+-#define sh64_dcache_wback_current_user_range(start, end)	do { } while (0)
+-#endif
 -
--static void enable_intc_irq(unsigned int irq)
--{
--	unsigned long reg;
--	unsigned long bitmask;
+-/*##########################################################################*/
 -
--	if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
--		printk("Trying to use straight IRL0-3 with an encoding platform.\n");
+-/* From here onwards, a rewrite of the implementation,
+-   by Richard.Curnow@superh.com.
 -
--	if (irq < 32) {
--		reg = INTC_INTENB_0;
--		bitmask = 1 << irq;
--	} else {
--		reg = INTC_INTENB_1;
--		bitmask = 1 << (irq - 32);
--	}
+-   The major changes in this compared to the old version are;
+-   1. use more selective purging through OCBP instead of using ALLOCO to purge
+-      by natural replacement.  This avoids purging out unrelated cache lines
+-      that happen to be in the same set.
+-   2. exploit the APIs copy_user_page and clear_user_page better
+-   3. be more selective about I-cache purging, in particular use invalidate_all
+-      more sparingly.
 -
--	ctrl_outl(bitmask, reg);
--}
+-   */
 -
--static void disable_intc_irq(unsigned int irq)
--{
--	unsigned long reg;
--	unsigned long bitmask;
+-/*##########################################################################
+-			       SUPPORT FUNCTIONS
+-  ##########################################################################*/
 -
--	if (irq < 32) {
--		reg = INTC_INTDSB_0;
--		bitmask = 1 << irq;
--	} else {
--		reg = INTC_INTDSB_1;
--		bitmask = 1 << (irq - 32);
--	}
+-/****************************************************************************/
+-/* The following group of functions deal with mapping and unmapping a temporary
+-   page into the DTLB slot that have been set aside for our exclusive use. */
+-/* In order to accomplish this, we use the generic interface for adding and
+-   removing a wired slot entry as defined in arch/sh64/mm/tlb.c */
+-/****************************************************************************/
 -
--	ctrl_outl(bitmask, reg);
--}
+-static unsigned long slot_own_flags;
 -
--static void mask_and_ack_intc(unsigned int irq)
+-static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr)
 -{
--	disable_intc_irq(irq);
+-	local_irq_save(slot_own_flags);
+-	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
 -}
 -
--static void end_intc_irq(unsigned int irq)
+-static inline void sh64_teardown_dtlb_cache_slot(void)
 -{
--	enable_intc_irq(irq);
+-	sh64_teardown_tlb_slot(dtlb_cache_slot);
+-	local_irq_restore(slot_own_flags);
 -}
 -
--/* For future use, if we ever support IRLM=0) */
--void make_intc_irq(unsigned int irq)
+-/****************************************************************************/
+-
+-#ifndef CONFIG_ICACHE_DISABLED
+-
+-static void __inline__ sh64_icache_inv_all(void)
 -{
--	disable_irq_nosync(irq);
--	irq_desc[irq].chip = &intc_irq_type;
--	disable_intc_irq(irq);
+-	unsigned long long addr, flag, data;
+-	unsigned int flags;
+-
+-	addr=ICCR0;
+-	flag=ICCR0_ICI;
+-	data=0;
+-
+-	/* Make this a critical section for safety (probably not strictly necessary.) */
+-	local_irq_save(flags);
+-
+-	/* Without %1 it gets unexplicably wrong */
+-	asm volatile("getcfg	%3, 0, %0\n\t"
+-			"or	%0, %2, %0\n\t"
+-			"putcfg	%3, 0, %0\n\t"
+-			"synci"
+-			: "=&r" (data)
+-			: "0" (data), "r" (flag), "r" (addr));
+-
+-	local_irq_restore(flags);
 -}
 -
--#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
--int intc_irq_describe(char* p, int irq)
+-static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
 -{
--	if (irq < NR_INTC_IRQS)
--		return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
--	else
--		return 0;
--}
+-	/* Invalidate range of addresses [start,end] from the I-cache, where
+-	 * the addresses lie in the kernel superpage. */
+-
+-	unsigned long long ullend, addr, aligned_start;
+-#if (NEFF == 32)
+-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
+-#else
+-#error "NEFF != 32"
+-#endif
+-	aligned_start &= L1_CACHE_ALIGN_MASK;
+-	addr = aligned_start;
+-#if (NEFF == 32)
+-	ullend = (unsigned long long) (signed long long) (signed long) end;
+-#else
+-#error "NEFF != 32"
 -#endif
+-	while (addr <= ullend) {
+-		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
+-		addr += L1_CACHE_BYTES;
+-	}
+-}
 -
--void __init init_IRQ(void)
+-static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
 -{
--        unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
--	unsigned long reg;
--	unsigned long data;
--	int i;
+-	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
+-	   Also, eaddr is page-aligned. */
 -
--	intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
--	if (!intc_virt) {
--		panic("Unable to remap INTC\n");
--	}
+-	unsigned long long addr, end_addr;
+-	unsigned long flags = 0;
+-	unsigned long running_asid, vma_asid;
+-	addr = eaddr;
+-	end_addr = addr + PAGE_SIZE;
 -
+-	/* Check whether we can use the current ASID for the I-cache
+-	   invalidation.  For example, if we're called via
+-	   access_process_vm->flush_cache_page->here, (e.g. when reading from
+-	   /proc), 'running_asid' will be that of the reader, not of the
+-	   victim.
 -
--	/* Set default: per-line enable/disable, priority driven ack/eoi */
--	for (i = 0; i < NR_INTC_IRQS; i++) {
--		if (platform_int_priority[i] != NO_PRIORITY) {
--			irq_desc[i].chip = &intc_irq_type;
--		}
+-	   Also, note the risk that we might get pre-empted between the ASID
+-	   compare and blocking IRQs, and before we regain control, the
+-	   pid->ASID mapping changes.  However, the whole cache will get
+-	   invalidated when the mapping is renewed, so the worst that can
+-	   happen is that the loop below ends up invalidating somebody else's
+-	   cache entries.
+-	*/
+-
+-	running_asid = get_asid();
+-	vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
+-	if (running_asid != vma_asid) {
+-		local_irq_save(flags);
+-		switch_and_save_asid(vma_asid);
+-	}
+-	while (addr < end_addr) {
+-		/* Worth unrolling a little */
+-		asm __volatile__("icbi %0,  0" : : "r" (addr));
+-		asm __volatile__("icbi %0, 32" : : "r" (addr));
+-		asm __volatile__("icbi %0, 64" : : "r" (addr));
+-		asm __volatile__("icbi %0, 96" : : "r" (addr));
+-		addr += 128;
+-	}
+-	if (running_asid != vma_asid) {
+-		switch_and_save_asid(running_asid);
+-		local_irq_restore(flags);
 -	}
+-}
 -
+-/****************************************************************************/
 -
--	/* Disable all interrupts and set all priorities to 0 to avoid trouble */
--	ctrl_outl(-1, INTC_INTDSB_0);
--	ctrl_outl(-1, INTC_INTDSB_1);
+-static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
+-			  unsigned long start, unsigned long end)
+-{
+-	/* Used for invalidating big chunks of I-cache, i.e. assume the range
+-	   is whole pages.  If 'start' or 'end' is not page aligned, the code
+-	   is conservative and invalidates to the ends of the enclosing pages.
+-	   This is functionally OK, just a performance loss. */
 -
--	for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
--		ctrl_outl( NO_PRIORITY, reg);
+-	/* See the comments below in sh64_dcache_purge_user_range() regarding
+-	   the choice of algorithm.  However, for the I-cache option (2) isn't
+-	   available because there are no physical tags so aliases can't be
+-	   resolved.  The icbi instruction has to be used through the user
+-	   mapping.   Because icbi is cheaper than ocbp on a cache hit, it
+-	   would be cheaper to use the selective code for a large range than is
+-	   possible with the D-cache.  Just assume 64 for now as a working
+-	   figure.
+-	   */
 -
+-	int n_pages;
 -
--	/* Set IRLM */
--	/* If all the priorities are set to 'no priority', then
--	 * assume we are using encoded mode.
--	 */
--	irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
--		platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
+-	if (!mm) return;
 -
--	if (irlm == NO_PRIORITY) {
--		/* IRLM = 0 */
--		reg = INTC_ICR_CLEAR;
--		i = IRQ_INTA;
--		printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
+-	n_pages = ((end - start) >> PAGE_SHIFT);
+-	if (n_pages >= 64) {
+-		sh64_icache_inv_all();
 -	} else {
--		/* IRLM = 1 */
--		reg = INTC_ICR_SET;
--		i = IRQ_IRL0;
--	}
--	ctrl_outl(INTC_ICR_IRLM, reg);
+-		unsigned long aligned_start;
+-		unsigned long eaddr;
+-		unsigned long after_last_page_start;
+-		unsigned long mm_asid, current_asid;
+-		unsigned long long flags = 0ULL;
 -
--	/* Set interrupt priorities according to platform description */
--	for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
--		data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
--		if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
--			/* Upon the 7th, set Priority Register */
--			ctrl_outl(data, reg);
--			data = 0;
--			reg += 8;
+-		mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
+-		current_asid = get_asid();
+-
+-		if (mm_asid != current_asid) {
+-			/* Switch ASID and run the invalidate loop under cli */
+-			local_irq_save(flags);
+-			switch_and_save_asid(mm_asid);
+-		}
+-
+-		aligned_start = start & PAGE_MASK;
+-		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
+-
+-		while (aligned_start < after_last_page_start) {
+-			struct vm_area_struct *vma;
+-			unsigned long vma_end;
+-			vma = find_vma(mm, aligned_start);
+-			if (!vma || (aligned_start <= vma->vm_end)) {
+-				/* Avoid getting stuck in an error condition */
+-				aligned_start += PAGE_SIZE;
+-				continue;
+-			}
+-			vma_end = vma->vm_end;
+-			if (vma->vm_flags & VM_EXEC) {
+-				/* Executable */
+-				eaddr = aligned_start;
+-				while (eaddr < vma_end) {
+-					sh64_icache_inv_user_page(vma, eaddr);
+-					eaddr += PAGE_SIZE;
+-				}
+-			}
+-			aligned_start = vma->vm_end; /* Skip to start of next region */
+-		}
+-		if (mm_asid != current_asid) {
+-			switch_and_save_asid(current_asid);
+-			local_irq_restore(flags);
 -		}
 -	}
+-}
 -
--#ifdef CONFIG_SH_CAYMAN
--	{
--		extern void init_cayman_irq(void);
+-static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
+-						unsigned long start, int len)
+-{
 -
--		init_cayman_irq();
--	}
--#endif
+-	/* Invalidate a small range of user context I-cache, not necessarily
+-	   page (or even cache-line) aligned. */
 -
--	/*
--	 * And now let interrupts come in.
--	 * sti() is not enough, we need to
--	 * lower priority, too.
--	 */
--        __asm__ __volatile__("getcon    " __SR ", %0\n\t"
--                             "and       %0, %1, %0\n\t"
--                             "putcon    %0, " __SR "\n\t"
--                             : "=&r" (__dummy0)
--                             : "r" (__dummy1));
--}
-diff --git a/arch/sh64/kernel/led.c b/arch/sh64/kernel/led.c
-deleted file mode 100644
-index e35d3f6..0000000
---- a/arch/sh64/kernel/led.c
-+++ /dev/null
-@@ -1,40 +0,0 @@
--/*
-- * arch/sh64/kernel/led.c
-- *
-- * Copyright (C) 2002 Stuart Menefy <stuart.menefy at st.com>
-- *
-- * May be copied or modified under the terms of the GNU General Public
-- * License.  See linux/COPYING for more information.
-- *
-- * Flash the LEDs
-- */
--#include <linux/stddef.h>
--#include <linux/sched.h>
+-	unsigned long long eaddr = start;
+-	unsigned long long eaddr_end = start + len;
+-	unsigned long current_asid, mm_asid;
+-	unsigned long long flags;
+-	unsigned long long epage_start;
 -
--void mach_led(int pos, int val);
+-	/* Since this is used inside ptrace, the ASID in the mm context
+-	   typically won't match current_asid.  We'll have to switch ASID to do
+-	   this.  For safety, and given that the range will be small, do all
+-	   this under cli.
 -
--/* acts like an actual heart beat -- ie thump-thump-pause... */
--void heartbeat(void)
--{
--	static unsigned int cnt = 0, period = 0, dist = 0;
+-	   Note, there is a hazard that the ASID in mm->context is no longer
+-	   actually associated with mm, i.e. if the mm->context has started a
+-	   new cycle since mm was last active.  However, this is just a
+-	   performance issue: all that happens is that we invalidate lines
+-	   belonging to another mm, so the owning process has to refill them
+-	   when that mm goes live again.  mm itself can't have any cache
+-	   entries because there will have been a flush_cache_all when the new
+-	   mm->context cycle started. */
 -
--	if (cnt == 0 || cnt == dist) {
--		mach_led(-1, 1);
--	} else if (cnt == 7 || cnt == dist + 7) {
--		mach_led(-1, 0);
--	}
+-	/* Align to start of cache line.  Otherwise, suppose len==8 and start
+-	   was at 32N+28 : the last 4 bytes wouldn't get invalidated. */
+-	eaddr = start & L1_CACHE_ALIGN_MASK;
+-	eaddr_end = start + len;
 -
--	if (++cnt > period) {
--		cnt = 0;
+-	local_irq_save(flags);
+-	mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
+-	current_asid = switch_and_save_asid(mm_asid);
 -
--		/*
--		 * The hyperbolic function below modifies the heartbeat period
--		 * length in dependency of the current (5min) load. It goes
--		 * through the points f(0)=126, f(1)=86, f(5)=51, f(inf)->30.
--		 */
--		period = ((672 << FSHIFT) / (5 * avenrun[0] +
--					    (7 << FSHIFT))) + 30;
--		dist = period / 4;
+-	epage_start = eaddr & PAGE_MASK;
+-
+-	while (eaddr < eaddr_end)
+-	{
+-		asm __volatile__("icbi %0, 0" : : "r" (eaddr));
+-		eaddr += L1_CACHE_BYTES;
 -	}
+-	switch_and_save_asid(current_asid);
+-	local_irq_restore(flags);
 -}
 -
-diff --git a/arch/sh64/kernel/module.c b/arch/sh64/kernel/module.c
-deleted file mode 100644
-index 2598f6b..0000000
---- a/arch/sh64/kernel/module.c
-+++ /dev/null
-@@ -1,161 +0,0 @@
--/*  Kernel module help for sh64.
--
--    This program is free software; you can redistribute it and/or modify
--    it under the terms of the GNU General Public License as published by
--    the Free Software Foundation; either version 2 of the License, or
--    (at your option) any later version.
+-static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
+-{
+-	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
+-	   cache hit on the virtual tag the instruction ends there, without a
+-	   TLB lookup. */
 -
--    This program is distributed in the hope that it will be useful,
--    but WITHOUT ANY WARRANTY; without even the implied warranty of
--    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
--    GNU General Public License for more details.
+-	unsigned long long aligned_start;
+-	unsigned long long ull_end;
+-	unsigned long long addr;
 -
--    You should have received a copy of the GNU General Public License
--    along with this program; if not, write to the Free Software
--    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+-	ull_end = end;
 -
--    Copyright 2004 SuperH (UK) Ltd
--    Author: Richard Curnow
+-	/* Just invalidate over the range using the natural addresses.  TLB
+-	   miss handling will be OK (TBC).  Since it's for the current process,
+-	   either we're already in the right ASID context, or the ASIDs have
+-	   been recycled since we were last active in which case we might just
+-	   invalidate another process's I-cache entries : no worries, just a
+-	   performance drop for him. */
+-	aligned_start = start & L1_CACHE_ALIGN_MASK;
+-	addr = aligned_start;
+-	while (addr < ull_end) {
+-		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
+-		asm __volatile__ ("nop");
+-		asm __volatile__ ("nop");
+-		addr += L1_CACHE_BYTES;
+-	}
+-}
 -
--    Based on the sh version, and on code from the sh64-specific parts of
--    modutils, originally written by Richard Curnow and Ben Gaster.
+-#endif /* !CONFIG_ICACHE_DISABLED */
 -
--*/
--#include <linux/moduleloader.h>
--#include <linux/elf.h>
--#include <linux/vmalloc.h>
--#include <linux/fs.h>
--#include <linux/string.h>
--#include <linux/kernel.h>
+-/****************************************************************************/
 -
--#if 0
--#define DEBUGP printk
--#else
--#define DEBUGP(fmt...)
--#endif
+-#ifndef CONFIG_DCACHE_DISABLED
 -
--void *module_alloc(unsigned long size)
--{
--	if (size == 0)
--		return NULL;
--	return vmalloc(size);
--}
+-/* Buffer used as the target of alloco instructions to purge data from cache
+-   sets by natural eviction. -- RPC */
+-#define DUMMY_ALLOCO_AREA_SIZE L1_CACHE_SIZE_BYTES + (1024 * 4)
+-static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
 -
+-/****************************************************************************/
 -
--/* Free memory returned from module_alloc */
--void module_free(struct module *mod, void *module_region)
+-static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
 -{
--	vfree(module_region);
--	/* FIXME: If module_region == mod->init_region, trim exception
--           table entries. */
--}
+-	/* Purge all ways in a particular block of sets, specified by the base
+-	   set number and number of sets.  Can handle wrap-around, if that's
+-	   needed.  */
 -
--/* We don't need anything special. */
--int module_frob_arch_sections(Elf_Ehdr *hdr,
--			      Elf_Shdr *sechdrs,
--			      char *secstrings,
--			      struct module *mod)
--{
--	return 0;
--}
+-	int dummy_buffer_base_set;
+-	unsigned long long eaddr, eaddr0, eaddr1;
+-	int j;
+-	int set_offset;
 -
--int apply_relocate_add(Elf32_Shdr *sechdrs,
--		   const char *strtab,
--		   unsigned int symindex,
--		   unsigned int relsec,
--		   struct module *me)
--{
--	unsigned int i;
--	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
--	Elf32_Sym *sym;
--	Elf32_Addr relocation;
--	uint32_t *location;
--	int align;
--	int is_shmedia;
+-	dummy_buffer_base_set = ((int)&dummy_alloco_area & cpu_data->dcache.idx_mask) >> cpu_data->dcache.entry_shift;
+-	set_offset = sets_to_purge_base - dummy_buffer_base_set;
 -
--	DEBUGP("Applying relocate section %u to %u\n", relsec,
--	       sechdrs[relsec].sh_info);
--	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
--		/* This is where to make the change */
--		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
--			+ rel[i].r_offset;
--		/* This is the symbol it is referring to.  Note that all
--		   undefined symbols have been resolved.  */
--		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
--			+ ELF32_R_SYM(rel[i].r_info);
--		relocation = sym->st_value + rel[i].r_addend;
--		align = (int)location & 3;
+-	for (j=0; j<n_sets; j++, set_offset++) {
+-		set_offset &= (cpu_data->dcache.sets - 1);
+-		eaddr0 = (unsigned long long)dummy_alloco_area + (set_offset << cpu_data->dcache.entry_shift);
 -
--		/* For text addresses, bit2 of the st_other field indicates
--		 * whether the symbol is SHmedia (1) or SHcompact (0).  If
--		 * SHmedia, the LSB of the symbol needs to be asserted
--		 * for the CPU to be in SHmedia mode when it starts executing
--		 * the branch target. */
--		is_shmedia = (sym->st_other & 4) ? 1 : 0;
--		if (is_shmedia) {
--			relocation |= 1;
+-		/* Do one alloco which hits the required set per cache way.  For
+-		   write-back mode, this will purge the #ways resident lines.   There's
+-		   little point unrolling this loop because the allocos stall more if
+-		   they're too close together. */
+-		eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
+-		for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
+-			asm __volatile__ ("alloco %0, 0" : : "r" (eaddr));
+-			asm __volatile__ ("synco"); /* TAKum03020 */
 -		}
 -
--		switch (ELF32_R_TYPE(rel[i].r_info)) {
--		case R_SH_DIR32:
--			DEBUGP("R_SH_DIR32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
--			*location += relocation;
--			break;
--		case R_SH_REL32:
--			DEBUGP("R_SH_REL32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
--			relocation -= (Elf32_Addr) location;
--			*location += relocation;
--			break;
--		case R_SH_IMM_LOW16:
--			DEBUGP("R_SH_IMM_LOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
--			*location = (*location & ~0x3fffc00) |
--				((relocation & 0xffff) << 10);
--			break;
--		case R_SH_IMM_MEDLOW16:
--			DEBUGP("R_SH_IMM_MEDLOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
--			*location = (*location & ~0x3fffc00) |
--				(((relocation >> 16) & 0xffff) << 10);
--			break;
--		case R_SH_IMM_LOW16_PCREL:
--			DEBUGP("R_SH_IMM_LOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
--			relocation -= (Elf32_Addr) location;
--			*location = (*location & ~0x3fffc00) |
--				((relocation & 0xffff) << 10);
--			break;
--		case R_SH_IMM_MEDLOW16_PCREL:
--			DEBUGP("R_SH_IMM_MEDLOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
--			relocation -= (Elf32_Addr) location;
--			*location = (*location & ~0x3fffc00) |
--				(((relocation >> 16) & 0xffff) << 10);
--			break;
--		default:
--			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
--			       me->name, ELF32_R_TYPE(rel[i].r_info));
--			return -ENOEXEC;
+-		eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
+-		for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
+-			/* Load from each address.  Required because alloco is a NOP if
+-			   the cache is write-through.  Write-through is a config option. */
+-			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
+-				*(volatile unsigned char *)(int)eaddr;
 -		}
 -	}
--	return 0;
--}
 -
--int apply_relocate(Elf32_Shdr *sechdrs,
--		       const char *strtab,
--		       unsigned int symindex,
--		       unsigned int relsec,
--		       struct module *me)
--{
--	printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
--	       me->name);
--	return -ENOEXEC;
--}
+-	/* Don't use OCBI to invalidate the lines.  That costs cycles directly.
+-	   If the dummy block is just left resident, it will naturally get
+-	   evicted as required.  */
 -
--int module_finalize(const Elf_Ehdr *hdr,
--		    const Elf_Shdr *sechdrs,
--		    struct module *me)
--{
--	return 0;
+-	return;
 -}
 -
--void module_arch_cleanup(struct module *mod)
+-/****************************************************************************/
+-
+-static void sh64_dcache_purge_all(void)
 -{
--}
+-	/* Purge the entire contents of the dcache.  The most efficient way to
+-	   achieve this is to use alloco instructions on a region of unused
+-	   memory equal in size to the cache, thereby causing the current
+-	   contents to be discarded by natural eviction.  The alternative,
+-	   namely reading every tag, setting up a mapping for the corresponding
+-	   page and doing an OCBP for the line, would be much more expensive.
+-	   */
 -
-diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c
-deleted file mode 100644
-index b4d9534..0000000
---- a/arch/sh64/kernel/pci_sh5.c
-+++ /dev/null
-@@ -1,536 +0,0 @@
--/*
-- * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
-- * Copyright (C) 2003, 2004 Paul Mundt
-- * Copyright (C) 2004 Richard Curnow
-- *
-- * May be copied or modified under the terms of the GNU General Public
-- * License.  See linux/COPYING for more information.
-- *
-- * Support functions for the SH5 PCI hardware.
-- */
+-	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
 -
--#include <linux/kernel.h>
--#include <linux/rwsem.h>
--#include <linux/smp.h>
--#include <linux/interrupt.h>
--#include <linux/init.h>
--#include <linux/errno.h>
--#include <linux/pci.h>
--#include <linux/delay.h>
--#include <linux/types.h>
--#include <asm/pci.h>
--#include <linux/irq.h>
+-	return;
 -
--#include <asm/io.h>
--#include <asm/hardware.h>
--#include "pci_sh5.h"
+-}
 -
--static unsigned long pcicr_virt;
--unsigned long pciio_virt;
+-/****************************************************************************/
 -
--static void __init pci_fixup_ide_bases(struct pci_dev *d)
+-static void sh64_dcache_purge_kernel_range(unsigned long start, unsigned long end)
 -{
--	int i;
--
--	/*
--	 * PCI IDE controllers use non-standard I/O port decoding, respect it.
--	 */
--	if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
--		return;
--	printk("PCI: IDE base address fixup for %s\n", pci_name(d));
--	for(i=0; i<4; i++) {
--		struct resource *r = &d->resource[i];
--		if ((r->start & ~0x80) == 0x374) {
--			r->start |= 2;
--			r->end = r->start;
--		}
+-	/* Purge the range of addresses [start,end] from the D-cache.  The
+-	   addresses lie in the superpage mapping.  There's no harm if we
+-	   overpurge at either end - just a small performance loss. */
+-	unsigned long long ullend, addr, aligned_start;
+-#if (NEFF == 32)
+-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
+-#else
+-#error "NEFF != 32"
+-#endif
+-	aligned_start &= L1_CACHE_ALIGN_MASK;
+-	addr = aligned_start;
+-#if (NEFF == 32)
+-	ullend = (unsigned long long) (signed long long) (signed long) end;
+-#else
+-#error "NEFF != 32"
+-#endif
+-	while (addr <= ullend) {
+-		asm __volatile__ ("ocbp %0, 0" : : "r" (addr));
+-		addr += L1_CACHE_BYTES;
 -	}
+-	return;
 -}
--DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
 -
--char * __devinit pcibios_setup(char *str)
--{
--	return str;
--}
+-/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
+-   anything else in the kernel */
+-#define MAGIC_PAGE0_START 0xffffffffec000000ULL
 -
--/* Rounds a number UP to the nearest power of two. Used for
-- * sizing the PCI window.
-- */
--static u32 __init r2p2(u32 num)
+-static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned long eaddr)
 -{
--	int i = 31;
--	u32 tmp = num;
--
--	if (num == 0)
--		return 0;
+-	/* Purge the physical page 'paddr' from the cache.  It's known that any
+-	   cache lines requiring attention have the same page colour as the
+-	   address 'eaddr'.
 -
--	do {
--		if (tmp & (1 << 31))
--			break;
--		i--;
--		tmp <<= 1;
--	} while (i >= 0);
+-	   This relies on the fact that the D-cache matches on physical tags
+-	   when no virtual tag matches.  So we create an alias for the original
+-	   page and purge through that.  (Alternatively, we could have done
+-	   this by switching ASID to match the original mapping and purged
+-	   through that, but that involves ASID switching cost + probably a
+-	   TLBMISS + refill anyway.)
+-	   */
 -
--	tmp = 1 << i;
--	/* If the original number isn't a power of 2, round it up */
--	if (tmp != num)
--		tmp <<= 1;
+-	unsigned long long magic_page_start;
+-	unsigned long long magic_eaddr, magic_eaddr_end;
 -
--	return tmp;
--}
+-	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
 -
--extern unsigned long long memory_start, memory_end;
+-	/* As long as the kernel is not pre-emptible, this doesn't need to be
+-	   under cli/sti. */
 -
--int __init sh5pci_init(unsigned memStart, unsigned memSize)
--{
--	u32 lsr0;
--	u32 uval;
+-	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
 -
--	pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR");
--	if (!pcicr_virt) {
--		panic("Unable to remap PCICR\n");
+-	magic_eaddr = magic_page_start;
+-	magic_eaddr_end = magic_eaddr + PAGE_SIZE;
+-	while (magic_eaddr < magic_eaddr_end) {
+-		/* Little point in unrolling this loop - the OCBPs are blocking
+-		   and won't go any quicker (i.e. the loop overhead is parallel
+-		   to part of the OCBP execution.) */
+-		asm __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
+-		magic_eaddr += L1_CACHE_BYTES;
 -	}
 -
--	pciio_virt = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO");
--	if (!pciio_virt) {
--		panic("Unable to remap PCIIO\n");
--	}
+-	sh64_teardown_dtlb_cache_slot();
+-}
 -
--	pr_debug("Register base address is 0x%08lx\n", pcicr_virt);
+-/****************************************************************************/
 -
--	/* Clear snoop registers */
--        SH5PCI_WRITE(CSCR0, 0);
--        SH5PCI_WRITE(CSCR1, 0);
+-static void sh64_dcache_purge_phy_page(unsigned long paddr)
+-{
+-	/* Purge a page given its physical start address, by creating a
+-	   temporary 1 page mapping and purging across that.  Even if we know
+-	   the virtual address (& vma or mm) of the page, the method here is
+-	   more elegant because it avoids issues of coping with page faults on
+-	   the purge instructions (i.e. no special-case code required in the
+-	   critical path in the TLB miss handling). */
 -
--	pr_debug("Wrote to reg\n");
+-	unsigned long long eaddr_start, eaddr, eaddr_end;
+-	int i;
 -
--        /* Switch off interrupts */
--        SH5PCI_WRITE(INTM,  0);
--        SH5PCI_WRITE(AINTM, 0);
--        SH5PCI_WRITE(PINTM, 0);
+-	/* As long as the kernel is not pre-emptible, this doesn't need to be
+-	   under cli/sti. */
 -
--        /* Set bus active, take it out of reset */
--        uval = SH5PCI_READ(CR);
+-	eaddr_start = MAGIC_PAGE0_START;
+-	for (i=0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
+-		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
 -
--	/* Set command Register */
--        SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE | CR_PFCS | CR_BMAM);
+-		eaddr = eaddr_start;
+-		eaddr_end = eaddr + PAGE_SIZE;
+-		while (eaddr < eaddr_end) {
+-			asm __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
+-			eaddr += L1_CACHE_BYTES;
+-		}
 -
--	uval=SH5PCI_READ(CR);
--        pr_debug("CR is actually 0x%08x\n",uval);
+-		sh64_teardown_dtlb_cache_slot();
+-		eaddr_start += PAGE_SIZE;
+-	}
+-}
 -
--        /* Allow it to be a master */
--	/* NB - WE DISABLE I/O ACCESS to stop overlap */
--        /* set WAIT bit to enable stepping, an attempt to improve stability */
--	SH5PCI_WRITE_SHORT(CSR_CMD,
--			    PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_WAIT);
+-static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+-				unsigned long addr, unsigned long end)
+-{
+-	pgd_t *pgd;
+-	pmd_t *pmd;
+-	pte_t *pte;
+-	pte_t entry;
+-	spinlock_t *ptl;
+-	unsigned long paddr;
 -
--        /*
--        ** Set translation mapping memory in order to convert the address
--        ** used for the main bus, to the PCI internal address.
--        */
--        SH5PCI_WRITE(MBR,0x40000000);
+-	if (!mm)
+-		return; /* No way to find physical address of page */
 -
--        /* Always set the max size 512M */
--        SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
+-	pgd = pgd_offset(mm, addr);
+-	if (pgd_bad(*pgd))
+-		return;
 -
--        /*
--        ** I/O addresses are mapped at internal PCI specific address
--        ** as is described into the configuration bridge table.
--        ** These are changed to 0, to allow cards that have legacy
--        ** io such as vga to function correctly. We set the SH5 IOBAR to
--        ** 256K, which is a bit big as we can only have 64K of address space
--        */
+-	pmd = pmd_offset(pgd, addr);
+-	if (pmd_none(*pmd) || pmd_bad(*pmd))
+-		return;
 -
--        SH5PCI_WRITE(IOBR,0x0);
+-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+-	do {
+-		entry = *pte;
+-		if (pte_none(entry) || !pte_present(entry))
+-			continue;
+-		paddr = pte_val(entry) & PAGE_MASK;
+-		sh64_dcache_purge_coloured_phy_page(paddr, addr);
+-	} while (pte++, addr += PAGE_SIZE, addr != end);
+-	pte_unmap_unlock(pte - 1, ptl);
+-}
+-/****************************************************************************/
 -
--	pr_debug("PCI:Writing 0x%08x to IOBR\n",0);
+-static void sh64_dcache_purge_user_range(struct mm_struct *mm,
+-			  unsigned long start, unsigned long end)
+-{
+-	/* There are at least 5 choices for the implementation of this, with
+-	   pros (+), cons(-), comments(*):
 -
--        /* Set up a 256K window. Totally pointless waste  of address space */
--        SH5PCI_WRITE(IOBMR,0);
--	pr_debug("PCI:Writing 0x%08x to IOBMR\n",0);
+-	   1. ocbp each line in the range through the original user's ASID
+-	      + no lines spuriously evicted
+-	      - tlbmiss handling (must either handle faults on demand => extra
+-		special-case code in tlbmiss critical path), or map the page in
+-		advance (=> flush_tlb_range in advance to avoid multiple hits)
+-	      - ASID switching
+-	      - expensive for large ranges
 -
--	/* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec. Ideally,
--         * we would want to map the I/O region somewhere, but it is so big this is not
--         * that easy!
--         */
--	SH5PCI_WRITE(CSR_IBAR0,~0);
--	/* Set memory size value */
--        memSize = memory_end - memory_start;
+-	   2. temporarily map each page in the range to a special effective
+-	      address and ocbp through the temporary mapping; relies on the
+-	      fact that SH-5 OCB* always do TLB lookup and match on ptags (they
+-	      never look at the etags)
+-	      + no spurious evictions
+-	      - expensive for large ranges
+-	      * surely cheaper than (1)
 -
--        /* Now we set up the mbars so the PCI bus can see the memory of the machine */
--        if (memSize < (1024 * 1024)) {
--                printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%x?\n", memSize);
--                return -EINVAL;
--        }
+-	   3. walk all the lines in the cache, check the tags, if a match
+-	      occurs create a page mapping to ocbp the line through
+-	      + no spurious evictions
+-	      - tag inspection overhead
+-	      - (especially for small ranges)
+-	      - potential cost of setting up/tearing down page mapping for
+-		every line that matches the range
+-	      * cost partly independent of range size
 -
--        /* Set LSR 0 */
--        lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 : ((r2p2(memSize) - 0x100000) | 0x1);
--        SH5PCI_WRITE(LSR0, lsr0);
+-	   4. walk all the lines in the cache, check the tags, if a match
+-	      occurs use 4 * alloco to purge the line (+3 other probably
+-	      innocent victims) by natural eviction
+-	      + no tlb mapping overheads
+-	      - spurious evictions
+-	      - tag inspection overhead
 -
--	pr_debug("PCI:Writing 0x%08x to LSR0\n",lsr0);
+-	   5. implement like flush_cache_all
+-	      + no tag inspection overhead
+-	      - spurious evictions
+-	      - bad for small ranges
 -
--        /* Set MBAR 0 */
--        SH5PCI_WRITE(CSR_MBAR0, memory_start);
--        SH5PCI_WRITE(LAR0, memory_start);
+-	   (1) can be ruled out as more expensive than (2).  (2) appears best
+-	   for small ranges.  The choice between (3), (4) and (5) for large
+-	   ranges and the range size for the large/small boundary need
+-	   benchmarking to determine.
 -
--        SH5PCI_WRITE(CSR_MBAR1,0);
--        SH5PCI_WRITE(LAR1,0);
--        SH5PCI_WRITE(LSR1,0);
+-	   For now use approach (2) for small ranges and (5) for large ones.
 -
--	pr_debug("PCI:Writing 0x%08llx to CSR_MBAR0\n",memory_start);
--	pr_debug("PCI:Writing 0x%08llx to LAR0\n",memory_start);
+-	   */
 -
--        /* Enable the PCI interrupts on the device */
--        SH5PCI_WRITE(INTM,  ~0);
--        SH5PCI_WRITE(AINTM, ~0);
--        SH5PCI_WRITE(PINTM, ~0);
+-	int n_pages;
 -
--	pr_debug("Switching on all error interrupts\n");
+-	n_pages = ((end - start) >> PAGE_SHIFT);
+-	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
+-#if 1
+-		sh64_dcache_purge_all();
+-#else
+-		unsigned long long set, way;
+-		unsigned long mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
+-		for (set = 0; set < cpu_data->dcache.sets; set++) {
+-			unsigned long long set_base_config_addr = CACHE_OC_ADDRESS_ARRAY + (set << cpu_data->dcache.set_shift);
+-			for (way = 0; way < cpu_data->dcache.ways; way++) {
+-				unsigned long long config_addr = set_base_config_addr + (way << cpu_data->dcache.way_step_shift);
+-				unsigned long long tag0;
+-				unsigned long line_valid;
 -
--        return(0);
--}
+-				asm __volatile__("getcfg %1, 0, %0" : "=r" (tag0) : "r" (config_addr));
+-				line_valid = tag0 & SH_CACHE_VALID;
+-				if (line_valid) {
+-					unsigned long cache_asid;
+-					unsigned long epn;
 -
--static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
--			int size, u32 *val)
--{
--	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
+-					cache_asid = (tag0 & cpu_data->dcache.asid_mask) >> cpu_data->dcache.asid_shift;
+-					/* The next line needs some
+-					   explanation.  The virtual tags
+-					   encode bits [31:13] of the virtual
+-					   address, bit [12] of the 'tag' being
+-					   implied by the cache set index. */
+-					epn = (tag0 & cpu_data->dcache.epn_mask) | ((set & 0x80) << cpu_data->dcache.entry_shift);
 -
--	switch (size) {
--		case 1:
--			*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
--			break;
--		case 2:
--			*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
--			break;
--		case 4:
--			*val = SH5PCI_READ(PDR);
--			break;
+-					if ((cache_asid == mm_asid) && (start <= epn) && (epn < end)) {
+-						/* TODO : could optimise this
+-						   call by batching multiple
+-						   adjacent sets together. */
+-						sh64_dcache_purge_sets(set, 1);
+-						break; /* Don't waste time inspecting other ways for this set */
+-					}
+-				}
+-			}
+-		}
+-#endif
+-	} else {
+-		/* Small range, covered by a single page table page */
+-		start &= PAGE_MASK;	/* should already be so */
+-		end = PAGE_ALIGN(end);	/* should already be so */
+-		sh64_dcache_purge_user_pages(mm, start, end);
 -	}
--
--	return PCIBIOS_SUCCESSFUL;
+-	return;
 -}
 -
--static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
--			 int size, u32 val)
+-static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end)
 -{
--	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
+-	unsigned long long aligned_start;
+-	unsigned long long ull_end;
+-	unsigned long long addr;
 -
--	switch (size) {
--		case 1:
--			SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
--			break;
--		case 2:
--			SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
--			break;
--		case 4:
--			SH5PCI_WRITE(PDR, val);
--			break;
--	}
+-	ull_end = end;
 -
--	return PCIBIOS_SUCCESSFUL;
--}
+-	/* Just wback over the range using the natural addresses.  TLB miss
+-	   handling will be OK (TBC) : the range has just been written to by
+-	   the signal frame setup code, so the PTEs must exist.
 -
--static struct pci_ops pci_config_ops = {
--	.read =		sh5pci_read,
--	.write =	sh5pci_write,
--};
+-	   Note, if we have CONFIG_PREEMPT and get preempted inside this loop,
+-	   it doesn't matter, even if the pid->ASID mapping changes whilst
+-	   we're away.  In that case the cache will have been flushed when the
+-	   mapping was renewed.  So the writebacks below will be nugatory (and
+-	   we'll doubtless have to fault the TLB entry/ies in again with the
+-	   new ASID), but it's a rare case.
+-	   */
+-	aligned_start = start & L1_CACHE_ALIGN_MASK;
+-	addr = aligned_start;
+-	while (addr < ull_end) {
+-		asm __volatile__ ("ocbwb %0, 0" : : "r" (addr));
+-		addr += L1_CACHE_BYTES;
+-	}
+-}
 -
--/* Everything hangs off this */
--static struct pci_bus *pci_root_bus;
+-/****************************************************************************/
 -
+-/* These *MUST* lie in an area of virtual address space that's otherwise unused. */
+-#define UNIQUE_EADDR_START 0xe0000000UL
+-#define UNIQUE_EADDR_END   0xe8000000UL
 -
--static u8 __init no_swizzle(struct pci_dev *dev, u8 * pin)
+-static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr)
 -{
--	pr_debug("swizzle for dev %d on bus %d slot %d pin is %d\n",
--	         dev->devfn,dev->bus->number, PCI_SLOT(dev->devfn),*pin);
--	return PCI_SLOT(dev->devfn);
--}
+-	/* Given a physical address paddr, and a user virtual address
+-	   user_eaddr which will eventually be mapped to it, create a one-off
+-	   kernel-private eaddr mapped to the same paddr.  This is used for
+-	   creating special destination pages for copy_user_page and
+-	   clear_user_page */
 -
--static inline u8 bridge_swizzle(u8 pin, u8 slot)
--{
--	return (((pin-1) + slot) % 4) + 1;
+-	static unsigned long current_pointer = UNIQUE_EADDR_START;
+-	unsigned long coloured_pointer;
+-
+-	if (current_pointer == UNIQUE_EADDR_END) {
+-		sh64_dcache_purge_all();
+-		current_pointer = UNIQUE_EADDR_START;
+-	}
+-
+-	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | (user_eaddr & CACHE_OC_SYN_MASK);
+-	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
+-
+-	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
+-
+-	return coloured_pointer;
 -}
 -
--u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp)
+-/****************************************************************************/
+-
+-static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address)
 -{
--	if (dev->bus->number != 0) {
--		u8 pin = *pinp;
--		do {
--			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
--			/* Move up the chain of bridges. */
--			dev = dev->bus->self;
--		} while (dev->bus->self);
--		*pinp = pin;
+-	void *coloured_to;
 -
--		/* The slot is the slot of the last bridge. */
--	}
+-	/* Discard any existing cache entries of the wrong colour.  These are
+-	   present quite often, if the kernel has recently used the page
+-	   internally, then given it up, then it's been allocated to the user.
+-	   */
+-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
 -
--	return PCI_SLOT(dev->devfn);
--}
+-	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
+-	sh64_page_copy(from, coloured_to);
 -
--/* This needs to be shunted out of here into the board specific bit */
+-	sh64_teardown_dtlb_cache_slot();
+-}
 -
--static int __init map_cayman_irq(struct pci_dev *dev, u8 slot, u8 pin)
+-static void sh64_clear_user_page_coloured(void *to, unsigned long address)
 -{
--	int result = -1;
+-	void *coloured_to;
 -
--	/* The complication here is that the PCI IRQ lines from the Cayman's 2
--	   5V slots get into the CPU via a different path from the IRQ lines
--	   from the 3 3.3V slots.  Thus, we have to detect whether the card's
--	   interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling'
--	   at the point where we cross from 5V to 3.3V is not the normal case.
+-	/* Discard any existing kernel-originated lines of the wrong colour (as
+-	   above) */
+-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
 -
--	   The added complication is that we don't know that the 5V slots are
--	   always bus 2, because a card containing a PCI-PCI bridge may be
--	   plugged into a 3.3V slot, and this changes the bus numbering.
+-	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
+-	sh64_page_clear(coloured_to);
 -
--	   Also, the Cayman has an intermediate PCI bus that goes a custom
--	   expansion board header (and to the secondary bridge).  This bus has
--	   never been used in practice.
+-	sh64_teardown_dtlb_cache_slot();
+-}
 -
--	   The 1ary onboard PCI-PCI bridge is device 3 on bus 0
--	   The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of the 1ary bridge.
--	   */
+-#endif /* !CONFIG_DCACHE_DISABLED */
 -
--	struct slot_pin {
--		int slot;
--		int pin;
--	} path[4];
--	int i=0;
+-/****************************************************************************/
 -
--	while (dev->bus->number > 0) {
+-/*##########################################################################
+-			    EXTERNALLY CALLABLE API.
+-  ##########################################################################*/
 -
--		slot = path[i].slot = PCI_SLOT(dev->devfn);
--		pin = path[i].pin = bridge_swizzle(pin, slot);
--		dev = dev->bus->self;
--		i++;
--		if (i > 3) panic("PCI path to root bus too long!\n");
--	}
+-/* These functions are described in Documentation/cachetlb.txt.
+-   Each one of these functions varies in behaviour depending on whether the
+-   I-cache and/or D-cache are configured out.
 -
--	slot = PCI_SLOT(dev->devfn);
--	/* This is the slot on bus 0 through which the device is eventually
--	   reachable. */
+-   Note that the Linux term 'flush' corresponds to what is termed 'purge' in
+-   the sh/sh64 jargon for the D-cache, i.e. write back dirty data then
+-   invalidate the cache lines, and 'invalidate' for the I-cache.
+-   */
 -
--	/* Now work back up. */
--	if ((slot < 3) || (i == 0)) {
--		/* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
--		   swizzle now. */
--		result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
--	} else {
--		i--;
--		slot = path[i].slot;
--		pin  = path[i].pin;
--		if (slot > 0) {
--			panic("PCI expansion bus device found - not handled!\n");
--		} else {
--			if (i > 0) {
--				/* 5V slots */
--				i--;
--				slot = path[i].slot;
--				pin  = path[i].pin;
--				/* 'pin' was swizzled earlier wrt slot, don't do it again. */
--				result = IRQ_P2INTA + (pin - 1);
--			} else {
--				/* IRQ for 2ary PCI-PCI bridge : unused */
--				result = -1;
--			}
--		}
--	}
+-#undef FLUSH_TRACE
 -
--	return result;
+-void flush_cache_all(void)
+-{
+-	/* Invalidate the entire contents of both caches, after writing back to
+-	   memory any dirty data from the D-cache. */
+-	sh64_dcache_purge_all();
+-	sh64_icache_inv_all();
 -}
 -
--static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
+-/****************************************************************************/
+-
+-void flush_cache_mm(struct mm_struct *mm)
 -{
--	struct pt_regs *regs = get_irq_regs();
--	unsigned pci_int, pci_air, pci_cir, pci_aint;
+-	/* Invalidate an entire user-address space from both caches, after
+-	   writing back dirty data (e.g. for shared mmap etc). */
 -
--	pci_int = SH5PCI_READ(INT);
--	pci_cir = SH5PCI_READ(CIR);
--	pci_air = SH5PCI_READ(AIR);
+-	/* This could be coded selectively by inspecting all the tags then
+-	   doing 4*alloco on any set containing a match (as for
+-	   flush_cache_range), but fork/exit/execve (where this is called from)
+-	   are expensive anyway. */
 -
--	if (pci_int) {
--		printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
--		printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
--		printk("PCI AIR -> 0x%x\n", pci_air);
--		printk("PCI CIR -> 0x%x\n", pci_cir);
--		SH5PCI_WRITE(INT, ~0);
--	}
+-	/* Have to do a purge here, despite the comments re I-cache below.
+-	   There could be odd-coloured dirty data associated with the mm still
+-	   in the cache - if this gets written out through natural eviction
+-	   after the kernel has reused the page there will be chaos.
+-	   */
 -
--	pci_aint = SH5PCI_READ(AINT);
--	if (pci_aint) {
--		printk("PCI ARB INTERRUPT!\n");
--		printk("PCI AINT -> 0x%x\n", pci_aint);
--		printk("PCI AIR -> 0x%x\n", pci_air);
--		printk("PCI CIR -> 0x%x\n", pci_cir);
--		SH5PCI_WRITE(AINT, ~0);
--	}
+-	sh64_dcache_purge_all();
 -
--	return IRQ_HANDLED;
+-	/* The mm being torn down won't ever be active again, so any Icache
+-	   lines tagged with its ASID won't be visible for the rest of the
+-	   lifetime of this ASID cycle.  Before the ASID gets reused, there
+-	   will be a flush_cache_all.  Hence we don't need to touch the
+-	   I-cache.  This is similar to the lack of action needed in
+-	   flush_tlb_mm - see fault.c. */
 -}
 -
--static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
--{
--	printk("SERR IRQ\n");
--
--	return IRQ_NONE;
--}
+-/****************************************************************************/
 -
--static void __init
--pcibios_size_bridge(struct pci_bus *bus, struct resource *ior,
--		    struct resource *memr)
+-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+-		       unsigned long end)
 -{
--	struct resource io_res, mem_res;
--	struct pci_dev *dev;
--	struct pci_dev *bridge = bus->self;
--	struct list_head *ln;
+-	struct mm_struct *mm = vma->vm_mm;
 -
--	if (!bridge)
--		return;	/* host bridge, nothing to do */
+-	/* Invalidate (from both caches) the range [start,end) of virtual
+-	   addresses from the user address space specified by mm, after writing
+-	   back any dirty data.
 -
--	/* set reasonable default locations for pcibios_align_resource */
--	io_res.start = PCIBIOS_MIN_IO;
--	mem_res.start = PCIBIOS_MIN_MEM;
+-	   Note, 'end' is 1 byte beyond the end of the range to flush. */
 -
--	io_res.end = io_res.start;
--	mem_res.end = mem_res.start;
+-	sh64_dcache_purge_user_range(mm, start, end);
+-	sh64_icache_inv_user_page_range(mm, start, end);
+-}
 -
--	/* Collect information about how our direct children are laid out. */
--	for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
--		int i;
--		dev = pci_dev_b(ln);
+-/****************************************************************************/
 -
--		/* Skip bridges for now */
--		if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
--			continue;
+-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
+-{
+-	/* Invalidate any entries in either cache for the vma within the user
+-	   address space vma->vm_mm for the page starting at virtual address
+-	   'eaddr'.   This seems to be used primarily in breaking COW.  Note,
+-	   the I-cache must be searched too in case the page in question is
+-	   both writable and being executed from (e.g. stack trampolines.)
 -
--		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
--			struct resource res;
--			unsigned long size;
+-	   Note, this is called with pte lock held.
+-	   */
 -
--			memcpy(&res, &dev->resource[i], sizeof(res));
--			size = res.end - res.start + 1;
+-	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 -
--			if (res.flags & IORESOURCE_IO) {
--				res.start = io_res.end;
--				pcibios_align_resource(dev, &res, size, 0);
--				io_res.end = res.start + size;
--			} else if (res.flags & IORESOURCE_MEM) {
--				res.start = mem_res.end;
--				pcibios_align_resource(dev, &res, size, 0);
--				mem_res.end = res.start + size;
--			}
--		}
+-	if (vma->vm_flags & VM_EXEC) {
+-		sh64_icache_inv_user_page(vma, eaddr);
 -	}
+-}
 -
--	/* And for all of the subordinate busses. */
--	for (ln=bus->children.next; ln != &bus->children; ln=ln->next)
--		pcibios_size_bridge(pci_bus_b(ln), &io_res, &mem_res);
+-/****************************************************************************/
 -
--	/* turn the ending locations into sizes (subtract start) */
--	io_res.end -= io_res.start;
--	mem_res.end -= mem_res.start;
+-#ifndef CONFIG_DCACHE_DISABLED
 -
--	/* Align the sizes up by bridge rules */
--	io_res.end = ALIGN(io_res.end, 4*1024) - 1;
--	mem_res.end = ALIGN(mem_res.end, 1*1024*1024) - 1;
+-void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
+-{
+-	/* 'from' and 'to' are kernel virtual addresses (within the superpage
+-	   mapping of the physical RAM).  'address' is the user virtual address
+-	   where the copy 'to' will be mapped after.  This allows a custom
+-	   mapping to be used to ensure that the new copy is placed in the
+-	   right cache sets for the user to see it without having to bounce it
+-	   out via memory.  Note however : the call to flush_page_to_ram in
+-	   (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
+-	   very important case!
 -
--	/* Adjust the bridge's allocation requirements */
--	bridge->resource[0].end = bridge->resource[0].start + io_res.end;
--	bridge->resource[1].end = bridge->resource[1].start + mem_res.end;
+-	   TBD : can we guarantee that on every call, any cache entries for
+-	   'from' are in the same colour sets as 'address' also?  i.e. is this
+-	   always used just to deal with COW?  (I suspect not). */
 -
--	bridge->resource[PCI_BRIDGE_RESOURCES].end =
--	    bridge->resource[PCI_BRIDGE_RESOURCES].start + io_res.end;
--	bridge->resource[PCI_BRIDGE_RESOURCES+1].end =
--	    bridge->resource[PCI_BRIDGE_RESOURCES+1].start + mem_res.end;
+-	/* There are two possibilities here for when the page 'from' was last accessed:
+-	   * by the kernel : this is OK, no purge required.
+-	   * by the/a user (e.g. for break_COW) : need to purge.
 -
--	/* adjust parent's resource requirements */
--	if (ior) {
--		ior->end = ALIGN(ior->end, 4*1024);
--		ior->end += io_res.end;
+-	   If the potential user mapping at 'address' is the same colour as
+-	   'from' there is no need to purge any cache lines from the 'from'
+-	   page mapped into cache sets of colour 'address'.  (The copy will be
+-	   accessing the page through 'from').
+-	   */
+-
+-	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) {
+-		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
 -	}
 -
--	if (memr) {
--		memr->end = ALIGN(memr->end, 1*1024*1024);
--		memr->end += mem_res.end;
+-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
+-		/* No synonym problem on destination */
+-		sh64_page_copy(from, to);
+-	} else {
+-		sh64_copy_user_page_coloured(to, from, address);
 -	}
+-
+-	/* Note, don't need to flush 'from' page from the cache again - it's
+-	   done anyway by the generic code */
 -}
 -
--static void __init pcibios_size_bridges(void)
+-void clear_user_page(void *to, unsigned long address, struct page *page)
 -{
--	struct resource io_res, mem_res;
--
--	memset(&io_res, 0, sizeof(io_res));
--	memset(&mem_res, 0, sizeof(mem_res));
+-	/* 'to' is a kernel virtual address (within the superpage
+-	   mapping of the physical RAM).  'address' is the user virtual address
+-	   where the 'to' page will be mapped after.  This allows a custom
+-	   mapping to be used to ensure that the new copy is placed in the
+-	   right cache sets for the user to see it without having to bounce it
+-	   out via memory.
+-	*/
 -
--	pcibios_size_bridge(pci_root_bus, &io_res, &mem_res);
+-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
+-		/* No synonym problem on destination */
+-		sh64_page_clear(to);
+-	} else {
+-		sh64_clear_user_page_coloured(to, address);
+-	}
 -}
 -
--static int __init pcibios_init(void)
+-#endif /* !CONFIG_DCACHE_DISABLED */
+-
+-/****************************************************************************/
+-
+-void flush_dcache_page(struct page *page)
 -{
--        if (request_irq(IRQ_ERR, pcish5_err_irq,
--                        IRQF_DISABLED, "PCI Error",NULL) < 0) {
--                printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
--                return -EINVAL;
--        }
+-	sh64_dcache_purge_phy_page(page_to_phys(page));
+-	wmb();
+-}
 -
--        if (request_irq(IRQ_SERR, pcish5_serr_irq,
--                        IRQF_DISABLED, "PCI SERR interrupt", NULL) < 0) {
--                printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
--                return -EINVAL;
--        }
+-/****************************************************************************/
 -
--	/* The pci subsystem needs to know where memory is and how much
--	 * of it there is. I've simply made these globals. A better mechanism
--	 * is probably needed.
--	 */
--	sh5pci_init(__pa(memory_start),
--		     __pa(memory_end) - __pa(memory_start));
+-void flush_icache_range(unsigned long start, unsigned long end)
+-{
+-	/* Flush the range [start,end] of kernel virtual address space from
+-	   the I-cache.  The corresponding range must be purged from the
+-	   D-cache also because the SH-5 doesn't have cache snooping between
+-	   the caches.  The addresses will be visible through the superpage
+-	   mapping, therefore it's guaranteed that there are no cache entries for
+-	   the range in cache sets of the wrong colour.
 -
--	pci_root_bus = pci_scan_bus(0, &pci_config_ops, NULL);
--	pcibios_size_bridges();
--	pci_assign_unassigned_resources();
--	pci_fixup_irqs(no_swizzle, map_cayman_irq);
+-	   Primarily used for cohering the I-cache after a module has
+-	   been loaded.  */
 -
--	return 0;
+-	/* We also make sure to purge the same range from the D-cache since
+-	   flush_page_to_ram() won't be doing this for us! */
+-
+-	sh64_dcache_purge_kernel_range(start, end);
+-	wmb();
+-	sh64_icache_inv_kernel_range(start, end);
 -}
 -
--subsys_initcall(pcibios_init);
+-/****************************************************************************/
 -
--void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+-void flush_icache_user_range(struct vm_area_struct *vma,
+-			struct page *page, unsigned long addr, int len)
 -{
--	struct pci_dev *dev = bus->self;
--	int i;
--
--#if 1
--	if(dev) {
--		for(i=0; i<3; i++) {
--			bus->resource[i] =
--				&dev->resource[PCI_BRIDGE_RESOURCES+i];
--			bus->resource[i]->name = bus->name;
--		}
--		bus->resource[0]->flags |= IORESOURCE_IO;
--		bus->resource[1]->flags |= IORESOURCE_MEM;
+-	/* Flush the range of user (defined by vma->vm_mm) address space
+-	   starting at 'addr' for 'len' bytes from the cache.  The range does
+-	   not straddle a page boundary, the unique physical page containing
+-	   the range is 'page'.  This seems to be used mainly for invalidating
+-	   an address range following a poke into the program text through the
+-	   ptrace() call from another process (e.g. for BRK instruction
+-	   insertion). */
 -
--		/* For now, propagate host limits to the bus;
--		 * we'll adjust them later. */
+-	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
+-	mb();
 -
--#if 1
--		bus->resource[0]->end = 64*1024 - 1 ;
--		bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1;
--		bus->resource[0]->start = PCIBIOS_MIN_IO;
--		bus->resource[1]->start = PCIBIOS_MIN_MEM;
--#else
--		bus->resource[0]->end = 0;
--		bus->resource[1]->end = 0;
--		bus->resource[0]->start =0;
--		bus->resource[1]->start = 0;
--#endif
--		/* Turn off downstream PF memory address range by default */
--		bus->resource[2]->start = 1024*1024;
--		bus->resource[2]->end = bus->resource[2]->start - 1;
+-	if (vma->vm_flags & VM_EXEC) {
+-		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
 -	}
--#endif
+-}
+-
+-/*##########################################################################
+-			ARCH/SH64 PRIVATE CALLABLE API.
+-  ##########################################################################*/
 -
+-void flush_cache_sigtramp(unsigned long start, unsigned long end)
+-{
+-	/* For the address range [start,end), write back the data from the
+-	   D-cache and invalidate the corresponding region of the I-cache for
+-	   the current process.  Used to flush signal trampolines on the stack
+-	   to make them executable. */
+-
+-	sh64_dcache_wback_current_user_range(start, end);
+-	wmb();
+-	sh64_icache_inv_current_user_range(start, end);
 -}
 -
-diff --git a/arch/sh64/kernel/pci_sh5.h b/arch/sh64/kernel/pci_sh5.h
+diff --git a/arch/sh64/mm/consistent.c b/arch/sh64/mm/consistent.c
 deleted file mode 100644
-index c71159d..0000000
---- a/arch/sh64/kernel/pci_sh5.h
+index c439620..0000000
+--- a/arch/sh64/mm/consistent.c
 +++ /dev/null
-@@ -1,107 +0,0 @@
+@@ -1,53 +0,0 @@
 -/*
 - * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
+- * Copyright (C) 2003 Paul Mundt (lethal at linux-sh.org)
 - *
 - * May be copied or modified under the terms of the GNU General Public
 - * License.  See linux/COPYING for more information.
 - *
-- * Definitions for the SH5 PCI hardware.
+- * Dynamic DMA mapping support.
 - */
+-#include <linux/types.h>
+-#include <linux/mm.h>
+-#include <linux/string.h>
+-#include <linux/pci.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/module.h>
+-#include <asm/io.h>
 -
--/* Product ID */
--#define PCISH5_PID		0x350d
--
--/* vendor ID */
--#define PCISH5_VID		0x1054
--
--/* Configuration types */
--#define ST_TYPE0                0x00    /* Configuration cycle type 0 */
--#define ST_TYPE1                0x01    /* Configuration cycle type 1 */
--
--/* VCR data */
--#define PCISH5_VCR_STATUS      0x00
--#define PCISH5_VCR_VERSION     0x08
--
--/*
--** ICR register offsets and bits
--*/
--#define PCISH5_ICR_CR          0x100   /* PCI control register values */
--#define CR_PBAM                 (1<<12)
--#define CR_PFCS                 (1<<11)
--#define CR_FTO                  (1<<10)
--#define CR_PFE                  (1<<9)
--#define CR_TBS                  (1<<8)
--#define CR_SPUE                 (1<<7)
--#define CR_BMAM                 (1<<6)
--#define CR_HOST                 (1<<5)
--#define CR_CLKEN                (1<<4)
--#define CR_SOCS                 (1<<3)
--#define CR_IOCS                 (1<<2)
--#define CR_RSTCTL               (1<<1)
--#define CR_CFINT                (1<<0)
--#define CR_LOCK_MASK            0xa5000000
--
--#define PCISH5_ICR_INT         0x114   /* Interrupt register values      */
--#define INT_MADIM               (1<<2)
--
--#define PCISH5_ICR_LSR0        0X104   /* Local space register values    */
--#define PCISH5_ICR_LSR1        0X108   /* Local space register values    */
--#define PCISH5_ICR_LAR0        0x10c   /* Local address register values  */
--#define PCISH5_ICR_LAR1        0x110   /* Local address register values  */
--#define PCISH5_ICR_INTM        0x118   /* Interrupt mask register values                         */
--#define PCISH5_ICR_AIR         0x11c   /* Interrupt error address information register values    */
--#define PCISH5_ICR_CIR         0x120   /* Interrupt error command information register values    */
--#define PCISH5_ICR_AINT        0x130   /* Interrupt error arbiter interrupt register values      */
--#define PCISH5_ICR_AINTM       0x134   /* Interrupt error arbiter interrupt mask register values */
--#define PCISH5_ICR_BMIR        0x138   /* Interrupt error info register of bus master values     */
--#define PCISH5_ICR_PAR         0x1c0   /* Pio address register values                            */
--#define PCISH5_ICR_MBR         0x1c4   /* Memory space bank register values                      */
--#define PCISH5_ICR_IOBR        0x1c8   /* I/O space bank register values                         */
--#define PCISH5_ICR_PINT        0x1cc   /* power management interrupt register values             */
--#define PCISH5_ICR_PINTM       0x1d0   /* power management interrupt mask register values        */
--#define PCISH5_ICR_MBMR        0x1d8   /* memory space bank mask register values                 */
--#define PCISH5_ICR_IOBMR       0x1dc   /* I/O space bank mask register values                    */
--#define PCISH5_ICR_CSCR0       0x210   /* PCI cache snoop control register 0                     */
--#define PCISH5_ICR_CSCR1       0x214   /* PCI cache snoop control register 1                     */
--#define PCISH5_ICR_PDR         0x220   /* Pio data register values                               */
--
--/* These are configs space registers */
--#define PCISH5_ICR_CSR_VID     0x000	/* Vendor id                           */
--#define PCISH5_ICR_CSR_DID     0x002   /* Device id                           */
--#define PCISH5_ICR_CSR_CMD     0x004   /* Command register                    */
--#define PCISH5_ICR_CSR_STATUS  0x006   /* Status                              */
--#define PCISH5_ICR_CSR_IBAR0   0x010   /* I/O base address register           */
--#define PCISH5_ICR_CSR_MBAR0   0x014   /* First  Memory base address register */
--#define PCISH5_ICR_CSR_MBAR1   0x018   /* Second Memory base address register */
--
--
--
--/* Base address of registers */
--#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
--#define SH5PCI_IO_BASE  (PHYS_PCI_BLOCK + 0x00800000)
--/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG)    */
+-void *consistent_alloc(struct pci_dev *hwdev, size_t size,
+-			   dma_addr_t *dma_handle)
+-{
+-	void *ret;
+-	int gfp = GFP_ATOMIC;
+-        void *vp;
 -
--/* Register selection macro */
--#define PCISH5_ICR_REG(x)                ( pcicr_virt + (PCISH5_ICR_##x))
--/* #define PCISH5_VCR_REG(x)                ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
+-	if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
+-		gfp |= GFP_DMA;
 -
--/* Write I/O functions */
--#define SH5PCI_WRITE(reg,val)        ctrl_outl((u32)(val),PCISH5_ICR_REG(reg))
--#define SH5PCI_WRITE_SHORT(reg,val)  ctrl_outw((u16)(val),PCISH5_ICR_REG(reg))
--#define SH5PCI_WRITE_BYTE(reg,val)   ctrl_outb((u8)(val),PCISH5_ICR_REG(reg))
+-	ret = (void *)__get_free_pages(gfp, get_order(size));
 -
--/* Read I/O functions */
--#define SH5PCI_READ(reg)             ctrl_inl(PCISH5_ICR_REG(reg))
--#define SH5PCI_READ_SHORT(reg)       ctrl_inw(PCISH5_ICR_REG(reg))
--#define SH5PCI_READ_BYTE(reg)        ctrl_inb(PCISH5_ICR_REG(reg))
+-	/* now call our friend ioremap_nocache to give us an uncached area */
+-        vp = ioremap_nocache(virt_to_phys(ret), size);
 -
--/* Set PCI config bits */
--#define SET_CONFIG_BITS(bus,devfn,where)  ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
+-	if (vp != NULL) {
+-		memset(vp, 0, size);
+-		*dma_handle = virt_to_phys(ret);
+-		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
+-	}
 -
--/* Set PCI command register */
--#define CONFIG_CMD(bus, devfn, where)            SET_CONFIG_BITS(bus->number,devfn,where)
+-	return vp;
+-}
+-EXPORT_SYMBOL(consistent_alloc);
 -
--/* Size converters */
--#define PCISH5_MEM_SIZCONV(x)		  (((x / 0x40000) - 1) << 18)
--#define PCISH5_IO_SIZCONV(x)		  (((x / 0x40000) - 1) << 18)
+-void consistent_free(struct pci_dev *hwdev, size_t size,
+-			 void *vaddr, dma_addr_t dma_handle)
+-{
+-	void *alloc;
 -
+-	alloc = phys_to_virt((unsigned long)dma_handle);
+-	free_pages((unsigned long)alloc, get_order(size));
 -
-diff --git a/arch/sh64/kernel/pcibios.c b/arch/sh64/kernel/pcibios.c
+-	iounmap(vaddr);
+-}
+-EXPORT_SYMBOL(consistent_free);
+diff --git a/arch/sh64/mm/extable.c b/arch/sh64/mm/extable.c
 deleted file mode 100644
-index 945920b..0000000
---- a/arch/sh64/kernel/pcibios.c
+index a2e6e05..0000000
+--- a/arch/sh64/mm/extable.c
 +++ /dev/null
-@@ -1,168 +0,0 @@
+@@ -1,80 +0,0 @@
 -/*
-- * $Id: pcibios.c,v 1.1 2001/08/24 12:38:19 dwmw2 Exp $
-- *
-- * arch/sh/kernel/pcibios.c
-- *
-- * Copyright (C) 2002 STMicroelectronics Limited
-- *   Author : David J. McKay
-- *
-- * Copyright (C) 2004 Richard Curnow, SuperH UK Limited
-- *
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
-- * This is GPL'd.
 - *
-- * Provided here are generic versions of:
-- *	pcibios_update_resource()
-- *	pcibios_align_resource()
-- *	pcibios_enable_device()
-- *	pcibios_set_master()
-- *	pcibios_update_irq()
+- * arch/sh64/mm/extable.c
 - *
-- * These functions are collected here to reduce duplication of common
-- * code amongst the many platform-specific PCI support code files.
+- * Copyright (C) 2003 Richard Curnow
+- * Copyright (C) 2003, 2004  Paul Mundt
 - *
-- * Platform-specific files are expected to provide:
-- *	pcibios_fixup_bus()
-- *	pcibios_init()
-- *	pcibios_setup()
-- *	pcibios_fixup_pbus_ranges()
+- * Cloned from the 2.5 SH version..
 - */
+-#include <linux/rwsem.h>
+-#include <linux/module.h>
+-#include <asm/uaccess.h>
 -
--#include <linux/kernel.h>
--#include <linux/pci.h>
--#include <linux/init.h>
--
--void
--pcibios_update_resource(struct pci_dev *dev, struct resource *root,
--			struct resource *res, int resource)
--{
--	u32 new, check;
--	int reg;
+-extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
+-extern void __copy_user_fixup(void);
 -
--	new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
--	if (resource < 6) {
--		reg = PCI_BASE_ADDRESS_0 + 4*resource;
--	} else if (resource == PCI_ROM_RESOURCE) {
--		res->flags |= IORESOURCE_ROM_ENABLE;
--		new |= PCI_ROM_ADDRESS_ENABLE;
--		reg = dev->rom_base_reg;
--	} else {
--		/* Somebody might have asked allocation of a non-standard resource */
--		return;
--	}
+-static const struct exception_table_entry __copy_user_fixup_ex = {
+-	.fixup = (unsigned long)&__copy_user_fixup,
+-};
 -
--	pci_write_config_dword(dev, reg, new);
--	pci_read_config_dword(dev, reg, &check);
--	if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
--		printk(KERN_ERR "PCI: Error while updating region "
--		       "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
--		       new, check);
--	}
--}
+-/* Some functions that may trap due to a bad user-mode address have too many loads
+-   and stores in them to make it at all practical to label each one and put them all in
+-   the main exception table.
 -
--/*
-- * We need to avoid collisions with `mirrored' VGA ports
-- * and other strange ISA hardware, so we always want the
-- * addresses to be allocated in the 0x000-0x0ff region
-- * modulo 0x400.
-- */
--void pcibios_align_resource(void *data, struct resource *res,
--			    resource_size_t size, resource_size_t align)
+-   In particular, the fast memcpy routine is like this.  It's fix-up is just to fall back
+-   to a slow byte-at-a-time copy, which is handled the conventional way.  So it's functionally
+-   OK to just handle any trap occurring in the fast memcpy with that fixup. */
+-static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
 -{
--	if (res->flags & IORESOURCE_IO) {
--		resource_size_t start = res->start;
+-	if ((addr >= (unsigned long)&copy_user_memcpy) &&
+-	    (addr <= (unsigned long)&copy_user_memcpy_end))
+-		return &__copy_user_fixup_ex;
 -
--		if (start & 0x300) {
--			start = (start + 0x3ff) & ~0x3ff;
--			res->start = start;
--		}
--	}
+-	return NULL;
 -}
 -
--static void pcibios_enable_bridge(struct pci_dev *dev)
+-/* Simple binary search */
+-const struct exception_table_entry *
+-search_extable(const struct exception_table_entry *first,
+-		 const struct exception_table_entry *last,
+-		 unsigned long value)
 -{
--	struct pci_bus *bus = dev->subordinate;
--	u16 cmd, old_cmd;
--
--	pci_read_config_word(dev, PCI_COMMAND, &cmd);
--	old_cmd = cmd;
+-	const struct exception_table_entry *mid;
 -
--	if (bus->resource[0]->flags & IORESOURCE_IO) {
--		cmd |= PCI_COMMAND_IO;
--	}
--	if ((bus->resource[1]->flags & IORESOURCE_MEM) ||
--	    (bus->resource[2]->flags & IORESOURCE_PREFETCH)) {
--		cmd |= PCI_COMMAND_MEMORY;
--	}
+-	mid = check_exception_ranges(value);
+-	if (mid)
+-		return mid;
 -
--	if (cmd != old_cmd) {
--		pci_write_config_word(dev, PCI_COMMAND, cmd);
--	}
+-        while (first <= last) {
+-		long diff;
 -
--	printk("PCI bridge %s, command register -> %04x\n",
--		pci_name(dev), cmd);
+-		mid = (last - first) / 2 + first;
+-		diff = mid->insn - value;
+-                if (diff == 0)
+-                        return mid;
+-                else if (diff < 0)
+-                        first = mid+1;
+-                else
+-                        last = mid-1;
+-        }
 -
+-        return NULL;
 -}
 -
--
--
--int pcibios_enable_device(struct pci_dev *dev, int mask)
+-int fixup_exception(struct pt_regs *regs)
 -{
--	u16 cmd, old_cmd;
--	int idx;
--	struct resource *r;
+-	const struct exception_table_entry *fixup;
 -
--	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
--		pcibios_enable_bridge(dev);
+-	fixup = search_exception_tables(regs->pc);
+-	if (fixup) {
+-		regs->pc = fixup->fixup;
+-		return 1;
 -	}
 -
--	pci_read_config_word(dev, PCI_COMMAND, &cmd);
--	old_cmd = cmd;
--	for(idx=0; idx<6; idx++) {
--		if (!(mask & (1 << idx)))
--			continue;
--		r = &dev->resource[idx];
--		if (!r->start && r->end) {
--			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
--			return -EINVAL;
--		}
--		if (r->flags & IORESOURCE_IO)
--			cmd |= PCI_COMMAND_IO;
--		if (r->flags & IORESOURCE_MEM)
--			cmd |= PCI_COMMAND_MEMORY;
--	}
--	if (dev->resource[PCI_ROM_RESOURCE].start)
--		cmd |= PCI_COMMAND_MEMORY;
--	if (cmd != old_cmd) {
--		printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
--		pci_write_config_word(dev, PCI_COMMAND, cmd);
--	}
 -	return 0;
 -}
 -
--/*
-- *  If we set up a device for bus mastering, we need to check and set
-- *  the latency timer as it may not be properly set.
-- */
--unsigned int pcibios_max_latency = 255;
--
--void pcibios_set_master(struct pci_dev *dev)
--{
--	u8 lat;
--	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
--	if (lat < 16)
--		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
--	else if (lat > pcibios_max_latency)
--		lat = pcibios_max_latency;
--	else
--		return;
--	printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
--	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
--}
--
--void __init pcibios_update_irq(struct pci_dev *dev, int irq)
--{
--	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
--}
-diff --git a/arch/sh64/kernel/process.c b/arch/sh64/kernel/process.c
+diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
 deleted file mode 100644
-index 0761af4..0000000
---- a/arch/sh64/kernel/process.c
+index 7c79a1b..0000000
+--- a/arch/sh64/mm/fault.c
 +++ /dev/null
-@@ -1,691 +0,0 @@
+@@ -1,602 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/process.c
+- * arch/sh64/mm/fault.c
 - *
 - * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 - * Copyright (C) 2003  Paul Mundt
-- * Copyright (C) 2003, 2004 Richard Curnow
-- *
-- * Started from SH3/4 version:
-- *   Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
-- *
-- *   In turn started from i386 version:
-- *     Copyright (C) 1995  Linus Torvalds
 - *
 - */
 -
--/*
-- * This file handles the architecture-dependent parts of process handling..
-- */
--#include <linux/mm.h>
--#include <linux/fs.h>
+-#include <linux/signal.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/errno.h>
+-#include <linux/string.h>
+-#include <linux/types.h>
 -#include <linux/ptrace.h>
--#include <linux/reboot.h>
--#include <linux/init.h>
--#include <linux/module.h>
--#include <linux/proc_fs.h>
--#include <asm/uaccess.h>
--#include <asm/pgtable.h>
--
--struct task_struct *last_task_used_math = NULL;
--
--static int hlt_counter = 1;
+-#include <linux/mman.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/interrupt.h>
 -
--#define HARD_IDLE_TIMEOUT (HZ / 3)
+-#include <asm/system.h>
+-#include <asm/io.h>
+-#include <asm/tlb.h>
+-#include <asm/uaccess.h>
+-#include <asm/pgalloc.h>
+-#include <asm/mmu_context.h>
+-#include <asm/registers.h>		/* required by inline asm statements */
 -
--void disable_hlt(void)
--{
--	hlt_counter++;
--}
+-#if defined(CONFIG_SH64_PROC_TLB)
+-#include <linux/init.h>
+-#include <linux/proc_fs.h>
+-/* Count numbers of tlb refills in each region */
+-static unsigned long long calls_to_update_mmu_cache = 0ULL;
+-static unsigned long long calls_to_flush_tlb_page   = 0ULL;
+-static unsigned long long calls_to_flush_tlb_range  = 0ULL;
+-static unsigned long long calls_to_flush_tlb_mm     = 0ULL;
+-static unsigned long long calls_to_flush_tlb_all    = 0ULL;
+-unsigned long long calls_to_do_slow_page_fault = 0ULL;
+-unsigned long long calls_to_do_fast_page_fault = 0ULL;
 -
--void enable_hlt(void)
--{
--	hlt_counter--;
--}
+-/* Count size of ranges for flush_tlb_range */
+-static unsigned long long flush_tlb_range_1         = 0ULL;
+-static unsigned long long flush_tlb_range_2         = 0ULL;
+-static unsigned long long flush_tlb_range_3_4       = 0ULL;
+-static unsigned long long flush_tlb_range_5_7       = 0ULL;
+-static unsigned long long flush_tlb_range_8_11      = 0ULL;
+-static unsigned long long flush_tlb_range_12_15     = 0ULL;
+-static unsigned long long flush_tlb_range_16_up     = 0ULL;
 -
--static int __init nohlt_setup(char *__unused)
--{
--	hlt_counter = 1;
--	return 1;
--}
+-static unsigned long long page_not_present          = 0ULL;
 -
--static int __init hlt_setup(char *__unused)
--{
--	hlt_counter = 0;
--	return 1;
--}
+-#endif
 -
--__setup("nohlt", nohlt_setup);
--__setup("hlt", hlt_setup);
+-extern void die(const char *,struct pt_regs *,long);
 -
--static inline void hlt(void)
--{
--	__asm__ __volatile__ ("sleep" : : : "memory");
--}
+-#define PFLAG(val,flag)   (( (val) & (flag) ) ? #flag : "" )
+-#define PPROT(flag) PFLAG(pgprot_val(prot),flag)
 -
--/*
-- * The idle loop on a uniprocessor SH..
-- */
--void cpu_idle(void)
+-static inline void print_prots(pgprot_t prot)
 -{
--	/* endless idle loop with no priority at all */
--	while (1) {
--		if (hlt_counter) {
--			while (!need_resched())
--				cpu_relax();
--		} else {
--			local_irq_disable();
--			while (!need_resched()) {
--				local_irq_enable();
--				hlt();
--				local_irq_disable();
--			}
--			local_irq_enable();
--		}
--		preempt_enable_no_resched();
--		schedule();
--		preempt_disable();
--	}
+-	printk("prot is 0x%08lx\n",pgprot_val(prot));
 -
+-	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
+-	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
 -}
 -
--void machine_restart(char * __unused)
+-static inline void print_vma(struct vm_area_struct *vma)
 -{
--	extern void phys_stext(void);
+-	printk("vma start 0x%08lx\n", vma->vm_start);
+-	printk("vma end   0x%08lx\n", vma->vm_end);
 -
--	phys_stext();
+-	print_prots(vma->vm_page_prot);
+-	printk("vm_flags 0x%08lx\n", vma->vm_flags);
 -}
 -
--void machine_halt(void)
+-static inline void print_task(struct task_struct *tsk)
 -{
--	for (;;);
+-	printk("Task pid %d\n", task_pid_nr(tsk));
 -}
 -
--void machine_power_off(void)
+-static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
 -{
--	extern void enter_deep_standby(void);
--
--	enter_deep_standby();
--}
--
--void (*pm_power_off)(void) = machine_power_off;
--EXPORT_SYMBOL(pm_power_off);
+-	pgd_t *dir;
+-	pmd_t *pmd;
+-	pte_t *pte;
+-	pte_t entry;
 -
--void show_regs(struct pt_regs * regs)
--{
--	unsigned long long ah, al, bh, bl, ch, cl;
+-	dir = pgd_offset(mm, address);
+-	if (pgd_none(*dir)) {
+-		return NULL;
+-	}
 -
--	printk("\n");
+-	pmd = pmd_offset(dir, address);
+-	if (pmd_none(*pmd)) {
+-		return NULL;
+-	}
 -
--	ah = (regs->pc) >> 32;
--	al = (regs->pc) & 0xffffffff;
--	bh = (regs->regs[18]) >> 32;
--	bl = (regs->regs[18]) & 0xffffffff;
--	ch = (regs->regs[15]) >> 32;
--	cl = (regs->regs[15]) & 0xffffffff;
--	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	pte = pte_offset_kernel(pmd, address);
+-	entry = *pte;
 -
--	ah = (regs->sr) >> 32;
--	al = (regs->sr) & 0xffffffff;
--        asm volatile ("getcon   " __TEA ", %0" : "=r" (bh));
--        asm volatile ("getcon   " __TEA ", %0" : "=r" (bl));
--	bh = (bh) >> 32;
--	bl = (bl) & 0xffffffff;
--        asm volatile ("getcon   " __KCR0 ", %0" : "=r" (ch));
--        asm volatile ("getcon   " __KCR0 ", %0" : "=r" (cl));
--	ch = (ch) >> 32;
--	cl = (cl) & 0xffffffff;
--	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	if (pte_none(entry)) {
+-		return NULL;
+-	}
+-	if (!pte_present(entry)) {
+-		return NULL;
+-	}
 -
--	ah = (regs->regs[0]) >> 32;
--	al = (regs->regs[0]) & 0xffffffff;
--	bh = (regs->regs[1]) >> 32;
--	bl = (regs->regs[1]) & 0xffffffff;
--	ch = (regs->regs[2]) >> 32;
--	cl = (regs->regs[2]) & 0xffffffff;
--	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	return pte;
+-}
 -
--	ah = (regs->regs[3]) >> 32;
--	al = (regs->regs[3]) & 0xffffffff;
--	bh = (regs->regs[4]) >> 32;
--	bl = (regs->regs[4]) & 0xffffffff;
--	ch = (regs->regs[5]) >> 32;
--	cl = (regs->regs[5]) & 0xffffffff;
--	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-/*
+- * This routine handles page faults.  It determines the address,
+- * and the problem, and then passes it off to one of the appropriate
+- * routines.
+- */
+-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
+-			      unsigned long textaccess, unsigned long address)
+-{
+-	struct task_struct *tsk;
+-	struct mm_struct *mm;
+-	struct vm_area_struct * vma;
+-	const struct exception_table_entry *fixup;
+-	pte_t *pte;
+-	int fault;
 -
--	ah = (regs->regs[6]) >> 32;
--	al = (regs->regs[6]) & 0xffffffff;
--	bh = (regs->regs[7]) >> 32;
--	bl = (regs->regs[7]) & 0xffffffff;
--	ch = (regs->regs[8]) >> 32;
--	cl = (regs->regs[8]) & 0xffffffff;
--	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-#if defined(CONFIG_SH64_PROC_TLB)
+-        ++calls_to_do_slow_page_fault;
+-#endif
 -
--	ah = (regs->regs[9]) >> 32;
--	al = (regs->regs[9]) & 0xffffffff;
--	bh = (regs->regs[10]) >> 32;
--	bl = (regs->regs[10]) & 0xffffffff;
--	ch = (regs->regs[11]) >> 32;
--	cl = (regs->regs[11]) & 0xffffffff;
--	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	/* SIM
+-	 * Note this is now called with interrupts still disabled
+-	 * This is to cope with being called for a missing IO port
+-	 * address with interrupts disabled. This should be fixed as
+-	 * soon as we have a better 'fast path' miss handler.
+-	 *
+-	 * Plus take care how you try and debug this stuff.
+-	 * For example, writing debug data to a port which you
+-	 * have just faulted on is not going to work.
+-	 */
 -
--	ah = (regs->regs[12]) >> 32;
--	al = (regs->regs[12]) & 0xffffffff;
--	bh = (regs->regs[13]) >> 32;
--	bl = (regs->regs[13]) & 0xffffffff;
--	ch = (regs->regs[14]) >> 32;
--	cl = (regs->regs[14]) & 0xffffffff;
--	printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	tsk = current;
+-	mm = tsk->mm;
 -
--	ah = (regs->regs[16]) >> 32;
--	al = (regs->regs[16]) & 0xffffffff;
--	bh = (regs->regs[17]) >> 32;
--	bl = (regs->regs[17]) & 0xffffffff;
--	ch = (regs->regs[19]) >> 32;
--	cl = (regs->regs[19]) & 0xffffffff;
--	printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	/* Not an IO address, so reenable interrupts */
+-	local_irq_enable();
 -
--	ah = (regs->regs[20]) >> 32;
--	al = (regs->regs[20]) & 0xffffffff;
--	bh = (regs->regs[21]) >> 32;
--	bl = (regs->regs[21]) & 0xffffffff;
--	ch = (regs->regs[22]) >> 32;
--	cl = (regs->regs[22]) & 0xffffffff;
--	printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	/*
+-	 * If we're in an interrupt or have no user
+-	 * context, we must not take the fault..
+-	 */
+-	if (in_atomic() || !mm)
+-		goto no_context;
 -
--	ah = (regs->regs[23]) >> 32;
--	al = (regs->regs[23]) & 0xffffffff;
--	bh = (regs->regs[24]) >> 32;
--	bl = (regs->regs[24]) & 0xffffffff;
--	ch = (regs->regs[25]) >> 32;
--	cl = (regs->regs[25]) & 0xffffffff;
--	printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	/* TLB misses upon some cache flushes get done under cli() */
+-	down_read(&mm->mmap_sem);
 -
--	ah = (regs->regs[26]) >> 32;
--	al = (regs->regs[26]) & 0xffffffff;
--	bh = (regs->regs[27]) >> 32;
--	bl = (regs->regs[27]) & 0xffffffff;
--	ch = (regs->regs[28]) >> 32;
--	cl = (regs->regs[28]) & 0xffffffff;
--	printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	vma = find_vma(mm, address);
 -
--	ah = (regs->regs[29]) >> 32;
--	al = (regs->regs[29]) & 0xffffffff;
--	bh = (regs->regs[30]) >> 32;
--	bl = (regs->regs[30]) & 0xffffffff;
--	ch = (regs->regs[31]) >> 32;
--	cl = (regs->regs[31]) & 0xffffffff;
--	printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	if (!vma) {
+-#ifdef DEBUG_FAULT
+-		print_task(tsk);
+-		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
+-		       __FUNCTION__,__LINE__,
+-		       address,regs->pc,textaccess,writeaccess);
+-		show_regs(regs);
+-#endif
+-		goto bad_area;
+-	}
+-	if (vma->vm_start <= address) {
+-		goto good_area;
+-	}
 -
--	ah = (regs->regs[32]) >> 32;
--	al = (regs->regs[32]) & 0xffffffff;
--	bh = (regs->regs[33]) >> 32;
--	bl = (regs->regs[33]) & 0xffffffff;
--	ch = (regs->regs[34]) >> 32;
--	cl = (regs->regs[34]) & 0xffffffff;
--	printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	if (!(vma->vm_flags & VM_GROWSDOWN)) {
+-#ifdef DEBUG_FAULT
+-		print_task(tsk);
+-		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
+-		       __FUNCTION__,__LINE__,
+-		       address,regs->pc,textaccess,writeaccess);
+-		show_regs(regs);
 -
--	ah = (regs->regs[35]) >> 32;
--	al = (regs->regs[35]) & 0xffffffff;
--	bh = (regs->regs[36]) >> 32;
--	bl = (regs->regs[36]) & 0xffffffff;
--	ch = (regs->regs[37]) >> 32;
--	cl = (regs->regs[37]) & 0xffffffff;
--	printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-		print_vma(vma);
+-#endif
+-		goto bad_area;
+-	}
+-	if (expand_stack(vma, address)) {
+-#ifdef DEBUG_FAULT
+-		print_task(tsk);
+-		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
+-		       __FUNCTION__,__LINE__,
+-		       address,regs->pc,textaccess,writeaccess);
+-		show_regs(regs);
+-#endif
+-		goto bad_area;
+-	}
+-/*
+- * Ok, we have a good vm_area for this memory access, so
+- * we can handle it..
+- */
+-good_area:
+-	if (textaccess) {
+-		if (!(vma->vm_flags & VM_EXEC))
+-			goto bad_area;
+-	} else {
+-		if (writeaccess) {
+-			if (!(vma->vm_flags & VM_WRITE))
+-				goto bad_area;
+-		} else {
+-			if (!(vma->vm_flags & VM_READ))
+-				goto bad_area;
+-		}
+-	}
 -
--	ah = (regs->regs[38]) >> 32;
--	al = (regs->regs[38]) & 0xffffffff;
--	bh = (regs->regs[39]) >> 32;
--	bl = (regs->regs[39]) & 0xffffffff;
--	ch = (regs->regs[40]) >> 32;
--	cl = (regs->regs[40]) & 0xffffffff;
--	printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	/*
+-	 * If for any reason at all we couldn't handle the fault,
+-	 * make sure we exit gracefully rather than endlessly redo
+-	 * the fault.
+-	 */
+-survive:
+-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+-	if (unlikely(fault & VM_FAULT_ERROR)) {
+-		if (fault & VM_FAULT_OOM)
+-			goto out_of_memory;
+-		else if (fault & VM_FAULT_SIGBUS)
+-			goto do_sigbus;
+-		BUG();
+-	}
+-	if (fault & VM_FAULT_MAJOR)
+-		tsk->maj_flt++;
+-	else
+-		tsk->min_flt++;
 -
--	ah = (regs->regs[41]) >> 32;
--	al = (regs->regs[41]) & 0xffffffff;
--	bh = (regs->regs[42]) >> 32;
--	bl = (regs->regs[42]) & 0xffffffff;
--	ch = (regs->regs[43]) >> 32;
--	cl = (regs->regs[43]) & 0xffffffff;
--	printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	/* If we get here, the page fault has been handled.  Do the TLB refill
+-	   now from the newly-setup PTE, to avoid having to fault again right
+-	   away on the same instruction. */
+-	pte = lookup_pte (mm, address);
+-	if (!pte) {
+-		/* From empirical evidence, we can get here, due to
+-		   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
+-		   is swapped back out again before the process that wanted it
+-		   gets rescheduled?) */
+-		goto no_pte;
+-	}
 -
--	ah = (regs->regs[44]) >> 32;
--	al = (regs->regs[44]) & 0xffffffff;
--	bh = (regs->regs[45]) >> 32;
--	bl = (regs->regs[45]) & 0xffffffff;
--	ch = (regs->regs[46]) >> 32;
--	cl = (regs->regs[46]) & 0xffffffff;
--	printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	__do_tlb_refill(address, textaccess, pte);
 -
--	ah = (regs->regs[47]) >> 32;
--	al = (regs->regs[47]) & 0xffffffff;
--	bh = (regs->regs[48]) >> 32;
--	bl = (regs->regs[48]) & 0xffffffff;
--	ch = (regs->regs[49]) >> 32;
--	cl = (regs->regs[49]) & 0xffffffff;
--	printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-no_pte:
 -
--	ah = (regs->regs[50]) >> 32;
--	al = (regs->regs[50]) & 0xffffffff;
--	bh = (regs->regs[51]) >> 32;
--	bl = (regs->regs[51]) & 0xffffffff;
--	ch = (regs->regs[52]) >> 32;
--	cl = (regs->regs[52]) & 0xffffffff;
--	printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	up_read(&mm->mmap_sem);
+-	return;
 -
--	ah = (regs->regs[53]) >> 32;
--	al = (regs->regs[53]) & 0xffffffff;
--	bh = (regs->regs[54]) >> 32;
--	bl = (regs->regs[54]) & 0xffffffff;
--	ch = (regs->regs[55]) >> 32;
--	cl = (regs->regs[55]) & 0xffffffff;
--	printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-/*
+- * Something tried to access memory that isn't in our memory map..
+- * Fix it, but check if it's kernel or user first..
+- */
+-bad_area:
+-#ifdef DEBUG_FAULT
+-	printk("fault:bad area\n");
+-#endif
+-	up_read(&mm->mmap_sem);
 -
--	ah = (regs->regs[56]) >> 32;
--	al = (regs->regs[56]) & 0xffffffff;
--	bh = (regs->regs[57]) >> 32;
--	bl = (regs->regs[57]) & 0xffffffff;
--	ch = (regs->regs[58]) >> 32;
--	cl = (regs->regs[58]) & 0xffffffff;
--	printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-	if (user_mode(regs)) {
+-		static int count=0;
+-		siginfo_t info;
+-		if (count < 4) {
+-			/* This is really to help debug faults when starting
+-			 * usermode, so only need a few */
+-			count++;
+-			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
+-				address, task_pid_nr(current), current->comm,
+-				(unsigned long) regs->pc);
+-#if 0
+-			show_regs(regs);
+-#endif
+-		}
+-		if (is_global_init(tsk)) {
+-			panic("INIT had user mode bad_area\n");
+-		}
+-		tsk->thread.address = address;
+-		tsk->thread.error_code = writeaccess;
+-		info.si_signo = SIGSEGV;
+-		info.si_errno = 0;
+-		info.si_addr = (void *) address;
+-		force_sig_info(SIGSEGV, &info, tsk);
+-		return;
+-	}
 -
--	ah = (regs->regs[59]) >> 32;
--	al = (regs->regs[59]) & 0xffffffff;
--	bh = (regs->regs[60]) >> 32;
--	bl = (regs->regs[60]) & 0xffffffff;
--	ch = (regs->regs[61]) >> 32;
--	cl = (regs->regs[61]) & 0xffffffff;
--	printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-no_context:
+-#ifdef DEBUG_FAULT
+-	printk("fault:No context\n");
+-#endif
+-	/* Are we prepared to handle this kernel fault?  */
+-	fixup = search_exception_tables(regs->pc);
+-	if (fixup) {
+-		regs->pc = fixup->fixup;
+-		return;
+-	}
 -
--	ah = (regs->regs[62]) >> 32;
--	al = (regs->regs[62]) & 0xffffffff;
--	bh = (regs->tregs[0]) >> 32;
--	bl = (regs->tregs[0]) & 0xffffffff;
--	ch = (regs->tregs[1]) >> 32;
--	cl = (regs->tregs[1]) & 0xffffffff;
--	printk("R62 : %08Lx%08Lx T0  : %08Lx%08Lx T1  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-/*
+- * Oops. The kernel tried to access some bad page. We'll have to
+- * terminate things with extreme prejudice.
+- *
+- */
+-	if (address < PAGE_SIZE)
+-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+-	else
+-		printk(KERN_ALERT "Unable to handle kernel paging request");
+-	printk(" at virtual address %08lx\n", address);
+-	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
+-	die("Oops", regs, writeaccess);
+-	do_exit(SIGKILL);
 -
--	ah = (regs->tregs[2]) >> 32;
--	al = (regs->tregs[2]) & 0xffffffff;
--	bh = (regs->tregs[3]) >> 32;
--	bl = (regs->tregs[3]) & 0xffffffff;
--	ch = (regs->tregs[4]) >> 32;
--	cl = (regs->tregs[4]) & 0xffffffff;
--	printk("T2  : %08Lx%08Lx T3  : %08Lx%08Lx T4  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-/*
+- * We ran out of memory, or some other thing happened to us that made
+- * us unable to handle the page fault gracefully.
+- */
+-out_of_memory:
+-	if (is_global_init(current)) {
+-		panic("INIT out of memory\n");
+-		yield();
+-		goto survive;
+-	}
+-	printk("fault:Out of memory\n");
+-	up_read(&mm->mmap_sem);
+-	if (is_global_init(current)) {
+-		yield();
+-		down_read(&mm->mmap_sem);
+-		goto survive;
+-	}
+-	printk("VM: killing process %s\n", tsk->comm);
+-	if (user_mode(regs))
+-		do_group_exit(SIGKILL);
+-	goto no_context;
 -
--	ah = (regs->tregs[5]) >> 32;
--	al = (regs->tregs[5]) & 0xffffffff;
--	bh = (regs->tregs[6]) >> 32;
--	bl = (regs->tregs[6]) & 0xffffffff;
--	ch = (regs->tregs[7]) >> 32;
--	cl = (regs->tregs[7]) & 0xffffffff;
--	printk("T5  : %08Lx%08Lx T6  : %08Lx%08Lx T7  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
+-do_sigbus:
+-	printk("fault:Do sigbus\n");
+-	up_read(&mm->mmap_sem);
 -
 -	/*
--	 * If we're in kernel mode, dump the stack too..
+-	 * Send a sigbus, regardless of whether we were in kernel
+-	 * or user mode.
 -	 */
--	if (!user_mode(regs)) {
--		void show_stack(struct task_struct *tsk, unsigned long *sp);
--		unsigned long sp = regs->regs[15] & 0xffffffff;
--		struct task_struct *tsk = get_current();
--
--		tsk->thread.kregs = regs;
+-	tsk->thread.address = address;
+-	tsk->thread.error_code = writeaccess;
+-	tsk->thread.trap_no = 14;
+-	force_sig(SIGBUS, tsk);
 -
--		show_stack(tsk, (unsigned long *)sp);
--	}
+-	/* Kernel mode? Handle exceptions or die */
+-	if (!user_mode(regs))
+-		goto no_context;
 -}
 -
--struct task_struct * alloc_task_struct(void)
--{
--	/* Get task descriptor pages */
--	return (struct task_struct *)
--		__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
--}
 -
--void free_task_struct(struct task_struct *p)
--{
--	free_pages((unsigned long) p, get_order(THREAD_SIZE));
--}
+-void flush_tlb_all(void);
 -
--/*
-- * Create a kernel thread
-- */
--ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
+-void update_mmu_cache(struct vm_area_struct * vma,
+-			unsigned long address, pte_t pte)
 -{
--	do_exit(fn(arg));
+-#if defined(CONFIG_SH64_PROC_TLB)
+-	++calls_to_update_mmu_cache;
+-#endif
+-
+-	/*
+-	 * This appears to get called once for every pte entry that gets
+-	 * established => I don't think it's efficient to try refilling the
+-	 * TLBs with the pages - some may not get accessed even.  Also, for
+-	 * executable pages, it is impossible to determine reliably here which
+-	 * TLB they should be mapped into (or both even).
+-	 *
+-	 * So, just do nothing here and handle faults on demand.  In the
+-	 * TLBMISS handling case, the refill is now done anyway after the pte
+-	 * has been fixed up, so that deals with most useful cases.
+-	 */
 -}
 -
--/*
-- * This is the mechanism for creating a new kernel thread.
-- *
-- * NOTE! Only a kernel-only process(ie the swapper or direct descendants
-- * who haven't done an "execve()") should use this: it will work within
-- * a system call from a "real" process, but the process memory space will
-- * not be freed until both the parent and the child have exited.
-- */
--int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+-static void __flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 -{
--	struct pt_regs regs;
+-	unsigned long long match, pteh=0, lpage;
+-	unsigned long tlb;
+-	struct mm_struct *mm;
 -
--	memset(&regs, 0, sizeof(regs));
--	regs.regs[2] = (unsigned long)arg;
--	regs.regs[3] = (unsigned long)fn;
+-	mm = vma->vm_mm;
 -
--	regs.pc = (unsigned long)kernel_thread_helper;
--	regs.sr = (1 << 30);
+-	if (mm->context == NO_CONTEXT)
+-		return;
 -
--	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
--		       &regs, 0, NULL, NULL);
--}
+-	/*
+-	 * Sign-extend based on neff.
+-	 */
+-	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
+-	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
+-	match |= lpage;
 -
--/*
-- * Free current thread data structures etc..
-- */
--void exit_thread(void)
--{
--	/* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.
+-        /* Do ITLB : don't bother for pages in non-exectutable VMAs */
+-	if (vma->vm_flags & VM_EXEC) {
+-		for_each_itlb_entry(tlb) {
+-			asm volatile ("getcfg	%1, 0, %0"
+-				      : "=r" (pteh)
+-				      : "r" (tlb) );
 -
--	   The SH-5 FPU save/restore approach relies on last_task_used_math
--	   pointing to a live task_struct.  When another task tries to use the
--	   FPU for the 1st time, the FPUDIS trap handling (see
--	   arch/sh64/kernel/fpu.c) will save the existing FPU state to the
--	   FP regs field within last_task_used_math before re-loading the new
--	   task's FPU state (or initialising it if the FPU has been used
--	   before).  So if last_task_used_math is stale, and its page has already been
--	   re-allocated for another use, the consequences are rather grim. Unless we
--	   null it here, there is no other path through which it would get safely
--	   nulled. */
+-			if (pteh == match) {
+-				__flush_tlb_slot(tlb);
+-				break;
+-			}
+-
+-		}
+-	}
+-
+-        /* Do DTLB : any page could potentially be in here. */
+-	for_each_dtlb_entry(tlb) {
+-		asm volatile ("getcfg	%1, 0, %0"
+-			      : "=r" (pteh)
+-			      : "r" (tlb) );
+-
+-		if (pteh == match) {
+-			__flush_tlb_slot(tlb);
+-			break;
+-		}
 -
--#ifdef CONFIG_SH_FPU
--	if (last_task_used_math == current) {
--		last_task_used_math = NULL;
 -	}
--#endif
 -}
 -
--void flush_thread(void)
+-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 -{
+-	unsigned long flags;
 -
--	/* Called by fs/exec.c (flush_old_exec) to remove traces of a
--	 * previously running executable. */
--#ifdef CONFIG_SH_FPU
--	if (last_task_used_math == current) {
--		last_task_used_math = NULL;
--	}
--	/* Force FPU state to be reinitialised after exec */
--	clear_used_math();
+-#if defined(CONFIG_SH64_PROC_TLB)
+-        ++calls_to_flush_tlb_page;
 -#endif
 -
--	/* if we are a kernel thread, about to change to user thread,
--         * update kreg
--         */
--	if(current->thread.kregs==&fake_swapper_regs) {
--          current->thread.kregs =
--             ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
--	  current->thread.uregs = current->thread.kregs;
+-	if (vma->vm_mm) {
+-		page &= PAGE_MASK;
+-		local_irq_save(flags);
+-		__flush_tlb_page(vma, page);
+-		local_irq_restore(flags);
 -	}
 -}
 -
--void release_thread(struct task_struct *dead_task)
+-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+-		     unsigned long end)
 -{
--	/* do nothing */
--}
+-	unsigned long flags;
+-	unsigned long long match, pteh=0, pteh_epn, pteh_low;
+-	unsigned long tlb;
+-	struct mm_struct *mm;
 -
--/* Fill in the fpu structure for a core dump.. */
--int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
--{
--#ifdef CONFIG_SH_FPU
--	int fpvalid;
--	struct task_struct *tsk = current;
+-	mm = vma->vm_mm;
 -
--	fpvalid = !!tsk_used_math(tsk);
--	if (fpvalid) {
--		if (current == last_task_used_math) {
--			grab_fpu();
--			fpsave(&tsk->thread.fpu.hard);
--			release_fpu();
--			last_task_used_math = 0;
--			regs->sr |= SR_FD;
--		}
+-#if defined(CONFIG_SH64_PROC_TLB)
+-	++calls_to_flush_tlb_range;
 -
--		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+-	{
+-		unsigned long size = (end - 1) - start;
+-		size >>= 12; /* divide by PAGE_SIZE */
+-		size++; /* end=start+4096 => 1 page */
+-		switch (size) {
+-		  case  1        : flush_tlb_range_1++;     break;
+-		  case  2        : flush_tlb_range_2++;     break;
+-		  case  3 ...  4 : flush_tlb_range_3_4++;   break;
+-		  case  5 ...  7 : flush_tlb_range_5_7++;   break;
+-		  case  8 ... 11 : flush_tlb_range_8_11++;  break;
+-		  case 12 ... 15 : flush_tlb_range_12_15++; break;
+-		  default        : flush_tlb_range_16_up++; break;
+-		}
 -	}
--
--	return fpvalid;
--#else
--	return 0; /* Task didn't use the fpu at all. */
 -#endif
--}
--
--asmlinkage void ret_from_fork(void);
 -
--int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
--		unsigned long unused,
--		struct task_struct *p, struct pt_regs *regs)
--{
--	struct pt_regs *childregs;
--	unsigned long long se;			/* Sign extension */
+-	if (mm->context == NO_CONTEXT)
+-		return;
 -
--#ifdef CONFIG_SH_FPU
--	if(last_task_used_math == current) {
--		grab_fpu();
--		fpsave(&current->thread.fpu.hard);
--		release_fpu();
--		last_task_used_math = NULL;
--		regs->sr |= SR_FD;
--	}
--#endif
--	/* Copy from sh version */
--	childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
+-	local_irq_save(flags);
 -
--	*childregs = *regs;
+-	start &= PAGE_MASK;
+-	end &= PAGE_MASK;
 -
--	if (user_mode(regs)) {
--		childregs->regs[15] = usp;
--		p->thread.uregs = childregs;
--	} else {
--		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
--	}
+-	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
 -
--	childregs->regs[9] = 0; /* Set return value for child */
--	childregs->sr |= SR_FD; /* Invalidate FPU flag */
+-	/* Flush ITLB */
+-	for_each_itlb_entry(tlb) {
+-		asm volatile ("getcfg	%1, 0, %0"
+-			      : "=r" (pteh)
+-			      : "r" (tlb) );
 -
--	p->thread.sp = (unsigned long) childregs;
--	p->thread.pc = (unsigned long) ret_from_fork;
+-		pteh_epn = pteh & PAGE_MASK;
+-		pteh_low = pteh & ~PAGE_MASK;
 -
--	/*
--	 * Sign extend the edited stack.
--         * Note that thread.pc and thread.pc will stay
--	 * 32-bit wide and context switch must take care
--	 * of NEFF sign extension.
--	 */
+-		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
+-			__flush_tlb_slot(tlb);
+-	}
 -
--	se = childregs->regs[15];
--	se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
--	childregs->regs[15] = se;
+-	/* Flush DTLB */
+-	for_each_dtlb_entry(tlb) {
+-		asm volatile ("getcfg	%1, 0, %0"
+-			      : "=r" (pteh)
+-			      : "r" (tlb) );
 -
--	return 0;
--}
+-		pteh_epn = pteh & PAGE_MASK;
+-		pteh_low = pteh & ~PAGE_MASK;
 -
--asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
--			unsigned long r4, unsigned long r5,
--			unsigned long r6, unsigned long r7,
--			struct pt_regs *pregs)
--{
--	return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
--}
+-		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
+-			__flush_tlb_slot(tlb);
+-	}
 -
--asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
--			 unsigned long r4, unsigned long r5,
--			 unsigned long r6, unsigned long r7,
--			 struct pt_regs *pregs)
--{
--	if (!newsp)
--		newsp = pregs->regs[15];
--	return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
+-	local_irq_restore(flags);
 -}
 -
--/*
-- * This is trivial, and on the face of it looks like it
-- * could equally well be done in user mode.
-- *
-- * Not so, for quite unobvious reasons - register pressure.
-- * In user mode vfork() cannot have a stack frame, and if
-- * done by calling the "clone()" system call directly, you
-- * do not have enough call-clobbered registers to hold all
-- * the information you need.
-- */
--asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
--			 unsigned long r4, unsigned long r5,
--			 unsigned long r6, unsigned long r7,
--			 struct pt_regs *pregs)
+-void flush_tlb_mm(struct mm_struct *mm)
 -{
--	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
--}
+-	unsigned long flags;
 -
--/*
-- * sys_execve() executes a new program.
-- */
--asmlinkage int sys_execve(char *ufilename, char **uargv,
--			  char **uenvp, unsigned long r5,
--			  unsigned long r6, unsigned long r7,
--			  struct pt_regs *pregs)
--{
--	int error;
--	char *filename;
+-#if defined(CONFIG_SH64_PROC_TLB)
+-	++calls_to_flush_tlb_mm;
+-#endif
 -
--	lock_kernel();
--	filename = getname((char __user *)ufilename);
--	error = PTR_ERR(filename);
--	if (IS_ERR(filename))
--		goto out;
+-	if (mm->context == NO_CONTEXT)
+-		return;
 -
--	error = do_execve(filename,
--			  (char __user * __user *)uargv,
--			  (char __user * __user *)uenvp,
--			  pregs);
--	if (error == 0) {
--		task_lock(current);
--		current->ptrace &= ~PT_DTRACE;
--		task_unlock(current);
--	}
--	putname(filename);
--out:
--	unlock_kernel();
--	return error;
--}
+-	local_irq_save(flags);
 -
--/*
-- * These bracket the sleeping functions..
-- */
--extern void interruptible_sleep_on(wait_queue_head_t *q);
+-	mm->context=NO_CONTEXT;
+-	if(mm==current->mm)
+-		activate_context(mm);
 -
--#define mid_sched	((unsigned long) interruptible_sleep_on)
+-	local_irq_restore(flags);
 -
--static int in_sh64_switch_to(unsigned long pc)
--{
--	extern char __sh64_switch_to_end;
--	/* For a sleeping task, the PC is somewhere in the middle of the function,
--	   so we don't have to worry about masking the LSB off */
--	return (pc >= (unsigned long) sh64_switch_to) &&
--	       (pc < (unsigned long) &__sh64_switch_to_end);
 -}
 -
--unsigned long get_wchan(struct task_struct *p)
+-void flush_tlb_all(void)
 -{
--	unsigned long schedule_fp;
--	unsigned long sh64_switch_to_fp;
--	unsigned long schedule_caller_pc;
--	unsigned long pc;
+-	/* Invalidate all, including shared pages, excluding fixed TLBs */
 -
--	if (!p || p == current || p->state == TASK_RUNNING)
--		return 0;
+-	unsigned long flags, tlb;
 -
--	/*
--	 * The same comment as on the Alpha applies here, too ...
--	 */
--	pc = thread_saved_pc(p);
+-#if defined(CONFIG_SH64_PROC_TLB)
+-	++calls_to_flush_tlb_all;
+-#endif
 -
--#ifdef CONFIG_FRAME_POINTER
--	if (in_sh64_switch_to(pc)) {
--		sh64_switch_to_fp = (long) p->thread.sp;
--		/* r14 is saved at offset 4 in the sh64_switch_to frame */
--		schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
+-	local_irq_save(flags);
 -
--		/* and the caller of 'schedule' is (currently!) saved at offset 24
--		   in the frame of schedule (from disasm) */
--		schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
--		return schedule_caller_pc;
+-	/* Flush each ITLB entry */
+-	for_each_itlb_entry(tlb) {
+-		__flush_tlb_slot(tlb);
 -	}
--#endif
--	return pc;
+-
+-	/* Flush each DTLB entry */
+-	for_each_dtlb_entry(tlb) {
+-		__flush_tlb_slot(tlb);
+-	}
+-
+-	local_irq_restore(flags);
 -}
 -
--/* Provide a /proc/asids file that lists out the
--   ASIDs currently associated with the processes.  (If the DM.PC register is
--   examined through the debug link, this shows ASID + PC.  To make use of this,
--   the PID->ASID relationship needs to be known.  This is primarily for
--   debugging.)
--   */
+-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+-{
+-        /* FIXME: Optimize this later.. */
+-        flush_tlb_all();
+-}
+-
+-#if defined(CONFIG_SH64_PROC_TLB)
+-/* Procfs interface to read the performance information */
 -
--#if defined(CONFIG_SH64_PROC_ASIDS)
 -static int
--asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
+-tlb_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
 -{
--	int len=0;
--	struct task_struct *p;
--	read_lock(&tasklist_lock);
--	for_each_process(p) {
--		int pid = p->pid;
--		struct mm_struct *mm;
--		if (!pid) continue;
--		mm = p->mm;
--		if (mm) {
--			unsigned long asid, context;
--			context = mm->context;
--			asid = (context & 0xff);
--			len += sprintf(buf+len, "%5d : %02lx\n", pid, asid);
--		} else {
--			len += sprintf(buf+len, "%5d : (none)\n", pid);
--		}
--	}
--	read_unlock(&tasklist_lock);
--	*eof = 1;
--	return len;
+-  int len=0;
+-  len += sprintf(buf+len, "do_fast_page_fault   called %12lld times\n", calls_to_do_fast_page_fault);
+-  len += sprintf(buf+len, "do_slow_page_fault   called %12lld times\n", calls_to_do_slow_page_fault);
+-  len += sprintf(buf+len, "update_mmu_cache     called %12lld times\n", calls_to_update_mmu_cache);
+-  len += sprintf(buf+len, "flush_tlb_page       called %12lld times\n", calls_to_flush_tlb_page);
+-  len += sprintf(buf+len, "flush_tlb_range      called %12lld times\n", calls_to_flush_tlb_range);
+-  len += sprintf(buf+len, "flush_tlb_mm         called %12lld times\n", calls_to_flush_tlb_mm);
+-  len += sprintf(buf+len, "flush_tlb_all        called %12lld times\n", calls_to_flush_tlb_all);
+-  len += sprintf(buf+len, "flush_tlb_range_sizes\n"
+-                          " 1      : %12lld\n"
+-                          " 2      : %12lld\n"
+-                          " 3 -  4 : %12lld\n"
+-                          " 5 -  7 : %12lld\n"
+-                          " 8 - 11 : %12lld\n"
+-                          "12 - 15 : %12lld\n"
+-                          "16+     : %12lld\n",
+-                          flush_tlb_range_1, flush_tlb_range_2, flush_tlb_range_3_4,
+-                          flush_tlb_range_5_7, flush_tlb_range_8_11, flush_tlb_range_12_15,
+-                          flush_tlb_range_16_up);
+-  len += sprintf(buf+len, "page not present           %12lld times\n", page_not_present);
+-  *eof = 1;
+-  return len;
 -}
 -
--static int __init register_proc_asids(void)
+-static int __init register_proc_tlb(void)
 -{
--	create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
--	return 0;
+-  create_proc_read_entry("tlb", 0, NULL, tlb_proc_info, NULL);
+-  return 0;
 -}
--__initcall(register_proc_asids);
+-
+-__initcall(register_proc_tlb);
+-
 -#endif
-diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
+diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
 deleted file mode 100644
-index 8a2d339..0000000
---- a/arch/sh64/kernel/ptrace.c
+index fa66daa..0000000
+--- a/arch/sh64/mm/hugetlbpage.c
 +++ /dev/null
-@@ -1,332 +0,0 @@
+@@ -1,105 +0,0 @@
 -/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/kernel/ptrace.c
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003  Paul Mundt
+- * arch/sh64/mm/hugetlbpage.c
 - *
-- * Started from SH3/4 version:
-- *   SuperH version:   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
+- * SuperH HugeTLB page support.
 - *
-- *   Original x86 implementation:
-- *	By Ross Biro 1/23/92
-- *	edited by Linus Torvalds
+- * Cloned from sparc64 by Paul Mundt.
 - *
+- * Copyright (C) 2002, 2003 David S. Miller (davem at redhat.com)
 - */
 -
--#include <linux/kernel.h>
--#include <linux/rwsem.h>
--#include <linux/sched.h>
+-#include <linux/init.h>
+-#include <linux/fs.h>
 -#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/smp_lock.h>
--#include <linux/errno.h>
--#include <linux/ptrace.h>
--#include <linux/user.h>
--#include <linux/signal.h>
--#include <linux/syscalls.h>
--
--#include <asm/io.h>
--#include <asm/uaccess.h>
--#include <asm/pgtable.h>
--#include <asm/system.h>
--#include <asm/processor.h>
--#include <asm/mmu_context.h>
--
--/* This mask defines the bits of the SR which the user is not allowed to
--   change, which are everything except S, Q, M, PR, SZ, FR. */
--#define SR_MASK      (0xffff8cfd)
+-#include <linux/hugetlb.h>
+-#include <linux/pagemap.h>
+-#include <linux/slab.h>
+-#include <linux/sysctl.h>
 -
--/*
-- * does not yet catch signals sent when the child dies.
-- * in exit.c or in signal.c.
-- */
+-#include <asm/mman.h>
+-#include <asm/pgalloc.h>
+-#include <asm/tlb.h>
+-#include <asm/tlbflush.h>
+-#include <asm/cacheflush.h>
 -
--/*
-- * This routine will get a word from the user area in the process kernel stack.
-- */
--static inline int get_stack_long(struct task_struct *task, int offset)
+-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 -{
--	unsigned char *stack;
+-	pgd_t *pgd;
+-	pmd_t *pmd;
+-	pte_t *pte = NULL;
 -
--	stack = (unsigned char *)(task->thread.uregs);
--	stack += offset;
--	return (*((int *)stack));
+-	pgd = pgd_offset(mm, addr);
+-	if (pgd) {
+-		pmd = pmd_alloc(mm, pgd, addr);
+-		if (pmd)
+-			pte = pte_alloc_map(mm, pmd, addr);
+-	}
+-	return pte;
 -}
 -
--static inline unsigned long
--get_fpu_long(struct task_struct *task, unsigned long addr)
+-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 -{
--	unsigned long tmp;
--	struct pt_regs *regs;
--	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
--
--	if (!tsk_used_math(task)) {
--		if (addr == offsetof(struct user_fpu_struct, fpscr)) {
--			tmp = FPSCR_INIT;
--		} else {
--			tmp = 0xffffffffUL; /* matches initial value in fpu.c */
--		}
--		return tmp;
--	}
+-	pgd_t *pgd;
+-	pmd_t *pmd;
+-	pte_t *pte = NULL;
 -
--	if (last_task_used_math == task) {
--		grab_fpu();
--		fpsave(&task->thread.fpu.hard);
--		release_fpu();
--		last_task_used_math = 0;
--		regs->sr |= SR_FD;
+-	pgd = pgd_offset(mm, addr);
+-	if (pgd) {
+-		pmd = pmd_offset(pgd, addr);
+-		if (pmd)
+-			pte = pte_offset_map(pmd, addr);
 -	}
--
--	tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
--	return tmp;
+-	return pte;
 -}
 -
--/*
-- * This routine will put a word into the user area in the process kernel stack.
-- */
--static inline int put_stack_long(struct task_struct *task, int offset,
--				 unsigned long data)
+-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 -{
--	unsigned char *stack;
--
--	stack = (unsigned char *)(task->thread.uregs);
--	stack += offset;
--	*(unsigned long *) stack = data;
 -	return 0;
 -}
 -
--static inline int
--put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
+-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+-		     pte_t *ptep, pte_t entry)
 -{
--	struct pt_regs *regs;
--
--	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
+-	int i;
 -
--	if (!tsk_used_math(task)) {
--		fpinit(&task->thread.fpu.hard);
--		set_stopped_child_used_math(task);
--	} else if (last_task_used_math == task) {
--		grab_fpu();
--		fpsave(&task->thread.fpu.hard);
--		release_fpu();
--		last_task_used_math = 0;
--		regs->sr |= SR_FD;
+-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+-		set_pte_at(mm, addr, ptep, entry);
+-		ptep++;
+-		addr += PAGE_SIZE;
+-		pte_val(entry) += PAGE_SIZE;
 -	}
--
--	((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
--	return 0;
 -}
 -
--
--long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+-			      pte_t *ptep)
 -{
--	int ret;
--
--	switch (request) {
--	/* when I and D space are separate, these will need to be fixed. */
--	case PTRACE_PEEKTEXT: /* read word at location addr. */
--	case PTRACE_PEEKDATA:
--		ret = generic_ptrace_peekdata(child, addr, data);
--		break;
--
--	/* read the word at location addr in the USER area. */
--	case PTRACE_PEEKUSR: {
--		unsigned long tmp;
--
--		ret = -EIO;
--		if ((addr & 3) || addr < 0)
--			break;
--
--		if (addr < sizeof(struct pt_regs))
--			tmp = get_stack_long(child, addr);
--		else if ((addr >= offsetof(struct user, fpu)) &&
--			 (addr <  offsetof(struct user, u_fpvalid))) {
--			tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
--		} else if (addr == offsetof(struct user, u_fpvalid)) {
--			tmp = !!tsk_used_math(child);
--		} else {
--			break;
--		}
--		ret = put_user(tmp, (unsigned long *)data);
--		break;
--	}
--
--	/* when I and D space are separate, this will have to be fixed. */
--	case PTRACE_POKETEXT: /* write the word at location addr. */
--	case PTRACE_POKEDATA:
--		ret = generic_ptrace_pokedata(child, addr, data);
--		break;
--
--	case PTRACE_POKEUSR:
--                /* write the word at location addr in the USER area. We must
--                   disallow any changes to certain SR bits or u_fpvalid, since
--                   this could crash the kernel or result in a security
--                   loophole. */
--		ret = -EIO;
--		if ((addr & 3) || addr < 0)
--			break;
--
--		if (addr < sizeof(struct pt_regs)) {
--			/* Ignore change of top 32 bits of SR */
--			if (addr == offsetof (struct pt_regs, sr)+4)
--			{
--				ret = 0;
--				break;
--			}
--			/* If lower 32 bits of SR, ignore non-user bits */
--			if (addr == offsetof (struct pt_regs, sr))
--			{
--				long cursr = get_stack_long(child, addr);
--				data &= ~(SR_MASK);
--				data |= (cursr & SR_MASK);
--			}
--			ret = put_stack_long(child, addr, data);
--		}
--		else if ((addr >= offsetof(struct user, fpu)) &&
--			 (addr <  offsetof(struct user, u_fpvalid))) {
--			ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
--		}
--		break;
--
--	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
--	case PTRACE_CONT: { /* restart after signal. */
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		if (request == PTRACE_SYSCALL)
--			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		else
--			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		child->exit_code = data;
--		wake_up_process(child);
--		ret = 0;
--		break;
--	}
--
--/*
-- * make the child exit.  Best I can do is send it a sigkill.
-- * perhaps it should be put in the status that it wants to
-- * exit.
-- */
--	case PTRACE_KILL: {
--		ret = 0;
--		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
--			break;
--		child->exit_code = SIGKILL;
--		wake_up_process(child);
--		break;
--	}
--
--	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
--		struct pt_regs *regs;
--
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		if ((child->ptrace & PT_DTRACE) == 0) {
--			/* Spurious delayed TF traps may occur */
--			child->ptrace |= PT_DTRACE;
--		}
--
--		regs = child->thread.uregs;
--
--		regs->sr |= SR_SSTEP;	/* auto-resetting upon exception */
--
--		child->exit_code = data;
--		/* give it a chance to run. */
--		wake_up_process(child);
--		ret = 0;
--		break;
--	}
--
--	default:
--		ret = ptrace_request(child, request, addr, data);
--		break;
--	}
--	return ret;
--}
+-	pte_t entry;
+-	int i;
 -
--asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
--{
--	extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
--#define WPC_DBRMODE 0x0d104008
--	static int first_call = 1;
+-	entry = *ptep;
 -
--	lock_kernel();
--	if (first_call) {
--		/* Set WPC.DBRMODE to 0.  This makes all debug events get
--		 * delivered through RESVEC, i.e. into the handlers in entry.S.
--		 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
--		 * would normally be left set to 1, which makes debug events get
--		 * delivered through DBRVEC, i.e. into the remote gdb's
--		 * handlers.  This prevents ptrace getting them, and confuses
--		 * the remote gdb.) */
--		printk("DBRMODE set to 0 to permit native debugging\n");
--		poke_real_address_q(WPC_DBRMODE, 0);
--		first_call = 0;
+-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+-		pte_clear(mm, addr, ptep);
+-		addr += PAGE_SIZE;
+-		ptep++;
 -	}
--	unlock_kernel();
--
--	return sys_ptrace(request, pid, addr, data);
--}
--
--asmlinkage void syscall_trace(void)
--{
--	struct task_struct *tsk = current;
--
--	if (!test_thread_flag(TIF_SYSCALL_TRACE))
--		return;
--	if (!(tsk->ptrace & PT_PTRACED))
--		return;
 -
--	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
--				 ? 0x80 : 0));
--	/*
--	 * this isn't the same as continuing with a signal, but it will do
--	 * for normal use.  strace only continues with a signal if the
--	 * stopping signal is not SIGTRAP.  -brl
--	 */
--	if (tsk->exit_code) {
--		send_sig(tsk->exit_code, tsk, 1);
--		tsk->exit_code = 0;
--	}
+-	return entry;
 -}
 -
--/* Called with interrupts disabled */
--asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
+-struct page *follow_huge_addr(struct mm_struct *mm,
+-			      unsigned long address, int write)
 -{
--	/* This is called after a single step exception (DEBUGSS).
--	   There is no need to change the PC, as it is a post-execution
--	   exception, as entry.S does not do anything to the PC for DEBUGSS.
--	   We need to clear the Single Step setting in SR to avoid
--	   continually stepping. */
--	local_irq_enable();
--	regs->sr &= ~SR_SSTEP;
--	force_sig(SIGTRAP, current);
+-	return ERR_PTR(-EINVAL);
 -}
 -
--/* Called with interrupts disabled */
--asmlinkage void do_software_break_point(unsigned long long vec,
--					struct pt_regs *regs)
+-int pmd_huge(pmd_t pmd)
 -{
--	/* We need to forward step the PC, to counteract the backstep done
--	   in signal.c. */
--	local_irq_enable();
--	force_sig(SIGTRAP, current);
--	regs->pc += 4;
+-	return 0;
 -}
 -
--/*
-- * Called by kernel/ptrace.c when detaching..
-- *
-- * Make sure single step bits etc are not set.
-- */
--void ptrace_disable(struct task_struct *child)
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmd, int write)
 -{
--        /* nothing to do.. */
+-	return NULL;
 -}
-diff --git a/arch/sh64/kernel/semaphore.c b/arch/sh64/kernel/semaphore.c
+diff --git a/arch/sh64/mm/init.c b/arch/sh64/mm/init.c
 deleted file mode 100644
-index 72c1653..0000000
---- a/arch/sh64/kernel/semaphore.c
+index 21cf42d..0000000
+--- a/arch/sh64/mm/init.c
 +++ /dev/null
-@@ -1,140 +0,0 @@
--/*
-- * Just taken from alpha implementation.
-- * This can't work well, perhaps.
-- */
+@@ -1,189 +0,0 @@
 -/*
-- *  Generic semaphore code. Buyer beware. Do your own
-- * specific changes in <asm/semaphore-helper.h>
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * arch/sh64/mm/init.c
+- *
+- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Copyright (C) 2003, 2004  Paul Mundt
+- *
 - */
 -
--#include <linux/errno.h>
--#include <linux/rwsem.h>
--#include <linux/sched.h>
--#include <linux/wait.h>
 -#include <linux/init.h>
--#include <asm/semaphore.h>
--#include <asm/semaphore-helper.h>
+-#include <linux/rwsem.h>
+-#include <linux/mm.h>
+-#include <linux/swap.h>
+-#include <linux/bootmem.h>
 -
--spinlock_t semaphore_wake_lock;
+-#include <asm/mmu_context.h>
+-#include <asm/page.h>
+-#include <asm/pgalloc.h>
+-#include <asm/pgtable.h>
+-#include <asm/tlb.h>
+-
+-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 -
 -/*
-- * Semaphores are implemented using a two-way counter:
-- * The "count" variable is decremented for each process
-- * that tries to sleep, while the "waking" variable is
-- * incremented when the "up()" code goes to wake up waiting
-- * processes.
-- *
-- * Notably, the inline "up()" and "down()" functions can
-- * efficiently test if they need to do any extra work (up
-- * needs to do something only if count was negative before
-- * the increment operation.
-- *
-- * waking_non_zero() (from asm/semaphore.h) must execute
-- * atomically.
-- *
-- * When __up() is called, the count was negative before
-- * incrementing it, and we need to wake up somebody.
+- * Cache of MMU context last used.
+- */
+-unsigned long mmu_context_cache;
+-pgd_t * mmu_pdtp_cache;
+-int after_bootmem = 0;
+-
+-/*
+- * BAD_PAGE is the page that is used for page faults when linux
+- * is out-of-memory. Older versions of linux just did a
+- * do_exit(), but using this instead means there is less risk
+- * for a process dying in kernel mode, possibly leaving an inode
+- * unused etc..
 - *
-- * This routine adds one to the count of processes that need to
-- * wake up and exit.  ALL waiting processes actually wake up but
-- * only the one that gets to the "waking" field first will gate
-- * through and acquire the semaphore.  The others will go back
-- * to sleep.
+- * BAD_PAGETABLE is the accompanying page-table: it is initialized
+- * to point to BAD_PAGE entries.
 - *
-- * Note that these functions are only called when there is
-- * contention on the lock, and as such all this is the
-- * "non-critical" part of the whole semaphore business. The
-- * critical part is the inline stuff in <asm/semaphore.h>
-- * where we want to avoid any extra jumps and calls.
+- * ZERO_PAGE is a special page that is used for zero-initialized
+- * data and COW.
 - */
--void __up(struct semaphore *sem)
+-
+-extern unsigned char empty_zero_page[PAGE_SIZE];
+-extern unsigned char empty_bad_page[PAGE_SIZE];
+-extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
+-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+-
+-extern char _text, _etext, _edata, __bss_start, _end;
+-extern char __init_begin, __init_end;
+-
+-/* It'd be good if these lines were in the standard header file. */
+-#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
+-#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
+-
+-
+-void show_mem(void)
 -{
--	wake_one_more(sem);
--	wake_up(&sem->wait);
+-	int i, total = 0, reserved = 0;
+-	int shared = 0, cached = 0;
+-
+-	printk("Mem-info:\n");
+-	show_free_areas();
+-	printk("Free swap:       %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+-	i = max_mapnr;
+-	while (i-- > 0) {
+-		total++;
+-		if (PageReserved(mem_map+i))
+-			reserved++;
+-		else if (PageSwapCache(mem_map+i))
+-			cached++;
+-		else if (page_count(mem_map+i))
+-			shared += page_count(mem_map+i) - 1;
+-	}
+-	printk("%d pages of RAM\n",total);
+-	printk("%d reserved pages\n",reserved);
+-	printk("%d pages shared\n",shared);
+-	printk("%d pages swap cached\n",cached);
+-	printk("%ld pages in page table cache\n", quicklist_total_size());
 -}
 -
 -/*
-- * Perform the "down" function.  Return zero for semaphore acquired,
-- * return negative for signalled out of the function.
-- *
-- * If called from __down, the return is ignored and the wait loop is
-- * not interruptible.  This means that a task waiting on a semaphore
-- * using "down()" cannot be killed until someone does an "up()" on
-- * the semaphore.
+- * paging_init() sets up the page tables.
 - *
-- * If called from __down_interruptible, the return value gets checked
-- * upon return.  If the return value is negative then the task continues
-- * with the negative value in the return register (it can be tested by
-- * the caller).
+- * head.S already did a lot to set up address translation for the kernel.
+- * Here we comes with:
+- * . MMU enabled
+- * . ASID set (SR)
+- * .  some 512MB regions being mapped of which the most relevant here is:
+- *   . CACHED segment (ASID 0 [irrelevant], shared AND NOT user)
+- * . possible variable length regions being mapped as:
+- *   . UNCACHED segment (ASID 0 [irrelevant], shared AND NOT user)
+- * . All of the memory regions are placed, independently from the platform
+- *   on high addresses, above 0x80000000.
+- * . swapper_pg_dir is already cleared out by the .space directive
+- *   in any case swapper does not require a real page directory since
+- *   it's all kernel contained.
 - *
-- * Either form may be used in conjunction with "up()".
+- * Those pesky NULL-reference errors in the kernel are then
+- * dealt with by not mapping address 0x00000000 at all.
 - *
 - */
+-void __init paging_init(void)
+-{
+-	unsigned long zones_size[MAX_NR_ZONES] = {0, };
 -
--#define DOWN_VAR				\
--	struct task_struct *tsk = current;	\
--	wait_queue_t wait;			\
--	init_waitqueue_entry(&wait, tsk);
+-	pgd_init((unsigned long)swapper_pg_dir);
+-	pgd_init((unsigned long)swapper_pg_dir +
+-		 sizeof(pgd_t) * USER_PTRS_PER_PGD);
 -
--#define DOWN_HEAD(task_state)						\
--									\
--									\
--	tsk->state = (task_state);					\
--	add_wait_queue(&sem->wait, &wait);				\
--									\
--	/*								\
--	 * Ok, we're set up.  sem->count is known to be less than zero	\
--	 * so we must wait.						\
--	 *								\
--	 * We can let go the lock for purposes of waiting.		\
--	 * We re-acquire it after awaking so as to protect		\
--	 * all semaphore operations.					\
--	 *								\
--	 * If "up()" is called before we call waking_non_zero() then	\
--	 * we will catch it right away.  If it is called later then	\
--	 * we will have to go through a wakeup cycle to catch it.	\
--	 *								\
--	 * Multiple waiters contend for the semaphore lock to see	\
--	 * who gets to gate through and who has to wait some more.	\
--	 */								\
--	for (;;) {
+-	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
 -
--#define DOWN_TAIL(task_state)			\
--		tsk->state = (task_state);	\
--	}					\
--	tsk->state = TASK_RUNNING;		\
--	remove_wait_queue(&sem->wait, &wait);
+-	zones_size[ZONE_NORMAL] = MAX_LOW_PFN - START_PFN;
+-	NODE_DATA(0)->node_mem_map = NULL;
+-	free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
+-}
 -
--void __sched __down(struct semaphore * sem)
+-void __init mem_init(void)
 -{
--	DOWN_VAR
--	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
--	if (waking_non_zero(sem))
--		break;
--	schedule();
--	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+-	int codesize, reservedpages, datasize, initsize;
+-	int tmp;
+-
+-	max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
+-	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
+-
+-	/*
+-         * Clear the zero-page.
+-         * This is not required but we might want to re-use
+-         * this very page to pass boot parameters, one day.
+-         */
+-	memset(empty_zero_page, 0, PAGE_SIZE);
+-
+-	/* this will put all low memory onto the freelists */
+-	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
+-	reservedpages = 0;
+-	for (tmp = 0; tmp < num_physpages; tmp++)
+-		/*
+-		 * Only count reserved RAM pages
+-		 */
+-		if (PageReserved(mem_map+tmp))
+-			reservedpages++;
+-
+-	after_bootmem = 1;
+-
+-	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
+-	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
+-	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+-
+-	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
+-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+-		max_mapnr << (PAGE_SHIFT-10),
+-		codesize >> 10,
+-		reservedpages << (PAGE_SHIFT-10),
+-		datasize >> 10,
+-		initsize >> 10);
 -}
 -
--int __sched __down_interruptible(struct semaphore * sem)
+-void free_initmem(void)
 -{
--	int ret = 0;
--	DOWN_VAR
--	DOWN_HEAD(TASK_INTERRUPTIBLE)
+-	unsigned long addr;
 -
--	ret = waking_non_zero_interruptible(sem, tsk);
--	if (ret)
--	{
--		if (ret == 1)
--			/* ret != 0 only if we get interrupted -arca */
--			ret = 0;
--		break;
+-	addr = (unsigned long)(&__init_begin);
+-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+-		ClearPageReserved(virt_to_page(addr));
+-		init_page_count(virt_to_page(addr));
+-		free_page(addr);
+-		totalram_pages++;
 -	}
--	schedule();
--	DOWN_TAIL(TASK_INTERRUPTIBLE)
--	return ret;
+-	printk ("Freeing unused kernel memory: %ldk freed\n", (&__init_end - &__init_begin) >> 10);
 -}
 -
--int __down_trylock(struct semaphore * sem)
+-#ifdef CONFIG_BLK_DEV_INITRD
+-void free_initrd_mem(unsigned long start, unsigned long end)
 -{
--	return waking_non_zero_trylock(sem);
+-	unsigned long p;
+-	for (p = start; p < end; p += PAGE_SIZE) {
+-		ClearPageReserved(virt_to_page(p));
+-		init_page_count(virt_to_page(p));
+-		free_page(p);
+-		totalram_pages++;
+-	}
+-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
 -}
-diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
+-#endif
+-
+diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c
 deleted file mode 100644
-index 2b7264c..0000000
---- a/arch/sh64/kernel/setup.c
+index 535304e..0000000
+--- a/arch/sh64/mm/ioremap.c
 +++ /dev/null
-@@ -1,379 +0,0 @@
+@@ -1,388 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/setup.c
-- *
-- * sh64 Arch Support
-- *
-- * This file handles the architecture-dependent parts of initialization
+- * arch/sh64/mm/ioremap.c
 - *
 - * Copyright (C) 2000, 2001  Paolo Alberelli
 - * Copyright (C) 2003, 2004  Paul Mundt
 - *
-- * benedict.gaster at superh.com:   2nd May 2002
-- *    Modified to use the empty_zero_page to pass command line arguments.
-- *
-- * benedict.gaster at superh.com:	 3rd May 2002
-- *    Added support for ramdisk, removing statically linked romfs at the same time.
-- *
-- * lethal at linux-sh.org:          15th May 2003
-- *    Added generic procfs cpuinfo reporting. Make boards just export their name.
-- *
-- * lethal at linux-sh.org:          25th May 2003
-- *    Added generic get_cpu_subtype() for subtype reporting from cpu_data->type.
+- * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
+- * derived from arch/i386/mm/ioremap.c .
 - *
+- *   (C) Copyright 1995 1996 Linus Torvalds
 - */
--#include <linux/errno.h>
--#include <linux/rwsem.h>
--#include <linux/sched.h>
 -#include <linux/kernel.h>
--#include <linux/mm.h>
--#include <linux/stddef.h>
--#include <linux/unistd.h>
--#include <linux/ptrace.h>
 -#include <linux/slab.h>
--#include <linux/user.h>
--#include <linux/a.out.h>
--#include <linux/screen_info.h>
+-#include <linux/vmalloc.h>
+-#include <linux/sched.h>
+-#include <linux/string.h>
+-#include <linux/io.h>
 -#include <linux/ioport.h>
--#include <linux/delay.h>
--#include <linux/init.h>
--#include <linux/seq_file.h>
--#include <linux/blkdev.h>
 -#include <linux/bootmem.h>
--#include <linux/console.h>
--#include <linux/root_dev.h>
--#include <linux/cpu.h>
--#include <linux/initrd.h>
--#include <linux/pfn.h>
--#include <asm/processor.h>
--#include <asm/page.h>
--#include <asm/pgtable.h>
--#include <asm/platform.h>
--#include <asm/uaccess.h>
--#include <asm/system.h>
--#include <asm/io.h>
--#include <asm/sections.h>
--#include <asm/setup.h>
--#include <asm/smp.h>
--
--struct screen_info screen_info;
+-#include <linux/proc_fs.h>
+-#include <linux/module.h>
+-#include <asm/pgalloc.h>
+-#include <asm/tlbflush.h>
 -
--#ifdef CONFIG_BLK_DEV_RAM
--extern int rd_doload;		/* 1 = load ramdisk, 0 = don't load */
--extern int rd_prompt;		/* 1 = prompt for ramdisk, 0 = don't prompt */
--extern int rd_image_start;	/* starting block # of image */
--#endif
+-static void shmedia_mapioaddr(unsigned long, unsigned long);
+-static unsigned long shmedia_ioremap(struct resource *, u32, int);
 -
--extern int root_mountflags;
--extern char *get_system_type(void);
--extern void platform_setup(void);
--extern void platform_monitor(void);
--extern void platform_reserve(void);
--extern int sh64_cache_init(void);
--extern int sh64_tlb_init(void);
+-/*
+- * Generic mapping function (not visible outside):
+- */
 -
--#define RAMDISK_IMAGE_START_MASK	0x07FF
--#define RAMDISK_PROMPT_FLAG		0x8000
--#define RAMDISK_LOAD_FLAG		0x4000
+-/*
+- * Remap an arbitrary physical address space into the kernel virtual
+- * address space. Needed when the kernel wants to access high addresses
+- * directly.
+- *
+- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+- * have to convert them into an offset in a page-aligned mapping, but the
+- * caller shouldn't need to know that small detail.
+- */
+-void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+-{
+-	void * addr;
+-	struct vm_struct * area;
+-	unsigned long offset, last_addr;
+-	pgprot_t pgprot;
 -
--static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };
--unsigned long long memory_start = CONFIG_MEMORY_START;
--unsigned long long memory_end = CONFIG_MEMORY_START + (CONFIG_MEMORY_SIZE_IN_MB * 1024 * 1024);
+-	/* Don't allow wraparound or zero size */
+-	last_addr = phys_addr + size - 1;
+-	if (!size || last_addr < phys_addr)
+-		return NULL;
 -
--struct sh_cpuinfo boot_cpu_data;
+-	pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ   |
+-			  _PAGE_WRITE    | _PAGE_DIRTY  |
+-			  _PAGE_ACCESSED | _PAGE_SHARED | flags);
 -
--static inline void parse_mem_cmdline (char ** cmdline_p)
--{
--        char c = ' ', *to = command_line, *from = COMMAND_LINE;
--	int len = 0;
+-	/*
+-	 * Mappings have to be page-aligned
+-	 */
+-	offset = phys_addr & ~PAGE_MASK;
+-	phys_addr &= PAGE_MASK;
+-	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 -
--	/* Save unparsed command line copy for /proc/cmdline */
--	memcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
--	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
+-	/*
+-	 * Ok, go for it..
+-	 */
+-	area = get_vm_area(size, VM_IOREMAP);
+-	pr_debug("Get vm_area returns %p addr %p\n",area,area->addr);
+-	if (!area)
+-		return NULL;
+-	area->phys_addr = phys_addr;
+-	addr = area->addr;
+-	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+-			       phys_addr, pgprot)) {
+-		vunmap(addr);
+-		return NULL;
+-	}
+-	return (void *) (offset + (char *)addr);
+-}
+-EXPORT_SYMBOL(__ioremap);
 -
--	for (;;) {
--	  /*
--	   * "mem=XXX[kKmM]" defines a size of memory.
--	   */
--	        if (c == ' ' && !memcmp(from, "mem=", 4)) {
--		      if (to != command_line)
--			to--;
--		      {
--			unsigned long mem_size;
+-void iounmap(void *addr)
+-{
+-	struct vm_struct *area;
 -
--			mem_size = memparse(from+4, &from);
--			memory_end = memory_start + mem_size;
--		      }
--		}
--		c = *(from++);
--		if (!c)
--		  break;
--		if (COMMAND_LINE_SIZE <= ++len)
--		  break;
--		*(to++) = c;
+-	vfree((void *) (PAGE_MASK & (unsigned long) addr));
+-	area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
+-	if (!area) {
+-		printk(KERN_ERR "iounmap: bad address %p\n", addr);
+-		return;
 -	}
--	*to = '\0';
 -
--	*cmdline_p = command_line;
+-	kfree(area);
 -}
+-EXPORT_SYMBOL(iounmap);
 -
--static void __init sh64_cpu_type_detect(void)
+-static struct resource shmedia_iomap = {
+-	.name	= "shmedia_iomap",
+-	.start	= IOBASE_VADDR + PAGE_SIZE,
+-	.end	= IOBASE_END - 1,
+-};
+-
+-static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
+-static void shmedia_unmapioaddr(unsigned long vaddr);
+-static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
+-
+-/*
+- * We have the same problem as the SPARC, so lets have the same comment:
+- * Our mini-allocator...
+- * Boy this is gross! We need it because we must map I/O for
+- * timers and interrupt controller before the kmalloc is available.
+- */
+-
+-#define XNMLN  15
+-#define XNRES  10
+-
+-struct xresource {
+-	struct resource xres;   /* Must be first */
+-	int xflag;              /* 1 == used */
+-	char xname[XNMLN+1];
+-};
+-
+-static struct xresource xresv[XNRES];
+-
+-static struct xresource *xres_alloc(void)
 -{
--	extern unsigned long long peek_real_address_q(unsigned long long addr);
--	unsigned long long cir;
--	/* Do peeks in real mode to avoid having to set up a mapping for the
--	   WPC registers.  On SH5-101 cut2, such a mapping would be exposed to
--	   an address translation erratum which would make it hard to set up
--	   correctly. */
--	cir = peek_real_address_q(0x0d000008);
+-        struct xresource *xrp;
+-        int n;
 -
--	if ((cir & 0xffff) == 0x5103) {
--		boot_cpu_data.type = CPU_SH5_103;
--	} else if (((cir >> 32) & 0xffff) == 0x51e2) {
--		/* CPU.VCR aliased at CIR address on SH5-101 */
--		boot_cpu_data.type = CPU_SH5_101;
--	} else {
--		boot_cpu_data.type = CPU_SH_NONE;
--	}
+-        xrp = xresv;
+-        for (n = 0; n < XNRES; n++) {
+-                if (xrp->xflag == 0) {
+-                        xrp->xflag = 1;
+-                        return xrp;
+-                }
+-                xrp++;
+-        }
+-        return NULL;
 -}
 -
--void __init setup_arch(char **cmdline_p)
+-static void xres_free(struct xresource *xrp)
 -{
--	unsigned long bootmap_size, i;
--	unsigned long first_pfn, start_pfn, last_pfn, pages;
+-	xrp->xflag = 0;
+-}
 -
--#ifdef CONFIG_EARLY_PRINTK
--	extern void enable_early_printk(void);
+-static struct resource *shmedia_find_resource(struct resource *root,
+-					      unsigned long vaddr)
+-{
+-	struct resource *res;
 -
--	/*
--	 * Setup Early SCIF console
--	 */
--	enable_early_printk();
--#endif
+-	for (res = root->child; res; res = res->sibling)
+-		if (res->start <= vaddr && res->end >= vaddr)
+-			return res;
 -
--	/*
--	 * Setup TLB mappings
--	 */
--	sh64_tlb_init();
+-	return NULL;
+-}
 -
--	/*
--	 * Caches are already initialized by the time we get here, so we just
--	 * fill in cpu_data info for the caches.
--	 */
--	sh64_cache_init();
+-static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
+-				      const char *name)
+-{
+-        static int printed_full = 0;
+-        struct xresource *xres;
+-        struct resource *res;
+-        char *tack;
+-        int tlen;
 -
--	platform_setup();
--	platform_monitor();
+-        if (name == NULL) name = "???";
 -
--	sh64_cpu_type_detect();
+-        if ((xres = xres_alloc()) != 0) {
+-                tack = xres->xname;
+-                res = &xres->xres;
+-        } else {
+-                if (!printed_full) {
+-                        printk("%s: done with statics, switching to kmalloc\n",
+-			       __FUNCTION__);
+-                        printed_full = 1;
+-                }
+-                tlen = strlen(name);
+-                tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
+-                if (!tack)
+-			return -ENOMEM;
+-                memset(tack, 0, sizeof(struct resource));
+-                res = (struct resource *) tack;
+-                tack += sizeof (struct resource);
+-        }
 -
--	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+-        strncpy(tack, name, XNMLN);
+-        tack[XNMLN] = 0;
+-        res->name = tack;
 -
--#ifdef CONFIG_BLK_DEV_RAM
--	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
--	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
--	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
--#endif
+-        return shmedia_ioremap(res, phys, size);
+-}
 -
--	if (!MOUNT_ROOT_RDONLY)
--		root_mountflags &= ~MS_RDONLY;
--	init_mm.start_code = (unsigned long) _text;
--	init_mm.end_code = (unsigned long) _etext;
--	init_mm.end_data = (unsigned long) _edata;
--	init_mm.brk = (unsigned long) _end;
+-static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
+-{
+-        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
+-	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
+-        unsigned long va;
+-        unsigned int psz;
 -
--	code_resource.start = __pa(_text);
--	code_resource.end = __pa(_etext)-1;
--	data_resource.start = __pa(_etext);
--	data_resource.end = __pa(_edata)-1;
+-        if (allocate_resource(&shmedia_iomap, res, round_sz,
+-			      shmedia_iomap.start, shmedia_iomap.end,
+-			      PAGE_SIZE, NULL, NULL) != 0) {
+-                panic("alloc_io_res(%s): cannot occupy\n",
+-                    (res->name != NULL)? res->name: "???");
+-        }
 -
--	parse_mem_cmdline(cmdline_p);
+-        va = res->start;
+-        pa &= PAGE_MASK;
 -
--	/*
--	 * Find the lowest and highest page frame numbers we have available
--	 */
--	first_pfn = PFN_DOWN(memory_start);
--	last_pfn = PFN_DOWN(memory_end);
--	pages = last_pfn - first_pfn;
+-	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
 -
--	/*
--	 * Partially used pages are not usable - thus
--	 * we are rounding upwards:
--	 */
--	start_pfn = PFN_UP(__pa(_end));
+-	/* log at boot time ... */
+-	printk("mapioaddr: %6s  [%2d page%s]  va 0x%08lx   pa 0x%08x\n",
+-	       ((res->name != NULL) ? res->name : "???"),
+-	       psz, psz == 1 ? " " : "s", va, pa);
 -
--	/*
--	 * Find a proper area for the bootmem bitmap. After this
--	 * bootstrap step all allocations (until the page allocator
--	 * is intact) must be done via bootmem_alloc().
--	 */
--	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
--					 first_pfn,
--					 last_pfn);
--        /*
--         * Round it up.
--         */
--        bootmap_size = PFN_PHYS(PFN_UP(bootmap_size));
+-        for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
+-                shmedia_mapioaddr(pa, va);
+-                va += PAGE_SIZE;
+-                pa += PAGE_SIZE;
+-        }
 -
--	/*
--	 * Register fully available RAM pages with the bootmem allocator.
--	 */
--	free_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn), PFN_PHYS(pages));
+-        res->start += offset;
+-        res->end = res->start + sz - 1;         /* not strictly necessary.. */
 -
--	/*
--	 * Reserve all kernel sections + bootmem bitmap + a guard page.
--	 */
--	reserve_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn),
--		        (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE) - PFN_PHYS(first_pfn));
+-        return res->start;
+-}
 -
--	/*
--	 * Reserve platform dependent sections
--	 */
--	platform_reserve();
+-static void shmedia_free_io(struct resource *res)
+-{
+-	unsigned long len = res->end - res->start + 1;
 -
--#ifdef CONFIG_BLK_DEV_INITRD
--	if (LOADER_TYPE && INITRD_START) {
--		if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
--		        reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
+-	BUG_ON((len & (PAGE_SIZE - 1)) != 0);
 -
--			initrd_start = (long) INITRD_START + PAGE_OFFSET + __MEMORY_START;
--			initrd_end = initrd_start + INITRD_SIZE;
--		} else {
--			printk("initrd extends beyond end of memory "
--			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
--				    (long) INITRD_START + INITRD_SIZE,
--				    PFN_PHYS(last_pfn));
--			initrd_start = 0;
--		}
+-	while (len) {
+-		len -= PAGE_SIZE;
+-		shmedia_unmapioaddr(res->start + len);
 -	}
--#endif
 -
--	/*
--	 * Claim all RAM, ROM, and I/O resources.
--	 */
+-	release_resource(res);
+-}
 -
--	/* Kernel RAM */
--	request_resource(&iomem_resource, &code_resource);
--	request_resource(&iomem_resource, &data_resource);
+-static __init_refok void *sh64_get_page(void)
+-{
+-	extern int after_bootmem;
+-	void *page;
 -
--	/* Other KRAM space */
--	for (i = 0; i < STANDARD_KRAM_RESOURCES - 2; i++)
--		request_resource(&iomem_resource,
--				 &platform_parms.kram_res_p[i]);
+-	if (after_bootmem) {
+-		page = (void *)get_zeroed_page(GFP_ATOMIC);
+-	} else {
+-		page = alloc_bootmem_pages(PAGE_SIZE);
+-	}
 -
--	/* XRAM space */
--	for (i = 0; i < STANDARD_XRAM_RESOURCES; i++)
--		request_resource(&iomem_resource,
--				 &platform_parms.xram_res_p[i]);
+-	if (!page || ((unsigned long)page & ~PAGE_MASK))
+-		panic("sh64_get_page: Out of memory already?\n");
 -
--	/* ROM space */
--	for (i = 0; i < STANDARD_ROM_RESOURCES; i++)
--		request_resource(&iomem_resource,
--				 &platform_parms.rom_res_p[i]);
+-	return page;
+-}
 -
--	/* I/O space */
--	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
--		request_resource(&ioport_resource,
--				 &platform_parms.io_res_p[i]);
+-static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
+-{
+-	pgd_t *pgdp;
+-	pmd_t *pmdp;
+-	pte_t *ptep, pte;
+-	pgprot_t prot;
+-	unsigned long flags = 1; /* 1 = CB0-1 device */
 -
+-	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);
 -
--#ifdef CONFIG_VT
--#if defined(CONFIG_VGA_CONSOLE)
--	conswitchp = &vga_con;
--#elif defined(CONFIG_DUMMY_CONSOLE)
--	conswitchp = &dummy_con;
--#endif
--#endif
+-	pgdp = pgd_offset_k(va);
+-	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
+-		pmdp = (pmd_t *)sh64_get_page();
+-		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
+-	}
 -
--	printk("Hardware FPU: %s\n", fpu_in_use ? "enabled" : "disabled");
+-	pmdp = pmd_offset(pgdp, va);
+-	if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
+-		ptep = (pte_t *)sh64_get_page();
+-		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
+-	}
 -
--	paging_init();
+-	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
+-			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);
+-
+-	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
+-	ptep = pte_offset_kernel(pmdp, va);
+-
+-	if (!pte_none(*ptep) &&
+-	    pte_val(*ptep) != pte_val(pte))
+-		pte_ERROR(*ptep);
+-
+-	set_pte(ptep, pte);
+-
+-	flush_tlb_kernel_range(va, PAGE_SIZE);
 -}
 -
--void __xchg_called_with_bad_pointer(void)
+-static void shmedia_unmapioaddr(unsigned long vaddr)
 -{
--	printk(KERN_EMERG "xchg() called with bad pointer !\n");
--}
+-	pgd_t *pgdp;
+-	pmd_t *pmdp;
+-	pte_t *ptep;
 -
--static struct cpu cpu[1];
+-	pgdp = pgd_offset_k(vaddr);
+-	pmdp = pmd_offset(pgdp, vaddr);
 -
--static int __init topology_init(void)
--{
--	return register_cpu(cpu, 0);
--}
+-	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
+-		return;
 -
--subsys_initcall(topology_init);
+-	ptep = pte_offset_kernel(pmdp, vaddr);
 -
--/*
-- *	Get CPU information
-- */
--static const char *cpu_name[] = {
--	[CPU_SH5_101]	= "SH5-101",
--	[CPU_SH5_103]	= "SH5-103",
--	[CPU_SH_NONE]	= "Unknown",
--};
+-	if (pte_none(*ptep) || !pte_present(*ptep))
+-		return;
 -
--const char *get_cpu_subtype(void)
--{
--	return cpu_name[boot_cpu_data.type];
+-	clear_page((void *)ptep);
+-	pte_clear(&init_mm, vaddr, ptep);
 -}
 -
--#ifdef CONFIG_PROC_FS
--static int show_cpuinfo(struct seq_file *m,void *v)
+-unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
 -{
--	unsigned int cpu = smp_processor_id();
+-	if (size < PAGE_SIZE)
+-		size = PAGE_SIZE;
 -
--	if (!cpu)
--		seq_printf(m, "machine\t\t: %s\n", get_system_type());
+-	return shmedia_alloc_io(phys, size, name);
+-}
 -
--	seq_printf(m, "processor\t: %d\n", cpu);
--	seq_printf(m, "cpu family\t: SH-5\n");
--	seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype());
+-void onchip_unmap(unsigned long vaddr)
+-{
+-	struct resource *res;
+-	unsigned int psz;
 -
--	seq_printf(m, "icache size\t: %dK-bytes\n",
--		   (boot_cpu_data.icache.ways *
--		    boot_cpu_data.icache.sets *
--		    boot_cpu_data.icache.linesz) >> 10);
--	seq_printf(m, "dcache size\t: %dK-bytes\n",
--		   (boot_cpu_data.dcache.ways *
--		    boot_cpu_data.dcache.sets *
--		    boot_cpu_data.dcache.linesz) >> 10);
--	seq_printf(m, "itlb entries\t: %d\n", boot_cpu_data.itlb.entries);
--	seq_printf(m, "dtlb entries\t: %d\n", boot_cpu_data.dtlb.entries);
+-	res = shmedia_find_resource(&shmedia_iomap, vaddr);
+-	if (!res) {
+-		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
+-		       __FUNCTION__, vaddr);
+-		return;
+-	}
 -
--#define PRINT_CLOCK(name, value) \
--	seq_printf(m, name " clock\t: %d.%02dMHz\n", \
--		     ((value) / 1000000), ((value) % 1000000)/10000)
+-        psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
 -
--	PRINT_CLOCK("cpu", boot_cpu_data.cpu_clock);
--	PRINT_CLOCK("bus", boot_cpu_data.bus_clock);
--	PRINT_CLOCK("module", boot_cpu_data.module_clock);
+-        printk(KERN_DEBUG "unmapioaddr: %6s  [%2d page%s] freed\n",
+-	       res->name, psz, psz == 1 ? " " : "s");
 -
--        seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
--		     (loops_per_jiffy*HZ+2500)/500000,
--		     ((loops_per_jiffy*HZ+2500)/5000) % 100);
+-	shmedia_free_io(res);
 -
--	return 0;
+-	if ((char *)res >= (char *)xresv &&
+-	    (char *)res <  (char *)&xresv[XNRES]) {
+-		xres_free((struct xresource *)res);
+-	} else {
+-		kfree(res);
+-	}
 -}
 -
--static void *c_start(struct seq_file *m, loff_t *pos)
--{
--	return (void*)(*pos == 0);
--}
--static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+-#ifdef CONFIG_PROC_FS
+-static int
+-ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
+-		  void *data)
 -{
--	return NULL;
+-	char *p = buf, *e = buf + length;
+-	struct resource *r;
+-	const char *nm;
+-
+-	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
+-		if (p + 32 >= e)        /* Better than nothing */
+-			break;
+-		if ((nm = r->name) == 0) nm = "???";
+-		p += sprintf(p, "%08lx-%08lx: %s\n",
+-			     (unsigned long)r->start,
+-			     (unsigned long)r->end, nm);
+-	}
+-
+-	return p-buf;
 -}
--static void c_stop(struct seq_file *m, void *v)
+-#endif /* CONFIG_PROC_FS */
+-
+-static int __init register_proc_onchip(void)
 -{
+-#ifdef CONFIG_PROC_FS
+-	create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
+-#endif
+-	return 0;
 -}
--struct seq_operations cpuinfo_op = {
--	.start	= c_start,
--	.next	= c_next,
--	.stop	= c_stop,
--	.show	= show_cpuinfo,
--};
--#endif /* CONFIG_PROC_FS */
-diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c
+-
+-__initcall(register_proc_onchip);
+diff --git a/arch/sh64/mm/tlb.c b/arch/sh64/mm/tlb.c
 deleted file mode 100644
-index b1705ac..0000000
---- a/arch/sh64/kernel/sh_ksyms.c
+index d517e7d..0000000
+--- a/arch/sh64/mm/tlb.c
 +++ /dev/null
-@@ -1,62 +0,0 @@
+@@ -1,166 +0,0 @@
 -/*
+- * arch/sh64/mm/tlb.c
+- *
+- * Copyright (C) 2003  Paul Mundt <lethal at linux-sh.org>
+- * Copyright (C) 2003  Richard Curnow <richard.curnow at superh.com>
+- *
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/sh_ksyms.c
+- */
+-#include <linux/mm.h>
+-#include <linux/init.h>
+-#include <asm/page.h>
+-#include <asm/tlb.h>
+-#include <asm/mmu_context.h>
+-
+-/**
+- * sh64_tlb_init
 - *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * Perform initial setup for the DTLB and ITLB.
+- */
+-int __init sh64_tlb_init(void)
+-{
+-	/* Assign some sane DTLB defaults */
+-	cpu_data->dtlb.entries	= 64;
+-	cpu_data->dtlb.step	= 0x10;
+-
+-	cpu_data->dtlb.first	= DTLB_FIXED | cpu_data->dtlb.step;
+-	cpu_data->dtlb.next	= cpu_data->dtlb.first;
+-
+-	cpu_data->dtlb.last	= DTLB_FIXED |
+-				  ((cpu_data->dtlb.entries - 1) *
+-				   cpu_data->dtlb.step);
+-
+-	/* And again for the ITLB */
+-	cpu_data->itlb.entries	= 64;
+-	cpu_data->itlb.step	= 0x10;
+-
+-	cpu_data->itlb.first	= ITLB_FIXED | cpu_data->itlb.step;
+-	cpu_data->itlb.next	= cpu_data->itlb.first;
+-	cpu_data->itlb.last	= ITLB_FIXED |
+-				  ((cpu_data->itlb.entries - 1) *
+-				   cpu_data->itlb.step);
+-
+-	return 0;
+-}
+-
+-/**
+- * sh64_next_free_dtlb_entry
+- *
+- * Find the next available DTLB entry
+- */
+-unsigned long long sh64_next_free_dtlb_entry(void)
+-{
+-	return cpu_data->dtlb.next;
+-}
+-
+-/**
+- * sh64_get_wired_dtlb_entry
 - *
+- * Allocate a wired (locked-in) entry in the DTLB
 - */
+-unsigned long long sh64_get_wired_dtlb_entry(void)
+-{
+-	unsigned long long entry = sh64_next_free_dtlb_entry();
 -
--#include <linux/rwsem.h>
--#include <linux/module.h>
--#include <linux/smp.h>
--#include <linux/user.h>
--#include <linux/elfcore.h>
--#include <linux/sched.h>
--#include <linux/in6.h>
--#include <linux/interrupt.h>
--#include <linux/screen_info.h>
+-	cpu_data->dtlb.first += cpu_data->dtlb.step;
+-	cpu_data->dtlb.next  += cpu_data->dtlb.step;
 -
--#include <asm/semaphore.h>
--#include <asm/processor.h>
--#include <asm/uaccess.h>
--#include <asm/checksum.h>
--#include <asm/io.h>
--#include <asm/delay.h>
--#include <asm/irq.h>
+-	return entry;
+-}
 -
--extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
+-/**
+- * sh64_put_wired_dtlb_entry
+- *
+- * @entry:	Address of TLB slot.
+- *
+- * Free a wired (locked-in) entry in the DTLB.
+- *
+- * Works like a stack, last one to allocate must be first one to free.
+- */
+-int sh64_put_wired_dtlb_entry(unsigned long long entry)
+-{
+-	__flush_tlb_slot(entry);
 -
--/* platform dependent support */
--EXPORT_SYMBOL(dump_fpu);
--EXPORT_SYMBOL(kernel_thread);
+-	/*
+-	 * We don't do any particularly useful tracking of wired entries,
+-	 * so this approach works like a stack .. last one to be allocated
+-	 * has to be the first one to be freed.
+-	 *
+-	 * We could potentially load wired entries into a list and work on
+-	 * rebalancing the list periodically (which also entails moving the
+-	 * contents of a TLB entry) .. though I have a feeling that this is
+-	 * more trouble than it's worth.
+-	 */
 -
--/* Networking helper routines. */
--EXPORT_SYMBOL(csum_partial_copy_nocheck);
+-	/*
+-	 * Entry must be valid .. we don't want any ITLB addresses!
+-	 */
+-	if (entry <= DTLB_FIXED)
+-		return -EINVAL;
 -
--#ifdef CONFIG_VT
--EXPORT_SYMBOL(screen_info);
+-	/*
+-	 * Next, check if we're within range to be freed. (ie, must be the
+-	 * entry beneath the first 'free' entry!
+-	 */
+-	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
+-		return -EINVAL;
+-
+-	/* If we are, then bring this entry back into the list */
+-	cpu_data->dtlb.first	-= cpu_data->dtlb.step;
+-	cpu_data->dtlb.next	= entry;
+-
+-	return 0;
+-}
+-
+-/**
+- * sh64_setup_tlb_slot
+- *
+- * @config_addr:	Address of TLB slot.
+- * @eaddr:		Virtual address.
+- * @asid:		Address Space Identifier.
+- * @paddr:		Physical address.
+- *
+- * Load up a virtual<->physical translation for @eaddr<->@paddr in the
+- * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
+- */
+-inline void sh64_setup_tlb_slot(unsigned long long config_addr,
+-				unsigned long eaddr,
+-				unsigned long asid,
+-				unsigned long paddr)
+-{
+-	unsigned long long pteh, ptel;
+-
+-	/* Sign extension */
+-#if (NEFF == 32)
+-	pteh = (unsigned long long)(signed long long)(signed long) eaddr;
+-#else
+-#error "Can't sign extend more than 32 bits yet"
 -#endif
+-	pteh &= PAGE_MASK;
+-	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
+-#if (NEFF == 32)
+-	ptel = (unsigned long long)(signed long long)(signed long) paddr;
+-#else
+-#error "Can't sign extend more than 32 bits yet"
+-#endif
+-	ptel &= PAGE_MASK;
+-	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
 -
--EXPORT_SYMBOL(__down);
--EXPORT_SYMBOL(__down_trylock);
--EXPORT_SYMBOL(__up);
--EXPORT_SYMBOL(__put_user_asm_l);
--EXPORT_SYMBOL(__get_user_asm_l);
--EXPORT_SYMBOL(__copy_user);
--EXPORT_SYMBOL(memcpy);
--EXPORT_SYMBOL(udelay);
--EXPORT_SYMBOL(__udelay);
--EXPORT_SYMBOL(ndelay);
--EXPORT_SYMBOL(__ndelay);
--EXPORT_SYMBOL(flush_dcache_page);
--EXPORT_SYMBOL(sh64_page_clear);
+-	asm volatile("putcfg %0, 1, %1\n\t"
+-			"putcfg %0, 0, %2\n"
+-			: : "r" (config_addr), "r" (ptel), "r" (pteh));
+-}
 -
--/* Ugh.  These come in from libgcc.a at link time. */
--#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
+-/**
+- * sh64_teardown_tlb_slot
+- *
+- * @config_addr:	Address of TLB slot.
+- *
+- * Teardown any existing mapping in the TLB slot @config_addr.
+- */
+-inline void sh64_teardown_tlb_slot(unsigned long long config_addr)
+-	__attribute__ ((alias("__flush_tlb_slot")));
 -
--DECLARE_EXPORT(__sdivsi3);
--DECLARE_EXPORT(__muldi3);
--DECLARE_EXPORT(__udivsi3);
-diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
+diff --git a/arch/sh64/mm/tlbmiss.c b/arch/sh64/mm/tlbmiss.c
 deleted file mode 100644
-index 79fc48c..0000000
---- a/arch/sh64/kernel/signal.c
+index b767d6c..0000000
+--- a/arch/sh64/mm/tlbmiss.c
 +++ /dev/null
-@@ -1,750 +0,0 @@
+@@ -1,279 +0,0 @@
 -/*
 - * This file is subject to the terms and conditions of the GNU General Public
 - * License.  See the file "COPYING" in the main directory of this archive
 - * for more details.
 - *
-- * arch/sh64/kernel/signal.c
+- * arch/sh64/mm/tlbmiss.c
 - *
+- * Original code from fault.c
 - * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003  Paul Mundt
-- * Copyright (C) 2004  Richard Curnow
 - *
-- * Started from sh version.
+- * Fast PTE->TLB refill path
+- * Copyright (C) 2003 Richard.Curnow at superh.com
+- *
+- * IMPORTANT NOTES :
+- * The do_fast_page_fault function is called from a context in entry.S where very few registers
+- * have been saved.  In particular, the code in this file must be compiled not to use ANY
+- * caller-save registers that are not part of the restricted save set.  Also, it means that
+- * code in this file must not make calls to functions elsewhere in the kernel, or else the
+- * excepting context will see corruption in its caller-save registers.  Plus, the entry.S save
+- * area is non-reentrant, so this code has to run with SR.BL==1, i.e. no interrupts taken inside
+- * it and panic on any exception.
 - *
 - */
--#include <linux/rwsem.h>
+-
+-#include <linux/signal.h>
 -#include <linux/sched.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
 -#include <linux/kernel.h>
--#include <linux/signal.h>
 -#include <linux/errno.h>
--#include <linux/wait.h>
--#include <linux/personality.h>
--#include <linux/freezer.h>
+-#include <linux/string.h>
+-#include <linux/types.h>
 -#include <linux/ptrace.h>
--#include <linux/unistd.h>
--#include <linux/stddef.h>
--#include <asm/ucontext.h>
+-#include <linux/mman.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/interrupt.h>
+-
+-#include <asm/system.h>
+-#include <asm/tlb.h>
+-#include <asm/io.h>
 -#include <asm/uaccess.h>
--#include <asm/pgtable.h>
+-#include <asm/pgalloc.h>
+-#include <asm/mmu_context.h>
+-#include <asm/registers.h>		/* required by inline asm statements */
 -
+-/* Callable from fault.c, so not static */
+-inline void __do_tlb_refill(unsigned long address,
+-                            unsigned long long is_text_not_data, pte_t *pte)
+-{
+-	unsigned long long ptel;
+-	unsigned long long pteh=0;
+-	struct tlb_info *tlbp;
+-	unsigned long long next;
 -
--#define REG_RET 9
--#define REG_ARG1 2
--#define REG_ARG2 3
--#define REG_ARG3 4
--#define REG_SP 15
--#define REG_PR 18
--#define REF_REG_RET regs->regs[REG_RET]
--#define REF_REG_SP regs->regs[REG_SP]
--#define DEREF_REG_PR regs->regs[REG_PR]
+-	/* Get PTEL first */
+-	ptel = pte_val(*pte);
 -
--#define DEBUG_SIG 0
+-	/*
+-	 * Set PTEH register
+-	 */
+-	pteh = address & MMU_VPN_MASK;
 -
--#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+-	/* Sign extend based on neff. */
+-#if (NEFF == 32)
+-	/* Faster sign extension */
+-	pteh = (unsigned long long)(signed long long)(signed long)pteh;
+-#else
+-	/* General case */
+-	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
+-#endif
 -
--asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+-	/* Set the ASID. */
+-	pteh |= get_asid() << PTEH_ASID_SHIFT;
+-	pteh |= PTEH_VALID;
 -
--/*
-- * Atomically swap in the new signal mask, and wait for a signal.
-- */
+-	/* Set PTEL register, set_pte has performed the sign extension */
+-	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
 -
--asmlinkage int
--sys_sigsuspend(old_sigset_t mask,
--	       unsigned long r3, unsigned long r4, unsigned long r5,
--	       unsigned long r6, unsigned long r7,
--	       struct pt_regs * regs)
+-	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
+-	next = tlbp->next;
+-	__flush_tlb_slot(next);
+-	asm volatile ("putcfg %0,1,%2\n\n\t"
+-		      "putcfg %0,0,%1\n"
+-		      :  : "r" (next), "r" (pteh), "r" (ptel) );
+-
+-	next += TLB_STEP;
+-	if (next > tlbp->last) next = tlbp->first;
+-	tlbp->next = next;
+-
+-}
+-
+-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long protection_flags,
+-                                unsigned long long textaccess,
+-				unsigned long address)
 -{
--	sigset_t saveset;
+-	pgd_t *dir;
+-	pmd_t *pmd;
+-	static pte_t *pte;
+-	pte_t entry;
 -
--	mask &= _BLOCKABLE;
--	spin_lock_irq(&current->sighand->siglock);
--	saveset = current->blocked;
--	siginitset(&current->blocked, mask);
--	recalc_sigpending();
--	spin_unlock_irq(&current->sighand->siglock);
+-	dir = pgd_offset_k(address);
+-	pmd = pmd_offset(dir, address);
 -
--	REF_REG_RET = -EINTR;
--	while (1) {
--		current->state = TASK_INTERRUPTIBLE;
--		schedule();
--		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
--		if (do_signal(regs, &saveset)) {
--			/* pc now points at signal handler. Need to decrement
--			   it because entry.S will increment it. */
--			regs->pc -= 4;
--			return -EINTR;
--		}
+-	if (pmd_none(*pmd)) {
+-		return 0;
 -	}
--}
 -
--asmlinkage int
--sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
--	          unsigned long r4, unsigned long r5, unsigned long r6,
--	          unsigned long r7,
--	          struct pt_regs * regs)
--{
--	sigset_t saveset, newset;
+-	if (pmd_bad(*pmd)) {
+-		pmd_clear(pmd);
+-		return 0;
+-	}
 -
--	/* XXX: Don't preclude handling different sized sigset_t's.  */
--	if (sigsetsize != sizeof(sigset_t))
--		return -EINVAL;
+-	pte = pte_offset_kernel(pmd, address);
+-	entry = *pte;
 -
--	if (copy_from_user(&newset, unewset, sizeof(newset)))
--		return -EFAULT;
--	sigdelsetmask(&newset, ~_BLOCKABLE);
--	spin_lock_irq(&current->sighand->siglock);
--	saveset = current->blocked;
--	current->blocked = newset;
--	recalc_sigpending();
--	spin_unlock_irq(&current->sighand->siglock);
+-	if (pte_none(entry) || !pte_present(entry)) {
+-		return 0;
+-	}
 -
--	REF_REG_RET = -EINTR;
--	while (1) {
--		current->state = TASK_INTERRUPTIBLE;
--		schedule();
--		regs->pc += 4;    /* because sys_sigreturn decrements the pc */
--		if (do_signal(regs, &saveset)) {
--			/* pc now points at signal handler. Need to decrement
--			   it because entry.S will increment it. */
--			regs->pc -= 4;
--			return -EINTR;
--		}
+-	if ((pte_val(entry) & protection_flags) != protection_flags) {
+-		return 0;
 -	}
+-
+-        __do_tlb_refill(address, textaccess, pte);
+-
+-	return 1;
 -}
 -
--asmlinkage int
--sys_sigaction(int sig, const struct old_sigaction __user *act,
--	      struct old_sigaction __user *oact)
+-static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_flags,
+-			unsigned long long textaccess,
+-			unsigned long address)
 -{
--	struct k_sigaction new_ka, old_ka;
--	int ret;
+-	pgd_t *dir;
+-	pmd_t *pmd;
+-	pte_t *pte;
+-	pte_t entry;
 -
--	if (act) {
--		old_sigset_t mask;
--		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
--		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
--		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
--			return -EFAULT;
--		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
--		__get_user(mask, &act->sa_mask);
--		siginitset(&new_ka.sa.sa_mask, mask);
+-	/* NB. The PGD currently only contains a single entry - there is no
+-	   page table tree stored for the top half of the address space since
+-	   virtual pages in that region should never be mapped in user mode.
+-	   (In kernel mode, the only things in that region are the 512Mb super
+-	   page (locked in), and vmalloc (modules) +  I/O device pages (handled
+-	   by handle_vmalloc_fault), so no PGD for the upper half is required
+-	   by kernel mode either).
+-
+-	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
+-	   the next test is necessary.  - RPC */
+-	if (address >= (unsigned long) TASK_SIZE) {
+-		/* upper half - never has page table entries. */
+-		return 0;
+-	}
+-	dir = pgd_offset(mm, address);
+-	if (pgd_none(*dir)) {
+-		return 0;
+-	}
+-	if (!pgd_present(*dir)) {
+-		return 0;
 -	}
 -
--	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+-	pmd = pmd_offset(dir, address);
+-	if (pmd_none(*pmd)) {
+-		return 0;
+-	}
+-	if (!pmd_present(*pmd)) {
+-		return 0;
+-	}
+-	pte = pte_offset_kernel(pmd, address);
+-	entry = *pte;
+-	if (pte_none(entry)) {
+-		return 0;
+-	}
+-	if (!pte_present(entry)) {
+-		return 0;
+-	}
 -
--	if (!ret && oact) {
--		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
--		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
--		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
--			return -EFAULT;
--		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
--		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+-	/* If the page doesn't have sufficient protection bits set to service the
+-	   kind of fault being handled, there's not much point doing the TLB refill.
+-	   Punt the fault to the general handler. */
+-	if ((pte_val(entry) & protection_flags) != protection_flags) {
+-		return 0;
 -	}
 -
--	return ret;
--}
+-        __do_tlb_refill(address, textaccess, pte);
 -
--asmlinkage int
--sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
--	        unsigned long r4, unsigned long r5, unsigned long r6,
--	        unsigned long r7,
--	        struct pt_regs * regs)
--{
--	return do_sigaltstack(uss, uoss, REF_REG_SP);
+-	return 1;
 -}
 -
+-/* Put all this information into one structure so that everything is just arithmetic
+-   relative to a single base address.  This reduces the number of movi/shori pairs needed
+-   just to load addresses of static data. */
+-struct expevt_lookup {
+-	unsigned short protection_flags[8];
+-	unsigned char  is_text_access[8];
+-	unsigned char  is_write_access[8];
+-};
 -
--/*
-- * Do a signal return; undo the signal stack.
-- */
+-#define PRU (1<<9)
+-#define PRW (1<<8)
+-#define PRX (1<<7)
+-#define PRR (1<<6)
 -
--struct sigframe
--{
--	struct sigcontext sc;
--	unsigned long extramask[_NSIG_WORDS-1];
--	long long retcode[2];
--};
+-#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
+-#define YOUNG (_PAGE_ACCESSED)
 -
--struct rt_sigframe
--{
--	struct siginfo __user *pinfo;
--	void *puc;
--	struct siginfo info;
--	struct ucontext uc;
--	long long retcode[2];
+-/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
+-   the fault happened in user mode or privileged mode. */
+-static struct expevt_lookup expevt_lookup_table = {
+-	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
+-	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
 -};
 -
--#ifdef CONFIG_SH_FPU
--static inline int
--restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+-/*
+-   This routine handles page faults that can be serviced just by refilling a
+-   TLB entry from an existing page table entry.  (This case represents a very
+-   large majority of page faults.) Return 1 if the fault was successfully
+-   handled.  Return 0 if the fault could not be handled.  (This leads into the
+-   general fault handling in fault.c which deals with mapping file-backed
+-   pages, stack growth, segmentation faults, swapping etc etc)
+- */
+-asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
+-			          unsigned long address)
 -{
--	int err = 0;
--	int fpvalid;
+-	struct task_struct *tsk;
+-	struct mm_struct *mm;
+-	unsigned long long textaccess;
+-	unsigned long long protection_flags;
+-	unsigned long long index;
+-	unsigned long long expevt4;
 -
--	err |= __get_user (fpvalid, &sc->sc_fpvalid);
--	conditional_used_math(fpvalid);
--	if (! fpvalid)
--		return err;
+-	/* The next few lines implement a way of hashing EXPEVT into a small array index
+-	   which can be used to lookup parameters specific to the type of TLBMISS being
+-	   handled.  Note:
+-	   ITLBMISS has EXPEVT==0xa40
+-	   RTLBMISS has EXPEVT==0x040
+-	   WTLBMISS has EXPEVT==0x060
+-	*/
 -
--	if (current == last_task_used_math) {
--		last_task_used_math = NULL;
--		regs->sr |= SR_FD;
--	}
+-	expevt4 = (expevt >> 4);
+-	/* TODO : xor ssr_md into this expression too.  Then we can check that PRU is set
+-	   when it needs to be. */
+-	index = expevt4 ^ (expevt4 >> 5);
+-	index &= 7;
+-	protection_flags = expevt_lookup_table.protection_flags[index];
+-	textaccess       = expevt_lookup_table.is_text_access[index];
 -
--	err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
--				(sizeof(long long) * 32) + (sizeof(int) * 1));
+-#ifdef CONFIG_SH64_PROC_TLB
+-	++calls_to_do_fast_page_fault;
+-#endif
 -
--	return err;
+-	/* SIM
+-	 * Note this is now called with interrupts still disabled
+-	 * This is to cope with being called for a missing IO port
+-	 * address with interrupts disabled. This should be fixed as
+-	 * soon as we have a better 'fast path' miss handler.
+-	 *
+-	 * Plus take care how you try and debug this stuff.
+-	 * For example, writing debug data to a port which you
+-	 * have just faulted on is not going to work.
+-	 */
+-
+-	tsk = current;
+-	mm = tsk->mm;
+-
+-	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
+-	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
+-		if (ssr_md) {
+-			/* Process-contexts can never have this address range mapped */
+-			if (handle_vmalloc_fault(mm, protection_flags, textaccess, address)) {
+-				return 1;
+-			}
+-		}
+-	} else if (!in_interrupt() && mm) {
+-		if (handle_tlbmiss(mm, protection_flags, textaccess, address)) {
+-			return 1;
+-		}
+-	}
+-
+-	return 0;
 -}
 -
--static inline int
--setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
--{
--	int err = 0;
--	int fpvalid;
+diff --git a/arch/sh64/oprofile/Makefile b/arch/sh64/oprofile/Makefile
+deleted file mode 100644
+index 11a451f..0000000
+--- a/arch/sh64/oprofile/Makefile
++++ /dev/null
+@@ -1,12 +0,0 @@
+-obj-$(CONFIG_OPROFILE) += oprofile.o
 -
--	fpvalid = !!used_math();
--	err |= __put_user(fpvalid, &sc->sc_fpvalid);
--	if (! fpvalid)
--		return err;
+-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+-		oprof.o cpu_buffer.o buffer_sync.o \
+-		event_buffer.o oprofile_files.o \
+-		oprofilefs.o oprofile_stats.o \
+-		timer_int.o )
 -
--	if (current == last_task_used_math) {
--		grab_fpu();
--		fpsave(&current->thread.fpu.hard);
--		release_fpu();
--		last_task_used_math = NULL;
--		regs->sr |= SR_FD;
--	}
+-profdrvr-y				:= op_model_null.o
 -
--	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
--			      (sizeof(long long) * 32) + (sizeof(int) * 1));
--	clear_used_math();
+-oprofile-y				:= $(DRIVER_OBJS) $(profdrvr-y)
 -
--	return err;
+diff --git a/arch/sh64/oprofile/op_model_null.c b/arch/sh64/oprofile/op_model_null.c
+deleted file mode 100644
+index a750ea1..0000000
+--- a/arch/sh64/oprofile/op_model_null.c
++++ /dev/null
+@@ -1,23 +0,0 @@
+-/*
+- * arch/sh64/oprofile/op_model_null.c
+- *
+- * Copyright (C) 2003  Paul Mundt
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-#include <linux/kernel.h>
+-#include <linux/oprofile.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-
+-int __init oprofile_arch_init(struct oprofile_operations *ops)
+-{
+-	return -ENODEV;
 -}
--#else
--static inline int
--restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
--{}
--static inline int
--setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
--{}
--#endif
 -
--static int
--restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
+-void oprofile_arch_exit(void)
 -{
--	unsigned int err = 0;
--        unsigned long long current_sr, new_sr;
--#define SR_MASK 0xffff8cfd
+-}
 -
--#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index a8b4200..216147d 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -48,12 +48,12 @@ SECTIONS
+ 	__init_begin = .;
+ 	.init.text : {
+ 		_sinittext = .;
+-		*(.init.text)
++		INIT_TEXT
+ 		_einittext = .;
+ 	}
+ 	__init_text_end = .;
+ 	.init.data : {
+-		*(.init.data)
++		INIT_DATA
+ 	}
+ 	. = ALIGN(16);
+ 	.init.setup : {
+@@ -102,8 +102,8 @@ SECTIONS
+ 	_end = . ;
+ 	PROVIDE (end = .);
+ 	/DISCARD/ : {
+-		*(.exit.text)
+-		*(.exit.data)
++		EXIT_TEXT
++		EXIT_DATA
+ 		*(.exitcall.exit)
+ 	}
+ 
+diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
+index 10b212a..73fc05d 100644
+--- a/arch/sparc64/Kconfig
++++ b/arch/sparc64/Kconfig
+@@ -66,6 +66,9 @@ config AUDIT_ARCH
+ 	bool
+ 	default y
+ 
++config HAVE_SETUP_PER_CPU_AREA
++	def_bool y
++
+ config ARCH_NO_VIRT_TO_BUS
+ 	def_bool y
+ 
+@@ -200,6 +203,11 @@ config US2E_FREQ
+ 	  If in doubt, say N.
+ 
+ # Global things across all Sun machines.
++config GENERIC_LOCKBREAK
++	bool
++	default y
++	depends on SMP && PREEMPT
++
+ config RWSEM_GENERIC_SPINLOCK
+ 	bool
+ 
+diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
+index 953be81..dc7bf1b 100644
+--- a/arch/sparc64/kernel/unaligned.c
++++ b/arch/sparc64/kernel/unaligned.c
+@@ -175,7 +175,7 @@ unsigned long compute_effective_address(struct pt_regs *regs,
+ }
+ 
+ /* This is just to make gcc think die_if_kernel does return... */
+-static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
++static void __used unaligned_panic(char *str, struct pt_regs *regs)
+ {
+ 	die_if_kernel(str, regs);
+ }
+diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
+index 9fcd503..01f8096 100644
+--- a/arch/sparc64/kernel/vmlinux.lds.S
++++ b/arch/sparc64/kernel/vmlinux.lds.S
+@@ -56,11 +56,11 @@ SECTIONS
+ 	.init.text : {
+ 		__init_begin = .;
+ 		_sinittext = .;
+-		*(.init.text)
++		INIT_TEXT
+ 		_einittext = .;
+ 	}
+ 	.init.data : {
+-		*(.init.data)
++		INIT_DATA
+ 	}
+ 	. = ALIGN(16);
+ 	.init.setup : {
+@@ -137,8 +137,8 @@ SECTIONS
+ 	PROVIDE (end = .);
+ 
+ 	/DISCARD/ : {
+-		*(.exit.text)
+-		*(.exit.data)
++		EXIT_TEXT
++		EXIT_DATA
+ 		*(.exitcall.exit)
+ 	}
+ 
+diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
+index fbeb55d..523e993 100644
+--- a/arch/sparc64/mm/init.c
++++ b/arch/sparc64/mm/init.c
+@@ -1328,6 +1328,11 @@ pgd_t swapper_pg_dir[2048];
+ static void sun4u_pgprot_init(void);
+ static void sun4v_pgprot_init(void);
+ 
++/* Dummy function */
++void __init setup_per_cpu_areas(void)
++{
++}
++
+ void __init paging_init(void)
+ {
+ 	unsigned long end_pfn, pages_avail, shift, phys_base;
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index b1a77b1..99f9f96 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -475,17 +475,9 @@ static void do_ubd_request(struct request_queue * q);
+ /* Only changed by ubd_init, which is an initcall. */
+ int thread_fd = -1;
+ 
+-static void ubd_end_request(struct request *req, int bytes, int uptodate)
++static void ubd_end_request(struct request *req, int bytes, int error)
+ {
+-	if (!end_that_request_first(req, uptodate, bytes >> 9)) {
+-		struct ubd *dev = req->rq_disk->private_data;
+-		unsigned long flags;
 -
--	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
--	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
--	COPY(regs[8]);	COPY(regs[9]);  COPY(regs[10]);	COPY(regs[11]);
--	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
--	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
--	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
--	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
--	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
--	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
--	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
--	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
--	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
--	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
--	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
--	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
--	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
--	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
--	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
+-		add_disk_randomness(req->rq_disk);
+-		spin_lock_irqsave(&dev->lock, flags);
+-		end_that_request_last(req, uptodate);
+-		spin_unlock_irqrestore(&dev->lock, flags);
+-	}
++	blk_end_request(req, error, bytes);
+ }
+ 
+ /* Callable only from interrupt context - otherwise you need to do
+@@ -493,10 +485,10 @@ static void ubd_end_request(struct request *req, int bytes, int uptodate)
+ static inline void ubd_finish(struct request *req, int bytes)
+ {
+ 	if(bytes < 0){
+-		ubd_end_request(req, 0, 0);
++		ubd_end_request(req, 0, -EIO);
+ 		return;
+ 	}
+-	ubd_end_request(req, bytes, 1);
++	ubd_end_request(req, bytes, 0);
+ }
+ 
+ static LIST_HEAD(restart);
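The ubd_kern.c change above follows the block layer's move from the two-step end_that_request_first()/end_that_request_last() helpers to blk_end_request(), which takes a byte count and a 0/-errno error code instead of an "uptodate" flag. A minimal sketch of the conversion pattern, assuming a hypothetical driver completion helper (mydrv_complete and the lock named in the comment are illustrative, not taken from the driver):

#include <linux/blkdev.h>

static void mydrv_complete(struct request *req, int bytes, int error)
{
	/*
	 * Old style: complete 'bytes' worth of the request first, and only
	 * when nothing is left, finish it under the queue lock:
	 *
	 *	if (!end_that_request_first(req, error == 0, bytes >> 9)) {
	 *		add_disk_randomness(req->rq_disk);
	 *		spin_lock_irqsave(&dev->lock, flags);
	 *		end_that_request_last(req, error == 0);
	 *		spin_unlock_irqrestore(&dev->lock, flags);
	 *	}
	 *
	 * New style: one call handles both the partial and the final
	 * completion bookkeeping.
	 */
	blk_end_request(req, error, bytes);
}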
+diff --git a/arch/um/include/init.h b/arch/um/include/init.h
+index d4de7c0..cebc6ca 100644
+--- a/arch/um/include/init.h
++++ b/arch/um/include/init.h
+@@ -42,15 +42,15 @@ typedef void (*exitcall_t)(void);
+ 
+ /* These are for everybody (although not all archs will actually
+    discard it in modules) */
+-#define __init		__attribute__ ((__section__ (".init.text")))
+-#define __initdata	__attribute__ ((__section__ (".init.data")))
+-#define __exitdata	__attribute__ ((__section__(".exit.data")))
+-#define __exit_call	__attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
++#define __init		__section(.init.text)
++#define __initdata	__section(.init.data)
++#define __exitdata	__section(.exit.data)
++#define __exit_call	__used __section(.exitcall.exit)
+ 
+ #ifdef MODULE
+-#define __exit		__attribute__ ((__section__(".exit.text")))
++#define __exit		__section(.exit.text)
+ #else
+-#define __exit		__attribute_used__ __attribute__ ((__section__(".exit.text")))
++#define __exit		__used __section(.exit.text)
+ #endif
+ 
+ #endif
+@@ -103,16 +103,16 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;
+  * Mark functions and data as being only used at initialization
+  * or exit time.
+  */
+-#define __uml_init_setup	__attribute_used__ __attribute__ ((__section__ (".uml.setup.init")))
+-#define __uml_setup_help	__attribute_used__ __attribute__ ((__section__ (".uml.help.init")))
+-#define __uml_init_call		__attribute_used__ __attribute__ ((__section__ (".uml.initcall.init")))
+-#define __uml_postsetup_call	__attribute_used__ __attribute__ ((__section__ (".uml.postsetup.init")))
+-#define __uml_exit_call		__attribute_used__ __attribute__ ((__section__ (".uml.exitcall.exit")))
++#define __uml_init_setup	__used __section(.uml.setup.init)
++#define __uml_setup_help	__used __section(.uml.help.init)
++#define __uml_init_call		__used __section(.uml.initcall.init)
++#define __uml_postsetup_call	__used __section(.uml.postsetup.init)
++#define __uml_exit_call		__used __section(.uml.exitcall.exit)
+ 
+ #ifndef __KERNEL__
+ 
+ #define __define_initcall(level,fn) \
+-	static initcall_t __initcall_##fn __attribute_used__ \
++	static initcall_t __initcall_##fn __used \
+ 	__attribute__((__section__(".initcall" level ".init"))) = fn
+ 
+ /* Userspace initcalls shouldn't depend on anything in the kernel, so we'll
+@@ -122,7 +122,7 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;
+ 
+ #define __exitcall(fn) static exitcall_t __exitcall_##fn __exit_call = fn
+ 
+-#define __init_call	__attribute_used__ __attribute__ ((__section__ (".initcall.init")))
++#define __init_call	__used __section(.initcall.init)
+ 
+ #endif
+ 
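The init.h hunk above only changes spelling: the long-hand __attribute__ forms give way to the __used and __section() helpers from <linux/compiler.h>. Roughly, and from memory rather than from this patch, the helpers expand as shown below; example_exit_fn and example_exitcall are illustrative names:

#include <linux/compiler.h>

/*
 * Approximate expansions of the helpers:
 *
 *	#define __used		__attribute__((__used__))
 *	#define __section(S)	__attribute__((__section__(#S)))
 */
static void example_exit_fn(void)
{
}

/* Kept in .exitcall.exit even though only the linker script references it. */
static void (*example_exitcall)(void) __used __section(.exitcall.exit) = example_exit_fn;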
+diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
+index 3866f49..26090b7 100644
+--- a/arch/um/kernel/dyn.lds.S
++++ b/arch/um/kernel/dyn.lds.S
+@@ -17,7 +17,7 @@ SECTIONS
+   __init_begin = .;
+   .init.text : {
+ 	_sinittext = .;
+-	*(.init.text)
++	INIT_TEXT
+ 	_einittext = .;
+   }
+ 
+@@ -84,7 +84,7 @@ SECTIONS
+ 
+   #include "asm/common.lds.S"
+ 
+-  init.data : { *(.init.data) }
++  init.data : { INIT_DATA }
+ 
+   /* Ensure the __preinit_array_start label is properly aligned.  We
+      could instead move the label definition inside the section, but
+diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
+index 1b388b4..7c7142b 100644
+--- a/arch/um/kernel/ksyms.c
++++ b/arch/um/kernel/ksyms.c
+@@ -71,10 +71,10 @@ EXPORT_SYMBOL(dump_thread);
+ 
+ /* required for SMP */
+ 
+-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
++extern void __write_lock_failed(rwlock_t *rw);
+ EXPORT_SYMBOL(__write_lock_failed);
+ 
+-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
++extern void __read_lock_failed(rwlock_t *rw);
+ EXPORT_SYMBOL(__read_lock_failed);
+ 
+ #endif
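The ksyms.c hunk is part of the tree-wide removal of FASTCALL(). From memory (not quoted from this patch), the macro expanded roughly as follows and became redundant once 32-bit x86 builds pass -mregparm=3 unconditionally, so prototypes like the ones above simply lose the annotation:

/* Old i386 definitions, approximately: */
#define FASTCALL(x)	x __attribute__((regparm(3)))
#define fastcall	__attribute__((regparm(3)))

/*
 * Fallback on architectures without a register-argument convention:
 *	#define FASTCALL(x)	x
 *	#define fastcall
 */
extern void FASTCALL(example_lock_failed(void *lock));	/* old-style prototype, illustrative name */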
+diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
+index 13df191..5828c1d 100644
+--- a/arch/um/kernel/uml.lds.S
++++ b/arch/um/kernel/uml.lds.S
+@@ -23,7 +23,7 @@ SECTIONS
+   __init_begin = .;
+   .init.text : {
+ 	_sinittext = .;
+-	*(.init.text)
++	INIT_TEXT
+ 	_einittext = .;
+   }
+   . = ALIGN(4096);
+@@ -48,7 +48,7 @@ SECTIONS
+ 
+   #include "asm/common.lds.S"
+ 
+-  init.data : { *(init.data) }
++  init.data : { INIT_DATA }
+   .data    :
+   {
+     . = ALIGN(KERNEL_STACK_SIZE);		/* init_task */
+diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
+index 0147227..19053d4 100644
+--- a/arch/um/sys-i386/signal.c
++++ b/arch/um/sys-i386/signal.c
+@@ -3,10 +3,10 @@
+  * Licensed under the GPL
+  */
+ 
+-#include "linux/ptrace.h"
+-#include "asm/unistd.h"
+-#include "asm/uaccess.h"
+-#include "asm/ucontext.h"
++#include <linux/ptrace.h>
++#include <asm/unistd.h>
++#include <asm/uaccess.h>
++#include <asm/ucontext.h>
+ #include "frame_kern.h"
+ #include "skas.h"
+ 
+@@ -18,17 +18,17 @@ void copy_sc(struct uml_pt_regs *regs, void *from)
+ 	REGS_FS(regs->gp) = sc->fs;
+ 	REGS_ES(regs->gp) = sc->es;
+ 	REGS_DS(regs->gp) = sc->ds;
+-	REGS_EDI(regs->gp) = sc->edi;
+-	REGS_ESI(regs->gp) = sc->esi;
+-	REGS_EBP(regs->gp) = sc->ebp;
+-	REGS_SP(regs->gp) = sc->esp;
+-	REGS_EBX(regs->gp) = sc->ebx;
+-	REGS_EDX(regs->gp) = sc->edx;
+-	REGS_ECX(regs->gp) = sc->ecx;
+-	REGS_EAX(regs->gp) = sc->eax;
+-	REGS_IP(regs->gp) = sc->eip;
++	REGS_EDI(regs->gp) = sc->di;
++	REGS_ESI(regs->gp) = sc->si;
++	REGS_EBP(regs->gp) = sc->bp;
++	REGS_SP(regs->gp) = sc->sp;
++	REGS_EBX(regs->gp) = sc->bx;
++	REGS_EDX(regs->gp) = sc->dx;
++	REGS_ECX(regs->gp) = sc->cx;
++	REGS_EAX(regs->gp) = sc->ax;
++	REGS_IP(regs->gp) = sc->ip;
+ 	REGS_CS(regs->gp) = sc->cs;
+-	REGS_EFLAGS(regs->gp) = sc->eflags;
++	REGS_EFLAGS(regs->gp) = sc->flags;
+ 	REGS_SS(regs->gp) = sc->ss;
+ }
+ 
+@@ -229,18 +229,18 @@ static int copy_sc_to_user(struct sigcontext __user *to,
+ 	sc.fs = REGS_FS(regs->regs.gp);
+ 	sc.es = REGS_ES(regs->regs.gp);
+ 	sc.ds = REGS_DS(regs->regs.gp);
+-	sc.edi = REGS_EDI(regs->regs.gp);
+-	sc.esi = REGS_ESI(regs->regs.gp);
+-	sc.ebp = REGS_EBP(regs->regs.gp);
+-	sc.esp = sp;
+-	sc.ebx = REGS_EBX(regs->regs.gp);
+-	sc.edx = REGS_EDX(regs->regs.gp);
+-	sc.ecx = REGS_ECX(regs->regs.gp);
+-	sc.eax = REGS_EAX(regs->regs.gp);
+-	sc.eip = REGS_IP(regs->regs.gp);
++	sc.di = REGS_EDI(regs->regs.gp);
++	sc.si = REGS_ESI(regs->regs.gp);
++	sc.bp = REGS_EBP(regs->regs.gp);
++	sc.sp = sp;
++	sc.bx = REGS_EBX(regs->regs.gp);
++	sc.dx = REGS_EDX(regs->regs.gp);
++	sc.cx = REGS_ECX(regs->regs.gp);
++	sc.ax = REGS_EAX(regs->regs.gp);
++	sc.ip = REGS_IP(regs->regs.gp);
+ 	sc.cs = REGS_CS(regs->regs.gp);
+-	sc.eflags = REGS_EFLAGS(regs->regs.gp);
+-	sc.esp_at_signal = regs->regs.gp[UESP];
++	sc.flags = REGS_EFLAGS(regs->regs.gp);
++	sc.sp_at_signal = regs->regs.gp[UESP];
+ 	sc.ss = regs->regs.gp[SS];
+ 	sc.cr2 = fi->cr2;
+ 	sc.err = fi->error_code;
+diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
+index 1778d33..7457436 100644
+--- a/arch/um/sys-x86_64/signal.c
++++ b/arch/um/sys-x86_64/signal.c
+@@ -4,11 +4,11 @@
+  * Licensed under the GPL
+  */
+ 
+-#include "linux/personality.h"
+-#include "linux/ptrace.h"
+-#include "asm/unistd.h"
+-#include "asm/uaccess.h"
+-#include "asm/ucontext.h"
++#include <linux/personality.h>
++#include <linux/ptrace.h>
++#include <asm/unistd.h>
++#include <asm/uaccess.h>
++#include <asm/ucontext.h>
+ #include "frame_kern.h"
+ #include "skas.h"
+ 
+@@ -27,16 +27,16 @@ void copy_sc(struct uml_pt_regs *regs, void *from)
+ 	GETREG(regs, R13, sc, r13);
+ 	GETREG(regs, R14, sc, r14);
+ 	GETREG(regs, R15, sc, r15);
+-	GETREG(regs, RDI, sc, rdi);
+-	GETREG(regs, RSI, sc, rsi);
+-	GETREG(regs, RBP, sc, rbp);
+-	GETREG(regs, RBX, sc, rbx);
+-	GETREG(regs, RDX, sc, rdx);
+-	GETREG(regs, RAX, sc, rax);
+-	GETREG(regs, RCX, sc, rcx);
+-	GETREG(regs, RSP, sc, rsp);
+-	GETREG(regs, RIP, sc, rip);
+-	GETREG(regs, EFLAGS, sc, eflags);
++	GETREG(regs, RDI, sc, di);
++	GETREG(regs, RSI, sc, si);
++	GETREG(regs, RBP, sc, bp);
++	GETREG(regs, RBX, sc, bx);
++	GETREG(regs, RDX, sc, dx);
++	GETREG(regs, RAX, sc, ax);
++	GETREG(regs, RCX, sc, cx);
++	GETREG(regs, RSP, sc, sp);
++	GETREG(regs, RIP, sc, ip);
++	GETREG(regs, EFLAGS, sc, flags);
+ 	GETREG(regs, CS, sc, cs);
+ 
+ #undef GETREG
+@@ -61,16 +61,16 @@ static int copy_sc_from_user(struct pt_regs *regs,
+ 	err |= GETREG(regs, R13, from, r13);
+ 	err |= GETREG(regs, R14, from, r14);
+ 	err |= GETREG(regs, R15, from, r15);
+-	err |= GETREG(regs, RDI, from, rdi);
+-	err |= GETREG(regs, RSI, from, rsi);
+-	err |= GETREG(regs, RBP, from, rbp);
+-	err |= GETREG(regs, RBX, from, rbx);
+-	err |= GETREG(regs, RDX, from, rdx);
+-	err |= GETREG(regs, RAX, from, rax);
+-	err |= GETREG(regs, RCX, from, rcx);
+-	err |= GETREG(regs, RSP, from, rsp);
+-	err |= GETREG(regs, RIP, from, rip);
+-	err |= GETREG(regs, EFLAGS, from, eflags);
++	err |= GETREG(regs, RDI, from, di);
++	err |= GETREG(regs, RSI, from, si);
++	err |= GETREG(regs, RBP, from, bp);
++	err |= GETREG(regs, RBX, from, bx);
++	err |= GETREG(regs, RDX, from, dx);
++	err |= GETREG(regs, RAX, from, ax);
++	err |= GETREG(regs, RCX, from, cx);
++	err |= GETREG(regs, RSP, from, sp);
++	err |= GETREG(regs, RIP, from, ip);
++	err |= GETREG(regs, EFLAGS, from, flags);
+ 	err |= GETREG(regs, CS, from, cs);
+ 	if (err)
+ 		return 1;
+@@ -108,19 +108,19 @@ static int copy_sc_to_user(struct sigcontext __user *to,
+ 	__put_user((regs)->regs.gp[(regno) / sizeof(unsigned long)],	\
+ 		   &(sc)->regname)
+ 
+-	err |= PUTREG(regs, RDI, to, rdi);
+-	err |= PUTREG(regs, RSI, to, rsi);
+-	err |= PUTREG(regs, RBP, to, rbp);
++	err |= PUTREG(regs, RDI, to, di);
++	err |= PUTREG(regs, RSI, to, si);
++	err |= PUTREG(regs, RBP, to, bp);
+ 	/*
+ 	 * Must use original RSP, which is passed in, rather than what's in
+ 	 * the pt_regs, because that's already been updated to point at the
+ 	 * signal frame.
+ 	 */
+-	err |= __put_user(sp, &to->rsp);
+-	err |= PUTREG(regs, RBX, to, rbx);
+-	err |= PUTREG(regs, RDX, to, rdx);
+-	err |= PUTREG(regs, RCX, to, rcx);
+-	err |= PUTREG(regs, RAX, to, rax);
++	err |= __put_user(sp, &to->sp);
++	err |= PUTREG(regs, RBX, to, bx);
++	err |= PUTREG(regs, RDX, to, dx);
++	err |= PUTREG(regs, RCX, to, cx);
++	err |= PUTREG(regs, RAX, to, ax);
+ 	err |= PUTREG(regs, R8, to, r8);
+ 	err |= PUTREG(regs, R9, to, r9);
+ 	err |= PUTREG(regs, R10, to, r10);
+@@ -135,8 +135,8 @@ static int copy_sc_to_user(struct sigcontext __user *to,
+ 	err |= __put_user(fi->error_code, &to->err);
+ 	err |= __put_user(fi->trap_no, &to->trapno);
+ 
+-	err |= PUTREG(regs, RIP, to, rip);
+-	err |= PUTREG(regs, EFLAGS, to, eflags);
++	err |= PUTREG(regs, RIP, to, ip);
++	err |= PUTREG(regs, EFLAGS, to, flags);
+ #undef PUTREG
+ 
+ 	err |= __put_user(mask, &to->oldmask);
+diff --git a/arch/v850/kernel/vmlinux.lds.S b/arch/v850/kernel/vmlinux.lds.S
+index 6172599..d08cd1d 100644
+--- a/arch/v850/kernel/vmlinux.lds.S
++++ b/arch/v850/kernel/vmlinux.lds.S
+@@ -114,7 +114,7 @@
+ #define DATA_CONTENTS							      \
+ 		__sdata = . ;						      \
+ 		DATA_DATA						      \
+-			*(.exit.data)	/* 2.5 convention */		      \
++			EXIT_DATA	/* 2.5 convention */		      \
+ 			*(.data.exit)	/* 2.4 convention */		      \
+ 		. = ALIGN (16) ;					      \
+ 		*(.data.cacheline_aligned)				      \
+@@ -157,9 +157,9 @@
+ 		. = ALIGN (4096) ;					      \
+ 		__init_start = . ;					      \
+ 			__sinittext = .;				      \
+-			*(.init.text)	/* 2.5 convention */		      \
++			INIT_TEXT	/* 2.5 convention */		      \
+ 			__einittext = .;				      \
+-			*(.init.data)					      \
++			INIT_DATA					      \
+ 			*(.text.init)	/* 2.4 convention */		      \
+ 			*(.data.init)					      \
+ 		INITCALL_CONTENTS					      \
+@@ -170,7 +170,7 @@
+ #define ROMK_INIT_RAM_CONTENTS						      \
+ 		. = ALIGN (4096) ;					      \
+ 		__init_start = . ;					      \
+-			*(.init.data)	/* 2.5 convention */		      \
++			INIT_DATA	/* 2.5 convention */		      \
+ 			*(.data.init)	/* 2.4 convention */		      \
+ 		__init_end = . ;					      \
+ 		. = ALIGN (4096) ;
+@@ -179,7 +179,7 @@
+    should go into ROM.  */	
+ #define ROMK_INIT_ROM_CONTENTS						      \
+ 			_sinittext = .;					      \
+-			*(.init.text)	/* 2.5 convention */		      \
++			INIT_TEXT	/* 2.5 convention */		      \
+ 			_einittext = .;					      \
+ 			*(.text.init)	/* 2.4 convention */		      \
+ 		INITCALL_CONTENTS					      \
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 80b7ba4..65b4491 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -17,81 +17,69 @@ config X86_64
+ 
+ ### Arch settings
+ config X86
+-	bool
+-	default y
++	def_bool y
++
++config GENERIC_LOCKBREAK
++	def_bool n
+ 
+ config GENERIC_TIME
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_CMOS_UPDATE
+-	bool
+-	default y
++	def_bool y
+ 
+ config CLOCKSOURCE_WATCHDOG
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_CLOCKEVENTS
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_CLOCKEVENTS_BROADCAST
+-	bool
+-	default y
++	def_bool y
+ 	depends on X86_64 || (X86_32 && X86_LOCAL_APIC)
+ 
+ config LOCKDEP_SUPPORT
+-	bool
+-	default y
++	def_bool y
+ 
+ config STACKTRACE_SUPPORT
+-	bool
+-	default y
++	def_bool y
+ 
+ config SEMAPHORE_SLEEPERS
+-	bool
+-	default y
++	def_bool y
+ 
+ config MMU
+-	bool
+-	default y
++	def_bool y
+ 
+ config ZONE_DMA
+-	bool
+-	default y
++	def_bool y
+ 
+ config QUICKLIST
+-	bool
+-	default X86_32
++	def_bool X86_32
+ 
+ config SBUS
+ 	bool
+ 
+ config GENERIC_ISA_DMA
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_IOMAP
+-	bool
+-	default y
++	def_bool y
+ 
+ config GENERIC_BUG
+-	bool
+-	default y
++	def_bool y
+ 	depends on BUG
+ 
+ config GENERIC_HWEIGHT
+-	bool
+-	default y
++	def_bool y
++
++config GENERIC_GPIO
++	def_bool n
+ 
+ config ARCH_MAY_HAVE_PC_FDC
+-	bool
+-	default y
++	def_bool y
+ 
+ config DMI
+-	bool
+-	default y
++	def_bool y
+ 
+ config RWSEM_GENERIC_SPINLOCK
+ 	def_bool !X86_XADD
+@@ -112,10 +100,14 @@ config GENERIC_TIME_VSYSCALL
+ 	bool
+ 	default X86_64
+ 
++config HAVE_SETUP_PER_CPU_AREA
++	def_bool X86_64
++
+ config ARCH_SUPPORTS_OPROFILE
+ 	bool
+ 	default y
+ 
++select HAVE_KVM
+ 
+ config ZONE_DMA32
+ 	bool
+@@ -144,9 +136,17 @@ config GENERIC_PENDING_IRQ
+ 
+ config X86_SMP
+ 	bool
+-	depends on X86_32 && SMP && !X86_VOYAGER
++	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
+ 	default y
+ 
++config X86_32_SMP
++	def_bool y
++	depends on X86_32 && SMP
++
++config X86_64_SMP
++	def_bool y
++	depends on X86_64 && SMP
++
+ config X86_HT
+ 	bool
+ 	depends on SMP
+@@ -292,6 +292,18 @@ config X86_ES7000
+ 	  Only choose this option if you have such a system, otherwise you
+ 	  should say N here.
+ 
++config X86_RDC321X
++	bool "RDC R-321x SoC"
++	depends on X86_32
++	select M486
++	select X86_REBOOTFIXUPS
++	select GENERIC_GPIO
++	select LEDS_GPIO
++	help
++	  This option is needed for RDC R-321x system-on-chip, also known
++	  as R-8610-(G).
++	  If you don't have one of these chips, you should say N here.
++
+ config X86_VSMP
+ 	bool "Support for ScaleMP vSMP"
+ 	depends on X86_64 && PCI
+@@ -303,8 +315,8 @@ config X86_VSMP
+ endchoice
+ 
+ config SCHED_NO_NO_OMIT_FRAME_POINTER
+-	bool "Single-depth WCHAN output"
+-	default y
++	def_bool y
++	prompt "Single-depth WCHAN output"
+ 	depends on X86_32
+ 	help
+ 	  Calculate simpler /proc/<PID>/wchan values. If this option
+@@ -314,18 +326,8 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
+ 
+ 	  If in doubt, say "Y".
+ 
+-config PARAVIRT
+-	bool
+-	depends on X86_32 && !(X86_VISWS || X86_VOYAGER)
+-	help
+-	  This changes the kernel so it can modify itself when it is run
+-	  under a hypervisor, potentially improving performance significantly
+-	  over full virtualization.  However, when run without a hypervisor
+-	  the kernel is theoretically slower and slightly larger.
 -
--        /* Prevent the signal handler manipulating SR in a way that can
--           crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
--           modified */
--        current_sr = regs->sr;
--        err |= __get_user(new_sr, &sc->sc_sr);
--        regs->sr &= SR_MASK;
--        regs->sr |= (new_sr & ~SR_MASK);
+ menuconfig PARAVIRT_GUEST
+ 	bool "Paravirtualized guest support"
+-	depends on X86_32
+ 	help
+ 	  Say Y here to get to see options related to running Linux under
+ 	  various hypervisors.  This option alone does not add any kernel code.
+@@ -339,6 +341,7 @@ source "arch/x86/xen/Kconfig"
+ config VMI
+ 	bool "VMI Guest support"
+ 	select PARAVIRT
++	depends on X86_32
+ 	depends on !(X86_VISWS || X86_VOYAGER)
+ 	help
+ 	  VMI provides a paravirtualized interface to the VMware ESX server
+@@ -348,40 +351,43 @@ config VMI
+ 
+ source "arch/x86/lguest/Kconfig"
+ 
++config PARAVIRT
++	bool "Enable paravirtualization code"
++	depends on !(X86_VISWS || X86_VOYAGER)
++	help
++	  This changes the kernel so it can modify itself when it is run
++	  under a hypervisor, potentially improving performance significantly
++	  over full virtualization.  However, when run without a hypervisor
++	  the kernel is theoretically slower and slightly larger.
++
+ endif
+ 
+ config ACPI_SRAT
+-	bool
+-	default y
++	def_bool y
+ 	depends on X86_32 && ACPI && NUMA && (X86_SUMMIT || X86_GENERICARCH)
+ 	select ACPI_NUMA
+ 
+ config HAVE_ARCH_PARSE_SRAT
+-       bool
+-       default y
+-       depends on ACPI_SRAT
++	def_bool y
++	depends on ACPI_SRAT
+ 
+ config X86_SUMMIT_NUMA
+-	bool
+-	default y
++	def_bool y
+ 	depends on X86_32 && NUMA && (X86_SUMMIT || X86_GENERICARCH)
+ 
+ config X86_CYCLONE_TIMER
+-	bool
+-	default y
++	def_bool y
+ 	depends on X86_32 && X86_SUMMIT || X86_GENERICARCH
+ 
+ config ES7000_CLUSTERED_APIC
+-	bool
+-	default y
++	def_bool y
+ 	depends on SMP && X86_ES7000 && MPENTIUMIII
+ 
+ source "arch/x86/Kconfig.cpu"
+ 
+ config HPET_TIMER
+-	bool
++	def_bool X86_64
+ 	prompt "HPET Timer Support" if X86_32
+-	default X86_64
+ 	help
+          Use the IA-PC HPET (High Precision Event Timer) to manage
+          time in preference to the PIT and RTC, if a HPET is
+@@ -399,9 +405,8 @@ config HPET_TIMER
+          Choose N to continue using the legacy 8254 timer.
+ 
+ config HPET_EMULATE_RTC
+-	bool
+-	depends on HPET_TIMER && RTC=y
+-	default y
++	def_bool y
++	depends on HPET_TIMER && (RTC=y || RTC=m)
+ 
+ # Mark as embedded because too many people got it wrong.
+ # The code disables itself when not needed.
+@@ -441,8 +446,8 @@ config CALGARY_IOMMU
+ 	  If unsure, say Y.
+ 
+ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
+-	bool "Should Calgary be enabled by default?"
+-	default y
++	def_bool y
++	prompt "Should Calgary be enabled by default?"
+ 	depends on CALGARY_IOMMU
+ 	help
+ 	  Should Calgary be enabled by default? if you choose 'y', Calgary
+@@ -486,9 +491,9 @@ config SCHED_SMT
+ 	  N here.
+ 
+ config SCHED_MC
+-	bool "Multi-core scheduler support"
++	def_bool y
++	prompt "Multi-core scheduler support"
+ 	depends on (X86_64 && SMP) || (X86_32 && X86_HT)
+-	default y
+ 	help
+ 	  Multi-core scheduler support improves the CPU scheduler's decision
+ 	  making when dealing with multi-core CPU chips at a cost of slightly
+@@ -522,19 +527,16 @@ config X86_UP_IOAPIC
+ 	  an IO-APIC, then the kernel will still run with no slowdown at all.
+ 
+ config X86_LOCAL_APIC
+-	bool
++	def_bool y
+ 	depends on X86_64 || (X86_32 && (X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER) || X86_GENERICARCH))
+-	default y
+ 
+ config X86_IO_APIC
+-	bool
++	def_bool y
+ 	depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)) || X86_GENERICARCH))
+-	default y
+ 
+ config X86_VISWS_APIC
+-	bool
++	def_bool y
+ 	depends on X86_32 && X86_VISWS
+-	default y
+ 
+ config X86_MCE
+ 	bool "Machine Check Exception"
+@@ -554,17 +556,17 @@ config X86_MCE
+ 	  the 386 and 486, so nearly everyone can say Y here.
+ 
+ config X86_MCE_INTEL
+-	bool "Intel MCE features"
++	def_bool y
++	prompt "Intel MCE features"
+ 	depends on X86_64 && X86_MCE && X86_LOCAL_APIC
+-	default y
+ 	help
+ 	   Additional support for intel specific MCE features such as
+ 	   the thermal monitor.
+ 
+ config X86_MCE_AMD
+-	bool "AMD MCE features"
++	def_bool y
++	prompt "AMD MCE features"
+ 	depends on X86_64 && X86_MCE && X86_LOCAL_APIC
+-	default y
+ 	help
+ 	   Additional support for AMD specific MCE features such as
+ 	   the DRAM Error Threshold.
+@@ -637,9 +639,9 @@ config I8K
+ 	  Say N otherwise.
+ 
+ config X86_REBOOTFIXUPS
+-	bool "Enable X86 board specific fixups for reboot"
++	def_bool n
++	prompt "Enable X86 board specific fixups for reboot"
+ 	depends on X86_32 && X86
+-	default n
+ 	---help---
+ 	  This enables chipset and/or board specific fixups to be done
+ 	  in order to get reboot to work correctly. This is only needed on
+@@ -648,7 +650,7 @@ config X86_REBOOTFIXUPS
+ 	  system.
+ 
+ 	  Currently, the only fixup is for the Geode machines using
+-	  CS5530A and CS5536 chipsets.
++	  CS5530A and CS5536 chipsets and the RDC R-321x SoC.
+ 
+ 	  Say Y if you want to enable the fixup. Currently, it's safe to
+ 	  enable this option even if you don't need it.
+@@ -672,9 +674,8 @@ config MICROCODE
+ 	  module will be called microcode.
+ 
+ config MICROCODE_OLD_INTERFACE
+-	bool
++	def_bool y
+ 	depends on MICROCODE
+-	default y
+ 
+ config X86_MSR
+ 	tristate "/dev/cpu/*/msr - Model-specific register support"
+@@ -798,13 +799,12 @@ config PAGE_OFFSET
+ 	depends on X86_32
+ 
+ config HIGHMEM
+-	bool
++	def_bool y
+ 	depends on X86_32 && (HIGHMEM64G || HIGHMEM4G)
+-	default y
+ 
+ config X86_PAE
+-	bool "PAE (Physical Address Extension) Support"
+-	default n
++	def_bool n
++	prompt "PAE (Physical Address Extension) Support"
+ 	depends on X86_32 && !HIGHMEM4G
+ 	select RESOURCES_64BIT
+ 	help
+@@ -836,10 +836,10 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
+ 	depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
+ 
+ config K8_NUMA
+-       bool "Old style AMD Opteron NUMA detection"
+-       depends on X86_64 && NUMA && PCI
+-       default y
+-       help
++	def_bool y
++	prompt "Old style AMD Opteron NUMA detection"
++	depends on X86_64 && NUMA && PCI
++	help
+ 	 Enable K8 NUMA node topology detection.  You should say Y here if
+ 	 you have a multi processor AMD K8 system. This uses an old
+ 	 method to read the NUMA configuration directly from the builtin
+@@ -847,10 +847,10 @@ config K8_NUMA
+ 	 instead, which also takes priority if both are compiled in.
+ 
+ config X86_64_ACPI_NUMA
+-	bool "ACPI NUMA detection"
++	def_bool y
++	prompt "ACPI NUMA detection"
+ 	depends on X86_64 && NUMA && ACPI && PCI
+ 	select ACPI_NUMA
+-	default y
+ 	help
+ 	  Enable ACPI SRAT based node topology detection.
+ 
+@@ -864,52 +864,53 @@ config NUMA_EMU
+ 
+ config NODES_SHIFT
+ 	int
++	range 1 15  if X86_64
+ 	default "6" if X86_64
+ 	default "4" if X86_NUMAQ
+ 	default "3"
+ 	depends on NEED_MULTIPLE_NODES
+ 
+ config HAVE_ARCH_BOOTMEM_NODE
+-	bool
++	def_bool y
+ 	depends on X86_32 && NUMA
+-	default y
+ 
+ config ARCH_HAVE_MEMORY_PRESENT
+-	bool
++	def_bool y
+ 	depends on X86_32 && DISCONTIGMEM
+-	default y
+ 
+ config NEED_NODE_MEMMAP_SIZE
+-	bool
++	def_bool y
+ 	depends on X86_32 && (DISCONTIGMEM || SPARSEMEM)
+-	default y
+ 
+ config HAVE_ARCH_ALLOC_REMAP
+-	bool
++	def_bool y
+ 	depends on X86_32 && NUMA
+-	default y
+ 
+ config ARCH_FLATMEM_ENABLE
+ 	def_bool y
+-	depends on (X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC) || (X86_64 && !NUMA)
++	depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA
+ 
+ config ARCH_DISCONTIGMEM_ENABLE
+ 	def_bool y
+-	depends on NUMA
++	depends on NUMA && X86_32
+ 
+ config ARCH_DISCONTIGMEM_DEFAULT
+ 	def_bool y
+-	depends on NUMA
++	depends on NUMA && X86_32
++
++config ARCH_SPARSEMEM_DEFAULT
++	def_bool y
++	depends on X86_64
+ 
+ config ARCH_SPARSEMEM_ENABLE
+ 	def_bool y
+-	depends on NUMA || (EXPERIMENTAL && (X86_PC || X86_64))
++	depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC)
+ 	select SPARSEMEM_STATIC if X86_32
+ 	select SPARSEMEM_VMEMMAP_ENABLE if X86_64
+ 
+ config ARCH_SELECT_MEMORY_MODEL
+ 	def_bool y
+-	depends on X86_32 && ARCH_SPARSEMEM_ENABLE
++	depends on ARCH_SPARSEMEM_ENABLE
+ 
+ config ARCH_MEMORY_PROBE
+ 	def_bool X86_64
+@@ -987,42 +988,32 @@ config MTRR
+ 	  See <file:Documentation/mtrr.txt> for more information.
+ 
+ config EFI
+-	bool "Boot from EFI support"
+-	depends on X86_32 && ACPI
+-	default n
++	def_bool n
++	prompt "EFI runtime service support"
++	depends on ACPI
+ 	---help---
+-	This enables the kernel to boot on EFI platforms using
+-	system configuration information passed to it from the firmware.
+-	This also enables the kernel to use any EFI runtime services that are
++	This enables the kernel to use EFI runtime services that are
+ 	available (such as the EFI variable services).
+ 
+-	This option is only useful on systems that have EFI firmware
+-	and will result in a kernel image that is ~8k larger.  In addition,
+-	you must use the latest ELILO loader available at
+-	<http://elilo.sourceforge.net> in order to take advantage of
+-	kernel initialization using EFI information (neither GRUB nor LILO know
+-	anything about EFI).  However, even with this option, the resultant
+-	kernel should continue to boot on existing non-EFI platforms.
++	This option is only useful on systems that have EFI firmware.
++  	In addition, you should use the latest ELILO loader available
++  	at <http://elilo.sourceforge.net> in order to take advantage
++  	of EFI runtime services. However, even with this option, the
++  	resultant kernel should continue to boot on existing non-EFI
++  	platforms.
+ 
+ config IRQBALANCE
+-	bool "Enable kernel irq balancing"
++	def_bool y
++	prompt "Enable kernel irq balancing"
+ 	depends on X86_32 && SMP && X86_IO_APIC
+-	default y
+ 	help
+ 	  The default yes will allow the kernel to do irq load balancing.
+ 	  Saying no will keep the kernel from doing irq load balancing.
+ 
+-# turning this on wastes a bunch of space.
+-# Summit needs it only when NUMA is on
+-config BOOT_IOREMAP
+-	bool
+-	depends on X86_32 && (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
+-	default y
 -
--	COPY(pc);
+ config SECCOMP
+-	bool "Enable seccomp to safely compute untrusted bytecode"
++	def_bool y
++	prompt "Enable seccomp to safely compute untrusted bytecode"
+ 	depends on PROC_FS
+-	default y
+ 	help
+ 	  This kernel feature is useful for number crunching applications
+ 	  that may need to compute untrusted bytecode during their
+@@ -1189,11 +1180,11 @@ config HOTPLUG_CPU
+ 	  suspend.
+ 
+ config COMPAT_VDSO
+-	bool "Compat VDSO support"
+-	default y
+-	depends on X86_32
++	def_bool y
++	prompt "Compat VDSO support"
++	depends on X86_32 || IA32_EMULATION
+ 	help
+-	  Map the VDSO to the predictable old-style address too.
++	  Map the 32-bit VDSO to the predictable old-style address too.
+ 	---help---
+ 	  Say N here if you are running a sufficiently recent glibc
+ 	  version (2.3.3 or later), to remove the high-mapped
+@@ -1207,30 +1198,26 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
+ 	def_bool y
+ 	depends on X86_64 || (X86_32 && HIGHMEM)
+ 
+-config MEMORY_HOTPLUG_RESERVE
+-	def_bool X86_64
+-	depends on (MEMORY_HOTPLUG && DISCONTIGMEM)
 -
--#undef COPY
+ config HAVE_ARCH_EARLY_PFN_TO_NID
+ 	def_bool X86_64
+ 	depends on NUMA
+ 
+-config OUT_OF_LINE_PFN_TO_PAGE
+-	def_bool X86_64
+-	depends on DISCONTIGMEM
+-
+ menu "Power management options"
+ 	depends on !X86_VOYAGER
+ 
+ config ARCH_HIBERNATION_HEADER
+-	bool
++	def_bool y
+ 	depends on X86_64 && HIBERNATION
+-	default y
+ 
+ source "kernel/power/Kconfig"
+ 
+ source "drivers/acpi/Kconfig"
+ 
++config X86_APM_BOOT
++	bool
++	default y
++	depends on APM || APM_MODULE
++
+ menuconfig APM
+ 	tristate "APM (Advanced Power Management) BIOS support"
+ 	depends on X86_32 && PM_SLEEP && !X86_VISWS
+@@ -1371,7 +1358,7 @@ menu "Bus options (PCI etc.)"
+ config PCI
+ 	bool "PCI support" if !X86_VISWS
+ 	depends on !X86_VOYAGER
+-	default y if X86_VISWS
++	default y
+ 	select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
+ 	help
+ 	  Find out whether you have a PCI motherboard. PCI is the name of a
+@@ -1418,25 +1405,21 @@ config PCI_GOANY
+ endchoice
+ 
+ config PCI_BIOS
+-	bool
++	def_bool y
+ 	depends on X86_32 && !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
+-	default y
+ 
+ # x86-64 doesn't support PCI BIOS access from long mode so always go direct.
+ config PCI_DIRECT
+-	bool
++	def_bool y
+ 	depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
+-	default y
+ 
+ config PCI_MMCONFIG
+-	bool
++	def_bool y
+ 	depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
+-	default y
+ 
+ config PCI_DOMAINS
+-	bool
++	def_bool y
+ 	depends on PCI
+-	default y
+ 
+ config PCI_MMCONFIG
+ 	bool "Support mmconfig PCI config space access"
+@@ -1453,9 +1436,9 @@ config DMAR
+ 	  remapping devices.
+ 
+ config DMAR_GFX_WA
+-	bool "Support for Graphics workaround"
++	def_bool y
++	prompt "Support for Graphics workaround"
+ 	depends on DMAR
+-	default y
+ 	help
+ 	 Current Graphics drivers tend to use physical address
+ 	 for DMA and avoid using DMA APIs. Setting this config
+@@ -1464,9 +1447,8 @@ config DMAR_GFX_WA
+ 	 to use physical addresses for DMA.
+ 
+ config DMAR_FLOPPY_WA
+-	bool
++	def_bool y
+ 	depends on DMAR
+-	default y
+ 	help
+ 	 Floppy disk drivers are known to bypass DMA API calls
+ 	 thereby failing to work when IOMMU is enabled. This
+@@ -1479,8 +1461,7 @@ source "drivers/pci/Kconfig"
+ 
+ # x86_64 have no ISA slots, but do have ISA-style DMA.
+ config ISA_DMA_API
+-	bool
+-	default y
++	def_bool y
+ 
+ if X86_32
+ 
+@@ -1546,9 +1527,9 @@ config SCx200HR_TIMER
+ 	  other workaround is idle=poll boot option.
+ 
+ config GEODE_MFGPT_TIMER
+-	bool "Geode Multi-Function General Purpose Timer (MFGPT) events"
++	def_bool y
++	prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
+ 	depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
+-	default y
+ 	help
+ 	  This driver provides a clock event source based on the MFGPT
+ 	  timer(s) in the CS5535 and CS5536 companion chip for the geode.
+@@ -1575,6 +1556,7 @@ source "fs/Kconfig.binfmt"
+ config IA32_EMULATION
+ 	bool "IA32 Emulation"
+ 	depends on X86_64
++	select COMPAT_BINFMT_ELF
+ 	help
+ 	  Include code to run 32-bit programs under a 64-bit kernel. You should
+ 	  likely turn this on, unless you're 100% sure that you don't have any
+@@ -1587,18 +1569,16 @@ config IA32_AOUT
+          Support old a.out binaries in the 32bit emulation.
+ 
+ config COMPAT
+-	bool
++	def_bool y
+ 	depends on IA32_EMULATION
+-	default y
+ 
+ config COMPAT_FOR_U64_ALIGNMENT
+ 	def_bool COMPAT
+ 	depends on X86_64
+ 
+ config SYSVIPC_COMPAT
+-	bool
++	def_bool y
+ 	depends on X86_64 && COMPAT && SYSVIPC
+-	default y
+ 
+ endmenu
+ 
+@@ -1619,4 +1599,6 @@ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+ 
++source "arch/x86/kvm/Kconfig"
++
+ source "lib/Kconfig"
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index c301622..e09a6b7 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -219,10 +219,10 @@ config MGEODEGX1
+ 	  Select this for a Geode GX1 (Cyrix MediaGX) chip.
+ 
+ config MGEODE_LX
+-       bool "Geode GX/LX"
++	bool "Geode GX/LX"
+ 	depends on X86_32
+-       help
+-         Select this for AMD Geode GX and LX processors.
++	help
++	  Select this for AMD Geode GX and LX processors.
+ 
+ config MCYRIXIII
+ 	bool "CyrixIII/VIA-C3"
+@@ -258,7 +258,7 @@ config MPSC
+ 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
+ 	  Xeon CPUs with Intel 64bit which is compatible with x86-64.
+ 	  Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
+-          Netburst core and shouldn't use this option. You can distinguish them
++	  Netburst core and shouldn't use this option. You can distinguish them
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
+@@ -317,81 +317,75 @@ config X86_L1_CACHE_SHIFT
+ 	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
+ 
+ config X86_XADD
+-	bool
++	def_bool y
+ 	depends on X86_32 && !M386
+-	default y
+ 
+ config X86_PPRO_FENCE
+-	bool
++	bool "PentiumPro memory ordering errata workaround"
+ 	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
+-	default y
++	help
++	  Old PentiumPro multiprocessor systems had errata that could cause memory
++	  operations to violate the x86 ordering standard in rare cases. Enabling this
++	  option will attempt to work around some (but not all) occurrences of
++	  this problem, at the cost of much heavier spinlock and memory barrier
++	  operations.
++
++	  If unsure, say n here. Even distro kernels should think twice before enabling
++	  this: there are few systems, and an unlikely bug.
+ 
+ config X86_F00F_BUG
+-	bool
++	def_bool y
+ 	depends on M586MMX || M586TSC || M586 || M486 || M386
+-	default y
+ 
+ config X86_WP_WORKS_OK
+-	bool
++	def_bool y
+ 	depends on X86_32 && !M386
+-	default y
+ 
+ config X86_INVLPG
+-	bool
++	def_bool y
+ 	depends on X86_32 && !M386
+-	default y
+ 
+ config X86_BSWAP
+-	bool
++	def_bool y
+ 	depends on X86_32 && !M386
+-	default y
+ 
+ config X86_POPAD_OK
+-	bool
++	def_bool y
+ 	depends on X86_32 && !M386
+-	default y
+ 
+ config X86_ALIGNMENT_16
+-	bool
++	def_bool y
+ 	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+-	default y
+ 
+ config X86_GOOD_APIC
+-	bool
++	def_bool y
+ 	depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON || MCORE2 || MVIAC7 || X86_64
+-	default y
+ 
+ config X86_INTEL_USERCOPY
+-	bool
++	def_bool y
+ 	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
+-	default y
+ 
+ config X86_USE_PPRO_CHECKSUM
+-	bool
++	def_bool y
+ 	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
+-	default y
+ 
+ config X86_USE_3DNOW
+-	bool
++	def_bool y
+ 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
+-	default y
+ 
+ config X86_OOSTORE
+-	bool
++	def_bool y
+ 	depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
+-	default y
+ 
+ config X86_TSC
+-	bool
++	def_bool y
+ 	depends on ((MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64
+-	default y
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+-	bool
++	def_bool y
+ 	depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7)
+-	default y
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+@@ -399,3 +393,6 @@ config X86_MINIMUM_CPU_FAMILY
+ 	default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK)
+ 	default "3"
+ 
++config X86_DEBUGCTLMSR
++	def_bool y
++	depends on !(M586MMX || M586TSC || M586 || M486 || M386)
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index 761ca7b..2e1e3af 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -6,7 +6,7 @@ config TRACE_IRQFLAGS_SUPPORT
+ source "lib/Kconfig.debug"
+ 
+ config EARLY_PRINTK
+-	bool "Early printk" if EMBEDDED && DEBUG_KERNEL && X86_32
++	bool "Early printk" if EMBEDDED
+ 	default y
+ 	help
+ 	  Write kernel log output directly into the VGA buffer or to a serial
+@@ -40,22 +40,49 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
+ 
+ config DEBUG_PAGEALLOC
+ 	bool "Debug page memory allocations"
+-	depends on DEBUG_KERNEL && !HIBERNATION && !HUGETLBFS
+-	depends on X86_32
++	depends on DEBUG_KERNEL && X86_32
+ 	help
+ 	  Unmap pages from the kernel linear mapping after free_pages().
+ 	  This results in a large slowdown, but helps to find certain types
+ 	  of memory corruptions.
+ 
++config DEBUG_PER_CPU_MAPS
++	bool "Debug access to per_cpu maps"
++	depends on DEBUG_KERNEL
++	depends on X86_64_SMP
++	default n
++	help
++	  Say Y to verify that the per_cpu map being accessed has
++	  been set up.  Adds a fair amount of code to kernel memory
++	  and decreases performance.
++
++	  Say N if unsure.
++
+ config DEBUG_RODATA
+ 	bool "Write protect kernel read-only data structures"
++	default y
+ 	depends on DEBUG_KERNEL
+ 	help
+ 	  Mark the kernel read-only data as write-protected in the pagetables,
+ 	  in order to catch accidental (and incorrect) writes to such const
+-	  data. This option may have a slight performance impact because a
+-	  portion of the kernel code won't be covered by a 2MB TLB anymore.
+-	  If in doubt, say "N".
++	  data. This is recommended so that we can catch kernel bugs sooner.
++	  If in doubt, say "Y".
++
++config DEBUG_RODATA_TEST
++	bool "Testcase for the DEBUG_RODATA feature"
++	depends on DEBUG_RODATA
++	help
++	  This option enables a testcase for the DEBUG_RODATA
++	  feature as well as for the change_page_attr() infrastructure.
++	  If in doubt, say "N"
++
++config DEBUG_NX_TEST
++	tristate "Testcase for the NX non-executable stack feature"
++	depends on DEBUG_KERNEL && m
++	help
++	  This option enables a testcase for the CPU NX capability
++	  and the software setup of this feature.
++	  If in doubt, say "N"
+ 
+ config 4KSTACKS
+ 	bool "Use 4Kb for kernel stacks instead of 8Kb"
+@@ -75,8 +102,7 @@ config X86_FIND_SMP_CONFIG
+ 
+ config X86_MPPARSE
+ 	def_bool y
+-	depends on X86_LOCAL_APIC && !X86_VISWS
+-	depends on X86_32
++	depends on (X86_32 && (X86_LOCAL_APIC && !X86_VISWS)) || X86_64
+ 
+ config DOUBLEFAULT
+ 	default y
+@@ -112,4 +138,91 @@ config IOMMU_LEAK
+ 	  Add a simple leak tracer to the IOMMU code. This is useful when you
+ 	  are debugging a buggy device driver that leaks IOMMU mappings.
+ 
++#
++# IO delay types:
++#
++
++config IO_DELAY_TYPE_0X80
++	int
++	default "0"
++
++config IO_DELAY_TYPE_0XED
++	int
++	default "1"
++
++config IO_DELAY_TYPE_UDELAY
++	int
++	default "2"
++
++config IO_DELAY_TYPE_NONE
++	int
++	default "3"
++
++choice
++	prompt "IO delay type"
++	default IO_DELAY_0XED
++
++config IO_DELAY_0X80
++	bool "port 0x80 based port-IO delay [recommended]"
++	help
++	  This is the traditional Linux IO delay used for in/out_p.
++	  It is the most tested and hence the safest selection here.
++
++config IO_DELAY_0XED
++	bool "port 0xed based port-IO delay"
++	help
++	  Use port 0xed as the IO delay. This frees up port 0x80 which is
++	  often used as a hardware-debug port.
++
++config IO_DELAY_UDELAY
++	bool "udelay based port-IO delay"
++	help
++	  Use udelay(2) as the IO delay method. This provides the delay
++	  while not having any side-effect on the IO port space.
++
++config IO_DELAY_NONE
++	bool "no port-IO delay"
++	help
++	  No port-IO delay. Will break on old boxes that require port-IO
++	  delay for certain operations. Should work on most new machines.
++
++endchoice
++
++if IO_DELAY_0X80
++config DEFAULT_IO_DELAY_TYPE
++	int
++	default IO_DELAY_TYPE_0X80
++endif
++
++if IO_DELAY_0XED
++config DEFAULT_IO_DELAY_TYPE
++	int
++	default IO_DELAY_TYPE_0XED
++endif
++
++if IO_DELAY_UDELAY
++config DEFAULT_IO_DELAY_TYPE
++	int
++	default IO_DELAY_TYPE_UDELAY
++endif
++
++if IO_DELAY_NONE
++config DEFAULT_IO_DELAY_TYPE
++	int
++	default IO_DELAY_TYPE_NONE
++endif
++
++config DEBUG_BOOT_PARAMS
++	bool "Debug boot parameters"
++	depends on DEBUG_KERNEL
++	depends on DEBUG_FS
++	help
++	  This option will cause struct boot_params to be exported via debugfs.
++
++config CPA_DEBUG
++	bool "CPA self test code"
++	depends on DEBUG_KERNEL
++	help
++	  Do change_page_attr self tests at boot.
++
+ endmenu
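The IO-delay choice added above picks one of a few trivial delay implementations for the *_p port accessors. Hedged sketches of the three non-empty variants (function names are made up for the example; the real code lives elsewhere in the x86 tree):

#include <linux/delay.h>
#include <asm/io.h>

static void example_io_delay_0x80(void)
{
	outb(0, 0x80);		/* traditional: harmless write to the POST code port */
}

static void example_io_delay_0xed(void)
{
	outb(0, 0xed);		/* keeps port 0x80 free for hardware debuggers */
}

static void example_io_delay_udelay(void)
{
	udelay(2);		/* time-based, no side effects on the I/O port space */
}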
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 7aa1dc6..da8f412 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -7,13 +7,254 @@ else
+         KBUILD_DEFCONFIG := $(ARCH)_defconfig
+ endif
+ 
+-# No need to remake these files
+-$(srctree)/arch/x86/Makefile%: ;
++core-$(CONFIG_KVM) += arch/x86/kvm/
++
++# BITS is used as extension for files which are available in a 32 bit
++# and a 64 bit version to simplify shared Makefiles.
++# e.g.: obj-y += foo_$(BITS).o
++export BITS
+ 
+ ifeq ($(CONFIG_X86_32),y)
++        BITS := 32
+         UTS_MACHINE := i386
+-        include $(srctree)/arch/x86/Makefile_32
++        CHECKFLAGS += -D__i386__
++
++        biarch := $(call cc-option,-m32)
++        KBUILD_AFLAGS += $(biarch)
++        KBUILD_CFLAGS += $(biarch)
++
++        ifdef CONFIG_RELOCATABLE
++                LDFLAGS_vmlinux := --emit-relocs
++        endif
++
++        KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
++
++        # prevent gcc from keeping the stack 16 byte aligned
++        KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
++
++        # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
++        # a lot more stack due to the lack of sharing of stacklots:
++        KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \
++                echo $(call cc-option,-fno-unit-at-a-time); fi ;)
++
++        # CPU-specific tuning. Anything which can be shared with UML should go here.
++        include $(srctree)/arch/x86/Makefile_32.cpu
++        KBUILD_CFLAGS += $(cflags-y)
++
++        # temporary until string.h is fixed
++        KBUILD_CFLAGS += -ffreestanding
+ else
++        BITS := 64
+         UTS_MACHINE := x86_64
+-        include $(srctree)/arch/x86/Makefile_64
++        CHECKFLAGS += -D__x86_64__ -m64
++
++        KBUILD_AFLAGS += -m64
++        KBUILD_CFLAGS += -m64
++
++        # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
++
++        cflags-$(CONFIG_MCORE2) += \
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
++        cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
++        KBUILD_CFLAGS += $(cflags-y)
++
++        KBUILD_CFLAGS += -mno-red-zone
++        KBUILD_CFLAGS += -mcmodel=kernel
++
++        # -funit-at-a-time shrinks the kernel .text considerably
++        # unfortunately it makes reading oopses harder.
++        KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
++
++        # this works around some issues with generating unwind tables in older gccs
++        # newer gccs do it by default
++        KBUILD_CFLAGS += -maccumulate-outgoing-args
++
++        stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
++        stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
++                "$(CC)" -fstack-protector )
++        stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
++                "$(CC)" -fstack-protector-all )
++
++        KBUILD_CFLAGS += $(stackp-y)
+ endif
++
++# The stack pointer is addressed differently for 32-bit and 64-bit x86
++sp-$(CONFIG_X86_32) := esp
++sp-$(CONFIG_X86_64) := rsp
++
++# does binutils support CFI?
++cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
++# is .cfi_signal_frame supported too?
++cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
++KBUILD_AFLAGS += $(cfi) $(cfi-sigframe)
++KBUILD_CFLAGS += $(cfi) $(cfi-sigframe)
++
++LDFLAGS := -m elf_$(UTS_MACHINE)
++OBJCOPYFLAGS := -O binary -R .note -R .comment -S
++
++# Speed up the build
++KBUILD_CFLAGS += -pipe
++# Workaround for a gcc prerelease that unfortunately was shipped in a SUSE release
++KBUILD_CFLAGS += -Wno-sign-compare
++#
++KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
++# prevent gcc from generating any FP code by mistake
++KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
++
++###
++# Sub architecture support
++# fcore-y is linked before mcore-y files.
++
++# Default subarch .c files
++mcore-y  := arch/x86/mach-default/
++
++# Voyager subarch support
++mflags-$(CONFIG_X86_VOYAGER)	:= -Iinclude/asm-x86/mach-voyager
++mcore-$(CONFIG_X86_VOYAGER)	:= arch/x86/mach-voyager/
++
++# VISWS subarch support
++mflags-$(CONFIG_X86_VISWS)	:= -Iinclude/asm-x86/mach-visws
++mcore-$(CONFIG_X86_VISWS)	:= arch/x86/mach-visws/
++
++# NUMAQ subarch support
++mflags-$(CONFIG_X86_NUMAQ)	:= -Iinclude/asm-x86/mach-numaq
++mcore-$(CONFIG_X86_NUMAQ)	:= arch/x86/mach-default/
++
++# BIGSMP subarch support
++mflags-$(CONFIG_X86_BIGSMP)	:= -Iinclude/asm-x86/mach-bigsmp
++mcore-$(CONFIG_X86_BIGSMP)	:= arch/x86/mach-default/
++
++#Summit subarch support
++mflags-$(CONFIG_X86_SUMMIT)	:= -Iinclude/asm-x86/mach-summit
++mcore-$(CONFIG_X86_SUMMIT)	:= arch/x86/mach-default/
++
++# generic subarchitecture
++mflags-$(CONFIG_X86_GENERICARCH):= -Iinclude/asm-x86/mach-generic
++fcore-$(CONFIG_X86_GENERICARCH)	+= arch/x86/mach-generic/
++mcore-$(CONFIG_X86_GENERICARCH)	:= arch/x86/mach-default/
++
++
++# ES7000 subarch support
++mflags-$(CONFIG_X86_ES7000)	:= -Iinclude/asm-x86/mach-es7000
++fcore-$(CONFIG_X86_ES7000)	:= arch/x86/mach-es7000/
++mcore-$(CONFIG_X86_ES7000)	:= arch/x86/mach-default/
++
++# RDC R-321x subarch support
++mflags-$(CONFIG_X86_RDC321X)	:= -Iinclude/asm-x86/mach-rdc321x
++mcore-$(CONFIG_X86_RDC321X)	:= arch/x86/mach-default
++core-$(CONFIG_X86_RDC321X)	+= arch/x86/mach-rdc321x/
++
++# default subarch .h files
++mflags-y += -Iinclude/asm-x86/mach-default
++
++# 64 bit does not use subarch support - clear the subarch variables
++fcore-$(CONFIG_X86_64)  :=
++mcore-$(CONFIG_X86_64)  :=
++mflags-$(CONFIG_X86_64) :=
++
++KBUILD_CFLAGS += $(mflags-y)
++KBUILD_AFLAGS += $(mflags-y)
++
++###
++# Kernel objects
++
++head-y                := arch/x86/kernel/head_$(BITS).o
++head-$(CONFIG_X86_64) += arch/x86/kernel/head64.o
++head-y                += arch/x86/kernel/init_task.o
++
++libs-y  += arch/x86/lib/
++
++# Sub architecture files that need linking first
++core-y += $(fcore-y)
++
++# Xen paravirtualization support
++core-$(CONFIG_XEN) += arch/x86/xen/
++
++# lguest paravirtualization support
++core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
++
++core-y += arch/x86/kernel/
++core-y += arch/x86/mm/
++
++# Remaining sub architecture files
++core-y += $(mcore-y)
++
++core-y += arch/x86/crypto/
++core-y += arch/x86/vdso/
++core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
++
++# drivers-y are linked after core-y
++drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
++drivers-$(CONFIG_PCI)            += arch/x86/pci/
++
++# must be linked after kernel/
++drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/
++
++ifeq ($(CONFIG_X86_32),y)
++drivers-$(CONFIG_PM) += arch/x86/power/
++drivers-$(CONFIG_FB) += arch/x86/video/
++endif
++
++####
++# boot loader support. Several targets are kept for legacy purposes
++
++boot := arch/x86/boot
++
++PHONY += zImage bzImage compressed zlilo bzlilo \
++         zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
++
++# Default kernel to build
++all: bzImage
++
++# KBUILD_IMAGE specifies the target image being built
++                    KBUILD_IMAGE := $(boot)/bzImage
++zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage
++
++zImage bzImage: vmlinux
++	$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
++	$(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot
++	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/bzImage
++
++compressed: zImage
++
++zlilo bzlilo: vmlinux
++	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zlilo
++
++zdisk bzdisk: vmlinux
++	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk
++
++fdimage fdimage144 fdimage288 isoimage: vmlinux
++	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
++
++install: vdso_install
++	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
++
++PHONY += vdso_install
++vdso_install:
++	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
++
++archclean:
++	$(Q)rm -rf $(objtree)/arch/i386
++	$(Q)rm -rf $(objtree)/arch/x86_64
++	$(Q)$(MAKE) $(clean)=$(boot)
++
++define archhelp
++  echo  '* bzImage      - Compressed kernel image (arch/x86/boot/bzImage)'
++  echo  '  install      - Install kernel using'
++  echo  '                  (your) ~/bin/installkernel or'
++  echo  '                  (distribution) /sbin/installkernel or'
++  echo  '                  install to $$(INSTALL_PATH) and run lilo'
++  echo  '  fdimage      - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
++  echo  '  fdimage144   - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
++  echo  '  fdimage288   - Create 2.8MB boot floppy image (arch/x86/boot/fdimage)'
++  echo  '  isoimage     - Create a boot CD-ROM image (arch/x86/boot/image.iso)'
++  echo  '                  bzdisk/fdimage*/isoimage also accept:'
++  echo  '                  FDARGS="..."  arguments for the booted kernel'
++  echo  '                  FDINITRD=file initrd for the booted kernel'
++endef
++
++CLEAN_FILES += arch/x86/boot/fdimage \
++	       arch/x86/boot/image.iso \
++	       arch/x86/boot/mtools.conf
+diff --git a/arch/x86/Makefile_32 b/arch/x86/Makefile_32
+deleted file mode 100644
+index 50394da..0000000
+--- a/arch/x86/Makefile_32
++++ /dev/null
+@@ -1,175 +0,0 @@
+-#
+-# i386 Makefile
+-#
+-# This file is included by the global makefile so that you can add your own
+-# architecture-specific flags and dependencies. Remember to do have actions
+-# for "archclean" cleaning up for this architecture.
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 1994 by Linus Torvalds
+-#
+-# 19990713  Artur Skawina <skawina at geocities.com>
+-#           Added '-march' and '-mpreferred-stack-boundary' support
+-#
+-# 20050320  Kianusch Sayah Karadji <kianusch at sk-tech.net>
+-#           Added support for GEODE CPU
 -
--	/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
--	 * has been restored above.) */
--	err |= restore_sigcontext_fpu(regs, sc);
+-# BITS is used as extension for files which are available in a 32 bit
+-# and a 64 bit version to simplify shared Makefiles.
+-# e.g.: obj-y += foo_$(BITS).o
+-BITS := 32
+-export BITS
 -
--	regs->syscall_nr = -1;		/* disable syscall checks */
--	err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
--	return err;
--}
+-HAS_BIARCH      := $(call cc-option-yn, -m32)
+-ifeq ($(HAS_BIARCH),y)
+-AS              := $(AS) --32
+-LD              := $(LD) -m elf_i386
+-CC              := $(CC) -m32
+-endif
 -
--asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
--				   unsigned long r4, unsigned long r5,
--				   unsigned long r6, unsigned long r7,
--				   struct pt_regs * regs)
--{
--	struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
--	sigset_t set;
--	long long ret;
+-LDFLAGS		:= -m elf_i386
+-OBJCOPYFLAGS	:= -O binary -R .note -R .comment -S
+-ifdef CONFIG_RELOCATABLE
+-LDFLAGS_vmlinux := --emit-relocs
+-endif
+-CHECKFLAGS	+= -D__i386__
 -
--	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
--		goto badframe;
+-KBUILD_CFLAGS += -pipe -msoft-float -mregparm=3 -freg-struct-return
 -
--	if (__get_user(set.sig[0], &frame->sc.oldmask)
--	    || (_NSIG_WORDS > 1
--		&& __copy_from_user(&set.sig[1], &frame->extramask,
--				    sizeof(frame->extramask))))
--		goto badframe;
+-# prevent gcc from keeping the stack 16 byte aligned
+-KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
 -
--	sigdelsetmask(&set, ~_BLOCKABLE);
+-# CPU-specific tuning. Anything which can be shared with UML should go here.
+-include $(srctree)/arch/x86/Makefile_32.cpu
 -
--	spin_lock_irq(&current->sighand->siglock);
--	current->blocked = set;
--	recalc_sigpending();
--	spin_unlock_irq(&current->sighand->siglock);
+-# temporary until string.h is fixed
+-cflags-y += -ffreestanding
 -
--	if (restore_sigcontext(regs, &frame->sc, &ret))
--		goto badframe;
--	regs->pc -= 4;
+-# this works around some issues with generating unwind tables in older gccs
+-# newer gccs do it by default
+-cflags-y += -maccumulate-outgoing-args
 -
--	return (int) ret;
+-# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
+-# a lot more stack due to the lack of sharing of stacklots:
+-KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
 -
--badframe:
--	force_sig(SIGSEGV, current);
--	return 0;
--}
+-# do binutils support CFI?
+-cflags-y += $(call as-instr,.cfi_startproc\n.cfi_rel_offset esp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
+-KBUILD_AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_rel_offset esp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
 -
--asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
--				unsigned long r4, unsigned long r5,
--				unsigned long r6, unsigned long r7,
--				struct pt_regs * regs)
--{
--	struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
--	sigset_t set;
--	stack_t __user st;
--	long long ret;
+-# is .cfi_signal_frame supported too?
+-cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
+-KBUILD_AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
 -
--	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
--		goto badframe;
+-KBUILD_CFLAGS += $(cflags-y)
 -
--	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
--		goto badframe;
+-# Default subarch .c files
+-mcore-y  := arch/x86/mach-default
 -
--	sigdelsetmask(&set, ~_BLOCKABLE);
--	spin_lock_irq(&current->sighand->siglock);
--	current->blocked = set;
--	recalc_sigpending();
--	spin_unlock_irq(&current->sighand->siglock);
+-# Voyager subarch support
+-mflags-$(CONFIG_X86_VOYAGER)	:= -Iinclude/asm-x86/mach-voyager
+-mcore-$(CONFIG_X86_VOYAGER)	:= arch/x86/mach-voyager
 -
--	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
--		goto badframe;
--	regs->pc -= 4;
+-# VISWS subarch support
+-mflags-$(CONFIG_X86_VISWS)	:= -Iinclude/asm-x86/mach-visws
+-mcore-$(CONFIG_X86_VISWS)	:= arch/x86/mach-visws
 -
--	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
--		goto badframe;
--	/* It is more difficult to avoid calling this function than to
--	   call it and ignore errors.  */
--	do_sigaltstack(&st, NULL, REF_REG_SP);
+-# NUMAQ subarch support
+-mflags-$(CONFIG_X86_NUMAQ)	:= -Iinclude/asm-x86/mach-numaq
+-mcore-$(CONFIG_X86_NUMAQ)	:= arch/x86/mach-default
 -
--	return (int) ret;
+-# BIGSMP subarch support
+-mflags-$(CONFIG_X86_BIGSMP)	:= -Iinclude/asm-x86/mach-bigsmp
+-mcore-$(CONFIG_X86_BIGSMP)	:= arch/x86/mach-default
 -
--badframe:
--	force_sig(SIGSEGV, current);
--	return 0;
--}
+-#Summit subarch support
+-mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-x86/mach-summit
+-mcore-$(CONFIG_X86_SUMMIT)  := arch/x86/mach-default
 -
--/*
-- * Set up a signal frame.
-- */
+-# generic subarchitecture
+-mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-x86/mach-generic
+-mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default
+-core-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/
 -
--static int
--setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
--		 unsigned long mask)
--{
--	int err = 0;
+-# ES7000 subarch support
+-mflags-$(CONFIG_X86_ES7000)	:= -Iinclude/asm-x86/mach-es7000
+-mcore-$(CONFIG_X86_ES7000)	:= arch/x86/mach-default
+-core-$(CONFIG_X86_ES7000)	:= arch/x86/mach-es7000/
 -
--	/* Do this first, otherwise is this sets sr->fd, that value isn't preserved. */
--	err |= setup_sigcontext_fpu(regs, sc);
+-# Xen paravirtualization support
+-core-$(CONFIG_XEN)		+= arch/x86/xen/
 -
--#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
+-# lguest paravirtualization support
+-core-$(CONFIG_LGUEST_GUEST)	+= arch/x86/lguest/
 -
--	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
--	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
--	COPY(regs[8]);	COPY(regs[9]);	COPY(regs[10]);	COPY(regs[11]);
--	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
--	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
--	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
--	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
--	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
--	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
--	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
--	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
--	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
--	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
--	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
--	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
--	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
--	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
--	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
--	COPY(sr);	COPY(pc);
+-# default subarch .h files
+-mflags-y += -Iinclude/asm-x86/mach-default
 -
--#undef COPY
+-head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task.o
 -
--	err |= __put_user(mask, &sc->oldmask);
+-libs-y 					+= arch/x86/lib/
+-core-y					+= arch/x86/kernel/ \
+-					   arch/x86/mm/ \
+-					   $(mcore-y)/ \
+-					   arch/x86/crypto/
+-drivers-$(CONFIG_MATH_EMULATION)	+= arch/x86/math-emu/
+-drivers-$(CONFIG_PCI)			+= arch/x86/pci/
+-# must be linked after kernel/
+-drivers-$(CONFIG_OPROFILE)		+= arch/x86/oprofile/
+-drivers-$(CONFIG_PM)			+= arch/x86/power/
+-drivers-$(CONFIG_FB)                    += arch/x86/video/
 -
--	return err;
--}
+-KBUILD_CFLAGS += $(mflags-y)
+-KBUILD_AFLAGS += $(mflags-y)
 -
--/*
-- * Determine which stack to use..
-- */
--static inline void __user *
--get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
--{
--	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
--		sp = current->sas_ss_sp + current->sas_ss_size;
+-boot := arch/x86/boot
 -
--	return (void __user *)((sp - frame_size) & -8ul);
--}
+-PHONY += zImage bzImage compressed zlilo bzlilo \
+-         zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
 -
--void sa_default_restorer(void);		/* See comments below */
--void sa_default_rt_restorer(void);	/* See comments below */
+-all: bzImage
 -
--static void setup_frame(int sig, struct k_sigaction *ka,
--			sigset_t *set, struct pt_regs *regs)
--{
--	struct sigframe __user *frame;
--	int err = 0;
--	int signal;
+-# KBUILD_IMAGE specify target image being built
+-                    KBUILD_IMAGE := $(boot)/bzImage
+-zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage
 -
--	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
+-zImage bzImage: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
+-	$(Q)mkdir -p $(objtree)/arch/i386/boot
+-	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
 -
--	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
--		goto give_sigsegv;
+-compressed: zImage
 -
--	signal = current_thread_info()->exec_domain
--		&& current_thread_info()->exec_domain->signal_invmap
--		&& sig < 32
--		? current_thread_info()->exec_domain->signal_invmap[sig]
--		: sig;
+-zlilo bzlilo: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zlilo
 -
--	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+-zdisk bzdisk: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk
 -
--	/* Give up earlier as i386, in case */
--	if (err)
--		goto give_sigsegv;
+-fdimage fdimage144 fdimage288 isoimage: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
 -
--	if (_NSIG_WORDS > 1) {
--		err |= __copy_to_user(frame->extramask, &set->sig[1],
--				      sizeof(frame->extramask)); }
+-install:
+-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
 -
--	/* Give up earlier as i386, in case */
--	if (err)
--		goto give_sigsegv;
+-archclean:
+-	$(Q)rm -rf $(objtree)/arch/i386/boot
+-	$(Q)$(MAKE) $(clean)=arch/x86/boot
 -
--	/* Set up to return from userspace.  If provided, use a stub
--	   already in userspace.  */
--	if (ka->sa.sa_flags & SA_RESTORER) {
--		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
+-define archhelp
+-  echo  '* bzImage	- Compressed kernel image (arch/x86/boot/bzImage)'
+-  echo  '  install	- Install kernel using'
+-  echo  '		   (your) ~/bin/installkernel or'
+-  echo  '		   (distribution) /sbin/installkernel or'
+-  echo  '		   install to $$(INSTALL_PATH) and run lilo'
+-  echo  '  bzdisk       - Create a boot floppy in /dev/fd0'
+-  echo  '  fdimage      - Create a boot floppy image'
+-  echo  '  isoimage     - Create a boot CD-ROM image'
+-endef
 -
--		/*
--		 * On SH5 all edited pointers are subject to NEFF
--		 */
--		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
--        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
--	} else {
--		/*
--		 * Different approach on SH5.
--	         * . Endianness independent asm code gets placed in entry.S .
--		 *   This is limited to four ASM instructions corresponding
--		 *   to two long longs in size.
--		 * . err checking is done on the else branch only
--		 * . flush_icache_range() is called upon __put_user() only
--		 * . all edited pointers are subject to NEFF
--		 * . being code, linker turns ShMedia bit on, always
--		 *   dereference index -1.
--		 */
--		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
--		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
--        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+-CLEAN_FILES += arch/x86/boot/fdimage \
+-	       arch/x86/boot/image.iso \
+-	       arch/x86/boot/mtools.conf
+diff --git a/arch/x86/Makefile_64 b/arch/x86/Makefile_64
+deleted file mode 100644
+index a804860..0000000
+--- a/arch/x86/Makefile_64
++++ /dev/null
+@@ -1,144 +0,0 @@
+-#
+-# x86_64 Makefile
+-#
+-# This file is included by the global makefile so that you can add your own
+-# architecture-specific flags and dependencies. Remember to do have actions
+-# for "archclean" and "archdep" for cleaning up and making dependencies for
+-# this architecture
+-#
+-# This file is subject to the terms and conditions of the GNU General Public
+-# License.  See the file "COPYING" in the main directory of this archive
+-# for more details.
+-#
+-# Copyright (C) 1994 by Linus Torvalds
+-#
+-# 19990713  Artur Skawina <skawina@geocities.com>
+-#           Added '-march' and '-mpreferred-stack-boundary' support
+-# 20000913  Pavel Machek <pavel@suse.cz>
+-#	    Converted for x86_64 architecture
+-# 20010105  Andi Kleen, add IA32 compiler.
+-#           ....and later removed it again....
+-#
+-# $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $
 -
--		if (__copy_to_user(frame->retcode,
--			(unsigned long long)sa_default_restorer & (~1), 16) != 0)
--			goto give_sigsegv;
+-# BITS is used as extension for files which are available in a 32 bit
+-# and a 64 bit version to simplify shared Makefiles.
+-# e.g.: obj-y += foo_$(BITS).o
+-BITS := 64
+-export BITS
 -
--		/* Cohere the trampoline with the I-cache. */
--		flush_cache_sigtramp(DEREF_REG_PR-1, DEREF_REG_PR-1+16);
--	}
+-LDFLAGS		:= -m elf_x86_64
+-OBJCOPYFLAGS	:= -O binary -R .note -R .comment -S
+-LDFLAGS_vmlinux :=
+-CHECKFLAGS      += -D__x86_64__ -m64
 -
--	/*
--	 * Set up registers for signal handler.
--	 * All edited pointers are subject to NEFF.
--	 */
--	regs->regs[REG_SP] = (unsigned long) frame;
--	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
--        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
--	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+-cflags-y	:=
+-cflags-kernel-y	:=
+-cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
+-cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+-# gcc doesn't support -march=core2 yet as of gcc 4.3, but I hope it
+-# will eventually. Use -mtune=generic as fallback
+-cflags-$(CONFIG_MCORE2) += \
+-	$(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
 -
--        /* FIXME:
--           The glibc profiling support for SH-5 needs to be passed a sigcontext
--           so it can retrieve the PC.  At some point during 2003 the glibc
--           support was changed to receive the sigcontext through the 2nd
--           argument, but there are still versions of libc.so in use that use
--           the 3rd argument.  Until libc.so is stabilised, pass the sigcontext
--           through both 2nd and 3rd arguments.
--        */
+-cflags-y += -m64
+-cflags-y += -mno-red-zone
+-cflags-y += -mcmodel=kernel
+-cflags-y += -pipe
+-cflags-y += -Wno-sign-compare
+-cflags-y += -fno-asynchronous-unwind-tables
+-ifneq ($(CONFIG_DEBUG_INFO),y)
+-# -fweb shrinks the kernel a bit, but the difference is very small
+-# it also messes up debugging, so don't use it for now.
+-#cflags-y += $(call cc-option,-fweb)
+-endif
+-# -funit-at-a-time shrinks the kernel .text considerably
+-# unfortunately it makes reading oopses harder.
+-cflags-y += $(call cc-option,-funit-at-a-time)
+-# prevent gcc from generating any FP code by mistake
+-cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+-# this works around some issues with generating unwind tables in older gccs
+-# newer gccs do it by default
+-cflags-y += -maccumulate-outgoing-args
 -
--	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
--	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
+-# do binutils support CFI?
+-cflags-y += $(call as-instr,.cfi_startproc\n.cfi_rel_offset rsp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
+-KBUILD_AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_rel_offset rsp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
 -
--	regs->pc = (unsigned long) ka->sa.sa_handler;
--	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+-# is .cfi_signal_frame supported too?
+-cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
+-KBUILD_AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
 -
--	set_fs(USER_DS);
+-cflags-$(CONFIG_CC_STACKPROTECTOR) += $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh "$(CC)" -fstack-protector )
+-cflags-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh "$(CC)" -fstack-protector-all )
 -
--#if DEBUG_SIG
--	/* Broken %016Lx */
--	printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
--		signal,
--		current->comm, current->pid, frame,
--		regs->pc >> 32, regs->pc & 0xffffffff,
--		DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
--#endif
+-KBUILD_CFLAGS += $(cflags-y)
+-CFLAGS_KERNEL += $(cflags-kernel-y)
+-KBUILD_AFLAGS += -m64
 -
--	return;
+-head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task.o
 -
--give_sigsegv:
--	force_sigsegv(sig, current);
--}
+-libs-y 					+= arch/x86/lib/
+-core-y					+= arch/x86/kernel/ \
+-					   arch/x86/mm/ \
+-					   arch/x86/crypto/ \
+-					   arch/x86/vdso/
+-core-$(CONFIG_IA32_EMULATION)		+= arch/x86/ia32/
+-drivers-$(CONFIG_PCI)			+= arch/x86/pci/
+-drivers-$(CONFIG_OPROFILE)		+= arch/x86/oprofile/
 -
--static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
--			   sigset_t *set, struct pt_regs *regs)
--{
--	struct rt_sigframe __user *frame;
--	int err = 0;
--	int signal;
+-boot := arch/x86/boot
 -
--	frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
+-PHONY += bzImage bzlilo install archmrproper \
+-	 fdimage fdimage144 fdimage288 isoimage archclean
 -
--	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
--		goto give_sigsegv;
+-#Default target when executing "make"
+-all: bzImage
 -
--	signal = current_thread_info()->exec_domain
--		&& current_thread_info()->exec_domain->signal_invmap
--		&& sig < 32
--		? current_thread_info()->exec_domain->signal_invmap[sig]
--		: sig;
+-BOOTIMAGE                     := arch/x86/boot/bzImage
+-KBUILD_IMAGE                  := $(BOOTIMAGE)
 -
--	err |= __put_user(&frame->info, &frame->pinfo);
--	err |= __put_user(&frame->uc, &frame->puc);
--	err |= copy_siginfo_to_user(&frame->info, info);
+-bzImage: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
+-	$(Q)mkdir -p $(objtree)/arch/x86_64/boot
+-	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
 -
--	/* Give up earlier as i386, in case */
--	if (err)
--		goto give_sigsegv;
+-bzlilo: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zlilo
 -
--	/* Create the ucontext.  */
--	err |= __put_user(0, &frame->uc.uc_flags);
--	err |= __put_user(0, &frame->uc.uc_link);
--	err |= __put_user((void *)current->sas_ss_sp,
--			  &frame->uc.uc_stack.ss_sp);
--	err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
--			  &frame->uc.uc_stack.ss_flags);
--	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
--	err |= setup_sigcontext(&frame->uc.uc_mcontext,
--			        regs, set->sig[0]);
--	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+-bzdisk: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk
 -
--	/* Give up earlier as i386, in case */
--	if (err)
--		goto give_sigsegv;
+-fdimage fdimage144 fdimage288 isoimage: vmlinux
+-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
 -
--	/* Set up to return from userspace.  If provided, use a stub
--	   already in userspace.  */
--	if (ka->sa.sa_flags & SA_RESTORER) {
--		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
+-install: vdso_install
+-	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ 
 -
--		/*
--		 * On SH5 all edited pointers are subject to NEFF
--		 */
--		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
--        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
--	} else {
--		/*
--		 * Different approach on SH5.
--	         * . Endianness independent asm code gets placed in entry.S .
--		 *   This is limited to four ASM instructions corresponding
--		 *   to two long longs in size.
--		 * . err checking is done on the else branch only
--		 * . flush_icache_range() is called upon __put_user() only
--		 * . all edited pointers are subject to NEFF
--		 * . being code, linker turns ShMedia bit on, always
--		 *   dereference index -1.
--		 */
+-vdso_install:
+-ifeq ($(CONFIG_IA32_EMULATION),y)
+-	$(Q)$(MAKE) $(build)=arch/x86/ia32 $@
+-endif
+-	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
 -
--		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
--		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
--        		 	(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+-archclean:
+-	$(Q)rm -rf $(objtree)/arch/x86_64/boot
+-	$(Q)$(MAKE) $(clean)=$(boot)
 -
--		if (__copy_to_user(frame->retcode,
--			(unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
--			goto give_sigsegv;
+-define archhelp
+-  echo  '* bzImage	- Compressed kernel image (arch/x86/boot/bzImage)'
+-  echo  '  install	- Install kernel using'
+-  echo  '		   (your) ~/bin/installkernel or'
+-  echo  '		   (distribution) /sbin/installkernel or'
+-  echo  '		   install to $$(INSTALL_PATH) and run lilo'
+-  echo  '  bzdisk       - Create a boot floppy in /dev/fd0'
+-  echo  '  fdimage      - Create a boot floppy image'
+-  echo  '  isoimage     - Create a boot CD-ROM image'
+-endef
 -
--		flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
--	}
+-CLEAN_FILES += arch/x86/boot/fdimage \
+-	       arch/x86/boot/image.iso \
+-	       arch/x86/boot/mtools.conf
 -
--	/*
--	 * Set up registers for signal handler.
--	 * All edited pointers are subject to NEFF.
--	 */
--	regs->regs[REG_SP] = (unsigned long) frame;
--	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
--        		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
--	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
--	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
--	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
--	regs->pc = (unsigned long) ka->sa.sa_handler;
--	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
 -
--	set_fs(USER_DS);
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 7a3116c..349b81a 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -28,9 +28,11 @@ SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
+ targets		:= vmlinux.bin setup.bin setup.elf zImage bzImage
+ subdir- 	:= compressed
+ 
+-setup-y		+= a20.o apm.o cmdline.o copy.o cpu.o cpucheck.o edd.o
++setup-y		+= a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
+ setup-y		+= header.o main.o mca.o memory.o pm.o pmjump.o
+-setup-y		+= printf.o string.o tty.o video.o version.o voyager.o
++setup-y		+= printf.o string.o tty.o video.o version.o
++setup-$(CONFIG_X86_APM_BOOT) += apm.o
++setup-$(CONFIG_X86_VOYAGER) += voyager.o
+ 
+ # The link order of the video-*.o modules can matter.  In particular,
+ # video-vga.o *must* be listed first, followed by video-vesa.o.
+@@ -49,10 +51,7 @@ HOSTCFLAGS_build.o := $(LINUXINCLUDE)
+ 
+ # How to compile the 16-bit code.  Note we always compile for -march=i386,
+ # that way we can complain to the user if the CPU is insufficient.
+-cflags-$(CONFIG_X86_32) :=
+-cflags-$(CONFIG_X86_64) := -m32
+ KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+-		   $(cflags-y) \
+ 		   -Wall -Wstrict-prototypes \
+ 		   -march=i386 -mregparm=3 \
+ 		   -include $(srctree)/$(src)/code16gcc.h \
+@@ -62,6 +61,7 @@ KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+ 			$(call cc-option, -fno-unit-at-a-time)) \
+ 		   $(call cc-option, -fno-stack-protector) \
+ 		   $(call cc-option, -mpreferred-stack-boundary=2)
++KBUILD_CFLAGS +=   $(call cc-option,-m32)
+ KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ 
+ $(obj)/zImage:  IMAGE_OFFSET := 0x1000
+diff --git a/arch/x86/boot/apm.c b/arch/x86/boot/apm.c
+index eab50c5..c117c7f 100644
+--- a/arch/x86/boot/apm.c
++++ b/arch/x86/boot/apm.c
+@@ -19,8 +19,6 @@
+ 
+ #include "boot.h"
+ 
+-#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
 -
--#if DEBUG_SIG
--	/* Broken %016Lx */
--	printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
--		signal,
--		current->comm, current->pid, frame,
--		regs->pc >> 32, regs->pc & 0xffffffff,
--		DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
+ int query_apm_bios(void)
+ {
+ 	u16 ax, bx, cx, dx, di;
+@@ -95,4 +93,3 @@ int query_apm_bios(void)
+ 	return 0;
+ }
+ 
 -#endif
--
--	return;
--
--give_sigsegv:
--	force_sigsegv(sig, current);
--}
--
--/*
-- * OK, we're invoking a handler
-- */
--
--static void
--handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
--		sigset_t *oldset, struct pt_regs * regs)
--{
--	/* Are we from a system call? */
--	if (regs->syscall_nr >= 0) {
--		/* If so, check system call restarting.. */
--		switch (regs->regs[REG_RET]) {
--			case -ERESTART_RESTARTBLOCK:
--			case -ERESTARTNOHAND:
--				regs->regs[REG_RET] = -EINTR;
--				break;
--
--			case -ERESTARTSYS:
--				if (!(ka->sa.sa_flags & SA_RESTART)) {
--					regs->regs[REG_RET] = -EINTR;
--					break;
--				}
--			/* fallthrough */
--			case -ERESTARTNOINTR:
--				/* Decode syscall # */
--				regs->regs[REG_RET] = regs->syscall_nr;
--				regs->pc -= 4;
--		}
--	}
--
--	/* Set up the stack frame */
--	if (ka->sa.sa_flags & SA_SIGINFO)
--		setup_rt_frame(sig, ka, info, oldset, regs);
--	else
--		setup_frame(sig, ka, oldset, regs);
--
--	spin_lock_irq(&current->sighand->siglock);
--	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
--	if (!(ka->sa.sa_flags & SA_NODEFER))
--		sigaddset(&current->blocked,sig);
--	recalc_sigpending();
--	spin_unlock_irq(&current->sighand->siglock);
--}
--
--/*
-- * Note that 'init' is a special process: it doesn't get signals it doesn't
-- * want to handle. Thus you cannot kill init even with a SIGKILL even by
-- * mistake.
-- *
-- * Note that we go through the signals twice: once to check the signals that
-- * the kernel can handle, and then we build all the user-level signal handling
-- * stack-frames in one go after that.
-- */
--int do_signal(struct pt_regs *regs, sigset_t *oldset)
--{
--	siginfo_t info;
--	int signr;
--	struct k_sigaction ka;
--
--	/*
--	 * We want the common case to go fast, which
--	 * is why we may in certain cases get here from
--	 * kernel mode. Just return without doing anything
--	 * if so.
--	 */
--	if (!user_mode(regs))
--		return 1;
--
--	if (try_to_freeze())
--		goto no_signal;
--
--	if (test_thread_flag(TIF_RESTORE_SIGMASK))
--		oldset = &current->saved_sigmask;
--	else if (!oldset)
--		oldset = &current->blocked;
--
--	signr = get_signal_to_deliver(&info, &ka, regs, 0);
--
--	if (signr > 0) {
--		/* Whee!  Actually deliver the signal.  */
--		handle_signal(signr, &info, &ka, oldset, regs);
--
--		/*
--		 * If a signal was successfully delivered, the saved sigmask
--		 * is in its frame, and we can clear the TIF_RESTORE_SIGMASK
--		 * flag.
--		 */
--		if (test_thread_flag(TIF_RESTORE_SIGMASK))
--			clear_thread_flag(TIF_RESTORE_SIGMASK);
--
--		return 1;
--	}
--
--no_signal:
--	/* Did we come from a system call? */
--	if (regs->syscall_nr >= 0) {
--		/* Restart the system call - no handlers present */
--		switch (regs->regs[REG_RET]) {
--		case -ERESTARTNOHAND:
--		case -ERESTARTSYS:
--		case -ERESTARTNOINTR:
--			/* Decode Syscall # */
--			regs->regs[REG_RET] = regs->syscall_nr;
--			regs->pc -= 4;
--			break;
--
--		case -ERESTART_RESTARTBLOCK:
--			regs->regs[REG_RET] = __NR_restart_syscall;
--			regs->pc -= 4;
--			break;
--		}
--	}
--
--	/* No signal to deliver -- put the saved sigmask back */
--	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
--		clear_thread_flag(TIF_RESTORE_SIGMASK);
--		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
--	}
--
--	return 0;
--}
-diff --git a/arch/sh64/kernel/switchto.S b/arch/sh64/kernel/switchto.S
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index d2b5adf..7822a49 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -109,7 +109,7 @@ typedef unsigned int addr_t;
+ static inline u8 rdfs8(addr_t addr)
+ {
+ 	u8 v;
+-	asm volatile("movb %%fs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
++	asm volatile("movb %%fs:%1,%0" : "=q" (v) : "m" (*(u8 *)addr));
+ 	return v;
+ }
+ static inline u16 rdfs16(addr_t addr)
+@@ -127,21 +127,21 @@ static inline u32 rdfs32(addr_t addr)
+ 
+ static inline void wrfs8(u8 v, addr_t addr)
+ {
+-	asm volatile("movb %1,%%fs:%0" : "+m" (*(u8 *)addr) : "r" (v));
++	asm volatile("movb %1,%%fs:%0" : "+m" (*(u8 *)addr) : "qi" (v));
+ }
+ static inline void wrfs16(u16 v, addr_t addr)
+ {
+-	asm volatile("movw %1,%%fs:%0" : "+m" (*(u16 *)addr) : "r" (v));
++	asm volatile("movw %1,%%fs:%0" : "+m" (*(u16 *)addr) : "ri" (v));
+ }
+ static inline void wrfs32(u32 v, addr_t addr)
+ {
+-	asm volatile("movl %1,%%fs:%0" : "+m" (*(u32 *)addr) : "r" (v));
++	asm volatile("movl %1,%%fs:%0" : "+m" (*(u32 *)addr) : "ri" (v));
+ }
+ 
+ static inline u8 rdgs8(addr_t addr)
+ {
+ 	u8 v;
+-	asm volatile("movb %%gs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
++	asm volatile("movb %%gs:%1,%0" : "=q" (v) : "m" (*(u8 *)addr));
+ 	return v;
+ }
+ static inline u16 rdgs16(addr_t addr)
+@@ -159,15 +159,15 @@ static inline u32 rdgs32(addr_t addr)
+ 
+ static inline void wrgs8(u8 v, addr_t addr)
+ {
+-	asm volatile("movb %1,%%gs:%0" : "+m" (*(u8 *)addr) : "r" (v));
++	asm volatile("movb %1,%%gs:%0" : "+m" (*(u8 *)addr) : "qi" (v));
+ }
+ static inline void wrgs16(u16 v, addr_t addr)
+ {
+-	asm volatile("movw %1,%%gs:%0" : "+m" (*(u16 *)addr) : "r" (v));
++	asm volatile("movw %1,%%gs:%0" : "+m" (*(u16 *)addr) : "ri" (v));
+ }
+ static inline void wrgs32(u32 v, addr_t addr)
+ {
+-	asm volatile("movl %1,%%gs:%0" : "+m" (*(u32 *)addr) : "r" (v));
++	asm volatile("movl %1,%%gs:%0" : "+m" (*(u32 *)addr) : "ri" (v));
+ }
+ 
+ /* Note: these only return true/false, not a signed return value! */
+@@ -241,6 +241,7 @@ int query_apm_bios(void);
+ 
+ /* cmdline.c */
+ int cmdline_find_option(const char *option, char *buffer, int bufsize);
++int cmdline_find_option_bool(const char *option);
+ 
+ /* cpu.c, cpucheck.c */
+ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
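An aside on the "r" -> "q"/"qi"/"ri" constraint changes in the boot.h hunk above (a hedged sketch assuming standard GNU C inline-asm constraint semantics; the poke8() helper below is a hypothetical illustration, not part of the patch): on 32-bit x86 only %eax-%edx have 8-bit sub-registers, so byte-sized operands need the "q" constraint, and adding "i" lets gcc emit an immediate store instead of forcing the value through a register.

	/* Hypothetical illustration of the "qi" constraint used by wrfs8()/wrgs8().
	 * "q" - register with an 8-bit part (%al/%bl/%cl/%dl on i386);
	 * "i" - integer constant, allowing e.g. movb $0x41,(%eax).
	 * A plain "r" could pick %esi/%edi, which have no 8-bit form on i386
	 * and would fail to assemble for a movb. */
	static inline void poke8(unsigned char v, unsigned char *p)
	{
		asm volatile("movb %1,%0" : "+m" (*p) : "qi" (v));
	}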
+diff --git a/arch/x86/boot/cmdline.c b/arch/x86/boot/cmdline.c
+index 34bb778..680408a 100644
+--- a/arch/x86/boot/cmdline.c
++++ b/arch/x86/boot/cmdline.c
+@@ -95,3 +95,68 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize)
+ 
+ 	return len;
+ }
++
++/*
++ * Find a boolean option (like quiet,noapic,nosmp....)
++ *
++ * Returns the position of that option (starts counting with 1)
++ * or 0 on not found
++ */
++int cmdline_find_option_bool(const char *option)
++{
++	u32 cmdline_ptr = boot_params.hdr.cmd_line_ptr;
++	addr_t cptr;
++	char c;
++	int pos = 0, wstart = 0;
++	const char *opptr = NULL;
++	enum {
++		st_wordstart,	/* Start of word/after whitespace */
++		st_wordcmp,	/* Comparing this word */
++		st_wordskip,	/* Miscompare, skip */
++	} state = st_wordstart;
++
++	if (!cmdline_ptr || cmdline_ptr >= 0x100000)
++		return -1;	/* No command line, or inaccessible */
++
++	cptr = cmdline_ptr & 0xf;
++	set_fs(cmdline_ptr >> 4);
++
++	while (cptr < 0x10000) {
++		c = rdfs8(cptr++);
++		pos++;
++
++		switch (state) {
++		case st_wordstart:
++			if (!c)
++				return 0;
++			else if (myisspace(c))
++				break;
++
++			state = st_wordcmp;
++			opptr = option;
++			wstart = pos;
++			/* fall through */
++
++		case st_wordcmp:
++			if (!*opptr)
++				if (!c || myisspace(c))
++					return wstart;
++				else
++					state = st_wordskip;
++			else if (!c)
++				return 0;
++			else if (c != *opptr++)
++				state = st_wordskip;
++			break;
++
++		case st_wordskip:
++			if (!c)
++				return 0;
++			else if (myisspace(c))
++				state = st_wordstart;
++			break;
++		}
++	}
++
++	return 0;	/* Buffer overrun */
++}
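A minimal usage sketch for the new cmdline_find_option_bool() helper added above (assuming the arch/x86/boot setup environment and the boot.h declaration from the previous hunk; the debug_requested() wrapper is hypothetical, not part of the patch):

	#include "boot.h"

	/* Non-zero when "debug" appears as a word on the kernel command line.
	 * cmdline_find_option_bool() returns the option's position counting
	 * from 1, 0 when the option is absent, and -1 when no command line is
	 * accessible, so only a strictly positive result counts as a hit. */
	static int debug_requested(void)
	{
		return cmdline_find_option_bool("debug") > 0;
	}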
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 52c1db8..fe24cea 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -1,5 +1,63 @@
++#
++# linux/arch/x86/boot/compressed/Makefile
++#
++# create a compressed vmlinux image from the original vmlinux
++#
++
++targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o
++
++KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
++KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
++cflags-$(CONFIG_X86_64) := -mcmodel=small
++KBUILD_CFLAGS += $(cflags-y)
++KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
++KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
++
++KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
++
++LDFLAGS := -m elf_$(UTS_MACHINE)
++LDFLAGS_vmlinux := -T
++
++$(obj)/vmlinux: $(src)/vmlinux_$(BITS).lds $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/piggy.o FORCE
++	$(call if_changed,ld)
++	@:
++
++$(obj)/vmlinux.bin: vmlinux FORCE
++	$(call if_changed,objcopy)
++
++
+ ifeq ($(CONFIG_X86_32),y)
+-include ${srctree}/arch/x86/boot/compressed/Makefile_32
++targets += vmlinux.bin.all vmlinux.relocs
++hostprogs-y := relocs
++
++quiet_cmd_relocs = RELOCS  $@
++      cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
++$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
++	$(call if_changed,relocs)
++
++vmlinux.bin.all-y := $(obj)/vmlinux.bin
++vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs
++quiet_cmd_relocbin = BUILD   $@
++      cmd_relocbin = cat $(filter-out FORCE,$^) > $@
++$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE
++	$(call if_changed,relocbin)
++
++ifdef CONFIG_RELOCATABLE
++$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
++	$(call if_changed,gzip)
+ else
+-include ${srctree}/arch/x86/boot/compressed/Makefile_64
++$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
++	$(call if_changed,gzip)
+ endif
++LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
++
++else
++$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
++	$(call if_changed,gzip)
++
++LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
++endif
++
++
++$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
++	$(call if_changed,ld)
+diff --git a/arch/x86/boot/compressed/Makefile_32 b/arch/x86/boot/compressed/Makefile_32
 deleted file mode 100644
-index 45b2d90..0000000
---- a/arch/sh64/kernel/switchto.S
+index e43ff7c..0000000
+--- a/arch/x86/boot/compressed/Makefile_32
 +++ /dev/null
-@@ -1,198 +0,0 @@
--/*
-- * arch/sh64/kernel/switchto.S
-- *
-- * sh64 context switch
-- *
-- * Copyright (C) 2004  Richard Curnow
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
--*/
--
--	.section .text..SHmedia32,"ax"
--	.little
--
--	.balign 32
--
--	.type sh64_switch_to,@function
--	.global sh64_switch_to
--	.global __sh64_switch_to_end
--sh64_switch_to:
--
--/* Incoming args
--   r2 - prev
--   r3 - &prev->thread
--   r4 - next
--   r5 - &next->thread
--
--   Outgoing results
--   r2 - last (=prev) : this just stays in r2 throughout
--
--   Want to create a full (struct pt_regs) on the stack to allow backtracing
--   functions to work.  However, we only need to populate the callee-save
--   register slots in this structure; since we're a function our ancestors must
--   have themselves preserved all caller saved state in the stack.  This saves
--   some wasted effort since we won't need to look at the values.
--
--   In particular, all caller-save registers are immediately available for
--   scratch use.
--
--*/
--
--#define FRAME_SIZE (76*8 + 8)
--
--	movi	FRAME_SIZE, r0
--	sub.l	r15, r0, r15
--	! Do normal-style register save to support backtrace
--
--	st.l	r15,   0, r18	! save link reg
--	st.l	r15,   4, r14	! save fp
--	add.l	r15, r63, r14	! setup frame pointer
--
--	! hopefully this looks normal to the backtrace now.
--
--	addi.l	r15,   8, r1    ! base of pt_regs
--	addi.l	r1,   24, r0    ! base of pt_regs.regs
--	addi.l	r0, (63*8), r8	! base of pt_regs.trregs
--
--	/* Note : to be fixed?
--	   struct pt_regs is really designed for holding the state on entry
--	   to an exception, i.e. pc,sr,regs etc.  However, for the context
--	   switch state, some of this is not required.  But the unwinder takes
--	   struct pt_regs * as an arg so we have to build this structure
--	   to allow unwinding switched tasks in show_state() */
--
--	st.q	r0, ( 9*8), r9
--	st.q	r0, (10*8), r10
--	st.q	r0, (11*8), r11
--	st.q	r0, (12*8), r12
--	st.q	r0, (13*8), r13
--	st.q	r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
--	! the point where the process is left in suspended animation, i.e. current
--	! fp here, not the saved one.
--	st.q	r0, (16*8), r16
--
--	st.q	r0, (24*8), r24
--	st.q	r0, (25*8), r25
--	st.q	r0, (26*8), r26
--	st.q	r0, (27*8), r27
--	st.q	r0, (28*8), r28
--	st.q	r0, (29*8), r29
--	st.q	r0, (30*8), r30
--	st.q	r0, (31*8), r31
--	st.q	r0, (32*8), r32
--	st.q	r0, (33*8), r33
--	st.q	r0, (34*8), r34
--	st.q	r0, (35*8), r35
--
--	st.q	r0, (44*8), r44
--	st.q	r0, (45*8), r45
--	st.q	r0, (46*8), r46
--	st.q	r0, (47*8), r47
--	st.q	r0, (48*8), r48
--	st.q	r0, (49*8), r49
--	st.q	r0, (50*8), r50
--	st.q	r0, (51*8), r51
--	st.q	r0, (52*8), r52
--	st.q	r0, (53*8), r53
--	st.q	r0, (54*8), r54
--	st.q	r0, (55*8), r55
--	st.q	r0, (56*8), r56
--	st.q	r0, (57*8), r57
--	st.q	r0, (58*8), r58
--	st.q	r0, (59*8), r59
--
--	! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
--	! Use a local label to avoid creating a symbol that will confuse the !
--	! backtrace
--	pta	.Lsave_pc, tr0
--
--	gettr	tr5, r45
--	gettr	tr6, r46
--	gettr	tr7, r47
--	st.q	r8, (5*8), r45
--	st.q	r8, (6*8), r46
--	st.q	r8, (7*8), r47
--
--	! Now switch context
--	gettr	tr0, r9
--	st.l	r3, 0, r15	! prev->thread.sp
--	st.l	r3, 8, r1	! prev->thread.kregs
--	st.l	r3, 4, r9	! prev->thread.pc
--	st.q	r1, 0, r9	! save prev->thread.pc into pt_regs->pc
+@@ -1,50 +0,0 @@
+-#
+-# linux/arch/x86/boot/compressed/Makefile
+-#
+-# create a compressed vmlinux image from the original vmlinux
+-#
 -
--	! Load PC for next task (init value or save_pc later)
--	ld.l	r5, 4, r18	! next->thread.pc
--	! Switch stacks
--	ld.l	r5, 0, r15	! next->thread.sp
--	ptabs	r18, tr0
+-targets		:= vmlinux vmlinux.bin vmlinux.bin.gz head_32.o misc_32.o piggy.o \
+-			vmlinux.bin.all vmlinux.relocs
+-EXTRA_AFLAGS	:= -traditional
 -
--	! Update current
--	ld.l	r4, 4, r9	! next->thread_info (2nd element of next task_struct)
--	putcon	r9, kcr0	! current = next->thread_info
+-LDFLAGS_vmlinux := -T
+-hostprogs-y	:= relocs
 -
--	! go to save_pc for a reschedule, or the initial thread.pc for a new process
--	blink	tr0, r63
+-KBUILD_CFLAGS  := -m32 -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
+-	   -fno-strict-aliasing -fPIC \
+-	   $(call cc-option,-ffreestanding) \
+-	   $(call cc-option,-fno-stack-protector)
+-LDFLAGS := -m elf_i386
 -
--	! Restore (when we come back to a previously saved task)
--.Lsave_pc:
--	addi.l	r15, 32, r0	! r0 = next's regs
--	addi.l	r0, (63*8), r8	! r8 = next's tr_regs
+-$(obj)/vmlinux: $(src)/vmlinux_32.lds $(obj)/head_32.o $(obj)/misc_32.o $(obj)/piggy.o FORCE
+-	$(call if_changed,ld)
+-	@:
 -
--	ld.q	r8, (5*8), r45
--	ld.q	r8, (6*8), r46
--	ld.q	r8, (7*8), r47
--	ptabs	r45, tr5
--	ptabs	r46, tr6
--	ptabs	r47, tr7
+-$(obj)/vmlinux.bin: vmlinux FORCE
+-	$(call if_changed,objcopy)
 -
--	ld.q	r0, ( 9*8), r9
--	ld.q	r0, (10*8), r10
--	ld.q	r0, (11*8), r11
--	ld.q	r0, (12*8), r12
--	ld.q	r0, (13*8), r13
--	ld.q	r0, (14*8), r14
--	ld.q	r0, (16*8), r16
+-quiet_cmd_relocs = RELOCS  $@
+-      cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
+-$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
+-	$(call if_changed,relocs)
 -
--	ld.q	r0, (24*8), r24
--	ld.q	r0, (25*8), r25
--	ld.q	r0, (26*8), r26
--	ld.q	r0, (27*8), r27
--	ld.q	r0, (28*8), r28
--	ld.q	r0, (29*8), r29
--	ld.q	r0, (30*8), r30
--	ld.q	r0, (31*8), r31
--	ld.q	r0, (32*8), r32
--	ld.q	r0, (33*8), r33
--	ld.q	r0, (34*8), r34
--	ld.q	r0, (35*8), r35
+-vmlinux.bin.all-y := $(obj)/vmlinux.bin
+-vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs
+-quiet_cmd_relocbin = BUILD   $@
+-      cmd_relocbin = cat $(filter-out FORCE,$^) > $@
+-$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE
+-	$(call if_changed,relocbin)
 -
--	ld.q	r0, (44*8), r44
--	ld.q	r0, (45*8), r45
--	ld.q	r0, (46*8), r46
--	ld.q	r0, (47*8), r47
--	ld.q	r0, (48*8), r48
--	ld.q	r0, (49*8), r49
--	ld.q	r0, (50*8), r50
--	ld.q	r0, (51*8), r51
--	ld.q	r0, (52*8), r52
--	ld.q	r0, (53*8), r53
--	ld.q	r0, (54*8), r54
--	ld.q	r0, (55*8), r55
--	ld.q	r0, (56*8), r56
--	ld.q	r0, (57*8), r57
--	ld.q	r0, (58*8), r58
--	ld.q	r0, (59*8), r59
+-ifdef CONFIG_RELOCATABLE
+-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
+-	$(call if_changed,gzip)
+-else
+-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+-	$(call if_changed,gzip)
+-endif
 -
--	! epilogue
--	ld.l	r15, 0, r18
--	ld.l	r15, 4, r14
--	ptabs	r18, tr0
--	movi	FRAME_SIZE, r0
--	add	r15, r0, r15
--	blink	tr0, r63
--__sh64_switch_to_end:
--.LFE1:
--	.size	sh64_switch_to,.LFE1-sh64_switch_to
+-LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
 -
-diff --git a/arch/sh64/kernel/sys_sh64.c b/arch/sh64/kernel/sys_sh64.c
+-$(obj)/piggy.o: $(src)/vmlinux_32.scr $(obj)/vmlinux.bin.gz FORCE
+-	$(call if_changed,ld)
+diff --git a/arch/x86/boot/compressed/Makefile_64 b/arch/x86/boot/compressed/Makefile_64
 deleted file mode 100644
-index de0a303..0000000
---- a/arch/sh64/kernel/sys_sh64.c
+index 7801e8d..0000000
+--- a/arch/x86/boot/compressed/Makefile_64
 +++ /dev/null
-@@ -1,304 +0,0 @@
--/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/kernel/sys_sh64.c
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- *
-- * This file contains various random system calls that
-- * have a non-standard calling sequence on the Linux/SH5
-- * platform.
-- *
-- * Mostly taken from i386 version.
-- *
-- */
--
--#include <linux/errno.h>
--#include <linux/rwsem.h>
--#include <linux/sched.h>
--#include <linux/mm.h>
--#include <linux/fs.h>
--#include <linux/smp.h>
--#include <linux/sem.h>
--#include <linux/msg.h>
--#include <linux/shm.h>
--#include <linux/stat.h>
--#include <linux/mman.h>
--#include <linux/file.h>
--#include <linux/utsname.h>
--#include <linux/syscalls.h>
--#include <linux/ipc.h>
--#include <asm/uaccess.h>
--#include <asm/ptrace.h>
--#include <asm/unistd.h>
--
--#define REG_3	3
--
--/*
-- * sys_pipe() is the normal C calling standard for creating
-- * a pipe. It's not the way Unix traditionally does this, though.
-- */
--#ifdef NEW_PIPE_IMPLEMENTATION
--asmlinkage int sys_pipe(unsigned long * fildes,
--			unsigned long   dummy_r3,
--			unsigned long   dummy_r4,
--			unsigned long   dummy_r5,
--			unsigned long   dummy_r6,
--			unsigned long   dummy_r7,
--			struct pt_regs * regs)	   /* r8 = pt_regs  forced by entry.S */
--{
--	int fd[2];
--	int ret;
--
--	ret = do_pipe(fd);
--	if (ret == 0)
--		/*
--		 ***********************************************************************
--		 *   To avoid the copy_to_user we prefer to break the ABIs convention, *
--		 *   packing the valid pair of file IDs into a single register (r3);   *
--		 *   while r2 is the return code as defined by the sh5-ABIs.	       *
--		 *   BE CAREFUL: pipe stub, into glibc, must be aware of this solution *
--		 ***********************************************************************
--
--#ifdef __LITTLE_ENDIAN__
--		regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]);
--#else
--		regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]);
--#endif
--
--		*/
--	       /* although not very clever this is endianess independent */
--		regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd);
--
--	return ret;
--}
--
--#else
--asmlinkage int sys_pipe(unsigned long * fildes)
--{
--        int fd[2];
--        int error;
--
--        error = do_pipe(fd);
--        if (!error) {
--                if (copy_to_user(fildes, fd, 2*sizeof(int)))
--                        error = -EFAULT;
--        }
--        return error;
--}
--
--#endif
--
--/*
-- * To avoid cache alias, we map the shard page with same color.
-- */
--#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))
--
--unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
--	unsigned long len, unsigned long pgoff, unsigned long flags)
--{
--	struct vm_area_struct *vma;
--
--	if (flags & MAP_FIXED) {
--		/* We do not accept a shared mapping if it would violate
--		 * cache aliasing constraints.
--		 */
--		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
--			return -EINVAL;
--		return addr;
--	}
--
--	if (len > TASK_SIZE)
--		return -ENOMEM;
--	if (!addr)
--		addr = TASK_UNMAPPED_BASE;
--
--	if (flags & MAP_PRIVATE)
--		addr = PAGE_ALIGN(addr);
--	else
--		addr = COLOUR_ALIGN(addr);
--
--	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
--		/* At this point:  (!vma || addr < vma->vm_end). */
--		if (TASK_SIZE - len < addr)
--			return -ENOMEM;
--		if (!vma || addr + len <= vma->vm_start)
--			return addr;
--		addr = vma->vm_end;
--		if (!(flags & MAP_PRIVATE))
--			addr = COLOUR_ALIGN(addr);
--	}
--}
--
--/* common code for old and new mmaps */
--static inline long do_mmap2(
--	unsigned long addr, unsigned long len,
--	unsigned long prot, unsigned long flags,
--	unsigned long fd, unsigned long pgoff)
--{
--	int error = -EBADF;
--	struct file * file = NULL;
--
--	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
--	if (!(flags & MAP_ANONYMOUS)) {
--		file = fget(fd);
--		if (!file)
--			goto out;
--	}
--
--	down_write(&current->mm->mmap_sem);
--	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
--	up_write(&current->mm->mmap_sem);
--
--	if (file)
--		fput(file);
--out:
--	return error;
--}
--
--asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
--	unsigned long prot, unsigned long flags,
--	unsigned long fd, unsigned long pgoff)
--{
--	return do_mmap2(addr, len, prot, flags, fd, pgoff);
--}
--
--asmlinkage int old_mmap(unsigned long addr, unsigned long len,
--	unsigned long prot, unsigned long flags,
--	int fd, unsigned long off)
--{
--	if (off & ~PAGE_MASK)
--		return -EINVAL;
--	return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
--}
--
--/*
-- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
-- *
-- * This is really horribly ugly.
-- */
--asmlinkage int sys_ipc(uint call, int first, int second,
--		       int third, void __user *ptr, long fifth)
--{
--	int version, ret;
--
--	version = call >> 16; /* hack for backward compatibility */
--	call &= 0xffff;
--
--	if (call <= SEMCTL)
--		switch (call) {
--		case SEMOP:
--			return sys_semtimedop(first, (struct sembuf __user *)ptr,
--					      second, NULL);
--		case SEMTIMEDOP:
--			return sys_semtimedop(first, (struct sembuf __user *)ptr,
--					      second,
--					      (const struct timespec __user *)fifth);
--		case SEMGET:
--			return sys_semget (first, second, third);
--		case SEMCTL: {
--			union semun fourth;
--			if (!ptr)
--				return -EINVAL;
--			if (get_user(fourth.__pad, (void * __user *) ptr))
--				return -EFAULT;
--			return sys_semctl (first, second, third, fourth);
--			}
--		default:
--			return -EINVAL;
--		}
--
--	if (call <= MSGCTL)
--		switch (call) {
--		case MSGSND:
--			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
--					  second, third);
--		case MSGRCV:
--			switch (version) {
--			case 0: {
--				struct ipc_kludge tmp;
--				if (!ptr)
--					return -EINVAL;
+@@ -1,30 +0,0 @@
+-#
+-# linux/arch/x86/boot/compressed/Makefile
+-#
+-# create a compressed vmlinux image from the original vmlinux
+-#
 -
--				if (copy_from_user(&tmp,
--						   (struct ipc_kludge __user *) ptr,
--						   sizeof (tmp)))
--					return -EFAULT;
--				return sys_msgrcv (first, tmp.msgp, second,
--						   tmp.msgtyp, third);
--				}
--			default:
--				return sys_msgrcv (first,
--						   (struct msgbuf __user *) ptr,
--						   second, fifth, third);
--			}
--		case MSGGET:
--			return sys_msgget ((key_t) first, second);
--		case MSGCTL:
--			return sys_msgctl (first, second,
--					   (struct msqid_ds __user *) ptr);
--		default:
--			return -EINVAL;
--		}
--	if (call <= SHMCTL)
--		switch (call) {
--		case SHMAT:
--			switch (version) {
--			default: {
--				ulong raddr;
--				ret = do_shmat (first, (char __user *) ptr,
--						 second, &raddr);
--				if (ret)
--					return ret;
--				return put_user (raddr, (ulong __user *) third);
--			}
--			case 1:	/* iBCS2 emulator entry point */
--				if (!segment_eq(get_fs(), get_ds()))
--					return -EINVAL;
--				return do_shmat (first, (char __user *) ptr,
--						  second, (ulong *) third);
--			}
--		case SHMDT:
--			return sys_shmdt ((char __user *)ptr);
--		case SHMGET:
--			return sys_shmget (first, second, third);
--		case SHMCTL:
--			return sys_shmctl (first, second,
--					   (struct shmid_ds __user *) ptr);
--		default:
--			return -EINVAL;
--		}
+-targets		:= vmlinux vmlinux.bin vmlinux.bin.gz head_64.o misc_64.o piggy.o
 -
--	return -EINVAL;
--}
+-KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUXINCLUDE) -O2  \
+-	  -fno-strict-aliasing -fPIC -mcmodel=small \
+-	   $(call cc-option, -ffreestanding) \
+-	   $(call cc-option, -fno-stack-protector)
+-KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+-LDFLAGS := -m elf_x86_64
 -
--asmlinkage int sys_uname(struct old_utsname * name)
--{
--	int err;
--	if (!name)
--		return -EFAULT;
--	down_read(&uts_sem);
--	err = copy_to_user(name, utsname(), sizeof (*name));
--	up_read(&uts_sem);
--	return err?-EFAULT:0;
--}
+-LDFLAGS_vmlinux := -T
+-$(obj)/vmlinux: $(src)/vmlinux_64.lds $(obj)/head_64.o $(obj)/misc_64.o $(obj)/piggy.o FORCE
+-	$(call if_changed,ld)
+-	@:
 -
--/*
-- * Do a system call from kernel instead of calling sys_execve so we
-- * end up with proper pt_regs.
-- */
--int kernel_execve(const char *filename, char *const argv[], char *const envp[])
--{
--	register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
--	register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
--	register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
--	register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
--	__asm__ __volatile__ ("trapa	%1 !\t\t\t execve(%2,%3,%4)"
--	: "=r" (__sc0)
--	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
--	__asm__ __volatile__ ("!dummy	%0 %1 %2 %3"
--	: : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
--	return __sc0;
--}
-diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S
-deleted file mode 100644
-index abb94c0..0000000
---- a/arch/sh64/kernel/syscalls.S
-+++ /dev/null
-@@ -1,381 +0,0 @@
--/*
-- * arch/sh64/kernel/syscalls.S
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2004 - 2007  Paul Mundt
-- * Copyright (C) 2003, 2004 Richard Curnow
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
+-$(obj)/vmlinux.bin: vmlinux FORCE
+-	$(call if_changed,objcopy)
 -
--#include <linux/sys.h>
+-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+-	$(call if_changed,gzip)
 -
--	.section .data, "aw"
--	.balign 32
+-LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
 -
--/*
-- * System calls jump table
-- */
--	.globl  sys_call_table
--sys_call_table:
--	.long sys_restart_syscall	/* 0  -  old "setup()" system call  */
--	.long sys_exit
--	.long sys_fork
--	.long sys_read
--	.long sys_write
--	.long sys_open			/* 5 */
--	.long sys_close
--	.long sys_waitpid
--	.long sys_creat
--	.long sys_link
--	.long sys_unlink		/* 10 */
--	.long sys_execve
--	.long sys_chdir
--	.long sys_time
--	.long sys_mknod
--	.long sys_chmod			/* 15 */
--	.long sys_lchown16
--	.long sys_ni_syscall	/* old break syscall holder */
--	.long sys_stat
--	.long sys_lseek
--	.long sys_getpid		/* 20 */
--	.long sys_mount
--	.long sys_oldumount
--	.long sys_setuid16
--	.long sys_getuid16
--	.long sys_stime			/* 25 */
--	.long sh64_ptrace
--	.long sys_alarm
--	.long sys_fstat
--	.long sys_pause
--	.long sys_utime			/* 30 */
--	.long sys_ni_syscall	/* old stty syscall holder */
--	.long sys_ni_syscall	/* old gtty syscall holder */
--	.long sys_access
--	.long sys_nice
--	.long sys_ni_syscall		/* 35 */ /* old ftime syscall holder */
--	.long sys_sync
--	.long sys_kill
--	.long sys_rename
--	.long sys_mkdir
--	.long sys_rmdir			/* 40 */
--	.long sys_dup
--	.long sys_pipe
--	.long sys_times
--	.long sys_ni_syscall	/* old prof syscall holder */
--	.long sys_brk			/* 45 */
--	.long sys_setgid16
--	.long sys_getgid16
--	.long sys_signal
--	.long sys_geteuid16
--	.long sys_getegid16		/* 50 */
--	.long sys_acct
--	.long sys_umount		/* recycled never used phys( */
--	.long sys_ni_syscall	/* old lock syscall holder */
--	.long sys_ioctl
--	.long sys_fcntl			/* 55 */
--	.long sys_ni_syscall	/* old mpx syscall holder */
--	.long sys_setpgid
--	.long sys_ni_syscall	/* old ulimit syscall holder */
--	.long sys_ni_syscall	/* sys_olduname */
--	.long sys_umask			/* 60 */
--	.long sys_chroot
--	.long sys_ustat
--	.long sys_dup2
--	.long sys_getppid
--	.long sys_getpgrp		/* 65 */
--	.long sys_setsid
--	.long sys_sigaction
--	.long sys_sgetmask
--	.long sys_ssetmask
--	.long sys_setreuid16		/* 70 */
--	.long sys_setregid16
--	.long sys_sigsuspend
--	.long sys_sigpending
--	.long sys_sethostname
--	.long sys_setrlimit		/* 75 */
--	.long sys_old_getrlimit
--	.long sys_getrusage
--	.long sys_gettimeofday
--	.long sys_settimeofday
--	.long sys_getgroups16		/* 80 */
--	.long sys_setgroups16
--	.long sys_ni_syscall	/* sys_oldselect */
--	.long sys_symlink
--	.long sys_lstat
--	.long sys_readlink		/* 85 */
--	.long sys_uselib
--	.long sys_swapon
--	.long sys_reboot
--	.long old_readdir
--	.long old_mmap			/* 90 */
--	.long sys_munmap
--	.long sys_truncate
--	.long sys_ftruncate
--	.long sys_fchmod
--	.long sys_fchown16		/* 95 */
--	.long sys_getpriority
--	.long sys_setpriority
--	.long sys_ni_syscall	/* old profil syscall holder */
--	.long sys_statfs
--	.long sys_fstatfs		/* 100 */
--	.long sys_ni_syscall	/* ioperm */
--	.long sys_socketcall	/* Obsolete implementation of socket syscall */
--	.long sys_syslog
--	.long sys_setitimer
--	.long sys_getitimer		/* 105 */
--	.long sys_newstat
--	.long sys_newlstat
--	.long sys_newfstat
--	.long sys_uname
--	.long sys_ni_syscall		/* 110 */ /* iopl */
--	.long sys_vhangup
--	.long sys_ni_syscall	/* idle */
--	.long sys_ni_syscall	/* vm86old */
--	.long sys_wait4
--	.long sys_swapoff		/* 115 */
--	.long sys_sysinfo
--	.long sys_ipc		/* Obsolete ipc syscall implementation */
--	.long sys_fsync
--	.long sys_sigreturn
--	.long sys_clone			/* 120 */
--	.long sys_setdomainname
--	.long sys_newuname
--	.long sys_ni_syscall	/* sys_modify_ldt */
--	.long sys_adjtimex
--	.long sys_mprotect		/* 125 */
--	.long sys_sigprocmask
--	.long sys_ni_syscall		/* old "create_module" */
--	.long sys_init_module
--	.long sys_delete_module
--	.long sys_ni_syscall		/* 130: old "get_kernel_syms" */
--	.long sys_quotactl
--	.long sys_getpgid
--	.long sys_fchdir
--	.long sys_bdflush
--	.long sys_sysfs			/* 135 */
--	.long sys_personality
--	.long sys_ni_syscall	/* for afs_syscall */
--	.long sys_setfsuid16
--	.long sys_setfsgid16
--	.long sys_llseek		/* 140 */
--	.long sys_getdents
--	.long sys_select
--	.long sys_flock
--	.long sys_msync
--	.long sys_readv			/* 145 */
--	.long sys_writev
--	.long sys_getsid
--	.long sys_fdatasync
--	.long sys_sysctl
--	.long sys_mlock			/* 150 */
--	.long sys_munlock
--	.long sys_mlockall
--	.long sys_munlockall
--	.long sys_sched_setparam
--	.long sys_sched_getparam	/* 155 */
--	.long sys_sched_setscheduler
--	.long sys_sched_getscheduler
--	.long sys_sched_yield
--	.long sys_sched_get_priority_max
--	.long sys_sched_get_priority_min  /* 160 */
--	.long sys_sched_rr_get_interval
--	.long sys_nanosleep
--	.long sys_mremap
--	.long sys_setresuid16
--	.long sys_getresuid16		/* 165 */
--	.long sys_ni_syscall	/* vm86 */
--	.long sys_ni_syscall	/* old "query_module" */
--	.long sys_poll
--	.long sys_nfsservctl
--	.long sys_setresgid16		/* 170 */
--	.long sys_getresgid16
--	.long sys_prctl
--	.long sys_rt_sigreturn
--	.long sys_rt_sigaction
--	.long sys_rt_sigprocmask	/* 175 */
--	.long sys_rt_sigpending
--	.long sys_rt_sigtimedwait
--	.long sys_rt_sigqueueinfo
--	.long sys_rt_sigsuspend
--	.long sys_pread64		/* 180 */
--	.long sys_pwrite64
--	.long sys_chown16
--	.long sys_getcwd
--	.long sys_capget
--	.long sys_capset		/* 185 */
--	.long sys_sigaltstack
--	.long sys_sendfile
--	.long sys_ni_syscall	/* streams1 */
--	.long sys_ni_syscall	/* streams2 */
--	.long sys_vfork			/* 190 */
--	.long sys_getrlimit
--	.long sys_mmap2
--	.long sys_truncate64
--	.long sys_ftruncate64
--	.long sys_stat64		/* 195 */
--	.long sys_lstat64
--	.long sys_fstat64
--	.long sys_lchown
--	.long sys_getuid
--	.long sys_getgid		/* 200 */
--	.long sys_geteuid
--	.long sys_getegid
--	.long sys_setreuid
--	.long sys_setregid
--	.long sys_getgroups		/* 205 */
--	.long sys_setgroups
--	.long sys_fchown
--	.long sys_setresuid
--	.long sys_getresuid
--	.long sys_setresgid		/* 210 */
--	.long sys_getresgid
--	.long sys_chown
--	.long sys_setuid
--	.long sys_setgid
--	.long sys_setfsuid		/* 215 */
--	.long sys_setfsgid
--	.long sys_pivot_root
--	.long sys_mincore
--	.long sys_madvise
--	/* Broken-out socket family (maintain backwards compatibility in syscall
--	   numbering with 2.4) */
--	.long sys_socket		/* 220 */
--	.long sys_bind
--	.long sys_connect
--	.long sys_listen
--	.long sys_accept
--	.long sys_getsockname		/* 225 */
--	.long sys_getpeername
--	.long sys_socketpair
--	.long sys_send
--	.long sys_sendto
--	.long sys_recv			/* 230*/
--	.long sys_recvfrom
--	.long sys_shutdown
--	.long sys_setsockopt
--	.long sys_getsockopt
--	.long sys_sendmsg		/* 235 */
--	.long sys_recvmsg
--	/* Broken-out IPC family (maintain backwards compatibility in syscall
--	   numbering with 2.4) */
--	.long sys_semop
--	.long sys_semget
--	.long sys_semctl
--	.long sys_msgsnd		/* 240 */
--	.long sys_msgrcv
--	.long sys_msgget
--	.long sys_msgctl
--	.long sys_shmat
--	.long sys_shmdt			/* 245 */
--	.long sys_shmget
--	.long sys_shmctl
--	/* Rest of syscalls listed in 2.4 i386 unistd.h */
--	.long sys_getdents64
--	.long sys_fcntl64
--	.long sys_ni_syscall		/* 250 reserved for TUX */
--	.long sys_ni_syscall		/* Reserved for Security */
--	.long sys_gettid
--	.long sys_readahead
--	.long sys_setxattr
--	.long sys_lsetxattr		/* 255 */
--	.long sys_fsetxattr
--	.long sys_getxattr
--	.long sys_lgetxattr
--	.long sys_fgetxattr
--	.long sys_listxattr		/* 260 */
--	.long sys_llistxattr
--	.long sys_flistxattr
--	.long sys_removexattr
--	.long sys_lremovexattr
--	.long sys_fremovexattr  	/* 265 */
--	.long sys_tkill
--	.long sys_sendfile64
--	.long sys_futex
--	.long sys_sched_setaffinity
--	.long sys_sched_getaffinity	/* 270 */
--	.long sys_ni_syscall
--	.long sys_ni_syscall
--	.long sys_io_setup
--	.long sys_io_destroy
--	.long sys_io_getevents		/* 275 */
--	.long sys_io_submit
--	.long sys_io_cancel
--	.long sys_fadvise64
--	.long sys_ni_syscall
--	.long sys_exit_group		/* 280 */
--	/* Rest of new 2.6 syscalls */
--	.long sys_lookup_dcookie
--	.long sys_epoll_create
--	.long sys_epoll_ctl
--	.long sys_epoll_wait
-- 	.long sys_remap_file_pages	/* 285 */
-- 	.long sys_set_tid_address
-- 	.long sys_timer_create
-- 	.long sys_timer_settime
-- 	.long sys_timer_gettime
-- 	.long sys_timer_getoverrun	/* 290 */
-- 	.long sys_timer_delete
-- 	.long sys_clock_settime
-- 	.long sys_clock_gettime
-- 	.long sys_clock_getres
-- 	.long sys_clock_nanosleep	/* 295 */
--	.long sys_statfs64
--	.long sys_fstatfs64
--	.long sys_tgkill
--	.long sys_utimes
-- 	.long sys_fadvise64_64		/* 300 */
--	.long sys_ni_syscall	/* Reserved for vserver */
--	.long sys_ni_syscall	/* Reserved for mbind */
--	.long sys_ni_syscall	/* get_mempolicy */
--	.long sys_ni_syscall	/* set_mempolicy */
--	.long sys_mq_open		/* 305 */
--	.long sys_mq_unlink
--	.long sys_mq_timedsend
--	.long sys_mq_timedreceive
--	.long sys_mq_notify
--	.long sys_mq_getsetattr		/* 310 */
--	.long sys_ni_syscall	/* Reserved for kexec */
--	.long sys_waitid
--	.long sys_add_key
--	.long sys_request_key
--	.long sys_keyctl		/* 315 */
--	.long sys_ioprio_set
--	.long sys_ioprio_get
--	.long sys_inotify_init
--	.long sys_inotify_add_watch
--	.long sys_inotify_rm_watch	/* 320 */
--	.long sys_ni_syscall
--	.long sys_migrate_pages
--	.long sys_openat
--	.long sys_mkdirat
--	.long sys_mknodat		/* 325 */
--	.long sys_fchownat
--	.long sys_futimesat
--	.long sys_fstatat64
--	.long sys_unlinkat
--	.long sys_renameat		/* 330 */
--	.long sys_linkat
--	.long sys_symlinkat
--	.long sys_readlinkat
--	.long sys_fchmodat
--	.long sys_faccessat		/* 335 */
--	.long sys_pselect6
--	.long sys_ppoll
--	.long sys_unshare
--	.long sys_set_robust_list
--	.long sys_get_robust_list	/* 340 */
--	.long sys_splice
--	.long sys_sync_file_range
--	.long sys_tee
--	.long sys_vmsplice
--	.long sys_move_pages		/* 345 */
--	.long sys_getcpu
--	.long sys_epoll_pwait
--	.long sys_utimensat
--	.long sys_signalfd
--	.long sys_timerfd		/* 350 */
--	.long sys_eventfd
--	.long sys_fallocate
-diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
+-$(obj)/piggy.o: $(obj)/vmlinux_64.scr $(obj)/vmlinux.bin.gz FORCE
+-	$(call if_changed,ld)
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+new file mode 100644
+index 0000000..8182e32
+--- /dev/null
++++ b/arch/x86/boot/compressed/misc.c
+@@ -0,0 +1,413 @@
++/*
++ * misc.c
++ *
++ * This is a collection of several routines from gzip-1.0.3
++ * adapted for Linux.
++ *
++ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
++ * puts by Nick Holloway 1993, better puts by Martin Mares 1995
++ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
++ */
++
++/*
++ * we have to be careful, because no indirections are allowed here, and
++ * paravirt_ops is a kind of one. As it will only run in baremetal anyway,
++ * we just keep it from happening
++ */
++#undef CONFIG_PARAVIRT
++#ifdef CONFIG_X86_64
++#define _LINUX_STRING_H_ 1
++#define __LINUX_BITMAP_H 1
++#endif
++
++#include <linux/linkage.h>
++#include <linux/screen_info.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/boot.h>
++
++/* WARNING!!
++ * This code is compiled with -fPIC and it is relocated dynamically
++ * at run time, but no relocation processing is performed.
++ * This means that it is not safe to place pointers in static structures.
++ */
++
++/*
++ * Getting to provable safe in place decompression is hard.
++ * Worst case behaviours need to be analyzed.
++ * Background information:
++ *
++ * The file layout is:
++ *    magic[2]
++ *    method[1]
++ *    flags[1]
++ *    timestamp[4]
++ *    extraflags[1]
++ *    os[1]
++ *    compressed data blocks[N]
++ *    crc[4] orig_len[4]
++ *
++ * resulting in 18 bytes of non compressed data overhead.
++ *
++ * Files divided into blocks
++ * 1 bit (last block flag)
++ * 2 bits (block type)
++ *
++ * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved.
++ * The smallest block type encoding is always used.
++ *
++ * stored:
++ *    32 bits length in bytes.
++ *
++ * fixed:
++ *    magic fixed tree.
++ *    symbols.
++ *
++ * dynamic:
++ *    dynamic tree encoding.
++ *    symbols.
++ *
++ *
++ * The buffer for decompression in place is the length of the
++ * uncompressed data, plus a small amount extra to keep the algorithm safe.
++ * The compressed data is placed at the end of the buffer.  The output
++ * pointer is placed at the start of the buffer and the input pointer
++ * is placed where the compressed data starts.  Problems will occur
++ * when the output pointer overruns the input pointer.
++ *
++ * The output pointer can only overrun the input pointer if the input
++ * pointer is moving faster than the output pointer.  A condition only
++ * triggered by data whose compressed form is larger than the uncompressed
++ * form.
++ *
++ * The worst case at the block level is a growth of the compressed data
++ * of 5 bytes per 32767 bytes.
++ *
++ * The worst case internal to a compressed block is very hard to figure.
++ * The worst case can at least be boundined by having one bit that represents
++ * 32764 bytes and then all of the rest of the bytes representing the very
++ * very last byte.
++ *
++ * All of which is enough to compute an amount of extra data that is required
++ * to be safe.  To avoid problems at the block level allocating 5 extra bytes
++ * per 32767 bytes of data is sufficient.  To avoind problems internal to a block
++ * adding an extra 32767 bytes (the worst case uncompressed block size) is
++ * sufficient, to ensure that in the worst case the decompressed data for
++ * block will stop the byte before the compressed data for a block begins.
++ * To avoid problems with the compressed data's meta information an extra 18
++ * bytes are needed.  Leading to the formula:
++ *
++ * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
++ *
++ * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
++ * Adding 32768 instead of 32767 just makes for round numbers.
++ * Adding the decompressor_size is necessary as it musht live after all
++ * of the data as well.  Last I measured the decompressor is about 14K.
++ * 10K of actual data and 4K of bss.
++ *
++ */
++
++/*
++ * gzip declarations
++ */
++
++#define OF(args)  args
++#define STATIC static
++
++#undef memset
++#undef memcpy
++#define memzero(s, n)     memset ((s), 0, (n))
++
++typedef unsigned char  uch;
++typedef unsigned short ush;
++typedef unsigned long  ulg;
++
++#define WSIZE 0x80000000	/* Window size must be at least 32k,
++				 * and a power of two
++				 * We don't actually have a window just
++				 * a huge output buffer so I report
++				 * a 2G windows size, as that should
++				 * always be larger than our output buffer.
++				 */
++
++static uch *inbuf;	/* input buffer */
++static uch *window;	/* Sliding window buffer, (and final output buffer) */
++
++static unsigned insize;  /* valid bytes in inbuf */
++static unsigned inptr;   /* index of next byte to be processed in inbuf */
++static unsigned outcnt;  /* bytes in output buffer */
++
++/* gzip flag byte */
++#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
++#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
++#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
++#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
++#define COMMENT      0x10 /* bit 4 set: file comment present */
++#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
++#define RESERVED     0xC0 /* bit 6,7:   reserved */
++
++#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
++		
++/* Diagnostic functions */
++#ifdef DEBUG
++#  define Assert(cond,msg) {if(!(cond)) error(msg);}
++#  define Trace(x) fprintf x
++#  define Tracev(x) {if (verbose) fprintf x ;}
++#  define Tracevv(x) {if (verbose>1) fprintf x ;}
++#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
++#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
++#else
++#  define Assert(cond,msg)
++#  define Trace(x)
++#  define Tracev(x)
++#  define Tracevv(x)
++#  define Tracec(c,x)
++#  define Tracecv(c,x)
++#endif
++
++static int  fill_inbuf(void);
++static void flush_window(void);
++static void error(char *m);
++static void gzip_mark(void **);
++static void gzip_release(void **);
++  
++/*
++ * This is set up by the setup-routine at boot-time
++ */
++static unsigned char *real_mode; /* Pointer to real-mode data */
++
++#define RM_EXT_MEM_K   (*(unsigned short *)(real_mode + 0x2))
++#ifndef STANDARD_MEMORY_BIOS_CALL
++#define RM_ALT_MEM_K   (*(unsigned long *)(real_mode + 0x1e0))
++#endif
++#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
++
++extern unsigned char input_data[];
++extern int input_len;
++
++static long bytes_out = 0;
++
++static void *malloc(int size);
++static void free(void *where);
++
++static void *memset(void *s, int c, unsigned n);
++static void *memcpy(void *dest, const void *src, unsigned n);
++
++static void putstr(const char *);
++
++#ifdef CONFIG_X86_64
++#define memptr long
++#else
++#define memptr unsigned
++#endif
++
++static memptr free_mem_ptr;
++static memptr free_mem_end_ptr;
++
++#ifdef CONFIG_X86_64
++#define HEAP_SIZE             0x7000
++#else
++#define HEAP_SIZE             0x4000
++#endif
++
++static char *vidmem = (char *)0xb8000;
++static int vidport;
++static int lines, cols;
++
++#ifdef CONFIG_X86_NUMAQ
++void *xquad_portio;
++#endif
++
++#include "../../../../lib/inflate.c"
++
++static void *malloc(int size)
++{
++	void *p;
++
++	if (size <0) error("Malloc error");
++	if (free_mem_ptr <= 0) error("Memory error");
++
++	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
++
++	p = (void *)free_mem_ptr;
++	free_mem_ptr += size;
++
++	if (free_mem_ptr >= free_mem_end_ptr)
++		error("Out of memory");
++
++	return p;
++}
++
++static void free(void *where)
++{	/* Don't care */
++}
++
++static void gzip_mark(void **ptr)
++{
++	*ptr = (void *) free_mem_ptr;
++}
++
++static void gzip_release(void **ptr)
++{
++	free_mem_ptr = (memptr) *ptr;
++}
++ 
++static void scroll(void)
++{
++	int i;
++
++	memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
++	for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
++		vidmem[i] = ' ';
++}
++
++static void putstr(const char *s)
++{
++	int x,y,pos;
++	char c;
++
++#ifdef CONFIG_X86_32
++	if (RM_SCREEN_INFO.orig_video_mode == 0 && lines == 0 && cols == 0)
++		return;
++#endif
++
++	x = RM_SCREEN_INFO.orig_x;
++	y = RM_SCREEN_INFO.orig_y;
++
++	while ( ( c = *s++ ) != '\0' ) {
++		if ( c == '\n' ) {
++			x = 0;
++			if ( ++y >= lines ) {
++				scroll();
++				y--;
++			}
++		} else {
++			vidmem [(x + cols * y) * 2] = c;
++			if ( ++x >= cols ) {
++				x = 0;
++				if ( ++y >= lines ) {
++					scroll();
++					y--;
++				}
++			}
++		}
++	}
++
++	RM_SCREEN_INFO.orig_x = x;
++	RM_SCREEN_INFO.orig_y = y;
++
++	pos = (x + cols * y) * 2;	/* Update cursor position */
++	outb(14, vidport);
++	outb(0xff & (pos >> 9), vidport+1);
++	outb(15, vidport);
++	outb(0xff & (pos >> 1), vidport+1);
++}
++
++static void* memset(void* s, int c, unsigned n)
++{
++	int i;
++	char *ss = s;
++
++	for (i=0;i<n;i++) ss[i] = c;
++	return s;
++}
++
++static void* memcpy(void* dest, const void* src, unsigned n)
++{
++	int i;
++	const char *s = src;
++	char *d = dest;
++
++	for (i=0;i<n;i++) d[i] = s[i];
++	return dest;
++}
++
++/* ===========================================================================
++ * Fill the input buffer. This is called only when the buffer is empty
++ * and at least one byte is really needed.
++ */
++static int fill_inbuf(void)
++{
++	error("ran out of input data");
++	return 0;
++}
++
++/* ===========================================================================
++ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
++ * (Used for the decompressed data only.)
++ */
++static void flush_window(void)
++{
++	/* With my window equal to my output buffer
++	 * I only need to compute the crc here.
++	 */
++	ulg c = crc;         /* temporary variable */
++	unsigned n;
++	uch *in, ch;
++
++	in = window;
++	for (n = 0; n < outcnt; n++) {
++		ch = *in++;
++		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
++	}
++	crc = c;
++	bytes_out += (ulg)outcnt;
++	outcnt = 0;
++}
++
++static void error(char *x)
++{
++	putstr("\n\n");
++	putstr(x);
++	putstr("\n\n -- System halted");
++
++	while (1)
++		asm("hlt");
++}
++
++asmlinkage void decompress_kernel(void *rmode, memptr heap,
++				  uch *input_data, unsigned long input_len,
++				  uch *output)
++{
++	real_mode = rmode;
++
++	if (RM_SCREEN_INFO.orig_video_mode == 7) {
++		vidmem = (char *) 0xb0000;
++		vidport = 0x3b4;
++	} else {
++		vidmem = (char *) 0xb8000;
++		vidport = 0x3d4;
++	}
++
++	lines = RM_SCREEN_INFO.orig_video_lines;
++	cols = RM_SCREEN_INFO.orig_video_cols;
++
++	window = output;		/* Output buffer (Normally at 1M) */
++	free_mem_ptr     = heap;	/* Heap */
++	free_mem_end_ptr = heap + HEAP_SIZE;
++	inbuf  = input_data;		/* Input buffer */
++	insize = input_len;
++	inptr  = 0;
++
++#ifdef CONFIG_X86_64
++	if ((ulg)output & (__KERNEL_ALIGN - 1))
++		error("Destination address not 2M aligned");
++	if ((ulg)output >= 0xffffffffffUL)
++		error("Destination address too large");
++#else
++	if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
++		error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
++	if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
++		error("Destination address too large");
++#ifndef CONFIG_RELOCATABLE
++	if ((u32)output != LOAD_PHYSICAL_ADDR)
++		error("Wrong destination address");
++#endif
++#endif
++
++	makecrc();
++	putstr("\nDecompressing Linux... ");
++	gunzip();
++	putstr("done.\nBooting the kernel.\n");
++	return;
++}
+diff --git a/arch/x86/boot/compressed/misc_32.c b/arch/x86/boot/compressed/misc_32.c
 deleted file mode 100644
-index 06f3c17..0000000
---- a/arch/sh64/kernel/time.c
+index b74d60d..0000000
+--- a/arch/x86/boot/compressed/misc_32.c
 +++ /dev/null
-@@ -1,593 +0,0 @@
+@@ -1,382 +0,0 @@
 -/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/kernel/time.c
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003, 2004  Paul Mundt
-- * Copyright (C) 2003  Richard Curnow
+- * misc.c
+- * 
+- * This is a collection of several routines from gzip-1.0.3 
+- * adapted for Linux.
 - *
-- *    Original TMU/RTC code taken from sh version.
-- *    Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
-- *      Some code taken from i386 version.
-- *      Copyright (C) 1991, 1992, 1995  Linus Torvalds
+- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+- * puts by Nick Holloway 1993, better puts by Martin Mares 1995
+- * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 - */
 -
--#include <linux/errno.h>
--#include <linux/rwsem.h>
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/param.h>
--#include <linux/string.h>
--#include <linux/mm.h>
--#include <linux/interrupt.h>
--#include <linux/time.h>
--#include <linux/delay.h>
--#include <linux/init.h>
--#include <linux/profile.h>
--#include <linux/smp.h>
--#include <linux/module.h>
--#include <linux/bcd.h>
+-#undef CONFIG_PARAVIRT
+-#include <linux/linkage.h>
+-#include <linux/vmalloc.h>
+-#include <linux/screen_info.h>
+-#include <asm/io.h>
+-#include <asm/page.h>
+-#include <asm/boot.h>
 -
--#include <asm/registers.h>	 /* required by inline __asm__ stmt. */
+-/* WARNING!!
+- * This code is compiled with -fPIC and it is relocated dynamically
+- * at run time, but no relocation processing is performed.
+- * This means that it is not safe to place pointers in static structures.
+- */
 -
--#include <asm/processor.h>
--#include <asm/uaccess.h>
--#include <asm/io.h>
--#include <asm/irq.h>
--#include <asm/delay.h>
+-/*
+- * Getting to provable safe in place decompression is hard.
+- * Worst case behaviours need to be analyzed.
+- * Background information:
+- *
+- * The file layout is:
+- *    magic[2]
+- *    method[1]
+- *    flags[1]
+- *    timestamp[4]
+- *    extraflags[1]
+- *    os[1]
+- *    compressed data blocks[N]
+- *    crc[4] orig_len[4]
+- *
+- * resulting in 18 bytes of non compressed data overhead.
+- *
+- * Files divided into blocks
+- * 1 bit (last block flag)
+- * 2 bits (block type)
+- *
+- * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved.
+- * The smallest block type encoding is always used.
+- *
+- * stored:
+- *    32 bits length in bytes.
+- *
+- * fixed:
+- *    magic fixed tree.
+- *    symbols.
+- *
+- * dynamic:
+- *    dynamic tree encoding.
+- *    symbols.
+- *
+- *
+- * The buffer for decompression in place is the length of the
+- * uncompressed data, plus a small amount extra to keep the algorithm safe.
+- * The compressed data is placed at the end of the buffer.  The output
+- * pointer is placed at the start of the buffer and the input pointer
+- * is placed where the compressed data starts.  Problems will occur
+- * when the output pointer overruns the input pointer.
+- *
+- * The output pointer can only overrun the input pointer if the input
+- * pointer is moving faster than the output pointer.  A condition only
+- * triggered by data whose compressed form is larger than the uncompressed
+- * form.
+- *
+- * The worst case at the block level is a growth of the compressed data
+- * of 5 bytes per 32767 bytes.
+- *
+- * The worst case internal to a compressed block is very hard to figure.
+- * The worst case can at least be boundined by having one bit that represents
+- * 32764 bytes and then all of the rest of the bytes representing the very
+- * very last byte.
+- *
+- * All of which is enough to compute an amount of extra data that is required
+- * to be safe.  To avoid problems at the block level allocating 5 extra bytes
+- * per 32767 bytes of data is sufficient.  To avoind problems internal to a block
+- * adding an extra 32767 bytes (the worst case uncompressed block size) is
+- * sufficient, to ensure that in the worst case the decompressed data for
+- * block will stop the byte before the compressed data for a block begins.
+- * To avoid problems with the compressed data's meta information an extra 18
+- * bytes are needed.  Leading to the formula:
+- *
+- * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
+- *
+- * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
+- * Adding 32768 instead of 32767 just makes for round numbers.
+- * Adding the decompressor_size is necessary as it musht live after all
+- * of the data as well.  Last I measured the decompressor is about 14K.
+- * 10K of actual data and 4K of bss.
+- *
+- */
 -
--#include <linux/timex.h>
--#include <linux/irq.h>
--#include <asm/hardware.h>
+-/*
+- * gzip declarations
+- */
 -
--#define TMU_TOCR_INIT	0x00
--#define TMU0_TCR_INIT	0x0020
--#define TMU_TSTR_INIT	1
--#define TMU_TSTR_OFF	0
+-#define OF(args)  args
+-#define STATIC static
 -
--/* RCR1 Bits */
--#define RCR1_CF		0x80	/* Carry Flag             */
--#define RCR1_CIE	0x10	/* Carry Interrupt Enable */
--#define RCR1_AIE	0x08	/* Alarm Interrupt Enable */
--#define RCR1_AF		0x01	/* Alarm Flag             */
+-#undef memset
+-#undef memcpy
+-#define memzero(s, n)     memset ((s), 0, (n))
 -
--/* RCR2 Bits */
--#define RCR2_PEF	0x80	/* PEriodic interrupt Flag */
--#define RCR2_PESMASK	0x70	/* Periodic interrupt Set  */
--#define RCR2_RTCEN	0x08	/* ENable RTC              */
--#define RCR2_ADJ	0x04	/* ADJustment (30-second)  */
--#define RCR2_RESET	0x02	/* Reset bit               */
--#define RCR2_START	0x01	/* Start bit               */
+-typedef unsigned char  uch;
+-typedef unsigned short ush;
+-typedef unsigned long  ulg;
 -
--/* Clock, Power and Reset Controller */
--#define	CPRC_BLOCK_OFF	0x01010000
--#define CPRC_BASE	PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
+-#define WSIZE 0x80000000	/* Window size must be at least 32k,
+-				 * and a power of two
+-				 * We don't actually have a window just
+-				 * a huge output buffer so I report
+-				 * a 2G windows size, as that should
+-				 * always be larger than our output buffer.
+-				 */
 -
--#define FRQCR		(cprc_base+0x0)
--#define WTCSR		(cprc_base+0x0018)
--#define STBCR		(cprc_base+0x0030)
+-static uch *inbuf;	/* input buffer */
+-static uch *window;	/* Sliding window buffer, (and final output buffer) */
 -
--/* Time Management Unit */
--#define	TMU_BLOCK_OFF	0x01020000
--#define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
--#define TMU0_BASE	tmu_base + 0x8 + (0xc * 0x0)
--#define TMU1_BASE	tmu_base + 0x8 + (0xc * 0x1)
--#define TMU2_BASE	tmu_base + 0x8 + (0xc * 0x2)
+-static unsigned insize;  /* valid bytes in inbuf */
+-static unsigned inptr;   /* index of next byte to be processed in inbuf */
+-static unsigned outcnt;  /* bytes in output buffer */
 -
--#define TMU_TOCR	tmu_base+0x0	/* Byte access */
--#define TMU_TSTR	tmu_base+0x4	/* Byte access */
+-/* gzip flag byte */
+-#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
+-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+-#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
+-#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
+-#define COMMENT      0x10 /* bit 4 set: file comment present */
+-#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
+-#define RESERVED     0xC0 /* bit 6,7:   reserved */
 -
--#define TMU0_TCOR	TMU0_BASE+0x0	/* Long access */
--#define TMU0_TCNT	TMU0_BASE+0x4	/* Long access */
--#define TMU0_TCR	TMU0_BASE+0x8	/* Word access */
+-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+-		
+-/* Diagnostic functions */
+-#ifdef DEBUG
+-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
+-#  define Trace(x) fprintf x
+-#  define Tracev(x) {if (verbose) fprintf x ;}
+-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
+-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+-#else
+-#  define Assert(cond,msg)
+-#  define Trace(x)
+-#  define Tracev(x)
+-#  define Tracevv(x)
+-#  define Tracec(c,x)
+-#  define Tracecv(c,x)
+-#endif
 -
--/* Real Time Clock */
--#define	RTC_BLOCK_OFF	0x01040000
--#define RTC_BASE	PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
+-static int  fill_inbuf(void);
+-static void flush_window(void);
+-static void error(char *m);
+-static void gzip_mark(void **);
+-static void gzip_release(void **);
+-  
+-/*
+- * This is set up by the setup-routine at boot-time
+- */
+-static unsigned char *real_mode; /* Pointer to real-mode data */
 -
--#define R64CNT  	rtc_base+0x00
--#define RSECCNT 	rtc_base+0x04
--#define RMINCNT 	rtc_base+0x08
--#define RHRCNT  	rtc_base+0x0c
--#define RWKCNT  	rtc_base+0x10
--#define RDAYCNT 	rtc_base+0x14
--#define RMONCNT 	rtc_base+0x18
--#define RYRCNT  	rtc_base+0x1c	/* 16bit */
--#define RSECAR  	rtc_base+0x20
--#define RMINAR  	rtc_base+0x24
--#define RHRAR   	rtc_base+0x28
--#define RWKAR   	rtc_base+0x2c
--#define RDAYAR  	rtc_base+0x30
--#define RMONAR  	rtc_base+0x34
--#define RCR1    	rtc_base+0x38
--#define RCR2    	rtc_base+0x3c
+-#define RM_EXT_MEM_K   (*(unsigned short *)(real_mode + 0x2))
+-#ifndef STANDARD_MEMORY_BIOS_CALL
+-#define RM_ALT_MEM_K   (*(unsigned long *)(real_mode + 0x1e0))
+-#endif
+-#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
 -
--#define TICK_SIZE (tick_nsec / 1000)
+-extern unsigned char input_data[];
+-extern int input_len;
 -
--static unsigned long tmu_base, rtc_base;
--unsigned long cprc_base;
+-static long bytes_out = 0;
 -
--/* Variables to allow interpolation of time of day to resolution better than a
-- * jiffy. */
+-static void *malloc(int size);
+-static void free(void *where);
 -
--/* This is effectively protected by xtime_lock */
--static unsigned long ctc_last_interrupt;
--static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
+-static void *memset(void *s, int c, unsigned n);
+-static void *memcpy(void *dest, const void *src, unsigned n);
 -
--#define CTC_JIFFY_SCALE_SHIFT 40
+-static void putstr(const char *);
 -
--/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
--static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
+-static unsigned long free_mem_ptr;
+-static unsigned long free_mem_end_ptr;
 -
--/* Estimate number of microseconds that have elapsed since the last timer tick,
--   by scaling the delta that has occurred in the CTC register.
+-#define HEAP_SIZE             0x4000
 -
--   WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
--   the CPU clock rate.  If the CPU sleeps, the CTC stops counting.  Bear this
--   in mind if enabling SLEEP_WORKS in process.c.  In that case, this algorithm
--   probably needs to use TMU.TCNT0 instead.  This will work even if the CPU is
--   sleeping, though will be coarser.
+-static char *vidmem = (char *)0xb8000;
+-static int vidport;
+-static int lines, cols;
 -
--   FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
--   is running or if the freq or tick arguments of adjtimex are modified after
--   we have calibrated the scaling factor?  This will result in either a jump at
--   the end of a tick period, or a wrap backwards at the start of the next one,
--   if the application is reading the time of day often enough.  I think we
--   ought to do better than this.  For this reason, usecs_per_jiffy is left
--   separated out in the calculation below.  This allows some future hook into
--   the adjtime-related stuff in kernel/timer.c to remove this hazard.
+-#ifdef CONFIG_X86_NUMAQ
+-void *xquad_portio;
+-#endif
 -
--*/
+-#include "../../../../lib/inflate.c"
 -
--static unsigned long usecs_since_tick(void)
+-static void *malloc(int size)
 -{
--	unsigned long long current_ctc;
--	long ctc_ticks_since_interrupt;
--	unsigned long long ull_ctc_ticks_since_interrupt;
--	unsigned long result;
+-	void *p;
 -
--	unsigned long long mul1_out;
--	unsigned long long mul1_out_high;
--	unsigned long long mul2_out_low, mul2_out_high;
+-	if (size <0) error("Malloc error");
+-	if (free_mem_ptr <= 0) error("Memory error");
 -
--	/* Read CTC register */
--	asm ("getcon cr62, %0" : "=r" (current_ctc));
--	/* Note, the CTC counts down on each CPU clock, not up.
--	   Note(2), use long type to get correct wraparound arithmetic when
--	   the counter crosses zero. */
--	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
--	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
+-	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
 -
--	/* Inline assembly to do 32x32x32->64 multiplier */
--	asm volatile ("mulu.l %1, %2, %0" :
--	     "=r" (mul1_out) :
--	     "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
+-	p = (void *)free_mem_ptr;
+-	free_mem_ptr += size;
 -
--	mul1_out_high = mul1_out >> 32;
+-	if (free_mem_ptr >= free_mem_end_ptr)
+-		error("Out of memory");
 -
--	asm volatile ("mulu.l %1, %2, %0" :
--	     "=r" (mul2_out_low) :
--	     "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
+-	return p;
+-}
 -
--#if 1
--	asm volatile ("mulu.l %1, %2, %0" :
--	     "=r" (mul2_out_high) :
--	     "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
--#endif
+-static void free(void *where)
+-{	/* Don't care */
+-}
 -
--	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
+-static void gzip_mark(void **ptr)
+-{
+-	*ptr = (void *) free_mem_ptr;
+-}
 -
--	return result;
+-static void gzip_release(void **ptr)
+-{
+-	free_mem_ptr = (unsigned long) *ptr;
 -}
+- 
+-static void scroll(void)
+-{
+-	int i;
 -
--void do_gettimeofday(struct timeval *tv)
+-	memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
+-	for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
+-		vidmem[i] = ' ';
+-}
+-
+-static void putstr(const char *s)
 -{
--	unsigned long flags;
--	unsigned long seq;
--	unsigned long usec, sec;
+-	int x,y,pos;
+-	char c;
 -
--	do {
--		seq = read_seqbegin_irqsave(&xtime_lock, flags);
--		usec = usecs_since_tick();
--		sec = xtime.tv_sec;
--		usec += xtime.tv_nsec / 1000;
--	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+-	if (RM_SCREEN_INFO.orig_video_mode == 0 && lines == 0 && cols == 0)
+-		return;
 -
--	while (usec >= 1000000) {
--		usec -= 1000000;
--		sec++;
+-	x = RM_SCREEN_INFO.orig_x;
+-	y = RM_SCREEN_INFO.orig_y;
+-
+-	while ( ( c = *s++ ) != '\0' ) {
+-		if ( c == '\n' ) {
+-			x = 0;
+-			if ( ++y >= lines ) {
+-				scroll();
+-				y--;
+-			}
+-		} else {
+-			vidmem [ ( x + cols * y ) * 2 ] = c;
+-			if ( ++x >= cols ) {
+-				x = 0;
+-				if ( ++y >= lines ) {
+-					scroll();
+-					y--;
+-				}
+-			}
+-		}
 -	}
 -
--	tv->tv_sec = sec;
--	tv->tv_usec = usec;
+-	RM_SCREEN_INFO.orig_x = x;
+-	RM_SCREEN_INFO.orig_y = y;
+-
+-	pos = (x + cols * y) * 2;	/* Update cursor position */
+-	outb_p(14, vidport);
+-	outb_p(0xff & (pos >> 9), vidport+1);
+-	outb_p(15, vidport);
+-	outb_p(0xff & (pos >> 1), vidport+1);
 -}
 -
--int do_settimeofday(struct timespec *tv)
+-static void* memset(void* s, int c, unsigned n)
 -{
--	time_t wtm_sec, sec = tv->tv_sec;
--	long wtm_nsec, nsec = tv->tv_nsec;
--
--	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
--		return -EINVAL;
--
--	write_seqlock_irq(&xtime_lock);
--	/*
--	 * This is revolting. We need to set "xtime" correctly. However, the
--	 * value in this location is the value at the most recent update of
--	 * wall time.  Discover what correction gettimeofday() would have
--	 * made, and then undo it!
--	 */
--	nsec -= 1000 * usecs_since_tick();
+-	int i;
+-	char *ss = (char*)s;
 -
--	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
--	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+-	for (i=0;i<n;i++) ss[i] = c;
+-	return s;
+-}
 -
--	set_normalized_timespec(&xtime, sec, nsec);
--	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+-static void* memcpy(void* dest, const void* src, unsigned n)
+-{
+-	int i;
+-	char *d = (char *)dest, *s = (char *)src;
 -
--	ntp_clear();
--	write_sequnlock_irq(&xtime_lock);
--	clock_was_set();
+-	for (i=0;i<n;i++) d[i] = s[i];
+-	return dest;
+-}
 -
+-/* ===========================================================================
+- * Fill the input buffer. This is called only when the buffer is empty
+- * and at least one byte is really needed.
+- */
+-static int fill_inbuf(void)
+-{
+-	error("ran out of input data");
 -	return 0;
 -}
--EXPORT_SYMBOL(do_settimeofday);
 -
--static int set_rtc_time(unsigned long nowtime)
+-/* ===========================================================================
+- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
+- * (Used for the decompressed data only.)
+- */
+-static void flush_window(void)
 -{
--	int retval = 0;
--	int real_seconds, real_minutes, cmos_minutes;
+-	/* With my window equal to my output buffer
+-	 * I only need to compute the crc here.
+-	 */
+-	ulg c = crc;         /* temporary variable */
+-	unsigned n;
+-	uch *in, ch;
 -
--	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */
+-	in = window;
+-	for (n = 0; n < outcnt; n++) {
+-		ch = *in++;
+-		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+-	}
+-	crc = c;
+-	bytes_out += (ulg)outcnt;
+-	outcnt = 0;
+-}
 -
--	cmos_minutes = ctrl_inb(RMINCNT);
--	BCD_TO_BIN(cmos_minutes);
+-static void error(char *x)
+-{
+-	putstr("\n\n");
+-	putstr(x);
+-	putstr("\n\n -- System halted");
 -
--	/*
--	 * since we're only adjusting minutes and seconds,
--	 * don't interfere with hour overflow. This avoids
--	 * messing with unknown time zones but requires your
--	 * RTC not to be off by more than 15 minutes
--	 */
--	real_seconds = nowtime % 60;
--	real_minutes = nowtime / 60;
--	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
--		real_minutes += 30;	/* correct for half hour time zone */
--	real_minutes %= 60;
+-	while(1);	/* Halt */
+-}
 -
--	if (abs(real_minutes - cmos_minutes) < 30) {
--		BIN_TO_BCD(real_seconds);
--		BIN_TO_BCD(real_minutes);
--		ctrl_outb(real_seconds, RSECCNT);
--		ctrl_outb(real_minutes, RMINCNT);
+-asmlinkage void decompress_kernel(void *rmode, unsigned long end,
+-			uch *input_data, unsigned long input_len, uch *output)
+-{
+-	real_mode = rmode;
+-
+-	if (RM_SCREEN_INFO.orig_video_mode == 7) {
+-		vidmem = (char *) 0xb0000;
+-		vidport = 0x3b4;
 -	} else {
--		printk(KERN_WARNING
--		       "set_rtc_time: can't update from %d to %d\n",
--		       cmos_minutes, real_minutes);
--		retval = -1;
+-		vidmem = (char *) 0xb8000;
+-		vidport = 0x3d4;
 -	}
 -
--	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */
+-	lines = RM_SCREEN_INFO.orig_video_lines;
+-	cols = RM_SCREEN_INFO.orig_video_cols;
 -
--	return retval;
+-	window = output;  	/* Output buffer (Normally at 1M) */
+-	free_mem_ptr     = end;	/* Heap  */
+-	free_mem_end_ptr = end + HEAP_SIZE;
+-	inbuf  = input_data;	/* Input buffer */
+-	insize = input_len;
+-	inptr  = 0;
+-
+-	if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
+-		error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
+-	if (end > ((-__PAGE_OFFSET-(512 <<20)-1) & 0x7fffffff))
+-		error("Destination address too large");
+-#ifndef CONFIG_RELOCATABLE
+-	if ((u32)output != LOAD_PHYSICAL_ADDR)
+-		error("Wrong destination address");
+-#endif
+-
+-	makecrc();
+-	putstr("Uncompressing Linux... ");
+-	gunzip();
+-	putstr("Ok, booting the kernel.\n");
+-	return;
 -}
+diff --git a/arch/x86/boot/compressed/misc_64.c b/arch/x86/boot/compressed/misc_64.c
+deleted file mode 100644
+index 6ea015a..0000000
+--- a/arch/x86/boot/compressed/misc_64.c
++++ /dev/null
+@@ -1,371 +0,0 @@
+-/*
+- * misc.c
+- * 
+- * This is a collection of several routines from gzip-1.0.3 
+- * adapted for Linux.
+- *
+- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+- * puts by Nick Holloway 1993, better puts by Martin Mares 1995
+- * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+- */
 -
--/* last time the RTC clock got updated */
--static long last_rtc_update = 0;
+-#define _LINUX_STRING_H_ 1
+-#define __LINUX_BITMAP_H 1
+-
+-#include <linux/linkage.h>
+-#include <linux/screen_info.h>
+-#include <asm/io.h>
+-#include <asm/page.h>
+-
+-/* WARNING!!
+- * This code is compiled with -fPIC and it is relocated dynamically
+- * at run time, but no relocation processing is performed.
+- * This means that it is not safe to place pointers in static structures.
+- */
 -
 -/*
-- * timer_interrupt() needs to keep up the real-time clock,
-- * as well as call the "do_timer()" routine every clocktick
+- * Getting to provable safe in place decompression is hard.
+- * Worst case behaviours need to be analyzed.
+- * Background information:
+- *
+- * The file layout is:
+- *    magic[2]
+- *    method[1]
+- *    flags[1]
+- *    timestamp[4]
+- *    extraflags[1]
+- *    os[1]
+- *    compressed data blocks[N]
+- *    crc[4] orig_len[4]
+- *
+- * resulting in 18 bytes of non compressed data overhead.
+- *
+- * Files divided into blocks
+- * 1 bit (last block flag)
+- * 2 bits (block type)
+- *
+- * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved.
+- * The smallest block type encoding is always used.
+- *
+- * stored:
+- *    32 bits length in bytes.
+- *
+- * fixed:
+- *    magic fixed tree.
+- *    symbols.
+- *
+- * dynamic:
+- *    dynamic tree encoding.
+- *    symbols.
+- *
+- *
+- * The buffer for decompression in place is the length of the
+- * uncompressed data, plus a small amount extra to keep the algorithm safe.
+- * The compressed data is placed at the end of the buffer.  The output
+- * pointer is placed at the start of the buffer and the input pointer
+- * is placed where the compressed data starts.  Problems will occur
+- * when the output pointer overruns the input pointer.
+- *
+- * The output pointer can only overrun the input pointer if the input
+- * pointer is moving faster than the output pointer.  A condition only
+- * triggered by data whose compressed form is larger than the uncompressed
+- * form.
+- *
+- * The worst case at the block level is a growth of the compressed data
+- * of 5 bytes per 32767 bytes.
+- *
+- * The worst case internal to a compressed block is very hard to figure.
+- * The worst case can at least be boundined by having one bit that represents
+- * 32764 bytes and then all of the rest of the bytes representing the very
+- * very last byte.
+- *
+- * All of which is enough to compute an amount of extra data that is required
+- * to be safe.  To avoid problems at the block level allocating 5 extra bytes
+- * per 32767 bytes of data is sufficient.  To avoind problems internal to a block
+- * adding an extra 32767 bytes (the worst case uncompressed block size) is
+- * sufficient, to ensure that in the worst case the decompressed data for
+- * block will stop the byte before the compressed data for a block begins.
+- * To avoid problems with the compressed data's meta information an extra 18
+- * bytes are needed.  Leading to the formula:
+- *
+- * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
+- *
+- * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
+- * Adding 32768 instead of 32767 just makes for round numbers.
+- * Adding the decompressor_size is necessary as it musht live after all
+- * of the data as well.  Last I measured the decompressor is about 14K.
+- * 10K of actual data and 4K of bss.
+- *
 - */
--static inline void do_timer_interrupt(void)
--{
--	unsigned long long current_ctc;
--	asm ("getcon cr62, %0" : "=r" (current_ctc));
--	ctc_last_interrupt = (unsigned long) current_ctc;
 -
--	do_timer(1);
--#ifndef CONFIG_SMP
--	update_process_times(user_mode(get_irq_regs()));
--#endif
--	if (current->pid)
--		profile_tick(CPU_PROFILING);
+-/*
+- * gzip declarations
+- */
 -
--#ifdef CONFIG_HEARTBEAT
--	{
--		extern void heartbeat(void);
+-#define OF(args)  args
+-#define STATIC static
 -
--		heartbeat();
--	}
--#endif
+-#undef memset
+-#undef memcpy
+-#define memzero(s, n)     memset ((s), 0, (n))
 -
--	/*
--	 * If we have an externally synchronized Linux clock, then update
--	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
--	 * called as close as possible to 500 ms before the new second starts.
--	 */
--	if (ntp_synced() &&
--	    xtime.tv_sec > last_rtc_update + 660 &&
--	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
--	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
--		if (set_rtc_time(xtime.tv_sec) == 0)
--			last_rtc_update = xtime.tv_sec;
--		else
--			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
--	}
--}
+-typedef unsigned char  uch;
+-typedef unsigned short ush;
+-typedef unsigned long  ulg;
+-
+-#define WSIZE 0x80000000	/* Window size must be at least 32k,
+-				 * and a power of two
+-				 * We don't actually have a window just
+-				 * a huge output buffer so I report
+-				 * a 2G windows size, as that should
+-				 * always be larger than our output buffer.
+-				 */
+-
+-static uch *inbuf;	/* input buffer */
+-static uch *window;	/* Sliding window buffer, (and final output buffer) */
+-
+-static unsigned insize;  /* valid bytes in inbuf */
+-static unsigned inptr;   /* index of next byte to be processed in inbuf */
+-static unsigned outcnt;  /* bytes in output buffer */
+-
+-/* gzip flag byte */
+-#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
+-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+-#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
+-#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
+-#define COMMENT      0x10 /* bit 4 set: file comment present */
+-#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
+-#define RESERVED     0xC0 /* bit 6,7:   reserved */
 -
+-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+-		
+-/* Diagnostic functions */
+-#ifdef DEBUG
+-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
+-#  define Trace(x) fprintf x
+-#  define Tracev(x) {if (verbose) fprintf x ;}
+-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
+-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+-#else
+-#  define Assert(cond,msg)
+-#  define Trace(x)
+-#  define Tracev(x)
+-#  define Tracevv(x)
+-#  define Tracec(c,x)
+-#  define Tracecv(c,x)
+-#endif
+-
+-static int  fill_inbuf(void);
+-static void flush_window(void);
+-static void error(char *m);
+-static void gzip_mark(void **);
+-static void gzip_release(void **);
+-  
 -/*
-- * This is the same as the above, except we _also_ save the current
-- * Time Stamp Counter value at the time of the timer interrupt, so that
-- * we later on can estimate the time of day more exactly.
+- * This is set up by the setup-routine at boot-time
 - */
--static irqreturn_t timer_interrupt(int irq, void *dev_id)
--{
--	unsigned long timer_status;
+-static unsigned char *real_mode; /* Pointer to real-mode data */
 -
--	/* Clear UNF bit */
--	timer_status = ctrl_inw(TMU0_TCR);
--	timer_status &= ~0x100;
--	ctrl_outw(timer_status, TMU0_TCR);
+-#define RM_EXT_MEM_K   (*(unsigned short *)(real_mode + 0x2))
+-#ifndef STANDARD_MEMORY_BIOS_CALL
+-#define RM_ALT_MEM_K   (*(unsigned long *)(real_mode + 0x1e0))
+-#endif
+-#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
 -
--	/*
--	 * Here we are in the timer irq handler. We just have irqs locally
--	 * disabled but we don't know if the timer_bh is running on the other
--	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
--	 * the irq version of write_lock because as just said we have irq
--	 * locally disabled. -arca
--	 */
--	write_lock(&xtime_lock);
--	do_timer_interrupt();
--	write_unlock(&xtime_lock);
+-extern unsigned char input_data[];
+-extern int input_len;
 -
--	return IRQ_HANDLED;
--}
+-static long bytes_out = 0;
 -
--static unsigned long get_rtc_time(void)
--{
--	unsigned int sec, min, hr, wk, day, mon, yr, yr100;
+-static void *malloc(int size);
+-static void free(void *where);
 -
-- again:
--	do {
--		ctrl_outb(0, RCR1);  /* Clear CF-bit */
--		sec = ctrl_inb(RSECCNT);
--		min = ctrl_inb(RMINCNT);
--		hr  = ctrl_inb(RHRCNT);
--		wk  = ctrl_inb(RWKCNT);
--		day = ctrl_inb(RDAYCNT);
--		mon = ctrl_inb(RMONCNT);
--		yr  = ctrl_inw(RYRCNT);
--		yr100 = (yr >> 8);
--		yr &= 0xff;
--	} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);
+-static void *memset(void *s, int c, unsigned n);
+-static void *memcpy(void *dest, const void *src, unsigned n);
 -
--	BCD_TO_BIN(yr100);
--	BCD_TO_BIN(yr);
--	BCD_TO_BIN(mon);
--	BCD_TO_BIN(day);
--	BCD_TO_BIN(hr);
--	BCD_TO_BIN(min);
--	BCD_TO_BIN(sec);
+-static void putstr(const char *);
 -
--	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
--	    hr > 23 || min > 59 || sec > 59) {
--		printk(KERN_ERR
--		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
--		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
--		ctrl_outb(0, RSECCNT);
--		ctrl_outb(0, RMINCNT);
--		ctrl_outb(0, RHRCNT);
--		ctrl_outb(6, RWKCNT);
--		ctrl_outb(1, RDAYCNT);
--		ctrl_outb(1, RMONCNT);
--		ctrl_outw(0x2000, RYRCNT);
--		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */
--		goto again;
--	}
+-static long free_mem_ptr;
+-static long free_mem_end_ptr;
 -
--	return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
--}
+-#define HEAP_SIZE             0x7000
 -
--static __init unsigned int get_cpu_hz(void)
+-static char *vidmem = (char *)0xb8000;
+-static int vidport;
+-static int lines, cols;
+-
+-#include "../../../../lib/inflate.c"
+-
+-static void *malloc(int size)
 -{
--	unsigned int count;
--	unsigned long __dummy;
--	unsigned long ctc_val_init, ctc_val;
+-	void *p;
 -
--	/*
--	** Regardless the toolchain, force the compiler to use the
--	** arbitrary register r3 as a clock tick counter.
--	** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
--	*/
--	register unsigned long long  __rtc_irq_flag __asm__ ("r3");
+-	if (size <0) error("Malloc error");
+-	if (free_mem_ptr <= 0) error("Memory error");
 -
--	local_irq_enable();
--	do {} while (ctrl_inb(R64CNT) != 0);
--	ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */
+-	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
 -
--	/*
--	 * r3 is arbitrary. CDC does not support "=z".
--	 */
--	ctc_val_init = 0xffffffff;
--	ctc_val = ctc_val_init;
+-	p = (void *)free_mem_ptr;
+-	free_mem_ptr += size;
 -
--	asm volatile("gettr	tr0, %1\n\t"
--		     "putcon	%0, " __CTC "\n\t"
--		     "and	%2, r63, %2\n\t"
--		     "pta	$+4, tr0\n\t"
--		     "beq/l	%2, r63, tr0\n\t"
--		     "ptabs	%1, tr0\n\t"
--		     "getcon	" __CTC ", %0\n\t"
--		: "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
--		: "0" (0));
--	local_irq_disable();
--	/*
--	 * SH-3:
--	 * CPU clock = 4 stages * loop
--	 * tst    rm,rm      if id ex
--	 * bt/s   1b            if id ex
--	 * add    #1,rd            if id ex
--         *                            (if) pipe line stole
--	 * tst    rm,rm                  if id ex
--         * ....
--	 *
--	 *
--	 * SH-4:
--	 * CPU clock = 6 stages * loop
--	 * I don't know why.
--         * ....
--	 *
--	 * SH-5:
--	 * Use CTC register to count.  This approach returns the right value
--	 * even if the I-cache is disabled (e.g. whilst debugging.)
--	 *
--	 */
+-	if (free_mem_ptr >= free_mem_end_ptr)
+-		error("Out of memory");
 -
--	count = ctc_val_init - ctc_val; /* CTC counts down */
+-	return p;
+-}
 -
--#if defined (CONFIG_SH_SIMULATOR)
--	/*
--	 * Let's pretend we are a 5MHz SH-5 to avoid a too
--	 * little timer interval. Also to keep delay
--	 * calibration within a reasonable time.
--	 */
--	return 5000000;
--#else
--	/*
--	 * This really is count by the number of clock cycles
--         * by the ratio between a complete R64CNT
--         * wrap-around (128) and CUI interrupt being raised (64).
--	 */
--	return count*2;
--#endif
+-static void free(void *where)
+-{	/* Don't care */
 -}
 -
--static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
+-static void gzip_mark(void **ptr)
 -{
--	struct pt_regs *regs = get_irq_regs();
--
--	ctrl_outb(0, RCR1);	/* Disable Carry Interrupts */
--	regs->regs[3] = 1;	/* Using r3 */
+-	*ptr = (void *) free_mem_ptr;
+-}
 -
--	return IRQ_HANDLED;
+-static void gzip_release(void **ptr)
+-{
+-	free_mem_ptr = (long) *ptr;
 -}
+- 
+-static void scroll(void)
+-{
+-	int i;
 -
--static struct irqaction irq0  = {
--	.handler = timer_interrupt,
--	.flags = IRQF_DISABLED,
--	.mask = CPU_MASK_NONE,
--	.name = "timer",
--};
--static struct irqaction irq1  = {
--	.handler = sh64_rtc_interrupt,
--	.flags = IRQF_DISABLED,
--	.mask = CPU_MASK_NONE,
--	.name = "rtc",
--};
+-	memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
+-	for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
+-		vidmem[i] = ' ';
+-}
 -
--void __init time_init(void)
+-static void putstr(const char *s)
 -{
--	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
--	unsigned long interval;
--	unsigned long frqcr, ifc, pfc;
--	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
--#define bfc_table ifc_table	/* Same */
--#define pfc_table ifc_table	/* Same */
+-	int x,y,pos;
+-	char c;
 -
--	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
--	if (!tmu_base) {
--		panic("Unable to remap TMU\n");
--	}
+-	x = RM_SCREEN_INFO.orig_x;
+-	y = RM_SCREEN_INFO.orig_y;
 -
--	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
--	if (!rtc_base) {
--		panic("Unable to remap RTC\n");
+-	while ( ( c = *s++ ) != '\0' ) {
+-		if ( c == '\n' ) {
+-			x = 0;
+-			if ( ++y >= lines ) {
+-				scroll();
+-				y--;
+-			}
+-		} else {
+-			vidmem [ ( x + cols * y ) * 2 ] = c; 
+-			if ( ++x >= cols ) {
+-				x = 0;
+-				if ( ++y >= lines ) {
+-					scroll();
+-					y--;
+-				}
+-			}
+-		}
 -	}
 -
--	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
--	if (!cprc_base) {
--		panic("Unable to remap CPRC\n");
--	}
+-	RM_SCREEN_INFO.orig_x = x;
+-	RM_SCREEN_INFO.orig_y = y;
 -
--	xtime.tv_sec = get_rtc_time();
--	xtime.tv_nsec = 0;
+-	pos = (x + cols * y) * 2;	/* Update cursor position */
+-	outb_p(14, vidport);
+-	outb_p(0xff & (pos >> 9), vidport+1);
+-	outb_p(15, vidport);
+-	outb_p(0xff & (pos >> 1), vidport+1);
+-}
 -
--	setup_irq(TIMER_IRQ, &irq0);
--	setup_irq(RTC_IRQ, &irq1);
+-static void* memset(void* s, int c, unsigned n)
+-{
+-	int i;
+-	char *ss = (char*)s;
 -
--	/* Check how fast it is.. */
--	cpu_clock = get_cpu_hz();
+-	for (i=0;i<n;i++) ss[i] = c;
+-	return s;
+-}
 -
--	/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
--	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
+-static void* memcpy(void* dest, const void* src, unsigned n)
+-{
+-	int i;
+-	char *d = (char *)dest, *s = (char *)src;
 -
--	disable_irq(RTC_IRQ);
+-	for (i=0;i<n;i++) d[i] = s[i];
+-	return dest;
+-}
 -
--	printk("CPU clock: %d.%02dMHz\n",
--	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
--	{
--		unsigned short bfc;
--		frqcr = ctrl_inl(FRQCR);
--		ifc  = ifc_table[(frqcr>> 6) & 0x0007];
--		bfc  = bfc_table[(frqcr>> 3) & 0x0007];
--		pfc  = pfc_table[(frqcr>> 12) & 0x0007];
--		master_clock = cpu_clock * ifc;
--		bus_clock = master_clock/bfc;
--	}
+-/* ===========================================================================
+- * Fill the input buffer. This is called only when the buffer is empty
+- * and at least one byte is really needed.
+- */
+-static int fill_inbuf(void)
+-{
+-	error("ran out of input data");
+-	return 0;
+-}
 -
--	printk("Bus clock: %d.%02dMHz\n",
--	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
--	module_clock = master_clock/pfc;
--	printk("Module clock: %d.%02dMHz\n",
--	       (module_clock/1000000), (module_clock % 1000000)/10000);
--	interval = (module_clock/(HZ*4));
+-/* ===========================================================================
+- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
+- * (Used for the decompressed data only.)
+- */
+-static void flush_window(void)
+-{
+-	/* With my window equal to my output buffer
+-	 * I only need to compute the crc here.
+-	 */
+-	ulg c = crc;         /* temporary variable */
+-	unsigned n;
+-	uch *in, ch;
 -
--	printk("Interval = %ld\n", interval);
+-	in = window;
+-	for (n = 0; n < outcnt; n++) {
+-		ch = *in++;
+-		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+-	}
+-	crc = c;
+-	bytes_out += (ulg)outcnt;
+-	outcnt = 0;
+-}
 -
--	current_cpu_data.cpu_clock    = cpu_clock;
--	current_cpu_data.master_clock = master_clock;
--	current_cpu_data.bus_clock    = bus_clock;
--	current_cpu_data.module_clock = module_clock;
+-static void error(char *x)
+-{
+-	putstr("\n\n");
+-	putstr(x);
+-	putstr("\n\n -- System halted");
 -
--	/* Start TMU0 */
--	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
--	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
--	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
--	ctrl_outl(interval, TMU0_TCOR);
--	ctrl_outl(interval, TMU0_TCNT);
--	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
+-	while(1);	/* Halt */
 -}
 -
--void enter_deep_standby(void)
+-asmlinkage void decompress_kernel(void *rmode, unsigned long heap,
+-	uch *input_data, unsigned long input_len, uch *output)
 -{
--	/* Disable watchdog timer */
--	ctrl_outl(0xa5000000, WTCSR);
--	/* Configure deep standby on sleep */
--	ctrl_outl(0x03, STBCR);
+-	real_mode = rmode;
 -
--#ifdef CONFIG_SH_ALPHANUMERIC
--	{
--		extern void mach_alphanum(int position, unsigned char value);
--		extern void mach_alphanum_brightness(int setting);
--		char halted[] = "Halted. ";
--		int i;
--		mach_alphanum_brightness(6); /* dimmest setting above off */
--		for (i=0; i<8; i++) {
--			mach_alphanum(i, halted[i]);
--		}
--		asm __volatile__ ("synco");
+-	if (RM_SCREEN_INFO.orig_video_mode == 7) {
+-		vidmem = (char *) 0xb0000;
+-		vidport = 0x3b4;
+-	} else {
+-		vidmem = (char *) 0xb8000;
+-		vidport = 0x3d4;
 -	}
--#endif
 -
--	asm __volatile__ ("sleep");
--	asm __volatile__ ("synci");
--	asm __volatile__ ("nop");
--	asm __volatile__ ("nop");
--	asm __volatile__ ("nop");
--	asm __volatile__ ("nop");
--	panic("Unexpected wakeup!\n");
--}
-diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c
+-	lines = RM_SCREEN_INFO.orig_video_lines;
+-	cols = RM_SCREEN_INFO.orig_video_cols;
+-
+-	window = output;  		/* Output buffer (Normally at 1M) */
+-	free_mem_ptr     = heap;	/* Heap  */
+-	free_mem_end_ptr = heap + HEAP_SIZE;
+-	inbuf  = input_data;		/* Input buffer */
+-	insize = input_len;
+-	inptr  = 0;
+-
+-	if ((ulg)output & (__KERNEL_ALIGN - 1))
+-		error("Destination address not 2M aligned");
+-	if ((ulg)output >= 0xffffffffffUL)
+-		error("Destination address too large");
+-
+-	makecrc();
+-	putstr(".\nDecompressing Linux...");
+-	gunzip();
+-	putstr("done.\nBooting the kernel.\n");
+-	return;
+-}
+diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
+index 7a0d00b..d01ea42 100644
+--- a/arch/x86/boot/compressed/relocs.c
++++ b/arch/x86/boot/compressed/relocs.c
+@@ -27,11 +27,6 @@ static unsigned long *relocs;
+  * absolute relocations present w.r.t these symbols.
+  */
+ static const char* safe_abs_relocs[] = {
+-		"__kernel_vsyscall",
+-		"__kernel_rt_sigreturn",
+-		"__kernel_sigreturn",
+-		"SYSENTER_RETURN",
+-		"VDSO_NOTE_MASK",
+ 		"xen_irq_disable_direct_reloc",
+ 		"xen_save_fl_direct_reloc",
+ };
+@@ -45,6 +40,8 @@ static int is_safe_abs_reloc(const char* sym_name)
+ 			/* Match found */
+ 			return 1;
+ 	}
++	if (strncmp(sym_name, "VDSO", 4) == 0)
++		return 1;
+ 	if (strncmp(sym_name, "__crc_", 6) == 0)
+ 		return 1;
+ 	return 0;
+diff --git a/arch/x86/boot/compressed/vmlinux.scr b/arch/x86/boot/compressed/vmlinux.scr
+new file mode 100644
+index 0000000..f02382a
+--- /dev/null
++++ b/arch/x86/boot/compressed/vmlinux.scr
+@@ -0,0 +1,10 @@
++SECTIONS
++{
++  .rodata.compressed : {
++	input_len = .;
++	LONG(input_data_end - input_data) input_data = .;
++	*(.data)
++	output_len = . - 4;
++	input_data_end = .;
++	}
++}
+diff --git a/arch/x86/boot/compressed/vmlinux_32.lds b/arch/x86/boot/compressed/vmlinux_32.lds
+index cc4854f..bb3c483 100644
+--- a/arch/x86/boot/compressed/vmlinux_32.lds
++++ b/arch/x86/boot/compressed/vmlinux_32.lds
+@@ -3,17 +3,17 @@ OUTPUT_ARCH(i386)
+ ENTRY(startup_32)
+ SECTIONS
+ {
+-        /* Be careful parts of head.S assume startup_32 is at
+-         * address 0.
++	/* Be careful parts of head_32.S assume startup_32 is at
++	 * address 0.
+ 	 */
+-	. =  0 	;
++	. = 0;
+ 	.text.head : {
+ 		_head = . ;
+ 		*(.text.head)
+ 		_ehead = . ;
+ 	}
+-	.data.compressed : {
+-		*(.data.compressed)
++	.rodata.compressed : {
++		*(.rodata.compressed)
+ 	}
+ 	.text :	{
+ 		_text = .; 	/* Text */
+diff --git a/arch/x86/boot/compressed/vmlinux_32.scr b/arch/x86/boot/compressed/vmlinux_32.scr
+deleted file mode 100644
+index 707a88f..0000000
+--- a/arch/x86/boot/compressed/vmlinux_32.scr
++++ /dev/null
+@@ -1,10 +0,0 @@
+-SECTIONS
+-{
+-  .data.compressed : {
+-	input_len = .;
+-	LONG(input_data_end - input_data) input_data = .; 
+-	*(.data) 
+-	output_len = . - 4;
+-	input_data_end = .; 
+-	}
+-}
+diff --git a/arch/x86/boot/compressed/vmlinux_64.lds b/arch/x86/boot/compressed/vmlinux_64.lds
+index 94c13e5..f6e5b44 100644
+--- a/arch/x86/boot/compressed/vmlinux_64.lds
++++ b/arch/x86/boot/compressed/vmlinux_64.lds
+@@ -3,15 +3,19 @@ OUTPUT_ARCH(i386:x86-64)
+ ENTRY(startup_64)
+ SECTIONS
+ {
+-	/* Be careful parts of head.S assume startup_32 is at
+- 	 * address 0.
++	/* Be careful parts of head_64.S assume startup_64 is at
++	 * address 0.
+ 	 */
+ 	. = 0;
+-	.text :	{
++	.text.head : {
+ 		_head = . ;
+ 		*(.text.head)
+ 		_ehead = . ;
+-		*(.text.compressed)
++	}
++	.rodata.compressed : {
++		*(.rodata.compressed)
++	}
++	.text :	{
+ 		_text = .; 	/* Text */
+ 		*(.text)
+ 		*(.text.*)
+diff --git a/arch/x86/boot/compressed/vmlinux_64.scr b/arch/x86/boot/compressed/vmlinux_64.scr
+deleted file mode 100644
+index bd1429c..0000000
+--- a/arch/x86/boot/compressed/vmlinux_64.scr
++++ /dev/null
+@@ -1,10 +0,0 @@
+-SECTIONS
+-{
+-  .text.compressed : {
+-	input_len = .;
+-	LONG(input_data_end - input_data) input_data = .;
+-	*(.data)
+-	output_len = . - 4;
+-	input_data_end = .;
+-	}
+-}
+diff --git a/arch/x86/boot/edd.c b/arch/x86/boot/edd.c
+index bd138e4..8721dc4 100644
+--- a/arch/x86/boot/edd.c
++++ b/arch/x86/boot/edd.c
+@@ -129,6 +129,7 @@ void query_edd(void)
+ 	char eddarg[8];
+ 	int do_mbr = 1;
+ 	int do_edd = 1;
++	int be_quiet;
+ 	int devno;
+ 	struct edd_info ei, *edp;
+ 	u32 *mbrptr;
+@@ -140,12 +141,21 @@ void query_edd(void)
+ 			do_edd = 0;
+ 	}
+ 
++	be_quiet = cmdline_find_option_bool("quiet");
++
+ 	edp    = boot_params.eddbuf;
+ 	mbrptr = boot_params.edd_mbr_sig_buffer;
+ 
+ 	if (!do_edd)
+ 		return;
+ 
++	/* Bugs in OnBoard or AddOnCards Bios may hang the EDD probe,
++	 * so give a hint if this happens.
++	 */
++
++	if (!be_quiet)
++		printf("Probing EDD (edd=off to disable)... ");
++
+ 	for (devno = 0x80; devno < 0x80+EDD_MBR_SIG_MAX; devno++) {
+ 		/*
+ 		 * Scan the BIOS-supported hard disks and query EDD
+@@ -162,6 +172,9 @@ void query_edd(void)
+ 		if (do_mbr && !read_mbr_sig(devno, &ei, mbrptr++))
+ 			boot_params.edd_mbr_sig_buf_entries = devno-0x80+1;
+ 	}
++
++	if (!be_quiet)
++		printf("ok\n");
+ }
+ 
+ #endif
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index 4cc5b04..64ad901 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -195,10 +195,13 @@ cmd_line_ptr:	.long	0		# (Header version 0x0202 or later)
+ 					# can be located anywhere in
+ 					# low memory 0x10000 or higher.
+ 
+-ramdisk_max:	.long (-__PAGE_OFFSET-(512 << 20)-1) & 0x7fffffff
++ramdisk_max:	.long 0x7fffffff
+ 					# (Header version 0x0203 or later)
+ 					# The highest safe address for
+ 					# the contents of an initrd
++					# The current kernel allows up to 4 GB,
++					# but leave it at 2 GB to avoid
++					# possible bootloader bugs.
+ 
+ kernel_alignment:  .long CONFIG_PHYSICAL_ALIGN	#physical addr alignment
+ 						#required for protected mode
+diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
+index 1f95750..7828da5 100644
+--- a/arch/x86/boot/main.c
++++ b/arch/x86/boot/main.c
+@@ -100,20 +100,32 @@ static void set_bios_mode(void)
+ #endif
+ }
+ 
+-void main(void)
++static void init_heap(void)
+ {
+-	/* First, copy the boot header into the "zeropage" */
+-	copy_boot_params();
++	char *stack_end;
+ 
+-	/* End of heap check */
+ 	if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
+-		heap_end = (char *)(boot_params.hdr.heap_end_ptr
+-				    +0x200-STACK_SIZE);
++		asm("leal %P1(%%esp),%0"
++		    : "=r" (stack_end) : "i" (-STACK_SIZE));
++
++		heap_end = (char *)
++			((size_t)boot_params.hdr.heap_end_ptr + 0x200);
++		if (heap_end > stack_end)
++			heap_end = stack_end;
+ 	} else {
+ 		/* Boot protocol 2.00 only, no heap available */
+ 		puts("WARNING: Ancient bootloader, some functionality "
+ 		     "may be limited!\n");
+ 	}
++}
++
++void main(void)
++{
++	/* First, copy the boot header into the "zeropage" */
++	copy_boot_params();
++
++	/* End of heap check */
++	init_heap();
+ 
+ 	/* Make sure we have all the proper CPU support */
+ 	if (validate_cpu()) {
+@@ -131,9 +143,6 @@ void main(void)
+ 	/* Set keyboard repeat rate (why?) */
+ 	keyboard_set_repeat();
+ 
+-	/* Set the video mode */
+-	set_video();
+-
+ 	/* Query MCA information */
+ 	query_mca();
+ 
+@@ -154,6 +163,10 @@ void main(void)
+ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+ 	query_edd();
+ #endif
++
++	/* Set the video mode */
++	set_video();
++
+ 	/* Do the last things and invoke protected mode */
+ 	go_to_protected_mode();
+ }
+diff --git a/arch/x86/boot/pm.c b/arch/x86/boot/pm.c
+index 09fb342..1a0f936 100644
+--- a/arch/x86/boot/pm.c
++++ b/arch/x86/boot/pm.c
+@@ -104,7 +104,7 @@ static void reset_coprocessor(void)
+ 	(((u64)(base & 0xff000000) << 32) |	\
+ 	 ((u64)flags << 40) |			\
+ 	 ((u64)(limit & 0x00ff0000) << 32) |	\
+-	 ((u64)(base & 0x00ffff00) << 16) |	\
++	 ((u64)(base & 0x00ffffff) << 16) |	\
+ 	 ((u64)(limit & 0x0000ffff)))
+ 
+ struct gdt_ptr {
+@@ -121,6 +121,10 @@ static void setup_gdt(void)
+ 		[GDT_ENTRY_BOOT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff),
+ 		/* DS: data, read/write, 4 GB, base 0 */
+ 		[GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff),
++		/* TSS: 32-bit tss, 104 bytes, base 4096 */
++		/* We only have a TSS here to keep Intel VT happy;
++		   we don't actually use it for anything. */
++		[GDT_ENTRY_BOOT_TSS] = GDT_ENTRY(0x0089, 4096, 103),
+ 	};
+ 	/* Xen HVM incorrectly stores a pointer to the gdt_ptr, instead
+ 	   of the gdt_ptr contents.  Thus, make it static so it will
+diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
+index fa6bed1..f5402d5 100644
+--- a/arch/x86/boot/pmjump.S
++++ b/arch/x86/boot/pmjump.S
+@@ -15,6 +15,7 @@
+  */
+ 
+ #include <asm/boot.h>
++#include <asm/processor-flags.h>
+ #include <asm/segment.h>
+ 
+ 	.text
+@@ -29,28 +30,55 @@
+  */
+ protected_mode_jump:
+ 	movl	%edx, %esi		# Pointer to boot_params table
+-	movl	%eax, 2f		# Patch ljmpl instruction
++
++	xorl	%ebx, %ebx
++	movw	%cs, %bx
++	shll	$4, %ebx
++	addl	%ebx, 2f
+ 
+ 	movw	$__BOOT_DS, %cx
+-	xorl	%ebx, %ebx		# Per the 32-bit boot protocol
+-	xorl	%ebp, %ebp		# Per the 32-bit boot protocol
+-	xorl	%edi, %edi		# Per the 32-bit boot protocol
++	movw	$__BOOT_TSS, %di
+ 
+ 	movl	%cr0, %edx
+-	orb	$1, %dl			# Protected mode (PE) bit
++	orb	$X86_CR0_PE, %dl	# Protected mode
+ 	movl	%edx, %cr0
+ 	jmp	1f			# Short jump to serialize on 386/486
+ 1:
+ 
+-	movw	%cx, %ds
+-	movw	%cx, %es
+-	movw	%cx, %fs
+-	movw	%cx, %gs
+-	movw	%cx, %ss
+-
+-	# Jump to the 32-bit entrypoint
++	# Transition to 32-bit mode
+ 	.byte	0x66, 0xea		# ljmpl opcode
+-2:	.long	0			# offset
++2:	.long	in_pm32			# offset
+ 	.word	__BOOT_CS		# segment
+ 
+ 	.size	protected_mode_jump, .-protected_mode_jump
++
++	.code32
++	.type	in_pm32, @function
++in_pm32:
++	# Set up data segments for flat 32-bit mode
++	movl	%ecx, %ds
++	movl	%ecx, %es
++	movl	%ecx, %fs
++	movl	%ecx, %gs
++	movl	%ecx, %ss
++	# The 32-bit code sets up its own stack, but this way we do have
++	# a valid stack if some debugging hack wants to use it.
++	addl	%ebx, %esp
++
++	# Set up TR to make Intel VT happy
++	ltr	%di
++
++	# Clear registers to allow for future extensions to the
++	# 32-bit boot protocol
++	xorl	%ecx, %ecx
++	xorl	%edx, %edx
++	xorl	%ebx, %ebx
++	xorl	%ebp, %ebp
++	xorl	%edi, %edi
++
++	# Set up LDTR to make Intel VT happy
++	lldt	%cx
++
++	jmpl	*%eax			# Jump to the 32-bit entrypoint
++
++	.size	in_pm32, .-in_pm32
+diff --git a/arch/x86/boot/video-bios.c b/arch/x86/boot/video-bios.c
+index ed0672a..ff664a1 100644
+--- a/arch/x86/boot/video-bios.c
++++ b/arch/x86/boot/video-bios.c
+@@ -104,6 +104,7 @@ static int bios_probe(void)
+ 
+ 		mi = GET_HEAP(struct mode_info, 1);
+ 		mi->mode = VIDEO_FIRST_BIOS+mode;
++		mi->depth = 0;	/* text */
+ 		mi->x = rdfs16(0x44a);
+ 		mi->y = rdfs8(0x484)+1;
+ 		nmodes++;
+@@ -116,7 +117,7 @@ static int bios_probe(void)
+ 
+ __videocard video_bios =
+ {
+-	.card_name	= "BIOS (scanned)",
++	.card_name	= "BIOS",
+ 	.probe		= bios_probe,
+ 	.set_mode	= bios_set_mode,
+ 	.unsafe		= 1,
+diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
+index 4716b9a..662dd2f 100644
+--- a/arch/x86/boot/video-vesa.c
++++ b/arch/x86/boot/video-vesa.c
+@@ -79,20 +79,28 @@ static int vesa_probe(void)
+ 			/* Text Mode, TTY BIOS supported,
+ 			   supported by hardware */
+ 			mi = GET_HEAP(struct mode_info, 1);
+-			mi->mode = mode + VIDEO_FIRST_VESA;
+-			mi->x    = vminfo.h_res;
+-			mi->y    = vminfo.v_res;
++			mi->mode  = mode + VIDEO_FIRST_VESA;
++			mi->depth = 0; /* text */
++			mi->x     = vminfo.h_res;
++			mi->y     = vminfo.v_res;
+ 			nmodes++;
+-		} else if ((vminfo.mode_attr & 0x99) == 0x99) {
++		} else if ((vminfo.mode_attr & 0x99) == 0x99 &&
++			   (vminfo.memory_layout == 4 ||
++			    vminfo.memory_layout == 6) &&
++			   vminfo.memory_planes == 1) {
+ #ifdef CONFIG_FB
+ 			/* Graphics mode, color, linear frame buffer
+-			   supported -- register the mode but hide from
+-			   the menu.  Only do this if framebuffer is
+-			   configured, however, otherwise the user will
+-			   be left without a screen. */
++			   supported.  Only register the mode if
++			   if framebuffer is configured, however,
++			   otherwise the user will be left without a screen.
++			   We don't require CONFIG_FB_VESA, however, since
++			   some of the other framebuffer drivers can use
++			   this mode-setting, too. */
+ 			mi = GET_HEAP(struct mode_info, 1);
+ 			mi->mode = mode + VIDEO_FIRST_VESA;
+-			mi->x = mi->y = 0;
++			mi->depth = vminfo.bpp;
++			mi->x = vminfo.h_res;
++			mi->y = vminfo.v_res;
+ 			nmodes++;
+ #endif
+ 		}
+diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
+index aef02f9..7259387 100644
+--- a/arch/x86/boot/video-vga.c
++++ b/arch/x86/boot/video-vga.c
+@@ -18,22 +18,22 @@
+ #include "video.h"
+ 
+ static struct mode_info vga_modes[] = {
+-	{ VIDEO_80x25,  80, 25 },
+-	{ VIDEO_8POINT, 80, 50 },
+-	{ VIDEO_80x43,  80, 43 },
+-	{ VIDEO_80x28,  80, 28 },
+-	{ VIDEO_80x30,  80, 30 },
+-	{ VIDEO_80x34,  80, 34 },
+-	{ VIDEO_80x60,  80, 60 },
++	{ VIDEO_80x25,  80, 25, 0 },
++	{ VIDEO_8POINT, 80, 50, 0 },
++	{ VIDEO_80x43,  80, 43, 0 },
++	{ VIDEO_80x28,  80, 28, 0 },
++	{ VIDEO_80x30,  80, 30, 0 },
++	{ VIDEO_80x34,  80, 34, 0 },
++	{ VIDEO_80x60,  80, 60, 0 },
+ };
+ 
+ static struct mode_info ega_modes[] = {
+-	{ VIDEO_80x25,  80, 25 },
+-	{ VIDEO_8POINT, 80, 43 },
++	{ VIDEO_80x25,  80, 25, 0 },
++	{ VIDEO_8POINT, 80, 43, 0 },
+ };
+ 
+ static struct mode_info cga_modes[] = {
+-	{ VIDEO_80x25,  80, 25 },
++	{ VIDEO_80x25,  80, 25, 0 },
+ };
+ 
+ __videocard video_vga;
+diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
+index ad9712f..696d08f 100644
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -293,13 +293,28 @@ static void display_menu(void)
+ 	struct mode_info *mi;
+ 	char ch;
+ 	int i;
++	int nmodes;
++	int modes_per_line;
++	int col;
+ 
+-	puts("Mode:    COLSxROWS:\n");
++	nmodes = 0;
++	for (card = video_cards; card < video_cards_end; card++)
++		nmodes += card->nmodes;
+ 
++	modes_per_line = 1;
++	if (nmodes >= 20)
++		modes_per_line = 3;
++
++	for (col = 0; col < modes_per_line; col++)
++		puts("Mode: Resolution:  Type: ");
++	putchar('\n');
++
++	col = 0;
+ 	ch = '0';
+ 	for (card = video_cards; card < video_cards_end; card++) {
+ 		mi = card->modes;
+ 		for (i = 0; i < card->nmodes; i++, mi++) {
++			char resbuf[32];
+ 			int visible = mi->x && mi->y;
+ 			u16 mode_id = mi->mode ? mi->mode :
+ 				(mi->y << 8)+mi->x;
+@@ -307,8 +322,18 @@ static void display_menu(void)
+ 			if (!visible)
+ 				continue; /* Hidden mode */
+ 
+-			printf("%c  %04X  %3dx%-3d  %s\n",
+-			       ch, mode_id, mi->x, mi->y, card->card_name);
++			if (mi->depth)
++				sprintf(resbuf, "%dx%d", mi->y, mi->depth);
++			else
++				sprintf(resbuf, "%d", mi->y);
++
++			printf("%c %03X %4dx%-7s %-6s",
++			       ch, mode_id, mi->x, resbuf, card->card_name);
++			col++;
++			if (col >= modes_per_line) {
++				putchar('\n');
++				col = 0;
++			}
+ 
+ 			if (ch == '9')
+ 				ch = 'a';
+@@ -318,6 +343,8 @@ static void display_menu(void)
+ 				ch++;
+ 		}
+ 	}
++	if (col)
++		putchar('\n');
+ }
+ 
+ #define H(x)	((x)-'a'+10)
+diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h
+index b92447d..d69347f 100644
+--- a/arch/x86/boot/video.h
++++ b/arch/x86/boot/video.h
+@@ -83,7 +83,8 @@ void store_screen(void);
+ 
+ struct mode_info {
+ 	u16 mode;		/* Mode number (vga= style) */
+-	u8  x, y;		/* Width, height */
++	u16 x, y;		/* Width, height */
++	u16 depth;		/* Bits per pixel, 0 for text mode */
+ };
+ 
+ struct card_info {
+diff --git a/arch/x86/boot/voyager.c b/arch/x86/boot/voyager.c
+index 61c8fe0..6499e32 100644
+--- a/arch/x86/boot/voyager.c
++++ b/arch/x86/boot/voyager.c
+@@ -16,8 +16,6 @@
+ 
+ #include "boot.h"
+ 
+-#ifdef CONFIG_X86_VOYAGER
+-
+ int query_voyager(void)
+ {
+ 	u8 err;
+@@ -42,5 +40,3 @@ int query_voyager(void)
+ 	copy_from_fs(data_ptr, di, 7);	/* Table is 7 bytes apparently */
+ 	return 0;
+ }
+-
+-#endif /* CONFIG_X86_VOYAGER */
+diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
+index 54ee176..77562e7 100644
+--- a/arch/x86/configs/i386_defconfig
++++ b/arch/x86/configs/i386_defconfig
+@@ -99,9 +99,9 @@ CONFIG_IOSCHED_NOOP=y
+ CONFIG_IOSCHED_AS=y
+ CONFIG_IOSCHED_DEADLINE=y
+ CONFIG_IOSCHED_CFQ=y
+-CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_AS is not set
+ # CONFIG_DEFAULT_DEADLINE is not set
+-# CONFIG_DEFAULT_CFQ is not set
++CONFIG_DEFAULT_CFQ=y
+ # CONFIG_DEFAULT_NOOP is not set
+ CONFIG_DEFAULT_IOSCHED="anticipatory"
+ 
+diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
+index 38a83f9..9e2b0ef 100644
+--- a/arch/x86/configs/x86_64_defconfig
++++ b/arch/x86/configs/x86_64_defconfig
+@@ -145,15 +145,6 @@ CONFIG_K8_NUMA=y
+ CONFIG_NODES_SHIFT=6
+ CONFIG_X86_64_ACPI_NUMA=y
+ CONFIG_NUMA_EMU=y
+-CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
+-CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
+-CONFIG_ARCH_SPARSEMEM_ENABLE=y
+-CONFIG_SELECT_MEMORY_MODEL=y
+-# CONFIG_FLATMEM_MANUAL is not set
+-CONFIG_DISCONTIGMEM_MANUAL=y
+-# CONFIG_SPARSEMEM_MANUAL is not set
+-CONFIG_DISCONTIGMEM=y
+-CONFIG_FLAT_NODE_MEM_MAP=y
+ CONFIG_NEED_MULTIPLE_NODES=y
+ # CONFIG_SPARSEMEM_STATIC is not set
+ CONFIG_SPLIT_PTLOCK_CPUS=4
+diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
+index 46bb609..3874c2d 100644
+--- a/arch/x86/crypto/Makefile
++++ b/arch/x86/crypto/Makefile
+@@ -4,12 +4,16 @@
+ 
+ obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
++obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
+ 
+ obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
++obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+ 
+-aes-i586-y := aes-i586-asm_32.o aes_32.o
+-twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
++aes-i586-y := aes-i586-asm_32.o aes_glue.o
++twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
++salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
+ 
+-aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
+-twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
++aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
++twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
++salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
+index f942f0c..1093bed 100644
+--- a/arch/x86/crypto/aes-i586-asm_32.S
++++ b/arch/x86/crypto/aes-i586-asm_32.S
+@@ -46,9 +46,9 @@
+ #define in_blk 16
+ 
+ /* offsets in crypto_tfm structure */
+-#define ekey (crypto_tfm_ctx_offset + 0)
+-#define nrnd (crypto_tfm_ctx_offset + 256)
+-#define dkey (crypto_tfm_ctx_offset + 260)
++#define klen (crypto_tfm_ctx_offset + 0)
++#define ekey (crypto_tfm_ctx_offset + 4)
++#define dkey (crypto_tfm_ctx_offset + 244)
+ 
+ // register mapping for encrypt and decrypt subroutines
+ 
+@@ -221,8 +221,8 @@
+ 
+ .global  aes_enc_blk
+ 
+-.extern  ft_tab
+-.extern  fl_tab
++.extern  crypto_ft_tab
++.extern  crypto_fl_tab
+ 
+ .align 4
+ 
+@@ -236,7 +236,7 @@ aes_enc_blk:
+ 1:	push    %ebx
+ 	mov     in_blk+4(%esp),%r2
+ 	push    %esi
+-	mov     nrnd(%ebp),%r3   // number of rounds
++	mov     klen(%ebp),%r3   // key size
+ 	push    %edi
+ #if ekey != 0
+ 	lea     ekey(%ebp),%ebp  // key pointer
+@@ -255,26 +255,26 @@ aes_enc_blk:
+ 
+ 	sub     $8,%esp		// space for register saves on stack
+ 	add     $16,%ebp	// increment to next round key
+-	cmp     $12,%r3
++	cmp     $24,%r3
+ 	jb      4f		// 10 rounds for 128-bit key
+ 	lea     32(%ebp),%ebp
+ 	je      3f		// 12 rounds for 192-bit key
+ 	lea     32(%ebp),%ebp
+ 
+-2:	fwd_rnd1( -64(%ebp) ,ft_tab)	// 14 rounds for 256-bit key
+-	fwd_rnd2( -48(%ebp) ,ft_tab)
+-3:	fwd_rnd1( -32(%ebp) ,ft_tab)	// 12 rounds for 192-bit key
+-	fwd_rnd2( -16(%ebp) ,ft_tab)
+-4:	fwd_rnd1(    (%ebp) ,ft_tab)	// 10 rounds for 128-bit key
+-	fwd_rnd2( +16(%ebp) ,ft_tab)
+-	fwd_rnd1( +32(%ebp) ,ft_tab)
+-	fwd_rnd2( +48(%ebp) ,ft_tab)
+-	fwd_rnd1( +64(%ebp) ,ft_tab)
+-	fwd_rnd2( +80(%ebp) ,ft_tab)
+-	fwd_rnd1( +96(%ebp) ,ft_tab)
+-	fwd_rnd2(+112(%ebp) ,ft_tab)
+-	fwd_rnd1(+128(%ebp) ,ft_tab)
+-	fwd_rnd2(+144(%ebp) ,fl_tab)	// last round uses a different table
++2:	fwd_rnd1( -64(%ebp), crypto_ft_tab)	// 14 rounds for 256-bit key
++	fwd_rnd2( -48(%ebp), crypto_ft_tab)
++3:	fwd_rnd1( -32(%ebp), crypto_ft_tab)	// 12 rounds for 192-bit key
++	fwd_rnd2( -16(%ebp), crypto_ft_tab)
++4:	fwd_rnd1(    (%ebp), crypto_ft_tab)	// 10 rounds for 128-bit key
++	fwd_rnd2( +16(%ebp), crypto_ft_tab)
++	fwd_rnd1( +32(%ebp), crypto_ft_tab)
++	fwd_rnd2( +48(%ebp), crypto_ft_tab)
++	fwd_rnd1( +64(%ebp), crypto_ft_tab)
++	fwd_rnd2( +80(%ebp), crypto_ft_tab)
++	fwd_rnd1( +96(%ebp), crypto_ft_tab)
++	fwd_rnd2(+112(%ebp), crypto_ft_tab)
++	fwd_rnd1(+128(%ebp), crypto_ft_tab)
++	fwd_rnd2(+144(%ebp), crypto_fl_tab)	// last round uses a different table
+ 
+ // move final values to the output array.  CAUTION: the 
+ // order of these assigns rely on the register mappings
+@@ -297,8 +297,8 @@ aes_enc_blk:
+ 
+ .global  aes_dec_blk
+ 
+-.extern  it_tab
+-.extern  il_tab
++.extern  crypto_it_tab
++.extern  crypto_il_tab
+ 
+ .align 4
+ 
+@@ -312,14 +312,11 @@ aes_dec_blk:
+ 1:	push    %ebx
+ 	mov     in_blk+4(%esp),%r2
+ 	push    %esi
+-	mov     nrnd(%ebp),%r3   // number of rounds
++	mov     klen(%ebp),%r3   // key size
+ 	push    %edi
+ #if dkey != 0
+ 	lea     dkey(%ebp),%ebp  // key pointer
+ #endif
+-	mov     %r3,%r0
+-	shl     $4,%r0
+-	add     %r0,%ebp
+ 	
+ // input four columns and xor in first round key
+ 
+@@ -333,27 +330,27 @@ aes_dec_blk:
+ 	xor     12(%ebp),%r5
+ 
+ 	sub     $8,%esp		// space for register saves on stack
+-	sub     $16,%ebp	// increment to next round key
+-	cmp     $12,%r3
++	add     $16,%ebp	// increment to next round key
++	cmp     $24,%r3
+ 	jb      4f		// 10 rounds for 128-bit key
+-	lea     -32(%ebp),%ebp
++	lea     32(%ebp),%ebp
+ 	je      3f		// 12 rounds for 192-bit key
+-	lea     -32(%ebp),%ebp
+-
+-2:	inv_rnd1( +64(%ebp), it_tab)	// 14 rounds for 256-bit key
+-	inv_rnd2( +48(%ebp), it_tab)
+-3:	inv_rnd1( +32(%ebp), it_tab)	// 12 rounds for 192-bit key
+-	inv_rnd2( +16(%ebp), it_tab)
+-4:	inv_rnd1(    (%ebp), it_tab)	// 10 rounds for 128-bit key
+-	inv_rnd2( -16(%ebp), it_tab)
+-	inv_rnd1( -32(%ebp), it_tab)
+-	inv_rnd2( -48(%ebp), it_tab)
+-	inv_rnd1( -64(%ebp), it_tab)
+-	inv_rnd2( -80(%ebp), it_tab)
+-	inv_rnd1( -96(%ebp), it_tab)
+-	inv_rnd2(-112(%ebp), it_tab)
+-	inv_rnd1(-128(%ebp), it_tab)
+-	inv_rnd2(-144(%ebp), il_tab)	// last round uses a different table
++	lea     32(%ebp),%ebp
++
++2:	inv_rnd1( -64(%ebp), crypto_it_tab)	// 14 rounds for 256-bit key
++	inv_rnd2( -48(%ebp), crypto_it_tab)
++3:	inv_rnd1( -32(%ebp), crypto_it_tab)	// 12 rounds for 192-bit key
++	inv_rnd2( -16(%ebp), crypto_it_tab)
++4:	inv_rnd1(    (%ebp), crypto_it_tab)	// 10 rounds for 128-bit key
++	inv_rnd2( +16(%ebp), crypto_it_tab)
++	inv_rnd1( +32(%ebp), crypto_it_tab)
++	inv_rnd2( +48(%ebp), crypto_it_tab)
++	inv_rnd1( +64(%ebp), crypto_it_tab)
++	inv_rnd2( +80(%ebp), crypto_it_tab)
++	inv_rnd1( +96(%ebp), crypto_it_tab)
++	inv_rnd2(+112(%ebp), crypto_it_tab)
++	inv_rnd1(+128(%ebp), crypto_it_tab)
++	inv_rnd2(+144(%ebp), crypto_il_tab)	// last round uses a different table
+ 
+ // move final values to the output array.  CAUTION: the 
+ // order of these assigns rely on the register mappings
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 26b40de..a120f52 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,10 +8,10 @@
+  * including this sentence is retained in full.
+  */
+ 
+-.extern aes_ft_tab
+-.extern aes_it_tab
+-.extern aes_fl_tab
+-.extern aes_il_tab
++.extern crypto_ft_tab
++.extern crypto_it_tab
++.extern crypto_fl_tab
++.extern crypto_il_tab
+ 
+ .text
+ 
+@@ -56,13 +56,13 @@
+ 	.align	8;			\
+ FUNC:	movq	r1,r2;			\
+ 	movq	r3,r4;			\
+-	leaq	BASE+KEY+52(r8),r9;	\
++	leaq	BASE+KEY+48+4(r8),r9;	\
+ 	movq	r10,r11;		\
+ 	movl	(r7),r5 ## E;		\
+ 	movl	4(r7),r1 ## E;		\
+ 	movl	8(r7),r6 ## E;		\
+ 	movl	12(r7),r7 ## E;		\
+-	movl	BASE(r8),r10 ## E;	\
++	movl	BASE+0(r8),r10 ## E;	\
+ 	xorl	-48(r9),r5 ## E;	\
+ 	xorl	-44(r9),r1 ## E;	\
+ 	xorl	-40(r9),r6 ## E;	\
+@@ -154,37 +154,37 @@ FUNC:	movq	r1,r2;			\
+ /* void aes_enc_blk(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */
+ 
+ 	entry(aes_enc_blk,0,enc128,enc192)
+-	encrypt_round(aes_ft_tab,-96)
+-	encrypt_round(aes_ft_tab,-80)
+-enc192:	encrypt_round(aes_ft_tab,-64)
+-	encrypt_round(aes_ft_tab,-48)
+-enc128:	encrypt_round(aes_ft_tab,-32)
+-	encrypt_round(aes_ft_tab,-16)
+-	encrypt_round(aes_ft_tab,  0)
+-	encrypt_round(aes_ft_tab, 16)
+-	encrypt_round(aes_ft_tab, 32)
+-	encrypt_round(aes_ft_tab, 48)
+-	encrypt_round(aes_ft_tab, 64)
+-	encrypt_round(aes_ft_tab, 80)
+-	encrypt_round(aes_ft_tab, 96)
+-	encrypt_final(aes_fl_tab,112)
++	encrypt_round(crypto_ft_tab,-96)
++	encrypt_round(crypto_ft_tab,-80)
++enc192:	encrypt_round(crypto_ft_tab,-64)
++	encrypt_round(crypto_ft_tab,-48)
++enc128:	encrypt_round(crypto_ft_tab,-32)
++	encrypt_round(crypto_ft_tab,-16)
++	encrypt_round(crypto_ft_tab,  0)
++	encrypt_round(crypto_ft_tab, 16)
++	encrypt_round(crypto_ft_tab, 32)
++	encrypt_round(crypto_ft_tab, 48)
++	encrypt_round(crypto_ft_tab, 64)
++	encrypt_round(crypto_ft_tab, 80)
++	encrypt_round(crypto_ft_tab, 96)
++	encrypt_final(crypto_fl_tab,112)
+ 	return
+ 
+ /* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
+ 
+ 	entry(aes_dec_blk,240,dec128,dec192)
+-	decrypt_round(aes_it_tab,-96)
+-	decrypt_round(aes_it_tab,-80)
+-dec192:	decrypt_round(aes_it_tab,-64)
+-	decrypt_round(aes_it_tab,-48)
+-dec128:	decrypt_round(aes_it_tab,-32)
+-	decrypt_round(aes_it_tab,-16)
+-	decrypt_round(aes_it_tab,  0)
+-	decrypt_round(aes_it_tab, 16)
+-	decrypt_round(aes_it_tab, 32)
+-	decrypt_round(aes_it_tab, 48)
+-	decrypt_round(aes_it_tab, 64)
+-	decrypt_round(aes_it_tab, 80)
+-	decrypt_round(aes_it_tab, 96)
+-	decrypt_final(aes_il_tab,112)
++	decrypt_round(crypto_it_tab,-96)
++	decrypt_round(crypto_it_tab,-80)
++dec192:	decrypt_round(crypto_it_tab,-64)
++	decrypt_round(crypto_it_tab,-48)
++dec128:	decrypt_round(crypto_it_tab,-32)
++	decrypt_round(crypto_it_tab,-16)
++	decrypt_round(crypto_it_tab,  0)
++	decrypt_round(crypto_it_tab, 16)
++	decrypt_round(crypto_it_tab, 32)
++	decrypt_round(crypto_it_tab, 48)
++	decrypt_round(crypto_it_tab, 64)
++	decrypt_round(crypto_it_tab, 80)
++	decrypt_round(crypto_it_tab, 96)
++	decrypt_final(crypto_il_tab,112)
+ 	return
+diff --git a/arch/x86/crypto/aes_32.c b/arch/x86/crypto/aes_32.c
 deleted file mode 100644
-index f32df38..0000000
---- a/arch/sh64/kernel/traps.c
+index 49aad93..0000000
+--- a/arch/x86/crypto/aes_32.c
 +++ /dev/null
-@@ -1,982 +0,0 @@
--/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
+@@ -1,515 +0,0 @@
+-/* 
+- * 
+- * Glue Code for optimized 586 assembler version of AES
 - *
-- * arch/sh64/kernel/traps.c
+- * Copyright (c) 2002, Dr Brian Gladman <>, Worcester, UK.
+- * All rights reserved.
 - *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003, 2004  Paul Mundt
-- * Copyright (C) 2003, 2004  Richard Curnow
+- * LICENSE TERMS
+- *
+- * The free distribution and use of this software in both source and binary
+- * form is allowed (with or without changes) provided that:
+- *
+- *   1. distributions of this source code include the above copyright
+- *      notice, this list of conditions and the following disclaimer;
+- *
+- *   2. distributions in binary form include the above copyright
+- *      notice, this list of conditions and the following disclaimer
+- *      in the documentation and/or other associated materials;
+- *
+- *   3. the copyright holder's name is not used to endorse products
+- *      built using this software without specific written permission.
+- *
+- * ALTERNATIVELY, provided that this notice is retained in full, this product
+- * may be distributed under the terms of the GNU General Public License (GPL),
+- * in which case the provisions of the GPL apply INSTEAD OF those given above.
+- *
+- * DISCLAIMER
+- *
+- * This software is provided 'as is' with no explicit or implied warranties
+- * in respect of its properties, including, but not limited to, correctness
+- * and/or fitness for purpose.
+- *
+- * Copyright (c) 2003, Adam J. Richter <adam at yggdrasil.com> (conversion to
+- * 2.5 API).
+- * Copyright (c) 2003, 2004 Fruhwirth Clemens <clemens at endorphin.org>
+- * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris at redhat.com>
 - *
 - */
 -
--/*
-- * 'Traps.c' handles hardware traps and faults after we have saved some
-- * state in 'entry.S'.
-- */
--#include <linux/sched.h>
+-#include <asm/byteorder.h>
 -#include <linux/kernel.h>
--#include <linux/string.h>
--#include <linux/errno.h>
--#include <linux/ptrace.h>
--#include <linux/timer.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/init.h>
--#include <linux/delay.h>
--#include <linux/spinlock.h>
--#include <linux/kallsyms.h>
--#include <linux/interrupt.h>
--#include <linux/sysctl.h>
 -#include <linux/module.h>
--#include <asm/system.h>
--#include <asm/uaccess.h>
--#include <asm/io.h>
--#include <asm/atomic.h>
--#include <asm/processor.h>
--#include <asm/pgtable.h>
+-#include <linux/init.h>
+-#include <linux/types.h>
+-#include <linux/crypto.h>
+-#include <linux/linkage.h>
 -
--#undef DEBUG_EXCEPTION
--#ifdef DEBUG_EXCEPTION
--/* implemented in ../lib/dbg.c */
--extern void show_excp_regs(char *fname, int trapnr, int signr,
--			   struct pt_regs *regs);
--#else
--#define show_excp_regs(a, b, c, d)
--#endif
+-asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 -
--static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
--		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
+-#define AES_MIN_KEY_SIZE	16
+-#define AES_MAX_KEY_SIZE	32
+-#define AES_BLOCK_SIZE		16
+-#define AES_KS_LENGTH		4 * AES_BLOCK_SIZE
+-#define RC_LENGTH		29
 -
--#define DO_ERROR(trapnr, signr, str, name, tsk) \
--asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
--{ \
--	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
--}
+-struct aes_ctx {
+-	u32 ekey[AES_KS_LENGTH];
+-	u32 rounds;
+-	u32 dkey[AES_KS_LENGTH];
+-};
 -
--spinlock_t die_lock;
+-#define WPOLY 0x011b
+-#define bytes2word(b0, b1, b2, b3)  \
+-	(((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
 -
--void die(const char * str, struct pt_regs * regs, long err)
--{
--	console_verbose();
--	spin_lock_irq(&die_lock);
--	printk("%s: %lx\n", str, (err & 0xffffff));
--	show_regs(regs);
--	spin_unlock_irq(&die_lock);
--	do_exit(SIGSEGV);
--}
+-/* define the finite field multiplies required for Rijndael */
+-#define f2(x) ((x) ? pow[log[x] + 0x19] : 0)
+-#define f3(x) ((x) ? pow[log[x] + 0x01] : 0)
+-#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0)
+-#define fb(x) ((x) ? pow[log[x] + 0x68] : 0)
+-#define fd(x) ((x) ? pow[log[x] + 0xee] : 0)
+-#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0)
+-#define fi(x) ((x) ?   pow[255 - log[x]]: 0)
 -
--static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+-static inline u32 upr(u32 x, int n)
 -{
--	if (!user_mode(regs))
--		die(str, regs, err);
+-	return (x << 8 * n) | (x >> (32 - 8 * n));
 -}
 -
--static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+-static inline u8 bval(u32 x, int n)
 -{
--	if (!user_mode(regs)) {
--		const struct exception_table_entry *fixup;
--		fixup = search_exception_tables(regs->pc);
--		if (fixup) {
--			regs->pc = fixup->fixup;
--			return;
--		}
--		die(str, regs, err);
--	}
+-	return x >> 8 * n;
 -}
 -
--DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
--DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
+-/* The forward and inverse affine transformations used in the S-box */
+-#define fwd_affine(x) \
+-	(w = (u32)x, w ^= (w<<1)^(w<<2)^(w<<3)^(w<<4), 0x63^(u8)(w^(w>>8)))
 -
+-#define inv_affine(x) \
+-	(w = (u32)x, w = (w<<1)^(w<<3)^(w<<6), 0x05^(u8)(w^(w>>8)))
 -
--/* Implement misaligned load/store handling for kernel (and optionally for user
--   mode too).  Limitation : only SHmedia mode code is handled - there is no
--   handling at all for misaligned accesses occurring in SHcompact code yet. */
+-static u32 rcon_tab[RC_LENGTH];
 -
--static int misaligned_fixup(struct pt_regs *regs);
+-u32 ft_tab[4][256];
+-u32 fl_tab[4][256];
+-static u32 im_tab[4][256];
+-u32 il_tab[4][256];
+-u32 it_tab[4][256];
 -
--asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
+-static void gen_tabs(void)
 -{
--	if (misaligned_fixup(regs) < 0) {
--		do_unhandled_exception(7, SIGSEGV, "address error(load)",
--				"do_address_error_load",
--				error_code, regs, current);
--	}
--	return;
--}
+-	u32 i, w;
+-	u8 pow[512], log[256];
 -
--asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
--{
--	if (misaligned_fixup(regs) < 0) {
--		do_unhandled_exception(8, SIGSEGV, "address error(store)",
--				"do_address_error_store",
--				error_code, regs, current);
+-	/*
+-	 * log and power tables for GF(2^8) finite field with
+-	 * WPOLY as modular polynomial - the simplest primitive
+-	 * root is 0x03, used here to generate the tables.
+-	 */
+-	i = 0; w = 1; 
+-	
+-	do {
+-		pow[i] = (u8)w;
+-		pow[i + 255] = (u8)w;
+-		log[w] = (u8)i++;
+-		w ^=  (w << 1) ^ (w & 0x80 ? WPOLY : 0);
+-	} while (w != 1);
+-	
+-	for(i = 0, w = 1; i < RC_LENGTH; ++i) {
+-		rcon_tab[i] = bytes2word(w, 0, 0, 0);
+-		w = f2(w);
 -	}
--	return;
--}
--
--#if defined(CONFIG_SH64_ID2815_WORKAROUND)
--
--#define OPCODE_INVALID      0
--#define OPCODE_USER_VALID   1
--#define OPCODE_PRIV_VALID   2
--
--/* getcon/putcon - requires checking which control register is referenced. */
--#define OPCODE_CTRL_REG     3
 -
--/* Table of valid opcodes for SHmedia mode.
--   Form a 10-bit value by concatenating the major/minor opcodes i.e.
--   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
--   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
--   LSBs==4'b0000 etc). */
--static unsigned long shmedia_opcode_table[64] = {
--	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
--	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
--	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
--	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
--	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
--	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
--	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
--	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
--};
+-	for(i = 0; i < 256; ++i) {
+-		u8 b;
+-		
+-		b = fwd_affine(fi((u8)i));
+-		w = bytes2word(f2(b), b, b, f3(b));
 -
--void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
--{
--	/* Workaround SH5-101 cut2 silicon defect #2815 :
--	   in some situations, inter-mode branches from SHcompact -> SHmedia
--	   which should take ITLBMISS or EXECPROT exceptions at the target
--	   falsely take RESINST at the target instead. */
+-		/* tables for a normal encryption round */
+-		ft_tab[0][i] = w;
+-		ft_tab[1][i] = upr(w, 1);
+-		ft_tab[2][i] = upr(w, 2);
+-		ft_tab[3][i] = upr(w, 3);
+-		w = bytes2word(b, 0, 0, 0);
+-		
+-		/*
+-		 * tables for last encryption round
+-		 * (may also be used in the key schedule)
+-		 */
+-		fl_tab[0][i] = w;
+-		fl_tab[1][i] = upr(w, 1);
+-		fl_tab[2][i] = upr(w, 2);
+-		fl_tab[3][i] = upr(w, 3);
+-		
+-		b = fi(inv_affine((u8)i));
+-		w = bytes2word(fe(b), f9(b), fd(b), fb(b));
 -
--	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
--	unsigned long pc, aligned_pc;
--	int get_user_error;
--	int trapnr = 12;
--	int signr = SIGILL;
--	char *exception_name = "reserved_instruction";
+-		/* tables for the inverse mix column operation  */
+-		im_tab[0][b] = w;
+-		im_tab[1][b] = upr(w, 1);
+-		im_tab[2][b] = upr(w, 2);
+-		im_tab[3][b] = upr(w, 3);
 -
--	pc = regs->pc;
--	if ((pc & 3) == 1) {
--		/* SHmedia : check for defect.  This requires executable vmas
--		   to be readable too. */
--		aligned_pc = pc & ~3;
--		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
--			get_user_error = -EFAULT;
--		} else {
--			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
--		}
--		if (get_user_error >= 0) {
--			unsigned long index, shift;
--			unsigned long major, minor, combined;
--			unsigned long reserved_field;
--			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
--			major = (opcode >> 26) & 0x3f;
--			minor = (opcode >> 16) & 0xf;
--			combined = (major << 4) | minor;
--			index = major;
--			shift = minor << 1;
--			if (reserved_field == 0) {
--				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
--				switch (opcode_state) {
--					case OPCODE_INVALID:
--						/* Trap. */
--						break;
--					case OPCODE_USER_VALID:
--						/* Restart the instruction : the branch to the instruction will now be from an RTE
--						   not from SHcompact so the silicon defect won't be triggered. */
--						return;
--					case OPCODE_PRIV_VALID:
--						if (!user_mode(regs)) {
--							/* Should only ever get here if a module has
--							   SHcompact code inside it.  If so, the same fix up is needed. */
--							return; /* same reason */
--						}
--						/* Otherwise, user mode trying to execute a privileged instruction -
--						   fall through to trap. */
--						break;
--					case OPCODE_CTRL_REG:
--						/* If in privileged mode, return as above. */
--						if (!user_mode(regs)) return;
--						/* In user mode ... */
--						if (combined == 0x9f) { /* GETCON */
--							unsigned long regno = (opcode >> 20) & 0x3f;
--							if (regno >= 62) {
--								return;
--							}
--							/* Otherwise, reserved or privileged control register, => trap */
--						} else if (combined == 0x1bf) { /* PUTCON */
--							unsigned long regno = (opcode >> 4) & 0x3f;
--							if (regno >= 62) {
--								return;
--							}
--							/* Otherwise, reserved or privileged control register, => trap */
--						} else {
--							/* Trap */
--						}
--						break;
--					default:
--						/* Fall through to trap. */
--						break;
--				}
--			}
--			/* fall through to normal resinst processing */
--		} else {
--			/* Error trying to read opcode.  This typically means a
--			   real fault, not a RESINST any more.  So change the
--			   codes. */
--			trapnr = 87;
--			exception_name = "address error (exec)";
--			signr = SIGSEGV;
--		}
--	}
+-		/* tables for a normal decryption round */
+-		it_tab[0][i] = w;
+-		it_tab[1][i] = upr(w,1);
+-		it_tab[2][i] = upr(w,2);
+-		it_tab[3][i] = upr(w,3);
 -
--	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
+-		w = bytes2word(b, 0, 0, 0);
+-		
+-		/* tables for last decryption round */
+-		il_tab[0][i] = w;
+-		il_tab[1][i] = upr(w,1);
+-		il_tab[2][i] = upr(w,2);
+-		il_tab[3][i] = upr(w,3);
+-    }
 -}
 -
--#else /* CONFIG_SH64_ID2815_WORKAROUND */
--
--/* If the workaround isn't needed, this is just a straightforward reserved
--   instruction */
--DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)
--
--#endif /* CONFIG_SH64_ID2815_WORKAROUND */
+-#define four_tables(x,tab,vf,rf,c)		\
+-(	tab[0][bval(vf(x,0,c),rf(0,c))]	^	\
+-	tab[1][bval(vf(x,1,c),rf(1,c))] ^	\
+-	tab[2][bval(vf(x,2,c),rf(2,c))] ^	\
+-	tab[3][bval(vf(x,3,c),rf(3,c))]		\
+-)
 -
--/* Called with interrupts disabled */
--asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
--{
--	PLS();
--	show_excp_regs(__FUNCTION__, -1, -1, regs);
--	die_if_kernel("exception", regs, ex);
--}
+-#define vf1(x,r,c)  (x)
+-#define rf1(r,c)    (r)
+-#define rf2(r,c)    ((r-c)&3)
 -
--int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
--{
--	/* Syscall debug */
--        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);
+-#define inv_mcol(x) four_tables(x,im_tab,vf1,rf1,0)
+-#define ls_box(x,c) four_tables(x,fl_tab,vf1,rf2,c)
 -
--	die_if_kernel("unknown trapa", regs, scId);
+-#define ff(x) inv_mcol(x)
 -
--	return -ENOSYS;
+-#define ke4(k,i)							\
+-{									\
+-	k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i];		\
+-	k[4*(i)+5] = ss[1] ^= ss[0];					\
+-	k[4*(i)+6] = ss[2] ^= ss[1];					\
+-	k[4*(i)+7] = ss[3] ^= ss[2];					\
 -}
 -
--void show_stack(struct task_struct *tsk, unsigned long *sp)
--{
--#ifdef CONFIG_KALLSYMS
--	extern void sh64_unwind(struct pt_regs *regs);
--	struct pt_regs *regs;
--
--	regs = tsk ? tsk->thread.kregs : NULL;
--
--	sh64_unwind(regs);
--#else
--	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
--#endif
+-#define kel4(k,i)							\
+-{									\
+-	k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i];		\
+-	k[4*(i)+5] = ss[1] ^= ss[0];					\
+-	k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2];	\
 -}
 -
--void show_task(unsigned long *sp)
--{
--	show_stack(NULL, sp);
+-#define ke6(k,i)							\
+-{									\
+-	k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i];		\
+-	k[6*(i)+ 7] = ss[1] ^= ss[0];					\
+-	k[6*(i)+ 8] = ss[2] ^= ss[1];					\
+-	k[6*(i)+ 9] = ss[3] ^= ss[2];					\
+-	k[6*(i)+10] = ss[4] ^= ss[3];					\
+-	k[6*(i)+11] = ss[5] ^= ss[4];					\
 -}
 -
--void dump_stack(void)
--{
--	show_task(NULL);
+-#define kel6(k,i)							\
+-{									\
+-	k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i];		\
+-	k[6*(i)+ 7] = ss[1] ^= ss[0];					\
+-	k[6*(i)+ 8] = ss[2] ^= ss[1];					\
+-	k[6*(i)+ 9] = ss[3] ^= ss[2];					\
 -}
--/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
--EXPORT_SYMBOL(dump_stack);
--
--static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
--		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
--{
--	show_excp_regs(fn_name, trapnr, signr, regs);
--	tsk->thread.error_code = error_code;
--	tsk->thread.trap_no = trapnr;
 -
--	if (user_mode(regs))
--		force_sig(signr, tsk);
--
--	die_if_no_fixup(str, regs, error_code);
+-#define ke8(k,i)							\
+-{									\
+-	k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i];		\
+-	k[8*(i)+ 9] = ss[1] ^= ss[0];					\
+-	k[8*(i)+10] = ss[2] ^= ss[1];					\
+-	k[8*(i)+11] = ss[3] ^= ss[2];					\
+-	k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0);				\
+-	k[8*(i)+13] = ss[5] ^= ss[4];					\
+-	k[8*(i)+14] = ss[6] ^= ss[5];					\
+-	k[8*(i)+15] = ss[7] ^= ss[6];					\
 -}
 -
--static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
--{
--	int get_user_error;
--	unsigned long aligned_pc;
--	unsigned long opcode;
--
--	if ((pc & 3) == 1) {
--		/* SHmedia */
--		aligned_pc = pc & ~3;
--		if (from_user_mode) {
--			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
--				get_user_error = -EFAULT;
--			} else {
--				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
--				*result_opcode = opcode;
--			}
--			return get_user_error;
--		} else {
--			/* If the fault was in the kernel, we can either read
--			 * this directly, or if not, we fault.
--			*/
--			*result_opcode = *(unsigned long *) aligned_pc;
--			return 0;
--		}
--	} else if ((pc & 1) == 0) {
--		/* SHcompact */
--		/* TODO : provide handling for this.  We don't really support
--		   user-mode SHcompact yet, and for a kernel fault, this would
--		   have to come from a module built for SHcompact.  */
--		return -EFAULT;
--	} else {
--		/* misaligned */
--		return -EFAULT;
--	}
+-#define kel8(k,i)							\
+-{									\
+-	k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i];		\
+-	k[8*(i)+ 9] = ss[1] ^= ss[0];					\
+-	k[8*(i)+10] = ss[2] ^= ss[1];					\
+-	k[8*(i)+11] = ss[3] ^= ss[2];					\
 -}
 -
--static int address_is_sign_extended(__u64 a)
--{
--	__u64 b;
--#if (NEFF == 32)
--	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
--	return (b == a) ? 1 : 0;
--#else
--#error "Sign extend check only works for NEFF==32"
--#endif
+-#define kdf4(k,i)							\
+-{									\
+-	ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3];				\
+-	ss[1] = ss[1] ^ ss[3];						\
+-	ss[2] = ss[2] ^ ss[3];						\
+-	ss[3] = ss[3];							\
+-	ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i];			\
+-	ss[i % 4] ^= ss[4];						\
+-	ss[4] ^= k[4*(i)];						\
+-	k[4*(i)+4] = ff(ss[4]);						\
+-	ss[4] ^= k[4*(i)+1];						\
+-	k[4*(i)+5] = ff(ss[4]);						\
+-	ss[4] ^= k[4*(i)+2];						\
+-	k[4*(i)+6] = ff(ss[4]);						\
+-	ss[4] ^= k[4*(i)+3];						\
+-	k[4*(i)+7] = ff(ss[4]);						\
 -}
 -
--static int generate_and_check_address(struct pt_regs *regs,
--				      __u32 opcode,
--				      int displacement_not_indexed,
--				      int width_shift,
--				      __u64 *address)
--{
--	/* return -1 for fault, 0 for OK */
--
--	__u64 base_address, addr;
--	int basereg;
--
--	basereg = (opcode >> 20) & 0x3f;
--	base_address = regs->regs[basereg];
--	if (displacement_not_indexed) {
--		__s64 displacement;
--		displacement = (opcode >> 10) & 0x3ff;
--		displacement = ((displacement << 54) >> 54); /* sign extend */
--		addr = (__u64)((__s64)base_address + (displacement << width_shift));
--	} else {
--		__u64 offset;
--		int offsetreg;
--		offsetreg = (opcode >> 10) & 0x3f;
--		offset = regs->regs[offsetreg];
--		addr = base_address + offset;
--	}
--
--	/* Check sign extended */
--	if (!address_is_sign_extended(addr)) {
--		return -1;
--	}
--
--#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--	/* Check accessible.  For misaligned access in the kernel, assume the
--	   address is always accessible (and if not, just fault when the
--	   load/store gets done.) */
--	if (user_mode(regs)) {
--		if (addr >= TASK_SIZE) {
--			return -1;
--		}
--		/* Do access_ok check later - it depends on whether it's a load or a store. */
--	}
--#endif
--
--	*address = addr;
--	return 0;
+-#define kd4(k,i)							\
+-{									\
+-	ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i];			\
+-	ss[i % 4] ^= ss[4];						\
+-	ss[4] = ff(ss[4]);						\
+-	k[4*(i)+4] = ss[4] ^= k[4*(i)];					\
+-	k[4*(i)+5] = ss[4] ^= k[4*(i)+1];				\
+-	k[4*(i)+6] = ss[4] ^= k[4*(i)+2];				\
+-	k[4*(i)+7] = ss[4] ^= k[4*(i)+3];				\
 -}
 -
--/* Default value as for sh */
--#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--static int user_mode_unaligned_fixup_count = 10;
--static int user_mode_unaligned_fixup_enable = 1;
--#endif
--
--static int kernel_mode_unaligned_fixup_count = 32;
--
--static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
--{
--	unsigned short x;
--	unsigned char *p, *q;
--	p = (unsigned char *) (int) address;
--	q = (unsigned char *) &x;
--	q[0] = p[0];
--	q[1] = p[1];
--
--	if (do_sign_extend) {
--		*result = (__u64)(__s64) *(short *) &x;
--	} else {
--		*result = (__u64) x;
--	}
+-#define kdl4(k,i)							\
+-{									\
+-	ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i];			\
+-	ss[i % 4] ^= ss[4];						\
+-	k[4*(i)+4] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3];			\
+-	k[4*(i)+5] = ss[1] ^ ss[3];					\
+-	k[4*(i)+6] = ss[0];						\
+-	k[4*(i)+7] = ss[1];						\
 -}
 -
--static void misaligned_kernel_word_store(__u64 address, __u64 value)
--{
--	unsigned short x;
--	unsigned char *p, *q;
--	p = (unsigned char *) (int) address;
--	q = (unsigned char *) &x;
--
--	x = (__u16) value;
--	p[0] = q[0];
--	p[1] = q[1];
+-#define kdf6(k,i)							\
+-{									\
+-	ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i];				\
+-	k[6*(i)+ 6] = ff(ss[0]);					\
+-	ss[1] ^= ss[0];							\
+-	k[6*(i)+ 7] = ff(ss[1]);					\
+-	ss[2] ^= ss[1];							\
+-	k[6*(i)+ 8] = ff(ss[2]);					\
+-	ss[3] ^= ss[2];							\
+-	k[6*(i)+ 9] = ff(ss[3]);					\
+-	ss[4] ^= ss[3];							\
+-	k[6*(i)+10] = ff(ss[4]);					\
+-	ss[5] ^= ss[4];							\
+-	k[6*(i)+11] = ff(ss[5]);					\
 -}
 -
--static int misaligned_load(struct pt_regs *regs,
--			   __u32 opcode,
--			   int displacement_not_indexed,
--			   int width_shift,
--			   int do_sign_extend)
--{
--	/* Return -1 for a fault, 0 for OK */
--	int error;
--	int destreg;
--	__u64 address;
--
--	error = generate_and_check_address(regs, opcode,
--			displacement_not_indexed, width_shift, &address);
--	if (error < 0) {
--		return error;
--	}
--
--	destreg = (opcode >> 4) & 0x3f;
--#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--	if (user_mode(regs)) {
--		__u64 buffer;
--
--		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
--			return -1;
--		}
--
--		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
--			return -1; /* fault */
--		}
--		switch (width_shift) {
--		case 1:
--			if (do_sign_extend) {
--				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
--			} else {
--				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
--			}
--			break;
--		case 2:
--			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
--			break;
--		case 3:
--			regs->regs[destreg] = buffer;
--			break;
--		default:
--			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
--				width_shift, (unsigned long) regs->pc);
--			break;
--		}
--	} else
--#endif
--	{
--		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
--		__u64 lo, hi;
+-#define kd6(k,i)							\
+-{									\
+-	ss[6] = ls_box(ss[5],3) ^ rcon_tab[i];				\
+-	ss[0] ^= ss[6]; ss[6] = ff(ss[6]);				\
+-	k[6*(i)+ 6] = ss[6] ^= k[6*(i)];				\
+-	ss[1] ^= ss[0];							\
+-	k[6*(i)+ 7] = ss[6] ^= k[6*(i)+ 1];				\
+-	ss[2] ^= ss[1];							\
+-	k[6*(i)+ 8] = ss[6] ^= k[6*(i)+ 2];				\
+-	ss[3] ^= ss[2];							\
+-	k[6*(i)+ 9] = ss[6] ^= k[6*(i)+ 3];				\
+-	ss[4] ^= ss[3];							\
+-	k[6*(i)+10] = ss[6] ^= k[6*(i)+ 4];				\
+-	ss[5] ^= ss[4];							\
+-	k[6*(i)+11] = ss[6] ^= k[6*(i)+ 5];				\
+-}
 -
--		switch (width_shift) {
--		case 1:
--			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
--			break;
--		case 2:
--			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
--			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
--			regs->regs[destreg] = lo | hi;
--			break;
--		case 3:
--			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
--			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
--			regs->regs[destreg] = lo | hi;
--			break;
+-#define kdl6(k,i)							\
+-{									\
+-	ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i];				\
+-	k[6*(i)+ 6] = ss[0];						\
+-	ss[1] ^= ss[0];							\
+-	k[6*(i)+ 7] = ss[1];						\
+-	ss[2] ^= ss[1];							\
+-	k[6*(i)+ 8] = ss[2];						\
+-	ss[3] ^= ss[2];							\
+-	k[6*(i)+ 9] = ss[3];						\
+-}
 -
--		default:
--			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
--				width_shift, (unsigned long) regs->pc);
--			break;
--		}
--	}
+-#define kdf8(k,i)							\
+-{									\
+-	ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i];				\
+-	k[8*(i)+ 8] = ff(ss[0]);					\
+-	ss[1] ^= ss[0];							\
+-	k[8*(i)+ 9] = ff(ss[1]);					\
+-	ss[2] ^= ss[1];							\
+-	k[8*(i)+10] = ff(ss[2]);					\
+-	ss[3] ^= ss[2];							\
+-	k[8*(i)+11] = ff(ss[3]);					\
+-	ss[4] ^= ls_box(ss[3],0);					\
+-	k[8*(i)+12] = ff(ss[4]);					\
+-	ss[5] ^= ss[4];							\
+-	k[8*(i)+13] = ff(ss[5]);					\
+-	ss[6] ^= ss[5];							\
+-	k[8*(i)+14] = ff(ss[6]);					\
+-	ss[7] ^= ss[6];							\
+-	k[8*(i)+15] = ff(ss[7]);					\
+-}
 -
--	return 0;
+-#define kd8(k,i)							\
+-{									\
+-	u32 __g = ls_box(ss[7],3) ^ rcon_tab[i];			\
+-	ss[0] ^= __g;							\
+-	__g = ff(__g);							\
+-	k[8*(i)+ 8] = __g ^= k[8*(i)];					\
+-	ss[1] ^= ss[0];							\
+-	k[8*(i)+ 9] = __g ^= k[8*(i)+ 1];				\
+-	ss[2] ^= ss[1];							\
+-	k[8*(i)+10] = __g ^= k[8*(i)+ 2];				\
+-	ss[3] ^= ss[2];							\
+-	k[8*(i)+11] = __g ^= k[8*(i)+ 3];				\
+-	__g = ls_box(ss[3],0);						\
+-	ss[4] ^= __g;							\
+-	__g = ff(__g);							\
+-	k[8*(i)+12] = __g ^= k[8*(i)+ 4];				\
+-	ss[5] ^= ss[4];							\
+-	k[8*(i)+13] = __g ^= k[8*(i)+ 5];				\
+-	ss[6] ^= ss[5];							\
+-	k[8*(i)+14] = __g ^= k[8*(i)+ 6];				\
+-	ss[7] ^= ss[6];							\
+-	k[8*(i)+15] = __g ^= k[8*(i)+ 7];				\
+-}
 -
+-#define kdl8(k,i)							\
+-{									\
+-	ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i];				\
+-	k[8*(i)+ 8] = ss[0];						\
+-	ss[1] ^= ss[0];							\
+-	k[8*(i)+ 9] = ss[1];						\
+-	ss[2] ^= ss[1];							\
+-	k[8*(i)+10] = ss[2];						\
+-	ss[3] ^= ss[2];							\
+-	k[8*(i)+11] = ss[3];						\
 -}
 -
--static int misaligned_store(struct pt_regs *regs,
--			    __u32 opcode,
--			    int displacement_not_indexed,
--			    int width_shift)
+-static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+-		       unsigned int key_len)
 -{
--	/* Return -1 for a fault, 0 for OK */
--	int error;
--	int srcreg;
--	__u64 address;
--
--	error = generate_and_check_address(regs, opcode,
--			displacement_not_indexed, width_shift, &address);
--	if (error < 0) {
--		return error;
--	}
+-	int i;
+-	u32 ss[8];
+-	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
+-	const __le32 *key = (const __le32 *)in_key;
+-	u32 *flags = &tfm->crt_flags;
 -
--	srcreg = (opcode >> 4) & 0x3f;
--#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--	if (user_mode(regs)) {
--		__u64 buffer;
+-	/* encryption schedule */
+-	
+-	ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]);
+-	ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]);
+-	ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]);
+-	ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]);
 -
--		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
--			return -1;
--		}
+-	switch(key_len) {
+-	case 16:
+-		for (i = 0; i < 9; i++)
+-			ke4(ctx->ekey, i);
+-		kel4(ctx->ekey, 9);
+-		ctx->rounds = 10;
+-		break;
+-		
+-	case 24:
+-		ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
+-		ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
+-		for (i = 0; i < 7; i++)
+-			ke6(ctx->ekey, i);
+-		kel6(ctx->ekey, 7); 
+-		ctx->rounds = 12;
+-		break;
 -
--		switch (width_shift) {
--		case 1:
--			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
--			break;
--		case 2:
--			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
--			break;
--		case 3:
--			buffer = regs->regs[srcreg];
--			break;
--		default:
--			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
--				width_shift, (unsigned long) regs->pc);
--			break;
--		}
+-	case 32:
+-		ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
+-		ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
+-		ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]);
+-		ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]);
+-		for (i = 0; i < 6; i++)
+-			ke8(ctx->ekey, i);
+-		kel8(ctx->ekey, 6);
+-		ctx->rounds = 14;
+-		break;
 -
--		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
--			return -1; /* fault */
--		}
--	} else
--#endif
--	{
--		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
--		__u64 val = regs->regs[srcreg];
+-	default:
+-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+-		return -EINVAL;
+-	}
+-	
+-	/* decryption schedule */
+-	
+-	ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]);
+-	ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]);
+-	ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]);
+-	ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]);
 -
--		switch (width_shift) {
--		case 1:
--			misaligned_kernel_word_store(address, val);
--			break;
--		case 2:
--			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
--			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
--			break;
--		case 3:
--			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
--			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
--			break;
+-	switch (key_len) {
+-	case 16:
+-		kdf4(ctx->dkey, 0);
+-		for (i = 1; i < 9; i++)
+-			kd4(ctx->dkey, i);
+-		kdl4(ctx->dkey, 9);
+-		break;
+-		
+-	case 24:
+-		ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
+-		ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
+-		kdf6(ctx->dkey, 0);
+-		for (i = 1; i < 7; i++)
+-			kd6(ctx->dkey, i);
+-		kdl6(ctx->dkey, 7);
+-		break;
 -
--		default:
--			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
--				width_shift, (unsigned long) regs->pc);
--			break;
--		}
+-	case 32:
+-		ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
+-		ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
+-		ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6]));
+-		ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7]));
+-		kdf8(ctx->dkey, 0);
+-		for (i = 1; i < 6; i++)
+-			kd8(ctx->dkey, i);
+-		kdl8(ctx->dkey, 6);
+-		break;
 -	}
--
 -	return 0;
--
 -}
 -
--#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
--   error. */
--static int misaligned_fpu_load(struct pt_regs *regs,
--			   __u32 opcode,
--			   int displacement_not_indexed,
--			   int width_shift,
--			   int do_paired_load)
+-static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 -{
--	/* Return -1 for a fault, 0 for OK */
--	int error;
--	int destreg;
--	__u64 address;
--
--	error = generate_and_check_address(regs, opcode,
--			displacement_not_indexed, width_shift, &address);
--	if (error < 0) {
--		return error;
--	}
--
--	destreg = (opcode >> 4) & 0x3f;
--	if (user_mode(regs)) {
--		__u64 buffer;
--		__u32 buflo, bufhi;
--
--		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
--			return -1;
--		}
--
--		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
--			return -1; /* fault */
--		}
--		/* 'current' may be the current owner of the FPU state, so
--		   context switch the registers into memory so they can be
--		   indexed by register number. */
--		if (last_task_used_math == current) {
--			grab_fpu();
--			fpsave(&current->thread.fpu.hard);
--			release_fpu();
--			last_task_used_math = NULL;
--			regs->sr |= SR_FD;
--		}
--
--		buflo = *(__u32*) &buffer;
--		bufhi = *(1 + (__u32*) &buffer);
--
--		switch (width_shift) {
--		case 2:
--			current->thread.fpu.hard.fp_regs[destreg] = buflo;
--			break;
--		case 3:
--			if (do_paired_load) {
--				current->thread.fpu.hard.fp_regs[destreg] = buflo;
--				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
--			} else {
--#if defined(CONFIG_LITTLE_ENDIAN)
--				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
--				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
--#else
--				current->thread.fpu.hard.fp_regs[destreg] = buflo;
--				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
--#endif
--			}
--			break;
--		default:
--			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
--				width_shift, (unsigned long) regs->pc);
--			break;
--		}
--		return 0;
--	} else {
--		die ("Misaligned FPU load inside kernel", regs, 0);
--		return -1;
--	}
--
--
+-	aes_enc_blk(tfm, dst, src);
 -}
 -
--static int misaligned_fpu_store(struct pt_regs *regs,
--			   __u32 opcode,
--			   int displacement_not_indexed,
--			   int width_shift,
--			   int do_paired_load)
+-static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 -{
--	/* Return -1 for a fault, 0 for OK */
--	int error;
--	int srcreg;
--	__u64 address;
--
--	error = generate_and_check_address(regs, opcode,
--			displacement_not_indexed, width_shift, &address);
--	if (error < 0) {
--		return error;
--	}
--
--	srcreg = (opcode >> 4) & 0x3f;
--	if (user_mode(regs)) {
--		__u64 buffer;
--		/* Initialise these to NaNs. */
--		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
--
--		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
--			return -1;
--		}
--
--		/* 'current' may be the current owner of the FPU state, so
--		   context switch the registers into memory so they can be
--		   indexed by register number. */
--		if (last_task_used_math == current) {
--			grab_fpu();
--			fpsave(&current->thread.fpu.hard);
--			release_fpu();
--			last_task_used_math = NULL;
--			regs->sr |= SR_FD;
--		}
--
--		switch (width_shift) {
--		case 2:
--			buflo = current->thread.fpu.hard.fp_regs[srcreg];
--			break;
--		case 3:
--			if (do_paired_load) {
--				buflo = current->thread.fpu.hard.fp_regs[srcreg];
--				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
--			} else {
--#if defined(CONFIG_LITTLE_ENDIAN)
--				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
--				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
--#else
--				buflo = current->thread.fpu.hard.fp_regs[srcreg];
--				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
--#endif
--			}
--			break;
--		default:
--			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
--				width_shift, (unsigned long) regs->pc);
--			break;
--		}
--
--		*(__u32*) &buffer = buflo;
--		*(1 + (__u32*) &buffer) = bufhi;
--		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
--			return -1; /* fault */
--		}
--		return 0;
--	} else {
--		die ("Misaligned FPU load inside kernel", regs, 0);
--		return -1;
--	}
+-	aes_dec_blk(tfm, dst, src);
 -}
--#endif
--
--static int misaligned_fixup(struct pt_regs *regs)
--{
--	unsigned long opcode;
--	int error;
--	int major, minor;
 -
--#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--	/* Never fixup user mode misaligned accesses without this option enabled. */
--	return -1;
--#else
--	if (!user_mode_unaligned_fixup_enable) return -1;
--#endif
--
--	error = read_opcode(regs->pc, &opcode, user_mode(regs));
--	if (error < 0) {
--		return error;
--	}
--	major = (opcode >> 26) & 0x3f;
--	minor = (opcode >> 16) & 0xf;
--
--#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
--		--user_mode_unaligned_fixup_count;
--		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
--		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
--		       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
--	} else
--#endif
--	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
--		--kernel_mode_unaligned_fixup_count;
--		if (in_interrupt()) {
--			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
--			       (__u32)regs->pc, opcode);
--		} else {
--			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
--			       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+-static struct crypto_alg aes_alg = {
+-	.cra_name		=	"aes",
+-	.cra_driver_name	=	"aes-i586",
+-	.cra_priority		=	200,
+-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+-	.cra_blocksize		=	AES_BLOCK_SIZE,
+-	.cra_ctxsize		=	sizeof(struct aes_ctx),
+-	.cra_module		=	THIS_MODULE,
+-	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+-	.cra_u			=	{
+-		.cipher = {
+-			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+-			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+-			.cia_setkey	   	= 	aes_set_key,
+-			.cia_encrypt	 	=	aes_encrypt,
+-			.cia_decrypt	  	=	aes_decrypt
 -		}
 -	}
--
--
--	switch (major) {
--		case (0x84>>2): /* LD.W */
--			error = misaligned_load(regs, opcode, 1, 1, 1);
--			break;
--		case (0xb0>>2): /* LD.UW */
--			error = misaligned_load(regs, opcode, 1, 1, 0);
--			break;
--		case (0x88>>2): /* LD.L */
--			error = misaligned_load(regs, opcode, 1, 2, 1);
--			break;
--		case (0x8c>>2): /* LD.Q */
--			error = misaligned_load(regs, opcode, 1, 3, 0);
--			break;
--
--		case (0xa4>>2): /* ST.W */
--			error = misaligned_store(regs, opcode, 1, 1);
--			break;
--		case (0xa8>>2): /* ST.L */
--			error = misaligned_store(regs, opcode, 1, 2);
--			break;
--		case (0xac>>2): /* ST.Q */
--			error = misaligned_store(regs, opcode, 1, 3);
--			break;
--
--		case (0x40>>2): /* indexed loads */
--			switch (minor) {
--				case 0x1: /* LDX.W */
--					error = misaligned_load(regs, opcode, 0, 1, 1);
--					break;
--				case 0x5: /* LDX.UW */
--					error = misaligned_load(regs, opcode, 0, 1, 0);
--					break;
--				case 0x2: /* LDX.L */
--					error = misaligned_load(regs, opcode, 0, 2, 1);
--					break;
--				case 0x3: /* LDX.Q */
--					error = misaligned_load(regs, opcode, 0, 3, 0);
--					break;
--				default:
--					error = -1;
--					break;
--			}
--			break;
--
--		case (0x60>>2): /* indexed stores */
--			switch (minor) {
--				case 0x1: /* STX.W */
--					error = misaligned_store(regs, opcode, 0, 1);
--					break;
--				case 0x2: /* STX.L */
--					error = misaligned_store(regs, opcode, 0, 2);
--					break;
--				case 0x3: /* STX.Q */
--					error = misaligned_store(regs, opcode, 0, 3);
--					break;
--				default:
--					error = -1;
--					break;
--			}
--			break;
--
--#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--		case (0x94>>2): /* FLD.S */
--			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
--			break;
--		case (0x98>>2): /* FLD.P */
--			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
--			break;
--		case (0x9c>>2): /* FLD.D */
--			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
--			break;
--		case (0x1c>>2): /* floating indexed loads */
--			switch (minor) {
--			case 0x8: /* FLDX.S */
--				error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
--				break;
--			case 0xd: /* FLDX.P */
--				error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
--				break;
--			case 0x9: /* FLDX.D */
--				error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
--				break;
--			default:
--				error = -1;
--				break;
--			}
--			break;
--		case (0xb4>>2): /* FLD.S */
--			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
--			break;
--		case (0xb8>>2): /* FLD.P */
--			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
--			break;
--		case (0xbc>>2): /* FLD.D */
--			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
--			break;
--		case (0x3c>>2): /* floating indexed stores */
--			switch (minor) {
--			case 0x8: /* FSTX.S */
--				error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
--				break;
--			case 0xd: /* FSTX.P */
--				error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
--				break;
--			case 0x9: /* FSTX.D */
--				error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
--				break;
--			default:
--				error = -1;
--				break;
--			}
--			break;
--#endif
--
--		default:
--			/* Fault */
--			error = -1;
--			break;
--	}
--
--	if (error < 0) {
--		return error;
--	} else {
--		regs->pc += 4; /* Skip the instruction that's just been emulated */
--		return 0;
--	}
--
--}
--
--static ctl_table unaligned_table[] = {
--	{
--		.ctl_name	= CTL_UNNUMBERED,
--		.procname	= "kernel_reports",
--		.data		= &kernel_mode_unaligned_fixup_count,
--		.maxlen		= sizeof(int),
--		.mode		= 0644,
--		.proc_handler	= &proc_dointvec
--	},
--#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
--	{
--		.ctl_name	= CTL_UNNUMBERED,
--		.procname	= "user_reports",
--		.data		= &user_mode_unaligned_fixup_count,
--		.maxlen		= sizeof(int),
--		.mode		= 0644,
--		.proc_handler	= &proc_dointvec
--	},
--	{
--		.ctl_name	= CTL_UNNUMBERED,
--		.procname	= "user_enable",
--		.data		= &user_mode_unaligned_fixup_enable,
--		.maxlen		= sizeof(int),
--		.mode		= 0644,
--		.proc_handler	= &proc_dointvec},
--#endif
--	{}
--};
--
--static ctl_table unaligned_root[] = {
--	{
--		.ctl_name	= CTL_UNNUMBERED,
--		.procname	= "unaligned_fixup",
--		.mode		= 0555,
--		unaligned_table
--	},
--	{}
 -};
 -
--static ctl_table sh64_root[] = {
--	{
--		.ctl_name	= CTL_UNNUMBERED,
--		.procname	= "sh64",
--		.mode		= 0555,
--		.child		= unaligned_root
--	},
--	{}
--};
--static struct ctl_table_header *sysctl_header;
--static int __init init_sysctl(void)
+-static int __init aes_init(void)
 -{
--	sysctl_header = register_sysctl_table(sh64_root);
--	return 0;
+-	gen_tabs();
+-	return crypto_register_alg(&aes_alg);
 -}
 -
--__initcall(init_sysctl);
--
--
--asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
+-static void __exit aes_fini(void)
 -{
--	u64 peek_real_address_q(u64 addr);
--	u64 poke_real_address_q(u64 addr, u64 val);
--	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
--	unsigned long long exp_cause;
--	/* It's not worth ioremapping the debug module registers for the amount
--	   of access we make to them - just go direct to their physical
--	   addresses. */
--	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
--	if (exp_cause & ~4) {
--		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
--			(unsigned long)(exp_cause & 0xffffffff));
--	}
--	show_state();
--	/* Clear all DEBUGINT causes */
--	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
+-	crypto_unregister_alg(&aes_alg);
 -}
-diff --git a/arch/sh64/kernel/unwind.c b/arch/sh64/kernel/unwind.c
+-
+-module_init(aes_init);
+-module_exit(aes_fini);
+-
+-MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, i586 asm optimized");
+-MODULE_LICENSE("Dual BSD/GPL");
+-MODULE_AUTHOR("Fruhwirth Clemens, James Morris, Brian Gladman, Adam Richter");
+-MODULE_ALIAS("aes");
+diff --git a/arch/x86/crypto/aes_64.c b/arch/x86/crypto/aes_64.c
 deleted file mode 100644
-index 1214c78..0000000
---- a/arch/sh64/kernel/unwind.c
+index 5cdb13e..0000000
+--- a/arch/x86/crypto/aes_64.c
 +++ /dev/null
-@@ -1,326 +0,0 @@
+@@ -1,336 +0,0 @@
 -/*
-- * arch/sh64/kernel/unwind.c
+- * Cryptographic API.
 - *
-- * Copyright (C) 2004  Paul Mundt
-- * Copyright (C) 2004  Richard Curnow
+- * AES Cipher Algorithm.
 - *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/kallsyms.h>
--#include <linux/kernel.h>
--#include <linux/types.h>
--#include <linux/errno.h>
--#include <asm/page.h>
--#include <asm/ptrace.h>
--#include <asm/processor.h>
--#include <asm/io.h>
--
--static u8 regcache[63];
--
--/*
-- * Finding the previous stack frame isn't horribly straightforward as it is
-- * on some other platforms. In the sh64 case, we don't have "linked" stack
-- * frames, so we need to do a bit of work to determine the previous frame,
-- * and in turn, the previous r14/r18 pair.
+- * Based on Brian Gladman's code.
 - *
-- * There are generally a few cases which determine where we can find out
-- * the r14/r18 values. In the general case, this can be determined by poking
-- * around the prologue of the symbol PC is in (note that we absolutely must
-- * have frame pointer support as well as the kernel symbol table mapped,
-- * otherwise we can't even get this far).
+- * Linux developers:
+- *  Alexander Kjeldaas <astor at fast.no>
+- *  Herbert Valerio Riedel <hvr at hvrlab.org>
+- *  Kyle McMartin <kyle at debian.org>
+- *  Adam J. Richter <adam at yggdrasil.com> (conversion to 2.5 API).
+- *  Andreas Steinmetz <ast at domdv.de> (adapted to x86_64 assembler)
 - *
-- * In other cases, such as the interrupt/exception path, we can poke around
-- * the sp/fp.
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
 - *
-- * Notably, this entire approach is somewhat error prone, and in the event
-- * that the previous frame cannot be determined, that's all we can do.
-- * Either way, this still leaves us with a more correct backtrace then what
-- * we would be able to come up with by walking the stack (which is garbage
-- * for anything beyond the first frame).
-- *						-- PFM.
+- * ---------------------------------------------------------------------------
+- * Copyright (c) 2002, Dr Brian Gladman <brg at gladman.me.uk>, Worcester, UK.
+- * All rights reserved.
+- *
+- * LICENSE TERMS
+- *
+- * The free distribution and use of this software in both source and binary
+- * form is allowed (with or without changes) provided that:
+- *
+- *   1. distributions of this source code include the above copyright
+- *      notice, this list of conditions and the following disclaimer;
+- *
+- *   2. distributions in binary form include the above copyright
+- *      notice, this list of conditions and the following disclaimer
+- *      in the documentation and/or other associated materials;
+- *
+- *   3. the copyright holder's name is not used to endorse products
+- *      built using this software without specific written permission.
+- *
+- * ALTERNATIVELY, provided that this notice is retained in full, this product
+- * may be distributed under the terms of the GNU General Public License (GPL),
+- * in which case the provisions of the GPL apply INSTEAD OF those given above.
+- *
+- * DISCLAIMER
+- *
+- * This software is provided 'as is' with no explicit or implied warranties
+- * in respect of its properties, including, but not limited to, correctness
+- * and/or fitness for purpose.
+- * ---------------------------------------------------------------------------
 - */
--static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
--		      unsigned long *pprev_fp, unsigned long *pprev_pc,
--		      struct pt_regs *regs)
--{
--	const char *sym;
--	char namebuf[128];
--	unsigned long offset;
--	unsigned long prologue = 0;
--	unsigned long fp_displacement = 0;
--	unsigned long fp_prev = 0;
--	unsigned long offset_r14 = 0, offset_r18 = 0;
--	int i, found_prologue_end = 0;
--
--	sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
--	if (!sym)
--		return -EINVAL;
 -
--	prologue = pc - offset;
--	if (!prologue)
--		return -EINVAL;
+-/* Some changes from the Gladman version:
+-    s/RIJNDAEL(e_key)/E_KEY/g
+-    s/RIJNDAEL(d_key)/D_KEY/g
+-*/
 -
--	/* Validate fp, to avoid risk of dereferencing a bad pointer later.
--	   Assume 128Mb since that's the amount of RAM on a Cayman.  Modify
--	   when there is an SH-5 board with more. */
--	if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
--	    (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
--	    ((fp & 7) != 0)) {
--		return -EINVAL;
--	}
+-#include <asm/byteorder.h>
+-#include <linux/bitops.h>
+-#include <linux/crypto.h>
+-#include <linux/errno.h>
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/types.h>
 -
--	/*
--	 * Depth to walk, depth is completely arbitrary.
--	 */
--	for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
--		unsigned long op;
--		u8 major, minor;
--		u8 src, dest, disp;
+-#define AES_MIN_KEY_SIZE	16
+-#define AES_MAX_KEY_SIZE	32
 -
--		op = *(unsigned long *)prologue;
+-#define AES_BLOCK_SIZE		16
 -
--		major = (op >> 26) & 0x3f;
--		src   = (op >> 20) & 0x3f;
--		minor = (op >> 16) & 0xf;
--		disp  = (op >> 10) & 0x3f;
--		dest  = (op >>  4) & 0x3f;
+-/*
+- * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
+- */
+-static inline u8 byte(const u32 x, const unsigned n)
+-{
+-	return x >> (n << 3);
+-}
 -
--		/*
--		 * Stack frame creation happens in a number of ways.. in the
--		 * general case when the stack frame is less than 511 bytes,
--		 * it's generally created by an addi or addi.l:
--		 *
--		 *	addi/addi.l r15, -FRAME_SIZE, r15
--		 *
--		 * in the event that the frame size is bigger than this, it's
--		 * typically created using a movi/sub pair as follows:
--		 *
--		 *	movi	FRAME_SIZE, rX
--		 *	sub	r15, rX, r15
--		 */
+-struct aes_ctx
+-{
+-	u32 key_length;
+-	u32 buf[120];
+-};
 -
--		switch (major) {
--		case (0x00 >> 2):
--			switch (minor) {
--			case 0x8: /* add.l */
--			case 0x9: /* add */
--				/* Look for r15, r63, r14 */
--				if (src == 15 && disp == 63 && dest == 14)
--					found_prologue_end = 1;
+-#define E_KEY (&ctx->buf[0])
+-#define D_KEY (&ctx->buf[60])
 -
--				break;
--			case 0xa: /* sub.l */
--			case 0xb: /* sub */
--				if (src != 15 || dest != 15)
--					continue;
+-static u8 pow_tab[256] __initdata;
+-static u8 log_tab[256] __initdata;
+-static u8 sbx_tab[256] __initdata;
+-static u8 isb_tab[256] __initdata;
+-static u32 rco_tab[10];
+-u32 aes_ft_tab[4][256];
+-u32 aes_it_tab[4][256];
 -
--				fp_displacement -= regcache[disp];
--				fp_prev = fp - fp_displacement;
--				break;
--			}
--			break;
--		case (0xa8 >> 2): /* st.l */
--			if (src != 15)
--				continue;
+-u32 aes_fl_tab[4][256];
+-u32 aes_il_tab[4][256];
 -
--			switch (dest) {
--			case 14:
--				if (offset_r14 || fp_displacement == 0)
--					continue;
+-static inline u8 f_mult(u8 a, u8 b)
+-{
+-	u8 aa = log_tab[a], cc = aa + log_tab[b];
 -
--				offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
--				offset_r14 *= sizeof(unsigned long);
--				offset_r14 += fp_displacement;
--				break;
--			case 18:
--				if (offset_r18 || fp_displacement == 0)
--					continue;
+-	return pow_tab[cc + (cc < aa ? 1 : 0)];
+-}
 -
--				offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
--				offset_r18 *= sizeof(unsigned long);
--				offset_r18 += fp_displacement;
--				break;
--			}
+-#define ff_mult(a, b) (a && b ? f_mult(a, b) : 0)
 -
--			break;
--		case (0xcc >> 2): /* movi */
--			if (dest >= 63) {
--				printk(KERN_NOTICE "%s: Invalid dest reg %d "
--				       "specified in movi handler. Failed "
--				       "opcode was 0x%lx: ", __FUNCTION__,
--				       dest, op);
+-#define ls_box(x)				\
+-	(aes_fl_tab[0][byte(x, 0)] ^		\
+-	 aes_fl_tab[1][byte(x, 1)] ^		\
+-	 aes_fl_tab[2][byte(x, 2)] ^		\
+-	 aes_fl_tab[3][byte(x, 3)])
 -
--				continue;
--			}
+-static void __init gen_tabs(void)
+-{
+-	u32 i, t;
+-	u8 p, q;
 -
--			/* Sign extend */
--			regcache[dest] =
--				((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
--			break;
--		case (0xd0 >> 2): /* addi */
--		case (0xd4 >> 2): /* addi.l */
--			/* Look for r15, -FRAME_SIZE, r15 */
--			if (src != 15 || dest != 15)
--				continue;
+-	/* log and power tables for GF(2**8) finite field with
+-	   0x011b as modular polynomial - the simplest primitive
+-	   root is 0x03, used here to generate the tables */
 -
--			/* Sign extended frame size.. */
--			fp_displacement +=
--				(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
--			fp_prev = fp - fp_displacement;
--			break;
--		}
+-	for (i = 0, p = 1; i < 256; ++i) {
+-		pow_tab[i] = (u8)p;
+-		log_tab[p] = (u8)i;
 -
--		if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
--			break;
+-		p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
 -	}
 -
--	if (offset_r14 == 0 || fp_prev == 0) {
--		if (!offset_r14)
--			pr_debug("Unable to find r14 offset\n");
--		if (!fp_prev)
--			pr_debug("Unable to find previous fp\n");
+-	log_tab[1] = 0;
 -
--		return -EINVAL;
+-	for (i = 0, p = 1; i < 10; ++i) {
+-		rco_tab[i] = p;
+-
+-		p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
 -	}
 -
--	/* For innermost leaf function, there might not be a offset_r18 */
--	if (!*pprev_pc && (offset_r18 == 0))
--		return -EINVAL;
+-	for (i = 0; i < 256; ++i) {
+-		p = (i ? pow_tab[255 - log_tab[i]] : 0);
+-		q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
+-		p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
+-		sbx_tab[i] = p;
+-		isb_tab[p] = (u8)i;
+-	}
 -
--	*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
+-	for (i = 0; i < 256; ++i) {
+-		p = sbx_tab[i];
 -
--	if (offset_r18)
--		*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
+-		t = p;
+-		aes_fl_tab[0][i] = t;
+-		aes_fl_tab[1][i] = rol32(t, 8);
+-		aes_fl_tab[2][i] = rol32(t, 16);
+-		aes_fl_tab[3][i] = rol32(t, 24);
 -
--	*pprev_pc &= ~1;
+-		t = ((u32)ff_mult(2, p)) |
+-		    ((u32)p << 8) |
+-		    ((u32)p << 16) | ((u32)ff_mult(3, p) << 24);
 -
--	return 0;
--}
+-		aes_ft_tab[0][i] = t;
+-		aes_ft_tab[1][i] = rol32(t, 8);
+-		aes_ft_tab[2][i] = rol32(t, 16);
+-		aes_ft_tab[3][i] = rol32(t, 24);
 -
--/* Don't put this on the stack since we'll want to call sh64_unwind
-- * when we're close to underflowing the stack anyway. */
--static struct pt_regs here_regs;
+-		p = isb_tab[i];
 -
--extern const char syscall_ret;
--extern const char ret_from_syscall;
--extern const char ret_from_exception;
--extern const char ret_from_irq;
+-		t = p;
+-		aes_il_tab[0][i] = t;
+-		aes_il_tab[1][i] = rol32(t, 8);
+-		aes_il_tab[2][i] = rol32(t, 16);
+-		aes_il_tab[3][i] = rol32(t, 24);
 -
--static void sh64_unwind_inner(struct pt_regs *regs);
+-		t = ((u32)ff_mult(14, p)) |
+-		    ((u32)ff_mult(9, p) << 8) |
+-		    ((u32)ff_mult(13, p) << 16) |
+-		    ((u32)ff_mult(11, p) << 24);
 -
--static void unwind_nested (unsigned long pc, unsigned long fp)
--{
--	if ((fp >= __MEMORY_START) &&
--	    ((fp & 7) == 0)) {
--		sh64_unwind_inner((struct pt_regs *) fp);
+-		aes_it_tab[0][i] = t;
+-		aes_it_tab[1][i] = rol32(t, 8);
+-		aes_it_tab[2][i] = rol32(t, 16);
+-		aes_it_tab[3][i] = rol32(t, 24);
 -	}
 -}
 -
--static void sh64_unwind_inner(struct pt_regs *regs)
--{
--	unsigned long pc, fp;
--	int ofs = 0;
--	int first_pass;
+-#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
 -
--	pc = regs->pc & ~1;
--	fp = regs->regs[14];
+-#define imix_col(y, x)			\
+-	u    = star_x(x);		\
+-	v    = star_x(u);		\
+-	w    = star_x(v);		\
+-	t    = w ^ (x);			\
+-	(y)  = u ^ v ^ w;		\
+-	(y) ^= ror32(u ^ t,  8) ^	\
+-	       ror32(v ^ t, 16) ^	\
+-	       ror32(t, 24)
 -
--	first_pass = 1;
--	for (;;) {
--		int cond;
--		unsigned long next_fp, next_pc;
+-/* initialise the key schedule from the user supplied key */
 -
--		if (pc == ((unsigned long) &syscall_ret & ~1)) {
--			printk("SYSCALL\n");
--			unwind_nested(pc,fp);
--			return;
--		}
+-#define loop4(i)					\
+-{							\
+-	t = ror32(t,  8); t = ls_box(t) ^ rco_tab[i];	\
+-	t ^= E_KEY[4 * i];     E_KEY[4 * i + 4] = t;	\
+-	t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t;	\
+-	t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t;	\
+-	t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t;	\
+-}
 -
--		if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
--			printk("SYSCALL (PREEMPTED)\n");
--			unwind_nested(pc,fp);
--			return;
--		}
+-#define loop6(i)					\
+-{							\
+-	t = ror32(t,  8); t = ls_box(t) ^ rco_tab[i];	\
+-	t ^= E_KEY[6 * i];     E_KEY[6 * i + 6] = t;	\
+-	t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t;	\
+-	t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t;	\
+-	t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t;	\
+-	t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t;	\
+-	t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t;	\
+-}
 -
--		/* In this case, the PC is discovered by lookup_prev_stack_frame but
--		   it has 4 taken off it to look like the 'caller' */
--		if (pc == ((unsigned long) &ret_from_exception & ~1)) {
--			printk("EXCEPTION\n");
--			unwind_nested(pc,fp);
--			return;
--		}
+-#define loop8(i)					\
+-{							\
+-	t = ror32(t,  8); ; t = ls_box(t) ^ rco_tab[i];	\
+-	t ^= E_KEY[8 * i];     E_KEY[8 * i + 8] = t;	\
+-	t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t;	\
+-	t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t;	\
+-	t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t;	\
+-	t  = E_KEY[8 * i + 4] ^ ls_box(t);		\
+-	E_KEY[8 * i + 12] = t;				\
+-	t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t;	\
+-	t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t;	\
+-	t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t;	\
+-}
 -
--		if (pc == ((unsigned long) &ret_from_irq & ~1)) {
--			printk("IRQ\n");
--			unwind_nested(pc,fp);
--			return;
--		}
+-static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+-		       unsigned int key_len)
+-{
+-	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
+-	const __le32 *key = (const __le32 *)in_key;
+-	u32 *flags = &tfm->crt_flags;
+-	u32 i, j, t, u, v, w;
 -
--		cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
--			((pc & 3) == 0) && ((fp & 7) == 0));
+-	if (key_len % 8) {
+-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+-		return -EINVAL;
+-	}
 -
--		pc -= ofs;
+-	ctx->key_length = key_len;
 -
--		printk("[<%08lx>] ", pc);
--		print_symbol("%s\n", pc);
+-	D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]);
+-	D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]);
+-	D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]);
+-	D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]);
 -
--		if (first_pass) {
--			/* If the innermost frame is a leaf function, it's
--			 * possible that r18 is never saved out to the stack.
--			 */
--			next_pc = regs->regs[18];
--		} else {
--			next_pc = 0;
--		}
+-	switch (key_len) {
+-	case 16:
+-		t = E_KEY[3];
+-		for (i = 0; i < 10; ++i)
+-			loop4(i);
+-		break;
 -
--		if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
--			ofs = sizeof(unsigned long);
--			pc = next_pc & ~1;
--			fp = next_fp;
--		} else {
--			printk("Unable to lookup previous stack frame\n");
--			break;
--		}
--		first_pass = 0;
+-	case 24:
+-		E_KEY[4] = le32_to_cpu(key[4]);
+-		t = E_KEY[5] = le32_to_cpu(key[5]);
+-		for (i = 0; i < 8; ++i)
+-			loop6 (i);
+-		break;
+-
+-	case 32:
+-		E_KEY[4] = le32_to_cpu(key[4]);
+-		E_KEY[5] = le32_to_cpu(key[5]);
+-		E_KEY[6] = le32_to_cpu(key[6]);
+-		t = E_KEY[7] = le32_to_cpu(key[7]);
+-		for (i = 0; i < 7; ++i)
+-			loop8(i);
+-		break;
 -	}
 -
--	printk("\n");
+-	D_KEY[0] = E_KEY[key_len + 24];
+-	D_KEY[1] = E_KEY[key_len + 25];
+-	D_KEY[2] = E_KEY[key_len + 26];
+-	D_KEY[3] = E_KEY[key_len + 27];
+-
+-	for (i = 4; i < key_len + 24; ++i) {
+-		j = key_len + 24 - (i & ~3) + (i & 3);
+-		imix_col(D_KEY[j], E_KEY[i]);
+-	}
 -
+-	return 0;
 -}
 -
--void sh64_unwind(struct pt_regs *regs)
--{
--	if (!regs) {
--		/*
--		 * Fetch current regs if we have no other saved state to back
--		 * trace from.
--		 */
--		regs = &here_regs;
+-asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+-asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
 -
--		__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
--		__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
--		__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
+-static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+-{
+-	aes_enc_blk(tfm, dst, src);
+-}
 -
--		__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
--		__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
--		__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
--		__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
--		__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
--		__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
--		__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
--		__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
+-static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+-{
+-	aes_dec_blk(tfm, dst, src);
+-}
 -
--		__asm__ __volatile__ (
--			"pta 0f, tr0\n\t"
--			"blink tr0, %0\n\t"
--			"0: nop"
--			: "=r" (regs->pc)
--		);
+-static struct crypto_alg aes_alg = {
+-	.cra_name		=	"aes",
+-	.cra_driver_name	=	"aes-x86_64",
+-	.cra_priority		=	200,
+-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+-	.cra_blocksize		=	AES_BLOCK_SIZE,
+-	.cra_ctxsize		=	sizeof(struct aes_ctx),
+-	.cra_module		=	THIS_MODULE,
+-	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+-	.cra_u			=	{
+-		.cipher = {
+-			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+-			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+-			.cia_setkey	   	= 	aes_set_key,
+-			.cia_encrypt	 	=	aes_encrypt,
+-			.cia_decrypt	  	=	aes_decrypt
+-		}
 -	}
+-};
 -
--	printk("\nCall Trace:\n");
--	sh64_unwind_inner(regs);
+-static int __init aes_init(void)
+-{
+-	gen_tabs();
+-	return crypto_register_alg(&aes_alg);
 -}
 -
-diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S
+-static void __exit aes_fini(void)
+-{
+-	crypto_unregister_alg(&aes_alg);
+-}
+-
+-module_init(aes_init);
+-module_exit(aes_fini);
+-
+-MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS("aes");
+diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
+new file mode 100644
+index 0000000..71f4578
+--- /dev/null
++++ b/arch/x86/crypto/aes_glue.c
+@@ -0,0 +1,57 @@
++/*
++ * Glue Code for the asm optimized version of the AES Cipher Algorithm
++ *
++ */
++
++#include <crypto/aes.h>
++
++asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
++asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
++
++static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
++{
++	aes_enc_blk(tfm, dst, src);
++}
++
++static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
++{
++	aes_dec_blk(tfm, dst, src);
++}
++
++static struct crypto_alg aes_alg = {
++	.cra_name		= "aes",
++	.cra_driver_name	= "aes-asm",
++	.cra_priority		= 200,
++	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
++	.cra_blocksize		= AES_BLOCK_SIZE,
++	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
++	.cra_module		= THIS_MODULE,
++	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
++	.cra_u	= {
++		.cipher	= {
++			.cia_min_keysize	= AES_MIN_KEY_SIZE,
++			.cia_max_keysize	= AES_MAX_KEY_SIZE,
++			.cia_setkey		= crypto_aes_set_key,
++			.cia_encrypt		= aes_encrypt,
++			.cia_decrypt		= aes_decrypt
++		}
++	}
++};
++
++static int __init aes_init(void)
++{
++	return crypto_register_alg(&aes_alg);
++}
++
++static void __exit aes_fini(void)
++{
++	crypto_unregister_alg(&aes_alg);
++}
++
++module_init(aes_init);
++module_exit(aes_fini);
++
++MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("aes");
++MODULE_ALIAS("aes-asm");
+diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S
+new file mode 100644
+index 0000000..72eb306
+--- /dev/null
++++ b/arch/x86/crypto/salsa20-i586-asm_32.S
+@@ -0,0 +1,1114 @@
++# salsa20_pm.s version 20051229
++# D. J. Bernstein
++# Public domain.
++
++# enter ECRYPT_encrypt_bytes
++.text
++.p2align 5
++.globl ECRYPT_encrypt_bytes
++ECRYPT_encrypt_bytes:
++	mov	%esp,%eax
++	and	$31,%eax
++	add	$256,%eax
++	sub	%eax,%esp
++	# eax_stack = eax
++	movl	%eax,80(%esp)
++	# ebx_stack = ebx
++	movl	%ebx,84(%esp)
++	# esi_stack = esi
++	movl	%esi,88(%esp)
++	# edi_stack = edi
++	movl	%edi,92(%esp)
++	# ebp_stack = ebp
++	movl	%ebp,96(%esp)
++	# x = arg1
++	movl	4(%esp,%eax),%edx
++	# m = arg2
++	movl	8(%esp,%eax),%esi
++	# out = arg3
++	movl	12(%esp,%eax),%edi
++	# bytes = arg4
++	movl	16(%esp,%eax),%ebx
++	# bytes -= 0
++	sub	$0,%ebx
++	# goto done if unsigned<=
++	jbe	._done
++._start:
++	# in0 = *(uint32 *) (x + 0)
++	movl	0(%edx),%eax
++	# in1 = *(uint32 *) (x + 4)
++	movl	4(%edx),%ecx
++	# in2 = *(uint32 *) (x + 8)
++	movl	8(%edx),%ebp
++	# j0 = in0
++	movl	%eax,164(%esp)
++	# in3 = *(uint32 *) (x + 12)
++	movl	12(%edx),%eax
++	# j1 = in1
++	movl	%ecx,168(%esp)
++	# in4 = *(uint32 *) (x + 16)
++	movl	16(%edx),%ecx
++	# j2 = in2
++	movl	%ebp,172(%esp)
++	# in5 = *(uint32 *) (x + 20)
++	movl	20(%edx),%ebp
++	# j3 = in3
++	movl	%eax,176(%esp)
++	# in6 = *(uint32 *) (x + 24)
++	movl	24(%edx),%eax
++	# j4 = in4
++	movl	%ecx,180(%esp)
++	# in7 = *(uint32 *) (x + 28)
++	movl	28(%edx),%ecx
++	# j5 = in5
++	movl	%ebp,184(%esp)
++	# in8 = *(uint32 *) (x + 32)
++	movl	32(%edx),%ebp
++	# j6 = in6
++	movl	%eax,188(%esp)
++	# in9 = *(uint32 *) (x + 36)
++	movl	36(%edx),%eax
++	# j7 = in7
++	movl	%ecx,192(%esp)
++	# in10 = *(uint32 *) (x + 40)
++	movl	40(%edx),%ecx
++	# j8 = in8
++	movl	%ebp,196(%esp)
++	# in11 = *(uint32 *) (x + 44)
++	movl	44(%edx),%ebp
++	# j9 = in9
++	movl	%eax,200(%esp)
++	# in12 = *(uint32 *) (x + 48)
++	movl	48(%edx),%eax
++	# j10 = in10
++	movl	%ecx,204(%esp)
++	# in13 = *(uint32 *) (x + 52)
++	movl	52(%edx),%ecx
++	# j11 = in11
++	movl	%ebp,208(%esp)
++	# in14 = *(uint32 *) (x + 56)
++	movl	56(%edx),%ebp
++	# j12 = in12
++	movl	%eax,212(%esp)
++	# in15 = *(uint32 *) (x + 60)
++	movl	60(%edx),%eax
++	# j13 = in13
++	movl	%ecx,216(%esp)
++	# j14 = in14
++	movl	%ebp,220(%esp)
++	# j15 = in15
++	movl	%eax,224(%esp)
++	# x_backup = x
++	movl	%edx,64(%esp)
++._bytesatleast1:
++	#   bytes - 64
++	cmp	$64,%ebx
++	#   goto nocopy if unsigned>=
++	jae	._nocopy
++	#     ctarget = out
++	movl	%edi,228(%esp)
++	#     out = &tmp
++	leal	0(%esp),%edi
++	#     i = bytes
++	mov	%ebx,%ecx
++	#     while (i) { *out++ = *m++; --i }
++	rep	movsb
++	#     out = &tmp
++	leal	0(%esp),%edi
++	#     m = &tmp
++	leal	0(%esp),%esi
++._nocopy:
++	#   out_backup = out
++	movl	%edi,72(%esp)
++	#   m_backup = m
++	movl	%esi,68(%esp)
++	#   bytes_backup = bytes
++	movl	%ebx,76(%esp)
++	#   in0 = j0
++	movl	164(%esp),%eax
++	#   in1 = j1
++	movl	168(%esp),%ecx
++	#   in2 = j2
++	movl	172(%esp),%edx
++	#   in3 = j3
++	movl	176(%esp),%ebx
++	#   x0 = in0
++	movl	%eax,100(%esp)
++	#   x1 = in1
++	movl	%ecx,104(%esp)
++	#   x2 = in2
++	movl	%edx,108(%esp)
++	#   x3 = in3
++	movl	%ebx,112(%esp)
++	#   in4 = j4
++	movl	180(%esp),%eax
++	#   in5 = j5
++	movl	184(%esp),%ecx
++	#   in6 = j6
++	movl	188(%esp),%edx
++	#   in7 = j7
++	movl	192(%esp),%ebx
++	#   x4 = in4
++	movl	%eax,116(%esp)
++	#   x5 = in5
++	movl	%ecx,120(%esp)
++	#   x6 = in6
++	movl	%edx,124(%esp)
++	#   x7 = in7
++	movl	%ebx,128(%esp)
++	#   in8 = j8
++	movl	196(%esp),%eax
++	#   in9 = j9
++	movl	200(%esp),%ecx
++	#   in10 = j10
++	movl	204(%esp),%edx
++	#   in11 = j11
++	movl	208(%esp),%ebx
++	#   x8 = in8
++	movl	%eax,132(%esp)
++	#   x9 = in9
++	movl	%ecx,136(%esp)
++	#   x10 = in10
++	movl	%edx,140(%esp)
++	#   x11 = in11
++	movl	%ebx,144(%esp)
++	#   in12 = j12
++	movl	212(%esp),%eax
++	#   in13 = j13
++	movl	216(%esp),%ecx
++	#   in14 = j14
++	movl	220(%esp),%edx
++	#   in15 = j15
++	movl	224(%esp),%ebx
++	#   x12 = in12
++	movl	%eax,148(%esp)
++	#   x13 = in13
++	movl	%ecx,152(%esp)
++	#   x14 = in14
++	movl	%edx,156(%esp)
++	#   x15 = in15
++	movl	%ebx,160(%esp)
++	#   i = 20
++	mov	$20,%ebp
++	# p = x0
++	movl	100(%esp),%eax
++	# s = x5
++	movl	120(%esp),%ecx
++	# t = x10
++	movl	140(%esp),%edx
++	# w = x15
++	movl	160(%esp),%ebx
++._mainloop:
++	# x0 = p
++	movl	%eax,100(%esp)
++	# 				x10 = t
++	movl	%edx,140(%esp)
++	# p += x12
++	addl	148(%esp),%eax
++	# 		x5 = s
++	movl	%ecx,120(%esp)
++	# 				t += x6
++	addl	124(%esp),%edx
++	# 						x15 = w
++	movl	%ebx,160(%esp)
++	# 		r = x1
++	movl	104(%esp),%esi
++	# 		r += s
++	add	%ecx,%esi
++	# 						v = x11
++	movl	144(%esp),%edi
++	# 						v += w
++	add	%ebx,%edi
++	# p <<<= 7
++	rol	$7,%eax
++	# p ^= x4
++	xorl	116(%esp),%eax
++	# 				t <<<= 7
++	rol	$7,%edx
++	# 				t ^= x14
++	xorl	156(%esp),%edx
++	# 		r <<<= 7
++	rol	$7,%esi
++	# 		r ^= x9
++	xorl	136(%esp),%esi
++	# 						v <<<= 7
++	rol	$7,%edi
++	# 						v ^= x3
++	xorl	112(%esp),%edi
++	# x4 = p
++	movl	%eax,116(%esp)
++	# 				x14 = t
++	movl	%edx,156(%esp)
++	# p += x0
++	addl	100(%esp),%eax
++	# 		x9 = r
++	movl	%esi,136(%esp)
++	# 				t += x10
++	addl	140(%esp),%edx
++	# 						x3 = v
++	movl	%edi,112(%esp)
++	# p <<<= 9
++	rol	$9,%eax
++	# p ^= x8
++	xorl	132(%esp),%eax
++	# 				t <<<= 9
++	rol	$9,%edx
++	# 				t ^= x2
++	xorl	108(%esp),%edx
++	# 		s += r
++	add	%esi,%ecx
++	# 		s <<<= 9
++	rol	$9,%ecx
++	# 		s ^= x13
++	xorl	152(%esp),%ecx
++	# 						w += v
++	add	%edi,%ebx
++	# 						w <<<= 9
++	rol	$9,%ebx
++	# 						w ^= x7
++	xorl	128(%esp),%ebx
++	# x8 = p
++	movl	%eax,132(%esp)
++	# 				x2 = t
++	movl	%edx,108(%esp)
++	# p += x4
++	addl	116(%esp),%eax
++	# 		x13 = s
++	movl	%ecx,152(%esp)
++	# 				t += x14
++	addl	156(%esp),%edx
++	# 						x7 = w
++	movl	%ebx,128(%esp)
++	# p <<<= 13
++	rol	$13,%eax
++	# p ^= x12
++	xorl	148(%esp),%eax
++	# 				t <<<= 13
++	rol	$13,%edx
++	# 				t ^= x6
++	xorl	124(%esp),%edx
++	# 		r += s
++	add	%ecx,%esi
++	# 		r <<<= 13
++	rol	$13,%esi
++	# 		r ^= x1
++	xorl	104(%esp),%esi
++	# 						v += w
++	add	%ebx,%edi
++	# 						v <<<= 13
++	rol	$13,%edi
++	# 						v ^= x11
++	xorl	144(%esp),%edi
++	# x12 = p
++	movl	%eax,148(%esp)
++	# 				x6 = t
++	movl	%edx,124(%esp)
++	# p += x8
++	addl	132(%esp),%eax
++	# 		x1 = r
++	movl	%esi,104(%esp)
++	# 				t += x2
++	addl	108(%esp),%edx
++	# 						x11 = v
++	movl	%edi,144(%esp)
++	# p <<<= 18
++	rol	$18,%eax
++	# p ^= x0
++	xorl	100(%esp),%eax
++	# 				t <<<= 18
++	rol	$18,%edx
++	# 				t ^= x10
++	xorl	140(%esp),%edx
++	# 		s += r
++	add	%esi,%ecx
++	# 		s <<<= 18
++	rol	$18,%ecx
++	# 		s ^= x5
++	xorl	120(%esp),%ecx
++	# 						w += v
++	add	%edi,%ebx
++	# 						w <<<= 18
++	rol	$18,%ebx
++	# 						w ^= x15
++	xorl	160(%esp),%ebx
++	# x0 = p
++	movl	%eax,100(%esp)
++	# 				x10 = t
++	movl	%edx,140(%esp)
++	# p += x3
++	addl	112(%esp),%eax
++	# p <<<= 7
++	rol	$7,%eax
++	# 		x5 = s
++	movl	%ecx,120(%esp)
++	# 				t += x9
++	addl	136(%esp),%edx
++	# 						x15 = w
++	movl	%ebx,160(%esp)
++	# 		r = x4
++	movl	116(%esp),%esi
++	# 		r += s
++	add	%ecx,%esi
++	# 						v = x14
++	movl	156(%esp),%edi
++	# 						v += w
++	add	%ebx,%edi
++	# p ^= x1
++	xorl	104(%esp),%eax
++	# 				t <<<= 7
++	rol	$7,%edx
++	# 				t ^= x11
++	xorl	144(%esp),%edx
++	# 		r <<<= 7
++	rol	$7,%esi
++	# 		r ^= x6
++	xorl	124(%esp),%esi
++	# 						v <<<= 7
++	rol	$7,%edi
++	# 						v ^= x12
++	xorl	148(%esp),%edi
++	# x1 = p
++	movl	%eax,104(%esp)
++	# 				x11 = t
++	movl	%edx,144(%esp)
++	# p += x0
++	addl	100(%esp),%eax
++	# 		x6 = r
++	movl	%esi,124(%esp)
++	# 				t += x10
++	addl	140(%esp),%edx
++	# 						x12 = v
++	movl	%edi,148(%esp)
++	# p <<<= 9
++	rol	$9,%eax
++	# p ^= x2
++	xorl	108(%esp),%eax
++	# 				t <<<= 9
++	rol	$9,%edx
++	# 				t ^= x8
++	xorl	132(%esp),%edx
++	# 		s += r
++	add	%esi,%ecx
++	# 		s <<<= 9
++	rol	$9,%ecx
++	# 		s ^= x7
++	xorl	128(%esp),%ecx
++	# 						w += v
++	add	%edi,%ebx
++	# 						w <<<= 9
++	rol	$9,%ebx
++	# 						w ^= x13
++	xorl	152(%esp),%ebx
++	# x2 = p
++	movl	%eax,108(%esp)
++	# 				x8 = t
++	movl	%edx,132(%esp)
++	# p += x1
++	addl	104(%esp),%eax
++	# 		x7 = s
++	movl	%ecx,128(%esp)
++	# 				t += x11
++	addl	144(%esp),%edx
++	# 						x13 = w
++	movl	%ebx,152(%esp)
++	# p <<<= 13
++	rol	$13,%eax
++	# p ^= x3
++	xorl	112(%esp),%eax
++	# 				t <<<= 13
++	rol	$13,%edx
++	# 				t ^= x9
++	xorl	136(%esp),%edx
++	# 		r += s
++	add	%ecx,%esi
++	# 		r <<<= 13
++	rol	$13,%esi
++	# 		r ^= x4
++	xorl	116(%esp),%esi
++	# 						v += w
++	add	%ebx,%edi
++	# 						v <<<= 13
++	rol	$13,%edi
++	# 						v ^= x14
++	xorl	156(%esp),%edi
++	# x3 = p
++	movl	%eax,112(%esp)
++	# 				x9 = t
++	movl	%edx,136(%esp)
++	# p += x2
++	addl	108(%esp),%eax
++	# 		x4 = r
++	movl	%esi,116(%esp)
++	# 				t += x8
++	addl	132(%esp),%edx
++	# 						x14 = v
++	movl	%edi,156(%esp)
++	# p <<<= 18
++	rol	$18,%eax
++	# p ^= x0
++	xorl	100(%esp),%eax
++	# 				t <<<= 18
++	rol	$18,%edx
++	# 				t ^= x10
++	xorl	140(%esp),%edx
++	# 		s += r
++	add	%esi,%ecx
++	# 		s <<<= 18
++	rol	$18,%ecx
++	# 		s ^= x5
++	xorl	120(%esp),%ecx
++	# 						w += v
++	add	%edi,%ebx
++	# 						w <<<= 18
++	rol	$18,%ebx
++	# 						w ^= x15
++	xorl	160(%esp),%ebx
++	# x0 = p
++	movl	%eax,100(%esp)
++	# 				x10 = t
++	movl	%edx,140(%esp)
++	# p += x12
++	addl	148(%esp),%eax
++	# 		x5 = s
++	movl	%ecx,120(%esp)
++	# 				t += x6
++	addl	124(%esp),%edx
++	# 						x15 = w
++	movl	%ebx,160(%esp)
++	# 		r = x1
++	movl	104(%esp),%esi
++	# 		r += s
++	add	%ecx,%esi
++	# 						v = x11
++	movl	144(%esp),%edi
++	# 						v += w
++	add	%ebx,%edi
++	# p <<<= 7
++	rol	$7,%eax
++	# p ^= x4
++	xorl	116(%esp),%eax
++	# 				t <<<= 7
++	rol	$7,%edx
++	# 				t ^= x14
++	xorl	156(%esp),%edx
++	# 		r <<<= 7
++	rol	$7,%esi
++	# 		r ^= x9
++	xorl	136(%esp),%esi
++	# 						v <<<= 7
++	rol	$7,%edi
++	# 						v ^= x3
++	xorl	112(%esp),%edi
++	# x4 = p
++	movl	%eax,116(%esp)
++	# 				x14 = t
++	movl	%edx,156(%esp)
++	# p += x0
++	addl	100(%esp),%eax
++	# 		x9 = r
++	movl	%esi,136(%esp)
++	# 				t += x10
++	addl	140(%esp),%edx
++	# 						x3 = v
++	movl	%edi,112(%esp)
++	# p <<<= 9
++	rol	$9,%eax
++	# p ^= x8
++	xorl	132(%esp),%eax
++	# 				t <<<= 9
++	rol	$9,%edx
++	# 				t ^= x2
++	xorl	108(%esp),%edx
++	# 		s += r
++	add	%esi,%ecx
++	# 		s <<<= 9
++	rol	$9,%ecx
++	# 		s ^= x13
++	xorl	152(%esp),%ecx
++	# 						w += v
++	add	%edi,%ebx
++	# 						w <<<= 9
++	rol	$9,%ebx
++	# 						w ^= x7
++	xorl	128(%esp),%ebx
++	# x8 = p
++	movl	%eax,132(%esp)
++	# 				x2 = t
++	movl	%edx,108(%esp)
++	# p += x4
++	addl	116(%esp),%eax
++	# 		x13 = s
++	movl	%ecx,152(%esp)
++	# 				t += x14
++	addl	156(%esp),%edx
++	# 						x7 = w
++	movl	%ebx,128(%esp)
++	# p <<<= 13
++	rol	$13,%eax
++	# p ^= x12
++	xorl	148(%esp),%eax
++	# 				t <<<= 13
++	rol	$13,%edx
++	# 				t ^= x6
++	xorl	124(%esp),%edx
++	# 		r += s
++	add	%ecx,%esi
++	# 		r <<<= 13
++	rol	$13,%esi
++	# 		r ^= x1
++	xorl	104(%esp),%esi
++	# 						v += w
++	add	%ebx,%edi
++	# 						v <<<= 13
++	rol	$13,%edi
++	# 						v ^= x11
++	xorl	144(%esp),%edi
++	# x12 = p
++	movl	%eax,148(%esp)
++	# 				x6 = t
++	movl	%edx,124(%esp)
++	# p += x8
++	addl	132(%esp),%eax
++	# 		x1 = r
++	movl	%esi,104(%esp)
++	# 				t += x2
++	addl	108(%esp),%edx
++	# 						x11 = v
++	movl	%edi,144(%esp)
++	# p <<<= 18
++	rol	$18,%eax
++	# p ^= x0
++	xorl	100(%esp),%eax
++	# 				t <<<= 18
++	rol	$18,%edx
++	# 				t ^= x10
++	xorl	140(%esp),%edx
++	# 		s += r
++	add	%esi,%ecx
++	# 		s <<<= 18
++	rol	$18,%ecx
++	# 		s ^= x5
++	xorl	120(%esp),%ecx
++	# 						w += v
++	add	%edi,%ebx
++	# 						w <<<= 18
++	rol	$18,%ebx
++	# 						w ^= x15
++	xorl	160(%esp),%ebx
++	# x0 = p
++	movl	%eax,100(%esp)
++	# 				x10 = t
++	movl	%edx,140(%esp)
++	# p += x3
++	addl	112(%esp),%eax
++	# p <<<= 7
++	rol	$7,%eax
++	# 		x5 = s
++	movl	%ecx,120(%esp)
++	# 				t += x9
++	addl	136(%esp),%edx
++	# 						x15 = w
++	movl	%ebx,160(%esp)
++	# 		r = x4
++	movl	116(%esp),%esi
++	# 		r += s
++	add	%ecx,%esi
++	# 						v = x14
++	movl	156(%esp),%edi
++	# 						v += w
++	add	%ebx,%edi
++	# p ^= x1
++	xorl	104(%esp),%eax
++	# 				t <<<= 7
++	rol	$7,%edx
++	# 				t ^= x11
++	xorl	144(%esp),%edx
++	# 		r <<<= 7
++	rol	$7,%esi
++	# 		r ^= x6
++	xorl	124(%esp),%esi
++	# 						v <<<= 7
++	rol	$7,%edi
++	# 						v ^= x12
++	xorl	148(%esp),%edi
++	# x1 = p
++	movl	%eax,104(%esp)
++	# 				x11 = t
++	movl	%edx,144(%esp)
++	# p += x0
++	addl	100(%esp),%eax
++	# 		x6 = r
++	movl	%esi,124(%esp)
++	# 				t += x10
++	addl	140(%esp),%edx
++	# 						x12 = v
++	movl	%edi,148(%esp)
++	# p <<<= 9
++	rol	$9,%eax
++	# p ^= x2
++	xorl	108(%esp),%eax
++	# 				t <<<= 9
++	rol	$9,%edx
++	# 				t ^= x8
++	xorl	132(%esp),%edx
++	# 		s += r
++	add	%esi,%ecx
++	# 		s <<<= 9
++	rol	$9,%ecx
++	# 		s ^= x7
++	xorl	128(%esp),%ecx
++	# 						w += v
++	add	%edi,%ebx
++	# 						w <<<= 9
++	rol	$9,%ebx
++	# 						w ^= x13
++	xorl	152(%esp),%ebx
++	# x2 = p
++	movl	%eax,108(%esp)
++	# 				x8 = t
++	movl	%edx,132(%esp)
++	# p += x1
++	addl	104(%esp),%eax
++	# 		x7 = s
++	movl	%ecx,128(%esp)
++	# 				t += x11
++	addl	144(%esp),%edx
++	# 						x13 = w
++	movl	%ebx,152(%esp)
++	# p <<<= 13
++	rol	$13,%eax
++	# p ^= x3
++	xorl	112(%esp),%eax
++	# 				t <<<= 13
++	rol	$13,%edx
++	# 				t ^= x9
++	xorl	136(%esp),%edx
++	# 		r += s
++	add	%ecx,%esi
++	# 		r <<<= 13
++	rol	$13,%esi
++	# 		r ^= x4
++	xorl	116(%esp),%esi
++	# 						v += w
++	add	%ebx,%edi
++	# 						v <<<= 13
++	rol	$13,%edi
++	# 						v ^= x14
++	xorl	156(%esp),%edi
++	# x3 = p
++	movl	%eax,112(%esp)
++	# 				x9 = t
++	movl	%edx,136(%esp)
++	# p += x2
++	addl	108(%esp),%eax
++	# 		x4 = r
++	movl	%esi,116(%esp)
++	# 				t += x8
++	addl	132(%esp),%edx
++	# 						x14 = v
++	movl	%edi,156(%esp)
++	# p <<<= 18
++	rol	$18,%eax
++	# p ^= x0
++	xorl	100(%esp),%eax
++	# 				t <<<= 18
++	rol	$18,%edx
++	# 				t ^= x10
++	xorl	140(%esp),%edx
++	# 		s += r
++	add	%esi,%ecx
++	# 		s <<<= 18
++	rol	$18,%ecx
++	# 		s ^= x5
++	xorl	120(%esp),%ecx
++	# 						w += v
++	add	%edi,%ebx
++	# 						w <<<= 18
++	rol	$18,%ebx
++	# 						w ^= x15
++	xorl	160(%esp),%ebx
++	# i -= 4
++	sub	$4,%ebp
++	# goto mainloop if unsigned >
++	ja	._mainloop
++	# x0 = p
++	movl	%eax,100(%esp)
++	# x5 = s
++	movl	%ecx,120(%esp)
++	# x10 = t
++	movl	%edx,140(%esp)
++	# x15 = w
++	movl	%ebx,160(%esp)
++	#   out = out_backup
++	movl	72(%esp),%edi
++	#   m = m_backup
++	movl	68(%esp),%esi
++	#   in0 = x0
++	movl	100(%esp),%eax
++	#   in1 = x1
++	movl	104(%esp),%ecx
++	#   in0 += j0
++	addl	164(%esp),%eax
++	#   in1 += j1
++	addl	168(%esp),%ecx
++	#   in0 ^= *(uint32 *) (m + 0)
++	xorl	0(%esi),%eax
++	#   in1 ^= *(uint32 *) (m + 4)
++	xorl	4(%esi),%ecx
++	#   *(uint32 *) (out + 0) = in0
++	movl	%eax,0(%edi)
++	#   *(uint32 *) (out + 4) = in1
++	movl	%ecx,4(%edi)
++	#   in2 = x2
++	movl	108(%esp),%eax
++	#   in3 = x3
++	movl	112(%esp),%ecx
++	#   in2 += j2
++	addl	172(%esp),%eax
++	#   in3 += j3
++	addl	176(%esp),%ecx
++	#   in2 ^= *(uint32 *) (m + 8)
++	xorl	8(%esi),%eax
++	#   in3 ^= *(uint32 *) (m + 12)
++	xorl	12(%esi),%ecx
++	#   *(uint32 *) (out + 8) = in2
++	movl	%eax,8(%edi)
++	#   *(uint32 *) (out + 12) = in3
++	movl	%ecx,12(%edi)
++	#   in4 = x4
++	movl	116(%esp),%eax
++	#   in5 = x5
++	movl	120(%esp),%ecx
++	#   in4 += j4
++	addl	180(%esp),%eax
++	#   in5 += j5
++	addl	184(%esp),%ecx
++	#   in4 ^= *(uint32 *) (m + 16)
++	xorl	16(%esi),%eax
++	#   in5 ^= *(uint32 *) (m + 20)
++	xorl	20(%esi),%ecx
++	#   *(uint32 *) (out + 16) = in4
++	movl	%eax,16(%edi)
++	#   *(uint32 *) (out + 20) = in5
++	movl	%ecx,20(%edi)
++	#   in6 = x6
++	movl	124(%esp),%eax
++	#   in7 = x7
++	movl	128(%esp),%ecx
++	#   in6 += j6
++	addl	188(%esp),%eax
++	#   in7 += j7
++	addl	192(%esp),%ecx
++	#   in6 ^= *(uint32 *) (m + 24)
++	xorl	24(%esi),%eax
++	#   in7 ^= *(uint32 *) (m + 28)
++	xorl	28(%esi),%ecx
++	#   *(uint32 *) (out + 24) = in6
++	movl	%eax,24(%edi)
++	#   *(uint32 *) (out + 28) = in7
++	movl	%ecx,28(%edi)
++	#   in8 = x8
++	movl	132(%esp),%eax
++	#   in9 = x9
++	movl	136(%esp),%ecx
++	#   in8 += j8
++	addl	196(%esp),%eax
++	#   in9 += j9
++	addl	200(%esp),%ecx
++	#   in8 ^= *(uint32 *) (m + 32)
++	xorl	32(%esi),%eax
++	#   in9 ^= *(uint32 *) (m + 36)
++	xorl	36(%esi),%ecx
++	#   *(uint32 *) (out + 32) = in8
++	movl	%eax,32(%edi)
++	#   *(uint32 *) (out + 36) = in9
++	movl	%ecx,36(%edi)
++	#   in10 = x10
++	movl	140(%esp),%eax
++	#   in11 = x11
++	movl	144(%esp),%ecx
++	#   in10 += j10
++	addl	204(%esp),%eax
++	#   in11 += j11
++	addl	208(%esp),%ecx
++	#   in10 ^= *(uint32 *) (m + 40)
++	xorl	40(%esi),%eax
++	#   in11 ^= *(uint32 *) (m + 44)
++	xorl	44(%esi),%ecx
++	#   *(uint32 *) (out + 40) = in10
++	movl	%eax,40(%edi)
++	#   *(uint32 *) (out + 44) = in11
++	movl	%ecx,44(%edi)
++	#   in12 = x12
++	movl	148(%esp),%eax
++	#   in13 = x13
++	movl	152(%esp),%ecx
++	#   in12 += j12
++	addl	212(%esp),%eax
++	#   in13 += j13
++	addl	216(%esp),%ecx
++	#   in12 ^= *(uint32 *) (m + 48)
++	xorl	48(%esi),%eax
++	#   in13 ^= *(uint32 *) (m + 52)
++	xorl	52(%esi),%ecx
++	#   *(uint32 *) (out + 48) = in12
++	movl	%eax,48(%edi)
++	#   *(uint32 *) (out + 52) = in13
++	movl	%ecx,52(%edi)
++	#   in14 = x14
++	movl	156(%esp),%eax
++	#   in15 = x15
++	movl	160(%esp),%ecx
++	#   in14 += j14
++	addl	220(%esp),%eax
++	#   in15 += j15
++	addl	224(%esp),%ecx
++	#   in14 ^= *(uint32 *) (m + 56)
++	xorl	56(%esi),%eax
++	#   in15 ^= *(uint32 *) (m + 60)
++	xorl	60(%esi),%ecx
++	#   *(uint32 *) (out + 56) = in14
++	movl	%eax,56(%edi)
++	#   *(uint32 *) (out + 60) = in15
++	movl	%ecx,60(%edi)
++	#   bytes = bytes_backup
++	movl	76(%esp),%ebx
++	#   in8 = j8
++	movl	196(%esp),%eax
++	#   in9 = j9
++	movl	200(%esp),%ecx
++	#   in8 += 1
++	add	$1,%eax
++	#   in9 += 0 + carry
++	adc	$0,%ecx
++	#   j8 = in8
++	movl	%eax,196(%esp)
++	#   j9 = in9
++	movl	%ecx,200(%esp)
++	#   bytes - 64
++	cmp	$64,%ebx
++	#   goto bytesatleast65 if unsigned>
++	ja	._bytesatleast65
++	#     goto bytesatleast64 if unsigned>=
++	jae	._bytesatleast64
++	#       m = out
++	mov	%edi,%esi
++	#       out = ctarget
++	movl	228(%esp),%edi
++	#       i = bytes
++	mov	%ebx,%ecx
++	#       while (i) { *out++ = *m++; --i }
++	rep	movsb
++._bytesatleast64:
++	#     x = x_backup
++	movl	64(%esp),%eax
++	#     in8 = j8
++	movl	196(%esp),%ecx
++	#     in9 = j9
++	movl	200(%esp),%edx
++	#     *(uint32 *) (x + 32) = in8
++	movl	%ecx,32(%eax)
++	#     *(uint32 *) (x + 36) = in9
++	movl	%edx,36(%eax)
++._done:
++	#     eax = eax_stack
++	movl	80(%esp),%eax
++	#     ebx = ebx_stack
++	movl	84(%esp),%ebx
++	#     esi = esi_stack
++	movl	88(%esp),%esi
++	#     edi = edi_stack
++	movl	92(%esp),%edi
++	#     ebp = ebp_stack
++	movl	96(%esp),%ebp
++	#     leave
++	add	%eax,%esp
++	ret
++._bytesatleast65:
++	#   bytes -= 64
++	sub	$64,%ebx
++	#   out += 64
++	add	$64,%edi
++	#   m += 64
++	add	$64,%esi
++	# goto bytesatleast1
++	jmp	._bytesatleast1
++# enter ECRYPT_keysetup
++.text
++.p2align 5
++.globl ECRYPT_keysetup
++ECRYPT_keysetup:
++	mov	%esp,%eax
++	and	$31,%eax
++	add	$256,%eax
++	sub	%eax,%esp
++	#   eax_stack = eax
++	movl	%eax,64(%esp)
++	#   ebx_stack = ebx
++	movl	%ebx,68(%esp)
++	#   esi_stack = esi
++	movl	%esi,72(%esp)
++	#   edi_stack = edi
++	movl	%edi,76(%esp)
++	#   ebp_stack = ebp
++	movl	%ebp,80(%esp)
++	#   k = arg2
++	movl	8(%esp,%eax),%ecx
++	#   kbits = arg3
++	movl	12(%esp,%eax),%edx
++	#   x = arg1
++	movl	4(%esp,%eax),%eax
++	#   in1 = *(uint32 *) (k + 0)
++	movl	0(%ecx),%ebx
++	#   in2 = *(uint32 *) (k + 4)
++	movl	4(%ecx),%esi
++	#   in3 = *(uint32 *) (k + 8)
++	movl	8(%ecx),%edi
++	#   in4 = *(uint32 *) (k + 12)
++	movl	12(%ecx),%ebp
++	#   *(uint32 *) (x + 4) = in1
++	movl	%ebx,4(%eax)
++	#   *(uint32 *) (x + 8) = in2
++	movl	%esi,8(%eax)
++	#   *(uint32 *) (x + 12) = in3
++	movl	%edi,12(%eax)
++	#   *(uint32 *) (x + 16) = in4
++	movl	%ebp,16(%eax)
++	#   kbits - 256
++	cmp	$256,%edx
++	#   goto kbits128 if unsigned<
++	jb	._kbits128
++._kbits256:
++	#     in11 = *(uint32 *) (k + 16)
++	movl	16(%ecx),%edx
++	#     in12 = *(uint32 *) (k + 20)
++	movl	20(%ecx),%ebx
++	#     in13 = *(uint32 *) (k + 24)
++	movl	24(%ecx),%esi
++	#     in14 = *(uint32 *) (k + 28)
++	movl	28(%ecx),%ecx
++	#     *(uint32 *) (x + 44) = in11
++	movl	%edx,44(%eax)
++	#     *(uint32 *) (x + 48) = in12
++	movl	%ebx,48(%eax)
++	#     *(uint32 *) (x + 52) = in13
++	movl	%esi,52(%eax)
++	#     *(uint32 *) (x + 56) = in14
++	movl	%ecx,56(%eax)
++	#     in0 = 1634760805
++	mov	$1634760805,%ecx
++	#     in5 = 857760878
++	mov	$857760878,%edx
++	#     in10 = 2036477234
++	mov	$2036477234,%ebx
++	#     in15 = 1797285236
++	mov	$1797285236,%esi
++	#     *(uint32 *) (x + 0) = in0
++	movl	%ecx,0(%eax)
++	#     *(uint32 *) (x + 20) = in5
++	movl	%edx,20(%eax)
++	#     *(uint32 *) (x + 40) = in10
++	movl	%ebx,40(%eax)
++	#     *(uint32 *) (x + 60) = in15
++	movl	%esi,60(%eax)
++	#   goto keysetupdone
++	jmp	._keysetupdone
++._kbits128:
++	#     in11 = *(uint32 *) (k + 0)
++	movl	0(%ecx),%edx
++	#     in12 = *(uint32 *) (k + 4)
++	movl	4(%ecx),%ebx
++	#     in13 = *(uint32 *) (k + 8)
++	movl	8(%ecx),%esi
++	#     in14 = *(uint32 *) (k + 12)
++	movl	12(%ecx),%ecx
++	#     *(uint32 *) (x + 44) = in11
++	movl	%edx,44(%eax)
++	#     *(uint32 *) (x + 48) = in12
++	movl	%ebx,48(%eax)
++	#     *(uint32 *) (x + 52) = in13
++	movl	%esi,52(%eax)
++	#     *(uint32 *) (x + 56) = in14
++	movl	%ecx,56(%eax)
++	#     in0 = 1634760805
++	mov	$1634760805,%ecx
++	#     in5 = 824206446
++	mov	$824206446,%edx
++	#     in10 = 2036477238
++	mov	$2036477238,%ebx
++	#     in15 = 1797285236
++	mov	$1797285236,%esi
++	#     *(uint32 *) (x + 0) = in0
++	movl	%ecx,0(%eax)
++	#     *(uint32 *) (x + 20) = in5
++	movl	%edx,20(%eax)
++	#     *(uint32 *) (x + 40) = in10
++	movl	%ebx,40(%eax)
++	#     *(uint32 *) (x + 60) = in15
++	movl	%esi,60(%eax)
++._keysetupdone:
++	#   eax = eax_stack
++	movl	64(%esp),%eax
++	#   ebx = ebx_stack
++	movl	68(%esp),%ebx
++	#   esi = esi_stack
++	movl	72(%esp),%esi
++	#   edi = edi_stack
++	movl	76(%esp),%edi
++	#   ebp = ebp_stack
++	movl	80(%esp),%ebp
++	# leave
++	add	%eax,%esp
++	ret
++# enter ECRYPT_ivsetup
++.text
++.p2align 5
++.globl ECRYPT_ivsetup
++ECRYPT_ivsetup:
++	mov	%esp,%eax
++	and	$31,%eax
++	add	$256,%eax
++	sub	%eax,%esp
++	#   eax_stack = eax
++	movl	%eax,64(%esp)
++	#   ebx_stack = ebx
++	movl	%ebx,68(%esp)
++	#   esi_stack = esi
++	movl	%esi,72(%esp)
++	#   edi_stack = edi
++	movl	%edi,76(%esp)
++	#   ebp_stack = ebp
++	movl	%ebp,80(%esp)
++	#   iv = arg2
++	movl	8(%esp,%eax),%ecx
++	#   x = arg1
++	movl	4(%esp,%eax),%eax
++	#   in6 = *(uint32 *) (iv + 0)
++	movl	0(%ecx),%edx
++	#   in7 = *(uint32 *) (iv + 4)
++	movl	4(%ecx),%ecx
++	#   in8 = 0
++	mov	$0,%ebx
++	#   in9 = 0
++	mov	$0,%esi
++	#   *(uint32 *) (x + 24) = in6
++	movl	%edx,24(%eax)
++	#   *(uint32 *) (x + 28) = in7
++	movl	%ecx,28(%eax)
++	#   *(uint32 *) (x + 32) = in8
++	movl	%ebx,32(%eax)
++	#   *(uint32 *) (x + 36) = in9
++	movl	%esi,36(%eax)
++	#   eax = eax_stack
++	movl	64(%esp),%eax
++	#   ebx = ebx_stack
++	movl	68(%esp),%ebx
++	#   esi = esi_stack
++	movl	72(%esp),%esi
++	#   edi = edi_stack
++	movl	76(%esp),%edi
++	#   ebp = ebp_stack
++	movl	80(%esp),%ebp
++	# leave
++	add	%eax,%esp
++	ret
+diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+new file mode 100644
+index 0000000..6214a9b
+--- /dev/null
++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+@@ -0,0 +1,920 @@
++# enter ECRYPT_encrypt_bytes
++.text
++.p2align 5
++.globl ECRYPT_encrypt_bytes
++ECRYPT_encrypt_bytes:
++	mov	%rsp,%r11
++	and	$31,%r11
++	add	$256,%r11
++	sub	%r11,%rsp
++	# x = arg1
++	mov	%rdi,%r8
++	# m = arg2
++	mov	%rsi,%rsi
++	# out = arg3
++	mov	%rdx,%rdi
++	# bytes = arg4
++	mov	%rcx,%rdx
++	#               unsigned>? bytes - 0
++	cmp	$0,%rdx
++	# comment:fp stack unchanged by jump
++	# goto done if !unsigned>
++	jbe	._done
++	# comment:fp stack unchanged by fallthrough
++# start:
++._start:
++	# r11_stack = r11
++	movq	%r11,0(%rsp)
++	# r12_stack = r12
++	movq	%r12,8(%rsp)
++	# r13_stack = r13
++	movq	%r13,16(%rsp)
++	# r14_stack = r14
++	movq	%r14,24(%rsp)
++	# r15_stack = r15
++	movq	%r15,32(%rsp)
++	# rbx_stack = rbx
++	movq	%rbx,40(%rsp)
++	# rbp_stack = rbp
++	movq	%rbp,48(%rsp)
++	# in0 = *(uint64 *) (x + 0)
++	movq	0(%r8),%rcx
++	# in2 = *(uint64 *) (x + 8)
++	movq	8(%r8),%r9
++	# in4 = *(uint64 *) (x + 16)
++	movq	16(%r8),%rax
++	# in6 = *(uint64 *) (x + 24)
++	movq	24(%r8),%r10
++	# in8 = *(uint64 *) (x + 32)
++	movq	32(%r8),%r11
++	# in10 = *(uint64 *) (x + 40)
++	movq	40(%r8),%r12
++	# in12 = *(uint64 *) (x + 48)
++	movq	48(%r8),%r13
++	# in14 = *(uint64 *) (x + 56)
++	movq	56(%r8),%r14
++	# j0 = in0
++	movq	%rcx,56(%rsp)
++	# j2 = in2
++	movq	%r9,64(%rsp)
++	# j4 = in4
++	movq	%rax,72(%rsp)
++	# j6 = in6
++	movq	%r10,80(%rsp)
++	# j8 = in8
++	movq	%r11,88(%rsp)
++	# j10 = in10
++	movq	%r12,96(%rsp)
++	# j12 = in12
++	movq	%r13,104(%rsp)
++	# j14 = in14
++	movq	%r14,112(%rsp)
++	# x_backup = x
++	movq	%r8,120(%rsp)
++# bytesatleast1:
++._bytesatleast1:
++	#                   unsigned<? bytes - 64
++	cmp	$64,%rdx
++	# comment:fp stack unchanged by jump
++	#   goto nocopy if !unsigned<
++	jae	._nocopy
++	#     ctarget = out
++	movq	%rdi,128(%rsp)
++	#     out = &tmp
++	leaq	192(%rsp),%rdi
++	#     i = bytes
++	mov	%rdx,%rcx
++	#     while (i) { *out++ = *m++; --i }
++	rep	movsb
++	#     out = &tmp
++	leaq	192(%rsp),%rdi
++	#     m = &tmp
++	leaq	192(%rsp),%rsi
++	# comment:fp stack unchanged by fallthrough
++#   nocopy:
++._nocopy:
++	#   out_backup = out
++	movq	%rdi,136(%rsp)
++	#   m_backup = m
++	movq	%rsi,144(%rsp)
++	#   bytes_backup = bytes
++	movq	%rdx,152(%rsp)
++	#   x1 = j0
++	movq	56(%rsp),%rdi
++	#   x0 = x1
++	mov	%rdi,%rdx
++	#   (uint64) x1 >>= 32
++	shr	$32,%rdi
++	#   		x3 = j2
++	movq	64(%rsp),%rsi
++	#   		x2 = x3
++	mov	%rsi,%rcx
++	#   		(uint64) x3 >>= 32
++	shr	$32,%rsi
++	#   x5 = j4
++	movq	72(%rsp),%r8
++	#   x4 = x5
++	mov	%r8,%r9
++	#   (uint64) x5 >>= 32
++	shr	$32,%r8
++	#   x5_stack = x5
++	movq	%r8,160(%rsp)
++	#   		x7 = j6
++	movq	80(%rsp),%r8
++	#   		x6 = x7
++	mov	%r8,%rax
++	#   		(uint64) x7 >>= 32
++	shr	$32,%r8
++	#   x9 = j8
++	movq	88(%rsp),%r10
++	#   x8 = x9
++	mov	%r10,%r11
++	#   (uint64) x9 >>= 32
++	shr	$32,%r10
++	#   		x11 = j10
++	movq	96(%rsp),%r12
++	#   		x10 = x11
++	mov	%r12,%r13
++	#   		x10_stack = x10
++	movq	%r13,168(%rsp)
++	#   		(uint64) x11 >>= 32
++	shr	$32,%r12
++	#   x13 = j12
++	movq	104(%rsp),%r13
++	#   x12 = x13
++	mov	%r13,%r14
++	#   (uint64) x13 >>= 32
++	shr	$32,%r13
++	#   		x15 = j14
++	movq	112(%rsp),%r15
++	#   		x14 = x15
++	mov	%r15,%rbx
++	#   		(uint64) x15 >>= 32
++	shr	$32,%r15
++	#   		x15_stack = x15
++	movq	%r15,176(%rsp)
++	#   i = 20
++	mov	$20,%r15
++#   mainloop:
++._mainloop:
++	#   i_backup = i
++	movq	%r15,184(%rsp)
++	# 		x5 = x5_stack
++	movq	160(%rsp),%r15
++	# a = x12 + x0
++	lea	(%r14,%rdx),%rbp
++	# (uint32) a <<<= 7
++	rol	$7,%ebp
++	# x4 ^= a
++	xor	%rbp,%r9
++	# 		b = x1 + x5
++	lea	(%rdi,%r15),%rbp
++	# 		(uint32) b <<<= 7
++	rol	$7,%ebp
++	# 		x9 ^= b
++	xor	%rbp,%r10
++	# a = x0 + x4
++	lea	(%rdx,%r9),%rbp
++	# (uint32) a <<<= 9
++	rol	$9,%ebp
++	# x8 ^= a
++	xor	%rbp,%r11
++	# 		b = x5 + x9
++	lea	(%r15,%r10),%rbp
++	# 		(uint32) b <<<= 9
++	rol	$9,%ebp
++	# 		x13 ^= b
++	xor	%rbp,%r13
++	# a = x4 + x8
++	lea	(%r9,%r11),%rbp
++	# (uint32) a <<<= 13
++	rol	$13,%ebp
++	# x12 ^= a
++	xor	%rbp,%r14
++	# 		b = x9 + x13
++	lea	(%r10,%r13),%rbp
++	# 		(uint32) b <<<= 13
++	rol	$13,%ebp
++	# 		x1 ^= b
++	xor	%rbp,%rdi
++	# a = x8 + x12
++	lea	(%r11,%r14),%rbp
++	# (uint32) a <<<= 18
++	rol	$18,%ebp
++	# x0 ^= a
++	xor	%rbp,%rdx
++	# 		b = x13 + x1
++	lea	(%r13,%rdi),%rbp
++	# 		(uint32) b <<<= 18
++	rol	$18,%ebp
++	# 		x5 ^= b
++	xor	%rbp,%r15
++	# 				x10 = x10_stack
++	movq	168(%rsp),%rbp
++	# 		x5_stack = x5
++	movq	%r15,160(%rsp)
++	# 				c = x6 + x10
++	lea	(%rax,%rbp),%r15
++	# 				(uint32) c <<<= 7
++	rol	$7,%r15d
++	# 				x14 ^= c
++	xor	%r15,%rbx
++	# 				c = x10 + x14
++	lea	(%rbp,%rbx),%r15
++	# 				(uint32) c <<<= 9
++	rol	$9,%r15d
++	# 				x2 ^= c
++	xor	%r15,%rcx
++	# 				c = x14 + x2
++	lea	(%rbx,%rcx),%r15
++	# 				(uint32) c <<<= 13
++	rol	$13,%r15d
++	# 				x6 ^= c
++	xor	%r15,%rax
++	# 				c = x2 + x6
++	lea	(%rcx,%rax),%r15
++	# 				(uint32) c <<<= 18
++	rol	$18,%r15d
++	# 				x10 ^= c
++	xor	%r15,%rbp
++	# 						x15 = x15_stack
++	movq	176(%rsp),%r15
++	# 				x10_stack = x10
++	movq	%rbp,168(%rsp)
++	# 						d = x11 + x15
++	lea	(%r12,%r15),%rbp
++	# 						(uint32) d <<<= 7
++	rol	$7,%ebp
++	# 						x3 ^= d
++	xor	%rbp,%rsi
++	# 						d = x15 + x3
++	lea	(%r15,%rsi),%rbp
++	# 						(uint32) d <<<= 9
++	rol	$9,%ebp
++	# 						x7 ^= d
++	xor	%rbp,%r8
++	# 						d = x3 + x7
++	lea	(%rsi,%r8),%rbp
++	# 						(uint32) d <<<= 13
++	rol	$13,%ebp
++	# 						x11 ^= d
++	xor	%rbp,%r12
++	# 						d = x7 + x11
++	lea	(%r8,%r12),%rbp
++	# 						(uint32) d <<<= 18
++	rol	$18,%ebp
++	# 						x15 ^= d
++	xor	%rbp,%r15
++	# 						x15_stack = x15
++	movq	%r15,176(%rsp)
++	# 		x5 = x5_stack
++	movq	160(%rsp),%r15
++	# a = x3 + x0
++	lea	(%rsi,%rdx),%rbp
++	# (uint32) a <<<= 7
++	rol	$7,%ebp
++	# x1 ^= a
++	xor	%rbp,%rdi
++	# 		b = x4 + x5
++	lea	(%r9,%r15),%rbp
++	# 		(uint32) b <<<= 7
++	rol	$7,%ebp
++	# 		x6 ^= b
++	xor	%rbp,%rax
++	# a = x0 + x1
++	lea	(%rdx,%rdi),%rbp
++	# (uint32) a <<<= 9
++	rol	$9,%ebp
++	# x2 ^= a
++	xor	%rbp,%rcx
++	# 		b = x5 + x6
++	lea	(%r15,%rax),%rbp
++	# 		(uint32) b <<<= 9
++	rol	$9,%ebp
++	# 		x7 ^= b
++	xor	%rbp,%r8
++	# a = x1 + x2
++	lea	(%rdi,%rcx),%rbp
++	# (uint32) a <<<= 13
++	rol	$13,%ebp
++	# x3 ^= a
++	xor	%rbp,%rsi
++	# 		b = x6 + x7
++	lea	(%rax,%r8),%rbp
++	# 		(uint32) b <<<= 13
++	rol	$13,%ebp
++	# 		x4 ^= b
++	xor	%rbp,%r9
++	# a = x2 + x3
++	lea	(%rcx,%rsi),%rbp
++	# (uint32) a <<<= 18
++	rol	$18,%ebp
++	# x0 ^= a
++	xor	%rbp,%rdx
++	# 		b = x7 + x4
++	lea	(%r8,%r9),%rbp
++	# 		(uint32) b <<<= 18
++	rol	$18,%ebp
++	# 		x5 ^= b
++	xor	%rbp,%r15
++	# 				x10 = x10_stack
++	movq	168(%rsp),%rbp
++	# 		x5_stack = x5
++	movq	%r15,160(%rsp)
++	# 				c = x9 + x10
++	lea	(%r10,%rbp),%r15
++	# 				(uint32) c <<<= 7
++	rol	$7,%r15d
++	# 				x11 ^= c
++	xor	%r15,%r12
++	# 				c = x10 + x11
++	lea	(%rbp,%r12),%r15
++	# 				(uint32) c <<<= 9
++	rol	$9,%r15d
++	# 				x8 ^= c
++	xor	%r15,%r11
++	# 				c = x11 + x8
++	lea	(%r12,%r11),%r15
++	# 				(uint32) c <<<= 13
++	rol	$13,%r15d
++	# 				x9 ^= c
++	xor	%r15,%r10
++	# 				c = x8 + x9
++	lea	(%r11,%r10),%r15
++	# 				(uint32) c <<<= 18
++	rol	$18,%r15d
++	# 				x10 ^= c
++	xor	%r15,%rbp
++	# 						x15 = x15_stack
++	movq	176(%rsp),%r15
++	# 				x10_stack = x10
++	movq	%rbp,168(%rsp)
++	# 						d = x14 + x15
++	lea	(%rbx,%r15),%rbp
++	# 						(uint32) d <<<= 7
++	rol	$7,%ebp
++	# 						x12 ^= d
++	xor	%rbp,%r14
++	# 						d = x15 + x12
++	lea	(%r15,%r14),%rbp
++	# 						(uint32) d <<<= 9
++	rol	$9,%ebp
++	# 						x13 ^= d
++	xor	%rbp,%r13
++	# 						d = x12 + x13
++	lea	(%r14,%r13),%rbp
++	# 						(uint32) d <<<= 13
++	rol	$13,%ebp
++	# 						x14 ^= d
++	xor	%rbp,%rbx
++	# 						d = x13 + x14
++	lea	(%r13,%rbx),%rbp
++	# 						(uint32) d <<<= 18
++	rol	$18,%ebp
++	# 						x15 ^= d
++	xor	%rbp,%r15
++	# 						x15_stack = x15
++	movq	%r15,176(%rsp)
++	# 		x5 = x5_stack
++	movq	160(%rsp),%r15
++	# a = x12 + x0
++	lea	(%r14,%rdx),%rbp
++	# (uint32) a <<<= 7
++	rol	$7,%ebp
++	# x4 ^= a
++	xor	%rbp,%r9
++	# 		b = x1 + x5
++	lea	(%rdi,%r15),%rbp
++	# 		(uint32) b <<<= 7
++	rol	$7,%ebp
++	# 		x9 ^= b
++	xor	%rbp,%r10
++	# a = x0 + x4
++	lea	(%rdx,%r9),%rbp
++	# (uint32) a <<<= 9
++	rol	$9,%ebp
++	# x8 ^= a
++	xor	%rbp,%r11
++	# 		b = x5 + x9
++	lea	(%r15,%r10),%rbp
++	# 		(uint32) b <<<= 9
++	rol	$9,%ebp
++	# 		x13 ^= b
++	xor	%rbp,%r13
++	# a = x4 + x8
++	lea	(%r9,%r11),%rbp
++	# (uint32) a <<<= 13
++	rol	$13,%ebp
++	# x12 ^= a
++	xor	%rbp,%r14
++	# 		b = x9 + x13
++	lea	(%r10,%r13),%rbp
++	# 		(uint32) b <<<= 13
++	rol	$13,%ebp
++	# 		x1 ^= b
++	xor	%rbp,%rdi
++	# a = x8 + x12
++	lea	(%r11,%r14),%rbp
++	# (uint32) a <<<= 18
++	rol	$18,%ebp
++	# x0 ^= a
++	xor	%rbp,%rdx
++	# 		b = x13 + x1
++	lea	(%r13,%rdi),%rbp
++	# 		(uint32) b <<<= 18
++	rol	$18,%ebp
++	# 		x5 ^= b
++	xor	%rbp,%r15
++	# 				x10 = x10_stack
++	movq	168(%rsp),%rbp
++	# 		x5_stack = x5
++	movq	%r15,160(%rsp)
++	# 				c = x6 + x10
++	lea	(%rax,%rbp),%r15
++	# 				(uint32) c <<<= 7
++	rol	$7,%r15d
++	# 				x14 ^= c
++	xor	%r15,%rbx
++	# 				c = x10 + x14
++	lea	(%rbp,%rbx),%r15
++	# 				(uint32) c <<<= 9
++	rol	$9,%r15d
++	# 				x2 ^= c
++	xor	%r15,%rcx
++	# 				c = x14 + x2
++	lea	(%rbx,%rcx),%r15
++	# 				(uint32) c <<<= 13
++	rol	$13,%r15d
++	# 				x6 ^= c
++	xor	%r15,%rax
++	# 				c = x2 + x6
++	lea	(%rcx,%rax),%r15
++	# 				(uint32) c <<<= 18
++	rol	$18,%r15d
++	# 				x10 ^= c
++	xor	%r15,%rbp
++	# 						x15 = x15_stack
++	movq	176(%rsp),%r15
++	# 				x10_stack = x10
++	movq	%rbp,168(%rsp)
++	# 						d = x11 + x15
++	lea	(%r12,%r15),%rbp
++	# 						(uint32) d <<<= 7
++	rol	$7,%ebp
++	# 						x3 ^= d
++	xor	%rbp,%rsi
++	# 						d = x15 + x3
++	lea	(%r15,%rsi),%rbp
++	# 						(uint32) d <<<= 9
++	rol	$9,%ebp
++	# 						x7 ^= d
++	xor	%rbp,%r8
++	# 						d = x3 + x7
++	lea	(%rsi,%r8),%rbp
++	# 						(uint32) d <<<= 13
++	rol	$13,%ebp
++	# 						x11 ^= d
++	xor	%rbp,%r12
++	# 						d = x7 + x11
++	lea	(%r8,%r12),%rbp
++	# 						(uint32) d <<<= 18
++	rol	$18,%ebp
++	# 						x15 ^= d
++	xor	%rbp,%r15
++	# 						x15_stack = x15
++	movq	%r15,176(%rsp)
++	# 		x5 = x5_stack
++	movq	160(%rsp),%r15
++	# a = x3 + x0
++	lea	(%rsi,%rdx),%rbp
++	# (uint32) a <<<= 7
++	rol	$7,%ebp
++	# x1 ^= a
++	xor	%rbp,%rdi
++	# 		b = x4 + x5
++	lea	(%r9,%r15),%rbp
++	# 		(uint32) b <<<= 7
++	rol	$7,%ebp
++	# 		x6 ^= b
++	xor	%rbp,%rax
++	# a = x0 + x1
++	lea	(%rdx,%rdi),%rbp
++	# (uint32) a <<<= 9
++	rol	$9,%ebp
++	# x2 ^= a
++	xor	%rbp,%rcx
++	# 		b = x5 + x6
++	lea	(%r15,%rax),%rbp
++	# 		(uint32) b <<<= 9
++	rol	$9,%ebp
++	# 		x7 ^= b
++	xor	%rbp,%r8
++	# a = x1 + x2
++	lea	(%rdi,%rcx),%rbp
++	# (uint32) a <<<= 13
++	rol	$13,%ebp
++	# x3 ^= a
++	xor	%rbp,%rsi
++	# 		b = x6 + x7
++	lea	(%rax,%r8),%rbp
++	# 		(uint32) b <<<= 13
++	rol	$13,%ebp
++	# 		x4 ^= b
++	xor	%rbp,%r9
++	# a = x2 + x3
++	lea	(%rcx,%rsi),%rbp
++	# (uint32) a <<<= 18
++	rol	$18,%ebp
++	# x0 ^= a
++	xor	%rbp,%rdx
++	# 		b = x7 + x4
++	lea	(%r8,%r9),%rbp
++	# 		(uint32) b <<<= 18
++	rol	$18,%ebp
++	# 		x5 ^= b
++	xor	%rbp,%r15
++	# 				x10 = x10_stack
++	movq	168(%rsp),%rbp
++	# 		x5_stack = x5
++	movq	%r15,160(%rsp)
++	# 				c = x9 + x10
++	lea	(%r10,%rbp),%r15
++	# 				(uint32) c <<<= 7
++	rol	$7,%r15d
++	# 				x11 ^= c
++	xor	%r15,%r12
++	# 				c = x10 + x11
++	lea	(%rbp,%r12),%r15
++	# 				(uint32) c <<<= 9
++	rol	$9,%r15d
++	# 				x8 ^= c
++	xor	%r15,%r11
++	# 				c = x11 + x8
++	lea	(%r12,%r11),%r15
++	# 				(uint32) c <<<= 13
++	rol	$13,%r15d
++	# 				x9 ^= c
++	xor	%r15,%r10
++	# 				c = x8 + x9
++	lea	(%r11,%r10),%r15
++	# 				(uint32) c <<<= 18
++	rol	$18,%r15d
++	# 				x10 ^= c
++	xor	%r15,%rbp
++	# 						x15 = x15_stack
++	movq	176(%rsp),%r15
++	# 				x10_stack = x10
++	movq	%rbp,168(%rsp)
++	# 						d = x14 + x15
++	lea	(%rbx,%r15),%rbp
++	# 						(uint32) d <<<= 7
++	rol	$7,%ebp
++	# 						x12 ^= d
++	xor	%rbp,%r14
++	# 						d = x15 + x12
++	lea	(%r15,%r14),%rbp
++	# 						(uint32) d <<<= 9
++	rol	$9,%ebp
++	# 						x13 ^= d
++	xor	%rbp,%r13
++	# 						d = x12 + x13
++	lea	(%r14,%r13),%rbp
++	# 						(uint32) d <<<= 13
++	rol	$13,%ebp
++	# 						x14 ^= d
++	xor	%rbp,%rbx
++	# 						d = x13 + x14
++	lea	(%r13,%rbx),%rbp
++	# 						(uint32) d <<<= 18
++	rol	$18,%ebp
++	# 						x15 ^= d
++	xor	%rbp,%r15
++	# 						x15_stack = x15
++	movq	%r15,176(%rsp)
++	#   i = i_backup
++	movq	184(%rsp),%r15
++	#                  unsigned>? i -= 4
++	sub	$4,%r15
++	# comment:fp stack unchanged by jump
++	# goto mainloop if unsigned>
++	ja	._mainloop
++	#   (uint32) x2 += j2
++	addl	64(%rsp),%ecx
++	#   x3 <<= 32
++	shl	$32,%rsi
++	#   x3 += j2
++	addq	64(%rsp),%rsi
++	#   (uint64) x3 >>= 32
++	shr	$32,%rsi
++	#   x3 <<= 32
++	shl	$32,%rsi
++	#   x2 += x3
++	add	%rsi,%rcx
++	#   (uint32) x6 += j6
++	addl	80(%rsp),%eax
++	#   x7 <<= 32
++	shl	$32,%r8
++	#   x7 += j6
++	addq	80(%rsp),%r8
++	#   (uint64) x7 >>= 32
++	shr	$32,%r8
++	#   x7 <<= 32
++	shl	$32,%r8
++	#   x6 += x7
++	add	%r8,%rax
++	#   (uint32) x8 += j8
++	addl	88(%rsp),%r11d
++	#   x9 <<= 32
++	shl	$32,%r10
++	#   x9 += j8
++	addq	88(%rsp),%r10
++	#   (uint64) x9 >>= 32
++	shr	$32,%r10
++	#   x9 <<= 32
++	shl	$32,%r10
++	#   x8 += x9
++	add	%r10,%r11
++	#   (uint32) x12 += j12
++	addl	104(%rsp),%r14d
++	#   x13 <<= 32
++	shl	$32,%r13
++	#   x13 += j12
++	addq	104(%rsp),%r13
++	#   (uint64) x13 >>= 32
++	shr	$32,%r13
++	#   x13 <<= 32
++	shl	$32,%r13
++	#   x12 += x13
++	add	%r13,%r14
++	#   (uint32) x0 += j0
++	addl	56(%rsp),%edx
++	#   x1 <<= 32
++	shl	$32,%rdi
++	#   x1 += j0
++	addq	56(%rsp),%rdi
++	#   (uint64) x1 >>= 32
++	shr	$32,%rdi
++	#   x1 <<= 32
++	shl	$32,%rdi
++	#   x0 += x1
++	add	%rdi,%rdx
++	#   x5 = x5_stack
++	movq	160(%rsp),%rdi
++	#   (uint32) x4 += j4
++	addl	72(%rsp),%r9d
++	#   x5 <<= 32
++	shl	$32,%rdi
++	#   x5 += j4
++	addq	72(%rsp),%rdi
++	#   (uint64) x5 >>= 32
++	shr	$32,%rdi
++	#   x5 <<= 32
++	shl	$32,%rdi
++	#   x4 += x5
++	add	%rdi,%r9
++	#   x10 = x10_stack
++	movq	168(%rsp),%r8
++	#   (uint32) x10 += j10
++	addl	96(%rsp),%r8d
++	#   x11 <<= 32
++	shl	$32,%r12
++	#   x11 += j10
++	addq	96(%rsp),%r12
++	#   (uint64) x11 >>= 32
++	shr	$32,%r12
++	#   x11 <<= 32
++	shl	$32,%r12
++	#   x10 += x11
++	add	%r12,%r8
++	#   x15 = x15_stack
++	movq	176(%rsp),%rdi
++	#   (uint32) x14 += j14
++	addl	112(%rsp),%ebx
++	#   x15 <<= 32
++	shl	$32,%rdi
++	#   x15 += j14
++	addq	112(%rsp),%rdi
++	#   (uint64) x15 >>= 32
++	shr	$32,%rdi
++	#   x15 <<= 32
++	shl	$32,%rdi
++	#   x14 += x15
++	add	%rdi,%rbx
++	#   out = out_backup
++	movq	136(%rsp),%rdi
++	#   m = m_backup
++	movq	144(%rsp),%rsi
++	#   x0 ^= *(uint64 *) (m + 0)
++	xorq	0(%rsi),%rdx
++	#   *(uint64 *) (out + 0) = x0
++	movq	%rdx,0(%rdi)
++	#   x2 ^= *(uint64 *) (m + 8)
++	xorq	8(%rsi),%rcx
++	#   *(uint64 *) (out + 8) = x2
++	movq	%rcx,8(%rdi)
++	#   x4 ^= *(uint64 *) (m + 16)
++	xorq	16(%rsi),%r9
++	#   *(uint64 *) (out + 16) = x4
++	movq	%r9,16(%rdi)
++	#   x6 ^= *(uint64 *) (m + 24)
++	xorq	24(%rsi),%rax
++	#   *(uint64 *) (out + 24) = x6
++	movq	%rax,24(%rdi)
++	#   x8 ^= *(uint64 *) (m + 32)
++	xorq	32(%rsi),%r11
++	#   *(uint64 *) (out + 32) = x8
++	movq	%r11,32(%rdi)
++	#   x10 ^= *(uint64 *) (m + 40)
++	xorq	40(%rsi),%r8
++	#   *(uint64 *) (out + 40) = x10
++	movq	%r8,40(%rdi)
++	#   x12 ^= *(uint64 *) (m + 48)
++	xorq	48(%rsi),%r14
++	#   *(uint64 *) (out + 48) = x12
++	movq	%r14,48(%rdi)
++	#   x14 ^= *(uint64 *) (m + 56)
++	xorq	56(%rsi),%rbx
++	#   *(uint64 *) (out + 56) = x14
++	movq	%rbx,56(%rdi)
++	#   bytes = bytes_backup
++	movq	152(%rsp),%rdx
++	#   in8 = j8
++	movq	88(%rsp),%rcx
++	#   in8 += 1
++	add	$1,%rcx
++	#   j8 = in8
++	movq	%rcx,88(%rsp)
++	#                          unsigned>? unsigned<? bytes - 64
++	cmp	$64,%rdx
++	# comment:fp stack unchanged by jump
++	#   goto bytesatleast65 if unsigned>
++	ja	._bytesatleast65
++	# comment:fp stack unchanged by jump
++	#     goto bytesatleast64 if !unsigned<
++	jae	._bytesatleast64
++	#       m = out
++	mov	%rdi,%rsi
++	#       out = ctarget
++	movq	128(%rsp),%rdi
++	#       i = bytes
++	mov	%rdx,%rcx
++	#       while (i) { *out++ = *m++; --i }
++	rep	movsb
++	# comment:fp stack unchanged by fallthrough
++#     bytesatleast64:
++._bytesatleast64:
++	#     x = x_backup
++	movq	120(%rsp),%rdi
++	#     in8 = j8
++	movq	88(%rsp),%rsi
++	#     *(uint64 *) (x + 32) = in8
++	movq	%rsi,32(%rdi)
++	#     r11 = r11_stack
++	movq	0(%rsp),%r11
++	#     r12 = r12_stack
++	movq	8(%rsp),%r12
++	#     r13 = r13_stack
++	movq	16(%rsp),%r13
++	#     r14 = r14_stack
++	movq	24(%rsp),%r14
++	#     r15 = r15_stack
++	movq	32(%rsp),%r15
++	#     rbx = rbx_stack
++	movq	40(%rsp),%rbx
++	#     rbp = rbp_stack
++	movq	48(%rsp),%rbp
++	# comment:fp stack unchanged by fallthrough
++#     done:
++._done:
++	#     leave
++	add	%r11,%rsp
++	mov	%rdi,%rax
++	mov	%rsi,%rdx
++	ret
++#   bytesatleast65:
++._bytesatleast65:
++	#   bytes -= 64
++	sub	$64,%rdx
++	#   out += 64
++	add	$64,%rdi
++	#   m += 64
++	add	$64,%rsi
++	# comment:fp stack unchanged by jump
++	# goto bytesatleast1
++	jmp	._bytesatleast1
++# enter ECRYPT_keysetup
++.text
++.p2align 5
++.globl ECRYPT_keysetup
++ECRYPT_keysetup:
++	mov	%rsp,%r11
++	and	$31,%r11
++	add	$256,%r11
++	sub	%r11,%rsp
++	#   k = arg2
++	mov	%rsi,%rsi
++	#   kbits = arg3
++	mov	%rdx,%rdx
++	#   x = arg1
++	mov	%rdi,%rdi
++	#   in0 = *(uint64 *) (k + 0)
++	movq	0(%rsi),%r8
++	#   in2 = *(uint64 *) (k + 8)
++	movq	8(%rsi),%r9
++	#   *(uint64 *) (x + 4) = in0
++	movq	%r8,4(%rdi)
++	#   *(uint64 *) (x + 12) = in2
++	movq	%r9,12(%rdi)
++	#                    unsigned<? kbits - 256
++	cmp	$256,%rdx
++	# comment:fp stack unchanged by jump
++	#   goto kbits128 if unsigned<
++	jb	._kbits128
++#   kbits256:
++._kbits256:
++	#     in10 = *(uint64 *) (k + 16)
++	movq	16(%rsi),%rdx
++	#     in12 = *(uint64 *) (k + 24)
++	movq	24(%rsi),%rsi
++	#     *(uint64 *) (x + 44) = in10
++	movq	%rdx,44(%rdi)
++	#     *(uint64 *) (x + 52) = in12
++	movq	%rsi,52(%rdi)
++	#     in0 = 1634760805
++	mov	$1634760805,%rsi
++	#     in4 = 857760878
++	mov	$857760878,%rdx
++	#     in10 = 2036477234
++	mov	$2036477234,%rcx
++	#     in14 = 1797285236
++	mov	$1797285236,%r8
++	#     *(uint32 *) (x + 0) = in0
++	movl	%esi,0(%rdi)
++	#     *(uint32 *) (x + 20) = in4
++	movl	%edx,20(%rdi)
++	#     *(uint32 *) (x + 40) = in10
++	movl	%ecx,40(%rdi)
++	#     *(uint32 *) (x + 60) = in14
++	movl	%r8d,60(%rdi)
++	# comment:fp stack unchanged by jump
++	#   goto keysetupdone
++	jmp	._keysetupdone
++#   kbits128:
++._kbits128:
++	#     in10 = *(uint64 *) (k + 0)
++	movq	0(%rsi),%rdx
++	#     in12 = *(uint64 *) (k + 8)
++	movq	8(%rsi),%rsi
++	#     *(uint64 *) (x + 44) = in10
++	movq	%rdx,44(%rdi)
++	#     *(uint64 *) (x + 52) = in12
++	movq	%rsi,52(%rdi)
++	#     in0 = 1634760805
++	mov	$1634760805,%rsi
++	#     in4 = 824206446
++	mov	$824206446,%rdx
++	#     in10 = 2036477238
++	mov	$2036477238,%rcx
++	#     in14 = 1797285236
++	mov	$1797285236,%r8
++	#     *(uint32 *) (x + 0) = in0
++	movl	%esi,0(%rdi)
++	#     *(uint32 *) (x + 20) = in4
++	movl	%edx,20(%rdi)
++	#     *(uint32 *) (x + 40) = in10
++	movl	%ecx,40(%rdi)
++	#     *(uint32 *) (x + 60) = in14
++	movl	%r8d,60(%rdi)
++#   keysetupdone:
++._keysetupdone:
++	# leave
++	add	%r11,%rsp
++	mov	%rdi,%rax
++	mov	%rsi,%rdx
++	ret
++# enter ECRYPT_ivsetup
++.text
++.p2align 5
++.globl ECRYPT_ivsetup
++ECRYPT_ivsetup:
++	mov	%rsp,%r11
++	and	$31,%r11
++	add	$256,%r11
++	sub	%r11,%rsp
++	#   iv = arg2
++	mov	%rsi,%rsi
++	#   x = arg1
++	mov	%rdi,%rdi
++	#   in6 = *(uint64 *) (iv + 0)
++	movq	0(%rsi),%rsi
++	#   in8 = 0
++	mov	$0,%r8
++	#   *(uint64 *) (x + 24) = in6
++	movq	%rsi,24(%rdi)
++	#   *(uint64 *) (x + 32) = in8
++	movq	%r8,32(%rdi)
++	# leave
++	add	%r11,%rsp
++	mov	%rdi,%rax
++	mov	%rsi,%rdx
++	ret
+diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
+new file mode 100644
+index 0000000..bccb76d
+--- /dev/null
++++ b/arch/x86/crypto/salsa20_glue.c
+@@ -0,0 +1,129 @@
++/*
++ * Glue code for optimized assembly version of  Salsa20.
++ *
++ * Copyright (c) 2007 Tan Swee Heng <thesweeheng at gmail.com>
++ *
++ * The assembly codes are public domain assembly codes written by Daniel. J.
++ * Bernstein <djb at cr.yp.to>. The codes are modified to include indentation
++ * and to remove extraneous comments and functions that are not needed.
++ * - i586 version, renamed as salsa20-i586-asm_32.S
++ *   available from <http://cr.yp.to/snuffle/salsa20/x86-pm/salsa20.s>
++ * - x86-64 version, renamed as salsa20-x86_64-asm_64.S
++ *   available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++
++#include <crypto/algapi.h>
++#include <linux/module.h>
++#include <linux/crypto.h>
++
++#define SALSA20_IV_SIZE        8U
++#define SALSA20_MIN_KEY_SIZE  16U
++#define SALSA20_MAX_KEY_SIZE  32U
++
++// use the ECRYPT_* function names
++#define salsa20_keysetup        ECRYPT_keysetup
++#define salsa20_ivsetup         ECRYPT_ivsetup
++#define salsa20_encrypt_bytes   ECRYPT_encrypt_bytes
++
++struct salsa20_ctx
++{
++	u32 input[16];
++};
++
++asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k,
++				 u32 keysize, u32 ivsize);
++asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv);
++asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx,
++				      const u8 *src, u8 *dst, u32 bytes);
++
++static int setkey(struct crypto_tfm *tfm, const u8 *key,
++		  unsigned int keysize)
++{
++	struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
++	salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8);
++	return 0;
++}
++
++static int encrypt(struct blkcipher_desc *desc,
++		   struct scatterlist *dst, struct scatterlist *src,
++		   unsigned int nbytes)
++{
++	struct blkcipher_walk walk;
++	struct crypto_blkcipher *tfm = desc->tfm;
++	struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
++	int err;
++
++	blkcipher_walk_init(&walk, dst, src, nbytes);
++	err = blkcipher_walk_virt_block(desc, &walk, 64);
++
++	salsa20_ivsetup(ctx, walk.iv);
++
++	if (likely(walk.nbytes == nbytes))
++	{
++		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
++				      walk.dst.virt.addr, nbytes);
++		return blkcipher_walk_done(desc, &walk, 0);
++	}
++
++	while (walk.nbytes >= 64) {
++		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
++				      walk.dst.virt.addr,
++				      walk.nbytes - (walk.nbytes % 64));
++		err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
++	}
++
++	if (walk.nbytes) {
++		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
++				      walk.dst.virt.addr, walk.nbytes);
++		err = blkcipher_walk_done(desc, &walk, 0);
++	}
++
++	return err;
++}
++
++static struct crypto_alg alg = {
++	.cra_name           =   "salsa20",
++	.cra_driver_name    =   "salsa20-asm",
++	.cra_priority       =   200,
++	.cra_flags          =   CRYPTO_ALG_TYPE_BLKCIPHER,
++	.cra_type           =   &crypto_blkcipher_type,
++	.cra_blocksize      =   1,
++	.cra_ctxsize        =   sizeof(struct salsa20_ctx),
++	.cra_alignmask      =	3,
++	.cra_module         =   THIS_MODULE,
++	.cra_list           =   LIST_HEAD_INIT(alg.cra_list),
++	.cra_u              =   {
++		.blkcipher = {
++			.setkey         =   setkey,
++			.encrypt        =   encrypt,
++			.decrypt        =   encrypt,
++			.min_keysize    =   SALSA20_MIN_KEY_SIZE,
++			.max_keysize    =   SALSA20_MAX_KEY_SIZE,
++			.ivsize         =   SALSA20_IV_SIZE,
++		}
++	}
++};
++
++static int __init init(void)
++{
++	return crypto_register_alg(&alg);
++}
++
++static void __exit fini(void)
++{
++	crypto_unregister_alg(&alg);
++}
++
++module_init(init);
++module_exit(fini);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
++MODULE_ALIAS("salsa20");
++MODULE_ALIAS("salsa20-asm");
+diff --git a/arch/x86/crypto/twofish_32.c b/arch/x86/crypto/twofish_32.c
 deleted file mode 100644
-index f533a06..0000000
---- a/arch/sh64/kernel/vmlinux.lds.S
+index e3004df..0000000
+--- a/arch/x86/crypto/twofish_32.c
 +++ /dev/null
-@@ -1,140 +0,0 @@
+@@ -1,97 +0,0 @@
 -/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh5/vmlinux.lds.S
+- *  Glue Code for optimized 586 assembler version of TWOFISH
 - *
-- * ld script to make ST50 Linux kernel
+- * Originally Twofish for GPG
+- * By Matthew Skala <mskala at ansuz.sooke.bc.ca>, July 26, 1998
+- * 256-bit key length added March 20, 1999
+- * Some modifications to reduce the text size by Werner Koch, April, 1998
+- * Ported to the kerneli patch by Marc Mutz <Marc at Mutz.com>
+- * Ported to CryptoAPI by Colin Slater <hoho at tacomeat.net>
 - *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
+- * The original author has disclaimed all copyright interest in this
+- * code and thus put it in the public domain. The subsequent authors
+- * have put this under the GNU General Public License.
 - *
-- * benedict.gaster at superh.com:	 2nd May 2002
-- *    Add definition of empty_zero_page to be the first page of kernel image.
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
 - *
-- * benedict.gaster at superh.com:	 3rd May 2002
-- *    Added support for ramdisk, removing statically linked romfs at the same time.
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
 - *
-- * lethal at linux-sh.org:          9th May 2003
-- *    Kill off GLOBAL_NAME() usage and other CDC-isms.
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+- * USA
 - *
-- * lethal at linux-sh.org:         19th May 2003
-- *    Remove support for ancient toolchains.
-- */
--
--#include <asm/page.h>
--#include <asm/cache.h>
--#include <asm/processor.h>
--#include <asm/thread_info.h>
--
--#define LOAD_OFFSET	CONFIG_CACHED_MEMORY_OFFSET
--#include <asm-generic/vmlinux.lds.h>
--
--OUTPUT_ARCH(sh:sh5)
--
--#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
--
--ENTRY(__start)
--SECTIONS
--{
--  . = CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
--  _text = .;			/* Text and read-only data */
--  text = .;			/* Text and read-only data */
--
--  .empty_zero_page : C_PHYS(.empty_zero_page) {
--	*(.empty_zero_page)
--	} = 0
--
--  .text : C_PHYS(.text) {
--  	*(.text.head)
--	TEXT_TEXT
--	*(.text64)
--        *(.text..SHmedia32)
--	SCHED_TEXT
--	LOCK_TEXT
--	*(.fixup)
--	*(.gnu.warning)
--#ifdef CONFIG_LITTLE_ENDIAN
--	} = 0x6ff0fff0
--#else
--	} = 0xf0fff06f
--#endif
--
--  /* We likely want __ex_table to be Cache Line aligned */
--  . = ALIGN(L1_CACHE_BYTES);		/* Exception table */
--  __start___ex_table = .;
--  __ex_table : C_PHYS(__ex_table) { *(__ex_table) }
--  __stop___ex_table = .;
--
--  _etext = .;			/* End of text section */
--
--  NOTES 
--
--  RODATA
--
--  .data : C_PHYS(.data) {			/* Data */
--	DATA_DATA
--	CONSTRUCTORS
--	}
--
--  . = ALIGN(PAGE_SIZE);
--  .data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) }
--
--  PERCPU(PAGE_SIZE)
--
--  . = ALIGN(L1_CACHE_BYTES);
--  .data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) }
--
--  _edata = .;			/* End of data section */
--
--  . = ALIGN(THREAD_SIZE);	/* init_task: structure size aligned */
--  .data.init_task : C_PHYS(.data.init_task) { *(.data.init_task) }
--
--  . = ALIGN(PAGE_SIZE);		/* Init code and data */
--  __init_begin = .;
--  _sinittext = .;
--  .init.text : C_PHYS(.init.text) { *(.init.text) }
--  _einittext = .;
--  .init.data : C_PHYS(.init.data) { *(.init.data) }
--  . = ALIGN(L1_CACHE_BYTES);	/* Better if Cache Line aligned */
--  __setup_start = .;
--  .init.setup : C_PHYS(.init.setup) { *(.init.setup) }
--  __setup_end = .;
--  __initcall_start = .;
--  .initcall.init : C_PHYS(.initcall.init) {
--	INITCALLS
--  }
--  __initcall_end = .;
--  __con_initcall_start = .;
--  .con_initcall.init : C_PHYS(.con_initcall.init) { *(.con_initcall.init) }
--  __con_initcall_end = .;
--  SECURITY_INIT
--
--#ifdef CONFIG_BLK_DEV_INITRD
--  __initramfs_start = .;
--  .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
--  __initramfs_end = .;
--#endif
--
--  . = ALIGN(PAGE_SIZE);
--  __init_end = .;
--
--  /* Align to the biggest single data representation, head and tail */
--  . = ALIGN(8);
--  __bss_start = .;		/* BSS */
--  .bss : C_PHYS(.bss) {
--	*(.bss)
--	}
--  . = ALIGN(8);
--  _end = . ;
--
--  /* Sections to be discarded */
--  /DISCARD/ : {
--	*(.exit.text)
--	*(.exit.data)
--	*(.exitcall.exit)
--	}
--
--  STABS_DEBUG
--  DWARF_DEBUG
--}
-diff --git a/arch/sh64/lib/.gitignore b/arch/sh64/lib/.gitignore
-deleted file mode 100644
-index 3508c2c..0000000
---- a/arch/sh64/lib/.gitignore
-+++ /dev/null
-@@ -1 +0,0 @@
--syscalltab.h
-diff --git a/arch/sh64/lib/Makefile b/arch/sh64/lib/Makefile
-deleted file mode 100644
-index 6a4cc3f..0000000
---- a/arch/sh64/lib/Makefile
-+++ /dev/null
-@@ -1,19 +0,0 @@
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 2000, 2001  Paolo Alberelli
--# Coprygith (C) 2003  Paul Mundt
--#
--# Makefile for the SH-5 specific library files..
--#
--# Note! Dependencies are done automagically by 'make dep', which also
--# removes any old dependencies. DON'T put your own dependencies here
--# unless it's something special (ie not a .c file).
--#
--
--# Panic should really be compiled as PIC
--lib-y  := udelay.o c-checksum.o dbg.o io.o panic.o memcpy.o copy_user_memcpy.o \
--		page_copy.o page_clear.o iomap.o
--
-diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c
-deleted file mode 100644
-index 053137a..0000000
---- a/arch/sh64/lib/c-checksum.c
-+++ /dev/null
-@@ -1,217 +0,0 @@
--/*
-- * arch/sh64/lib/c-checksum.c
+- * This code is a "clean room" implementation, written from the paper
+- * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
+- * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
+- * through http://www.counterpane.com/twofish.html
 - *
-- * This file contains network checksum routines that are better done
-- * in an architecture-specific manner due to speed..
+- * For background information on multiplication in finite fields, used for
+- * the matrix operations in the key schedule, see the book _Contemporary
+- * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
+- * Third Edition.
 - */
 -
--#undef DEBUG
--
--#include <linux/string.h>
--#include <linux/errno.h>
--#include <linux/kernel.h>
+-#include <crypto/twofish.h>
+-#include <linux/crypto.h>
+-#include <linux/init.h>
 -#include <linux/module.h>
--#include <asm/byteorder.h>
--#include <asm/uaccess.h>
--
--static inline unsigned short from64to16(unsigned long long x)
--{
--	/* add up 32-bit words for 33 bits */
--	x = (x & 0xffffffff) + (x >> 32);
--	/* add up 16-bit and 17-bit words for 17+c bits */
--	x = (x & 0xffff) + (x >> 16);
--	/* add up 16-bit and 2-bit for 16+c bit */
--	x = (x & 0xffff) + (x >> 16);
--	/* add up carry.. */
--	x = (x & 0xffff) + (x >> 16);
--	return x;
--}
--
--static inline unsigned short foldto16(unsigned long x)
--{
--	/* add up 16-bit for 17 bits */
--	x = (x & 0xffff) + (x >> 16);
--	/* add up carry.. */
--	x = (x & 0xffff) + (x >> 16);
--	return x;
--}
--
--static inline unsigned short myfoldto16(unsigned long long x)
--{
--	/* Fold down to 32-bits so we don't loose in the typedef-less
--	   network stack.  */
--	/* 64 to 33 */
--	x = (x & 0xffffffff) + (x >> 32);
--	/* 33 to 32 */
--	x = (x & 0xffffffff) + (x >> 32);
--
--	/* add up 16-bit for 17 bits */
--	x = (x & 0xffff) + (x >> 16);
--	/* add up carry.. */
--	x = (x & 0xffff) + (x >> 16);
--	return x;
--}
--
--#define odd(x) ((x)&1)
--#define U16(x) ntohs(x)
--
--static unsigned long do_csum(const unsigned char *buff, int len)
--{
--	int odd, count;
--	unsigned long result = 0;
--
--	pr_debug("do_csum buff %p, len %d (0x%x)\n", buff, len, len);
--#ifdef DEBUG
--	for (i = 0; i < len; i++) {
--		if ((i % 26) == 0)
--			printk("\n");
--		printk("%02X ", buff[i]);
--	}
--#endif
--
--	if (len <= 0)
--		goto out;
--
--	odd = 1 & (unsigned long) buff;
--	if (odd) {
--		result = *buff << 8;
--		len--;
--		buff++;
--	}
--	count = len >> 1;	/* nr of 16-bit words.. */
--	if (count) {
--		if (2 & (unsigned long) buff) {
--			result += *(unsigned short *) buff;
--			count--;
--			len -= 2;
--			buff += 2;
--		}
--		count >>= 1;	/* nr of 32-bit words.. */
--		if (count) {
--			unsigned long carry = 0;
--			do {
--				unsigned long w = *(unsigned long *) buff;
--				buff += 4;
--				count--;
--				result += carry;
--				result += w;
--				carry = (w > result);
--			} while (count);
--			result += carry;
--			result = (result & 0xffff) + (result >> 16);
--		}
--		if (len & 2) {
--			result += *(unsigned short *) buff;
--			buff += 2;
--		}
--	}
--	if (len & 1)
--		result += *buff;
--	result = foldto16(result);
--	if (odd)
--		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
--
--	pr_debug("\nCHECKSUM is 0x%lx\n", result);
--
--      out:
--	return result;
--}
--
--/* computes the checksum of a memory block at buff, length len,
--   and adds in "sum" (32-bit)  */
--__wsum csum_partial(const void *buff, int len, __wsum sum)
--{
--	unsigned long long result = do_csum(buff, len);
--
--	/* add in old sum, and carry.. */
--	result += (__force u32)sum;
--	/* 32+c bits -> 32 bits */
--	result = (result & 0xffffffff) + (result >> 32);
--
--	pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n",
--		buff, len, sum, result);
--
--	return (__force __wsum)result;
--}
--
--/* Copy while checksumming, otherwise like csum_partial.  */
--__wsum
--csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
--{
--	sum = csum_partial(src, len, sum);
--	memcpy(dst, src, len);
--
--	return sum;
--}
--
--/* Copy from userspace and compute checksum.  If we catch an exception
--   then zero the rest of the buffer.  */
--__wsum
--csum_partial_copy_from_user(const void __user *src, void *dst, int len,
--			    __wsum sum, int *err_ptr)
--{
--	int missing;
--
--	pr_debug
--	    ("csum_partial_copy_from_user src %p, dest %p, len %d, sum %08x, err_ptr %p\n",
--	     src, dst, len, sum, err_ptr);
--	missing = copy_from_user(dst, src, len);
--	pr_debug("  access_ok %d\n", __access_ok((unsigned long) src, len));
--	pr_debug("  missing %d\n", missing);
--	if (missing) {
--		memset(dst + len - missing, 0, missing);
--		*err_ptr = -EFAULT;
--	}
--
--	return csum_partial(dst, len, sum);
--}
--
--/* Copy to userspace and compute checksum.  */
--__wsum
--csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len,
--			  __wsum sum, int *err_ptr)
--{
--	sum = csum_partial(src, len, sum);
--
--	if (copy_to_user(dst, src, len))
--		*err_ptr = -EFAULT;
--
--	return sum;
--}
--
--/*
-- *	This is a version of ip_compute_csum() optimized for IP headers,
-- *	which always checksum on 4 octet boundaries.
-- */
--__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
--{
--	pr_debug("ip_fast_csum %p,%d\n", iph, ihl);
--
--	return (__force __sum16)~do_csum(iph, ihl * 4);
--}
--
--__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
--				unsigned short len,
--				unsigned short proto, __wsum sum)
--{
--	unsigned long long result;
--
--	pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead));
--	pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead));
--
--	result = (__force u64) saddr + (__force u64) daddr +
--		 (__force u64) sum + ((len + proto) << 8);
--
--	/* Fold down to 32-bits so we don't loose in the typedef-less
--	   network stack.  */
--	/* 64 to 33 */
--	result = (result & 0xffffffff) + (result >> 32);
--	/* 33 to 32 */
--	result = (result & 0xffffffff) + (result >> 32);
--
--	pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n",
--		__FUNCTION__, saddr, daddr, len, proto, sum, result);
--
--	return (__wsum)result;
--}
--EXPORT_SYMBOL(csum_tcpudp_nofold);
-diff --git a/arch/sh64/lib/copy_user_memcpy.S b/arch/sh64/lib/copy_user_memcpy.S
-deleted file mode 100644
-index 2a62816..0000000
---- a/arch/sh64/lib/copy_user_memcpy.S
-+++ /dev/null
-@@ -1,217 +0,0 @@
--!
--! Fast SH memcpy
--!
--! by Toshiyasu Morita (tm at netcom.com)
--! hacked by J"orn Rernnecke (joern.rennecke at superh.com) ("o for o-umlaut)
--! SH5 code Copyright 2002 SuperH Ltd.
--!
--! Entry: ARG0: destination pointer
--!        ARG1: source pointer
--!        ARG2: byte count
--!
--! Exit:  RESULT: destination pointer
--!        any other registers in the range r0-r7: trashed
--!
--! Notes: Usually one wants to do small reads and write a longword, but
--!        unfortunately it is difficult in some cases to concatanate bytes
--!        into a longword on the SH, so this does a longword read and small
--!        writes.
--!
--! This implementation makes two assumptions about how it is called:
--!
--! 1.: If the byte count is nonzero, the address of the last byte to be
--!     copied is unsigned greater than the address of the first byte to
--!     be copied.  This could be easily swapped for a signed comparison,
--!     but the algorithm used needs some comparison.
--!
--! 2.: When there are two or three bytes in the last word of an 11-or-more
--!     bytes memory chunk to b copied, the rest of the word can be read
--!     without side effects.
--!     This could be easily changed by increasing the minumum size of
--!     a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
--!     however, this would cost a few extra cyles on average.
--!     For SHmedia, the assumption is that any quadword can be read in its
--!     enirety if at least one byte is included in the copy.
--
--/* Imported into Linux kernel by Richard Curnow.  This is used to implement the
--   __copy_user function in the general case, so it has to be a distinct
--   function from intra-kernel memcpy to allow for exception fix-ups in the
--   event that the user pointer is bad somewhere in the copy (e.g. due to
--   running off the end of the vma).
--
--   Note, this algorithm will be slightly wasteful in the case where the source
--   and destination pointers are equally aligned, because the stlo/sthi pairs
--   could then be merged back into single stores.  If there are a lot of cache
--   misses, this is probably offset by the stall lengths on the preloads.
--
--*/
--
--/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
-- * erratum.  The first two prefetches are nop-ed out to avoid upsetting the
-- * instruction counts used in the jump address calculation.
-- * */
--
--	.section .text..SHmedia32,"ax"
--	.little
--	.balign 32
--	.global copy_user_memcpy
--	.global copy_user_memcpy_end
--copy_user_memcpy:
--
--#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
--#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
--#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
--#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
--
--	nop ! ld.b r3,0,r63 ! TAKum03020
--	pta/l Large,tr0
--	movi 25,r0
--	bgeu/u r4,r0,tr0
--	nsb r4,r0
--	shlli r0,5,r0
--	movi (L1-L0+63*32 + 1) & 0xffff,r1
--	sub r1, r0, r0
--L0:	ptrel r0,tr0
--	add r2,r4,r5
--	ptabs r18,tr1
--	add r3,r4,r6
--	blink tr0,r63
--
--/* Rearranged to make cut2 safe */
--	.balign 8
--L4_7:	/* 4..7 byte memcpy cntd. */
--	stlo.l r2, 0, r0
--	or r6, r7, r6
--	sthi.l r5, -1, r6
--	stlo.l r5, -4, r6
--	blink tr1,r63
--
--	.balign 8
--L1:	/* 0 byte memcpy */
--	nop
--	blink tr1,r63
--	nop
--	nop
--	nop
--	nop
--
--L2_3:	/* 2 or 3 byte memcpy cntd. */
--	st.b r5,-1,r6
--	blink tr1,r63
--
--	/* 1 byte memcpy */
--	ld.b r3,0,r0
--	st.b r2,0,r0
--	blink tr1,r63
--
--L8_15:	/* 8..15 byte memcpy cntd. */
--	stlo.q r2, 0, r0
--	or r6, r7, r6
--	sthi.q r5, -1, r6
--	stlo.q r5, -8, r6
--	blink tr1,r63
--
--	/* 2 or 3 byte memcpy */
--	ld.b r3,0,r0
--	nop ! ld.b r2,0,r63 ! TAKum03020
--	ld.b r3,1,r1
--	st.b r2,0,r0
--	pta/l L2_3,tr0
--	ld.b r6,-1,r6
--	st.b r2,1,r1
--	blink tr0, r63
--
--	/* 4 .. 7 byte memcpy */
--	LDUAL (r3, 0, r0, r1)
--	pta L4_7, tr0
--	ldlo.l r6, -4, r7
--	or r0, r1, r0
--	sthi.l r2, 3, r0
--	ldhi.l r6, -1, r6
--	blink tr0, r63
--
--	/* 8 .. 15 byte memcpy */
--	LDUAQ (r3, 0, r0, r1)
--	pta L8_15, tr0
--	ldlo.q r6, -8, r7
--	or r0, r1, r0
--	sthi.q r2, 7, r0
--	ldhi.q r6, -1, r6
--	blink tr0, r63
--
--	/* 16 .. 24 byte memcpy */
--	LDUAQ (r3, 0, r0, r1)
--	LDUAQ (r3, 8, r8, r9)
--	or r0, r1, r0
--	sthi.q r2, 7, r0
--	or r8, r9, r8
--	sthi.q r2, 15, r8
--	ldlo.q r6, -8, r7
--	ldhi.q r6, -1, r6
--	stlo.q r2, 8, r8
--	stlo.q r2, 0, r0
--	or r6, r7, r6
--	sthi.q r5, -1, r6
--	stlo.q r5, -8, r6
--	blink tr1,r63
--
--Large:
--	! ld.b r2, 0, r63 ! TAKum03020
--	pta/l  Loop_ua, tr1
--	ori r3, -8, r7
--	sub r2, r7, r22
--	sub r3, r2, r6
--	add r2, r4, r5
--	ldlo.q r3, 0, r0
--	addi r5, -16, r5
--	movi 64+8, r27 ! could subtract r7 from that.
--	stlo.q r2, 0, r0
--	sthi.q r2, 7, r0
--	ldx.q r22, r6, r0
--	bgtu/l r27, r4, tr1
--
--	addi r5, -48, r27
--	pta/l Loop_line, tr0
--	addi r6, 64, r36
--	addi r6, -24, r19
--	addi r6, -16, r20
--	addi r6, -8, r21
--
--Loop_line:
--	! ldx.q r22, r36, r63 ! TAKum03020
--	alloco r22, 32
--	synco
--	addi r22, 32, r22
--	ldx.q r22, r19, r23
--	sthi.q r22, -25, r0
--	ldx.q r22, r20, r24
--	ldx.q r22, r21, r25
--	stlo.q r22, -32, r0
--	ldx.q r22, r6,  r0
--	sthi.q r22, -17, r23
--	sthi.q r22,  -9, r24
--	sthi.q r22,  -1, r25
--	stlo.q r22, -24, r23
--	stlo.q r22, -16, r24
--	stlo.q r22,  -8, r25
--	bgeu r27, r22, tr0
--
--Loop_ua:
--	addi r22, 8, r22
--	sthi.q r22, -1, r0
--	stlo.q r22, -8, r0
--	ldx.q r22, r6, r0
--	bgtu/l r5, r22, tr1
--
--	add r3, r4, r7
--	ldlo.q r7, -8, r1
--	sthi.q r22, 7, r0
--	ldhi.q r7, -1, r7
--	ptabs r18,tr1
--	stlo.q r22, 0, r0
--	or r1, r7, r1
--	sthi.q r5, 15, r1
--	stlo.q r5, 8, r1
--	blink tr1, r63
--copy_user_memcpy_end:
--	nop
-diff --git a/arch/sh64/lib/dbg.c b/arch/sh64/lib/dbg.c
-deleted file mode 100644
-index 97816e0..0000000
---- a/arch/sh64/lib/dbg.c
-+++ /dev/null
-@@ -1,430 +0,0 @@
--/*--------------------------------------------------------------------------
----
---- Identity : Linux50 Debug Funcions
----
---- File     : arch/sh64/lib/dbg.C
----
---- Copyright 2000, 2001 STMicroelectronics Limited.
---- Copyright 2004 Richard Curnow (evt_debug etc)
----
----------------------------------------------------------------------------*/
 -#include <linux/types.h>
--#include <linux/kernel.h>
--#include <linux/sched.h>
--#include <linux/mm.h>
--#include <linux/fs.h>
--#include <asm/mmu_context.h>
--
--typedef u64 regType_t;
--
--static regType_t getConfigReg(u64 id)
--{
--	register u64 reg __asm__("r2");
--	asm volatile ("getcfg   %1, 0, %0":"=r" (reg):"r"(id));
--	return (reg);
--}
--
--/* ======================================================================= */
--
--static char *szTab[] = { "4k", "64k", "1M", "512M" };
--static char *protTab[] = { "----",
--	"---R",
--	"--X-",
--	"--XR",
--	"-W--",
--	"-W-R",
--	"-WX-",
--	"-WXR",
--	"U---",
--	"U--R",
--	"U-X-",
--	"U-XR",
--	"UW--",
--	"UW-R",
--	"UWX-",
--	"UWXR"
--};
--#define  ITLB_BASE	0x00000000
--#define  DTLB_BASE	0x00800000
--#define  MAX_TLBs		64
--/* PTE High */
--#define  GET_VALID(pte)        ((pte) & 0x1)
--#define  GET_SHARED(pte)       ((pte) & 0x2)
--#define  GET_ASID(pte)         ((pte >> 2) & 0x0ff)
--#define  GET_EPN(pte)          ((pte) & 0xfffff000)
--
--/* PTE Low */
--#define  GET_CBEHAVIOR(pte)    ((pte) & 0x3)
--#define  GET_PAGE_SIZE(pte)    szTab[((pte >> 3) & 0x3)]
--#define  GET_PROTECTION(pte)   protTab[((pte >> 6) & 0xf)]
--#define  GET_PPN(pte)          ((pte) & 0xfffff000)
--
--#define PAGE_1K_MASK           0x00000000
--#define PAGE_4K_MASK           0x00000010
--#define PAGE_64K_MASK          0x00000080
--#define MMU_PAGESIZE_MASK      (PAGE_64K_MASK | PAGE_4K_MASK)
--#define PAGE_1MB_MASK          MMU_PAGESIZE_MASK
--#define PAGE_1K                (1024)
--#define PAGE_4K                (1024 * 4)
--#define PAGE_64K               (1024 * 64)
--#define PAGE_1MB               (1024 * 1024)
--
--#define HOW_TO_READ_TLB_CONTENT  \
--       "[ ID]  PPN         EPN        ASID  Share  CB  P.Size   PROT.\n"
--
--void print_single_tlb(unsigned long tlb, int single_print)
--{
--	regType_t pteH;
--	regType_t pteL;
--	unsigned int valid, shared, asid, epn, cb, ppn;
--	char *pSize;
--	char *pProt;
--
--	/*
--	   ** in case of single print <single_print> is true, this implies:
--	   **   1) print the TLB in any case also if NOT VALID
--	   **   2) print out the header
--	 */
--
--	pteH = getConfigReg(tlb);
--	valid = GET_VALID(pteH);
--	if (single_print)
--		printk(HOW_TO_READ_TLB_CONTENT);
--	else if (!valid)
--		return;
 -
--	pteL = getConfigReg(tlb + 1);
 -
--	shared = GET_SHARED(pteH);
--	asid = GET_ASID(pteH);
--	epn = GET_EPN(pteH);
--	cb = GET_CBEHAVIOR(pteL);
--	pSize = GET_PAGE_SIZE(pteL);
--	pProt = GET_PROTECTION(pteL);
--	ppn = GET_PPN(pteL);
--	printk("[%c%2ld]  0x%08x  0x%08x  %03d   %02x    %02x   %4s    %s\n",
--	       ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP),
--	       ppn, epn, asid, shared, cb, pSize, pProt);
--}
+-asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 -
--void print_dtlb(void)
+-static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 -{
--	int count;
--	unsigned long tlb;
--
--	printk(" ================= SH-5 D-TLBs Status ===================\n");
--	printk(HOW_TO_READ_TLB_CONTENT);
--	tlb = DTLB_BASE;
--	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
--		print_single_tlb(tlb, 0);
--	printk
--	    (" =============================================================\n");
+-	twofish_enc_blk(tfm, dst, src);
 -}
 -
--void print_itlb(void)
+-static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 -{
--	int count;
--	unsigned long tlb;
--
--	printk(" ================= SH-5 I-TLBs Status ===================\n");
--	printk(HOW_TO_READ_TLB_CONTENT);
--	tlb = ITLB_BASE;
--	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
--		print_single_tlb(tlb, 0);
--	printk
--	    (" =============================================================\n");
+-	twofish_dec_blk(tfm, dst, src);
 -}
 -
--/* ======================================================================= */
--
--#ifdef CONFIG_POOR_MANS_STRACE
--
--#include "syscalltab.h"
--
--struct ring_node {
--	int evt;
--	int ret_addr;
--	int event;
--	int tra;
--	int pid;
--	unsigned long sp;
--	unsigned long pc;
--};
--
--static struct ring_node event_ring[16];
--static int event_ptr = 0;
--
--struct stored_syscall_data {
--	int pid;
--	int syscall_number;
--};
--
--#define N_STORED_SYSCALLS 16
--
--static struct stored_syscall_data stored_syscalls[N_STORED_SYSCALLS];
--static int syscall_next=0;
--static int syscall_next_print=0;
--
--void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs)
--{
--	int syscallno = tra & 0xff;
--	unsigned long sp;
--	unsigned long stack_bottom;
--	int pid;
--	struct ring_node *rr;
--
--	pid = current->pid;
--	stack_bottom = (unsigned long) task_stack_page(current);
--	asm volatile("ori r15, 0, %0" : "=r" (sp));
--	rr = event_ring + event_ptr;
--	rr->evt = evt;
--	rr->ret_addr = ret_addr;
--	rr->event = event;
--	rr->tra = tra;
--	rr->pid = pid;
--	rr->sp = sp;
--	rr->pc = regs->pc;
--
--	if (sp < stack_bottom + 3092) {
--		printk("evt_debug : stack underflow report\n");
--		int i, j;
--		for (j=0, i = event_ptr; j<16; j++) {
--			rr = event_ring + i;
--			printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n",
--				rr->evt, rr->event, rr->tra, rr->pid, rr->sp, rr->pc);
--			i--;
--			i &= 15;
--		}
--		panic("STACK UNDERFLOW\n");
--	}
--
--	event_ptr = (event_ptr + 1) & 15;
--
--	if ((event == 2) && (evt == 0x160)) {
--		if (syscallno < NUM_SYSCALL_INFO_ENTRIES) {
--			/* Store the syscall information to print later.  We
--			 * can't print this now - currently we're running with
--			 * SR.BL=1, so we can't take a tlbmiss (which could occur
--			 * in the console drivers under printk).
--			 *
--			 * Just overwrite old entries on ring overflow - this
--			 * is only for last-hope debugging. */
--			stored_syscalls[syscall_next].pid = current->pid;
--			stored_syscalls[syscall_next].syscall_number = syscallno;
--			syscall_next++;
--			syscall_next &= (N_STORED_SYSCALLS - 1);
+-static struct crypto_alg alg = {
+-	.cra_name		=	"twofish",
+-	.cra_driver_name	=	"twofish-i586",
+-	.cra_priority		=	200,
+-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+-	.cra_blocksize		=	TF_BLOCK_SIZE,
+-	.cra_ctxsize		=	sizeof(struct twofish_ctx),
+-	.cra_alignmask		=	3,
+-	.cra_module		=	THIS_MODULE,
+-	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
+-	.cra_u			=	{
+-		.cipher = {
+-			.cia_min_keysize	=	TF_MIN_KEY_SIZE,
+-			.cia_max_keysize	=	TF_MAX_KEY_SIZE,
+-			.cia_setkey		=	twofish_setkey,
+-			.cia_encrypt		=	twofish_encrypt,
+-			.cia_decrypt		=	twofish_decrypt
 -		}
 -	}
--}
--
--static void drain_syscalls(void) {
--	while (syscall_next_print != syscall_next) {
--		printk("Task %d: %s()\n",
--			stored_syscalls[syscall_next_print].pid,
--			syscall_info_table[stored_syscalls[syscall_next_print].syscall_number].name);
--			syscall_next_print++;
--			syscall_next_print &= (N_STORED_SYSCALLS - 1);
--	}
--}
--
--void evt_debug2(unsigned int ret)
--{
--	drain_syscalls();
--	printk("Task %d: syscall returns %08x\n", current->pid, ret);
--}
--
--void evt_debug_ret_from_irq(struct pt_regs *regs)
--{
--	int pid;
--	struct ring_node *rr;
--
--	pid = current->pid;
--	rr = event_ring + event_ptr;
--	rr->evt = 0xffff;
--	rr->ret_addr = 0;
--	rr->event = 0;
--	rr->tra = 0;
--	rr->pid = pid;
--	rr->pc = regs->pc;
--	event_ptr = (event_ptr + 1) & 15;
--}
--
--void evt_debug_ret_from_exc(struct pt_regs *regs)
--{
--	int pid;
--	struct ring_node *rr;
--
--	pid = current->pid;
--	rr = event_ring + event_ptr;
--	rr->evt = 0xfffe;
--	rr->ret_addr = 0;
--	rr->event = 0;
--	rr->tra = 0;
--	rr->pid = pid;
--	rr->pc = regs->pc;
--	event_ptr = (event_ptr + 1) & 15;
--}
--
--#endif /* CONFIG_POOR_MANS_STRACE */
--
--/* ======================================================================= */
--
--void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs)
--{
--
--	unsigned long long ah, al, bh, bl, ch, cl;
--
--	printk("\n");
--	printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n",
--	       ((from) ? from : "???"), current->pid, trapnr, signr);
--
--	asm volatile ("getcon   " __EXPEVT ", %0":"=r"(ah));
--	asm volatile ("getcon   " __EXPEVT ", %0":"=r"(al));
--	ah = (ah) >> 32;
--	al = (al) & 0xffffffff;
--	asm volatile ("getcon   " __KCR1 ", %0":"=r"(bh));
--	asm volatile ("getcon   " __KCR1 ", %0":"=r"(bl));
--	bh = (bh) >> 32;
--	bl = (bl) & 0xffffffff;
--	asm volatile ("getcon   " __INTEVT ", %0":"=r"(ch));
--	asm volatile ("getcon   " __INTEVT ", %0":"=r"(cl));
--	ch = (ch) >> 32;
--	cl = (cl) & 0xffffffff;
--	printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--
--	asm volatile ("getcon   " __PEXPEVT ", %0":"=r"(ah));
--	asm volatile ("getcon   " __PEXPEVT ", %0":"=r"(al));
--	ah = (ah) >> 32;
--	al = (al) & 0xffffffff;
--	asm volatile ("getcon   " __PSPC ", %0":"=r"(bh));
--	asm volatile ("getcon   " __PSPC ", %0":"=r"(bl));
--	bh = (bh) >> 32;
--	bl = (bl) & 0xffffffff;
--	asm volatile ("getcon   " __PSSR ", %0":"=r"(ch));
--	asm volatile ("getcon   " __PSSR ", %0":"=r"(cl));
--	ch = (ch) >> 32;
--	cl = (cl) & 0xffffffff;
--	printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--
--	ah = (regs->pc) >> 32;
--	al = (regs->pc) & 0xffffffff;
--	bh = (regs->regs[18]) >> 32;
--	bl = (regs->regs[18]) & 0xffffffff;
--	ch = (regs->regs[15]) >> 32;
--	cl = (regs->regs[15]) & 0xffffffff;
--	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--
--	ah = (regs->sr) >> 32;
--	al = (regs->sr) & 0xffffffff;
--	asm volatile ("getcon   " __TEA ", %0":"=r"(bh));
--	asm volatile ("getcon   " __TEA ", %0":"=r"(bl));
--	bh = (bh) >> 32;
--	bl = (bl) & 0xffffffff;
--	asm volatile ("getcon   " __KCR0 ", %0":"=r"(ch));
--	asm volatile ("getcon   " __KCR0 ", %0":"=r"(cl));
--	ch = (ch) >> 32;
--	cl = (cl) & 0xffffffff;
--	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--
--	ah = (regs->regs[0]) >> 32;
--	al = (regs->regs[0]) & 0xffffffff;
--	bh = (regs->regs[1]) >> 32;
--	bl = (regs->regs[1]) & 0xffffffff;
--	ch = (regs->regs[2]) >> 32;
--	cl = (regs->regs[2]) & 0xffffffff;
--	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--
--	ah = (regs->regs[3]) >> 32;
--	al = (regs->regs[3]) & 0xffffffff;
--	bh = (regs->regs[4]) >> 32;
--	bl = (regs->regs[4]) & 0xffffffff;
--	ch = (regs->regs[5]) >> 32;
--	cl = (regs->regs[5]) & 0xffffffff;
--	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--
--	ah = (regs->regs[6]) >> 32;
--	al = (regs->regs[6]) & 0xffffffff;
--	bh = (regs->regs[7]) >> 32;
--	bl = (regs->regs[7]) & 0xffffffff;
--	ch = (regs->regs[8]) >> 32;
--	cl = (regs->regs[8]) & 0xffffffff;
--	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--
--	ah = (regs->regs[9]) >> 32;
--	al = (regs->regs[9]) & 0xffffffff;
--	bh = (regs->regs[10]) >> 32;
--	bl = (regs->regs[10]) & 0xffffffff;
--	ch = (regs->regs[11]) >> 32;
--	cl = (regs->regs[11]) & 0xffffffff;
--	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--	printk("....\n");
--
--	ah = (regs->tregs[0]) >> 32;
--	al = (regs->tregs[0]) & 0xffffffff;
--	bh = (regs->tregs[1]) >> 32;
--	bl = (regs->tregs[1]) & 0xffffffff;
--	ch = (regs->tregs[2]) >> 32;
--	cl = (regs->tregs[2]) & 0xffffffff;
--	printk("T0  : %08Lx%08Lx T1  : %08Lx%08Lx T2  : %08Lx%08Lx\n",
--	       ah, al, bh, bl, ch, cl);
--	printk("....\n");
--
--	print_dtlb();
--	print_itlb();
--}
--
--/* ======================================================================= */
--
--/*
--** Depending on <base> scan the MMU, Data or Instruction side
--** looking for a valid mapping matching Eaddr & asid.
--** Return -1 if not found or the TLB id entry otherwise.
--** Note: it works only for 4k pages!
--*/
--static unsigned long
--lookup_mmu_side(unsigned long base, unsigned long Eaddr, unsigned long asid)
--{
--	regType_t pteH;
--	unsigned long epn;
--	int count;
--
--	epn = Eaddr & 0xfffff000;
--
--	for (count = 0; count < MAX_TLBs; count++, base += TLB_STEP) {
--		pteH = getConfigReg(base);
--		if (GET_VALID(pteH))
--			if ((unsigned long) GET_EPN(pteH) == epn)
--				if ((unsigned long) GET_ASID(pteH) == asid)
--					break;
--	}
--	return ((unsigned long) ((count < MAX_TLBs) ? base : -1));
--}
+-};
 -
--unsigned long lookup_dtlb(unsigned long Eaddr)
+-static int __init init(void)
 -{
--	unsigned long asid = get_asid();
--	return (lookup_mmu_side((u64) DTLB_BASE, Eaddr, asid));
+-	return crypto_register_alg(&alg);
 -}
 -
--unsigned long lookup_itlb(unsigned long Eaddr)
+-static void __exit fini(void)
 -{
--	unsigned long asid = get_asid();
--	return (lookup_mmu_side((u64) ITLB_BASE, Eaddr, asid));
+-	crypto_unregister_alg(&alg);
 -}
 -
--void print_page(struct page *page)
--{
--	printk("  page[%p] -> index 0x%lx,  count 0x%x,  flags 0x%lx\n",
--	       page, page->index, page_count(page), page->flags);
--	printk("       address_space = %p, pages =%ld\n", page->mapping,
--	       page->mapping->nrpages);
+-module_init(init);
+-module_exit(fini);
 -
--}
-diff --git a/arch/sh64/lib/io.c b/arch/sh64/lib/io.c
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION ("Twofish Cipher Algorithm, i586 asm optimized");
+-MODULE_ALIAS("twofish");
+diff --git a/arch/x86/crypto/twofish_64.c b/arch/x86/crypto/twofish_64.c
 deleted file mode 100644
-index a3f3a2b..0000000
---- a/arch/sh64/lib/io.c
+index 182d91d..0000000
+--- a/arch/x86/crypto/twofish_64.c
 +++ /dev/null
-@@ -1,128 +0,0 @@
+@@ -1,97 +0,0 @@
 -/*
-- * Copyright (C) 2000 David J. Mckay (david.mckay at st.com)
+- * Glue Code for optimized x86_64 assembler version of TWOFISH
 - *
-- * May be copied or modified under the terms of the GNU General Public
-- * License.  See linux/COPYING for more information.
+- * Originally Twofish for GPG
+- * By Matthew Skala <mskala at ansuz.sooke.bc.ca>, July 26, 1998
+- * 256-bit key length added March 20, 1999
+- * Some modifications to reduce the text size by Werner Koch, April, 1998
+- * Ported to the kerneli patch by Marc Mutz <Marc at Mutz.com>
+- * Ported to CryptoAPI by Colin Slater <hoho at tacomeat.net>
 - *
-- * This file contains the I/O routines for use on the overdrive board
+- * The original author has disclaimed all copyright interest in this
+- * code and thus put it in the public domain. The subsequent authors
+- * have put this under the GNU General Public License.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
 - *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+- * USA
+- *
+- * This code is a "clean room" implementation, written from the paper
+- * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
+- * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
+- * through http://www.counterpane.com/twofish.html
+- *
+- * For background information on multiplication in finite fields, used for
+- * the matrix operations in the key schedule, see the book _Contemporary
+- * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
+- * Third Edition.
 - */
 -
+-#include <crypto/twofish.h>
+-#include <linux/crypto.h>
+-#include <linux/init.h>
 -#include <linux/kernel.h>
--#include <linux/types.h>
--#include <linux/delay.h>
 -#include <linux/module.h>
--#include <asm/system.h>
--#include <asm/processor.h>
--#include <asm/io.h>
--
--/*  Now for the string version of these functions */
--void outsb(unsigned long port, const void *addr, unsigned long count)
--{
--	int i;
--	unsigned char *p = (unsigned char *) addr;
--
--	for (i = 0; i < count; i++, p++) {
--		outb(*p, port);
--	}
--}
--EXPORT_SYMBOL(outsb);
--
--void insb(unsigned long port, void *addr, unsigned long count)
--{
--	int i;
--	unsigned char *p = (unsigned char *) addr;
--
--	for (i = 0; i < count; i++, p++) {
--		*p = inb(port);
--	}
--}
--EXPORT_SYMBOL(insb);
--
--/* For the 16 and 32 bit string functions, we have to worry about alignment.
-- * The SH does not do unaligned accesses, so we have to read as bytes and
-- * then write as a word or dword.
-- * This can be optimised a lot more, especially in the case where the data
-- * is aligned
-- */
--
--void outsw(unsigned long port, const void *addr, unsigned long count)
--{
--	int i;
--	unsigned short tmp;
--	unsigned char *p = (unsigned char *) addr;
--
--	for (i = 0; i < count; i++, p += 2) {
--		tmp = (*p) | ((*(p + 1)) << 8);
--		outw(tmp, port);
--	}
--}
--EXPORT_SYMBOL(outsw);
--
--void insw(unsigned long port, void *addr, unsigned long count)
--{
--	int i;
--	unsigned short tmp;
--	unsigned char *p = (unsigned char *) addr;
--
--	for (i = 0; i < count; i++, p += 2) {
--		tmp = inw(port);
--		p[0] = tmp & 0xff;
--		p[1] = (tmp >> 8) & 0xff;
--	}
--}
--EXPORT_SYMBOL(insw);
--
--void outsl(unsigned long port, const void *addr, unsigned long count)
--{
--	int i;
--	unsigned tmp;
--	unsigned char *p = (unsigned char *) addr;
+-#include <linux/types.h>
 -
--	for (i = 0; i < count; i++, p += 4) {
--		tmp = (*p) | ((*(p + 1)) << 8) | ((*(p + 2)) << 16) |
--		    ((*(p + 3)) << 24);
--		outl(tmp, port);
--	}
--}
--EXPORT_SYMBOL(outsl);
+-asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 -
--void insl(unsigned long port, void *addr, unsigned long count)
+-static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 -{
--	int i;
--	unsigned tmp;
--	unsigned char *p = (unsigned char *) addr;
--
--	for (i = 0; i < count; i++, p += 4) {
--		tmp = inl(port);
--		p[0] = tmp & 0xff;
--		p[1] = (tmp >> 8) & 0xff;
--		p[2] = (tmp >> 16) & 0xff;
--		p[3] = (tmp >> 24) & 0xff;
--
--	}
+-	twofish_enc_blk(tfm, dst, src);
 -}
--EXPORT_SYMBOL(insl);
 -
--void memcpy_toio(void __iomem *to, const void *from, long count)
+-static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 -{
--	unsigned char *p = (unsigned char *) from;
--
--	while (count) {
--		count--;
--		writeb(*p++, to++);
--	}
+-	twofish_dec_blk(tfm, dst, src);
 -}
--EXPORT_SYMBOL(memcpy_toio);
--
--void memcpy_fromio(void *to, void __iomem *from, long count)
--{
--	int i;
--	unsigned char *p = (unsigned char *) to;
 -
--	for (i = 0; i < count; i++) {
--		p[i] = readb(from);
--		from++;
+-static struct crypto_alg alg = {
+-	.cra_name		=	"twofish",
+-	.cra_driver_name	=	"twofish-x86_64",
+-	.cra_priority		=	200,
+-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+-	.cra_blocksize		=	TF_BLOCK_SIZE,
+-	.cra_ctxsize		=	sizeof(struct twofish_ctx),
+-	.cra_alignmask		=	3,
+-	.cra_module		=	THIS_MODULE,
+-	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
+-	.cra_u			=	{
+-		.cipher = {
+-			.cia_min_keysize	=	TF_MIN_KEY_SIZE,
+-			.cia_max_keysize	=	TF_MAX_KEY_SIZE,
+-			.cia_setkey		=	twofish_setkey,
+-			.cia_encrypt		=	twofish_encrypt,
+-			.cia_decrypt		=	twofish_decrypt
+-		}
 -	}
--}
--EXPORT_SYMBOL(memcpy_fromio);
-diff --git a/arch/sh64/lib/iomap.c b/arch/sh64/lib/iomap.c
-deleted file mode 100644
-index 253d1e3..0000000
---- a/arch/sh64/lib/iomap.c
-+++ /dev/null
-@@ -1,54 +0,0 @@
--/*
-- * arch/sh64/lib/iomap.c
-- *
-- * Generic sh64 iomap interface
-- *
-- * Copyright (C) 2004  Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/pci.h>
--#include <asm/io.h>
--
--void __iomem *__attribute__ ((weak))
--ioport_map(unsigned long port, unsigned int len)
--{
--	return (void __iomem *)port;
--}
--EXPORT_SYMBOL(ioport_map);
--
--void ioport_unmap(void __iomem *addr)
--{
--	/* Nothing .. */
--}
--EXPORT_SYMBOL(ioport_unmap);
--
--#ifdef CONFIG_PCI
--void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
--{
--	unsigned long start = pci_resource_start(dev, bar);
--	unsigned long len = pci_resource_len(dev, bar);
--	unsigned long flags = pci_resource_flags(dev, bar);
--
--	if (!len)
--		return NULL;
--	if (max && len > max)
--		len = max;
--	if (flags & IORESOURCE_IO)
--		return ioport_map(start + pciio_virt, len);
--	if (flags & IORESOURCE_MEM)
--		return (void __iomem *)start;
--
--	/* What? */
--	return NULL;
--}
--EXPORT_SYMBOL(pci_iomap);
+-};
 -
--void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+-static int __init init(void)
 -{
--	/* Nothing .. */
+-	return crypto_register_alg(&alg);
 -}
--EXPORT_SYMBOL(pci_iounmap);
--#endif
-diff --git a/arch/sh64/lib/memcpy.c b/arch/sh64/lib/memcpy.c
-deleted file mode 100644
-index fba436a..0000000
---- a/arch/sh64/lib/memcpy.c
-+++ /dev/null
-@@ -1,81 +0,0 @@
--/*
-- * Copyright (C) 2002 Mark Debbage (Mark.Debbage at superh.com)
-- *
-- * May be copied or modified under the terms of the GNU General Public
-- * License.  See linux/COPYING for more information.
-- *
-- */
--
--#include <linux/types.h>
--#include <asm/string.h>
--
--// This is a simplistic optimization of memcpy to increase the
--// granularity of access beyond one byte using aligned
--// loads and stores. This is not an optimal implementation
--// for SH-5 (especially with regard to prefetching and the cache),
--// and a better version should be provided later ...
 -
--void *memcpy(void *dest, const void *src, size_t count)
+-static void __exit fini(void)
 -{
--	char *d = (char *) dest, *s = (char *) src;
--
--	if (count >= 32) {
--		int i = 8 - (((unsigned long) d) & 0x7);
--
--		if (i != 8)
--			while (i-- && count--) {
--				*d++ = *s++;
--			}
--
--		if (((((unsigned long) d) & 0x7) == 0) &&
--		    ((((unsigned long) s) & 0x7) == 0)) {
--			while (count >= 32) {
--				unsigned long long t1, t2, t3, t4;
--				t1 = *(unsigned long long *) (s);
--				t2 = *(unsigned long long *) (s + 8);
--				t3 = *(unsigned long long *) (s + 16);
--				t4 = *(unsigned long long *) (s + 24);
--				*(unsigned long long *) (d) = t1;
--				*(unsigned long long *) (d + 8) = t2;
--				*(unsigned long long *) (d + 16) = t3;
--				*(unsigned long long *) (d + 24) = t4;
--				d += 32;
--				s += 32;
--				count -= 32;
--			}
--			while (count >= 8) {
--				*(unsigned long long *) d =
--				    *(unsigned long long *) s;
--				d += 8;
--				s += 8;
--				count -= 8;
--			}
--		}
--
--		if (((((unsigned long) d) & 0x3) == 0) &&
--		    ((((unsigned long) s) & 0x3) == 0)) {
--			while (count >= 4) {
--				*(unsigned long *) d = *(unsigned long *) s;
--				d += 4;
--				s += 4;
--				count -= 4;
--			}
--		}
--
--		if (((((unsigned long) d) & 0x1) == 0) &&
--		    ((((unsigned long) s) & 0x1) == 0)) {
--			while (count >= 2) {
--				*(unsigned short *) d = *(unsigned short *) s;
--				d += 2;
--				s += 2;
--				count -= 2;
--			}
--		}
--	}
--
--	while (count--) {
--		*d++ = *s++;
--	}
--
--	return d;
+-	crypto_unregister_alg(&alg);
 -}
-diff --git a/arch/sh64/lib/page_clear.S b/arch/sh64/lib/page_clear.S
-deleted file mode 100644
-index ac0111d..0000000
---- a/arch/sh64/lib/page_clear.S
-+++ /dev/null
-@@ -1,54 +0,0 @@
--/*
--   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
--
--   This file is subject to the terms and conditions of the GNU General Public
--   License.  See the file "COPYING" in the main directory of this archive
--   for more details.
--
--   Tight version of memset for the case of just clearing a page.  It turns out
--   that having the alloco's spaced out slightly due to the increment/branch
--   pair causes them to contend less for access to the cache.  Similarly,
--   keeping the stores apart from the allocos causes less contention.  => Do two
--   separate loops.  Do multiple stores per loop to amortise the
--   increment/branch cost a little.
--
--   Parameters:
--   r2 : source effective address (start of page)
--
--   Always clears 4096 bytes.
--
--   Note : alloco guarded by synco to avoid TAKum03020 erratum
--
--*/
--
--	.section .text..SHmedia32,"ax"
--	.little
--
--	.balign 8
--	.global sh64_page_clear
--sh64_page_clear:
--	pta/l 1f, tr1
--	pta/l 2f, tr2
--	ptabs/l r18, tr0
--
--	movi 4096, r7
--	add  r2, r7, r7
--	add  r2, r63, r6
--1:
--	alloco r6, 0
--	synco	! TAKum03020
--	addi	r6, 32, r6
--	bgt/l	r7, r6, tr1
--
--	add  r2, r63, r6
--2:
--	st.q  r6,   0, r63
--	st.q  r6,   8, r63
--	st.q  r6,  16, r63
--	st.q  r6,  24, r63
--	addi r6, 32, r6
--	bgt/l r7, r6, tr2
--
--	blink tr0, r63
--
--
-diff --git a/arch/sh64/lib/page_copy.S b/arch/sh64/lib/page_copy.S
-deleted file mode 100644
-index e159c3c..0000000
---- a/arch/sh64/lib/page_copy.S
-+++ /dev/null
-@@ -1,91 +0,0 @@
--/*
--   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
--
--   This file is subject to the terms and conditions of the GNU General Public
--   License.  See the file "COPYING" in the main directory of this archive
--   for more details.
--
--   Tight version of mempy for the case of just copying a page.
--   Prefetch strategy empirically optimised against RTL simulations
--   of SH5-101 cut2 eval chip with Cayman board DDR memory.
--
--   Parameters:
--   r2 : source effective address (start of page)
--   r3 : destination effective address (start of page)
 -
--   Always copies 4096 bytes.
+-module_init(init);
+-module_exit(fini);
 -
--   Points to review.
--   * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
--     It seems like the prefetch needs to be at at least 4 lines ahead to get
--     the data into the cache in time, and the allocos contend with outstanding
--     prefetches for the same cache set, so it's better to have the numbers
--     different.
--   */
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION ("Twofish Cipher Algorithm, x86_64 asm optimized");
+-MODULE_ALIAS("twofish");
+diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
+new file mode 100644
+index 0000000..cefaf8b
+--- /dev/null
++++ b/arch/x86/crypto/twofish_glue.c
+@@ -0,0 +1,97 @@
++/*
++ * Glue Code for assembler optimized version of TWOFISH
++ *
++ * Originally Twofish for GPG
++ * By Matthew Skala <mskala at ansuz.sooke.bc.ca>, July 26, 1998
++ * 256-bit key length added March 20, 1999
++ * Some modifications to reduce the text size by Werner Koch, April, 1998
++ * Ported to the kerneli patch by Marc Mutz <Marc at Mutz.com>
++ * Ported to CryptoAPI by Colin Slater <hoho at tacomeat.net>
++ *
++ * The original author has disclaimed all copyright interest in this
++ * code and thus put it in the public domain. The subsequent authors
++ * have put this under the GNU General Public License.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
++ * USA
++ *
++ * This code is a "clean room" implementation, written from the paper
++ * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
++ * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
++ * through http://www.counterpane.com/twofish.html
++ *
++ * For background information on multiplication in finite fields, used for
++ * the matrix operations in the key schedule, see the book _Contemporary
++ * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
++ * Third Edition.
++ */
++
++#include <crypto/twofish.h>
++#include <linux/crypto.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/types.h>
++
++asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
++asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
++
++static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
++{
++	twofish_enc_blk(tfm, dst, src);
++}
++
++static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
++{
++	twofish_dec_blk(tfm, dst, src);
++}
++
++static struct crypto_alg alg = {
++	.cra_name		=	"twofish",
++	.cra_driver_name	=	"twofish-asm",
++	.cra_priority		=	200,
++	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
++	.cra_blocksize		=	TF_BLOCK_SIZE,
++	.cra_ctxsize		=	sizeof(struct twofish_ctx),
++	.cra_alignmask		=	3,
++	.cra_module		=	THIS_MODULE,
++	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
++	.cra_u			=	{
++		.cipher = {
++			.cia_min_keysize	=	TF_MIN_KEY_SIZE,
++			.cia_max_keysize	=	TF_MAX_KEY_SIZE,
++			.cia_setkey		=	twofish_setkey,
++			.cia_encrypt		=	twofish_encrypt,
++			.cia_decrypt		=	twofish_decrypt
++		}
++	}
++};
++
++static int __init init(void)
++{
++	return crypto_register_alg(&alg);
++}
++
++static void __exit fini(void)
++{
++	crypto_unregister_alg(&alg);
++}
++
++module_init(init);
++module_exit(fini);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
++MODULE_ALIAS("twofish");
++MODULE_ALIAS("twofish-asm");
+diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
+index e2edda2..52d0ccf 100644
+--- a/arch/x86/ia32/Makefile
++++ b/arch/x86/ia32/Makefile
+@@ -2,9 +2,7 @@
+ # Makefile for the ia32 kernel emulation subsystem.
+ #
+ 
+-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o tls32.o \
+-	ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o \
+-	mmap32.o
++obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
+ 
+ sysv-$(CONFIG_SYSVIPC) := ipc32.o
+ obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
+@@ -13,40 +11,3 @@ obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
+ 
+ audit-class-$(CONFIG_AUDIT) := audit.o
+ obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
 -
--	.section .text..SHmedia32,"ax"
--	.little
+-$(obj)/syscall32_syscall.o: \
+-	$(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
 -
--	.balign 8
--	.global sh64_page_copy
--sh64_page_copy:
+-# Teach kbuild about targets
+-targets := $(foreach F,$(addprefix vsyscall-,sysenter syscall),\
+-		     $F.o $F.so $F.so.dbg)
 -
--	/* Copy 4096 bytes worth of data from r2 to r3.
--	   Do prefetches 4 lines ahead.
--	   Do alloco 2 lines ahead */
+-# The DSO images are built using a special linker script
+-quiet_cmd_syscall = SYSCALL $@
+-      cmd_syscall = $(CC) -m32 -nostdlib -shared \
+-			  $(call ld-option, -Wl$(comma)--hash-style=sysv) \
+-			   -Wl,-soname=linux-gate.so.1 -o $@ \
+-			   -Wl,-T,$(filter-out FORCE,$^)
 -
--	pta 1f, tr1
--	pta 2f, tr2
--	pta 3f, tr3
--	ptabs r18, tr0
+-$(obj)/%.so: OBJCOPYFLAGS := -S
+-$(obj)/%.so: $(obj)/%.so.dbg FORCE
+-	$(call if_changed,objcopy)
 -
--#if 0
--	/* TAKum03020 */
--	ld.q r2, 0x00, r63
--	ld.q r2, 0x20, r63
--	ld.q r2, 0x40, r63
--	ld.q r2, 0x60, r63
--#endif
--	alloco r3, 0x00
--	synco		! TAKum03020
--	alloco r3, 0x20
--	synco		! TAKum03020
+-$(obj)/vsyscall-sysenter.so.dbg $(obj)/vsyscall-syscall.so.dbg: \
+-$(obj)/vsyscall-%.so.dbg: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+-	$(call if_changed,syscall)
 -
--	movi 3968, r6
--	add  r3, r6, r6
--	addi r6, 64, r7
--	addi r7, 64, r8
--	sub r2, r3, r60
--	addi r60, 8, r61
--	addi r61, 8, r62
--	addi r62, 8, r23
--	addi r60, 0x80, r22
+-AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
+-AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
 -
--/* Minimal code size.  The extra branches inside the loop don't cost much
--   because they overlap with the time spent waiting for prefetches to
--   complete. */
--1:
--#if 0
--	/* TAKum03020 */
--	bge/u r3, r6, tr2  ! skip prefetch for last 4 lines
--	ldx.q r3, r22, r63 ! prefetch 4 lines hence
--#endif
--2:
--	bge/u r3, r7, tr3  ! skip alloco for last 2 lines
--	alloco r3, 0x40    ! alloc destination line 2 lines ahead
--	synco		! TAKum03020
--3:
--	ldx.q r3, r60, r36
--	ldx.q r3, r61, r37
--	ldx.q r3, r62, r38
--	ldx.q r3, r23, r39
--	st.q  r3,   0, r36
--	st.q  r3,   8, r37
--	st.q  r3,  16, r38
--	st.q  r3,  24, r39
--	addi r3, 32, r3
--	bgt/l r8, r3, tr1
+-vdsos := vdso32-sysenter.so vdso32-syscall.so
 -
--	blink tr0, r63	   ! return
+-quiet_cmd_vdso_install = INSTALL $@
+-      cmd_vdso_install = cp $(@:vdso32-%.so=$(obj)/vsyscall-%.so.dbg) \
+-			    $(MODLIB)/vdso/$@
 -
+-$(vdsos):
+-	@mkdir -p $(MODLIB)/vdso
+-	$(call cmd,vdso_install)
 -
-diff --git a/arch/sh64/lib/panic.c b/arch/sh64/lib/panic.c
+-vdso_install: $(vdsos)
+diff --git a/arch/x86/ia32/audit.c b/arch/x86/ia32/audit.c
+index 91b7b59..5d7b381 100644
+--- a/arch/x86/ia32/audit.c
++++ b/arch/x86/ia32/audit.c
+@@ -27,7 +27,7 @@ unsigned ia32_signal_class[] = {
+ 
+ int ia32_classify_syscall(unsigned syscall)
+ {
+-	switch(syscall) {
++	switch (syscall) {
+ 	case __NR_open:
+ 		return 2;
+ 	case __NR_openat:
+diff --git a/arch/x86/ia32/fpu32.c b/arch/x86/ia32/fpu32.c
 deleted file mode 100644
-index c9eb1cb..0000000
---- a/arch/sh64/lib/panic.c
+index 2c8209a..0000000
+--- a/arch/x86/ia32/fpu32.c
 +++ /dev/null
-@@ -1,58 +0,0 @@
--/*
-- * Copyright (C) 2003  Richard Curnow, SuperH UK Limited
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--
--#include <linux/kernel.h>
--#include <asm/io.h>
--#include <asm/registers.h>
--
--/* THIS IS A PHYSICAL ADDRESS */
--#define HDSP2534_ADDR (0x04002100)
+@@ -1,183 +0,0 @@
+-/* 
+- * Copyright 2002 Andi Kleen, SuSE Labs.
+- * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes.
+- * This is used for ptrace, signals and coredumps in 32bit emulation.
+- */ 
 -
--#ifdef CONFIG_SH_CAYMAN
+-#include <linux/sched.h>
+-#include <asm/sigcontext32.h>
+-#include <asm/processor.h>
+-#include <asm/uaccess.h>
+-#include <asm/i387.h>
 -
--static void poor_mans_delay(void)
+-static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
 -{
--	int i;
--	for (i = 0; i < 2500000; i++) {
--	}		/* poor man's delay */
+-	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
+- 
+-	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
+-        tmp = ~twd;
+-        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+-        /* and move the valid bits to the lower byte. */
+-        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
+-        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
+-        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+-        return tmp;
 -}
 -
--static void show_value(unsigned long x)
+-static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 -{
+-	struct _fpxreg *st = NULL;
+-	unsigned long tos = (fxsave->swd >> 11) & 7;
+-	unsigned long twd = (unsigned long) fxsave->twd;
+-	unsigned long tag;
+-	unsigned long ret = 0xffff0000;
 -	int i;
--	unsigned nibble;
--	for (i = 0; i < 8; i++) {
--		nibble = ((x >> (i * 4)) & 0xf);
 -
--		ctrl_outb(nibble + ((nibble > 9) ? 55 : 48),
--			  HDSP2534_ADDR + 0xe0 + ((7 - i) << 2));
+-#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
+-
+-	for (i = 0 ; i < 8 ; i++) {
+-		if (twd & 0x1) {
+-			st = FPREG_ADDR( fxsave, (i - tos) & 7 );
+-
+-			switch (st->exponent & 0x7fff) {
+-			case 0x7fff:
+-				tag = 2;		/* Special */
+-				break;
+-			case 0x0000:
+-				if ( !st->significand[0] &&
+-				     !st->significand[1] &&
+-				     !st->significand[2] &&
+-				     !st->significand[3] ) {
+-					tag = 1;	/* Zero */
+-				} else {
+-					tag = 2;	/* Special */
+-				}
+-				break;
+-			default:
+-				if (st->significand[3] & 0x8000) {
+-					tag = 0;	/* Valid */
+-				} else {
+-					tag = 2;	/* Special */
+-				}
+-				break;
+-			}
+-		} else {
+-			tag = 3;			/* Empty */
+-		}
+-		ret |= (tag << (2 * i));
+-		twd = twd >> 1;
 -	}
+-	return ret;
 -}
 -
--#endif
 -
--void
--panic_handler(unsigned long panicPC, unsigned long panicSSR,
--	      unsigned long panicEXPEVT)
+-static inline int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave,
+-					 struct _fpstate_ia32 __user *buf)
 -{
--#ifdef CONFIG_SH_CAYMAN
--	while (1) {
--		/* This piece of code displays the PC on the LED display */
--		show_value(panicPC);
--		poor_mans_delay();
--		show_value(panicSSR);
--		poor_mans_delay();
--		show_value(panicEXPEVT);
--		poor_mans_delay();
--	}
--#endif
+-	struct _fpxreg *to;
+-	struct _fpreg __user *from;
+-	int i;
+-	u32 v;
+-	int err = 0;
 -
--	/* Never return from the panic handler */
--	for (;;) ;
+-#define G(num,val) err |= __get_user(val, num + (u32 __user *)buf)
+-	G(0, fxsave->cwd);
+-	G(1, fxsave->swd);
+-	G(2, fxsave->twd);
+-	fxsave->twd = twd_i387_to_fxsr(fxsave->twd);
+-	G(3, fxsave->rip);
+-	G(4, v);
+-	fxsave->fop = v>>16;	/* cs ignored */
+-	G(5, fxsave->rdp);
+-	/* 6: ds ignored */
+-#undef G
+-	if (err) 
+-		return -1; 
 -
+-	to = (struct _fpxreg *)&fxsave->st_space[0];
+-	from = &buf->_st[0];
+-	for (i = 0 ; i < 8 ; i++, to++, from++) {
+-		if (__copy_from_user(to, from, sizeof(*from)))
+-			return -1;
+-	}
+-	return 0;
 -}
-diff --git a/arch/sh64/lib/udelay.c b/arch/sh64/lib/udelay.c
-deleted file mode 100644
-index 3276539..0000000
---- a/arch/sh64/lib/udelay.c
-+++ /dev/null
-@@ -1,59 +0,0 @@
--/*
-- * arch/sh64/lib/udelay.c
-- *
-- * Delay routines, using a pre-computed "loops_per_jiffy" value.
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003, 2004  Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/sched.h>
--#include <asm/param.h>
--
--extern unsigned long loops_per_jiffy;
 -
--/*
-- * Use only for very small delays (< 1 msec).
-- *
-- * The active part of our cycle counter is only 32-bits wide, and
-- * we're treating the difference between two marks as signed.  On
-- * a 1GHz box, that's about 2 seconds.
-- */
 -
--void __delay(int loops)
+-static inline int convert_fxsr_to_user(struct _fpstate_ia32 __user *buf,
+-				       struct i387_fxsave_struct *fxsave,
+-				       struct pt_regs *regs,
+-				       struct task_struct *tsk)
 -{
--	long long dummy;
--	__asm__ __volatile__("gettr	tr0, %1\n\t"
--			     "pta	$+4, tr0\n\t"
--			     "addi	%0, -1, %0\n\t"
--			     "bne	%0, r63, tr0\n\t"
--			     "ptabs	%1, tr0\n\t":"=r"(loops),
--			     "=r"(dummy)
--			     :"0"(loops));
--}
+-	struct _fpreg __user *to;
+-	struct _fpxreg *from;
+-	int i;
+-	u16 cs,ds; 
+-	int err = 0; 
 -
--void __udelay(unsigned long long usecs, unsigned long lpj)
--{
--	usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj;
--	__delay((long long) usecs >> 32);
--}
+-	if (tsk == current) {
+-		/* should be actually ds/cs at fpu exception time,
+-		   but that information is not available in 64bit mode. */
+-		asm("movw %%ds,%0 " : "=r" (ds)); 
+-		asm("movw %%cs,%0 " : "=r" (cs)); 		
+-	} else { /* ptrace. task has stopped. */
+-		ds = tsk->thread.ds;
+-		cs = regs->cs;
+-	} 
 -
--void __ndelay(unsigned long long nsecs, unsigned long lpj)
--{
--	nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj;
--	__delay((long long) nsecs >> 32);
--}
+-#define P(num,val) err |= __put_user(val, num + (u32 __user *)buf)
+-	P(0, (u32)fxsave->cwd | 0xffff0000);
+-	P(1, (u32)fxsave->swd | 0xffff0000);
+-	P(2, twd_fxsr_to_i387(fxsave));
+-	P(3, (u32)fxsave->rip);
+-	P(4,  cs | ((u32)fxsave->fop) << 16); 
+-	P(5, fxsave->rdp);
+-	P(6, 0xffff0000 | ds);
+-#undef P
 -
--void udelay(unsigned long usecs)
--{
--	__udelay(usecs, loops_per_jiffy);
+-	if (err) 
+-		return -1; 
+-
+-	to = &buf->_st[0];
+-	from = (struct _fpxreg *) &fxsave->st_space[0];
+-	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
+-		if (__copy_to_user(to, from, sizeof(*to)))
+-			return -1;
+-	}
+-	return 0;
 -}
 -
--void ndelay(unsigned long nsecs)
+-int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave) 
+-{ 
+-	clear_fpu(tsk);
+-	if (!fsave) { 
+-		if (__copy_from_user(&tsk->thread.i387.fxsave, 
+-				     &buf->_fxsr_env[0],
+-				     sizeof(struct i387_fxsave_struct)))
+-			return -1;
+-		tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+-		set_stopped_child_used_math(tsk);
+-	} 
+-	return convert_fxsr_from_user(&tsk->thread.i387.fxsave, buf);
+-}  
+-
+-int save_i387_ia32(struct task_struct *tsk, 
+-		   struct _fpstate_ia32 __user *buf, 
+-		   struct pt_regs *regs,
+-		   int fsave)
 -{
--	__ndelay(nsecs, loops_per_jiffy);
+-	int err = 0;
+-
+-	init_fpu(tsk);
+-	if (convert_fxsr_to_user(buf, &tsk->thread.i387.fxsave, regs, tsk))
+-		return -1;
+-	if (fsave)
+-		return 0;
+-	err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
+-	if (fsave) 
+-		return err ? -1 : 1; 	
+-	err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
+-	err |= __copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
+-			      sizeof(struct i387_fxsave_struct));
+-	return err ? -1 : 1;
 -}
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index f82e1a9..e4c1207 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -25,6 +25,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/personality.h>
+ #include <linux/init.h>
++#include <linux/jiffies.h>
+ 
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -36,61 +37,67 @@
+ #undef WARN_OLD
+ #undef CORE_DUMP /* probably broken */
+ 
+-static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
+-static int load_aout_library(struct file*);
++static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
++static int load_aout_library(struct file *);
+ 
+ #ifdef CORE_DUMP
+-static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
++static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
++			  unsigned long limit);
+ 
+ /*
+  * fill in the user structure for a core dump..
+  */
+-static void dump_thread32(struct pt_regs * regs, struct user32 * dump)
++static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
+ {
+-	u32 fs,gs;
++	u32 fs, gs;
+ 
+ /* changed the size calculations - should hopefully work better. lbt */
+ 	dump->magic = CMAGIC;
+ 	dump->start_code = 0;
+-	dump->start_stack = regs->rsp & ~(PAGE_SIZE - 1);
++	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
+ 	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+-	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
++	dump->u_dsize = ((unsigned long)
++			 (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ 	dump->u_dsize -= dump->u_tsize;
+ 	dump->u_ssize = 0;
+-	dump->u_debugreg[0] = current->thread.debugreg0;  
+-	dump->u_debugreg[1] = current->thread.debugreg1;  
+-	dump->u_debugreg[2] = current->thread.debugreg2;  
+-	dump->u_debugreg[3] = current->thread.debugreg3;  
+-	dump->u_debugreg[4] = 0;  
+-	dump->u_debugreg[5] = 0;  
+-	dump->u_debugreg[6] = current->thread.debugreg6;  
+-	dump->u_debugreg[7] = current->thread.debugreg7;  
 -
-diff --git a/arch/sh64/mach-cayman/Makefile b/arch/sh64/mach-cayman/Makefile
-deleted file mode 100644
-index 67a2258..0000000
---- a/arch/sh64/mach-cayman/Makefile
-+++ /dev/null
-@@ -1,11 +0,0 @@
--#
--# Makefile for the Hitachi Cayman specific parts of the kernel
--#
--# Note! Dependencies are done automagically by 'make dep', which also
--# removes any old dependencies. DON'T put your own dependencies here
--# unless it's something special (ie not a .c file).
--#
+-	if (dump->start_stack < 0xc0000000)
+-		dump->u_ssize = ((unsigned long) (0xc0000000 - dump->start_stack)) >> PAGE_SHIFT;
 -
--obj-y := setup.o irq.o iomap.o
--obj-$(CONFIG_HEARTBEAT)	+= led.o
+-	dump->regs.ebx = regs->rbx;
+-	dump->regs.ecx = regs->rcx;
+-	dump->regs.edx = regs->rdx;
+-	dump->regs.esi = regs->rsi;
+-	dump->regs.edi = regs->rdi;
+-	dump->regs.ebp = regs->rbp;
+-	dump->regs.eax = regs->rax;
++	dump->u_debugreg[0] = current->thread.debugreg0;
++	dump->u_debugreg[1] = current->thread.debugreg1;
++	dump->u_debugreg[2] = current->thread.debugreg2;
++	dump->u_debugreg[3] = current->thread.debugreg3;
++	dump->u_debugreg[4] = 0;
++	dump->u_debugreg[5] = 0;
++	dump->u_debugreg[6] = current->thread.debugreg6;
++	dump->u_debugreg[7] = current->thread.debugreg7;
++
++	if (dump->start_stack < 0xc0000000) {
++		unsigned long tmp;
++
++		tmp = (unsigned long) (0xc0000000 - dump->start_stack);
++		dump->u_ssize = tmp >> PAGE_SHIFT;
++	}
++
++	dump->regs.bx = regs->bx;
++	dump->regs.cx = regs->cx;
++	dump->regs.dx = regs->dx;
++	dump->regs.si = regs->si;
++	dump->regs.di = regs->di;
++	dump->regs.bp = regs->bp;
++	dump->regs.ax = regs->ax;
+ 	dump->regs.ds = current->thread.ds;
+ 	dump->regs.es = current->thread.es;
+ 	asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs;
+-	asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs; 
+-	dump->regs.orig_eax = regs->orig_rax;
+-	dump->regs.eip = regs->rip;
++	asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs;
++	dump->regs.orig_ax = regs->orig_ax;
++	dump->regs.ip = regs->ip;
+ 	dump->regs.cs = regs->cs;
+-	dump->regs.eflags = regs->eflags;
+-	dump->regs.esp = regs->rsp;
++	dump->regs.flags = regs->flags;
++	dump->regs.sp = regs->sp;
+ 	dump->regs.ss = regs->ss;
+ 
+ #if 1 /* FIXME */
+ 	dump->u_fpvalid = 0;
+ #else
+-	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
++	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
+ #endif
+ }
+ 
+@@ -128,15 +135,19 @@ static int dump_write(struct file *file, const void *addr, int nr)
+ 	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+ }
+ 
+-#define DUMP_WRITE(addr, nr)	\
++#define DUMP_WRITE(addr, nr)			     \
+ 	if (!dump_write(file, (void *)(addr), (nr))) \
+ 		goto end_coredump;
+ 
+-#define DUMP_SEEK(offset) \
+-if (file->f_op->llseek) { \
+-	if (file->f_op->llseek(file,(offset),0) != (offset)) \
+- 		goto end_coredump; \
+-} else file->f_pos = (offset)
++#define DUMP_SEEK(offset)						\
++	if (file->f_op->llseek) {					\
++		if (file->f_op->llseek(file, (offset), 0) != (offset))	\
++			goto end_coredump;				\
++	} else								\
++		file->f_pos = (offset)
++
++#define START_DATA()	(u.u_tsize << PAGE_SHIFT)
++#define START_STACK(u)	(u.start_stack)
+ 
+ /*
+  * Routine writes a core dump image in the current directory.
+@@ -148,62 +159,70 @@ if (file->f_op->llseek) { \
+  * dumping of the process results in another error..
+  */
+ 
+-static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
++static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
++			  unsigned long limit)
+ {
+ 	mm_segment_t fs;
+ 	int has_dumped = 0;
+ 	unsigned long dump_start, dump_size;
+ 	struct user32 dump;
+-#       define START_DATA(u)	(u.u_tsize << PAGE_SHIFT)
+-#       define START_STACK(u)   (u.start_stack)
+ 
+ 	fs = get_fs();
+ 	set_fs(KERNEL_DS);
+ 	has_dumped = 1;
+ 	current->flags |= PF_DUMPCORE;
+-       	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
+-	dump.u_ar0 = (u32)(((unsigned long)(&dump.regs)) - ((unsigned long)(&dump)));
++	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
++	dump.u_ar0 = (u32)(((unsigned long)(&dump.regs)) -
++			   ((unsigned long)(&dump)));
+ 	dump.signal = signr;
+ 	dump_thread32(regs, &dump);
+ 
+-/* If the size of the dump file exceeds the rlimit, then see what would happen
+-   if we wrote the stack, but not the data area.  */
++	/*
++	 * If the size of the dump file exceeds the rlimit, then see
++	 * what would happen if we wrote the stack, but not the data
++	 * area.
++	 */
+ 	if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > limit)
+ 		dump.u_dsize = 0;
+ 
+-/* Make sure we have enough room to write the stack and data areas. */
++	/* Make sure we have enough room to write the stack and data areas. */
+ 	if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
+ 		dump.u_ssize = 0;
+ 
+-/* make sure we actually have a data and stack area to dump */
++	/* make sure we actually have a data and stack area to dump */
+ 	set_fs(USER_DS);
+-	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump), dump.u_dsize << PAGE_SHIFT))
++	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump),
++		       dump.u_dsize << PAGE_SHIFT))
+ 		dump.u_dsize = 0;
+-	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump), dump.u_ssize << PAGE_SHIFT))
++	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump),
++		       dump.u_ssize << PAGE_SHIFT))
+ 		dump.u_ssize = 0;
+ 
+ 	set_fs(KERNEL_DS);
+-/* struct user */
+-	DUMP_WRITE(&dump,sizeof(dump));
+-/* Now dump all of the user data.  Include malloced stuff as well */
++	/* struct user */
++	DUMP_WRITE(&dump, sizeof(dump));
++	/* Now dump all of the user data.  Include malloced stuff as well */
+ 	DUMP_SEEK(PAGE_SIZE);
+-/* now we start writing out the user space info */
++	/* now we start writing out the user space info */
+ 	set_fs(USER_DS);
+-/* Dump the data area */
++	/* Dump the data area */
+ 	if (dump.u_dsize != 0) {
+ 		dump_start = START_DATA(dump);
+ 		dump_size = dump.u_dsize << PAGE_SHIFT;
+-		DUMP_WRITE(dump_start,dump_size);
++		DUMP_WRITE(dump_start, dump_size);
+ 	}
+-/* Now prepare to dump the stack area */
++	/* Now prepare to dump the stack area */
+ 	if (dump.u_ssize != 0) {
+ 		dump_start = START_STACK(dump);
+ 		dump_size = dump.u_ssize << PAGE_SHIFT;
+-		DUMP_WRITE(dump_start,dump_size);
++		DUMP_WRITE(dump_start, dump_size);
+ 	}
+-/* Finally dump the task struct.  Not be used by gdb, but could be useful */
++	/*
++	 * Finally dump the task struct.  Not be used by gdb, but
++	 * could be useful
++	 */
+ 	set_fs(KERNEL_DS);
+-	DUMP_WRITE(current,sizeof(*current));
++	DUMP_WRITE(current, sizeof(*current));
+ end_coredump:
+ 	set_fs(fs);
+ 	return has_dumped;
+@@ -217,35 +236,34 @@ end_coredump:
+  */
+ static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
+ {
+-	u32 __user *argv;
+-	u32 __user *envp;
+-	u32 __user *sp;
+-	int argc = bprm->argc;
+-	int envc = bprm->envc;
++	u32 __user *argv, *envp, *sp;
++	int argc = bprm->argc, envc = bprm->envc;
+ 
+ 	sp = (u32 __user *) ((-(unsigned long)sizeof(u32)) & (unsigned long) p);
+ 	sp -= envc+1;
+ 	envp = sp;
+ 	sp -= argc+1;
+ 	argv = sp;
+-	put_user((unsigned long) envp,--sp);
+-	put_user((unsigned long) argv,--sp);
+-	put_user(argc,--sp);
++	put_user((unsigned long) envp, --sp);
++	put_user((unsigned long) argv, --sp);
++	put_user(argc, --sp);
+ 	current->mm->arg_start = (unsigned long) p;
+-	while (argc-->0) {
++	while (argc-- > 0) {
+ 		char c;
+-		put_user((u32)(unsigned long)p,argv++);
++
++		put_user((u32)(unsigned long)p, argv++);
+ 		do {
+-			get_user(c,p++);
++			get_user(c, p++);
+ 		} while (c);
+ 	}
+ 	put_user(0, argv);
+ 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
+-	while (envc-->0) {
++	while (envc-- > 0) {
+ 		char c;
+-		put_user((u32)(unsigned long)p,envp++);
++
++		put_user((u32)(unsigned long)p, envp++);
+ 		do {
+-			get_user(c,p++);
++			get_user(c, p++);
+ 		} while (c);
+ 	}
+ 	put_user(0, envp);
+@@ -257,20 +275,18 @@ static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
+  * These are the functions used to load a.out style executables and shared
+  * libraries.  There is no binary dependent code anywhere else.
+  */
 -
-diff --git a/arch/sh64/mach-cayman/iomap.c b/arch/sh64/mach-cayman/iomap.c
+-static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
++static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ {
++	unsigned long error, fd_offset, rlim;
+ 	struct exec ex;
+-	unsigned long error;
+-	unsigned long fd_offset;
+-	unsigned long rlim;
+ 	int retval;
+ 
+ 	ex = *((struct exec *) bprm->buf);		/* exec-header */
+ 	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
+ 	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
+ 	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
+-	    i_size_read(bprm->file->f_path.dentry->d_inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
++	    i_size_read(bprm->file->f_path.dentry->d_inode) <
++	    ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+ 		return -ENOEXEC;
+ 	}
+ 
+@@ -291,13 +307,13 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ 	if (retval)
+ 		return retval;
+ 
+-	regs->cs = __USER32_CS; 
++	regs->cs = __USER32_CS;
+ 	regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
+ 		regs->r13 = regs->r14 = regs->r15 = 0;
+ 
+ 	/* OK, This is the point of no return */
+ 	set_personality(PER_LINUX);
+-	set_thread_flag(TIF_IA32); 
++	set_thread_flag(TIF_IA32);
+ 	clear_thread_flag(TIF_ABI_PENDING);
+ 
+ 	current->mm->end_code = ex.a_text +
+@@ -311,7 +327,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ 
+ 	current->mm->mmap = NULL;
+ 	compute_creds(bprm);
+- 	current->flags &= ~PF_FORKNOEXEC;
++	current->flags &= ~PF_FORKNOEXEC;
+ 
+ 	if (N_MAGIC(ex) == OMAGIC) {
+ 		unsigned long text_addr, map_size;
+@@ -338,30 +354,31 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ 			send_sig(SIGKILL, current, 0);
+ 			return error;
+ 		}
+-			 
++
+ 		flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data);
+ 	} else {
+ #ifdef WARN_OLD
+ 		static unsigned long error_time, error_time2;
+ 		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
+-		    (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ)
+-		{
++		    (N_MAGIC(ex) != NMAGIC) &&
++				time_after(jiffies, error_time2 + 5*HZ)) {
+ 			printk(KERN_NOTICE "executable not page aligned\n");
+ 			error_time2 = jiffies;
+ 		}
+ 
+ 		if ((fd_offset & ~PAGE_MASK) != 0 &&
+-		    (jiffies-error_time) > 5*HZ)
+-		{
+-			printk(KERN_WARNING 
+-			       "fd_offset is not page aligned. Please convert program: %s\n",
++			    time_after(jiffies, error_time + 5*HZ)) {
++			printk(KERN_WARNING
++			       "fd_offset is not page aligned. Please convert "
++			       "program: %s\n",
+ 			       bprm->file->f_path.dentry->d_name.name);
+ 			error_time = jiffies;
+ 		}
+ #endif
+ 
+-		if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
++		if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
+ 			loff_t pos = fd_offset;
++
+ 			down_write(&current->mm->mmap_sem);
+ 			do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
+ 			up_write(&current->mm->mmap_sem);
+@@ -376,9 +393,10 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ 
+ 		down_write(&current->mm->mmap_sem);
+ 		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
+-			PROT_READ | PROT_EXEC,
+-			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE | MAP_32BIT,
+-			fd_offset);
++				PROT_READ | PROT_EXEC,
++				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
++				MAP_EXECUTABLE | MAP_32BIT,
++				fd_offset);
+ 		up_write(&current->mm->mmap_sem);
+ 
+ 		if (error != N_TXTADDR(ex)) {
+@@ -387,9 +405,10 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ 		}
+ 
+ 		down_write(&current->mm->mmap_sem);
+- 		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
++		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+ 				PROT_READ | PROT_WRITE | PROT_EXEC,
+-				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE | MAP_32BIT,
++				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
++				MAP_EXECUTABLE | MAP_32BIT,
+ 				fd_offset + ex.a_text);
+ 		up_write(&current->mm->mmap_sem);
+ 		if (error != N_DATADDR(ex)) {
+@@ -403,9 +422,9 @@ beyond_if:
+ 	set_brk(current->mm->start_brk, current->mm->brk);
+ 
+ 	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+-	if (retval < 0) { 
+-		/* Someone check-me: is this error path enough? */ 
+-		send_sig(SIGKILL, current, 0); 
++	if (retval < 0) {
++		/* Someone check-me: is this error path enough? */
++		send_sig(SIGKILL, current, 0);
+ 		return retval;
+ 	}
+ 
+@@ -414,10 +433,10 @@ beyond_if:
+ 	/* start thread */
+ 	asm volatile("movl %0,%%fs" :: "r" (0)); \
+ 	asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
+-	load_gs_index(0); 
+-	(regs)->rip = ex.a_entry;
+-	(regs)->rsp = current->mm->start_stack;
+-	(regs)->eflags = 0x200;
++	load_gs_index(0);
++	(regs)->ip = ex.a_entry;
++	(regs)->sp = current->mm->start_stack;
++	(regs)->flags = 0x200;
+ 	(regs)->cs = __USER32_CS;
+ 	(regs)->ss = __USER32_DS;
+ 	regs->r8 = regs->r9 = regs->r10 = regs->r11 =
+@@ -425,7 +444,7 @@ beyond_if:
+ 	set_fs(USER_DS);
+ 	if (unlikely(current->ptrace & PT_PTRACED)) {
+ 		if (current->ptrace & PT_TRACE_EXEC)
+-			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
++			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
+ 		else
+ 			send_sig(SIGTRAP, current, 0);
+ 	}
+@@ -434,9 +453,8 @@ beyond_if:
+ 
+ static int load_aout_library(struct file *file)
+ {
+-	struct inode * inode;
+-	unsigned long bss, start_addr, len;
+-	unsigned long error;
++	struct inode *inode;
++	unsigned long bss, start_addr, len, error;
+ 	int retval;
+ 	struct exec ex;
+ 
+@@ -450,7 +468,8 @@ static int load_aout_library(struct file *file)
+ 	/* We come in here for the regular a.out style of shared libraries */
+ 	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
+ 	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
+-	    i_size_read(inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
++	    i_size_read(inode) <
++	    ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+ 		goto out;
+ 	}
+ 
+@@ -467,10 +486,10 @@ static int load_aout_library(struct file *file)
+ 
+ #ifdef WARN_OLD
+ 		static unsigned long error_time;
+-		if ((jiffies-error_time) > 5*HZ)
+-		{
+-			printk(KERN_WARNING 
+-			       "N_TXTOFF is not page aligned. Please convert library: %s\n",
++		if (time_after(jiffies, error_time + 5*HZ)) {
++			printk(KERN_WARNING
++			       "N_TXTOFF is not page aligned. Please convert "
++			       "library: %s\n",
+ 			       file->f_path.dentry->d_name.name);
+ 			error_time = jiffies;
+ 		}
+@@ -478,11 +497,12 @@ static int load_aout_library(struct file *file)
+ 		down_write(&current->mm->mmap_sem);
+ 		do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
+ 		up_write(&current->mm->mmap_sem);
+-		
++
+ 		file->f_op->read(file, (char __user *)start_addr,
+ 			ex.a_text + ex.a_data, &pos);
+ 		flush_icache_range((unsigned long) start_addr,
+-				   (unsigned long) start_addr + ex.a_text + ex.a_data);
++				   (unsigned long) start_addr + ex.a_text +
++				   ex.a_data);
+ 
+ 		retval = 0;
+ 		goto out;
+diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
 deleted file mode 100644
-index a5c645f..0000000
---- a/arch/sh64/mach-cayman/iomap.c
+index 55822d2..0000000
+--- a/arch/x86/ia32/ia32_binfmt.c
 +++ /dev/null
-@@ -1,22 +0,0 @@
--/*
-- * arch/sh64/mach-cayman/iomap.c
-- *
-- * Cayman iomap interface
-- *
-- * Copyright (C) 2004  Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <asm/io.h>
--#include <asm/cayman.h>
+@@ -1,285 +0,0 @@
+-/* 
+- * Written 2000,2002 by Andi Kleen. 
+- * 
+- * Loosely based on the sparc64 and IA64 32bit emulation loaders.
+- * This tricks binfmt_elf.c into loading 32bit binaries using lots 
+- * of ugly preprocessor tricks. Talk about very very poor man's inheritance.
+- */ 
 -
--void __iomem *ioport_map(unsigned long port, unsigned int len)
--{
--	if (port < 0x400)
--		return (void __iomem *)((port << 2) | smsc_superio_virt);
+-#include <linux/types.h>
+-#include <linux/stddef.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/compat.h>
+-#include <linux/string.h>
+-#include <linux/binfmts.h>
+-#include <linux/mm.h>
+-#include <linux/security.h>
+-#include <linux/elfcore-compat.h>
 -
--	return (void __iomem *)port;
--}
+-#include <asm/segment.h> 
+-#include <asm/ptrace.h>
+-#include <asm/processor.h>
+-#include <asm/user32.h>
+-#include <asm/sigcontext32.h>
+-#include <asm/fpu32.h>
+-#include <asm/i387.h>
+-#include <asm/uaccess.h>
+-#include <asm/ia32.h>
+-#include <asm/vsyscall32.h>
 -
-diff --git a/arch/sh64/mach-cayman/irq.c b/arch/sh64/mach-cayman/irq.c
-deleted file mode 100644
-index aaad36d..0000000
---- a/arch/sh64/mach-cayman/irq.c
-+++ /dev/null
-@@ -1,195 +0,0 @@
--/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/kernel/irq_cayman.c
-- *
-- * SH-5 Cayman Interrupt Support
-- *
-- * This file handles the board specific parts of the Cayman interrupt system
-- *
-- * Copyright (C) 2002 Stuart Menefy
-- */
+-#undef	ELF_ARCH
+-#undef	ELF_CLASS
+-#define ELF_CLASS	ELFCLASS32
+-#define ELF_ARCH	EM_386
 -
--#include <asm/irq.h>
--#include <asm/page.h>
--#include <asm/io.h>
--#include <linux/irq.h>
--#include <linux/interrupt.h>
--#include <linux/signal.h>
--#include <asm/cayman.h>
+-#undef	elfhdr
+-#undef	elf_phdr
+-#undef	elf_note
+-#undef	elf_addr_t
+-#define elfhdr		elf32_hdr
+-#define elf_phdr	elf32_phdr
+-#define elf_note	elf32_note
+-#define elf_addr_t	Elf32_Off
 -
--unsigned long epld_virt;
+-#define ELF_NAME "elf/i386"
 -
--#define EPLD_BASE        0x04002000
--#define EPLD_STATUS_BASE (epld_virt + 0x10)
--#define EPLD_MASK_BASE   (epld_virt + 0x20)
+-#define AT_SYSINFO 32
+-#define AT_SYSINFO_EHDR		33
 -
--/* Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto
--   the same SH-5 interrupt */
+-int sysctl_vsyscall32 = 1;
 -
--static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id)
--{
--        printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n");
--	return IRQ_NONE;
--}
+-#undef ARCH_DLINFO
+-#define ARCH_DLINFO do {  \
+-	if (sysctl_vsyscall32) { \
+-		current->mm->context.vdso = (void *)VSYSCALL32_BASE;	\
+-		NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
+-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL32_BASE);    \
+-	}	\
+-} while(0)
 -
--static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id)
--{
--        printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq);
--	return IRQ_NONE;
--}
+-struct file;
 -
--static struct irqaction cayman_action_smsc = {
--	.name		= "Cayman SMSC Mux",
--	.handler	= cayman_interrupt_smsc,
--	.flags		= IRQF_DISABLED,
--};
+-#define IA32_EMULATOR 1
 -
--static struct irqaction cayman_action_pci2 = {
--	.name		= "Cayman PCI2 Mux",
--	.handler	= cayman_interrupt_pci2,
--	.flags		= IRQF_DISABLED,
--};
+-#undef ELF_ET_DYN_BASE
 -
--static void enable_cayman_irq(unsigned int irq)
--{
--	unsigned long flags;
--	unsigned long mask;
--	unsigned int reg;
--	unsigned char bit;
+-#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
 -
--	irq -= START_EXT_IRQS;
--	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
--	bit = 1<<(irq % 8);
--	local_irq_save(flags);
--	mask = ctrl_inl(reg);
--	mask |= bit;
--	ctrl_outl(mask, reg);
--	local_irq_restore(flags);
--}
+-#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
 -
--void disable_cayman_irq(unsigned int irq)
--{
--	unsigned long flags;
--	unsigned long mask;
--	unsigned int reg;
--	unsigned char bit;
+-#define _GET_SEG(x) \
+-	({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; })
 -
--	irq -= START_EXT_IRQS;
--	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
--	bit = 1<<(irq % 8);
--	local_irq_save(flags);
--	mask = ctrl_inl(reg);
--	mask &= ~bit;
--	ctrl_outl(mask, reg);
--	local_irq_restore(flags);
--}
+-/* Assumes current==process to be dumped */
+-#undef	ELF_CORE_COPY_REGS
+-#define ELF_CORE_COPY_REGS(pr_reg, regs)       		\
+-	pr_reg[0] = regs->rbx;				\
+-	pr_reg[1] = regs->rcx;				\
+-	pr_reg[2] = regs->rdx;				\
+-	pr_reg[3] = regs->rsi;				\
+-	pr_reg[4] = regs->rdi;				\
+-	pr_reg[5] = regs->rbp;				\
+-	pr_reg[6] = regs->rax;				\
+-	pr_reg[7] = _GET_SEG(ds);   			\
+-	pr_reg[8] = _GET_SEG(es);			\
+-	pr_reg[9] = _GET_SEG(fs);			\
+-	pr_reg[10] = _GET_SEG(gs);			\
+-	pr_reg[11] = regs->orig_rax;			\
+-	pr_reg[12] = regs->rip;				\
+-	pr_reg[13] = regs->cs;				\
+-	pr_reg[14] = regs->eflags;			\
+-	pr_reg[15] = regs->rsp;				\
+-	pr_reg[16] = regs->ss;
 -
--static void ack_cayman_irq(unsigned int irq)
+-
+-#define elf_prstatus	compat_elf_prstatus
+-#define elf_prpsinfo	compat_elf_prpsinfo
+-#define elf_fpregset_t	struct user_i387_ia32_struct
+-#define	elf_fpxregset_t	struct user32_fxsr_struct
+-#define user		user32
+-
+-#undef elf_read_implies_exec
+-#define elf_read_implies_exec(ex, executable_stack)     (executable_stack != EXSTACK_DISABLE_X)
+-
+-#define elf_core_copy_regs		elf32_core_copy_regs
+-static inline void elf32_core_copy_regs(compat_elf_gregset_t *elfregs,
+-					struct pt_regs *regs)
 -{
--	disable_cayman_irq(irq);
+-	ELF_CORE_COPY_REGS((&elfregs->ebx), regs)
 -}
 -
--static void end_cayman_irq(unsigned int irq)
--{
--	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
--		enable_cayman_irq(irq);
+-#define elf_core_copy_task_regs		elf32_core_copy_task_regs
+-static inline int elf32_core_copy_task_regs(struct task_struct *t,
+-					    compat_elf_gregset_t* elfregs)
+-{	
+-	struct pt_regs *pp = task_pt_regs(t);
+-	ELF_CORE_COPY_REGS((&elfregs->ebx), pp);
+-	/* fix wrong segments */ 
+-	elfregs->ds = t->thread.ds;
+-	elfregs->fs = t->thread.fsindex;
+-	elfregs->gs = t->thread.gsindex;
+-	elfregs->es = t->thread.es;
+-	return 1; 
 -}
 -
--static unsigned int startup_cayman_irq(unsigned int irq)
+-#define elf_core_copy_task_fpregs	elf32_core_copy_task_fpregs
+-static inline int 
+-elf32_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs,
+-			    elf_fpregset_t *fpu)
 -{
--	enable_cayman_irq(irq);
--	return 0; /* never anything pending */
+-	struct _fpstate_ia32 *fpstate = (void*)fpu; 
+-	mm_segment_t oldfs = get_fs();
+-
+-	if (!tsk_used_math(tsk))
+-		return 0;
+-	if (!regs)
+-		regs = task_pt_regs(tsk);
+-	if (tsk == current)
+-		unlazy_fpu(tsk);
+-	set_fs(KERNEL_DS); 
+-	save_i387_ia32(tsk, fpstate, regs, 1);
+-	/* Correct for i386 bug. It puts the fop into the upper 16bits of 
+-	   the tag word (like FXSAVE), not into the fcs*/ 
+-	fpstate->cssel |= fpstate->tag & 0xffff0000; 
+-	set_fs(oldfs); 
+-	return 1; 
 -}
 -
--static void shutdown_cayman_irq(unsigned int irq)
+-#define ELF_CORE_COPY_XFPREGS 1
+-#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
+-#define elf_core_copy_task_xfpregs	elf32_core_copy_task_xfpregs
+-static inline int 
+-elf32_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
 -{
--	disable_cayman_irq(irq);
+-	struct pt_regs *regs = task_pt_regs(t);
+-	if (!tsk_used_math(t))
+-		return 0;
+-	if (t == current)
+-		unlazy_fpu(t); 
+-	memcpy(xfpu, &t->thread.i387.fxsave, sizeof(elf_fpxregset_t));
+-	xfpu->fcs = regs->cs; 
+-	xfpu->fos = t->thread.ds; /* right? */ 
+-	return 1;
 -}
 -
--struct hw_interrupt_type cayman_irq_type = {
--	.typename	= "Cayman-IRQ",
--	.startup	= startup_cayman_irq,
--	.shutdown	= shutdown_cayman_irq,
--	.enable		= enable_cayman_irq,
--	.disable	= disable_cayman_irq,
--	.ack		= ack_cayman_irq,
--	.end		= end_cayman_irq,
--};
--
--int cayman_irq_demux(int evt)
--{
--	int irq = intc_evt_to_irq[evt];
+-#undef elf_check_arch
+-#define elf_check_arch(x) \
+-	((x)->e_machine == EM_386)
 -
--	if (irq == SMSC_IRQ) {
--		unsigned long status;
--		int i;
+-extern int force_personality32;
 -
--		status = ctrl_inl(EPLD_STATUS_BASE) &
--			 ctrl_inl(EPLD_MASK_BASE) & 0xff;
--		if (status == 0) {
--			irq = -1;
--		} else {
--			for (i=0; i<8; i++) {
--				if (status & (1<<i))
--					break;
--			}
--			irq = START_EXT_IRQS + i;
--		}
--	}
+-#undef	ELF_EXEC_PAGESIZE
+-#undef	ELF_HWCAP
+-#undef	ELF_PLATFORM
+-#undef	SET_PERSONALITY
+-#define ELF_EXEC_PAGESIZE PAGE_SIZE
+-#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
+-#define ELF_PLATFORM  ("i686")
+-#define SET_PERSONALITY(ex, ibcs2)			\
+-do {							\
+-	unsigned long new_flags = 0;				\
+-	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
+-		new_flags = _TIF_IA32;				\
+-	if ((current_thread_info()->flags & _TIF_IA32)		\
+-	    != new_flags)					\
+-		set_thread_flag(TIF_ABI_PENDING);		\
+-	else							\
+-		clear_thread_flag(TIF_ABI_PENDING);		\
+-	/* XXX This overwrites the user set personality */	\
+-	current->personality |= force_personality32;		\
+-} while (0)
 -
--	if (irq == PCI2_IRQ) {
--		unsigned long status;
--		int i;
+-/* Override some function names */
+-#define elf_format			elf32_format
 -
--		status = ctrl_inl(EPLD_STATUS_BASE + 3 * sizeof(u32)) &
--			 ctrl_inl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff;
--		if (status == 0) {
--			irq = -1;
--		} else {
--			for (i=0; i<8; i++) {
--				if (status & (1<<i))
--					break;
--			}
--			irq = START_EXT_IRQS + (3 * 8) + i;
--		}
--	}
+-#define init_elf_binfmt			init_elf32_binfmt
+-#define exit_elf_binfmt			exit_elf32_binfmt
 -
--	return irq;
--}
+-#define load_elf_binary load_elf32_binary
 -
--#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
--int cayman_irq_describe(char* p, int irq)
--{
--	if (irq < NR_INTC_IRQS) {
--		return intc_irq_describe(p, irq);
--	} else if (irq < NR_INTC_IRQS + 8) {
--		return sprintf(p, "(SMSC %d)", irq - NR_INTC_IRQS);
--	} else if ((irq >= NR_INTC_IRQS + 24) && (irq < NR_INTC_IRQS + 32)) {
--		return sprintf(p, "(PCI2 %d)", irq - (NR_INTC_IRQS + 24));
--	}
+-#undef	ELF_PLAT_INIT
+-#define ELF_PLAT_INIT(r, load_addr)	elf32_init(r)
 -
--	return 0;
--}
--#endif
+-#undef start_thread
+-#define start_thread(regs,new_rip,new_rsp) do { \
+-	asm volatile("movl %0,%%fs" :: "r" (0)); \
+-	asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS)); \
+-	load_gs_index(0); \
+-	(regs)->rip = (new_rip); \
+-	(regs)->rsp = (new_rsp); \
+-	(regs)->eflags = 0x200; \
+-	(regs)->cs = __USER32_CS; \
+-	(regs)->ss = __USER32_DS; \
+-	set_fs(USER_DS); \
+-} while(0) 
 -
--void init_cayman_irq(void)
--{
--	int i;
 -
--	epld_virt = onchip_remap(EPLD_BASE, 1024, "EPLD");
--	if (!epld_virt) {
--		printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n");
--		return;
--	}
+-#include <linux/module.h>
 -
--	for (i=0; i<NR_EXT_IRQS; i++) {
--		irq_desc[START_EXT_IRQS + i].chip = &cayman_irq_type;
--	}
+-MODULE_DESCRIPTION("Binary format loader for compatibility with IA32 ELF binaries."); 
+-MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
 -
--	/* Setup the SMSC interrupt */
--	setup_irq(SMSC_IRQ, &cayman_action_smsc);
--	setup_irq(PCI2_IRQ, &cayman_action_pci2);
--}
-diff --git a/arch/sh64/mach-cayman/led.c b/arch/sh64/mach-cayman/led.c
-deleted file mode 100644
-index b4e122f..0000000
---- a/arch/sh64/mach-cayman/led.c
-+++ /dev/null
-@@ -1,51 +0,0 @@
--/*
-- * arch/sh64/mach-cayman/led.c
-- *
-- * Copyright (C) 2002 Stuart Menefy <stuart.menefy at st.com>
-- *
-- * May be copied or modified under the terms of the GNU General Public
-- * License.  See linux/COPYING for more information.
-- *
-- * Flash the LEDs
-- */
--#include <asm/io.h>
+-#undef MODULE_DESCRIPTION
+-#undef MODULE_AUTHOR
 -
--/*
--** It is supposed these functions to be used for a low level
--** debugging (via Cayman LEDs), hence to be available as soon
--** as possible.
--** Unfortunately Cayman LEDs relies on Cayman EPLD to be mapped
--** (this happen when IRQ are initialized... quite late).
--** These triky dependencies should be removed. Temporary, it
--** may be enough to NOP until EPLD is mapped.
--*/
+-static void elf32_init(struct pt_regs *);
 -
--extern unsigned long epld_virt;
+-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+-#define arch_setup_additional_pages syscall32_setup_pages
+-extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
 -
--#define LED_ADDR      (epld_virt + 0x008)
--#define HDSP2534_ADDR (epld_virt + 0x100)
+-#include "../../../fs/binfmt_elf.c" 
 -
--void mach_led(int position, int value)
+-static void elf32_init(struct pt_regs *regs)
 -{
--	if (!epld_virt)
--		return;
+-	struct task_struct *me = current; 
+-	regs->rdi = 0;
+-	regs->rsi = 0;
+-	regs->rdx = 0;
+-	regs->rcx = 0;
+-	regs->rax = 0;
+-	regs->rbx = 0; 
+-	regs->rbp = 0; 
+-	regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
+-		regs->r13 = regs->r14 = regs->r15 = 0; 
+-    me->thread.fs = 0; 
+-	me->thread.gs = 0;
+-	me->thread.fsindex = 0; 
+-	me->thread.gsindex = 0;
+-    me->thread.ds = __USER_DS; 
+-	me->thread.es = __USER_DS;
+-}
 -
--	if (value)
--		ctrl_outl(0, LED_ADDR);
--	else
--		ctrl_outl(1, LED_ADDR);
+-#ifdef CONFIG_SYSCTL
+-/* Register vsyscall32 into the ABI table */
+-#include <linux/sysctl.h>
 -
--}
+-static ctl_table abi_table2[] = {
+-	{
+-		.procname	= "vsyscall32",
+-		.data		= &sysctl_vsyscall32,
+-		.maxlen		= sizeof(int),
+-		.mode		= 0644,
+-		.proc_handler	= proc_dointvec
+-	},
+-	{}
+-};
 -
--void mach_alphanum(int position, unsigned char value)
--{
--	if (!epld_virt)
--		return;
+-static ctl_table abi_root_table2[] = {
+-	{
+-		.ctl_name = CTL_ABI,
+-		.procname = "abi",
+-		.mode = 0555,
+-		.child = abi_table2
+-	},
+-	{}
+-};
 -
--	ctrl_outb(value, HDSP2534_ADDR + 0xe0 + (position << 2));
+-static __init int ia32_binfmt_init(void)
+-{ 
+-	register_sysctl_table(abi_root_table2);
+-	return 0;
 -}
--
--void mach_alphanum_brightness(int setting)
--{
--	ctrl_outb(setting & 7, HDSP2534_ADDR + 0xc0);
+-__initcall(ia32_binfmt_init);
+-#endif
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 6ea19c2..1c0503b 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -29,9 +29,8 @@
+ #include <asm/ia32_unistd.h>
+ #include <asm/user32.h>
+ #include <asm/sigcontext32.h>
+-#include <asm/fpu32.h>
+ #include <asm/proto.h>
+-#include <asm/vsyscall32.h>
++#include <asm/vdso.h>
+ 
+ #define DEBUG_SIG 0
+ 
+@@ -43,7 +42,8 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ {
+ 	int err;
+-	if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
++
++	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+ 		return -EFAULT;
+ 
+ 	/* If you change siginfo_t structure, please make sure that
+@@ -53,16 +53,19 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 	   3 ints plus the relevant union member.  */
+ 	err = __put_user(from->si_signo, &to->si_signo);
+ 	err |= __put_user(from->si_errno, &to->si_errno);
+- 	err |= __put_user((short)from->si_code, &to->si_code);
++	err |= __put_user((short)from->si_code, &to->si_code);
+ 
+ 	if (from->si_code < 0) {
+ 		err |= __put_user(from->si_pid, &to->si_pid);
+- 		err |= __put_user(from->si_uid, &to->si_uid);
+- 		err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
++		err |= __put_user(from->si_uid, &to->si_uid);
++		err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
+ 	} else {
+- 		/* First 32bits of unions are always present:
+- 		 * si_pid === si_band === si_tid === si_addr(LS half) */
+-		err |= __put_user(from->_sifields._pad[0], &to->_sifields._pad[0]);
++		/*
++		 * First 32bits of unions are always present:
++		 * si_pid === si_band === si_tid === si_addr(LS half)
++		 */
++		err |= __put_user(from->_sifields._pad[0],
++				  &to->_sifields._pad[0]);
+ 		switch (from->si_code >> 16) {
+ 		case __SI_FAULT >> 16:
+ 			break;
+@@ -76,14 +79,15 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 			err |= __put_user(from->si_uid, &to->si_uid);
+ 			break;
+ 		case __SI_POLL >> 16:
+-			err |= __put_user(from->si_fd, &to->si_fd); 
++			err |= __put_user(from->si_fd, &to->si_fd);
+ 			break;
+ 		case __SI_TIMER >> 16:
+-			err |= __put_user(from->si_overrun, &to->si_overrun); 
++			err |= __put_user(from->si_overrun, &to->si_overrun);
+ 			err |= __put_user(ptr_to_compat(from->si_ptr),
+-					&to->si_ptr);
++					  &to->si_ptr);
+ 			break;
+-		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
++			 /* This is not generated by the kernel as of now.  */
++		case __SI_RT >> 16:
+ 		case __SI_MESGQ >> 16:
+ 			err |= __put_user(from->si_uid, &to->si_uid);
+ 			err |= __put_user(from->si_int, &to->si_int);
+@@ -97,7 +101,8 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ {
+ 	int err;
+ 	u32 ptr32;
+-	if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
++
++	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
+ 		return -EFAULT;
+ 
+ 	err = __get_user(to->si_signo, &from->si_signo);
+@@ -112,8 +117,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ 	return err;
+ }
+ 
+-asmlinkage long
+-sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
++asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
+ {
+ 	mask &= _BLOCKABLE;
+ 	spin_lock_irq(&current->sighand->siglock);
+@@ -128,36 +132,37 @@ sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
+ 	return -ERESTARTNOHAND;
+ }
+ 
+-asmlinkage long
+-sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
+-		  stack_ia32_t __user *uoss_ptr, 
+-		  struct pt_regs *regs)
++asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
++				  stack_ia32_t __user *uoss_ptr,
++				  struct pt_regs *regs)
+ {
+-	stack_t uss,uoss; 
++	stack_t uss, uoss;
+ 	int ret;
+-	mm_segment_t seg; 
+-	if (uss_ptr) { 
++	mm_segment_t seg;
++
++	if (uss_ptr) {
+ 		u32 ptr;
+-		memset(&uss,0,sizeof(stack_t));
+-		if (!access_ok(VERIFY_READ,uss_ptr,sizeof(stack_ia32_t)) ||
++
++		memset(&uss, 0, sizeof(stack_t));
++		if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
+ 			    __get_user(ptr, &uss_ptr->ss_sp) ||
+ 			    __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
+ 			    __get_user(uss.ss_size, &uss_ptr->ss_size))
+ 			return -EFAULT;
+ 		uss.ss_sp = compat_ptr(ptr);
+ 	}
+-	seg = get_fs(); 
+-	set_fs(KERNEL_DS); 
+-	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->rsp);
+-	set_fs(seg); 
++	seg = get_fs();
++	set_fs(KERNEL_DS);
++	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
++	set_fs(seg);
+ 	if (ret >= 0 && uoss_ptr)  {
+-		if (!access_ok(VERIFY_WRITE,uoss_ptr,sizeof(stack_ia32_t)) ||
++		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
+ 		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
+ 		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
+ 		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
+ 			ret = -EFAULT;
+-	} 	
+-	return ret;	
++	}
++	return ret;
+ }
+ 
+ /*
+@@ -186,87 +191,85 @@ struct rt_sigframe
+ 	char retcode[8];
+ };
+ 
+-static int
+-ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_ia32 __user *sc, unsigned int *peax)
++#define COPY(x)		{ 		\
++	unsigned int reg;		\
++	err |= __get_user(reg, &sc->x);	\
++	regs->x = reg;			\
++}
++
++#define RELOAD_SEG(seg,mask)						\
++	{ unsigned int cur;						\
++	  unsigned short pre;						\
++	  err |= __get_user(pre, &sc->seg);				\
++	  asm volatile("movl %%" #seg ",%0" : "=r" (cur));		\
++	  pre |= mask;							\
++	  if (pre != cur) loadsegment(seg, pre); }
++
++static int ia32_restore_sigcontext(struct pt_regs *regs,
++				   struct sigcontext_ia32 __user *sc,
++				   unsigned int *peax)
+ {
+-	unsigned int err = 0;
+-	
++	unsigned int tmpflags, gs, oldgs, err = 0;
++	struct _fpstate_ia32 __user *buf;
++	u32 tmp;
++
+ 	/* Always make any pending restarted system calls return -EINTR */
+ 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ 
+ #if DEBUG_SIG
+-	printk("SIG restore_sigcontext: sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
+-		sc, sc->err, sc->eip, sc->cs, sc->eflags);
++	printk(KERN_DEBUG "SIG restore_sigcontext: "
++	       "sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
++	       sc, sc->err, sc->ip, sc->cs, sc->flags);
+ #endif
+-#define COPY(x)		{ \
+-	unsigned int reg;			\
+-	err |= __get_user(reg, &sc->e ##x);	\
+-	regs->r ## x = reg;			\
 -}
-diff --git a/arch/sh64/mach-cayman/setup.c b/arch/sh64/mach-cayman/setup.c
+ 
+-#define RELOAD_SEG(seg,mask)						\
+-	{ unsigned int cur; 						\
+-	  unsigned short pre;						\
+-	  err |= __get_user(pre, &sc->seg);				\
+-    	  asm volatile("movl %%" #seg ",%0" : "=r" (cur));		\
+-	  pre |= mask; 							\
+-	  if (pre != cur) loadsegment(seg,pre); }
+-
+-	/* Reload fs and gs if they have changed in the signal handler.
+-	   This does not handle long fs/gs base changes in the handler, but 
+-	   does not clobber them at least in the normal case. */ 
+-	
+-	{
+-		unsigned gs, oldgs; 
+-		err |= __get_user(gs, &sc->gs);
+-		gs |= 3; 
+-		asm("movl %%gs,%0" : "=r" (oldgs));
+-		if (gs != oldgs)
+-		load_gs_index(gs); 
+-	} 
+-	RELOAD_SEG(fs,3);
+-	RELOAD_SEG(ds,3);
+-	RELOAD_SEG(es,3);
++	/*
++	 * Reload fs and gs if they have changed in the signal
++	 * handler.  This does not handle long fs/gs base changes in
++	 * the handler, but does not clobber them at least in the
++	 * normal case.
++	 */
++	err |= __get_user(gs, &sc->gs);
++	gs |= 3;
++	asm("movl %%gs,%0" : "=r" (oldgs));
++	if (gs != oldgs)
++		load_gs_index(gs);
++
++	RELOAD_SEG(fs, 3);
++	RELOAD_SEG(ds, 3);
++	RELOAD_SEG(es, 3);
+ 
+ 	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+ 	COPY(dx); COPY(cx); COPY(ip);
+-	/* Don't touch extended registers */ 
+-	
+-	err |= __get_user(regs->cs, &sc->cs); 
+-	regs->cs |= 3;  
+-	err |= __get_user(regs->ss, &sc->ss); 
+-	regs->ss |= 3; 
+-
+-	{
+-		unsigned int tmpflags;
+-		err |= __get_user(tmpflags, &sc->eflags);
+-		regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
+-		regs->orig_rax = -1;		/* disable syscall checks */
+-	}
++	/* Don't touch extended registers */
++
++	err |= __get_user(regs->cs, &sc->cs);
++	regs->cs |= 3;
++	err |= __get_user(regs->ss, &sc->ss);
++	regs->ss |= 3;
++
++	err |= __get_user(tmpflags, &sc->flags);
++	regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5);
++	/* disable syscall checks */
++	regs->orig_ax = -1;
++
++	err |= __get_user(tmp, &sc->fpstate);
++	buf = compat_ptr(tmp);
++	if (buf) {
++		if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
++			goto badframe;
++		err |= restore_i387_ia32(buf);
++	} else {
++		struct task_struct *me = current;
+ 
+-	{
+-		u32 tmp;
+-		struct _fpstate_ia32 __user * buf;
+-		err |= __get_user(tmp, &sc->fpstate);
+-		buf = compat_ptr(tmp);
+-		if (buf) {
+-			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
+-				goto badframe;
+-			err |= restore_i387_ia32(current, buf, 0);
+-		} else {
+-			struct task_struct *me = current;
+-			if (used_math()) {
+-				clear_fpu(me);
+-				clear_used_math();
+-			}
++		if (used_math()) {
++			clear_fpu(me);
++			clear_used_math();
+ 		}
+ 	}
+ 
+-	{ 
+-		u32 tmp;
+-		err |= __get_user(tmp, &sc->eax);
+-		*peax = tmp;
+-	}
++	err |= __get_user(tmp, &sc->ax);
++	*peax = tmp;
++
+ 	return err;
+ 
+ badframe:
+@@ -275,15 +278,16 @@ badframe:
+ 
+ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
+ {
+-	struct sigframe __user *frame = (struct sigframe __user *)(regs->rsp-8);
++	struct sigframe __user *frame = (struct sigframe __user *)(regs->sp-8);
+ 	sigset_t set;
+-	unsigned int eax;
++	unsigned int ax;
+ 
+ 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ 		goto badframe;
+ 	if (__get_user(set.sig[0], &frame->sc.oldmask)
+ 	    || (_COMPAT_NSIG_WORDS > 1
+-		&& __copy_from_user((((char *) &set.sig) + 4), &frame->extramask,
++		&& __copy_from_user((((char *) &set.sig) + 4),
++				    &frame->extramask,
+ 				    sizeof(frame->extramask))))
+ 		goto badframe;
+ 
+@@ -292,24 +296,24 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
+ 	current->blocked = set;
+ 	recalc_sigpending();
+ 	spin_unlock_irq(&current->sighand->siglock);
+-	
+-	if (ia32_restore_sigcontext(regs, &frame->sc, &eax))
++
++	if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
+ 		goto badframe;
+-	return eax;
++	return ax;
+ 
+ badframe:
+ 	signal_fault(regs, frame, "32bit sigreturn");
+ 	return 0;
+-}	
++}
+ 
+ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
+ {
+ 	struct rt_sigframe __user *frame;
+ 	sigset_t set;
+-	unsigned int eax;
++	unsigned int ax;
+ 	struct pt_regs tregs;
+ 
+-	frame = (struct rt_sigframe __user *)(regs->rsp - 4);
++	frame = (struct rt_sigframe __user *)(regs->sp - 4);
+ 
+ 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ 		goto badframe;
+@@ -321,28 +325,28 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
+ 	current->blocked = set;
+ 	recalc_sigpending();
+ 	spin_unlock_irq(&current->sighand->siglock);
+-	
+-	if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
++
++	if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+ 		goto badframe;
+ 
+ 	tregs = *regs;
+ 	if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
+ 		goto badframe;
+ 
+-	return eax;
++	return ax;
+ 
+ badframe:
+-	signal_fault(regs,frame,"32bit rt sigreturn");
++	signal_fault(regs, frame, "32bit rt sigreturn");
+ 	return 0;
+-}	
++}
+ 
+ /*
+  * Set up a signal frame.
+  */
+ 
+-static int
+-ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate,
+-		 struct pt_regs *regs, unsigned int mask)
++static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
++				 struct _fpstate_ia32 __user *fpstate,
++				 struct pt_regs *regs, unsigned int mask)
+ {
+ 	int tmp, err = 0;
+ 
+@@ -356,26 +360,26 @@ ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __
+ 	__asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp));
+ 	err |= __put_user(tmp, (unsigned int __user *)&sc->es);
+ 
+-	err |= __put_user((u32)regs->rdi, &sc->edi);
+-	err |= __put_user((u32)regs->rsi, &sc->esi);
+-	err |= __put_user((u32)regs->rbp, &sc->ebp);
+-	err |= __put_user((u32)regs->rsp, &sc->esp);
+-	err |= __put_user((u32)regs->rbx, &sc->ebx);
+-	err |= __put_user((u32)regs->rdx, &sc->edx);
+-	err |= __put_user((u32)regs->rcx, &sc->ecx);
+-	err |= __put_user((u32)regs->rax, &sc->eax);
++	err |= __put_user((u32)regs->di, &sc->di);
++	err |= __put_user((u32)regs->si, &sc->si);
++	err |= __put_user((u32)regs->bp, &sc->bp);
++	err |= __put_user((u32)regs->sp, &sc->sp);
++	err |= __put_user((u32)regs->bx, &sc->bx);
++	err |= __put_user((u32)regs->dx, &sc->dx);
++	err |= __put_user((u32)regs->cx, &sc->cx);
++	err |= __put_user((u32)regs->ax, &sc->ax);
+ 	err |= __put_user((u32)regs->cs, &sc->cs);
+ 	err |= __put_user((u32)regs->ss, &sc->ss);
+ 	err |= __put_user(current->thread.trap_no, &sc->trapno);
+ 	err |= __put_user(current->thread.error_code, &sc->err);
+-	err |= __put_user((u32)regs->rip, &sc->eip);
+-	err |= __put_user((u32)regs->eflags, &sc->eflags);
+-	err |= __put_user((u32)regs->rsp, &sc->esp_at_signal);
++	err |= __put_user((u32)regs->ip, &sc->ip);
++	err |= __put_user((u32)regs->flags, &sc->flags);
++	err |= __put_user((u32)regs->sp, &sc->sp_at_signal);
+ 
+-	tmp = save_i387_ia32(current, fpstate, regs, 0);
++	tmp = save_i387_ia32(fpstate);
+ 	if (tmp < 0)
+ 		err = -EFAULT;
+-	else { 
++	else {
+ 		clear_used_math();
+ 		stts();
+ 		err |= __put_user(ptr_to_compat(tmp ? fpstate : NULL),
+@@ -392,40 +396,53 @@ ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __
+ /*
+  * Determine which stack to use..
+  */
+-static void __user *
+-get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
++static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
++				 size_t frame_size)
+ {
+-	unsigned long rsp;
++	unsigned long sp;
+ 
+ 	/* Default to using normal stack */
+-	rsp = regs->rsp;
++	sp = regs->sp;
+ 
+ 	/* This is the X/Open sanctioned signal stack switching.  */
+ 	if (ka->sa.sa_flags & SA_ONSTACK) {
+-		if (sas_ss_flags(rsp) == 0)
+-			rsp = current->sas_ss_sp + current->sas_ss_size;
++		if (sas_ss_flags(sp) == 0)
++			sp = current->sas_ss_sp + current->sas_ss_size;
+ 	}
+ 
+ 	/* This is the legacy signal stack switching. */
+ 	else if ((regs->ss & 0xffff) != __USER_DS &&
+ 		!(ka->sa.sa_flags & SA_RESTORER) &&
+-		 ka->sa.sa_restorer) {
+-		rsp = (unsigned long) ka->sa.sa_restorer;
+-	}
++		 ka->sa.sa_restorer)
++		sp = (unsigned long) ka->sa.sa_restorer;
+ 
+-	rsp -= frame_size;
++	sp -= frame_size;
+ 	/* Align the stack pointer according to the i386 ABI,
+ 	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+-	rsp = ((rsp + 4) & -16ul) - 4;
+-	return (void __user *) rsp;
++	sp = ((sp + 4) & -16ul) - 4;
++	return (void __user *) sp;
+ }
+ 
+ int ia32_setup_frame(int sig, struct k_sigaction *ka,
+-		     compat_sigset_t *set, struct pt_regs * regs)
++		     compat_sigset_t *set, struct pt_regs *regs)
+ {
+ 	struct sigframe __user *frame;
++	void __user *restorer;
+ 	int err = 0;
+ 
++	/* copy_to_user optimizes that into a single 8 byte store */
++	static const struct {
++		u16 poplmovl;
++		u32 val;
++		u16 int80;
++		u16 pad;
++	} __attribute__((packed)) code = {
++		0xb858,		 /* popl %eax ; movl $...,%eax */
++		__NR_ia32_sigreturn,
++		0x80cd,		/* int $0x80 */
++		0,
++	};
++
+ 	frame = get_sigframe(ka, regs, sizeof(*frame));
+ 
+ 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+@@ -443,64 +460,53 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
+ 	if (_COMPAT_NSIG_WORDS > 1) {
+ 		err |= __copy_to_user(frame->extramask, &set->sig[1],
+ 				      sizeof(frame->extramask));
++		if (err)
++			goto give_sigsegv;
+ 	}
+-	if (err)
+-		goto give_sigsegv;
+ 
+-	/* Return stub is in 32bit vsyscall page */
+-	{ 
+-		void __user *restorer;
++	if (ka->sa.sa_flags & SA_RESTORER) {
++		restorer = ka->sa.sa_restorer;
++	} else {
++		/* Return stub is in 32bit vsyscall page */
+ 		if (current->binfmt->hasvdso)
+-			restorer = VSYSCALL32_SIGRETURN;
++			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
++						 sigreturn);
+ 		else
+-			restorer = (void *)&frame->retcode;
+-		if (ka->sa.sa_flags & SA_RESTORER)
+-			restorer = ka->sa.sa_restorer;       
+-		err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
+-	}
+-	/* These are actually not used anymore, but left because some 
+-	   gdb versions depend on them as a marker. */
+-	{ 
+-		/* copy_to_user optimizes that into a single 8 byte store */
+-		static const struct { 
+-			u16 poplmovl;
+-			u32 val;
+-			u16 int80;    
+-			u16 pad; 
+-		} __attribute__((packed)) code = { 
+-			0xb858,		 /* popl %eax ; movl $...,%eax */
+-			__NR_ia32_sigreturn,   
+-			0x80cd,		/* int $0x80 */
+-			0,
+-		}; 
+-		err |= __copy_to_user(frame->retcode, &code, 8); 
++			restorer = &frame->retcode;
+ 	}
++	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
++
++	/*
++	 * These are actually not used anymore, but left because some
++	 * gdb versions depend on them as a marker.
++	 */
++	err |= __copy_to_user(frame->retcode, &code, 8);
+ 	if (err)
+ 		goto give_sigsegv;
+ 
+ 	/* Set up registers for signal handler */
+-	regs->rsp = (unsigned long) frame;
+-	regs->rip = (unsigned long) ka->sa.sa_handler;
++	regs->sp = (unsigned long) frame;
++	regs->ip = (unsigned long) ka->sa.sa_handler;
+ 
+ 	/* Make -mregparm=3 work */
+-	regs->rax = sig;
+-	regs->rdx = 0;
+-	regs->rcx = 0;
++	regs->ax = sig;
++	regs->dx = 0;
++	regs->cx = 0;
+ 
+-	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 
+-	asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 
++	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
++	asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+ 
+-	regs->cs = __USER32_CS; 
+-	regs->ss = __USER32_DS; 
++	regs->cs = __USER32_CS;
++	regs->ss = __USER32_DS;
+ 
+ 	set_fs(USER_DS);
+-	regs->eflags &= ~TF_MASK;
++	regs->flags &= ~X86_EFLAGS_TF;
+ 	if (test_thread_flag(TIF_SINGLESTEP))
+ 		ptrace_notify(SIGTRAP);
+ 
+ #if DEBUG_SIG
+-	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
+-		current->comm, current->pid, frame, regs->rip, frame->pretcode);
++	printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
++	       current->comm, current->pid, frame, regs->ip, frame->pretcode);
+ #endif
+ 
+ 	return 0;
+@@ -511,25 +517,34 @@ give_sigsegv:
+ }
+ 
+ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+-			compat_sigset_t *set, struct pt_regs * regs)
++			compat_sigset_t *set, struct pt_regs *regs)
+ {
+ 	struct rt_sigframe __user *frame;
++	struct exec_domain *ed = current_thread_info()->exec_domain;
++	void __user *restorer;
+ 	int err = 0;
+ 
++	/* __copy_to_user optimizes that into a single 8 byte store */
++	static const struct {
++		u8 movl;
++		u32 val;
++		u16 int80;
++		u16 pad;
++		u8  pad2;
++	} __attribute__((packed)) code = {
++		0xb8,
++		__NR_ia32_rt_sigreturn,
++		0x80cd,
++		0,
++	};
++
+ 	frame = get_sigframe(ka, regs, sizeof(*frame));
+ 
+ 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ 		goto give_sigsegv;
+ 
+-	{
+-		struct exec_domain *ed = current_thread_info()->exec_domain;
+-		err |= __put_user((ed
+-		    	   && ed->signal_invmap
+-		    	   && sig < 32
+-		    	   ? ed->signal_invmap[sig]
+-			   : sig),
+-			  &frame->sig);
+-	}
++	err |= __put_user((ed && ed->signal_invmap && sig < 32
++			   ? ed->signal_invmap[sig] : sig), &frame->sig);
+ 	err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
+ 	err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
+ 	err |= copy_siginfo_to_user32(&frame->info, info);
+@@ -540,73 +555,58 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 	err |= __put_user(0, &frame->uc.uc_flags);
+ 	err |= __put_user(0, &frame->uc.uc_link);
+ 	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+-	err |= __put_user(sas_ss_flags(regs->rsp),
++	err |= __put_user(sas_ss_flags(regs->sp),
+ 			  &frame->uc.uc_stack.ss_flags);
+ 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ 	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
+-			        regs, set->sig[0]);
++				     regs, set->sig[0]);
+ 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ 	if (err)
+ 		goto give_sigsegv;
+ 
+-	
+-	{ 
+-		void __user *restorer = VSYSCALL32_RTSIGRETURN; 
+-		if (ka->sa.sa_flags & SA_RESTORER)
+-			restorer = ka->sa.sa_restorer;       
+-		err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
+-	}
+-
+-	/* This is movl $,%eax ; int $0x80 */
+-	/* Not actually used anymore, but left because some gdb versions
+-	   need it. */ 
+-	{ 
+-		/* __copy_to_user optimizes that into a single 8 byte store */
+-		static const struct { 
+-			u8 movl; 
+-			u32 val; 
+-			u16 int80; 
+-			u16 pad;
+-			u8  pad2;				
+-		} __attribute__((packed)) code = { 
+-			0xb8,
+-			__NR_ia32_rt_sigreturn,
+-			0x80cd,
+-			0,
+-		}; 
+-		err |= __copy_to_user(frame->retcode, &code, 8); 
+-	} 
++	if (ka->sa.sa_flags & SA_RESTORER)
++		restorer = ka->sa.sa_restorer;
++	else
++		restorer = VDSO32_SYMBOL(current->mm->context.vdso,
++					 rt_sigreturn);
++	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
++
++	/*
++	 * Not actually used anymore, but left because some gdb
++	 * versions need it.
++	 */
++	err |= __copy_to_user(frame->retcode, &code, 8);
+ 	if (err)
+ 		goto give_sigsegv;
+ 
+ 	/* Set up registers for signal handler */
+-	regs->rsp = (unsigned long) frame;
+-	regs->rip = (unsigned long) ka->sa.sa_handler;
++	regs->sp = (unsigned long) frame;
++	regs->ip = (unsigned long) ka->sa.sa_handler;
+ 
+ 	/* Make -mregparm=3 work */
+-	regs->rax = sig;
+-	regs->rdx = (unsigned long) &frame->info;
+-	regs->rcx = (unsigned long) &frame->uc;
++	regs->ax = sig;
++	regs->dx = (unsigned long) &frame->info;
++	regs->cx = (unsigned long) &frame->uc;
+ 
+ 	/* Make -mregparm=3 work */
+-	regs->rax = sig;
+-	regs->rdx = (unsigned long) &frame->info;
+-	regs->rcx = (unsigned long) &frame->uc;
++	regs->ax = sig;
++	regs->dx = (unsigned long) &frame->info;
++	regs->cx = (unsigned long) &frame->uc;
++
++	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
++	asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+ 
+-	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 
+-	asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 
+-	
+-	regs->cs = __USER32_CS; 
+-	regs->ss = __USER32_DS; 
++	regs->cs = __USER32_CS;
++	regs->ss = __USER32_DS;
+ 
+ 	set_fs(USER_DS);
+-	regs->eflags &= ~TF_MASK;
++	regs->flags &= ~X86_EFLAGS_TF;
+ 	if (test_thread_flag(TIF_SINGLESTEP))
+ 		ptrace_notify(SIGTRAP);
+ 
+ #if DEBUG_SIG
+-	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
+-		current->comm, current->pid, frame, regs->rip, frame->pretcode);
++	printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
++	       current->comm, current->pid, frame, regs->ip, frame->pretcode);
+ #endif
+ 
+ 	return 0;
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index df588f0..0db0a62 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -12,7 +12,6 @@
+ #include <asm/ia32_unistd.h>	
+ #include <asm/thread_info.h>	
+ #include <asm/segment.h>
+-#include <asm/vsyscall32.h>
+ #include <asm/irqflags.h>
+ #include <linux/linkage.h>
+ 
+@@ -104,7 +103,7 @@ ENTRY(ia32_sysenter_target)
+ 	pushfq
+ 	CFI_ADJUST_CFA_OFFSET 8
+ 	/*CFI_REL_OFFSET rflags,0*/
+-	movl	$VSYSCALL32_SYSEXIT, %r10d
++	movl	8*3-THREAD_SIZE+threadinfo_sysenter_return(%rsp), %r10d
+ 	CFI_REGISTER rip,r10
+ 	pushq	$__USER32_CS
+ 	CFI_ADJUST_CFA_OFFSET 8
+@@ -142,6 +141,8 @@ sysenter_do_call:
+ 	andl    $~TS_COMPAT,threadinfo_status(%r10)
+ 	/* clear IF, that popfq doesn't enable interrupts early */
+ 	andl  $~0x200,EFLAGS-R11(%rsp) 
++	movl	RIP-R11(%rsp),%edx		/* User %eip */
++	CFI_REGISTER rip,rdx
+ 	RESTORE_ARGS 1,24,1,1,1,1
+ 	popfq
+ 	CFI_ADJUST_CFA_OFFSET -8
+@@ -149,8 +150,6 @@ sysenter_do_call:
+ 	popq	%rcx				/* User %esp */
+ 	CFI_ADJUST_CFA_OFFSET -8
+ 	CFI_REGISTER rsp,rcx
+-	movl	$VSYSCALL32_SYSEXIT,%edx	/* User %eip */
+-	CFI_REGISTER rip,rdx
+ 	TRACE_IRQS_ON
+ 	swapgs
+ 	sti		/* sti only takes effect after the next instruction */
+@@ -644,8 +643,8 @@ ia32_sys_call_table:
+ 	.quad compat_sys_futex		/* 240 */
+ 	.quad compat_sys_sched_setaffinity
+ 	.quad compat_sys_sched_getaffinity
+-	.quad sys32_set_thread_area
+-	.quad sys32_get_thread_area
++	.quad sys_set_thread_area
++	.quad sys_get_thread_area
+ 	.quad compat_sys_io_setup	/* 245 */
+ 	.quad sys_io_destroy
+ 	.quad compat_sys_io_getevents
+diff --git a/arch/x86/ia32/ipc32.c b/arch/x86/ia32/ipc32.c
+index 7b3342e..d21991c 100644
+--- a/arch/x86/ia32/ipc32.c
++++ b/arch/x86/ia32/ipc32.c
+@@ -9,9 +9,8 @@
+ #include <linux/ipc.h>
+ #include <linux/compat.h>
+ 
+-asmlinkage long
+-sys32_ipc(u32 call, int first, int second, int third,
+-		compat_uptr_t ptr, u32 fifth)
++asmlinkage long sys32_ipc(u32 call, int first, int second, int third,
++			  compat_uptr_t ptr, u32 fifth)
+ {
+ 	int version;
+ 
+@@ -19,36 +18,35 @@ sys32_ipc(u32 call, int first, int second, int third,
+ 	call &= 0xffff;
+ 
+ 	switch (call) {
+-	      case SEMOP:
++	case SEMOP:
+ 		/* struct sembuf is the same on 32 and 64bit :)) */
+ 		return sys_semtimedop(first, compat_ptr(ptr), second, NULL);
+-	      case SEMTIMEDOP:
++	case SEMTIMEDOP:
+ 		return compat_sys_semtimedop(first, compat_ptr(ptr), second,
+ 						compat_ptr(fifth));
+-	      case SEMGET:
++	case SEMGET:
+ 		return sys_semget(first, second, third);
+-	      case SEMCTL:
++	case SEMCTL:
+ 		return compat_sys_semctl(first, second, third, compat_ptr(ptr));
+ 
+-	      case MSGSND:
++	case MSGSND:
+ 		return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
+-	      case MSGRCV:
++	case MSGRCV:
+ 		return compat_sys_msgrcv(first, second, fifth, third,
+ 					 version, compat_ptr(ptr));
+-	      case MSGGET:
++	case MSGGET:
+ 		return sys_msgget((key_t) first, second);
+-	      case MSGCTL:
++	case MSGCTL:
+ 		return compat_sys_msgctl(first, second, compat_ptr(ptr));
+ 
+-	      case SHMAT:
++	case SHMAT:
+ 		return compat_sys_shmat(first, second, third, version,
+ 					compat_ptr(ptr));
+-		break;
+-	      case SHMDT:
++	case SHMDT:
+ 		return sys_shmdt(compat_ptr(ptr));
+-	      case SHMGET:
++	case SHMGET:
+ 		return sys_shmget(first, (unsigned)second, third);
+-	      case SHMCTL:
++	case SHMCTL:
+ 		return compat_sys_shmctl(first, second, compat_ptr(ptr));
+ 	}
+ 	return -ENOSYS;
+diff --git a/arch/x86/ia32/mmap32.c b/arch/x86/ia32/mmap32.c
 deleted file mode 100644
-index 726c520..0000000
---- a/arch/sh64/mach-cayman/setup.c
+index e4b84b4..0000000
+--- a/arch/x86/ia32/mmap32.c
 +++ /dev/null
-@@ -1,239 +0,0 @@
+@@ -1,79 +0,0 @@
 -/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
+- *  linux/arch/x86_64/ia32/mm/mmap.c
 - *
-- * arch/sh64/mach-cayman/setup.c
+- *  flexible mmap layout support
 - *
-- * SH5 Cayman support
+- * Based on the i386 version which was
 - *
-- * This file handles the architecture-dependent parts of initialization
+- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+- * All Rights Reserved.
 - *
-- * Copyright David J. Mckay.
-- * Needs major work!
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
 - *
-- * benedict.gaster@superh.com:	 3rd May 2002
-- *    Added support for ramdisk, removing statically linked romfs at the same time.
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
 - *
-- * lethal@linux-sh.org:          15th May 2003
-- *    Use the generic procfs cpuinfo interface, just return a valid board name.
-- */
--#include <linux/init.h>
--#include <linux/kernel.h>
--#include <asm/platform.h>
--#include <asm/irq.h>
--#include <asm/io.h>
--
--/*
-- * Platform Dependent Interrupt Priorities.
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+- *
+- *
- * Started by Ingo Molnar <mingo@elte.hu>
 - */
 -
--/* Using defaults defined in irq.h */
--#define	RES NO_PRIORITY		/* Disabled */
--#define IR0 IRL0_PRIORITY	/* IRLs */
--#define IR1 IRL1_PRIORITY
--#define IR2 IRL2_PRIORITY
--#define IR3 IRL3_PRIORITY
--#define PCA INTA_PRIORITY	/* PCI Ints */
--#define PCB INTB_PRIORITY
--#define PCC INTC_PRIORITY
--#define PCD INTD_PRIORITY
--#define SER TOP_PRIORITY
--#define ERR TOP_PRIORITY
--#define PW0 TOP_PRIORITY
--#define PW1 TOP_PRIORITY
--#define PW2 TOP_PRIORITY
--#define PW3 TOP_PRIORITY
--#define DM0 NO_PRIORITY		/* DMA Ints */
--#define DM1 NO_PRIORITY
--#define DM2 NO_PRIORITY
--#define DM3 NO_PRIORITY
--#define DAE NO_PRIORITY
--#define TU0 TIMER_PRIORITY	/* TMU Ints */
--#define TU1 NO_PRIORITY
--#define TU2 NO_PRIORITY
--#define TI2 NO_PRIORITY
--#define ATI NO_PRIORITY		/* RTC Ints */
--#define PRI NO_PRIORITY
--#define CUI RTC_PRIORITY
--#define ERI SCIF_PRIORITY	/* SCIF Ints */
--#define RXI SCIF_PRIORITY
--#define BRI SCIF_PRIORITY
--#define TXI SCIF_PRIORITY
--#define ITI TOP_PRIORITY	/* WDT Ints */
--
--/* Setup for the SMSC FDC37C935 */
--#define SMSC_SUPERIO_BASE	0x04000000
--#define SMSC_CONFIG_PORT_ADDR	0x3f0
--#define SMSC_INDEX_PORT_ADDR	SMSC_CONFIG_PORT_ADDR
--#define SMSC_DATA_PORT_ADDR	0x3f1
--
--#define SMSC_ENTER_CONFIG_KEY	0x55
--#define SMSC_EXIT_CONFIG_KEY	0xaa
--
--#define SMCS_LOGICAL_DEV_INDEX	0x07
--#define SMSC_DEVICE_ID_INDEX	0x20
--#define SMSC_DEVICE_REV_INDEX	0x21
--#define SMSC_ACTIVATE_INDEX	0x30
--#define SMSC_PRIMARY_BASE_INDEX  0x60
--#define SMSC_SECONDARY_BASE_INDEX 0x62
--#define SMSC_PRIMARY_INT_INDEX	0x70
--#define SMSC_SECONDARY_INT_INDEX 0x72
--
--#define SMSC_IDE1_DEVICE	1
--#define SMSC_KEYBOARD_DEVICE	7
--#define SMSC_CONFIG_REGISTERS	8
--
--#define SMSC_SUPERIO_READ_INDEXED(index) ({ \
--	outb((index), SMSC_INDEX_PORT_ADDR); \
--	inb(SMSC_DATA_PORT_ADDR); })
--#define SMSC_SUPERIO_WRITE_INDEXED(val, index) ({ \
--	outb((index), SMSC_INDEX_PORT_ADDR); \
--	outb((val),   SMSC_DATA_PORT_ADDR); })
--
--#define IDE1_PRIMARY_BASE	0x01f0
--#define IDE1_SECONDARY_BASE	0x03f6
--
--unsigned long smsc_superio_virt;
+-#include <linux/personality.h>
+-#include <linux/mm.h>
+-#include <linux/random.h>
+-#include <linux/sched.h>
 -
 -/*
-- * Platform dependent structures: maps and parms block.
-- */
--struct resource io_resources[] = {
--	/* To be updated with external devices */
--};
--
--struct resource kram_resources[] = {
--	/* These must be last in the array */
--	{ .name = "Kernel code", .start = 0, .end = 0 },
--	/* These must be last in the array */
--	{ .name = "Kernel data", .start = 0, .end = 0 }
--};
--
--struct resource xram_resources[] = {
--	/* To be updated with external devices */
--};
--
--struct resource rom_resources[] = {
--	/* To be updated with external devices */
--};
--
--struct sh64_platform platform_parms = {
--	.readonly_rootfs =	1,
--	.initial_root_dev =	0x0100,
--	.loader_type =		1,
--	.io_res_p =		io_resources,
--	.io_res_count =		ARRAY_SIZE(io_resources),
--	.kram_res_p =		kram_resources,
--	.kram_res_count =	ARRAY_SIZE(kram_resources),
--	.xram_res_p =		xram_resources,
--	.xram_res_count =	ARRAY_SIZE(xram_resources),
--	.rom_res_p =		rom_resources,
--	.rom_res_count =	ARRAY_SIZE(rom_resources),
--};
--
--int platform_int_priority[NR_INTC_IRQS] = {
--	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
--	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
--	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
--	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
--	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
--	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
--	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
--	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
--};
--
--static int __init smsc_superio_setup(void)
--{
--	unsigned char devid, devrev;
--
--	smsc_superio_virt = onchip_remap(SMSC_SUPERIO_BASE, 1024, "SMSC SuperIO");
--	if (!smsc_superio_virt) {
--		panic("Unable to remap SMSC SuperIO\n");
--	}
--
--	/* Initially the chip is in run state */
--	/* Put it into configuration state */
--	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
--	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
--
--	/* Read device ID info */
--	devid = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
--	devrev = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_REV_INDEX);
--	printk("SMSC SuperIO devid %02x rev %02x\n", devid, devrev);
--
--	/* Select the keyboard device */
--	SMSC_SUPERIO_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
--
--	/* enable it */
--	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
--
--	/* Select the interrupts */
--	/* On a PC keyboard is IRQ1, mouse is IRQ12 */
--	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
--	SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
--
--#ifdef CONFIG_IDE
--	/*
--	 * Only IDE1 exists on the Cayman
--	 */
--
--	/* Power it on */
--	SMSC_SUPERIO_WRITE_INDEXED(1 << SMSC_IDE1_DEVICE, 0x22);
--
--	SMSC_SUPERIO_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
--	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
--
--	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE >> 8,
--				   SMSC_PRIMARY_BASE_INDEX + 0);
--	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE & 0xff,
--				   SMSC_PRIMARY_BASE_INDEX + 1);
--
--	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE >> 8,
--				   SMSC_SECONDARY_BASE_INDEX + 0);
--	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE & 0xff,
--				   SMSC_SECONDARY_BASE_INDEX + 1);
--
--	SMSC_SUPERIO_WRITE_INDEXED(14, SMSC_PRIMARY_INT_INDEX);
--
--	SMSC_SUPERIO_WRITE_INDEXED(SMSC_CONFIG_REGISTERS,
--				   SMCS_LOGICAL_DEV_INDEX);
--
--	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc2); /* GP42 = nIDE1_OE */
--	SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
--	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
--	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
--#endif
--
--	/* Exit the configuration state */
--	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
--
--	return 0;
--}
--
--/* This is grotty, but, because kernel is always referenced on the link line
-- * before any devices, this is safe.
+- * Top of mmap area (just below the process stack).
+- *
+- * Leave an at least ~128 MB hole.
 - */
--__initcall(smsc_superio_setup);
+-#define MIN_GAP (128*1024*1024)
+-#define MAX_GAP (TASK_SIZE/6*5)
 -
--void __init platform_setup(void)
+-static inline unsigned long mmap_base(struct mm_struct *mm)
 -{
--	/* Cayman platform leaves the decision to head.S, for now */
--	platform_parms.fpu_flags = fpu_in_use;
--}
+-	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+-	unsigned long random_factor = 0;
 -
--void __init platform_monitor(void)
--{
--	/* Nothing yet .. */
--}
+-	if (current->flags & PF_RANDOMIZE)
+-		random_factor = get_random_int() % (1024*1024);
 -
--void __init platform_reserve(void)
--{
--	/* Nothing yet .. */
--}
+-	if (gap < MIN_GAP)
+-		gap = MIN_GAP;
+-	else if (gap > MAX_GAP)
+-		gap = MAX_GAP;
 -
--const char *get_system_type(void)
--{
--	return "Hitachi Cayman";
+-	return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
 -}
 -
-diff --git a/arch/sh64/mach-harp/Makefile b/arch/sh64/mach-harp/Makefile
-deleted file mode 100644
-index 2f2963f..0000000
---- a/arch/sh64/mach-harp/Makefile
-+++ /dev/null
-@@ -1 +0,0 @@
--obj-y := setup.o
-diff --git a/arch/sh64/mach-harp/setup.c b/arch/sh64/mach-harp/setup.c
-deleted file mode 100644
-index 05011cb..0000000
---- a/arch/sh64/mach-harp/setup.c
-+++ /dev/null
-@@ -1,129 +0,0 @@
--/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/mach-harp/setup.c
-- *
-- * SH-5 Simulator Platform Support
-- *
-- * This file handles the architecture-dependent parts of initialization
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- *
-- * benedict.gaster@superh.com:	 3rd May 2002
-- *    Added support for ramdisk, removing statically linked romfs at the same time. *
-- *
-- * lethal@linux-sh.org:          15th May 2003
-- *    Use the generic procfs cpuinfo interface, just return a valid board name.
-- */
--#include <linux/init.h>
--#include <linux/kernel.h>
--#include <asm/platform.h>
--#include <asm/irq.h>
--
--/*
-- * Platform Dependent Interrupt Priorities.
-- */
--
--/* Using defaults defined in irq.h */
--#define	RES NO_PRIORITY		/* Disabled */
--#define IR0 IRL0_PRIORITY	/* IRLs */
--#define IR1 IRL1_PRIORITY
--#define IR2 IRL2_PRIORITY
--#define IR3 IRL3_PRIORITY
--#define PCA INTA_PRIORITY	/* PCI Ints */
--#define PCB INTB_PRIORITY
--#define PCC INTC_PRIORITY
--#define PCD INTD_PRIORITY
--#define SER TOP_PRIORITY
--#define ERR TOP_PRIORITY
--#define PW0 TOP_PRIORITY
--#define PW1 TOP_PRIORITY
--#define PW2 TOP_PRIORITY
--#define PW3 TOP_PRIORITY
--#define DM0 NO_PRIORITY		/* DMA Ints */
--#define DM1 NO_PRIORITY
--#define DM2 NO_PRIORITY
--#define DM3 NO_PRIORITY
--#define DAE NO_PRIORITY
--#define TU0 TIMER_PRIORITY	/* TMU Ints */
--#define TU1 NO_PRIORITY
--#define TU2 NO_PRIORITY
--#define TI2 NO_PRIORITY
--#define ATI NO_PRIORITY		/* RTC Ints */
--#define PRI NO_PRIORITY
--#define CUI RTC_PRIORITY
--#define ERI SCIF_PRIORITY	/* SCIF Ints */
--#define RXI SCIF_PRIORITY
--#define BRI SCIF_PRIORITY
--#define TXI SCIF_PRIORITY
--#define ITI TOP_PRIORITY	/* WDT Ints */
--
 -/*
-- * Platform dependent structures: maps and parms block.
+- * This function, called very early during the creation of a new
+- * process VM image, sets up which VM layout function to use:
 - */
--struct resource io_resources[] = {
--	/* To be updated with external devices */
--};
--
--struct resource kram_resources[] = {
--	/* These must be last in the array */
--	{ .name = "Kernel code", .start = 0, .end = 0 },
--	/* These must be last in the array */
--	{ .name = "Kernel data", .start = 0, .end = 0 }
--};
--
--struct resource xram_resources[] = {
--	/* To be updated with external devices */
--};
--
--struct resource rom_resources[] = {
--	/* To be updated with external devices */
--};
--
--struct sh64_platform platform_parms = {
--	.readonly_rootfs =	1,
--	.initial_root_dev =	0x0100,
--	.loader_type =		1,
--	.io_res_p =		io_resources,
--	.io_res_count =		ARRAY_SIZE(io_resources),
--	.kram_res_p =		kram_resources,
--	.kram_res_count =	ARRAY_SIZE(kram_resources),
--	.xram_res_p =		xram_resources,
--	.xram_res_count =	ARRAY_SIZE(xram_resources),
--	.rom_res_p =		rom_resources,
--	.rom_res_count =	ARRAY_SIZE(rom_resources),
--};
--
--int platform_int_priority[NR_INTC_IRQS] = {
--	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
--	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
--	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
--	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
--	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
--	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
--	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
--	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
--};
--
--void __init platform_setup(void)
--{
--	/* Harp platform leaves the decision to head.S, for now */
--	platform_parms.fpu_flags = fpu_in_use;
--}
--
--void __init platform_monitor(void)
--{
--	/* Nothing yet .. */
--}
--
--void __init platform_reserve(void)
--{
--	/* Nothing yet .. */
--}
--
--const char *get_system_type(void)
+-void ia32_pick_mmap_layout(struct mm_struct *mm)
 -{
--	return "ST50 Harp";
+-	/*
+-	 * Fall back to the standard layout if the personality
+-	 * bit is set, or if the expected stack growth is unlimited:
+-	 */
+-	if (sysctl_legacy_va_layout ||
+-			(current->personality & ADDR_COMPAT_LAYOUT) ||
+-			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
+-		mm->mmap_base = TASK_UNMAPPED_BASE;
+-		mm->get_unmapped_area = arch_get_unmapped_area;
+-		mm->unmap_area = arch_unmap_area;
+-	} else {
+-		mm->mmap_base = mmap_base(mm);
+-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+-		mm->unmap_area = arch_unmap_area_topdown;
+-	}
 -}
-diff --git a/arch/sh64/mach-sim/Makefile b/arch/sh64/mach-sim/Makefile
-deleted file mode 100644
-index 2f2963f..0000000
---- a/arch/sh64/mach-sim/Makefile
-+++ /dev/null
-@@ -1 +0,0 @@
--obj-y := setup.o
-diff --git a/arch/sh64/mach-sim/setup.c b/arch/sh64/mach-sim/setup.c
+diff --git a/arch/x86/ia32/ptrace32.c b/arch/x86/ia32/ptrace32.c
 deleted file mode 100644
-index e3386ec..0000000
---- a/arch/sh64/mach-sim/setup.c
+index 4a233ad..0000000
+--- a/arch/x86/ia32/ptrace32.c
 +++ /dev/null
-@@ -1,126 +0,0 @@
--/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/mach-sim/setup.c
-- *
-- * ST50 Simulator Platform Support
-- *
-- * This file handles the architecture-dependent parts of initialization
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
+@@ -1,404 +0,0 @@
+-/* 
+- * 32bit ptrace for x86-64.
 - *
-- * lethal@linux-sh.org:          15th May 2003
-- *    Use the generic procfs cpuinfo interface, just return a valid board name.
-- */
--#include <linux/init.h>
--#include <linux/kernel.h>
--#include <asm/platform.h>
--#include <asm/irq.h>
--
--/*
-- * Platform Dependent Interrupt Priorities.
-- */
+- * Copyright 2001,2002 Andi Kleen, SuSE Labs.
+- * Some parts copied from arch/i386/kernel/ptrace.c. See that file for earlier 
+- * copyright.
+- * 
+- * This allows to access 64bit processes too; but there is no way to see the extended 
+- * register contents.
+- */ 
 -
--/* Using defaults defined in irq.h */
--#define	RES NO_PRIORITY		/* Disabled */
--#define IR0 IRL0_PRIORITY	/* IRLs */
--#define IR1 IRL1_PRIORITY
--#define IR2 IRL2_PRIORITY
--#define IR3 IRL3_PRIORITY
--#define PCA INTA_PRIORITY	/* PCI Ints */
--#define PCB INTB_PRIORITY
--#define PCC INTC_PRIORITY
--#define PCD INTD_PRIORITY
--#define SER TOP_PRIORITY
--#define ERR TOP_PRIORITY
--#define PW0 TOP_PRIORITY
--#define PW1 TOP_PRIORITY
--#define PW2 TOP_PRIORITY
--#define PW3 TOP_PRIORITY
--#define DM0 NO_PRIORITY		/* DMA Ints */
--#define DM1 NO_PRIORITY
--#define DM2 NO_PRIORITY
--#define DM3 NO_PRIORITY
--#define DAE NO_PRIORITY
--#define TU0 TIMER_PRIORITY	/* TMU Ints */
--#define TU1 NO_PRIORITY
--#define TU2 NO_PRIORITY
--#define TI2 NO_PRIORITY
--#define ATI NO_PRIORITY		/* RTC Ints */
--#define PRI NO_PRIORITY
--#define CUI RTC_PRIORITY
--#define ERI SCIF_PRIORITY	/* SCIF Ints */
--#define RXI SCIF_PRIORITY
--#define BRI SCIF_PRIORITY
--#define TXI SCIF_PRIORITY
--#define ITI TOP_PRIORITY	/* WDT Ints */
+-#include <linux/kernel.h>
+-#include <linux/stddef.h>
+-#include <linux/sched.h>
+-#include <linux/syscalls.h>
+-#include <linux/unistd.h>
+-#include <linux/mm.h>
+-#include <linux/err.h>
+-#include <linux/ptrace.h>
+-#include <asm/ptrace.h>
+-#include <asm/compat.h>
+-#include <asm/uaccess.h>
+-#include <asm/user32.h>
+-#include <asm/user.h>
+-#include <asm/errno.h>
+-#include <asm/debugreg.h>
+-#include <asm/i387.h>
+-#include <asm/fpu32.h>
+-#include <asm/ia32.h>
 -
 -/*
-- * Platform dependent structures: maps and parms block.
+- * Determines which flags the user has access to [1 = access, 0 = no access].
+- * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+- * Also masks reserved bits (31-22, 15, 5, 3, 1).
 - */
--struct resource io_resources[] = {
--	/* Nothing yet .. */
--};
--
--struct resource kram_resources[] = {
--	/* These must be last in the array */
--	{ .name = "Kernel code", .start = 0, .end = 0 },
--	/* These must be last in the array */
--	{ .name = "Kernel data", .start = 0, .end = 0 }
--};
--
--struct resource xram_resources[] = {
--	/* Nothing yet .. */
--};
--
--struct resource rom_resources[] = {
--	/* Nothing yet .. */
--};
--
--struct sh64_platform platform_parms = {
--	.readonly_rootfs =	1,
--	.initial_root_dev =	0x0100,
--	.loader_type =		1,
--	.io_res_p =		io_resources,
--	.io_res_count =		ARRAY_SIZE(io_resources),
--	.kram_res_p =		kram_resources,
--	.kram_res_count =	ARRAY_SIZE(kram_resources),
--	.xram_res_p =		xram_resources,
--	.xram_res_count =	ARRAY_SIZE(xram_resources),
--	.rom_res_p =		rom_resources,
--	.rom_res_count =	ARRAY_SIZE(rom_resources),
--};
+-#define FLAG_MASK 0x54dd5UL
 -
--int platform_int_priority[NR_IRQS] = {
--	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
--	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
--	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
--	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
--	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
--	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
--	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
--	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
--};
+-#define R32(l,q) \
+-	case offsetof(struct user32, regs.l): stack[offsetof(struct pt_regs, q)/8] = val; break
 -
--void __init platform_setup(void)
+-static int putreg32(struct task_struct *child, unsigned regno, u32 val)
 -{
--	/* Simulator platform leaves the decision to head.S */
--	platform_parms.fpu_flags = fpu_in_use;
--}
+-	int i;
+-	__u64 *stack = (__u64 *)task_pt_regs(child);
 -
--void __init platform_monitor(void)
--{
--	/* Nothing yet .. */
--}
+-	switch (regno) {
+-	case offsetof(struct user32, regs.fs):
+-		if (val && (val & 3) != 3) return -EIO; 
+-		child->thread.fsindex = val & 0xffff;
+-		break;
+-	case offsetof(struct user32, regs.gs):
+-		if (val && (val & 3) != 3) return -EIO; 
+-		child->thread.gsindex = val & 0xffff;
+-		break;
+-	case offsetof(struct user32, regs.ds):
+-		if (val && (val & 3) != 3) return -EIO; 
+-		child->thread.ds = val & 0xffff;
+-		break;
+-	case offsetof(struct user32, regs.es):
+-		child->thread.es = val & 0xffff;
+-		break;
+-	case offsetof(struct user32, regs.ss): 
+-		if ((val & 3) != 3) return -EIO;
+-        	stack[offsetof(struct pt_regs, ss)/8] = val & 0xffff;
+-		break;
+-	case offsetof(struct user32, regs.cs): 
+-		if ((val & 3) != 3) return -EIO;
+-		stack[offsetof(struct pt_regs, cs)/8] = val & 0xffff;
+-		break;
 -
--void __init platform_reserve(void)
--{
--	/* Nothing yet .. */
--}
+-	R32(ebx, rbx); 
+-	R32(ecx, rcx);
+-	R32(edx, rdx);
+-	R32(edi, rdi);
+-	R32(esi, rsi);
+-	R32(ebp, rbp);
+-	R32(eax, rax);
+-	R32(orig_eax, orig_rax);
+-	R32(eip, rip);
+-	R32(esp, rsp);
 -
--const char *get_system_type(void)
--{
--	return "SH-5 Simulator";
--}
-diff --git a/arch/sh64/mm/Makefile b/arch/sh64/mm/Makefile
-deleted file mode 100644
-index d0e8136..0000000
---- a/arch/sh64/mm/Makefile
-+++ /dev/null
-@@ -1,44 +0,0 @@
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 2000, 2001  Paolo Alberelli
--# Copyright (C) 2003, 2004  Paul Mundt
--#
--# Makefile for the sh64-specific parts of the Linux memory manager.
--#
--# Note! Dependencies are done automagically by 'make dep', which also
--# removes any old dependencies. DON'T put your own dependencies here
--# unless it's something special (ie not a .c file).
--#
+-	case offsetof(struct user32, regs.eflags): {
+-		__u64 *flags = &stack[offsetof(struct pt_regs, eflags)/8];
+-		val &= FLAG_MASK;
+-		*flags = val | (*flags & ~FLAG_MASK);
+-		break;
+-	}
 -
--obj-y := cache.o consistent.o extable.o fault.o init.o ioremap.o \
--	 tlbmiss.o tlb.o
+-	case offsetof(struct user32, u_debugreg[4]): 
+-	case offsetof(struct user32, u_debugreg[5]):
+-		return -EIO;
 -
--obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
+-	case offsetof(struct user32, u_debugreg[0]):
+-		child->thread.debugreg0 = val;
+-		break;
 -
--# Special flags for tlbmiss.o.  This puts restrictions on the number of
--# caller-save registers that the compiler can target when building this file.
--# This is required because the code is called from a context in entry.S where
--# very few registers have been saved in the exception handler (for speed
--# reasons).
--# The caller save registers that have been saved and which can be used are
--# r2,r3,r4,r5 : argument passing
--# r15, r18 : SP and LINK
--# tr0-4 : allow all caller-save TR's.  The compiler seems to be able to make
--#         use of them, so it's probably beneficial to performance to save them
--#         and have them available for it.
--#
--# The resources not listed below are callee save, i.e. the compiler is free to
--# use any of them and will spill them to the stack itself.
+-	case offsetof(struct user32, u_debugreg[1]):
+-		child->thread.debugreg1 = val;
+-		break;
 -
--CFLAGS_tlbmiss.o += -ffixed-r7 \
--	-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
--	-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
--	-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
--	-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
--	-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
--	-ffixed-r41 -ffixed-r42 -ffixed-r43  \
--	-ffixed-r60 -ffixed-r61 -ffixed-r62 \
--	-fomit-frame-pointer
-diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c
-deleted file mode 100644
-index 421487c..0000000
---- a/arch/sh64/mm/cache.c
-+++ /dev/null
-@@ -1,1032 +0,0 @@
--/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/mm/cache.c
-- *
-- * Original version Copyright (C) 2000, 2001  Paolo Alberelli
-- * Second version Copyright (C) benedict.gaster@superh.com 2002
-- * Third version Copyright Richard.Curnow@superh.com 2003
-- * Hacks to third version Copyright (C) 2003 Paul Mundt
-- */
+-	case offsetof(struct user32, u_debugreg[2]):
+-		child->thread.debugreg2 = val;
+-		break;
 -
--/****************************************************************************/
+-	case offsetof(struct user32, u_debugreg[3]):
+-		child->thread.debugreg3 = val;
+-		break;
 -
--#include <linux/init.h>
--#include <linux/mman.h>
--#include <linux/mm.h>
--#include <linux/threads.h>
--#include <asm/page.h>
--#include <asm/pgtable.h>
--#include <asm/processor.h>
--#include <asm/cache.h>
--#include <asm/tlb.h>
--#include <asm/io.h>
--#include <asm/uaccess.h>
--#include <asm/mmu_context.h>
--#include <asm/pgalloc.h> /* for flush_itlb_range */
+-	case offsetof(struct user32, u_debugreg[6]):
+-		child->thread.debugreg6 = val;
+-		break; 
 -
--#include <linux/proc_fs.h>
+-	case offsetof(struct user32, u_debugreg[7]):
+-		val &= ~DR_CONTROL_RESERVED;
+-		/* See arch/i386/kernel/ptrace.c for an explanation of
+-		 * this awkward check.*/
+-		for(i=0; i<4; i++)
+-			if ((0x5454 >> ((val >> (16 + 4*i)) & 0xf)) & 1)
+-			       return -EIO;
+-		child->thread.debugreg7 = val; 
+-		if (val)
+-			set_tsk_thread_flag(child, TIF_DEBUG);
+-		else
+-			clear_tsk_thread_flag(child, TIF_DEBUG);
+-		break; 
+-		    
+-	default:
+-		if (regno > sizeof(struct user32) || (regno & 3))
+-			return -EIO;
+-	       
+-		/* Other dummy fields in the virtual user structure are ignored */ 
+-		break; 		
+-	}
+-	return 0;
+-}
 -
--/* This function is in entry.S */
--extern unsigned long switch_and_save_asid(unsigned long new_asid);
+-#undef R32
 -
--/* Wired TLB entry for the D-cache */
--static unsigned long long dtlb_cache_slot;
+-#define R32(l,q) \
+-	case offsetof(struct user32, regs.l): *val = stack[offsetof(struct pt_regs, q)/8]; break
 -
--/**
-- * sh64_cache_init()
-- *
-- * This is pretty much just a straightforward clone of the SH
-- * detect_cpu_and_cache_system().
-- *
-- * This function is responsible for setting up all of the cache
-- * info dynamically as well as taking care of CPU probing and
-- * setting up the relevant subtype data.
-- *
-- * FIXME: For the time being, we only really support the SH5-101
-- * out of the box, and don't support dynamic probing for things
-- * like the SH5-103 or even cut2 of the SH5-101. Implement this
-- * later!
-- */
--int __init sh64_cache_init(void)
+-static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
 -{
--	/*
--	 * First, setup some sane values for the I-cache.
--	 */
--	cpu_data->icache.ways		= 4;
--	cpu_data->icache.sets		= 256;
--	cpu_data->icache.linesz		= L1_CACHE_BYTES;
--
--	/*
--	 * FIXME: This can probably be cleaned up a bit as well.. for example,
--	 * do we really need the way shift _and_ the way_step_shift ?? Judging
--	 * by the existing code, I would guess no.. is there any valid reason
--	 * why we need to be tracking this around?
--	 */
--	cpu_data->icache.way_shift	= 13;
--	cpu_data->icache.entry_shift	= 5;
--	cpu_data->icache.set_shift	= 4;
--	cpu_data->icache.way_step_shift	= 16;
--	cpu_data->icache.asid_shift	= 2;
--
--	/*
--	 * way offset = cache size / associativity, so just don't factor in
--	 * associativity in the first place..
--	 */
--	cpu_data->icache.way_ofs	= cpu_data->icache.sets *
--					  cpu_data->icache.linesz;
--
--	cpu_data->icache.asid_mask	= 0x3fc;
--	cpu_data->icache.idx_mask	= 0x1fe0;
--	cpu_data->icache.epn_mask	= 0xffffe000;
--	cpu_data->icache.flags		= 0;
+-	__u64 *stack = (__u64 *)task_pt_regs(child);
 -
--	/*
--	 * Next, setup some sane values for the D-cache.
--	 *
--	 * On the SH5, these are pretty consistent with the I-cache settings,
--	 * so we just copy over the existing definitions.. these can be fixed
--	 * up later, especially if we add runtime CPU probing.
--	 *
--	 * Though in the meantime it saves us from having to duplicate all of
--	 * the above definitions..
--	 */
--	cpu_data->dcache		= cpu_data->icache;
+-	switch (regno) {
+-	case offsetof(struct user32, regs.fs):
+-	        *val = child->thread.fsindex;
+-		break;
+-	case offsetof(struct user32, regs.gs):
+-		*val = child->thread.gsindex;
+-		break;
+-	case offsetof(struct user32, regs.ds):
+-		*val = child->thread.ds;
+-		break;
+-	case offsetof(struct user32, regs.es):
+-		*val = child->thread.es;
+-		break;
 -
--	/*
--	 * Setup any cache-related flags here
--	 */
--#if defined(CONFIG_DCACHE_WRITE_THROUGH)
--	set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags));
--#elif defined(CONFIG_DCACHE_WRITE_BACK)
--	set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags));
--#endif
+-	R32(cs, cs);
+-	R32(ss, ss);
+-	R32(ebx, rbx); 
+-	R32(ecx, rcx);
+-	R32(edx, rdx);
+-	R32(edi, rdi);
+-	R32(esi, rsi);
+-	R32(ebp, rbp);
+-	R32(eax, rax);
+-	R32(orig_eax, orig_rax);
+-	R32(eip, rip);
+-	R32(eflags, eflags);
+-	R32(esp, rsp);
 -
--	/*
--	 * We also need to reserve a slot for the D-cache in the DTLB, so we
--	 * do this now ..
--	 */
--	dtlb_cache_slot			= sh64_get_wired_dtlb_entry();
+-	case offsetof(struct user32, u_debugreg[0]): 
+-		*val = child->thread.debugreg0; 
+-		break; 
+-	case offsetof(struct user32, u_debugreg[1]): 
+-		*val = child->thread.debugreg1; 
+-		break; 
+-	case offsetof(struct user32, u_debugreg[2]): 
+-		*val = child->thread.debugreg2; 
+-		break; 
+-	case offsetof(struct user32, u_debugreg[3]): 
+-		*val = child->thread.debugreg3; 
+-		break; 
+-	case offsetof(struct user32, u_debugreg[6]): 
+-		*val = child->thread.debugreg6; 
+-		break; 
+-	case offsetof(struct user32, u_debugreg[7]): 
+-		*val = child->thread.debugreg7; 
+-		break; 
+-		    
+-	default:
+-		if (regno > sizeof(struct user32) || (regno & 3))
+-			return -EIO;
 -
+-		/* Other dummy fields in the virtual user structure are ignored */ 
+-		*val = 0;
+-		break; 		
+-	}
 -	return 0;
 -}
 -
--#ifdef CONFIG_DCACHE_DISABLED
--#define sh64_dcache_purge_all()					do { } while (0)
--#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
--#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
--#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
--#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
--#define sh64_dcache_purge_kernel_range(start, end)		do { } while (0)
--#define sh64_dcache_wback_current_user_range(start, end)	do { } while (0)
--#endif
--
--/*##########################################################################*/
--
--/* From here onwards, a rewrite of the implementation,
--   by Richard.Curnow@superh.com.
--
--   The major changes in this compared to the old version are;
--   1. use more selective purging through OCBP instead of using ALLOCO to purge
--      by natural replacement.  This avoids purging out unrelated cache lines
--      that happen to be in the same set.
--   2. exploit the APIs copy_user_page and clear_user_page better
--   3. be more selective about I-cache purging, in particular use invalidate_all
--      more sparingly.
--
--   */
--
--/*##########################################################################
--			       SUPPORT FUNCTIONS
--  ##########################################################################*/
--
--/****************************************************************************/
--/* The following group of functions deal with mapping and unmapping a temporary
--   page into the DTLB slot that have been set aside for our exclusive use. */
--/* In order to accomplish this, we use the generic interface for adding and
--   removing a wired slot entry as defined in arch/sh64/mm/tlb.c */
--/****************************************************************************/
--
--static unsigned long slot_own_flags;
+-#undef R32
 -
--static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr)
+-static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
 -{
--	local_irq_save(slot_own_flags);
--	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
+-	int ret;
+-	compat_siginfo_t __user *si32 = compat_ptr(data);
+-	siginfo_t ssi; 
+-	siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
+-	if (request == PTRACE_SETSIGINFO) {
+-		memset(&ssi, 0, sizeof(siginfo_t));
+-		ret = copy_siginfo_from_user32(&ssi, si32);
+-		if (ret)
+-			return ret;
+-		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
+-			return -EFAULT;
+-	}
+-	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
+-	if (ret)
+-		return ret;
+-	if (request == PTRACE_GETSIGINFO) {
+-		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
+-			return -EFAULT;
+-		ret = copy_siginfo_to_user32(si32, &ssi);
+-	}
+-	return ret;
 -}
 -
--static inline void sh64_teardown_dtlb_cache_slot(void)
+-asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
 -{
--	sh64_teardown_tlb_slot(dtlb_cache_slot);
--	local_irq_restore(slot_own_flags);
--}
--
--/****************************************************************************/
--
--#ifndef CONFIG_ICACHE_DISABLED
+-	struct task_struct *child;
+-	struct pt_regs *childregs; 
+-	void __user *datap = compat_ptr(data);
+-	int ret;
+-	__u32 val;
 -
--static void __inline__ sh64_icache_inv_all(void)
--{
--	unsigned long long addr, flag, data;
--	unsigned int flags;
+-	switch (request) { 
+-	case PTRACE_TRACEME:
+-	case PTRACE_ATTACH:
+-	case PTRACE_KILL:
+-	case PTRACE_CONT:
+-	case PTRACE_SINGLESTEP:
+-	case PTRACE_DETACH:
+-	case PTRACE_SYSCALL:
+-	case PTRACE_OLDSETOPTIONS:
+-	case PTRACE_SETOPTIONS:
+-	case PTRACE_SET_THREAD_AREA:
+-	case PTRACE_GET_THREAD_AREA:
+-		return sys_ptrace(request, pid, addr, data); 
 -
--	addr=ICCR0;
--	flag=ICCR0_ICI;
--	data=0;
+-	default:
+-		return -EINVAL;
 -
--	/* Make this a critical section for safety (probably not strictly necessary.) */
--	local_irq_save(flags);
+-	case PTRACE_PEEKTEXT:
+-	case PTRACE_PEEKDATA:
+-	case PTRACE_POKEDATA:
+-	case PTRACE_POKETEXT:
+-	case PTRACE_POKEUSR:       
+-	case PTRACE_PEEKUSR:
+-	case PTRACE_GETREGS:
+-	case PTRACE_SETREGS:
+-	case PTRACE_SETFPREGS:
+-	case PTRACE_GETFPREGS:
+-	case PTRACE_SETFPXREGS:
+-	case PTRACE_GETFPXREGS:
+-	case PTRACE_GETEVENTMSG:
+-		break;
 -
--	/* Without %1 it gets unexplicably wrong */
--	asm volatile("getcfg	%3, 0, %0\n\t"
--			"or	%0, %2, %0\n\t"
--			"putcfg	%3, 0, %0\n\t"
--			"synci"
--			: "=&r" (data)
--			: "0" (data), "r" (flag), "r" (addr));
+-	case PTRACE_SETSIGINFO:
+-	case PTRACE_GETSIGINFO:
+-		return ptrace32_siginfo(request, pid, addr, data);
+-	}
 -
--	local_irq_restore(flags);
--}
+-	child = ptrace_get_task_struct(pid);
+-	if (IS_ERR(child))
+-		return PTR_ERR(child);
 -
--static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
--{
--	/* Invalidate range of addresses [start,end] from the I-cache, where
--	 * the addresses lie in the kernel superpage. */
+-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+-	if (ret < 0)
+-		goto out;
 -
--	unsigned long long ullend, addr, aligned_start;
--#if (NEFF == 32)
--	aligned_start = (unsigned long long)(signed long long)(signed long) start;
--#else
--#error "NEFF != 32"
--#endif
--	aligned_start &= L1_CACHE_ALIGN_MASK;
--	addr = aligned_start;
--#if (NEFF == 32)
--	ullend = (unsigned long long) (signed long long) (signed long) end;
--#else
--#error "NEFF != 32"
--#endif
--	while (addr <= ullend) {
--		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
--		addr += L1_CACHE_BYTES;
--	}
--}
+-	childregs = task_pt_regs(child);
 -
--static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
--{
--	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
--	   Also, eaddr is page-aligned. */
+-	switch (request) {
+-	case PTRACE_PEEKDATA:
+-	case PTRACE_PEEKTEXT:
+-		ret = 0;
+-		if (access_process_vm(child, addr, &val, sizeof(u32), 0)!=sizeof(u32))
+-			ret = -EIO;
+-		else
+-			ret = put_user(val, (unsigned int __user *)datap); 
+-		break; 
 -
--	unsigned long long addr, end_addr;
--	unsigned long flags = 0;
--	unsigned long running_asid, vma_asid;
--	addr = eaddr;
--	end_addr = addr + PAGE_SIZE;
+-	case PTRACE_POKEDATA:
+-	case PTRACE_POKETEXT:
+-		ret = 0;
+-		if (access_process_vm(child, addr, &data, sizeof(u32), 1)!=sizeof(u32))
+-			ret = -EIO; 
+-		break;
 -
--	/* Check whether we can use the current ASID for the I-cache
--	   invalidation.  For example, if we're called via
--	   access_process_vm->flush_cache_page->here, (e.g. when reading from
--	   /proc), 'running_asid' will be that of the reader, not of the
--	   victim.
+-	case PTRACE_PEEKUSR:
+-		ret = getreg32(child, addr, &val);
+-		if (ret == 0)
+-			ret = put_user(val, (__u32 __user *)datap);
+-		break;
 -
--	   Also, note the risk that we might get pre-empted between the ASID
--	   compare and blocking IRQs, and before we regain control, the
--	   pid->ASID mapping changes.  However, the whole cache will get
--	   invalidated when the mapping is renewed, so the worst that can
--	   happen is that the loop below ends up invalidating somebody else's
--	   cache entries.
--	*/
+-	case PTRACE_POKEUSR:
+-		ret = putreg32(child, addr, data);
+-		break;
 -
--	running_asid = get_asid();
--	vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
--	if (running_asid != vma_asid) {
--		local_irq_save(flags);
--		switch_and_save_asid(vma_asid);
+-	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+-		int i;
+-	  	if (!access_ok(VERIFY_WRITE, datap, 16*4)) {
+-			ret = -EIO;
+-			break;
+-		}
+-		ret = 0;
+-		for ( i = 0; i <= 16*4 ; i += sizeof(__u32) ) {
+-			getreg32(child, i, &val);
+-			ret |= __put_user(val,(u32 __user *)datap);
+-			datap += sizeof(u32);
+-		}
+-		break;
 -	}
--	while (addr < end_addr) {
--		/* Worth unrolling a little */
--		asm __volatile__("icbi %0,  0" : : "r" (addr));
--		asm __volatile__("icbi %0, 32" : : "r" (addr));
--		asm __volatile__("icbi %0, 64" : : "r" (addr));
--		asm __volatile__("icbi %0, 96" : : "r" (addr));
--		addr += 128;
+-
+-	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+-		unsigned long tmp;
+-		int i;
+-	  	if (!access_ok(VERIFY_READ, datap, 16*4)) {
+-			ret = -EIO;
+-			break;
+-		}
+-		ret = 0; 
+-		for ( i = 0; i <= 16*4; i += sizeof(u32) ) {
+-			ret |= __get_user(tmp, (u32 __user *)datap);
+-			putreg32(child, i, tmp);
+-			datap += sizeof(u32);
+-		}
+-		break;
 -	}
--	if (running_asid != vma_asid) {
--		switch_and_save_asid(running_asid);
--		local_irq_restore(flags);
+-
+-	case PTRACE_GETFPREGS:
+-		ret = -EIO; 
+-		if (!access_ok(VERIFY_READ, compat_ptr(data), 
+-			       sizeof(struct user_i387_struct)))
+-			break;
+-		save_i387_ia32(child, datap, childregs, 1);
+-		ret = 0; 
+-			break;
+-
+-	case PTRACE_SETFPREGS:
+-		ret = -EIO;
+-		if (!access_ok(VERIFY_WRITE, datap, 
+-			       sizeof(struct user_i387_struct)))
+-			break;
+-		ret = 0;
+-		/* don't check EFAULT to be bug-to-bug compatible to i386 */
+-		restore_i387_ia32(child, datap, 1);
+-		break;
+-
+-	case PTRACE_GETFPXREGS: { 
+-		struct user32_fxsr_struct __user *u = datap;
+-		init_fpu(child); 
+-		ret = -EIO;
+-		if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+-			break;
+-			ret = -EFAULT;
+-		if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u)))
+-			break;
+-		ret = __put_user(childregs->cs, &u->fcs);
+-		ret |= __put_user(child->thread.ds, &u->fos); 
+-		break; 
+-	} 
+-	case PTRACE_SETFPXREGS: { 
+-		struct user32_fxsr_struct __user *u = datap;
+-		unlazy_fpu(child);
+-		ret = -EIO;
+-		if (!access_ok(VERIFY_READ, u, sizeof(*u)))
+-			break;
+-		/* no checking to be bug-to-bug compatible with i386. */
+-		/* but silence warning */
+-		if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)))
+-			;
+-		set_stopped_child_used_math(child);
+-		child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+-		ret = 0; 
+-		break;
 -	}
--}
 -
--/****************************************************************************/
+-	case PTRACE_GETEVENTMSG:
+-		ret = put_user(child->ptrace_message,(unsigned int __user *)compat_ptr(data));
+-		break;
 -
--static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
--			  unsigned long start, unsigned long end)
--{
--	/* Used for invalidating big chunks of I-cache, i.e. assume the range
--	   is whole pages.  If 'start' or 'end' is not page aligned, the code
--	   is conservative and invalidates to the ends of the enclosing pages.
--	   This is functionally OK, just a performance loss. */
+-	default:
+-		BUG();
+-	}
 -
--	/* See the comments below in sh64_dcache_purge_user_range() regarding
--	   the choice of algorithm.  However, for the I-cache option (2) isn't
--	   available because there are no physical tags so aliases can't be
--	   resolved.  The icbi instruction has to be used through the user
--	   mapping.   Because icbi is cheaper than ocbp on a cache hit, it
--	   would be cheaper to use the selective code for a large range than is
--	   possible with the D-cache.  Just assume 64 for now as a working
--	   figure.
--	   */
+- out:
+-	put_task_struct(child);
+-	return ret;
+-}
 -
--	int n_pages;
+diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
+index bee96d6..abf71d2 100644
+--- a/arch/x86/ia32/sys_ia32.c
++++ b/arch/x86/ia32/sys_ia32.c
+@@ -1,29 +1,29 @@
+ /*
+  * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Based on
+- *             sys_sparc32 
++ *             sys_sparc32
+  *
+  * Copyright (C) 2000		VA Linux Co
+  * Copyright (C) 2000		Don Dugger <n0ano at valinux.com>
+- * Copyright (C) 1999 		Arun Sharma <arun.sharma at intel.com>
+- * Copyright (C) 1997,1998 	Jakub Jelinek (jj at sunsite.mff.cuni.cz)
+- * Copyright (C) 1997 		David S. Miller (davem at caip.rutgers.edu)
++ * Copyright (C) 1999		Arun Sharma <arun.sharma at intel.com>
++ * Copyright (C) 1997,1998	Jakub Jelinek (jj at sunsite.mff.cuni.cz)
++ * Copyright (C) 1997		David S. Miller (davem at caip.rutgers.edu)
+  * Copyright (C) 2000		Hewlett-Packard Co.
+  * Copyright (C) 2000		David Mosberger-Tang <davidm at hpl.hp.com>
+- * Copyright (C) 2000,2001,2002	Andi Kleen, SuSE Labs (x86-64 port) 
++ * Copyright (C) 2000,2001,2002	Andi Kleen, SuSE Labs (x86-64 port)
+  *
+  * These routines maintain argument size conversion between 32bit and 64bit
+- * environment. In 2.5 most of this should be moved to a generic directory. 
++ * environment. In 2.5 most of this should be moved to a generic directory.
+  *
+  * This file assumes that there is a hole at the end of user address space.
+- * 
+- * Some of the functions are LE specific currently. These are hopefully all marked.
+- * This should be fixed.
++ *
++ * Some of the functions are LE specific currently. These are
++ * hopefully all marked.  This should be fixed.
+  */
+ 
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+-#include <linux/fs.h> 
+-#include <linux/file.h> 
++#include <linux/fs.h>
++#include <linux/file.h>
+ #include <linux/signal.h>
+ #include <linux/syscalls.h>
+ #include <linux/resource.h>
+@@ -90,43 +90,44 @@ int cp_compat_stat(struct kstat *kbuf, struct compat_stat __user *ubuf)
+ 	if (sizeof(ino) < sizeof(kbuf->ino) && ino != kbuf->ino)
+ 		return -EOVERFLOW;
+ 	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct compat_stat)) ||
+-	    __put_user (old_encode_dev(kbuf->dev), &ubuf->st_dev) ||
+-	    __put_user (ino, &ubuf->st_ino) ||
+-	    __put_user (kbuf->mode, &ubuf->st_mode) ||
+-	    __put_user (kbuf->nlink, &ubuf->st_nlink) ||
+-	    __put_user (uid, &ubuf->st_uid) ||
+-	    __put_user (gid, &ubuf->st_gid) ||
+-	    __put_user (old_encode_dev(kbuf->rdev), &ubuf->st_rdev) ||
+-	    __put_user (kbuf->size, &ubuf->st_size) ||
+-	    __put_user (kbuf->atime.tv_sec, &ubuf->st_atime) ||
+-	    __put_user (kbuf->atime.tv_nsec, &ubuf->st_atime_nsec) ||
+-	    __put_user (kbuf->mtime.tv_sec, &ubuf->st_mtime) ||
+-	    __put_user (kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
+-	    __put_user (kbuf->ctime.tv_sec, &ubuf->st_ctime) ||
+-	    __put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
+-	    __put_user (kbuf->blksize, &ubuf->st_blksize) ||
+-	    __put_user (kbuf->blocks, &ubuf->st_blocks))
++	    __put_user(old_encode_dev(kbuf->dev), &ubuf->st_dev) ||
++	    __put_user(ino, &ubuf->st_ino) ||
++	    __put_user(kbuf->mode, &ubuf->st_mode) ||
++	    __put_user(kbuf->nlink, &ubuf->st_nlink) ||
++	    __put_user(uid, &ubuf->st_uid) ||
++	    __put_user(gid, &ubuf->st_gid) ||
++	    __put_user(old_encode_dev(kbuf->rdev), &ubuf->st_rdev) ||
++	    __put_user(kbuf->size, &ubuf->st_size) ||
++	    __put_user(kbuf->atime.tv_sec, &ubuf->st_atime) ||
++	    __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec) ||
++	    __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime) ||
++	    __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
++	    __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime) ||
++	    __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
++	    __put_user(kbuf->blksize, &ubuf->st_blksize) ||
++	    __put_user(kbuf->blocks, &ubuf->st_blocks))
+ 		return -EFAULT;
+ 	return 0;
+ }
+ 
+-asmlinkage long
+-sys32_truncate64(char __user * filename, unsigned long offset_low, unsigned long offset_high)
++asmlinkage long sys32_truncate64(char __user *filename,
++				 unsigned long offset_low,
++				 unsigned long offset_high)
+ {
+        return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
+ }
+ 
+-asmlinkage long
+-sys32_ftruncate64(unsigned int fd, unsigned long offset_low, unsigned long offset_high)
++asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
++				  unsigned long offset_high)
+ {
+        return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
+ }
+ 
+-/* Another set for IA32/LFS -- x86_64 struct stat is different due to 
+-   support for 64bit inode numbers. */
 -
--	if (!mm) return;
+-static int
+-cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
++/*
++ * Another set for IA32/LFS -- x86_64 struct stat is different due to
++ * support for 64bit inode numbers.
++ */
++static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ {
+ 	typeof(ubuf->st_uid) uid = 0;
+ 	typeof(ubuf->st_gid) gid = 0;
+@@ -134,38 +135,39 @@ cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ 	SET_GID(gid, stat->gid);
+ 	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+ 	    __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
+-	    __put_user (stat->ino, &ubuf->__st_ino) ||
+-	    __put_user (stat->ino, &ubuf->st_ino) ||
+-	    __put_user (stat->mode, &ubuf->st_mode) ||
+-	    __put_user (stat->nlink, &ubuf->st_nlink) ||
+-	    __put_user (uid, &ubuf->st_uid) ||
+-	    __put_user (gid, &ubuf->st_gid) ||
+-	    __put_user (huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
+-	    __put_user (stat->size, &ubuf->st_size) ||
+-	    __put_user (stat->atime.tv_sec, &ubuf->st_atime) ||
+-	    __put_user (stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
+-	    __put_user (stat->mtime.tv_sec, &ubuf->st_mtime) ||
+-	    __put_user (stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
+-	    __put_user (stat->ctime.tv_sec, &ubuf->st_ctime) ||
+-	    __put_user (stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
+-	    __put_user (stat->blksize, &ubuf->st_blksize) ||
+-	    __put_user (stat->blocks, &ubuf->st_blocks))
++	    __put_user(stat->ino, &ubuf->__st_ino) ||
++	    __put_user(stat->ino, &ubuf->st_ino) ||
++	    __put_user(stat->mode, &ubuf->st_mode) ||
++	    __put_user(stat->nlink, &ubuf->st_nlink) ||
++	    __put_user(uid, &ubuf->st_uid) ||
++	    __put_user(gid, &ubuf->st_gid) ||
++	    __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
++	    __put_user(stat->size, &ubuf->st_size) ||
++	    __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
++	    __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
++	    __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
++	    __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
++	    __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
++	    __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
++	    __put_user(stat->blksize, &ubuf->st_blksize) ||
++	    __put_user(stat->blocks, &ubuf->st_blocks))
+ 		return -EFAULT;
+ 	return 0;
+ }
+ 
+-asmlinkage long
+-sys32_stat64(char __user * filename, struct stat64 __user *statbuf)
++asmlinkage long sys32_stat64(char __user *filename,
++			     struct stat64 __user *statbuf)
+ {
+ 	struct kstat stat;
+ 	int ret = vfs_stat(filename, &stat);
++
+ 	if (!ret)
+ 		ret = cp_stat64(statbuf, &stat);
+ 	return ret;
+ }
+ 
+-asmlinkage long
+-sys32_lstat64(char __user * filename, struct stat64 __user *statbuf)
++asmlinkage long sys32_lstat64(char __user *filename,
++			      struct stat64 __user *statbuf)
+ {
+ 	struct kstat stat;
+ 	int ret = vfs_lstat(filename, &stat);
+@@ -174,8 +176,7 @@ sys32_lstat64(char __user * filename, struct stat64 __user *statbuf)
+ 	return ret;
+ }
+ 
+-asmlinkage long
+-sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
++asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+ {
+ 	struct kstat stat;
+ 	int ret = vfs_fstat(fd, &stat);
+@@ -184,9 +185,8 @@ sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+ 	return ret;
+ }
+ 
+-asmlinkage long
+-sys32_fstatat(unsigned int dfd, char __user *filename,
+-	      struct stat64 __user* statbuf, int flag)
++asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
++			      struct stat64 __user *statbuf, int flag)
+ {
+ 	struct kstat stat;
+ 	int error = -EINVAL;
+@@ -221,8 +221,7 @@ struct mmap_arg_struct {
+ 	unsigned int offset;
+ };
+ 
+-asmlinkage long
+-sys32_mmap(struct mmap_arg_struct __user *arg)
++asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
+ {
+ 	struct mmap_arg_struct a;
+ 	struct file *file = NULL;
+@@ -233,33 +232,33 @@ sys32_mmap(struct mmap_arg_struct __user *arg)
+ 		return -EFAULT;
+ 
+ 	if (a.offset & ~PAGE_MASK)
+-		return -EINVAL; 
++		return -EINVAL;
+ 
+ 	if (!(a.flags & MAP_ANONYMOUS)) {
+ 		file = fget(a.fd);
+ 		if (!file)
+ 			return -EBADF;
+ 	}
+-	
+-	mm = current->mm; 
+-	down_write(&mm->mmap_sem); 
+-	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, a.offset>>PAGE_SHIFT);
++
++	mm = current->mm;
++	down_write(&mm->mmap_sem);
++	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
++			       a.offset>>PAGE_SHIFT);
+ 	if (file)
+ 		fput(file);
+ 
+-	up_write(&mm->mmap_sem); 
++	up_write(&mm->mmap_sem);
+ 
+ 	return retval;
+ }
+ 
+-asmlinkage long 
+-sys32_mprotect(unsigned long start, size_t len, unsigned long prot)
++asmlinkage long sys32_mprotect(unsigned long start, size_t len,
++			       unsigned long prot)
+ {
+-	return sys_mprotect(start,len,prot); 
++	return sys_mprotect(start, len, prot);
+ }
+ 
+-asmlinkage long
+-sys32_pipe(int __user *fd)
++asmlinkage long sys32_pipe(int __user *fd)
+ {
+ 	int retval;
+ 	int fds[2];
+@@ -269,13 +268,13 @@ sys32_pipe(int __user *fd)
+ 		goto out;
+ 	if (copy_to_user(fd, fds, sizeof(fds)))
+ 		retval = -EFAULT;
+-  out:
++out:
+ 	return retval;
+ }
+ 
+-asmlinkage long
+-sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
+-		   struct sigaction32 __user *oact,  unsigned int sigsetsize)
++asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
++				   struct sigaction32 __user *oact,
++				   unsigned int sigsetsize)
+ {
+ 	struct k_sigaction new_ka, old_ka;
+ 	int ret;
+@@ -291,12 +290,17 @@ sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
+ 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ 		    __get_user(handler, &act->sa_handler) ||
+ 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+-		    __get_user(restorer, &act->sa_restorer)||
+-		    __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t)))
++		    __get_user(restorer, &act->sa_restorer) ||
++		    __copy_from_user(&set32, &act->sa_mask,
++				     sizeof(compat_sigset_t)))
+ 			return -EFAULT;
+ 		new_ka.sa.sa_handler = compat_ptr(handler);
+ 		new_ka.sa.sa_restorer = compat_ptr(restorer);
+-		/* FIXME: here we rely on _COMPAT_NSIG_WORS to be >= than _NSIG_WORDS << 1 */
++
++		/*
++		 * FIXME: here we rely on _COMPAT_NSIG_WORS to be >=
++		 * than _NSIG_WORDS << 1
++		 */
+ 		switch (_NSIG_WORDS) {
+ 		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
+ 				| (((long)set32.sig[7]) << 32);
+@@ -312,7 +316,10 @@ sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
+ 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+ 
+ 	if (!ret && oact) {
+-		/* FIXME: here we rely on _COMPAT_NSIG_WORS to be >= than _NSIG_WORDS << 1 */
++		/*
++		 * FIXME: here we rely on _COMPAT_NSIG_WORS to be >=
++		 * than _NSIG_WORDS << 1
++		 */
+ 		switch (_NSIG_WORDS) {
+ 		case 4:
+ 			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
+@@ -328,23 +335,26 @@ sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
+ 			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
+ 		}
+ 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+-		    __put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
+-		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer) ||
++		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
++			       &oact->sa_handler) ||
++		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
++			       &oact->sa_restorer) ||
+ 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+-		    __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t)))
++		    __copy_to_user(&oact->sa_mask, &set32,
++				   sizeof(compat_sigset_t)))
+ 			return -EFAULT;
+ 	}
+ 
+ 	return ret;
+ }
+ 
+-asmlinkage long
+-sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact)
++asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
++				struct old_sigaction32 __user *oact)
+ {
+-        struct k_sigaction new_ka, old_ka;
+-        int ret;
++	struct k_sigaction new_ka, old_ka;
++	int ret;
+ 
+-        if (act) {
++	if (act) {
+ 		compat_old_sigset_t mask;
+ 		compat_uptr_t handler, restorer;
+ 
+@@ -359,33 +369,35 @@ sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigacti
+ 		new_ka.sa.sa_restorer = compat_ptr(restorer);
+ 
+ 		siginitset(&new_ka.sa.sa_mask, mask);
+-        }
++	}
+ 
+-        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
++	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+ 
+ 	if (!ret && oact) {
+ 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+-		    __put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
+-		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer) ||
++		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
++			       &oact->sa_handler) ||
++		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
++			       &oact->sa_restorer) ||
+ 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+ 			return -EFAULT;
+-        }
++	}
+ 
+ 	return ret;
+ }
+ 
+-asmlinkage long
+-sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+-			compat_sigset_t __user *oset, unsigned int sigsetsize)
++asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
++				     compat_sigset_t __user *oset,
++				     unsigned int sigsetsize)
+ {
+ 	sigset_t s;
+ 	compat_sigset_t s32;
+ 	int ret;
+ 	mm_segment_t old_fs = get_fs();
+-	
++
+ 	if (set) {
+-		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
++		if (copy_from_user(&s32, set, sizeof(compat_sigset_t)))
+ 			return -EFAULT;
+ 		switch (_NSIG_WORDS) {
+ 		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+@@ -394,13 +406,14 @@ sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ 		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+ 		}
+ 	}
+-	set_fs (KERNEL_DS);
++	set_fs(KERNEL_DS);
+ 	ret = sys_rt_sigprocmask(how,
+ 				 set ? (sigset_t __user *)&s : NULL,
+ 				 oset ? (sigset_t __user *)&s : NULL,
+-				 sigsetsize); 
+-	set_fs (old_fs);
+-	if (ret) return ret;
++				 sigsetsize);
++	set_fs(old_fs);
++	if (ret)
++		return ret;
+ 	if (oset) {
+ 		switch (_NSIG_WORDS) {
+ 		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+@@ -408,52 +421,49 @@ sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ 		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+ 		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+ 		}
+-		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
++		if (copy_to_user(oset, &s32, sizeof(compat_sigset_t)))
+ 			return -EFAULT;
+ 	}
+ 	return 0;
+ }
+ 
+-static inline long
+-get_tv32(struct timeval *o, struct compat_timeval __user *i)
++static inline long get_tv32(struct timeval *o, struct compat_timeval __user *i)
+ {
+-	int err = -EFAULT; 
+-	if (access_ok(VERIFY_READ, i, sizeof(*i))) { 
++	int err = -EFAULT;
++
++	if (access_ok(VERIFY_READ, i, sizeof(*i))) {
+ 		err = __get_user(o->tv_sec, &i->tv_sec);
+ 		err |= __get_user(o->tv_usec, &i->tv_usec);
+ 	}
+-	return err; 
++	return err;
+ }
+ 
+-static inline long
+-put_tv32(struct compat_timeval __user *o, struct timeval *i)
++static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
+ {
+ 	int err = -EFAULT;
+-	if (access_ok(VERIFY_WRITE, o, sizeof(*o))) { 
++
++	if (access_ok(VERIFY_WRITE, o, sizeof(*o))) {
+ 		err = __put_user(i->tv_sec, &o->tv_sec);
+ 		err |= __put_user(i->tv_usec, &o->tv_usec);
+-	} 
+-	return err; 
++	}
++	return err;
+ }
+ 
+-extern unsigned int alarm_setitimer(unsigned int seconds);
 -
--	n_pages = ((end - start) >> PAGE_SHIFT);
--	if (n_pages >= 64) {
--		sh64_icache_inv_all();
--	} else {
--		unsigned long aligned_start;
--		unsigned long eaddr;
--		unsigned long after_last_page_start;
--		unsigned long mm_asid, current_asid;
--		unsigned long long flags = 0ULL;
+-asmlinkage long
+-sys32_alarm(unsigned int seconds)
++asmlinkage long sys32_alarm(unsigned int seconds)
+ {
+ 	return alarm_setitimer(seconds);
+ }
+ 
+-/* Translations due to time_t size differences.  Which affects all
+-   sorts of things, like timeval and itimerval.  */
 -
--		mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
--		current_asid = get_asid();
+-extern struct timezone sys_tz;
 -
--		if (mm_asid != current_asid) {
--			/* Switch ASID and run the invalidate loop under cli */
--			local_irq_save(flags);
--			switch_and_save_asid(mm_asid);
--		}
+-asmlinkage long
+-sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
++/*
++ * Translations due to time_t size differences. Which affects all
++ * sorts of things, like timeval and itimerval.
++ */
++asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv,
++				   struct timezone __user *tz)
+ {
+ 	if (tv) {
+ 		struct timeval ktv;
++
+ 		do_gettimeofday(&ktv);
+ 		if (put_tv32(tv, &ktv))
+ 			return -EFAULT;
+@@ -465,14 +475,14 @@ sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
+ 	return 0;
+ }
+ 
+-asmlinkage long
+-sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
++asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv,
++				   struct timezone __user *tz)
+ {
+ 	struct timeval ktv;
+ 	struct timespec kts;
+ 	struct timezone ktz;
+ 
+- 	if (tv) {
++	if (tv) {
+ 		if (get_tv32(&ktv, tv))
+ 			return -EFAULT;
+ 		kts.tv_sec = ktv.tv_sec;
+@@ -494,8 +504,7 @@ struct sel_arg_struct {
+ 	unsigned int tvp;
+ };
+ 
+-asmlinkage long
+-sys32_old_select(struct sel_arg_struct __user *arg)
++asmlinkage long sys32_old_select(struct sel_arg_struct __user *arg)
+ {
+ 	struct sel_arg_struct a;
+ 
+@@ -505,50 +514,45 @@ sys32_old_select(struct sel_arg_struct __user *arg)
+ 				 compat_ptr(a.exp), compat_ptr(a.tvp));
+ }
+ 
+-extern asmlinkage long
+-compat_sys_wait4(compat_pid_t pid, compat_uint_t * stat_addr, int options,
+-		 struct compat_rusage *ru);
 -
--		aligned_start = start & PAGE_MASK;
--		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
+-asmlinkage long
+-sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
++asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
++			      int options)
+ {
+ 	return compat_sys_wait4(pid, stat_addr, options, NULL);
+ }
+ 
+ /* 32-bit timeval and related flotsam.  */
+ 
+-asmlinkage long
+-sys32_sysfs(int option, u32 arg1, u32 arg2)
++asmlinkage long sys32_sysfs(int option, u32 arg1, u32 arg2)
+ {
+ 	return sys_sysfs(option, arg1, arg2);
+ }
+ 
+-asmlinkage long
+-sys32_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
++asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
++				    struct compat_timespec __user *interval)
+ {
+ 	struct timespec t;
+ 	int ret;
+-	mm_segment_t old_fs = get_fs ();
+-	
+-	set_fs (KERNEL_DS);
++	mm_segment_t old_fs = get_fs();
++
++	set_fs(KERNEL_DS);
+ 	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
+-	set_fs (old_fs);
++	set_fs(old_fs);
+ 	if (put_compat_timespec(&t, interval))
+ 		return -EFAULT;
+ 	return ret;
+ }
+ 
+-asmlinkage long
+-sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
++asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
++				    compat_size_t sigsetsize)
+ {
+ 	sigset_t s;
+ 	compat_sigset_t s32;
+ 	int ret;
+ 	mm_segment_t old_fs = get_fs();
+-		
+-	set_fs (KERNEL_DS);
++
++	set_fs(KERNEL_DS);
+ 	ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
+-	set_fs (old_fs);
++	set_fs(old_fs);
+ 	if (!ret) {
+ 		switch (_NSIG_WORDS) {
+ 		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+@@ -556,30 +560,29 @@ sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
+ 		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+ 		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+ 		}
+-		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
++		if (copy_to_user(set, &s32, sizeof(compat_sigset_t)))
+ 			return -EFAULT;
+ 	}
+ 	return ret;
+ }
+ 
+-asmlinkage long
+-sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
++asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
++				      compat_siginfo_t __user *uinfo)
+ {
+ 	siginfo_t info;
+ 	int ret;
+ 	mm_segment_t old_fs = get_fs();
+-	
++
+ 	if (copy_siginfo_from_user32(&info, uinfo))
+ 		return -EFAULT;
+-	set_fs (KERNEL_DS);
++	set_fs(KERNEL_DS);
+ 	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
+-	set_fs (old_fs);
++	set_fs(old_fs);
+ 	return ret;
+ }
+ 
+ /* These are here just in case some old ia32 binary calls it. */
+-asmlinkage long
+-sys32_pause(void)
++asmlinkage long sys32_pause(void)
+ {
+ 	current->state = TASK_INTERRUPTIBLE;
+ 	schedule();
+@@ -599,25 +602,25 @@ struct sysctl_ia32 {
+ };
+ 
+ 
+-asmlinkage long
+-sys32_sysctl(struct sysctl_ia32 __user *args32)
++asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *args32)
+ {
+ 	struct sysctl_ia32 a32;
+-	mm_segment_t old_fs = get_fs ();
++	mm_segment_t old_fs = get_fs();
+ 	void __user *oldvalp, *newvalp;
+ 	size_t oldlen;
+ 	int __user *namep;
+ 	long ret;
+ 
+-	if (copy_from_user(&a32, args32, sizeof (a32)))
++	if (copy_from_user(&a32, args32, sizeof(a32)))
+ 		return -EFAULT;
+ 
+ 	/*
+-	 * We need to pre-validate these because we have to disable address checking
+-	 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
+-	 * user specifying bad addresses here.  Well, since we're dealing with 32 bit
+-	 * addresses, we KNOW that access_ok() will always succeed, so this is an
+-	 * expensive NOP, but so what...
++	 * We need to pre-validate these because we have to disable
++	 * address checking before calling do_sysctl() because of
++	 * OLDLEN but we can't run the risk of the user specifying bad
++	 * addresses here.  Well, since we're dealing with 32 bit
++	 * addresses, we KNOW that access_ok() will always succeed, so
++	 * this is an expensive NOP, but so what...
+ 	 */
+ 	namep = compat_ptr(a32.name);
+ 	oldvalp = compat_ptr(a32.oldval);
+@@ -636,34 +639,34 @@ sys32_sysctl(struct sysctl_ia32 __user *args32)
+ 	unlock_kernel();
+ 	set_fs(old_fs);
+ 
+-	if (oldvalp && put_user (oldlen, (int __user *)compat_ptr(a32.oldlenp)))
++	if (oldvalp && put_user(oldlen, (int __user *)compat_ptr(a32.oldlenp)))
+ 		return -EFAULT;
+ 
+ 	return ret;
+ }
+ #endif
+ 
+-/* warning: next two assume little endian */ 
+-asmlinkage long
+-sys32_pread(unsigned int fd, char __user *ubuf, u32 count, u32 poslo, u32 poshi)
++/* warning: next two assume little endian */
++asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
++			    u32 poslo, u32 poshi)
+ {
+ 	return sys_pread64(fd, ubuf, count,
+ 			 ((loff_t)AA(poshi) << 32) | AA(poslo));
+ }
+ 
+-asmlinkage long
+-sys32_pwrite(unsigned int fd, char __user *ubuf, u32 count, u32 poslo, u32 poshi)
++asmlinkage long sys32_pwrite(unsigned int fd, char __user *ubuf, u32 count,
++			     u32 poslo, u32 poshi)
+ {
+ 	return sys_pwrite64(fd, ubuf, count,
+ 			  ((loff_t)AA(poshi) << 32) | AA(poslo));
+ }
+ 
+ 
+-asmlinkage long
+-sys32_personality(unsigned long personality)
++asmlinkage long sys32_personality(unsigned long personality)
+ {
+ 	int ret;
+-	if (personality(current->personality) == PER_LINUX32 && 
++
++	if (personality(current->personality) == PER_LINUX32 &&
+ 		personality == PER_LINUX)
+ 		personality = PER_LINUX32;
+ 	ret = sys_personality(personality);
+@@ -672,34 +675,33 @@ sys32_personality(unsigned long personality)
+ 	return ret;
+ }
+ 
+-asmlinkage long
+-sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
++asmlinkage long sys32_sendfile(int out_fd, int in_fd,
++			       compat_off_t __user *offset, s32 count)
+ {
+ 	mm_segment_t old_fs = get_fs();
+ 	int ret;
+ 	off_t of;
+-	
++
+ 	if (offset && get_user(of, offset))
+ 		return -EFAULT;
+-		
++
+ 	set_fs(KERNEL_DS);
+ 	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
+ 			   count);
+ 	set_fs(old_fs);
+-	
++
+ 	if (offset && put_user(of, offset))
+ 		return -EFAULT;
+-		
+ 	return ret;
+ }
+ 
+ asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
+-	unsigned long prot, unsigned long flags,
+-	unsigned long fd, unsigned long pgoff)
++			    unsigned long prot, unsigned long flags,
++			    unsigned long fd, unsigned long pgoff)
+ {
+ 	struct mm_struct *mm = current->mm;
+ 	unsigned long error;
+-	struct file * file = NULL;
++	struct file *file = NULL;
+ 
+ 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ 	if (!(flags & MAP_ANONYMOUS)) {
+@@ -717,36 +719,35 @@ asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
+ 	return error;
+ }
+ 
+-asmlinkage long sys32_olduname(struct oldold_utsname __user * name)
++asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
+ {
++	char *arch = "x86_64";
+ 	int err;
+ 
+ 	if (!name)
+ 		return -EFAULT;
+ 	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
+ 		return -EFAULT;
+-  
+-  	down_read(&uts_sem);
 -
--		while (aligned_start < after_last_page_start) {
--			struct vm_area_struct *vma;
--			unsigned long vma_end;
--			vma = find_vma(mm, aligned_start);
--			if (!vma || (aligned_start <= vma->vm_end)) {
--				/* Avoid getting stuck in an error condition */
--				aligned_start += PAGE_SIZE;
--				continue;
--			}
--			vma_end = vma->vm_end;
--			if (vma->vm_flags & VM_EXEC) {
--				/* Executable */
--				eaddr = aligned_start;
--				while (eaddr < vma_end) {
--					sh64_icache_inv_user_page(vma, eaddr);
--					eaddr += PAGE_SIZE;
--				}
--			}
--			aligned_start = vma->vm_end; /* Skip to start of next region */
--		}
--		if (mm_asid != current_asid) {
--			switch_and_save_asid(current_asid);
--			local_irq_restore(flags);
--		}
+-	err = __copy_to_user(&name->sysname,&utsname()->sysname,
+-				__OLD_UTS_LEN);
+-	err |= __put_user(0,name->sysname+__OLD_UTS_LEN);
+-	err |= __copy_to_user(&name->nodename,&utsname()->nodename,
+-				__OLD_UTS_LEN);
+-	err |= __put_user(0,name->nodename+__OLD_UTS_LEN);
+-	err |= __copy_to_user(&name->release,&utsname()->release,
+-				__OLD_UTS_LEN);
+-	err |= __put_user(0,name->release+__OLD_UTS_LEN);
+-	err |= __copy_to_user(&name->version,&utsname()->version,
+-				__OLD_UTS_LEN);
+-	err |= __put_user(0,name->version+__OLD_UTS_LEN);
+-	{
+-		char *arch = "x86_64";
+-		if (personality(current->personality) == PER_LINUX32)
+-			arch = "i686";
+-		 
+-		err |= __copy_to_user(&name->machine, arch, strlen(arch)+1);
+-	}
++
++	down_read(&uts_sem);
++
++	err = __copy_to_user(&name->sysname, &utsname()->sysname,
++			     __OLD_UTS_LEN);
++	err |= __put_user(0, name->sysname+__OLD_UTS_LEN);
++	err |= __copy_to_user(&name->nodename, &utsname()->nodename,
++			      __OLD_UTS_LEN);
++	err |= __put_user(0, name->nodename+__OLD_UTS_LEN);
++	err |= __copy_to_user(&name->release, &utsname()->release,
++			      __OLD_UTS_LEN);
++	err |= __put_user(0, name->release+__OLD_UTS_LEN);
++	err |= __copy_to_user(&name->version, &utsname()->version,
++			      __OLD_UTS_LEN);
++	err |= __put_user(0, name->version+__OLD_UTS_LEN);
++
++	if (personality(current->personality) == PER_LINUX32)
++		arch = "i686";
++
++	err |= __copy_to_user(&name->machine, arch, strlen(arch) + 1);
+ 
+ 	up_read(&uts_sem);
+ 
+@@ -755,17 +756,19 @@ asmlinkage long sys32_olduname(struct oldold_utsname __user * name)
+ 	return err;
+ }
+ 
+-long sys32_uname(struct old_utsname __user * name)
++long sys32_uname(struct old_utsname __user *name)
+ {
+ 	int err;
++
+ 	if (!name)
+ 		return -EFAULT;
+ 	down_read(&uts_sem);
+-	err = copy_to_user(name, utsname(), sizeof (*name));
++	err = copy_to_user(name, utsname(), sizeof(*name));
+ 	up_read(&uts_sem);
+-	if (personality(current->personality) == PER_LINUX32) 
++	if (personality(current->personality) == PER_LINUX32)
+ 		err |= copy_to_user(&name->machine, "i686", 5);
+-	return err?-EFAULT:0;
++
++	return err ? -EFAULT : 0;
+ }
+ 
+ long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
+@@ -773,27 +776,28 @@ long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
+ 	struct ustat u;
+ 	mm_segment_t seg;
+ 	int ret;
+-	
+-	seg = get_fs(); 
+-	set_fs(KERNEL_DS); 
++
++	seg = get_fs();
++	set_fs(KERNEL_DS);
+ 	ret = sys_ustat(dev, (struct ustat __user *)&u);
+ 	set_fs(seg);
+-	if (ret >= 0) { 
+-		if (!access_ok(VERIFY_WRITE,u32p,sizeof(struct ustat32)) || 
+-		    __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
+-		    __put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
+-		    __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
+-		    __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
+-			ret = -EFAULT;
 -	}
--}
--
--static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
--						unsigned long start, int len)
--{
--
--	/* Invalidate a small range of user context I-cache, not necessarily
--	   page (or even cache-line) aligned. */
--
--	unsigned long long eaddr = start;
--	unsigned long long eaddr_end = start + len;
--	unsigned long current_asid, mm_asid;
--	unsigned long long flags;
--	unsigned long long epage_start;
++	if (ret < 0)
++		return ret;
++
++	if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) ||
++	    __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
++	    __put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
++	    __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
++	    __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
++		ret = -EFAULT;
+ 	return ret;
+-} 
++}
+ 
+ asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
+ 			     compat_uptr_t __user *envp, struct pt_regs *regs)
+ {
+ 	long error;
+-	char * filename;
++	char *filename;
+ 
+ 	filename = getname(name);
+ 	error = PTR_ERR(filename);
+@@ -812,18 +816,19 @@ asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
+ asmlinkage long sys32_clone(unsigned int clone_flags, unsigned int newsp,
+ 			    struct pt_regs *regs)
+ {
+-	void __user *parent_tid = (void __user *)regs->rdx;
+-	void __user *child_tid = (void __user *)regs->rdi;
++	void __user *parent_tid = (void __user *)regs->dx;
++	void __user *child_tid = (void __user *)regs->di;
++
+ 	if (!newsp)
+-		newsp = regs->rsp;
+-        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
++		newsp = regs->sp;
++	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
+ }
+ 
+ /*
+- * Some system calls that need sign extended arguments. This could be done by a generic wrapper.
+- */ 
 -
--	/* Since this is used inside ptrace, the ASID in the mm context
--	   typically won't match current_asid.  We'll have to switch ASID to do
--	   this.  For safety, and given that the range will be small, do all
--	   this under cli.
+-long sys32_lseek (unsigned int fd, int offset, unsigned int whence)
++ * Some system calls that need sign extended arguments. This could be
++ * done by a generic wrapper.
++ */
++long sys32_lseek(unsigned int fd, int offset, unsigned int whence)
+ {
+ 	return sys_lseek(fd, offset, whence);
+ }
+@@ -832,49 +837,52 @@ long sys32_kill(int pid, int sig)
+ {
+ 	return sys_kill(pid, sig);
+ }
+- 
+-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, 
++
++long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
+ 			__u32 len_low, __u32 len_high, int advice)
+-{ 
++{
+ 	return sys_fadvise64_64(fd,
+ 			       (((u64)offset_high)<<32) | offset_low,
+ 			       (((u64)len_high)<<32) | len_low,
+-			       advice); 
+-} 
++				advice);
++}
+ 
+ long sys32_vm86_warning(void)
+-{ 
++{
+ 	struct task_struct *me = current;
+ 	static char lastcomm[sizeof(me->comm)];
++
+ 	if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
+-		compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
+-		       me->comm);
++		compat_printk(KERN_INFO
++			      "%s: vm86 mode not supported on 64 bit kernel\n",
++			      me->comm);
+ 		strncpy(lastcomm, me->comm, sizeof(lastcomm));
+-	} 
++	}
+ 	return -ENOSYS;
+-} 
++}
+ 
+ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
+-			  char __user * buf, size_t len)
++			  char __user *buf, size_t len)
+ {
+ 	return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
+ }
+ 
+-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, size_t count)
++asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
++				   size_t count)
+ {
+ 	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
+ }
+ 
+ asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
+-			   unsigned n_low, unsigned n_hi,  int flags)
++				      unsigned n_low, unsigned n_hi,  int flags)
+ {
+ 	return sys_sync_file_range(fd,
+ 				   ((u64)off_hi << 32) | off_low,
+ 				   ((u64)n_hi << 32) | n_low, flags);
+ }
+ 
+-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi, size_t len,
+-		     int advice)
++asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
++				size_t len, int advice)
+ {
+ 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
+ 				len, advice);
+diff --git a/arch/x86/ia32/syscall32.c b/arch/x86/ia32/syscall32.c
+deleted file mode 100644
+index 15013ba..0000000
+--- a/arch/x86/ia32/syscall32.c
++++ /dev/null
+@@ -1,83 +0,0 @@
+-/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
 -
--	   Note, there is a hazard that the ASID in mm->context is no longer
--	   actually associated with mm, i.e. if the mm->context has started a
--	   new cycle since mm was last active.  However, this is just a
--	   performance issue: all that happens is that we invalidate lines
--	   belonging to another mm, so the owning process has to refill them
--	   when that mm goes live again.  mm itself can't have any cache
--	   entries because there will have been a flush_cache_all when the new
--	   mm->context cycle started. */
+-/* vsyscall handling for 32bit processes. Map a stub page into it 
+-   on demand because 32bit cannot reach the kernel's fixmaps */
 -
--	/* Align to start of cache line.  Otherwise, suppose len==8 and start
--	   was at 32N+28 : the last 4 bytes wouldn't get invalidated. */
--	eaddr = start & L1_CACHE_ALIGN_MASK;
--	eaddr_end = start + len;
+-#include <linux/mm.h>
+-#include <linux/string.h>
+-#include <linux/kernel.h>
+-#include <linux/gfp.h>
+-#include <linux/init.h>
+-#include <linux/stringify.h>
+-#include <linux/security.h>
+-#include <asm/proto.h>
+-#include <asm/tlbflush.h>
+-#include <asm/ia32_unistd.h>
+-#include <asm/vsyscall32.h>
 -
--	local_irq_save(flags);
--	mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
--	current_asid = switch_and_save_asid(mm_asid);
+-extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
+-extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
+-extern int sysctl_vsyscall32;
 -
--	epage_start = eaddr & PAGE_MASK;
+-static struct page *syscall32_pages[1];
+-static int use_sysenter = -1;
 -
--	while (eaddr < eaddr_end)
--	{
--		asm __volatile__("icbi %0, 0" : : "r" (eaddr));
--		eaddr += L1_CACHE_BYTES;
--	}
--	switch_and_save_asid(current_asid);
--	local_irq_restore(flags);
--}
+-struct linux_binprm;
 -
--static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
+-/* Setup a VMA at program startup for the vsyscall page */
+-int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
 -{
--	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
--	   cache hit on the virtual tag the instruction ends there, without a
--	   TLB lookup. */
--
--	unsigned long long aligned_start;
--	unsigned long long ull_end;
--	unsigned long long addr;
--
--	ull_end = end;
+-	struct mm_struct *mm = current->mm;
+-	int ret;
 -
--	/* Just invalidate over the range using the natural addresses.  TLB
--	   miss handling will be OK (TBC).  Since it's for the current process,
--	   either we're already in the right ASID context, or the ASIDs have
--	   been recycled since we were last active in which case we might just
--	   invalidate another processes I-cache entries : no worries, just a
--	   performance drop for him. */
--	aligned_start = start & L1_CACHE_ALIGN_MASK;
--	addr = aligned_start;
--	while (addr < ull_end) {
--		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
--		asm __volatile__ ("nop");
--		asm __volatile__ ("nop");
--		addr += L1_CACHE_BYTES;
--	}
+-	down_write(&mm->mmap_sem);
+-	/*
+-	 * MAYWRITE to allow gdb to COW and set breakpoints
+-	 *
+-	 * Make sure the vDSO gets into every core dump.
+-	 * Dumping its contents makes post-mortem fully interpretable later
+-	 * without matching up the same kernel and hardware config to see
+-	 * what PC values meant.
+-	 */
+-	/* Could randomize here */
+-	ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE,
+-				      VM_READ|VM_EXEC|
+-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+-				      VM_ALWAYSDUMP,
+-				      syscall32_pages);
+-	up_write(&mm->mmap_sem);
+-	return ret;
 -}
 -
--#endif /* !CONFIG_ICACHE_DISABLED */
--
--/****************************************************************************/
--
--#ifndef CONFIG_DCACHE_DISABLED
--
--/* Buffer used as the target of alloco instructions to purge data from cache
--   sets by natural eviction. -- RPC */
--#define DUMMY_ALLOCO_AREA_SIZE L1_CACHE_SIZE_BYTES + (1024 * 4)
--static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
--
--/****************************************************************************/
+-static int __init init_syscall32(void)
+-{ 
+-	char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
+-	if (!syscall32_page) 
+-		panic("Cannot allocate syscall32 page"); 
+-	syscall32_pages[0] = virt_to_page(syscall32_page);
+- 	if (use_sysenter > 0) {
+- 		memcpy(syscall32_page, syscall32_sysenter,
+- 		       syscall32_sysenter_end - syscall32_sysenter);
+- 	} else {
+-  		memcpy(syscall32_page, syscall32_syscall,
+-  		       syscall32_syscall_end - syscall32_syscall);
+-  	}	
+-	return 0;
+-} 
+-	
+-__initcall(init_syscall32); 
 -
--static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
+-/* May not be __init: called during resume */
+-void syscall32_cpu_init(void)
 -{
--	/* Purge all ways in a particular block of sets, specified by the base
--	   set number and number of sets.  Can handle wrap-around, if that's
--	   needed.  */
--
--	int dummy_buffer_base_set;
--	unsigned long long eaddr, eaddr0, eaddr1;
--	int j;
--	int set_offset;
--
--	dummy_buffer_base_set = ((int)&dummy_alloco_area & cpu_data->dcache.idx_mask) >> cpu_data->dcache.entry_shift;
--	set_offset = sets_to_purge_base - dummy_buffer_base_set;
--
--	for (j=0; j<n_sets; j++, set_offset++) {
--		set_offset &= (cpu_data->dcache.sets - 1);
--		eaddr0 = (unsigned long long)dummy_alloco_area + (set_offset << cpu_data->dcache.entry_shift);
--
--		/* Do one alloco which hits the required set per cache way.  For
--		   write-back mode, this will purge the #ways resident lines.   There's
--		   little point unrolling this loop because the allocos stall more if
--		   they're too close together. */
--		eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
--		for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
--			asm __volatile__ ("alloco %0, 0" : : "r" (eaddr));
--			asm __volatile__ ("synco"); /* TAKum03020 */
--		}
--
--		eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
--		for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
--			/* Load from each address.  Required because alloco is a NOP if
--			   the cache is write-through.  Write-through is a config option. */
--			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
--				*(volatile unsigned char *)(int)eaddr;
--		}
--	}
+-	if (use_sysenter < 0)
+- 		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
 -
--	/* Don't use OCBI to invalidate the lines.  That costs cycles directly.
--	   If the dummy block is just left resident, it will naturally get
--	   evicted as required.  */
+-	/* Load these always in case some future AMD CPU supports
+-	   SYSENTER from compat mode too. */
+-	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+-	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
+-	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 -
--	return;
+-	wrmsrl(MSR_CSTAR, ia32_cstar_target);
 -}
+diff --git a/arch/x86/ia32/syscall32_syscall.S b/arch/x86/ia32/syscall32_syscall.S
+deleted file mode 100644
+index 933f0f0..0000000
+--- a/arch/x86/ia32/syscall32_syscall.S
++++ /dev/null
+@@ -1,17 +0,0 @@
+-/* 32bit VDSOs mapped into user space. */
 -
--/****************************************************************************/
--
--static void sh64_dcache_purge_all(void)
--{
--	/* Purge the entire contents of the dcache.  The most efficient way to
--	   achieve this is to use alloco instructions on a region of unused
--	   memory equal in size to the cache, thereby causing the current
--	   contents to be discarded by natural eviction.  The alternative,
--	   namely reading every tag, setting up a mapping for the corresponding
--	   page and doing an OCBP for the line, would be much more expensive.
--	   */
--
--	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
+-	.section ".init.data","aw"
 -
--	return;
+-	.globl syscall32_syscall
+-	.globl syscall32_syscall_end
 -
--}
+-syscall32_syscall:
+-	.incbin "arch/x86/ia32/vsyscall-syscall.so"
+-syscall32_syscall_end:
 -
--/****************************************************************************/
+-	.globl syscall32_sysenter
+-	.globl syscall32_sysenter_end
 -
--static void sh64_dcache_purge_kernel_range(unsigned long start, unsigned long end)
--{
--	/* Purge the range of addresses [start,end] from the D-cache.  The
--	   addresses lie in the superpage mapping.  There's no harm if we
--	   overpurge at either end - just a small performance loss. */
--	unsigned long long ullend, addr, aligned_start;
--#if (NEFF == 32)
--	aligned_start = (unsigned long long)(signed long long)(signed long) start;
--#else
--#error "NEFF != 32"
--#endif
--	aligned_start &= L1_CACHE_ALIGN_MASK;
--	addr = aligned_start;
--#if (NEFF == 32)
--	ullend = (unsigned long long) (signed long long) (signed long) end;
--#else
--#error "NEFF != 32"
--#endif
--	while (addr <= ullend) {
--		asm __volatile__ ("ocbp %0, 0" : : "r" (addr));
--		addr += L1_CACHE_BYTES;
--	}
--	return;
--}
+-syscall32_sysenter:
+-	.incbin "arch/x86/ia32/vsyscall-sysenter.so"
+-syscall32_sysenter_end:
+diff --git a/arch/x86/ia32/tls32.c b/arch/x86/ia32/tls32.c
+deleted file mode 100644
+index 1cc4340..0000000
+--- a/arch/x86/ia32/tls32.c
++++ /dev/null
+@@ -1,163 +0,0 @@
+-#include <linux/kernel.h>
+-#include <linux/errno.h>
+-#include <linux/sched.h>
+-#include <linux/user.h>
 -
--/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
--   anything else in the kernel */
--#define MAGIC_PAGE0_START 0xffffffffec000000ULL
+-#include <asm/uaccess.h>
+-#include <asm/desc.h>
+-#include <asm/system.h>
+-#include <asm/ldt.h>
+-#include <asm/processor.h>
+-#include <asm/proto.h>
 -
--static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned long eaddr)
+-/*
+- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+- */
+-static int get_free_idx(void)
 -{
--	/* Purge the physical page 'paddr' from the cache.  It's known that any
--	   cache lines requiring attention have the same page colour as the the
--	   address 'eaddr'.
--
--	   This relies on the fact that the D-cache matches on physical tags
--	   when no virtual tag matches.  So we create an alias for the original
--	   page and purge through that.  (Alternatively, we could have done
--	   this by switching ASID to match the original mapping and purged
--	   through that, but that involves ASID switching cost + probably a
--	   TLBMISS + refill anyway.)
--	   */
--
--	unsigned long long magic_page_start;
--	unsigned long long magic_eaddr, magic_eaddr_end;
--
--	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
--
--	/* As long as the kernel is not pre-emptible, this doesn't need to be
--	   under cli/sti. */
--
--	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
--
--	magic_eaddr = magic_page_start;
--	magic_eaddr_end = magic_eaddr + PAGE_SIZE;
--	while (magic_eaddr < magic_eaddr_end) {
--		/* Little point in unrolling this loop - the OCBPs are blocking
--		   and won't go any quicker (i.e. the loop overhead is parallel
--		   to part of the OCBP execution.) */
--		asm __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
--		magic_eaddr += L1_CACHE_BYTES;
--	}
+-	struct thread_struct *t = &current->thread;
+-	int idx;
 -
--	sh64_teardown_dtlb_cache_slot();
+-	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+-		if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
+-			return idx + GDT_ENTRY_TLS_MIN;
+-	return -ESRCH;
 -}
 -
--/****************************************************************************/
--
--static void sh64_dcache_purge_phy_page(unsigned long paddr)
+-/*
+- * Set a given TLS descriptor:
+- * When you want addresses > 32bit use arch_prctl() 
+- */
+-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 -{
--	/* Pure a page given its physical start address, by creating a
--	   temporary 1 page mapping and purging across that.  Even if we know
--	   the virtual address (& vma or mm) of the page, the method here is
--	   more elegant because it avoids issues of coping with page faults on
--	   the purge instructions (i.e. no special-case code required in the
--	   critical path in the TLB miss handling). */
--
--	unsigned long long eaddr_start, eaddr, eaddr_end;
--	int i;
--
--	/* As long as the kernel is not pre-emptible, this doesn't need to be
--	   under cli/sti. */
+-	struct user_desc info;
+-	struct n_desc_struct *desc;
+-	int cpu, idx;
 -
--	eaddr_start = MAGIC_PAGE0_START;
--	for (i=0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
--		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
+-	if (copy_from_user(&info, u_info, sizeof(info)))
+-		return -EFAULT;
 -
--		eaddr = eaddr_start;
--		eaddr_end = eaddr + PAGE_SIZE;
--		while (eaddr < eaddr_end) {
--			asm __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
--			eaddr += L1_CACHE_BYTES;
--		}
+-	idx = info.entry_number;
 -
--		sh64_teardown_dtlb_cache_slot();
--		eaddr_start += PAGE_SIZE;
+-	/*
+-	 * index -1 means the kernel should try to find and
+-	 * allocate an empty descriptor:
+-	 */
+-	if (idx == -1) {
+-		idx = get_free_idx();
+-		if (idx < 0)
+-			return idx;
+-		if (put_user(idx, &u_info->entry_number))
+-			return -EFAULT;
 -	}
--}
--
--static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
--				unsigned long addr, unsigned long end)
--{
--	pgd_t *pgd;
--	pmd_t *pmd;
--	pte_t *pte;
--	pte_t entry;
--	spinlock_t *ptl;
--	unsigned long paddr;
--
--	if (!mm)
--		return; /* No way to find physical address of page */
--
--	pgd = pgd_offset(mm, addr);
--	if (pgd_bad(*pgd))
--		return;
--
--	pmd = pmd_offset(pgd, addr);
--	if (pmd_none(*pmd) || pmd_bad(*pmd))
--		return;
--
--	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
--	do {
--		entry = *pte;
--		if (pte_none(entry) || !pte_present(entry))
--			continue;
--		paddr = pte_val(entry) & PAGE_MASK;
--		sh64_dcache_purge_coloured_phy_page(paddr, addr);
--	} while (pte++, addr += PAGE_SIZE, addr != end);
--	pte_unmap_unlock(pte - 1, ptl);
--}
--/****************************************************************************/
--
--static void sh64_dcache_purge_user_range(struct mm_struct *mm,
--			  unsigned long start, unsigned long end)
--{
--	/* There are at least 5 choices for the implementation of this, with
--	   pros (+), cons(-), comments(*):
--
--	   1. ocbp each line in the range through the original user's ASID
--	      + no lines spuriously evicted
--	      - tlbmiss handling (must either handle faults on demand => extra
--		special-case code in tlbmiss critical path), or map the page in
--		advance (=> flush_tlb_range in advance to avoid multiple hits)
--	      - ASID switching
--	      - expensive for large ranges
--
--	   2. temporarily map each page in the range to a special effective
--	      address and ocbp through the temporary mapping; relies on the
--	      fact that SH-5 OCB* always do TLB lookup and match on ptags (they
--	      never look at the etags)
--	      + no spurious evictions
--	      - expensive for large ranges
--	      * surely cheaper than (1)
--
--	   3. walk all the lines in the cache, check the tags, if a match
--	      occurs create a page mapping to ocbp the line through
--	      + no spurious evictions
--	      - tag inspection overhead
--	      - (especially for small ranges)
--	      - potential cost of setting up/tearing down page mapping for
--		every line that matches the range
--	      * cost partly independent of range size
--
--	   4. walk all the lines in the cache, check the tags, if a match
--	      occurs use 4 * alloco to purge the line (+3 other probably
--	      innocent victims) by natural eviction
--	      + no tlb mapping overheads
--	      - spurious evictions
--	      - tag inspection overhead
--
--	   5. implement like flush_cache_all
--	      + no tag inspection overhead
--	      - spurious evictions
--	      - bad for small ranges
--
--	   (1) can be ruled out as more expensive than (2).  (2) appears best
--	   for small ranges.  The choice between (3), (4) and (5) for large
--	   ranges and the range size for the large/small boundary need
--	   benchmarking to determine.
--
--	   For now use approach (2) for small ranges and (5) for large ones.
--
--	   */
--
--	int n_pages;
 -
--	n_pages = ((end - start) >> PAGE_SHIFT);
--	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
--#if 1
--		sh64_dcache_purge_all();
--#else
--		unsigned long long set, way;
--		unsigned long mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
--		for (set = 0; set < cpu_data->dcache.sets; set++) {
--			unsigned long long set_base_config_addr = CACHE_OC_ADDRESS_ARRAY + (set << cpu_data->dcache.set_shift);
--			for (way = 0; way < cpu_data->dcache.ways; way++) {
--				unsigned long long config_addr = set_base_config_addr + (way << cpu_data->dcache.way_step_shift);
--				unsigned long long tag0;
--				unsigned long line_valid;
+-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+-		return -EINVAL;
 -
--				asm __volatile__("getcfg %1, 0, %0" : "=r" (tag0) : "r" (config_addr));
--				line_valid = tag0 & SH_CACHE_VALID;
--				if (line_valid) {
--					unsigned long cache_asid;
--					unsigned long epn;
+-	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
 -
--					cache_asid = (tag0 & cpu_data->dcache.asid_mask) >> cpu_data->dcache.asid_shift;
--					/* The next line needs some
--					   explanation.  The virtual tags
--					   encode bits [31:13] of the virtual
--					   address, bit [12] of the 'tag' being
--					   implied by the cache set index. */
--					epn = (tag0 & cpu_data->dcache.epn_mask) | ((set & 0x80) << cpu_data->dcache.entry_shift);
+-	/*
+-	 * We must not get preempted while modifying the TLS.
+-	 */
+-	cpu = get_cpu();
 -
--					if ((cache_asid == mm_asid) && (start <= epn) && (epn < end)) {
--						/* TODO : could optimise this
--						   call by batching multiple
--						   adjacent sets together. */
--						sh64_dcache_purge_sets(set, 1);
--						break; /* Don't waste time inspecting other ways for this set */
--					}
--				}
--			}
--		}
--#endif
+-	if (LDT_empty(&info)) {
+-		desc->a = 0;
+-		desc->b = 0;
 -	} else {
--		/* Small range, covered by a single page table page */
--		start &= PAGE_MASK;	/* should already be so */
--		end = PAGE_ALIGN(end);	/* should already be so */
--		sh64_dcache_purge_user_pages(mm, start, end);
--	}
--	return;
--}
--
--static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end)
--{
--	unsigned long long aligned_start;
--	unsigned long long ull_end;
--	unsigned long long addr;
--
--	ull_end = end;
--
--	/* Just wback over the range using the natural addresses.  TLB miss
--	   handling will be OK (TBC) : the range has just been written to by
--	   the signal frame setup code, so the PTEs must exist.
--
--	   Note, if we have CONFIG_PREEMPT and get preempted inside this loop,
--	   it doesn't matter, even if the pid->ASID mapping changes whilst
--	   we're away.  In that case the cache will have been flushed when the
--	   mapping was renewed.  So the writebacks below will be nugatory (and
--	   we'll doubtless have to fault the TLB entry/ies in again with the
--	   new ASID), but it's a rare case.
--	   */
--	aligned_start = start & L1_CACHE_ALIGN_MASK;
--	addr = aligned_start;
--	while (addr < ull_end) {
--		asm __volatile__ ("ocbwb %0, 0" : : "r" (addr));
--		addr += L1_CACHE_BYTES;
--	}
--}
--
--/****************************************************************************/
--
--/* These *MUST* lie in an area of virtual address space that's otherwise unused. */
--#define UNIQUE_EADDR_START 0xe0000000UL
--#define UNIQUE_EADDR_END   0xe8000000UL
--
--static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr)
--{
--	/* Given a physical address paddr, and a user virtual address
--	   user_eaddr which will eventually be mapped to it, create a one-off
--	   kernel-private eaddr mapped to the same paddr.  This is used for
--	   creating special destination pages for copy_user_page and
--	   clear_user_page */
--
--	static unsigned long current_pointer = UNIQUE_EADDR_START;
--	unsigned long coloured_pointer;
--
--	if (current_pointer == UNIQUE_EADDR_END) {
--		sh64_dcache_purge_all();
--		current_pointer = UNIQUE_EADDR_START;
+-		desc->a = LDT_entry_a(&info);
+-		desc->b = LDT_entry_b(&info);
 -	}
+-	if (t == &current->thread)
+-		load_TLS(t, cpu);
 -
--	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | (user_eaddr & CACHE_OC_SYN_MASK);
--	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
--
--	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
--
--	return coloured_pointer;
--}
--
--/****************************************************************************/
--
--static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address)
--{
--	void *coloured_to;
--
--	/* Discard any existing cache entries of the wrong colour.  These are
--	   present quite often, if the kernel has recently used the page
--	   internally, then given it up, then it's been allocated to the user.
--	   */
--	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
--
--	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
--	sh64_page_copy(from, coloured_to);
--
--	sh64_teardown_dtlb_cache_slot();
--}
--
--static void sh64_clear_user_page_coloured(void *to, unsigned long address)
--{
--	void *coloured_to;
--
--	/* Discard any existing kernel-originated lines of the wrong colour (as
--	   above) */
--	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
--
--	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
--	sh64_page_clear(coloured_to);
--
--	sh64_teardown_dtlb_cache_slot();
+-	put_cpu();
+-	return 0;
 -}
 -
--#endif /* !CONFIG_DCACHE_DISABLED */
--
--/****************************************************************************/
--
--/*##########################################################################
--			    EXTERNALLY CALLABLE API.
--  ##########################################################################*/
--
--/* These functions are described in Documentation/cachetlb.txt.
--   Each one of these functions varies in behaviour depending on whether the
--   I-cache and/or D-cache are configured out.
+-asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
+-{ 
+-	return do_set_thread_area(&current->thread, u_info); 
+-} 
 -
--   Note that the Linux term 'flush' corresponds to what is termed 'purge' in
--   the sh/sh64 jargon for the D-cache, i.e. write back dirty data then
--   invalidate the cache lines, and 'invalidate' for the I-cache.
--   */
 -
--#undef FLUSH_TRACE
+-/*
+- * Get the current Thread-Local Storage area:
+- */
 -
--void flush_cache_all(void)
--{
--	/* Invalidate the entire contents of both caches, after writing back to
--	   memory any dirty data from the D-cache. */
--	sh64_dcache_purge_all();
--	sh64_icache_inv_all();
--}
+-#define GET_BASE(desc) ( \
+-	(((desc)->a >> 16) & 0x0000ffff) | \
+-	(((desc)->b << 16) & 0x00ff0000) | \
+-	( (desc)->b        & 0xff000000)   )
 -
--/****************************************************************************/
+-#define GET_LIMIT(desc) ( \
+-	((desc)->a & 0x0ffff) | \
+-	 ((desc)->b & 0xf0000) )
+-	
+-#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
+-#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
+-#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
+-#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
+-#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
+-#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
+-#define GET_LONGMODE(desc)	(((desc)->b >> 21) & 1)
 -
--void flush_cache_mm(struct mm_struct *mm)
+-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 -{
--	/* Invalidate an entire user-address space from both caches, after
--	   writing back dirty data (e.g. for shared mmap etc). */
+-	struct user_desc info;
+-	struct n_desc_struct *desc;
+-	int idx;
 -
--	/* This could be coded selectively by inspecting all the tags then
--	   doing 4*alloco on any set containing a match (as for
--	   flush_cache_range), but fork/exit/execve (where this is called from)
--	   are expensive anyway. */
+-	if (get_user(idx, &u_info->entry_number))
+-		return -EFAULT;
+-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+-		return -EINVAL;
 -
--	/* Have to do a purge here, despite the comments re I-cache below.
--	   There could be odd-coloured dirty data associated with the mm still
--	   in the cache - if this gets written out through natural eviction
--	   after the kernel has reused the page there will be chaos.
--	   */
+-	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
 -
--	sh64_dcache_purge_all();
+-	memset(&info, 0, sizeof(struct user_desc));
+-	info.entry_number = idx;
+-	info.base_addr = GET_BASE(desc);
+-	info.limit = GET_LIMIT(desc);
+-	info.seg_32bit = GET_32BIT(desc);
+-	info.contents = GET_CONTENTS(desc);
+-	info.read_exec_only = !GET_WRITABLE(desc);
+-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
+-	info.seg_not_present = !GET_PRESENT(desc);
+-	info.useable = GET_USEABLE(desc);
+-	info.lm = GET_LONGMODE(desc);
 -
--	/* The mm being torn down won't ever be active again, so any Icache
--	   lines tagged with its ASID won't be visible for the rest of the
--	   lifetime of this ASID cycle.  Before the ASID gets reused, there
--	   will be a flush_cache_all.  Hence we don't need to touch the
--	   I-cache.  This is similar to the lack of action needed in
--	   flush_tlb_mm - see fault.c. */
+-	if (copy_to_user(u_info, &info, sizeof(info)))
+-		return -EFAULT;
+-	return 0;
 -}
 -
--/****************************************************************************/
--
--void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
--		       unsigned long end)
+-asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
 -{
--	struct mm_struct *mm = vma->vm_mm;
--
--	/* Invalidate (from both caches) the range [start,end) of virtual
--	   addresses from the user address space specified by mm, after writing
--	   back any dirty data.
--
--	   Note, 'end' is 1 byte beyond the end of the range to flush. */
--
--	sh64_dcache_purge_user_range(mm, start, end);
--	sh64_icache_inv_user_page_range(mm, start, end);
--}
+-	return do_get_thread_area(&current->thread, u_info);
+-} 
 -
--/****************************************************************************/
 -
--void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
+-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
 -{
--	/* Invalidate any entries in either cache for the vma within the user
--	   address space vma->vm_mm for the page starting at virtual address
--	   'eaddr'.   This seems to be used primarily in breaking COW.  Note,
--	   the I-cache must be searched too in case the page in question is
--	   both writable and being executed from (e.g. stack trampolines.)
--
--	   Note, this is called with pte lock held.
--	   */
--
--	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
+-	struct n_desc_struct *desc;
+-	struct user_desc info;
+-	struct user_desc __user *cp;
+-	int idx;
+-	
+-	cp = (void __user *)childregs->rsi;
+-	if (copy_from_user(&info, cp, sizeof(info)))
+-		return -EFAULT;
+-	if (LDT_empty(&info))
+-		return -EINVAL;
+-	
+-	idx = info.entry_number;
+-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+-		return -EINVAL;
+-	
+-	desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
+-	desc->a = LDT_entry_a(&info);
+-	desc->b = LDT_entry_b(&info);
 -
--	if (vma->vm_flags & VM_EXEC) {
--		sh64_icache_inv_user_page(vma, eaddr);
--	}
+-	return 0;
 -}
+diff --git a/arch/x86/ia32/vsyscall-sigreturn.S b/arch/x86/ia32/vsyscall-sigreturn.S
+deleted file mode 100644
+index b383be0..0000000
+--- a/arch/x86/ia32/vsyscall-sigreturn.S
++++ /dev/null
+@@ -1,143 +0,0 @@
+-/*
+- * Common code for the sigreturn entry points on the vsyscall page.
+- * This code uses SYSCALL_ENTER_KERNEL (either syscall or int $0x80)
+- * to enter the kernel.
+- * This file is #include'd by vsyscall-*.S to define them after the
+- * vsyscall entry point.  The addresses we get for these entry points
+- * by doing ".balign 32" must match in both versions of the page.
+- */
 -
--/****************************************************************************/
--
--#ifndef CONFIG_DCACHE_DISABLED
--
--void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
--{
--	/* 'from' and 'to' are kernel virtual addresses (within the superpage
--	   mapping of the physical RAM).  'address' is the user virtual address
--	   where the copy 'to' will be mapped after.  This allows a custom
--	   mapping to be used to ensure that the new copy is placed in the
--	   right cache sets for the user to see it without having to bounce it
--	   out via memory.  Note however : the call to flush_page_to_ram in
--	   (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
--	   very important case!
--
--	   TBD : can we guarantee that on every call, any cache entries for
--	   'from' are in the same colour sets as 'address' also?  i.e. is this
--	   always used just to deal with COW?  (I suspect not). */
--
--	/* There are two possibilities here for when the page 'from' was last accessed:
--	   * by the kernel : this is OK, no purge required.
--	   * by the/a user (e.g. for break_COW) : need to purge.
--
--	   If the potential user mapping at 'address' is the same colour as
--	   'from' there is no need to purge any cache lines from the 'from'
--	   page mapped into cache sets of colour 'address'.  (The copy will be
--	   accessing the page through 'from').
--	   */
--
--	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) {
--		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
--	}
--
--	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
--		/* No synonym problem on destination */
--		sh64_page_copy(from, to);
--	} else {
--		sh64_copy_user_page_coloured(to, from, address);
--	}
+-	.code32
+-	.section .text.sigreturn,"ax"
+-	.balign 32
+-	.globl __kernel_sigreturn
+-	.type __kernel_sigreturn,@function
+-__kernel_sigreturn:
+-.LSTART_sigreturn:
+-	popl %eax
+-	movl $__NR_ia32_sigreturn, %eax
+-	SYSCALL_ENTER_KERNEL
+-.LEND_sigreturn:
+-	.size __kernel_sigreturn,.-.LSTART_sigreturn
 -
--	/* Note, don't need to flush 'from' page from the cache again - it's
--	   done anyway by the generic code */
--}
+-	.section .text.rtsigreturn,"ax"
+-	.balign 32
+-	.globl __kernel_rt_sigreturn
+-	.type __kernel_rt_sigreturn,@function
+-__kernel_rt_sigreturn:
+-.LSTART_rt_sigreturn:
+-	movl $__NR_ia32_rt_sigreturn, %eax
+-	SYSCALL_ENTER_KERNEL
+-.LEND_rt_sigreturn:
+-	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
 -
--void clear_user_page(void *to, unsigned long address, struct page *page)
--{
--	/* 'to' is a kernel virtual address (within the superpage
--	   mapping of the physical RAM).  'address' is the user virtual address
--	   where the 'to' page will be mapped after.  This allows a custom
--	   mapping to be used to ensure that the new copy is placed in the
--	   right cache sets for the user to see it without having to bounce it
--	   out via memory.
--	*/
+-	.section .eh_frame,"a",@progbits
+-.LSTARTFRAMES:
+-        .long .LENDCIES-.LSTARTCIES
+-.LSTARTCIES:
+-	.long 0			/* CIE ID */
+-	.byte 1			/* Version number */
+-	.string "zRS"		/* NUL-terminated augmentation string */
+-	.uleb128 1		/* Code alignment factor */
+-	.sleb128 -4		/* Data alignment factor */
+-	.byte 8			/* Return address register column */
+-	.uleb128 1		/* Augmentation value length */
+-	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+-	.byte 0x0c		/* DW_CFA_def_cfa */
+-	.uleb128 4
+-	.uleb128 4
+-	.byte 0x88		/* DW_CFA_offset, column 0x8 */
+-	.uleb128 1
+-	.align 4
+-.LENDCIES:
 -
--	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
--		/* No synonym problem on destination */
--		sh64_page_clear(to);
--	} else {
--		sh64_clear_user_page_coloured(to, address);
--	}
--}
+-	.long .LENDFDE2-.LSTARTFDE2	/* Length FDE */
+-.LSTARTFDE2:
+-	.long .LSTARTFDE2-.LSTARTFRAMES	/* CIE pointer */
+-	/* HACK: The dwarf2 unwind routines will subtract 1 from the
+-	   return address to get an address in the middle of the
+-	   presumed call instruction.  Since we didn't get here via
+-	   a call, we need to include the nop before the real start
+-	   to make up for it.  */
+-	.long .LSTART_sigreturn-1-.	/* PC-relative start address */
+-	.long .LEND_sigreturn-.LSTART_sigreturn+1
+-	.uleb128 0			/* Augmentation length */
+-	/* What follows are the instructions for the table generation.
+-	   We record the locations of each register saved.  This is
+-	   complicated by the fact that the "CFA" is always assumed to
+-	   be the value of the stack pointer in the caller.  This means
+-	   that we must define the CFA of this body of code to be the
+-	   saved value of the stack pointer in the sigcontext.  Which
+-	   also means that there is no fixed relation to the other 
+-	   saved registers, which means that we must use DW_CFA_expression
+-	   to compute their addresses.  It also means that when we 
+-	   adjust the stack with the popl, we have to do it all over again.  */
 -
--#endif /* !CONFIG_DCACHE_DISABLED */
+-#define do_cfa_expr(offset)						\
+-	.byte 0x0f;			/* DW_CFA_def_cfa_expression */	\
+-	.uleb128 1f-0f;			/*   length */			\
+-0:	.byte 0x74;			/*     DW_OP_breg4 */		\
+-	.sleb128 offset;		/*      offset */		\
+-	.byte 0x06;			/*     DW_OP_deref */		\
+-1:
 -
--/****************************************************************************/
+-#define do_expr(regno, offset)						\
+-	.byte 0x10;			/* DW_CFA_expression */		\
+-	.uleb128 regno;			/*   regno */			\
+-	.uleb128 1f-0f;			/*   length */			\
+-0:	.byte 0x74;			/*     DW_OP_breg4 */		\
+-	.sleb128 offset;		/*       offset */		\
+-1:
 -
--void flush_dcache_page(struct page *page)
--{
--	sh64_dcache_purge_phy_page(page_to_phys(page));
--	wmb();
--}
+-	do_cfa_expr(IA32_SIGCONTEXT_esp+4)
+-	do_expr(0, IA32_SIGCONTEXT_eax+4)
+-	do_expr(1, IA32_SIGCONTEXT_ecx+4)
+-	do_expr(2, IA32_SIGCONTEXT_edx+4)
+-	do_expr(3, IA32_SIGCONTEXT_ebx+4)
+-	do_expr(5, IA32_SIGCONTEXT_ebp+4)
+-	do_expr(6, IA32_SIGCONTEXT_esi+4)
+-	do_expr(7, IA32_SIGCONTEXT_edi+4)
+-	do_expr(8, IA32_SIGCONTEXT_eip+4)
 -
--/****************************************************************************/
+-	.byte 0x42	/* DW_CFA_advance_loc 2 -- nop; popl eax. */
 -
--void flush_icache_range(unsigned long start, unsigned long end)
--{
--	/* Flush the range [start,end] of kernel virtual address space from
--	   the I-cache.  The corresponding range must be purged from the
--	   D-cache also because the SH-5 doesn't have cache snooping between
--	   the caches.  The addresses will be visible through the superpage
--	   mapping, therefore it's guaranteed that there no cache entries for
--	   the range in cache sets of the wrong colour.
+-	do_cfa_expr(IA32_SIGCONTEXT_esp)
+-	do_expr(0, IA32_SIGCONTEXT_eax)
+-	do_expr(1, IA32_SIGCONTEXT_ecx)
+-	do_expr(2, IA32_SIGCONTEXT_edx)
+-	do_expr(3, IA32_SIGCONTEXT_ebx)
+-	do_expr(5, IA32_SIGCONTEXT_ebp)
+-	do_expr(6, IA32_SIGCONTEXT_esi)
+-	do_expr(7, IA32_SIGCONTEXT_edi)
+-	do_expr(8, IA32_SIGCONTEXT_eip)
 -
--	   Primarily used for cohering the I-cache after a module has
--	   been loaded.  */
+-	.align 4
+-.LENDFDE2:
 -
--	/* We also make sure to purge the same range from the D-cache since
--	   flush_page_to_ram() won't be doing this for us! */
+-	.long .LENDFDE3-.LSTARTFDE3	/* Length FDE */
+-.LSTARTFDE3:
+-	.long .LSTARTFDE3-.LSTARTFRAMES	/* CIE pointer */
+-	/* HACK: See above wrt unwind library assumptions.  */
+-	.long .LSTART_rt_sigreturn-1-.	/* PC-relative start address */
+-	.long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
+-	.uleb128 0			/* Augmentation */
+-	/* What follows are the instructions for the table generation.
+-	   We record the locations of each register saved.  This is
+-	   slightly less complicated than the above, since we don't
+-	   modify the stack pointer in the process.  */
 -
--	sh64_dcache_purge_kernel_range(start, end);
--	wmb();
--	sh64_icache_inv_kernel_range(start, end);
--}
+-	do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_esp)
+-	do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_eax)
+-	do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ecx)
+-	do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_edx)
+-	do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ebx)
+-	do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ebp)
+-	do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_esi)
+-	do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_edi)
+-	do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_eip)
 -
--/****************************************************************************/
+-	.align 4
+-.LENDFDE3:
 -
--void flush_icache_user_range(struct vm_area_struct *vma,
--			struct page *page, unsigned long addr, int len)
--{
--	/* Flush the range of user (defined by vma->vm_mm) address space
--	   starting at 'addr' for 'len' bytes from the cache.  The range does
--	   not straddle a page boundary, the unique physical page containing
--	   the range is 'page'.  This seems to be used mainly for invalidating
--	   an address range following a poke into the program text through the
--	   ptrace() call from another process (e.g. for BRK instruction
--	   insertion). */
+-#include "../../x86/kernel/vsyscall-note_32.S"
 -
--	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
--	mb();
+diff --git a/arch/x86/ia32/vsyscall-syscall.S b/arch/x86/ia32/vsyscall-syscall.S
+deleted file mode 100644
+index cf9ef67..0000000
+--- a/arch/x86/ia32/vsyscall-syscall.S
++++ /dev/null
+@@ -1,69 +0,0 @@
+-/*
+- * Code for the vsyscall page.  This version uses the syscall instruction.
+- */
 -
--	if (vma->vm_flags & VM_EXEC) {
--		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
--	}
--}
+-#include <asm/ia32_unistd.h>
+-#include <asm/asm-offsets.h>
+-#include <asm/segment.h>
 -
--/*##########################################################################
--			ARCH/SH64 PRIVATE CALLABLE API.
--  ##########################################################################*/
+-	.code32
+-	.text
+-	.section .text.vsyscall,"ax"
+-	.globl __kernel_vsyscall
+-	.type __kernel_vsyscall,@function
+-__kernel_vsyscall:
+-.LSTART_vsyscall:
+-	push	%ebp
+-.Lpush_ebp:
+-	movl	%ecx, %ebp
+-	syscall
+-	movl	$__USER32_DS, %ecx
+-	movl	%ecx, %ss
+-	movl	%ebp, %ecx
+-	popl	%ebp
+-.Lpop_ebp:
+-	ret
+-.LEND_vsyscall:
+-	.size __kernel_vsyscall,.-.LSTART_vsyscall
 -
--void flush_cache_sigtramp(unsigned long start, unsigned long end)
--{
--	/* For the address range [start,end), write back the data from the
--	   D-cache and invalidate the corresponding region of the I-cache for
--	   the current process.  Used to flush signal trampolines on the stack
--	   to make them executable. */
+-	.section .eh_frame,"a",@progbits
+-.LSTARTFRAME:
+-	.long .LENDCIE-.LSTARTCIE
+-.LSTARTCIE:
+-	.long 0			/* CIE ID */
+-	.byte 1			/* Version number */
+-	.string "zR"		/* NUL-terminated augmentation string */
+-	.uleb128 1		/* Code alignment factor */
+-	.sleb128 -4		/* Data alignment factor */
+-	.byte 8			/* Return address register column */
+-	.uleb128 1		/* Augmentation value length */
+-	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+-	.byte 0x0c		/* DW_CFA_def_cfa */
+-	.uleb128 4
+-	.uleb128 4
+-	.byte 0x88		/* DW_CFA_offset, column 0x8 */
+-	.uleb128 1
+-	.align 4
+-.LENDCIE:
 -
--	sh64_dcache_wback_current_user_range(start, end);
--	wmb();
--	sh64_icache_inv_current_user_range(start, end);
--}
+-	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
+-.LSTARTFDE1:
+-	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
+-	.long .LSTART_vsyscall-.	/* PC-relative start address */
+-	.long .LEND_vsyscall-.LSTART_vsyscall
+-	.uleb128 0			/* Augmentation length */
+-	/* What follows are the instructions for the table generation.
+-	   We have to record all changes of the stack pointer.  */
+-	.byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.uleb128 8
+-	.byte 0x85, 0x02	/* DW_CFA_offset %ebp -8 */
+-	.byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
+-	.byte 0xc5		/* DW_CFA_restore %ebp */
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.uleb128 4
+-	.align 4
+-.LENDFDE1:
 -
-diff --git a/arch/sh64/mm/consistent.c b/arch/sh64/mm/consistent.c
+-#define SYSCALL_ENTER_KERNEL	syscall
+-#include "vsyscall-sigreturn.S"
+diff --git a/arch/x86/ia32/vsyscall-sysenter.S b/arch/x86/ia32/vsyscall-sysenter.S
 deleted file mode 100644
-index c439620..0000000
---- a/arch/sh64/mm/consistent.c
+index ae056e5..0000000
+--- a/arch/x86/ia32/vsyscall-sysenter.S
 +++ /dev/null
-@@ -1,53 +0,0 @@
+@@ -1,95 +0,0 @@
 -/*
-- * Copyright (C) 2001 David J. Mckay (david.mckay at st.com)
-- * Copyright (C) 2003 Paul Mundt (lethal at linux-sh.org)
-- *
-- * May be copied or modified under the terms of the GNU General Public
-- * License.  See linux/COPYING for more information.
-- *
-- * Dynamic DMA mapping support.
+- * Code for the vsyscall page.  This version uses the sysenter instruction.
 - */
--#include <linux/types.h>
--#include <linux/mm.h>
--#include <linux/string.h>
--#include <linux/pci.h>
--#include <linux/dma-mapping.h>
--#include <linux/module.h>
--#include <asm/io.h>
--
--void *consistent_alloc(struct pci_dev *hwdev, size_t size,
--			   dma_addr_t *dma_handle)
--{
--	void *ret;
--	int gfp = GFP_ATOMIC;
--        void *vp;
--
--	if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
--		gfp |= GFP_DMA;
--
--	ret = (void *)__get_free_pages(gfp, get_order(size));
 -
--	/* now call our friend ioremap_nocache to give us an uncached area */
--        vp = ioremap_nocache(virt_to_phys(ret), size);
--
--	if (vp != NULL) {
--		memset(vp, 0, size);
--		*dma_handle = virt_to_phys(ret);
--		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
--	}
+-#include <asm/ia32_unistd.h>
+-#include <asm/asm-offsets.h>
 -
--	return vp;
--}
--EXPORT_SYMBOL(consistent_alloc);
+-	.code32
+-	.text
+-	.section .text.vsyscall,"ax"
+-	.globl __kernel_vsyscall
+-	.type __kernel_vsyscall,@function
+-__kernel_vsyscall:
+-.LSTART_vsyscall:
+-	push	%ecx
+-.Lpush_ecx:
+-	push	%edx
+-.Lpush_edx:
+-	push	%ebp
+-.Lenter_kernel:
+-	movl	%esp,%ebp
+-	sysenter
+-	.space 7,0x90
+-	jmp	.Lenter_kernel
+-	/* 16: System call normal return point is here! */
+-	pop	%ebp
+-.Lpop_ebp:
+-	pop	%edx
+-.Lpop_edx:
+-	pop	%ecx
+-.Lpop_ecx:
+-	ret
+-.LEND_vsyscall:
+-	.size __kernel_vsyscall,.-.LSTART_vsyscall
 -
--void consistent_free(struct pci_dev *hwdev, size_t size,
--			 void *vaddr, dma_addr_t dma_handle)
--{
--	void *alloc;
+-	.section .eh_frame,"a",@progbits
+-.LSTARTFRAME:
+-	.long .LENDCIE-.LSTARTCIE
+-.LSTARTCIE:
+-	.long 0			/* CIE ID */
+-	.byte 1			/* Version number */
+-	.string "zR"		/* NUL-terminated augmentation string */
+-	.uleb128 1		/* Code alignment factor */
+-	.sleb128 -4		/* Data alignment factor */
+-	.byte 8			/* Return address register column */
+-	.uleb128 1		/* Augmentation value length */
+-	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+-	.byte 0x0c		/* DW_CFA_def_cfa */
+-	.uleb128 4
+-	.uleb128 4
+-	.byte 0x88		/* DW_CFA_offset, column 0x8 */
+-	.uleb128 1
+-	.align 4
+-.LENDCIE:
 -
--	alloc = phys_to_virt((unsigned long)dma_handle);
--	free_pages((unsigned long)alloc, get_order(size));
+-	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
+-.LSTARTFDE1:
+-	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
+-	.long .LSTART_vsyscall-.	/* PC-relative start address */
+-	.long .LEND_vsyscall-.LSTART_vsyscall
+-	.uleb128 0			/* Augmentation length */
+-	/* What follows are the instructions for the table generation.
+-	   We have to record all changes of the stack pointer.  */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpush_ecx-.LSTART_vsyscall
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x08		/* RA at offset 8 now */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpush_edx-.Lpush_ecx
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x0c		/* RA at offset 12 now */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lenter_kernel-.Lpush_edx
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x10		/* RA at offset 16 now */
+-	.byte 0x85, 0x04	/* DW_CFA_offset %ebp -16 */
+-	/* Finally the epilogue.  */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpop_ebp-.Lenter_kernel
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x12		/* RA at offset 12 now */
+-	.byte 0xc5		/* DW_CFA_restore %ebp */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpop_edx-.Lpop_ebp
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x08		/* RA at offset 8 now */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpop_ecx-.Lpop_edx
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x04		/* RA at offset 4 now */
+-	.align 4
+-.LENDFDE1:
 -
--	iounmap(vaddr);
--}
--EXPORT_SYMBOL(consistent_free);
-diff --git a/arch/sh64/mm/extable.c b/arch/sh64/mm/extable.c
+-#define SYSCALL_ENTER_KERNEL	int $0x80
+-#include "vsyscall-sigreturn.S"
+diff --git a/arch/x86/ia32/vsyscall.lds b/arch/x86/ia32/vsyscall.lds
 deleted file mode 100644
-index a2e6e05..0000000
---- a/arch/sh64/mm/extable.c
+index 1dc86ff..0000000
+--- a/arch/x86/ia32/vsyscall.lds
 +++ /dev/null
 @@ -1,80 +0,0 @@
 -/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/mm/extable.c
-- *
-- * Copyright (C) 2003 Richard Curnow
-- * Copyright (C) 2003, 2004  Paul Mundt
-- *
-- * Cloned from the 2.5 SH version..
+- * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
+- * object prelinked to its virtual address. This script controls its layout.
 - */
--#include <linux/rwsem.h>
--#include <linux/module.h>
--#include <asm/uaccess.h>
--
--extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
--extern void __copy_user_fixup(void);
--
--static const struct exception_table_entry __copy_user_fixup_ex = {
--	.fixup = (unsigned long)&__copy_user_fixup,
--};
 -
--/* Some functions that may trap due to a bad user-mode address have too many loads
--   and stores in them to make it at all practical to label each one and put them all in
--   the main exception table.
+-/* This must match <asm/fixmap.h>.  */
+-VSYSCALL_BASE = 0xffffe000;
 -
--   In particular, the fast memcpy routine is like this.  Its fix-up is just to fall back
--   to a slow byte-at-a-time copy, which is handled the conventional way.  So it's functionally
--   OK to just handle any trap occurring in the fast memcpy with that fixup. */
--static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
+-SECTIONS
 -{
--	if ((addr >= (unsigned long)&copy_user_memcpy) &&
--	    (addr <= (unsigned long)&copy_user_memcpy_end))
--		return &__copy_user_fixup_ex;
+-  . = VSYSCALL_BASE + SIZEOF_HEADERS;
 -
--	return NULL;
--}
+-  .hash           : { *(.hash) }		:text
+-  .gnu.hash       : { *(.gnu.hash) }
+-  .dynsym         : { *(.dynsym) }
+-  .dynstr         : { *(.dynstr) }
+-  .gnu.version    : { *(.gnu.version) }
+-  .gnu.version_d  : { *(.gnu.version_d) }
+-  .gnu.version_r  : { *(.gnu.version_r) }
 -
--/* Simple binary search */
--const struct exception_table_entry *
--search_extable(const struct exception_table_entry *first,
--		 const struct exception_table_entry *last,
--		 unsigned long value)
--{
--	const struct exception_table_entry *mid;
+-  /* This linker script is used both with -r and with -shared.
+-     For the layouts to match, we need to skip more than enough
+-     space for the dynamic symbol table et al.  If this amount
+-     is insufficient, ld -shared will barf.  Just increase it here.  */
+-  . = VSYSCALL_BASE + 0x400;
+-  
+-  .text.vsyscall   : { *(.text.vsyscall) } 	:text =0x90909090
 -
--	mid = check_exception_ranges(value);
--	if (mid)
--		return mid;
+-  /* This is an 32bit object and we cannot easily get the offsets
+-     into the 64bit kernel. Just hardcode them here. This assumes
+-     that all the stubs don't need more than 0x100 bytes. */
+-  . = VSYSCALL_BASE + 0x500;
 -
--        while (first <= last) {
--		long diff;
+-  .text.sigreturn  : { *(.text.sigreturn) }	:text =0x90909090
 -
--		mid = (last - first) / 2 + first;
--		diff = mid->insn - value;
--                if (diff == 0)
--                        return mid;
--                else if (diff < 0)
--                        first = mid+1;
--                else
--                        last = mid-1;
--        }
+-  . = VSYSCALL_BASE + 0x600;
 -
--        return NULL;
+-  .text.rtsigreturn : { *(.text.rtsigreturn) }   :text =0x90909090
+-	
+-  .note		  : { *(.note.*) }		:text :note
+-  .eh_frame_hdr   : { *(.eh_frame_hdr) }	:text :eh_frame_hdr
+-  .eh_frame       : { KEEP (*(.eh_frame)) }	:text
+-  .dynamic        : { *(.dynamic) }		:text :dynamic
+-  .useless        : {
+-  	*(.got.plt) *(.got)
+-	*(.data .data.* .gnu.linkonce.d.*)
+-	*(.dynbss)
+-	*(.bss .bss.* .gnu.linkonce.b.*)
+-  }						:text
 -}
 -
--int fixup_exception(struct pt_regs *regs)
+-/*
+- * We must supply the ELF program headers explicitly to get just one
+- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+- */
+-PHDRS
 -{
--	const struct exception_table_entry *fixup;
--
--	fixup = search_exception_tables(regs->pc);
--	if (fixup) {
--		regs->pc = fixup->fixup;
--		return 1;
--	}
--
--	return 0;
+-  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+-  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+-  note PT_NOTE FLAGS(4); /* PF_R */
+-  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
 -}
 -
-diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
-deleted file mode 100644
-index 7c79a1b..0000000
---- a/arch/sh64/mm/fault.c
-+++ /dev/null
-@@ -1,602 +0,0 @@
 -/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/mm/fault.c
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
-- * Copyright (C) 2003  Paul Mundt
-- *
+- * This controls what symbols we export from the DSO.
 - */
+-VERSION
+-{
+-  LINUX_2.5 {
+-    global:
+-    	__kernel_vsyscall;
+-    	__kernel_sigreturn;
+-    	__kernel_rt_sigreturn;
 -
--#include <linux/signal.h>
--#include <linux/rwsem.h>
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/errno.h>
--#include <linux/string.h>
--#include <linux/types.h>
--#include <linux/ptrace.h>
--#include <linux/mman.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/interrupt.h>
--
--#include <asm/system.h>
--#include <asm/io.h>
--#include <asm/tlb.h>
--#include <asm/uaccess.h>
--#include <asm/pgalloc.h>
--#include <asm/mmu_context.h>
--#include <asm/registers.h>		/* required by inline asm statements */
--
--#if defined(CONFIG_SH64_PROC_TLB)
--#include <linux/init.h>
--#include <linux/proc_fs.h>
--/* Count numbers of tlb refills in each region */
--static unsigned long long calls_to_update_mmu_cache = 0ULL;
--static unsigned long long calls_to_flush_tlb_page   = 0ULL;
--static unsigned long long calls_to_flush_tlb_range  = 0ULL;
--static unsigned long long calls_to_flush_tlb_mm     = 0ULL;
--static unsigned long long calls_to_flush_tlb_all    = 0ULL;
--unsigned long long calls_to_do_slow_page_fault = 0ULL;
--unsigned long long calls_to_do_fast_page_fault = 0ULL;
--
--/* Count size of ranges for flush_tlb_range */
--static unsigned long long flush_tlb_range_1         = 0ULL;
--static unsigned long long flush_tlb_range_2         = 0ULL;
--static unsigned long long flush_tlb_range_3_4       = 0ULL;
--static unsigned long long flush_tlb_range_5_7       = 0ULL;
--static unsigned long long flush_tlb_range_8_11      = 0ULL;
--static unsigned long long flush_tlb_range_12_15     = 0ULL;
--static unsigned long long flush_tlb_range_16_up     = 0ULL;
--
--static unsigned long long page_not_present          = 0ULL;
+-    local: *;
+-  };
+-}
 -
--#endif
+-/* The ELF entry point can be used to set the AT_SYSINFO value.  */
+-ENTRY(__kernel_vsyscall);
+diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
+index 3857334..6f81300 100644
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -1,9 +1,91 @@
+-ifeq ($(CONFIG_X86_32),y)
+-include ${srctree}/arch/x86/kernel/Makefile_32
+-else
+-include ${srctree}/arch/x86/kernel/Makefile_64
++#
++# Makefile for the linux kernel.
++#
++
++extra-y                := head_$(BITS).o init_task.o vmlinux.lds
++extra-$(CONFIG_X86_64) += head64.o
++
++CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
++CFLAGS_vsyscall_64.o := $(PROFILING) -g0
++
++obj-y			:= process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
++obj-y			+= traps_$(BITS).o irq_$(BITS).o
++obj-y			+= time_$(BITS).o ioport.o ldt.o
++obj-y			+= setup_$(BITS).o i8259_$(BITS).o
++obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
++obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
++obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o setup64.o
++obj-y			+= pci-dma_$(BITS).o  bootflag.o e820_$(BITS).o
++obj-y			+= quirks.o i8237.o topology.o kdebugfs.o
++obj-y			+= alternative.o i8253.o
++obj-$(CONFIG_X86_64)	+= pci-nommu_64.o bugs_64.o
++obj-y			+= tsc_$(BITS).o io_delay.o rtc.o
++
++obj-y				+= i387.o
++obj-y				+= ptrace.o
++obj-y				+= ds.o
++obj-$(CONFIG_X86_32)		+= tls.o
++obj-$(CONFIG_IA32_EMULATION)	+= tls.o
++obj-y				+= step.o
++obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
++obj-y				+= cpu/
++obj-y				+= acpi/
++obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
++obj-$(CONFIG_X86_64)		+= reboot.o
++obj-$(CONFIG_MCA)		+= mca_32.o
++obj-$(CONFIG_X86_MSR)		+= msr.o
++obj-$(CONFIG_X86_CPUID)		+= cpuid.o
++obj-$(CONFIG_MICROCODE)		+= microcode.o
++obj-$(CONFIG_PCI)		+= early-quirks.o
++obj-$(CONFIG_APM)		+= apm_32.o
++obj-$(CONFIG_X86_SMP)		+= smp_$(BITS).o smpboot_$(BITS).o tsc_sync.o
++obj-$(CONFIG_X86_32_SMP)	+= smpcommon_32.o
++obj-$(CONFIG_X86_64_SMP)	+= smp_64.o smpboot_64.o tsc_sync.o
++obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_$(BITS).o
++obj-$(CONFIG_X86_MPPARSE)	+= mpparse_$(BITS).o
++obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_$(BITS).o nmi_$(BITS).o
++obj-$(CONFIG_X86_IO_APIC)	+= io_apic_$(BITS).o
++obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
++obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
++obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
++obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
++obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
++obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit_32.o
++obj-$(CONFIG_X86_VSMP)		+= vsmp_64.o
++obj-$(CONFIG_KPROBES)		+= kprobes.o
++obj-$(CONFIG_MODULES)		+= module_$(BITS).o
++obj-$(CONFIG_ACPI_SRAT) 	+= srat_32.o
++obj-$(CONFIG_EFI) 		+= efi.o efi_$(BITS).o efi_stub_$(BITS).o
++obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault_32.o
++obj-$(CONFIG_VM86)		+= vm86_32.o
++obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
++
++obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
++
++obj-$(CONFIG_K8_NB)		+= k8.o
++obj-$(CONFIG_MGEODE_LX)		+= geode_32.o mfgpt_32.o
++obj-$(CONFIG_DEBUG_RODATA_TEST)	+= test_rodata.o
++obj-$(CONFIG_DEBUG_NX_TEST)	+= test_nx.o
++
++obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
++obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
++
++ifdef CONFIG_INPUT_PCSPKR
++obj-y				+= pcspeaker.o
+ endif
+ 
+-# Workaround to delete .lds files with make clean
+-# The problem is that we do not enter Makefile_32 with make clean.
+-clean-files := vsyscall*.lds vsyscall*.so
++obj-$(CONFIG_SCx200)		+= scx200_32.o
++
++###
++# 64 bit specific files
++ifeq ($(CONFIG_X86_64),y)
++        obj-y				+= genapic_64.o genapic_flat_64.o
++        obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
++        obj-$(CONFIG_AUDIT)		+= audit_64.o
++        obj-$(CONFIG_PM)		+= suspend_64.o
++        obj-$(CONFIG_HIBERNATION)	+= suspend_asm_64.o
++
++        obj-$(CONFIG_GART_IOMMU)	+= pci-gart_64.o aperture_64.o
++        obj-$(CONFIG_CALGARY_IOMMU)	+= pci-calgary_64.o tce_64.o
++        obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb_64.o
++endif
+diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
+deleted file mode 100644
+index a7bc93c..0000000
+--- a/arch/x86/kernel/Makefile_32
++++ /dev/null
+@@ -1,88 +0,0 @@
+-#
+-# Makefile for the linux kernel.
+-#
 -
--extern void die(const char *,struct pt_regs *,long);
+-extra-y := head_32.o init_task.o vmlinux.lds
+-CPPFLAGS_vmlinux.lds += -Ui386
 -
--#define PFLAG(val,flag)   (( (val) & (flag) ) ? #flag : "" )
--#define PPROT(flag) PFLAG(pgprot_val(prot),flag)
+-obj-y	:= process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
+-		ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
+-		pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\
+-		quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o
 -
--static inline void print_prots(pgprot_t prot)
--{
--	printk("prot is 0x%08lx\n",pgprot_val(prot));
+-obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+-obj-y				+= cpu/
+-obj-y				+= acpi/
+-obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot_32.o
+-obj-$(CONFIG_MCA)		+= mca_32.o
+-obj-$(CONFIG_X86_MSR)		+= msr.o
+-obj-$(CONFIG_X86_CPUID)		+= cpuid.o
+-obj-$(CONFIG_MICROCODE)		+= microcode.o
+-obj-$(CONFIG_PCI)		+= early-quirks.o
+-obj-$(CONFIG_APM)		+= apm_32.o
+-obj-$(CONFIG_X86_SMP)		+= smp_32.o smpboot_32.o tsc_sync.o
+-obj-$(CONFIG_SMP)		+= smpcommon_32.o
+-obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_32.o
+-obj-$(CONFIG_X86_MPPARSE)	+= mpparse_32.o
+-obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_32.o nmi_32.o
+-obj-$(CONFIG_X86_IO_APIC)	+= io_apic_32.o
+-obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
+-obj-$(CONFIG_KEXEC)		+= machine_kexec_32.o relocate_kernel_32.o crash.o
+-obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_32.o
+-obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
+-obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit_32.o
+-obj-$(CONFIG_KPROBES)		+= kprobes_32.o
+-obj-$(CONFIG_MODULES)		+= module_32.o
+-obj-y				+= sysenter_32.o vsyscall_32.o
+-obj-$(CONFIG_ACPI_SRAT) 	+= srat_32.o
+-obj-$(CONFIG_EFI) 		+= efi_32.o efi_stub_32.o
+-obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault_32.o
+-obj-$(CONFIG_VM86)		+= vm86_32.o
+-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+-obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
+-obj-$(CONFIG_K8_NB)		+= k8.o
+-obj-$(CONFIG_MGEODE_LX)		+= geode_32.o mfgpt_32.o
 -
--	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
--	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
--}
+-obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
+-obj-$(CONFIG_PARAVIRT)		+= paravirt_32.o
+-obj-y				+= pcspeaker.o
 -
--static inline void print_vma(struct vm_area_struct *vma)
--{
--	printk("vma start 0x%08lx\n", vma->vm_start);
--	printk("vma end   0x%08lx\n", vma->vm_end);
+-obj-$(CONFIG_SCx200)		+= scx200_32.o
 -
--	print_prots(vma->vm_page_prot);
--	printk("vm_flags 0x%08lx\n", vma->vm_flags);
--}
+-# vsyscall_32.o contains the vsyscall DSO images as __initdata.
+-# We must build both images before we can assemble it.
+-# Note: kbuild does not track this dependency due to usage of .incbin
+-$(obj)/vsyscall_32.o: $(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so
+-targets += $(foreach F,int80 sysenter,vsyscall-$F_32.o vsyscall-$F_32.so)
+-targets += vsyscall-note_32.o vsyscall_32.lds
 -
--static inline void print_task(struct task_struct *tsk)
--{
--	printk("Task pid %d\n", task_pid_nr(tsk));
--}
+-# The DSO images are built using a special linker script.
+-quiet_cmd_syscall = SYSCALL $@
+-      cmd_syscall = $(CC) -m elf_i386 -nostdlib $(SYSCFLAGS_$(@F)) \
+-		          -Wl,-T,$(filter-out FORCE,$^) -o $@
 -
--static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
--{
--	pgd_t *dir;
--	pmd_t *pmd;
--	pte_t *pte;
--	pte_t entry;
+-export CPPFLAGS_vsyscall_32.lds += -P -C -Ui386
 -
--	dir = pgd_offset(mm, address);
--	if (pgd_none(*dir)) {
--		return NULL;
--	}
+-vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
+-		 $(call ld-option, -Wl$(comma)--hash-style=sysv)
+-SYSCFLAGS_vsyscall-sysenter_32.so	= $(vsyscall-flags)
+-SYSCFLAGS_vsyscall-int80_32.so	= $(vsyscall-flags)
 -
--	pmd = pmd_offset(dir, address);
--	if (pmd_none(*pmd)) {
--		return NULL;
--	}
+-$(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so: \
+-$(obj)/vsyscall-%.so: $(src)/vsyscall_32.lds \
+-		      $(obj)/vsyscall-%.o $(obj)/vsyscall-note_32.o FORCE
+-	$(call if_changed,syscall)
 -
--	pte = pte_offset_kernel(pmd, address);
--	entry = *pte;
+-# We also create a special relocatable object that should mirror the symbol
+-# table and layout of the linked DSO.  With ld -R we can then refer to
+-# these symbols in the kernel code rather than hand-coded addresses.
+-extra-y += vsyscall-syms.o
+-$(obj)/built-in.o: $(obj)/vsyscall-syms.o
+-$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
 -
--	if (pte_none(entry)) {
--		return NULL;
--	}
--	if (!pte_present(entry)) {
--		return NULL;
--	}
+-SYSCFLAGS_vsyscall-syms.o = -r
+-$(obj)/vsyscall-syms.o: $(src)/vsyscall_32.lds \
+-			$(obj)/vsyscall-sysenter_32.o $(obj)/vsyscall-note_32.o FORCE
+-	$(call if_changed,syscall)
 -
--	return pte;
--}
 -
--/*
-- * This routine handles page faults.  It determines the address,
-- * and the problem, and then passes it off to one of the appropriate
-- * routines.
-- */
--asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
--			      unsigned long textaccess, unsigned long address)
--{
--	struct task_struct *tsk;
--	struct mm_struct *mm;
--	struct vm_area_struct * vma;
--	const struct exception_table_entry *fixup;
--	pte_t *pte;
--	int fault;
+diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
+deleted file mode 100644
+index 5a88890..0000000
+--- a/arch/x86/kernel/Makefile_64
++++ /dev/null
+@@ -1,45 +0,0 @@
+-#
+-# Makefile for the linux kernel.
+-#
 -
--#if defined(CONFIG_SH64_PROC_TLB)
--        ++calls_to_do_slow_page_fault;
--#endif
+-extra-y 	:= head_64.o head64.o init_task.o vmlinux.lds
+-CPPFLAGS_vmlinux.lds += -Ux86_64
+-EXTRA_AFLAGS	:= -traditional
 -
--	/* SIM
--	 * Note this is now called with interrupts still disabled
--	 * This is to cope with being called for a missing IO port
--	 * address with interrupts disabled. This should be fixed as
--	 * soon as we have a better 'fast path' miss handler.
--	 *
--	 * Plus take care how you try and debug this stuff.
--	 * For example, writing debug data to a port which you
--	 * have just faulted on is not going to work.
--	 */
+-obj-y	:= process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
+-		ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
+-		x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \
+-		setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \
+-		pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
+-		i8253.o
 -
--	tsk = current;
--	mm = tsk->mm;
+-obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+-obj-y				+= cpu/
+-obj-y				+= acpi/
+-obj-$(CONFIG_X86_MSR)		+= msr.o
+-obj-$(CONFIG_MICROCODE)		+= microcode.o
+-obj-$(CONFIG_X86_CPUID)		+= cpuid.o
+-obj-$(CONFIG_SMP)		+= smp_64.o smpboot_64.o trampoline_64.o tsc_sync.o
+-obj-y				+= apic_64.o  nmi_64.o
+-obj-y				+= io_apic_64.o mpparse_64.o genapic_64.o genapic_flat_64.o
+-obj-$(CONFIG_KEXEC)		+= machine_kexec_64.o relocate_kernel_64.o crash.o
+-obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_64.o
+-obj-$(CONFIG_PM)		+= suspend_64.o
+-obj-$(CONFIG_HIBERNATION)	+= suspend_asm_64.o
+-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+-obj-$(CONFIG_GART_IOMMU)	+= pci-gart_64.o aperture_64.o
+-obj-$(CONFIG_CALGARY_IOMMU)	+= pci-calgary_64.o tce_64.o
+-obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb_64.o
+-obj-$(CONFIG_KPROBES)		+= kprobes_64.o
+-obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
+-obj-$(CONFIG_X86_VSMP)		+= vsmp_64.o
+-obj-$(CONFIG_K8_NB)		+= k8.o
+-obj-$(CONFIG_AUDIT)		+= audit_64.o
 -
--	/* Not an IO address, so reenable interrupts */
--	local_irq_enable();
+-obj-$(CONFIG_MODULES)		+= module_64.o
+-obj-$(CONFIG_PCI)		+= early-quirks.o
 -
--	/*
--	 * If we're in an interrupt or have no user
--	 * context, we must not take the fault..
--	 */
--	if (in_atomic() || !mm)
--		goto no_context;
+-obj-y				+= topology.o
+-obj-y				+= pcspeaker.o
 -
--	/* TLB misses upon some cache flushes get done under cli() */
--	down_read(&mm->mmap_sem);
+-CFLAGS_vsyscall_64.o		:= $(PROFILING) -g0
+diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
+index 1351c39..19d3d6e 100644
+--- a/arch/x86/kernel/acpi/Makefile
++++ b/arch/x86/kernel/acpi/Makefile
+@@ -1,5 +1,5 @@
+ obj-$(CONFIG_ACPI)		+= boot.o
+-obj-$(CONFIG_ACPI_SLEEP)	+= sleep_$(BITS).o wakeup_$(BITS).o
++obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
+ 
+ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y				+= cstate.o processor.o
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+new file mode 100644
+index 0000000..6bc815c
+--- /dev/null
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -0,0 +1,87 @@
++/*
++ * sleep.c - x86-specific ACPI sleep support.
++ *
++ *  Copyright (C) 2001-2003 Patrick Mochel
++ *  Copyright (C) 2001-2003 Pavel Machek <pavel at suse.cz>
++ */
++
++#include <linux/acpi.h>
++#include <linux/bootmem.h>
++#include <linux/dmi.h>
++#include <linux/cpumask.h>
++
++#include <asm/smp.h>
++
++/* address in low memory of the wakeup routine. */
++unsigned long acpi_wakeup_address = 0;
++unsigned long acpi_realmode_flags;
++extern char wakeup_start, wakeup_end;
++
++extern unsigned long acpi_copy_wakeup_routine(unsigned long);
++
++/**
++ * acpi_save_state_mem - save kernel state
++ *
++ * Create an identity mapped page table and copy the wakeup routine to
++ * low memory.
++ */
++int acpi_save_state_mem(void)
++{
++	if (!acpi_wakeup_address) {
++		printk(KERN_ERR "Could not allocate memory during boot, S3 disabled\n");
++		return -ENOMEM;
++	}
++	memcpy((void *)acpi_wakeup_address, &wakeup_start,
++	       &wakeup_end - &wakeup_start);
++	acpi_copy_wakeup_routine(acpi_wakeup_address);
++
++	return 0;
++}
++
++/*
++ * acpi_restore_state - undo effects of acpi_save_state_mem
++ */
++void acpi_restore_state_mem(void)
++{
++}
++
++
++/**
++ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
++ *
++ * We allocate a page from the first 1MB of memory for the wakeup
++ * routine for when we come back from a sleep state. The
++ * runtime allocator allows specification of <16MB pages, but not
++ * <1MB pages.
++ */
++void __init acpi_reserve_bootmem(void)
++{
++	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE*2) {
++		printk(KERN_ERR
++		       "ACPI: Wakeup code way too big, S3 disabled.\n");
++		return;
++	}
++
++	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2);
++	if (!acpi_wakeup_address)
++		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
++}
++
++
++static int __init acpi_sleep_setup(char *str)
++{
++	while ((str != NULL) && (*str != '\0')) {
++		if (strncmp(str, "s3_bios", 7) == 0)
++			acpi_realmode_flags |= 1;
++		if (strncmp(str, "s3_mode", 7) == 0)
++			acpi_realmode_flags |= 2;
++		if (strncmp(str, "s3_beep", 7) == 0)
++			acpi_realmode_flags |= 4;
++		str = strchr(str, ',');
++		if (str != NULL)
++			str += strspn(str, ", \t");
++	}
++	return 1;
++}
++
++__setup("acpi_sleep=", acpi_sleep_setup);
+diff --git a/arch/x86/kernel/acpi/sleep_32.c b/arch/x86/kernel/acpi/sleep_32.c
+index 1069948..63fe552 100644
+--- a/arch/x86/kernel/acpi/sleep_32.c
++++ b/arch/x86/kernel/acpi/sleep_32.c
+@@ -12,76 +12,6 @@
+ 
+ #include <asm/smp.h>
+ 
+-/* address in low memory of the wakeup routine. */
+-unsigned long acpi_wakeup_address = 0;
+-unsigned long acpi_realmode_flags;
+-extern char wakeup_start, wakeup_end;
 -
--	vma = find_vma(mm, address);
+-extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
 -
--	if (!vma) {
--#ifdef DEBUG_FAULT
--		print_task(tsk);
--		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
--		       __FUNCTION__,__LINE__,
--		       address,regs->pc,textaccess,writeaccess);
--		show_regs(regs);
--#endif
--		goto bad_area;
--	}
--	if (vma->vm_start <= address) {
--		goto good_area;
--	}
+-/**
+- * acpi_save_state_mem - save kernel state
+- *
+- * Create an identity mapped page table and copy the wakeup routine to
+- * low memory.
+- */
+-int acpi_save_state_mem(void)
+-{
+-	if (!acpi_wakeup_address)
+-		return 1;
+-	memcpy((void *)acpi_wakeup_address, &wakeup_start,
+-	       &wakeup_end - &wakeup_start);
+-	acpi_copy_wakeup_routine(acpi_wakeup_address);
 -
--	if (!(vma->vm_flags & VM_GROWSDOWN)) {
--#ifdef DEBUG_FAULT
--		print_task(tsk);
--		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
--		       __FUNCTION__,__LINE__,
--		       address,regs->pc,textaccess,writeaccess);
--		show_regs(regs);
+-	return 0;
+-}
 -
--		print_vma(vma);
--#endif
--		goto bad_area;
--	}
--	if (expand_stack(vma, address)) {
--#ifdef DEBUG_FAULT
--		print_task(tsk);
--		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
--		       __FUNCTION__,__LINE__,
--		       address,regs->pc,textaccess,writeaccess);
--		show_regs(regs);
--#endif
--		goto bad_area;
--	}
 -/*
-- * Ok, we have a good vm_area for this memory access, so
-- * we can handle it..
+- * acpi_restore_state - undo effects of acpi_save_state_mem
 - */
--good_area:
--	if (textaccess) {
--		if (!(vma->vm_flags & VM_EXEC))
--			goto bad_area;
--	} else {
--		if (writeaccess) {
--			if (!(vma->vm_flags & VM_WRITE))
--				goto bad_area;
--		} else {
--			if (!(vma->vm_flags & VM_READ))
--				goto bad_area;
--		}
--	}
--
--	/*
--	 * If for any reason at all we couldn't handle the fault,
--	 * make sure we exit gracefully rather than endlessly redo
--	 * the fault.
--	 */
--survive:
--	fault = handle_mm_fault(mm, vma, address, writeaccess);
--	if (unlikely(fault & VM_FAULT_ERROR)) {
--		if (fault & VM_FAULT_OOM)
--			goto out_of_memory;
--		else if (fault & VM_FAULT_SIGBUS)
--			goto do_sigbus;
--		BUG();
--	}
--	if (fault & VM_FAULT_MAJOR)
--		tsk->maj_flt++;
--	else
--		tsk->min_flt++;
+-void acpi_restore_state_mem(void)
+-{
+-}
 -
--	/* If we get here, the page fault has been handled.  Do the TLB refill
--	   now from the newly-setup PTE, to avoid having to fault again right
--	   away on the same instruction. */
--	pte = lookup_pte (mm, address);
--	if (!pte) {
--		/* From empirical evidence, we can get here, due to
--		   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
--		   is swapped back out again before the process that wanted it
--		   gets rescheduled?) */
--		goto no_pte;
+-/**
+- * acpi_reserve_bootmem - do _very_ early ACPI initialisation
+- *
+- * We allocate a page from the first 1MB of memory for the wakeup
+- * routine for when we come back from a sleep state. The
+- * runtime allocator allows specification of <16MB pages, but not
+- * <1MB pages.
+- */
+-void __init acpi_reserve_bootmem(void)
+-{
+-	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) {
+-		printk(KERN_ERR
+-		       "ACPI: Wakeup code way too big, S3 disabled.\n");
+-		return;
 -	}
 -
--	__do_tlb_refill(address, textaccess, pte);
+-	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
+-	if (!acpi_wakeup_address)
+-		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
+-}
 -
--no_pte:
+-static int __init acpi_sleep_setup(char *str)
+-{
+-	while ((str != NULL) && (*str != '\0')) {
+-		if (strncmp(str, "s3_bios", 7) == 0)
+-			acpi_realmode_flags |= 1;
+-		if (strncmp(str, "s3_mode", 7) == 0)
+-			acpi_realmode_flags |= 2;
+-		if (strncmp(str, "s3_beep", 7) == 0)
+-			acpi_realmode_flags |= 4;
+-		str = strchr(str, ',');
+-		if (str != NULL)
+-			str += strspn(str, ", \t");
+-	}
+-	return 1;
+-}
 -
--	up_read(&mm->mmap_sem);
--	return;
+-__setup("acpi_sleep=", acpi_sleep_setup);
 -
+ /* Ouch, we want to delete this. We already have better version in userspace, in
+    s2ram from suspend.sf.net project */
+ static __init int reset_videomode_after_s3(const struct dmi_system_id *d)
+diff --git a/arch/x86/kernel/acpi/sleep_64.c b/arch/x86/kernel/acpi/sleep_64.c
+deleted file mode 100644
+index da42de2..0000000
+--- a/arch/x86/kernel/acpi/sleep_64.c
++++ /dev/null
+@@ -1,117 +0,0 @@
 -/*
-- * Something tried to access memory that isn't in our memory map..
-- * Fix it, but check if it's kernel or user first..
+- *  acpi.c - Architecture-Specific Low-Level ACPI Support
+- *
+- *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh at intel.com>
+- *  Copyright (C) 2001 Jun Nakajima <jun.nakajima at intel.com>
+- *  Copyright (C) 2001 Patrick Mochel <mochel at osdl.org>
+- *  Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
+- *  Copyright (C) 2003 Pavel Machek, SuSE Labs
+- *
+- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- *
+- *  This program is free software; you can redistribute it and/or modify
+- *  it under the terms of the GNU General Public License as published by
+- *  the Free Software Foundation; either version 2 of the License, or
+- *  (at your option) any later version.
+- *
+- *  This program is distributed in the hope that it will be useful,
+- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- *  GNU General Public License for more details.
+- *
+- *  You should have received a copy of the GNU General Public License
+- *  along with this program; if not, write to the Free Software
+- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+- *
+- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 - */
--bad_area:
--#ifdef DEBUG_FAULT
--	printk("fault:bad area\n");
--#endif
--	up_read(&mm->mmap_sem);
 -
--	if (user_mode(regs)) {
--		static int count=0;
--		siginfo_t info;
--		if (count < 4) {
--			/* This is really to help debug faults when starting
--			 * usermode, so only need a few */
--			count++;
--			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
--				address, task_pid_nr(current), current->comm,
--				(unsigned long) regs->pc);
--#if 0
--			show_regs(regs);
--#endif
--		}
--		if (is_global_init(tsk)) {
--			panic("INIT had user mode bad_area\n");
--		}
--		tsk->thread.address = address;
--		tsk->thread.error_code = writeaccess;
--		info.si_signo = SIGSEGV;
--		info.si_errno = 0;
--		info.si_addr = (void *) address;
--		force_sig_info(SIGSEGV, &info, tsk);
--		return;
--	}
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/types.h>
+-#include <linux/stddef.h>
+-#include <linux/slab.h>
+-#include <linux/pci.h>
+-#include <linux/bootmem.h>
+-#include <linux/acpi.h>
+-#include <linux/cpumask.h>
 -
--no_context:
--#ifdef DEBUG_FAULT
--	printk("fault:No context\n");
--#endif
--	/* Are we prepared to handle this kernel fault?  */
--	fixup = search_exception_tables(regs->pc);
--	if (fixup) {
--		regs->pc = fixup->fixup;
--		return;
--	}
+-#include <asm/mpspec.h>
+-#include <asm/io.h>
+-#include <asm/apic.h>
+-#include <asm/apicdef.h>
+-#include <asm/page.h>
+-#include <asm/pgtable.h>
+-#include <asm/pgalloc.h>
+-#include <asm/io_apic.h>
+-#include <asm/proto.h>
+-#include <asm/tlbflush.h>
 -
--/*
-- * Oops. The kernel tried to access some bad page. We'll have to
-- * terminate things with extreme prejudice.
+-/* --------------------------------------------------------------------------
+-                              Low-Level Sleep Support
+-   -------------------------------------------------------------------------- */
+-
+-/* address in low memory of the wakeup routine. */
+-unsigned long acpi_wakeup_address = 0;
+-unsigned long acpi_realmode_flags;
+-extern char wakeup_start, wakeup_end;
+-
+-extern unsigned long acpi_copy_wakeup_routine(unsigned long);
+-
+-/**
+- * acpi_save_state_mem - save kernel state
 - *
+- * Create an identity mapped page table and copy the wakeup routine to
+- * low memory.
 - */
--	if (address < PAGE_SIZE)
--		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
--	else
--		printk(KERN_ALERT "Unable to handle kernel paging request");
--	printk(" at virtual address %08lx\n", address);
--	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
--	die("Oops", regs, writeaccess);
--	do_exit(SIGKILL);
+-int acpi_save_state_mem(void)
+-{
+-	memcpy((void *)acpi_wakeup_address, &wakeup_start,
+-	       &wakeup_end - &wakeup_start);
+-	acpi_copy_wakeup_routine(acpi_wakeup_address);
+-
+-	return 0;
+-}
 -
 -/*
-- * We ran out of memory, or some other thing happened to us that made
-- * us unable to handle the page fault gracefully.
+- * acpi_restore_state
 - */
--out_of_memory:
--	if (is_global_init(current)) {
--		panic("INIT out of memory\n");
--		yield();
--		goto survive;
+-void acpi_restore_state_mem(void)
+-{
+-}
+-
+-/**
+- * acpi_reserve_bootmem - do _very_ early ACPI initialisation
+- *
+- * We allocate a page in low memory for the wakeup
+- * routine for when we come back from a sleep state. The
+- * runtime allocator allows specification of <16M pages, but not
+- * <1M pages.
+- */
+-void __init acpi_reserve_bootmem(void)
+-{
+-	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2);
+-	if ((&wakeup_end - &wakeup_start) > (PAGE_SIZE*2))
+-		printk(KERN_CRIT
+-		       "ACPI: Wakeup code way too big, will crash on attempt"
+-		       " to suspend\n");
+-}
+-
+-static int __init acpi_sleep_setup(char *str)
+-{
+-	while ((str != NULL) && (*str != '\0')) {
+-		if (strncmp(str, "s3_bios", 7) == 0)
+-			acpi_realmode_flags |= 1;
+-		if (strncmp(str, "s3_mode", 7) == 0)
+-			acpi_realmode_flags |= 2;
+-		if (strncmp(str, "s3_beep", 7) == 0)
+-			acpi_realmode_flags |= 4;
+-		str = strchr(str, ',');
+-		if (str != NULL)
+-			str += strspn(str, ", \t");
 -	}
--	printk("fault:Out of memory\n");
--	up_read(&mm->mmap_sem);
--	if (is_global_init(current)) {
--		yield();
--		down_read(&mm->mmap_sem);
--		goto survive;
+-	return 1;
+-}
+-
+-__setup("acpi_sleep=", acpi_sleep_setup);
+-
+diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
+index 1e931aa..f53e327 100644
+--- a/arch/x86/kernel/acpi/wakeup_32.S
++++ b/arch/x86/kernel/acpi/wakeup_32.S
+@@ -1,4 +1,4 @@
+-.text
++	.section .text.page_aligned
+ #include <linux/linkage.h>
+ #include <asm/segment.h>
+ #include <asm/page.h>
+diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
+index 5ed3bc5..2e1b9e0 100644
+--- a/arch/x86/kernel/acpi/wakeup_64.S
++++ b/arch/x86/kernel/acpi/wakeup_64.S
+@@ -344,13 +344,13 @@ do_suspend_lowlevel:
+ 	call	save_processor_state
+ 
+ 	movq	$saved_context, %rax
+-	movq	%rsp, pt_regs_rsp(%rax)
+-	movq	%rbp, pt_regs_rbp(%rax)
+-	movq	%rsi, pt_regs_rsi(%rax)
+-	movq	%rdi, pt_regs_rdi(%rax)
+-	movq	%rbx, pt_regs_rbx(%rax)
+-	movq	%rcx, pt_regs_rcx(%rax)
+-	movq	%rdx, pt_regs_rdx(%rax)
++	movq	%rsp, pt_regs_sp(%rax)
++	movq	%rbp, pt_regs_bp(%rax)
++	movq	%rsi, pt_regs_si(%rax)
++	movq	%rdi, pt_regs_di(%rax)
++	movq	%rbx, pt_regs_bx(%rax)
++	movq	%rcx, pt_regs_cx(%rax)
++	movq	%rdx, pt_regs_dx(%rax)
+ 	movq	%r8, pt_regs_r8(%rax)
+ 	movq	%r9, pt_regs_r9(%rax)
+ 	movq	%r10, pt_regs_r10(%rax)
+@@ -360,7 +360,7 @@ do_suspend_lowlevel:
+ 	movq	%r14, pt_regs_r14(%rax)
+ 	movq	%r15, pt_regs_r15(%rax)
+ 	pushfq
+-	popq	pt_regs_eflags(%rax)
++	popq	pt_regs_flags(%rax)
+ 
+ 	movq	$.L97, saved_rip(%rip)
+ 
+@@ -391,15 +391,15 @@ do_suspend_lowlevel:
+ 	movq	%rbx, %cr2
+ 	movq	saved_context_cr0(%rax), %rbx
+ 	movq	%rbx, %cr0
+-	pushq	pt_regs_eflags(%rax)
++	pushq	pt_regs_flags(%rax)
+ 	popfq
+-	movq	pt_regs_rsp(%rax), %rsp
+-	movq	pt_regs_rbp(%rax), %rbp
+-	movq	pt_regs_rsi(%rax), %rsi
+-	movq	pt_regs_rdi(%rax), %rdi
+-	movq	pt_regs_rbx(%rax), %rbx
+-	movq	pt_regs_rcx(%rax), %rcx
+-	movq	pt_regs_rdx(%rax), %rdx
++	movq	pt_regs_sp(%rax), %rsp
++	movq	pt_regs_bp(%rax), %rbp
++	movq	pt_regs_si(%rax), %rsi
++	movq	pt_regs_di(%rax), %rdi
++	movq	pt_regs_bx(%rax), %rbx
++	movq	pt_regs_cx(%rax), %rcx
++	movq	pt_regs_dx(%rax), %rdx
+ 	movq	pt_regs_r8(%rax), %r8
+ 	movq	pt_regs_r9(%rax), %r9
+ 	movq	pt_regs_r10(%rax), %r10
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index d6405e0..45d79ea 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -273,6 +273,7 @@ struct smp_alt_module {
+ };
+ static LIST_HEAD(smp_alt_modules);
+ static DEFINE_SPINLOCK(smp_alt);
++static int smp_mode = 1;	/* protected by smp_alt */
+ 
+ void alternatives_smp_module_add(struct module *mod, char *name,
+ 				 void *locks, void *locks_end,
+@@ -341,12 +342,13 @@ void alternatives_smp_switch(int smp)
+ 
+ #ifdef CONFIG_LOCKDEP
+ 	/*
+-	 * A not yet fixed binutils section handling bug prevents
+-	 * alternatives-replacement from working reliably, so turn
+-	 * it off:
++	 * Older binutils section handling bug prevented
++	 * alternatives-replacement from working reliably.
++	 *
++	 * If this still occurs then you should see a hang
++	 * or crash shortly after this line:
+ 	 */
+-	printk("lockdep: not fixing up alternatives.\n");
+-	return;
++	printk("lockdep: fixing up alternatives.\n");
+ #endif
+ 
+ 	if (noreplace_smp || smp_alt_once)
+@@ -354,21 +356,29 @@ void alternatives_smp_switch(int smp)
+ 	BUG_ON(!smp && (num_online_cpus() > 1));
+ 
+ 	spin_lock_irqsave(&smp_alt, flags);
+-	if (smp) {
++
++	/*
++	 * Avoid unnecessary switches because it forces JIT based VMs to
++	 * throw away all cached translations, which can be quite costly.
++	 */
++	if (smp == smp_mode) {
++		/* nothing */
++	} else if (smp) {
+ 		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
+-		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+-		clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
++		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
++		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
+ 		list_for_each_entry(mod, &smp_alt_modules, next)
+ 			alternatives_smp_lock(mod->locks, mod->locks_end,
+ 					      mod->text, mod->text_end);
+ 	} else {
+ 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+-		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+-		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
++		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
++		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
+ 		list_for_each_entry(mod, &smp_alt_modules, next)
+ 			alternatives_smp_unlock(mod->locks, mod->locks_end,
+ 						mod->text, mod->text_end);
+ 	}
++	smp_mode = smp;
+ 	spin_unlock_irqrestore(&smp_alt, flags);
+ }
+ 
+@@ -431,8 +441,9 @@ void __init alternative_instructions(void)
+ 	if (smp_alt_once) {
+ 		if (1 == num_possible_cpus()) {
+ 			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+-			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+-			set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
++			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
++			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
++
+ 			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
+ 						_text, _etext);
+ 		}
+@@ -440,7 +451,10 @@ void __init alternative_instructions(void)
+ 		alternatives_smp_module_add(NULL, "core kernel",
+ 					    __smp_locks, __smp_locks_end,
+ 					    _text, _etext);
+-		alternatives_smp_switch(0);
++
++		/* Only switch to UP mode if we don't immediately boot others */
++		if (num_possible_cpus() == 1 || setup_max_cpus <= 1)
++			alternatives_smp_switch(0);
+ 	}
+ #endif
+  	apply_paravirt(__parainstructions, __parainstructions_end);
+diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
+index 5b69927..608152a 100644
+--- a/arch/x86/kernel/aperture_64.c
++++ b/arch/x86/kernel/aperture_64.c
+@@ -1,12 +1,12 @@
+-/* 
++/*
+  * Firmware replacement code.
+- * 
++ *
+  * Work around broken BIOSes that don't set an aperture or only set the
+- * aperture in the AGP bridge. 
+- * If all fails map the aperture over some low memory.  This is cheaper than 
+- * doing bounce buffering. The memory is lost. This is done at early boot 
+- * because only the bootmem allocator can allocate 32+MB. 
+- * 
++ * aperture in the AGP bridge.
++ * If all fails map the aperture over some low memory.  This is cheaper than
++ * doing bounce buffering. The memory is lost. This is done at early boot
++ * because only the bootmem allocator can allocate 32+MB.
++ *
+  * Copyright 2002 Andi Kleen, SuSE Labs.
+  */
+ #include <linux/kernel.h>
+@@ -30,7 +30,7 @@ int gart_iommu_aperture_disabled __initdata = 0;
+ int gart_iommu_aperture_allowed __initdata = 0;
+ 
+ int fallback_aper_order __initdata = 1; /* 64MB */
+-int fallback_aper_force __initdata = 0; 
++int fallback_aper_force __initdata = 0;
+ 
+ int fix_aperture __initdata = 1;
+ 
+@@ -49,167 +49,270 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
+ /* This code runs before the PCI subsystem is initialized, so just
+    access the northbridge directly. */
+ 
+-static u32 __init allocate_aperture(void) 
++static u32 __init allocate_aperture(void)
+ {
+ 	u32 aper_size;
+-	void *p; 
++	void *p;
+ 
+-	if (fallback_aper_order > 7) 
+-		fallback_aper_order = 7; 
+-	aper_size = (32 * 1024 * 1024) << fallback_aper_order; 
++	if (fallback_aper_order > 7)
++		fallback_aper_order = 7;
++	aper_size = (32 * 1024 * 1024) << fallback_aper_order;
+ 
+-	/* 
+-	 * Aperture has to be naturally aligned. This means an 2GB aperture won't
+-	 * have much chance of finding a place in the lower 4GB of memory.
+-	 * Unfortunately we cannot move it up because that would make the
+-	 * IOMMU useless.
++	/*
++	 * Aperture has to be naturally aligned. This means a 2GB aperture
++	 * won't have much chance of finding a place in the lower 4GB of
++	 * memory. Unfortunately we cannot move it up because that would
++	 * make the IOMMU useless.
+ 	 */
+ 	p = __alloc_bootmem_nopanic(aper_size, aper_size, 0);
+ 	if (!p || __pa(p)+aper_size > 0xffffffff) {
+-		printk("Cannot allocate aperture memory hole (%p,%uK)\n",
+-		       p, aper_size>>10);
++		printk(KERN_ERR
++			"Cannot allocate aperture memory hole (%p,%uK)\n",
++				p, aper_size>>10);
+ 		if (p)
+ 			free_bootmem(__pa(p), aper_size);
+ 		return 0;
+ 	}
+-	printk("Mapping aperture over %d KB of RAM @ %lx\n",
+-	       aper_size >> 10, __pa(p)); 
++	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
++			aper_size >> 10, __pa(p));
+ 	insert_aperture_resource((u32)__pa(p), aper_size);
+-	return (u32)__pa(p); 
++
++	return (u32)__pa(p);
+ }
+ 
+ static int __init aperture_valid(u64 aper_base, u32 aper_size)
+-{ 
+-	if (!aper_base) 
+-		return 0;
+-	if (aper_size < 64*1024*1024) { 
+-		printk("Aperture too small (%d MB)\n", aper_size>>20);
++{
++	if (!aper_base)
+ 		return 0;
 -	}
--	printk("VM: killing process %s\n", tsk->comm);
--	if (user_mode(regs))
--		do_group_exit(SIGKILL);
--	goto no_context;
++
+ 	if (aper_base + aper_size > 0x100000000UL) {
+-		printk("Aperture beyond 4GB. Ignoring.\n");
+-		return 0; 
++		printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
++		return 0;
+ 	}
+ 	if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
+-		printk("Aperture pointing to e820 RAM. Ignoring.\n");
+-		return 0; 
+-	} 
++		printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
++		return 0;
++	}
++	if (aper_size < 64*1024*1024) {
++		printk(KERN_ERR "Aperture too small (%d MB)\n", aper_size>>20);
++		return 0;
++	}
++
+ 	return 1;
+-} 
++}
+ 
+ /* Find a PCI capability */
+-static __u32 __init find_cap(int num, int slot, int func, int cap) 
+-{ 
+-	u8 pos;
++static __u32 __init find_cap(int num, int slot, int func, int cap)
++{
+ 	int bytes;
+-	if (!(read_pci_config_16(num,slot,func,PCI_STATUS) & PCI_STATUS_CAP_LIST))
++	u8 pos;
++
++	if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
++						PCI_STATUS_CAP_LIST))
+ 		return 0;
+-	pos = read_pci_config_byte(num,slot,func,PCI_CAPABILITY_LIST);
+-	for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { 
++
++	pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
++	for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
+ 		u8 id;
+-		pos &= ~3; 
+-		id = read_pci_config_byte(num,slot,func,pos+PCI_CAP_LIST_ID);
++
++		pos &= ~3;
++		id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
+ 		if (id == 0xff)
+ 			break;
+-		if (id == cap) 
+-			return pos; 
+-		pos = read_pci_config_byte(num,slot,func,pos+PCI_CAP_LIST_NEXT); 
+-	} 
++		if (id == cap)
++			return pos;
++		pos = read_pci_config_byte(num, slot, func,
++						pos+PCI_CAP_LIST_NEXT);
++	}
+ 	return 0;
+-} 
++}
+ 
+ /* Read a standard AGPv3 bridge header */
+ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order)
+-{ 
++{
+ 	u32 apsize;
+ 	u32 apsizereg;
+ 	int nbits;
+ 	u32 aper_low, aper_hi;
+ 	u64 aper;
+ 
+-	printk("AGP bridge at %02x:%02x:%02x\n", num, slot, func);
+-	apsizereg = read_pci_config_16(num,slot,func, cap + 0x14);
++	printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", num, slot, func);
++	apsizereg = read_pci_config_16(num, slot, func, cap + 0x14);
+ 	if (apsizereg == 0xffffffff) {
+-		printk("APSIZE in AGP bridge unreadable\n");
++		printk(KERN_ERR "APSIZE in AGP bridge unreadable\n");
+ 		return 0;
+ 	}
+ 
+ 	apsize = apsizereg & 0xfff;
+ 	/* Some BIOS use weird encodings not in the AGPv3 table. */
+-	if (apsize & 0xff) 
+-		apsize |= 0xf00; 
++	if (apsize & 0xff)
++		apsize |= 0xf00;
+ 	nbits = hweight16(apsize);
+ 	*order = 7 - nbits;
+ 	if ((int)*order < 0) /* < 32MB */
+ 		*order = 0;
+-	
+-	aper_low = read_pci_config(num,slot,func, 0x10);
+-	aper_hi = read_pci_config(num,slot,func,0x14);
++
++	aper_low = read_pci_config(num, slot, func, 0x10);
++	aper_hi = read_pci_config(num, slot, func, 0x14);
+ 	aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
+ 
+-	printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", 
+-	       aper, 32 << *order, apsizereg);
++	printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
++			aper, 32 << *order, apsizereg);
+ 
+ 	if (!aperture_valid(aper, (32*1024*1024) << *order))
+-	    return 0;
+-	return (u32)aper; 
+-} 
 -
--do_sigbus:
--	printk("fault:Do sigbus\n");
--	up_read(&mm->mmap_sem);
+-/* Look for an AGP bridge. Windows only expects the aperture in the
+-   AGP bridge and some BIOS forget to initialize the Northbridge too.
+-   Work around this here. 
 -
--	/*
--	 * Send a sigbus, regardless of whether we were in kernel
--	 * or user mode.
--	 */
--	tsk->thread.address = address;
--	tsk->thread.error_code = writeaccess;
--	tsk->thread.trap_no = 14;
--	force_sig(SIGBUS, tsk);
+-   Do an PCI bus scan by hand because we're running before the PCI
+-   subsystem. 
++		return 0;
++	return (u32)aper;
++}
+ 
+-   All K8 AGP bridges are AGPv3 compliant, so we can do this scan
+-   generically. It's probably overkill to always scan all slots because
+-   the AGP bridges should be always an own bus on the HT hierarchy, 
+-   but do it here for future safety. */
++/*
++ * Look for an AGP bridge. Windows only expects the aperture in the
++ * AGP bridge and some BIOS forget to initialize the Northbridge too.
++ * Work around this here.
++ *
++ * Do an PCI bus scan by hand because we're running before the PCI
++ * subsystem.
++ *
++ * All K8 AGP bridges are AGPv3 compliant, so we can do this scan
++ * generically. It's probably overkill to always scan all slots because
++ * the AGP bridges should be always an own bus on the HT hierarchy,
++ * but do it here for future safety.
++ */
+ static __u32 __init search_agp_bridge(u32 *order, int *valid_agp)
+ {
+ 	int num, slot, func;
+ 
+ 	/* Poor man's PCI discovery */
+-	for (num = 0; num < 256; num++) { 
+-		for (slot = 0; slot < 32; slot++) { 
+-			for (func = 0; func < 8; func++) { 
++	for (num = 0; num < 256; num++) {
++		for (slot = 0; slot < 32; slot++) {
++			for (func = 0; func < 8; func++) {
+ 				u32 class, cap;
+ 				u8 type;
+-				class = read_pci_config(num,slot,func,
++				class = read_pci_config(num, slot, func,
+ 							PCI_CLASS_REVISION);
+ 				if (class == 0xffffffff)
+-					break; 
+-				
+-				switch (class >> 16) { 
++					break;
++
++				switch (class >> 16) {
+ 				case PCI_CLASS_BRIDGE_HOST:
+ 				case PCI_CLASS_BRIDGE_OTHER: /* needed? */
+ 					/* AGP bridge? */
+-					cap = find_cap(num,slot,func,PCI_CAP_ID_AGP);
++					cap = find_cap(num, slot, func,
++							PCI_CAP_ID_AGP);
+ 					if (!cap)
+ 						break;
+-					*valid_agp = 1; 
+-					return read_agp(num,slot,func,cap,order);
+-				} 
+-				
++					*valid_agp = 1;
++					return read_agp(num, slot, func, cap,
++							order);
++				}
++
+ 				/* No multi-function device? */
+-				type = read_pci_config_byte(num,slot,func,
++				type = read_pci_config_byte(num, slot, func,
+ 							       PCI_HEADER_TYPE);
+ 				if (!(type & 0x80))
+ 					break;
+-			} 
+-		} 
++			}
++		}
+ 	}
+-	printk("No AGP bridge found\n"); 
++	printk(KERN_INFO "No AGP bridge found\n");
++
+ 	return 0;
+ }
+ 
++static int gart_fix_e820 __initdata = 1;
++
++static int __init parse_gart_mem(char *p)
++{
++	if (!p)
++		return -EINVAL;
++
++	if (!strncmp(p, "off", 3))
++		gart_fix_e820 = 0;
++	else if (!strncmp(p, "on", 2))
++		gart_fix_e820 = 1;
++
++	return 0;
++}
++early_param("gart_fix_e820", parse_gart_mem);
++
++void __init early_gart_iommu_check(void)
++{
++	/*
++	 * in case it is enabled before, esp for kexec/kdump,
++	 * previous kernel already enable that. memset called
++	 * by allocate_aperture/__alloc_bootmem_nopanic cause restart.
++	 * or second kernel have different position for GART hole. and new
++	 * kernel could use hole as RAM that is still used by GART set by
++	 * first kernel
++	 * or BIOS forget to put that in reserved.
++	 * try to update e820 to make that region as reserved.
++	 */
++	int fix, num;
++	u32 ctl;
++	u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
++	u64 aper_base = 0, last_aper_base = 0;
++	int aper_enabled = 0, last_aper_enabled = 0;
++
++	if (!early_pci_allowed())
++		return;
++
++	fix = 0;
++	for (num = 24; num < 32; num++) {
++		if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
++			continue;
++
++		ctl = read_pci_config(0, num, 3, 0x90);
++		aper_enabled = ctl & 1;
++		aper_order = (ctl >> 1) & 7;
++		aper_size = (32 * 1024 * 1024) << aper_order;
++		aper_base = read_pci_config(0, num, 3, 0x94) & 0x7fff;
++		aper_base <<= 25;
++
++		if ((last_aper_order && aper_order != last_aper_order) ||
++		    (last_aper_base && aper_base != last_aper_base) ||
++		    (last_aper_enabled && aper_enabled != last_aper_enabled)) {
++			fix = 1;
++			break;
++		}
++		last_aper_order = aper_order;
++		last_aper_base = aper_base;
++		last_aper_enabled = aper_enabled;
++	}
++
++	if (!fix && !aper_enabled)
++		return;
++
++	if (!aper_base || !aper_size || aper_base + aper_size > 0x100000000UL)
++		fix = 1;
++
++	if (gart_fix_e820 && !fix && aper_enabled) {
++		if (e820_any_mapped(aper_base, aper_base + aper_size,
++				    E820_RAM)) {
++			/* reserved it, so we can resuse it in second kernel */
++			printk(KERN_INFO "update e820 for GART\n");
++			add_memory_region(aper_base, aper_size, E820_RESERVED);
++			update_e820();
++		}
++		return;
++	}
++
++	/* different nodes have different setting, disable them all at first*/
++	for (num = 24; num < 32; num++) {
++		if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
++			continue;
++
++		ctl = read_pci_config(0, num, 3, 0x90);
++		ctl &= ~1;
++		write_pci_config(0, num, 3, 0x90, ctl);
++	}
++
++}
++
+ void __init gart_iommu_hole_init(void)
+-{ 
+-	int fix, num; 
++{
+ 	u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0;
+ 	u64 aper_base, last_aper_base = 0;
+-	int valid_agp = 0;
++	int fix, num, valid_agp = 0;
++	int node;
+ 
+ 	if (gart_iommu_aperture_disabled || !fix_aperture ||
+ 	    !early_pci_allowed())
+@@ -218,24 +321,26 @@ void __init gart_iommu_hole_init(void)
+ 	printk(KERN_INFO  "Checking aperture...\n");
+ 
+ 	fix = 0;
+-	for (num = 24; num < 32; num++) {		
++	node = 0;
++	for (num = 24; num < 32; num++) {
+ 		if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
+ 			continue;
+ 
+ 		iommu_detected = 1;
+ 		gart_iommu_aperture = 1;
+ 
+-		aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7; 
+-		aper_size = (32 * 1024 * 1024) << aper_order; 
++		aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7;
++		aper_size = (32 * 1024 * 1024) << aper_order;
+ 		aper_base = read_pci_config(0, num, 3, 0x94) & 0x7fff;
+-		aper_base <<= 25; 
++		aper_base <<= 25;
++
++		printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n",
++				node, aper_base, aper_size >> 20);
++		node++;
+ 
+-		printk("CPU %d: aperture @ %Lx size %u MB\n", num-24, 
+-		       aper_base, aper_size>>20);
+-		
+ 		if (!aperture_valid(aper_base, aper_size)) {
+-			fix = 1; 
+-			break; 
++			fix = 1;
++			break;
+ 		}
+ 
+ 		if ((last_aper_order && aper_order != last_aper_order) ||
+@@ -245,55 +350,64 @@ void __init gart_iommu_hole_init(void)
+ 		}
+ 		last_aper_order = aper_order;
+ 		last_aper_base = aper_base;
+-	} 
++	}
+ 
+ 	if (!fix && !fallback_aper_force) {
+ 		if (last_aper_base) {
+ 			unsigned long n = (32 * 1024 * 1024) << last_aper_order;
++
+ 			insert_aperture_resource((u32)last_aper_base, n);
+ 		}
+-		return; 
++		return;
+ 	}
+ 
+ 	if (!fallback_aper_force)
+-		aper_alloc = search_agp_bridge(&aper_order, &valid_agp); 
+-		
+-	if (aper_alloc) { 
++		aper_alloc = search_agp_bridge(&aper_order, &valid_agp);
++
++	if (aper_alloc) {
+ 		/* Got the aperture from the AGP bridge */
+ 	} else if (swiotlb && !valid_agp) {
+ 		/* Do nothing */
+ 	} else if ((!no_iommu && end_pfn > MAX_DMA32_PFN) ||
+ 		   force_iommu ||
+ 		   valid_agp ||
+-		   fallback_aper_force) { 
+-		printk("Your BIOS doesn't leave a aperture memory hole\n");
+-		printk("Please enable the IOMMU option in the BIOS setup\n");
+-		printk("This costs you %d MB of RAM\n",
+-		       32 << fallback_aper_order);
++		   fallback_aper_force) {
++		printk(KERN_ERR
++			"Your BIOS doesn't leave a aperture memory hole\n");
++		printk(KERN_ERR
++			"Please enable the IOMMU option in the BIOS setup\n");
++		printk(KERN_ERR
++			"This costs you %d MB of RAM\n",
++				32 << fallback_aper_order);
+ 
+ 		aper_order = fallback_aper_order;
+ 		aper_alloc = allocate_aperture();
+-		if (!aper_alloc) { 
+-			/* Could disable AGP and IOMMU here, but it's probably
+-			   not worth it. But the later users cannot deal with
+-			   bad apertures and turning on the aperture over memory
+-			   causes very strange problems, so it's better to 
+-			   panic early. */
++		if (!aper_alloc) {
++			/*
++			 * Could disable AGP and IOMMU here, but it's
++			 * probably not worth it. But the later users
++			 * cannot deal with bad apertures and turning
++			 * on the aperture over memory causes very
++			 * strange problems, so it's better to panic
++			 * early.
++			 */
+ 			panic("Not enough memory for aperture");
+ 		}
+-	} else { 
+-		return; 
+-	} 
++	} else {
++		return;
++	}
+ 
+ 	/* Fix up the north bridges */
+-	for (num = 24; num < 32; num++) { 		
++	for (num = 24; num < 32; num++) {
+ 		if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
+-			continue;	
 -
--	/* Kernel mode? Handle exceptions or die */
--	if (!user_mode(regs))
--		goto no_context;
--}
+-		/* Don't enable translation yet. That is done later. 
+-		   Assume this BIOS didn't initialise the GART so 
+-		   just overwrite all previous bits */ 
+-		write_pci_config(0, num, 3, 0x90, aper_order<<1); 
+-		write_pci_config(0, num, 3, 0x94, aper_alloc>>25); 
+-	} 
+-} 
++			continue;
++
++		/*
++		 * Don't enable translation yet. That is done later.
++		 * Assume this BIOS didn't initialise the GART so
++		 * just overwrite all previous bits
++		 */
++		write_pci_config(0, num, 3, 0x90, aper_order<<1);
++		write_pci_config(0, num, 3, 0x94, aper_alloc>>25);
++	}
++}
+diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
+index edb5108..35a568e 100644
+--- a/arch/x86/kernel/apic_32.c
++++ b/arch/x86/kernel/apic_32.c
+@@ -43,12 +43,10 @@
+ #include <mach_apicdef.h>
+ #include <mach_ipi.h>
+ 
+-#include "io_ports.h"
 -
+ /*
+  * Sanity check
+  */
+-#if (SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F
++#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
+ # error SPURIOUS_APIC_VECTOR definition error
+ #endif
+ 
+@@ -57,7 +55,7 @@
+  *
+  * -1=force-disable, +1=force-enable
+  */
+-static int enable_local_apic __initdata = 0;
++static int enable_local_apic __initdata;
+ 
+ /* Local APIC timer verification ok */
+ static int local_apic_timer_verify_ok;
+@@ -101,6 +99,8 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
+ /* Local APIC was disabled by the BIOS and enabled by the kernel */
+ static int enabled_via_apicbase;
+ 
++static unsigned long apic_phys;
++
+ /*
+  * Get the LAPIC version
+  */
+@@ -110,7 +110,7 @@ static inline int lapic_get_version(void)
+ }
+ 
+ /*
+- * Check, if the APIC is integrated or a seperate chip
++ * Check, if the APIC is integrated or a separate chip
+  */
+ static inline int lapic_is_integrated(void)
+ {
+@@ -135,9 +135,9 @@ void apic_wait_icr_idle(void)
+ 		cpu_relax();
+ }
+ 
+-unsigned long safe_apic_wait_icr_idle(void)
++u32 safe_apic_wait_icr_idle(void)
+ {
+-	unsigned long send_status;
++	u32 send_status;
+ 	int timeout;
+ 
+ 	timeout = 0;
+@@ -154,7 +154,7 @@ unsigned long safe_apic_wait_icr_idle(void)
+ /**
+  * enable_NMI_through_LVT0 - enable NMI through local vector table 0
+  */
+-void enable_NMI_through_LVT0 (void * dummy)
++void __cpuinit enable_NMI_through_LVT0(void)
+ {
+ 	unsigned int v = APIC_DM_NMI;
+ 
+@@ -379,8 +379,10 @@ void __init setup_boot_APIC_clock(void)
+ 	 */
+ 	if (local_apic_timer_disabled) {
+ 		/* No broadcast on UP ! */
+-		if (num_possible_cpus() > 1)
++		if (num_possible_cpus() > 1) {
++			lapic_clockevent.mult = 1;
+ 			setup_APIC_timer();
++		}
+ 		return;
+ 	}
+ 
+@@ -434,7 +436,7 @@ void __init setup_boot_APIC_clock(void)
+ 			       "with PM Timer: %ldms instead of 100ms\n",
+ 			       (long)res);
+ 			/* Correct the lapic counter value */
+-			res = (((u64) delta ) * pm_100ms);
++			res = (((u64) delta) * pm_100ms);
+ 			do_div(res, deltapm);
+ 			printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
+ 			       "%lu (%ld)\n", (unsigned long) res, delta);
+@@ -472,6 +474,19 @@ void __init setup_boot_APIC_clock(void)
+ 
+ 	local_apic_timer_verify_ok = 1;
+ 
++	/*
++	 * Do a sanity check on the APIC calibration result
++	 */
++	if (calibration_result < (1000000 / HZ)) {
++		local_irq_enable();
++		printk(KERN_WARNING
++		       "APIC frequency too slow, disabling apic timer\n");
++		/* No broadcast on UP ! */
++		if (num_possible_cpus() > 1)
++			setup_APIC_timer();
++		return;
++	}
++
+ 	/* We trust the pm timer based calibration */
+ 	if (!pm_referenced) {
+ 		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
+@@ -563,6 +578,9 @@ static void local_apic_timer_interrupt(void)
+ 		return;
+ 	}
+ 
++	/*
++	 * the NMI deadlock-detector uses this.
++	 */
+ 	per_cpu(irq_stat, cpu).apic_timer_irqs++;
+ 
+ 	evt->event_handler(evt);
+@@ -576,8 +594,7 @@ static void local_apic_timer_interrupt(void)
+  * [ if a single-CPU system runs an SMP kernel then we call the local
+  *   interrupt as well. Thus we cannot inline the local irq ... ]
+  */
 -
--void flush_tlb_all(void);
+-void fastcall smp_apic_timer_interrupt(struct pt_regs *regs)
++void smp_apic_timer_interrupt(struct pt_regs *regs)
+ {
+ 	struct pt_regs *old_regs = set_irq_regs(regs);
+ 
+@@ -616,9 +633,14 @@ int setup_profiling_timer(unsigned int multiplier)
+  */
+ void clear_local_APIC(void)
+ {
+-	int maxlvt = lapic_get_maxlvt();
+-	unsigned long v;
++	int maxlvt;
++	u32 v;
++
++	/* APIC hasn't been mapped yet */
++	if (!apic_phys)
++		return;
+ 
++	maxlvt = lapic_get_maxlvt();
+ 	/*
+ 	 * Masking an LVT entry can trigger a local APIC error
+ 	 * if the vector is zero. Mask LVTERR first to prevent this.
+@@ -976,7 +998,8 @@ void __cpuinit setup_local_APIC(void)
+ 		value |= APIC_LVT_LEVEL_TRIGGER;
+ 	apic_write_around(APIC_LVT1, value);
+ 
+-	if (integrated && !esr_disable) {		/* !82489DX */
++	if (integrated && !esr_disable) {
++		/* !82489DX */
+ 		maxlvt = lapic_get_maxlvt();
+ 		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
+ 			apic_write(APIC_ESR, 0);
+@@ -1020,7 +1043,7 @@ void __cpuinit setup_local_APIC(void)
+ /*
+  * Detect and initialize APIC
+  */
+-static int __init detect_init_APIC (void)
++static int __init detect_init_APIC(void)
+ {
+ 	u32 h, l, features;
+ 
+@@ -1077,7 +1100,7 @@ static int __init detect_init_APIC (void)
+ 		printk(KERN_WARNING "Could not enable APIC!\n");
+ 		return -1;
+ 	}
+-	set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++	set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
+ 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+ 
+ 	/* The BIOS may have set up the APIC at some other address */
+@@ -1104,8 +1127,6 @@ no_apic:
+  */
+ void __init init_apic_mappings(void)
+ {
+-	unsigned long apic_phys;
 -
--void update_mmu_cache(struct vm_area_struct * vma,
--			unsigned long address, pte_t pte)
+ 	/*
+ 	 * If no local APIC can be found then set up a fake all
+ 	 * zeroes page to simulate the local APIC and another
+@@ -1164,10 +1185,10 @@ fake_ioapic_page:
+  * This initializes the IO-APIC and APIC hardware if this is
+  * a UP kernel.
+  */
+-int __init APIC_init_uniprocessor (void)
++int __init APIC_init_uniprocessor(void)
+ {
+ 	if (enable_local_apic < 0)
+-		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
+ 
+ 	if (!smp_found_config && !cpu_has_apic)
+ 		return -1;
+@@ -1179,7 +1200,7 @@ int __init APIC_init_uniprocessor (void)
+ 	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
+ 		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
+ 		       boot_cpu_physical_apicid);
+-		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
+ 		return -1;
+ 	}
+ 
+@@ -1210,50 +1231,6 @@ int __init APIC_init_uniprocessor (void)
+ }
+ 
+ /*
+- * APIC command line parameters
+- */
+-static int __init parse_lapic(char *arg)
 -{
--#if defined(CONFIG_SH64_PROC_TLB)
--	++calls_to_update_mmu_cache;
--#endif
--
--	/*
--	 * This appears to get called once for every pte entry that gets
--	 * established => I don't think it's efficient to try refilling the
--	 * TLBs with the pages - some may not get accessed even.  Also, for
--	 * executable pages, it is impossible to determine reliably here which
--	 * TLB they should be mapped into (or both even).
--	 *
--	 * So, just do nothing here and handle faults on demand.  In the
--	 * TLBMISS handling case, the refill is now done anyway after the pte
--	 * has been fixed up, so that deals with most useful cases.
--	 */
+-	enable_local_apic = 1;
+-	return 0;
 -}
+-early_param("lapic", parse_lapic);
 -
--static void __flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+-static int __init parse_nolapic(char *arg)
 -{
--	unsigned long long match, pteh=0, lpage;
--	unsigned long tlb;
--	struct mm_struct *mm;
+-	enable_local_apic = -1;
+-	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+-	return 0;
+-}
+-early_param("nolapic", parse_nolapic);
 -
--	mm = vma->vm_mm;
+-static int __init parse_disable_lapic_timer(char *arg)
+-{
+-	local_apic_timer_disabled = 1;
+-	return 0;
+-}
+-early_param("nolapic_timer", parse_disable_lapic_timer);
 -
--	if (mm->context == NO_CONTEXT)
--		return;
+-static int __init parse_lapic_timer_c2_ok(char *arg)
+-{
+-	local_apic_timer_c2_ok = 1;
+-	return 0;
+-}
+-early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
 -
--	/*
--	 * Sign-extend based on neff.
--	 */
--	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
--	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
--	match |= lpage;
+-static int __init apic_set_verbosity(char *str)
+-{
+-	if (strcmp("debug", str) == 0)
+-		apic_verbosity = APIC_DEBUG;
+-	else if (strcmp("verbose", str) == 0)
+-		apic_verbosity = APIC_VERBOSE;
+-	return 1;
+-}
 -
--        /* Do ITLB : don't bother for pages in non-exectutable VMAs */
--	if (vma->vm_flags & VM_EXEC) {
--		for_each_itlb_entry(tlb) {
--			asm volatile ("getcfg	%1, 0, %0"
--				      : "=r" (pteh)
--				      : "r" (tlb) );
+-__setup("apic=", apic_set_verbosity);
 -
--			if (pteh == match) {
--				__flush_tlb_slot(tlb);
--				break;
--			}
 -
--		}
--	}
+-/*
+  * Local APIC interrupts
+  */
+ 
+@@ -1306,7 +1283,7 @@ void smp_error_interrupt(struct pt_regs *regs)
+ 	   6: Received illegal vector
+ 	   7: Illegal register address
+ 	*/
+-	printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
++	printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
+ 		smp_processor_id(), v , v1);
+ 	irq_exit();
+ }
+@@ -1393,7 +1370,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
+ 			value = apic_read(APIC_LVT0);
+ 			value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
+ 				APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+-				APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
++				APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
+ 			value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+ 			value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
+ 			apic_write_around(APIC_LVT0, value);
+@@ -1530,7 +1507,7 @@ static int lapic_resume(struct sys_device *dev)
+  */
+ 
+ static struct sysdev_class lapic_sysclass = {
+-	set_kset_name("lapic"),
++	.name		= "lapic",
+ 	.resume		= lapic_resume,
+ 	.suspend	= lapic_suspend,
+ };
+@@ -1565,3 +1542,46 @@ device_initcall(init_lapic_sysfs);
+ static void apic_pm_activate(void) { }
+ 
+ #endif	/* CONFIG_PM */
++
++/*
++ * APIC command line parameters
++ */
++static int __init parse_lapic(char *arg)
++{
++	enable_local_apic = 1;
++	return 0;
++}
++early_param("lapic", parse_lapic);
++
++static int __init parse_nolapic(char *arg)
++{
++	enable_local_apic = -1;
++	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
++	return 0;
++}
++early_param("nolapic", parse_nolapic);
++
++static int __init parse_disable_lapic_timer(char *arg)
++{
++	local_apic_timer_disabled = 1;
++	return 0;
++}
++early_param("nolapic_timer", parse_disable_lapic_timer);
++
++static int __init parse_lapic_timer_c2_ok(char *arg)
++{
++	local_apic_timer_c2_ok = 1;
++	return 0;
++}
++early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
++
++static int __init apic_set_verbosity(char *str)
++{
++	if (strcmp("debug", str) == 0)
++		apic_verbosity = APIC_DEBUG;
++	else if (strcmp("verbose", str) == 0)
++		apic_verbosity = APIC_VERBOSE;
++	return 1;
++}
++__setup("apic=", apic_set_verbosity);
++
+diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
+index f28ccb5..d8d03e0 100644
+--- a/arch/x86/kernel/apic_64.c
++++ b/arch/x86/kernel/apic_64.c
+@@ -23,32 +23,37 @@
+ #include <linux/mc146818rtc.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/sysdev.h>
+-#include <linux/module.h>
+ #include <linux/ioport.h>
+ #include <linux/clockchips.h>
++#include <linux/acpi_pmtmr.h>
++#include <linux/module.h>
+ 
+ #include <asm/atomic.h>
+ #include <asm/smp.h>
+ #include <asm/mtrr.h>
+ #include <asm/mpspec.h>
++#include <asm/hpet.h>
+ #include <asm/pgalloc.h>
+ #include <asm/mach_apic.h>
+ #include <asm/nmi.h>
+ #include <asm/idle.h>
+ #include <asm/proto.h>
+ #include <asm/timex.h>
+-#include <asm/hpet.h>
+ #include <asm/apic.h>
+ 
+-int apic_verbosity;
+ int disable_apic_timer __cpuinitdata;
+ static int apic_calibrate_pmtmr __initdata;
++int disable_apic;
+ 
+-/* Local APIC timer works in C2? */
++/* Local APIC timer works in C2 */
+ int local_apic_timer_c2_ok;
+ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
+ 
+-static struct resource *ioapic_resources;
++/*
++ * Debug level, exported for io_apic.c
++ */
++int apic_verbosity;
++
+ static struct resource lapic_resource = {
+ 	.name = "Local APIC",
+ 	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+@@ -60,10 +65,8 @@ static int lapic_next_event(unsigned long delta,
+ 			    struct clock_event_device *evt);
+ static void lapic_timer_setup(enum clock_event_mode mode,
+ 			      struct clock_event_device *evt);
 -
--        /* Do DTLB : any page could potentially be in here. */
--	for_each_dtlb_entry(tlb) {
--		asm volatile ("getcfg	%1, 0, %0"
--			      : "=r" (pteh)
--			      : "r" (tlb) );
+ static void lapic_timer_broadcast(cpumask_t mask);
 -
--		if (pteh == match) {
--			__flush_tlb_slot(tlb);
+-static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen);
++static void apic_pm_activate(void);
+ 
+ static struct clock_event_device lapic_clockevent = {
+ 	.name		= "lapic",
+@@ -78,6 +81,150 @@ static struct clock_event_device lapic_clockevent = {
+ };
+ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
+ 
++static unsigned long apic_phys;
++
++/*
++ * Get the LAPIC version
++ */
++static inline int lapic_get_version(void)
++{
++	return GET_APIC_VERSION(apic_read(APIC_LVR));
++}
++
++/*
++ * Check, if the APIC is integrated or a seperate chip
++ */
++static inline int lapic_is_integrated(void)
++{
++	return 1;
++}
++
++/*
++ * Check, whether this is a modern or a first generation APIC
++ */
++static int modern_apic(void)
++{
++	/* AMD systems use old APIC versions, so check the CPU */
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++	    boot_cpu_data.x86 >= 0xf)
++		return 1;
++	return lapic_get_version() >= 0x14;
++}
++
++void apic_wait_icr_idle(void)
++{
++	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
++		cpu_relax();
++}
++
++u32 safe_apic_wait_icr_idle(void)
++{
++	u32 send_status;
++	int timeout;
++
++	timeout = 0;
++	do {
++		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
++		if (!send_status)
++			break;
++		udelay(100);
++	} while (timeout++ < 1000);
++
++	return send_status;
++}
++
++/**
++ * enable_NMI_through_LVT0 - enable NMI through local vector table 0
++ */
++void __cpuinit enable_NMI_through_LVT0(void)
++{
++	unsigned int v;
++
++	/* unmask and set to NMI */
++	v = APIC_DM_NMI;
++	apic_write(APIC_LVT0, v);
++}
++
++/**
++ * lapic_get_maxlvt - get the maximum number of local vector table entries
++ */
++int lapic_get_maxlvt(void)
++{
++	unsigned int v, maxlvt;
++
++	v = apic_read(APIC_LVR);
++	maxlvt = GET_APIC_MAXLVT(v);
++	return maxlvt;
++}
++
++/*
++ * This function sets up the local APIC timer, with a timeout of
++ * 'clocks' APIC bus clock. During calibration we actually call
++ * this function twice on the boot CPU, once with a bogus timeout
++ * value, second time for real. The other (noncalibrating) CPUs
++ * call this function only once, with the real, calibrated value.
++ *
++ * We do reads before writes even if unnecessary, to get around the
++ * P5 APIC double write bug.
++ */
++
++static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
++{
++	unsigned int lvtt_value, tmp_value;
++
++	lvtt_value = LOCAL_TIMER_VECTOR;
++	if (!oneshot)
++		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
++	if (!irqen)
++		lvtt_value |= APIC_LVT_MASKED;
++
++	apic_write(APIC_LVTT, lvtt_value);
++
++	/*
++	 * Divide PICLK by 16
++	 */
++	tmp_value = apic_read(APIC_TDCR);
++	apic_write(APIC_TDCR, (tmp_value
++				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
++				| APIC_TDR_DIV_16);
++
++	if (!oneshot)
++		apic_write(APIC_TMICT, clocks);
++}
++
++/*
++ * Setup extended LVT, AMD specific (K8, family 10h)
++ *
++ * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
++ * MCE interrupts are supported. Thus MCE offset must be set to 0.
++ */
++
++#define APIC_EILVT_LVTOFF_MCE 0
++#define APIC_EILVT_LVTOFF_IBS 1
++
++static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
++{
++	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
++	unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
++
++	apic_write(reg, v);
++}
++
++u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
++{
++	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
++	return APIC_EILVT_LVTOFF_MCE;
++}
++
++u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
++{
++	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
++	return APIC_EILVT_LVTOFF_IBS;
++}
++
++/*
++ * Program the next event, relative to now
++ */
+ static int lapic_next_event(unsigned long delta,
+ 			    struct clock_event_device *evt)
+ {
+@@ -85,6 +232,9 @@ static int lapic_next_event(unsigned long delta,
+ 	return 0;
+ }
+ 
++/*
++ * Setup the lapic timer in periodic or oneshot mode
++ */
+ static void lapic_timer_setup(enum clock_event_mode mode,
+ 			      struct clock_event_device *evt)
+ {
+@@ -127,75 +277,261 @@ static void lapic_timer_broadcast(cpumask_t mask)
+ #endif
+ }
+ 
+-static void apic_pm_activate(void);
++/*
++ * Setup the local APIC timer for this CPU. Copy the initilized values
++ * of the boot CPU and register the clock event in the framework.
++ */
++static void setup_APIC_timer(void)
++{
++	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+ 
+-void apic_wait_icr_idle(void)
++	memcpy(levt, &lapic_clockevent, sizeof(*levt));
++	levt->cpumask = cpumask_of_cpu(smp_processor_id());
++
++	clockevents_register_device(levt);
++}
++
++/*
++ * In this function we calibrate APIC bus clocks to the external
++ * timer. Unfortunately we cannot use jiffies and the timer irq
++ * to calibrate, since some later bootup code depends on getting
++ * the first irq? Ugh.
++ *
++ * We want to do the calibration only once since we
++ * want to have local timer irqs syncron. CPUs connected
++ * by the same APIC bus have the very same bus frequency.
++ * And we want to have irqs off anyways, no accidental
++ * APIC irq that way.
++ */
++
++#define TICK_COUNT 100000000
++
++static void __init calibrate_APIC_clock(void)
+ {
+-	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
+-		cpu_relax();
++	unsigned apic, apic_start;
++	unsigned long tsc, tsc_start;
++	int result;
++
++	local_irq_disable();
++
++	/*
++	 * Put whatever arbitrary (but long enough) timeout
++	 * value into the APIC clock, we just want to get the
++	 * counter running for calibration.
++	 *
++	 * No interrupt enable !
++	 */
++	__setup_APIC_LVTT(250000000, 0, 0);
++
++	apic_start = apic_read(APIC_TMCCT);
++#ifdef CONFIG_X86_PM_TIMER
++	if (apic_calibrate_pmtmr && pmtmr_ioport) {
++		pmtimer_wait(5000);  /* 5ms wait */
++		apic = apic_read(APIC_TMCCT);
++		result = (apic_start - apic) * 1000L / 5;
++	} else
++#endif
++	{
++		rdtscll(tsc_start);
++
++		do {
++			apic = apic_read(APIC_TMCCT);
++			rdtscll(tsc);
++		} while ((tsc - tsc_start) < TICK_COUNT &&
++				(apic_start - apic) < TICK_COUNT);
++
++		result = (apic_start - apic) * 1000L * tsc_khz /
++					(tsc - tsc_start);
++	}
++
++	local_irq_enable();
++
++	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
++
++	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
++		result / 1000 / 1000, result / 1000 % 1000);
++
++	/* Calculate the scaled math multiplication factor */
++	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
++	lapic_clockevent.max_delta_ns =
++		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
++	lapic_clockevent.min_delta_ns =
++		clockevent_delta2ns(0xF, &lapic_clockevent);
++
++	calibration_result = result / HZ;
+ }
+ 
+-unsigned int safe_apic_wait_icr_idle(void)
++/*
++ * Setup the boot APIC
++ *
++ * Calibrate and verify the result.
++ */
++void __init setup_boot_APIC_clock(void)
+ {
+-	unsigned int send_status;
+-	int timeout;
++	/*
++	 * The local apic timer can be disabled via the kernel commandline.
++	 * Register the lapic timer as a dummy clock event source on SMP
++	 * systems, so the broadcast mechanism is used. On UP systems simply
++	 * ignore it.
++	 */
++	if (disable_apic_timer) {
++		printk(KERN_INFO "Disabling APIC timer\n");
++		/* No broadcast on UP ! */
++		if (num_possible_cpus() > 1) {
++			lapic_clockevent.mult = 1;
++			setup_APIC_timer();
++		}
++		return;
++	}
+ 
+-	timeout = 0;
+-	do {
+-		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+-		if (!send_status)
 -			break;
--		}
--
--	}
--}
--
--void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+-		udelay(100);
+-	} while (timeout++ < 1000);
++	printk(KERN_INFO "Using local APIC timer interrupts.\n");
++	calibrate_APIC_clock();
+ 
+-	return send_status;
++	/*
++	 * Do a sanity check on the APIC calibration result
++	 */
++	if (calibration_result < (1000000 / HZ)) {
++		printk(KERN_WARNING
++		       "APIC frequency too slow, disabling apic timer\n");
++		/* No broadcast on UP ! */
++		if (num_possible_cpus() > 1)
++			setup_APIC_timer();
++		return;
++	}
++
++	/*
++	 * If nmi_watchdog is set to IO_APIC, we need the
++	 * PIT/HPET going.  Otherwise register lapic as a dummy
++	 * device.
++	 */
++	if (nmi_watchdog != NMI_IO_APIC)
++		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
++	else
++		printk(KERN_WARNING "APIC timer registered as dummy,"
++		       " due to nmi_watchdog=1!\n");
++
++	setup_APIC_timer();
+ }
+ 
+-void enable_NMI_through_LVT0 (void * dummy)
++/*
++ * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
++ * C1E flag only in the secondary CPU, so when we detect the wreckage
++ * we already have enabled the boot CPU local apic timer. Check, if
++ * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
++ * set the DUMMY flag again and force the broadcast mode in the
++ * clockevents layer.
++ */
++void __cpuinit check_boot_apic_timer_broadcast(void)
+ {
+-	unsigned int v;
++	if (!disable_apic_timer ||
++	    (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
++		return;
+ 
+-	/* unmask and set to NMI */
+-	v = APIC_DM_NMI;
+-	apic_write(APIC_LVT0, v);
++	printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
++	lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
++
++	local_irq_enable();
++	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
++	local_irq_disable();
+ }
+ 
+-int get_maxlvt(void)
++void __cpuinit setup_secondary_APIC_clock(void)
+ {
+-	unsigned int v, maxlvt;
++	check_boot_apic_timer_broadcast();
++	setup_APIC_timer();
++}
+ 
+-	v = apic_read(APIC_LVR);
+-	maxlvt = GET_APIC_MAXLVT(v);
+-	return maxlvt;
++/*
++ * The guts of the apic timer interrupt
++ */
++static void local_apic_timer_interrupt(void)
++{
++	int cpu = smp_processor_id();
++	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
++
++	/*
++	 * Normally we should not be here till LAPIC has been initialized but
++	 * in some cases like kdump, its possible that there is a pending LAPIC
++	 * timer interrupt from previous kernel's context and is delivered in
++	 * new kernel the moment interrupts are enabled.
++	 *
++	 * Interrupts are enabled early and LAPIC is setup much later, hence
++	 * its possible that when we get here evt->event_handler is NULL.
++	 * Check for event_handler being NULL and discard the interrupt as
++	 * spurious.
++	 */
++	if (!evt->event_handler) {
++		printk(KERN_WARNING
++		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
++		/* Switch it off */
++		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
++		return;
++	}
++
++	/*
++	 * the NMI deadlock-detector uses this.
++	 */
++	add_pda(apic_timer_irqs, 1);
++
++	evt->event_handler(evt);
+ }
+ 
+ /*
+- * 'what should we do if we get a hw irq event on an illegal vector'.
+- * each architecture has to answer this themselves.
++ * Local APIC timer interrupt. This is the most natural way for doing
++ * local interrupts, but local timer interrupts can be emulated by
++ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
++ *
++ * [ if a single-CPU system runs an SMP kernel then we call the local
++ *   interrupt as well. Thus we cannot inline the local irq ... ]
+  */
+-void ack_bad_irq(unsigned int irq)
++void smp_apic_timer_interrupt(struct pt_regs *regs)
+ {
+-	printk("unexpected IRQ trap at vector %02x\n", irq);
++	struct pt_regs *old_regs = set_irq_regs(regs);
++
+ 	/*
+-	 * Currently unexpected vectors happen only on SMP and APIC.
+-	 * We _must_ ack these because every local APIC has only N
+-	 * irq slots per priority level, and a 'hanging, unacked' IRQ
+-	 * holds up an irq slot - in excessive cases (when multiple
+-	 * unexpected vectors occur) that might lock up the APIC
+-	 * completely.
+-	 * But don't ack when the APIC is disabled. -AK
++	 * NOTE! We'd better ACK the irq immediately,
++	 * because timer handling can be slow.
+ 	 */
+-	if (!disable_apic)
+-		ack_APIC_irq();
++	ack_APIC_irq();
++	/*
++	 * update_process_times() expects us to have done irq_enter().
++	 * Besides, if we don't timer interrupts ignore the global
++	 * interrupt lock, which is the WrongThing (tm) to do.
++	 */
++	exit_idle();
++	irq_enter();
++	local_apic_timer_interrupt();
++	irq_exit();
++	set_irq_regs(old_regs);
++}
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++	return -EINVAL;
+ }
+ 
++
++/*
++ * Local APIC start and shutdown
++ */
++
++/**
++ * clear_local_APIC - shutdown the local APIC
++ *
++ * This is called, when a CPU is disabled and before rebooting, so the state of
++ * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
++ * leftovers during boot.
++ */
+ void clear_local_APIC(void)
+ {
+-	int maxlvt;
+-	unsigned int v;
++	int maxlvt = lapic_get_maxlvt();
++	u32 v;
+ 
+-	maxlvt = get_maxlvt();
++	/* APIC hasn't been mapped yet */
++	if (!apic_phys)
++		return;
+ 
++	maxlvt = lapic_get_maxlvt();
+ 	/*
+ 	 * Masking an LVT entry can trigger a local APIC error
+ 	 * if the vector is zero. Mask LVTERR first to prevent this.
+@@ -233,45 +569,9 @@ void clear_local_APIC(void)
+ 	apic_read(APIC_ESR);
+ }
+ 
+-void disconnect_bsp_APIC(int virt_wire_setup)
 -{
--	unsigned long flags;
+-	/* Go back to Virtual Wire compatibility mode */
+-	unsigned long value;
 -
--#if defined(CONFIG_SH64_PROC_TLB)
--        ++calls_to_flush_tlb_page;
--#endif
+-	/* For the spurious interrupt use vector F, and enable it */
+-	value = apic_read(APIC_SPIV);
+-	value &= ~APIC_VECTOR_MASK;
+-	value |= APIC_SPIV_APIC_ENABLED;
+-	value |= 0xf;
+-	apic_write(APIC_SPIV, value);
 -
--	if (vma->vm_mm) {
--		page &= PAGE_MASK;
--		local_irq_save(flags);
--		__flush_tlb_page(vma, page);
--		local_irq_restore(flags);
+-	if (!virt_wire_setup) {
+-		/*
+-		 * For LVT0 make it edge triggered, active high,
+-		 * external and enabled
+-		 */
+-		value = apic_read(APIC_LVT0);
+-		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
+-			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+-			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
+-		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+-		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
+-		apic_write(APIC_LVT0, value);
+-	} else {
+-		/* Disable LVT0 */
+-		apic_write(APIC_LVT0, APIC_LVT_MASKED);
 -	}
+-
+-	/* For LVT1 make it edge triggered, active high, nmi and enabled */
+-	value = apic_read(APIC_LVT1);
+-	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
+-			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+-			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
+-	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+-	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
+-	apic_write(APIC_LVT1, value);
 -}
 -
--void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
--		     unsigned long end)
++/**
++ * disable_local_APIC - clear and disable the local APIC
++ */
+ void disable_local_APIC(void)
+ {
+ 	unsigned int value;
+@@ -333,7 +633,7 @@ int __init verify_local_APIC(void)
+ 	reg1 = GET_APIC_VERSION(reg0);
+ 	if (reg1 == 0x00 || reg1 == 0xff)
+ 		return 0;
+-	reg1 = get_maxlvt();
++	reg1 = lapic_get_maxlvt();
+ 	if (reg1 < 0x02 || reg1 == 0xff)
+ 		return 0;
+ 
+@@ -355,18 +655,20 @@ int __init verify_local_APIC(void)
+ 	 * compatibility mode, but most boxes are anymore.
+ 	 */
+ 	reg0 = apic_read(APIC_LVT0);
+-	apic_printk(APIC_DEBUG,"Getting LVT0: %x\n", reg0);
++	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
+ 	reg1 = apic_read(APIC_LVT1);
+ 	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
+ 
+ 	return 1;
+ }
+ 
++/**
++ * sync_Arb_IDs - synchronize APIC bus arbitration IDs
++ */
+ void __init sync_Arb_IDs(void)
+ {
+ 	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
+-	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+-	if (ver >= 0x14)	/* P4 or higher */
++	if (modern_apic())
+ 		return;
+ 
+ 	/*
+@@ -418,9 +720,12 @@ void __init init_bsp_APIC(void)
+ 	apic_write(APIC_LVT1, value);
+ }
+ 
+-void __cpuinit setup_local_APIC (void)
++/**
++ * setup_local_APIC - setup the local APIC
++ */
++void __cpuinit setup_local_APIC(void)
+ {
+-	unsigned int value, maxlvt;
++	unsigned int value;
+ 	int i, j;
+ 
+ 	value = apic_read(APIC_LVR);
+@@ -516,30 +821,217 @@ void __cpuinit setup_local_APIC (void)
+ 	else
+ 		value = APIC_DM_NMI | APIC_LVT_MASKED;
+ 	apic_write(APIC_LVT1, value);
++}
+ 
+-	{
+-		unsigned oldvalue;
+-		maxlvt = get_maxlvt();
+-		oldvalue = apic_read(APIC_ESR);
+-		value = ERROR_APIC_VECTOR;      // enables sending errors
+-		apic_write(APIC_LVTERR, value);
+-		/*
+-		 * spec says clear errors after enabling vector.
+-		 */
+-		if (maxlvt > 3)
+-			apic_write(APIC_ESR, 0);
+-		value = apic_read(APIC_ESR);
+-		if (value != oldvalue)
+-			apic_printk(APIC_VERBOSE,
+-			"ESR value after enabling vector: %08x, after %08x\n",
+-			oldvalue, value);
+-	}
++void __cpuinit lapic_setup_esr(void)
++{
++	unsigned maxlvt = lapic_get_maxlvt();
++
++	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR);
++	/*
++	 * spec says clear errors after enabling vector.
++	 */
++	if (maxlvt > 3)
++		apic_write(APIC_ESR, 0);
++}
+ 
++void __cpuinit end_local_APIC_setup(void)
++{
++	lapic_setup_esr();
+ 	nmi_watchdog_default();
+ 	setup_apic_nmi_watchdog(NULL);
+ 	apic_pm_activate();
+ }
+ 
++/*
++ * Detect and enable local APICs on non-SMP boards.
++ * Original code written by Keir Fraser.
++ * On AMD64 we trust the BIOS - if it says no APIC it is likely
++ * not correctly set up (usually the APIC timer won't work etc.)
++ */
++static int __init detect_init_APIC(void)
++{
++	if (!cpu_has_apic) {
++		printk(KERN_INFO "No local APIC present\n");
++		return -1;
++	}
++
++	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++	boot_cpu_id = 0;
++	return 0;
++}
++
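
detect_init_APIC() keys off cpu_has_apic, which ultimately reflects CPUID leaf 1, EDX bit 9. For readers who want to poke at the same bit from user space, here is a small x86-only check using GCC's cpuid.h; it is purely illustrative, the kernel reads its cached capability bits instead.

#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang helper, x86 only */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 1 not available");
		return 1;
	}
	/* EDX bit 9 is the local-APIC feature flag that cpu_has_apic tests. */
	printf("local APIC %spresent\n", (edx & (1u << 9)) ? "" : "not ");
	return 0;
}
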
++/**
++ * init_apic_mappings - initialize APIC mappings
++ */
++void __init init_apic_mappings(void)
++{
++	/*
++	 * If no local APIC can be found then set up a fake all
++	 * zeroes page to simulate the local APIC and another
++	 * one for the IO-APIC.
++	 */
++	if (!smp_found_config && detect_init_APIC()) {
++		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
++		apic_phys = __pa(apic_phys);
++	} else
++		apic_phys = mp_lapic_addr;
++
++	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
++	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
++				APIC_BASE, apic_phys);
++
++	/* Put local APIC into the resource map. */
++	lapic_resource.start = apic_phys;
++	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
++	insert_resource(&iomem_resource, &lapic_resource);
++
++	/*
++	 * Fetch the APIC ID of the BSP in case we have a
++	 * default configuration (or the MP table is broken).
++	 */
++	boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++}
++
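
init_apic_mappings() registers exactly one page at the APIC base in the iomem resource tree; the end address is start + PAGE_SIZE - 1 because struct resource ranges are inclusive. A trivial sketch with the conventional default base (0xfee00000, APIC_DEFAULT_PHYS_BASE) plugged in:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* x86 PAGE_SIZE */
	unsigned long apic_phys = 0xfee00000UL;		/* APIC_DEFAULT_PHYS_BASE */
	unsigned long start = apic_phys;
	unsigned long end = start + page_size - 1;	/* inclusive end, hence the -1 */

	printf("lapic resource: [0x%lx - 0x%lx]\n", start, end);
	return 0;
}
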
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor(void)
++{
++	if (disable_apic) {
++		printk(KERN_INFO "Apic disabled\n");
++		return -1;
++	}
++	if (!cpu_has_apic) {
++		disable_apic = 1;
++		printk(KERN_INFO "Apic disabled by BIOS\n");
++		return -1;
++	}
++
++	verify_local_APIC();
++
++	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
++	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
++
++	setup_local_APIC();
++
++	/*
++	 * Now enable IO-APICs, actually call clear_IO_APIC
++	 * We need clear_IO_APIC before enabling vector on BP
++	 */
++	if (!skip_ioapic_setup && nr_ioapics)
++		enable_IO_APIC();
++
++	end_local_APIC_setup();
++
++	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
++		setup_IO_APIC();
++	else
++		nr_ioapics = 0;
++	setup_boot_APIC_clock();
++	check_nmi_watchdog();
++	return 0;
++}
++
++/*
++ * Local APIC interrupts
++ */
++
++/*
++ * This interrupt should _never_ happen with our APIC/SMP architecture
++ */
++asmlinkage void smp_spurious_interrupt(void)
++{
++	unsigned int v;
++	exit_idle();
++	irq_enter();
++	/*
++	 * Check if this really is a spurious interrupt and ACK it
++	 * if it is a vectored one.  Just in case...
++	 * Spurious interrupts should not be ACKed.
++	 */
++	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
++	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
++		ack_APIC_irq();
++
++	add_pda(irq_spurious_count, 1);
++	irq_exit();
++}
++
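
The index arithmetic in smp_spurious_interrupt() looks dense but only locates one bit in the In-Service Register array: the ISR is eight 32-bit registers spaced 0x10 apart, each covering 32 vectors. Worked out below for the usual spurious vector 0xff, assuming the APIC_ISR base offset of 0x100:

#include <stdio.h>

#define APIC_ISR 0x100	/* base of the In-Service Register array */

int main(void)
{
	unsigned int vector = 0xff;	/* SPURIOUS_APIC_VECTOR on x86-64 */
	/* (vector & ~0x1f) >> 1 turns the 32-vector group index into the
	 * 0x10-byte register stride; the low 5 bits pick the bit inside it. */
	unsigned int reg = APIC_ISR + ((vector & ~0x1f) >> 1);
	unsigned int bit = vector & 0x1f;

	printf("vector 0x%02x -> register 0x%03x, bit %u\n", vector, reg, bit);
	return 0;
}
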
++/*
++ * This interrupt should never happen with our APIC/SMP architecture
++ */
++asmlinkage void smp_error_interrupt(void)
++{
++	unsigned int v, v1;
++
++	exit_idle();
++	irq_enter();
++	/* First tickle the hardware, only then report what went on. -- REW */
++	v = apic_read(APIC_ESR);
++	apic_write(APIC_ESR, 0);
++	v1 = apic_read(APIC_ESR);
++	ack_APIC_irq();
++	atomic_inc(&irq_err_count);
++
++	/* Here is what the APIC error bits mean:
++	   0: Send CS error
++	   1: Receive CS error
++	   2: Send accept error
++	   3: Receive accept error
++	   4: Reserved
++	   5: Send illegal vector
++	   6: Received illegal vector
++	   7: Illegal register address
++	*/
++	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
++		smp_processor_id(), v , v1);
++	irq_exit();
++}
++
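
The bit meanings listed in the comment above translate directly into a decoder; a stand-alone one, fed with a made-up ESR value, looks like this:

#include <stdio.h>

static const char *apic_esr_bits[8] = {
	"Send CS error", "Receive CS error",
	"Send accept error", "Receive accept error",
	"Reserved", "Send illegal vector",
	"Received illegal vector", "Illegal register address",
};

int main(void)
{
	unsigned int esr = 0x60;	/* hypothetical value read back from APIC_ESR */
	int i;

	for (i = 0; i < 8; i++)
		if (esr & (1u << i))
			printf("ESR bit %d: %s\n", i, apic_esr_bits[i]);
	return 0;
}
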
++void disconnect_bsp_APIC(int virt_wire_setup)
++{
++	/* Go back to Virtual Wire compatibility mode */
++	unsigned long value;
++
++	/* For the spurious interrupt use vector F, and enable it */
++	value = apic_read(APIC_SPIV);
++	value &= ~APIC_VECTOR_MASK;
++	value |= APIC_SPIV_APIC_ENABLED;
++	value |= 0xf;
++	apic_write(APIC_SPIV, value);
++
++	if (!virt_wire_setup) {
++		/*
++		 * For LVT0 make it edge triggered, active high,
++		 * external and enabled
++		 */
++		value = apic_read(APIC_LVT0);
++		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
++			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
++			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
++		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
++		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
++		apic_write(APIC_LVT0, value);
++	} else {
++		/* Disable LVT0 */
++		apic_write(APIC_LVT0, APIC_LVT_MASKED);
++	}
++
++	/* For LVT1 make it edge triggered, active high, nmi and enabled */
++	value = apic_read(APIC_LVT1);
++	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
++			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
++			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
++	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
++	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
++	apic_write(APIC_LVT1, value);
++}
++
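
disconnect_bsp_APIC() applies the same clear-then-set pattern to both LVT entries: wipe the delivery-mode, polarity, trigger and mask fields, then program the wanted mode. A user-space imitation follows; the bit positions are as I read them from apicdef.h and should be treated as assumptions, not authoritative values.

#include <stdio.h>

#define LVT_MODE_MASK     (0x7u << 8)	/* delivery mode, bits 8-10 */
#define LVT_MODE_EXTINT   (0x7u << 8)
#define LVT_SEND_PENDING  (1u << 12)
#define LVT_POLARITY      (1u << 13)
#define LVT_REMOTE_IRR    (1u << 14)
#define LVT_LEVEL_TRIGGER (1u << 15)
#define LVT_MASKED        (1u << 16)

int main(void)
{
	unsigned int lvt0 = 0x0001a400;	/* hypothetical current LVT0 contents */

	/* Same sequence as the LVT0 branch above: clear, then re-set. */
	lvt0 &= ~(LVT_MODE_MASK | LVT_SEND_PENDING | LVT_POLARITY |
		  LVT_REMOTE_IRR | LVT_LEVEL_TRIGGER | LVT_MASKED);
	lvt0 |= LVT_REMOTE_IRR | LVT_SEND_PENDING;
	lvt0 = (lvt0 & ~LVT_MODE_MASK) | LVT_MODE_EXTINT;

	printf("LVT0 -> %#010x\n", lvt0);
	return 0;
}
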
++/*
++ * Power management
++ */
+ #ifdef CONFIG_PM
+ 
+ static struct {
+@@ -571,7 +1063,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
+ 	if (!apic_pm_state.active)
+ 		return 0;
+ 
+-	maxlvt = get_maxlvt();
++	maxlvt = lapic_get_maxlvt();
+ 
+ 	apic_pm_state.apic_id = apic_read(APIC_ID);
+ 	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
+@@ -605,7 +1097,7 @@ static int lapic_resume(struct sys_device *dev)
+ 	if (!apic_pm_state.active)
+ 		return 0;
+ 
+-	maxlvt = get_maxlvt();
++	maxlvt = lapic_get_maxlvt();
+ 
+ 	local_irq_save(flags);
+ 	rdmsr(MSR_IA32_APICBASE, l, h);
+@@ -639,14 +1131,14 @@ static int lapic_resume(struct sys_device *dev)
+ }
+ 
+ static struct sysdev_class lapic_sysclass = {
+-	set_kset_name("lapic"),
++	.name		= "lapic",
+ 	.resume		= lapic_resume,
+ 	.suspend	= lapic_suspend,
+ };
+ 
+ static struct sys_device device_lapic = {
+-	.id		= 0,
+-	.cls		= &lapic_sysclass,
++	.id	= 0,
++	.cls	= &lapic_sysclass,
+ };
+ 
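
The set_kset_name("lapic") line disappears because the driver-core kobject/kset rework in this merge window gave sysdev classes a plain name field, so the struct is now filled with ordinary designated initializers. A compile-checkable imitation of that style is below; the struct layout is abbreviated and is not the real sysdev_class.

#include <stdio.h>

struct fake_sysdev_class {
	const char *name;
	int (*suspend)(void);
	int (*resume)(void);
};

static int fake_lapic_suspend(void) { return 0; }
static int fake_lapic_resume(void)  { return 0; }

static struct fake_sysdev_class fake_lapic_sysclass = {
	.name		= "lapic",
	.suspend	= fake_lapic_suspend,
	.resume		= fake_lapic_resume,
};

int main(void)
{
	printf("sysdev class: %s\n", fake_lapic_sysclass.name);
	return 0;
}
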
+ static void __cpuinit apic_pm_activate(void)
+@@ -657,9 +1149,11 @@ static void __cpuinit apic_pm_activate(void)
+ static int __init init_lapic_sysfs(void)
+ {
+ 	int error;
++
+ 	if (!cpu_has_apic)
+ 		return 0;
+ 	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
++
+ 	error = sysdev_class_register(&lapic_sysclass);
+ 	if (!error)
+ 		error = sysdev_register(&device_lapic);
+@@ -673,423 +1167,6 @@ static void apic_pm_activate(void) { }
+ 
+ #endif	/* CONFIG_PM */
+ 
+-static int __init apic_set_verbosity(char *str)
 -{
--	unsigned long flags;
--	unsigned long long match, pteh=0, pteh_epn, pteh_low;
--	unsigned long tlb;
--	struct mm_struct *mm;
--
--	mm = vma->vm_mm;
--
--#if defined(CONFIG_SH64_PROC_TLB)
--	++calls_to_flush_tlb_range;
--
--	{
--		unsigned long size = (end - 1) - start;
--		size >>= 12; /* divide by PAGE_SIZE */
--		size++; /* end=start+4096 => 1 page */
--		switch (size) {
--		  case  1        : flush_tlb_range_1++;     break;
--		  case  2        : flush_tlb_range_2++;     break;
--		  case  3 ...  4 : flush_tlb_range_3_4++;   break;
--		  case  5 ...  7 : flush_tlb_range_5_7++;   break;
--		  case  8 ... 11 : flush_tlb_range_8_11++;  break;
--		  case 12 ... 15 : flush_tlb_range_12_15++; break;
--		  default        : flush_tlb_range_16_up++; break;
--		}
--	}
--#endif
--
--	if (mm->context == NO_CONTEXT)
--		return;
--
--	local_irq_save(flags);
--
--	start &= PAGE_MASK;
--	end &= PAGE_MASK;
--
--	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
--
--	/* Flush ITLB */
--	for_each_itlb_entry(tlb) {
--		asm volatile ("getcfg	%1, 0, %0"
--			      : "=r" (pteh)
--			      : "r" (tlb) );
--
--		pteh_epn = pteh & PAGE_MASK;
--		pteh_low = pteh & ~PAGE_MASK;
--
--		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
--			__flush_tlb_slot(tlb);
+-	if (str == NULL)  {
+-		skip_ioapic_setup = 0;
+-		ioapic_force = 1;
+-		return 0;
 -	}
--
--	/* Flush DTLB */
--	for_each_dtlb_entry(tlb) {
--		asm volatile ("getcfg	%1, 0, %0"
--			      : "=r" (pteh)
--			      : "r" (tlb) );
--
--		pteh_epn = pteh & PAGE_MASK;
--		pteh_low = pteh & ~PAGE_MASK;
--
--		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
--			__flush_tlb_slot(tlb);
+-	if (strcmp("debug", str) == 0)
+-		apic_verbosity = APIC_DEBUG;
+-	else if (strcmp("verbose", str) == 0)
+-		apic_verbosity = APIC_VERBOSE;
+-	else {
+-		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
+-				" use apic=verbose or apic=debug\n", str);
+-		return -EINVAL;
 -	}
 -
--	local_irq_restore(flags);
+-	return 0;
 -}
+-early_param("apic", apic_set_verbosity);
 -
--void flush_tlb_mm(struct mm_struct *mm)
--{
--	unsigned long flags;
--
--#if defined(CONFIG_SH64_PROC_TLB)
--	++calls_to_flush_tlb_mm;
--#endif
--
--	if (mm->context == NO_CONTEXT)
--		return;
--
--	local_irq_save(flags);
--
--	mm->context=NO_CONTEXT;
--	if(mm==current->mm)
--		activate_context(mm);
--
--	local_irq_restore(flags);
--
--}
+-/*
+- * Detect and enable local APICs on non-SMP boards.
+- * Original code written by Keir Fraser.
+- * On AMD64 we trust the BIOS - if it says no APIC it is likely
+- * not correctly set up (usually the APIC timer won't work etc.)
+- */
 -
--void flush_tlb_all(void)
+-static int __init detect_init_APIC (void)
 -{
--	/* Invalidate all, including shared pages, excluding fixed TLBs */
--
--	unsigned long flags, tlb;
--
--#if defined(CONFIG_SH64_PROC_TLB)
--	++calls_to_flush_tlb_all;
--#endif
--
--	local_irq_save(flags);
--
--	/* Flush each ITLB entry */
--	for_each_itlb_entry(tlb) {
--		__flush_tlb_slot(tlb);
--	}
--
--	/* Flush each DTLB entry */
--	for_each_dtlb_entry(tlb) {
--		__flush_tlb_slot(tlb);
+-	if (!cpu_has_apic) {
+-		printk(KERN_INFO "No local APIC present\n");
+-		return -1;
 -	}
 -
--	local_irq_restore(flags);
--}
--
--void flush_tlb_kernel_range(unsigned long start, unsigned long end)
--{
--        /* FIXME: Optimize this later.. */
--        flush_tlb_all();
--}
--
--#if defined(CONFIG_SH64_PROC_TLB)
--/* Procfs interface to read the performance information */
--
--static int
--tlb_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
--{
--  int len=0;
--  len += sprintf(buf+len, "do_fast_page_fault   called %12lld times\n", calls_to_do_fast_page_fault);
--  len += sprintf(buf+len, "do_slow_page_fault   called %12lld times\n", calls_to_do_slow_page_fault);
--  len += sprintf(buf+len, "update_mmu_cache     called %12lld times\n", calls_to_update_mmu_cache);
--  len += sprintf(buf+len, "flush_tlb_page       called %12lld times\n", calls_to_flush_tlb_page);
--  len += sprintf(buf+len, "flush_tlb_range      called %12lld times\n", calls_to_flush_tlb_range);
--  len += sprintf(buf+len, "flush_tlb_mm         called %12lld times\n", calls_to_flush_tlb_mm);
--  len += sprintf(buf+len, "flush_tlb_all        called %12lld times\n", calls_to_flush_tlb_all);
--  len += sprintf(buf+len, "flush_tlb_range_sizes\n"
--                          " 1      : %12lld\n"
--                          " 2      : %12lld\n"
--                          " 3 -  4 : %12lld\n"
--                          " 5 -  7 : %12lld\n"
--                          " 8 - 11 : %12lld\n"
--                          "12 - 15 : %12lld\n"
--                          "16+     : %12lld\n",
--                          flush_tlb_range_1, flush_tlb_range_2, flush_tlb_range_3_4,
--                          flush_tlb_range_5_7, flush_tlb_range_8_11, flush_tlb_range_12_15,
--                          flush_tlb_range_16_up);
--  len += sprintf(buf+len, "page not present           %12lld times\n", page_not_present);
--  *eof = 1;
--  return len;
+-	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+-	boot_cpu_id = 0;
+-	return 0;
 -}
 -
--static int __init register_proc_tlb(void)
+-#ifdef CONFIG_X86_IO_APIC
+-static struct resource * __init ioapic_setup_resources(void)
 -{
--  create_proc_read_entry("tlb", 0, NULL, tlb_proc_info, NULL);
--  return 0;
--}
--
--__initcall(register_proc_tlb);
+-#define IOAPIC_RESOURCE_NAME_SIZE 11
+-	unsigned long n;
+-	struct resource *res;
+-	char *mem;
+-	int i;
 -
--#endif
-diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
-deleted file mode 100644
-index fa66daa..0000000
---- a/arch/sh64/mm/hugetlbpage.c
-+++ /dev/null
-@@ -1,105 +0,0 @@
--/*
-- * arch/sh64/mm/hugetlbpage.c
-- *
-- * SuperH HugeTLB page support.
-- *
-- * Cloned from sparc64 by Paul Mundt.
-- *
-- * Copyright (C) 2002, 2003 David S. Miller (davem at redhat.com)
-- */
+-	if (nr_ioapics <= 0)
+-		return NULL;
 -
--#include <linux/init.h>
--#include <linux/fs.h>
--#include <linux/mm.h>
--#include <linux/hugetlb.h>
--#include <linux/pagemap.h>
--#include <linux/slab.h>
--#include <linux/sysctl.h>
+-	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
+-	n *= nr_ioapics;
 -
--#include <asm/mman.h>
--#include <asm/pgalloc.h>
--#include <asm/tlb.h>
--#include <asm/tlbflush.h>
--#include <asm/cacheflush.h>
+-	mem = alloc_bootmem(n);
+-	res = (void *)mem;
 -
--pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
--{
--	pgd_t *pgd;
--	pmd_t *pmd;
--	pte_t *pte = NULL;
+-	if (mem != NULL) {
+-		memset(mem, 0, n);
+-		mem += sizeof(struct resource) * nr_ioapics;
 -
--	pgd = pgd_offset(mm, addr);
--	if (pgd) {
--		pmd = pmd_alloc(mm, pgd, addr);
--		if (pmd)
--			pte = pte_alloc_map(mm, pmd, addr);
+-		for (i = 0; i < nr_ioapics; i++) {
+-			res[i].name = mem;
+-			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+-			sprintf(mem,  "IOAPIC %u", i);
+-			mem += IOAPIC_RESOURCE_NAME_SIZE;
+-		}
 -	}
--	return pte;
--}
--
--pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
--{
--	pgd_t *pgd;
--	pmd_t *pmd;
--	pte_t *pte = NULL;
 -
--	pgd = pgd_offset(mm, addr);
--	if (pgd) {
--		pmd = pmd_offset(pgd, addr);
--		if (pmd)
--			pte = pte_offset_map(pmd, addr);
--	}
--	return pte;
--}
+-	ioapic_resources = res;
 -
--int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
--{
--	return 0;
+-	return res;
 -}
 -
--void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
--		     pte_t *ptep, pte_t entry)
+-static int __init ioapic_insert_resources(void)
 -{
 -	int i;
+-	struct resource *r = ioapic_resources;
 -
--	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
--		set_pte_at(mm, addr, ptep, entry);
--		ptep++;
--		addr += PAGE_SIZE;
--		pte_val(entry) += PAGE_SIZE;
+-	if (!r) {
+-		printk("IO APIC resources could be not be allocated.\n");
+-		return -1;
 -	}
--}
--
--pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
--			      pte_t *ptep)
--{
--	pte_t entry;
--	int i;
--
--	entry = *ptep;
 -
--	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
--		pte_clear(mm, addr, ptep);
--		addr += PAGE_SIZE;
--		ptep++;
+-	for (i = 0; i < nr_ioapics; i++) {
+-		insert_resource(&iomem_resource, r);
+-		r++;
 -	}
 -
--	return entry;
--}
--
--struct page *follow_huge_addr(struct mm_struct *mm,
--			      unsigned long address, int write)
--{
--	return ERR_PTR(-EINVAL);
--}
--
--int pmd_huge(pmd_t pmd)
--{
 -	return 0;
 -}
 -
--struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
--			     pmd_t *pmd, int write)
--{
--	return NULL;
--}
-diff --git a/arch/sh64/mm/init.c b/arch/sh64/mm/init.c
-deleted file mode 100644
-index 21cf42d..0000000
---- a/arch/sh64/mm/init.c
-+++ /dev/null
-@@ -1,189 +0,0 @@
--/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/mm/init.c
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003, 2004  Paul Mundt
-- *
-- */
--
--#include <linux/init.h>
--#include <linux/rwsem.h>
--#include <linux/mm.h>
--#include <linux/swap.h>
--#include <linux/bootmem.h>
--
--#include <asm/mmu_context.h>
--#include <asm/page.h>
--#include <asm/pgalloc.h>
--#include <asm/pgtable.h>
--#include <asm/tlb.h>
--
--DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
--
--/*
-- * Cache of MMU context last used.
-- */
--unsigned long mmu_context_cache;
--pgd_t * mmu_pdtp_cache;
--int after_bootmem = 0;
--
--/*
-- * BAD_PAGE is the page that is used for page faults when linux
-- * is out-of-memory. Older versions of linux just did a
-- * do_exit(), but using this instead means there is less risk
-- * for a process dying in kernel mode, possibly leaving an inode
-- * unused etc..
-- *
-- * BAD_PAGETABLE is the accompanying page-table: it is initialized
-- * to point to BAD_PAGE entries.
-- *
-- * ZERO_PAGE is a special page that is used for zero-initialized
-- * data and COW.
-- */
--
--extern unsigned char empty_zero_page[PAGE_SIZE];
--extern unsigned char empty_bad_page[PAGE_SIZE];
--extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
--extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
--
--extern char _text, _etext, _edata, __bss_start, _end;
--extern char __init_begin, __init_end;
--
--/* It'd be good if these lines were in the standard header file. */
--#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
--#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
--
--
--void show_mem(void)
--{
--	int i, total = 0, reserved = 0;
--	int shared = 0, cached = 0;
--
--	printk("Mem-info:\n");
--	show_free_areas();
--	printk("Free swap:       %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
--	i = max_mapnr;
--	while (i-- > 0) {
--		total++;
--		if (PageReserved(mem_map+i))
--			reserved++;
--		else if (PageSwapCache(mem_map+i))
--			cached++;
--		else if (page_count(mem_map+i))
--			shared += page_count(mem_map+i) - 1;
--	}
--	printk("%d pages of RAM\n",total);
--	printk("%d reserved pages\n",reserved);
--	printk("%d pages shared\n",shared);
--	printk("%d pages swap cached\n",cached);
--	printk("%ld pages in page table cache\n", quicklist_total_size());
--}
--
--/*
-- * paging_init() sets up the page tables.
-- *
-- * head.S already did a lot to set up address translation for the kernel.
-- * Here we comes with:
-- * . MMU enabled
-- * . ASID set (SR)
-- * .  some 512MB regions being mapped of which the most relevant here is:
-- *   . CACHED segment (ASID 0 [irrelevant], shared AND NOT user)
-- * . possible variable length regions being mapped as:
-- *   . UNCACHED segment (ASID 0 [irrelevant], shared AND NOT user)
-- * . All of the memory regions are placed, independently from the platform
-- *   on high addresses, above 0x80000000.
-- * . swapper_pg_dir is already cleared out by the .space directive
-- *   in any case swapper does not require a real page directory since
-- *   it's all kernel contained.
-- *
-- * Those pesky NULL-reference errors in the kernel are then
-- * dealt with by not mapping address 0x00000000 at all.
-- *
-- */
--void __init paging_init(void)
--{
--	unsigned long zones_size[MAX_NR_ZONES] = {0, };
--
--	pgd_init((unsigned long)swapper_pg_dir);
--	pgd_init((unsigned long)swapper_pg_dir +
--		 sizeof(pgd_t) * USER_PTRS_PER_PGD);
--
--	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
--
--	zones_size[ZONE_NORMAL] = MAX_LOW_PFN - START_PFN;
--	NODE_DATA(0)->node_mem_map = NULL;
--	free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
--}
+-/* Insert the IO APIC resources after PCI initialization has occured to handle
+- * IO APICS that are mapped in on a BAR in PCI space. */
+-late_initcall(ioapic_insert_resources);
+-#endif
 -
--void __init mem_init(void)
+-void __init init_apic_mappings(void)
 -{
--	int codesize, reservedpages, datasize, initsize;
--	int tmp;
--
--	max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
--	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
+-	unsigned long apic_phys;
 -
 -	/*
--         * Clear the zero-page.
--         * This is not required but we might want to re-use
--         * this very page to pass boot parameters, one day.
--         */
--	memset(empty_zero_page, 0, PAGE_SIZE);
--
--	/* this will put all low memory onto the freelists */
--	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
--	reservedpages = 0;
--	for (tmp = 0; tmp < num_physpages; tmp++)
--		/*
--		 * Only count reserved RAM pages
--		 */
--		if (PageReserved(mem_map+tmp))
--			reservedpages++;
+-	 * If no local APIC can be found then set up a fake all
+-	 * zeroes page to simulate the local APIC and another
+-	 * one for the IO-APIC.
+-	 */
+-	if (!smp_found_config && detect_init_APIC()) {
+-		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+-		apic_phys = __pa(apic_phys);
+-	} else
+-		apic_phys = mp_lapic_addr;
 -
--	after_bootmem = 1;
+-	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
+-	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+-				APIC_BASE, apic_phys);
 -
--	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
--	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
--	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+-	/* Put local APIC into the resource map. */
+-	lapic_resource.start = apic_phys;
+-	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
+-	insert_resource(&iomem_resource, &lapic_resource);
 -
--	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
--		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
--		max_mapnr << (PAGE_SHIFT-10),
--		codesize >> 10,
--		reservedpages << (PAGE_SHIFT-10),
--		datasize >> 10,
--		initsize >> 10);
--}
+-	/*
+-	 * Fetch the APIC ID of the BSP in case we have a
+-	 * default configuration (or the MP table is broken).
+-	 */
+-	boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
 -
--void free_initmem(void)
--{
--	unsigned long addr;
+-	{
+-		unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
+-		int i;
+-		struct resource *ioapic_res;
 -
--	addr = (unsigned long)(&__init_begin);
--	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
--		ClearPageReserved(virt_to_page(addr));
--		init_page_count(virt_to_page(addr));
--		free_page(addr);
--		totalram_pages++;
--	}
--	printk ("Freeing unused kernel memory: %ldk freed\n", (&__init_end - &__init_begin) >> 10);
--}
+-		ioapic_res = ioapic_setup_resources();
+-		for (i = 0; i < nr_ioapics; i++) {
+-			if (smp_found_config) {
+-				ioapic_phys = mp_ioapics[i].mpc_apicaddr;
+-			} else {
+-				ioapic_phys = (unsigned long)
+-					alloc_bootmem_pages(PAGE_SIZE);
+-				ioapic_phys = __pa(ioapic_phys);
+-			}
+-			set_fixmap_nocache(idx, ioapic_phys);
+-			apic_printk(APIC_VERBOSE,
+-				    "mapped IOAPIC to %016lx (%016lx)\n",
+-				    __fix_to_virt(idx), ioapic_phys);
+-			idx++;
 -
--#ifdef CONFIG_BLK_DEV_INITRD
--void free_initrd_mem(unsigned long start, unsigned long end)
--{
--	unsigned long p;
--	for (p = start; p < end; p += PAGE_SIZE) {
--		ClearPageReserved(virt_to_page(p));
--		init_page_count(virt_to_page(p));
--		free_page(p);
--		totalram_pages++;
+-			if (ioapic_res != NULL) {
+-				ioapic_res->start = ioapic_phys;
+-				ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
+-				ioapic_res++;
+-			}
+-		}
 -	}
--	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
 -}
--#endif
 -
-diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c
-deleted file mode 100644
-index 535304e..0000000
---- a/arch/sh64/mm/ioremap.c
-+++ /dev/null
-@@ -1,388 +0,0 @@
 -/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/mm/ioremap.c
-- *
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- * Copyright (C) 2003, 2004  Paul Mundt
-- *
-- * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
-- * derived from arch/i386/mm/ioremap.c .
+- * This function sets up the local APIC timer, with a timeout of
+- * 'clocks' APIC bus clock. During calibration we actually call
+- * this function twice on the boot CPU, once with a bogus timeout
+- * value, second time for real. The other (noncalibrating) CPUs
+- * call this function only once, with the real, calibrated value.
 - *
-- *   (C) Copyright 1995 1996 Linus Torvalds
-- */
--#include <linux/kernel.h>
--#include <linux/slab.h>
--#include <linux/vmalloc.h>
--#include <linux/sched.h>
--#include <linux/string.h>
--#include <linux/io.h>
--#include <linux/ioport.h>
--#include <linux/bootmem.h>
--#include <linux/proc_fs.h>
--#include <linux/module.h>
--#include <asm/pgalloc.h>
--#include <asm/tlbflush.h>
--
--static void shmedia_mapioaddr(unsigned long, unsigned long);
--static unsigned long shmedia_ioremap(struct resource *, u32, int);
--
--/*
-- * Generic mapping function (not visible outside):
+- * We do reads before writes even if unnecessary, to get around the
+- * P5 APIC double write bug.
 - */
 -
--/*
-- * Remap an arbitrary physical address space into the kernel virtual
-- * address space. Needed when the kernel wants to access high addresses
-- * directly.
-- *
-- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-- * have to convert them into an offset in a page-aligned mapping, but the
-- * caller shouldn't need to know that small detail.
-- */
--void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+-static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 -{
--	void * addr;
--	struct vm_struct * area;
--	unsigned long offset, last_addr;
--	pgprot_t pgprot;
+-	unsigned int lvtt_value, tmp_value;
 -
--	/* Don't allow wraparound or zero size */
--	last_addr = phys_addr + size - 1;
--	if (!size || last_addr < phys_addr)
--		return NULL;
+-	lvtt_value = LOCAL_TIMER_VECTOR;
+-	if (!oneshot)
+-		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
+-	if (!irqen)
+-		lvtt_value |= APIC_LVT_MASKED;
 -
--	pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ   |
--			  _PAGE_WRITE    | _PAGE_DIRTY  |
--			  _PAGE_ACCESSED | _PAGE_SHARED | flags);
+-	apic_write(APIC_LVTT, lvtt_value);
 -
 -	/*
--	 * Mappings have to be page-aligned
+-	 * Divide PICLK by 16
 -	 */
--	offset = phys_addr & ~PAGE_MASK;
--	phys_addr &= PAGE_MASK;
--	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+-	tmp_value = apic_read(APIC_TDCR);
+-	apic_write(APIC_TDCR, (tmp_value
+-				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
+-				| APIC_TDR_DIV_16);
 -
--	/*
--	 * Ok, go for it..
--	 */
--	area = get_vm_area(size, VM_IOREMAP);
--	pr_debug("Get vm_area returns %p addr %p\n",area,area->addr);
--	if (!area)
--		return NULL;
--	area->phys_addr = phys_addr;
--	addr = area->addr;
--	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
--			       phys_addr, pgprot)) {
--		vunmap(addr);
--		return NULL;
--	}
--	return (void *) (offset + (char *)addr);
+-	if (!oneshot)
+-		apic_write(APIC_TMICT, clocks);
 -}
--EXPORT_SYMBOL(__ioremap);
 -
--void iounmap(void *addr)
+-static void setup_APIC_timer(void)
 -{
--	struct vm_struct *area;
+-	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 -
--	vfree((void *) (PAGE_MASK & (unsigned long) addr));
--	area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
--	if (!area) {
--		printk(KERN_ERR "iounmap: bad address %p\n", addr);
--		return;
--	}
+-	memcpy(levt, &lapic_clockevent, sizeof(*levt));
+-	levt->cpumask = cpumask_of_cpu(smp_processor_id());
 -
--	kfree(area);
+-	clockevents_register_device(levt);
 -}
--EXPORT_SYMBOL(iounmap);
--
--static struct resource shmedia_iomap = {
--	.name	= "shmedia_iomap",
--	.start	= IOBASE_VADDR + PAGE_SIZE,
--	.end	= IOBASE_END - 1,
--};
--
--static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
--static void shmedia_unmapioaddr(unsigned long vaddr);
--static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
 -
 -/*
-- * We have the same problem as the SPARC, so lets have the same comment:
-- * Our mini-allocator...
-- * Boy this is gross! We need it because we must map I/O for
-- * timers and interrupt controller before the kmalloc is available.
+- * In this function we calibrate APIC bus clocks to the external
+- * timer. Unfortunately we cannot use jiffies and the timer irq
+- * to calibrate, since some later bootup code depends on getting
+- * the first irq? Ugh.
+- *
+- * We want to do the calibration only once since we
+- * want to have local timer irqs syncron. CPUs connected
+- * by the same APIC bus have the very same bus frequency.
+- * And we want to have irqs off anyways, no accidental
+- * APIC irq that way.
 - */
 -
--#define XNMLN  15
--#define XNRES  10
--
--struct xresource {
--	struct resource xres;   /* Must be first */
--	int xflag;              /* 1 == used */
--	char xname[XNMLN+1];
--};
--
--static struct xresource xresv[XNRES];
--
--static struct xresource *xres_alloc(void)
--{
--        struct xresource *xrp;
--        int n;
--
--        xrp = xresv;
--        for (n = 0; n < XNRES; n++) {
--                if (xrp->xflag == 0) {
--                        xrp->xflag = 1;
--                        return xrp;
--                }
--                xrp++;
--        }
--        return NULL;
--}
--
--static void xres_free(struct xresource *xrp)
--{
--	xrp->xflag = 0;
--}
--
--static struct resource *shmedia_find_resource(struct resource *root,
--					      unsigned long vaddr)
--{
--	struct resource *res;
--
--	for (res = root->child; res; res = res->sibling)
--		if (res->start <= vaddr && res->end >= vaddr)
--			return res;
--
--	return NULL;
--}
--
--static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
--				      const char *name)
--{
--        static int printed_full = 0;
--        struct xresource *xres;
--        struct resource *res;
--        char *tack;
--        int tlen;
--
--        if (name == NULL) name = "???";
--
--        if ((xres = xres_alloc()) != 0) {
--                tack = xres->xname;
--                res = &xres->xres;
--        } else {
--                if (!printed_full) {
--                        printk("%s: done with statics, switching to kmalloc\n",
--			       __FUNCTION__);
--                        printed_full = 1;
--                }
--                tlen = strlen(name);
--                tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
--                if (!tack)
--			return -ENOMEM;
--                memset(tack, 0, sizeof(struct resource));
--                res = (struct resource *) tack;
--                tack += sizeof (struct resource);
--        }
--
--        strncpy(tack, name, XNMLN);
--        tack[XNMLN] = 0;
--        res->name = tack;
--
--        return shmedia_ioremap(res, phys, size);
--}
+-#define TICK_COUNT 100000000
 -
--static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
+-static void __init calibrate_APIC_clock(void)
 -{
--        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
--	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
--        unsigned long va;
--        unsigned int psz;
--
--        if (allocate_resource(&shmedia_iomap, res, round_sz,
--			      shmedia_iomap.start, shmedia_iomap.end,
--			      PAGE_SIZE, NULL, NULL) != 0) {
--                panic("alloc_io_res(%s): cannot occupy\n",
--                    (res->name != NULL)? res->name: "???");
--        }
--
--        va = res->start;
--        pa &= PAGE_MASK;
--
--	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
--
--	/* log at boot time ... */
--	printk("mapioaddr: %6s  [%2d page%s]  va 0x%08lx   pa 0x%08x\n",
--	       ((res->name != NULL) ? res->name : "???"),
--	       psz, psz == 1 ? " " : "s", va, pa);
--
--        for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
--                shmedia_mapioaddr(pa, va);
--                va += PAGE_SIZE;
--                pa += PAGE_SIZE;
--        }
+-	unsigned apic, apic_start;
+-	unsigned long tsc, tsc_start;
+-	int result;
 -
--        res->start += offset;
--        res->end = res->start + sz - 1;         /* not strictly necessary.. */
+-	local_irq_disable();
 -
--        return res->start;
--}
+-	/*
+-	 * Put whatever arbitrary (but long enough) timeout
+-	 * value into the APIC clock, we just want to get the
+-	 * counter running for calibration.
+-	 *
+-	 * No interrupt enable !
+-	 */
+-	__setup_APIC_LVTT(250000000, 0, 0);
 -
--static void shmedia_free_io(struct resource *res)
--{
--	unsigned long len = res->end - res->start + 1;
+-	apic_start = apic_read(APIC_TMCCT);
+-#ifdef CONFIG_X86_PM_TIMER
+-	if (apic_calibrate_pmtmr && pmtmr_ioport) {
+-		pmtimer_wait(5000);  /* 5ms wait */
+-		apic = apic_read(APIC_TMCCT);
+-		result = (apic_start - apic) * 1000L / 5;
+-	} else
+-#endif
+-	{
+-		rdtscll(tsc_start);
 -
--	BUG_ON((len & (PAGE_SIZE - 1)) != 0);
+-		do {
+-			apic = apic_read(APIC_TMCCT);
+-			rdtscll(tsc);
+-		} while ((tsc - tsc_start) < TICK_COUNT &&
+-				(apic_start - apic) < TICK_COUNT);
 -
--	while (len) {
--		len -= PAGE_SIZE;
--		shmedia_unmapioaddr(res->start + len);
+-		result = (apic_start - apic) * 1000L * tsc_khz /
+-					(tsc - tsc_start);
 -	}
 -
--	release_resource(res);
--}
+-	local_irq_enable();
 -
--static __init_refok void *sh64_get_page(void)
--{
--	extern int after_bootmem;
--	void *page;
+-	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
 -
--	if (after_bootmem) {
--		page = (void *)get_zeroed_page(GFP_ATOMIC);
--	} else {
--		page = alloc_bootmem_pages(PAGE_SIZE);
--	}
+-	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
+-		result / 1000 / 1000, result / 1000 % 1000);
 -
--	if (!page || ((unsigned long)page & ~PAGE_MASK))
--		panic("sh64_get_page: Out of memory already?\n");
+-	/* Calculate the scaled math multiplication factor */
+-	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
+-	lapic_clockevent.max_delta_ns =
+-		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+-	lapic_clockevent.min_delta_ns =
+-		clockevent_delta2ns(0xF, &lapic_clockevent);
 -
--	return page;
+-	calibration_result = result / HZ;
 -}
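
For reference, the arithmetic in the calibrate_APIC_clock() code being dropped here: the APIC timer delta over a fixed TSC window scales to a frequency via ticks * 1000 * tsc_khz / tsc_cycles. A worked example with invented counter readings (no real measurements):

#include <stdio.h>

int main(void)
{
	unsigned long long tsc_khz = 2000000ULL;	/* 2.0 GHz TSC, made up */
	unsigned long long tsc_delta = 100000000ULL;	/* TICK_COUNT cycles, ~50 ms here */
	unsigned long long apic_delta = 2500000ULL;	/* APIC timer ticks in that window */
	unsigned long hz = 250;				/* a common CONFIG_HZ value */

	/* ticks * 1000 * tsc_khz / cycles == ticks per second */
	unsigned long long result = apic_delta * 1000ULL * tsc_khz / tsc_delta;

	printf("APIC timer: %llu.%03llu MHz, %llu ticks per jiffy\n",
	       result / 1000 / 1000, result / 1000 % 1000, result / hz);
	return 0;
}
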
 -
--static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
+-void __init setup_boot_APIC_clock (void)
 -{
--	pgd_t *pgdp;
--	pmd_t *pmdp;
--	pte_t *ptep, pte;
--	pgprot_t prot;
--	unsigned long flags = 1; /* 1 = CB0-1 device */
--
--	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);
--
--	pgdp = pgd_offset_k(va);
--	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
--		pmdp = (pmd_t *)sh64_get_page();
--		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
--	}
--
--	pmdp = pmd_offset(pgdp, va);
--	if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
--		ptep = (pte_t *)sh64_get_page();
--		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
+-	/*
+-	 * The local apic timer can be disabled via the kernel commandline.
+-	 * Register the lapic timer as a dummy clock event source on SMP
+-	 * systems, so the broadcast mechanism is used. On UP systems simply
+-	 * ignore it.
+-	 */
+-	if (disable_apic_timer) {
+-		printk(KERN_INFO "Disabling APIC timer\n");
+-		/* No broadcast on UP ! */
+-		if (num_possible_cpus() > 1)
+-			setup_APIC_timer();
+-		return;
 -	}
 -
--	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
--			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);
--
--	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
--	ptep = pte_offset_kernel(pmdp, va);
--
--	if (!pte_none(*ptep) &&
--	    pte_val(*ptep) != pte_val(pte))
--		pte_ERROR(*ptep);
+-	printk(KERN_INFO "Using local APIC timer interrupts.\n");
+-	calibrate_APIC_clock();
 -
--	set_pte(ptep, pte);
+-	/*
+-	 * If nmi_watchdog is set to IO_APIC, we need the
+-	 * PIT/HPET going.  Otherwise register lapic as a dummy
+-	 * device.
+-	 */
+-	if (nmi_watchdog != NMI_IO_APIC)
+-		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+-	else
+-		printk(KERN_WARNING "APIC timer registered as dummy,"
+-		       " due to nmi_watchdog=1!\n");
 -
--	flush_tlb_kernel_range(va, PAGE_SIZE);
+-	setup_APIC_timer();
 -}
 -
--static void shmedia_unmapioaddr(unsigned long vaddr)
+-/*
+- * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
+- * C1E flag only in the secondary CPU, so when we detect the wreckage
+- * we already have enabled the boot CPU local apic timer. Check, if
+- * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
+- * set the DUMMY flag again and force the broadcast mode in the
+- * clockevents layer.
+- */
+-void __cpuinit check_boot_apic_timer_broadcast(void)
 -{
--	pgd_t *pgdp;
--	pmd_t *pmdp;
--	pte_t *ptep;
--
--	pgdp = pgd_offset_k(vaddr);
--	pmdp = pmd_offset(pgdp, vaddr);
--
--	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
--		return;
--
--	ptep = pte_offset_kernel(pmdp, vaddr);
--
--	if (pte_none(*ptep) || !pte_present(*ptep))
+-	if (!disable_apic_timer ||
+-	    (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
 -		return;
 -
--	clear_page((void *)ptep);
--	pte_clear(&init_mm, vaddr, ptep);
--}
--
--unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
--{
--	if (size < PAGE_SIZE)
--		size = PAGE_SIZE;
+-	printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
+-	lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
 -
--	return shmedia_alloc_io(phys, size, name);
+-	local_irq_enable();
+-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
+-	local_irq_disable();
 -}
 -
--void onchip_unmap(unsigned long vaddr)
+-void __cpuinit setup_secondary_APIC_clock(void)
 -{
--	struct resource *res;
--	unsigned int psz;
--
--	res = shmedia_find_resource(&shmedia_iomap, vaddr);
--	if (!res) {
--		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
--		       __FUNCTION__, vaddr);
--		return;
--	}
--
--        psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
--
--        printk(KERN_DEBUG "unmapioaddr: %6s  [%2d page%s] freed\n",
--	       res->name, psz, psz == 1 ? " " : "s");
--
--	shmedia_free_io(res);
--
--	if ((char *)res >= (char *)xresv &&
--	    (char *)res <  (char *)&xresv[XNRES]) {
--		xres_free((struct xresource *)res);
--	} else {
--		kfree(res);
--	}
+-	check_boot_apic_timer_broadcast();
+-	setup_APIC_timer();
 -}
 -
--#ifdef CONFIG_PROC_FS
--static int
--ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
--		  void *data)
+-int setup_profiling_timer(unsigned int multiplier)
 -{
--	char *p = buf, *e = buf + length;
--	struct resource *r;
--	const char *nm;
--
--	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
--		if (p + 32 >= e)        /* Better than nothing */
--			break;
--		if ((nm = r->name) == 0) nm = "???";
--		p += sprintf(p, "%08lx-%08lx: %s\n",
--			     (unsigned long)r->start,
--			     (unsigned long)r->end, nm);
--	}
--
--	return p-buf;
+-	return -EINVAL;
 -}
--#endif /* CONFIG_PROC_FS */
 -
--static int __init register_proc_onchip(void)
+-void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
+-			     unsigned char msg_type, unsigned char mask)
 -{
--#ifdef CONFIG_PROC_FS
--	create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
--#endif
--	return 0;
+-	unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
+-	unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
+-	apic_write(reg, v);
 -}
 -
--__initcall(register_proc_onchip);
-diff --git a/arch/sh64/mm/tlb.c b/arch/sh64/mm/tlb.c
-deleted file mode 100644
-index d517e7d..0000000
---- a/arch/sh64/mm/tlb.c
-+++ /dev/null
-@@ -1,166 +0,0 @@
 -/*
-- * arch/sh64/mm/tlb.c
-- *
-- * Copyright (C) 2003  Paul Mundt <lethal at linux-sh.org>
-- * Copyright (C) 2003  Richard Curnow <richard.curnow at superh.com>
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- */
--#include <linux/mm.h>
--#include <linux/init.h>
--#include <asm/page.h>
--#include <asm/tlb.h>
--#include <asm/mmu_context.h>
--
--/**
-- * sh64_tlb_init
-- *
-- * Perform initial setup for the DTLB and ITLB.
-- */
--int __init sh64_tlb_init(void)
--{
--	/* Assign some sane DTLB defaults */
--	cpu_data->dtlb.entries	= 64;
--	cpu_data->dtlb.step	= 0x10;
--
--	cpu_data->dtlb.first	= DTLB_FIXED | cpu_data->dtlb.step;
--	cpu_data->dtlb.next	= cpu_data->dtlb.first;
--
--	cpu_data->dtlb.last	= DTLB_FIXED |
--				  ((cpu_data->dtlb.entries - 1) *
--				   cpu_data->dtlb.step);
--
--	/* And again for the ITLB */
--	cpu_data->itlb.entries	= 64;
--	cpu_data->itlb.step	= 0x10;
--
--	cpu_data->itlb.first	= ITLB_FIXED | cpu_data->itlb.step;
--	cpu_data->itlb.next	= cpu_data->itlb.first;
--	cpu_data->itlb.last	= ITLB_FIXED |
--				  ((cpu_data->itlb.entries - 1) *
--				   cpu_data->itlb.step);
--
--	return 0;
--}
--
--/**
-- * sh64_next_free_dtlb_entry
-- *
-- * Find the next available DTLB entry
-- */
--unsigned long long sh64_next_free_dtlb_entry(void)
--{
--	return cpu_data->dtlb.next;
--}
--
--/**
-- * sh64_get_wired_dtlb_entry
+- * Local timer interrupt handler. It does both profiling and
+- * process statistics/rescheduling.
 - *
-- * Allocate a wired (locked-in) entry in the DTLB
+- * We do profiling in every local tick, statistics/rescheduling
+- * happen only every 'profiling multiplier' ticks. The default
+- * multiplier is 1 and it can be changed by writing the new multiplier
+- * value into /proc/profile.
 - */
--unsigned long long sh64_get_wired_dtlb_entry(void)
--{
--	unsigned long long entry = sh64_next_free_dtlb_entry();
--
--	cpu_data->dtlb.first += cpu_data->dtlb.step;
--	cpu_data->dtlb.next  += cpu_data->dtlb.step;
--
--	return entry;
--}
 -
--/**
-- * sh64_put_wired_dtlb_entry
-- *
-- * @entry:	Address of TLB slot.
-- *
-- * Free a wired (locked-in) entry in the DTLB.
-- *
-- * Works like a stack, last one to allocate must be first one to free.
-- */
--int sh64_put_wired_dtlb_entry(unsigned long long entry)
+-void smp_local_timer_interrupt(void)
 -{
--	__flush_tlb_slot(entry);
+-	int cpu = smp_processor_id();
+-	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
 -
 -	/*
--	 * We don't do any particularly useful tracking of wired entries,
--	 * so this approach works like a stack .. last one to be allocated
--	 * has to be the first one to be freed.
+-	 * Normally we should not be here till LAPIC has been initialized but
+-	 * in some cases like kdump, its possible that there is a pending LAPIC
+-	 * timer interrupt from previous kernel's context and is delivered in
+-	 * new kernel the moment interrupts are enabled.
 -	 *
--	 * We could potentially load wired entries into a list and work on
--	 * rebalancing the list periodically (which also entails moving the
--	 * contents of a TLB entry) .. though I have a feeling that this is
--	 * more trouble than it's worth.
--	 */
--
--	/*
--	 * Entry must be valid .. we don't want any ITLB addresses!
+-	 * Interrupts are enabled early and LAPIC is setup much later, hence
+-	 * its possible that when we get here evt->event_handler is NULL.
+-	 * Check for event_handler being NULL and discard the interrupt as
+-	 * spurious.
 -	 */
--	if (entry <= DTLB_FIXED)
--		return -EINVAL;
+-	if (!evt->event_handler) {
+-		printk(KERN_WARNING
+-		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+-		/* Switch it off */
+-		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+-		return;
+-	}
 -
 -	/*
--	 * Next, check if we're within range to be freed. (ie, must be the
--	 * entry beneath the first 'free' entry!
+-	 * the NMI deadlock-detector uses this.
 -	 */
--	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
--		return -EINVAL;
--
--	/* If we are, then bring this entry back into the list */
--	cpu_data->dtlb.first	-= cpu_data->dtlb.step;
--	cpu_data->dtlb.next	= entry;
+-	add_pda(apic_timer_irqs, 1);
 -
--	return 0;
+-	evt->event_handler(evt);
 -}
 -
--/**
-- * sh64_setup_tlb_slot
-- *
-- * @config_addr:	Address of TLB slot.
-- * @eaddr:		Virtual address.
-- * @asid:		Address Space Identifier.
-- * @paddr:		Physical address.
+-/*
+- * Local APIC timer interrupt. This is the most natural way for doing
+- * local interrupts, but local timer interrupts can be emulated by
+- * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 - *
-- * Load up a virtual<->physical translation for @eaddr<->@paddr in the
-- * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
+- * [ if a single-CPU system runs an SMP kernel then we call the local
+- *   interrupt as well. Thus we cannot inline the local irq ... ]
 - */
--inline void sh64_setup_tlb_slot(unsigned long long config_addr,
--				unsigned long eaddr,
--				unsigned long asid,
--				unsigned long paddr)
+-void smp_apic_timer_interrupt(struct pt_regs *regs)
 -{
--	unsigned long long pteh, ptel;
--
--	/* Sign extension */
--#if (NEFF == 32)
--	pteh = (unsigned long long)(signed long long)(signed long) eaddr;
--#else
--#error "Can't sign extend more than 32 bits yet"
--#endif
--	pteh &= PAGE_MASK;
--	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
--#if (NEFF == 32)
--	ptel = (unsigned long long)(signed long long)(signed long) paddr;
--#else
--#error "Can't sign extend more than 32 bits yet"
--#endif
--	ptel &= PAGE_MASK;
--	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
+-	struct pt_regs *old_regs = set_irq_regs(regs);
 -
--	asm volatile("putcfg %0, 1, %1\n\t"
--			"putcfg %0, 0, %2\n"
--			: : "r" (config_addr), "r" (ptel), "r" (pteh));
+-	/*
+-	 * NOTE! We'd better ACK the irq immediately,
+-	 * because timer handling can be slow.
+-	 */
+-	ack_APIC_irq();
+-	/*
+-	 * update_process_times() expects us to have done irq_enter().
+-	 * Besides, if we don't timer interrupts ignore the global
+-	 * interrupt lock, which is the WrongThing (tm) to do.
+-	 */
+-	exit_idle();
+-	irq_enter();
+-	smp_local_timer_interrupt();
+-	irq_exit();
+-	set_irq_regs(old_regs);
 -}
 -
--/**
-- * sh64_teardown_tlb_slot
-- *
-- * @config_addr:	Address of TLB slot.
-- *
-- * Teardown any existing mapping in the TLB slot @config_addr.
-- */
--inline void sh64_teardown_tlb_slot(unsigned long long config_addr)
--	__attribute__ ((alias("__flush_tlb_slot")));
--
-diff --git a/arch/sh64/mm/tlbmiss.c b/arch/sh64/mm/tlbmiss.c
-deleted file mode 100644
-index b767d6c..0000000
---- a/arch/sh64/mm/tlbmiss.c
-+++ /dev/null
-@@ -1,279 +0,0 @@
--/*
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- *
-- * arch/sh64/mm/tlbmiss.c
-- *
-- * Original code from fault.c
-- * Copyright (C) 2000, 2001  Paolo Alberelli
-- *
-- * Fast PTE->TLB refill path
-- * Copyright (C) 2003 Richard.Curnow at superh.com
-- *
-- * IMPORTANT NOTES :
-- * The do_fast_page_fault function is called from a context in entry.S where very few registers
-- * have been saved.  In particular, the code in this file must be compiled not to use ANY
-- * caller-save registers that are not part of the restricted save set.  Also, it means that
-- * code in this file must not make calls to functions elsewhere in the kernel, or else the
-- * excepting context will see corruption in its caller-save registers.  Plus, the entry.S save
-- * area is non-reentrant, so this code has to run with SR.BL==1, i.e. no interrupts taken inside
-- * it and panic on any exception.
-- *
+ /*
+  * apic_is_clustered_box() -- Check if we can expect good TSC
+  *
+@@ -1103,21 +1180,34 @@ __cpuinit int apic_is_clustered_box(void)
+ {
+ 	int i, clusters, zeros;
+ 	unsigned id;
++	u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+ 	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
+ 
+ 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
+ 
+ 	for (i = 0; i < NR_CPUS; i++) {
+-		id = bios_cpu_apicid[i];
++		/* are we being called early in kernel startup? */
++		if (bios_cpu_apicid) {
++			id = bios_cpu_apicid[i];
++		}
++		else if (i < nr_cpu_ids) {
++			if (cpu_present(i))
++				id = per_cpu(x86_bios_cpu_apicid, i);
++			else
++				continue;
++		}
++		else
++			break;
++
+ 		if (id != BAD_APICID)
+ 			__set_bit(APIC_CLUSTERID(id), clustermap);
+ 	}
+ 
+ 	/* Problem:  Partially populated chassis may not have CPUs in some of
+ 	 * the APIC clusters they have been allocated.  Only present CPUs have
+-	 * bios_cpu_apicid entries, thus causing zeroes in the bitmap.  Since
+-	 * clusters are allocated sequentially, count zeros only if they are
+-	 * bounded by ones.
++	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
++	 * Since clusters are allocated sequentially, count zeros only if
++	 * they are bounded by ones.
+ 	 */
+ 	clusters = 0;
+ 	zeros = 0;
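
The comment above ("count zeros only if they are bounded by ones") is easy to mis-read. Here is a small stand-alone model of that idea, using an ordinary array instead of the kernel bitmap; it makes no claim to be the exact loop that follows in the file.

#include <stdio.h>

int main(void)
{
	/* hypothetical map: clusters 0, 1 and 3 populated; cluster 2 is a
	 * gap bounded by populated clusters on both sides */
	int clustermap[16] = { 1, 1, 0, 1 };
	int clusters = 0, zeros = 0, i;

	for (i = 0; i < 16; i++) {
		if (clustermap[i]) {
			clusters += 1 + zeros;	/* a bounded gap counts as a cluster */
			zeros = 0;
		} else if (clusters) {
			zeros++;		/* leading/trailing gaps never get added */
		}
	}
	printf("clusters counted: %d\n", clusters);	/* prints 4 for this map */
	return 0;
}
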
+@@ -1138,96 +1228,33 @@ __cpuinit int apic_is_clustered_box(void)
+ }
+ 
+ /*
+- * This interrupt should _never_ happen with our APIC/SMP architecture
 - */
--
--#include <linux/signal.h>
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/errno.h>
--#include <linux/string.h>
--#include <linux/types.h>
--#include <linux/ptrace.h>
--#include <linux/mman.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/interrupt.h>
--
--#include <asm/system.h>
--#include <asm/tlb.h>
--#include <asm/io.h>
--#include <asm/uaccess.h>
--#include <asm/pgalloc.h>
--#include <asm/mmu_context.h>
--#include <asm/registers.h>		/* required by inline asm statements */
--
--/* Callable from fault.c, so not static */
--inline void __do_tlb_refill(unsigned long address,
--                            unsigned long long is_text_not_data, pte_t *pte)
+-asmlinkage void smp_spurious_interrupt(void)
 -{
--	unsigned long long ptel;
--	unsigned long long pteh=0;
--	struct tlb_info *tlbp;
--	unsigned long long next;
--
--	/* Get PTEL first */
--	ptel = pte_val(*pte);
--
+-	unsigned int v;
+-	exit_idle();
+-	irq_enter();
 -	/*
--	 * Set PTEH register
+-	 * Check if this really is a spurious interrupt and ACK it
+-	 * if it is a vectored one.  Just in case...
+-	 * Spurious interrupts should not be ACKed.
 -	 */
--	pteh = address & MMU_VPN_MASK;
--
--	/* Sign extend based on neff. */
--#if (NEFF == 32)
--	/* Faster sign extension */
--	pteh = (unsigned long long)(signed long long)(signed long)pteh;
--#else
--	/* General case */
--	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
--#endif
--
--	/* Set the ASID. */
--	pteh |= get_asid() << PTEH_ASID_SHIFT;
--	pteh |= PTEH_VALID;
--
--	/* Set PTEL register, set_pte has performed the sign extension */
--	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
--
--	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
--	next = tlbp->next;
--	__flush_tlb_slot(next);
--	asm volatile ("putcfg %0,1,%2\n\n\t"
--		      "putcfg %0,0,%1\n"
--		      :  : "r" (next), "r" (pteh), "r" (ptel) );
--
--	next += TLB_STEP;
--	if (next > tlbp->last) next = tlbp->first;
--	tlbp->next = next;
+-	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
+-	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+-		ack_APIC_irq();
 -
+-	add_pda(irq_spurious_count, 1);
+-	irq_exit();
 -}
 -
--static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long protection_flags,
--                                unsigned long long textaccess,
--				unsigned long address)
--{
--	pgd_t *dir;
--	pmd_t *pmd;
--	static pte_t *pte;
--	pte_t entry;
--
--	dir = pgd_offset_k(address);
--	pmd = pmd_offset(dir, address);
--
--	if (pmd_none(*pmd)) {
--		return 0;
--	}
--
--	if (pmd_bad(*pmd)) {
--		pmd_clear(pmd);
--		return 0;
--	}
--
--	pte = pte_offset_kernel(pmd, address);
--	entry = *pte;
--
--	if (pte_none(entry) || !pte_present(entry)) {
--		return 0;
--	}
--
--	if ((pte_val(entry) & protection_flags) != protection_flags) {
--		return 0;
--	}
--
--        __do_tlb_refill(address, textaccess, pte);
--
--	return 1;
--}
+-/*
+- * This interrupt should never happen with our APIC/SMP architecture
++ * APIC command line parameters
+  */
 -
--static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_flags,
--			unsigned long long textaccess,
--			unsigned long address)
+-asmlinkage void smp_error_interrupt(void)
 -{
--	pgd_t *dir;
--	pmd_t *pmd;
--	pte_t *pte;
--	pte_t entry;
--
--	/* NB. The PGD currently only contains a single entry - there is no
--	   page table tree stored for the top half of the address space since
--	   virtual pages in that region should never be mapped in user mode.
--	   (In kernel mode, the only things in that region are the 512Mb super
--	   page (locked in), and vmalloc (modules) +  I/O device pages (handled
--	   by handle_vmalloc_fault), so no PGD for the upper half is required
--	   by kernel mode either).
--
--	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
--	   the next test is necessary.  - RPC */
--	if (address >= (unsigned long) TASK_SIZE) {
--		/* upper half - never has page table entries. */
--		return 0;
--	}
--	dir = pgd_offset(mm, address);
--	if (pgd_none(*dir)) {
--		return 0;
--	}
--	if (!pgd_present(*dir)) {
--		return 0;
--	}
--
--	pmd = pmd_offset(dir, address);
--	if (pmd_none(*pmd)) {
--		return 0;
--	}
--	if (!pmd_present(*pmd)) {
--		return 0;
--	}
--	pte = pte_offset_kernel(pmd, address);
--	entry = *pte;
--	if (pte_none(entry)) {
--		return 0;
--	}
--	if (!pte_present(entry)) {
--		return 0;
--	}
--
--	/* If the page doesn't have sufficient protection bits set to service the
--	   kind of fault being handled, there's not much point doing the TLB refill.
--	   Punt the fault to the general handler. */
--	if ((pte_val(entry) & protection_flags) != protection_flags) {
--		return 0;
--	}
+-	unsigned int v, v1;
 -
--        __do_tlb_refill(address, textaccess, pte);
+-	exit_idle();
+-	irq_enter();
+-	/* First tickle the hardware, only then report what went on. -- REW */
+-	v = apic_read(APIC_ESR);
+-	apic_write(APIC_ESR, 0);
+-	v1 = apic_read(APIC_ESR);
+-	ack_APIC_irq();
+-	atomic_inc(&irq_err_count);
 -
--	return 1;
+-	/* Here is what the APIC error bits mean:
+-	   0: Send CS error
+-	   1: Receive CS error
+-	   2: Send accept error
+-	   3: Receive accept error
+-	   4: Reserved
+-	   5: Send illegal vector
+-	   6: Received illegal vector
+-	   7: Illegal register address
+-	*/
+-	printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
+-		smp_processor_id(), v , v1);
+-	irq_exit();
 -}
 -
--/* Put all this information into one structure so that everything is just arithmetic
--   relative to a single base address.  This reduces the number of movi/shori pairs needed
--   just to load addresses of static data. */
--struct expevt_lookup {
--	unsigned short protection_flags[8];
--	unsigned char  is_text_access[8];
--	unsigned char  is_write_access[8];
--};
--
--#define PRU (1<<9)
--#define PRW (1<<8)
--#define PRX (1<<7)
--#define PRR (1<<6)
--
--#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
--#define YOUNG (_PAGE_ACCESSED)
--
--/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
--   the fault happened in user mode or privileged mode. */
--static struct expevt_lookup expevt_lookup_table = {
--	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
--	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
--};
+-int disable_apic;
 -
 -/*
--   This routine handles page faults that can be serviced just by refilling a
--   TLB entry from an existing page table entry.  (This case represents a very
--   large majority of page faults.) Return 1 if the fault was successfully
--   handled.  Return 0 if the fault could not be handled.  (This leads into the
--   general fault handling in fault.c which deals with mapping file-backed
--   pages, stack growth, segmentation faults, swapping etc etc)
+- * This initializes the IO-APIC and APIC hardware if this is
+- * a UP kernel.
 - */
--asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
--			          unsigned long address)
--{
--	struct task_struct *tsk;
--	struct mm_struct *mm;
--	unsigned long long textaccess;
--	unsigned long long protection_flags;
--	unsigned long long index;
--	unsigned long long expevt4;
--
--	/* The next few lines implement a way of hashing EXPEVT into a small array index
--	   which can be used to lookup parameters specific to the type of TLBMISS being
--	   handled.  Note:
--	   ITLBMISS has EXPEVT==0xa40
--	   RTLBMISS has EXPEVT==0x040
--	   WTLBMISS has EXPEVT==0x060
--	*/
--
--	expevt4 = (expevt >> 4);
--	/* TODO : xor ssr_md into this expression too.  Then we can check that PRU is set
--	   when it needs to be. */
--	index = expevt4 ^ (expevt4 >> 5);
--	index &= 7;
--	protection_flags = expevt_lookup_table.protection_flags[index];
--	textaccess       = expevt_lookup_table.is_text_access[index];
--
--#ifdef CONFIG_SH64_PROC_TLB
--	++calls_to_do_fast_page_fault;
--#endif
--
--	/* SIM
--	 * Note this is now called with interrupts still disabled
--	 * This is to cope with being called for a missing IO port
--	 * address with interrupts disabled. This should be fixed as
--	 * soon as we have a better 'fast path' miss handler.
--	 *
--	 * Plus take care how you try and debug this stuff.
--	 * For example, writing debug data to a port which you
--	 * have just faulted on is not going to work.
--	 */
+-int __init APIC_init_uniprocessor (void)
++static int __init apic_set_verbosity(char *str)
+ {
+-	if (disable_apic) {
+-		printk(KERN_INFO "Apic disabled\n");
+-		return -1;
++	if (str == NULL)  {
++		skip_ioapic_setup = 0;
++		ioapic_force = 1;
++		return 0;
+ 	}
+-	if (!cpu_has_apic) {
+-		disable_apic = 1;
+-		printk(KERN_INFO "Apic disabled by BIOS\n");
+-		return -1;
++	if (strcmp("debug", str) == 0)
++		apic_verbosity = APIC_DEBUG;
++	else if (strcmp("verbose", str) == 0)
++		apic_verbosity = APIC_VERBOSE;
++	else {
++		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
++				" use apic=verbose or apic=debug\n", str);
++		return -EINVAL;
+ 	}
+ 
+-	verify_local_APIC();
 -
--	tsk = current;
--	mm = tsk->mm;
+-	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
+-	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
 -
--	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
--	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
--		if (ssr_md) {
--			/* Process-contexts can never have this address range mapped */
--			if (handle_vmalloc_fault(mm, protection_flags, textaccess, address)) {
--				return 1;
--			}
--		}
--	} else if (!in_interrupt() && mm) {
--		if (handle_tlbmiss(mm, protection_flags, textaccess, address)) {
--			return 1;
--		}
--	}
+-	setup_local_APIC();
 -
--	return 0;
--}
+-	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
+-		setup_IO_APIC();
+-	else
+-		nr_ioapics = 0;
+-	setup_boot_APIC_clock();
+-	check_nmi_watchdog();
+ 	return 0;
+ }
++early_param("apic", apic_set_verbosity);
+ 
+ static __init int setup_disableapic(char *str)
+ {
+ 	disable_apic = 1;
+-	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
+ 	return 0;
+ }
+ early_param("disableapic", setup_disableapic);
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index af045ca..d4438ef 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -227,6 +227,7 @@
+ #include <linux/dmi.h>
+ #include <linux/suspend.h>
+ #include <linux/kthread.h>
++#include <linux/jiffies.h>
+ 
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -235,8 +236,6 @@
+ #include <asm/paravirt.h>
+ #include <asm/reboot.h>
+ 
+-#include "io_ports.h"
 -
-diff --git a/arch/sh64/oprofile/Makefile b/arch/sh64/oprofile/Makefile
-deleted file mode 100644
-index 11a451f..0000000
---- a/arch/sh64/oprofile/Makefile
-+++ /dev/null
-@@ -1,12 +0,0 @@
--obj-$(CONFIG_OPROFILE) += oprofile.o
+ #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
+ extern int (*console_blank_hook)(int);
+ #endif
+@@ -324,7 +323,7 @@ extern int (*console_blank_hook)(int);
+ /*
+  * Ignore suspend events for this amount of time after a resume
+  */
+-#define DEFAULT_BOUNCE_INTERVAL		(3 * HZ)
++#define DEFAULT_BOUNCE_INTERVAL	(3 * HZ)
+ 
+ /*
+  * Maximum number of events stored
+@@ -336,7 +335,7 @@ extern int (*console_blank_hook)(int);
+  */
+ struct apm_user {
+ 	int		magic;
+-	struct apm_user *	next;
++	struct apm_user *next;
+ 	unsigned int	suser: 1;
+ 	unsigned int	writer: 1;
+ 	unsigned int	reader: 1;
+@@ -372,44 +371,44 @@ struct apm_user {
+ static struct {
+ 	unsigned long	offset;
+ 	unsigned short	segment;
+-}				apm_bios_entry;
+-static int			clock_slowed;
+-static int			idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
+-static int			idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
+-static int			set_pm_idle;
+-static int			suspends_pending;
+-static int			standbys_pending;
+-static int			ignore_sys_suspend;
+-static int			ignore_normal_resume;
+-static int			bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL;
 -
--DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
--		oprof.o cpu_buffer.o buffer_sync.o \
--		event_buffer.o oprofile_files.o \
--		oprofilefs.o oprofile_stats.o \
--		timer_int.o )
+-static int			debug __read_mostly;
+-static int			smp __read_mostly;
+-static int			apm_disabled = -1;
++} apm_bios_entry;
++static int clock_slowed;
++static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
++static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
++static int set_pm_idle;
++static int suspends_pending;
++static int standbys_pending;
++static int ignore_sys_suspend;
++static int ignore_normal_resume;
++static int bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL;
++
++static int debug __read_mostly;
++static int smp __read_mostly;
++static int apm_disabled = -1;
+ #ifdef CONFIG_SMP
+-static int			power_off;
++static int power_off;
+ #else
+-static int			power_off = 1;
++static int power_off = 1;
+ #endif
+ #ifdef CONFIG_APM_REAL_MODE_POWER_OFF
+-static int			realmode_power_off = 1;
++static int realmode_power_off = 1;
+ #else
+-static int			realmode_power_off;
++static int realmode_power_off;
+ #endif
+ #ifdef CONFIG_APM_ALLOW_INTS
+-static int			allow_ints = 1;
++static int allow_ints = 1;
+ #else
+-static int			allow_ints;
++static int allow_ints;
+ #endif
+-static int			broken_psr;
++static int broken_psr;
+ 
+ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
+ static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+-static struct apm_user *	user_list;
++static struct apm_user *user_list;
+ static DEFINE_SPINLOCK(user_list_lock);
+-static const struct desc_struct	bad_bios_desc = { 0, 0x00409200 };
++static const struct desc_struct	bad_bios_desc = { { { 0, 0x00409200 } } };
+ 
+-static const char		driver_version[] = "1.16ac";	/* no spaces */
++static const char driver_version[] = "1.16ac";	/* no spaces */
+ 
+ static struct task_struct *kapmd_task;
+ 
+@@ -417,7 +416,7 @@ static struct task_struct *kapmd_task;
+  *	APM event names taken from the APM 1.2 specification. These are
+  *	the message codes that the BIOS uses to tell us about events
+  */
+-static const char *	const apm_event_name[] = {
++static const char * const apm_event_name[] = {
+ 	"system standby",
+ 	"system suspend",
+ 	"normal resume",
+@@ -435,14 +434,14 @@ static const char *	const apm_event_name[] = {
+ 
+ typedef struct lookup_t {
+ 	int	key;
+-	char *	msg;
++	char 	*msg;
+ } lookup_t;
+ 
+ /*
+  *	The BIOS returns a set of standard error codes in AX when the
+  *	carry flag is set.
+  */
+- 
++
+ static const lookup_t error_table[] = {
+ /* N/A	{ APM_SUCCESS,		"Operation succeeded" }, */
+ 	{ APM_DISABLED,		"Power management disabled" },
+@@ -472,24 +471,25 @@ static const lookup_t error_table[] = {
+  *	Write a meaningful log entry to the kernel log in the event of
+  *	an APM error.
+  */
+- 
++
+ static void apm_error(char *str, int err)
+ {
+-	int	i;
++	int i;
+ 
+ 	for (i = 0; i < ERROR_COUNT; i++)
+-		if (error_table[i].key == err) break;
++		if (error_table[i].key == err)
++			break;
+ 	if (i < ERROR_COUNT)
+ 		printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
+ 	else
+ 		printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
+-			str, err);
++		       str, err);
+ }
+ 
+ /*
+  * Lock APM functionality to physical CPU 0
+  */
+- 
++
+ #ifdef CONFIG_SMP
+ 
+ static cpumask_t apm_save_cpus(void)
+@@ -511,7 +511,7 @@ static inline void apm_restore_cpus(cpumask_t mask)
+ /*
+  *	No CPU lockdown needed on a uniprocessor
+  */
+- 
++
+ #define apm_save_cpus()		(current->cpus_allowed)
+ #define apm_restore_cpus(x)	(void)(x)
+ 
+@@ -590,7 +590,7 @@ static inline void apm_irq_restore(unsigned long flags)
+  *	code is returned in AH (bits 8-15 of eax) and this function
+  *	returns non-zero.
+  */
+- 
++
+ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
+ 	u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi)
+ {
+@@ -602,7 +602,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
+ 	struct desc_struct	*gdt;
+ 
+ 	cpus = apm_save_cpus();
+-	
++
+ 	cpu = get_cpu();
+ 	gdt = get_cpu_gdt_table(cpu);
+ 	save_desc_40 = gdt[0x40 / 8];
+@@ -616,7 +616,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
+ 	gdt[0x40 / 8] = save_desc_40;
+ 	put_cpu();
+ 	apm_restore_cpus(cpus);
+-	
++
+ 	return *eax & 0xff;
+ }
+ 
+@@ -645,7 +645,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
+ 	struct desc_struct	*gdt;
+ 
+ 	cpus = apm_save_cpus();
+-	
++
+ 	cpu = get_cpu();
+ 	gdt = get_cpu_gdt_table(cpu);
+ 	save_desc_40 = gdt[0x40 / 8];
+@@ -680,7 +680,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
+ 
+ static int apm_driver_version(u_short *val)
+ {
+-	u32	eax;
++	u32 eax;
+ 
+ 	if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax))
+ 		return (eax >> 8) & 0xff;
+@@ -704,16 +704,16 @@ static int apm_driver_version(u_short *val)
+  *	that APM 1.2 is in use. If no messges are pending the value 0x80
+  *	is returned (No power management events pending).
+  */
+- 
++
+ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
+ {
+-	u32	eax;
+-	u32	ebx;
+-	u32	ecx;
+-	u32	dummy;
++	u32 eax;
++	u32 ebx;
++	u32 ecx;
++	u32 dummy;
+ 
+ 	if (apm_bios_call(APM_FUNC_GET_EVENT, 0, 0, &eax, &ebx, &ecx,
+-			&dummy, &dummy))
++			  &dummy, &dummy))
+ 		return (eax >> 8) & 0xff;
+ 	*event = ebx;
+ 	if (apm_info.connection_version < 0x0102)
+@@ -736,10 +736,10 @@ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
+  *	The state holds the state to transition to, which may in fact
+  *	be an acceptance of a BIOS requested state change.
+  */
+- 
++
+ static int set_power_state(u_short what, u_short state)
+ {
+-	u32	eax;
++	u32 eax;
+ 
+ 	if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax))
+ 		return (eax >> 8) & 0xff;
+@@ -752,7 +752,7 @@ static int set_power_state(u_short what, u_short state)
+  *
+  *	Transition the entire system into a new APM power state.
+  */
+- 
++
+ static int set_system_power_state(u_short state)
+ {
+ 	return set_power_state(APM_DEVICE_ALL, state);
+@@ -766,13 +766,13 @@ static int set_system_power_state(u_short state)
+  *	to handle the idle request. On a success the function returns 1
+  *	if the BIOS did clock slowing or 0 otherwise.
+  */
+- 
++
+ static int apm_do_idle(void)
+ {
+-	u32	eax;
+-	u8	ret = 0;
+-	int	idled = 0;
+-	int	polling;
++	u32 eax;
++	u8 ret = 0;
++	int idled = 0;
++	int polling;
+ 
+ 	polling = !!(current_thread_info()->status & TS_POLLING);
+ 	if (polling) {
+@@ -799,10 +799,9 @@ static int apm_do_idle(void)
+ 		/* This always fails on some SMP boards running UP kernels.
+ 		 * Only report the failure the first 5 times.
+ 		 */
+-		if (++t < 5)
+-		{
++		if (++t < 5) {
+ 			printk(KERN_DEBUG "apm_do_idle failed (%d)\n",
+-					(eax >> 8) & 0xff);
++			       (eax >> 8) & 0xff);
+ 			t = jiffies;
+ 		}
+ 		return -1;
+@@ -814,15 +813,15 @@ static int apm_do_idle(void)
+ /**
+  *	apm_do_busy	-	inform the BIOS the CPU is busy
+  *
+- *	Request that the BIOS brings the CPU back to full performance. 
++ *	Request that the BIOS brings the CPU back to full performance.
+  */
+- 
++
+ static void apm_do_busy(void)
+ {
+-	u32	dummy;
++	u32 dummy;
+ 
+ 	if (clock_slowed || ALWAYS_CALL_BUSY) {
+-		(void) apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy);
++		(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy);
+ 		clock_slowed = 0;
+ 	}
+ }
+@@ -833,15 +832,15 @@ static void apm_do_busy(void)
+  * power management - we probably want
+  * to conserve power.
+  */
+-#define IDLE_CALC_LIMIT   (HZ * 100)
+-#define IDLE_LEAKY_MAX    16
++#define IDLE_CALC_LIMIT	(HZ * 100)
++#define IDLE_LEAKY_MAX	16
+ 
+ static void (*original_pm_idle)(void) __read_mostly;
+ 
+ /**
+  * apm_cpu_idle		-	cpu idling for APM capable Linux
+  *
+- * This is the idling function the kernel executes when APM is available. It 
++ * This is the idling function the kernel executes when APM is available. It
+  * tries to do BIOS powermanagement based on the average system idle time.
+  * Furthermore it calls the system default idle routine.
+  */
+@@ -882,7 +881,8 @@ recalc:
+ 
+ 			t = jiffies;
+ 			switch (apm_do_idle()) {
+-			case 0: apm_idle_done = 1;
++			case 0:
++				apm_idle_done = 1;
+ 				if (t != jiffies) {
+ 					if (bucket) {
+ 						bucket = IDLE_LEAKY_MAX;
+@@ -893,7 +893,8 @@ recalc:
+ 					continue;
+ 				}
+ 				break;
+-			case 1: apm_idle_done = 1;
++			case 1:
++				apm_idle_done = 1;
+ 				break;
+ 			default: /* BIOS refused */
+ 				break;
+@@ -921,10 +922,10 @@ recalc:
+  *	the SMP call on CPU0 as some systems will only honour this call
+  *	on their first cpu.
+  */
+- 
++
+ static void apm_power_off(void)
+ {
+-	unsigned char	po_bios_call[] = {
++	unsigned char po_bios_call[] = {
+ 		0xb8, 0x00, 0x10,	/* movw  $0x1000,ax  */
+ 		0x8e, 0xd0,		/* movw  ax,ss       */
+ 		0xbc, 0x00, 0xf0,	/* movw  $0xf000,sp  */
+@@ -935,13 +936,12 @@ static void apm_power_off(void)
+ 	};
+ 
+ 	/* Some bioses don't like being called from CPU != 0 */
+-	if (apm_info.realmode_power_off)
+-	{
++	if (apm_info.realmode_power_off) {
+ 		(void)apm_save_cpus();
+ 		machine_real_restart(po_bios_call, sizeof(po_bios_call));
++	} else {
++		(void)set_system_power_state(APM_STATE_OFF);
+ 	}
+-	else
+-		(void) set_system_power_state(APM_STATE_OFF);
+ }
+ 
+ #ifdef CONFIG_APM_DO_ENABLE
+@@ -950,17 +950,17 @@ static void apm_power_off(void)
+  *	apm_enable_power_management - enable BIOS APM power management
+  *	@enable: enable yes/no
+  *
+- *	Enable or disable the APM BIOS power services. 
++ *	Enable or disable the APM BIOS power services.
+  */
+- 
++
+ static int apm_enable_power_management(int enable)
+ {
+-	u32	eax;
++	u32 eax;
+ 
+ 	if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED))
+ 		return APM_NOT_ENGAGED;
+ 	if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL,
+-			enable, &eax))
++				 enable, &eax))
+ 		return (eax >> 8) & 0xff;
+ 	if (enable)
+ 		apm_info.bios.flags &= ~APM_BIOS_DISABLED;
+@@ -983,19 +983,19 @@ static int apm_enable_power_management(int enable)
+  *	if reported is a lifetime in secodnds/minutes at current powwer
+  *	consumption.
+  */
+- 
++
+ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
+ {
+-	u32	eax;
+-	u32	ebx;
+-	u32	ecx;
+-	u32	edx;
+-	u32	dummy;
++	u32 eax;
++	u32 ebx;
++	u32 ecx;
++	u32 edx;
++	u32 dummy;
+ 
+ 	if (apm_info.get_power_status_broken)
+ 		return APM_32_UNSUPPORTED;
+ 	if (apm_bios_call(APM_FUNC_GET_STATUS, APM_DEVICE_ALL, 0,
+-			&eax, &ebx, &ecx, &edx, &dummy))
++			  &eax, &ebx, &ecx, &edx, &dummy))
+ 		return (eax >> 8) & 0xff;
+ 	*status = ebx;
+ 	*bat = ecx;
+@@ -1011,11 +1011,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
+ static int apm_get_battery_status(u_short which, u_short *status,
+ 				  u_short *bat, u_short *life, u_short *nbat)
+ {
+-	u32	eax;
+-	u32	ebx;
+-	u32	ecx;
+-	u32	edx;
+-	u32	esi;
++	u32 eax;
++	u32 ebx;
++	u32 ecx;
++	u32 edx;
++	u32 esi;
+ 
+ 	if (apm_info.connection_version < 0x0102) {
+ 		/* pretend we only have one battery. */
+@@ -1026,7 +1026,7 @@ static int apm_get_battery_status(u_short which, u_short *status,
+ 	}
+ 
+ 	if (apm_bios_call(APM_FUNC_GET_STATUS, (0x8000 | (which)), 0, &eax,
+-			&ebx, &ecx, &edx, &esi))
++			  &ebx, &ecx, &edx, &esi))
+ 		return (eax >> 8) & 0xff;
+ 	*status = ebx;
+ 	*bat = ecx;
+@@ -1044,10 +1044,10 @@ static int apm_get_battery_status(u_short which, u_short *status,
+  *	Activate or deactive power management on either a specific device
+  *	or the entire system (%APM_DEVICE_ALL).
+  */
+- 
++
+ static int apm_engage_power_management(u_short device, int enable)
+ {
+-	u32	eax;
++	u32 eax;
+ 
+ 	if ((enable == 0) && (device == APM_DEVICE_ALL)
+ 	    && (apm_info.bios.flags & APM_BIOS_DISABLED))
+@@ -1074,7 +1074,7 @@ static int apm_engage_power_management(u_short device, int enable)
+  *	all video devices. Typically the BIOS will do laptop backlight and
+  *	monitor powerdown for us.
+  */
+- 
++
+ static int apm_console_blank(int blank)
+ {
+ 	int error = APM_NOT_ENGAGED; /* silence gcc */
+@@ -1126,7 +1126,7 @@ static apm_event_t get_queued_event(struct apm_user *as)
+ 
+ static void queue_event(apm_event_t event, struct apm_user *sender)
+ {
+-	struct apm_user *	as;
++	struct apm_user *as;
+ 
+ 	spin_lock(&user_list_lock);
+ 	if (user_list == NULL)
+@@ -1174,11 +1174,11 @@ static void reinit_timer(void)
+ 
+ 	spin_lock_irqsave(&i8253_lock, flags);
+ 	/* set the clock to HZ */
+-	outb_p(0x34, PIT_MODE);		/* binary, mode 2, LSB/MSB, ch 0 */
++	outb_pit(0x34, PIT_MODE);		/* binary, mode 2, LSB/MSB, ch 0 */
+ 	udelay(10);
+-	outb_p(LATCH & 0xff, PIT_CH0);	/* LSB */
++	outb_pit(LATCH & 0xff, PIT_CH0);	/* LSB */
+ 	udelay(10);
+-	outb(LATCH >> 8, PIT_CH0);	/* MSB */
++	outb_pit(LATCH >> 8, PIT_CH0);	/* MSB */
+ 	udelay(10);
+ 	spin_unlock_irqrestore(&i8253_lock, flags);
+ #endif
+@@ -1186,7 +1186,7 @@ static void reinit_timer(void)
+ 
+ static int suspend(int vetoable)
+ {
+-	int		err;
++	int err;
+ 	struct apm_user	*as;
+ 
+ 	if (pm_send_all(PM_SUSPEND, (void *)3)) {
+@@ -1239,7 +1239,7 @@ static int suspend(int vetoable)
+ 
+ static void standby(void)
+ {
+-	int	err;
++	int err;
+ 
+ 	local_irq_disable();
+ 	device_power_down(PMSG_SUSPEND);
+@@ -1256,8 +1256,8 @@ static void standby(void)
+ 
+ static apm_event_t get_event(void)
+ {
+-	int		error;
+-	apm_event_t	event = APM_NO_EVENTS; /* silence gcc */
++	int error;
++	apm_event_t event = APM_NO_EVENTS; /* silence gcc */
+ 	apm_eventinfo_t	info;
+ 
+ 	static int notified;
+@@ -1275,9 +1275,9 @@ static apm_event_t get_event(void)
+ 
+ static void check_events(void)
+ {
+-	apm_event_t		event;
+-	static unsigned long	last_resume;
+-	static int		ignore_bounce;
++	apm_event_t event;
++	static unsigned long last_resume;
++	static int ignore_bounce;
+ 
+ 	while ((event = get_event()) != 0) {
+ 		if (debug) {
+@@ -1289,7 +1289,7 @@ static void check_events(void)
+ 				       "event 0x%02x\n", event);
+ 		}
+ 		if (ignore_bounce
+-		    && ((jiffies - last_resume) > bounce_interval))
++		    && (time_after(jiffies, last_resume + bounce_interval)))
+ 			ignore_bounce = 0;
+ 
+ 		switch (event) {
+@@ -1357,7 +1357,7 @@ static void check_events(void)
+ 			/*
+ 			 * We are not allowed to reject a critical suspend.
+ 			 */
+-			(void) suspend(0);
++			(void)suspend(0);
+ 			break;
+ 		}
+ 	}
+@@ -1365,12 +1365,12 @@ static void check_events(void)
+ 
+ static void apm_event_handler(void)
+ {
+-	static int	pending_count = 4;
+-	int		err;
++	static int pending_count = 4;
++	int err;
+ 
+ 	if ((standbys_pending > 0) || (suspends_pending > 0)) {
+ 		if ((apm_info.connection_version > 0x100) &&
+-				(pending_count-- <= 0)) {
++		    (pending_count-- <= 0)) {
+ 			pending_count = 4;
+ 			if (debug)
+ 				printk(KERN_DEBUG "apm: setting state busy\n");
+@@ -1418,9 +1418,9 @@ static int check_apm_user(struct apm_user *as, const char *func)
+ 
+ static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+ {
+-	struct apm_user *	as;
+-	int			i;
+-	apm_event_t		event;
++	struct apm_user *as;
++	int i;
++	apm_event_t event;
+ 
+ 	as = fp->private_data;
+ 	if (check_apm_user(as, "read"))
+@@ -1459,9 +1459,9 @@ static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *
+ 	return 0;
+ }
+ 
+-static unsigned int do_poll(struct file *fp, poll_table * wait)
++static unsigned int do_poll(struct file *fp, poll_table *wait)
+ {
+-	struct apm_user * as;
++	struct apm_user *as;
+ 
+ 	as = fp->private_data;
+ 	if (check_apm_user(as, "poll"))
+@@ -1472,10 +1472,10 @@ static unsigned int do_poll(struct file *fp, poll_table * wait)
+ 	return 0;
+ }
+ 
+-static int do_ioctl(struct inode * inode, struct file *filp,
++static int do_ioctl(struct inode *inode, struct file *filp,
+ 		    u_int cmd, u_long arg)
+ {
+-	struct apm_user *	as;
++	struct apm_user *as;
+ 
+ 	as = filp->private_data;
+ 	if (check_apm_user(as, "ioctl"))
+@@ -1515,9 +1515,9 @@ static int do_ioctl(struct inode * inode, struct file *filp,
+ 	return 0;
+ }
+ 
+-static int do_release(struct inode * inode, struct file * filp)
++static int do_release(struct inode *inode, struct file *filp)
+ {
+-	struct apm_user *	as;
++	struct apm_user *as;
+ 
+ 	as = filp->private_data;
+ 	if (check_apm_user(as, "release"))
+@@ -1533,11 +1533,11 @@ static int do_release(struct inode * inode, struct file * filp)
+ 		if (suspends_pending <= 0)
+ 			(void) suspend(1);
+ 	}
+-  	spin_lock(&user_list_lock);
++	spin_lock(&user_list_lock);
+ 	if (user_list == as)
+ 		user_list = as->next;
+ 	else {
+-		struct apm_user *	as1;
++		struct apm_user *as1;
+ 
+ 		for (as1 = user_list;
+ 		     (as1 != NULL) && (as1->next != as);
+@@ -1553,9 +1553,9 @@ static int do_release(struct inode * inode, struct file * filp)
+ 	return 0;
+ }
+ 
+-static int do_open(struct inode * inode, struct file * filp)
++static int do_open(struct inode *inode, struct file *filp)
+ {
+-	struct apm_user *	as;
++	struct apm_user *as;
+ 
+ 	as = kmalloc(sizeof(*as), GFP_KERNEL);
+ 	if (as == NULL) {
+@@ -1569,7 +1569,7 @@ static int do_open(struct inode * inode, struct file * filp)
+ 	as->suspends_read = as->standbys_read = 0;
+ 	/*
+ 	 * XXX - this is a tiny bit broken, when we consider BSD
+-         * process accounting. If the device is opened by root, we
++	 * process accounting. If the device is opened by root, we
+ 	 * instantly flag that we used superuser privs. Who knows,
+ 	 * we might close the device immediately without doing a
+ 	 * privileged operation -- cevans
+@@ -1652,16 +1652,16 @@ static int proc_apm_show(struct seq_file *m, void *v)
+ 	   8) min = minutes; sec = seconds */
+ 
+ 	seq_printf(m, "%s %d.%d 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+-		     driver_version,
+-		     (apm_info.bios.version >> 8) & 0xff,
+-		     apm_info.bios.version & 0xff,
+-		     apm_info.bios.flags,
+-		     ac_line_status,
+-		     battery_status,
+-		     battery_flag,
+-		     percentage,
+-		     time_units,
+-		     units);
++		   driver_version,
++		   (apm_info.bios.version >> 8) & 0xff,
++		   apm_info.bios.version & 0xff,
++		   apm_info.bios.flags,
++		   ac_line_status,
++		   battery_status,
++		   battery_flag,
++		   percentage,
++		   time_units,
++		   units);
+ 	return 0;
+ }
+ 
+@@ -1684,8 +1684,8 @@ static int apm(void *unused)
+ 	unsigned short	cx;
+ 	unsigned short	dx;
+ 	int		error;
+-	char *		power_stat;
+-	char *		bat_stat;
++	char 		*power_stat;
++	char 		*bat_stat;
+ 
+ #ifdef CONFIG_SMP
+ 	/* 2002/08/01 - WT
+@@ -1744,23 +1744,41 @@ static int apm(void *unused)
+ 		}
+ 	}
+ 
+-	if (debug && (num_online_cpus() == 1 || smp )) {
++	if (debug && (num_online_cpus() == 1 || smp)) {
+ 		error = apm_get_power_status(&bx, &cx, &dx);
+ 		if (error)
+ 			printk(KERN_INFO "apm: power status not available\n");
+ 		else {
+ 			switch ((bx >> 8) & 0xff) {
+-			case 0: power_stat = "off line"; break;
+-			case 1: power_stat = "on line"; break;
+-			case 2: power_stat = "on backup power"; break;
+-			default: power_stat = "unknown"; break;
++			case 0:
++				power_stat = "off line";
++				break;
++			case 1:
++				power_stat = "on line";
++				break;
++			case 2:
++				power_stat = "on backup power";
++				break;
++			default:
++				power_stat = "unknown";
++				break;
+ 			}
+ 			switch (bx & 0xff) {
+-			case 0: bat_stat = "high"; break;
+-			case 1: bat_stat = "low"; break;
+-			case 2: bat_stat = "critical"; break;
+-			case 3: bat_stat = "charging"; break;
+-			default: bat_stat = "unknown"; break;
++			case 0:
++				bat_stat = "high";
++				break;
++			case 1:
++				bat_stat = "low";
++				break;
++			case 2:
++				bat_stat = "critical";
++				break;
++			case 3:
++				bat_stat = "charging";
++				break;
++			default:
++				bat_stat = "unknown";
++				break;
+ 			}
+ 			printk(KERN_INFO
+ 			       "apm: AC %s, battery status %s, battery life ",
+@@ -1777,8 +1795,8 @@ static int apm(void *unused)
+ 					printk("unknown\n");
+ 				else
+ 					printk("%d %s\n", dx & 0x7fff,
+-						(dx & 0x8000) ?
+-						"minutes" : "seconds");
++					       (dx & 0x8000) ?
++					       "minutes" : "seconds");
+ 			}
+ 		}
+ 	}
+@@ -1803,7 +1821,7 @@ static int apm(void *unused)
+ #ifndef MODULE
+ static int __init apm_setup(char *str)
+ {
+-	int	invert;
++	int invert;
+ 
+ 	while ((str != NULL) && (*str != '\0')) {
+ 		if (strncmp(str, "off", 3) == 0)
+@@ -1828,14 +1846,13 @@ static int __init apm_setup(char *str)
+ 		if ((strncmp(str, "power-off", 9) == 0) ||
+ 		    (strncmp(str, "power_off", 9) == 0))
+ 			power_off = !invert;
+-		if (strncmp(str, "smp", 3) == 0)
+-		{
++		if (strncmp(str, "smp", 3) == 0) {
+ 			smp = !invert;
+ 			idle_threshold = 100;
+ 		}
+ 		if ((strncmp(str, "allow-ints", 10) == 0) ||
+ 		    (strncmp(str, "allow_ints", 10) == 0))
+- 			apm_info.allow_ints = !invert;
++			apm_info.allow_ints = !invert;
+ 		if ((strncmp(str, "broken-psr", 10) == 0) ||
+ 		    (strncmp(str, "broken_psr", 10) == 0))
+ 			apm_info.get_power_status_broken = !invert;
+@@ -1881,7 +1898,8 @@ static int __init print_if_true(const struct dmi_system_id *d)
+  */
+ static int __init broken_ps2_resume(const struct dmi_system_id *d)
+ {
+-	printk(KERN_INFO "%s machine detected. Mousepad Resume Bug workaround hopefully not needed.\n", d->ident);
++	printk(KERN_INFO "%s machine detected. Mousepad Resume Bug "
++	       "workaround hopefully not needed.\n", d->ident);
+ 	return 0;
+ }
+ 
+@@ -1890,7 +1908,8 @@ static int __init set_realmode_power_off(const struct dmi_system_id *d)
+ {
+ 	if (apm_info.realmode_power_off == 0) {
+ 		apm_info.realmode_power_off = 1;
+-		printk(KERN_INFO "%s bios detected. Using realmode poweroff only.\n", d->ident);
++		printk(KERN_INFO "%s bios detected. "
++		       "Using realmode poweroff only.\n", d->ident);
+ 	}
+ 	return 0;
+ }
+@@ -1900,7 +1919,8 @@ static int __init set_apm_ints(const struct dmi_system_id *d)
+ {
+ 	if (apm_info.allow_ints == 0) {
+ 		apm_info.allow_ints = 1;
+-		printk(KERN_INFO "%s machine detected. Enabling interrupts during APM calls.\n", d->ident);
++		printk(KERN_INFO "%s machine detected. "
++		       "Enabling interrupts during APM calls.\n", d->ident);
+ 	}
+ 	return 0;
+ }
+@@ -1910,7 +1930,8 @@ static int __init apm_is_horked(const struct dmi_system_id *d)
+ {
+ 	if (apm_info.disabled == 0) {
+ 		apm_info.disabled = 1;
+-		printk(KERN_INFO "%s machine detected. Disabling APM.\n", d->ident);
++		printk(KERN_INFO "%s machine detected. "
++		       "Disabling APM.\n", d->ident);
+ 	}
+ 	return 0;
+ }
+@@ -1919,7 +1940,8 @@ static int __init apm_is_horked_d850md(const struct dmi_system_id *d)
+ {
+ 	if (apm_info.disabled == 0) {
+ 		apm_info.disabled = 1;
+-		printk(KERN_INFO "%s machine detected. Disabling APM.\n", d->ident);
++		printk(KERN_INFO "%s machine detected. "
++		       "Disabling APM.\n", d->ident);
+ 		printk(KERN_INFO "This bug is fixed in bios P15 which is available for \n");
+ 		printk(KERN_INFO "download from support.intel.com \n");
+ 	}
+@@ -1931,7 +1953,8 @@ static int __init apm_likes_to_melt(const struct dmi_system_id *d)
+ {
+ 	if (apm_info.forbid_idle == 0) {
+ 		apm_info.forbid_idle = 1;
+-		printk(KERN_INFO "%s machine detected. Disabling APM idle calls.\n", d->ident);
++		printk(KERN_INFO "%s machine detected. "
++		       "Disabling APM idle calls.\n", d->ident);
+ 	}
+ 	return 0;
+ }
+@@ -1954,7 +1977,8 @@ static int __init apm_likes_to_melt(const struct dmi_system_id *d)
+ static int __init broken_apm_power(const struct dmi_system_id *d)
+ {
+ 	apm_info.get_power_status_broken = 1;
+-	printk(KERN_WARNING "BIOS strings suggest APM bugs, disabling power status reporting.\n");
++	printk(KERN_WARNING "BIOS strings suggest APM bugs, "
++	       "disabling power status reporting.\n");
+ 	return 0;
+ }
+ 
+@@ -1965,7 +1989,8 @@ static int __init broken_apm_power(const struct dmi_system_id *d)
+ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
+ {
+ 	apm_info.get_power_status_swabinminutes = 1;
+-	printk(KERN_WARNING "BIOS strings suggest APM reports battery life in minutes and wrong byte order.\n");
++	printk(KERN_WARNING "BIOS strings suggest APM reports battery life "
++	       "in minutes and wrong byte order.\n");
+ 	return 0;
+ }
+ 
+@@ -1990,8 +2015,8 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
+ 		apm_is_horked, "Dell Inspiron 2500",
+ 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"),
+-			DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+-			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
++			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
++			DMI_MATCH(DMI_BIOS_VERSION, "A11"), },
+ 	},
+ 	{	/* Allow interrupts during suspend on Dell Inspiron laptops*/
+ 		set_apm_ints, "Dell Inspiron", {
+@@ -2014,15 +2039,15 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
+ 		apm_is_horked, "Dell Dimension 4100",
+ 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"),
+-			DMI_MATCH(DMI_BIOS_VENDOR,"Intel Corp."),
+-			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
++			DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."),
++			DMI_MATCH(DMI_BIOS_VERSION, "A11"), },
+ 	},
+ 	{	/* Allow interrupts during suspend on Compaq Laptops*/
+ 		set_apm_ints, "Compaq 12XL125",
+ 		{	DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Compaq PC"),
+ 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+-			DMI_MATCH(DMI_BIOS_VERSION,"4.06"), },
++			DMI_MATCH(DMI_BIOS_VERSION, "4.06"), },
+ 	},
+ 	{	/* Allow interrupts during APM or the clock goes slow */
+ 		set_apm_ints, "ASUSTeK",
+@@ -2064,15 +2089,15 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
+ 		apm_is_horked, "Sharp PC-PJ/AX",
+ 		{	DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "PC-PJ/AX"),
+-			DMI_MATCH(DMI_BIOS_VENDOR,"SystemSoft"),
+-			DMI_MATCH(DMI_BIOS_VERSION,"Version R2.08"), },
++			DMI_MATCH(DMI_BIOS_VENDOR, "SystemSoft"),
++			DMI_MATCH(DMI_BIOS_VERSION, "Version R2.08"), },
+ 	},
+ 	{	/* APM crashes */
+ 		apm_is_horked, "Dell Inspiron 2500",
+ 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"),
+-			DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+-			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
++			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
++			DMI_MATCH(DMI_BIOS_VERSION, "A11"), },
+ 	},
+ 	{	/* APM idle hangs */
+ 		apm_likes_to_melt, "Jabil AMD",
+@@ -2203,11 +2228,11 @@ static int __init apm_init(void)
+ 		return -ENODEV;
+ 	}
+ 	printk(KERN_INFO
+-		"apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n",
+-		((apm_info.bios.version >> 8) & 0xff),
+-		(apm_info.bios.version & 0xff),
+-		apm_info.bios.flags,
+-		driver_version);
++	       "apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n",
++	       ((apm_info.bios.version >> 8) & 0xff),
++	       (apm_info.bios.version & 0xff),
++	       apm_info.bios.flags,
++	       driver_version);
+ 	if ((apm_info.bios.flags & APM_32_BIT_SUPPORT) == 0) {
+ 		printk(KERN_INFO "apm: no 32 bit BIOS support\n");
+ 		return -ENODEV;
+@@ -2312,9 +2337,9 @@ static int __init apm_init(void)
+ 	}
+ 	wake_up_process(kapmd_task);
+ 
+-	if (num_online_cpus() > 1 && !smp ) {
++	if (num_online_cpus() > 1 && !smp) {
+ 		printk(KERN_NOTICE
+-		   "apm: disabled - APM is not SMP safe (power off active).\n");
++		       "apm: disabled - APM is not SMP safe (power off active).\n");
+ 		return 0;
+ 	}
+ 
+@@ -2339,7 +2364,7 @@ static int __init apm_init(void)
+ 
+ static void __exit apm_exit(void)
+ {
+-	int	error;
++	int error;
+ 
+ 	if (set_pm_idle) {
+ 		pm_idle = original_pm_idle;
+diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
+index 0e45981..afd8446 100644
+--- a/arch/x86/kernel/asm-offsets_32.c
++++ b/arch/x86/kernel/asm-offsets_32.c
+@@ -38,15 +38,15 @@ void foo(void);
+ 
+ void foo(void)
+ {
+-	OFFSET(SIGCONTEXT_eax, sigcontext, eax);
+-	OFFSET(SIGCONTEXT_ebx, sigcontext, ebx);
+-	OFFSET(SIGCONTEXT_ecx, sigcontext, ecx);
+-	OFFSET(SIGCONTEXT_edx, sigcontext, edx);
+-	OFFSET(SIGCONTEXT_esi, sigcontext, esi);
+-	OFFSET(SIGCONTEXT_edi, sigcontext, edi);
+-	OFFSET(SIGCONTEXT_ebp, sigcontext, ebp);
+-	OFFSET(SIGCONTEXT_esp, sigcontext, esp);
+-	OFFSET(SIGCONTEXT_eip, sigcontext, eip);
++	OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
++	OFFSET(IA32_SIGCONTEXT_bx, sigcontext, bx);
++	OFFSET(IA32_SIGCONTEXT_cx, sigcontext, cx);
++	OFFSET(IA32_SIGCONTEXT_dx, sigcontext, dx);
++	OFFSET(IA32_SIGCONTEXT_si, sigcontext, si);
++	OFFSET(IA32_SIGCONTEXT_di, sigcontext, di);
++	OFFSET(IA32_SIGCONTEXT_bp, sigcontext, bp);
++	OFFSET(IA32_SIGCONTEXT_sp, sigcontext, sp);
++	OFFSET(IA32_SIGCONTEXT_ip, sigcontext, ip);
+ 	BLANK();
+ 
+ 	OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
+@@ -70,39 +70,38 @@ void foo(void)
+ 	OFFSET(TI_cpu, thread_info, cpu);
+ 	BLANK();
+ 
+-	OFFSET(GDS_size, Xgt_desc_struct, size);
+-	OFFSET(GDS_address, Xgt_desc_struct, address);
+-	OFFSET(GDS_pad, Xgt_desc_struct, pad);
++	OFFSET(GDS_size, desc_ptr, size);
++	OFFSET(GDS_address, desc_ptr, address);
+ 	BLANK();
+ 
+-	OFFSET(PT_EBX, pt_regs, ebx);
+-	OFFSET(PT_ECX, pt_regs, ecx);
+-	OFFSET(PT_EDX, pt_regs, edx);
+-	OFFSET(PT_ESI, pt_regs, esi);
+-	OFFSET(PT_EDI, pt_regs, edi);
+-	OFFSET(PT_EBP, pt_regs, ebp);
+-	OFFSET(PT_EAX, pt_regs, eax);
+-	OFFSET(PT_DS,  pt_regs, xds);
+-	OFFSET(PT_ES,  pt_regs, xes);
+-	OFFSET(PT_FS,  pt_regs, xfs);
+-	OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
+-	OFFSET(PT_EIP, pt_regs, eip);
+-	OFFSET(PT_CS,  pt_regs, xcs);
+-	OFFSET(PT_EFLAGS, pt_regs, eflags);
+-	OFFSET(PT_OLDESP, pt_regs, esp);
+-	OFFSET(PT_OLDSS,  pt_regs, xss);
++	OFFSET(PT_EBX, pt_regs, bx);
++	OFFSET(PT_ECX, pt_regs, cx);
++	OFFSET(PT_EDX, pt_regs, dx);
++	OFFSET(PT_ESI, pt_regs, si);
++	OFFSET(PT_EDI, pt_regs, di);
++	OFFSET(PT_EBP, pt_regs, bp);
++	OFFSET(PT_EAX, pt_regs, ax);
++	OFFSET(PT_DS,  pt_regs, ds);
++	OFFSET(PT_ES,  pt_regs, es);
++	OFFSET(PT_FS,  pt_regs, fs);
++	OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
++	OFFSET(PT_EIP, pt_regs, ip);
++	OFFSET(PT_CS,  pt_regs, cs);
++	OFFSET(PT_EFLAGS, pt_regs, flags);
++	OFFSET(PT_OLDESP, pt_regs, sp);
++	OFFSET(PT_OLDSS,  pt_regs, ss);
+ 	BLANK();
+ 
+ 	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
+-	OFFSET(RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
++	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
+ 	BLANK();
+ 
+ 	OFFSET(pbe_address, pbe, address);
+ 	OFFSET(pbe_orig_address, pbe, orig_address);
+ 	OFFSET(pbe_next, pbe, next);
+ 
+-	/* Offset from the sysenter stack to tss.esp0 */
+-	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
++	/* Offset from the sysenter stack to tss.sp0 */
++	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
+ 		 sizeof(struct tss_struct));
+ 
+ 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+@@ -111,8 +110,6 @@ void foo(void)
+ 	DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
+ 	DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
+ 
+-	DEFINE(VDSO_PRELINK_asm, VDSO_PRELINK);
 -
--profdrvr-y				:= op_model_null.o
+ 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+ 
+ #ifdef CONFIG_PARAVIRT
+@@ -123,7 +120,7 @@ void foo(void)
+ 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
+ 	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
+ 	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+-	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
++	OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops, irq_enable_syscall_ret);
+ 	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+ #endif
+ 
+diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
+index d1b6ed9..494e1e0 100644
+--- a/arch/x86/kernel/asm-offsets_64.c
++++ b/arch/x86/kernel/asm-offsets_64.c
+@@ -38,7 +38,6 @@ int main(void)
+ #define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
+ 	ENTRY(state);
+ 	ENTRY(flags); 
+-	ENTRY(thread); 
+ 	ENTRY(pid);
+ 	BLANK();
+ #undef ENTRY
+@@ -47,6 +46,9 @@ int main(void)
+ 	ENTRY(addr_limit);
+ 	ENTRY(preempt_count);
+ 	ENTRY(status);
++#ifdef CONFIG_IA32_EMULATION
++	ENTRY(sysenter_return);
++#endif
+ 	BLANK();
+ #undef ENTRY
+ #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
+@@ -59,17 +61,31 @@ int main(void)
+ 	ENTRY(data_offset);
+ 	BLANK();
+ #undef ENTRY
++#ifdef CONFIG_PARAVIRT
++	BLANK();
++	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
++	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
++	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
++	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
++	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
++	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
++	OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops, irq_enable_syscall_ret);
++	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
++	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++#endif
++
++
+ #ifdef CONFIG_IA32_EMULATION
+ #define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
+-	ENTRY(eax);
+-	ENTRY(ebx);
+-	ENTRY(ecx);
+-	ENTRY(edx);
+-	ENTRY(esi);
+-	ENTRY(edi);
+-	ENTRY(ebp);
+-	ENTRY(esp);
+-	ENTRY(eip);
++	ENTRY(ax);
++	ENTRY(bx);
++	ENTRY(cx);
++	ENTRY(dx);
++	ENTRY(si);
++	ENTRY(di);
++	ENTRY(bp);
++	ENTRY(sp);
++	ENTRY(ip);
+ 	BLANK();
+ #undef ENTRY
+ 	DEFINE(IA32_RT_SIGFRAME_sigcontext,
+@@ -81,14 +97,14 @@ int main(void)
+ 	DEFINE(pbe_next, offsetof(struct pbe, next));
+ 	BLANK();
+ #define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
+-	ENTRY(rbx);
+-	ENTRY(rbx);
+-	ENTRY(rcx);
+-	ENTRY(rdx);
+-	ENTRY(rsp);
+-	ENTRY(rbp);
+-	ENTRY(rsi);
+-	ENTRY(rdi);
++	ENTRY(bx);
++	ENTRY(bx);
++	ENTRY(cx);
++	ENTRY(dx);
++	ENTRY(sp);
++	ENTRY(bp);
++	ENTRY(si);
++	ENTRY(di);
+ 	ENTRY(r8);
+ 	ENTRY(r9);
+ 	ENTRY(r10);
+@@ -97,7 +113,7 @@ int main(void)
+ 	ENTRY(r13);
+ 	ENTRY(r14);
+ 	ENTRY(r15);
+-	ENTRY(eflags);
++	ENTRY(flags);
+ 	BLANK();
+ #undef ENTRY
+ #define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
+@@ -108,7 +124,7 @@ int main(void)
+ 	ENTRY(cr8);
+ 	BLANK();
+ #undef ENTRY
+-	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
++	DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
+ 	BLANK();
+ 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+ 	BLANK();
+diff --git a/arch/x86/kernel/bootflag.c b/arch/x86/kernel/bootflag.c
+index 0b98605..30f25a7 100644
+--- a/arch/x86/kernel/bootflag.c
++++ b/arch/x86/kernel/bootflag.c
+@@ -1,8 +1,6 @@
+ /*
+  *	Implement 'Simple Boot Flag Specification 2.0'
+  */
 -
--oprofile-y				:= $(DRIVER_OBJS) $(profdrvr-y)
 -
-diff --git a/arch/sh64/oprofile/op_model_null.c b/arch/sh64/oprofile/op_model_null.c
-deleted file mode 100644
-index a750ea1..0000000
---- a/arch/sh64/oprofile/op_model_null.c
-+++ /dev/null
-@@ -1,23 +0,0 @@
--/*
-- * arch/sh64/oprofile/op_model_null.c
-- *
-- * Copyright (C) 2003  Paul Mundt
-- *
-- * This file is subject to the terms and conditions of the GNU General Public
-- * License.  See the file "COPYING" in the main directory of this archive
-- * for more details.
-- */
--#include <linux/kernel.h>
--#include <linux/oprofile.h>
--#include <linux/init.h>
--#include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+@@ -14,40 +12,38 @@
+ 
+ #include <linux/mc146818rtc.h>
+ 
 -
--int __init oprofile_arch_init(struct oprofile_operations *ops)
--{
--	return -ENODEV;
--}
+ #define SBF_RESERVED (0x78)
+ #define SBF_PNPOS    (1<<0)
+ #define SBF_BOOTING  (1<<1)
+ #define SBF_DIAG     (1<<2)
+ #define SBF_PARITY   (1<<7)
+ 
 -
--void oprofile_arch_exit(void)
--{
--}
+ int sbf_port __initdata = -1;	/* set via acpi_boot_init() */
+ 
 -
-diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
-index a8b4200..216147d 100644
---- a/arch/sparc/kernel/vmlinux.lds.S
-+++ b/arch/sparc/kernel/vmlinux.lds.S
-@@ -48,12 +48,12 @@ SECTIONS
- 	__init_begin = .;
- 	.init.text : {
- 		_sinittext = .;
--		*(.init.text)
-+		INIT_TEXT
- 		_einittext = .;
- 	}
- 	__init_text_end = .;
- 	.init.data : {
--		*(.init.data)
-+		INIT_DATA
- 	}
- 	. = ALIGN(16);
- 	.init.setup : {
-@@ -102,8 +102,8 @@ SECTIONS
- 	_end = . ;
- 	PROVIDE (end = .);
- 	/DISCARD/ : {
--		*(.exit.text)
--		*(.exit.data)
-+		EXIT_TEXT
-+		EXIT_DATA
- 		*(.exitcall.exit)
+ static int __init parity(u8 v)
+ {
+ 	int x = 0;
+ 	int i;
+-	
+-	for(i=0;i<8;i++)
+-	{
+-		x^=(v&1);
+-		v>>=1;
++
++	for (i = 0; i < 8; i++) {
++		x ^= (v & 1);
++		v >>= 1;
  	}
++
+ 	return x;
+ }
  
-diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
-index 10b212a..26f5791 100644
---- a/arch/sparc64/Kconfig
-+++ b/arch/sparc64/Kconfig
-@@ -66,6 +66,9 @@ config AUDIT_ARCH
- 	bool
- 	default y
- 
-+config ARCH_SETS_UP_PER_CPU_AREA
-+	def_bool y
+ static void __init sbf_write(u8 v)
+ {
+ 	unsigned long flags;
+-	if(sbf_port != -1)
+-	{
 +
- config ARCH_NO_VIRT_TO_BUS
- 	def_bool y
++	if (sbf_port != -1) {
+ 		v &= ~SBF_PARITY;
+-		if(!parity(v))
+-			v|=SBF_PARITY;
++		if (!parity(v))
++			v |= SBF_PARITY;
  
-@@ -200,6 +203,11 @@ config US2E_FREQ
- 	  If in doubt, say N.
+-		printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n", sbf_port, v);
++		printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n",
++			sbf_port, v);
  
- # Global things across all Sun machines.
-+config GENERIC_LOCKBREAK
-+	bool
-+	default y
-+	depends on SMP && PREEMPT
+ 		spin_lock_irqsave(&rtc_lock, flags);
+ 		CMOS_WRITE(v, sbf_port);
+@@ -57,33 +53,41 @@ static void __init sbf_write(u8 v)
+ 
+ static u8 __init sbf_read(void)
+ {
+-	u8 v;
+ 	unsigned long flags;
+-	if(sbf_port == -1)
++	u8 v;
 +
- config RWSEM_GENERIC_SPINLOCK
- 	bool
++	if (sbf_port == -1)
+ 		return 0;
++
+ 	spin_lock_irqsave(&rtc_lock, flags);
+ 	v = CMOS_READ(sbf_port);
+ 	spin_unlock_irqrestore(&rtc_lock, flags);
++
+ 	return v;
+ }
  
-diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
-index 953be81..dc7bf1b 100644
---- a/arch/sparc64/kernel/unaligned.c
-+++ b/arch/sparc64/kernel/unaligned.c
-@@ -175,7 +175,7 @@ unsigned long compute_effective_address(struct pt_regs *regs,
+ static int __init sbf_value_valid(u8 v)
+ {
+-	if(v&SBF_RESERVED)		/* Reserved bits */
++	if (v & SBF_RESERVED)		/* Reserved bits */
+ 		return 0;
+-	if(!parity(v))
++	if (!parity(v))
+ 		return 0;
++
+ 	return 1;
  }
  
- /* This is just to make gcc think die_if_kernel does return... */
--static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
-+static void __used unaligned_panic(char *str, struct pt_regs *regs)
+ static int __init sbf_init(void)
  {
- 	die_if_kernel(str, regs);
+ 	u8 v;
+-	if(sbf_port == -1)
++
++	if (sbf_port == -1)
+ 		return 0;
++
+ 	v = sbf_read();
+-	if(!sbf_value_valid(v))
+-		printk(KERN_WARNING "Simple Boot Flag value 0x%x read from CMOS RAM was invalid\n",v);
++	if (!sbf_value_valid(v)) {
++		printk(KERN_WARNING "Simple Boot Flag value 0x%x read from "
++			"CMOS RAM was invalid\n", v);
++	}
+ 
+ 	v &= ~SBF_RESERVED;
+ 	v &= ~SBF_BOOTING;
+@@ -92,7 +96,7 @@ static int __init sbf_init(void)
+ 	v |= SBF_PNPOS;
+ #endif
+ 	sbf_write(v);
++
+ 	return 0;
  }
-diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
-index 9fcd503..01f8096 100644
---- a/arch/sparc64/kernel/vmlinux.lds.S
-+++ b/arch/sparc64/kernel/vmlinux.lds.S
-@@ -56,11 +56,11 @@ SECTIONS
- 	.init.text : {
- 		__init_begin = .;
- 		_sinittext = .;
--		*(.init.text)
-+		INIT_TEXT
- 		_einittext = .;
+-
+ module_init(sbf_init);
+diff --git a/arch/x86/kernel/bugs_64.c b/arch/x86/kernel/bugs_64.c
+index 9a189ce..8f520f9 100644
+--- a/arch/x86/kernel/bugs_64.c
++++ b/arch/x86/kernel/bugs_64.c
+@@ -13,7 +13,6 @@
+ void __init check_bugs(void)
+ {
+ 	identify_cpu(&boot_cpu_data);
+-	mtrr_bp_init();
+ #if !defined(CONFIG_SMP)
+ 	printk("CPU: ");
+ 	print_cpu_info(&boot_cpu_data);
+diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
+index 3e91d3e..238468a 100644
+--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
++++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
+@@ -45,6 +45,6 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+ 			&regs[CR_ECX], &regs[CR_EDX]);
+ 
+ 		if (regs[cb->reg] & (1 << cb->bit))
+-			set_bit(cb->feature, c->x86_capability);
++			set_cpu_cap(c, cb->feature);
  	}
- 	.init.data : {
--		*(.init.data)
-+		INIT_DATA
+ }
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 1ff88c7..06fa159 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -63,6 +63,15 @@ static __cpuinit int amd_apic_timer_broken(void)
+ 
+ int force_mwait __cpuinitdata;
+ 
++void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
++{
++	if (cpuid_eax(0x80000000) >= 0x80000007) {
++		c->x86_power = cpuid_edx(0x80000007);
++		if (c->x86_power & (1<<8))
++			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
++	}
++}
++
+ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ {
+ 	u32 l, h;
+@@ -85,6 +94,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
  	}
- 	. = ALIGN(16);
- 	.init.setup : {
-@@ -137,8 +137,8 @@ SECTIONS
- 	PROVIDE (end = .);
+ #endif
  
- 	/DISCARD/ : {
--		*(.exit.text)
--		*(.exit.data)
-+		EXIT_TEXT
-+		EXIT_DATA
- 		*(.exitcall.exit)
++	early_init_amd(c);
++
+ 	/*
+ 	 *	FIXME: We should handle the K5 here. Set up the write
+ 	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
+@@ -257,12 +268,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ 		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
  	}
  
-diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
-index b1a77b1..99f9f96 100644
---- a/arch/um/drivers/ubd_kern.c
-+++ b/arch/um/drivers/ubd_kern.c
-@@ -475,17 +475,9 @@ static void do_ubd_request(struct request_queue * q);
- /* Only changed by ubd_init, which is an initcall. */
- int thread_fd = -1;
+-	if (cpuid_eax(0x80000000) >= 0x80000007) {
+-		c->x86_power = cpuid_edx(0x80000007);
+-		if (c->x86_power & (1<<8))
+-			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+-	}
+-
+ #ifdef CONFIG_X86_HT
+ 	/*
+ 	 * On a AMD multi core setup the lower bits of the APIC id
+@@ -295,12 +300,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ 		local_apic_timer_disabled = 1;
+ #endif
  
--static void ubd_end_request(struct request *req, int bytes, int uptodate)
-+static void ubd_end_request(struct request *req, int bytes, int error)
+-	if (c->x86 == 0x10 && !force_mwait)
+-		clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
+-
+ 	/* K6s reports MCEs but don't actually have all the MSRs */
+ 	if (c->x86 < 6)
+ 		clear_bit(X86_FEATURE_MCE, c->x86_capability);
++
++	if (cpu_has_xmm)
++		set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+ }
+ 
+ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 205fd5b..9b95edc 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -11,6 +11,7 @@
+ #include <linux/utsname.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
++#include <asm/processor-flags.h>
+ #include <asm/i387.h>
+ #include <asm/msr.h>
+ #include <asm/paravirt.h>
+@@ -35,7 +36,7 @@ __setup("mca-pentium", mca_pentium);
+ static int __init no_387(char *s)
  {
--	if (!end_that_request_first(req, uptodate, bytes >> 9)) {
--		struct ubd *dev = req->rq_disk->private_data;
--		unsigned long flags;
+ 	boot_cpu_data.hard_math = 0;
+-	write_cr0(0xE | read_cr0());
++	write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
+ 	return 1;
+ }
+ 
+@@ -153,7 +154,7 @@ static void __init check_config(void)
+  * If we configured ourselves for a TSC, we'd better have one!
+  */
+ #ifdef CONFIG_X86_TSC
+-	if (!cpu_has_tsc && !tsc_disable)
++	if (!cpu_has_tsc)
+ 		panic("Kernel compiled for Pentium+, requires TSC feature!");
+ #endif
+ 
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index e2fcf20..db28aa9 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -22,43 +22,48 @@
+ #include "cpu.h"
+ 
+ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+-	[GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
+-	[GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
+-	[GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
+-	[GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
++	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
++	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
++	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
++	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+ 	/*
+ 	 * Segments used for calling PnP BIOS have byte granularity.
+ 	 * They code segments and data segments have fixed 64k limits,
+ 	 * the transfer segment sizes are set at run time.
+ 	 */
+-	[GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
+-	[GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
+-	[GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
+-	[GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
+-	[GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
++	/* 32-bit code */
++	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
++	/* 16-bit code */
++	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
++	/* 16-bit data */
++	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
++	/* 16-bit data */
++	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
++	/* 16-bit data */
++	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
+ 	/*
+ 	 * The APM segments have byte granularity and their bases
+ 	 * are set at run time.  All have 64k limits.
+ 	 */
+-	[GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
++	/* 32-bit code */
++	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+ 	/* 16-bit code */
+-	[GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
+-	[GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
++	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
++	/* data */
++	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+ 
+-	[GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
+-	[GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
++	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
++	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
+ } };
+ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+ 
++__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
++
+ static int cachesize_override __cpuinitdata = -1;
+-static int disable_x86_fxsr __cpuinitdata;
+ static int disable_x86_serial_nr __cpuinitdata = 1;
+-static int disable_x86_sep __cpuinitdata;
+ 
+ struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+ 
+-extern int disable_pse;
 -
--		add_disk_randomness(req->rq_disk);
--		spin_lock_irqsave(&dev->lock, flags);
--		end_that_request_last(req, uptodate);
--		spin_unlock_irqrestore(&dev->lock, flags);
--	}
-+	blk_end_request(req, error, bytes);
+ static void __cpuinit default_init(struct cpuinfo_x86 * c)
+ {
+ 	/* Not much we can do here... */
+@@ -207,16 +212,8 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+ 
+ static int __init x86_fxsr_setup(char * s)
+ {
+-	/* Tell all the other CPUs to not use it... */
+-	disable_x86_fxsr = 1;
+-
+-	/*
+-	 * ... and clear the bits early in the boot_cpu_data
+-	 * so that the bootup process doesn't try to do this
+-	 * either.
+-	 */
+-	clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
+-	clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
++	setup_clear_cpu_cap(X86_FEATURE_FXSR);
++	setup_clear_cpu_cap(X86_FEATURE_XMM);
+ 	return 1;
  }
+ __setup("nofxsr", x86_fxsr_setup);
+@@ -224,7 +221,7 @@ __setup("nofxsr", x86_fxsr_setup);
  
- /* Callable only from interrupt context - otherwise you need to do
-@@ -493,10 +485,10 @@ static void ubd_end_request(struct request *req, int bytes, int uptodate)
- static inline void ubd_finish(struct request *req, int bytes)
+ static int __init x86_sep_setup(char * s)
  {
- 	if(bytes < 0){
--		ubd_end_request(req, 0, 0);
-+		ubd_end_request(req, 0, -EIO);
- 		return;
+-	disable_x86_sep = 1;
++	setup_clear_cpu_cap(X86_FEATURE_SEP);
+ 	return 1;
+ }
+ __setup("nosep", x86_sep_setup);
+@@ -281,6 +278,33 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
+ 			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
  	}
--	ubd_end_request(req, bytes, 1);
-+	ubd_end_request(req, bytes, 0);
  }
++static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
++{
++	u32 tfms, xlvl;
++	int ebx;
++
++	memset(&c->x86_capability, 0, sizeof c->x86_capability);
++	if (have_cpuid_p()) {
++		/* Intel-defined flags: level 0x00000001 */
++		if (c->cpuid_level >= 0x00000001) {
++			u32 capability, excap;
++			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
++			c->x86_capability[0] = capability;
++			c->x86_capability[4] = excap;
++		}
++
++		/* AMD-defined flags: level 0x80000001 */
++		xlvl = cpuid_eax(0x80000000);
++		if ((xlvl & 0xffff0000) == 0x80000000) {
++			if (xlvl >= 0x80000001) {
++				c->x86_capability[1] = cpuid_edx(0x80000001);
++				c->x86_capability[6] = cpuid_ecx(0x80000001);
++			}
++		}
++
++	}
++
++}
  
- static LIST_HEAD(restart);
-diff --git a/arch/um/include/init.h b/arch/um/include/init.h
-index d4de7c0..cebc6ca 100644
---- a/arch/um/include/init.h
-+++ b/arch/um/include/init.h
-@@ -42,15 +42,15 @@ typedef void (*exitcall_t)(void);
+ /* Do minimum CPU detection early.
+    Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+@@ -300,6 +324,17 @@ static void __init early_cpu_detect(void)
+ 	cpu_detect(c);
  
- /* These are for everybody (although not all archs will actually
-    discard it in modules) */
--#define __init		__attribute__ ((__section__ (".init.text")))
--#define __initdata	__attribute__ ((__section__ (".init.data")))
--#define __exitdata	__attribute__ ((__section__(".exit.data")))
--#define __exit_call	__attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
-+#define __init		__section(.init.text)
-+#define __initdata	__section(.init.data)
-+#define __exitdata	__section(.exit.data)
-+#define __exit_call	__used __section(.exitcall.exit)
+ 	get_cpu_vendor(c, 1);
++
++	switch (c->x86_vendor) {
++	case X86_VENDOR_AMD:
++		early_init_amd(c);
++		break;
++	case X86_VENDOR_INTEL:
++		early_init_intel(c);
++		break;
++	}
++
++	early_get_cap(c);
+ }
  
- #ifdef MODULE
--#define __exit		__attribute__ ((__section__(".exit.text")))
-+#define __exit		__section(.exit.text)
- #else
--#define __exit		__attribute_used__ __attribute__ ((__section__(".exit.text")))
-+#define __exit		__used __section(.exit.text)
- #endif
+ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+@@ -357,8 +392,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+ 		init_scattered_cpuid_features(c);
+ 	}
  
+-	early_intel_workaround(c);
+-
+ #ifdef CONFIG_X86_HT
+ 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
  #endif
-@@ -103,16 +103,16 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;
-  * Mark functions and data as being only used at initialization
-  * or exit time.
+@@ -392,7 +425,7 @@ __setup("serialnumber", x86_serial_nr_setup);
+ /*
+  * This does the hard work of actually picking apart the CPU stuff...
   */
--#define __uml_init_setup	__attribute_used__ __attribute__ ((__section__ (".uml.setup.init")))
--#define __uml_setup_help	__attribute_used__ __attribute__ ((__section__ (".uml.help.init")))
--#define __uml_init_call		__attribute_used__ __attribute__ ((__section__ (".uml.initcall.init")))
--#define __uml_postsetup_call	__attribute_used__ __attribute__ ((__section__ (".uml.postsetup.init")))
--#define __uml_exit_call		__attribute_used__ __attribute__ ((__section__ (".uml.exitcall.exit")))
-+#define __uml_init_setup	__used __section(.uml.setup.init)
-+#define __uml_setup_help	__used __section(.uml.help.init)
-+#define __uml_init_call		__used __section(.uml.initcall.init)
-+#define __uml_postsetup_call	__used __section(.uml.postsetup.init)
-+#define __uml_exit_call		__used __section(.uml.exitcall.exit)
+-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ {
+ 	int i;
  
- #ifndef __KERNEL__
+@@ -418,20 +451,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  
- #define __define_initcall(level,fn) \
--	static initcall_t __initcall_##fn __attribute_used__ \
-+	static initcall_t __initcall_##fn __used \
- 	__attribute__((__section__(".initcall" level ".init"))) = fn
+ 	generic_identify(c);
  
- /* Userspace initcalls shouldn't depend on anything in the kernel, so we'll
-@@ -122,7 +122,7 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;
+-	printk(KERN_DEBUG "CPU: After generic identify, caps:");
+-	for (i = 0; i < NCAPINTS; i++)
+-		printk(" %08lx", c->x86_capability[i]);
+-	printk("\n");
+-
+-	if (this_cpu->c_identify) {
++	if (this_cpu->c_identify)
+ 		this_cpu->c_identify(c);
  
- #define __exitcall(fn) static exitcall_t __exitcall_##fn __exit_call = fn
+-		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
+-		for (i = 0; i < NCAPINTS; i++)
+-			printk(" %08lx", c->x86_capability[i]);
+-		printk("\n");
+-	}
+-
+ 	/*
+ 	 * Vendor-specific initialization.  In this section we
+ 	 * canonicalize the feature flags, meaning if there are
+@@ -453,23 +475,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ 	 * we do "generic changes."
+ 	 */
  
--#define __init_call	__attribute_used__ __attribute__ ((__section__ (".initcall.init")))
-+#define __init_call	__used __section(.initcall.init)
+-	/* TSC disabled? */
+-	if ( tsc_disable )
+-		clear_bit(X86_FEATURE_TSC, c->x86_capability);
+-
+-	/* FXSR disabled? */
+-	if (disable_x86_fxsr) {
+-		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+-		clear_bit(X86_FEATURE_XMM, c->x86_capability);
+-	}
+-
+-	/* SEP disabled? */
+-	if (disable_x86_sep)
+-		clear_bit(X86_FEATURE_SEP, c->x86_capability);
+-
+-	if (disable_pse)
+-		clear_bit(X86_FEATURE_PSE, c->x86_capability);
+-
+ 	/* If the model name is still unset, do table lookup. */
+ 	if ( !c->x86_model_id[0] ) {
+ 		char *p;
+@@ -482,13 +487,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ 				c->x86, c->x86_model);
+ 	}
+ 
+-	/* Now the feature flags better reflect actual CPU features! */
+-
+-	printk(KERN_DEBUG "CPU: After all inits, caps:");
+-	for (i = 0; i < NCAPINTS; i++)
+-		printk(" %08lx", c->x86_capability[i]);
+-	printk("\n");
+-
+ 	/*
+ 	 * On SMP, boot_cpu_data holds the common feature set between
+ 	 * all CPUs; so make sure that we indicate which features are
+@@ -501,8 +499,14 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+ 	}
+ 
++	/* Clear all flags overriden by options */
++	for (i = 0; i < NCAPINTS; i++)
++		c->x86_capability[i] ^= cleared_cpu_caps[i];
++
+ 	/* Init Machine Check Exception if available. */
+ 	mcheck_init(c);
++
++	select_idle_routine(c);
+ }
+ 
+ void __init identify_boot_cpu(void)
+@@ -510,7 +514,6 @@ void __init identify_boot_cpu(void)
+ 	identify_cpu(&boot_cpu_data);
+ 	sysenter_setup();
+ 	enable_sep_cpu();
+-	mtrr_bp_init();
+ }
  
+ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+@@ -567,6 +570,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+ }
  #endif
  
-diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
-index 3866f49..26090b7 100644
---- a/arch/um/kernel/dyn.lds.S
-+++ b/arch/um/kernel/dyn.lds.S
-@@ -17,7 +17,7 @@ SECTIONS
-   __init_begin = .;
-   .init.text : {
- 	_sinittext = .;
--	*(.init.text)
-+	INIT_TEXT
- 	_einittext = .;
-   }
++static __init int setup_noclflush(char *arg)
++{
++	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
++	return 1;
++}
++__setup("noclflush", setup_noclflush);
++
+ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+ {
+ 	char *vendor = NULL;
+@@ -590,6 +600,17 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+ 		printk("\n");
+ }
  
-@@ -84,7 +84,7 @@ SECTIONS
++static __init int setup_disablecpuid(char *arg)
++{
++	int bit;
++	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
++		setup_clear_cpu_cap(bit);
++	else
++		return 0;
++	return 1;
++}
++__setup("clearcpuid=", setup_disablecpuid);
++
+ cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
  
-   #include "asm/common.lds.S"
+ /* This is hacky. :)
+@@ -620,21 +641,13 @@ void __init early_cpu_init(void)
+ 	nexgen_init_cpu();
+ 	umc_init_cpu();
+ 	early_cpu_detect();
+-
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+-	/* pse is not compatible with on-the-fly unmapping,
+-	 * disable it even if the cpus claim to support it.
+-	 */
+-	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+-	disable_pse = 1;
+-#endif
+ }
  
--  init.data : { *(.init.data) }
-+  init.data : { INIT_DATA }
+ /* Make sure %fs is initialized properly in idle threads */
+ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+ {
+ 	memset(regs, 0, sizeof(struct pt_regs));
+-	regs->xfs = __KERNEL_PERCPU;
++	regs->fs = __KERNEL_PERCPU;
+ 	return regs;
+ }
  
-   /* Ensure the __preinit_array_start label is properly aligned.  We
-      could instead move the label definition inside the section, but
-diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
-index 1b388b4..7c7142b 100644
---- a/arch/um/kernel/ksyms.c
-+++ b/arch/um/kernel/ksyms.c
-@@ -71,10 +71,10 @@ EXPORT_SYMBOL(dump_thread);
+@@ -642,7 +655,7 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+  * it's on the real one. */
+ void switch_to_new_gdt(void)
+ {
+-	struct Xgt_desc_struct gdt_descr;
++	struct desc_ptr gdt_descr;
  
- /* required for SMP */
+ 	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+ 	gdt_descr.size = GDT_SIZE - 1;
+@@ -672,12 +685,6 @@ void __cpuinit cpu_init(void)
  
--extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-+extern void __write_lock_failed(rwlock_t *rw);
- EXPORT_SYMBOL(__write_lock_failed);
+ 	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+ 		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+-	if (tsc_disable && cpu_has_tsc) {
+-		printk(KERN_NOTICE "Disabling TSC...\n");
+-		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+-		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+-		set_in_cr4(X86_CR4_TSD);
+-	}
  
--extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
-+extern void __read_lock_failed(rwlock_t *rw);
- EXPORT_SYMBOL(__read_lock_failed);
+ 	load_idt(&idt_descr);
+ 	switch_to_new_gdt();
+@@ -691,7 +698,7 @@ void __cpuinit cpu_init(void)
+ 		BUG();
+ 	enter_lazy_tlb(&init_mm, curr);
+ 
+-	load_esp0(t, thread);
++	load_sp0(t, thread);
+ 	set_tss_desc(cpu,t);
+ 	load_TR_desc();
+ 	load_LDT(&init_mm.context);
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index 2f6432c..ad6527a 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -24,5 +24,6 @@ extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+ extern int get_model_name(struct cpuinfo_x86 *c);
+ extern void display_cacheinfo(struct cpuinfo_x86 *c);
+ 
+-extern void early_intel_workaround(struct cpuinfo_x86 *c);
++extern void early_init_intel(struct cpuinfo_x86 *c);
++extern void early_init_amd(struct cpuinfo_x86 *c);
+ 
+diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+index fea0af0..a962dcb 100644
+--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
++++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+@@ -67,7 +67,8 @@ struct acpi_cpufreq_data {
+ 	unsigned int cpu_feature;
+ };
+ 
+-static struct acpi_cpufreq_data *drv_data[NR_CPUS];
++static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
++
+ /* acpi_perf_data is a pointer to percpu data. */
+ static struct acpi_processor_performance *acpi_perf_data;
+ 
+@@ -218,14 +219,14 @@ static u32 get_cur_val(cpumask_t mask)
+ 	if (unlikely(cpus_empty(mask)))
+ 		return 0;
+ 
+-	switch (drv_data[first_cpu(mask)]->cpu_feature) {
++	switch (per_cpu(drv_data, first_cpu(mask))->cpu_feature) {
+ 	case SYSTEM_INTEL_MSR_CAPABLE:
+ 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+ 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+ 		break;
+ 	case SYSTEM_IO_CAPABLE:
+ 		cmd.type = SYSTEM_IO_CAPABLE;
+-		perf = drv_data[first_cpu(mask)]->acpi_data;
++		perf = per_cpu(drv_data, first_cpu(mask))->acpi_data;
+ 		cmd.addr.io.port = perf->control_register.address;
+ 		cmd.addr.io.bit_width = perf->control_register.bit_width;
+ 		break;
+@@ -325,7 +326,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
  
  #endif
-diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
-index 13df191..5828c1d 100644
---- a/arch/um/kernel/uml.lds.S
-+++ b/arch/um/kernel/uml.lds.S
-@@ -23,7 +23,7 @@ SECTIONS
-   __init_begin = .;
-   .init.text : {
- 	_sinittext = .;
--	*(.init.text)
-+	INIT_TEXT
- 	_einittext = .;
-   }
-   . = ALIGN(4096);
-@@ -48,7 +48,7 @@ SECTIONS
  
-   #include "asm/common.lds.S"
+-	retval = drv_data[cpu]->max_freq * perf_percent / 100;
++	retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
  
--  init.data : { *(init.data) }
-+  init.data : { INIT_DATA }
-   .data    :
-   {
-     . = ALIGN(KERNEL_STACK_SIZE);		/* init_task */
-diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
-index 0147227..19053d4 100644
---- a/arch/um/sys-i386/signal.c
-+++ b/arch/um/sys-i386/signal.c
-@@ -3,10 +3,10 @@
-  * Licensed under the GPL
-  */
+ 	put_cpu();
+ 	set_cpus_allowed(current, saved_mask);
+@@ -336,7 +337,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
  
--#include "linux/ptrace.h"
--#include "asm/unistd.h"
--#include "asm/uaccess.h"
--#include "asm/ucontext.h"
-+#include <linux/ptrace.h>
-+#include <asm/unistd.h>
-+#include <asm/uaccess.h>
-+#include <asm/ucontext.h>
- #include "frame_kern.h"
- #include "skas.h"
+ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+ {
+-	struct acpi_cpufreq_data *data = drv_data[cpu];
++	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+ 	unsigned int freq;
  
-@@ -18,17 +18,17 @@ void copy_sc(struct uml_pt_regs *regs, void *from)
- 	REGS_FS(regs->gp) = sc->fs;
- 	REGS_ES(regs->gp) = sc->es;
- 	REGS_DS(regs->gp) = sc->ds;
--	REGS_EDI(regs->gp) = sc->edi;
--	REGS_ESI(regs->gp) = sc->esi;
--	REGS_EBP(regs->gp) = sc->ebp;
--	REGS_SP(regs->gp) = sc->esp;
--	REGS_EBX(regs->gp) = sc->ebx;
--	REGS_EDX(regs->gp) = sc->edx;
--	REGS_ECX(regs->gp) = sc->ecx;
--	REGS_EAX(regs->gp) = sc->eax;
--	REGS_IP(regs->gp) = sc->eip;
-+	REGS_EDI(regs->gp) = sc->di;
-+	REGS_ESI(regs->gp) = sc->si;
-+	REGS_EBP(regs->gp) = sc->bp;
-+	REGS_SP(regs->gp) = sc->sp;
-+	REGS_EBX(regs->gp) = sc->bx;
-+	REGS_EDX(regs->gp) = sc->dx;
-+	REGS_ECX(regs->gp) = sc->cx;
-+	REGS_EAX(regs->gp) = sc->ax;
-+	REGS_IP(regs->gp) = sc->ip;
- 	REGS_CS(regs->gp) = sc->cs;
--	REGS_EFLAGS(regs->gp) = sc->eflags;
-+	REGS_EFLAGS(regs->gp) = sc->flags;
- 	REGS_SS(regs->gp) = sc->ss;
- }
+ 	dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
+@@ -370,7 +371,7 @@ static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
+ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
+ 			       unsigned int target_freq, unsigned int relation)
+ {
+-	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
++	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ 	struct acpi_processor_performance *perf;
+ 	struct cpufreq_freqs freqs;
+ 	cpumask_t online_policy_cpus;
+@@ -466,7 +467,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
  
-@@ -229,18 +229,18 @@ static int copy_sc_to_user(struct sigcontext __user *to,
- 	sc.fs = REGS_FS(regs->regs.gp);
- 	sc.es = REGS_ES(regs->regs.gp);
- 	sc.ds = REGS_DS(regs->regs.gp);
--	sc.edi = REGS_EDI(regs->regs.gp);
--	sc.esi = REGS_ESI(regs->regs.gp);
--	sc.ebp = REGS_EBP(regs->regs.gp);
--	sc.esp = sp;
--	sc.ebx = REGS_EBX(regs->regs.gp);
--	sc.edx = REGS_EDX(regs->regs.gp);
--	sc.ecx = REGS_ECX(regs->regs.gp);
--	sc.eax = REGS_EAX(regs->regs.gp);
--	sc.eip = REGS_IP(regs->regs.gp);
-+	sc.di = REGS_EDI(regs->regs.gp);
-+	sc.si = REGS_ESI(regs->regs.gp);
-+	sc.bp = REGS_EBP(regs->regs.gp);
-+	sc.sp = sp;
-+	sc.bx = REGS_EBX(regs->regs.gp);
-+	sc.dx = REGS_EDX(regs->regs.gp);
-+	sc.cx = REGS_ECX(regs->regs.gp);
-+	sc.ax = REGS_EAX(regs->regs.gp);
-+	sc.ip = REGS_IP(regs->regs.gp);
- 	sc.cs = REGS_CS(regs->regs.gp);
--	sc.eflags = REGS_EFLAGS(regs->regs.gp);
--	sc.esp_at_signal = regs->regs.gp[UESP];
-+	sc.flags = REGS_EFLAGS(regs->regs.gp);
-+	sc.sp_at_signal = regs->regs.gp[UESP];
- 	sc.ss = regs->regs.gp[SS];
- 	sc.cr2 = fi->cr2;
- 	sc.err = fi->error_code;
-diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
-index 1778d33..7457436 100644
---- a/arch/um/sys-x86_64/signal.c
-+++ b/arch/um/sys-x86_64/signal.c
-@@ -4,11 +4,11 @@
-  * Licensed under the GPL
-  */
+ static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
+ {
+-	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
++	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
  
--#include "linux/personality.h"
--#include "linux/ptrace.h"
--#include "asm/unistd.h"
--#include "asm/uaccess.h"
--#include "asm/ucontext.h"
-+#include <linux/personality.h>
-+#include <linux/ptrace.h>
-+#include <asm/unistd.h>
-+#include <asm/uaccess.h>
-+#include <asm/ucontext.h>
- #include "frame_kern.h"
- #include "skas.h"
+ 	dprintk("acpi_cpufreq_verify\n");
  
-@@ -27,16 +27,16 @@ void copy_sc(struct uml_pt_regs *regs, void *from)
- 	GETREG(regs, R13, sc, r13);
- 	GETREG(regs, R14, sc, r14);
- 	GETREG(regs, R15, sc, r15);
--	GETREG(regs, RDI, sc, rdi);
--	GETREG(regs, RSI, sc, rsi);
--	GETREG(regs, RBP, sc, rbp);
--	GETREG(regs, RBX, sc, rbx);
--	GETREG(regs, RDX, sc, rdx);
--	GETREG(regs, RAX, sc, rax);
--	GETREG(regs, RCX, sc, rcx);
--	GETREG(regs, RSP, sc, rsp);
--	GETREG(regs, RIP, sc, rip);
--	GETREG(regs, EFLAGS, sc, eflags);
-+	GETREG(regs, RDI, sc, di);
-+	GETREG(regs, RSI, sc, si);
-+	GETREG(regs, RBP, sc, bp);
-+	GETREG(regs, RBX, sc, bx);
-+	GETREG(regs, RDX, sc, dx);
-+	GETREG(regs, RAX, sc, ax);
-+	GETREG(regs, RCX, sc, cx);
-+	GETREG(regs, RSP, sc, sp);
-+	GETREG(regs, RIP, sc, ip);
-+	GETREG(regs, EFLAGS, sc, flags);
- 	GETREG(regs, CS, sc, cs);
+@@ -570,7 +571,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 		return -ENOMEM;
  
- #undef GETREG
-@@ -61,16 +61,16 @@ static int copy_sc_from_user(struct pt_regs *regs,
- 	err |= GETREG(regs, R13, from, r13);
- 	err |= GETREG(regs, R14, from, r14);
- 	err |= GETREG(regs, R15, from, r15);
--	err |= GETREG(regs, RDI, from, rdi);
--	err |= GETREG(regs, RSI, from, rsi);
--	err |= GETREG(regs, RBP, from, rbp);
--	err |= GETREG(regs, RBX, from, rbx);
--	err |= GETREG(regs, RDX, from, rdx);
--	err |= GETREG(regs, RAX, from, rax);
--	err |= GETREG(regs, RCX, from, rcx);
--	err |= GETREG(regs, RSP, from, rsp);
--	err |= GETREG(regs, RIP, from, rip);
--	err |= GETREG(regs, EFLAGS, from, eflags);
-+	err |= GETREG(regs, RDI, from, di);
-+	err |= GETREG(regs, RSI, from, si);
-+	err |= GETREG(regs, RBP, from, bp);
-+	err |= GETREG(regs, RBX, from, bx);
-+	err |= GETREG(regs, RDX, from, dx);
-+	err |= GETREG(regs, RAX, from, ax);
-+	err |= GETREG(regs, RCX, from, cx);
-+	err |= GETREG(regs, RSP, from, sp);
-+	err |= GETREG(regs, RIP, from, ip);
-+	err |= GETREG(regs, EFLAGS, from, flags);
- 	err |= GETREG(regs, CS, from, cs);
- 	if (err)
- 		return 1;
-@@ -108,19 +108,19 @@ static int copy_sc_to_user(struct sigcontext __user *to,
- 	__put_user((regs)->regs.gp[(regno) / sizeof(unsigned long)],	\
- 		   &(sc)->regname)
+ 	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+-	drv_data[cpu] = data;
++	per_cpu(drv_data, cpu) = data;
  
--	err |= PUTREG(regs, RDI, to, rdi);
--	err |= PUTREG(regs, RSI, to, rsi);
--	err |= PUTREG(regs, RBP, to, rbp);
-+	err |= PUTREG(regs, RDI, to, di);
-+	err |= PUTREG(regs, RSI, to, si);
-+	err |= PUTREG(regs, RBP, to, bp);
- 	/*
- 	 * Must use orignal RSP, which is passed in, rather than what's in
- 	 * the pt_regs, because that's already been updated to point at the
- 	 * signal frame.
- 	 */
--	err |= __put_user(sp, &to->rsp);
--	err |= PUTREG(regs, RBX, to, rbx);
--	err |= PUTREG(regs, RDX, to, rdx);
--	err |= PUTREG(regs, RCX, to, rcx);
--	err |= PUTREG(regs, RAX, to, rax);
-+	err |= __put_user(sp, &to->sp);
-+	err |= PUTREG(regs, RBX, to, bx);
-+	err |= PUTREG(regs, RDX, to, dx);
-+	err |= PUTREG(regs, RCX, to, cx);
-+	err |= PUTREG(regs, RAX, to, ax);
- 	err |= PUTREG(regs, R8, to, r8);
- 	err |= PUTREG(regs, R9, to, r9);
- 	err |= PUTREG(regs, R10, to, r10);
-@@ -135,8 +135,8 @@ static int copy_sc_to_user(struct sigcontext __user *to,
- 	err |= __put_user(fi->error_code, &to->err);
- 	err |= __put_user(fi->trap_no, &to->trapno);
+ 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+ 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+@@ -714,20 +715,20 @@ err_unreg:
+ 	acpi_processor_unregister_performance(perf, cpu);
+ err_free:
+ 	kfree(data);
+-	drv_data[cpu] = NULL;
++	per_cpu(drv_data, cpu) = NULL;
  
--	err |= PUTREG(regs, RIP, to, rip);
--	err |= PUTREG(regs, EFLAGS, to, eflags);
-+	err |= PUTREG(regs, RIP, to, ip);
-+	err |= PUTREG(regs, EFLAGS, to, flags);
- #undef PUTREG
+ 	return result;
+ }
  
- 	err |= __put_user(mask, &to->oldmask);
-diff --git a/arch/v850/kernel/vmlinux.lds.S b/arch/v850/kernel/vmlinux.lds.S
-index 6172599..d08cd1d 100644
---- a/arch/v850/kernel/vmlinux.lds.S
-+++ b/arch/v850/kernel/vmlinux.lds.S
-@@ -114,7 +114,7 @@
- #define DATA_CONTENTS							      \
- 		__sdata = . ;						      \
- 		DATA_DATA						      \
--			*(.exit.data)	/* 2.5 convention */		      \
-+			EXIT_DATA	/* 2.5 convention */		      \
- 			*(.data.exit)	/* 2.4 convention */		      \
- 		. = ALIGN (16) ;					      \
- 		*(.data.cacheline_aligned)				      \
-@@ -157,9 +157,9 @@
- 		. = ALIGN (4096) ;					      \
- 		__init_start = . ;					      \
- 			__sinittext = .;				      \
--			*(.init.text)	/* 2.5 convention */		      \
-+			INIT_TEXT	/* 2.5 convention */		      \
- 			__einittext = .;				      \
--			*(.init.data)					      \
-+			INIT_DATA					      \
- 			*(.text.init)	/* 2.4 convention */		      \
- 			*(.data.init)					      \
- 		INITCALL_CONTENTS					      \
-@@ -170,7 +170,7 @@
- #define ROMK_INIT_RAM_CONTENTS						      \
- 		. = ALIGN (4096) ;					      \
- 		__init_start = . ;					      \
--			*(.init.data)	/* 2.5 convention */		      \
-+			INIT_DATA	/* 2.5 convention */		      \
- 			*(.data.init)	/* 2.4 convention */		      \
- 		__init_end = . ;					      \
- 		. = ALIGN (4096) ;
-@@ -179,7 +179,7 @@
-    should go into ROM.  */	
- #define ROMK_INIT_ROM_CONTENTS						      \
- 			_sinittext = .;					      \
--			*(.init.text)	/* 2.5 convention */		      \
-+			INIT_TEXT	/* 2.5 convention */		      \
- 			_einittext = .;					      \
- 			*(.text.init)	/* 2.4 convention */		      \
- 		INITCALL_CONTENTS					      \
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 80b7ba4..fb3eea3 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -17,81 +17,69 @@ config X86_64
+ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+ {
+-	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
++	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
  
- ### Arch settings
- config X86
--	bool
--	default y
-+	def_bool y
-+
-+config GENERIC_LOCKBREAK
-+	def_bool n
+ 	dprintk("acpi_cpufreq_cpu_exit\n");
  
- config GENERIC_TIME
--	bool
--	default y
-+	def_bool y
+ 	if (data) {
+ 		cpufreq_frequency_table_put_attr(policy->cpu);
+-		drv_data[policy->cpu] = NULL;
++		per_cpu(drv_data, policy->cpu) = NULL;
+ 		acpi_processor_unregister_performance(data->acpi_data,
+ 						      policy->cpu);
+ 		kfree(data);
+@@ -738,7 +739,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
  
- config GENERIC_CMOS_UPDATE
--	bool
--	default y
-+	def_bool y
+ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
+ {
+-	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
++	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
  
- config CLOCKSOURCE_WATCHDOG
--	bool
--	default y
-+	def_bool y
+ 	dprintk("acpi_cpufreq_resume\n");
  
- config GENERIC_CLOCKEVENTS
--	bool
--	default y
-+	def_bool y
+diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
+index 749d00c..06fcce5 100644
+--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
++++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
+@@ -694,7 +694,7 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
+ 	if ( acpi_bus_get_device(obj_handle, &d) ) {
+ 		return 0;
+ 	}
+-	*return_value = (void *)acpi_driver_data(d);
++	*return_value = acpi_driver_data(d);
+ 	return 1;
+ }
  
- config GENERIC_CLOCKEVENTS_BROADCAST
--	bool
--	default y
-+	def_bool y
- 	depends on X86_64 || (X86_32 && X86_LOCAL_APIC)
+diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+index 99e1ef9..a052273 100644
+--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
++++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+@@ -52,7 +52,7 @@
+ /* serialize freq changes  */
+ static DEFINE_MUTEX(fidvid_mutex);
  
- config LOCKDEP_SUPPORT
--	bool
--	default y
-+	def_bool y
+-static struct powernow_k8_data *powernow_data[NR_CPUS];
++static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
  
- config STACKTRACE_SUPPORT
--	bool
--	default y
-+	def_bool y
+ static int cpu_family = CPU_OPTERON;
  
- config SEMAPHORE_SLEEPERS
--	bool
--	default y
-+	def_bool y
+@@ -1018,7 +1018,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
+ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
+ {
+ 	cpumask_t oldmask = CPU_MASK_ALL;
+-	struct powernow_k8_data *data = powernow_data[pol->cpu];
++	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ 	u32 checkfid;
+ 	u32 checkvid;
+ 	unsigned int newstate;
+@@ -1094,7 +1094,7 @@ err_out:
+ /* Driver entry point to verify the policy and range of frequencies */
+ static int powernowk8_verify(struct cpufreq_policy *pol)
+ {
+-	struct powernow_k8_data *data = powernow_data[pol->cpu];
++	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
  
- config MMU
--	bool
--	default y
-+	def_bool y
+ 	if (!data)
+ 		return -EINVAL;
+@@ -1202,7 +1202,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
+ 		dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ 			data->currfid, data->currvid);
  
- config ZONE_DMA
--	bool
--	default y
-+	def_bool y
+-	powernow_data[pol->cpu] = data;
++	per_cpu(powernow_data, pol->cpu) = data;
  
- config QUICKLIST
--	bool
--	default X86_32
-+	def_bool X86_32
+ 	return 0;
  
- config SBUS
- 	bool
+@@ -1216,7 +1216,7 @@ err_out:
  
- config GENERIC_ISA_DMA
--	bool
--	default y
-+	def_bool y
+ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
+ {
+-	struct powernow_k8_data *data = powernow_data[pol->cpu];
++	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
  
- config GENERIC_IOMAP
--	bool
--	default y
-+	def_bool y
+ 	if (!data)
+ 		return -EINVAL;
+@@ -1237,7 +1237,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
+ 	cpumask_t oldmask = current->cpus_allowed;
+ 	unsigned int khz = 0;
  
- config GENERIC_BUG
--	bool
--	default y
-+	def_bool y
- 	depends on BUG
+-	data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];
++	data = per_cpu(powernow_data, first_cpu(per_cpu(cpu_core_map, cpu)));
  
- config GENERIC_HWEIGHT
--	bool
--	default y
-+	def_bool y
-+
-+config GENERIC_GPIO
-+	def_bool n
+ 	if (!data)
+ 		return -EINVAL;
+diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
+index 88d66fb..404a6a2 100644
+--- a/arch/x86/kernel/cpu/cyrix.c
++++ b/arch/x86/kernel/cpu/cyrix.c
+@@ -5,6 +5,7 @@
+ #include <asm/dma.h>
+ #include <asm/io.h>
+ #include <asm/processor-cyrix.h>
++#include <asm/processor-flags.h>
+ #include <asm/timer.h>
+ #include <asm/pci-direct.h>
+ #include <asm/tsc.h>
+@@ -126,15 +127,12 @@ static void __cpuinit set_cx86_reorder(void)
  
- config ARCH_MAY_HAVE_PC_FDC
--	bool
--	default y
-+	def_bool y
+ static void __cpuinit set_cx86_memwb(void)
+ {
+-	u32 cr0;
+-
+ 	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
  
- config DMI
--	bool
--	default y
-+	def_bool y
+ 	/* CCR2 bit 2: unlock NW bit */
+ 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
+ 	/* set 'Not Write-through' */
+-	cr0 = 0x20000000;
+-	write_cr0(read_cr0() | cr0);
++	write_cr0(read_cr0() | X86_CR0_NW);
+ 	/* CCR2 bit 2: lock NW bit and set WT1 */
+ 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
+ }
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index cc8c501..d1c372b 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -11,6 +11,8 @@
+ #include <asm/pgtable.h>
+ #include <asm/msr.h>
+ #include <asm/uaccess.h>
++#include <asm/ptrace.h>
++#include <asm/ds.h>
  
- config RWSEM_GENERIC_SPINLOCK
- 	def_bool !X86_XADD
-@@ -112,6 +100,9 @@ config GENERIC_TIME_VSYSCALL
- 	bool
- 	default X86_64
+ #include "cpu.h"
  
-+config HAVE_SETUP_PER_CPU_AREA
-+	def_bool X86_64
-+
- config ARCH_SUPPORTS_OPROFILE
- 	bool
- 	default y
-@@ -144,9 +135,17 @@ config GENERIC_PENDING_IRQ
+@@ -27,13 +29,14 @@
+ struct movsl_mask movsl_mask __read_mostly;
+ #endif
  
- config X86_SMP
- 	bool
--	depends on X86_32 && SMP && !X86_VOYAGER
-+	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
- 	default y
+-void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
++void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+ {
+-	if (c->x86_vendor != X86_VENDOR_INTEL)
+-		return;
+ 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
+ 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
+ 		c->x86_cache_alignment = 128;
++	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
++		(c->x86 == 0x6 && c->x86_model >= 0x0e))
++		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+ }
  
-+config X86_32_SMP
-+	def_bool y
-+	depends on X86_32 && SMP
-+
-+config X86_64_SMP
-+	def_bool y
-+	depends on X86_64 && SMP
-+
- config X86_HT
- 	bool
- 	depends on SMP
-@@ -292,6 +291,18 @@ config X86_ES7000
- 	  Only choose this option if you have such a system, otherwise you
- 	  should say N here.
+ /*
+@@ -113,6 +116,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+ 	unsigned int l2 = 0;
+ 	char *p = NULL;
  
-+config X86_RDC321X
-+	bool "RDC R-321x SoC"
-+	depends on X86_32
-+	select M486
-+	select X86_REBOOTFIXUPS
-+	select GENERIC_GPIO
-+	select LEDS_GPIO
-+	help
-+	  This option is needed for RDC R-321x system-on-chip, also known
-+	  as R-8610-(G).
-+	  If you don't have one of these chips, you should say N here.
++	early_init_intel(c);
 +
- config X86_VSMP
- 	bool "Support for ScaleMP vSMP"
- 	depends on X86_64 && PCI
-@@ -303,8 +314,8 @@ config X86_VSMP
- endchoice
- 
- config SCHED_NO_NO_OMIT_FRAME_POINTER
--	bool "Single-depth WCHAN output"
--	default y
-+	def_bool y
-+	prompt "Single-depth WCHAN output"
- 	depends on X86_32
- 	help
- 	  Calculate simpler /proc/<PID>/wchan values. If this option
-@@ -314,18 +325,8 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
+ #ifdef CONFIG_X86_F00F_BUG
+ 	/*
+ 	 * All current models of Pentium and Pentium with MMX technology CPUs
+@@ -132,7 +137,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+ 	}
+ #endif
  
- 	  If in doubt, say "Y".
+-	select_idle_routine(c);
+ 	l2 = init_intel_cacheinfo(c);
+ 	if (c->cpuid_level > 9 ) {
+ 		unsigned eax = cpuid_eax(10);
+@@ -201,16 +205,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+ 	}
+ #endif
  
--config PARAVIRT
--	bool
--	depends on X86_32 && !(X86_VISWS || X86_VOYAGER)
--	help
--	  This changes the kernel so it can modify itself when it is run
--	  under a hypervisor, potentially improving performance significantly
--	  over full virtualization.  However, when run without a hypervisor
--	  the kernel is theoretically slower and slightly larger.
++	if (cpu_has_xmm2)
++		set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability);
+ 	if (c->x86 == 15) {
+ 		set_bit(X86_FEATURE_P4, c->x86_capability);
+-		set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
+ 	}
+ 	if (c->x86 == 6) 
+ 		set_bit(X86_FEATURE_P3, c->x86_capability);
+-	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+-		(c->x86 == 0x6 && c->x86_model >= 0x0e))
+-		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
 -
- menuconfig PARAVIRT_GUEST
- 	bool "Paravirtualized guest support"
--	depends on X86_32
- 	help
- 	  Say Y here to get to see options related to running Linux under
- 	  various hypervisors.  This option alone does not add any kernel code.
-@@ -339,6 +340,7 @@ source "arch/x86/xen/Kconfig"
- config VMI
- 	bool "VMI Guest support"
- 	select PARAVIRT
-+	depends on X86_32
- 	depends on !(X86_VISWS || X86_VOYAGER)
- 	help
- 	  VMI provides a paravirtualized interface to the VMware ESX server
-@@ -348,40 +350,43 @@ config VMI
+ 	if (cpu_has_ds) {
+ 		unsigned int l1;
+ 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+@@ -219,6 +220,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+ 		if (!(l1 & (1<<12)))
+ 			set_bit(X86_FEATURE_PEBS, c->x86_capability);
+ 	}
++
++	if (cpu_has_bts)
++		ds_init_intel(c);
+ }
  
- source "arch/x86/lguest/Kconfig"
+ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+@@ -342,5 +346,22 @@ unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
+ EXPORT_SYMBOL(cmpxchg_386_u32);
+ #endif
  
-+config PARAVIRT
-+	bool "Enable paravirtualization code"
-+	depends on !(X86_VISWS || X86_VOYAGER)
-+	help
-+	  This changes the kernel so it can modify itself when it is run
-+	  under a hypervisor, potentially improving performance significantly
-+	  over full virtualization.  However, when run without a hypervisor
-+	  the kernel is theoretically slower and slightly larger.
++#ifndef CONFIG_X86_CMPXCHG64
++unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
++{
++	u64 prev;
++	unsigned long flags;
 +
- endif
++	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
++	local_irq_save(flags);
++	prev = *(u64 *)ptr;
++	if (prev == old)
++		*(u64 *)ptr = new;
++	local_irq_restore(flags);
++	return prev;
++}
++EXPORT_SYMBOL(cmpxchg_486_u64);
++#endif
++
+ // arch_initcall(intel_cpu_init);
  
- config ACPI_SRAT
--	bool
--	default y
-+	def_bool y
- 	depends on X86_32 && ACPI && NUMA && (X86_SUMMIT || X86_GENERICARCH)
- 	select ACPI_NUMA
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index 9f530ff..8b4507b 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -733,10 +733,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ 	if (unlikely(retval < 0))
+ 		return retval;
  
- config HAVE_ARCH_PARSE_SRAT
--       bool
--       default y
--       depends on ACPI_SRAT
-+	def_bool y
-+	depends on ACPI_SRAT
+-	cache_kobject[cpu]->parent = &sys_dev->kobj;
+-	kobject_set_name(cache_kobject[cpu], "%s", "cache");
+-	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
+-	retval = kobject_register(cache_kobject[cpu]);
++	retval = kobject_init_and_add(cache_kobject[cpu], &ktype_percpu_entry,
++				      &sys_dev->kobj, "%s", "cache");
+ 	if (retval < 0) {
+ 		cpuid4_cache_sysfs_exit(cpu);
+ 		return retval;
+@@ -746,23 +744,23 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ 		this_object = INDEX_KOBJECT_PTR(cpu,i);
+ 		this_object->cpu = cpu;
+ 		this_object->index = i;
+-		this_object->kobj.parent = cache_kobject[cpu];
+-		kobject_set_name(&(this_object->kobj), "index%1lu", i);
+-		this_object->kobj.ktype = &ktype_cache;
+-		retval = kobject_register(&(this_object->kobj));
++		retval = kobject_init_and_add(&(this_object->kobj),
++					      &ktype_cache, cache_kobject[cpu],
++					      "index%1lu", i);
+ 		if (unlikely(retval)) {
+ 			for (j = 0; j < i; j++) {
+-				kobject_unregister(
+-					&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
++				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
+ 			}
+-			kobject_unregister(cache_kobject[cpu]);
++			kobject_put(cache_kobject[cpu]);
+ 			cpuid4_cache_sysfs_exit(cpu);
+ 			break;
+ 		}
++		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
+ 	}
+ 	if (!retval)
+ 		cpu_set(cpu, cache_dev_map);
  
- config X86_SUMMIT_NUMA
--	bool
--	default y
-+	def_bool y
- 	depends on X86_32 && NUMA && (X86_SUMMIT || X86_GENERICARCH)
++	kobject_uevent(cache_kobject[cpu], KOBJ_ADD);
+ 	return retval;
+ }
  
- config X86_CYCLONE_TIMER
--	bool
--	default y
-+	def_bool y
- 	depends on X86_32 && X86_SUMMIT || X86_GENERICARCH
+@@ -778,8 +776,8 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+ 	cpu_clear(cpu, cache_dev_map);
  
- config ES7000_CLUSTERED_APIC
--	bool
--	default y
-+	def_bool y
- 	depends on SMP && X86_ES7000 && MPENTIUMIII
+ 	for (i = 0; i < num_cache_leaves; i++)
+-		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+-	kobject_unregister(cache_kobject[cpu]);
++		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
++	kobject_put(cache_kobject[cpu]);
+ 	cpuid4_cache_sysfs_exit(cpu);
+ }
  
- source "arch/x86/Kconfig.cpu"
+diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c
+index eef63e3..e633c9c 100644
+--- a/arch/x86/kernel/cpu/mcheck/k7.c
++++ b/arch/x86/kernel/cpu/mcheck/k7.c
+@@ -16,7 +16,7 @@
+ #include "mce.h"
  
- config HPET_TIMER
--	bool
-+	def_bool X86_64
- 	prompt "HPET Timer Support" if X86_32
--	default X86_64
- 	help
-          Use the IA-PC HPET (High Precision Event Timer) to manage
-          time in preference to the PIT and RTC, if a HPET is
-@@ -399,9 +404,8 @@ config HPET_TIMER
-          Choose N to continue using the legacy 8254 timer.
+ /* Machine Check Handler For AMD Athlon/Duron */
+-static fastcall void k7_machine_check(struct pt_regs * regs, long error_code)
++static void k7_machine_check(struct pt_regs * regs, long error_code)
+ {
+ 	int recover=1;
+ 	u32 alow, ahigh, high, low;
+@@ -27,29 +27,32 @@ static fastcall void k7_machine_check(struct pt_regs * regs, long error_code)
+ 	if (mcgstl & (1<<0))	/* Recoverable ? */
+ 		recover=0;
  
- config HPET_EMULATE_RTC
--	bool
--	depends on HPET_TIMER && RTC=y
--	default y
-+	def_bool y
-+	depends on HPET_TIMER && (RTC=y || RTC=m)
+-	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
++	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
+ 		smp_processor_id(), mcgsth, mcgstl);
  
- # Mark as embedded because too many people got it wrong.
- # The code disables itself when not needed.
-@@ -441,8 +445,8 @@ config CALGARY_IOMMU
- 	  If unsure, say Y.
+-	for (i=1; i<nr_mce_banks; i++) {
+-		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
++	for (i = 1; i < nr_mce_banks; i++) {
++		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
+ 		if (high&(1<<31)) {
++			char misc[20];
++			char addr[24];
++			misc[0] = addr[0] = '\0';
+ 			if (high & (1<<29))
+ 				recover |= 1;
+ 			if (high & (1<<25))
+ 				recover |= 2;
+-			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
+ 			high &= ~(1<<31);
+ 			if (high & (1<<27)) {
+-				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
+-				printk ("[%08x%08x]", ahigh, alow);
++				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
++				snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
+ 			}
+ 			if (high & (1<<26)) {
+-				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
+-				printk (" at %08x%08x", ahigh, alow);
++				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
++				snprintf(addr, 24, " at %08x%08x", ahigh, alow);
+ 			}
+-			printk ("\n");
++			printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
++				smp_processor_id(), i, high, low, misc, addr);
+ 			/* Clear it */
+-			wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
++			wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
+ 			/* Serialize */
+ 			wmb();
+ 			add_taint(TAINT_MACHINE_CHECK);
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.h b/arch/x86/kernel/cpu/mcheck/mce.h
+index 81fb6e2..ae9f628 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.h
++++ b/arch/x86/kernel/cpu/mcheck/mce.h
+@@ -8,7 +8,7 @@ void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
+ void winchip_mcheck_init(struct cpuinfo_x86 *c);
  
- config CALGARY_IOMMU_ENABLED_BY_DEFAULT
--	bool "Should Calgary be enabled by default?"
--	default y
-+	def_bool y
-+	prompt "Should Calgary be enabled by default?"
- 	depends on CALGARY_IOMMU
- 	help
- 	  Should Calgary be enabled by default? if you choose 'y', Calgary
-@@ -486,9 +490,9 @@ config SCHED_SMT
- 	  N here.
+ /* Call the installed machine check handler for this CPU setup. */
+-extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);
++extern void (*machine_check_vector)(struct pt_regs *, long error_code);
  
- config SCHED_MC
--	bool "Multi-core scheduler support"
-+	def_bool y
-+	prompt "Multi-core scheduler support"
- 	depends on (X86_64 && SMP) || (X86_32 && X86_HT)
--	default y
- 	help
- 	  Multi-core scheduler support improves the CPU scheduler's decision
- 	  making when dealing with multi-core CPU chips at a cost of slightly
-@@ -522,19 +526,16 @@ config X86_UP_IOAPIC
- 	  an IO-APIC, then the kernel will still run with no slowdown at all.
+ extern int nr_mce_banks;
  
- config X86_LOCAL_APIC
--	bool
-+	def_bool y
- 	depends on X86_64 || (X86_32 && (X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER) || X86_GENERICARCH))
--	default y
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
+index 34c781e..a5182dc 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
+@@ -22,13 +22,13 @@ int nr_mce_banks;
+ EXPORT_SYMBOL_GPL(nr_mce_banks);	/* non-fatal.o */
  
- config X86_IO_APIC
--	bool
-+	def_bool y
- 	depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)) || X86_GENERICARCH))
--	default y
+ /* Handle unconfigured int18 (should never happen) */
+-static fastcall void unexpected_machine_check(struct pt_regs * regs, long error_code)
++static void unexpected_machine_check(struct pt_regs * regs, long error_code)
+ {	
+ 	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id());
+ }
  
- config X86_VISWS_APIC
--	bool
-+	def_bool y
- 	depends on X86_32 && X86_VISWS
--	default y
+ /* Call the installed machine check handler for this CPU setup. */
+-void fastcall (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
++void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
  
- config X86_MCE
- 	bool "Machine Check Exception"
-@@ -554,17 +555,17 @@ config X86_MCE
- 	  the 386 and 486, so nearly everyone can say Y here.
+ /* This has to be run for each processor */
+ void mcheck_init(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
+index 4b21d29..9a699ed 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
+@@ -63,7 +63,7 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
+  * separate MCEs from kernel messages to avoid bogus bug reports.
+  */
  
- config X86_MCE_INTEL
--	bool "Intel MCE features"
-+	def_bool y
-+	prompt "Intel MCE features"
- 	depends on X86_64 && X86_MCE && X86_LOCAL_APIC
--	default y
- 	help
- 	   Additional support for intel specific MCE features such as
- 	   the thermal monitor.
+-struct mce_log mcelog = {
++static struct mce_log mcelog = {
+ 	MCE_LOG_SIGNATURE,
+ 	MCE_LOG_LEN,
+ };
+@@ -80,7 +80,7 @@ void mce_log(struct mce *mce)
+ 			/* When the buffer fills up discard new entries. Assume
+ 			   that the earlier errors are the more interesting. */
+ 			if (entry >= MCE_LOG_LEN) {
+-				set_bit(MCE_OVERFLOW, &mcelog.flags);
++				set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
+ 				return;
+ 			}
+ 			/* Old left over entry. Skip. */
+@@ -110,12 +110,12 @@ static void print_mce(struct mce *m)
+ 	       KERN_EMERG
+ 	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
+ 	       m->cpu, m->mcgstatus, m->bank, m->status);
+-	if (m->rip) {
++	if (m->ip) {
+ 		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
+ 		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+-		       m->cs, m->rip);
++		       m->cs, m->ip);
+ 		if (m->cs == __KERNEL_CS)
+-			print_symbol("{%s}", m->rip);
++			print_symbol("{%s}", m->ip);
+ 		printk("\n");
+ 	}
+ 	printk(KERN_EMERG "TSC %Lx ", m->tsc);
+@@ -156,16 +156,16 @@ static int mce_available(struct cpuinfo_x86 *c)
+ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
+ {
+ 	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
+-		m->rip = regs->rip;
++		m->ip = regs->ip;
+ 		m->cs = regs->cs;
+ 	} else {
+-		m->rip = 0;
++		m->ip = 0;
+ 		m->cs = 0;
+ 	}
+ 	if (rip_msr) {
+ 		/* Assume the RIP in the MSR is exact. Is this true? */
+ 		m->mcgstatus |= MCG_STATUS_EIPV;
+-		rdmsrl(rip_msr, m->rip);
++		rdmsrl(rip_msr, m->ip);
+ 		m->cs = 0;
+ 	}
+ }
+@@ -192,10 +192,10 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  
- config X86_MCE_AMD
--	bool "AMD MCE features"
-+	def_bool y
-+	prompt "AMD MCE features"
- 	depends on X86_64 && X86_MCE && X86_LOCAL_APIC
--	default y
- 	help
- 	   Additional support for AMD specific MCE features such as
- 	   the DRAM Error Threshold.
-@@ -637,9 +638,9 @@ config I8K
- 	  Say N otherwise.
+ 	atomic_inc(&mce_entry);
  
- config X86_REBOOTFIXUPS
--	bool "Enable X86 board specific fixups for reboot"
-+	def_bool n
-+	prompt "Enable X86 board specific fixups for reboot"
- 	depends on X86_32 && X86
--	default n
- 	---help---
- 	  This enables chipset and/or board specific fixups to be done
- 	  in order to get reboot to work correctly. This is only needed on
-@@ -648,7 +649,7 @@ config X86_REBOOTFIXUPS
- 	  system.
+-	if (regs)
+-		notify_die(DIE_NMI, "machine check", regs, error_code, 18,
+-			   SIGKILL);
+-	if (!banks)
++	if ((regs
++	     && notify_die(DIE_NMI, "machine check", regs, error_code,
++			   18, SIGKILL) == NOTIFY_STOP)
++	    || !banks)
+ 		goto out2;
  
- 	  Currently, the only fixup is for the Geode machines using
--	  CS5530A and CS5536 chipsets.
-+	  CS5530A and CS5536 chipsets and the RDC R-321x SoC.
+ 	memset(&m, 0, sizeof(struct mce));
+@@ -288,7 +288,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
+ 		 * instruction which caused the MCE.
+ 		 */
+ 		if (m.mcgstatus & MCG_STATUS_EIPV)
+-			user_space = panicm.rip && (panicm.cs & 3);
++			user_space = panicm.ip && (panicm.cs & 3);
  
- 	  Say Y if you want to enable the fixup. Currently, it's safe to
- 	  enable this option even if you don't need it.
-@@ -672,9 +673,8 @@ config MICROCODE
- 	  module will be called microcode.
+ 		/*
+ 		 * If we know that the error was in user space, send a
+@@ -564,7 +564,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
+ 			loff_t *off)
+ {
+ 	unsigned long *cpu_tsc;
+-	static DECLARE_MUTEX(mce_read_sem);
++	static DEFINE_MUTEX(mce_read_mutex);
+ 	unsigned next;
+ 	char __user *buf = ubuf;
+ 	int i, err;
+@@ -573,12 +573,12 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
+ 	if (!cpu_tsc)
+ 		return -ENOMEM;
  
- config MICROCODE_OLD_INTERFACE
--	bool
-+	def_bool y
- 	depends on MICROCODE
--	default y
+-	down(&mce_read_sem);
++	mutex_lock(&mce_read_mutex);
+ 	next = rcu_dereference(mcelog.next);
  
- config X86_MSR
- 	tristate "/dev/cpu/*/msr - Model-specific register support"
-@@ -798,13 +798,12 @@ config PAGE_OFFSET
- 	depends on X86_32
+ 	/* Only supports full reads right now */
+ 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
+-		up(&mce_read_sem);
++		mutex_unlock(&mce_read_mutex);
+ 		kfree(cpu_tsc);
+ 		return -EINVAL;
+ 	}
+@@ -621,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
+ 			memset(&mcelog.entry[i], 0, sizeof(struct mce));
+ 		}
+ 	}
+-	up(&mce_read_sem);
++	mutex_unlock(&mce_read_mutex);
+ 	kfree(cpu_tsc);
+ 	return err ? -EFAULT : buf - ubuf;
+ }
+@@ -634,8 +634,7 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
+ 	return 0;
+ }
  
- config HIGHMEM
--	bool
-+	def_bool y
- 	depends on X86_32 && (HIGHMEM64G || HIGHMEM4G)
--	default y
+-static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd,
+-		     unsigned long arg)
++static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ {
+ 	int __user *p = (int __user *)arg;
  
- config X86_PAE
--	bool "PAE (Physical Address Extension) Support"
--	default n
-+	def_bool n
-+	prompt "PAE (Physical Address Extension) Support"
- 	depends on X86_32 && !HIGHMEM4G
- 	select RESOURCES_64BIT
- 	help
-@@ -836,10 +835,10 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
- 	depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
+@@ -664,7 +663,7 @@ static const struct file_operations mce_chrdev_ops = {
+ 	.release = mce_release,
+ 	.read = mce_read,
+ 	.poll = mce_poll,
+-	.ioctl = mce_ioctl,
++	.unlocked_ioctl = mce_ioctl,
+ };
  
- config K8_NUMA
--       bool "Old style AMD Opteron NUMA detection"
--       depends on X86_64 && NUMA && PCI
--       default y
--       help
-+	def_bool y
-+	prompt "Old style AMD Opteron NUMA detection"
-+	depends on X86_64 && NUMA && PCI
-+	help
- 	 Enable K8 NUMA node topology detection.  You should say Y here if
- 	 you have a multi processor AMD K8 system. This uses an old
- 	 method to read the NUMA configuration directly from the builtin
-@@ -847,10 +846,10 @@ config K8_NUMA
- 	 instead, which also takes priority if both are compiled in.
+ static struct miscdevice mce_log_device = {
+@@ -745,7 +744,7 @@ static void mce_restart(void)
  
- config X86_64_ACPI_NUMA
--	bool "ACPI NUMA detection"
-+	def_bool y
-+	prompt "ACPI NUMA detection"
- 	depends on X86_64 && NUMA && ACPI && PCI
- 	select ACPI_NUMA
--	default y
- 	help
- 	  Enable ACPI SRAT based node topology detection.
+ static struct sysdev_class mce_sysclass = {
+ 	.resume = mce_resume,
+-	set_kset_name("machinecheck"),
++	.name = "machinecheck",
+ };
  
-@@ -864,52 +863,53 @@ config NUMA_EMU
+ DEFINE_PER_CPU(struct sys_device, device_mce);
+@@ -855,8 +854,8 @@ static void mce_remove_device(unsigned int cpu)
+ }
  
- config NODES_SHIFT
- 	int
-+	range 1 15  if X86_64
- 	default "6" if X86_64
- 	default "4" if X86_NUMAQ
- 	default "3"
- 	depends on NEED_MULTIPLE_NODES
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+-static int
+-mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
++static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
++				      unsigned long action, void *hcpu)
+ {
+ 	unsigned int cpu = (unsigned long)hcpu;
  
- config HAVE_ARCH_BOOTMEM_NODE
--	bool
-+	def_bool y
- 	depends on X86_32 && NUMA
--	default y
+@@ -873,7 +872,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ 	return NOTIFY_OK;
+ }
  
- config ARCH_HAVE_MEMORY_PRESENT
--	bool
-+	def_bool y
- 	depends on X86_32 && DISCONTIGMEM
--	default y
+-static struct notifier_block mce_cpu_notifier = {
++static struct notifier_block mce_cpu_notifier __cpuinitdata = {
+ 	.notifier_call = mce_cpu_callback,
+ };
  
- config NEED_NODE_MEMMAP_SIZE
--	bool
-+	def_bool y
- 	depends on X86_32 && (DISCONTIGMEM || SPARSEMEM)
--	default y
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+index 752fb16..32671da 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+@@ -65,7 +65,7 @@ static struct threshold_block threshold_defaults = {
+ };
  
- config HAVE_ARCH_ALLOC_REMAP
--	bool
-+	def_bool y
- 	depends on X86_32 && NUMA
--	default y
+ struct threshold_bank {
+-	struct kobject kobj;
++	struct kobject *kobj;
+ 	struct threshold_block *blocks;
+ 	cpumask_t cpus;
+ };
+@@ -118,6 +118,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+ {
+ 	unsigned int bank, block;
+ 	unsigned int cpu = smp_processor_id();
++	u8 lvt_off;
+ 	u32 low = 0, high = 0, address = 0;
  
- config ARCH_FLATMEM_ENABLE
- 	def_bool y
--	depends on (X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC) || (X86_64 && !NUMA)
-+	depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA
+ 	for (bank = 0; bank < NR_BANKS; ++bank) {
+@@ -153,14 +154,13 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+ 			if (shared_bank[bank] && c->cpu_core_id)
+ 				break;
+ #endif
++			lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
++						       APIC_EILVT_MSG_FIX, 0);
++
+ 			high &= ~MASK_LVTOFF_HI;
+-			high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
++			high |= lvt_off << 20;
+ 			wrmsr(address, low, high);
  
- config ARCH_DISCONTIGMEM_ENABLE
- 	def_bool y
--	depends on NUMA
-+	depends on NUMA && X86_32
+-			setup_APIC_extended_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
+-						THRESHOLD_APIC_VECTOR,
+-						K8_APIC_EXT_INT_MSG_FIX, 0);
+-
+ 			threshold_defaults.address = address;
+ 			threshold_restart_bank(&threshold_defaults, 0, 0);
+ 		}
+@@ -432,10 +432,9 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
+ 	else
+ 		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
  
- config ARCH_DISCONTIGMEM_DEFAULT
- 	def_bool y
--	depends on NUMA
-+	depends on NUMA && X86_32
+-	kobject_set_name(&b->kobj, "misc%i", block);
+-	b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
+-	b->kobj.ktype = &threshold_ktype;
+-	err = kobject_register(&b->kobj);
++	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
++				   per_cpu(threshold_banks, cpu)[bank]->kobj,
++				   "misc%i", block);
+ 	if (err)
+ 		goto out_free;
+ recurse:
+@@ -451,11 +450,14 @@ recurse:
+ 	if (err)
+ 		goto out_free;
+ 
++	if (b)
++		kobject_uevent(&b->kobj, KOBJ_ADD);
 +
-+config ARCH_SPARSEMEM_DEFAULT
-+	def_bool y
-+	depends on X86_64
+ 	return err;
  
- config ARCH_SPARSEMEM_ENABLE
- 	def_bool y
--	depends on NUMA || (EXPERIMENTAL && (X86_PC || X86_64))
-+	depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC)
- 	select SPARSEMEM_STATIC if X86_32
- 	select SPARSEMEM_VMEMMAP_ENABLE if X86_64
+ out_free:
+ 	if (b) {
+-		kobject_unregister(&b->kobj);
++		kobject_put(&b->kobj);
+ 		kfree(b);
+ 	}
+ 	return err;
+@@ -489,7 +491,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 			goto out;
  
- config ARCH_SELECT_MEMORY_MODEL
- 	def_bool y
--	depends on X86_32 && ARCH_SPARSEMEM_ENABLE
-+	depends on ARCH_SPARSEMEM_ENABLE
+ 		err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
+-					&b->kobj, name);
++					b->kobj, name);
+ 		if (err)
+ 			goto out;
  
- config ARCH_MEMORY_PROBE
- 	def_bool X86_64
-@@ -987,42 +987,32 @@ config MTRR
- 	  See <file:Documentation/mtrr.txt> for more information.
+@@ -505,16 +507,15 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 		goto out;
+ 	}
  
- config EFI
--	bool "Boot from EFI support"
--	depends on X86_32 && ACPI
--	default n
-+	def_bool n
-+	prompt "EFI runtime service support"
-+	depends on ACPI
- 	---help---
--	This enables the kernel to boot on EFI platforms using
--	system configuration information passed to it from the firmware.
--	This also enables the kernel to use any EFI runtime services that are
-+	This enables the kernel to use EFI runtime services that are
- 	available (such as the EFI variable services).
+-	kobject_set_name(&b->kobj, "threshold_bank%i", bank);
+-	b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
++	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
++	if (!b->kobj)
++		goto out_free;
++
+ #ifndef CONFIG_SMP
+ 	b->cpus = CPU_MASK_ALL;
+ #else
+ 	b->cpus = per_cpu(cpu_core_map, cpu);
+ #endif
+-	err = kobject_register(&b->kobj);
+-	if (err)
+-		goto out_free;
  
--	This option is only useful on systems that have EFI firmware
--	and will result in a kernel image that is ~8k larger.  In addition,
--	you must use the latest ELILO loader available at
--	<http://elilo.sourceforge.net> in order to take advantage of
--	kernel initialization using EFI information (neither GRUB nor LILO know
--	anything about EFI).  However, even with this option, the resultant
--	kernel should continue to boot on existing non-EFI platforms.
-+	This option is only useful on systems that have EFI firmware.
-+  	In addition, you should use the latest ELILO loader available
-+  	at <http://elilo.sourceforge.net> in order to take advantage
-+  	of EFI runtime services. However, even with this option, the
-+  	resultant kernel should continue to boot on existing non-EFI
-+  	platforms.
+ 	per_cpu(threshold_banks, cpu)[bank] = b;
  
- config IRQBALANCE
--	bool "Enable kernel irq balancing"
-+	def_bool y
-+	prompt "Enable kernel irq balancing"
- 	depends on X86_32 && SMP && X86_IO_APIC
--	default y
- 	help
- 	  The default yes will allow the kernel to do irq load balancing.
- 	  Saying no will keep the kernel from doing irq load balancing.
+@@ -531,7 +532,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 			continue;
  
--# turning this on wastes a bunch of space.
--# Summit needs it only when NUMA is on
--config BOOT_IOREMAP
--	bool
--	depends on X86_32 && (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
--	default y
--
- config SECCOMP
--	bool "Enable seccomp to safely compute untrusted bytecode"
-+	def_bool y
-+	prompt "Enable seccomp to safely compute untrusted bytecode"
- 	depends on PROC_FS
--	default y
- 	help
- 	  This kernel feature is useful for number crunching applications
- 	  that may need to compute untrusted bytecode during their
-@@ -1189,11 +1179,11 @@ config HOTPLUG_CPU
- 	  suspend.
+ 		err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
+-					&b->kobj, name);
++					b->kobj, name);
+ 		if (err)
+ 			goto out;
  
- config COMPAT_VDSO
--	bool "Compat VDSO support"
--	default y
--	depends on X86_32
-+	def_bool y
-+	prompt "Compat VDSO support"
-+	depends on X86_32 || IA32_EMULATION
- 	help
--	  Map the VDSO to the predictable old-style address too.
-+	  Map the 32-bit VDSO to the predictable old-style address too.
- 	---help---
- 	  Say N here if you are running a sufficiently recent glibc
- 	  version (2.3.3 or later), to remove the high-mapped
-@@ -1207,30 +1197,26 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
- 	def_bool y
- 	depends on X86_64 || (X86_32 && HIGHMEM)
+@@ -554,7 +555,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
+ 	int err = 0;
  
--config MEMORY_HOTPLUG_RESERVE
--	def_bool X86_64
--	depends on (MEMORY_HOTPLUG && DISCONTIGMEM)
--
- config HAVE_ARCH_EARLY_PFN_TO_NID
- 	def_bool X86_64
- 	depends on NUMA
+ 	for (bank = 0; bank < NR_BANKS; ++bank) {
+-		if (!(per_cpu(bank_map, cpu) & 1 << bank))
++		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+ 			continue;
+ 		err = threshold_create_bank(cpu, bank);
+ 		if (err)
+@@ -581,7 +582,7 @@ static void deallocate_threshold_block(unsigned int cpu,
+ 		return;
  
--config OUT_OF_LINE_PFN_TO_PAGE
--	def_bool X86_64
--	depends on DISCONTIGMEM
--
- menu "Power management options"
- 	depends on !X86_VOYAGER
+ 	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+-		kobject_unregister(&pos->kobj);
++		kobject_put(&pos->kobj);
+ 		list_del(&pos->miscj);
+ 		kfree(pos);
+ 	}
+@@ -627,7 +628,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
+ 	deallocate_threshold_block(cpu, bank);
  
- config ARCH_HIBERNATION_HEADER
--	bool
-+	def_bool y
- 	depends on X86_64 && HIBERNATION
--	default y
+ free_out:
+-	kobject_unregister(&b->kobj);
++	kobject_put(b->kobj);
+ 	kfree(b);
+ 	per_cpu(threshold_banks, cpu)[bank] = NULL;
+ }
+@@ -637,14 +638,14 @@ static void threshold_remove_device(unsigned int cpu)
+ 	unsigned int bank;
  
- source "kernel/power/Kconfig"
+ 	for (bank = 0; bank < NR_BANKS; ++bank) {
+-		if (!(per_cpu(bank_map, cpu) & 1 << bank))
++		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+ 			continue;
+ 		threshold_remove_bank(cpu, bank);
+ 	}
+ }
  
- source "drivers/acpi/Kconfig"
+ /* get notified when a cpu comes on/off */
+-static int threshold_cpu_callback(struct notifier_block *nfb,
++static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
+ 					    unsigned long action, void *hcpu)
+ {
+ 	/* cpu was unsigned int to begin with */
+@@ -669,7 +670,7 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
+ 	return NOTIFY_OK;
+ }
  
-+config X86_APM_BOOT
-+	bool
-+	default y
-+	depends on APM || APM_MODULE
-+
- menuconfig APM
- 	tristate "APM (Advanced Power Management) BIOS support"
- 	depends on X86_32 && PM_SLEEP && !X86_VISWS
-@@ -1371,7 +1357,7 @@ menu "Bus options (PCI etc.)"
- config PCI
- 	bool "PCI support" if !X86_VISWS
- 	depends on !X86_VOYAGER
--	default y if X86_VISWS
-+	default y
- 	select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
- 	help
- 	  Find out whether you have a PCI motherboard. PCI is the name of a
-@@ -1418,25 +1404,21 @@ config PCI_GOANY
- endchoice
+-static struct notifier_block threshold_cpu_notifier = {
++static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
+ 	.notifier_call = threshold_cpu_callback,
+ };
  
- config PCI_BIOS
--	bool
-+	def_bool y
- 	depends on X86_32 && !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
--	default y
+diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
+index be4dabf..cb03345 100644
+--- a/arch/x86/kernel/cpu/mcheck/p4.c
++++ b/arch/x86/kernel/cpu/mcheck/p4.c
+@@ -57,7 +57,7 @@ static void intel_thermal_interrupt(struct pt_regs *regs)
+ /* Thermal interrupt handler for this CPU setup */
+ static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;
  
- # x86-64 doesn't support PCI BIOS access from long mode so always go direct.
- config PCI_DIRECT
--	bool
-+	def_bool y
- 	depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
--	default y
+-fastcall void smp_thermal_interrupt(struct pt_regs *regs)
++void smp_thermal_interrupt(struct pt_regs *regs)
+ {
+ 	irq_enter();
+ 	vendor_thermal_interrupt(regs);
+@@ -141,7 +141,7 @@ static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
+ 	rdmsr (MSR_IA32_MCG_EIP, r->eip, h);
+ }
  
- config PCI_MMCONFIG
--	bool
-+	def_bool y
- 	depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
--	default y
+-static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
++static void intel_machine_check(struct pt_regs * regs, long error_code)
+ {
+ 	int recover=1;
+ 	u32 alow, ahigh, high, low;
+@@ -152,38 +152,41 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
+ 	if (mcgstl & (1<<0))	/* Recoverable ? */
+ 		recover=0;
  
- config PCI_DOMAINS
--	bool
-+	def_bool y
- 	depends on PCI
--	default y
+-	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
++	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
+ 		smp_processor_id(), mcgsth, mcgstl);
  
- config PCI_MMCONFIG
- 	bool "Support mmconfig PCI config space access"
-@@ -1453,9 +1435,9 @@ config DMAR
- 	  remapping devices.
+ 	if (mce_num_extended_msrs > 0) {
+ 		struct intel_mce_extended_msrs dbg;
+ 		intel_get_extended_msrs(&dbg);
+-		printk (KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n",
+-			smp_processor_id(), dbg.eip, dbg.eflags);
+-		printk (KERN_DEBUG "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n",
+-			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx);
+-		printk (KERN_DEBUG "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
++		printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n"
++			"\teax: %08x ebx: %08x ecx: %08x edx: %08x\n"
++			"\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
++			smp_processor_id(), dbg.eip, dbg.eflags,
++			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx,
+ 			dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
+ 	}
  
- config DMAR_GFX_WA
--	bool "Support for Graphics workaround"
-+	def_bool y
-+	prompt "Support for Graphics workaround"
- 	depends on DMAR
--	default y
- 	help
- 	 Current Graphics drivers tend to use physical address
- 	 for DMA and avoid using DMA APIs. Setting this config
-@@ -1464,9 +1446,8 @@ config DMAR_GFX_WA
- 	 to use physical addresses for DMA.
+-	for (i=0; i<nr_mce_banks; i++) {
+-		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
++	for (i = 0; i < nr_mce_banks; i++) {
++		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
+ 		if (high & (1<<31)) {
++			char misc[20];
++			char addr[24];
++			misc[0] = addr[0] = '\0';
+ 			if (high & (1<<29))
+ 				recover |= 1;
+ 			if (high & (1<<25))
+ 				recover |= 2;
+-			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
+ 			high &= ~(1<<31);
+ 			if (high & (1<<27)) {
+-				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
+-				printk ("[%08x%08x]", ahigh, alow);
++				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
++				snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
+ 			}
+ 			if (high & (1<<26)) {
+-				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
+-				printk (" at %08x%08x", ahigh, alow);
++				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
++				snprintf(addr, 24, " at %08x%08x", ahigh, alow);
+ 			}
+-			printk ("\n");
++			printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
++				smp_processor_id(), i, high, low, misc, addr);
+ 		}
+ 	}
  
- config DMAR_FLOPPY_WA
--	bool
-+	def_bool y
- 	depends on DMAR
--	default y
- 	help
- 	 Floppy disk drivers are know to bypass DMA API calls
- 	 thereby failing to work when IOMMU is enabled. This
-@@ -1479,8 +1460,7 @@ source "drivers/pci/Kconfig"
+diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
+index 94bc43d..a18310a 100644
+--- a/arch/x86/kernel/cpu/mcheck/p5.c
++++ b/arch/x86/kernel/cpu/mcheck/p5.c
+@@ -16,7 +16,7 @@
+ #include "mce.h"
  
- # x86_64 have no ISA slots, but do have ISA-style DMA.
- config ISA_DMA_API
--	bool
--	default y
-+	def_bool y
+ /* Machine check handler for Pentium class Intel */
+-static fastcall void pentium_machine_check(struct pt_regs * regs, long error_code)
++static void pentium_machine_check(struct pt_regs * regs, long error_code)
+ {
+ 	u32 loaddr, hi, lotype;
+ 	rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
+diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c
+index deeae42..7434260 100644
+--- a/arch/x86/kernel/cpu/mcheck/p6.c
++++ b/arch/x86/kernel/cpu/mcheck/p6.c
+@@ -16,7 +16,7 @@
+ #include "mce.h"
  
- if X86_32
+ /* Machine Check Handler For PII/PIII */
+-static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
++static void intel_machine_check(struct pt_regs * regs, long error_code)
+ {
+ 	int recover=1;
+ 	u32 alow, ahigh, high, low;
+@@ -27,27 +27,30 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
+ 	if (mcgstl & (1<<0))	/* Recoverable ? */
+ 		recover=0;
  
-@@ -1546,9 +1526,9 @@ config SCx200HR_TIMER
- 	  other workaround is idle=poll boot option.
+-	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
++	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
+ 		smp_processor_id(), mcgsth, mcgstl);
  
- config GEODE_MFGPT_TIMER
--	bool "Geode Multi-Function General Purpose Timer (MFGPT) events"
-+	def_bool y
-+	prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
- 	depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
--	default y
- 	help
- 	  This driver provides a clock event source based on the MFGPT
- 	  timer(s) in the CS5535 and CS5536 companion chip for the geode.
-@@ -1575,6 +1555,7 @@ source "fs/Kconfig.binfmt"
- config IA32_EMULATION
- 	bool "IA32 Emulation"
- 	depends on X86_64
-+	select COMPAT_BINFMT_ELF
- 	help
- 	  Include code to run 32-bit programs under a 64-bit kernel. You should
- 	  likely turn this on, unless you're 100% sure that you don't have any
-@@ -1587,18 +1568,16 @@ config IA32_AOUT
-          Support old a.out binaries in the 32bit emulation.
+-	for (i=0; i<nr_mce_banks; i++) {
+-		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
++	for (i = 0; i < nr_mce_banks; i++) {
++		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
+ 		if (high & (1<<31)) {
++			char misc[20];
++			char addr[24];
++			misc[0] = addr[0] = '\0';
+ 			if (high & (1<<29))
+ 				recover |= 1;
+ 			if (high & (1<<25))
+ 				recover |= 2;
+-			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
+ 			high &= ~(1<<31);
+ 			if (high & (1<<27)) {
+-				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
+-				printk ("[%08x%08x]", ahigh, alow);
++				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
++				snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
+ 			}
+ 			if (high & (1<<26)) {
+-				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
+-				printk (" at %08x%08x", ahigh, alow);
++				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
++				snprintf(addr, 24, " at %08x%08x", ahigh, alow);
+ 			}
+-			printk ("\n");
++			printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
++				smp_processor_id(), i, high, low, misc, addr);
+ 		}
+ 	}
  
- config COMPAT
--	bool
-+	def_bool y
- 	depends on IA32_EMULATION
--	default y
+diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
+index 9e424b6..3d428d5 100644
+--- a/arch/x86/kernel/cpu/mcheck/winchip.c
++++ b/arch/x86/kernel/cpu/mcheck/winchip.c
+@@ -15,7 +15,7 @@
+ #include "mce.h"
  
- config COMPAT_FOR_U64_ALIGNMENT
- 	def_bool COMPAT
- 	depends on X86_64
+ /* Machine check handler for WinChip C6 */
+-static fastcall void winchip_machine_check(struct pt_regs * regs, long error_code)
++static void winchip_machine_check(struct pt_regs * regs, long error_code)
+ {
+ 	printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
+ 	add_taint(TAINT_MACHINE_CHECK);
+diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
+index 0949cdb..ee2331b 100644
+--- a/arch/x86/kernel/cpu/mtrr/amd.c
++++ b/arch/x86/kernel/cpu/mtrr/amd.c
+@@ -53,8 +53,6 @@ static void amd_set_mtrr(unsigned int reg, unsigned long base,
+     <base> The base address of the region.
+     <size> The size of the region. If this is 0 the region is disabled.
+     <type> The type of the region.
+-    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
+-    be done externally.
+     [RETURNS] Nothing.
+ */
+ {
+diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
+index 9964be3..8e139c7 100644
+--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
++++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
+@@ -4,6 +4,7 @@
+ #include <asm/msr.h>
+ #include <asm/io.h>
+ #include <asm/processor-cyrix.h>
++#include <asm/processor-flags.h>
+ #include "mtrr.h"
  
- config SYSVIPC_COMPAT
--	bool
-+	def_bool y
- 	depends on X86_64 && COMPAT && SYSVIPC
--	default y
+ int arr3_protected;
+@@ -142,7 +143,7 @@ static void prepare_set(void)
  
- endmenu
+ 	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
+ 	    a side-effect  */
+-	cr0 = read_cr0() | 0x40000000;
++	cr0 = read_cr0() | X86_CR0_CD;
+ 	wbinvd();
+ 	write_cr0(cr0);
+ 	wbinvd();
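The cyrix.c hunk above (and the generic.c and state.c hunks below) replaces the magic constant 0x40000000 with X86_CR0_CD from the newly included <asm/processor-flags.h>. CR0.CD is the cache-disable bit, bit 30, so the value written to CR0 is unchanged; only the readability improves. A quick standalone check, with the constant restated here rather than pulled from the kernel header:

    #include <assert.h>

    #define X86_CR0_CD 0x40000000  /* CR0 bit 30: cache disable */

    int main(void)
    {
            /* the named flag and the old magic number are the same bit */
            assert(X86_CR0_CD == (1u << 30));
            return 0;
    }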
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index 992f08d..103d61a 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -9,11 +9,12 @@
+ #include <asm/msr.h>
+ #include <asm/system.h>
+ #include <asm/cpufeature.h>
++#include <asm/processor-flags.h>
+ #include <asm/tlbflush.h>
+ #include "mtrr.h"
  
-diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index c301622..e09a6b7 100644
---- a/arch/x86/Kconfig.cpu
-+++ b/arch/x86/Kconfig.cpu
-@@ -219,10 +219,10 @@ config MGEODEGX1
- 	  Select this for a Geode GX1 (Cyrix MediaGX) chip.
+ struct mtrr_state {
+-	struct mtrr_var_range *var_ranges;
++	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
+ 	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
+ 	unsigned char enabled;
+ 	unsigned char have_fixed;
+@@ -85,12 +86,6 @@ void __init get_mtrr_state(void)
+ 	struct mtrr_var_range *vrs;
+ 	unsigned lo, dummy;
  
- config MGEODE_LX
--       bool "Geode GX/LX"
-+	bool "Geode GX/LX"
- 	depends on X86_32
--       help
--         Select this for AMD Geode GX and LX processors.
-+	help
-+	  Select this for AMD Geode GX and LX processors.
+-	if (!mtrr_state.var_ranges) {
+-		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range), 
+-						GFP_KERNEL);
+-		if (!mtrr_state.var_ranges)
+-			return;
+-	} 
+ 	vrs = mtrr_state.var_ranges;
  
- config MCYRIXIII
- 	bool "CyrixIII/VIA-C3"
-@@ -258,7 +258,7 @@ config MPSC
- 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
- 	  Xeon CPUs with Intel 64bit which is compatible with x86-64.
- 	  Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
--          Netburst core and shouldn't use this option. You can distinguish them
-+	  Netburst core and shouldn't use this option. You can distinguish them
- 	  using the cpu family field
- 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 	rdmsr(MTRRcap_MSR, lo, dummy);
+@@ -188,7 +183,7 @@ static inline void k8_enable_fixed_iorrs(void)
+  * \param changed pointer which indicates whether the MTRR needed to be changed
+  * \param msrwords pointer to the MSR values which the MSR should have
+  */
+-static void set_fixed_range(int msr, int * changed, unsigned int * msrwords)
++static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
+ {
+ 	unsigned lo, hi;
  
-@@ -317,81 +317,75 @@ config X86_L1_CACHE_SHIFT
- 	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
+@@ -200,7 +195,7 @@ static void set_fixed_range(int msr, int * changed, unsigned int * msrwords)
+ 		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
+ 			k8_enable_fixed_iorrs();
+ 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
+-		*changed = TRUE;
++		*changed = true;
+ 	}
+ }
  
- config X86_XADD
--	bool
-+	def_bool y
- 	depends on X86_32 && !M386
--	default y
+@@ -260,7 +255,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
+ static int set_fixed_ranges(mtrr_type * frs)
+ {
+ 	unsigned long long *saved = (unsigned long long *) frs;
+-	int changed = FALSE;
++	bool changed = false;
+ 	int block=-1, range;
  
- config X86_PPRO_FENCE
--	bool
-+	bool "PentiumPro memory ordering errata workaround"
- 	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
--	default y
-+	help
-+	  Old PentiumPro multiprocessor systems had errata that could cause memory
-+	  operations to violate the x86 ordering standard in rare cases. Enabling this
-+	  option will attempt to work around some (but not all) occurances of
-+	  this problem, at the cost of much heavier spinlock and memory barrier
-+	  operations.
-+
-+	  If unsure, say n here. Even distro kernels should think twice before enabling
-+	  this: there are few systems, and an unlikely bug.
+ 	while (fixed_range_blocks[++block].ranges)
+@@ -273,17 +268,17 @@ static int set_fixed_ranges(mtrr_type * frs)
  
- config X86_F00F_BUG
--	bool
-+	def_bool y
- 	depends on M586MMX || M586TSC || M586 || M486 || M386
--	default y
+ /*  Set the MSR pair relating to a var range. Returns TRUE if
+     changes are made  */
+-static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
++static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
+ {
+ 	unsigned int lo, hi;
+-	int changed = FALSE;
++	bool changed = false;
  
- config X86_WP_WORKS_OK
--	bool
-+	def_bool y
- 	depends on X86_32 && !M386
--	default y
+ 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
+ 	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
+ 	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
+ 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
+ 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
+-		changed = TRUE;
++		changed = true;
+ 	}
  
- config X86_INVLPG
--	bool
-+	def_bool y
- 	depends on X86_32 && !M386
--	default y
+ 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
+@@ -292,7 +287,7 @@ static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
+ 	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
+ 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
+ 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
+-		changed = TRUE;
++		changed = true;
+ 	}
+ 	return changed;
+ }
+@@ -350,7 +345,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
+ 	spin_lock(&set_atomicity_lock);
  
- config X86_BSWAP
--	bool
-+	def_bool y
- 	depends on X86_32 && !M386
--	default y
+ 	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+-	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
++	cr0 = read_cr0() | X86_CR0_CD;
+ 	write_cr0(cr0);
+ 	wbinvd();
  
- config X86_POPAD_OK
--	bool
-+	def_bool y
- 	depends on X86_32 && !M386
--	default y
+@@ -417,8 +412,6 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
+     <base> The base address of the region.
+     <size> The size of the region. If this is 0 the region is disabled.
+     <type> The type of the region.
+-    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
+-    be done externally.
+     [RETURNS] Nothing.
+ */
+ {
+diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
+index c7d8f17..91e150a 100644
+--- a/arch/x86/kernel/cpu/mtrr/if.c
++++ b/arch/x86/kernel/cpu/mtrr/if.c
+@@ -11,10 +11,6 @@
+ #include <asm/mtrr.h>
+ #include "mtrr.h"
  
- config X86_ALIGNMENT_16
--	bool
-+	def_bool y
- 	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
--	default y
+-/* RED-PEN: this is accessed without any locking */
+-extern unsigned int *usage_table;
+-
+-
+ #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
  
- config X86_GOOD_APIC
--	bool
-+	def_bool y
- 	depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON || MCORE2 || MVIAC7 || X86_64
--	default y
+ static const char *const mtrr_strings[MTRR_NUM_TYPES] =
+@@ -37,7 +33,7 @@ const char *mtrr_attrib_to_str(int x)
  
- config X86_INTEL_USERCOPY
--	bool
-+	def_bool y
- 	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
--	default y
+ static int
+ mtrr_file_add(unsigned long base, unsigned long size,
+-	      unsigned int type, char increment, struct file *file, int page)
++	      unsigned int type, bool increment, struct file *file, int page)
+ {
+ 	int reg, max;
+ 	unsigned int *fcount = FILE_FCOUNT(file); 
+@@ -55,7 +51,7 @@ mtrr_file_add(unsigned long base, unsigned long size,
+ 		base >>= PAGE_SHIFT;
+ 		size >>= PAGE_SHIFT;
+ 	}
+-	reg = mtrr_add_page(base, size, type, 1);
++	reg = mtrr_add_page(base, size, type, true);
+ 	if (reg >= 0)
+ 		++fcount[reg];
+ 	return reg;
+@@ -141,7 +137,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
+ 		size >>= PAGE_SHIFT;
+ 		err =
+ 		    mtrr_add_page((unsigned long) base, (unsigned long) size, i,
+-				  1);
++				  true);
+ 		if (err < 0)
+ 			return err;
+ 		return len;
+@@ -217,7 +213,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+ 		if (!capable(CAP_SYS_ADMIN))
+ 			return -EPERM;
+ 		err =
+-		    mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
++		    mtrr_file_add(sentry.base, sentry.size, sentry.type, true,
+ 				  file, 0);
+ 		break;
+ 	case MTRRIOC_SET_ENTRY:
+@@ -226,7 +222,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+ #endif
+ 		if (!capable(CAP_SYS_ADMIN))
+ 			return -EPERM;
+-		err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
++		err = mtrr_add(sentry.base, sentry.size, sentry.type, false);
+ 		break;
+ 	case MTRRIOC_DEL_ENTRY:
+ #ifdef CONFIG_COMPAT
+@@ -270,7 +266,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+ 		if (!capable(CAP_SYS_ADMIN))
+ 			return -EPERM;
+ 		err =
+-		    mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
++		    mtrr_file_add(sentry.base, sentry.size, sentry.type, true,
+ 				  file, 1);
+ 		break;
+ 	case MTRRIOC_SET_PAGE_ENTRY:
+@@ -279,7 +275,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+ #endif
+ 		if (!capable(CAP_SYS_ADMIN))
+ 			return -EPERM;
+-		err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
++		err =
++		    mtrr_add_page(sentry.base, sentry.size, sentry.type, false);
+ 		break;
+ 	case MTRRIOC_DEL_PAGE_ENTRY:
+ #ifdef CONFIG_COMPAT
+@@ -396,7 +393,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
+ 	for (i = 0; i < max; i++) {
+ 		mtrr_if->get(i, &base, &size, &type);
+ 		if (size == 0)
+-			usage_table[i] = 0;
++			mtrr_usage_table[i] = 0;
+ 		else {
+ 			if (size < (0x100000 >> PAGE_SHIFT)) {
+ 				/* less than 1MB */
+@@ -410,7 +407,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
+ 			len += seq_printf(seq, 
+ 				   "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
+ 			     i, base, base >> (20 - PAGE_SHIFT), size, factor,
+-			     mtrr_attrib_to_str(type), usage_table[i]);
++			     mtrr_attrib_to_str(type), mtrr_usage_table[i]);
+ 		}
+ 	}
+ 	return 0;
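For orientation, the seq_printf format in mtrr_seq_show above renders /proc/mtrr entries along these lines (addresses, sizes, and counts are purely illustrative):

    reg00: base=0x00000000 (   0MB), size=1024MB: write-back, count=1
    reg01: base=0x40000000 (1024MB), size= 256MB: uncachable, count=1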
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index 3b20613..7159195 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -38,8 +38,8 @@
+ #include <linux/cpu.h>
+ #include <linux/mutex.h>
  
- config X86_USE_PPRO_CHECKSUM
--	bool
-+	def_bool y
- 	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
--	default y
++#include <asm/e820.h>
+ #include <asm/mtrr.h>
+-
+ #include <asm/uaccess.h>
+ #include <asm/processor.h>
+ #include <asm/msr.h>
+@@ -47,7 +47,7 @@
  
- config X86_USE_3DNOW
--	bool
-+	def_bool y
- 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
--	default y
+ u32 num_var_ranges = 0;
  
- config X86_OOSTORE
--	bool
-+	def_bool y
- 	depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
--	default y
+-unsigned int *usage_table;
++unsigned int mtrr_usage_table[MAX_VAR_RANGES];
+ static DEFINE_MUTEX(mtrr_mutex);
  
- config X86_TSC
--	bool
-+	def_bool y
- 	depends on ((MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64
--	default y
+ u64 size_or_mask, size_and_mask;
+@@ -121,13 +121,8 @@ static void __init init_table(void)
+ 	int i, max;
  
- # this should be set for all -march=.. options where the compiler
- # generates cmov.
- config X86_CMOV
--	bool
-+	def_bool y
- 	depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7)
--	default y
+ 	max = num_var_ranges;
+-	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
+-	    == NULL) {
+-		printk(KERN_ERR "mtrr: could not allocate\n");
+-		return;
+-	}
+ 	for (i = 0; i < max; i++)
+-		usage_table[i] = 1;
++		mtrr_usage_table[i] = 1;
+ }
  
- config X86_MINIMUM_CPU_FAMILY
- 	int
-@@ -399,3 +393,6 @@ config X86_MINIMUM_CPU_FAMILY
- 	default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK)
- 	default "3"
+ struct set_mtrr_data {
+@@ -311,7 +306,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
+  */
  
-+config X86_DEBUGCTLMSR
-+	def_bool y
-+	depends on !(M586MMX || M586TSC || M586 || M486 || M386)
-diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 761ca7b..2e1e3af 100644
---- a/arch/x86/Kconfig.debug
-+++ b/arch/x86/Kconfig.debug
-@@ -6,7 +6,7 @@ config TRACE_IRQFLAGS_SUPPORT
- source "lib/Kconfig.debug"
+ int mtrr_add_page(unsigned long base, unsigned long size, 
+-		  unsigned int type, char increment)
++		  unsigned int type, bool increment)
+ {
+ 	int i, replace, error;
+ 	mtrr_type ltype;
+@@ -349,7 +344,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
+ 	replace = -1;
  
- config EARLY_PRINTK
--	bool "Early printk" if EMBEDDED && DEBUG_KERNEL && X86_32
-+	bool "Early printk" if EMBEDDED
- 	default y
- 	help
- 	  Write kernel log output directly into the VGA buffer or to a serial
-@@ -40,22 +40,49 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
+ 	/* No CPU hotplug when we change MTRR entries */
+-	lock_cpu_hotplug();
++	get_online_cpus();
+ 	/*  Search for existing MTRR  */
+ 	mutex_lock(&mtrr_mutex);
+ 	for (i = 0; i < num_var_ranges; ++i) {
+@@ -383,7 +378,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
+ 			goto out;
+ 		}
+ 		if (increment)
+-			++usage_table[i];
++			++mtrr_usage_table[i];
+ 		error = i;
+ 		goto out;
+ 	}
+@@ -391,13 +386,15 @@ int mtrr_add_page(unsigned long base, unsigned long size,
+ 	i = mtrr_if->get_free_region(base, size, replace);
+ 	if (i >= 0) {
+ 		set_mtrr(i, base, size, type);
+-		if (likely(replace < 0))
+-			usage_table[i] = 1;
+-		else {
+-			usage_table[i] = usage_table[replace] + !!increment;
++		if (likely(replace < 0)) {
++			mtrr_usage_table[i] = 1;
++		} else {
++			mtrr_usage_table[i] = mtrr_usage_table[replace];
++			if (increment)
++				mtrr_usage_table[i]++;
+ 			if (unlikely(replace != i)) {
+ 				set_mtrr(replace, 0, 0, 0);
+-				usage_table[replace] = 0;
++				mtrr_usage_table[replace] = 0;
+ 			}
+ 		}
+ 	} else
+@@ -405,7 +402,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
+ 	error = i;
+  out:
+ 	mutex_unlock(&mtrr_mutex);
+-	unlock_cpu_hotplug();
++	put_online_cpus();
+ 	return error;
+ }
  
- config DEBUG_PAGEALLOC
- 	bool "Debug page memory allocations"
--	depends on DEBUG_KERNEL && !HIBERNATION && !HUGETLBFS
--	depends on X86_32
-+	depends on DEBUG_KERNEL && X86_32
- 	help
- 	  Unmap pages from the kernel linear mapping after free_pages().
- 	  This results in a large slowdown, but helps to find certain types
- 	  of memory corruptions.
+@@ -460,7 +457,7 @@ static int mtrr_check(unsigned long base, unsigned long size)
  
-+config DEBUG_PER_CPU_MAPS
-+	bool "Debug access to per_cpu maps"
-+	depends on DEBUG_KERNEL
-+	depends on X86_64_SMP
-+	default n
-+	help
-+	  Say Y to verify that the per_cpu map being accessed has
-+	  been setup.  Adds a fair amount of code to kernel memory
-+	  and decreases performance.
-+
-+	  Say N if unsure.
-+
- config DEBUG_RODATA
- 	bool "Write protect kernel read-only data structures"
-+	default y
- 	depends on DEBUG_KERNEL
- 	help
- 	  Mark the kernel read-only data as write-protected in the pagetables,
- 	  in order to catch accidental (and incorrect) writes to such const
--	  data. This option may have a slight performance impact because a
--	  portion of the kernel code won't be covered by a 2MB TLB anymore.
--	  If in doubt, say "N".
-+	  data. This is recommended so that we can catch kernel bugs sooner.
-+	  If in doubt, say "Y".
-+
-+config DEBUG_RODATA_TEST
-+	bool "Testcase for the DEBUG_RODATA feature"
-+	depends on DEBUG_RODATA
-+	help
-+	  This option enables a testcase for the DEBUG_RODATA
-+	  feature as well as for the change_page_attr() infrastructure.
-+	  If in doubt, say "N"
-+
-+config DEBUG_NX_TEST
-+	tristate "Testcase for the NX non-executable stack feature"
-+	depends on DEBUG_KERNEL && m
-+	help
-+	  This option enables a testcase for the CPU NX capability
-+	  and the software setup of this feature.
-+	  If in doubt, say "N"
+ int
+ mtrr_add(unsigned long base, unsigned long size, unsigned int type,
+-	 char increment)
++	 bool increment)
+ {
+ 	if (mtrr_check(base, size))
+ 		return -EINVAL;
+@@ -495,7 +492,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
  
- config 4KSTACKS
- 	bool "Use 4Kb for kernel stacks instead of 8Kb"
-@@ -75,8 +102,7 @@ config X86_FIND_SMP_CONFIG
+ 	max = num_var_ranges;
+ 	/* No CPU hotplug when we change MTRR entries */
+-	lock_cpu_hotplug();
++	get_online_cpus();
+ 	mutex_lock(&mtrr_mutex);
+ 	if (reg < 0) {
+ 		/*  Search for existing MTRR  */
+@@ -527,16 +524,16 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
+ 		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
+ 		goto out;
+ 	}
+-	if (usage_table[reg] < 1) {
++	if (mtrr_usage_table[reg] < 1) {
+ 		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
+ 		goto out;
+ 	}
+-	if (--usage_table[reg] < 1)
++	if (--mtrr_usage_table[reg] < 1)
+ 		set_mtrr(reg, 0, 0, 0);
+ 	error = reg;
+  out:
+ 	mutex_unlock(&mtrr_mutex);
+-	unlock_cpu_hotplug();
++	put_online_cpus();
+ 	return error;
+ }
+ /**
+@@ -591,16 +588,11 @@ struct mtrr_value {
+ 	unsigned long	lsize;
+ };
  
- config X86_MPPARSE
- 	def_bool y
--	depends on X86_LOCAL_APIC && !X86_VISWS
--	depends on X86_32
-+	depends on (X86_32 && (X86_LOCAL_APIC && !X86_VISWS)) || X86_64
+-static struct mtrr_value * mtrr_state;
++static struct mtrr_value mtrr_state[MAX_VAR_RANGES];
  
- config DOUBLEFAULT
- 	default y
-@@ -112,4 +138,91 @@ config IOMMU_LEAK
- 	  Add a simple leak tracer to the IOMMU code. This is useful when you
- 	  are debugging a buggy device driver that leaks IOMMU mappings.
+ static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
+ {
+ 	int i;
+-	int size = num_var_ranges * sizeof(struct mtrr_value);
+-
+-	mtrr_state = kzalloc(size,GFP_ATOMIC);
+-	if (!mtrr_state)
+-		return -ENOMEM;
  
-+#
-+# IO delay types:
-+#
+ 	for (i = 0; i < num_var_ranges; i++) {
+ 		mtrr_if->get(i,
+@@ -622,7 +614,6 @@ static int mtrr_restore(struct sys_device * sysdev)
+ 				 mtrr_state[i].lsize,
+ 				 mtrr_state[i].ltype);
+ 	}
+-	kfree(mtrr_state);
+ 	return 0;
+ }
+ 
+@@ -633,6 +624,112 @@ static struct sysdev_driver mtrr_sysdev_driver = {
+ 	.resume		= mtrr_restore,
+ };
+ 
++static int disable_mtrr_trim;
 +
-+config IO_DELAY_TYPE_0X80
-+	int
-+	default "0"
++static int __init disable_mtrr_trim_setup(char *str)
++{
++	disable_mtrr_trim = 1;
++	return 0;
++}
++early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
 +
-+config IO_DELAY_TYPE_0XED
-+	int
-+	default "1"
++/*
++ * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
++ * for memory >4GB. Check for that here.
++ * Note this won't check if the MTRRs < 4GB where the magic bit doesn't
++ * apply to are wrong, but so far we don't know of any such case in the wild.
++ */
++#define Tom2Enabled (1U << 21)
++#define Tom2ForceMemTypeWB (1U << 22)
 +
-+config IO_DELAY_TYPE_UDELAY
-+	int
-+	default "2"
++static __init int amd_special_default_mtrr(void)
++{
++	u32 l, h;
 +
-+config IO_DELAY_TYPE_NONE
-+	int
-+	default "3"
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
++		return 0;
++	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
++		return 0;
++	/* In case some hypervisor doesn't pass SYSCFG through */
++	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
++		return 0;
++	/*
++	 * Memory between 4GB and top of mem is forced WB by this magic bit.
++	 * Reserved before K8RevF, but should be zero there.
++	 */
++	if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
++		 (Tom2Enabled | Tom2ForceMemTypeWB))
++		return 1;
++	return 0;
++}
 +
-+choice
-+	prompt "IO delay type"
-+	default IO_DELAY_0XED
++/**
++ * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
++ *
++ * Some buggy BIOSes don't set up the MTRRs properly for systems with certain
++ * memory configurations.  This routine checks that the highest MTRR matches
++ * the end of memory, to make sure the write-back MTRRs cover all of the
++ * memory the kernel intends to use.  If not, it trims any memory off the end
++ * by adjusting end_pfn, removing it from the kernel's allocation pools and
++ * warning the user with an obnoxious message.
++ */
++int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
++{
++	unsigned long i, base, size, highest_addr = 0, def, dummy;
++	mtrr_type type;
++	u64 trim_start, trim_size;
 +
-+config IO_DELAY_0X80
-+	bool "port 0x80 based port-IO delay [recommended]"
-+	help
-+	  This is the traditional Linux IO delay used for in/out_p.
-+	  It is the most tested hence safest selection here.
++	/*
++	 * Make sure we only trim uncachable memory on machines that
++	 * support the Intel MTRR architecture:
++	 */
++	if (!is_cpu(INTEL) || disable_mtrr_trim)
++		return 0;
++	rdmsr(MTRRdefType_MSR, def, dummy);
++	def &= 0xff;
++	if (def != MTRR_TYPE_UNCACHABLE)
++		return 0;
 +
-+config IO_DELAY_0XED
-+	bool "port 0xed based port-IO delay"
-+	help
-+	  Use port 0xed as the IO delay. This frees up port 0x80 which is
-+	  often used as a hardware-debug port.
++	if (amd_special_default_mtrr())
++		return 0;
 +
-+config IO_DELAY_UDELAY
-+	bool "udelay based port-IO delay"
-+	help
-+	  Use udelay(2) as the IO delay method. This provides the delay
-+	  while not having any side-effect on the IO port space.
++	/* Find highest cached pfn */
++	for (i = 0; i < num_var_ranges; i++) {
++		mtrr_if->get(i, &base, &size, &type);
++		if (type != MTRR_TYPE_WRBACK)
++			continue;
++		base <<= PAGE_SHIFT;
++		size <<= PAGE_SHIFT;
++		if (highest_addr < base + size)
++			highest_addr = base + size;
++	}
 +
-+config IO_DELAY_NONE
-+	bool "no port-IO delay"
-+	help
-+	  No port-IO delay. Will break on old boxes that require port-IO
-+	  delay for certain operations. Should work on most new machines.
++	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
++	if (!highest_addr) {
++		printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n");
++		WARN_ON(1);
++		return 0;
++	}
 +
-+endchoice
++	if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
++		printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
++			" all of memory, losing %LdMB of RAM.\n",
++			(((u64)end_pfn << PAGE_SHIFT) - highest_addr) >> 20);
 +
-+if IO_DELAY_0X80
-+config DEFAULT_IO_DELAY_TYPE
-+	int
-+	default IO_DELAY_TYPE_0X80
-+endif
++		WARN_ON(1);
 +
-+if IO_DELAY_0XED
-+config DEFAULT_IO_DELAY_TYPE
-+	int
-+	default IO_DELAY_TYPE_0XED
-+endif
++		printk(KERN_INFO "update e820 for mtrr\n");
++		trim_start = highest_addr;
++		trim_size = end_pfn;
++		trim_size <<= PAGE_SHIFT;
++		trim_size -= trim_start;
++		add_memory_region(trim_start, trim_size, E820_RESERVED);
++		update_e820();
++		return 1;
++	}
 +
-+if IO_DELAY_UDELAY
-+config DEFAULT_IO_DELAY_TYPE
-+	int
-+	default IO_DELAY_TYPE_UDELAY
-+endif
++	return 0;
++}
+ 
+ /**
+  * mtrr_bp_init - initialize mtrrs on the boot CPU
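To make the new trimming path above concrete, here is a hedged, standalone sketch of the address arithmetic mtrr_trim_uncached_memory() performs when the highest write-back MTRR ends below end_pfn. The machine configuration is hypothetical and PAGE_SHIFT is assumed to be 12 (4 KiB pages):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

    int main(void)
    {
            /* hypothetical box: 4 GiB of RAM, write-back MTRRs only cover 3.5 GiB */
            unsigned long long end_pfn      = 0x100000;       /* 4 GiB in pages */
            unsigned long long highest_addr = 0xE0000000ULL;  /* end of last WB MTRR */

            if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
                    unsigned long long trim_start = highest_addr;
                    unsigned long long trim_size  = (end_pfn << PAGE_SHIFT) - trim_start;

                    /* the kernel then reserves this range via add_memory_region() */
                    printf("losing %lluMB of RAM, reserving %llu bytes at %#llx\n",
                           trim_size >> 20, trim_size, trim_start);
            }
            return 0;
    }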
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
+index 289dfe6..fb74a2c 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+@@ -2,10 +2,8 @@
+  * local mtrr defines.
+  */
+ 
+-#ifndef TRUE
+-#define TRUE  1
+-#define FALSE 0
+-#endif
++#include <linux/types.h>
++#include <linux/stddef.h>
+ 
+ #define MTRRcap_MSR     0x0fe
+ #define MTRRdefType_MSR 0x2ff
+@@ -14,6 +12,7 @@
+ #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
+ 
+ #define NUM_FIXED_RANGES 88
++#define MAX_VAR_RANGES 256
+ #define MTRRfix64K_00000_MSR 0x250
+ #define MTRRfix16K_80000_MSR 0x258
+ #define MTRRfix16K_A0000_MSR 0x259
+@@ -34,6 +33,8 @@
+    an 8 bit field: */
+ typedef u8 mtrr_type;
+ 
++extern unsigned int mtrr_usage_table[MAX_VAR_RANGES];
 +
-+if IO_DELAY_NONE
-+config DEFAULT_IO_DELAY_TYPE
-+	int
-+	default IO_DELAY_TYPE_NONE
-+endif
+ struct mtrr_ops {
+ 	u32	vendor;
+ 	u32	use_intel_if;
+diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
+index 49e20c2..9f8ba92 100644
+--- a/arch/x86/kernel/cpu/mtrr/state.c
++++ b/arch/x86/kernel/cpu/mtrr/state.c
+@@ -4,6 +4,7 @@
+ #include <asm/mtrr.h>
+ #include <asm/msr.h>
+ #include <asm/processor-cyrix.h>
++#include <asm/processor-flags.h>
+ #include "mtrr.h"
+ 
+ 
+@@ -25,7 +26,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
+ 
+ 		/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
+ 		    a side-effect  */
+-		cr0 = read_cr0() | 0x40000000;
++		cr0 = read_cr0() | X86_CR0_CD;
+ 		wbinvd();
+ 		write_cr0(cr0);
+ 		wbinvd();
+diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
+index c02541e..9b83832 100644
+--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
++++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
+@@ -167,7 +167,6 @@ void release_evntsel_nmi(unsigned int msr)
+ 	clear_bit(counter, evntsel_nmi_owner);
+ }
+ 
+-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
+ EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
+ EXPORT_SYMBOL(reserve_perfctr_nmi);
+ EXPORT_SYMBOL(release_perfctr_nmi);
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 3900e46..0282132 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -188,7 +188,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+ static void c_stop(struct seq_file *m, void *v)
+ {
+ }
+-struct seq_operations cpuinfo_op = {
++const struct seq_operations cpuinfo_op = {
+ 	.start	= c_start,
+ 	.next	= c_next,
+ 	.stop	= c_stop,
+diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
+index 05c9936..dec66e4 100644
+--- a/arch/x86/kernel/cpuid.c
++++ b/arch/x86/kernel/cpuid.c
+@@ -50,7 +50,7 @@ struct cpuid_command {
+ 
+ static void cpuid_smp_cpuid(void *cmd_block)
+ {
+-	struct cpuid_command *cmd = (struct cpuid_command *)cmd_block;
++	struct cpuid_command *cmd = cmd_block;
+ 
+ 	cpuid(cmd->reg, &cmd->data[0], &cmd->data[1], &cmd->data[2],
+ 		      &cmd->data[3]);
+@@ -157,15 +157,15 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
+ 
+ 	switch (action) {
+ 	case CPU_UP_PREPARE:
+-	case CPU_UP_PREPARE_FROZEN:
+ 		err = cpuid_device_create(cpu);
+ 		break;
+ 	case CPU_UP_CANCELED:
+-	case CPU_UP_CANCELED_FROZEN:
+ 	case CPU_DEAD:
+-	case CPU_DEAD_FROZEN:
+ 		cpuid_device_destroy(cpu);
+ 		break;
++	case CPU_UP_CANCELED_FROZEN:
++		destroy_suspended_device(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
++		break;
+ 	}
+ 	return err ? NOTIFY_BAD : NOTIFY_OK;
+ }
+diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
+index 40978af..a47798b 100644
+--- a/arch/x86/kernel/doublefault_32.c
++++ b/arch/x86/kernel/doublefault_32.c
+@@ -17,7 +17,7 @@ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+ 
+ static void doublefault_fn(void)
+ {
+-	struct Xgt_desc_struct gdt_desc = {0, 0};
++	struct desc_ptr gdt_desc = {0, 0};
+ 	unsigned long gdt, tss;
+ 
+ 	store_gdt(&gdt_desc);
+@@ -33,14 +33,15 @@ static void doublefault_fn(void)
+ 		printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
+ 
+ 		if (ptr_ok(tss)) {
+-			struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
++			struct x86_hw_tss *t = (struct x86_hw_tss *)tss;
+ 
+-			printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->eip, t->esp);
++			printk(KERN_EMERG "eip = %08lx, esp = %08lx\n",
++			       t->ip, t->sp);
+ 
+ 			printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
+-				t->eax, t->ebx, t->ecx, t->edx);
++				t->ax, t->bx, t->cx, t->dx);
+ 			printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
+-				t->esi, t->edi);
++				t->si, t->di);
+ 		}
+ 	}
+ 
+@@ -50,15 +51,15 @@ static void doublefault_fn(void)
+ 
+ struct tss_struct doublefault_tss __cacheline_aligned = {
+ 	.x86_tss = {
+-		.esp0		= STACK_START,
++		.sp0		= STACK_START,
+ 		.ss0		= __KERNEL_DS,
+ 		.ldt		= 0,
+ 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
+ 
+-		.eip		= (unsigned long) doublefault_fn,
++		.ip		= (unsigned long) doublefault_fn,
+ 		/* 0x2 bit is always set */
+-		.eflags		= X86_EFLAGS_SF | 0x2,
+-		.esp		= STACK_START,
++		.flags		= X86_EFLAGS_SF | 0x2,
++		.sp		= STACK_START,
+ 		.es		= __USER_DS,
+ 		.cs		= __KERNEL_CS,
+ 		.ss		= __KERNEL_DS,
+diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
+new file mode 100644
+index 0000000..1c5ca4d
+--- /dev/null
++++ b/arch/x86/kernel/ds.c
+@@ -0,0 +1,464 @@
++/*
++ * Debug Store support
++ *
++ * This provides a low-level interface to the hardware's Debug Store
++ * feature that is used for last branch recording (LBR) and
++ * precise-event based sampling (PEBS).
++ *
++ * Different architectures use a different DS layout/pointer size.
++ * The below functions therefore work on a void*.
++ *
++ *
++ * Since there is no user for PEBS yet, only LBR (or branch
++ * trace store, BTS) is supported.
++ *
++ *
++ * Copyright (C) 2007 Intel Corporation.
++ * Markus Metzger <markus.t.metzger at intel.com>, Dec 2007
++ */
 +
-+config DEBUG_BOOT_PARAMS
-+	bool "Debug boot parameters"
-+	depends on DEBUG_KERNEL
-+	depends on DEBUG_FS
-+	help
-+	  This option will cause struct boot_params to be exported via debugfs.
++#include <asm/ds.h>
 +
-+config CPA_DEBUG
-+	bool "CPA self test code"
-+	depends on DEBUG_KERNEL
-+	help
-+	  Do change_page_attr self tests at boot.
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/slab.h>
 +
- endmenu
-diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 7aa1dc6..b08f182 100644
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -7,13 +7,252 @@ else
-         KBUILD_DEFCONFIG := $(ARCH)_defconfig
- endif
- 
--# No need to remake these files
--$(srctree)/arch/x86/Makefile%: ;
-+# BITS is used as extension for files which are available in a 32 bit
-+# and a 64 bit version to simplify shared Makefiles.
-+# e.g.: obj-y += foo_$(BITS).o
-+export BITS
- 
- ifeq ($(CONFIG_X86_32),y)
-+        BITS := 32
-         UTS_MACHINE := i386
--        include $(srctree)/arch/x86/Makefile_32
-+        CHECKFLAGS += -D__i386__
 +
-+        biarch := $(call cc-option,-m32)
-+        KBUILD_AFLAGS += $(biarch)
-+        KBUILD_CFLAGS += $(biarch)
++/*
++ * Debug Store (DS) save area configuration (see Intel64 and IA32
++ * Architectures Software Developer's Manual, section 18.5)
++ *
++ * The DS configuration consists of the following fields; different
++ * architectures vary in the size of those fields.
++ * - double-word aligned base linear address of the BTS buffer
++ * - write pointer into the BTS buffer
++ * - end linear address of the BTS buffer (one byte beyond the end of
++ *   the buffer)
++ * - interrupt pointer into BTS buffer
++ *   (interrupt occurs when write pointer passes interrupt pointer)
++ * - double-word aligned base linear address of the PEBS buffer
++ * - write pointer into the PEBS buffer
++ * - end linear address of the PEBS buffer (one byte beyond the end of
++ *   the buffer)
++ * - interrupt pointer into PEBS buffer
++ *   (interrupt occurs when write pointer passes interrupt pointer)
++ * - value to which counter is reset following counter overflow
++ *
++ * On later architectures, the last branch recording hardware uses
++ * 64bit pointers even in 32bit mode.
++ *
++ *
++ * Branch Trace Store (BTS) records store information about control
++ * flow changes. They at least provide the following information:
++ * - source linear address
++ * - destination linear address
++ *
++ * Netburst supported a predicated bit that had been dropped in later
++ * architectures. We do not support it.
++ *
++ *
++ * In order to abstract from the actual DS and BTS layout, we describe
++ * the access to the relevant fields.
++ * Thanks to Andi Kleen for proposing this design.
++ *
++ * The implementation, however, is not as general as it might seem. In
++ * order to stay somewhat simple and efficient, we assume an
++ * underlying unsigned type (mostly a pointer type) and we expect the
++ * field to be at least as big as that type.
++ */
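The comment above describes hiding the 32-bit vs. 64-bit DS layout differences behind per-field offset/size descriptors. The sketch below shows that pattern in isolation; the field names and the 8-byte layout are hypothetical and this is not the driver's actual API:

    #include <stdio.h>
    #include <string.h>

    /* a field is described by where it lives and how wide it is */
    struct access_desc {
            unsigned char offset;
            unsigned char size;
    };

    /* hypothetical layout: two 8-byte fields at offsets 0 and 8 */
    static const struct access_desc buffer_base = { 0, 8 };
    static const struct access_desc write_index = { 8, 8 };

    static unsigned long long get_field(const char *base, struct access_desc d)
    {
            unsigned long long v = 0;
            memcpy(&v, base + d.offset, d.size);
            return v;
    }

    int main(void)
    {
            char ds[16];
            unsigned long long base_val = 0x1000, idx_val = 0x1010;

            memcpy(ds + buffer_base.offset, &base_val, buffer_base.size);
            memcpy(ds + write_index.offset, &idx_val, write_index.size);

            printf("base=%#llx index=%#llx\n",
                   get_field(ds, buffer_base), get_field(ds, write_index));
            return 0;
    }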
 +
-+        ifdef CONFIG_RELOCATABLE
-+                LDFLAGS_vmlinux := --emit-relocs
-+        endif
++/*
++ * A special from_ip address to indicate that the BTS record is an
++ * info record that needs to be interpreted or skipped.
++ */
++#define BTS_ESCAPE_ADDRESS (-1)
 +
-+        KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
++/*
++ * A field access descriptor
++ */
++struct access_desc {
++	unsigned char offset;
++	unsigned char size;
++};
 +
-+        # prevent gcc from keeping the stack 16 byte aligned
-+        KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
++/*
++ * The configuration for a particular DS/BTS hardware implementation.
++ */
++struct ds_configuration {
++	/* the DS configuration */
++	unsigned char  sizeof_ds;
++	struct access_desc bts_buffer_base;
++	struct access_desc bts_index;
++	struct access_desc bts_absolute_maximum;
++	struct access_desc bts_interrupt_threshold;
++	/* the BTS configuration */
++	unsigned char  sizeof_bts;
++	struct access_desc from_ip;
++	struct access_desc to_ip;
++	/* BTS variants used to store additional information like
++	   timestamps */
++	struct access_desc info_type;
++	struct access_desc info_data;
++	unsigned long debugctl_mask;
++};
 +
-+        # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
-+        # a lot more stack due to the lack of sharing of stacklots:
-+        KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \
-+                echo $(call cc-option,-fno-unit-at-a-time); fi ;)
++/*
++ * The global configuration used by the below accessor functions
++ */
++static struct ds_configuration ds_cfg;
 +
-+        # CPU-specific tuning. Anything which can be shared with UML should go here.
-+        include $(srctree)/arch/x86/Makefile_32.cpu
-+        KBUILD_CFLAGS += $(cflags-y)
++/*
++ * Accessor functions for some DS and BTS fields using the above
++ * global ptrace_bts_cfg.
++ */
++static inline unsigned long get_bts_buffer_base(char *base)
++{
++	return *(unsigned long *)(base + ds_cfg.bts_buffer_base.offset);
++}
++static inline void set_bts_buffer_base(char *base, unsigned long value)
++{
++	(*(unsigned long *)(base + ds_cfg.bts_buffer_base.offset)) = value;
++}
++static inline unsigned long get_bts_index(char *base)
++{
++	return *(unsigned long *)(base + ds_cfg.bts_index.offset);
++}
++static inline void set_bts_index(char *base, unsigned long value)
++{
++	(*(unsigned long *)(base + ds_cfg.bts_index.offset)) = value;
++}
++static inline unsigned long get_bts_absolute_maximum(char *base)
++{
++	return *(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset);
++}
++static inline void set_bts_absolute_maximum(char *base, unsigned long value)
++{
++	(*(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset)) = value;
++}
++static inline unsigned long get_bts_interrupt_threshold(char *base)
++{
++	return *(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset);
++}
++static inline void set_bts_interrupt_threshold(char *base, unsigned long value)
++{
++	(*(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset)) = value;
++}
++static inline unsigned long get_from_ip(char *base)
++{
++	return *(unsigned long *)(base + ds_cfg.from_ip.offset);
++}
++static inline void set_from_ip(char *base, unsigned long value)
++{
++	(*(unsigned long *)(base + ds_cfg.from_ip.offset)) = value;
++}
++static inline unsigned long get_to_ip(char *base)
++{
++	return *(unsigned long *)(base + ds_cfg.to_ip.offset);
++}
++static inline void set_to_ip(char *base, unsigned long value)
++{
++	(*(unsigned long *)(base + ds_cfg.to_ip.offset)) = value;
++}
++static inline unsigned char get_info_type(char *base)
++{
++	return *(unsigned char *)(base + ds_cfg.info_type.offset);
++}
++static inline void set_info_type(char *base, unsigned char value)
++{
++	(*(unsigned char *)(base + ds_cfg.info_type.offset)) = value;
++}
++static inline unsigned long get_info_data(char *base)
++{
++	return *(unsigned long *)(base + ds_cfg.info_data.offset);
++}
++static inline void set_info_data(char *base, unsigned long value)
++{
++	(*(unsigned long *)(base + ds_cfg.info_data.offset)) = value;
++}
 +
-+        # temporary until string.h is fixed
-+        KBUILD_CFLAGS += -ffreestanding
- else
-+        BITS := 64
-         UTS_MACHINE := x86_64
--        include $(srctree)/arch/x86/Makefile_64
-+        CHECKFLAGS += -D__x86_64__ -m64
 +
-+        KBUILD_AFLAGS += -m64
-+        KBUILD_CFLAGS += -m64
++int ds_allocate(void **dsp, size_t bts_size_in_bytes)
++{
++	size_t bts_size_in_records;
++	unsigned long bts;
++	void *ds;
 +
-+        # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-+        cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-+        cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
++	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
++		return -EOPNOTSUPP;
 +
-+        cflags-$(CONFIG_MCORE2) += \
-+                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
-+        cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
-+        KBUILD_CFLAGS += $(cflags-y)
++	if (bts_size_in_bytes < 0)
++		return -EINVAL;
 +
-+        KBUILD_CFLAGS += -mno-red-zone
-+        KBUILD_CFLAGS += -mcmodel=kernel
++	bts_size_in_records =
++		bts_size_in_bytes / ds_cfg.sizeof_bts;
++	bts_size_in_bytes =
++		bts_size_in_records * ds_cfg.sizeof_bts;
 +
-+        # -funit-at-a-time shrinks the kernel .text considerably
-+        # unfortunately it makes reading oopses harder.
-+        KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
++	if (bts_size_in_bytes <= 0)
++		return -EINVAL;
 +
-+        # this works around some issues with generating unwind tables in older gccs
-+        # newer gccs do it by default
-+        KBUILD_CFLAGS += -maccumulate-outgoing-args
++	bts = (unsigned long)kzalloc(bts_size_in_bytes, GFP_KERNEL);
 +
-+        stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
-+        stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
-+                "$(CC)" -fstack-protector )
-+        stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
-+                "$(CC)" -fstack-protector-all )
++	if (!bts)
++		return -ENOMEM;
 +
-+        KBUILD_CFLAGS += $(stackp-y)
-+endif
++	ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
 +
-+# Stackpointer is addressed different for 32 bit and 64 bit x86
-+sp-$(CONFIG_X86_32) := esp
-+sp-$(CONFIG_X86_64) := rsp
++	if (!ds) {
++		kfree((void *)bts);
++		return -ENOMEM;
++	}
 +
-+# do binutils support CFI?
-+cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-+# is .cfi_signal_frame supported too?
-+cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe)
-+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe)
++	set_bts_buffer_base(ds, bts);
++	set_bts_index(ds, bts);
++	set_bts_absolute_maximum(ds, bts + bts_size_in_bytes);
++	set_bts_interrupt_threshold(ds, bts + bts_size_in_bytes + 1);
 +
-+LDFLAGS := -m elf_$(UTS_MACHINE)
-+OBJCOPYFLAGS := -O binary -R .note -R .comment -S
++	*dsp = ds;
++	return 0;
++}
 +
-+# Speed up the build
-+KBUILD_CFLAGS += -pipe
-+# Workaround for a gcc prelease that unfortunately was shipped in a suse release
-+KBUILD_CFLAGS += -Wno-sign-compare
-+#
-+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-+# prevent gcc from generating any FP code by mistake
-+KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
++int ds_free(void **dsp)
++{
++	if (*dsp)
++		kfree((void *)get_bts_buffer_base(*dsp));
++	kfree(*dsp);
++	*dsp = 0;
 +
-+###
-+# Sub architecture support
-+# fcore-y is linked before mcore-y files.
++	return 0;
++}
 +
-+# Default subarch .c files
-+mcore-y  := arch/x86/mach-default/
++int ds_get_bts_size(void *ds)
++{
++	int size_in_bytes;
 +
-+# Voyager subarch support
-+mflags-$(CONFIG_X86_VOYAGER)	:= -Iinclude/asm-x86/mach-voyager
-+mcore-$(CONFIG_X86_VOYAGER)	:= arch/x86/mach-voyager/
++	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
++		return -EOPNOTSUPP;
 +
-+# VISWS subarch support
-+mflags-$(CONFIG_X86_VISWS)	:= -Iinclude/asm-x86/mach-visws
-+mcore-$(CONFIG_X86_VISWS)	:= arch/x86/mach-visws/
++	if (!ds)
++		return 0;
 +
-+# NUMAQ subarch support
-+mflags-$(CONFIG_X86_NUMAQ)	:= -Iinclude/asm-x86/mach-numaq
-+mcore-$(CONFIG_X86_NUMAQ)	:= arch/x86/mach-default/
++	size_in_bytes =
++		get_bts_absolute_maximum(ds) -
++		get_bts_buffer_base(ds);
++	return size_in_bytes;
++}
 +
-+# BIGSMP subarch support
-+mflags-$(CONFIG_X86_BIGSMP)	:= -Iinclude/asm-x86/mach-bigsmp
-+mcore-$(CONFIG_X86_BIGSMP)	:= arch/x86/mach-default/
++int ds_get_bts_end(void *ds)
++{
++	int size_in_bytes = ds_get_bts_size(ds);
 +
-+#Summit subarch support
-+mflags-$(CONFIG_X86_SUMMIT)	:= -Iinclude/asm-x86/mach-summit
-+mcore-$(CONFIG_X86_SUMMIT)	:= arch/x86/mach-default/
++	if (size_in_bytes <= 0)
++		return size_in_bytes;
 +
-+# generic subarchitecture
-+mflags-$(CONFIG_X86_GENERICARCH):= -Iinclude/asm-x86/mach-generic
-+fcore-$(CONFIG_X86_GENERICARCH)	+= arch/x86/mach-generic/
-+mcore-$(CONFIG_X86_GENERICARCH)	:= arch/x86/mach-default/
++	return size_in_bytes / ds_cfg.sizeof_bts;
++}
 +
++int ds_get_bts_index(void *ds)
++{
++	int index_offset_in_bytes;
 +
-+# ES7000 subarch support
-+mflags-$(CONFIG_X86_ES7000)	:= -Iinclude/asm-x86/mach-es7000
-+fcore-$(CONFIG_X86_ES7000)	:= arch/x86/mach-es7000/
-+mcore-$(CONFIG_X86_ES7000)	:= arch/x86/mach-default/
++	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
++		return -EOPNOTSUPP;
 +
-+# RDC R-321x subarch support
-+mflags-$(CONFIG_X86_RDC321X)	:= -Iinclude/asm-x86/mach-rdc321x
-+mcore-$(CONFIG_X86_RDC321X)	:= arch/x86/mach-default
-+core-$(CONFIG_X86_RDC321X)	+= arch/x86/mach-rdc321x/
++	index_offset_in_bytes =
++		get_bts_index(ds) -
++		get_bts_buffer_base(ds);
 +
-+# default subarch .h files
-+mflags-y += -Iinclude/asm-x86/mach-default
++	return index_offset_in_bytes / ds_cfg.sizeof_bts;
++}
 +
-+# 64 bit does not support subarch support - clear sub arch variables
-+fcore-$(CONFIG_X86_64)  :=
-+mcore-$(CONFIG_X86_64)  :=
-+mflags-$(CONFIG_X86_64) :=
++int ds_set_overflow(void *ds, int method)
++{
++	switch (method) {
++	case DS_O_SIGNAL:
++		return -EOPNOTSUPP;
++	case DS_O_WRAP:
++		return 0;
++	default:
++		return -EINVAL;
++	}
++}
 +
-+KBUILD_CFLAGS += $(mflags-y)
-+KBUILD_AFLAGS += $(mflags-y)
++int ds_get_overflow(void *ds)
++{
++	return DS_O_WRAP;
++}
 +
-+###
-+# Kernel objects
++int ds_clear(void *ds)
++{
++	int bts_size = ds_get_bts_size(ds);
++	unsigned long bts_base;
 +
-+head-y                := arch/x86/kernel/head_$(BITS).o
-+head-$(CONFIG_X86_64) += arch/x86/kernel/head64.o
-+head-y                += arch/x86/kernel/init_task.o
++	if (bts_size <= 0)
++		return bts_size;
 +
-+libs-y  += arch/x86/lib/
++	bts_base = get_bts_buffer_base(ds);
++	memset((void *)bts_base, 0, bts_size);
 +
-+# Sub architecture files that needs linking first
-+core-y += $(fcore-y)
++	set_bts_index(ds, bts_base);
++	return 0;
++}
 +
-+# Xen paravirtualization support
-+core-$(CONFIG_XEN) += arch/x86/xen/
++int ds_read_bts(void *ds, int index, struct bts_struct *out)
++{
++	void *bts;
 +
-+# lguest paravirtualization support
-+core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
++	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
++		return -EOPNOTSUPP;
 +
-+core-y += arch/x86/kernel/
-+core-y += arch/x86/mm/
++	if (index < 0)
++		return -EINVAL;
 +
-+# Remaining sub architecture files
-+core-y += $(mcore-y)
++	if (index >= ds_get_bts_size(ds))
++		return -EINVAL;
 +
-+core-y += arch/x86/crypto/
-+core-y += arch/x86/vdso/
-+core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
++	bts = (void *)(get_bts_buffer_base(ds) + (index * ds_cfg.sizeof_bts));
 +
-+# drivers-y are linked after core-y
-+drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
-+drivers-$(CONFIG_PCI)            += arch/x86/pci/
++	memset(out, 0, sizeof(*out));
++	if (get_from_ip(bts) == BTS_ESCAPE_ADDRESS) {
++		out->qualifier       = get_info_type(bts);
++		out->variant.jiffies = get_info_data(bts);
++	} else {
++		out->qualifier = BTS_BRANCH;
++		out->variant.lbr.from_ip = get_from_ip(bts);
++		out->variant.lbr.to_ip   = get_to_ip(bts);
++	}
 +
-+# must be linked after kernel/
-+drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/
++	return sizeof(*out);
++}
 +
-+ifeq ($(CONFIG_X86_32),y)
-+drivers-$(CONFIG_PM) += arch/x86/power/
-+drivers-$(CONFIG_FB) += arch/x86/video/
- endif
++int ds_write_bts(void *ds, const struct bts_struct *in)
++{
++	unsigned long bts;
 +
-+####
-+# boot loader support. Several targets are kept for legacy purposes
++	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
++		return -EOPNOTSUPP;
 +
-+boot := arch/x86/boot
++	if (ds_get_bts_size(ds) <= 0)
++		return -ENXIO;
 +
-+PHONY += zImage bzImage compressed zlilo bzlilo \
-+         zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
++	bts = get_bts_index(ds);
 +
-+# Default kernel to build
-+all: bzImage
++	memset((void *)bts, 0, ds_cfg.sizeof_bts);
++	switch (in->qualifier) {
++	case BTS_INVALID:
++		break;
 +
-+# KBUILD_IMAGE specify target image being built
-+                    KBUILD_IMAGE := $(boot)/bzImage
-+zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage
++	case BTS_BRANCH:
++		set_from_ip((void *)bts, in->variant.lbr.from_ip);
++		set_to_ip((void *)bts, in->variant.lbr.to_ip);
++		break;
 +
-+zImage bzImage: vmlinux
-+	$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
-+	$(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot
-+	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/bzImage
++	case BTS_TASK_ARRIVES:
++	case BTS_TASK_DEPARTS:
++		set_from_ip((void *)bts, BTS_ESCAPE_ADDRESS);
++		set_info_type((void *)bts, in->qualifier);
++		set_info_data((void *)bts, in->variant.jiffies);
++		break;
 +
-+compressed: zImage
++	default:
++		return -EINVAL;
++	}
 +
-+zlilo bzlilo: vmlinux
-+	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zlilo
++	bts = bts + ds_cfg.sizeof_bts;
++	if (bts >= get_bts_absolute_maximum(ds))
++		bts = get_bts_buffer_base(ds);
++	set_bts_index(ds, bts);
 +
-+zdisk bzdisk: vmlinux
-+	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk
++	return ds_cfg.sizeof_bts;
++}
 +
-+fdimage fdimage144 fdimage288 isoimage: vmlinux
-+	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
++unsigned long ds_debugctl_mask(void)
++{
++	return ds_cfg.debugctl_mask;
++}
 +
-+install: vdso_install
-+	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
++#ifdef __i386__
++static const struct ds_configuration ds_cfg_netburst = {
++	.sizeof_ds = 9 * 4,
++	.bts_buffer_base = { 0, 4 },
++	.bts_index = { 4, 4 },
++	.bts_absolute_maximum = { 8, 4 },
++	.bts_interrupt_threshold = { 12, 4 },
++	.sizeof_bts = 3 * 4,
++	.from_ip = { 0, 4 },
++	.to_ip = { 4, 4 },
++	.info_type = { 4, 1 },
++	.info_data = { 8, 4 },
++	.debugctl_mask = (1<<2)|(1<<3)
++};
 +
-+PHONY += vdso_install
-+vdso_install:
-+	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
++static const struct ds_configuration ds_cfg_pentium_m = {
++	.sizeof_ds = 9 * 4,
++	.bts_buffer_base = { 0, 4 },
++	.bts_index = { 4, 4 },
++	.bts_absolute_maximum = { 8, 4 },
++	.bts_interrupt_threshold = { 12, 4 },
++	.sizeof_bts = 3 * 4,
++	.from_ip = { 0, 4 },
++	.to_ip = { 4, 4 },
++	.info_type = { 4, 1 },
++	.info_data = { 8, 4 },
++	.debugctl_mask = (1<<6)|(1<<7)
++};
++#endif /* __i386__ */
 +
-+archclean:
-+	$(Q)rm -rf $(objtree)/arch/i386
-+	$(Q)rm -rf $(objtree)/arch/x86_64
-+	$(Q)$(MAKE) $(clean)=$(boot)
++static const struct ds_configuration ds_cfg_core2 = {
++	.sizeof_ds = 9 * 8,
++	.bts_buffer_base = { 0, 8 },
++	.bts_index = { 8, 8 },
++	.bts_absolute_maximum = { 16, 8 },
++	.bts_interrupt_threshold = { 24, 8 },
++	.sizeof_bts = 3 * 8,
++	.from_ip = { 0, 8 },
++	.to_ip = { 8, 8 },
++	.info_type = { 8, 1 },
++	.info_data = { 16, 8 },
++	.debugctl_mask = (1<<6)|(1<<7)|(1<<9)
++};
 +
-+define archhelp
-+  echo  '* bzImage      - Compressed kernel image (arch/x86/boot/bzImage)'
-+  echo  '  install      - Install kernel using'
-+  echo  '                  (your) ~/bin/installkernel or'
-+  echo  '                  (distribution) /sbin/installkernel or'
-+  echo  '                  install to $$(INSTALL_PATH) and run lilo'
-+  echo  '  fdimage      - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
-+  echo  '  fdimage144   - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
-+  echo  '  fdimage288   - Create 2.8MB boot floppy image (arch/x86/boot/fdimage)'
-+  echo  '  isoimage     - Create a boot CD-ROM image (arch/x86/boot/image.iso)'
-+  echo  '                  bzdisk/fdimage*/isoimage also accept:'
-+  echo  '                  FDARGS="..."  arguments for the booted kernel'
-+  echo  '                  FDINITRD=file initrd for the booted kernel'
-+endef
++static inline void
++ds_configure(const struct ds_configuration *cfg)
++{
++	ds_cfg = *cfg;
++}
 +
-+CLEAN_FILES += arch/x86/boot/fdimage \
-+	       arch/x86/boot/image.iso \
-+	       arch/x86/boot/mtools.conf
-diff --git a/arch/x86/Makefile_32 b/arch/x86/Makefile_32
-deleted file mode 100644
-index 50394da..0000000
---- a/arch/x86/Makefile_32
-+++ /dev/null
-@@ -1,175 +0,0 @@
--#
--# i386 Makefile
--#
--# This file is included by the global makefile so that you can add your own
--# architecture-specific flags and dependencies. Remember to do have actions
--# for "archclean" cleaning up for this architecture.
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 1994 by Linus Torvalds
--#
--# 19990713  Artur Skawina <skawina at geocities.com>
--#           Added '-march' and '-mpreferred-stack-boundary' support
--#
--# 20050320  Kianusch Sayah Karadji <kianusch at sk-tech.net>
--#           Added support for GEODE CPU
--
--# BITS is used as extension for files which are available in a 32 bit
--# and a 64 bit version to simplify shared Makefiles.
--# e.g.: obj-y += foo_$(BITS).o
--BITS := 32
--export BITS
--
--HAS_BIARCH      := $(call cc-option-yn, -m32)
--ifeq ($(HAS_BIARCH),y)
--AS              := $(AS) --32
--LD              := $(LD) -m elf_i386
--CC              := $(CC) -m32
--endif
--
--LDFLAGS		:= -m elf_i386
--OBJCOPYFLAGS	:= -O binary -R .note -R .comment -S
--ifdef CONFIG_RELOCATABLE
--LDFLAGS_vmlinux := --emit-relocs
--endif
--CHECKFLAGS	+= -D__i386__
--
--KBUILD_CFLAGS += -pipe -msoft-float -mregparm=3 -freg-struct-return
--
--# prevent gcc from keeping the stack 16 byte aligned
--KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
--
--# CPU-specific tuning. Anything which can be shared with UML should go here.
--include $(srctree)/arch/x86/Makefile_32.cpu
--
--# temporary until string.h is fixed
--cflags-y += -ffreestanding
--
--# this works around some issues with generating unwind tables in older gccs
--# newer gccs do it by default
--cflags-y += -maccumulate-outgoing-args
--
--# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
--# a lot more stack due to the lack of sharing of stacklots:
--KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
--
--# do binutils support CFI?
--cflags-y += $(call as-instr,.cfi_startproc\n.cfi_rel_offset esp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
--KBUILD_AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_rel_offset esp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
--
--# is .cfi_signal_frame supported too?
--cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
--KBUILD_AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
--
--KBUILD_CFLAGS += $(cflags-y)
--
--# Default subarch .c files
--mcore-y  := arch/x86/mach-default
--
--# Voyager subarch support
--mflags-$(CONFIG_X86_VOYAGER)	:= -Iinclude/asm-x86/mach-voyager
--mcore-$(CONFIG_X86_VOYAGER)	:= arch/x86/mach-voyager
--
--# VISWS subarch support
--mflags-$(CONFIG_X86_VISWS)	:= -Iinclude/asm-x86/mach-visws
--mcore-$(CONFIG_X86_VISWS)	:= arch/x86/mach-visws
--
--# NUMAQ subarch support
--mflags-$(CONFIG_X86_NUMAQ)	:= -Iinclude/asm-x86/mach-numaq
--mcore-$(CONFIG_X86_NUMAQ)	:= arch/x86/mach-default
--
--# BIGSMP subarch support
--mflags-$(CONFIG_X86_BIGSMP)	:= -Iinclude/asm-x86/mach-bigsmp
--mcore-$(CONFIG_X86_BIGSMP)	:= arch/x86/mach-default
--
--#Summit subarch support
--mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-x86/mach-summit
--mcore-$(CONFIG_X86_SUMMIT)  := arch/x86/mach-default
--
--# generic subarchitecture
--mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-x86/mach-generic
--mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default
--core-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/
--
--# ES7000 subarch support
--mflags-$(CONFIG_X86_ES7000)	:= -Iinclude/asm-x86/mach-es7000
--mcore-$(CONFIG_X86_ES7000)	:= arch/x86/mach-default
--core-$(CONFIG_X86_ES7000)	:= arch/x86/mach-es7000/
--
--# Xen paravirtualization support
--core-$(CONFIG_XEN)		+= arch/x86/xen/
--
--# lguest paravirtualization support
--core-$(CONFIG_LGUEST_GUEST)	+= arch/x86/lguest/
--
--# default subarch .h files
--mflags-y += -Iinclude/asm-x86/mach-default
--
--head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task.o
--
--libs-y 					+= arch/x86/lib/
--core-y					+= arch/x86/kernel/ \
--					   arch/x86/mm/ \
--					   $(mcore-y)/ \
--					   arch/x86/crypto/
--drivers-$(CONFIG_MATH_EMULATION)	+= arch/x86/math-emu/
--drivers-$(CONFIG_PCI)			+= arch/x86/pci/
--# must be linked after kernel/
--drivers-$(CONFIG_OPROFILE)		+= arch/x86/oprofile/
--drivers-$(CONFIG_PM)			+= arch/x86/power/
--drivers-$(CONFIG_FB)                    += arch/x86/video/
--
--KBUILD_CFLAGS += $(mflags-y)
--KBUILD_AFLAGS += $(mflags-y)
--
--boot := arch/x86/boot
--
--PHONY += zImage bzImage compressed zlilo bzlilo \
--         zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
--
--all: bzImage
--
--# KBUILD_IMAGE specify target image being built
--                    KBUILD_IMAGE := $(boot)/bzImage
--zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage
--
--zImage bzImage: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
--	$(Q)mkdir -p $(objtree)/arch/i386/boot
--	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
--
--compressed: zImage
--
--zlilo bzlilo: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zlilo
--
--zdisk bzdisk: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk
--
--fdimage fdimage144 fdimage288 isoimage: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
--
--install:
--	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
--
--archclean:
--	$(Q)rm -rf $(objtree)/arch/i386/boot
--	$(Q)$(MAKE) $(clean)=arch/x86/boot
--
--define archhelp
--  echo  '* bzImage	- Compressed kernel image (arch/x86/boot/bzImage)'
--  echo  '  install	- Install kernel using'
--  echo  '		   (your) ~/bin/installkernel or'
--  echo  '		   (distribution) /sbin/installkernel or'
--  echo  '		   install to $$(INSTALL_PATH) and run lilo'
--  echo  '  bzdisk       - Create a boot floppy in /dev/fd0'
--  echo  '  fdimage      - Create a boot floppy image'
--  echo  '  isoimage     - Create a boot CD-ROM image'
--endef
--
--CLEAN_FILES += arch/x86/boot/fdimage \
--	       arch/x86/boot/image.iso \
--	       arch/x86/boot/mtools.conf
-diff --git a/arch/x86/Makefile_64 b/arch/x86/Makefile_64
-deleted file mode 100644
-index a804860..0000000
---- a/arch/x86/Makefile_64
-+++ /dev/null
-@@ -1,144 +0,0 @@
--#
--# x86_64 Makefile
--#
--# This file is included by the global makefile so that you can add your own
--# architecture-specific flags and dependencies. Remember to do have actions
--# for "archclean" and "archdep" for cleaning up and making dependencies for
--# this architecture
--#
--# This file is subject to the terms and conditions of the GNU General Public
--# License.  See the file "COPYING" in the main directory of this archive
--# for more details.
--#
--# Copyright (C) 1994 by Linus Torvalds
--#
--# 19990713  Artur Skawina <skawina at geocities.com>
--#           Added '-march' and '-mpreferred-stack-boundary' support
--# 20000913  Pavel Machek <pavel at suse.cz>
--#	    Converted for x86_64 architecture
--# 20010105  Andi Kleen, add IA32 compiler.
--#           ....and later removed it again....
--#
--# $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $
--
--# BITS is used as extension for files which are available in a 32 bit
--# and a 64 bit version to simplify shared Makefiles.
--# e.g.: obj-y += foo_$(BITS).o
--BITS := 64
--export BITS
--
--LDFLAGS		:= -m elf_x86_64
--OBJCOPYFLAGS	:= -O binary -R .note -R .comment -S
--LDFLAGS_vmlinux :=
--CHECKFLAGS      += -D__x86_64__ -m64
++void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
++{
++	switch (c->x86) {
++	case 0x6:
++		switch (c->x86_model) {
++#ifdef __i386__
++		case 0xD:
++		case 0xE: /* Pentium M */
++			ds_configure(&ds_cfg_pentium_m);
++			break;
++#endif /* __i386__ */
++		case 0xF: /* Core2 */
++			ds_configure(&ds_cfg_core2);
++			break;
++		default:
++			/* sorry, don't know about them */
++			break;
++		}
++		break;
++	case 0xF:
++		switch (c->x86_model) {
++#ifdef __i386__
++		case 0x0:
++		case 0x1:
++		case 0x2: /* Netburst */
++			ds_configure(&ds_cfg_netburst);
++			break;
++#endif /* __i386__ */
++		default:
++			/* sorry, don't know about them */
++			break;
++		}
++		break;
++	default:
++		/* sorry, don't know about them */
++		break;
++	}
++}
+diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
+index 18f500d..4e16ef4 100644
+--- a/arch/x86/kernel/e820_32.c
++++ b/arch/x86/kernel/e820_32.c
+@@ -7,7 +7,6 @@
+ #include <linux/kexec.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+-#include <linux/efi.h>
+ #include <linux/pfn.h>
+ #include <linux/uaccess.h>
+ #include <linux/suspend.h>
+@@ -17,11 +16,6 @@
+ #include <asm/e820.h>
+ #include <asm/setup.h>
+ 
+-#ifdef CONFIG_EFI
+-int efi_enabled = 0;
+-EXPORT_SYMBOL(efi_enabled);
+-#endif
 -
--cflags-y	:=
--cflags-kernel-y	:=
--cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
--cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
--# gcc doesn't support -march=core2 yet as of gcc 4.3, but I hope it
--# will eventually. Use -mtune=generic as fallback
--cflags-$(CONFIG_MCORE2) += \
--	$(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
--cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+ struct e820map e820;
+ struct change_member {
+ 	struct e820entry *pbios; /* pointer to original bios entry */
+@@ -37,26 +31,6 @@ unsigned long pci_mem_start = 0x10000000;
+ EXPORT_SYMBOL(pci_mem_start);
+ #endif
+ extern int user_defined_memmap;
+-struct resource data_resource = {
+-	.name	= "Kernel data",
+-	.start	= 0,
+-	.end	= 0,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+-};
 -
--cflags-y += -m64
--cflags-y += -mno-red-zone
--cflags-y += -mcmodel=kernel
--cflags-y += -pipe
--cflags-y += -Wno-sign-compare
--cflags-y += -fno-asynchronous-unwind-tables
--ifneq ($(CONFIG_DEBUG_INFO),y)
--# -fweb shrinks the kernel a bit, but the difference is very small
--# it also messes up debugging, so don't use it for now.
--#cflags-y += $(call cc-option,-fweb)
--endif
--# -funit-at-a-time shrinks the kernel .text considerably
--# unfortunately it makes reading oopses harder.
--cflags-y += $(call cc-option,-funit-at-a-time)
--# prevent gcc from generating any FP code by mistake
--cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
--# this works around some issues with generating unwind tables in older gccs
--# newer gccs do it by default
--cflags-y += -maccumulate-outgoing-args
+-struct resource code_resource = {
+-	.name	= "Kernel code",
+-	.start	= 0,
+-	.end	= 0,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+-};
 -
--# do binutils support CFI?
--cflags-y += $(call as-instr,.cfi_startproc\n.cfi_rel_offset rsp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
--KBUILD_AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_rel_offset rsp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
+-struct resource bss_resource = {
+-	.name	= "Kernel bss",
+-	.start	= 0,
+-	.end	= 0,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+-};
+ 
+ static struct resource system_rom_resource = {
+ 	.name	= "System ROM",
+@@ -111,60 +85,6 @@ static struct resource video_rom_resource = {
+ 	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ };
+ 
+-static struct resource video_ram_resource = {
+-	.name	= "Video RAM area",
+-	.start	= 0xa0000,
+-	.end	= 0xbffff,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+-};
 -
--# is .cfi_signal_frame supported too?
--cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
--KBUILD_AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
+-static struct resource standard_io_resources[] = { {
+-	.name	= "dma1",
+-	.start	= 0x0000,
+-	.end	= 0x001f,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+-	.name	= "pic1",
+-	.start	= 0x0020,
+-	.end	= 0x0021,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+-	.name   = "timer0",
+-	.start	= 0x0040,
+-	.end    = 0x0043,
+-	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+-	.name   = "timer1",
+-	.start  = 0x0050,
+-	.end    = 0x0053,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+-	.name	= "keyboard",
+-	.start	= 0x0060,
+-	.end	= 0x006f,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+-	.name	= "dma page reg",
+-	.start	= 0x0080,
+-	.end	= 0x008f,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+-	.name	= "pic2",
+-	.start	= 0x00a0,
+-	.end	= 0x00a1,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+-	.name	= "dma2",
+-	.start	= 0x00c0,
+-	.end	= 0x00df,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+-	.name	= "fpu",
+-	.start	= 0x00f0,
+-	.end	= 0x00ff,
+-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+-} };
 -
--cflags-$(CONFIG_CC_STACKPROTECTOR) += $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh "$(CC)" -fstack-protector )
--cflags-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh "$(CC)" -fstack-protector-all )
+ #define ROMSIGNATURE 0xaa55
+ 
+ static int __init romsignature(const unsigned char *rom)
+@@ -260,10 +180,9 @@ static void __init probe_roms(void)
+  * Request address space for all standard RAM and ROM resources
+  * and also for regions reported as reserved by the e820.
+  */
+-static void __init
+-legacy_init_iomem_resources(struct resource *code_resource,
+-			    struct resource *data_resource,
+-			    struct resource *bss_resource)
++void __init init_iomem_resources(struct resource *code_resource,
++		struct resource *data_resource,
++		struct resource *bss_resource)
+ {
+ 	int i;
+ 
+@@ -305,35 +224,6 @@ legacy_init_iomem_resources(struct resource *code_resource,
+ 	}
+ }
+ 
+-/*
+- * Request address space for all standard resources
+- *
+- * This is called just before pcibios_init(), which is also a
+- * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
+- */
+-static int __init request_standard_resources(void)
+-{
+-	int i;
 -
--KBUILD_CFLAGS += $(cflags-y)
--CFLAGS_KERNEL += $(cflags-kernel-y)
--KBUILD_AFLAGS += -m64
+-	printk("Setting up standard PCI resources\n");
+-	if (efi_enabled)
+-		efi_initialize_iomem_resources(&code_resource,
+-				&data_resource, &bss_resource);
+-	else
+-		legacy_init_iomem_resources(&code_resource,
+-				&data_resource, &bss_resource);
 -
--head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task.o
+-	/* EFI systems may still have VGA */
+-	request_resource(&iomem_resource, &video_ram_resource);
 -
--libs-y 					+= arch/x86/lib/
--core-y					+= arch/x86/kernel/ \
--					   arch/x86/mm/ \
--					   arch/x86/crypto/ \
--					   arch/x86/vdso/
--core-$(CONFIG_IA32_EMULATION)		+= arch/x86/ia32/
--drivers-$(CONFIG_PCI)			+= arch/x86/pci/
--drivers-$(CONFIG_OPROFILE)		+= arch/x86/oprofile/
+-	/* request I/O space for devices used on all i[345]86 PCs */
+-	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+-		request_resource(&ioport_resource, &standard_io_resources[i]);
+-	return 0;
+-}
 -
--boot := arch/x86/boot
+-subsys_initcall(request_standard_resources);
 -
--PHONY += bzImage bzlilo install archmrproper \
--	 fdimage fdimage144 fdimage288 isoimage archclean
+ #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
+ /**
+  * e820_mark_nosave_regions - Find the ranges of physical addresses that do not
+@@ -370,19 +260,17 @@ void __init add_memory_region(unsigned long long start,
+ {
+ 	int x;
+ 
+-	if (!efi_enabled) {
+-       		x = e820.nr_map;
 -
--#Default target when executing "make"
--all: bzImage
+-		if (x == E820MAX) {
+-		    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+-		    return;
+-		}
++	x = e820.nr_map;
+ 
+-		e820.map[x].addr = start;
+-		e820.map[x].size = size;
+-		e820.map[x].type = type;
+-		e820.nr_map++;
++	if (x == E820MAX) {
++		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++		return;
+ 	}
++
++	e820.map[x].addr = start;
++	e820.map[x].size = size;
++	e820.map[x].type = type;
++	e820.nr_map++;
+ } /* add_memory_region */
+ 
+ /*
+@@ -598,29 +486,6 @@ int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+ }
+ 
+ /*
+- * Callback for efi_memory_walk.
+- */
+-static int __init
+-efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+-{
+-	unsigned long *max_pfn = arg, pfn;
 -
--BOOTIMAGE                     := arch/x86/boot/bzImage
--KBUILD_IMAGE                  := $(BOOTIMAGE)
+-	if (start < end) {
+-		pfn = PFN_UP(end -1);
+-		if (pfn > *max_pfn)
+-			*max_pfn = pfn;
+-	}
+-	return 0;
+-}
 -
--bzImage: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
--	$(Q)mkdir -p $(objtree)/arch/x86_64/boot
--	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
+-static int __init
+-efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
+-{
+-	memory_present(0, PFN_UP(start), PFN_DOWN(end));
+-	return 0;
+-}
 -
--bzlilo: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zlilo
+-/*
+  * Find the highest page frame number we have available
+  */
+ void __init find_max_pfn(void)
+@@ -628,11 +493,6 @@ void __init find_max_pfn(void)
+ 	int i;
+ 
+ 	max_pfn = 0;
+-	if (efi_enabled) {
+-		efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+-		efi_memmap_walk(efi_memory_present_wrapper, NULL);
+-		return;
+-	}
+ 
+ 	for (i = 0; i < e820.nr_map; i++) {
+ 		unsigned long start, end;
+@@ -650,34 +510,12 @@ void __init find_max_pfn(void)
+ }
+ 
+ /*
+- * Free all available memory for boot time allocation.  Used
+- * as a callback function by efi_memory_walk()
+- */
 -
--bzdisk: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk
+-static int __init
+-free_available_memory(unsigned long start, unsigned long end, void *arg)
+-{
+-	/* check max_low_pfn */
+-	if (start >= (max_low_pfn << PAGE_SHIFT))
+-		return 0;
+-	if (end >= (max_low_pfn << PAGE_SHIFT))
+-		end = max_low_pfn << PAGE_SHIFT;
+-	if (start < end)
+-		free_bootmem(start, end - start);
 -
--fdimage fdimage144 fdimage288 isoimage: vmlinux
--	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
+-	return 0;
+-}
+-/*
+  * Register fully available low RAM pages with the bootmem allocator.
+  */
+ void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+ {
+ 	int i;
+ 
+-	if (efi_enabled) {
+-		efi_memmap_walk(free_available_memory, NULL);
+-		return;
+-	}
+ 	for (i = 0; i < e820.nr_map; i++) {
+ 		unsigned long curr_pfn, last_pfn, size;
+ 		/*
+@@ -785,56 +623,12 @@ void __init print_memory_map(char *who)
+ 	}
+ }
+ 
+-static __init __always_inline void efi_limit_regions(unsigned long long size)
+-{
+-	unsigned long long current_addr = 0;
+-	efi_memory_desc_t *md, *next_md;
+-	void *p, *p1;
+-	int i, j;
 -
--install: vdso_install
--	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ 
+-	j = 0;
+-	p1 = memmap.map;
+-	for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
+-		md = p;
+-		next_md = p1;
+-		current_addr = md->phys_addr +
+-			PFN_PHYS(md->num_pages);
+-		if (is_available_memory(md)) {
+-			if (md->phys_addr >= size) continue;
+-			memcpy(next_md, md, memmap.desc_size);
+-			if (current_addr >= size) {
+-				next_md->num_pages -=
+-					PFN_UP(current_addr-size);
+-			}
+-			p1 += memmap.desc_size;
+-			next_md = p1;
+-			j++;
+-		} else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
+-			   EFI_MEMORY_RUNTIME) {
+-			/* In order to make runtime services
+-			 * available we have to include runtime
+-			 * memory regions in memory map */
+-			memcpy(next_md, md, memmap.desc_size);
+-			p1 += memmap.desc_size;
+-			next_md = p1;
+-			j++;
+-		}
+-	}
+-	memmap.nr_map = j;
+-	memmap.map_end = memmap.map +
+-		(memmap.nr_map * memmap.desc_size);
+-}
 -
--vdso_install:
--ifeq ($(CONFIG_IA32_EMULATION),y)
--	$(Q)$(MAKE) $(build)=arch/x86/ia32 $@
--endif
--	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
+ void __init limit_regions(unsigned long long size)
+ {
+ 	unsigned long long current_addr;
+ 	int i;
+ 
+ 	print_memory_map("limit_regions start");
+-	if (efi_enabled) {
+-		efi_limit_regions(size);
+-		return;
+-	}
+ 	for (i = 0; i < e820.nr_map; i++) {
+ 		current_addr = e820.map[i].addr + e820.map[i].size;
+ 		if (current_addr < size)
+@@ -955,3 +749,14 @@ static int __init parse_memmap(char *arg)
+ 	return 0;
+ }
+ early_param("memmap", parse_memmap);
++void __init update_e820(void)
++{
++	u8 nr_map;
++
++	nr_map = e820.nr_map;
++	if (sanitize_e820_map(e820.map, &nr_map))
++		return;
++	e820.nr_map = nr_map;
++	printk(KERN_INFO "modified physical RAM map:\n");
++	print_memory_map("modified");
++}
+diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
+index 04698e0..c617174 100644
+--- a/arch/x86/kernel/e820_64.c
++++ b/arch/x86/kernel/e820_64.c
+@@ -1,4 +1,4 @@
+-/* 
++/*
+  * Handle the memory map.
+  * The functions here do the job until bootmem takes over.
+  *
+@@ -26,80 +26,87 @@
+ #include <asm/proto.h>
+ #include <asm/setup.h>
+ #include <asm/sections.h>
++#include <asm/kdebug.h>
+ 
+ struct e820map e820;
+ 
+-/* 
++/*
+  * PFN of last memory page.
+  */
+-unsigned long end_pfn; 
+-EXPORT_SYMBOL(end_pfn);
++unsigned long end_pfn;
+ 
+-/* 
++/*
+  * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
+  * The direct mapping extends to end_pfn_map, so that we can directly access
+  * apertures, ACPI and other tables without having to play with fixmaps.
+- */ 
+-unsigned long end_pfn_map; 
++ */
++unsigned long end_pfn_map;
+ 
+-/* 
++/*
+  * Last pfn which the user wants to use.
+  */
+ static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
+ 
+-extern struct resource code_resource, data_resource, bss_resource;
 -
--archclean:
--	$(Q)rm -rf $(objtree)/arch/x86_64/boot
--	$(Q)$(MAKE) $(clean)=$(boot)
+-/* Check for some hardcoded bad areas that early boot is not allowed to touch */ 
+-static inline int bad_addr(unsigned long *addrp, unsigned long size)
+-{ 
+-	unsigned long addr = *addrp, last = addr + size; 
 -
--define archhelp
--  echo  '* bzImage	- Compressed kernel image (arch/x86/boot/bzImage)'
--  echo  '  install	- Install kernel using'
--  echo  '		   (your) ~/bin/installkernel or'
--  echo  '		   (distribution) /sbin/installkernel or'
--  echo  '		   install to $$(INSTALL_PATH) and run lilo'
--  echo  '  bzdisk       - Create a boot floppy in /dev/fd0'
--  echo  '  fdimage      - Create a boot floppy image'
--  echo  '  isoimage     - Create a boot CD-ROM image'
--endef
+-	/* various gunk below that needed for SMP startup */
+-	if (addr < 0x8000) { 
+-		*addrp = PAGE_ALIGN(0x8000);
+-		return 1; 
+-	}
 -
--CLEAN_FILES += arch/x86/boot/fdimage \
--	       arch/x86/boot/image.iso \
--	       arch/x86/boot/mtools.conf
+-	/* direct mapping tables of the kernel */
+-	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { 
+-		*addrp = PAGE_ALIGN(table_end << PAGE_SHIFT);
+-		return 1;
+-	} 
 -
+-	/* initrd */ 
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+-		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
+-		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
+-		unsigned long ramdisk_end   = ramdisk_image+ramdisk_size;
 -
-diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
-index 7a3116c..349b81a 100644
---- a/arch/x86/boot/Makefile
-+++ b/arch/x86/boot/Makefile
-@@ -28,9 +28,11 @@ SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
- targets		:= vmlinux.bin setup.bin setup.elf zImage bzImage
- subdir- 	:= compressed
+-		if (last >= ramdisk_image && addr < ramdisk_end) {
+-			*addrp = PAGE_ALIGN(ramdisk_end);
+-			return 1;
+-		}
+-	} 
++/*
++ * Early reserved memory areas.
++ */
++#define MAX_EARLY_RES 20
++
++struct early_res {
++	unsigned long start, end;
++};
++static struct early_res early_res[MAX_EARLY_RES] __initdata = {
++	{ 0, PAGE_SIZE },			/* BIOS data page */
++#ifdef CONFIG_SMP
++	{ SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE },
+ #endif
+-	/* kernel code */
+-	if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) {
+-		*addrp = PAGE_ALIGN(__pa_symbol(&_end));
+-		return 1;
++	{}
++};
++
++void __init reserve_early(unsigned long start, unsigned long end)
++{
++	int i;
++	struct early_res *r;
++	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
++		r = &early_res[i];
++		if (end > r->start && start < r->end)
++			panic("Overlapping early reservations %lx-%lx to %lx-%lx\n",
++			      start, end, r->start, r->end);
+ 	}
++	if (i >= MAX_EARLY_RES)
++		panic("Too many early reservations");
++	r = &early_res[i];
++	r->start = start;
++	r->end = end;
++}
  
--setup-y		+= a20.o apm.o cmdline.o copy.o cpu.o cpucheck.o edd.o
-+setup-y		+= a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
- setup-y		+= header.o main.o mca.o memory.o pm.o pmjump.o
--setup-y		+= printf.o string.o tty.o video.o version.o voyager.o
-+setup-y		+= printf.o string.o tty.o video.o version.o
-+setup-$(CONFIG_X86_APM_BOOT) += apm.o
-+setup-$(CONFIG_X86_VOYAGER) += voyager.o
+-	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
+-		*addrp = PAGE_ALIGN(ebda_addr + ebda_size);
+-		return 1;
++void __init early_res_to_bootmem(void)
++{
++	int i;
++	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
++		struct early_res *r = &early_res[i];
++		reserve_bootmem_generic(r->start, r->end - r->start);
+ 	}
++}
  
- # The link order of the video-*.o modules can matter.  In particular,
- # video-vga.o *must* be listed first, followed by video-vesa.o.
-@@ -49,10 +51,7 @@ HOSTCFLAGS_build.o := $(LINUXINCLUDE)
+-#ifdef CONFIG_NUMA
+-	/* NUMA memory to node map */
+-	if (last >= nodemap_addr && addr < nodemap_addr + nodemap_size) {
+-		*addrp = nodemap_addr + nodemap_size;
+-		return 1;
++/* Check for already reserved areas */
++static inline int bad_addr(unsigned long *addrp, unsigned long size)
++{
++	int i;
++	unsigned long addr = *addrp, last;
++	int changed = 0;
++again:
++	last = addr + size;
++	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
++		struct early_res *r = &early_res[i];
++		if (last >= r->start && addr < r->end) {
++			*addrp = addr = r->end;
++			changed = 1;
++			goto again;
++		}
+ 	}
+-#endif
+-	/* XXX ramdisk image here? */ 
+-	return 0;
+-} 
++	return changed;
++}
  
- # How to compile the 16-bit code.  Note we always compile for -march=i386,
- # that way we can complain to the user if the CPU is insufficient.
--cflags-$(CONFIG_X86_32) :=
--cflags-$(CONFIG_X86_64) := -m32
- KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
--		   $(cflags-y) \
- 		   -Wall -Wstrict-prototypes \
- 		   -march=i386 -mregparm=3 \
- 		   -include $(srctree)/$(src)/code16gcc.h \
-@@ -62,6 +61,7 @@ KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
- 			$(call cc-option, -fno-unit-at-a-time)) \
- 		   $(call cc-option, -fno-stack-protector) \
- 		   $(call cc-option, -mpreferred-stack-boundary=2)
-+KBUILD_CFLAGS +=   $(call cc-option,-m32)
- KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ /*
+  * This function checks if any part of the range <start,end> is mapped
+@@ -107,16 +114,18 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
+  */
+ int
+ e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
+-{ 
++{
+ 	int i;
+-	for (i = 0; i < e820.nr_map; i++) { 
+-		struct e820entry *ei = &e820.map[i]; 
+-		if (type && ei->type != type) 
++
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i];
++
++		if (type && ei->type != type)
+ 			continue;
+ 		if (ei->addr >= end || ei->addr + ei->size <= start)
+-			continue; 
+-		return 1; 
+-	} 
++			continue;
++		return 1;
++	}
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(e820_any_mapped);
+@@ -127,11 +136,14 @@ EXPORT_SYMBOL_GPL(e820_any_mapped);
+  * Note: this function only works correct if the e820 table is sorted and
+  * not-overlapping, which is the case
+  */
+-int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
++int __init e820_all_mapped(unsigned long start, unsigned long end,
++			   unsigned type)
+ {
+ 	int i;
++
+ 	for (i = 0; i < e820.nr_map; i++) {
+ 		struct e820entry *ei = &e820.map[i];
++
+ 		if (type && ei->type != type)
+ 			continue;
+ 		/* is the region (part) in overlap with the current region ?*/
+@@ -143,65 +155,73 @@ int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type
+ 		 */
+ 		if (ei->addr <= start)
+ 			start = ei->addr + ei->size;
+-		/* if start is now at or beyond end, we're done, full coverage */
++		/*
++		 * if start is now at or beyond end, we're done, full
++		 * coverage
++		 */
+ 		if (start >= end)
+-			return 1; /* we're done */
++			return 1;
+ 	}
+ 	return 0;
+ }
  
- $(obj)/zImage:  IMAGE_OFFSET := 0x1000
-diff --git a/arch/x86/boot/apm.c b/arch/x86/boot/apm.c
-index eab50c5..c117c7f 100644
---- a/arch/x86/boot/apm.c
-+++ b/arch/x86/boot/apm.c
-@@ -19,8 +19,6 @@
+-/* 
+- * Find a free area in a specific range. 
+- */ 
+-unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size) 
+-{ 
+-	int i; 
+-	for (i = 0; i < e820.nr_map; i++) { 
+-		struct e820entry *ei = &e820.map[i]; 
+-		unsigned long addr = ei->addr, last; 
+-		if (ei->type != E820_RAM) 
+-			continue; 
+-		if (addr < start) 
++/*
++ * Find a free area in a specific range.
++ */
++unsigned long __init find_e820_area(unsigned long start, unsigned long end,
++				    unsigned size)
++{
++	int i;
++
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i];
++		unsigned long addr = ei->addr, last;
++
++		if (ei->type != E820_RAM)
++			continue;
++		if (addr < start)
+ 			addr = start;
+-		if (addr > ei->addr + ei->size) 
+-			continue; 
++		if (addr > ei->addr + ei->size)
++			continue;
+ 		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
+ 			;
+ 		last = PAGE_ALIGN(addr) + size;
+ 		if (last > ei->addr + ei->size)
+ 			continue;
+-		if (last > end) 
++		if (last > end)
+ 			continue;
+-		return addr; 
+-	} 
+-	return -1UL;		
+-} 
++		return addr;
++	}
++	return -1UL;
++}
  
- #include "boot.h"
+ /*
+  * Find the highest page frame number we have available
+  */
+ unsigned long __init e820_end_of_ram(void)
+ {
+-	unsigned long end_pfn = 0;
++	unsigned long end_pfn;
++
+ 	end_pfn = find_max_pfn_with_active_regions();
+-	
+-	if (end_pfn > end_pfn_map) 
++
++	if (end_pfn > end_pfn_map)
+ 		end_pfn_map = end_pfn;
+ 	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
+ 		end_pfn_map = MAXMEM>>PAGE_SHIFT;
+ 	if (end_pfn > end_user_pfn)
+ 		end_pfn = end_user_pfn;
+-	if (end_pfn > end_pfn_map) 
+-		end_pfn = end_pfn_map; 
++	if (end_pfn > end_pfn_map)
++		end_pfn = end_pfn_map;
  
--#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
--
- int query_apm_bios(void)
+-	printk("end_pfn_map = %lu\n", end_pfn_map);
+-	return end_pfn;	
++	printk(KERN_INFO "end_pfn_map = %lu\n", end_pfn_map);
++	return end_pfn;
+ }
+ 
+ /*
+  * Mark e820 reserved areas as busy for the resource manager.
+  */
+-void __init e820_reserve_resources(void)
++void __init e820_reserve_resources(struct resource *code_resource,
++		struct resource *data_resource, struct resource *bss_resource)
  {
- 	u16 ax, bx, cx, dx, di;
-@@ -95,4 +93,3 @@ int query_apm_bios(void)
- 	return 0;
+ 	int i;
+ 	for (i = 0; i < e820.nr_map; i++) {
+@@ -219,13 +239,13 @@ void __init e820_reserve_resources(void)
+ 		request_resource(&iomem_resource, res);
+ 		if (e820.map[i].type == E820_RAM) {
+ 			/*
+-			 *  We don't know which RAM region contains kernel data,
+-			 *  so we try it repeatedly and let the resource manager
+-			 *  test it.
++			 * We don't know which RAM region contains kernel data,
++			 * so we try it repeatedly and let the resource manager
++			 * test it.
+ 			 */
+-			request_resource(res, &code_resource);
+-			request_resource(res, &data_resource);
+-			request_resource(res, &bss_resource);
++			request_resource(res, code_resource);
++			request_resource(res, data_resource);
++			request_resource(res, bss_resource);
+ #ifdef CONFIG_KEXEC
+ 			if (crashk_res.start != crashk_res.end)
+ 				request_resource(res, &crashk_res);
+@@ -322,9 +342,9 @@ e820_register_active_regions(int nid, unsigned long start_pfn,
+ 			add_active_range(nid, ei_startpfn, ei_endpfn);
  }
  
--#endif
-diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
-index d2b5adf..7822a49 100644
---- a/arch/x86/boot/boot.h
-+++ b/arch/x86/boot/boot.h
-@@ -109,7 +109,7 @@ typedef unsigned int addr_t;
- static inline u8 rdfs8(addr_t addr)
+-/* 
++/*
+  * Add a memory region to the kernel e820 map.
+- */ 
++ */
+ void __init add_memory_region(unsigned long start, unsigned long size, int type)
  {
- 	u8 v;
--	asm volatile("movb %%fs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
-+	asm volatile("movb %%fs:%1,%0" : "=q" (v) : "m" (*(u8 *)addr));
- 	return v;
+ 	int x = e820.nr_map;
+@@ -349,9 +369,7 @@ unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
+ {
+ 	unsigned long start_pfn = start >> PAGE_SHIFT;
+ 	unsigned long end_pfn = end >> PAGE_SHIFT;
+-	unsigned long ei_startpfn;
+-	unsigned long ei_endpfn;
+-	unsigned long ram = 0;
++	unsigned long ei_startpfn, ei_endpfn, ram = 0;
+ 	int i;
+ 
+ 	for (i = 0; i < e820.nr_map; i++) {
+@@ -363,28 +381,31 @@ unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
+ 	return end - start - (ram << PAGE_SHIFT);
  }
- static inline u16 rdfs16(addr_t addr)
-@@ -127,21 +127,21 @@ static inline u32 rdfs32(addr_t addr)
  
- static inline void wrfs8(u8 v, addr_t addr)
+-void __init e820_print_map(char *who)
++static void __init e820_print_map(char *who)
  {
--	asm volatile("movb %1,%%fs:%0" : "+m" (*(u8 *)addr) : "r" (v));
-+	asm volatile("movb %1,%%fs:%0" : "+m" (*(u8 *)addr) : "qi" (v));
+ 	int i;
+ 
+ 	for (i = 0; i < e820.nr_map; i++) {
+ 		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
+-			(unsigned long long) e820.map[i].addr,
+-			(unsigned long long) (e820.map[i].addr + e820.map[i].size));
++		       (unsigned long long) e820.map[i].addr,
++		       (unsigned long long)
++		       (e820.map[i].addr + e820.map[i].size));
+ 		switch (e820.map[i].type) {
+-		case E820_RAM:	printk("(usable)\n");
+-				break;
++		case E820_RAM:
++			printk(KERN_CONT "(usable)\n");
++			break;
+ 		case E820_RESERVED:
+-				printk("(reserved)\n");
+-				break;
++			printk(KERN_CONT "(reserved)\n");
++			break;
+ 		case E820_ACPI:
+-				printk("(ACPI data)\n");
+-				break;
++			printk(KERN_CONT "(ACPI data)\n");
++			break;
+ 		case E820_NVS:
+-				printk("(ACPI NVS)\n");
+-				break;
+-		default:	printk("type %u\n", e820.map[i].type);
+-				break;
++			printk(KERN_CONT "(ACPI NVS)\n");
++			break;
++		default:
++			printk(KERN_CONT "type %u\n", e820.map[i].type);
++			break;
+ 		}
+ 	}
  }
- static inline void wrfs16(u16 v, addr_t addr)
+@@ -392,11 +413,11 @@ void __init e820_print_map(char *who)
+ /*
+  * Sanitize the BIOS e820 map.
+  *
+- * Some e820 responses include overlapping entries.  The following 
++ * Some e820 responses include overlapping entries. The following
+  * replaces the original e820 map with a new one, removing overlaps.
+  *
+  */
+-static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++static int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map)
  {
--	asm volatile("movw %1,%%fs:%0" : "+m" (*(u16 *)addr) : "r" (v));
-+	asm volatile("movw %1,%%fs:%0" : "+m" (*(u16 *)addr) : "ri" (v));
+ 	struct change_member {
+ 		struct e820entry *pbios; /* pointer to original bios entry */
+@@ -416,7 +437,8 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+ 	int i;
+ 
+ 	/*
+-		Visually we're performing the following (1,2,3,4 = memory types)...
++		Visually we're performing the following
++		(1,2,3,4 = memory types)...
+ 
+ 		Sample memory map (w/overlaps):
+ 		   ____22__________________
+@@ -458,22 +480,23 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+ 	old_nr = *pnr_map;
+ 
+ 	/* bail out if we find any unreasonable addresses in bios map */
+-	for (i=0; i<old_nr; i++)
++	for (i = 0; i < old_nr; i++)
+ 		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+ 			return -1;
+ 
+ 	/* create pointers for initial change-point information (for sorting) */
+-	for (i=0; i < 2*old_nr; i++)
++	for (i = 0; i < 2 * old_nr; i++)
+ 		change_point[i] = &change_point_list[i];
+ 
+ 	/* record all known change-points (starting and ending addresses),
+ 	   omitting those that are for empty memory regions */
+ 	chgidx = 0;
+-	for (i=0; i < old_nr; i++)	{
++	for (i = 0; i < old_nr; i++)	{
+ 		if (biosmap[i].size != 0) {
+ 			change_point[chgidx]->addr = biosmap[i].addr;
+ 			change_point[chgidx++]->pbios = &biosmap[i];
+-			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++			change_point[chgidx]->addr = biosmap[i].addr +
++				biosmap[i].size;
+ 			change_point[chgidx++]->pbios = &biosmap[i];
+ 		}
+ 	}
+@@ -483,75 +506,106 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+ 	still_changing = 1;
+ 	while (still_changing)	{
+ 		still_changing = 0;
+-		for (i=1; i < chg_nr; i++)  {
+-			/* if <current_addr> > <last_addr>, swap */
+-			/* or, if current=<start_addr> & last=<end_addr>, swap */
+-			if ((change_point[i]->addr < change_point[i-1]->addr) ||
+-				((change_point[i]->addr == change_point[i-1]->addr) &&
+-				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
+-				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+-			   )
+-			{
++		for (i = 1; i < chg_nr; i++)  {
++			unsigned long long curaddr, lastaddr;
++			unsigned long long curpbaddr, lastpbaddr;
++
++			curaddr = change_point[i]->addr;
++			lastaddr = change_point[i - 1]->addr;
++			curpbaddr = change_point[i]->pbios->addr;
++			lastpbaddr = change_point[i - 1]->pbios->addr;
++
++			/*
++			 * swap entries, when:
++			 *
++			 * curaddr > lastaddr or
++			 * curaddr == lastaddr and curaddr == curpbaddr and
++			 * lastaddr != lastpbaddr
++			 */
++			if (curaddr < lastaddr ||
++			    (curaddr == lastaddr && curaddr == curpbaddr &&
++			     lastaddr != lastpbaddr)) {
+ 				change_tmp = change_point[i];
+ 				change_point[i] = change_point[i-1];
+ 				change_point[i-1] = change_tmp;
+-				still_changing=1;
++				still_changing = 1;
+ 			}
+ 		}
+ 	}
+ 
+ 	/* create a new bios memory map, removing overlaps */
+-	overlap_entries=0;	 /* number of entries in the overlap table */
+-	new_bios_entry=0;	 /* index for creating new bios map entries */
++	overlap_entries = 0;	 /* number of entries in the overlap table */
++	new_bios_entry = 0;	 /* index for creating new bios map entries */
+ 	last_type = 0;		 /* start with undefined memory type */
+ 	last_addr = 0;		 /* start with 0 as last starting address */
++
+ 	/* loop through change-points, determining affect on the new bios map */
+-	for (chgidx=0; chgidx < chg_nr; chgidx++)
+-	{
++	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
+ 		/* keep track of all overlapping bios entries */
+-		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+-		{
+-			/* add map entry to overlap list (> 1 entry implies an overlap) */
+-			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+-		}
+-		else
+-		{
+-			/* remove entry from list (order independent, so swap with last) */
+-			for (i=0; i<overlap_entries; i++)
+-			{
+-				if (overlap_list[i] == change_point[chgidx]->pbios)
+-					overlap_list[i] = overlap_list[overlap_entries-1];
++		if (change_point[chgidx]->addr ==
++		    change_point[chgidx]->pbios->addr) {
++			/*
++			 * add map entry to overlap list (> 1 entry
++			 * implies an overlap)
++			 */
++			overlap_list[overlap_entries++] =
++				change_point[chgidx]->pbios;
++		} else {
++			/*
++			 * remove entry from list (order independent,
++			 * so swap with last)
++			 */
++			for (i = 0; i < overlap_entries; i++) {
++				if (overlap_list[i] ==
++				    change_point[chgidx]->pbios)
++					overlap_list[i] =
++						overlap_list[overlap_entries-1];
+ 			}
+ 			overlap_entries--;
+ 		}
+-		/* if there are overlapping entries, decide which "type" to use */
+-		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++		/*
++		 * if there are overlapping entries, decide which
++		 * "type" to use (larger value takes precedence --
++		 * 1=usable, 2,3,4,4+=unusable)
++		 */
+ 		current_type = 0;
+-		for (i=0; i<overlap_entries; i++)
++		for (i = 0; i < overlap_entries; i++)
+ 			if (overlap_list[i]->type > current_type)
+ 				current_type = overlap_list[i]->type;
+-		/* continue building up new bios map based on this information */
++		/*
++		 * continue building up new bios map based on this
++		 * information
++		 */
+ 		if (current_type != last_type)	{
+ 			if (last_type != 0)	 {
+ 				new_bios[new_bios_entry].size =
+ 					change_point[chgidx]->addr - last_addr;
+-				/* move forward only if the new size was non-zero */
++				/*
++				 * move forward only if the new size
++				 * was non-zero
++				 */
+ 				if (new_bios[new_bios_entry].size != 0)
++					/*
++					 * no more space left for new
++					 * bios entries ?
++					 */
+ 					if (++new_bios_entry >= E820MAX)
+-						break; 	/* no more space left for new bios entries */
++						break;
+ 			}
+ 			if (current_type != 0)	{
+-				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++				new_bios[new_bios_entry].addr =
++					change_point[chgidx]->addr;
+ 				new_bios[new_bios_entry].type = current_type;
+-				last_addr=change_point[chgidx]->addr;
++				last_addr = change_point[chgidx]->addr;
+ 			}
+ 			last_type = current_type;
+ 		}
+ 	}
+-	new_nr = new_bios_entry;   /* retain count for new bios entries */
++	/* retain count for new bios entries */
++	new_nr = new_bios_entry;
+ 
+ 	/* copy new bios mapping into original location */
+-	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
+ 	*pnr_map = new_nr;
+ 
+ 	return 0;
+@@ -566,7 +620,7 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+  * will have given us a memory map that we can use to properly
+  * set up memory.  If we aren't, we'll fake a memory map.
+  */
+-static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++static int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
+ {
+ 	/* Only one memory region (or negative)? Ignore it */
+ 	if (nr_map < 2)
+@@ -583,18 +637,20 @@ static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+ 			return -1;
+ 
+ 		add_memory_region(start, size, type);
+-	} while (biosmap++,--nr_map);
++	} while (biosmap++, --nr_map);
+ 	return 0;
  }
- static inline void wrfs32(u32 v, addr_t addr)
+ 
+-void early_panic(char *msg)
++static void early_panic(char *msg)
  {
--	asm volatile("movl %1,%%fs:%0" : "+m" (*(u32 *)addr) : "r" (v));
-+	asm volatile("movl %1,%%fs:%0" : "+m" (*(u32 *)addr) : "ri" (v));
+ 	early_printk(msg);
+ 	panic(msg);
+ }
+ 
+-void __init setup_memory_region(void)
++/* We're not void only for x86 32-bit compat */
++char * __init machine_specific_memory_setup(void)
+ {
++	char *who = "BIOS-e820";
+ 	/*
+ 	 * Try to copy the BIOS-supplied E820-map.
+ 	 *
+@@ -605,7 +661,10 @@ void __init setup_memory_region(void)
+ 	if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0)
+ 		early_panic("Cannot find a valid memory map");
+ 	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+-	e820_print_map("BIOS-e820");
++	e820_print_map(who);
++
++	/* In case someone cares... */
++	return who;
+ }
+ 
+ static int __init parse_memopt(char *p)
+@@ -613,9 +672,9 @@ static int __init parse_memopt(char *p)
+ 	if (!p)
+ 		return -EINVAL;
+ 	end_user_pfn = memparse(p, &p);
+-	end_user_pfn >>= PAGE_SHIFT;	
++	end_user_pfn >>= PAGE_SHIFT;
+ 	return 0;
+-} 
++}
+ early_param("mem", parse_memopt);
+ 
+ static int userdef __initdata;
+@@ -627,9 +686,9 @@ static int __init parse_memmap_opt(char *p)
+ 
+ 	if (!strcmp(p, "exactmap")) {
+ #ifdef CONFIG_CRASH_DUMP
+-		/* If we are doing a crash dump, we
+-		 * still need to know the real mem
+-		 * size before original memory map is
++		/*
++		 * If we are doing a crash dump, we still need to know
++		 * the real mem size before original memory map is
+ 		 * reset.
+ 		 */
+ 		e820_register_active_regions(0, 0, -1UL);
+@@ -646,6 +705,8 @@ static int __init parse_memmap_opt(char *p)
+ 	mem_size = memparse(p, &p);
+ 	if (p == oldp)
+ 		return -EINVAL;
++
++	userdef = 1;
+ 	if (*p == '@') {
+ 		start_at = memparse(p+1, &p);
+ 		add_memory_region(start_at, mem_size, E820_RAM);
+@@ -665,11 +726,29 @@ early_param("memmap", parse_memmap_opt);
+ void __init finish_e820_parsing(void)
+ {
+ 	if (userdef) {
++		char nr = e820.nr_map;
++
++		if (sanitize_e820_map(e820.map, &nr) < 0)
++			early_panic("Invalid user supplied memory map");
++		e820.nr_map = nr;
++
+ 		printk(KERN_INFO "user-defined physical RAM map:\n");
+ 		e820_print_map("user");
+ 	}
  }
  
- static inline u8 rdgs8(addr_t addr)
- {
- 	u8 v;
--	asm volatile("movb %%gs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
-+	asm volatile("movb %%gs:%1,%0" : "=q" (v) : "m" (*(u8 *)addr));
- 	return v;
++void __init update_e820(void)
++{
++	u8 nr_map;
++
++	nr_map = e820.nr_map;
++	if (sanitize_e820_map(e820.map, &nr_map))
++		return;
++	e820.nr_map = nr_map;
++	printk(KERN_INFO "modified physical RAM map:\n");
++	e820_print_map("modified");
++}
++
+ unsigned long pci_mem_start = 0xaeedbabe;
+ EXPORT_SYMBOL(pci_mem_start);
+ 
+@@ -713,8 +792,10 @@ __init void e820_setup_gap(void)
+ 
+ 	if (!found) {
+ 		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
+-		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
+-		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
++		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit "
++		       "address range\n"
++		       KERN_ERR "PCI: Unassigned devices with 32bit resource "
++		       "registers may break!\n");
+ 	}
+ 
+ 	/*
+@@ -727,8 +808,9 @@ __init void e820_setup_gap(void)
+ 	/* Fun with two's complement */
+ 	pci_mem_start = (gapstart + round) & -round;
+ 
+-	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
+-		pci_mem_start, gapstart, gapsize);
++	printk(KERN_INFO
++	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
++	       pci_mem_start, gapstart, gapsize);
  }
- static inline u16 rdgs16(addr_t addr)
-@@ -159,15 +159,15 @@ static inline u32 rdgs32(addr_t addr)
  
- static inline void wrgs8(u8 v, addr_t addr)
+ int __init arch_get_ram_range(int slot, u64 *addr, u64 *size)
+diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
+index 88bb83e..9f51e1e 100644
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -21,7 +21,33 @@
+ #include <asm/gart.h>
+ #endif
+ 
+-static void __init via_bugs(void)
++static void __init fix_hypertransport_config(int num, int slot, int func)
++{
++	u32 htcfg;
++	/*
++	 * we found a hypertransport bus
++	 * make sure that we are broadcasting
++	 * interrupts to all cpus on the ht bus
++	 * if we're using extended apic ids
++	 */
++	htcfg = read_pci_config(num, slot, func, 0x68);
++	if (htcfg & (1 << 18)) {
++		printk(KERN_INFO "Detected use of extended apic ids "
++				 "on hypertransport bus\n");
++		if ((htcfg & (1 << 17)) == 0) {
++			printk(KERN_INFO "Enabling hypertransport extended "
++					 "apic interrupt broadcast\n");
++			printk(KERN_INFO "Note this is a bios bug, "
++					 "please contact your hw vendor\n");
++			htcfg |= (1 << 17);
++			write_pci_config(num, slot, func, 0x68, htcfg);
++		}
++	}
++
++
++}
++
++static void __init via_bugs(int  num, int slot, int func)
  {
--	asm volatile("movb %1,%%gs:%0" : "+m" (*(u8 *)addr) : "r" (v));
-+	asm volatile("movb %1,%%gs:%0" : "+m" (*(u8 *)addr) : "qi" (v));
- }
- static inline void wrgs16(u16 v, addr_t addr)
+ #ifdef CONFIG_GART_IOMMU
+ 	if ((end_pfn > MAX_DMA32_PFN ||  force_iommu) &&
+@@ -44,7 +70,7 @@ static int __init nvidia_hpet_check(struct acpi_table_header *header)
+ #endif /* CONFIG_X86_IO_APIC */
+ #endif /* CONFIG_ACPI */
+ 
+-static void __init nvidia_bugs(void)
++static void __init nvidia_bugs(int num, int slot, int func)
  {
--	asm volatile("movw %1,%%gs:%0" : "+m" (*(u16 *)addr) : "r" (v));
-+	asm volatile("movw %1,%%gs:%0" : "+m" (*(u16 *)addr) : "ri" (v));
+ #ifdef CONFIG_ACPI
+ #ifdef CONFIG_X86_IO_APIC
+@@ -72,7 +98,7 @@ static void __init nvidia_bugs(void)
+ 
  }
- static inline void wrgs32(u32 v, addr_t addr)
+ 
+-static void __init ati_bugs(void)
++static void __init ati_bugs(int num, int slot, int func)
  {
--	asm volatile("movl %1,%%gs:%0" : "+m" (*(u32 *)addr) : "r" (v));
-+	asm volatile("movl %1,%%gs:%0" : "+m" (*(u32 *)addr) : "ri" (v));
+ #ifdef CONFIG_X86_IO_APIC
+ 	if (timer_over_8254 == 1) {
+@@ -83,18 +109,67 @@ static void __init ati_bugs(void)
+ #endif
  }
  
- /* Note: these only return true/false, not a signed return value! */
-@@ -241,6 +241,7 @@ int query_apm_bios(void);
- 
- /* cmdline.c */
- int cmdline_find_option(const char *option, char *buffer, int bufsize);
-+int cmdline_find_option_bool(const char *option);
++#define QFLAG_APPLY_ONCE 	0x1
++#define QFLAG_APPLIED		0x2
++#define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
+ struct chipset {
+-	u16 vendor;
+-	void (*f)(void);
++	u32 vendor;
++	u32 device;
++	u32 class;
++	u32 class_mask;
++	u32 flags;
++	void (*f)(int num, int slot, int func);
+ };
  
- /* cpu.c, cpucheck.c */
- int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
-diff --git a/arch/x86/boot/cmdline.c b/arch/x86/boot/cmdline.c
-index 34bb778..680408a 100644
---- a/arch/x86/boot/cmdline.c
-+++ b/arch/x86/boot/cmdline.c
-@@ -95,3 +95,68 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize)
+ static struct chipset early_qrk[] __initdata = {
+-	{ PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
+-	{ PCI_VENDOR_ID_VIA, via_bugs },
+-	{ PCI_VENDOR_ID_ATI, ati_bugs },
++	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
++	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
++	{ PCI_VENDOR_ID_VIA, PCI_ANY_ID,
++	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
++	{ PCI_VENDOR_ID_ATI, PCI_ANY_ID,
++	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, ati_bugs },
++	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
++	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
+ 	{}
+ };
  
- 	return len;
- }
-+
-+/*
-+ * Find a boolean option (like quiet,noapic,nosmp....)
-+ *
-+ * Returns the position of that option (starts counting with 1)
-+ * or 0 on not found
-+ */
-+int cmdline_find_option_bool(const char *option)
++static void __init check_dev_quirk(int num, int slot, int func)
 +{
-+	u32 cmdline_ptr = boot_params.hdr.cmd_line_ptr;
-+	addr_t cptr;
-+	char c;
-+	int pos = 0, wstart = 0;
-+	const char *opptr = NULL;
-+	enum {
-+		st_wordstart,	/* Start of word/after whitespace */
-+		st_wordcmp,	/* Comparing this word */
-+		st_wordskip,	/* Miscompare, skip */
-+	} state = st_wordstart;
-+
-+	if (!cmdline_ptr || cmdline_ptr >= 0x100000)
-+		return -1;	/* No command line, or inaccessible */
-+
-+	cptr = cmdline_ptr & 0xf;
-+	set_fs(cmdline_ptr >> 4);
++	u16 class;
++	u16 vendor;
++	u16 device;
++	u8 type;
++	int i;
 +
-+	while (cptr < 0x10000) {
-+		c = rdfs8(cptr++);
-+		pos++;
++	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
 +
-+		switch (state) {
-+		case st_wordstart:
-+			if (!c)
-+				return 0;
-+			else if (myisspace(c))
-+				break;
++	if (class == 0xffff)
++		return;
 +
-+			state = st_wordcmp;
-+			opptr = option;
-+			wstart = pos;
-+			/* fall through */
++	vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);
 +
-+		case st_wordcmp:
-+			if (!*opptr)
-+				if (!c || myisspace(c))
-+					return wstart;
-+				else
-+					state = st_wordskip;
-+			else if (!c)
-+				return 0;
-+			else if (c != *opptr++)
-+				state = st_wordskip;
-+			break;
++	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
 +
-+		case st_wordskip:
-+			if (!c)
-+				return 0;
-+			else if (myisspace(c))
-+				state = st_wordstart;
-+			break;
-+		}
++	for (i = 0; early_qrk[i].f != NULL; i++) {
++		if (((early_qrk[i].vendor == PCI_ANY_ID) ||
++			(early_qrk[i].vendor == vendor)) &&
++			((early_qrk[i].device == PCI_ANY_ID) ||
++			(early_qrk[i].device == device)) &&
++			(!((early_qrk[i].class ^ class) &
++			    early_qrk[i].class_mask))) {
++				if ((early_qrk[i].flags &
++				     QFLAG_DONE) != QFLAG_DONE)
++					early_qrk[i].f(num, slot, func);
++				early_qrk[i].flags |= QFLAG_APPLIED;
++			}
 +	}
 +
-+	return 0;	/* Buffer overrun */
++	type = read_pci_config_byte(num, slot, func,
++				    PCI_HEADER_TYPE);
++	if (!(type & 0x80))
++		return;
 +}
-diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
-index 52c1db8..fe24cea 100644
---- a/arch/x86/boot/compressed/Makefile
-+++ b/arch/x86/boot/compressed/Makefile
-@@ -1,5 +1,63 @@
-+#
-+# linux/arch/x86/boot/compressed/Makefile
-+#
-+# create a compressed vmlinux image from the original vmlinux
-+#
-+
-+targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o
-+
-+KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
-+KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
-+cflags-$(CONFIG_X86_64) := -mcmodel=small
-+KBUILD_CFLAGS += $(cflags-y)
-+KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
-+KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
-+
-+KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
-+
-+LDFLAGS := -m elf_$(UTS_MACHINE)
-+LDFLAGS_vmlinux := -T
-+
-+$(obj)/vmlinux: $(src)/vmlinux_$(BITS).lds $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/piggy.o FORCE
-+	$(call if_changed,ld)
-+	@:
-+
-+$(obj)/vmlinux.bin: vmlinux FORCE
-+	$(call if_changed,objcopy)
-+
-+
- ifeq ($(CONFIG_X86_32),y)
--include ${srctree}/arch/x86/boot/compressed/Makefile_32
-+targets += vmlinux.bin.all vmlinux.relocs
-+hostprogs-y := relocs
-+
-+quiet_cmd_relocs = RELOCS  $@
-+      cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
-+$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
-+	$(call if_changed,relocs)
-+
-+vmlinux.bin.all-y := $(obj)/vmlinux.bin
-+vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs
-+quiet_cmd_relocbin = BUILD   $@
-+      cmd_relocbin = cat $(filter-out FORCE,$^) > $@
-+$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE
-+	$(call if_changed,relocbin)
-+
-+ifdef CONFIG_RELOCATABLE
-+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
-+	$(call if_changed,gzip)
- else
--include ${srctree}/arch/x86/boot/compressed/Makefile_64
-+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
-+	$(call if_changed,gzip)
- endif
-+LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
-+
-+else
-+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
-+	$(call if_changed,gzip)
-+
-+LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
-+endif
-+
 +
-+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
-+	$(call if_changed,ld)
-diff --git a/arch/x86/boot/compressed/Makefile_32 b/arch/x86/boot/compressed/Makefile_32
-deleted file mode 100644
-index e43ff7c..0000000
---- a/arch/x86/boot/compressed/Makefile_32
-+++ /dev/null
-@@ -1,50 +0,0 @@
--#
--# linux/arch/x86/boot/compressed/Makefile
--#
--# create a compressed vmlinux image from the original vmlinux
--#
--
--targets		:= vmlinux vmlinux.bin vmlinux.bin.gz head_32.o misc_32.o piggy.o \
--			vmlinux.bin.all vmlinux.relocs
--EXTRA_AFLAGS	:= -traditional
--
--LDFLAGS_vmlinux := -T
--hostprogs-y	:= relocs
--
--KBUILD_CFLAGS  := -m32 -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
--	   -fno-strict-aliasing -fPIC \
--	   $(call cc-option,-ffreestanding) \
--	   $(call cc-option,-fno-stack-protector)
--LDFLAGS := -m elf_i386
--
--$(obj)/vmlinux: $(src)/vmlinux_32.lds $(obj)/head_32.o $(obj)/misc_32.o $(obj)/piggy.o FORCE
--	$(call if_changed,ld)
--	@:
--
--$(obj)/vmlinux.bin: vmlinux FORCE
--	$(call if_changed,objcopy)
--
--quiet_cmd_relocs = RELOCS  $@
--      cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
--$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
--	$(call if_changed,relocs)
--
--vmlinux.bin.all-y := $(obj)/vmlinux.bin
--vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs
--quiet_cmd_relocbin = BUILD   $@
--      cmd_relocbin = cat $(filter-out FORCE,$^) > $@
--$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE
--	$(call if_changed,relocbin)
--
--ifdef CONFIG_RELOCATABLE
--$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
--	$(call if_changed,gzip)
--else
--$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
--	$(call if_changed,gzip)
--endif
--
--LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
--
--$(obj)/piggy.o: $(src)/vmlinux_32.scr $(obj)/vmlinux.bin.gz FORCE
--	$(call if_changed,ld)
-diff --git a/arch/x86/boot/compressed/Makefile_64 b/arch/x86/boot/compressed/Makefile_64
-deleted file mode 100644
-index 7801e8d..0000000
---- a/arch/x86/boot/compressed/Makefile_64
-+++ /dev/null
-@@ -1,30 +0,0 @@
--#
--# linux/arch/x86/boot/compressed/Makefile
--#
--# create a compressed vmlinux image from the original vmlinux
--#
--
--targets		:= vmlinux vmlinux.bin vmlinux.bin.gz head_64.o misc_64.o piggy.o
--
--KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUXINCLUDE) -O2  \
--	  -fno-strict-aliasing -fPIC -mcmodel=small \
--	   $(call cc-option, -ffreestanding) \
--	   $(call cc-option, -fno-stack-protector)
--KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
--LDFLAGS := -m elf_x86_64
--
--LDFLAGS_vmlinux := -T
--$(obj)/vmlinux: $(src)/vmlinux_64.lds $(obj)/head_64.o $(obj)/misc_64.o $(obj)/piggy.o FORCE
--	$(call if_changed,ld)
--	@:
+ void __init early_quirks(void)
+ {
+ 	int num, slot, func;
+@@ -103,36 +178,8 @@ void __init early_quirks(void)
+ 		return;
+ 
+ 	/* Poor man's PCI discovery */
+-	for (num = 0; num < 32; num++) {
+-		for (slot = 0; slot < 32; slot++) {
+-			for (func = 0; func < 8; func++) {
+-				u32 class;
+-				u32 vendor;
+-				u8 type;
+-				int i;
+-				class = read_pci_config(num,slot,func,
+-							PCI_CLASS_REVISION);
+-				if (class == 0xffffffff)
+-					break;
 -
--$(obj)/vmlinux.bin: vmlinux FORCE
--	$(call if_changed,objcopy)
+-				if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
+-					continue;
 -
--$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
--	$(call if_changed,gzip)
+-				vendor = read_pci_config(num, slot, func,
+-							 PCI_VENDOR_ID);
+-				vendor &= 0xffff;
 -
--LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
+-				for (i = 0; early_qrk[i].f; i++)
+-					if (early_qrk[i].vendor == vendor) {
+-						early_qrk[i].f();
+-						return;
+-					}
 -
--$(obj)/piggy.o: $(obj)/vmlinux_64.scr $(obj)/vmlinux.bin.gz FORCE
--	$(call if_changed,ld)
-diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+-				type = read_pci_config_byte(num, slot, func,
+-							    PCI_HEADER_TYPE);
+-				if (!(type & 0x80))
+-					break;
+-			}
+-		}
+-	}
++	for (num = 0; num < 32; num++)
++		for (slot = 0; slot < 32; slot++)
++			for (func = 0; func < 8; func++)
++				check_dev_quirk(num, slot, func);
+ }
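
[Not part of the patch: the refactored early_quirks() above now just walks bus/slot/function and hands each device to check_dev_quirk(), which matches the early_qrk[] table by vendor, device and class mask, with PCI_ANY_ID acting as a wildcard. The following is a standalone illustration of that matching rule only; the struct layout and the sample values are simplified assumptions, not the kernel's definitions.]

#include <stdio.h>

#define PCI_ANY_ID 0xffff

struct quirk {
	unsigned short vendor;      /* PCI_ANY_ID matches any vendor */
	unsigned short device;      /* PCI_ANY_ID matches any device */
	unsigned int   class;       /* class word to compare against */
	unsigned int   class_mask;  /* bits of "class" that must match */
};

/* Mirrors the (vendor || ANY) && (device || ANY) && class-mask test
 * used in check_dev_quirk() above. */
static int quirk_matches(const struct quirk *q, unsigned short vendor,
			 unsigned short device, unsigned int class)
{
	return (q->vendor == PCI_ANY_ID || q->vendor == vendor) &&
	       (q->device == PCI_ANY_ID || q->device == device) &&
	       !((q->class ^ class) & q->class_mask);
}

int main(void)
{
	/* hypothetical entry: any Intel device whose base class is 0x06 (bridge) */
	struct quirk q = { 0x8086, PCI_ANY_ID, 0x06000000, 0xff000000 };

	printf("%d\n", quirk_matches(&q, 0x8086, 0x1234, 0x06040000)); /* 1 */
	printf("%d\n", quirk_matches(&q, 0x10de, 0x1234, 0x06040000)); /* 0 */
	return 0;
}

[The class_mask is what lets a single table entry cover, say, every PCI-PCI bridge regardless of prog-if or revision.]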
+diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
 new file mode 100644
-index 0000000..8182e32
+index 0000000..1411324
 --- /dev/null
-+++ b/arch/x86/boot/compressed/misc.c
-@@ -0,0 +1,413 @@
-+/*
-+ * misc.c
-+ *
-+ * This is a collection of several routines from gzip-1.0.3
-+ * adapted for Linux.
-+ *
-+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
-+ * puts by Nick Holloway 1993, better puts by Martin Mares 1995
-+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
-+ */
-+
-+/*
-+ * we have to be careful, because no indirections are allowed here, and
-+ * paravirt_ops is a kind of one. As it will only run in baremetal anyway,
-+ * we just keep it from happening
-+ */
-+#undef CONFIG_PARAVIRT
-+#ifdef CONFIG_X86_64
-+#define _LINUX_STRING_H_ 1
-+#define __LINUX_BITMAP_H 1
-+#endif
-+
-+#include <linux/linkage.h>
-+#include <linux/screen_info.h>
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/boot.h>
-+
-+/* WARNING!!
-+ * This code is compiled with -fPIC and it is relocated dynamically
-+ * at run time, but no relocation processing is performed.
-+ * This means that it is not safe to place pointers in static structures.
-+ */
-+
++++ b/arch/x86/kernel/efi.c
+@@ -0,0 +1,512 @@
 +/*
-+ * Getting to provable safe in place decompression is hard.
-+ * Worst case behaviours need to be analyzed.
-+ * Background information:
-+ *
-+ * The file layout is:
-+ *    magic[2]
-+ *    method[1]
-+ *    flags[1]
-+ *    timestamp[4]
-+ *    extraflags[1]
-+ *    os[1]
-+ *    compressed data blocks[N]
-+ *    crc[4] orig_len[4]
-+ *
-+ * resulting in 18 bytes of non compressed data overhead.
-+ *
-+ * Files divided into blocks
-+ * 1 bit (last block flag)
-+ * 2 bits (block type)
-+ *
-+ * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved.
-+ * The smallest block type encoding is always used.
-+ *
-+ * stored:
-+ *    32 bits length in bytes.
-+ *
-+ * fixed:
-+ *    magic fixed tree.
-+ *    symbols.
-+ *
-+ * dynamic:
-+ *    dynamic tree encoding.
-+ *    symbols.
-+ *
-+ *
-+ * The buffer for decompression in place is the length of the
-+ * uncompressed data, plus a small amount extra to keep the algorithm safe.
-+ * The compressed data is placed at the end of the buffer.  The output
-+ * pointer is placed at the start of the buffer and the input pointer
-+ * is placed where the compressed data starts.  Problems will occur
-+ * when the output pointer overruns the input pointer.
-+ *
-+ * The output pointer can only overrun the input pointer if the input
-+ * pointer is moving faster than the output pointer.  A condition only
-+ * triggered by data whose compressed form is larger than the uncompressed
-+ * form.
-+ *
-+ * The worst case at the block level is a growth of the compressed data
-+ * of 5 bytes per 32767 bytes.
++ * Common EFI (Extensible Firmware Interface) support functions
++ * Based on Extensible Firmware Interface Specification version 1.0
 + *
-+ * The worst case internal to a compressed block is very hard to figure.
-+ * The worst case can at least be boundined by having one bit that represents
-+ * 32764 bytes and then all of the rest of the bytes representing the very
-+ * very last byte.
++ * Copyright (C) 1999 VA Linux Systems
++ * Copyright (C) 1999 Walt Drummond <drummond at valinux.com>
++ * Copyright (C) 1999-2002 Hewlett-Packard Co.
++ *	David Mosberger-Tang <davidm at hpl.hp.com>
++ *	Stephane Eranian <eranian at hpl.hp.com>
++ * Copyright (C) 2005-2008 Intel Co.
++ *	Fenghua Yu <fenghua.yu at intel.com>
++ *	Bibo Mao <bibo.mao at intel.com>
++ *	Chandramouli Narayanan <mouli at linux.intel.com>
++ *	Huang Ying <ying.huang at intel.com>
 + *
-+ * All of which is enough to compute an amount of extra data that is required
-+ * to be safe.  To avoid problems at the block level allocating 5 extra bytes
-+ * per 32767 bytes of data is sufficient.  To avoind problems internal to a block
-+ * adding an extra 32767 bytes (the worst case uncompressed block size) is
-+ * sufficient, to ensure that in the worst case the decompressed data for
-+ * block will stop the byte before the compressed data for a block begins.
-+ * To avoid problems with the compressed data's meta information an extra 18
-+ * bytes are needed.  Leading to the formula:
++ * Copied from efi_32.c to eliminate the duplicated code between EFI
++ * 32/64 support code. --ying 2007-10-26
 + *
-+ * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
++ * All EFI Runtime Services are not implemented yet as EFI only
++ * supports physical mode addressing on SoftSDV. This is to be fixed
++ * in a future version.  --drummond 1999-07-20
 + *
-+ * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
-+ * Adding 32768 instead of 32767 just makes for round numbers.
-+ * Adding the decompressor_size is necessary as it musht live after all
-+ * of the data as well.  Last I measured the decompressor is about 14K.
-+ * 10K of actual data and 4K of bss.
++ * Implemented EFI runtime services and virtual mode calls.  --davidm
 + *
++ * Goutham Rao: <goutham.rao at intel.com>
++ *	Skip non-WB memory and ignore empty memory ranges.
 + */
 +
-+/*
-+ * gzip declarations
-+ */
-+
-+#define OF(args)  args
-+#define STATIC static
-+
-+#undef memset
-+#undef memcpy
-+#define memzero(s, n)     memset ((s), 0, (n))
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/efi.h>
++#include <linux/bootmem.h>
++#include <linux/spinlock.h>
++#include <linux/uaccess.h>
++#include <linux/time.h>
++#include <linux/io.h>
++#include <linux/reboot.h>
++#include <linux/bcd.h>
 +
-+typedef unsigned char  uch;
-+typedef unsigned short ush;
-+typedef unsigned long  ulg;
++#include <asm/setup.h>
++#include <asm/efi.h>
++#include <asm/time.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
 +
-+#define WSIZE 0x80000000	/* Window size must be at least 32k,
-+				 * and a power of two
-+				 * We don't actually have a window just
-+				 * a huge output buffer so I report
-+				 * a 2G windows size, as that should
-+				 * always be larger than our output buffer.
-+				 */
++#define EFI_DEBUG	1
++#define PFX 		"EFI: "
 +
-+static uch *inbuf;	/* input buffer */
-+static uch *window;	/* Sliding window buffer, (and final output buffer) */
++int efi_enabled;
++EXPORT_SYMBOL(efi_enabled);
 +
-+static unsigned insize;  /* valid bytes in inbuf */
-+static unsigned inptr;   /* index of next byte to be processed in inbuf */
-+static unsigned outcnt;  /* bytes in output buffer */
++struct efi efi;
++EXPORT_SYMBOL(efi);
 +
-+/* gzip flag byte */
-+#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
-+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-+#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
-+#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
-+#define COMMENT      0x10 /* bit 4 set: file comment present */
-+#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
-+#define RESERVED     0xC0 /* bit 6,7:   reserved */
++struct efi_memory_map memmap;
 +
-+#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-+		
-+/* Diagnostic functions */
-+#ifdef DEBUG
-+#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-+#  define Trace(x) fprintf x
-+#  define Tracev(x) {if (verbose) fprintf x ;}
-+#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-+#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-+#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-+#else
-+#  define Assert(cond,msg)
-+#  define Trace(x)
-+#  define Tracev(x)
-+#  define Tracevv(x)
-+#  define Tracec(c,x)
-+#  define Tracecv(c,x)
-+#endif
++struct efi efi_phys __initdata;
++static efi_system_table_t efi_systab __initdata;
 +
-+static int  fill_inbuf(void);
-+static void flush_window(void);
-+static void error(char *m);
-+static void gzip_mark(void **);
-+static void gzip_release(void **);
-+  
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+static unsigned char *real_mode; /* Pointer to real-mode data */
++static int __init setup_noefi(char *arg)
++{
++	efi_enabled = 0;
++	return 0;
++}
++early_param("noefi", setup_noefi);
 +
-+#define RM_EXT_MEM_K   (*(unsigned short *)(real_mode + 0x2))
-+#ifndef STANDARD_MEMORY_BIOS_CALL
-+#define RM_ALT_MEM_K   (*(unsigned long *)(real_mode + 0x1e0))
-+#endif
-+#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
++static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
++{
++	return efi_call_virt2(get_time, tm, tc);
++}
 +
-+extern unsigned char input_data[];
-+extern int input_len;
++static efi_status_t virt_efi_set_time(efi_time_t *tm)
++{
++	return efi_call_virt1(set_time, tm);
++}
 +
-+static long bytes_out = 0;
++static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
++					     efi_bool_t *pending,
++					     efi_time_t *tm)
++{
++	return efi_call_virt3(get_wakeup_time,
++			      enabled, pending, tm);
++}
 +
-+static void *malloc(int size);
-+static void free(void *where);
++static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
++{
++	return efi_call_virt2(set_wakeup_time,
++			      enabled, tm);
++}
 +
-+static void *memset(void *s, int c, unsigned n);
-+static void *memcpy(void *dest, const void *src, unsigned n);
++static efi_status_t virt_efi_get_variable(efi_char16_t *name,
++					  efi_guid_t *vendor,
++					  u32 *attr,
++					  unsigned long *data_size,
++					  void *data)
++{
++	return efi_call_virt5(get_variable,
++			      name, vendor, attr,
++			      data_size, data);
++}
 +
-+static void putstr(const char *);
++static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
++					       efi_char16_t *name,
++					       efi_guid_t *vendor)
++{
++	return efi_call_virt3(get_next_variable,
++			      name_size, name, vendor);
++}
 +
-+#ifdef CONFIG_X86_64
-+#define memptr long
-+#else
-+#define memptr unsigned
-+#endif
++static efi_status_t virt_efi_set_variable(efi_char16_t *name,
++					  efi_guid_t *vendor,
++					  unsigned long attr,
++					  unsigned long data_size,
++					  void *data)
++{
++	return efi_call_virt5(set_variable,
++			      name, vendor, attr,
++			      data_size, data);
++}
 +
-+static memptr free_mem_ptr;
-+static memptr free_mem_end_ptr;
++static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
++{
++	return efi_call_virt1(get_next_high_mono_count, count);
++}
 +
-+#ifdef CONFIG_X86_64
-+#define HEAP_SIZE             0x7000
-+#else
-+#define HEAP_SIZE             0x4000
-+#endif
++static void virt_efi_reset_system(int reset_type,
++				  efi_status_t status,
++				  unsigned long data_size,
++				  efi_char16_t *data)
++{
++	efi_call_virt4(reset_system, reset_type, status,
++		       data_size, data);
++}
 +
-+static char *vidmem = (char *)0xb8000;
-+static int vidport;
-+static int lines, cols;
++static efi_status_t virt_efi_set_virtual_address_map(
++	unsigned long memory_map_size,
++	unsigned long descriptor_size,
++	u32 descriptor_version,
++	efi_memory_desc_t *virtual_map)
++{
++	return efi_call_virt4(set_virtual_address_map,
++			      memory_map_size, descriptor_size,
++			      descriptor_version, virtual_map);
++}
 +
-+#ifdef CONFIG_X86_NUMAQ
-+void *xquad_portio;
-+#endif
++static efi_status_t __init phys_efi_set_virtual_address_map(
++	unsigned long memory_map_size,
++	unsigned long descriptor_size,
++	u32 descriptor_version,
++	efi_memory_desc_t *virtual_map)
++{
++	efi_status_t status;
 +
-+#include "../../../../lib/inflate.c"
++	efi_call_phys_prelog();
++	status = efi_call_phys4(efi_phys.set_virtual_address_map,
++				memory_map_size, descriptor_size,
++				descriptor_version, virtual_map);
++	efi_call_phys_epilog();
++	return status;
++}
 +
-+static void *malloc(int size)
++static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
++					     efi_time_cap_t *tc)
 +{
-+	void *p;
-+
-+	if (size <0) error("Malloc error");
-+	if (free_mem_ptr <= 0) error("Memory error");
++	efi_status_t status;
 +
-+	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
++	efi_call_phys_prelog();
++	status = efi_call_phys2(efi_phys.get_time, tm, tc);
++	efi_call_phys_epilog();
++	return status;
++}
 +
-+	p = (void *)free_mem_ptr;
-+	free_mem_ptr += size;
++int efi_set_rtc_mmss(unsigned long nowtime)
++{
++	int real_seconds, real_minutes;
++	efi_status_t 	status;
++	efi_time_t 	eft;
++	efi_time_cap_t 	cap;
 +
-+	if (free_mem_ptr >= free_mem_end_ptr)
-+		error("Out of memory");
++	status = efi.get_time(&eft, &cap);
++	if (status != EFI_SUCCESS) {
++		printk(KERN_ERR "Oops: efitime: can't read time!\n");
++		return -1;
++	}
 +
-+	return p;
-+}
++	real_seconds = nowtime % 60;
++	real_minutes = nowtime / 60;
++	if (((abs(real_minutes - eft.minute) + 15)/30) & 1)
++		real_minutes += 30;
++	real_minutes %= 60;
++	eft.minute = real_minutes;
++	eft.second = real_seconds;
 +
-+static void free(void *where)
-+{	/* Don't care */
++	status = efi.set_time(&eft);
++	if (status != EFI_SUCCESS) {
++		printk(KERN_ERR "Oops: efitime: can't write time!\n");
++		return -1;
++	}
++	return 0;
 +}
 +
-+static void gzip_mark(void **ptr)
++unsigned long efi_get_time(void)
 +{
-+	*ptr = (void *) free_mem_ptr;
-+}
++	efi_status_t status;
++	efi_time_t eft;
++	efi_time_cap_t cap;
 +
-+static void gzip_release(void **ptr)
-+{
-+	free_mem_ptr = (memptr) *ptr;
++	status = efi.get_time(&eft, &cap);
++	if (status != EFI_SUCCESS)
++		printk(KERN_ERR "Oops: efitime: can't read time!\n");
++
++	return mktime(eft.year, eft.month, eft.day, eft.hour,
++		      eft.minute, eft.second);
 +}
-+ 
-+static void scroll(void)
++
++#if EFI_DEBUG
++static void __init print_efi_memmap(void)
 +{
++	efi_memory_desc_t *md;
++	void *p;
 +	int i;
 +
-+	memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
-+	for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
-+		vidmem[i] = ' ';
++	for (p = memmap.map, i = 0;
++	     p < memmap.map_end;
++	     p += memmap.desc_size, i++) {
++		md = p;
++		printk(KERN_INFO PFX "mem%02u: type=%u, attr=0x%llx, "
++			"range=[0x%016llx-0x%016llx) (%lluMB)\n",
++			i, md->type, md->attribute, md->phys_addr,
++			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
++			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
++	}
 +}
++#endif  /*  EFI_DEBUG  */
 +
-+static void putstr(const char *s)
++void __init efi_init(void)
 +{
-+	int x,y,pos;
-+	char c;
++	efi_config_table_t *config_tables;
++	efi_runtime_services_t *runtime;
++	efi_char16_t *c16;
++	char vendor[100] = "unknown";
++	int i = 0;
++	void *tmp;
 +
 +#ifdef CONFIG_X86_32
-+	if (RM_SCREEN_INFO.orig_video_mode == 0 && lines == 0 && cols == 0)
-+		return;
++	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
++	memmap.phys_map = (void *)boot_params.efi_info.efi_memmap;
++#else
++	efi_phys.systab = (efi_system_table_t *)
++		(boot_params.efi_info.efi_systab |
++		 ((__u64)boot_params.efi_info.efi_systab_hi<<32));
++	memmap.phys_map = (void *)
++		(boot_params.efi_info.efi_memmap |
++		 ((__u64)boot_params.efi_info.efi_memmap_hi<<32));
 +#endif
++	memmap.nr_map = boot_params.efi_info.efi_memmap_size /
++		boot_params.efi_info.efi_memdesc_size;
++	memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
++	memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
 +
-+	x = RM_SCREEN_INFO.orig_x;
-+	y = RM_SCREEN_INFO.orig_y;
++	efi.systab = early_ioremap((unsigned long)efi_phys.systab,
++				   sizeof(efi_system_table_t));
++	if (efi.systab == NULL)
++		printk(KERN_ERR "Couldn't map the EFI system table!\n");
++	memcpy(&efi_systab, efi.systab, sizeof(efi_system_table_t));
++	early_iounmap(efi.systab, sizeof(efi_system_table_t));
++	efi.systab = &efi_systab;
 +
-+	while ( ( c = *s++ ) != '\0' ) {
-+		if ( c == '\n' ) {
-+			x = 0;
-+			if ( ++y >= lines ) {
-+				scroll();
-+				y--;
-+			}
-+		} else {
-+			vidmem [(x + cols * y) * 2] = c;
-+			if ( ++x >= cols ) {
-+				x = 0;
-+				if ( ++y >= lines ) {
-+					scroll();
-+					y--;
-+				}
-+			}
++	/*
++	 * Verify the EFI Table
++	 */
++	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
++		printk(KERN_ERR "EFI system table signature incorrect!\n");
++	if ((efi.systab->hdr.revision >> 16) == 0)
++		printk(KERN_ERR "Warning: EFI system table version "
++		       "%d.%02d, expected 1.00 or greater!\n",
++		       efi.systab->hdr.revision >> 16,
++		       efi.systab->hdr.revision & 0xffff);
++
++	/*
++	 * Show what we know for posterity
++	 */
++	c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
++	if (c16) {
++		for (i = 0; i < sizeof(vendor) && *c16; ++i)
++			vendor[i] = *c16++;
++		vendor[i] = '\0';
++	} else
++		printk(KERN_ERR PFX "Could not map the firmware vendor!\n");
++	early_iounmap(tmp, 2);
++
++	printk(KERN_INFO "EFI v%u.%.02u by %s \n",
++	       efi.systab->hdr.revision >> 16,
++	       efi.systab->hdr.revision & 0xffff, vendor);
++
++	/*
++	 * Let's see what config tables the firmware passed to us.
++	 */
++	config_tables = early_ioremap(
++		efi.systab->tables,
++		efi.systab->nr_tables * sizeof(efi_config_table_t));
++	if (config_tables == NULL)
++		printk(KERN_ERR "Could not map EFI Configuration Table!\n");
++
++	printk(KERN_INFO);
++	for (i = 0; i < efi.systab->nr_tables; i++) {
++		if (!efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID)) {
++			efi.mps = config_tables[i].table;
++			printk(" MPS=0x%lx ", config_tables[i].table);
++		} else if (!efi_guidcmp(config_tables[i].guid,
++					ACPI_20_TABLE_GUID)) {
++			efi.acpi20 = config_tables[i].table;
++			printk(" ACPI 2.0=0x%lx ", config_tables[i].table);
++		} else if (!efi_guidcmp(config_tables[i].guid,
++					ACPI_TABLE_GUID)) {
++			efi.acpi = config_tables[i].table;
++			printk(" ACPI=0x%lx ", config_tables[i].table);
++		} else if (!efi_guidcmp(config_tables[i].guid,
++					SMBIOS_TABLE_GUID)) {
++			efi.smbios = config_tables[i].table;
++			printk(" SMBIOS=0x%lx ", config_tables[i].table);
++		} else if (!efi_guidcmp(config_tables[i].guid,
++					HCDP_TABLE_GUID)) {
++			efi.hcdp = config_tables[i].table;
++			printk(" HCDP=0x%lx ", config_tables[i].table);
++		} else if (!efi_guidcmp(config_tables[i].guid,
++					UGA_IO_PROTOCOL_GUID)) {
++			efi.uga = config_tables[i].table;
++			printk(" UGA=0x%lx ", config_tables[i].table);
 +		}
 +	}
++	printk("\n");
++	early_iounmap(config_tables,
++			  efi.systab->nr_tables * sizeof(efi_config_table_t));
 +
-+	RM_SCREEN_INFO.orig_x = x;
-+	RM_SCREEN_INFO.orig_y = y;
++	/*
++	 * Check out the runtime services table. We need to map
++	 * the runtime services table so that we can grab the physical
++	 * address of several of the EFI runtime functions, needed to
++	 * set the firmware into virtual mode.
++	 */
++	runtime = early_ioremap((unsigned long)efi.systab->runtime,
++				sizeof(efi_runtime_services_t));
++	if (runtime != NULL) {
++		/*
++		 * We will only need *early* access to the following
++		 * two EFI runtime services before set_virtual_address_map
++		 * is invoked.
++		 */
++		efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
++		efi_phys.set_virtual_address_map =
++			(efi_set_virtual_address_map_t *)
++			runtime->set_virtual_address_map;
++		/*
++		 * Make efi_get_time can be called before entering
++		 * virtual mode.
++		 */
++		efi.get_time = phys_efi_get_time;
++	} else
++		printk(KERN_ERR "Could not map the EFI runtime service "
++		       "table!\n");
++	early_iounmap(runtime, sizeof(efi_runtime_services_t));
 +
-+	pos = (x + cols * y) * 2;	/* Update cursor position */
-+	outb(14, vidport);
-+	outb(0xff & (pos >> 9), vidport+1);
-+	outb(15, vidport);
-+	outb(0xff & (pos >> 1), vidport+1);
-+}
++	/* Map the EFI memory map */
++	memmap.map = early_ioremap((unsigned long)memmap.phys_map,
++				   memmap.nr_map * memmap.desc_size);
++	if (memmap.map == NULL)
++		printk(KERN_ERR "Could not map the EFI memory map!\n");
++	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
++	if (memmap.desc_size != sizeof(efi_memory_desc_t))
++		printk(KERN_WARNING "Kernel-defined memdesc"
++		       "doesn't match the one from EFI!\n");
 +
-+static void* memset(void* s, int c, unsigned n)
-+{
-+	int i;
-+	char *ss = s;
++	/* Setup for EFI runtime service */
++	reboot_type = BOOT_EFI;
 +
-+	for (i=0;i<n;i++) ss[i] = c;
-+	return s;
++#if EFI_DEBUG
++	print_efi_memmap();
++#endif
 +}
 +
-+static void* memcpy(void* dest, const void* src, unsigned n)
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
++static void __init runtime_code_page_mkexec(void)
 +{
-+	int i;
-+	const char *s = src;
-+	char *d = dest;
++	efi_memory_desc_t *md;
++	unsigned long end;
++	void *p;
 +
-+	for (i=0;i<n;i++) d[i] = s[i];
-+	return dest;
-+}
++	if (!(__supported_pte_mask & _PAGE_NX))
++		return;
 +
-+/* ===========================================================================
-+ * Fill the input buffer. This is called only when the buffer is empty
-+ * and at least one byte is really needed.
-+ */
-+static int fill_inbuf(void)
-+{
-+	error("ran out of input data");
-+	return 0;
++	/* Make EFI runtime service code area executable */
++	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++		md = p;
++		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
++		if (md->type == EFI_RUNTIME_SERVICES_CODE &&
++		    (end >> PAGE_SHIFT) <= max_pfn_mapped) {
++			set_memory_x(md->virt_addr, md->num_pages);
++			set_memory_uc(md->virt_addr, md->num_pages);
++		}
++	}
++	__flush_tlb_all();
 +}
++#else
++static inline void __init runtime_code_page_mkexec(void) { }
++#endif
 +
-+/* ===========================================================================
-+ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
-+ * (Used for the decompressed data only.)
++/*
++ * This function will switch the EFI runtime services to virtual mode.
++ * Essentially, look through the EFI memmap and map every region that
++ * has the runtime attribute bit set in its memory descriptor and update
++ * that memory descriptor with the virtual address obtained from ioremap().
++ * This enables the runtime services to be called without having to
++ * thunk back into physical mode for every invocation.
 + */
-+static void flush_window(void)
++void __init efi_enter_virtual_mode(void)
 +{
-+	/* With my window equal to my output buffer
-+	 * I only need to compute the crc here.
-+	 */
-+	ulg c = crc;         /* temporary variable */
-+	unsigned n;
-+	uch *in, ch;
++	efi_memory_desc_t *md;
++	efi_status_t status;
++	unsigned long end;
++	void *p;
 +
-+	in = window;
-+	for (n = 0; n < outcnt; n++) {
-+		ch = *in++;
-+		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
++	efi.systab = NULL;
++	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++		md = p;
++		if (!(md->attribute & EFI_MEMORY_RUNTIME))
++			continue;
++		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
++		if ((md->attribute & EFI_MEMORY_WB) &&
++		    ((end >> PAGE_SHIFT) <= max_pfn_mapped))
++			md->virt_addr = (unsigned long)__va(md->phys_addr);
++		else
++			md->virt_addr = (unsigned long)
++				efi_ioremap(md->phys_addr,
++					    md->num_pages << EFI_PAGE_SHIFT);
++		if (!md->virt_addr)
++			printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
++			       (unsigned long long)md->phys_addr);
++		if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
++		    ((unsigned long)efi_phys.systab < end))
++			efi.systab = (efi_system_table_t *)(unsigned long)
++				(md->virt_addr - md->phys_addr +
++				 (unsigned long)efi_phys.systab);
 +	}
-+	crc = c;
-+	bytes_out += (ulg)outcnt;
-+	outcnt = 0;
-+}
 +
-+static void error(char *x)
-+{
-+	putstr("\n\n");
-+	putstr(x);
-+	putstr("\n\n -- System halted");
++	BUG_ON(!efi.systab);
 +
-+	while (1)
-+		asm("hlt");
++	status = phys_efi_set_virtual_address_map(
++		memmap.desc_size * memmap.nr_map,
++		memmap.desc_size,
++		memmap.desc_version,
++		memmap.phys_map);
++
++	if (status != EFI_SUCCESS) {
++		printk(KERN_ALERT "Unable to switch EFI into virtual mode "
++		       "(status=%lx)!\n", status);
++		panic("EFI call to SetVirtualAddressMap() failed!");
++	}
++
++	/*
++	 * Now that EFI is in virtual mode, update the function
++	 * pointers in the runtime service table to the new virtual addresses.
++	 *
++	 * Call EFI services through wrapper functions.
++	 */
++	efi.get_time = virt_efi_get_time;
++	efi.set_time = virt_efi_set_time;
++	efi.get_wakeup_time = virt_efi_get_wakeup_time;
++	efi.set_wakeup_time = virt_efi_set_wakeup_time;
++	efi.get_variable = virt_efi_get_variable;
++	efi.get_next_variable = virt_efi_get_next_variable;
++	efi.set_variable = virt_efi_set_variable;
++	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
++	efi.reset_system = virt_efi_reset_system;
++	efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
++	runtime_code_page_mkexec();
++	early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
++	memmap.map = NULL;
 +}
 +
-+asmlinkage void decompress_kernel(void *rmode, memptr heap,
-+				  uch *input_data, unsigned long input_len,
-+				  uch *output)
++/*
++ * Convenience functions to obtain memory types and attributes
++ */
++u32 efi_mem_type(unsigned long phys_addr)
 +{
-+	real_mode = rmode;
++	efi_memory_desc_t *md;
++	void *p;
 +
-+	if (RM_SCREEN_INFO.orig_video_mode == 7) {
-+		vidmem = (char *) 0xb0000;
-+		vidport = 0x3b4;
-+	} else {
-+		vidmem = (char *) 0xb8000;
-+		vidport = 0x3d4;
++	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++		md = p;
++		if ((md->phys_addr <= phys_addr) &&
++		    (phys_addr < (md->phys_addr +
++				  (md->num_pages << EFI_PAGE_SHIFT))))
++			return md->type;
 +	}
++	return 0;
++}
 +
-+	lines = RM_SCREEN_INFO.orig_video_lines;
-+	cols = RM_SCREEN_INFO.orig_video_cols;
-+
-+	window = output;		/* Output buffer (Normally at 1M) */
-+	free_mem_ptr     = heap;	/* Heap */
-+	free_mem_end_ptr = heap + HEAP_SIZE;
-+	inbuf  = input_data;		/* Input buffer */
-+	insize = input_len;
-+	inptr  = 0;
-+
-+#ifdef CONFIG_X86_64
-+	if ((ulg)output & (__KERNEL_ALIGN - 1))
-+		error("Destination address not 2M aligned");
-+	if ((ulg)output >= 0xffffffffffUL)
-+		error("Destination address too large");
-+#else
-+	if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
-+		error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
-+	if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
-+		error("Destination address too large");
-+#ifndef CONFIG_RELOCATABLE
-+	if ((u32)output != LOAD_PHYSICAL_ADDR)
-+		error("Wrong destination address");
-+#endif
-+#endif
++u64 efi_mem_attributes(unsigned long phys_addr)
++{
++	efi_memory_desc_t *md;
++	void *p;
 +
-+	makecrc();
-+	putstr("\nDecompressing Linux... ");
-+	gunzip();
-+	putstr("done.\nBooting the kernel.\n");
-+	return;
++	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++		md = p;
++		if ((md->phys_addr <= phys_addr) &&
++		    (phys_addr < (md->phys_addr +
++				  (md->num_pages << EFI_PAGE_SHIFT))))
++			return md->attribute;
++	}
++	return 0;
 +}
-diff --git a/arch/x86/boot/compressed/misc_32.c b/arch/x86/boot/compressed/misc_32.c
-deleted file mode 100644
-index b74d60d..0000000
---- a/arch/x86/boot/compressed/misc_32.c
-+++ /dev/null
-@@ -1,382 +0,0 @@
--/*
-- * misc.c
-- * 
-- * This is a collection of several routines from gzip-1.0.3 
-- * adapted for Linux.
-- *
-- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
-- * puts by Nick Holloway 1993, better puts by Martin Mares 1995
-- * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
-- */
--
--#undef CONFIG_PARAVIRT
--#include <linux/linkage.h>
--#include <linux/vmalloc.h>
--#include <linux/screen_info.h>
--#include <asm/io.h>
--#include <asm/page.h>
--#include <asm/boot.h>
--
--/* WARNING!!
-- * This code is compiled with -fPIC and it is relocated dynamically
-- * at run time, but no relocation processing is performed.
-- * This means that it is not safe to place pointers in static structures.
-- */
--
--/*
-- * Getting to provable safe in place decompression is hard.
-- * Worst case behaviours need to be analyzed.
-- * Background information:
-- *
-- * The file layout is:
-- *    magic[2]
-- *    method[1]
-- *    flags[1]
-- *    timestamp[4]
-- *    extraflags[1]
-- *    os[1]
-- *    compressed data blocks[N]
-- *    crc[4] orig_len[4]
-- *
-- * resulting in 18 bytes of non compressed data overhead.
-- *
-- * Files divided into blocks
-- * 1 bit (last block flag)
-- * 2 bits (block type)
-- *
-- * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved.
-- * The smallest block type encoding is always used.
-- *
-- * stored:
-- *    32 bits length in bytes.
-- *
-- * fixed:
-- *    magic fixed tree.
-- *    symbols.
-- *
-- * dynamic:
-- *    dynamic tree encoding.
-- *    symbols.
-- *
-- *
-- * The buffer for decompression in place is the length of the
-- * uncompressed data, plus a small amount extra to keep the algorithm safe.
-- * The compressed data is placed at the end of the buffer.  The output
-- * pointer is placed at the start of the buffer and the input pointer
-- * is placed where the compressed data starts.  Problems will occur
-- * when the output pointer overruns the input pointer.
-- *
-- * The output pointer can only overrun the input pointer if the input
-- * pointer is moving faster than the output pointer.  A condition only
-- * triggered by data whose compressed form is larger than the uncompressed
-- * form.
-- *
-- * The worst case at the block level is a growth of the compressed data
-- * of 5 bytes per 32767 bytes.
-- *
-- * The worst case internal to a compressed block is very hard to figure.
-- * The worst case can at least be boundined by having one bit that represents
-- * 32764 bytes and then all of the rest of the bytes representing the very
-- * very last byte.
-- *
-- * All of which is enough to compute an amount of extra data that is required
-- * to be safe.  To avoid problems at the block level allocating 5 extra bytes
-- * per 32767 bytes of data is sufficient.  To avoind problems internal to a block
-- * adding an extra 32767 bytes (the worst case uncompressed block size) is
-- * sufficient, to ensure that in the worst case the decompressed data for
-- * block will stop the byte before the compressed data for a block begins.
-- * To avoid problems with the compressed data's meta information an extra 18
-- * bytes are needed.  Leading to the formula:
-- *
-- * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
-- *
-- * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
-- * Adding 32768 instead of 32767 just makes for round numbers.
-- * Adding the decompressor_size is necessary as it musht live after all
-- * of the data as well.  Last I measured the decompressor is about 14K.
-- * 10K of actual data and 4K of bss.
-- *
-- */
--
--/*
-- * gzip declarations
-- */
--
--#define OF(args)  args
--#define STATIC static
--
--#undef memset
--#undef memcpy
--#define memzero(s, n)     memset ((s), 0, (n))
--
--typedef unsigned char  uch;
--typedef unsigned short ush;
--typedef unsigned long  ulg;
--
--#define WSIZE 0x80000000	/* Window size must be at least 32k,
--				 * and a power of two
--				 * We don't actually have a window just
--				 * a huge output buffer so I report
--				 * a 2G windows size, as that should
--				 * always be larger than our output buffer.
--				 */
--
--static uch *inbuf;	/* input buffer */
--static uch *window;	/* Sliding window buffer, (and final output buffer) */
--
--static unsigned insize;  /* valid bytes in inbuf */
--static unsigned inptr;   /* index of next byte to be processed in inbuf */
--static unsigned outcnt;  /* bytes in output buffer */
+diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
+index e2be78f..cb91f98 100644
+--- a/arch/x86/kernel/efi_32.c
++++ b/arch/x86/kernel/efi_32.c
+@@ -20,40 +20,15 @@
+  */
+ 
+ #include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/mm.h>
+ #include <linux/types.h>
+-#include <linux/time.h>
+-#include <linux/spinlock.h>
+-#include <linux/bootmem.h>
+ #include <linux/ioport.h>
+-#include <linux/module.h>
+ #include <linux/efi.h>
+-#include <linux/kexec.h>
+ 
+-#include <asm/setup.h>
+ #include <asm/io.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+-#include <asm/processor.h>
+-#include <asm/desc.h>
+ #include <asm/tlbflush.h>
+ 
+-#define EFI_DEBUG	0
+-#define PFX 		"EFI: "
 -
--/* gzip flag byte */
--#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
--#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
--#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
--#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
--#define COMMENT      0x10 /* bit 4 set: file comment present */
--#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
--#define RESERVED     0xC0 /* bit 6,7:   reserved */
+-extern efi_status_t asmlinkage efi_call_phys(void *, ...);
 -
--#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
--		
--/* Diagnostic functions */
--#ifdef DEBUG
--#  define Assert(cond,msg) {if(!(cond)) error(msg);}
--#  define Trace(x) fprintf x
--#  define Tracev(x) {if (verbose) fprintf x ;}
--#  define Tracevv(x) {if (verbose>1) fprintf x ;}
--#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
--#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
--#else
--#  define Assert(cond,msg)
--#  define Trace(x)
--#  define Tracev(x)
--#  define Tracevv(x)
--#  define Tracec(c,x)
--#  define Tracecv(c,x)
--#endif
+-struct efi efi;
+-EXPORT_SYMBOL(efi);
+-static struct efi efi_phys;
+-struct efi_memory_map memmap;
 -
--static int  fill_inbuf(void);
--static void flush_window(void);
--static void error(char *m);
--static void gzip_mark(void **);
--static void gzip_release(void **);
--  
 -/*
-- * This is set up by the setup-routine at boot-time
+- * We require an early boot_ioremap mapping mechanism initially
 - */
--static unsigned char *real_mode; /* Pointer to real-mode data */
--
--#define RM_EXT_MEM_K   (*(unsigned short *)(real_mode + 0x2))
--#ifndef STANDARD_MEMORY_BIOS_CALL
--#define RM_ALT_MEM_K   (*(unsigned long *)(real_mode + 0x1e0))
--#endif
--#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
--
--extern unsigned char input_data[];
--extern int input_len;
--
--static long bytes_out = 0;
--
--static void *malloc(int size);
--static void free(void *where);
--
--static void *memset(void *s, int c, unsigned n);
--static void *memcpy(void *dest, const void *src, unsigned n);
--
--static void putstr(const char *);
--
--static unsigned long free_mem_ptr;
--static unsigned long free_mem_end_ptr;
--
--#define HEAP_SIZE             0x4000
--
--static char *vidmem = (char *)0xb8000;
--static int vidport;
--static int lines, cols;
--
--#ifdef CONFIG_X86_NUMAQ
--void *xquad_portio;
--#endif
--
--#include "../../../../lib/inflate.c"
--
--static void *malloc(int size)
--{
--	void *p;
--
--	if (size <0) error("Malloc error");
--	if (free_mem_ptr <= 0) error("Memory error");
--
--	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
--
--	p = (void *)free_mem_ptr;
--	free_mem_ptr += size;
--
--	if (free_mem_ptr >= free_mem_end_ptr)
--		error("Out of memory");
--
--	return p;
--}
+-extern void * boot_ioremap(unsigned long, unsigned long);
 -
--static void free(void *where)
--{	/* Don't care */
+ /*
+  * To make EFI call EFI runtime service in physical addressing mode we need
+  * prelog/epilog before/after the invocation to disable interrupt, to
+@@ -62,16 +37,14 @@ extern void * boot_ioremap(unsigned long, unsigned long);
+  */
+ 
+ static unsigned long efi_rt_eflags;
+-static DEFINE_SPINLOCK(efi_rt_lock);
+ static pgd_t efi_bak_pg_dir_pointer[2];
+ 
+-static void efi_call_phys_prelog(void) __acquires(efi_rt_lock)
++void efi_call_phys_prelog(void)
+ {
+ 	unsigned long cr4;
+ 	unsigned long temp;
+-	struct Xgt_desc_struct gdt_descr;
++	struct desc_ptr gdt_descr;
+ 
+-	spin_lock(&efi_rt_lock);
+ 	local_irq_save(efi_rt_eflags);
+ 
+ 	/*
+@@ -101,17 +74,17 @@ static void efi_call_phys_prelog(void) __acquires(efi_rt_lock)
+ 	/*
+ 	 * After the lock is released, the original page table is restored.
+ 	 */
+-	local_flush_tlb();
++	__flush_tlb_all();
+ 
+ 	gdt_descr.address = __pa(get_cpu_gdt_table(0));
+ 	gdt_descr.size = GDT_SIZE - 1;
+ 	load_gdt(&gdt_descr);
+ }
+ 
+-static void efi_call_phys_epilog(void) __releases(efi_rt_lock)
++void efi_call_phys_epilog(void)
+ {
+ 	unsigned long cr4;
+-	struct Xgt_desc_struct gdt_descr;
++	struct desc_ptr gdt_descr;
+ 
+ 	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
+ 	gdt_descr.size = GDT_SIZE - 1;
+@@ -132,586 +105,7 @@ static void efi_call_phys_epilog(void) __releases(efi_rt_lock)
+ 	/*
+ 	 * After the lock is released, the original page table is restored.
+ 	 */
+-	local_flush_tlb();
++	__flush_tlb_all();
+ 
+ 	local_irq_restore(efi_rt_eflags);
+-	spin_unlock(&efi_rt_lock);
 -}
 -
--static void gzip_mark(void **ptr)
+-static efi_status_t
+-phys_efi_set_virtual_address_map(unsigned long memory_map_size,
+-				 unsigned long descriptor_size,
+-				 u32 descriptor_version,
+-				 efi_memory_desc_t *virtual_map)
 -{
--	*ptr = (void *) free_mem_ptr;
--}
+-	efi_status_t status;
 -
--static void gzip_release(void **ptr)
--{
--	free_mem_ptr = (unsigned long) *ptr;
+-	efi_call_phys_prelog();
+-	status = efi_call_phys(efi_phys.set_virtual_address_map,
+-				     memory_map_size, descriptor_size,
+-				     descriptor_version, virtual_map);
+-	efi_call_phys_epilog();
+-	return status;
 -}
-- 
--static void scroll(void)
+-
+-static efi_status_t
+-phys_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 -{
--	int i;
+-	efi_status_t status;
 -
--	memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
--	for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
--		vidmem[i] = ' ';
+-	efi_call_phys_prelog();
+-	status = efi_call_phys(efi_phys.get_time, tm, tc);
+-	efi_call_phys_epilog();
+-	return status;
 -}
 -
--static void putstr(const char *s)
+-inline int efi_set_rtc_mmss(unsigned long nowtime)
 -{
--	int x,y,pos;
--	char c;
--
--	if (RM_SCREEN_INFO.orig_video_mode == 0 && lines == 0 && cols == 0)
--		return;
+-	int real_seconds, real_minutes;
+-	efi_status_t 	status;
+-	efi_time_t 	eft;
+-	efi_time_cap_t 	cap;
 -
--	x = RM_SCREEN_INFO.orig_x;
--	y = RM_SCREEN_INFO.orig_y;
+-	spin_lock(&efi_rt_lock);
+-	status = efi.get_time(&eft, &cap);
+-	spin_unlock(&efi_rt_lock);
+-	if (status != EFI_SUCCESS)
+-		panic("Ooops, efitime: can't read time!\n");
+-	real_seconds = nowtime % 60;
+-	real_minutes = nowtime / 60;
 -
--	while ( ( c = *s++ ) != '\0' ) {
--		if ( c == '\n' ) {
--			x = 0;
--			if ( ++y >= lines ) {
--				scroll();
--				y--;
--			}
--		} else {
--			vidmem [ ( x + cols * y ) * 2 ] = c;
--			if ( ++x >= cols ) {
--				x = 0;
--				if ( ++y >= lines ) {
--					scroll();
--					y--;
--				}
--			}
--		}
--	}
+-	if (((abs(real_minutes - eft.minute) + 15)/30) & 1)
+-		real_minutes += 30;
+-	real_minutes %= 60;
 -
--	RM_SCREEN_INFO.orig_x = x;
--	RM_SCREEN_INFO.orig_y = y;
+-	eft.minute = real_minutes;
+-	eft.second = real_seconds;
 -
--	pos = (x + cols * y) * 2;	/* Update cursor position */
--	outb_p(14, vidport);
--	outb_p(0xff & (pos >> 9), vidport+1);
--	outb_p(15, vidport);
--	outb_p(0xff & (pos >> 1), vidport+1);
+-	if (status != EFI_SUCCESS) {
+-		printk("Ooops: efitime: can't read time!\n");
+-		return -1;
+-	}
+-	return 0;
 -}
--
--static void* memset(void* s, int c, unsigned n)
+-/*
+- * This is used during kernel init before runtime
+- * services have been remapped and also during suspend, therefore,
+- * we'll need to call both in physical and virtual modes.
+- */
+-inline unsigned long efi_get_time(void)
 -{
--	int i;
--	char *ss = (char*)s;
+-	efi_status_t status;
+-	efi_time_t eft;
+-	efi_time_cap_t cap;
 -
--	for (i=0;i<n;i++) ss[i] = c;
--	return s;
--}
+-	if (efi.get_time) {
+-		/* if we are in virtual mode use remapped function */
+- 		status = efi.get_time(&eft, &cap);
+-	} else {
+-		/* we are in physical mode */
+-		status = phys_efi_get_time(&eft, &cap);
+-	}
 -
--static void* memcpy(void* dest, const void* src, unsigned n)
--{
--	int i;
--	char *d = (char *)dest, *s = (char *)src;
+-	if (status != EFI_SUCCESS)
+-		printk("Oops: efitime: can't read time status: 0x%lx\n",status);
 -
--	for (i=0;i<n;i++) d[i] = s[i];
--	return dest;
+-	return mktime(eft.year, eft.month, eft.day, eft.hour,
+-			eft.minute, eft.second);
 -}
 -
--/* ===========================================================================
-- * Fill the input buffer. This is called only when the buffer is empty
-- * and at least one byte is really needed.
-- */
--static int fill_inbuf(void)
+-int is_available_memory(efi_memory_desc_t * md)
 -{
--	error("ran out of input data");
+-	if (!(md->attribute & EFI_MEMORY_WB))
+-		return 0;
+-
+-	switch (md->type) {
+-		case EFI_LOADER_CODE:
+-		case EFI_LOADER_DATA:
+-		case EFI_BOOT_SERVICES_CODE:
+-		case EFI_BOOT_SERVICES_DATA:
+-		case EFI_CONVENTIONAL_MEMORY:
+-			return 1;
+-	}
 -	return 0;
 -}
 -
--/* ===========================================================================
-- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
-- * (Used for the decompressed data only.)
+-/*
+- * We need to map the EFI memory map again after paging_init().
 - */
--static void flush_window(void)
+-void __init efi_map_memmap(void)
 -{
--	/* With my window equal to my output buffer
--	 * I only need to compute the crc here.
--	 */
--	ulg c = crc;         /* temporary variable */
--	unsigned n;
--	uch *in, ch;
--
--	in = window;
--	for (n = 0; n < outcnt; n++) {
--		ch = *in++;
--		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
--	}
--	crc = c;
--	bytes_out += (ulg)outcnt;
--	outcnt = 0;
--}
+-	memmap.map = NULL;
 -
--static void error(char *x)
--{
--	putstr("\n\n");
--	putstr(x);
--	putstr("\n\n -- System halted");
+-	memmap.map = bt_ioremap((unsigned long) memmap.phys_map,
+-			(memmap.nr_map * memmap.desc_size));
+-	if (memmap.map == NULL)
+-		printk(KERN_ERR PFX "Could not remap the EFI memmap!\n");
 -
--	while(1);	/* Halt */
+-	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
 -}
 -
--asmlinkage void decompress_kernel(void *rmode, unsigned long end,
--			uch *input_data, unsigned long input_len, uch *output)
+-#if EFI_DEBUG
+-static void __init print_efi_memmap(void)
 -{
--	real_mode = rmode;
+-	efi_memory_desc_t *md;
+-	void *p;
+-	int i;
 -
--	if (RM_SCREEN_INFO.orig_video_mode == 7) {
--		vidmem = (char *) 0xb0000;
--		vidport = 0x3b4;
--	} else {
--		vidmem = (char *) 0xb8000;
--		vidport = 0x3d4;
+-	for (p = memmap.map, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
+-		md = p;
+-		printk(KERN_INFO "mem%02u: type=%u, attr=0x%llx, "
+-			"range=[0x%016llx-0x%016llx) (%lluMB)\n",
+-			i, md->type, md->attribute, md->phys_addr,
+-			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+-			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
 -	}
--
--	lines = RM_SCREEN_INFO.orig_video_lines;
--	cols = RM_SCREEN_INFO.orig_video_cols;
--
--	window = output;  	/* Output buffer (Normally at 1M) */
--	free_mem_ptr     = end;	/* Heap  */
--	free_mem_end_ptr = end + HEAP_SIZE;
--	inbuf  = input_data;	/* Input buffer */
--	insize = input_len;
--	inptr  = 0;
--
--	if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
--		error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
--	if (end > ((-__PAGE_OFFSET-(512 <<20)-1) & 0x7fffffff))
--		error("Destination address too large");
--#ifndef CONFIG_RELOCATABLE
--	if ((u32)output != LOAD_PHYSICAL_ADDR)
--		error("Wrong destination address");
--#endif
--
--	makecrc();
--	putstr("Uncompressing Linux... ");
--	gunzip();
--	putstr("Ok, booting the kernel.\n");
--	return;
 -}
-diff --git a/arch/x86/boot/compressed/misc_64.c b/arch/x86/boot/compressed/misc_64.c
-deleted file mode 100644
-index 6ea015a..0000000
---- a/arch/x86/boot/compressed/misc_64.c
-+++ /dev/null
-@@ -1,371 +0,0 @@
+-#endif  /*  EFI_DEBUG  */
+-
 -/*
-- * misc.c
-- * 
-- * This is a collection of several routines from gzip-1.0.3 
-- * adapted for Linux.
-- *
-- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
-- * puts by Nick Holloway 1993, better puts by Martin Mares 1995
-- * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+- * Walks the EFI memory map and calls CALLBACK once for each EFI
+- * memory descriptor that has memory that is available for kernel use.
 - */
+-void efi_memmap_walk(efi_freemem_callback_t callback, void *arg)
+-{
+-	int prev_valid = 0;
+-	struct range {
+-		unsigned long start;
+-		unsigned long end;
+-	} uninitialized_var(prev), curr;
+-	efi_memory_desc_t *md;
+-	unsigned long start, end;
+-	void *p;
 -
--#define _LINUX_STRING_H_ 1
--#define __LINUX_BITMAP_H 1
--
--#include <linux/linkage.h>
--#include <linux/screen_info.h>
--#include <asm/io.h>
--#include <asm/page.h>
--
--/* WARNING!!
-- * This code is compiled with -fPIC and it is relocated dynamically
-- * at run time, but no relocation processing is performed.
-- * This means that it is not safe to place pointers in static structures.
-- */
+-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+-		md = p;
 -
--/*
-- * Getting to provable safe in place decompression is hard.
-- * Worst case behaviours need to be analyzed.
-- * Background information:
-- *
-- * The file layout is:
-- *    magic[2]
-- *    method[1]
-- *    flags[1]
-- *    timestamp[4]
-- *    extraflags[1]
-- *    os[1]
-- *    compressed data blocks[N]
-- *    crc[4] orig_len[4]
-- *
-- * resulting in 18 bytes of non compressed data overhead.
-- *
-- * Files divided into blocks
-- * 1 bit (last block flag)
-- * 2 bits (block type)
-- *
-- * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved.
-- * The smallest block type encoding is always used.
-- *
-- * stored:
-- *    32 bits length in bytes.
-- *
-- * fixed:
-- *    magic fixed tree.
-- *    symbols.
-- *
-- * dynamic:
-- *    dynamic tree encoding.
-- *    symbols.
-- *
-- *
-- * The buffer for decompression in place is the length of the
-- * uncompressed data, plus a small amount extra to keep the algorithm safe.
-- * The compressed data is placed at the end of the buffer.  The output
-- * pointer is placed at the start of the buffer and the input pointer
-- * is placed where the compressed data starts.  Problems will occur
-- * when the output pointer overruns the input pointer.
-- *
-- * The output pointer can only overrun the input pointer if the input
-- * pointer is moving faster than the output pointer.  A condition only
-- * triggered by data whose compressed form is larger than the uncompressed
-- * form.
-- *
-- * The worst case at the block level is a growth of the compressed data
-- * of 5 bytes per 32767 bytes.
-- *
-- * The worst case internal to a compressed block is very hard to figure.
-- * The worst case can at least be boundined by having one bit that represents
-- * 32764 bytes and then all of the rest of the bytes representing the very
-- * very last byte.
-- *
-- * All of which is enough to compute an amount of extra data that is required
-- * to be safe.  To avoid problems at the block level allocating 5 extra bytes
-- * per 32767 bytes of data is sufficient.  To avoind problems internal to a block
-- * adding an extra 32767 bytes (the worst case uncompressed block size) is
-- * sufficient, to ensure that in the worst case the decompressed data for
-- * block will stop the byte before the compressed data for a block begins.
-- * To avoid problems with the compressed data's meta information an extra 18
-- * bytes are needed.  Leading to the formula:
-- *
-- * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
-- *
-- * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
-- * Adding 32768 instead of 32767 just makes for round numbers.
-- * Adding the decompressor_size is necessary as it musht live after all
-- * of the data as well.  Last I measured the decompressor is about 14K.
-- * 10K of actual data and 4K of bss.
-- *
-- */
+-		if ((md->num_pages == 0) || (!is_available_memory(md)))
+-			continue;
 -
--/*
-- * gzip declarations
-- */
+-		curr.start = md->phys_addr;
+-		curr.end = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
 -
--#define OF(args)  args
--#define STATIC static
+-		if (!prev_valid) {
+-			prev = curr;
+-			prev_valid = 1;
+-		} else {
+-			if (curr.start < prev.start)
+-				printk(KERN_INFO PFX "Unordered memory map\n");
+-			if (prev.end == curr.start)
+-				prev.end = curr.end;
+-			else {
+-				start =
+-				    (unsigned long) (PAGE_ALIGN(prev.start));
+-				end = (unsigned long) (prev.end & PAGE_MASK);
+-				if ((end > start)
+-				    && (*callback) (start, end, arg) < 0)
+-					return;
+-				prev = curr;
+-			}
+-		}
+-	}
+-	if (prev_valid) {
+-		start = (unsigned long) PAGE_ALIGN(prev.start);
+-		end = (unsigned long) (prev.end & PAGE_MASK);
+-		if (end > start)
+-			(*callback) (start, end, arg);
+-	}
+-}
 -
--#undef memset
--#undef memcpy
--#define memzero(s, n)     memset ((s), 0, (n))
+-void __init efi_init(void)
+-{
+-	efi_config_table_t *config_tables;
+-	efi_runtime_services_t *runtime;
+-	efi_char16_t *c16;
+-	char vendor[100] = "unknown";
+-	unsigned long num_config_tables;
+-	int i = 0;
 -
--typedef unsigned char  uch;
--typedef unsigned short ush;
--typedef unsigned long  ulg;
+-	memset(&efi, 0, sizeof(efi) );
+-	memset(&efi_phys, 0, sizeof(efi_phys));
 -
--#define WSIZE 0x80000000	/* Window size must be at least 32k,
--				 * and a power of two
--				 * We don't actually have a window just
--				 * a huge output buffer so I report
--				 * a 2G windows size, as that should
--				 * always be larger than our output buffer.
--				 */
+-	efi_phys.systab =
+-		(efi_system_table_t *)boot_params.efi_info.efi_systab;
+-	memmap.phys_map = (void *)boot_params.efi_info.efi_memmap;
+-	memmap.nr_map = boot_params.efi_info.efi_memmap_size/
+-		boot_params.efi_info.efi_memdesc_size;
+-	memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
+-	memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
 -
--static uch *inbuf;	/* input buffer */
--static uch *window;	/* Sliding window buffer, (and final output buffer) */
+-	efi.systab = (efi_system_table_t *)
+-		boot_ioremap((unsigned long) efi_phys.systab,
+-			sizeof(efi_system_table_t));
+-	/*
+-	 * Verify the EFI Table
+-	 */
+-	if (efi.systab == NULL)
+-		printk(KERN_ERR PFX "Woah! Couldn't map the EFI system table.\n");
+-	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+-		printk(KERN_ERR PFX "Woah! EFI system table signature incorrect\n");
+-	if ((efi.systab->hdr.revision >> 16) == 0)
+-		printk(KERN_ERR PFX "Warning: EFI system table version "
+-		       "%d.%02d, expected 1.00 or greater\n",
+-		       efi.systab->hdr.revision >> 16,
+-		       efi.systab->hdr.revision & 0xffff);
 -
--static unsigned insize;  /* valid bytes in inbuf */
--static unsigned inptr;   /* index of next byte to be processed in inbuf */
--static unsigned outcnt;  /* bytes in output buffer */
+-	/*
+-	 * Grab some details from the system table
+-	 */
+-	num_config_tables = efi.systab->nr_tables;
+-	config_tables = (efi_config_table_t *)efi.systab->tables;
+-	runtime = efi.systab->runtime;
 -
--/* gzip flag byte */
--#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
--#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
--#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
--#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
--#define COMMENT      0x10 /* bit 4 set: file comment present */
--#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
--#define RESERVED     0xC0 /* bit 6,7:   reserved */
+-	/*
+-	 * Show what we know for posterity
+-	 */
+-	c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2);
+-	if (c16) {
+-		for (i = 0; i < (sizeof(vendor) - 1) && *c16; ++i)
+-			vendor[i] = *c16++;
+-		vendor[i] = '\0';
+-	} else
+-		printk(KERN_ERR PFX "Could not map the firmware vendor!\n");
 -
--#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
--		
--/* Diagnostic functions */
--#ifdef DEBUG
--#  define Assert(cond,msg) {if(!(cond)) error(msg);}
--#  define Trace(x) fprintf x
--#  define Tracev(x) {if (verbose) fprintf x ;}
--#  define Tracevv(x) {if (verbose>1) fprintf x ;}
--#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
--#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
--#else
--#  define Assert(cond,msg)
--#  define Trace(x)
--#  define Tracev(x)
--#  define Tracevv(x)
--#  define Tracec(c,x)
--#  define Tracecv(c,x)
--#endif
+-	printk(KERN_INFO PFX "EFI v%u.%.02u by %s \n",
+-	       efi.systab->hdr.revision >> 16,
+-	       efi.systab->hdr.revision & 0xffff, vendor);
 -
--static int  fill_inbuf(void);
--static void flush_window(void);
--static void error(char *m);
--static void gzip_mark(void **);
--static void gzip_release(void **);
--  
--/*
-- * This is set up by the setup-routine at boot-time
-- */
--static unsigned char *real_mode; /* Pointer to real-mode data */
+-	/*
+-	 * Let's see what config tables the firmware passed to us.
+-	 */
+-	config_tables = (efi_config_table_t *)
+-				boot_ioremap((unsigned long) config_tables,
+-			        num_config_tables * sizeof(efi_config_table_t));
 -
--#define RM_EXT_MEM_K   (*(unsigned short *)(real_mode + 0x2))
--#ifndef STANDARD_MEMORY_BIOS_CALL
--#define RM_ALT_MEM_K   (*(unsigned long *)(real_mode + 0x1e0))
--#endif
--#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
+-	if (config_tables == NULL)
+-		printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n");
 -
--extern unsigned char input_data[];
--extern int input_len;
+-	efi.mps        = EFI_INVALID_TABLE_ADDR;
+-	efi.acpi       = EFI_INVALID_TABLE_ADDR;
+-	efi.acpi20     = EFI_INVALID_TABLE_ADDR;
+-	efi.smbios     = EFI_INVALID_TABLE_ADDR;
+-	efi.sal_systab = EFI_INVALID_TABLE_ADDR;
+-	efi.boot_info  = EFI_INVALID_TABLE_ADDR;
+-	efi.hcdp       = EFI_INVALID_TABLE_ADDR;
+-	efi.uga        = EFI_INVALID_TABLE_ADDR;
 -
--static long bytes_out = 0;
+-	for (i = 0; i < num_config_tables; i++) {
+-		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
+-			efi.mps = config_tables[i].table;
+-			printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table);
+-		} else
+-		    if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
+-			efi.acpi20 = config_tables[i].table;
+-			printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table);
+-		} else
+-		    if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
+-			efi.acpi = config_tables[i].table;
+-			printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table);
+-		} else
+-		    if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
+-			efi.smbios = config_tables[i].table;
+-			printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table);
+-		} else
+-		    if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
+-			efi.hcdp = config_tables[i].table;
+-			printk(KERN_INFO " HCDP=0x%lx ", config_tables[i].table);
+-		} else
+-		    if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) {
+-			efi.uga = config_tables[i].table;
+-			printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table);
+-		}
+-	}
+-	printk("\n");
 -
--static void *malloc(int size);
--static void free(void *where);
+-	/*
+-	 * Check out the runtime services table. We need to map
+-	 * the runtime services table so that we can grab the physical
+-	 * address of several of the EFI runtime functions, needed to
+-	 * set the firmware into virtual mode.
+-	 */
 -
--static void *memset(void *s, int c, unsigned n);
--static void *memcpy(void *dest, const void *src, unsigned n);
+-	runtime = (efi_runtime_services_t *) boot_ioremap((unsigned long)
+-						runtime,
+-				      		sizeof(efi_runtime_services_t));
+-	if (runtime != NULL) {
+-		/*
+-	 	 * We will only need *early* access to the following
+-		 * two EFI runtime services before set_virtual_address_map
+-		 * is invoked.
+- 	 	 */
+-		efi_phys.get_time = (efi_get_time_t *) runtime->get_time;
+-		efi_phys.set_virtual_address_map =
+-			(efi_set_virtual_address_map_t *)
+-				runtime->set_virtual_address_map;
+-	} else
+-		printk(KERN_ERR PFX "Could not map the runtime service table!\n");
 -
--static void putstr(const char *);
+-	/* Map the EFI memory map for use until paging_init() */
+-	memmap.map = boot_ioremap(boot_params.efi_info.efi_memmap,
+-				  boot_params.efi_info.efi_memmap_size);
+-	if (memmap.map == NULL)
+-		printk(KERN_ERR PFX "Could not map the EFI memory map!\n");
 -
--static long free_mem_ptr;
--static long free_mem_end_ptr;
+-	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
 -
--#define HEAP_SIZE             0x7000
+-#if EFI_DEBUG
+-	print_efi_memmap();
+-#endif
+-}
 -
--static char *vidmem = (char *)0xb8000;
--static int vidport;
--static int lines, cols;
+-static inline void __init check_range_for_systab(efi_memory_desc_t *md)
+-{
+-	if (((unsigned long)md->phys_addr <= (unsigned long)efi_phys.systab) &&
+-		((unsigned long)efi_phys.systab < md->phys_addr +
+-		((unsigned long)md->num_pages << EFI_PAGE_SHIFT))) {
+-		unsigned long addr;
 -
--#include "../../../../lib/inflate.c"
+-		addr = md->virt_addr - md->phys_addr +
+-			(unsigned long)efi_phys.systab;
+-		efi.systab = (efi_system_table_t *)addr;
+-	}
+-}
 -
--static void *malloc(int size)
--{
--	void *p;
+-/*
+- * Wrap all the virtual calls in a way that forces the parameters on the stack.
+- */
 -
--	if (size <0) error("Malloc error");
--	if (free_mem_ptr <= 0) error("Memory error");
+-#define efi_call_virt(f, args...) \
+-     ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
 -
--	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
+-static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+-{
+-	return efi_call_virt(get_time, tm, tc);
+-}
 -
--	p = (void *)free_mem_ptr;
--	free_mem_ptr += size;
+-static efi_status_t virt_efi_set_time (efi_time_t *tm)
+-{
+-	return efi_call_virt(set_time, tm);
+-}
 -
--	if (free_mem_ptr >= free_mem_end_ptr)
--		error("Out of memory");
+-static efi_status_t virt_efi_get_wakeup_time (efi_bool_t *enabled,
+-					      efi_bool_t *pending,
+-					      efi_time_t *tm)
+-{
+-	return efi_call_virt(get_wakeup_time, enabled, pending, tm);
+-}
 -
--	return p;
+-static efi_status_t virt_efi_set_wakeup_time (efi_bool_t enabled,
+-					      efi_time_t *tm)
+-{
+-	return efi_call_virt(set_wakeup_time, enabled, tm);
 -}
 -
--static void free(void *where)
--{	/* Don't care */
+-static efi_status_t virt_efi_get_variable (efi_char16_t *name,
+-					   efi_guid_t *vendor, u32 *attr,
+-					   unsigned long *data_size, void *data)
+-{
+-	return efi_call_virt(get_variable, name, vendor, attr, data_size, data);
 -}
 -
--static void gzip_mark(void **ptr)
+-static efi_status_t virt_efi_get_next_variable (unsigned long *name_size,
+-						efi_char16_t *name,
+-						efi_guid_t *vendor)
 -{
--	*ptr = (void *) free_mem_ptr;
+-	return efi_call_virt(get_next_variable, name_size, name, vendor);
 -}
 -
--static void gzip_release(void **ptr)
+-static efi_status_t virt_efi_set_variable (efi_char16_t *name,
+-					   efi_guid_t *vendor,
+-					   unsigned long attr,
+-					   unsigned long data_size, void *data)
 -{
--	free_mem_ptr = (long) *ptr;
+-	return efi_call_virt(set_variable, name, vendor, attr, data_size, data);
 -}
-- 
--static void scroll(void)
+-
+-static efi_status_t virt_efi_get_next_high_mono_count (u32 *count)
 -{
--	int i;
+-	return efi_call_virt(get_next_high_mono_count, count);
+-}
 -
--	memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
--	for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
--		vidmem[i] = ' ';
+-static void virt_efi_reset_system (int reset_type, efi_status_t status,
+-				   unsigned long data_size,
+-				   efi_char16_t *data)
+-{
+-	efi_call_virt(reset_system, reset_type, status, data_size, data);
 -}
 -
--static void putstr(const char *s)
+-/*
+- * This function will switch the EFI runtime services to virtual mode.
+- * Essentially, look through the EFI memmap and map every region that
+- * has the runtime attribute bit set in its memory descriptor and update
+- * that memory descriptor with the virtual address obtained from ioremap().
+- * This enables the runtime services to be called without having to
+- * thunk back into physical mode for every invocation.
+- */
+-
+-void __init efi_enter_virtual_mode(void)
 -{
--	int x,y,pos;
--	char c;
+-	efi_memory_desc_t *md;
+-	efi_status_t status;
+-	void *p;
 -
--	x = RM_SCREEN_INFO.orig_x;
--	y = RM_SCREEN_INFO.orig_y;
+-	efi.systab = NULL;
 -
--	while ( ( c = *s++ ) != '\0' ) {
--		if ( c == '\n' ) {
--			x = 0;
--			if ( ++y >= lines ) {
--				scroll();
--				y--;
--			}
--		} else {
--			vidmem [ ( x + cols * y ) * 2 ] = c; 
--			if ( ++x >= cols ) {
--				x = 0;
--				if ( ++y >= lines ) {
--					scroll();
--					y--;
--				}
--			}
+-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+-		md = p;
+-
+-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+-			continue;
+-
+-		md->virt_addr = (unsigned long)ioremap(md->phys_addr,
+-			md->num_pages << EFI_PAGE_SHIFT);
+-		if (!(unsigned long)md->virt_addr) {
+-			printk(KERN_ERR PFX "ioremap of 0x%lX failed\n",
+-				(unsigned long)md->phys_addr);
 -		}
+-		/* update the virtual address of the EFI system table */
+-		check_range_for_systab(md);
 -	}
 -
--	RM_SCREEN_INFO.orig_x = x;
--	RM_SCREEN_INFO.orig_y = y;
--
--	pos = (x + cols * y) * 2;	/* Update cursor position */
--	outb_p(14, vidport);
--	outb_p(0xff & (pos >> 9), vidport+1);
--	outb_p(15, vidport);
--	outb_p(0xff & (pos >> 1), vidport+1);
--}
+-	BUG_ON(!efi.systab);
 -
--static void* memset(void* s, int c, unsigned n)
--{
--	int i;
--	char *ss = (char*)s;
+-	status = phys_efi_set_virtual_address_map(
+-			memmap.desc_size * memmap.nr_map,
+-			memmap.desc_size,
+-			memmap.desc_version,
+-		       	memmap.phys_map);
 -
--	for (i=0;i<n;i++) ss[i] = c;
--	return s;
--}
+-	if (status != EFI_SUCCESS) {
+-		printk (KERN_ALERT "You are screwed! "
+-			"Unable to switch EFI into virtual mode "
+-			"(status=%lx)\n", status);
+-		panic("EFI call to SetVirtualAddressMap() failed!");
+-	}
 -
--static void* memcpy(void* dest, const void* src, unsigned n)
--{
--	int i;
--	char *d = (char *)dest, *s = (char *)src;
+-	/*
+-	 * Now that EFI is in virtual mode, update the function
+-	 * pointers in the runtime service table to the new virtual addresses.
+-	 */
 -
--	for (i=0;i<n;i++) d[i] = s[i];
--	return dest;
+-	efi.get_time = virt_efi_get_time;
+-	efi.set_time = virt_efi_set_time;
+-	efi.get_wakeup_time = virt_efi_get_wakeup_time;
+-	efi.set_wakeup_time = virt_efi_set_wakeup_time;
+-	efi.get_variable = virt_efi_get_variable;
+-	efi.get_next_variable = virt_efi_get_next_variable;
+-	efi.set_variable = virt_efi_set_variable;
+-	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
+-	efi.reset_system = virt_efi_reset_system;
 -}
 -
--/* ===========================================================================
-- * Fill the input buffer. This is called only when the buffer is empty
-- * and at least one byte is really needed.
-- */
--static int fill_inbuf(void)
+-void __init
+-efi_initialize_iomem_resources(struct resource *code_resource,
+-			       struct resource *data_resource,
+-			       struct resource *bss_resource)
 -{
--	error("ran out of input data");
--	return 0;
--}
+-	struct resource *res;
+-	efi_memory_desc_t *md;
+-	void *p;
 -
--/* ===========================================================================
-- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
-- * (Used for the decompressed data only.)
-- */
--static void flush_window(void)
--{
--	/* With my window equal to my output buffer
--	 * I only need to compute the crc here.
--	 */
--	ulg c = crc;         /* temporary variable */
--	unsigned n;
--	uch *in, ch;
+-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+-		md = p;
 -
--	in = window;
--	for (n = 0; n < outcnt; n++) {
--		ch = *in++;
--		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+-		if ((md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >
+-		    0x100000000ULL)
+-			continue;
+-		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+-		switch (md->type) {
+-		case EFI_RESERVED_TYPE:
+-			res->name = "Reserved Memory";
+-			break;
+-		case EFI_LOADER_CODE:
+-			res->name = "Loader Code";
+-			break;
+-		case EFI_LOADER_DATA:
+-			res->name = "Loader Data";
+-			break;
+-		case EFI_BOOT_SERVICES_DATA:
+-			res->name = "BootServices Data";
+-			break;
+-		case EFI_BOOT_SERVICES_CODE:
+-			res->name = "BootServices Code";
+-			break;
+-		case EFI_RUNTIME_SERVICES_CODE:
+-			res->name = "Runtime Service Code";
+-			break;
+-		case EFI_RUNTIME_SERVICES_DATA:
+-			res->name = "Runtime Service Data";
+-			break;
+-		case EFI_CONVENTIONAL_MEMORY:
+-			res->name = "Conventional Memory";
+-			break;
+-		case EFI_UNUSABLE_MEMORY:
+-			res->name = "Unusable Memory";
+-			break;
+-		case EFI_ACPI_RECLAIM_MEMORY:
+-			res->name = "ACPI Reclaim";
+-			break;
+-		case EFI_ACPI_MEMORY_NVS:
+-			res->name = "ACPI NVS";
+-			break;
+-		case EFI_MEMORY_MAPPED_IO:
+-			res->name = "Memory Mapped IO";
+-			break;
+-		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+-			res->name = "Memory Mapped IO Port Space";
+-			break;
+-		default:
+-			res->name = "Reserved";
+-			break;
+-		}
+-		res->start = md->phys_addr;
+-		res->end = res->start + ((md->num_pages << EFI_PAGE_SHIFT) - 1);
+-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+-		if (request_resource(&iomem_resource, res) < 0)
+-			printk(KERN_ERR PFX "Failed to allocate res %s : "
+-				"0x%llx-0x%llx\n", res->name,
+-				(unsigned long long)res->start,
+-				(unsigned long long)res->end);
+-		/*
+-		 * We don't know which region contains kernel data so we try
+-		 * it repeatedly and let the resource manager test it.
+-		 */
+-		if (md->type == EFI_CONVENTIONAL_MEMORY) {
+-			request_resource(res, code_resource);
+-			request_resource(res, data_resource);
+-			request_resource(res, bss_resource);
+-#ifdef CONFIG_KEXEC
+-			request_resource(res, &crashk_res);
+-#endif
+-		}
 -	}
--	crc = c;
--	bytes_out += (ulg)outcnt;
--	outcnt = 0;
 -}
 -
--static void error(char *x)
+-/*
+- * Convenience functions to obtain memory types and attributes
+- */
+-
+-u32 efi_mem_type(unsigned long phys_addr)
 -{
--	putstr("\n\n");
--	putstr(x);
--	putstr("\n\n -- System halted");
+-	efi_memory_desc_t *md;
+-	void *p;
 -
--	while(1);	/* Halt */
+-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+-		md = p;
+-		if ((md->phys_addr <= phys_addr) && (phys_addr <
+-			(md->phys_addr + (md-> num_pages << EFI_PAGE_SHIFT)) ))
+-			return md->type;
+-	}
+-	return 0;
 -}
 -
--asmlinkage void decompress_kernel(void *rmode, unsigned long heap,
--	uch *input_data, unsigned long input_len, uch *output)
+-u64 efi_mem_attributes(unsigned long phys_addr)
 -{
--	real_mode = rmode;
+-	efi_memory_desc_t *md;
+-	void *p;
 -
--	if (RM_SCREEN_INFO.orig_video_mode == 7) {
--		vidmem = (char *) 0xb0000;
--		vidport = 0x3b4;
--	} else {
--		vidmem = (char *) 0xb8000;
--		vidport = 0x3d4;
+-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+-		md = p;
+-		if ((md->phys_addr <= phys_addr) && (phys_addr <
+-			(md->phys_addr + (md-> num_pages << EFI_PAGE_SHIFT)) ))
+-			return md->attribute;
 -	}
--
--	lines = RM_SCREEN_INFO.orig_video_lines;
--	cols = RM_SCREEN_INFO.orig_video_cols;
--
--	window = output;  		/* Output buffer (Normally at 1M) */
--	free_mem_ptr     = heap;	/* Heap  */
--	free_mem_end_ptr = heap + HEAP_SIZE;
--	inbuf  = input_data;		/* Input buffer */
--	insize = input_len;
--	inptr  = 0;
--
--	if ((ulg)output & (__KERNEL_ALIGN - 1))
--		error("Destination address not 2M aligned");
--	if ((ulg)output >= 0xffffffffffUL)
--		error("Destination address too large");
--
--	makecrc();
--	putstr(".\nDecompressing Linux...");
--	gunzip();
--	putstr("done.\nBooting the kernel.\n");
--	return;
--}
-diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
-index 7a0d00b..d01ea42 100644
---- a/arch/x86/boot/compressed/relocs.c
-+++ b/arch/x86/boot/compressed/relocs.c
-@@ -27,11 +27,6 @@ static unsigned long *relocs;
-  * absolute relocations present w.r.t these symbols.
-  */
- static const char* safe_abs_relocs[] = {
--		"__kernel_vsyscall",
--		"__kernel_rt_sigreturn",
--		"__kernel_sigreturn",
--		"SYSENTER_RETURN",
--		"VDSO_NOTE_MASK",
- 		"xen_irq_disable_direct_reloc",
- 		"xen_save_fl_direct_reloc",
- };
-@@ -45,6 +40,8 @@ static int is_safe_abs_reloc(const char* sym_name)
- 			/* Match found */
- 			return 1;
- 	}
-+	if (strncmp(sym_name, "VDSO", 4) == 0)
-+		return 1;
- 	if (strncmp(sym_name, "__crc_", 6) == 0)
- 		return 1;
- 	return 0;
-diff --git a/arch/x86/boot/compressed/vmlinux.scr b/arch/x86/boot/compressed/vmlinux.scr
+-	return 0;
+ }
+diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
 new file mode 100644
-index 0000000..f02382a
+index 0000000..4b73992
 --- /dev/null
-+++ b/arch/x86/boot/compressed/vmlinux.scr
-@@ -0,0 +1,10 @@
-+SECTIONS
++++ b/arch/x86/kernel/efi_64.c
+@@ -0,0 +1,134 @@
++/*
++ * x86_64 specific EFI support functions
++ * Based on Extensible Firmware Interface Specification version 1.0
++ *
++ * Copyright (C) 2005-2008 Intel Co.
++ *	Fenghua Yu <fenghua.yu at intel.com>
++ *	Bibo Mao <bibo.mao at intel.com>
++ *	Chandramouli Narayanan <mouli at linux.intel.com>
++ *	Huang Ying <ying.huang at intel.com>
++ *
++ * Code to convert EFI to E820 map has been implemented in elilo bootloader
++ * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
++ * is setup appropriately for EFI runtime code.
++ * - mouli 06/14/2007.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/types.h>
++#include <linux/spinlock.h>
++#include <linux/bootmem.h>
++#include <linux/ioport.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/reboot.h>
++
++#include <asm/setup.h>
++#include <asm/page.h>
++#include <asm/e820.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
++#include <asm/proto.h>
++#include <asm/efi.h>
++
++static pgd_t save_pgd __initdata;
++static unsigned long efi_flags __initdata;
++
++static void __init early_mapping_set_exec(unsigned long start,
++					  unsigned long end,
++					  int executable)
 +{
-+  .rodata.compressed : {
-+	input_len = .;
-+	LONG(input_data_end - input_data) input_data = .;
-+	*(.data)
-+	output_len = . - 4;
-+	input_data_end = .;
++	pte_t *kpte;
++	int level;
++
++	while (start < end) {
++		kpte = lookup_address((unsigned long)__va(start), &level);
++		BUG_ON(!kpte);
++		if (executable)
++			set_pte(kpte, pte_mkexec(*kpte));
++		else
++			set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
++					    __supported_pte_mask));
++		if (level == 4)
++			start = (start + PMD_SIZE) & PMD_MASK;
++		else
++			start = (start + PAGE_SIZE) & PAGE_MASK;
 +	}
 +}
-diff --git a/arch/x86/boot/compressed/vmlinux_32.lds b/arch/x86/boot/compressed/vmlinux_32.lds
-index cc4854f..bb3c483 100644
---- a/arch/x86/boot/compressed/vmlinux_32.lds
-+++ b/arch/x86/boot/compressed/vmlinux_32.lds
-@@ -3,17 +3,17 @@ OUTPUT_ARCH(i386)
- ENTRY(startup_32)
- SECTIONS
- {
--        /* Be careful parts of head.S assume startup_32 is at
--         * address 0.
-+	/* Be careful parts of head_32.S assume startup_32 is at
-+	 * address 0.
- 	 */
--	. =  0 	;
-+	. = 0;
- 	.text.head : {
- 		_head = . ;
- 		*(.text.head)
- 		_ehead = . ;
- 	}
--	.data.compressed : {
--		*(.data.compressed)
-+	.rodata.compressed : {
-+		*(.rodata.compressed)
- 	}
- 	.text :	{
- 		_text = .; 	/* Text */
-diff --git a/arch/x86/boot/compressed/vmlinux_32.scr b/arch/x86/boot/compressed/vmlinux_32.scr
-deleted file mode 100644
-index 707a88f..0000000
---- a/arch/x86/boot/compressed/vmlinux_32.scr
-+++ /dev/null
-@@ -1,10 +0,0 @@
--SECTIONS
--{
--  .data.compressed : {
--	input_len = .;
--	LONG(input_data_end - input_data) input_data = .; 
--	*(.data) 
--	output_len = . - 4;
--	input_data_end = .; 
--	}
--}
-diff --git a/arch/x86/boot/compressed/vmlinux_64.lds b/arch/x86/boot/compressed/vmlinux_64.lds
-index 94c13e5..f6e5b44 100644
---- a/arch/x86/boot/compressed/vmlinux_64.lds
-+++ b/arch/x86/boot/compressed/vmlinux_64.lds
-@@ -3,15 +3,19 @@ OUTPUT_ARCH(i386:x86-64)
- ENTRY(startup_64)
- SECTIONS
- {
--	/* Be careful parts of head.S assume startup_32 is at
-- 	 * address 0.
-+	/* Be careful parts of head_64.S assume startup_64 is at
-+	 * address 0.
- 	 */
- 	. = 0;
--	.text :	{
-+	.text.head : {
- 		_head = . ;
- 		*(.text.head)
- 		_ehead = . ;
--		*(.text.compressed)
-+	}
-+	.rodata.compressed : {
-+		*(.rodata.compressed)
++
++static void __init early_runtime_code_mapping_set_exec(int executable)
++{
++	efi_memory_desc_t *md;
++	void *p;
++
++	if (!(__supported_pte_mask & _PAGE_NX))
++		return;
++
++	/* Make EFI runtime service code area executable */
++	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++		md = p;
++		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
++			unsigned long end;
++			end = md->phys_addr + (md->num_pages << PAGE_SHIFT);
++			early_mapping_set_exec(md->phys_addr, end, executable);
++		}
 +	}
-+	.text :	{
- 		_text = .; 	/* Text */
- 		*(.text)
- 		*(.text.*)
-diff --git a/arch/x86/boot/compressed/vmlinux_64.scr b/arch/x86/boot/compressed/vmlinux_64.scr
-deleted file mode 100644
-index bd1429c..0000000
---- a/arch/x86/boot/compressed/vmlinux_64.scr
-+++ /dev/null
-@@ -1,10 +0,0 @@
--SECTIONS
--{
--  .text.compressed : {
--	input_len = .;
--	LONG(input_data_end - input_data) input_data = .;
--	*(.data)
--	output_len = . - 4;
--	input_data_end = .;
--	}
--}
-diff --git a/arch/x86/boot/edd.c b/arch/x86/boot/edd.c
-index bd138e4..8721dc4 100644
---- a/arch/x86/boot/edd.c
-+++ b/arch/x86/boot/edd.c
-@@ -129,6 +129,7 @@ void query_edd(void)
- 	char eddarg[8];
- 	int do_mbr = 1;
- 	int do_edd = 1;
-+	int be_quiet;
- 	int devno;
- 	struct edd_info ei, *edp;
- 	u32 *mbrptr;
-@@ -140,12 +141,21 @@ void query_edd(void)
- 			do_edd = 0;
- 	}
- 
-+	be_quiet = cmdline_find_option_bool("quiet");
++}
 +
- 	edp    = boot_params.eddbuf;
- 	mbrptr = boot_params.edd_mbr_sig_buffer;
- 
- 	if (!do_edd)
- 		return;
- 
-+	/* Bugs in OnBoard or AddOnCards Bios may hang the EDD probe,
-+	 * so give a hint if this happens.
++void __init efi_call_phys_prelog(void)
++{
++	unsigned long vaddress;
++
++	local_irq_save(efi_flags);
++	early_runtime_code_mapping_set_exec(1);
++	vaddress = (unsigned long)__va(0x0UL);
++	save_pgd = *pgd_offset_k(0x0UL);
++	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
++	__flush_tlb_all();
++}
++
++void __init efi_call_phys_epilog(void)
++{
++	/*
++	 * After the lock is released, the original page table is restored.
 +	 */
++	set_pgd(pgd_offset_k(0x0UL), save_pgd);
++	early_runtime_code_mapping_set_exec(0);
++	__flush_tlb_all();
++	local_irq_restore(efi_flags);
++}
 +
-+	if (!be_quiet)
-+		printf("Probing EDD (edd=off to disable)... ");
++void __init efi_reserve_bootmem(void)
++{
++	reserve_bootmem_generic((unsigned long)memmap.phys_map,
++				memmap.nr_map * memmap.desc_size);
++}
 +
- 	for (devno = 0x80; devno < 0x80+EDD_MBR_SIG_MAX; devno++) {
- 		/*
- 		 * Scan the BIOS-supported hard disks and query EDD
-@@ -162,6 +172,9 @@ void query_edd(void)
- 		if (do_mbr && !read_mbr_sig(devno, &ei, mbrptr++))
- 			boot_params.edd_mbr_sig_buf_entries = devno-0x80+1;
- 	}
++void __iomem * __init efi_ioremap(unsigned long offset,
++				  unsigned long size)
++{
++	static unsigned pages_mapped;
++	unsigned long last_addr;
++	unsigned i, pages;
 +
-+	if (!be_quiet)
-+		printf("ok\n");
- }
++	last_addr = offset + size - 1;
++	offset &= PAGE_MASK;
++	pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
++	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
++		return NULL;
++
++	for (i = 0; i < pages; i++) {
++		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
++			     offset, PAGE_KERNEL_EXEC_NOCACHE);
++		offset += PAGE_SIZE;
++		pages_mapped++;
++	}
++
++	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
++					     (pages_mapped - pages));
++}
+diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
+new file mode 100644
+index 0000000..99b47d4
+--- /dev/null
++++ b/arch/x86/kernel/efi_stub_64.S
+@@ -0,0 +1,109 @@
++/*
++ * Function calling ABI conversion from Linux to EFI for x86_64
++ *
++ * Copyright (C) 2007 Intel Corp
++ *	Bibo Mao <bibo.mao at intel.com>
++ *	Huang Ying <ying.huang at intel.com>
++ */
++
++#include <linux/linkage.h>
++
++#define SAVE_XMM			\
++	mov %rsp, %rax;			\
++	subq $0x70, %rsp;		\
++	and $~0xf, %rsp;		\
++	mov %rax, (%rsp);		\
++	mov %cr0, %rax;			\
++	clts;				\
++	mov %rax, 0x8(%rsp);		\
++	movaps %xmm0, 0x60(%rsp);	\
++	movaps %xmm1, 0x50(%rsp);	\
++	movaps %xmm2, 0x40(%rsp);	\
++	movaps %xmm3, 0x30(%rsp);	\
++	movaps %xmm4, 0x20(%rsp);	\
++	movaps %xmm5, 0x10(%rsp)
++
++#define RESTORE_XMM			\
++	movaps 0x60(%rsp), %xmm0;	\
++	movaps 0x50(%rsp), %xmm1;	\
++	movaps 0x40(%rsp), %xmm2;	\
++	movaps 0x30(%rsp), %xmm3;	\
++	movaps 0x20(%rsp), %xmm4;	\
++	movaps 0x10(%rsp), %xmm5;	\
++	mov 0x8(%rsp), %rsi;		\
++	mov %rsi, %cr0;			\
++	mov (%rsp), %rsp
++
++ENTRY(efi_call0)
++	SAVE_XMM
++	subq $32, %rsp
++	call *%rdi
++	addq $32, %rsp
++	RESTORE_XMM
++	ret
++
++ENTRY(efi_call1)
++	SAVE_XMM
++	subq $32, %rsp
++	mov  %rsi, %rcx
++	call *%rdi
++	addq $32, %rsp
++	RESTORE_XMM
++	ret
++
++ENTRY(efi_call2)
++	SAVE_XMM
++	subq $32, %rsp
++	mov  %rsi, %rcx
++	call *%rdi
++	addq $32, %rsp
++	RESTORE_XMM
++	ret
++
++ENTRY(efi_call3)
++	SAVE_XMM
++	subq $32, %rsp
++	mov  %rcx, %r8
++	mov  %rsi, %rcx
++	call *%rdi
++	addq $32, %rsp
++	RESTORE_XMM
++	ret
++
++ENTRY(efi_call4)
++	SAVE_XMM
++	subq $32, %rsp
++	mov %r8, %r9
++	mov %rcx, %r8
++	mov %rsi, %rcx
++	call *%rdi
++	addq $32, %rsp
++	RESTORE_XMM
++	ret
++
++ENTRY(efi_call5)
++	SAVE_XMM
++	subq $48, %rsp
++	mov %r9, 32(%rsp)
++	mov %r8, %r9
++	mov %rcx, %r8
++	mov %rsi, %rcx
++	call *%rdi
++	addq $48, %rsp
++	RESTORE_XMM
++	ret
++
++ENTRY(efi_call6)
++	SAVE_XMM
++	mov (%rsp), %rax
++	mov 8(%rax), %rax
++	subq $48, %rsp
++	mov %r9, 32(%rsp)
++	mov %rax, 40(%rsp)
++	mov %r8, %r9
++	mov %rcx, %r8
++	mov %rsi, %rcx
++	call *%rdi
++	addq $48, %rsp
++	RESTORE_XMM
++	ret
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index dc7f938..be5c31d 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -58,7 +58,7 @@
+  * for paravirtualization.  The following will never clobber any registers:
+  *   INTERRUPT_RETURN (aka. "iret")
+  *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
+- *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
++ *   ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
+  *
+  * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
+  * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
+@@ -283,12 +283,12 @@ END(resume_kernel)
+    the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
  
- #endif
-diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
-index 4cc5b04..64ad901 100644
---- a/arch/x86/boot/header.S
-+++ b/arch/x86/boot/header.S
-@@ -195,10 +195,13 @@ cmd_line_ptr:	.long	0		# (Header version 0x0202 or later)
- 					# can be located anywhere in
- 					# low memory 0x10000 or higher.
+ 	# sysenter call handler stub
+-ENTRY(sysenter_entry)
++ENTRY(ia32_sysenter_target)
+ 	CFI_STARTPROC simple
+ 	CFI_SIGNAL_FRAME
+ 	CFI_DEF_CFA esp, 0
+ 	CFI_REGISTER esp, ebp
+-	movl TSS_sysenter_esp0(%esp),%esp
++	movl TSS_sysenter_sp0(%esp),%esp
+ sysenter_past_esp:
+ 	/*
+ 	 * No need to follow this irqs on/off section: the syscall
+@@ -351,7 +351,7 @@ sysenter_past_esp:
+ 	xorl %ebp,%ebp
+ 	TRACE_IRQS_ON
+ 1:	mov  PT_FS(%esp), %fs
+-	ENABLE_INTERRUPTS_SYSEXIT
++	ENABLE_INTERRUPTS_SYSCALL_RET
+ 	CFI_ENDPROC
+ .pushsection .fixup,"ax"
+ 2:	movl $0,PT_FS(%esp)
+@@ -360,7 +360,7 @@ sysenter_past_esp:
+ 	.align 4
+ 	.long 1b,2b
+ .popsection
+-ENDPROC(sysenter_entry)
++ENDPROC(ia32_sysenter_target)
  
--ramdisk_max:	.long (-__PAGE_OFFSET-(512 << 20)-1) & 0x7fffffff
-+ramdisk_max:	.long 0x7fffffff
- 					# (Header version 0x0203 or later)
- 					# The highest safe address for
- 					# the contents of an initrd
-+					# The current kernel allows up to 4 GB,
-+					# but leave it at 2 GB to avoid
-+					# possible bootloader bugs.
+ 	# system call handler stub
+ ENTRY(system_call)
+@@ -583,7 +583,7 @@ END(syscall_badsys)
+  * Build the entry stubs and pointer table with
+  * some assembler magic.
+  */
+-.data
++.section .rodata,"a"
+ ENTRY(interrupt)
+ .text
  
- kernel_alignment:  .long CONFIG_PHYSICAL_ALIGN	#physical addr alignment
- 						#required for protected mode
-diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
-index 1f95750..7828da5 100644
---- a/arch/x86/boot/main.c
-+++ b/arch/x86/boot/main.c
-@@ -100,20 +100,32 @@ static void set_bios_mode(void)
+@@ -743,7 +743,7 @@ END(device_not_available)
+  * that sets up the real kernel stack. Check here, since we can't
+  * allow the wrong stack to be used.
+  *
+- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
+  * already pushed 3 words if it hits on the sysenter instruction:
+  * eflags, cs and eip.
+  *
+@@ -755,7 +755,7 @@ END(device_not_available)
+ 	cmpw $__KERNEL_CS,4(%esp);		\
+ 	jne ok;					\
+ label:						\
+-	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
++	movl TSS_sysenter_sp0+offset(%esp),%esp;	\
+ 	CFI_DEF_CFA esp, 0;			\
+ 	CFI_UNDEFINED eip;			\
+ 	pushfl;					\
+@@ -768,7 +768,7 @@ label:						\
+ 
+ KPROBE_ENTRY(debug)
+ 	RING0_INT_FRAME
+-	cmpl $sysenter_entry,(%esp)
++	cmpl $ia32_sysenter_target,(%esp)
+ 	jne debug_stack_correct
+ 	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+ debug_stack_correct:
+@@ -799,7 +799,7 @@ KPROBE_ENTRY(nmi)
+ 	popl %eax
+ 	CFI_ADJUST_CFA_OFFSET -4
+ 	je nmi_espfix_stack
+-	cmpl $sysenter_entry,(%esp)
++	cmpl $ia32_sysenter_target,(%esp)
+ 	je nmi_stack_fixup
+ 	pushl %eax
+ 	CFI_ADJUST_CFA_OFFSET 4
+@@ -812,7 +812,7 @@ KPROBE_ENTRY(nmi)
+ 	popl %eax
+ 	CFI_ADJUST_CFA_OFFSET -4
+ 	jae nmi_stack_correct
+-	cmpl $sysenter_entry,12(%esp)
++	cmpl $ia32_sysenter_target,12(%esp)
+ 	je nmi_debug_stack_check
+ nmi_stack_correct:
+ 	/* We have a RING0_INT_FRAME here */
+@@ -882,10 +882,10 @@ ENTRY(native_iret)
+ .previous
+ END(native_iret)
+ 
+-ENTRY(native_irq_enable_sysexit)
++ENTRY(native_irq_enable_syscall_ret)
+ 	sti
+ 	sysexit
+-END(native_irq_enable_sysexit)
++END(native_irq_enable_syscall_ret)
  #endif
- }
  
--void main(void)
-+static void init_heap(void)
- {
--	/* First, copy the boot header into the "zeropage" */
--	copy_boot_params();
-+	char *stack_end;
+ KPROBE_ENTRY(int3)
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 3a058bb..bea8474 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -50,6 +50,7 @@
+ #include <asm/hw_irq.h>
+ #include <asm/page.h>
+ #include <asm/irqflags.h>
++#include <asm/paravirt.h>
  
--	/* End of heap check */
- 	if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
--		heap_end = (char *)(boot_params.hdr.heap_end_ptr
--				    +0x200-STACK_SIZE);
-+		asm("leal %P1(%%esp),%0"
-+		    : "=r" (stack_end) : "i" (-STACK_SIZE));
-+
-+		heap_end = (char *)
-+			((size_t)boot_params.hdr.heap_end_ptr + 0x200);
-+		if (heap_end > stack_end)
-+			heap_end = stack_end;
- 	} else {
- 		/* Boot protocol 2.00 only, no heap available */
- 		puts("WARNING: Ancient bootloader, some functionality "
- 		     "may be limited!\n");
- 	}
-+}
+ 	.code64
+ 
+@@ -57,6 +58,13 @@
+ #define retint_kernel retint_restore_args
+ #endif	
+ 
++#ifdef CONFIG_PARAVIRT
++ENTRY(native_irq_enable_syscall_ret)
++	movq	%gs:pda_oldrsp,%rsp
++	swapgs
++	sysretq
++#endif /* CONFIG_PARAVIRT */
 +
-+void main(void)
-+{
-+	/* First, copy the boot header into the "zeropage" */
-+	copy_boot_params();
+ 
+ .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -216,14 +224,21 @@ ENTRY(system_call)
+ 	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
+ 	CFI_REGISTER	rip,rcx
+ 	/*CFI_REGISTER	rflags,r11*/
+-	swapgs
++	SWAPGS_UNSAFE_STACK
++	/*
++	 * A hypervisor implementation might want to use a label
++	 * after the swapgs, so that it can do the swapgs
++	 * for the guest and jump here on syscall.
++	 */
++ENTRY(system_call_after_swapgs)
 +
-+	/* End of heap check */
-+	init_heap();
+ 	movq	%rsp,%gs:pda_oldrsp 
+ 	movq	%gs:pda_kernelstack,%rsp
+ 	/*
+ 	 * No need to follow this irqs off/on section - it's straight
+ 	 * and short:
+ 	 */
+-	sti					
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	SAVE_ARGS 8,1
+ 	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
+ 	movq  %rcx,RIP-ARGOFFSET(%rsp)
+@@ -246,7 +261,7 @@ ret_from_sys_call:
+ sysret_check:		
+ 	LOCKDEP_SYS_EXIT
+ 	GET_THREAD_INFO(%rcx)
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	movl threadinfo_flags(%rcx),%edx
+ 	andl %edi,%edx
+@@ -260,9 +275,7 @@ sysret_check:
+ 	CFI_REGISTER	rip,rcx
+ 	RESTORE_ARGS 0,-ARG_SKIP,1
+ 	/*CFI_REGISTER	rflags,r11*/
+-	movq	%gs:pda_oldrsp,%rsp
+-	swapgs
+-	sysretq
++	ENABLE_INTERRUPTS_SYSCALL_RET
  
- 	/* Make sure we have all the proper CPU support */
- 	if (validate_cpu()) {
-@@ -131,9 +143,6 @@ void main(void)
- 	/* Set keyboard repeat rate (why?) */
- 	keyboard_set_repeat();
+ 	CFI_RESTORE_STATE
+ 	/* Handle reschedules */
+@@ -271,7 +284,7 @@ sysret_careful:
+ 	bt $TIF_NEED_RESCHED,%edx
+ 	jnc sysret_signal
+ 	TRACE_IRQS_ON
+-	sti
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	pushq %rdi
+ 	CFI_ADJUST_CFA_OFFSET 8
+ 	call schedule
+@@ -282,8 +295,8 @@ sysret_careful:
+ 	/* Handle a signal */ 
+ sysret_signal:
+ 	TRACE_IRQS_ON
+-	sti
+-	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
++	ENABLE_INTERRUPTS(CLBR_NONE)
++	testl $_TIF_DO_NOTIFY_MASK,%edx
+ 	jz    1f
  
--	/* Set the video mode */
--	set_video();
--
- 	/* Query MCA information */
- 	query_mca();
+ 	/* Really a signal */
+@@ -295,7 +308,7 @@ sysret_signal:
+ 1:	movl $_TIF_NEED_RESCHED,%edi
+ 	/* Use IRET because user could have changed frame. This
+ 	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	jmp int_with_check
+ 	
+@@ -327,7 +340,7 @@ tracesys:
+  */
+ 	.globl int_ret_from_sys_call
+ int_ret_from_sys_call:
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	testl $3,CS-ARGOFFSET(%rsp)
+ 	je retint_restore_args
+@@ -349,20 +362,20 @@ int_careful:
+ 	bt $TIF_NEED_RESCHED,%edx
+ 	jnc  int_very_careful
+ 	TRACE_IRQS_ON
+-	sti
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	pushq %rdi
+ 	CFI_ADJUST_CFA_OFFSET 8
+ 	call schedule
+ 	popq %rdi
+ 	CFI_ADJUST_CFA_OFFSET -8
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	jmp int_with_check
  
-@@ -154,6 +163,10 @@ void main(void)
- #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
- 	query_edd();
- #endif
-+
-+	/* Set the video mode */
-+	set_video();
+ 	/* handle signals and tracing -- both require a full stack frame */
+ int_very_careful:
+ 	TRACE_IRQS_ON
+-	sti
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	SAVE_REST
+ 	/* Check for syscall exit trace */	
+ 	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
+@@ -377,7 +390,7 @@ int_very_careful:
+ 	jmp int_restore_rest
+ 	
+ int_signal:
+-	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
++	testl $_TIF_DO_NOTIFY_MASK,%edx
+ 	jz 1f
+ 	movq %rsp,%rdi		# &ptregs -> arg1
+ 	xorl %esi,%esi		# oldset -> arg2
+@@ -385,7 +398,7 @@ int_signal:
+ 1:	movl $_TIF_NEED_RESCHED,%edi	
+ int_restore_rest:
+ 	RESTORE_REST
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	jmp int_with_check
+ 	CFI_ENDPROC
+@@ -506,7 +519,7 @@ END(stub_rt_sigreturn)
+ 	CFI_DEF_CFA_REGISTER	rbp
+ 	testl $3,CS(%rdi)
+ 	je 1f
+-	swapgs	
++	SWAPGS
+ 	/* irqcount is used to check if a CPU is already on an interrupt
+ 	   stack or not. While this is essentially redundant with preempt_count
+ 	   it is a little cheaper to use a separate counter in the PDA
+@@ -527,7 +540,7 @@ ENTRY(common_interrupt)
+ 	interrupt do_IRQ
+ 	/* 0(%rsp): oldrsp-ARGOFFSET */
+ ret_from_intr:
+-	cli	
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	decl %gs:pda_irqcount
+ 	leaveq
+@@ -556,13 +569,13 @@ retint_swapgs:		/* return to user-space */
+ 	/*
+ 	 * The iretq could re-enable interrupts:
+ 	 */
+-	cli
++	DISABLE_INTERRUPTS(CLBR_ANY)
+ 	TRACE_IRQS_IRETQ
+-	swapgs 
++	SWAPGS
+ 	jmp restore_args
+ 
+ retint_restore_args:	/* return to kernel space */
+-	cli
++	DISABLE_INTERRUPTS(CLBR_ANY)
+ 	/*
+ 	 * The iretq could re-enable interrupts:
+ 	 */
+@@ -570,10 +583,14 @@ retint_restore_args:	/* return to kernel space */
+ restore_args:
+ 	RESTORE_ARGS 0,8,0						
+ iret_label:	
++#ifdef CONFIG_PARAVIRT
++	INTERRUPT_RETURN
++#endif
++ENTRY(native_iret)
+ 	iretq
+ 
+ 	.section __ex_table,"a"
+-	.quad iret_label,bad_iret	
++	.quad native_iret, bad_iret
+ 	.previous
+ 	.section .fixup,"ax"
+ 	/* force a signal here? this matches i386 behaviour */
+@@ -581,39 +598,39 @@ iret_label:
+ bad_iret:
+ 	movq $11,%rdi	/* SIGSEGV */
+ 	TRACE_IRQS_ON
+-	sti
+-	jmp do_exit			
+-	.previous	
+-	
++	ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
++	jmp do_exit
++	.previous
 +
- 	/* Do the last things and invoke protected mode */
- 	go_to_protected_mode();
+ 	/* edi: workmask, edx: work */
+ retint_careful:
+ 	CFI_RESTORE_STATE
+ 	bt    $TIF_NEED_RESCHED,%edx
+ 	jnc   retint_signal
+ 	TRACE_IRQS_ON
+-	sti
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	pushq %rdi
+ 	CFI_ADJUST_CFA_OFFSET	8
+ 	call  schedule
+ 	popq %rdi		
+ 	CFI_ADJUST_CFA_OFFSET	-8
+ 	GET_THREAD_INFO(%rcx)
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	jmp retint_check
+ 	
+ retint_signal:
+-	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
++	testl $_TIF_DO_NOTIFY_MASK,%edx
+ 	jz    retint_swapgs
+ 	TRACE_IRQS_ON
+-	sti
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	SAVE_REST
+ 	movq $-1,ORIG_RAX(%rsp) 			
+ 	xorl %esi,%esi		# oldset
+ 	movq %rsp,%rdi		# &pt_regs
+ 	call do_notify_resume
+ 	RESTORE_REST
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	movl $_TIF_NEED_RESCHED,%edi
+ 	GET_THREAD_INFO(%rcx)
+@@ -731,7 +748,7 @@ END(spurious_interrupt)
+ 	rdmsr
+ 	testl %edx,%edx
+ 	js    1f
+-	swapgs
++	SWAPGS
+ 	xorl  %ebx,%ebx
+ 1:
+ 	.if \ist
+@@ -747,7 +764,7 @@ END(spurious_interrupt)
+ 	.if \ist
+ 	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ 	.endif
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	.if \irqtrace
+ 	TRACE_IRQS_OFF
+ 	.endif
+@@ -776,10 +793,10 @@ paranoid_swapgs\trace:
+ 	.if \trace
+ 	TRACE_IRQS_IRETQ 0
+ 	.endif
+-	swapgs
++	SWAPGS_UNSAFE_STACK
+ paranoid_restore\trace:
+ 	RESTORE_ALL 8
+-	iretq
++	INTERRUPT_RETURN
+ paranoid_userspace\trace:
+ 	GET_THREAD_INFO(%rcx)
+ 	movl threadinfo_flags(%rcx),%ebx
+@@ -794,11 +811,11 @@ paranoid_userspace\trace:
+ 	.if \trace
+ 	TRACE_IRQS_ON
+ 	.endif
+-	sti
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	xorl %esi,%esi 			/* arg2: oldset */
+ 	movq %rsp,%rdi 			/* arg1: &pt_regs */
+ 	call do_notify_resume
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	.if \trace
+ 	TRACE_IRQS_OFF
+ 	.endif
+@@ -807,9 +824,9 @@ paranoid_schedule\trace:
+ 	.if \trace
+ 	TRACE_IRQS_ON
+ 	.endif
+-	sti
++	ENABLE_INTERRUPTS(CLBR_ANY)
+ 	call schedule
+-	cli
++	DISABLE_INTERRUPTS(CLBR_ANY)
+ 	.if \trace
+ 	TRACE_IRQS_OFF
+ 	.endif
+@@ -862,7 +879,7 @@ KPROBE_ENTRY(error_entry)
+ 	testl $3,CS(%rsp)
+ 	je  error_kernelspace
+ error_swapgs:	
+-	swapgs
++	SWAPGS
+ error_sti:	
+ 	movq %rdi,RDI(%rsp) 	
+ 	CFI_REL_OFFSET	rdi,RDI
+@@ -874,7 +891,7 @@ error_sti:
+ error_exit:
+ 	movl %ebx,%eax
+ 	RESTORE_REST
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	GET_THREAD_INFO(%rcx)	
+ 	testl %eax,%eax
+@@ -911,12 +928,12 @@ ENTRY(load_gs_index)
+ 	CFI_STARTPROC
+ 	pushf
+ 	CFI_ADJUST_CFA_OFFSET 8
+-	cli
+-        swapgs
++	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
++        SWAPGS
+ gs_change:     
+         movl %edi,%gs   
+ 2:	mfence		/* workaround */
+-	swapgs
++	SWAPGS
+         popf
+ 	CFI_ADJUST_CFA_OFFSET -8
+         ret
+@@ -930,7 +947,7 @@ ENDPROC(load_gs_index)
+         .section .fixup,"ax"
+ 	/* running with kernelgs */
+ bad_gs: 
+-	swapgs			/* switch back to user gs */
++	SWAPGS			/* switch back to user gs */
+ 	xorl %eax,%eax
+         movl %eax,%gs
+         jmp  2b
+diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
+index ce703e2..4ae7b64 100644
+--- a/arch/x86/kernel/genapic_64.c
++++ b/arch/x86/kernel/genapic_64.c
+@@ -24,18 +24,11 @@
+ #include <acpi/acpi_bus.h>
+ #endif
+ 
+-/*
+- * which logical CPU number maps to which CPU (physical APIC ID)
+- *
+- * The following static array is used during kernel startup
+- * and the x86_cpu_to_apicid_ptr contains the address of the
+- * array during this time.  Is it zeroed when the per_cpu
+- * data area is removed.
+- */
+-u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata
++/* which logical CPU number maps to which CPU (physical APIC ID) */
++u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata
+ 					= { [0 ... NR_CPUS-1] = BAD_APICID };
+-void *x86_cpu_to_apicid_ptr;
+-DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
++void *x86_cpu_to_apicid_early_ptr;
++DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
+ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+ 
+ struct genapic __read_mostly *genapic = &apic_flat;
+diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
+index f12d8c5..9c7f7d3 100644
+--- a/arch/x86/kernel/geode_32.c
++++ b/arch/x86/kernel/geode_32.c
+@@ -1,6 +1,7 @@
+ /*
+  * AMD Geode southbridge support code
+  * Copyright (C) 2006, Advanced Micro Devices, Inc.
++ * Copyright (C) 2007, Andres Salomon <dilinger at debian.org>
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of version 2 of the GNU General Public License
+@@ -51,45 +52,62 @@ EXPORT_SYMBOL_GPL(geode_get_dev_base);
+ 
+ /* === GPIO API === */
+ 
+-void geode_gpio_set(unsigned int gpio, unsigned int reg)
++void geode_gpio_set(u32 gpio, unsigned int reg)
+ {
+ 	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
+ 
+ 	if (!base)
+ 		return;
+ 
+-	if (gpio < 16)
+-		outl(1 << gpio, base + reg);
+-	else
+-		outl(1 << (gpio - 16), base + 0x80 + reg);
++	/* low bank register */
++	if (gpio & 0xFFFF)
++		outl(gpio & 0xFFFF, base + reg);
++	/* high bank register */
++	gpio >>= 16;
++	if (gpio)
++		outl(gpio, base + 0x80 + reg);
  }
-diff --git a/arch/x86/boot/pm.c b/arch/x86/boot/pm.c
-index 09fb342..1a0f936 100644
---- a/arch/x86/boot/pm.c
-+++ b/arch/x86/boot/pm.c
-@@ -104,7 +104,7 @@ static void reset_coprocessor(void)
- 	(((u64)(base & 0xff000000) << 32) |	\
- 	 ((u64)flags << 40) |			\
- 	 ((u64)(limit & 0x00ff0000) << 32) |	\
--	 ((u64)(base & 0x00ffff00) << 16) |	\
-+	 ((u64)(base & 0x00ffffff) << 16) |	\
- 	 ((u64)(limit & 0x0000ffff)))
+ EXPORT_SYMBOL_GPL(geode_gpio_set);
  
- struct gdt_ptr {
-@@ -121,6 +121,10 @@ static void setup_gdt(void)
- 		[GDT_ENTRY_BOOT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff),
- 		/* DS: data, read/write, 4 GB, base 0 */
- 		[GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff),
-+		/* TSS: 32-bit tss, 104 bytes, base 4096 */
-+		/* We only have a TSS here to keep Intel VT happy;
-+		   we don't actually use it for anything. */
-+		[GDT_ENTRY_BOOT_TSS] = GDT_ENTRY(0x0089, 4096, 103),
- 	};
- 	/* Xen HVM incorrectly stores a pointer to the gdt_ptr, instead
- 	   of the gdt_ptr contents.  Thus, make it static so it will
-diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
-index fa6bed1..f5402d5 100644
---- a/arch/x86/boot/pmjump.S
-+++ b/arch/x86/boot/pmjump.S
-@@ -15,6 +15,7 @@
-  */
+-void geode_gpio_clear(unsigned int gpio, unsigned int reg)
++void geode_gpio_clear(u32 gpio, unsigned int reg)
+ {
+ 	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
  
- #include <asm/boot.h>
-+#include <asm/processor-flags.h>
- #include <asm/segment.h>
+ 	if (!base)
+ 		return;
  
- 	.text
-@@ -29,28 +30,55 @@
-  */
- protected_mode_jump:
- 	movl	%edx, %esi		# Pointer to boot_params table
--	movl	%eax, 2f		# Patch ljmpl instruction
-+
-+	xorl	%ebx, %ebx
-+	movw	%cs, %bx
-+	shll	$4, %ebx
-+	addl	%ebx, 2f
+-	if (gpio < 16)
+-		outl(1 << (gpio + 16), base + reg);
+-	else
+-		outl(1 << gpio, base + 0x80 + reg);
++	/* low bank register */
++	if (gpio & 0xFFFF)
++		outl((gpio & 0xFFFF) << 16, base + reg);
++	/* high bank register */
++	gpio &= (0xFFFF << 16);
++	if (gpio)
++		outl(gpio, base + 0x80 + reg);
+ }
+ EXPORT_SYMBOL_GPL(geode_gpio_clear);
  
- 	movw	$__BOOT_DS, %cx
--	xorl	%ebx, %ebx		# Per the 32-bit boot protocol
--	xorl	%ebp, %ebp		# Per the 32-bit boot protocol
--	xorl	%edi, %edi		# Per the 32-bit boot protocol
-+	movw	$__BOOT_TSS, %di
+-int geode_gpio_isset(unsigned int gpio, unsigned int reg)
++int geode_gpio_isset(u32 gpio, unsigned int reg)
+ {
+ 	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
++	u32 val;
  
- 	movl	%cr0, %edx
--	orb	$1, %dl			# Protected mode (PE) bit
-+	orb	$X86_CR0_PE, %dl	# Protected mode
- 	movl	%edx, %cr0
- 	jmp	1f			# Short jump to serialize on 386/486
- 1:
+ 	if (!base)
+ 		return 0;
  
--	movw	%cx, %ds
--	movw	%cx, %es
--	movw	%cx, %fs
--	movw	%cx, %gs
--	movw	%cx, %ss
--
--	# Jump to the 32-bit entrypoint
-+	# Transition to 32-bit mode
- 	.byte	0x66, 0xea		# ljmpl opcode
--2:	.long	0			# offset
-+2:	.long	in_pm32			# offset
- 	.word	__BOOT_CS		# segment
+-	if (gpio < 16)
+-		return (inl(base + reg) & (1 << gpio)) ? 1 : 0;
+-	else
+-		return (inl(base + 0x80 + reg) & (1 << (gpio - 16))) ? 1 : 0;
++	/* low bank register */
++	if (gpio & 0xFFFF) {
++		val = inl(base + reg) & (gpio & 0xFFFF);
++		if ((gpio & 0xFFFF) == val)
++			return 1;
++	}
++	/* high bank register */
++	gpio >>= 16;
++	if (gpio) {
++		val = inl(base + 0x80 + reg) & gpio;
++		if (gpio == val)
++			return 1;
++	}
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(geode_gpio_isset);
  
- 	.size	protected_mode_jump, .-protected_mode_jump
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 6b34693..a317336 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -10,6 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/percpu.h>
++#include <linux/start_kernel.h>
+ 
+ #include <asm/processor.h>
+ #include <asm/proto.h>
+@@ -19,12 +20,14 @@
+ #include <asm/pgtable.h>
+ #include <asm/tlbflush.h>
+ #include <asm/sections.h>
++#include <asm/kdebug.h>
++#include <asm/e820.h>
+ 
+ static void __init zap_identity_mappings(void)
+ {
+ 	pgd_t *pgd = pgd_offset_k(0UL);
+ 	pgd_clear(pgd);
+-	__flush_tlb();
++	__flush_tlb_all();
+ }
+ 
+ /* Don't add a printk in there. printk relies on the PDA which is not initialized 
+@@ -46,6 +49,35 @@ static void __init copy_bootdata(char *real_mode_data)
+ 	}
+ }
+ 
++#define EBDA_ADDR_POINTER 0x40E
 +
-+	.code32
-+	.type	in_pm32, @function
-+in_pm32:
-+	# Set up data segments for flat 32-bit mode
-+	movl	%ecx, %ds
-+	movl	%ecx, %es
-+	movl	%ecx, %fs
-+	movl	%ecx, %gs
-+	movl	%ecx, %ss
-+	# The 32-bit code sets up its own stack, but this way we do have
-+	# a valid stack if some debugging hack wants to use it.
-+	addl	%ebx, %esp
++static __init void reserve_ebda(void)
++{
++	unsigned ebda_addr, ebda_size;
 +
-+	# Set up TR to make Intel VT happy
-+	ltr	%di
++	/*
++	 * there is a real-mode segmented pointer pointing to the
++	 * 4K EBDA area at 0x40E
++	 */
++	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
++	ebda_addr <<= 4;
 +
-+	# Clear registers to allow for future extensions to the
-+	# 32-bit boot protocol
-+	xorl	%ecx, %ecx
-+	xorl	%edx, %edx
-+	xorl	%ebx, %ebx
-+	xorl	%ebp, %ebp
-+	xorl	%edi, %edi
++	if (!ebda_addr)
++		return;
 +
-+	# Set up LDTR to make Intel VT happy
-+	lldt	%cx
++	ebda_size = *(unsigned short *)__va(ebda_addr);
 +
-+	jmpl	*%eax			# Jump to the 32-bit entrypoint
++	/* Round EBDA up to pages */
++	if (ebda_size == 0)
++		ebda_size = 1;
++	ebda_size <<= 10;
++	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
++	if (ebda_size > 64*1024)
++		ebda_size = 64*1024;
 +
-+	.size	in_pm32, .-in_pm32
-diff --git a/arch/x86/boot/video-bios.c b/arch/x86/boot/video-bios.c
-index ed0672a..ff664a1 100644
---- a/arch/x86/boot/video-bios.c
-+++ b/arch/x86/boot/video-bios.c
-@@ -104,6 +104,7 @@ static int bios_probe(void)
++	reserve_early(ebda_addr, ebda_addr + ebda_size);
++}
++
+ void __init x86_64_start_kernel(char * real_mode_data)
+ {
+ 	int i;
+@@ -56,8 +88,13 @@ void __init x86_64_start_kernel(char * real_mode_data)
+ 	/* Make NULL pointers segfault */
+ 	zap_identity_mappings();
  
- 		mi = GET_HEAP(struct mode_info, 1);
- 		mi->mode = VIDEO_FIRST_BIOS+mode;
-+		mi->depth = 0;	/* text */
- 		mi->x = rdfs16(0x44a);
- 		mi->y = rdfs8(0x484)+1;
- 		nmodes++;
-@@ -116,7 +117,7 @@ static int bios_probe(void)
+-	for (i = 0; i < IDT_ENTRIES; i++)
++	for (i = 0; i < IDT_ENTRIES; i++) {
++#ifdef CONFIG_EARLY_PRINTK
++		set_intr_gate(i, &early_idt_handlers[i]);
++#else
+ 		set_intr_gate(i, early_idt_handler);
++#endif
++	}
+ 	load_idt((const struct desc_ptr *)&idt_descr);
  
- __videocard video_bios =
- {
--	.card_name	= "BIOS (scanned)",
-+	.card_name	= "BIOS",
- 	.probe		= bios_probe,
- 	.set_mode	= bios_set_mode,
- 	.unsafe		= 1,
-diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
-index 4716b9a..662dd2f 100644
---- a/arch/x86/boot/video-vesa.c
-+++ b/arch/x86/boot/video-vesa.c
-@@ -79,20 +79,28 @@ static int vesa_probe(void)
- 			/* Text Mode, TTY BIOS supported,
- 			   supported by hardware */
- 			mi = GET_HEAP(struct mode_info, 1);
--			mi->mode = mode + VIDEO_FIRST_VESA;
--			mi->x    = vminfo.h_res;
--			mi->y    = vminfo.v_res;
-+			mi->mode  = mode + VIDEO_FIRST_VESA;
-+			mi->depth = 0; /* text */
-+			mi->x     = vminfo.h_res;
-+			mi->y     = vminfo.v_res;
- 			nmodes++;
--		} else if ((vminfo.mode_attr & 0x99) == 0x99) {
-+		} else if ((vminfo.mode_attr & 0x99) == 0x99 &&
-+			   (vminfo.memory_layout == 4 ||
-+			    vminfo.memory_layout == 6) &&
-+			   vminfo.memory_planes == 1) {
- #ifdef CONFIG_FB
- 			/* Graphics mode, color, linear frame buffer
--			   supported -- register the mode but hide from
--			   the menu.  Only do this if framebuffer is
--			   configured, however, otherwise the user will
--			   be left without a screen. */
-+			   supported.  Only register the mode if
-+			   if framebuffer is configured, however,
-+			   otherwise the user will be left without a screen.
-+			   We don't require CONFIG_FB_VESA, however, since
-+			   some of the other framebuffer drivers can use
-+			   this mode-setting, too. */
- 			mi = GET_HEAP(struct mode_info, 1);
- 			mi->mode = mode + VIDEO_FIRST_VESA;
--			mi->x = mi->y = 0;
-+			mi->depth = vminfo.bpp;
-+			mi->x = vminfo.h_res;
-+			mi->y = vminfo.v_res;
- 			nmodes++;
- #endif
- 		}
-diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
-index aef02f9..7259387 100644
---- a/arch/x86/boot/video-vga.c
-+++ b/arch/x86/boot/video-vga.c
-@@ -18,22 +18,22 @@
- #include "video.h"
+ 	early_printk("Kernel alive\n");
+@@ -67,8 +104,24 @@ void __init x86_64_start_kernel(char * real_mode_data)
  
- static struct mode_info vga_modes[] = {
--	{ VIDEO_80x25,  80, 25 },
--	{ VIDEO_8POINT, 80, 50 },
--	{ VIDEO_80x43,  80, 43 },
--	{ VIDEO_80x28,  80, 28 },
--	{ VIDEO_80x30,  80, 30 },
--	{ VIDEO_80x34,  80, 34 },
--	{ VIDEO_80x60,  80, 60 },
-+	{ VIDEO_80x25,  80, 25, 0 },
-+	{ VIDEO_8POINT, 80, 50, 0 },
-+	{ VIDEO_80x43,  80, 43, 0 },
-+	{ VIDEO_80x28,  80, 28, 0 },
-+	{ VIDEO_80x30,  80, 30, 0 },
-+	{ VIDEO_80x34,  80, 34, 0 },
-+	{ VIDEO_80x60,  80, 60, 0 },
- };
+ 	pda_init(0);
+ 	copy_bootdata(__va(real_mode_data));
+-#ifdef CONFIG_SMP
+-	cpu_set(0, cpu_online_map);
+-#endif
++
++	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end));
++
++	/* Reserve INITRD */
++	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
++		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
++		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
++		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
++		reserve_early(ramdisk_image, ramdisk_end);
++	}
++
++	reserve_ebda();
++
++	/*
++	 * At this point everything still needed from the boot loader
++	 * or BIOS or kernel text should be early reserved or marked not
++	 * RAM in e820. All other memory is free game.
++	 */
++
+ 	start_kernel();
+ }
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index fbad51f..5d8c573 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -9,6 +9,7 @@
  
- static struct mode_info ega_modes[] = {
--	{ VIDEO_80x25,  80, 25 },
--	{ VIDEO_8POINT, 80, 43 },
-+	{ VIDEO_80x25,  80, 25, 0 },
-+	{ VIDEO_8POINT, 80, 43, 0 },
- };
+ .text
+ #include <linux/threads.h>
++#include <linux/init.h>
+ #include <linux/linkage.h>
+ #include <asm/segment.h>
+ #include <asm/page.h>
+@@ -151,7 +152,9 @@ WEAK(xen_entry)
+ 	/* Unknown implementation; there's really
+ 	   nothing we can do at this point. */
+ 	ud2a
+-.data
++
++	__INITDATA
++
+ subarch_entries:
+ 	.long default_entry		/* normal x86/PC */
+ 	.long lguest_entry		/* lguest hypervisor */
+@@ -199,7 +202,6 @@ default_entry:
+ 	addl $0x67, %eax			/* 0x67 == _PAGE_TABLE */
+ 	movl %eax, 4092(%edx)
  
- static struct mode_info cga_modes[] = {
--	{ VIDEO_80x25,  80, 25 },
-+	{ VIDEO_80x25,  80, 25, 0 },
- };
+-	xorl %ebx,%ebx				/* This is the boot CPU (BSP) */
+ 	jmp 3f
+ /*
+  * Non-boot CPU entry point; entered from trampoline.S
+@@ -222,6 +224,8 @@ ENTRY(startup_32_smp)
+ 	movl %eax,%es
+ 	movl %eax,%fs
+ 	movl %eax,%gs
++#endif /* CONFIG_SMP */
++3:
  
- __videocard video_vga;
-diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
-index ad9712f..696d08f 100644
---- a/arch/x86/boot/video.c
-+++ b/arch/x86/boot/video.c
-@@ -293,13 +293,28 @@ static void display_menu(void)
- 	struct mode_info *mi;
- 	char ch;
- 	int i;
-+	int nmodes;
-+	int modes_per_line;
-+	int col;
+ /*
+  *	New page tables may be in 4Mbyte page mode and may
+@@ -268,12 +272,6 @@ ENTRY(startup_32_smp)
+ 	wrmsr
  
--	puts("Mode:    COLSxROWS:\n");
-+	nmodes = 0;
-+	for (card = video_cards; card < video_cards_end; card++)
-+		nmodes += card->nmodes;
+ 6:
+-	/* This is a secondary processor (AP) */
+-	xorl %ebx,%ebx
+-	incl %ebx
+-
+-#endif /* CONFIG_SMP */
+-3:
  
-+	modes_per_line = 1;
-+	if (nmodes >= 20)
-+		modes_per_line = 3;
-+
-+	for (col = 0; col < modes_per_line; col++)
-+		puts("Mode: Resolution:  Type: ");
-+	putchar('\n');
+ /*
+  * Enable paging
+@@ -297,7 +295,7 @@ ENTRY(startup_32_smp)
+ 	popfl
+ 
+ #ifdef CONFIG_SMP
+-	andl %ebx,%ebx
++	cmpb $0, ready
+ 	jz  1f				/* Initial CPU cleans BSS */
+ 	jmp checkCPUtype
+ 1:
+@@ -502,6 +500,7 @@ early_fault:
+ 	call printk
+ #endif
+ #endif
++	call dump_stack
+ hlt_loop:
+ 	hlt
+ 	jmp hlt_loop
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index b6167fe..1d5a7a3 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -19,6 +19,13 @@
+ #include <asm/msr.h>
+ #include <asm/cache.h>
+ 
++#ifdef CONFIG_PARAVIRT
++#include <asm/asm-offsets.h>
++#include <asm/paravirt.h>
++#else
++#define GET_CR2_INTO_RCX movq %cr2, %rcx
++#endif
 +
-+	col = 0;
- 	ch = '0';
- 	for (card = video_cards; card < video_cards_end; card++) {
- 		mi = card->modes;
- 		for (i = 0; i < card->nmodes; i++, mi++) {
-+			char resbuf[32];
- 			int visible = mi->x && mi->y;
- 			u16 mode_id = mi->mode ? mi->mode :
- 				(mi->y << 8)+mi->x;
-@@ -307,8 +322,18 @@ static void display_menu(void)
- 			if (!visible)
- 				continue; /* Hidden mode */
+ /* we are not able to switch in one step to the final KERNEL ADRESS SPACE
+  * because we need identity-mapped pages.
+  *
+@@ -260,14 +267,43 @@ init_rsp:
+ bad_address:
+ 	jmp bad_address
  
--			printf("%c  %04X  %3dx%-3d  %s\n",
--			       ch, mode_id, mi->x, mi->y, card->card_name);
-+			if (mi->depth)
-+				sprintf(resbuf, "%dx%d", mi->y, mi->depth);
-+			else
-+				sprintf(resbuf, "%d", mi->y);
++#ifdef CONFIG_EARLY_PRINTK
++.macro early_idt_tramp first, last
++	.ifgt \last-\first
++	early_idt_tramp \first, \last-1
++	.endif
++	movl $\last,%esi
++	jmp early_idt_handler
++.endm
 +
-+			printf("%c %03X %4dx%-7s %-6s",
-+			       ch, mode_id, mi->x, resbuf, card->card_name);
-+			col++;
-+			if (col >= modes_per_line) {
-+				putchar('\n');
-+				col = 0;
-+			}
++	.globl early_idt_handlers
++early_idt_handlers:
++	early_idt_tramp 0, 63
++	early_idt_tramp 64, 127
++	early_idt_tramp 128, 191
++	early_idt_tramp 192, 255
++#endif
++
+ ENTRY(early_idt_handler)
++#ifdef CONFIG_EARLY_PRINTK
+ 	cmpl $2,early_recursion_flag(%rip)
+ 	jz  1f
+ 	incl early_recursion_flag(%rip)
++	GET_CR2_INTO_RCX
++	movq %rcx,%r9
++	xorl %r8d,%r8d		# zero for error code
++	movl %esi,%ecx		# get vector number
++	# Test %ecx against mask of vectors that push error code.
++	cmpl $31,%ecx
++	ja 0f
++	movl $1,%eax
++	salq %cl,%rax
++	testl $0x27d00,%eax
++	je 0f
++	popq %r8		# get error code
++0:	movq 0(%rsp),%rcx	# get ip
++	movq 8(%rsp),%rdx	# get cs
+ 	xorl %eax,%eax
+-	movq 8(%rsp),%rsi	# get rip
+-	movq (%rsp),%rdx
+-	movq %cr2,%rcx
+ 	leaq early_idt_msg(%rip),%rdi
+ 	call early_printk
+ 	cmpl $2,early_recursion_flag(%rip)
+@@ -278,15 +314,19 @@ ENTRY(early_idt_handler)
+ 	movq 8(%rsp),%rsi	# get rip again
+ 	call __print_symbol
+ #endif
++#endif /* EARLY_PRINTK */
+ 1:	hlt
+ 	jmp 1b
++
++#ifdef CONFIG_EARLY_PRINTK
+ early_recursion_flag:
+ 	.long 0
  
- 			if (ch == '9')
- 				ch = 'a';
-@@ -318,6 +343,8 @@ static void display_menu(void)
- 				ch++;
- 		}
- 	}
-+	if (col)
-+		putchar('\n');
- }
+ early_idt_msg:
+-	.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
++	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
+ early_idt_ripmsg:
+ 	.asciz "RIP %s\n"
++#endif /* CONFIG_EARLY_PRINTK */
  
- #define H(x)	((x)-'a'+10)
-diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h
-index b92447d..d69347f 100644
---- a/arch/x86/boot/video.h
-+++ b/arch/x86/boot/video.h
-@@ -83,7 +83,8 @@ void store_screen(void);
+ .balign PAGE_SIZE
  
- struct mode_info {
- 	u16 mode;		/* Mode number (vga= style) */
--	u8  x, y;		/* Width, height */
-+	u16 x, y;		/* Width, height */
-+	u16 depth;		/* Bits per pixel, 0 for text mode */
- };
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 2f99ee2..429d084 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -6,7 +6,6 @@
+ #include <linux/init.h>
+ #include <linux/sysdev.h>
+ #include <linux/pm.h>
+-#include <linux/delay.h>
  
- struct card_info {
-diff --git a/arch/x86/boot/voyager.c b/arch/x86/boot/voyager.c
-index 61c8fe0..6499e32 100644
---- a/arch/x86/boot/voyager.c
-+++ b/arch/x86/boot/voyager.c
-@@ -16,8 +16,6 @@
+ #include <asm/fixmap.h>
+ #include <asm/hpet.h>
+@@ -16,7 +15,8 @@
+ #define HPET_MASK	CLOCKSOURCE_MASK(32)
+ #define HPET_SHIFT	22
  
- #include "boot.h"
+-/* FSEC = 10^-15 NSEC = 10^-9 */
++/* FSEC = 10^-15
++   NSEC = 10^-9 */
+ #define FSEC_PER_NSEC	1000000
  
--#ifdef CONFIG_X86_VOYAGER
--
- int query_voyager(void)
+ /*
+@@ -107,6 +107,7 @@ int is_hpet_enabled(void)
  {
- 	u8 err;
-@@ -42,5 +40,3 @@ int query_voyager(void)
- 	copy_from_fs(data_ptr, di, 7);	/* Table is 7 bytes apparently */
- 	return 0;
+ 	return is_hpet_capable() && hpet_legacy_int_enabled;
  }
--
--#endif /* CONFIG_X86_VOYAGER */
-diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
-index 54ee176..77562e7 100644
---- a/arch/x86/configs/i386_defconfig
-+++ b/arch/x86/configs/i386_defconfig
-@@ -99,9 +99,9 @@ CONFIG_IOSCHED_NOOP=y
- CONFIG_IOSCHED_AS=y
- CONFIG_IOSCHED_DEADLINE=y
- CONFIG_IOSCHED_CFQ=y
--CONFIG_DEFAULT_AS=y
-+# CONFIG_DEFAULT_AS is not set
- # CONFIG_DEFAULT_DEADLINE is not set
--# CONFIG_DEFAULT_CFQ is not set
-+CONFIG_DEFAULT_CFQ=y
- # CONFIG_DEFAULT_NOOP is not set
- CONFIG_DEFAULT_IOSCHED="anticipatory"
- 
-diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
-index 38a83f9..9e2b0ef 100644
---- a/arch/x86/configs/x86_64_defconfig
-+++ b/arch/x86/configs/x86_64_defconfig
-@@ -145,15 +145,6 @@ CONFIG_K8_NUMA=y
- CONFIG_NODES_SHIFT=6
- CONFIG_X86_64_ACPI_NUMA=y
- CONFIG_NUMA_EMU=y
--CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
--CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
--CONFIG_ARCH_SPARSEMEM_ENABLE=y
--CONFIG_SELECT_MEMORY_MODEL=y
--# CONFIG_FLATMEM_MANUAL is not set
--CONFIG_DISCONTIGMEM_MANUAL=y
--# CONFIG_SPARSEMEM_MANUAL is not set
--CONFIG_DISCONTIGMEM=y
--CONFIG_FLAT_NODE_MEM_MAP=y
- CONFIG_NEED_MULTIPLE_NODES=y
- # CONFIG_SPARSEMEM_STATIC is not set
- CONFIG_SPLIT_PTLOCK_CPUS=4
-diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
-index 46bb609..3874c2d 100644
---- a/arch/x86/crypto/Makefile
-+++ b/arch/x86/crypto/Makefile
-@@ -4,12 +4,16 @@
- 
- obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
- obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
-+obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
++EXPORT_SYMBOL_GPL(is_hpet_enabled);
  
- obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
- obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
-+obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+ /*
+  * When the hpet driver (/dev/hpet) is enabled, we need to reserve
+@@ -132,16 +133,13 @@ static void hpet_reserve_platform_timers(unsigned long id)
+ #ifdef CONFIG_HPET_EMULATE_RTC
+ 	hpet_reserve_timer(&hd, 1);
+ #endif
+-
+ 	hd.hd_irq[0] = HPET_LEGACY_8254;
+ 	hd.hd_irq[1] = HPET_LEGACY_RTC;
  
--aes-i586-y := aes-i586-asm_32.o aes_32.o
--twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
-+aes-i586-y := aes-i586-asm_32.o aes_glue.o
-+twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
-+salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
+-	for (i = 2; i < nrtimers; timer++, i++)
+-		hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
+-			Tn_INT_ROUTE_CNF_SHIFT;
+-
++       for (i = 2; i < nrtimers; timer++, i++)
++	       hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
++		       Tn_INT_ROUTE_CNF_SHIFT;
+ 	hpet_alloc(&hd);
+-
+ }
+ #else
+ static void hpet_reserve_platform_timers(unsigned long id) { }
+@@ -478,6 +476,7 @@ void hpet_disable(void)
+  */
+ #include <linux/mc146818rtc.h>
+ #include <linux/rtc.h>
++#include <asm/rtc.h>
  
--aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
--twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
-+aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
-+twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
-+salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
-diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
-index f942f0c..1093bed 100644
---- a/arch/x86/crypto/aes-i586-asm_32.S
-+++ b/arch/x86/crypto/aes-i586-asm_32.S
-@@ -46,9 +46,9 @@
- #define in_blk 16
+ #define DEFAULT_RTC_INT_FREQ	64
+ #define DEFAULT_RTC_SHIFT	6
+@@ -492,6 +491,38 @@ static unsigned long hpet_default_delta;
+ static unsigned long hpet_pie_delta;
+ static unsigned long hpet_pie_limit;
  
- /* offsets in crypto_tfm structure */
--#define ekey (crypto_tfm_ctx_offset + 0)
--#define nrnd (crypto_tfm_ctx_offset + 256)
--#define dkey (crypto_tfm_ctx_offset + 260)
-+#define klen (crypto_tfm_ctx_offset + 0)
-+#define ekey (crypto_tfm_ctx_offset + 4)
-+#define dkey (crypto_tfm_ctx_offset + 244)
++static rtc_irq_handler irq_handler;
++
++/*
++ * Registers an IRQ handler.
++ */
++int hpet_register_irq_handler(rtc_irq_handler handler)
++{
++	if (!is_hpet_enabled())
++		return -ENODEV;
++	if (irq_handler)
++		return -EBUSY;
++
++	irq_handler = handler;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
++
++/*
++ * Deregisters the IRQ handler registered with hpet_register_irq_handler()
++ * and does cleanup.
++ */
++void hpet_unregister_irq_handler(rtc_irq_handler handler)
++{
++	if (!is_hpet_enabled())
++		return;
++
++	irq_handler = NULL;
++	hpet_rtc_flags = 0;
++}
++EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
++
+ /*
+  * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
+  * is not supported by all HPET implementations for timer 1.
+@@ -533,6 +564,7 @@ int hpet_rtc_timer_init(void)
  
- // register mapping for encrypt and decrypt subroutines
+ 	return 1;
+ }
++EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
  
-@@ -221,8 +221,8 @@
+ /*
+  * The functions below are called from rtc driver.
+@@ -547,6 +579,7 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
+ 	hpet_rtc_flags &= ~bit_mask;
+ 	return 1;
+ }
++EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
  
- .global  aes_enc_blk
+ int hpet_set_rtc_irq_bit(unsigned long bit_mask)
+ {
+@@ -562,6 +595,7 @@ int hpet_set_rtc_irq_bit(unsigned long bit_mask)
  
--.extern  ft_tab
--.extern  fl_tab
-+.extern  crypto_ft_tab
-+.extern  crypto_fl_tab
+ 	return 1;
+ }
++EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
  
- .align 4
+ int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
+ 			unsigned char sec)
+@@ -575,6 +609,7 @@ int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
  
-@@ -236,7 +236,7 @@ aes_enc_blk:
- 1:	push    %ebx
- 	mov     in_blk+4(%esp),%r2
- 	push    %esi
--	mov     nrnd(%ebp),%r3   // number of rounds
-+	mov     klen(%ebp),%r3   // key size
- 	push    %edi
- #if ekey != 0
- 	lea     ekey(%ebp),%ebp  // key pointer
-@@ -255,26 +255,26 @@ aes_enc_blk:
+ 	return 1;
+ }
++EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
  
- 	sub     $8,%esp		// space for register saves on stack
- 	add     $16,%ebp	// increment to next round key
--	cmp     $12,%r3
-+	cmp     $24,%r3
- 	jb      4f		// 10 rounds for 128-bit key
- 	lea     32(%ebp),%ebp
- 	je      3f		// 12 rounds for 192-bit key
- 	lea     32(%ebp),%ebp
+ int hpet_set_periodic_freq(unsigned long freq)
+ {
+@@ -593,11 +628,13 @@ int hpet_set_periodic_freq(unsigned long freq)
+ 	}
+ 	return 1;
+ }
++EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
  
--2:	fwd_rnd1( -64(%ebp) ,ft_tab)	// 14 rounds for 256-bit key
--	fwd_rnd2( -48(%ebp) ,ft_tab)
--3:	fwd_rnd1( -32(%ebp) ,ft_tab)	// 12 rounds for 192-bit key
--	fwd_rnd2( -16(%ebp) ,ft_tab)
--4:	fwd_rnd1(    (%ebp) ,ft_tab)	// 10 rounds for 128-bit key
--	fwd_rnd2( +16(%ebp) ,ft_tab)
--	fwd_rnd1( +32(%ebp) ,ft_tab)
--	fwd_rnd2( +48(%ebp) ,ft_tab)
--	fwd_rnd1( +64(%ebp) ,ft_tab)
--	fwd_rnd2( +80(%ebp) ,ft_tab)
--	fwd_rnd1( +96(%ebp) ,ft_tab)
--	fwd_rnd2(+112(%ebp) ,ft_tab)
--	fwd_rnd1(+128(%ebp) ,ft_tab)
--	fwd_rnd2(+144(%ebp) ,fl_tab)	// last round uses a different table
-+2:	fwd_rnd1( -64(%ebp), crypto_ft_tab)	// 14 rounds for 256-bit key
-+	fwd_rnd2( -48(%ebp), crypto_ft_tab)
-+3:	fwd_rnd1( -32(%ebp), crypto_ft_tab)	// 12 rounds for 192-bit key
-+	fwd_rnd2( -16(%ebp), crypto_ft_tab)
-+4:	fwd_rnd1(    (%ebp), crypto_ft_tab)	// 10 rounds for 128-bit key
-+	fwd_rnd2( +16(%ebp), crypto_ft_tab)
-+	fwd_rnd1( +32(%ebp), crypto_ft_tab)
-+	fwd_rnd2( +48(%ebp), crypto_ft_tab)
-+	fwd_rnd1( +64(%ebp), crypto_ft_tab)
-+	fwd_rnd2( +80(%ebp), crypto_ft_tab)
-+	fwd_rnd1( +96(%ebp), crypto_ft_tab)
-+	fwd_rnd2(+112(%ebp), crypto_ft_tab)
-+	fwd_rnd1(+128(%ebp), crypto_ft_tab)
-+	fwd_rnd2(+144(%ebp), crypto_fl_tab)	// last round uses a different table
+ int hpet_rtc_dropped_irq(void)
+ {
+ 	return is_hpet_enabled();
+ }
++EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
  
- // move final values to the output array.  CAUTION: the 
- // order of these assigns rely on the register mappings
-@@ -297,8 +297,8 @@ aes_enc_blk:
+ static void hpet_rtc_timer_reinit(void)
+ {
+@@ -641,9 +678,10 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+ 	unsigned long rtc_int_flag = 0;
  
- .global  aes_dec_blk
+ 	hpet_rtc_timer_reinit();
++	memset(&curr_time, 0, sizeof(struct rtc_time));
  
--.extern  it_tab
--.extern  il_tab
-+.extern  crypto_it_tab
-+.extern  crypto_il_tab
+ 	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
+-		rtc_get_rtc_time(&curr_time);
++		get_rtc_time(&curr_time);
  
- .align 4
+ 	if (hpet_rtc_flags & RTC_UIE &&
+ 	    curr_time.tm_sec != hpet_prev_update_sec) {
+@@ -665,8 +703,10 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
  
-@@ -312,14 +312,11 @@ aes_dec_blk:
- 1:	push    %ebx
- 	mov     in_blk+4(%esp),%r2
- 	push    %esi
--	mov     nrnd(%ebp),%r3   // number of rounds
-+	mov     klen(%ebp),%r3   // key size
- 	push    %edi
- #if dkey != 0
- 	lea     dkey(%ebp),%ebp  // key pointer
+ 	if (rtc_int_flag) {
+ 		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
+-		rtc_interrupt(rtc_int_flag, dev_id);
++		if (irq_handler)
++			irq_handler(rtc_int_flag, dev_id);
+ 	}
+ 	return IRQ_HANDLED;
+ }
++EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
  #endif
--	mov     %r3,%r0
--	shl     $4,%r0
--	add     %r0,%ebp
- 	
- // input four columns and xor in first round key
+diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
+index 02112fc..0616278 100644
+--- a/arch/x86/kernel/i386_ksyms_32.c
++++ b/arch/x86/kernel/i386_ksyms_32.c
+@@ -22,12 +22,5 @@ EXPORT_SYMBOL(__put_user_8);
  
-@@ -333,27 +330,27 @@ aes_dec_blk:
- 	xor     12(%ebp),%r5
+ EXPORT_SYMBOL(strstr);
  
- 	sub     $8,%esp		// space for register saves on stack
--	sub     $16,%ebp	// increment to next round key
--	cmp     $12,%r3
-+	add     $16,%ebp	// increment to next round key
-+	cmp     $24,%r3
- 	jb      4f		// 10 rounds for 128-bit key
--	lea     -32(%ebp),%ebp
-+	lea     32(%ebp),%ebp
- 	je      3f		// 12 rounds for 192-bit key
--	lea     -32(%ebp),%ebp
+-#ifdef CONFIG_SMP
+-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
+-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
+-EXPORT_SYMBOL(__write_lock_failed);
+-EXPORT_SYMBOL(__read_lock_failed);
+-#endif
 -
--2:	inv_rnd1( +64(%ebp), it_tab)	// 14 rounds for 256-bit key
--	inv_rnd2( +48(%ebp), it_tab)
--3:	inv_rnd1( +32(%ebp), it_tab)	// 12 rounds for 192-bit key
--	inv_rnd2( +16(%ebp), it_tab)
--4:	inv_rnd1(    (%ebp), it_tab)	// 10 rounds for 128-bit key
--	inv_rnd2( -16(%ebp), it_tab)
--	inv_rnd1( -32(%ebp), it_tab)
--	inv_rnd2( -48(%ebp), it_tab)
--	inv_rnd1( -64(%ebp), it_tab)
--	inv_rnd2( -80(%ebp), it_tab)
--	inv_rnd1( -96(%ebp), it_tab)
--	inv_rnd2(-112(%ebp), it_tab)
--	inv_rnd1(-128(%ebp), it_tab)
--	inv_rnd2(-144(%ebp), il_tab)	// last round uses a different table
-+	lea     32(%ebp),%ebp
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(empty_zero_page);
+diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
+new file mode 100644
+index 0000000..26719bd
+--- /dev/null
++++ b/arch/x86/kernel/i387.c
+@@ -0,0 +1,479 @@
++/*
++ *  Copyright (C) 1994 Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *  General FPU state handling cleanups
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ */
 +
-+2:	inv_rnd1( -64(%ebp), crypto_it_tab)	// 14 rounds for 256-bit key
-+	inv_rnd2( -48(%ebp), crypto_it_tab)
-+3:	inv_rnd1( -32(%ebp), crypto_it_tab)	// 12 rounds for 192-bit key
-+	inv_rnd2( -16(%ebp), crypto_it_tab)
-+4:	inv_rnd1(    (%ebp), crypto_it_tab)	// 10 rounds for 128-bit key
-+	inv_rnd2( +16(%ebp), crypto_it_tab)
-+	inv_rnd1( +32(%ebp), crypto_it_tab)
-+	inv_rnd2( +48(%ebp), crypto_it_tab)
-+	inv_rnd1( +64(%ebp), crypto_it_tab)
-+	inv_rnd2( +80(%ebp), crypto_it_tab)
-+	inv_rnd1( +96(%ebp), crypto_it_tab)
-+	inv_rnd2(+112(%ebp), crypto_it_tab)
-+	inv_rnd1(+128(%ebp), crypto_it_tab)
-+	inv_rnd2(+144(%ebp), crypto_il_tab)	// last round uses a different table
- 
- // move final values to the output array.  CAUTION: the 
- // order of these assigns rely on the register mappings
-diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
-index 26b40de..a120f52 100644
---- a/arch/x86/crypto/aes-x86_64-asm_64.S
-+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
-@@ -8,10 +8,10 @@
-  * including this sentence is retained in full.
-  */
- 
--.extern aes_ft_tab
--.extern aes_it_tab
--.extern aes_fl_tab
--.extern aes_il_tab
-+.extern crypto_ft_tab
-+.extern crypto_it_tab
-+.extern crypto_fl_tab
-+.extern crypto_il_tab
- 
- .text
- 
-@@ -56,13 +56,13 @@
- 	.align	8;			\
- FUNC:	movq	r1,r2;			\
- 	movq	r3,r4;			\
--	leaq	BASE+KEY+52(r8),r9;	\
-+	leaq	BASE+KEY+48+4(r8),r9;	\
- 	movq	r10,r11;		\
- 	movl	(r7),r5 ## E;		\
- 	movl	4(r7),r1 ## E;		\
- 	movl	8(r7),r6 ## E;		\
- 	movl	12(r7),r7 ## E;		\
--	movl	BASE(r8),r10 ## E;	\
-+	movl	BASE+0(r8),r10 ## E;	\
- 	xorl	-48(r9),r5 ## E;	\
- 	xorl	-44(r9),r1 ## E;	\
- 	xorl	-40(r9),r6 ## E;	\
-@@ -154,37 +154,37 @@ FUNC:	movq	r1,r2;			\
- /* void aes_enc_blk(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */
- 
- 	entry(aes_enc_blk,0,enc128,enc192)
--	encrypt_round(aes_ft_tab,-96)
--	encrypt_round(aes_ft_tab,-80)
--enc192:	encrypt_round(aes_ft_tab,-64)
--	encrypt_round(aes_ft_tab,-48)
--enc128:	encrypt_round(aes_ft_tab,-32)
--	encrypt_round(aes_ft_tab,-16)
--	encrypt_round(aes_ft_tab,  0)
--	encrypt_round(aes_ft_tab, 16)
--	encrypt_round(aes_ft_tab, 32)
--	encrypt_round(aes_ft_tab, 48)
--	encrypt_round(aes_ft_tab, 64)
--	encrypt_round(aes_ft_tab, 80)
--	encrypt_round(aes_ft_tab, 96)
--	encrypt_final(aes_fl_tab,112)
-+	encrypt_round(crypto_ft_tab,-96)
-+	encrypt_round(crypto_ft_tab,-80)
-+enc192:	encrypt_round(crypto_ft_tab,-64)
-+	encrypt_round(crypto_ft_tab,-48)
-+enc128:	encrypt_round(crypto_ft_tab,-32)
-+	encrypt_round(crypto_ft_tab,-16)
-+	encrypt_round(crypto_ft_tab,  0)
-+	encrypt_round(crypto_ft_tab, 16)
-+	encrypt_round(crypto_ft_tab, 32)
-+	encrypt_round(crypto_ft_tab, 48)
-+	encrypt_round(crypto_ft_tab, 64)
-+	encrypt_round(crypto_ft_tab, 80)
-+	encrypt_round(crypto_ft_tab, 96)
-+	encrypt_final(crypto_fl_tab,112)
- 	return
- 
- /* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
- 
- 	entry(aes_dec_blk,240,dec128,dec192)
--	decrypt_round(aes_it_tab,-96)
--	decrypt_round(aes_it_tab,-80)
--dec192:	decrypt_round(aes_it_tab,-64)
--	decrypt_round(aes_it_tab,-48)
--dec128:	decrypt_round(aes_it_tab,-32)
--	decrypt_round(aes_it_tab,-16)
--	decrypt_round(aes_it_tab,  0)
--	decrypt_round(aes_it_tab, 16)
--	decrypt_round(aes_it_tab, 32)
--	decrypt_round(aes_it_tab, 48)
--	decrypt_round(aes_it_tab, 64)
--	decrypt_round(aes_it_tab, 80)
--	decrypt_round(aes_it_tab, 96)
--	decrypt_final(aes_il_tab,112)
-+	decrypt_round(crypto_it_tab,-96)
-+	decrypt_round(crypto_it_tab,-80)
-+dec192:	decrypt_round(crypto_it_tab,-64)
-+	decrypt_round(crypto_it_tab,-48)
-+dec128:	decrypt_round(crypto_it_tab,-32)
-+	decrypt_round(crypto_it_tab,-16)
-+	decrypt_round(crypto_it_tab,  0)
-+	decrypt_round(crypto_it_tab, 16)
-+	decrypt_round(crypto_it_tab, 32)
-+	decrypt_round(crypto_it_tab, 48)
-+	decrypt_round(crypto_it_tab, 64)
-+	decrypt_round(crypto_it_tab, 80)
-+	decrypt_round(crypto_it_tab, 96)
-+	decrypt_final(crypto_il_tab,112)
- 	return
-diff --git a/arch/x86/crypto/aes_32.c b/arch/x86/crypto/aes_32.c
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/regset.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/math_emu.h>
++#include <asm/sigcontext.h>
++#include <asm/user.h>
++#include <asm/ptrace.h>
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_X86_64
++
++#include <asm/sigcontext32.h>
++#include <asm/user32.h>
++
++#else
++
++#define	save_i387_ia32		save_i387
++#define	restore_i387_ia32	restore_i387
++
++#define _fpstate_ia32 		_fpstate
++#define user_i387_ia32_struct	user_i387_struct
++#define user32_fxsr_struct	user_fxsr_struct
++
++#endif
++
++#ifdef CONFIG_MATH_EMULATION
++#define HAVE_HWFP (boot_cpu_data.hard_math)
++#else
++#define HAVE_HWFP 1
++#endif
++
++unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
++
++void mxcsr_feature_mask_init(void)
++{
++	unsigned long mask = 0;
++	clts();
++	if (cpu_has_fxsr) {
++		memset(&current->thread.i387.fxsave, 0,
++		       sizeof(struct i387_fxsave_struct));
++		asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
++		mask = current->thread.i387.fxsave.mxcsr_mask;
++		if (mask == 0)
++			mask = 0x0000ffbf;
++	}
++	mxcsr_feature_mask &= mask;
++	stts();
++}
++
++#ifdef CONFIG_X86_64
++/*
++ * Called at bootup to set up the initial FPU state that is later cloned
++ * into all processes.
++ */
++void __cpuinit fpu_init(void)
++{
++	unsigned long oldcr0 = read_cr0();
++	extern void __bad_fxsave_alignment(void);
++
++	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
++		__bad_fxsave_alignment();
++	set_in_cr4(X86_CR4_OSFXSR);
++	set_in_cr4(X86_CR4_OSXMMEXCPT);
++
++	write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
++
++	mxcsr_feature_mask_init();
++	/* clean state in init */
++	current_thread_info()->status = 0;
++	clear_used_math();
++}
++#endif	/* CONFIG_X86_64 */
++
++/*
++ * The _current_ task is using the FPU for the first time
++ * so initialize it and set the mxcsr to its default
++ * value at reset if we support XMM instructions and then
++ * remember the current task has used the FPU.
++ */
++void init_fpu(struct task_struct *tsk)
++{
++	if (tsk_used_math(tsk)) {
++		if (tsk == current)
++			unlazy_fpu(tsk);
++		return;
++	}
++
++	if (cpu_has_fxsr) {
++		memset(&tsk->thread.i387.fxsave, 0,
++		       sizeof(struct i387_fxsave_struct));
++		tsk->thread.i387.fxsave.cwd = 0x37f;
++		if (cpu_has_xmm)
++			tsk->thread.i387.fxsave.mxcsr = MXCSR_DEFAULT;
++	} else {
++		memset(&tsk->thread.i387.fsave, 0,
++		       sizeof(struct i387_fsave_struct));
++		tsk->thread.i387.fsave.cwd = 0xffff037fu;
++		tsk->thread.i387.fsave.swd = 0xffff0000u;
++		tsk->thread.i387.fsave.twd = 0xffffffffu;
++		tsk->thread.i387.fsave.fos = 0xffff0000u;
++	}
++	/*
++	 * Only the device not available exception or ptrace can call init_fpu.
++	 */
++	set_stopped_child_used_math(tsk);
++}
++
++int fpregs_active(struct task_struct *target, const struct user_regset *regset)
++{
++	return tsk_used_math(target) ? regset->n : 0;
++}
++
++int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
++{
++	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
++}
++
++int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
++		unsigned int pos, unsigned int count,
++		void *kbuf, void __user *ubuf)
++{
++	if (!cpu_has_fxsr)
++		return -ENODEV;
++
++	unlazy_fpu(target);
++
++	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
++				   &target->thread.i387.fxsave, 0, -1);
++}
++
++int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
++		unsigned int pos, unsigned int count,
++		const void *kbuf, const void __user *ubuf)
++{
++	int ret;
++
++	if (!cpu_has_fxsr)
++		return -ENODEV;
++
++	unlazy_fpu(target);
++	set_stopped_child_used_math(target);
++
++	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
++				 &target->thread.i387.fxsave, 0, -1);
++
++	/*
++	 * mxcsr reserved bits must be masked to zero for security reasons.
++	 */
++	target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
++
++	return ret;
++}
++
++#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
++
++/*
++ * FPU tag word conversions.
++ */
++
++static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
++{
++	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
++
++	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
++	tmp = ~twd;
++	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
++	/* and move the valid bits to the lower byte. */
++	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
++	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
++	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
++	return tmp;
++}
++
++#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
++#define FP_EXP_TAG_VALID	0
++#define FP_EXP_TAG_ZERO		1
++#define FP_EXP_TAG_SPECIAL	2
++#define FP_EXP_TAG_EMPTY	3
++
++static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
++{
++	struct _fpxreg *st;
++	u32 tos = (fxsave->swd >> 11) & 7;
++	u32 twd = (unsigned long) fxsave->twd;
++	u32 tag;
++	u32 ret = 0xffff0000u;
++	int i;
++
++	for (i = 0; i < 8; i++, twd >>= 1) {
++		if (twd & 0x1) {
++			st = FPREG_ADDR(fxsave, (i - tos) & 7);
++
++			switch (st->exponent & 0x7fff) {
++			case 0x7fff:
++				tag = FP_EXP_TAG_SPECIAL;
++				break;
++			case 0x0000:
++				if (!st->significand[0] &&
++				    !st->significand[1] &&
++				    !st->significand[2] &&
++				    !st->significand[3])
++					tag = FP_EXP_TAG_ZERO;
++				else
++					tag = FP_EXP_TAG_SPECIAL;
++				break;
++			default:
++				if (st->significand[3] & 0x8000)
++					tag = FP_EXP_TAG_VALID;
++				else
++					tag = FP_EXP_TAG_SPECIAL;
++				break;
++			}
++		} else {
++			tag = FP_EXP_TAG_EMPTY;
++		}
++		ret |= tag << (2 * i);
++	}
++	return ret;
++}
++
++/*
++ * FXSR floating point environment conversions.
++ */
++
++static void convert_from_fxsr(struct user_i387_ia32_struct *env,
++			      struct task_struct *tsk)
++{
++	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
++	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
++	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
++	int i;
++
++	env->cwd = fxsave->cwd | 0xffff0000u;
++	env->swd = fxsave->swd | 0xffff0000u;
++	env->twd = twd_fxsr_to_i387(fxsave);
++
++#ifdef CONFIG_X86_64
++	env->fip = fxsave->rip;
++	env->foo = fxsave->rdp;
++	if (tsk == current) {
++		/*
++		 * should be actually ds/cs at fpu exception time, but
++		 * that information is not available in 64bit mode.
++		 */
++		asm("mov %%ds,%0" : "=r" (env->fos));
++		asm("mov %%cs,%0" : "=r" (env->fcs));
++	} else {
++		struct pt_regs *regs = task_pt_regs(tsk);
++		env->fos = 0xffff0000 | tsk->thread.ds;
++		env->fcs = regs->cs;
++	}
++#else
++	env->fip = fxsave->fip;
++	env->fcs = fxsave->fcs;
++	env->foo = fxsave->foo;
++	env->fos = fxsave->fos;
++#endif
++
++	for (i = 0; i < 8; ++i)
++		memcpy(&to[i], &from[i], sizeof(to[0]));
++}
++
++static void convert_to_fxsr(struct task_struct *tsk,
++			    const struct user_i387_ia32_struct *env)
++
++{
++	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
++	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
++	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
++	int i;
++
++	fxsave->cwd = env->cwd;
++	fxsave->swd = env->swd;
++	fxsave->twd = twd_i387_to_fxsr(env->twd);
++	fxsave->fop = (u16) ((u32) env->fcs >> 16);
++#ifdef CONFIG_X86_64
++	fxsave->rip = env->fip;
++	fxsave->rdp = env->foo;
++	/* cs and ds ignored */
++#else
++	fxsave->fip = env->fip;
++	fxsave->fcs = (env->fcs & 0xffff);
++	fxsave->foo = env->foo;
++	fxsave->fos = env->fos;
++#endif
++
++	for (i = 0; i < 8; ++i)
++		memcpy(&to[i], &from[i], sizeof(from[0]));
++}
++
++int fpregs_get(struct task_struct *target, const struct user_regset *regset,
++	       unsigned int pos, unsigned int count,
++	       void *kbuf, void __user *ubuf)
++{
++	struct user_i387_ia32_struct env;
++
++	if (!HAVE_HWFP)
++		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
++
++	unlazy_fpu(target);
++
++	if (!cpu_has_fxsr)
++		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
++					   &target->thread.i387.fsave, 0, -1);
++
++	if (kbuf && pos == 0 && count == sizeof(env)) {
++		convert_from_fxsr(kbuf, target);
++		return 0;
++	}
++
++	convert_from_fxsr(&env, target);
++	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
++}
++
++int fpregs_set(struct task_struct *target, const struct user_regset *regset,
++	       unsigned int pos, unsigned int count,
++	       const void *kbuf, const void __user *ubuf)
++{
++	struct user_i387_ia32_struct env;
++	int ret;
++
++	if (!HAVE_HWFP)
++		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
++
++	unlazy_fpu(target);
++	set_stopped_child_used_math(target);
++
++	if (!cpu_has_fxsr)
++		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
++					  &target->thread.i387.fsave, 0, -1);
++
++	if (pos > 0 || count < sizeof(env))
++		convert_from_fxsr(&env, target);
++
++	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
++	if (!ret)
++		convert_to_fxsr(target, &env);
++
++	return ret;
++}
++
++/*
++ * Signal frame handlers.
++ */
++
++static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
++{
++	struct task_struct *tsk = current;
++
++	unlazy_fpu(tsk);
++	tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
++	if (__copy_to_user(buf, &tsk->thread.i387.fsave,
++			   sizeof(struct i387_fsave_struct)))
++		return -1;
++	return 1;
++}
++
++static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
++{
++	struct task_struct *tsk = current;
++	struct user_i387_ia32_struct env;
++	int err = 0;
++
++	unlazy_fpu(tsk);
++
++	convert_from_fxsr(&env, tsk);
++	if (__copy_to_user(buf, &env, sizeof(env)))
++		return -1;
++
++	err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
++	err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
++	if (err)
++		return -1;
++
++	if (__copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
++			   sizeof(struct i387_fxsave_struct)))
++		return -1;
++	return 1;
++}
++
++int save_i387_ia32(struct _fpstate_ia32 __user *buf)
++{
++	if (!used_math())
++		return 0;
++
++	/* This will cause a "finit" to be triggered by the next
++	 * attempted FPU operation by the 'current' process.
++	 */
++	clear_used_math();
++
++	if (HAVE_HWFP) {
++		if (cpu_has_fxsr) {
++			return save_i387_fxsave(buf);
++		} else {
++			return save_i387_fsave(buf);
++		}
++	} else {
++		return fpregs_soft_get(current, NULL,
++				       0, sizeof(struct user_i387_ia32_struct),
++				       NULL, buf) ? -1 : 1;
++	}
++}
++
++static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
++{
++	struct task_struct *tsk = current;
++	clear_fpu(tsk);
++	return __copy_from_user(&tsk->thread.i387.fsave, buf,
++				sizeof(struct i387_fsave_struct));
++}
++
++static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
++{
++	int err;
++	struct task_struct *tsk = current;
++	struct user_i387_ia32_struct env;
++	clear_fpu(tsk);
++	err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
++			       sizeof(struct i387_fxsave_struct));
++	/* mxcsr reserved bits must be masked to zero for security reasons */
++	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
++	if (err || __copy_from_user(&env, buf, sizeof(env)))
++		return 1;
++	convert_to_fxsr(tsk, &env);
++	return 0;
++}
++
++int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
++{
++	int err;
++
++	if (HAVE_HWFP) {
++		if (cpu_has_fxsr) {
++			err = restore_i387_fxsave(buf);
++		} else {
++			err = restore_i387_fsave(buf);
++		}
++	} else {
++		err = fpregs_soft_set(current, NULL,
++				      0, sizeof(struct user_i387_ia32_struct),
++				      NULL, buf) != 0;
++	}
++	set_used_math();
++	return err;
++}
++
++/*
++ * FPU state for core dumps.
++ * This is only used for a.out dumps now.
++ * It is declared generically using elf_fpregset_t (which is
++ * struct user_i387_struct) but is in fact only used for 32-bit
++ * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
++ */
++int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
++{
++	int fpvalid;
++	struct task_struct *tsk = current;
++
++	fpvalid = !!used_math();
++	if (fpvalid)
++		fpvalid = !fpregs_get(tsk, NULL,
++				      0, sizeof(struct user_i387_ia32_struct),
++				      fpu, NULL);
++
++	return fpvalid;
++}
++EXPORT_SYMBOL(dump_fpu);
++
++#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+diff --git a/arch/x86/kernel/i387_32.c b/arch/x86/kernel/i387_32.c
 deleted file mode 100644
-index 49aad93..0000000
---- a/arch/x86/crypto/aes_32.c
+index 7d2e12f..0000000
+--- a/arch/x86/kernel/i387_32.c
 +++ /dev/null
-@@ -1,515 +0,0 @@
--/* 
-- * 
-- * Glue Code for optimized 586 assembler version of AES
-- *
-- * Copyright (c) 2002, Dr Brian Gladman <>, Worcester, UK.
-- * All rights reserved.
-- *
-- * LICENSE TERMS
-- *
-- * The free distribution and use of this software in both source and binary
-- * form is allowed (with or without changes) provided that:
-- *
-- *   1. distributions of this source code include the above copyright
-- *      notice, this list of conditions and the following disclaimer;
-- *
-- *   2. distributions in binary form include the above copyright
-- *      notice, this list of conditions and the following disclaimer
-- *      in the documentation and/or other associated materials;
-- *
-- *   3. the copyright holder's name is not used to endorse products
-- *      built using this software without specific written permission.
-- *
-- * ALTERNATIVELY, provided that this notice is retained in full, this product
-- * may be distributed under the terms of the GNU General Public License (GPL),
-- * in which case the provisions of the GPL apply INSTEAD OF those given above.
-- *
-- * DISCLAIMER
-- *
-- * This software is provided 'as is' with no explicit or implied warranties
-- * in respect of its properties, including, but not limited to, correctness
-- * and/or fitness for purpose.
-- *
-- * Copyright (c) 2003, Adam J. Richter <adam at yggdrasil.com> (conversion to
-- * 2.5 API).
-- * Copyright (c) 2003, 2004 Fruhwirth Clemens <clemens at endorphin.org>
-- * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris at redhat.com>
+@@ -1,544 +0,0 @@
+-/*
+- *  Copyright (C) 1994 Linus Torvalds
 - *
+- *  Pentium III FXSR, SSE support
+- *  General FPU state handling cleanups
+- *	Gareth Hughes <gareth at valinux.com>, May 2000
 - */
 -
--#include <asm/byteorder.h>
--#include <linux/kernel.h>
+-#include <linux/sched.h>
 -#include <linux/module.h>
--#include <linux/init.h>
--#include <linux/types.h>
--#include <linux/crypto.h>
--#include <linux/linkage.h>
--
--asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
--asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
--
--#define AES_MIN_KEY_SIZE	16
--#define AES_MAX_KEY_SIZE	32
--#define AES_BLOCK_SIZE		16
--#define AES_KS_LENGTH		4 * AES_BLOCK_SIZE
--#define RC_LENGTH		29
--
--struct aes_ctx {
--	u32 ekey[AES_KS_LENGTH];
--	u32 rounds;
--	u32 dkey[AES_KS_LENGTH];
--};
+-#include <asm/processor.h>
+-#include <asm/i387.h>
+-#include <asm/math_emu.h>
+-#include <asm/sigcontext.h>
+-#include <asm/user.h>
+-#include <asm/ptrace.h>
+-#include <asm/uaccess.h>
 -
--#define WPOLY 0x011b
--#define bytes2word(b0, b1, b2, b3)  \
--	(((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
+-#ifdef CONFIG_MATH_EMULATION
+-#define HAVE_HWFP (boot_cpu_data.hard_math)
+-#else
+-#define HAVE_HWFP 1
+-#endif
 -
--/* define the finite field multiplies required for Rijndael */
--#define f2(x) ((x) ? pow[log[x] + 0x19] : 0)
--#define f3(x) ((x) ? pow[log[x] + 0x01] : 0)
--#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0)
--#define fb(x) ((x) ? pow[log[x] + 0x68] : 0)
--#define fd(x) ((x) ? pow[log[x] + 0xee] : 0)
--#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0)
--#define fi(x) ((x) ?   pow[255 - log[x]]: 0)
+-static unsigned long mxcsr_feature_mask __read_mostly = 0xffffffff;
 -
--static inline u32 upr(u32 x, int n)
+-void mxcsr_feature_mask_init(void)
 -{
--	return (x << 8 * n) | (x >> (32 - 8 * n));
+-	unsigned long mask = 0;
+-	clts();
+-	if (cpu_has_fxsr) {
+-		memset(&current->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
+-		asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave)); 
+-		mask = current->thread.i387.fxsave.mxcsr_mask;
+-		if (mask == 0) mask = 0x0000ffbf;
+-	} 
+-	mxcsr_feature_mask &= mask;
+-	stts();
 -}
 -
--static inline u8 bval(u32 x, int n)
+-/*
+- * The _current_ task is using the FPU for the first time
+- * so initialize it and set the mxcsr to its default
+- * value at reset if we support XMM instructions and then
+- * remeber the current task has used the FPU.
+- */
+-void init_fpu(struct task_struct *tsk)
 -{
--	return x >> 8 * n;
+-	if (cpu_has_fxsr) {
+-		memset(&tsk->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
+-		tsk->thread.i387.fxsave.cwd = 0x37f;
+-		if (cpu_has_xmm)
+-			tsk->thread.i387.fxsave.mxcsr = 0x1f80;
+-	} else {
+-		memset(&tsk->thread.i387.fsave, 0, sizeof(struct i387_fsave_struct));
+-		tsk->thread.i387.fsave.cwd = 0xffff037fu;
+-		tsk->thread.i387.fsave.swd = 0xffff0000u;
+-		tsk->thread.i387.fsave.twd = 0xffffffffu;
+-		tsk->thread.i387.fsave.fos = 0xffff0000u;
+-	}
+-	/* only the device not available exception or ptrace can call init_fpu */
+-	set_stopped_child_used_math(tsk);
 -}
 -
--/* The forward and inverse affine transformations used in the S-box */
--#define fwd_affine(x) \
--	(w = (u32)x, w ^= (w<<1)^(w<<2)^(w<<3)^(w<<4), 0x63^(u8)(w^(w>>8)))
--
--#define inv_affine(x) \
--	(w = (u32)x, w = (w<<1)^(w<<3)^(w<<6), 0x05^(u8)(w^(w>>8)))
--
--static u32 rcon_tab[RC_LENGTH];
--
--u32 ft_tab[4][256];
--u32 fl_tab[4][256];
--static u32 im_tab[4][256];
--u32 il_tab[4][256];
--u32 it_tab[4][256];
+-/*
+- * FPU lazy state save handling.
+- */
 -
--static void gen_tabs(void)
+-void kernel_fpu_begin(void)
 -{
--	u32 i, w;
--	u8 pow[512], log[256];
+-	struct thread_info *thread = current_thread_info();
 -
--	/*
--	 * log and power tables for GF(2^8) finite field with
--	 * WPOLY as modular polynomial - the simplest primitive
--	 * root is 0x03, used here to generate the tables.
--	 */
--	i = 0; w = 1; 
--	
--	do {
--		pow[i] = (u8)w;
--		pow[i + 255] = (u8)w;
--		log[w] = (u8)i++;
--		w ^=  (w << 1) ^ (w & 0x80 ? WPOLY : 0);
--	} while (w != 1);
--	
--	for(i = 0, w = 1; i < RC_LENGTH; ++i) {
--		rcon_tab[i] = bytes2word(w, 0, 0, 0);
--		w = f2(w);
+-	preempt_disable();
+-	if (thread->status & TS_USEDFPU) {
+-		__save_init_fpu(thread->task);
+-		return;
 -	}
+-	clts();
+-}
+-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 -
--	for(i = 0; i < 256; ++i) {
--		u8 b;
--		
--		b = fwd_affine(fi((u8)i));
--		w = bytes2word(f2(b), b, b, f3(b));
--
--		/* tables for a normal encryption round */
--		ft_tab[0][i] = w;
--		ft_tab[1][i] = upr(w, 1);
--		ft_tab[2][i] = upr(w, 2);
--		ft_tab[3][i] = upr(w, 3);
--		w = bytes2word(b, 0, 0, 0);
--		
--		/*
--		 * tables for last encryption round
--		 * (may also be used in the key schedule)
--		 */
--		fl_tab[0][i] = w;
--		fl_tab[1][i] = upr(w, 1);
--		fl_tab[2][i] = upr(w, 2);
--		fl_tab[3][i] = upr(w, 3);
--		
--		b = fi(inv_affine((u8)i));
--		w = bytes2word(fe(b), f9(b), fd(b), fb(b));
--
--		/* tables for the inverse mix column operation  */
--		im_tab[0][b] = w;
--		im_tab[1][b] = upr(w, 1);
--		im_tab[2][b] = upr(w, 2);
--		im_tab[3][b] = upr(w, 3);
--
--		/* tables for a normal decryption round */
--		it_tab[0][i] = w;
--		it_tab[1][i] = upr(w,1);
--		it_tab[2][i] = upr(w,2);
--		it_tab[3][i] = upr(w,3);
+-/*
+- * FPU tag word conversions.
+- */
 -
--		w = bytes2word(b, 0, 0, 0);
--		
--		/* tables for last decryption round */
--		il_tab[0][i] = w;
--		il_tab[1][i] = upr(w,1);
--		il_tab[2][i] = upr(w,2);
--		il_tab[3][i] = upr(w,3);
--    }
+-static inline unsigned short twd_i387_to_fxsr( unsigned short twd )
+-{
+-	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
+- 
+-	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
+-        tmp = ~twd;
+-        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+-        /* and move the valid bits to the lower byte. */
+-        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
+-        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
+-        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+-        return tmp;
 -}
 -
--#define four_tables(x,tab,vf,rf,c)		\
--(	tab[0][bval(vf(x,0,c),rf(0,c))]	^	\
--	tab[1][bval(vf(x,1,c),rf(1,c))] ^	\
--	tab[2][bval(vf(x,2,c),rf(2,c))] ^	\
--	tab[3][bval(vf(x,3,c),rf(3,c))]		\
--)
--
--#define vf1(x,r,c)  (x)
--#define rf1(r,c)    (r)
--#define rf2(r,c)    ((r-c)&3)
+-static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave )
+-{
+-	struct _fpxreg *st = NULL;
+-	unsigned long tos = (fxsave->swd >> 11) & 7;
+-	unsigned long twd = (unsigned long) fxsave->twd;
+-	unsigned long tag;
+-	unsigned long ret = 0xffff0000u;
+-	int i;
 -
--#define inv_mcol(x) four_tables(x,im_tab,vf1,rf1,0)
--#define ls_box(x,c) four_tables(x,fl_tab,vf1,rf2,c)
+-#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
 -
--#define ff(x) inv_mcol(x)
+-	for ( i = 0 ; i < 8 ; i++ ) {
+-		if ( twd & 0x1 ) {
+-			st = FPREG_ADDR( fxsave, (i - tos) & 7 );
 -
--#define ke4(k,i)							\
--{									\
--	k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i];		\
--	k[4*(i)+5] = ss[1] ^= ss[0];					\
--	k[4*(i)+6] = ss[2] ^= ss[1];					\
--	k[4*(i)+7] = ss[3] ^= ss[2];					\
+-			switch ( st->exponent & 0x7fff ) {
+-			case 0x7fff:
+-				tag = 2;		/* Special */
+-				break;
+-			case 0x0000:
+-				if ( !st->significand[0] &&
+-				     !st->significand[1] &&
+-				     !st->significand[2] &&
+-				     !st->significand[3] ) {
+-					tag = 1;	/* Zero */
+-				} else {
+-					tag = 2;	/* Special */
+-				}
+-				break;
+-			default:
+-				if ( st->significand[3] & 0x8000 ) {
+-					tag = 0;	/* Valid */
+-				} else {
+-					tag = 2;	/* Special */
+-				}
+-				break;
+-			}
+-		} else {
+-			tag = 3;			/* Empty */
+-		}
+-		ret |= (tag << (2 * i));
+-		twd = twd >> 1;
+-	}
+-	return ret;
 -}
 -
--#define kel4(k,i)							\
--{									\
--	k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i];		\
--	k[4*(i)+5] = ss[1] ^= ss[0];					\
--	k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2];	\
--}
+-/*
+- * FPU state interaction.
+- */
 -
--#define ke6(k,i)							\
--{									\
--	k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i];		\
--	k[6*(i)+ 7] = ss[1] ^= ss[0];					\
--	k[6*(i)+ 8] = ss[2] ^= ss[1];					\
--	k[6*(i)+ 9] = ss[3] ^= ss[2];					\
--	k[6*(i)+10] = ss[4] ^= ss[3];					\
--	k[6*(i)+11] = ss[5] ^= ss[4];					\
+-unsigned short get_fpu_cwd( struct task_struct *tsk )
+-{
+-	if ( cpu_has_fxsr ) {
+-		return tsk->thread.i387.fxsave.cwd;
+-	} else {
+-		return (unsigned short)tsk->thread.i387.fsave.cwd;
+-	}
 -}
 -
--#define kel6(k,i)							\
--{									\
--	k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i];		\
--	k[6*(i)+ 7] = ss[1] ^= ss[0];					\
--	k[6*(i)+ 8] = ss[2] ^= ss[1];					\
--	k[6*(i)+ 9] = ss[3] ^= ss[2];					\
+-unsigned short get_fpu_swd( struct task_struct *tsk )
+-{
+-	if ( cpu_has_fxsr ) {
+-		return tsk->thread.i387.fxsave.swd;
+-	} else {
+-		return (unsigned short)tsk->thread.i387.fsave.swd;
+-	}
 -}
 -
--#define ke8(k,i)							\
--{									\
--	k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i];		\
--	k[8*(i)+ 9] = ss[1] ^= ss[0];					\
--	k[8*(i)+10] = ss[2] ^= ss[1];					\
--	k[8*(i)+11] = ss[3] ^= ss[2];					\
--	k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0);				\
--	k[8*(i)+13] = ss[5] ^= ss[4];					\
--	k[8*(i)+14] = ss[6] ^= ss[5];					\
--	k[8*(i)+15] = ss[7] ^= ss[6];					\
+-#if 0
+-unsigned short get_fpu_twd( struct task_struct *tsk )
+-{
+-	if ( cpu_has_fxsr ) {
+-		return tsk->thread.i387.fxsave.twd;
+-	} else {
+-		return (unsigned short)tsk->thread.i387.fsave.twd;
+-	}
 -}
+-#endif  /*  0  */
 -
--#define kel8(k,i)							\
--{									\
--	k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i];		\
--	k[8*(i)+ 9] = ss[1] ^= ss[0];					\
--	k[8*(i)+10] = ss[2] ^= ss[1];					\
--	k[8*(i)+11] = ss[3] ^= ss[2];					\
+-unsigned short get_fpu_mxcsr( struct task_struct *tsk )
+-{
+-	if ( cpu_has_xmm ) {
+-		return tsk->thread.i387.fxsave.mxcsr;
+-	} else {
+-		return 0x1f80;
+-	}
 -}
 -
--#define kdf4(k,i)							\
--{									\
--	ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3];				\
--	ss[1] = ss[1] ^ ss[3];						\
--	ss[2] = ss[2] ^ ss[3];						\
--	ss[3] = ss[3];							\
--	ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i];			\
--	ss[i % 4] ^= ss[4];						\
--	ss[4] ^= k[4*(i)];						\
--	k[4*(i)+4] = ff(ss[4]);						\
--	ss[4] ^= k[4*(i)+1];						\
--	k[4*(i)+5] = ff(ss[4]);						\
--	ss[4] ^= k[4*(i)+2];						\
--	k[4*(i)+6] = ff(ss[4]);						\
--	ss[4] ^= k[4*(i)+3];						\
--	k[4*(i)+7] = ff(ss[4]);						\
--}
+-#if 0
 -
--#define kd4(k,i)							\
--{									\
--	ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i];			\
--	ss[i % 4] ^= ss[4];						\
--	ss[4] = ff(ss[4]);						\
--	k[4*(i)+4] = ss[4] ^= k[4*(i)];					\
--	k[4*(i)+5] = ss[4] ^= k[4*(i)+1];				\
--	k[4*(i)+6] = ss[4] ^= k[4*(i)+2];				\
--	k[4*(i)+7] = ss[4] ^= k[4*(i)+3];				\
+-void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
+-{
+-	if ( cpu_has_fxsr ) {
+-		tsk->thread.i387.fxsave.cwd = cwd;
+-	} else {
+-		tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000u);
+-	}
 -}
 -
--#define kdl4(k,i)							\
--{									\
--	ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i];			\
--	ss[i % 4] ^= ss[4];						\
--	k[4*(i)+4] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3];			\
--	k[4*(i)+5] = ss[1] ^ ss[3];					\
--	k[4*(i)+6] = ss[0];						\
--	k[4*(i)+7] = ss[1];						\
+-void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
+-{
+-	if ( cpu_has_fxsr ) {
+-		tsk->thread.i387.fxsave.swd = swd;
+-	} else {
+-		tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000u);
+-	}
 -}
 -
--#define kdf6(k,i)							\
--{									\
--	ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i];				\
--	k[6*(i)+ 6] = ff(ss[0]);					\
--	ss[1] ^= ss[0];							\
--	k[6*(i)+ 7] = ff(ss[1]);					\
--	ss[2] ^= ss[1];							\
--	k[6*(i)+ 8] = ff(ss[2]);					\
--	ss[3] ^= ss[2];							\
--	k[6*(i)+ 9] = ff(ss[3]);					\
--	ss[4] ^= ss[3];							\
--	k[6*(i)+10] = ff(ss[4]);					\
--	ss[5] ^= ss[4];							\
--	k[6*(i)+11] = ff(ss[5]);					\
+-void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
+-{
+-	if ( cpu_has_fxsr ) {
+-		tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
+-	} else {
+-		tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000u);
+-	}
 -}
 -
--#define kd6(k,i)							\
--{									\
--	ss[6] = ls_box(ss[5],3) ^ rcon_tab[i];				\
--	ss[0] ^= ss[6]; ss[6] = ff(ss[6]);				\
--	k[6*(i)+ 6] = ss[6] ^= k[6*(i)];				\
--	ss[1] ^= ss[0];							\
--	k[6*(i)+ 7] = ss[6] ^= k[6*(i)+ 1];				\
--	ss[2] ^= ss[1];							\
--	k[6*(i)+ 8] = ss[6] ^= k[6*(i)+ 2];				\
--	ss[3] ^= ss[2];							\
--	k[6*(i)+ 9] = ss[6] ^= k[6*(i)+ 3];				\
--	ss[4] ^= ss[3];							\
--	k[6*(i)+10] = ss[6] ^= k[6*(i)+ 4];				\
--	ss[5] ^= ss[4];							\
--	k[6*(i)+11] = ss[6] ^= k[6*(i)+ 5];				\
--}
+-#endif  /*  0  */
 -
--#define kdl6(k,i)							\
--{									\
--	ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i];				\
--	k[6*(i)+ 6] = ss[0];						\
--	ss[1] ^= ss[0];							\
--	k[6*(i)+ 7] = ss[1];						\
--	ss[2] ^= ss[1];							\
--	k[6*(i)+ 8] = ss[2];						\
--	ss[3] ^= ss[2];							\
--	k[6*(i)+ 9] = ss[3];						\
--}
+-/*
+- * FXSR floating point environment conversions.
+- */
 -
--#define kdf8(k,i)							\
--{									\
--	ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i];				\
--	k[8*(i)+ 8] = ff(ss[0]);					\
--	ss[1] ^= ss[0];							\
--	k[8*(i)+ 9] = ff(ss[1]);					\
--	ss[2] ^= ss[1];							\
--	k[8*(i)+10] = ff(ss[2]);					\
--	ss[3] ^= ss[2];							\
--	k[8*(i)+11] = ff(ss[3]);					\
--	ss[4] ^= ls_box(ss[3],0);					\
--	k[8*(i)+12] = ff(ss[4]);					\
--	ss[5] ^= ss[4];							\
--	k[8*(i)+13] = ff(ss[5]);					\
--	ss[6] ^= ss[5];							\
--	k[8*(i)+14] = ff(ss[6]);					\
--	ss[7] ^= ss[6];							\
--	k[8*(i)+15] = ff(ss[7]);					\
--}
+-static int convert_fxsr_to_user( struct _fpstate __user *buf,
+-					struct i387_fxsave_struct *fxsave )
+-{
+-	unsigned long env[7];
+-	struct _fpreg __user *to;
+-	struct _fpxreg *from;
+-	int i;
 -
--#define kd8(k,i)							\
--{									\
--	u32 __g = ls_box(ss[7],3) ^ rcon_tab[i];			\
--	ss[0] ^= __g;							\
--	__g = ff(__g);							\
--	k[8*(i)+ 8] = __g ^= k[8*(i)];					\
--	ss[1] ^= ss[0];							\
--	k[8*(i)+ 9] = __g ^= k[8*(i)+ 1];				\
--	ss[2] ^= ss[1];							\
--	k[8*(i)+10] = __g ^= k[8*(i)+ 2];				\
--	ss[3] ^= ss[2];							\
--	k[8*(i)+11] = __g ^= k[8*(i)+ 3];				\
--	__g = ls_box(ss[3],0);						\
--	ss[4] ^= __g;							\
--	__g = ff(__g);							\
--	k[8*(i)+12] = __g ^= k[8*(i)+ 4];				\
--	ss[5] ^= ss[4];							\
--	k[8*(i)+13] = __g ^= k[8*(i)+ 5];				\
--	ss[6] ^= ss[5];							\
--	k[8*(i)+14] = __g ^= k[8*(i)+ 6];				\
--	ss[7] ^= ss[6];							\
--	k[8*(i)+15] = __g ^= k[8*(i)+ 7];				\
--}
+-	env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
+-	env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
+-	env[2] = twd_fxsr_to_i387(fxsave);
+-	env[3] = fxsave->fip;
+-	env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
+-	env[5] = fxsave->foo;
+-	env[6] = fxsave->fos;
 -
--#define kdl8(k,i)							\
--{									\
--	ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i];				\
--	k[8*(i)+ 8] = ss[0];						\
--	ss[1] ^= ss[0];							\
--	k[8*(i)+ 9] = ss[1];						\
--	ss[2] ^= ss[1];							\
--	k[8*(i)+10] = ss[2];						\
--	ss[3] ^= ss[2];							\
--	k[8*(i)+11] = ss[3];						\
+-	if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) )
+-		return 1;
+-
+-	to = &buf->_st[0];
+-	from = (struct _fpxreg *) &fxsave->st_space[0];
+-	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
+-		unsigned long __user *t = (unsigned long __user *)to;
+-		unsigned long *f = (unsigned long *)from;
+-
+-		if (__put_user(*f, t) ||
+-				__put_user(*(f + 1), t + 1) ||
+-				__put_user(from->exponent, &to->exponent))
+-			return 1;
+-	}
+-	return 0;
 -}
 -
--static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
--		       unsigned int key_len)
+-static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
+-					  struct _fpstate __user *buf )
 -{
+-	unsigned long env[7];
+-	struct _fpxreg *to;
+-	struct _fpreg __user *from;
 -	int i;
--	u32 ss[8];
--	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
--	const __le32 *key = (const __le32 *)in_key;
--	u32 *flags = &tfm->crt_flags;
--
--	/* encryption schedule */
--	
--	ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]);
--	ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]);
--	ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]);
--	ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]);
--
--	switch(key_len) {
--	case 16:
--		for (i = 0; i < 9; i++)
--			ke4(ctx->ekey, i);
--		kel4(ctx->ekey, 9);
--		ctx->rounds = 10;
--		break;
--		
--	case 24:
--		ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
--		ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
--		for (i = 0; i < 7; i++)
--			ke6(ctx->ekey, i);
--		kel6(ctx->ekey, 7); 
--		ctx->rounds = 12;
--		break;
 -
--	case 32:
--		ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
--		ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
--		ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]);
--		ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]);
--		for (i = 0; i < 6; i++)
--			ke8(ctx->ekey, i);
--		kel8(ctx->ekey, 6);
--		ctx->rounds = 14;
--		break;
+-	if ( __copy_from_user( env, buf, 7 * sizeof(long) ) )
+-		return 1;
 -
--	default:
--		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
--		return -EINVAL;
--	}
--	
--	/* decryption schedule */
--	
--	ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]);
--	ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]);
--	ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]);
--	ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]);
+-	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
+-	fxsave->swd = (unsigned short)(env[1] & 0xffff);
+-	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
+-	fxsave->fip = env[3];
+-	fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
+-	fxsave->fcs = (env[4] & 0xffff);
+-	fxsave->foo = env[5];
+-	fxsave->fos = env[6];
 -
--	switch (key_len) {
--	case 16:
--		kdf4(ctx->dkey, 0);
--		for (i = 1; i < 9; i++)
--			kd4(ctx->dkey, i);
--		kdl4(ctx->dkey, 9);
--		break;
--		
--	case 24:
--		ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
--		ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
--		kdf6(ctx->dkey, 0);
--		for (i = 1; i < 7; i++)
--			kd6(ctx->dkey, i);
--		kdl6(ctx->dkey, 7);
--		break;
+-	to = (struct _fpxreg *) &fxsave->st_space[0];
+-	from = &buf->_st[0];
+-	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
+-		unsigned long *t = (unsigned long *)to;
+-		unsigned long __user *f = (unsigned long __user *)from;
 -
--	case 32:
--		ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
--		ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
--		ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6]));
--		ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7]));
--		kdf8(ctx->dkey, 0);
--		for (i = 1; i < 6; i++)
--			kd8(ctx->dkey, i);
--		kdl8(ctx->dkey, 6);
--		break;
+-		if (__get_user(*t, f) ||
+-				__get_user(*(t + 1), f + 1) ||
+-				__get_user(to->exponent, &from->exponent))
+-			return 1;
 -	}
 -	return 0;
 -}
 -
--static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+-/*
+- * Signal frame handlers.
+- */
+-
+-static inline int save_i387_fsave( struct _fpstate __user *buf )
 -{
--	aes_enc_blk(tfm, dst, src);
+-	struct task_struct *tsk = current;
+-
+-	unlazy_fpu( tsk );
+-	tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
+-	if ( __copy_to_user( buf, &tsk->thread.i387.fsave,
+-			     sizeof(struct i387_fsave_struct) ) )
+-		return -1;
+-	return 1;
 -}
 -
--static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+-static int save_i387_fxsave( struct _fpstate __user *buf )
 -{
--	aes_dec_blk(tfm, dst, src);
+-	struct task_struct *tsk = current;
+-	int err = 0;
+-
+-	unlazy_fpu( tsk );
+-
+-	if ( convert_fxsr_to_user( buf, &tsk->thread.i387.fxsave ) )
+-		return -1;
+-
+-	err |= __put_user( tsk->thread.i387.fxsave.swd, &buf->status );
+-	err |= __put_user( X86_FXSR_MAGIC, &buf->magic );
+-	if ( err )
+-		return -1;
+-
+-	if ( __copy_to_user( &buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
+-			     sizeof(struct i387_fxsave_struct) ) )
+-		return -1;
+-	return 1;
 -}
 -
--static struct crypto_alg aes_alg = {
--	.cra_name		=	"aes",
--	.cra_driver_name	=	"aes-i586",
--	.cra_priority		=	200,
--	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
--	.cra_blocksize		=	AES_BLOCK_SIZE,
--	.cra_ctxsize		=	sizeof(struct aes_ctx),
--	.cra_module		=	THIS_MODULE,
--	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
--	.cra_u			=	{
--		.cipher = {
--			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
--			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
--			.cia_setkey	   	= 	aes_set_key,
--			.cia_encrypt	 	=	aes_encrypt,
--			.cia_decrypt	  	=	aes_decrypt
+-int save_i387( struct _fpstate __user *buf )
+-{
+-	if ( !used_math() )
+-		return 0;
+-
+-	/* This will cause a "finit" to be triggered by the next
+-	 * attempted FPU operation by the 'current' process.
+-	 */
+-	clear_used_math();
+-
+-	if ( HAVE_HWFP ) {
+-		if ( cpu_has_fxsr ) {
+-			return save_i387_fxsave( buf );
+-		} else {
+-			return save_i387_fsave( buf );
 -		}
+-	} else {
+-		return save_i387_soft( &current->thread.i387.soft, buf );
 -	}
--};
+-}
 -
--static int __init aes_init(void)
+-static inline int restore_i387_fsave( struct _fpstate __user *buf )
 -{
--	gen_tabs();
--	return crypto_register_alg(&aes_alg);
+-	struct task_struct *tsk = current;
+-	clear_fpu( tsk );
+-	return __copy_from_user( &tsk->thread.i387.fsave, buf,
+-				 sizeof(struct i387_fsave_struct) );
 -}
 -
--static void __exit aes_fini(void)
+-static int restore_i387_fxsave( struct _fpstate __user *buf )
 -{
--	crypto_unregister_alg(&aes_alg);
+-	int err;
+-	struct task_struct *tsk = current;
+-	clear_fpu( tsk );
+-	err = __copy_from_user( &tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
+-				sizeof(struct i387_fxsave_struct) );
+-	/* mxcsr reserved bits must be masked to zero for security reasons */
+-	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+-	return err ? 1 : convert_fxsr_from_user( &tsk->thread.i387.fxsave, buf );
 -}
 -
--module_init(aes_init);
--module_exit(aes_fini);
+-int restore_i387( struct _fpstate __user *buf )
+-{
+-	int err;
+-
+-	if ( HAVE_HWFP ) {
+-		if ( cpu_has_fxsr ) {
+-			err = restore_i387_fxsave( buf );
+-		} else {
+-			err = restore_i387_fsave( buf );
+-		}
+-	} else {
+-		err = restore_i387_soft( &current->thread.i387.soft, buf );
+-	}
+-	set_used_math();
+-	return err;
+-}
 -
--MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, i586 asm optimized");
--MODULE_LICENSE("Dual BSD/GPL");
--MODULE_AUTHOR("Fruhwirth Clemens, James Morris, Brian Gladman, Adam Richter");
--MODULE_ALIAS("aes");
-diff --git a/arch/x86/crypto/aes_64.c b/arch/x86/crypto/aes_64.c
-deleted file mode 100644
-index 5cdb13e..0000000
---- a/arch/x86/crypto/aes_64.c
-+++ /dev/null
-@@ -1,336 +0,0 @@
 -/*
-- * Cryptographic API.
-- *
-- * AES Cipher Algorithm.
-- *
-- * Based on Brian Gladman's code.
-- *
-- * Linux developers:
-- *  Alexander Kjeldaas <astor at fast.no>
-- *  Herbert Valerio Riedel <hvr at hvrlab.org>
-- *  Kyle McMartin <kyle at debian.org>
-- *  Adam J. Richter <adam at yggdrasil.com> (conversion to 2.5 API).
-- *  Andreas Steinmetz <ast at domdv.de> (adapted to x86_64 assembler)
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- *
-- * ---------------------------------------------------------------------------
-- * Copyright (c) 2002, Dr Brian Gladman <brg at gladman.me.uk>, Worcester, UK.
-- * All rights reserved.
-- *
-- * LICENSE TERMS
-- *
-- * The free distribution and use of this software in both source and binary
-- * form is allowed (with or without changes) provided that:
-- *
-- *   1. distributions of this source code include the above copyright
-- *      notice, this list of conditions and the following disclaimer;
-- *
-- *   2. distributions in binary form include the above copyright
-- *      notice, this list of conditions and the following disclaimer
-- *      in the documentation and/or other associated materials;
-- *
-- *   3. the copyright holder's name is not used to endorse products
-- *      built using this software without specific written permission.
-- *
-- * ALTERNATIVELY, provided that this notice is retained in full, this product
-- * may be distributed under the terms of the GNU General Public License (GPL),
-- * in which case the provisions of the GPL apply INSTEAD OF those given above.
-- *
-- * DISCLAIMER
-- *
-- * This software is provided 'as is' with no explicit or implied warranties
-- * in respect of its properties, including, but not limited to, correctness
-- * and/or fitness for purpose.
-- * ---------------------------------------------------------------------------
+- * ptrace request handlers.
 - */
 -
--/* Some changes from the Gladman version:
--    s/RIJNDAEL(e_key)/E_KEY/g
--    s/RIJNDAEL(d_key)/D_KEY/g
--*/
--
--#include <asm/byteorder.h>
--#include <linux/bitops.h>
--#include <linux/crypto.h>
--#include <linux/errno.h>
--#include <linux/init.h>
--#include <linux/module.h>
--#include <linux/types.h>
--
--#define AES_MIN_KEY_SIZE	16
--#define AES_MAX_KEY_SIZE	32
+-static inline int get_fpregs_fsave( struct user_i387_struct __user *buf,
+-				    struct task_struct *tsk )
+-{
+-	return __copy_to_user( buf, &tsk->thread.i387.fsave,
+-			       sizeof(struct user_i387_struct) );
+-}
 -
--#define AES_BLOCK_SIZE		16
+-static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf,
+-				     struct task_struct *tsk )
+-{
+-	return convert_fxsr_to_user( (struct _fpstate __user *)buf,
+-				     &tsk->thread.i387.fxsave );
+-}
 -
--/*
-- * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
-- */
--static inline u8 byte(const u32 x, const unsigned n)
+-int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk )
 -{
--	return x >> (n << 3);
+-	if ( HAVE_HWFP ) {
+-		if ( cpu_has_fxsr ) {
+-			return get_fpregs_fxsave( buf, tsk );
+-		} else {
+-			return get_fpregs_fsave( buf, tsk );
+-		}
+-	} else {
+-		return save_i387_soft( &tsk->thread.i387.soft,
+-				       (struct _fpstate __user *)buf );
+-	}
 -}
 -
--struct aes_ctx
+-static inline int set_fpregs_fsave( struct task_struct *tsk,
+-				    struct user_i387_struct __user *buf )
 -{
--	u32 key_length;
--	u32 buf[120];
--};
+-	return __copy_from_user( &tsk->thread.i387.fsave, buf,
+-				 sizeof(struct user_i387_struct) );
+-}
 -
--#define E_KEY (&ctx->buf[0])
--#define D_KEY (&ctx->buf[60])
+-static inline int set_fpregs_fxsave( struct task_struct *tsk,
+-				     struct user_i387_struct __user *buf )
+-{
+-	return convert_fxsr_from_user( &tsk->thread.i387.fxsave,
+-				       (struct _fpstate __user *)buf );
+-}
 -
--static u8 pow_tab[256] __initdata;
--static u8 log_tab[256] __initdata;
--static u8 sbx_tab[256] __initdata;
--static u8 isb_tab[256] __initdata;
--static u32 rco_tab[10];
--u32 aes_ft_tab[4][256];
--u32 aes_it_tab[4][256];
+-int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf )
+-{
+-	if ( HAVE_HWFP ) {
+-		if ( cpu_has_fxsr ) {
+-			return set_fpregs_fxsave( tsk, buf );
+-		} else {
+-			return set_fpregs_fsave( tsk, buf );
+-		}
+-	} else {
+-		return restore_i387_soft( &tsk->thread.i387.soft,
+-					  (struct _fpstate __user *)buf );
+-	}
+-}
 -
--u32 aes_fl_tab[4][256];
--u32 aes_il_tab[4][256];
+-int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk )
+-{
+-	if ( cpu_has_fxsr ) {
+-		if (__copy_to_user( buf, &tsk->thread.i387.fxsave,
+-				    sizeof(struct user_fxsr_struct) ))
+-			return -EFAULT;
+-		return 0;
+-	} else {
+-		return -EIO;
+-	}
+-}
 -
--static inline u8 f_mult(u8 a, u8 b)
+-int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf )
 -{
--	u8 aa = log_tab[a], cc = aa + log_tab[b];
+-	int ret = 0;
 -
--	return pow_tab[cc + (cc < aa ? 1 : 0)];
+-	if ( cpu_has_fxsr ) {
+-		if (__copy_from_user( &tsk->thread.i387.fxsave, buf,
+-				  sizeof(struct user_fxsr_struct) ))
+-			ret = -EFAULT;
+-		/* mxcsr reserved bits must be masked to zero for security reasons */
+-		tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+-	} else {
+-		ret = -EIO;
+-	}
+-	return ret;
 -}
 -
--#define ff_mult(a, b) (a && b ? f_mult(a, b) : 0)
+-/*
+- * FPU state for core dumps.
+- */
 -
--#define ls_box(x)				\
--	(aes_fl_tab[0][byte(x, 0)] ^		\
--	 aes_fl_tab[1][byte(x, 1)] ^		\
--	 aes_fl_tab[2][byte(x, 2)] ^		\
--	 aes_fl_tab[3][byte(x, 3)])
+-static inline void copy_fpu_fsave( struct task_struct *tsk,
+-				   struct user_i387_struct *fpu )
+-{
+-	memcpy( fpu, &tsk->thread.i387.fsave,
+-		sizeof(struct user_i387_struct) );
+-}
 -
--static void __init gen_tabs(void)
+-static inline void copy_fpu_fxsave( struct task_struct *tsk,
+-				   struct user_i387_struct *fpu )
 -{
--	u32 i, t;
--	u8 p, q;
+-	unsigned short *to;
+-	unsigned short *from;
+-	int i;
 -
--	/* log and power tables for GF(2**8) finite field with
--	   0x011b as modular polynomial - the simplest primitive
--	   root is 0x03, used here to generate the tables */
+-	memcpy( fpu, &tsk->thread.i387.fxsave, 7 * sizeof(long) );
 -
--	for (i = 0, p = 1; i < 256; ++i) {
--		pow_tab[i] = (u8)p;
--		log_tab[p] = (u8)i;
+-	to = (unsigned short *)&fpu->st_space[0];
+-	from = (unsigned short *)&tsk->thread.i387.fxsave.st_space[0];
+-	for ( i = 0 ; i < 8 ; i++, to += 5, from += 8 ) {
+-		memcpy( to, from, 5 * sizeof(unsigned short) );
+-	}
+-}
 -
--		p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
+-int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
+-{
+-	int fpvalid;
+-	struct task_struct *tsk = current;
+-
+-	fpvalid = !!used_math();
+-	if ( fpvalid ) {
+-		unlazy_fpu( tsk );
+-		if ( cpu_has_fxsr ) {
+-			copy_fpu_fxsave( tsk, fpu );
+-		} else {
+-			copy_fpu_fsave( tsk, fpu );
+-		}
 -	}
 -
--	log_tab[1] = 0;
+-	return fpvalid;
+-}
+-EXPORT_SYMBOL(dump_fpu);
 -
--	for (i = 0, p = 1; i < 10; ++i) {
--		rco_tab[i] = p;
+-int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
+-{
+-	int fpvalid = !!tsk_used_math(tsk);
 -
--		p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
+-	if (fpvalid) {
+-		if (tsk == current)
+-			unlazy_fpu(tsk);
+-		if (cpu_has_fxsr)
+-			copy_fpu_fxsave(tsk, fpu);
+-		else
+-			copy_fpu_fsave(tsk, fpu);
 -	}
+-	return fpvalid;
+-}
 -
--	for (i = 0; i < 256; ++i) {
--		p = (i ? pow_tab[255 - log_tab[i]] : 0);
--		q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
--		p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
--		sbx_tab[i] = p;
--		isb_tab[p] = (u8)i;
--	}
+-int dump_task_extended_fpu(struct task_struct *tsk, struct user_fxsr_struct *fpu)
+-{
+-	int fpvalid = tsk_used_math(tsk) && cpu_has_fxsr;
 -
--	for (i = 0; i < 256; ++i) {
--		p = sbx_tab[i];
+-	if (fpvalid) {
+-		if (tsk == current)
+-		       unlazy_fpu(tsk);
+-		memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(*fpu));
+-	}
+-	return fpvalid;
+-}
+diff --git a/arch/x86/kernel/i387_64.c b/arch/x86/kernel/i387_64.c
+deleted file mode 100644
+index bfaff28..0000000
+--- a/arch/x86/kernel/i387_64.c
++++ /dev/null
+@@ -1,150 +0,0 @@
+-/*
+- *  Copyright (C) 1994 Linus Torvalds
+- *  Copyright (C) 2002 Andi Kleen, SuSE Labs
+- *
+- *  Pentium III FXSR, SSE support
+- *  General FPU state handling cleanups
+- *	Gareth Hughes <gareth at valinux.com>, May 2000
+- * 
+- *  x86-64 rework 2002 Andi Kleen. 
+- *  Does direct fxsave in and out of user space now for signal handlers.
+- *  All the FSAVE<->FXSAVE conversion code has been moved to the 32bit emulation,
+- *  the 64bit user space sees a FXSAVE frame directly. 
+- */
 -
--		t = p;
--		aes_fl_tab[0][i] = t;
--		aes_fl_tab[1][i] = rol32(t, 8);
--		aes_fl_tab[2][i] = rol32(t, 16);
--		aes_fl_tab[3][i] = rol32(t, 24);
+-#include <linux/sched.h>
+-#include <linux/init.h>
+-#include <asm/processor.h>
+-#include <asm/i387.h>
+-#include <asm/sigcontext.h>
+-#include <asm/user.h>
+-#include <asm/ptrace.h>
+-#include <asm/uaccess.h>
 -
--		t = ((u32)ff_mult(2, p)) |
--		    ((u32)p << 8) |
--		    ((u32)p << 16) | ((u32)ff_mult(3, p) << 24);
+-unsigned int mxcsr_feature_mask __read_mostly = 0xffffffff;
 -
--		aes_ft_tab[0][i] = t;
--		aes_ft_tab[1][i] = rol32(t, 8);
--		aes_ft_tab[2][i] = rol32(t, 16);
--		aes_ft_tab[3][i] = rol32(t, 24);
+-void mxcsr_feature_mask_init(void)
+-{
+-	unsigned int mask;
+-	clts();
+-	memset(&current->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
+-	asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
+-	mask = current->thread.i387.fxsave.mxcsr_mask;
+-	if (mask == 0) mask = 0x0000ffbf;
+-	mxcsr_feature_mask &= mask;
+-	stts();
+-}
 -
--		p = isb_tab[i];
+-/*
+- * Called at bootup to set up the initial FPU state that is later cloned
+- * into all processes.
+- */
+-void __cpuinit fpu_init(void)
+-{
+-	unsigned long oldcr0 = read_cr0();
+-	extern void __bad_fxsave_alignment(void);
+-		
+-	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
+-		__bad_fxsave_alignment();
+-	set_in_cr4(X86_CR4_OSFXSR);
+-	set_in_cr4(X86_CR4_OSXMMEXCPT);
 -
--		t = p;
--		aes_il_tab[0][i] = t;
--		aes_il_tab[1][i] = rol32(t, 8);
--		aes_il_tab[2][i] = rol32(t, 16);
--		aes_il_tab[3][i] = rol32(t, 24);
+-	write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
 -
--		t = ((u32)ff_mult(14, p)) |
--		    ((u32)ff_mult(9, p) << 8) |
--		    ((u32)ff_mult(13, p) << 16) |
--		    ((u32)ff_mult(11, p) << 24);
+-	mxcsr_feature_mask_init();
+-	/* clean state in init */
+-	current_thread_info()->status = 0;
+-	clear_used_math();
+-}
 -
--		aes_it_tab[0][i] = t;
--		aes_it_tab[1][i] = rol32(t, 8);
--		aes_it_tab[2][i] = rol32(t, 16);
--		aes_it_tab[3][i] = rol32(t, 24);
--	}
+-void init_fpu(struct task_struct *child)
+-{
+-	if (tsk_used_math(child)) {
+-		if (child == current)
+-			unlazy_fpu(child);
+-		return;
+-	}	
+-	memset(&child->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
+-	child->thread.i387.fxsave.cwd = 0x37f;
+-	child->thread.i387.fxsave.mxcsr = 0x1f80;
+-	/* only the device not available exception or ptrace can call init_fpu */
+-	set_stopped_child_used_math(child);
 -}
 -
--#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
+-/*
+- * Signal frame handlers.
+- */
 -
--#define imix_col(y, x)			\
--	u    = star_x(x);		\
--	v    = star_x(u);		\
--	w    = star_x(v);		\
--	t    = w ^ (x);			\
--	(y)  = u ^ v ^ w;		\
--	(y) ^= ror32(u ^ t,  8) ^	\
--	       ror32(v ^ t, 16) ^	\
--	       ror32(t, 24)
+-int save_i387(struct _fpstate __user *buf)
+-{
+-	struct task_struct *tsk = current;
+-	int err = 0;
 -
--/* initialise the key schedule from the user supplied key */
+-	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
+-			sizeof(tsk->thread.i387.fxsave));
 -
--#define loop4(i)					\
--{							\
--	t = ror32(t,  8); t = ls_box(t) ^ rco_tab[i];	\
--	t ^= E_KEY[4 * i];     E_KEY[4 * i + 4] = t;	\
--	t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t;	\
--	t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t;	\
--	t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t;	\
+-	if ((unsigned long)buf % 16) 
+-		printk("save_i387: bad fpstate %p\n",buf); 
+-
+-	if (!used_math())
+-		return 0;
+-	clear_used_math(); /* trigger finit */
+-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+-		err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
+-		if (err) return err;
+-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
+-		stts();
+-	} else {
+-		if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
+-				   sizeof(struct i387_fxsave_struct)))
+-			return -1;
+-	}
+-	return 1;
 -}
 -
--#define loop6(i)					\
--{							\
--	t = ror32(t,  8); t = ls_box(t) ^ rco_tab[i];	\
--	t ^= E_KEY[6 * i];     E_KEY[6 * i + 6] = t;	\
--	t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t;	\
--	t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t;	\
--	t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t;	\
--	t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t;	\
--	t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t;	\
+-/*
+- * ptrace request handlers.
+- */
+-
+-int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *tsk)
+-{
+-	init_fpu(tsk);
+-	return __copy_to_user(buf, &tsk->thread.i387.fxsave,
+-			       sizeof(struct user_i387_struct)) ? -EFAULT : 0;
 -}
 -
--#define loop8(i)					\
--{							\
--	t = ror32(t,  8); ; t = ls_box(t) ^ rco_tab[i];	\
--	t ^= E_KEY[8 * i];     E_KEY[8 * i + 8] = t;	\
--	t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t;	\
--	t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t;	\
--	t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t;	\
--	t  = E_KEY[8 * i + 4] ^ ls_box(t);		\
--	E_KEY[8 * i + 12] = t;				\
--	t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t;	\
--	t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t;	\
--	t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t;	\
+-int set_fpregs(struct task_struct *tsk, struct user_i387_struct __user *buf)
+-{
+-	if (__copy_from_user(&tsk->thread.i387.fxsave, buf, 
+-			     sizeof(struct user_i387_struct)))
+-		return -EFAULT;
+-		return 0;
 -}
 -
--static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
--		       unsigned int key_len)
+-/*
+- * FPU state for core dumps.
+- */
+-
+-int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
 -{
--	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
--	const __le32 *key = (const __le32 *)in_key;
--	u32 *flags = &tfm->crt_flags;
--	u32 i, j, t, u, v, w;
+-	struct task_struct *tsk = current;
 -
--	if (key_len % 8) {
--		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
--		return -EINVAL;
--	}
+-	if (!used_math())
+-		return 0;
 -
--	ctx->key_length = key_len;
+-	unlazy_fpu(tsk);
+-	memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(struct user_i387_struct)); 
+-	return 1; 
+-}
+-
+-int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
+-{
+-	int fpvalid = !!tsk_used_math(tsk);
+-
+-	if (fpvalid) {
+-		if (tsk == current)
+-			unlazy_fpu(tsk);
+-		memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(struct user_i387_struct)); 	
+-}
+-	return fpvalid;
+-}
+diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c
+index 2931383..dbd6c1d 100644
+--- a/arch/x86/kernel/i8237.c
++++ b/arch/x86/kernel/i8237.c
+@@ -51,7 +51,7 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
+ }
+ 
+ static struct sysdev_class i8237_sysdev_class = {
+-	set_kset_name("i8237"),
++	.name = "i8237",
+ 	.suspend = i8237A_suspend,
+ 	.resume = i8237A_resume,
+ };
+diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
+index a42c807..ef62b07 100644
+--- a/arch/x86/kernel/i8253.c
++++ b/arch/x86/kernel/i8253.c
+@@ -13,10 +13,17 @@
+ #include <asm/delay.h>
+ #include <asm/i8253.h>
+ #include <asm/io.h>
++#include <asm/hpet.h>
+ 
+ DEFINE_SPINLOCK(i8253_lock);
+ EXPORT_SYMBOL(i8253_lock);
+ 
++#ifdef CONFIG_X86_32
++static void pit_disable_clocksource(void);
++#else
++static inline void pit_disable_clocksource(void) { }
++#endif
++
+ /*
+  * HPET replaces the PIT, when enabled. So we need to know, which of
+  * the two timers is used
+@@ -31,38 +38,38 @@ struct clock_event_device *global_clock_event;
+ static void init_pit_timer(enum clock_event_mode mode,
+ 			   struct clock_event_device *evt)
+ {
+-	unsigned long flags;
 -
--	D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]);
--	D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]);
--	D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]);
--	D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]);
+-	spin_lock_irqsave(&i8253_lock, flags);
++	spin_lock(&i8253_lock);
+ 
+ 	switch(mode) {
+ 	case CLOCK_EVT_MODE_PERIODIC:
+ 		/* binary, mode 2, LSB/MSB, ch 0 */
+-		outb_p(0x34, PIT_MODE);
+-		outb_p(LATCH & 0xff , PIT_CH0);	/* LSB */
+-		outb(LATCH >> 8 , PIT_CH0);	/* MSB */
++		outb_pit(0x34, PIT_MODE);
++		outb_pit(LATCH & 0xff , PIT_CH0);	/* LSB */
++		outb_pit(LATCH >> 8 , PIT_CH0);		/* MSB */
+ 		break;
+ 
+ 	case CLOCK_EVT_MODE_SHUTDOWN:
+ 	case CLOCK_EVT_MODE_UNUSED:
+ 		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
+ 		    evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+-			outb_p(0x30, PIT_MODE);
+-			outb_p(0, PIT_CH0);
+-			outb_p(0, PIT_CH0);
++			outb_pit(0x30, PIT_MODE);
++			outb_pit(0, PIT_CH0);
++			outb_pit(0, PIT_CH0);
+ 		}
++		pit_disable_clocksource();
+ 		break;
+ 
+ 	case CLOCK_EVT_MODE_ONESHOT:
+ 		/* One shot setup */
+-		outb_p(0x38, PIT_MODE);
++		pit_disable_clocksource();
++		outb_pit(0x38, PIT_MODE);
+ 		break;
+ 
+ 	case CLOCK_EVT_MODE_RESUME:
+ 		/* Nothing to do here */
+ 		break;
+ 	}
+-	spin_unlock_irqrestore(&i8253_lock, flags);
++	spin_unlock(&i8253_lock);
+ }
+ 
+ /*
+@@ -72,12 +79,10 @@ static void init_pit_timer(enum clock_event_mode mode,
+  */
+ static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
+ {
+-	unsigned long flags;
 -
--	switch (key_len) {
--	case 16:
--		t = E_KEY[3];
--		for (i = 0; i < 10; ++i)
--			loop4(i);
--		break;
+-	spin_lock_irqsave(&i8253_lock, flags);
+-	outb_p(delta & 0xff , PIT_CH0);	/* LSB */
+-	outb(delta >> 8 , PIT_CH0);	/* MSB */
+-	spin_unlock_irqrestore(&i8253_lock, flags);
++	spin_lock(&i8253_lock);
++	outb_pit(delta & 0xff , PIT_CH0);	/* LSB */
++	outb_pit(delta >> 8 , PIT_CH0);		/* MSB */
++	spin_unlock(&i8253_lock);
+ 
+ 	return 0;
+ }
+@@ -148,15 +153,15 @@ static cycle_t pit_read(void)
+ 	 * count), it cannot be newer.
+ 	 */
+ 	jifs = jiffies;
+-	outb_p(0x00, PIT_MODE);	/* latch the count ASAP */
+-	count = inb_p(PIT_CH0);	/* read the latched count */
+-	count |= inb_p(PIT_CH0) << 8;
++	outb_pit(0x00, PIT_MODE);	/* latch the count ASAP */
++	count = inb_pit(PIT_CH0);	/* read the latched count */
++	count |= inb_pit(PIT_CH0) << 8;
+ 
+ 	/* VIA686a test code... reset the latch if count > max + 1 */
+ 	if (count > LATCH) {
+-		outb_p(0x34, PIT_MODE);
+-		outb_p(LATCH & 0xff, PIT_CH0);
+-		outb(LATCH >> 8, PIT_CH0);
++		outb_pit(0x34, PIT_MODE);
++		outb_pit(LATCH & 0xff, PIT_CH0);
++		outb_pit(LATCH >> 8, PIT_CH0);
+ 		count = LATCH - 1;
+ 	}
+ 
+@@ -195,9 +200,28 @@ static struct clocksource clocksource_pit = {
+ 	.shift	= 20,
+ };
+ 
++static void pit_disable_clocksource(void)
++{
++	/*
++	 * Use mult to check whether it is registered or not
++	 */
++	if (clocksource_pit.mult) {
++		clocksource_unregister(&clocksource_pit);
++		clocksource_pit.mult = 0;
++	}
++}
++
+ static int __init init_pit_clocksource(void)
+ {
+-	if (num_possible_cpus() > 1) /* PIT does not scale! */
++	 /*
++	  * Several reasons not to register PIT as a clocksource:
++	  *
++	  * - On SMP PIT does not scale due to i8253_lock
++	  * - when HPET is enabled
++	  * - when local APIC timer is active (PIT is switched off)
++	  */
++	if (num_possible_cpus() > 1 || is_hpet_enabled() ||
++	    pit_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
+ 		return 0;
+ 
+ 	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
+diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
+index f634fc7..2d25b77 100644
+--- a/arch/x86/kernel/i8259_32.c
++++ b/arch/x86/kernel/i8259_32.c
+@@ -21,8 +21,6 @@
+ #include <asm/arch_hooks.h>
+ #include <asm/i8259.h>
+ 
+-#include <io_ports.h>
 -
--	case 24:
--		E_KEY[4] = le32_to_cpu(key[4]);
--		t = E_KEY[5] = le32_to_cpu(key[5]);
--		for (i = 0; i < 8; ++i)
--			loop6 (i);
--		break;
+ /*
+  * This is the 'legacy' 8259A Programmable Interrupt Controller,
+  * present in the majority of PC/AT boxes.
+@@ -258,7 +256,7 @@ static int i8259A_shutdown(struct sys_device *dev)
+ }
+ 
+ static struct sysdev_class i8259_sysdev_class = {
+-	set_kset_name("i8259"),
++	.name = "i8259",
+ 	.suspend = i8259A_suspend,
+ 	.resume = i8259A_resume,
+ 	.shutdown = i8259A_shutdown,
+@@ -291,20 +289,20 @@ void init_8259A(int auto_eoi)
+ 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
+ 
+ 	/*
+-	 * outb_p - this has to work on a wide range of PC hardware.
++	 * outb_pic - this has to work on a wide range of PC hardware.
+ 	 */
+-	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
+-	outb_p(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
+-	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
++	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
++	outb_pic(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
++	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
+ 	if (auto_eoi)	/* master does Auto EOI */
+-		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
++		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+ 	else		/* master expects normal EOI */
+-		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
++		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+ 
+-	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
+-	outb_p(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
+-	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
+-	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
++	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
++	outb_pic(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
++	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
++	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
+ 	if (auto_eoi)
+ 		/*
+ 		 * In AEOI mode we just have to mask the interrupt
+@@ -341,7 +339,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
+ 	outb(0,0xF0);
+ 	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
+ 		return IRQ_NONE;
+-	math_error((void __user *)get_irq_regs()->eip);
++	math_error((void __user *)get_irq_regs()->ip);
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
+index 3f27ea0..fa57a15 100644
+--- a/arch/x86/kernel/i8259_64.c
++++ b/arch/x86/kernel/i8259_64.c
+@@ -21,6 +21,7 @@
+ #include <asm/delay.h>
+ #include <asm/desc.h>
+ #include <asm/apic.h>
++#include <asm/i8259.h>
+ 
+ /*
+  * Common place to define all x86 IRQ vectors
+@@ -48,7 +49,7 @@
+  */
+ 
+ /*
+- * The IO-APIC gives us many more interrupt sources. Most of these 
++ * The IO-APIC gives us many more interrupt sources. Most of these
+  * are unused but an SMP system is supposed to have enough memory ...
+  * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
+  * across the spectrum, so we really want to be prepared to get all
+@@ -76,7 +77,7 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
+ 	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
+ 
+ /* for the irq vectors */
+-static void (*interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
++static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
+ 					  IRQLIST_16(0x2), IRQLIST_16(0x3),
+ 	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
+ 	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
+@@ -114,11 +115,7 @@ static struct irq_chip i8259A_chip = {
+ /*
+  * This contains the irq mask for both 8259A irq controllers,
+  */
+-static unsigned int cached_irq_mask = 0xffff;
 -
--	case 32:
--		E_KEY[4] = le32_to_cpu(key[4]);
--		E_KEY[5] = le32_to_cpu(key[5]);
--		E_KEY[6] = le32_to_cpu(key[6]);
--		t = E_KEY[7] = le32_to_cpu(key[7]);
--		for (i = 0; i < 7; ++i)
--			loop8(i);
--		break;
--	}
+-#define __byte(x,y) 	(((unsigned char *)&(y))[x])
+-#define cached_21	(__byte(0,cached_irq_mask))
+-#define cached_A1	(__byte(1,cached_irq_mask))
++unsigned int cached_irq_mask = 0xffff;
+ 
+ /*
+  * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
+@@ -139,9 +136,9 @@ void disable_8259A_irq(unsigned int irq)
+ 	spin_lock_irqsave(&i8259A_lock, flags);
+ 	cached_irq_mask |= mask;
+ 	if (irq & 8)
+-		outb(cached_A1,0xA1);
++		outb(cached_slave_mask, PIC_SLAVE_IMR);
+ 	else
+-		outb(cached_21,0x21);
++		outb(cached_master_mask, PIC_MASTER_IMR);
+ 	spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+ 
+@@ -153,9 +150,9 @@ void enable_8259A_irq(unsigned int irq)
+ 	spin_lock_irqsave(&i8259A_lock, flags);
+ 	cached_irq_mask &= mask;
+ 	if (irq & 8)
+-		outb(cached_A1,0xA1);
++		outb(cached_slave_mask, PIC_SLAVE_IMR);
+ 	else
+-		outb(cached_21,0x21);
++		outb(cached_master_mask, PIC_MASTER_IMR);
+ 	spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+ 
+@@ -167,9 +164,9 @@ int i8259A_irq_pending(unsigned int irq)
+ 
+ 	spin_lock_irqsave(&i8259A_lock, flags);
+ 	if (irq < 8)
+-		ret = inb(0x20) & mask;
++		ret = inb(PIC_MASTER_CMD) & mask;
+ 	else
+-		ret = inb(0xA0) & (mask >> 8);
++		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
+ 	spin_unlock_irqrestore(&i8259A_lock, flags);
+ 
+ 	return ret;
+@@ -196,14 +193,14 @@ static inline int i8259A_irq_real(unsigned int irq)
+ 	int irqmask = 1<<irq;
+ 
+ 	if (irq < 8) {
+-		outb(0x0B,0x20);		/* ISR register */
+-		value = inb(0x20) & irqmask;
+-		outb(0x0A,0x20);		/* back to the IRR register */
++		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
++		value = inb(PIC_MASTER_CMD) & irqmask;
++		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
+ 		return value;
+ 	}
+-	outb(0x0B,0xA0);		/* ISR register */
+-	value = inb(0xA0) & (irqmask >> 8);
+-	outb(0x0A,0xA0);		/* back to the IRR register */
++	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
++	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
++	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
+ 	return value;
+ }
+ 
+@@ -240,14 +237,17 @@ static void mask_and_ack_8259A(unsigned int irq)
+ 
+ handle_real_irq:
+ 	if (irq & 8) {
+-		inb(0xA1);		/* DUMMY - (do we need this?) */
+-		outb(cached_A1,0xA1);
+-		outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
+-		outb(0x62,0x20);	/* 'Specific EOI' to master-IRQ2 */
++		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
++		outb(cached_slave_mask, PIC_SLAVE_IMR);
++		/* 'Specific EOI' to slave */
++		outb(0x60+(irq&7),PIC_SLAVE_CMD);
++		 /* 'Specific EOI' to master-IRQ2 */
++		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD);
+ 	} else {
+-		inb(0x21);		/* DUMMY - (do we need this?) */
+-		outb(cached_21,0x21);
+-		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
++		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
++		outb(cached_master_mask, PIC_MASTER_IMR);
++		/* 'Specific EOI' to master */
++		outb(0x60+irq,PIC_MASTER_CMD);
+ 	}
+ 	spin_unlock_irqrestore(&i8259A_lock, flags);
+ 	return;
+@@ -270,7 +270,8 @@ spurious_8259A_irq:
+ 		 * lets ACK and report it. [once per IRQ]
+ 		 */
+ 		if (!(spurious_irq_mask & irqmask)) {
+-			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
++			printk(KERN_DEBUG
++			       "spurious 8259A interrupt: IRQ%d.\n", irq);
+ 			spurious_irq_mask |= irqmask;
+ 		}
+ 		atomic_inc(&irq_err_count);
+@@ -283,51 +284,6 @@ spurious_8259A_irq:
+ 	}
+ }
+ 
+-void init_8259A(int auto_eoi)
+-{
+-	unsigned long flags;
 -
--	D_KEY[0] = E_KEY[key_len + 24];
--	D_KEY[1] = E_KEY[key_len + 25];
--	D_KEY[2] = E_KEY[key_len + 26];
--	D_KEY[3] = E_KEY[key_len + 27];
+-	i8259A_auto_eoi = auto_eoi;
 -
--	for (i = 4; i < key_len + 24; ++i) {
--		j = key_len + 24 - (i & ~3) + (i & 3);
--		imix_col(D_KEY[j], E_KEY[i]);
--	}
+-	spin_lock_irqsave(&i8259A_lock, flags);
 -
--	return 0;
--}
+-	outb(0xff, 0x21);	/* mask all of 8259A-1 */
+-	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
 -
--asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
--asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+-	/*
+-	 * outb_p - this has to work on a wide range of PC hardware.
+-	 */
+-	outb_p(0x11, 0x20);	/* ICW1: select 8259A-1 init */
+-	outb_p(IRQ0_VECTOR, 0x21);	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
+-	outb_p(0x04, 0x21);	/* 8259A-1 (the master) has a slave on IR2 */
+-	if (auto_eoi)
+-		outb_p(0x03, 0x21);	/* master does Auto EOI */
+-	else
+-		outb_p(0x01, 0x21);	/* master expects normal EOI */
 -
--static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
--{
--	aes_enc_blk(tfm, dst, src);
--}
+-	outb_p(0x11, 0xA0);	/* ICW1: select 8259A-2 init */
+-	outb_p(IRQ8_VECTOR, 0xA1);	/* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
+-	outb_p(0x02, 0xA1);	/* 8259A-2 is a slave on master's IR2 */
+-	outb_p(0x01, 0xA1);	/* (slave's support for AEOI in flat mode
+-				    is to be investigated) */
 -
--static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
--{
--	aes_dec_blk(tfm, dst, src);
--}
+-	if (auto_eoi)
+-		/*
+-		 * in AEOI mode we just have to mask the interrupt
+-		 * when acking.
+-		 */
+-		i8259A_chip.mask_ack = disable_8259A_irq;
+-	else
+-		i8259A_chip.mask_ack = mask_and_ack_8259A;
 -
--static struct crypto_alg aes_alg = {
--	.cra_name		=	"aes",
--	.cra_driver_name	=	"aes-x86_64",
--	.cra_priority		=	200,
--	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
--	.cra_blocksize		=	AES_BLOCK_SIZE,
--	.cra_ctxsize		=	sizeof(struct aes_ctx),
--	.cra_module		=	THIS_MODULE,
--	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
--	.cra_u			=	{
--		.cipher = {
--			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
--			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
--			.cia_setkey	   	= 	aes_set_key,
--			.cia_encrypt	 	=	aes_encrypt,
--			.cia_decrypt	  	=	aes_decrypt
--		}
--	}
--};
+-	udelay(100);		/* wait for 8259A to initialize */
 -
--static int __init aes_init(void)
--{
--	gen_tabs();
--	return crypto_register_alg(&aes_alg);
--}
+-	outb(cached_21, 0x21);	/* restore master IRQ mask */
+-	outb(cached_A1, 0xA1);	/* restore slave IRQ mask */
 -
--static void __exit aes_fini(void)
--{
--	crypto_unregister_alg(&aes_alg);
+-	spin_unlock_irqrestore(&i8259A_lock, flags);
 -}
 -
--module_init(aes_init);
--module_exit(aes_fini);
--
--MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
--MODULE_LICENSE("GPL");
--MODULE_ALIAS("aes");
-diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
-new file mode 100644
-index 0000000..71f4578
---- /dev/null
-+++ b/arch/x86/crypto/aes_glue.c
-@@ -0,0 +1,57 @@
-+/*
-+ * Glue Code for the asm optimized version of the AES Cipher Algorithm
-+ *
-+ */
+ static char irq_trigger[2];
+ /**
+  * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
+@@ -364,13 +320,13 @@ static int i8259A_shutdown(struct sys_device *dev)
+ 	 * the kernel initialization code can get it
+ 	 * out of.
+ 	 */
+-	outb(0xff, 0x21);	/* mask all of 8259A-1 */
+-	outb(0xff, 0xA1);	/* mask all of 8259A-1 */
++	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
++	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
+ 	return 0;
+ }
+ 
+ static struct sysdev_class i8259_sysdev_class = {
+-	set_kset_name("i8259"),
++	.name = "i8259",
+ 	.suspend = i8259A_suspend,
+ 	.resume = i8259A_resume,
+ 	.shutdown = i8259A_shutdown,
+@@ -391,6 +347,58 @@ static int __init i8259A_init_sysfs(void)
+ 
+ device_initcall(i8259A_init_sysfs);
+ 
++void init_8259A(int auto_eoi)
++{
++	unsigned long flags;
 +
-+#include <crypto/aes.h>
++	i8259A_auto_eoi = auto_eoi;
 +
-+asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-+asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
++	spin_lock_irqsave(&i8259A_lock, flags);
 +
-+static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-+{
-+	aes_enc_blk(tfm, dst, src);
++	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
++	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
++
++	/*
++	 * outb_pic - this has to work on a wide range of PC hardware.
++	 */
++	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
++	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
++	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
++	/* 8259A-1 (the master) has a slave on IR2 */
++	outb_pic(0x04, PIC_MASTER_IMR);
++	if (auto_eoi)	/* master does Auto EOI */
++		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
++	else		/* master expects normal EOI */
++		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
++
++	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
++	/* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
++	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
++	/* 8259A-2 is a slave on master's IR2 */
++	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
++	/* (slave's support for AEOI in flat mode is to be investigated) */
++	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
++
++	if (auto_eoi)
++		/*
++		 * In AEOI mode we just have to mask the interrupt
++		 * when acking.
++		 */
++		i8259A_chip.mask_ack = disable_8259A_irq;
++	else
++		i8259A_chip.mask_ack = mask_and_ack_8259A;
++
++	udelay(100);		/* wait for 8259A to initialize */
++
++	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
++	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
++
++	spin_unlock_irqrestore(&i8259A_lock, flags);
 +}
 +
-+static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
++
++
++
+ /*
+  * IRQ2 is cascade interrupt to second interrupt controller
+  */
+@@ -448,7 +456,9 @@ void __init init_ISA_irqs (void)
+ 	}
+ }
+ 
+-void __init init_IRQ(void)
++void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
++
++void __init native_init_IRQ(void)
+ {
+ 	int i;
+ 
+diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
+index 468c9c4..5b3ce79 100644
+--- a/arch/x86/kernel/init_task.c
++++ b/arch/x86/kernel/init_task.c
+@@ -15,7 +15,6 @@ static struct files_struct init_files = INIT_FILES;
+ static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+ struct mm_struct init_mm = INIT_MM(init_mm);
+-EXPORT_SYMBOL(init_mm);
+ 
+ /*
+  * Initial thread structure.
+diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
+index a6b1490..4ca5486 100644
+--- a/arch/x86/kernel/io_apic_32.c
++++ b/arch/x86/kernel/io_apic_32.c
+@@ -35,6 +35,7 @@
+ #include <linux/htirq.h>
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
++#include <linux/jiffies.h>	/* time_after() */
+ 
+ #include <asm/io.h>
+ #include <asm/smp.h>
+@@ -48,8 +49,6 @@
+ #include <mach_apic.h>
+ #include <mach_apicdef.h>
+ 
+-#include "io_ports.h"
+-
+ int (*ioapic_renumber_irq)(int ioapic, int irq);
+ atomic_t irq_mis_count;
+ 
+@@ -351,7 +350,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
+ # include <asm/processor.h>	/* kernel_thread() */
+ # include <linux/kernel_stat.h>	/* kstat */
+ # include <linux/slab.h>		/* kmalloc() */
+-# include <linux/timer.h>	/* time_after() */
++# include <linux/timer.h>
+  
+ #define IRQBALANCE_CHECK_ARCH -999
+ #define MAX_BALANCED_IRQ_INTERVAL	(5*HZ)
+@@ -727,7 +726,7 @@ late_initcall(balanced_irq_init);
+ #endif /* CONFIG_SMP */
+ 
+ #ifndef CONFIG_SMP
+-void fastcall send_IPI_self(int vector)
++void send_IPI_self(int vector)
+ {
+ 	unsigned int cfg;
+ 
+@@ -1900,7 +1899,7 @@ static int __init timer_irq_works(void)
+ 	 * might have cached one ExtINT interrupt.  Finally, at
+ 	 * least one tick may be lost due to delays.
+ 	 */
+-	if (jiffies - t1 > 4)
++	if (time_after(jiffies, t1 + 4))
+ 		return 1;
+ 
+ 	return 0;
+@@ -2080,7 +2079,7 @@ static struct irq_chip lapic_chip __read_mostly = {
+ 	.eoi		= ack_apic,
+ };
+ 
+-static void setup_nmi (void)
++static void __init setup_nmi(void)
+ {
+ 	/*
+  	 * Dirty trick to enable the NMI watchdog ...
+@@ -2093,7 +2092,7 @@ static void setup_nmi (void)
+ 	 */ 
+ 	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
+ 
+-	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
++	enable_NMI_through_LVT0();
+ 
+ 	apic_printk(APIC_VERBOSE, " done.\n");
+ }
+@@ -2401,7 +2400,7 @@ static int ioapic_resume(struct sys_device *dev)
+ }
+ 
+ static struct sysdev_class ioapic_sysdev_class = {
+-	set_kset_name("ioapic"),
++	.name = "ioapic",
+ 	.suspend = ioapic_suspend,
+ 	.resume = ioapic_resume,
+ };
+diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
+index cbac167..1627c0d 100644
+--- a/arch/x86/kernel/io_apic_64.c
++++ b/arch/x86/kernel/io_apic_64.c
+@@ -32,9 +32,11 @@
+ #include <linux/msi.h>
+ #include <linux/htirq.h>
+ #include <linux/dmar.h>
++#include <linux/jiffies.h>
+ #ifdef CONFIG_ACPI
+ #include <acpi/acpi_bus.h>
+ #endif
++#include <linux/bootmem.h>
+ 
+ #include <asm/idle.h>
+ #include <asm/io.h>
+@@ -1069,7 +1071,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
+ 	v = apic_read(APIC_LVR);
+ 	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
+ 	ver = GET_APIC_VERSION(v);
+-	maxlvt = get_maxlvt();
++	maxlvt = lapic_get_maxlvt();
+ 
+ 	v = apic_read(APIC_TASKPRI);
+ 	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
+@@ -1171,7 +1173,7 @@ void __apicdebuginit print_PIC(void)
+ 
+ #endif  /*  0  */
+ 
+-static void __init enable_IO_APIC(void)
++void __init enable_IO_APIC(void)
+ {
+ 	union IO_APIC_reg_01 reg_01;
+ 	int i8259_apic, i8259_pin;
+@@ -1298,7 +1300,7 @@ static int __init timer_irq_works(void)
+ 	 */
+ 
+ 	/* jiffies wrap? */
+-	if (jiffies - t1 > 4)
++	if (time_after(jiffies, t1 + 4))
+ 		return 1;
+ 	return 0;
+ }
+@@ -1411,7 +1413,7 @@ static void irq_complete_move(unsigned int irq)
+ 	if (likely(!cfg->move_in_progress))
+ 		return;
+ 
+-	vector = ~get_irq_regs()->orig_rax;
++	vector = ~get_irq_regs()->orig_ax;
+ 	me = smp_processor_id();
+ 	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
+ 		cpumask_t cleanup_mask;
+@@ -1438,7 +1440,7 @@ static void ack_apic_level(unsigned int irq)
+ 	int do_unmask_irq = 0;
+ 
+ 	irq_complete_move(irq);
+-#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
++#ifdef CONFIG_GENERIC_PENDING_IRQ
+ 	/* If we are moving the irq we need to mask it */
+ 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+ 		do_unmask_irq = 1;
+@@ -1565,7 +1567,7 @@ static struct hw_interrupt_type lapic_irq_type __read_mostly = {
+ 	.end = end_lapic_irq,
+ };
+ 
+-static void setup_nmi (void)
++static void __init setup_nmi(void)
+ {
+ 	/*
+  	 * Dirty trick to enable the NMI watchdog ...
+@@ -1578,7 +1580,7 @@ static void setup_nmi (void)
+ 	 */ 
+ 	printk(KERN_INFO "activating NMI Watchdog ...");
+ 
+-	enable_NMI_through_LVT0(NULL);
++	enable_NMI_through_LVT0();
+ 
+ 	printk(" done.\n");
+ }
+@@ -1654,7 +1656,7 @@ static inline void unlock_ExtINT_logic(void)
+  *
+  * FIXME: really need to revamp this for modern platforms only.
+  */
+-static inline void check_timer(void)
++static inline void __init check_timer(void)
+ {
+ 	struct irq_cfg *cfg = irq_cfg + 0;
+ 	int apic1, pin1, apic2, pin2;
+@@ -1788,7 +1790,10 @@ __setup("no_timer_check", notimercheck);
+ 
+ void __init setup_IO_APIC(void)
+ {
+-	enable_IO_APIC();
++
++	/*
++	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
++	 */
+ 
+ 	if (acpi_ioapic)
+ 		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
+@@ -1850,7 +1855,7 @@ static int ioapic_resume(struct sys_device *dev)
+ }
+ 
+ static struct sysdev_class ioapic_sysdev_class = {
+-	set_kset_name("ioapic"),
++	.name = "ioapic",
+ 	.suspend = ioapic_suspend,
+ 	.resume = ioapic_resume,
+ };
+@@ -2288,3 +2293,92 @@ void __init setup_ioapic_dest(void)
+ }
+ #endif
+ 
++#define IOAPIC_RESOURCE_NAME_SIZE 11
++
++static struct resource *ioapic_resources;
++
++static struct resource * __init ioapic_setup_resources(void)
 +{
-+	aes_dec_blk(tfm, dst, src);
-+}
++	unsigned long n;
++	struct resource *res;
++	char *mem;
++	int i;
 +
-+static struct crypto_alg aes_alg = {
-+	.cra_name		= "aes",
-+	.cra_driver_name	= "aes-asm",
-+	.cra_priority		= 200,
-+	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
-+	.cra_blocksize		= AES_BLOCK_SIZE,
-+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-+	.cra_module		= THIS_MODULE,
-+	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
-+	.cra_u	= {
-+		.cipher	= {
-+			.cia_min_keysize	= AES_MIN_KEY_SIZE,
-+			.cia_max_keysize	= AES_MAX_KEY_SIZE,
-+			.cia_setkey		= crypto_aes_set_key,
-+			.cia_encrypt		= aes_encrypt,
-+			.cia_decrypt		= aes_decrypt
++	if (nr_ioapics <= 0)
++		return NULL;
++
++	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
++	n *= nr_ioapics;
++
++	mem = alloc_bootmem(n);
++	res = (void *)mem;
++
++	if (mem != NULL) {
++		memset(mem, 0, n);
++		mem += sizeof(struct resource) * nr_ioapics;
++
++		for (i = 0; i < nr_ioapics; i++) {
++			res[i].name = mem;
++			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++			sprintf(mem,  "IOAPIC %u", i);
++			mem += IOAPIC_RESOURCE_NAME_SIZE;
 +		}
 +	}
-+};
 +
-+static int __init aes_init(void)
++	ioapic_resources = res;
++
++	return res;
++}
++
++void __init ioapic_init_mappings(void)
 +{
-+	return crypto_register_alg(&aes_alg);
++	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
++	struct resource *ioapic_res;
++	int i;
++
++	ioapic_res = ioapic_setup_resources();
++	for (i = 0; i < nr_ioapics; i++) {
++		if (smp_found_config) {
++			ioapic_phys = mp_ioapics[i].mpc_apicaddr;
++		} else {
++			ioapic_phys = (unsigned long)
++				alloc_bootmem_pages(PAGE_SIZE);
++			ioapic_phys = __pa(ioapic_phys);
++		}
++		set_fixmap_nocache(idx, ioapic_phys);
++		apic_printk(APIC_VERBOSE,
++			    "mapped IOAPIC to %016lx (%016lx)\n",
++			    __fix_to_virt(idx), ioapic_phys);
++		idx++;
++
++		if (ioapic_res != NULL) {
++			ioapic_res->start = ioapic_phys;
++			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
++			ioapic_res++;
++		}
++	}
 +}
 +
-+static void __exit aes_fini(void)
++static int __init ioapic_insert_resources(void)
 +{
-+	crypto_unregister_alg(&aes_alg);
++	int i;
++	struct resource *r = ioapic_resources;
++
++	if (!r) {
++		printk(KERN_ERR
++		       "IO APIC resources could be not be allocated.\n");
++		return -1;
++	}
++
++	for (i = 0; i < nr_ioapics; i++) {
++		insert_resource(&iomem_resource, r);
++		r++;
++	}
++
++	return 0;
 +}
 +
-+module_init(aes_init);
-+module_exit(aes_fini);
++/* Insert the IO APIC resources after PCI initialization has occured to handle
++ * IO APICS that are mapped in on a BAR in PCI space. */
++late_initcall(ioapic_insert_resources);
 +
-+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
-+MODULE_LICENSE("GPL");
-+MODULE_ALIAS("aes");
-+MODULE_ALIAS("aes-asm");
-diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S
+diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
 new file mode 100644
-index 0000000..72eb306
+index 0000000..bd49321
 --- /dev/null
-+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
-@@ -0,0 +1,1114 @@
-+# salsa20_pm.s version 20051229
-+# D. J. Bernstein
-+# Public domain.
++++ b/arch/x86/kernel/io_delay.c
+@@ -0,0 +1,114 @@
++/*
++ * I/O delay strategies for inb_p/outb_p
++ *
++ * Allow for a DMI based override of port 0x80, needed for certain HP laptops
++ * and possibly other systems. Also allow for the gradual elimination of
++ * outb_p/inb_p API uses.
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/dmi.h>
++#include <asm/io.h>
 +
-+# enter ECRYPT_encrypt_bytes
-+.text
-+.p2align 5
-+.globl ECRYPT_encrypt_bytes
-+ECRYPT_encrypt_bytes:
-+	mov	%esp,%eax
-+	and	$31,%eax
-+	add	$256,%eax
-+	sub	%eax,%esp
-+	# eax_stack = eax
-+	movl	%eax,80(%esp)
-+	# ebx_stack = ebx
-+	movl	%ebx,84(%esp)
-+	# esi_stack = esi
-+	movl	%esi,88(%esp)
-+	# edi_stack = edi
-+	movl	%edi,92(%esp)
-+	# ebp_stack = ebp
-+	movl	%ebp,96(%esp)
-+	# x = arg1
-+	movl	4(%esp,%eax),%edx
-+	# m = arg2
-+	movl	8(%esp,%eax),%esi
-+	# out = arg3
-+	movl	12(%esp,%eax),%edi
-+	# bytes = arg4
-+	movl	16(%esp,%eax),%ebx
-+	# bytes -= 0
-+	sub	$0,%ebx
-+	# goto done if unsigned<=
-+	jbe	._done
-+._start:
-+	# in0 = *(uint32 *) (x + 0)
-+	movl	0(%edx),%eax
-+	# in1 = *(uint32 *) (x + 4)
-+	movl	4(%edx),%ecx
-+	# in2 = *(uint32 *) (x + 8)
-+	movl	8(%edx),%ebp
-+	# j0 = in0
-+	movl	%eax,164(%esp)
-+	# in3 = *(uint32 *) (x + 12)
-+	movl	12(%edx),%eax
-+	# j1 = in1
-+	movl	%ecx,168(%esp)
-+	# in4 = *(uint32 *) (x + 16)
-+	movl	16(%edx),%ecx
-+	# j2 = in2
-+	movl	%ebp,172(%esp)
-+	# in5 = *(uint32 *) (x + 20)
-+	movl	20(%edx),%ebp
-+	# j3 = in3
-+	movl	%eax,176(%esp)
-+	# in6 = *(uint32 *) (x + 24)
-+	movl	24(%edx),%eax
-+	# j4 = in4
-+	movl	%ecx,180(%esp)
-+	# in7 = *(uint32 *) (x + 28)
-+	movl	28(%edx),%ecx
-+	# j5 = in5
-+	movl	%ebp,184(%esp)
-+	# in8 = *(uint32 *) (x + 32)
-+	movl	32(%edx),%ebp
-+	# j6 = in6
-+	movl	%eax,188(%esp)
-+	# in9 = *(uint32 *) (x + 36)
-+	movl	36(%edx),%eax
-+	# j7 = in7
-+	movl	%ecx,192(%esp)
-+	# in10 = *(uint32 *) (x + 40)
-+	movl	40(%edx),%ecx
-+	# j8 = in8
-+	movl	%ebp,196(%esp)
-+	# in11 = *(uint32 *) (x + 44)
-+	movl	44(%edx),%ebp
-+	# j9 = in9
-+	movl	%eax,200(%esp)
-+	# in12 = *(uint32 *) (x + 48)
-+	movl	48(%edx),%eax
-+	# j10 = in10
-+	movl	%ecx,204(%esp)
-+	# in13 = *(uint32 *) (x + 52)
-+	movl	52(%edx),%ecx
-+	# j11 = in11
-+	movl	%ebp,208(%esp)
-+	# in14 = *(uint32 *) (x + 56)
-+	movl	56(%edx),%ebp
-+	# j12 = in12
-+	movl	%eax,212(%esp)
-+	# in15 = *(uint32 *) (x + 60)
-+	movl	60(%edx),%eax
-+	# j13 = in13
-+	movl	%ecx,216(%esp)
-+	# j14 = in14
-+	movl	%ebp,220(%esp)
-+	# j15 = in15
-+	movl	%eax,224(%esp)
-+	# x_backup = x
-+	movl	%edx,64(%esp)
-+._bytesatleast1:
-+	#   bytes - 64
-+	cmp	$64,%ebx
-+	#   goto nocopy if unsigned>=
-+	jae	._nocopy
-+	#     ctarget = out
-+	movl	%edi,228(%esp)
-+	#     out = &tmp
-+	leal	0(%esp),%edi
-+	#     i = bytes
-+	mov	%ebx,%ecx
-+	#     while (i) { *out++ = *m++; --i }
-+	rep	movsb
-+	#     out = &tmp
-+	leal	0(%esp),%edi
-+	#     m = &tmp
-+	leal	0(%esp),%esi
-+._nocopy:
-+	#   out_backup = out
-+	movl	%edi,72(%esp)
-+	#   m_backup = m
-+	movl	%esi,68(%esp)
-+	#   bytes_backup = bytes
-+	movl	%ebx,76(%esp)
-+	#   in0 = j0
-+	movl	164(%esp),%eax
-+	#   in1 = j1
-+	movl	168(%esp),%ecx
-+	#   in2 = j2
-+	movl	172(%esp),%edx
-+	#   in3 = j3
-+	movl	176(%esp),%ebx
-+	#   x0 = in0
-+	movl	%eax,100(%esp)
-+	#   x1 = in1
-+	movl	%ecx,104(%esp)
-+	#   x2 = in2
-+	movl	%edx,108(%esp)
-+	#   x3 = in3
-+	movl	%ebx,112(%esp)
-+	#   in4 = j4
-+	movl	180(%esp),%eax
-+	#   in5 = j5
-+	movl	184(%esp),%ecx
-+	#   in6 = j6
-+	movl	188(%esp),%edx
-+	#   in7 = j7
-+	movl	192(%esp),%ebx
-+	#   x4 = in4
-+	movl	%eax,116(%esp)
-+	#   x5 = in5
-+	movl	%ecx,120(%esp)
-+	#   x6 = in6
-+	movl	%edx,124(%esp)
-+	#   x7 = in7
-+	movl	%ebx,128(%esp)
-+	#   in8 = j8
-+	movl	196(%esp),%eax
-+	#   in9 = j9
-+	movl	200(%esp),%ecx
-+	#   in10 = j10
-+	movl	204(%esp),%edx
-+	#   in11 = j11
-+	movl	208(%esp),%ebx
-+	#   x8 = in8
-+	movl	%eax,132(%esp)
-+	#   x9 = in9
-+	movl	%ecx,136(%esp)
-+	#   x10 = in10
-+	movl	%edx,140(%esp)
-+	#   x11 = in11
-+	movl	%ebx,144(%esp)
-+	#   in12 = j12
-+	movl	212(%esp),%eax
-+	#   in13 = j13
-+	movl	216(%esp),%ecx
-+	#   in14 = j14
-+	movl	220(%esp),%edx
-+	#   in15 = j15
-+	movl	224(%esp),%ebx
-+	#   x12 = in12
-+	movl	%eax,148(%esp)
-+	#   x13 = in13
-+	movl	%ecx,152(%esp)
-+	#   x14 = in14
-+	movl	%edx,156(%esp)
-+	#   x15 = in15
-+	movl	%ebx,160(%esp)
-+	#   i = 20
-+	mov	$20,%ebp
-+	# p = x0
-+	movl	100(%esp),%eax
-+	# s = x5
-+	movl	120(%esp),%ecx
-+	# t = x10
-+	movl	140(%esp),%edx
-+	# w = x15
-+	movl	160(%esp),%ebx
-+._mainloop:
-+	# x0 = p
-+	movl	%eax,100(%esp)
-+	# 				x10 = t
-+	movl	%edx,140(%esp)
-+	# p += x12
-+	addl	148(%esp),%eax
-+	# 		x5 = s
-+	movl	%ecx,120(%esp)
-+	# 				t += x6
-+	addl	124(%esp),%edx
-+	# 						x15 = w
-+	movl	%ebx,160(%esp)
-+	# 		r = x1
-+	movl	104(%esp),%esi
-+	# 		r += s
-+	add	%ecx,%esi
-+	# 						v = x11
-+	movl	144(%esp),%edi
-+	# 						v += w
-+	add	%ebx,%edi
-+	# p <<<= 7
-+	rol	$7,%eax
-+	# p ^= x4
-+	xorl	116(%esp),%eax
-+	# 				t <<<= 7
-+	rol	$7,%edx
-+	# 				t ^= x14
-+	xorl	156(%esp),%edx
-+	# 		r <<<= 7
-+	rol	$7,%esi
-+	# 		r ^= x9
-+	xorl	136(%esp),%esi
-+	# 						v <<<= 7
-+	rol	$7,%edi
-+	# 						v ^= x3
-+	xorl	112(%esp),%edi
-+	# x4 = p
-+	movl	%eax,116(%esp)
-+	# 				x14 = t
-+	movl	%edx,156(%esp)
-+	# p += x0
-+	addl	100(%esp),%eax
-+	# 		x9 = r
-+	movl	%esi,136(%esp)
-+	# 				t += x10
-+	addl	140(%esp),%edx
-+	# 						x3 = v
-+	movl	%edi,112(%esp)
-+	# p <<<= 9
-+	rol	$9,%eax
-+	# p ^= x8
-+	xorl	132(%esp),%eax
-+	# 				t <<<= 9
-+	rol	$9,%edx
-+	# 				t ^= x2
-+	xorl	108(%esp),%edx
-+	# 		s += r
-+	add	%esi,%ecx
-+	# 		s <<<= 9
-+	rol	$9,%ecx
-+	# 		s ^= x13
-+	xorl	152(%esp),%ecx
-+	# 						w += v
-+	add	%edi,%ebx
-+	# 						w <<<= 9
-+	rol	$9,%ebx
-+	# 						w ^= x7
-+	xorl	128(%esp),%ebx
-+	# x8 = p
-+	movl	%eax,132(%esp)
-+	# 				x2 = t
-+	movl	%edx,108(%esp)
-+	# p += x4
-+	addl	116(%esp),%eax
-+	# 		x13 = s
-+	movl	%ecx,152(%esp)
-+	# 				t += x14
-+	addl	156(%esp),%edx
-+	# 						x7 = w
-+	movl	%ebx,128(%esp)
-+	# p <<<= 13
-+	rol	$13,%eax
-+	# p ^= x12
-+	xorl	148(%esp),%eax
-+	# 				t <<<= 13
-+	rol	$13,%edx
-+	# 				t ^= x6
-+	xorl	124(%esp),%edx
-+	# 		r += s
-+	add	%ecx,%esi
-+	# 		r <<<= 13
-+	rol	$13,%esi
-+	# 		r ^= x1
-+	xorl	104(%esp),%esi
-+	# 						v += w
-+	add	%ebx,%edi
-+	# 						v <<<= 13
-+	rol	$13,%edi
-+	# 						v ^= x11
-+	xorl	144(%esp),%edi
-+	# x12 = p
-+	movl	%eax,148(%esp)
-+	# 				x6 = t
-+	movl	%edx,124(%esp)
-+	# p += x8
-+	addl	132(%esp),%eax
-+	# 		x1 = r
-+	movl	%esi,104(%esp)
-+	# 				t += x2
-+	addl	108(%esp),%edx
-+	# 						x11 = v
-+	movl	%edi,144(%esp)
-+	# p <<<= 18
-+	rol	$18,%eax
-+	# p ^= x0
-+	xorl	100(%esp),%eax
-+	# 				t <<<= 18
-+	rol	$18,%edx
-+	# 				t ^= x10
-+	xorl	140(%esp),%edx
-+	# 		s += r
-+	add	%esi,%ecx
-+	# 		s <<<= 18
-+	rol	$18,%ecx
-+	# 		s ^= x5
-+	xorl	120(%esp),%ecx
-+	# 						w += v
-+	add	%edi,%ebx
-+	# 						w <<<= 18
-+	rol	$18,%ebx
-+	# 						w ^= x15
-+	xorl	160(%esp),%ebx
-+	# x0 = p
-+	movl	%eax,100(%esp)
-+	# 				x10 = t
-+	movl	%edx,140(%esp)
-+	# p += x3
-+	addl	112(%esp),%eax
-+	# p <<<= 7
-+	rol	$7,%eax
-+	# 		x5 = s
-+	movl	%ecx,120(%esp)
-+	# 				t += x9
-+	addl	136(%esp),%edx
-+	# 						x15 = w
-+	movl	%ebx,160(%esp)
-+	# 		r = x4
-+	movl	116(%esp),%esi
-+	# 		r += s
-+	add	%ecx,%esi
-+	# 						v = x14
-+	movl	156(%esp),%edi
-+	# 						v += w
-+	add	%ebx,%edi
-+	# p ^= x1
-+	xorl	104(%esp),%eax
-+	# 				t <<<= 7
-+	rol	$7,%edx
-+	# 				t ^= x11
-+	xorl	144(%esp),%edx
-+	# 		r <<<= 7
-+	rol	$7,%esi
-+	# 		r ^= x6
-+	xorl	124(%esp),%esi
-+	# 						v <<<= 7
-+	rol	$7,%edi
-+	# 						v ^= x12
-+	xorl	148(%esp),%edi
-+	# x1 = p
-+	movl	%eax,104(%esp)
-+	# 				x11 = t
-+	movl	%edx,144(%esp)
-+	# p += x0
-+	addl	100(%esp),%eax
-+	# 		x6 = r
-+	movl	%esi,124(%esp)
-+	# 				t += x10
-+	addl	140(%esp),%edx
-+	# 						x12 = v
-+	movl	%edi,148(%esp)
-+	# p <<<= 9
-+	rol	$9,%eax
-+	# p ^= x2
-+	xorl	108(%esp),%eax
-+	# 				t <<<= 9
-+	rol	$9,%edx
-+	# 				t ^= x8
-+	xorl	132(%esp),%edx
-+	# 		s += r
-+	add	%esi,%ecx
-+	# 		s <<<= 9
-+	rol	$9,%ecx
-+	# 		s ^= x7
-+	xorl	128(%esp),%ecx
-+	# 						w += v
-+	add	%edi,%ebx
-+	# 						w <<<= 9
-+	rol	$9,%ebx
-+	# 						w ^= x13
-+	xorl	152(%esp),%ebx
-+	# x2 = p
-+	movl	%eax,108(%esp)
-+	# 				x8 = t
-+	movl	%edx,132(%esp)
-+	# p += x1
-+	addl	104(%esp),%eax
-+	# 		x7 = s
-+	movl	%ecx,128(%esp)
-+	# 				t += x11
-+	addl	144(%esp),%edx
-+	# 						x13 = w
-+	movl	%ebx,152(%esp)
-+	# p <<<= 13
-+	rol	$13,%eax
-+	# p ^= x3
-+	xorl	112(%esp),%eax
-+	# 				t <<<= 13
-+	rol	$13,%edx
-+	# 				t ^= x9
-+	xorl	136(%esp),%edx
-+	# 		r += s
-+	add	%ecx,%esi
-+	# 		r <<<= 13
-+	rol	$13,%esi
-+	# 		r ^= x4
-+	xorl	116(%esp),%esi
-+	# 						v += w
-+	add	%ebx,%edi
-+	# 						v <<<= 13
-+	rol	$13,%edi
-+	# 						v ^= x14
-+	xorl	156(%esp),%edi
-+	# x3 = p
-+	movl	%eax,112(%esp)
-+	# 				x9 = t
-+	movl	%edx,136(%esp)
-+	# p += x2
-+	addl	108(%esp),%eax
-+	# 		x4 = r
-+	movl	%esi,116(%esp)
-+	# 				t += x8
-+	addl	132(%esp),%edx
-+	# 						x14 = v
-+	movl	%edi,156(%esp)
-+	# p <<<= 18
-+	rol	$18,%eax
-+	# p ^= x0
-+	xorl	100(%esp),%eax
-+	# 				t <<<= 18
-+	rol	$18,%edx
-+	# 				t ^= x10
-+	xorl	140(%esp),%edx
-+	# 		s += r
-+	add	%esi,%ecx
-+	# 		s <<<= 18
-+	rol	$18,%ecx
-+	# 		s ^= x5
-+	xorl	120(%esp),%ecx
-+	# 						w += v
-+	add	%edi,%ebx
-+	# 						w <<<= 18
-+	rol	$18,%ebx
-+	# 						w ^= x15
-+	xorl	160(%esp),%ebx
-+	# x0 = p
-+	movl	%eax,100(%esp)
-+	# 				x10 = t
-+	movl	%edx,140(%esp)
-+	# p += x12
-+	addl	148(%esp),%eax
-+	# 		x5 = s
-+	movl	%ecx,120(%esp)
-+	# 				t += x6
-+	addl	124(%esp),%edx
-+	# 						x15 = w
-+	movl	%ebx,160(%esp)
-+	# 		r = x1
-+	movl	104(%esp),%esi
-+	# 		r += s
-+	add	%ecx,%esi
-+	# 						v = x11
-+	movl	144(%esp),%edi
-+	# 						v += w
-+	add	%ebx,%edi
-+	# p <<<= 7
-+	rol	$7,%eax
-+	# p ^= x4
-+	xorl	116(%esp),%eax
-+	# 				t <<<= 7
-+	rol	$7,%edx
-+	# 				t ^= x14
-+	xorl	156(%esp),%edx
-+	# 		r <<<= 7
-+	rol	$7,%esi
-+	# 		r ^= x9
-+	xorl	136(%esp),%esi
-+	# 						v <<<= 7
-+	rol	$7,%edi
-+	# 						v ^= x3
-+	xorl	112(%esp),%edi
-+	# x4 = p
-+	movl	%eax,116(%esp)
-+	# 				x14 = t
-+	movl	%edx,156(%esp)
-+	# p += x0
-+	addl	100(%esp),%eax
-+	# 		x9 = r
-+	movl	%esi,136(%esp)
-+	# 				t += x10
-+	addl	140(%esp),%edx
-+	# 						x3 = v
-+	movl	%edi,112(%esp)
-+	# p <<<= 9
-+	rol	$9,%eax
-+	# p ^= x8
-+	xorl	132(%esp),%eax
-+	# 				t <<<= 9
-+	rol	$9,%edx
-+	# 				t ^= x2
-+	xorl	108(%esp),%edx
-+	# 		s += r
-+	add	%esi,%ecx
-+	# 		s <<<= 9
-+	rol	$9,%ecx
-+	# 		s ^= x13
-+	xorl	152(%esp),%ecx
-+	# 						w += v
-+	add	%edi,%ebx
-+	# 						w <<<= 9
-+	rol	$9,%ebx
-+	# 						w ^= x7
-+	xorl	128(%esp),%ebx
-+	# x8 = p
-+	movl	%eax,132(%esp)
-+	# 				x2 = t
-+	movl	%edx,108(%esp)
-+	# p += x4
-+	addl	116(%esp),%eax
-+	# 		x13 = s
-+	movl	%ecx,152(%esp)
-+	# 				t += x14
-+	addl	156(%esp),%edx
-+	# 						x7 = w
-+	movl	%ebx,128(%esp)
-+	# p <<<= 13
-+	rol	$13,%eax
-+	# p ^= x12
-+	xorl	148(%esp),%eax
-+	# 				t <<<= 13
-+	rol	$13,%edx
-+	# 				t ^= x6
-+	xorl	124(%esp),%edx
-+	# 		r += s
-+	add	%ecx,%esi
-+	# 		r <<<= 13
-+	rol	$13,%esi
-+	# 		r ^= x1
-+	xorl	104(%esp),%esi
-+	# 						v += w
-+	add	%ebx,%edi
-+	# 						v <<<= 13
-+	rol	$13,%edi
-+	# 						v ^= x11
-+	xorl	144(%esp),%edi
-+	# x12 = p
-+	movl	%eax,148(%esp)
-+	# 				x6 = t
-+	movl	%edx,124(%esp)
-+	# p += x8
-+	addl	132(%esp),%eax
-+	# 		x1 = r
-+	movl	%esi,104(%esp)
-+	# 				t += x2
-+	addl	108(%esp),%edx
-+	# 						x11 = v
-+	movl	%edi,144(%esp)
-+	# p <<<= 18
-+	rol	$18,%eax
-+	# p ^= x0
-+	xorl	100(%esp),%eax
-+	# 				t <<<= 18
-+	rol	$18,%edx
-+	# 				t ^= x10
-+	xorl	140(%esp),%edx
-+	# 		s += r
-+	add	%esi,%ecx
-+	# 		s <<<= 18
-+	rol	$18,%ecx
-+	# 		s ^= x5
-+	xorl	120(%esp),%ecx
-+	# 						w += v
-+	add	%edi,%ebx
-+	# 						w <<<= 18
-+	rol	$18,%ebx
-+	# 						w ^= x15
-+	xorl	160(%esp),%ebx
-+	# x0 = p
-+	movl	%eax,100(%esp)
-+	# 				x10 = t
-+	movl	%edx,140(%esp)
-+	# p += x3
-+	addl	112(%esp),%eax
-+	# p <<<= 7
-+	rol	$7,%eax
-+	# 		x5 = s
-+	movl	%ecx,120(%esp)
-+	# 				t += x9
-+	addl	136(%esp),%edx
-+	# 						x15 = w
-+	movl	%ebx,160(%esp)
-+	# 		r = x4
-+	movl	116(%esp),%esi
-+	# 		r += s
-+	add	%ecx,%esi
-+	# 						v = x14
-+	movl	156(%esp),%edi
-+	# 						v += w
-+	add	%ebx,%edi
-+	# p ^= x1
-+	xorl	104(%esp),%eax
-+	# 				t <<<= 7
-+	rol	$7,%edx
-+	# 				t ^= x11
-+	xorl	144(%esp),%edx
-+	# 		r <<<= 7
-+	rol	$7,%esi
-+	# 		r ^= x6
-+	xorl	124(%esp),%esi
-+	# 						v <<<= 7
-+	rol	$7,%edi
-+	# 						v ^= x12
-+	xorl	148(%esp),%edi
-+	# x1 = p
-+	movl	%eax,104(%esp)
-+	# 				x11 = t
-+	movl	%edx,144(%esp)
-+	# p += x0
-+	addl	100(%esp),%eax
-+	# 		x6 = r
-+	movl	%esi,124(%esp)
-+	# 				t += x10
-+	addl	140(%esp),%edx
-+	# 						x12 = v
-+	movl	%edi,148(%esp)
-+	# p <<<= 9
-+	rol	$9,%eax
-+	# p ^= x2
-+	xorl	108(%esp),%eax
-+	# 				t <<<= 9
-+	rol	$9,%edx
-+	# 				t ^= x8
-+	xorl	132(%esp),%edx
-+	# 		s += r
-+	add	%esi,%ecx
-+	# 		s <<<= 9
-+	rol	$9,%ecx
-+	# 		s ^= x7
-+	xorl	128(%esp),%ecx
-+	# 						w += v
-+	add	%edi,%ebx
-+	# 						w <<<= 9
-+	rol	$9,%ebx
-+	# 						w ^= x13
-+	xorl	152(%esp),%ebx
-+	# x2 = p
-+	movl	%eax,108(%esp)
-+	# 				x8 = t
-+	movl	%edx,132(%esp)
-+	# p += x1
-+	addl	104(%esp),%eax
-+	# 		x7 = s
-+	movl	%ecx,128(%esp)
-+	# 				t += x11
-+	addl	144(%esp),%edx
-+	# 						x13 = w
-+	movl	%ebx,152(%esp)
-+	# p <<<= 13
-+	rol	$13,%eax
-+	# p ^= x3
-+	xorl	112(%esp),%eax
-+	# 				t <<<= 13
-+	rol	$13,%edx
-+	# 				t ^= x9
-+	xorl	136(%esp),%edx
-+	# 		r += s
-+	add	%ecx,%esi
-+	# 		r <<<= 13
-+	rol	$13,%esi
-+	# 		r ^= x4
-+	xorl	116(%esp),%esi
-+	# 						v += w
-+	add	%ebx,%edi
-+	# 						v <<<= 13
-+	rol	$13,%edi
-+	# 						v ^= x14
-+	xorl	156(%esp),%edi
-+	# x3 = p
-+	movl	%eax,112(%esp)
-+	# 				x9 = t
-+	movl	%edx,136(%esp)
-+	# p += x2
-+	addl	108(%esp),%eax
-+	# 		x4 = r
-+	movl	%esi,116(%esp)
-+	# 				t += x8
-+	addl	132(%esp),%edx
-+	# 						x14 = v
-+	movl	%edi,156(%esp)
-+	# p <<<= 18
-+	rol	$18,%eax
-+	# p ^= x0
-+	xorl	100(%esp),%eax
-+	# 				t <<<= 18
-+	rol	$18,%edx
-+	# 				t ^= x10
-+	xorl	140(%esp),%edx
-+	# 		s += r
-+	add	%esi,%ecx
-+	# 		s <<<= 18
-+	rol	$18,%ecx
-+	# 		s ^= x5
-+	xorl	120(%esp),%ecx
-+	# 						w += v
-+	add	%edi,%ebx
-+	# 						w <<<= 18
-+	rol	$18,%ebx
-+	# 						w ^= x15
-+	xorl	160(%esp),%ebx
-+	# i -= 4
-+	sub	$4,%ebp
-+	# goto mainloop if unsigned >
-+	ja	._mainloop
-+	# x0 = p
-+	movl	%eax,100(%esp)
-+	# x5 = s
-+	movl	%ecx,120(%esp)
-+	# x10 = t
-+	movl	%edx,140(%esp)
-+	# x15 = w
-+	movl	%ebx,160(%esp)
-+	#   out = out_backup
-+	movl	72(%esp),%edi
-+	#   m = m_backup
-+	movl	68(%esp),%esi
-+	#   in0 = x0
-+	movl	100(%esp),%eax
-+	#   in1 = x1
-+	movl	104(%esp),%ecx
-+	#   in0 += j0
-+	addl	164(%esp),%eax
-+	#   in1 += j1
-+	addl	168(%esp),%ecx
-+	#   in0 ^= *(uint32 *) (m + 0)
-+	xorl	0(%esi),%eax
-+	#   in1 ^= *(uint32 *) (m + 4)
-+	xorl	4(%esi),%ecx
-+	#   *(uint32 *) (out + 0) = in0
-+	movl	%eax,0(%edi)
-+	#   *(uint32 *) (out + 4) = in1
-+	movl	%ecx,4(%edi)
-+	#   in2 = x2
-+	movl	108(%esp),%eax
-+	#   in3 = x3
-+	movl	112(%esp),%ecx
-+	#   in2 += j2
-+	addl	172(%esp),%eax
-+	#   in3 += j3
-+	addl	176(%esp),%ecx
-+	#   in2 ^= *(uint32 *) (m + 8)
-+	xorl	8(%esi),%eax
-+	#   in3 ^= *(uint32 *) (m + 12)
-+	xorl	12(%esi),%ecx
-+	#   *(uint32 *) (out + 8) = in2
-+	movl	%eax,8(%edi)
-+	#   *(uint32 *) (out + 12) = in3
-+	movl	%ecx,12(%edi)
-+	#   in4 = x4
-+	movl	116(%esp),%eax
-+	#   in5 = x5
-+	movl	120(%esp),%ecx
-+	#   in4 += j4
-+	addl	180(%esp),%eax
-+	#   in5 += j5
-+	addl	184(%esp),%ecx
-+	#   in4 ^= *(uint32 *) (m + 16)
-+	xorl	16(%esi),%eax
-+	#   in5 ^= *(uint32 *) (m + 20)
-+	xorl	20(%esi),%ecx
-+	#   *(uint32 *) (out + 16) = in4
-+	movl	%eax,16(%edi)
-+	#   *(uint32 *) (out + 20) = in5
-+	movl	%ecx,20(%edi)
-+	#   in6 = x6
-+	movl	124(%esp),%eax
-+	#   in7 = x7
-+	movl	128(%esp),%ecx
-+	#   in6 += j6
-+	addl	188(%esp),%eax
-+	#   in7 += j7
-+	addl	192(%esp),%ecx
-+	#   in6 ^= *(uint32 *) (m + 24)
-+	xorl	24(%esi),%eax
-+	#   in7 ^= *(uint32 *) (m + 28)
-+	xorl	28(%esi),%ecx
-+	#   *(uint32 *) (out + 24) = in6
-+	movl	%eax,24(%edi)
-+	#   *(uint32 *) (out + 28) = in7
-+	movl	%ecx,28(%edi)
-+	#   in8 = x8
-+	movl	132(%esp),%eax
-+	#   in9 = x9
-+	movl	136(%esp),%ecx
-+	#   in8 += j8
-+	addl	196(%esp),%eax
-+	#   in9 += j9
-+	addl	200(%esp),%ecx
-+	#   in8 ^= *(uint32 *) (m + 32)
-+	xorl	32(%esi),%eax
-+	#   in9 ^= *(uint32 *) (m + 36)
-+	xorl	36(%esi),%ecx
-+	#   *(uint32 *) (out + 32) = in8
-+	movl	%eax,32(%edi)
-+	#   *(uint32 *) (out + 36) = in9
-+	movl	%ecx,36(%edi)
-+	#   in10 = x10
-+	movl	140(%esp),%eax
-+	#   in11 = x11
-+	movl	144(%esp),%ecx
-+	#   in10 += j10
-+	addl	204(%esp),%eax
-+	#   in11 += j11
-+	addl	208(%esp),%ecx
-+	#   in10 ^= *(uint32 *) (m + 40)
-+	xorl	40(%esi),%eax
-+	#   in11 ^= *(uint32 *) (m + 44)
-+	xorl	44(%esi),%ecx
-+	#   *(uint32 *) (out + 40) = in10
-+	movl	%eax,40(%edi)
-+	#   *(uint32 *) (out + 44) = in11
-+	movl	%ecx,44(%edi)
-+	#   in12 = x12
-+	movl	148(%esp),%eax
-+	#   in13 = x13
-+	movl	152(%esp),%ecx
-+	#   in12 += j12
-+	addl	212(%esp),%eax
-+	#   in13 += j13
-+	addl	216(%esp),%ecx
-+	#   in12 ^= *(uint32 *) (m + 48)
-+	xorl	48(%esi),%eax
-+	#   in13 ^= *(uint32 *) (m + 52)
-+	xorl	52(%esi),%ecx
-+	#   *(uint32 *) (out + 48) = in12
-+	movl	%eax,48(%edi)
-+	#   *(uint32 *) (out + 52) = in13
-+	movl	%ecx,52(%edi)
-+	#   in14 = x14
-+	movl	156(%esp),%eax
-+	#   in15 = x15
-+	movl	160(%esp),%ecx
-+	#   in14 += j14
-+	addl	220(%esp),%eax
-+	#   in15 += j15
-+	addl	224(%esp),%ecx
-+	#   in14 ^= *(uint32 *) (m + 56)
-+	xorl	56(%esi),%eax
-+	#   in15 ^= *(uint32 *) (m + 60)
-+	xorl	60(%esi),%ecx
-+	#   *(uint32 *) (out + 56) = in14
-+	movl	%eax,56(%edi)
-+	#   *(uint32 *) (out + 60) = in15
-+	movl	%ecx,60(%edi)
-+	#   bytes = bytes_backup
-+	movl	76(%esp),%ebx
-+	#   in8 = j8
-+	movl	196(%esp),%eax
-+	#   in9 = j9
-+	movl	200(%esp),%ecx
-+	#   in8 += 1
-+	add	$1,%eax
-+	#   in9 += 0 + carry
-+	adc	$0,%ecx
-+	#   j8 = in8
-+	movl	%eax,196(%esp)
-+	#   j9 = in9
-+	movl	%ecx,200(%esp)
-+	#   bytes - 64
-+	cmp	$64,%ebx
-+	#   goto bytesatleast65 if unsigned>
-+	ja	._bytesatleast65
-+	#     goto bytesatleast64 if unsigned>=
-+	jae	._bytesatleast64
-+	#       m = out
-+	mov	%edi,%esi
-+	#       out = ctarget
-+	movl	228(%esp),%edi
-+	#       i = bytes
-+	mov	%ebx,%ecx
-+	#       while (i) { *out++ = *m++; --i }
-+	rep	movsb
-+._bytesatleast64:
-+	#     x = x_backup
-+	movl	64(%esp),%eax
-+	#     in8 = j8
-+	movl	196(%esp),%ecx
-+	#     in9 = j9
-+	movl	200(%esp),%edx
-+	#     *(uint32 *) (x + 32) = in8
-+	movl	%ecx,32(%eax)
-+	#     *(uint32 *) (x + 36) = in9
-+	movl	%edx,36(%eax)
-+._done:
-+	#     eax = eax_stack
-+	movl	80(%esp),%eax
-+	#     ebx = ebx_stack
-+	movl	84(%esp),%ebx
-+	#     esi = esi_stack
-+	movl	88(%esp),%esi
-+	#     edi = edi_stack
-+	movl	92(%esp),%edi
-+	#     ebp = ebp_stack
-+	movl	96(%esp),%ebp
-+	#     leave
-+	add	%eax,%esp
-+	ret
-+._bytesatleast65:
-+	#   bytes -= 64
-+	sub	$64,%ebx
-+	#   out += 64
-+	add	$64,%edi
-+	#   m += 64
-+	add	$64,%esi
-+	# goto bytesatleast1
-+	jmp	._bytesatleast1
-+# enter ECRYPT_keysetup
-+.text
-+.p2align 5
-+.globl ECRYPT_keysetup
-+ECRYPT_keysetup:
-+	mov	%esp,%eax
-+	and	$31,%eax
-+	add	$256,%eax
-+	sub	%eax,%esp
-+	#   eax_stack = eax
-+	movl	%eax,64(%esp)
-+	#   ebx_stack = ebx
-+	movl	%ebx,68(%esp)
-+	#   esi_stack = esi
-+	movl	%esi,72(%esp)
-+	#   edi_stack = edi
-+	movl	%edi,76(%esp)
-+	#   ebp_stack = ebp
-+	movl	%ebp,80(%esp)
-+	#   k = arg2
-+	movl	8(%esp,%eax),%ecx
-+	#   kbits = arg3
-+	movl	12(%esp,%eax),%edx
-+	#   x = arg1
-+	movl	4(%esp,%eax),%eax
-+	#   in1 = *(uint32 *) (k + 0)
-+	movl	0(%ecx),%ebx
-+	#   in2 = *(uint32 *) (k + 4)
-+	movl	4(%ecx),%esi
-+	#   in3 = *(uint32 *) (k + 8)
-+	movl	8(%ecx),%edi
-+	#   in4 = *(uint32 *) (k + 12)
-+	movl	12(%ecx),%ebp
-+	#   *(uint32 *) (x + 4) = in1
-+	movl	%ebx,4(%eax)
-+	#   *(uint32 *) (x + 8) = in2
-+	movl	%esi,8(%eax)
-+	#   *(uint32 *) (x + 12) = in3
-+	movl	%edi,12(%eax)
-+	#   *(uint32 *) (x + 16) = in4
-+	movl	%ebp,16(%eax)
-+	#   kbits - 256
-+	cmp	$256,%edx
-+	#   goto kbits128 if unsigned<
-+	jb	._kbits128
-+._kbits256:
-+	#     in11 = *(uint32 *) (k + 16)
-+	movl	16(%ecx),%edx
-+	#     in12 = *(uint32 *) (k + 20)
-+	movl	20(%ecx),%ebx
-+	#     in13 = *(uint32 *) (k + 24)
-+	movl	24(%ecx),%esi
-+	#     in14 = *(uint32 *) (k + 28)
-+	movl	28(%ecx),%ecx
-+	#     *(uint32 *) (x + 44) = in11
-+	movl	%edx,44(%eax)
-+	#     *(uint32 *) (x + 48) = in12
-+	movl	%ebx,48(%eax)
-+	#     *(uint32 *) (x + 52) = in13
-+	movl	%esi,52(%eax)
-+	#     *(uint32 *) (x + 56) = in14
-+	movl	%ecx,56(%eax)
-+	#     in0 = 1634760805
-+	mov	$1634760805,%ecx
-+	#     in5 = 857760878
-+	mov	$857760878,%edx
-+	#     in10 = 2036477234
-+	mov	$2036477234,%ebx
-+	#     in15 = 1797285236
-+	mov	$1797285236,%esi
-+	#     *(uint32 *) (x + 0) = in0
-+	movl	%ecx,0(%eax)
-+	#     *(uint32 *) (x + 20) = in5
-+	movl	%edx,20(%eax)
-+	#     *(uint32 *) (x + 40) = in10
-+	movl	%ebx,40(%eax)
-+	#     *(uint32 *) (x + 60) = in15
-+	movl	%esi,60(%eax)
-+	#   goto keysetupdone
-+	jmp	._keysetupdone
-+._kbits128:
-+	#     in11 = *(uint32 *) (k + 0)
-+	movl	0(%ecx),%edx
-+	#     in12 = *(uint32 *) (k + 4)
-+	movl	4(%ecx),%ebx
-+	#     in13 = *(uint32 *) (k + 8)
-+	movl	8(%ecx),%esi
-+	#     in14 = *(uint32 *) (k + 12)
-+	movl	12(%ecx),%ecx
-+	#     *(uint32 *) (x + 44) = in11
-+	movl	%edx,44(%eax)
-+	#     *(uint32 *) (x + 48) = in12
-+	movl	%ebx,48(%eax)
-+	#     *(uint32 *) (x + 52) = in13
-+	movl	%esi,52(%eax)
-+	#     *(uint32 *) (x + 56) = in14
-+	movl	%ecx,56(%eax)
-+	#     in0 = 1634760805
-+	mov	$1634760805,%ecx
-+	#     in5 = 824206446
-+	mov	$824206446,%edx
-+	#     in10 = 2036477238
-+	mov	$2036477238,%ebx
-+	#     in15 = 1797285236
-+	mov	$1797285236,%esi
-+	#     *(uint32 *) (x + 0) = in0
-+	movl	%ecx,0(%eax)
-+	#     *(uint32 *) (x + 20) = in5
-+	movl	%edx,20(%eax)
-+	#     *(uint32 *) (x + 40) = in10
-+	movl	%ebx,40(%eax)
-+	#     *(uint32 *) (x + 60) = in15
-+	movl	%esi,60(%eax)
-+._keysetupdone:
-+	#   eax = eax_stack
-+	movl	64(%esp),%eax
-+	#   ebx = ebx_stack
-+	movl	68(%esp),%ebx
-+	#   esi = esi_stack
-+	movl	72(%esp),%esi
-+	#   edi = edi_stack
-+	movl	76(%esp),%edi
-+	#   ebp = ebp_stack
-+	movl	80(%esp),%ebp
-+	# leave
-+	add	%eax,%esp
-+	ret
-+# enter ECRYPT_ivsetup
-+.text
-+.p2align 5
-+.globl ECRYPT_ivsetup
-+ECRYPT_ivsetup:
-+	mov	%esp,%eax
-+	and	$31,%eax
-+	add	$256,%eax
-+	sub	%eax,%esp
-+	#   eax_stack = eax
-+	movl	%eax,64(%esp)
-+	#   ebx_stack = ebx
-+	movl	%ebx,68(%esp)
-+	#   esi_stack = esi
-+	movl	%esi,72(%esp)
-+	#   edi_stack = edi
-+	movl	%edi,76(%esp)
-+	#   ebp_stack = ebp
-+	movl	%ebp,80(%esp)
-+	#   iv = arg2
-+	movl	8(%esp,%eax),%ecx
-+	#   x = arg1
-+	movl	4(%esp,%eax),%eax
-+	#   in6 = *(uint32 *) (iv + 0)
-+	movl	0(%ecx),%edx
-+	#   in7 = *(uint32 *) (iv + 4)
-+	movl	4(%ecx),%ecx
-+	#   in8 = 0
-+	mov	$0,%ebx
-+	#   in9 = 0
-+	mov	$0,%esi
-+	#   *(uint32 *) (x + 24) = in6
-+	movl	%edx,24(%eax)
-+	#   *(uint32 *) (x + 28) = in7
-+	movl	%ecx,28(%eax)
-+	#   *(uint32 *) (x + 32) = in8
-+	movl	%ebx,32(%eax)
-+	#   *(uint32 *) (x + 36) = in9
-+	movl	%esi,36(%eax)
-+	#   eax = eax_stack
-+	movl	64(%esp),%eax
-+	#   ebx = ebx_stack
-+	movl	68(%esp),%ebx
-+	#   esi = esi_stack
-+	movl	72(%esp),%esi
-+	#   edi = edi_stack
-+	movl	76(%esp),%edi
-+	#   ebp = ebp_stack
-+	movl	80(%esp),%ebp
-+	# leave
-+	add	%eax,%esp
-+	ret
-diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
++int io_delay_type __read_mostly = CONFIG_DEFAULT_IO_DELAY_TYPE;
++EXPORT_SYMBOL_GPL(io_delay_type);
++
++static int __initdata io_delay_override;
++
++/*
++ * Paravirt wants native_io_delay to be a constant.
++ */
++void native_io_delay(void)
++{
++	switch (io_delay_type) {
++	default:
++	case CONFIG_IO_DELAY_TYPE_0X80:
++		asm volatile ("outb %al, $0x80");
++		break;
++	case CONFIG_IO_DELAY_TYPE_0XED:
++		asm volatile ("outb %al, $0xed");
++		break;
++	case CONFIG_IO_DELAY_TYPE_UDELAY:
++		/*
++		 * 2 usecs is an upper-bound for the outb delay but
++		 * note that udelay doesn't have the bus-level
++		 * side-effects that outb does, nor does udelay() have
++		 * precise timings during very early bootup (the delays
++		 * are shorter until calibrated):
++		 */
++		udelay(2);
++	case CONFIG_IO_DELAY_TYPE_NONE:
++		break;
++	}
++}
++EXPORT_SYMBOL(native_io_delay);
++
++static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
++{
++	if (io_delay_type == CONFIG_IO_DELAY_TYPE_0X80) {
++		printk(KERN_NOTICE "%s: using 0xed I/O delay port\n",
++			id->ident);
++		io_delay_type = CONFIG_IO_DELAY_TYPE_0XED;
++	}
++
++	return 0;
++}
++
++/*
++ * Quirk table for systems that misbehave (lock up, etc.) if port
++ * 0x80 is used:
++ */
++static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
++	{
++		.callback	= dmi_io_delay_0xed_port,
++		.ident		= "Compaq Presario V6000",
++		.matches	= {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
++			DMI_MATCH(DMI_BOARD_NAME, "30B7")
++		}
++	},
++	{
++		.callback	= dmi_io_delay_0xed_port,
++		.ident		= "HP Pavilion dv9000z",
++		.matches	= {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
++			DMI_MATCH(DMI_BOARD_NAME, "30B9")
++		}
++	},
++	{
++		.callback	= dmi_io_delay_0xed_port,
++		.ident		= "HP Pavilion tx1000",
++		.matches	= {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
++			DMI_MATCH(DMI_BOARD_NAME, "30BF")
++		}
++	},
++	{ }
++};
++
++void __init io_delay_init(void)
++{
++	if (!io_delay_override)
++		dmi_check_system(io_delay_0xed_port_dmi_table);
++}
++
++static int __init io_delay_param(char *s)
++{
++	if (!strcmp(s, "0x80"))
++		io_delay_type = CONFIG_IO_DELAY_TYPE_0X80;
++	else if (!strcmp(s, "0xed"))
++		io_delay_type = CONFIG_IO_DELAY_TYPE_0XED;
++	else if (!strcmp(s, "udelay"))
++		io_delay_type = CONFIG_IO_DELAY_TYPE_UDELAY;
++	else if (!strcmp(s, "none"))
++		io_delay_type = CONFIG_IO_DELAY_TYPE_NONE;
++	else
++		return -EINVAL;
++
++	io_delay_override = 1;
++	return 0;
++}
++
++early_param("io_delay", io_delay_param);
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
 new file mode 100644
-index 0000000..6214a9b
+index 0000000..50e5e4a
 --- /dev/null
-+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
-@@ -0,0 +1,920 @@
-+# enter ECRYPT_encrypt_bytes
-+.text
-+.p2align 5
-+.globl ECRYPT_encrypt_bytes
-+ECRYPT_encrypt_bytes:
-+	mov	%rsp,%r11
-+	and	$31,%r11
-+	add	$256,%r11
-+	sub	%r11,%rsp
-+	# x = arg1
-+	mov	%rdi,%r8
-+	# m = arg2
-+	mov	%rsi,%rsi
-+	# out = arg3
-+	mov	%rdx,%rdi
-+	# bytes = arg4
-+	mov	%rcx,%rdx
-+	#               unsigned>? bytes - 0
-+	cmp	$0,%rdx
-+	# comment:fp stack unchanged by jump
-+	# goto done if !unsigned>
-+	jbe	._done
-+	# comment:fp stack unchanged by fallthrough
-+# start:
-+._start:
-+	# r11_stack = r11
-+	movq	%r11,0(%rsp)
-+	# r12_stack = r12
-+	movq	%r12,8(%rsp)
-+	# r13_stack = r13
-+	movq	%r13,16(%rsp)
-+	# r14_stack = r14
-+	movq	%r14,24(%rsp)
-+	# r15_stack = r15
-+	movq	%r15,32(%rsp)
-+	# rbx_stack = rbx
-+	movq	%rbx,40(%rsp)
-+	# rbp_stack = rbp
-+	movq	%rbp,48(%rsp)
-+	# in0 = *(uint64 *) (x + 0)
-+	movq	0(%r8),%rcx
-+	# in2 = *(uint64 *) (x + 8)
-+	movq	8(%r8),%r9
-+	# in4 = *(uint64 *) (x + 16)
-+	movq	16(%r8),%rax
-+	# in6 = *(uint64 *) (x + 24)
-+	movq	24(%r8),%r10
-+	# in8 = *(uint64 *) (x + 32)
-+	movq	32(%r8),%r11
-+	# in10 = *(uint64 *) (x + 40)
-+	movq	40(%r8),%r12
-+	# in12 = *(uint64 *) (x + 48)
-+	movq	48(%r8),%r13
-+	# in14 = *(uint64 *) (x + 56)
-+	movq	56(%r8),%r14
-+	# j0 = in0
-+	movq	%rcx,56(%rsp)
-+	# j2 = in2
-+	movq	%r9,64(%rsp)
-+	# j4 = in4
-+	movq	%rax,72(%rsp)
-+	# j6 = in6
-+	movq	%r10,80(%rsp)
-+	# j8 = in8
-+	movq	%r11,88(%rsp)
-+	# j10 = in10
-+	movq	%r12,96(%rsp)
-+	# j12 = in12
-+	movq	%r13,104(%rsp)
-+	# j14 = in14
-+	movq	%r14,112(%rsp)
-+	# x_backup = x
-+	movq	%r8,120(%rsp)
-+# bytesatleast1:
-+._bytesatleast1:
-+	#                   unsigned<? bytes - 64
-+	cmp	$64,%rdx
-+	# comment:fp stack unchanged by jump
-+	#   goto nocopy if !unsigned<
-+	jae	._nocopy
-+	#     ctarget = out
-+	movq	%rdi,128(%rsp)
-+	#     out = &tmp
-+	leaq	192(%rsp),%rdi
-+	#     i = bytes
-+	mov	%rdx,%rcx
-+	#     while (i) { *out++ = *m++; --i }
-+	rep	movsb
-+	#     out = &tmp
-+	leaq	192(%rsp),%rdi
-+	#     m = &tmp
-+	leaq	192(%rsp),%rsi
-+	# comment:fp stack unchanged by fallthrough
-+#   nocopy:
-+._nocopy:
-+	#   out_backup = out
-+	movq	%rdi,136(%rsp)
-+	#   m_backup = m
-+	movq	%rsi,144(%rsp)
-+	#   bytes_backup = bytes
-+	movq	%rdx,152(%rsp)
-+	#   x1 = j0
-+	movq	56(%rsp),%rdi
-+	#   x0 = x1
-+	mov	%rdi,%rdx
-+	#   (uint64) x1 >>= 32
-+	shr	$32,%rdi
-+	#   		x3 = j2
-+	movq	64(%rsp),%rsi
-+	#   		x2 = x3
-+	mov	%rsi,%rcx
-+	#   		(uint64) x3 >>= 32
-+	shr	$32,%rsi
-+	#   x5 = j4
-+	movq	72(%rsp),%r8
-+	#   x4 = x5
-+	mov	%r8,%r9
-+	#   (uint64) x5 >>= 32
-+	shr	$32,%r8
-+	#   x5_stack = x5
-+	movq	%r8,160(%rsp)
-+	#   		x7 = j6
-+	movq	80(%rsp),%r8
-+	#   		x6 = x7
-+	mov	%r8,%rax
-+	#   		(uint64) x7 >>= 32
-+	shr	$32,%r8
-+	#   x9 = j8
-+	movq	88(%rsp),%r10
-+	#   x8 = x9
-+	mov	%r10,%r11
-+	#   (uint64) x9 >>= 32
-+	shr	$32,%r10
-+	#   		x11 = j10
-+	movq	96(%rsp),%r12
-+	#   		x10 = x11
-+	mov	%r12,%r13
-+	#   		x10_stack = x10
-+	movq	%r13,168(%rsp)
-+	#   		(uint64) x11 >>= 32
-+	shr	$32,%r12
-+	#   x13 = j12
-+	movq	104(%rsp),%r13
-+	#   x12 = x13
-+	mov	%r13,%r14
-+	#   (uint64) x13 >>= 32
-+	shr	$32,%r13
-+	#   		x15 = j14
-+	movq	112(%rsp),%r15
-+	#   		x14 = x15
-+	mov	%r15,%rbx
-+	#   		(uint64) x15 >>= 32
-+	shr	$32,%r15
-+	#   		x15_stack = x15
-+	movq	%r15,176(%rsp)
-+	#   i = 20
-+	mov	$20,%r15
-+#   mainloop:
-+._mainloop:
-+	#   i_backup = i
-+	movq	%r15,184(%rsp)
-+	# 		x5 = x5_stack
-+	movq	160(%rsp),%r15
-+	# a = x12 + x0
-+	lea	(%r14,%rdx),%rbp
-+	# (uint32) a <<<= 7
-+	rol	$7,%ebp
-+	# x4 ^= a
-+	xor	%rbp,%r9
-+	# 		b = x1 + x5
-+	lea	(%rdi,%r15),%rbp
-+	# 		(uint32) b <<<= 7
-+	rol	$7,%ebp
-+	# 		x9 ^= b
-+	xor	%rbp,%r10
-+	# a = x0 + x4
-+	lea	(%rdx,%r9),%rbp
-+	# (uint32) a <<<= 9
-+	rol	$9,%ebp
-+	# x8 ^= a
-+	xor	%rbp,%r11
-+	# 		b = x5 + x9
-+	lea	(%r15,%r10),%rbp
-+	# 		(uint32) b <<<= 9
-+	rol	$9,%ebp
-+	# 		x13 ^= b
-+	xor	%rbp,%r13
-+	# a = x4 + x8
-+	lea	(%r9,%r11),%rbp
-+	# (uint32) a <<<= 13
-+	rol	$13,%ebp
-+	# x12 ^= a
-+	xor	%rbp,%r14
-+	# 		b = x9 + x13
-+	lea	(%r10,%r13),%rbp
-+	# 		(uint32) b <<<= 13
-+	rol	$13,%ebp
-+	# 		x1 ^= b
-+	xor	%rbp,%rdi
-+	# a = x8 + x12
-+	lea	(%r11,%r14),%rbp
-+	# (uint32) a <<<= 18
-+	rol	$18,%ebp
-+	# x0 ^= a
-+	xor	%rbp,%rdx
-+	# 		b = x13 + x1
-+	lea	(%r13,%rdi),%rbp
-+	# 		(uint32) b <<<= 18
-+	rol	$18,%ebp
-+	# 		x5 ^= b
-+	xor	%rbp,%r15
-+	# 				x10 = x10_stack
-+	movq	168(%rsp),%rbp
-+	# 		x5_stack = x5
-+	movq	%r15,160(%rsp)
-+	# 				c = x6 + x10
-+	lea	(%rax,%rbp),%r15
-+	# 				(uint32) c <<<= 7
-+	rol	$7,%r15d
-+	# 				x14 ^= c
-+	xor	%r15,%rbx
-+	# 				c = x10 + x14
-+	lea	(%rbp,%rbx),%r15
-+	# 				(uint32) c <<<= 9
-+	rol	$9,%r15d
-+	# 				x2 ^= c
-+	xor	%r15,%rcx
-+	# 				c = x14 + x2
-+	lea	(%rbx,%rcx),%r15
-+	# 				(uint32) c <<<= 13
-+	rol	$13,%r15d
-+	# 				x6 ^= c
-+	xor	%r15,%rax
-+	# 				c = x2 + x6
-+	lea	(%rcx,%rax),%r15
-+	# 				(uint32) c <<<= 18
-+	rol	$18,%r15d
-+	# 				x10 ^= c
-+	xor	%r15,%rbp
-+	# 						x15 = x15_stack
-+	movq	176(%rsp),%r15
-+	# 				x10_stack = x10
-+	movq	%rbp,168(%rsp)
-+	# 						d = x11 + x15
-+	lea	(%r12,%r15),%rbp
-+	# 						(uint32) d <<<= 7
-+	rol	$7,%ebp
-+	# 						x3 ^= d
-+	xor	%rbp,%rsi
-+	# 						d = x15 + x3
-+	lea	(%r15,%rsi),%rbp
-+	# 						(uint32) d <<<= 9
-+	rol	$9,%ebp
-+	# 						x7 ^= d
-+	xor	%rbp,%r8
-+	# 						d = x3 + x7
-+	lea	(%rsi,%r8),%rbp
-+	# 						(uint32) d <<<= 13
-+	rol	$13,%ebp
-+	# 						x11 ^= d
-+	xor	%rbp,%r12
-+	# 						d = x7 + x11
-+	lea	(%r8,%r12),%rbp
-+	# 						(uint32) d <<<= 18
-+	rol	$18,%ebp
-+	# 						x15 ^= d
-+	xor	%rbp,%r15
-+	# 						x15_stack = x15
-+	movq	%r15,176(%rsp)
-+	# 		x5 = x5_stack
-+	movq	160(%rsp),%r15
-+	# a = x3 + x0
-+	lea	(%rsi,%rdx),%rbp
-+	# (uint32) a <<<= 7
-+	rol	$7,%ebp
-+	# x1 ^= a
-+	xor	%rbp,%rdi
-+	# 		b = x4 + x5
-+	lea	(%r9,%r15),%rbp
-+	# 		(uint32) b <<<= 7
-+	rol	$7,%ebp
-+	# 		x6 ^= b
-+	xor	%rbp,%rax
-+	# a = x0 + x1
-+	lea	(%rdx,%rdi),%rbp
-+	# (uint32) a <<<= 9
-+	rol	$9,%ebp
-+	# x2 ^= a
-+	xor	%rbp,%rcx
-+	# 		b = x5 + x6
-+	lea	(%r15,%rax),%rbp
-+	# 		(uint32) b <<<= 9
-+	rol	$9,%ebp
-+	# 		x7 ^= b
-+	xor	%rbp,%r8
-+	# a = x1 + x2
-+	lea	(%rdi,%rcx),%rbp
-+	# (uint32) a <<<= 13
-+	rol	$13,%ebp
-+	# x3 ^= a
-+	xor	%rbp,%rsi
-+	# 		b = x6 + x7
-+	lea	(%rax,%r8),%rbp
-+	# 		(uint32) b <<<= 13
-+	rol	$13,%ebp
-+	# 		x4 ^= b
-+	xor	%rbp,%r9
-+	# a = x2 + x3
-+	lea	(%rcx,%rsi),%rbp
-+	# (uint32) a <<<= 18
-+	rol	$18,%ebp
-+	# x0 ^= a
-+	xor	%rbp,%rdx
-+	# 		b = x7 + x4
-+	lea	(%r8,%r9),%rbp
-+	# 		(uint32) b <<<= 18
-+	rol	$18,%ebp
-+	# 		x5 ^= b
-+	xor	%rbp,%r15
-+	# 				x10 = x10_stack
-+	movq	168(%rsp),%rbp
-+	# 		x5_stack = x5
-+	movq	%r15,160(%rsp)
-+	# 				c = x9 + x10
-+	lea	(%r10,%rbp),%r15
-+	# 				(uint32) c <<<= 7
-+	rol	$7,%r15d
-+	# 				x11 ^= c
-+	xor	%r15,%r12
-+	# 				c = x10 + x11
-+	lea	(%rbp,%r12),%r15
-+	# 				(uint32) c <<<= 9
-+	rol	$9,%r15d
-+	# 				x8 ^= c
-+	xor	%r15,%r11
-+	# 				c = x11 + x8
-+	lea	(%r12,%r11),%r15
-+	# 				(uint32) c <<<= 13
-+	rol	$13,%r15d
-+	# 				x9 ^= c
-+	xor	%r15,%r10
-+	# 				c = x8 + x9
-+	lea	(%r11,%r10),%r15
-+	# 				(uint32) c <<<= 18
-+	rol	$18,%r15d
-+	# 				x10 ^= c
-+	xor	%r15,%rbp
-+	# 						x15 = x15_stack
-+	movq	176(%rsp),%r15
-+	# 				x10_stack = x10
-+	movq	%rbp,168(%rsp)
-+	# 						d = x14 + x15
-+	lea	(%rbx,%r15),%rbp
-+	# 						(uint32) d <<<= 7
-+	rol	$7,%ebp
-+	# 						x12 ^= d
-+	xor	%rbp,%r14
-+	# 						d = x15 + x12
-+	lea	(%r15,%r14),%rbp
-+	# 						(uint32) d <<<= 9
-+	rol	$9,%ebp
-+	# 						x13 ^= d
-+	xor	%rbp,%r13
-+	# 						d = x12 + x13
-+	lea	(%r14,%r13),%rbp
-+	# 						(uint32) d <<<= 13
-+	rol	$13,%ebp
-+	# 						x14 ^= d
-+	xor	%rbp,%rbx
-+	# 						d = x13 + x14
-+	lea	(%r13,%rbx),%rbp
-+	# 						(uint32) d <<<= 18
-+	rol	$18,%ebp
-+	# 						x15 ^= d
-+	xor	%rbp,%r15
-+	# 						x15_stack = x15
-+	movq	%r15,176(%rsp)
-+	# 		x5 = x5_stack
-+	movq	160(%rsp),%r15
-+	# a = x12 + x0
-+	lea	(%r14,%rdx),%rbp
-+	# (uint32) a <<<= 7
-+	rol	$7,%ebp
-+	# x4 ^= a
-+	xor	%rbp,%r9
-+	# 		b = x1 + x5
-+	lea	(%rdi,%r15),%rbp
-+	# 		(uint32) b <<<= 7
-+	rol	$7,%ebp
-+	# 		x9 ^= b
-+	xor	%rbp,%r10
-+	# a = x0 + x4
-+	lea	(%rdx,%r9),%rbp
-+	# (uint32) a <<<= 9
-+	rol	$9,%ebp
-+	# x8 ^= a
-+	xor	%rbp,%r11
-+	# 		b = x5 + x9
-+	lea	(%r15,%r10),%rbp
-+	# 		(uint32) b <<<= 9
-+	rol	$9,%ebp
-+	# 		x13 ^= b
-+	xor	%rbp,%r13
-+	# a = x4 + x8
-+	lea	(%r9,%r11),%rbp
-+	# (uint32) a <<<= 13
-+	rol	$13,%ebp
-+	# x12 ^= a
-+	xor	%rbp,%r14
-+	# 		b = x9 + x13
-+	lea	(%r10,%r13),%rbp
-+	# 		(uint32) b <<<= 13
-+	rol	$13,%ebp
-+	# 		x1 ^= b
-+	xor	%rbp,%rdi
-+	# a = x8 + x12
-+	lea	(%r11,%r14),%rbp
-+	# (uint32) a <<<= 18
-+	rol	$18,%ebp
-+	# x0 ^= a
-+	xor	%rbp,%rdx
-+	# 		b = x13 + x1
-+	lea	(%r13,%rdi),%rbp
-+	# 		(uint32) b <<<= 18
-+	rol	$18,%ebp
-+	# 		x5 ^= b
-+	xor	%rbp,%r15
-+	# 				x10 = x10_stack
-+	movq	168(%rsp),%rbp
-+	# 		x5_stack = x5
-+	movq	%r15,160(%rsp)
-+	# 				c = x6 + x10
-+	lea	(%rax,%rbp),%r15
-+	# 				(uint32) c <<<= 7
-+	rol	$7,%r15d
-+	# 				x14 ^= c
-+	xor	%r15,%rbx
-+	# 				c = x10 + x14
-+	lea	(%rbp,%rbx),%r15
-+	# 				(uint32) c <<<= 9
-+	rol	$9,%r15d
-+	# 				x2 ^= c
-+	xor	%r15,%rcx
-+	# 				c = x14 + x2
-+	lea	(%rbx,%rcx),%r15
-+	# 				(uint32) c <<<= 13
-+	rol	$13,%r15d
-+	# 				x6 ^= c
-+	xor	%r15,%rax
-+	# 				c = x2 + x6
-+	lea	(%rcx,%rax),%r15
-+	# 				(uint32) c <<<= 18
-+	rol	$18,%r15d
-+	# 				x10 ^= c
-+	xor	%r15,%rbp
-+	# 						x15 = x15_stack
-+	movq	176(%rsp),%r15
-+	# 				x10_stack = x10
-+	movq	%rbp,168(%rsp)
-+	# 						d = x11 + x15
-+	lea	(%r12,%r15),%rbp
-+	# 						(uint32) d <<<= 7
-+	rol	$7,%ebp
-+	# 						x3 ^= d
-+	xor	%rbp,%rsi
-+	# 						d = x15 + x3
-+	lea	(%r15,%rsi),%rbp
-+	# 						(uint32) d <<<= 9
-+	rol	$9,%ebp
-+	# 						x7 ^= d
-+	xor	%rbp,%r8
-+	# 						d = x3 + x7
-+	lea	(%rsi,%r8),%rbp
-+	# 						(uint32) d <<<= 13
-+	rol	$13,%ebp
-+	# 						x11 ^= d
-+	xor	%rbp,%r12
-+	# 						d = x7 + x11
-+	lea	(%r8,%r12),%rbp
-+	# 						(uint32) d <<<= 18
-+	rol	$18,%ebp
-+	# 						x15 ^= d
-+	xor	%rbp,%r15
-+	# 						x15_stack = x15
-+	movq	%r15,176(%rsp)
-+	# 		x5 = x5_stack
-+	movq	160(%rsp),%r15
-+	# a = x3 + x0
-+	lea	(%rsi,%rdx),%rbp
-+	# (uint32) a <<<= 7
-+	rol	$7,%ebp
-+	# x1 ^= a
-+	xor	%rbp,%rdi
-+	# 		b = x4 + x5
-+	lea	(%r9,%r15),%rbp
-+	# 		(uint32) b <<<= 7
-+	rol	$7,%ebp
-+	# 		x6 ^= b
-+	xor	%rbp,%rax
-+	# a = x0 + x1
-+	lea	(%rdx,%rdi),%rbp
-+	# (uint32) a <<<= 9
-+	rol	$9,%ebp
-+	# x2 ^= a
-+	xor	%rbp,%rcx
-+	# 		b = x5 + x6
-+	lea	(%r15,%rax),%rbp
-+	# 		(uint32) b <<<= 9
-+	rol	$9,%ebp
-+	# 		x7 ^= b
-+	xor	%rbp,%r8
-+	# a = x1 + x2
-+	lea	(%rdi,%rcx),%rbp
-+	# (uint32) a <<<= 13
-+	rol	$13,%ebp
-+	# x3 ^= a
-+	xor	%rbp,%rsi
-+	# 		b = x6 + x7
-+	lea	(%rax,%r8),%rbp
-+	# 		(uint32) b <<<= 13
-+	rol	$13,%ebp
-+	# 		x4 ^= b
-+	xor	%rbp,%r9
-+	# a = x2 + x3
-+	lea	(%rcx,%rsi),%rbp
-+	# (uint32) a <<<= 18
-+	rol	$18,%ebp
-+	# x0 ^= a
-+	xor	%rbp,%rdx
-+	# 		b = x7 + x4
-+	lea	(%r8,%r9),%rbp
-+	# 		(uint32) b <<<= 18
-+	rol	$18,%ebp
-+	# 		x5 ^= b
-+	xor	%rbp,%r15
-+	# 				x10 = x10_stack
-+	movq	168(%rsp),%rbp
-+	# 		x5_stack = x5
-+	movq	%r15,160(%rsp)
-+	# 				c = x9 + x10
-+	lea	(%r10,%rbp),%r15
-+	# 				(uint32) c <<<= 7
-+	rol	$7,%r15d
-+	# 				x11 ^= c
-+	xor	%r15,%r12
-+	# 				c = x10 + x11
-+	lea	(%rbp,%r12),%r15
-+	# 				(uint32) c <<<= 9
-+	rol	$9,%r15d
-+	# 				x8 ^= c
-+	xor	%r15,%r11
-+	# 				c = x11 + x8
-+	lea	(%r12,%r11),%r15
-+	# 				(uint32) c <<<= 13
-+	rol	$13,%r15d
-+	# 				x9 ^= c
-+	xor	%r15,%r10
-+	# 				c = x8 + x9
-+	lea	(%r11,%r10),%r15
-+	# 				(uint32) c <<<= 18
-+	rol	$18,%r15d
-+	# 				x10 ^= c
-+	xor	%r15,%rbp
-+	# 						x15 = x15_stack
-+	movq	176(%rsp),%r15
-+	# 				x10_stack = x10
-+	movq	%rbp,168(%rsp)
-+	# 						d = x14 + x15
-+	lea	(%rbx,%r15),%rbp
-+	# 						(uint32) d <<<= 7
-+	rol	$7,%ebp
-+	# 						x12 ^= d
-+	xor	%rbp,%r14
-+	# 						d = x15 + x12
-+	lea	(%r15,%r14),%rbp
-+	# 						(uint32) d <<<= 9
-+	rol	$9,%ebp
-+	# 						x13 ^= d
-+	xor	%rbp,%r13
-+	# 						d = x12 + x13
-+	lea	(%r14,%r13),%rbp
-+	# 						(uint32) d <<<= 13
-+	rol	$13,%ebp
-+	# 						x14 ^= d
-+	xor	%rbp,%rbx
-+	# 						d = x13 + x14
-+	lea	(%r13,%rbx),%rbp
-+	# 						(uint32) d <<<= 18
-+	rol	$18,%ebp
-+	# 						x15 ^= d
-+	xor	%rbp,%r15
-+	# 						x15_stack = x15
-+	movq	%r15,176(%rsp)
-+	#   i = i_backup
-+	movq	184(%rsp),%r15
-+	#                  unsigned>? i -= 4
-+	sub	$4,%r15
-+	# comment:fp stack unchanged by jump
-+	# goto mainloop if unsigned>
-+	ja	._mainloop
-+	#   (uint32) x2 += j2
-+	addl	64(%rsp),%ecx
-+	#   x3 <<= 32
-+	shl	$32,%rsi
-+	#   x3 += j2
-+	addq	64(%rsp),%rsi
-+	#   (uint64) x3 >>= 32
-+	shr	$32,%rsi
-+	#   x3 <<= 32
-+	shl	$32,%rsi
-+	#   x2 += x3
-+	add	%rsi,%rcx
-+	#   (uint32) x6 += j6
-+	addl	80(%rsp),%eax
-+	#   x7 <<= 32
-+	shl	$32,%r8
-+	#   x7 += j6
-+	addq	80(%rsp),%r8
-+	#   (uint64) x7 >>= 32
-+	shr	$32,%r8
-+	#   x7 <<= 32
-+	shl	$32,%r8
-+	#   x6 += x7
-+	add	%r8,%rax
-+	#   (uint32) x8 += j8
-+	addl	88(%rsp),%r11d
-+	#   x9 <<= 32
-+	shl	$32,%r10
-+	#   x9 += j8
-+	addq	88(%rsp),%r10
-+	#   (uint64) x9 >>= 32
-+	shr	$32,%r10
-+	#   x9 <<= 32
-+	shl	$32,%r10
-+	#   x8 += x9
-+	add	%r10,%r11
-+	#   (uint32) x12 += j12
-+	addl	104(%rsp),%r14d
-+	#   x13 <<= 32
-+	shl	$32,%r13
-+	#   x13 += j12
-+	addq	104(%rsp),%r13
-+	#   (uint64) x13 >>= 32
-+	shr	$32,%r13
-+	#   x13 <<= 32
-+	shl	$32,%r13
-+	#   x12 += x13
-+	add	%r13,%r14
-+	#   (uint32) x0 += j0
-+	addl	56(%rsp),%edx
-+	#   x1 <<= 32
-+	shl	$32,%rdi
-+	#   x1 += j0
-+	addq	56(%rsp),%rdi
-+	#   (uint64) x1 >>= 32
-+	shr	$32,%rdi
-+	#   x1 <<= 32
-+	shl	$32,%rdi
-+	#   x0 += x1
-+	add	%rdi,%rdx
-+	#   x5 = x5_stack
-+	movq	160(%rsp),%rdi
-+	#   (uint32) x4 += j4
-+	addl	72(%rsp),%r9d
-+	#   x5 <<= 32
-+	shl	$32,%rdi
-+	#   x5 += j4
-+	addq	72(%rsp),%rdi
-+	#   (uint64) x5 >>= 32
-+	shr	$32,%rdi
-+	#   x5 <<= 32
-+	shl	$32,%rdi
-+	#   x4 += x5
-+	add	%rdi,%r9
-+	#   x10 = x10_stack
-+	movq	168(%rsp),%r8
-+	#   (uint32) x10 += j10
-+	addl	96(%rsp),%r8d
-+	#   x11 <<= 32
-+	shl	$32,%r12
-+	#   x11 += j10
-+	addq	96(%rsp),%r12
-+	#   (uint64) x11 >>= 32
-+	shr	$32,%r12
-+	#   x11 <<= 32
-+	shl	$32,%r12
-+	#   x10 += x11
-+	add	%r12,%r8
-+	#   x15 = x15_stack
-+	movq	176(%rsp),%rdi
-+	#   (uint32) x14 += j14
-+	addl	112(%rsp),%ebx
-+	#   x15 <<= 32
-+	shl	$32,%rdi
-+	#   x15 += j14
-+	addq	112(%rsp),%rdi
-+	#   (uint64) x15 >>= 32
-+	shr	$32,%rdi
-+	#   x15 <<= 32
-+	shl	$32,%rdi
-+	#   x14 += x15
-+	add	%rdi,%rbx
-+	#   out = out_backup
-+	movq	136(%rsp),%rdi
-+	#   m = m_backup
-+	movq	144(%rsp),%rsi
-+	#   x0 ^= *(uint64 *) (m + 0)
-+	xorq	0(%rsi),%rdx
-+	#   *(uint64 *) (out + 0) = x0
-+	movq	%rdx,0(%rdi)
-+	#   x2 ^= *(uint64 *) (m + 8)
-+	xorq	8(%rsi),%rcx
-+	#   *(uint64 *) (out + 8) = x2
-+	movq	%rcx,8(%rdi)
-+	#   x4 ^= *(uint64 *) (m + 16)
-+	xorq	16(%rsi),%r9
-+	#   *(uint64 *) (out + 16) = x4
-+	movq	%r9,16(%rdi)
-+	#   x6 ^= *(uint64 *) (m + 24)
-+	xorq	24(%rsi),%rax
-+	#   *(uint64 *) (out + 24) = x6
-+	movq	%rax,24(%rdi)
-+	#   x8 ^= *(uint64 *) (m + 32)
-+	xorq	32(%rsi),%r11
-+	#   *(uint64 *) (out + 32) = x8
-+	movq	%r11,32(%rdi)
-+	#   x10 ^= *(uint64 *) (m + 40)
-+	xorq	40(%rsi),%r8
-+	#   *(uint64 *) (out + 40) = x10
-+	movq	%r8,40(%rdi)
-+	#   x12 ^= *(uint64 *) (m + 48)
-+	xorq	48(%rsi),%r14
-+	#   *(uint64 *) (out + 48) = x12
-+	movq	%r14,48(%rdi)
-+	#   x14 ^= *(uint64 *) (m + 56)
-+	xorq	56(%rsi),%rbx
-+	#   *(uint64 *) (out + 56) = x14
-+	movq	%rbx,56(%rdi)
-+	#   bytes = bytes_backup
-+	movq	152(%rsp),%rdx
-+	#   in8 = j8
-+	movq	88(%rsp),%rcx
-+	#   in8 += 1
-+	add	$1,%rcx
-+	#   j8 = in8
-+	movq	%rcx,88(%rsp)
-+	#                          unsigned>? unsigned<? bytes - 64
-+	cmp	$64,%rdx
-+	# comment:fp stack unchanged by jump
-+	#   goto bytesatleast65 if unsigned>
-+	ja	._bytesatleast65
-+	# comment:fp stack unchanged by jump
-+	#     goto bytesatleast64 if !unsigned<
-+	jae	._bytesatleast64
-+	#       m = out
-+	mov	%rdi,%rsi
-+	#       out = ctarget
-+	movq	128(%rsp),%rdi
-+	#       i = bytes
-+	mov	%rdx,%rcx
-+	#       while (i) { *out++ = *m++; --i }
-+	rep	movsb
-+	# comment:fp stack unchanged by fallthrough
-+#     bytesatleast64:
-+._bytesatleast64:
-+	#     x = x_backup
-+	movq	120(%rsp),%rdi
-+	#     in8 = j8
-+	movq	88(%rsp),%rsi
-+	#     *(uint64 *) (x + 32) = in8
-+	movq	%rsi,32(%rdi)
-+	#     r11 = r11_stack
-+	movq	0(%rsp),%r11
-+	#     r12 = r12_stack
-+	movq	8(%rsp),%r12
-+	#     r13 = r13_stack
-+	movq	16(%rsp),%r13
-+	#     r14 = r14_stack
-+	movq	24(%rsp),%r14
-+	#     r15 = r15_stack
-+	movq	32(%rsp),%r15
-+	#     rbx = rbx_stack
-+	movq	40(%rsp),%rbx
-+	#     rbp = rbp_stack
-+	movq	48(%rsp),%rbp
-+	# comment:fp stack unchanged by fallthrough
-+#     done:
-+._done:
-+	#     leave
-+	add	%r11,%rsp
-+	mov	%rdi,%rax
-+	mov	%rsi,%rdx
-+	ret
-+#   bytesatleast65:
-+._bytesatleast65:
-+	#   bytes -= 64
-+	sub	$64,%rdx
-+	#   out += 64
-+	add	$64,%rdi
-+	#   m += 64
-+	add	$64,%rsi
-+	# comment:fp stack unchanged by jump
-+	# goto bytesatleast1
-+	jmp	._bytesatleast1
-+# enter ECRYPT_keysetup
-+.text
-+.p2align 5
-+.globl ECRYPT_keysetup
-+ECRYPT_keysetup:
-+	mov	%rsp,%r11
-+	and	$31,%r11
-+	add	$256,%r11
-+	sub	%r11,%rsp
-+	#   k = arg2
-+	mov	%rsi,%rsi
-+	#   kbits = arg3
-+	mov	%rdx,%rdx
-+	#   x = arg1
-+	mov	%rdi,%rdi
-+	#   in0 = *(uint64 *) (k + 0)
-+	movq	0(%rsi),%r8
-+	#   in2 = *(uint64 *) (k + 8)
-+	movq	8(%rsi),%r9
-+	#   *(uint64 *) (x + 4) = in0
-+	movq	%r8,4(%rdi)
-+	#   *(uint64 *) (x + 12) = in2
-+	movq	%r9,12(%rdi)
-+	#                    unsigned<? kbits - 256
-+	cmp	$256,%rdx
-+	# comment:fp stack unchanged by jump
-+	#   goto kbits128 if unsigned<
-+	jb	._kbits128
-+#   kbits256:
-+._kbits256:
-+	#     in10 = *(uint64 *) (k + 16)
-+	movq	16(%rsi),%rdx
-+	#     in12 = *(uint64 *) (k + 24)
-+	movq	24(%rsi),%rsi
-+	#     *(uint64 *) (x + 44) = in10
-+	movq	%rdx,44(%rdi)
-+	#     *(uint64 *) (x + 52) = in12
-+	movq	%rsi,52(%rdi)
-+	#     in0 = 1634760805
-+	mov	$1634760805,%rsi
-+	#     in4 = 857760878
-+	mov	$857760878,%rdx
-+	#     in10 = 2036477234
-+	mov	$2036477234,%rcx
-+	#     in14 = 1797285236
-+	mov	$1797285236,%r8
-+	#     *(uint32 *) (x + 0) = in0
-+	movl	%esi,0(%rdi)
-+	#     *(uint32 *) (x + 20) = in4
-+	movl	%edx,20(%rdi)
-+	#     *(uint32 *) (x + 40) = in10
-+	movl	%ecx,40(%rdi)
-+	#     *(uint32 *) (x + 60) = in14
-+	movl	%r8d,60(%rdi)
-+	# comment:fp stack unchanged by jump
-+	#   goto keysetupdone
-+	jmp	._keysetupdone
-+#   kbits128:
-+._kbits128:
-+	#     in10 = *(uint64 *) (k + 0)
-+	movq	0(%rsi),%rdx
-+	#     in12 = *(uint64 *) (k + 8)
-+	movq	8(%rsi),%rsi
-+	#     *(uint64 *) (x + 44) = in10
-+	movq	%rdx,44(%rdi)
-+	#     *(uint64 *) (x + 52) = in12
-+	movq	%rsi,52(%rdi)
-+	#     in0 = 1634760805
-+	mov	$1634760805,%rsi
-+	#     in4 = 824206446
-+	mov	$824206446,%rdx
-+	#     in10 = 2036477238
-+	mov	$2036477238,%rcx
-+	#     in14 = 1797285236
-+	mov	$1797285236,%r8
-+	#     *(uint32 *) (x + 0) = in0
-+	movl	%esi,0(%rdi)
-+	#     *(uint32 *) (x + 20) = in4
-+	movl	%edx,20(%rdi)
-+	#     *(uint32 *) (x + 40) = in10
-+	movl	%ecx,40(%rdi)
-+	#     *(uint32 *) (x + 60) = in14
-+	movl	%r8d,60(%rdi)
-+#   keysetupdone:
-+._keysetupdone:
-+	# leave
-+	add	%r11,%rsp
-+	mov	%rdi,%rax
-+	mov	%rsi,%rdx
-+	ret
-+# enter ECRYPT_ivsetup
-+.text
-+.p2align 5
-+.globl ECRYPT_ivsetup
-+ECRYPT_ivsetup:
-+	mov	%rsp,%r11
-+	and	$31,%r11
-+	add	$256,%r11
-+	sub	%r11,%rsp
-+	#   iv = arg2
-+	mov	%rsi,%rsi
-+	#   x = arg1
-+	mov	%rdi,%rdi
-+	#   in6 = *(uint64 *) (iv + 0)
-+	movq	0(%rsi),%rsi
-+	#   in8 = 0
-+	mov	$0,%r8
-+	#   *(uint64 *) (x + 24) = in6
-+	movq	%rsi,24(%rdi)
-+	#   *(uint64 *) (x + 32) = in8
-+	movq	%r8,32(%rdi)
-+	# leave
-+	add	%r11,%rsp
-+	mov	%rdi,%rax
-+	mov	%rsi,%rdx
-+	ret
-diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
++++ b/arch/x86/kernel/ioport.c
+@@ -0,0 +1,154 @@
++/*
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus. 32/64 bits code unification by Miguel Botón.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/smp.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <linux/syscalls.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base,
++		       unsigned int extent, int new_value)
++{
++	unsigned int i;
++
++	for (i = base; i < base + extent; i++) {
++		if (new_value)
++			__set_bit(i, bitmap);
++		else
++			__clear_bit(i, bitmap);
++	}
++}
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++	struct thread_struct * t = &current->thread;
++	struct tss_struct * tss;
++	unsigned int i, max_long, bytes, bytes_updated;
++
++	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++		return -EINVAL;
++	if (turn_on && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	/*
++	 * If it's the first ioperm() call in this thread's lifetime, set the
++	 * IO bitmap up. ioperm() is much less timing critical than clone(),
++	 * this is why we delay this operation until now:
++	 */
++	if (!t->io_bitmap_ptr) {
++		unsigned long *bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++
++		if (!bitmap)
++			return -ENOMEM;
++
++		memset(bitmap, 0xff, IO_BITMAP_BYTES);
++		t->io_bitmap_ptr = bitmap;
++		set_thread_flag(TIF_IO_BITMAP);
++	}
++
++	/*
++	 * do it in the per-thread copy and in the TSS ...
++	 *
++	 * Disable preemption via get_cpu() - we must not switch away
++	 * because the ->io_bitmap_max value must match the bitmap
++	 * contents:
++	 */
++	tss = &per_cpu(init_tss, get_cpu());
++
++	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++	/*
++	 * Search for a (possibly new) maximum. This is simple and stupid,
++	 * to keep it obviously correct:
++	 */
++	max_long = 0;
++	for (i = 0; i < IO_BITMAP_LONGS; i++)
++		if (t->io_bitmap_ptr[i] != ~0UL)
++			max_long = i;
++
++	bytes = (max_long + 1) * sizeof(unsigned long);
++	bytes_updated = max(bytes, t->io_bitmap_max);
++
++	t->io_bitmap_max = bytes;
++
++#ifdef CONFIG_X86_32
++	/*
++	 * Sets the lazy trigger so that the next I/O operation will
++	 * reload the correct bitmap.
++	 * Reset the owner so that a process switch will not set
++	 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
++	 */
++	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
++	tss->io_bitmap_owner = NULL;
++#else
++	/* Update the TSS: */
++	memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
++#endif
++
++	put_cpu();
++
++	return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ * Here we just change the flags value on the stack: we allow
++ * only the super-user to do it. This depends on the stack-layout
++ * on system-call entry - see also fork() and the signal handling
++ * code.
++ */
++static int do_iopl(unsigned int level, struct pt_regs *regs)
++{
++	unsigned int old = (regs->flags >> 12) & 3;
++
++	if (level > 3)
++		return -EINVAL;
++	/* Trying to gain more privileges? */
++	if (level > old) {
++		if (!capable(CAP_SYS_RAWIO))
++			return -EPERM;
++	}
++	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
++
++	return 0;
++}
++
++#ifdef CONFIG_X86_32
++asmlinkage long sys_iopl(unsigned long regsp)
++{
++	struct pt_regs *regs = (struct pt_regs *)&regsp;
++	unsigned int level = regs->bx;
++	struct thread_struct *t = &current->thread;
++	int rc;
++
++	rc = do_iopl(level, regs);
++	if (rc < 0)
++		goto out;
++
++	t->iopl = level << 12;
++	set_iopl_mask(t->iopl);
++out:
++	return rc;
++}
++#else
++asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
++{
++	return do_iopl(level, regs);
++}
++#endif
+diff --git a/arch/x86/kernel/ioport_32.c b/arch/x86/kernel/ioport_32.c
+deleted file mode 100644
+index 4ed48dc..0000000
+--- a/arch/x86/kernel/ioport_32.c
++++ /dev/null
+@@ -1,151 +0,0 @@
+-/*
+- * This contains the io-permission bitmap code - written by obz, with changes
+- * by Linus.
+- */
+-
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/capability.h>
+-#include <linux/errno.h>
+-#include <linux/types.h>
+-#include <linux/ioport.h>
+-#include <linux/smp.h>
+-#include <linux/stddef.h>
+-#include <linux/slab.h>
+-#include <linux/thread_info.h>
+-#include <linux/syscalls.h>
+-
+-/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
+-static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
+-{
+-	unsigned long mask;
+-	unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
+-	unsigned int low_index = base & (BITS_PER_LONG-1);
+-	int length = low_index + extent;
+-
+-	if (low_index != 0) {
+-		mask = (~0UL << low_index);
+-		if (length < BITS_PER_LONG)
+-			mask &= ~(~0UL << length);
+-		if (new_value)
+-			*bitmap_base++ |= mask;
+-		else
+-			*bitmap_base++ &= ~mask;
+-		length -= BITS_PER_LONG;
+-	}
+-
+-	mask = (new_value ? ~0UL : 0UL);
+-	while (length >= BITS_PER_LONG) {
+-		*bitmap_base++ = mask;
+-		length -= BITS_PER_LONG;
+-	}
+-
+-	if (length > 0) {
+-		mask = ~(~0UL << length);
+-		if (new_value)
+-			*bitmap_base++ |= mask;
+-		else
+-			*bitmap_base++ &= ~mask;
+-	}
+-}
+-
+-
+-/*
+- * this changes the io permissions bitmap in the current task.
+- */
+-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+-{
+-	unsigned long i, max_long, bytes, bytes_updated;
+-	struct thread_struct * t = &current->thread;
+-	struct tss_struct * tss;
+-	unsigned long *bitmap;
+-
+-	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+-		return -EINVAL;
+-	if (turn_on && !capable(CAP_SYS_RAWIO))
+-		return -EPERM;
+-
+-	/*
+-	 * If it's the first ioperm() call in this thread's lifetime, set the
+-	 * IO bitmap up. ioperm() is much less timing critical than clone(),
+-	 * this is why we delay this operation until now:
+-	 */
+-	if (!t->io_bitmap_ptr) {
+-		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+-		if (!bitmap)
+-			return -ENOMEM;
+-
+-		memset(bitmap, 0xff, IO_BITMAP_BYTES);
+-		t->io_bitmap_ptr = bitmap;
+-		set_thread_flag(TIF_IO_BITMAP);
+-	}
+-
+-	/*
+-	 * do it in the per-thread copy and in the TSS ...
+-	 *
+-	 * Disable preemption via get_cpu() - we must not switch away
+-	 * because the ->io_bitmap_max value must match the bitmap
+-	 * contents:
+-	 */
+-	tss = &per_cpu(init_tss, get_cpu());
+-
+-	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+-
+-	/*
+-	 * Search for a (possibly new) maximum. This is simple and stupid,
+-	 * to keep it obviously correct:
+-	 */
+-	max_long = 0;
+-	for (i = 0; i < IO_BITMAP_LONGS; i++)
+-		if (t->io_bitmap_ptr[i] != ~0UL)
+-			max_long = i;
+-
+-	bytes = (max_long + 1) * sizeof(long);
+-	bytes_updated = max(bytes, t->io_bitmap_max);
+-
+-	t->io_bitmap_max = bytes;
+-
+-	/*
+-	 * Sets the lazy trigger so that the next I/O operation will
+-	 * reload the correct bitmap.
+-	 * Reset the owner so that a process switch will not set
+-	 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
+-	 */
+-	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+-	tss->io_bitmap_owner = NULL;
+-
+-	put_cpu();
+-
+-	return 0;
+-}
+-
+-/*
+- * sys_iopl has to be used when you want to access the IO ports
+- * beyond the 0x3ff range: to get the full 65536 ports bitmapped
+- * you'd need 8kB of bitmaps/process, which is a bit excessive.
+- *
+- * Here we just change the eflags value on the stack: we allow
+- * only the super-user to do it. This depends on the stack-layout
+- * on system-call entry - see also fork() and the signal handling
+- * code.
+- */
+-
+-asmlinkage long sys_iopl(unsigned long unused)
+-{
+-	volatile struct pt_regs * regs = (struct pt_regs *) &unused;
+-	unsigned int level = regs->ebx;
+-	unsigned int old = (regs->eflags >> 12) & 3;
+-	struct thread_struct *t = &current->thread;
+-
+-	if (level > 3)
+-		return -EINVAL;
+-	/* Trying to gain more privileges? */
+-	if (level > old) {
+-		if (!capable(CAP_SYS_RAWIO))
+-			return -EPERM;
+-	}
+-	t->iopl = level << 12;
+-	regs->eflags = (regs->eflags & ~X86_EFLAGS_IOPL) | t->iopl;
+-	set_iopl_mask(t->iopl);
+-	return 0;
+-}
+diff --git a/arch/x86/kernel/ioport_64.c b/arch/x86/kernel/ioport_64.c
+deleted file mode 100644
+index 5f62fad..0000000
+--- a/arch/x86/kernel/ioport_64.c
++++ /dev/null
+@@ -1,117 +0,0 @@
+-/*
+- * This contains the io-permission bitmap code - written by obz, with changes
+- * by Linus.
+- */
+-
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/capability.h>
+-#include <linux/errno.h>
+-#include <linux/types.h>
+-#include <linux/ioport.h>
+-#include <linux/smp.h>
+-#include <linux/stddef.h>
+-#include <linux/slab.h>
+-#include <linux/thread_info.h>
+-#include <linux/syscalls.h>
+-
+-/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
+-static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
+-{
+-	int i;
+-		if (new_value)
+-		for (i = base; i < base + extent; i++) 
+-			__set_bit(i, bitmap); 
+-		else
+-		for (i = base; i < base + extent; i++) 
+-			clear_bit(i, bitmap); 
+-}
+-
+-/*
+- * this changes the io permissions bitmap in the current task.
+- */
+-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+-{
+-	unsigned int i, max_long, bytes, bytes_updated;
+-	struct thread_struct * t = &current->thread;
+-	struct tss_struct * tss;
+-	unsigned long *bitmap;
+-
+-	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+-		return -EINVAL;
+-	if (turn_on && !capable(CAP_SYS_RAWIO))
+-		return -EPERM;
+-
+-	/*
+-	 * If it's the first ioperm() call in this thread's lifetime, set the
+-	 * IO bitmap up. ioperm() is much less timing critical than clone(),
+-	 * this is why we delay this operation until now:
+-	 */
+-	if (!t->io_bitmap_ptr) {
+-		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+-		if (!bitmap)
+-			return -ENOMEM;
+-
+-		memset(bitmap, 0xff, IO_BITMAP_BYTES);
+-		t->io_bitmap_ptr = bitmap;
+-		set_thread_flag(TIF_IO_BITMAP);
+-	}
+-
+-	/*
+-	 * do it in the per-thread copy and in the TSS ...
+-	 *
+-	 * Disable preemption via get_cpu() - we must not switch away
+-	 * because the ->io_bitmap_max value must match the bitmap
+-	 * contents:
+-	 */
+-	tss = &per_cpu(init_tss, get_cpu());
+-
+-	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+-
+-	/*
+-	 * Search for a (possibly new) maximum. This is simple and stupid,
+-	 * to keep it obviously correct:
+-	 */
+-	max_long = 0;
+-	for (i = 0; i < IO_BITMAP_LONGS; i++)
+-		if (t->io_bitmap_ptr[i] != ~0UL)
+-			max_long = i;
+-
+-	bytes = (max_long + 1) * sizeof(long);
+-	bytes_updated = max(bytes, t->io_bitmap_max);
+-
+-	t->io_bitmap_max = bytes;
+-
+-	/* Update the TSS: */
+-	memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
+-
+-	put_cpu();
+-
+-	return 0;
+-}
+-
+-/*
+- * sys_iopl has to be used when you want to access the IO ports
+- * beyond the 0x3ff range: to get the full 65536 ports bitmapped
+- * you'd need 8kB of bitmaps/process, which is a bit excessive.
+- *
+- * Here we just change the eflags value on the stack: we allow
+- * only the super-user to do it. This depends on the stack-layout
+- * on system-call entry - see also fork() and the signal handling
+- * code.
+- */
+-
+-asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
+-{
+-	unsigned int old = (regs->eflags >> 12) & 3;
+-
+-	if (level > 3)
+-		return -EINVAL;
+-	/* Trying to gain more privileges? */
+-	if (level > old) {
+-		if (!capable(CAP_SYS_RAWIO))
+-			return -EPERM;
+-	}
+-	regs->eflags = (regs->eflags &~ X86_EFLAGS_IOPL) | (level << 12);
+-	return 0;
+-}
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index d3fde94..cef054b 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -66,11 +66,11 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
+  * SMP cross-CPU interrupts have their own specific
+  * handlers).
+  */
+-fastcall unsigned int do_IRQ(struct pt_regs *regs)
++unsigned int do_IRQ(struct pt_regs *regs)
+ {	
+ 	struct pt_regs *old_regs;
+ 	/* high bit used in ret_from_ code */
+-	int irq = ~regs->orig_eax;
++	int irq = ~regs->orig_ax;
+ 	struct irq_desc *desc = irq_desc + irq;
+ #ifdef CONFIG_4KSTACKS
+ 	union irq_ctx *curctx, *irqctx;
+@@ -88,13 +88,13 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ 	/* Debugging check for stack overflow: is there less than 1KB free? */
+ 	{
+-		long esp;
++		long sp;
+ 
+ 		__asm__ __volatile__("andl %%esp,%0" :
+-					"=r" (esp) : "0" (THREAD_SIZE - 1));
+-		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
++					"=r" (sp) : "0" (THREAD_SIZE - 1));
++		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ 			printk("do_IRQ: stack overflow: %ld\n",
+-				esp - sizeof(struct thread_info));
++				sp - sizeof(struct thread_info));
+ 			dump_stack();
+ 		}
+ 	}
+@@ -112,7 +112,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
+ 	 * current stack (which is the irq stack already after all)
+ 	 */
+ 	if (curctx != irqctx) {
+-		int arg1, arg2, ebx;
++		int arg1, arg2, bx;
+ 
+ 		/* build the stack frame on the IRQ stack */
+ 		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+@@ -128,10 +128,10 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
+ 			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+ 
+ 		asm volatile(
+-			"       xchgl  %%ebx,%%esp      \n"
+-			"       call   *%%edi           \n"
+-			"       movl   %%ebx,%%esp      \n"
+-			: "=a" (arg1), "=d" (arg2), "=b" (ebx)
++			"       xchgl  %%ebx,%%esp    \n"
++			"       call   *%%edi         \n"
++			"       movl   %%ebx,%%esp    \n"
++			: "=a" (arg1), "=d" (arg2), "=b" (bx)
+ 			:  "0" (irq),   "1" (desc),  "2" (isp),
+ 			   "D" (desc->handle_irq)
+ 			: "memory", "cc"
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 6b5c730..3aac154 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -20,6 +20,26 @@
+ 
+ atomic_t irq_err_count;
+ 
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
++	/*
++	 * Currently unexpected vectors happen only on SMP and APIC.
++	 * We _must_ ack these because every local APIC has only N
++	 * irq slots per priority level, and a 'hanging, unacked' IRQ
++	 * holds up an irq slot - in excessive cases (when multiple
++	 * unexpected vectors occur) that might lock up the APIC
++	 * completely.
++	 * But don't ack when the APIC is disabled. -AK
++	 */
++	if (!disable_apic)
++		ack_APIC_irq();
++}
++
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /*
+  * Probabilistic stack overflow check:
+@@ -33,11 +53,11 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+ 	u64 curbase = (u64)task_stack_page(current);
+ 	static unsigned long warned = -60*HZ;
+ 
+-	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
+-	    regs->rsp <  curbase + sizeof(struct thread_info) + 128 &&
++	if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
++	    regs->sp <  curbase + sizeof(struct thread_info) + 128 &&
+ 	    time_after(jiffies, warned + 60*HZ)) {
+-		printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
+-		       current->comm, curbase, regs->rsp);
++		printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
++		       current->comm, curbase, regs->sp);
+ 		show_stack(NULL,NULL);
+ 		warned = jiffies;
+ 	}
+@@ -142,7 +162,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
+ 	struct pt_regs *old_regs = set_irq_regs(regs);
+ 
+ 	/* high bit used in ret_from_ code  */
+-	unsigned vector = ~regs->orig_rax;
++	unsigned vector = ~regs->orig_ax;
+ 	unsigned irq;
+ 
+ 	exit_idle();
+diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
+new file mode 100644
+index 0000000..7335430
+--- /dev/null
++++ b/arch/x86/kernel/kdebugfs.c
+@@ -0,0 +1,65 @@
++/*
++ * Architecture specific debugfs files
++ *
++ * Copyright (C) 2007, Intel Corp.
++ *	Huang Ying <ying.huang at intel.com>
++ *
++ * This file is released under the GPLv2.
++ */
++
++#include <linux/debugfs.h>
++#include <linux/stat.h>
++#include <linux/init.h>
++
++#include <asm/setup.h>
++
++#ifdef CONFIG_DEBUG_BOOT_PARAMS
++static struct debugfs_blob_wrapper boot_params_blob = {
++	.data = &boot_params,
++	.size = sizeof(boot_params),
++};
++
++static int __init boot_params_kdebugfs_init(void)
++{
++	int error;
++	struct dentry *dbp, *version, *data;
++
++	dbp = debugfs_create_dir("boot_params", NULL);
++	if (!dbp) {
++		error = -ENOMEM;
++		goto err_return;
++	}
++	version = debugfs_create_x16("version", S_IRUGO, dbp,
++				     &boot_params.hdr.version);
++	if (!version) {
++		error = -ENOMEM;
++		goto err_dir;
++	}
++	data = debugfs_create_blob("data", S_IRUGO, dbp,
++				   &boot_params_blob);
++	if (!data) {
++		error = -ENOMEM;
++		goto err_version;
++	}
++	return 0;
++err_version:
++	debugfs_remove(version);
++err_dir:
++	debugfs_remove(dbp);
++err_return:
++	return error;
++}
++#endif
++
++static int __init arch_kdebugfs_init(void)
++{
++	int error = 0;
++
++#ifdef CONFIG_DEBUG_BOOT_PARAMS
++	error = boot_params_kdebugfs_init();
++#endif
++
++	return error;
++}
++
++arch_initcall(arch_kdebugfs_init);
+diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
+new file mode 100644
+index 0000000..a99e764
+--- /dev/null
++++ b/arch/x86/kernel/kprobes.c
+@@ -0,0 +1,1066 @@
++/*
++ *  Kernel Probes (KProbes)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * Copyright (C) IBM Corporation, 2002, 2004
++ *
++ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna at in.ibm.com> Kernel
++ *		Probes initial implementation ( includes contributions from
++ *		Rusty Russell).
++ * 2004-July	Suparna Bhattacharya <suparna at in.ibm.com> added jumper probes
++ *		interface to access function arguments.
++ * 2004-Oct	Jim Keniston <jkenisto at us.ibm.com> and Prasanna S Panchamukhi
++ *		<prasanna at in.ibm.com> adapted for x86_64 from i386.
++ * 2005-Mar	Roland McGrath <roland at redhat.com>
++ *		Fixed to handle %rip-relative addressing mode correctly.
++ * 2005-May	Hien Nguyen <hien at us.ibm.com>, Jim Keniston
++ *		<jkenisto at us.ibm.com> and Prasanna S Panchamukhi
++ *		<prasanna at in.ibm.com> added function-return probes.
++ * 2005-May	Rusty Lynch <rusty.lynch at intel.com>
++ * 		Added function return probes functionality
++ * 2006-Feb	Masami Hiramatsu <hiramatu at sdl.hitachi.co.jp> added
++ * 		kprobe-booster and kretprobe-booster for i386.
++ * 2007-Dec	Masami Hiramatsu <mhiramat at redhat.com> added kprobe-booster
++ * 		and kretprobe-booster for x86-64
++ * 2007-Dec	Masami Hiramatsu <mhiramat at redhat.com>, Arjan van de Ven
++ * 		<arjan at infradead.org> and Jim Keniston <jkenisto at us.ibm.com>
++ * 		unified x86 kprobes code.
++ */
++
++#include <linux/kprobes.h>
++#include <linux/ptrace.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/hardirq.h>
++#include <linux/preempt.h>
++#include <linux/module.h>
++#include <linux/kdebug.h>
++
++#include <asm/cacheflush.h>
++#include <asm/desc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/alternative.h>
++
++void jprobe_return_end(void);
++
++DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
++DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
++
++#ifdef CONFIG_X86_64
++#define stack_addr(regs) ((unsigned long *)regs->sp)
++#else
++/*
++ * "&regs->sp" looks wrong, but it's correct for x86_32.  x86_32 CPUs
++ * don't save the ss and esp registers if the CPU is already in kernel
++ * mode when it traps.  So for kprobes, regs->sp and regs->ss are not
++ * the [nonexistent] saved stack pointer and ss register, but rather
++ * the top 8 bytes of the pre-int3 stack.  So &regs->sp happens to
++ * point to the top of the pre-int3 stack.
++ */
++#define stack_addr(regs) ((unsigned long *)&regs->sp)
++#endif
++
++#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
++	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
++	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
++	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
++	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
++	 << (row % 32))
++	/*
++	 * Undefined/reserved opcodes, conditional jump, Opcode Extension
++	 * Groups, and some special opcodes can not boost.
++	 */
++static const u32 twobyte_is_boostable[256 / 32] = {
++	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
++	/*      ----------------------------------------------          */
++	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
++	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
++	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
++	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
++	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
++	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
++	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
++	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
++	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
++	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
++	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
++	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
++	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
++	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
++	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
++	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
++	/*      -----------------------------------------------         */
++	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
++};
++static const u32 onebyte_has_modrm[256 / 32] = {
++	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
++	/*      -----------------------------------------------         */
++	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
++	W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
++	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
++	W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
++	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
++	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
++	W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
++	W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
++	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
++	W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
++	W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
++	W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
++	W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
++	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
++	W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
++	W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1)   /* f0 */
++	/*      -----------------------------------------------         */
++	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
++};
++static const u32 twobyte_has_modrm[256 / 32] = {
++	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
++	/*      -----------------------------------------------         */
++	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
++	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
++	W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
++	W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
++	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
++	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
++	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
++	W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
++	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
++	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
++	W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
++	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
++	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
++	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
++	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
++	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* ff */
++	/*      -----------------------------------------------         */
++	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
++};
++#undef W
++
++struct kretprobe_blackpoint kretprobe_blacklist[] = {
++	{"__switch_to", }, /* This function switches only current task, but
++			      doesn't switch kernel stack.*/
++	{NULL, NULL}	/* Terminator */
++};
++const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
++
++/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
++static void __kprobes set_jmp_op(void *from, void *to)
++{
++	struct __arch_jmp_op {
++		char op;
++		s32 raddr;
++	} __attribute__((packed)) * jop;
++	jop = (struct __arch_jmp_op *)from;
++	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
++	jop->op = RELATIVEJUMP_INSTRUCTION;
++}
++
++/*
++ * Check for the REX prefix which can only exist on X86_64
++ * X86_32 always returns 0
++ */
++static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
++{
++#ifdef CONFIG_X86_64
++	if ((*insn & 0xf0) == 0x40)
++		return 1;
++#endif
++	return 0;
++}
++
++/*
++ * Returns non-zero if opcode is boostable.
++ * RIP relative instructions are adjusted at copying time in 64 bits mode
++ */
++static int __kprobes can_boost(kprobe_opcode_t *opcodes)
++{
++	kprobe_opcode_t opcode;
++	kprobe_opcode_t *orig_opcodes = opcodes;
++
++retry:
++	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
++		return 0;
++	opcode = *(opcodes++);
++
++	/* 2nd-byte opcode */
++	if (opcode == 0x0f) {
++		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
++			return 0;
++		return test_bit(*opcodes,
++				(unsigned long *)twobyte_is_boostable);
++	}
++
++	switch (opcode & 0xf0) {
++#ifdef CONFIG_X86_64
++	case 0x40:
++		goto retry; /* REX prefix is boostable */
++#endif
++	case 0x60:
++		if (0x63 < opcode && opcode < 0x67)
++			goto retry; /* prefixes */
++		/* can't boost Address-size override and bound */
++		return (opcode != 0x62 && opcode != 0x67);
++	case 0x70:
++		return 0; /* can't boost conditional jump */
++	case 0xc0:
++		/* can't boost software-interruptions */
++		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
++	case 0xd0:
++		/* can boost AA* and XLAT */
++		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
++	case 0xe0:
++		/* can boost in/out and absolute jmps */
++		return ((opcode & 0x04) || opcode == 0xea);
++	case 0xf0:
++		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
++			goto retry; /* lock/rep(ne) prefix */
++		/* clear and set flags are boostable */
++		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
++	default:
++		/* segment override prefixes are boostable */
++		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
++			goto retry; /* prefixes */
++		/* CS override prefix and call are not boostable */
++		return (opcode != 0x2e && opcode != 0x9a);
++	}
++}
++
++/*
++ * Returns non-zero if opcode modifies the interrupt flag.
++ */
++static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
++{
++	switch (*insn) {
++	case 0xfa:		/* cli */
++	case 0xfb:		/* sti */
++	case 0xcf:		/* iret/iretd */
++	case 0x9d:		/* popf/popfd */
++		return 1;
++	}
++
++	/*
++	 * On X86_64, 0x40-0x4f are REX prefixes, so we need to look at
++	 * the next byte instead (taking care not to recurse infinitely).
++	 */
++	if (is_REX_prefix(insn))
++		return is_IF_modifier(++insn);
++
++	return 0;
++}
++
++/*
++ * Adjust the displacement if the instruction uses the %rip-relative
++ * addressing mode, so that the copied instruction still references
++ * the original target.
++ * If the instruction does not use %rip-relative addressing, do nothing.
++ * Only applicable to 64-bit x86.
++ */
++static void __kprobes fix_riprel(struct kprobe *p)
++{
++#ifdef CONFIG_X86_64
++	u8 *insn = p->ainsn.insn;
++	s64 disp;
++	int need_modrm;
++
++	/* Skip legacy instruction prefixes.  */
++	while (1) {
++		switch (*insn) {
++		case 0x66:
++		case 0x67:
++		case 0x2e:
++		case 0x3e:
++		case 0x26:
++		case 0x64:
++		case 0x65:
++		case 0x36:
++		case 0xf0:
++		case 0xf3:
++		case 0xf2:
++			++insn;
++			continue;
++		}
++		break;
++	}
++
++	/* Skip REX instruction prefix.  */
++	if (is_REX_prefix(insn))
++		++insn;
++
++	if (*insn == 0x0f) {
++		/* Two-byte opcode.  */
++		++insn;
++		need_modrm = test_bit(*insn,
++				      (unsigned long *)twobyte_has_modrm);
++	} else
++		/* One-byte opcode.  */
++		need_modrm = test_bit(*insn,
++				      (unsigned long *)onebyte_has_modrm);
++
++	if (need_modrm) {
++		u8 modrm = *++insn;
++		if ((modrm & 0xc7) == 0x05) {
++			/* %rip+disp32 addressing mode */
++			/* Displacement follows ModRM byte.  */
++			++insn;
++			/*
++			 * The copied instruction uses the %rip-relative
++			 * addressing mode.  Adjust the displacement for the
++			 * difference between the original location of this
++			 * instruction and the location of the copy that will
++			 * actually be run.  The tricky bit here is making sure
++			 * that the sign extension happens correctly in this
++			 * calculation, since we need a signed 32-bit result to
++			 * be sign-extended to 64 bits when it's added to the
++			 * %rip value and yield the same 64-bit result that the
++			 * sign-extension of the original signed 32-bit
++			 * displacement would have given.
++			 */
++			disp = (u8 *) p->addr + *((s32 *) insn) -
++			       (u8 *) p->ainsn.insn;
++			BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
++			*(s32 *)insn = (s32) disp;
++		}
++	}
++#endif
++}
++
++static void __kprobes arch_copy_kprobe(struct kprobe *p)
++{
++	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
++
++	fix_riprel(p);
++
++	if (can_boost(p->addr))
++		p->ainsn.boostable = 0;
++	else
++		p->ainsn.boostable = -1;
++
++	p->opcode = *p->addr;
++}
++
++int __kprobes arch_prepare_kprobe(struct kprobe *p)
++{
++	/* insn: must be on special executable page on x86. */
++	p->ainsn.insn = get_insn_slot();
++	if (!p->ainsn.insn)
++		return -ENOMEM;
++	arch_copy_kprobe(p);
++	return 0;
++}
++
++void __kprobes arch_arm_kprobe(struct kprobe *p)
++{
++	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
++}
++
++void __kprobes arch_disarm_kprobe(struct kprobe *p)
++{
++	text_poke(p->addr, &p->opcode, 1);
++}
++
++void __kprobes arch_remove_kprobe(struct kprobe *p)
++{
++	mutex_lock(&kprobe_mutex);
++	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
++	mutex_unlock(&kprobe_mutex);
++}
++
++static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
++{
++	kcb->prev_kprobe.kp = kprobe_running();
++	kcb->prev_kprobe.status = kcb->kprobe_status;
++	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
++	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
++}
++
++static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
++{
++	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
++	kcb->kprobe_status = kcb->prev_kprobe.status;
++	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
++	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
++}
++
++static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
++				struct kprobe_ctlblk *kcb)
++{
++	__get_cpu_var(current_kprobe) = p;
++	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
++		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
++	if (is_IF_modifier(p->ainsn.insn))
++		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
++}
++
++static void __kprobes clear_btf(void)
++{
++	if (test_thread_flag(TIF_DEBUGCTLMSR))
++		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
++}
++
++static void __kprobes restore_btf(void)
++{
++	if (test_thread_flag(TIF_DEBUGCTLMSR))
++		wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
++}
++
++static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
++{
++	clear_btf();
++	regs->flags |= X86_EFLAGS_TF;
++	regs->flags &= ~X86_EFLAGS_IF;
++	/* single step inline if the instruction is an int3 */
++	if (p->opcode == BREAKPOINT_INSTRUCTION)
++		regs->ip = (unsigned long)p->addr;
++	else
++		regs->ip = (unsigned long)p->ainsn.insn;
++}
++
++/* Called with kretprobe_lock held */
++void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
++				      struct pt_regs *regs)
++{
++	unsigned long *sara = stack_addr(regs);
++
++	ri->ret_addr = (kprobe_opcode_t *) *sara;
++
++	/* Replace the return addr with trampoline addr */
++	*sara = (unsigned long) &kretprobe_trampoline;
++}
++
++static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
++				       struct kprobe_ctlblk *kcb)
++{
++#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
++	if (p->ainsn.boostable == 1 && !p->post_handler) {
++		/* Boost up -- we can execute copied instructions directly */
++		reset_current_kprobe();
++		regs->ip = (unsigned long)p->ainsn.insn;
++		preempt_enable_no_resched();
++		return;
++	}
++#endif
++	prepare_singlestep(p, regs);
++	kcb->kprobe_status = KPROBE_HIT_SS;
++}
++
++/*
++ * We have reentered the kprobe_handler(), since another probe was hit while
++ * within the handler. We save the original kprobes variables and just single
++ * step on the instruction of the new probe without calling any user handlers.
++ */
++static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
++				    struct kprobe_ctlblk *kcb)
++{
++	switch (kcb->kprobe_status) {
++	case KPROBE_HIT_SSDONE:
++#ifdef CONFIG_X86_64
++		/* TODO: Provide re-entrancy from post_kprobes_handler() and
++		 * avoid exception stack corruption while single-stepping on
++		 * the instruction of the new probe.
++		 */
++		arch_disarm_kprobe(p);
++		regs->ip = (unsigned long)p->addr;
++		reset_current_kprobe();
++		preempt_enable_no_resched();
++		break;
++#endif
++	case KPROBE_HIT_ACTIVE:
++		save_previous_kprobe(kcb);
++		set_current_kprobe(p, regs, kcb);
++		kprobes_inc_nmissed_count(p);
++		prepare_singlestep(p, regs);
++		kcb->kprobe_status = KPROBE_REENTER;
++		break;
++	case KPROBE_HIT_SS:
++		if (p == kprobe_running()) {
++			regs->flags &= ~TF_MASK;
++			regs->flags |= kcb->kprobe_saved_flags;
++			return 0;
++		} else {
++			/* A probe has been hit in the codepath leading up
++			 * to, or just after, single-stepping of a probed
++			 * instruction. This entire codepath should strictly
++			 * reside in the .kprobes.text section. Raise a warning
++			 * to highlight this peculiar case.
++			 */
++		}
++	default:
++		/* impossible cases */
++		WARN_ON(1);
++		return 0;
++	}
++
++	return 1;
++}
++
++/*
++ * Interrupts are disabled on entry as trap3 is an interrupt gate and they
++ * remain disabled throughout this function.
++ */
++static int __kprobes kprobe_handler(struct pt_regs *regs)
++{
++	kprobe_opcode_t *addr;
++	struct kprobe *p;
++	struct kprobe_ctlblk *kcb;
++
++	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
++	if (*addr != BREAKPOINT_INSTRUCTION) {
++		/*
++		 * The breakpoint instruction was removed right
++		 * after we hit it.  Another cpu has removed
++		 * either a probepoint or a debugger breakpoint
++		 * at this address.  In either case, no further
++		 * handling of this interrupt is appropriate.
++		 * Back up over the (now missing) int3 and run
++		 * the original instruction.
++		 */
++		regs->ip = (unsigned long)addr;
++		return 1;
++	}
++
++	/*
++	 * We don't want to be preempted for the entire
++	 * duration of kprobe processing. We conditionally
++	 * re-enable preemption at the end of this function,
++	 * and also in reenter_kprobe() and setup_singlestep().
++	 */
++	preempt_disable();
++
++	kcb = get_kprobe_ctlblk();
++	p = get_kprobe(addr);
++
++	if (p) {
++		if (kprobe_running()) {
++			if (reenter_kprobe(p, regs, kcb))
++				return 1;
++		} else {
++			set_current_kprobe(p, regs, kcb);
++			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
++
++			/*
++			 * If we have no pre-handler or it returned 0, we
++			 * continue with normal processing.  If we have a
++			 * pre-handler and it returned non-zero, it prepped
++			 * for calling the break_handler below on re-entry
++			 * for jprobe processing, so get out doing nothing
++			 * more here.
++			 */
++			if (!p->pre_handler || !p->pre_handler(p, regs))
++				setup_singlestep(p, regs, kcb);
++			return 1;
++		}
++	} else if (kprobe_running()) {
++		p = __get_cpu_var(current_kprobe);
++		if (p->break_handler && p->break_handler(p, regs)) {
++			setup_singlestep(p, regs, kcb);
++			return 1;
++		}
++	} /* else: not a kprobe fault; let the kernel handle it */
++
++	preempt_enable_no_resched();
++	return 0;
++}
++
++/*
++ * When a retprobed function returns, this code saves registers and
++ * calls trampoline_handler(), which in turn calls the kretprobe's handler.
++ */
++void __kprobes kretprobe_trampoline_holder(void)
++{
++	asm volatile (
++			".global kretprobe_trampoline\n"
++			"kretprobe_trampoline: \n"
++#ifdef CONFIG_X86_64
++			/* We don't bother saving the ss register */
++			"	pushq %rsp\n"
++			"	pushfq\n"
++			/*
++			 * Skip cs, ip, orig_ax.
++			 * trampoline_handler() will plug in these values
++			 */
++			"	subq $24, %rsp\n"
++			"	pushq %rdi\n"
++			"	pushq %rsi\n"
++			"	pushq %rdx\n"
++			"	pushq %rcx\n"
++			"	pushq %rax\n"
++			"	pushq %r8\n"
++			"	pushq %r9\n"
++			"	pushq %r10\n"
++			"	pushq %r11\n"
++			"	pushq %rbx\n"
++			"	pushq %rbp\n"
++			"	pushq %r12\n"
++			"	pushq %r13\n"
++			"	pushq %r14\n"
++			"	pushq %r15\n"
++			"	movq %rsp, %rdi\n"
++			"	call trampoline_handler\n"
++			/* Replace saved sp with true return address. */
++			"	movq %rax, 152(%rsp)\n"
++			"	popq %r15\n"
++			"	popq %r14\n"
++			"	popq %r13\n"
++			"	popq %r12\n"
++			"	popq %rbp\n"
++			"	popq %rbx\n"
++			"	popq %r11\n"
++			"	popq %r10\n"
++			"	popq %r9\n"
++			"	popq %r8\n"
++			"	popq %rax\n"
++			"	popq %rcx\n"
++			"	popq %rdx\n"
++			"	popq %rsi\n"
++			"	popq %rdi\n"
++			/* Skip orig_ax, ip, cs */
++			"	addq $24, %rsp\n"
++			"	popfq\n"
++#else
++			"	pushf\n"
++			/*
++			 * Skip cs, ip, orig_ax.
++			 * trampoline_handler() will plug in these values
++			 */
++			"	subl $12, %esp\n"
++			"	pushl %fs\n"
++			"	pushl %ds\n"
++			"	pushl %es\n"
++			"	pushl %eax\n"
++			"	pushl %ebp\n"
++			"	pushl %edi\n"
++			"	pushl %esi\n"
++			"	pushl %edx\n"
++			"	pushl %ecx\n"
++			"	pushl %ebx\n"
++			"	movl %esp, %eax\n"
++			"	call trampoline_handler\n"
++			/* Move flags to cs */
++			"	movl 52(%esp), %edx\n"
++			"	movl %edx, 48(%esp)\n"
++			/* Replace saved flags with true return address. */
++			"	movl %eax, 52(%esp)\n"
++			"	popl %ebx\n"
++			"	popl %ecx\n"
++			"	popl %edx\n"
++			"	popl %esi\n"
++			"	popl %edi\n"
++			"	popl %ebp\n"
++			"	popl %eax\n"
++			/* Skip ip, orig_ax, es, ds, fs */
++			"	addl $20, %esp\n"
++			"	popf\n"
++#endif
++			"	ret\n");
++}
++
++/*
++ * Called from kretprobe_trampoline
++ */
++void * __kprobes trampoline_handler(struct pt_regs *regs)
++{
++	struct kretprobe_instance *ri = NULL;
++	struct hlist_head *head, empty_rp;
++	struct hlist_node *node, *tmp;
++	unsigned long flags, orig_ret_address = 0;
++	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
++
++	INIT_HLIST_HEAD(&empty_rp);
++	spin_lock_irqsave(&kretprobe_lock, flags);
++	head = kretprobe_inst_table_head(current);
++	/* fixup registers */
++#ifdef CONFIG_X86_64
++	regs->cs = __KERNEL_CS;
++#else
++	regs->cs = __KERNEL_CS | get_kernel_rpl();
++#endif
++	regs->ip = trampoline_address;
++	regs->orig_ax = ~0UL;
++
++	/*
++	 * It is possible to have multiple instances associated with a given
++	 * task either because multiple functions in the call path have
++	 * return probes installed on them, and/or more than one
++	 * return probe was registered for a target function.
++	 *
++	 * We can handle this because:
++	 *     - instances are always pushed into the head of the list
++	 *     - when multiple return probes are registered for the same
++	 *	 function, the (chronologically) first instance's ret_addr
++	 *	 will be the real return address, and all the rest will
++	 *	 point to kretprobe_trampoline.
++	 */
++	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
++		if (ri->task != current)
++			/* another task is sharing our hash bucket */
++			continue;
++
++		if (ri->rp && ri->rp->handler) {
++			__get_cpu_var(current_kprobe) = &ri->rp->kp;
++			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
++			ri->rp->handler(ri, regs);
++			__get_cpu_var(current_kprobe) = NULL;
++		}
++
++		orig_ret_address = (unsigned long)ri->ret_addr;
++		recycle_rp_inst(ri, &empty_rp);
++
++		if (orig_ret_address != trampoline_address)
++			/*
++			 * This is the real return address. Any other
++			 * instances associated with this task are for
++			 * other calls deeper on the call stack
++			 */
++			break;
++	}
++
++	kretprobe_assert(ri, orig_ret_address, trampoline_address);
++
++	spin_unlock_irqrestore(&kretprobe_lock, flags);
++
++	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
++		hlist_del(&ri->hlist);
++		kfree(ri);
++	}
++	return (void *)orig_ret_address;
++}
++
++/*
++ * Called after single-stepping.  p->addr is the address of the
++ * instruction whose first byte has been replaced by the "int 3"
++ * instruction.  To avoid the SMP problems that can occur when we
++ * temporarily put back the original opcode to single-step, we
++ * single-stepped a copy of the instruction.  The address of this
++ * copy is p->ainsn.insn.
++ *
++ * This function prepares to return from the post-single-step
++ * interrupt.  We have to fix up the stack as follows:
++ *
++ * 0) Except in the case of absolute or indirect jump or call instructions,
++ * the new ip is relative to the copied instruction.  We need to make
++ * it relative to the original instruction.
++ *
++ * 1) If the single-stepped instruction was pushfl, then the TF and IF
++ * flags are set in the just-pushed flags, and may need to be cleared.
++ *
++ * 2) If the single-stepped instruction was a call, the return address
++ * that is atop the stack is the address following the copied instruction.
++ * We need to make it the address following the original instruction.
++ *
++ * If this is the first time we've single-stepped the instruction at
++ * this probepoint, and the instruction is boostable, boost it: add a
++ * jump instruction after the copied instruction, that jumps to the next
++ * instruction after the probepoint.
++ */
++static void __kprobes resume_execution(struct kprobe *p,
++		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
++{
++	unsigned long *tos = stack_addr(regs);
++	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++	unsigned long orig_ip = (unsigned long)p->addr;
++	kprobe_opcode_t *insn = p->ainsn.insn;
++
++	/*skip the REX prefix*/
++	if (is_REX_prefix(insn))
++		insn++;
++
++	regs->flags &= ~X86_EFLAGS_TF;
++	switch (*insn) {
++	case 0x9c:	/* pushfl */
++		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
++		*tos |= kcb->kprobe_old_flags;
++		break;
++	case 0xc2:	/* iret/ret/lret */
++	case 0xc3:
++	case 0xca:
++	case 0xcb:
++	case 0xcf:
++	case 0xea:	/* jmp absolute -- ip is correct */
++		/* ip is already adjusted, no more changes required */
++		p->ainsn.boostable = 1;
++		goto no_change;
++	case 0xe8:	/* call relative - Fix return addr */
++		*tos = orig_ip + (*tos - copy_ip);
++		break;
++#ifdef CONFIG_X86_32
++	case 0x9a:	/* call absolute -- same as call absolute, indirect */
++		*tos = orig_ip + (*tos - copy_ip);
++		goto no_change;
++#endif
++	case 0xff:
++		if ((insn[1] & 0x30) == 0x10) {
++			/*
++			 * call absolute, indirect
++			 * Fix return addr; ip is correct.
++			 * But this is not boostable
++			 */
++			*tos = orig_ip + (*tos - copy_ip);
++			goto no_change;
++		} else if (((insn[1] & 0x31) == 0x20) ||
++			   ((insn[1] & 0x31) == 0x21)) {
++			/*
++			 * jmp near and far, absolute indirect
++			 * ip is correct. And this is boostable
++			 */
++			p->ainsn.boostable = 1;
++			goto no_change;
++		}
++	default:
++		break;
++	}
++
++	if (p->ainsn.boostable == 0) {
++		if ((regs->ip > copy_ip) &&
++		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
++			/*
++			 * This instruction can be executed directly if it
++			 * jumps back to the correct address.
++			 */
++			set_jmp_op((void *)regs->ip,
++				   (void *)orig_ip + (regs->ip - copy_ip));
++			p->ainsn.boostable = 1;
++		} else {
++			p->ainsn.boostable = -1;
++		}
++	}
++
++	regs->ip += orig_ip - copy_ip;
++
++no_change:
++	restore_btf();
++}
++
++/*
++ * Interrupts are disabled on entry as trap1 is an interrupt gate and they
++ * remain disabled throughout this function.
++ */
++static int __kprobes post_kprobe_handler(struct pt_regs *regs)
++{
++	struct kprobe *cur = kprobe_running();
++	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++
++	if (!cur)
++		return 0;
++
++	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
++		kcb->kprobe_status = KPROBE_HIT_SSDONE;
++		cur->post_handler(cur, regs, 0);
++	}
++
++	resume_execution(cur, regs, kcb);
++	regs->flags |= kcb->kprobe_saved_flags;
++	trace_hardirqs_fixup_flags(regs->flags);
++
++	/* Restore back the original saved kprobes variables and continue. */
++	if (kcb->kprobe_status == KPROBE_REENTER) {
++		restore_previous_kprobe(kcb);
++		goto out;
++	}
++	reset_current_kprobe();
++out:
++	preempt_enable_no_resched();
++
++	/*
++	 * If somebody else is single-stepping across a probe point, flags
++	 * will have TF set; in that case, continue the remaining processing
++	 * of do_debug as if this were not a probe hit.
++	 */
++	if (regs->flags & X86_EFLAGS_TF)
++		return 0;
++
++	return 1;
++}
++
++int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
++{
++	struct kprobe *cur = kprobe_running();
++	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++
++	switch (kcb->kprobe_status) {
++	case KPROBE_HIT_SS:
++	case KPROBE_REENTER:
++		/*
++		 * We are here because the instruction being single
++		 * stepped caused a page fault. We reset the current
++		 * kprobe and the ip points back to the probe address
++		 * and allow the page fault handler to continue as a
++		 * normal page fault.
++		 */
++		regs->ip = (unsigned long)cur->addr;
++		regs->flags |= kcb->kprobe_old_flags;
++		if (kcb->kprobe_status == KPROBE_REENTER)
++			restore_previous_kprobe(kcb);
++		else
++			reset_current_kprobe();
++		preempt_enable_no_resched();
++		break;
++	case KPROBE_HIT_ACTIVE:
++	case KPROBE_HIT_SSDONE:
++		/*
++		 * We increment the nmissed count for accounting;
++		 * the npre/npostfault counts could also be used to
++		 * account for these specific fault cases.
++		 */
++		kprobes_inc_nmissed_count(cur);
++
++		/*
++		 * We come here because an instruction in the pre/post
++		 * handler caused the page fault. This can happen if the
++		 * handler tries to access user space, e.g. via
++		 * copy_from_user() or get_user(). Let the
++		 * user-specified handler try to fix it first.
++		 */
++		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
++			return 1;
++
++		/*
++		 * In case the user-specified fault handler returned
++		 * zero, try to fix up.
++		 */
++		if (fixup_exception(regs))
++			return 1;
++
++		/*
++		 * The fixup routine could not handle it;
++		 * let do_page_fault() fix it.
++		 */
++		break;
++	default:
++		break;
++	}
++	return 0;
++}
++
++/*
++ * Wrapper routine for handling exceptions.
++ */
++int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
++				       unsigned long val, void *data)
++{
++	struct die_args *args = data;
++	int ret = NOTIFY_DONE;
++
++	if (args->regs && user_mode_vm(args->regs))
++		return ret;
++
++	switch (val) {
++	case DIE_INT3:
++		if (kprobe_handler(args->regs))
++			ret = NOTIFY_STOP;
++		break;
++	case DIE_DEBUG:
++		if (post_kprobe_handler(args->regs))
++			ret = NOTIFY_STOP;
++		break;
++	case DIE_GPF:
++		/*
++		 * To be potentially processing a kprobe fault and to
++		 * trust the result from kprobe_running(), we have to
++		 * be non-preemptible.
++		 */
++		if (!preemptible() && kprobe_running() &&
++		    kprobe_fault_handler(args->regs, args->trapnr))
++			ret = NOTIFY_STOP;
++		break;
++	default:
++		break;
++	}
++	return ret;
++}
++
++int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
++{
++	struct jprobe *jp = container_of(p, struct jprobe, kp);
++	unsigned long addr;
++	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++
++	kcb->jprobe_saved_regs = *regs;
++	kcb->jprobe_saved_sp = stack_addr(regs);
++	addr = (unsigned long)(kcb->jprobe_saved_sp);
++
++	/*
++	 * As Linus pointed out, gcc assumes that the callee
++	 * owns the argument space and could overwrite it, e.g.
++	 * tailcall optimization. So, to be absolutely safe
++	 * we also save and restore enough stack bytes to cover
++	 * the argument area.
++	 */
++	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
++	       MIN_STACK_SIZE(addr));
++	regs->flags &= ~X86_EFLAGS_IF;
++	trace_hardirqs_off();
++	regs->ip = (unsigned long)(jp->entry);
++	return 1;
++}
++
++void __kprobes jprobe_return(void)
++{
++	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++
++	asm volatile (
++#ifdef CONFIG_X86_64
++			"       xchg   %%rbx,%%rsp	\n"
++#else
++			"       xchgl   %%ebx,%%esp	\n"
++#endif
++			"       int3			\n"
++			"       .globl jprobe_return_end\n"
++			"       jprobe_return_end:	\n"
++			"       nop			\n"::"b"
++			(kcb->jprobe_saved_sp):"memory");
++}
++
++int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
++{
++	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++	u8 *addr = (u8 *) (regs->ip - 1);
++	struct jprobe *jp = container_of(p, struct jprobe, kp);
++
++	if ((addr > (u8 *) jprobe_return) &&
++	    (addr < (u8 *) jprobe_return_end)) {
++		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
++			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
++			printk(KERN_ERR
++			       "current sp %p does not match saved sp %p\n",
++			       stack_addr(regs), kcb->jprobe_saved_sp);
++			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
++			show_registers(saved_regs);
++			printk(KERN_ERR "Current registers\n");
++			show_registers(regs);
++			BUG();
++		}
++		*regs = kcb->jprobe_saved_regs;
++		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
++		       kcb->jprobes_stack,
++		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
++		preempt_enable_no_resched();
++		return 1;
++	}
++	return 0;
++}
++
++int __init arch_init_kprobes(void)
++{
++	return 0;
++}
++
++int __kprobes arch_trampoline_kprobe(struct kprobe *p)
++{
++	return 0;
++}
+diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
+deleted file mode 100644
+index 3a020f7..0000000
+--- a/arch/x86/kernel/kprobes_32.c
++++ /dev/null
+@@ -1,756 +0,0 @@
+-/*
+- *  Kernel Probes (KProbes)
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- *
+- * Copyright (C) IBM Corporation, 2002, 2004
+- *
+- * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna at in.ibm.com> Kernel
+- *		Probes initial implementation ( includes contributions from
+- *		Rusty Russell).
+- * 2004-July	Suparna Bhattacharya <suparna at in.ibm.com> added jumper probes
+- *		interface to access function arguments.
+- * 2005-May	Hien Nguyen <hien at us.ibm.com>, Jim Keniston
+- *		<jkenisto at us.ibm.com> and Prasanna S Panchamukhi
+- *		<prasanna at in.ibm.com> added function-return probes.
+- */
+-
+-#include <linux/kprobes.h>
+-#include <linux/ptrace.h>
+-#include <linux/preempt.h>
+-#include <linux/kdebug.h>
+-#include <asm/cacheflush.h>
+-#include <asm/desc.h>
+-#include <asm/uaccess.h>
+-#include <asm/alternative.h>
+-
+-void jprobe_return_end(void);
+-
+-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+-DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+-
+-struct kretprobe_blackpoint kretprobe_blacklist[] = {
+-	{"__switch_to", }, /* This function switches only current task, but
+-			     doesn't switch kernel stack.*/
+-	{NULL, NULL}	/* Terminator */
+-};
+-const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+-
+-/* insert a jmp code */
+-static __always_inline void set_jmp_op(void *from, void *to)
+-{
+-	struct __arch_jmp_op {
+-		char op;
+-		long raddr;
+-	} __attribute__((packed)) *jop;
+-	jop = (struct __arch_jmp_op *)from;
+-	jop->raddr = (long)(to) - ((long)(from) + 5);
+-	jop->op = RELATIVEJUMP_INSTRUCTION;
+-}
+-
+-/*
+- * returns non-zero if opcodes can be boosted.
+- */
+-static __always_inline int can_boost(kprobe_opcode_t *opcodes)
+-{
+-#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
+-	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
+-	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
+-	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
+-	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
+-	 << (row % 32))
+-	/*
+-	 * Undefined/reserved opcodes, conditional jump, Opcode Extension
+-	 * Groups, and some special opcodes can not be boost.
+-	 */
+-	static const unsigned long twobyte_is_boostable[256 / 32] = {
+-		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
+-		/*      -------------------------------         */
+-		W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
+-		W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
+-		W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
+-		W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
+-		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
+-		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
+-		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
+-		W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
+-		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
+-		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
+-		W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
+-		W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
+-		W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
+-		W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
+-		W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
+-		W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0)  /* f0 */
+-		/*      -------------------------------         */
+-		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
+-	};
+-#undef W
+-	kprobe_opcode_t opcode;
+-	kprobe_opcode_t *orig_opcodes = opcodes;
+-retry:
+-	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
+-		return 0;
+-	opcode = *(opcodes++);
+-
+-	/* 2nd-byte opcode */
+-	if (opcode == 0x0f) {
+-		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
+-			return 0;
+-		return test_bit(*opcodes, twobyte_is_boostable);
+-	}
+-
+-	switch (opcode & 0xf0) {
+-	case 0x60:
+-		if (0x63 < opcode && opcode < 0x67)
+-			goto retry; /* prefixes */
+-		/* can't boost Address-size override and bound */
+-		return (opcode != 0x62 && opcode != 0x67);
+-	case 0x70:
+-		return 0; /* can't boost conditional jump */
+-	case 0xc0:
+-		/* can't boost software-interruptions */
+-		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
+-	case 0xd0:
+-		/* can boost AA* and XLAT */
+-		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
+-	case 0xe0:
+-		/* can boost in/out and absolute jmps */
+-		return ((opcode & 0x04) || opcode == 0xea);
+-	case 0xf0:
+-		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
+-			goto retry; /* lock/rep(ne) prefix */
+-		/* clear and set flags can be boost */
+-		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+-	default:
+-		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
+-			goto retry; /* prefixes */
+-		/* can't boost CS override and call */
+-		return (opcode != 0x2e && opcode != 0x9a);
+-	}
+-}
+-
+-/*
+- * returns non-zero if opcode modifies the interrupt flag.
+- */
+-static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
+-{
+-	switch (opcode) {
+-	case 0xfa:		/* cli */
+-	case 0xfb:		/* sti */
+-	case 0xcf:		/* iret/iretd */
+-	case 0x9d:		/* popf/popfd */
+-		return 1;
+-	}
+-	return 0;
+-}
+-
+-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+-{
+-	/* insn: must be on special executable page on i386. */
+-	p->ainsn.insn = get_insn_slot();
+-	if (!p->ainsn.insn)
+-		return -ENOMEM;
+-
+-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+-	p->opcode = *p->addr;
+-	if (can_boost(p->addr)) {
+-		p->ainsn.boostable = 0;
+-	} else {
+-		p->ainsn.boostable = -1;
+-	}
+-	return 0;
+-}
+-
+-void __kprobes arch_arm_kprobe(struct kprobe *p)
+-{
+-	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
+-}
+-
+-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+-{
+-	text_poke(p->addr, &p->opcode, 1);
+-}
+-
+-void __kprobes arch_remove_kprobe(struct kprobe *p)
+-{
+-	mutex_lock(&kprobe_mutex);
+-	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+-	mutex_unlock(&kprobe_mutex);
+-}
+-
+-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+-{
+-	kcb->prev_kprobe.kp = kprobe_running();
+-	kcb->prev_kprobe.status = kcb->kprobe_status;
+-	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
+-	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
+-}
+-
+-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+-{
+-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+-	kcb->kprobe_status = kcb->prev_kprobe.status;
+-	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
+-	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
+-}
+-
+-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+-				struct kprobe_ctlblk *kcb)
+-{
+-	__get_cpu_var(current_kprobe) = p;
+-	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
+-		= (regs->eflags & (TF_MASK | IF_MASK));
+-	if (is_IF_modifier(p->opcode))
+-		kcb->kprobe_saved_eflags &= ~IF_MASK;
+-}
+-
+-static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+-{
+-	regs->eflags |= TF_MASK;
+-	regs->eflags &= ~IF_MASK;
+-	/*single step inline if the instruction is an int3*/
+-	if (p->opcode == BREAKPOINT_INSTRUCTION)
+-		regs->eip = (unsigned long)p->addr;
+-	else
+-		regs->eip = (unsigned long)p->ainsn.insn;
+-}
+-
+-/* Called with kretprobe_lock held */
+-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+-				      struct pt_regs *regs)
+-{
+-	unsigned long *sara = (unsigned long *)&regs->esp;
+-
+-	ri->ret_addr = (kprobe_opcode_t *) *sara;
+-
+-	/* Replace the return addr with trampoline addr */
+-	*sara = (unsigned long) &kretprobe_trampoline;
+-}
+-
+-/*
+- * Interrupts are disabled on entry as trap3 is an interrupt gate and they
+- * remain disabled thorough out this function.
+- */
+-static int __kprobes kprobe_handler(struct pt_regs *regs)
+-{
+-	struct kprobe *p;
+-	int ret = 0;
+-	kprobe_opcode_t *addr;
+-	struct kprobe_ctlblk *kcb;
+-
+-	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
+-
+-	/*
+-	 * We don't want to be preempted for the entire
+-	 * duration of kprobe processing
+-	 */
+-	preempt_disable();
+-	kcb = get_kprobe_ctlblk();
+-
+-	/* Check we're not actually recursing */
+-	if (kprobe_running()) {
+-		p = get_kprobe(addr);
+-		if (p) {
+-			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+-				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+-				regs->eflags &= ~TF_MASK;
+-				regs->eflags |= kcb->kprobe_saved_eflags;
+-				goto no_kprobe;
+-			}
+-			/* We have reentered the kprobe_handler(), since
+-			 * another probe was hit while within the handler.
+-			 * We here save the original kprobes variables and
+-			 * just single step on the instruction of the new probe
+-			 * without calling any user handlers.
+-			 */
+-			save_previous_kprobe(kcb);
+-			set_current_kprobe(p, regs, kcb);
+-			kprobes_inc_nmissed_count(p);
+-			prepare_singlestep(p, regs);
+-			kcb->kprobe_status = KPROBE_REENTER;
+-			return 1;
+-		} else {
+-			if (*addr != BREAKPOINT_INSTRUCTION) {
+-			/* The breakpoint instruction was removed by
+-			 * another cpu right after we hit, no further
+-			 * handling of this interrupt is appropriate
+-			 */
+-				regs->eip -= sizeof(kprobe_opcode_t);
+-				ret = 1;
+-				goto no_kprobe;
+-			}
+-			p = __get_cpu_var(current_kprobe);
+-			if (p->break_handler && p->break_handler(p, regs)) {
+-				goto ss_probe;
+-			}
+-		}
+-		goto no_kprobe;
+-	}
+-
+-	p = get_kprobe(addr);
+-	if (!p) {
+-		if (*addr != BREAKPOINT_INSTRUCTION) {
+-			/*
+-			 * The breakpoint instruction was removed right
+-			 * after we hit it.  Another cpu has removed
+-			 * either a probepoint or a debugger breakpoint
+-			 * at this address.  In either case, no further
+-			 * handling of this interrupt is appropriate.
+-			 * Back up over the (now missing) int3 and run
+-			 * the original instruction.
+-			 */
+-			regs->eip -= sizeof(kprobe_opcode_t);
+-			ret = 1;
+-		}
+-		/* Not one of ours: let kernel handle it */
+-		goto no_kprobe;
+-	}
+-
+-	set_current_kprobe(p, regs, kcb);
+-	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+-
+-	if (p->pre_handler && p->pre_handler(p, regs))
+-		/* handler has already set things up, so skip ss setup */
+-		return 1;
+-
+-ss_probe:
+-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+-	if (p->ainsn.boostable == 1 && !p->post_handler){
+-		/* Boost up -- we can execute copied instructions directly */
+-		reset_current_kprobe();
+-		regs->eip = (unsigned long)p->ainsn.insn;
+-		preempt_enable_no_resched();
+-		return 1;
+-	}
+-#endif
+-	prepare_singlestep(p, regs);
+-	kcb->kprobe_status = KPROBE_HIT_SS;
+-	return 1;
+-
+-no_kprobe:
+-	preempt_enable_no_resched();
+-	return ret;
+-}
+-
+-/*
+- * For function-return probes, init_kprobes() establishes a probepoint
+- * here. When a retprobed function returns, this probe is hit and
+- * trampoline_probe_handler() runs, calling the kretprobe's handler.
+- */
+- void __kprobes kretprobe_trampoline_holder(void)
+- {
+-	asm volatile ( ".global kretprobe_trampoline\n"
+-			"kretprobe_trampoline: \n"
+-			"	pushf\n"
+-			/* skip cs, eip, orig_eax */
+-			"	subl $12, %esp\n"
+-			"	pushl %fs\n"
+-			"	pushl %ds\n"
+-			"	pushl %es\n"
+-			"	pushl %eax\n"
+-			"	pushl %ebp\n"
+-			"	pushl %edi\n"
+-			"	pushl %esi\n"
+-			"	pushl %edx\n"
+-			"	pushl %ecx\n"
+-			"	pushl %ebx\n"
+-			"	movl %esp, %eax\n"
+-			"	call trampoline_handler\n"
+-			/* move eflags to cs */
+-			"	movl 52(%esp), %edx\n"
+-			"	movl %edx, 48(%esp)\n"
+-			/* save true return address on eflags */
+-			"	movl %eax, 52(%esp)\n"
+-			"	popl %ebx\n"
+-			"	popl %ecx\n"
+-			"	popl %edx\n"
+-			"	popl %esi\n"
+-			"	popl %edi\n"
+-			"	popl %ebp\n"
+-			"	popl %eax\n"
+-			/* skip eip, orig_eax, es, ds, fs */
+-			"	addl $20, %esp\n"
+-			"	popf\n"
+-			"	ret\n");
+-}
+-
+-/*
+- * Called from kretprobe_trampoline
+- */
+-fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
+-{
+-	struct kretprobe_instance *ri = NULL;
+-	struct hlist_head *head, empty_rp;
+-	struct hlist_node *node, *tmp;
+-	unsigned long flags, orig_ret_address = 0;
+-	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
+-
+-	INIT_HLIST_HEAD(&empty_rp);
+-	spin_lock_irqsave(&kretprobe_lock, flags);
+-	head = kretprobe_inst_table_head(current);
+-	/* fixup registers */
+-	regs->xcs = __KERNEL_CS | get_kernel_rpl();
+-	regs->eip = trampoline_address;
+-	regs->orig_eax = 0xffffffff;
+-
+-	/*
+-	 * It is possible to have multiple instances associated with a given
+-	 * task either because an multiple functions in the call path
+-	 * have a return probe installed on them, and/or more then one return
+-	 * return probe was registered for a target function.
+-	 *
+-	 * We can handle this because:
+-	 *     - instances are always inserted at the head of the list
+-	 *     - when multiple return probes are registered for the same
+-	 *       function, the first instance's ret_addr will point to the
+-	 *       real return address, and all the rest will point to
+-	 *       kretprobe_trampoline
+-	 */
+-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+-		if (ri->task != current)
+-			/* another task is sharing our hash bucket */
+-			continue;
+-
+-		if (ri->rp && ri->rp->handler){
+-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+-			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+-			ri->rp->handler(ri, regs);
+-			__get_cpu_var(current_kprobe) = NULL;
+-		}
+-
+-		orig_ret_address = (unsigned long)ri->ret_addr;
+-		recycle_rp_inst(ri, &empty_rp);
+-
+-		if (orig_ret_address != trampoline_address)
+-			/*
+-			 * This is the real return address. Any other
+-			 * instances associated with this task are for
+-			 * other calls deeper on the call stack
+-			 */
+-			break;
+-	}
+-
+-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+-	spin_unlock_irqrestore(&kretprobe_lock, flags);
+-
+-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+-		hlist_del(&ri->hlist);
+-		kfree(ri);
+-	}
+-	return (void*)orig_ret_address;
+-}
+-
+-/*
+- * Called after single-stepping.  p->addr is the address of the
+- * instruction whose first byte has been replaced by the "int 3"
+- * instruction.  To avoid the SMP problems that can occur when we
+- * temporarily put back the original opcode to single-step, we
+- * single-stepped a copy of the instruction.  The address of this
+- * copy is p->ainsn.insn.
+- *
+- * This function prepares to return from the post-single-step
+- * interrupt.  We have to fix up the stack as follows:
+- *
+- * 0) Except in the case of absolute or indirect jump or call instructions,
+- * the new eip is relative to the copied instruction.  We need to make
+- * it relative to the original instruction.
+- *
+- * 1) If the single-stepped instruction was pushfl, then the TF and IF
+- * flags are set in the just-pushed eflags, and may need to be cleared.
+- *
+- * 2) If the single-stepped instruction was a call, the return address
+- * that is atop the stack is the address following the copied instruction.
+- * We need to make it the address following the original instruction.
+- *
+- * This function also checks instruction size for preparing direct execution.
+- */
+-static void __kprobes resume_execution(struct kprobe *p,
+-		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+-{
+-	unsigned long *tos = (unsigned long *)&regs->esp;
+-	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
+-	unsigned long orig_eip = (unsigned long)p->addr;
+-
+-	regs->eflags &= ~TF_MASK;
+-	switch (p->ainsn.insn[0]) {
+-	case 0x9c:		/* pushfl */
+-		*tos &= ~(TF_MASK | IF_MASK);
+-		*tos |= kcb->kprobe_old_eflags;
+-		break;
+-	case 0xc2:		/* iret/ret/lret */
+-	case 0xc3:
+-	case 0xca:
+-	case 0xcb:
+-	case 0xcf:
+-	case 0xea:		/* jmp absolute -- eip is correct */
+-		/* eip is already adjusted, no more changes required */
+-		p->ainsn.boostable = 1;
+-		goto no_change;
+-	case 0xe8:		/* call relative - Fix return addr */
+-		*tos = orig_eip + (*tos - copy_eip);
+-		break;
+-	case 0x9a:		/* call absolute -- same as call absolute, indirect */
+-		*tos = orig_eip + (*tos - copy_eip);
+-		goto no_change;
+-	case 0xff:
+-		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
+-			/*
+-			 * call absolute, indirect
+-			 * Fix return addr; eip is correct.
+-			 * But this is not boostable
+-			 */
+-			*tos = orig_eip + (*tos - copy_eip);
+-			goto no_change;
+-		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
+-			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
+-			/* eip is correct. And this is boostable */
+-			p->ainsn.boostable = 1;
+-			goto no_change;
+-		}
+-	default:
+-		break;
+-	}
+-
+-	if (p->ainsn.boostable == 0) {
+-		if ((regs->eip > copy_eip) &&
+-		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
+-			/*
+-			 * These instructions can be executed directly if it
+-			 * jumps back to correct address.
+-			 */
+-			set_jmp_op((void *)regs->eip,
+-				   (void *)orig_eip + (regs->eip - copy_eip));
+-			p->ainsn.boostable = 1;
+-		} else {
+-			p->ainsn.boostable = -1;
+-		}
+-	}
+-
+-	regs->eip = orig_eip + (regs->eip - copy_eip);
+-
+-no_change:
+-	return;
+-}
+-
+-/*
+- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
+- * remain disabled thoroughout this function.
+- */
+-static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+-{
+-	struct kprobe *cur = kprobe_running();
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-
+-	if (!cur)
+-		return 0;
+-
+-	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+-		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+-		cur->post_handler(cur, regs, 0);
+-	}
+-
+-	resume_execution(cur, regs, kcb);
+-	regs->eflags |= kcb->kprobe_saved_eflags;
+-	trace_hardirqs_fixup_flags(regs->eflags);
+-
+-	/*Restore back the original saved kprobes variables and continue. */
+-	if (kcb->kprobe_status == KPROBE_REENTER) {
+-		restore_previous_kprobe(kcb);
+-		goto out;
+-	}
+-	reset_current_kprobe();
+-out:
+-	preempt_enable_no_resched();
+-
+-	/*
+-	 * if somebody else is singlestepping across a probe point, eflags
+-	 * will have TF set, in which case, continue the remaining processing
+-	 * of do_debug, as if this is not a probe hit.
+-	 */
+-	if (regs->eflags & TF_MASK)
+-		return 0;
+-
+-	return 1;
+-}
+-
+-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+-{
+-	struct kprobe *cur = kprobe_running();
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-
+-	switch(kcb->kprobe_status) {
+-	case KPROBE_HIT_SS:
+-	case KPROBE_REENTER:
+-		/*
+-		 * We are here because the instruction being single
+-		 * stepped caused a page fault. We reset the current
+-		 * kprobe and the eip points back to the probe address
+-		 * and allow the page fault handler to continue as a
+-		 * normal page fault.
+-		 */
+-		regs->eip = (unsigned long)cur->addr;
+-		regs->eflags |= kcb->kprobe_old_eflags;
+-		if (kcb->kprobe_status == KPROBE_REENTER)
+-			restore_previous_kprobe(kcb);
+-		else
+-			reset_current_kprobe();
+-		preempt_enable_no_resched();
+-		break;
+-	case KPROBE_HIT_ACTIVE:
+-	case KPROBE_HIT_SSDONE:
+-		/*
+-		 * We increment the nmissed count for accounting,
+-		 * we can also use npre/npostfault count for accouting
+-		 * these specific fault cases.
+-		 */
+-		kprobes_inc_nmissed_count(cur);
+-
+-		/*
+-		 * We come here because instructions in the pre/post
+-		 * handler caused the page_fault, this could happen
+-		 * if handler tries to access user space by
+-		 * copy_from_user(), get_user() etc. Let the
+-		 * user-specified handler try to fix it first.
+-		 */
+-		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+-			return 1;
+-
+-		/*
+-		 * In case the user-specified fault handler returned
+-		 * zero, try to fix up.
+-		 */
+-		if (fixup_exception(regs))
+-			return 1;
+-
+-		/*
+-		 * fixup_exception() could not handle it,
+-		 * Let do_page_fault() fix it.
+-		 */
+-		break;
+-	default:
+-		break;
+-	}
+-	return 0;
+-}
+-
+-/*
+- * Wrapper routine to for handling exceptions.
+- */
+-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+-				       unsigned long val, void *data)
+-{
+-	struct die_args *args = (struct die_args *)data;
+-	int ret = NOTIFY_DONE;
+-
+-	if (args->regs && user_mode_vm(args->regs))
+-		return ret;
+-
+-	switch (val) {
+-	case DIE_INT3:
+-		if (kprobe_handler(args->regs))
+-			ret = NOTIFY_STOP;
+-		break;
+-	case DIE_DEBUG:
+-		if (post_kprobe_handler(args->regs))
+-			ret = NOTIFY_STOP;
+-		break;
+-	case DIE_GPF:
+-		/* kprobe_running() needs smp_processor_id() */
+-		preempt_disable();
+-		if (kprobe_running() &&
+-		    kprobe_fault_handler(args->regs, args->trapnr))
+-			ret = NOTIFY_STOP;
+-		preempt_enable();
+-		break;
+-	default:
+-		break;
+-	}
+-	return ret;
+-}
+-
+-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+-{
+-	struct jprobe *jp = container_of(p, struct jprobe, kp);
+-	unsigned long addr;
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-
+-	kcb->jprobe_saved_regs = *regs;
+-	kcb->jprobe_saved_esp = &regs->esp;
+-	addr = (unsigned long)(kcb->jprobe_saved_esp);
+-
+-	/*
+-	 * TBD: As Linus pointed out, gcc assumes that the callee
+-	 * owns the argument space and could overwrite it, e.g.
+-	 * tailcall optimization. So, to be absolutely safe
+-	 * we also save and restore enough stack bytes to cover
+-	 * the argument area.
+-	 */
+-	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
+-			MIN_STACK_SIZE(addr));
+-	regs->eflags &= ~IF_MASK;
+-	trace_hardirqs_off();
+-	regs->eip = (unsigned long)(jp->entry);
+-	return 1;
+-}
+-
+-void __kprobes jprobe_return(void)
+-{
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-
+-	asm volatile ("       xchgl   %%ebx,%%esp     \n"
+-		      "       int3			\n"
+-		      "       .globl jprobe_return_end	\n"
+-		      "       jprobe_return_end:	\n"
+-		      "       nop			\n"::"b"
+-		      (kcb->jprobe_saved_esp):"memory");
+-}
+-
+-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+-{
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-	u8 *addr = (u8 *) (regs->eip - 1);
+-	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
+-	struct jprobe *jp = container_of(p, struct jprobe, kp);
+-
+-	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
+-		if (&regs->esp != kcb->jprobe_saved_esp) {
+-			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
+-			printk("current esp %p does not match saved esp %p\n",
+-			       &regs->esp, kcb->jprobe_saved_esp);
+-			printk("Saved registers for jprobe %p\n", jp);
+-			show_registers(saved_regs);
+-			printk("Current registers\n");
+-			show_registers(regs);
+-			BUG();
+-		}
+-		*regs = kcb->jprobe_saved_regs;
+-		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+-		       MIN_STACK_SIZE(stack_addr));
+-		preempt_enable_no_resched();
+-		return 1;
+-	}
+-	return 0;
+-}
+-
+-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+-{
+-	return 0;
+-}
+-
+-int __init arch_init_kprobes(void)
+-{
+-	return 0;
+-}
+diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
+deleted file mode 100644
+index 5df19a9..0000000
+--- a/arch/x86/kernel/kprobes_64.c
++++ /dev/null
+@@ -1,749 +0,0 @@
+-/*
+- *  Kernel Probes (KProbes)
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- *
+- * Copyright (C) IBM Corporation, 2002, 2004
+- *
+- * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna at in.ibm.com> Kernel
+- *		Probes initial implementation ( includes contributions from
+- *		Rusty Russell).
+- * 2004-July	Suparna Bhattacharya <suparna at in.ibm.com> added jumper probes
+- *		interface to access function arguments.
+- * 2004-Oct	Jim Keniston <kenistoj at us.ibm.com> and Prasanna S Panchamukhi
+- *		<prasanna at in.ibm.com> adapted for x86_64
+- * 2005-Mar	Roland McGrath <roland at redhat.com>
+- *		Fixed to handle %rip-relative addressing mode correctly.
+- * 2005-May     Rusty Lynch <rusty.lynch at intel.com>
+- *              Added function return probes functionality
+- */
+-
+-#include <linux/kprobes.h>
+-#include <linux/ptrace.h>
+-#include <linux/string.h>
+-#include <linux/slab.h>
+-#include <linux/preempt.h>
+-#include <linux/module.h>
+-#include <linux/kdebug.h>
+-
+-#include <asm/pgtable.h>
+-#include <asm/uaccess.h>
+-#include <asm/alternative.h>
+-
+-void jprobe_return_end(void);
+-static void __kprobes arch_copy_kprobe(struct kprobe *p);
+-
+-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+-DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+-
+-struct kretprobe_blackpoint kretprobe_blacklist[] = {
+-	{"__switch_to", }, /* This function switches only current task, but
+-			      doesn't switch kernel stack.*/
+-	{NULL, NULL}	/* Terminator */
+-};
+-const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+-
+-/*
+- * returns non-zero if opcode modifies the interrupt flag.
+- */
+-static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
+-{
+-	switch (*insn) {
+-	case 0xfa:		/* cli */
+-	case 0xfb:		/* sti */
+-	case 0xcf:		/* iret/iretd */
+-	case 0x9d:		/* popf/popfd */
+-		return 1;
+-	}
+-
+-	if (*insn  >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
+-		return 1;
+-	return 0;
+-}
+-
+-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+-{
+-	/* insn: must be on special executable page on x86_64. */
+-	p->ainsn.insn = get_insn_slot();
+-	if (!p->ainsn.insn) {
+-		return -ENOMEM;
+-	}
+-	arch_copy_kprobe(p);
+-	return 0;
+-}
+-
+-/*
+- * Determine if the instruction uses the %rip-relative addressing mode.
+- * If it does, return the address of the 32-bit displacement word.
+- * If not, return null.
+- */
+-static s32 __kprobes *is_riprel(u8 *insn)
+-{
+-#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
+-	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
+-	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
+-	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
+-	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
+-	 << (row % 64))
+-	static const u64 onebyte_has_modrm[256 / 64] = {
+-		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
+-		/*      -------------------------------         */
+-		W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 00 */
+-		W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 10 */
+-		W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 20 */
+-		W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0), /* 30 */
+-		W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */
+-		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 50 */
+-		W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)| /* 60 */
+-		W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 70 */
+-		W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */
+-		W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 90 */
+-		W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* a0 */
+-		W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* b0 */
+-		W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)| /* c0 */
+-		W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* d0 */
+-		W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* e0 */
+-		W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1)  /* f0 */
+-		/*      -------------------------------         */
+-		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
+-	};
+-	static const u64 twobyte_has_modrm[256 / 64] = {
+-		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
+-		/*      -------------------------------         */
+-		W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)| /* 0f */
+-		W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)| /* 1f */
+-		W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)| /* 2f */
+-		W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 3f */
+-		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 4f */
+-		W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 5f */
+-		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 6f */
+-		W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1), /* 7f */
+-		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 8f */
+-		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 9f */
+-		W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)| /* af */
+-		W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1), /* bf */
+-		W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)| /* cf */
+-		W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* df */
+-		W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* ef */
+-		W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0)  /* ff */
+-		/*      -------------------------------         */
+-		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
+-	};
+-#undef	W
+-	int need_modrm;
+-
+-	/* Skip legacy instruction prefixes.  */
+-	while (1) {
+-		switch (*insn) {
+-		case 0x66:
+-		case 0x67:
+-		case 0x2e:
+-		case 0x3e:
+-		case 0x26:
+-		case 0x64:
+-		case 0x65:
+-		case 0x36:
+-		case 0xf0:
+-		case 0xf3:
+-		case 0xf2:
+-			++insn;
+-			continue;
+-		}
+-		break;
+-	}
+-
+-	/* Skip REX instruction prefix.  */
+-	if ((*insn & 0xf0) == 0x40)
+-		++insn;
+-
+-	if (*insn == 0x0f) {	/* Two-byte opcode.  */
+-		++insn;
+-		need_modrm = test_bit(*insn, twobyte_has_modrm);
+-	} else {		/* One-byte opcode.  */
+-		need_modrm = test_bit(*insn, onebyte_has_modrm);
+-	}
+-
+-	if (need_modrm) {
+-		u8 modrm = *++insn;
+-		if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
+-			/* Displacement follows ModRM byte.  */
+-			return (s32 *) ++insn;
+-		}
+-	}
+-
+-	/* No %rip-relative addressing mode here.  */
+-	return NULL;
+-}
+-
+-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+-{
+-	s32 *ripdisp;
+-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
+-	ripdisp = is_riprel(p->ainsn.insn);
+-	if (ripdisp) {
+-		/*
+-		 * The copied instruction uses the %rip-relative
+-		 * addressing mode.  Adjust the displacement for the
+-		 * difference between the original location of this
+-		 * instruction and the location of the copy that will
+-		 * actually be run.  The tricky bit here is making sure
+-		 * that the sign extension happens correctly in this
+-		 * calculation, since we need a signed 32-bit result to
+-		 * be sign-extended to 64 bits when it's added to the
+-		 * %rip value and yield the same 64-bit result that the
+-		 * sign-extension of the original signed 32-bit
+-		 * displacement would have given.
+-		 */
+-		s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
+-		BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
+-		*ripdisp = disp;
+-	}
+-	p->opcode = *p->addr;
+-}
+-
+-void __kprobes arch_arm_kprobe(struct kprobe *p)
+-{
+-	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
+-}
+-
+-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+-{
+-	text_poke(p->addr, &p->opcode, 1);
+-}
+-
+-void __kprobes arch_remove_kprobe(struct kprobe *p)
+-{
+-	mutex_lock(&kprobe_mutex);
+-	free_insn_slot(p->ainsn.insn, 0);
+-	mutex_unlock(&kprobe_mutex);
+-}
+-
+-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+-{
+-	kcb->prev_kprobe.kp = kprobe_running();
+-	kcb->prev_kprobe.status = kcb->kprobe_status;
+-	kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
+-	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
+-}
+-
+-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+-{
+-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+-	kcb->kprobe_status = kcb->prev_kprobe.status;
+-	kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
+-	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
+-}
+-
+-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+-				struct kprobe_ctlblk *kcb)
+-{
+-	__get_cpu_var(current_kprobe) = p;
+-	kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
+-		= (regs->eflags & (TF_MASK | IF_MASK));
+-	if (is_IF_modifier(p->ainsn.insn))
+-		kcb->kprobe_saved_rflags &= ~IF_MASK;
+-}
+-
+-static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+-{
+-	regs->eflags |= TF_MASK;
+-	regs->eflags &= ~IF_MASK;
+-	/*single step inline if the instruction is an int3*/
+-	if (p->opcode == BREAKPOINT_INSTRUCTION)
+-		regs->rip = (unsigned long)p->addr;
+-	else
+-		regs->rip = (unsigned long)p->ainsn.insn;
+-}
+-
+-/* Called with kretprobe_lock held */
+-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+-				      struct pt_regs *regs)
+-{
+-	unsigned long *sara = (unsigned long *)regs->rsp;
+-
+-	ri->ret_addr = (kprobe_opcode_t *) *sara;
+-	/* Replace the return addr with trampoline addr */
+-	*sara = (unsigned long) &kretprobe_trampoline;
+-}
+-
+-int __kprobes kprobe_handler(struct pt_regs *regs)
+-{
+-	struct kprobe *p;
+-	int ret = 0;
+-	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
+-	struct kprobe_ctlblk *kcb;
+-
+-	/*
+-	 * We don't want to be preempted for the entire
+-	 * duration of kprobe processing
+-	 */
+-	preempt_disable();
+-	kcb = get_kprobe_ctlblk();
+-
+-	/* Check we're not actually recursing */
+-	if (kprobe_running()) {
+-		p = get_kprobe(addr);
+-		if (p) {
+-			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+-				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+-				regs->eflags &= ~TF_MASK;
+-				regs->eflags |= kcb->kprobe_saved_rflags;
+-				goto no_kprobe;
+-			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
+-				/* TODO: Provide re-entrancy from
+-				 * post_kprobes_handler() and avoid exception
+-				 * stack corruption while single-stepping on
+-				 * the instruction of the new probe.
+-				 */
+-				arch_disarm_kprobe(p);
+-				regs->rip = (unsigned long)p->addr;
+-				reset_current_kprobe();
+-				ret = 1;
+-			} else {
+-				/* We have reentered the kprobe_handler(), since
+-				 * another probe was hit while within the
+-				 * handler. We here save the original kprobe
+-				 * variables and just single step on instruction
+-				 * of the new probe without calling any user
+-				 * handlers.
+-				 */
+-				save_previous_kprobe(kcb);
+-				set_current_kprobe(p, regs, kcb);
+-				kprobes_inc_nmissed_count(p);
+-				prepare_singlestep(p, regs);
+-				kcb->kprobe_status = KPROBE_REENTER;
+-				return 1;
+-			}
+-		} else {
+-			if (*addr != BREAKPOINT_INSTRUCTION) {
+-			/* The breakpoint instruction was removed by
+-			 * another cpu right after we hit, no further
+-			 * handling of this interrupt is appropriate
+-			 */
+-				regs->rip = (unsigned long)addr;
+-				ret = 1;
+-				goto no_kprobe;
+-			}
+-			p = __get_cpu_var(current_kprobe);
+-			if (p->break_handler && p->break_handler(p, regs)) {
+-				goto ss_probe;
+-			}
+-		}
+-		goto no_kprobe;
+-	}
+-
+-	p = get_kprobe(addr);
+-	if (!p) {
+-		if (*addr != BREAKPOINT_INSTRUCTION) {
+-			/*
+-			 * The breakpoint instruction was removed right
+-			 * after we hit it.  Another cpu has removed
+-			 * either a probepoint or a debugger breakpoint
+-			 * at this address.  In either case, no further
+-			 * handling of this interrupt is appropriate.
+-			 * Back up over the (now missing) int3 and run
+-			 * the original instruction.
+-			 */
+-			regs->rip = (unsigned long)addr;
+-			ret = 1;
+-		}
+-		/* Not one of ours: let kernel handle it */
+-		goto no_kprobe;
+-	}
+-
+-	set_current_kprobe(p, regs, kcb);
+-	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+-
+-	if (p->pre_handler && p->pre_handler(p, regs))
+-		/* handler has already set things up, so skip ss setup */
+-		return 1;
+-
+-ss_probe:
+-	prepare_singlestep(p, regs);
+-	kcb->kprobe_status = KPROBE_HIT_SS;
+-	return 1;
+-
+-no_kprobe:
+-	preempt_enable_no_resched();
+-	return ret;
+-}
+-
+-/*
+- * For function-return probes, init_kprobes() establishes a probepoint
+- * here. When a retprobed function returns, this probe is hit and
+- * trampoline_probe_handler() runs, calling the kretprobe's handler.
+- */
+- void kretprobe_trampoline_holder(void)
+- {
+- 	asm volatile (  ".global kretprobe_trampoline\n"
+- 			"kretprobe_trampoline: \n"
+- 			"nop\n");
+- }
+-
+-/*
+- * Called when we hit the probe point at kretprobe_trampoline
+- */
+-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+-{
+-	struct kretprobe_instance *ri = NULL;
+-	struct hlist_head *head, empty_rp;
+-	struct hlist_node *node, *tmp;
+-	unsigned long flags, orig_ret_address = 0;
+-	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
+-
+-	INIT_HLIST_HEAD(&empty_rp);
+-	spin_lock_irqsave(&kretprobe_lock, flags);
+-	head = kretprobe_inst_table_head(current);
+-
+-	/*
+-	 * It is possible to have multiple instances associated with a given
+-	 * task either because multiple functions in the call path
+-	 * have a return probe installed on them, and/or more than one
+-	 * return probe was registered for a target function.
+-	 *
+-	 * We can handle this because:
+-	 *     - instances are always inserted at the head of the list
+-	 *     - when multiple return probes are registered for the same
+-	 *       function, the first instance's ret_addr will point to the
+-	 *       real return address, and all the rest will point to
+-	 *       kretprobe_trampoline
+-	 */
+-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+-		if (ri->task != current)
+-			/* another task is sharing our hash bucket */
+-			continue;
+-
+-		if (ri->rp && ri->rp->handler)
+-			ri->rp->handler(ri, regs);
+-
+-		orig_ret_address = (unsigned long)ri->ret_addr;
+-		recycle_rp_inst(ri, &empty_rp);
+-
+-		if (orig_ret_address != trampoline_address)
+-			/*
+-			 * This is the real return address. Any other
+-			 * instances associated with this task are for
+-			 * other calls deeper on the call stack
+-			 */
+-			break;
+-	}
+-
+-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+-	regs->rip = orig_ret_address;
+-
+-	reset_current_kprobe();
+-	spin_unlock_irqrestore(&kretprobe_lock, flags);
+-	preempt_enable_no_resched();
+-
+-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+-		hlist_del(&ri->hlist);
+-		kfree(ri);
+-	}
+-	/*
+-	 * By returning a non-zero value, we are telling
+-	 * kprobe_handler() that we don't want the post_handler
+-	 * to run (and have re-enabled preemption)
+-	 */
+-	return 1;
+-}
+-
+-/*
+- * Called after single-stepping.  p->addr is the address of the
+- * instruction whose first byte has been replaced by the "int 3"
+- * instruction.  To avoid the SMP problems that can occur when we
+- * temporarily put back the original opcode to single-step, we
+- * single-stepped a copy of the instruction.  The address of this
+- * copy is p->ainsn.insn.
+- *
+- * This function prepares to return from the post-single-step
+- * interrupt.  We have to fix up the stack as follows:
+- *
+- * 0) Except in the case of absolute or indirect jump or call instructions,
+- * the new rip is relative to the copied instruction.  We need to make
+- * it relative to the original instruction.
+- *
+- * 1) If the single-stepped instruction was pushfl, then the TF and IF
+- * flags are set in the just-pushed eflags, and may need to be cleared.
+- *
+- * 2) If the single-stepped instruction was a call, the return address
+- * that is atop the stack is the address following the copied instruction.
+- * We need to make it the address following the original instruction.
+- */
+-static void __kprobes resume_execution(struct kprobe *p,
+-		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+-{
+-	unsigned long *tos = (unsigned long *)regs->rsp;
+-	unsigned long copy_rip = (unsigned long)p->ainsn.insn;
+-	unsigned long orig_rip = (unsigned long)p->addr;
+-	kprobe_opcode_t *insn = p->ainsn.insn;
+-
+-	/*skip the REX prefix*/
+-	if (*insn >= 0x40 && *insn <= 0x4f)
+-		insn++;
+-
+-	regs->eflags &= ~TF_MASK;
+-	switch (*insn) {
+-	case 0x9c:	/* pushfl */
+-		*tos &= ~(TF_MASK | IF_MASK);
+-		*tos |= kcb->kprobe_old_rflags;
+-		break;
+-	case 0xc2:	/* iret/ret/lret */
+-	case 0xc3:
+-	case 0xca:
+-	case 0xcb:
+-	case 0xcf:
+-	case 0xea:	/* jmp absolute -- ip is correct */
+-		/* ip is already adjusted, no more changes required */
+-		goto no_change;
+-	case 0xe8:	/* call relative - Fix return addr */
+-		*tos = orig_rip + (*tos - copy_rip);
+-		break;
+-	case 0xff:
+-		if ((insn[1] & 0x30) == 0x10) {
+-			/* call absolute, indirect */
+-			/* Fix return addr; ip is correct. */
+-			*tos = orig_rip + (*tos - copy_rip);
+-			goto no_change;
+-		} else if (((insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
+-			   ((insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
+-			/* ip is correct. */
+-			goto no_change;
+-		}
+-	default:
+-		break;
+-	}
+-
+-	regs->rip = orig_rip + (regs->rip - copy_rip);
+-no_change:
+-
+-	return;
+-}
+-
+-int __kprobes post_kprobe_handler(struct pt_regs *regs)
+-{
+-	struct kprobe *cur = kprobe_running();
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-
+-	if (!cur)
+-		return 0;
+-
+-	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+-		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+-		cur->post_handler(cur, regs, 0);
+-	}
+-
+-	resume_execution(cur, regs, kcb);
+-	regs->eflags |= kcb->kprobe_saved_rflags;
+-	trace_hardirqs_fixup_flags(regs->eflags);
+-
+-	/* Restore the original saved kprobes variables and continue. */
+-	if (kcb->kprobe_status == KPROBE_REENTER) {
+-		restore_previous_kprobe(kcb);
+-		goto out;
+-	}
+-	reset_current_kprobe();
+-out:
+-	preempt_enable_no_resched();
+-
+-	/*
+-	 * if somebody else is singlestepping across a probe point, eflags
+-	 * will have TF set, in which case, continue the remaining processing
+-	 * of do_debug, as if this is not a probe hit.
+-	 */
+-	if (regs->eflags & TF_MASK)
+-		return 0;
+-
+-	return 1;
+-}
+-
+-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+-{
+-	struct kprobe *cur = kprobe_running();
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-	const struct exception_table_entry *fixup;
+-
+-	switch(kcb->kprobe_status) {
+-	case KPROBE_HIT_SS:
+-	case KPROBE_REENTER:
+-		/*
+-		 * We are here because the instruction being single
+-		 * stepped caused a page fault. We reset the current
+-		 * kprobe and the rip points back to the probe address
+-		 * and allow the page fault handler to continue as a
+-		 * normal page fault.
+-		 */
+-		regs->rip = (unsigned long)cur->addr;
+-		regs->eflags |= kcb->kprobe_old_rflags;
+-		if (kcb->kprobe_status == KPROBE_REENTER)
+-			restore_previous_kprobe(kcb);
+-		else
+-			reset_current_kprobe();
+-		preempt_enable_no_resched();
+-		break;
+-	case KPROBE_HIT_ACTIVE:
+-	case KPROBE_HIT_SSDONE:
+-		/*
+-		 * We increment the nmissed count for accounting,
+-		 * we can also use npre/npostfault count for accounting
+-		 * these specific fault cases.
+-		 */
+-		kprobes_inc_nmissed_count(cur);
+-
+-		/*
+-		 * We come here because instructions in the pre/post
+-		 * handler caused the page_fault, this could happen
+-		 * if handler tries to access user space by
+-		 * copy_from_user(), get_user() etc. Let the
+-		 * user-specified handler try to fix it first.
+-		 */
+-		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+-			return 1;
+-
+-		/*
+-		 * In case the user-specified fault handler returned
+-		 * zero, try to fix up.
+-		 */
+-		fixup = search_exception_tables(regs->rip);
+-		if (fixup) {
+-			regs->rip = fixup->fixup;
+-			return 1;
+-		}
+-
+-		/*
+-		 * fixup() could not handle it,
+-		 * Let do_page_fault() fix it.
+-		 */
+-		break;
+-	default:
+-		break;
+-	}
+-	return 0;
+-}
+-
+-/*
+- * Wrapper routine for handling exceptions.
+- */
+-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+-				       unsigned long val, void *data)
+-{
+-	struct die_args *args = (struct die_args *)data;
+-	int ret = NOTIFY_DONE;
+-
+-	if (args->regs && user_mode(args->regs))
+-		return ret;
+-
+-	switch (val) {
+-	case DIE_INT3:
+-		if (kprobe_handler(args->regs))
+-			ret = NOTIFY_STOP;
+-		break;
+-	case DIE_DEBUG:
+-		if (post_kprobe_handler(args->regs))
+-			ret = NOTIFY_STOP;
+-		break;
+-	case DIE_GPF:
+-		/* kprobe_running() needs smp_processor_id() */
+-		preempt_disable();
+-		if (kprobe_running() &&
+-		    kprobe_fault_handler(args->regs, args->trapnr))
+-			ret = NOTIFY_STOP;
+-		preempt_enable();
+-		break;
+-	default:
+-		break;
+-	}
+-	return ret;
+-}
+-
+-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+-{
+-	struct jprobe *jp = container_of(p, struct jprobe, kp);
+-	unsigned long addr;
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-
+-	kcb->jprobe_saved_regs = *regs;
+-	kcb->jprobe_saved_rsp = (long *) regs->rsp;
+-	addr = (unsigned long)(kcb->jprobe_saved_rsp);
+-	/*
+-	 * As Linus pointed out, gcc assumes that the callee
+-	 * owns the argument space and could overwrite it, e.g.
+-	 * tailcall optimization. So, to be absolutely safe
+-	 * we also save and restore enough stack bytes to cover
+-	 * the argument area.
+-	 */
+-	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
+-			MIN_STACK_SIZE(addr));
+-	regs->eflags &= ~IF_MASK;
+-	trace_hardirqs_off();
+-	regs->rip = (unsigned long)(jp->entry);
+-	return 1;
+-}
+-
+-void __kprobes jprobe_return(void)
+-{
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-
+-	asm volatile ("       xchg   %%rbx,%%rsp     \n"
+-		      "       int3			\n"
+-		      "       .globl jprobe_return_end	\n"
+-		      "       jprobe_return_end:	\n"
+-		      "       nop			\n"::"b"
+-		      (kcb->jprobe_saved_rsp):"memory");
+-}
+-
+-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+-{
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+-	u8 *addr = (u8 *) (regs->rip - 1);
+-	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
+-	struct jprobe *jp = container_of(p, struct jprobe, kp);
+-
+-	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
+-		if ((unsigned long *)regs->rsp != kcb->jprobe_saved_rsp) {
+-			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
+-			printk("current rsp %p does not match saved rsp %p\n",
+-			       (long *)regs->rsp, kcb->jprobe_saved_rsp);
+-			printk("Saved registers for jprobe %p\n", jp);
+-			show_registers(saved_regs);
+-			printk("Current registers\n");
+-			show_registers(regs);
+-			BUG();
+-		}
+-		*regs = kcb->jprobe_saved_regs;
+-		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+-		       MIN_STACK_SIZE(stack_addr));
+-		preempt_enable_no_resched();
+-		return 1;
+-	}
+-	return 0;
+-}
+-
+-static struct kprobe trampoline_p = {
+-	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+-	.pre_handler = trampoline_probe_handler
+-};
+-
+-int __init arch_init_kprobes(void)
+-{
+-	return register_kprobe(&trampoline_p);
+-}
+-
+-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+-{
+-	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+-		return 1;
+-
+-	return 0;
+-}
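/*
 * A minimal kprobes module sketch, assuming the standard register_kprobe()
 * API: it shows how the pre_handler path implemented by kprobe_handler()
 * above is reached in practice.  The probed symbol ("do_fork") and the
 * printed message are illustrative placeholders, not anything mandated by
 * the patch.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Runs with preemption disabled, just before the probed instruction. */
static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;	/* 0: let kprobes single-step the copied instruction */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* illustrative probe point */
	.pre_handler	= my_pre_handler,
};

static int __init kp_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_example_init);
module_exit(kp_example_exit);
MODULE_LICENSE("GPL");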
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
 new file mode 100644
-index 0000000..bccb76d
+index 0000000..8a7660c
 --- /dev/null
-+++ b/arch/x86/crypto/salsa20_glue.c
-@@ -0,0 +1,129 @@
++++ b/arch/x86/kernel/ldt.c
+@@ -0,0 +1,260 @@
 +/*
-+ * Glue code for optimized assembly version of  Salsa20.
-+ *
-+ * Copyright (c) 2007 Tan Swee Heng <thesweeheng at gmail.com>
-+ *
-+ * The assembly codes are public domain assembly codes written by Daniel. J.
-+ * Bernstein <djb at cr.yp.to>. The codes are modified to include indentation
-+ * and to remove extraneous comments and functions that are not needed.
-+ * - i586 version, renamed as salsa20-i586-asm_32.S
-+ *   available from <http://cr.yp.to/snuffle/salsa20/x86-pm/salsa20.s>
-+ * - x86-64 version, renamed as salsa20-x86_64-asm_64.S
-+ *   available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s>
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
-+ * any later version.
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ * Copyright (C) 2002 Andi Kleen
 + *
++ * This handles calls from both 32bit and 64bit mode.
 + */
 +
-+#include <crypto/algapi.h>
-+#include <linux/module.h>
-+#include <linux/crypto.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/vmalloc.h>
 +
-+#define SALSA20_IV_SIZE        8U
-+#define SALSA20_MIN_KEY_SIZE  16U
-+#define SALSA20_MAX_KEY_SIZE  32U
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
 +
-+// use the ECRYPT_* function names
-+#define salsa20_keysetup        ECRYPT_keysetup
-+#define salsa20_ivsetup         ECRYPT_ivsetup
-+#define salsa20_encrypt_bytes   ECRYPT_encrypt_bytes
++#ifdef CONFIG_SMP
++static void flush_ldt(void *null)
++{
++	if (current->active_mm)
++		load_LDT(&current->active_mm->context);
++}
++#endif
 +
-+struct salsa20_ctx
++static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 +{
-+	u32 input[16];
-+};
++	void *oldldt, *newldt;
++	int oldsize;
 +
-+asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k,
-+				 u32 keysize, u32 ivsize);
-+asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv);
-+asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx,
-+				      const u8 *src, u8 *dst, u32 bytes);
++	if (mincount <= pc->size)
++		return 0;
++	oldsize = pc->size;
++	mincount = (mincount + 511) & (~511);
++	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
++		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
++	else
++		newldt = (void *)__get_free_page(GFP_KERNEL);
 +
-+static int setkey(struct crypto_tfm *tfm, const u8 *key,
-+		  unsigned int keysize)
-+{
-+	struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
-+	salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8);
++	if (!newldt)
++		return -ENOMEM;
++
++	if (oldsize)
++		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
++	oldldt = pc->ldt;
++	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
++	       (mincount - oldsize) * LDT_ENTRY_SIZE);
++
++#ifdef CONFIG_X86_64
++	/* CHECKME: Do we really need this ? */
++	wmb();
++#endif
++	pc->ldt = newldt;
++	wmb();
++	pc->size = mincount;
++	wmb();
++
++	if (reload) {
++#ifdef CONFIG_SMP
++		cpumask_t mask;
++
++		preempt_disable();
++		load_LDT(pc);
++		mask = cpumask_of_cpu(smp_processor_id());
++		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++			smp_call_function(flush_ldt, NULL, 1, 1);
++		preempt_enable();
++#else
++		load_LDT(pc);
++#endif
++	}
++	if (oldsize) {
++		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(oldldt);
++		else
++			put_page(virt_to_page(oldldt));
++	}
 +	return 0;
 +}
 +
-+static int encrypt(struct blkcipher_desc *desc,
-+		   struct scatterlist *dst, struct scatterlist *src,
-+		   unsigned int nbytes)
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
 +{
-+	struct blkcipher_walk walk;
-+	struct crypto_blkcipher *tfm = desc->tfm;
-+	struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
-+	int err;
++	int err = alloc_ldt(new, old->size, 0);
 +
-+	blkcipher_walk_init(&walk, dst, src, nbytes);
-+	err = blkcipher_walk_virt_block(desc, &walk, 64);
++	if (err < 0)
++		return err;
++	memcpy(new->ldt, old->ldt, old->size * LDT_ENTRY_SIZE);
++	return 0;
++}
 +
-+	salsa20_ivsetup(ctx, walk.iv);
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++	struct mm_struct *old_mm;
++	int retval = 0;
 +
-+	if (likely(walk.nbytes == nbytes))
-+	{
-+		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
-+				      walk.dst.virt.addr, nbytes);
-+		return blkcipher_walk_done(desc, &walk, 0);
++	mutex_init(&mm->context.lock);
++	mm->context.size = 0;
++	old_mm = current->mm;
++	if (old_mm && old_mm->context.size > 0) {
++		mutex_lock(&old_mm->context.lock);
++		retval = copy_ldt(&mm->context, &old_mm->context);
++		mutex_unlock(&old_mm->context.lock);
 +	}
++	return retval;
++}
 +
-+	while (walk.nbytes >= 64) {
-+		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
-+				      walk.dst.virt.addr,
-+				      walk.nbytes - (walk.nbytes % 64));
-+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
++/*
++ * No need to lock the MM as we are the last user
++ *
++ * 64bit: Don't touch the LDT register - we're already in the next thread.
++ */
++void destroy_context(struct mm_struct *mm)
++{
++	if (mm->context.size) {
++#ifdef CONFIG_X86_32
++		/* CHECKME: Can this ever happen ? */
++		if (mm == current->active_mm)
++			clear_LDT();
++#endif
++		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(mm->context.ldt);
++		else
++			put_page(virt_to_page(mm->context.ldt));
++		mm->context.size = 0;
 +	}
++}
 +
-+	if (walk.nbytes) {
-+		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
-+				      walk.dst.virt.addr, walk.nbytes);
-+		err = blkcipher_walk_done(desc, &walk, 0);
-+	}
++static int read_ldt(void __user *ptr, unsigned long bytecount)
++{
++	int err;
++	unsigned long size;
++	struct mm_struct *mm = current->mm;
 +
-+	return err;
-+}
++	if (!mm->context.size)
++		return 0;
++	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
++		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
 +
-+static struct crypto_alg alg = {
-+	.cra_name           =   "salsa20",
-+	.cra_driver_name    =   "salsa20-asm",
-+	.cra_priority       =   200,
-+	.cra_flags          =   CRYPTO_ALG_TYPE_BLKCIPHER,
-+	.cra_type           =   &crypto_blkcipher_type,
-+	.cra_blocksize      =   1,
-+	.cra_ctxsize        =   sizeof(struct salsa20_ctx),
-+	.cra_alignmask      =	3,
-+	.cra_module         =   THIS_MODULE,
-+	.cra_list           =   LIST_HEAD_INIT(alg.cra_list),
-+	.cra_u              =   {
-+		.blkcipher = {
-+			.setkey         =   setkey,
-+			.encrypt        =   encrypt,
-+			.decrypt        =   encrypt,
-+			.min_keysize    =   SALSA20_MIN_KEY_SIZE,
-+			.max_keysize    =   SALSA20_MAX_KEY_SIZE,
-+			.ivsize         =   SALSA20_IV_SIZE,
++	mutex_lock(&mm->context.lock);
++	size = mm->context.size * LDT_ENTRY_SIZE;
++	if (size > bytecount)
++		size = bytecount;
++
++	err = 0;
++	if (copy_to_user(ptr, mm->context.ldt, size))
++		err = -EFAULT;
++	mutex_unlock(&mm->context.lock);
++	if (err < 0)
++		goto error_return;
++	if (size != bytecount) {
++		/* zero-fill the rest */
++		if (clear_user(ptr + size, bytecount - size) != 0) {
++			err = -EFAULT;
++			goto error_return;
 +		}
 +	}
-+};
++	return bytecount;
++error_return:
++	return err;
++}
 +
-+static int __init init(void)
++static int read_default_ldt(void __user *ptr, unsigned long bytecount)
 +{
-+	return crypto_register_alg(&alg);
++	/* CHECKME: Can we use _one_ random number ? */
++#ifdef CONFIG_X86_32
++	unsigned long size = 5 * sizeof(struct desc_struct);
++#else
++	unsigned long size = 128;
++#endif
++	if (bytecount > size)
++		bytecount = size;
++	if (clear_user(ptr, bytecount))
++		return -EFAULT;
++	return bytecount;
 +}
 +
-+static void __exit fini(void)
++static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 +{
-+	crypto_unregister_alg(&alg);
++	struct mm_struct *mm = current->mm;
++	struct desc_struct ldt;
++	int error;
++	struct user_desc ldt_info;
++
++	error = -EINVAL;
++	if (bytecount != sizeof(ldt_info))
++		goto out;
++	error = -EFAULT;
++	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
++		goto out;
++
++	error = -EINVAL;
++	if (ldt_info.entry_number >= LDT_ENTRIES)
++		goto out;
++	if (ldt_info.contents == 3) {
++		if (oldmode)
++			goto out;
++		if (ldt_info.seg_not_present == 0)
++			goto out;
++	}
++
++	mutex_lock(&mm->context.lock);
++	if (ldt_info.entry_number >= mm->context.size) {
++		error = alloc_ldt(&current->mm->context,
++				  ldt_info.entry_number + 1, 1);
++		if (error < 0)
++			goto out_unlock;
++	}
++
++	/* Allow LDTs to be cleared by the user. */
++	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++		if (oldmode || LDT_empty(&ldt_info)) {
++			memset(&ldt, 0, sizeof(ldt));
++			goto install;
++		}
++	}
++
++	fill_ldt(&ldt, &ldt_info);
++	if (oldmode)
++		ldt.avl = 0;
++
++	/* Install the new entry ...  */
++install:
++	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
++	error = 0;
++
++out_unlock:
++	mutex_unlock(&mm->context.lock);
++out:
++	return error;
 +}
 +
-+module_init(init);
-+module_exit(fini);
++asmlinkage int sys_modify_ldt(int func, void __user *ptr,
++			      unsigned long bytecount)
++{
++	int ret = -ENOSYS;
 +
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
-+MODULE_ALIAS("salsa20");
-+MODULE_ALIAS("salsa20-asm");
-diff --git a/arch/x86/crypto/twofish_32.c b/arch/x86/crypto/twofish_32.c
++	switch (func) {
++	case 0:
++		ret = read_ldt(ptr, bytecount);
++		break;
++	case 1:
++		ret = write_ldt(ptr, bytecount, 1);
++		break;
++	case 2:
++		ret = read_default_ldt(ptr, bytecount);
++		break;
++	case 0x11:
++		ret = write_ldt(ptr, bytecount, 0);
++		break;
++	}
++	return ret;
++}
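/*
 * A minimal user-space sketch, assuming the usual modify_ldt(2) interface:
 * it exercises the func codes dispatched by sys_modify_ldt() above
 * (0 = read_ldt, 1 = write_ldt in old mode, 2 = read_default_ldt,
 * 0x11 = write_ldt).  The descriptor values are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc desc;
	unsigned char buf[8 * LDT_ENTRY_SIZE];
	long n;

	/* An "empty" descriptor: write_ldt() treats this as clearing entry 0. */
	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 0;
	desc.read_exec_only = 1;
	desc.seg_not_present = 1;

	if (syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc)) != 0)
		perror("modify_ldt write");

	/* func 0 reads the LDT back and returns the number of bytes copied. */
	n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	printf("read %ld LDT bytes\n", n);
	return 0;
}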
+diff --git a/arch/x86/kernel/ldt_32.c b/arch/x86/kernel/ldt_32.c
 deleted file mode 100644
-index e3004df..0000000
---- a/arch/x86/crypto/twofish_32.c
+index 9ff90a2..0000000
+--- a/arch/x86/kernel/ldt_32.c
 +++ /dev/null
-@@ -1,97 +0,0 @@
+@@ -1,248 +0,0 @@
 -/*
-- *  Glue Code for optimized 586 assembler version of TWOFISH
-- *
-- * Originally Twofish for GPG
-- * By Matthew Skala <mskala at ansuz.sooke.bc.ca>, July 26, 1998
-- * 256-bit key length added March 20, 1999
-- * Some modifications to reduce the text size by Werner Koch, April, 1998
-- * Ported to the kerneli patch by Marc Mutz <Marc at Mutz.com>
-- * Ported to CryptoAPI by Colin Slater <hoho at tacomeat.net>
-- *
-- * The original author has disclaimed all copyright interest in this
-- * code and thus put it in the public domain. The subsequent authors
-- * have put this under the GNU General Public License.
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, write to the Free Software
-- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
-- * USA
-- *
-- * This code is a "clean room" implementation, written from the paper
-- * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
-- * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
-- * through http://www.counterpane.com/twofish.html
-- *
-- * For background information on multiplication in finite fields, used for
-- * the matrix operations in the key schedule, see the book _Contemporary
-- * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
-- * Third Edition.
+- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+- * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
 - */
 -
--#include <crypto/twofish.h>
--#include <linux/crypto.h>
--#include <linux/init.h>
--#include <linux/module.h>
--#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/sched.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/vmalloc.h>
+-#include <linux/slab.h>
 -
+-#include <asm/uaccess.h>
+-#include <asm/system.h>
+-#include <asm/ldt.h>
+-#include <asm/desc.h>
+-#include <asm/mmu_context.h>
 -
--asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
--asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
+-static void flush_ldt(void *null)
+-{
+-	if (current->active_mm)
+-		load_LDT(&current->active_mm->context);
+-}
+-#endif
 -
--static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 -{
--	twofish_enc_blk(tfm, dst, src);
+-	void *oldldt;
+-	void *newldt;
+-	int oldsize;
+-
+-	if (mincount <= pc->size)
+-		return 0;
+-	oldsize = pc->size;
+-	mincount = (mincount+511)&(~511);
+-	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+-		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+-	else
+-		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+-
+-	if (!newldt)
+-		return -ENOMEM;
+-
+-	if (oldsize)
+-		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+-	oldldt = pc->ldt;
+-	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+-	pc->ldt = newldt;
+-	wmb();
+-	pc->size = mincount;
+-	wmb();
+-
+-	if (reload) {
+-#ifdef CONFIG_SMP
+-		cpumask_t mask;
+-		preempt_disable();
+-		load_LDT(pc);
+-		mask = cpumask_of_cpu(smp_processor_id());
+-		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+-			smp_call_function(flush_ldt, NULL, 1, 1);
+-		preempt_enable();
+-#else
+-		load_LDT(pc);
+-#endif
+-	}
+-	if (oldsize) {
+-		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(oldldt);
+-		else
+-			kfree(oldldt);
+-	}
+-	return 0;
 -}
 -
--static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
 -{
--	twofish_dec_blk(tfm, dst, src);
+-	int err = alloc_ldt(new, old->size, 0);
+-	if (err < 0)
+-		return err;
+-	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+-	return 0;
 -}
 -
--static struct crypto_alg alg = {
--	.cra_name		=	"twofish",
--	.cra_driver_name	=	"twofish-i586",
--	.cra_priority		=	200,
--	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
--	.cra_blocksize		=	TF_BLOCK_SIZE,
--	.cra_ctxsize		=	sizeof(struct twofish_ctx),
--	.cra_alignmask		=	3,
--	.cra_module		=	THIS_MODULE,
--	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
--	.cra_u			=	{
--		.cipher = {
--			.cia_min_keysize	=	TF_MIN_KEY_SIZE,
--			.cia_max_keysize	=	TF_MAX_KEY_SIZE,
--			.cia_setkey		=	twofish_setkey,
--			.cia_encrypt		=	twofish_encrypt,
--			.cia_decrypt		=	twofish_decrypt
+-/*
+- * we do not have to muck with descriptors here, that is
+- * done in switch_mm() as needed.
+- */
+-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+-{
+-	struct mm_struct * old_mm;
+-	int retval = 0;
+-
+-	mutex_init(&mm->context.lock);
+-	mm->context.size = 0;
+-	old_mm = current->mm;
+-	if (old_mm && old_mm->context.size > 0) {
+-		mutex_lock(&old_mm->context.lock);
+-		retval = copy_ldt(&mm->context, &old_mm->context);
+-		mutex_unlock(&old_mm->context.lock);
+-	}
+-	return retval;
+-}
+-
+-/*
+- * No need to lock the MM as we are the last user
+- */
+-void destroy_context(struct mm_struct *mm)
+-{
+-	if (mm->context.size) {
+-		if (mm == current->active_mm)
+-			clear_LDT();
+-		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(mm->context.ldt);
+-		else
+-			kfree(mm->context.ldt);
+-		mm->context.size = 0;
+-	}
+-}
+-
+-static int read_ldt(void __user * ptr, unsigned long bytecount)
+-{
+-	int err;
+-	unsigned long size;
+-	struct mm_struct * mm = current->mm;
+-
+-	if (!mm->context.size)
+-		return 0;
+-	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+-		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+-
+-	mutex_lock(&mm->context.lock);
+-	size = mm->context.size*LDT_ENTRY_SIZE;
+-	if (size > bytecount)
+-		size = bytecount;
+-
+-	err = 0;
+-	if (copy_to_user(ptr, mm->context.ldt, size))
+-		err = -EFAULT;
+-	mutex_unlock(&mm->context.lock);
+-	if (err < 0)
+-		goto error_return;
+-	if (size != bytecount) {
+-		/* zero-fill the rest */
+-		if (clear_user(ptr+size, bytecount-size) != 0) {
+-			err = -EFAULT;
+-			goto error_return;
 -		}
 -	}
--};
+-	return bytecount;
+-error_return:
+-	return err;
+-}
 -
--static int __init init(void)
+-static int read_default_ldt(void __user * ptr, unsigned long bytecount)
 -{
--	return crypto_register_alg(&alg);
+-	int err;
+-	unsigned long size;
+-
+-	err = 0;
+-	size = 5*sizeof(struct desc_struct);
+-	if (size > bytecount)
+-		size = bytecount;
+-
+-	err = size;
+-	if (clear_user(ptr, size))
+-		err = -EFAULT;
+-
+-	return err;
 -}
 -
--static void __exit fini(void)
+-static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
 -{
--	crypto_unregister_alg(&alg);
+-	struct mm_struct * mm = current->mm;
+-	__u32 entry_1, entry_2;
+-	int error;
+-	struct user_desc ldt_info;
+-
+-	error = -EINVAL;
+-	if (bytecount != sizeof(ldt_info))
+-		goto out;
+-	error = -EFAULT; 	
+-	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+-		goto out;
+-
+-	error = -EINVAL;
+-	if (ldt_info.entry_number >= LDT_ENTRIES)
+-		goto out;
+-	if (ldt_info.contents == 3) {
+-		if (oldmode)
+-			goto out;
+-		if (ldt_info.seg_not_present == 0)
+-			goto out;
+-	}
+-
+-	mutex_lock(&mm->context.lock);
+-	if (ldt_info.entry_number >= mm->context.size) {
+-		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+-		if (error < 0)
+-			goto out_unlock;
+-	}
+-
+-   	/* Allow LDTs to be cleared by the user. */
+-   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+-		if (oldmode || LDT_empty(&ldt_info)) {
+-			entry_1 = 0;
+-			entry_2 = 0;
+-			goto install;
+-		}
+-	}
+-
+-	entry_1 = LDT_entry_a(&ldt_info);
+-	entry_2 = LDT_entry_b(&ldt_info);
+-	if (oldmode)
+-		entry_2 &= ~(1 << 20);
+-
+-	/* Install the new entry ...  */
+-install:
+-	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, entry_1, entry_2);
+-	error = 0;
+-
+-out_unlock:
+-	mutex_unlock(&mm->context.lock);
+-out:
+-	return error;
 -}
 -
--module_init(init);
--module_exit(fini);
+-asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+-{
+-	int ret = -ENOSYS;
 -
--MODULE_LICENSE("GPL");
--MODULE_DESCRIPTION ("Twofish Cipher Algorithm, i586 asm optimized");
--MODULE_ALIAS("twofish");
-diff --git a/arch/x86/crypto/twofish_64.c b/arch/x86/crypto/twofish_64.c
+-	switch (func) {
+-	case 0:
+-		ret = read_ldt(ptr, bytecount);
+-		break;
+-	case 1:
+-		ret = write_ldt(ptr, bytecount, 1);
+-		break;
+-	case 2:
+-		ret = read_default_ldt(ptr, bytecount);
+-		break;
+-	case 0x11:
+-		ret = write_ldt(ptr, bytecount, 0);
+-		break;
+-	}
+-	return ret;
+-}
+diff --git a/arch/x86/kernel/ldt_64.c b/arch/x86/kernel/ldt_64.c
 deleted file mode 100644
-index 182d91d..0000000
---- a/arch/x86/crypto/twofish_64.c
+index 60e57ab..0000000
+--- a/arch/x86/kernel/ldt_64.c
 +++ /dev/null
-@@ -1,97 +0,0 @@
+@@ -1,250 +0,0 @@
 -/*
-- * Glue Code for optimized x86_64 assembler version of TWOFISH
-- *
-- * Originally Twofish for GPG
-- * By Matthew Skala <mskala at ansuz.sooke.bc.ca>, July 26, 1998
-- * 256-bit key length added March 20, 1999
-- * Some modifications to reduce the text size by Werner Koch, April, 1998
-- * Ported to the kerneli patch by Marc Mutz <Marc at Mutz.com>
-- * Ported to CryptoAPI by Colin Slater <hoho at tacomeat.net>
-- *
-- * The original author has disclaimed all copyright interest in this
-- * code and thus put it in the public domain. The subsequent authors
-- * have put this under the GNU General Public License.
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, write to the Free Software
-- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
-- * USA
-- *
-- * This code is a "clean room" implementation, written from the paper
-- * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
-- * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
-- * through http://www.counterpane.com/twofish.html
-- *
-- * For background information on multiplication in finite fields, used for
-- * the matrix operations in the key schedule, see the book _Contemporary
-- * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
-- * Third Edition.
+- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+- * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
+- * Copyright (C) 2002 Andi Kleen
+- * 
+- * This handles calls from both 32bit and 64bit mode.
 - */
 -
--#include <crypto/twofish.h>
--#include <linux/crypto.h>
--#include <linux/init.h>
--#include <linux/kernel.h>
--#include <linux/module.h>
--#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/sched.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/vmalloc.h>
+-#include <linux/slab.h>
 -
--asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
--asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-#include <asm/uaccess.h>
+-#include <asm/system.h>
+-#include <asm/ldt.h>
+-#include <asm/desc.h>
+-#include <asm/proto.h>
 -
--static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+-#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
+-static void flush_ldt(void *null)
 -{
--	twofish_enc_blk(tfm, dst, src);
+-	if (current->active_mm)
+-               load_LDT(&current->active_mm->context);
 -}
+-#endif
 -
--static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+-static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
 -{
--	twofish_dec_blk(tfm, dst, src);
+-	void *oldldt;
+-	void *newldt;
+-	unsigned oldsize;
+-
+-	if (mincount <= (unsigned)pc->size)
+-		return 0;
+-	oldsize = pc->size;
+-	mincount = (mincount+511)&(~511);
+-	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+-		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+-	else
+-		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+-
+-	if (!newldt)
+-		return -ENOMEM;
+-
+-	if (oldsize)
+-		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+-	oldldt = pc->ldt;
+-	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+-	wmb();
+-	pc->ldt = newldt;
+-	wmb();
+-	pc->size = mincount;
+-	wmb();
+-	if (reload) {
+-#ifdef CONFIG_SMP
+-		cpumask_t mask;
+-
+-		preempt_disable();
+-		mask = cpumask_of_cpu(smp_processor_id());
+-		load_LDT(pc);
+-		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+-			smp_call_function(flush_ldt, NULL, 1, 1);
+-		preempt_enable();
+-#else
+-		load_LDT(pc);
+-#endif
+-	}
+-	if (oldsize) {
+-		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(oldldt);
+-		else
+-			kfree(oldldt);
+-	}
+-	return 0;
 -}
 -
--static struct crypto_alg alg = {
--	.cra_name		=	"twofish",
--	.cra_driver_name	=	"twofish-x86_64",
--	.cra_priority		=	200,
--	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
--	.cra_blocksize		=	TF_BLOCK_SIZE,
--	.cra_ctxsize		=	sizeof(struct twofish_ctx),
--	.cra_alignmask		=	3,
--	.cra_module		=	THIS_MODULE,
--	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
--	.cra_u			=	{
--		.cipher = {
--			.cia_min_keysize	=	TF_MIN_KEY_SIZE,
--			.cia_max_keysize	=	TF_MAX_KEY_SIZE,
--			.cia_setkey		=	twofish_setkey,
--			.cia_encrypt		=	twofish_encrypt,
--			.cia_decrypt		=	twofish_decrypt
+-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+-{
+-	int err = alloc_ldt(new, old->size, 0);
+-	if (err < 0)
+-		return err;
+-	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+-	return 0;
+-}
+-
+-/*
+- * we do not have to muck with descriptors here, that is
+- * done in switch_mm() as needed.
+- */
+-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+-{
+-	struct mm_struct * old_mm;
+-	int retval = 0;
+-
+-	mutex_init(&mm->context.lock);
+-	mm->context.size = 0;
+-	old_mm = current->mm;
+-	if (old_mm && old_mm->context.size > 0) {
+-		mutex_lock(&old_mm->context.lock);
+-		retval = copy_ldt(&mm->context, &old_mm->context);
+-		mutex_unlock(&old_mm->context.lock);
+-	}
+-	return retval;
+-}
+-
+-/*
+- * 
+- * Don't touch the LDT register - we're already in the next thread.
+- */
+-void destroy_context(struct mm_struct *mm)
+-{
+-	if (mm->context.size) {
+-		if ((unsigned)mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(mm->context.ldt);
+-		else
+-			kfree(mm->context.ldt);
+-		mm->context.size = 0;
+-	}
+-}
+-
+-static int read_ldt(void __user * ptr, unsigned long bytecount)
+-{
+-	int err;
+-	unsigned long size;
+-	struct mm_struct * mm = current->mm;
+-
+-	if (!mm->context.size)
+-		return 0;
+-	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+-		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+-
+-	mutex_lock(&mm->context.lock);
+-	size = mm->context.size*LDT_ENTRY_SIZE;
+-	if (size > bytecount)
+-		size = bytecount;
+-
+-	err = 0;
+-	if (copy_to_user(ptr, mm->context.ldt, size))
+-		err = -EFAULT;
+-	mutex_unlock(&mm->context.lock);
+-	if (err < 0)
+-		goto error_return;
+-	if (size != bytecount) {
+-		/* zero-fill the rest */
+-		if (clear_user(ptr+size, bytecount-size) != 0) {
+-			err = -EFAULT;
+-			goto error_return;
 -		}
 -	}
--};
+-	return bytecount;
+-error_return:
+-	return err;
+-}
 -
--static int __init init(void)
+-static int read_default_ldt(void __user * ptr, unsigned long bytecount)
 -{
--	return crypto_register_alg(&alg);
+-	/* Arbitrary number */ 
+-	/* x86-64 default LDT is all zeros */
+-	if (bytecount > 128) 
+-		bytecount = 128; 	
+-	if (clear_user(ptr, bytecount))
+-		return -EFAULT;
+-	return bytecount; 
 -}
 -
--static void __exit fini(void)
+-static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
 -{
--	crypto_unregister_alg(&alg);
+-	struct task_struct *me = current;
+-	struct mm_struct * mm = me->mm;
+-	__u32 entry_1, entry_2, *lp;
+-	int error;
+-	struct user_desc ldt_info;
+-
+-	error = -EINVAL;
+-
+-	if (bytecount != sizeof(ldt_info))
+-		goto out;
+-	error = -EFAULT; 	
+-	if (copy_from_user(&ldt_info, ptr, bytecount))
+-		goto out;
+-
+-	error = -EINVAL;
+-	if (ldt_info.entry_number >= LDT_ENTRIES)
+-		goto out;
+-	if (ldt_info.contents == 3) {
+-		if (oldmode)
+-			goto out;
+-		if (ldt_info.seg_not_present == 0)
+-			goto out;
+-	}
+-
+-	mutex_lock(&mm->context.lock);
+-	if (ldt_info.entry_number >= (unsigned)mm->context.size) {
+-		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+-		if (error < 0)
+-			goto out_unlock;
+-	}
+-
+-	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+-
+-   	/* Allow LDTs to be cleared by the user. */
+-   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+-		if (oldmode || LDT_empty(&ldt_info)) {
+-			entry_1 = 0;
+-			entry_2 = 0;
+-			goto install;
+-		}
+-	}
+-
+-	entry_1 = LDT_entry_a(&ldt_info);
+-	entry_2 = LDT_entry_b(&ldt_info);
+-	if (oldmode)
+-		entry_2 &= ~(1 << 20);
+-
+-	/* Install the new entry ...  */
+-install:
+-	*lp	= entry_1;
+-	*(lp+1)	= entry_2;
+-	error = 0;
+-
+-out_unlock:
+-	mutex_unlock(&mm->context.lock);
+-out:
+-	return error;
 -}
 -
--module_init(init);
--module_exit(fini);
+-asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+-{
+-	int ret = -ENOSYS;
 -
--MODULE_LICENSE("GPL");
--MODULE_DESCRIPTION ("Twofish Cipher Algorithm, x86_64 asm optimized");
--MODULE_ALIAS("twofish");
-diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
+-	switch (func) {
+-	case 0:
+-		ret = read_ldt(ptr, bytecount);
+-		break;
+-	case 1:
+-		ret = write_ldt(ptr, bytecount, 1);
+-		break;
+-	case 2:
+-		ret = read_default_ldt(ptr, bytecount);
+-		break;
+-	case 0x11:
+-		ret = write_ldt(ptr, bytecount, 0);
+-		break;
+-	}
+-	return ret;
+-}
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index 11b935f..c1cfd60 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -32,7 +32,7 @@ static u32 kexec_pte1[1024] PAGE_ALIGNED;
+ 
+ static void set_idt(void *newidt, __u16 limit)
+ {
+-	struct Xgt_desc_struct curidt;
++	struct desc_ptr curidt;
+ 
+ 	/* ia32 supports unaliged loads & stores */
+ 	curidt.size    = limit;
+@@ -44,7 +44,7 @@ static void set_idt(void *newidt, __u16 limit)
+ 
+ static void set_gdt(void *newgdt, __u16 limit)
+ {
+-	struct Xgt_desc_struct curgdt;
++	struct desc_ptr curgdt;
+ 
+ 	/* ia32 supports unaligned loads & stores */
+ 	curgdt.size    = limit;
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index aa3d2c8..a1fef42 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -234,10 +234,5 @@ NORET_TYPE void machine_kexec(struct kimage *image)
+ void arch_crash_save_vmcoreinfo(void)
+ {
+ 	VMCOREINFO_SYMBOL(init_level4_pgt);
+-
+-#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
+-	VMCOREINFO_SYMBOL(node_data);
+-	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+-#endif
+ }
+ 
+diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
+index 3960ab7..219f86e 100644
+--- a/arch/x86/kernel/mfgpt_32.c
++++ b/arch/x86/kernel/mfgpt_32.c
+@@ -63,6 +63,21 @@ static int __init mfgpt_disable(char *s)
+ }
+ __setup("nomfgpt", mfgpt_disable);
+ 
++/* Reset the MFGPT timers. This is required by some broken BIOSes which already
++ * do the same and leave the system in an unstable state. At least TinyBIOS 0.98
++ * is affected (0.99 is OK with the MFGPT workaround left off).
++ */
++static int __init mfgpt_fix(char *s)
++{
++	u32 val, dummy;
++
++	/* The following undocumented bit resets the MFGPT timers */
++	val = 0xFF; dummy = 0;
++	wrmsr(0x5140002B, val, dummy);
++	return 1;
++}
++__setup("mfgptfix", mfgpt_fix);
++
+ /*
+  * Check whether any MFGPTs are available for the kernel to use.  In most
+  * cases, firmware that uses AMD's VSA code will claim all timers during
+diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
+index 09c3152..6ff447f 100644
+--- a/arch/x86/kernel/microcode.c
++++ b/arch/x86/kernel/microcode.c
+@@ -244,8 +244,8 @@ static int microcode_sanity_check(void *mc)
+ 		return 0;
+ 	/* check extended signature checksum */
+ 	for (i = 0; i < ext_sigcount; i++) {
+-		ext_sig = (struct extended_signature *)((void *)ext_header
+-			+ EXT_HEADER_SIZE + EXT_SIGNATURE_SIZE * i);
++		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
++			  EXT_SIGNATURE_SIZE * i;
+ 		sum = orig_sum
+ 			- (mc_header->sig + mc_header->pf + mc_header->cksum)
+ 			+ (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
+@@ -279,11 +279,9 @@ static int get_maching_microcode(void *mc, int cpu)
+ 	if (total_size <= get_datasize(mc_header) + MC_HEADER_SIZE)
+ 		return 0;
+ 
+-	ext_header = (struct extended_sigtable *)(mc +
+-			get_datasize(mc_header) + MC_HEADER_SIZE);
++	ext_header = mc + get_datasize(mc_header) + MC_HEADER_SIZE;
+ 	ext_sigcount = ext_header->count;
+-	ext_sig = (struct extended_signature *)((void *)ext_header
+-			+ EXT_HEADER_SIZE);
++	ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
+ 	for (i = 0; i < ext_sigcount; i++) {
+ 		if (microcode_update_match(cpu, mc_header,
+ 				ext_sig->sig, ext_sig->pf))
+@@ -436,7 +434,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
+ 		return -EINVAL;
+ 	}
+ 
+-	lock_cpu_hotplug();
++	get_online_cpus();
+ 	mutex_lock(&microcode_mutex);
+ 
+ 	user_buffer = (void __user *) buf;
+@@ -447,7 +445,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
+ 		ret = (ssize_t)len;
+ 
+ 	mutex_unlock(&microcode_mutex);
+-	unlock_cpu_hotplug();
++	put_online_cpus();
+ 
+ 	return ret;
+ }
+@@ -539,7 +537,7 @@ static int cpu_request_microcode(int cpu)
+ 		pr_debug("ucode data file %s load failed\n", name);
+ 		return error;
+ 	}
+-	buf = (void *)firmware->data;
++	buf = firmware->data;
+ 	size = firmware->size;
+ 	while ((offset = get_next_ucode_from_buffer(&mc, buf, size, offset))
+ 			> 0) {
+@@ -658,14 +656,14 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
+ 
+ 		old = current->cpus_allowed;
+ 
+-		lock_cpu_hotplug();
++		get_online_cpus();
+ 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ 
+ 		mutex_lock(&microcode_mutex);
+ 		if (uci->valid)
+ 			err = cpu_request_microcode(cpu);
+ 		mutex_unlock(&microcode_mutex);
+-		unlock_cpu_hotplug();
++		put_online_cpus();
+ 		set_cpus_allowed(current, old);
+ 	}
+ 	if (err)
+@@ -817,9 +815,9 @@ static int __init microcode_init (void)
+ 		return PTR_ERR(microcode_pdev);
+ 	}
+ 
+-	lock_cpu_hotplug();
++	get_online_cpus();
+ 	error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver);
+-	unlock_cpu_hotplug();
++	put_online_cpus();
+ 	if (error) {
+ 		microcode_dev_exit();
+ 		platform_device_unregister(microcode_pdev);
+@@ -839,9 +837,9 @@ static void __exit microcode_exit (void)
+ 
+ 	unregister_hotcpu_notifier(&mc_cpu_notifier);
+ 
+-	lock_cpu_hotplug();
++	get_online_cpus();
+ 	sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+-	unlock_cpu_hotplug();
++	put_online_cpus();
+ 
+ 	platform_device_unregister(microcode_pdev);
+ }
+diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c
+index 7a05a7f..67009cd 100644
+--- a/arch/x86/kernel/mpparse_32.c
++++ b/arch/x86/kernel/mpparse_32.c
+@@ -68,7 +68,7 @@ unsigned int def_to_bigsmp = 0;
+ /* Processor that is doing the boot up */
+ unsigned int boot_cpu_physical_apicid = -1U;
+ /* Internal processor count */
+-unsigned int __cpuinitdata num_processors;
++unsigned int num_processors;
+ 
+ /* Bitmask of physically existing CPUs */
+ physid_mask_t phys_cpu_present_map;
+@@ -258,7 +258,7 @@ static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
+ 	if (!(m->mpc_flags & MPC_APIC_USABLE))
+ 		return;
+ 
+-	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
++	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
+ 		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
+ 	if (nr_ioapics >= MAX_IO_APICS) {
+ 		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
+@@ -405,9 +405,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
+ 
+ 	mps_oem_check(mpc, oem, str);
+ 
+-	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
++	printk("APIC at: 0x%X\n", mpc->mpc_lapic);
+ 
+-	/* 
++	/*
+ 	 * Save the local APIC address (it might be non-default) -- but only
+ 	 * if we're not using ACPI.
+ 	 */
+@@ -721,7 +721,7 @@ static int __init smp_scan_config (unsigned long base, unsigned long length)
+ 	unsigned long *bp = phys_to_virt(base);
+ 	struct intel_mp_floating *mpf;
+ 
+-	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++	printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length);
+ 	if (sizeof(*mpf) != 16)
+ 		printk("Error: MPF size\n");
+ 
+@@ -734,8 +734,8 @@ static int __init smp_scan_config (unsigned long base, unsigned long length)
+ 				|| (mpf->mpf_specification == 4)) ) {
+ 
+ 			smp_found_config = 1;
+-			printk(KERN_INFO "found SMP MP-table at %08lx\n",
+-						virt_to_phys(mpf));
++			printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
++				mpf, virt_to_phys(mpf));
+ 			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
+ 			if (mpf->mpf_physptr) {
+ 				/*
+@@ -918,14 +918,14 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
+ 	 */
+ 	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
+ 	mp_ioapic_routing[idx].gsi_base = gsi_base;
+-	mp_ioapic_routing[idx].gsi_end = gsi_base + 
++	mp_ioapic_routing[idx].gsi_end = gsi_base +
+ 		io_apic_get_redir_entries(idx);
+ 
+-	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
+-		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
+-		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
+-		mp_ioapic_routing[idx].gsi_base,
+-		mp_ioapic_routing[idx].gsi_end);
++	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
++	       "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
++	       mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++	       mp_ioapic_routing[idx].gsi_base,
++	       mp_ioapic_routing[idx].gsi_end);
+ }
+ 
+ void __init
+@@ -1041,15 +1041,16 @@ void __init mp_config_acpi_legacy_irqs (void)
+ }
+ 
+ #define MAX_GSI_NUM	4096
++#define IRQ_COMPRESSION_START	64
+ 
+ int mp_register_gsi(u32 gsi, int triggering, int polarity)
+ {
+ 	int ioapic = -1;
+ 	int ioapic_pin = 0;
+ 	int idx, bit = 0;
+-	static int pci_irq = 16;
++	static int pci_irq = IRQ_COMPRESSION_START;
+ 	/*
+-	 * Mapping between Global System Interrups, which
++	 * Mapping between Global System Interrupts, which
+ 	 * represent all possible interrupts, and IRQs
+ 	 * assigned to actual devices.
+ 	 */
+@@ -1086,12 +1087,16 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
+ 	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
+ 		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
+ 			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
+-		return gsi_to_irq[gsi];
++		return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
+ 	}
+ 
+ 	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
+ 
+-	if (triggering == ACPI_LEVEL_SENSITIVE) {
++	/*
++	 * For GSI >= 64, use IRQ compression
++	 */
++	if ((gsi >= IRQ_COMPRESSION_START)
++		&& (triggering == ACPI_LEVEL_SENSITIVE)) {
+ 		/*
+ 		 * For PCI devices assign IRQs in order, avoiding gaps
+ 		 * due to unused I/O APIC pins.
+diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
+index ef4aab1..72ab140 100644
+--- a/arch/x86/kernel/mpparse_64.c
++++ b/arch/x86/kernel/mpparse_64.c
+@@ -60,14 +60,18 @@ unsigned int boot_cpu_id = -1U;
+ EXPORT_SYMBOL(boot_cpu_id);
+ 
+ /* Internal processor count */
+-unsigned int num_processors __cpuinitdata = 0;
++unsigned int num_processors;
+ 
+ unsigned disabled_cpus __cpuinitdata;
+ 
+ /* Bitmask of physically existing CPUs */
+ physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
+ 
+-u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
++				= { [0 ... NR_CPUS-1] = BAD_APICID };
++void *x86_bios_cpu_apicid_early_ptr;
++DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
++EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+ 
+ 
+ /*
+@@ -118,24 +122,22 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
+ 	physid_set(m->mpc_apicid, phys_cpu_present_map);
+  	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
+  		/*
+- 		 * bios_cpu_apicid is required to have processors listed
++		 * x86_bios_cpu_apicid is required to have processors listed
+  		 * in same order as logical cpu numbers. Hence the first
+  		 * entry is BSP, and so on.
+  		 */
+ 		cpu = 0;
+  	}
+-	bios_cpu_apicid[cpu] = m->mpc_apicid;
+-	/*
+-	 * We get called early in the the start_kernel initialization
+-	 * process when the per_cpu data area is not yet setup, so we
+-	 * use a static array that is removed after the per_cpu data
+-	 * area is created.
+-	 */
+-	if (x86_cpu_to_apicid_ptr) {
+-		u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr;
+-		x86_cpu_to_apicid[cpu] = m->mpc_apicid;
++	/* are we being called early in kernel startup? */
++	if (x86_cpu_to_apicid_early_ptr) {
++		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
++		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
++
++		cpu_to_apicid[cpu] = m->mpc_apicid;
++		bios_cpu_apicid[cpu] = m->mpc_apicid;
+ 	} else {
+ 		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
++		per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
+ 	}
+ 
+ 	cpu_set(cpu, cpu_possible_map);
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index ee6eba4..21f6e3c 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -155,15 +155,15 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
+ 
+ 	switch (action) {
+ 	case CPU_UP_PREPARE:
+-	case CPU_UP_PREPARE_FROZEN:
+ 		err = msr_device_create(cpu);
+ 		break;
+ 	case CPU_UP_CANCELED:
+-	case CPU_UP_CANCELED_FROZEN:
+ 	case CPU_DEAD:
+-	case CPU_DEAD_FROZEN:
+ 		msr_device_destroy(cpu);
+ 		break;
++	case CPU_UP_CANCELED_FROZEN:
++		destroy_suspended_device(msr_class, MKDEV(MSR_MAJOR, cpu));
++		break;
+ 	}
+ 	return err ? NOTIFY_BAD : NOTIFY_OK;
+ }
+diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
+index 852db29..edd4136 100644
+--- a/arch/x86/kernel/nmi_32.c
++++ b/arch/x86/kernel/nmi_32.c
+@@ -51,13 +51,13 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
+ 
+ static int endflag __initdata = 0;
+ 
++#ifdef CONFIG_SMP
+ /* The performance counters used by NMI_LOCAL_APIC don't trigger when
+  * the CPU is idle. To make sure the NMI watchdog really ticks on all
+  * CPUs during the test make them busy.
+  */
+ static __init void nmi_cpu_busy(void *data)
+ {
+-#ifdef CONFIG_SMP
+ 	local_irq_enable_in_hardirq();
+ 	/* Intentionally don't use cpu_relax here. This is
+ 	   to make sure that the performance counter really ticks,
+@@ -67,8 +67,8 @@ static __init void nmi_cpu_busy(void *data)
+ 	   care if they get somewhat less cycles. */
+ 	while (endflag == 0)
+ 		mb();
+-#endif
+ }
++#endif
+ 
+ static int __init check_nmi_watchdog(void)
+ {
+@@ -87,11 +87,13 @@ static int __init check_nmi_watchdog(void)
+ 
+ 	printk(KERN_INFO "Testing NMI watchdog ... ");
+ 
++#ifdef CONFIG_SMP
+ 	if (nmi_watchdog == NMI_LOCAL_APIC)
+ 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
++#endif
+ 
+ 	for_each_possible_cpu(cpu)
+-		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
++		prev_nmi_count[cpu] = nmi_count(cpu);
+ 	local_irq_enable();
+ 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
+ 
+@@ -176,7 +178,7 @@ static int lapic_nmi_resume(struct sys_device *dev)
+ 
+ 
+ static struct sysdev_class nmi_sysclass = {
+-	set_kset_name("lapic_nmi"),
++	.name		= "lapic_nmi",
+ 	.resume		= lapic_nmi_resume,
+ 	.suspend	= lapic_nmi_suspend,
+ };
+@@ -237,10 +239,10 @@ void acpi_nmi_disable(void)
+ 		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+ }
+ 
+-void setup_apic_nmi_watchdog (void *unused)
++void setup_apic_nmi_watchdog(void *unused)
+ {
+ 	if (__get_cpu_var(wd_enabled))
+- 		return;
++		return;
+ 
+ 	/* cheap hack to support suspend/resume */
+ 	/* if cpu0 is not active neither should the other cpus */
+@@ -329,7 +331,7 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
+ 	unsigned int sum;
+ 	int touched = 0;
+ 	int cpu = smp_processor_id();
+-	int rc=0;
++	int rc = 0;
+ 
+ 	/* check for other users first */
+ 	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
+diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
+index 4253c4e..fb99484 100644
+--- a/arch/x86/kernel/nmi_64.c
++++ b/arch/x86/kernel/nmi_64.c
+@@ -39,7 +39,7 @@ static cpumask_t backtrace_mask = CPU_MASK_NONE;
+  *  0: the lapic NMI watchdog is disabled, but can be enabled
+  */
+ atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
+-int panic_on_timeout;
++static int panic_on_timeout;
+ 
+ unsigned int nmi_watchdog = NMI_DEFAULT;
+ static unsigned int nmi_hz = HZ;
+@@ -78,22 +78,22 @@ static __init void nmi_cpu_busy(void *data)
+ }
+ #endif
+ 
+-int __init check_nmi_watchdog (void)
++int __init check_nmi_watchdog(void)
+ {
+-	int *counts;
++	int *prev_nmi_count;
+ 	int cpu;
+ 
+-	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED)) 
++	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
+ 		return 0;
+ 
+ 	if (!atomic_read(&nmi_active))
+ 		return 0;
+ 
+-	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
+-	if (!counts)
++	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
++	if (!prev_nmi_count)
+ 		return -1;
+ 
+-	printk(KERN_INFO "testing NMI watchdog ... ");
++	printk(KERN_INFO "Testing NMI watchdog ... ");
+ 
+ #ifdef CONFIG_SMP
+ 	if (nmi_watchdog == NMI_LOCAL_APIC)
+@@ -101,30 +101,29 @@ int __init check_nmi_watchdog (void)
+ #endif
+ 
+ 	for (cpu = 0; cpu < NR_CPUS; cpu++)
+-		counts[cpu] = cpu_pda(cpu)->__nmi_count;
++		prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
+ 	local_irq_enable();
+ 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
+ 
+ 	for_each_online_cpu(cpu) {
+ 		if (!per_cpu(wd_enabled, cpu))
+ 			continue;
+-		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
++		if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
+ 			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
+ 			       "appears to be stuck (%d->%d)!\n",
+-			       cpu,
+-			       counts[cpu],
+-			       cpu_pda(cpu)->__nmi_count);
++				cpu,
++				prev_nmi_count[cpu],
++				cpu_pda(cpu)->__nmi_count);
+ 			per_cpu(wd_enabled, cpu) = 0;
+ 			atomic_dec(&nmi_active);
+ 		}
+ 	}
++	endflag = 1;
+ 	if (!atomic_read(&nmi_active)) {
+-		kfree(counts);
++		kfree(prev_nmi_count);
+ 		atomic_set(&nmi_active, -1);
+-		endflag = 1;
+ 		return -1;
+ 	}
+-	endflag = 1;
+ 	printk("OK.\n");
+ 
+ 	/* now that we know it works we can reduce NMI frequency to
+@@ -132,11 +131,11 @@ int __init check_nmi_watchdog (void)
+ 	if (nmi_watchdog == NMI_LOCAL_APIC)
+ 		nmi_hz = lapic_adjust_nmi_hz(1);
+ 
+-	kfree(counts);
++	kfree(prev_nmi_count);
+ 	return 0;
+ }
+ 
+-int __init setup_nmi_watchdog(char *str)
++static int __init setup_nmi_watchdog(char *str)
+ {
+ 	int nmi;
+ 
+@@ -159,34 +158,6 @@ int __init setup_nmi_watchdog(char *str)
+ 
+ __setup("nmi_watchdog=", setup_nmi_watchdog);
+ 
+-
+-static void __acpi_nmi_disable(void *__unused)
+-{
+-	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+-}
+-
+-/*
+- * Disable timer based NMIs on all CPUs:
+- */
+-void acpi_nmi_disable(void)
+-{
+-	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+-		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+-}
+-
+-static void __acpi_nmi_enable(void *__unused)
+-{
+-	apic_write(APIC_LVT0, APIC_DM_NMI);
+-}
+-
+-/*
+- * Enable timer based NMIs on all CPUs:
+- */
+-void acpi_nmi_enable(void)
+-{
+-	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+-		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+-}
+ #ifdef CONFIG_PM
+ 
+ static int nmi_pm_active; /* nmi_active before suspend */
+@@ -211,13 +182,13 @@ static int lapic_nmi_resume(struct sys_device *dev)
+ }
+ 
+ static struct sysdev_class nmi_sysclass = {
+-	set_kset_name("lapic_nmi"),
++	.name		= "lapic_nmi",
+ 	.resume		= lapic_nmi_resume,
+ 	.suspend	= lapic_nmi_suspend,
+ };
+ 
+ static struct sys_device device_lapic_nmi = {
+-	.id		= 0,
++	.id	= 0,
+ 	.cls	= &nmi_sysclass,
+ };
+ 
+@@ -231,7 +202,7 @@ static int __init init_lapic_nmi_sysfs(void)
+ 	if (nmi_watchdog != NMI_LOCAL_APIC)
+ 		return 0;
+ 
+-	if ( atomic_read(&nmi_active) < 0 )
++	if (atomic_read(&nmi_active) < 0)
+ 		return 0;
+ 
+ 	error = sysdev_class_register(&nmi_sysclass);
+@@ -244,9 +215,37 @@ late_initcall(init_lapic_nmi_sysfs);
+ 
+ #endif	/* CONFIG_PM */
+ 
++static void __acpi_nmi_enable(void *__unused)
++{
++	apic_write(APIC_LVT0, APIC_DM_NMI);
++}
++
++/*
++ * Enable timer based NMIs on all CPUs:
++ */
++void acpi_nmi_enable(void)
++{
++	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
++		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
++}
++
++static void __acpi_nmi_disable(void *__unused)
++{
++	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
++}
++
++/*
++ * Disable timer based NMIs on all CPUs:
++ */
++void acpi_nmi_disable(void)
++{
++	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
++		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
++}
++
+ void setup_apic_nmi_watchdog(void *unused)
+ {
+-	if (__get_cpu_var(wd_enabled) == 1)
++	if (__get_cpu_var(wd_enabled))
+ 		return;
+ 
+ 	/* cheap hack to support suspend/resume */
+@@ -311,8 +310,9 @@ void touch_nmi_watchdog(void)
+ 		}
+ 	}
+ 
+- 	touch_softlockup_watchdog();
++	touch_softlockup_watchdog();
+ }
++EXPORT_SYMBOL(touch_nmi_watchdog);
+ 
+ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
+ {
+@@ -479,4 +479,3 @@ void __trigger_all_cpu_backtrace(void)
+ 
+ EXPORT_SYMBOL(nmi_active);
+ EXPORT_SYMBOL(nmi_watchdog);
+-EXPORT_SYMBOL(touch_nmi_watchdog);
+diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
+index 9000d82..e65281b 100644
+--- a/arch/x86/kernel/numaq_32.c
++++ b/arch/x86/kernel/numaq_32.c
+@@ -82,7 +82,7 @@ static int __init numaq_tsc_disable(void)
+ {
+ 	if (num_online_nodes() > 1) {
+ 		printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
+-		tsc_disable = 1;
++		setup_clear_cpu_cap(X86_FEATURE_TSC);
+ 	}
+ 	return 0;
+ }
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
 new file mode 100644
-index 0000000..cefaf8b
+index 0000000..075962c
 --- /dev/null
-+++ b/arch/x86/crypto/twofish_glue.c
-@@ -0,0 +1,97 @@
++++ b/arch/x86/kernel/paravirt.c
+@@ -0,0 +1,440 @@
++/*  Paravirtualization interfaces
++    Copyright (C) 2006 Rusty Russell IBM Corporation
++
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++
++    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
++*/
++
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/bcd.h>
++#include <linux/highmem.h>
++
++#include <asm/bug.h>
++#include <asm/paravirt.h>
++#include <asm/desc.h>
++#include <asm/setup.h>
++#include <asm/arch_hooks.h>
++#include <asm/time.h>
++#include <asm/irq.h>
++#include <asm/delay.h>
++#include <asm/fixmap.h>
++#include <asm/apic.h>
++#include <asm/tlbflush.h>
++#include <asm/timer.h>
++
++/* nop stub */
++void _paravirt_nop(void)
++{
++}
++
++static void __init default_banner(void)
++{
++	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
++	       pv_info.name);
++}
++
++char *memory_setup(void)
++{
++	return pv_init_ops.memory_setup();
++}
++
++/* Simple instruction patching code. */
++#define DEF_NATIVE(ops, name, code)					\
++	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
++	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
++
++/* Undefined instruction for dealing with missing ops pointers. */
++static const unsigned char ud2a[] = { 0x0f, 0x0b };
++
++unsigned paravirt_patch_nop(void)
++{
++	return 0;
++}
++
++unsigned paravirt_patch_ignore(unsigned len)
++{
++	return len;
++}
++
++struct branch {
++	unsigned char opcode;
++	u32 delta;
++} __attribute__((packed));
++
++unsigned paravirt_patch_call(void *insnbuf,
++			     const void *target, u16 tgt_clobbers,
++			     unsigned long addr, u16 site_clobbers,
++			     unsigned len)
++{
++	struct branch *b = insnbuf;
++	unsigned long delta = (unsigned long)target - (addr+5);
++
++	if (tgt_clobbers & ~site_clobbers)
++		return len;	/* target would clobber too much for this site */
++	if (len < 5)
++		return len;	/* call too long for patch site */
++
++	b->opcode = 0xe8; /* call */
++	b->delta = delta;
++	BUILD_BUG_ON(sizeof(*b) != 5);
++
++	return 5;
++}
++
++unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
++			    unsigned long addr, unsigned len)
++{
++	struct branch *b = insnbuf;
++	unsigned long delta = (unsigned long)target - (addr+5);
++
++	if (len < 5)
++		return len;	/* call too long for patch site */
++
++	b->opcode = 0xe9;	/* jmp */
++	b->delta = delta;
++
++	return 5;
++}
++
++/* Neat trick to map patch type back to the call within the
++ * corresponding structure. */
++static void *get_call_destination(u8 type)
++{
++	struct paravirt_patch_template tmpl = {
++		.pv_init_ops = pv_init_ops,
++		.pv_time_ops = pv_time_ops,
++		.pv_cpu_ops = pv_cpu_ops,
++		.pv_irq_ops = pv_irq_ops,
++		.pv_apic_ops = pv_apic_ops,
++		.pv_mmu_ops = pv_mmu_ops,
++	};
++	return *((void **)&tmpl + type);
++}
++
++unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
++				unsigned long addr, unsigned len)
++{
++	void *opfunc = get_call_destination(type);
++	unsigned ret;
++
++	if (opfunc == NULL)
++		/* If there's no function, patch it with a ud2a (BUG) */
++		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
++	else if (opfunc == paravirt_nop)
++		/* If the operation is a nop, then nop the callsite */
++		ret = paravirt_patch_nop();
++	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
++		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret))
++		/* If operation requires a jmp, then jmp */
++		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
++	else
++		/* Otherwise call the function; assume target could
++		   clobber any caller-save reg */
++		ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
++					  addr, clobbers, len);
++
++	return ret;
++}
++
++unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
++			      const char *start, const char *end)
++{
++	unsigned insn_len = end - start;
++
++	if (insn_len > len || start == NULL)
++		insn_len = len;
++	else
++		memcpy(insnbuf, start, insn_len);
++
++	return insn_len;
++}
++
++void init_IRQ(void)
++{
++	pv_irq_ops.init_IRQ();
++}
++
++static void native_flush_tlb(void)
++{
++	__native_flush_tlb();
++}
++
 +/*
-+ * Glue Code for assembler optimized version of TWOFISH
-+ *
-+ * Originally Twofish for GPG
-+ * By Matthew Skala <mskala at ansuz.sooke.bc.ca>, July 26, 1998
-+ * 256-bit key length added March 20, 1999
-+ * Some modifications to reduce the text size by Werner Koch, April, 1998
-+ * Ported to the kerneli patch by Marc Mutz <Marc at Mutz.com>
-+ * Ported to CryptoAPI by Colin Slater <hoho at tacomeat.net>
-+ *
-+ * The original author has disclaimed all copyright interest in this
-+ * code and thus put it in the public domain. The subsequent authors
-+ * have put this under the GNU General Public License.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
-+ * USA
-+ *
-+ * This code is a "clean room" implementation, written from the paper
-+ * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
-+ * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
-+ * through http://www.counterpane.com/twofish.html
++ * Global pages have to be flushed a bit differently. Not a real
++ * performance problem because this does not happen often.
++ */
++static void native_flush_tlb_global(void)
++{
++	__native_flush_tlb_global();
++}
++
++static void native_flush_tlb_single(unsigned long addr)
++{
++	__native_flush_tlb_single(addr);
++}
++
++/* These are in entry.S */
++extern void native_iret(void);
++extern void native_irq_enable_syscall_ret(void);
++
++static int __init print_banner(void)
++{
++	pv_init_ops.banner();
++	return 0;
++}
++core_initcall(print_banner);
++
++static struct resource reserve_ioports = {
++	.start = 0,
++	.end = IO_SPACE_LIMIT,
++	.name = "paravirt-ioport",
++	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
++};
++
++static struct resource reserve_iomem = {
++	.start = 0,
++	.end = -1,
++	.name = "paravirt-iomem",
++	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
++};
++
++/*
++ * Reserve the whole legacy IO space to prevent any legacy drivers
++ * from wasting time probing for their hardware.  This is a fairly
++ * brute-force approach to disabling all non-virtual drivers.
 + *
-+ * For background information on multiplication in finite fields, used for
-+ * the matrix operations in the key schedule, see the book _Contemporary
-+ * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
-+ * Third Edition.
++ * Note that this must be called very early to have any effect.
 + */
++int paravirt_disable_iospace(void)
++{
++	int ret;
 +
-+#include <crypto/twofish.h>
-+#include <linux/crypto.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/types.h>
++	ret = request_resource(&ioport_resource, &reserve_ioports);
++	if (ret == 0) {
++		ret = request_resource(&iomem_resource, &reserve_iomem);
++		if (ret)
++			release_resource(&reserve_ioports);
++	}
++
++	return ret;
++}
++
++static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
++
++static inline void enter_lazy(enum paravirt_lazy_mode mode)
++{
++	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
++	BUG_ON(preemptible());
++
++	__get_cpu_var(paravirt_lazy_mode) = mode;
++}
++
++void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
++{
++	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
++	BUG_ON(preemptible());
++
++	__get_cpu_var(paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
++}
++
++void paravirt_enter_lazy_mmu(void)
++{
++	enter_lazy(PARAVIRT_LAZY_MMU);
++}
++
++void paravirt_leave_lazy_mmu(void)
++{
++	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
++}
++
++void paravirt_enter_lazy_cpu(void)
++{
++	enter_lazy(PARAVIRT_LAZY_CPU);
++}
++
++void paravirt_leave_lazy_cpu(void)
++{
++	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
++}
++
++enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
++{
++	return __get_cpu_var(paravirt_lazy_mode);
++}
++
++struct pv_info pv_info = {
++	.name = "bare hardware",
++	.paravirt_enabled = 0,
++	.kernel_rpl = 0,
++	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
++};
++
++struct pv_init_ops pv_init_ops = {
++	.patch = native_patch,
++	.banner = default_banner,
++	.arch_setup = paravirt_nop,
++	.memory_setup = machine_specific_memory_setup,
++};
++
++struct pv_time_ops pv_time_ops = {
++	.time_init = hpet_time_init,
++	.get_wallclock = native_get_wallclock,
++	.set_wallclock = native_set_wallclock,
++	.sched_clock = native_sched_clock,
++	.get_cpu_khz = native_calculate_cpu_khz,
++};
++
++struct pv_irq_ops pv_irq_ops = {
++	.init_IRQ = native_init_IRQ,
++	.save_fl = native_save_fl,
++	.restore_fl = native_restore_fl,
++	.irq_disable = native_irq_disable,
++	.irq_enable = native_irq_enable,
++	.safe_halt = native_safe_halt,
++	.halt = native_halt,
++};
++
++struct pv_cpu_ops pv_cpu_ops = {
++	.cpuid = native_cpuid,
++	.get_debugreg = native_get_debugreg,
++	.set_debugreg = native_set_debugreg,
++	.clts = native_clts,
++	.read_cr0 = native_read_cr0,
++	.write_cr0 = native_write_cr0,
++	.read_cr4 = native_read_cr4,
++	.read_cr4_safe = native_read_cr4_safe,
++	.write_cr4 = native_write_cr4,
++#ifdef CONFIG_X86_64
++	.read_cr8 = native_read_cr8,
++	.write_cr8 = native_write_cr8,
++#endif
++	.wbinvd = native_wbinvd,
++	.read_msr = native_read_msr_safe,
++	.write_msr = native_write_msr_safe,
++	.read_tsc = native_read_tsc,
++	.read_pmc = native_read_pmc,
++	.read_tscp = native_read_tscp,
++	.load_tr_desc = native_load_tr_desc,
++	.set_ldt = native_set_ldt,
++	.load_gdt = native_load_gdt,
++	.load_idt = native_load_idt,
++	.store_gdt = native_store_gdt,
++	.store_idt = native_store_idt,
++	.store_tr = native_store_tr,
++	.load_tls = native_load_tls,
++	.write_ldt_entry = native_write_ldt_entry,
++	.write_gdt_entry = native_write_gdt_entry,
++	.write_idt_entry = native_write_idt_entry,
++	.load_sp0 = native_load_sp0,
 +
-+asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-+asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
++	.irq_enable_syscall_ret = native_irq_enable_syscall_ret,
++	.iret = native_iret,
++	.swapgs = native_swapgs,
 +
-+static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-+{
-+	twofish_enc_blk(tfm, dst, src);
-+}
++	.set_iopl_mask = native_set_iopl_mask,
++	.io_delay = native_io_delay,
 +
-+static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-+{
-+	twofish_dec_blk(tfm, dst, src);
-+}
++	.lazy_mode = {
++		.enter = paravirt_nop,
++		.leave = paravirt_nop,
++	},
++};
 +
-+static struct crypto_alg alg = {
-+	.cra_name		=	"twofish",
-+	.cra_driver_name	=	"twofish-asm",
-+	.cra_priority		=	200,
-+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
-+	.cra_blocksize		=	TF_BLOCK_SIZE,
-+	.cra_ctxsize		=	sizeof(struct twofish_ctx),
-+	.cra_alignmask		=	3,
-+	.cra_module		=	THIS_MODULE,
-+	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
-+	.cra_u			=	{
-+		.cipher = {
-+			.cia_min_keysize	=	TF_MIN_KEY_SIZE,
-+			.cia_max_keysize	=	TF_MAX_KEY_SIZE,
-+			.cia_setkey		=	twofish_setkey,
-+			.cia_encrypt		=	twofish_encrypt,
-+			.cia_decrypt		=	twofish_decrypt
-+		}
-+	}
++struct pv_apic_ops pv_apic_ops = {
++#ifdef CONFIG_X86_LOCAL_APIC
++	.apic_write = native_apic_write,
++	.apic_write_atomic = native_apic_write_atomic,
++	.apic_read = native_apic_read,
++	.setup_boot_clock = setup_boot_APIC_clock,
++	.setup_secondary_clock = setup_secondary_APIC_clock,
++	.startup_ipi_hook = paravirt_nop,
++#endif
 +};
 +
-+static int __init init(void)
-+{
-+	return crypto_register_alg(&alg);
-+}
++struct pv_mmu_ops pv_mmu_ops = {
++#ifndef CONFIG_X86_64
++	.pagetable_setup_start = native_pagetable_setup_start,
++	.pagetable_setup_done = native_pagetable_setup_done,
++#endif
 +
-+static void __exit fini(void)
-+{
-+	crypto_unregister_alg(&alg);
-+}
++	.read_cr2 = native_read_cr2,
++	.write_cr2 = native_write_cr2,
++	.read_cr3 = native_read_cr3,
++	.write_cr3 = native_write_cr3,
 +
-+module_init(init);
-+module_exit(fini);
++	.flush_tlb_user = native_flush_tlb,
++	.flush_tlb_kernel = native_flush_tlb_global,
++	.flush_tlb_single = native_flush_tlb_single,
++	.flush_tlb_others = native_flush_tlb_others,
 +
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
-+MODULE_ALIAS("twofish");
-+MODULE_ALIAS("twofish-asm");
-diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
-index e2edda2..52d0ccf 100644
---- a/arch/x86/ia32/Makefile
-+++ b/arch/x86/ia32/Makefile
-@@ -2,9 +2,7 @@
- # Makefile for the ia32 kernel emulation subsystem.
- #
- 
--obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o tls32.o \
--	ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o \
--	mmap32.o
-+obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
- 
- sysv-$(CONFIG_SYSVIPC) := ipc32.o
- obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
-@@ -13,40 +11,3 @@ obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
- 
- audit-class-$(CONFIG_AUDIT) := audit.o
- obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
--
--$(obj)/syscall32_syscall.o: \
--	$(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
++	.alloc_pt = paravirt_nop,
++	.alloc_pd = paravirt_nop,
++	.alloc_pd_clone = paravirt_nop,
++	.release_pt = paravirt_nop,
++	.release_pd = paravirt_nop,
++
++	.set_pte = native_set_pte,
++	.set_pte_at = native_set_pte_at,
++	.set_pmd = native_set_pmd,
++	.pte_update = paravirt_nop,
++	.pte_update_defer = paravirt_nop,
++
++#ifdef CONFIG_HIGHPTE
++	.kmap_atomic_pte = kmap_atomic,
++#endif
++
++#if PAGETABLE_LEVELS >= 3
++#ifdef CONFIG_X86_PAE
++	.set_pte_atomic = native_set_pte_atomic,
++	.set_pte_present = native_set_pte_present,
++	.pte_clear = native_pte_clear,
++	.pmd_clear = native_pmd_clear,
++#endif
++	.set_pud = native_set_pud,
++	.pmd_val = native_pmd_val,
++	.make_pmd = native_make_pmd,
++
++#if PAGETABLE_LEVELS == 4
++	.pud_val = native_pud_val,
++	.make_pud = native_make_pud,
++	.set_pgd = native_set_pgd,
++#endif
++#endif /* PAGETABLE_LEVELS >= 3 */
++
++	.pte_val = native_pte_val,
++	.pgd_val = native_pgd_val,
++
++	.make_pte = native_make_pte,
++	.make_pgd = native_make_pgd,
++
++	.dup_mmap = paravirt_nop,
++	.exit_mmap = paravirt_nop,
++	.activate_mm = paravirt_nop,
++
++	.lazy_mode = {
++		.enter = paravirt_nop,
++		.leave = paravirt_nop,
++	},
++};
++
++EXPORT_SYMBOL_GPL(pv_time_ops);
++EXPORT_SYMBOL    (pv_cpu_ops);
++EXPORT_SYMBOL    (pv_mmu_ops);
++EXPORT_SYMBOL_GPL(pv_apic_ops);
++EXPORT_SYMBOL_GPL(pv_info);
++EXPORT_SYMBOL    (pv_irq_ops);
+diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
+deleted file mode 100644
+index f500079..0000000
+--- a/arch/x86/kernel/paravirt_32.c
++++ /dev/null
+@@ -1,472 +0,0 @@
+-/*  Paravirtualization interfaces
+-    Copyright (C) 2006 Rusty Russell IBM Corporation
 -
--# Teach kbuild about targets
--targets := $(foreach F,$(addprefix vsyscall-,sysenter syscall),\
--		     $F.o $F.so $F.so.dbg)
+-    This program is free software; you can redistribute it and/or modify
+-    it under the terms of the GNU General Public License as published by
+-    the Free Software Foundation; either version 2 of the License, or
+-    (at your option) any later version.
 -
--# The DSO images are built using a special linker script
--quiet_cmd_syscall = SYSCALL $@
--      cmd_syscall = $(CC) -m32 -nostdlib -shared \
--			  $(call ld-option, -Wl$(comma)--hash-style=sysv) \
--			   -Wl,-soname=linux-gate.so.1 -o $@ \
--			   -Wl,-T,$(filter-out FORCE,$^)
+-    This program is distributed in the hope that it will be useful,
+-    but WITHOUT ANY WARRANTY; without even the implied warranty of
+-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-    GNU General Public License for more details.
 -
--$(obj)/%.so: OBJCOPYFLAGS := -S
--$(obj)/%.so: $(obj)/%.so.dbg FORCE
--	$(call if_changed,objcopy)
+-    You should have received a copy of the GNU General Public License
+-    along with this program; if not, write to the Free Software
+-    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+-*/
+-#include <linux/errno.h>
+-#include <linux/module.h>
+-#include <linux/efi.h>
+-#include <linux/bcd.h>
+-#include <linux/highmem.h>
 -
--$(obj)/vsyscall-sysenter.so.dbg $(obj)/vsyscall-syscall.so.dbg: \
--$(obj)/vsyscall-%.so.dbg: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
--	$(call if_changed,syscall)
+-#include <asm/bug.h>
+-#include <asm/paravirt.h>
+-#include <asm/desc.h>
+-#include <asm/setup.h>
+-#include <asm/arch_hooks.h>
+-#include <asm/time.h>
+-#include <asm/irq.h>
+-#include <asm/delay.h>
+-#include <asm/fixmap.h>
+-#include <asm/apic.h>
+-#include <asm/tlbflush.h>
+-#include <asm/timer.h>
 -
--AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
--AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
+-/* nop stub */
+-void _paravirt_nop(void)
+-{
+-}
 -
--vdsos := vdso32-sysenter.so vdso32-syscall.so
+-static void __init default_banner(void)
+-{
+-	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+-	       pv_info.name);
+-}
 -
--quiet_cmd_vdso_install = INSTALL $@
--      cmd_vdso_install = cp $(@:vdso32-%.so=$(obj)/vsyscall-%.so.dbg) \
--			    $(MODLIB)/vdso/$@
+-char *memory_setup(void)
+-{
+-	return pv_init_ops.memory_setup();
+-}
 -
--$(vdsos):
--	@mkdir -p $(MODLIB)/vdso
--	$(call cmd,vdso_install)
+-/* Simple instruction patching code. */
+-#define DEF_NATIVE(ops, name, code)					\
+-	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
+-	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
 -
--vdso_install: $(vdsos)
-diff --git a/arch/x86/ia32/audit.c b/arch/x86/ia32/audit.c
-index 91b7b59..5d7b381 100644
---- a/arch/x86/ia32/audit.c
-+++ b/arch/x86/ia32/audit.c
-@@ -27,7 +27,7 @@ unsigned ia32_signal_class[] = {
- 
- int ia32_classify_syscall(unsigned syscall)
- {
--	switch(syscall) {
-+	switch (syscall) {
- 	case __NR_open:
- 		return 2;
- 	case __NR_openat:
-diff --git a/arch/x86/ia32/fpu32.c b/arch/x86/ia32/fpu32.c
-deleted file mode 100644
-index 2c8209a..0000000
---- a/arch/x86/ia32/fpu32.c
-+++ /dev/null
-@@ -1,183 +0,0 @@
--/* 
-- * Copyright 2002 Andi Kleen, SuSE Labs.
-- * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes.
-- * This is used for ptrace, signals and coredumps in 32bit emulation.
-- */ 
+-DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
+-DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
+-DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
+-DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
+-DEF_NATIVE(pv_cpu_ops, iret, "iret");
+-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
+-DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
+-DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
+-DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
+-DEF_NATIVE(pv_cpu_ops, clts, "clts");
+-DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 -
--#include <linux/sched.h>
--#include <asm/sigcontext32.h>
--#include <asm/processor.h>
--#include <asm/uaccess.h>
--#include <asm/i387.h>
+-/* Undefined instruction for dealing with missing ops pointers. */
+-static const unsigned char ud2a[] = { 0x0f, 0x0b };
 -
--static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
+-static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+-			     unsigned long addr, unsigned len)
 -{
--	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
-- 
--	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
--        tmp = ~twd;
--        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
--        /* and move the valid bits to the lower byte. */
--        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
--        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
--        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
--        return tmp;
--}
+-	const unsigned char *start, *end;
+-	unsigned ret;
 -
--static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
--{
--	struct _fpxreg *st = NULL;
--	unsigned long tos = (fxsave->swd >> 11) & 7;
--	unsigned long twd = (unsigned long) fxsave->twd;
--	unsigned long tag;
--	unsigned long ret = 0xffff0000;
--	int i;
+-	switch(type) {
+-#define SITE(ops, x)						\
+-	case PARAVIRT_PATCH(ops.x):				\
+-		start = start_##ops##_##x;			\
+-		end = end_##ops##_##x;				\
+-		goto patch_site
 -
--#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
+-	SITE(pv_irq_ops, irq_disable);
+-	SITE(pv_irq_ops, irq_enable);
+-	SITE(pv_irq_ops, restore_fl);
+-	SITE(pv_irq_ops, save_fl);
+-	SITE(pv_cpu_ops, iret);
+-	SITE(pv_cpu_ops, irq_enable_sysexit);
+-	SITE(pv_mmu_ops, read_cr2);
+-	SITE(pv_mmu_ops, read_cr3);
+-	SITE(pv_mmu_ops, write_cr3);
+-	SITE(pv_cpu_ops, clts);
+-	SITE(pv_cpu_ops, read_tsc);
+-#undef SITE
 -
--	for (i = 0 ; i < 8 ; i++) {
--		if (twd & 0x1) {
--			st = FPREG_ADDR( fxsave, (i - tos) & 7 );
+-	patch_site:
+-		ret = paravirt_patch_insns(ibuf, len, start, end);
+-		break;
 -
--			switch (st->exponent & 0x7fff) {
--			case 0x7fff:
--				tag = 2;		/* Special */
--				break;
--			case 0x0000:
--				if ( !st->significand[0] &&
--				     !st->significand[1] &&
--				     !st->significand[2] &&
--				     !st->significand[3] ) {
--					tag = 1;	/* Zero */
--				} else {
--					tag = 2;	/* Special */
--				}
--				break;
--			default:
--				if (st->significand[3] & 0x8000) {
--					tag = 0;	/* Valid */
--				} else {
--					tag = 2;	/* Special */
--				}
--				break;
--			}
--		} else {
--			tag = 3;			/* Empty */
--		}
--		ret |= (tag << (2 * i));
--		twd = twd >> 1;
+-	default:
+-		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
+-		break;
 -	}
+-
 -	return ret;
 -}
 -
--
--static inline int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave,
--					 struct _fpstate_ia32 __user *buf)
+-unsigned paravirt_patch_nop(void)
 -{
--	struct _fpxreg *to;
--	struct _fpreg __user *from;
--	int i;
--	u32 v;
--	int err = 0;
--
--#define G(num,val) err |= __get_user(val, num + (u32 __user *)buf)
--	G(0, fxsave->cwd);
--	G(1, fxsave->swd);
--	G(2, fxsave->twd);
--	fxsave->twd = twd_i387_to_fxsr(fxsave->twd);
--	G(3, fxsave->rip);
--	G(4, v);
--	fxsave->fop = v>>16;	/* cs ignored */
--	G(5, fxsave->rdp);
--	/* 6: ds ignored */
--#undef G
--	if (err) 
--		return -1; 
--
--	to = (struct _fpxreg *)&fxsave->st_space[0];
--	from = &buf->_st[0];
--	for (i = 0 ; i < 8 ; i++, to++, from++) {
--		if (__copy_from_user(to, from, sizeof(*from)))
--			return -1;
--	}
 -	return 0;
 -}
 -
--
--static inline int convert_fxsr_to_user(struct _fpstate_ia32 __user *buf,
--				       struct i387_fxsave_struct *fxsave,
--				       struct pt_regs *regs,
--				       struct task_struct *tsk)
+-unsigned paravirt_patch_ignore(unsigned len)
 -{
--	struct _fpreg __user *to;
--	struct _fpxreg *from;
--	int i;
--	u16 cs,ds; 
--	int err = 0; 
+-	return len;
+-}
 -
--	if (tsk == current) {
--		/* should be actually ds/cs at fpu exception time,
--		   but that information is not available in 64bit mode. */
--		asm("movw %%ds,%0 " : "=r" (ds)); 
--		asm("movw %%cs,%0 " : "=r" (cs)); 		
--	} else { /* ptrace. task has stopped. */
--		ds = tsk->thread.ds;
--		cs = regs->cs;
--	} 
+-struct branch {
+-	unsigned char opcode;
+-	u32 delta;
+-} __attribute__((packed));
 -
--#define P(num,val) err |= __put_user(val, num + (u32 __user *)buf)
--	P(0, (u32)fxsave->cwd | 0xffff0000);
--	P(1, (u32)fxsave->swd | 0xffff0000);
--	P(2, twd_fxsr_to_i387(fxsave));
--	P(3, (u32)fxsave->rip);
--	P(4,  cs | ((u32)fxsave->fop) << 16); 
--	P(5, fxsave->rdp);
--	P(6, 0xffff0000 | ds);
--#undef P
+-unsigned paravirt_patch_call(void *insnbuf,
+-			     const void *target, u16 tgt_clobbers,
+-			     unsigned long addr, u16 site_clobbers,
+-			     unsigned len)
+-{
+-	struct branch *b = insnbuf;
+-	unsigned long delta = (unsigned long)target - (addr+5);
 -
--	if (err) 
--		return -1; 
+-	if (tgt_clobbers & ~site_clobbers)
+-		return len;	/* target would clobber too much for this site */
+-	if (len < 5)
+-		return len;	/* call too long for patch site */
 -
--	to = &buf->_st[0];
--	from = (struct _fpxreg *) &fxsave->st_space[0];
--	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
--		if (__copy_to_user(to, from, sizeof(*to)))
--			return -1;
--	}
--	return 0;
--}
+-	b->opcode = 0xe8; /* call */
+-	b->delta = delta;
+-	BUILD_BUG_ON(sizeof(*b) != 5);
 -
--int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave) 
--{ 
--	clear_fpu(tsk);
--	if (!fsave) { 
--		if (__copy_from_user(&tsk->thread.i387.fxsave, 
--				     &buf->_fxsr_env[0],
--				     sizeof(struct i387_fxsave_struct)))
--			return -1;
--		tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
--		set_stopped_child_used_math(tsk);
--	} 
--	return convert_fxsr_from_user(&tsk->thread.i387.fxsave, buf);
--}  
+-	return 5;
+-}
 -
--int save_i387_ia32(struct task_struct *tsk, 
--		   struct _fpstate_ia32 __user *buf, 
--		   struct pt_regs *regs,
--		   int fsave)
+-unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+-			    unsigned long addr, unsigned len)
 -{
--	int err = 0;
+-	struct branch *b = insnbuf;
+-	unsigned long delta = (unsigned long)target - (addr+5);
 -
--	init_fpu(tsk);
--	if (convert_fxsr_to_user(buf, &tsk->thread.i387.fxsave, regs, tsk))
--		return -1;
--	if (fsave)
--		return 0;
--	err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
--	if (fsave) 
--		return err ? -1 : 1; 	
--	err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
--	err |= __copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
--			      sizeof(struct i387_fxsave_struct));
--	return err ? -1 : 1;
+-	if (len < 5)
+-		return len;	/* call too long for patch site */
+-
+-	b->opcode = 0xe9;	/* jmp */
+-	b->delta = delta;
+-
+-	return 5;
 -}
-diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
-index f82e1a9..e4c1207 100644
---- a/arch/x86/ia32/ia32_aout.c
-+++ b/arch/x86/ia32/ia32_aout.c
-@@ -25,6 +25,7 @@
- #include <linux/binfmts.h>
- #include <linux/personality.h>
- #include <linux/init.h>
-+#include <linux/jiffies.h>
- 
- #include <asm/system.h>
- #include <asm/uaccess.h>
-@@ -36,61 +37,67 @@
- #undef WARN_OLD
- #undef CORE_DUMP /* probably broken */
- 
--static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
--static int load_aout_library(struct file*);
-+static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
-+static int load_aout_library(struct file *);
- 
- #ifdef CORE_DUMP
--static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
-+static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
-+			  unsigned long limit);
- 
- /*
-  * fill in the user structure for a core dump..
-  */
--static void dump_thread32(struct pt_regs * regs, struct user32 * dump)
-+static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
- {
--	u32 fs,gs;
-+	u32 fs, gs;
- 
- /* changed the size calculations - should hopefully work better. lbt */
- 	dump->magic = CMAGIC;
- 	dump->start_code = 0;
--	dump->start_stack = regs->rsp & ~(PAGE_SIZE - 1);
-+	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
- 	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
--	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
-+	dump->u_dsize = ((unsigned long)
-+			 (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
- 	dump->u_dsize -= dump->u_tsize;
- 	dump->u_ssize = 0;
--	dump->u_debugreg[0] = current->thread.debugreg0;  
--	dump->u_debugreg[1] = current->thread.debugreg1;  
--	dump->u_debugreg[2] = current->thread.debugreg2;  
--	dump->u_debugreg[3] = current->thread.debugreg3;  
--	dump->u_debugreg[4] = 0;  
--	dump->u_debugreg[5] = 0;  
--	dump->u_debugreg[6] = current->thread.debugreg6;  
--	dump->u_debugreg[7] = current->thread.debugreg7;  
 -
--	if (dump->start_stack < 0xc0000000)
--		dump->u_ssize = ((unsigned long) (0xc0000000 - dump->start_stack)) >> PAGE_SHIFT;
+-/* Neat trick to map patch type back to the call within the
+- * corresponding structure. */
+-static void *get_call_destination(u8 type)
+-{
+-	struct paravirt_patch_template tmpl = {
+-		.pv_init_ops = pv_init_ops,
+-		.pv_time_ops = pv_time_ops,
+-		.pv_cpu_ops = pv_cpu_ops,
+-		.pv_irq_ops = pv_irq_ops,
+-		.pv_apic_ops = pv_apic_ops,
+-		.pv_mmu_ops = pv_mmu_ops,
+-	};
+-	return *((void **)&tmpl + type);
+-}
 -
--	dump->regs.ebx = regs->rbx;
--	dump->regs.ecx = regs->rcx;
--	dump->regs.edx = regs->rdx;
--	dump->regs.esi = regs->rsi;
--	dump->regs.edi = regs->rdi;
--	dump->regs.ebp = regs->rbp;
--	dump->regs.eax = regs->rax;
-+	dump->u_debugreg[0] = current->thread.debugreg0;
-+	dump->u_debugreg[1] = current->thread.debugreg1;
-+	dump->u_debugreg[2] = current->thread.debugreg2;
-+	dump->u_debugreg[3] = current->thread.debugreg3;
-+	dump->u_debugreg[4] = 0;
-+	dump->u_debugreg[5] = 0;
-+	dump->u_debugreg[6] = current->thread.debugreg6;
-+	dump->u_debugreg[7] = current->thread.debugreg7;
-+
-+	if (dump->start_stack < 0xc0000000) {
-+		unsigned long tmp;
-+
-+		tmp = (unsigned long) (0xc0000000 - dump->start_stack);
-+		dump->u_ssize = tmp >> PAGE_SHIFT;
-+	}
-+
-+	dump->regs.bx = regs->bx;
-+	dump->regs.cx = regs->cx;
-+	dump->regs.dx = regs->dx;
-+	dump->regs.si = regs->si;
-+	dump->regs.di = regs->di;
-+	dump->regs.bp = regs->bp;
-+	dump->regs.ax = regs->ax;
- 	dump->regs.ds = current->thread.ds;
- 	dump->regs.es = current->thread.es;
- 	asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs;
--	asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs; 
--	dump->regs.orig_eax = regs->orig_rax;
--	dump->regs.eip = regs->rip;
-+	asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs;
-+	dump->regs.orig_ax = regs->orig_ax;
-+	dump->regs.ip = regs->ip;
- 	dump->regs.cs = regs->cs;
--	dump->regs.eflags = regs->eflags;
--	dump->regs.esp = regs->rsp;
-+	dump->regs.flags = regs->flags;
-+	dump->regs.sp = regs->sp;
- 	dump->regs.ss = regs->ss;
- 
- #if 1 /* FIXME */
- 	dump->u_fpvalid = 0;
- #else
--	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-+	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
- #endif
- }
- 
-@@ -128,15 +135,19 @@ static int dump_write(struct file *file, const void *addr, int nr)
- 	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
- }
- 
--#define DUMP_WRITE(addr, nr)	\
-+#define DUMP_WRITE(addr, nr)			     \
- 	if (!dump_write(file, (void *)(addr), (nr))) \
- 		goto end_coredump;
- 
--#define DUMP_SEEK(offset) \
--if (file->f_op->llseek) { \
--	if (file->f_op->llseek(file,(offset),0) != (offset)) \
-- 		goto end_coredump; \
--} else file->f_pos = (offset)
-+#define DUMP_SEEK(offset)						\
-+	if (file->f_op->llseek) {					\
-+		if (file->f_op->llseek(file, (offset), 0) != (offset))	\
-+			goto end_coredump;				\
-+	} else								\
-+		file->f_pos = (offset)
-+
-+#define START_DATA()	(u.u_tsize << PAGE_SHIFT)
-+#define START_STACK(u)	(u.start_stack)
- 
- /*
-  * Routine writes a core dump image in the current directory.
-@@ -148,62 +159,70 @@ if (file->f_op->llseek) { \
-  * dumping of the process results in another error..
-  */
- 
--static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
-+static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
-+			  unsigned long limit)
- {
- 	mm_segment_t fs;
- 	int has_dumped = 0;
- 	unsigned long dump_start, dump_size;
- 	struct user32 dump;
--#       define START_DATA(u)	(u.u_tsize << PAGE_SHIFT)
--#       define START_STACK(u)   (u.start_stack)
- 
- 	fs = get_fs();
- 	set_fs(KERNEL_DS);
- 	has_dumped = 1;
- 	current->flags |= PF_DUMPCORE;
--       	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
--	dump.u_ar0 = (u32)(((unsigned long)(&dump.regs)) - ((unsigned long)(&dump)));
-+	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
-+	dump.u_ar0 = (u32)(((unsigned long)(&dump.regs)) -
-+			   ((unsigned long)(&dump)));
- 	dump.signal = signr;
- 	dump_thread32(regs, &dump);
- 
--/* If the size of the dump file exceeds the rlimit, then see what would happen
--   if we wrote the stack, but not the data area.  */
-+	/*
-+	 * If the size of the dump file exceeds the rlimit, then see
-+	 * what would happen if we wrote the stack, but not the data
-+	 * area.
-+	 */
- 	if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > limit)
- 		dump.u_dsize = 0;
- 
--/* Make sure we have enough room to write the stack and data areas. */
-+	/* Make sure we have enough room to write the stack and data areas. */
- 	if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
- 		dump.u_ssize = 0;
- 
--/* make sure we actually have a data and stack area to dump */
-+	/* make sure we actually have a data and stack area to dump */
- 	set_fs(USER_DS);
--	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump), dump.u_dsize << PAGE_SHIFT))
-+	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump),
-+		       dump.u_dsize << PAGE_SHIFT))
- 		dump.u_dsize = 0;
--	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump), dump.u_ssize << PAGE_SHIFT))
-+	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump),
-+		       dump.u_ssize << PAGE_SHIFT))
- 		dump.u_ssize = 0;
- 
- 	set_fs(KERNEL_DS);
--/* struct user */
--	DUMP_WRITE(&dump,sizeof(dump));
--/* Now dump all of the user data.  Include malloced stuff as well */
-+	/* struct user */
-+	DUMP_WRITE(&dump, sizeof(dump));
-+	/* Now dump all of the user data.  Include malloced stuff as well */
- 	DUMP_SEEK(PAGE_SIZE);
--/* now we start writing out the user space info */
-+	/* now we start writing out the user space info */
- 	set_fs(USER_DS);
--/* Dump the data area */
-+	/* Dump the data area */
- 	if (dump.u_dsize != 0) {
- 		dump_start = START_DATA(dump);
- 		dump_size = dump.u_dsize << PAGE_SHIFT;
--		DUMP_WRITE(dump_start,dump_size);
-+		DUMP_WRITE(dump_start, dump_size);
- 	}
--/* Now prepare to dump the stack area */
-+	/* Now prepare to dump the stack area */
- 	if (dump.u_ssize != 0) {
- 		dump_start = START_STACK(dump);
- 		dump_size = dump.u_ssize << PAGE_SHIFT;
--		DUMP_WRITE(dump_start,dump_size);
-+		DUMP_WRITE(dump_start, dump_size);
- 	}
--/* Finally dump the task struct.  Not be used by gdb, but could be useful */
-+	/*
-+	 * Finally dump the task struct.  Not be used by gdb, but
-+	 * could be useful
-+	 */
- 	set_fs(KERNEL_DS);
--	DUMP_WRITE(current,sizeof(*current));
-+	DUMP_WRITE(current, sizeof(*current));
- end_coredump:
- 	set_fs(fs);
- 	return has_dumped;
-@@ -217,35 +236,34 @@ end_coredump:
-  */
- static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
- {
--	u32 __user *argv;
--	u32 __user *envp;
--	u32 __user *sp;
--	int argc = bprm->argc;
--	int envc = bprm->envc;
-+	u32 __user *argv, *envp, *sp;
-+	int argc = bprm->argc, envc = bprm->envc;
- 
- 	sp = (u32 __user *) ((-(unsigned long)sizeof(u32)) & (unsigned long) p);
- 	sp -= envc+1;
- 	envp = sp;
- 	sp -= argc+1;
- 	argv = sp;
--	put_user((unsigned long) envp,--sp);
--	put_user((unsigned long) argv,--sp);
--	put_user(argc,--sp);
-+	put_user((unsigned long) envp, --sp);
-+	put_user((unsigned long) argv, --sp);
-+	put_user(argc, --sp);
- 	current->mm->arg_start = (unsigned long) p;
--	while (argc-->0) {
-+	while (argc-- > 0) {
- 		char c;
--		put_user((u32)(unsigned long)p,argv++);
-+
-+		put_user((u32)(unsigned long)p, argv++);
- 		do {
--			get_user(c,p++);
-+			get_user(c, p++);
- 		} while (c);
- 	}
- 	put_user(0, argv);
- 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
--	while (envc-->0) {
-+	while (envc-- > 0) {
- 		char c;
--		put_user((u32)(unsigned long)p,envp++);
-+
-+		put_user((u32)(unsigned long)p, envp++);
- 		do {
--			get_user(c,p++);
-+			get_user(c, p++);
- 		} while (c);
- 	}
- 	put_user(0, envp);
-@@ -257,20 +275,18 @@ static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
-  * These are the functions used to load a.out style executables and shared
-  * libraries.  There is no binary dependent code anywhere else.
-  */
+-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+-				unsigned long addr, unsigned len)
+-{
+-	void *opfunc = get_call_destination(type);
+-	unsigned ret;
 -
--static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
-+static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
- {
-+	unsigned long error, fd_offset, rlim;
- 	struct exec ex;
--	unsigned long error;
--	unsigned long fd_offset;
--	unsigned long rlim;
- 	int retval;
- 
- 	ex = *((struct exec *) bprm->buf);		/* exec-header */
- 	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
- 	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
- 	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
--	    i_size_read(bprm->file->f_path.dentry->d_inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
-+	    i_size_read(bprm->file->f_path.dentry->d_inode) <
-+	    ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
- 		return -ENOEXEC;
- 	}
- 
-@@ -291,13 +307,13 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
- 	if (retval)
- 		return retval;
- 
--	regs->cs = __USER32_CS; 
-+	regs->cs = __USER32_CS;
- 	regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
- 		regs->r13 = regs->r14 = regs->r15 = 0;
- 
- 	/* OK, This is the point of no return */
- 	set_personality(PER_LINUX);
--	set_thread_flag(TIF_IA32); 
-+	set_thread_flag(TIF_IA32);
- 	clear_thread_flag(TIF_ABI_PENDING);
- 
- 	current->mm->end_code = ex.a_text +
-@@ -311,7 +327,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
- 
- 	current->mm->mmap = NULL;
- 	compute_creds(bprm);
-- 	current->flags &= ~PF_FORKNOEXEC;
-+	current->flags &= ~PF_FORKNOEXEC;
- 
- 	if (N_MAGIC(ex) == OMAGIC) {
- 		unsigned long text_addr, map_size;
-@@ -338,30 +354,31 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
- 			send_sig(SIGKILL, current, 0);
- 			return error;
- 		}
--			 
-+
- 		flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data);
- 	} else {
- #ifdef WARN_OLD
- 		static unsigned long error_time, error_time2;
- 		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
--		    (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ)
--		{
-+		    (N_MAGIC(ex) != NMAGIC) &&
-+				time_after(jiffies, error_time2 + 5*HZ)) {
- 			printk(KERN_NOTICE "executable not page aligned\n");
- 			error_time2 = jiffies;
- 		}
- 
- 		if ((fd_offset & ~PAGE_MASK) != 0 &&
--		    (jiffies-error_time) > 5*HZ)
--		{
--			printk(KERN_WARNING 
--			       "fd_offset is not page aligned. Please convert program: %s\n",
-+			    time_after(jiffies, error_time + 5*HZ)) {
-+			printk(KERN_WARNING
-+			       "fd_offset is not page aligned. Please convert "
-+			       "program: %s\n",
- 			       bprm->file->f_path.dentry->d_name.name);
- 			error_time = jiffies;
- 		}
- #endif
- 
--		if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
-+		if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
- 			loff_t pos = fd_offset;
-+
- 			down_write(&current->mm->mmap_sem);
- 			do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
- 			up_write(&current->mm->mmap_sem);
-@@ -376,9 +393,10 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
- 
- 		down_write(&current->mm->mmap_sem);
- 		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
--			PROT_READ | PROT_EXEC,
--			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE | MAP_32BIT,
--			fd_offset);
-+				PROT_READ | PROT_EXEC,
-+				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
-+				MAP_EXECUTABLE | MAP_32BIT,
-+				fd_offset);
- 		up_write(&current->mm->mmap_sem);
- 
- 		if (error != N_TXTADDR(ex)) {
-@@ -387,9 +405,10 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
- 		}
- 
- 		down_write(&current->mm->mmap_sem);
-- 		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
-+		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
- 				PROT_READ | PROT_WRITE | PROT_EXEC,
--				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE | MAP_32BIT,
-+				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
-+				MAP_EXECUTABLE | MAP_32BIT,
- 				fd_offset + ex.a_text);
- 		up_write(&current->mm->mmap_sem);
- 		if (error != N_DATADDR(ex)) {
-@@ -403,9 +422,9 @@ beyond_if:
- 	set_brk(current->mm->start_brk, current->mm->brk);
- 
- 	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
--	if (retval < 0) { 
--		/* Someone check-me: is this error path enough? */ 
--		send_sig(SIGKILL, current, 0); 
-+	if (retval < 0) {
-+		/* Someone check-me: is this error path enough? */
-+		send_sig(SIGKILL, current, 0);
- 		return retval;
- 	}
- 
-@@ -414,10 +433,10 @@ beyond_if:
- 	/* start thread */
- 	asm volatile("movl %0,%%fs" :: "r" (0)); \
- 	asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
--	load_gs_index(0); 
--	(regs)->rip = ex.a_entry;
--	(regs)->rsp = current->mm->start_stack;
--	(regs)->eflags = 0x200;
-+	load_gs_index(0);
-+	(regs)->ip = ex.a_entry;
-+	(regs)->sp = current->mm->start_stack;
-+	(regs)->flags = 0x200;
- 	(regs)->cs = __USER32_CS;
- 	(regs)->ss = __USER32_DS;
- 	regs->r8 = regs->r9 = regs->r10 = regs->r11 =
-@@ -425,7 +444,7 @@ beyond_if:
- 	set_fs(USER_DS);
- 	if (unlikely(current->ptrace & PT_PTRACED)) {
- 		if (current->ptrace & PT_TRACE_EXEC)
--			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
-+			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
- 		else
- 			send_sig(SIGTRAP, current, 0);
- 	}
-@@ -434,9 +453,8 @@ beyond_if:
- 
- static int load_aout_library(struct file *file)
- {
--	struct inode * inode;
--	unsigned long bss, start_addr, len;
--	unsigned long error;
-+	struct inode *inode;
-+	unsigned long bss, start_addr, len, error;
- 	int retval;
- 	struct exec ex;
- 
-@@ -450,7 +468,8 @@ static int load_aout_library(struct file *file)
- 	/* We come in here for the regular a.out style of shared libraries */
- 	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
- 	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
--	    i_size_read(inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
-+	    i_size_read(inode) <
-+	    ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
- 		goto out;
- 	}
- 
-@@ -467,10 +486,10 @@ static int load_aout_library(struct file *file)
- 
- #ifdef WARN_OLD
- 		static unsigned long error_time;
--		if ((jiffies-error_time) > 5*HZ)
--		{
--			printk(KERN_WARNING 
--			       "N_TXTOFF is not page aligned. Please convert library: %s\n",
-+		if (time_after(jiffies, error_time + 5*HZ)) {
-+			printk(KERN_WARNING
-+			       "N_TXTOFF is not page aligned. Please convert "
-+			       "library: %s\n",
- 			       file->f_path.dentry->d_name.name);
- 			error_time = jiffies;
- 		}
-@@ -478,11 +497,12 @@ static int load_aout_library(struct file *file)
- 		down_write(&current->mm->mmap_sem);
- 		do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
- 		up_write(&current->mm->mmap_sem);
--		
-+
- 		file->f_op->read(file, (char __user *)start_addr,
- 			ex.a_text + ex.a_data, &pos);
- 		flush_icache_range((unsigned long) start_addr,
--				   (unsigned long) start_addr + ex.a_text + ex.a_data);
-+				   (unsigned long) start_addr + ex.a_text +
-+				   ex.a_data);
- 
- 		retval = 0;
- 		goto out;
-diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
-deleted file mode 100644
-index 55822d2..0000000
---- a/arch/x86/ia32/ia32_binfmt.c
-+++ /dev/null
-@@ -1,285 +0,0 @@
--/* 
-- * Written 2000,2002 by Andi Kleen. 
-- * 
-- * Loosely based on the sparc64 and IA64 32bit emulation loaders.
-- * This tricks binfmt_elf.c into loading 32bit binaries using lots 
-- * of ugly preprocessor tricks. Talk about very very poor man's inheritance.
-- */ 
+-	if (opfunc == NULL)
+-		/* If there's no function, patch it with a ud2a (BUG) */
+-		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+-	else if (opfunc == paravirt_nop)
+-		/* If the operation is a nop, then nop the callsite */
+-		ret = paravirt_patch_nop();
+-	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+-		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
+-		/* If operation requires a jmp, then jmp */
+-		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
+-	else
+-		/* Otherwise call the function; assume target could
+-		   clobber any caller-save reg */
+-		ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
+-					  addr, clobbers, len);
 -
--#include <linux/types.h>
--#include <linux/stddef.h>
--#include <linux/rwsem.h>
--#include <linux/sched.h>
--#include <linux/compat.h>
--#include <linux/string.h>
--#include <linux/binfmts.h>
--#include <linux/mm.h>
--#include <linux/security.h>
--#include <linux/elfcore-compat.h>
+-	return ret;
+-}
 -
--#include <asm/segment.h> 
--#include <asm/ptrace.h>
--#include <asm/processor.h>
--#include <asm/user32.h>
--#include <asm/sigcontext32.h>
--#include <asm/fpu32.h>
--#include <asm/i387.h>
--#include <asm/uaccess.h>
--#include <asm/ia32.h>
--#include <asm/vsyscall32.h>
+-unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+-			      const char *start, const char *end)
+-{
+-	unsigned insn_len = end - start;
 -
--#undef	ELF_ARCH
--#undef	ELF_CLASS
--#define ELF_CLASS	ELFCLASS32
--#define ELF_ARCH	EM_386
+-	if (insn_len > len || start == NULL)
+-		insn_len = len;
+-	else
+-		memcpy(insnbuf, start, insn_len);
 -
--#undef	elfhdr
--#undef	elf_phdr
--#undef	elf_note
--#undef	elf_addr_t
--#define elfhdr		elf32_hdr
--#define elf_phdr	elf32_phdr
--#define elf_note	elf32_note
--#define elf_addr_t	Elf32_Off
+-	return insn_len;
+-}
 -
--#define ELF_NAME "elf/i386"
+-void init_IRQ(void)
+-{
+-	pv_irq_ops.init_IRQ();
+-}
 -
--#define AT_SYSINFO 32
--#define AT_SYSINFO_EHDR		33
+-static void native_flush_tlb(void)
+-{
+-	__native_flush_tlb();
+-}
 -
--int sysctl_vsyscall32 = 1;
+-/*
+- * Global pages have to be flushed a bit differently. Not a real
+- * performance problem because this does not happen often.
+- */
+-static void native_flush_tlb_global(void)
+-{
+-	__native_flush_tlb_global();
+-}
 -
--#undef ARCH_DLINFO
--#define ARCH_DLINFO do {  \
--	if (sysctl_vsyscall32) { \
--		current->mm->context.vdso = (void *)VSYSCALL32_BASE;	\
--		NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
--		NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL32_BASE);    \
--	}	\
--} while(0)
+-static void native_flush_tlb_single(unsigned long addr)
+-{
+-	__native_flush_tlb_single(addr);
+-}
 -
--struct file;
+-/* These are in entry.S */
+-extern void native_iret(void);
+-extern void native_irq_enable_sysexit(void);
 -
--#define IA32_EMULATOR 1
+-static int __init print_banner(void)
+-{
+-	pv_init_ops.banner();
+-	return 0;
+-}
+-core_initcall(print_banner);
 -
--#undef ELF_ET_DYN_BASE
+-static struct resource reserve_ioports = {
+-	.start = 0,
+-	.end = IO_SPACE_LIMIT,
+-	.name = "paravirt-ioport",
+-	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
+-};
 -
--#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
+-static struct resource reserve_iomem = {
+-	.start = 0,
+-	.end = -1,
+-	.name = "paravirt-iomem",
+-	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+-};
 -
--#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
+-/*
+- * Reserve the whole legacy IO space to prevent any legacy drivers
+- * from wasting time probing for their hardware.  This is a fairly
+- * brute-force approach to disabling all non-virtual drivers.
+- *
+- * Note that this must be called very early to have any effect.
+- */
+-int paravirt_disable_iospace(void)
+-{
+-	int ret;
 -
--#define _GET_SEG(x) \
--	({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; })
+-	ret = request_resource(&ioport_resource, &reserve_ioports);
+-	if (ret == 0) {
+-		ret = request_resource(&iomem_resource, &reserve_iomem);
+-		if (ret)
+-			release_resource(&reserve_ioports);
+-	}
 -
--/* Assumes current==process to be dumped */
--#undef	ELF_CORE_COPY_REGS
--#define ELF_CORE_COPY_REGS(pr_reg, regs)       		\
--	pr_reg[0] = regs->rbx;				\
--	pr_reg[1] = regs->rcx;				\
--	pr_reg[2] = regs->rdx;				\
--	pr_reg[3] = regs->rsi;				\
--	pr_reg[4] = regs->rdi;				\
--	pr_reg[5] = regs->rbp;				\
--	pr_reg[6] = regs->rax;				\
--	pr_reg[7] = _GET_SEG(ds);   			\
--	pr_reg[8] = _GET_SEG(es);			\
--	pr_reg[9] = _GET_SEG(fs);			\
--	pr_reg[10] = _GET_SEG(gs);			\
--	pr_reg[11] = regs->orig_rax;			\
--	pr_reg[12] = regs->rip;				\
--	pr_reg[13] = regs->cs;				\
--	pr_reg[14] = regs->eflags;			\
--	pr_reg[15] = regs->rsp;				\
--	pr_reg[16] = regs->ss;
+-	return ret;
+-}
 -
+-static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
 -
--#define elf_prstatus	compat_elf_prstatus
--#define elf_prpsinfo	compat_elf_prpsinfo
--#define elf_fpregset_t	struct user_i387_ia32_struct
--#define	elf_fpxregset_t	struct user32_fxsr_struct
--#define user		user32
+-static inline void enter_lazy(enum paravirt_lazy_mode mode)
+-{
+-	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
+-	BUG_ON(preemptible());
 -
--#undef elf_read_implies_exec
--#define elf_read_implies_exec(ex, executable_stack)     (executable_stack != EXSTACK_DISABLE_X)
+-	x86_write_percpu(paravirt_lazy_mode, mode);
+-}
 -
--#define elf_core_copy_regs		elf32_core_copy_regs
--static inline void elf32_core_copy_regs(compat_elf_gregset_t *elfregs,
--					struct pt_regs *regs)
+-void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
 -{
--	ELF_CORE_COPY_REGS((&elfregs->ebx), regs)
+-	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
+-	BUG_ON(preemptible());
+-
+-	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
 -}
 -
--#define elf_core_copy_task_regs		elf32_core_copy_task_regs
--static inline int elf32_core_copy_task_regs(struct task_struct *t,
--					    compat_elf_gregset_t* elfregs)
--{	
--	struct pt_regs *pp = task_pt_regs(t);
--	ELF_CORE_COPY_REGS((&elfregs->ebx), pp);
--	/* fix wrong segments */ 
--	elfregs->ds = t->thread.ds;
--	elfregs->fs = t->thread.fsindex;
--	elfregs->gs = t->thread.gsindex;
--	elfregs->es = t->thread.es;
--	return 1; 
+-void paravirt_enter_lazy_mmu(void)
+-{
+-	enter_lazy(PARAVIRT_LAZY_MMU);
 -}
 -
--#define elf_core_copy_task_fpregs	elf32_core_copy_task_fpregs
--static inline int 
--elf32_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs,
--			    elf_fpregset_t *fpu)
+-void paravirt_leave_lazy_mmu(void)
 -{
--	struct _fpstate_ia32 *fpstate = (void*)fpu; 
--	mm_segment_t oldfs = get_fs();
+-	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
+-}
 -
--	if (!tsk_used_math(tsk))
--		return 0;
--	if (!regs)
--		regs = task_pt_regs(tsk);
--	if (tsk == current)
--		unlazy_fpu(tsk);
--	set_fs(KERNEL_DS); 
--	save_i387_ia32(tsk, fpstate, regs, 1);
--	/* Correct for i386 bug. It puts the fop into the upper 16bits of 
--	   the tag word (like FXSAVE), not into the fcs*/ 
--	fpstate->cssel |= fpstate->tag & 0xffff0000; 
--	set_fs(oldfs); 
--	return 1; 
+-void paravirt_enter_lazy_cpu(void)
+-{
+-	enter_lazy(PARAVIRT_LAZY_CPU);
 -}
 -
--#define ELF_CORE_COPY_XFPREGS 1
--#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
--#define elf_core_copy_task_xfpregs	elf32_core_copy_task_xfpregs
--static inline int 
--elf32_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
+-void paravirt_leave_lazy_cpu(void)
 -{
--	struct pt_regs *regs = task_pt_regs(t);
--	if (!tsk_used_math(t))
--		return 0;
--	if (t == current)
--		unlazy_fpu(t); 
--	memcpy(xfpu, &t->thread.i387.fxsave, sizeof(elf_fpxregset_t));
--	xfpu->fcs = regs->cs; 
--	xfpu->fos = t->thread.ds; /* right? */ 
--	return 1;
+-	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
 -}
 -
--#undef elf_check_arch
--#define elf_check_arch(x) \
--	((x)->e_machine == EM_386)
+-enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+-{
+-	return x86_read_percpu(paravirt_lazy_mode);
+-}
 -
--extern int force_personality32;
+-struct pv_info pv_info = {
+-	.name = "bare hardware",
+-	.paravirt_enabled = 0,
+-	.kernel_rpl = 0,
+-	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
+-};
 -
--#undef	ELF_EXEC_PAGESIZE
--#undef	ELF_HWCAP
--#undef	ELF_PLATFORM
--#undef	SET_PERSONALITY
--#define ELF_EXEC_PAGESIZE PAGE_SIZE
--#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
--#define ELF_PLATFORM  ("i686")
--#define SET_PERSONALITY(ex, ibcs2)			\
--do {							\
--	unsigned long new_flags = 0;				\
--	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
--		new_flags = _TIF_IA32;				\
--	if ((current_thread_info()->flags & _TIF_IA32)		\
--	    != new_flags)					\
--		set_thread_flag(TIF_ABI_PENDING);		\
--	else							\
--		clear_thread_flag(TIF_ABI_PENDING);		\
--	/* XXX This overwrites the user set personality */	\
--	current->personality |= force_personality32;		\
--} while (0)
+-struct pv_init_ops pv_init_ops = {
+-	.patch = native_patch,
+-	.banner = default_banner,
+-	.arch_setup = paravirt_nop,
+-	.memory_setup = machine_specific_memory_setup,
+-};
 -
--/* Override some function names */
--#define elf_format			elf32_format
+-struct pv_time_ops pv_time_ops = {
+-	.time_init = hpet_time_init,
+-	.get_wallclock = native_get_wallclock,
+-	.set_wallclock = native_set_wallclock,
+-	.sched_clock = native_sched_clock,
+-	.get_cpu_khz = native_calculate_cpu_khz,
+-};
 -
--#define init_elf_binfmt			init_elf32_binfmt
--#define exit_elf_binfmt			exit_elf32_binfmt
+-struct pv_irq_ops pv_irq_ops = {
+-	.init_IRQ = native_init_IRQ,
+-	.save_fl = native_save_fl,
+-	.restore_fl = native_restore_fl,
+-	.irq_disable = native_irq_disable,
+-	.irq_enable = native_irq_enable,
+-	.safe_halt = native_safe_halt,
+-	.halt = native_halt,
+-};
 -
--#define load_elf_binary load_elf32_binary
+-struct pv_cpu_ops pv_cpu_ops = {
+-	.cpuid = native_cpuid,
+-	.get_debugreg = native_get_debugreg,
+-	.set_debugreg = native_set_debugreg,
+-	.clts = native_clts,
+-	.read_cr0 = native_read_cr0,
+-	.write_cr0 = native_write_cr0,
+-	.read_cr4 = native_read_cr4,
+-	.read_cr4_safe = native_read_cr4_safe,
+-	.write_cr4 = native_write_cr4,
+-	.wbinvd = native_wbinvd,
+-	.read_msr = native_read_msr_safe,
+-	.write_msr = native_write_msr_safe,
+-	.read_tsc = native_read_tsc,
+-	.read_pmc = native_read_pmc,
+-	.load_tr_desc = native_load_tr_desc,
+-	.set_ldt = native_set_ldt,
+-	.load_gdt = native_load_gdt,
+-	.load_idt = native_load_idt,
+-	.store_gdt = native_store_gdt,
+-	.store_idt = native_store_idt,
+-	.store_tr = native_store_tr,
+-	.load_tls = native_load_tls,
+-	.write_ldt_entry = write_dt_entry,
+-	.write_gdt_entry = write_dt_entry,
+-	.write_idt_entry = write_dt_entry,
+-	.load_esp0 = native_load_esp0,
 -
--#undef	ELF_PLAT_INIT
--#define ELF_PLAT_INIT(r, load_addr)	elf32_init(r)
+-	.irq_enable_sysexit = native_irq_enable_sysexit,
+-	.iret = native_iret,
 -
--#undef start_thread
--#define start_thread(regs,new_rip,new_rsp) do { \
--	asm volatile("movl %0,%%fs" :: "r" (0)); \
--	asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS)); \
--	load_gs_index(0); \
--	(regs)->rip = (new_rip); \
--	(regs)->rsp = (new_rsp); \
--	(regs)->eflags = 0x200; \
--	(regs)->cs = __USER32_CS; \
--	(regs)->ss = __USER32_DS; \
--	set_fs(USER_DS); \
--} while(0) 
+-	.set_iopl_mask = native_set_iopl_mask,
+-	.io_delay = native_io_delay,
 -
+-	.lazy_mode = {
+-		.enter = paravirt_nop,
+-		.leave = paravirt_nop,
+-	},
+-};
 -
--#include <linux/module.h>
+-struct pv_apic_ops pv_apic_ops = {
+-#ifdef CONFIG_X86_LOCAL_APIC
+-	.apic_write = native_apic_write,
+-	.apic_write_atomic = native_apic_write_atomic,
+-	.apic_read = native_apic_read,
+-	.setup_boot_clock = setup_boot_APIC_clock,
+-	.setup_secondary_clock = setup_secondary_APIC_clock,
+-	.startup_ipi_hook = paravirt_nop,
+-#endif
+-};
 -
--MODULE_DESCRIPTION("Binary format loader for compatibility with IA32 ELF binaries."); 
--MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
+-struct pv_mmu_ops pv_mmu_ops = {
+-	.pagetable_setup_start = native_pagetable_setup_start,
+-	.pagetable_setup_done = native_pagetable_setup_done,
 -
--#undef MODULE_DESCRIPTION
--#undef MODULE_AUTHOR
+-	.read_cr2 = native_read_cr2,
+-	.write_cr2 = native_write_cr2,
+-	.read_cr3 = native_read_cr3,
+-	.write_cr3 = native_write_cr3,
 -
--static void elf32_init(struct pt_regs *);
+-	.flush_tlb_user = native_flush_tlb,
+-	.flush_tlb_kernel = native_flush_tlb_global,
+-	.flush_tlb_single = native_flush_tlb_single,
+-	.flush_tlb_others = native_flush_tlb_others,
 -
--#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
--#define arch_setup_additional_pages syscall32_setup_pages
--extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+-	.alloc_pt = paravirt_nop,
+-	.alloc_pd = paravirt_nop,
+-	.alloc_pd_clone = paravirt_nop,
+-	.release_pt = paravirt_nop,
+-	.release_pd = paravirt_nop,
 -
--#include "../../../fs/binfmt_elf.c" 
+-	.set_pte = native_set_pte,
+-	.set_pte_at = native_set_pte_at,
+-	.set_pmd = native_set_pmd,
+-	.pte_update = paravirt_nop,
+-	.pte_update_defer = paravirt_nop,
 -
--static void elf32_init(struct pt_regs *regs)
--{
--	struct task_struct *me = current; 
--	regs->rdi = 0;
--	regs->rsi = 0;
--	regs->rdx = 0;
--	regs->rcx = 0;
--	regs->rax = 0;
--	regs->rbx = 0; 
--	regs->rbp = 0; 
--	regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
--		regs->r13 = regs->r14 = regs->r15 = 0; 
--    me->thread.fs = 0; 
--	me->thread.gs = 0;
--	me->thread.fsindex = 0; 
--	me->thread.gsindex = 0;
--    me->thread.ds = __USER_DS; 
--	me->thread.es = __USER_DS;
--}
+-#ifdef CONFIG_HIGHPTE
+-	.kmap_atomic_pte = kmap_atomic,
+-#endif
 -
--#ifdef CONFIG_SYSCTL
--/* Register vsyscall32 into the ABI table */
--#include <linux/sysctl.h>
+-#ifdef CONFIG_X86_PAE
+-	.set_pte_atomic = native_set_pte_atomic,
+-	.set_pte_present = native_set_pte_present,
+-	.set_pud = native_set_pud,
+-	.pte_clear = native_pte_clear,
+-	.pmd_clear = native_pmd_clear,
 -
--static ctl_table abi_table2[] = {
--	{
--		.procname	= "vsyscall32",
--		.data		= &sysctl_vsyscall32,
--		.maxlen		= sizeof(int),
--		.mode		= 0644,
--		.proc_handler	= proc_dointvec
--	},
--	{}
--};
+-	.pmd_val = native_pmd_val,
+-	.make_pmd = native_make_pmd,
+-#endif
 -
--static ctl_table abi_root_table2[] = {
--	{
--		.ctl_name = CTL_ABI,
--		.procname = "abi",
--		.mode = 0555,
--		.child = abi_table2
+-	.pte_val = native_pte_val,
+-	.pgd_val = native_pgd_val,
+-
+-	.make_pte = native_make_pte,
+-	.make_pgd = native_make_pgd,
+-
+-	.dup_mmap = paravirt_nop,
+-	.exit_mmap = paravirt_nop,
+-	.activate_mm = paravirt_nop,
+-
+-	.lazy_mode = {
+-		.enter = paravirt_nop,
+-		.leave = paravirt_nop,
 -	},
--	{}
 -};
 -
--static __init int ia32_binfmt_init(void)
--{ 
--	register_sysctl_table(abi_root_table2);
--	return 0;
--}
--__initcall(ia32_binfmt_init);
--#endif
-diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index 6ea19c2..1c0503b 100644
---- a/arch/x86/ia32/ia32_signal.c
-+++ b/arch/x86/ia32/ia32_signal.c
-@@ -29,9 +29,8 @@
- #include <asm/ia32_unistd.h>
- #include <asm/user32.h>
- #include <asm/sigcontext32.h>
--#include <asm/fpu32.h>
- #include <asm/proto.h>
--#include <asm/vsyscall32.h>
-+#include <asm/vdso.h>
+-EXPORT_SYMBOL_GPL(pv_time_ops);
+-EXPORT_SYMBOL    (pv_cpu_ops);
+-EXPORT_SYMBOL    (pv_mmu_ops);
+-EXPORT_SYMBOL_GPL(pv_apic_ops);
+-EXPORT_SYMBOL_GPL(pv_info);
+-EXPORT_SYMBOL    (pv_irq_ops);
+diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
+new file mode 100644
+index 0000000..82fc5fc
+--- /dev/null
++++ b/arch/x86/kernel/paravirt_patch_32.c
+@@ -0,0 +1,49 @@
++#include <asm/paravirt.h>
++
++DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
++DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
++DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
++DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
++DEF_NATIVE(pv_cpu_ops, iret, "iret");
++DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "sti; sysexit");
++DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
++DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
++DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
++DEF_NATIVE(pv_cpu_ops, clts, "clts");
++DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
++
++unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
++		      unsigned long addr, unsigned len)
++{
++	const unsigned char *start, *end;
++	unsigned ret;
++
++#define PATCH_SITE(ops, x)					\
++		case PARAVIRT_PATCH(ops.x):			\
++			start = start_##ops##_##x;		\
++			end = end_##ops##_##x;			\
++			goto patch_site
++	switch(type) {
++		PATCH_SITE(pv_irq_ops, irq_disable);
++		PATCH_SITE(pv_irq_ops, irq_enable);
++		PATCH_SITE(pv_irq_ops, restore_fl);
++		PATCH_SITE(pv_irq_ops, save_fl);
++		PATCH_SITE(pv_cpu_ops, iret);
++		PATCH_SITE(pv_cpu_ops, irq_enable_syscall_ret);
++		PATCH_SITE(pv_mmu_ops, read_cr2);
++		PATCH_SITE(pv_mmu_ops, read_cr3);
++		PATCH_SITE(pv_mmu_ops, write_cr3);
++		PATCH_SITE(pv_cpu_ops, clts);
++		PATCH_SITE(pv_cpu_ops, read_tsc);
++
++	patch_site:
++		ret = paravirt_patch_insns(ibuf, len, start, end);
++		break;
++
++	default:
++		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
++		break;
++	}
++#undef PATCH_SITE
++	return ret;
++}
+diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
+new file mode 100644
+index 0000000..7d904e1
+--- /dev/null
++++ b/arch/x86/kernel/paravirt_patch_64.c
+@@ -0,0 +1,57 @@
++#include <asm/paravirt.h>
++#include <asm/asm-offsets.h>
++#include <linux/stringify.h>
++
++DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
++DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
++DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
++DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
++DEF_NATIVE(pv_cpu_ops, iret, "iretq");
++DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
++DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
++DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
++DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
++DEF_NATIVE(pv_cpu_ops, clts, "clts");
++DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
++
++/* the three commands give us more control to how to return from a syscall */
++DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "movq %gs:" __stringify(pda_oldrsp) ", %rsp; swapgs; sysretq;");
++DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
++
++unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
++		      unsigned long addr, unsigned len)
++{
++	const unsigned char *start, *end;
++	unsigned ret;
++
++#define PATCH_SITE(ops, x)					\
++		case PARAVIRT_PATCH(ops.x):			\
++			start = start_##ops##_##x;		\
++			end = end_##ops##_##x;			\
++			goto patch_site
++	switch(type) {
++		PATCH_SITE(pv_irq_ops, restore_fl);
++		PATCH_SITE(pv_irq_ops, save_fl);
++		PATCH_SITE(pv_irq_ops, irq_enable);
++		PATCH_SITE(pv_irq_ops, irq_disable);
++		PATCH_SITE(pv_cpu_ops, iret);
++		PATCH_SITE(pv_cpu_ops, irq_enable_syscall_ret);
++		PATCH_SITE(pv_cpu_ops, swapgs);
++		PATCH_SITE(pv_mmu_ops, read_cr2);
++		PATCH_SITE(pv_mmu_ops, read_cr3);
++		PATCH_SITE(pv_mmu_ops, write_cr3);
++		PATCH_SITE(pv_cpu_ops, clts);
++		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
++		PATCH_SITE(pv_cpu_ops, wbinvd);
++
++	patch_site:
++		ret = paravirt_patch_insns(ibuf, len, start, end);
++		break;
++
++	default:
++		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
++		break;
++	}
++#undef PATCH_SITE
++	return ret;
++}
+diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
+index 6bf1f71..21f34db 100644
+--- a/arch/x86/kernel/pci-calgary_64.c
++++ b/arch/x86/kernel/pci-calgary_64.c
+@@ -30,7 +30,6 @@
+ #include <linux/spinlock.h>
+ #include <linux/string.h>
+ #include <linux/dma-mapping.h>
+-#include <linux/init.h>
+ #include <linux/bitops.h>
+ #include <linux/pci_ids.h>
+ #include <linux/pci.h>
+@@ -183,7 +182,7 @@ static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
  
- #define DEBUG_SIG 0
+ /* enable this to stress test the chip's TCE cache */
+ #ifdef CONFIG_IOMMU_DEBUG
+-int debugging __read_mostly = 1;
++static int debugging = 1;
  
-@@ -43,7 +42,8 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
- int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ static inline unsigned long verify_bit_range(unsigned long* bitmap,
+ 	int expected, unsigned long start, unsigned long end)
+@@ -202,7 +201,7 @@ static inline unsigned long verify_bit_range(unsigned long* bitmap,
+ 	return ~0UL;
+ }
+ #else /* debugging is disabled */
+-int debugging __read_mostly = 0;
++static int debugging;
+ 
+ static inline unsigned long verify_bit_range(unsigned long* bitmap,
+ 	int expected, unsigned long start, unsigned long end)
+diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
+index 5552d23..a82473d 100644
+--- a/arch/x86/kernel/pci-dma_64.c
++++ b/arch/x86/kernel/pci-dma_64.c
+@@ -13,7 +13,6 @@
+ #include <asm/calgary.h>
+ 
+ int iommu_merge __read_mostly = 0;
+-EXPORT_SYMBOL(iommu_merge);
+ 
+ dma_addr_t bad_dma_address __read_mostly;
+ EXPORT_SYMBOL(bad_dma_address);
+@@ -230,7 +229,7 @@ EXPORT_SYMBOL(dma_set_mask);
+  * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
+  * documentation.
+  */
+-__init int iommu_setup(char *p)
++static __init int iommu_setup(char *p)
  {
- 	int err;
--	if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+ 	iommu_merge = 1;
+ 
+diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
+index 06bcba5..4d5cc71 100644
+--- a/arch/x86/kernel/pci-gart_64.c
++++ b/arch/x86/kernel/pci-gart_64.c
+@@ -1,12 +1,12 @@
+ /*
+  * Dynamic DMA mapping support for AMD Hammer.
+- * 
++ *
+  * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
+  * This allows to use PCI devices that only support 32bit addresses on systems
+- * with more than 4GB. 
++ * with more than 4GB.
+  *
+  * See Documentation/DMA-mapping.txt for the interface specification.
+- * 
++ *
+  * Copyright 2002 Andi Kleen, SuSE Labs.
+  * Subject to the GNU General Public License v2 only.
+  */
+@@ -37,23 +37,26 @@
+ #include <asm/k8.h>
+ 
+ static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
+-static unsigned long iommu_size; 	/* size of remapping area bytes */
++static unsigned long iommu_size;	/* size of remapping area bytes */
+ static unsigned long iommu_pages;	/* .. and in pages */
+ 
+-static u32 *iommu_gatt_base; 		/* Remapping table */
++static u32 *iommu_gatt_base;		/* Remapping table */
+ 
+-/* If this is disabled the IOMMU will use an optimized flushing strategy
+-   of only flushing when an mapping is reused. With it true the GART is flushed 
+-   for every mapping. Problem is that doing the lazy flush seems to trigger
+-   bugs with some popular PCI cards, in particular 3ware (but has been also
+-   also seen with Qlogic at least). */
++/*
++ * If this is disabled the IOMMU will use an optimized flushing strategy
++ * of only flushing when an mapping is reused. With it true the GART is
++ * flushed for every mapping. Problem is that doing the lazy flush seems
++ * to trigger bugs with some popular PCI cards, in particular 3ware (but
++ * has been also also seen with Qlogic at least).
++ */
+ int iommu_fullflush = 1;
+ 
+-/* Allocation bitmap for the remapping area */ 
++/* Allocation bitmap for the remapping area: */
+ static DEFINE_SPINLOCK(iommu_bitmap_lock);
+-static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
++/* Guarded by iommu_bitmap_lock: */
++static unsigned long *iommu_gart_bitmap;
+ 
+-static u32 gart_unmapped_entry; 
++static u32 gart_unmapped_entry;
+ 
+ #define GPTE_VALID    1
+ #define GPTE_COHERENT 2
+@@ -61,10 +64,10 @@ static u32 gart_unmapped_entry;
+ 	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
+ #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
+ 
+-#define to_pages(addr,size) \
++#define to_pages(addr, size) \
+ 	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
+ 
+-#define EMERGENCY_PAGES 32 /* = 128KB */ 
++#define EMERGENCY_PAGES 32 /* = 128KB */
+ 
+ #ifdef CONFIG_AGP
+ #define AGPEXTERN extern
+@@ -77,130 +80,152 @@ AGPEXTERN int agp_memory_reserved;
+ AGPEXTERN __u32 *agp_gatt_table;
+ 
+ static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
+-static int need_flush; 		/* global flush state. set for each gart wrap */
++static int need_flush;		/* global flush state. set for each gart wrap */
+ 
+-static unsigned long alloc_iommu(int size) 
+-{ 	
++static unsigned long alloc_iommu(int size)
++{
+ 	unsigned long offset, flags;
+ 
+-	spin_lock_irqsave(&iommu_bitmap_lock, flags);	
+-	offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
++	spin_lock_irqsave(&iommu_bitmap_lock, flags);
++	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
++					iommu_pages, size);
+ 	if (offset == -1) {
+ 		need_flush = 1;
+-		offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
++		offset = find_next_zero_string(iommu_gart_bitmap, 0,
++						iommu_pages, size);
+ 	}
+-	if (offset != -1) { 
+-		set_bit_string(iommu_gart_bitmap, offset, size); 
+-		next_bit = offset+size; 
+-		if (next_bit >= iommu_pages) { 
++	if (offset != -1) {
++		set_bit_string(iommu_gart_bitmap, offset, size);
++		next_bit = offset+size;
++		if (next_bit >= iommu_pages) {
+ 			next_bit = 0;
+ 			need_flush = 1;
+-		} 
+-	} 
++		}
++	}
+ 	if (iommu_fullflush)
+ 		need_flush = 1;
+-	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);      
++	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 +
-+	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- 		return -EFAULT;
+ 	return offset;
+-} 
++}
  
- 	/* If you change siginfo_t structure, please make sure that
-@@ -53,16 +53,19 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
- 	   3 ints plus the relevant union member.  */
- 	err = __put_user(from->si_signo, &to->si_signo);
- 	err |= __put_user(from->si_errno, &to->si_errno);
-- 	err |= __put_user((short)from->si_code, &to->si_code);
-+	err |= __put_user((short)from->si_code, &to->si_code);
+ static void free_iommu(unsigned long offset, int size)
+-{ 
++{
+ 	unsigned long flags;
++
+ 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
+ 	__clear_bit_string(iommu_gart_bitmap, offset, size);
+ 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
+-} 
++}
  
- 	if (from->si_code < 0) {
- 		err |= __put_user(from->si_pid, &to->si_pid);
-- 		err |= __put_user(from->si_uid, &to->si_uid);
-- 		err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
-+		err |= __put_user(from->si_uid, &to->si_uid);
-+		err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
- 	} else {
-- 		/* First 32bits of unions are always present:
-- 		 * si_pid === si_band === si_tid === si_addr(LS half) */
--		err |= __put_user(from->_sifields._pad[0], &to->_sifields._pad[0]);
-+		/*
-+		 * First 32bits of unions are always present:
-+		 * si_pid === si_band === si_tid === si_addr(LS half)
-+		 */
-+		err |= __put_user(from->_sifields._pad[0],
-+				  &to->_sifields._pad[0]);
- 		switch (from->si_code >> 16) {
- 		case __SI_FAULT >> 16:
- 			break;
-@@ -76,14 +79,15 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
- 			err |= __put_user(from->si_uid, &to->si_uid);
- 			break;
- 		case __SI_POLL >> 16:
--			err |= __put_user(from->si_fd, &to->si_fd); 
-+			err |= __put_user(from->si_fd, &to->si_fd);
- 			break;
- 		case __SI_TIMER >> 16:
--			err |= __put_user(from->si_overrun, &to->si_overrun); 
-+			err |= __put_user(from->si_overrun, &to->si_overrun);
- 			err |= __put_user(ptr_to_compat(from->si_ptr),
--					&to->si_ptr);
-+					  &to->si_ptr);
- 			break;
--		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
-+			 /* This is not generated by the kernel as of now.  */
-+		case __SI_RT >> 16:
- 		case __SI_MESGQ >> 16:
- 			err |= __put_user(from->si_uid, &to->si_uid);
- 			err |= __put_user(from->si_int, &to->si_int);
-@@ -97,7 +101,8 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
- {
- 	int err;
- 	u32 ptr32;
--	if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
+-/* 
++/*
+  * Use global flush state to avoid races with multiple flushers.
+  */
+ static void flush_gart(void)
+-{ 
++{
+ 	unsigned long flags;
 +
-+	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
- 		return -EFAULT;
+ 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
+ 	if (need_flush) {
+ 		k8_flush_garts();
+ 		need_flush = 0;
+-	} 
++	}
+ 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
+-} 
++}
  
- 	err = __get_user(to->si_signo, &from->si_signo);
-@@ -112,8 +117,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
- 	return err;
- }
+ #ifdef CONFIG_IOMMU_LEAK
  
--asmlinkage long
--sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
-+asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
+-#define SET_LEAK(x) if (iommu_leak_tab) \
+-			iommu_leak_tab[x] = __builtin_return_address(0);
+-#define CLEAR_LEAK(x) if (iommu_leak_tab) \
+-			iommu_leak_tab[x] = NULL;
++#define SET_LEAK(x)							\
++	do {								\
++		if (iommu_leak_tab)					\
++			iommu_leak_tab[x] = __builtin_return_address(0);\
++	} while (0)
++
++#define CLEAR_LEAK(x)							\
++	do {								\
++		if (iommu_leak_tab)					\
++			iommu_leak_tab[x] = NULL;			\
++	} while (0)
+ 
+ /* Debugging aid for drivers that don't free their IOMMU tables */
+-static void **iommu_leak_tab; 
++static void **iommu_leak_tab;
+ static int leak_trace;
+ static int iommu_leak_pages = 20;
++
+ static void dump_leak(void)
  {
- 	mask &= _BLOCKABLE;
- 	spin_lock_irq(&current->sighand->siglock);
-@@ -128,36 +132,37 @@ sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
- 	return -ERESTARTNOHAND;
+ 	int i;
+-	static int dump; 
+-	if (dump || !iommu_leak_tab) return;
++	static int dump;
++
++	if (dump || !iommu_leak_tab)
++		return;
+ 	dump = 1;
+-	show_stack(NULL,NULL);
+-	/* Very crude. dump some from the end of the table too */ 
+-	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages); 
+-	for (i = 0; i < iommu_leak_pages; i+=2) {
+-		printk("%lu: ", iommu_pages-i);
+-		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
+-		printk("%c", (i+1)%2 == 0 ? '\n' : ' '); 
+-	} 
+-	printk("\n");
++	show_stack(NULL, NULL);
++
++	/* Very crude. dump some from the end of the table too */
++	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
++	       iommu_leak_pages);
++	for (i = 0; i < iommu_leak_pages; i += 2) {
++		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
++		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
++		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
++	}
++	printk(KERN_DEBUG "\n");
  }
+ #else
+-#define SET_LEAK(x)
+-#define CLEAR_LEAK(x)
++# define SET_LEAK(x)
++# define CLEAR_LEAK(x)
+ #endif
  
--asmlinkage long
--sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
--		  stack_ia32_t __user *uoss_ptr, 
--		  struct pt_regs *regs)
-+asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
-+				  stack_ia32_t __user *uoss_ptr,
-+				  struct pt_regs *regs)
+ static void iommu_full(struct device *dev, size_t size, int dir)
  {
--	stack_t uss,uoss; 
-+	stack_t uss, uoss;
- 	int ret;
--	mm_segment_t seg; 
--	if (uss_ptr) { 
-+	mm_segment_t seg;
-+
-+	if (uss_ptr) {
- 		u32 ptr;
--		memset(&uss,0,sizeof(stack_t));
--		if (!access_ok(VERIFY_READ,uss_ptr,sizeof(stack_ia32_t)) ||
+-	/* 
++	/*
+ 	 * Ran out of IOMMU space for this operation. This is very bad.
+ 	 * Unfortunately the drivers cannot handle this operation properly.
+-	 * Return some non mapped prereserved space in the aperture and 
++	 * Return some non mapped prereserved space in the aperture and
+ 	 * let the Northbridge deal with it. This will result in garbage
+ 	 * in the IO operation. When the size exceeds the prereserved space
+-	 * memory corruption will occur or random memory will be DMAed 
++	 * memory corruption will occur or random memory will be DMAed
+ 	 * out. Hopefully no network devices use single mappings that big.
+-	 */ 
+-	
+-	printk(KERN_ERR 
+-  "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
+-	       size, dev->bus_id);
++	 */
 +
-+		memset(&uss, 0, sizeof(stack_t));
-+		if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
- 			    __get_user(ptr, &uss_ptr->ss_sp) ||
- 			    __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
- 			    __get_user(uss.ss_size, &uss_ptr->ss_size))
- 			return -EFAULT;
- 		uss.ss_sp = compat_ptr(ptr);
- 	}
--	seg = get_fs(); 
--	set_fs(KERNEL_DS); 
--	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->rsp);
--	set_fs(seg); 
-+	seg = get_fs();
-+	set_fs(KERNEL_DS);
-+	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
-+	set_fs(seg);
- 	if (ret >= 0 && uoss_ptr)  {
--		if (!access_ok(VERIFY_WRITE,uoss_ptr,sizeof(stack_ia32_t)) ||
-+		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
- 		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
- 		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
- 		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
- 			ret = -EFAULT;
--	} 	
--	return ret;	
++	printk(KERN_ERR
++		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
++		size, dev->bus_id);
+ 
+ 	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
+ 		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
+ 			panic("PCI-DMA: Memory would be corrupted\n");
+-		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL) 
+-			panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
+-	} 
+-
++		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++			panic(KERN_ERR
++				"PCI-DMA: Random memory would be DMAed\n");
 +	}
-+	return ret;
+ #ifdef CONFIG_IOMMU_LEAK
+-	dump_leak(); 
++	dump_leak();
+ #endif
+-} 
++}
+ 
+-static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
+-{ 
++static inline int
++need_iommu(struct device *dev, unsigned long addr, size_t size)
++{
+ 	u64 mask = *dev->dma_mask;
+ 	int high = addr + size > mask;
+ 	int mmu = high;
+-	if (force_iommu) 
+-		mmu = 1; 
+-	return mmu; 
++
++	if (force_iommu)
++		mmu = 1;
++
++	return mmu;
+ }
+ 
+-static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
+-{ 
++static inline int
++nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
++{
+ 	u64 mask = *dev->dma_mask;
+ 	int high = addr + size > mask;
+ 	int mmu = high;
+-	return mmu; 
++
++	return mmu;
+ }
+ 
+ /* Map a single continuous physical area into the IOMMU.
+@@ -208,13 +233,14 @@ static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t
+  */
+ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
+ 				size_t size, int dir)
+-{ 
++{
+ 	unsigned long npages = to_pages(phys_mem, size);
+ 	unsigned long iommu_page = alloc_iommu(npages);
+ 	int i;
++
+ 	if (iommu_page == -1) {
+ 		if (!nonforced_iommu(dev, phys_mem, size))
+-			return phys_mem; 
++			return phys_mem;
+ 		if (panic_on_overflow)
+ 			panic("dma_map_area overflow %lu bytes\n", size);
+ 		iommu_full(dev, size, dir);
+@@ -229,35 +255,39 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
+ 	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
  }
  
- /*
-@@ -186,87 +191,85 @@ struct rt_sigframe
- 	char retcode[8];
- };
- 
--static int
--ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_ia32 __user *sc, unsigned int *peax)
-+#define COPY(x)		{ 		\
-+	unsigned int reg;		\
-+	err |= __get_user(reg, &sc->x);	\
-+	regs->x = reg;			\
-+}
+-static dma_addr_t gart_map_simple(struct device *dev, char *buf,
+-				 size_t size, int dir)
++static dma_addr_t
++gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
+ {
+ 	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
 +
-+#define RELOAD_SEG(seg,mask)						\
-+	{ unsigned int cur;						\
-+	  unsigned short pre;						\
-+	  err |= __get_user(pre, &sc->seg);				\
-+	  asm volatile("movl %%" #seg ",%0" : "=r" (cur));		\
-+	  pre |= mask;							\
-+	  if (pre != cur) loadsegment(seg, pre); }
+ 	flush_gart();
 +
-+static int ia32_restore_sigcontext(struct pt_regs *regs,
-+				   struct sigcontext_ia32 __user *sc,
-+				   unsigned int *peax)
+ 	return map;
+ }
+ 
+ /* Map a single area into the IOMMU */
+-static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
++static dma_addr_t
++gart_map_single(struct device *dev, void *addr, size_t size, int dir)
  {
--	unsigned int err = 0;
--	
-+	unsigned int tmpflags, gs, oldgs, err = 0;
-+	struct _fpstate_ia32 __user *buf;
-+	u32 tmp;
-+
- 	/* Always make any pending restarted system calls return -EINTR */
- 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ 	unsigned long phys_mem, bus;
  
- #if DEBUG_SIG
--	printk("SIG restore_sigcontext: sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
--		sc, sc->err, sc->eip, sc->cs, sc->eflags);
-+	printk(KERN_DEBUG "SIG restore_sigcontext: "
-+	       "sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
-+	       sc, sc->err, sc->ip, sc->cs, sc->flags);
- #endif
--#define COPY(x)		{ \
--	unsigned int reg;			\
--	err |= __get_user(reg, &sc->e ##x);	\
--	regs->r ## x = reg;			\
--}
+ 	if (!dev)
+ 		dev = &fallback_dev;
  
--#define RELOAD_SEG(seg,mask)						\
--	{ unsigned int cur; 						\
--	  unsigned short pre;						\
--	  err |= __get_user(pre, &sc->seg);				\
--    	  asm volatile("movl %%" #seg ",%0" : "=r" (cur));		\
--	  pre |= mask; 							\
--	  if (pre != cur) loadsegment(seg,pre); }
--
--	/* Reload fs and gs if they have changed in the signal handler.
--	   This does not handle long fs/gs base changes in the handler, but 
--	   does not clobber them at least in the normal case. */ 
--	
--	{
--		unsigned gs, oldgs; 
--		err |= __get_user(gs, &sc->gs);
--		gs |= 3; 
--		asm("movl %%gs,%0" : "=r" (oldgs));
--		if (gs != oldgs)
--		load_gs_index(gs); 
--	} 
--	RELOAD_SEG(fs,3);
--	RELOAD_SEG(ds,3);
--	RELOAD_SEG(es,3);
-+	/*
-+	 * Reload fs and gs if they have changed in the signal
-+	 * handler.  This does not handle long fs/gs base changes in
-+	 * the handler, but does not clobber them at least in the
-+	 * normal case.
-+	 */
-+	err |= __get_user(gs, &sc->gs);
-+	gs |= 3;
-+	asm("movl %%gs,%0" : "=r" (oldgs));
-+	if (gs != oldgs)
-+		load_gs_index(gs);
+-	phys_mem = virt_to_phys(addr); 
++	phys_mem = virt_to_phys(addr);
+ 	if (!need_iommu(dev, phys_mem, size))
+-		return phys_mem; 
++		return phys_mem;
+ 
+ 	bus = gart_map_simple(dev, addr, size, dir);
+-	return bus; 
 +
-+	RELOAD_SEG(fs, 3);
-+	RELOAD_SEG(ds, 3);
-+	RELOAD_SEG(es, 3);
++	return bus;
+ }
  
- 	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
- 	COPY(dx); COPY(cx); COPY(ip);
--	/* Don't touch extended registers */ 
--	
--	err |= __get_user(regs->cs, &sc->cs); 
--	regs->cs |= 3;  
--	err |= __get_user(regs->ss, &sc->ss); 
--	regs->ss |= 3; 
--
--	{
--		unsigned int tmpflags;
--		err |= __get_user(tmpflags, &sc->eflags);
--		regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
--		regs->orig_rax = -1;		/* disable syscall checks */
--	}
-+	/* Don't touch extended registers */
+ /*
+  * Free a DMA mapping.
+  */
+ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
+-		      size_t size, int direction)
++			      size_t size, int direction)
+ {
+ 	unsigned long iommu_page;
+ 	int npages;
+@@ -266,6 +296,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ 	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
+ 	    dma_addr >= iommu_bus_base + iommu_size)
+ 		return;
 +
-+	err |= __get_user(regs->cs, &sc->cs);
-+	regs->cs |= 3;
-+	err |= __get_user(regs->ss, &sc->ss);
-+	regs->ss |= 3;
+ 	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
+ 	npages = to_pages(dma_addr, size);
+ 	for (i = 0; i < npages; i++) {
+@@ -278,7 +309,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ /*
+  * Wrapper for pci_unmap_single working with scatterlists.
+  */
+-static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
++static void
++gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+ {
+ 	struct scatterlist *s;
+ 	int i;
+@@ -303,12 +335,13 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
+ 
+ 	for_each_sg(sg, s, nents, i) {
+ 		unsigned long addr = sg_phys(s);
+-		if (nonforced_iommu(dev, addr, s->length)) { 
 +
-+	err |= __get_user(tmpflags, &sc->flags);
-+	regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5);
-+	/* disable syscall checks */
-+	regs->orig_ax = -1;
++		if (nonforced_iommu(dev, addr, s->length)) {
+ 			addr = dma_map_area(dev, addr, s->length, dir);
+-			if (addr == bad_dma_address) { 
+-				if (i > 0) 
++			if (addr == bad_dma_address) {
++				if (i > 0)
+ 					gart_unmap_sg(dev, sg, i, dir);
+-				nents = 0; 
++				nents = 0;
+ 				sg[0].dma_length = 0;
+ 				break;
+ 			}
+@@ -317,15 +350,16 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
+ 		s->dma_length = s->length;
+ 	}
+ 	flush_gart();
 +
-+	err |= __get_user(tmp, &sc->fpstate);
-+	buf = compat_ptr(tmp);
-+	if (buf) {
-+		if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
-+			goto badframe;
-+		err |= restore_i387_ia32(buf);
-+	} else {
-+		struct task_struct *me = current;
+ 	return nents;
+ }
  
--	{
--		u32 tmp;
--		struct _fpstate_ia32 __user * buf;
--		err |= __get_user(tmp, &sc->fpstate);
--		buf = compat_ptr(tmp);
--		if (buf) {
--			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
--				goto badframe;
--			err |= restore_i387_ia32(current, buf, 0);
--		} else {
--			struct task_struct *me = current;
--			if (used_math()) {
--				clear_fpu(me);
--				clear_used_math();
--			}
-+		if (used_math()) {
-+			clear_fpu(me);
-+			clear_used_math();
- 		}
- 	}
+ /* Map multiple scatterlist entries continuous into the first. */
+ static int __dma_map_cont(struct scatterlist *start, int nelems,
+-		      struct scatterlist *sout, unsigned long pages)
++			  struct scatterlist *sout, unsigned long pages)
+ {
+ 	unsigned long iommu_start = alloc_iommu(pages);
+-	unsigned long iommu_page = iommu_start; 
++	unsigned long iommu_page = iommu_start;
+ 	struct scatterlist *s;
+ 	int i;
  
--	{ 
--		u32 tmp;
--		err |= __get_user(tmp, &sc->eax);
--		*peax = tmp;
--	}
-+	err |= __get_user(tmp, &sc->ax);
-+	*peax = tmp;
+@@ -335,32 +369,33 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
+ 	for_each_sg(start, s, nelems, i) {
+ 		unsigned long pages, addr;
+ 		unsigned long phys_addr = s->dma_address;
+-		
 +
- 	return err;
+ 		BUG_ON(s != start && s->offset);
+ 		if (s == start) {
+ 			sout->dma_address = iommu_bus_base;
+ 			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
+ 			sout->dma_length = s->length;
+-		} else { 
+-			sout->dma_length += s->length; 
++		} else {
++			sout->dma_length += s->length;
+ 		}
  
- badframe:
-@@ -275,15 +278,16 @@ badframe:
+ 		addr = phys_addr;
+-		pages = to_pages(s->offset, s->length); 
+-		while (pages--) { 
+-			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); 
++		pages = to_pages(s->offset, s->length);
++		while (pages--) {
++			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
+ 			SET_LEAK(iommu_page);
+ 			addr += PAGE_SIZE;
+ 			iommu_page++;
+ 		}
+-	} 
+-	BUG_ON(iommu_page - iommu_start != pages);	
++	}
++	BUG_ON(iommu_page - iommu_start != pages);
++
+ 	return 0;
+ }
  
- asmlinkage long sys32_sigreturn(struct pt_regs *regs)
+-static inline int dma_map_cont(struct scatterlist *start, int nelems,
+-		      struct scatterlist *sout,
+-		      unsigned long pages, int need)
++static inline int
++dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
++	     unsigned long pages, int need)
  {
--	struct sigframe __user *frame = (struct sigframe __user *)(regs->rsp-8);
-+	struct sigframe __user *frame = (struct sigframe __user *)(regs->sp-8);
- 	sigset_t set;
--	unsigned int eax;
-+	unsigned int ax;
+ 	if (!need) {
+ 		BUG_ON(nelems != 1);
+@@ -370,22 +405,19 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems,
+ 	}
+ 	return __dma_map_cont(start, nelems, sout, pages);
+ }
+-		
++
+ /*
+  * DMA map all entries in a scatterlist.
+- * Merge chunks that have page aligned sizes into a continuous mapping. 
++ * Merge chunks that have page aligned sizes into a continuous mapping.
+  */
+-static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+-			int dir)
++static int
++gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+ {
+-	int i;
+-	int out;
+-	int start;
+-	unsigned long pages = 0;
+-	int need = 0, nextneed;
+ 	struct scatterlist *s, *ps, *start_sg, *sgmap;
++	int need = 0, nextneed, i, out, start;
++	unsigned long pages = 0;
  
- 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- 		goto badframe;
- 	if (__get_user(set.sig[0], &frame->sc.oldmask)
- 	    || (_COMPAT_NSIG_WORDS > 1
--		&& __copy_from_user((((char *) &set.sig) + 4), &frame->extramask,
-+		&& __copy_from_user((((char *) &set.sig) + 4),
-+				    &frame->extramask,
- 				    sizeof(frame->extramask))))
- 		goto badframe;
+-	if (nents == 0) 
++	if (nents == 0)
+ 		return 0;
  
-@@ -292,24 +296,24 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
- 	current->blocked = set;
- 	recalc_sigpending();
- 	spin_unlock_irq(&current->sighand->siglock);
--	
--	if (ia32_restore_sigcontext(regs, &frame->sc, &eax))
+ 	if (!dev)
+@@ -397,15 +429,19 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ 	ps = NULL; /* shut up gcc */
+ 	for_each_sg(sg, s, nents, i) {
+ 		dma_addr_t addr = sg_phys(s);
 +
-+	if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
- 		goto badframe;
--	return eax;
-+	return ax;
+ 		s->dma_address = addr;
+-		BUG_ON(s->length == 0); 
++		BUG_ON(s->length == 0);
  
- badframe:
- 	signal_fault(regs, frame, "32bit sigreturn");
+-		nextneed = need_iommu(dev, addr, s->length); 
++		nextneed = need_iommu(dev, addr, s->length);
+ 
+ 		/* Handle the previous not yet processed entries */
+ 		if (i > start) {
+-			/* Can only merge when the last chunk ends on a page 
+-			   boundary and the new one doesn't have an offset. */
++			/*
++			 * Can only merge when the last chunk ends on a
++			 * page boundary and the new one doesn't have an
++			 * offset.
++			 */
+ 			if (!iommu_merge || !nextneed || !need || s->offset ||
+ 			    (ps->offset + ps->length) % PAGE_SIZE) {
+ 				if (dma_map_cont(start_sg, i - start, sgmap,
+@@ -436,6 +472,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ error:
+ 	flush_gart();
+ 	gart_unmap_sg(dev, sg, out, dir);
++
+ 	/* When it was forced or merged try again in a dumb way */
+ 	if (force_iommu || iommu_merge) {
+ 		out = dma_map_sg_nonforce(dev, sg, nents, dir);
+@@ -444,64 +481,68 @@ error:
+ 	}
+ 	if (panic_on_overflow)
+ 		panic("dma_map_sg: overflow on %lu pages\n", pages);
++
+ 	iommu_full(dev, pages << PAGE_SHIFT, dir);
+ 	for_each_sg(sg, s, nents, i)
+ 		s->dma_address = bad_dma_address;
  	return 0;
--}	
+-} 
 +}
  
- asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
- {
- 	struct rt_sigframe __user *frame;
- 	sigset_t set;
--	unsigned int eax;
-+	unsigned int ax;
- 	struct pt_regs tregs;
+ static int no_agp;
  
--	frame = (struct rt_sigframe __user *)(regs->rsp - 4);
-+	frame = (struct rt_sigframe __user *)(regs->sp - 4);
+ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
+-{ 
+-	unsigned long a; 
+-	if (!iommu_size) { 
+-		iommu_size = aper_size; 
+-		if (!no_agp) 
+-			iommu_size /= 2; 
+-	} 
+-
+-	a = aper + iommu_size; 
++{
++	unsigned long a;
++
++	if (!iommu_size) {
++		iommu_size = aper_size;
++		if (!no_agp)
++			iommu_size /= 2;
++	}
++
++	a = aper + iommu_size;
+ 	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;
  
- 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- 		goto badframe;
-@@ -321,28 +325,28 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
- 	current->blocked = set;
- 	recalc_sigpending();
- 	spin_unlock_irq(&current->sighand->siglock);
+-	if (iommu_size < 64*1024*1024) 
++	if (iommu_size < 64*1024*1024) {
+ 		printk(KERN_WARNING
+-  "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20); 
 -	
--	if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
++			"PCI-DMA: Warning: Small IOMMU %luMB."
++			" Consider increasing the AGP aperture in BIOS\n",
++				iommu_size >> 20);
++	}
 +
-+	if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
- 		goto badframe;
+ 	return iommu_size;
+-} 
++}
  
- 	tregs = *regs;
- 	if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
- 		goto badframe;
+-static __init unsigned read_aperture(struct pci_dev *dev, u32 *size) 
+-{ 
+-	unsigned aper_size = 0, aper_base_32;
++static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
++{
++	unsigned aper_size = 0, aper_base_32, aper_order;
+ 	u64 aper_base;
+-	unsigned aper_order;
  
--	return eax;
-+	return ax;
+-	pci_read_config_dword(dev, 0x94, &aper_base_32); 
++	pci_read_config_dword(dev, 0x94, &aper_base_32);
+ 	pci_read_config_dword(dev, 0x90, &aper_order);
+-	aper_order = (aper_order >> 1) & 7;	
++	aper_order = (aper_order >> 1) & 7;
  
- badframe:
--	signal_fault(regs,frame,"32bit rt sigreturn");
-+	signal_fault(regs, frame, "32bit rt sigreturn");
- 	return 0;
--}	
+-	aper_base = aper_base_32 & 0x7fff; 
++	aper_base = aper_base_32 & 0x7fff;
+ 	aper_base <<= 25;
+ 
+-	aper_size = (32 * 1024 * 1024) << aper_order; 
+-       if (aper_base + aper_size > 0x100000000UL || !aper_size)
++	aper_size = (32 * 1024 * 1024) << aper_order;
++	if (aper_base + aper_size > 0x100000000UL || !aper_size)
+ 		aper_base = 0;
+ 
+ 	*size = aper_size;
+ 	return aper_base;
+-} 
 +}
  
- /*
-  * Set up a signal frame.
+-/* 
++/*
+  * Private Northbridge GATT initialization in case we cannot use the
+- * AGP driver for some reason.  
++ * AGP driver for some reason.
   */
+ static __init int init_k8_gatt(struct agp_kern_info *info)
+-{ 
++{
++	unsigned aper_size, gatt_size, new_aper_size;
++	unsigned aper_base, new_aper_base;
+ 	struct pci_dev *dev;
+ 	void *gatt;
+-	unsigned aper_base, new_aper_base;
+-	unsigned aper_size, gatt_size, new_aper_size;
+ 	int i;
  
--static int
--ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate,
--		 struct pt_regs *regs, unsigned int mask)
-+static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
-+				 struct _fpstate_ia32 __user *fpstate,
-+				 struct pt_regs *regs, unsigned int mask)
- {
- 	int tmp, err = 0;
+ 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
+@@ -509,75 +550,75 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
+ 	dev = NULL;
+ 	for (i = 0; i < num_k8_northbridges; i++) {
+ 		dev = k8_northbridges[i];
+-		new_aper_base = read_aperture(dev, &new_aper_size); 
+-		if (!new_aper_base) 
+-			goto nommu; 
+-		
+-		if (!aper_base) { 
++		new_aper_base = read_aperture(dev, &new_aper_size);
++		if (!new_aper_base)
++			goto nommu;
++
++		if (!aper_base) {
+ 			aper_size = new_aper_size;
+ 			aper_base = new_aper_base;
+-		} 
+-		if (aper_size != new_aper_size || aper_base != new_aper_base) 
++		}
++		if (aper_size != new_aper_size || aper_base != new_aper_base)
+ 			goto nommu;
+ 	}
+ 	if (!aper_base)
+-		goto nommu; 
++		goto nommu;
+ 	info->aper_base = aper_base;
+-	info->aper_size = aper_size>>20; 
++	info->aper_size = aper_size >> 20;
  
-@@ -356,26 +360,26 @@ ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __
- 	__asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp));
- 	err |= __put_user(tmp, (unsigned int __user *)&sc->es);
+-	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); 
+-	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size)); 
+-	if (!gatt) 
++	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
++	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
++	if (!gatt)
+ 		panic("Cannot allocate GATT table");
+-	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
++	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
+ 		panic("Could not set GART PTEs to uncacheable pages");
+-	global_flush_tlb();
  
--	err |= __put_user((u32)regs->rdi, &sc->edi);
--	err |= __put_user((u32)regs->rsi, &sc->esi);
--	err |= __put_user((u32)regs->rbp, &sc->ebp);
--	err |= __put_user((u32)regs->rsp, &sc->esp);
--	err |= __put_user((u32)regs->rbx, &sc->ebx);
--	err |= __put_user((u32)regs->rdx, &sc->edx);
--	err |= __put_user((u32)regs->rcx, &sc->ecx);
--	err |= __put_user((u32)regs->rax, &sc->eax);
-+	err |= __put_user((u32)regs->di, &sc->di);
-+	err |= __put_user((u32)regs->si, &sc->si);
-+	err |= __put_user((u32)regs->bp, &sc->bp);
-+	err |= __put_user((u32)regs->sp, &sc->sp);
-+	err |= __put_user((u32)regs->bx, &sc->bx);
-+	err |= __put_user((u32)regs->dx, &sc->dx);
-+	err |= __put_user((u32)regs->cx, &sc->cx);
-+	err |= __put_user((u32)regs->ax, &sc->ax);
- 	err |= __put_user((u32)regs->cs, &sc->cs);
- 	err |= __put_user((u32)regs->ss, &sc->ss);
- 	err |= __put_user(current->thread.trap_no, &sc->trapno);
- 	err |= __put_user(current->thread.error_code, &sc->err);
--	err |= __put_user((u32)regs->rip, &sc->eip);
--	err |= __put_user((u32)regs->eflags, &sc->eflags);
--	err |= __put_user((u32)regs->rsp, &sc->esp_at_signal);
-+	err |= __put_user((u32)regs->ip, &sc->ip);
-+	err |= __put_user((u32)regs->flags, &sc->flags);
-+	err |= __put_user((u32)regs->sp, &sc->sp_at_signal);
+-	memset(gatt, 0, gatt_size); 
++	memset(gatt, 0, gatt_size);
+ 	agp_gatt_table = gatt;
  
--	tmp = save_i387_ia32(current, fpstate, regs, 0);
-+	tmp = save_i387_ia32(fpstate);
- 	if (tmp < 0)
- 		err = -EFAULT;
--	else { 
-+	else {
- 		clear_used_math();
- 		stts();
- 		err |= __put_user(ptr_to_compat(tmp ? fpstate : NULL),
-@@ -392,40 +396,53 @@ ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __
- /*
-  * Determine which stack to use..
-  */
--static void __user *
--get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
-+static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
-+				 size_t frame_size)
- {
--	unsigned long rsp;
-+	unsigned long sp;
+ 	for (i = 0; i < num_k8_northbridges; i++) {
+-		u32 ctl; 
+-		u32 gatt_reg; 
++		u32 gatt_reg;
++		u32 ctl;
  
- 	/* Default to using normal stack */
--	rsp = regs->rsp;
-+	sp = regs->sp;
+ 		dev = k8_northbridges[i];
+-		gatt_reg = __pa(gatt) >> 12; 
+-		gatt_reg <<= 4; 
++		gatt_reg = __pa(gatt) >> 12;
++		gatt_reg <<= 4;
+ 		pci_write_config_dword(dev, 0x98, gatt_reg);
+-		pci_read_config_dword(dev, 0x90, &ctl); 
++		pci_read_config_dword(dev, 0x90, &ctl);
  
- 	/* This is the X/Open sanctioned signal stack switching.  */
- 	if (ka->sa.sa_flags & SA_ONSTACK) {
--		if (sas_ss_flags(rsp) == 0)
--			rsp = current->sas_ss_sp + current->sas_ss_size;
-+		if (sas_ss_flags(sp) == 0)
-+			sp = current->sas_ss_sp + current->sas_ss_size;
+ 		ctl |= 1;
+ 		ctl &= ~((1<<4) | (1<<5));
+ 
+-		pci_write_config_dword(dev, 0x90, ctl); 
++		pci_write_config_dword(dev, 0x90, ctl);
  	}
+ 	flush_gart();
+-	
+-	printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10); 
++
++	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
++	       aper_base, aper_size>>10);
+ 	return 0;
  
- 	/* This is the legacy signal stack switching. */
- 	else if ((regs->ss & 0xffff) != __USER_DS &&
- 		!(ka->sa.sa_flags & SA_RESTORER) &&
--		 ka->sa.sa_restorer) {
--		rsp = (unsigned long) ka->sa.sa_restorer;
--	}
-+		 ka->sa.sa_restorer)
-+		sp = (unsigned long) ka->sa.sa_restorer;
+  nommu:
+- 	/* Should not happen anymore */
++	/* Should not happen anymore */
+ 	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
+ 	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
+-	return -1; 
+-} 
++	return -1;
++}
  
--	rsp -= frame_size;
-+	sp -= frame_size;
- 	/* Align the stack pointer according to the i386 ABI,
- 	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
--	rsp = ((rsp + 4) & -16ul) - 4;
--	return (void __user *) rsp;
-+	sp = ((sp + 4) & -16ul) - 4;
-+	return (void __user *) sp;
- }
+ extern int agp_amd64_init(void);
  
- int ia32_setup_frame(int sig, struct k_sigaction *ka,
--		     compat_sigset_t *set, struct pt_regs * regs)
-+		     compat_sigset_t *set, struct pt_regs *regs)
- {
- 	struct sigframe __user *frame;
-+	void __user *restorer;
- 	int err = 0;
+ static const struct dma_mapping_ops gart_dma_ops = {
+-	.mapping_error = NULL,
+-	.map_single = gart_map_single,
+-	.map_simple = gart_map_simple,
+-	.unmap_single = gart_unmap_single,
+-	.sync_single_for_cpu = NULL,
+-	.sync_single_for_device = NULL,
+-	.sync_single_range_for_cpu = NULL,
+-	.sync_single_range_for_device = NULL,
+-	.sync_sg_for_cpu = NULL,
+-	.sync_sg_for_device = NULL,
+-	.map_sg = gart_map_sg,
+-	.unmap_sg = gart_unmap_sg,
++	.mapping_error			= NULL,
++	.map_single			= gart_map_single,
++	.map_simple			= gart_map_simple,
++	.unmap_single			= gart_unmap_single,
++	.sync_single_for_cpu		= NULL,
++	.sync_single_for_device		= NULL,
++	.sync_single_range_for_cpu	= NULL,
++	.sync_single_range_for_device	= NULL,
++	.sync_sg_for_cpu		= NULL,
++	.sync_sg_for_device		= NULL,
++	.map_sg				= gart_map_sg,
++	.unmap_sg			= gart_unmap_sg,
+ };
  
-+	/* copy_to_user optimizes that into a single 8 byte store */
-+	static const struct {
-+		u16 poplmovl;
-+		u32 val;
-+		u16 int80;
-+		u16 pad;
-+	} __attribute__((packed)) code = {
-+		0xb858,		 /* popl %eax ; movl $...,%eax */
-+		__NR_ia32_sigreturn,
-+		0x80cd,		/* int $0x80 */
-+		0,
-+	};
-+
- 	frame = get_sigframe(ka, regs, sizeof(*frame));
+ void gart_iommu_shutdown(void)
+@@ -588,23 +629,23 @@ void gart_iommu_shutdown(void)
+ 	if (no_agp && (dma_ops != &gart_dma_ops))
+ 		return;
  
- 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-@@ -443,64 +460,53 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
- 	if (_COMPAT_NSIG_WORDS > 1) {
- 		err |= __copy_to_user(frame->extramask, &set->sig[1],
- 				      sizeof(frame->extramask));
-+		if (err)
-+			goto give_sigsegv;
+-        for (i = 0; i < num_k8_northbridges; i++) {
+-                u32 ctl;
++	for (i = 0; i < num_k8_northbridges; i++) {
++		u32 ctl;
+ 
+-                dev = k8_northbridges[i];
+-                pci_read_config_dword(dev, 0x90, &ctl);
++		dev = k8_northbridges[i];
++		pci_read_config_dword(dev, 0x90, &ctl);
+ 
+-                ctl &= ~1;
++		ctl &= ~1;
+ 
+-                pci_write_config_dword(dev, 0x90, ctl);
+-        }
++		pci_write_config_dword(dev, 0x90, ctl);
++	}
+ }
+ 
+ void __init gart_iommu_init(void)
+-{ 
++{
+ 	struct agp_kern_info info;
+-	unsigned long aper_size;
+ 	unsigned long iommu_start;
++	unsigned long aper_size;
+ 	unsigned long scratch;
+ 	long i;
+ 
+@@ -614,14 +655,14 @@ void __init gart_iommu_init(void)
  	}
--	if (err)
--		goto give_sigsegv;
  
--	/* Return stub is in 32bit vsyscall page */
--	{ 
--		void __user *restorer;
-+	if (ka->sa.sa_flags & SA_RESTORER) {
-+		restorer = ka->sa.sa_restorer;
-+	} else {
-+		/* Return stub is in 32bit vsyscall page */
- 		if (current->binfmt->hasvdso)
--			restorer = VSYSCALL32_SIGRETURN;
-+			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-+						 sigreturn);
- 		else
--			restorer = (void *)&frame->retcode;
--		if (ka->sa.sa_flags & SA_RESTORER)
--			restorer = ka->sa.sa_restorer;       
--		err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
--	}
--	/* These are actually not used anymore, but left because some 
--	   gdb versions depend on them as a marker. */
--	{ 
--		/* copy_to_user optimizes that into a single 8 byte store */
--		static const struct { 
--			u16 poplmovl;
--			u32 val;
--			u16 int80;    
--			u16 pad; 
--		} __attribute__((packed)) code = { 
--			0xb858,		 /* popl %eax ; movl $...,%eax */
--			__NR_ia32_sigreturn,   
--			0x80cd,		/* int $0x80 */
--			0,
--		}; 
--		err |= __copy_to_user(frame->retcode, &code, 8); 
-+			restorer = &frame->retcode;
+ #ifndef CONFIG_AGP_AMD64
+-	no_agp = 1; 
++	no_agp = 1;
+ #else
+ 	/* Makefile puts PCI initialization via subsys_initcall first. */
+ 	/* Add other K8 AGP bridge drivers here */
+-	no_agp = no_agp || 
+-		(agp_amd64_init() < 0) || 
++	no_agp = no_agp ||
++		(agp_amd64_init() < 0) ||
+ 		(agp_copy_info(agp_bridge, &info) < 0);
+-#endif	
++#endif
+ 
+ 	if (swiotlb)
+ 		return;
+@@ -643,77 +684,78 @@ void __init gart_iommu_init(void)
  	}
-+	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
+ 
+ 	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
+-	aper_size = info.aper_size * 1024 * 1024;	
+-	iommu_size = check_iommu_size(info.aper_base, aper_size); 
+-	iommu_pages = iommu_size >> PAGE_SHIFT; 
+-
+-	iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL, 
+-						    get_order(iommu_pages/8)); 
+-	if (!iommu_gart_bitmap) 
+-		panic("Cannot allocate iommu bitmap\n"); 
++	aper_size = info.aper_size * 1024 * 1024;
++	iommu_size = check_iommu_size(info.aper_base, aper_size);
++	iommu_pages = iommu_size >> PAGE_SHIFT;
 +
++	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
++						      get_order(iommu_pages/8));
++	if (!iommu_gart_bitmap)
++		panic("Cannot allocate iommu bitmap\n");
+ 	memset(iommu_gart_bitmap, 0, iommu_pages/8);
+ 
+ #ifdef CONFIG_IOMMU_LEAK
+-	if (leak_trace) { 
+-		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL, 
++	if (leak_trace) {
++		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
+ 				  get_order(iommu_pages*sizeof(void *)));
+-		if (iommu_leak_tab) 
+-			memset(iommu_leak_tab, 0, iommu_pages * 8); 
++		if (iommu_leak_tab)
++			memset(iommu_leak_tab, 0, iommu_pages * 8);
+ 		else
+-			printk("PCI-DMA: Cannot allocate leak trace area\n"); 
+-	} 
++			printk(KERN_DEBUG
++			       "PCI-DMA: Cannot allocate leak trace area\n");
++	}
+ #endif
+ 
+-	/* 
 +	/*
-+	 * These are actually not used anymore, but left because some
-+	 * gdb versions depend on them as a marker.
+ 	 * Out of IOMMU space handling.
+-	 * Reserve some invalid pages at the beginning of the GART. 
+-	 */ 
+-	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 
++	 * Reserve some invalid pages at the beginning of the GART.
 +	 */
-+	err |= __copy_to_user(frame->retcode, &code, 8);
- 	if (err)
- 		goto give_sigsegv;
++	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
  
- 	/* Set up registers for signal handler */
--	regs->rsp = (unsigned long) frame;
--	regs->rip = (unsigned long) ka->sa.sa_handler;
-+	regs->sp = (unsigned long) frame;
-+	regs->ip = (unsigned long) ka->sa.sa_handler;
+-	agp_memory_reserved = iommu_size;	
++	agp_memory_reserved = iommu_size;
+ 	printk(KERN_INFO
+ 	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
+-	       iommu_size>>20); 
++	       iommu_size >> 20);
  
- 	/* Make -mregparm=3 work */
--	regs->rax = sig;
--	regs->rdx = 0;
--	regs->rcx = 0;
-+	regs->ax = sig;
-+	regs->dx = 0;
-+	regs->cx = 0;
+-	iommu_start = aper_size - iommu_size;	
+-	iommu_bus_base = info.aper_base + iommu_start; 
++	iommu_start = aper_size - iommu_size;
++	iommu_bus_base = info.aper_base + iommu_start;
+ 	bad_dma_address = iommu_bus_base;
+ 	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
  
--	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 
--	asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 
-+	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
-+	asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+-	/* 
++	/*
+ 	 * Unmap the IOMMU part of the GART. The alias of the page is
+ 	 * always mapped with cache enabled and there is no full cache
+ 	 * coherency across the GART remapping. The unmapping avoids
+ 	 * automatic prefetches from the CPU allocating cache lines in
+ 	 * there. All CPU accesses are done via the direct mapping to
+ 	 * the backing memory. The GART address is only used by PCI
+-	 * devices. 
++	 * devices.
+ 	 */
+ 	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
  
--	regs->cs = __USER32_CS; 
--	regs->ss = __USER32_DS; 
-+	regs->cs = __USER32_CS;
-+	regs->ss = __USER32_DS;
+-	/* 
+-	 * Try to workaround a bug (thanks to BenH) 
+-	 * Set unmapped entries to a scratch page instead of 0. 
++	/*
++	 * Try to workaround a bug (thanks to BenH)
++	 * Set unmapped entries to a scratch page instead of 0.
+ 	 * Any prefetches that hit unmapped entries won't get an bus abort
+ 	 * then.
+ 	 */
+-	scratch = get_zeroed_page(GFP_KERNEL); 
+-	if (!scratch) 
++	scratch = get_zeroed_page(GFP_KERNEL);
++	if (!scratch)
+ 		panic("Cannot allocate iommu scratch page");
+ 	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
+-	for (i = EMERGENCY_PAGES; i < iommu_pages; i++) 
++	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
+ 		iommu_gatt_base[i] = gart_unmapped_entry;
  
- 	set_fs(USER_DS);
--	regs->eflags &= ~TF_MASK;
-+	regs->flags &= ~X86_EFLAGS_TF;
- 	if (test_thread_flag(TIF_SINGLESTEP))
- 		ptrace_notify(SIGTRAP);
+ 	flush_gart();
+ 	dma_ops = &gart_dma_ops;
+-} 
++}
  
- #if DEBUG_SIG
--	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
--		current->comm, current->pid, frame, regs->rip, frame->pretcode);
-+	printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
-+	       current->comm, current->pid, frame, regs->ip, frame->pretcode);
+ void __init gart_parse_options(char *p)
+ {
+ 	int arg;
+ 
+ #ifdef CONFIG_IOMMU_LEAK
+-	if (!strncmp(p,"leak",4)) {
++	if (!strncmp(p, "leak", 4)) {
+ 		leak_trace = 1;
+ 		p += 4;
+ 		if (*p == '=') ++p;
+@@ -723,18 +765,18 @@ void __init gart_parse_options(char *p)
  #endif
+ 	if (isdigit(*p) && get_option(&p, &arg))
+ 		iommu_size = arg;
+-	if (!strncmp(p, "fullflush",8))
++	if (!strncmp(p, "fullflush", 8))
+ 		iommu_fullflush = 1;
+-	if (!strncmp(p, "nofullflush",11))
++	if (!strncmp(p, "nofullflush", 11))
+ 		iommu_fullflush = 0;
+-	if (!strncmp(p,"noagp",5))
++	if (!strncmp(p, "noagp", 5))
+ 		no_agp = 1;
+-	if (!strncmp(p, "noaperture",10))
++	if (!strncmp(p, "noaperture", 10))
+ 		fix_aperture = 0;
+ 	/* duplicated from pci-dma.c */
+-	if (!strncmp(p,"force",5))
++	if (!strncmp(p, "force", 5))
+ 		gart_iommu_aperture_allowed = 1;
+-	if (!strncmp(p,"allowed",7))
++	if (!strncmp(p, "allowed", 7))
+ 		gart_iommu_aperture_allowed = 1;
+ 	if (!strncmp(p, "memaper", 7)) {
+ 		fallback_aper_force = 1;
+diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
+index 102866d..82a0a67 100644
+--- a/arch/x86/kernel/pci-swiotlb_64.c
++++ b/arch/x86/kernel/pci-swiotlb_64.c
+@@ -10,7 +10,6 @@
+ #include <asm/dma.h>
  
- 	return 0;
-@@ -511,25 +517,34 @@ give_sigsegv:
+ int swiotlb __read_mostly;
+-EXPORT_SYMBOL(swiotlb);
+ 
+ const struct dma_mapping_ops swiotlb_dma_ops = {
+ 	.mapping_error = swiotlb_dma_mapping_error,
+diff --git a/arch/x86/kernel/pmtimer_64.c b/arch/x86/kernel/pmtimer_64.c
+index ae8f912..b112406 100644
+--- a/arch/x86/kernel/pmtimer_64.c
++++ b/arch/x86/kernel/pmtimer_64.c
+@@ -19,13 +19,13 @@
+ #include <linux/time.h>
+ #include <linux/init.h>
+ #include <linux/cpumask.h>
++#include <linux/acpi_pmtmr.h>
++
+ #include <asm/io.h>
+ #include <asm/proto.h>
+ #include <asm/msr.h>
+ #include <asm/vsyscall.h>
+ 
+-#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
+-
+ static inline u32 cyc2us(u32 cycles)
+ {
+ 	/* The Power Management Timer ticks at 3.579545 ticks per microsecond.
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 46d391d..968371a 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -55,6 +55,7 @@
+ 
+ #include <asm/tlbflush.h>
+ #include <asm/cpu.h>
++#include <asm/kdebug.h>
+ 
+ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+ 
+@@ -74,7 +75,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number);
+  */
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+-	return ((unsigned long *)tsk->thread.esp)[3];
++	return ((unsigned long *)tsk->thread.sp)[3];
  }
  
- int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
--			compat_sigset_t *set, struct pt_regs * regs)
-+			compat_sigset_t *set, struct pt_regs *regs)
+ /*
+@@ -113,10 +114,19 @@ void default_idle(void)
+ 		smp_mb();
+ 
+ 		local_irq_disable();
+-		if (!need_resched())
++		if (!need_resched()) {
++			ktime_t t0, t1;
++			u64 t0n, t1n;
++
++			t0 = ktime_get();
++			t0n = ktime_to_ns(t0);
+ 			safe_halt();	/* enables interrupts racelessly */
+-		else
+-			local_irq_enable();
++			local_irq_disable();
++			t1 = ktime_get();
++			t1n = ktime_to_ns(t1);
++			sched_clock_idle_wakeup_event(t1n - t0n);
++		}
++		local_irq_enable();
+ 		current_thread_info()->status |= TS_POLLING;
+ 	} else {
+ 		/* loop is done by the caller */
+@@ -132,7 +142,7 @@ EXPORT_SYMBOL(default_idle);
+  * to poll the ->work.need_resched flag instead of waiting for the
+  * cross-CPU IPI to arrive. Use this option with caution.
+  */
+-static void poll_idle (void)
++static void poll_idle(void)
  {
- 	struct rt_sigframe __user *frame;
-+	struct exec_domain *ed = current_thread_info()->exec_domain;
-+	void __user *restorer;
- 	int err = 0;
+ 	cpu_relax();
+ }
+@@ -188,6 +198,9 @@ void cpu_idle(void)
+ 			rmb();
+ 			idle = pm_idle;
  
-+	/* __copy_to_user optimizes that into a single 8 byte store */
-+	static const struct {
-+		u8 movl;
-+		u32 val;
-+		u16 int80;
-+		u16 pad;
-+		u8  pad2;
-+	} __attribute__((packed)) code = {
-+		0xb8,
-+		__NR_ia32_rt_sigreturn,
-+		0x80cd,
-+		0,
-+	};
++			if (rcu_pending(cpu))
++				rcu_check_callbacks(cpu, 0);
 +
- 	frame = get_sigframe(ka, regs, sizeof(*frame));
+ 			if (!idle)
+ 				idle = default_idle;
  
- 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- 		goto give_sigsegv;
+@@ -255,13 +268,13 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
+  * New with Core Duo processors, MWAIT can take some hints based on CPU
+  * capability.
+  */
+-void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
++void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+ {
+ 	if (!need_resched()) {
+ 		__monitor((void *)&current_thread_info()->flags, 0, 0);
+ 		smp_mb();
+ 		if (!need_resched())
+-			__mwait(eax, ecx);
++			__mwait(ax, cx);
+ 	}
+ }
  
--	{
--		struct exec_domain *ed = current_thread_info()->exec_domain;
--		err |= __put_user((ed
--		    	   && ed->signal_invmap
--		    	   && sig < 32
--		    	   ? ed->signal_invmap[sig]
--			   : sig),
--			  &frame->sig);
--	}
-+	err |= __put_user((ed && ed->signal_invmap && sig < 32
-+			   ? ed->signal_invmap[sig] : sig), &frame->sig);
- 	err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
- 	err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
- 	err |= copy_siginfo_to_user32(&frame->info, info);
-@@ -540,73 +555,58 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 	err |= __put_user(0, &frame->uc.uc_flags);
- 	err |= __put_user(0, &frame->uc.uc_link);
- 	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
--	err |= __put_user(sas_ss_flags(regs->rsp),
-+	err |= __put_user(sas_ss_flags(regs->sp),
- 			  &frame->uc.uc_stack.ss_flags);
- 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- 	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
--			        regs, set->sig[0]);
-+				     regs, set->sig[0]);
- 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- 	if (err)
- 		goto give_sigsegv;
+@@ -272,19 +285,37 @@ static void mwait_idle(void)
+ 	mwait_idle_with_hints(0, 0);
+ }
  
--	
--	{ 
--		void __user *restorer = VSYSCALL32_RTSIGRETURN; 
--		if (ka->sa.sa_flags & SA_RESTORER)
--			restorer = ka->sa.sa_restorer;       
--		err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
--	}
--
--	/* This is movl $,%eax ; int $0x80 */
--	/* Not actually used anymore, but left because some gdb versions
--	   need it. */ 
--	{ 
--		/* __copy_to_user optimizes that into a single 8 byte store */
--		static const struct { 
--			u8 movl; 
--			u32 val; 
--			u16 int80; 
--			u16 pad;
--			u8  pad2;				
--		} __attribute__((packed)) code = { 
--			0xb8,
--			__NR_ia32_rt_sigreturn,
--			0x80cd,
--			0,
--		}; 
--		err |= __copy_to_user(frame->retcode, &code, 8); 
--	} 
-+	if (ka->sa.sa_flags & SA_RESTORER)
-+		restorer = ka->sa.sa_restorer;
-+	else
-+		restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-+					 rt_sigreturn);
-+	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
++static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
++{
++	if (force_mwait)
++		return 1;
++	/* Any C1 states supported? */
++	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
++}
 +
-+	/*
-+	 * Not actually used anymore, but left because some gdb
-+	 * versions need it.
-+	 */
-+	err |= __copy_to_user(frame->retcode, &code, 8);
- 	if (err)
- 		goto give_sigsegv;
+ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+ {
+-	if (cpu_has(c, X86_FEATURE_MWAIT)) {
+-		printk("monitor/mwait feature present.\n");
++	static int selected;
++
++	if (selected)
++		return;
++#ifdef CONFIG_X86_SMP
++	if (pm_idle == poll_idle && smp_num_siblings > 1) {
++		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
++			" performance may degrade.\n");
++	}
++#endif
++	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
+ 		/*
+ 		 * Skip, if setup has overridden idle.
+ 		 * One CPU supports mwait => All CPUs supports mwait
+ 		 */
+ 		if (!pm_idle) {
+-			printk("using mwait in idle threads.\n");
++			printk(KERN_INFO "using mwait in idle threads.\n");
+ 			pm_idle = mwait_idle;
+ 		}
+ 	}
++	selected = 1;
+ }
  
- 	/* Set up registers for signal handler */
--	regs->rsp = (unsigned long) frame;
--	regs->rip = (unsigned long) ka->sa.sa_handler;
-+	regs->sp = (unsigned long) frame;
-+	regs->ip = (unsigned long) ka->sa.sa_handler;
+ static int __init idle_setup(char *str)
+@@ -292,10 +323,6 @@ static int __init idle_setup(char *str)
+ 	if (!strcmp(str, "poll")) {
+ 		printk("using polling idle threads.\n");
+ 		pm_idle = poll_idle;
+-#ifdef CONFIG_X86_SMP
+-		if (smp_num_siblings > 1)
+-			printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
+-#endif
+ 	} else if (!strcmp(str, "mwait"))
+ 		force_mwait = 1;
+ 	else
+@@ -310,15 +337,15 @@ void __show_registers(struct pt_regs *regs, int all)
+ {
+ 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
+ 	unsigned long d0, d1, d2, d3, d6, d7;
+-	unsigned long esp;
++	unsigned long sp;
+ 	unsigned short ss, gs;
  
- 	/* Make -mregparm=3 work */
--	regs->rax = sig;
--	regs->rdx = (unsigned long) &frame->info;
--	regs->rcx = (unsigned long) &frame->uc;
-+	regs->ax = sig;
-+	regs->dx = (unsigned long) &frame->info;
-+	regs->cx = (unsigned long) &frame->uc;
+ 	if (user_mode_vm(regs)) {
+-		esp = regs->esp;
+-		ss = regs->xss & 0xffff;
++		sp = regs->sp;
++		ss = regs->ss & 0xffff;
+ 		savesegment(gs, gs);
+ 	} else {
+-		esp = (unsigned long) (&regs->esp);
++		sp = (unsigned long) (&regs->sp);
+ 		savesegment(ss, ss);
+ 		savesegment(gs, gs);
+ 	}
+@@ -331,17 +358,17 @@ void __show_registers(struct pt_regs *regs, int all)
+ 			init_utsname()->version);
  
- 	/* Make -mregparm=3 work */
--	regs->rax = sig;
--	regs->rdx = (unsigned long) &frame->info;
--	regs->rcx = (unsigned long) &frame->uc;
-+	regs->ax = sig;
-+	regs->dx = (unsigned long) &frame->info;
-+	regs->cx = (unsigned long) &frame->uc;
-+
-+	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
-+	asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+ 	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
+-			0xffff & regs->xcs, regs->eip, regs->eflags,
++			0xffff & regs->cs, regs->ip, regs->flags,
+ 			smp_processor_id());
+-	print_symbol("EIP is at %s\n", regs->eip);
++	print_symbol("EIP is at %s\n", regs->ip);
  
--	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 
--	asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 
--	
--	regs->cs = __USER32_CS; 
--	regs->ss = __USER32_DS; 
-+	regs->cs = __USER32_CS;
-+	regs->ss = __USER32_DS;
+ 	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+-		regs->eax, regs->ebx, regs->ecx, regs->edx);
++		regs->ax, regs->bx, regs->cx, regs->dx);
+ 	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
+-		regs->esi, regs->edi, regs->ebp, esp);
++		regs->si, regs->di, regs->bp, sp);
+ 	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
+-	       regs->xds & 0xffff, regs->xes & 0xffff,
+-	       regs->xfs & 0xffff, gs, ss);
++	       regs->ds & 0xffff, regs->es & 0xffff,
++	       regs->fs & 0xffff, gs, ss);
  
- 	set_fs(USER_DS);
--	regs->eflags &= ~TF_MASK;
-+	regs->flags &= ~X86_EFLAGS_TF;
- 	if (test_thread_flag(TIF_SINGLESTEP))
- 		ptrace_notify(SIGTRAP);
+ 	if (!all)
+ 		return;
+@@ -369,12 +396,12 @@ void __show_registers(struct pt_regs *regs, int all)
+ void show_regs(struct pt_regs *regs)
+ {
+ 	__show_registers(regs, 1);
+-	show_trace(NULL, regs, &regs->esp);
++	show_trace(NULL, regs, &regs->sp, regs->bp);
+ }
  
- #if DEBUG_SIG
--	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
--		current->comm, current->pid, frame, regs->rip, frame->pretcode);
-+	printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
-+	       current->comm, current->pid, frame, regs->ip, frame->pretcode);
- #endif
+ /*
+- * This gets run with %ebx containing the
+- * function to call, and %edx containing
++ * This gets run with %bx containing the
++ * function to call, and %dx containing
+  * the "args".
+  */
+ extern void kernel_thread_helper(void);
+@@ -388,16 +415,16 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
  
- 	return 0;
-diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index df588f0..0db0a62 100644
---- a/arch/x86/ia32/ia32entry.S
-+++ b/arch/x86/ia32/ia32entry.S
-@@ -12,7 +12,6 @@
- #include <asm/ia32_unistd.h>	
- #include <asm/thread_info.h>	
- #include <asm/segment.h>
--#include <asm/vsyscall32.h>
- #include <asm/irqflags.h>
- #include <linux/linkage.h>
+ 	memset(&regs, 0, sizeof(regs));
  
-@@ -104,7 +103,7 @@ ENTRY(ia32_sysenter_target)
- 	pushfq
- 	CFI_ADJUST_CFA_OFFSET 8
- 	/*CFI_REL_OFFSET rflags,0*/
--	movl	$VSYSCALL32_SYSEXIT, %r10d
-+	movl	8*3-THREAD_SIZE+threadinfo_sysenter_return(%rsp), %r10d
- 	CFI_REGISTER rip,r10
- 	pushq	$__USER32_CS
- 	CFI_ADJUST_CFA_OFFSET 8
-@@ -142,6 +141,8 @@ sysenter_do_call:
- 	andl    $~TS_COMPAT,threadinfo_status(%r10)
- 	/* clear IF, that popfq doesn't enable interrupts early */
- 	andl  $~0x200,EFLAGS-R11(%rsp) 
-+	movl	RIP-R11(%rsp),%edx		/* User %eip */
-+	CFI_REGISTER rip,rdx
- 	RESTORE_ARGS 1,24,1,1,1,1
- 	popfq
- 	CFI_ADJUST_CFA_OFFSET -8
-@@ -149,8 +150,6 @@ sysenter_do_call:
- 	popq	%rcx				/* User %esp */
- 	CFI_ADJUST_CFA_OFFSET -8
- 	CFI_REGISTER rsp,rcx
--	movl	$VSYSCALL32_SYSEXIT,%edx	/* User %eip */
--	CFI_REGISTER rip,rdx
- 	TRACE_IRQS_ON
- 	swapgs
- 	sti		/* sti only takes effect after the next instruction */
-@@ -644,8 +643,8 @@ ia32_sys_call_table:
- 	.quad compat_sys_futex		/* 240 */
- 	.quad compat_sys_sched_setaffinity
- 	.quad compat_sys_sched_getaffinity
--	.quad sys32_set_thread_area
--	.quad sys32_get_thread_area
-+	.quad sys_set_thread_area
-+	.quad sys_get_thread_area
- 	.quad compat_sys_io_setup	/* 245 */
- 	.quad sys_io_destroy
- 	.quad compat_sys_io_getevents
-diff --git a/arch/x86/ia32/ipc32.c b/arch/x86/ia32/ipc32.c
-index 7b3342e..d21991c 100644
---- a/arch/x86/ia32/ipc32.c
-+++ b/arch/x86/ia32/ipc32.c
-@@ -9,9 +9,8 @@
- #include <linux/ipc.h>
- #include <linux/compat.h>
+-	regs.ebx = (unsigned long) fn;
+-	regs.edx = (unsigned long) arg;
++	regs.bx = (unsigned long) fn;
++	regs.dx = (unsigned long) arg;
  
--asmlinkage long
--sys32_ipc(u32 call, int first, int second, int third,
--		compat_uptr_t ptr, u32 fifth)
-+asmlinkage long sys32_ipc(u32 call, int first, int second, int third,
-+			  compat_uptr_t ptr, u32 fifth)
+-	regs.xds = __USER_DS;
+-	regs.xes = __USER_DS;
+-	regs.xfs = __KERNEL_PERCPU;
+-	regs.orig_eax = -1;
+-	regs.eip = (unsigned long) kernel_thread_helper;
+-	regs.xcs = __KERNEL_CS | get_kernel_rpl();
+-	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
++	regs.ds = __USER_DS;
++	regs.es = __USER_DS;
++	regs.fs = __KERNEL_PERCPU;
++	regs.orig_ax = -1;
++	regs.ip = (unsigned long) kernel_thread_helper;
++	regs.cs = __KERNEL_CS | get_kernel_rpl();
++	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
+ 
+ 	/* Ok, create the new process.. */
+ 	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+@@ -435,7 +462,12 @@ void flush_thread(void)
  {
- 	int version;
+ 	struct task_struct *tsk = current;
  
-@@ -19,36 +18,35 @@ sys32_ipc(u32 call, int first, int second, int third,
- 	call &= 0xffff;
+-	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
++	tsk->thread.debugreg0 = 0;
++	tsk->thread.debugreg1 = 0;
++	tsk->thread.debugreg2 = 0;
++	tsk->thread.debugreg3 = 0;
++	tsk->thread.debugreg6 = 0;
++	tsk->thread.debugreg7 = 0;
+ 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
+ 	clear_tsk_thread_flag(tsk, TIF_DEBUG);
+ 	/*
+@@ -460,7 +492,7 @@ void prepare_to_copy(struct task_struct *tsk)
+ 	unlazy_fpu(tsk);
+ }
  
- 	switch (call) {
--	      case SEMOP:
-+	case SEMOP:
- 		/* struct sembuf is the same on 32 and 64bit :)) */
- 		return sys_semtimedop(first, compat_ptr(ptr), second, NULL);
--	      case SEMTIMEDOP:
-+	case SEMTIMEDOP:
- 		return compat_sys_semtimedop(first, compat_ptr(ptr), second,
- 						compat_ptr(fifth));
--	      case SEMGET:
-+	case SEMGET:
- 		return sys_semget(first, second, third);
--	      case SEMCTL:
-+	case SEMCTL:
- 		return compat_sys_semctl(first, second, third, compat_ptr(ptr));
+-int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
++int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+ 	unsigned long unused,
+ 	struct task_struct * p, struct pt_regs * regs)
+ {
+@@ -470,15 +502,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
  
--	      case MSGSND:
-+	case MSGSND:
- 		return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
--	      case MSGRCV:
-+	case MSGRCV:
- 		return compat_sys_msgrcv(first, second, fifth, third,
- 					 version, compat_ptr(ptr));
--	      case MSGGET:
-+	case MSGGET:
- 		return sys_msgget((key_t) first, second);
--	      case MSGCTL:
-+	case MSGCTL:
- 		return compat_sys_msgctl(first, second, compat_ptr(ptr));
+ 	childregs = task_pt_regs(p);
+ 	*childregs = *regs;
+-	childregs->eax = 0;
+-	childregs->esp = esp;
++	childregs->ax = 0;
++	childregs->sp = sp;
  
--	      case SHMAT:
-+	case SHMAT:
- 		return compat_sys_shmat(first, second, third, version,
- 					compat_ptr(ptr));
--		break;
--	      case SHMDT:
-+	case SHMDT:
- 		return sys_shmdt(compat_ptr(ptr));
--	      case SHMGET:
-+	case SHMGET:
- 		return sys_shmget(first, (unsigned)second, third);
--	      case SHMCTL:
-+	case SHMCTL:
- 		return compat_sys_shmctl(first, second, compat_ptr(ptr));
+-	p->thread.esp = (unsigned long) childregs;
+-	p->thread.esp0 = (unsigned long) (childregs+1);
++	p->thread.sp = (unsigned long) childregs;
++	p->thread.sp0 = (unsigned long) (childregs+1);
+ 
+-	p->thread.eip = (unsigned long) ret_from_fork;
++	p->thread.ip = (unsigned long) ret_from_fork;
+ 
+-	savesegment(gs,p->thread.gs);
++	savesegment(gs, p->thread.gs);
+ 
+ 	tsk = current;
+ 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
+@@ -491,32 +523,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+ 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
  	}
- 	return -ENOSYS;
-diff --git a/arch/x86/ia32/mmap32.c b/arch/x86/ia32/mmap32.c
-deleted file mode 100644
-index e4b84b4..0000000
---- a/arch/x86/ia32/mmap32.c
-+++ /dev/null
-@@ -1,79 +0,0 @@
--/*
-- *  linux/arch/x86_64/ia32/mm/mmap.c
-- *
-- *  flexible mmap layout support
-- *
-- * Based on the i386 version which was
-- *
-- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
-- * All Rights Reserved.
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, write to the Free Software
-- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-- *
-- *
-- * Started by Ingo Molnar <mingo at elte.hu>
-- */
--
--#include <linux/personality.h>
--#include <linux/mm.h>
--#include <linux/random.h>
--#include <linux/sched.h>
--
--/*
-- * Top of mmap area (just below the process stack).
-- *
-- * Leave an at least ~128 MB hole.
-- */
--#define MIN_GAP (128*1024*1024)
--#define MAX_GAP (TASK_SIZE/6*5)
--
--static inline unsigned long mmap_base(struct mm_struct *mm)
--{
--	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
--	unsigned long random_factor = 0;
--
--	if (current->flags & PF_RANDOMIZE)
--		random_factor = get_random_int() % (1024*1024);
+ 
++	err = 0;
++
+ 	/*
+ 	 * Set a new TLS for the child thread?
+ 	 */
+-	if (clone_flags & CLONE_SETTLS) {
+-		struct desc_struct *desc;
+-		struct user_desc info;
+-		int idx;
 -
--	if (gap < MIN_GAP)
--		gap = MIN_GAP;
--	else if (gap > MAX_GAP)
--		gap = MAX_GAP;
+-		err = -EFAULT;
+-		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
+-			goto out;
+-		err = -EINVAL;
+-		if (LDT_empty(&info))
+-			goto out;
 -
--	return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
--}
+-		idx = info.entry_number;
+-		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+-			goto out;
 -
--/*
-- * This function, called very early during the creation of a new
-- * process VM image, sets up which VM layout function to use:
-- */
--void ia32_pick_mmap_layout(struct mm_struct *mm)
--{
--	/*
--	 * Fall back to the standard layout if the personality
--	 * bit is set, or if the expected stack growth is unlimited:
--	 */
--	if (sysctl_legacy_va_layout ||
--			(current->personality & ADDR_COMPAT_LAYOUT) ||
--			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
--		mm->mmap_base = TASK_UNMAPPED_BASE;
--		mm->get_unmapped_area = arch_get_unmapped_area;
--		mm->unmap_area = arch_unmap_area;
--	} else {
--		mm->mmap_base = mmap_base(mm);
--		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
--		mm->unmap_area = arch_unmap_area_topdown;
+-		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+-		desc->a = LDT_entry_a(&info);
+-		desc->b = LDT_entry_b(&info);
 -	}
--}
-diff --git a/arch/x86/ia32/ptrace32.c b/arch/x86/ia32/ptrace32.c
-deleted file mode 100644
-index 4a233ad..0000000
---- a/arch/x86/ia32/ptrace32.c
-+++ /dev/null
-@@ -1,404 +0,0 @@
++	if (clone_flags & CLONE_SETTLS)
++		err = do_set_thread_area(p, -1,
++			(struct user_desc __user *)childregs->si, 0);
+ 
+-	err = 0;
+- out:
+ 	if (err && p->thread.io_bitmap_ptr) {
+ 		kfree(p->thread.io_bitmap_ptr);
+ 		p->thread.io_bitmap_max = 0;
+@@ -529,62 +544,52 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+  */
+ void dump_thread(struct pt_regs * regs, struct user * dump)
+ {
+-	int i;
++	u16 gs;
+ 
+ /* changed the size calculations - should hopefully work better. lbt */
+ 	dump->magic = CMAGIC;
+ 	dump->start_code = 0;
+-	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
++	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
+ 	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+ 	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ 	dump->u_dsize -= dump->u_tsize;
+ 	dump->u_ssize = 0;
+-	for (i = 0; i < 8; i++)
+-		dump->u_debugreg[i] = current->thread.debugreg[i];  
++	dump->u_debugreg[0] = current->thread.debugreg0;
++	dump->u_debugreg[1] = current->thread.debugreg1;
++	dump->u_debugreg[2] = current->thread.debugreg2;
++	dump->u_debugreg[3] = current->thread.debugreg3;
++	dump->u_debugreg[4] = 0;
++	dump->u_debugreg[5] = 0;
++	dump->u_debugreg[6] = current->thread.debugreg6;
++	dump->u_debugreg[7] = current->thread.debugreg7;
+ 
+ 	if (dump->start_stack < TASK_SIZE)
+ 		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+ 
+-	dump->regs.ebx = regs->ebx;
+-	dump->regs.ecx = regs->ecx;
+-	dump->regs.edx = regs->edx;
+-	dump->regs.esi = regs->esi;
+-	dump->regs.edi = regs->edi;
+-	dump->regs.ebp = regs->ebp;
+-	dump->regs.eax = regs->eax;
+-	dump->regs.ds = regs->xds;
+-	dump->regs.es = regs->xes;
+-	dump->regs.fs = regs->xfs;
+-	savesegment(gs,dump->regs.gs);
+-	dump->regs.orig_eax = regs->orig_eax;
+-	dump->regs.eip = regs->eip;
+-	dump->regs.cs = regs->xcs;
+-	dump->regs.eflags = regs->eflags;
+-	dump->regs.esp = regs->esp;
+-	dump->regs.ss = regs->xss;
++	dump->regs.bx = regs->bx;
++	dump->regs.cx = regs->cx;
++	dump->regs.dx = regs->dx;
++	dump->regs.si = regs->si;
++	dump->regs.di = regs->di;
++	dump->regs.bp = regs->bp;
++	dump->regs.ax = regs->ax;
++	dump->regs.ds = (u16)regs->ds;
++	dump->regs.es = (u16)regs->es;
++	dump->regs.fs = (u16)regs->fs;
++	savesegment(gs,gs);
++	dump->regs.orig_ax = regs->orig_ax;
++	dump->regs.ip = regs->ip;
++	dump->regs.cs = (u16)regs->cs;
++	dump->regs.flags = regs->flags;
++	dump->regs.sp = regs->sp;
++	dump->regs.ss = (u16)regs->ss;
+ 
+ 	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+ }
+ EXPORT_SYMBOL(dump_thread);
+ 
 -/* 
-- * 32bit ptrace for x86-64.
-- *
-- * Copyright 2001,2002 Andi Kleen, SuSE Labs.
-- * Some parts copied from arch/i386/kernel/ptrace.c. See that file for earlier 
-- * copyright.
-- * 
-- * This allows to access 64bit processes too; but there is no way to see the extended 
-- * register contents.
-- */ 
--
--#include <linux/kernel.h>
--#include <linux/stddef.h>
--#include <linux/sched.h>
--#include <linux/syscalls.h>
--#include <linux/unistd.h>
--#include <linux/mm.h>
--#include <linux/err.h>
--#include <linux/ptrace.h>
--#include <asm/ptrace.h>
--#include <asm/compat.h>
--#include <asm/uaccess.h>
--#include <asm/user32.h>
--#include <asm/user.h>
--#include <asm/errno.h>
--#include <asm/debugreg.h>
--#include <asm/i387.h>
--#include <asm/fpu32.h>
--#include <asm/ia32.h>
--
--/*
-- * Determines which flags the user has access to [1 = access, 0 = no access].
-- * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
-- * Also masks reserved bits (31-22, 15, 5, 3, 1).
+- * Capture the user space registers if the task is not running (in user space)
 - */
--#define FLAG_MASK 0x54dd5UL
--
--#define R32(l,q) \
--	case offsetof(struct user32, regs.l): stack[offsetof(struct pt_regs, q)/8] = val; break
--
--static int putreg32(struct task_struct *child, unsigned regno, u32 val)
--{
--	int i;
--	__u64 *stack = (__u64 *)task_pt_regs(child);
--
--	switch (regno) {
--	case offsetof(struct user32, regs.fs):
--		if (val && (val & 3) != 3) return -EIO; 
--		child->thread.fsindex = val & 0xffff;
--		break;
--	case offsetof(struct user32, regs.gs):
--		if (val && (val & 3) != 3) return -EIO; 
--		child->thread.gsindex = val & 0xffff;
--		break;
--	case offsetof(struct user32, regs.ds):
--		if (val && (val & 3) != 3) return -EIO; 
--		child->thread.ds = val & 0xffff;
--		break;
--	case offsetof(struct user32, regs.es):
--		child->thread.es = val & 0xffff;
--		break;
--	case offsetof(struct user32, regs.ss): 
--		if ((val & 3) != 3) return -EIO;
--        	stack[offsetof(struct pt_regs, ss)/8] = val & 0xffff;
--		break;
--	case offsetof(struct user32, regs.cs): 
--		if ((val & 3) != 3) return -EIO;
--		stack[offsetof(struct pt_regs, cs)/8] = val & 0xffff;
--		break;
--
--	R32(ebx, rbx); 
--	R32(ecx, rcx);
--	R32(edx, rdx);
--	R32(edi, rdi);
--	R32(esi, rsi);
--	R32(ebp, rbp);
--	R32(eax, rax);
--	R32(orig_eax, orig_rax);
--	R32(eip, rip);
--	R32(esp, rsp);
--
--	case offsetof(struct user32, regs.eflags): {
--		__u64 *flags = &stack[offsetof(struct pt_regs, eflags)/8];
--		val &= FLAG_MASK;
--		*flags = val | (*flags & ~FLAG_MASK);
--		break;
--	}
--
--	case offsetof(struct user32, u_debugreg[4]): 
--	case offsetof(struct user32, u_debugreg[5]):
--		return -EIO;
--
--	case offsetof(struct user32, u_debugreg[0]):
--		child->thread.debugreg0 = val;
--		break;
--
--	case offsetof(struct user32, u_debugreg[1]):
--		child->thread.debugreg1 = val;
--		break;
--
--	case offsetof(struct user32, u_debugreg[2]):
--		child->thread.debugreg2 = val;
--		break;
--
--	case offsetof(struct user32, u_debugreg[3]):
--		child->thread.debugreg3 = val;
--		break;
--
--	case offsetof(struct user32, u_debugreg[6]):
--		child->thread.debugreg6 = val;
--		break; 
--
--	case offsetof(struct user32, u_debugreg[7]):
--		val &= ~DR_CONTROL_RESERVED;
--		/* See arch/i386/kernel/ptrace.c for an explanation of
--		 * this awkward check.*/
--		for(i=0; i<4; i++)
--			if ((0x5454 >> ((val >> (16 + 4*i)) & 0xf)) & 1)
--			       return -EIO;
--		child->thread.debugreg7 = val; 
--		if (val)
--			set_tsk_thread_flag(child, TIF_DEBUG);
--		else
--			clear_tsk_thread_flag(child, TIF_DEBUG);
--		break; 
--		    
--	default:
--		if (regno > sizeof(struct user32) || (regno & 3))
--			return -EIO;
--	       
--		/* Other dummy fields in the virtual user structure are ignored */ 
--		break; 		
--	}
--	return 0;
--}
--
--#undef R32
--
--#define R32(l,q) \
--	case offsetof(struct user32, regs.l): *val = stack[offsetof(struct pt_regs, q)/8]; break
--
--static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
+-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 -{
--	__u64 *stack = (__u64 *)task_pt_regs(child);
--
--	switch (regno) {
--	case offsetof(struct user32, regs.fs):
--	        *val = child->thread.fsindex;
--		break;
--	case offsetof(struct user32, regs.gs):
--		*val = child->thread.gsindex;
--		break;
--	case offsetof(struct user32, regs.ds):
--		*val = child->thread.ds;
--		break;
--	case offsetof(struct user32, regs.es):
--		*val = child->thread.es;
--		break;
--
--	R32(cs, cs);
--	R32(ss, ss);
--	R32(ebx, rbx); 
--	R32(ecx, rcx);
--	R32(edx, rdx);
--	R32(edi, rdi);
--	R32(esi, rsi);
--	R32(ebp, rbp);
--	R32(eax, rax);
--	R32(orig_eax, orig_rax);
--	R32(eip, rip);
--	R32(eflags, eflags);
--	R32(esp, rsp);
+-	struct pt_regs ptregs = *task_pt_regs(tsk);
+-	ptregs.xcs &= 0xffff;
+-	ptregs.xds &= 0xffff;
+-	ptregs.xes &= 0xffff;
+-	ptregs.xss &= 0xffff;
 -
--	case offsetof(struct user32, u_debugreg[0]): 
--		*val = child->thread.debugreg0; 
--		break; 
--	case offsetof(struct user32, u_debugreg[1]): 
--		*val = child->thread.debugreg1; 
--		break; 
--	case offsetof(struct user32, u_debugreg[2]): 
--		*val = child->thread.debugreg2; 
--		break; 
--	case offsetof(struct user32, u_debugreg[3]): 
--		*val = child->thread.debugreg3; 
--		break; 
--	case offsetof(struct user32, u_debugreg[6]): 
--		*val = child->thread.debugreg6; 
--		break; 
--	case offsetof(struct user32, u_debugreg[7]): 
--		*val = child->thread.debugreg7; 
--		break; 
--		    
--	default:
--		if (regno > sizeof(struct user32) || (regno & 3))
--			return -EIO;
+-	elf_core_copy_regs(regs, &ptregs);
 -
--		/* Other dummy fields in the virtual user structure are ignored */ 
--		*val = 0;
--		break; 		
--	}
--	return 0;
+-	return 1;
 -}
 -
--#undef R32
--
--static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
--{
--	int ret;
--	compat_siginfo_t __user *si32 = compat_ptr(data);
--	siginfo_t ssi; 
--	siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
--	if (request == PTRACE_SETSIGINFO) {
--		memset(&ssi, 0, sizeof(siginfo_t));
--		ret = copy_siginfo_from_user32(&ssi, si32);
--		if (ret)
--			return ret;
--		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
--			return -EFAULT;
--	}
--	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
--	if (ret)
--		return ret;
--	if (request == PTRACE_GETSIGINFO) {
--		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
--			return -EFAULT;
--		ret = copy_siginfo_to_user32(si32, &ssi);
--	}
--	return ret;
+ #ifdef CONFIG_SECCOMP
+-void hard_disable_TSC(void)
++static void hard_disable_TSC(void)
+ {
+ 	write_cr4(read_cr4() | X86_CR4_TSD);
+ }
+@@ -599,7 +604,7 @@ void disable_TSC(void)
+ 		hard_disable_TSC();
+ 	preempt_enable();
+ }
+-void hard_enable_TSC(void)
++static void hard_enable_TSC(void)
+ {
+ 	write_cr4(read_cr4() & ~X86_CR4_TSD);
+ }
+@@ -609,18 +614,32 @@ static noinline void
+ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ 		 struct tss_struct *tss)
+ {
+-	struct thread_struct *next;
++	struct thread_struct *prev, *next;
++	unsigned long debugctl;
+ 
++	prev = &prev_p->thread;
+ 	next = &next_p->thread;
+ 
++	debugctl = prev->debugctlmsr;
++	if (next->ds_area_msr != prev->ds_area_msr) {
++		/* we clear debugctl to make sure DS
++		 * is not in use when we change it */
++		debugctl = 0;
++		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
++		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
++	}
++
++	if (next->debugctlmsr != debugctl)
++		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);
++
+ 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+-		set_debugreg(next->debugreg[0], 0);
+-		set_debugreg(next->debugreg[1], 1);
+-		set_debugreg(next->debugreg[2], 2);
+-		set_debugreg(next->debugreg[3], 3);
++		set_debugreg(next->debugreg0, 0);
++		set_debugreg(next->debugreg1, 1);
++		set_debugreg(next->debugreg2, 2);
++		set_debugreg(next->debugreg3, 3);
+ 		/* no 4 and 5 */
+-		set_debugreg(next->debugreg[6], 6);
+-		set_debugreg(next->debugreg[7], 7);
++		set_debugreg(next->debugreg6, 6);
++		set_debugreg(next->debugreg7, 7);
+ 	}
+ 
+ #ifdef CONFIG_SECCOMP
+@@ -634,6 +653,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ 	}
+ #endif
+ 
++	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
++		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
++
++	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
++		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
++
++
+ 	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+ 		/*
+ 		 * Disable the bitmap via an invalid offset. We still cache
+@@ -687,11 +713,11 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+  * More important, however, is the fact that this allows us much
+  * more flexibility.
+  *
+- * The return value (in %eax) will be the "prev" task after
++ * The return value (in %ax) will be the "prev" task after
+  * the task-switch, and shows up in ret_from_fork in entry.S,
+  * for example.
+  */
+-struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ {
+ 	struct thread_struct *prev = &prev_p->thread,
+ 				 *next = &next_p->thread;
+@@ -710,7 +736,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
+ 	/*
+ 	 * Reload esp0.
+ 	 */
+-	load_esp0(tss, next);
++	load_sp0(tss, next);
+ 
+ 	/*
+ 	 * Save away %gs. No need to save %fs, as it was saved on the
+@@ -774,7 +800,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
+ 
+ asmlinkage int sys_fork(struct pt_regs regs)
+ {
+-	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
+ }
+ 
+ asmlinkage int sys_clone(struct pt_regs regs)
+@@ -783,12 +809,12 @@ asmlinkage int sys_clone(struct pt_regs regs)
+ 	unsigned long newsp;
+ 	int __user *parent_tidptr, *child_tidptr;
+ 
+-	clone_flags = regs.ebx;
+-	newsp = regs.ecx;
+-	parent_tidptr = (int __user *)regs.edx;
+-	child_tidptr = (int __user *)regs.edi;
++	clone_flags = regs.bx;
++	newsp = regs.cx;
++	parent_tidptr = (int __user *)regs.dx;
++	child_tidptr = (int __user *)regs.di;
+ 	if (!newsp)
+-		newsp = regs.esp;
++		newsp = regs.sp;
+ 	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
+ }
+ 
+@@ -804,7 +830,7 @@ asmlinkage int sys_clone(struct pt_regs regs)
+  */
+ asmlinkage int sys_vfork(struct pt_regs regs)
+ {
+-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
+ }
+ 
+ /*
+@@ -815,18 +841,15 @@ asmlinkage int sys_execve(struct pt_regs regs)
+ 	int error;
+ 	char * filename;
+ 
+-	filename = getname((char __user *) regs.ebx);
++	filename = getname((char __user *) regs.bx);
+ 	error = PTR_ERR(filename);
+ 	if (IS_ERR(filename))
+ 		goto out;
+ 	error = do_execve(filename,
+-			(char __user * __user *) regs.ecx,
+-			(char __user * __user *) regs.edx,
++			(char __user * __user *) regs.cx,
++			(char __user * __user *) regs.dx,
+ 			&regs);
+ 	if (error == 0) {
+-		task_lock(current);
+-		current->ptrace &= ~PT_DTRACE;
+-		task_unlock(current);
+ 		/* Make sure we don't return using sysenter.. */
+ 		set_thread_flag(TIF_IRET);
+ 	}
+@@ -840,145 +863,37 @@ out:
+ 
+ unsigned long get_wchan(struct task_struct *p)
+ {
+-	unsigned long ebp, esp, eip;
++	unsigned long bp, sp, ip;
+ 	unsigned long stack_page;
+ 	int count = 0;
+ 	if (!p || p == current || p->state == TASK_RUNNING)
+ 		return 0;
+ 	stack_page = (unsigned long)task_stack_page(p);
+-	esp = p->thread.esp;
+-	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
++	sp = p->thread.sp;
++	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
+ 		return 0;
+-	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
+-	ebp = *(unsigned long *) esp;
++	/* include/asm-i386/system.h:switch_to() pushes bp last. */
++	bp = *(unsigned long *) sp;
+ 	do {
+-		if (ebp < stack_page || ebp > top_ebp+stack_page)
++		if (bp < stack_page || bp > top_ebp+stack_page)
+ 			return 0;
+-		eip = *(unsigned long *) (ebp+4);
+-		if (!in_sched_functions(eip))
+-			return eip;
+-		ebp = *(unsigned long *) ebp;
++		ip = *(unsigned long *) (bp+4);
++		if (!in_sched_functions(ip))
++			return ip;
++		bp = *(unsigned long *) bp;
+ 	} while (count++ < 16);
+ 	return 0;
+ }
+ 
+-/*
+- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+- */
+-static int get_free_idx(void)
+-{
+-	struct thread_struct *t = &current->thread;
+-	int idx;
+-
+-	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+-		if (desc_empty(t->tls_array + idx))
+-			return idx + GDT_ENTRY_TLS_MIN;
+-	return -ESRCH;
 -}
 -
--asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
+-/*
+- * Set a given TLS descriptor:
+- */
+-asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
 -{
--	struct task_struct *child;
--	struct pt_regs *childregs; 
--	void __user *datap = compat_ptr(data);
--	int ret;
--	__u32 val;
+-	struct thread_struct *t = &current->thread;
+-	struct user_desc info;
+-	struct desc_struct *desc;
+-	int cpu, idx;
 -
--	switch (request) { 
--	case PTRACE_TRACEME:
--	case PTRACE_ATTACH:
--	case PTRACE_KILL:
--	case PTRACE_CONT:
--	case PTRACE_SINGLESTEP:
--	case PTRACE_DETACH:
--	case PTRACE_SYSCALL:
--	case PTRACE_OLDSETOPTIONS:
--	case PTRACE_SETOPTIONS:
--	case PTRACE_SET_THREAD_AREA:
--	case PTRACE_GET_THREAD_AREA:
--		return sys_ptrace(request, pid, addr, data); 
+-	if (copy_from_user(&info, u_info, sizeof(info)))
+-		return -EFAULT;
+-	idx = info.entry_number;
 -
--	default:
+-	/*
+-	 * index -1 means the kernel should try to find and
+-	 * allocate an empty descriptor:
+-	 */
+-	if (idx == -1) {
+-		idx = get_free_idx();
+-		if (idx < 0)
+-			return idx;
+-		if (put_user(idx, &u_info->entry_number))
+-			return -EFAULT;
+-	}
+-
+-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 -		return -EINVAL;
 -
--	case PTRACE_PEEKTEXT:
--	case PTRACE_PEEKDATA:
--	case PTRACE_POKEDATA:
--	case PTRACE_POKETEXT:
--	case PTRACE_POKEUSR:       
--	case PTRACE_PEEKUSR:
--	case PTRACE_GETREGS:
--	case PTRACE_SETREGS:
--	case PTRACE_SETFPREGS:
--	case PTRACE_GETFPREGS:
--	case PTRACE_SETFPXREGS:
--	case PTRACE_GETFPXREGS:
--	case PTRACE_GETEVENTMSG:
--		break;
+-	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
 -
--	case PTRACE_SETSIGINFO:
--	case PTRACE_GETSIGINFO:
--		return ptrace32_siginfo(request, pid, addr, data);
+-	/*
+-	 * We must not get preempted while modifying the TLS.
+-	 */
+-	cpu = get_cpu();
+-
+-	if (LDT_empty(&info)) {
+-		desc->a = 0;
+-		desc->b = 0;
+-	} else {
+-		desc->a = LDT_entry_a(&info);
+-		desc->b = LDT_entry_b(&info);
 -	}
+-	load_TLS(t, cpu);
 -
--	child = ptrace_get_task_struct(pid);
--	if (IS_ERR(child))
--		return PTR_ERR(child);
+-	put_cpu();
 -
--	ret = ptrace_check_attach(child, request == PTRACE_KILL);
--	if (ret < 0)
--		goto out;
+-	return 0;
+-}
 -
--	childregs = task_pt_regs(child);
+-/*
+- * Get the current Thread-Local Storage area:
+- */
 -
--	switch (request) {
--	case PTRACE_PEEKDATA:
--	case PTRACE_PEEKTEXT:
--		ret = 0;
--		if (access_process_vm(child, addr, &val, sizeof(u32), 0)!=sizeof(u32))
--			ret = -EIO;
--		else
--			ret = put_user(val, (unsigned int __user *)datap); 
--		break; 
+-#define GET_BASE(desc) ( \
+-	(((desc)->a >> 16) & 0x0000ffff) | \
+-	(((desc)->b << 16) & 0x00ff0000) | \
+-	( (desc)->b        & 0xff000000)   )
 -
--	case PTRACE_POKEDATA:
--	case PTRACE_POKETEXT:
--		ret = 0;
--		if (access_process_vm(child, addr, &data, sizeof(u32), 1)!=sizeof(u32))
--			ret = -EIO; 
--		break;
+-#define GET_LIMIT(desc) ( \
+-	((desc)->a & 0x0ffff) | \
+-	 ((desc)->b & 0xf0000) )
+-	
+-#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
+-#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
+-#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
+-#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
+-#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
+-#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
 -
--	case PTRACE_PEEKUSR:
--		ret = getreg32(child, addr, &val);
--		if (ret == 0)
--			ret = put_user(val, (__u32 __user *)datap);
--		break;
+-asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
+-{
+-	struct user_desc info;
+-	struct desc_struct *desc;
+-	int idx;
 -
--	case PTRACE_POKEUSR:
--		ret = putreg32(child, addr, data);
--		break;
+-	if (get_user(idx, &u_info->entry_number))
+-		return -EFAULT;
+-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+-		return -EINVAL;
 -
--	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
--		int i;
--	  	if (!access_ok(VERIFY_WRITE, datap, 16*4)) {
--			ret = -EIO;
--			break;
--		}
--		ret = 0;
--		for ( i = 0; i <= 16*4 ; i += sizeof(__u32) ) {
--			getreg32(child, i, &val);
--			ret |= __put_user(val,(u32 __user *)datap);
--			datap += sizeof(u32);
--		}
--		break;
--	}
+-	memset(&info, 0, sizeof(info));
 -
--	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
--		unsigned long tmp;
--		int i;
--	  	if (!access_ok(VERIFY_READ, datap, 16*4)) {
--			ret = -EIO;
--			break;
--		}
--		ret = 0; 
--		for ( i = 0; i <= 16*4; i += sizeof(u32) ) {
--			ret |= __get_user(tmp, (u32 __user *)datap);
--			putreg32(child, i, tmp);
--			datap += sizeof(u32);
--		}
--		break;
--	}
+-	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
 -
--	case PTRACE_GETFPREGS:
--		ret = -EIO; 
--		if (!access_ok(VERIFY_READ, compat_ptr(data), 
--			       sizeof(struct user_i387_struct)))
--			break;
--		save_i387_ia32(child, datap, childregs, 1);
--		ret = 0; 
--			break;
+-	info.entry_number = idx;
+-	info.base_addr = GET_BASE(desc);
+-	info.limit = GET_LIMIT(desc);
+-	info.seg_32bit = GET_32BIT(desc);
+-	info.contents = GET_CONTENTS(desc);
+-	info.read_exec_only = !GET_WRITABLE(desc);
+-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
+-	info.seg_not_present = !GET_PRESENT(desc);
+-	info.useable = GET_USEABLE(desc);
 -
--	case PTRACE_SETFPREGS:
--		ret = -EIO;
--		if (!access_ok(VERIFY_WRITE, datap, 
--			       sizeof(struct user_i387_struct)))
--			break;
--		ret = 0;
--		/* don't check EFAULT to be bug-to-bug compatible to i386 */
--		restore_i387_ia32(child, datap, 1);
--		break;
+-	if (copy_to_user(u_info, &info, sizeof(info)))
+-		return -EFAULT;
+-	return 0;
+-}
 -
--	case PTRACE_GETFPXREGS: { 
--		struct user32_fxsr_struct __user *u = datap;
--		init_fpu(child); 
--		ret = -EIO;
--		if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
--			break;
--			ret = -EFAULT;
--		if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u)))
--			break;
--		ret = __put_user(childregs->cs, &u->fcs);
--		ret |= __put_user(child->thread.ds, &u->fos); 
--		break; 
--	} 
--	case PTRACE_SETFPXREGS: { 
--		struct user32_fxsr_struct __user *u = datap;
--		unlazy_fpu(child);
--		ret = -EIO;
--		if (!access_ok(VERIFY_READ, u, sizeof(*u)))
--			break;
--		/* no checking to be bug-to-bug compatible with i386. */
--		/* but silence warning */
--		if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)))
--			;
--		set_stopped_child_used_math(child);
--		child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
--		ret = 0; 
--		break;
--	}
+ unsigned long arch_align_stack(unsigned long sp)
+ {
+ 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ 		sp -= get_random_int() % 8192;
+ 	return sp & ~0xf;
+ }
++
++unsigned long arch_randomize_brk(struct mm_struct *mm)
++{
++	unsigned long range_end = mm->brk + 0x02000000;
++	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
++}
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index ab79e1d..137a861 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -3,7 +3,7 @@
+  *
+  *  Pentium III FXSR, SSE support
+  *	Gareth Hughes <gareth at valinux.com>, May 2000
+- * 
++ *
+  *  X86-64 port
+  *	Andi Kleen.
+  *
+@@ -19,19 +19,19 @@
+ #include <linux/cpu.h>
+ #include <linux/errno.h>
+ #include <linux/sched.h>
++#include <linux/fs.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+-#include <linux/fs.h>
+ #include <linux/elfcore.h>
+ #include <linux/smp.h>
+ #include <linux/slab.h>
+ #include <linux/user.h>
+-#include <linux/module.h>
+ #include <linux/a.out.h>
+ #include <linux/interrupt.h>
++#include <linux/utsname.h>
+ #include <linux/delay.h>
++#include <linux/module.h>
+ #include <linux/ptrace.h>
+-#include <linux/utsname.h>
+ #include <linux/random.h>
+ #include <linux/notifier.h>
+ #include <linux/kprobes.h>
+@@ -72,13 +72,6 @@ void idle_notifier_register(struct notifier_block *n)
+ {
+ 	atomic_notifier_chain_register(&idle_notifier, n);
+ }
+-EXPORT_SYMBOL_GPL(idle_notifier_register);
 -
--	case PTRACE_GETEVENTMSG:
--		ret = put_user(child->ptrace_message,(unsigned int __user *)compat_ptr(data));
--		break;
+-void idle_notifier_unregister(struct notifier_block *n)
+-{
+-	atomic_notifier_chain_unregister(&idle_notifier, n);
+-}
+-EXPORT_SYMBOL(idle_notifier_unregister);
+ 
+ void enter_idle(void)
+ {
+@@ -106,7 +99,7 @@ void exit_idle(void)
+  * We use this if we don't have any better
+  * idle routine..
+  */
+-static void default_idle(void)
++void default_idle(void)
+ {
+ 	current_thread_info()->status &= ~TS_POLLING;
+ 	/*
+@@ -116,11 +109,18 @@ static void default_idle(void)
+ 	smp_mb();
+ 	local_irq_disable();
+ 	if (!need_resched()) {
+-		/* Enables interrupts one instruction before HLT.
+-		   x86 special cases this so there is no race. */
+-		safe_halt();
+-	} else
+-		local_irq_enable();
++		ktime_t t0, t1;
++		u64 t0n, t1n;
++
++		t0 = ktime_get();
++		t0n = ktime_to_ns(t0);
++		safe_halt();	/* enables interrupts racelessly */
++		local_irq_disable();
++		t1 = ktime_get();
++		t1n = ktime_to_ns(t1);
++		sched_clock_idle_wakeup_event(t1n - t0n);
++	}
++	local_irq_enable();
+ 	current_thread_info()->status |= TS_POLLING;
+ }
+ 
+@@ -129,54 +129,12 @@ static void default_idle(void)
+  * to poll the ->need_resched flag instead of waiting for the
+  * cross-CPU IPI to arrive. Use this option with caution.
+  */
+-static void poll_idle (void)
++static void poll_idle(void)
+ {
+ 	local_irq_enable();
+ 	cpu_relax();
+ }
+ 
+-static void do_nothing(void *unused)
+-{
+-}
 -
--	default:
--		BUG();
+-void cpu_idle_wait(void)
+-{
+-	unsigned int cpu, this_cpu = get_cpu();
+-	cpumask_t map, tmp = current->cpus_allowed;
+-
+-	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+-	put_cpu();
+-
+-	cpus_clear(map);
+-	for_each_online_cpu(cpu) {
+-		per_cpu(cpu_idle_state, cpu) = 1;
+-		cpu_set(cpu, map);
 -	}
 -
-- out:
--	put_task_struct(child);
--	return ret;
+-	__get_cpu_var(cpu_idle_state) = 0;
+-
+-	wmb();
+-	do {
+-		ssleep(1);
+-		for_each_online_cpu(cpu) {
+-			if (cpu_isset(cpu, map) &&
+-					!per_cpu(cpu_idle_state, cpu))
+-				cpu_clear(cpu, map);
+-		}
+-		cpus_and(map, map, cpu_online_map);
+-		/*
+-		 * We waited 1 sec, if a CPU still did not call idle
+-		 * it may be because it is in idle and not waking up
+-		 * because it has nothing to do.
+-		 * Give all the remaining CPUS a kick.
+-		 */
+-		smp_call_function_mask(map, do_nothing, 0, 0);
+-	} while (!cpus_empty(map));
+-
+-	set_cpus_allowed(current, tmp);
 -}
+-EXPORT_SYMBOL_GPL(cpu_idle_wait);
 -
-diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
-index bee96d6..abf71d2 100644
---- a/arch/x86/ia32/sys_ia32.c
-+++ b/arch/x86/ia32/sys_ia32.c
-@@ -1,29 +1,29 @@
+ #ifdef CONFIG_HOTPLUG_CPU
+ DECLARE_PER_CPU(int, cpu_state);
+ 
+@@ -207,19 +165,18 @@ static inline void play_dead(void)
+  * low exit latency (ie sit in a loop waiting for
+  * somebody to say that they'd like to reschedule)
+  */
+-void cpu_idle (void)
++void cpu_idle(void)
+ {
+ 	current_thread_info()->status |= TS_POLLING;
+ 	/* endless idle loop with no priority at all */
+ 	while (1) {
++		tick_nohz_stop_sched_tick();
+ 		while (!need_resched()) {
+ 			void (*idle)(void);
+ 
+ 			if (__get_cpu_var(cpu_idle_state))
+ 				__get_cpu_var(cpu_idle_state) = 0;
+ 
+-			tick_nohz_stop_sched_tick();
+-
+ 			rmb();
+ 			idle = pm_idle;
+ 			if (!idle)
+@@ -247,6 +204,47 @@ void cpu_idle (void)
+ 	}
+ }
+ 
++static void do_nothing(void *unused)
++{
++}
++
++void cpu_idle_wait(void)
++{
++	unsigned int cpu, this_cpu = get_cpu();
++	cpumask_t map, tmp = current->cpus_allowed;
++
++	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++	put_cpu();
++
++	cpus_clear(map);
++	for_each_online_cpu(cpu) {
++		per_cpu(cpu_idle_state, cpu) = 1;
++		cpu_set(cpu, map);
++	}
++
++	__get_cpu_var(cpu_idle_state) = 0;
++
++	wmb();
++	do {
++		ssleep(1);
++		for_each_online_cpu(cpu) {
++			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++				cpu_clear(cpu, map);
++		}
++		cpus_and(map, map, cpu_online_map);
++		/*
++		 * We waited 1 sec, if a CPU still did not call idle
++		 * it may be because it is in idle and not waking up
++		 * because it has nothing to do.
++		 * Give all the remaining CPUS a kick.
++		 */
++		smp_call_function_mask(map, do_nothing, 0, 0);
++	} while (!cpus_empty(map));
++
++	set_cpus_allowed(current, tmp);
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
  /*
-  * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Based on
-- *             sys_sparc32 
-+ *             sys_sparc32
-  *
-  * Copyright (C) 2000		VA Linux Co
-  * Copyright (C) 2000		Don Dugger <n0ano at valinux.com>
-- * Copyright (C) 1999 		Arun Sharma <arun.sharma at intel.com>
-- * Copyright (C) 1997,1998 	Jakub Jelinek (jj at sunsite.mff.cuni.cz)
-- * Copyright (C) 1997 		David S. Miller (davem at caip.rutgers.edu)
-+ * Copyright (C) 1999		Arun Sharma <arun.sharma at intel.com>
-+ * Copyright (C) 1997,1998	Jakub Jelinek (jj at sunsite.mff.cuni.cz)
-+ * Copyright (C) 1997		David S. Miller (davem at caip.rutgers.edu)
-  * Copyright (C) 2000		Hewlett-Packard Co.
-  * Copyright (C) 2000		David Mosberger-Tang <davidm at hpl.hp.com>
-- * Copyright (C) 2000,2001,2002	Andi Kleen, SuSE Labs (x86-64 port) 
-+ * Copyright (C) 2000,2001,2002	Andi Kleen, SuSE Labs (x86-64 port)
+  * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+  * which can obviate IPI to trigger checking of need_resched.
+@@ -257,13 +255,13 @@ void cpu_idle (void)
+  * New with Core Duo processors, MWAIT can take some hints based on CPU
+  * capability.
+  */
+-void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
++void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+ {
+ 	if (!need_resched()) {
+ 		__monitor((void *)&current_thread_info()->flags, 0, 0);
+ 		smp_mb();
+ 		if (!need_resched())
+-			__mwait(eax, ecx);
++			__mwait(ax, cx);
+ 	}
+ }
+ 
+@@ -282,25 +280,41 @@ static void mwait_idle(void)
+ 	}
+ }
+ 
++
++static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
++{
++	if (force_mwait)
++		return 1;
++	/* Any C1 states supported? */
++	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
++}
++
+ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+ {
+-	static int printed;
+-	if (cpu_has(c, X86_FEATURE_MWAIT)) {
++	static int selected;
++
++	if (selected)
++		return;
++#ifdef CONFIG_X86_SMP
++	if (pm_idle == poll_idle && smp_num_siblings > 1) {
++		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
++			" performance may degrade.\n");
++	}
++#endif
++	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
+ 		/*
+ 		 * Skip, if setup has overridden idle.
+ 		 * One CPU supports mwait => All CPUs supports mwait
+ 		 */
+ 		if (!pm_idle) {
+-			if (!printed) {
+-				printk(KERN_INFO "using mwait in idle threads.\n");
+-				printed = 1;
+-			}
++			printk(KERN_INFO "using mwait in idle threads.\n");
+ 			pm_idle = mwait_idle;
+ 		}
+ 	}
++	selected = 1;
+ }
+ 
+-static int __init idle_setup (char *str)
++static int __init idle_setup(char *str)
+ {
+ 	if (!strcmp(str, "poll")) {
+ 		printk("using polling idle threads.\n");
+@@ -315,13 +329,13 @@ static int __init idle_setup (char *str)
+ }
+ early_param("idle", idle_setup);
+ 
+-/* Prints also some state that isn't saved in the pt_regs */ 
++/* Prints also some state that isn't saved in the pt_regs */
+ void __show_regs(struct pt_regs * regs)
+ {
+ 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
+ 	unsigned long d0, d1, d2, d3, d6, d7;
+-	unsigned int fsindex,gsindex;
+-	unsigned int ds,cs,es; 
++	unsigned int fsindex, gsindex;
++	unsigned int ds, cs, es;
+ 
+ 	printk("\n");
+ 	print_modules();
+@@ -330,16 +344,16 @@ void __show_regs(struct pt_regs * regs)
+ 		init_utsname()->release,
+ 		(int)strcspn(init_utsname()->version, " "),
+ 		init_utsname()->version);
+-	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
+-	printk_address(regs->rip); 
+-	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp,
+-		regs->eflags);
++	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
++	printk_address(regs->ip, 1);
++	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->sp,
++		regs->flags);
+ 	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+-	       regs->rax, regs->rbx, regs->rcx);
++	       regs->ax, regs->bx, regs->cx);
+ 	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+-	       regs->rdx, regs->rsi, regs->rdi); 
++	       regs->dx, regs->si, regs->di);
+ 	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+-	       regs->rbp, regs->r8, regs->r9); 
++	       regs->bp, regs->r8, regs->r9);
+ 	printk("R10: %016lx R11: %016lx R12: %016lx\n",
+ 	       regs->r10, regs->r11, regs->r12); 
+ 	printk("R13: %016lx R14: %016lx R15: %016lx\n",
+@@ -379,7 +393,7 @@ void show_regs(struct pt_regs *regs)
+ {
+ 	printk("CPU %d:", smp_processor_id());
+ 	__show_regs(regs);
+-	show_trace(NULL, regs, (void *)(regs + 1));
++	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
+ }
+ 
+ /*
+@@ -390,7 +404,7 @@ void exit_thread(void)
+ 	struct task_struct *me = current;
+ 	struct thread_struct *t = &me->thread;
+ 
+-	if (me->thread.io_bitmap_ptr) { 
++	if (me->thread.io_bitmap_ptr) {
+ 		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+ 
+ 		kfree(t->io_bitmap_ptr);
+@@ -426,7 +440,7 @@ void flush_thread(void)
+ 	tsk->thread.debugreg3 = 0;
+ 	tsk->thread.debugreg6 = 0;
+ 	tsk->thread.debugreg7 = 0;
+-	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
++	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ 	/*
+ 	 * Forget coprocessor state..
+ 	 */
+@@ -449,26 +463,21 @@ void release_thread(struct task_struct *dead_task)
+ 
+ static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
+ {
+-	struct user_desc ud = { 
++	struct user_desc ud = {
+ 		.base_addr = addr,
+ 		.limit = 0xfffff,
+ 		.seg_32bit = 1,
+ 		.limit_in_pages = 1,
+ 		.useable = 1,
+ 	};
+-	struct n_desc_struct *desc = (void *)t->thread.tls_array;
++	struct desc_struct *desc = t->thread.tls_array;
+ 	desc += tls;
+-	desc->a = LDT_entry_a(&ud); 
+-	desc->b = LDT_entry_b(&ud); 
++	fill_ldt(desc, &ud);
+ }
+ 
+ static inline u32 read_32bit_tls(struct task_struct *t, int tls)
+ {
+-	struct desc_struct *desc = (void *)t->thread.tls_array;
+-	desc += tls;
+-	return desc->base0 | 
+-		(((u32)desc->base1) << 16) | 
+-		(((u32)desc->base2) << 24);
++	return get_desc_base(&t->thread.tls_array[tls]);
+ }
+ 
+ /*
+@@ -480,7 +489,7 @@ void prepare_to_copy(struct task_struct *tsk)
+ 	unlazy_fpu(tsk);
+ }
+ 
+-int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 
++int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+ 		unsigned long unused,
+ 	struct task_struct * p, struct pt_regs * regs)
+ {
+@@ -492,14 +501,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
+ 			(THREAD_SIZE + task_stack_page(p))) - 1;
+ 	*childregs = *regs;
+ 
+-	childregs->rax = 0;
+-	childregs->rsp = rsp;
+-	if (rsp == ~0UL)
+-		childregs->rsp = (unsigned long)childregs;
++	childregs->ax = 0;
++	childregs->sp = sp;
++	if (sp == ~0UL)
++		childregs->sp = (unsigned long)childregs;
+ 
+-	p->thread.rsp = (unsigned long) childregs;
+-	p->thread.rsp0 = (unsigned long) (childregs+1);
+-	p->thread.userrsp = me->thread.userrsp; 
++	p->thread.sp = (unsigned long) childregs;
++	p->thread.sp0 = (unsigned long) (childregs+1);
++	p->thread.usersp = me->thread.usersp;
+ 
+ 	set_tsk_thread_flag(p, TIF_FORK);
+ 
+@@ -520,7 +529,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
+ 		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
+ 				IO_BITMAP_BYTES);
+ 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
+-	} 
++	}
+ 
+ 	/*
+ 	 * Set a new TLS for the child thread?
+@@ -528,7 +537,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
+ 	if (clone_flags & CLONE_SETTLS) {
+ #ifdef CONFIG_IA32_EMULATION
+ 		if (test_thread_flag(TIF_IA32))
+-			err = ia32_child_tls(p, childregs); 
++			err = do_set_thread_area(p, -1,
++				(struct user_desc __user *)childregs->si, 0);
+ 		else 			
+ #endif	 
+ 			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
+@@ -547,17 +557,30 @@ out:
+ /*
+  * This special macro can be used to load a debugging register
+  */
+-#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
++#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
+ 
+ static inline void __switch_to_xtra(struct task_struct *prev_p,
+-			     	    struct task_struct *next_p,
+-			     	    struct tss_struct *tss)
++				    struct task_struct *next_p,
++				    struct tss_struct *tss)
+ {
+ 	struct thread_struct *prev, *next;
++	unsigned long debugctl;
+ 
+ 	prev = &prev_p->thread,
+ 	next = &next_p->thread;
+ 
++	debugctl = prev->debugctlmsr;
++	if (next->ds_area_msr != prev->ds_area_msr) {
++		/* we clear debugctl to make sure DS
++		 * is not in use when we change it */
++		debugctl = 0;
++		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
++		wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
++	}
++
++	if (next->debugctlmsr != debugctl)
++		wrmsrl(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr);
++
+ 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+ 		loaddebug(next, 0);
+ 		loaddebug(next, 1);
+@@ -581,12 +604,18 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
+ 		 */
+ 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+ 	}
++
++	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
++		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
++
++	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
++		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
+ }
+ 
+ /*
+  *	switch_to(x,y) should switch tasks from x to y.
   *
-  * These routines maintain argument size conversion between 32bit and 64bit
-- * environment. In 2.5 most of this should be moved to a generic directory. 
-+ * environment. In 2.5 most of this should be moved to a generic directory.
+- * This could still be optimized: 
++ * This could still be optimized:
+  * - fold all the options into a flag word and test it with a single test.
+  * - could test fs/gs bitsliced
   *
-  * This file assumes that there is a hole at the end of user address space.
-- * 
-- * Some of the functions are LE specific currently. These are hopefully all marked.
-- * This should be fixed.
-+ *
-+ * Some of the functions are LE specific currently. These are
-+ * hopefully all marked.  This should be fixed.
+@@ -597,7 +626,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ {
+ 	struct thread_struct *prev = &prev_p->thread,
+ 				 *next = &next_p->thread;
+-	int cpu = smp_processor_id();  
++	int cpu = smp_processor_id();
+ 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ 
+ 	/* we're going to use this soon, after a few expensive things */
+@@ -607,7 +636,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	/*
+ 	 * Reload esp0, LDT and the page table pointer:
+ 	 */
+-	tss->rsp0 = next->rsp0;
++	load_sp0(tss, next);
+ 
+ 	/* 
+ 	 * Switch DS and ES.
+@@ -666,8 +695,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	/* 
+ 	 * Switch the PDA and FPU contexts.
+ 	 */
+-	prev->userrsp = read_pda(oldrsp); 
+-	write_pda(oldrsp, next->userrsp); 
++	prev->usersp = read_pda(oldrsp);
++	write_pda(oldrsp, next->usersp);
+ 	write_pda(pcurrent, next_p); 
+ 
+ 	write_pda(kernelstack,
+@@ -684,8 +713,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	/*
+ 	 * Now maybe reload the debug registers and handle I/O bitmaps
+ 	 */
+-	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
+-	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
++	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
++		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
+ 		__switch_to_xtra(prev_p, next_p, tss);
+ 
+ 	/* If the task has used fpu the last 5 timeslices, just do a full
+@@ -700,7 +729,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ /*
+  * sys_execve() executes a new program.
   */
+-asmlinkage 
++asmlinkage
+ long sys_execve(char __user *name, char __user * __user *argv,
+ 		char __user * __user *envp, struct pt_regs regs)
+ {
+@@ -712,11 +741,6 @@ long sys_execve(char __user *name, char __user * __user *argv,
+ 	if (IS_ERR(filename)) 
+ 		return error;
+ 	error = do_execve(filename, argv, envp, &regs); 
+-	if (error == 0) {
+-		task_lock(current);
+-		current->ptrace &= ~PT_DTRACE;
+-		task_unlock(current);
+-	}
+ 	putname(filename);
+ 	return error;
+ }
+@@ -726,18 +750,18 @@ void set_personality_64bit(void)
+ 	/* inherit personality from parent */
  
- #include <linux/kernel.h>
- #include <linux/sched.h>
--#include <linux/fs.h> 
--#include <linux/file.h> 
-+#include <linux/fs.h>
-+#include <linux/file.h>
- #include <linux/signal.h>
- #include <linux/syscalls.h>
- #include <linux/resource.h>
-@@ -90,43 +90,44 @@ int cp_compat_stat(struct kstat *kbuf, struct compat_stat __user *ubuf)
- 	if (sizeof(ino) < sizeof(kbuf->ino) && ino != kbuf->ino)
- 		return -EOVERFLOW;
- 	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct compat_stat)) ||
--	    __put_user (old_encode_dev(kbuf->dev), &ubuf->st_dev) ||
--	    __put_user (ino, &ubuf->st_ino) ||
--	    __put_user (kbuf->mode, &ubuf->st_mode) ||
--	    __put_user (kbuf->nlink, &ubuf->st_nlink) ||
--	    __put_user (uid, &ubuf->st_uid) ||
--	    __put_user (gid, &ubuf->st_gid) ||
--	    __put_user (old_encode_dev(kbuf->rdev), &ubuf->st_rdev) ||
--	    __put_user (kbuf->size, &ubuf->st_size) ||
--	    __put_user (kbuf->atime.tv_sec, &ubuf->st_atime) ||
--	    __put_user (kbuf->atime.tv_nsec, &ubuf->st_atime_nsec) ||
--	    __put_user (kbuf->mtime.tv_sec, &ubuf->st_mtime) ||
--	    __put_user (kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
--	    __put_user (kbuf->ctime.tv_sec, &ubuf->st_ctime) ||
--	    __put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
--	    __put_user (kbuf->blksize, &ubuf->st_blksize) ||
--	    __put_user (kbuf->blocks, &ubuf->st_blocks))
-+	    __put_user(old_encode_dev(kbuf->dev), &ubuf->st_dev) ||
-+	    __put_user(ino, &ubuf->st_ino) ||
-+	    __put_user(kbuf->mode, &ubuf->st_mode) ||
-+	    __put_user(kbuf->nlink, &ubuf->st_nlink) ||
-+	    __put_user(uid, &ubuf->st_uid) ||
-+	    __put_user(gid, &ubuf->st_gid) ||
-+	    __put_user(old_encode_dev(kbuf->rdev), &ubuf->st_rdev) ||
-+	    __put_user(kbuf->size, &ubuf->st_size) ||
-+	    __put_user(kbuf->atime.tv_sec, &ubuf->st_atime) ||
-+	    __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec) ||
-+	    __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime) ||
-+	    __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
-+	    __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime) ||
-+	    __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
-+	    __put_user(kbuf->blksize, &ubuf->st_blksize) ||
-+	    __put_user(kbuf->blocks, &ubuf->st_blocks))
- 		return -EFAULT;
- 	return 0;
+ 	/* Make sure to be in 64bit mode */
+-	clear_thread_flag(TIF_IA32); 
++	clear_thread_flag(TIF_IA32);
+ 
+ 	/* TBD: overwrites user setup. Should have two bits.
+ 	   But 64bit processes have always behaved this way,
+ 	   so it's not too bad. The main problem is just that
+-   	   32bit childs are affected again. */
++	   32bit childs are affected again. */
+ 	current->personality &= ~READ_IMPLIES_EXEC;
  }
  
--asmlinkage long
--sys32_truncate64(char __user * filename, unsigned long offset_low, unsigned long offset_high)
-+asmlinkage long sys32_truncate64(char __user *filename,
-+				 unsigned long offset_low,
-+				 unsigned long offset_high)
+ asmlinkage long sys_fork(struct pt_regs *regs)
  {
-        return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
+-	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
++	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
  }
  
--asmlinkage long
--sys32_ftruncate64(unsigned int fd, unsigned long offset_low, unsigned long offset_high)
-+asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
-+				  unsigned long offset_high)
+ asmlinkage long
+@@ -745,7 +769,7 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
+ 	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
  {
-        return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
+ 	if (!newsp)
+-		newsp = regs->rsp;
++		newsp = regs->sp;
+ 	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
  }
  
--/* Another set for IA32/LFS -- x86_64 struct stat is different due to 
--   support for 64bit inode numbers. */
--
--static int
--cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
-+/*
-+ * Another set for IA32/LFS -- x86_64 struct stat is different due to
-+ * support for 64bit inode numbers.
-+ */
-+static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+@@ -761,29 +785,29 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
+  */
+ asmlinkage long sys_vfork(struct pt_regs *regs)
  {
- 	typeof(ubuf->st_uid) uid = 0;
- 	typeof(ubuf->st_gid) gid = 0;
-@@ -134,38 +135,39 @@ cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
- 	SET_GID(gid, stat->gid);
- 	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
- 	    __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
--	    __put_user (stat->ino, &ubuf->__st_ino) ||
--	    __put_user (stat->ino, &ubuf->st_ino) ||
--	    __put_user (stat->mode, &ubuf->st_mode) ||
--	    __put_user (stat->nlink, &ubuf->st_nlink) ||
--	    __put_user (uid, &ubuf->st_uid) ||
--	    __put_user (gid, &ubuf->st_gid) ||
--	    __put_user (huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
--	    __put_user (stat->size, &ubuf->st_size) ||
--	    __put_user (stat->atime.tv_sec, &ubuf->st_atime) ||
--	    __put_user (stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
--	    __put_user (stat->mtime.tv_sec, &ubuf->st_mtime) ||
--	    __put_user (stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
--	    __put_user (stat->ctime.tv_sec, &ubuf->st_ctime) ||
--	    __put_user (stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
--	    __put_user (stat->blksize, &ubuf->st_blksize) ||
--	    __put_user (stat->blocks, &ubuf->st_blocks))
-+	    __put_user(stat->ino, &ubuf->__st_ino) ||
-+	    __put_user(stat->ino, &ubuf->st_ino) ||
-+	    __put_user(stat->mode, &ubuf->st_mode) ||
-+	    __put_user(stat->nlink, &ubuf->st_nlink) ||
-+	    __put_user(uid, &ubuf->st_uid) ||
-+	    __put_user(gid, &ubuf->st_gid) ||
-+	    __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
-+	    __put_user(stat->size, &ubuf->st_size) ||
-+	    __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
-+	    __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
-+	    __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
-+	    __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
-+	    __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
-+	    __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
-+	    __put_user(stat->blksize, &ubuf->st_blksize) ||
-+	    __put_user(stat->blocks, &ubuf->st_blocks))
- 		return -EFAULT;
- 	return 0;
+-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
+ 		    NULL, NULL);
  }
  
--asmlinkage long
--sys32_stat64(char __user * filename, struct stat64 __user *statbuf)
-+asmlinkage long sys32_stat64(char __user *filename,
-+			     struct stat64 __user *statbuf)
+ unsigned long get_wchan(struct task_struct *p)
  {
- 	struct kstat stat;
- 	int ret = vfs_stat(filename, &stat);
+ 	unsigned long stack;
+-	u64 fp,rip;
++	u64 fp,ip;
+ 	int count = 0;
+ 
+ 	if (!p || p == current || p->state==TASK_RUNNING)
+ 		return 0; 
+ 	stack = (unsigned long)task_stack_page(p);
+-	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
++	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
+ 		return 0;
+-	fp = *(u64 *)(p->thread.rsp);
++	fp = *(u64 *)(p->thread.sp);
+ 	do { 
+ 		if (fp < (unsigned long)stack ||
+ 		    fp > (unsigned long)stack+THREAD_SIZE)
+ 			return 0; 
+-		rip = *(u64 *)(fp+8); 
+-		if (!in_sched_functions(rip))
+-			return rip; 
++		ip = *(u64 *)(fp+8);
++		if (!in_sched_functions(ip))
++			return ip;
+ 		fp = *(u64 *)fp; 
+ 	} while (count++ < 16); 
+ 	return 0;
+@@ -824,19 +848,19 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
+ 		/* Not strictly needed for fs, but do it for symmetry
+ 		   with gs */
+ 		if (addr >= TASK_SIZE_OF(task))
+-			return -EPERM; 
++			return -EPERM;
+ 		cpu = get_cpu();
+-		/* handle small bases via the GDT because that's faster to 
++		/* handle small bases via the GDT because that's faster to
+ 		   switch. */
+-		if (addr <= 0xffffffff) { 
++		if (addr <= 0xffffffff) {
+ 			set_32bit_tls(task, FS_TLS, addr);
+-			if (doit) { 
+-				load_TLS(&task->thread, cpu); 
++			if (doit) {
++				load_TLS(&task->thread, cpu);
+ 				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+ 			}
+ 			task->thread.fsindex = FS_TLS_SEL;
+ 			task->thread.fs = 0;
+-		} else { 
++		} else {
+ 			task->thread.fsindex = 0;
+ 			task->thread.fs = addr;
+ 			if (doit) {
+@@ -848,24 +872,24 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
+ 		}
+ 		put_cpu();
+ 		break;
+-	case ARCH_GET_FS: { 
+-		unsigned long base; 
++	case ARCH_GET_FS: {
++		unsigned long base;
+ 		if (task->thread.fsindex == FS_TLS_SEL)
+ 			base = read_32bit_tls(task, FS_TLS);
+ 		else if (doit)
+ 			rdmsrl(MSR_FS_BASE, base);
+ 		else
+ 			base = task->thread.fs;
+-		ret = put_user(base, (unsigned long __user *)addr); 
+-		break; 
++		ret = put_user(base, (unsigned long __user *)addr);
++		break;
+ 	}
+-	case ARCH_GET_GS: { 
++	case ARCH_GET_GS: {
+ 		unsigned long base;
+ 		unsigned gsindex;
+ 		if (task->thread.gsindex == GS_TLS_SEL)
+ 			base = read_32bit_tls(task, GS_TLS);
+ 		else if (doit) {
+- 			asm("movl %%gs,%0" : "=r" (gsindex));
++			asm("movl %%gs,%0" : "=r" (gsindex));
+ 			if (gsindex)
+ 				rdmsrl(MSR_KERNEL_GS_BASE, base);
+ 			else
+@@ -873,39 +897,21 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
+ 		}
+ 		else
+ 			base = task->thread.gs;
+-		ret = put_user(base, (unsigned long __user *)addr); 
++		ret = put_user(base, (unsigned long __user *)addr);
+ 		break;
+ 	}
+ 
+ 	default:
+ 		ret = -EINVAL;
+ 		break;
+-	} 
++	}
+ 
+-	return ret;	
+-} 
++	return ret;
++}
+ 
+ long sys_arch_prctl(int code, unsigned long addr)
+ {
+ 	return do_arch_prctl(current, code, addr);
+-} 
+-
+-/* 
+- * Capture the user space registers if the task is not running (in user space)
+- */
+-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+-{
+-	struct pt_regs *pp, ptregs;
+-
+-	pp = task_pt_regs(tsk);
+-
+-	ptregs = *pp; 
+-	ptregs.cs &= 0xffff;
+-	ptregs.ss &= 0xffff;
+-
+-	elf_core_copy_regs(regs, &ptregs);
+- 
+-	return 1;
+ }
+ 
+ unsigned long arch_align_stack(unsigned long sp)
+@@ -914,3 +920,9 @@ unsigned long arch_align_stack(unsigned long sp)
+ 		sp -= get_random_int() % 8192;
+ 	return sp & ~0xf;
+ }
++
++unsigned long arch_randomize_brk(struct mm_struct *mm)
++{
++	unsigned long range_end = mm->brk + 0x02000000;
++	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
++}
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+new file mode 100644
+index 0000000..96286df
+--- /dev/null
++++ b/arch/x86/kernel/ptrace.c
+@@ -0,0 +1,1545 @@
++/* By Ross Biro 1/23/92 */
++/*
++ * Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ *
++ * BTS tracing
++ *	Markus Metzger <markus.t.metzger at intel.com>, Dec 2007
++ */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/errno.h>
++#include <linux/ptrace.h>
++#include <linux/regset.h>
++#include <linux/user.h>
++#include <linux/elf.h>
++#include <linux/security.h>
++#include <linux/audit.h>
++#include <linux/seccomp.h>
++#include <linux/signal.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/debugreg.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/prctl.h>
++#include <asm/proto.h>
++#include <asm/ds.h>
++
++#include "tls.h"
++
++enum x86_regset {
++	REGSET_GENERAL,
++	REGSET_FP,
++	REGSET_XFP,
++	REGSET_TLS,
++};
++
++/*
++ * does not yet catch signals sent when the child dies.
++ * in exit.c or in signal.c.
++ */
++
++/*
++ * Determines which flags the user has access to [1 = access, 0 = no access].
++ */
++#define FLAG_MASK_32		((unsigned long)			\
++				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
++				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
++				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
++				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
++				  X86_EFLAGS_RF | X86_EFLAGS_AC))
++
++/*
++ * Determines whether a value may be installed in a segment register.
++ */
++static inline bool invalid_selector(u16 value)
++{
++	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
++}
++
++#ifdef CONFIG_X86_32
++
++#define FLAG_MASK		FLAG_MASK_32
++
++static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
++{
++	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
++	regno >>= 2;
++	if (regno > FS)
++		--regno;
++	return &regs->bx + regno;
++}
++
++static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
++{
++	/*
++	 * Returning the value truncates it to 16 bits.
++	 */
++	unsigned int retval;
++	if (offset != offsetof(struct user_regs_struct, gs))
++		retval = *pt_regs_access(task_pt_regs(task), offset);
++	else {
++		retval = task->thread.gs;
++		if (task == current)
++			savesegment(gs, retval);
++	}
++	return retval;
++}
++
++static int set_segment_reg(struct task_struct *task,
++			   unsigned long offset, u16 value)
++{
++	/*
++	 * The value argument was already truncated to 16 bits.
++	 */
++	if (invalid_selector(value))
++		return -EIO;
++
++	if (offset != offsetof(struct user_regs_struct, gs))
++		*pt_regs_access(task_pt_regs(task), offset) = value;
++	else {
++		task->thread.gs = value;
++		if (task == current)
++			/*
++			 * The user-mode %gs is not affected by
++			 * kernel entry, so we must update the CPU.
++			 */
++			loadsegment(gs, value);
++	}
++
++	return 0;
++}
++
++static unsigned long debugreg_addr_limit(struct task_struct *task)
++{
++	return TASK_SIZE - 3;
++}
++
++#else  /* CONFIG_X86_64 */
++
++#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)
++
++static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
++{
++	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
++	return &regs->r15 + (offset / sizeof(regs->r15));
++}
++
++static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
++{
++	/*
++	 * Returning the value truncates it to 16 bits.
++	 */
++	unsigned int seg;
++
++	switch (offset) {
++	case offsetof(struct user_regs_struct, fs):
++		if (task == current) {
++			/* Older gas can't assemble movq %?s,%r?? */
++			asm("movl %%fs,%0" : "=r" (seg));
++			return seg;
++		}
++		return task->thread.fsindex;
++	case offsetof(struct user_regs_struct, gs):
++		if (task == current) {
++			asm("movl %%gs,%0" : "=r" (seg));
++			return seg;
++		}
++		return task->thread.gsindex;
++	case offsetof(struct user_regs_struct, ds):
++		if (task == current) {
++			asm("movl %%ds,%0" : "=r" (seg));
++			return seg;
++		}
++		return task->thread.ds;
++	case offsetof(struct user_regs_struct, es):
++		if (task == current) {
++			asm("movl %%es,%0" : "=r" (seg));
++			return seg;
++		}
++		return task->thread.es;
++
++	case offsetof(struct user_regs_struct, cs):
++	case offsetof(struct user_regs_struct, ss):
++		break;
++	}
++	return *pt_regs_access(task_pt_regs(task), offset);
++}
++
++static int set_segment_reg(struct task_struct *task,
++			   unsigned long offset, u16 value)
++{
++	/*
++	 * The value argument was already truncated to 16 bits.
++	 */
++	if (invalid_selector(value))
++		return -EIO;
++
++	switch (offset) {
++	case offsetof(struct user_regs_struct,fs):
++		/*
++		 * If this is setting fs as for normal 64-bit use but
++		 * setting fs_base has implicitly changed it, leave it.
++		 */
++		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
++		     task->thread.fs != 0) ||
++		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
++		     task->thread.fs == 0))
++			break;
++		task->thread.fsindex = value;
++		if (task == current)
++			loadsegment(fs, task->thread.fsindex);
++		break;
++	case offsetof(struct user_regs_struct,gs):
++		/*
++		 * If this is setting gs as for normal 64-bit use but
++		 * setting gs_base has implicitly changed it, leave it.
++		 */
++		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
++		     task->thread.gs != 0) ||
++		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
++		     task->thread.gs == 0))
++			break;
++		task->thread.gsindex = value;
++		if (task == current)
++			load_gs_index(task->thread.gsindex);
++		break;
++	case offsetof(struct user_regs_struct,ds):
++		task->thread.ds = value;
++		if (task == current)
++			loadsegment(ds, task->thread.ds);
++		break;
++	case offsetof(struct user_regs_struct,es):
++		task->thread.es = value;
++		if (task == current)
++			loadsegment(es, task->thread.es);
++		break;
++
++		/*
++		 * Can't actually change these in 64-bit mode.
++		 */
++	case offsetof(struct user_regs_struct,cs):
++#ifdef CONFIG_IA32_EMULATION
++		if (test_tsk_thread_flag(task, TIF_IA32))
++			task_pt_regs(task)->cs = value;
++#endif
++		break;
++	case offsetof(struct user_regs_struct,ss):
++#ifdef CONFIG_IA32_EMULATION
++		if (test_tsk_thread_flag(task, TIF_IA32))
++			task_pt_regs(task)->ss = value;
++#endif
++		break;
++	}
++
++	return 0;
++}
++
++static unsigned long debugreg_addr_limit(struct task_struct *task)
++{
++#ifdef CONFIG_IA32_EMULATION
++	if (test_tsk_thread_flag(task, TIF_IA32))
++		return IA32_PAGE_OFFSET - 3;
++#endif
++	return TASK_SIZE64 - 7;
++}
++
++#endif	/* CONFIG_X86_32 */
++
++static unsigned long get_flags(struct task_struct *task)
++{
++	unsigned long retval = task_pt_regs(task)->flags;
++
++	/*
++	 * If the debugger set TF, hide it from the readout.
++	 */
++	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
++		retval &= ~X86_EFLAGS_TF;
++
++	return retval;
++}
++
++static int set_flags(struct task_struct *task, unsigned long value)
++{
++	struct pt_regs *regs = task_pt_regs(task);
++
++	/*
++	 * If the user value contains TF, mark that
++	 * it was not "us" (the debugger) that set it.
++	 * If not, make sure it stays set if we had.
++	 */
++	if (value & X86_EFLAGS_TF)
++		clear_tsk_thread_flag(task, TIF_FORCED_TF);
++	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
++		value |= X86_EFLAGS_TF;
++
++	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
++
++	return 0;
++}
++
++static int putreg(struct task_struct *child,
++		  unsigned long offset, unsigned long value)
++{
++	switch (offset) {
++	case offsetof(struct user_regs_struct, cs):
++	case offsetof(struct user_regs_struct, ds):
++	case offsetof(struct user_regs_struct, es):
++	case offsetof(struct user_regs_struct, fs):
++	case offsetof(struct user_regs_struct, gs):
++	case offsetof(struct user_regs_struct, ss):
++		return set_segment_reg(child, offset, value);
++
++	case offsetof(struct user_regs_struct, flags):
++		return set_flags(child, value);
++
++#ifdef CONFIG_X86_64
++	case offsetof(struct user_regs_struct,fs_base):
++		if (value >= TASK_SIZE_OF(child))
++			return -EIO;
++		/*
++		 * When changing the segment base, use do_arch_prctl
++		 * to set either thread.fs or thread.fsindex and the
++		 * corresponding GDT slot.
++		 */
++		if (child->thread.fs != value)
++			return do_arch_prctl(child, ARCH_SET_FS, value);
++		return 0;
++	case offsetof(struct user_regs_struct,gs_base):
++		/*
++		 * Exactly the same here as the %fs handling above.
++		 */
++		if (value >= TASK_SIZE_OF(child))
++			return -EIO;
++		if (child->thread.gs != value)
++			return do_arch_prctl(child, ARCH_SET_GS, value);
++		return 0;
++#endif
++	}
++
++	*pt_regs_access(task_pt_regs(child), offset) = value;
++	return 0;
++}
++
++static unsigned long getreg(struct task_struct *task, unsigned long offset)
++{
++	switch (offset) {
++	case offsetof(struct user_regs_struct, cs):
++	case offsetof(struct user_regs_struct, ds):
++	case offsetof(struct user_regs_struct, es):
++	case offsetof(struct user_regs_struct, fs):
++	case offsetof(struct user_regs_struct, gs):
++	case offsetof(struct user_regs_struct, ss):
++		return get_segment_reg(task, offset);
++
++	case offsetof(struct user_regs_struct, flags):
++		return get_flags(task);
++
++#ifdef CONFIG_X86_64
++	case offsetof(struct user_regs_struct, fs_base): {
++		/*
++		 * do_arch_prctl may have used a GDT slot instead of
++		 * the MSR.  To userland, it appears the same either
++		 * way, except the %fs segment selector might not be 0.
++		 */
++		unsigned int seg = task->thread.fsindex;
++		if (task->thread.fs != 0)
++			return task->thread.fs;
++		if (task == current)
++			asm("movl %%fs,%0" : "=r" (seg));
++		if (seg != FS_TLS_SEL)
++			return 0;
++		return get_desc_base(&task->thread.tls_array[FS_TLS]);
++	}
++	case offsetof(struct user_regs_struct, gs_base): {
++		/*
++		 * Exactly the same here as the %fs handling above.
++		 */
++		unsigned int seg = task->thread.gsindex;
++		if (task->thread.gs != 0)
++			return task->thread.gs;
++		if (task == current)
++			asm("movl %%gs,%0" : "=r" (seg));
++		if (seg != GS_TLS_SEL)
++			return 0;
++		return get_desc_base(&task->thread.tls_array[GS_TLS]);
++	}
++#endif
++	}
++
++	return *pt_regs_access(task_pt_regs(task), offset);
++}
++
++static int genregs_get(struct task_struct *target,
++		       const struct user_regset *regset,
++		       unsigned int pos, unsigned int count,
++		       void *kbuf, void __user *ubuf)
++{
++	if (kbuf) {
++		unsigned long *k = kbuf;
++		while (count > 0) {
++			*k++ = getreg(target, pos);
++			count -= sizeof(*k);
++			pos += sizeof(*k);
++		}
++	} else {
++		unsigned long __user *u = ubuf;
++		while (count > 0) {
++			if (__put_user(getreg(target, pos), u++))
++				return -EFAULT;
++			count -= sizeof(*u);
++			pos += sizeof(*u);
++		}
++	}
++
++	return 0;
++}
++
++static int genregs_set(struct task_struct *target,
++		       const struct user_regset *regset,
++		       unsigned int pos, unsigned int count,
++		       const void *kbuf, const void __user *ubuf)
++{
++	int ret = 0;
++	if (kbuf) {
++		const unsigned long *k = kbuf;
++		while (count > 0 && !ret) {
++			ret = putreg(target, pos, *k++);
++			count -= sizeof(*k);
++			pos += sizeof(*k);
++		}
++	} else {
++		const unsigned long  __user *u = ubuf;
++		while (count > 0 && !ret) {
++			unsigned long word;
++			ret = __get_user(word, u++);
++			if (ret)
++				break;
++			ret = putreg(target, pos, word);
++			count -= sizeof(*u);
++			pos += sizeof(*u);
++		}
++	}
++	return ret;
++}
++
++/*
++ * This function is trivial and will be inlined by the compiler.
++ * Having it separates the implementation details of debug
++ * registers from the interface details of ptrace.
++ */
++static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
++{
++	switch (n) {
++	case 0:		return child->thread.debugreg0;
++	case 1:		return child->thread.debugreg1;
++	case 2:		return child->thread.debugreg2;
++	case 3:		return child->thread.debugreg3;
++	case 6:		return child->thread.debugreg6;
++	case 7:		return child->thread.debugreg7;
++	}
++	return 0;
++}
++
++static int ptrace_set_debugreg(struct task_struct *child,
++			       int n, unsigned long data)
++{
++	int i;
++
++	if (unlikely(n == 4 || n == 5))
++		return -EIO;
++
++	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
++		return -EIO;
++
++	switch (n) {
++	case 0:		child->thread.debugreg0 = data; break;
++	case 1:		child->thread.debugreg1 = data; break;
++	case 2:		child->thread.debugreg2 = data; break;
++	case 3:		child->thread.debugreg3 = data; break;
++
++	case 6:
++		if ((data & ~0xffffffffUL) != 0)
++			return -EIO;
++		child->thread.debugreg6 = data;
++		break;
++
++	case 7:
++		/*
++		 * Sanity-check data. Take one half-byte at once with
++		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
++		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
++		 * 2 and 3 are LENi. Given a list of invalid values,
++		 * we do mask |= 1 << invalid_value, so that
++		 * (mask >> check) & 1 is a correct test for invalid
++		 * values.
++		 *
++		 * R/Wi contains the type of the breakpoint /
++		 * watchpoint, LENi contains the length of the watched
++		 * data in the watchpoint case.
++		 *
++		 * The invalid values are:
++		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
++		 * - R/Wi == 0x10 (break on I/O reads or writes), so
++		 *   mask |= 0x4444.
++		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
++		 *   0x1110.
++		 *
++		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
++		 *
++		 * See the Intel Manual "System Programming Guide",
++		 * 15.2.4
++		 *
++		 * Note that LENi == 0x10 is defined on x86_64 in long
++		 * mode (i.e. even for 32-bit userspace software, but
++		 * 64-bit kernel), so the x86_64 mask value is 0x5454.
++		 * See the AMD manual no. 24593 (AMD64 System Programming)
++		 */
++#ifdef CONFIG_X86_32
++#define	DR7_MASK	0x5f54
++#else
++#define	DR7_MASK	0x5554
++#endif
++		data &= ~DR_CONTROL_RESERVED;
++		for (i = 0; i < 4; i++)
++			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
++				return -EIO;
++		child->thread.debugreg7 = data;
++		if (data)
++			set_tsk_thread_flag(child, TIF_DEBUG);
++		else
++			clear_tsk_thread_flag(child, TIF_DEBUG);
++		break;
++	}
++
++	return 0;
++}
++
++static int ptrace_bts_get_size(struct task_struct *child)
++{
++	if (!child->thread.ds_area_msr)
++		return -ENXIO;
++
++	return ds_get_bts_index((void *)child->thread.ds_area_msr);
++}
++
++static int ptrace_bts_read_record(struct task_struct *child,
++				  long index,
++				  struct bts_struct __user *out)
++{
++	struct bts_struct ret;
++	int retval;
++	int bts_end;
++	int bts_index;
++
++	if (!child->thread.ds_area_msr)
++		return -ENXIO;
++
++	if (index < 0)
++		return -EINVAL;
++
++	bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
++	if (bts_end <= index)
++		return -EINVAL;
++
++	/* translate the ptrace bts index into the ds bts index */
++	bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr);
++	bts_index -= (index + 1);
++	if (bts_index < 0)
++		bts_index += bts_end;
++
++	retval = ds_read_bts((void *)child->thread.ds_area_msr,
++			     bts_index, &ret);
++	if (retval < 0)
++		return retval;
++
++	if (copy_to_user(out, &ret, sizeof(ret)))
++		return -EFAULT;
++
++	return sizeof(ret);
++}
++
++static int ptrace_bts_write_record(struct task_struct *child,
++				   const struct bts_struct *in)
++{
++	int retval;
++
++	if (!child->thread.ds_area_msr)
++		return -ENXIO;
++
++	retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
++	if (retval)
++		return retval;
++
++	return sizeof(*in);
++}
++
++static int ptrace_bts_clear(struct task_struct *child)
++{
++	if (!child->thread.ds_area_msr)
++		return -ENXIO;
++
++	return ds_clear((void *)child->thread.ds_area_msr);
++}
++
++static int ptrace_bts_drain(struct task_struct *child,
++			    long size,
++			    struct bts_struct __user *out)
++{
++	int end, i;
++	void *ds = (void *)child->thread.ds_area_msr;
++
++	if (!ds)
++		return -ENXIO;
++
++	end = ds_get_bts_index(ds);
++	if (end <= 0)
++		return end;
++
++	if (size < (end * sizeof(struct bts_struct)))
++		return -EIO;
++
++	for (i = 0; i < end; i++, out++) {
++		struct bts_struct ret;
++		int retval;
++
++		retval = ds_read_bts(ds, i, &ret);
++		if (retval < 0)
++			return retval;
++
++		if (copy_to_user(out, &ret, sizeof(ret)))
++			return -EFAULT;
++	}
++
++	ds_clear(ds);
++
++	return end;
++}
++
++static int ptrace_bts_realloc(struct task_struct *child,
++			      int size, int reduce_size)
++{
++	unsigned long rlim, vm;
++	int ret, old_size;
++
++	if (size < 0)
++		return -EINVAL;
++
++	old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
++	if (old_size < 0)
++		return old_size;
++
++	ret = ds_free((void **)&child->thread.ds_area_msr);
++	if (ret < 0)
++		goto out;
++
++	size >>= PAGE_SHIFT;
++	old_size >>= PAGE_SHIFT;
++
++	current->mm->total_vm  -= old_size;
++	current->mm->locked_vm -= old_size;
++
++	if (size == 0)
++		goto out;
++
++	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
++	vm = current->mm->total_vm  + size;
++	if (rlim < vm) {
++		ret = -ENOMEM;
++
++		if (!reduce_size)
++			goto out;
++
++		size = rlim - current->mm->total_vm;
++		if (size <= 0)
++			goto out;
++	}
++
++	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
++	vm = current->mm->locked_vm  + size;
++	if (rlim < vm) {
++		ret = -ENOMEM;
++
++		if (!reduce_size)
++			goto out;
++
++		size = rlim - current->mm->locked_vm;
++		if (size <= 0)
++			goto out;
++	}
++
++	ret = ds_allocate((void **)&child->thread.ds_area_msr,
++			  size << PAGE_SHIFT);
++	if (ret < 0)
++		goto out;
++
++	current->mm->total_vm  += size;
++	current->mm->locked_vm += size;
++
++out:
++	if (child->thread.ds_area_msr)
++		set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
++	else
++		clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
++
++	return ret;
++}
++
++static int ptrace_bts_config(struct task_struct *child,
++			     long cfg_size,
++			     const struct ptrace_bts_config __user *ucfg)
++{
++	struct ptrace_bts_config cfg;
++	int bts_size, ret = 0;
++	void *ds;
++
++	if (cfg_size < sizeof(cfg))
++		return -EIO;
++
++	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
++		return -EFAULT;
++
++	if ((int)cfg.size < 0)
++		return -EINVAL;
++
++	bts_size = 0;
++	ds = (void *)child->thread.ds_area_msr;
++	if (ds) {
++		bts_size = ds_get_bts_size(ds);
++		if (bts_size < 0)
++			return bts_size;
++	}
++	cfg.size = PAGE_ALIGN(cfg.size);
++
++	if (bts_size != cfg.size) {
++		ret = ptrace_bts_realloc(child, cfg.size,
++					 cfg.flags & PTRACE_BTS_O_CUT_SIZE);
++		if (ret < 0)
++			goto errout;
++
++		ds = (void *)child->thread.ds_area_msr;
++	}
++
++	if (cfg.flags & PTRACE_BTS_O_SIGNAL)
++		ret = ds_set_overflow(ds, DS_O_SIGNAL);
++	else
++		ret = ds_set_overflow(ds, DS_O_WRAP);
++	if (ret < 0)
++		goto errout;
++
++	if (cfg.flags & PTRACE_BTS_O_TRACE)
++		child->thread.debugctlmsr |= ds_debugctl_mask();
++	else
++		child->thread.debugctlmsr &= ~ds_debugctl_mask();
++
++	if (cfg.flags & PTRACE_BTS_O_SCHED)
++		set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
++	else
++		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
++
++	ret = sizeof(cfg);
++
++out:
++	if (child->thread.debugctlmsr)
++		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
++	else
++		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
++
++	return ret;
++
++errout:
++	child->thread.debugctlmsr &= ~ds_debugctl_mask();
++	clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
++	goto out;
++}
++
++static int ptrace_bts_status(struct task_struct *child,
++			     long cfg_size,
++			     struct ptrace_bts_config __user *ucfg)
++{
++	void *ds = (void *)child->thread.ds_area_msr;
++	struct ptrace_bts_config cfg;
++
++	if (cfg_size < sizeof(cfg))
++		return -EIO;
++
++	memset(&cfg, 0, sizeof(cfg));
++
++	if (ds) {
++		cfg.size = ds_get_bts_size(ds);
++
++		if (ds_get_overflow(ds) == DS_O_SIGNAL)
++			cfg.flags |= PTRACE_BTS_O_SIGNAL;
++
++		if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
++		    child->thread.debugctlmsr & ds_debugctl_mask())
++			cfg.flags |= PTRACE_BTS_O_TRACE;
++
++		if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
++			cfg.flags |= PTRACE_BTS_O_SCHED;
++	}
++
++	cfg.bts_size = sizeof(struct bts_struct);
++
++	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
++		return -EFAULT;
++
++	return sizeof(cfg);
++}
++
++void ptrace_bts_take_timestamp(struct task_struct *tsk,
++			       enum bts_qualifier qualifier)
++{
++	struct bts_struct rec = {
++		.qualifier = qualifier,
++		.variant.jiffies = jiffies_64
++	};
++
++	ptrace_bts_write_record(tsk, &rec);
++}
++
++/*
++ * Called by kernel/ptrace.c when detaching..
++ *
++ * Make sure the single step bit is not set.
++ */
++void ptrace_disable(struct task_struct *child)
++{
++	user_disable_single_step(child);
++#ifdef TIF_SYSCALL_EMU
++	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
++#endif
++	if (child->thread.ds_area_msr) {
++		ptrace_bts_realloc(child, 0, 0);
++		child->thread.debugctlmsr &= ~ds_debugctl_mask();
++		if (!child->thread.debugctlmsr)
++			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
++		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
++	}
++}
++
++#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
++static const struct user_regset_view user_x86_32_view; /* Initialized below. */
++#endif
++
++long arch_ptrace(struct task_struct *child, long request, long addr, long data)
++{
++	int ret;
++	unsigned long __user *datap = (unsigned long __user *)data;
++
++	switch (request) {
++	/* read the word at location addr in the USER area. */
++	case PTRACE_PEEKUSR: {
++		unsigned long tmp;
++
++		ret = -EIO;
++		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
++		    addr >= sizeof(struct user))
++			break;
++
++		tmp = 0;  /* Default return condition */
++		if (addr < sizeof(struct user_regs_struct))
++			tmp = getreg(child, addr);
++		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
++			 addr <= offsetof(struct user, u_debugreg[7])) {
++			addr -= offsetof(struct user, u_debugreg[0]);
++			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
++		}
++		ret = put_user(tmp, datap);
++		break;
++	}
++
++	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
++		ret = -EIO;
++		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
++		    addr >= sizeof(struct user))
++			break;
++
++		if (addr < sizeof(struct user_regs_struct))
++			ret = putreg(child, addr, data);
++		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
++			 addr <= offsetof(struct user, u_debugreg[7])) {
++			addr -= offsetof(struct user, u_debugreg[0]);
++			ret = ptrace_set_debugreg(child,
++						  addr / sizeof(data), data);
++		}
++		break;
++
++	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
++		return copy_regset_to_user(child,
++					   task_user_regset_view(current),
++					   REGSET_GENERAL,
++					   0, sizeof(struct user_regs_struct),
++					   datap);
++
++	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
++		return copy_regset_from_user(child,
++					     task_user_regset_view(current),
++					     REGSET_GENERAL,
++					     0, sizeof(struct user_regs_struct),
++					     datap);
++
++	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
++		return copy_regset_to_user(child,
++					   task_user_regset_view(current),
++					   REGSET_FP,
++					   0, sizeof(struct user_i387_struct),
++					   datap);
++
++	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
++		return copy_regset_from_user(child,
++					     task_user_regset_view(current),
++					     REGSET_FP,
++					     0, sizeof(struct user_i387_struct),
++					     datap);
++
++#ifdef CONFIG_X86_32
++	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
++		return copy_regset_to_user(child, &user_x86_32_view,
++					   REGSET_XFP,
++					   0, sizeof(struct user_fxsr_struct),
++					   datap);
++
++	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
++		return copy_regset_from_user(child, &user_x86_32_view,
++					     REGSET_XFP,
++					     0, sizeof(struct user_fxsr_struct),
++					     datap);
++#endif
++
++#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
++	case PTRACE_GET_THREAD_AREA:
++		if (addr < 0)
++			return -EIO;
++		ret = do_get_thread_area(child, addr,
++					 (struct user_desc __user *) data);
++		break;
++
++	case PTRACE_SET_THREAD_AREA:
++		if (addr < 0)
++			return -EIO;
++		ret = do_set_thread_area(child, addr,
++					 (struct user_desc __user *) data, 0);
++		break;
++#endif
++
++#ifdef CONFIG_X86_64
++		/* normal 64bit interface to access TLS data.
++		   Works just like arch_prctl, except that the arguments
++		   are reversed. */
++	case PTRACE_ARCH_PRCTL:
++		ret = do_arch_prctl(child, data, addr);
++		break;
++#endif
++
++	case PTRACE_BTS_CONFIG:
++		ret = ptrace_bts_config
++			(child, data, (struct ptrace_bts_config __user *)addr);
++		break;
 +
- 	if (!ret)
- 		ret = cp_stat64(statbuf, &stat);
- 	return ret;
- }
- 
--asmlinkage long
--sys32_lstat64(char __user * filename, struct stat64 __user *statbuf)
-+asmlinkage long sys32_lstat64(char __user *filename,
-+			      struct stat64 __user *statbuf)
- {
- 	struct kstat stat;
- 	int ret = vfs_lstat(filename, &stat);
-@@ -174,8 +176,7 @@ sys32_lstat64(char __user * filename, struct stat64 __user *statbuf)
- 	return ret;
- }
- 
--asmlinkage long
--sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
-+asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
- {
- 	struct kstat stat;
- 	int ret = vfs_fstat(fd, &stat);
-@@ -184,9 +185,8 @@ sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
- 	return ret;
- }
- 
--asmlinkage long
--sys32_fstatat(unsigned int dfd, char __user *filename,
--	      struct stat64 __user* statbuf, int flag)
-+asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
-+			      struct stat64 __user *statbuf, int flag)
- {
- 	struct kstat stat;
- 	int error = -EINVAL;
-@@ -221,8 +221,7 @@ struct mmap_arg_struct {
- 	unsigned int offset;
- };
- 
--asmlinkage long
--sys32_mmap(struct mmap_arg_struct __user *arg)
-+asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
- {
- 	struct mmap_arg_struct a;
- 	struct file *file = NULL;
-@@ -233,33 +232,33 @@ sys32_mmap(struct mmap_arg_struct __user *arg)
- 		return -EFAULT;
- 
- 	if (a.offset & ~PAGE_MASK)
--		return -EINVAL; 
-+		return -EINVAL;
- 
- 	if (!(a.flags & MAP_ANONYMOUS)) {
- 		file = fget(a.fd);
- 		if (!file)
- 			return -EBADF;
- 	}
--	
--	mm = current->mm; 
--	down_write(&mm->mmap_sem); 
--	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, a.offset>>PAGE_SHIFT);
++	case PTRACE_BTS_STATUS:
++		ret = ptrace_bts_status
++			(child, data, (struct ptrace_bts_config __user *)addr);
++		break;
 +
-+	mm = current->mm;
-+	down_write(&mm->mmap_sem);
-+	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
-+			       a.offset>>PAGE_SHIFT);
- 	if (file)
- 		fput(file);
- 
--	up_write(&mm->mmap_sem); 
-+	up_write(&mm->mmap_sem);
- 
- 	return retval;
- }
- 
--asmlinkage long 
--sys32_mprotect(unsigned long start, size_t len, unsigned long prot)
-+asmlinkage long sys32_mprotect(unsigned long start, size_t len,
-+			       unsigned long prot)
- {
--	return sys_mprotect(start,len,prot); 
-+	return sys_mprotect(start, len, prot);
- }
- 
--asmlinkage long
--sys32_pipe(int __user *fd)
-+asmlinkage long sys32_pipe(int __user *fd)
- {
- 	int retval;
- 	int fds[2];
-@@ -269,13 +268,13 @@ sys32_pipe(int __user *fd)
- 		goto out;
- 	if (copy_to_user(fd, fds, sizeof(fds)))
- 		retval = -EFAULT;
--  out:
-+out:
- 	return retval;
- }
- 
--asmlinkage long
--sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
--		   struct sigaction32 __user *oact,  unsigned int sigsetsize)
-+asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
-+				   struct sigaction32 __user *oact,
-+				   unsigned int sigsetsize)
- {
- 	struct k_sigaction new_ka, old_ka;
- 	int ret;
-@@ -291,12 +290,17 @@ sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
- 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- 		    __get_user(handler, &act->sa_handler) ||
- 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
--		    __get_user(restorer, &act->sa_restorer)||
--		    __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t)))
-+		    __get_user(restorer, &act->sa_restorer) ||
-+		    __copy_from_user(&set32, &act->sa_mask,
-+				     sizeof(compat_sigset_t)))
- 			return -EFAULT;
- 		new_ka.sa.sa_handler = compat_ptr(handler);
- 		new_ka.sa.sa_restorer = compat_ptr(restorer);
--		/* FIXME: here we rely on _COMPAT_NSIG_WORS to be >= than _NSIG_WORDS << 1 */
++	case PTRACE_BTS_SIZE:
++		ret = ptrace_bts_get_size(child);
++		break;
++
++	case PTRACE_BTS_GET:
++		ret = ptrace_bts_read_record
++			(child, data, (struct bts_struct __user *) addr);
++		break;
++
++	case PTRACE_BTS_CLEAR:
++		ret = ptrace_bts_clear(child);
++		break;
++
++	case PTRACE_BTS_DRAIN:
++		ret = ptrace_bts_drain
++			(child, data, (struct bts_struct __user *) addr);
++		break;
++
++	default:
++		ret = ptrace_request(child, request, addr, data);
++		break;
++	}
++
++	return ret;
++}
++
++#ifdef CONFIG_IA32_EMULATION
++
++#include <linux/compat.h>
++#include <linux/syscalls.h>
++#include <asm/ia32.h>
++#include <asm/user32.h>
++
++#define R32(l,q)							\
++	case offsetof(struct user32, regs.l):				\
++		regs->q = value; break
++
++#define SEG32(rs)							\
++	case offsetof(struct user32, regs.rs):				\
++		return set_segment_reg(child,				\
++				       offsetof(struct user_regs_struct, rs), \
++				       value);				\
++		break
++
++static int putreg32(struct task_struct *child, unsigned regno, u32 value)
++{
++	struct pt_regs *regs = task_pt_regs(child);
++
++	switch (regno) {
++
++	SEG32(cs);
++	SEG32(ds);
++	SEG32(es);
++	SEG32(fs);
++	SEG32(gs);
++	SEG32(ss);
++
++	R32(ebx, bx);
++	R32(ecx, cx);
++	R32(edx, dx);
++	R32(edi, di);
++	R32(esi, si);
++	R32(ebp, bp);
++	R32(eax, ax);
++	R32(orig_eax, orig_ax);
++	R32(eip, ip);
++	R32(esp, sp);
++
++	case offsetof(struct user32, regs.eflags):
++		return set_flags(child, value);
++
++	case offsetof(struct user32, u_debugreg[0]) ...
++		offsetof(struct user32, u_debugreg[7]):
++		regno -= offsetof(struct user32, u_debugreg[0]);
++		return ptrace_set_debugreg(child, regno / 4, value);
++
++	default:
++		if (regno > sizeof(struct user32) || (regno & 3))
++			return -EIO;
 +
 +		/*
-+		 * FIXME: here we rely on _COMPAT_NSIG_WORS to be >=
-+		 * than _NSIG_WORDS << 1
++		 * Other dummy fields in the virtual user structure
++		 * are ignored
 +		 */
- 		switch (_NSIG_WORDS) {
- 		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
- 				| (((long)set32.sig[7]) << 32);
-@@ -312,7 +316,10 @@ sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
- 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
- 
- 	if (!ret && oact) {
--		/* FIXME: here we rely on _COMPAT_NSIG_WORS to be >= than _NSIG_WORDS << 1 */
++		break;
++	}
++	return 0;
++}
++
++#undef R32
++#undef SEG32
++
++#define R32(l,q)							\
++	case offsetof(struct user32, regs.l):				\
++		*val = regs->q; break
++
++#define SEG32(rs)							\
++	case offsetof(struct user32, regs.rs):				\
++		*val = get_segment_reg(child,				\
++				       offsetof(struct user_regs_struct, rs)); \
++		break
++
++static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
++{
++	struct pt_regs *regs = task_pt_regs(child);
++
++	switch (regno) {
++
++	SEG32(ds);
++	SEG32(es);
++	SEG32(fs);
++	SEG32(gs);
++
++	R32(cs, cs);
++	R32(ss, ss);
++	R32(ebx, bx);
++	R32(ecx, cx);
++	R32(edx, dx);
++	R32(edi, di);
++	R32(esi, si);
++	R32(ebp, bp);
++	R32(eax, ax);
++	R32(orig_eax, orig_ax);
++	R32(eip, ip);
++	R32(esp, sp);
++
++	case offsetof(struct user32, regs.eflags):
++		*val = get_flags(child);
++		break;
++
++	case offsetof(struct user32, u_debugreg[0]) ...
++		offsetof(struct user32, u_debugreg[7]):
++		regno -= offsetof(struct user32, u_debugreg[0]);
++		*val = ptrace_get_debugreg(child, regno / 4);
++		break;
++
++	default:
++		if (regno > sizeof(struct user32) || (regno & 3))
++			return -EIO;
++
 +		/*
-+		 * FIXME: here we rely on _COMPAT_NSIG_WORS to be >=
-+		 * than _NSIG_WORDS << 1
++		 * Other dummy fields in the virtual user structure
++		 * are ignored
 +		 */
- 		switch (_NSIG_WORDS) {
- 		case 4:
- 			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
-@@ -328,23 +335,26 @@ sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
- 			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
- 		}
- 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
--		    __put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
--		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer) ||
-+		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
-+			       &oact->sa_handler) ||
-+		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
-+			       &oact->sa_restorer) ||
- 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
--		    __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t)))
-+		    __copy_to_user(&oact->sa_mask, &set32,
-+				   sizeof(compat_sigset_t)))
- 			return -EFAULT;
- 	}
- 
- 	return ret;
- }
- 
--asmlinkage long
--sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact)
-+asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
-+				struct old_sigaction32 __user *oact)
- {
--        struct k_sigaction new_ka, old_ka;
--        int ret;
-+	struct k_sigaction new_ka, old_ka;
-+	int ret;
- 
--        if (act) {
-+	if (act) {
- 		compat_old_sigset_t mask;
- 		compat_uptr_t handler, restorer;
- 
-@@ -359,33 +369,35 @@ sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigacti
- 		new_ka.sa.sa_restorer = compat_ptr(restorer);
- 
- 		siginitset(&new_ka.sa.sa_mask, mask);
--        }
++		*val = 0;
++		break;
 +	}
- 
--        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
- 
- 	if (!ret && oact) {
- 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
--		    __put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
--		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer) ||
-+		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
-+			       &oact->sa_handler) ||
-+		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
-+			       &oact->sa_restorer) ||
- 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
- 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
- 			return -EFAULT;
--        }
++	return 0;
++}
++
++#undef R32
++#undef SEG32
++
++static int genregs32_get(struct task_struct *target,
++			 const struct user_regset *regset,
++			 unsigned int pos, unsigned int count,
++			 void *kbuf, void __user *ubuf)
++{
++	if (kbuf) {
++		compat_ulong_t *k = kbuf;
++		while (count > 0) {
++			getreg32(target, pos, k++);
++			count -= sizeof(*k);
++			pos += sizeof(*k);
++		}
++	} else {
++		compat_ulong_t __user *u = ubuf;
++		while (count > 0) {
++			compat_ulong_t word;
++			getreg32(target, pos, &word);
++			if (__put_user(word, u++))
++				return -EFAULT;
++			count -= sizeof(*u);
++			pos += sizeof(*u);
++		}
 +	}
- 
- 	return ret;
- }
- 
--asmlinkage long
--sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
--			compat_sigset_t __user *oset, unsigned int sigsetsize)
-+asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
-+				     compat_sigset_t __user *oset,
-+				     unsigned int sigsetsize)
- {
- 	sigset_t s;
- 	compat_sigset_t s32;
- 	int ret;
- 	mm_segment_t old_fs = get_fs();
--	
 +
- 	if (set) {
--		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
-+		if (copy_from_user(&s32, set, sizeof(compat_sigset_t)))
- 			return -EFAULT;
- 		switch (_NSIG_WORDS) {
- 		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
-@@ -394,13 +406,14 @@ sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
- 		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
- 		}
- 	}
--	set_fs (KERNEL_DS);
-+	set_fs(KERNEL_DS);
- 	ret = sys_rt_sigprocmask(how,
- 				 set ? (sigset_t __user *)&s : NULL,
- 				 oset ? (sigset_t __user *)&s : NULL,
--				 sigsetsize); 
--	set_fs (old_fs);
--	if (ret) return ret;
-+				 sigsetsize);
-+	set_fs(old_fs);
++	return 0;
++}
++
++static int genregs32_set(struct task_struct *target,
++			 const struct user_regset *regset,
++			 unsigned int pos, unsigned int count,
++			 const void *kbuf, const void __user *ubuf)
++{
++	int ret = 0;
++	if (kbuf) {
++		const compat_ulong_t *k = kbuf;
++		while (count > 0 && !ret) {
++			ret = putreg(target, pos, *k++);
++			count -= sizeof(*k);
++			pos += sizeof(*k);
++		}
++	} else {
++		const compat_ulong_t __user *u = ubuf;
++		while (count > 0 && !ret) {
++			compat_ulong_t word;
++			ret = __get_user(word, u++);
++			if (ret)
++				break;
++			ret = putreg(target, pos, word);
++			count -= sizeof(*u);
++			pos += sizeof(*u);
++		}
++	}
++	return ret;
++}
++
++static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
++{
++	siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
++	compat_siginfo_t __user *si32 = compat_ptr(data);
++	siginfo_t ssi;
++	int ret;
++
++	if (request == PTRACE_SETSIGINFO) {
++		memset(&ssi, 0, sizeof(siginfo_t));
++		ret = copy_siginfo_from_user32(&ssi, si32);
++		if (ret)
++			return ret;
++		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
++			return -EFAULT;
++	}
++	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
 +	if (ret)
 +		return ret;
- 	if (oset) {
- 		switch (_NSIG_WORDS) {
- 		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
-@@ -408,52 +421,49 @@ sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
- 		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
- 		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
- 		}
--		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
-+		if (copy_to_user(oset, &s32, sizeof(compat_sigset_t)))
- 			return -EFAULT;
- 	}
- 	return 0;
- }
- 
--static inline long
--get_tv32(struct timeval *o, struct compat_timeval __user *i)
-+static inline long get_tv32(struct timeval *o, struct compat_timeval __user *i)
- {
--	int err = -EFAULT; 
--	if (access_ok(VERIFY_READ, i, sizeof(*i))) { 
-+	int err = -EFAULT;
++	if (request == PTRACE_GETSIGINFO) {
++		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
++			return -EFAULT;
++		ret = copy_siginfo_to_user32(si32, &ssi);
++	}
++	return ret;
++}
 +
-+	if (access_ok(VERIFY_READ, i, sizeof(*i))) {
- 		err = __get_user(o->tv_sec, &i->tv_sec);
- 		err |= __get_user(o->tv_usec, &i->tv_usec);
- 	}
--	return err; 
-+	return err;
- }
- 
--static inline long
--put_tv32(struct compat_timeval __user *o, struct timeval *i)
-+static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
- {
- 	int err = -EFAULT;
--	if (access_ok(VERIFY_WRITE, o, sizeof(*o))) { 
++asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
++{
++	struct task_struct *child;
++	struct pt_regs *childregs;
++	void __user *datap = compat_ptr(data);
++	int ret;
++	__u32 val;
 +
-+	if (access_ok(VERIFY_WRITE, o, sizeof(*o))) {
- 		err = __put_user(i->tv_sec, &o->tv_sec);
- 		err |= __put_user(i->tv_usec, &o->tv_usec);
--	} 
--	return err; 
++	switch (request) {
++	case PTRACE_TRACEME:
++	case PTRACE_ATTACH:
++	case PTRACE_KILL:
++	case PTRACE_CONT:
++	case PTRACE_SINGLESTEP:
++	case PTRACE_SINGLEBLOCK:
++	case PTRACE_DETACH:
++	case PTRACE_SYSCALL:
++	case PTRACE_OLDSETOPTIONS:
++	case PTRACE_SETOPTIONS:
++	case PTRACE_SET_THREAD_AREA:
++	case PTRACE_GET_THREAD_AREA:
++	case PTRACE_BTS_CONFIG:
++	case PTRACE_BTS_STATUS:
++	case PTRACE_BTS_SIZE:
++	case PTRACE_BTS_GET:
++	case PTRACE_BTS_CLEAR:
++	case PTRACE_BTS_DRAIN:
++		return sys_ptrace(request, pid, addr, data);
++
++	default:
++		return -EINVAL;
++
++	case PTRACE_PEEKTEXT:
++	case PTRACE_PEEKDATA:
++	case PTRACE_POKEDATA:
++	case PTRACE_POKETEXT:
++	case PTRACE_POKEUSR:
++	case PTRACE_PEEKUSR:
++	case PTRACE_GETREGS:
++	case PTRACE_SETREGS:
++	case PTRACE_SETFPREGS:
++	case PTRACE_GETFPREGS:
++	case PTRACE_SETFPXREGS:
++	case PTRACE_GETFPXREGS:
++	case PTRACE_GETEVENTMSG:
++		break;
++
++	case PTRACE_SETSIGINFO:
++	case PTRACE_GETSIGINFO:
++		return ptrace32_siginfo(request, pid, addr, data);
 +	}
-+	return err;
- }
- 
--extern unsigned int alarm_setitimer(unsigned int seconds);
--
--asmlinkage long
--sys32_alarm(unsigned int seconds)
-+asmlinkage long sys32_alarm(unsigned int seconds)
- {
- 	return alarm_setitimer(seconds);
- }
- 
--/* Translations due to time_t size differences.  Which affects all
--   sorts of things, like timeval and itimerval.  */
--
--extern struct timezone sys_tz;
--
--asmlinkage long
--sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-+/*
-+ * Translations due to time_t size differences. Which affects all
-+ * sorts of things, like timeval and itimerval.
-+ */
-+asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv,
-+				   struct timezone __user *tz)
- {
- 	if (tv) {
- 		struct timeval ktv;
 +
- 		do_gettimeofday(&ktv);
- 		if (put_tv32(tv, &ktv))
- 			return -EFAULT;
-@@ -465,14 +475,14 @@ sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
- 	return 0;
- }
- 
--asmlinkage long
--sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-+asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv,
-+				   struct timezone __user *tz)
- {
- 	struct timeval ktv;
- 	struct timespec kts;
- 	struct timezone ktz;
- 
-- 	if (tv) {
-+	if (tv) {
- 		if (get_tv32(&ktv, tv))
- 			return -EFAULT;
- 		kts.tv_sec = ktv.tv_sec;
-@@ -494,8 +504,7 @@ struct sel_arg_struct {
- 	unsigned int tvp;
- };
- 
--asmlinkage long
--sys32_old_select(struct sel_arg_struct __user *arg)
-+asmlinkage long sys32_old_select(struct sel_arg_struct __user *arg)
- {
- 	struct sel_arg_struct a;
- 
-@@ -505,50 +514,45 @@ sys32_old_select(struct sel_arg_struct __user *arg)
- 				 compat_ptr(a.exp), compat_ptr(a.tvp));
- }
- 
--extern asmlinkage long
--compat_sys_wait4(compat_pid_t pid, compat_uint_t * stat_addr, int options,
--		 struct compat_rusage *ru);
--
--asmlinkage long
--sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
-+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
-+			      int options)
- {
- 	return compat_sys_wait4(pid, stat_addr, options, NULL);
- }
- 
- /* 32-bit timeval and related flotsam.  */
- 
--asmlinkage long
--sys32_sysfs(int option, u32 arg1, u32 arg2)
-+asmlinkage long sys32_sysfs(int option, u32 arg1, u32 arg2)
- {
- 	return sys_sysfs(option, arg1, arg2);
- }
- 
--asmlinkage long
--sys32_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
-+asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
-+				    struct compat_timespec __user *interval)
- {
- 	struct timespec t;
- 	int ret;
--	mm_segment_t old_fs = get_fs ();
--	
--	set_fs (KERNEL_DS);
-+	mm_segment_t old_fs = get_fs();
++	child = ptrace_get_task_struct(pid);
++	if (IS_ERR(child))
++		return PTR_ERR(child);
 +
-+	set_fs(KERNEL_DS);
- 	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
--	set_fs (old_fs);
-+	set_fs(old_fs);
- 	if (put_compat_timespec(&t, interval))
- 		return -EFAULT;
- 	return ret;
- }
- 
--asmlinkage long
--sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
-+asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
-+				    compat_size_t sigsetsize)
- {
- 	sigset_t s;
- 	compat_sigset_t s32;
- 	int ret;
- 	mm_segment_t old_fs = get_fs();
--		
--	set_fs (KERNEL_DS);
++	ret = ptrace_check_attach(child, request == PTRACE_KILL);
++	if (ret < 0)
++		goto out;
 +
-+	set_fs(KERNEL_DS);
- 	ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
--	set_fs (old_fs);
-+	set_fs(old_fs);
- 	if (!ret) {
- 		switch (_NSIG_WORDS) {
- 		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
-@@ -556,30 +560,29 @@ sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
- 		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
- 		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
- 		}
--		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
-+		if (copy_to_user(set, &s32, sizeof(compat_sigset_t)))
- 			return -EFAULT;
- 	}
- 	return ret;
- }
- 
--asmlinkage long
--sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
-+asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
-+				      compat_siginfo_t __user *uinfo)
- {
- 	siginfo_t info;
- 	int ret;
- 	mm_segment_t old_fs = get_fs();
--	
++	childregs = task_pt_regs(child);
 +
- 	if (copy_siginfo_from_user32(&info, uinfo))
- 		return -EFAULT;
--	set_fs (KERNEL_DS);
-+	set_fs(KERNEL_DS);
- 	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
--	set_fs (old_fs);
-+	set_fs(old_fs);
- 	return ret;
- }
- 
- /* These are here just in case some old ia32 binary calls it. */
--asmlinkage long
--sys32_pause(void)
-+asmlinkage long sys32_pause(void)
- {
- 	current->state = TASK_INTERRUPTIBLE;
- 	schedule();
-@@ -599,25 +602,25 @@ struct sysctl_ia32 {
- };
- 
- 
--asmlinkage long
--sys32_sysctl(struct sysctl_ia32 __user *args32)
-+asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *args32)
- {
- 	struct sysctl_ia32 a32;
--	mm_segment_t old_fs = get_fs ();
-+	mm_segment_t old_fs = get_fs();
- 	void __user *oldvalp, *newvalp;
- 	size_t oldlen;
- 	int __user *namep;
- 	long ret;
- 
--	if (copy_from_user(&a32, args32, sizeof (a32)))
-+	if (copy_from_user(&a32, args32, sizeof(a32)))
- 		return -EFAULT;
- 
- 	/*
--	 * We need to pre-validate these because we have to disable address checking
--	 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
--	 * user specifying bad addresses here.  Well, since we're dealing with 32 bit
--	 * addresses, we KNOW that access_ok() will always succeed, so this is an
--	 * expensive NOP, but so what...
-+	 * We need to pre-validate these because we have to disable
-+	 * address checking before calling do_sysctl() because of
-+	 * OLDLEN but we can't run the risk of the user specifying bad
-+	 * addresses here.  Well, since we're dealing with 32 bit
-+	 * addresses, we KNOW that access_ok() will always succeed, so
-+	 * this is an expensive NOP, but so what...
- 	 */
- 	namep = compat_ptr(a32.name);
- 	oldvalp = compat_ptr(a32.oldval);
-@@ -636,34 +639,34 @@ sys32_sysctl(struct sysctl_ia32 __user *args32)
- 	unlock_kernel();
- 	set_fs(old_fs);
- 
--	if (oldvalp && put_user (oldlen, (int __user *)compat_ptr(a32.oldlenp)))
-+	if (oldvalp && put_user(oldlen, (int __user *)compat_ptr(a32.oldlenp)))
- 		return -EFAULT;
- 
- 	return ret;
- }
- #endif
- 
--/* warning: next two assume little endian */ 
--asmlinkage long
--sys32_pread(unsigned int fd, char __user *ubuf, u32 count, u32 poslo, u32 poshi)
-+/* warning: next two assume little endian */
-+asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
-+			    u32 poslo, u32 poshi)
- {
- 	return sys_pread64(fd, ubuf, count,
- 			 ((loff_t)AA(poshi) << 32) | AA(poslo));
- }
- 
--asmlinkage long
--sys32_pwrite(unsigned int fd, char __user *ubuf, u32 count, u32 poslo, u32 poshi)
-+asmlinkage long sys32_pwrite(unsigned int fd, char __user *ubuf, u32 count,
-+			     u32 poslo, u32 poshi)
- {
- 	return sys_pwrite64(fd, ubuf, count,
- 			  ((loff_t)AA(poshi) << 32) | AA(poslo));
- }
- 
- 
--asmlinkage long
--sys32_personality(unsigned long personality)
-+asmlinkage long sys32_personality(unsigned long personality)
- {
- 	int ret;
--	if (personality(current->personality) == PER_LINUX32 && 
++	switch (request) {
++	case PTRACE_PEEKUSR:
++		ret = getreg32(child, addr, &val);
++		if (ret == 0)
++			ret = put_user(val, (__u32 __user *)datap);
++		break;
 +
-+	if (personality(current->personality) == PER_LINUX32 &&
- 		personality == PER_LINUX)
- 		personality = PER_LINUX32;
- 	ret = sys_personality(personality);
-@@ -672,34 +675,33 @@ sys32_personality(unsigned long personality)
- 	return ret;
- }
- 
--asmlinkage long
--sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
-+asmlinkage long sys32_sendfile(int out_fd, int in_fd,
-+			       compat_off_t __user *offset, s32 count)
- {
- 	mm_segment_t old_fs = get_fs();
- 	int ret;
- 	off_t of;
--	
++	case PTRACE_POKEUSR:
++		ret = putreg32(child, addr, data);
++		break;
 +
- 	if (offset && get_user(of, offset))
- 		return -EFAULT;
--		
++	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
++		return copy_regset_to_user(child, &user_x86_32_view,
++					   REGSET_GENERAL,
++					   0, sizeof(struct user_regs_struct32),
++					   datap);
 +
- 	set_fs(KERNEL_DS);
- 	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
- 			   count);
- 	set_fs(old_fs);
--	
++	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
++		return copy_regset_from_user(child, &user_x86_32_view,
++					     REGSET_GENERAL, 0,
++					     sizeof(struct user_regs_struct32),
++					     datap);
 +
- 	if (offset && put_user(of, offset))
- 		return -EFAULT;
--		
- 	return ret;
- }
- 
- asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
--	unsigned long prot, unsigned long flags,
--	unsigned long fd, unsigned long pgoff)
-+			    unsigned long prot, unsigned long flags,
-+			    unsigned long fd, unsigned long pgoff)
- {
- 	struct mm_struct *mm = current->mm;
- 	unsigned long error;
--	struct file * file = NULL;
-+	struct file *file = NULL;
- 
- 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- 	if (!(flags & MAP_ANONYMOUS)) {
-@@ -717,36 +719,35 @@ asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
- 	return error;
- }
- 
--asmlinkage long sys32_olduname(struct oldold_utsname __user * name)
-+asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
- {
-+	char *arch = "x86_64";
- 	int err;
- 
- 	if (!name)
- 		return -EFAULT;
- 	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
- 		return -EFAULT;
--  
--  	down_read(&uts_sem);
--
--	err = __copy_to_user(&name->sysname,&utsname()->sysname,
--				__OLD_UTS_LEN);
--	err |= __put_user(0,name->sysname+__OLD_UTS_LEN);
--	err |= __copy_to_user(&name->nodename,&utsname()->nodename,
--				__OLD_UTS_LEN);
--	err |= __put_user(0,name->nodename+__OLD_UTS_LEN);
--	err |= __copy_to_user(&name->release,&utsname()->release,
--				__OLD_UTS_LEN);
--	err |= __put_user(0,name->release+__OLD_UTS_LEN);
--	err |= __copy_to_user(&name->version,&utsname()->version,
--				__OLD_UTS_LEN);
--	err |= __put_user(0,name->version+__OLD_UTS_LEN);
--	{
--		char *arch = "x86_64";
--		if (personality(current->personality) == PER_LINUX32)
--			arch = "i686";
--		 
--		err |= __copy_to_user(&name->machine, arch, strlen(arch)+1);
--	}
++	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
++		return copy_regset_to_user(child, &user_x86_32_view,
++					   REGSET_FP, 0,
++					   sizeof(struct user_i387_ia32_struct),
++					   datap);
 +
-+	down_read(&uts_sem);
++	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
++		return copy_regset_from_user(
++			child, &user_x86_32_view, REGSET_FP,
++			0, sizeof(struct user_i387_ia32_struct), datap);
 +
-+	err = __copy_to_user(&name->sysname, &utsname()->sysname,
-+			     __OLD_UTS_LEN);
-+	err |= __put_user(0, name->sysname+__OLD_UTS_LEN);
-+	err |= __copy_to_user(&name->nodename, &utsname()->nodename,
-+			      __OLD_UTS_LEN);
-+	err |= __put_user(0, name->nodename+__OLD_UTS_LEN);
-+	err |= __copy_to_user(&name->release, &utsname()->release,
-+			      __OLD_UTS_LEN);
-+	err |= __put_user(0, name->release+__OLD_UTS_LEN);
-+	err |= __copy_to_user(&name->version, &utsname()->version,
-+			      __OLD_UTS_LEN);
-+	err |= __put_user(0, name->version+__OLD_UTS_LEN);
++	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
++		return copy_regset_to_user(child, &user_x86_32_view,
++					   REGSET_XFP, 0,
++					   sizeof(struct user32_fxsr_struct),
++					   datap);
 +
-+	if (personality(current->personality) == PER_LINUX32)
-+		arch = "i686";
++	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
++		return copy_regset_from_user(child, &user_x86_32_view,
++					     REGSET_XFP, 0,
++					     sizeof(struct user32_fxsr_struct),
++					     datap);
 +
-+	err |= __copy_to_user(&name->machine, arch, strlen(arch) + 1);
- 
- 	up_read(&uts_sem);
- 
-@@ -755,17 +756,19 @@ asmlinkage long sys32_olduname(struct oldold_utsname __user * name)
- 	return err;
- }
- 
--long sys32_uname(struct old_utsname __user * name)
-+long sys32_uname(struct old_utsname __user *name)
- {
- 	int err;
++	default:
++		return compat_ptrace_request(child, request, addr, data);
++	}
 +
- 	if (!name)
- 		return -EFAULT;
- 	down_read(&uts_sem);
--	err = copy_to_user(name, utsname(), sizeof (*name));
-+	err = copy_to_user(name, utsname(), sizeof(*name));
- 	up_read(&uts_sem);
--	if (personality(current->personality) == PER_LINUX32) 
-+	if (personality(current->personality) == PER_LINUX32)
- 		err |= copy_to_user(&name->machine, "i686", 5);
--	return err?-EFAULT:0;
++ out:
++	put_task_struct(child);
++	return ret;
++}
 +
-+	return err ? -EFAULT : 0;
- }
- 
- long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
-@@ -773,27 +776,28 @@ long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
- 	struct ustat u;
- 	mm_segment_t seg;
- 	int ret;
--	
--	seg = get_fs(); 
--	set_fs(KERNEL_DS); 
++#endif	/* CONFIG_IA32_EMULATION */
 +
-+	seg = get_fs();
-+	set_fs(KERNEL_DS);
- 	ret = sys_ustat(dev, (struct ustat __user *)&u);
- 	set_fs(seg);
--	if (ret >= 0) { 
--		if (!access_ok(VERIFY_WRITE,u32p,sizeof(struct ustat32)) || 
--		    __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
--		    __put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
--		    __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
--		    __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
--			ret = -EFAULT;
--	}
-+	if (ret < 0)
-+		return ret;
++#ifdef CONFIG_X86_64
 +
-+	if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) ||
-+	    __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
-+	    __put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
-+	    __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
-+	    __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
-+		ret = -EFAULT;
- 	return ret;
--} 
++static const struct user_regset x86_64_regsets[] = {
++	[REGSET_GENERAL] = {
++		.core_note_type = NT_PRSTATUS,
++		.n = sizeof(struct user_regs_struct) / sizeof(long),
++		.size = sizeof(long), .align = sizeof(long),
++		.get = genregs_get, .set = genregs_set
++	},
++	[REGSET_FP] = {
++		.core_note_type = NT_PRFPREG,
++		.n = sizeof(struct user_i387_struct) / sizeof(long),
++		.size = sizeof(long), .align = sizeof(long),
++		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
++	},
++};
++
++static const struct user_regset_view user_x86_64_view = {
++	.name = "x86_64", .e_machine = EM_X86_64,
++	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
++};
++
++#else  /* CONFIG_X86_32 */
++
++#define user_regs_struct32	user_regs_struct
++#define genregs32_get		genregs_get
++#define genregs32_set		genregs_set
++
++#endif	/* CONFIG_X86_64 */
++
++#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
++static const struct user_regset x86_32_regsets[] = {
++	[REGSET_GENERAL] = {
++		.core_note_type = NT_PRSTATUS,
++		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
++		.size = sizeof(u32), .align = sizeof(u32),
++		.get = genregs32_get, .set = genregs32_set
++	},
++	[REGSET_FP] = {
++		.core_note_type = NT_PRFPREG,
++		.n = sizeof(struct user_i387_struct) / sizeof(u32),
++		.size = sizeof(u32), .align = sizeof(u32),
++		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
++	},
++	[REGSET_XFP] = {
++		.core_note_type = NT_PRXFPREG,
++		.n = sizeof(struct user_i387_struct) / sizeof(u32),
++		.size = sizeof(u32), .align = sizeof(u32),
++		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
++	},
++	[REGSET_TLS] = {
++		.core_note_type = NT_386_TLS,
++		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
++		.size = sizeof(struct user_desc),
++		.align = sizeof(struct user_desc),
++		.active = regset_tls_active,
++		.get = regset_tls_get, .set = regset_tls_set
++	},
++};
++
++static const struct user_regset_view user_x86_32_view = {
++	.name = "i386", .e_machine = EM_386,
++	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
++};
++#endif
++
++const struct user_regset_view *task_user_regset_view(struct task_struct *task)
++{
++#ifdef CONFIG_IA32_EMULATION
++	if (test_tsk_thread_flag(task, TIF_IA32))
++#endif
++#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
++		return &user_x86_32_view;
++#endif
++#ifdef CONFIG_X86_64
++	return &user_x86_64_view;
++#endif
 +}
- 
- asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
- 			     compat_uptr_t __user *envp, struct pt_regs *regs)
- {
- 	long error;
--	char * filename;
-+	char *filename;
- 
- 	filename = getname(name);
- 	error = PTR_ERR(filename);
-@@ -812,18 +816,19 @@ asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
- asmlinkage long sys32_clone(unsigned int clone_flags, unsigned int newsp,
- 			    struct pt_regs *regs)
- {
--	void __user *parent_tid = (void __user *)regs->rdx;
--	void __user *child_tid = (void __user *)regs->rdi;
-+	void __user *parent_tid = (void __user *)regs->dx;
-+	void __user *child_tid = (void __user *)regs->di;
 +
- 	if (!newsp)
--		newsp = regs->rsp;
--        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
-+		newsp = regs->sp;
-+	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
- }
- 
- /*
-- * Some system calls that need sign extended arguments. This could be done by a generic wrapper.
-- */ 
--
--long sys32_lseek (unsigned int fd, int offset, unsigned int whence)
-+ * Some system calls that need sign extended arguments. This could be
-+ * done by a generic wrapper.
++#ifdef CONFIG_X86_32
++
++void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
++{
++	struct siginfo info;
++
++	tsk->thread.trap_no = 1;
++	tsk->thread.error_code = error_code;
++
++	memset(&info, 0, sizeof(info));
++	info.si_signo = SIGTRAP;
++	info.si_code = TRAP_BRKPT;
++
++	/* User-mode ip? */
++	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
++
++	/* Send us the fake SIGTRAP */
++	force_sig_info(SIGTRAP, &info, tsk);
++}
++
++/* notification of system call entry/exit
++ * - triggered by current->work.syscall_trace
 + */
-+long sys32_lseek(unsigned int fd, int offset, unsigned int whence)
- {
- 	return sys_lseek(fd, offset, whence);
- }
-@@ -832,49 +837,52 @@ long sys32_kill(int pid, int sig)
- {
- 	return sys_kill(pid, sig);
- }
-- 
--long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, 
++__attribute__((regparm(3)))
++int do_syscall_trace(struct pt_regs *regs, int entryexit)
++{
++	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
++	/*
++	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
++	 * interception
++	 */
++	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
++	int ret = 0;
 +
-+long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
- 			__u32 len_low, __u32 len_high, int advice)
--{ 
++	/* do the secure computing check first */
++	if (!entryexit)
++		secure_computing(regs->orig_ax);
++
++	if (unlikely(current->audit_context)) {
++		if (entryexit)
++			audit_syscall_exit(AUDITSC_RESULT(regs->ax),
++						regs->ax);
++		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
++		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
++		 * not used, entry.S will call us only on syscall exit, not
++		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
++		 * calling send_sigtrap() on syscall entry.
++		 *
++		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
++		 * is_singlestep is false, despite his name, so we will still do
++		 * the correct thing.
++		 */
++		else if (is_singlestep)
++			goto out;
++	}
++
++	if (!(current->ptrace & PT_PTRACED))
++		goto out;
++
++	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
++	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
++	 * here. We have to check this and return */
++	if (is_sysemu && entryexit)
++		return 0;
++
++	/* Fake a debug trap */
++	if (is_singlestep)
++		send_sigtrap(current, regs, 0);
++
++ 	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
++		goto out;
++
++	/* the 0x80 provides a way for the tracing parent to distinguish
++	   between a syscall stop and SIGTRAP delivery */
++	/* Note that the debugger could change the result of test_thread_flag!*/
++	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));
++
++	/*
++	 * this isn't the same as continuing with a signal, but it will do
++	 * for normal use.  strace only continues with a signal if the
++	 * stopping signal is not SIGTRAP.  -brl
++	 */
++	if (current->exit_code) {
++		send_sig(current->exit_code, current, 1);
++		current->exit_code = 0;
++	}
++	ret = is_sysemu;
++out:
++	if (unlikely(current->audit_context) && !entryexit)
++		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
++				    regs->bx, regs->cx, regs->dx, regs->si);
++	if (ret == 0)
++		return 0;
++
++	regs->orig_ax = -1; /* force skip of syscall restarting */
++	if (unlikely(current->audit_context))
++		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
++	return 1;
++}
++
++#else  /* CONFIG_X86_64 */
++
++static void syscall_trace(struct pt_regs *regs)
 +{
- 	return sys_fadvise64_64(fd,
- 			       (((u64)offset_high)<<32) | offset_low,
- 			       (((u64)len_high)<<32) | len_low,
--			       advice); 
--} 
-+				advice);
++
++#if 0
++	printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
++	       current->comm,
++	       regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
++	       current_thread_info()->flags, current->ptrace);
++#endif
++
++	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
++				? 0x80 : 0));
++	/*
++	 * this isn't the same as continuing with a signal, but it will do
++	 * for normal use.  strace only continues with a signal if the
++	 * stopping signal is not SIGTRAP.  -brl
++	 */
++	if (current->exit_code) {
++		send_sig(current->exit_code, current, 1);
++		current->exit_code = 0;
++	}
 +}
- 
- long sys32_vm86_warning(void)
--{ 
++
++asmlinkage void syscall_trace_enter(struct pt_regs *regs)
 +{
- 	struct task_struct *me = current;
- 	static char lastcomm[sizeof(me->comm)];
++	/* do the secure computing check first */
++	secure_computing(regs->orig_ax);
 +
- 	if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
--		compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
--		       me->comm);
-+		compat_printk(KERN_INFO
-+			      "%s: vm86 mode not supported on 64 bit kernel\n",
-+			      me->comm);
- 		strncpy(lastcomm, me->comm, sizeof(lastcomm));
--	} 
++	if (test_thread_flag(TIF_SYSCALL_TRACE)
++	    && (current->ptrace & PT_PTRACED))
++		syscall_trace(regs);
++
++	if (unlikely(current->audit_context)) {
++		if (test_thread_flag(TIF_IA32)) {
++			audit_syscall_entry(AUDIT_ARCH_I386,
++					    regs->orig_ax,
++					    regs->bx, regs->cx,
++					    regs->dx, regs->si);
++		} else {
++			audit_syscall_entry(AUDIT_ARCH_X86_64,
++					    regs->orig_ax,
++					    regs->di, regs->si,
++					    regs->dx, regs->r10);
++		}
 +	}
- 	return -ENOSYS;
--} 
 +}
- 
- long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
--			  char __user * buf, size_t len)
-+			  char __user *buf, size_t len)
- {
- 	return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
- }
- 
--asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, size_t count)
-+asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
-+				   size_t count)
- {
- 	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
- }
- 
- asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
--			   unsigned n_low, unsigned n_hi,  int flags)
-+				      unsigned n_low, unsigned n_hi,  int flags)
- {
- 	return sys_sync_file_range(fd,
- 				   ((u64)off_hi << 32) | off_low,
- 				   ((u64)n_hi << 32) | n_low, flags);
- }
- 
--asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi, size_t len,
--		     int advice)
-+asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
-+				size_t len, int advice)
- {
- 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
- 				len, advice);
-diff --git a/arch/x86/ia32/syscall32.c b/arch/x86/ia32/syscall32.c
-deleted file mode 100644
-index 15013ba..0000000
---- a/arch/x86/ia32/syscall32.c
-+++ /dev/null
-@@ -1,83 +0,0 @@
--/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
--
--/* vsyscall handling for 32bit processes. Map a stub page into it 
--   on demand because 32bit cannot reach the kernel's fixmaps */
--
--#include <linux/mm.h>
--#include <linux/string.h>
--#include <linux/kernel.h>
--#include <linux/gfp.h>
--#include <linux/init.h>
--#include <linux/stringify.h>
--#include <linux/security.h>
--#include <asm/proto.h>
--#include <asm/tlbflush.h>
--#include <asm/ia32_unistd.h>
--#include <asm/vsyscall32.h>
--
--extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
--extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
--extern int sysctl_vsyscall32;
--
--static struct page *syscall32_pages[1];
--static int use_sysenter = -1;
--
--struct linux_binprm;
--
--/* Setup a VMA at program startup for the vsyscall page */
--int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
--{
--	struct mm_struct *mm = current->mm;
--	int ret;
--
--	down_write(&mm->mmap_sem);
--	/*
--	 * MAYWRITE to allow gdb to COW and set breakpoints
--	 *
--	 * Make sure the vDSO gets into every core dump.
--	 * Dumping its contents makes post-mortem fully interpretable later
--	 * without matching up the same kernel and hardware config to see
--	 * what PC values meant.
--	 */
--	/* Could randomize here */
--	ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE,
--				      VM_READ|VM_EXEC|
--				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
--				      VM_ALWAYSDUMP,
--				      syscall32_pages);
--	up_write(&mm->mmap_sem);
--	return ret;
--}
--
--static int __init init_syscall32(void)
--{ 
--	char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
--	if (!syscall32_page) 
--		panic("Cannot allocate syscall32 page"); 
--	syscall32_pages[0] = virt_to_page(syscall32_page);
-- 	if (use_sysenter > 0) {
-- 		memcpy(syscall32_page, syscall32_sysenter,
-- 		       syscall32_sysenter_end - syscall32_sysenter);
-- 	} else {
--  		memcpy(syscall32_page, syscall32_syscall,
--  		       syscall32_syscall_end - syscall32_syscall);
--  	}	
--	return 0;
--} 
--	
--__initcall(init_syscall32); 
--
--/* May not be __init: called during resume */
--void syscall32_cpu_init(void)
--{
--	if (use_sysenter < 0)
-- 		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
--
--	/* Load these always in case some future AMD CPU supports
--	   SYSENTER from compat mode too. */
--	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
--	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
--	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
--
--	wrmsrl(MSR_CSTAR, ia32_cstar_target);
--}
-diff --git a/arch/x86/ia32/syscall32_syscall.S b/arch/x86/ia32/syscall32_syscall.S
++
++asmlinkage void syscall_trace_leave(struct pt_regs *regs)
++{
++	if (unlikely(current->audit_context))
++		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
++
++	if ((test_thread_flag(TIF_SYSCALL_TRACE)
++	     || test_thread_flag(TIF_SINGLESTEP))
++	    && (current->ptrace & PT_PTRACED))
++		syscall_trace(regs);
++}
++
++#endif	/* CONFIG_X86_32 */
+diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
 deleted file mode 100644
-index 933f0f0..0000000
---- a/arch/x86/ia32/syscall32_syscall.S
+index ff5431c..0000000
+--- a/arch/x86/kernel/ptrace_32.c
 +++ /dev/null
-@@ -1,17 +0,0 @@
--/* 32bit VDSOs mapped into user space. */
--
--	.section ".init.data","aw"
--
--	.globl syscall32_syscall
--	.globl syscall32_syscall_end
--
--syscall32_syscall:
--	.incbin "arch/x86/ia32/vsyscall-syscall.so"
--syscall32_syscall_end:
--
--	.globl syscall32_sysenter
--	.globl syscall32_sysenter_end
+@@ -1,717 +0,0 @@
+-/* By Ross Biro 1/23/92 */
+-/*
+- * Pentium III FXSR, SSE support
+- *	Gareth Hughes <gareth@valinux.com>, May 2000
+- */
 -
--syscall32_sysenter:
--	.incbin "arch/x86/ia32/vsyscall-sysenter.so"
--syscall32_sysenter_end:
-diff --git a/arch/x86/ia32/tls32.c b/arch/x86/ia32/tls32.c
-deleted file mode 100644
-index 1cc4340..0000000
---- a/arch/x86/ia32/tls32.c
-+++ /dev/null
-@@ -1,163 +0,0 @@
 -#include <linux/kernel.h>
--#include <linux/errno.h>
 -#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/errno.h>
+-#include <linux/ptrace.h>
 -#include <linux/user.h>
+-#include <linux/security.h>
+-#include <linux/audit.h>
+-#include <linux/seccomp.h>
+-#include <linux/signal.h>
 -
 -#include <asm/uaccess.h>
--#include <asm/desc.h>
+-#include <asm/pgtable.h>
 -#include <asm/system.h>
--#include <asm/ldt.h>
 -#include <asm/processor.h>
--#include <asm/proto.h>
+-#include <asm/i387.h>
+-#include <asm/debugreg.h>
+-#include <asm/ldt.h>
+-#include <asm/desc.h>
 -
 -/*
-- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+- * does not yet catch signals sent when the child dies.
+- * in exit.c or in signal.c.
 - */
--static int get_free_idx(void)
--{
--	struct thread_struct *t = &current->thread;
--	int idx;
--
--	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
--		if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
--			return idx + GDT_ENTRY_TLS_MIN;
--	return -ESRCH;
--}
 -
 -/*
-- * Set a given TLS descriptor:
-- * When you want addresses > 32bit use arch_prctl() 
+- * Determines which flags the user has access to [1 = access, 0 = no access].
+- * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9).
+- * Also masks reserved bits (31-22, 15, 5, 3, 1).
 - */
--int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
--{
--	struct user_desc info;
--	struct n_desc_struct *desc;
--	int cpu, idx;
--
--	if (copy_from_user(&info, u_info, sizeof(info)))
--		return -EFAULT;
--
--	idx = info.entry_number;
--
--	/*
--	 * index -1 means the kernel should try to find and
--	 * allocate an empty descriptor:
--	 */
--	if (idx == -1) {
--		idx = get_free_idx();
--		if (idx < 0)
--			return idx;
--		if (put_user(idx, &u_info->entry_number))
--			return -EFAULT;
--	}
--
--	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
--		return -EINVAL;
--
--	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+-#define FLAG_MASK 0x00050dd5
 -
--	/*
--	 * We must not get preempted while modifying the TLS.
--	 */
--	cpu = get_cpu();
+-/* set's the trap flag. */
+-#define TRAP_FLAG 0x100
 -
--	if (LDT_empty(&info)) {
--		desc->a = 0;
--		desc->b = 0;
--	} else {
--		desc->a = LDT_entry_a(&info);
--		desc->b = LDT_entry_b(&info);
--	}
--	if (t == &current->thread)
--		load_TLS(t, cpu);
+-/*
+- * Offset of eflags on child stack..
+- */
+-#define EFL_OFFSET offsetof(struct pt_regs, eflags)
 -
--	put_cpu();
--	return 0;
+-static inline struct pt_regs *get_child_regs(struct task_struct *task)
+-{
+-	void *stack_top = (void *)task->thread.esp0;
+-	return stack_top - sizeof(struct pt_regs);
 -}
 -
--asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
--{ 
--	return do_set_thread_area(&current->thread, u_info); 
--} 
+-/*
+- * This routine will get a word off of the processes privileged stack.
+- * the offset is bytes into the pt_regs structure on the stack.
+- * This routine assumes that all the privileged stacks are in our
+- * data space.
+- */   
+-static inline int get_stack_long(struct task_struct *task, int offset)
+-{
+-	unsigned char *stack;
 -
+-	stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
+-	stack += offset;
+-	return (*((int *)stack));
+-}
 -
 -/*
-- * Get the current Thread-Local Storage area:
+- * This routine will put a word on the processes privileged stack.
+- * the offset is bytes into the pt_regs structure on the stack.
+- * This routine assumes that all the privileged stacks are in our
+- * data space.
 - */
--
--#define GET_BASE(desc) ( \
--	(((desc)->a >> 16) & 0x0000ffff) | \
--	(((desc)->b << 16) & 0x00ff0000) | \
--	( (desc)->b        & 0xff000000)   )
--
--#define GET_LIMIT(desc) ( \
--	((desc)->a & 0x0ffff) | \
--	 ((desc)->b & 0xf0000) )
--	
--#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
--#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
--#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
--#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
--#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
--#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
--#define GET_LONGMODE(desc)	(((desc)->b >> 21) & 1)
--
--int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+-static inline int put_stack_long(struct task_struct *task, int offset,
+-	unsigned long data)
 -{
--	struct user_desc info;
--	struct n_desc_struct *desc;
--	int idx;
--
--	if (get_user(idx, &u_info->entry_number))
--		return -EFAULT;
--	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
--		return -EINVAL;
--
--	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
--
--	memset(&info, 0, sizeof(struct user_desc));
--	info.entry_number = idx;
--	info.base_addr = GET_BASE(desc);
--	info.limit = GET_LIMIT(desc);
--	info.seg_32bit = GET_32BIT(desc);
--	info.contents = GET_CONTENTS(desc);
--	info.read_exec_only = !GET_WRITABLE(desc);
--	info.limit_in_pages = GET_LIMIT_PAGES(desc);
--	info.seg_not_present = !GET_PRESENT(desc);
--	info.useable = GET_USEABLE(desc);
--	info.lm = GET_LONGMODE(desc);
+-	unsigned char * stack;
 -
--	if (copy_to_user(u_info, &info, sizeof(info)))
--		return -EFAULT;
+-	stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
+-	stack += offset;
+-	*(unsigned long *) stack = data;
 -	return 0;
 -}
 -
--asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
--{
--	return do_get_thread_area(&current->thread, u_info);
--} 
--
--
--int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
+-static int putreg(struct task_struct *child,
+-	unsigned long regno, unsigned long value)
 -{
--	struct n_desc_struct *desc;
--	struct user_desc info;
--	struct user_desc __user *cp;
--	int idx;
--	
--	cp = (void __user *)childregs->rsi;
--	if (copy_from_user(&info, cp, sizeof(info)))
--		return -EFAULT;
--	if (LDT_empty(&info))
--		return -EINVAL;
--	
--	idx = info.entry_number;
--	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
--		return -EINVAL;
--	
--	desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
--	desc->a = LDT_entry_a(&info);
--	desc->b = LDT_entry_b(&info);
--
+-	switch (regno >> 2) {
+-		case GS:
+-			if (value && (value & 3) != 3)
+-				return -EIO;
+-			child->thread.gs = value;
+-			return 0;
+-		case DS:
+-		case ES:
+-		case FS:
+-			if (value && (value & 3) != 3)
+-				return -EIO;
+-			value &= 0xffff;
+-			break;
+-		case SS:
+-		case CS:
+-			if ((value & 3) != 3)
+-				return -EIO;
+-			value &= 0xffff;
+-			break;
+-		case EFL:
+-			value &= FLAG_MASK;
+-			value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
+-			break;
+-	}
+-	if (regno > FS*4)
+-		regno -= 1*4;
+-	put_stack_long(child, regno, value);
 -	return 0;
 -}
-diff --git a/arch/x86/ia32/vsyscall-sigreturn.S b/arch/x86/ia32/vsyscall-sigreturn.S
-deleted file mode 100644
-index b383be0..0000000
---- a/arch/x86/ia32/vsyscall-sigreturn.S
-+++ /dev/null
-@@ -1,143 +0,0 @@
--/*
-- * Common code for the sigreturn entry points on the vsyscall page.
-- * This code uses SYSCALL_ENTER_KERNEL (either syscall or int $0x80)
-- * to enter the kernel.
-- * This file is #include'd by vsyscall-*.S to define them after the
-- * vsyscall entry point.  The addresses we get for these entry points
-- * by doing ".balign 32" must match in both versions of the page.
-- */
--
--	.code32
--	.section .text.sigreturn,"ax"
--	.balign 32
--	.globl __kernel_sigreturn
--	.type __kernel_sigreturn,@function
--__kernel_sigreturn:
--.LSTART_sigreturn:
--	popl %eax
--	movl $__NR_ia32_sigreturn, %eax
--	SYSCALL_ENTER_KERNEL
--.LEND_sigreturn:
--	.size __kernel_sigreturn,.-.LSTART_sigreturn
--
--	.section .text.rtsigreturn,"ax"
--	.balign 32
--	.globl __kernel_rt_sigreturn
--	.type __kernel_rt_sigreturn,@function
--__kernel_rt_sigreturn:
--.LSTART_rt_sigreturn:
--	movl $__NR_ia32_rt_sigreturn, %eax
--	SYSCALL_ENTER_KERNEL
--.LEND_rt_sigreturn:
--	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
--
--	.section .eh_frame,"a", at progbits
--.LSTARTFRAMES:
--        .long .LENDCIES-.LSTARTCIES
--.LSTARTCIES:
--	.long 0			/* CIE ID */
--	.byte 1			/* Version number */
--	.string "zRS"		/* NUL-terminated augmentation string */
--	.uleb128 1		/* Code alignment factor */
--	.sleb128 -4		/* Data alignment factor */
--	.byte 8			/* Return address register column */
--	.uleb128 1		/* Augmentation value length */
--	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
--	.byte 0x0c		/* DW_CFA_def_cfa */
--	.uleb128 4
--	.uleb128 4
--	.byte 0x88		/* DW_CFA_offset, column 0x8 */
--	.uleb128 1
--	.align 4
--.LENDCIES:
--
--	.long .LENDFDE2-.LSTARTFDE2	/* Length FDE */
--.LSTARTFDE2:
--	.long .LSTARTFDE2-.LSTARTFRAMES	/* CIE pointer */
--	/* HACK: The dwarf2 unwind routines will subtract 1 from the
--	   return address to get an address in the middle of the
--	   presumed call instruction.  Since we didn't get here via
--	   a call, we need to include the nop before the real start
--	   to make up for it.  */
--	.long .LSTART_sigreturn-1-.	/* PC-relative start address */
--	.long .LEND_sigreturn-.LSTART_sigreturn+1
--	.uleb128 0			/* Augmentation length */
--	/* What follows are the instructions for the table generation.
--	   We record the locations of each register saved.  This is
--	   complicated by the fact that the "CFA" is always assumed to
--	   be the value of the stack pointer in the caller.  This means
--	   that we must define the CFA of this body of code to be the
--	   saved value of the stack pointer in the sigcontext.  Which
--	   also means that there is no fixed relation to the other 
--	   saved registers, which means that we must use DW_CFA_expression
--	   to compute their addresses.  It also means that when we 
--	   adjust the stack with the popl, we have to do it all over again.  */
--
--#define do_cfa_expr(offset)						\
--	.byte 0x0f;			/* DW_CFA_def_cfa_expression */	\
--	.uleb128 1f-0f;			/*   length */			\
--0:	.byte 0x74;			/*     DW_OP_breg4 */		\
--	.sleb128 offset;		/*      offset */		\
--	.byte 0x06;			/*     DW_OP_deref */		\
--1:
--
--#define do_expr(regno, offset)						\
--	.byte 0x10;			/* DW_CFA_expression */		\
--	.uleb128 regno;			/*   regno */			\
--	.uleb128 1f-0f;			/*   length */			\
--0:	.byte 0x74;			/*     DW_OP_breg4 */		\
--	.sleb128 offset;		/*       offset */		\
--1:
--
--	do_cfa_expr(IA32_SIGCONTEXT_esp+4)
--	do_expr(0, IA32_SIGCONTEXT_eax+4)
--	do_expr(1, IA32_SIGCONTEXT_ecx+4)
--	do_expr(2, IA32_SIGCONTEXT_edx+4)
--	do_expr(3, IA32_SIGCONTEXT_ebx+4)
--	do_expr(5, IA32_SIGCONTEXT_ebp+4)
--	do_expr(6, IA32_SIGCONTEXT_esi+4)
--	do_expr(7, IA32_SIGCONTEXT_edi+4)
--	do_expr(8, IA32_SIGCONTEXT_eip+4)
--
--	.byte 0x42	/* DW_CFA_advance_loc 2 -- nop; popl eax. */
--
--	do_cfa_expr(IA32_SIGCONTEXT_esp)
--	do_expr(0, IA32_SIGCONTEXT_eax)
--	do_expr(1, IA32_SIGCONTEXT_ecx)
--	do_expr(2, IA32_SIGCONTEXT_edx)
--	do_expr(3, IA32_SIGCONTEXT_ebx)
--	do_expr(5, IA32_SIGCONTEXT_ebp)
--	do_expr(6, IA32_SIGCONTEXT_esi)
--	do_expr(7, IA32_SIGCONTEXT_edi)
--	do_expr(8, IA32_SIGCONTEXT_eip)
 -
--	.align 4
--.LENDFDE2:
--
--	.long .LENDFDE3-.LSTARTFDE3	/* Length FDE */
--.LSTARTFDE3:
--	.long .LSTARTFDE3-.LSTARTFRAMES	/* CIE pointer */
--	/* HACK: See above wrt unwind library assumptions.  */
--	.long .LSTART_rt_sigreturn-1-.	/* PC-relative start address */
--	.long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
--	.uleb128 0			/* Augmentation */
--	/* What follows are the instructions for the table generation.
--	   We record the locations of each register saved.  This is
--	   slightly less complicated than the above, since we don't
--	   modify the stack pointer in the process.  */
--
--	do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_esp)
--	do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_eax)
--	do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ecx)
--	do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_edx)
--	do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ebx)
--	do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ebp)
--	do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_esi)
--	do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_edi)
--	do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_eip)
--
--	.align 4
--.LENDFDE3:
--
--#include "../../x86/kernel/vsyscall-note_32.S"
--
-diff --git a/arch/x86/ia32/vsyscall-syscall.S b/arch/x86/ia32/vsyscall-syscall.S
-deleted file mode 100644
-index cf9ef67..0000000
---- a/arch/x86/ia32/vsyscall-syscall.S
-+++ /dev/null
-@@ -1,69 +0,0 @@
--/*
-- * Code for the vsyscall page.  This version uses the syscall instruction.
-- */
+-static unsigned long getreg(struct task_struct *child,
+-	unsigned long regno)
+-{
+-	unsigned long retval = ~0UL;
 -
--#include <asm/ia32_unistd.h>
--#include <asm/asm-offsets.h>
--#include <asm/segment.h>
+-	switch (regno >> 2) {
+-		case GS:
+-			retval = child->thread.gs;
+-			break;
+-		case DS:
+-		case ES:
+-		case FS:
+-		case SS:
+-		case CS:
+-			retval = 0xffff;
+-			/* fall through */
+-		default:
+-			if (regno > FS*4)
+-				regno -= 1*4;
+-			retval &= get_stack_long(child, regno);
+-	}
+-	return retval;
+-}
 -
--	.code32
--	.text
--	.section .text.vsyscall,"ax"
--	.globl __kernel_vsyscall
--	.type __kernel_vsyscall,@function
--__kernel_vsyscall:
--.LSTART_vsyscall:
--	push	%ebp
--.Lpush_ebp:
--	movl	%ecx, %ebp
--	syscall
--	movl	$__USER32_DS, %ecx
--	movl	%ecx, %ss
--	movl	%ebp, %ecx
--	popl	%ebp
--.Lpop_ebp:
--	ret
--.LEND_vsyscall:
--	.size __kernel_vsyscall,.-.LSTART_vsyscall
+-#define LDT_SEGMENT 4
 -
--	.section .eh_frame,"a", at progbits
--.LSTARTFRAME:
--	.long .LENDCIE-.LSTARTCIE
--.LSTARTCIE:
--	.long 0			/* CIE ID */
--	.byte 1			/* Version number */
--	.string "zR"		/* NUL-terminated augmentation string */
--	.uleb128 1		/* Code alignment factor */
--	.sleb128 -4		/* Data alignment factor */
--	.byte 8			/* Return address register column */
--	.uleb128 1		/* Augmentation value length */
--	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
--	.byte 0x0c		/* DW_CFA_def_cfa */
--	.uleb128 4
--	.uleb128 4
--	.byte 0x88		/* DW_CFA_offset, column 0x8 */
--	.uleb128 1
--	.align 4
--.LENDCIE:
+-static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs)
+-{
+-	unsigned long addr, seg;
 -
--	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
--.LSTARTFDE1:
--	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
--	.long .LSTART_vsyscall-.	/* PC-relative start address */
--	.long .LEND_vsyscall-.LSTART_vsyscall
--	.uleb128 0			/* Augmentation length */
--	/* What follows are the instructions for the table generation.
--	   We have to record all changes of the stack pointer.  */
--	.byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.uleb128 8
--	.byte 0x85, 0x02	/* DW_CFA_offset %ebp -8 */
--	.byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
--	.byte 0xc5		/* DW_CFA_restore %ebp */
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.uleb128 4
--	.align 4
--.LENDFDE1:
+-	addr = regs->eip;
+-	seg = regs->xcs & 0xffff;
+-	if (regs->eflags & VM_MASK) {
+-		addr = (addr & 0xffff) + (seg << 4);
+-		return addr;
+-	}
 -
--#define SYSCALL_ENTER_KERNEL	syscall
--#include "vsyscall-sigreturn.S"
-diff --git a/arch/x86/ia32/vsyscall-sysenter.S b/arch/x86/ia32/vsyscall-sysenter.S
-deleted file mode 100644
-index ae056e5..0000000
---- a/arch/x86/ia32/vsyscall-sysenter.S
-+++ /dev/null
-@@ -1,95 +0,0 @@
--/*
-- * Code for the vsyscall page.  This version uses the sysenter instruction.
-- */
+-	/*
+-	 * We'll assume that the code segments in the GDT
+-	 * are all zero-based. That is largely true: the
+-	 * TLS segments are used for data, and the PNPBIOS
+-	 * and APM bios ones we just ignore here.
+-	 */
+-	if (seg & LDT_SEGMENT) {
+-		u32 *desc;
+-		unsigned long base;
 -
--#include <asm/ia32_unistd.h>
--#include <asm/asm-offsets.h>
+-		seg &= ~7UL;
 -
--	.code32
--	.text
--	.section .text.vsyscall,"ax"
--	.globl __kernel_vsyscall
--	.type __kernel_vsyscall,@function
--__kernel_vsyscall:
--.LSTART_vsyscall:
--	push	%ecx
--.Lpush_ecx:
--	push	%edx
--.Lpush_edx:
--	push	%ebp
--.Lenter_kernel:
--	movl	%esp,%ebp
--	sysenter
--	.space 7,0x90
--	jmp	.Lenter_kernel
--	/* 16: System call normal return point is here! */
--	pop	%ebp
--.Lpop_ebp:
--	pop	%edx
--.Lpop_edx:
--	pop	%ecx
--.Lpop_ecx:
--	ret
--.LEND_vsyscall:
--	.size __kernel_vsyscall,.-.LSTART_vsyscall
+-		mutex_lock(&child->mm->context.lock);
+-		if (unlikely((seg >> 3) >= child->mm->context.size))
+-			addr = -1L; /* bogus selector, access would fault */
+-		else {
+-			desc = child->mm->context.ldt + seg;
+-			base = ((desc[0] >> 16) |
+-				((desc[1] & 0xff) << 16) |
+-				(desc[1] & 0xff000000));
 -
--	.section .eh_frame,"a", at progbits
--.LSTARTFRAME:
--	.long .LENDCIE-.LSTARTCIE
--.LSTARTCIE:
--	.long 0			/* CIE ID */
--	.byte 1			/* Version number */
--	.string "zR"		/* NUL-terminated augmentation string */
--	.uleb128 1		/* Code alignment factor */
--	.sleb128 -4		/* Data alignment factor */
--	.byte 8			/* Return address register column */
--	.uleb128 1		/* Augmentation value length */
--	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
--	.byte 0x0c		/* DW_CFA_def_cfa */
--	.uleb128 4
--	.uleb128 4
--	.byte 0x88		/* DW_CFA_offset, column 0x8 */
--	.uleb128 1
--	.align 4
--.LENDCIE:
+-			/* 16-bit code segment? */
+-			if (!((desc[1] >> 22) & 1))
+-				addr &= 0xffff;
+-			addr += base;
+-		}
+-		mutex_unlock(&child->mm->context.lock);
+-	}
+-	return addr;
+-}
 -
--	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
--.LSTARTFDE1:
--	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
--	.long .LSTART_vsyscall-.	/* PC-relative start address */
--	.long .LEND_vsyscall-.LSTART_vsyscall
--	.uleb128 0			/* Augmentation length */
--	/* What follows are the instructions for the table generation.
--	   We have to record all changes of the stack pointer.  */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpush_ecx-.LSTART_vsyscall
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x08		/* RA at offset 8 now */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpush_edx-.Lpush_ecx
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x0c		/* RA at offset 12 now */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lenter_kernel-.Lpush_edx
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x10		/* RA at offset 16 now */
--	.byte 0x85, 0x04	/* DW_CFA_offset %ebp -16 */
--	/* Finally the epilogue.  */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpop_ebp-.Lenter_kernel
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x12		/* RA at offset 12 now */
--	.byte 0xc5		/* DW_CFA_restore %ebp */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpop_edx-.Lpop_ebp
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x08		/* RA at offset 8 now */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpop_ecx-.Lpop_edx
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x04		/* RA at offset 4 now */
--	.align 4
--.LENDFDE1:
+-static inline int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+-{
+-	int i, copied;
+-	unsigned char opcode[15];
+-	unsigned long addr = convert_eip_to_linear(child, regs);
 -
--#define SYSCALL_ENTER_KERNEL	int $0x80
--#include "vsyscall-sigreturn.S"
-diff --git a/arch/x86/ia32/vsyscall.lds b/arch/x86/ia32/vsyscall.lds
-deleted file mode 100644
-index 1dc86ff..0000000
---- a/arch/x86/ia32/vsyscall.lds
-+++ /dev/null
-@@ -1,80 +0,0 @@
--/*
-- * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
-- * object prelinked to its virtual address. This script controls its layout.
-- */
+-	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+-	for (i = 0; i < copied; i++) {
+-		switch (opcode[i]) {
+-		/* popf and iret */
+-		case 0x9d: case 0xcf:
+-			return 1;
+-		/* opcode and address size prefixes */
+-		case 0x66: case 0x67:
+-			continue;
+-		/* irrelevant prefixes (segment overrides and repeats) */
+-		case 0x26: case 0x2e:
+-		case 0x36: case 0x3e:
+-		case 0x64: case 0x65:
+-		case 0xf0: case 0xf2: case 0xf3:
+-			continue;
 -
--/* This must match <asm/fixmap.h>.  */
--VSYSCALL_BASE = 0xffffe000;
+-		/*
+-		 * pushf: NOTE! We should probably not let
+-		 * the user see the TF bit being set. But
+-		 * it's more pain than it's worth to avoid
+-		 * it, and a debugger could emulate this
+-		 * all in user space if it _really_ cares.
+-		 */
+-		case 0x9c:
+-		default:
+-			return 0;
+-		}
+-	}
+-	return 0;
+-}
 -
--SECTIONS
+-static void set_singlestep(struct task_struct *child)
 -{
--  . = VSYSCALL_BASE + SIZEOF_HEADERS;
+-	struct pt_regs *regs = get_child_regs(child);
 -
--  .hash           : { *(.hash) }		:text
--  .gnu.hash       : { *(.gnu.hash) }
--  .dynsym         : { *(.dynsym) }
--  .dynstr         : { *(.dynstr) }
--  .gnu.version    : { *(.gnu.version) }
--  .gnu.version_d  : { *(.gnu.version_d) }
--  .gnu.version_r  : { *(.gnu.version_r) }
+-	/*
+-	 * Always set TIF_SINGLESTEP - this guarantees that 
+-	 * we single-step system calls etc..  This will also
+-	 * cause us to set TF when returning to user mode.
+-	 */
+-	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 -
--  /* This linker script is used both with -r and with -shared.
--     For the layouts to match, we need to skip more than enough
--     space for the dynamic symbol table et al.  If this amount
--     is insufficient, ld -shared will barf.  Just increase it here.  */
--  . = VSYSCALL_BASE + 0x400;
--  
--  .text.vsyscall   : { *(.text.vsyscall) } 	:text =0x90909090
+-	/*
+-	 * If TF was already set, don't do anything else
+-	 */
+-	if (regs->eflags & TRAP_FLAG)
+-		return;
 -
--  /* This is an 32bit object and we cannot easily get the offsets
--     into the 64bit kernel. Just hardcode them here. This assumes
--     that all the stubs don't need more than 0x100 bytes. */
--  . = VSYSCALL_BASE + 0x500;
+-	/* Set TF on the kernel stack.. */
+-	regs->eflags |= TRAP_FLAG;
 -
--  .text.sigreturn  : { *(.text.sigreturn) }	:text =0x90909090
+-	/*
+-	 * ..but if TF is changed by the instruction we will trace,
+-	 * don't mark it as being "us" that set it, so that we
+-	 * won't clear it by hand later.
+-	 */
+-	if (is_setting_trap_flag(child, regs))
+-		return;
+-	
+-	child->ptrace |= PT_DTRACE;
+-}
 -
--  . = VSYSCALL_BASE + 0x600;
+-static void clear_singlestep(struct task_struct *child)
+-{
+-	/* Always clear TIF_SINGLESTEP... */
+-	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 -
--  .text.rtsigreturn : { *(.text.rtsigreturn) }   :text =0x90909090
--	
--  .note		  : { *(.note.*) }		:text :note
--  .eh_frame_hdr   : { *(.eh_frame_hdr) }	:text :eh_frame_hdr
--  .eh_frame       : { KEEP (*(.eh_frame)) }	:text
--  .dynamic        : { *(.dynamic) }		:text :dynamic
--  .useless        : {
--  	*(.got.plt) *(.got)
--	*(.data .data.* .gnu.linkonce.d.*)
--	*(.dynbss)
--	*(.bss .bss.* .gnu.linkonce.b.*)
--  }						:text
+-	/* But touch TF only if it was set by us.. */
+-	if (child->ptrace & PT_DTRACE) {
+-		struct pt_regs *regs = get_child_regs(child);
+-		regs->eflags &= ~TRAP_FLAG;
+-		child->ptrace &= ~PT_DTRACE;
+-	}
 -}
 -
 -/*
-- * We must supply the ELF program headers explicitly to get just one
-- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+- * Called by kernel/ptrace.c when detaching..
+- *
+- * Make sure the single step bit is not set.
 - */
--PHDRS
+-void ptrace_disable(struct task_struct *child)
+-{ 
+-	clear_singlestep(child);
+-	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+-}
+-
+-/*
+- * Perform get_thread_area on behalf of the traced child.
+- */
+-static int
+-ptrace_get_thread_area(struct task_struct *child,
+-		       int idx, struct user_desc __user *user_desc)
 -{
--  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
--  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
--  note PT_NOTE FLAGS(4); /* PF_R */
--  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
+-	struct user_desc info;
+-	struct desc_struct *desc;
+-
+-/*
+- * Get the current Thread-Local Storage area:
+- */
+-
+-#define GET_BASE(desc) ( \
+-	(((desc)->a >> 16) & 0x0000ffff) | \
+-	(((desc)->b << 16) & 0x00ff0000) | \
+-	( (desc)->b        & 0xff000000)   )
+-
+-#define GET_LIMIT(desc) ( \
+-	((desc)->a & 0x0ffff) | \
+-	 ((desc)->b & 0xf0000) )
+-
+-#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
+-#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
+-#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
+-#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
+-#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
+-#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
+-
+-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+-		return -EINVAL;
+-
+-	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+-
+-	info.entry_number = idx;
+-	info.base_addr = GET_BASE(desc);
+-	info.limit = GET_LIMIT(desc);
+-	info.seg_32bit = GET_32BIT(desc);
+-	info.contents = GET_CONTENTS(desc);
+-	info.read_exec_only = !GET_WRITABLE(desc);
+-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
+-	info.seg_not_present = !GET_PRESENT(desc);
+-	info.useable = GET_USEABLE(desc);
+-
+-	if (copy_to_user(user_desc, &info, sizeof(info)))
+-		return -EFAULT;
+-
+-	return 0;
 -}
 -
 -/*
-- * This controls what symbols we export from the DSO.
+- * Perform set_thread_area on behalf of the traced child.
 - */
--VERSION
+-static int
+-ptrace_set_thread_area(struct task_struct *child,
+-		       int idx, struct user_desc __user *user_desc)
 -{
--  LINUX_2.5 {
--    global:
--    	__kernel_vsyscall;
--    	__kernel_sigreturn;
--    	__kernel_rt_sigreturn;
+-	struct user_desc info;
+-	struct desc_struct *desc;
 -
--    local: *;
--  };
+-	if (copy_from_user(&info, user_desc, sizeof(info)))
+-		return -EFAULT;
+-
+-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+-		return -EINVAL;
+-
+-	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+-	if (LDT_empty(&info)) {
+-		desc->a = 0;
+-		desc->b = 0;
+-	} else {
+-		desc->a = LDT_entry_a(&info);
+-		desc->b = LDT_entry_b(&info);
+-	}
+-
+-	return 0;
 -}
 -
--/* The ELF entry point can be used to set the AT_SYSINFO value.  */
--ENTRY(__kernel_vsyscall);
-diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
-index 3857334..6f81300 100644
---- a/arch/x86/kernel/Makefile
-+++ b/arch/x86/kernel/Makefile
-@@ -1,9 +1,91 @@
--ifeq ($(CONFIG_X86_32),y)
--include ${srctree}/arch/x86/kernel/Makefile_32
--else
--include ${srctree}/arch/x86/kernel/Makefile_64
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+extra-y                := head_$(BITS).o init_task.o vmlinux.lds
-+extra-$(CONFIG_X86_64) += head64.o
-+
-+CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
-+CFLAGS_vsyscall_64.o := $(PROFILING) -g0
-+
-+obj-y			:= process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
-+obj-y			+= traps_$(BITS).o irq_$(BITS).o
-+obj-y			+= time_$(BITS).o ioport.o ldt.o
-+obj-y			+= setup_$(BITS).o i8259_$(BITS).o
-+obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
-+obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
-+obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o setup64.o
-+obj-y			+= pci-dma_$(BITS).o  bootflag.o e820_$(BITS).o
-+obj-y			+= quirks.o i8237.o topology.o kdebugfs.o
-+obj-y			+= alternative.o i8253.o
-+obj-$(CONFIG_X86_64)	+= pci-nommu_64.o bugs_64.o
-+obj-y			+= tsc_$(BITS).o io_delay.o rtc.o
-+
-+obj-y				+= i387.o
-+obj-y				+= ptrace.o
-+obj-y				+= ds.o
-+obj-$(CONFIG_X86_32)		+= tls.o
-+obj-$(CONFIG_IA32_EMULATION)	+= tls.o
-+obj-y				+= step.o
-+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
-+obj-y				+= cpu/
-+obj-y				+= acpi/
-+obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
-+obj-$(CONFIG_X86_64)		+= reboot.o
-+obj-$(CONFIG_MCA)		+= mca_32.o
-+obj-$(CONFIG_X86_MSR)		+= msr.o
-+obj-$(CONFIG_X86_CPUID)		+= cpuid.o
-+obj-$(CONFIG_MICROCODE)		+= microcode.o
-+obj-$(CONFIG_PCI)		+= early-quirks.o
-+obj-$(CONFIG_APM)		+= apm_32.o
-+obj-$(CONFIG_X86_SMP)		+= smp_$(BITS).o smpboot_$(BITS).o tsc_sync.o
-+obj-$(CONFIG_X86_32_SMP)	+= smpcommon_32.o
-+obj-$(CONFIG_X86_64_SMP)	+= smp_64.o smpboot_64.o tsc_sync.o
-+obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_$(BITS).o
-+obj-$(CONFIG_X86_MPPARSE)	+= mpparse_$(BITS).o
-+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_$(BITS).o nmi_$(BITS).o
-+obj-$(CONFIG_X86_IO_APIC)	+= io_apic_$(BITS).o
-+obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
-+obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
-+obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
-+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
-+obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
-+obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit_32.o
-+obj-$(CONFIG_X86_VSMP)		+= vsmp_64.o
-+obj-$(CONFIG_KPROBES)		+= kprobes.o
-+obj-$(CONFIG_MODULES)		+= module_$(BITS).o
-+obj-$(CONFIG_ACPI_SRAT) 	+= srat_32.o
-+obj-$(CONFIG_EFI) 		+= efi.o efi_$(BITS).o efi_stub_$(BITS).o
-+obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault_32.o
-+obj-$(CONFIG_VM86)		+= vm86_32.o
-+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-+
-+obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
-+
-+obj-$(CONFIG_K8_NB)		+= k8.o
-+obj-$(CONFIG_MGEODE_LX)		+= geode_32.o mfgpt_32.o
-+obj-$(CONFIG_DEBUG_RODATA_TEST)	+= test_rodata.o
-+obj-$(CONFIG_DEBUG_NX_TEST)	+= test_nx.o
-+
-+obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
-+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
-+
-+ifdef CONFIG_INPUT_PCSPKR
-+obj-y				+= pcspeaker.o
- endif
- 
--# Workaround to delete .lds files with make clean
--# The problem is that we do not enter Makefile_32 with make clean.
--clean-files := vsyscall*.lds vsyscall*.so
-+obj-$(CONFIG_SCx200)		+= scx200_32.o
-+
-+###
-+# 64 bit specific files
-+ifeq ($(CONFIG_X86_64),y)
-+        obj-y				+= genapic_64.o genapic_flat_64.o
-+        obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
-+        obj-$(CONFIG_AUDIT)		+= audit_64.o
-+        obj-$(CONFIG_PM)		+= suspend_64.o
-+        obj-$(CONFIG_HIBERNATION)	+= suspend_asm_64.o
-+
-+        obj-$(CONFIG_GART_IOMMU)	+= pci-gart_64.o aperture_64.o
-+        obj-$(CONFIG_CALGARY_IOMMU)	+= pci-calgary_64.o tce_64.o
-+        obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb_64.o
-+endif
-diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
-deleted file mode 100644
-index a7bc93c..0000000
---- a/arch/x86/kernel/Makefile_32
-+++ /dev/null
-@@ -1,88 +0,0 @@
--#
--# Makefile for the linux kernel.
--#
+-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+-{
+-	struct user * dummy = NULL;
+-	int i, ret;
+-	unsigned long __user *datap = (unsigned long __user *)data;
 -
--extra-y := head_32.o init_task.o vmlinux.lds
--CPPFLAGS_vmlinux.lds += -Ui386
+-	switch (request) {
+-	/* when I and D space are separate, these will need to be fixed. */
+-	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
+-	case PTRACE_PEEKDATA:
+-		ret = generic_ptrace_peekdata(child, addr, data);
+-		break;
 -
--obj-y	:= process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
--		ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
--		pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\
--		quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o
+-	/* read the word at location addr in the USER area. */
+-	case PTRACE_PEEKUSR: {
+-		unsigned long tmp;
 -
--obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
--obj-y				+= cpu/
--obj-y				+= acpi/
--obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot_32.o
--obj-$(CONFIG_MCA)		+= mca_32.o
--obj-$(CONFIG_X86_MSR)		+= msr.o
--obj-$(CONFIG_X86_CPUID)		+= cpuid.o
--obj-$(CONFIG_MICROCODE)		+= microcode.o
--obj-$(CONFIG_PCI)		+= early-quirks.o
--obj-$(CONFIG_APM)		+= apm_32.o
--obj-$(CONFIG_X86_SMP)		+= smp_32.o smpboot_32.o tsc_sync.o
--obj-$(CONFIG_SMP)		+= smpcommon_32.o
--obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_32.o
--obj-$(CONFIG_X86_MPPARSE)	+= mpparse_32.o
--obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_32.o nmi_32.o
--obj-$(CONFIG_X86_IO_APIC)	+= io_apic_32.o
--obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
--obj-$(CONFIG_KEXEC)		+= machine_kexec_32.o relocate_kernel_32.o crash.o
--obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_32.o
--obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
--obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit_32.o
--obj-$(CONFIG_KPROBES)		+= kprobes_32.o
--obj-$(CONFIG_MODULES)		+= module_32.o
--obj-y				+= sysenter_32.o vsyscall_32.o
--obj-$(CONFIG_ACPI_SRAT) 	+= srat_32.o
--obj-$(CONFIG_EFI) 		+= efi_32.o efi_stub_32.o
--obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault_32.o
--obj-$(CONFIG_VM86)		+= vm86_32.o
--obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
--obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
--obj-$(CONFIG_K8_NB)		+= k8.o
--obj-$(CONFIG_MGEODE_LX)		+= geode_32.o mfgpt_32.o
+-		ret = -EIO;
+-		if ((addr & 3) || addr < 0 || 
+-		    addr > sizeof(struct user) - 3)
+-			break;
 -
--obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
--obj-$(CONFIG_PARAVIRT)		+= paravirt_32.o
--obj-y				+= pcspeaker.o
+-		tmp = 0;  /* Default return condition */
+-		if(addr < FRAME_SIZE*sizeof(long))
+-			tmp = getreg(child, addr);
+-		if(addr >= (long) &dummy->u_debugreg[0] &&
+-		   addr <= (long) &dummy->u_debugreg[7]){
+-			addr -= (long) &dummy->u_debugreg[0];
+-			addr = addr >> 2;
+-			tmp = child->thread.debugreg[addr];
+-		}
+-		ret = put_user(tmp, datap);
+-		break;
+-	}
 -
--obj-$(CONFIG_SCx200)		+= scx200_32.o
+-	/* when I and D space are separate, this will have to be fixed. */
+-	case PTRACE_POKETEXT: /* write the word at location addr. */
+-	case PTRACE_POKEDATA:
+-		ret = generic_ptrace_pokedata(child, addr, data);
+-		break;
 -
--# vsyscall_32.o contains the vsyscall DSO images as __initdata.
--# We must build both images before we can assemble it.
--# Note: kbuild does not track this dependency due to usage of .incbin
--$(obj)/vsyscall_32.o: $(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so
--targets += $(foreach F,int80 sysenter,vsyscall-$F_32.o vsyscall-$F_32.so)
--targets += vsyscall-note_32.o vsyscall_32.lds
+-	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+-		ret = -EIO;
+-		if ((addr & 3) || addr < 0 || 
+-		    addr > sizeof(struct user) - 3)
+-			break;
 -
--# The DSO images are built using a special linker script.
--quiet_cmd_syscall = SYSCALL $@
--      cmd_syscall = $(CC) -m elf_i386 -nostdlib $(SYSCFLAGS_$(@F)) \
--		          -Wl,-T,$(filter-out FORCE,$^) -o $@
+-		if (addr < FRAME_SIZE*sizeof(long)) {
+-			ret = putreg(child, addr, data);
+-			break;
+-		}
+-		/* We need to be very careful here.  We implicitly
+-		   want to modify a portion of the task_struct, and we
+-		   have to be selective about what portions we allow someone
+-		   to modify. */
 -
--export CPPFLAGS_vsyscall_32.lds += -P -C -Ui386
+-		  ret = -EIO;
+-		  if(addr >= (long) &dummy->u_debugreg[0] &&
+-		     addr <= (long) &dummy->u_debugreg[7]){
 -
--vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
--		 $(call ld-option, -Wl$(comma)--hash-style=sysv)
--SYSCFLAGS_vsyscall-sysenter_32.so	= $(vsyscall-flags)
--SYSCFLAGS_vsyscall-int80_32.so	= $(vsyscall-flags)
+-			  if(addr == (long) &dummy->u_debugreg[4]) break;
+-			  if(addr == (long) &dummy->u_debugreg[5]) break;
+-			  if(addr < (long) &dummy->u_debugreg[4] &&
+-			     ((unsigned long) data) >= TASK_SIZE-3) break;
+-			  
+-			  /* Sanity-check data. Take one half-byte at once with
+-			   * check = (val >> (16 + 4*i)) & 0xf. It contains the
+-			   * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
+-			   * 2 and 3 are LENi. Given a list of invalid values,
+-			   * we do mask |= 1 << invalid_value, so that
+-			   * (mask >> check) & 1 is a correct test for invalid
+-			   * values.
+-			   *
+-			   * R/Wi contains the type of the breakpoint /
+-			   * watchpoint, LENi contains the length of the watched
+-			   * data in the watchpoint case.
+-			   *
+-			   * The invalid values are:
+-			   * - LENi == 0x10 (undefined), so mask |= 0x0f00.
+-			   * - R/Wi == 0x10 (break on I/O reads or writes), so
+-			   *   mask |= 0x4444.
+-			   * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
+-			   *   0x1110.
+-			   *
+-			   * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
+-			   *
+-			   * See the Intel Manual "System Programming Guide",
+-			   * 15.2.4
+-			   *
+-			   * Note that LENi == 0x10 is defined on x86_64 in long
+-			   * mode (i.e. even for 32-bit userspace software, but
+-			   * 64-bit kernel), so the x86_64 mask value is 0x5454.
+-			   * See the AMD manual no. 24593 (AMD64 System
+-			   * Programming)*/
 -
--$(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so: \
--$(obj)/vsyscall-%.so: $(src)/vsyscall_32.lds \
--		      $(obj)/vsyscall-%.o $(obj)/vsyscall-note_32.o FORCE
--	$(call if_changed,syscall)
+-			  if(addr == (long) &dummy->u_debugreg[7]) {
+-				  data &= ~DR_CONTROL_RESERVED;
+-				  for(i=0; i<4; i++)
+-					  if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
+-						  goto out_tsk;
+-				  if (data)
+-					  set_tsk_thread_flag(child, TIF_DEBUG);
+-				  else
+-					  clear_tsk_thread_flag(child, TIF_DEBUG);
+-			  }
+-			  addr -= (long) &dummy->u_debugreg;
+-			  addr = addr >> 2;
+-			  child->thread.debugreg[addr] = data;
+-			  ret = 0;
+-		  }
+-		  break;
 -
--# We also create a special relocatable object that should mirror the symbol
--# table and layout of the linked DSO.  With ld -R we can then refer to
--# these symbols in the kernel code rather than hand-coded addresses.
--extra-y += vsyscall-syms.o
--$(obj)/built-in.o: $(obj)/vsyscall-syms.o
--$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
+-	case PTRACE_SYSEMU: /* continue and stop at next syscall, which will not be executed */
+-	case PTRACE_SYSCALL:	/* continue and stop at next (return from) syscall */
+-	case PTRACE_CONT:	/* restart after signal. */
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		if (request == PTRACE_SYSEMU) {
+-			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		} else if (request == PTRACE_SYSCALL) {
+-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+-		} else {
+-			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		}
+-		child->exit_code = data;
+-		/* make sure the single step bit is not set. */
+-		clear_singlestep(child);
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
 -
--SYSCFLAGS_vsyscall-syms.o = -r
--$(obj)/vsyscall-syms.o: $(src)/vsyscall_32.lds \
--			$(obj)/vsyscall-sysenter_32.o $(obj)/vsyscall-note_32.o FORCE
--	$(call if_changed,syscall)
+-/*
+- * make the child exit.  Best I can do is send it a sigkill. 
+- * perhaps it should be put in the status that it wants to 
+- * exit.
+- */
+-	case PTRACE_KILL:
+-		ret = 0;
+-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
+-			break;
+-		child->exit_code = SIGKILL;
+-		/* make sure the single step bit is not set. */
+-		clear_singlestep(child);
+-		wake_up_process(child);
+-		break;
 -
+-	case PTRACE_SYSEMU_SINGLESTEP: /* Same as SYSEMU, but singlestep if not syscall */
+-	case PTRACE_SINGLESTEP:	/* set the trap flag. */
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
 -
-diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
-deleted file mode 100644
-index 5a88890..0000000
---- a/arch/x86/kernel/Makefile_64
-+++ /dev/null
-@@ -1,45 +0,0 @@
--#
--# Makefile for the linux kernel.
--#
+-		if (request == PTRACE_SYSEMU_SINGLESTEP)
+-			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+-		else
+-			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 -
--extra-y 	:= head_64.o head64.o init_task.o vmlinux.lds
--CPPFLAGS_vmlinux.lds += -Ux86_64
--EXTRA_AFLAGS	:= -traditional
+-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+-		set_singlestep(child);
+-		child->exit_code = data;
+-		/* give it a chance to run. */
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
 -
--obj-y	:= process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
--		ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
--		x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \
--		setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \
--		pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
--		i8253.o
+-	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+-	  	if (!access_ok(VERIFY_WRITE, datap, FRAME_SIZE*sizeof(long))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
+-			__put_user(getreg(child, i), datap);
+-			datap++;
+-		}
+-		ret = 0;
+-		break;
+-	}
 -
--obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
--obj-y				+= cpu/
--obj-y				+= acpi/
--obj-$(CONFIG_X86_MSR)		+= msr.o
--obj-$(CONFIG_MICROCODE)		+= microcode.o
--obj-$(CONFIG_X86_CPUID)		+= cpuid.o
--obj-$(CONFIG_SMP)		+= smp_64.o smpboot_64.o trampoline_64.o tsc_sync.o
--obj-y				+= apic_64.o  nmi_64.o
--obj-y				+= io_apic_64.o mpparse_64.o genapic_64.o genapic_flat_64.o
--obj-$(CONFIG_KEXEC)		+= machine_kexec_64.o relocate_kernel_64.o crash.o
--obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_64.o
--obj-$(CONFIG_PM)		+= suspend_64.o
--obj-$(CONFIG_HIBERNATION)	+= suspend_asm_64.o
--obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
--obj-$(CONFIG_GART_IOMMU)	+= pci-gart_64.o aperture_64.o
--obj-$(CONFIG_CALGARY_IOMMU)	+= pci-calgary_64.o tce_64.o
--obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb_64.o
--obj-$(CONFIG_KPROBES)		+= kprobes_64.o
--obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
--obj-$(CONFIG_X86_VSMP)		+= vsmp_64.o
--obj-$(CONFIG_K8_NB)		+= k8.o
--obj-$(CONFIG_AUDIT)		+= audit_64.o
+-	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+-		unsigned long tmp;
+-	  	if (!access_ok(VERIFY_READ, datap, FRAME_SIZE*sizeof(long))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
+-			__get_user(tmp, datap);
+-			putreg(child, i, tmp);
+-			datap++;
+-		}
+-		ret = 0;
+-		break;
+-	}
 -
--obj-$(CONFIG_MODULES)		+= module_64.o
--obj-$(CONFIG_PCI)		+= early-quirks.o
+-	case PTRACE_GETFPREGS: { /* Get the child FPU state. */
+-		if (!access_ok(VERIFY_WRITE, datap,
+-			       sizeof(struct user_i387_struct))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		ret = 0;
+-		if (!tsk_used_math(child))
+-			init_fpu(child);
+-		get_fpregs((struct user_i387_struct __user *)data, child);
+-		break;
+-	}
 -
--obj-y				+= topology.o
--obj-y				+= pcspeaker.o
+-	case PTRACE_SETFPREGS: { /* Set the child FPU state. */
+-		if (!access_ok(VERIFY_READ, datap,
+-			       sizeof(struct user_i387_struct))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		set_stopped_child_used_math(child);
+-		set_fpregs(child, (struct user_i387_struct __user *)data);
+-		ret = 0;
+-		break;
+-	}
 -
--CFLAGS_vsyscall_64.o		:= $(PROFILING) -g0
-diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
-index 1351c39..19d3d6e 100644
---- a/arch/x86/kernel/acpi/Makefile
-+++ b/arch/x86/kernel/acpi/Makefile
-@@ -1,5 +1,5 @@
- obj-$(CONFIG_ACPI)		+= boot.o
--obj-$(CONFIG_ACPI_SLEEP)	+= sleep_$(BITS).o wakeup_$(BITS).o
-+obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
- 
- ifneq ($(CONFIG_ACPI_PROCESSOR),)
- obj-y				+= cstate.o processor.o
-diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
-new file mode 100644
-index 0000000..6bc815c
---- /dev/null
-+++ b/arch/x86/kernel/acpi/sleep.c
-@@ -0,0 +1,87 @@
-+/*
-+ * sleep.c - x86-specific ACPI sleep support.
-+ *
-+ *  Copyright (C) 2001-2003 Patrick Mochel
-+ *  Copyright (C) 2001-2003 Pavel Machek <pavel at suse.cz>
-+ */
-+
-+#include <linux/acpi.h>
-+#include <linux/bootmem.h>
-+#include <linux/dmi.h>
-+#include <linux/cpumask.h>
-+
-+#include <asm/smp.h>
-+
-+/* address in low memory of the wakeup routine. */
-+unsigned long acpi_wakeup_address = 0;
-+unsigned long acpi_realmode_flags;
-+extern char wakeup_start, wakeup_end;
-+
-+extern unsigned long acpi_copy_wakeup_routine(unsigned long);
-+
-+/**
-+ * acpi_save_state_mem - save kernel state
-+ *
-+ * Create an identity mapped page table and copy the wakeup routine to
-+ * low memory.
-+ */
-+int acpi_save_state_mem(void)
-+{
-+	if (!acpi_wakeup_address) {
-+		printk(KERN_ERR "Could not allocate memory during boot, S3 disabled\n");
-+		return -ENOMEM;
-+	}
-+	memcpy((void *)acpi_wakeup_address, &wakeup_start,
-+	       &wakeup_end - &wakeup_start);
-+	acpi_copy_wakeup_routine(acpi_wakeup_address);
-+
-+	return 0;
-+}
-+
-+/*
-+ * acpi_restore_state - undo effects of acpi_save_state_mem
-+ */
-+void acpi_restore_state_mem(void)
-+{
-+}
-+
-+
-+/**
-+ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
-+ *
-+ * We allocate a page from the first 1MB of memory for the wakeup
-+ * routine for when we come back from a sleep state. The
-+ * runtime allocator allows specification of <16MB pages, but not
-+ * <1MB pages.
-+ */
-+void __init acpi_reserve_bootmem(void)
-+{
-+	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE*2) {
-+		printk(KERN_ERR
-+		       "ACPI: Wakeup code way too big, S3 disabled.\n");
-+		return;
-+	}
-+
-+	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2);
-+	if (!acpi_wakeup_address)
-+		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
-+}
-+
-+
-+static int __init acpi_sleep_setup(char *str)
-+{
-+	while ((str != NULL) && (*str != '\0')) {
-+		if (strncmp(str, "s3_bios", 7) == 0)
-+			acpi_realmode_flags |= 1;
-+		if (strncmp(str, "s3_mode", 7) == 0)
-+			acpi_realmode_flags |= 2;
-+		if (strncmp(str, "s3_beep", 7) == 0)
-+			acpi_realmode_flags |= 4;
-+		str = strchr(str, ',');
-+		if (str != NULL)
-+			str += strspn(str, ", \t");
-+	}
-+	return 1;
-+}
-+
-+__setup("acpi_sleep=", acpi_sleep_setup);
-diff --git a/arch/x86/kernel/acpi/sleep_32.c b/arch/x86/kernel/acpi/sleep_32.c
-index 1069948..63fe552 100644
---- a/arch/x86/kernel/acpi/sleep_32.c
-+++ b/arch/x86/kernel/acpi/sleep_32.c
-@@ -12,76 +12,6 @@
- 
- #include <asm/smp.h>
- 
--/* address in low memory of the wakeup routine. */
--unsigned long acpi_wakeup_address = 0;
--unsigned long acpi_realmode_flags;
--extern char wakeup_start, wakeup_end;
+-	case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
+-		if (!access_ok(VERIFY_WRITE, datap,
+-			       sizeof(struct user_fxsr_struct))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		if (!tsk_used_math(child))
+-			init_fpu(child);
+-		ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
+-		break;
+-	}
 -
--extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
+-	case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
+-		if (!access_ok(VERIFY_READ, datap,
+-			       sizeof(struct user_fxsr_struct))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		set_stopped_child_used_math(child);
+-		ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
+-		break;
+-	}
 -
--/**
-- * acpi_save_state_mem - save kernel state
-- *
-- * Create an identity mapped page table and copy the wakeup routine to
-- * low memory.
-- */
--int acpi_save_state_mem(void)
--{
--	if (!acpi_wakeup_address)
--		return 1;
--	memcpy((void *)acpi_wakeup_address, &wakeup_start,
--	       &wakeup_end - &wakeup_start);
--	acpi_copy_wakeup_routine(acpi_wakeup_address);
+-	case PTRACE_GET_THREAD_AREA:
+-		ret = ptrace_get_thread_area(child, addr,
+-					(struct user_desc __user *) data);
+-		break;
 -
--	return 0;
+-	case PTRACE_SET_THREAD_AREA:
+-		ret = ptrace_set_thread_area(child, addr,
+-					(struct user_desc __user *) data);
+-		break;
+-
+-	default:
+-		ret = ptrace_request(child, request, addr, data);
+-		break;
+-	}
+- out_tsk:
+-	return ret;
 -}
 -
--/*
-- * acpi_restore_state - undo effects of acpi_save_state_mem
-- */
--void acpi_restore_state_mem(void)
+-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
 -{
+-	struct siginfo info;
+-
+-	tsk->thread.trap_no = 1;
+-	tsk->thread.error_code = error_code;
+-
+-	memset(&info, 0, sizeof(info));
+-	info.si_signo = SIGTRAP;
+-	info.si_code = TRAP_BRKPT;
+-
+-	/* User-mode eip? */
+-	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
+-
+-	/* Send us the fake SIGTRAP */
+-	force_sig_info(SIGTRAP, &info, tsk);
 -}
 -
--/**
-- * acpi_reserve_bootmem - do _very_ early ACPI initialisation
-- *
-- * We allocate a page from the first 1MB of memory for the wakeup
-- * routine for when we come back from a sleep state. The
-- * runtime allocator allows specification of <16MB pages, but not
-- * <1MB pages.
+-/* notification of system call entry/exit
+- * - triggered by current->work.syscall_trace
 - */
--void __init acpi_reserve_bootmem(void)
+-__attribute__((regparm(3)))
+-int do_syscall_trace(struct pt_regs *regs, int entryexit)
 -{
--	if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) {
--		printk(KERN_ERR
--		       "ACPI: Wakeup code way too big, S3 disabled.\n");
--		return;
+-	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
+-	/*
+-	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
+-	 * interception
+-	 */
+-	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
+-	int ret = 0;
+-
+-	/* do the secure computing check first */
+-	if (!entryexit)
+-		secure_computing(regs->orig_eax);
+-
+-	if (unlikely(current->audit_context)) {
+-		if (entryexit)
+-			audit_syscall_exit(AUDITSC_RESULT(regs->eax),
+-						regs->eax);
+-		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
+-		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
+-		 * not used, entry.S will call us only on syscall exit, not
+-		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
+-		 * calling send_sigtrap() on syscall entry.
+-		 *
+-		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
+-		 * is_singlestep is false, despite his name, so we will still do
+-		 * the correct thing.
+-		 */
+-		else if (is_singlestep)
+-			goto out;
 -	}
 -
--	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
--	if (!acpi_wakeup_address)
--		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
--}
+-	if (!(current->ptrace & PT_PTRACED))
+-		goto out;
 -
--static int __init acpi_sleep_setup(char *str)
--{
--	while ((str != NULL) && (*str != '\0')) {
--		if (strncmp(str, "s3_bios", 7) == 0)
--			acpi_realmode_flags |= 1;
--		if (strncmp(str, "s3_mode", 7) == 0)
--			acpi_realmode_flags |= 2;
--		if (strncmp(str, "s3_beep", 7) == 0)
--			acpi_realmode_flags |= 4;
--		str = strchr(str, ',');
--		if (str != NULL)
--			str += strspn(str, ", \t");
+-	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
+-	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
+-	 * here. We have to check this and return */
+-	if (is_sysemu && entryexit)
+-		return 0;
+-
+-	/* Fake a debug trap */
+-	if (is_singlestep)
+-		send_sigtrap(current, regs, 0);
+-
+- 	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
+-		goto out;
+-
+-	/* the 0x80 provides a way for the tracing parent to distinguish
+-	   between a syscall stop and SIGTRAP delivery */
+-	/* Note that the debugger could change the result of test_thread_flag!*/
+-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));
+-
+-	/*
+-	 * this isn't the same as continuing with a signal, but it will do
+-	 * for normal use.  strace only continues with a signal if the
+-	 * stopping signal is not SIGTRAP.  -brl
+-	 */
+-	if (current->exit_code) {
+-		send_sig(current->exit_code, current, 1);
+-		current->exit_code = 0;
 -	}
+-	ret = is_sysemu;
+-out:
+-	if (unlikely(current->audit_context) && !entryexit)
+-		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
+-				    regs->ebx, regs->ecx, regs->edx, regs->esi);
+-	if (ret == 0)
+-		return 0;
+-
+-	regs->orig_eax = -1; /* force skip of syscall restarting */
+-	if (unlikely(current->audit_context))
+-		audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
 -	return 1;
 -}
--
--__setup("acpi_sleep=", acpi_sleep_setup);
--
- /* Ouch, we want to delete this. We already have better version in userspace, in
-    s2ram from suspend.sf.net project */
- static __init int reset_videomode_after_s3(const struct dmi_system_id *d)
-diff --git a/arch/x86/kernel/acpi/sleep_64.c b/arch/x86/kernel/acpi/sleep_64.c
+diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
 deleted file mode 100644
-index da42de2..0000000
---- a/arch/x86/kernel/acpi/sleep_64.c
+index 607085f..0000000
+--- a/arch/x86/kernel/ptrace_64.c
 +++ /dev/null
-@@ -1,117 +0,0 @@
+@@ -1,621 +0,0 @@
+-/* By Ross Biro 1/23/92 */
 -/*
-- *  acpi.c - Architecture-Specific Low-Level ACPI Support
-- *
-- *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh at intel.com>
-- *  Copyright (C) 2001 Jun Nakajima <jun.nakajima at intel.com>
-- *  Copyright (C) 2001 Patrick Mochel <mochel at osdl.org>
-- *  Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
-- *  Copyright (C) 2003 Pavel Machek, SuSE Labs
-- *
-- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- *
-- *  This program is free software; you can redistribute it and/or modify
-- *  it under the terms of the GNU General Public License as published by
-- *  the Free Software Foundation; either version 2 of the License, or
-- *  (at your option) any later version.
-- *
-- *  This program is distributed in the hope that it will be useful,
-- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
-- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-- *  GNU General Public License for more details.
-- *
-- *  You should have received a copy of the GNU General Public License
-- *  along with this program; if not, write to the Free Software
-- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-- *
-- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- * Pentium III FXSR, SSE support
+- *	Gareth Hughes <gareth at valinux.com>, May 2000
+- * 
+- * x86-64 port 2000-2002 Andi Kleen
 - */
 -
 -#include <linux/kernel.h>
--#include <linux/init.h>
--#include <linux/types.h>
--#include <linux/stddef.h>
--#include <linux/slab.h>
--#include <linux/pci.h>
--#include <linux/bootmem.h>
--#include <linux/acpi.h>
--#include <linux/cpumask.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/errno.h>
+-#include <linux/ptrace.h>
+-#include <linux/user.h>
+-#include <linux/security.h>
+-#include <linux/audit.h>
+-#include <linux/seccomp.h>
+-#include <linux/signal.h>
 -
--#include <asm/mpspec.h>
--#include <asm/io.h>
--#include <asm/apic.h>
--#include <asm/apicdef.h>
--#include <asm/page.h>
+-#include <asm/uaccess.h>
 -#include <asm/pgtable.h>
--#include <asm/pgalloc.h>
--#include <asm/io_apic.h>
+-#include <asm/system.h>
+-#include <asm/processor.h>
+-#include <asm/i387.h>
+-#include <asm/debugreg.h>
+-#include <asm/ldt.h>
+-#include <asm/desc.h>
 -#include <asm/proto.h>
--#include <asm/tlbflush.h>
+-#include <asm/ia32.h>
 -
--/* --------------------------------------------------------------------------
--                              Low-Level Sleep Support
--   -------------------------------------------------------------------------- */
+-/*
+- * does not yet catch signals sent when the child dies.
+- * in exit.c or in signal.c.
+- */
 -
--/* address in low memory of the wakeup routine. */
--unsigned long acpi_wakeup_address = 0;
--unsigned long acpi_realmode_flags;
--extern char wakeup_start, wakeup_end;
+-/*
+- * Determines which flags the user has access to [1 = access, 0 = no access].
+- * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+- * Also masks reserved bits (63-22, 15, 5, 3, 1).
+- */
+-#define FLAG_MASK 0x54dd5UL
 -
--extern unsigned long acpi_copy_wakeup_routine(unsigned long);
+-/* set's the trap flag. */
+-#define TRAP_FLAG 0x100UL
 -
--/**
-- * acpi_save_state_mem - save kernel state
-- *
-- * Create an identity mapped page table and copy the wakeup routine to
-- * low memory.
+-/*
+- * eflags and offset of eflags on child stack..
 - */
--int acpi_save_state_mem(void)
+-#define EFLAGS offsetof(struct pt_regs, eflags)
+-#define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
+-
+-/*
+- * this routine will get a word off of the processes privileged stack. 
+- * the offset is how far from the base addr as stored in the TSS.  
+- * this routine assumes that all the privileged stacks are in our
+- * data space.
+- */   
+-static inline unsigned long get_stack_long(struct task_struct *task, int offset)
 -{
--	memcpy((void *)acpi_wakeup_address, &wakeup_start,
--	       &wakeup_end - &wakeup_start);
--	acpi_copy_wakeup_routine(acpi_wakeup_address);
+-	unsigned char *stack;
 -
--	return 0;
+-	stack = (unsigned char *)task->thread.rsp0;
+-	stack += offset;
+-	return (*((unsigned long *)stack));
 -}
 -
 -/*
-- * acpi_restore_state
+- * this routine will put a word on the processes privileged stack. 
+- * the offset is how far from the base addr as stored in the TSS.  
+- * this routine assumes that all the privileged stacks are in our
+- * data space.
 - */
--void acpi_restore_state_mem(void)
+-static inline long put_stack_long(struct task_struct *task, int offset,
+-	unsigned long data)
 -{
--}
+-	unsigned char * stack;
 -
--/**
-- * acpi_reserve_bootmem - do _very_ early ACPI initialisation
-- *
-- * We allocate a page in low memory for the wakeup
-- * routine for when we come back from a sleep state. The
-- * runtime allocator allows specification of <16M pages, but not
-- * <1M pages.
-- */
--void __init acpi_reserve_bootmem(void)
--{
--	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2);
--	if ((&wakeup_end - &wakeup_start) > (PAGE_SIZE*2))
--		printk(KERN_CRIT
--		       "ACPI: Wakeup code way too big, will crash on attempt"
--		       " to suspend\n");
+-	stack = (unsigned char *) task->thread.rsp0;
+-	stack += offset;
+-	*(unsigned long *) stack = data;
+-	return 0;
 -}
 -
--static int __init acpi_sleep_setup(char *str)
+-#define LDT_SEGMENT 4
+-
+-unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
 -{
--	while ((str != NULL) && (*str != '\0')) {
--		if (strncmp(str, "s3_bios", 7) == 0)
--			acpi_realmode_flags |= 1;
--		if (strncmp(str, "s3_mode", 7) == 0)
--			acpi_realmode_flags |= 2;
--		if (strncmp(str, "s3_beep", 7) == 0)
--			acpi_realmode_flags |= 4;
--		str = strchr(str, ',');
--		if (str != NULL)
--			str += strspn(str, ", \t");
+-	unsigned long addr, seg;
+-
+-	addr = regs->rip;
+-	seg = regs->cs & 0xffff;
+-
+-	/*
+-	 * We'll assume that the code segments in the GDT
+-	 * are all zero-based. That is largely true: the
+-	 * TLS segments are used for data, and the PNPBIOS
+-	 * and APM bios ones we just ignore here.
+-	 */
+-	if (seg & LDT_SEGMENT) {
+-		u32 *desc;
+-		unsigned long base;
+-
+-		seg &= ~7UL;
+-
+-		mutex_lock(&child->mm->context.lock);
+-		if (unlikely((seg >> 3) >= child->mm->context.size))
+-			addr = -1L; /* bogus selector, access would fault */
+-		else {
+-			desc = child->mm->context.ldt + seg;
+-			base = ((desc[0] >> 16) |
+-				((desc[1] & 0xff) << 16) |
+-				(desc[1] & 0xff000000));
+-
+-			/* 16-bit code segment? */
+-			if (!((desc[1] >> 22) & 1))
+-				addr &= 0xffff;
+-			addr += base;
+-		}
+-		mutex_unlock(&child->mm->context.lock);
 -	}
--	return 1;
+-
+-	return addr;
 -}
 -
--__setup("acpi_sleep=", acpi_sleep_setup);
+-static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+-{
+-	int i, copied;
+-	unsigned char opcode[15];
+-	unsigned long addr = convert_rip_to_linear(child, regs);
 -
-diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
-index 1e931aa..f53e327 100644
---- a/arch/x86/kernel/acpi/wakeup_32.S
-+++ b/arch/x86/kernel/acpi/wakeup_32.S
-@@ -1,4 +1,4 @@
--.text
-+	.section .text.page_aligned
- #include <linux/linkage.h>
- #include <asm/segment.h>
- #include <asm/page.h>
-diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
-index 5ed3bc5..2e1b9e0 100644
---- a/arch/x86/kernel/acpi/wakeup_64.S
-+++ b/arch/x86/kernel/acpi/wakeup_64.S
-@@ -344,13 +344,13 @@ do_suspend_lowlevel:
- 	call	save_processor_state
- 
- 	movq	$saved_context, %rax
--	movq	%rsp, pt_regs_rsp(%rax)
--	movq	%rbp, pt_regs_rbp(%rax)
--	movq	%rsi, pt_regs_rsi(%rax)
--	movq	%rdi, pt_regs_rdi(%rax)
--	movq	%rbx, pt_regs_rbx(%rax)
--	movq	%rcx, pt_regs_rcx(%rax)
--	movq	%rdx, pt_regs_rdx(%rax)
-+	movq	%rsp, pt_regs_sp(%rax)
-+	movq	%rbp, pt_regs_bp(%rax)
-+	movq	%rsi, pt_regs_si(%rax)
-+	movq	%rdi, pt_regs_di(%rax)
-+	movq	%rbx, pt_regs_bx(%rax)
-+	movq	%rcx, pt_regs_cx(%rax)
-+	movq	%rdx, pt_regs_dx(%rax)
- 	movq	%r8, pt_regs_r8(%rax)
- 	movq	%r9, pt_regs_r9(%rax)
- 	movq	%r10, pt_regs_r10(%rax)
-@@ -360,7 +360,7 @@ do_suspend_lowlevel:
- 	movq	%r14, pt_regs_r14(%rax)
- 	movq	%r15, pt_regs_r15(%rax)
- 	pushfq
--	popq	pt_regs_eflags(%rax)
-+	popq	pt_regs_flags(%rax)
- 
- 	movq	$.L97, saved_rip(%rip)
- 
-@@ -391,15 +391,15 @@ do_suspend_lowlevel:
- 	movq	%rbx, %cr2
- 	movq	saved_context_cr0(%rax), %rbx
- 	movq	%rbx, %cr0
--	pushq	pt_regs_eflags(%rax)
-+	pushq	pt_regs_flags(%rax)
- 	popfq
--	movq	pt_regs_rsp(%rax), %rsp
--	movq	pt_regs_rbp(%rax), %rbp
--	movq	pt_regs_rsi(%rax), %rsi
--	movq	pt_regs_rdi(%rax), %rdi
--	movq	pt_regs_rbx(%rax), %rbx
--	movq	pt_regs_rcx(%rax), %rcx
--	movq	pt_regs_rdx(%rax), %rdx
-+	movq	pt_regs_sp(%rax), %rsp
-+	movq	pt_regs_bp(%rax), %rbp
-+	movq	pt_regs_si(%rax), %rsi
-+	movq	pt_regs_di(%rax), %rdi
-+	movq	pt_regs_bx(%rax), %rbx
-+	movq	pt_regs_cx(%rax), %rcx
-+	movq	pt_regs_dx(%rax), %rdx
- 	movq	pt_regs_r8(%rax), %r8
- 	movq	pt_regs_r9(%rax), %r9
- 	movq	pt_regs_r10(%rax), %r10
-diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
-index d6405e0..45d79ea 100644
---- a/arch/x86/kernel/alternative.c
-+++ b/arch/x86/kernel/alternative.c
-@@ -273,6 +273,7 @@ struct smp_alt_module {
- };
- static LIST_HEAD(smp_alt_modules);
- static DEFINE_SPINLOCK(smp_alt);
-+static int smp_mode = 1;	/* protected by smp_alt */
- 
- void alternatives_smp_module_add(struct module *mod, char *name,
- 				 void *locks, void *locks_end,
-@@ -341,12 +342,13 @@ void alternatives_smp_switch(int smp)
- 
- #ifdef CONFIG_LOCKDEP
- 	/*
--	 * A not yet fixed binutils section handling bug prevents
--	 * alternatives-replacement from working reliably, so turn
--	 * it off:
-+	 * Older binutils section handling bug prevented
-+	 * alternatives-replacement from working reliably.
-+	 *
-+	 * If this still occurs then you should see a hang
-+	 * or crash shortly after this line:
- 	 */
--	printk("lockdep: not fixing up alternatives.\n");
--	return;
-+	printk("lockdep: fixing up alternatives.\n");
- #endif
- 
- 	if (noreplace_smp || smp_alt_once)
-@@ -354,21 +356,29 @@ void alternatives_smp_switch(int smp)
- 	BUG_ON(!smp && (num_online_cpus() > 1));
- 
- 	spin_lock_irqsave(&smp_alt, flags);
--	if (smp) {
-+
-+	/*
-+	 * Avoid unnecessary switches because it forces JIT based VMs to
-+	 * throw away all cached translations, which can be quite costly.
-+	 */
-+	if (smp == smp_mode) {
-+		/* nothing */
-+	} else if (smp) {
- 		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
--		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
--		clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
-+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
-+		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
- 		list_for_each_entry(mod, &smp_alt_modules, next)
- 			alternatives_smp_lock(mod->locks, mod->locks_end,
- 					      mod->text, mod->text_end);
- 	} else {
- 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
--		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
--		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
-+		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
-+		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
- 		list_for_each_entry(mod, &smp_alt_modules, next)
- 			alternatives_smp_unlock(mod->locks, mod->locks_end,
- 						mod->text, mod->text_end);
- 	}
-+	smp_mode = smp;
- 	spin_unlock_irqrestore(&smp_alt, flags);
- }
- 
-@@ -431,8 +441,9 @@ void __init alternative_instructions(void)
- 	if (smp_alt_once) {
- 		if (1 == num_possible_cpus()) {
- 			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
--			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
--			set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
-+			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
-+			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
-+
- 			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
- 						_text, _etext);
- 		}
-@@ -440,7 +451,10 @@ void __init alternative_instructions(void)
- 		alternatives_smp_module_add(NULL, "core kernel",
- 					    __smp_locks, __smp_locks_end,
- 					    _text, _etext);
--		alternatives_smp_switch(0);
-+
-+		/* Only switch to UP mode if we don't immediately boot others */
-+		if (num_possible_cpus() == 1 || setup_max_cpus <= 1)
-+			alternatives_smp_switch(0);
- 	}
- #endif
-  	apply_paravirt(__parainstructions, __parainstructions_end);
-diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
-index 5b69927..608152a 100644
---- a/arch/x86/kernel/aperture_64.c
-+++ b/arch/x86/kernel/aperture_64.c
-@@ -1,12 +1,12 @@
--/* 
-+/*
-  * Firmware replacement code.
-- * 
-+ *
-  * Work around broken BIOSes that don't set an aperture or only set the
-- * aperture in the AGP bridge. 
-- * If all fails map the aperture over some low memory.  This is cheaper than 
-- * doing bounce buffering. The memory is lost. This is done at early boot 
-- * because only the bootmem allocator can allocate 32+MB. 
-- * 
-+ * aperture in the AGP bridge.
-+ * If all fails map the aperture over some low memory.  This is cheaper than
-+ * doing bounce buffering. The memory is lost. This is done at early boot
-+ * because only the bootmem allocator can allocate 32+MB.
-+ *
-  * Copyright 2002 Andi Kleen, SuSE Labs.
-  */
- #include <linux/kernel.h>
-@@ -30,7 +30,7 @@ int gart_iommu_aperture_disabled __initdata = 0;
- int gart_iommu_aperture_allowed __initdata = 0;
- 
- int fallback_aper_order __initdata = 1; /* 64MB */
--int fallback_aper_force __initdata = 0; 
-+int fallback_aper_force __initdata = 0;
- 
- int fix_aperture __initdata = 1;
- 
-@@ -49,167 +49,270 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
- /* This code runs before the PCI subsystem is initialized, so just
-    access the northbridge directly. */
- 
--static u32 __init allocate_aperture(void) 
-+static u32 __init allocate_aperture(void)
- {
- 	u32 aper_size;
--	void *p; 
-+	void *p;
- 
--	if (fallback_aper_order > 7) 
--		fallback_aper_order = 7; 
--	aper_size = (32 * 1024 * 1024) << fallback_aper_order; 
-+	if (fallback_aper_order > 7)
-+		fallback_aper_order = 7;
-+	aper_size = (32 * 1024 * 1024) << fallback_aper_order;
- 
--	/* 
--	 * Aperture has to be naturally aligned. This means an 2GB aperture won't
--	 * have much chance of finding a place in the lower 4GB of memory.
--	 * Unfortunately we cannot move it up because that would make the
--	 * IOMMU useless.
-+	/*
-+	 * Aperture has to be naturally aligned. This means a 2GB aperture
-+	 * won't have much chance of finding a place in the lower 4GB of
-+	 * memory. Unfortunately we cannot move it up because that would
-+	 * make the IOMMU useless.
- 	 */
- 	p = __alloc_bootmem_nopanic(aper_size, aper_size, 0);
- 	if (!p || __pa(p)+aper_size > 0xffffffff) {
--		printk("Cannot allocate aperture memory hole (%p,%uK)\n",
--		       p, aper_size>>10);
-+		printk(KERN_ERR
-+			"Cannot allocate aperture memory hole (%p,%uK)\n",
-+				p, aper_size>>10);
- 		if (p)
- 			free_bootmem(__pa(p), aper_size);
- 		return 0;
- 	}
--	printk("Mapping aperture over %d KB of RAM @ %lx\n",
--	       aper_size >> 10, __pa(p)); 
-+	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
-+			aper_size >> 10, __pa(p));
- 	insert_aperture_resource((u32)__pa(p), aper_size);
--	return (u32)__pa(p); 
-+
-+	return (u32)__pa(p);
- }
- 
- static int __init aperture_valid(u64 aper_base, u32 aper_size)
--{ 
--	if (!aper_base) 
--		return 0;
--	if (aper_size < 64*1024*1024) { 
--		printk("Aperture too small (%d MB)\n", aper_size>>20);
-+{
-+	if (!aper_base)
- 		return 0;
+-	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+-	for (i = 0; i < copied; i++) {
+-		switch (opcode[i]) {
+-		/* popf and iret */
+-		case 0x9d: case 0xcf:
+-			return 1;
+-
+-			/* CHECKME: 64 65 */
+-
+-		/* opcode and address size prefixes */
+-		case 0x66: case 0x67:
+-			continue;
+-		/* irrelevant prefixes (segment overrides and repeats) */
+-		case 0x26: case 0x2e:
+-		case 0x36: case 0x3e:
+-		case 0x64: case 0x65:
+-		case 0xf2: case 0xf3:
+-			continue;
+-
+-		case 0x40 ... 0x4f:
+-			if (regs->cs != __USER_CS)
+-				/* 32-bit mode: register increment */
+-				return 0;
+-			/* 64-bit mode: REX prefix */
+-			continue;
+-
+-			/* CHECKME: f2, f3 */
+-
+-		/*
+-		 * pushf: NOTE! We should probably not let
+-		 * the user see the TF bit being set. But
+-		 * it's more pain than it's worth to avoid
+-		 * it, and a debugger could emulate this
+-		 * all in user space if it _really_ cares.
+-		 */
+-		case 0x9c:
+-		default:
+-			return 0;
+-		}
 -	}
-+
- 	if (aper_base + aper_size > 0x100000000UL) {
--		printk("Aperture beyond 4GB. Ignoring.\n");
--		return 0; 
-+		printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
-+		return 0;
- 	}
- 	if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
--		printk("Aperture pointing to e820 RAM. Ignoring.\n");
--		return 0; 
--	} 
-+		printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
-+		return 0;
-+	}
-+	if (aper_size < 64*1024*1024) {
-+		printk(KERN_ERR "Aperture too small (%d MB)\n", aper_size>>20);
-+		return 0;
-+	}
-+
- 	return 1;
--} 
-+}
- 
- /* Find a PCI capability */
--static __u32 __init find_cap(int num, int slot, int func, int cap) 
--{ 
--	u8 pos;
-+static __u32 __init find_cap(int num, int slot, int func, int cap)
-+{
- 	int bytes;
--	if (!(read_pci_config_16(num,slot,func,PCI_STATUS) & PCI_STATUS_CAP_LIST))
-+	u8 pos;
-+
-+	if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
-+						PCI_STATUS_CAP_LIST))
- 		return 0;
--	pos = read_pci_config_byte(num,slot,func,PCI_CAPABILITY_LIST);
--	for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { 
-+
-+	pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
-+	for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
- 		u8 id;
--		pos &= ~3; 
--		id = read_pci_config_byte(num,slot,func,pos+PCI_CAP_LIST_ID);
-+
-+		pos &= ~3;
-+		id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
- 		if (id == 0xff)
- 			break;
--		if (id == cap) 
--			return pos; 
--		pos = read_pci_config_byte(num,slot,func,pos+PCI_CAP_LIST_NEXT); 
--	} 
-+		if (id == cap)
-+			return pos;
-+		pos = read_pci_config_byte(num, slot, func,
-+						pos+PCI_CAP_LIST_NEXT);
-+	}
- 	return 0;
--} 
-+}
- 
- /* Read a standard AGPv3 bridge header */
- static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order)
--{ 
-+{
- 	u32 apsize;
- 	u32 apsizereg;
- 	int nbits;
- 	u32 aper_low, aper_hi;
- 	u64 aper;
- 
--	printk("AGP bridge at %02x:%02x:%02x\n", num, slot, func);
--	apsizereg = read_pci_config_16(num,slot,func, cap + 0x14);
-+	printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", num, slot, func);
-+	apsizereg = read_pci_config_16(num, slot, func, cap + 0x14);
- 	if (apsizereg == 0xffffffff) {
--		printk("APSIZE in AGP bridge unreadable\n");
-+		printk(KERN_ERR "APSIZE in AGP bridge unreadable\n");
- 		return 0;
- 	}
- 
- 	apsize = apsizereg & 0xfff;
- 	/* Some BIOS use weird encodings not in the AGPv3 table. */
--	if (apsize & 0xff) 
--		apsize |= 0xf00; 
-+	if (apsize & 0xff)
-+		apsize |= 0xf00;
- 	nbits = hweight16(apsize);
- 	*order = 7 - nbits;
- 	if ((int)*order < 0) /* < 32MB */
- 		*order = 0;
--	
--	aper_low = read_pci_config(num,slot,func, 0x10);
--	aper_hi = read_pci_config(num,slot,func,0x14);
-+
-+	aper_low = read_pci_config(num, slot, func, 0x10);
-+	aper_hi = read_pci_config(num, slot, func, 0x14);
- 	aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
- 
--	printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", 
--	       aper, 32 << *order, apsizereg);
-+	printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
-+			aper, 32 << *order, apsizereg);
- 
- 	if (!aperture_valid(aper, (32*1024*1024) << *order))
--	    return 0;
--	return (u32)aper; 
--} 
+-	return 0;
+-}
 -
--/* Look for an AGP bridge. Windows only expects the aperture in the
--   AGP bridge and some BIOS forget to initialize the Northbridge too.
--   Work around this here. 
+-static void set_singlestep(struct task_struct *child)
+-{
+-	struct pt_regs *regs = task_pt_regs(child);
 -
--   Do an PCI bus scan by hand because we're running before the PCI
--   subsystem. 
-+		return 0;
-+	return (u32)aper;
-+}
- 
--   All K8 AGP bridges are AGPv3 compliant, so we can do this scan
--   generically. It's probably overkill to always scan all slots because
--   the AGP bridges should be always an own bus on the HT hierarchy, 
--   but do it here for future safety. */
-+/*
-+ * Look for an AGP bridge. Windows only expects the aperture in the
-+ * AGP bridge and some BIOS forget to initialize the Northbridge too.
-+ * Work around this here.
-+ *
-+ * Do an PCI bus scan by hand because we're running before the PCI
-+ * subsystem.
-+ *
-+ * All K8 AGP bridges are AGPv3 compliant, so we can do this scan
-+ * generically. It's probably overkill to always scan all slots because
-+ * the AGP bridges should be always an own bus on the HT hierarchy,
-+ * but do it here for future safety.
-+ */
- static __u32 __init search_agp_bridge(u32 *order, int *valid_agp)
- {
- 	int num, slot, func;
- 
- 	/* Poor man's PCI discovery */
--	for (num = 0; num < 256; num++) { 
--		for (slot = 0; slot < 32; slot++) { 
--			for (func = 0; func < 8; func++) { 
-+	for (num = 0; num < 256; num++) {
-+		for (slot = 0; slot < 32; slot++) {
-+			for (func = 0; func < 8; func++) {
- 				u32 class, cap;
- 				u8 type;
--				class = read_pci_config(num,slot,func,
-+				class = read_pci_config(num, slot, func,
- 							PCI_CLASS_REVISION);
- 				if (class == 0xffffffff)
--					break; 
--				
--				switch (class >> 16) { 
-+					break;
-+
-+				switch (class >> 16) {
- 				case PCI_CLASS_BRIDGE_HOST:
- 				case PCI_CLASS_BRIDGE_OTHER: /* needed? */
- 					/* AGP bridge? */
--					cap = find_cap(num,slot,func,PCI_CAP_ID_AGP);
-+					cap = find_cap(num, slot, func,
-+							PCI_CAP_ID_AGP);
- 					if (!cap)
- 						break;
--					*valid_agp = 1; 
--					return read_agp(num,slot,func,cap,order);
--				} 
--				
-+					*valid_agp = 1;
-+					return read_agp(num, slot, func, cap,
-+							order);
-+				}
-+
- 				/* No multi-function device? */
--				type = read_pci_config_byte(num,slot,func,
-+				type = read_pci_config_byte(num, slot, func,
- 							       PCI_HEADER_TYPE);
- 				if (!(type & 0x80))
- 					break;
--			} 
--		} 
-+			}
-+		}
- 	}
--	printk("No AGP bridge found\n"); 
-+	printk(KERN_INFO "No AGP bridge found\n");
-+
- 	return 0;
- }
- 
-+static int gart_fix_e820 __initdata = 1;
-+
-+static int __init parse_gart_mem(char *p)
-+{
-+	if (!p)
-+		return -EINVAL;
-+
-+	if (!strncmp(p, "off", 3))
-+		gart_fix_e820 = 0;
-+	else if (!strncmp(p, "on", 2))
-+		gart_fix_e820 = 1;
-+
-+	return 0;
-+}
-+early_param("gart_fix_e820", parse_gart_mem);
-+
-+void __init early_gart_iommu_check(void)
-+{
-+	/*
-+	 * in case it is enabled before, esp for kexec/kdump,
-+	 * previous kernel already enable that. memset called
-+	 * by allocate_aperture/__alloc_bootmem_nopanic cause restart.
-+	 * or second kernel have different position for GART hole. and new
-+	 * kernel could use hole as RAM that is still used by GART set by
-+	 * first kernel
-+	 * or BIOS forget to put that in reserved.
-+	 * try to update e820 to make that region as reserved.
-+	 */
-+	int fix, num;
-+	u32 ctl;
-+	u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
-+	u64 aper_base = 0, last_aper_base = 0;
-+	int aper_enabled = 0, last_aper_enabled = 0;
-+
-+	if (!early_pci_allowed())
-+		return;
-+
-+	fix = 0;
-+	for (num = 24; num < 32; num++) {
-+		if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
-+			continue;
-+
-+		ctl = read_pci_config(0, num, 3, 0x90);
-+		aper_enabled = ctl & 1;
-+		aper_order = (ctl >> 1) & 7;
-+		aper_size = (32 * 1024 * 1024) << aper_order;
-+		aper_base = read_pci_config(0, num, 3, 0x94) & 0x7fff;
-+		aper_base <<= 25;
-+
-+		if ((last_aper_order && aper_order != last_aper_order) ||
-+		    (last_aper_base && aper_base != last_aper_base) ||
-+		    (last_aper_enabled && aper_enabled != last_aper_enabled)) {
-+			fix = 1;
-+			break;
-+		}
-+		last_aper_order = aper_order;
-+		last_aper_base = aper_base;
-+		last_aper_enabled = aper_enabled;
-+	}
-+
-+	if (!fix && !aper_enabled)
-+		return;
-+
-+	if (!aper_base || !aper_size || aper_base + aper_size > 0x100000000UL)
-+		fix = 1;
-+
-+	if (gart_fix_e820 && !fix && aper_enabled) {
-+		if (e820_any_mapped(aper_base, aper_base + aper_size,
-+				    E820_RAM)) {
-+			/* reserved it, so we can resuse it in second kernel */
-+			printk(KERN_INFO "update e820 for GART\n");
-+			add_memory_region(aper_base, aper_size, E820_RESERVED);
-+			update_e820();
-+		}
-+		return;
-+	}
-+
-+	/* different nodes have different setting, disable them all at first*/
-+	for (num = 24; num < 32; num++) {
-+		if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
-+			continue;
-+
-+		ctl = read_pci_config(0, num, 3, 0x90);
-+		ctl &= ~1;
-+		write_pci_config(0, num, 3, 0x90, ctl);
-+	}
-+
-+}
-+
- void __init gart_iommu_hole_init(void)
--{ 
--	int fix, num; 
-+{
- 	u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0;
- 	u64 aper_base, last_aper_base = 0;
--	int valid_agp = 0;
-+	int fix, num, valid_agp = 0;
-+	int node;
- 
- 	if (gart_iommu_aperture_disabled || !fix_aperture ||
- 	    !early_pci_allowed())
-@@ -218,24 +321,26 @@ void __init gart_iommu_hole_init(void)
- 	printk(KERN_INFO  "Checking aperture...\n");
- 
- 	fix = 0;
--	for (num = 24; num < 32; num++) {		
-+	node = 0;
-+	for (num = 24; num < 32; num++) {
- 		if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
- 			continue;
- 
- 		iommu_detected = 1;
- 		gart_iommu_aperture = 1;
- 
--		aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7; 
--		aper_size = (32 * 1024 * 1024) << aper_order; 
-+		aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7;
-+		aper_size = (32 * 1024 * 1024) << aper_order;
- 		aper_base = read_pci_config(0, num, 3, 0x94) & 0x7fff;
--		aper_base <<= 25; 
-+		aper_base <<= 25;
-+
-+		printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n",
-+				node, aper_base, aper_size >> 20);
-+		node++;
- 
--		printk("CPU %d: aperture @ %Lx size %u MB\n", num-24, 
--		       aper_base, aper_size>>20);
--		
- 		if (!aperture_valid(aper_base, aper_size)) {
--			fix = 1; 
--			break; 
-+			fix = 1;
-+			break;
- 		}
- 
- 		if ((last_aper_order && aper_order != last_aper_order) ||
-@@ -245,55 +350,64 @@ void __init gart_iommu_hole_init(void)
- 		}
- 		last_aper_order = aper_order;
- 		last_aper_base = aper_base;
--	} 
-+	}
- 
- 	if (!fix && !fallback_aper_force) {
- 		if (last_aper_base) {
- 			unsigned long n = (32 * 1024 * 1024) << last_aper_order;
-+
- 			insert_aperture_resource((u32)last_aper_base, n);
- 		}
--		return; 
-+		return;
- 	}
- 
- 	if (!fallback_aper_force)
--		aper_alloc = search_agp_bridge(&aper_order, &valid_agp); 
--		
--	if (aper_alloc) { 
-+		aper_alloc = search_agp_bridge(&aper_order, &valid_agp);
-+
-+	if (aper_alloc) {
- 		/* Got the aperture from the AGP bridge */
- 	} else if (swiotlb && !valid_agp) {
- 		/* Do nothing */
- 	} else if ((!no_iommu && end_pfn > MAX_DMA32_PFN) ||
- 		   force_iommu ||
- 		   valid_agp ||
--		   fallback_aper_force) { 
--		printk("Your BIOS doesn't leave a aperture memory hole\n");
--		printk("Please enable the IOMMU option in the BIOS setup\n");
--		printk("This costs you %d MB of RAM\n",
--		       32 << fallback_aper_order);
-+		   fallback_aper_force) {
-+		printk(KERN_ERR
-+			"Your BIOS doesn't leave a aperture memory hole\n");
-+		printk(KERN_ERR
-+			"Please enable the IOMMU option in the BIOS setup\n");
-+		printk(KERN_ERR
-+			"This costs you %d MB of RAM\n",
-+				32 << fallback_aper_order);
- 
- 		aper_order = fallback_aper_order;
- 		aper_alloc = allocate_aperture();
--		if (!aper_alloc) { 
--			/* Could disable AGP and IOMMU here, but it's probably
--			   not worth it. But the later users cannot deal with
--			   bad apertures and turning on the aperture over memory
--			   causes very strange problems, so it's better to 
--			   panic early. */
-+		if (!aper_alloc) {
-+			/*
-+			 * Could disable AGP and IOMMU here, but it's
-+			 * probably not worth it. But the later users
-+			 * cannot deal with bad apertures and turning
-+			 * on the aperture over memory causes very
-+			 * strange problems, so it's better to panic
-+			 * early.
-+			 */
- 			panic("Not enough memory for aperture");
- 		}
--	} else { 
--		return; 
--	} 
-+	} else {
-+		return;
-+	}
- 
- 	/* Fix up the north bridges */
--	for (num = 24; num < 32; num++) { 		
-+	for (num = 24; num < 32; num++) {
- 		if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
--			continue;	
+-	/*
+-	 * Always set TIF_SINGLESTEP - this guarantees that
+-	 * we single-step system calls etc..  This will also
+-	 * cause us to set TF when returning to user mode.
+-	 */
+-	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 -
--		/* Don't enable translation yet. That is done later. 
--		   Assume this BIOS didn't initialise the GART so 
--		   just overwrite all previous bits */ 
--		write_pci_config(0, num, 3, 0x90, aper_order<<1); 
--		write_pci_config(0, num, 3, 0x94, aper_alloc>>25); 
--	} 
--} 
-+			continue;
-+
-+		/*
-+		 * Don't enable translation yet. That is done later.
-+		 * Assume this BIOS didn't initialise the GART so
-+		 * just overwrite all previous bits
-+		 */
-+		write_pci_config(0, num, 3, 0x90, aper_order<<1);
-+		write_pci_config(0, num, 3, 0x94, aper_alloc>>25);
-+	}
-+}
-diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
-index edb5108..35a568e 100644
---- a/arch/x86/kernel/apic_32.c
-+++ b/arch/x86/kernel/apic_32.c
-@@ -43,12 +43,10 @@
- #include <mach_apicdef.h>
- #include <mach_ipi.h>
- 
--#include "io_ports.h"
+-	/*
+-	 * If TF was already set, don't do anything else
+-	 */
+-	if (regs->eflags & TRAP_FLAG)
+-		return;
 -
- /*
-  * Sanity check
-  */
--#if (SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F
-+#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
- # error SPURIOUS_APIC_VECTOR definition error
- #endif
- 
-@@ -57,7 +55,7 @@
-  *
-  * -1=force-disable, +1=force-enable
-  */
--static int enable_local_apic __initdata = 0;
-+static int enable_local_apic __initdata;
- 
- /* Local APIC timer verification ok */
- static int local_apic_timer_verify_ok;
-@@ -101,6 +99,8 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
- /* Local APIC was disabled by the BIOS and enabled by the kernel */
- static int enabled_via_apicbase;
- 
-+static unsigned long apic_phys;
-+
- /*
-  * Get the LAPIC version
-  */
-@@ -110,7 +110,7 @@ static inline int lapic_get_version(void)
- }
- 
- /*
-- * Check, if the APIC is integrated or a seperate chip
-+ * Check, if the APIC is integrated or a separate chip
-  */
- static inline int lapic_is_integrated(void)
- {
-@@ -135,9 +135,9 @@ void apic_wait_icr_idle(void)
- 		cpu_relax();
- }
- 
--unsigned long safe_apic_wait_icr_idle(void)
-+u32 safe_apic_wait_icr_idle(void)
- {
--	unsigned long send_status;
-+	u32 send_status;
- 	int timeout;
- 
- 	timeout = 0;
-@@ -154,7 +154,7 @@ unsigned long safe_apic_wait_icr_idle(void)
- /**
-  * enable_NMI_through_LVT0 - enable NMI through local vector table 0
-  */
--void enable_NMI_through_LVT0 (void * dummy)
-+void __cpuinit enable_NMI_through_LVT0(void)
- {
- 	unsigned int v = APIC_DM_NMI;
- 
-@@ -379,8 +379,10 @@ void __init setup_boot_APIC_clock(void)
- 	 */
- 	if (local_apic_timer_disabled) {
- 		/* No broadcast on UP ! */
--		if (num_possible_cpus() > 1)
-+		if (num_possible_cpus() > 1) {
-+			lapic_clockevent.mult = 1;
- 			setup_APIC_timer();
-+		}
- 		return;
- 	}
- 
-@@ -434,7 +436,7 @@ void __init setup_boot_APIC_clock(void)
- 			       "with PM Timer: %ldms instead of 100ms\n",
- 			       (long)res);
- 			/* Correct the lapic counter value */
--			res = (((u64) delta ) * pm_100ms);
-+			res = (((u64) delta) * pm_100ms);
- 			do_div(res, deltapm);
- 			printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
- 			       "%lu (%ld)\n", (unsigned long) res, delta);
-@@ -472,6 +474,19 @@ void __init setup_boot_APIC_clock(void)
- 
- 	local_apic_timer_verify_ok = 1;
- 
-+	/*
-+	 * Do a sanity check on the APIC calibration result
-+	 */
-+	if (calibration_result < (1000000 / HZ)) {
-+		local_irq_enable();
-+		printk(KERN_WARNING
-+		       "APIC frequency too slow, disabling apic timer\n");
-+		/* No broadcast on UP ! */
-+		if (num_possible_cpus() > 1)
-+			setup_APIC_timer();
-+		return;
-+	}
-+
- 	/* We trust the pm timer based calibration */
- 	if (!pm_referenced) {
- 		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
-@@ -563,6 +578,9 @@ static void local_apic_timer_interrupt(void)
- 		return;
- 	}
- 
-+	/*
-+	 * the NMI deadlock-detector uses this.
-+	 */
- 	per_cpu(irq_stat, cpu).apic_timer_irqs++;
- 
- 	evt->event_handler(evt);
-@@ -576,8 +594,7 @@ static void local_apic_timer_interrupt(void)
-  * [ if a single-CPU system runs an SMP kernel then we call the local
-  *   interrupt as well. Thus we cannot inline the local irq ... ]
-  */
+-	/* Set TF on the kernel stack.. */
+-	regs->eflags |= TRAP_FLAG;
 -
--void fastcall smp_apic_timer_interrupt(struct pt_regs *regs)
-+void smp_apic_timer_interrupt(struct pt_regs *regs)
- {
- 	struct pt_regs *old_regs = set_irq_regs(regs);
- 
-@@ -616,9 +633,14 @@ int setup_profiling_timer(unsigned int multiplier)
-  */
- void clear_local_APIC(void)
- {
--	int maxlvt = lapic_get_maxlvt();
--	unsigned long v;
-+	int maxlvt;
-+	u32 v;
-+
-+	/* APIC hasn't been mapped yet */
-+	if (!apic_phys)
-+		return;
- 
-+	maxlvt = lapic_get_maxlvt();
- 	/*
- 	 * Masking an LVT entry can trigger a local APIC error
- 	 * if the vector is zero. Mask LVTERR first to prevent this.
-@@ -976,7 +998,8 @@ void __cpuinit setup_local_APIC(void)
- 		value |= APIC_LVT_LEVEL_TRIGGER;
- 	apic_write_around(APIC_LVT1, value);
- 
--	if (integrated && !esr_disable) {		/* !82489DX */
-+	if (integrated && !esr_disable) {
-+		/* !82489DX */
- 		maxlvt = lapic_get_maxlvt();
- 		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
- 			apic_write(APIC_ESR, 0);
-@@ -1020,7 +1043,7 @@ void __cpuinit setup_local_APIC(void)
- /*
-  * Detect and initialize APIC
-  */
--static int __init detect_init_APIC (void)
-+static int __init detect_init_APIC(void)
- {
- 	u32 h, l, features;
- 
-@@ -1077,7 +1100,7 @@ static int __init detect_init_APIC (void)
- 		printk(KERN_WARNING "Could not enable APIC!\n");
- 		return -1;
- 	}
--	set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-+	set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
- 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
- 
- 	/* The BIOS may have set up the APIC at some other address */
-@@ -1104,8 +1127,6 @@ no_apic:
-  */
- void __init init_apic_mappings(void)
- {
--	unsigned long apic_phys;
+-	/*
+-	 * ..but if TF is changed by the instruction we will trace,
+-	 * don't mark it as being "us" that set it, so that we
+-	 * won't clear it by hand later.
+-	 */
+-	if (is_setting_trap_flag(child, regs))
+-		return;
 -
- 	/*
- 	 * If no local APIC can be found then set up a fake all
- 	 * zeroes page to simulate the local APIC and another
-@@ -1164,10 +1185,10 @@ fake_ioapic_page:
-  * This initializes the IO-APIC and APIC hardware if this is
-  * a UP kernel.
-  */
--int __init APIC_init_uniprocessor (void)
-+int __init APIC_init_uniprocessor(void)
- {
- 	if (enable_local_apic < 0)
--		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
- 
- 	if (!smp_found_config && !cpu_has_apic)
- 		return -1;
-@@ -1179,7 +1200,7 @@ int __init APIC_init_uniprocessor (void)
- 	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
- 		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
- 		       boot_cpu_physical_apicid);
--		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
- 		return -1;
- 	}
- 
-@@ -1210,50 +1231,6 @@ int __init APIC_init_uniprocessor (void)
- }
- 
- /*
-- * APIC command line parameters
-- */
--static int __init parse_lapic(char *arg)
+-	child->ptrace |= PT_DTRACE;
+-}
+-
+-static void clear_singlestep(struct task_struct *child)
 -{
--	enable_local_apic = 1;
--	return 0;
+-	/* Always clear TIF_SINGLESTEP... */
+-	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+-
+-	/* But touch TF only if it was set by us.. */
+-	if (child->ptrace & PT_DTRACE) {
+-		struct pt_regs *regs = task_pt_regs(child);
+-		regs->eflags &= ~TRAP_FLAG;
+-		child->ptrace &= ~PT_DTRACE;
+-	}
 -}
--early_param("lapic", parse_lapic);
 -
--static int __init parse_nolapic(char *arg)
+-/*
+- * Called by kernel/ptrace.c when detaching..
+- *
+- * Make sure the single step bit is not set.
+- */
+-void ptrace_disable(struct task_struct *child)
+-{ 
+-	clear_singlestep(child);
+-}
+-
+-static int putreg(struct task_struct *child,
+-	unsigned long regno, unsigned long value)
 -{
--	enable_local_apic = -1;
--	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+-	unsigned long tmp; 
+-	
+-	switch (regno) {
+-		case offsetof(struct user_regs_struct,fs):
+-			if (value && (value & 3) != 3)
+-				return -EIO;
+-			child->thread.fsindex = value & 0xffff; 
+-			return 0;
+-		case offsetof(struct user_regs_struct,gs):
+-			if (value && (value & 3) != 3)
+-				return -EIO;
+-			child->thread.gsindex = value & 0xffff;
+-			return 0;
+-		case offsetof(struct user_regs_struct,ds):
+-			if (value && (value & 3) != 3)
+-				return -EIO;
+-			child->thread.ds = value & 0xffff;
+-			return 0;
+-		case offsetof(struct user_regs_struct,es): 
+-			if (value && (value & 3) != 3)
+-				return -EIO;
+-			child->thread.es = value & 0xffff;
+-			return 0;
+-		case offsetof(struct user_regs_struct,ss):
+-			if ((value & 3) != 3)
+-				return -EIO;
+-			value &= 0xffff;
+-			return 0;
+-		case offsetof(struct user_regs_struct,fs_base):
+-			if (value >= TASK_SIZE_OF(child))
+-				return -EIO;
+-			child->thread.fs = value;
+-			return 0;
+-		case offsetof(struct user_regs_struct,gs_base):
+-			if (value >= TASK_SIZE_OF(child))
+-				return -EIO;
+-			child->thread.gs = value;
+-			return 0;
+-		case offsetof(struct user_regs_struct, eflags):
+-			value &= FLAG_MASK;
+-			tmp = get_stack_long(child, EFL_OFFSET); 
+-			tmp &= ~FLAG_MASK; 
+-			value |= tmp;
+-			break;
+-		case offsetof(struct user_regs_struct,cs): 
+-			if ((value & 3) != 3)
+-				return -EIO;
+-			value &= 0xffff;
+-			break;
+-	}
+-	put_stack_long(child, regno - sizeof(struct pt_regs), value);
 -	return 0;
 -}
--early_param("nolapic", parse_nolapic);
 -
--static int __init parse_disable_lapic_timer(char *arg)
+-static unsigned long getreg(struct task_struct *child, unsigned long regno)
 -{
--	local_apic_timer_disabled = 1;
--	return 0;
+-	unsigned long val;
+-	switch (regno) {
+-		case offsetof(struct user_regs_struct, fs):
+-			return child->thread.fsindex;
+-		case offsetof(struct user_regs_struct, gs):
+-			return child->thread.gsindex;
+-		case offsetof(struct user_regs_struct, ds):
+-			return child->thread.ds;
+-		case offsetof(struct user_regs_struct, es):
+-			return child->thread.es; 
+-		case offsetof(struct user_regs_struct, fs_base):
+-			return child->thread.fs;
+-		case offsetof(struct user_regs_struct, gs_base):
+-			return child->thread.gs;
+-		default:
+-			regno = regno - sizeof(struct pt_regs);
+-			val = get_stack_long(child, regno);
+-			if (test_tsk_thread_flag(child, TIF_IA32))
+-				val &= 0xffffffff;
+-			return val;
+-	}
+-
 -}
--early_param("nolapic_timer", parse_disable_lapic_timer);
 -
--static int __init parse_lapic_timer_c2_ok(char *arg)
+-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 -{
--	local_apic_timer_c2_ok = 1;
--	return 0;
+-	long i, ret;
+-	unsigned ui;
+-
+-	switch (request) {
+-	/* when I and D space are separate, these will need to be fixed. */
+-	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
+-	case PTRACE_PEEKDATA:
+-		ret = generic_ptrace_peekdata(child, addr, data);
+-		break;
+-
+-	/* read the word at location addr in the USER area. */
+-	case PTRACE_PEEKUSR: {
+-		unsigned long tmp;
+-
+-		ret = -EIO;
+-		if ((addr & 7) ||
+-		    addr > sizeof(struct user) - 7)
+-			break;
+-
+-		switch (addr) { 
+-		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
+-			tmp = getreg(child, addr);
+-			break;
+-		case offsetof(struct user, u_debugreg[0]):
+-			tmp = child->thread.debugreg0;
+-			break;
+-		case offsetof(struct user, u_debugreg[1]):
+-			tmp = child->thread.debugreg1;
+-			break;
+-		case offsetof(struct user, u_debugreg[2]):
+-			tmp = child->thread.debugreg2;
+-			break;
+-		case offsetof(struct user, u_debugreg[3]):
+-			tmp = child->thread.debugreg3;
+-			break;
+-		case offsetof(struct user, u_debugreg[6]):
+-			tmp = child->thread.debugreg6;
+-			break;
+-		case offsetof(struct user, u_debugreg[7]):
+-			tmp = child->thread.debugreg7;
+-			break;
+-		default:
+-			tmp = 0;
+-			break;
+-		}
+-		ret = put_user(tmp,(unsigned long __user *) data);
+-		break;
+-	}
+-
+-	/* when I and D space are separate, this will have to be fixed. */
+-	case PTRACE_POKETEXT: /* write the word at location addr. */
+-	case PTRACE_POKEDATA:
+-		ret = generic_ptrace_pokedata(child, addr, data);
+-		break;
+-
+-	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+-	{
+-		int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
+-		ret = -EIO;
+-		if ((addr & 7) ||
+-		    addr > sizeof(struct user) - 7)
+-			break;
+-
+-		switch (addr) { 
+-		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
+-			ret = putreg(child, addr, data);
+-			break;
+-		/* Disallows to set a breakpoint into the vsyscall */
+-		case offsetof(struct user, u_debugreg[0]):
+-			if (data >= TASK_SIZE_OF(child) - dsize) break;
+-			child->thread.debugreg0 = data;
+-			ret = 0;
+-			break;
+-		case offsetof(struct user, u_debugreg[1]):
+-			if (data >= TASK_SIZE_OF(child) - dsize) break;
+-			child->thread.debugreg1 = data;
+-			ret = 0;
+-			break;
+-		case offsetof(struct user, u_debugreg[2]):
+-			if (data >= TASK_SIZE_OF(child) - dsize) break;
+-			child->thread.debugreg2 = data;
+-			ret = 0;
+-			break;
+-		case offsetof(struct user, u_debugreg[3]):
+-			if (data >= TASK_SIZE_OF(child) - dsize) break;
+-			child->thread.debugreg3 = data;
+-			ret = 0;
+-			break;
+-		case offsetof(struct user, u_debugreg[6]):
+-				  if (data >> 32)
+-				break; 
+-			child->thread.debugreg6 = data;
+-			ret = 0;
+-			break;
+-		case offsetof(struct user, u_debugreg[7]):
+-			/* See arch/i386/kernel/ptrace.c for an explanation of
+-			 * this awkward check.*/
+-			data &= ~DR_CONTROL_RESERVED;
+-			for(i=0; i<4; i++)
+-				if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
+-					break;
+-			if (i == 4) {
+-			  child->thread.debugreg7 = data;
+-			  if (data)
+-			  	set_tsk_thread_flag(child, TIF_DEBUG);
+-			  else
+-			  	clear_tsk_thread_flag(child, TIF_DEBUG);
+-			  ret = 0;
+-		  	}
+-		  break;
+-		}
+-		break;
+-	}
+-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+-	case PTRACE_CONT:    /* restart after signal. */
+-
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		if (request == PTRACE_SYSCALL)
+-			set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
+-		else
+-			clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
+-		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+-		child->exit_code = data;
+-		/* make sure the single step bit is not set. */
+-		clear_singlestep(child);
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
+-
+-#ifdef CONFIG_IA32_EMULATION
+-		/* This makes only sense with 32bit programs. Allow a
+-		   64bit debugger to fully examine them too. Better
+-		   don't use it against 64bit processes, use
+-		   PTRACE_ARCH_PRCTL instead. */
+-	case PTRACE_SET_THREAD_AREA: {
+-		struct user_desc __user *p;
+-		int old; 
+-		p = (struct user_desc __user *)data;
+-		get_user(old,  &p->entry_number); 
+-		put_user(addr, &p->entry_number);
+-		ret = do_set_thread_area(&child->thread, p);
+-		put_user(old,  &p->entry_number); 
+-		break;
+-	case PTRACE_GET_THREAD_AREA:
+-		p = (struct user_desc __user *)data;
+-		get_user(old,  &p->entry_number); 
+-		put_user(addr, &p->entry_number);
+-		ret = do_get_thread_area(&child->thread, p);
+-		put_user(old,  &p->entry_number); 
+-		break;
+-	} 
+-#endif
+-		/* normal 64bit interface to access TLS data. 
+-		   Works just like arch_prctl, except that the arguments
+-		   are reversed. */
+-	case PTRACE_ARCH_PRCTL: 
+-		ret = do_arch_prctl(child, data, addr);
+-		break;
+-
+-/*
+- * make the child exit.  Best I can do is send it a sigkill. 
+- * perhaps it should be put in the status that it wants to 
+- * exit.
+- */
+-	case PTRACE_KILL:
+-		ret = 0;
+-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
+-			break;
+-		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+-		child->exit_code = SIGKILL;
+-		/* make sure the single step bit is not set. */
+-		clear_singlestep(child);
+-		wake_up_process(child);
+-		break;
+-
+-	case PTRACE_SINGLESTEP:    /* set the trap flag. */
+-		ret = -EIO;
+-		if (!valid_signal(data))
+-			break;
+-		clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
+-		set_singlestep(child);
+-		child->exit_code = data;
+-		/* give it a chance to run. */
+-		wake_up_process(child);
+-		ret = 0;
+-		break;
+-
+-	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+-	  	if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
+-			       sizeof(struct user_regs_struct))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		ret = 0;
+-		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
+-			ret |= __put_user(getreg(child, ui),(unsigned long __user *) data);
+-			data += sizeof(long);
+-		}
+-		break;
+-	}
+-
+-	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+-		unsigned long tmp;
+-	  	if (!access_ok(VERIFY_READ, (unsigned __user *)data,
+-			       sizeof(struct user_regs_struct))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		ret = 0;
+-		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
+-			ret = __get_user(tmp, (unsigned long __user *) data);
+-			if (ret)
+-				break;
+-			ret = putreg(child, ui, tmp);
+-			if (ret)
+-				break;
+-			data += sizeof(long);
+-		}
+-		break;
+-	}
+-
+-	case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
+-		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
+-			       sizeof(struct user_i387_struct))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		ret = get_fpregs((struct user_i387_struct __user *)data, child);
+-		break;
+-	}
+-
+-	case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
+-		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
+-			       sizeof(struct user_i387_struct))) {
+-			ret = -EIO;
+-			break;
+-		}
+-		set_stopped_child_used_math(child);
+-		ret = set_fpregs(child, (struct user_i387_struct __user *)data);
+-		break;
+-	}
+-
+-	default:
+-		ret = ptrace_request(child, request, addr, data);
+-		break;
+-	}
+-	return ret;
 -}
--early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
 -
--static int __init apic_set_verbosity(char *str)
+-static void syscall_trace(struct pt_regs *regs)
 -{
--	if (strcmp("debug", str) == 0)
--		apic_verbosity = APIC_DEBUG;
--	else if (strcmp("verbose", str) == 0)
--		apic_verbosity = APIC_VERBOSE;
--	return 1;
+-
+-#if 0
+-	printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
+-	       current->comm,
+-	       regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
+-	       current_thread_info()->flags, current->ptrace); 
+-#endif
+-
+-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+-				? 0x80 : 0));
+-	/*
+-	 * this isn't the same as continuing with a signal, but it will do
+-	 * for normal use.  strace only continues with a signal if the
+-	 * stopping signal is not SIGTRAP.  -brl
+-	 */
+-	if (current->exit_code) {
+-		send_sig(current->exit_code, current, 1);
+-		current->exit_code = 0;
+-	}
 -}
 -
--__setup("apic=", apic_set_verbosity);
+-asmlinkage void syscall_trace_enter(struct pt_regs *regs)
+-{
+-	/* do the secure computing check first */
+-	secure_computing(regs->orig_rax);
 -
+-	if (test_thread_flag(TIF_SYSCALL_TRACE)
+-	    && (current->ptrace & PT_PTRACED))
+-		syscall_trace(regs);
 -
--/*
-  * Local APIC interrupts
-  */
- 
-@@ -1306,7 +1283,7 @@ void smp_error_interrupt(struct pt_regs *regs)
- 	   6: Received illegal vector
- 	   7: Illegal register address
- 	*/
--	printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
-+	printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
- 		smp_processor_id(), v , v1);
- 	irq_exit();
- }
-@@ -1393,7 +1370,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
- 			value = apic_read(APIC_LVT0);
- 			value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
- 				APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
--				APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
-+				APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
- 			value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
- 			value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
- 			apic_write_around(APIC_LVT0, value);
-@@ -1530,7 +1507,7 @@ static int lapic_resume(struct sys_device *dev)
-  */
+-	if (unlikely(current->audit_context)) {
+-		if (test_thread_flag(TIF_IA32)) {
+-			audit_syscall_entry(AUDIT_ARCH_I386,
+-					    regs->orig_rax,
+-					    regs->rbx, regs->rcx,
+-					    regs->rdx, regs->rsi);
+-		} else {
+-			audit_syscall_entry(AUDIT_ARCH_X86_64,
+-					    regs->orig_rax,
+-					    regs->rdi, regs->rsi,
+-					    regs->rdx, regs->r10);
+-		}
+-	}
+-}
+-
+-asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+-{
+-	if (unlikely(current->audit_context))
+-		audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);
+-
+-	if ((test_thread_flag(TIF_SYSCALL_TRACE)
+-	     || test_thread_flag(TIF_SINGLESTEP))
+-	    && (current->ptrace & PT_PTRACED))
+-		syscall_trace(regs);
+-}
+diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
+index fab30e1..150ba29 100644
+--- a/arch/x86/kernel/quirks.c
++++ b/arch/x86/kernel/quirks.c
+@@ -162,6 +162,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
+ 			 ich_force_enable_hpet);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
+ 			 ich_force_enable_hpet);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
++			 ich_force_enable_hpet);
  
- static struct sysdev_class lapic_sysclass = {
--	set_kset_name("lapic"),
-+	.name		= "lapic",
- 	.resume		= lapic_resume,
- 	.suspend	= lapic_suspend,
- };
-@@ -1565,3 +1542,46 @@ device_initcall(init_lapic_sysfs);
- static void apic_pm_activate(void) { }
  
- #endif	/* CONFIG_PM */
+ static struct pci_dev *cached_dev;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+new file mode 100644
+index 0000000..5818dc2
+--- /dev/null
++++ b/arch/x86/kernel/reboot.c
+@@ -0,0 +1,451 @@
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/pm.h>
++#include <linux/efi.h>
++#include <acpi/reboot.h>
++#include <asm/io.h>
++#include <asm/apic.h>
++#include <asm/desc.h>
++#include <asm/hpet.h>
++#include <asm/reboot_fixups.h>
++#include <asm/reboot.h>
++
++#ifdef CONFIG_X86_32
++# include <linux/dmi.h>
++# include <linux/ctype.h>
++# include <linux/mc146818rtc.h>
++# include <asm/pgtable.h>
++#else
++# include <asm/iommu.h>
++#endif
 +
 +/*
-+ * APIC command line parameters
++ * Power off function, if any
 + */
-+static int __init parse_lapic(char *arg)
-+{
-+	enable_local_apic = 1;
-+	return 0;
-+}
-+early_param("lapic", parse_lapic);
++void (*pm_power_off)(void);
++EXPORT_SYMBOL(pm_power_off);
 +
-+static int __init parse_nolapic(char *arg)
-+{
-+	enable_local_apic = -1;
-+	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
-+	return 0;
-+}
-+early_param("nolapic", parse_nolapic);
++static long no_idt[3];
++static int reboot_mode;
++enum reboot_type reboot_type = BOOT_KBD;
++int reboot_force;
 +
-+static int __init parse_disable_lapic_timer(char *arg)
-+{
-+	local_apic_timer_disabled = 1;
-+	return 0;
-+}
-+early_param("nolapic_timer", parse_disable_lapic_timer);
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
++static int reboot_cpu = -1;
++#endif
 +
-+static int __init parse_lapic_timer_c2_ok(char *arg)
++/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old]
++   warm   Don't set the cold reboot flag
++   cold   Set the cold reboot flag
++   bios   Reboot by jumping through the BIOS (only for X86_32)
++   smp    Reboot by executing reset on BSP or other CPU (only for X86_32)
++   triple Force a triple fault (init)
++   kbd    Use the keyboard controller. cold reset (default)
++   acpi   Use the RESET_REG in the FADT
++   efi    Use efi reset_system runtime service
++   force  Avoid anything that could hang.
++ */
++static int __init reboot_setup(char *str)
 +{
-+	local_apic_timer_c2_ok = 1;
-+	return 0;
-+}
-+early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
++	for (;;) {
++		switch (*str) {
++		case 'w':
++			reboot_mode = 0x1234;
++			break;
 +
-+static int __init apic_set_verbosity(char *str)
-+{
-+	if (strcmp("debug", str) == 0)
-+		apic_verbosity = APIC_DEBUG;
-+	else if (strcmp("verbose", str) == 0)
-+		apic_verbosity = APIC_VERBOSE;
++		case 'c':
++			reboot_mode = 0;
++			break;
++
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_SMP
++		case 's':
++			if (isdigit(*(str+1))) {
++				reboot_cpu = (int) (*(str+1) - '0');
++				if (isdigit(*(str+2)))
++					reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
++			}
++				/* we will leave sorting out the final value
++				   when we are ready to reboot, since we might not
++				   have set up boot_cpu_id or smp_num_cpu */
++			break;
++#endif /* CONFIG_SMP */
++
++		case 'b':
++#endif
++		case 'a':
++		case 'k':
++		case 't':
++		case 'e':
++			reboot_type = *str;
++			break;
++
++		case 'f':
++			reboot_force = 1;
++			break;
++		}
++
++		str = strchr(str, ',');
++		if (str)
++			str++;
++		else
++			break;
++	}
 +	return 1;
 +}
-+__setup("apic=", apic_set_verbosity);
 +
-diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
-index f28ccb5..d8d03e0 100644
---- a/arch/x86/kernel/apic_64.c
-+++ b/arch/x86/kernel/apic_64.c
-@@ -23,32 +23,37 @@
- #include <linux/mc146818rtc.h>
- #include <linux/kernel_stat.h>
- #include <linux/sysdev.h>
--#include <linux/module.h>
- #include <linux/ioport.h>
- #include <linux/clockchips.h>
-+#include <linux/acpi_pmtmr.h>
-+#include <linux/module.h>
- 
- #include <asm/atomic.h>
- #include <asm/smp.h>
- #include <asm/mtrr.h>
- #include <asm/mpspec.h>
-+#include <asm/hpet.h>
- #include <asm/pgalloc.h>
- #include <asm/mach_apic.h>
- #include <asm/nmi.h>
- #include <asm/idle.h>
- #include <asm/proto.h>
- #include <asm/timex.h>
--#include <asm/hpet.h>
- #include <asm/apic.h>
- 
--int apic_verbosity;
- int disable_apic_timer __cpuinitdata;
- static int apic_calibrate_pmtmr __initdata;
-+int disable_apic;
- 
--/* Local APIC timer works in C2? */
-+/* Local APIC timer works in C2 */
- int local_apic_timer_c2_ok;
- EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
- 
--static struct resource *ioapic_resources;
-+/*
-+ * Debug level, exported for io_apic.c
-+ */
-+int apic_verbosity;
++__setup("reboot=", reboot_setup);
 +
- static struct resource lapic_resource = {
- 	.name = "Local APIC",
- 	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
-@@ -60,10 +65,8 @@ static int lapic_next_event(unsigned long delta,
- 			    struct clock_event_device *evt);
- static void lapic_timer_setup(enum clock_event_mode mode,
- 			      struct clock_event_device *evt);
--
- static void lapic_timer_broadcast(cpumask_t mask);
--
--static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen);
-+static void apic_pm_activate(void);
- 
- static struct clock_event_device lapic_clockevent = {
- 	.name		= "lapic",
-@@ -78,6 +81,150 @@ static struct clock_event_device lapic_clockevent = {
- };
- static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
- 
-+static unsigned long apic_phys;
 +
++#ifdef CONFIG_X86_32
 +/*
-+ * Get the LAPIC version
++ * Reboot options and system auto-detection code provided by
++ * Dell Inc. so their systems "just work". :-)
 + */
-+static inline int lapic_get_version(void)
-+{
-+	return GET_APIC_VERSION(apic_read(APIC_LVR));
-+}
 +
 +/*
-+ * Check, if the APIC is integrated or a seperate chip
++ * Some machines require the "reboot=b"  commandline option,
++ * this quirk makes that automatic.
 + */
-+static inline int lapic_is_integrated(void)
++static int __init set_bios_reboot(const struct dmi_system_id *d)
 +{
-+	return 1;
++	if (reboot_type != BOOT_BIOS) {
++		reboot_type = BOOT_BIOS;
++		printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
++	}
++	return 0;
 +}
 +
-+/*
-+ * Check, whether this is a modern or a first generation APIC
-+ */
-+static int modern_apic(void)
-+{
-+	/* AMD systems use old APIC versions, so check the CPU */
-+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+	    boot_cpu_data.x86 >= 0xf)
-+		return 1;
-+	return lapic_get_version() >= 0x14;
-+}
++static struct dmi_system_id __initdata reboot_dmi_table[] = {
++	{	/* Handle problems with rebooting on Dell E520's */
++		.callback = set_bios_reboot,
++		.ident = "Dell E520",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
++		},
++	},
++	{	/* Handle problems with rebooting on Dell 1300's */
++		.callback = set_bios_reboot,
++		.ident = "Dell PowerEdge 1300",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
++		},
++	},
++	{	/* Handle problems with rebooting on Dell 300's */
++		.callback = set_bios_reboot,
++		.ident = "Dell PowerEdge 300",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
++		},
++	},
++	{       /* Handle problems with rebooting on Dell Optiplex 745's SFF*/
++		.callback = set_bios_reboot,
++		.ident = "Dell OptiPlex 745",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
++			DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
++		},
++	},
++	{	/* Handle problems with rebooting on Dell 2400's */
++		.callback = set_bios_reboot,
++		.ident = "Dell PowerEdge 2400",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
++		},
++	},
++	{	/* Handle problems with rebooting on HP laptops */
++		.callback = set_bios_reboot,
++		.ident = "HP Compaq Laptop",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
++		},
++	},
++	{ }
++};
 +
-+void apic_wait_icr_idle(void)
++static int __init reboot_init(void)
 +{
-+	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
-+		cpu_relax();
++	dmi_check_system(reboot_dmi_table);
++	return 0;
 +}
++core_initcall(reboot_init);
 +
-+u32 safe_apic_wait_icr_idle(void)
++/* The following code and data reboots the machine by switching to real
++   mode and jumping to the BIOS reset entry point, as if the CPU has
++   really been reset.  The previous version asked the keyboard
++   controller to pulse the CPU reset line, which is more thorough, but
++   doesn't work with at least one type of 486 motherboard.  It is easy
++   to stop this code working; hence the copious comments. */
++static unsigned long long
++real_mode_gdt_entries [3] =
 +{
-+	u32 send_status;
-+	int timeout;
++	0x0000000000000000ULL,	/* Null descriptor */
++	0x00009a000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
++	0x000092000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
++};
 +
-+	timeout = 0;
-+	do {
-+		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-+		if (!send_status)
-+			break;
-+		udelay(100);
-+	} while (timeout++ < 1000);
++static struct desc_ptr
++real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
++real_mode_idt = { 0x3ff, 0 };
 +
-+	return send_status;
-+}
++/* This is 16-bit protected mode code to disable paging and the cache,
++   switch to real mode and jump to the BIOS reset code.
 +
-+/**
-+ * enable_NMI_through_LVT0 - enable NMI through local vector table 0
-+ */
-+void __cpuinit enable_NMI_through_LVT0(void)
-+{
-+	unsigned int v;
++   The instruction that switches to real mode by writing to CR0 must be
++   followed immediately by a far jump instruction, which set CS to a
++   valid value for real mode, and flushes the prefetch queue to avoid
++   running instructions that have already been decoded in protected
++   mode.
 +
-+	/* unmask and set to NMI */
-+	v = APIC_DM_NMI;
-+	apic_write(APIC_LVT0, v);
-+}
++   Clears all the flags except ET, especially PG (paging), PE
++   (protected-mode enable) and TS (task switch for coprocessor state
++   save).  Flushes the TLB after paging has been disabled.  Sets CD and
++   NW, to disable the cache on a 486, and invalidates the cache.  This
++   is more like the state of a 486 after reset.  I don't know if
++   something else should be done for other chips.
 +
-+/**
-+ * lapic_get_maxlvt - get the maximum number of local vector table entries
-+ */
-+int lapic_get_maxlvt(void)
++   More could be done here to set up the registers as if a CPU reset had
++   occurred; hopefully real BIOSs don't assume much. */
++static unsigned char real_mode_switch [] =
 +{
-+	unsigned int v, maxlvt;
-+
-+	v = apic_read(APIC_LVR);
-+	maxlvt = GET_APIC_MAXLVT(v);
-+	return maxlvt;
-+}
++	0x66, 0x0f, 0x20, 0xc0,			/*    movl  %cr0,%eax        */
++	0x66, 0x83, 0xe0, 0x11,			/*    andl  $0x00000011,%eax */
++	0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,	/*    orl   $0x60000000,%eax */
++	0x66, 0x0f, 0x22, 0xc0,			/*    movl  %eax,%cr0        */
++	0x66, 0x0f, 0x22, 0xd8,			/*    movl  %eax,%cr3        */
++	0x66, 0x0f, 0x20, 0xc3,			/*    movl  %cr0,%ebx        */
++	0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60,	/*    andl  $0x60000000,%ebx */
++	0x74, 0x02,				/*    jz    f                */
++	0x0f, 0x09,				/*    wbinvd                 */
++	0x24, 0x10,				/* f: andb  $0x10,al         */
++	0x66, 0x0f, 0x22, 0xc0			/*    movl  %eax,%cr0        */
++};
++static unsigned char jump_to_bios [] =
++{
++	0xea, 0x00, 0x00, 0xff, 0xff		/*    ljmp  $0xffff,$0x0000  */
++};
 +
 +/*
-+ * This function sets up the local APIC timer, with a timeout of
-+ * 'clocks' APIC bus clock. During calibration we actually call
-+ * this function twice on the boot CPU, once with a bogus timeout
-+ * value, second time for real. The other (noncalibrating) CPUs
-+ * call this function only once, with the real, calibrated value.
-+ *
-+ * We do reads before writes even if unnecessary, to get around the
-+ * P5 APIC double write bug.
++ * Switch to real mode and then execute the code
++ * specified by the code and length parameters.
++ * We assume that length will aways be less that 100!
 + */
-+
-+static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
++void machine_real_restart(unsigned char *code, int length)
 +{
-+	unsigned int lvtt_value, tmp_value;
++	local_irq_disable();
 +
-+	lvtt_value = LOCAL_TIMER_VECTOR;
-+	if (!oneshot)
-+		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
-+	if (!irqen)
-+		lvtt_value |= APIC_LVT_MASKED;
++	/* Write zero to CMOS register number 0x0f, which the BIOS POST
++	   routine will recognize as telling it to do a proper reboot.  (Well
++	   that's what this book in front of me says -- it may only apply to
++	   the Phoenix BIOS though, it's not clear).  At the same time,
++	   disable NMIs by setting the top bit in the CMOS address register,
++	   as we're about to do peculiar things to the CPU.  I'm not sure if
++	   `outb_p' is needed instead of just `outb'.  Use it to be on the
++	   safe side.  (Yes, CMOS_WRITE does outb_p's. -  Paul G.)
++	 */
++	spin_lock(&rtc_lock);
++	CMOS_WRITE(0x00, 0x8f);
++	spin_unlock(&rtc_lock);
 +
-+	apic_write(APIC_LVTT, lvtt_value);
++	/* Remap the kernel at virtual address zero, as well as offset zero
++	   from the kernel segment.  This assumes the kernel segment starts at
++	   virtual address PAGE_OFFSET. */
++	memcpy(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
++		sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
 +
 +	/*
-+	 * Divide PICLK by 16
++	 * Use `swapper_pg_dir' as our page directory.
 +	 */
-+	tmp_value = apic_read(APIC_TDCR);
-+	apic_write(APIC_TDCR, (tmp_value
-+				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
-+				| APIC_TDR_DIV_16);
++	load_cr3(swapper_pg_dir);
 +
-+	if (!oneshot)
-+		apic_write(APIC_TMICT, clocks);
-+}
++	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
++	   this on booting to tell it to "Bypass memory test (also warm
++	   boot)".  This seems like a fairly standard thing that gets set by
++	   REBOOT.COM programs, and the previous reset routine did this
++	   too. */
++	*((unsigned short *)0x472) = reboot_mode;
 +
-+/*
-+ * Setup extended LVT, AMD specific (K8, family 10h)
-+ *
-+ * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
-+ * MCE interrupts are supported. Thus MCE offset must be set to 0.
-+ */
++	/* For the switch to real mode, copy some code to low memory.  It has
++	   to be in the first 64k because it is running in 16-bit mode, and it
++	   has to have the same physical and virtual address, because it turns
++	   off paging.  Copy it near the end of the first page, out of the way
++	   of BIOS variables. */
++	memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
++		real_mode_switch, sizeof (real_mode_switch));
++	memcpy((void *)(0x1000 - 100), code, length);
 +
-+#define APIC_EILVT_LVTOFF_MCE 0
-+#define APIC_EILVT_LVTOFF_IBS 1
++	/* Set up the IDT for real mode. */
++	load_idt(&real_mode_idt);
 +
-+static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
-+{
-+	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
-+	unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
++	/* Set up a GDT from which we can load segment descriptors for real
++	   mode.  The GDT is not used in real mode; it is just needed here to
++	   prepare the descriptors. */
++	load_gdt(&real_mode_gdt);
 +
-+	apic_write(reg, v);
-+}
++	/* Load the data segment registers, and thus the descriptors ready for
++	   real mode.  The base address of each segment is 0x100, 16 times the
++	   selector value being loaded here.  This is so that the segment
++	   registers don't have to be reloaded after switching to real mode:
++	   the values are consistent for real mode operation already. */
++	__asm__ __volatile__ ("movl $0x0010,%%eax\n"
++				"\tmovl %%eax,%%ds\n"
++				"\tmovl %%eax,%%es\n"
++				"\tmovl %%eax,%%fs\n"
++				"\tmovl %%eax,%%gs\n"
++				"\tmovl %%eax,%%ss" : : : "eax");
 +
-+u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
-+{
-+	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
-+	return APIC_EILVT_LVTOFF_MCE;
++	/* Jump to the 16-bit code that we copied earlier.  It disables paging
++	   and the cache, switches to real mode, and jumps to the BIOS reset
++	   entry point. */
++	__asm__ __volatile__ ("ljmp $0x0008,%0"
++				:
++				: "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
 +}
++#ifdef CONFIG_APM_MODULE
++EXPORT_SYMBOL(machine_real_restart);
++#endif
 +
-+u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
-+{
-+	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
-+	return APIC_EILVT_LVTOFF_IBS;
-+}
++#endif /* CONFIG_X86_32 */
 +
-+/*
-+ * Program the next event, relative to now
-+ */
- static int lapic_next_event(unsigned long delta,
- 			    struct clock_event_device *evt)
- {
-@@ -85,6 +232,9 @@ static int lapic_next_event(unsigned long delta,
- 	return 0;
- }
- 
-+/*
-+ * Setup the lapic timer in periodic or oneshot mode
-+ */
- static void lapic_timer_setup(enum clock_event_mode mode,
- 			      struct clock_event_device *evt)
- {
-@@ -127,75 +277,261 @@ static void lapic_timer_broadcast(cpumask_t mask)
- #endif
- }
- 
--static void apic_pm_activate(void);
-+/*
-+ * Setup the local APIC timer for this CPU. Copy the initilized values
-+ * of the boot CPU and register the clock event in the framework.
-+ */
-+static void setup_APIC_timer(void)
++static inline void kb_wait(void)
 +{
-+	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
- 
--void apic_wait_icr_idle(void)
-+	memcpy(levt, &lapic_clockevent, sizeof(*levt));
-+	levt->cpumask = cpumask_of_cpu(smp_processor_id());
++	int i;
 +
-+	clockevents_register_device(levt);
++	for (i = 0; i < 0x10000; i++) {
++		if ((inb(0x64) & 0x02) == 0)
++			break;
++		udelay(2);
++	}
 +}
 +
-+/*
-+ * In this function we calibrate APIC bus clocks to the external
-+ * timer. Unfortunately we cannot use jiffies and the timer irq
-+ * to calibrate, since some later bootup code depends on getting
-+ * the first irq? Ugh.
-+ *
-+ * We want to do the calibration only once since we
-+ * want to have local timer irqs syncron. CPUs connected
-+ * by the same APIC bus have the very same bus frequency.
-+ * And we want to have irqs off anyways, no accidental
-+ * APIC irq that way.
-+ */
-+
-+#define TICK_COUNT 100000000
-+
-+static void __init calibrate_APIC_clock(void)
- {
--	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
--		cpu_relax();
-+	unsigned apic, apic_start;
-+	unsigned long tsc, tsc_start;
-+	int result;
++void machine_emergency_restart(void)
++{
++	int i;
 +
-+	local_irq_disable();
++	/* Tell the BIOS if we want cold or warm reboot */
++	*((unsigned short *)__va(0x472)) = reboot_mode;
 +
-+	/*
-+	 * Put whatever arbitrary (but long enough) timeout
-+	 * value into the APIC clock, we just want to get the
-+	 * counter running for calibration.
-+	 *
-+	 * No interrupt enable !
-+	 */
-+	__setup_APIC_LVTT(250000000, 0, 0);
++	for (;;) {
++		/* Could also try the reset bit in the Hammer NB */
++		switch (reboot_type) {
++		case BOOT_KBD:
++			for (i = 0; i < 10; i++) {
++				kb_wait();
++				udelay(50);
++				outb(0xfe, 0x64); /* pulse reset low */
++				udelay(50);
++			}
 +
-+	apic_start = apic_read(APIC_TMCCT);
-+#ifdef CONFIG_X86_PM_TIMER
-+	if (apic_calibrate_pmtmr && pmtmr_ioport) {
-+		pmtimer_wait(5000);  /* 5ms wait */
-+		apic = apic_read(APIC_TMCCT);
-+		result = (apic_start - apic) * 1000L / 5;
-+	} else
-+#endif
-+	{
-+		rdtscll(tsc_start);
++		case BOOT_TRIPLE:
++			load_idt((const struct desc_ptr *)&no_idt);
++			__asm__ __volatile__("int3");
 +
-+		do {
-+			apic = apic_read(APIC_TMCCT);
-+			rdtscll(tsc);
-+		} while ((tsc - tsc_start) < TICK_COUNT &&
-+				(apic_start - apic) < TICK_COUNT);
++			reboot_type = BOOT_KBD;
++			break;
 +
-+		result = (apic_start - apic) * 1000L * tsc_khz /
-+					(tsc - tsc_start);
-+	}
++#ifdef CONFIG_X86_32
++		case BOOT_BIOS:
++			machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
 +
-+	local_irq_enable();
++			reboot_type = BOOT_KBD;
++			break;
++#endif
 +
-+	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
++		case BOOT_ACPI:
++			acpi_reboot();
++			reboot_type = BOOT_KBD;
++			break;
 +
-+	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
-+		result / 1000 / 1000, result / 1000 % 1000);
 +
-+	/* Calculate the scaled math multiplication factor */
-+	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
-+	lapic_clockevent.max_delta_ns =
-+		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
-+	lapic_clockevent.min_delta_ns =
-+		clockevent_delta2ns(0xF, &lapic_clockevent);
++		case BOOT_EFI:
++			if (efi_enabled)
++				efi.reset_system(reboot_mode ? EFI_RESET_WARM : EFI_RESET_COLD,
++						 EFI_SUCCESS, 0, NULL);
 +
-+	calibration_result = result / HZ;
- }
- 
--unsigned int safe_apic_wait_icr_idle(void)
-+/*
-+ * Setup the boot APIC
-+ *
-+ * Calibrate and verify the result.
-+ */
-+void __init setup_boot_APIC_clock(void)
- {
--	unsigned int send_status;
--	int timeout;
-+	/*
-+	 * The local apic timer can be disabled via the kernel commandline.
-+	 * Register the lapic timer as a dummy clock event source on SMP
-+	 * systems, so the broadcast mechanism is used. On UP systems simply
-+	 * ignore it.
-+	 */
-+	if (disable_apic_timer) {
-+		printk(KERN_INFO "Disabling APIC timer\n");
-+		/* No broadcast on UP ! */
-+		if (num_possible_cpus() > 1) {
-+			lapic_clockevent.mult = 1;
-+			setup_APIC_timer();
++			reboot_type = BOOT_KBD;
++			break;
 +		}
-+		return;
-+	}
- 
--	timeout = 0;
--	do {
--		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
--		if (!send_status)
--			break;
--		udelay(100);
--	} while (timeout++ < 1000);
-+	printk(KERN_INFO "Using local APIC timer interrupts.\n");
-+	calibrate_APIC_clock();
- 
--	return send_status;
-+	/*
-+	 * Do a sanity check on the APIC calibration result
-+	 */
-+	if (calibration_result < (1000000 / HZ)) {
-+		printk(KERN_WARNING
-+		       "APIC frequency too slow, disabling apic timer\n");
-+		/* No broadcast on UP ! */
-+		if (num_possible_cpus() > 1)
-+			setup_APIC_timer();
-+		return;
-+	}
-+
-+	/*
-+	 * If nmi_watchdog is set to IO_APIC, we need the
-+	 * PIT/HPET going.  Otherwise register lapic as a dummy
-+	 * device.
-+	 */
-+	if (nmi_watchdog != NMI_IO_APIC)
-+		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
-+	else
-+		printk(KERN_WARNING "APIC timer registered as dummy,"
-+		       " due to nmi_watchdog=1!\n");
-+
-+	setup_APIC_timer();
- }
- 
--void enable_NMI_through_LVT0 (void * dummy)
-+/*
-+ * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
-+ * C1E flag only in the secondary CPU, so when we detect the wreckage
-+ * we already have enabled the boot CPU local apic timer. Check, if
-+ * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
-+ * set the DUMMY flag again and force the broadcast mode in the
-+ * clockevents layer.
-+ */
-+void __cpuinit check_boot_apic_timer_broadcast(void)
- {
--	unsigned int v;
-+	if (!disable_apic_timer ||
-+	    (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
-+		return;
- 
--	/* unmask and set to NMI */
--	v = APIC_DM_NMI;
--	apic_write(APIC_LVT0, v);
-+	printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
-+	lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
-+
-+	local_irq_enable();
-+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
-+	local_irq_disable();
- }
- 
--int get_maxlvt(void)
-+void __cpuinit setup_secondary_APIC_clock(void)
- {
--	unsigned int v, maxlvt;
-+	check_boot_apic_timer_broadcast();
-+	setup_APIC_timer();
-+}
- 
--	v = apic_read(APIC_LVR);
--	maxlvt = GET_APIC_MAXLVT(v);
--	return maxlvt;
-+/*
-+ * The guts of the apic timer interrupt
-+ */
-+static void local_apic_timer_interrupt(void)
-+{
-+	int cpu = smp_processor_id();
-+	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
-+
-+	/*
-+	 * Normally we should not be here till LAPIC has been initialized but
-+	 * in some cases like kdump, its possible that there is a pending LAPIC
-+	 * timer interrupt from previous kernel's context and is delivered in
-+	 * new kernel the moment interrupts are enabled.
-+	 *
-+	 * Interrupts are enabled early and LAPIC is setup much later, hence
-+	 * its possible that when we get here evt->event_handler is NULL.
-+	 * Check for event_handler being NULL and discard the interrupt as
-+	 * spurious.
-+	 */
-+	if (!evt->event_handler) {
-+		printk(KERN_WARNING
-+		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
-+		/* Switch it off */
-+		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
-+		return;
 +	}
-+
-+	/*
-+	 * the NMI deadlock-detector uses this.
-+	 */
-+	add_pda(apic_timer_irqs, 1);
-+
-+	evt->event_handler(evt);
- }
- 
- /*
-- * 'what should we do if we get a hw irq event on an illegal vector'.
-- * each architecture has to answer this themselves.
-+ * Local APIC timer interrupt. This is the most natural way for doing
-+ * local interrupts, but local timer interrupts can be emulated by
-+ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
-+ *
-+ * [ if a single-CPU system runs an SMP kernel then we call the local
-+ *   interrupt as well. Thus we cannot inline the local irq ... ]
-  */
--void ack_bad_irq(unsigned int irq)
-+void smp_apic_timer_interrupt(struct pt_regs *regs)
- {
--	printk("unexpected IRQ trap at vector %02x\n", irq);
-+	struct pt_regs *old_regs = set_irq_regs(regs);
-+
- 	/*
--	 * Currently unexpected vectors happen only on SMP and APIC.
--	 * We _must_ ack these because every local APIC has only N
--	 * irq slots per priority level, and a 'hanging, unacked' IRQ
--	 * holds up an irq slot - in excessive cases (when multiple
--	 * unexpected vectors occur) that might lock up the APIC
--	 * completely.
--	 * But don't ack when the APIC is disabled. -AK
-+	 * NOTE! We'd better ACK the irq immediately,
-+	 * because timer handling can be slow.
- 	 */
--	if (!disable_apic)
--		ack_APIC_irq();
-+	ack_APIC_irq();
-+	/*
-+	 * update_process_times() expects us to have done irq_enter().
-+	 * Besides, if we don't timer interrupts ignore the global
-+	 * interrupt lock, which is the WrongThing (tm) to do.
-+	 */
-+	exit_idle();
-+	irq_enter();
-+	local_apic_timer_interrupt();
-+	irq_exit();
-+	set_irq_regs(old_regs);
-+}
-+
-+int setup_profiling_timer(unsigned int multiplier)
-+{
-+	return -EINVAL;
- }
- 
-+
-+/*
-+ * Local APIC start and shutdown
-+ */
-+
-+/**
-+ * clear_local_APIC - shutdown the local APIC
-+ *
-+ * This is called, when a CPU is disabled and before rebooting, so the state of
-+ * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
-+ * leftovers during boot.
-+ */
- void clear_local_APIC(void)
- {
--	int maxlvt;
--	unsigned int v;
-+	int maxlvt = lapic_get_maxlvt();
-+	u32 v;
- 
--	maxlvt = get_maxlvt();
-+	/* APIC hasn't been mapped yet */
-+	if (!apic_phys)
-+		return;
- 
-+	maxlvt = lapic_get_maxlvt();
- 	/*
- 	 * Masking an LVT entry can trigger a local APIC error
- 	 * if the vector is zero. Mask LVTERR first to prevent this.
-@@ -233,45 +569,9 @@ void clear_local_APIC(void)
- 	apic_read(APIC_ESR);
- }
- 
--void disconnect_bsp_APIC(int virt_wire_setup)
--{
--	/* Go back to Virtual Wire compatibility mode */
--	unsigned long value;
--
--	/* For the spurious interrupt use vector F, and enable it */
--	value = apic_read(APIC_SPIV);
--	value &= ~APIC_VECTOR_MASK;
--	value |= APIC_SPIV_APIC_ENABLED;
--	value |= 0xf;
--	apic_write(APIC_SPIV, value);
--
--	if (!virt_wire_setup) {
--		/*
--		 * For LVT0 make it edge triggered, active high,
--		 * external and enabled
--		 */
--		value = apic_read(APIC_LVT0);
--		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
--			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
--			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
--		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
--		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
--		apic_write(APIC_LVT0, value);
--	} else {
--		/* Disable LVT0 */
--		apic_write(APIC_LVT0, APIC_LVT_MASKED);
--	}
--
--	/* For LVT1 make it edge triggered, active high, nmi and enabled */
--	value = apic_read(APIC_LVT1);
--	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
--			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
--			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
--	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
--	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
--	apic_write(APIC_LVT1, value);
--}
--
-+/**
-+ * disable_local_APIC - clear and disable the local APIC
-+ */
- void disable_local_APIC(void)
- {
- 	unsigned int value;
-@@ -333,7 +633,7 @@ int __init verify_local_APIC(void)
- 	reg1 = GET_APIC_VERSION(reg0);
- 	if (reg1 == 0x00 || reg1 == 0xff)
- 		return 0;
--	reg1 = get_maxlvt();
-+	reg1 = lapic_get_maxlvt();
- 	if (reg1 < 0x02 || reg1 == 0xff)
- 		return 0;
- 
-@@ -355,18 +655,20 @@ int __init verify_local_APIC(void)
- 	 * compatibility mode, but most boxes are anymore.
- 	 */
- 	reg0 = apic_read(APIC_LVT0);
--	apic_printk(APIC_DEBUG,"Getting LVT0: %x\n", reg0);
-+	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
- 	reg1 = apic_read(APIC_LVT1);
- 	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
- 
- 	return 1;
- }
- 
-+/**
-+ * sync_Arb_IDs - synchronize APIC bus arbitration IDs
-+ */
- void __init sync_Arb_IDs(void)
- {
- 	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
--	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
--	if (ver >= 0x14)	/* P4 or higher */
-+	if (modern_apic())
- 		return;
- 
- 	/*
-@@ -418,9 +720,12 @@ void __init init_bsp_APIC(void)
- 	apic_write(APIC_LVT1, value);
- }
- 
--void __cpuinit setup_local_APIC (void)
-+/**
-+ * setup_local_APIC - setup the local APIC
-+ */
-+void __cpuinit setup_local_APIC(void)
- {
--	unsigned int value, maxlvt;
-+	unsigned int value;
- 	int i, j;
- 
- 	value = apic_read(APIC_LVR);
-@@ -516,30 +821,217 @@ void __cpuinit setup_local_APIC (void)
- 	else
- 		value = APIC_DM_NMI | APIC_LVT_MASKED;
- 	apic_write(APIC_LVT1, value);
 +}
- 
--	{
--		unsigned oldvalue;
--		maxlvt = get_maxlvt();
--		oldvalue = apic_read(APIC_ESR);
--		value = ERROR_APIC_VECTOR;      // enables sending errors
--		apic_write(APIC_LVTERR, value);
--		/*
--		 * spec says clear errors after enabling vector.
--		 */
--		if (maxlvt > 3)
--			apic_write(APIC_ESR, 0);
--		value = apic_read(APIC_ESR);
--		if (value != oldvalue)
--			apic_printk(APIC_VERBOSE,
--			"ESR value after enabling vector: %08x, after %08x\n",
--			oldvalue, value);
--	}
-+void __cpuinit lapic_setup_esr(void)
-+{
-+	unsigned maxlvt = lapic_get_maxlvt();
 +
-+	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR);
-+	/*
-+	 * spec says clear errors after enabling vector.
-+	 */
-+	if (maxlvt > 3)
-+		apic_write(APIC_ESR, 0);
-+}
- 
-+void __cpuinit end_local_APIC_setup(void)
-+{
-+	lapic_setup_esr();
- 	nmi_watchdog_default();
- 	setup_apic_nmi_watchdog(NULL);
- 	apic_pm_activate();
- }
- 
-+/*
-+ * Detect and enable local APICs on non-SMP boards.
-+ * Original code written by Keir Fraser.
-+ * On AMD64 we trust the BIOS - if it says no APIC it is likely
-+ * not correctly set up (usually the APIC timer won't work etc.)
-+ */
-+static int __init detect_init_APIC(void)
++void machine_shutdown(void)
 +{
-+	if (!cpu_has_apic) {
-+		printk(KERN_INFO "No local APIC present\n");
-+		return -1;
-+	}
++	/* Stop the cpus and apics */
++#ifdef CONFIG_SMP
++	int reboot_cpu_id;
 +
-+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-+	boot_cpu_id = 0;
-+	return 0;
-+}
++	/* The boot cpu is always logical cpu 0 */
++	reboot_cpu_id = 0;
 +
-+/**
-+ * init_apic_mappings - initialize APIC mappings
-+ */
-+void __init init_apic_mappings(void)
-+{
-+	/*
-+	 * If no local APIC can be found then set up a fake all
-+	 * zeroes page to simulate the local APIC and another
-+	 * one for the IO-APIC.
-+	 */
-+	if (!smp_found_config && detect_init_APIC()) {
-+		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
-+		apic_phys = __pa(apic_phys);
-+	} else
-+		apic_phys = mp_lapic_addr;
++#ifdef CONFIG_X86_32
++	/* See if there has been given a command line override */
++	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
++		cpu_isset(reboot_cpu, cpu_online_map))
++		reboot_cpu_id = reboot_cpu;
++#endif
 +
-+	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-+	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
-+				APIC_BASE, apic_phys);
++	/* Make certain the cpu I'm about to reboot on is online */
++	if (!cpu_isset(reboot_cpu_id, cpu_online_map))
++		reboot_cpu_id = smp_processor_id();
 +
-+	/* Put local APIC into the resource map. */
-+	lapic_resource.start = apic_phys;
-+	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
-+	insert_resource(&iomem_resource, &lapic_resource);
++	/* Make certain I only run on the appropriate processor */
++	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
 +
-+	/*
-+	 * Fetch the APIC ID of the BSP in case we have a
-+	 * default configuration (or the MP table is broken).
++	/* O.K Now that I'm on the appropriate processor,
++	 * stop all of the others.
 +	 */
-+	boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
-+}
-+
-+/*
-+ * This initializes the IO-APIC and APIC hardware if this is
-+ * a UP kernel.
-+ */
-+int __init APIC_init_uniprocessor(void)
-+{
-+	if (disable_apic) {
-+		printk(KERN_INFO "Apic disabled\n");
-+		return -1;
-+	}
-+	if (!cpu_has_apic) {
-+		disable_apic = 1;
-+		printk(KERN_INFO "Apic disabled by BIOS\n");
-+		return -1;
-+	}
-+
-+	verify_local_APIC();
-+
-+	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
-+	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
++	smp_send_stop();
++#endif
 +
-+	setup_local_APIC();
++	lapic_shutdown();
 +
-+	/*
-+	 * Now enable IO-APICs, actually call clear_IO_APIC
-+	 * We need clear_IO_APIC before enabling vector on BP
-+	 */
-+	if (!skip_ioapic_setup && nr_ioapics)
-+		enable_IO_APIC();
++#ifdef CONFIG_X86_IO_APIC
++	disable_IO_APIC();
++#endif
 +
-+	end_local_APIC_setup();
++#ifdef CONFIG_HPET_TIMER
++	hpet_disable();
++#endif
 +
-+	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
-+		setup_IO_APIC();
-+	else
-+		nr_ioapics = 0;
-+	setup_boot_APIC_clock();
-+	check_nmi_watchdog();
-+	return 0;
++#ifdef CONFIG_X86_64
++	pci_iommu_shutdown();
++#endif
 +}
 +
-+/*
-+ * Local APIC interrupts
-+ */
-+
-+/*
-+ * This interrupt should _never_ happen with our APIC/SMP architecture
-+ */
-+asmlinkage void smp_spurious_interrupt(void)
++void machine_restart(char *__unused)
 +{
-+	unsigned int v;
-+	exit_idle();
-+	irq_enter();
-+	/*
-+	 * Check if this really is a spurious interrupt and ACK it
-+	 * if it is a vectored one.  Just in case...
-+	 * Spurious interrupts should not be ACKed.
-+	 */
-+	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-+	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
-+		ack_APIC_irq();
++	printk("machine restart\n");
 +
-+	add_pda(irq_spurious_count, 1);
-+	irq_exit();
++	if (!reboot_force)
++		machine_shutdown();
++	machine_emergency_restart();
 +}
 +
-+/*
-+ * This interrupt should never happen with our APIC/SMP architecture
-+ */
-+asmlinkage void smp_error_interrupt(void)
++void machine_halt(void)
 +{
-+	unsigned int v, v1;
-+
-+	exit_idle();
-+	irq_enter();
-+	/* First tickle the hardware, only then report what went on. -- REW */
-+	v = apic_read(APIC_ESR);
-+	apic_write(APIC_ESR, 0);
-+	v1 = apic_read(APIC_ESR);
-+	ack_APIC_irq();
-+	atomic_inc(&irq_err_count);
-+
-+	/* Here is what the APIC error bits mean:
-+	   0: Send CS error
-+	   1: Receive CS error
-+	   2: Send accept error
-+	   3: Receive accept error
-+	   4: Reserved
-+	   5: Send illegal vector
-+	   6: Received illegal vector
-+	   7: Illegal register address
-+	*/
-+	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
-+		smp_processor_id(), v , v1);
-+	irq_exit();
 +}
 +
-+void disconnect_bsp_APIC(int virt_wire_setup)
++void machine_power_off(void)
 +{
-+	/* Go back to Virtual Wire compatibility mode */
-+	unsigned long value;
-+
-+	/* For the spurious interrupt use vector F, and enable it */
-+	value = apic_read(APIC_SPIV);
-+	value &= ~APIC_VECTOR_MASK;
-+	value |= APIC_SPIV_APIC_ENABLED;
-+	value |= 0xf;
-+	apic_write(APIC_SPIV, value);
-+
-+	if (!virt_wire_setup) {
-+		/*
-+		 * For LVT0 make it edge triggered, active high,
-+		 * external and enabled
-+		 */
-+		value = apic_read(APIC_LVT0);
-+		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
-+			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
-+			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
-+		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
-+		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
-+		apic_write(APIC_LVT0, value);
-+	} else {
-+		/* Disable LVT0 */
-+		apic_write(APIC_LVT0, APIC_LVT_MASKED);
++	if (pm_power_off) {
++		if (!reboot_force)
++			machine_shutdown();
++		pm_power_off();
 +	}
-+
-+	/* For LVT1 make it edge triggered, active high, nmi and enabled */
-+	value = apic_read(APIC_LVT1);
-+	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
-+			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
-+			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
-+	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
-+	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
-+	apic_write(APIC_LVT1, value);
 +}
 +
-+/*
-+ * Power management
-+ */
- #ifdef CONFIG_PM
- 
- static struct {
-@@ -571,7 +1063,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
- 	if (!apic_pm_state.active)
- 		return 0;
- 
--	maxlvt = get_maxlvt();
-+	maxlvt = lapic_get_maxlvt();
- 
- 	apic_pm_state.apic_id = apic_read(APIC_ID);
- 	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
-@@ -605,7 +1097,7 @@ static int lapic_resume(struct sys_device *dev)
- 	if (!apic_pm_state.active)
- 		return 0;
- 
--	maxlvt = get_maxlvt();
-+	maxlvt = lapic_get_maxlvt();
- 
- 	local_irq_save(flags);
- 	rdmsr(MSR_IA32_APICBASE, l, h);
-@@ -639,14 +1131,14 @@ static int lapic_resume(struct sys_device *dev)
- }
- 
- static struct sysdev_class lapic_sysclass = {
--	set_kset_name("lapic"),
-+	.name		= "lapic",
- 	.resume		= lapic_resume,
- 	.suspend	= lapic_suspend,
- };
- 
- static struct sys_device device_lapic = {
--	.id		= 0,
--	.cls		= &lapic_sysclass,
-+	.id	= 0,
-+	.cls	= &lapic_sysclass,
- };
- 
- static void __cpuinit apic_pm_activate(void)
-@@ -657,9 +1149,11 @@ static void __cpuinit apic_pm_activate(void)
- static int __init init_lapic_sysfs(void)
- {
- 	int error;
-+
- 	if (!cpu_has_apic)
- 		return 0;
- 	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
-+
- 	error = sysdev_class_register(&lapic_sysclass);
- 	if (!error)
- 		error = sysdev_register(&device_lapic);
-@@ -673,423 +1167,6 @@ static void apic_pm_activate(void) { }
- 
- #endif	/* CONFIG_PM */
- 
--static int __init apic_set_verbosity(char *str)
++struct machine_ops machine_ops = {
++	.power_off = machine_power_off,
++	.shutdown = machine_shutdown,
++	.emergency_restart = machine_emergency_restart,
++	.restart = machine_restart,
++	.halt = machine_halt
++};
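
The struct machine_ops block just above is the heart of the unified reboot code: the generic machine_* entry points dispatch through a table of function pointers, so a paravirt or EFI backend can override individual operations without touching the callers. A minimal userspace sketch of that dispatch pattern follows; the stub bodies and the main() driver are illustrative only, not kernel code.

#include <stdio.h>

struct machine_ops {
	void (*shutdown)(void);
	void (*restart)(char *cmd);
	void (*power_off)(void);
};

/* Illustrative stand-ins for the native implementations. */
static void native_shutdown(void)     { printf("stop other CPUs, APICs, IOMMU\n"); }
static void native_restart(char *cmd) { printf("restart (%s)\n", cmd ? cmd : "default"); }
static void native_power_off(void)    { printf("pm_power_off()\n"); }

static struct machine_ops machine_ops = {
	.shutdown  = native_shutdown,
	.restart   = native_restart,
	.power_off = native_power_off,
};

/* Stable entry point; an alternative backend could swap the ops at boot. */
void machine_restart(char *cmd)
{
	machine_ops.shutdown();
	machine_ops.restart(cmd);
}

int main(void)
{
	machine_restart(NULL);
	return 0;
}
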
+diff --git a/arch/x86/kernel/reboot_32.c b/arch/x86/kernel/reboot_32.c
+deleted file mode 100644
+index bb1a0f8..0000000
+--- a/arch/x86/kernel/reboot_32.c
++++ /dev/null
+@@ -1,413 +0,0 @@
+-#include <linux/mm.h>
+-#include <linux/module.h>
+-#include <linux/delay.h>
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+-#include <linux/mc146818rtc.h>
+-#include <linux/efi.h>
+-#include <linux/dmi.h>
+-#include <linux/ctype.h>
+-#include <linux/pm.h>
+-#include <linux/reboot.h>
+-#include <asm/uaccess.h>
+-#include <asm/apic.h>
+-#include <asm/hpet.h>
+-#include <asm/desc.h>
+-#include "mach_reboot.h"
+-#include <asm/reboot_fixups.h>
+-#include <asm/reboot.h>
+-
+-/*
+- * Power off function, if any
+- */
+-void (*pm_power_off)(void);
+-EXPORT_SYMBOL(pm_power_off);
+-
+-static int reboot_mode;
+-static int reboot_thru_bios;
+-
+-#ifdef CONFIG_SMP
+-static int reboot_cpu = -1;
+-#endif
+-static int __init reboot_setup(char *str)
 -{
--	if (str == NULL)  {
--		skip_ioapic_setup = 0;
--		ioapic_force = 1;
--		return 0;
--	}
--	if (strcmp("debug", str) == 0)
--		apic_verbosity = APIC_DEBUG;
--	else if (strcmp("verbose", str) == 0)
--		apic_verbosity = APIC_VERBOSE;
--	else {
--		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
--				" use apic=verbose or apic=debug\n", str);
--		return -EINVAL;
+-	while(1) {
+-		switch (*str) {
+-		case 'w': /* "warm" reboot (no memory testing etc) */
+-			reboot_mode = 0x1234;
+-			break;
+-		case 'c': /* "cold" reboot (with memory testing etc) */
+-			reboot_mode = 0x0;
+-			break;
+-		case 'b': /* "bios" reboot by jumping through the BIOS */
+-			reboot_thru_bios = 1;
+-			break;
+-		case 'h': /* "hard" reboot by toggling RESET and/or crashing the CPU */
+-			reboot_thru_bios = 0;
+-			break;
+-#ifdef CONFIG_SMP
+-		case 's': /* "smp" reboot by executing reset on BSP or other CPU*/
+-			if (isdigit(*(str+1))) {
+-				reboot_cpu = (int) (*(str+1) - '0');
+-				if (isdigit(*(str+2)))
+-					reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
+-			}
+-				/* we will leave sorting out the final value 
+-				when we are ready to reboot, since we might not
+- 				have set up boot_cpu_id or smp_num_cpu */
+-			break;
+-#endif
+-		}
+-		if((str = strchr(str,',')) != NULL)
+-			str++;
+-		else
+-			break;
 -	}
--
--	return 0;
+-	return 1;
 -}
--early_param("apic", apic_set_verbosity);
+-
+-__setup("reboot=", reboot_setup);
 -
 -/*
-- * Detect and enable local APICs on non-SMP boards.
-- * Original code written by Keir Fraser.
-- * On AMD64 we trust the BIOS - if it says no APIC it is likely
-- * not correctly set up (usually the APIC timer won't work etc.)
+- * Reboot options and system auto-detection code provided by
+- * Dell Inc. so their systems "just work". :-)
 - */
 -
--static int __init detect_init_APIC (void)
+-/*
+- * Some machines require the "reboot=b"  commandline option, this quirk makes that automatic.
+- */
+-static int __init set_bios_reboot(const struct dmi_system_id *d)
 -{
--	if (!cpu_has_apic) {
--		printk(KERN_INFO "No local APIC present\n");
--		return -1;
+-	if (!reboot_thru_bios) {
+-		reboot_thru_bios = 1;
+-		printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
 -	}
--
--	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
--	boot_cpu_id = 0;
 -	return 0;
 -}
 -
--#ifdef CONFIG_X86_IO_APIC
--static struct resource * __init ioapic_setup_resources(void)
--{
--#define IOAPIC_RESOURCE_NAME_SIZE 11
--	unsigned long n;
--	struct resource *res;
--	char *mem;
--	int i;
--
--	if (nr_ioapics <= 0)
--		return NULL;
+-static struct dmi_system_id __initdata reboot_dmi_table[] = {
+-	{	/* Handle problems with rebooting on Dell E520's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell E520",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
+-		},
+-	},
+-	{	/* Handle problems with rebooting on Dell 1300's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell PowerEdge 1300",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
+-		},
+-	},
+-	{	/* Handle problems with rebooting on Dell 300's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell PowerEdge 300",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
+-		},
+-	},
+-	{       /* Handle problems with rebooting on Dell Optiplex 745's SFF*/
+-		.callback = set_bios_reboot,
+-		.ident = "Dell OptiPlex 745",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+-			DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
+-		},
+-	},
+-	{	/* Handle problems with rebooting on Dell 2400's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell PowerEdge 2400",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
+-		},
+-	},
+-	{	/* Handle problems with rebooting on HP laptops */
+-		.callback = set_bios_reboot,
+-		.ident = "HP Compaq Laptop",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
+-		},
+-	},
+-	{ }
+-};
 -
--	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
--	n *= nr_ioapics;
+-static int __init reboot_init(void)
+-{
+-	dmi_check_system(reboot_dmi_table);
+-	return 0;
+-}
 -
--	mem = alloc_bootmem(n);
--	res = (void *)mem;
+-core_initcall(reboot_init);
 -
--	if (mem != NULL) {
--		memset(mem, 0, n);
--		mem += sizeof(struct resource) * nr_ioapics;
+-/* The following code and data reboots the machine by switching to real
+-   mode and jumping to the BIOS reset entry point, as if the CPU has
+-   really been reset.  The previous version asked the keyboard
+-   controller to pulse the CPU reset line, which is more thorough, but
+-   doesn't work with at least one type of 486 motherboard.  It is easy
+-   to stop this code working; hence the copious comments. */
 -
--		for (i = 0; i < nr_ioapics; i++) {
--			res[i].name = mem;
--			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
--			sprintf(mem,  "IOAPIC %u", i);
--			mem += IOAPIC_RESOURCE_NAME_SIZE;
--		}
--	}
+-static unsigned long long
+-real_mode_gdt_entries [3] =
+-{
+-	0x0000000000000000ULL,	/* Null descriptor */
+-	0x00009a000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
+-	0x000092000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
+-};
 -
--	ioapic_resources = res;
+-static struct Xgt_desc_struct
+-real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
+-real_mode_idt = { 0x3ff, 0 },
+-no_idt = { 0, 0 };
 -
--	return res;
--}
 -
--static int __init ioapic_insert_resources(void)
--{
--	int i;
--	struct resource *r = ioapic_resources;
+-/* This is 16-bit protected mode code to disable paging and the cache,
+-   switch to real mode and jump to the BIOS reset code.
 -
--	if (!r) {
--		printk("IO APIC resources could be not be allocated.\n");
--		return -1;
--	}
+-   The instruction that switches to real mode by writing to CR0 must be
+-   followed immediately by a far jump instruction, which set CS to a
+-   valid value for real mode, and flushes the prefetch queue to avoid
+-   running instructions that have already been decoded in protected
+-   mode.
 -
--	for (i = 0; i < nr_ioapics; i++) {
--		insert_resource(&iomem_resource, r);
--		r++;
--	}
+-   Clears all the flags except ET, especially PG (paging), PE
+-   (protected-mode enable) and TS (task switch for coprocessor state
+-   save).  Flushes the TLB after paging has been disabled.  Sets CD and
+-   NW, to disable the cache on a 486, and invalidates the cache.  This
+-   is more like the state of a 486 after reset.  I don't know if
+-   something else should be done for other chips.
 -
--	return 0;
--}
+-   More could be done here to set up the registers as if a CPU reset had
+-   occurred; hopefully real BIOSs don't assume much. */
 -
--/* Insert the IO APIC resources after PCI initialization has occured to handle
-- * IO APICS that are mapped in on a BAR in PCI space. */
--late_initcall(ioapic_insert_resources);
--#endif
+-static unsigned char real_mode_switch [] =
+-{
+-	0x66, 0x0f, 0x20, 0xc0,			/*    movl  %cr0,%eax        */
+-	0x66, 0x83, 0xe0, 0x11,			/*    andl  $0x00000011,%eax */
+-	0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,	/*    orl   $0x60000000,%eax */
+-	0x66, 0x0f, 0x22, 0xc0,			/*    movl  %eax,%cr0        */
+-	0x66, 0x0f, 0x22, 0xd8,			/*    movl  %eax,%cr3        */
+-	0x66, 0x0f, 0x20, 0xc3,			/*    movl  %cr0,%ebx        */
+-	0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60,	/*    andl  $0x60000000,%ebx */
+-	0x74, 0x02,				/*    jz    f                */
+-	0x0f, 0x09,				/*    wbinvd                 */
+-	0x24, 0x10,				/* f: andb  $0x10,al         */
+-	0x66, 0x0f, 0x22, 0xc0			/*    movl  %eax,%cr0        */
+-};
+-static unsigned char jump_to_bios [] =
+-{
+-	0xea, 0x00, 0x00, 0xff, 0xff		/*    ljmp  $0xffff,$0x0000  */
+-};
 -
--void __init init_apic_mappings(void)
+-/*
+- * Switch to real mode and then execute the code
+- * specified by the code and length parameters.
+- * We assume that length will aways be less that 100!
+- */
+-void machine_real_restart(unsigned char *code, int length)
 -{
--	unsigned long apic_phys;
+-	local_irq_disable();
 -
--	/*
--	 * If no local APIC can be found then set up a fake all
--	 * zeroes page to simulate the local APIC and another
--	 * one for the IO-APIC.
+-	/* Write zero to CMOS register number 0x0f, which the BIOS POST
+-	   routine will recognize as telling it to do a proper reboot.  (Well
+-	   that's what this book in front of me says -- it may only apply to
+-	   the Phoenix BIOS though, it's not clear).  At the same time,
+-	   disable NMIs by setting the top bit in the CMOS address register,
+-	   as we're about to do peculiar things to the CPU.  I'm not sure if
+-	   `outb_p' is needed instead of just `outb'.  Use it to be on the
+-	   safe side.  (Yes, CMOS_WRITE does outb_p's. -  Paul G.)
 -	 */
--	if (!smp_found_config && detect_init_APIC()) {
--		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
--		apic_phys = __pa(apic_phys);
--	} else
--		apic_phys = mp_lapic_addr;
 -
--	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
--	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
--				APIC_BASE, apic_phys);
+-	spin_lock(&rtc_lock);
+-	CMOS_WRITE(0x00, 0x8f);
+-	spin_unlock(&rtc_lock);
 -
--	/* Put local APIC into the resource map. */
--	lapic_resource.start = apic_phys;
--	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
--	insert_resource(&iomem_resource, &lapic_resource);
+-	/* Remap the kernel at virtual address zero, as well as offset zero
+-	   from the kernel segment.  This assumes the kernel segment starts at
+-	   virtual address PAGE_OFFSET. */
+-
+-	memcpy (swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+-		sizeof (swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
 -
 -	/*
--	 * Fetch the APIC ID of the BSP in case we have a
--	 * default configuration (or the MP table is broken).
+-	 * Use `swapper_pg_dir' as our page directory.
 -	 */
--	boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
+-	load_cr3(swapper_pg_dir);
 -
--	{
--		unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
--		int i;
--		struct resource *ioapic_res;
+-	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
+-	   this on booting to tell it to "Bypass memory test (also warm
+-	   boot)".  This seems like a fairly standard thing that gets set by
+-	   REBOOT.COM programs, and the previous reset routine did this
+-	   too. */
 -
--		ioapic_res = ioapic_setup_resources();
--		for (i = 0; i < nr_ioapics; i++) {
--			if (smp_found_config) {
--				ioapic_phys = mp_ioapics[i].mpc_apicaddr;
--			} else {
--				ioapic_phys = (unsigned long)
--					alloc_bootmem_pages(PAGE_SIZE);
--				ioapic_phys = __pa(ioapic_phys);
--			}
--			set_fixmap_nocache(idx, ioapic_phys);
--			apic_printk(APIC_VERBOSE,
--				    "mapped IOAPIC to %016lx (%016lx)\n",
--				    __fix_to_virt(idx), ioapic_phys);
--			idx++;
+-	*((unsigned short *)0x472) = reboot_mode;
 -
--			if (ioapic_res != NULL) {
--				ioapic_res->start = ioapic_phys;
--				ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
--				ioapic_res++;
--			}
--		}
--	}
--}
+-	/* For the switch to real mode, copy some code to low memory.  It has
+-	   to be in the first 64k because it is running in 16-bit mode, and it
+-	   has to have the same physical and virtual address, because it turns
+-	   off paging.  Copy it near the end of the first page, out of the way
+-	   of BIOS variables. */
 -
--/*
-- * This function sets up the local APIC timer, with a timeout of
-- * 'clocks' APIC bus clock. During calibration we actually call
-- * this function twice on the boot CPU, once with a bogus timeout
-- * value, second time for real. The other (noncalibrating) CPUs
-- * call this function only once, with the real, calibrated value.
-- *
-- * We do reads before writes even if unnecessary, to get around the
-- * P5 APIC double write bug.
-- */
+-	memcpy ((void *) (0x1000 - sizeof (real_mode_switch) - 100),
+-		real_mode_switch, sizeof (real_mode_switch));
+-	memcpy ((void *) (0x1000 - 100), code, length);
+-
+-	/* Set up the IDT for real mode. */
 -
--static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
--{
--	unsigned int lvtt_value, tmp_value;
+-	load_idt(&real_mode_idt);
 -
--	lvtt_value = LOCAL_TIMER_VECTOR;
--	if (!oneshot)
--		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
--	if (!irqen)
--		lvtt_value |= APIC_LVT_MASKED;
+-	/* Set up a GDT from which we can load segment descriptors for real
+-	   mode.  The GDT is not used in real mode; it is just needed here to
+-	   prepare the descriptors. */
 -
--	apic_write(APIC_LVTT, lvtt_value);
+-	load_gdt(&real_mode_gdt);
 -
--	/*
--	 * Divide PICLK by 16
--	 */
--	tmp_value = apic_read(APIC_TDCR);
--	apic_write(APIC_TDCR, (tmp_value
--				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
--				| APIC_TDR_DIV_16);
+-	/* Load the data segment registers, and thus the descriptors ready for
+-	   real mode.  The base address of each segment is 0x100, 16 times the
+-	   selector value being loaded here.  This is so that the segment
+-	   registers don't have to be reloaded after switching to real mode:
+-	   the values are consistent for real mode operation already. */
 -
--	if (!oneshot)
--		apic_write(APIC_TMICT, clocks);
+-	__asm__ __volatile__ ("movl $0x0010,%%eax\n"
+-				"\tmovl %%eax,%%ds\n"
+-				"\tmovl %%eax,%%es\n"
+-				"\tmovl %%eax,%%fs\n"
+-				"\tmovl %%eax,%%gs\n"
+-				"\tmovl %%eax,%%ss" : : : "eax");
+-
+-	/* Jump to the 16-bit code that we copied earlier.  It disables paging
+-	   and the cache, switches to real mode, and jumps to the BIOS reset
+-	   entry point. */
+-
+-	__asm__ __volatile__ ("ljmp $0x0008,%0"
+-				:
+-				: "i" ((void *) (0x1000 - sizeof (real_mode_switch) - 100)));
 -}
+-#ifdef CONFIG_APM_MODULE
+-EXPORT_SYMBOL(machine_real_restart);
+-#endif
 -
--static void setup_APIC_timer(void)
+-static void native_machine_shutdown(void)
 -{
--	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+-#ifdef CONFIG_SMP
+-	int reboot_cpu_id;
 -
--	memcpy(levt, &lapic_clockevent, sizeof(*levt));
--	levt->cpumask = cpumask_of_cpu(smp_processor_id());
+-	/* The boot cpu is always logical cpu 0 */
+-	reboot_cpu_id = 0;
 -
--	clockevents_register_device(levt);
--}
+-	/* See if there has been given a command line override */
+-	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
+-		cpu_isset(reboot_cpu, cpu_online_map)) {
+-		reboot_cpu_id = reboot_cpu;
+-	}
 -
--/*
-- * In this function we calibrate APIC bus clocks to the external
-- * timer. Unfortunately we cannot use jiffies and the timer irq
-- * to calibrate, since some later bootup code depends on getting
-- * the first irq? Ugh.
-- *
-- * We want to do the calibration only once since we
-- * want to have local timer irqs syncron. CPUs connected
-- * by the same APIC bus have the very same bus frequency.
-- * And we want to have irqs off anyways, no accidental
-- * APIC irq that way.
-- */
+-	/* Make certain the cpu I'm rebooting on is online */
+-	if (!cpu_isset(reboot_cpu_id, cpu_online_map)) {
+-		reboot_cpu_id = smp_processor_id();
+-	}
 -
--#define TICK_COUNT 100000000
+-	/* Make certain I only run on the appropriate processor */
+-	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
 -
--static void __init calibrate_APIC_clock(void)
--{
--	unsigned apic, apic_start;
--	unsigned long tsc, tsc_start;
--	int result;
+-	/* O.K. Now that I'm on the appropriate processor, stop
+-	 * all of the others, and disable their local APICs.
+-	 */
 -
--	local_irq_disable();
+-	smp_send_stop();
+-#endif /* CONFIG_SMP */
 -
--	/*
--	 * Put whatever arbitrary (but long enough) timeout
--	 * value into the APIC clock, we just want to get the
--	 * counter running for calibration.
--	 *
--	 * No interrupt enable !
--	 */
--	__setup_APIC_LVTT(250000000, 0, 0);
+-	lapic_shutdown();
 -
--	apic_start = apic_read(APIC_TMCCT);
--#ifdef CONFIG_X86_PM_TIMER
--	if (apic_calibrate_pmtmr && pmtmr_ioport) {
--		pmtimer_wait(5000);  /* 5ms wait */
--		apic = apic_read(APIC_TMCCT);
--		result = (apic_start - apic) * 1000L / 5;
--	} else
+-#ifdef CONFIG_X86_IO_APIC
+-	disable_IO_APIC();
 -#endif
--	{
--		rdtscll(tsc_start);
+-#ifdef CONFIG_HPET_TIMER
+-	hpet_disable();
+-#endif
+-}
 -
--		do {
--			apic = apic_read(APIC_TMCCT);
--			rdtscll(tsc);
--		} while ((tsc - tsc_start) < TICK_COUNT &&
--				(apic_start - apic) < TICK_COUNT);
+-void __attribute__((weak)) mach_reboot_fixups(void)
+-{
+-}
 -
--		result = (apic_start - apic) * 1000L * tsc_khz /
--					(tsc - tsc_start);
+-static void native_machine_emergency_restart(void)
+-{
+-	if (!reboot_thru_bios) {
+-		if (efi_enabled) {
+-			efi.reset_system(EFI_RESET_COLD, EFI_SUCCESS, 0, NULL);
+-			load_idt(&no_idt);
+-			__asm__ __volatile__("int3");
+-		}
+-		/* rebooting needs to touch the page at absolute addr 0 */
+-		*((unsigned short *)__va(0x472)) = reboot_mode;
+-		for (;;) {
+-			mach_reboot_fixups(); /* for board specific fixups */
+-			mach_reboot();
+-			/* That didn't work - force a triple fault.. */
+-			load_idt(&no_idt);
+-			__asm__ __volatile__("int3");
+-		}
 -	}
+-	if (efi_enabled)
+-		efi.reset_system(EFI_RESET_WARM, EFI_SUCCESS, 0, NULL);
 -
--	local_irq_enable();
--
--	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
--
--	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
--		result / 1000 / 1000, result / 1000 % 1000);
+-	machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
+-}
 -
--	/* Calculate the scaled math multiplication factor */
--	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
--	lapic_clockevent.max_delta_ns =
--		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
--	lapic_clockevent.min_delta_ns =
--		clockevent_delta2ns(0xF, &lapic_clockevent);
+-static void native_machine_restart(char * __unused)
+-{
+-	machine_shutdown();
+-	machine_emergency_restart();
+-}
 -
--	calibration_result = result / HZ;
+-static void native_machine_halt(void)
+-{
 -}
 -
--void __init setup_boot_APIC_clock (void)
+-static void native_machine_power_off(void)
 -{
--	/*
--	 * The local apic timer can be disabled via the kernel commandline.
--	 * Register the lapic timer as a dummy clock event source on SMP
--	 * systems, so the broadcast mechanism is used. On UP systems simply
--	 * ignore it.
--	 */
--	if (disable_apic_timer) {
--		printk(KERN_INFO "Disabling APIC timer\n");
--		/* No broadcast on UP ! */
--		if (num_possible_cpus() > 1)
--			setup_APIC_timer();
--		return;
+-	if (pm_power_off) {
+-		machine_shutdown();
+-		pm_power_off();
 -	}
+-}
 -
--	printk(KERN_INFO "Using local APIC timer interrupts.\n");
--	calibrate_APIC_clock();
 -
--	/*
--	 * If nmi_watchdog is set to IO_APIC, we need the
--	 * PIT/HPET going.  Otherwise register lapic as a dummy
--	 * device.
--	 */
--	if (nmi_watchdog != NMI_IO_APIC)
--		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
--	else
--		printk(KERN_WARNING "APIC timer registered as dummy,"
--		       " due to nmi_watchdog=1!\n");
+-struct machine_ops machine_ops = {
+-	.power_off = native_machine_power_off,
+-	.shutdown = native_machine_shutdown,
+-	.emergency_restart = native_machine_emergency_restart,
+-	.restart = native_machine_restart,
+-	.halt = native_machine_halt,
+-};
 -
--	setup_APIC_timer();
+-void machine_power_off(void)
+-{
+-	machine_ops.power_off();
 -}
 -
--/*
-- * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
-- * C1E flag only in the secondary CPU, so when we detect the wreckage
-- * we already have enabled the boot CPU local apic timer. Check, if
-- * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
-- * set the DUMMY flag again and force the broadcast mode in the
-- * clockevents layer.
-- */
--void __cpuinit check_boot_apic_timer_broadcast(void)
+-void machine_shutdown(void)
 -{
--	if (!disable_apic_timer ||
--	    (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
--		return;
--
--	printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
--	lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
--
--	local_irq_enable();
--	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
--	local_irq_disable();
+-	machine_ops.shutdown();
 -}
 -
--void __cpuinit setup_secondary_APIC_clock(void)
+-void machine_emergency_restart(void)
 -{
--	check_boot_apic_timer_broadcast();
--	setup_APIC_timer();
+-	machine_ops.emergency_restart();
 -}
 -
--int setup_profiling_timer(unsigned int multiplier)
+-void machine_restart(char *cmd)
 -{
--	return -EINVAL;
+-	machine_ops.restart(cmd);
 -}
 -
--void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
--			     unsigned char msg_type, unsigned char mask)
+-void machine_halt(void)
 -{
--	unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
--	unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
--	apic_write(reg, v);
+-	machine_ops.halt();
 -}
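
The reboot_32.c deletion above and the reboot_64.c deletion that follows both parse the reboot= option as a comma-separated list of single-character flags, scanning with strchr(). A standalone C sketch of that loop (flag meanings follow the deleted 32-bit code; the main() driver is only a demonstration):

#include <stdio.h>
#include <string.h>

static int reboot_mode, reboot_thru_bios;

static int reboot_setup(char *str)
{
	for (;;) {
		switch (*str) {
		case 'w': reboot_mode = 0x1234; break;  /* warm: skip memory test */
		case 'c': reboot_mode = 0x0;    break;  /* cold */
		case 'b': reboot_thru_bios = 1; break;  /* jump through the BIOS */
		case 'h': reboot_thru_bios = 0; break;  /* hard: toggle RESET */
		}
		str = strchr(str, ',');
		if (!str)
			break;
		str++;
	}
	return 1;
}

int main(void)
{
	reboot_setup("w,b");
	printf("mode=0x%x thru_bios=%d\n", reboot_mode, reboot_thru_bios);
	return 0;
}
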
+diff --git a/arch/x86/kernel/reboot_64.c b/arch/x86/kernel/reboot_64.c
+deleted file mode 100644
+index 53620a9..0000000
+--- a/arch/x86/kernel/reboot_64.c
++++ /dev/null
+@@ -1,176 +0,0 @@
+-/* Various gunk just to reboot the machine. */ 
+-#include <linux/module.h>
+-#include <linux/reboot.h>
+-#include <linux/init.h>
+-#include <linux/smp.h>
+-#include <linux/kernel.h>
+-#include <linux/ctype.h>
+-#include <linux/string.h>
+-#include <linux/pm.h>
+-#include <linux/kdebug.h>
+-#include <linux/sched.h>
+-#include <asm/io.h>
+-#include <asm/delay.h>
+-#include <asm/desc.h>
+-#include <asm/hw_irq.h>
+-#include <asm/system.h>
+-#include <asm/pgtable.h>
+-#include <asm/tlbflush.h>
+-#include <asm/apic.h>
+-#include <asm/hpet.h>
+-#include <asm/gart.h>
 -
 -/*
-- * Local timer interrupt handler. It does both profiling and
-- * process statistics/rescheduling.
-- *
-- * We do profiling in every local tick, statistics/rescheduling
-- * happen only every 'profiling multiplier' ticks. The default
-- * multiplier is 1 and it can be changed by writing the new multiplier
-- * value into /proc/profile.
+- * Power off function, if any
 - */
+-void (*pm_power_off)(void);
+-EXPORT_SYMBOL(pm_power_off);
 -
--void smp_local_timer_interrupt(void)
--{
--	int cpu = smp_processor_id();
--	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
+-static long no_idt[3];
+-static enum { 
+-	BOOT_TRIPLE = 't',
+-	BOOT_KBD = 'k'
+-} reboot_type = BOOT_KBD;
+-static int reboot_mode = 0;
+-int reboot_force;
 -
--	/*
--	 * Normally we should not be here till LAPIC has been initialized but
--	 * in some cases like kdump, its possible that there is a pending LAPIC
--	 * timer interrupt from previous kernel's context and is delivered in
--	 * new kernel the moment interrupts are enabled.
--	 *
--	 * Interrupts are enabled early and LAPIC is setup much later, hence
--	 * its possible that when we get here evt->event_handler is NULL.
--	 * Check for event_handler being NULL and discard the interrupt as
--	 * spurious.
--	 */
--	if (!evt->event_handler) {
--		printk(KERN_WARNING
--		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
--		/* Switch it off */
--		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
--		return;
--	}
+-/* reboot=t[riple] | k[bd] [, [w]arm | [c]old]
+-   warm   Don't set the cold reboot flag
+-   cold   Set the cold reboot flag
+-   triple Force a triple fault (init)
+-   kbd    Use the keyboard controller. cold reset (default)
+-   force  Avoid anything that could hang.
+- */ 
+-static int __init reboot_setup(char *str)
+-{
+-	for (;;) {
+-		switch (*str) {
+-		case 'w': 
+-			reboot_mode = 0x1234;
+-			break;
 -
--	/*
--	 * the NMI deadlock-detector uses this.
--	 */
--	add_pda(apic_timer_irqs, 1);
+-		case 'c':
+-			reboot_mode = 0;
+-			break;
 -
--	evt->event_handler(evt);
+-		case 't':
+-		case 'b':
+-		case 'k':
+-			reboot_type = *str;
+-			break;
+-		case 'f':
+-			reboot_force = 1;
+-			break;
+-		}
+-		if((str = strchr(str,',')) != NULL)
+-			str++;
+-		else
+-			break;
+-	}
+-	return 1;
 -}
 -
--/*
-- * Local APIC timer interrupt. This is the most natural way for doing
-- * local interrupts, but local timer interrupts can be emulated by
-- * broadcast interrupts too. [in case the hw doesn't support APIC timers]
-- *
-- * [ if a single-CPU system runs an SMP kernel then we call the local
-- *   interrupt as well. Thus we cannot inline the local irq ... ]
-- */
--void smp_apic_timer_interrupt(struct pt_regs *regs)
+-__setup("reboot=", reboot_setup);
+-
+-static inline void kb_wait(void)
 -{
--	struct pt_regs *old_regs = set_irq_regs(regs);
+-	int i;
 -
--	/*
--	 * NOTE! We'd better ACK the irq immediately,
--	 * because timer handling can be slow.
--	 */
--	ack_APIC_irq();
--	/*
--	 * update_process_times() expects us to have done irq_enter().
--	 * Besides, if we don't timer interrupts ignore the global
--	 * interrupt lock, which is the WrongThing (tm) to do.
--	 */
--	exit_idle();
--	irq_enter();
--	smp_local_timer_interrupt();
--	irq_exit();
--	set_irq_regs(old_regs);
+-	for (i=0; i<0x10000; i++)
+-		if ((inb_p(0x64) & 0x02) == 0)
+-			break;
 -}
 -
- /*
-  * apic_is_clustered_box() -- Check if we can expect good TSC
-  *
-@@ -1103,21 +1180,34 @@ __cpuinit int apic_is_clustered_box(void)
- {
- 	int i, clusters, zeros;
- 	unsigned id;
-+	u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
- 	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
- 
- 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
- 
- 	for (i = 0; i < NR_CPUS; i++) {
--		id = bios_cpu_apicid[i];
-+		/* are we being called early in kernel startup? */
-+		if (bios_cpu_apicid) {
-+			id = bios_cpu_apicid[i];
-+		}
-+		else if (i < nr_cpu_ids) {
-+			if (cpu_present(i))
-+				id = per_cpu(x86_bios_cpu_apicid, i);
-+			else
-+				continue;
-+		}
-+		else
-+			break;
-+
- 		if (id != BAD_APICID)
- 			__set_bit(APIC_CLUSTERID(id), clustermap);
- 	}
- 
- 	/* Problem:  Partially populated chassis may not have CPUs in some of
- 	 * the APIC clusters they have been allocated.  Only present CPUs have
--	 * bios_cpu_apicid entries, thus causing zeroes in the bitmap.  Since
--	 * clusters are allocated sequentially, count zeros only if they are
--	 * bounded by ones.
-+	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
-+	 * Since clusters are allocated sequentially, count zeros only if
-+	 * they are bounded by ones.
- 	 */
- 	clusters = 0;
- 	zeros = 0;
-@@ -1138,96 +1228,33 @@ __cpuinit int apic_is_clustered_box(void)
- }
- 
- /*
-- * This interrupt should _never_ happen with our APIC/SMP architecture
-- */
--asmlinkage void smp_spurious_interrupt(void)
+-void machine_shutdown(void)
 -{
--	unsigned int v;
--	exit_idle();
--	irq_enter();
--	/*
--	 * Check if this really is a spurious interrupt and ACK it
--	 * if it is a vectored one.  Just in case...
--	 * Spurious interrupts should not be ACKed.
+-	unsigned long flags;
+-
+-	/* Stop the cpus and apics */
+-#ifdef CONFIG_SMP
+-	int reboot_cpu_id;
+-
+-	/* The boot cpu is always logical cpu 0 */
+-	reboot_cpu_id = 0;
+-
+-	/* Make certain the cpu I'm about to reboot on is online */
+-	if (!cpu_isset(reboot_cpu_id, cpu_online_map)) {
+-		reboot_cpu_id = smp_processor_id();
+-	}
+-
+-	/* Make certain I only run on the appropriate processor */
+-	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
+-
+-	/* O.K Now that I'm on the appropriate processor,
+-	 * stop all of the others.
 -	 */
--	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
--	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
--		ack_APIC_irq();
+-	smp_send_stop();
+-#endif
 -
--	add_pda(irq_spurious_count, 1);
--	irq_exit();
--}
+-	local_irq_save(flags);
 -
--/*
-- * This interrupt should never happen with our APIC/SMP architecture
-+ * APIC command line parameters
-  */
+-#ifndef CONFIG_SMP
+-	disable_local_APIC();
+-#endif
 -
--asmlinkage void smp_error_interrupt(void)
+-	disable_IO_APIC();
+-
+-#ifdef CONFIG_HPET_TIMER
+-	hpet_disable();
+-#endif
+-	local_irq_restore(flags);
+-
+-	pci_iommu_shutdown();
+-}
+-
+-void machine_emergency_restart(void)
 -{
--	unsigned int v, v1;
+-	int i;
 -
--	exit_idle();
--	irq_enter();
--	/* First tickle the hardware, only then report what went on. -- REW */
--	v = apic_read(APIC_ESR);
--	apic_write(APIC_ESR, 0);
--	v1 = apic_read(APIC_ESR);
--	ack_APIC_irq();
--	atomic_inc(&irq_err_count);
+-	/* Tell the BIOS if we want cold or warm reboot */
+-	*((unsigned short *)__va(0x472)) = reboot_mode;
+-       
+-	for (;;) {
+-		/* Could also try the reset bit in the Hammer NB */
+-		switch (reboot_type) { 
+-		case BOOT_KBD:
+-		for (i=0; i<10; i++) {
+-			kb_wait();
+-			udelay(50);
+-			outb(0xfe,0x64);         /* pulse reset low */
+-			udelay(50);
+-		}
 -
--	/* Here is what the APIC error bits mean:
--	   0: Send CS error
--	   1: Receive CS error
--	   2: Send accept error
--	   3: Receive accept error
--	   4: Reserved
--	   5: Send illegal vector
--	   6: Received illegal vector
--	   7: Illegal register address
--	*/
--	printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
--		smp_processor_id(), v , v1);
--	irq_exit();
+-		case BOOT_TRIPLE: 
+-			load_idt((const struct desc_ptr *)&no_idt);
+-			__asm__ __volatile__("int3");
+-
+-			reboot_type = BOOT_KBD;
+-			break;
+-		}      
+-	}      
 -}
 -
--int disable_apic;
+-void machine_restart(char * __unused)
+-{
+-	printk("machine restart\n");
 -
--/*
-- * This initializes the IO-APIC and APIC hardware if this is
-- * a UP kernel.
-- */
--int __init APIC_init_uniprocessor (void)
-+static int __init apic_set_verbosity(char *str)
- {
--	if (disable_apic) {
--		printk(KERN_INFO "Apic disabled\n");
--		return -1;
-+	if (str == NULL)  {
-+		skip_ioapic_setup = 0;
-+		ioapic_force = 1;
-+		return 0;
- 	}
--	if (!cpu_has_apic) {
--		disable_apic = 1;
--		printk(KERN_INFO "Apic disabled by BIOS\n");
--		return -1;
-+	if (strcmp("debug", str) == 0)
-+		apic_verbosity = APIC_DEBUG;
-+	else if (strcmp("verbose", str) == 0)
-+		apic_verbosity = APIC_VERBOSE;
-+	else {
-+		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
-+				" use apic=verbose or apic=debug\n", str);
-+		return -EINVAL;
- 	}
- 
--	verify_local_APIC();
+-	if (!reboot_force) {
+-		machine_shutdown();
+-	}
+-	machine_emergency_restart();
+-}
 -
--	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
--	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
+-void machine_halt(void)
+-{
+-}
 -
--	setup_local_APIC();
+-void machine_power_off(void)
+-{
+-	if (pm_power_off) {
+-		if (!reboot_force) {
+-			machine_shutdown();
+-		}
+-		pm_power_off();
+-	}
+-}
 -
--	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
--		setup_IO_APIC();
--	else
--		nr_ioapics = 0;
--	setup_boot_APIC_clock();
--	check_nmi_watchdog();
- 	return 0;
- }
-+early_param("apic", apic_set_verbosity);
- 
- static __init int setup_disableapic(char *str)
- {
- 	disable_apic = 1;
--	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-+	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
- 	return 0;
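
The deleted reboot_64.c above falls back on the classic i8042 reset: wait for the keyboard controller's input buffer to drain, then send command 0xFE to pulse the CPU reset line. A hedged userspace illustration of the same sequence is below; it needs iopl(3)/root to touch the ports and would actually reset the machine, so it is shown only to make the mechanism concrete.

#include <stdio.h>
#include <sys/io.h>
#include <unistd.h>

static void kb_wait(void)
{
	int i;

	/* Bit 1 of the status port (0x64) is the input-buffer-full flag. */
	for (i = 0; i < 0x10000; i++)
		if ((inb(0x64) & 0x02) == 0)
			break;
}

int main(void)
{
	int i;

	if (iopl(3) < 0) {
		perror("iopl");
		return 1;
	}
	for (i = 0; i < 10; i++) {
		kb_wait();
		usleep(50);
		outb(0xfe, 0x64);	/* pulse RESET low - this reboots the box */
		usleep(50);
	}
	return 0;
}
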
+diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
+index f452726..dec0b5e 100644
+--- a/arch/x86/kernel/reboot_fixups_32.c
++++ b/arch/x86/kernel/reboot_fixups_32.c
+@@ -30,6 +30,19 @@ static void cs5536_warm_reset(struct pci_dev *dev)
+ 	udelay(50); /* shouldn't get here but be safe and spin a while */
  }
- early_param("disableapic", setup_disableapic);
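
For reference, the spurious-interrupt handler earlier in this apic hunk indexes the in-service register with (SPURIOUS_APIC_VECTOR & ~0x1f) >> 1: the 256 vectors are spread over eight 32-bit ISR registers spaced 0x10 apart, so that expression yields the register offset while the low five bits give the bit position. A small sketch of the arithmetic; 0xff is the conventional spurious vector and is treated as an assumption here.

#include <stdio.h>

#define SPURIOUS_APIC_VECTOR 0xff	/* assumed value, for illustration */

int main(void)
{
	unsigned int v = SPURIOUS_APIC_VECTOR;
	unsigned int reg_offset = (v & ~0x1fU) >> 1;	/* 32 vectors per reg, regs 0x10 apart */
	unsigned int bit = v & 0x1f;

	printf("vector 0x%02x -> ISR reg offset 0x%02x, bit %u\n", v, reg_offset, bit);
	return 0;
}
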
-diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
-index af045ca..d4438ef 100644
---- a/arch/x86/kernel/apm_32.c
-+++ b/arch/x86/kernel/apm_32.c
-@@ -227,6 +227,7 @@
- #include <linux/dmi.h>
- #include <linux/suspend.h>
- #include <linux/kthread.h>
-+#include <linux/jiffies.h>
- 
- #include <asm/system.h>
- #include <asm/uaccess.h>
-@@ -235,8 +236,6 @@
- #include <asm/paravirt.h>
- #include <asm/reboot.h>
- 
--#include "io_ports.h"
--
- #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
- extern int (*console_blank_hook)(int);
- #endif
-@@ -324,7 +323,7 @@ extern int (*console_blank_hook)(int);
- /*
-  * Ignore suspend events for this amount of time after a resume
-  */
--#define DEFAULT_BOUNCE_INTERVAL		(3 * HZ)
-+#define DEFAULT_BOUNCE_INTERVAL	(3 * HZ)
  
- /*
-  * Maximum number of events stored
-@@ -336,7 +335,7 @@ extern int (*console_blank_hook)(int);
-  */
- struct apm_user {
- 	int		magic;
--	struct apm_user *	next;
-+	struct apm_user *next;
- 	unsigned int	suser: 1;
- 	unsigned int	writer: 1;
- 	unsigned int	reader: 1;
-@@ -372,44 +371,44 @@ struct apm_user {
- static struct {
- 	unsigned long	offset;
- 	unsigned short	segment;
--}				apm_bios_entry;
--static int			clock_slowed;
--static int			idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
--static int			idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
--static int			set_pm_idle;
--static int			suspends_pending;
--static int			standbys_pending;
--static int			ignore_sys_suspend;
--static int			ignore_normal_resume;
--static int			bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL;
--
--static int			debug __read_mostly;
--static int			smp __read_mostly;
--static int			apm_disabled = -1;
-+} apm_bios_entry;
-+static int clock_slowed;
-+static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
-+static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
-+static int set_pm_idle;
-+static int suspends_pending;
-+static int standbys_pending;
-+static int ignore_sys_suspend;
-+static int ignore_normal_resume;
-+static int bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL;
++static void rdc321x_reset(struct pci_dev *dev)
++{
++	unsigned i;
++	/* Voluntary reset the watchdog timer */
++	outl(0x80003840, 0xCF8);
++	/* Generate a CPU reset on next tick */
++	i = inl(0xCFC);
++	/* Use the minimum timer resolution */
++	i |= 0x1600;
++	outl(i, 0xCFC);
++	outb(1, 0x92);
++}
 +
-+static int debug __read_mostly;
-+static int smp __read_mostly;
-+static int apm_disabled = -1;
- #ifdef CONFIG_SMP
--static int			power_off;
-+static int power_off;
- #else
--static int			power_off = 1;
-+static int power_off = 1;
- #endif
- #ifdef CONFIG_APM_REAL_MODE_POWER_OFF
--static int			realmode_power_off = 1;
-+static int realmode_power_off = 1;
- #else
--static int			realmode_power_off;
-+static int realmode_power_off;
- #endif
- #ifdef CONFIG_APM_ALLOW_INTS
--static int			allow_ints = 1;
-+static int allow_ints = 1;
- #else
--static int			allow_ints;
-+static int allow_ints;
- #endif
--static int			broken_psr;
-+static int broken_psr;
- 
- static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
- static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
--static struct apm_user *	user_list;
-+static struct apm_user *user_list;
- static DEFINE_SPINLOCK(user_list_lock);
--static const struct desc_struct	bad_bios_desc = { 0, 0x00409200 };
-+static const struct desc_struct	bad_bios_desc = { { { 0, 0x00409200 } } };
- 
--static const char		driver_version[] = "1.16ac";	/* no spaces */
-+static const char driver_version[] = "1.16ac";	/* no spaces */
- 
- static struct task_struct *kapmd_task;
- 
-@@ -417,7 +416,7 @@ static struct task_struct *kapmd_task;
-  *	APM event names taken from the APM 1.2 specification. These are
-  *	the message codes that the BIOS uses to tell us about events
-  */
--static const char *	const apm_event_name[] = {
-+static const char * const apm_event_name[] = {
- 	"system standby",
- 	"system suspend",
- 	"normal resume",
-@@ -435,14 +434,14 @@ static const char *	const apm_event_name[] = {
- 
- typedef struct lookup_t {
- 	int	key;
--	char *	msg;
-+	char 	*msg;
- } lookup_t;
+ struct device_fixup {
+ 	unsigned int vendor;
+ 	unsigned int device;
+@@ -40,6 +53,7 @@ static struct device_fixup fixups_table[] = {
+ { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
+ { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
++{ PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030, rdc321x_reset },
+ };
  
  /*
-  *	The BIOS returns a set of standard error codes in AX when the
-  *	carry flag is set.
-  */
-- 
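
The rdc321x_reset() fixup added above pokes the RDC watchdog through the legacy PCI "mechanism #1" ports: an address dword written to 0xCF8 selects bus/device/function/register, and the data then moves through 0xCFC. A sketch of a config-space read done the same way (userspace, needs iopl(), purely illustrative):

#include <stdio.h>
#include <sys/io.h>

static unsigned int pci_conf1_read(unsigned int bus, unsigned int dev,
				   unsigned int fn, unsigned int reg)
{
	unsigned int addr = 0x80000000u | (bus << 16) | (dev << 11) |
			    (fn << 8) | (reg & 0xfc);

	outl(addr, 0xCF8);	/* address port: enable bit | bus/dev/fn/reg */
	return inl(0xCFC);	/* data port */
}

int main(void)
{
	if (iopl(3) < 0) {
		perror("iopl");
		return 1;
	}
	/* Vendor/device ID of bus 0, device 0, function 0 lives at register 0. */
	printf("00:00.0 id = 0x%08x\n", pci_conf1_read(0, 0, 0, 0));
	return 0;
}
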
+diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
+new file mode 100644
+index 0000000..eb9b1a1
+--- /dev/null
++++ b/arch/x86/kernel/rtc.c
+@@ -0,0 +1,204 @@
++/*
++ * RTC related functions
++ */
++#include <linux/acpi.h>
++#include <linux/bcd.h>
++#include <linux/mc146818rtc.h>
 +
- static const lookup_t error_table[] = {
- /* N/A	{ APM_SUCCESS,		"Operation succeeded" }, */
- 	{ APM_DISABLED,		"Power management disabled" },
-@@ -472,24 +471,25 @@ static const lookup_t error_table[] = {
-  *	Write a meaningful log entry to the kernel log in the event of
-  *	an APM error.
-  */
-- 
++#include <asm/time.h>
++#include <asm/vsyscall.h>
 +
- static void apm_error(char *str, int err)
- {
--	int	i;
-+	int i;
- 
- 	for (i = 0; i < ERROR_COUNT; i++)
--		if (error_table[i].key == err) break;
-+		if (error_table[i].key == err)
-+			break;
- 	if (i < ERROR_COUNT)
- 		printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
- 	else
- 		printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
--			str, err);
-+		       str, err);
- }
- 
- /*
-  * Lock APM functionality to physical CPU 0
-  */
-- 
++#ifdef CONFIG_X86_32
++# define CMOS_YEARS_OFFS 1900
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with.  It is required for NMI access to the
++ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
++#else
++/*
++ * x86-64 systems only exists since 2002.
++ * This will work up to Dec 31, 2100
++ */
++# define CMOS_YEARS_OFFS 2000
++#endif
 +
- #ifdef CONFIG_SMP
- 
- static cpumask_t apm_save_cpus(void)
-@@ -511,7 +511,7 @@ static inline void apm_restore_cpus(cpumask_t mask)
- /*
-  *	No CPU lockdown needed on a uniprocessor
-  */
-- 
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
 +
- #define apm_save_cpus()		(current->cpus_allowed)
- #define apm_restore_cpus(x)	(void)(x)
- 
-@@ -590,7 +590,7 @@ static inline void apm_irq_restore(unsigned long flags)
-  *	code is returned in AH (bits 8-15 of eax) and this function
-  *	returns non-zero.
-  */
-- 
++/*
++ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
++ * called 500 ms after the second nowtime has started, because when
++ * nowtime is written into the registers of the CMOS clock, it will
++ * jump to the next second precisely 500 ms later. Check the Motorola
++ * MC146818A or Dallas DS12887 data sheet for details.
++ *
++ * BUG: This routine does not handle hour overflow properly; it just
++ *      sets the minutes. Usually you'll only notice that after reboot!
++ */
++int mach_set_rtc_mmss(unsigned long nowtime)
++{
++	int retval = 0;
++	int real_seconds, real_minutes, cmos_minutes;
++	unsigned char save_control, save_freq_select;
 +
- static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
- 	u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi)
- {
-@@ -602,7 +602,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
- 	struct desc_struct	*gdt;
- 
- 	cpus = apm_save_cpus();
--	
++	 /* tell the clock it's being set */
++	save_control = CMOS_READ(RTC_CONTROL);
++	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
 +
- 	cpu = get_cpu();
- 	gdt = get_cpu_gdt_table(cpu);
- 	save_desc_40 = gdt[0x40 / 8];
-@@ -616,7 +616,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
- 	gdt[0x40 / 8] = save_desc_40;
- 	put_cpu();
- 	apm_restore_cpus(cpus);
--	
++	/* stop and reset prescaler */
++	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 +
- 	return *eax & 0xff;
- }
- 
-@@ -645,7 +645,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
- 	struct desc_struct	*gdt;
- 
- 	cpus = apm_save_cpus();
--	
++	cmos_minutes = CMOS_READ(RTC_MINUTES);
++	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
++		BCD_TO_BIN(cmos_minutes);
 +
- 	cpu = get_cpu();
- 	gdt = get_cpu_gdt_table(cpu);
- 	save_desc_40 = gdt[0x40 / 8];
-@@ -680,7 +680,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
- 
- static int apm_driver_version(u_short *val)
- {
--	u32	eax;
-+	u32 eax;
- 
- 	if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax))
- 		return (eax >> 8) & 0xff;
-@@ -704,16 +704,16 @@ static int apm_driver_version(u_short *val)
-  *	that APM 1.2 is in use. If no messges are pending the value 0x80
-  *	is returned (No power management events pending).
-  */
-- 
++	/*
++	 * since we're only adjusting minutes and seconds,
++	 * don't interfere with hour overflow. This avoids
++	 * messing with unknown time zones but requires your
++	 * RTC not to be off by more than 15 minutes
++	 */
++	real_seconds = nowtime % 60;
++	real_minutes = nowtime / 60;
++	/* correct for half hour time zone */
++	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
++		real_minutes += 30;
++	real_minutes %= 60;
 +
- static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
- {
--	u32	eax;
--	u32	ebx;
--	u32	ecx;
--	u32	dummy;
-+	u32 eax;
-+	u32 ebx;
-+	u32 ecx;
-+	u32 dummy;
- 
- 	if (apm_bios_call(APM_FUNC_GET_EVENT, 0, 0, &eax, &ebx, &ecx,
--			&dummy, &dummy))
-+			  &dummy, &dummy))
- 		return (eax >> 8) & 0xff;
- 	*event = ebx;
- 	if (apm_info.connection_version < 0x0102)
-@@ -736,10 +736,10 @@ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
-  *	The state holds the state to transition to, which may in fact
-  *	be an acceptance of a BIOS requested state change.
-  */
-- 
++	if (abs(real_minutes - cmos_minutes) < 30) {
++		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++			BIN_TO_BCD(real_seconds);
++			BIN_TO_BCD(real_minutes);
++		}
++		CMOS_WRITE(real_seconds,RTC_SECONDS);
++		CMOS_WRITE(real_minutes,RTC_MINUTES);
++	} else {
++		printk(KERN_WARNING
++		       "set_rtc_mmss: can't update from %d to %d\n",
++		       cmos_minutes, real_minutes);
++		retval = -1;
++	}
 +
- static int set_power_state(u_short what, u_short state)
- {
--	u32	eax;
-+	u32 eax;
- 
- 	if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax))
- 		return (eax >> 8) & 0xff;
-@@ -752,7 +752,7 @@ static int set_power_state(u_short what, u_short state)
-  *
-  *	Transition the entire system into a new APM power state.
-  */
-- 
++	/* The following flags have to be released exactly in this order,
++	 * otherwise the DS12887 (popular MC146818A clone with integrated
++	 * battery and quartz) will not reset the oscillator and will not
++	 * update precisely 500 ms later. You won't find this mentioned in
++	 * the Dallas Semiconductor data sheets, but who believes data
++	 * sheets anyway ...                           -- Markus Kuhn
++	 */
++	CMOS_WRITE(save_control, RTC_CONTROL);
++	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 +
- static int set_system_power_state(u_short state)
- {
- 	return set_power_state(APM_DEVICE_ALL, state);
-@@ -766,13 +766,13 @@ static int set_system_power_state(u_short state)
-  *	to handle the idle request. On a success the function returns 1
-  *	if the BIOS did clock slowing or 0 otherwise.
-  */
-- 
++	return retval;
++}
 +
- static int apm_do_idle(void)
- {
--	u32	eax;
--	u8	ret = 0;
--	int	idled = 0;
--	int	polling;
-+	u32 eax;
-+	u8 ret = 0;
-+	int idled = 0;
-+	int polling;
- 
- 	polling = !!(current_thread_info()->status & TS_POLLING);
- 	if (polling) {
-@@ -799,10 +799,9 @@ static int apm_do_idle(void)
- 		/* This always fails on some SMP boards running UP kernels.
- 		 * Only report the failure the first 5 times.
- 		 */
--		if (++t < 5)
--		{
-+		if (++t < 5) {
- 			printk(KERN_DEBUG "apm_do_idle failed (%d)\n",
--					(eax >> 8) & 0xff);
-+			       (eax >> 8) & 0xff);
- 			t = jiffies;
- 		}
- 		return -1;
-@@ -814,15 +813,15 @@ static int apm_do_idle(void)
- /**
-  *	apm_do_busy	-	inform the BIOS the CPU is busy
-  *
-- *	Request that the BIOS brings the CPU back to full performance. 
-+ *	Request that the BIOS brings the CPU back to full performance.
-  */
-- 
++unsigned long mach_get_cmos_time(void)
++{
++	unsigned int year, mon, day, hour, min, sec, century = 0;
 +
- static void apm_do_busy(void)
- {
--	u32	dummy;
-+	u32 dummy;
- 
- 	if (clock_slowed || ALWAYS_CALL_BUSY) {
--		(void) apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy);
-+		(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy);
- 		clock_slowed = 0;
- 	}
- }
-@@ -833,15 +832,15 @@ static void apm_do_busy(void)
-  * power management - we probably want
-  * to conserve power.
-  */
--#define IDLE_CALC_LIMIT   (HZ * 100)
--#define IDLE_LEAKY_MAX    16
-+#define IDLE_CALC_LIMIT	(HZ * 100)
-+#define IDLE_LEAKY_MAX	16
- 
- static void (*original_pm_idle)(void) __read_mostly;
- 
- /**
-  * apm_cpu_idle		-	cpu idling for APM capable Linux
-  *
-- * This is the idling function the kernel executes when APM is available. It 
-+ * This is the idling function the kernel executes when APM is available. It
-  * tries to do BIOS powermanagement based on the average system idle time.
-  * Furthermore it calls the system default idle routine.
-  */
-@@ -882,7 +881,8 @@ recalc:
- 
- 			t = jiffies;
- 			switch (apm_do_idle()) {
--			case 0: apm_idle_done = 1;
-+			case 0:
-+				apm_idle_done = 1;
- 				if (t != jiffies) {
- 					if (bucket) {
- 						bucket = IDLE_LEAKY_MAX;
-@@ -893,7 +893,8 @@ recalc:
- 					continue;
- 				}
- 				break;
--			case 1: apm_idle_done = 1;
-+			case 1:
-+				apm_idle_done = 1;
- 				break;
- 			default: /* BIOS refused */
- 				break;
-@@ -921,10 +922,10 @@ recalc:
-  *	the SMP call on CPU0 as some systems will only honour this call
-  *	on their first cpu.
-  */
-- 
++	/*
++	 * If UIP is clear, then we have >= 244 microseconds before
++	 * RTC registers will be updated.  Spec sheet says that this
++	 * is the reliable way to read RTC - registers. If UIP is set
++	 * then the register access might be invalid.
++	 */
++	while ((CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
++		cpu_relax();
 +
- static void apm_power_off(void)
- {
--	unsigned char	po_bios_call[] = {
-+	unsigned char po_bios_call[] = {
- 		0xb8, 0x00, 0x10,	/* movw  $0x1000,ax  */
- 		0x8e, 0xd0,		/* movw  ax,ss       */
- 		0xbc, 0x00, 0xf0,	/* movw  $0xf000,sp  */
-@@ -935,13 +936,12 @@ static void apm_power_off(void)
- 	};
- 
- 	/* Some bioses don't like being called from CPU != 0 */
--	if (apm_info.realmode_power_off)
--	{
-+	if (apm_info.realmode_power_off) {
- 		(void)apm_save_cpus();
- 		machine_real_restart(po_bios_call, sizeof(po_bios_call));
++	sec = CMOS_READ(RTC_SECONDS);
++	min = CMOS_READ(RTC_MINUTES);
++	hour = CMOS_READ(RTC_HOURS);
++	day = CMOS_READ(RTC_DAY_OF_MONTH);
++	mon = CMOS_READ(RTC_MONTH);
++	year = CMOS_READ(RTC_YEAR);
++
++#if defined(CONFIG_ACPI) && defined(CONFIG_X86_64)
++	/* CHECKME: Is this really 64bit only ??? */
++	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
++	    acpi_gbl_FADT.century)
++		century = CMOS_READ(acpi_gbl_FADT.century);
++#endif
++
++	if (RTC_ALWAYS_BCD || !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)) {
++		BCD_TO_BIN(sec);
++		BCD_TO_BIN(min);
++		BCD_TO_BIN(hour);
++		BCD_TO_BIN(day);
++		BCD_TO_BIN(mon);
++		BCD_TO_BIN(year);
++	}
++
++	if (century) {
++		BCD_TO_BIN(century);
++		year += century * 100;
++		printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
 +	} else {
-+		(void)set_system_power_state(APM_STATE_OFF);
- 	}
--	else
--		(void) set_system_power_state(APM_STATE_OFF);
- }
- 
- #ifdef CONFIG_APM_DO_ENABLE
-@@ -950,17 +950,17 @@ static void apm_power_off(void)
-  *	apm_enable_power_management - enable BIOS APM power management
-  *	@enable: enable yes/no
-  *
-- *	Enable or disable the APM BIOS power services. 
-+ *	Enable or disable the APM BIOS power services.
-  */
-- 
++		year += CMOS_YEARS_OFFS;
++		if (year < 1970)
++			year += 100;
++	}
 +
- static int apm_enable_power_management(int enable)
- {
--	u32	eax;
-+	u32 eax;
- 
- 	if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED))
- 		return APM_NOT_ENGAGED;
- 	if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL,
--			enable, &eax))
-+				 enable, &eax))
- 		return (eax >> 8) & 0xff;
- 	if (enable)
- 		apm_info.bios.flags &= ~APM_BIOS_DISABLED;
-@@ -983,19 +983,19 @@ static int apm_enable_power_management(int enable)
-  *	if reported is a lifetime in secodnds/minutes at current powwer
-  *	consumption.
-  */
-- 
++	return mktime(year, mon, day, hour, min, sec);
++}
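
mach_get_cmos_time() and mach_set_rtc_mmss() above convert every CMOS field with BCD_TO_BIN/BIN_TO_BCD because the MC146818 stores its registers as packed BCD by default. Those macros boil down to the helpers below; this is a standalone sketch and the example values are arbitrary.

#include <stdio.h>

static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	/* 0x59 read from RTC_MINUTES means 59 decimal, and 42 is stored as 0x42. */
	printf("bcd 0x59 -> %u, 42 -> bcd 0x%02x\n", bcd2bin(0x59), bin2bcd(42));
	return 0;
}
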
 +
- static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
- {
--	u32	eax;
--	u32	ebx;
--	u32	ecx;
--	u32	edx;
--	u32	dummy;
-+	u32 eax;
-+	u32 ebx;
-+	u32 ecx;
-+	u32 edx;
-+	u32 dummy;
- 
- 	if (apm_info.get_power_status_broken)
- 		return APM_32_UNSUPPORTED;
- 	if (apm_bios_call(APM_FUNC_GET_STATUS, APM_DEVICE_ALL, 0,
--			&eax, &ebx, &ecx, &edx, &dummy))
-+			  &eax, &ebx, &ecx, &edx, &dummy))
- 		return (eax >> 8) & 0xff;
- 	*status = ebx;
- 	*bat = ecx;
-@@ -1011,11 +1011,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
- static int apm_get_battery_status(u_short which, u_short *status,
- 				  u_short *bat, u_short *life, u_short *nbat)
- {
--	u32	eax;
--	u32	ebx;
--	u32	ecx;
--	u32	edx;
--	u32	esi;
-+	u32 eax;
-+	u32 ebx;
-+	u32 ecx;
-+	u32 edx;
-+	u32 esi;
- 
- 	if (apm_info.connection_version < 0x0102) {
- 		/* pretend we only have one battery. */
-@@ -1026,7 +1026,7 @@ static int apm_get_battery_status(u_short which, u_short *status,
- 	}
- 
- 	if (apm_bios_call(APM_FUNC_GET_STATUS, (0x8000 | (which)), 0, &eax,
--			&ebx, &ecx, &edx, &esi))
-+			  &ebx, &ecx, &edx, &esi))
- 		return (eax >> 8) & 0xff;
- 	*status = ebx;
- 	*bat = ecx;
-@@ -1044,10 +1044,10 @@ static int apm_get_battery_status(u_short which, u_short *status,
-  *	Activate or deactive power management on either a specific device
-  *	or the entire system (%APM_DEVICE_ALL).
-  */
-- 
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++	unsigned char val;
 +
- static int apm_engage_power_management(u_short device, int enable)
- {
--	u32	eax;
-+	u32 eax;
- 
- 	if ((enable == 0) && (device == APM_DEVICE_ALL)
- 	    && (apm_info.bios.flags & APM_BIOS_DISABLED))
-@@ -1074,7 +1074,7 @@ static int apm_engage_power_management(u_short device, int enable)
-  *	all video devices. Typically the BIOS will do laptop backlight and
-  *	monitor powerdown for us.
-  */
-- 
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	val = inb_p(RTC_PORT(1));
++	lock_cmos_suffix(addr);
++	return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
 +
- static int apm_console_blank(int blank)
- {
- 	int error = APM_NOT_ENGAGED; /* silence gcc */
-@@ -1126,7 +1126,7 @@ static apm_event_t get_queued_event(struct apm_user *as)
- 
- static void queue_event(apm_event_t event, struct apm_user *sender)
- {
--	struct apm_user *	as;
-+	struct apm_user *as;
- 
- 	spin_lock(&user_list_lock);
- 	if (user_list == NULL)
-@@ -1174,11 +1174,11 @@ static void reinit_timer(void)
- 
- 	spin_lock_irqsave(&i8253_lock, flags);
- 	/* set the clock to HZ */
--	outb_p(0x34, PIT_MODE);		/* binary, mode 2, LSB/MSB, ch 0 */
-+	outb_pit(0x34, PIT_MODE);		/* binary, mode 2, LSB/MSB, ch 0 */
- 	udelay(10);
--	outb_p(LATCH & 0xff, PIT_CH0);	/* LSB */
-+	outb_pit(LATCH & 0xff, PIT_CH0);	/* LSB */
- 	udelay(10);
--	outb(LATCH >> 8, PIT_CH0);	/* MSB */
-+	outb_pit(LATCH >> 8, PIT_CH0);	/* MSB */
- 	udelay(10);
- 	spin_unlock_irqrestore(&i8253_lock, flags);
- #endif
-@@ -1186,7 +1186,7 @@ static void reinit_timer(void)
- 
- static int suspend(int vetoable)
- {
--	int		err;
-+	int err;
- 	struct apm_user	*as;
- 
- 	if (pm_send_all(PM_SUSPEND, (void *)3)) {
-@@ -1239,7 +1239,7 @@ static int suspend(int vetoable)
- 
- static void standby(void)
- {
--	int	err;
-+	int err;
- 
- 	local_irq_disable();
- 	device_power_down(PMSG_SUSPEND);
-@@ -1256,8 +1256,8 @@ static void standby(void)
- 
- static apm_event_t get_event(void)
- {
--	int		error;
--	apm_event_t	event = APM_NO_EVENTS; /* silence gcc */
-+	int error;
-+	apm_event_t event = APM_NO_EVENTS; /* silence gcc */
- 	apm_eventinfo_t	info;
- 
- 	static int notified;
-@@ -1275,9 +1275,9 @@ static apm_event_t get_event(void)
- 
- static void check_events(void)
- {
--	apm_event_t		event;
--	static unsigned long	last_resume;
--	static int		ignore_bounce;
-+	apm_event_t event;
-+	static unsigned long last_resume;
-+	static int ignore_bounce;
- 
- 	while ((event = get_event()) != 0) {
- 		if (debug) {
-@@ -1289,7 +1289,7 @@ static void check_events(void)
- 				       "event 0x%02x\n", event);
- 		}
- 		if (ignore_bounce
--		    && ((jiffies - last_resume) > bounce_interval))
-+		    && (time_after(jiffies, last_resume + bounce_interval)))
- 			ignore_bounce = 0;
- 
- 		switch (event) {
-@@ -1357,7 +1357,7 @@ static void check_events(void)
- 			/*
- 			 * We are not allowed to reject a critical suspend.
- 			 */
--			(void) suspend(0);
-+			(void)suspend(0);
- 			break;
- 		}
- 	}
-@@ -1365,12 +1365,12 @@ static void check_events(void)
- 
- static void apm_event_handler(void)
- {
--	static int	pending_count = 4;
--	int		err;
-+	static int pending_count = 4;
-+	int err;
- 
- 	if ((standbys_pending > 0) || (suspends_pending > 0)) {
- 		if ((apm_info.connection_version > 0x100) &&
--				(pending_count-- <= 0)) {
-+		    (pending_count-- <= 0)) {
- 			pending_count = 4;
- 			if (debug)
- 				printk(KERN_DEBUG "apm: setting state busy\n");
-@@ -1418,9 +1418,9 @@ static int check_apm_user(struct apm_user *as, const char *func)
- 
- static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
- {
--	struct apm_user *	as;
--	int			i;
--	apm_event_t		event;
-+	struct apm_user *as;
-+	int i;
-+	apm_event_t event;
- 
- 	as = fp->private_data;
- 	if (check_apm_user(as, "read"))
-@@ -1459,9 +1459,9 @@ static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *
- 	return 0;
- }
- 
--static unsigned int do_poll(struct file *fp, poll_table * wait)
-+static unsigned int do_poll(struct file *fp, poll_table *wait)
- {
--	struct apm_user * as;
-+	struct apm_user *as;
- 
- 	as = fp->private_data;
- 	if (check_apm_user(as, "poll"))
-@@ -1472,10 +1472,10 @@ static unsigned int do_poll(struct file *fp, poll_table * wait)
- 	return 0;
- }
- 
--static int do_ioctl(struct inode * inode, struct file *filp,
-+static int do_ioctl(struct inode *inode, struct file *filp,
- 		    u_int cmd, u_long arg)
- {
--	struct apm_user *	as;
-+	struct apm_user *as;
- 
- 	as = filp->private_data;
- 	if (check_apm_user(as, "ioctl"))
-@@ -1515,9 +1515,9 @@ static int do_ioctl(struct inode * inode, struct file *filp,
- 	return 0;
- }
- 
--static int do_release(struct inode * inode, struct file * filp)
-+static int do_release(struct inode *inode, struct file *filp)
- {
--	struct apm_user *	as;
-+	struct apm_user *as;
- 
- 	as = filp->private_data;
- 	if (check_apm_user(as, "release"))
-@@ -1533,11 +1533,11 @@ static int do_release(struct inode * inode, struct file * filp)
- 		if (suspends_pending <= 0)
- 			(void) suspend(1);
- 	}
--  	spin_lock(&user_list_lock);
-+	spin_lock(&user_list_lock);
- 	if (user_list == as)
- 		user_list = as->next;
- 	else {
--		struct apm_user *	as1;
-+		struct apm_user *as1;
- 
- 		for (as1 = user_list;
- 		     (as1 != NULL) && (as1->next != as);
-@@ -1553,9 +1553,9 @@ static int do_release(struct inode * inode, struct file * filp)
- 	return 0;
- }
- 
--static int do_open(struct inode * inode, struct file * filp)
-+static int do_open(struct inode *inode, struct file *filp)
- {
--	struct apm_user *	as;
-+	struct apm_user *as;
- 
- 	as = kmalloc(sizeof(*as), GFP_KERNEL);
- 	if (as == NULL) {
-@@ -1569,7 +1569,7 @@ static int do_open(struct inode * inode, struct file * filp)
- 	as->suspends_read = as->standbys_read = 0;
- 	/*
- 	 * XXX - this is a tiny bit broken, when we consider BSD
--         * process accounting. If the device is opened by root, we
-+	 * process accounting. If the device is opened by root, we
- 	 * instantly flag that we used superuser privs. Who knows,
- 	 * we might close the device immediately without doing a
- 	 * privileged operation -- cevans
-@@ -1652,16 +1652,16 @@ static int proc_apm_show(struct seq_file *m, void *v)
- 	   8) min = minutes; sec = seconds */
- 
- 	seq_printf(m, "%s %d.%d 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
--		     driver_version,
--		     (apm_info.bios.version >> 8) & 0xff,
--		     apm_info.bios.version & 0xff,
--		     apm_info.bios.flags,
--		     ac_line_status,
--		     battery_status,
--		     battery_flag,
--		     percentage,
--		     time_units,
--		     units);
-+		   driver_version,
-+		   (apm_info.bios.version >> 8) & 0xff,
-+		   apm_info.bios.version & 0xff,
-+		   apm_info.bios.flags,
-+		   ac_line_status,
-+		   battery_status,
-+		   battery_flag,
-+		   percentage,
-+		   time_units,
-+		   units);
- 	return 0;
- }
- 
-@@ -1684,8 +1684,8 @@ static int apm(void *unused)
- 	unsigned short	cx;
- 	unsigned short	dx;
- 	int		error;
--	char *		power_stat;
--	char *		bat_stat;
-+	char 		*power_stat;
-+	char 		*bat_stat;
- 
- #ifdef CONFIG_SMP
- 	/* 2002/08/01 - WT
-@@ -1744,23 +1744,41 @@ static int apm(void *unused)
- 		}
- 	}
- 
--	if (debug && (num_online_cpus() == 1 || smp )) {
-+	if (debug && (num_online_cpus() == 1 || smp)) {
- 		error = apm_get_power_status(&bx, &cx, &dx);
- 		if (error)
- 			printk(KERN_INFO "apm: power status not available\n");
- 		else {
- 			switch ((bx >> 8) & 0xff) {
--			case 0: power_stat = "off line"; break;
--			case 1: power_stat = "on line"; break;
--			case 2: power_stat = "on backup power"; break;
--			default: power_stat = "unknown"; break;
-+			case 0:
-+				power_stat = "off line";
-+				break;
-+			case 1:
-+				power_stat = "on line";
-+				break;
-+			case 2:
-+				power_stat = "on backup power";
-+				break;
-+			default:
-+				power_stat = "unknown";
-+				break;
- 			}
- 			switch (bx & 0xff) {
--			case 0: bat_stat = "high"; break;
--			case 1: bat_stat = "low"; break;
--			case 2: bat_stat = "critical"; break;
--			case 3: bat_stat = "charging"; break;
--			default: bat_stat = "unknown"; break;
-+			case 0:
-+				bat_stat = "high";
-+				break;
-+			case 1:
-+				bat_stat = "low";
-+				break;
-+			case 2:
-+				bat_stat = "critical";
-+				break;
-+			case 3:
-+				bat_stat = "charging";
-+				break;
-+			default:
-+				bat_stat = "unknown";
-+				break;
- 			}
- 			printk(KERN_INFO
- 			       "apm: AC %s, battery status %s, battery life ",
-@@ -1777,8 +1795,8 @@ static int apm(void *unused)
- 					printk("unknown\n");
- 				else
- 					printk("%d %s\n", dx & 0x7fff,
--						(dx & 0x8000) ?
--						"minutes" : "seconds");
-+					       (dx & 0x8000) ?
-+					       "minutes" : "seconds");
- 			}
- 		}
- 	}
-@@ -1803,7 +1821,7 @@ static int apm(void *unused)
- #ifndef MODULE
- static int __init apm_setup(char *str)
- {
--	int	invert;
-+	int invert;
- 
- 	while ((str != NULL) && (*str != '\0')) {
- 		if (strncmp(str, "off", 3) == 0)
-@@ -1828,14 +1846,13 @@ static int __init apm_setup(char *str)
- 		if ((strncmp(str, "power-off", 9) == 0) ||
- 		    (strncmp(str, "power_off", 9) == 0))
- 			power_off = !invert;
--		if (strncmp(str, "smp", 3) == 0)
--		{
-+		if (strncmp(str, "smp", 3) == 0) {
- 			smp = !invert;
- 			idle_threshold = 100;
- 		}
- 		if ((strncmp(str, "allow-ints", 10) == 0) ||
- 		    (strncmp(str, "allow_ints", 10) == 0))
-- 			apm_info.allow_ints = !invert;
-+			apm_info.allow_ints = !invert;
- 		if ((strncmp(str, "broken-psr", 10) == 0) ||
- 		    (strncmp(str, "broken_psr", 10) == 0))
- 			apm_info.get_power_status_broken = !invert;
-@@ -1881,7 +1898,8 @@ static int __init print_if_true(const struct dmi_system_id *d)
-  */
- static int __init broken_ps2_resume(const struct dmi_system_id *d)
- {
--	printk(KERN_INFO "%s machine detected. Mousepad Resume Bug workaround hopefully not needed.\n", d->ident);
-+	printk(KERN_INFO "%s machine detected. Mousepad Resume Bug "
-+	       "workaround hopefully not needed.\n", d->ident);
- 	return 0;
- }
- 
-@@ -1890,7 +1908,8 @@ static int __init set_realmode_power_off(const struct dmi_system_id *d)
- {
- 	if (apm_info.realmode_power_off == 0) {
- 		apm_info.realmode_power_off = 1;
--		printk(KERN_INFO "%s bios detected. Using realmode poweroff only.\n", d->ident);
-+		printk(KERN_INFO "%s bios detected. "
-+		       "Using realmode poweroff only.\n", d->ident);
- 	}
- 	return 0;
- }
-@@ -1900,7 +1919,8 @@ static int __init set_apm_ints(const struct dmi_system_id *d)
- {
- 	if (apm_info.allow_ints == 0) {
- 		apm_info.allow_ints = 1;
--		printk(KERN_INFO "%s machine detected. Enabling interrupts during APM calls.\n", d->ident);
-+		printk(KERN_INFO "%s machine detected. "
-+		       "Enabling interrupts during APM calls.\n", d->ident);
- 	}
- 	return 0;
- }
-@@ -1910,7 +1930,8 @@ static int __init apm_is_horked(const struct dmi_system_id *d)
- {
- 	if (apm_info.disabled == 0) {
- 		apm_info.disabled = 1;
--		printk(KERN_INFO "%s machine detected. Disabling APM.\n", d->ident);
-+		printk(KERN_INFO "%s machine detected. "
-+		       "Disabling APM.\n", d->ident);
- 	}
- 	return 0;
- }
-@@ -1919,7 +1940,8 @@ static int __init apm_is_horked_d850md(const struct dmi_system_id *d)
- {
- 	if (apm_info.disabled == 0) {
- 		apm_info.disabled = 1;
--		printk(KERN_INFO "%s machine detected. Disabling APM.\n", d->ident);
-+		printk(KERN_INFO "%s machine detected. "
-+		       "Disabling APM.\n", d->ident);
- 		printk(KERN_INFO "This bug is fixed in bios P15 which is available for \n");
- 		printk(KERN_INFO "download from support.intel.com \n");
- 	}
-@@ -1931,7 +1953,8 @@ static int __init apm_likes_to_melt(const struct dmi_system_id *d)
- {
- 	if (apm_info.forbid_idle == 0) {
- 		apm_info.forbid_idle = 1;
--		printk(KERN_INFO "%s machine detected. Disabling APM idle calls.\n", d->ident);
-+		printk(KERN_INFO "%s machine detected. "
-+		       "Disabling APM idle calls.\n", d->ident);
- 	}
- 	return 0;
- }
-@@ -1954,7 +1977,8 @@ static int __init apm_likes_to_melt(const struct dmi_system_id *d)
- static int __init broken_apm_power(const struct dmi_system_id *d)
- {
- 	apm_info.get_power_status_broken = 1;
--	printk(KERN_WARNING "BIOS strings suggest APM bugs, disabling power status reporting.\n");
-+	printk(KERN_WARNING "BIOS strings suggest APM bugs, "
-+	       "disabling power status reporting.\n");
- 	return 0;
- }
- 
-@@ -1965,7 +1989,8 @@ static int __init broken_apm_power(const struct dmi_system_id *d)
- static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
- {
- 	apm_info.get_power_status_swabinminutes = 1;
--	printk(KERN_WARNING "BIOS strings suggest APM reports battery life in minutes and wrong byte order.\n");
-+	printk(KERN_WARNING "BIOS strings suggest APM reports battery life "
-+	       "in minutes and wrong byte order.\n");
- 	return 0;
- }
- 
-@@ -1990,8 +2015,8 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
- 		apm_is_horked, "Dell Inspiron 2500",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"),
--			DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
--			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
-+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-+			DMI_MATCH(DMI_BIOS_VERSION, "A11"), },
- 	},
- 	{	/* Allow interrupts during suspend on Dell Inspiron laptops*/
- 		set_apm_ints, "Dell Inspiron", {
-@@ -2014,15 +2039,15 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
- 		apm_is_horked, "Dell Dimension 4100",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"),
--			DMI_MATCH(DMI_BIOS_VENDOR,"Intel Corp."),
--			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
-+			DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."),
-+			DMI_MATCH(DMI_BIOS_VERSION, "A11"), },
- 	},
- 	{	/* Allow interrupts during suspend on Compaq Laptops*/
- 		set_apm_ints, "Compaq 12XL125",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "Compaq PC"),
- 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
--			DMI_MATCH(DMI_BIOS_VERSION,"4.06"), },
-+			DMI_MATCH(DMI_BIOS_VERSION, "4.06"), },
- 	},
- 	{	/* Allow interrupts during APM or the clock goes slow */
- 		set_apm_ints, "ASUSTeK",
-@@ -2064,15 +2089,15 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
- 		apm_is_horked, "Sharp PC-PJ/AX",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "PC-PJ/AX"),
--			DMI_MATCH(DMI_BIOS_VENDOR,"SystemSoft"),
--			DMI_MATCH(DMI_BIOS_VERSION,"Version R2.08"), },
-+			DMI_MATCH(DMI_BIOS_VENDOR, "SystemSoft"),
-+			DMI_MATCH(DMI_BIOS_VERSION, "Version R2.08"), },
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Dell Inspiron 2500",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"),
--			DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
--			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
-+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-+			DMI_MATCH(DMI_BIOS_VERSION, "A11"), },
- 	},
- 	{	/* APM idle hangs */
- 		apm_likes_to_melt, "Jabil AMD",
-@@ -2203,11 +2228,11 @@ static int __init apm_init(void)
- 		return -ENODEV;
- 	}
- 	printk(KERN_INFO
--		"apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n",
--		((apm_info.bios.version >> 8) & 0xff),
--		(apm_info.bios.version & 0xff),
--		apm_info.bios.flags,
--		driver_version);
-+	       "apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n",
-+	       ((apm_info.bios.version >> 8) & 0xff),
-+	       (apm_info.bios.version & 0xff),
-+	       apm_info.bios.flags,
-+	       driver_version);
- 	if ((apm_info.bios.flags & APM_32_BIT_SUPPORT) == 0) {
- 		printk(KERN_INFO "apm: no 32 bit BIOS support\n");
- 		return -ENODEV;
-@@ -2312,9 +2337,9 @@ static int __init apm_init(void)
- 	}
- 	wake_up_process(kapmd_task);
- 
--	if (num_online_cpus() > 1 && !smp ) {
-+	if (num_online_cpus() > 1 && !smp) {
- 		printk(KERN_NOTICE
--		   "apm: disabled - APM is not SMP safe (power off active).\n");
-+		       "apm: disabled - APM is not SMP safe (power off active).\n");
- 		return 0;
- 	}
- 
-@@ -2339,7 +2364,7 @@ static int __init apm_init(void)
- 
- static void __exit apm_exit(void)
- {
--	int	error;
-+	int error;
- 
- 	if (set_pm_idle) {
- 		pm_idle = original_pm_idle;
-diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
-index 0e45981..afd8446 100644
---- a/arch/x86/kernel/asm-offsets_32.c
-+++ b/arch/x86/kernel/asm-offsets_32.c
-@@ -38,15 +38,15 @@ void foo(void);
- 
- void foo(void)
- {
--	OFFSET(SIGCONTEXT_eax, sigcontext, eax);
--	OFFSET(SIGCONTEXT_ebx, sigcontext, ebx);
--	OFFSET(SIGCONTEXT_ecx, sigcontext, ecx);
--	OFFSET(SIGCONTEXT_edx, sigcontext, edx);
--	OFFSET(SIGCONTEXT_esi, sigcontext, esi);
--	OFFSET(SIGCONTEXT_edi, sigcontext, edi);
--	OFFSET(SIGCONTEXT_ebp, sigcontext, ebp);
--	OFFSET(SIGCONTEXT_esp, sigcontext, esp);
--	OFFSET(SIGCONTEXT_eip, sigcontext, eip);
-+	OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
-+	OFFSET(IA32_SIGCONTEXT_bx, sigcontext, bx);
-+	OFFSET(IA32_SIGCONTEXT_cx, sigcontext, cx);
-+	OFFSET(IA32_SIGCONTEXT_dx, sigcontext, dx);
-+	OFFSET(IA32_SIGCONTEXT_si, sigcontext, si);
-+	OFFSET(IA32_SIGCONTEXT_di, sigcontext, di);
-+	OFFSET(IA32_SIGCONTEXT_bp, sigcontext, bp);
-+	OFFSET(IA32_SIGCONTEXT_sp, sigcontext, sp);
-+	OFFSET(IA32_SIGCONTEXT_ip, sigcontext, ip);
- 	BLANK();
- 
- 	OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
-@@ -70,39 +70,38 @@ void foo(void)
- 	OFFSET(TI_cpu, thread_info, cpu);
- 	BLANK();
- 
--	OFFSET(GDS_size, Xgt_desc_struct, size);
--	OFFSET(GDS_address, Xgt_desc_struct, address);
--	OFFSET(GDS_pad, Xgt_desc_struct, pad);
-+	OFFSET(GDS_size, desc_ptr, size);
-+	OFFSET(GDS_address, desc_ptr, address);
- 	BLANK();
- 
--	OFFSET(PT_EBX, pt_regs, ebx);
--	OFFSET(PT_ECX, pt_regs, ecx);
--	OFFSET(PT_EDX, pt_regs, edx);
--	OFFSET(PT_ESI, pt_regs, esi);
--	OFFSET(PT_EDI, pt_regs, edi);
--	OFFSET(PT_EBP, pt_regs, ebp);
--	OFFSET(PT_EAX, pt_regs, eax);
--	OFFSET(PT_DS,  pt_regs, xds);
--	OFFSET(PT_ES,  pt_regs, xes);
--	OFFSET(PT_FS,  pt_regs, xfs);
--	OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
--	OFFSET(PT_EIP, pt_regs, eip);
--	OFFSET(PT_CS,  pt_regs, xcs);
--	OFFSET(PT_EFLAGS, pt_regs, eflags);
--	OFFSET(PT_OLDESP, pt_regs, esp);
--	OFFSET(PT_OLDSS,  pt_regs, xss);
-+	OFFSET(PT_EBX, pt_regs, bx);
-+	OFFSET(PT_ECX, pt_regs, cx);
-+	OFFSET(PT_EDX, pt_regs, dx);
-+	OFFSET(PT_ESI, pt_regs, si);
-+	OFFSET(PT_EDI, pt_regs, di);
-+	OFFSET(PT_EBP, pt_regs, bp);
-+	OFFSET(PT_EAX, pt_regs, ax);
-+	OFFSET(PT_DS,  pt_regs, ds);
-+	OFFSET(PT_ES,  pt_regs, es);
-+	OFFSET(PT_FS,  pt_regs, fs);
-+	OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
-+	OFFSET(PT_EIP, pt_regs, ip);
-+	OFFSET(PT_CS,  pt_regs, cs);
-+	OFFSET(PT_EFLAGS, pt_regs, flags);
-+	OFFSET(PT_OLDESP, pt_regs, sp);
-+	OFFSET(PT_OLDSS,  pt_regs, ss);
- 	BLANK();
- 
- 	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
--	OFFSET(RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
-+	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
- 	BLANK();
- 
- 	OFFSET(pbe_address, pbe, address);
- 	OFFSET(pbe_orig_address, pbe, orig_address);
- 	OFFSET(pbe_next, pbe, next);
- 
--	/* Offset from the sysenter stack to tss.esp0 */
--	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
-+	/* Offset from the sysenter stack to tss.sp0 */
-+	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
- 		 sizeof(struct tss_struct));
- 
- 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
-@@ -111,8 +110,6 @@ void foo(void)
- 	DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
- 	DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
- 
--	DEFINE(VDSO_PRELINK_asm, VDSO_PRELINK);
--
- 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
- 
- #ifdef CONFIG_PARAVIRT
-@@ -123,7 +120,7 @@ void foo(void)
- 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
- 	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
- 	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
--	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
-+	OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops, irq_enable_syscall_ret);
- 	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
- #endif
- 
-diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
-index d1b6ed9..494e1e0 100644
---- a/arch/x86/kernel/asm-offsets_64.c
-+++ b/arch/x86/kernel/asm-offsets_64.c
-@@ -38,7 +38,6 @@ int main(void)
- #define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
- 	ENTRY(state);
- 	ENTRY(flags); 
--	ENTRY(thread); 
- 	ENTRY(pid);
- 	BLANK();
- #undef ENTRY
-@@ -47,6 +46,9 @@ int main(void)
- 	ENTRY(addr_limit);
- 	ENTRY(preempt_count);
- 	ENTRY(status);
-+#ifdef CONFIG_IA32_EMULATION
-+	ENTRY(sysenter_return);
-+#endif
- 	BLANK();
- #undef ENTRY
- #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
-@@ -59,17 +61,31 @@ int main(void)
- 	ENTRY(data_offset);
- 	BLANK();
- #undef ENTRY
-+#ifdef CONFIG_PARAVIRT
-+	BLANK();
-+	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
-+	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
-+	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
-+	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
-+	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
-+	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-+	OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops, irq_enable_syscall_ret);
-+	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
-+	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
-+#endif
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	outb_p(val, RTC_PORT(1));
++	lock_cmos_suffix(addr);
++}
++EXPORT_SYMBOL(rtc_cmos_write);
 +
++static int set_rtc_mmss(unsigned long nowtime)
++{
++	int retval;
++	unsigned long flags;
 +
- #ifdef CONFIG_IA32_EMULATION
- #define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
--	ENTRY(eax);
--	ENTRY(ebx);
--	ENTRY(ecx);
--	ENTRY(edx);
--	ENTRY(esi);
--	ENTRY(edi);
--	ENTRY(ebp);
--	ENTRY(esp);
--	ENTRY(eip);
-+	ENTRY(ax);
-+	ENTRY(bx);
-+	ENTRY(cx);
-+	ENTRY(dx);
-+	ENTRY(si);
-+	ENTRY(di);
-+	ENTRY(bp);
-+	ENTRY(sp);
-+	ENTRY(ip);
- 	BLANK();
- #undef ENTRY
- 	DEFINE(IA32_RT_SIGFRAME_sigcontext,
-@@ -81,14 +97,14 @@ int main(void)
- 	DEFINE(pbe_next, offsetof(struct pbe, next));
- 	BLANK();
- #define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
--	ENTRY(rbx);
--	ENTRY(rbx);
--	ENTRY(rcx);
--	ENTRY(rdx);
--	ENTRY(rsp);
--	ENTRY(rbp);
--	ENTRY(rsi);
--	ENTRY(rdi);
-+	ENTRY(bx);
-+	ENTRY(bx);
-+	ENTRY(cx);
-+	ENTRY(dx);
-+	ENTRY(sp);
-+	ENTRY(bp);
-+	ENTRY(si);
-+	ENTRY(di);
- 	ENTRY(r8);
- 	ENTRY(r9);
- 	ENTRY(r10);
-@@ -97,7 +113,7 @@ int main(void)
- 	ENTRY(r13);
- 	ENTRY(r14);
- 	ENTRY(r15);
--	ENTRY(eflags);
-+	ENTRY(flags);
- 	BLANK();
- #undef ENTRY
- #define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
-@@ -108,7 +124,7 @@ int main(void)
- 	ENTRY(cr8);
- 	BLANK();
- #undef ENTRY
--	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
-+	DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
- 	BLANK();
- 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
- 	BLANK();
-diff --git a/arch/x86/kernel/bootflag.c b/arch/x86/kernel/bootflag.c
-index 0b98605..30f25a7 100644
---- a/arch/x86/kernel/bootflag.c
-+++ b/arch/x86/kernel/bootflag.c
-@@ -1,8 +1,6 @@
- /*
-  *	Implement 'Simple Boot Flag Specification 2.0'
-  */
--
--
- #include <linux/types.h>
- #include <linux/kernel.h>
- #include <linux/init.h>
-@@ -14,40 +12,38 @@
- 
- #include <linux/mc146818rtc.h>
- 
--
- #define SBF_RESERVED (0x78)
- #define SBF_PNPOS    (1<<0)
- #define SBF_BOOTING  (1<<1)
- #define SBF_DIAG     (1<<2)
- #define SBF_PARITY   (1<<7)
- 
--
- int sbf_port __initdata = -1;	/* set via acpi_boot_init() */
- 
--
- static int __init parity(u8 v)
- {
- 	int x = 0;
- 	int i;
--	
--	for(i=0;i<8;i++)
--	{
--		x^=(v&1);
--		v>>=1;
++	spin_lock_irqsave(&rtc_lock, flags);
++	retval = set_wallclock(nowtime);
++	spin_unlock_irqrestore(&rtc_lock, flags);
 +
-+	for (i = 0; i < 8; i++) {
-+		x ^= (v & 1);
-+		v >>= 1;
- 	}
++	return retval;
++}
 +
- 	return x;
- }
- 
- static void __init sbf_write(u8 v)
- {
- 	unsigned long flags;
--	if(sbf_port != -1)
--	{
++/* not static: needed by APM */
++unsigned long read_persistent_clock(void)
++{
++	unsigned long retval, flags;
 +
-+	if (sbf_port != -1) {
- 		v &= ~SBF_PARITY;
--		if(!parity(v))
--			v|=SBF_PARITY;
-+		if (!parity(v))
-+			v |= SBF_PARITY;
- 
--		printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n", sbf_port, v);
-+		printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n",
-+			sbf_port, v);
- 
- 		spin_lock_irqsave(&rtc_lock, flags);
- 		CMOS_WRITE(v, sbf_port);
-@@ -57,33 +53,41 @@ static void __init sbf_write(u8 v)
- 
- static u8 __init sbf_read(void)
- {
--	u8 v;
- 	unsigned long flags;
--	if(sbf_port == -1)
-+	u8 v;
++	spin_lock_irqsave(&rtc_lock, flags);
++	retval = get_wallclock();
++	spin_unlock_irqrestore(&rtc_lock, flags);
 +
-+	if (sbf_port == -1)
- 		return 0;
++	return retval;
++}
 +
- 	spin_lock_irqsave(&rtc_lock, flags);
- 	v = CMOS_READ(sbf_port);
- 	spin_unlock_irqrestore(&rtc_lock, flags);
++int update_persistent_clock(struct timespec now)
++{
++	return set_rtc_mmss(now.tv_sec);
++}
 +
- 	return v;
- }
++unsigned long long native_read_tsc(void)
++{
++	return __native_read_tsc();
++}
++EXPORT_SYMBOL(native_read_tsc);
++
+diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
+index 3558ac7..309366f 100644
+--- a/arch/x86/kernel/setup64.c
++++ b/arch/x86/kernel/setup64.c
+@@ -24,7 +24,11 @@
+ #include <asm/sections.h>
+ #include <asm/setup.h>
+ 
++#ifndef CONFIG_DEBUG_BOOT_PARAMS
+ struct boot_params __initdata boot_params;
++#else
++struct boot_params boot_params;
++#endif
  
- static int __init sbf_value_valid(u8 v)
- {
--	if(v&SBF_RESERVED)		/* Reserved bits */
-+	if (v & SBF_RESERVED)		/* Reserved bits */
- 		return 0;
--	if(!parity(v))
-+	if (!parity(v))
- 		return 0;
-+
- 	return 1;
- }
+ cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
  
- static int __init sbf_init(void)
- {
- 	u8 v;
--	if(sbf_port == -1)
-+
-+	if (sbf_port == -1)
- 		return 0;
-+
- 	v = sbf_read();
--	if(!sbf_value_valid(v))
--		printk(KERN_WARNING "Simple Boot Flag value 0x%x read from CMOS RAM was invalid\n",v);
-+	if (!sbf_value_valid(v)) {
-+		printk(KERN_WARNING "Simple Boot Flag value 0x%x read from "
-+			"CMOS RAM was invalid\n", v);
-+	}
+@@ -37,6 +41,8 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
+ char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
  
- 	v &= ~SBF_RESERVED;
- 	v &= ~SBF_BOOTING;
-@@ -92,7 +96,7 @@ static int __init sbf_init(void)
- 	v |= SBF_PNPOS;
- #endif
- 	sbf_write(v);
+ unsigned long __supported_pte_mask __read_mostly = ~0UL;
++EXPORT_SYMBOL_GPL(__supported_pte_mask);
 +
- 	return 0;
- }
--
- module_init(sbf_init);
-diff --git a/arch/x86/kernel/bugs_64.c b/arch/x86/kernel/bugs_64.c
-index 9a189ce..8f520f9 100644
---- a/arch/x86/kernel/bugs_64.c
-+++ b/arch/x86/kernel/bugs_64.c
-@@ -13,7 +13,6 @@
- void __init check_bugs(void)
- {
- 	identify_cpu(&boot_cpu_data);
--	mtrr_bp_init();
- #if !defined(CONFIG_SMP)
- 	printk("CPU: ");
- 	print_cpu_info(&boot_cpu_data);
-diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
-index 3e91d3e..238468a 100644
---- a/arch/x86/kernel/cpu/addon_cpuid_features.c
-+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
-@@ -45,6 +45,6 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
- 			&regs[CR_ECX], &regs[CR_EDX]);
- 
- 		if (regs[cb->reg] & (1 << cb->bit))
--			set_bit(cb->feature, c->x86_capability);
-+			set_cpu_cap(c, cb->feature);
- 	}
- }
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index 1ff88c7..06fa159 100644
---- a/arch/x86/kernel/cpu/amd.c
-+++ b/arch/x86/kernel/cpu/amd.c
-@@ -63,6 +63,15 @@ static __cpuinit int amd_apic_timer_broken(void)
+ static int do_not_nx __cpuinitdata = 0;
  
- int force_mwait __cpuinitdata;
+ /* noexec=on|off
+@@ -80,6 +86,43 @@ static int __init nonx32_setup(char *str)
+ __setup("noexec32=", nonx32_setup);
  
-+void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+ /*
++ * Copy data used in early init routines from the initial arrays to the
++ * per cpu data areas.  These arrays then become expendable and the
++ * *_early_ptr's are zeroed indicating that the static arrays are gone.
++ */
++static void __init setup_per_cpu_maps(void)
 +{
-+	if (cpuid_eax(0x80000000) >= 0x80000007) {
-+		c->x86_power = cpuid_edx(0x80000007);
-+		if (c->x86_power & (1<<8))
-+			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
++	int cpu;
++
++	for_each_possible_cpu(cpu) {
++#ifdef CONFIG_SMP
++		if (per_cpu_offset(cpu)) {
++#endif
++			per_cpu(x86_cpu_to_apicid, cpu) =
++						x86_cpu_to_apicid_init[cpu];
++			per_cpu(x86_bios_cpu_apicid, cpu) =
++						x86_bios_cpu_apicid_init[cpu];
++#ifdef CONFIG_NUMA
++			per_cpu(x86_cpu_to_node_map, cpu) =
++						x86_cpu_to_node_map_init[cpu];
++#endif
++#ifdef CONFIG_SMP
++		}
++		else
++			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
++									cpu);
++#endif
 +	}
++
++	/* indicate the early static arrays will soon be gone */
++	x86_cpu_to_apicid_early_ptr = NULL;
++	x86_bios_cpu_apicid_early_ptr = NULL;
++#ifdef CONFIG_NUMA
++	x86_cpu_to_node_map_early_ptr = NULL;
++#endif
 +}
 +
- static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- {
- 	u32 l, h;
-@@ -85,6 +94,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- 	}
- #endif
++/*
+  * Great future plan:
+  * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+  * Always point %gs to its beginning
+@@ -100,18 +143,21 @@ void __init setup_per_cpu_areas(void)
+ 	for_each_cpu_mask (i, cpu_possible_map) {
+ 		char *ptr;
  
-+	early_init_amd(c);
-+
- 	/*
- 	 *	FIXME: We should handle the K5 here. Set up the write
- 	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
-@@ -257,12 +268,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- 		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+-		if (!NODE_DATA(cpu_to_node(i))) {
++		if (!NODE_DATA(early_cpu_to_node(i))) {
+ 			printk("cpu with no node %d, num_online_nodes %d\n",
+ 			       i, num_online_nodes());
+ 			ptr = alloc_bootmem_pages(size);
+ 		} else { 
+-			ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
++			ptr = alloc_bootmem_pages_node(NODE_DATA(early_cpu_to_node(i)), size);
+ 		}
+ 		if (!ptr)
+ 			panic("Cannot allocate cpu data for CPU %d\n", i);
+ 		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
+ 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
  	}
- 
--	if (cpuid_eax(0x80000000) >= 0x80000007) {
--		c->x86_power = cpuid_edx(0x80000007);
--		if (c->x86_power & (1<<8))
--			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
--	}
--
- #ifdef CONFIG_X86_HT
- 	/*
- 	 * On a AMD multi core setup the lower bits of the APIC id
-@@ -295,12 +300,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- 		local_apic_timer_disabled = 1;
- #endif
- 
--	if (c->x86 == 0x10 && !force_mwait)
--		clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
--
- 	/* K6s reports MCEs but don't actually have all the MSRs */
- 	if (c->x86 < 6)
- 		clear_bit(X86_FEATURE_MCE, c->x86_capability);
 +
-+	if (cpu_has_xmm)
-+		set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
- }
- 
- static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index 205fd5b..9b95edc 100644
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -11,6 +11,7 @@
- #include <linux/utsname.h>
- #include <asm/bugs.h>
- #include <asm/processor.h>
-+#include <asm/processor-flags.h>
- #include <asm/i387.h>
- #include <asm/msr.h>
- #include <asm/paravirt.h>
-@@ -35,7 +36,7 @@ __setup("mca-pentium", mca_pentium);
- static int __init no_387(char *s)
- {
- 	boot_cpu_data.hard_math = 0;
--	write_cr0(0xE | read_cr0());
-+	write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
- 	return 1;
- }
++	/* setup percpu data maps early */
++	setup_per_cpu_maps();
+ } 
  
-@@ -153,7 +154,7 @@ static void __init check_config(void)
-  * If we configured ourselves for a TSC, we'd better have one!
-  */
- #ifdef CONFIG_X86_TSC
--	if (!cpu_has_tsc && !tsc_disable)
-+	if (!cpu_has_tsc)
- 		panic("Kernel compiled for Pentium+, requires TSC feature!");
+ void pda_init(int cpu)
+@@ -169,7 +215,8 @@ void syscall_init(void)
  #endif
  
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index e2fcf20..db28aa9 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -22,43 +22,48 @@
- #include "cpu.h"
+ 	/* Flags to clear on syscall */
+-	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
++	wrmsrl(MSR_SYSCALL_MASK,
++	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+ }
  
- DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
--	[GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
--	[GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
--	[GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
--	[GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
-+	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
-+	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
-+	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
-+	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
- 	/*
- 	 * Segments used for calling PnP BIOS have byte granularity.
- 	 * They code segments and data segments have fixed 64k limits,
- 	 * the transfer segment sizes are set at run time.
- 	 */
--	[GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
--	[GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
--	[GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
--	[GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
--	[GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
-+	/* 32-bit code */
-+	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
-+	/* 16-bit code */
-+	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
-+	/* 16-bit data */
-+	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
-+	/* 16-bit data */
-+	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
-+	/* 16-bit data */
-+	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
- 	/*
- 	 * The APM segments have byte granularity and their bases
- 	 * are set at run time.  All have 64k limits.
+ void __cpuinit check_efer(void)
+@@ -227,7 +274,7 @@ void __cpuinit cpu_init (void)
+ 	 * and set up the GDT descriptor:
  	 */
--	[GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
-+	/* 32-bit code */
-+	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
- 	/* 16-bit code */
--	[GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
--	[GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
-+	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
-+	/* data */
-+	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+ 	if (cpu)
+- 		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
++		memcpy(get_cpu_gdt_table(cpu), cpu_gdt_table, GDT_SIZE);
  
--	[GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
--	[GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
-+	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-+	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
- } };
- EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+ 	cpu_gdt_descr[cpu].size = GDT_SIZE;
+ 	load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
+@@ -257,10 +304,10 @@ void __cpuinit cpu_init (void)
+ 				      v, cpu); 
+ 		}
+ 		estacks += PAGE_SIZE << order[v];
+-		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
++		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
+ 	}
  
-+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
-+
- static int cachesize_override __cpuinitdata = -1;
--static int disable_x86_fxsr __cpuinitdata;
- static int disable_x86_serial_nr __cpuinitdata = 1;
--static int disable_x86_sep __cpuinitdata;
+-	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
++	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ 	/*
+ 	 * <= is required because the CPU will access up to
+ 	 * 8 bits beyond the end of the IO permission bitmap.
+diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
+index 9c24b45..62adc5f 100644
+--- a/arch/x86/kernel/setup_32.c
++++ b/arch/x86/kernel/setup_32.c
+@@ -44,9 +44,12 @@
+ #include <linux/crash_dump.h>
+ #include <linux/dmi.h>
+ #include <linux/pfn.h>
++#include <linux/pci.h>
++#include <linux/init_ohci1394_dma.h>
  
- struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+ #include <video/edid.h>
  
--extern int disable_pse;
--
- static void __cpuinit default_init(struct cpuinfo_x86 * c)
- {
- 	/* Not much we can do here... */
-@@ -207,16 +212,8 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
++#include <asm/mtrr.h>
+ #include <asm/apic.h>
+ #include <asm/e820.h>
+ #include <asm/mpspec.h>
+@@ -67,14 +70,83 @@
+    address, and must not be in the .bss segment! */
+ unsigned long init_pg_tables_end __initdata = ~0UL;
  
- static int __init x86_fxsr_setup(char * s)
- {
--	/* Tell all the other CPUs to not use it... */
--	disable_x86_fxsr = 1;
+-int disable_pse __cpuinitdata = 0;
 -
--	/*
--	 * ... and clear the bits early in the boot_cpu_data
--	 * so that the bootup process doesn't try to do this
--	 * either.
--	 */
--	clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
--	clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
-+	setup_clear_cpu_cap(X86_FEATURE_FXSR);
-+	setup_clear_cpu_cap(X86_FEATURE_XMM);
- 	return 1;
- }
- __setup("nofxsr", x86_fxsr_setup);
-@@ -224,7 +221,7 @@ __setup("nofxsr", x86_fxsr_setup);
- 
- static int __init x86_sep_setup(char * s)
- {
--	disable_x86_sep = 1;
-+	setup_clear_cpu_cap(X86_FEATURE_SEP);
- 	return 1;
- }
- __setup("nosep", x86_sep_setup);
-@@ -281,6 +278,33 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
- 			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
- 	}
- }
-+static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
-+{
-+	u32 tfms, xlvl;
-+	int ebx;
-+
-+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-+	if (have_cpuid_p()) {
-+		/* Intel-defined flags: level 0x00000001 */
-+		if (c->cpuid_level >= 0x00000001) {
-+			u32 capability, excap;
-+			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-+			c->x86_capability[0] = capability;
-+			c->x86_capability[4] = excap;
-+		}
-+
-+		/* AMD-defined flags: level 0x80000001 */
-+		xlvl = cpuid_eax(0x80000000);
-+		if ((xlvl & 0xffff0000) == 0x80000000) {
-+			if (xlvl >= 0x80000001) {
-+				c->x86_capability[1] = cpuid_edx(0x80000001);
-+				c->x86_capability[6] = cpuid_ecx(0x80000001);
-+			}
-+		}
+ /*
+  * Machine setup..
+  */
+-extern struct resource code_resource;
+-extern struct resource data_resource;
+-extern struct resource bss_resource;
++static struct resource data_resource = {
++	.name	= "Kernel data",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
++};
 +
-+	}
++static struct resource code_resource = {
++	.name	= "Kernel code",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
++};
 +
-+}
- 
- /* Do minimum CPU detection early.
-    Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-@@ -300,6 +324,17 @@ static void __init early_cpu_detect(void)
- 	cpu_detect(c);
- 
- 	get_cpu_vendor(c, 1);
++static struct resource bss_resource = {
++	.name	= "Kernel bss",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
++};
 +
-+	switch (c->x86_vendor) {
-+	case X86_VENDOR_AMD:
-+		early_init_amd(c);
-+		break;
-+	case X86_VENDOR_INTEL:
-+		early_init_intel(c);
-+		break;
-+	}
++static struct resource video_ram_resource = {
++	.name	= "Video RAM area",
++	.start	= 0xa0000,
++	.end	= 0xbffff,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
++};
 +
-+	early_get_cap(c);
- }
- 
- static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
-@@ -357,8 +392,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
- 		init_scattered_cpuid_features(c);
- 	}
- 
--	early_intel_workaround(c);
--
- #ifdef CONFIG_X86_HT
- 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
- #endif
-@@ -392,7 +425,7 @@ __setup("serialnumber", x86_serial_nr_setup);
- /*
-  * This does the hard work of actually picking apart the CPU stuff...
-  */
--static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- {
- 	int i;
++static struct resource standard_io_resources[] = { {
++	.name	= "dma1",
++	.start	= 0x0000,
++	.end	= 0x001f,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "pic1",
++	.start	= 0x0020,
++	.end	= 0x0021,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name   = "timer0",
++	.start	= 0x0040,
++	.end    = 0x0043,
++	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name   = "timer1",
++	.start  = 0x0050,
++	.end    = 0x0053,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "keyboard",
++	.start	= 0x0060,
++	.end	= 0x006f,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "dma page reg",
++	.start	= 0x0080,
++	.end	= 0x008f,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "pic2",
++	.start	= 0x00a0,
++	.end	= 0x00a1,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "dma2",
++	.start	= 0x00c0,
++	.end	= 0x00df,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "fpu",
++	.start	= 0x00f0,
++	.end	= 0x00ff,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++} };
  
-@@ -418,20 +451,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ /* cpu data as detected by the assembly code in head.S */
+ struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+@@ -116,13 +188,17 @@ extern int root_mountflags;
  
- 	generic_identify(c);
+ unsigned long saved_videomode;
  
--	printk(KERN_DEBUG "CPU: After generic identify, caps:");
--	for (i = 0; i < NCAPINTS; i++)
--		printk(" %08lx", c->x86_capability[i]);
--	printk("\n");
--
--	if (this_cpu->c_identify) {
-+	if (this_cpu->c_identify)
- 		this_cpu->c_identify(c);
+-#define RAMDISK_IMAGE_START_MASK  	0x07FF
++#define RAMDISK_IMAGE_START_MASK	0x07FF
+ #define RAMDISK_PROMPT_FLAG		0x8000
+-#define RAMDISK_LOAD_FLAG		0x4000	
++#define RAMDISK_LOAD_FLAG		0x4000
  
--		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
--		for (i = 0; i < NCAPINTS; i++)
--			printk(" %08lx", c->x86_capability[i]);
--		printk("\n");
--	}
--
- 	/*
- 	 * Vendor-specific initialization.  In this section we
- 	 * canonicalize the feature flags, meaning if there are
-@@ -453,23 +475,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- 	 * we do "generic changes."
- 	 */
+ static char __initdata command_line[COMMAND_LINE_SIZE];
  
--	/* TSC disabled? */
--	if ( tsc_disable )
--		clear_bit(X86_FEATURE_TSC, c->x86_capability);
--
--	/* FXSR disabled? */
--	if (disable_x86_fxsr) {
--		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
--		clear_bit(X86_FEATURE_XMM, c->x86_capability);
--	}
--
--	/* SEP disabled? */
--	if (disable_x86_sep)
--		clear_bit(X86_FEATURE_SEP, c->x86_capability);
--
--	if (disable_pse)
--		clear_bit(X86_FEATURE_PSE, c->x86_capability);
--
- 	/* If the model name is still unset, do table lookup. */
- 	if ( !c->x86_model_id[0] ) {
- 		char *p;
-@@ -482,13 +487,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- 				c->x86, c->x86_model);
- 	}
++#ifndef CONFIG_DEBUG_BOOT_PARAMS
+ struct boot_params __initdata boot_params;
++#else
++struct boot_params boot_params;
++#endif
  
--	/* Now the feature flags better reflect actual CPU features! */
--
--	printk(KERN_DEBUG "CPU: After all inits, caps:");
--	for (i = 0; i < NCAPINTS; i++)
--		printk(" %08lx", c->x86_capability[i]);
--	printk("\n");
--
- 	/*
- 	 * On SMP, boot_cpu_data holds the common feature set between
- 	 * all CPUs; so make sure that we indicate which features are
-@@ -501,8 +499,14 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
- 	}
+ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+ struct edd edd;
+@@ -166,8 +242,7 @@ static int __init parse_mem(char *arg)
+ 		return -EINVAL;
  
-+	/* Clear all flags overriden by options */
-+	for (i = 0; i < NCAPINTS; i++)
-+		c->x86_capability[i] ^= cleared_cpu_caps[i];
-+
- 	/* Init Machine Check Exception if available. */
- 	mcheck_init(c);
+ 	if (strcmp(arg, "nopentium") == 0) {
+-		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+-		disable_pse = 1;
++		setup_clear_cpu_cap(X86_FEATURE_PSE);
+ 	} else {
+ 		/* If the user specifies memory size, we
+ 		 * limit the BIOS-provided memory map to
+@@ -176,7 +251,7 @@ static int __init parse_mem(char *arg)
+ 		 * trim the existing memory map.
+ 		 */
+ 		unsigned long long mem_size;
+- 
 +
-+	select_idle_routine(c);
- }
- 
- void __init identify_boot_cpu(void)
-@@ -510,7 +514,6 @@ void __init identify_boot_cpu(void)
- 	identify_cpu(&boot_cpu_data);
- 	sysenter_setup();
- 	enable_sep_cpu();
--	mtrr_bp_init();
+ 		mem_size = memparse(arg, &arg);
+ 		limit_regions(mem_size);
+ 		user_defined_memmap = 1;
+@@ -315,7 +390,7 @@ static void __init reserve_ebda_region(void)
+ 	unsigned int addr;
+ 	addr = get_bios_ebda();
+ 	if (addr)
+-		reserve_bootmem(addr, PAGE_SIZE);	
++		reserve_bootmem(addr, PAGE_SIZE);
  }
  
- void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
-@@ -567,6 +570,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
- }
+ #ifndef CONFIG_NEED_MULTIPLE_NODES
+@@ -420,6 +495,100 @@ static inline void __init reserve_crashkernel(void)
+ {}
  #endif
  
-+static __init int setup_noclflush(char *arg)
++#ifdef CONFIG_BLK_DEV_INITRD
++
++static bool do_relocate_initrd = false;
++
++static void __init reserve_initrd(void)
 +{
-+	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
-+	return 1;
++	unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
++	unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
++	unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
++	unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;
++	unsigned long ramdisk_here;
++
++	initrd_start = 0;
++
++	if (!boot_params.hdr.type_of_loader ||
++	    !ramdisk_image || !ramdisk_size)
++		return;		/* No initrd provided by bootloader */
++
++	if (ramdisk_end < ramdisk_image) {
++		printk(KERN_ERR "initrd wraps around end of memory, "
++		       "disabling initrd\n");
++		return;
++	}
++	if (ramdisk_size >= end_of_lowmem/2) {
++		printk(KERN_ERR "initrd too large to handle, "
++		       "disabling initrd\n");
++		return;
++	}
++	if (ramdisk_end <= end_of_lowmem) {
++		/* All in lowmem, easy case */
++		reserve_bootmem(ramdisk_image, ramdisk_size);
++		initrd_start = ramdisk_image + PAGE_OFFSET;
++		initrd_end = initrd_start+ramdisk_size;
++		return;
++	}
++
++	/* We need to move the initrd down into lowmem */
++	ramdisk_here = (end_of_lowmem - ramdisk_size) & PAGE_MASK;
++
++	/* Note: this includes all the lowmem currently occupied by
++	   the initrd, we rely on that fact to keep the data intact. */
++	reserve_bootmem(ramdisk_here, ramdisk_size);
++	initrd_start = ramdisk_here + PAGE_OFFSET;
++	initrd_end   = initrd_start + ramdisk_size;
++
++	do_relocate_initrd = true;
 +}
-+__setup("noclflush", setup_noclflush);
 +
- void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
- {
- 	char *vendor = NULL;
-@@ -590,6 +600,17 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
- 		printk("\n");
- }
- 
-+static __init int setup_disablecpuid(char *arg)
++#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
++
++static void __init relocate_initrd(void)
 +{
-+	int bit;
-+	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
-+		setup_clear_cpu_cap(bit);
-+	else
-+		return 0;
-+	return 1;
++	unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
++	unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
++	unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;
++	unsigned long ramdisk_here;
++	unsigned long slop, clen, mapaddr;
++	char *p, *q;
++
++	if (!do_relocate_initrd)
++		return;
++
++	ramdisk_here = initrd_start - PAGE_OFFSET;
++
++	q = (char *)initrd_start;
++
++	/* Copy any lowmem portion of the initrd */
++	if (ramdisk_image < end_of_lowmem) {
++		clen = end_of_lowmem - ramdisk_image;
++		p = (char *)__va(ramdisk_image);
++		memcpy(q, p, clen);
++		q += clen;
++		ramdisk_image += clen;
++		ramdisk_size  -= clen;
++	}
++
++	/* Copy the highmem portion of the initrd */
++	while (ramdisk_size) {
++		slop = ramdisk_image & ~PAGE_MASK;
++		clen = ramdisk_size;
++		if (clen > MAX_MAP_CHUNK-slop)
++			clen = MAX_MAP_CHUNK-slop;
++		mapaddr = ramdisk_image & PAGE_MASK;
++		p = early_ioremap(mapaddr, clen+slop);
++		memcpy(q, p+slop, clen);
++		early_iounmap(p, clen+slop);
++		q += clen;
++		ramdisk_image += clen;
++		ramdisk_size  -= clen;
++	}
 +}
-+__setup("clearcpuid=", setup_disablecpuid);
 +
- cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
- 
- /* This is hacky. :)
-@@ -620,21 +641,13 @@ void __init early_cpu_init(void)
- 	nexgen_init_cpu();
- 	umc_init_cpu();
- 	early_cpu_detect();
--
--#ifdef CONFIG_DEBUG_PAGEALLOC
--	/* pse is not compatible with on-the-fly unmapping,
--	 * disable it even if the cpus claim to support it.
--	 */
--	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
--	disable_pse = 1;
--#endif
- }
- 
- /* Make sure %fs is initialized properly in idle threads */
- struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
++#endif /* CONFIG_BLK_DEV_INITRD */
++
+ void __init setup_bootmem_allocator(void)
  {
- 	memset(regs, 0, sizeof(struct pt_regs));
--	regs->xfs = __KERNEL_PERCPU;
-+	regs->fs = __KERNEL_PERCPU;
- 	return regs;
+ 	unsigned long bootmap_size;
+@@ -475,26 +644,10 @@ void __init setup_bootmem_allocator(void)
+ 	 */
+ 	find_smp_config();
+ #endif
+-	numa_kva_reserve();
+ #ifdef CONFIG_BLK_DEV_INITRD
+-	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+-		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
+-		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
+-		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
+-		unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;
+-
+-		if (ramdisk_end <= end_of_lowmem) {
+-			reserve_bootmem(ramdisk_image, ramdisk_size);
+-			initrd_start = ramdisk_image + PAGE_OFFSET;
+-			initrd_end = initrd_start+ramdisk_size;
+-		} else {
+-			printk(KERN_ERR "initrd extends beyond end of memory "
+-			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+-			       ramdisk_end, end_of_lowmem);
+-			initrd_start = 0;
+-		}
+-	}
++	reserve_initrd();
+ #endif
++	numa_kva_reserve();
+ 	reserve_crashkernel();
  }
  
-@@ -642,7 +655,7 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
-  * it's on the real one. */
- void switch_to_new_gdt(void)
- {
--	struct Xgt_desc_struct gdt_descr;
-+	struct desc_ptr gdt_descr;
+@@ -545,17 +698,11 @@ void __init setup_arch(char **cmdline_p)
+ 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
+ 	pre_setup_arch_hook();
+ 	early_cpu_init();
++	early_ioremap_init();
  
- 	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
- 	gdt_descr.size = GDT_SIZE - 1;
-@@ -672,12 +685,6 @@ void __cpuinit cpu_init(void)
+-	/*
+-	 * FIXME: This isn't an official loader_type right
+-	 * now but does currently work with elilo.
+-	 * If we were configured as an EFI kernel, check to make
+-	 * sure that we were loaded correctly from elilo and that
+-	 * the system table is valid.  If not, then initialize normally.
+-	 */
+ #ifdef CONFIG_EFI
+-	if ((boot_params.hdr.type_of_loader == 0x50) &&
+-	    boot_params.efi_info.efi_systab)
++	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
++		     "EL32", 4))
+ 		efi_enabled = 1;
+ #endif
  
- 	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
- 		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
--	if (tsc_disable && cpu_has_tsc) {
--		printk(KERN_NOTICE "Disabling TSC...\n");
--		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
--		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
--		set_in_cr4(X86_CR4_TSD);
+@@ -579,12 +726,9 @@ void __init setup_arch(char **cmdline_p)
+ 	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
+ #endif
+ 	ARCH_SETUP
+-	if (efi_enabled)
+-		efi_init();
+-	else {
+-		printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+-		print_memory_map(memory_setup());
 -	}
++
++	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++	print_memory_map(memory_setup());
  
- 	load_idt(&idt_descr);
- 	switch_to_new_gdt();
-@@ -691,7 +698,7 @@ void __cpuinit cpu_init(void)
- 		BUG();
- 	enter_lazy_tlb(&init_mm, curr);
- 
--	load_esp0(t, thread);
-+	load_sp0(t, thread);
- 	set_tss_desc(cpu,t);
- 	load_TR_desc();
- 	load_LDT(&init_mm.context);
-diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
-index 2f6432c..ad6527a 100644
---- a/arch/x86/kernel/cpu/cpu.h
-+++ b/arch/x86/kernel/cpu/cpu.h
-@@ -24,5 +24,6 @@ extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
- extern int get_model_name(struct cpuinfo_x86 *c);
- extern void display_cacheinfo(struct cpuinfo_x86 *c);
- 
--extern void early_intel_workaround(struct cpuinfo_x86 *c);
-+extern void early_init_intel(struct cpuinfo_x86 *c);
-+extern void early_init_amd(struct cpuinfo_x86 *c);
+ 	copy_edd();
  
-diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
-index fea0af0..a962dcb 100644
---- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
-+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
-@@ -67,7 +67,8 @@ struct acpi_cpufreq_data {
- 	unsigned int cpu_feature;
- };
+@@ -612,8 +756,16 @@ void __init setup_arch(char **cmdline_p)
+ 	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ 	*cmdline_p = command_line;
  
--static struct acpi_cpufreq_data *drv_data[NR_CPUS];
-+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
++	if (efi_enabled)
++		efi_init();
 +
- /* acpi_perf_data is a pointer to percpu data. */
- static struct acpi_processor_performance *acpi_perf_data;
- 
-@@ -218,14 +219,14 @@ static u32 get_cur_val(cpumask_t mask)
- 	if (unlikely(cpus_empty(mask)))
- 		return 0;
- 
--	switch (drv_data[first_cpu(mask)]->cpu_feature) {
-+	switch (per_cpu(drv_data, first_cpu(mask))->cpu_feature) {
- 	case SYSTEM_INTEL_MSR_CAPABLE:
- 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
- 		break;
- 	case SYSTEM_IO_CAPABLE:
- 		cmd.type = SYSTEM_IO_CAPABLE;
--		perf = drv_data[first_cpu(mask)]->acpi_data;
-+		perf = per_cpu(drv_data, first_cpu(mask))->acpi_data;
- 		cmd.addr.io.port = perf->control_register.address;
- 		cmd.addr.io.bit_width = perf->control_register.bit_width;
- 		break;
-@@ -325,7 +326,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
+ 	max_low_pfn = setup_memory();
  
++	/* update e820 for memory not covered by WB MTRRs */
++	mtrr_bp_init();
++	if (mtrr_trim_uncached_memory(max_pfn))
++		max_low_pfn = setup_memory();
++
+ #ifdef CONFIG_VMI
+ 	/*
+ 	 * Must be after max_low_pfn is determined, and before kernel
+@@ -636,6 +788,16 @@ void __init setup_arch(char **cmdline_p)
+ 	smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
  #endif
+ 	paging_init();
++
++	/*
++	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
++	 */
++
++#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
++	if (init_ohci1394_dma_early)
++		init_ohci1394_dma_on_all_controllers();
++#endif
++
+ 	remapped_pgdat_init();
+ 	sparse_init();
+ 	zone_sizes_init();
+@@ -644,15 +806,19 @@ void __init setup_arch(char **cmdline_p)
+ 	 * NOTE: at this point the bootmem allocator is fully available.
+ 	 */
  
--	retval = drv_data[cpu]->max_freq * perf_percent / 100;
-+	retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
- 
- 	put_cpu();
- 	set_cpus_allowed(current, saved_mask);
-@@ -336,7 +337,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
- 
- static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
- {
--	struct acpi_cpufreq_data *data = drv_data[cpu];
-+	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
- 	unsigned int freq;
- 
- 	dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
-@@ -370,7 +371,7 @@ static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
- static int acpi_cpufreq_target(struct cpufreq_policy *policy,
- 			       unsigned int target_freq, unsigned int relation)
- {
--	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
-+	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
- 	struct acpi_processor_performance *perf;
- 	struct cpufreq_freqs freqs;
- 	cpumask_t online_policy_cpus;
-@@ -466,7 +467,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
- 
- static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
- {
--	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
-+	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
- 
- 	dprintk("acpi_cpufreq_verify\n");
- 
-@@ -570,7 +571,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
- 		return -ENOMEM;
- 
- 	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
--	drv_data[cpu] = data;
-+	per_cpu(drv_data, cpu) = data;
- 
- 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
- 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
-@@ -714,20 +715,20 @@ err_unreg:
- 	acpi_processor_unregister_performance(perf, cpu);
- err_free:
- 	kfree(data);
--	drv_data[cpu] = NULL;
-+	per_cpu(drv_data, cpu) = NULL;
- 
- 	return result;
- }
- 
- static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
- {
--	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
-+	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++#ifdef CONFIG_BLK_DEV_INITRD
++	relocate_initrd();
++#endif
++
+ 	paravirt_post_allocator_init();
  
- 	dprintk("acpi_cpufreq_cpu_exit\n");
+ 	dmi_scan_machine();
  
- 	if (data) {
- 		cpufreq_frequency_table_put_attr(policy->cpu);
--		drv_data[policy->cpu] = NULL;
-+		per_cpu(drv_data, policy->cpu) = NULL;
- 		acpi_processor_unregister_performance(data->acpi_data,
- 						      policy->cpu);
- 		kfree(data);
-@@ -738,7 +739,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
++	io_delay_init();
++
+ #ifdef CONFIG_X86_GENERICARCH
+ 	generic_apic_probe();
+-#endif	
+-	if (efi_enabled)
+-		efi_map_memmap();
++#endif
  
- static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
- {
--	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
-+	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ #ifdef CONFIG_ACPI
+ 	/*
+@@ -661,9 +827,7 @@ void __init setup_arch(char **cmdline_p)
+ 	acpi_boot_table_init();
+ #endif
  
- 	dprintk("acpi_cpufreq_resume\n");
+-#ifdef CONFIG_PCI
+ 	early_quirks();
+-#endif
  
-diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
-index 749d00c..06fcce5 100644
---- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
-+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
-@@ -694,7 +694,7 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
- 	if ( acpi_bus_get_device(obj_handle, &d) ) {
- 		return 0;
- 	}
--	*return_value = (void *)acpi_driver_data(d);
-+	*return_value = acpi_driver_data(d);
- 	return 1;
+ #ifdef CONFIG_ACPI
+ 	acpi_boot_init();
+@@ -692,3 +856,26 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+ #endif
  }
++
++/*
++ * Request address space for all standard resources
++ *
++ * This is called just before pcibios_init(), which is also a
++ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
++ */
++static int __init request_standard_resources(void)
++{
++	int i;
++
++	printk(KERN_INFO "Setting up standard PCI resources\n");
++	init_iomem_resources(&code_resource, &data_resource, &bss_resource);
++
++	request_resource(&iomem_resource, &video_ram_resource);
++
++	/* request I/O space for devices used on all i[345]86 PCs */
++	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
++		request_resource(&ioport_resource, &standard_io_resources[i]);
++	return 0;
++}
++
++subsys_initcall(request_standard_resources);
+diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
+index 30d94d1..77fb87b 100644
+--- a/arch/x86/kernel/setup_64.c
++++ b/arch/x86/kernel/setup_64.c
+@@ -30,6 +30,7 @@
+ #include <linux/crash_dump.h>
+ #include <linux/root_dev.h>
+ #include <linux/pci.h>
++#include <linux/efi.h>
+ #include <linux/acpi.h>
+ #include <linux/kallsyms.h>
+ #include <linux/edd.h>
+@@ -39,10 +40,13 @@
+ #include <linux/dmi.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/ctype.h>
++#include <linux/uaccess.h>
++#include <linux/init_ohci1394_dma.h>
  
-diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
-index 99e1ef9..a052273 100644
---- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
-+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
-@@ -52,7 +52,7 @@
- /* serialize freq changes  */
- static DEFINE_MUTEX(fidvid_mutex);
+ #include <asm/mtrr.h>
+ #include <asm/uaccess.h>
+ #include <asm/system.h>
++#include <asm/vsyscall.h>
+ #include <asm/io.h>
+ #include <asm/smp.h>
+ #include <asm/msr.h>
+@@ -50,6 +54,7 @@
+ #include <video/edid.h>
+ #include <asm/e820.h>
+ #include <asm/dma.h>
++#include <asm/gart.h>
+ #include <asm/mpspec.h>
+ #include <asm/mmu_context.h>
+ #include <asm/proto.h>
+@@ -59,6 +64,15 @@
+ #include <asm/sections.h>
+ #include <asm/dmi.h>
+ #include <asm/cacheflush.h>
++#include <asm/mce.h>
++#include <asm/ds.h>
++#include <asm/topology.h>
++
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#else
++#define ARCH_SETUP
++#endif
  
--static struct powernow_k8_data *powernow_data[NR_CPUS];
-+static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
+ /*
+  * Machine setup..
+@@ -67,6 +81,8 @@
+ struct cpuinfo_x86 boot_cpu_data __read_mostly;
+ EXPORT_SYMBOL(boot_cpu_data);
  
- static int cpu_family = CPU_OPTERON;
++__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
++
+ unsigned long mmu_cr4_features;
  
-@@ -1018,7 +1018,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
- static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
- {
- 	cpumask_t oldmask = CPU_MASK_ALL;
--	struct powernow_k8_data *data = powernow_data[pol->cpu];
-+	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
- 	u32 checkfid;
- 	u32 checkvid;
- 	unsigned int newstate;
-@@ -1094,7 +1094,7 @@ err_out:
- /* Driver entry point to verify the policy and range of frequencies */
- static int powernowk8_verify(struct cpufreq_policy *pol)
- {
--	struct powernow_k8_data *data = powernow_data[pol->cpu];
-+	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ /* Boot loader ID as an integer, for the benefit of proc_dointvec */
+@@ -76,7 +92,7 @@ unsigned long saved_video_mode;
  
- 	if (!data)
- 		return -EINVAL;
-@@ -1202,7 +1202,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
- 		dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
- 			data->currfid, data->currvid);
+ int force_mwait __cpuinitdata;
  
--	powernow_data[pol->cpu] = data;
-+	per_cpu(powernow_data, pol->cpu) = data;
+-/* 
++/*
+  * Early DMI memory
+  */
+ int dmi_alloc_index;
+@@ -122,25 +138,27 @@ struct resource standard_io_resources[] = {
  
- 	return 0;
+ #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
  
-@@ -1216,7 +1216,7 @@ err_out:
+-struct resource data_resource = {
++static struct resource data_resource = {
+ 	.name = "Kernel data",
+ 	.start = 0,
+ 	.end = 0,
+ 	.flags = IORESOURCE_RAM,
+ };
+-struct resource code_resource = {
++static struct resource code_resource = {
+ 	.name = "Kernel code",
+ 	.start = 0,
+ 	.end = 0,
+ 	.flags = IORESOURCE_RAM,
+ };
+-struct resource bss_resource = {
++static struct resource bss_resource = {
+ 	.name = "Kernel bss",
+ 	.start = 0,
+ 	.end = 0,
+ 	.flags = IORESOURCE_RAM,
+ };
  
- static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
- {
--	struct powernow_k8_data *data = powernow_data[pol->cpu];
-+	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
++static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
++
+ #ifdef CONFIG_PROC_VMCORE
+ /* elfcorehdr= specifies the location of elf core header
+  * stored by the crashed kernel. This option will be passed
+@@ -166,12 +184,12 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
+ 	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
+ 	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
+ 	if (bootmap == -1L)
+-		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
++		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
+ 	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
+ 	e820_register_active_regions(0, start_pfn, end_pfn);
+ 	free_bootmem_with_active_regions(0, end_pfn);
+ 	reserve_bootmem(bootmap, bootmap_size);
+-} 
++}
+ #endif
  
- 	if (!data)
- 		return -EINVAL;
-@@ -1237,7 +1237,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
- 	cpumask_t oldmask = current->cpus_allowed;
- 	unsigned int khz = 0;
+ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+@@ -205,7 +223,8 @@ static void __init reserve_crashkernel(void)
+ 	unsigned long long crash_size, crash_base;
+ 	int ret;
  
--	data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];
-+	data = per_cpu(powernow_data, first_cpu(per_cpu(cpu_core_map, cpu)));
+-	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
++	free_mem =
++		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
  
- 	if (!data)
- 		return -EINVAL;
-diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
-index 88d66fb..404a6a2 100644
---- a/arch/x86/kernel/cpu/cyrix.c
-+++ b/arch/x86/kernel/cpu/cyrix.c
-@@ -5,6 +5,7 @@
- #include <asm/dma.h>
- #include <asm/io.h>
- #include <asm/processor-cyrix.h>
-+#include <asm/processor-flags.h>
- #include <asm/timer.h>
- #include <asm/pci-direct.h>
- #include <asm/tsc.h>
-@@ -126,15 +127,12 @@ static void __cpuinit set_cx86_reorder(void)
+ 	ret = parse_crashkernel(boot_command_line, free_mem,
+ 			&crash_size, &crash_base);
+@@ -229,33 +248,21 @@ static inline void __init reserve_crashkernel(void)
+ {}
+ #endif
  
- static void __cpuinit set_cx86_memwb(void)
+-#define EBDA_ADDR_POINTER 0x40E
+-
+-unsigned __initdata ebda_addr;
+-unsigned __initdata ebda_size;
+-
+-static void discover_ebda(void)
++/* Overridden in paravirt.c if CONFIG_PARAVIRT */
++void __attribute__((weak)) __init memory_setup(void)
  {
--	u32 cr0;
+-	/*
+-	 * there is a real-mode segmented pointer pointing to the 
+-	 * 4K EBDA area at 0x40E
+-	 */
+-	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
+-	ebda_addr <<= 4;
 -
- 	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
- 
- 	/* CCR2 bit 2: unlock NW bit */
- 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
- 	/* set 'Not Write-through' */
--	cr0 = 0x20000000;
--	write_cr0(read_cr0() | cr0);
-+	write_cr0(read_cr0() | X86_CR0_NW);
- 	/* CCR2 bit 2: lock NW bit and set WT1 */
- 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
+-	ebda_size = *(unsigned short *)__va(ebda_addr);
+-
+-	/* Round EBDA up to pages */
+-	if (ebda_size == 0)
+-		ebda_size = 1;
+-	ebda_size <<= 10;
+-	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
+-	if (ebda_size > 64*1024)
+-		ebda_size = 64*1024;
++       machine_specific_memory_setup();
  }
-diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
-index cc8c501..d1c372b 100644
---- a/arch/x86/kernel/cpu/intel.c
-+++ b/arch/x86/kernel/cpu/intel.c
-@@ -11,6 +11,8 @@
- #include <asm/pgtable.h>
- #include <asm/msr.h>
- #include <asm/uaccess.h>
-+#include <asm/ptrace.h>
-+#include <asm/ds.h>
  
- #include "cpu.h"
++/*
++ * setup_arch - architecture-specific boot-time initializations
++ *
++ * Note: On x86_64, fixmaps are ready for use even before this is called.
++ */
+ void __init setup_arch(char **cmdline_p)
+ {
++	unsigned i;
++
+ 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
  
-@@ -27,13 +29,14 @@
- struct movsl_mask movsl_mask __read_mostly;
+ 	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
+@@ -269,7 +276,15 @@ void __init setup_arch(char **cmdline_p)
+ 	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
+ 	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
  #endif
+-	setup_memory_region();
++#ifdef CONFIG_EFI
++	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
++		     "EL64", 4))
++		efi_enabled = 1;
++#endif
++
++	ARCH_SETUP
++
++	memory_setup();
+ 	copy_edd();
  
--void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
-+void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
- {
--	if (c->x86_vendor != X86_VENDOR_INTEL)
--		return;
- 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
- 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
- 		c->x86_cache_alignment = 128;
-+	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-+		(c->x86 == 0x6 && c->x86_model >= 0x0e))
-+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- }
+ 	if (!boot_params.hdr.root_flags)
+@@ -293,27 +308,47 @@ void __init setup_arch(char **cmdline_p)
  
- /*
-@@ -113,6 +116,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
- 	unsigned int l2 = 0;
- 	char *p = NULL;
+ 	parse_early_param();
  
-+	early_init_intel(c);
++#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
++	if (init_ohci1394_dma_early)
++		init_ohci1394_dma_on_all_controllers();
++#endif
 +
- #ifdef CONFIG_X86_F00F_BUG
+ 	finish_e820_parsing();
+ 
++	early_gart_iommu_check();
++
+ 	e820_register_active_regions(0, 0, -1UL);
  	/*
- 	 * All current models of Pentium and Pentium with MMX technology CPUs
-@@ -132,7 +137,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
- 	}
- #endif
+ 	 * partially used pages are not usable - thus
+ 	 * we are rounding upwards:
+ 	 */
+ 	end_pfn = e820_end_of_ram();
++	/* update e820 for memory not covered by WB MTRRs */
++	mtrr_bp_init();
++	if (mtrr_trim_uncached_memory(end_pfn)) {
++		e820_register_active_regions(0, 0, -1UL);
++		end_pfn = e820_end_of_ram();
++	}
++
+ 	num_physpages = end_pfn;
  
--	select_idle_routine(c);
- 	l2 = init_intel_cacheinfo(c);
- 	if (c->cpuid_level > 9 ) {
- 		unsigned eax = cpuid_eax(10);
-@@ -201,16 +205,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
- 	}
- #endif
+ 	check_efer();
  
-+	if (cpu_has_xmm2)
-+		set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability);
- 	if (c->x86 == 15) {
- 		set_bit(X86_FEATURE_P4, c->x86_capability);
--		set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
- 	}
- 	if (c->x86 == 6) 
- 		set_bit(X86_FEATURE_P3, c->x86_capability);
--	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
--		(c->x86 == 0x6 && c->x86_model >= 0x0e))
--		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+-	discover_ebda();
 -
- 	if (cpu_has_ds) {
- 		unsigned int l1;
- 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-@@ -219,6 +220,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
- 		if (!(l1 & (1<<12)))
- 			set_bit(X86_FEATURE_PEBS, c->x86_capability);
- 	}
-+
-+	if (cpu_has_bts)
-+		ds_init_intel(c);
- }
+ 	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
++	if (efi_enabled)
++		efi_init();
  
- static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-@@ -342,5 +346,22 @@ unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
- EXPORT_SYMBOL(cmpxchg_386_u32);
- #endif
+ 	dmi_scan_machine();
  
-+#ifndef CONFIG_X86_CMPXCHG64
-+unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
-+{
-+	u64 prev;
-+	unsigned long flags;
++	io_delay_init();
 +
-+	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
-+	local_irq_save(flags);
-+	prev = *(u64 *)ptr;
-+	if (prev == old)
-+		*(u64 *)ptr = new;
-+	local_irq_restore(flags);
-+	return prev;
-+}
-+EXPORT_SYMBOL(cmpxchg_486_u64);
+ #ifdef CONFIG_SMP
+-	/* setup to use the static apicid table during kernel startup */
+-	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
++	/* setup to use the early static init tables during kernel startup */
++	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
++	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
++#ifdef CONFIG_NUMA
++	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
 +#endif
-+
- // arch_initcall(intel_cpu_init);
- 
-diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
-index 9f530ff..8b4507b 100644
---- a/arch/x86/kernel/cpu/intel_cacheinfo.c
-+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
-@@ -733,10 +733,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
- 	if (unlikely(retval < 0))
- 		return retval;
- 
--	cache_kobject[cpu]->parent = &sys_dev->kobj;
--	kobject_set_name(cache_kobject[cpu], "%s", "cache");
--	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
--	retval = kobject_register(cache_kobject[cpu]);
-+	retval = kobject_init_and_add(cache_kobject[cpu], &ktype_percpu_entry,
-+				      &sys_dev->kobj, "%s", "cache");
- 	if (retval < 0) {
- 		cpuid4_cache_sysfs_exit(cpu);
- 		return retval;
-@@ -746,23 +744,23 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
- 		this_object = INDEX_KOBJECT_PTR(cpu,i);
- 		this_object->cpu = cpu;
- 		this_object->index = i;
--		this_object->kobj.parent = cache_kobject[cpu];
--		kobject_set_name(&(this_object->kobj), "index%1lu", i);
--		this_object->kobj.ktype = &ktype_cache;
--		retval = kobject_register(&(this_object->kobj));
-+		retval = kobject_init_and_add(&(this_object->kobj),
-+					      &ktype_cache, cache_kobject[cpu],
-+					      "index%1lu", i);
- 		if (unlikely(retval)) {
- 			for (j = 0; j < i; j++) {
--				kobject_unregister(
--					&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
-+				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
- 			}
--			kobject_unregister(cache_kobject[cpu]);
-+			kobject_put(cache_kobject[cpu]);
- 			cpuid4_cache_sysfs_exit(cpu);
- 			break;
- 		}
-+		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
- 	}
- 	if (!retval)
- 		cpu_set(cpu, cache_dev_map);
- 
-+	kobject_uevent(cache_kobject[cpu], KOBJ_ADD);
- 	return retval;
- }
+ #endif
  
-@@ -778,8 +776,8 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
- 	cpu_clear(cpu, cache_dev_map);
+ #ifdef CONFIG_ACPI
+@@ -340,48 +375,26 @@ void __init setup_arch(char **cmdline_p)
+ #endif
  
- 	for (i = 0; i < num_cache_leaves; i++)
--		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
--	kobject_unregister(cache_kobject[cpu]);
-+		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
-+	kobject_put(cache_kobject[cpu]);
- 	cpuid4_cache_sysfs_exit(cpu);
- }
+ #ifdef CONFIG_NUMA
+-	numa_initmem_init(0, end_pfn); 
++	numa_initmem_init(0, end_pfn);
+ #else
+ 	contig_initmem_init(0, end_pfn);
+ #endif
  
-diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c
-index eef63e3..e633c9c 100644
---- a/arch/x86/kernel/cpu/mcheck/k7.c
-+++ b/arch/x86/kernel/cpu/mcheck/k7.c
-@@ -16,7 +16,7 @@
- #include "mce.h"
+-	/* Reserve direct mapping */
+-	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
+-				(table_end - table_start) << PAGE_SHIFT);
+-
+-	/* reserve kernel */
+-	reserve_bootmem_generic(__pa_symbol(&_text),
+-				__pa_symbol(&_end) - __pa_symbol(&_text));
++	early_res_to_bootmem();
  
- /* Machine Check Handler For AMD Athlon/Duron */
--static fastcall void k7_machine_check(struct pt_regs * regs, long error_code)
-+static void k7_machine_check(struct pt_regs * regs, long error_code)
- {
- 	int recover=1;
- 	u32 alow, ahigh, high, low;
-@@ -27,29 +27,32 @@ static fastcall void k7_machine_check(struct pt_regs * regs, long error_code)
- 	if (mcgstl & (1<<0))	/* Recoverable ? */
- 		recover=0;
++#ifdef CONFIG_ACPI_SLEEP
+ 	/*
+-	 * reserve physical page 0 - it's a special BIOS page on many boxes,
+-	 * enabling clean reboots, SMP operation, laptop functions.
++	 * Reserve low memory region for sleep support.
+ 	 */
+-	reserve_bootmem_generic(0, PAGE_SIZE);
+-
+-	/* reserve ebda region */
+-	if (ebda_addr)
+-		reserve_bootmem_generic(ebda_addr, ebda_size);
+-#ifdef CONFIG_NUMA
+-	/* reserve nodemap region */
+-	if (nodemap_addr)
+-		reserve_bootmem_generic(nodemap_addr, nodemap_size);
++       acpi_reserve_bootmem();
+ #endif
  
--	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
-+	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
- 		smp_processor_id(), mcgsth, mcgstl);
+-#ifdef CONFIG_SMP
+-	/* Reserve SMP trampoline */
+-	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
+-#endif
++	if (efi_enabled)
++		efi_reserve_bootmem();
  
--	for (i=1; i<nr_mce_banks; i++) {
--		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
-+	for (i = 1; i < nr_mce_banks; i++) {
-+		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
- 		if (high&(1<<31)) {
-+			char misc[20];
-+			char addr[24];
-+			misc[0] = addr[0] = '\0';
- 			if (high & (1<<29))
- 				recover |= 1;
- 			if (high & (1<<25))
- 				recover |= 2;
--			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
- 			high &= ~(1<<31);
- 			if (high & (1<<27)) {
--				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
--				printk ("[%08x%08x]", ahigh, alow);
-+				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
-+				snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
- 			}
- 			if (high & (1<<26)) {
--				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
--				printk (" at %08x%08x", ahigh, alow);
-+				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
-+				snprintf(addr, 24, " at %08x%08x", ahigh, alow);
- 			}
--			printk ("\n");
-+			printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
-+				smp_processor_id(), i, high, low, misc, addr);
- 			/* Clear it */
--			wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
-+			wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
- 			/* Serialize */
- 			wmb();
- 			add_taint(TAINT_MACHINE_CHECK);
-diff --git a/arch/x86/kernel/cpu/mcheck/mce.h b/arch/x86/kernel/cpu/mcheck/mce.h
-index 81fb6e2..ae9f628 100644
---- a/arch/x86/kernel/cpu/mcheck/mce.h
-+++ b/arch/x86/kernel/cpu/mcheck/mce.h
-@@ -8,7 +8,7 @@ void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
- void winchip_mcheck_init(struct cpuinfo_x86 *c);
+-#ifdef CONFIG_ACPI_SLEEP
+        /*
+-        * Reserve low memory region for sleep support.
+-        */
+-       acpi_reserve_bootmem();
+-#endif
+-	/*
+-	 * Find and reserve possible boot-time SMP configuration:
+-	 */
++	* Find and reserve possible boot-time SMP configuration:
++	*/
+ 	find_smp_config();
+ #ifdef CONFIG_BLK_DEV_INITRD
+ 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+@@ -395,6 +408,8 @@ void __init setup_arch(char **cmdline_p)
+ 			initrd_start = ramdisk_image + PAGE_OFFSET;
+ 			initrd_end = initrd_start+ramdisk_size;
+ 		} else {
++			/* Assumes everything on node 0 */
++			free_bootmem(ramdisk_image, ramdisk_size);
+ 			printk(KERN_ERR "initrd extends beyond end of memory "
+ 			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ 			       ramdisk_end, end_of_mem);
+@@ -404,17 +419,10 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+ 	reserve_crashkernel();
+ 	paging_init();
++	map_vsyscall();
  
- /* Call the installed machine check handler for this CPU setup. */
--extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);
-+extern void (*machine_check_vector)(struct pt_regs *, long error_code);
+-#ifdef CONFIG_PCI
+ 	early_quirks();
+-#endif
  
- extern int nr_mce_banks;
+-	/*
+-	 * set this early, so we dont allocate cpu0
+-	 * if MADT list doesnt list BSP first
+-	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
+-	 */
+-	cpu_set(0, cpu_present_map);
+ #ifdef CONFIG_ACPI
+ 	/*
+ 	 * Read APIC and some other early information from ACPI tables.
+@@ -430,25 +438,24 @@ void __init setup_arch(char **cmdline_p)
+ 	if (smp_found_config)
+ 		get_smp_config();
+ 	init_apic_mappings();
++	ioapic_init_mappings();
  
-diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
-index 34c781e..a5182dc 100644
---- a/arch/x86/kernel/cpu/mcheck/mce_32.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
-@@ -22,13 +22,13 @@ int nr_mce_banks;
- EXPORT_SYMBOL_GPL(nr_mce_banks);	/* non-fatal.o */
+ 	/*
+ 	 * We trust e820 completely. No explicit ROM probing in memory.
+- 	 */
+-	e820_reserve_resources(); 
++	 */
++	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
+ 	e820_mark_nosave_regions();
  
- /* Handle unconfigured int18 (should never happen) */
--static fastcall void unexpected_machine_check(struct pt_regs * regs, long error_code)
-+static void unexpected_machine_check(struct pt_regs * regs, long error_code)
- {	
- 	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id());
- }
+-	{
+-	unsigned i;
+ 	/* request I/O space for devices used on all i[345]86 PCs */
+ 	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+ 		request_resource(&ioport_resource, &standard_io_resources[i]);
+-	}
  
- /* Call the installed machine check handler for this CPU setup. */
--void fastcall (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
-+void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
+ 	e820_setup_gap();
  
- /* This has to be run for each processor */
- void mcheck_init(struct cpuinfo_x86 *c)
-diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
-index 4b21d29..9a699ed 100644
---- a/arch/x86/kernel/cpu/mcheck/mce_64.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
-@@ -63,7 +63,7 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
-  * separate MCEs from kernel messages to avoid bogus bug reports.
-  */
+ #ifdef CONFIG_VT
+ #if defined(CONFIG_VGA_CONSOLE)
+-	conswitchp = &vga_con;
++	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++		conswitchp = &vga_con;
+ #elif defined(CONFIG_DUMMY_CONSOLE)
+ 	conswitchp = &dummy_con;
+ #endif
+@@ -479,9 +486,10 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
  
--struct mce_log mcelog = {
-+static struct mce_log mcelog = {
- 	MCE_LOG_SIGNATURE,
- 	MCE_LOG_LEN,
- };
-@@ -80,7 +80,7 @@ void mce_log(struct mce *mce)
- 			/* When the buffer fills up discard new entries. Assume
- 			   that the earlier errors are the more interesting. */
- 			if (entry >= MCE_LOG_LEN) {
--				set_bit(MCE_OVERFLOW, &mcelog.flags);
-+				set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
- 				return;
- 			}
- 			/* Old left over entry. Skip. */
-@@ -110,12 +110,12 @@ static void print_mce(struct mce *m)
- 	       KERN_EMERG
- 	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
- 	       m->cpu, m->mcgstatus, m->bank, m->status);
--	if (m->rip) {
-+	if (m->ip) {
- 		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
- 		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
--		       m->cs, m->rip);
-+		       m->cs, m->ip);
- 		if (m->cs == __KERNEL_CS)
--			print_symbol("{%s}", m->rip);
-+			print_symbol("{%s}", m->ip);
- 		printk("\n");
+ 	if (n >= 0x80000005) {
+ 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
+-		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+-			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+-		c->x86_cache_size=(ecx>>24)+(edx>>24);
++		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
++		       "D cache %dK (%d bytes/line)\n",
++		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++		c->x86_cache_size = (ecx>>24) + (edx>>24);
+ 		/* On K8 L1 TLB is inclusive, so don't count it */
+ 		c->x86_tlbsize = 0;
  	}
- 	printk(KERN_EMERG "TSC %Lx ", m->tsc);
-@@ -156,16 +156,16 @@ static int mce_available(struct cpuinfo_x86 *c)
- static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
- {
- 	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
--		m->rip = regs->rip;
-+		m->ip = regs->ip;
- 		m->cs = regs->cs;
- 	} else {
--		m->rip = 0;
-+		m->ip = 0;
- 		m->cs = 0;
+@@ -495,11 +503,8 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+ 		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+ 		c->x86_cache_size, ecx & 0xFF);
  	}
- 	if (rip_msr) {
- 		/* Assume the RIP in the MSR is exact. Is this true? */
- 		m->mcgstatus |= MCG_STATUS_EIPV;
--		rdmsrl(rip_msr, m->rip);
-+		rdmsrl(rip_msr, m->ip);
- 		m->cs = 0;
+-
+-	if (n >= 0x80000007)
+-		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
+ 	if (n >= 0x80000008) {
+-		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
++		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
+ 		c->x86_virt_bits = (eax >> 8) & 0xff;
+ 		c->x86_phys_bits = eax & 0xff;
  	}
- }
-@@ -192,10 +192,10 @@ void do_machine_check(struct pt_regs * regs, long error_code)
- 
- 	atomic_inc(&mce_entry);
- 
--	if (regs)
--		notify_die(DIE_NMI, "machine check", regs, error_code, 18,
--			   SIGKILL);
--	if (!banks)
-+	if ((regs
-+	     && notify_die(DIE_NMI, "machine check", regs, error_code,
-+			   18, SIGKILL) == NOTIFY_STOP)
-+	    || !banks)
- 		goto out2;
- 
- 	memset(&m, 0, sizeof(struct mce));
-@@ -288,7 +288,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
- 		 * instruction which caused the MCE.
- 		 */
- 		if (m.mcgstatus & MCG_STATUS_EIPV)
--			user_space = panicm.rip && (panicm.cs & 3);
-+			user_space = panicm.ip && (panicm.cs & 3);
- 
- 		/*
- 		 * If we know that the error was in user space, send a
-@@ -564,7 +564,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
- 			loff_t *off)
+@@ -508,14 +513,15 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+ #ifdef CONFIG_NUMA
+ static int nearby_node(int apicid)
  {
- 	unsigned long *cpu_tsc;
--	static DECLARE_MUTEX(mce_read_sem);
-+	static DEFINE_MUTEX(mce_read_mutex);
- 	unsigned next;
- 	char __user *buf = ubuf;
- 	int i, err;
-@@ -573,12 +573,12 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
- 	if (!cpu_tsc)
- 		return -ENOMEM;
- 
--	down(&mce_read_sem);
-+	mutex_lock(&mce_read_mutex);
- 	next = rcu_dereference(mcelog.next);
- 
- 	/* Only supports full reads right now */
- 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
--		up(&mce_read_sem);
-+		mutex_unlock(&mce_read_mutex);
- 		kfree(cpu_tsc);
- 		return -EINVAL;
+-	int i;
++	int i, node;
++
+ 	for (i = apicid - 1; i >= 0; i--) {
+-		int node = apicid_to_node[i];
++		node = apicid_to_node[i];
+ 		if (node != NUMA_NO_NODE && node_online(node))
+ 			return node;
  	}
-@@ -621,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
- 			memset(&mcelog.entry[i], 0, sizeof(struct mce));
- 		}
+ 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
+-		int node = apicid_to_node[i];
++		node = apicid_to_node[i];
+ 		if (node != NUMA_NO_NODE && node_online(node))
+ 			return node;
  	}
--	up(&mce_read_sem);
-+	mutex_unlock(&mce_read_mutex);
- 	kfree(cpu_tsc);
- 	return err ? -EFAULT : buf - ubuf;
- }
-@@ -634,8 +634,7 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
- 	return 0;
- }
- 
--static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd,
--		     unsigned long arg)
-+static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
- {
- 	int __user *p = (int __user *)arg;
- 
-@@ -664,7 +663,7 @@ static const struct file_operations mce_chrdev_ops = {
- 	.release = mce_release,
- 	.read = mce_read,
- 	.poll = mce_poll,
--	.ioctl = mce_ioctl,
-+	.unlocked_ioctl = mce_ioctl,
- };
- 
- static struct miscdevice mce_log_device = {
-@@ -745,7 +744,7 @@ static void mce_restart(void)
- 
- static struct sysdev_class mce_sysclass = {
- 	.resume = mce_resume,
--	set_kset_name("machinecheck"),
-+	.name = "machinecheck",
- };
- 
- DEFINE_PER_CPU(struct sys_device, device_mce);
-@@ -855,8 +854,8 @@ static void mce_remove_device(unsigned int cpu)
- }
- 
- /* Get notified when a cpu comes on/off. Be hotplug friendly. */
--static int
--mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
-+static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
-+				      unsigned long action, void *hcpu)
- {
- 	unsigned int cpu = (unsigned long)hcpu;
- 
-@@ -873,7 +872,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
- 	return NOTIFY_OK;
- }
- 
--static struct notifier_block mce_cpu_notifier = {
-+static struct notifier_block mce_cpu_notifier __cpuinitdata = {
- 	.notifier_call = mce_cpu_callback,
- };
- 
-diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
-index 752fb16..32671da 100644
---- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
-@@ -65,7 +65,7 @@ static struct threshold_block threshold_defaults = {
- };
- 
- struct threshold_bank {
--	struct kobject kobj;
-+	struct kobject *kobj;
- 	struct threshold_block *blocks;
- 	cpumask_t cpus;
- };
-@@ -118,6 +118,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+@@ -527,7 +533,7 @@ static int nearby_node(int apicid)
+  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
+  * Assumes number of cores is a power of two.
+  */
+-static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
  {
- 	unsigned int bank, block;
- 	unsigned int cpu = smp_processor_id();
-+	u8 lvt_off;
- 	u32 low = 0, high = 0, address = 0;
- 
- 	for (bank = 0; bank < NR_BANKS; ++bank) {
-@@ -153,14 +154,13 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
- 			if (shared_bank[bank] && c->cpu_core_id)
- 				break;
+ #ifdef CONFIG_SMP
+ 	unsigned bits;
+@@ -536,7 +542,54 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
+ 	int node = 0;
+ 	unsigned apicid = hard_smp_processor_id();
  #endif
-+			lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
-+						       APIC_EILVT_MSG_FIX, 0);
+-	unsigned ecx = cpuid_ecx(0x80000008);
++	bits = c->x86_coreid_bits;
 +
- 			high &= ~MASK_LVTOFF_HI;
--			high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
-+			high |= lvt_off << 20;
- 			wrmsr(address, low, high);
- 
--			setup_APIC_extended_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
--						THRESHOLD_APIC_VECTOR,
--						K8_APIC_EXT_INT_MSG_FIX, 0);
--
- 			threshold_defaults.address = address;
- 			threshold_restart_bank(&threshold_defaults, 0, 0);
- 		}
-@@ -432,10 +432,9 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
- 	else
- 		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
- 
--	kobject_set_name(&b->kobj, "misc%i", block);
--	b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
--	b->kobj.ktype = &threshold_ktype;
--	err = kobject_register(&b->kobj);
-+	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
-+				   per_cpu(threshold_banks, cpu)[bank]->kobj,
-+				   "misc%i", block);
- 	if (err)
- 		goto out_free;
- recurse:
-@@ -451,11 +450,14 @@ recurse:
- 	if (err)
- 		goto out_free;
- 
-+	if (b)
-+		kobject_uevent(&b->kobj, KOBJ_ADD);
++	/* Low order bits define the core id (index of core in socket) */
++	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
++	/* Convert the APIC ID into the socket ID */
++	c->phys_proc_id = phys_pkg_id(bits);
 +
- 	return err;
- 
- out_free:
- 	if (b) {
--		kobject_unregister(&b->kobj);
-+		kobject_put(&b->kobj);
- 		kfree(b);
- 	}
- 	return err;
-@@ -489,7 +491,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
- 			goto out;
- 
- 		err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
--					&b->kobj, name);
-+					b->kobj, name);
- 		if (err)
- 			goto out;
- 
-@@ -505,16 +507,15 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
- 		goto out;
- 	}
- 
--	kobject_set_name(&b->kobj, "threshold_bank%i", bank);
--	b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
-+	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
-+	if (!b->kobj)
-+		goto out_free;
++#ifdef CONFIG_NUMA
++	node = c->phys_proc_id;
++	if (apicid_to_node[apicid] != NUMA_NO_NODE)
++		node = apicid_to_node[apicid];
++	if (!node_online(node)) {
++		/* Two possibilities here:
++		   - The CPU is missing memory and no node was created.
++		   In that case try picking one from a nearby CPU
++		   - The APIC IDs differ from the HyperTransport node IDs
++		   which the K8 northbridge parsing fills in.
++		   Assume they are all increased by a constant offset,
++		   but in the same order as the HT nodeids.
++		   If that doesn't result in a usable node fall back to the
++		   path for the previous case.  */
 +
- #ifndef CONFIG_SMP
- 	b->cpus = CPU_MASK_ALL;
- #else
- 	b->cpus = per_cpu(cpu_core_map, cpu);
- #endif
--	err = kobject_register(&b->kobj);
--	if (err)
--		goto out_free;
- 
- 	per_cpu(threshold_banks, cpu)[bank] = b;
- 
-@@ -531,7 +532,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
- 			continue;
- 
- 		err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
--					&b->kobj, name);
-+					b->kobj, name);
- 		if (err)
- 			goto out;
- 
-@@ -554,7 +555,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
- 	int err = 0;
- 
- 	for (bank = 0; bank < NR_BANKS; ++bank) {
--		if (!(per_cpu(bank_map, cpu) & 1 << bank))
-+		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
- 			continue;
- 		err = threshold_create_bank(cpu, bank);
- 		if (err)
-@@ -581,7 +582,7 @@ static void deallocate_threshold_block(unsigned int cpu,
- 		return;
- 
- 	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
--		kobject_unregister(&pos->kobj);
-+		kobject_put(&pos->kobj);
- 		list_del(&pos->miscj);
- 		kfree(pos);
- 	}
-@@ -627,7 +628,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
- 	deallocate_threshold_block(cpu, bank);
- 
- free_out:
--	kobject_unregister(&b->kobj);
-+	kobject_put(b->kobj);
- 	kfree(b);
- 	per_cpu(threshold_banks, cpu)[bank] = NULL;
- }
-@@ -637,14 +638,14 @@ static void threshold_remove_device(unsigned int cpu)
- 	unsigned int bank;
- 
- 	for (bank = 0; bank < NR_BANKS; ++bank) {
--		if (!(per_cpu(bank_map, cpu) & 1 << bank))
-+		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
- 			continue;
- 		threshold_remove_bank(cpu, bank);
- 	}
- }
- 
- /* get notified when a cpu comes on/off */
--static int threshold_cpu_callback(struct notifier_block *nfb,
-+static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
- 					    unsigned long action, void *hcpu)
- {
- 	/* cpu was unsigned int to begin with */
-@@ -669,7 +670,7 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
- 	return NOTIFY_OK;
- }
- 
--static struct notifier_block threshold_cpu_notifier = {
-+static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
- 	.notifier_call = threshold_cpu_callback,
- };
- 
-diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
-index be4dabf..cb03345 100644
---- a/arch/x86/kernel/cpu/mcheck/p4.c
-+++ b/arch/x86/kernel/cpu/mcheck/p4.c
-@@ -57,7 +57,7 @@ static void intel_thermal_interrupt(struct pt_regs *regs)
- /* Thermal interrupt handler for this CPU setup */
- static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;
- 
--fastcall void smp_thermal_interrupt(struct pt_regs *regs)
-+void smp_thermal_interrupt(struct pt_regs *regs)
- {
- 	irq_enter();
- 	vendor_thermal_interrupt(regs);
-@@ -141,7 +141,7 @@ static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
- 	rdmsr (MSR_IA32_MCG_EIP, r->eip, h);
- }
- 
--static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
-+static void intel_machine_check(struct pt_regs * regs, long error_code)
- {
- 	int recover=1;
- 	u32 alow, ahigh, high, low;
-@@ -152,38 +152,41 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
- 	if (mcgstl & (1<<0))	/* Recoverable ? */
- 		recover=0;
++		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
++
++		if (ht_nodeid >= 0 &&
++		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
++			node = apicid_to_node[ht_nodeid];
++		/* Pick a nearby node */
++		if (!node_online(node))
++			node = nearby_node(apicid);
++	}
++	numa_set_node(cpu, node);
++
++	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++#endif
++}
++
++static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++	unsigned bits, ecx;
++
++	/* Multi core CPU? */
++	if (c->extended_cpuid_level < 0x80000008)
++		return;
++
++	ecx = cpuid_ecx(0x80000008);
  
--	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
-+	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
- 		smp_processor_id(), mcgsth, mcgstl);
+ 	c->x86_max_cores = (ecx & 0xff) + 1;
  
- 	if (mce_num_extended_msrs > 0) {
- 		struct intel_mce_extended_msrs dbg;
- 		intel_get_extended_msrs(&dbg);
--		printk (KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n",
--			smp_processor_id(), dbg.eip, dbg.eflags);
--		printk (KERN_DEBUG "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n",
--			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx);
--		printk (KERN_DEBUG "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
-+		printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n"
-+			"\teax: %08x ebx: %08x ecx: %08x edx: %08x\n"
-+			"\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
-+			smp_processor_id(), dbg.eip, dbg.eflags,
-+			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx,
- 			dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
+@@ -549,37 +602,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
+ 			bits++;
  	}
  
--	for (i=0; i<nr_mce_banks; i++) {
--		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
-+	for (i = 0; i < nr_mce_banks; i++) {
-+		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
- 		if (high & (1<<31)) {
-+			char misc[20];
-+			char addr[24];
-+			misc[0] = addr[0] = '\0';
- 			if (high & (1<<29))
- 				recover |= 1;
- 			if (high & (1<<25))
- 				recover |= 2;
--			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
- 			high &= ~(1<<31);
- 			if (high & (1<<27)) {
--				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
--				printk ("[%08x%08x]", ahigh, alow);
-+				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
-+				snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
- 			}
- 			if (high & (1<<26)) {
--				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
--				printk (" at %08x%08x", ahigh, alow);
-+				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
-+				snprintf(addr, 24, " at %08x%08x", ahigh, alow);
- 			}
--			printk ("\n");
-+			printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
-+				smp_processor_id(), i, high, low, misc, addr);
- 		}
- 	}
+-	/* Low order bits define the core id (index of core in socket) */
+-	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
+-	/* Convert the APIC ID into the socket ID */
+-	c->phys_proc_id = phys_pkg_id(bits);
+-
+-#ifdef CONFIG_NUMA
+-  	node = c->phys_proc_id;
+- 	if (apicid_to_node[apicid] != NUMA_NO_NODE)
+- 		node = apicid_to_node[apicid];
+- 	if (!node_online(node)) {
+- 		/* Two possibilities here:
+- 		   - The CPU is missing memory and no node was created.
+- 		   In that case try picking one from a nearby CPU
+- 		   - The APIC IDs differ from the HyperTransport node IDs
+- 		   which the K8 northbridge parsing fills in.
+- 		   Assume they are all increased by a constant offset,
+- 		   but in the same order as the HT nodeids.
+- 		   If that doesn't result in a usable node fall back to the
+- 		   path for the previous case.  */
+-		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
+- 		if (ht_nodeid >= 0 &&
+- 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+- 			node = apicid_to_node[ht_nodeid];
+- 		/* Pick a nearby node */
+- 		if (!node_online(node))
+- 			node = nearby_node(apicid);
+- 	}
+-	numa_set_node(cpu, node);
++	c->x86_coreid_bits = bits;
  
-diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
-index 94bc43d..a18310a 100644
---- a/arch/x86/kernel/cpu/mcheck/p5.c
-+++ b/arch/x86/kernel/cpu/mcheck/p5.c
-@@ -16,7 +16,7 @@
- #include "mce.h"
+-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+-#endif
+ #endif
+ }
  
- /* Machine check handler for Pentium class Intel */
--static fastcall void pentium_machine_check(struct pt_regs * regs, long error_code)
-+static void pentium_machine_check(struct pt_regs * regs, long error_code)
+@@ -595,8 +619,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
+ /* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
+ static __cpuinit int amd_apic_timer_broken(void)
  {
- 	u32 loaddr, hi, lotype;
- 	rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
-diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c
-index deeae42..7434260 100644
---- a/arch/x86/kernel/cpu/mcheck/p6.c
-+++ b/arch/x86/kernel/cpu/mcheck/p6.c
-@@ -16,7 +16,7 @@
- #include "mce.h"
+-	u32 lo, hi;
+-	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
++	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
++
+ 	switch (eax & CPUID_XFAM) {
+ 	case CPUID_XFAM_K8:
+ 		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+@@ -614,6 +638,15 @@ static __cpuinit int amd_apic_timer_broken(void)
+ 	return 0;
+ }
  
- /* Machine Check Handler For PII/PIII */
--static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
-+static void intel_machine_check(struct pt_regs * regs, long error_code)
++static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
++{
++	early_init_amd_mc(c);
++
++ 	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
++	if (c->x86_power & (1<<8))
++		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
++}
++
+ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
  {
- 	int recover=1;
- 	u32 alow, ahigh, high, low;
-@@ -27,27 +27,30 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
- 	if (mcgstl & (1<<0))	/* Recoverable ? */
- 		recover=0;
- 
--	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
-+	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
- 		smp_processor_id(), mcgsth, mcgstl);
- 
--	for (i=0; i<nr_mce_banks; i++) {
--		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
-+	for (i = 0; i < nr_mce_banks; i++) {
-+		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
- 		if (high & (1<<31)) {
-+			char misc[20];
-+			char addr[24];
-+			misc[0] = addr[0] = '\0';
- 			if (high & (1<<29))
- 				recover |= 1;
- 			if (high & (1<<25))
- 				recover |= 2;
--			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
- 			high &= ~(1<<31);
- 			if (high & (1<<27)) {
--				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
--				printk ("[%08x%08x]", ahigh, alow);
-+				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
-+				snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
- 			}
- 			if (high & (1<<26)) {
--				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
--				printk (" at %08x%08x", ahigh, alow);
-+				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
-+				snprintf(addr, 24, " at %08x%08x", ahigh, alow);
- 			}
--			printk ("\n");
-+			printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
-+				smp_processor_id(), i, high, low, misc, addr);
- 		}
- 	}
- 
-diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
-index 9e424b6..3d428d5 100644
---- a/arch/x86/kernel/cpu/mcheck/winchip.c
-+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
-@@ -15,7 +15,7 @@
- #include "mce.h"
+ 	unsigned level;
+@@ -624,7 +657,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ 	/*
+ 	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
+ 	 * bit 6 of msr C001_0015
+- 	 *
++	 *
+ 	 * Errata 63 for SH-B3 steppings
+ 	 * Errata 122 for all steppings (F+ have it disabled by default)
+ 	 */
+@@ -637,35 +670,32 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
  
- /* Machine check handler for WinChip C6 */
--static fastcall void winchip_machine_check(struct pt_regs * regs, long error_code)
-+static void winchip_machine_check(struct pt_regs * regs, long error_code)
- {
- 	printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
- 	add_taint(TAINT_MACHINE_CHECK);
-diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
-index 0949cdb..ee2331b 100644
---- a/arch/x86/kernel/cpu/mtrr/amd.c
-+++ b/arch/x86/kernel/cpu/mtrr/amd.c
-@@ -53,8 +53,6 @@ static void amd_set_mtrr(unsigned int reg, unsigned long base,
-     <base> The base address of the region.
-     <size> The size of the region. If this is 0 the region is disabled.
-     <type> The type of the region.
--    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
--    be done externally.
-     [RETURNS] Nothing.
- */
- {
-diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
-index 9964be3..8e139c7 100644
---- a/arch/x86/kernel/cpu/mtrr/cyrix.c
-+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
-@@ -4,6 +4,7 @@
- #include <asm/msr.h>
- #include <asm/io.h>
- #include <asm/processor-cyrix.h>
-+#include <asm/processor-flags.h>
- #include "mtrr.h"
+ 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+ 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+-	clear_bit(0*32+31, &c->x86_capability);
+-	
++	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);
++
+ 	/* On C+ stepping K8 rep microcode works well for copy/memset */
+ 	level = cpuid_eax(1);
+-	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
+-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
++			     level >= 0x0f58))
++		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+ 	if (c->x86 == 0x10 || c->x86 == 0x11)
+-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
  
- int arr3_protected;
-@@ -142,7 +143,7 @@ static void prepare_set(void)
+ 	/* Enable workaround for FXSAVE leak */
+ 	if (c->x86 >= 6)
+-		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
++		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
  
- 	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
- 	    a side-effect  */
--	cr0 = read_cr0() | 0x40000000;
-+	cr0 = read_cr0() | X86_CR0_CD;
- 	wbinvd();
- 	write_cr0(cr0);
- 	wbinvd();
-diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
-index 992f08d..103d61a 100644
---- a/arch/x86/kernel/cpu/mtrr/generic.c
-+++ b/arch/x86/kernel/cpu/mtrr/generic.c
-@@ -9,11 +9,12 @@
- #include <asm/msr.h>
- #include <asm/system.h>
- #include <asm/cpufeature.h>
-+#include <asm/processor-flags.h>
- #include <asm/tlbflush.h>
- #include "mtrr.h"
+ 	level = get_model_name(c);
+ 	if (!level) {
+-		switch (c->x86) { 
++		switch (c->x86) {
+ 		case 15:
+ 			/* Should distinguish Models here, but this is only
+ 			   a fallback anyways. */
+ 			strcpy(c->x86_model_id, "Hammer");
+-			break; 
+-		} 
+-	} 
++			break;
++		}
++	}
+ 	display_cacheinfo(c);
  
- struct mtrr_state {
--	struct mtrr_var_range *var_ranges;
-+	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
- 	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
- 	unsigned char enabled;
- 	unsigned char have_fixed;
-@@ -85,12 +86,6 @@ void __init get_mtrr_state(void)
- 	struct mtrr_var_range *vrs;
- 	unsigned lo, dummy;
+-	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+-	if (c->x86_power & (1<<8))
+-		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+-
+ 	/* Multi core CPU? */
+ 	if (c->extended_cpuid_level >= 0x80000008)
+ 		amd_detect_cmp(c);
+@@ -677,41 +707,38 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ 		num_cache_leaves = 3;
  
--	if (!mtrr_state.var_ranges) {
--		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range), 
--						GFP_KERNEL);
--		if (!mtrr_state.var_ranges)
--			return;
--	} 
- 	vrs = mtrr_state.var_ranges;
+ 	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
+-		set_bit(X86_FEATURE_K8, &c->x86_capability);
+-
+-	/* RDTSC can be speculated around */
+-	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++		set_cpu_cap(c, X86_FEATURE_K8);
  
- 	rdmsr(MTRRcap_MSR, lo, dummy);
-@@ -188,7 +183,7 @@ static inline void k8_enable_fixed_iorrs(void)
-  * \param changed pointer which indicates whether the MTRR needed to be changed
-  * \param msrwords pointer to the MSR values which the MSR should have
-  */
--static void set_fixed_range(int msr, int * changed, unsigned int * msrwords)
-+static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
- {
- 	unsigned lo, hi;
+-	/* Family 10 doesn't support C states in MWAIT so don't use it */
+-	if (c->x86 == 0x10 && !force_mwait)
+-		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
++	/* MFENCE stops RDTSC speculation */
++	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
  
-@@ -200,7 +195,7 @@ static void set_fixed_range(int msr, int * changed, unsigned int * msrwords)
- 		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
- 			k8_enable_fixed_iorrs();
- 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
--		*changed = TRUE;
-+		*changed = true;
- 	}
+ 	if (amd_apic_timer_broken())
+ 		disable_apic_timer = 1;
  }
  
-@@ -260,7 +255,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
- static int set_fixed_ranges(mtrr_type * frs)
+-static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++void __cpuinit detect_ht(struct cpuinfo_x86 *c)
  {
- 	unsigned long long *saved = (unsigned long long *) frs;
--	int changed = FALSE;
-+	bool changed = false;
- 	int block=-1, range;
+ #ifdef CONFIG_SMP
+-	u32 	eax, ebx, ecx, edx;
+-	int 	index_msb, core_bits;
++	u32 eax, ebx, ecx, edx;
++	int index_msb, core_bits;
  
- 	while (fixed_range_blocks[++block].ranges)
-@@ -273,17 +268,17 @@ static int set_fixed_ranges(mtrr_type * frs)
+ 	cpuid(1, &eax, &ebx, &ecx, &edx);
  
- /*  Set the MSR pair relating to a var range. Returns TRUE if
-     changes are made  */
--static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
-+static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
- {
- 	unsigned int lo, hi;
--	int changed = FALSE;
-+	bool changed = false;
  
- 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
- 	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
- 	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
- 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
- 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
--		changed = TRUE;
-+		changed = true;
- 	}
+ 	if (!cpu_has(c, X86_FEATURE_HT))
+ 		return;
+- 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
++	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+ 		goto out;
  
- 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
-@@ -292,7 +287,7 @@ static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
- 	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
- 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
- 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
--		changed = TRUE;
-+		changed = true;
- 	}
- 	return changed;
- }
-@@ -350,7 +345,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
- 	spin_lock(&set_atomicity_lock);
+ 	smp_num_siblings = (ebx & 0xff0000) >> 16;
  
- 	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
--	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
-+	cr0 = read_cr0() | X86_CR0_CD;
- 	write_cr0(cr0);
- 	wbinvd();
+ 	if (smp_num_siblings == 1) {
+ 		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
+-	} else if (smp_num_siblings > 1 ) {
++	} else if (smp_num_siblings > 1) {
  
-@@ -417,8 +412,6 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
-     <base> The base address of the region.
-     <size> The size of the region. If this is 0 the region is disabled.
-     <type> The type of the region.
--    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
--    be done externally.
-     [RETURNS] Nothing.
- */
- {
-diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
-index c7d8f17..91e150a 100644
---- a/arch/x86/kernel/cpu/mtrr/if.c
-+++ b/arch/x86/kernel/cpu/mtrr/if.c
-@@ -11,10 +11,6 @@
- #include <asm/mtrr.h>
- #include "mtrr.h"
+ 		if (smp_num_siblings > NR_CPUS) {
+-			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
++			printk(KERN_WARNING "CPU: Unsupported number of "
++			       "siblings %d", smp_num_siblings);
+ 			smp_num_siblings = 1;
+ 			return;
+ 		}
+@@ -721,7 +748,7 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
  
--/* RED-PEN: this is accessed without any locking */
--extern unsigned int *usage_table;
--
--
- #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
+ 		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
  
- static const char *const mtrr_strings[MTRR_NUM_TYPES] =
-@@ -37,7 +33,7 @@ const char *mtrr_attrib_to_str(int x)
+-		index_msb = get_count_order(smp_num_siblings) ;
++		index_msb = get_count_order(smp_num_siblings);
  
- static int
- mtrr_file_add(unsigned long base, unsigned long size,
--	      unsigned int type, char increment, struct file *file, int page)
-+	      unsigned int type, bool increment, struct file *file, int page)
- {
- 	int reg, max;
- 	unsigned int *fcount = FILE_FCOUNT(file); 
-@@ -55,7 +51,7 @@ mtrr_file_add(unsigned long base, unsigned long size,
- 		base >>= PAGE_SHIFT;
- 		size >>= PAGE_SHIFT;
+ 		core_bits = get_count_order(c->x86_max_cores);
+ 
+@@ -730,8 +757,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
  	}
--	reg = mtrr_add_page(base, size, type, 1);
-+	reg = mtrr_add_page(base, size, type, true);
- 	if (reg >= 0)
- 		++fcount[reg];
- 	return reg;
-@@ -141,7 +137,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
- 		size >>= PAGE_SHIFT;
- 		err =
- 		    mtrr_add_page((unsigned long) base, (unsigned long) size, i,
--				  1);
-+				  true);
- 		if (err < 0)
- 			return err;
- 		return len;
-@@ -217,7 +213,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
- 		if (!capable(CAP_SYS_ADMIN))
- 			return -EPERM;
- 		err =
--		    mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
-+		    mtrr_file_add(sentry.base, sentry.size, sentry.type, true,
- 				  file, 0);
- 		break;
- 	case MTRRIOC_SET_ENTRY:
-@@ -226,7 +222,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+ out:
+ 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+-		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
+-		printk(KERN_INFO  "CPU: Processor Core ID: %d\n", c->cpu_core_id);
++		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
++		       c->phys_proc_id);
++		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
++		       c->cpu_core_id);
+ 	}
+ 
  #endif
- 		if (!capable(CAP_SYS_ADMIN))
- 			return -EPERM;
--		err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
-+		err = mtrr_add(sentry.base, sentry.size, sentry.type, false);
- 		break;
- 	case MTRRIOC_DEL_ENTRY:
- #ifdef CONFIG_COMPAT
-@@ -270,7 +266,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
- 		if (!capable(CAP_SYS_ADMIN))
- 			return -EPERM;
- 		err =
--		    mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
-+		    mtrr_file_add(sentry.base, sentry.size, sentry.type, true,
- 				  file, 1);
- 		break;
- 	case MTRRIOC_SET_PAGE_ENTRY:
-@@ -279,7 +275,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+@@ -773,28 +802,39 @@ static void srat_detect_node(void)
  #endif
- 		if (!capable(CAP_SYS_ADMIN))
- 			return -EPERM;
--		err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
-+		err =
-+		    mtrr_add_page(sentry.base, sentry.size, sentry.type, false);
- 		break;
- 	case MTRRIOC_DEL_PAGE_ENTRY:
- #ifdef CONFIG_COMPAT
-@@ -396,7 +393,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
- 	for (i = 0; i < max; i++) {
- 		mtrr_if->get(i, &base, &size, &type);
- 		if (size == 0)
--			usage_table[i] = 0;
-+			mtrr_usage_table[i] = 0;
- 		else {
- 			if (size < (0x100000 >> PAGE_SHIFT)) {
- 				/* less than 1MB */
-@@ -410,7 +407,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
- 			len += seq_printf(seq, 
- 				   "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
- 			     i, base, base >> (20 - PAGE_SHIFT), size, factor,
--			     mtrr_attrib_to_str(type), usage_table[i]);
-+			     mtrr_attrib_to_str(type), mtrr_usage_table[i]);
- 		}
- 	}
- 	return 0;
-diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index 3b20613..7159195 100644
---- a/arch/x86/kernel/cpu/mtrr/main.c
-+++ b/arch/x86/kernel/cpu/mtrr/main.c
-@@ -38,8 +38,8 @@
- #include <linux/cpu.h>
- #include <linux/mutex.h>
+ }
  
-+#include <asm/e820.h>
- #include <asm/mtrr.h>
--
- #include <asm/uaccess.h>
- #include <asm/processor.h>
- #include <asm/msr.h>
-@@ -47,7 +47,7 @@
++static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
++{
++	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
++	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
++		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++}
++
+ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+ {
+ 	/* Cache sizes */
+ 	unsigned n;
  
- u32 num_var_ranges = 0;
+ 	init_intel_cacheinfo(c);
+-	if (c->cpuid_level > 9 ) {
++	if (c->cpuid_level > 9) {
+ 		unsigned eax = cpuid_eax(10);
+ 		/* Check for version and the number of counters */
+ 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+-			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
++			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
+ 	}
  
--unsigned int *usage_table;
-+unsigned int mtrr_usage_table[MAX_VAR_RANGES];
- static DEFINE_MUTEX(mtrr_mutex);
+ 	if (cpu_has_ds) {
+ 		unsigned int l1, l2;
+ 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+ 		if (!(l1 & (1<<11)))
+-			set_bit(X86_FEATURE_BTS, c->x86_capability);
++			set_cpu_cap(c, X86_FEATURE_BTS);
+ 		if (!(l1 & (1<<12)))
+-			set_bit(X86_FEATURE_PEBS, c->x86_capability);
++			set_cpu_cap(c, X86_FEATURE_PEBS);
+ 	}
  
- u64 size_or_mask, size_and_mask;
-@@ -121,13 +121,8 @@ static void __init init_table(void)
- 	int i, max;
++
++	if (cpu_has_bts)
++		ds_init_intel(c);
++
+ 	n = c->extended_cpuid_level;
+ 	if (n >= 0x80000008) {
+ 		unsigned eax = cpuid_eax(0x80000008);
+@@ -811,14 +851,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+ 		c->x86_cache_alignment = c->x86_clflush_size * 2;
+ 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+ 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
+-		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+ 	if (c->x86 == 6)
+-		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+-	if (c->x86 == 15)
+-		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+-	else
+-		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+- 	c->x86_max_cores = intel_num_cpu_cores(c);
++		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
++	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
++	c->x86_max_cores = intel_num_cpu_cores(c);
  
- 	max = num_var_ranges;
--	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
--	    == NULL) {
--		printk(KERN_ERR "mtrr: could not allocate\n");
--		return;
--	}
- 	for (i = 0; i < max; i++)
--		usage_table[i] = 1;
-+		mtrr_usage_table[i] = 1;
+ 	srat_detect_node();
  }
- 
- struct set_mtrr_data {
-@@ -311,7 +306,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
-  */
- 
- int mtrr_add_page(unsigned long base, unsigned long size, 
--		  unsigned int type, char increment)
-+		  unsigned int type, bool increment)
- {
- 	int i, replace, error;
- 	mtrr_type ltype;
-@@ -349,7 +344,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
- 	replace = -1;
- 
- 	/* No CPU hotplug when we change MTRR entries */
--	lock_cpu_hotplug();
-+	get_online_cpus();
- 	/*  Search for existing MTRR  */
- 	mutex_lock(&mtrr_mutex);
- 	for (i = 0; i < num_var_ranges; ++i) {
-@@ -383,7 +378,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
- 			goto out;
- 		}
- 		if (increment)
--			++usage_table[i];
-+			++mtrr_usage_table[i];
- 		error = i;
- 		goto out;
- 	}
-@@ -391,13 +386,15 @@ int mtrr_add_page(unsigned long base, unsigned long size,
- 	i = mtrr_if->get_free_region(base, size, replace);
- 	if (i >= 0) {
- 		set_mtrr(i, base, size, type);
--		if (likely(replace < 0))
--			usage_table[i] = 1;
--		else {
--			usage_table[i] = usage_table[replace] + !!increment;
-+		if (likely(replace < 0)) {
-+			mtrr_usage_table[i] = 1;
-+		} else {
-+			mtrr_usage_table[i] = mtrr_usage_table[replace];
-+			if (increment)
-+				mtrr_usage_table[i]++;
- 			if (unlikely(replace != i)) {
- 				set_mtrr(replace, 0, 0, 0);
--				usage_table[replace] = 0;
-+				mtrr_usage_table[replace] = 0;
- 			}
- 		}
- 	} else
-@@ -405,7 +402,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
- 	error = i;
-  out:
- 	mutex_unlock(&mtrr_mutex);
--	unlock_cpu_hotplug();
-+	put_online_cpus();
- 	return error;
+@@ -835,18 +872,12 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+ 		c->x86_vendor = X86_VENDOR_UNKNOWN;
  }
  
-@@ -460,7 +457,7 @@ static int mtrr_check(unsigned long base, unsigned long size)
- 
- int
- mtrr_add(unsigned long base, unsigned long size, unsigned int type,
--	 char increment)
-+	 bool increment)
+-struct cpu_model_info {
+-	int vendor;
+-	int family;
+-	char *model_names[16];
+-};
+-
+ /* Do some early cpuid on the boot CPU to get some parameters that are
+    needed before check_bugs. Everything advanced is in identify_cpu
+    below. */
+-void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
++static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
  {
- 	if (mtrr_check(base, size))
- 		return -EINVAL;
-@@ -495,7 +492,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
+-	u32 tfms;
++	u32 tfms, xlvl;
  
- 	max = num_var_ranges;
- 	/* No CPU hotplug when we change MTRR entries */
--	lock_cpu_hotplug();
-+	get_online_cpus();
- 	mutex_lock(&mtrr_mutex);
- 	if (reg < 0) {
- 		/*  Search for existing MTRR  */
-@@ -527,16 +524,16 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
- 		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
- 		goto out;
- 	}
--	if (usage_table[reg] < 1) {
-+	if (mtrr_usage_table[reg] < 1) {
- 		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
- 		goto out;
- 	}
--	if (--usage_table[reg] < 1)
-+	if (--mtrr_usage_table[reg] < 1)
- 		set_mtrr(reg, 0, 0, 0);
- 	error = reg;
-  out:
- 	mutex_unlock(&mtrr_mutex);
--	unlock_cpu_hotplug();
-+	put_online_cpus();
- 	return error;
- }
- /**
-@@ -591,16 +588,11 @@ struct mtrr_value {
- 	unsigned long	lsize;
- };
+ 	c->loops_per_jiffy = loops_per_jiffy;
+ 	c->x86_cache_size = -1;
+@@ -857,6 +888,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+ 	c->x86_clflush_size = 64;
+ 	c->x86_cache_alignment = c->x86_clflush_size;
+ 	c->x86_max_cores = 1;
++	c->x86_coreid_bits = 0;
+ 	c->extended_cpuid_level = 0;
+ 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
  
--static struct mtrr_value * mtrr_state;
-+static struct mtrr_value mtrr_state[MAX_VAR_RANGES];
+@@ -865,7 +897,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+ 	      (unsigned int *)&c->x86_vendor_id[0],
+ 	      (unsigned int *)&c->x86_vendor_id[8],
+ 	      (unsigned int *)&c->x86_vendor_id[4]);
+-		
++
+ 	get_cpu_vendor(c);
  
- static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
- {
- 	int i;
--	int size = num_var_ranges * sizeof(struct mtrr_value);
+ 	/* Initialize the standard set of capabilities */
+@@ -883,7 +915,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+ 			c->x86 += (tfms >> 20) & 0xff;
+ 		if (c->x86 >= 0x6)
+ 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
+-		if (c->x86_capability[0] & (1<<19)) 
++		if (c->x86_capability[0] & (1<<19))
+ 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+ 	} else {
+ 		/* Have CPUID level 0 only - unheard of */
+@@ -893,18 +925,6 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+ #ifdef CONFIG_SMP
+ 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
+ #endif
+-}
 -
--	mtrr_state = kzalloc(size,GFP_ATOMIC);
--	if (!mtrr_state)
--		return -ENOMEM;
- 
- 	for (i = 0; i < num_var_ranges; i++) {
- 		mtrr_if->get(i,
-@@ -622,7 +614,6 @@ static int mtrr_restore(struct sys_device * sysdev)
- 				 mtrr_state[i].lsize,
- 				 mtrr_state[i].ltype);
+-/*
+- * This does the hard work of actually picking apart the CPU stuff...
+- */
+-void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+-{
+-	int i;
+-	u32 xlvl;
+-
+-	early_identify_cpu(c);
+-
+ 	/* AMD-defined flags: level 0x80000001 */
+ 	xlvl = cpuid_eax(0x80000000);
+ 	c->extended_cpuid_level = xlvl;
+@@ -925,6 +945,30 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ 			c->x86_capability[2] = cpuid_edx(0x80860001);
  	}
--	kfree(mtrr_state);
- 	return 0;
- }
- 
-@@ -633,6 +624,112 @@ static struct sysdev_driver mtrr_sysdev_driver = {
- 	.resume		= mtrr_restore,
- };
  
-+static int disable_mtrr_trim;
++	c->extended_cpuid_level = cpuid_eax(0x80000000);
++	if (c->extended_cpuid_level >= 0x80000007)
++		c->x86_power = cpuid_edx(0x80000007);
++
++	switch (c->x86_vendor) {
++	case X86_VENDOR_AMD:
++		early_init_amd(c);
++		break;
++	case X86_VENDOR_INTEL:
++		early_init_intel(c);
++		break;
++	}
 +
-+static int __init disable_mtrr_trim_setup(char *str)
-+{
-+	disable_mtrr_trim = 1;
-+	return 0;
 +}
-+early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
 +
 +/*
-+ * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
-+ * for memory >4GB. Check for that here.
-+ * Note this won't check whether the MTRRs below 4GB, where the magic bit
-+ * doesn't apply, are wrong, but so far we don't know of any such case in the wild.
++ * This does the hard work of actually picking apart the CPU stuff...
 + */
-+#define Tom2Enabled (1U << 21)
-+#define Tom2ForceMemTypeWB (1U << 22)
-+
-+static __init int amd_special_default_mtrr(void)
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 +{
-+	u32 l, h;
++	int i;
 +
-+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
-+		return 0;
-+	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
-+		return 0;
-+	/* In case some hypervisor doesn't pass SYSCFG through */
-+	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
-+		return 0;
-+	/*
-+	 * Memory between 4GB and top of mem is forced WB by this magic bit.
-+	 * Reserved before K8RevF, but should be zero there.
-+	 */
-+	if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
-+		 (Tom2Enabled | Tom2ForceMemTypeWB))
-+		return 1;
-+	return 0;
-+}
++	early_identify_cpu(c);
 +
-+/**
-+ * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
-+ *
-+ * Some buggy BIOSes don't setup the MTRRs properly for systems with certain
-+ * memory configurations.  This routine checks that the highest MTRR matches
-+ * the end of memory, to make sure the MTRRs having a write back type cover
-+ * all of the memory the kernel is intending to use. If not, it'll trim any
-+ * memory off the end by adjusting end_pfn, removing it from the kernel's
-+ * allocation pools, warning the user with an obnoxious message.
-+ */
-+int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
-+{
-+	unsigned long i, base, size, highest_addr = 0, def, dummy;
-+	mtrr_type type;
-+	u64 trim_start, trim_size;
+ 	init_scattered_cpuid_features(c);
+ 
+ 	c->apicid = phys_pkg_id(0);
+@@ -954,8 +998,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ 		break;
+ 	}
+ 
+-	select_idle_routine(c);
+-	detect_ht(c); 
++	detect_ht(c);
+ 
+ 	/*
+ 	 * On SMP, boot_cpu_data holds the common feature set between
+@@ -965,32 +1008,56 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ 	 */
+ 	if (c != &boot_cpu_data) {
+ 		/* AND the already accumulated flags with these */
+-		for (i = 0 ; i < NCAPINTS ; i++)
++		for (i = 0; i < NCAPINTS; i++)
+ 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+ 	}
+ 
++	/* Clear all flags overridden by options */
++	for (i = 0; i < NCAPINTS; i++)
++		c->x86_capability[i] ^= cleared_cpu_caps[i];
 +
-+	/*
-+	 * Make sure we only trim uncachable memory on machines that
-+	 * support the Intel MTRR architecture:
-+	 */
-+	if (!is_cpu(INTEL) || disable_mtrr_trim)
-+		return 0;
-+	rdmsr(MTRRdefType_MSR, def, dummy);
-+	def &= 0xff;
-+	if (def != MTRR_TYPE_UNCACHABLE)
-+		return 0;
+ #ifdef CONFIG_X86_MCE
+ 	mcheck_init(c);
+ #endif
++	select_idle_routine(c);
 +
-+	if (amd_special_default_mtrr())
-+		return 0;
+ 	if (c != &boot_cpu_data)
+ 		mtrr_ap_init();
+ #ifdef CONFIG_NUMA
+ 	numa_add_cpu(smp_processor_id());
+ #endif
 +
-+	/* Find highest cached pfn */
-+	for (i = 0; i < num_var_ranges; i++) {
-+		mtrr_if->get(i, &base, &size, &type);
-+		if (type != MTRR_TYPE_WRBACK)
-+			continue;
-+		base <<= PAGE_SHIFT;
-+		size <<= PAGE_SHIFT;
-+		if (highest_addr < base + size)
-+			highest_addr = base + size;
-+	}
++}
 +
-+	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
-+	if (!highest_addr) {
-+		printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n");
-+		WARN_ON(1);
++static __init int setup_noclflush(char *arg)
++{
++	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
++	return 1;
+ }
+- 
++__setup("noclflush", setup_noclflush);
+ 
+ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+ {
+ 	if (c->x86_model_id[0])
+-		printk("%s", c->x86_model_id);
++		printk(KERN_INFO "%s", c->x86_model_id);
+ 
+-	if (c->x86_mask || c->cpuid_level >= 0) 
+-		printk(" stepping %02x\n", c->x86_mask);
++	if (c->x86_mask || c->cpuid_level >= 0)
++		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
+ 	else
+-		printk("\n");
++		printk(KERN_CONT "\n");
+ }
+ 
++static __init int setup_disablecpuid(char *arg)
++{
++	int bit;
++	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
++		setup_clear_cpu_cap(bit);
++	else
 +		return 0;
-+	}
++	return 1;
++}
++__setup("clearcpuid=", setup_disablecpuid);
 +
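The clearcpuid= handler above only parses one integer and remembers which capability bit to mask out later. As a rough illustration of that bookkeeping (ordinary C with a stand-in capability array, not the real setup_clear_cpu_cap()/cleared_cpu_caps machinery), one might write:

#include <stdio.h>
#include <stdlib.h>

#define NCAPINTS 8	/* stand-in for the kernel's capability word count */

static unsigned int cleared_caps[NCAPINTS];

/* Record that capability bit 'bit' should be masked out later. */
static int clear_cpu_bit(const char *arg)
{
	char *end;
	long bit = strtol(arg, &end, 0);

	if (end == arg || bit < 0 || bit >= NCAPINTS * 32)
		return 0;	/* reject the option, like the handler returning 0 */
	cleared_caps[bit / 32] |= 1u << (bit % 32);
	return 1;
}

int main(void)
{
	clear_cpu_bit("19");	/* e.g. hide the clflush feature bit */
	printf("word 0 clear mask: %#x\n", cleared_caps[0]);
	return 0;
}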
-+	if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
-+		printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
-+			" all of memory, losing %LdMB of RAM.\n",
-+			(((u64)end_pfn << PAGE_SHIFT) - highest_addr) >> 20);
+ /*
+  *	Get CPU information for use by the procfs.
+  */
+@@ -998,9 +1065,9 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+ static int show_cpuinfo(struct seq_file *m, void *v)
+ {
+ 	struct cpuinfo_x86 *c = v;
+-	int cpu = 0;
++	int cpu = 0, i;
+ 
+-	/* 
++	/*
+ 	 * These flag bits must match the definitions in <asm/cpufeature.h>.
+ 	 * NULL means this bit is undefined or reserved; either way it doesn't
+ 	 * have meaning as far as Linux is concerned.  Note that it's important
+@@ -1010,10 +1077,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 	 */
+ 	static const char *const x86_cap_flags[] = {
+ 		/* Intel-defined */
+-	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
+-	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
+-	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
+-	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
++		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
++		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
++		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
++		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
+ 
+ 		/* AMD-defined */
+ 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+@@ -1080,34 +1147,35 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 	cpu = c->cpu_index;
+ #endif
+ 
+-	seq_printf(m,"processor\t: %u\n"
+-		     "vendor_id\t: %s\n"
+-		     "cpu family\t: %d\n"
+-		     "model\t\t: %d\n"
+-		     "model name\t: %s\n",
+-		     (unsigned)cpu,
+-		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
+-		     c->x86,
+-		     (int)c->x86_model,
+-		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
+-	
++	seq_printf(m, "processor\t: %u\n"
++		   "vendor_id\t: %s\n"
++		   "cpu family\t: %d\n"
++		   "model\t\t: %d\n"
++		   "model name\t: %s\n",
++		   (unsigned)cpu,
++		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
++		   c->x86,
++		   (int)c->x86_model,
++		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
 +
-+		WARN_ON(1);
+ 	if (c->x86_mask || c->cpuid_level >= 0)
+ 		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+ 	else
+ 		seq_printf(m, "stepping\t: unknown\n");
+-	
+-	if (cpu_has(c,X86_FEATURE_TSC)) {
 +
-+		printk(KERN_INFO "update e820 for mtrr\n");
-+		trim_start = highest_addr;
-+		trim_size = end_pfn;
-+		trim_size <<= PAGE_SHIFT;
-+		trim_size -= trim_start;
-+		add_memory_region(trim_start, trim_size, E820_RESERVED);
-+		update_e820();
-+		return 1;
-+	}
++	if (cpu_has(c, X86_FEATURE_TSC)) {
+ 		unsigned int freq = cpufreq_quick_get((unsigned)cpu);
 +
-+	return 0;
-+}
- 
- /**
-  * mtrr_bp_init - initialize mtrrs on the boot CPU
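The trimming logic added above reduces to: walk the variable MTRRs, remember the highest address covered by a write-back range, and compare that against end_pfn. A rough standalone sketch of that comparison, in plain C with made-up sample ranges standing in for the real mtrr_if->get() results, looks like this:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define MTRR_TYPE_WRBACK 6

struct var_range { uint64_t base_pfn, size_pfn; int type; };

/* Hypothetical sample data standing in for mtrr_if->get() results. */
static const struct var_range ranges[] = {
	{ 0x00000, 0x80000, MTRR_TYPE_WRBACK },	/* 0-2GB write-back */
	{ 0x80000, 0x40000, MTRR_TYPE_WRBACK },	/* 2-3GB write-back */
};

int main(void)
{
	uint64_t end_pfn = 0xC8000;	/* pretend e820 says RAM ends at 3.125GB */
	uint64_t highest = 0;
	unsigned i;

	for (i = 0; i < sizeof(ranges)/sizeof(ranges[0]); i++) {
		uint64_t base = ranges[i].base_pfn << PAGE_SHIFT;
		uint64_t size = ranges[i].size_pfn << PAGE_SHIFT;
		if (ranges[i].type != MTRR_TYPE_WRBACK)
			continue;
		if (highest < base + size)
			highest = base + size;
	}

	if ((highest >> PAGE_SHIFT) < end_pfn)
		printf("would trim %llu MB above %#llx\n",
		       (unsigned long long)(((end_pfn << PAGE_SHIFT) - highest) >> 20),
		       (unsigned long long)highest);
	else
		printf("MTRRs cover all of RAM\n");
	return 0;
}

With write-back coverage stopping at 3GB while end_pfn claims 3.125GB, the sketch reports 128 MB to trim, which is the situation the e820 reservation in the patch is meant to handle.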
-diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
-index 289dfe6..fb74a2c 100644
---- a/arch/x86/kernel/cpu/mtrr/mtrr.h
-+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
-@@ -2,10 +2,8 @@
-  * local mtrr defines.
-  */
- 
--#ifndef TRUE
--#define TRUE  1
--#define FALSE 0
--#endif
-+#include <linux/types.h>
-+#include <linux/stddef.h>
+ 		if (!freq)
+ 			freq = cpu_khz;
+ 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+-			     freq / 1000, (freq % 1000));
++			   freq / 1000, (freq % 1000));
+ 	}
  
- #define MTRRcap_MSR     0x0fe
- #define MTRRdefType_MSR 0x2ff
-@@ -14,6 +12,7 @@
- #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
+ 	/* Cache size */
+-	if (c->x86_cache_size >= 0) 
++	if (c->x86_cache_size >= 0)
+ 		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+-	
++
+ #ifdef CONFIG_SMP
+ 	if (smp_num_siblings * c->x86_max_cores > 1) {
+ 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+@@ -1116,48 +1184,43 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
+ 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
+ 	}
+-#endif	
++#endif
  
- #define NUM_FIXED_RANGES 88
-+#define MAX_VAR_RANGES 256
- #define MTRRfix64K_00000_MSR 0x250
- #define MTRRfix16K_80000_MSR 0x258
- #define MTRRfix16K_A0000_MSR 0x259
-@@ -34,6 +33,8 @@
-    an 8 bit field: */
- typedef u8 mtrr_type;
+ 	seq_printf(m,
+-	        "fpu\t\t: yes\n"
+-	        "fpu_exception\t: yes\n"
+-	        "cpuid level\t: %d\n"
+-	        "wp\t\t: yes\n"
+-	        "flags\t\t:",
++		   "fpu\t\t: yes\n"
++		   "fpu_exception\t: yes\n"
++		   "cpuid level\t: %d\n"
++		   "wp\t\t: yes\n"
++		   "flags\t\t:",
+ 		   c->cpuid_level);
  
-+extern unsigned int mtrr_usage_table[MAX_VAR_RANGES];
+-	{ 
+-		int i; 
+-		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
+-			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
+-				seq_printf(m, " %s", x86_cap_flags[i]);
+-	}
+-		
++	for (i = 0; i < 32*NCAPINTS; i++)
++		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
++			seq_printf(m, " %s", x86_cap_flags[i]);
 +
- struct mtrr_ops {
- 	u32	vendor;
- 	u32	use_intel_if;
-diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
-index 49e20c2..9f8ba92 100644
---- a/arch/x86/kernel/cpu/mtrr/state.c
-+++ b/arch/x86/kernel/cpu/mtrr/state.c
-@@ -4,6 +4,7 @@
- #include <asm/mtrr.h>
- #include <asm/msr.h>
- #include <asm/processor-cyrix.h>
-+#include <asm/processor-flags.h>
- #include "mtrr.h"
+ 	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
+ 		   c->loops_per_jiffy/(500000/HZ),
+ 		   (c->loops_per_jiffy/(5000/HZ)) % 100);
  
+-	if (c->x86_tlbsize > 0) 
++	if (c->x86_tlbsize > 0)
+ 		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
+ 	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
+ 	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
  
-@@ -25,7 +26,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
+-	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
++	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
+ 		   c->x86_phys_bits, c->x86_virt_bits);
  
- 		/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
- 		    a side-effect  */
--		cr0 = read_cr0() | 0x40000000;
-+		cr0 = read_cr0() | X86_CR0_CD;
- 		wbinvd();
- 		write_cr0(cr0);
- 		wbinvd();
-diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
-index c02541e..9b83832 100644
---- a/arch/x86/kernel/cpu/perfctr-watchdog.c
-+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
-@@ -167,7 +167,6 @@ void release_evntsel_nmi(unsigned int msr)
- 	clear_bit(counter, evntsel_nmi_owner);
- }
+ 	seq_printf(m, "power management:");
+-	{
+-		unsigned i;
+-		for (i = 0; i < 32; i++) 
+-			if (c->x86_power & (1 << i)) {
+-				if (i < ARRAY_SIZE(x86_power_flags) &&
+-					x86_power_flags[i])
+-					seq_printf(m, "%s%s",
+-						x86_power_flags[i][0]?" ":"",
+-						x86_power_flags[i]);
+-				else
+-					seq_printf(m, " [%d]", i);
+-			}
++	for (i = 0; i < 32; i++) {
++		if (c->x86_power & (1 << i)) {
++			if (i < ARRAY_SIZE(x86_power_flags) &&
++			    x86_power_flags[i])
++				seq_printf(m, "%s%s",
++					   x86_power_flags[i][0]?" ":"",
++					   x86_power_flags[i]);
++			else
++				seq_printf(m, " [%d]", i);
++		}
+ 	}
  
--EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
- EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
- EXPORT_SYMBOL(reserve_perfctr_nmi);
- EXPORT_SYMBOL(release_perfctr_nmi);
-diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
-index 3900e46..0282132 100644
---- a/arch/x86/kernel/cpu/proc.c
-+++ b/arch/x86/kernel/cpu/proc.c
-@@ -188,7 +188,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
- static void c_stop(struct seq_file *m, void *v)
+ 	seq_printf(m, "\n\n");
+@@ -1184,8 +1247,8 @@ static void c_stop(struct seq_file *m, void *v)
  {
  }
+ 
 -struct seq_operations cpuinfo_op = {
+-	.start =c_start,
 +const struct seq_operations cpuinfo_op = {
- 	.start	= c_start,
- 	.next	= c_next,
- 	.stop	= c_stop,
-diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
-index 05c9936..dec66e4 100644
---- a/arch/x86/kernel/cpuid.c
-+++ b/arch/x86/kernel/cpuid.c
-@@ -50,7 +50,7 @@ struct cpuid_command {
++	.start = c_start,
+ 	.next =	c_next,
+ 	.stop =	c_stop,
+ 	.show =	show_cpuinfo,
+diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
+index 9bdd830..caee1f0 100644
+--- a/arch/x86/kernel/signal_32.c
++++ b/arch/x86/kernel/signal_32.c
+@@ -23,6 +23,7 @@
+ #include <asm/ucontext.h>
+ #include <asm/uaccess.h>
+ #include <asm/i387.h>
++#include <asm/vdso.h>
+ #include "sigframe_32.h"
  
- static void cpuid_smp_cpuid(void *cmd_block)
- {
--	struct cpuid_command *cmd = (struct cpuid_command *)cmd_block;
-+	struct cpuid_command *cmd = cmd_block;
+ #define DEBUG_SIG 0
+@@ -81,14 +82,14 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
+ }
  
- 	cpuid(cmd->reg, &cmd->data[0], &cmd->data[1], &cmd->data[2],
- 		      &cmd->data[3]);
-@@ -157,15 +157,15 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
+ asmlinkage int
+-sys_sigaltstack(unsigned long ebx)
++sys_sigaltstack(unsigned long bx)
+ {
+ 	/* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
+-	struct pt_regs *regs = (struct pt_regs *)&ebx;
+-	const stack_t __user *uss = (const stack_t __user *)ebx;
+-	stack_t __user *uoss = (stack_t __user *)regs->ecx;
++	struct pt_regs *regs = (struct pt_regs *)&bx;
++	const stack_t __user *uss = (const stack_t __user *)bx;
++	stack_t __user *uoss = (stack_t __user *)regs->cx;
  
- 	switch (action) {
- 	case CPU_UP_PREPARE:
--	case CPU_UP_PREPARE_FROZEN:
- 		err = cpuid_device_create(cpu);
- 		break;
- 	case CPU_UP_CANCELED:
--	case CPU_UP_CANCELED_FROZEN:
- 	case CPU_DEAD:
--	case CPU_DEAD_FROZEN:
- 		cpuid_device_destroy(cpu);
- 		break;
-+	case CPU_UP_CANCELED_FROZEN:
-+		destroy_suspended_device(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
-+		break;
- 	}
- 	return err ? NOTIFY_BAD : NOTIFY_OK;
+-	return do_sigaltstack(uss, uoss, regs->esp);
++	return do_sigaltstack(uss, uoss, regs->sp);
  }
-diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
-index 40978af..a47798b 100644
---- a/arch/x86/kernel/doublefault_32.c
-+++ b/arch/x86/kernel/doublefault_32.c
-@@ -17,7 +17,7 @@ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
  
- static void doublefault_fn(void)
- {
--	struct Xgt_desc_struct gdt_desc = {0, 0};
-+	struct desc_ptr gdt_desc = {0, 0};
- 	unsigned long gdt, tss;
  
- 	store_gdt(&gdt_desc);
-@@ -33,14 +33,15 @@ static void doublefault_fn(void)
- 		printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
+@@ -109,12 +110,12 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
+ #define COPY_SEG(seg)							\
+ 	{ unsigned short tmp;						\
+ 	  err |= __get_user(tmp, &sc->seg);				\
+-	  regs->x##seg = tmp; }
++	  regs->seg = tmp; }
  
- 		if (ptr_ok(tss)) {
--			struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
-+			struct x86_hw_tss *t = (struct x86_hw_tss *)tss;
+ #define COPY_SEG_STRICT(seg)						\
+ 	{ unsigned short tmp;						\
+ 	  err |= __get_user(tmp, &sc->seg);				\
+-	  regs->x##seg = tmp|3; }
++	  regs->seg = tmp|3; }
  
--			printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->eip, t->esp);
-+			printk(KERN_EMERG "eip = %08lx, esp = %08lx\n",
-+			       t->ip, t->sp);
+ #define GET_SEG(seg)							\
+ 	{ unsigned short tmp;						\
+@@ -130,22 +131,22 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
+ 	COPY_SEG(fs);
+ 	COPY_SEG(es);
+ 	COPY_SEG(ds);
+-	COPY(edi);
+-	COPY(esi);
+-	COPY(ebp);
+-	COPY(esp);
+-	COPY(ebx);
+-	COPY(edx);
+-	COPY(ecx);
+-	COPY(eip);
++	COPY(di);
++	COPY(si);
++	COPY(bp);
++	COPY(sp);
++	COPY(bx);
++	COPY(dx);
++	COPY(cx);
++	COPY(ip);
+ 	COPY_SEG_STRICT(cs);
+ 	COPY_SEG_STRICT(ss);
+ 	
+ 	{
+ 		unsigned int tmpflags;
+-		err |= __get_user(tmpflags, &sc->eflags);
+-		regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+-		regs->orig_eax = -1;		/* disable syscall checks */
++		err |= __get_user(tmpflags, &sc->flags);
++		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
++		regs->orig_ax = -1;		/* disable syscall checks */
+ 	}
  
- 			printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
--				t->eax, t->ebx, t->ecx, t->edx);
-+				t->ax, t->bx, t->cx, t->dx);
- 			printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
--				t->esi, t->edi);
-+				t->si, t->di);
+ 	{
+@@ -164,7 +165,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
  		}
  	}
  
-@@ -50,15 +51,15 @@ static void doublefault_fn(void)
+-	err |= __get_user(*peax, &sc->eax);
++	err |= __get_user(*peax, &sc->ax);
+ 	return err;
  
- struct tss_struct doublefault_tss __cacheline_aligned = {
- 	.x86_tss = {
--		.esp0		= STACK_START,
-+		.sp0		= STACK_START,
- 		.ss0		= __KERNEL_DS,
- 		.ldt		= 0,
- 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
+ badframe:
+@@ -174,9 +175,9 @@ badframe:
+ asmlinkage int sys_sigreturn(unsigned long __unused)
+ {
+ 	struct pt_regs *regs = (struct pt_regs *) &__unused;
+-	struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
++	struct sigframe __user *frame = (struct sigframe __user *)(regs->sp - 8);
+ 	sigset_t set;
+-	int eax;
++	int ax;
  
--		.eip		= (unsigned long) doublefault_fn,
-+		.ip		= (unsigned long) doublefault_fn,
- 		/* 0x2 bit is always set */
--		.eflags		= X86_EFLAGS_SF | 0x2,
--		.esp		= STACK_START,
-+		.flags		= X86_EFLAGS_SF | 0x2,
-+		.sp		= STACK_START,
- 		.es		= __USER_DS,
- 		.cs		= __KERNEL_CS,
- 		.ss		= __KERNEL_DS,
-diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
-new file mode 100644
-index 0000000..1c5ca4d
---- /dev/null
-+++ b/arch/x86/kernel/ds.c
-@@ -0,0 +1,464 @@
-+/*
-+ * Debug Store support
-+ *
-+ * This provides a low-level interface to the hardware's Debug Store
-+ * feature that is used for last branch recording (LBR) and
-+ * precise-event based sampling (PEBS).
-+ *
-+ * Different architectures use a different DS layout/pointer size.
-+ * The below functions therefore work on a void*.
-+ *
-+ *
-+ * Since there is no user for PEBS, yet, only LBR (or branch
-+ * trace store, BTS) is supported.
-+ *
-+ *
-+ * Copyright (C) 2007 Intel Corporation.
-+ * Markus Metzger <markus.t.metzger at intel.com>, Dec 2007
-+ */
-+
-+#include <asm/ds.h>
-+
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+
-+
-+/*
-+ * Debug Store (DS) save area configuration (see Intel64 and IA32
-+ * Architectures Software Developer's Manual, section 18.5)
-+ *
-+ * The DS configuration consists of the following fields; different
-+ * architectures vary in the size of those fields.
-+ * - double-word aligned base linear address of the BTS buffer
-+ * - write pointer into the BTS buffer
-+ * - end linear address of the BTS buffer (one byte beyond the end of
-+ *   the buffer)
-+ * - interrupt pointer into BTS buffer
-+ *   (interrupt occurs when write pointer passes interrupt pointer)
-+ * - double-word aligned base linear address of the PEBS buffer
-+ * - write pointer into the PEBS buffer
-+ * - end linear address of the PEBS buffer (one byte beyond the end of
-+ *   the buffer)
-+ * - interrupt pointer into PEBS buffer
-+ *   (interrupt occurs when write pointer passes interrupt pointer)
-+ * - value to which counter is reset following counter overflow
-+ *
-+ * On later architectures, the last branch recording hardware uses
-+ * 64bit pointers even in 32bit mode.
-+ *
-+ *
-+ * Branch Trace Store (BTS) records store information about control
-+ * flow changes. They at least provide the following information:
-+ * - source linear address
-+ * - destination linear address
-+ *
-+ * Netburst supported a predicated bit that had been dropped in later
-+ * architectures. We do not suppor it.
-+ *
-+ *
-+ * In order to abstract from the actual DS and BTS layout, we describe
-+ * the access to the relevant fields.
-+ * Thanks to Andi Kleen for proposing this design.
-+ *
-+ * The implementation, however, is not as general as it might seem. In
-+ * order to stay somewhat simple and efficient, we assume an
-+ * underlying unsigned type (mostly a pointer type) and we expect the
-+ * field to be at least as big as that type.
-+ */
-+
-+/*
-+ * A special from_ip address to indicate that the BTS record is an
-+ * info record that needs to be interpreted or skipped.
-+ */
-+#define BTS_ESCAPE_ADDRESS (-1)
-+
-+/*
-+ * A field access descriptor
-+ */
-+struct access_desc {
-+	unsigned char offset;
-+	unsigned char size;
-+};
-+
-+/*
-+ * The configuration for a particular DS/BTS hardware implementation.
-+ */
-+struct ds_configuration {
-+	/* the DS configuration */
-+	unsigned char  sizeof_ds;
-+	struct access_desc bts_buffer_base;
-+	struct access_desc bts_index;
-+	struct access_desc bts_absolute_maximum;
-+	struct access_desc bts_interrupt_threshold;
-+	/* the BTS configuration */
-+	unsigned char  sizeof_bts;
-+	struct access_desc from_ip;
-+	struct access_desc to_ip;
-+	/* BTS variants used to store additional information like
-+	   timestamps */
-+	struct access_desc info_type;
-+	struct access_desc info_data;
-+	unsigned long debugctl_mask;
-+};
-+
-+/*
-+ * The global configuration used by the below accessor functions
-+ */
-+static struct ds_configuration ds_cfg;
-+
-+/*
-+ * Accessor functions for some DS and BTS fields using the above
-+ * global ptrace_bts_cfg.
-+ */
-+static inline unsigned long get_bts_buffer_base(char *base)
-+{
-+	return *(unsigned long *)(base + ds_cfg.bts_buffer_base.offset);
-+}
-+static inline void set_bts_buffer_base(char *base, unsigned long value)
-+{
-+	(*(unsigned long *)(base + ds_cfg.bts_buffer_base.offset)) = value;
-+}
-+static inline unsigned long get_bts_index(char *base)
-+{
-+	return *(unsigned long *)(base + ds_cfg.bts_index.offset);
-+}
-+static inline void set_bts_index(char *base, unsigned long value)
-+{
-+	(*(unsigned long *)(base + ds_cfg.bts_index.offset)) = value;
-+}
-+static inline unsigned long get_bts_absolute_maximum(char *base)
-+{
-+	return *(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset);
-+}
-+static inline void set_bts_absolute_maximum(char *base, unsigned long value)
-+{
-+	(*(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset)) = value;
-+}
-+static inline unsigned long get_bts_interrupt_threshold(char *base)
-+{
-+	return *(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset);
-+}
-+static inline void set_bts_interrupt_threshold(char *base, unsigned long value)
-+{
-+	(*(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset)) = value;
-+}
-+static inline unsigned long get_from_ip(char *base)
-+{
-+	return *(unsigned long *)(base + ds_cfg.from_ip.offset);
-+}
-+static inline void set_from_ip(char *base, unsigned long value)
-+{
-+	(*(unsigned long *)(base + ds_cfg.from_ip.offset)) = value;
-+}
-+static inline unsigned long get_to_ip(char *base)
-+{
-+	return *(unsigned long *)(base + ds_cfg.to_ip.offset);
-+}
-+static inline void set_to_ip(char *base, unsigned long value)
-+{
-+	(*(unsigned long *)(base + ds_cfg.to_ip.offset)) = value;
-+}
-+static inline unsigned char get_info_type(char *base)
-+{
-+	return *(unsigned char *)(base + ds_cfg.info_type.offset);
-+}
-+static inline void set_info_type(char *base, unsigned char value)
-+{
-+	(*(unsigned char *)(base + ds_cfg.info_type.offset)) = value;
-+}
-+static inline unsigned long get_info_data(char *base)
-+{
-+	return *(unsigned long *)(base + ds_cfg.info_data.offset);
-+}
-+static inline void set_info_data(char *base, unsigned long value)
-+{
-+	(*(unsigned long *)(base + ds_cfg.info_data.offset)) = value;
-+}
-+
-+
-+int ds_allocate(void **dsp, size_t bts_size_in_bytes)
-+{
-+	size_t bts_size_in_records;
-+	unsigned long bts;
-+	void *ds;
-+
-+	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
-+		return -EOPNOTSUPP;
-+
-+	if (bts_size_in_bytes < 0)
-+		return -EINVAL;
-+
-+	bts_size_in_records =
-+		bts_size_in_bytes / ds_cfg.sizeof_bts;
-+	bts_size_in_bytes =
-+		bts_size_in_records * ds_cfg.sizeof_bts;
-+
-+	if (bts_size_in_bytes <= 0)
-+		return -EINVAL;
-+
-+	bts = (unsigned long)kzalloc(bts_size_in_bytes, GFP_KERNEL);
-+
-+	if (!bts)
-+		return -ENOMEM;
-+
-+	ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
-+
-+	if (!ds) {
-+		kfree((void *)bts);
-+		return -ENOMEM;
-+	}
-+
-+	set_bts_buffer_base(ds, bts);
-+	set_bts_index(ds, bts);
-+	set_bts_absolute_maximum(ds, bts + bts_size_in_bytes);
-+	set_bts_interrupt_threshold(ds, bts + bts_size_in_bytes + 1);
-+
-+	*dsp = ds;
-+	return 0;
-+}
-+
-+int ds_free(void **dsp)
-+{
-+	if (*dsp)
-+		kfree((void *)get_bts_buffer_base(*dsp));
-+	kfree(*dsp);
-+	*dsp = 0;
-+
-+	return 0;
-+}
-+
-+int ds_get_bts_size(void *ds)
-+{
-+	int size_in_bytes;
-+
-+	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
-+		return -EOPNOTSUPP;
-+
-+	if (!ds)
-+		return 0;
-+
-+	size_in_bytes =
-+		get_bts_absolute_maximum(ds) -
-+		get_bts_buffer_base(ds);
-+	return size_in_bytes;
-+}
-+
-+int ds_get_bts_end(void *ds)
-+{
-+	int size_in_bytes = ds_get_bts_size(ds);
-+
-+	if (size_in_bytes <= 0)
-+		return size_in_bytes;
-+
-+	return size_in_bytes / ds_cfg.sizeof_bts;
-+}
-+
-+int ds_get_bts_index(void *ds)
-+{
-+	int index_offset_in_bytes;
-+
-+	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
-+		return -EOPNOTSUPP;
-+
-+	index_offset_in_bytes =
-+		get_bts_index(ds) -
-+		get_bts_buffer_base(ds);
-+
-+	return index_offset_in_bytes / ds_cfg.sizeof_bts;
-+}
-+
-+int ds_set_overflow(void *ds, int method)
-+{
-+	switch (method) {
-+	case DS_O_SIGNAL:
-+		return -EOPNOTSUPP;
-+	case DS_O_WRAP:
-+		return 0;
-+	default:
-+		return -EINVAL;
-+	}
-+}
-+
-+int ds_get_overflow(void *ds)
-+{
-+	return DS_O_WRAP;
-+}
-+
-+int ds_clear(void *ds)
-+{
-+	int bts_size = ds_get_bts_size(ds);
-+	unsigned long bts_base;
-+
-+	if (bts_size <= 0)
-+		return bts_size;
-+
-+	bts_base = get_bts_buffer_base(ds);
-+	memset((void *)bts_base, 0, bts_size);
-+
-+	set_bts_index(ds, bts_base);
-+	return 0;
-+}
-+
-+int ds_read_bts(void *ds, int index, struct bts_struct *out)
-+{
-+	void *bts;
-+
-+	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
-+		return -EOPNOTSUPP;
-+
-+	if (index < 0)
-+		return -EINVAL;
-+
-+	if (index >= ds_get_bts_size(ds))
-+		return -EINVAL;
-+
-+	bts = (void *)(get_bts_buffer_base(ds) + (index * ds_cfg.sizeof_bts));
-+
-+	memset(out, 0, sizeof(*out));
-+	if (get_from_ip(bts) == BTS_ESCAPE_ADDRESS) {
-+		out->qualifier       = get_info_type(bts);
-+		out->variant.jiffies = get_info_data(bts);
-+	} else {
-+		out->qualifier = BTS_BRANCH;
-+		out->variant.lbr.from_ip = get_from_ip(bts);
-+		out->variant.lbr.to_ip   = get_to_ip(bts);
-+	}
-+
-+	return sizeof(*out);
-+}
-+
-+int ds_write_bts(void *ds, const struct bts_struct *in)
-+{
-+	unsigned long bts;
-+
-+	if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
-+		return -EOPNOTSUPP;
-+
-+	if (ds_get_bts_size(ds) <= 0)
-+		return -ENXIO;
-+
-+	bts = get_bts_index(ds);
-+
-+	memset((void *)bts, 0, ds_cfg.sizeof_bts);
-+	switch (in->qualifier) {
-+	case BTS_INVALID:
-+		break;
-+
-+	case BTS_BRANCH:
-+		set_from_ip((void *)bts, in->variant.lbr.from_ip);
-+		set_to_ip((void *)bts, in->variant.lbr.to_ip);
-+		break;
-+
-+	case BTS_TASK_ARRIVES:
-+	case BTS_TASK_DEPARTS:
-+		set_from_ip((void *)bts, BTS_ESCAPE_ADDRESS);
-+		set_info_type((void *)bts, in->qualifier);
-+		set_info_data((void *)bts, in->variant.jiffies);
-+		break;
-+
-+	default:
-+		return -EINVAL;
-+	}
-+
-+	bts = bts + ds_cfg.sizeof_bts;
-+	if (bts >= get_bts_absolute_maximum(ds))
-+		bts = get_bts_buffer_base(ds);
-+	set_bts_index(ds, bts);
-+
-+	return ds_cfg.sizeof_bts;
-+}
-+
-+unsigned long ds_debugctl_mask(void)
-+{
-+	return ds_cfg.debugctl_mask;
-+}
-+
-+#ifdef __i386__
-+static const struct ds_configuration ds_cfg_netburst = {
-+	.sizeof_ds = 9 * 4,
-+	.bts_buffer_base = { 0, 4 },
-+	.bts_index = { 4, 4 },
-+	.bts_absolute_maximum = { 8, 4 },
-+	.bts_interrupt_threshold = { 12, 4 },
-+	.sizeof_bts = 3 * 4,
-+	.from_ip = { 0, 4 },
-+	.to_ip = { 4, 4 },
-+	.info_type = { 4, 1 },
-+	.info_data = { 8, 4 },
-+	.debugctl_mask = (1<<2)|(1<<3)
-+};
-+
-+static const struct ds_configuration ds_cfg_pentium_m = {
-+	.sizeof_ds = 9 * 4,
-+	.bts_buffer_base = { 0, 4 },
-+	.bts_index = { 4, 4 },
-+	.bts_absolute_maximum = { 8, 4 },
-+	.bts_interrupt_threshold = { 12, 4 },
-+	.sizeof_bts = 3 * 4,
-+	.from_ip = { 0, 4 },
-+	.to_ip = { 4, 4 },
-+	.info_type = { 4, 1 },
-+	.info_data = { 8, 4 },
-+	.debugctl_mask = (1<<6)|(1<<7)
-+};
-+#endif /* _i386_ */
-+
-+static const struct ds_configuration ds_cfg_core2 = {
-+	.sizeof_ds = 9 * 8,
-+	.bts_buffer_base = { 0, 8 },
-+	.bts_index = { 8, 8 },
-+	.bts_absolute_maximum = { 16, 8 },
-+	.bts_interrupt_threshold = { 24, 8 },
-+	.sizeof_bts = 3 * 8,
-+	.from_ip = { 0, 8 },
-+	.to_ip = { 8, 8 },
-+	.info_type = { 8, 1 },
-+	.info_data = { 16, 8 },
-+	.debugctl_mask = (1<<6)|(1<<7)|(1<<9)
-+};
-+
-+static inline void
-+ds_configure(const struct ds_configuration *cfg)
-+{
-+	ds_cfg = *cfg;
-+}
-+
-+void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
-+{
-+	switch (c->x86) {
-+	case 0x6:
-+		switch (c->x86_model) {
-+#ifdef __i386__
-+		case 0xD:
-+		case 0xE: /* Pentium M */
-+			ds_configure(&ds_cfg_pentium_m);
-+			break;
-+#endif /* _i386_ */
-+		case 0xF: /* Core2 */
-+			ds_configure(&ds_cfg_core2);
-+			break;
-+		default:
-+			/* sorry, don't know about them */
-+			break;
-+		}
-+		break;
-+	case 0xF:
-+		switch (c->x86_model) {
-+#ifdef __i386__
-+		case 0x0:
-+		case 0x1:
-+		case 0x2: /* Netburst */
-+			ds_configure(&ds_cfg_netburst);
-+			break;
-+#endif /* _i386_ */
-+		default:
-+			/* sorry, don't know about them */
-+			break;
-+		}
-+		break;
-+	default:
-+		/* sorry, don't know about them */
-+		break;
+ 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ 		goto badframe;
+@@ -192,17 +193,20 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
+ 	recalc_sigpending();
+ 	spin_unlock_irq(&current->sighand->siglock);
+ 	
+-	if (restore_sigcontext(regs, &frame->sc, &eax))
++	if (restore_sigcontext(regs, &frame->sc, &ax))
+ 		goto badframe;
+-	return eax;
++	return ax;
+ 
+ badframe:
+-	if (show_unhandled_signals && printk_ratelimit())
+-		printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx"
+-		       " esp:%lx oeax:%lx\n",
++	if (show_unhandled_signals && printk_ratelimit()) {
++		printk("%s%s[%d] bad frame in sigreturn frame:%p ip:%lx"
++		       " sp:%lx oeax:%lx",
+ 		    task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
+-		    current->comm, task_pid_nr(current), frame, regs->eip,
+-		    regs->esp, regs->orig_eax);
++		    current->comm, task_pid_nr(current), frame, regs->ip,
++		    regs->sp, regs->orig_ax);
++		print_vma_addr(" in ", regs->ip);
++		printk("\n");
 +	}
-+}
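One detail worth noting in ds_write_bts() above is the overflow handling: with DS_O_WRAP the index simply advances by sizeof_bts and snaps back to the buffer base once it reaches the absolute maximum. A small standalone sketch of that ring behaviour, in plain C with invented record and buffer sizes rather than the kernel's DS structures, is:

#include <stdio.h>
#include <string.h>

#define RECORD_SIZE	24	/* stand-in for ds_cfg.sizeof_bts on 64-bit */
#define NUM_RECORDS	4

static unsigned char buffer[NUM_RECORDS * RECORD_SIZE];
static unsigned char *index_ptr = buffer;	/* plays the role of the BTS index field */

/* Write one record and advance the index, wrapping at the end of the buffer. */
static void write_record(const void *rec)
{
	memcpy(index_ptr, rec, RECORD_SIZE);
	index_ptr += RECORD_SIZE;
	if (index_ptr >= buffer + sizeof(buffer))
		index_ptr = buffer;	/* DS_O_WRAP style overflow handling */
}

int main(void)
{
	unsigned char rec[RECORD_SIZE];
	int i;

	for (i = 0; i < 6; i++) {	/* 6 writes into 4 slots: the last two wrap */
		memset(rec, i, sizeof(rec));
		write_record(rec);
		printf("after write %d, index at slot %ld\n",
		       i, (long)((index_ptr - buffer) / RECORD_SIZE));
	}
	return 0;
}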
-diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
-index 18f500d..4e16ef4 100644
---- a/arch/x86/kernel/e820_32.c
-+++ b/arch/x86/kernel/e820_32.c
-@@ -7,7 +7,6 @@
- #include <linux/kexec.h>
- #include <linux/module.h>
- #include <linux/mm.h>
--#include <linux/efi.h>
- #include <linux/pfn.h>
- #include <linux/uaccess.h>
- #include <linux/suspend.h>
-@@ -17,11 +16,6 @@
- #include <asm/e820.h>
- #include <asm/setup.h>
  
--#ifdef CONFIG_EFI
--int efi_enabled = 0;
--EXPORT_SYMBOL(efi_enabled);
--#endif
--
- struct e820map e820;
- struct change_member {
- 	struct e820entry *pbios; /* pointer to original bios entry */
-@@ -37,26 +31,6 @@ unsigned long pci_mem_start = 0x10000000;
- EXPORT_SYMBOL(pci_mem_start);
- #endif
- extern int user_defined_memmap;
--struct resource data_resource = {
--	.name	= "Kernel data",
--	.start	= 0,
--	.end	= 0,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
--};
--
--struct resource code_resource = {
--	.name	= "Kernel code",
--	.start	= 0,
--	.end	= 0,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
--};
--
--struct resource bss_resource = {
--	.name	= "Kernel bss",
--	.start	= 0,
--	.end	= 0,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
--};
+ 	force_sig(SIGSEGV, current);
+ 	return 0;
+@@ -211,9 +215,9 @@ badframe:
+ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
+ {
+ 	struct pt_regs *regs = (struct pt_regs *) &__unused;
+-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
++	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->sp - 4);
+ 	sigset_t set;
+-	int eax;
++	int ax;
  
- static struct resource system_rom_resource = {
- 	.name	= "System ROM",
-@@ -111,60 +85,6 @@ static struct resource video_rom_resource = {
- 	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
- };
+ 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ 		goto badframe;
+@@ -226,13 +230,13 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
+ 	recalc_sigpending();
+ 	spin_unlock_irq(&current->sighand->siglock);
+ 	
+-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
++	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+ 		goto badframe;
  
--static struct resource video_ram_resource = {
--	.name	= "Video RAM area",
--	.start	= 0xa0000,
--	.end	= 0xbffff,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
--};
--
--static struct resource standard_io_resources[] = { {
--	.name	= "dma1",
--	.start	= 0x0000,
--	.end	= 0x001f,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
--}, {
--	.name	= "pic1",
--	.start	= 0x0020,
--	.end	= 0x0021,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
--}, {
--	.name   = "timer0",
--	.start	= 0x0040,
--	.end    = 0x0043,
--	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
--}, {
--	.name   = "timer1",
--	.start  = 0x0050,
--	.end    = 0x0053,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
--}, {
--	.name	= "keyboard",
--	.start	= 0x0060,
--	.end	= 0x006f,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
--}, {
--	.name	= "dma page reg",
--	.start	= 0x0080,
--	.end	= 0x008f,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
--}, {
--	.name	= "pic2",
--	.start	= 0x00a0,
--	.end	= 0x00a1,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
--}, {
--	.name	= "dma2",
--	.start	= 0x00c0,
--	.end	= 0x00df,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
--}, {
--	.name	= "fpu",
--	.start	= 0x00f0,
--	.end	= 0x00ff,
--	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
--} };
--
- #define ROMSIGNATURE 0xaa55
+-	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
++	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
+ 		goto badframe;
  
- static int __init romsignature(const unsigned char *rom)
-@@ -260,10 +180,9 @@ static void __init probe_roms(void)
-  * Request address space for all standard RAM and ROM resources
-  * and also for regions reported as reserved by the e820.
-  */
--static void __init
--legacy_init_iomem_resources(struct resource *code_resource,
--			    struct resource *data_resource,
--			    struct resource *bss_resource)
-+void __init init_iomem_resources(struct resource *code_resource,
-+		struct resource *data_resource,
-+		struct resource *bss_resource)
+-	return eax;
++	return ax;
+ 
+ badframe:
+ 	force_sig(SIGSEGV, current);
+@@ -249,27 +253,27 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
  {
- 	int i;
+ 	int tmp, err = 0;
  
-@@ -305,35 +224,6 @@ legacy_init_iomem_resources(struct resource *code_resource,
- 	}
- }
+-	err |= __put_user(regs->xfs, (unsigned int __user *)&sc->fs);
++	err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
+ 	savesegment(gs, tmp);
+ 	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
  
--/*
-- * Request address space for all standard resources
-- *
-- * This is called just before pcibios_init(), which is also a
-- * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
-- */
--static int __init request_standard_resources(void)
--{
--	int i;
--
--	printk("Setting up standard PCI resources\n");
--	if (efi_enabled)
--		efi_initialize_iomem_resources(&code_resource,
--				&data_resource, &bss_resource);
--	else
--		legacy_init_iomem_resources(&code_resource,
--				&data_resource, &bss_resource);
--
--	/* EFI systems may still have VGA */
--	request_resource(&iomem_resource, &video_ram_resource);
--
--	/* request I/O space for devices used on all i[345]86 PCs */
--	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
--		request_resource(&ioport_resource, &standard_io_resources[i]);
--	return 0;
--}
--
--subsys_initcall(request_standard_resources);
--
- #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
- /**
-  * e820_mark_nosave_regions - Find the ranges of physical addresses that do not
-@@ -370,19 +260,17 @@ void __init add_memory_region(unsigned long long start,
+-	err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
+-	err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
+-	err |= __put_user(regs->edi, &sc->edi);
+-	err |= __put_user(regs->esi, &sc->esi);
+-	err |= __put_user(regs->ebp, &sc->ebp);
+-	err |= __put_user(regs->esp, &sc->esp);
+-	err |= __put_user(regs->ebx, &sc->ebx);
+-	err |= __put_user(regs->edx, &sc->edx);
+-	err |= __put_user(regs->ecx, &sc->ecx);
+-	err |= __put_user(regs->eax, &sc->eax);
++	err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
++	err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
++	err |= __put_user(regs->di, &sc->di);
++	err |= __put_user(regs->si, &sc->si);
++	err |= __put_user(regs->bp, &sc->bp);
++	err |= __put_user(regs->sp, &sc->sp);
++	err |= __put_user(regs->bx, &sc->bx);
++	err |= __put_user(regs->dx, &sc->dx);
++	err |= __put_user(regs->cx, &sc->cx);
++	err |= __put_user(regs->ax, &sc->ax);
+ 	err |= __put_user(current->thread.trap_no, &sc->trapno);
+ 	err |= __put_user(current->thread.error_code, &sc->err);
+-	err |= __put_user(regs->eip, &sc->eip);
+-	err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
+-	err |= __put_user(regs->eflags, &sc->eflags);
+-	err |= __put_user(regs->esp, &sc->esp_at_signal);
+-	err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
++	err |= __put_user(regs->ip, &sc->ip);
++	err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
++	err |= __put_user(regs->flags, &sc->flags);
++	err |= __put_user(regs->sp, &sc->sp_at_signal);
++	err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
+ 
+ 	tmp = save_i387(fpstate);
+ 	if (tmp < 0)
+@@ -290,29 +294,36 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
+ static inline void __user *
+ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
  {
- 	int x;
+-	unsigned long esp;
++	unsigned long sp;
  
--	if (!efi_enabled) {
--       		x = e820.nr_map;
--
--		if (x == E820MAX) {
--		    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
--		    return;
--		}
-+	x = e820.nr_map;
+ 	/* Default to using normal stack */
+-	esp = regs->esp;
++	sp = regs->sp;
++
++	/*
++	 * If we are on the alternate signal stack and would overflow it, don't.
++	 * Return an always-bogus address instead so we will die with SIGSEGV.
++	 */
++	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
++		return (void __user *) -1L;
  
--		e820.map[x].addr = start;
--		e820.map[x].size = size;
--		e820.map[x].type = type;
--		e820.nr_map++;
-+	if (x == E820MAX) {
-+		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+		return;
+ 	/* This is the X/Open sanctioned signal stack switching.  */
+ 	if (ka->sa.sa_flags & SA_ONSTACK) {
+-		if (sas_ss_flags(esp) == 0)
+-			esp = current->sas_ss_sp + current->sas_ss_size;
++		if (sas_ss_flags(sp) == 0)
++			sp = current->sas_ss_sp + current->sas_ss_size;
  	}
-+
-+	e820.map[x].addr = start;
-+	e820.map[x].size = size;
-+	e820.map[x].type = type;
-+	e820.nr_map++;
- } /* add_memory_region */
  
- /*
-@@ -598,29 +486,6 @@ int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+ 	/* This is the legacy signal stack switching. */
+-	else if ((regs->xss & 0xffff) != __USER_DS &&
++	else if ((regs->ss & 0xffff) != __USER_DS &&
+ 		 !(ka->sa.sa_flags & SA_RESTORER) &&
+ 		 ka->sa.sa_restorer) {
+-		esp = (unsigned long) ka->sa.sa_restorer;
++		sp = (unsigned long) ka->sa.sa_restorer;
+ 	}
+ 
+-	esp -= frame_size;
++	sp -= frame_size;
+ 	/* Align the stack pointer according to the i386 ABI,
+ 	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+-	esp = ((esp + 4) & -16ul) - 4;
+-	return (void __user *) esp;
++	sp = ((sp + 4) & -16ul) - 4;
++	return (void __user *) sp;
  }
  
- /*
-- * Callback for efi_memory_walk.
-- */
--static int __init
--efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
--{
--	unsigned long *max_pfn = arg, pfn;
--
--	if (start < end) {
--		pfn = PFN_UP(end -1);
--		if (pfn > *max_pfn)
--			*max_pfn = pfn;
--	}
--	return 0;
--}
--
--static int __init
--efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
--{
--	memory_present(0, PFN_UP(start), PFN_DOWN(end));
--	return 0;
--}
--
--/*
-  * Find the highest page frame number we have available
-  */
- void __init find_max_pfn(void)
-@@ -628,11 +493,6 @@ void __init find_max_pfn(void)
- 	int i;
+ /* These symbols are defined with the addresses in the vsyscall page.
+@@ -355,9 +366,9 @@ static int setup_frame(int sig, struct k_sigaction *ka,
+ 	}
  
- 	max_pfn = 0;
--	if (efi_enabled) {
--		efi_memmap_walk(efi_find_max_pfn, &max_pfn);
--		efi_memmap_walk(efi_memory_present_wrapper, NULL);
--		return;
--	}
+ 	if (current->binfmt->hasvdso)
+-		restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
++		restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+ 	else
+-		restorer = (void *)&frame->retcode;
++		restorer = &frame->retcode;
+ 	if (ka->sa.sa_flags & SA_RESTORER)
+ 		restorer = ka->sa.sa_restorer;
  
- 	for (i = 0; i < e820.nr_map; i++) {
- 		unsigned long start, end;
-@@ -650,34 +510,12 @@ void __init find_max_pfn(void)
- }
+@@ -379,16 +390,16 @@ static int setup_frame(int sig, struct k_sigaction *ka,
+ 		goto give_sigsegv;
  
- /*
-- * Free all available memory for boot time allocation.  Used
-- * as a callback function by efi_memory_walk()
-- */
--
--static int __init
--free_available_memory(unsigned long start, unsigned long end, void *arg)
--{
--	/* check max_low_pfn */
--	if (start >= (max_low_pfn << PAGE_SHIFT))
--		return 0;
--	if (end >= (max_low_pfn << PAGE_SHIFT))
--		end = max_low_pfn << PAGE_SHIFT;
--	if (start < end)
--		free_bootmem(start, end - start);
--
--	return 0;
--}
--/*
-  * Register fully available low RAM pages with the bootmem allocator.
-  */
- void __init register_bootmem_low_pages(unsigned long max_low_pfn)
- {
- 	int i;
+ 	/* Set up registers for signal handler */
+-	regs->esp = (unsigned long) frame;
+-	regs->eip = (unsigned long) ka->sa.sa_handler;
+-	regs->eax = (unsigned long) sig;
+-	regs->edx = (unsigned long) 0;
+-	regs->ecx = (unsigned long) 0;
++	regs->sp = (unsigned long) frame;
++	regs->ip = (unsigned long) ka->sa.sa_handler;
++	regs->ax = (unsigned long) sig;
++	regs->dx = (unsigned long) 0;
++	regs->cx = (unsigned long) 0;
  
--	if (efi_enabled) {
--		efi_memmap_walk(free_available_memory, NULL);
--		return;
--	}
- 	for (i = 0; i < e820.nr_map; i++) {
- 		unsigned long curr_pfn, last_pfn, size;
- 		/*
-@@ -785,56 +623,12 @@ void __init print_memory_map(char *who)
- 	}
- }
+-	regs->xds = __USER_DS;
+-	regs->xes = __USER_DS;
+-	regs->xss = __USER_DS;
+-	regs->xcs = __USER_CS;
++	regs->ds = __USER_DS;
++	regs->es = __USER_DS;
++	regs->ss = __USER_DS;
++	regs->cs = __USER_CS;
  
--static __init __always_inline void efi_limit_regions(unsigned long long size)
--{
--	unsigned long long current_addr = 0;
--	efi_memory_desc_t *md, *next_md;
--	void *p, *p1;
--	int i, j;
--
--	j = 0;
--	p1 = memmap.map;
--	for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
--		md = p;
--		next_md = p1;
--		current_addr = md->phys_addr +
--			PFN_PHYS(md->num_pages);
--		if (is_available_memory(md)) {
--			if (md->phys_addr >= size) continue;
--			memcpy(next_md, md, memmap.desc_size);
--			if (current_addr >= size) {
--				next_md->num_pages -=
--					PFN_UP(current_addr-size);
--			}
--			p1 += memmap.desc_size;
--			next_md = p1;
--			j++;
--		} else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
--			   EFI_MEMORY_RUNTIME) {
--			/* In order to make runtime services
--			 * available we have to include runtime
--			 * memory regions in memory map */
--			memcpy(next_md, md, memmap.desc_size);
--			p1 += memmap.desc_size;
--			next_md = p1;
--			j++;
--		}
--	}
--	memmap.nr_map = j;
--	memmap.map_end = memmap.map +
--		(memmap.nr_map * memmap.desc_size);
--}
--
- void __init limit_regions(unsigned long long size)
- {
- 	unsigned long long current_addr;
- 	int i;
+ 	/*
+ 	 * Clear TF when entering the signal handler, but
+@@ -396,13 +407,13 @@ static int setup_frame(int sig, struct k_sigaction *ka,
+ 	 * The tracer may want to single-step inside the
+ 	 * handler too.
+ 	 */
+-	regs->eflags &= ~TF_MASK;
++	regs->flags &= ~TF_MASK;
+ 	if (test_thread_flag(TIF_SINGLESTEP))
+ 		ptrace_notify(SIGTRAP);
+ 
+ #if DEBUG_SIG
+ 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+-		current->comm, current->pid, frame, regs->eip, frame->pretcode);
++		current->comm, current->pid, frame, regs->ip, frame->pretcode);
+ #endif
  
- 	print_memory_map("limit_regions start");
--	if (efi_enabled) {
--		efi_limit_regions(size);
--		return;
--	}
- 	for (i = 0; i < e820.nr_map; i++) {
- 		current_addr = e820.map[i].addr + e820.map[i].size;
- 		if (current_addr < size)
-@@ -955,3 +749,14 @@ static int __init parse_memmap(char *arg)
  	return 0;
- }
- early_param("memmap", parse_memmap);
-+void __init update_e820(void)
-+{
-+	u8 nr_map;
-+
-+	nr_map = e820.nr_map;
-+	if (sanitize_e820_map(e820.map, &nr_map))
-+		return;
-+	e820.nr_map = nr_map;
-+	printk(KERN_INFO "modified physical RAM map:\n");
-+	print_memory_map("modified");
-+}
-diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
-index 04698e0..c617174 100644
---- a/arch/x86/kernel/e820_64.c
-+++ b/arch/x86/kernel/e820_64.c
-@@ -1,4 +1,4 @@
--/* 
-+/*
-  * Handle the memory map.
-  * The functions here do the job until bootmem takes over.
-  *
-@@ -26,80 +26,87 @@
- #include <asm/proto.h>
- #include <asm/setup.h>
- #include <asm/sections.h>
-+#include <asm/kdebug.h>
+@@ -442,7 +453,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 	err |= __put_user(0, &frame->uc.uc_flags);
+ 	err |= __put_user(0, &frame->uc.uc_link);
+ 	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+-	err |= __put_user(sas_ss_flags(regs->esp),
++	err |= __put_user(sas_ss_flags(regs->sp),
+ 			  &frame->uc.uc_stack.ss_flags);
+ 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ 	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
+@@ -452,13 +463,13 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 		goto give_sigsegv;
  
- struct e820map e820;
+ 	/* Set up to return from userspace.  */
+-	restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
++	restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ 	if (ka->sa.sa_flags & SA_RESTORER)
+ 		restorer = ka->sa.sa_restorer;
+ 	err |= __put_user(restorer, &frame->pretcode);
+ 	 
+ 	/*
+-	 * This is movl $,%eax ; int $0x80
++	 * This is movl $,%ax ; int $0x80
+ 	 *
+ 	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
+ 	 * reasons and because gdb uses it as a signature to notice
+@@ -472,16 +483,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 		goto give_sigsegv;
  
--/* 
-+/*
-  * PFN of last memory page.
-  */
--unsigned long end_pfn; 
--EXPORT_SYMBOL(end_pfn);
-+unsigned long end_pfn;
+ 	/* Set up registers for signal handler */
+-	regs->esp = (unsigned long) frame;
+-	regs->eip = (unsigned long) ka->sa.sa_handler;
+-	regs->eax = (unsigned long) usig;
+-	regs->edx = (unsigned long) &frame->info;
+-	regs->ecx = (unsigned long) &frame->uc;
++	regs->sp = (unsigned long) frame;
++	regs->ip = (unsigned long) ka->sa.sa_handler;
++	regs->ax = (unsigned long) usig;
++	regs->dx = (unsigned long) &frame->info;
++	regs->cx = (unsigned long) &frame->uc;
  
--/* 
-+/*
-  * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
-  * The direct mapping extends to end_pfn_map, so that we can directly access
-  * apertures, ACPI and other tables without having to play with fixmaps.
-- */ 
--unsigned long end_pfn_map; 
-+ */
-+unsigned long end_pfn_map;
+-	regs->xds = __USER_DS;
+-	regs->xes = __USER_DS;
+-	regs->xss = __USER_DS;
+-	regs->xcs = __USER_CS;
++	regs->ds = __USER_DS;
++	regs->es = __USER_DS;
++	regs->ss = __USER_DS;
++	regs->cs = __USER_CS;
  
--/* 
-+/*
-  * Last pfn which the user wants to use.
-  */
- static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
+ 	/*
+ 	 * Clear TF when entering the signal handler, but
+@@ -489,13 +500,13 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 	 * The tracer may want to single-step inside the
+ 	 * handler too.
+ 	 */
+-	regs->eflags &= ~TF_MASK;
++	regs->flags &= ~TF_MASK;
+ 	if (test_thread_flag(TIF_SINGLESTEP))
+ 		ptrace_notify(SIGTRAP);
  
--extern struct resource code_resource, data_resource, bss_resource;
--
--/* Check for some hardcoded bad areas that early boot is not allowed to touch */ 
--static inline int bad_addr(unsigned long *addrp, unsigned long size)
--{ 
--	unsigned long addr = *addrp, last = addr + size; 
--
--	/* various gunk below that needed for SMP startup */
--	if (addr < 0x8000) { 
--		*addrp = PAGE_ALIGN(0x8000);
--		return 1; 
--	}
--
--	/* direct mapping tables of the kernel */
--	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { 
--		*addrp = PAGE_ALIGN(table_end << PAGE_SHIFT);
--		return 1;
--	} 
--
--	/* initrd */ 
--#ifdef CONFIG_BLK_DEV_INITRD
--	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
--		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
--		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
--		unsigned long ramdisk_end   = ramdisk_image+ramdisk_size;
--
--		if (last >= ramdisk_image && addr < ramdisk_end) {
--			*addrp = PAGE_ALIGN(ramdisk_end);
--			return 1;
--		}
--	} 
-+/*
-+ * Early reserved memory areas.
-+ */
-+#define MAX_EARLY_RES 20
-+
-+struct early_res {
-+	unsigned long start, end;
-+};
-+static struct early_res early_res[MAX_EARLY_RES] __initdata = {
-+	{ 0, PAGE_SIZE },			/* BIOS data page */
-+#ifdef CONFIG_SMP
-+	{ SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE },
+ #if DEBUG_SIG
+ 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+-		current->comm, current->pid, frame, regs->eip, frame->pretcode);
++		current->comm, current->pid, frame, regs->ip, frame->pretcode);
  #endif
--	/* kernel code */
--	if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) {
--		*addrp = PAGE_ALIGN(__pa_symbol(&_end));
--		return 1;
-+	{}
-+};
-+
-+void __init reserve_early(unsigned long start, unsigned long end)
-+{
-+	int i;
-+	struct early_res *r;
-+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
-+		r = &early_res[i];
-+		if (end > r->start && start < r->end)
-+			panic("Overlapping early reservations %lx-%lx to %lx-%lx\n",
-+			      start, end, r->start, r->end);
- 	}
-+	if (i >= MAX_EARLY_RES)
-+		panic("Too many early reservations");
-+	r = &early_res[i];
-+	r->start = start;
-+	r->end = end;
-+}
  
--	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
--		*addrp = PAGE_ALIGN(ebda_addr + ebda_size);
--		return 1;
-+void __init early_res_to_bootmem(void)
-+{
-+	int i;
-+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
-+		struct early_res *r = &early_res[i];
-+		reserve_bootmem_generic(r->start, r->end - r->start);
- 	}
-+}
+ 	return 0;
+@@ -516,35 +527,33 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ 	int ret;
  
--#ifdef CONFIG_NUMA
--	/* NUMA memory to node map */
--	if (last >= nodemap_addr && addr < nodemap_addr + nodemap_size) {
--		*addrp = nodemap_addr + nodemap_size;
--		return 1;
-+/* Check for already reserved areas */
-+static inline int bad_addr(unsigned long *addrp, unsigned long size)
-+{
-+	int i;
-+	unsigned long addr = *addrp, last;
-+	int changed = 0;
-+again:
-+	last = addr + size;
-+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
-+		struct early_res *r = &early_res[i];
-+		if (last >= r->start && addr < r->end) {
-+			*addrp = addr = r->end;
-+			changed = 1;
-+			goto again;
-+		}
- 	}
--#endif
--	/* XXX ramdisk image here? */ 
--	return 0;
--} 
-+	return changed;
-+}
+ 	/* Are we from a system call? */
+-	if (regs->orig_eax >= 0) {
++	if (regs->orig_ax >= 0) {
+ 		/* If so, check system call restarting.. */
+-		switch (regs->eax) {
++		switch (regs->ax) {
+ 		        case -ERESTART_RESTARTBLOCK:
+ 			case -ERESTARTNOHAND:
+-				regs->eax = -EINTR;
++				regs->ax = -EINTR;
+ 				break;
  
- /*
-  * This function checks if any part of the range <start,end> is mapped
-@@ -107,16 +114,18 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
-  */
- int
- e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
--{ 
-+{
- 	int i;
--	for (i = 0; i < e820.nr_map; i++) { 
--		struct e820entry *ei = &e820.map[i]; 
--		if (type && ei->type != type) 
-+
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i];
-+
-+		if (type && ei->type != type)
- 			continue;
- 		if (ei->addr >= end || ei->addr + ei->size <= start)
--			continue; 
--		return 1; 
--	} 
-+			continue;
-+		return 1;
-+	}
- 	return 0;
- }
- EXPORT_SYMBOL_GPL(e820_any_mapped);
-@@ -127,11 +136,14 @@ EXPORT_SYMBOL_GPL(e820_any_mapped);
-  * Note: this function only works correct if the e820 table is sorted and
-  * not-overlapping, which is the case
-  */
--int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
-+int __init e820_all_mapped(unsigned long start, unsigned long end,
-+			   unsigned type)
- {
- 	int i;
-+
- 	for (i = 0; i < e820.nr_map; i++) {
- 		struct e820entry *ei = &e820.map[i];
-+
- 		if (type && ei->type != type)
- 			continue;
- 		/* is the region (part) in overlap with the current region ?*/
-@@ -143,65 +155,73 @@ int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type
- 		 */
- 		if (ei->addr <= start)
- 			start = ei->addr + ei->size;
--		/* if start is now at or beyond end, we're done, full coverage */
-+		/*
-+		 * if start is now at or beyond end, we're done, full
-+		 * coverage
-+		 */
- 		if (start >= end)
--			return 1; /* we're done */
-+			return 1;
+ 			case -ERESTARTSYS:
+ 				if (!(ka->sa.sa_flags & SA_RESTART)) {
+-					regs->eax = -EINTR;
++					regs->ax = -EINTR;
+ 					break;
+ 				}
+ 			/* fallthrough */
+ 			case -ERESTARTNOINTR:
+-				regs->eax = regs->orig_eax;
+-				regs->eip -= 2;
++				regs->ax = regs->orig_ax;
++				regs->ip -= 2;
+ 		}
  	}
- 	return 0;
- }
  
--/* 
-- * Find a free area in a specific range. 
-- */ 
--unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size) 
--{ 
--	int i; 
--	for (i = 0; i < e820.nr_map; i++) { 
--		struct e820entry *ei = &e820.map[i]; 
--		unsigned long addr = ei->addr, last; 
--		if (ei->type != E820_RAM) 
--			continue; 
--		if (addr < start) 
-+/*
-+ * Find a free area in a specific range.
-+ */
-+unsigned long __init find_e820_area(unsigned long start, unsigned long end,
-+				    unsigned size)
-+{
-+	int i;
-+
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i];
-+		unsigned long addr = ei->addr, last;
-+
-+		if (ei->type != E820_RAM)
-+			continue;
-+		if (addr < start)
- 			addr = start;
--		if (addr > ei->addr + ei->size) 
--			continue; 
-+		if (addr > ei->addr + ei->size)
-+			continue;
- 		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
- 			;
- 		last = PAGE_ALIGN(addr) + size;
- 		if (last > ei->addr + ei->size)
- 			continue;
--		if (last > end) 
-+		if (last > end)
- 			continue;
--		return addr; 
--	} 
--	return -1UL;		
--} 
-+		return addr;
-+	}
-+	return -1UL;
-+}
+ 	/*
+-	 * If TF is set due to a debugger (PT_DTRACE), clear the TF flag so
+-	 * that register information in the sigcontext is correct.
++	 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
++	 * flag so that register information in the sigcontext is correct.
+ 	 */
+-	if (unlikely(regs->eflags & TF_MASK)
+-	    && likely(current->ptrace & PT_DTRACE)) {
+-		current->ptrace &= ~PT_DTRACE;
+-		regs->eflags &= ~TF_MASK;
+-	}
++	if (unlikely(regs->flags & X86_EFLAGS_TF) &&
++	    likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
++		regs->flags &= ~X86_EFLAGS_TF;
  
- /*
-  * Find the highest page frame number we have available
+ 	/* Set up the stack frame */
+ 	if (ka->sa.sa_flags & SA_SIGINFO)
+@@ -569,7 +578,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+  * want to handle. Thus you cannot kill init even with a SIGKILL even by
+  * mistake.
   */
- unsigned long __init e820_end_of_ram(void)
+-static void fastcall do_signal(struct pt_regs *regs)
++static void do_signal(struct pt_regs *regs)
  {
--	unsigned long end_pfn = 0;
-+	unsigned long end_pfn;
-+
- 	end_pfn = find_max_pfn_with_active_regions();
--	
--	if (end_pfn > end_pfn_map) 
-+
-+	if (end_pfn > end_pfn_map)
- 		end_pfn_map = end_pfn;
- 	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
- 		end_pfn_map = MAXMEM>>PAGE_SHIFT;
- 	if (end_pfn > end_user_pfn)
- 		end_pfn = end_user_pfn;
--	if (end_pfn > end_pfn_map) 
--		end_pfn = end_pfn_map; 
-+	if (end_pfn > end_pfn_map)
-+		end_pfn = end_pfn_map;
+ 	siginfo_t info;
+ 	int signr;
+@@ -599,8 +608,8 @@ static void fastcall do_signal(struct pt_regs *regs)
+ 		 * have been cleared if the watchpoint triggered
+ 		 * inside the kernel.
+ 		 */
+-		if (unlikely(current->thread.debugreg[7]))
+-			set_debugreg(current->thread.debugreg[7], 7);
++		if (unlikely(current->thread.debugreg7))
++			set_debugreg(current->thread.debugreg7, 7);
  
--	printk("end_pfn_map = %lu\n", end_pfn_map);
--	return end_pfn;	
-+	printk(KERN_INFO "end_pfn_map = %lu\n", end_pfn_map);
-+	return end_pfn;
- }
+ 		/* Whee!  Actually deliver the signal.  */
+ 		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
+@@ -616,19 +625,19 @@ static void fastcall do_signal(struct pt_regs *regs)
+ 	}
  
- /*
-  * Mark e820 reserved areas as busy for the resource manager.
-  */
--void __init e820_reserve_resources(void)
-+void __init e820_reserve_resources(struct resource *code_resource,
-+		struct resource *data_resource, struct resource *bss_resource)
- {
- 	int i;
- 	for (i = 0; i < e820.nr_map; i++) {
-@@ -219,13 +239,13 @@ void __init e820_reserve_resources(void)
- 		request_resource(&iomem_resource, res);
- 		if (e820.map[i].type == E820_RAM) {
- 			/*
--			 *  We don't know which RAM region contains kernel data,
--			 *  so we try it repeatedly and let the resource manager
--			 *  test it.
-+			 * We don't know which RAM region contains kernel data,
-+			 * so we try it repeatedly and let the resource manager
-+			 * test it.
- 			 */
--			request_resource(res, &code_resource);
--			request_resource(res, &data_resource);
--			request_resource(res, &bss_resource);
-+			request_resource(res, code_resource);
-+			request_resource(res, data_resource);
-+			request_resource(res, bss_resource);
- #ifdef CONFIG_KEXEC
- 			if (crashk_res.start != crashk_res.end)
- 				request_resource(res, &crashk_res);
-@@ -322,9 +342,9 @@ e820_register_active_regions(int nid, unsigned long start_pfn,
- 			add_active_range(nid, ei_startpfn, ei_endpfn);
- }
+ 	/* Did we come from a system call? */
+-	if (regs->orig_eax >= 0) {
++	if (regs->orig_ax >= 0) {
+ 		/* Restart the system call - no handlers present */
+-		switch (regs->eax) {
++		switch (regs->ax) {
+ 		case -ERESTARTNOHAND:
+ 		case -ERESTARTSYS:
+ 		case -ERESTARTNOINTR:
+-			regs->eax = regs->orig_eax;
+-			regs->eip -= 2;
++			regs->ax = regs->orig_ax;
++			regs->ip -= 2;
+ 			break;
  
--/* 
-+/*
-  * Add a memory region to the kernel e820 map.
-- */ 
-+ */
- void __init add_memory_region(unsigned long start, unsigned long size, int type)
- {
- 	int x = e820.nr_map;
-@@ -349,9 +369,7 @@ unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
+ 		case -ERESTART_RESTARTBLOCK:
+-			regs->eax = __NR_restart_syscall;
+-			regs->eip -= 2;
++			regs->ax = __NR_restart_syscall;
++			regs->ip -= 2;
+ 			break;
+ 		}
+ 	}
+@@ -651,13 +660,16 @@ void do_notify_resume(struct pt_regs *regs, void *_unused,
  {
- 	unsigned long start_pfn = start >> PAGE_SHIFT;
- 	unsigned long end_pfn = end >> PAGE_SHIFT;
--	unsigned long ei_startpfn;
--	unsigned long ei_endpfn;
--	unsigned long ram = 0;
-+	unsigned long ei_startpfn, ei_endpfn, ram = 0;
- 	int i;
+ 	/* Pending single-step? */
+ 	if (thread_info_flags & _TIF_SINGLESTEP) {
+-		regs->eflags |= TF_MASK;
++		regs->flags |= TF_MASK;
+ 		clear_thread_flag(TIF_SINGLESTEP);
+ 	}
  
- 	for (i = 0; i < e820.nr_map; i++) {
-@@ -363,28 +381,31 @@ unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
- 	return end - start - (ram << PAGE_SHIFT);
+ 	/* deal with pending signal delivery */
+ 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+ 		do_signal(regs);
++
++	if (thread_info_flags & _TIF_HRTICK_RESCHED)
++		hrtick_resched();
+ 	
+ 	clear_thread_flag(TIF_IRET);
  }
- 
--void __init e820_print_map(char *who)
-+static void __init e820_print_map(char *who)
+diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
+index ab086b0..7347bb1 100644
+--- a/arch/x86/kernel/signal_64.c
++++ b/arch/x86/kernel/signal_64.c
+@@ -39,7 +39,7 @@ asmlinkage long
+ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ 		struct pt_regs *regs)
  {
- 	int i;
- 
- 	for (i = 0; i < e820.nr_map; i++) {
- 		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
--			(unsigned long long) e820.map[i].addr,
--			(unsigned long long) (e820.map[i].addr + e820.map[i].size));
-+		       (unsigned long long) e820.map[i].addr,
-+		       (unsigned long long)
-+		       (e820.map[i].addr + e820.map[i].size));
- 		switch (e820.map[i].type) {
--		case E820_RAM:	printk("(usable)\n");
--				break;
-+		case E820_RAM:
-+			printk(KERN_CONT "(usable)\n");
-+			break;
- 		case E820_RESERVED:
--				printk("(reserved)\n");
--				break;
-+			printk(KERN_CONT "(reserved)\n");
-+			break;
- 		case E820_ACPI:
--				printk("(ACPI data)\n");
--				break;
-+			printk(KERN_CONT "(ACPI data)\n");
-+			break;
- 		case E820_NVS:
--				printk("(ACPI NVS)\n");
--				break;
--		default:	printk("type %u\n", e820.map[i].type);
--				break;
-+			printk(KERN_CONT "(ACPI NVS)\n");
-+			break;
-+		default:
-+			printk(KERN_CONT "type %u\n", e820.map[i].type);
-+			break;
- 		}
- 	}
+-	return do_sigaltstack(uss, uoss, regs->rsp);
++	return do_sigaltstack(uss, uoss, regs->sp);
  }
-@@ -392,11 +413,11 @@ void __init e820_print_map(char *who)
- /*
-  * Sanitize the BIOS e820 map.
-  *
-- * Some e820 responses include overlapping entries.  The following 
-+ * Some e820 responses include overlapping entries. The following
-  * replaces the original e820 map with a new one, removing overlaps.
-  *
-  */
--static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-+static int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map)
- {
- 	struct change_member {
- 		struct e820entry *pbios; /* pointer to original bios entry */
-@@ -416,7 +437,8 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
- 	int i;
  
- 	/*
--		Visually we're performing the following (1,2,3,4 = memory types)...
-+		Visually we're performing the following
-+		(1,2,3,4 = memory types)...
  
- 		Sample memory map (w/overlaps):
- 		   ____22__________________
-@@ -458,22 +480,23 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
- 	old_nr = *pnr_map;
+@@ -64,8 +64,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
  
- 	/* bail out if we find any unreasonable addresses in bios map */
--	for (i=0; i<old_nr; i++)
-+	for (i = 0; i < old_nr; i++)
- 		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
- 			return -1;
+ #define COPY(x)		err |= __get_user(regs->x, &sc->x)
  
- 	/* create pointers for initial change-point information (for sorting) */
--	for (i=0; i < 2*old_nr; i++)
-+	for (i = 0; i < 2 * old_nr; i++)
- 		change_point[i] = &change_point_list[i];
+-	COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx);
+-	COPY(rdx); COPY(rcx); COPY(rip);
++	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
++	COPY(dx); COPY(cx); COPY(ip);
+ 	COPY(r8);
+ 	COPY(r9);
+ 	COPY(r10);
+@@ -86,9 +86,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
  
- 	/* record all known change-points (starting and ending addresses),
- 	   omitting those that are for empty memory regions */
- 	chgidx = 0;
--	for (i=0; i < old_nr; i++)	{
-+	for (i = 0; i < old_nr; i++)	{
- 		if (biosmap[i].size != 0) {
- 			change_point[chgidx]->addr = biosmap[i].addr;
- 			change_point[chgidx++]->pbios = &biosmap[i];
--			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+			change_point[chgidx]->addr = biosmap[i].addr +
-+				biosmap[i].size;
- 			change_point[chgidx++]->pbios = &biosmap[i];
- 		}
- 	}
-@@ -483,75 +506,106 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
- 	still_changing = 1;
- 	while (still_changing)	{
- 		still_changing = 0;
--		for (i=1; i < chg_nr; i++)  {
--			/* if <current_addr> > <last_addr>, swap */
--			/* or, if current=<start_addr> & last=<end_addr>, swap */
--			if ((change_point[i]->addr < change_point[i-1]->addr) ||
--				((change_point[i]->addr == change_point[i-1]->addr) &&
--				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
--				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
--			   )
--			{
-+		for (i = 1; i < chg_nr; i++)  {
-+			unsigned long long curaddr, lastaddr;
-+			unsigned long long curpbaddr, lastpbaddr;
-+
-+			curaddr = change_point[i]->addr;
-+			lastaddr = change_point[i - 1]->addr;
-+			curpbaddr = change_point[i]->pbios->addr;
-+			lastpbaddr = change_point[i - 1]->pbios->addr;
-+
-+			/*
-+			 * swap entries, when:
-+			 *
-+			 * curaddr > lastaddr or
-+			 * curaddr == lastaddr and curaddr == curpbaddr and
-+			 * lastaddr != lastpbaddr
-+			 */
-+			if (curaddr < lastaddr ||
-+			    (curaddr == lastaddr && curaddr == curpbaddr &&
-+			     lastaddr != lastpbaddr)) {
- 				change_tmp = change_point[i];
- 				change_point[i] = change_point[i-1];
- 				change_point[i-1] = change_tmp;
--				still_changing=1;
-+				still_changing = 1;
- 			}
- 		}
+ 	{
+ 		unsigned int tmpflags;
+-		err |= __get_user(tmpflags, &sc->eflags);
+-		regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
+-		regs->orig_rax = -1;		/* disable syscall checks */
++		err |= __get_user(tmpflags, &sc->flags);
++		regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5);
++		regs->orig_ax = -1;		/* disable syscall checks */
  	}
  
- 	/* create a new bios memory map, removing overlaps */
--	overlap_entries=0;	 /* number of entries in the overlap table */
--	new_bios_entry=0;	 /* index for creating new bios map entries */
-+	overlap_entries = 0;	 /* number of entries in the overlap table */
-+	new_bios_entry = 0;	 /* index for creating new bios map entries */
- 	last_type = 0;		 /* start with undefined memory type */
- 	last_addr = 0;		 /* start with 0 as last starting address */
-+
- 	/* loop through change-points, determining affect on the new bios map */
--	for (chgidx=0; chgidx < chg_nr; chgidx++)
--	{
-+	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
- 		/* keep track of all overlapping bios entries */
--		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
--		{
--			/* add map entry to overlap list (> 1 entry implies an overlap) */
--			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
--		}
--		else
--		{
--			/* remove entry from list (order independent, so swap with last) */
--			for (i=0; i<overlap_entries; i++)
--			{
--				if (overlap_list[i] == change_point[chgidx]->pbios)
--					overlap_list[i] = overlap_list[overlap_entries-1];
-+		if (change_point[chgidx]->addr ==
-+		    change_point[chgidx]->pbios->addr) {
-+			/*
-+			 * add map entry to overlap list (> 1 entry
-+			 * implies an overlap)
-+			 */
-+			overlap_list[overlap_entries++] =
-+				change_point[chgidx]->pbios;
-+		} else {
-+			/*
-+			 * remove entry from list (order independent,
-+			 * so swap with last)
-+			 */
-+			for (i = 0; i < overlap_entries; i++) {
-+				if (overlap_list[i] ==
-+				    change_point[chgidx]->pbios)
-+					overlap_list[i] =
-+						overlap_list[overlap_entries-1];
- 			}
- 			overlap_entries--;
- 		}
--		/* if there are overlapping entries, decide which "type" to use */
--		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+		/*
-+		 * if there are overlapping entries, decide which
-+		 * "type" to use (larger value takes precedence --
-+		 * 1=usable, 2,3,4,4+=unusable)
-+		 */
- 		current_type = 0;
--		for (i=0; i<overlap_entries; i++)
-+		for (i = 0; i < overlap_entries; i++)
- 			if (overlap_list[i]->type > current_type)
- 				current_type = overlap_list[i]->type;
--		/* continue building up new bios map based on this information */
-+		/*
-+		 * continue building up new bios map based on this
-+		 * information
-+		 */
- 		if (current_type != last_type)	{
- 			if (last_type != 0)	 {
- 				new_bios[new_bios_entry].size =
- 					change_point[chgidx]->addr - last_addr;
--				/* move forward only if the new size was non-zero */
-+				/*
-+				 * move forward only if the new size
-+				 * was non-zero
-+				 */
- 				if (new_bios[new_bios_entry].size != 0)
-+					/*
-+					 * no more space left for new
-+					 * bios entries ?
-+					 */
- 					if (++new_bios_entry >= E820MAX)
--						break; 	/* no more space left for new bios entries */
-+						break;
- 			}
- 			if (current_type != 0)	{
--				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+				new_bios[new_bios_entry].addr =
-+					change_point[chgidx]->addr;
- 				new_bios[new_bios_entry].type = current_type;
--				last_addr=change_point[chgidx]->addr;
-+				last_addr = change_point[chgidx]->addr;
- 			}
- 			last_type = current_type;
+ 	{
+@@ -108,7 +108,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
  		}
  	}
--	new_nr = new_bios_entry;   /* retain count for new bios entries */
-+	/* retain count for new bios entries */
-+	new_nr = new_bios_entry;
  
- 	/* copy new bios mapping into original location */
--	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
- 	*pnr_map = new_nr;
+-	err |= __get_user(*prax, &sc->rax);
++	err |= __get_user(*prax, &sc->ax);
+ 	return err;
  
- 	return 0;
-@@ -566,7 +620,7 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-  * will have given us a memory map that we can use to properly
-  * set up memory.  If we aren't, we'll fake a memory map.
-  */
--static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-+static int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
+ badframe:
+@@ -119,9 +119,9 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
  {
- 	/* Only one memory region (or negative)? Ignore it */
- 	if (nr_map < 2)
-@@ -583,18 +637,20 @@ static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
- 			return -1;
+ 	struct rt_sigframe __user *frame;
+ 	sigset_t set;
+-	unsigned long eax;
++	unsigned long ax;
  
- 		add_memory_region(start, size, type);
--	} while (biosmap++,--nr_map);
-+	} while (biosmap++, --nr_map);
- 	return 0;
- }
+-	frame = (struct rt_sigframe __user *)(regs->rsp - 8);
++	frame = (struct rt_sigframe __user *)(regs->sp - 8);
+ 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) {
+ 		goto badframe;
+ 	} 
+@@ -135,17 +135,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+ 	recalc_sigpending();
+ 	spin_unlock_irq(&current->sighand->siglock);
+ 	
+-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
++	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+ 		goto badframe;
  
--void early_panic(char *msg)
-+static void early_panic(char *msg)
- {
- 	early_printk(msg);
- 	panic(msg);
- }
+ #ifdef DEBUG_SIG
+-	printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs->rip,regs->rsp,frame,eax);
++	printk("%d sigreturn ip:%lx sp:%lx frame:%p ax:%lx\n",current->pid,regs->ip,regs->sp,frame,ax);
+ #endif
  
--void __init setup_memory_region(void)
-+/* We're not void only for x86 32-bit compat */
-+char * __init machine_specific_memory_setup(void)
- {
-+	char *who = "BIOS-e820";
- 	/*
- 	 * Try to copy the BIOS-supplied E820-map.
- 	 *
-@@ -605,7 +661,10 @@ void __init setup_memory_region(void)
- 	if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0)
- 		early_panic("Cannot find a valid memory map");
- 	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
--	e820_print_map("BIOS-e820");
-+	e820_print_map(who);
-+
-+	/* In case someone cares... */
-+	return who;
- }
+-	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->rsp) == -EFAULT)
++	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
+ 		goto badframe;
  
- static int __init parse_memopt(char *p)
-@@ -613,9 +672,9 @@ static int __init parse_memopt(char *p)
- 	if (!p)
- 		return -EINVAL;
- 	end_user_pfn = memparse(p, &p);
--	end_user_pfn >>= PAGE_SHIFT;	
-+	end_user_pfn >>= PAGE_SHIFT;
- 	return 0;
--} 
-+}
- early_param("mem", parse_memopt);
+-	return eax;
++	return ax;
  
- static int userdef __initdata;
-@@ -627,9 +686,9 @@ static int __init parse_memmap_opt(char *p)
+ badframe:
+ 	signal_fault(regs,frame,"sigreturn");
+@@ -165,14 +165,14 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
+ 	err |= __put_user(0, &sc->gs);
+ 	err |= __put_user(0, &sc->fs);
  
- 	if (!strcmp(p, "exactmap")) {
- #ifdef CONFIG_CRASH_DUMP
--		/* If we are doing a crash dump, we
--		 * still need to know the real mem
--		 * size before original memory map is
-+		/*
-+		 * If we are doing a crash dump, we still need to know
-+		 * the real mem size before original memory map is
- 		 * reset.
- 		 */
- 		e820_register_active_regions(0, 0, -1UL);
-@@ -646,6 +705,8 @@ static int __init parse_memmap_opt(char *p)
- 	mem_size = memparse(p, &p);
- 	if (p == oldp)
- 		return -EINVAL;
-+
-+	userdef = 1;
- 	if (*p == '@') {
- 		start_at = memparse(p+1, &p);
- 		add_memory_region(start_at, mem_size, E820_RAM);
-@@ -665,11 +726,29 @@ early_param("memmap", parse_memmap_opt);
- void __init finish_e820_parsing(void)
- {
- 	if (userdef) {
-+		char nr = e820.nr_map;
-+
-+		if (sanitize_e820_map(e820.map, &nr) < 0)
-+			early_panic("Invalid user supplied memory map");
-+		e820.nr_map = nr;
-+
- 		printk(KERN_INFO "user-defined physical RAM map:\n");
- 		e820_print_map("user");
- 	}
- }
+-	err |= __put_user(regs->rdi, &sc->rdi);
+-	err |= __put_user(regs->rsi, &sc->rsi);
+-	err |= __put_user(regs->rbp, &sc->rbp);
+-	err |= __put_user(regs->rsp, &sc->rsp);
+-	err |= __put_user(regs->rbx, &sc->rbx);
+-	err |= __put_user(regs->rdx, &sc->rdx);
+-	err |= __put_user(regs->rcx, &sc->rcx);
+-	err |= __put_user(regs->rax, &sc->rax);
++	err |= __put_user(regs->di, &sc->di);
++	err |= __put_user(regs->si, &sc->si);
++	err |= __put_user(regs->bp, &sc->bp);
++	err |= __put_user(regs->sp, &sc->sp);
++	err |= __put_user(regs->bx, &sc->bx);
++	err |= __put_user(regs->dx, &sc->dx);
++	err |= __put_user(regs->cx, &sc->cx);
++	err |= __put_user(regs->ax, &sc->ax);
+ 	err |= __put_user(regs->r8, &sc->r8);
+ 	err |= __put_user(regs->r9, &sc->r9);
+ 	err |= __put_user(regs->r10, &sc->r10);
+@@ -183,8 +183,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
+ 	err |= __put_user(regs->r15, &sc->r15);
+ 	err |= __put_user(me->thread.trap_no, &sc->trapno);
+ 	err |= __put_user(me->thread.error_code, &sc->err);
+-	err |= __put_user(regs->rip, &sc->rip);
+-	err |= __put_user(regs->eflags, &sc->eflags);
++	err |= __put_user(regs->ip, &sc->ip);
++	err |= __put_user(regs->flags, &sc->flags);
+ 	err |= __put_user(mask, &sc->oldmask);
+ 	err |= __put_user(me->thread.cr2, &sc->cr2);
  
-+void __init update_e820(void)
-+{
-+	u8 nr_map;
-+
-+	nr_map = e820.nr_map;
-+	if (sanitize_e820_map(e820.map, &nr_map))
-+		return;
-+	e820.nr_map = nr_map;
-+	printk(KERN_INFO "modified physical RAM map:\n");
-+	e820_print_map("modified");
-+}
-+
- unsigned long pci_mem_start = 0xaeedbabe;
- EXPORT_SYMBOL(pci_mem_start);
+@@ -198,18 +198,18 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
+ static void __user *
+ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
+ {
+-	unsigned long rsp;
++	unsigned long sp;
  
-@@ -713,8 +792,10 @@ __init void e820_setup_gap(void)
+ 	/* Default to using normal stack - redzone*/
+-	rsp = regs->rsp - 128;
++	sp = regs->sp - 128;
  
- 	if (!found) {
- 		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
--		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
--		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
-+		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit "
-+		       "address range\n"
-+		       KERN_ERR "PCI: Unassigned devices with 32bit resource "
-+		       "registers may break!\n");
+ 	/* This is the X/Open sanctioned signal stack switching.  */
+ 	if (ka->sa.sa_flags & SA_ONSTACK) {
+-		if (sas_ss_flags(rsp) == 0)
+-			rsp = current->sas_ss_sp + current->sas_ss_size;
++		if (sas_ss_flags(sp) == 0)
++			sp = current->sas_ss_sp + current->sas_ss_size;
  	}
  
- 	/*
-@@ -727,8 +808,9 @@ __init void e820_setup_gap(void)
- 	/* Fun with two's complement */
- 	pci_mem_start = (gapstart + round) & -round;
- 
--	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
--		pci_mem_start, gapstart, gapsize);
-+	printk(KERN_INFO
-+	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-+	       pci_mem_start, gapstart, gapsize);
+-	return (void __user *)round_down(rsp - size, 16); 
++	return (void __user *)round_down(sp - size, 16);
  }
  
- int __init arch_get_ram_range(int slot, u64 *addr, u64 *size)
-diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
-index 88bb83e..9f51e1e 100644
---- a/arch/x86/kernel/early-quirks.c
-+++ b/arch/x86/kernel/early-quirks.c
-@@ -21,7 +21,33 @@
- #include <asm/gart.h>
+ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+@@ -246,7 +246,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 	err |= __put_user(0, &frame->uc.uc_flags);
+ 	err |= __put_user(0, &frame->uc.uc_link);
+ 	err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+-	err |= __put_user(sas_ss_flags(regs->rsp),
++	err |= __put_user(sas_ss_flags(regs->sp),
+ 			  &frame->uc.uc_stack.ss_flags);
+ 	err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ 	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
+@@ -271,21 +271,21 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 		goto give_sigsegv;
+ 
+ #ifdef DEBUG_SIG
+-	printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax);
++	printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax);
  #endif
  
--static void __init via_bugs(void)
-+static void __init fix_hypertransport_config(int num, int slot, int func)
-+{
-+	u32 htcfg;
-+	/*
-+	 * we found a hypertransport bus
-+	 * make sure that we are broadcasting
-+	 * interrupts to all cpus on the ht bus
-+	 * if we're using extended apic ids
-+	 */
-+	htcfg = read_pci_config(num, slot, func, 0x68);
-+	if (htcfg & (1 << 18)) {
-+		printk(KERN_INFO "Detected use of extended apic ids "
-+				 "on hypertransport bus\n");
-+		if ((htcfg & (1 << 17)) == 0) {
-+			printk(KERN_INFO "Enabling hypertransport extended "
-+					 "apic interrupt broadcast\n");
-+			printk(KERN_INFO "Note this is a bios bug, "
-+					 "please contact your hw vendor\n");
-+			htcfg |= (1 << 17);
-+			write_pci_config(num, slot, func, 0x68, htcfg);
-+		}
-+	}
-+
-+
-+}
-+
-+static void __init via_bugs(int  num, int slot, int func)
- {
- #ifdef CONFIG_GART_IOMMU
- 	if ((end_pfn > MAX_DMA32_PFN ||  force_iommu) &&
-@@ -44,7 +70,7 @@ static int __init nvidia_hpet_check(struct acpi_table_header *header)
- #endif /* CONFIG_X86_IO_APIC */
- #endif /* CONFIG_ACPI */
+ 	/* Set up registers for signal handler */
+-	regs->rdi = sig;
++	regs->di = sig;
+ 	/* In case the signal handler was declared without prototypes */ 
+-	regs->rax = 0;	
++	regs->ax = 0;
  
--static void __init nvidia_bugs(void)
-+static void __init nvidia_bugs(int num, int slot, int func)
- {
- #ifdef CONFIG_ACPI
- #ifdef CONFIG_X86_IO_APIC
-@@ -72,7 +98,7 @@ static void __init nvidia_bugs(void)
+ 	/* This also works for non SA_SIGINFO handlers because they expect the
+ 	   next argument after the signal number on the stack. */
+-	regs->rsi = (unsigned long)&frame->info; 
+-	regs->rdx = (unsigned long)&frame->uc; 
+-	regs->rip = (unsigned long) ka->sa.sa_handler;
++	regs->si = (unsigned long)&frame->info;
++	regs->dx = (unsigned long)&frame->uc;
++	regs->ip = (unsigned long) ka->sa.sa_handler;
  
- }
+-	regs->rsp = (unsigned long)frame;
++	regs->sp = (unsigned long)frame;
  
--static void __init ati_bugs(void)
-+static void __init ati_bugs(int num, int slot, int func)
- {
- #ifdef CONFIG_X86_IO_APIC
- 	if (timer_over_8254 == 1) {
-@@ -83,18 +109,67 @@ static void __init ati_bugs(void)
+ 	/* Set up the CS register to run signal handlers in 64-bit mode,
+ 	   even if the handler happens to be interrupting 32-bit code. */
+@@ -295,12 +295,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 	   see include/asm-x86_64/uaccess.h for details. */
+ 	set_fs(USER_DS);
+ 
+-	regs->eflags &= ~TF_MASK;
++	regs->flags &= ~X86_EFLAGS_TF;
+ 	if (test_thread_flag(TIF_SINGLESTEP))
+ 		ptrace_notify(SIGTRAP);
+ #ifdef DEBUG_SIG
+ 	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n",
+-		current->comm, current->pid, frame, regs->rip, frame->pretcode);
++		current->comm, current->pid, frame, regs->ip, frame->pretcode);
  #endif
- }
  
-+#define QFLAG_APPLY_ONCE 	0x1
-+#define QFLAG_APPLIED		0x2
-+#define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
- struct chipset {
--	u16 vendor;
--	void (*f)(void);
-+	u32 vendor;
-+	u32 device;
-+	u32 class;
-+	u32 class_mask;
-+	u32 flags;
-+	void (*f)(int num, int slot, int func);
- };
+ 	return 0;
+@@ -321,44 +321,40 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ 	int ret;
  
- static struct chipset early_qrk[] __initdata = {
--	{ PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
--	{ PCI_VENDOR_ID_VIA, via_bugs },
--	{ PCI_VENDOR_ID_ATI, ati_bugs },
-+	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-+	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
-+	{ PCI_VENDOR_ID_VIA, PCI_ANY_ID,
-+	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
-+	{ PCI_VENDOR_ID_ATI, PCI_ANY_ID,
-+	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, ati_bugs },
-+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
-+	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
- 	{}
- };
+ #ifdef DEBUG_SIG
+-	printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n",
++	printk("handle_signal pid:%d sig:%lu ip:%lx sp:%lx regs=%p\n",
+ 		current->pid, sig,
+-		regs->rip, regs->rsp, regs);
++		regs->ip, regs->sp, regs);
+ #endif
  
-+static void __init check_dev_quirk(int num, int slot, int func)
-+{
-+	u16 class;
-+	u16 vendor;
-+	u16 device;
-+	u8 type;
-+	int i;
-+
-+	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
-+
-+	if (class == 0xffff)
-+		return;
-+
-+	vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);
-+
-+	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
-+
-+	for (i = 0; early_qrk[i].f != NULL; i++) {
-+		if (((early_qrk[i].vendor == PCI_ANY_ID) ||
-+			(early_qrk[i].vendor == vendor)) &&
-+			((early_qrk[i].device == PCI_ANY_ID) ||
-+			(early_qrk[i].device == device)) &&
-+			(!((early_qrk[i].class ^ class) &
-+			    early_qrk[i].class_mask))) {
-+				if ((early_qrk[i].flags &
-+				     QFLAG_DONE) != QFLAG_DONE)
-+					early_qrk[i].f(num, slot, func);
-+				early_qrk[i].flags |= QFLAG_APPLIED;
-+			}
-+	}
-+
-+	type = read_pci_config_byte(num, slot, func,
-+				    PCI_HEADER_TYPE);
-+	if (!(type & 0x80))
-+		return;
-+}
-+
- void __init early_quirks(void)
- {
- 	int num, slot, func;
-@@ -103,36 +178,8 @@ void __init early_quirks(void)
- 		return;
+ 	/* Are we from a system call? */
+-	if ((long)regs->orig_rax >= 0) {
++	if ((long)regs->orig_ax >= 0) {
+ 		/* If so, check system call restarting.. */
+-		switch (regs->rax) {
++		switch (regs->ax) {
+ 		        case -ERESTART_RESTARTBLOCK:
+ 			case -ERESTARTNOHAND:
+-				regs->rax = -EINTR;
++				regs->ax = -EINTR;
+ 				break;
  
- 	/* Poor man's PCI discovery */
--	for (num = 0; num < 32; num++) {
--		for (slot = 0; slot < 32; slot++) {
--			for (func = 0; func < 8; func++) {
--				u32 class;
--				u32 vendor;
--				u8 type;
--				int i;
--				class = read_pci_config(num,slot,func,
--							PCI_CLASS_REVISION);
--				if (class == 0xffffffff)
--					break;
--
--				if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
--					continue;
--
--				vendor = read_pci_config(num, slot, func,
--							 PCI_VENDOR_ID);
--				vendor &= 0xffff;
--
--				for (i = 0; early_qrk[i].f; i++)
--					if (early_qrk[i].vendor == vendor) {
--						early_qrk[i].f();
--						return;
--					}
--
--				type = read_pci_config_byte(num, slot, func,
--							    PCI_HEADER_TYPE);
--				if (!(type & 0x80))
--					break;
--			}
+ 			case -ERESTARTSYS:
+ 				if (!(ka->sa.sa_flags & SA_RESTART)) {
+-					regs->rax = -EINTR;
++					regs->ax = -EINTR;
+ 					break;
+ 				}
+ 				/* fallthrough */
+ 			case -ERESTARTNOINTR:
+-				regs->rax = regs->orig_rax;
+-				regs->rip -= 2;
++				regs->ax = regs->orig_ax;
++				regs->ip -= 2;
+ 				break;
+ 		}
+ 	}
+ 
+ 	/*
+-	 * If TF is set due to a debugger (PT_DTRACE), clear the TF
+-	 * flag so that register information in the sigcontext is
+-	 * correct.
++	 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
++	 * flag so that register information in the sigcontext is correct.
+ 	 */
+-	if (unlikely(regs->eflags & TF_MASK)) {
+-		if (likely(current->ptrace & PT_DTRACE)) {
+-			current->ptrace &= ~PT_DTRACE;
+-			regs->eflags &= ~TF_MASK;
 -		}
 -	}
-+	for (num = 0; num < 32; num++)
-+		for (slot = 0; slot < 32; slot++)
-+			for (func = 0; func < 8; func++)
-+				check_dev_quirk(num, slot, func);
- }
-diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
-new file mode 100644
-index 0000000..1411324
---- /dev/null
-+++ b/arch/x86/kernel/efi.c
-@@ -0,0 +1,512 @@
-+/*
-+ * Common EFI (Extensible Firmware Interface) support functions
-+ * Based on Extensible Firmware Interface Specification version 1.0
-+ *
-+ * Copyright (C) 1999 VA Linux Systems
-+ * Copyright (C) 1999 Walt Drummond <drummond at valinux.com>
-+ * Copyright (C) 1999-2002 Hewlett-Packard Co.
-+ *	David Mosberger-Tang <davidm at hpl.hp.com>
-+ *	Stephane Eranian <eranian at hpl.hp.com>
-+ * Copyright (C) 2005-2008 Intel Co.
-+ *	Fenghua Yu <fenghua.yu at intel.com>
-+ *	Bibo Mao <bibo.mao at intel.com>
-+ *	Chandramouli Narayanan <mouli at linux.intel.com>
-+ *	Huang Ying <ying.huang at intel.com>
-+ *
-+ * Copied from efi_32.c to eliminate the duplicated code between EFI
-+ * 32/64 support code. --ying 2007-10-26
-+ *
-+ * All EFI Runtime Services are not implemented yet as EFI only
-+ * supports physical mode addressing on SoftSDV. This is to be fixed
-+ * in a future version.  --drummond 1999-07-20
-+ *
-+ * Implemented EFI runtime services and virtual mode calls.  --davidm
-+ *
-+ * Goutham Rao: <goutham.rao at intel.com>
-+ *	Skip non-WB memory and ignore empty memory ranges.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/efi.h>
-+#include <linux/bootmem.h>
-+#include <linux/spinlock.h>
-+#include <linux/uaccess.h>
-+#include <linux/time.h>
-+#include <linux/io.h>
-+#include <linux/reboot.h>
-+#include <linux/bcd.h>
-+
-+#include <asm/setup.h>
-+#include <asm/efi.h>
-+#include <asm/time.h>
-+#include <asm/cacheflush.h>
-+#include <asm/tlbflush.h>
-+
-+#define EFI_DEBUG	1
-+#define PFX 		"EFI: "
-+
-+int efi_enabled;
-+EXPORT_SYMBOL(efi_enabled);
-+
-+struct efi efi;
-+EXPORT_SYMBOL(efi);
-+
-+struct efi_memory_map memmap;
-+
-+struct efi efi_phys __initdata;
-+static efi_system_table_t efi_systab __initdata;
-+
-+static int __init setup_noefi(char *arg)
-+{
-+	efi_enabled = 0;
-+	return 0;
-+}
-+early_param("noefi", setup_noefi);
-+
-+static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
-+{
-+	return efi_call_virt2(get_time, tm, tc);
-+}
-+
-+static efi_status_t virt_efi_set_time(efi_time_t *tm)
-+{
-+	return efi_call_virt1(set_time, tm);
-+}
-+
-+static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
-+					     efi_bool_t *pending,
-+					     efi_time_t *tm)
-+{
-+	return efi_call_virt3(get_wakeup_time,
-+			      enabled, pending, tm);
-+}
-+
-+static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
-+{
-+	return efi_call_virt2(set_wakeup_time,
-+			      enabled, tm);
-+}
-+
-+static efi_status_t virt_efi_get_variable(efi_char16_t *name,
-+					  efi_guid_t *vendor,
-+					  u32 *attr,
-+					  unsigned long *data_size,
-+					  void *data)
-+{
-+	return efi_call_virt5(get_variable,
-+			      name, vendor, attr,
-+			      data_size, data);
-+}
-+
-+static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
-+					       efi_char16_t *name,
-+					       efi_guid_t *vendor)
-+{
-+	return efi_call_virt3(get_next_variable,
-+			      name_size, name, vendor);
-+}
-+
-+static efi_status_t virt_efi_set_variable(efi_char16_t *name,
-+					  efi_guid_t *vendor,
-+					  unsigned long attr,
-+					  unsigned long data_size,
-+					  void *data)
-+{
-+	return efi_call_virt5(set_variable,
-+			      name, vendor, attr,
-+			      data_size, data);
-+}
-+
-+static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
-+{
-+	return efi_call_virt1(get_next_high_mono_count, count);
-+}
-+
-+static void virt_efi_reset_system(int reset_type,
-+				  efi_status_t status,
-+				  unsigned long data_size,
-+				  efi_char16_t *data)
-+{
-+	efi_call_virt4(reset_system, reset_type, status,
-+		       data_size, data);
-+}
-+
-+static efi_status_t virt_efi_set_virtual_address_map(
-+	unsigned long memory_map_size,
-+	unsigned long descriptor_size,
-+	u32 descriptor_version,
-+	efi_memory_desc_t *virtual_map)
-+{
-+	return efi_call_virt4(set_virtual_address_map,
-+			      memory_map_size, descriptor_size,
-+			      descriptor_version, virtual_map);
-+}
-+
-+static efi_status_t __init phys_efi_set_virtual_address_map(
-+	unsigned long memory_map_size,
-+	unsigned long descriptor_size,
-+	u32 descriptor_version,
-+	efi_memory_desc_t *virtual_map)
-+{
-+	efi_status_t status;
-+
-+	efi_call_phys_prelog();
-+	status = efi_call_phys4(efi_phys.set_virtual_address_map,
-+				memory_map_size, descriptor_size,
-+				descriptor_version, virtual_map);
-+	efi_call_phys_epilog();
-+	return status;
-+}
-+
-+static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
-+					     efi_time_cap_t *tc)
-+{
-+	efi_status_t status;
-+
-+	efi_call_phys_prelog();
-+	status = efi_call_phys2(efi_phys.get_time, tm, tc);
-+	efi_call_phys_epilog();
-+	return status;
-+}
-+
-+int efi_set_rtc_mmss(unsigned long nowtime)
-+{
-+	int real_seconds, real_minutes;
-+	efi_status_t 	status;
-+	efi_time_t 	eft;
-+	efi_time_cap_t 	cap;
-+
-+	status = efi.get_time(&eft, &cap);
-+	if (status != EFI_SUCCESS) {
-+		printk(KERN_ERR "Oops: efitime: can't read time!\n");
-+		return -1;
-+	}
-+
-+	real_seconds = nowtime % 60;
-+	real_minutes = nowtime / 60;
-+	if (((abs(real_minutes - eft.minute) + 15)/30) & 1)
-+		real_minutes += 30;
-+	real_minutes %= 60;
-+	eft.minute = real_minutes;
-+	eft.second = real_seconds;
-+
-+	status = efi.set_time(&eft);
-+	if (status != EFI_SUCCESS) {
-+		printk(KERN_ERR "Oops: efitime: can't write time!\n");
-+		return -1;
-+	}
-+	return 0;
-+}
-+
-+unsigned long efi_get_time(void)
-+{
-+	efi_status_t status;
-+	efi_time_t eft;
-+	efi_time_cap_t cap;
-+
-+	status = efi.get_time(&eft, &cap);
-+	if (status != EFI_SUCCESS)
-+		printk(KERN_ERR "Oops: efitime: can't read time!\n");
-+
-+	return mktime(eft.year, eft.month, eft.day, eft.hour,
-+		      eft.minute, eft.second);
-+}
-+
-+#if EFI_DEBUG
-+static void __init print_efi_memmap(void)
-+{
-+	efi_memory_desc_t *md;
-+	void *p;
-+	int i;
-+
-+	for (p = memmap.map, i = 0;
-+	     p < memmap.map_end;
-+	     p += memmap.desc_size, i++) {
-+		md = p;
-+		printk(KERN_INFO PFX "mem%02u: type=%u, attr=0x%llx, "
-+			"range=[0x%016llx-0x%016llx) (%lluMB)\n",
-+			i, md->type, md->attribute, md->phys_addr,
-+			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
-+			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
-+	}
-+}
-+#endif  /*  EFI_DEBUG  */
-+
-+void __init efi_init(void)
-+{
-+	efi_config_table_t *config_tables;
-+	efi_runtime_services_t *runtime;
-+	efi_char16_t *c16;
-+	char vendor[100] = "unknown";
-+	int i = 0;
-+	void *tmp;
-+
-+#ifdef CONFIG_X86_32
-+	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
-+	memmap.phys_map = (void *)boot_params.efi_info.efi_memmap;
-+#else
-+	efi_phys.systab = (efi_system_table_t *)
-+		(boot_params.efi_info.efi_systab |
-+		 ((__u64)boot_params.efi_info.efi_systab_hi<<32));
-+	memmap.phys_map = (void *)
-+		(boot_params.efi_info.efi_memmap |
-+		 ((__u64)boot_params.efi_info.efi_memmap_hi<<32));
-+#endif
-+	memmap.nr_map = boot_params.efi_info.efi_memmap_size /
-+		boot_params.efi_info.efi_memdesc_size;
-+	memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
-+	memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
-+
-+	efi.systab = early_ioremap((unsigned long)efi_phys.systab,
-+				   sizeof(efi_system_table_t));
-+	if (efi.systab == NULL)
-+		printk(KERN_ERR "Couldn't map the EFI system table!\n");
-+	memcpy(&efi_systab, efi.systab, sizeof(efi_system_table_t));
-+	early_iounmap(efi.systab, sizeof(efi_system_table_t));
-+	efi.systab = &efi_systab;
-+
-+	/*
-+	 * Verify the EFI Table
-+	 */
-+	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
-+		printk(KERN_ERR "EFI system table signature incorrect!\n");
-+	if ((efi.systab->hdr.revision >> 16) == 0)
-+		printk(KERN_ERR "Warning: EFI system table version "
-+		       "%d.%02d, expected 1.00 or greater!\n",
-+		       efi.systab->hdr.revision >> 16,
-+		       efi.systab->hdr.revision & 0xffff);
-+
-+	/*
-+	 * Show what we know for posterity
-+	 */
-+	c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
-+	if (c16) {
-+		for (i = 0; i < sizeof(vendor) && *c16; ++i)
-+			vendor[i] = *c16++;
-+		vendor[i] = '\0';
-+	} else
-+		printk(KERN_ERR PFX "Could not map the firmware vendor!\n");
-+	early_iounmap(tmp, 2);
-+
-+	printk(KERN_INFO "EFI v%u.%.02u by %s \n",
-+	       efi.systab->hdr.revision >> 16,
-+	       efi.systab->hdr.revision & 0xffff, vendor);
-+
-+	/*
-+	 * Let's see what config tables the firmware passed to us.
-+	 */
-+	config_tables = early_ioremap(
-+		efi.systab->tables,
-+		efi.systab->nr_tables * sizeof(efi_config_table_t));
-+	if (config_tables == NULL)
-+		printk(KERN_ERR "Could not map EFI Configuration Table!\n");
-+
-+	printk(KERN_INFO);
-+	for (i = 0; i < efi.systab->nr_tables; i++) {
-+		if (!efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID)) {
-+			efi.mps = config_tables[i].table;
-+			printk(" MPS=0x%lx ", config_tables[i].table);
-+		} else if (!efi_guidcmp(config_tables[i].guid,
-+					ACPI_20_TABLE_GUID)) {
-+			efi.acpi20 = config_tables[i].table;
-+			printk(" ACPI 2.0=0x%lx ", config_tables[i].table);
-+		} else if (!efi_guidcmp(config_tables[i].guid,
-+					ACPI_TABLE_GUID)) {
-+			efi.acpi = config_tables[i].table;
-+			printk(" ACPI=0x%lx ", config_tables[i].table);
-+		} else if (!efi_guidcmp(config_tables[i].guid,
-+					SMBIOS_TABLE_GUID)) {
-+			efi.smbios = config_tables[i].table;
-+			printk(" SMBIOS=0x%lx ", config_tables[i].table);
-+		} else if (!efi_guidcmp(config_tables[i].guid,
-+					HCDP_TABLE_GUID)) {
-+			efi.hcdp = config_tables[i].table;
-+			printk(" HCDP=0x%lx ", config_tables[i].table);
-+		} else if (!efi_guidcmp(config_tables[i].guid,
-+					UGA_IO_PROTOCOL_GUID)) {
-+			efi.uga = config_tables[i].table;
-+			printk(" UGA=0x%lx ", config_tables[i].table);
-+		}
-+	}
-+	printk("\n");
-+	early_iounmap(config_tables,
-+			  efi.systab->nr_tables * sizeof(efi_config_table_t));
-+
-+	/*
-+	 * Check out the runtime services table. We need to map
-+	 * the runtime services table so that we can grab the physical
-+	 * address of several of the EFI runtime functions, needed to
-+	 * set the firmware into virtual mode.
-+	 */
-+	runtime = early_ioremap((unsigned long)efi.systab->runtime,
-+				sizeof(efi_runtime_services_t));
-+	if (runtime != NULL) {
-+		/*
-+		 * We will only need *early* access to the following
-+		 * two EFI runtime services before set_virtual_address_map
-+		 * is invoked.
-+		 */
-+		efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
-+		efi_phys.set_virtual_address_map =
-+			(efi_set_virtual_address_map_t *)
-+			runtime->set_virtual_address_map;
-+		/*
-+		 * Make efi_get_time can be called before entering
-+		 * virtual mode.
-+		 */
-+		efi.get_time = phys_efi_get_time;
-+	} else
-+		printk(KERN_ERR "Could not map the EFI runtime service "
-+		       "table!\n");
-+	early_iounmap(runtime, sizeof(efi_runtime_services_t));
-+
-+	/* Map the EFI memory map */
-+	memmap.map = early_ioremap((unsigned long)memmap.phys_map,
-+				   memmap.nr_map * memmap.desc_size);
-+	if (memmap.map == NULL)
-+		printk(KERN_ERR "Could not map the EFI memory map!\n");
-+	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
-+	if (memmap.desc_size != sizeof(efi_memory_desc_t))
-+		printk(KERN_WARNING "Kernel-defined memdesc"
-+		       "doesn't match the one from EFI!\n");
-+
-+	/* Setup for EFI runtime service */
-+	reboot_type = BOOT_EFI;
-+
-+#if EFI_DEBUG
-+	print_efi_memmap();
-+#endif
-+}
-+
-+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-+static void __init runtime_code_page_mkexec(void)
-+{
-+	efi_memory_desc_t *md;
-+	unsigned long end;
-+	void *p;
-+
-+	if (!(__supported_pte_mask & _PAGE_NX))
-+		return;
-+
-+	/* Make EFI runtime service code area executable */
-+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+		md = p;
-+		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-+		if (md->type == EFI_RUNTIME_SERVICES_CODE &&
-+		    (end >> PAGE_SHIFT) <= max_pfn_mapped) {
-+			set_memory_x(md->virt_addr, md->num_pages);
-+			set_memory_uc(md->virt_addr, md->num_pages);
-+		}
-+	}
-+	__flush_tlb_all();
-+}
-+#else
-+static inline void __init runtime_code_page_mkexec(void) { }
-+#endif
-+
-+/*
-+ * This function will switch the EFI runtime services to virtual mode.
-+ * Essentially, look through the EFI memmap and map every region that
-+ * has the runtime attribute bit set in its memory descriptor and update
-+ * that memory descriptor with the virtual address obtained from ioremap().
-+ * This enables the runtime services to be called without having to
-+ * thunk back into physical mode for every invocation.
-+ */
-+void __init efi_enter_virtual_mode(void)
-+{
-+	efi_memory_desc_t *md;
-+	efi_status_t status;
-+	unsigned long end;
-+	void *p;
-+
-+	efi.systab = NULL;
-+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+		md = p;
-+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-+			continue;
-+		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-+		if ((md->attribute & EFI_MEMORY_WB) &&
-+		    ((end >> PAGE_SHIFT) <= max_pfn_mapped))
-+			md->virt_addr = (unsigned long)__va(md->phys_addr);
-+		else
-+			md->virt_addr = (unsigned long)
-+				efi_ioremap(md->phys_addr,
-+					    md->num_pages << EFI_PAGE_SHIFT);
-+		if (!md->virt_addr)
-+			printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
-+			       (unsigned long long)md->phys_addr);
-+		if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
-+		    ((unsigned long)efi_phys.systab < end))
-+			efi.systab = (efi_system_table_t *)(unsigned long)
-+				(md->virt_addr - md->phys_addr +
-+				 (unsigned long)efi_phys.systab);
-+	}
-+
-+	BUG_ON(!efi.systab);
-+
-+	status = phys_efi_set_virtual_address_map(
-+		memmap.desc_size * memmap.nr_map,
-+		memmap.desc_size,
-+		memmap.desc_version,
-+		memmap.phys_map);
-+
-+	if (status != EFI_SUCCESS) {
-+		printk(KERN_ALERT "Unable to switch EFI into virtual mode "
-+		       "(status=%lx)!\n", status);
-+		panic("EFI call to SetVirtualAddressMap() failed!");
-+	}
-+
-+	/*
-+	 * Now that EFI is in virtual mode, update the function
-+	 * pointers in the runtime service table to the new virtual addresses.
-+	 *
-+	 * Call EFI services through wrapper functions.
-+	 */
-+	efi.get_time = virt_efi_get_time;
-+	efi.set_time = virt_efi_set_time;
-+	efi.get_wakeup_time = virt_efi_get_wakeup_time;
-+	efi.set_wakeup_time = virt_efi_set_wakeup_time;
-+	efi.get_variable = virt_efi_get_variable;
-+	efi.get_next_variable = virt_efi_get_next_variable;
-+	efi.set_variable = virt_efi_set_variable;
-+	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
-+	efi.reset_system = virt_efi_reset_system;
-+	efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
-+	runtime_code_page_mkexec();
-+	early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
-+	memmap.map = NULL;
-+}
-+
-+/*
-+ * Convenience functions to obtain memory types and attributes
-+ */
-+u32 efi_mem_type(unsigned long phys_addr)
-+{
-+	efi_memory_desc_t *md;
-+	void *p;
-+
-+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+		md = p;
-+		if ((md->phys_addr <= phys_addr) &&
-+		    (phys_addr < (md->phys_addr +
-+				  (md->num_pages << EFI_PAGE_SHIFT))))
-+			return md->type;
-+	}
-+	return 0;
-+}
-+
-+u64 efi_mem_attributes(unsigned long phys_addr)
-+{
-+	efi_memory_desc_t *md;
-+	void *p;
-+
-+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+		md = p;
-+		if ((md->phys_addr <= phys_addr) &&
-+		    (phys_addr < (md->phys_addr +
-+				  (md->num_pages << EFI_PAGE_SHIFT))))
-+			return md->attribute;
-+	}
-+	return 0;
-+}
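
A note on the code above, for illustration only: efi_mem_type() and efi_mem_attributes() share one descriptor-walk idiom. The EFI memory map is an opaque buffer of memmap.nr_map descriptors, each memmap.desc_size bytes long (the firmware's stride may exceed sizeof(efi_memory_desc_t)), so it is walked with a byte-granular pointer rather than array indexing. A minimal sketch of that idiom, using only the fields visible in this hunk:

	/*
	 * Sketch only: the lookup loop shared by efi_mem_type() and
	 * efi_mem_attributes().  EFI_PAGE_SHIFT is 12, so
	 * num_pages << EFI_PAGE_SHIFT is the region size in bytes.
	 */
	static efi_memory_desc_t *efi_find_desc(unsigned long phys_addr)
	{
		efi_memory_desc_t *md;
		void *p;

		/* step by desc_size: the firmware may use a larger stride */
		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (md->phys_addr <= phys_addr &&
			    phys_addr < md->phys_addr +
					(md->num_pages << EFI_PAGE_SHIFT))
				return md;
		}
		return NULL;	/* address not covered by the EFI map */
	}
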
-diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
-index e2be78f..cb91f98 100644
---- a/arch/x86/kernel/efi_32.c
-+++ b/arch/x86/kernel/efi_32.c
-@@ -20,40 +20,15 @@
-  */
++	if (unlikely(regs->flags & X86_EFLAGS_TF) &&
++	    likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
++		regs->flags &= ~X86_EFLAGS_TF;
  
- #include <linux/kernel.h>
--#include <linux/init.h>
--#include <linux/mm.h>
- #include <linux/types.h>
--#include <linux/time.h>
--#include <linux/spinlock.h>
--#include <linux/bootmem.h>
- #include <linux/ioport.h>
--#include <linux/module.h>
- #include <linux/efi.h>
--#include <linux/kexec.h>
+ #ifdef CONFIG_IA32_EMULATION
+ 	if (test_thread_flag(TIF_IA32)) {
+@@ -430,21 +426,21 @@ static void do_signal(struct pt_regs *regs)
+ 	}
  
--#include <asm/setup.h>
- #include <asm/io.h>
- #include <asm/page.h>
- #include <asm/pgtable.h>
--#include <asm/processor.h>
--#include <asm/desc.h>
- #include <asm/tlbflush.h>
+ 	/* Did we come from a system call? */
+-	if ((long)regs->orig_rax >= 0) {
++	if ((long)regs->orig_ax >= 0) {
+ 		/* Restart the system call - no handlers present */
+-		long res = regs->rax;
++		long res = regs->ax;
+ 		switch (res) {
+ 		case -ERESTARTNOHAND:
+ 		case -ERESTARTSYS:
+ 		case -ERESTARTNOINTR:
+-			regs->rax = regs->orig_rax;
+-			regs->rip -= 2;
++			regs->ax = regs->orig_ax;
++			regs->ip -= 2;
+ 			break;
+ 		case -ERESTART_RESTARTBLOCK:
+-			regs->rax = test_thread_flag(TIF_IA32) ?
++			regs->ax = test_thread_flag(TIF_IA32) ?
+ 					__NR_ia32_restart_syscall :
+ 					__NR_restart_syscall;
+-			regs->rip -= 2;
++			regs->ip -= 2;
+ 			break;
+ 		}
+ 	}
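
The restart logic in the hunk above relies on every x86 system-call entry instruction being exactly two bytes long, which is why backing regs->ip up by 2 re-executes the call once this handler-free signal path returns to user space. A small sketch of that step, for illustration only, using the regs->ax/orig_ax/ip field names introduced by this patch:

	static void restart_syscall_sketch(struct pt_regs *regs)
	{
		/*
		 * syscall = 0f 05, sysenter = 0f 34, int $0x80 = cd 80:
		 * all two bytes, so ip - 2 points back at the entry insn.
		 */
		regs->ax = regs->orig_ax;	/* undo the -ERESTART* return value */
		regs->ip -= 2;			/* re-execute the entry instruction */
	}
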
+@@ -461,13 +457,13 @@ void
+ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
+ {
+ #ifdef DEBUG_SIG
+-	printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%p pending:%x\n",
+-	       thread_info_flags, regs->rip, regs->rsp, __builtin_return_address(0),signal_pending(current)); 
++	printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n",
++	       thread_info_flags, regs->ip, regs->sp, __builtin_return_address(0),signal_pending(current));
+ #endif
+ 	       
+ 	/* Pending single-step? */
+ 	if (thread_info_flags & _TIF_SINGLESTEP) {
+-		regs->eflags |= TF_MASK;
++		regs->flags |= X86_EFLAGS_TF;
+ 		clear_thread_flag(TIF_SINGLESTEP);
+ 	}
+ 
+@@ -480,14 +476,20 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
+ 	/* deal with pending signal delivery */
+ 	if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
+ 		do_signal(regs);
++
++	if (thread_info_flags & _TIF_HRTICK_RESCHED)
++		hrtick_resched();
+ }
+ 
+ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
+ { 
+ 	struct task_struct *me = current; 
+-	if (show_unhandled_signals && printk_ratelimit())
+-		printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
+-	       me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax); 
++	if (show_unhandled_signals && printk_ratelimit()) {
++		printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
++	       me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax);
++		print_vma_addr(" in ", regs->ip);
++		printk("\n");
++	}
  
--#define EFI_DEBUG	0
--#define PFX 		"EFI: "
--
--extern efi_status_t asmlinkage efi_call_phys(void *, ...);
--
--struct efi efi;
--EXPORT_SYMBOL(efi);
--static struct efi efi_phys;
--struct efi_memory_map memmap;
--
--/*
-- * We require an early boot_ioremap mapping mechanism initially
-- */
--extern void * boot_ioremap(unsigned long, unsigned long);
--
- /*
-  * To make EFI call EFI runtime service in physical addressing mode we need
-  * prelog/epilog before/after the invocation to disable interrupt, to
-@@ -62,16 +37,14 @@ extern void * boot_ioremap(unsigned long, unsigned long);
-  */
+ 	force_sig(SIGSEGV, me); 
+ } 
+diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
+index fcaa026..dc0cde9 100644
+--- a/arch/x86/kernel/smp_32.c
++++ b/arch/x86/kernel/smp_32.c
+@@ -159,7 +159,7 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector)
+ 	apic_write_around(APIC_ICR, cfg);
+ }
  
- static unsigned long efi_rt_eflags;
--static DEFINE_SPINLOCK(efi_rt_lock);
- static pgd_t efi_bak_pg_dir_pointer[2];
+-void fastcall send_IPI_self(int vector)
++void send_IPI_self(int vector)
+ {
+ 	__send_IPI_shortcut(APIC_DEST_SELF, vector);
+ }
+@@ -223,7 +223,7 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
+ 	 */ 
  
--static void efi_call_phys_prelog(void) __acquires(efi_rt_lock)
-+void efi_call_phys_prelog(void)
+ 	local_irq_save(flags);
+-	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
++	for_each_possible_cpu(query_cpu) {
+ 		if (cpu_isset(query_cpu, mask)) {
+ 			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
+ 					      vector);
+@@ -256,13 +256,14 @@ static DEFINE_SPINLOCK(tlbstate_lock);
+  * We need to reload %cr3 since the page tables may be going
+  * away from under us..
+  */
+-void leave_mm(unsigned long cpu)
++void leave_mm(int cpu)
  {
- 	unsigned long cr4;
- 	unsigned long temp;
--	struct Xgt_desc_struct gdt_descr;
-+	struct desc_ptr gdt_descr;
+ 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+ 		BUG();
+ 	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
+ 	load_cr3(swapper_pg_dir);
+ }
++EXPORT_SYMBOL_GPL(leave_mm);
  
--	spin_lock(&efi_rt_lock);
- 	local_irq_save(efi_rt_eflags);
+ /*
+  *
+@@ -310,7 +311,7 @@ void leave_mm(unsigned long cpu)
+  * 2) Leave the mm if we are in the lazy tlb mode.
+  */
  
- 	/*
-@@ -101,17 +74,17 @@ static void efi_call_phys_prelog(void) __acquires(efi_rt_lock)
- 	/*
- 	 * After the lock is released, the original page table is restored.
- 	 */
--	local_flush_tlb();
-+	__flush_tlb_all();
+-fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
++void smp_invalidate_interrupt(struct pt_regs *regs)
+ {
+ 	unsigned long cpu;
  
- 	gdt_descr.address = __pa(get_cpu_gdt_table(0));
- 	gdt_descr.size = GDT_SIZE - 1;
- 	load_gdt(&gdt_descr);
+@@ -638,13 +639,13 @@ static void native_smp_send_stop(void)
+  * all the work is done automatically when
+  * we return from the interrupt.
+  */
+-fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
++void smp_reschedule_interrupt(struct pt_regs *regs)
+ {
+ 	ack_APIC_irq();
+ 	__get_cpu_var(irq_stat).irq_resched_count++;
  }
  
--static void efi_call_phys_epilog(void) __releases(efi_rt_lock)
-+void efi_call_phys_epilog(void)
+-fastcall void smp_call_function_interrupt(struct pt_regs *regs)
++void smp_call_function_interrupt(struct pt_regs *regs)
  {
- 	unsigned long cr4;
--	struct Xgt_desc_struct gdt_descr;
-+	struct desc_ptr gdt_descr;
+ 	void (*func) (void *info) = call_data->func;
+ 	void *info = call_data->info;
+@@ -675,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id)
+ {
+ 	int i;
  
- 	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
- 	gdt_descr.size = GDT_SIZE - 1;
-@@ -132,586 +105,7 @@ static void efi_call_phys_epilog(void) __releases(efi_rt_lock)
- 	/*
- 	 * After the lock is released, the original page table is restored.
- 	 */
--	local_flush_tlb();
-+	__flush_tlb_all();
+-	for (i = 0; i < NR_CPUS; i++) {
++	for_each_possible_cpu(i) {
+ 		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
+ 			return i;
+ 	}
+diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
+index 03fa6ed..2fd74b0 100644
+--- a/arch/x86/kernel/smp_64.c
++++ b/arch/x86/kernel/smp_64.c
+@@ -29,7 +29,7 @@
+ #include <asm/idle.h>
  
- 	local_irq_restore(efi_rt_eflags);
--	spin_unlock(&efi_rt_lock);
--}
--
--static efi_status_t
--phys_efi_set_virtual_address_map(unsigned long memory_map_size,
--				 unsigned long descriptor_size,
--				 u32 descriptor_version,
--				 efi_memory_desc_t *virtual_map)
--{
--	efi_status_t status;
--
--	efi_call_phys_prelog();
--	status = efi_call_phys(efi_phys.set_virtual_address_map,
--				     memory_map_size, descriptor_size,
--				     descriptor_version, virtual_map);
--	efi_call_phys_epilog();
--	return status;
--}
--
--static efi_status_t
--phys_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
--{
--	efi_status_t status;
--
--	efi_call_phys_prelog();
--	status = efi_call_phys(efi_phys.get_time, tm, tc);
--	efi_call_phys_epilog();
--	return status;
--}
--
--inline int efi_set_rtc_mmss(unsigned long nowtime)
--{
--	int real_seconds, real_minutes;
--	efi_status_t 	status;
--	efi_time_t 	eft;
--	efi_time_cap_t 	cap;
--
--	spin_lock(&efi_rt_lock);
--	status = efi.get_time(&eft, &cap);
--	spin_unlock(&efi_rt_lock);
--	if (status != EFI_SUCCESS)
--		panic("Ooops, efitime: can't read time!\n");
--	real_seconds = nowtime % 60;
--	real_minutes = nowtime / 60;
--
--	if (((abs(real_minutes - eft.minute) + 15)/30) & 1)
--		real_minutes += 30;
--	real_minutes %= 60;
--
--	eft.minute = real_minutes;
--	eft.second = real_seconds;
--
--	if (status != EFI_SUCCESS) {
--		printk("Ooops: efitime: can't read time!\n");
--		return -1;
--	}
--	return 0;
--}
--/*
-- * This is used during kernel init before runtime
-- * services have been remapped and also during suspend, therefore,
-- * we'll need to call both in physical and virtual modes.
-- */
--inline unsigned long efi_get_time(void)
--{
--	efi_status_t status;
--	efi_time_t eft;
--	efi_time_cap_t cap;
--
--	if (efi.get_time) {
--		/* if we are in virtual mode use remapped function */
-- 		status = efi.get_time(&eft, &cap);
--	} else {
--		/* we are in physical mode */
--		status = phys_efi_get_time(&eft, &cap);
--	}
--
--	if (status != EFI_SUCCESS)
--		printk("Oops: efitime: can't read time status: 0x%lx\n",status);
--
--	return mktime(eft.year, eft.month, eft.day, eft.hour,
--			eft.minute, eft.second);
--}
--
--int is_available_memory(efi_memory_desc_t * md)
--{
--	if (!(md->attribute & EFI_MEMORY_WB))
--		return 0;
--
--	switch (md->type) {
--		case EFI_LOADER_CODE:
--		case EFI_LOADER_DATA:
--		case EFI_BOOT_SERVICES_CODE:
--		case EFI_BOOT_SERVICES_DATA:
--		case EFI_CONVENTIONAL_MEMORY:
--			return 1;
--	}
--	return 0;
--}
--
--/*
-- * We need to map the EFI memory map again after paging_init().
-- */
--void __init efi_map_memmap(void)
--{
--	memmap.map = NULL;
--
--	memmap.map = bt_ioremap((unsigned long) memmap.phys_map,
--			(memmap.nr_map * memmap.desc_size));
--	if (memmap.map == NULL)
--		printk(KERN_ERR PFX "Could not remap the EFI memmap!\n");
--
--	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
--}
--
--#if EFI_DEBUG
--static void __init print_efi_memmap(void)
--{
--	efi_memory_desc_t *md;
--	void *p;
--	int i;
--
--	for (p = memmap.map, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
--		md = p;
--		printk(KERN_INFO "mem%02u: type=%u, attr=0x%llx, "
--			"range=[0x%016llx-0x%016llx) (%lluMB)\n",
--			i, md->type, md->attribute, md->phys_addr,
--			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
--			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
--	}
--}
--#endif  /*  EFI_DEBUG  */
--
--/*
-- * Walks the EFI memory map and calls CALLBACK once for each EFI
-- * memory descriptor that has memory that is available for kernel use.
-- */
--void efi_memmap_walk(efi_freemem_callback_t callback, void *arg)
--{
--	int prev_valid = 0;
--	struct range {
--		unsigned long start;
--		unsigned long end;
--	} uninitialized_var(prev), curr;
--	efi_memory_desc_t *md;
--	unsigned long start, end;
--	void *p;
--
--	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
--		md = p;
--
--		if ((md->num_pages == 0) || (!is_available_memory(md)))
--			continue;
--
--		curr.start = md->phys_addr;
--		curr.end = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
--
--		if (!prev_valid) {
--			prev = curr;
--			prev_valid = 1;
--		} else {
--			if (curr.start < prev.start)
--				printk(KERN_INFO PFX "Unordered memory map\n");
--			if (prev.end == curr.start)
--				prev.end = curr.end;
--			else {
--				start =
--				    (unsigned long) (PAGE_ALIGN(prev.start));
--				end = (unsigned long) (prev.end & PAGE_MASK);
--				if ((end > start)
--				    && (*callback) (start, end, arg) < 0)
--					return;
--				prev = curr;
--			}
--		}
--	}
--	if (prev_valid) {
--		start = (unsigned long) PAGE_ALIGN(prev.start);
--		end = (unsigned long) (prev.end & PAGE_MASK);
--		if (end > start)
--			(*callback) (start, end, arg);
--	}
--}
--
--void __init efi_init(void)
--{
--	efi_config_table_t *config_tables;
--	efi_runtime_services_t *runtime;
--	efi_char16_t *c16;
--	char vendor[100] = "unknown";
--	unsigned long num_config_tables;
--	int i = 0;
--
--	memset(&efi, 0, sizeof(efi) );
--	memset(&efi_phys, 0, sizeof(efi_phys));
--
--	efi_phys.systab =
--		(efi_system_table_t *)boot_params.efi_info.efi_systab;
--	memmap.phys_map = (void *)boot_params.efi_info.efi_memmap;
--	memmap.nr_map = boot_params.efi_info.efi_memmap_size/
--		boot_params.efi_info.efi_memdesc_size;
--	memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
--	memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
--
--	efi.systab = (efi_system_table_t *)
--		boot_ioremap((unsigned long) efi_phys.systab,
--			sizeof(efi_system_table_t));
--	/*
--	 * Verify the EFI Table
--	 */
--	if (efi.systab == NULL)
--		printk(KERN_ERR PFX "Woah! Couldn't map the EFI system table.\n");
--	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
--		printk(KERN_ERR PFX "Woah! EFI system table signature incorrect\n");
--	if ((efi.systab->hdr.revision >> 16) == 0)
--		printk(KERN_ERR PFX "Warning: EFI system table version "
--		       "%d.%02d, expected 1.00 or greater\n",
--		       efi.systab->hdr.revision >> 16,
--		       efi.systab->hdr.revision & 0xffff);
--
--	/*
--	 * Grab some details from the system table
--	 */
--	num_config_tables = efi.systab->nr_tables;
--	config_tables = (efi_config_table_t *)efi.systab->tables;
--	runtime = efi.systab->runtime;
--
--	/*
--	 * Show what we know for posterity
--	 */
--	c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2);
--	if (c16) {
--		for (i = 0; i < (sizeof(vendor) - 1) && *c16; ++i)
--			vendor[i] = *c16++;
--		vendor[i] = '\0';
--	} else
--		printk(KERN_ERR PFX "Could not map the firmware vendor!\n");
--
--	printk(KERN_INFO PFX "EFI v%u.%.02u by %s \n",
--	       efi.systab->hdr.revision >> 16,
--	       efi.systab->hdr.revision & 0xffff, vendor);
--
--	/*
--	 * Let's see what config tables the firmware passed to us.
--	 */
--	config_tables = (efi_config_table_t *)
--				boot_ioremap((unsigned long) config_tables,
--			        num_config_tables * sizeof(efi_config_table_t));
--
--	if (config_tables == NULL)
--		printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n");
--
--	efi.mps        = EFI_INVALID_TABLE_ADDR;
--	efi.acpi       = EFI_INVALID_TABLE_ADDR;
--	efi.acpi20     = EFI_INVALID_TABLE_ADDR;
--	efi.smbios     = EFI_INVALID_TABLE_ADDR;
--	efi.sal_systab = EFI_INVALID_TABLE_ADDR;
--	efi.boot_info  = EFI_INVALID_TABLE_ADDR;
--	efi.hcdp       = EFI_INVALID_TABLE_ADDR;
--	efi.uga        = EFI_INVALID_TABLE_ADDR;
--
--	for (i = 0; i < num_config_tables; i++) {
--		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
--			efi.mps = config_tables[i].table;
--			printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table);
--		} else
--		    if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
--			efi.acpi20 = config_tables[i].table;
--			printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table);
--		} else
--		    if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
--			efi.acpi = config_tables[i].table;
--			printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table);
--		} else
--		    if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
--			efi.smbios = config_tables[i].table;
--			printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table);
--		} else
--		    if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
--			efi.hcdp = config_tables[i].table;
--			printk(KERN_INFO " HCDP=0x%lx ", config_tables[i].table);
--		} else
--		    if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) {
--			efi.uga = config_tables[i].table;
--			printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table);
--		}
--	}
--	printk("\n");
--
--	/*
--	 * Check out the runtime services table. We need to map
--	 * the runtime services table so that we can grab the physical
--	 * address of several of the EFI runtime functions, needed to
--	 * set the firmware into virtual mode.
--	 */
--
--	runtime = (efi_runtime_services_t *) boot_ioremap((unsigned long)
--						runtime,
--				      		sizeof(efi_runtime_services_t));
--	if (runtime != NULL) {
--		/*
--	 	 * We will only need *early* access to the following
--		 * two EFI runtime services before set_virtual_address_map
--		 * is invoked.
-- 	 	 */
--		efi_phys.get_time = (efi_get_time_t *) runtime->get_time;
--		efi_phys.set_virtual_address_map =
--			(efi_set_virtual_address_map_t *)
--				runtime->set_virtual_address_map;
--	} else
--		printk(KERN_ERR PFX "Could not map the runtime service table!\n");
--
--	/* Map the EFI memory map for use until paging_init() */
--	memmap.map = boot_ioremap(boot_params.efi_info.efi_memmap,
--				  boot_params.efi_info.efi_memmap_size);
--	if (memmap.map == NULL)
--		printk(KERN_ERR PFX "Could not map the EFI memory map!\n");
--
--	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
--
--#if EFI_DEBUG
--	print_efi_memmap();
--#endif
--}
--
--static inline void __init check_range_for_systab(efi_memory_desc_t *md)
--{
--	if (((unsigned long)md->phys_addr <= (unsigned long)efi_phys.systab) &&
--		((unsigned long)efi_phys.systab < md->phys_addr +
--		((unsigned long)md->num_pages << EFI_PAGE_SHIFT))) {
--		unsigned long addr;
--
--		addr = md->virt_addr - md->phys_addr +
--			(unsigned long)efi_phys.systab;
--		efi.systab = (efi_system_table_t *)addr;
--	}
--}
--
--/*
-- * Wrap all the virtual calls in a way that forces the parameters on the stack.
-- */
--
--#define efi_call_virt(f, args...) \
--     ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
--
--static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
--{
--	return efi_call_virt(get_time, tm, tc);
--}
--
--static efi_status_t virt_efi_set_time (efi_time_t *tm)
--{
--	return efi_call_virt(set_time, tm);
--}
--
--static efi_status_t virt_efi_get_wakeup_time (efi_bool_t *enabled,
--					      efi_bool_t *pending,
--					      efi_time_t *tm)
--{
--	return efi_call_virt(get_wakeup_time, enabled, pending, tm);
--}
--
--static efi_status_t virt_efi_set_wakeup_time (efi_bool_t enabled,
--					      efi_time_t *tm)
--{
--	return efi_call_virt(set_wakeup_time, enabled, tm);
--}
--
--static efi_status_t virt_efi_get_variable (efi_char16_t *name,
--					   efi_guid_t *vendor, u32 *attr,
--					   unsigned long *data_size, void *data)
--{
--	return efi_call_virt(get_variable, name, vendor, attr, data_size, data);
--}
--
--static efi_status_t virt_efi_get_next_variable (unsigned long *name_size,
--						efi_char16_t *name,
--						efi_guid_t *vendor)
--{
--	return efi_call_virt(get_next_variable, name_size, name, vendor);
--}
--
--static efi_status_t virt_efi_set_variable (efi_char16_t *name,
--					   efi_guid_t *vendor,
--					   unsigned long attr,
--					   unsigned long data_size, void *data)
--{
--	return efi_call_virt(set_variable, name, vendor, attr, data_size, data);
--}
--
--static efi_status_t virt_efi_get_next_high_mono_count (u32 *count)
--{
--	return efi_call_virt(get_next_high_mono_count, count);
--}
--
--static void virt_efi_reset_system (int reset_type, efi_status_t status,
--				   unsigned long data_size,
--				   efi_char16_t *data)
--{
--	efi_call_virt(reset_system, reset_type, status, data_size, data);
--}
--
--/*
-- * This function will switch the EFI runtime services to virtual mode.
-- * Essentially, look through the EFI memmap and map every region that
-- * has the runtime attribute bit set in its memory descriptor and update
-- * that memory descriptor with the virtual address obtained from ioremap().
-- * This enables the runtime services to be called without having to
-- * thunk back into physical mode for every invocation.
-- */
--
--void __init efi_enter_virtual_mode(void)
--{
--	efi_memory_desc_t *md;
--	efi_status_t status;
--	void *p;
--
--	efi.systab = NULL;
--
--	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
--		md = p;
--
--		if (!(md->attribute & EFI_MEMORY_RUNTIME))
--			continue;
--
--		md->virt_addr = (unsigned long)ioremap(md->phys_addr,
--			md->num_pages << EFI_PAGE_SHIFT);
--		if (!(unsigned long)md->virt_addr) {
--			printk(KERN_ERR PFX "ioremap of 0x%lX failed\n",
--				(unsigned long)md->phys_addr);
--		}
--		/* update the virtual address of the EFI system table */
--		check_range_for_systab(md);
--	}
--
--	BUG_ON(!efi.systab);
--
--	status = phys_efi_set_virtual_address_map(
--			memmap.desc_size * memmap.nr_map,
--			memmap.desc_size,
--			memmap.desc_version,
--		       	memmap.phys_map);
--
--	if (status != EFI_SUCCESS) {
--		printk (KERN_ALERT "You are screwed! "
--			"Unable to switch EFI into virtual mode "
--			"(status=%lx)\n", status);
--		panic("EFI call to SetVirtualAddressMap() failed!");
--	}
--
--	/*
--	 * Now that EFI is in virtual mode, update the function
--	 * pointers in the runtime service table to the new virtual addresses.
--	 */
--
--	efi.get_time = virt_efi_get_time;
--	efi.set_time = virt_efi_set_time;
--	efi.get_wakeup_time = virt_efi_get_wakeup_time;
--	efi.set_wakeup_time = virt_efi_set_wakeup_time;
--	efi.get_variable = virt_efi_get_variable;
--	efi.get_next_variable = virt_efi_get_next_variable;
--	efi.set_variable = virt_efi_set_variable;
--	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
--	efi.reset_system = virt_efi_reset_system;
--}
--
--void __init
--efi_initialize_iomem_resources(struct resource *code_resource,
--			       struct resource *data_resource,
--			       struct resource *bss_resource)
--{
--	struct resource *res;
--	efi_memory_desc_t *md;
--	void *p;
--
--	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
--		md = p;
--
--		if ((md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >
--		    0x100000000ULL)
--			continue;
--		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
--		switch (md->type) {
--		case EFI_RESERVED_TYPE:
--			res->name = "Reserved Memory";
--			break;
--		case EFI_LOADER_CODE:
--			res->name = "Loader Code";
--			break;
--		case EFI_LOADER_DATA:
--			res->name = "Loader Data";
--			break;
--		case EFI_BOOT_SERVICES_DATA:
--			res->name = "BootServices Data";
--			break;
--		case EFI_BOOT_SERVICES_CODE:
--			res->name = "BootServices Code";
--			break;
--		case EFI_RUNTIME_SERVICES_CODE:
--			res->name = "Runtime Service Code";
--			break;
--		case EFI_RUNTIME_SERVICES_DATA:
--			res->name = "Runtime Service Data";
--			break;
--		case EFI_CONVENTIONAL_MEMORY:
--			res->name = "Conventional Memory";
--			break;
--		case EFI_UNUSABLE_MEMORY:
--			res->name = "Unusable Memory";
--			break;
--		case EFI_ACPI_RECLAIM_MEMORY:
--			res->name = "ACPI Reclaim";
--			break;
--		case EFI_ACPI_MEMORY_NVS:
--			res->name = "ACPI NVS";
--			break;
--		case EFI_MEMORY_MAPPED_IO:
--			res->name = "Memory Mapped IO";
--			break;
--		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
--			res->name = "Memory Mapped IO Port Space";
--			break;
--		default:
--			res->name = "Reserved";
--			break;
--		}
--		res->start = md->phys_addr;
--		res->end = res->start + ((md->num_pages << EFI_PAGE_SHIFT) - 1);
--		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
--		if (request_resource(&iomem_resource, res) < 0)
--			printk(KERN_ERR PFX "Failed to allocate res %s : "
--				"0x%llx-0x%llx\n", res->name,
--				(unsigned long long)res->start,
--				(unsigned long long)res->end);
--		/*
--		 * We don't know which region contains kernel data so we try
--		 * it repeatedly and let the resource manager test it.
--		 */
--		if (md->type == EFI_CONVENTIONAL_MEMORY) {
--			request_resource(res, code_resource);
--			request_resource(res, data_resource);
--			request_resource(res, bss_resource);
--#ifdef CONFIG_KEXEC
--			request_resource(res, &crashk_res);
--#endif
--		}
--	}
--}
--
--/*
-- * Convenience functions to obtain memory types and attributes
-- */
--
--u32 efi_mem_type(unsigned long phys_addr)
--{
--	efi_memory_desc_t *md;
--	void *p;
--
--	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
--		md = p;
--		if ((md->phys_addr <= phys_addr) && (phys_addr <
--			(md->phys_addr + (md-> num_pages << EFI_PAGE_SHIFT)) ))
--			return md->type;
--	}
--	return 0;
--}
--
--u64 efi_mem_attributes(unsigned long phys_addr)
--{
--	efi_memory_desc_t *md;
--	void *p;
--
--	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
--		md = p;
--		if ((md->phys_addr <= phys_addr) && (phys_addr <
--			(md->phys_addr + (md-> num_pages << EFI_PAGE_SHIFT)) ))
--			return md->attribute;
--	}
--	return 0;
- }
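
With the spinlock and the __acquires/__releases annotations dropped, efi_call_phys_prelog() and efi_call_phys_epilog() above become plain exported helpers, but the expected usage is unchanged: every physical-mode EFI service call is still bracketed by the pair, exactly as the removed phys_efi_get_time() did. A sketch of that bracketing, for illustration only:

	static efi_status_t phys_get_time_sketch(efi_time_t *tm, efi_time_cap_t *tc)
	{
		efi_status_t status;

		efi_call_phys_prelog();		/* physical GDT + 1:1 low mapping */
		status = efi_call_phys(efi_phys.get_time, tm, tc);
		efi_call_phys_epilog();		/* restore GDT, flush TLB */
		return status;
	}
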
-diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
-new file mode 100644
-index 0000000..4b73992
---- /dev/null
-+++ b/arch/x86/kernel/efi_64.c
-@@ -0,0 +1,134 @@
-+/*
-+ * x86_64 specific EFI support functions
-+ * Based on Extensible Firmware Interface Specification version 1.0
-+ *
-+ * Copyright (C) 2005-2008 Intel Co.
-+ *	Fenghua Yu <fenghua.yu at intel.com>
-+ *	Bibo Mao <bibo.mao at intel.com>
-+ *	Chandramouli Narayanan <mouli at linux.intel.com>
-+ *	Huang Ying <ying.huang at intel.com>
-+ *
-+ * Code to convert EFI to E820 map has been implemented in elilo bootloader
-+ * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
-+ * is setup appropriately for EFI runtime code.
-+ * - mouli 06/14/2007.
-+ *
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/mm.h>
-+#include <linux/types.h>
-+#include <linux/spinlock.h>
-+#include <linux/bootmem.h>
-+#include <linux/ioport.h>
-+#include <linux/module.h>
-+#include <linux/efi.h>
-+#include <linux/uaccess.h>
-+#include <linux/io.h>
-+#include <linux/reboot.h>
-+
-+#include <asm/setup.h>
-+#include <asm/page.h>
-+#include <asm/e820.h>
-+#include <asm/pgtable.h>
-+#include <asm/tlbflush.h>
-+#include <asm/proto.h>
-+#include <asm/efi.h>
-+
-+static pgd_t save_pgd __initdata;
-+static unsigned long efi_flags __initdata;
-+
-+static void __init early_mapping_set_exec(unsigned long start,
-+					  unsigned long end,
-+					  int executable)
-+{
-+	pte_t *kpte;
-+	int level;
-+
-+	while (start < end) {
-+		kpte = lookup_address((unsigned long)__va(start), &level);
-+		BUG_ON(!kpte);
-+		if (executable)
-+			set_pte(kpte, pte_mkexec(*kpte));
-+		else
-+			set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
-+					    __supported_pte_mask));
-+		if (level == 4)
-+			start = (start + PMD_SIZE) & PMD_MASK;
-+		else
-+			start = (start + PAGE_SIZE) & PAGE_MASK;
-+	}
-+}
-+
-+static void __init early_runtime_code_mapping_set_exec(int executable)
-+{
-+	efi_memory_desc_t *md;
-+	void *p;
-+
-+	if (!(__supported_pte_mask & _PAGE_NX))
-+		return;
-+
-+	/* Make EFI runtime service code area executable */
-+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+		md = p;
-+		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
-+			unsigned long end;
-+			end = md->phys_addr + (md->num_pages << PAGE_SHIFT);
-+			early_mapping_set_exec(md->phys_addr, end, executable);
-+		}
-+	}
-+}
-+
-+void __init efi_call_phys_prelog(void)
-+{
-+	unsigned long vaddress;
-+
-+	local_irq_save(efi_flags);
-+	early_runtime_code_mapping_set_exec(1);
-+	vaddress = (unsigned long)__va(0x0UL);
-+	save_pgd = *pgd_offset_k(0x0UL);
-+	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
-+	__flush_tlb_all();
-+}
-+
-+void __init efi_call_phys_epilog(void)
-+{
-+	/*
-+	 * After the lock is released, the original page table is restored.
-+	 */
-+	set_pgd(pgd_offset_k(0x0UL), save_pgd);
-+	early_runtime_code_mapping_set_exec(0);
-+	__flush_tlb_all();
-+	local_irq_restore(efi_flags);
-+}
-+
-+void __init efi_reserve_bootmem(void)
-+{
-+	reserve_bootmem_generic((unsigned long)memmap.phys_map,
-+				memmap.nr_map * memmap.desc_size);
-+}
-+
-+void __iomem * __init efi_ioremap(unsigned long offset,
-+				  unsigned long size)
-+{
-+	static unsigned pages_mapped;
-+	unsigned long last_addr;
-+	unsigned i, pages;
-+
-+	last_addr = offset + size - 1;
-+	offset &= PAGE_MASK;
-+	pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
-+	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
-+		return NULL;
-+
-+	for (i = 0; i < pages; i++) {
-+		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-+			     offset, PAGE_KERNEL_EXEC_NOCACHE);
-+		offset += PAGE_SIZE;
-+		pages_mapped++;
-+	}
-+
-+	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-+					     (pages_mapped - pages));
-+}
-diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
-new file mode 100644
-index 0000000..99b47d4
---- /dev/null
-+++ b/arch/x86/kernel/efi_stub_64.S
-@@ -0,0 +1,109 @@
-+/*
-+ * Function calling ABI conversion from Linux to EFI for x86_64
-+ *
-+ * Copyright (C) 2007 Intel Corp
-+ *	Bibo Mao <bibo.mao at intel.com>
-+ *	Huang Ying <ying.huang at intel.com>
-+ */
-+
-+#include <linux/linkage.h>
-+
-+#define SAVE_XMM			\
-+	mov %rsp, %rax;			\
-+	subq $0x70, %rsp;		\
-+	and $~0xf, %rsp;		\
-+	mov %rax, (%rsp);		\
-+	mov %cr0, %rax;			\
-+	clts;				\
-+	mov %rax, 0x8(%rsp);		\
-+	movaps %xmm0, 0x60(%rsp);	\
-+	movaps %xmm1, 0x50(%rsp);	\
-+	movaps %xmm2, 0x40(%rsp);	\
-+	movaps %xmm3, 0x30(%rsp);	\
-+	movaps %xmm4, 0x20(%rsp);	\
-+	movaps %xmm5, 0x10(%rsp)
-+
-+#define RESTORE_XMM			\
-+	movaps 0x60(%rsp), %xmm0;	\
-+	movaps 0x50(%rsp), %xmm1;	\
-+	movaps 0x40(%rsp), %xmm2;	\
-+	movaps 0x30(%rsp), %xmm3;	\
-+	movaps 0x20(%rsp), %xmm4;	\
-+	movaps 0x10(%rsp), %xmm5;	\
-+	mov 0x8(%rsp), %rsi;		\
-+	mov %rsi, %cr0;			\
-+	mov (%rsp), %rsp
-+
-+ENTRY(efi_call0)
-+	SAVE_XMM
-+	subq $32, %rsp
-+	call *%rdi
-+	addq $32, %rsp
-+	RESTORE_XMM
-+	ret
-+
-+ENTRY(efi_call1)
-+	SAVE_XMM
-+	subq $32, %rsp
-+	mov  %rsi, %rcx
-+	call *%rdi
-+	addq $32, %rsp
-+	RESTORE_XMM
-+	ret
-+
-+ENTRY(efi_call2)
-+	SAVE_XMM
-+	subq $32, %rsp
-+	mov  %rsi, %rcx
-+	call *%rdi
-+	addq $32, %rsp
-+	RESTORE_XMM
-+	ret
-+
-+ENTRY(efi_call3)
-+	SAVE_XMM
-+	subq $32, %rsp
-+	mov  %rcx, %r8
-+	mov  %rsi, %rcx
-+	call *%rdi
-+	addq $32, %rsp
-+	RESTORE_XMM
-+	ret
-+
-+ENTRY(efi_call4)
-+	SAVE_XMM
-+	subq $32, %rsp
-+	mov %r8, %r9
-+	mov %rcx, %r8
-+	mov %rsi, %rcx
-+	call *%rdi
-+	addq $32, %rsp
-+	RESTORE_XMM
-+	ret
-+
-+ENTRY(efi_call5)
-+	SAVE_XMM
-+	subq $48, %rsp
-+	mov %r9, 32(%rsp)
-+	mov %r8, %r9
-+	mov %rcx, %r8
-+	mov %rsi, %rcx
-+	call *%rdi
-+	addq $48, %rsp
-+	RESTORE_XMM
-+	ret
-+
-+ENTRY(efi_call6)
-+	SAVE_XMM
-+	mov (%rsp), %rax
-+	mov 8(%rax), %rax
-+	subq $48, %rsp
-+	mov %r9, 32(%rsp)
-+	mov %rax, 40(%rsp)
-+	mov %r8, %r9
-+	mov %rcx, %r8
-+	mov %rsi, %rcx
-+	call *%rdi
-+	addq $48, %rsp
-+	RESTORE_XMM
-+	ret
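
The efi_call0()..efi_call6() stubs above translate from the Linux x86_64 calling convention (arguments in %rdi, %rsi, %rdx, %rcx, %r8, %r9) to the EFI/Microsoft x64 convention (%rcx, %rdx, %r8, %r9, then the stack, with a 32-byte shadow area reserved by the subq $32, %rsp), saving %xmm0-%xmm5 and clearing CR0.TS around the call. On the C side they would be reached through thin wrappers; a sketch, assuming a prototype of the form u64 efi_call4(void *fp, u64 a1, u64 a2, u64 a3, u64 a4) (the real prototypes live in a header not shown in this hunk):

	/*
	 * Illustration only, not the wrapper this patch actually adds: a
	 * four-argument virtual-mode EFI service funnelled through efi_call4().
	 */
	#define efi_call_virt4_sketch(f, a1, a2, a3, a4)		\
		efi_call4((void *)(efi.systab->runtime->f),		\
			  (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4))
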
-diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
-index dc7f938..be5c31d 100644
---- a/arch/x86/kernel/entry_32.S
-+++ b/arch/x86/kernel/entry_32.S
-@@ -58,7 +58,7 @@
-  * for paravirtualization.  The following will never clobber any registers:
-  *   INTERRUPT_RETURN (aka. "iret")
-  *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
-- *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
-+ *   ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
+ /*
+- *	Smarter SMP flushing macros. 
++ *	Smarter SMP flushing macros.
+  *		c/o Linus Torvalds.
   *
-  * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
-  * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
-@@ -283,12 +283,12 @@ END(resume_kernel)
-    the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
- 
- 	# sysenter call handler stub
--ENTRY(sysenter_entry)
-+ENTRY(ia32_sysenter_target)
- 	CFI_STARTPROC simple
- 	CFI_SIGNAL_FRAME
- 	CFI_DEF_CFA esp, 0
- 	CFI_REGISTER esp, ebp
--	movl TSS_sysenter_esp0(%esp),%esp
-+	movl TSS_sysenter_sp0(%esp),%esp
- sysenter_past_esp:
- 	/*
- 	 * No need to follow this irqs on/off section: the syscall
-@@ -351,7 +351,7 @@ sysenter_past_esp:
- 	xorl %ebp,%ebp
- 	TRACE_IRQS_ON
- 1:	mov  PT_FS(%esp), %fs
--	ENABLE_INTERRUPTS_SYSEXIT
-+	ENABLE_INTERRUPTS_SYSCALL_RET
- 	CFI_ENDPROC
- .pushsection .fixup,"ax"
- 2:	movl $0,PT_FS(%esp)
-@@ -360,7 +360,7 @@ sysenter_past_esp:
- 	.align 4
- 	.long 1b,2b
- .popsection
--ENDPROC(sysenter_entry)
-+ENDPROC(ia32_sysenter_target)
+  *	These mean you can really definitely utterly forget about
+@@ -37,15 +37,15 @@
+  *
+  *	Optimizations Manfred Spraul <manfred at colorfullife.com>
+  *
+- * 	More scalable flush, from Andi Kleen
++ *	More scalable flush, from Andi Kleen
+  *
+- * 	To avoid global state use 8 different call vectors.
+- * 	Each CPU uses a specific vector to trigger flushes on other
+- * 	CPUs. Depending on the received vector the target CPUs look into
++ *	To avoid global state use 8 different call vectors.
++ *	Each CPU uses a specific vector to trigger flushes on other
++ *	CPUs. Depending on the received vector the target CPUs look into
+  *	the right per cpu variable for the flush data.
+  *
+- * 	With more than 8 CPUs they are hashed to the 8 available
+- * 	vectors. The limited global vector space forces us to this right now.
++ *	With more than 8 CPUs they are hashed to the 8 available
++ *	vectors. The limited global vector space forces us to this right now.
+  *	In future when interrupts are split into per CPU domains this could be
+  *	fixed, at the cost of triggering multiple IPIs in some cases.
+  */
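
The comment block above describes how TLB-flush IPIs are spread over eight vectors; the matching sender and receiver code appears further down in this file (native_flush_tlb_others() picks the slot, smp_invalidate_interrupt() recovers it from the negated vector in orig_ax). A condensed sketch of that pairing, assuming the send_IPI_mask() helper this file already relies on:

	static void flush_vector_sketch(cpumask_t cpumask)
	{
		/* sender: hash this CPU onto one of the eight flush vectors */
		int sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;

		send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
		/* receiver: sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START */
	}
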
+@@ -55,7 +55,6 @@ union smp_flush_state {
+ 		cpumask_t flush_cpumask;
+ 		struct mm_struct *flush_mm;
+ 		unsigned long flush_va;
+-#define FLUSH_ALL	-1ULL
+ 		spinlock_t tlbstate_lock;
+ 	};
+ 	char pad[SMP_CACHE_BYTES];
+@@ -67,16 +66,17 @@ union smp_flush_state {
+ static DEFINE_PER_CPU(union smp_flush_state, flush_state);
  
- 	# system call handler stub
- ENTRY(system_call)
-@@ -583,7 +583,7 @@ END(syscall_badsys)
-  * Build the entry stubs and pointer table with
-  * some assembler magic.
+ /*
+- * We cannot call mmdrop() because we are in interrupt context, 
++ * We cannot call mmdrop() because we are in interrupt context,
+  * instead update mm->cpu_vm_mask.
   */
--.data
-+.section .rodata,"a"
- ENTRY(interrupt)
- .text
+-static inline void leave_mm(int cpu)
++void leave_mm(int cpu)
+ {
+ 	if (read_pda(mmu_state) == TLBSTATE_OK)
+ 		BUG();
+ 	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
+ 	load_cr3(swapper_pg_dir);
+ }
++EXPORT_SYMBOL_GPL(leave_mm);
  
-@@ -743,7 +743,7 @@ END(device_not_available)
-  * that sets up the real kernel stack. Check here, since we can't
-  * allow the wrong stack to be used.
+ /*
   *
-- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
-+ * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
-  * already pushed 3 words if it hits on the sysenter instruction:
-  * eflags, cs and eip.
+@@ -85,25 +85,25 @@ static inline void leave_mm(int cpu)
+  * 1) switch_mm() either 1a) or 1b)
+  * 1a) thread switch to a different mm
+  * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
+- * 	Stop ipi delivery for the old mm. This is not synchronized with
+- * 	the other cpus, but smp_invalidate_interrupt ignore flush ipis
+- * 	for the wrong mm, and in the worst case we perform a superfluous
+- * 	tlb flush.
++ *	Stop ipi delivery for the old mm. This is not synchronized with
++ *	the other cpus, but smp_invalidate_interrupt ignore flush ipis
++ *	for the wrong mm, and in the worst case we perform a superfluous
++ *	tlb flush.
+  * 1a2) set cpu mmu_state to TLBSTATE_OK
+- * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+  *	was in lazy tlb mode.
+  * 1a3) update cpu active_mm
+- * 	Now cpu0 accepts tlb flushes for the new mm.
++ *	Now cpu0 accepts tlb flushes for the new mm.
+  * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
+- * 	Now the other cpus will send tlb flush ipis.
++ *	Now the other cpus will send tlb flush ipis.
+  * 1a4) change cr3.
+  * 1b) thread switch without mm change
+  *	cpu active_mm is correct, cpu0 already handles
+  *	flush ipis.
+  * 1b1) set cpu mmu_state to TLBSTATE_OK
+  * 1b2) test_and_set the cpu bit in cpu_vm_mask.
+- * 	Atomically set the bit [other cpus will start sending flush ipis],
+- * 	and test the bit.
++ *	Atomically set the bit [other cpus will start sending flush ipis],
++ *	and test the bit.
+  * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
+  * 2) switch %%esp, ie current
   *
-@@ -755,7 +755,7 @@ END(device_not_available)
- 	cmpw $__KERNEL_CS,4(%esp);		\
- 	jne ok;					\
- label:						\
--	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
-+	movl TSS_sysenter_sp0+offset(%esp),%esp;	\
- 	CFI_DEF_CFA esp, 0;			\
- 	CFI_UNDEFINED eip;			\
- 	pushfl;					\
-@@ -768,7 +768,7 @@ label:						\
- 
- KPROBE_ENTRY(debug)
- 	RING0_INT_FRAME
--	cmpl $sysenter_entry,(%esp)
-+	cmpl $ia32_sysenter_target,(%esp)
- 	jne debug_stack_correct
- 	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
- debug_stack_correct:
-@@ -799,7 +799,7 @@ KPROBE_ENTRY(nmi)
- 	popl %eax
- 	CFI_ADJUST_CFA_OFFSET -4
- 	je nmi_espfix_stack
--	cmpl $sysenter_entry,(%esp)
-+	cmpl $ia32_sysenter_target,(%esp)
- 	je nmi_stack_fixup
- 	pushl %eax
- 	CFI_ADJUST_CFA_OFFSET 4
-@@ -812,7 +812,7 @@ KPROBE_ENTRY(nmi)
- 	popl %eax
- 	CFI_ADJUST_CFA_OFFSET -4
- 	jae nmi_stack_correct
--	cmpl $sysenter_entry,12(%esp)
-+	cmpl $ia32_sysenter_target,12(%esp)
- 	je nmi_debug_stack_check
- nmi_stack_correct:
- 	/* We have a RING0_INT_FRAME here */
-@@ -882,10 +882,10 @@ ENTRY(native_iret)
- .previous
- END(native_iret)
- 
--ENTRY(native_irq_enable_sysexit)
-+ENTRY(native_irq_enable_syscall_ret)
- 	sti
- 	sysexit
--END(native_irq_enable_sysexit)
-+END(native_irq_enable_syscall_ret)
- #endif
- 
- KPROBE_ENTRY(int3)
-diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 3a058bb..bea8474 100644
---- a/arch/x86/kernel/entry_64.S
-+++ b/arch/x86/kernel/entry_64.S
-@@ -50,6 +50,7 @@
- #include <asm/hw_irq.h>
- #include <asm/page.h>
- #include <asm/irqflags.h>
-+#include <asm/paravirt.h>
+@@ -137,12 +137,12 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
+ 	 * orig_rax contains the negated interrupt vector.
+ 	 * Use that to determine where the sender put the data.
+ 	 */
+-	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
++	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
+ 	f = &per_cpu(flush_state, sender);
  
- 	.code64
+ 	if (!cpu_isset(cpu, f->flush_cpumask))
+ 		goto out;
+-		/* 
++		/*
+ 		 * This was a BUG() but until someone can quote me the
+ 		 * line from the intel manual that guarantees an IPI to
+ 		 * multiple CPUs is retried _only_ on the erroring CPUs
+@@ -150,10 +150,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
+ 		 *
+ 		 * BUG();
+ 		 */
+-		 
++
+ 	if (f->flush_mm == read_pda(active_mm)) {
+ 		if (read_pda(mmu_state) == TLBSTATE_OK) {
+-			if (f->flush_va == FLUSH_ALL)
++			if (f->flush_va == TLB_FLUSH_ALL)
+ 				local_flush_tlb();
+ 			else
+ 				__flush_tlb_one(f->flush_va);
+@@ -166,19 +166,22 @@ out:
+ 	add_pda(irq_tlb_count, 1);
+ }
  
-@@ -57,6 +58,13 @@
- #define retint_kernel retint_restore_args
- #endif	
+-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+-						unsigned long va)
++void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
++			     unsigned long va)
+ {
+ 	int sender;
+ 	union smp_flush_state *f;
++	cpumask_t cpumask = *cpumaskp;
  
-+#ifdef CONFIG_PARAVIRT
-+ENTRY(native_irq_enable_syscall_ret)
-+	movq	%gs:pda_oldrsp,%rsp
-+	swapgs
-+	sysretq
-+#endif /* CONFIG_PARAVIRT */
-+
+ 	/* Caller has disabled preemption */
+ 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
+ 	f = &per_cpu(flush_state, sender);
  
- .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
- #ifdef CONFIG_TRACE_IRQFLAGS
-@@ -216,14 +224,21 @@ ENTRY(system_call)
- 	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
- 	CFI_REGISTER	rip,rcx
- 	/*CFI_REGISTER	rflags,r11*/
--	swapgs
-+	SWAPGS_UNSAFE_STACK
+-	/* Could avoid this lock when
+-	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+-	   probably not worth checking this for a cache-hot lock. */
 +	/*
-+	 * A hypervisor implementation might want to use a label
-+	 * after the swapgs, so that it can do the swapgs
-+	 * for the guest and jump here on syscall.
++	 * Could avoid this lock when
++	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
++	 * probably not worth checking this for a cache-hot lock.
 +	 */
-+ENTRY(system_call_after_swapgs)
+ 	spin_lock(&f->tlbstate_lock);
+ 
+ 	f->flush_mm = mm;
+@@ -202,14 +205,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ int __cpuinit init_smp_flush(void)
+ {
+ 	int i;
 +
- 	movq	%rsp,%gs:pda_oldrsp 
- 	movq	%gs:pda_kernelstack,%rsp
- 	/*
- 	 * No need to follow this irqs off/on section - it's straight
- 	 * and short:
- 	 */
--	sti					
-+	ENABLE_INTERRUPTS(CLBR_NONE)
- 	SAVE_ARGS 8,1
- 	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
- 	movq  %rcx,RIP-ARGOFFSET(%rsp)
-@@ -246,7 +261,7 @@ ret_from_sys_call:
- sysret_check:		
- 	LOCKDEP_SYS_EXIT
- 	GET_THREAD_INFO(%rcx)
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	movl threadinfo_flags(%rcx),%edx
- 	andl %edi,%edx
-@@ -260,9 +275,7 @@ sysret_check:
- 	CFI_REGISTER	rip,rcx
- 	RESTORE_ARGS 0,-ARG_SKIP,1
- 	/*CFI_REGISTER	rflags,r11*/
--	movq	%gs:pda_oldrsp,%rsp
--	swapgs
--	sysretq
-+	ENABLE_INTERRUPTS_SYSCALL_RET
+ 	for_each_cpu_mask(i, cpu_possible_map) {
+ 		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
+ 	}
+ 	return 0;
+ }
+-
+ core_initcall(init_smp_flush);
+-	
++
+ void flush_tlb_current_task(void)
+ {
+ 	struct mm_struct *mm = current->mm;
+@@ -221,10 +224,9 @@ void flush_tlb_current_task(void)
  
- 	CFI_RESTORE_STATE
- 	/* Handle reschedules */
-@@ -271,7 +284,7 @@ sysret_careful:
- 	bt $TIF_NEED_RESCHED,%edx
- 	jnc sysret_signal
- 	TRACE_IRQS_ON
--	sti
-+	ENABLE_INTERRUPTS(CLBR_NONE)
- 	pushq %rdi
- 	CFI_ADJUST_CFA_OFFSET 8
- 	call schedule
-@@ -282,8 +295,8 @@ sysret_careful:
- 	/* Handle a signal */ 
- sysret_signal:
- 	TRACE_IRQS_ON
--	sti
--	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
-+	ENABLE_INTERRUPTS(CLBR_NONE)
-+	testl $_TIF_DO_NOTIFY_MASK,%edx
- 	jz    1f
+ 	local_flush_tlb();
+ 	if (!cpus_empty(cpu_mask))
+-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ 	preempt_enable();
+ }
+-EXPORT_SYMBOL(flush_tlb_current_task);
  
- 	/* Really a signal */
-@@ -295,7 +308,7 @@ sysret_signal:
- 1:	movl $_TIF_NEED_RESCHED,%edi
- 	/* Use IRET because user could have changed frame. This
- 	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	jmp int_with_check
- 	
-@@ -327,7 +340,7 @@ tracesys:
+ void flush_tlb_mm (struct mm_struct * mm)
+ {
+@@ -241,11 +243,10 @@ void flush_tlb_mm (struct mm_struct * mm)
+ 			leave_mm(smp_processor_id());
+ 	}
+ 	if (!cpus_empty(cpu_mask))
+-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ 
+ 	preempt_enable();
+ }
+-EXPORT_SYMBOL(flush_tlb_mm);
+ 
+ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+ {
+@@ -259,8 +260,8 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+ 	if (current->active_mm == mm) {
+ 		if(current->mm)
+ 			__flush_tlb_one(va);
+-		 else
+-		 	leave_mm(smp_processor_id());
++		else
++			leave_mm(smp_processor_id());
+ 	}
+ 
+ 	if (!cpus_empty(cpu_mask))
+@@ -268,7 +269,6 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+ 
+ 	preempt_enable();
+ }
+-EXPORT_SYMBOL(flush_tlb_page);
+ 
+ static void do_flush_tlb_all(void* info)
+ {
+@@ -325,11 +325,9 @@ void unlock_ipi_call_lock(void)
+  * this function sends a 'generic call function' IPI to all other CPU
+  * of the system defined in the mask.
+  */
+-
+-static int
+-__smp_call_function_mask(cpumask_t mask,
+-			 void (*func)(void *), void *info,
+-			 int wait)
++static int __smp_call_function_mask(cpumask_t mask,
++				    void (*func)(void *), void *info,
++				    int wait)
+ {
+ 	struct call_data_struct data;
+ 	cpumask_t allbutself;
+@@ -417,11 +415,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
   */
- 	.globl int_ret_from_sys_call
- int_ret_from_sys_call:
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	testl $3,CS-ARGOFFSET(%rsp)
- 	je retint_restore_args
-@@ -349,20 +362,20 @@ int_careful:
- 	bt $TIF_NEED_RESCHED,%edx
- 	jnc  int_very_careful
- 	TRACE_IRQS_ON
--	sti
-+	ENABLE_INTERRUPTS(CLBR_NONE)
- 	pushq %rdi
- 	CFI_ADJUST_CFA_OFFSET 8
- 	call schedule
- 	popq %rdi
- 	CFI_ADJUST_CFA_OFFSET -8
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	jmp int_with_check
  
- 	/* handle signals and tracing -- both require a full stack frame */
- int_very_careful:
- 	TRACE_IRQS_ON
--	sti
-+	ENABLE_INTERRUPTS(CLBR_NONE)
- 	SAVE_REST
- 	/* Check for syscall exit trace */	
- 	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
-@@ -377,7 +390,7 @@ int_very_careful:
- 	jmp int_restore_rest
- 	
- int_signal:
--	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
-+	testl $_TIF_DO_NOTIFY_MASK,%edx
- 	jz 1f
- 	movq %rsp,%rdi		# &ptregs -> arg1
- 	xorl %esi,%esi		# oldset -> arg2
-@@ -385,7 +398,7 @@ int_signal:
- 1:	movl $_TIF_NEED_RESCHED,%edi	
- int_restore_rest:
- 	RESTORE_REST
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	jmp int_with_check
- 	CFI_ENDPROC
-@@ -506,7 +519,7 @@ END(stub_rt_sigreturn)
- 	CFI_DEF_CFA_REGISTER	rbp
- 	testl $3,CS(%rdi)
- 	je 1f
--	swapgs	
-+	SWAPGS
- 	/* irqcount is used to check if a CPU is already on an interrupt
- 	   stack or not. While this is essentially redundant with preempt_count
- 	   it is a little cheaper to use a separate counter in the PDA
-@@ -527,7 +540,7 @@ ENTRY(common_interrupt)
- 	interrupt do_IRQ
- 	/* 0(%rsp): oldrsp-ARGOFFSET */
- ret_from_intr:
--	cli	
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	decl %gs:pda_irqcount
- 	leaveq
-@@ -556,13 +569,13 @@ retint_swapgs:		/* return to user-space */
- 	/*
- 	 * The iretq could re-enable interrupts:
- 	 */
--	cli
-+	DISABLE_INTERRUPTS(CLBR_ANY)
- 	TRACE_IRQS_IRETQ
--	swapgs 
-+	SWAPGS
- 	jmp restore_args
+ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
+-	int nonatomic, int wait)
++			      int nonatomic, int wait)
+ {
+ 	/* prevent preemption and reschedule on another processor */
+-	int ret;
+-	int me = get_cpu();
++	int ret, me = get_cpu();
  
- retint_restore_args:	/* return to kernel space */
--	cli
-+	DISABLE_INTERRUPTS(CLBR_ANY)
- 	/*
- 	 * The iretq could re-enable interrupts:
+ 	/* Can deadlock when called with interrupts disabled */
+ 	WARN_ON(irqs_disabled());
+@@ -471,9 +468,9 @@ static void stop_this_cpu(void *dummy)
  	 */
-@@ -570,10 +583,14 @@ retint_restore_args:	/* return to kernel space */
- restore_args:
- 	RESTORE_ARGS 0,8,0						
- iret_label:	
-+#ifdef CONFIG_PARAVIRT
-+	INTERRUPT_RETURN
-+#endif
-+ENTRY(native_iret)
- 	iretq
+ 	cpu_clear(smp_processor_id(), cpu_online_map);
+ 	disable_local_APIC();
+-	for (;;) 
++	for (;;)
+ 		halt();
+-} 
++}
  
- 	.section __ex_table,"a"
--	.quad iret_label,bad_iret	
-+	.quad native_iret, bad_iret
- 	.previous
- 	.section .fixup,"ax"
- 	/* force a signal here? this matches i386 behaviour */
-@@ -581,39 +598,39 @@ iret_label:
- bad_iret:
- 	movq $11,%rdi	/* SIGSEGV */
- 	TRACE_IRQS_ON
--	sti
--	jmp do_exit			
--	.previous	
--	
-+	ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
-+	jmp do_exit
-+	.previous
-+
- 	/* edi: workmask, edx: work */
- retint_careful:
- 	CFI_RESTORE_STATE
- 	bt    $TIF_NEED_RESCHED,%edx
- 	jnc   retint_signal
- 	TRACE_IRQS_ON
--	sti
-+	ENABLE_INTERRUPTS(CLBR_NONE)
- 	pushq %rdi
- 	CFI_ADJUST_CFA_OFFSET	8
- 	call  schedule
- 	popq %rdi		
- 	CFI_ADJUST_CFA_OFFSET	-8
- 	GET_THREAD_INFO(%rcx)
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	jmp retint_check
- 	
- retint_signal:
--	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
-+	testl $_TIF_DO_NOTIFY_MASK,%edx
- 	jz    retint_swapgs
- 	TRACE_IRQS_ON
--	sti
-+	ENABLE_INTERRUPTS(CLBR_NONE)
- 	SAVE_REST
- 	movq $-1,ORIG_RAX(%rsp) 			
- 	xorl %esi,%esi		# oldset
- 	movq %rsp,%rdi		# &pt_regs
- 	call do_notify_resume
- 	RESTORE_REST
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	movl $_TIF_NEED_RESCHED,%edi
- 	GET_THREAD_INFO(%rcx)
-@@ -731,7 +748,7 @@ END(spurious_interrupt)
- 	rdmsr
- 	testl %edx,%edx
- 	js    1f
--	swapgs
-+	SWAPGS
- 	xorl  %ebx,%ebx
- 1:
- 	.if \ist
-@@ -747,7 +764,7 @@ END(spurious_interrupt)
- 	.if \ist
- 	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
- 	.endif
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	.if \irqtrace
- 	TRACE_IRQS_OFF
- 	.endif
-@@ -776,10 +793,10 @@ paranoid_swapgs\trace:
- 	.if \trace
- 	TRACE_IRQS_IRETQ 0
- 	.endif
--	swapgs
-+	SWAPGS_UNSAFE_STACK
- paranoid_restore\trace:
- 	RESTORE_ALL 8
--	iretq
-+	INTERRUPT_RETURN
- paranoid_userspace\trace:
- 	GET_THREAD_INFO(%rcx)
- 	movl threadinfo_flags(%rcx),%ebx
-@@ -794,11 +811,11 @@ paranoid_userspace\trace:
- 	.if \trace
- 	TRACE_IRQS_ON
- 	.endif
--	sti
-+	ENABLE_INTERRUPTS(CLBR_NONE)
- 	xorl %esi,%esi 			/* arg2: oldset */
- 	movq %rsp,%rdi 			/* arg1: &pt_regs */
- 	call do_notify_resume
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	.if \trace
- 	TRACE_IRQS_OFF
- 	.endif
-@@ -807,9 +824,9 @@ paranoid_schedule\trace:
- 	.if \trace
- 	TRACE_IRQS_ON
- 	.endif
--	sti
-+	ENABLE_INTERRUPTS(CLBR_ANY)
- 	call schedule
--	cli
-+	DISABLE_INTERRUPTS(CLBR_ANY)
- 	.if \trace
- 	TRACE_IRQS_OFF
- 	.endif
-@@ -862,7 +879,7 @@ KPROBE_ENTRY(error_entry)
- 	testl $3,CS(%rsp)
- 	je  error_kernelspace
- error_swapgs:	
--	swapgs
-+	SWAPGS
- error_sti:	
- 	movq %rdi,RDI(%rsp) 	
- 	CFI_REL_OFFSET	rdi,RDI
-@@ -874,7 +891,7 @@ error_sti:
- error_exit:
- 	movl %ebx,%eax
- 	RESTORE_REST
--	cli
-+	DISABLE_INTERRUPTS(CLBR_NONE)
- 	TRACE_IRQS_OFF
- 	GET_THREAD_INFO(%rcx)	
- 	testl %eax,%eax
-@@ -911,12 +928,12 @@ ENTRY(load_gs_index)
- 	CFI_STARTPROC
- 	pushf
- 	CFI_ADJUST_CFA_OFFSET 8
--	cli
--        swapgs
-+	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
-+        SWAPGS
- gs_change:     
-         movl %edi,%gs   
- 2:	mfence		/* workaround */
--	swapgs
-+	SWAPGS
-         popf
- 	CFI_ADJUST_CFA_OFFSET -8
-         ret
-@@ -930,7 +947,7 @@ ENDPROC(load_gs_index)
-         .section .fixup,"ax"
- 	/* running with kernelgs */
- bad_gs: 
--	swapgs			/* switch back to user gs */
-+	SWAPGS			/* switch back to user gs */
- 	xorl %eax,%eax
-         movl %eax,%gs
-         jmp  2b
-diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
-index ce703e2..4ae7b64 100644
---- a/arch/x86/kernel/genapic_64.c
-+++ b/arch/x86/kernel/genapic_64.c
-@@ -24,18 +24,11 @@
- #include <acpi/acpi_bus.h>
- #endif
+ void smp_send_stop(void)
+ {
+diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
+index 4ea80cb..5787a0c 100644
+--- a/arch/x86/kernel/smpboot_32.c
++++ b/arch/x86/kernel/smpboot_32.c
+@@ -83,7 +83,6 @@ EXPORT_SYMBOL(cpu_online_map);
+ 
+ cpumask_t cpu_callin_map;
+ cpumask_t cpu_callout_map;
+-EXPORT_SYMBOL(cpu_callout_map);
+ cpumask_t cpu_possible_map;
+ EXPORT_SYMBOL(cpu_possible_map);
+ static cpumask_t smp_commenced_mask;
+@@ -92,15 +91,10 @@ static cpumask_t smp_commenced_mask;
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+ EXPORT_PER_CPU_SYMBOL(cpu_info);
  
 -/*
-- * which logical CPU number maps to which CPU (physical APIC ID)
-- *
 - * The following static array is used during kernel startup
 - * and the x86_cpu_to_apicid_ptr contains the address of the
 - * array during this time.  Is it zeroed when the per_cpu
 - * data area is removed.
 - */
--u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata
 +/* which logical CPU number maps to which CPU (physical APIC ID) */
-+u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata
- 					= { [0 ... NR_CPUS-1] = BAD_APICID };
+ u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
+ 			{ [0 ... NR_CPUS-1] = BAD_APICID };
 -void *x86_cpu_to_apicid_ptr;
--DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
 +void *x86_cpu_to_apicid_early_ptr;
-+DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
+ DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
  EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
  
- struct genapic __read_mostly *genapic = &apic_flat;
-diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
-index f12d8c5..9c7f7d3 100644
---- a/arch/x86/kernel/geode_32.c
-+++ b/arch/x86/kernel/geode_32.c
-@@ -1,6 +1,7 @@
- /*
-  * AMD Geode southbridge support code
-  * Copyright (C) 2006, Advanced Micro Devices, Inc.
-+ * Copyright (C) 2007, Andres Salomon <dilinger at debian.org>
-  *
-  * This program is free software; you can redistribute it and/or
-  * modify it under the terms of version 2 of the GNU General Public License
-@@ -51,45 +52,62 @@ EXPORT_SYMBOL_GPL(geode_get_dev_base);
+@@ -113,7 +107,6 @@ u8 apicid_2_node[MAX_APICID];
+ extern const unsigned char trampoline_data [];
+ extern const unsigned char trampoline_end  [];
+ static unsigned char *trampoline_base;
+-static int trampoline_exec;
  
- /* === GPIO API === */
+ static void map_cpu_to_logical_apicid(void);
  
--void geode_gpio_set(unsigned int gpio, unsigned int reg)
-+void geode_gpio_set(u32 gpio, unsigned int reg)
+@@ -138,17 +131,13 @@ static unsigned long __cpuinit setup_trampoline(void)
+  */
+ void __init smp_alloc_memory(void)
  {
- 	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
- 
- 	if (!base)
- 		return;
- 
--	if (gpio < 16)
--		outl(1 << gpio, base + reg);
--	else
--		outl(1 << (gpio - 16), base + 0x80 + reg);
-+	/* low bank register */
-+	if (gpio & 0xFFFF)
-+		outl(gpio & 0xFFFF, base + reg);
-+	/* high bank register */
-+	gpio >>= 16;
-+	if (gpio)
-+		outl(gpio, base + 0x80 + reg);
+-	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
++	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
+ 	/*
+ 	 * Has to be in very low memory so we can execute
+ 	 * real-mode AP code.
+ 	 */
+ 	if (__pa(trampoline_base) >= 0x9F000)
+ 		BUG();
+-	/*
+-	 * Make the SMP trampoline executable:
+-	 */
+-	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
  }
- EXPORT_SYMBOL_GPL(geode_gpio_set);
  
--void geode_gpio_clear(unsigned int gpio, unsigned int reg)
-+void geode_gpio_clear(u32 gpio, unsigned int reg)
+ /*
+@@ -405,7 +394,7 @@ static void __cpuinit start_secondary(void *unused)
+ 	setup_secondary_clock();
+ 	if (nmi_watchdog == NMI_IO_APIC) {
+ 		disable_8259A_irq(0);
+-		enable_NMI_through_LVT0(NULL);
++		enable_NMI_through_LVT0();
+ 		enable_8259A_irq(0);
+ 	}
+ 	/*
+@@ -448,38 +437,38 @@ void __devinit initialize_secondary(void)
  {
- 	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
- 
- 	if (!base)
- 		return;
+ 	/*
+ 	 * We don't actually need to load the full TSS,
+-	 * basically just the stack pointer and the eip.
++	 * basically just the stack pointer and the ip.
+ 	 */
  
--	if (gpio < 16)
--		outl(1 << (gpio + 16), base + reg);
--	else
--		outl(1 << gpio, base + 0x80 + reg);
-+	/* low bank register */
-+	if (gpio & 0xFFFF)
-+		outl((gpio & 0xFFFF) << 16, base + reg);
-+	/* high bank register */
-+	gpio &= (0xFFFF << 16);
-+	if (gpio)
-+		outl(gpio, base + 0x80 + reg);
+ 	asm volatile(
+ 		"movl %0,%%esp\n\t"
+ 		"jmp *%1"
+ 		:
+-		:"m" (current->thread.esp),"m" (current->thread.eip));
++		:"m" (current->thread.sp),"m" (current->thread.ip));
  }
- EXPORT_SYMBOL_GPL(geode_gpio_clear);
  
--int geode_gpio_isset(unsigned int gpio, unsigned int reg)
-+int geode_gpio_isset(u32 gpio, unsigned int reg)
- {
- 	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
-+	u32 val;
+ /* Static state in head.S used to set up a CPU */
+ extern struct {
+-	void * esp;
++	void * sp;
+ 	unsigned short ss;
+ } stack_start;
  
- 	if (!base)
- 		return 0;
+ #ifdef CONFIG_NUMA
  
--	if (gpio < 16)
--		return (inl(base + reg) & (1 << gpio)) ? 1 : 0;
--	else
--		return (inl(base + 0x80 + reg) & (1 << (gpio - 16))) ? 1 : 0;
-+	/* low bank register */
-+	if (gpio & 0xFFFF) {
-+		val = inl(base + reg) & (gpio & 0xFFFF);
-+		if ((gpio & 0xFFFF) == val)
-+			return 1;
-+	}
-+	/* high bank register */
-+	gpio >>= 16;
-+	if (gpio) {
-+		val = inl(base + 0x80 + reg) & gpio;
-+		if (gpio == val)
-+			return 1;
-+	}
-+	return 0;
- }
- EXPORT_SYMBOL_GPL(geode_gpio_isset);
+ /* which logical CPUs are on which nodes */
+-cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
++cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
+ 				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
+-EXPORT_SYMBOL(node_2_cpu_mask);
++EXPORT_SYMBOL(node_to_cpumask_map);
+ /* which node each logical CPU is on */
+-int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
+-EXPORT_SYMBOL(cpu_2_node);
++int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
++EXPORT_SYMBOL(cpu_to_node_map);
  
-diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
-index 6b34693..a317336 100644
---- a/arch/x86/kernel/head64.c
-+++ b/arch/x86/kernel/head64.c
-@@ -10,6 +10,7 @@
- #include <linux/kernel.h>
- #include <linux/string.h>
- #include <linux/percpu.h>
-+#include <linux/start_kernel.h>
+ /* set up a mapping between cpu and node. */
+ static inline void map_cpu_to_node(int cpu, int node)
+ {
+ 	printk("Mapping cpu %d to node %d\n", cpu, node);
+-	cpu_set(cpu, node_2_cpu_mask[node]);
+-	cpu_2_node[cpu] = node;
++	cpu_set(cpu, node_to_cpumask_map[node]);
++	cpu_to_node_map[cpu] = node;
+ }
  
- #include <asm/processor.h>
- #include <asm/proto.h>
-@@ -19,12 +20,14 @@
- #include <asm/pgtable.h>
- #include <asm/tlbflush.h>
- #include <asm/sections.h>
-+#include <asm/kdebug.h>
-+#include <asm/e820.h>
+ /* undo a mapping between cpu and node. */
+@@ -489,8 +478,8 @@ static inline void unmap_cpu_to_node(int cpu)
  
- static void __init zap_identity_mappings(void)
- {
- 	pgd_t *pgd = pgd_offset_k(0UL);
- 	pgd_clear(pgd);
--	__flush_tlb();
-+	__flush_tlb_all();
+ 	printk("Unmapping cpu %d from all nodes\n", cpu);
+ 	for (node = 0; node < MAX_NUMNODES; node ++)
+-		cpu_clear(cpu, node_2_cpu_mask[node]);
+-	cpu_2_node[cpu] = 0;
++		cpu_clear(cpu, node_to_cpumask_map[node]);
++	cpu_to_node_map[cpu] = 0;
  }
+ #else /* !CONFIG_NUMA */
  
- /* Don't add a printk in there. printk relies on the PDA which is not initialized 
-@@ -46,6 +49,35 @@ static void __init copy_bootdata(char *real_mode_data)
+@@ -668,7 +657,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
+ 	 * target processor state.
+ 	 */
+ 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
+-		         (unsigned long) stack_start.esp);
++		         (unsigned long) stack_start.sp);
+ 
+ 	/*
+ 	 * Run STARTUP IPI loop.
+@@ -754,7 +743,7 @@ static inline struct task_struct * __cpuinit alloc_idle_task(int cpu)
+ 		/* initialize thread_struct.  we really want to avoid destroy
+ 		 * idle tread
+ 		 */
+-		idle->thread.esp = (unsigned long)task_pt_regs(idle);
++		idle->thread.sp = (unsigned long)task_pt_regs(idle);
+ 		init_idle(idle, cpu);
+ 		return idle;
  	}
- }
+@@ -799,7 +788,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
+  	per_cpu(current_task, cpu) = idle;
+ 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
  
-+#define EBDA_ADDR_POINTER 0x40E
-+
-+static __init void reserve_ebda(void)
-+{
-+	unsigned ebda_addr, ebda_size;
-+
-+	/*
-+	 * there is a real-mode segmented pointer pointing to the
-+	 * 4K EBDA area at 0x40E
-+	 */
-+	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
-+	ebda_addr <<= 4;
-+
-+	if (!ebda_addr)
-+		return;
-+
-+	ebda_size = *(unsigned short *)__va(ebda_addr);
-+
-+	/* Round EBDA up to pages */
-+	if (ebda_size == 0)
-+		ebda_size = 1;
-+	ebda_size <<= 10;
-+	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
-+	if (ebda_size > 64*1024)
-+		ebda_size = 64*1024;
-+
-+	reserve_early(ebda_addr, ebda_addr + ebda_size);
-+}
-+
- void __init x86_64_start_kernel(char * real_mode_data)
- {
- 	int i;
-@@ -56,8 +88,13 @@ void __init x86_64_start_kernel(char * real_mode_data)
- 	/* Make NULL pointers segfault */
- 	zap_identity_mappings();
+-	idle->thread.eip = (unsigned long) start_secondary;
++	idle->thread.ip = (unsigned long) start_secondary;
+ 	/* start_eip had better be page-aligned! */
+ 	start_eip = setup_trampoline();
  
--	for (i = 0; i < IDT_ENTRIES; i++)
-+	for (i = 0; i < IDT_ENTRIES; i++) {
-+#ifdef CONFIG_EARLY_PRINTK
-+		set_intr_gate(i, &early_idt_handlers[i]);
-+#else
- 		set_intr_gate(i, early_idt_handler);
-+#endif
-+	}
- 	load_idt((const struct desc_ptr *)&idt_descr);
+@@ -807,9 +796,9 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
+ 	alternatives_smp_switch(1);
  
- 	early_printk("Kernel alive\n");
-@@ -67,8 +104,24 @@ void __init x86_64_start_kernel(char * real_mode_data)
+ 	/* So we see what's up   */
+-	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
++	printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
+ 	/* Stack for startup_32 can be just as for start_secondary onwards */
+-	stack_start.esp = (void *) idle->thread.esp;
++	stack_start.sp = (void *) idle->thread.sp;
  
- 	pda_init(0);
- 	copy_bootdata(__va(real_mode_data));
--#ifdef CONFIG_SMP
--	cpu_set(0, cpu_online_map);
+ 	irq_ctx_init(cpu);
+ 
+@@ -1091,7 +1080,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
+ 	 * Allow the user to impress friends.
+ 	 */
+ 	Dprintk("Before bogomips.\n");
+-	for (cpu = 0; cpu < NR_CPUS; cpu++)
++	for_each_possible_cpu(cpu)
+ 		if (cpu_isset(cpu, cpu_callout_map))
+ 			bogosum += cpu_data(cpu).loops_per_jiffy;
+ 	printk(KERN_INFO
+@@ -1122,7 +1111,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
+ 	 * construct cpu_sibling_map, so that we can tell sibling CPUs
+ 	 * efficiently.
+ 	 */
+-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
++	for_each_possible_cpu(cpu) {
+ 		cpus_clear(per_cpu(cpu_sibling_map, cpu));
+ 		cpus_clear(per_cpu(cpu_core_map, cpu));
+ 	}
+@@ -1296,12 +1285,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
+ 	setup_ioapic_dest();
+ #endif
+ 	zap_low_mappings();
+-#ifndef CONFIG_HOTPLUG_CPU
+-	/*
+-	 * Disable executability of the SMP trampoline:
+-	 */
+-	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
 -#endif
-+
-+	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end));
-+
-+	/* Reserve INITRD */
-+	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
-+		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
-+		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-+		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
-+		reserve_early(ramdisk_image, ramdisk_end);
-+	}
-+
-+	reserve_ebda();
-+
-+	/*
-+	 * At this point everything still needed from the boot loader
-+	 * or BIOS or kernel text should be early reserved or marked not
-+	 * RAM in e820. All other memory is free game.
-+	 */
-+
- 	start_kernel();
  }
-diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
-index fbad51f..5d8c573 100644
---- a/arch/x86/kernel/head_32.S
-+++ b/arch/x86/kernel/head_32.S
-@@ -9,6 +9,7 @@
  
- .text
- #include <linux/threads.h>
-+#include <linux/init.h>
- #include <linux/linkage.h>
- #include <asm/segment.h>
- #include <asm/page.h>
-@@ -151,7 +152,9 @@ WEAK(xen_entry)
- 	/* Unknown implementation; there's really
- 	   nothing we can do at this point. */
- 	ud2a
--.data
-+
-+	__INITDATA
-+
- subarch_entries:
- 	.long default_entry		/* normal x86/PC */
- 	.long lguest_entry		/* lguest hypervisor */
-@@ -199,7 +202,6 @@ default_entry:
- 	addl $0x67, %eax			/* 0x67 == _PAGE_TABLE */
- 	movl %eax, 4092(%edx)
+ void __init smp_intr_init(void)
+diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
+index aaf4e12..cc64b80 100644
+--- a/arch/x86/kernel/smpboot_64.c
++++ b/arch/x86/kernel/smpboot_64.c
+@@ -65,7 +65,7 @@ int smp_num_siblings = 1;
+ EXPORT_SYMBOL(smp_num_siblings);
  
--	xorl %ebx,%ebx				/* This is the boot CPU (BSP) */
- 	jmp 3f
- /*
-  * Non-boot CPU entry point; entered from trampoline.S
-@@ -222,6 +224,8 @@ ENTRY(startup_32_smp)
- 	movl %eax,%es
- 	movl %eax,%fs
- 	movl %eax,%gs
-+#endif /* CONFIG_SMP */
-+3:
+ /* Last level cache ID of each logical CPU */
+-DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
++DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
+ 
+ /* Bitmask of currently online CPUs */
+ cpumask_t cpu_online_map __read_mostly;
+@@ -78,8 +78,6 @@ EXPORT_SYMBOL(cpu_online_map);
+  */
+ cpumask_t cpu_callin_map;
+ cpumask_t cpu_callout_map;
+-EXPORT_SYMBOL(cpu_callout_map);
+-
+ cpumask_t cpu_possible_map;
+ EXPORT_SYMBOL(cpu_possible_map);
+ 
+@@ -113,10 +111,20 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
+  * a new thread. Also avoids complicated thread destroy functionality
+  * for idle threads.
+  */
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
++ * removed after init for !CONFIG_HOTPLUG_CPU.
++ */
++static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
++#define get_idle_for_cpu(x)     (per_cpu(idle_thread_array, x))
++#define set_idle_for_cpu(x,p)   (per_cpu(idle_thread_array, x) = (p))
++#else
+ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+-
+ #define get_idle_for_cpu(x)     (idle_thread_array[(x)])
+ #define set_idle_for_cpu(x,p)   (idle_thread_array[(x)] = (p))
++#endif
++
  
  /*
-  *	New page tables may be in 4Mbyte page mode and may
-@@ -268,12 +272,6 @@ ENTRY(startup_32_smp)
- 	wrmsr
+  * Currently trivial. Write the real->protected mode
+@@ -212,6 +220,7 @@ void __cpuinit smp_callin(void)
  
- 6:
--	/* This is a secondary processor (AP) */
--	xorl %ebx,%ebx
--	incl %ebx
--
--#endif /* CONFIG_SMP */
--3:
+ 	Dprintk("CALLIN, before setup_local_APIC().\n");
+ 	setup_local_APIC();
++	end_local_APIC_setup();
  
- /*
-  * Enable paging
-@@ -297,7 +295,7 @@ ENTRY(startup_32_smp)
- 	popfl
+ 	/*
+ 	 * Get our bogomips.
+@@ -338,7 +347,7 @@ void __cpuinit start_secondary(void)
  
- #ifdef CONFIG_SMP
--	andl %ebx,%ebx
-+	cmpb $0, ready
- 	jz  1f				/* Initial CPU cleans BSS */
- 	jmp checkCPUtype
- 1:
-@@ -502,6 +500,7 @@ early_fault:
- 	call printk
- #endif
- #endif
-+	call dump_stack
- hlt_loop:
- 	hlt
- 	jmp hlt_loop
-diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index b6167fe..1d5a7a3 100644
---- a/arch/x86/kernel/head_64.S
-+++ b/arch/x86/kernel/head_64.S
-@@ -19,6 +19,13 @@
- #include <asm/msr.h>
- #include <asm/cache.h>
+ 	if (nmi_watchdog == NMI_IO_APIC) {
+ 		disable_8259A_irq(0);
+-		enable_NMI_through_LVT0(NULL);
++		enable_NMI_through_LVT0();
+ 		enable_8259A_irq(0);
+ 	}
  
-+#ifdef CONFIG_PARAVIRT
-+#include <asm/asm-offsets.h>
-+#include <asm/paravirt.h>
-+#else
-+#define GET_CR2_INTO_RCX movq %cr2, %rcx
-+#endif
-+
- /* we are not able to switch in one step to the final KERNEL ADRESS SPACE
-  * because we need identity-mapped pages.
-  *
-@@ -260,14 +267,43 @@ init_rsp:
- bad_address:
- 	jmp bad_address
+@@ -370,7 +379,7 @@ void __cpuinit start_secondary(void)
  
-+#ifdef CONFIG_EARLY_PRINTK
-+.macro early_idt_tramp first, last
-+	.ifgt \last-\first
-+	early_idt_tramp \first, \last-1
-+	.endif
-+	movl $\last,%esi
-+	jmp early_idt_handler
-+.endm
-+
-+	.globl early_idt_handlers
-+early_idt_handlers:
-+	early_idt_tramp 0, 63
-+	early_idt_tramp 64, 127
-+	early_idt_tramp 128, 191
-+	early_idt_tramp 192, 255
-+#endif
-+
- ENTRY(early_idt_handler)
-+#ifdef CONFIG_EARLY_PRINTK
- 	cmpl $2,early_recursion_flag(%rip)
- 	jz  1f
- 	incl early_recursion_flag(%rip)
-+	GET_CR2_INTO_RCX
-+	movq %rcx,%r9
-+	xorl %r8d,%r8d		# zero for error code
-+	movl %esi,%ecx		# get vector number
-+	# Test %ecx against mask of vectors that push error code.
-+	cmpl $31,%ecx
-+	ja 0f
-+	movl $1,%eax
-+	salq %cl,%rax
-+	testl $0x27d00,%eax
-+	je 0f
-+	popq %r8		# get error code
-+0:	movq 0(%rsp),%rcx	# get ip
-+	movq 8(%rsp),%rdx	# get cs
- 	xorl %eax,%eax
--	movq 8(%rsp),%rsi	# get rip
--	movq (%rsp),%rdx
--	movq %cr2,%rcx
- 	leaq early_idt_msg(%rip),%rdi
- 	call early_printk
- 	cmpl $2,early_recursion_flag(%rip)
-@@ -278,15 +314,19 @@ ENTRY(early_idt_handler)
- 	movq 8(%rsp),%rsi	# get rip again
- 	call __print_symbol
- #endif
-+#endif /* EARLY_PRINTK */
- 1:	hlt
- 	jmp 1b
-+
-+#ifdef CONFIG_EARLY_PRINTK
- early_recursion_flag:
- 	.long 0
+ 	unlock_ipi_call_lock();
  
- early_idt_msg:
--	.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
-+	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
- early_idt_ripmsg:
- 	.asciz "RIP %s\n"
-+#endif /* CONFIG_EARLY_PRINTK */
+-	setup_secondary_APIC_clock();
++	setup_secondary_clock();
  
- .balign PAGE_SIZE
+ 	cpu_idle();
+ }
+@@ -384,19 +393,20 @@ static void inquire_remote_apic(int apicid)
+ 	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
+ 	char *names[] = { "ID", "VERSION", "SPIV" };
+ 	int timeout;
+-	unsigned int status;
++	u32 status;
  
-diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
-index 2f99ee2..429d084 100644
---- a/arch/x86/kernel/hpet.c
-+++ b/arch/x86/kernel/hpet.c
-@@ -6,7 +6,6 @@
- #include <linux/init.h>
- #include <linux/sysdev.h>
- #include <linux/pm.h>
--#include <linux/delay.h>
+ 	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
  
- #include <asm/fixmap.h>
- #include <asm/hpet.h>
-@@ -16,7 +15,8 @@
- #define HPET_MASK	CLOCKSOURCE_MASK(32)
- #define HPET_SHIFT	22
+ 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
+-		printk("... APIC #%d %s: ", apicid, names[i]);
++		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
  
--/* FSEC = 10^-15 NSEC = 10^-9 */
-+/* FSEC = 10^-15
-+   NSEC = 10^-9 */
- #define FSEC_PER_NSEC	1000000
+ 		/*
+ 		 * Wait for idle.
+ 		 */
+ 		status = safe_apic_wait_icr_idle();
+ 		if (status)
+-			printk("a previous APIC delivery may have failed\n");
++			printk(KERN_CONT
++			       "a previous APIC delivery may have failed\n");
  
- /*
-@@ -107,6 +107,7 @@ int is_hpet_enabled(void)
- {
- 	return is_hpet_capable() && hpet_legacy_int_enabled;
+ 		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
+ 		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
+@@ -410,10 +420,10 @@ static void inquire_remote_apic(int apicid)
+ 		switch (status) {
+ 		case APIC_ICR_RR_VALID:
+ 			status = apic_read(APIC_RRR);
+-			printk("%08x\n", status);
++			printk(KERN_CONT "%08x\n", status);
+ 			break;
+ 		default:
+-			printk("failed\n");
++			printk(KERN_CONT "failed\n");
+ 		}
+ 	}
  }
-+EXPORT_SYMBOL_GPL(is_hpet_enabled);
+@@ -466,7 +476,7 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
+ 	 */
+ 	Dprintk("#startup loops: %d.\n", num_starts);
  
- /*
-  * When the hpet driver (/dev/hpet) is enabled, we need to reserve
-@@ -132,16 +133,13 @@ static void hpet_reserve_platform_timers(unsigned long id)
- #ifdef CONFIG_HPET_EMULATE_RTC
- 	hpet_reserve_timer(&hd, 1);
- #endif
--
- 	hd.hd_irq[0] = HPET_LEGACY_8254;
- 	hd.hd_irq[1] = HPET_LEGACY_RTC;
+-	maxlvt = get_maxlvt();
++	maxlvt = lapic_get_maxlvt();
  
--	for (i = 2; i < nrtimers; timer++, i++)
--		hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
--			Tn_INT_ROUTE_CNF_SHIFT;
--
-+       for (i = 2; i < nrtimers; timer++, i++)
-+	       hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
-+		       Tn_INT_ROUTE_CNF_SHIFT;
- 	hpet_alloc(&hd);
--
- }
- #else
- static void hpet_reserve_platform_timers(unsigned long id) { }
-@@ -478,6 +476,7 @@ void hpet_disable(void)
-  */
- #include <linux/mc146818rtc.h>
- #include <linux/rtc.h>
-+#include <asm/rtc.h>
+ 	for (j = 1; j <= num_starts; j++) {
+ 		Dprintk("Sending STARTUP #%d.\n",j);
+@@ -577,7 +587,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
+ 	c_idle.idle = get_idle_for_cpu(cpu);
  
- #define DEFAULT_RTC_INT_FREQ	64
- #define DEFAULT_RTC_SHIFT	6
-@@ -492,6 +491,38 @@ static unsigned long hpet_default_delta;
- static unsigned long hpet_pie_delta;
- static unsigned long hpet_pie_limit;
+ 	if (c_idle.idle) {
+-		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
++		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
+ 			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
+ 		init_idle(c_idle.idle, cpu);
+ 		goto do_rest;
+@@ -613,8 +623,8 @@ do_rest:
  
-+static rtc_irq_handler irq_handler;
-+
-+/*
-+ * Registers a IRQ handler.
-+ */
-+int hpet_register_irq_handler(rtc_irq_handler handler)
-+{
-+	if (!is_hpet_enabled())
-+		return -ENODEV;
-+	if (irq_handler)
-+		return -EBUSY;
-+
-+	irq_handler = handler;
-+
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
-+
-+/*
-+ * Deregisters the IRQ handler registered with hpet_register_irq_handler()
-+ * and does cleanup.
-+ */
-+void hpet_unregister_irq_handler(rtc_irq_handler handler)
-+{
-+	if (!is_hpet_enabled())
-+		return;
-+
-+	irq_handler = NULL;
-+	hpet_rtc_flags = 0;
-+}
-+EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
-+
- /*
-  * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
-  * is not supported by all HPET implementations for timer 1.
-@@ -533,6 +564,7 @@ int hpet_rtc_timer_init(void)
+ 	start_rip = setup_trampoline();
  
- 	return 1;
- }
-+EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
+-	init_rsp = c_idle.idle->thread.rsp;
+-	per_cpu(init_tss,cpu).rsp0 = init_rsp;
++	init_rsp = c_idle.idle->thread.sp;
++	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
+ 	initial_code = start_secondary;
+ 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
  
- /*
-  * The functions below are called from rtc driver.
-@@ -547,6 +579,7 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
- 	hpet_rtc_flags &= ~bit_mask;
- 	return 1;
+@@ -691,7 +701,7 @@ do_rest:
+ 	}
+ 	if (boot_error) {
+ 		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
+-		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
++		clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
+ 		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
+ 		cpu_clear(cpu, cpu_present_map);
+ 		cpu_clear(cpu, cpu_possible_map);
+@@ -841,24 +851,16 @@ static int __init smp_sanity_check(unsigned max_cpus)
+ 	return 0;
  }
-+EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
  
- int hpet_set_rtc_irq_bit(unsigned long bit_mask)
+-/*
+- * Copy apicid's found by MP_processor_info from initial array to the per cpu
+- * data area.  The x86_cpu_to_apicid_init array is then expendable and the
+- * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no
+- * longer available.
+- */
+-void __init smp_set_apicids(void)
++static void __init smp_cpu_index_default(void)
  {
-@@ -562,6 +595,7 @@ int hpet_set_rtc_irq_bit(unsigned long bit_mask)
+-	int cpu;
++	int i;
++	struct cpuinfo_x86 *c;
  
- 	return 1;
+-	for_each_cpu_mask(cpu, cpu_possible_map) {
+-		if (per_cpu_offset(cpu))
+-			per_cpu(x86_cpu_to_apicid, cpu) =
+-						x86_cpu_to_apicid_init[cpu];
++	for_each_cpu_mask(i, cpu_possible_map) {
++		c = &cpu_data(i);
++		/* mark all to hotplug */
++		c->cpu_index = NR_CPUS;
+ 	}
+-
+-	/* indicate the static array will be going away soon */
+-	x86_cpu_to_apicid_ptr = NULL;
  }
-+EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
  
- int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
- 			unsigned char sec)
-@@ -575,6 +609,7 @@ int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
+ /*
+@@ -868,9 +870,9 @@ void __init smp_set_apicids(void)
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+ 	nmi_watchdog_default();
++	smp_cpu_index_default();
+ 	current_cpu_data = boot_cpu_data;
+ 	current_thread_info()->cpu = 0;  /* needed? */
+-	smp_set_apicids();
+ 	set_cpu_sibling_map(0);
  
- 	return 1;
+ 	if (smp_sanity_check(max_cpus) < 0) {
+@@ -885,6 +887,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ 	 */
+ 	setup_local_APIC();
+ 
++	/*
++	 * Enable IO APIC before setting up error vector
++	 */
++	if (!skip_ioapic_setup && nr_ioapics)
++		enable_IO_APIC();
++	end_local_APIC_setup();
++
+ 	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
+ 		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
+ 		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
+@@ -903,7 +912,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ 	 * Set up local APIC timer on boot CPU.
+ 	 */
+ 
+-	setup_boot_APIC_clock();
++	setup_boot_clock();
  }
-+EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
  
- int hpet_set_periodic_freq(unsigned long freq)
+ /*
+@@ -912,7 +921,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ void __init smp_prepare_boot_cpu(void)
  {
-@@ -593,11 +628,13 @@ int hpet_set_periodic_freq(unsigned long freq)
- 	}
- 	return 1;
+ 	int me = smp_processor_id();
+-	cpu_set(me, cpu_online_map);
++	/* already set me in cpu_online_map in boot_cpu_init() */
+ 	cpu_set(me, cpu_callout_map);
+ 	per_cpu(cpu_state, me) = CPU_ONLINE;
  }
-+EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
+@@ -1016,7 +1025,7 @@ void remove_cpu_from_maps(void)
  
- int hpet_rtc_dropped_irq(void)
- {
- 	return is_hpet_enabled();
+ 	cpu_clear(cpu, cpu_callout_map);
+ 	cpu_clear(cpu, cpu_callin_map);
+-	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
++	clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
+ 	clear_node_cpumask(cpu);
  }
-+EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
  
- static void hpet_rtc_timer_reinit(void)
+diff --git a/arch/x86/kernel/smpcommon_32.c b/arch/x86/kernel/smpcommon_32.c
+index bbfe85a..8bc38af 100644
+--- a/arch/x86/kernel/smpcommon_32.c
++++ b/arch/x86/kernel/smpcommon_32.c
+@@ -14,10 +14,11 @@ __cpuinit void init_gdt(int cpu)
  {
-@@ -641,9 +678,10 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
- 	unsigned long rtc_int_flag = 0;
- 
- 	hpet_rtc_timer_reinit();
-+	memset(&curr_time, 0, sizeof(struct rtc_time));
+ 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
  
- 	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
--		rtc_get_rtc_time(&curr_time);
-+		get_rtc_time(&curr_time);
+-	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+-			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
++	pack_descriptor(&gdt[GDT_ENTRY_PERCPU],
+ 			__per_cpu_offset[cpu], 0xFFFFF,
+-			0x80 | DESCTYPE_S | 0x2, 0x8);
++			0x2 | DESCTYPE_S, 0x8);
++
++	gdt[GDT_ENTRY_PERCPU].s = 1;
  
- 	if (hpet_rtc_flags & RTC_UIE &&
- 	    curr_time.tm_sec != hpet_prev_update_sec) {
-@@ -665,8 +703,10 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+ 	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+ 	per_cpu(cpu_number, cpu) = cpu;
+diff --git a/arch/x86/kernel/srat_32.c b/arch/x86/kernel/srat_32.c
+index 2a8713e..2bf6903 100644
+--- a/arch/x86/kernel/srat_32.c
++++ b/arch/x86/kernel/srat_32.c
+@@ -57,8 +57,6 @@ static struct node_memory_chunk_s node_memory_chunk[MAXCHUNKS];
+ static int num_memory_chunks;		/* total number of memory chunks */
+ static u8 __initdata apicid_to_pxm[MAX_APICID];
  
- 	if (rtc_int_flag) {
- 		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
--		rtc_interrupt(rtc_int_flag, dev_id);
-+		if (irq_handler)
-+			irq_handler(rtc_int_flag, dev_id);
+-extern void * boot_ioremap(unsigned long, unsigned long);
+-
+ /* Identify CPU proximity domains */
+ static void __init parse_cpu_affinity_structure(char *p)
+ {
+@@ -299,7 +297,7 @@ int __init get_memcfg_from_srat(void)
  	}
- 	return IRQ_HANDLED;
+ 
+ 	rsdt = (struct acpi_table_rsdt *)
+-	    boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
++	    early_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
+ 
+ 	if (!rsdt) {
+ 		printk(KERN_WARNING
+@@ -339,11 +337,11 @@ int __init get_memcfg_from_srat(void)
+ 	for (i = 0; i < tables; i++) {
+ 		/* Map in header, then map in full table length. */
+ 		header = (struct acpi_table_header *)
+-			boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
++			early_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
+ 		if (!header)
+ 			break;
+ 		header = (struct acpi_table_header *)
+-			boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
++			early_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
+ 		if (!header)
+ 			break;
+ 
+diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
+index 6fa6cf0..02f0f61 100644
+--- a/arch/x86/kernel/stacktrace.c
++++ b/arch/x86/kernel/stacktrace.c
+@@ -22,9 +22,23 @@ static int save_stack_stack(void *data, char *name)
+ 	return -1;
  }
-+EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
- #endif
-diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
-index 02112fc..0616278 100644
---- a/arch/x86/kernel/i386_ksyms_32.c
-+++ b/arch/x86/kernel/i386_ksyms_32.c
-@@ -22,12 +22,5 @@ EXPORT_SYMBOL(__put_user_8);
  
- EXPORT_SYMBOL(strstr);
+-static void save_stack_address(void *data, unsigned long addr)
++static void save_stack_address(void *data, unsigned long addr, int reliable)
++{
++	struct stack_trace *trace = data;
++	if (trace->skip > 0) {
++		trace->skip--;
++		return;
++	}
++	if (trace->nr_entries < trace->max_entries)
++		trace->entries[trace->nr_entries++] = addr;
++}
++
++static void
++save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+ {
+ 	struct stack_trace *trace = (struct stack_trace *)data;
++	if (in_sched_functions(addr))
++		return;
+ 	if (trace->skip > 0) {
+ 		trace->skip--;
+ 		return;
+@@ -40,13 +54,26 @@ static const struct stacktrace_ops save_stack_ops = {
+ 	.address = save_stack_address,
+ };
  
--#ifdef CONFIG_SMP
--extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
--extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
--EXPORT_SYMBOL(__write_lock_failed);
--EXPORT_SYMBOL(__read_lock_failed);
--#endif
--
- EXPORT_SYMBOL(csum_partial);
- EXPORT_SYMBOL(empty_zero_page);
-diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
++static const struct stacktrace_ops save_stack_ops_nosched = {
++	.warning = save_stack_warning,
++	.warning_symbol = save_stack_warning_symbol,
++	.stack = save_stack_stack,
++	.address = save_stack_address_nosched,
++};
++
+ /*
+  * Save stack-backtrace addresses into a stack_trace buffer.
+  */
+ void save_stack_trace(struct stack_trace *trace)
+ {
+-	dump_trace(current, NULL, NULL, &save_stack_ops, trace);
++	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
++	if (trace->nr_entries < trace->max_entries)
++		trace->entries[trace->nr_entries++] = ULONG_MAX;
++}
++
++void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++{
++	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
+ 	if (trace->nr_entries < trace->max_entries)
+ 		trace->entries[trace->nr_entries++] = ULONG_MAX;
+ }
+-EXPORT_SYMBOL(save_stack_trace);
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
 new file mode 100644
-index 0000000..26719bd
+index 0000000..2ef1a5f
 --- /dev/null
-+++ b/arch/x86/kernel/i387.c
-@@ -0,0 +1,479 @@
++++ b/arch/x86/kernel/step.c
+@@ -0,0 +1,203 @@
 +/*
-+ *  Copyright (C) 1994 Linus Torvalds
-+ *
-+ *  Pentium III FXSR, SSE support
-+ *  General FPU state handling cleanups
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ * x86 single-step support code, common to 32-bit and 64-bit.
 + */
-+
 +#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/regset.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/math_emu.h>
-+#include <asm/sigcontext.h>
-+#include <asm/user.h>
-+#include <asm/ptrace.h>
-+#include <asm/uaccess.h>
-+
-+#ifdef CONFIG_X86_64
-+
-+#include <asm/sigcontext32.h>
-+#include <asm/user32.h>
-+
-+#else
++#include <linux/mm.h>
++#include <linux/ptrace.h>
 +
-+#define	save_i387_ia32		save_i387
-+#define	restore_i387_ia32	restore_i387
++unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
++{
++	unsigned long addr, seg;
 +
-+#define _fpstate_ia32 		_fpstate
-+#define user_i387_ia32_struct	user_i387_struct
-+#define user32_fxsr_struct	user_fxsr_struct
++	addr = regs->ip;
++	seg = regs->cs & 0xffff;
++	if (v8086_mode(regs)) {
++		addr = (addr & 0xffff) + (seg << 4);
++		return addr;
++	}
 +
-+#endif
++	/*
++	 * We'll assume that the code segments in the GDT
++	 * are all zero-based. That is largely true: the
++	 * TLS segments are used for data, and the PNPBIOS
++	 * and APM bios ones we just ignore here.
++	 */
++	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
++		u32 *desc;
++		unsigned long base;
 +
-+#ifdef CONFIG_MATH_EMULATION
-+#define HAVE_HWFP (boot_cpu_data.hard_math)
-+#else
-+#define HAVE_HWFP 1
-+#endif
++		seg &= ~7UL;
 +
-+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
++		mutex_lock(&child->mm->context.lock);
++		if (unlikely((seg >> 3) >= child->mm->context.size))
++			addr = -1L; /* bogus selector, access would fault */
++		else {
++			desc = child->mm->context.ldt + seg;
++			base = ((desc[0] >> 16) |
++				((desc[1] & 0xff) << 16) |
++				(desc[1] & 0xff000000));
 +
-+void mxcsr_feature_mask_init(void)
-+{
-+	unsigned long mask = 0;
-+	clts();
-+	if (cpu_has_fxsr) {
-+		memset(&current->thread.i387.fxsave, 0,
-+		       sizeof(struct i387_fxsave_struct));
-+		asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
-+		mask = current->thread.i387.fxsave.mxcsr_mask;
-+		if (mask == 0)
-+			mask = 0x0000ffbf;
++			/* 16-bit code segment? */
++			if (!((desc[1] >> 22) & 1))
++				addr &= 0xffff;
++			addr += base;
++		}
++		mutex_unlock(&child->mm->context.lock);
 +	}
-+	mxcsr_feature_mask &= mask;
-+	stts();
-+}
-+
-+#ifdef CONFIG_X86_64
-+/*
-+ * Called at bootup to set up the initial FPU state that is later cloned
-+ * into all processes.
-+ */
-+void __cpuinit fpu_init(void)
-+{
-+	unsigned long oldcr0 = read_cr0();
-+	extern void __bad_fxsave_alignment(void);
-+
-+	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
-+		__bad_fxsave_alignment();
-+	set_in_cr4(X86_CR4_OSFXSR);
-+	set_in_cr4(X86_CR4_OSXMMEXCPT);
-+
-+	write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
 +
-+	mxcsr_feature_mask_init();
-+	/* clean state in init */
-+	current_thread_info()->status = 0;
-+	clear_used_math();
++	return addr;
 +}
-+#endif	/* CONFIG_X86_64 */
 +
-+/*
-+ * The _current_ task is using the FPU for the first time
-+ * so initialize it and set the mxcsr to its default
-+ * value at reset if we support XMM instructions and then
-+ * remeber the current task has used the FPU.
-+ */
-+void init_fpu(struct task_struct *tsk)
++static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
 +{
-+	if (tsk_used_math(tsk)) {
-+		if (tsk == current)
-+			unlazy_fpu(tsk);
-+		return;
-+	}
++	int i, copied;
++	unsigned char opcode[15];
++	unsigned long addr = convert_ip_to_linear(child, regs);
 +
-+	if (cpu_has_fxsr) {
-+		memset(&tsk->thread.i387.fxsave, 0,
-+		       sizeof(struct i387_fxsave_struct));
-+		tsk->thread.i387.fxsave.cwd = 0x37f;
-+		if (cpu_has_xmm)
-+			tsk->thread.i387.fxsave.mxcsr = MXCSR_DEFAULT;
-+	} else {
-+		memset(&tsk->thread.i387.fsave, 0,
-+		       sizeof(struct i387_fsave_struct));
-+		tsk->thread.i387.fsave.cwd = 0xffff037fu;
-+		tsk->thread.i387.fsave.swd = 0xffff0000u;
-+		tsk->thread.i387.fsave.twd = 0xffffffffu;
-+		tsk->thread.i387.fsave.fos = 0xffff0000u;
-+	}
-+	/*
-+	 * Only the device not available exception or ptrace can call init_fpu.
-+	 */
-+	set_stopped_child_used_math(tsk);
-+}
++	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
++	for (i = 0; i < copied; i++) {
++		switch (opcode[i]) {
++		/* popf and iret */
++		case 0x9d: case 0xcf:
++			return 1;
 +
-+int fpregs_active(struct task_struct *target, const struct user_regset *regset)
-+{
-+	return tsk_used_math(target) ? regset->n : 0;
-+}
++			/* CHECKME: 64 65 */
 +
-+int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
-+{
-+	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
-+}
++		/* opcode and address size prefixes */
++		case 0x66: case 0x67:
++			continue;
++		/* irrelevant prefixes (segment overrides and repeats) */
++		case 0x26: case 0x2e:
++		case 0x36: case 0x3e:
++		case 0x64: case 0x65:
++		case 0xf0: case 0xf2: case 0xf3:
++			continue;
 +
-+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
-+		unsigned int pos, unsigned int count,
-+		void *kbuf, void __user *ubuf)
-+{
-+	if (!cpu_has_fxsr)
-+		return -ENODEV;
++#ifdef CONFIG_X86_64
++		case 0x40 ... 0x4f:
++			if (regs->cs != __USER_CS)
++				/* 32-bit mode: register increment */
++				return 0;
++			/* 64-bit mode: REX prefix */
++			continue;
++#endif
 +
-+	unlazy_fpu(target);
++			/* CHECKME: f2, f3 */
 +
-+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-+				   &target->thread.i387.fxsave, 0, -1);
++		/*
++		 * pushf: NOTE! We should probably not let
++		 * the user see the TF bit being set. But
++		 * it's more pain than it's worth to avoid
++		 * it, and a debugger could emulate this
++		 * all in user space if it _really_ cares.
++		 */
++		case 0x9c:
++		default:
++			return 0;
++		}
++	}
++	return 0;
 +}
 +
-+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
-+		unsigned int pos, unsigned int count,
-+		const void *kbuf, const void __user *ubuf)
++/*
++ * Enable single-stepping.  Return nonzero if user mode is not using TF itself.
++ */
++static int enable_single_step(struct task_struct *child)
 +{
-+	int ret;
++	struct pt_regs *regs = task_pt_regs(child);
 +
-+	if (!cpu_has_fxsr)
-+		return -ENODEV;
++	/*
++	 * Always set TIF_SINGLESTEP - this guarantees that
++	 * we single-step system calls etc..  This will also
++	 * cause us to set TF when returning to user mode.
++	 */
++	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 +
-+	unlazy_fpu(target);
-+	set_stopped_child_used_math(target);
++	/*
++	 * If TF was already set, don't do anything else
++	 */
++	if (regs->flags & X86_EFLAGS_TF)
++		return 0;
 +
-+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-+				 &target->thread.i387.fxsave, 0, -1);
++	/* Set TF on the kernel stack.. */
++	regs->flags |= X86_EFLAGS_TF;
 +
 +	/*
-+	 * mxcsr reserved bits must be masked to zero for security reasons.
++	 * ..but if TF is changed by the instruction we will trace,
++	 * don't mark it as being "us" that set it, so that we
++	 * won't clear it by hand later.
 +	 */
-+	target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
++	if (is_setting_trap_flag(child, regs))
++		return 0;
 +
-+	return ret;
-+}
++	set_tsk_thread_flag(child, TIF_FORCED_TF);
 +
-+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
++	return 1;
++}
 +
 +/*
-+ * FPU tag word conversions.
++ * Install this value in MSR_IA32_DEBUGCTLMSR whenever child is running.
 + */
-+
-+static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
-+{
-+	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
-+
-+	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
-+	tmp = ~twd;
-+	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
-+	/* and move the valid bits to the lower byte. */
-+	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
-+	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
-+	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
-+	return tmp;
-+}
-+
-+#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
-+#define FP_EXP_TAG_VALID	0
-+#define FP_EXP_TAG_ZERO		1
-+#define FP_EXP_TAG_SPECIAL	2
-+#define FP_EXP_TAG_EMPTY	3
-+
-+static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
++static void write_debugctlmsr(struct task_struct *child, unsigned long val)
 +{
-+	struct _fpxreg *st;
-+	u32 tos = (fxsave->swd >> 11) & 7;
-+	u32 twd = (unsigned long) fxsave->twd;
-+	u32 tag;
-+	u32 ret = 0xffff0000u;
-+	int i;
++	child->thread.debugctlmsr = val;
 +
-+	for (i = 0; i < 8; i++, twd >>= 1) {
-+		if (twd & 0x1) {
-+			st = FPREG_ADDR(fxsave, (i - tos) & 7);
++	if (child != current)
++		return;
 +
-+			switch (st->exponent & 0x7fff) {
-+			case 0x7fff:
-+				tag = FP_EXP_TAG_SPECIAL;
-+				break;
-+			case 0x0000:
-+				if (!st->significand[0] &&
-+				    !st->significand[1] &&
-+				    !st->significand[2] &&
-+				    !st->significand[3])
-+					tag = FP_EXP_TAG_ZERO;
-+				else
-+					tag = FP_EXP_TAG_SPECIAL;
-+				break;
-+			default:
-+				if (st->significand[3] & 0x8000)
-+					tag = FP_EXP_TAG_VALID;
-+				else
-+					tag = FP_EXP_TAG_SPECIAL;
-+				break;
-+			}
-+		} else {
-+			tag = FP_EXP_TAG_EMPTY;
-+		}
-+		ret |= tag << (2 * i);
-+	}
-+	return ret;
++	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
 +}
 +
 +/*
-+ * FXSR floating point environment conversions.
++ * Enable single or block step.
 + */
-+
-+static void convert_from_fxsr(struct user_i387_ia32_struct *env,
-+			      struct task_struct *tsk)
++static void enable_step(struct task_struct *child, bool block)
 +{
-+	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
-+	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
-+	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
-+	int i;
-+
-+	env->cwd = fxsave->cwd | 0xffff0000u;
-+	env->swd = fxsave->swd | 0xffff0000u;
-+	env->twd = twd_fxsr_to_i387(fxsave);
-+
-+#ifdef CONFIG_X86_64
-+	env->fip = fxsave->rip;
-+	env->foo = fxsave->rdp;
-+	if (tsk == current) {
-+		/*
-+		 * should be actually ds/cs at fpu exception time, but
-+		 * that information is not available in 64bit mode.
-+		 */
-+		asm("mov %%ds,%0" : "=r" (env->fos));
-+		asm("mov %%cs,%0" : "=r" (env->fcs));
++	/*
++	 * Make sure block stepping (BTF) is not enabled unless it should be.
++	 * Note that we don't try to worry about any is_setting_trap_flag()
++	 * instructions after the first when using block stepping.
++	 * So noone should try to use debugger block stepping in a program
++	 * that uses user-mode single stepping itself.
++	 */
++	if (enable_single_step(child) && block) {
++		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
++		write_debugctlmsr(child,
++				  child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
 +	} else {
-+		struct pt_regs *regs = task_pt_regs(tsk);
-+		env->fos = 0xffff0000 | tsk->thread.ds;
-+		env->fcs = regs->cs;
-+	}
-+#else
-+	env->fip = fxsave->fip;
-+	env->fcs = fxsave->fcs;
-+	env->foo = fxsave->foo;
-+	env->fos = fxsave->fos;
-+#endif
++	    write_debugctlmsr(child,
++			      child->thread.debugctlmsr & ~TIF_DEBUGCTLMSR);
 +
-+	for (i = 0; i < 8; ++i)
-+		memcpy(&to[i], &from[i], sizeof(to[0]));
++	    if (!child->thread.debugctlmsr)
++		    clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
++	}
 +}
 +
-+static void convert_to_fxsr(struct task_struct *tsk,
-+			    const struct user_i387_ia32_struct *env)
-+
++void user_enable_single_step(struct task_struct *child)
 +{
-+	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
-+	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
-+	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
-+	int i;
-+
-+	fxsave->cwd = env->cwd;
-+	fxsave->swd = env->swd;
-+	fxsave->twd = twd_i387_to_fxsr(env->twd);
-+	fxsave->fop = (u16) ((u32) env->fcs >> 16);
-+#ifdef CONFIG_X86_64
-+	fxsave->rip = env->fip;
-+	fxsave->rdp = env->foo;
-+	/* cs and ds ignored */
-+#else
-+	fxsave->fip = env->fip;
-+	fxsave->fcs = (env->fcs & 0xffff);
-+	fxsave->foo = env->foo;
-+	fxsave->fos = env->fos;
-+#endif
-+
-+	for (i = 0; i < 8; ++i)
-+		memcpy(&to[i], &from[i], sizeof(from[0]));
++	enable_step(child, 0);
 +}
 +
-+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
-+	       unsigned int pos, unsigned int count,
-+	       void *kbuf, void __user *ubuf)
++void user_enable_block_step(struct task_struct *child)
 +{
-+	struct user_i387_ia32_struct env;
-+
-+	if (!HAVE_HWFP)
-+		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
-+
-+	unlazy_fpu(target);
-+
-+	if (!cpu_has_fxsr)
-+		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-+					   &target->thread.i387.fsave, 0, -1);
-+
-+	if (kbuf && pos == 0 && count == sizeof(env)) {
-+		convert_from_fxsr(kbuf, target);
-+		return 0;
-+	}
-+
-+	convert_from_fxsr(&env, target);
-+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
++	enable_step(child, 1);
 +}
 +
-+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
-+	       unsigned int pos, unsigned int count,
-+	       const void *kbuf, const void __user *ubuf)
++void user_disable_single_step(struct task_struct *child)
 +{
-+	struct user_i387_ia32_struct env;
-+	int ret;
-+
-+	if (!HAVE_HWFP)
-+		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
-+
-+	unlazy_fpu(target);
-+	set_stopped_child_used_math(target);
-+
-+	if (!cpu_has_fxsr)
-+		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-+					  &target->thread.i387.fsave, 0, -1);
++	/*
++	 * Make sure block stepping (BTF) is disabled.
++	 */
++	write_debugctlmsr(child,
++			  child->thread.debugctlmsr & ~TIF_DEBUGCTLMSR);
 +
-+	if (pos > 0 || count < sizeof(env))
-+		convert_from_fxsr(&env, target);
++	if (!child->thread.debugctlmsr)
++		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
 +
-+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
-+	if (!ret)
-+		convert_to_fxsr(target, &env);
++	/* Always clear TIF_SINGLESTEP... */
++	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 +
-+	return ret;
++	/* But touch TF only if it was set by us.. */
++	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
++		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
 +}
+diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
+index 2e5efaa..0919951 100644
+--- a/arch/x86/kernel/suspend_64.c
++++ b/arch/x86/kernel/suspend_64.c
+@@ -17,9 +17,26 @@
+ /* References to section boundaries */
+ extern const void __nosave_begin, __nosave_end;
+ 
++static void fix_processor_context(void);
 +
-+/*
-+ * Signal frame handlers.
+ struct saved_context saved_context;
+ 
+-void __save_processor_state(struct saved_context *ctxt)
++/**
++ *	__save_processor_state - save CPU registers before creating a
++ *		hibernation image and before restoring the memory state from it
++ *	@ctxt - structure to store the registers contents in
++ *
++ *	NOTE: If there is a CPU register the modification of which by the
++ *	boot kernel (ie. the kernel used for loading the hibernation image)
++ *	might affect the operations of the restored target kernel (ie. the one
++ *	saved in the hibernation image), then its contents must be saved by this
++ *	function.  In other words, if kernel A is hibernated and different
++ *	kernel B is used for loading the hibernation image into memory, the
++ *	kernel A's __save_processor_state() function must save all registers
++ *	needed by kernel A, so that it can operate correctly after the resume
++ *	regardless of what kernel B does in the meantime.
 + */
++static void __save_processor_state(struct saved_context *ctxt)
+ {
+ 	kernel_fpu_begin();
+ 
+@@ -69,7 +86,12 @@ static void do_fpu_end(void)
+ 	kernel_fpu_end();
+ }
+ 
+-void __restore_processor_state(struct saved_context *ctxt)
++/**
++ *	__restore_processor_state - restore the contents of CPU registers saved
++ *		by __save_processor_state()
++ *	@ctxt - structure to load the registers contents from
++ */
++static void __restore_processor_state(struct saved_context *ctxt)
+ {
+ 	/*
+ 	 * control registers
+@@ -113,14 +135,14 @@ void restore_processor_state(void)
+ 	__restore_processor_state(&saved_context);
+ }
+ 
+-void fix_processor_context(void)
++static void fix_processor_context(void)
+ {
+ 	int cpu = smp_processor_id();
+ 	struct tss_struct *t = &per_cpu(init_tss, cpu);
+ 
+ 	set_tss_desc(cpu,t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+ 
+-	cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;
++	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
+ 
+ 	syscall_init();                         /* This sets MSR_*STAR and related */
+ 	load_TR_desc();				/* This does ltr */
+diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
+index 72f9521..aeb9a4d 100644
+--- a/arch/x86/kernel/suspend_asm_64.S
++++ b/arch/x86/kernel/suspend_asm_64.S
+@@ -18,13 +18,13 @@
+ 
+ ENTRY(swsusp_arch_suspend)
+ 	movq	$saved_context, %rax
+-	movq	%rsp, pt_regs_rsp(%rax)
+-	movq	%rbp, pt_regs_rbp(%rax)
+-	movq	%rsi, pt_regs_rsi(%rax)
+-	movq	%rdi, pt_regs_rdi(%rax)
+-	movq	%rbx, pt_regs_rbx(%rax)
+-	movq	%rcx, pt_regs_rcx(%rax)
+-	movq	%rdx, pt_regs_rdx(%rax)
++	movq	%rsp, pt_regs_sp(%rax)
++	movq	%rbp, pt_regs_bp(%rax)
++	movq	%rsi, pt_regs_si(%rax)
++	movq	%rdi, pt_regs_di(%rax)
++	movq	%rbx, pt_regs_bx(%rax)
++	movq	%rcx, pt_regs_cx(%rax)
++	movq	%rdx, pt_regs_dx(%rax)
+ 	movq	%r8, pt_regs_r8(%rax)
+ 	movq	%r9, pt_regs_r9(%rax)
+ 	movq	%r10, pt_regs_r10(%rax)
+@@ -34,7 +34,7 @@ ENTRY(swsusp_arch_suspend)
+ 	movq	%r14, pt_regs_r14(%rax)
+ 	movq	%r15, pt_regs_r15(%rax)
+ 	pushfq
+-	popq	pt_regs_eflags(%rax)
++	popq	pt_regs_flags(%rax)
+ 
+ 	/* save the address of restore_registers */
+ 	movq	$restore_registers, %rax
+@@ -115,13 +115,13 @@ ENTRY(restore_registers)
+ 
+ 	/* We don't restore %rax, it must be 0 anyway */
+ 	movq	$saved_context, %rax
+-	movq	pt_regs_rsp(%rax), %rsp
+-	movq	pt_regs_rbp(%rax), %rbp
+-	movq	pt_regs_rsi(%rax), %rsi
+-	movq	pt_regs_rdi(%rax), %rdi
+-	movq	pt_regs_rbx(%rax), %rbx
+-	movq	pt_regs_rcx(%rax), %rcx
+-	movq	pt_regs_rdx(%rax), %rdx
++	movq	pt_regs_sp(%rax), %rsp
++	movq	pt_regs_bp(%rax), %rbp
++	movq	pt_regs_si(%rax), %rsi
++	movq	pt_regs_di(%rax), %rdi
++	movq	pt_regs_bx(%rax), %rbx
++	movq	pt_regs_cx(%rax), %rcx
++	movq	pt_regs_dx(%rax), %rdx
+ 	movq	pt_regs_r8(%rax), %r8
+ 	movq	pt_regs_r9(%rax), %r9
+ 	movq	pt_regs_r10(%rax), %r10
+@@ -130,7 +130,7 @@ ENTRY(restore_registers)
+ 	movq	pt_regs_r13(%rax), %r13
+ 	movq	pt_regs_r14(%rax), %r14
+ 	movq	pt_regs_r15(%rax), %r15
+-	pushq	pt_regs_eflags(%rax)
++	pushq	pt_regs_flags(%rax)
+ 	popfq
+ 
+ 	xorq	%rax, %rax
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index 907942e..bd802a5 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -12,6 +12,7 @@
+ #include <linux/file.h>
+ #include <linux/utsname.h>
+ #include <linux/personality.h>
++#include <linux/random.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/ia32.h>
+@@ -65,6 +66,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+ 			   unsigned long *end)
+ {
+ 	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
++		unsigned long new_begin;
+ 		/* This is usually used needed to map code in small
+ 		   model, so it needs to be in the first 31bit. Limit
+ 		   it to that.  This means we need to move the
+@@ -74,6 +76,11 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+ 		   of playground for now. -AK */ 
+ 		*begin = 0x40000000; 
+ 		*end = 0x80000000;		
++		if (current->flags & PF_RANDOMIZE) {
++			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
++			if (new_begin)
++				*begin = new_begin;
++		}
+ 	} else {
+ 		*begin = TASK_UNMAPPED_BASE;
+ 		*end = TASK_SIZE; 
+@@ -143,6 +150,97 @@ full_search:
+ 	}
+ }
+ 
 +
-+static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
-+{
-+	struct task_struct *tsk = current;
-+
-+	unlazy_fpu(tsk);
-+	tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
-+	if (__copy_to_user(buf, &tsk->thread.i387.fsave,
-+			   sizeof(struct i387_fsave_struct)))
-+		return -1;
-+	return 1;
-+}
-+
-+static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
++			  const unsigned long len, const unsigned long pgoff,
++			  const unsigned long flags)
 +{
-+	struct task_struct *tsk = current;
-+	struct user_i387_ia32_struct env;
-+	int err = 0;
++	struct vm_area_struct *vma;
++	struct mm_struct *mm = current->mm;
++	unsigned long addr = addr0;
 +
-+	unlazy_fpu(tsk);
++	/* requested length too big for entire address space */
++	if (len > TASK_SIZE)
++		return -ENOMEM;
 +
-+	convert_from_fxsr(&env, tsk);
-+	if (__copy_to_user(buf, &env, sizeof(env)))
-+		return -1;
++	if (flags & MAP_FIXED)
++		return addr;
 +
-+	err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
-+	err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
-+	if (err)
-+		return -1;
++	/* for MAP_32BIT mappings we force the legact mmap base */
++	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
++		goto bottomup;
 +
-+	if (__copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
-+			   sizeof(struct i387_fxsave_struct)))
-+		return -1;
-+	return 1;
-+}
++	/* requesting a specific address */
++	if (addr) {
++		addr = PAGE_ALIGN(addr);
++		vma = find_vma(mm, addr);
++		if (TASK_SIZE - len >= addr &&
++				(!vma || addr + len <= vma->vm_start))
++			return addr;
++	}
 +
-+int save_i387_ia32(struct _fpstate_ia32 __user *buf)
-+{
-+	if (!used_math())
-+		return 0;
++	/* check if free_area_cache is useful for us */
++	if (len <= mm->cached_hole_size) {
++		mm->cached_hole_size = 0;
++		mm->free_area_cache = mm->mmap_base;
++	}
 +
-+	/* This will cause a "finit" to be triggered by the next
-+	 * attempted FPU operation by the 'current' process.
-+	 */
-+	clear_used_math();
++	/* either no address requested or can't fit in requested address hole */
++	addr = mm->free_area_cache;
 +
-+	if (HAVE_HWFP) {
-+		if (cpu_has_fxsr) {
-+			return save_i387_fxsave(buf);
-+		} else {
-+			return save_i387_fsave(buf);
-+		}
-+	} else {
-+		return fpregs_soft_get(current, NULL,
-+				       0, sizeof(struct user_i387_ia32_struct),
-+				       NULL, buf) ? -1 : 1;
++	/* make sure it can fit in the remaining address space */
++	if (addr > len) {
++		vma = find_vma(mm, addr-len);
++		if (!vma || addr <= vma->vm_start)
++			/* remember the address as a hint for next time */
++			return (mm->free_area_cache = addr-len);
 +	}
-+}
 +
-+static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
-+{
-+	struct task_struct *tsk = current;
-+	clear_fpu(tsk);
-+	return __copy_from_user(&tsk->thread.i387.fsave, buf,
-+				sizeof(struct i387_fsave_struct));
-+}
++	if (mm->mmap_base < len)
++		goto bottomup;
 +
-+static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
-+{
-+	int err;
-+	struct task_struct *tsk = current;
-+	struct user_i387_ia32_struct env;
-+	clear_fpu(tsk);
-+	err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
-+			       sizeof(struct i387_fxsave_struct));
-+	/* mxcsr reserved bits must be masked to zero for security reasons */
-+	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
-+	if (err || __copy_from_user(&env, buf, sizeof(env)))
-+		return 1;
-+	convert_to_fxsr(tsk, &env);
-+	return 0;
-+}
++	addr = mm->mmap_base-len;
 +
-+int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
-+{
-+	int err;
++	do {
++		/*
++		 * Lookup failure means no vma is above this address,
++		 * else if new region fits below vma->vm_start,
++		 * return with success:
++		 */
++		vma = find_vma(mm, addr);
++		if (!vma || addr+len <= vma->vm_start)
++			/* remember the address as a hint for next time */
++			return (mm->free_area_cache = addr);
 +
-+	if (HAVE_HWFP) {
-+		if (cpu_has_fxsr) {
-+			err = restore_i387_fxsave(buf);
-+		} else {
-+			err = restore_i387_fsave(buf);
-+		}
-+	} else {
-+		err = fpregs_soft_set(current, NULL,
-+				      0, sizeof(struct user_i387_ia32_struct),
-+				      NULL, buf) != 0;
-+	}
-+	set_used_math();
-+	return err;
-+}
++		/* remember the largest hole we saw so far */
++		if (addr + mm->cached_hole_size < vma->vm_start)
++			mm->cached_hole_size = vma->vm_start - addr;
 +
-+/*
-+ * FPU state for core dumps.
-+ * This is only used for a.out dumps now.
-+ * It is declared generically using elf_fpregset_t (which is
-+ * struct user_i387_struct) but is in fact only used for 32-bit
-+ * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
-+ */
-+int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
-+{
-+	int fpvalid;
-+	struct task_struct *tsk = current;
++		/* try just below the current vma->vm_start */
++		addr = vma->vm_start-len;
++	} while (len < vma->vm_start);
 +
-+	fpvalid = !!used_math();
-+	if (fpvalid)
-+		fpvalid = !fpregs_get(tsk, NULL,
-+				      0, sizeof(struct user_i387_ia32_struct),
-+				      fpu, NULL);
++bottomup:
++	/*
++	 * A failed mmap() very likely causes application failure,
++	 * so fall back to the bottom-up function here. This scenario
++	 * can happen with large stack limits and large mmap()
++	 * allocations.
++	 */
++	mm->cached_hole_size = ~0UL;
++	mm->free_area_cache = TASK_UNMAPPED_BASE;
++	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++	/*
++	 * Restore the topdown base:
++	 */
++	mm->free_area_cache = mm->mmap_base;
++	mm->cached_hole_size = ~0UL;
 +
-+	return fpvalid;
++	return addr;
 +}
-+EXPORT_SYMBOL(dump_fpu);
 +
-+#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
-diff --git a/arch/x86/kernel/i387_32.c b/arch/x86/kernel/i387_32.c
++
+ asmlinkage long sys_uname(struct new_utsname __user * name)
+ {
+ 	int err;
+diff --git a/arch/x86/kernel/sysenter_32.c b/arch/x86/kernel/sysenter_32.c
 deleted file mode 100644
-index 7d2e12f..0000000
---- a/arch/x86/kernel/i387_32.c
+index 5a2d951..0000000
+--- a/arch/x86/kernel/sysenter_32.c
 +++ /dev/null
-@@ -1,544 +0,0 @@
+@@ -1,346 +0,0 @@
 -/*
-- *  Copyright (C) 1994 Linus Torvalds
+- * (C) Copyright 2002 Linus Torvalds
+- * Portions based on the vdso-randomization code from exec-shield:
+- * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 - *
-- *  Pentium III FXSR, SSE support
-- *  General FPU state handling cleanups
-- *	Gareth Hughes <gareth at valinux.com>, May 2000
+- * This file contains the needed initializations to support sysenter.
 - */
 -
+-#include <linux/init.h>
+-#include <linux/smp.h>
+-#include <linux/thread_info.h>
 -#include <linux/sched.h>
+-#include <linux/gfp.h>
+-#include <linux/string.h>
+-#include <linux/elf.h>
+-#include <linux/mm.h>
+-#include <linux/err.h>
 -#include <linux/module.h>
--#include <asm/processor.h>
--#include <asm/i387.h>
--#include <asm/math_emu.h>
--#include <asm/sigcontext.h>
--#include <asm/user.h>
--#include <asm/ptrace.h>
--#include <asm/uaccess.h>
 -
--#ifdef CONFIG_MATH_EMULATION
--#define HAVE_HWFP (boot_cpu_data.hard_math)
+-#include <asm/cpufeature.h>
+-#include <asm/msr.h>
+-#include <asm/pgtable.h>
+-#include <asm/unistd.h>
+-#include <asm/elf.h>
+-#include <asm/tlbflush.h>
+-
+-enum {
+-	VDSO_DISABLED = 0,
+-	VDSO_ENABLED = 1,
+-	VDSO_COMPAT = 2,
+-};
+-
+-#ifdef CONFIG_COMPAT_VDSO
+-#define VDSO_DEFAULT	VDSO_COMPAT
 -#else
--#define HAVE_HWFP 1
+-#define VDSO_DEFAULT	VDSO_ENABLED
 -#endif
 -
--static unsigned long mxcsr_feature_mask __read_mostly = 0xffffffff;
+-/*
+- * Should the kernel map a VDSO page into processes and pass its
+- * address down to glibc upon exec()?
+- */
+-unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
 -
--void mxcsr_feature_mask_init(void)
+-EXPORT_SYMBOL_GPL(vdso_enabled);
+-
+-static int __init vdso_setup(char *s)
 -{
--	unsigned long mask = 0;
--	clts();
--	if (cpu_has_fxsr) {
--		memset(&current->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
--		asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave)); 
--		mask = current->thread.i387.fxsave.mxcsr_mask;
--		if (mask == 0) mask = 0x0000ffbf;
--	} 
--	mxcsr_feature_mask &= mask;
--	stts();
+-	vdso_enabled = simple_strtoul(s, NULL, 0);
+-
+-	return 1;
 -}
 -
--/*
-- * The _current_ task is using the FPU for the first time
-- * so initialize it and set the mxcsr to its default
-- * value at reset if we support XMM instructions and then
-- * remeber the current task has used the FPU.
-- */
--void init_fpu(struct task_struct *tsk)
+-__setup("vdso=", vdso_setup);
+-
+-extern asmlinkage void sysenter_entry(void);
+-
+-static __init void reloc_symtab(Elf32_Ehdr *ehdr,
+-				unsigned offset, unsigned size)
 -{
--	if (cpu_has_fxsr) {
--		memset(&tsk->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
--		tsk->thread.i387.fxsave.cwd = 0x37f;
--		if (cpu_has_xmm)
--			tsk->thread.i387.fxsave.mxcsr = 0x1f80;
--	} else {
--		memset(&tsk->thread.i387.fsave, 0, sizeof(struct i387_fsave_struct));
--		tsk->thread.i387.fsave.cwd = 0xffff037fu;
--		tsk->thread.i387.fsave.swd = 0xffff0000u;
--		tsk->thread.i387.fsave.twd = 0xffffffffu;
--		tsk->thread.i387.fsave.fos = 0xffff0000u;
+-	Elf32_Sym *sym = (void *)ehdr + offset;
+-	unsigned nsym = size / sizeof(*sym);
+-	unsigned i;
+-
+-	for(i = 0; i < nsym; i++, sym++) {
+-		if (sym->st_shndx == SHN_UNDEF ||
+-		    sym->st_shndx == SHN_ABS)
+-			continue;  /* skip */
+-
+-		if (sym->st_shndx > SHN_LORESERVE) {
+-			printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
+-			       sym->st_shndx);
+-			continue;
+-		}
+-
+-		switch(ELF_ST_TYPE(sym->st_info)) {
+-		case STT_OBJECT:
+-		case STT_FUNC:
+-		case STT_SECTION:
+-		case STT_FILE:
+-			sym->st_value += VDSO_HIGH_BASE;
+-		}
 -	}
--	/* only the device not available exception or ptrace can call init_fpu */
--	set_stopped_child_used_math(tsk);
 -}
 -
--/*
-- * FPU lazy state save handling.
-- */
+-static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
+-{
+-	Elf32_Dyn *dyn = (void *)ehdr + offset;
 -
--void kernel_fpu_begin(void)
+-	for(; dyn->d_tag != DT_NULL; dyn++)
+-		switch(dyn->d_tag) {
+-		case DT_PLTGOT:
+-		case DT_HASH:
+-		case DT_STRTAB:
+-		case DT_SYMTAB:
+-		case DT_RELA:
+-		case DT_INIT:
+-		case DT_FINI:
+-		case DT_REL:
+-		case DT_DEBUG:
+-		case DT_JMPREL:
+-		case DT_VERSYM:
+-		case DT_VERDEF:
+-		case DT_VERNEED:
+-		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
+-			/* definitely pointers needing relocation */
+-			dyn->d_un.d_ptr += VDSO_HIGH_BASE;
+-			break;
+-
+-		case DT_ENCODING ... OLD_DT_LOOS-1:
+-		case DT_LOOS ... DT_HIOS-1:
+-			/* Tags above DT_ENCODING are pointers if
+-			   they're even */
+-			if (dyn->d_tag >= DT_ENCODING &&
+-			    (dyn->d_tag & 1) == 0)
+-				dyn->d_un.d_ptr += VDSO_HIGH_BASE;
+-			break;
+-
+-		case DT_VERDEFNUM:
+-		case DT_VERNEEDNUM:
+-		case DT_FLAGS_1:
+-		case DT_RELACOUNT:
+-		case DT_RELCOUNT:
+-		case DT_VALRNGLO ... DT_VALRNGHI:
+-			/* definitely not pointers */
+-			break;
+-
+-		case OLD_DT_LOOS ... DT_LOOS-1:
+-		case DT_HIOS ... DT_VALRNGLO-1:
+-		default:
+-			if (dyn->d_tag > DT_ENCODING)
+-				printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
+-				       dyn->d_tag);
+-			break;
+-		}
+-}
+-
+-static __init void relocate_vdso(Elf32_Ehdr *ehdr)
 -{
--	struct thread_info *thread = current_thread_info();
+-	Elf32_Phdr *phdr;
+-	Elf32_Shdr *shdr;
+-	int i;
 -
--	preempt_disable();
--	if (thread->status & TS_USEDFPU) {
--		__save_init_fpu(thread->task);
+-	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
+-	       !elf_check_arch(ehdr) ||
+-	       ehdr->e_type != ET_DYN);
+-
+-	ehdr->e_entry += VDSO_HIGH_BASE;
+-
+-	/* rebase phdrs */
+-	phdr = (void *)ehdr + ehdr->e_phoff;
+-	for (i = 0; i < ehdr->e_phnum; i++) {
+-		phdr[i].p_vaddr += VDSO_HIGH_BASE;
+-
+-		/* relocate dynamic stuff */
+-		if (phdr[i].p_type == PT_DYNAMIC)
+-			reloc_dyn(ehdr, phdr[i].p_offset);
+-	}
+-
+-	/* rebase sections */
+-	shdr = (void *)ehdr + ehdr->e_shoff;
+-	for(i = 0; i < ehdr->e_shnum; i++) {
+-		if (!(shdr[i].sh_flags & SHF_ALLOC))
+-			continue;
+-
+-		shdr[i].sh_addr += VDSO_HIGH_BASE;
+-
+-		if (shdr[i].sh_type == SHT_SYMTAB ||
+-		    shdr[i].sh_type == SHT_DYNSYM)
+-			reloc_symtab(ehdr, shdr[i].sh_offset,
+-				     shdr[i].sh_size);
+-	}
+-}
+-
+-void enable_sep_cpu(void)
+-{
+-	int cpu = get_cpu();
+-	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+-
+-	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+-		put_cpu();
 -		return;
 -	}
--	clts();
+-
+-	tss->x86_tss.ss1 = __KERNEL_CS;
+-	tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+-	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+-	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
+-	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+-	put_cpu();	
+-}
+-
+-static struct vm_area_struct gate_vma;
+-
+-static int __init gate_vma_init(void)
+-{
+-	gate_vma.vm_mm = NULL;
+-	gate_vma.vm_start = FIXADDR_USER_START;
+-	gate_vma.vm_end = FIXADDR_USER_END;
+-	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+-	gate_vma.vm_page_prot = __P101;
+-	/*
+-	 * Make sure the vDSO gets into every core dump.
+-	 * Dumping its contents makes post-mortem fully interpretable later
+-	 * without matching up the same kernel and hardware config to see
+-	 * what PC values meant.
+-	 */
+-	gate_vma.vm_flags |= VM_ALWAYSDUMP;
+-	return 0;
 -}
--EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 -
 -/*
-- * FPU tag word conversions.
+- * These symbols are defined by vsyscall.o to mark the bounds
+- * of the ELF DSO images included therein.
 - */
+-extern const char vsyscall_int80_start, vsyscall_int80_end;
+-extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+-static struct page *syscall_pages[1];
 -
--static inline unsigned short twd_i387_to_fxsr( unsigned short twd )
+-static void map_compat_vdso(int map)
 -{
--	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
-- 
--	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
--        tmp = ~twd;
--        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
--        /* and move the valid bits to the lower byte. */
--        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
--        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
--        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
--        return tmp;
+-	static int vdso_mapped;
+-
+-	if (map == vdso_mapped)
+-		return;
+-
+-	vdso_mapped = map;
+-
+-	__set_fixmap(FIX_VDSO, page_to_pfn(syscall_pages[0]) << PAGE_SHIFT,
+-		     map ? PAGE_READONLY_EXEC : PAGE_NONE);
+-
+-	/* flush stray tlbs */
+-	flush_tlb_all();
 -}
 -
--static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave )
+-int __init sysenter_setup(void)
 -{
--	struct _fpxreg *st = NULL;
--	unsigned long tos = (fxsave->swd >> 11) & 7;
--	unsigned long twd = (unsigned long) fxsave->twd;
--	unsigned long tag;
--	unsigned long ret = 0xffff0000u;
--	int i;
+-	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+-	const void *vsyscall;
+-	size_t vsyscall_len;
 -
--#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
+-	syscall_pages[0] = virt_to_page(syscall_page);
 -
--	for ( i = 0 ; i < 8 ; i++ ) {
--		if ( twd & 0x1 ) {
--			st = FPREG_ADDR( fxsave, (i - tos) & 7 );
+-	gate_vma_init();
 -
--			switch ( st->exponent & 0x7fff ) {
--			case 0x7fff:
--				tag = 2;		/* Special */
--				break;
--			case 0x0000:
--				if ( !st->significand[0] &&
--				     !st->significand[1] &&
--				     !st->significand[2] &&
--				     !st->significand[3] ) {
--					tag = 1;	/* Zero */
--				} else {
--					tag = 2;	/* Special */
--				}
--				break;
--			default:
--				if ( st->significand[3] & 0x8000 ) {
--					tag = 0;	/* Valid */
--				} else {
--					tag = 2;	/* Special */
--				}
--				break;
--			}
--		} else {
--			tag = 3;			/* Empty */
+-	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+-
+-	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+-		vsyscall = &vsyscall_int80_start;
+-		vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start;
+-	} else {
+-		vsyscall = &vsyscall_sysenter_start;
+-		vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start;
+-	}
+-
+-	memcpy(syscall_page, vsyscall, vsyscall_len);
+-	relocate_vdso(syscall_page);
+-
+-	return 0;
+-}
+-
+-/* Defined in vsyscall-sysenter.S */
+-extern void SYSENTER_RETURN;
+-
+-/* Setup a VMA at program startup for the vsyscall page */
+-int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+-{
+-	struct mm_struct *mm = current->mm;
+-	unsigned long addr;
+-	int ret = 0;
+-	bool compat;
+-
+-	down_write(&mm->mmap_sem);
+-
+-	/* Test compat mode once here, in case someone
+-	   changes it via sysctl */
+-	compat = (vdso_enabled == VDSO_COMPAT);
+-
+-	map_compat_vdso(compat);
+-
+-	if (compat)
+-		addr = VDSO_HIGH_BASE;
+-	else {
+-		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+-		if (IS_ERR_VALUE(addr)) {
+-			ret = addr;
+-			goto up_fail;
 -		}
--		ret |= (tag << (2 * i));
--		twd = twd >> 1;
+-
+-		/*
+-		 * MAYWRITE to allow gdb to COW and set breakpoints
+-		 *
+-		 * Make sure the vDSO gets into every core dump.
+-		 * Dumping its contents makes post-mortem fully
+-		 * interpretable later without matching up the same
+-		 * kernel and hardware config to see what PC values
+-		 * meant.
+-		 */
+-		ret = install_special_mapping(mm, addr, PAGE_SIZE,
+-					      VM_READ|VM_EXEC|
+-					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+-					      VM_ALWAYSDUMP,
+-					      syscall_pages);
+-
+-		if (ret)
+-			goto up_fail;
 -	}
+-
+-	current->mm->context.vdso = (void *)addr;
+-	current_thread_info()->sysenter_return =
+-		(void *)VDSO_SYM(&SYSENTER_RETURN);
+-
+-  up_fail:
+-	up_write(&mm->mmap_sem);
+-
 -	return ret;
 -}
 -
--/*
-- * FPU state interaction.
-- */
+-const char *arch_vma_name(struct vm_area_struct *vma)
+-{
+-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+-		return "[vdso]";
+-	return NULL;
+-}
 -
--unsigned short get_fpu_cwd( struct task_struct *tsk )
+-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
 -{
--	if ( cpu_has_fxsr ) {
--		return tsk->thread.i387.fxsave.cwd;
--	} else {
--		return (unsigned short)tsk->thread.i387.fsave.cwd;
--	}
+-	struct mm_struct *mm = tsk->mm;
+-
+-	/* Check to see if this task was created in compat vdso mode */
+-	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
+-		return &gate_vma;
+-	return NULL;
 -}
 -
--unsigned short get_fpu_swd( struct task_struct *tsk )
+-int in_gate_area(struct task_struct *task, unsigned long addr)
 -{
--	if ( cpu_has_fxsr ) {
--		return tsk->thread.i387.fxsave.swd;
--	} else {
--		return (unsigned short)tsk->thread.i387.fsave.swd;
--	}
+-	const struct vm_area_struct *vma = get_gate_vma(task);
+-
+-	return vma && addr >= vma->vm_start && addr < vma->vm_end;
 -}
 -
--#if 0
--unsigned short get_fpu_twd( struct task_struct *tsk )
+-int in_gate_area_no_task(unsigned long addr)
 -{
--	if ( cpu_has_fxsr ) {
--		return tsk->thread.i387.fxsave.twd;
--	} else {
--		return (unsigned short)tsk->thread.i387.fsave.twd;
--	}
+-	return 0;
 -}
--#endif  /*  0  */
+diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c
+new file mode 100644
+index 0000000..ae0ef2e
+--- /dev/null
++++ b/arch/x86/kernel/test_nx.c
+@@ -0,0 +1,181 @@
++/*
++ * test_nx.c: functional test for NX functionality
++ *
++ * (C) Copyright 2008 Intel Corporation
++ * Author: Arjan van de Ven <arjan at linux.intel.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++#include <linux/module.h>
++#include <linux/sort.h>
++#include <asm/uaccess.h>
++
++extern int rodata_test_data;
++
++/*
++ * This file checks 4 things:
++ * 1) Check if the stack is not executable
++ * 2) Check if kmalloc memory is not executable
++ * 3) Check if the .rodata section is not executable
++ * 4) Check if the .data section of a module is not executable
++ *
++ * To do this, the test code tries to execute memory in stack/kmalloc/etc,
++ * and then checks if the expected trap happens.
++ *
++ * Sadly, this implies having a dynamic exception handling table entry.
++ * ... which can be done (and will make Rusty cry)... but it can only
++ * be done in a stand-alone module with only 1 entry total.
++ * (otherwise we'd have to sort and that's just too messy)
++ */
++
++
++
++/*
++ * We want to set up an exception handling point on our stack,
++ * which means a variable value. This function is rather dirty
++ * and walks the exception table of the module, looking for a magic
++ * marker and replaces it with a specific function.
++ */
++static void fudze_exception_table(void *marker, void *new)
++{
++	struct module *mod = THIS_MODULE;
++	struct exception_table_entry *extable;
++
++	/*
++	 * Note: This module has only 1 exception table entry,
++	 * so searching and sorting is not needed. If that changes,
++	 * this would be the place to search and re-sort the exception
++	 * table.
++	 */
++	if (mod->num_exentries > 1) {
++		printk(KERN_ERR "test_nx: too many exception table entries!\n");
++		printk(KERN_ERR "test_nx: test results are not reliable.\n");
++		return;
++	}
++	extable = (struct exception_table_entry *)mod->extable;
++	extable[0].insn = (unsigned long)new;
++}
++
++
++/*
++ * exception tables get their symbols translated so we need
++ * to use a fake function to put in there, which we can then
++ * replace at runtime.
++ */
++void foo_label(void);
++
++/*
++ * returns 0 for not-executable, negative for executable
++ *
++ * Note: we cannot allow this function to be inlined, because
++ * that would give us more than 1 exception table entry.
++ * This in turn would break the assumptions above.
++ */
++static noinline int test_address(void *address)
++{
++	unsigned long result;
++
++	/* Set up an exception table entry for our address */
++	fudze_exception_table(&foo_label, address);
++	result = 1;
++	asm volatile(
++		"foo_label:\n"
++		"0:	call *%[fake_code]\n"
++		"1:\n"
++		".section .fixup,\"ax\"\n"
++		"2:	mov %[zero], %[rslt]\n"
++		"	ret\n"
++		".previous\n"
++		".section __ex_table,\"a\"\n"
++		"       .align 8\n"
++#ifdef CONFIG_X86_32
++		"	.long 0b\n"
++		"	.long 2b\n"
++#else
++		"	.quad 0b\n"
++		"	.quad 2b\n"
++#endif
++		".previous\n"
++		: [rslt] "=r" (result)
++		: [fake_code] "r" (address), [zero] "r" (0UL), "0" (result)
++	);
++	/* change the exception table back for the next round */
++	fudze_exception_table(address, &foo_label);
++
++	if (result)
++		return -ENODEV;
++	return 0;
++}
++
++static unsigned char test_data = 0xC3; /* 0xC3 is the opcode for "ret" */
++
++static int test_NX(void)
++{
++	int ret = 0;
++	/* 0xC3 is the opcode for "ret" */
++	char stackcode[] = {0xC3, 0x90, 0 };
++	char *heap;
++
++	test_data = 0xC3;
++
++	printk(KERN_INFO "Testing NX protection\n");
++
++	/* Test 1: check if the stack is not executable */
++	if (test_address(&stackcode)) {
++		printk(KERN_ERR "test_nx: stack was executable\n");
++		ret = -ENODEV;
++	}
++
++
++	/* Test 2: Check if the heap is executable */
++	heap = kmalloc(64, GFP_KERNEL);
++	if (!heap)
++		return -ENOMEM;
++	heap[0] = 0xC3; /* opcode for "ret" */
++
++	if (test_address(heap)) {
++		printk(KERN_ERR "test_nx: heap was executable\n");
++		ret = -ENODEV;
++	}
++	kfree(heap);
++
++	/*
++	 * The following 2 tests currently fail, this needs to get fixed
++	 * Until then, don't run them to avoid too many people getting scared
++	 * by the error message
++	 */
++#if 0
++
++#ifdef CONFIG_DEBUG_RODATA
++	/* Test 3: Check if the .rodata section is executable */
++	if (rodata_test_data != 0xC3) {
++		printk(KERN_ERR "test_nx: .rodata marker has invalid value\n");
++		ret = -ENODEV;
++	} else if (test_address(&rodata_test_data)) {
++		printk(KERN_ERR "test_nx: .rodata section is executable\n");
++		ret = -ENODEV;
++	}
++#endif
++
++	/* Test 4: Check if the .data section of a module is executable */
++	if (test_address(&test_data)) {
++		printk(KERN_ERR "test_nx: .data section is executable\n");
++		ret = -ENODEV;
++	}
++
++#endif
++	return 0;
++}
++
++static void test_exit(void)
++{
++}
++
++module_init(test_NX);
++module_exit(test_exit);
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Testcase for the NX infrastructure");
++MODULE_AUTHOR("Arjan van de Ven <arjan at linux.intel.com>");
+diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
+new file mode 100644
+index 0000000..4c16377
+--- /dev/null
++++ b/arch/x86/kernel/test_rodata.c
+@@ -0,0 +1,86 @@
++/*
++ * test_rodata.c: functional test for mark_rodata_ro function
++ *
++ * (C) Copyright 2008 Intel Corporation
++ * Author: Arjan van de Ven <arjan at linux.intel.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++#include <linux/module.h>
++#include <asm/sections.h>
++extern int rodata_test_data;
++
++int rodata_test(void)
++{
++	unsigned long result;
++	unsigned long start, end;
++
++	/* test 1: read the value */
++	/* If this test fails, some previous testrun has clobbered the state */
++	if (!rodata_test_data) {
++		printk(KERN_ERR "rodata_test: test 1 fails (start data)\n");
++		return -ENODEV;
++	}
++
++	/* test 2: write to the variable; this should fault */
++	/*
++	 * If this test fails, we managed to overwrite the data
++	 *
++	 * This is written in assembly to be able to catch the
++	 * exception that is supposed to happen in the correct
++	 * case
++	 */
++
++	result = 1;
++	asm volatile(
++		"0:	mov %[zero],(%[rodata_test])\n"
++		"	mov %[zero], %[rslt]\n"
++		"1:\n"
++		".section .fixup,\"ax\"\n"
++		"2:	jmp 1b\n"
++		".previous\n"
++		".section __ex_table,\"a\"\n"
++		"       .align 16\n"
++#ifdef CONFIG_X86_32
++		"	.long 0b,2b\n"
++#else
++		"	.quad 0b,2b\n"
++#endif
++		".previous"
++		: [rslt] "=r" (result)
++		: [rodata_test] "r" (&rodata_test_data), [zero] "r" (0UL)
++	);
++
++
++	if (!result) {
++		printk(KERN_ERR "rodata_test: test data was not read only\n");
++		return -ENODEV;
++	}
++
++	/* test 3: check the value hasn't changed */
++	/* If this test fails, we managed to overwrite the data */
++	if (!rodata_test_data) {
++		printk(KERN_ERR "rodata_test: Test 3 failes (end data)\n");
++		return -ENODEV;
++	}
++	/* test 4: check if the rodata section is 4Kb aligned */
++	start = (unsigned long)__start_rodata;
++	end = (unsigned long)__end_rodata;
++	if (start & (PAGE_SIZE - 1)) {
++		printk(KERN_ERR "rodata_test: .rodata is not 4k aligned\n");
++		return -ENODEV;
++	}
++	if (end & (PAGE_SIZE - 1)) {
++		printk(KERN_ERR "rodata_test: .rodata end is not 4k aligned\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Testcase for the DEBUG_RODATA infrastructure");
++MODULE_AUTHOR("Arjan van de Ven <arjan at linux.intel.com>");
+diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
+index 8a322c9..1a89e93 100644
+--- a/arch/x86/kernel/time_32.c
++++ b/arch/x86/kernel/time_32.c
+@@ -28,98 +28,20 @@
+  *	serialize accesses to xtime/lost_ticks).
+  */
+ 
+-#include <linux/errno.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/param.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
++#include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/time.h>
+-#include <linux/delay.h>
+-#include <linux/init.h>
+-#include <linux/smp.h>
+-#include <linux/module.h>
+-#include <linux/sysdev.h>
+-#include <linux/bcd.h>
+-#include <linux/efi.h>
+ #include <linux/mca.h>
+ 
+-#include <asm/io.h>
+-#include <asm/smp.h>
+-#include <asm/irq.h>
+-#include <asm/msr.h>
+-#include <asm/delay.h>
+-#include <asm/mpspec.h>
+-#include <asm/uaccess.h>
+-#include <asm/processor.h>
+-#include <asm/timer.h>
+-#include <asm/time.h>
 -
--unsigned short get_fpu_mxcsr( struct task_struct *tsk )
--{
--	if ( cpu_has_xmm ) {
--		return tsk->thread.i387.fxsave.mxcsr;
--	} else {
--		return 0x1f80;
--	}
--}
+-#include "mach_time.h"
 -
--#if 0
+-#include <linux/timex.h>
 -
--void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
--{
--	if ( cpu_has_fxsr ) {
--		tsk->thread.i387.fxsave.cwd = cwd;
--	} else {
--		tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000u);
--	}
--}
+-#include <asm/hpet.h>
 -
--void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
--{
--	if ( cpu_has_fxsr ) {
--		tsk->thread.i387.fxsave.swd = swd;
--	} else {
--		tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000u);
--	}
--}
+ #include <asm/arch_hooks.h>
 -
--void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
--{
--	if ( cpu_has_fxsr ) {
--		tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
--	} else {
--		tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000u);
--	}
--}
+-#include "io_ports.h"
 -
--#endif  /*  0  */
+-#include <asm/i8259.h>
++#include <asm/hpet.h>
++#include <asm/time.h>
+ 
+ #include "do_timer.h"
+ 
+ unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
+ EXPORT_SYMBOL(cpu_khz);
+ 
+-DEFINE_SPINLOCK(rtc_lock);
+-EXPORT_SYMBOL(rtc_lock);
 -
 -/*
-- * FXSR floating point environment conversions.
+- * This is a special lock that is owned by the CPU and holds the index
+- * register we are working with.  It is required for NMI access to the
+- * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
 - */
+-volatile unsigned long cmos_lock = 0;
+-EXPORT_SYMBOL(cmos_lock);
 -
--static int convert_fxsr_to_user( struct _fpstate __user *buf,
--					struct i387_fxsave_struct *fxsave )
+-/* Routines for accessing the CMOS RAM/RTC. */
+-unsigned char rtc_cmos_read(unsigned char addr)
 -{
--	unsigned long env[7];
--	struct _fpreg __user *to;
--	struct _fpxreg *from;
--	int i;
--
--	env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
--	env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
--	env[2] = twd_fxsr_to_i387(fxsave);
--	env[3] = fxsave->fip;
--	env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
--	env[5] = fxsave->foo;
--	env[6] = fxsave->fos;
--
--	if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) )
--		return 1;
--
--	to = &buf->_st[0];
--	from = (struct _fpxreg *) &fxsave->st_space[0];
--	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
--		unsigned long __user *t = (unsigned long __user *)to;
--		unsigned long *f = (unsigned long *)from;
--
--		if (__put_user(*f, t) ||
--				__put_user(*(f + 1), t + 1) ||
--				__put_user(from->exponent, &to->exponent))
--			return 1;
--	}
--	return 0;
+-	unsigned char val;
+-	lock_cmos_prefix(addr);
+-	outb_p(addr, RTC_PORT(0));
+-	val = inb_p(RTC_PORT(1));
+-	lock_cmos_suffix(addr);
+-	return val;
 -}
+-EXPORT_SYMBOL(rtc_cmos_read);
 -
--static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
--					  struct _fpstate __user *buf )
+-void rtc_cmos_write(unsigned char val, unsigned char addr)
 -{
--	unsigned long env[7];
--	struct _fpxreg *to;
--	struct _fpreg __user *from;
--	int i;
--
--	if ( __copy_from_user( env, buf, 7 * sizeof(long) ) )
--		return 1;
--
--	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
--	fxsave->swd = (unsigned short)(env[1] & 0xffff);
--	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
--	fxsave->fip = env[3];
--	fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
--	fxsave->fcs = (env[4] & 0xffff);
--	fxsave->foo = env[5];
--	fxsave->fos = env[6];
--
--	to = (struct _fpxreg *) &fxsave->st_space[0];
--	from = &buf->_st[0];
--	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
--		unsigned long *t = (unsigned long *)to;
--		unsigned long __user *f = (unsigned long __user *)from;
--
--		if (__get_user(*t, f) ||
--				__get_user(*(t + 1), f + 1) ||
--				__get_user(to->exponent, &from->exponent))
--			return 1;
--	}
--	return 0;
+-	lock_cmos_prefix(addr);
+-	outb_p(addr, RTC_PORT(0));
+-	outb_p(val, RTC_PORT(1));
+-	lock_cmos_suffix(addr);
 -}
+-EXPORT_SYMBOL(rtc_cmos_write);
 -
--/*
-- * Signal frame handlers.
-- */
--
--static inline int save_i387_fsave( struct _fpstate __user *buf )
+-static int set_rtc_mmss(unsigned long nowtime)
 -{
--	struct task_struct *tsk = current;
+-	int retval;
+-	unsigned long flags;
 -
--	unlazy_fpu( tsk );
--	tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
--	if ( __copy_to_user( buf, &tsk->thread.i387.fsave,
--			     sizeof(struct i387_fsave_struct) ) )
--		return -1;
--	return 1;
+-	/* gets recalled with irq locally disabled */
+-	/* XXX - does irqsave resolve this? -johnstul */
+-	spin_lock_irqsave(&rtc_lock, flags);
+-	retval = set_wallclock(nowtime);
+-	spin_unlock_irqrestore(&rtc_lock, flags);
+-
+-	return retval;
 -}
 -
--static int save_i387_fxsave( struct _fpstate __user *buf )
+-
+ int timer_ack;
+ 
+ unsigned long profile_pc(struct pt_regs *regs)
+@@ -127,17 +49,17 @@ unsigned long profile_pc(struct pt_regs *regs)
+ 	unsigned long pc = instruction_pointer(regs);
+ 
+ #ifdef CONFIG_SMP
+-	if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs) &&
++	if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->cs) &&
+ 	    in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+-		return *(unsigned long *)(regs->ebp + 4);
++		return *(unsigned long *)(regs->bp + 4);
+ #else
+-		unsigned long *sp = (unsigned long *)&regs->esp;
++		unsigned long *sp = (unsigned long *)&regs->sp;
+ 
+ 		/* Return address is either directly at stack pointer
+-		   or above a saved eflags. Eflags has bits 22-31 zero,
++		   or above a saved flags. Eflags has bits 22-31 zero,
+ 		   kernel addresses don't. */
+- 		if (sp[0] >> 22)
++		if (sp[0] >> 22)
+ 			return sp[0];
+ 		if (sp[1] >> 22)
+ 			return sp[1];
+@@ -193,26 +115,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-/* not static: needed by APM */
+-unsigned long read_persistent_clock(void)
 -{
--	struct task_struct *tsk = current;
--	int err = 0;
+-	unsigned long retval;
+-	unsigned long flags;
 -
--	unlazy_fpu( tsk );
+-	spin_lock_irqsave(&rtc_lock, flags);
 -
--	if ( convert_fxsr_to_user( buf, &tsk->thread.i387.fxsave ) )
--		return -1;
+-	retval = get_wallclock();
 -
--	err |= __put_user( tsk->thread.i387.fxsave.swd, &buf->status );
--	err |= __put_user( X86_FXSR_MAGIC, &buf->magic );
--	if ( err )
--		return -1;
+-	spin_unlock_irqrestore(&rtc_lock, flags);
 -
--	if ( __copy_to_user( &buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
--			     sizeof(struct i387_fxsave_struct) ) )
--		return -1;
--	return 1;
+-	return retval;
 -}
 -
--int save_i387( struct _fpstate __user *buf )
+-int update_persistent_clock(struct timespec now)
 -{
--	if ( !used_math() )
--		return 0;
+-	return set_rtc_mmss(now.tv_sec);
+-}
 -
--	/* This will cause a "finit" to be triggered by the next
--	 * attempted FPU operation by the 'current' process.
--	 */
--	clear_used_math();
+ extern void (*late_time_init)(void);
+ /* Duplicate of time_init() below, with hpet_enable part added */
+ void __init hpet_time_init(void)
+diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
+index 368b194..0380795 100644
+--- a/arch/x86/kernel/time_64.c
++++ b/arch/x86/kernel/time_64.c
+@@ -11,43 +11,18 @@
+  *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
+  */
+ 
+-#include <linux/kernel.h>
+-#include <linux/sched.h>
+-#include <linux/interrupt.h>
++#include <linux/clockchips.h>
+ #include <linux/init.h>
+-#include <linux/mc146818rtc.h>
+-#include <linux/time.h>
+-#include <linux/ioport.h>
++#include <linux/interrupt.h>
+ #include <linux/module.h>
+-#include <linux/device.h>
+-#include <linux/sysdev.h>
+-#include <linux/bcd.h>
+-#include <linux/notifier.h>
+-#include <linux/cpu.h>
+-#include <linux/kallsyms.h>
+-#include <linux/acpi.h>
+-#include <linux/clockchips.h>
++#include <linux/time.h>
+ 
+-#ifdef CONFIG_ACPI
+-#include <acpi/achware.h>	/* for PM timer frequency */
+-#include <acpi/acpi_bus.h>
+-#endif
+ #include <asm/i8253.h>
+-#include <asm/pgtable.h>
+-#include <asm/vsyscall.h>
+-#include <asm/timex.h>
+-#include <asm/proto.h>
+-#include <asm/hpet.h>
+-#include <asm/sections.h>
+-#include <linux/hpet.h>
+-#include <asm/apic.h>
+ #include <asm/hpet.h>
+-#include <asm/mpspec.h>
+ #include <asm/nmi.h>
+ #include <asm/vgtod.h>
 -
--	if ( HAVE_HWFP ) {
--		if ( cpu_has_fxsr ) {
--			return save_i387_fxsave( buf );
--		} else {
--			return save_i387_fsave( buf );
--		}
--	} else {
--		return save_i387_soft( &current->thread.i387.soft, buf );
--	}
--}
+-DEFINE_SPINLOCK(rtc_lock);
+-EXPORT_SYMBOL(rtc_lock);
++#include <asm/time.h>
++#include <asm/timer.h>
+ 
+ volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+ 
+@@ -56,10 +31,10 @@ unsigned long profile_pc(struct pt_regs *regs)
+ 	unsigned long pc = instruction_pointer(regs);
+ 
+ 	/* Assume the lock function has either no stack frame or a copy
+-	   of eflags from PUSHF
++	   of flags from PUSHF
+ 	   Eflags always has bits 22 and up cleared unlike kernel addresses. */
+ 	if (!user_mode(regs) && in_lock_functions(pc)) {
+-		unsigned long *sp = (unsigned long *)regs->rsp;
++		unsigned long *sp = (unsigned long *)regs->sp;
+ 		if (sp[0] >> 22)
+ 			return sp[0];
+ 		if (sp[1] >> 22)
+@@ -69,82 +44,6 @@ unsigned long profile_pc(struct pt_regs *regs)
+ }
+ EXPORT_SYMBOL(profile_pc);
+ 
+-/*
+- * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
+- * ms after the second nowtime has started, because when nowtime is written
+- * into the registers of the CMOS clock, it will jump to the next second
+- * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
+- * sheet for details.
+- */
 -
--static inline int restore_i387_fsave( struct _fpstate __user *buf )
+-static int set_rtc_mmss(unsigned long nowtime)
 -{
--	struct task_struct *tsk = current;
--	clear_fpu( tsk );
--	return __copy_from_user( &tsk->thread.i387.fsave, buf,
--				 sizeof(struct i387_fsave_struct) );
--}
+-	int retval = 0;
+-	int real_seconds, real_minutes, cmos_minutes;
+-	unsigned char control, freq_select;
+-	unsigned long flags;
 -
--static int restore_i387_fxsave( struct _fpstate __user *buf )
--{
--	int err;
--	struct task_struct *tsk = current;
--	clear_fpu( tsk );
--	err = __copy_from_user( &tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
--				sizeof(struct i387_fxsave_struct) );
--	/* mxcsr reserved bits must be masked to zero for security reasons */
--	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
--	return err ? 1 : convert_fxsr_from_user( &tsk->thread.i387.fxsave, buf );
--}
+-/*
+- * set_rtc_mmss is called when irqs are enabled, so disable irqs here
+- */
+-	spin_lock_irqsave(&rtc_lock, flags);
+-/*
+- * Tell the clock it's being set and stop it.
+- */
+-	control = CMOS_READ(RTC_CONTROL);
+-	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);
 -
--int restore_i387( struct _fpstate __user *buf )
--{
--	int err;
+-	freq_select = CMOS_READ(RTC_FREQ_SELECT);
+-	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);
 -
--	if ( HAVE_HWFP ) {
--		if ( cpu_has_fxsr ) {
--			err = restore_i387_fxsave( buf );
--		} else {
--			err = restore_i387_fsave( buf );
--		}
--	} else {
--		err = restore_i387_soft( &current->thread.i387.soft, buf );
--	}
--	set_used_math();
--	return err;
--}
+-	cmos_minutes = CMOS_READ(RTC_MINUTES);
+-		BCD_TO_BIN(cmos_minutes);
 -
 -/*
-- * ptrace request handlers.
+- * since we're only adjusting minutes and seconds, don't interfere with hour
+- * overflow. This avoids messing with unknown time zones but requires your RTC
+- * not to be off by more than 15 minutes. Since we're calling it only when
+- * our clock is externally synchronized using NTP, this shouldn't be a problem.
 - */
 -
--static inline int get_fpregs_fsave( struct user_i387_struct __user *buf,
--				    struct task_struct *tsk )
--{
--	return __copy_to_user( buf, &tsk->thread.i387.fsave,
--			       sizeof(struct user_i387_struct) );
--}
--
--static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf,
--				     struct task_struct *tsk )
--{
--	return convert_fxsr_to_user( (struct _fpstate __user *)buf,
--				     &tsk->thread.i387.fxsave );
--}
+-	real_seconds = nowtime % 60;
+-	real_minutes = nowtime / 60;
+-	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
+-		real_minutes += 30;		/* correct for half hour time zone */
+-	real_minutes %= 60;
 -
--int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk )
--{
--	if ( HAVE_HWFP ) {
--		if ( cpu_has_fxsr ) {
--			return get_fpregs_fxsave( buf, tsk );
--		} else {
--			return get_fpregs_fsave( buf, tsk );
--		}
+-	if (abs(real_minutes - cmos_minutes) >= 30) {
+-		printk(KERN_WARNING "time.c: can't update CMOS clock "
+-		       "from %d to %d\n", cmos_minutes, real_minutes);
+-		retval = -1;
 -	} else {
--		return save_i387_soft( &tsk->thread.i387.soft,
--				       (struct _fpstate __user *)buf );
+-		BIN_TO_BCD(real_seconds);
+-		BIN_TO_BCD(real_minutes);
+-		CMOS_WRITE(real_seconds, RTC_SECONDS);
+-		CMOS_WRITE(real_minutes, RTC_MINUTES);
 -	}
--}
 -
--static inline int set_fpregs_fsave( struct task_struct *tsk,
--				    struct user_i387_struct __user *buf )
--{
--	return __copy_from_user( &tsk->thread.i387.fsave, buf,
--				 sizeof(struct user_i387_struct) );
--}
+-/*
+- * The following flags have to be released exactly in this order, otherwise the
+- * DS12887 (popular MC146818A clone with integrated battery and quartz) will
+- * not reset the oscillator and will not update precisely 500 ms later. You
+- * won't find this mentioned in the Dallas Semiconductor data sheets, but who
+- * believes data sheets anyway ... -- Markus Kuhn
+- */
 -
--static inline int set_fpregs_fxsave( struct task_struct *tsk,
--				     struct user_i387_struct __user *buf )
--{
--	return convert_fxsr_from_user( &tsk->thread.i387.fxsave,
--				       (struct _fpstate __user *)buf );
--}
+-	CMOS_WRITE(control, RTC_CONTROL);
+-	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
 -
--int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf )
--{
--	if ( HAVE_HWFP ) {
--		if ( cpu_has_fxsr ) {
--			return set_fpregs_fxsave( tsk, buf );
--		} else {
--			return set_fpregs_fsave( tsk, buf );
--		}
--	} else {
--		return restore_i387_soft( &tsk->thread.i387.soft,
--					  (struct _fpstate __user *)buf );
--	}
+-	spin_unlock_irqrestore(&rtc_lock, flags);
+-
+-	return retval;
 -}
 -
--int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk )
+-int update_persistent_clock(struct timespec now)
 -{
--	if ( cpu_has_fxsr ) {
--		if (__copy_to_user( buf, &tsk->thread.i387.fxsave,
--				    sizeof(struct user_fxsr_struct) ))
--			return -EFAULT;
--		return 0;
--	} else {
--		return -EIO;
--	}
+-	return set_rtc_mmss(now.tv_sec);
 -}
 -
--int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf )
+ static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
+ {
+ 	add_pda(irq0_irqs, 1);
+@@ -154,67 +53,10 @@ static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-unsigned long read_persistent_clock(void)
 -{
--	int ret = 0;
+-	unsigned int year, mon, day, hour, min, sec;
+-	unsigned long flags;
+-	unsigned century = 0;
 -
--	if ( cpu_has_fxsr ) {
--		if (__copy_from_user( &tsk->thread.i387.fxsave, buf,
--				  sizeof(struct user_fxsr_struct) ))
--			ret = -EFAULT;
--		/* mxcsr reserved bits must be masked to zero for security reasons */
--		tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
--	} else {
--		ret = -EIO;
--	}
--	return ret;
--}
+-	spin_lock_irqsave(&rtc_lock, flags);
+-	/*
+-	 * if UIP is clear, then we have >= 244 microseconds before RTC
+-	 * registers will be updated.  Spec sheet says that this is the
+-	 * reliable way to read RTC - registers invalid (off bus) during update
+-	 */
+-	while ((CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+-		cpu_relax();
 -
--/*
-- * FPU state for core dumps.
-- */
 -
--static inline void copy_fpu_fsave( struct task_struct *tsk,
--				   struct user_i387_struct *fpu )
--{
--	memcpy( fpu, &tsk->thread.i387.fsave,
--		sizeof(struct user_i387_struct) );
--}
+-	/* now read all RTC registers while stable with interrupts disabled */
+-	sec = CMOS_READ(RTC_SECONDS);
+-	min = CMOS_READ(RTC_MINUTES);
+-	hour = CMOS_READ(RTC_HOURS);
+-	day = CMOS_READ(RTC_DAY_OF_MONTH);
+-	mon = CMOS_READ(RTC_MONTH);
+-	year = CMOS_READ(RTC_YEAR);
+-#ifdef CONFIG_ACPI
+-	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+-				acpi_gbl_FADT.century)
+-		century = CMOS_READ(acpi_gbl_FADT.century);
+-#endif
+-	spin_unlock_irqrestore(&rtc_lock, flags);
 -
--static inline void copy_fpu_fxsave( struct task_struct *tsk,
--				   struct user_i387_struct *fpu )
--{
--	unsigned short *to;
--	unsigned short *from;
--	int i;
+-	/*
+-	 * We know that x86-64 always uses BCD format, no need to check the
+-	 * config register.
+-	 */
 -
--	memcpy( fpu, &tsk->thread.i387.fxsave, 7 * sizeof(long) );
+-	BCD_TO_BIN(sec);
+-	BCD_TO_BIN(min);
+-	BCD_TO_BIN(hour);
+-	BCD_TO_BIN(day);
+-	BCD_TO_BIN(mon);
+-	BCD_TO_BIN(year);
 -
--	to = (unsigned short *)&fpu->st_space[0];
--	from = (unsigned short *)&tsk->thread.i387.fxsave.st_space[0];
--	for ( i = 0 ; i < 8 ; i++, to += 5, from += 8 ) {
--		memcpy( to, from, 5 * sizeof(unsigned short) );
+-	if (century) {
+-		BCD_TO_BIN(century);
+-		year += century * 100;
+-		printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
+-	} else {
+-		/*
+-		 * x86-64 systems only exists since 2002.
+-		 * This will work up to Dec 31, 2100
+-		 */
+-		year += 2000;
 -	}
+-
+-	return mktime(year, mon, day, hour, min, sec);
 -}
 -
--int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
--{
--	int fpvalid;
--	struct task_struct *tsk = current;
+ /* calibrate_cpu is used on systems with fixed rate TSCs to determine
+  * processor frequency */
+ #define TICK_COUNT 100000000
+-static unsigned int __init tsc_calibrate_cpu_khz(void)
++unsigned long __init native_calculate_cpu_khz(void)
+ {
+ 	int tsc_start, tsc_now;
+ 	int i, no_ctr_free;
+@@ -241,7 +83,7 @@ static unsigned int __init tsc_calibrate_cpu_khz(void)
+ 	rdtscl(tsc_start);
+ 	do {
+ 		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+-		tsc_now = get_cycles_sync();
++		tsc_now = get_cycles();
+ 	} while ((tsc_now - tsc_start) < TICK_COUNT);
+ 
+ 	local_irq_restore(flags);
+@@ -264,20 +106,22 @@ static struct irqaction irq0 = {
+ 	.name		= "timer"
+ };
+ 
+-void __init time_init(void)
++void __init hpet_time_init(void)
+ {
+ 	if (!hpet_enable())
+ 		setup_pit_timer();
+ 
+ 	setup_irq(0, &irq0);
++}
+ 
++void __init time_init(void)
++{
+ 	tsc_calibrate();
+ 
+ 	cpu_khz = tsc_khz;
+ 	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
+-		boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+-		boot_cpu_data.x86 == 16)
+-		cpu_khz = tsc_calibrate_cpu_khz();
++		(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
++		cpu_khz = calculate_cpu_khz();
+ 
+ 	if (unsynchronized_tsc())
+ 		mark_tsc_unstable("TSCs unsynchronized");
+@@ -290,4 +134,5 @@ void __init time_init(void)
+ 	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
+ 		cpu_khz / 1000, cpu_khz % 1000);
+ 	init_tsc_clocksource();
++	late_time_init = choose_time_init();
+ }
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+new file mode 100644
+index 0000000..6dfd4e7
+--- /dev/null
++++ b/arch/x86/kernel/tls.c
+@@ -0,0 +1,213 @@
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/user.h>
++#include <linux/regset.h>
++
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/processor.h>
++#include <asm/proto.h>
++
++#include "tls.h"
++
++/*
++ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ */
++static int get_free_idx(void)
++{
++	struct thread_struct *t = &current->thread;
++	int idx;
++
++	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
++		if (desc_empty(&t->tls_array[idx]))
++			return idx + GDT_ENTRY_TLS_MIN;
++	return -ESRCH;
++}
++
++static void set_tls_desc(struct task_struct *p, int idx,
++			 const struct user_desc *info, int n)
++{
++	struct thread_struct *t = &p->thread;
++	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
++	int cpu;
++
++	/*
++	 * We must not get preempted while modifying the TLS.
++	 */
++	cpu = get_cpu();
++
++	while (n-- > 0) {
++		if (LDT_empty(info))
++			desc->a = desc->b = 0;
++		else
++			fill_ldt(desc, info);
++		++info;
++		++desc;
++	}
++
++	if (t == &current->thread)
++		load_TLS(t, cpu);
++
++	put_cpu();
++}
++
++/*
++ * Set a given TLS descriptor:
++ */
++int do_set_thread_area(struct task_struct *p, int idx,
++		       struct user_desc __user *u_info,
++		       int can_allocate)
++{
++	struct user_desc info;
++
++	if (copy_from_user(&info, u_info, sizeof(info)))
++		return -EFAULT;
++
++	if (idx == -1)
++		idx = info.entry_number;
++
++	/*
++	 * index -1 means the kernel should try to find and
++	 * allocate an empty descriptor:
++	 */
++	if (idx == -1 && can_allocate) {
++		idx = get_free_idx();
++		if (idx < 0)
++			return idx;
++		if (put_user(idx, &u_info->entry_number))
++			return -EFAULT;
++	}
++
++	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++		return -EINVAL;
++
++	set_tls_desc(p, idx, &info, 1);
++
++	return 0;
++}
++
++asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
++{
++	return do_set_thread_area(current, -1, u_info, 1);
++}
++
++
++/*
++ * Get the current Thread-Local Storage area:
++ */
++
++static void fill_user_desc(struct user_desc *info, int idx,
++			   const struct desc_struct *desc)
++
++{
++	memset(info, 0, sizeof(*info));
++	info->entry_number = idx;
++	info->base_addr = get_desc_base(desc);
++	info->limit = get_desc_limit(desc);
++	info->seg_32bit = desc->d;
++	info->contents = desc->type >> 2;
++	info->read_exec_only = !(desc->type & 2);
++	info->limit_in_pages = desc->g;
++	info->seg_not_present = !desc->p;
++	info->useable = desc->avl;
++#ifdef CONFIG_X86_64
++	info->lm = desc->l;
++#endif
++}
++
++int do_get_thread_area(struct task_struct *p, int idx,
++		       struct user_desc __user *u_info)
++{
++	struct user_desc info;
++
++	if (idx == -1 && get_user(idx, &u_info->entry_number))
++		return -EFAULT;
++
++	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++		return -EINVAL;
++
++	fill_user_desc(&info, idx,
++		       &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);
++
++	if (copy_to_user(u_info, &info, sizeof(info)))
++		return -EFAULT;
++	return 0;
++}
++
++asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
++{
++	return do_get_thread_area(current, -1, u_info);
++}
++
++int regset_tls_active(struct task_struct *target,
++		      const struct user_regset *regset)
++{
++	struct thread_struct *t = &target->thread;
++	int n = GDT_ENTRY_TLS_ENTRIES;
++	while (n > 0 && desc_empty(&t->tls_array[n - 1]))
++		--n;
++	return n;
++}
++
++int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
++		   unsigned int pos, unsigned int count,
++		   void *kbuf, void __user *ubuf)
++{
++	const struct desc_struct *tls;
++
++	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
++	    (pos % sizeof(struct user_desc)) != 0 ||
++	    (count % sizeof(struct user_desc)) != 0)
++		return -EINVAL;
++
++	pos /= sizeof(struct user_desc);
++	count /= sizeof(struct user_desc);
++
++	tls = &target->thread.tls_array[pos];
++
++	if (kbuf) {
++		struct user_desc *info = kbuf;
++		while (count-- > 0)
++			fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
++				       tls++);
++	} else {
++		struct user_desc __user *u_info = ubuf;
++		while (count-- > 0) {
++			struct user_desc info;
++			fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
++			if (__copy_to_user(u_info++, &info, sizeof(info)))
++				return -EFAULT;
++		}
++	}
++
++	return 0;
++}
++
++int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
++		   unsigned int pos, unsigned int count,
++		   const void *kbuf, const void __user *ubuf)
++{
++	struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
++	const struct user_desc *info;
++
++	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
++	    (pos % sizeof(struct user_desc)) != 0 ||
++	    (count % sizeof(struct user_desc)) != 0)
++		return -EINVAL;
++
++	if (kbuf)
++		info = kbuf;
++	else if (__copy_from_user(infobuf, ubuf, count))
++		return -EFAULT;
++	else
++		info = infobuf;
++
++	set_tls_desc(target,
++		     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
++		     info, count / sizeof(struct user_desc));
++
++	return 0;
++}
+diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
+new file mode 100644
+index 0000000..2f083a2
+--- /dev/null
++++ b/arch/x86/kernel/tls.h
+@@ -0,0 +1,21 @@
++/*
++ * Internal declarations for x86 TLS implementation functions.
++ *
++ * Copyright (C) 2007 Red Hat, Inc.  All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ */
++
++#ifndef _ARCH_X86_KERNEL_TLS_H
++
++#include <linux/regset.h>
++
++extern user_regset_active_fn regset_tls_active;
++extern user_regset_get_fn regset_tls_get;
++extern user_regset_set_fn regset_tls_set;
++
++#endif	/* _ARCH_X86_KERNEL_TLS_H */
+diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
+index 7e16d67..78cbb65 100644
+--- a/arch/x86/kernel/topology.c
++++ b/arch/x86/kernel/topology.c
+@@ -31,9 +31,10 @@
+ #include <linux/mmzone.h>
+ #include <asm/cpu.h>
+ 
+-static struct i386_cpu cpu_devices[NR_CPUS];
++static DEFINE_PER_CPU(struct x86_cpu, cpu_devices);
+ 
+-int __cpuinit arch_register_cpu(int num)
++#ifdef CONFIG_HOTPLUG_CPU
++int arch_register_cpu(int num)
+ {
+ 	/*
+ 	 * CPU0 cannot be offlined due to several
+@@ -44,21 +45,23 @@ int __cpuinit arch_register_cpu(int num)
+ 	 * Also certain PCI quirks require not to enable hotplug control
+ 	 * for all CPU's.
+ 	 */
+-#ifdef CONFIG_HOTPLUG_CPU
+ 	if (num)
+-		cpu_devices[num].cpu.hotpluggable = 1;
+-#endif
 -
--	fpvalid = !!used_math();
--	if ( fpvalid ) {
--		unlazy_fpu( tsk );
--		if ( cpu_has_fxsr ) {
--			copy_fpu_fxsave( tsk, fpu );
--		} else {
--			copy_fpu_fsave( tsk, fpu );
--		}
+-	return register_cpu(&cpu_devices[num].cpu, num);
++		per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
++	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
+ }
++EXPORT_SYMBOL(arch_register_cpu);
+ 
+-#ifdef CONFIG_HOTPLUG_CPU
+ void arch_unregister_cpu(int num)
+ {
+-	return unregister_cpu(&cpu_devices[num].cpu);
++	return unregister_cpu(&per_cpu(cpu_devices, num).cpu);
+ }
+-EXPORT_SYMBOL(arch_register_cpu);
+ EXPORT_SYMBOL(arch_unregister_cpu);
++#else
++int arch_register_cpu(int num)
++{
++	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
++}
++EXPORT_SYMBOL(arch_register_cpu);
+ #endif /*CONFIG_HOTPLUG_CPU*/
+ 
+ static int __init topology_init(void)
+diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
+index 02d1e1e..3cf7297 100644
+--- a/arch/x86/kernel/traps_32.c
++++ b/arch/x86/kernel/traps_32.c
+@@ -76,7 +76,8 @@ char ignore_fpu_irq = 0;
+  * F0 0F bug workaround.. We have a special link segment
+  * for this.
+  */
+-struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++gate_desc idt_table[256]
++	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+ 
+ asmlinkage void divide_error(void);
+ asmlinkage void debug(void);
+@@ -101,6 +102,34 @@ asmlinkage void machine_check(void);
+ int kstack_depth_to_print = 24;
+ static unsigned int code_bytes = 64;
+ 
++void printk_address(unsigned long address, int reliable)
++{
++#ifdef CONFIG_KALLSYMS
++	unsigned long offset = 0, symsize;
++	const char *symname;
++	char *modname;
++	char *delim = ":";
++	char namebuf[128];
++	char reliab[4] = "";
++
++	symname = kallsyms_lookup(address, &symsize, &offset,
++					&modname, namebuf);
++	if (!symname) {
++		printk(" [<%08lx>]\n", address);
++		return;
++	}
++	if (!reliable)
++		strcpy(reliab, "? ");
++
++	if (!modname)
++		modname = delim = "";
++	printk(" [<%08lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
++		address, reliab, delim, modname, delim, symname, offset, symsize);
++#else
++	printk(" [<%08lx>]\n", address);
++#endif
++}
++
+ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned size)
+ {
+ 	return	p > (void *)tinfo &&
+@@ -114,48 +143,35 @@ struct stack_frame {
+ };
+ 
+ static inline unsigned long print_context_stack(struct thread_info *tinfo,
+-				unsigned long *stack, unsigned long ebp,
++				unsigned long *stack, unsigned long bp,
+ 				const struct stacktrace_ops *ops, void *data)
+ {
+-#ifdef	CONFIG_FRAME_POINTER
+-	struct stack_frame *frame = (struct stack_frame *)ebp;
+-	while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) {
+-		struct stack_frame *next;
+-		unsigned long addr;
++	struct stack_frame *frame = (struct stack_frame *)bp;
+ 
+-		addr = frame->return_address;
+-		ops->address(data, addr);
+-		/*
+-		 * break out of recursive entries (such as
+-		 * end_of_stack_stop_unwind_function). Also,
+-		 * we can never allow a frame pointer to
+-		 * move downwards!
+-		 */
+-		next = frame->next_frame;
+-		if (next <= frame)
+-			break;
+-		frame = next;
 -	}
+-#else
+ 	while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
+ 		unsigned long addr;
+ 
+-		addr = *stack++;
+-		if (__kernel_text_address(addr))
+-			ops->address(data, addr);
++		addr = *stack;
++		if (__kernel_text_address(addr)) {
++			if ((unsigned long) stack == bp + 4) {
++				ops->address(data, addr, 1);
++				frame = frame->next_frame;
++				bp = (unsigned long) frame;
++			} else {
++				ops->address(data, addr, bp == 0);
++			}
++		}
++		stack++;
+ 	}
+-#endif
+-	return ebp;
++	return bp;
+ }
+ 
+ #define MSG(msg) ops->warning(data, msg)
+ 
+ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+-	        unsigned long *stack,
++		unsigned long *stack, unsigned long bp,
+ 		const struct stacktrace_ops *ops, void *data)
+ {
+-	unsigned long ebp = 0;
 -
--	return fpvalid;
--}
--EXPORT_SYMBOL(dump_fpu);
+ 	if (!task)
+ 		task = current;
+ 
+@@ -163,17 +179,17 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ 		unsigned long dummy;
+ 		stack = &dummy;
+ 		if (task != current)
+-			stack = (unsigned long *)task->thread.esp;
++			stack = (unsigned long *)task->thread.sp;
+ 	}
+ 
+ #ifdef CONFIG_FRAME_POINTER
+-	if (!ebp) {
++	if (!bp) {
+ 		if (task == current) {
+-			/* Grab ebp right from our regs */
+-			asm ("movl %%ebp, %0" : "=r" (ebp) : );
++			/* Grab bp right from our regs */
++			asm ("movl %%ebp, %0" : "=r" (bp) : );
+ 		} else {
+-			/* ebp is the last reg pushed by switch_to */
+-			ebp = *(unsigned long *) task->thread.esp;
++			/* bp is the last reg pushed by switch_to */
++			bp = *(unsigned long *) task->thread.sp;
+ 		}
+ 	}
+ #endif
+@@ -182,7 +198,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ 		struct thread_info *context;
+ 		context = (struct thread_info *)
+ 			((unsigned long)stack & (~(THREAD_SIZE - 1)));
+-		ebp = print_context_stack(context, stack, ebp, ops, data);
++		bp = print_context_stack(context, stack, bp, ops, data);
+ 		/* Should be after the line below, but somewhere
+ 		   in early boot context comes out corrupted and we
+ 		   can't reference it -AK */
+@@ -217,9 +233,11 @@ static int print_trace_stack(void *data, char *name)
+ /*
+  * Print one address/symbol entries per line.
+  */
+-static void print_trace_address(void *data, unsigned long addr)
++static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+ 	printk("%s [<%08lx>] ", (char *)data, addr);
++	if (!reliable)
++		printk("? ");
+ 	print_symbol("%s\n", addr);
+ 	touch_nmi_watchdog();
+ }
+@@ -233,32 +251,32 @@ static const struct stacktrace_ops print_trace_ops = {
+ 
+ static void
+ show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+-		   unsigned long * stack, char *log_lvl)
++		unsigned long *stack, unsigned long bp, char *log_lvl)
+ {
+-	dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
++	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
+ 	printk("%s =======================\n", log_lvl);
+ }
+ 
+ void show_trace(struct task_struct *task, struct pt_regs *regs,
+-		unsigned long * stack)
++		unsigned long *stack, unsigned long bp)
+ {
+-	show_trace_log_lvl(task, regs, stack, "");
++	show_trace_log_lvl(task, regs, stack, bp, "");
+ }
+ 
+ static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+-			       unsigned long *esp, char *log_lvl)
++		       unsigned long *sp, unsigned long bp, char *log_lvl)
+ {
+ 	unsigned long *stack;
+ 	int i;
+ 
+-	if (esp == NULL) {
++	if (sp == NULL) {
+ 		if (task)
+-			esp = (unsigned long*)task->thread.esp;
++			sp = (unsigned long*)task->thread.sp;
+ 		else
+-			esp = (unsigned long *)&esp;
++			sp = (unsigned long *)&sp;
+ 	}
+ 
+-	stack = esp;
++	stack = sp;
+ 	for(i = 0; i < kstack_depth_to_print; i++) {
+ 		if (kstack_end(stack))
+ 			break;
+@@ -267,13 +285,13 @@ static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ 		printk("%08lx ", *stack++);
+ 	}
+ 	printk("\n%sCall Trace:\n", log_lvl);
+-	show_trace_log_lvl(task, regs, esp, log_lvl);
++	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
+ }
+ 
+-void show_stack(struct task_struct *task, unsigned long *esp)
++void show_stack(struct task_struct *task, unsigned long *sp)
+ {
+ 	printk("       ");
+-	show_stack_log_lvl(task, NULL, esp, "");
++	show_stack_log_lvl(task, NULL, sp, 0, "");
+ }
+ 
+ /*
+@@ -282,13 +300,19 @@ void show_stack(struct task_struct *task, unsigned long *esp)
+ void dump_stack(void)
+ {
+ 	unsigned long stack;
++	unsigned long bp = 0;
++
++#ifdef CONFIG_FRAME_POINTER
++	if (!bp)
++		asm("movl %%ebp, %0" : "=r" (bp):);
++#endif
+ 
+ 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+ 		current->pid, current->comm, print_tainted(),
+ 		init_utsname()->release,
+ 		(int)strcspn(init_utsname()->version, " "),
+ 		init_utsname()->version);
+-	show_trace(current, NULL, &stack);
++	show_trace(current, NULL, &stack, bp);
+ }
+ 
+ EXPORT_SYMBOL(dump_stack);
+@@ -307,30 +331,30 @@ void show_registers(struct pt_regs *regs)
+ 	 * time of the fault..
+ 	 */
+ 	if (!user_mode_vm(regs)) {
+-		u8 *eip;
++		u8 *ip;
+ 		unsigned int code_prologue = code_bytes * 43 / 64;
+ 		unsigned int code_len = code_bytes;
+ 		unsigned char c;
+ 
+ 		printk("\n" KERN_EMERG "Stack: ");
+-		show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);
++		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+ 
+ 		printk(KERN_EMERG "Code: ");
+ 
+-		eip = (u8 *)regs->eip - code_prologue;
+-		if (eip < (u8 *)PAGE_OFFSET ||
+-			probe_kernel_address(eip, c)) {
++		ip = (u8 *)regs->ip - code_prologue;
++		if (ip < (u8 *)PAGE_OFFSET ||
++			probe_kernel_address(ip, c)) {
+ 			/* try starting at EIP */
+-			eip = (u8 *)regs->eip;
++			ip = (u8 *)regs->ip;
+ 			code_len = code_len - code_prologue + 1;
+ 		}
+-		for (i = 0; i < code_len; i++, eip++) {
+-			if (eip < (u8 *)PAGE_OFFSET ||
+-				probe_kernel_address(eip, c)) {
++		for (i = 0; i < code_len; i++, ip++) {
++			if (ip < (u8 *)PAGE_OFFSET ||
++				probe_kernel_address(ip, c)) {
+ 				printk(" Bad EIP value.");
+ 				break;
+ 			}
+-			if (eip == (u8 *)regs->eip)
++			if (ip == (u8 *)regs->ip)
+ 				printk("<%02x> ", c);
+ 			else
+ 				printk("%02x ", c);
+@@ -339,18 +363,57 @@ void show_registers(struct pt_regs *regs)
+ 	printk("\n");
+ }	
+ 
+-int is_valid_bugaddr(unsigned long eip)
++int is_valid_bugaddr(unsigned long ip)
+ {
+ 	unsigned short ud2;
+ 
+-	if (eip < PAGE_OFFSET)
++	if (ip < PAGE_OFFSET)
+ 		return 0;
+-	if (probe_kernel_address((unsigned short *)eip, ud2))
++	if (probe_kernel_address((unsigned short *)ip, ud2))
+ 		return 0;
+ 
+ 	return ud2 == 0x0b0f;
+ }
+ 
++static int die_counter;
++
++int __kprobes __die(const char * str, struct pt_regs * regs, long err)
++{
++	unsigned long sp;
++	unsigned short ss;
++
++	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++	printk("PREEMPT ");
++#endif
++#ifdef CONFIG_SMP
++	printk("SMP ");
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	printk("DEBUG_PAGEALLOC");
++#endif
++	printk("\n");
++
++	if (notify_die(DIE_OOPS, str, regs, err,
++				current->thread.trap_no, SIGSEGV) !=
++			NOTIFY_STOP) {
++		show_registers(regs);
++		/* Executive summary in case the oops scrolled away */
++		sp = (unsigned long) (&regs->sp);
++		savesegment(ss, ss);
++		if (user_mode(regs)) {
++			sp = regs->sp;
++			ss = regs->ss & 0xffff;
++		}
++		printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
++		print_symbol("%s", regs->ip);
++		printk(" SS:ESP %04x:%08lx\n", ss, sp);
++		return 0;
++	} else {
++		return 1;
++	}
++}
++
+ /*
+  * This is gone through when something in the kernel has done something bad and
+  * is about to be terminated.
+@@ -366,7 +429,6 @@ void die(const char * str, struct pt_regs * regs, long err)
+ 		.lock_owner =		-1,
+ 		.lock_owner_depth =	0
+ 	};
+-	static int die_counter;
+ 	unsigned long flags;
+ 
+ 	oops_enter();
+@@ -382,43 +444,13 @@ void die(const char * str, struct pt_regs * regs, long err)
+ 		raw_local_irq_save(flags);
+ 
+ 	if (++die.lock_owner_depth < 3) {
+-		unsigned long esp;
+-		unsigned short ss;
++		report_bug(regs->ip, regs);
+ 
+-		report_bug(regs->eip, regs);
 -
--int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
--{
--	int fpvalid = !!tsk_used_math(tsk);
+-		printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
+-		       ++die_counter);
+-#ifdef CONFIG_PREEMPT
+-		printk("PREEMPT ");
+-#endif
+-#ifdef CONFIG_SMP
+-		printk("SMP ");
+-#endif
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+-		printk("DEBUG_PAGEALLOC");
+-#endif
+-		printk("\n");
 -
--	if (fpvalid) {
--		if (tsk == current)
--			unlazy_fpu(tsk);
--		if (cpu_has_fxsr)
--			copy_fpu_fxsave(tsk, fpu);
+-		if (notify_die(DIE_OOPS, str, regs, err,
+-					current->thread.trap_no, SIGSEGV) !=
+-				NOTIFY_STOP) {
+-			show_registers(regs);
+-			/* Executive summary in case the oops scrolled away */
+-			esp = (unsigned long) (&regs->esp);
+-			savesegment(ss, ss);
+-			if (user_mode(regs)) {
+-				esp = regs->esp;
+-				ss = regs->xss & 0xffff;
+-			}
+-			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
+-			print_symbol("%s", regs->eip);
+-			printk(" SS:ESP %04x:%08lx\n", ss, esp);
+-		}
 -		else
--			copy_fpu_fsave(tsk, fpu);
--	}
--	return fpvalid;
--}
--
--int dump_task_extended_fpu(struct task_struct *tsk, struct user_fxsr_struct *fpu)
--{
--	int fpvalid = tsk_used_math(tsk) && cpu_has_fxsr;
--
--	if (fpvalid) {
--		if (tsk == current)
--		       unlazy_fpu(tsk);
--		memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(*fpu));
--	}
--	return fpvalid;
--}
-diff --git a/arch/x86/kernel/i387_64.c b/arch/x86/kernel/i387_64.c
-deleted file mode 100644
-index bfaff28..0000000
---- a/arch/x86/kernel/i387_64.c
-+++ /dev/null
-@@ -1,150 +0,0 @@
++		if (__die(str, regs, err))
+ 			regs = NULL;
+-  	} else
++	} else {
+ 		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
++	}
+ 
+ 	bust_spinlocks(0);
+ 	die.lock_owner = -1;
+@@ -454,7 +486,7 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
+ {
+ 	struct task_struct *tsk = current;
+ 
+-	if (regs->eflags & VM_MASK) {
++	if (regs->flags & VM_MASK) {
+ 		if (vm86)
+ 			goto vm86_trap;
+ 		goto trap_signal;
+@@ -500,7 +532,7 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
+ }
+ 
+ #define DO_ERROR(trapnr, signr, str, name) \
+-fastcall void do_##name(struct pt_regs * regs, long error_code) \
++void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ 						== NOTIFY_STOP) \
+@@ -509,7 +541,7 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ }
+ 
+ #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
+-fastcall void do_##name(struct pt_regs * regs, long error_code) \
++void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ 	siginfo_t info; \
+ 	if (irq) \
+@@ -525,7 +557,7 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ }
+ 
+ #define DO_VM86_ERROR(trapnr, signr, str, name) \
+-fastcall void do_##name(struct pt_regs * regs, long error_code) \
++void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ 						== NOTIFY_STOP) \
+@@ -534,7 +566,7 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ }
+ 
+ #define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+-fastcall void do_##name(struct pt_regs * regs, long error_code) \
++void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ 	siginfo_t info; \
+ 	info.si_signo = signr; \
+@@ -548,13 +580,13 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ 	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
+ }
+ 
+-DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
++DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->ip)
+ #ifndef CONFIG_KPROBES
+ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+ #endif
+ DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
+ DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
+-DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0)
++DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
+ DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
+ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+ DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
+@@ -562,7 +594,7 @@ DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
+ DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
+ DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
+ 
+-fastcall void __kprobes do_general_protection(struct pt_regs * regs,
++void __kprobes do_general_protection(struct pt_regs * regs,
+ 					      long error_code)
+ {
+ 	int cpu = get_cpu();
+@@ -596,7 +628,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
+ 	}
+ 	put_cpu();
+ 
+-	if (regs->eflags & VM_MASK)
++	if (regs->flags & VM_MASK)
+ 		goto gp_in_vm86;
+ 
+ 	if (!user_mode(regs))
+@@ -605,11 +637,14 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
+ 	current->thread.error_code = error_code;
+ 	current->thread.trap_no = 13;
+ 	if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
+-	    printk_ratelimit())
++	    printk_ratelimit()) {
+ 		printk(KERN_INFO
+-		    "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
++		    "%s[%d] general protection ip:%lx sp:%lx error:%lx",
+ 		    current->comm, task_pid_nr(current),
+-		    regs->eip, regs->esp, error_code);
++		    regs->ip, regs->sp, error_code);
++		print_vma_addr(" in ", regs->ip);
++		printk("\n");
++	}
+ 
+ 	force_sig(SIGSEGV, current);
+ 	return;
+@@ -705,8 +740,8 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
+ 	*/
+ 	bust_spinlocks(1);
+ 	printk(KERN_EMERG "%s", msg);
+-	printk(" on CPU%d, eip %08lx, registers:\n",
+-		smp_processor_id(), regs->eip);
++	printk(" on CPU%d, ip %08lx, registers:\n",
++		smp_processor_id(), regs->ip);
+ 	show_registers(regs);
+ 	console_silent();
+ 	spin_unlock(&nmi_print_lock);
+@@ -763,7 +798,7 @@ static __kprobes void default_do_nmi(struct pt_regs * regs)
+ 
+ static int ignore_nmis;
+ 
+-fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
++__kprobes void do_nmi(struct pt_regs * regs, long error_code)
+ {
+ 	int cpu;
+ 
+@@ -792,7 +827,7 @@ void restart_nmi(void)
+ }
+ 
+ #ifdef CONFIG_KPROBES
+-fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
++void __kprobes do_int3(struct pt_regs *regs, long error_code)
+ {
+ 	trace_hardirqs_fixup();
+ 
+@@ -828,7 +863,7 @@ fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
+  * find every occurrence of the TF bit that could be saved away even
+  * by user code)
+  */
+-fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
++void __kprobes do_debug(struct pt_regs * regs, long error_code)
+ {
+ 	unsigned int condition;
+ 	struct task_struct *tsk = current;
+@@ -837,24 +872,30 @@ fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
+ 
+ 	get_debugreg(condition, 6);
+ 
++	/*
++	 * The processor cleared BTF, so don't mark that we need it set.
++	 */
++	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
++	tsk->thread.debugctlmsr = 0;
++
+ 	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+ 					SIGTRAP) == NOTIFY_STOP)
+ 		return;
+ 	/* It's safe to allow irq's after DR6 has been saved */
+-	if (regs->eflags & X86_EFLAGS_IF)
++	if (regs->flags & X86_EFLAGS_IF)
+ 		local_irq_enable();
+ 
+ 	/* Mask out spurious debug traps due to lazy DR7 setting */
+ 	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+-		if (!tsk->thread.debugreg[7])
++		if (!tsk->thread.debugreg7)
+ 			goto clear_dr7;
+ 	}
+ 
+-	if (regs->eflags & VM_MASK)
++	if (regs->flags & VM_MASK)
+ 		goto debug_vm86;
+ 
+ 	/* Save debug status register where ptrace can see it */
+-	tsk->thread.debugreg[6] = condition;
++	tsk->thread.debugreg6 = condition;
+ 
+ 	/*
+ 	 * Single-stepping through TF: make sure we ignore any events in
+@@ -886,7 +927,7 @@ debug_vm86:
+ 
+ clear_TF_reenable:
+ 	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+-	regs->eflags &= ~TF_MASK;
++	regs->flags &= ~TF_MASK;
+ 	return;
+ }
+ 
+@@ -895,7 +936,7 @@ clear_TF_reenable:
+  * the correct behaviour even in the presence of the asynchronous
+  * IRQ13 behaviour
+  */
+-void math_error(void __user *eip)
++void math_error(void __user *ip)
+ {
+ 	struct task_struct * task;
+ 	siginfo_t info;
+@@ -911,7 +952,7 @@ void math_error(void __user *eip)
+ 	info.si_signo = SIGFPE;
+ 	info.si_errno = 0;
+ 	info.si_code = __SI_FAULT;
+-	info.si_addr = eip;
++	info.si_addr = ip;
+ 	/*
+ 	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ 	 * status.  0x3f is the exception bits in these regs, 0x200 is the
+@@ -954,13 +995,13 @@ void math_error(void __user *eip)
+ 	force_sig_info(SIGFPE, &info, task);
+ }
+ 
+-fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++void do_coprocessor_error(struct pt_regs * regs, long error_code)
+ {
+ 	ignore_fpu_irq = 1;
+-	math_error((void __user *)regs->eip);
++	math_error((void __user *)regs->ip);
+ }
+ 
+-static void simd_math_error(void __user *eip)
++static void simd_math_error(void __user *ip)
+ {
+ 	struct task_struct * task;
+ 	siginfo_t info;
+@@ -976,7 +1017,7 @@ static void simd_math_error(void __user *eip)
+ 	info.si_signo = SIGFPE;
+ 	info.si_errno = 0;
+ 	info.si_code = __SI_FAULT;
+-	info.si_addr = eip;
++	info.si_addr = ip;
+ 	/*
+ 	 * The SIMD FPU exceptions are handled a little differently, as there
+ 	 * is only a single status/control register.  Thus, to determine which
+@@ -1008,19 +1049,19 @@ static void simd_math_error(void __user *eip)
+ 	force_sig_info(SIGFPE, &info, task);
+ }
+ 
+-fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++void do_simd_coprocessor_error(struct pt_regs * regs,
+ 					  long error_code)
+ {
+ 	if (cpu_has_xmm) {
+ 		/* Handle SIMD FPU exceptions on PIII+ processors. */
+ 		ignore_fpu_irq = 1;
+-		simd_math_error((void __user *)regs->eip);
++		simd_math_error((void __user *)regs->ip);
+ 	} else {
+ 		/*
+ 		 * Handle strange cache flush from user space exception
+ 		 * in all other cases.  This is undocumented behaviour.
+ 		 */
+-		if (regs->eflags & VM_MASK) {
++		if (regs->flags & VM_MASK) {
+ 			handle_vm86_fault((struct kernel_vm86_regs *)regs,
+ 					  error_code);
+ 			return;
+@@ -1032,7 +1073,7 @@ fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
+ 	}
+ }
+ 
+-fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
++void do_spurious_interrupt_bug(struct pt_regs * regs,
+ 					  long error_code)
+ {
+ #if 0
+@@ -1041,7 +1082,7 @@ fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
+ #endif
+ }
+ 
+-fastcall unsigned long patch_espfix_desc(unsigned long uesp,
++unsigned long patch_espfix_desc(unsigned long uesp,
+ 					  unsigned long kesp)
+ {
+ 	struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
+@@ -1095,51 +1136,17 @@ asmlinkage void math_emulate(long arg)
+ 
+ #endif /* CONFIG_MATH_EMULATION */
+ 
 -/*
-- *  Copyright (C) 1994 Linus Torvalds
-- *  Copyright (C) 2002 Andi Kleen, SuSE Labs
-- *
-- *  Pentium III FXSR, SSE support
-- *  General FPU state handling cleanups
-- *	Gareth Hughes <gareth@valinux.com>, May 2000
-- * 
-- *  x86-64 rework 2002 Andi Kleen. 
-- *  Does direct fxsave in and out of user space now for signal handlers.
-- *  All the FSAVE<->FXSAVE conversion code has been moved to the 32bit emulation,
-- *  the 64bit user space sees a FXSAVE frame directly. 
+- * This needs to use 'idt_table' rather than 'idt', and
+- * thus use the _nonmapped_ version of the IDT, as the
+- * Pentium F0 0F bugfix can have resulted in the mapped
+- * IDT being write-protected.
 - */
--
--#include <linux/sched.h>
--#include <linux/init.h>
--#include <asm/processor.h>
--#include <asm/i387.h>
--#include <asm/sigcontext.h>
--#include <asm/user.h>
--#include <asm/ptrace.h>
--#include <asm/uaccess.h>
--
--unsigned int mxcsr_feature_mask __read_mostly = 0xffffffff;
--
--void mxcsr_feature_mask_init(void)
+-void set_intr_gate(unsigned int n, void *addr)
 -{
--	unsigned int mask;
--	clts();
--	memset(&current->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
--	asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
--	mask = current->thread.i387.fxsave.mxcsr_mask;
--	if (mask == 0) mask = 0x0000ffbf;
--	mxcsr_feature_mask &= mask;
--	stts();
+-	_set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS);
 -}
 -
 -/*
-- * Called at bootup to set up the initial FPU state that is later cloned
-- * into all processes.
+- * This routine sets up an interrupt gate at directory privilege level 3.
 - */
--void __cpuinit fpu_init(void)
--{
--	unsigned long oldcr0 = read_cr0();
--	extern void __bad_fxsave_alignment(void);
--		
--	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
--		__bad_fxsave_alignment();
--	set_in_cr4(X86_CR4_OSFXSR);
--	set_in_cr4(X86_CR4_OSXMMEXCPT);
--
--	write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
--
--	mxcsr_feature_mask_init();
--	/* clean state in init */
--	current_thread_info()->status = 0;
--	clear_used_math();
--}
--
--void init_fpu(struct task_struct *child)
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
 -{
--	if (tsk_used_math(child)) {
--		if (child == current)
--			unlazy_fpu(child);
--		return;
--	}	
--	memset(&child->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
--	child->thread.i387.fxsave.cwd = 0x37f;
--	child->thread.i387.fxsave.mxcsr = 0x1f80;
--	/* only the device not available exception or ptrace can call init_fpu */
--	set_stopped_child_used_math(child);
+-	_set_gate(n, DESCTYPE_INT | DESCTYPE_DPL3, addr, __KERNEL_CS);
 -}
 -
--/*
-- * Signal frame handlers.
-- */
--
--int save_i387(struct _fpstate __user *buf)
+-static void __init set_trap_gate(unsigned int n, void *addr)
 -{
--	struct task_struct *tsk = current;
--	int err = 0;
--
--	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
--			sizeof(tsk->thread.i387.fxsave));
--
--	if ((unsigned long)buf % 16) 
--		printk("save_i387: bad fpstate %p\n",buf); 
--
--	if (!used_math())
--		return 0;
--	clear_used_math(); /* trigger finit */
--	if (task_thread_info(tsk)->status & TS_USEDFPU) {
--		err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
--		if (err) return err;
--		task_thread_info(tsk)->status &= ~TS_USEDFPU;
--		stts();
--	} else {
--		if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
--				   sizeof(struct i387_fxsave_struct)))
--			return -1;
--	}
--	return 1;
+-	_set_gate(n, DESCTYPE_TRAP, addr, __KERNEL_CS);
 -}
 -
--/*
-- * ptrace request handlers.
-- */
--
--int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *tsk)
+-static void __init set_system_gate(unsigned int n, void *addr)
 -{
--	init_fpu(tsk);
--	return __copy_to_user(buf, &tsk->thread.i387.fxsave,
--			       sizeof(struct user_i387_struct)) ? -EFAULT : 0;
+-	_set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS);
 -}
 -
--int set_fpregs(struct task_struct *tsk, struct user_i387_struct __user *buf)
+-static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
 -{
--	if (__copy_from_user(&tsk->thread.i387.fxsave, buf, 
--			     sizeof(struct user_i387_struct)))
--		return -EFAULT;
--		return 0;
+-	_set_gate(n, DESCTYPE_TASK, (void *)0, (gdt_entry<<3));
 -}
 -
--/*
-- * FPU state for core dumps.
-- */
--
--int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
--{
--	struct task_struct *tsk = current;
--
--	if (!used_math())
--		return 0;
--
--	unlazy_fpu(tsk);
--	memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(struct user_i387_struct)); 
--	return 1; 
+ 
+ void __init trap_init(void)
+ {
+ 	int i;
+ 
+ #ifdef CONFIG_EISA
+-	void __iomem *p = ioremap(0x0FFFD9, 4);
++	void __iomem *p = early_ioremap(0x0FFFD9, 4);
+ 	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
+ 		EISA_bus = 1;
+ 	}
+-	iounmap(p);
++	early_iounmap(p, 4);
+ #endif
+ 
+ #ifdef CONFIG_X86_LOCAL_APIC
+diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
+index cc68b92..efc66df 100644
+--- a/arch/x86/kernel/traps_64.c
++++ b/arch/x86/kernel/traps_64.c
+@@ -74,22 +74,24 @@ asmlinkage void alignment_check(void);
+ asmlinkage void machine_check(void);
+ asmlinkage void spurious_interrupt_bug(void);
+ 
++static unsigned int code_bytes = 64;
++
+ static inline void conditional_sti(struct pt_regs *regs)
+ {
+-	if (regs->eflags & X86_EFLAGS_IF)
++	if (regs->flags & X86_EFLAGS_IF)
+ 		local_irq_enable();
+ }
+ 
+ static inline void preempt_conditional_sti(struct pt_regs *regs)
+ {
+ 	preempt_disable();
+-	if (regs->eflags & X86_EFLAGS_IF)
++	if (regs->flags & X86_EFLAGS_IF)
+ 		local_irq_enable();
+ }
+ 
+ static inline void preempt_conditional_cli(struct pt_regs *regs)
+ {
+-	if (regs->eflags & X86_EFLAGS_IF)
++	if (regs->flags & X86_EFLAGS_IF)
+ 		local_irq_disable();
+ 	/* Make sure to not schedule here because we could be running
+ 	   on an exception stack. */
+@@ -98,14 +100,15 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
+ 
+ int kstack_depth_to_print = 12;
+ 
+-#ifdef CONFIG_KALLSYMS
+-void printk_address(unsigned long address)
++void printk_address(unsigned long address, int reliable)
+ {
++#ifdef CONFIG_KALLSYMS
+ 	unsigned long offset = 0, symsize;
+ 	const char *symname;
+ 	char *modname;
+ 	char *delim = ":";
+-	char namebuf[128];
++	char namebuf[KSYM_NAME_LEN];
++	char reliab[4] = "";
+ 
+ 	symname = kallsyms_lookup(address, &symsize, &offset,
+ 					&modname, namebuf);
+@@ -113,17 +116,17 @@ void printk_address(unsigned long address)
+ 		printk(" [<%016lx>]\n", address);
+ 		return;
+ 	}
++	if (!reliable)
++		strcpy(reliab, "? ");
++
+ 	if (!modname)
+-		modname = delim = ""; 		
+-	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
+-		address, delim, modname, delim, symname, offset, symsize);
 -}
--
--int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
++		modname = delim = "";
++	printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
++		address, reliab, delim, modname, delim, symname, offset, symsize);
+ #else
+-void printk_address(unsigned long address)
 -{
--	int fpvalid = !!tsk_used_math(tsk);
--
--	if (fpvalid) {
--		if (tsk == current)
--			unlazy_fpu(tsk);
--		memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(struct user_i387_struct)); 	
--}
--	return fpvalid;
+ 	printk(" [<%016lx>]\n", address);
 -}
-diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c
-index 2931383..dbd6c1d 100644
---- a/arch/x86/kernel/i8237.c
-+++ b/arch/x86/kernel/i8237.c
-@@ -51,7 +51,7 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
+ #endif
++}
+ 
+ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
+ 					unsigned *usedp, char **idp)
+@@ -208,14 +211,53 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
+  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+  */
+ 
+-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++static inline int valid_stack_ptr(struct thread_info *tinfo,
++			void *p, unsigned int size, void *end)
++{
++	void *t = tinfo;
++	if (end) {
++		if (p < end && p >= (end-THREAD_SIZE))
++			return 1;
++		else
++			return 0;
++	}
++	return p > t && p < t + THREAD_SIZE - size;
++}
++
++/* The form of the top of the frame on the stack */
++struct stack_frame {
++	struct stack_frame *next_frame;
++	unsigned long return_address;
++};
++
++
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++				unsigned long *stack, unsigned long bp,
++				const struct stacktrace_ops *ops, void *data,
++				unsigned long *end)
+ {
+-	void *t = (void *)tinfo;
+-        return p > t && p < t + THREAD_SIZE - 3;
++	struct stack_frame *frame = (struct stack_frame *)bp;
++
++	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
++		unsigned long addr;
++
++		addr = *stack;
++		if (__kernel_text_address(addr)) {
++			if ((unsigned long) stack == bp + 8) {
++				ops->address(data, addr, 1);
++				frame = frame->next_frame;
++				bp = (unsigned long) frame;
++			} else {
++				ops->address(data, addr, bp == 0);
++			}
++		}
++		stack++;
++	}
++	return bp;
  }
  
- static struct sysdev_class i8237_sysdev_class = {
--	set_kset_name("i8237"),
-+	.name = "i8237",
- 	.suspend = i8237A_suspend,
- 	.resume = i8237A_resume,
- };
-diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
-index a42c807..ef62b07 100644
---- a/arch/x86/kernel/i8253.c
-+++ b/arch/x86/kernel/i8253.c
-@@ -13,10 +13,17 @@
- #include <asm/delay.h>
- #include <asm/i8253.h>
- #include <asm/io.h>
-+#include <asm/hpet.h>
+ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+-		unsigned long *stack,
++		unsigned long *stack, unsigned long bp,
+ 		const struct stacktrace_ops *ops, void *data)
+ {
+ 	const unsigned cpu = get_cpu();
+@@ -225,36 +267,28 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
  
- DEFINE_SPINLOCK(i8253_lock);
- EXPORT_SYMBOL(i8253_lock);
+ 	if (!tsk)
+ 		tsk = current;
++	tinfo = task_thread_info(tsk);
  
-+#ifdef CONFIG_X86_32
-+static void pit_disable_clocksource(void);
-+#else
-+static inline void pit_disable_clocksource(void) { }
+ 	if (!stack) {
+ 		unsigned long dummy;
+ 		stack = &dummy;
+ 		if (tsk && tsk != current)
+-			stack = (unsigned long *)tsk->thread.rsp;
++			stack = (unsigned long *)tsk->thread.sp;
+ 	}
+ 
+-	/*
+-	 * Print function call entries within a stack. 'cond' is the
+-	 * "end of stackframe" condition, that the 'stack++'
+-	 * iteration will eventually trigger.
+-	 */
+-#define HANDLE_STACK(cond) \
+-	do while (cond) { \
+-		unsigned long addr = *stack++; \
+-		/* Use unlocked access here because except for NMIs	\
+-		   we should be already protected against module unloads */ \
+-		if (__kernel_text_address(addr)) { \
+-			/* \
+-			 * If the address is either in the text segment of the \
+-			 * kernel, or in the region which contains vmalloc'ed \
+-			 * memory, it *may* be the address of a calling \
+-			 * routine; if so, print it so that someone tracing \
+-			 * down the cause of the crash will be able to figure \
+-			 * out the call path that was taken. \
+-			 */ \
+-			ops->address(data, addr);   \
+-		} \
+-	} while (0)
++#ifdef CONFIG_FRAME_POINTER
++	if (!bp) {
++		if (tsk == current) {
++			/* Grab bp right from our regs */
++			asm("movq %%rbp, %0" : "=r" (bp):);
++		} else {
++			/* bp is the last reg pushed by switch_to */
++			bp = *(unsigned long *) tsk->thread.sp;
++		}
++	}
 +#endif
 +
- /*
-  * HPET replaces the PIT, when enabled. So we need to know, which of
-  * the two timers is used
-@@ -31,38 +38,38 @@ struct clock_event_device *global_clock_event;
- static void init_pit_timer(enum clock_event_mode mode,
- 			   struct clock_event_device *evt)
++
+ 
+ 	/*
+ 	 * Print function call entries in all stacks, starting at the
+@@ -270,7 +304,9 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+ 		if (estack_end) {
+ 			if (ops->stack(data, id) < 0)
+ 				break;
+-			HANDLE_STACK (stack < estack_end);
++
++			bp = print_context_stack(tinfo, stack, bp, ops,
++							data, estack_end);
+ 			ops->stack(data, "<EOE>");
+ 			/*
+ 			 * We link to the next stack via the
+@@ -288,7 +324,8 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+ 			if (stack >= irqstack && stack < irqstack_end) {
+ 				if (ops->stack(data, "IRQ") < 0)
+ 					break;
+-				HANDLE_STACK (stack < irqstack_end);
++				bp = print_context_stack(tinfo, stack, bp,
++						ops, data, irqstack_end);
+ 				/*
+ 				 * We link to the next stack (which would be
+ 				 * the process stack normally) the last
+@@ -306,9 +343,7 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+ 	/*
+ 	 * This handles the process stack:
+ 	 */
+-	tinfo = task_thread_info(tsk);
+-	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
+-#undef HANDLE_STACK
++	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
+ 	put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+@@ -331,10 +366,10 @@ static int print_trace_stack(void *data, char *name)
+ 	return 0;
+ }
+ 
+-static void print_trace_address(void *data, unsigned long addr)
++static void print_trace_address(void *data, unsigned long addr, int reliable)
  {
--	unsigned long flags;
--
--	spin_lock_irqsave(&i8253_lock, flags);
-+	spin_lock(&i8253_lock);
+ 	touch_nmi_watchdog();
+-	printk_address(addr);
++	printk_address(addr, reliable);
+ }
  
- 	switch(mode) {
- 	case CLOCK_EVT_MODE_PERIODIC:
- 		/* binary, mode 2, LSB/MSB, ch 0 */
--		outb_p(0x34, PIT_MODE);
--		outb_p(LATCH & 0xff , PIT_CH0);	/* LSB */
--		outb(LATCH >> 8 , PIT_CH0);	/* MSB */
-+		outb_pit(0x34, PIT_MODE);
-+		outb_pit(LATCH & 0xff , PIT_CH0);	/* LSB */
-+		outb_pit(LATCH >> 8 , PIT_CH0);		/* MSB */
- 		break;
+ static const struct stacktrace_ops print_trace_ops = {
+@@ -345,15 +380,17 @@ static const struct stacktrace_ops print_trace_ops = {
+ };
  
- 	case CLOCK_EVT_MODE_SHUTDOWN:
- 	case CLOCK_EVT_MODE_UNUSED:
- 		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
- 		    evt->mode == CLOCK_EVT_MODE_ONESHOT) {
--			outb_p(0x30, PIT_MODE);
--			outb_p(0, PIT_CH0);
--			outb_p(0, PIT_CH0);
-+			outb_pit(0x30, PIT_MODE);
-+			outb_pit(0, PIT_CH0);
-+			outb_pit(0, PIT_CH0);
- 		}
-+		pit_disable_clocksource();
- 		break;
+ void
+-show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
++show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
++		unsigned long bp)
+ {
+ 	printk("\nCall Trace:\n");
+-	dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
++	dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL);
+ 	printk("\n");
+ }
  
- 	case CLOCK_EVT_MODE_ONESHOT:
- 		/* One shot setup */
--		outb_p(0x38, PIT_MODE);
-+		pit_disable_clocksource();
-+		outb_pit(0x38, PIT_MODE);
- 		break;
+ static void
+-_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
++_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
++							unsigned long bp)
+ {
+ 	unsigned long *stack;
+ 	int i;
+@@ -364,14 +401,14 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
+ 	// debugging aid: "show_stack(NULL, NULL);" prints the
+ 	// back trace for this cpu.
  
- 	case CLOCK_EVT_MODE_RESUME:
- 		/* Nothing to do here */
- 		break;
+-	if (rsp == NULL) {
++	if (sp == NULL) {
+ 		if (tsk)
+-			rsp = (unsigned long *)tsk->thread.rsp;
++			sp = (unsigned long *)tsk->thread.sp;
+ 		else
+-			rsp = (unsigned long *)&rsp;
++			sp = (unsigned long *)&sp;
  	}
--	spin_unlock_irqrestore(&i8253_lock, flags);
-+	spin_unlock(&i8253_lock);
+ 
+-	stack = rsp;
++	stack = sp;
+ 	for(i=0; i < kstack_depth_to_print; i++) {
+ 		if (stack >= irqstack && stack <= irqstack_end) {
+ 			if (stack == irqstack_end) {
+@@ -387,12 +424,12 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
+ 		printk(" %016lx", *stack++);
+ 		touch_nmi_watchdog();
+ 	}
+-	show_trace(tsk, regs, rsp);
++	show_trace(tsk, regs, sp, bp);
+ }
+ 
+-void show_stack(struct task_struct *tsk, unsigned long * rsp)
++void show_stack(struct task_struct *tsk, unsigned long * sp)
+ {
+-	_show_stack(tsk, NULL, rsp);
++	_show_stack(tsk, NULL, sp, 0);
  }
  
  /*
-@@ -72,12 +79,10 @@ static void init_pit_timer(enum clock_event_mode mode,
-  */
- static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
+@@ -401,13 +438,19 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
+ void dump_stack(void)
  {
--	unsigned long flags;
--
--	spin_lock_irqsave(&i8253_lock, flags);
--	outb_p(delta & 0xff , PIT_CH0);	/* LSB */
--	outb(delta >> 8 , PIT_CH0);	/* MSB */
--	spin_unlock_irqrestore(&i8253_lock, flags);
-+	spin_lock(&i8253_lock);
-+	outb_pit(delta & 0xff , PIT_CH0);	/* LSB */
-+	outb_pit(delta >> 8 , PIT_CH0);		/* MSB */
-+	spin_unlock(&i8253_lock);
+ 	unsigned long dummy;
++	unsigned long bp = 0;
++
++#ifdef CONFIG_FRAME_POINTER
++	if (!bp)
++		asm("movq %%rbp, %0" : "=r" (bp):);
++#endif
  
- 	return 0;
+ 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+ 		current->pid, current->comm, print_tainted(),
+ 		init_utsname()->release,
+ 		(int)strcspn(init_utsname()->version, " "),
+ 		init_utsname()->version);
+-	show_trace(NULL, NULL, &dummy);
++	show_trace(NULL, NULL, &dummy, bp);
  }
-@@ -148,15 +153,15 @@ static cycle_t pit_read(void)
- 	 * count), it cannot be newer.
- 	 */
- 	jifs = jiffies;
--	outb_p(0x00, PIT_MODE);	/* latch the count ASAP */
--	count = inb_p(PIT_CH0);	/* read the latched count */
--	count |= inb_p(PIT_CH0) << 8;
-+	outb_pit(0x00, PIT_MODE);	/* latch the count ASAP */
-+	count = inb_pit(PIT_CH0);	/* read the latched count */
-+	count |= inb_pit(PIT_CH0) << 8;
- 
- 	/* VIA686a test code... reset the latch if count > max + 1 */
- 	if (count > LATCH) {
--		outb_p(0x34, PIT_MODE);
--		outb_p(LATCH & 0xff, PIT_CH0);
--		outb(LATCH >> 8, PIT_CH0);
-+		outb_pit(0x34, PIT_MODE);
-+		outb_pit(LATCH & 0xff, PIT_CH0);
-+		outb_pit(LATCH >> 8, PIT_CH0);
- 		count = LATCH - 1;
- 	}
  
-@@ -195,9 +200,28 @@ static struct clocksource clocksource_pit = {
- 	.shift	= 20,
- };
+ EXPORT_SYMBOL(dump_stack);
+@@ -415,12 +458,15 @@ EXPORT_SYMBOL(dump_stack);
+ void show_registers(struct pt_regs *regs)
+ {
+ 	int i;
+-	int in_kernel = !user_mode(regs);
+-	unsigned long rsp;
++	unsigned long sp;
+ 	const int cpu = smp_processor_id();
+ 	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
++	u8 *ip;
++	unsigned int code_prologue = code_bytes * 43 / 64;
++	unsigned int code_len = code_bytes;
  
-+static void pit_disable_clocksource(void)
-+{
-+	/*
-+	 * Use mult to check whether it is registered or not
-+	 */
-+	if (clocksource_pit.mult) {
-+		clocksource_unregister(&clocksource_pit);
-+		clocksource_pit.mult = 0;
-+	}
-+}
+-	rsp = regs->rsp;
++	sp = regs->sp;
++	ip = (u8 *) regs->ip - code_prologue;
+ 	printk("CPU %d ", cpu);
+ 	__show_regs(regs);
+ 	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
+@@ -430,45 +476,43 @@ void show_registers(struct pt_regs *regs)
+ 	 * When in-kernel, we also print out the stack and code at the
+ 	 * time of the fault..
+ 	 */
+-	if (in_kernel) {
++	if (!user_mode(regs)) {
++		unsigned char c;
+ 		printk("Stack: ");
+-		_show_stack(NULL, regs, (unsigned long*)rsp);
+-
+-		printk("\nCode: ");
+-		if (regs->rip < PAGE_OFFSET)
+-			goto bad;
+-
+-		for (i=0; i<20; i++) {
+-			unsigned char c;
+-			if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
+-bad:
++		_show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
++		printk("\n");
 +
- static int __init init_pit_clocksource(void)
++		printk(KERN_EMERG "Code: ");
++		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
++			/* try starting at RIP */
++			ip = (u8 *) regs->ip;
++			code_len = code_len - code_prologue + 1;
++		}
++		for (i = 0; i < code_len; i++, ip++) {
++			if (ip < (u8 *)PAGE_OFFSET ||
++					probe_kernel_address(ip, c)) {
+ 				printk(" Bad RIP value.");
+ 				break;
+ 			}
+-			printk("%02x ", c);
++			if (ip == (u8 *)regs->ip)
++				printk("<%02x> ", c);
++			else
++				printk("%02x ", c);
+ 		}
+ 	}
+ 	printk("\n");
+ }	
+ 
+-int is_valid_bugaddr(unsigned long rip)
++int is_valid_bugaddr(unsigned long ip)
  {
--	if (num_possible_cpus() > 1) /* PIT does not scale! */
-+	 /*
-+	  * Several reasons not to register PIT as a clocksource:
-+	  *
-+	  * - On SMP PIT does not scale due to i8253_lock
-+	  * - when HPET is enabled
-+	  * - when local APIC timer is active (PIT is switched off)
-+	  */
-+	if (num_possible_cpus() > 1 || is_hpet_enabled() ||
-+	    pit_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
+ 	unsigned short ud2;
+ 
+-	if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
++	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
  		return 0;
  
- 	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
-diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
-index f634fc7..2d25b77 100644
---- a/arch/x86/kernel/i8259_32.c
-+++ b/arch/x86/kernel/i8259_32.c
-@@ -21,8 +21,6 @@
- #include <asm/arch_hooks.h>
- #include <asm/i8259.h>
+ 	return ud2 == 0x0b0f;
+ }
  
--#include <io_ports.h>
+-#ifdef CONFIG_BUG
+-void out_of_line_bug(void)
+-{ 
+-	BUG(); 
+-} 
+-EXPORT_SYMBOL(out_of_line_bug);
+-#endif
 -
- /*
-  * This is the 'legacy' 8259A Programmable Interrupt Controller,
-  * present in the majority of PC/AT boxes.
-@@ -258,7 +256,7 @@ static int i8259A_shutdown(struct sys_device *dev)
+ static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+ static int die_owner = -1;
+ static unsigned int die_nest_count;
+@@ -496,7 +540,7 @@ unsigned __kprobes long oops_begin(void)
+ 	return flags;
  }
  
- static struct sysdev_class i8259_sysdev_class = {
--	set_kset_name("i8259"),
-+	.name = "i8259",
- 	.suspend = i8259A_suspend,
- 	.resume = i8259A_resume,
- 	.shutdown = i8259A_shutdown,
-@@ -291,20 +289,20 @@ void init_8259A(int auto_eoi)
- 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
+-void __kprobes oops_end(unsigned long flags)
++void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ { 
+ 	die_owner = -1;
+ 	bust_spinlocks(0);
+@@ -505,12 +549,17 @@ void __kprobes oops_end(unsigned long flags)
+ 		/* Nest count reaches zero, release the lock. */
+ 		__raw_spin_unlock(&die_lock);
+ 	raw_local_irq_restore(flags);
++	if (!regs) {
++		oops_exit();
++		return;
++	}
+ 	if (panic_on_oops)
+ 		panic("Fatal exception");
+ 	oops_exit();
++	do_exit(signr);
+ }
  
- 	/*
--	 * outb_p - this has to work on a wide range of PC hardware.
-+	 * outb_pic - this has to work on a wide range of PC hardware.
- 	 */
--	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
--	outb_p(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
--	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
-+	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
-+	outb_pic(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
-+	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
- 	if (auto_eoi)	/* master does Auto EOI */
--		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
-+		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
- 	else		/* master expects normal EOI */
--		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
-+		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+-void __kprobes __die(const char * str, struct pt_regs * regs, long err)
++int __kprobes __die(const char * str, struct pt_regs * regs, long err)
+ {
+ 	static int die_counter;
+ 	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
+@@ -524,15 +573,17 @@ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
+ 	printk("DEBUG_PAGEALLOC");
+ #endif
+ 	printk("\n");
+-	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
++	if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
++		return 1;
+ 	show_registers(regs);
+ 	add_taint(TAINT_DIE);
+ 	/* Executive summary in case the oops scrolled away */
+ 	printk(KERN_ALERT "RIP ");
+-	printk_address(regs->rip); 
+-	printk(" RSP <%016lx>\n", regs->rsp); 
++	printk_address(regs->ip, 1);
++	printk(" RSP <%016lx>\n", regs->sp);
+ 	if (kexec_should_crash(current))
+ 		crash_kexec(regs);
++	return 0;
+ }
  
--	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
--	outb_p(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
--	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
--	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
-+	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
-+	outb_pic(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
-+	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
-+	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
- 	if (auto_eoi)
- 		/*
- 		 * In AEOI mode we just have to mask the interrupt
-@@ -341,7 +339,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
- 	outb(0,0xF0);
- 	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
- 		return IRQ_NONE;
--	math_error((void __user *)get_irq_regs()->eip);
-+	math_error((void __user *)get_irq_regs()->ip);
- 	return IRQ_HANDLED;
+ void die(const char * str, struct pt_regs * regs, long err)
+@@ -540,11 +591,11 @@ void die(const char * str, struct pt_regs * regs, long err)
+ 	unsigned long flags = oops_begin();
+ 
+ 	if (!user_mode(regs))
+-		report_bug(regs->rip, regs);
++		report_bug(regs->ip, regs);
+ 
+-	__die(str, regs, err);
+-	oops_end(flags);
+-	do_exit(SIGSEGV); 
++	if (__die(str, regs, err))
++		regs = NULL;
++	oops_end(flags, regs, SIGSEGV);
  }
  
-diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
-index 3f27ea0..fa57a15 100644
---- a/arch/x86/kernel/i8259_64.c
-+++ b/arch/x86/kernel/i8259_64.c
-@@ -21,6 +21,7 @@
- #include <asm/delay.h>
- #include <asm/desc.h>
- #include <asm/apic.h>
-+#include <asm/i8259.h>
+ void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
+@@ -561,10 +612,10 @@ void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
+ 		crash_kexec(regs);
+ 	if (do_panic || panic_on_oops)
+ 		panic("Non maskable interrupt");
+-	oops_end(flags);
++	oops_end(flags, NULL, SIGBUS);
+ 	nmi_exit();
+ 	local_irq_enable();
+-	do_exit(SIGSEGV);
++	do_exit(SIGBUS);
+ }
  
- /*
-  * Common place to define all x86 IRQ vectors
-@@ -48,7 +49,7 @@
-  */
+ static void __kprobes do_trap(int trapnr, int signr, char *str,
+@@ -588,11 +639,14 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
+ 		tsk->thread.trap_no = trapnr;
  
- /*
-- * The IO-APIC gives us many more interrupt sources. Most of these 
-+ * The IO-APIC gives us many more interrupt sources. Most of these
-  * are unused but an SMP system is supposed to have enough memory ...
-  * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
-  * across the spectrum, so we really want to be prepared to get all
-@@ -76,7 +77,7 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
- 	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
+ 		if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
+-		    printk_ratelimit())
++		    printk_ratelimit()) {
+ 			printk(KERN_INFO
+-			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
++			       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+ 			       tsk->comm, tsk->pid, str,
+-			       regs->rip, regs->rsp, error_code); 
++			       regs->ip, regs->sp, error_code);
++			print_vma_addr(" in ", regs->ip);
++			printk("\n");
++		}
  
- /* for the irq vectors */
--static void (*interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
-+static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
- 					  IRQLIST_16(0x2), IRQLIST_16(0x3),
- 	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
- 	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
-@@ -114,11 +115,7 @@ static struct irq_chip i8259A_chip = {
- /*
-  * This contains the irq mask for both 8259A irq controllers,
-  */
--static unsigned int cached_irq_mask = 0xffff;
--
--#define __byte(x,y) 	(((unsigned char *)&(y))[x])
--#define cached_21	(__byte(0,cached_irq_mask))
--#define cached_A1	(__byte(1,cached_irq_mask))
-+unsigned int cached_irq_mask = 0xffff;
+ 		if (info)
+ 			force_sig_info(signr, info, tsk);
+@@ -602,19 +656,12 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
+ 	}
  
- /*
-  * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
-@@ -139,9 +136,9 @@ void disable_8259A_irq(unsigned int irq)
- 	spin_lock_irqsave(&i8259A_lock, flags);
- 	cached_irq_mask |= mask;
- 	if (irq & 8)
--		outb(cached_A1,0xA1);
-+		outb(cached_slave_mask, PIC_SLAVE_IMR);
- 	else
--		outb(cached_21,0x21);
-+		outb(cached_master_mask, PIC_MASTER_IMR);
- 	spin_unlock_irqrestore(&i8259A_lock, flags);
+ 
+-	/* kernel trap */ 
+-	{	     
+-		const struct exception_table_entry *fixup;
+-		fixup = search_exception_tables(regs->rip);
+-		if (fixup)
+-			regs->rip = fixup->fixup;
+-		else {
+-			tsk->thread.error_code = error_code;
+-			tsk->thread.trap_no = trapnr;
+-			die(str, regs, error_code);
+-		}
+-		return;
++	if (!fixup_exception(regs)) {
++		tsk->thread.error_code = error_code;
++		tsk->thread.trap_no = trapnr;
++		die(str, regs, error_code);
+ 	}
++	return;
  }
  
-@@ -153,9 +150,9 @@ void enable_8259A_irq(unsigned int irq)
- 	spin_lock_irqsave(&i8259A_lock, flags);
- 	cached_irq_mask &= mask;
- 	if (irq & 8)
--		outb(cached_A1,0xA1);
-+		outb(cached_slave_mask, PIC_SLAVE_IMR);
- 	else
--		outb(cached_21,0x21);
-+		outb(cached_master_mask, PIC_MASTER_IMR);
- 	spin_unlock_irqrestore(&i8259A_lock, flags);
+ #define DO_ERROR(trapnr, signr, str, name) \
+@@ -643,10 +690,10 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+ 	do_trap(trapnr, signr, str, regs, error_code, &info); \
  }
  
-@@ -167,9 +164,9 @@ int i8259A_irq_pending(unsigned int irq)
+-DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
++DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->ip)
+ DO_ERROR( 4, SIGSEGV, "overflow", overflow)
+ DO_ERROR( 5, SIGSEGV, "bounds", bounds)
+-DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
++DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
+ DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
+ DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
+ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+@@ -694,32 +741,28 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
+ 		tsk->thread.trap_no = 13;
  
- 	spin_lock_irqsave(&i8259A_lock, flags);
- 	if (irq < 8)
--		ret = inb(0x20) & mask;
-+		ret = inb(PIC_MASTER_CMD) & mask;
- 	else
--		ret = inb(0xA0) & (mask >> 8);
-+		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- 	spin_unlock_irqrestore(&i8259A_lock, flags);
+ 		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+-		    printk_ratelimit())
++		    printk_ratelimit()) {
+ 			printk(KERN_INFO
+-		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
++		       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
+ 			       tsk->comm, tsk->pid,
+-			       regs->rip, regs->rsp, error_code); 
++			       regs->ip, regs->sp, error_code);
++			print_vma_addr(" in ", regs->ip);
++			printk("\n");
++		}
  
- 	return ret;
-@@ -196,14 +193,14 @@ static inline int i8259A_irq_real(unsigned int irq)
- 	int irqmask = 1<<irq;
+ 		force_sig(SIGSEGV, tsk);
+ 		return;
+ 	} 
  
- 	if (irq < 8) {
--		outb(0x0B,0x20);		/* ISR register */
--		value = inb(0x20) & irqmask;
--		outb(0x0A,0x20);		/* back to the IRR register */
-+		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
-+		value = inb(PIC_MASTER_CMD) & irqmask;
-+		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
- 		return value;
- 	}
--	outb(0x0B,0xA0);		/* ISR register */
--	value = inb(0xA0) & (irqmask >> 8);
--	outb(0x0A,0xA0);		/* back to the IRR register */
-+	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
-+	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
-+	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
- 	return value;
+-	/* kernel gp */
+-	{
+-		const struct exception_table_entry *fixup;
+-		fixup = search_exception_tables(regs->rip);
+-		if (fixup) {
+-			regs->rip = fixup->fixup;
+-			return;
+-		}
++	if (fixup_exception(regs))
++		return;
+ 
+-		tsk->thread.error_code = error_code;
+-		tsk->thread.trap_no = 13;
+-		if (notify_die(DIE_GPF, "general protection fault", regs,
+-					error_code, 13, SIGSEGV) == NOTIFY_STOP)
+-			return;
+-		die("general protection fault", regs, error_code);
+-	}
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 13;
++	if (notify_die(DIE_GPF, "general protection fault", regs,
++				error_code, 13, SIGSEGV) == NOTIFY_STOP)
++		return;
++	die("general protection fault", regs, error_code);
  }
  
-@@ -240,14 +237,17 @@ static void mask_and_ack_8259A(unsigned int irq)
+ static __kprobes void
+@@ -832,15 +875,15 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+ {
+ 	struct pt_regs *regs = eregs;
+ 	/* Did already sync */
+-	if (eregs == (struct pt_regs *)eregs->rsp)
++	if (eregs == (struct pt_regs *)eregs->sp)
+ 		;
+ 	/* Exception from user space */
+ 	else if (user_mode(eregs))
+ 		regs = task_pt_regs(current);
+ 	/* Exception from kernel and interrupts are enabled. Move to
+  	   kernel process stack. */
+-	else if (eregs->eflags & X86_EFLAGS_IF)
+-		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
++	else if (eregs->flags & X86_EFLAGS_IF)
++		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
+ 	if (eregs != regs)
+ 		*regs = *eregs;
+ 	return regs;
+@@ -858,6 +901,12 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
  
- handle_real_irq:
- 	if (irq & 8) {
--		inb(0xA1);		/* DUMMY - (do we need this?) */
--		outb(cached_A1,0xA1);
--		outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
--		outb(0x62,0x20);	/* 'Specific EOI' to master-IRQ2 */
-+		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
-+		outb(cached_slave_mask, PIC_SLAVE_IMR);
-+		/* 'Specific EOI' to slave */
-+		outb(0x60+(irq&7),PIC_SLAVE_CMD);
-+		 /* 'Specific EOI' to master-IRQ2 */
-+		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD);
- 	} else {
--		inb(0x21);		/* DUMMY - (do we need this?) */
--		outb(cached_21,0x21);
--		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
-+		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
-+		outb(cached_master_mask, PIC_MASTER_IMR);
-+		/* 'Specific EOI' to master */
-+		outb(0x60+irq,PIC_MASTER_CMD);
- 	}
- 	spin_unlock_irqrestore(&i8259A_lock, flags);
- 	return;
-@@ -270,7 +270,8 @@ spurious_8259A_irq:
- 		 * lets ACK and report it. [once per IRQ]
- 		 */
- 		if (!(spurious_irq_mask & irqmask)) {
--			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
-+			printk(KERN_DEBUG
-+			       "spurious 8259A interrupt: IRQ%d.\n", irq);
- 			spurious_irq_mask |= irqmask;
- 		}
- 		atomic_inc(&irq_err_count);
-@@ -283,51 +284,6 @@ spurious_8259A_irq:
- 	}
- }
+ 	get_debugreg(condition, 6);
  
--void init_8259A(int auto_eoi)
--{
--	unsigned long flags;
--
--	i8259A_auto_eoi = auto_eoi;
--
--	spin_lock_irqsave(&i8259A_lock, flags);
--
--	outb(0xff, 0x21);	/* mask all of 8259A-1 */
--	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
--
--	/*
--	 * outb_p - this has to work on a wide range of PC hardware.
--	 */
--	outb_p(0x11, 0x20);	/* ICW1: select 8259A-1 init */
--	outb_p(IRQ0_VECTOR, 0x21);	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
--	outb_p(0x04, 0x21);	/* 8259A-1 (the master) has a slave on IR2 */
--	if (auto_eoi)
--		outb_p(0x03, 0x21);	/* master does Auto EOI */
--	else
--		outb_p(0x01, 0x21);	/* master expects normal EOI */
--
--	outb_p(0x11, 0xA0);	/* ICW1: select 8259A-2 init */
--	outb_p(IRQ8_VECTOR, 0xA1);	/* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
--	outb_p(0x02, 0xA1);	/* 8259A-2 is a slave on master's IR2 */
--	outb_p(0x01, 0xA1);	/* (slave's support for AEOI in flat mode
--				    is to be investigated) */
--
--	if (auto_eoi)
++	/*
++	 * The processor cleared BTF, so don't mark that we need it set.
++	 */
++	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
++	tsk->thread.debugctlmsr = 0;
++
+ 	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+ 						SIGTRAP) == NOTIFY_STOP)
+ 		return;
+@@ -873,27 +922,14 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
+ 
+ 	tsk->thread.debugreg6 = condition;
+ 
+-	/* Mask out spurious TF errors due to lazy TF clearing */
++
++	/*
++	 * Single-stepping through TF: make sure we ignore any events in
++	 * kernel space (but re-enable TF when returning to user mode).
++	 */
+ 	if (condition & DR_STEP) {
 -		/*
--		 * in AEOI mode we just have to mask the interrupt
--		 * when acking.
+-		 * The TF error should be masked out only if the current
+-		 * process is not traced and if the TRAP flag has been set
+-		 * previously by a tracing process (condition detected by
+-		 * the PT_DTRACE flag); remember that the i386 TRAP flag
+-		 * can be modified by the process itself in user mode,
+-		 * allowing programs to debug themselves without the ptrace()
+-		 * interface.
 -		 */
--		i8259A_chip.mask_ack = disable_8259A_irq;
--	else
--		i8259A_chip.mask_ack = mask_and_ack_8259A;
--
--	udelay(100);		/* wait for 8259A to initialize */
--
--	outb(cached_21, 0x21);	/* restore master IRQ mask */
--	outb(cached_A1, 0xA1);	/* restore slave IRQ mask */
--
--	spin_unlock_irqrestore(&i8259A_lock, flags);
--}
--
- static char irq_trigger[2];
- /**
-  * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
-@@ -364,13 +320,13 @@ static int i8259A_shutdown(struct sys_device *dev)
- 	 * the kernel initialization code can get it
- 	 * out of.
- 	 */
--	outb(0xff, 0x21);	/* mask all of 8259A-1 */
--	outb(0xff, 0xA1);	/* mask all of 8259A-1 */
-+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
- 	return 0;
+                 if (!user_mode(regs))
+                        goto clear_TF_reenable;
+-		/*
+-		 * Was the TF flag set by a debugger? If so, clear it now,
+-		 * so that register information is correct.
+-		 */
+-		if (tsk->ptrace & PT_DTRACE) {
+-			regs->eflags &= ~TF_MASK;
+-			tsk->ptrace &= ~PT_DTRACE;
+-		}
+ 	}
+ 
+ 	/* Ok, finally something we can handle */
+@@ -902,7 +938,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
+ 	info.si_signo = SIGTRAP;
+ 	info.si_errno = 0;
+ 	info.si_code = TRAP_BRKPT;
+-	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
++	info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
+ 	force_sig_info(SIGTRAP, &info, tsk);
+ 
+ clear_dr7:
+@@ -912,18 +948,15 @@ clear_dr7:
+ 
+ clear_TF_reenable:
+ 	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+-	regs->eflags &= ~TF_MASK;
++	regs->flags &= ~X86_EFLAGS_TF;
+ 	preempt_conditional_cli(regs);
  }
  
- static struct sysdev_class i8259_sysdev_class = {
--	set_kset_name("i8259"),
-+	.name = "i8259",
- 	.suspend = i8259A_suspend,
- 	.resume = i8259A_resume,
- 	.shutdown = i8259A_shutdown,
-@@ -391,6 +347,58 @@ static int __init i8259A_init_sysfs(void)
+ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
+ {
+-	const struct exception_table_entry *fixup;
+-	fixup = search_exception_tables(regs->rip);
+-	if (fixup) {
+-		regs->rip = fixup->fixup;
++	if (fixup_exception(regs))
+ 		return 1;
+-	}
++
+ 	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
+ 	/* Illegal floating point operation in the kernel */
+ 	current->thread.trap_no = trapnr;
+@@ -938,7 +971,7 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
+  */
+ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
+ {
+-	void __user *rip = (void __user *)(regs->rip);
++	void __user *ip = (void __user *)(regs->ip);
+ 	struct task_struct * task;
+ 	siginfo_t info;
+ 	unsigned short cwd, swd;
+@@ -958,7 +991,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
+ 	info.si_signo = SIGFPE;
+ 	info.si_errno = 0;
+ 	info.si_code = __SI_FAULT;
+-	info.si_addr = rip;
++	info.si_addr = ip;
+ 	/*
+ 	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ 	 * status.  0x3f is the exception bits in these regs, 0x200 is the
+@@ -1007,7 +1040,7 @@ asmlinkage void bad_intr(void)
  
- device_initcall(i8259A_init_sysfs);
+ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
+ {
+-	void __user *rip = (void __user *)(regs->rip);
++	void __user *ip = (void __user *)(regs->ip);
+ 	struct task_struct * task;
+ 	siginfo_t info;
+ 	unsigned short mxcsr;
+@@ -1027,7 +1060,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
+ 	info.si_signo = SIGFPE;
+ 	info.si_errno = 0;
+ 	info.si_code = __SI_FAULT;
+-	info.si_addr = rip;
++	info.si_addr = ip;
+ 	/*
+ 	 * The SIMD FPU exceptions are handled a little differently, as there
+ 	 * is only a single status/control register.  Thus, to determine which
+@@ -1089,6 +1122,7 @@ asmlinkage void math_state_restore(void)
+ 	task_thread_info(me)->status |= TS_USEDFPU;
+ 	me->fpu_counter++;
+ }
++EXPORT_SYMBOL_GPL(math_state_restore);
  
-+void init_8259A(int auto_eoi)
+ void __init trap_init(void)
+ {
+@@ -1144,3 +1178,14 @@ static int __init kstack_setup(char *s)
+ 	return 0;
+ }
+ early_param("kstack", kstack_setup);
++
++
++static int __init code_bytes_setup(char *s)
 +{
-+	unsigned long flags;
++	code_bytes = simple_strtoul(s, NULL, 0);
++	if (code_bytes > 8192)
++		code_bytes = 8192;
 +
-+	i8259A_auto_eoi = auto_eoi;
++	return 1;
++}
++__setup("code_bytes=", code_bytes_setup);
+diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
+index 9ebc0da..43517e3 100644
+--- a/arch/x86/kernel/tsc_32.c
++++ b/arch/x86/kernel/tsc_32.c
+@@ -5,6 +5,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/init.h>
+ #include <linux/dmi.h>
++#include <linux/percpu.h>
+ 
+ #include <asm/delay.h>
+ #include <asm/tsc.h>
+@@ -23,8 +24,6 @@ static int tsc_enabled;
+ unsigned int tsc_khz;
+ EXPORT_SYMBOL_GPL(tsc_khz);
+ 
+-int tsc_disable;
+-
+ #ifdef CONFIG_X86_TSC
+ static int __init tsc_setup(char *str)
+ {
+@@ -39,8 +38,7 @@ static int __init tsc_setup(char *str)
+  */
+ static int __init tsc_setup(char *str)
+ {
+-	tsc_disable = 1;
+-
++	setup_clear_cpu_cap(X86_FEATURE_TSC);
+ 	return 1;
+ }
+ #endif
+@@ -80,13 +78,31 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
+  *
+ *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
+  */
+-unsigned long cyc2ns_scale __read_mostly;
+ 
+-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
++DEFINE_PER_CPU(unsigned long, cyc2ns);
+ 
+-static inline void set_cyc2ns_scale(unsigned long cpu_khz)
++static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+ {
+-	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
++	unsigned long flags, prev_scale, *scale;
++	unsigned long long tsc_now, ns_now;
 +
-+	spin_lock_irqsave(&i8259A_lock, flags);
++	local_irq_save(flags);
++	sched_clock_idle_sleep_event();
 +
-+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
++	scale = &per_cpu(cyc2ns, cpu);
++
++	rdtscll(tsc_now);
++	ns_now = __cycles_2_ns(tsc_now);
++
++	prev_scale = *scale;
++	if (cpu_khz)
++		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
 +
 +	/*
-+	 * outb_pic - this has to work on a wide range of PC hardware.
++	 * Start smoothly with the new frequency:
 +	 */
-+	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
-+	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
-+	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
-+	/* 8259A-1 (the master) has a slave on IR2 */
-+	outb_pic(0x04, PIC_MASTER_IMR);
-+	if (auto_eoi)	/* master does Auto EOI */
-+		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
-+	else		/* master expects normal EOI */
-+		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
-+
-+	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
-+	/* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
-+	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
-+	/* 8259A-2 is a slave on master's IR2 */
-+	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
-+	/* (slave's support for AEOI in flat mode is to be investigated) */
-+	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
++	sched_clock_idle_wakeup_event(0);
++	local_irq_restore(flags);
+ }
+ 
+ /*
+@@ -239,7 +255,9 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+ 						ref_freq, freq->new);
+ 			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
+ 				tsc_khz = cpu_khz;
+-				set_cyc2ns_scale(cpu_khz);
++				preempt_disable();
++				set_cyc2ns_scale(cpu_khz, smp_processor_id());
++				preempt_enable();
+ 				/*
+ 				 * TSC based sched_clock turns
+ 				 * to junk w/ cpufreq
+@@ -333,6 +351,11 @@ __cpuinit int unsynchronized_tsc(void)
+ {
+ 	if (!cpu_has_tsc || tsc_unstable)
+ 		return 1;
 +
-+	if (auto_eoi)
-+		/*
-+		 * In AEOI mode we just have to mask the interrupt
-+		 * when acking.
-+		 */
-+		i8259A_chip.mask_ack = disable_8259A_irq;
-+	else
-+		i8259A_chip.mask_ack = mask_and_ack_8259A;
++	/* Anything with constant TSC should be synchronized */
++	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
++		return 0;
 +
-+	udelay(100);		/* wait for 8259A to initialize */
+ 	/*
+ 	 * Intel systems are normally all synchronized.
+ 	 * Exceptions must mark TSC as unstable:
+@@ -367,7 +390,9 @@ static inline void check_geode_tsc_reliable(void) { }
+ 
+ void __init tsc_init(void)
+ {
+-	if (!cpu_has_tsc || tsc_disable)
++	int cpu;
 +
-+	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
-+	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
++	if (!cpu_has_tsc)
+ 		goto out_no_tsc;
+ 
+ 	cpu_khz = calculate_cpu_khz();
+@@ -380,7 +405,15 @@ void __init tsc_init(void)
+ 				(unsigned long)cpu_khz / 1000,
+ 				(unsigned long)cpu_khz % 1000);
+ 
+-	set_cyc2ns_scale(cpu_khz);
++	/*
++	 * Secondary CPUs do not run through tsc_init(), so set up
++	 * all the scale factors for all CPUs, assuming the same
++	 * speed as the bootup CPU. (cpufreq notifiers will fix this
++	 * up if their speed diverges)
++	 */
++	for_each_possible_cpu(cpu)
++		set_cyc2ns_scale(cpu_khz, cpu);
 +
-+	spin_unlock_irqrestore(&i8259A_lock, flags);
-+}
+ 	use_tsc_delay();
+ 
+ 	/* Check and install the TSC clocksource */
+@@ -403,10 +436,5 @@ void __init tsc_init(void)
+ 	return;
+ 
+ out_no_tsc:
+-	/*
+-	 * Set the tsc_disable flag if there's no TSC support, this
+-	 * makes it a fast flag for the kernel to see whether it
+-	 * should be using the TSC.
+-	 */
+-	tsc_disable = 1;
++	setup_clear_cpu_cap(X86_FEATURE_TSC);
+ }
+diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
+index 9c70af4..947554d 100644
+--- a/arch/x86/kernel/tsc_64.c
++++ b/arch/x86/kernel/tsc_64.c
+@@ -10,6 +10,7 @@
+ 
+ #include <asm/hpet.h>
+ #include <asm/timex.h>
++#include <asm/timer.h>
+ 
+ static int notsc __initdata = 0;
+ 
+@@ -18,19 +19,51 @@ EXPORT_SYMBOL(cpu_khz);
+ unsigned int tsc_khz;
+ EXPORT_SYMBOL(tsc_khz);
+ 
+-static unsigned int cyc2ns_scale __read_mostly;
++/* Accelerators for sched_clock()
++ * convert from cycles(64bits) => nanoseconds (64bits)
++ *  basic equation:
++ *		ns = cycles / (freq / ns_per_sec)
++ *		ns = cycles * (ns_per_sec / freq)
++ *		ns = cycles * (10^9 / (cpu_khz * 10^3))
++ *		ns = cycles * (10^6 / cpu_khz)
++ *
++ *	Then we use scaling math (suggested by george at mvista.com) to get:
++ *		ns = cycles * (10^6 * SC / cpu_khz) / SC
++ *		ns = cycles * cyc2ns_scale / SC
++ *
++ *	And since SC is a constant power of two, we can convert the div
++ *  into a shift.
++ *
++ *  We can use khz divisor instead of mhz to keep a better precision, since
++ *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
++ *  (mathieu.desnoyers at polymtl.ca)
++ *
++ *			-johnstul at us.ibm.com "math is hard, lets go shopping!"
++ */
++DEFINE_PER_CPU(unsigned long, cyc2ns);
+ 
+-static inline void set_cyc2ns_scale(unsigned long khz)
++static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+ {
+-	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
+-}
++	unsigned long flags, prev_scale, *scale;
++	unsigned long long tsc_now, ns_now;
+ 
+-static unsigned long long cycles_2_ns(unsigned long long cyc)
+-{
+-	return (cyc * cyc2ns_scale) >> NS_SCALE;
++	local_irq_save(flags);
++	sched_clock_idle_sleep_event();
 +
++	scale = &per_cpu(cyc2ns, cpu);
 +
++	rdtscll(tsc_now);
++	ns_now = __cycles_2_ns(tsc_now);
 +
++	prev_scale = *scale;
++	if (cpu_khz)
++		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
 +
- /*
-  * IRQ2 is cascade interrupt to second interrupt controller
-  */
-@@ -448,7 +456,9 @@ void __init init_ISA_irqs (void)
- 	}
++	sched_clock_idle_wakeup_event(0);
++	local_irq_restore(flags);
  }
  
--void __init init_IRQ(void)
-+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+-unsigned long long sched_clock(void)
++unsigned long long native_sched_clock(void)
+ {
+ 	unsigned long a = 0;
+ 
+@@ -44,12 +77,27 @@ unsigned long long sched_clock(void)
+ 	return cycles_2_ns(a);
+ }
+ 
++/* We need to define a real function for sched_clock, to override the
++   weak default version */
++#ifdef CONFIG_PARAVIRT
++unsigned long long sched_clock(void)
++{
++	return paravirt_sched_clock();
++}
++#else
++unsigned long long
++sched_clock(void) __attribute__((alias("native_sched_clock")));
++#endif
 +
-+void __init native_init_IRQ(void)
++
+ static int tsc_unstable;
+ 
+-inline int check_tsc_unstable(void)
++int check_tsc_unstable(void)
  {
+ 	return tsc_unstable;
+ }
++EXPORT_SYMBOL_GPL(check_tsc_unstable);
++
+ #ifdef CONFIG_CPU_FREQ
+ 
+ /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
+@@ -100,7 +148,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ 			mark_tsc_unstable("cpufreq changes");
+ 	}
+ 
+-	set_cyc2ns_scale(tsc_khz_ref);
++	preempt_disable();
++	set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
++	preempt_enable();
+ 
+ 	return 0;
+ }
+@@ -133,12 +183,12 @@ static unsigned long __init tsc_read_refs(unsigned long *pm,
  	int i;
  
-diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
-index 468c9c4..5b3ce79 100644
---- a/arch/x86/kernel/init_task.c
-+++ b/arch/x86/kernel/init_task.c
-@@ -15,7 +15,6 @@ static struct files_struct init_files = INIT_FILES;
- static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
- static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
- struct mm_struct init_mm = INIT_MM(init_mm);
--EXPORT_SYMBOL(init_mm);
+ 	for (i = 0; i < MAX_RETRIES; i++) {
+-		t1 = get_cycles_sync();
++		t1 = get_cycles();
+ 		if (hpet)
+ 			*hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
+ 		else
+ 			*pm = acpi_pm_read_early();
+-		t2 = get_cycles_sync();
++		t2 = get_cycles();
+ 		if ((t2 - t1) < SMI_TRESHOLD)
+ 			return t2;
+ 	}
+@@ -151,7 +201,7 @@ static unsigned long __init tsc_read_refs(unsigned long *pm,
+ void __init tsc_calibrate(void)
+ {
+ 	unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2;
+-	int hpet = is_hpet_enabled();
++	int hpet = is_hpet_enabled(), cpu;
  
- /*
-  * Initial thread structure.
-diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
-index a6b1490..4ca5486 100644
---- a/arch/x86/kernel/io_apic_32.c
-+++ b/arch/x86/kernel/io_apic_32.c
-@@ -35,6 +35,7 @@
- #include <linux/htirq.h>
- #include <linux/freezer.h>
- #include <linux/kthread.h>
-+#include <linux/jiffies.h>	/* time_after() */
+ 	local_irq_save(flags);
  
- #include <asm/io.h>
- #include <asm/smp.h>
-@@ -48,8 +49,6 @@
- #include <mach_apic.h>
- #include <mach_apicdef.h>
+@@ -162,9 +212,9 @@ void __init tsc_calibrate(void)
+ 	outb(0xb0, 0x43);
+ 	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
+ 	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
+-	tr1 = get_cycles_sync();
++	tr1 = get_cycles();
+ 	while ((inb(0x61) & 0x20) == 0);
+-	tr2 = get_cycles_sync();
++	tr2 = get_cycles();
  
--#include "io_ports.h"
--
- int (*ioapic_renumber_irq)(int ioapic, int irq);
- atomic_t irq_mis_count;
+ 	tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
  
-@@ -351,7 +350,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
- # include <asm/processor.h>	/* kernel_thread() */
- # include <linux/kernel_stat.h>	/* kstat */
- # include <linux/slab.h>		/* kmalloc() */
--# include <linux/timer.h>	/* time_after() */
-+# include <linux/timer.h>
-  
- #define IRQBALANCE_CHECK_ARCH -999
- #define MAX_BALANCED_IRQ_INTERVAL	(5*HZ)
-@@ -727,7 +726,7 @@ late_initcall(balanced_irq_init);
- #endif /* CONFIG_SMP */
+@@ -206,7 +256,9 @@ void __init tsc_calibrate(void)
+ 	}
  
- #ifndef CONFIG_SMP
--void fastcall send_IPI_self(int vector)
-+void send_IPI_self(int vector)
- {
- 	unsigned int cfg;
+ 	tsc_khz = tsc2 / tsc1;
+-	set_cyc2ns_scale(tsc_khz);
++
++	for_each_possible_cpu(cpu)
++		set_cyc2ns_scale(tsc_khz, cpu);
+ }
  
-@@ -1900,7 +1899,7 @@ static int __init timer_irq_works(void)
- 	 * might have cached one ExtINT interrupt.  Finally, at
- 	 * least one tick may be lost due to delays.
- 	 */
--	if (jiffies - t1 > 4)
-+	if (time_after(jiffies, t1 + 4))
+ /*
+@@ -222,17 +274,9 @@ __cpuinit int unsynchronized_tsc(void)
+ 	if (apic_is_clustered_box())
  		return 1;
+ #endif
+-	/* Most intel systems have synchronized TSCs except for
+-	   multi node systems */
+-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+-#ifdef CONFIG_ACPI
+-		/* But TSC doesn't tick in C3 so don't use it there */
+-		if (acpi_gbl_FADT.header.length > 0 &&
+-		    acpi_gbl_FADT.C3latency < 1000)
+-			return 1;
+-#endif
++
++	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+ 		return 0;
+-	}
  
- 	return 0;
-@@ -2080,7 +2079,7 @@ static struct irq_chip lapic_chip __read_mostly = {
- 	.eoi		= ack_apic,
- };
+ 	/* Assume multi socket systems are not synchronized */
+ 	return num_present_cpus() > 1;
+@@ -250,13 +294,13 @@ __setup("notsc", notsc_setup);
+ /* clock source code: */
+ static cycle_t read_tsc(void)
+ {
+-	cycle_t ret = (cycle_t)get_cycles_sync();
++	cycle_t ret = (cycle_t)get_cycles();
+ 	return ret;
+ }
  
--static void setup_nmi (void)
-+static void __init setup_nmi(void)
+ static cycle_t __vsyscall_fn vread_tsc(void)
  {
- 	/*
-  	 * Dirty trick to enable the NMI watchdog ...
-@@ -2093,7 +2092,7 @@ static void setup_nmi (void)
- 	 */ 
- 	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
+-	cycle_t ret = (cycle_t)get_cycles_sync();
++	cycle_t ret = (cycle_t)vget_cycles();
+ 	return ret;
+ }
  
--	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
-+	enable_NMI_through_LVT0();
+diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
+index 9125efe..0577825 100644
+--- a/arch/x86/kernel/tsc_sync.c
++++ b/arch/x86/kernel/tsc_sync.c
+@@ -46,7 +46,7 @@ static __cpuinit void check_tsc_warp(void)
+ 	cycles_t start, now, prev, end;
+ 	int i;
  
- 	apic_printk(APIC_VERBOSE, " done.\n");
- }
-@@ -2401,7 +2400,7 @@ static int ioapic_resume(struct sys_device *dev)
+-	start = get_cycles_sync();
++	start = get_cycles();
+ 	/*
+ 	 * The measurement runs for 20 msecs:
+ 	 */
+@@ -61,18 +61,18 @@ static __cpuinit void check_tsc_warp(void)
+ 		 */
+ 		__raw_spin_lock(&sync_lock);
+ 		prev = last_tsc;
+-		now = get_cycles_sync();
++		now = get_cycles();
+ 		last_tsc = now;
+ 		__raw_spin_unlock(&sync_lock);
+ 
+ 		/*
+ 		 * Be nice every now and then (and also check whether
+-		 * measurement is done [we also insert a 100 million
++		 * measurement is done [we also insert a 10 million
+ 		 * loops safety exit, so we dont lock up in case the
+ 		 * TSC readout is totally broken]):
+ 		 */
+ 		if (unlikely(!(i & 7))) {
+-			if (now > end || i > 100000000)
++			if (now > end || i > 10000000)
+ 				break;
+ 			cpu_relax();
+ 			touch_nmi_watchdog();
+@@ -87,7 +87,11 @@ static __cpuinit void check_tsc_warp(void)
+ 			nr_warps++;
+ 			__raw_spin_unlock(&sync_lock);
+ 		}
+-
++	}
++	if (!(now-start)) {
++		printk("Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
++			now-start, end-start);
++		WARN_ON(1);
+ 	}
  }
  
- static struct sysdev_class ioapic_sysdev_class = {
--	set_kset_name("ioapic"),
-+	.name = "ioapic",
- 	.suspend = ioapic_suspend,
- 	.resume = ioapic_resume,
- };
-diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
-index cbac167..1627c0d 100644
---- a/arch/x86/kernel/io_apic_64.c
-+++ b/arch/x86/kernel/io_apic_64.c
-@@ -32,9 +32,11 @@
- #include <linux/msi.h>
- #include <linux/htirq.h>
- #include <linux/dmar.h>
-+#include <linux/jiffies.h>
- #ifdef CONFIG_ACPI
- #include <acpi/acpi_bus.h>
- #endif
-+#include <linux/bootmem.h>
+@@ -129,24 +133,24 @@ void __cpuinit check_tsc_sync_source(int cpu)
+ 	while (atomic_read(&stop_count) != cpus-1)
+ 		cpu_relax();
  
- #include <asm/idle.h>
- #include <asm/io.h>
-@@ -1069,7 +1071,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
- 	v = apic_read(APIC_LVR);
- 	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
- 	ver = GET_APIC_VERSION(v);
--	maxlvt = get_maxlvt();
-+	maxlvt = lapic_get_maxlvt();
+-	/*
+-	 * Reset it - just in case we boot another CPU later:
+-	 */
+-	atomic_set(&start_count, 0);
+-
+ 	if (nr_warps) {
+ 		printk("\n");
+ 		printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs,"
+ 				    " turning off TSC clock.\n", max_warp);
+ 		mark_tsc_unstable("check_tsc_sync_source failed");
+-		nr_warps = 0;
+-		max_warp = 0;
+-		last_tsc = 0;
+ 	} else {
+ 		printk(" passed.\n");
+ 	}
  
- 	v = apic_read(APIC_TASKPRI);
- 	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-@@ -1171,7 +1173,7 @@ void __apicdebuginit print_PIC(void)
+ 	/*
++	 * Reset it - just in case we boot another CPU later:
++	 */
++	atomic_set(&start_count, 0);
++	nr_warps = 0;
++	max_warp = 0;
++	last_tsc = 0;
++
++	/*
+ 	 * Let the target continue with the bootup:
+ 	 */
+ 	atomic_inc(&stop_count);
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index 157e4be..738c210 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -70,10 +70,10 @@
+ /*
+  * 8- and 16-bit register defines..
+  */
+-#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
+-#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
+-#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
+-#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))
++#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
++#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
++#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
++#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
  
- #endif  /*  0  */
+ /*
+  * virtual flags (16 and 32-bit versions)
+@@ -93,12 +93,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
+ {
+ 	int ret = 0;
  
--static void __init enable_IO_APIC(void)
-+void __init enable_IO_APIC(void)
+-	/* kernel_vm86_regs is missing xgs, so copy everything up to
++	/* kernel_vm86_regs is missing gs, so copy everything up to
+ 	   (but not including) orig_eax, and then rest including orig_eax. */
+-	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
+-	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
++	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
++	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
+ 			    sizeof(struct kernel_vm86_regs) -
+-			    offsetof(struct kernel_vm86_regs, pt.orig_eax));
++			    offsetof(struct kernel_vm86_regs, pt.orig_ax));
+ 
+ 	return ret;
+ }
+@@ -110,18 +110,17 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
  {
- 	union IO_APIC_reg_01 reg_01;
- 	int i8259_apic, i8259_pin;
-@@ -1298,7 +1300,7 @@ static int __init timer_irq_works(void)
- 	 */
+ 	int ret = 0;
  
- 	/* jiffies wrap? */
--	if (jiffies - t1 > 4)
-+	if (time_after(jiffies, t1 + 4))
- 		return 1;
- 	return 0;
+-	/* copy eax-xfs inclusive */
+-	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
+-	/* copy orig_eax-__gsh+extra */
+-	ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
++	/* copy ax-fs inclusive */
++	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
++	/* copy orig_ax-__gsh+extra */
++	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
+ 			      sizeof(struct kernel_vm86_regs) -
+-			      offsetof(struct kernel_vm86_regs, pt.orig_eax) +
++			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
+ 			      extra);
+ 	return ret;
  }
-@@ -1411,7 +1413,7 @@ static void irq_complete_move(unsigned int irq)
- 	if (likely(!cfg->move_in_progress))
- 		return;
  
--	vector = ~get_irq_regs()->orig_rax;
-+	vector = ~get_irq_regs()->orig_ax;
- 	me = smp_processor_id();
- 	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
- 		cpumask_t cleanup_mask;
-@@ -1438,7 +1440,7 @@ static void ack_apic_level(unsigned int irq)
- 	int do_unmask_irq = 0;
+-struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
+-struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
++struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
+ {
+ 	struct tss_struct *tss;
+ 	struct pt_regs *ret;
+@@ -138,7 +137,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+ 		printk("no vm86_info: BAD\n");
+ 		do_exit(SIGSEGV);
+ 	}
+-	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
++	set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+ 	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
+ 	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
+ 	if (tmp) {
+@@ -147,15 +146,15 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+ 	}
  
- 	irq_complete_move(irq);
--#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
-+#ifdef CONFIG_GENERIC_PENDING_IRQ
- 	/* If we are moving the irq we need to mask it */
- 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
- 		do_unmask_irq = 1;
-@@ -1565,7 +1567,7 @@ static struct hw_interrupt_type lapic_irq_type __read_mostly = {
- 	.end = end_lapic_irq,
- };
+ 	tss = &per_cpu(init_tss, get_cpu());
+-	current->thread.esp0 = current->thread.saved_esp0;
++	current->thread.sp0 = current->thread.saved_sp0;
+ 	current->thread.sysenter_cs = __KERNEL_CS;
+-	load_esp0(tss, &current->thread);
+-	current->thread.saved_esp0 = 0;
++	load_sp0(tss, &current->thread);
++	current->thread.saved_sp0 = 0;
+ 	put_cpu();
  
--static void setup_nmi (void)
-+static void __init setup_nmi(void)
+ 	ret = KVM86->regs32;
+ 
+-	ret->xfs = current->thread.saved_fs;
++	ret->fs = current->thread.saved_fs;
+ 	loadsegment(gs, current->thread.saved_gs);
+ 
+ 	return ret;
+@@ -197,7 +196,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
+ 
+ asmlinkage int sys_vm86old(struct pt_regs regs)
  {
- 	/*
-  	 * Dirty trick to enable the NMI watchdog ...
-@@ -1578,7 +1580,7 @@ static void setup_nmi (void)
- 	 */ 
- 	printk(KERN_INFO "activating NMI Watchdog ...");
+-	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
++	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
+ 	struct kernel_vm86_struct info; /* declare this _on top_,
+ 					 * this avoids wasting of stack space.
+ 					 * This remains on the stack until we
+@@ -207,7 +206,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
+ 	int tmp, ret = -EPERM;
  
--	enable_NMI_through_LVT0(NULL);
-+	enable_NMI_through_LVT0();
+ 	tsk = current;
+-	if (tsk->thread.saved_esp0)
++	if (tsk->thread.saved_sp0)
+ 		goto out;
+ 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+ 				       offsetof(struct kernel_vm86_struct, vm86plus) -
+@@ -237,12 +236,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
+ 	struct vm86plus_struct __user *v86;
  
- 	printk(" done.\n");
+ 	tsk = current;
+-	switch (regs.ebx) {
++	switch (regs.bx) {
+ 		case VM86_REQUEST_IRQ:
+ 		case VM86_FREE_IRQ:
+ 		case VM86_GET_IRQ_BITS:
+ 		case VM86_GET_AND_RESET_IRQ:
+-			ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
++			ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
+ 			goto out;
+ 		case VM86_PLUS_INSTALL_CHECK:
+ 			/* NOTE: on old vm86 stuff this will return the error
+@@ -256,9 +255,9 @@ asmlinkage int sys_vm86(struct pt_regs regs)
+ 
+ 	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
+ 	ret = -EPERM;
+-	if (tsk->thread.saved_esp0)
++	if (tsk->thread.saved_sp0)
+ 		goto out;
+-	v86 = (struct vm86plus_struct __user *)regs.ecx;
++	v86 = (struct vm86plus_struct __user *)regs.cx;
+ 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+ 				       offsetof(struct kernel_vm86_struct, regs32) -
+ 				       sizeof(info.regs));
+@@ -281,23 +280,23 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
+ /*
+  * make sure the vm86() system call doesn't try to do anything silly
+  */
+-	info->regs.pt.xds = 0;
+-	info->regs.pt.xes = 0;
+-	info->regs.pt.xfs = 0;
++	info->regs.pt.ds = 0;
++	info->regs.pt.es = 0;
++	info->regs.pt.fs = 0;
+ 
+ /* we are clearing gs later just before "jmp resume_userspace",
+  * because it is not saved/restored.
+  */
+ 
+ /*
+- * The eflags register is also special: we cannot trust that the user
++ * The flags register is also special: we cannot trust that the user
+  * has set it up safely, so this makes sure interrupt etc flags are
+  * inherited from protected mode.
+  */
+- 	VEFLAGS = info->regs.pt.eflags;
+-	info->regs.pt.eflags &= SAFE_MASK;
+-	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
+-	info->regs.pt.eflags |= VM_MASK;
++	VEFLAGS = info->regs.pt.flags;
++	info->regs.pt.flags &= SAFE_MASK;
++	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
++	info->regs.pt.flags |= VM_MASK;
+ 
+ 	switch (info->cpu_type) {
+ 		case CPU_286:
+@@ -315,18 +314,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
+ 	}
+ 
+ /*
+- * Save old state, set default return value (%eax) to 0
++ * Save old state, set default return value (%ax) to 0
+  */
+-	info->regs32->eax = 0;
+-	tsk->thread.saved_esp0 = tsk->thread.esp0;
+-	tsk->thread.saved_fs = info->regs32->xfs;
++	info->regs32->ax = 0;
++	tsk->thread.saved_sp0 = tsk->thread.sp0;
++	tsk->thread.saved_fs = info->regs32->fs;
+ 	savesegment(gs, tsk->thread.saved_gs);
+ 
+ 	tss = &per_cpu(init_tss, get_cpu());
+-	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
++	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ 	if (cpu_has_sep)
+ 		tsk->thread.sysenter_cs = 0;
+-	load_esp0(tss, &tsk->thread);
++	load_sp0(tss, &tsk->thread);
+ 	put_cpu();
+ 
+ 	tsk->thread.screen_bitmap = info->screen_bitmap;
+@@ -352,7 +351,7 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
+ 	struct pt_regs * regs32;
+ 
+ 	regs32 = save_v86_state(regs16);
+-	regs32->eax = retval;
++	regs32->ax = retval;
+ 	__asm__ __volatile__("movl %0,%%esp\n\t"
+ 		"movl %1,%%ebp\n\t"
+ 		"jmp resume_userspace"
+@@ -373,30 +372,30 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
+ 
+ static inline void clear_TF(struct kernel_vm86_regs * regs)
+ {
+-	regs->pt.eflags &= ~TF_MASK;
++	regs->pt.flags &= ~TF_MASK;
  }
-@@ -1654,7 +1656,7 @@ static inline void unlock_ExtINT_logic(void)
-  *
-  * FIXME: really need to revamp this for modern platforms only.
+ 
+ static inline void clear_AC(struct kernel_vm86_regs * regs)
+ {
+-	regs->pt.eflags &= ~AC_MASK;
++	regs->pt.flags &= ~AC_MASK;
+ }
+ 
+ /* It is correct to call set_IF(regs) from the set_vflags_*
+  * functions. However someone forgot to call clear_IF(regs)
+  * in the opposite case.
+  * After the command sequence CLI PUSHF STI POPF you should
+- * end up with interrups disabled, but you ended up with
++ * end up with interrupts disabled, but you ended up with
+  * interrupts enabled.
+  *  ( I was testing my own changes, but the only bug I
+  *    could find was in a function I had not changed. )
+  * [KD]
   */
--static inline void check_timer(void)
-+static inline void __init check_timer(void)
+ 
+-static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
++static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
  {
- 	struct irq_cfg *cfg = irq_cfg + 0;
- 	int apic1, pin1, apic2, pin2;
-@@ -1788,7 +1790,10 @@ __setup("no_timer_check", notimercheck);
+-	set_flags(VEFLAGS, eflags, current->thread.v86mask);
+-	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
+-	if (eflags & IF_MASK)
++	set_flags(VEFLAGS, flags, current->thread.v86mask);
++	set_flags(regs->pt.flags, flags, SAFE_MASK);
++	if (flags & IF_MASK)
+ 		set_IF(regs);
+ 	else
+ 		clear_IF(regs);
+@@ -405,7 +404,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
+ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
+ {
+ 	set_flags(VFLAGS, flags, current->thread.v86mask);
+-	set_flags(regs->pt.eflags, flags, SAFE_MASK);
++	set_flags(regs->pt.flags, flags, SAFE_MASK);
+ 	if (flags & IF_MASK)
+ 		set_IF(regs);
+ 	else
+@@ -414,7 +413,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
  
- void __init setup_IO_APIC(void)
+ static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
  {
--	enable_IO_APIC();
-+
-+	/*
-+	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
-+	 */
+-	unsigned long flags = regs->pt.eflags & RETURN_MASK;
++	unsigned long flags = regs->pt.flags & RETURN_MASK;
  
- 	if (acpi_ioapic)
- 		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
-@@ -1850,7 +1855,7 @@ static int ioapic_resume(struct sys_device *dev)
+ 	if (VEFLAGS & VIF_MASK)
+ 		flags |= IF_MASK;
+@@ -518,7 +517,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
+ 	unsigned long __user *intr_ptr;
+ 	unsigned long segoffs;
+ 
+-	if (regs->pt.xcs == BIOSSEG)
++	if (regs->pt.cs == BIOSSEG)
+ 		goto cannot_handle;
+ 	if (is_revectored(i, &KVM86->int_revectored))
+ 		goto cannot_handle;
+@@ -530,9 +529,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
+ 	if ((segoffs >> 16) == BIOSSEG)
+ 		goto cannot_handle;
+ 	pushw(ssp, sp, get_vflags(regs), cannot_handle);
+-	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
++	pushw(ssp, sp, regs->pt.cs, cannot_handle);
+ 	pushw(ssp, sp, IP(regs), cannot_handle);
+-	regs->pt.xcs = segoffs >> 16;
++	regs->pt.cs = segoffs >> 16;
+ 	SP(regs) -= 6;
+ 	IP(regs) = segoffs & 0xffff;
+ 	clear_TF(regs);
+@@ -549,7 +548,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
+ 	if (VMPI.is_vm86pus) {
+ 		if ( (trapno==3) || (trapno==1) )
+ 			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
+-		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
++		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
+ 		return 0;
+ 	}
+ 	if (trapno !=1)
+@@ -585,10 +584,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
+ 		handle_vm86_trap(regs, 0, 1); \
+ 	return; } while (0)
+ 
+-	orig_flags = *(unsigned short *)&regs->pt.eflags;
++	orig_flags = *(unsigned short *)&regs->pt.flags;
+ 
+-	csp = (unsigned char __user *) (regs->pt.xcs << 4);
+-	ssp = (unsigned char __user *) (regs->pt.xss << 4);
++	csp = (unsigned char __user *) (regs->pt.cs << 4);
++	ssp = (unsigned char __user *) (regs->pt.ss << 4);
+ 	sp = SP(regs);
+ 	ip = IP(regs);
+ 
+@@ -675,7 +674,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
+ 			SP(regs) += 6;
+ 		}
+ 		IP(regs) = newip;
+-		regs->pt.xcs = newcs;
++		regs->pt.cs = newcs;
+ 		CHECK_IF_IN_TRAP;
+ 		if (data32) {
+ 			set_vflags_long(newflags, regs);
+diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
+index f02bad6..4525bc2 100644
+--- a/arch/x86/kernel/vmi_32.c
++++ b/arch/x86/kernel/vmi_32.c
+@@ -62,7 +62,10 @@ static struct {
+ 	void (*cpuid)(void /* non-c */);
+ 	void (*_set_ldt)(u32 selector);
+ 	void (*set_tr)(u32 selector);
+-	void (*set_kernel_stack)(u32 selector, u32 esp0);
++	void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
++	void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
++	void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
++	void (*set_kernel_stack)(u32 selector, u32 sp0);
+ 	void (*allocate_page)(u32, u32, u32, u32, u32);
+ 	void (*release_page)(u32, u32);
+ 	void (*set_pte)(pte_t, pte_t *, unsigned);
+@@ -88,13 +91,13 @@ struct vmi_timer_ops vmi_timer_ops;
+ #define IRQ_PATCH_DISABLE  5
+ 
+ static inline void patch_offset(void *insnbuf,
+-				unsigned long eip, unsigned long dest)
++				unsigned long ip, unsigned long dest)
+ {
+-        *(unsigned long *)(insnbuf+1) = dest-eip-5;
++        *(unsigned long *)(insnbuf+1) = dest-ip-5;
  }
  
- static struct sysdev_class ioapic_sysdev_class = {
--	set_kset_name("ioapic"),
-+	.name = "ioapic",
- 	.suspend = ioapic_suspend,
- 	.resume = ioapic_resume,
- };
-@@ -2288,3 +2293,92 @@ void __init setup_ioapic_dest(void)
+ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
+-			       unsigned long eip)
++			       unsigned long ip)
+ {
+ 	u64 reloc;
+ 	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
+@@ -103,13 +106,13 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
+ 		case VMI_RELOCATION_CALL_REL:
+ 			BUG_ON(len < 5);
+ 			*(char *)insnbuf = MNEM_CALL;
+-			patch_offset(insnbuf, eip, (unsigned long)rel->eip);
++			patch_offset(insnbuf, ip, (unsigned long)rel->eip);
+ 			return 5;
+ 
+ 		case VMI_RELOCATION_JUMP_REL:
+ 			BUG_ON(len < 5);
+ 			*(char *)insnbuf = MNEM_JMP;
+-			patch_offset(insnbuf, eip, (unsigned long)rel->eip);
++			patch_offset(insnbuf, ip, (unsigned long)rel->eip);
+ 			return 5;
+ 
+ 		case VMI_RELOCATION_NOP:
+@@ -131,25 +134,25 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
+  * sequence.  The callee does nop padding for us.
+  */
+ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
+-			  unsigned long eip, unsigned len)
++			  unsigned long ip, unsigned len)
+ {
+ 	switch (type) {
+ 		case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
+ 			return patch_internal(VMI_CALL_DisableInterrupts, len,
+-					      insns, eip);
++					      insns, ip);
+ 		case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
+ 			return patch_internal(VMI_CALL_EnableInterrupts, len,
+-					      insns, eip);
++					      insns, ip);
+ 		case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
+ 			return patch_internal(VMI_CALL_SetInterruptMask, len,
+-					      insns, eip);
++					      insns, ip);
+ 		case PARAVIRT_PATCH(pv_irq_ops.save_fl):
+ 			return patch_internal(VMI_CALL_GetInterruptMask, len,
+-					      insns, eip);
++					      insns, ip);
+ 		case PARAVIRT_PATCH(pv_cpu_ops.iret):
+-			return patch_internal(VMI_CALL_IRET, len, insns, eip);
+-		case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
+-			return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
++			return patch_internal(VMI_CALL_IRET, len, insns, ip);
++		case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
++			return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
+ 		default:
+ 			break;
+ 	}
+@@ -157,36 +160,36 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
  }
- #endif
  
-+#define IOAPIC_RESOURCE_NAME_SIZE 11
-+
-+static struct resource *ioapic_resources;
-+
-+static struct resource * __init ioapic_setup_resources(void)
-+{
-+	unsigned long n;
-+	struct resource *res;
-+	char *mem;
-+	int i;
-+
-+	if (nr_ioapics <= 0)
-+		return NULL;
-+
-+	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
-+	n *= nr_ioapics;
-+
-+	mem = alloc_bootmem(n);
-+	res = (void *)mem;
-+
-+	if (mem != NULL) {
-+		memset(mem, 0, n);
-+		mem += sizeof(struct resource) * nr_ioapics;
-+
-+		for (i = 0; i < nr_ioapics; i++) {
-+			res[i].name = mem;
-+			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+			sprintf(mem,  "IOAPIC %u", i);
-+			mem += IOAPIC_RESOURCE_NAME_SIZE;
-+		}
-+	}
-+
-+	ioapic_resources = res;
-+
-+	return res;
-+}
-+
-+void __init ioapic_init_mappings(void)
-+{
-+	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
-+	struct resource *ioapic_res;
-+	int i;
-+
-+	ioapic_res = ioapic_setup_resources();
-+	for (i = 0; i < nr_ioapics; i++) {
-+		if (smp_found_config) {
-+			ioapic_phys = mp_ioapics[i].mpc_apicaddr;
-+		} else {
-+			ioapic_phys = (unsigned long)
-+				alloc_bootmem_pages(PAGE_SIZE);
-+			ioapic_phys = __pa(ioapic_phys);
-+		}
-+		set_fixmap_nocache(idx, ioapic_phys);
-+		apic_printk(APIC_VERBOSE,
-+			    "mapped IOAPIC to %016lx (%016lx)\n",
-+			    __fix_to_virt(idx), ioapic_phys);
-+		idx++;
-+
-+		if (ioapic_res != NULL) {
-+			ioapic_res->start = ioapic_phys;
-+			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
-+			ioapic_res++;
-+		}
-+	}
-+}
-+
-+static int __init ioapic_insert_resources(void)
-+{
-+	int i;
-+	struct resource *r = ioapic_resources;
-+
-+	if (!r) {
-+		printk(KERN_ERR
-+		       "IO APIC resources could be not be allocated.\n");
-+		return -1;
-+	}
-+
-+	for (i = 0; i < nr_ioapics; i++) {
-+		insert_resource(&iomem_resource, r);
-+		r++;
-+	}
-+
-+	return 0;
-+}
-+
-+/* Insert the IO APIC resources after PCI initialization has occured to handle
-+ * IO APICS that are mapped in on a BAR in PCI space. */
-+late_initcall(ioapic_insert_resources);
-+
-diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
-new file mode 100644
-index 0000000..bd49321
---- /dev/null
-+++ b/arch/x86/kernel/io_delay.c
-@@ -0,0 +1,114 @@
-+/*
-+ * I/O delay strategies for inb_p/outb_p
-+ *
-+ * Allow for a DMI based override of port 0x80, needed for certain HP laptops
-+ * and possibly other systems. Also allow for the gradual elimination of
-+ * outb_p/inb_p API uses.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/dmi.h>
-+#include <asm/io.h>
-+
-+int io_delay_type __read_mostly = CONFIG_DEFAULT_IO_DELAY_TYPE;
-+EXPORT_SYMBOL_GPL(io_delay_type);
-+
-+static int __initdata io_delay_override;
-+
-+/*
-+ * Paravirt wants native_io_delay to be a constant.
-+ */
-+void native_io_delay(void)
-+{
-+	switch (io_delay_type) {
-+	default:
-+	case CONFIG_IO_DELAY_TYPE_0X80:
-+		asm volatile ("outb %al, $0x80");
-+		break;
-+	case CONFIG_IO_DELAY_TYPE_0XED:
-+		asm volatile ("outb %al, $0xed");
-+		break;
-+	case CONFIG_IO_DELAY_TYPE_UDELAY:
-+		/*
-+		 * 2 usecs is an upper-bound for the outb delay but
-+		 * note that udelay doesn't have the bus-level
-+		 * side-effects that outb does, nor does udelay() have
-+		 * precise timings during very early bootup (the delays
-+		 * are shorter until calibrated):
-+		 */
-+		udelay(2);
-+	case CONFIG_IO_DELAY_TYPE_NONE:
-+		break;
-+	}
-+}
-+EXPORT_SYMBOL(native_io_delay);
-+
-+static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
-+{
-+	if (io_delay_type == CONFIG_IO_DELAY_TYPE_0X80) {
-+		printk(KERN_NOTICE "%s: using 0xed I/O delay port\n",
-+			id->ident);
-+		io_delay_type = CONFIG_IO_DELAY_TYPE_0XED;
-+	}
-+
-+	return 0;
-+}
-+
-+/*
-+ * Quirk table for systems that misbehave (lock up, etc.) if port
-+ * 0x80 is used:
-+ */
-+static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
-+	{
-+		.callback	= dmi_io_delay_0xed_port,
-+		.ident		= "Compaq Presario V6000",
-+		.matches	= {
-+			DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
-+			DMI_MATCH(DMI_BOARD_NAME, "30B7")
-+		}
-+	},
-+	{
-+		.callback	= dmi_io_delay_0xed_port,
-+		.ident		= "HP Pavilion dv9000z",
-+		.matches	= {
-+			DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
-+			DMI_MATCH(DMI_BOARD_NAME, "30B9")
-+		}
-+	},
-+	{
-+		.callback	= dmi_io_delay_0xed_port,
-+		.ident		= "HP Pavilion tx1000",
-+		.matches	= {
-+			DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
-+			DMI_MATCH(DMI_BOARD_NAME, "30BF")
-+		}
-+	},
-+	{ }
-+};
-+
-+void __init io_delay_init(void)
-+{
-+	if (!io_delay_override)
-+		dmi_check_system(io_delay_0xed_port_dmi_table);
-+}
-+
-+static int __init io_delay_param(char *s)
-+{
-+	if (!strcmp(s, "0x80"))
-+		io_delay_type = CONFIG_IO_DELAY_TYPE_0X80;
-+	else if (!strcmp(s, "0xed"))
-+		io_delay_type = CONFIG_IO_DELAY_TYPE_0XED;
-+	else if (!strcmp(s, "udelay"))
-+		io_delay_type = CONFIG_IO_DELAY_TYPE_UDELAY;
-+	else if (!strcmp(s, "none"))
-+		io_delay_type = CONFIG_IO_DELAY_TYPE_NONE;
-+	else
-+		return -EINVAL;
-+
-+	io_delay_override = 1;
-+	return 0;
-+}
-+
-+early_param("io_delay", io_delay_param);
-diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
-new file mode 100644
-index 0000000..50e5e4a
---- /dev/null
-+++ b/arch/x86/kernel/ioport.c
-@@ -0,0 +1,154 @@
-+/*
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus. 32/64 bits code unification by Miguel Botón.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/smp.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <linux/syscalls.h>
-+
-+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-+static void set_bitmap(unsigned long *bitmap, unsigned int base,
-+		       unsigned int extent, int new_value)
-+{
-+	unsigned int i;
-+
-+	for (i = base; i < base + extent; i++) {
-+		if (new_value)
-+			__set_bit(i, bitmap);
-+		else
-+			__clear_bit(i, bitmap);
-+	}
-+}
-+
-+/*
-+ * this changes the io permissions bitmap in the current task.
-+ */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-+{
-+	struct thread_struct * t = &current->thread;
-+	struct tss_struct * tss;
-+	unsigned int i, max_long, bytes, bytes_updated;
-+
-+	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
-+		return -EINVAL;
-+	if (turn_on && !capable(CAP_SYS_RAWIO))
-+		return -EPERM;
-+
-+	/*
-+	 * If it's the first ioperm() call in this thread's lifetime, set the
-+	 * IO bitmap up. ioperm() is much less timing critical than clone(),
-+	 * this is why we delay this operation until now:
-+	 */
-+	if (!t->io_bitmap_ptr) {
-+		unsigned long *bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+
-+		if (!bitmap)
-+			return -ENOMEM;
-+
-+		memset(bitmap, 0xff, IO_BITMAP_BYTES);
-+		t->io_bitmap_ptr = bitmap;
-+		set_thread_flag(TIF_IO_BITMAP);
-+	}
-+
-+	/*
-+	 * do it in the per-thread copy and in the TSS ...
-+	 *
-+	 * Disable preemption via get_cpu() - we must not switch away
-+	 * because the ->io_bitmap_max value must match the bitmap
-+	 * contents:
-+	 */
-+	tss = &per_cpu(init_tss, get_cpu());
-+
-+	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
-+
-+	/*
-+	 * Search for a (possibly new) maximum. This is simple and stupid,
-+	 * to keep it obviously correct:
-+	 */
-+	max_long = 0;
-+	for (i = 0; i < IO_BITMAP_LONGS; i++)
-+		if (t->io_bitmap_ptr[i] != ~0UL)
-+			max_long = i;
-+
-+	bytes = (max_long + 1) * sizeof(unsigned long);
-+	bytes_updated = max(bytes, t->io_bitmap_max);
-+
-+	t->io_bitmap_max = bytes;
-+
-+#ifdef CONFIG_X86_32
-+	/*
-+	 * Sets the lazy trigger so that the next I/O operation will
-+	 * reload the correct bitmap.
-+	 * Reset the owner so that a process switch will not set
-+	 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
-+	 */
-+	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
-+	tss->io_bitmap_owner = NULL;
-+#else
-+	/* Update the TSS: */
-+	memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
-+#endif
-+
-+	put_cpu();
-+
-+	return 0;
+ /* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
+-static void vmi_cpuid(unsigned int *eax, unsigned int *ebx,
+-                               unsigned int *ecx, unsigned int *edx)
++static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
++                               unsigned int *cx, unsigned int *dx)
+ {
+ 	int override = 0;
+-	if (*eax == 1)
++	if (*ax == 1)
+ 		override = 1;
+         asm volatile ("call *%6"
+-                      : "=a" (*eax),
+-                        "=b" (*ebx),
+-                        "=c" (*ecx),
+-                        "=d" (*edx)
+-                      : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid));
++                      : "=a" (*ax),
++                        "=b" (*bx),
++                        "=c" (*cx),
++                        "=d" (*dx)
++                      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
+ 	if (override) {
+ 		if (disable_pse)
+-			*edx &= ~X86_FEATURE_PSE;
++			*dx &= ~X86_FEATURE_PSE;
+ 		if (disable_pge)
+-			*edx &= ~X86_FEATURE_PGE;
++			*dx &= ~X86_FEATURE_PGE;
+ 		if (disable_sep)
+-			*edx &= ~X86_FEATURE_SEP;
++			*dx &= ~X86_FEATURE_SEP;
+ 		if (disable_tsc)
+-			*edx &= ~X86_FEATURE_TSC;
++			*dx &= ~X86_FEATURE_TSC;
+ 		if (disable_mtrr)
+-			*edx &= ~X86_FEATURE_MTRR;
++			*dx &= ~X86_FEATURE_MTRR;
+ 	}
+ }
+ 
+ static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
+ {
+ 	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
+-		write_gdt_entry(gdt, nr, new->a, new->b);
++		write_gdt_entry(gdt, nr, new, 0);
+ }
+ 
+ static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
+@@ -200,12 +203,12 @@ static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
+ static void vmi_set_ldt(const void *addr, unsigned entries)
+ {
+ 	unsigned cpu = smp_processor_id();
+-	u32 low, high;
++	struct desc_struct desc;
+ 
+-	pack_descriptor(&low, &high, (unsigned long)addr,
++	pack_descriptor(&desc, (unsigned long)addr,
+ 			entries * sizeof(struct desc_struct) - 1,
+-			DESCTYPE_LDT, 0);
+-	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high);
++			DESC_LDT, 0);
++	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
+ 	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
+ }
+ 
+@@ -214,17 +217,37 @@ static void vmi_set_tr(void)
+ 	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
+ }
+ 
+-static void vmi_load_esp0(struct tss_struct *tss,
++static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
++{
++	u32 *idt_entry = (u32 *)g;
++	vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[2]);
 +}
 +
-+/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ *
-+ * Here we just change the flags value on the stack: we allow
-+ * only the super-user to do it. This depends on the stack-layout
-+ * on system-call entry - see also fork() and the signal handling
-+ * code.
-+ */
-+static int do_iopl(unsigned int level, struct pt_regs *regs)
++static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
++				const void *desc, int type)
 +{
-+	unsigned int old = (regs->flags >> 12) & 3;
-+
-+	if (level > 3)
-+		return -EINVAL;
-+	/* Trying to gain more privileges? */
-+	if (level > old) {
-+		if (!capable(CAP_SYS_RAWIO))
-+			return -EPERM;
-+	}
-+	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
-+
-+	return 0;
++	u32 *gdt_entry = (u32 *)desc;
++	vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[2]);
 +}
 +
-+#ifdef CONFIG_X86_32
-+asmlinkage long sys_iopl(unsigned long regsp)
++static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
++				const void *desc)
 +{
-+	struct pt_regs *regs = (struct pt_regs *)&regsp;
-+	unsigned int level = regs->bx;
-+	struct thread_struct *t = &current->thread;
-+	int rc;
++	u32 *ldt_entry = (u32 *)desc;
++	vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[2]);
++}
 +
-+	rc = do_iopl(level, regs);
-+	if (rc < 0)
-+		goto out;
++static void vmi_load_sp0(struct tss_struct *tss,
+ 				   struct thread_struct *thread)
+ {
+-	tss->x86_tss.esp0 = thread->esp0;
++	tss->x86_tss.sp0 = thread->sp0;
+ 
+ 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
+ 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+ 		tss->x86_tss.ss1 = thread->sysenter_cs;
+ 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ 	}
+-	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0);
++	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
+ }
+ 
+ static void vmi_flush_tlb_user(void)
+@@ -375,7 +398,7 @@ static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
+ 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
+ }
+ 
+-static void vmi_allocate_pd(u32 pfn)
++static void vmi_allocate_pd(struct mm_struct *mm, u32 pfn)
+ {
+  	/*
+ 	 * This call comes in very early, before mem_map is setup.
+@@ -452,7 +475,7 @@ static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep
+ static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+ {
+ #ifdef CONFIG_X86_PAE
+-	const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
++	const pte_t pte = { .pte = pmdval.pmd };
+ 	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
+ #else
+ 	const pte_t pte = { pmdval.pud.pgd.pgd };
+@@ -485,21 +508,21 @@ static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t
+ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
+ {
+ 	/* Um, eww */
+-	const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
++	const pte_t pte = { .pte = pudval.pgd.pgd };
+ 	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
+ 	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
+ }
+ 
+ static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+-	const pte_t pte = { 0 };
++	const pte_t pte = { .pte = 0 };
+ 	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
+ 	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
+ }
+ 
+ static void vmi_pmd_clear(pmd_t *pmd)
+ {
+-	const pte_t pte = { 0 };
++	const pte_t pte = { .pte = 0 };
+ 	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
+ 	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
+ }
+@@ -790,10 +813,13 @@ static inline int __init activate_vmi(void)
+ 	para_fill(pv_cpu_ops.store_idt, GetIDT);
+ 	para_fill(pv_cpu_ops.store_tr, GetTR);
+ 	pv_cpu_ops.load_tls = vmi_load_tls;
+-	para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
+-	para_fill(pv_cpu_ops.write_gdt_entry, WriteGDTEntry);
+-	para_fill(pv_cpu_ops.write_idt_entry, WriteIDTEntry);
+-	para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
++	para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
++		  write_ldt_entry, WriteLDTEntry);
++	para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
++		  write_gdt_entry, WriteGDTEntry);
++	para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
++		  write_idt_entry, WriteIDTEntry);
++	para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
+ 	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
+ 	para_fill(pv_cpu_ops.io_delay, IODelay);
+ 
+@@ -870,7 +896,7 @@ static inline int __init activate_vmi(void)
+ 	 * the backend.  They are performance critical anyway, so requiring
+ 	 * a patch is not a big problem.
+ 	 */
+-	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
++	pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
+ 	pv_cpu_ops.iret = (void *)0xbadbab0;
+ 
+ #ifdef CONFIG_SMP
+@@ -963,19 +989,19 @@ static int __init parse_vmi(char *arg)
+ 		return -EINVAL;
+ 
+ 	if (!strcmp(arg, "disable_pge")) {
+-		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
++		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
+ 		disable_pge = 1;
+ 	} else if (!strcmp(arg, "disable_pse")) {
+-		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
+ 		disable_pse = 1;
+ 	} else if (!strcmp(arg, "disable_sep")) {
+-		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
+ 		disable_sep = 1;
+ 	} else if (!strcmp(arg, "disable_tsc")) {
+-		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
++		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
+ 		disable_tsc = 1;
+ 	} else if (!strcmp(arg, "disable_mtrr")) {
+-		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
++		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
+ 		disable_mtrr = 1;
+ 	} else if (!strcmp(arg, "disable_timer")) {
+ 		disable_vmi_timer = 1;
+diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
+index b1b5ab0..a2b0307 100644
+--- a/arch/x86/kernel/vmiclock_32.c
++++ b/arch/x86/kernel/vmiclock_32.c
+@@ -35,7 +35,6 @@
+ #include <asm/i8253.h>
+ 
+ #include <irq_vectors.h>
+-#include "io_ports.h"
+ 
+ #define VMI_ONESHOT  (VMI_ALARM_IS_ONESHOT  | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
+ #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
+@@ -238,7 +237,7 @@ static void __devinit vmi_time_init_clockevent(void)
+ void __init vmi_time_init(void)
+ {
+ 	/* Disable PIT: BIOSes start PIT CH0 with 18.2hz peridic. */
+-	outb_p(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
++	outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
+ 
+ 	vmi_time_init_clockevent();
+ 	setup_irq(0, &vmi_clock_action);
+diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
+index 7d72cce..f1148ac 100644
+--- a/arch/x86/kernel/vmlinux_32.lds.S
++++ b/arch/x86/kernel/vmlinux_32.lds.S
+@@ -8,12 +8,6 @@
+  * put it inside the section definition.
+  */
+ 
+-/* Don't define absolute symbols until and unless you know that symbol
+- * value is should remain constant even if kernel image is relocated
+- * at run time. Absolute symbols are not relocated. If symbol value should
+- * change if kernel is relocated, make the symbol section relative and
+- * put it inside the section definition.
+- */
+ #define LOAD_OFFSET __PAGE_OFFSET
+ 
+ #include <asm-generic/vmlinux.lds.h>
+@@ -44,6 +38,8 @@ SECTIONS
+ 
+   /* read-only */
+   .text : AT(ADDR(.text) - LOAD_OFFSET) {
++	. = ALIGN(4096); /* not really needed, already page aligned */
++	*(.text.page_aligned)
+ 	TEXT_TEXT
+ 	SCHED_TEXT
+ 	LOCK_TEXT
+@@ -131,10 +127,12 @@ SECTIONS
+   .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+   	__init_begin = .;
+ 	_sinittext = .;
+-	*(.init.text)
++	INIT_TEXT
+ 	_einittext = .;
+   }
+-  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
++  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
++	INIT_DATA
++  }
+   . = ALIGN(16);
+   .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+   	__setup_start = .;
+@@ -169,8 +167,12 @@ SECTIONS
+   }
+   /* .exit.text is discard at runtime, not link time, to deal with references
+      from .altinstructions and .eh_frame */
+-  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
+-  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
++  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
++	EXIT_TEXT
++  }
++  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
++	EXIT_DATA
++  }
+ #if defined(CONFIG_BLK_DEV_INITRD)
+   . = ALIGN(4096);
+   .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
+index ba8ea97..0992b99 100644
+--- a/arch/x86/kernel/vmlinux_64.lds.S
++++ b/arch/x86/kernel/vmlinux_64.lds.S
+@@ -37,16 +37,15 @@ SECTIONS
+ 	KPROBES_TEXT
+ 	*(.fixup)
+ 	*(.gnu.warning)
+-	} :text = 0x9090
+-  				/* out-of-line lock text */
+-  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
+-
+-  _etext = .;			/* End of text section */
++	_etext = .;			/* End of text section */
++  } :text = 0x9090
+ 
+   . = ALIGN(16);		/* Exception table */
+-  __start___ex_table = .;
+-  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
+-  __stop___ex_table = .;
++  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
++  	__start___ex_table = .;
++	 *(__ex_table)
++  	__stop___ex_table = .;
++  }
+ 
+   NOTES :text :note
+ 
+@@ -155,12 +154,15 @@ SECTIONS
+   __init_begin = .;
+   .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+ 	_sinittext = .;
+-	*(.init.text)
++	INIT_TEXT
+ 	_einittext = .;
+   }
+-  __initdata_begin = .;
+-  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
+-  __initdata_end = .;
++  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
++	__initdata_begin = .;
++	INIT_DATA
++	__initdata_end = .;
++   }
 +
-+	t->iopl = level << 12;
-+	set_iopl_mask(t->iopl);
-+out:
-+	return rc;
-+}
-+#else
-+asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
-+{
-+	return do_iopl(level, regs);
-+}
-+#endif
-diff --git a/arch/x86/kernel/ioport_32.c b/arch/x86/kernel/ioport_32.c
+   . = ALIGN(16);
+   __setup_start = .;
+   .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
+@@ -176,6 +178,14 @@ SECTIONS
+   }
+   __con_initcall_end = .;
+   SECURITY_INIT
++
++  . = ALIGN(8);
++  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
++  __parainstructions = .;
++       *(.parainstructions)
++  __parainstructions_end = .;
++  }
++
+   . = ALIGN(8);
+   __alt_instructions = .;
+   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+@@ -187,8 +197,12 @@ SECTIONS
+   }
+   /* .exit.text is discard at runtime, not link time, to deal with references
+      from .altinstructions and .eh_frame */
+-  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
+-  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
++  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
++	EXIT_TEXT
++  }
++  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
++	EXIT_DATA
++  }
+ 
+ /* vdso blob that is mapped into user space */
+   vdso_start = . ;
+diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
+index 414caf0..d971210 100644
+--- a/arch/x86/kernel/vsmp_64.c
++++ b/arch/x86/kernel/vsmp_64.c
+@@ -25,21 +25,24 @@ static int __init vsmp_init(void)
+ 		return 0;
+ 
+ 	/* Check if we are running on a ScaleMP vSMP box */
+-	if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) != PCI_VENDOR_ID_SCALEMP) ||
+-	    (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) != PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
++	if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) !=
++	     PCI_VENDOR_ID_SCALEMP) ||
++	    (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) !=
++	     PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
+ 		return 0;
+ 
+ 	/* set vSMP magic bits to indicate vSMP capable kernel */
+ 	address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
+ 	cap = readl(address);
+ 	ctl = readl(address + 4);
+-	printk("vSMP CTL: capabilities:0x%08x  control:0x%08x\n", cap, ctl);
++	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x  control:0x%08x\n",
++	       cap, ctl);
+ 	if (cap & ctl & (1 << 4)) {
+ 		/* Turn on vSMP IRQ fastpath handling (see system.h) */
+ 		ctl &= ~(1 << 4);
+ 		writel(ctl, address + 4);
+ 		ctl = readl(address + 4);
+-		printk("vSMP CTL: control set to:0x%08x\n", ctl);
++		printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
+ 	}
+ 
+ 	iounmap(address);
+diff --git a/arch/x86/kernel/vsyscall-int80_32.S b/arch/x86/kernel/vsyscall-int80_32.S
 deleted file mode 100644
-index 4ed48dc..0000000
---- a/arch/x86/kernel/ioport_32.c
+index 103cab6..0000000
+--- a/arch/x86/kernel/vsyscall-int80_32.S
 +++ /dev/null
-@@ -1,151 +0,0 @@
+@@ -1,53 +0,0 @@
 -/*
-- * This contains the io-permission bitmap code - written by obz, with changes
-- * by Linus.
+- * Code for the vsyscall page.  This version uses the old int $0x80 method.
+- *
+- * NOTE:
+- * 1) __kernel_vsyscall _must_ be first in this page.
+- * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
+- *    for details.
 - */
 -
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/capability.h>
--#include <linux/errno.h>
--#include <linux/types.h>
--#include <linux/ioport.h>
--#include <linux/smp.h>
--#include <linux/stddef.h>
--#include <linux/slab.h>
--#include <linux/thread_info.h>
--#include <linux/syscalls.h>
+-	.text
+-	.globl __kernel_vsyscall
-	.type __kernel_vsyscall,@function
+-__kernel_vsyscall:
+-.LSTART_vsyscall:
+-	int $0x80
+-	ret
+-.LEND_vsyscall:
+-	.size __kernel_vsyscall,.-.LSTART_vsyscall
+-	.previous
 -
--/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
--static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
--{
--	unsigned long mask;
--	unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
--	unsigned int low_index = base & (BITS_PER_LONG-1);
--	int length = low_index + extent;
+-	.section .eh_frame,"a", at progbits
+-.LSTARTFRAMEDLSI:
+-	.long .LENDCIEDLSI-.LSTARTCIEDLSI
+-.LSTARTCIEDLSI:
+-	.long 0			/* CIE ID */
+-	.byte 1			/* Version number */
+-	.string "zR"		/* NUL-terminated augmentation string */
+-	.uleb128 1		/* Code alignment factor */
+-	.sleb128 -4		/* Data alignment factor */
+-	.byte 8			/* Return address register column */
+-	.uleb128 1		/* Augmentation value length */
+-	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+-	.byte 0x0c		/* DW_CFA_def_cfa */
+-	.uleb128 4
+-	.uleb128 4
+-	.byte 0x88		/* DW_CFA_offset, column 0x8 */
+-	.uleb128 1
+-	.align 4
+-.LENDCIEDLSI:
+-	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
+-.LSTARTFDEDLSI:
+-	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
+-	.long .LSTART_vsyscall-.	/* PC-relative start address */
+-	.long .LEND_vsyscall-.LSTART_vsyscall
+-	.uleb128 0
+-	.align 4
+-.LENDFDEDLSI:
+-	.previous
 -
--	if (low_index != 0) {
--		mask = (~0UL << low_index);
--		if (length < BITS_PER_LONG)
--			mask &= ~(~0UL << length);
--		if (new_value)
--			*bitmap_base++ |= mask;
--		else
--			*bitmap_base++ &= ~mask;
--		length -= BITS_PER_LONG;
--	}
+-/*
+- * Get the common code for the sigreturn entry points.
+- */
+-#include "vsyscall-sigreturn_32.S"
+diff --git a/arch/x86/kernel/vsyscall-note_32.S b/arch/x86/kernel/vsyscall-note_32.S
+deleted file mode 100644
+index fcf376a..0000000
+--- a/arch/x86/kernel/vsyscall-note_32.S
++++ /dev/null
+@@ -1,45 +0,0 @@
+-/*
+- * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+- * Here we can supply some information useful to userland.
+- */
 -
--	mask = (new_value ? ~0UL : 0UL);
--	while (length >= BITS_PER_LONG) {
--		*bitmap_base++ = mask;
--		length -= BITS_PER_LONG;
--	}
+-#include <linux/version.h>
+-#include <linux/elfnote.h>
 -
--	if (length > 0) {
--		mask = ~(~0UL << length);
--		if (new_value)
--			*bitmap_base++ |= mask;
--		else
--			*bitmap_base++ &= ~mask;
--	}
--}
+-/* Ideally this would use UTS_NAME, but using a quoted string here
+-   doesn't work. Remember to change this when changing the
+-   kernel's name. */
+-ELFNOTE_START(Linux, 0, "a")
+-	.long LINUX_VERSION_CODE
+-ELFNOTE_END
+-
+-#ifdef CONFIG_XEN
+-/*
+- * Add a special note telling glibc's dynamic linker a fake hardware
+- * flavor that it will use to choose the search path for libraries in the
+- * same way it uses real hardware capabilities like "mmx".
+- * We supply "nosegneg" as the fake capability, to indicate that we
+- * do not like negative offsets in instructions using segment overrides,
+- * since we implement those inefficiently.  This makes it possible to
+- * install libraries optimized to avoid those access patterns in someplace
+- * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
+- * corresponding to the bits here is needed to make ldconfig work right.
+- * It should contain:
+- *	hwcap 1 nosegneg
+- * to match the mapping of bit to name that we give here.
+- *
+- * At runtime, the fake hardware feature will be considered to be present
+- * if its bit is set in the mask word.  So, we start with the mask 0, and
+- * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
+- */
 -
+-#include "../../x86/xen/vdso.h"	/* Defines VDSO_NOTE_NONEGSEG_BIT.  */
 -
+-	.globl VDSO_NOTE_MASK
+-ELFNOTE_START(GNU, 2, "a")
+-	.long 1			/* ncaps */
+-VDSO_NOTE_MASK:
+-	.long 0			/* mask */
+-	.byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg"	/* bit, name */
+-ELFNOTE_END
+-#endif
+diff --git a/arch/x86/kernel/vsyscall-sigreturn_32.S b/arch/x86/kernel/vsyscall-sigreturn_32.S
+deleted file mode 100644
+index a92262f..0000000
+--- a/arch/x86/kernel/vsyscall-sigreturn_32.S
++++ /dev/null
+@@ -1,143 +0,0 @@
 -/*
-- * this changes the io permissions bitmap in the current task.
+- * Common code for the sigreturn entry points on the vsyscall page.
+- * So far this code is the same for both int80 and sysenter versions.
+- * This file is #include'd by vsyscall-*.S to define them after the
+- * vsyscall entry point.  The kernel assumes that the addresses of these
+- * routines are constant for all vsyscall implementations.
 - */
--asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
--{
--	unsigned long i, max_long, bytes, bytes_updated;
--	struct thread_struct * t = &current->thread;
--	struct tss_struct * tss;
--	unsigned long *bitmap;
 -
--	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
--		return -EINVAL;
--	if (turn_on && !capable(CAP_SYS_RAWIO))
--		return -EPERM;
+-#include <asm/unistd.h>
+-#include <asm/asm-offsets.h>
 -
--	/*
--	 * If it's the first ioperm() call in this thread's lifetime, set the
--	 * IO bitmap up. ioperm() is much less timing critical than clone(),
--	 * this is why we delay this operation until now:
--	 */
--	if (!t->io_bitmap_ptr) {
--		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
--		if (!bitmap)
--			return -ENOMEM;
 -
--		memset(bitmap, 0xff, IO_BITMAP_BYTES);
--		t->io_bitmap_ptr = bitmap;
--		set_thread_flag(TIF_IO_BITMAP);
--	}
+-/* XXX
+-   Should these be named "_sigtramp" or something?
+-*/
 -
--	/*
--	 * do it in the per-thread copy and in the TSS ...
--	 *
--	 * Disable preemption via get_cpu() - we must not switch away
--	 * because the ->io_bitmap_max value must match the bitmap
--	 * contents:
--	 */
--	tss = &per_cpu(init_tss, get_cpu());
+-	.text
+-	.org __kernel_vsyscall+32,0x90
+-	.globl __kernel_sigreturn
+-	.type __kernel_sigreturn,@function
+-__kernel_sigreturn:
+-.LSTART_sigreturn:
+-	popl %eax		/* XXX does this mean it needs unwind info? */
+-	movl $__NR_sigreturn, %eax
+-	int $0x80
+-.LEND_sigreturn:
+-	.size __kernel_sigreturn,.-.LSTART_sigreturn
 -
--	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+-	.balign 32
+-	.globl __kernel_rt_sigreturn
+-	.type __kernel_rt_sigreturn,@function
+-__kernel_rt_sigreturn:
+-.LSTART_rt_sigreturn:
+-	movl $__NR_rt_sigreturn, %eax
+-	int $0x80
+-.LEND_rt_sigreturn:
+-	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+-	.balign 32
+-	.previous
 -
--	/*
--	 * Search for a (possibly new) maximum. This is simple and stupid,
--	 * to keep it obviously correct:
--	 */
--	max_long = 0;
--	for (i = 0; i < IO_BITMAP_LONGS; i++)
--		if (t->io_bitmap_ptr[i] != ~0UL)
--			max_long = i;
+-	.section .eh_frame,"a", at progbits
+-.LSTARTFRAMEDLSI1:
+-	.long .LENDCIEDLSI1-.LSTARTCIEDLSI1
+-.LSTARTCIEDLSI1:
+-	.long 0			/* CIE ID */
+-	.byte 1			/* Version number */
+-	.string "zRS"		/* NUL-terminated augmentation string */
+-	.uleb128 1		/* Code alignment factor */
+-	.sleb128 -4		/* Data alignment factor */
+-	.byte 8			/* Return address register column */
+-	.uleb128 1		/* Augmentation value length */
+-	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+-	.byte 0			/* DW_CFA_nop */
+-	.align 4
+-.LENDCIEDLSI1:
+-	.long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */
+-.LSTARTFDEDLSI1:
+-	.long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */
+-	/* HACK: The dwarf2 unwind routines will subtract 1 from the
+-	   return address to get an address in the middle of the
+-	   presumed call instruction.  Since we didn't get here via
+-	   a call, we need to include the nop before the real start
+-	   to make up for it.  */
+-	.long .LSTART_sigreturn-1-.	/* PC-relative start address */
+-	.long .LEND_sigreturn-.LSTART_sigreturn+1
+-	.uleb128 0			/* Augmentation */
+-	/* What follows are the instructions for the table generation.
+-	   We record the locations of each register saved.  This is
+-	   complicated by the fact that the "CFA" is always assumed to
+-	   be the value of the stack pointer in the caller.  This means
+-	   that we must define the CFA of this body of code to be the
+-	   saved value of the stack pointer in the sigcontext.  Which
+-	   also means that there is no fixed relation to the other 
+-	   saved registers, which means that we must use DW_CFA_expression
+-	   to compute their addresses.  It also means that when we 
+-	   adjust the stack with the popl, we have to do it all over again.  */
 -
--	bytes = (max_long + 1) * sizeof(long);
--	bytes_updated = max(bytes, t->io_bitmap_max);
+-#define do_cfa_expr(offset)						\
+-	.byte 0x0f;			/* DW_CFA_def_cfa_expression */	\
+-	.uleb128 1f-0f;			/*   length */			\
+-0:	.byte 0x74;			/*     DW_OP_breg4 */		\
+-	.sleb128 offset;		/*      offset */		\
+-	.byte 0x06;			/*     DW_OP_deref */		\
+-1:
 -
--	t->io_bitmap_max = bytes;
+-#define do_expr(regno, offset)						\
+-	.byte 0x10;			/* DW_CFA_expression */		\
+-	.uleb128 regno;			/*   regno */			\
+-	.uleb128 1f-0f;			/*   length */			\
+-0:	.byte 0x74;			/*     DW_OP_breg4 */		\
+-	.sleb128 offset;		/*       offset */		\
+-1:
 -
--	/*
--	 * Sets the lazy trigger so that the next I/O operation will
--	 * reload the correct bitmap.
--	 * Reset the owner so that a process switch will not set
--	 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
--	 */
--	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
--	tss->io_bitmap_owner = NULL;
+-	do_cfa_expr(SIGCONTEXT_esp+4)
+-	do_expr(0, SIGCONTEXT_eax+4)
+-	do_expr(1, SIGCONTEXT_ecx+4)
+-	do_expr(2, SIGCONTEXT_edx+4)
+-	do_expr(3, SIGCONTEXT_ebx+4)
+-	do_expr(5, SIGCONTEXT_ebp+4)
+-	do_expr(6, SIGCONTEXT_esi+4)
+-	do_expr(7, SIGCONTEXT_edi+4)
+-	do_expr(8, SIGCONTEXT_eip+4)
 -
--	put_cpu();
+-	.byte 0x42	/* DW_CFA_advance_loc 2 -- nop; popl eax. */
 -
--	return 0;
--}
+-	do_cfa_expr(SIGCONTEXT_esp)
+-	do_expr(0, SIGCONTEXT_eax)
+-	do_expr(1, SIGCONTEXT_ecx)
+-	do_expr(2, SIGCONTEXT_edx)
+-	do_expr(3, SIGCONTEXT_ebx)
+-	do_expr(5, SIGCONTEXT_ebp)
+-	do_expr(6, SIGCONTEXT_esi)
+-	do_expr(7, SIGCONTEXT_edi)
+-	do_expr(8, SIGCONTEXT_eip)
 -
--/*
-- * sys_iopl has to be used when you want to access the IO ports
-- * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-- * you'd need 8kB of bitmaps/process, which is a bit excessive.
-- *
-- * Here we just change the eflags value on the stack: we allow
-- * only the super-user to do it. This depends on the stack-layout
-- * on system-call entry - see also fork() and the signal handling
-- * code.
-- */
+-	.align 4
+-.LENDFDEDLSI1:
 -
--asmlinkage long sys_iopl(unsigned long unused)
--{
--	volatile struct pt_regs * regs = (struct pt_regs *) &unused;
--	unsigned int level = regs->ebx;
--	unsigned int old = (regs->eflags >> 12) & 3;
--	struct thread_struct *t = &current->thread;
+-	.long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */
+-.LSTARTFDEDLSI2:
+-	.long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */
+-	/* HACK: See above wrt unwind library assumptions.  */
+-	.long .LSTART_rt_sigreturn-1-.	/* PC-relative start address */
+-	.long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
+-	.uleb128 0			/* Augmentation */
+-	/* What follows are the instructions for the table generation.
+-	   We record the locations of each register saved.  This is
+-	   slightly less complicated than the above, since we don't
+-	   modify the stack pointer in the process.  */
 -
--	if (level > 3)
--		return -EINVAL;
--	/* Trying to gain more privileges? */
--	if (level > old) {
--		if (!capable(CAP_SYS_RAWIO))
--			return -EPERM;
--	}
--	t->iopl = level << 12;
--	regs->eflags = (regs->eflags & ~X86_EFLAGS_IOPL) | t->iopl;
--	set_iopl_mask(t->iopl);
--	return 0;
--}
-diff --git a/arch/x86/kernel/ioport_64.c b/arch/x86/kernel/ioport_64.c
+-	do_cfa_expr(RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_esp)
+-	do_expr(0, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_eax)
+-	do_expr(1, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_ecx)
+-	do_expr(2, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_edx)
+-	do_expr(3, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_ebx)
+-	do_expr(5, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_ebp)
+-	do_expr(6, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_esi)
+-	do_expr(7, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_edi)
+-	do_expr(8, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_eip)
+-
+-	.align 4
+-.LENDFDEDLSI2:
+-	.previous
+diff --git a/arch/x86/kernel/vsyscall-sysenter_32.S b/arch/x86/kernel/vsyscall-sysenter_32.S
 deleted file mode 100644
-index 5f62fad..0000000
---- a/arch/x86/kernel/ioport_64.c
+index ed879bf..0000000
+--- a/arch/x86/kernel/vsyscall-sysenter_32.S
 +++ /dev/null
-@@ -1,117 +0,0 @@
+@@ -1,122 +0,0 @@
 -/*
-- * This contains the io-permission bitmap code - written by obz, with changes
-- * by Linus.
+- * Code for the vsyscall page.  This version uses the sysenter instruction.
+- *
+- * NOTE:
+- * 1) __kernel_vsyscall _must_ be first in this page.
+- * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
+- *    for details.
 - */
 -
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/capability.h>
--#include <linux/errno.h>
--#include <linux/types.h>
--#include <linux/ioport.h>
--#include <linux/smp.h>
--#include <linux/stddef.h>
--#include <linux/slab.h>
--#include <linux/thread_info.h>
--#include <linux/syscalls.h>
--
--/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
--static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
--{
--	int i;
--		if (new_value)
--		for (i = base; i < base + extent; i++) 
--			__set_bit(i, bitmap); 
--		else
--		for (i = base; i < base + extent; i++) 
--			clear_bit(i, bitmap); 
--}
--
 -/*
-- * this changes the io permissions bitmap in the current task.
+- * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
+- * %ecx itself for arg2. The pushing is because the sysexit instruction
+- * (found in entry.S) requires that we clobber %ecx with the desired %esp.
+- * User code might expect that %ecx is unclobbered though, as it would be
+- * for returning via the iret instruction, so we must push and pop.
+- *
+- * The caller puts arg3 in %edx, which the sysexit instruction requires
+- * for %eip. Thus, exactly as for arg2, we must push and pop.
+- *
+- * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
+- * instruction clobbers %esp, the user's %esp won't even survive entry
+- * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
+- * arg6 from the stack.
+- *
+- * You can not use this vsyscall for the clone() syscall because the
+- * three dwords on the parent stack do not get copied to the child.
 - */
--asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
--{
--	unsigned int i, max_long, bytes, bytes_updated;
--	struct thread_struct * t = &current->thread;
--	struct tss_struct * tss;
--	unsigned long *bitmap;
+-	.text
+-	.globl __kernel_vsyscall
+-	.type __kernel_vsyscall,@function
+-__kernel_vsyscall:
+-.LSTART_vsyscall:
+-	push %ecx
+-.Lpush_ecx:
+-	push %edx
+-.Lpush_edx:
+-	push %ebp
+-.Lenter_kernel:
+-	movl %esp,%ebp
+-	sysenter
 -
--	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
--		return -EINVAL;
--	if (turn_on && !capable(CAP_SYS_RAWIO))
--		return -EPERM;
+-	/* 7: align return point with nop's to make disassembly easier */
+-	.space 7,0x90
 -
--	/*
--	 * If it's the first ioperm() call in this thread's lifetime, set the
--	 * IO bitmap up. ioperm() is much less timing critical than clone(),
--	 * this is why we delay this operation until now:
--	 */
--	if (!t->io_bitmap_ptr) {
--		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
--		if (!bitmap)
--			return -ENOMEM;
+-	/* 14: System call restart point is here! (SYSENTER_RETURN-2) */
+-	jmp .Lenter_kernel
+-	/* 16: System call normal return point is here! */
+-	.globl SYSENTER_RETURN	/* Symbol used by sysenter.c  */
+-SYSENTER_RETURN:
+-	pop %ebp
+-.Lpop_ebp:
+-	pop %edx
+-.Lpop_edx:
+-	pop %ecx
+-.Lpop_ecx:
+-	ret
+-.LEND_vsyscall:
+-	.size __kernel_vsyscall,.-.LSTART_vsyscall
+-	.previous
 -
--		memset(bitmap, 0xff, IO_BITMAP_BYTES);
--		t->io_bitmap_ptr = bitmap;
--		set_thread_flag(TIF_IO_BITMAP);
--	}
+-	.section .eh_frame,"a", at progbits
+-.LSTARTFRAMEDLSI:
+-	.long .LENDCIEDLSI-.LSTARTCIEDLSI
+-.LSTARTCIEDLSI:
+-	.long 0			/* CIE ID */
+-	.byte 1			/* Version number */
+-	.string "zR"		/* NUL-terminated augmentation string */
+-	.uleb128 1		/* Code alignment factor */
+-	.sleb128 -4		/* Data alignment factor */
+-	.byte 8			/* Return address register column */
+-	.uleb128 1		/* Augmentation value length */
+-	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+-	.byte 0x0c		/* DW_CFA_def_cfa */
+-	.uleb128 4
+-	.uleb128 4
+-	.byte 0x88		/* DW_CFA_offset, column 0x8 */
+-	.uleb128 1
+-	.align 4
+-.LENDCIEDLSI:
+-	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
+-.LSTARTFDEDLSI:
+-	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
+-	.long .LSTART_vsyscall-.	/* PC-relative start address */
+-	.long .LEND_vsyscall-.LSTART_vsyscall
+-	.uleb128 0
+-	/* What follows are the instructions for the table generation.
+-	   We have to record all changes of the stack pointer.  */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpush_ecx-.LSTART_vsyscall
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x08		/* RA at offset 8 now */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpush_edx-.Lpush_ecx
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x0c		/* RA at offset 12 now */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lenter_kernel-.Lpush_edx
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x10		/* RA at offset 16 now */
+-	.byte 0x85, 0x04	/* DW_CFA_offset %ebp -16 */
+-	/* Finally the epilogue.  */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpop_ebp-.Lenter_kernel
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x0c		/* RA at offset 12 now */
+-	.byte 0xc5		/* DW_CFA_restore %ebp */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpop_edx-.Lpop_ebp
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x08		/* RA at offset 8 now */
+-	.byte 0x04		/* DW_CFA_advance_loc4 */
+-	.long .Lpop_ecx-.Lpop_edx
+-	.byte 0x0e		/* DW_CFA_def_cfa_offset */
+-	.byte 0x04		/* RA at offset 4 now */
+-	.align 4
+-.LENDFDEDLSI:
+-	.previous
 -
--	/*
--	 * do it in the per-thread copy and in the TSS ...
--	 *
--	 * Disable preemption via get_cpu() - we must not switch away
--	 * because the ->io_bitmap_max value must match the bitmap
--	 * contents:
--	 */
--	tss = &per_cpu(init_tss, get_cpu());
+-/*
+- * Get the common code for the sigreturn entry points.
+- */
+-#include "vsyscall-sigreturn_32.S"
+diff --git a/arch/x86/kernel/vsyscall_32.S b/arch/x86/kernel/vsyscall_32.S
+deleted file mode 100644
+index a5ab3dc..0000000
+--- a/arch/x86/kernel/vsyscall_32.S
++++ /dev/null
+@@ -1,15 +0,0 @@
+-#include <linux/init.h>
 -
--	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+-__INITDATA
 -
--	/*
--	 * Search for a (possibly new) maximum. This is simple and stupid,
--	 * to keep it obviously correct:
--	 */
--	max_long = 0;
--	for (i = 0; i < IO_BITMAP_LONGS; i++)
--		if (t->io_bitmap_ptr[i] != ~0UL)
--			max_long = i;
+-	.globl vsyscall_int80_start, vsyscall_int80_end
+-vsyscall_int80_start:
+-	.incbin "arch/x86/kernel/vsyscall-int80_32.so"
+-vsyscall_int80_end:
 -
--	bytes = (max_long + 1) * sizeof(long);
--	bytes_updated = max(bytes, t->io_bitmap_max);
+-	.globl vsyscall_sysenter_start, vsyscall_sysenter_end
+-vsyscall_sysenter_start:
+-	.incbin "arch/x86/kernel/vsyscall-sysenter_32.so"
+-vsyscall_sysenter_end:
 -
--	t->io_bitmap_max = bytes;
+-__FINIT
+diff --git a/arch/x86/kernel/vsyscall_32.lds.S b/arch/x86/kernel/vsyscall_32.lds.S
+deleted file mode 100644
+index 4a8b0ed..0000000
+--- a/arch/x86/kernel/vsyscall_32.lds.S
++++ /dev/null
+@@ -1,67 +0,0 @@
+-/*
+- * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
+- * object prelinked to its virtual address, and with only one read-only
+- * segment (that fits in one page).  This script controls its layout.
+- */
+-#include <asm/asm-offsets.h>
 -
--	/* Update the TSS: */
--	memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
+-SECTIONS
+-{
+-  . = VDSO_PRELINK_asm + SIZEOF_HEADERS;
 -
--	put_cpu();
+-  .hash           : { *(.hash) }		:text
+-  .gnu.hash       : { *(.gnu.hash) }
+-  .dynsym         : { *(.dynsym) }
+-  .dynstr         : { *(.dynstr) }
+-  .gnu.version    : { *(.gnu.version) }
+-  .gnu.version_d  : { *(.gnu.version_d) }
+-  .gnu.version_r  : { *(.gnu.version_r) }
 -
--	return 0;
+-  /* This linker script is used both with -r and with -shared.
+-     For the layouts to match, we need to skip more than enough
+-     space for the dynamic symbol table et al.  If this amount
+-     is insufficient, ld -shared will barf.  Just increase it here.  */
+-  . = VDSO_PRELINK_asm + 0x400;
+-
+-  .text           : { *(.text) }		:text =0x90909090
+-  .note		  : { *(.note.*) }		:text :note
+-  .eh_frame_hdr   : { *(.eh_frame_hdr) }	:text :eh_frame_hdr
+-  .eh_frame       : { KEEP (*(.eh_frame)) }	:text
+-  .dynamic        : { *(.dynamic) }		:text :dynamic
+-  .useless        : {
+-  	*(.got.plt) *(.got)
+-	*(.data .data.* .gnu.linkonce.d.*)
+-	*(.dynbss)
+-	*(.bss .bss.* .gnu.linkonce.b.*)
+-  }						:text
 -}
 -
 -/*
-- * sys_iopl has to be used when you want to access the IO ports
-- * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-- * you'd need 8kB of bitmaps/process, which is a bit excessive.
-- *
-- * Here we just change the eflags value on the stack: we allow
-- * only the super-user to do it. This depends on the stack-layout
-- * on system-call entry - see also fork() and the signal handling
-- * code.
+- * We must supply the ELF program headers explicitly to get just one
+- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 - */
+-PHDRS
+-{
+-  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+-  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+-  note PT_NOTE FLAGS(4); /* PF_R */
+-  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
+-}
 -
--asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
+-/*
+- * This controls what symbols we export from the DSO.
+- */
+-VERSION
 -{
--	unsigned int old = (regs->eflags >> 12) & 3;
+-  LINUX_2.5 {
+-    global:
+-    	__kernel_vsyscall;
+-    	__kernel_sigreturn;
+-    	__kernel_rt_sigreturn;
 -
--	if (level > 3)
--		return -EINVAL;
--	/* Trying to gain more privileges? */
--	if (level > old) {
--		if (!capable(CAP_SYS_RAWIO))
--			return -EPERM;
--	}
--	regs->eflags = (regs->eflags &~ X86_EFLAGS_IOPL) | (level << 12);
--	return 0;
+-    local: *;
+-  };
 -}
-diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
-index d3fde94..cef054b 100644
---- a/arch/x86/kernel/irq_32.c
-+++ b/arch/x86/kernel/irq_32.c
-@@ -66,11 +66,11 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-  * SMP cross-CPU interrupts have their own specific
-  * handlers).
-  */
--fastcall unsigned int do_IRQ(struct pt_regs *regs)
-+unsigned int do_IRQ(struct pt_regs *regs)
- {	
- 	struct pt_regs *old_regs;
- 	/* high bit used in ret_from_ code */
--	int irq = ~regs->orig_eax;
-+	int irq = ~regs->orig_ax;
- 	struct irq_desc *desc = irq_desc + irq;
- #ifdef CONFIG_4KSTACKS
- 	union irq_ctx *curctx, *irqctx;
-@@ -88,13 +88,13 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
- #ifdef CONFIG_DEBUG_STACKOVERFLOW
- 	/* Debugging check for stack overflow: is there less than 1KB free? */
- 	{
--		long esp;
-+		long sp;
- 
- 		__asm__ __volatile__("andl %%esp,%0" :
--					"=r" (esp) : "0" (THREAD_SIZE - 1));
--		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
-+					"=r" (sp) : "0" (THREAD_SIZE - 1));
-+		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- 			printk("do_IRQ: stack overflow: %ld\n",
--				esp - sizeof(struct thread_info));
-+				sp - sizeof(struct thread_info));
- 			dump_stack();
- 		}
- 	}
-@@ -112,7 +112,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
- 	 * current stack (which is the irq stack already after all)
- 	 */
- 	if (curctx != irqctx) {
--		int arg1, arg2, ebx;
-+		int arg1, arg2, bx;
+-
+-/* The ELF entry point can be used to set the AT_SYSINFO value.  */
+-ENTRY(__kernel_vsyscall);
+diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
+index ad4005c..3f82427 100644
+--- a/arch/x86/kernel/vsyscall_64.c
++++ b/arch/x86/kernel/vsyscall_64.c
+@@ -43,7 +43,7 @@
+ #include <asm/vgtod.h>
  
- 		/* build the stack frame on the IRQ stack */
- 		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-@@ -128,10 +128,10 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
- 			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+ #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+-#define __syscall_clobber "r11","rcx","memory"
++#define __syscall_clobber "r11","cx","memory"
+ #define __pa_vsymbol(x)			\
+ 	({unsigned long v;  		\
+ 	extern char __vsyscall_0; 	\
+@@ -190,7 +190,7 @@ time_t __vsyscall(1) vtime(time_t *t)
+ long __vsyscall(2)
+ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+ {
+-	unsigned int dummy, p;
++	unsigned int p;
+ 	unsigned long j = 0;
  
- 		asm volatile(
--			"       xchgl  %%ebx,%%esp      \n"
--			"       call   *%%edi           \n"
--			"       movl   %%ebx,%%esp      \n"
--			: "=a" (arg1), "=d" (arg2), "=b" (ebx)
-+			"       xchgl  %%ebx,%%esp    \n"
-+			"       call   *%%edi         \n"
-+			"       movl   %%ebx,%%esp    \n"
-+			: "=a" (arg1), "=d" (arg2), "=b" (bx)
- 			:  "0" (irq),   "1" (desc),  "2" (isp),
- 			   "D" (desc->handle_irq)
- 			: "memory", "cc"
-diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
-index 6b5c730..3aac154 100644
---- a/arch/x86/kernel/irq_64.c
-+++ b/arch/x86/kernel/irq_64.c
-@@ -20,6 +20,26 @@
+ 	/* Fast cache - only recompute value once per jiffies and avoid
+@@ -205,7 +205,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+ 		p = tcache->blob[1];
+ 	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
+ 		/* Load per CPU data from RDTSCP */
+-		rdtscp(dummy, dummy, p);
++		native_read_tscp(&p);
+ 	} else {
+ 		/* Load per CPU data from GDT */
+ 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+@@ -297,7 +297,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
+ 	/* Store cpu number in limit so that it can be loaded quickly
+ 	   in user space in vgetcpu.
+ 	   12 bits for the CPU and 8 bits for the node. */
+-	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
++	d = (unsigned long *)(get_cpu_gdt_table(cpu) + GDT_ENTRY_PER_CPU);
+ 	*d = 0x0f40000000000ULL;
+ 	*d |= cpu;
+ 	*d |= (node & 0xf) << 12;
+@@ -319,7 +319,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+ 	return NOTIFY_DONE;
+ }
  
- atomic_t irq_err_count;
+-static void __init map_vsyscall(void)
++void __init map_vsyscall(void)
+ {
+ 	extern char __vsyscall_0;
+ 	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+@@ -335,7 +335,6 @@ static int __init vsyscall_init(void)
+ 	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
+ 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
+ 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
+-	map_vsyscall();
+ #ifdef CONFIG_SYSCTL
+ 	register_sysctl_table(kernel_root_table2);
+ #endif
+diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
+index 77c25b3..a66e9c1 100644
+--- a/arch/x86/kernel/x8664_ksyms_64.c
++++ b/arch/x86/kernel/x8664_ksyms_64.c
+@@ -8,6 +8,7 @@
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
++#include <asm/desc.h>
  
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
-+	/*
-+	 * Currently unexpected vectors happen only on SMP and APIC.
-+	 * We _must_ ack these because every local APIC has only N
-+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-+	 * holds up an irq slot - in excessive cases (when multiple
-+	 * unexpected vectors occur) that might lock up the APIC
-+	 * completely.
-+	 * But don't ack when the APIC is disabled. -AK
-+	 */
-+	if (!disable_apic)
-+		ack_APIC_irq();
-+}
-+
- #ifdef CONFIG_DEBUG_STACKOVERFLOW
- /*
-  * Probabilistic stack overflow check:
-@@ -33,11 +53,11 @@ static inline void stack_overflow_check(struct pt_regs *regs)
- 	u64 curbase = (u64)task_stack_page(current);
- 	static unsigned long warned = -60*HZ;
+ EXPORT_SYMBOL(kernel_thread);
  
--	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
--	    regs->rsp <  curbase + sizeof(struct thread_info) + 128 &&
-+	if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
-+	    regs->sp <  curbase + sizeof(struct thread_info) + 128 &&
- 	    time_after(jiffies, warned + 60*HZ)) {
--		printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
--		       current->comm, curbase, regs->rsp);
-+		printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
-+		       current->comm, curbase, regs->sp);
- 		show_stack(NULL,NULL);
- 		warned = jiffies;
- 	}
-@@ -142,7 +162,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
- 	struct pt_regs *old_regs = set_irq_regs(regs);
+@@ -34,13 +35,6 @@ EXPORT_SYMBOL(__copy_from_user_inatomic);
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(clear_page);
  
- 	/* high bit used in ret_from_ code  */
--	unsigned vector = ~regs->orig_rax;
-+	unsigned vector = ~regs->orig_ax;
- 	unsigned irq;
+-#ifdef CONFIG_SMP
+-extern void  __write_lock_failed(rwlock_t *rw);
+-extern void  __read_lock_failed(rwlock_t *rw);
+-EXPORT_SYMBOL(__write_lock_failed);
+-EXPORT_SYMBOL(__read_lock_failed);
+-#endif
+-
+ /* Export string functions. We normally rely on gcc builtin for most of these,
+    but gcc sometimes decides not to inline them. */    
+ #undef memcpy
+@@ -60,3 +54,8 @@ EXPORT_SYMBOL(init_level4_pgt);
+ EXPORT_SYMBOL(load_gs_index);
  
- 	exit_idle();
-diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
+ EXPORT_SYMBOL(_proxy_pda);
++
++#ifdef CONFIG_PARAVIRT
++/* Virtualized guests may want to use it */
++EXPORT_SYMBOL_GPL(cpu_gdt_descr);
++#endif
+diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
 new file mode 100644
-index 0000000..7335430
+index 0000000..c83e1c9
 --- /dev/null
-+++ b/arch/x86/kernel/kdebugfs.c
-@@ -0,0 +1,65 @@
-+/*
-+ * Architecture specific debugfs files
-+ *
-+ * Copyright (C) 2007, Intel Corp.
-+ *	Huang Ying <ying.huang@intel.com>
-+ *
-+ * This file is released under the GPLv2.
-+ */
++++ b/arch/x86/kvm/Kconfig
+@@ -0,0 +1,57 @@
++#
++# KVM configuration
++#
++config HAVE_KVM
++       bool
 +
-+#include <linux/debugfs.h>
-+#include <linux/stat.h>
-+#include <linux/init.h>
++menuconfig VIRTUALIZATION
++	bool "Virtualization"
++	depends on HAVE_KVM || X86
++	default y
++	---help---
++	  Say Y here to get to see options for using your Linux host to run other
++	  operating systems inside virtual machines (guests).
++	  This option alone does not add any kernel code.
++
++	  If you say N, all options in this submenu will be skipped and disabled.
++
++if VIRTUALIZATION
++
++config KVM
++	tristate "Kernel-based Virtual Machine (KVM) support"
++	depends on HAVE_KVM && EXPERIMENTAL
++	select PREEMPT_NOTIFIERS
++	select ANON_INODES
++	---help---
++	  Support hosting fully virtualized guest machines using hardware
++	  virtualization extensions.  You will need a fairly recent
++	  processor equipped with virtualization extensions. You will also
++	  need to select one or more of the processor modules below.
 +
-+#include <asm/setup.h>
++	  This module provides access to the hardware capabilities through
++	  a character device node named /dev/kvm.
 +
-+#ifdef CONFIG_DEBUG_BOOT_PARAMS
-+static struct debugfs_blob_wrapper boot_params_blob = {
-+	.data = &boot_params,
-+	.size = sizeof(boot_params),
-+};
++	  To compile this as a module, choose M here: the module
++	  will be called kvm.
 +
-+static int __init boot_params_kdebugfs_init(void)
-+{
-+	int error;
-+	struct dentry *dbp, *version, *data;
++	  If unsure, say N.
 +
-+	dbp = debugfs_create_dir("boot_params", NULL);
-+	if (!dbp) {
-+		error = -ENOMEM;
-+		goto err_return;
-+	}
-+	version = debugfs_create_x16("version", S_IRUGO, dbp,
-+				     &boot_params.hdr.version);
-+	if (!version) {
-+		error = -ENOMEM;
-+		goto err_dir;
-+	}
-+	data = debugfs_create_blob("data", S_IRUGO, dbp,
-+				   &boot_params_blob);
-+	if (!data) {
-+		error = -ENOMEM;
-+		goto err_version;
-+	}
-+	return 0;
-+err_version:
-+	debugfs_remove(version);
-+err_dir:
-+	debugfs_remove(dbp);
-+err_return:
-+	return error;
-+}
-+#endif
++config KVM_INTEL
++	tristate "KVM for Intel processors support"
++	depends on KVM
++	---help---
++	  Provides support for KVM on Intel processors equipped with the VT
++	  extensions.
 +
-+static int __init arch_kdebugfs_init(void)
-+{
-+	int error = 0;
++config KVM_AMD
++	tristate "KVM for AMD processors support"
++	depends on KVM
++	---help---
++	  Provides support for KVM on AMD processors equipped with the AMD-V
++	  (SVM) extensions.
 +
-+#ifdef CONFIG_DEBUG_BOOT_PARAMS
-+	error = boot_params_kdebugfs_init();
-+#endif
++# OK, it's a little counter-intuitive to do this, but it puts it neatly under
++# the virtualization menu.
++source drivers/lguest/Kconfig
 +
-+	return error;
-+}
++endif # VIRTUALIZATION
+diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
+new file mode 100644
+index 0000000..ffdd0b3
+--- /dev/null
++++ b/arch/x86/kvm/Makefile
+@@ -0,0 +1,14 @@
++#
++# Makefile for Kernel-based Virtual Machine module
++#
 +
-+arch_initcall(arch_kdebugfs_init);
-diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
++common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
++
++EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
++
++kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o
++obj-$(CONFIG_KVM) += kvm.o
++kvm-intel-objs = vmx.o
++obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
++kvm-amd-objs = svm.o
++obj-$(CONFIG_KVM_AMD) += kvm-amd.o
+diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
 new file mode 100644
-index 0000000..a99e764
+index 0000000..ab29cf2
 --- /dev/null
-+++ b/arch/x86/kernel/kprobes.c
-@@ -0,0 +1,1066 @@
++++ b/arch/x86/kvm/i8259.c
+@@ -0,0 +1,450 @@
 +/*
-+ *  Kernel Probes (KProbes)
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
++ * 8259 interrupt controller emulation
 + *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
++ * Copyright (c) 2003-2004 Fabrice Bellard
++ * Copyright (c) 2007 Intel Corporation
 + *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
 + *
-+ * Copyright (C) IBM Corporation, 2002, 2004
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
 + *
-+ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
-+ *		Probes initial implementation ( includes contributions from
-+ *		Rusty Russell).
-+ * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
-+ *		interface to access function arguments.
-+ * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
-+ *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
-+ * 2005-Mar	Roland McGrath <roland@redhat.com>
-+ *		Fixed to handle %rip-relative addressing mode correctly.
-+ * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
-+ *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
-+ *		<prasanna@in.ibm.com> added function-return probes.
-+ * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
-+ * 		Added function return probes functionality
-+ * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
-+ * 		kprobe-booster and kretprobe-booster for i386.
-+ * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
-+ * 		and kretprobe-booster for x86-64
-+ * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
-+ * 		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
-+ * 		unified x86 kprobes code.
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++ * THE SOFTWARE.
++ * Authors:
++ *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
++ *   Port from Qemu.
 + */
++#include <linux/mm.h>
++#include "irq.h"
 +
-+#include <linux/kprobes.h>
-+#include <linux/ptrace.h>
-+#include <linux/string.h>
-+#include <linux/slab.h>
-+#include <linux/hardirq.h>
-+#include <linux/preempt.h>
-+#include <linux/module.h>
-+#include <linux/kdebug.h>
-+
-+#include <asm/cacheflush.h>
-+#include <asm/desc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/alternative.h>
-+
-+void jprobe_return_end(void);
-+
-+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
-+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
++#include <linux/kvm_host.h>
 +
-+#ifdef CONFIG_X86_64
-+#define stack_addr(regs) ((unsigned long *)regs->sp)
-+#else
 +/*
-+ * "&regs->sp" looks wrong, but it's correct for x86_32.  x86_32 CPUs
-+ * don't save the ss and esp registers if the CPU is already in kernel
-+ * mode when it traps.  So for kprobes, regs->sp and regs->ss are not
-+ * the [nonexistent] saved stack pointer and ss register, but rather
-+ * the top 8 bytes of the pre-int3 stack.  So &regs->sp happens to
-+ * point to the top of the pre-int3 stack.
++ * set irq level. If an edge is detected, then the IRR is set to 1
 + */
-+#define stack_addr(regs) ((unsigned long *)&regs->sp)
-+#endif
-+
-+#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
-+	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
-+	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
-+	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
-+	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
-+	 << (row % 32))
-+	/*
-+	 * Undefined/reserved opcodes, conditional jump, Opcode Extension
-+	 * Groups, and some special opcodes can not boost.
-+	 */
-+static const u32 twobyte_is_boostable[256 / 32] = {
-+	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
-+	/*      ----------------------------------------------          */
-+	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
-+	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
-+	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
-+	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
-+	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
-+	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
-+	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
-+	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
-+	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
-+	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
-+	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
-+	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
-+	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
-+	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
-+	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
-+	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
-+	/*      -----------------------------------------------         */
-+	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
-+};
-+static const u32 onebyte_has_modrm[256 / 32] = {
-+	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
-+	/*      -----------------------------------------------         */
-+	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
-+	W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
-+	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
-+	W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
-+	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
-+	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
-+	W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
-+	W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
-+	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
-+	W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
-+	W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
-+	W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
-+	W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
-+	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
-+	W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
-+	W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1)   /* f0 */
-+	/*      -----------------------------------------------         */
-+	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
-+};
-+static const u32 twobyte_has_modrm[256 / 32] = {
-+	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
-+	/*      -----------------------------------------------         */
-+	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
-+	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
-+	W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
-+	W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
-+	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
-+	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
-+	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
-+	W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
-+	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
-+	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
-+	W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
-+	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
-+	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
-+	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
-+	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
-+	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* ff */
-+	/*      -----------------------------------------------         */
-+	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
-+};
-+#undef W
-+
-+struct kretprobe_blackpoint kretprobe_blacklist[] = {
-+	{"__switch_to", }, /* This function switches only current task, but
-+			      doesn't switch kernel stack.*/
-+	{NULL, NULL}	/* Terminator */
-+};
-+const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
-+
-+/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-+static void __kprobes set_jmp_op(void *from, void *to)
++static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
 +{
-+	struct __arch_jmp_op {
-+		char op;
-+		s32 raddr;
-+	} __attribute__((packed)) * jop;
-+	jop = (struct __arch_jmp_op *)from;
-+	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
-+	jop->op = RELATIVEJUMP_INSTRUCTION;
++	int mask;
++	mask = 1 << irq;
++	if (s->elcr & mask)	/* level triggered */
++		if (level) {
++			s->irr |= mask;
++			s->last_irr |= mask;
++		} else {
++			s->irr &= ~mask;
++			s->last_irr &= ~mask;
++		}
++	else	/* edge triggered */
++		if (level) {
++			if ((s->last_irr & mask) == 0)
++				s->irr |= mask;
++			s->last_irr |= mask;
++		} else
++			s->last_irr &= ~mask;
 +}
 +
 +/*
-+ * Check for the REX prefix which can only exist on X86_64
-+ * X86_32 always returns 0
++ * return the highest priority found in mask (highest = smallest
++ * number). Return 8 if no irq
 + */
-+static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
++static inline int get_priority(struct kvm_kpic_state *s, int mask)
 +{
-+#ifdef CONFIG_X86_64
-+	if ((*insn & 0xf0) == 0x40)
-+		return 1;
-+#endif
-+	return 0;
++	int priority;
++	if (mask == 0)
++		return 8;
++	priority = 0;
++	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
++		priority++;
++	return priority;
 +}
 +
 +/*
-+ * Returns non-zero if opcode is boostable.
-+ * RIP relative instructions are adjusted at copying time in 64 bits mode
++ * return the pic wanted interrupt. return -1 if none
 + */
-+static int __kprobes can_boost(kprobe_opcode_t *opcodes)
++static int pic_get_irq(struct kvm_kpic_state *s)
 +{
-+	kprobe_opcode_t opcode;
-+	kprobe_opcode_t *orig_opcodes = opcodes;
-+
-+retry:
-+	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
-+		return 0;
-+	opcode = *(opcodes++);
-+
-+	/* 2nd-byte opcode */
-+	if (opcode == 0x0f) {
-+		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
-+			return 0;
-+		return test_bit(*opcodes,
-+				(unsigned long *)twobyte_is_boostable);
-+	}
++	int mask, cur_priority, priority;
 +
-+	switch (opcode & 0xf0) {
-+#ifdef CONFIG_X86_64
-+	case 0x40:
-+		goto retry; /* REX prefix is boostable */
-+#endif
-+	case 0x60:
-+		if (0x63 < opcode && opcode < 0x67)
-+			goto retry; /* prefixes */
-+		/* can't boost Address-size override and bound */
-+		return (opcode != 0x62 && opcode != 0x67);
-+	case 0x70:
-+		return 0; /* can't boost conditional jump */
-+	case 0xc0:
-+		/* can't boost software-interruptions */
-+		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
-+	case 0xd0:
-+		/* can boost AA* and XLAT */
-+		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
-+	case 0xe0:
-+		/* can boost in/out and absolute jmps */
-+		return ((opcode & 0x04) || opcode == 0xea);
-+	case 0xf0:
-+		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
-+			goto retry; /* lock/rep(ne) prefix */
-+		/* clear and set flags are boostable */
-+		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
-+	default:
-+		/* segment override prefixes are boostable */
-+		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
-+			goto retry; /* prefixes */
-+		/* CS override prefix and call are not boostable */
-+		return (opcode != 0x2e && opcode != 0x9a);
-+	}
++	mask = s->irr & ~s->imr;
++	priority = get_priority(s, mask);
++	if (priority == 8)
++		return -1;
++	/*
++	 * compute current priority. If special fully nested mode on the
++	 * master, the IRQ coming from the slave is not taken into account
++	 * for the priority computation.
++	 */
++	mask = s->isr;
++	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
++		mask &= ~(1 << 2);
++	cur_priority = get_priority(s, mask);
++	if (priority < cur_priority)
++		/*
++		 * higher priority found: an irq should be generated
++		 */
++		return (priority + s->priority_add) & 7;
++	else
++		return -1;
 +}
 +
 +/*
-+ * Returns non-zero if opcode modifies the interrupt flag.
++ * raise irq to CPU if necessary. must be called every time the active
++ * irq may change
 + */
-+static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
++static void pic_update_irq(struct kvm_pic *s)
 +{
-+	switch (*insn) {
-+	case 0xfa:		/* cli */
-+	case 0xfb:		/* sti */
-+	case 0xcf:		/* iret/iretd */
-+	case 0x9d:		/* popf/popfd */
-+		return 1;
++	int irq2, irq;
++
++	irq2 = pic_get_irq(&s->pics[1]);
++	if (irq2 >= 0) {
++		/*
++		 * if irq request by slave pic, signal master PIC
++		 */
++		pic_set_irq1(&s->pics[0], 2, 1);
++		pic_set_irq1(&s->pics[0], 2, 0);
 +	}
++	irq = pic_get_irq(&s->pics[0]);
++	if (irq >= 0)
++		s->irq_request(s->irq_request_opaque, 1);
++	else
++		s->irq_request(s->irq_request_opaque, 0);
++}
 +
-+	/*
-+	 * on X86_64, 0x40-0x4f are REX prefixes so we need to look
-+	 * at the next byte instead.. but of course not recurse infinitely
-+	 */
-+	if (is_REX_prefix(insn))
-+		return is_IF_modifier(++insn);
++void kvm_pic_update_irq(struct kvm_pic *s)
++{
++	pic_update_irq(s);
++}
 +
-+	return 0;
++void kvm_pic_set_irq(void *opaque, int irq, int level)
++{
++	struct kvm_pic *s = opaque;
++
++	pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
++	pic_update_irq(s);
 +}
 +
 +/*
-+ * Adjust the displacement if the instruction uses the %rip-relative
-+ * addressing mode.
-+ * If it does, Return the address of the 32-bit displacement word.
-+ * If not, return null.
-+ * Only applicable to 64-bit x86.
++ * acknowledge interrupt 'irq'
 + */
-+static void __kprobes fix_riprel(struct kprobe *p)
++static inline void pic_intack(struct kvm_kpic_state *s, int irq)
 +{
-+#ifdef CONFIG_X86_64
-+	u8 *insn = p->ainsn.insn;
-+	s64 disp;
-+	int need_modrm;
++	if (s->auto_eoi) {
++		if (s->rotate_on_auto_eoi)
++			s->priority_add = (irq + 1) & 7;
++	} else
++		s->isr |= (1 << irq);
++	/*
++	 * We don't clear a level sensitive interrupt here
++	 */
++	if (!(s->elcr & (1 << irq)))
++		s->irr &= ~(1 << irq);
++}
 +
-+	/* Skip legacy instruction prefixes.  */
-+	while (1) {
-+		switch (*insn) {
-+		case 0x66:
-+		case 0x67:
-+		case 0x2e:
-+		case 0x3e:
-+		case 0x26:
-+		case 0x64:
-+		case 0x65:
-+		case 0x36:
-+		case 0xf0:
-+		case 0xf3:
-+		case 0xf2:
-+			++insn;
-+			continue;
-+		}
-+		break;
++int kvm_pic_read_irq(struct kvm_pic *s)
++{
++	int irq, irq2, intno;
++
++	irq = pic_get_irq(&s->pics[0]);
++	if (irq >= 0) {
++		pic_intack(&s->pics[0], irq);
++		if (irq == 2) {
++			irq2 = pic_get_irq(&s->pics[1]);
++			if (irq2 >= 0)
++				pic_intack(&s->pics[1], irq2);
++			else
++				/*
++				 * spurious IRQ on slave controller
++				 */
++				irq2 = 7;
++			intno = s->pics[1].irq_base + irq2;
++			irq = irq2 + 8;
++		} else
++			intno = s->pics[0].irq_base + irq;
++	} else {
++		/*
++		 * spurious IRQ on host controller
++		 */
++		irq = 7;
++		intno = s->pics[0].irq_base + irq;
 +	}
++	pic_update_irq(s);
 +
-+	/* Skip REX instruction prefix.  */
-+	if (is_REX_prefix(insn))
-+		++insn;
++	return intno;
++}
 +
-+	if (*insn == 0x0f) {
-+		/* Two-byte opcode.  */
-+		++insn;
-+		need_modrm = test_bit(*insn,
-+				      (unsigned long *)twobyte_has_modrm);
-+	} else
-+		/* One-byte opcode.  */
-+		need_modrm = test_bit(*insn,
-+				      (unsigned long *)onebyte_has_modrm);
++void kvm_pic_reset(struct kvm_kpic_state *s)
++{
++	s->last_irr = 0;
++	s->irr = 0;
++	s->imr = 0;
++	s->isr = 0;
++	s->priority_add = 0;
++	s->irq_base = 0;
++	s->read_reg_select = 0;
++	s->poll = 0;
++	s->special_mask = 0;
++	s->init_state = 0;
++	s->auto_eoi = 0;
++	s->rotate_on_auto_eoi = 0;
++	s->special_fully_nested_mode = 0;
++	s->init4 = 0;
++}
 +
-+	if (need_modrm) {
-+		u8 modrm = *++insn;
-+		if ((modrm & 0xc7) == 0x05) {
-+			/* %rip+disp32 addressing mode */
-+			/* Displacement follows ModRM byte.  */
-+			++insn;
++static void pic_ioport_write(void *opaque, u32 addr, u32 val)
++{
++	struct kvm_kpic_state *s = opaque;
++	int priority, cmd, irq;
++
++	addr &= 1;
++	if (addr == 0) {
++		if (val & 0x10) {
++			kvm_pic_reset(s);	/* init */
 +			/*
-+			 * The copied instruction uses the %rip-relative
-+			 * addressing mode.  Adjust the displacement for the
-+			 * difference between the original location of this
-+			 * instruction and the location of the copy that will
-+			 * actually be run.  The tricky bit here is making sure
-+			 * that the sign extension happens correctly in this
-+			 * calculation, since we need a signed 32-bit result to
-+			 * be sign-extended to 64 bits when it's added to the
-+			 * %rip value and yield the same 64-bit result that the
-+			 * sign-extension of the original signed 32-bit
-+			 * displacement would have given.
++			 * deassert a pending interrupt
 +			 */
-+			disp = (u8 *) p->addr + *((s32 *) insn) -
-+			       (u8 *) p->ainsn.insn;
-+			BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
-+			*(s32 *)insn = (s32) disp;
++			s->pics_state->irq_request(s->pics_state->
++						   irq_request_opaque, 0);
++			s->init_state = 1;
++			s->init4 = val & 1;
++			if (val & 0x02)
++				printk(KERN_ERR "single mode not supported");
++			if (val & 0x08)
++				printk(KERN_ERR
++				       "level sensitive irq not supported");
++		} else if (val & 0x08) {
++			if (val & 0x04)
++				s->poll = 1;
++			if (val & 0x02)
++				s->read_reg_select = val & 1;
++			if (val & 0x40)
++				s->special_mask = (val >> 5) & 1;
++		} else {
++			cmd = val >> 5;
++			switch (cmd) {
++			case 0:
++			case 4:
++				s->rotate_on_auto_eoi = cmd >> 2;
++				break;
++			case 1:	/* end of interrupt */
++			case 5:
++				priority = get_priority(s, s->isr);
++				if (priority != 8) {
++					irq = (priority + s->priority_add) & 7;
++					s->isr &= ~(1 << irq);
++					if (cmd == 5)
++						s->priority_add = (irq + 1) & 7;
++					pic_update_irq(s->pics_state);
++				}
++				break;
++			case 3:
++				irq = val & 7;
++				s->isr &= ~(1 << irq);
++				pic_update_irq(s->pics_state);
++				break;
++			case 6:
++				s->priority_add = (val + 1) & 7;
++				pic_update_irq(s->pics_state);
++				break;
++			case 7:
++				irq = val & 7;
++				s->isr &= ~(1 << irq);
++				s->priority_add = (irq + 1) & 7;
++				pic_update_irq(s->pics_state);
++				break;
++			default:
++				break;	/* no operation */
++			}
++		}
++	} else
++		switch (s->init_state) {
++		case 0:		/* normal mode */
++			s->imr = val;
++			pic_update_irq(s->pics_state);
++			break;
++		case 1:
++			s->irq_base = val & 0xf8;
++			s->init_state = 2;
++			break;
++		case 2:
++			if (s->init4)
++				s->init_state = 3;
++			else
++				s->init_state = 0;
++			break;
++		case 3:
++			s->special_fully_nested_mode = (val >> 4) & 1;
++			s->auto_eoi = (val >> 1) & 1;
++			s->init_state = 0;
++			break;
 +		}
-+	}
-+#endif
 +}
 +
-+static void __kprobes arch_copy_kprobe(struct kprobe *p)
++static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
 +{
-+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-+
-+	fix_riprel(p);
++	int ret;
 +
-+	if (can_boost(p->addr))
-+		p->ainsn.boostable = 0;
-+	else
-+		p->ainsn.boostable = -1;
++	ret = pic_get_irq(s);
++	if (ret >= 0) {
++		if (addr1 >> 7) {
++			s->pics_state->pics[0].isr &= ~(1 << 2);
++			s->pics_state->pics[0].irr &= ~(1 << 2);
++		}
++		s->irr &= ~(1 << ret);
++		s->isr &= ~(1 << ret);
++		if (addr1 >> 7 || ret != 2)
++			pic_update_irq(s->pics_state);
++	} else {
++		ret = 0x07;
++		pic_update_irq(s->pics_state);
++	}
 +
-+	p->opcode = *p->addr;
++	return ret;
 +}
 +
-+int __kprobes arch_prepare_kprobe(struct kprobe *p)
++static u32 pic_ioport_read(void *opaque, u32 addr1)
 +{
-+	/* insn: must be on special executable page on x86. */
-+	p->ainsn.insn = get_insn_slot();
-+	if (!p->ainsn.insn)
-+		return -ENOMEM;
-+	arch_copy_kprobe(p);
-+	return 0;
-+}
++	struct kvm_kpic_state *s = opaque;
++	unsigned int addr;
++	int ret;
 +
-+void __kprobes arch_arm_kprobe(struct kprobe *p)
-+{
-+	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
++	addr = addr1;
++	addr &= 1;
++	if (s->poll) {
++		ret = pic_poll_read(s, addr1);
++		s->poll = 0;
++	} else
++		if (addr == 0)
++			if (s->read_reg_select)
++				ret = s->isr;
++			else
++				ret = s->irr;
++		else
++			ret = s->imr;
++	return ret;
 +}
 +
-+void __kprobes arch_disarm_kprobe(struct kprobe *p)
++static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
 +{
-+	text_poke(p->addr, &p->opcode, 1);
++	struct kvm_kpic_state *s = opaque;
++	s->elcr = val & s->elcr_mask;
 +}
 +
-+void __kprobes arch_remove_kprobe(struct kprobe *p)
++static u32 elcr_ioport_read(void *opaque, u32 addr1)
 +{
-+	mutex_lock(&kprobe_mutex);
-+	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
-+	mutex_unlock(&kprobe_mutex);
++	struct kvm_kpic_state *s = opaque;
++	return s->elcr;
 +}
 +
-+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
++static int picdev_in_range(struct kvm_io_device *this, gpa_t addr)
 +{
-+	kcb->prev_kprobe.kp = kprobe_running();
-+	kcb->prev_kprobe.status = kcb->kprobe_status;
-+	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
-+	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
++	switch (addr) {
++	case 0x20:
++	case 0x21:
++	case 0xa0:
++	case 0xa1:
++	case 0x4d0:
++	case 0x4d1:
++		return 1;
++	default:
++		return 0;
++	}
 +}
 +
-+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
++static void picdev_write(struct kvm_io_device *this,
++			 gpa_t addr, int len, const void *val)
 +{
-+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
-+	kcb->kprobe_status = kcb->prev_kprobe.status;
-+	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
-+	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
-+}
++	struct kvm_pic *s = this->private;
++	unsigned char data = *(unsigned char *)val;
 +
-+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
-+				struct kprobe_ctlblk *kcb)
-+{
-+	__get_cpu_var(current_kprobe) = p;
-+	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
-+		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-+	if (is_IF_modifier(p->ainsn.insn))
-+		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
++	if (len != 1) {
++		if (printk_ratelimit())
++			printk(KERN_ERR "PIC: non byte write\n");
++		return;
++	}
++	switch (addr) {
++	case 0x20:
++	case 0x21:
++	case 0xa0:
++	case 0xa1:
++		pic_ioport_write(&s->pics[addr >> 7], addr, data);
++		break;
++	case 0x4d0:
++	case 0x4d1:
++		elcr_ioport_write(&s->pics[addr & 1], addr, data);
++		break;
++	}
 +}
 +
-+static void __kprobes clear_btf(void)
++static void picdev_read(struct kvm_io_device *this,
++			gpa_t addr, int len, void *val)
 +{
-+	if (test_thread_flag(TIF_DEBUGCTLMSR))
-+		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
-+}
++	struct kvm_pic *s = this->private;
++	unsigned char data = 0;
 +
-+static void __kprobes restore_btf(void)
-+{
-+	if (test_thread_flag(TIF_DEBUGCTLMSR))
-+		wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
++	if (len != 1) {
++		if (printk_ratelimit())
++			printk(KERN_ERR "PIC: non byte read\n");
++		return;
++	}
++	switch (addr) {
++	case 0x20:
++	case 0x21:
++	case 0xa0:
++	case 0xa1:
++		data = pic_ioport_read(&s->pics[addr >> 7], addr);
++		break;
++	case 0x4d0:
++	case 0x4d1:
++		data = elcr_ioport_read(&s->pics[addr & 1], addr);
++		break;
++	}
++	*(unsigned char *)val = data;
 +}
 +
-+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
++/*
++ * callback when PIC0 irq status changed
++ */
++static void pic_irq_request(void *opaque, int level)
 +{
-+	clear_btf();
-+	regs->flags |= X86_EFLAGS_TF;
-+	regs->flags &= ~X86_EFLAGS_IF;
-+	/* single step inline if the instruction is an int3 */
-+	if (p->opcode == BREAKPOINT_INSTRUCTION)
-+		regs->ip = (unsigned long)p->addr;
-+	else
-+		regs->ip = (unsigned long)p->ainsn.insn;
++	struct kvm *kvm = opaque;
++	struct kvm_vcpu *vcpu = kvm->vcpus[0];
++
++	pic_irqchip(kvm)->output = level;
++	if (vcpu)
++		kvm_vcpu_kick(vcpu);
 +}
 +
-+/* Called with kretprobe_lock held */
-+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
-+				      struct pt_regs *regs)
++struct kvm_pic *kvm_create_pic(struct kvm *kvm)
 +{
-+	unsigned long *sara = stack_addr(regs);
-+
-+	ri->ret_addr = (kprobe_opcode_t *) *sara;
++	struct kvm_pic *s;
++	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
++	if (!s)
++		return NULL;
++	s->pics[0].elcr_mask = 0xf8;
++	s->pics[1].elcr_mask = 0xde;
++	s->irq_request = pic_irq_request;
++	s->irq_request_opaque = kvm;
++	s->pics[0].pics_state = s;
++	s->pics[1].pics_state = s;
 +
-+	/* Replace the return addr with trampoline addr */
-+	*sara = (unsigned long) &kretprobe_trampoline;
++	/*
++	 * Initialize PIO device
++	 */
++	s->dev.read = picdev_read;
++	s->dev.write = picdev_write;
++	s->dev.in_range = picdev_in_range;
++	s->dev.private = s;
++	kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev);
++	return s;
 +}
+diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
+new file mode 100644
+index 0000000..e571475
+--- /dev/null
++++ b/arch/x86/kvm/irq.c
+@@ -0,0 +1,78 @@
++/*
++ * irq.c: API for in kernel interrupt controller
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
++ * Place - Suite 330, Boston, MA 02111-1307 USA.
++ * Authors:
++ *   Yaozu (Eddie) Dong <Eddie.dong at intel.com>
++ *
++ */
 +
-+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
-+				       struct kprobe_ctlblk *kcb)
++#include <linux/module.h>
++#include <linux/kvm_host.h>
++
++#include "irq.h"
++
++/*
++ * check if there is pending interrupt without
++ * intack.
++ */
++int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 +{
-+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
-+	if (p->ainsn.boostable == 1 && !p->post_handler) {
-+		/* Boost up -- we can execute copied instructions directly */
-+		reset_current_kprobe();
-+		regs->ip = (unsigned long)p->ainsn.insn;
-+		preempt_enable_no_resched();
-+		return;
++	struct kvm_pic *s;
++
++	if (kvm_apic_has_interrupt(v) == -1) {	/* LAPIC */
++		if (kvm_apic_accept_pic_intr(v)) {
++			s = pic_irqchip(v->kvm);	/* PIC */
++			return s->output;
++		} else
++			return 0;
 +	}
-+#endif
-+	prepare_singlestep(p, regs);
-+	kcb->kprobe_status = KPROBE_HIT_SS;
++	return 1;
 +}
++EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
 +
 +/*
-+ * We have reentered the kprobe_handler(), since another probe was hit while
-+ * within the handler. We save the original kprobes variables and just single
-+ * step on the instruction of the new probe without calling any user handlers.
++ * Read pending interrupt vector and intack.
 + */
-+static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
-+				    struct kprobe_ctlblk *kcb)
++int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 +{
-+	switch (kcb->kprobe_status) {
-+	case KPROBE_HIT_SSDONE:
-+#ifdef CONFIG_X86_64
-+		/* TODO: Provide re-entrancy from post_kprobes_handler() and
-+		 * avoid exception stack corruption while single-stepping on
-+		 * the instruction of the new probe.
-+		 */
-+		arch_disarm_kprobe(p);
-+		regs->ip = (unsigned long)p->addr;
-+		reset_current_kprobe();
-+		preempt_enable_no_resched();
-+		break;
-+#endif
-+	case KPROBE_HIT_ACTIVE:
-+		save_previous_kprobe(kcb);
-+		set_current_kprobe(p, regs, kcb);
-+		kprobes_inc_nmissed_count(p);
-+		prepare_singlestep(p, regs);
-+		kcb->kprobe_status = KPROBE_REENTER;
-+		break;
-+	case KPROBE_HIT_SS:
-+		if (p == kprobe_running()) {
-+			regs->flags &= ~TF_MASK;
-+			regs->flags |= kcb->kprobe_saved_flags;
-+			return 0;
-+		} else {
-+			/* A probe has been hit in the codepath leading up
-+			 * to, or just after, single-stepping of a probed
-+			 * instruction. This entire codepath should strictly
-+			 * reside in .kprobes.text section. Raise a warning
-+			 * to highlight this peculiar case.
-+			 */
++	struct kvm_pic *s;
++	int vector;
++
++	vector = kvm_get_apic_interrupt(v);	/* APIC */
++	if (vector == -1) {
++		if (kvm_apic_accept_pic_intr(v)) {
++			s = pic_irqchip(v->kvm);
++			s->output = 0;		/* PIC */
++			vector = kvm_pic_read_irq(s);
 +		}
-+	default:
-+		/* impossible cases */
-+		WARN_ON(1);
-+		return 0;
 +	}
++	return vector;
++}
++EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 +
-+	return 1;
++void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
++{
++	kvm_inject_apic_timer_irqs(vcpu);
++	/* TODO: PIT, RTC etc. */
 +}
++EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
 +
++void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
++{
++	kvm_apic_timer_intr_post(vcpu, vec);
++	/* TODO: PIT, RTC etc. */
++}
++EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
+diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
+new file mode 100644
+index 0000000..fa5ed5d
+--- /dev/null
++++ b/arch/x86/kvm/irq.h
+@@ -0,0 +1,88 @@
 +/*
-+ * Interrupts are disabled on entry as trap3 is an interrupt gate and they
-+ * remain disabled thorough out this function.
++ * irq.h: in kernel interrupt controller related definitions
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
++ * Place - Suite 330, Boston, MA 02111-1307 USA.
++ * Authors:
++ *   Yaozu (Eddie) Dong <Eddie.dong at intel.com>
++ *
 + */
-+static int __kprobes kprobe_handler(struct pt_regs *regs)
-+{
-+	kprobe_opcode_t *addr;
-+	struct kprobe *p;
-+	struct kprobe_ctlblk *kcb;
 +
-+	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
-+	if (*addr != BREAKPOINT_INSTRUCTION) {
-+		/*
-+		 * The breakpoint instruction was removed right
-+		 * after we hit it.  Another cpu has removed
-+		 * either a probepoint or a debugger breakpoint
-+		 * at this address.  In either case, no further
-+		 * handling of this interrupt is appropriate.
-+		 * Back up over the (now missing) int3 and run
-+		 * the original instruction.
-+		 */
-+		regs->ip = (unsigned long)addr;
-+		return 1;
-+	}
++#ifndef __IRQ_H
++#define __IRQ_H
 +
-+	/*
-+	 * We don't want to be preempted for the entire
-+	 * duration of kprobe processing. We conditionally
-+	 * re-enable preemption at the end of this function,
-+	 * and also in reenter_kprobe() and setup_singlestep().
-+	 */
-+	preempt_disable();
++#include <linux/mm_types.h>
++#include <linux/hrtimer.h>
++#include <linux/kvm_host.h>
 +
-+	kcb = get_kprobe_ctlblk();
-+	p = get_kprobe(addr);
++#include "iodev.h"
++#include "ioapic.h"
++#include "lapic.h"
 +
-+	if (p) {
-+		if (kprobe_running()) {
-+			if (reenter_kprobe(p, regs, kcb))
-+				return 1;
-+		} else {
-+			set_current_kprobe(p, regs, kcb);
-+			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
++struct kvm;
++struct kvm_vcpu;
 +
-+			/*
-+			 * If we have no pre-handler or it returned 0, we
-+			 * continue with normal processing.  If we have a
-+			 * pre-handler and it returned non-zero, it prepped
-+			 * for calling the break_handler below on re-entry
-+			 * for jprobe processing, so get out doing nothing
-+			 * more here.
-+			 */
-+			if (!p->pre_handler || !p->pre_handler(p, regs))
-+				setup_singlestep(p, regs, kcb);
-+			return 1;
-+		}
-+	} else if (kprobe_running()) {
-+		p = __get_cpu_var(current_kprobe);
-+		if (p->break_handler && p->break_handler(p, regs)) {
-+			setup_singlestep(p, regs, kcb);
-+			return 1;
-+		}
-+	} /* else: not a kprobe fault; let the kernel handle it */
++typedef void irq_request_func(void *opaque, int level);
 +
-+	preempt_enable_no_resched();
-+	return 0;
++struct kvm_kpic_state {
++	u8 last_irr;	/* edge detection */
++	u8 irr;		/* interrupt request register */
++	u8 imr;		/* interrupt mask register */
++	u8 isr;		/* interrupt service register */
++	u8 priority_add;	/* highest irq priority */
++	u8 irq_base;
++	u8 read_reg_select;
++	u8 poll;
++	u8 special_mask;
++	u8 init_state;
++	u8 auto_eoi;
++	u8 rotate_on_auto_eoi;
++	u8 special_fully_nested_mode;
++	u8 init4;		/* true if 4 byte init */
++	u8 elcr;		/* PIIX edge/trigger selection */
++	u8 elcr_mask;
++	struct kvm_pic *pics_state;
++};
++
++struct kvm_pic {
++	struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
++	irq_request_func *irq_request;
++	void *irq_request_opaque;
++	int output;		/* intr from master PIC */
++	struct kvm_io_device dev;
++};
++
++struct kvm_pic *kvm_create_pic(struct kvm *kvm);
++void kvm_pic_set_irq(void *opaque, int irq, int level);
++int kvm_pic_read_irq(struct kvm_pic *s);
++void kvm_pic_update_irq(struct kvm_pic *s);
++
++static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
++{
++	return kvm->arch.vpic;
 +}
 +
-+/*
-+ * When a retprobed function returns, this code saves registers and
-+ * calls trampoline_handler() runs, which calls the kretprobe's handler.
-+ */
-+void __kprobes kretprobe_trampoline_holder(void)
++static inline int irqchip_in_kernel(struct kvm *kvm)
 +{
-+	asm volatile (
-+			".global kretprobe_trampoline\n"
-+			"kretprobe_trampoline: \n"
-+#ifdef CONFIG_X86_64
-+			/* We don't bother saving the ss register */
-+			"	pushq %rsp\n"
-+			"	pushfq\n"
-+			/*
-+			 * Skip cs, ip, orig_ax.
-+			 * trampoline_handler() will plug in these values
-+			 */
-+			"	subq $24, %rsp\n"
-+			"	pushq %rdi\n"
-+			"	pushq %rsi\n"
-+			"	pushq %rdx\n"
-+			"	pushq %rcx\n"
-+			"	pushq %rax\n"
-+			"	pushq %r8\n"
-+			"	pushq %r9\n"
-+			"	pushq %r10\n"
-+			"	pushq %r11\n"
-+			"	pushq %rbx\n"
-+			"	pushq %rbp\n"
-+			"	pushq %r12\n"
-+			"	pushq %r13\n"
-+			"	pushq %r14\n"
-+			"	pushq %r15\n"
-+			"	movq %rsp, %rdi\n"
-+			"	call trampoline_handler\n"
-+			/* Replace saved sp with true return address. */
-+			"	movq %rax, 152(%rsp)\n"
-+			"	popq %r15\n"
-+			"	popq %r14\n"
-+			"	popq %r13\n"
-+			"	popq %r12\n"
-+			"	popq %rbp\n"
-+			"	popq %rbx\n"
-+			"	popq %r11\n"
-+			"	popq %r10\n"
-+			"	popq %r9\n"
-+			"	popq %r8\n"
-+			"	popq %rax\n"
-+			"	popq %rcx\n"
-+			"	popq %rdx\n"
-+			"	popq %rsi\n"
-+			"	popq %rdi\n"
-+			/* Skip orig_ax, ip, cs */
-+			"	addq $24, %rsp\n"
-+			"	popfq\n"
-+#else
-+			"	pushf\n"
-+			/*
-+			 * Skip cs, ip, orig_ax.
-+			 * trampoline_handler() will plug in these values
-+			 */
-+			"	subl $12, %esp\n"
-+			"	pushl %fs\n"
-+			"	pushl %ds\n"
-+			"	pushl %es\n"
-+			"	pushl %eax\n"
-+			"	pushl %ebp\n"
-+			"	pushl %edi\n"
-+			"	pushl %esi\n"
-+			"	pushl %edx\n"
-+			"	pushl %ecx\n"
-+			"	pushl %ebx\n"
-+			"	movl %esp, %eax\n"
-+			"	call trampoline_handler\n"
-+			/* Move flags to cs */
-+			"	movl 52(%esp), %edx\n"
-+			"	movl %edx, 48(%esp)\n"
-+			/* Replace saved flags with true return address. */
-+			"	movl %eax, 52(%esp)\n"
-+			"	popl %ebx\n"
-+			"	popl %ecx\n"
-+			"	popl %edx\n"
-+			"	popl %esi\n"
-+			"	popl %edi\n"
-+			"	popl %ebp\n"
-+			"	popl %eax\n"
-+			/* Skip ip, orig_ax, es, ds, fs */
-+			"	addl $20, %esp\n"
-+			"	popf\n"
-+#endif
-+			"	ret\n");
++	return pic_irqchip(kvm) != NULL;
 +}
 +
-+/*
-+ * Called from kretprobe_trampoline
-+ */
-+void * __kprobes trampoline_handler(struct pt_regs *regs)
-+{
-+	struct kretprobe_instance *ri = NULL;
-+	struct hlist_head *head, empty_rp;
-+	struct hlist_node *node, *tmp;
-+	unsigned long flags, orig_ret_address = 0;
-+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
++void kvm_pic_reset(struct kvm_kpic_state *s);
++
++void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
++void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
++void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
++void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
++
++#endif
+diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
+new file mode 100644
+index 0000000..ecdfe97
+--- /dev/null
++++ b/arch/x86/kvm/kvm_svm.h
+@@ -0,0 +1,45 @@
++#ifndef __KVM_SVM_H
++#define __KVM_SVM_H
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/kvm_host.h>
++#include <asm/msr.h>
++
++#include "svm.h"
 +
-+	INIT_HLIST_HEAD(&empty_rp);
-+	spin_lock_irqsave(&kretprobe_lock, flags);
-+	head = kretprobe_inst_table_head(current);
-+	/* fixup registers */
++static const u32 host_save_user_msrs[] = {
 +#ifdef CONFIG_X86_64
-+	regs->cs = __KERNEL_CS;
-+#else
-+	regs->cs = __KERNEL_CS | get_kernel_rpl();
++	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
++	MSR_FS_BASE,
 +#endif
-+	regs->ip = trampoline_address;
-+	regs->orig_ax = ~0UL;
++	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
++};
 +
-+	/*
-+	 * It is possible to have multiple instances associated with a given
-+	 * task either because multiple functions in the call path have
-+	 * return probes installed on them, and/or more then one
-+	 * return probe was registered for a target function.
-+	 *
-+	 * We can handle this because:
-+	 *     - instances are always pushed into the head of the list
-+	 *     - when multiple return probes are registered for the same
-+	 *	 function, the (chronologically) first instance's ret_addr
-+	 *	 will be the real return address, and all the rest will
-+	 *	 point to kretprobe_trampoline.
-+	 */
-+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-+		if (ri->task != current)
-+			/* another task is sharing our hash bucket */
-+			continue;
++#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
++#define NUM_DB_REGS 4
 +
-+		if (ri->rp && ri->rp->handler) {
-+			__get_cpu_var(current_kprobe) = &ri->rp->kp;
-+			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
-+			ri->rp->handler(ri, regs);
-+			__get_cpu_var(current_kprobe) = NULL;
-+		}
++struct kvm_vcpu;
 +
-+		orig_ret_address = (unsigned long)ri->ret_addr;
-+		recycle_rp_inst(ri, &empty_rp);
++struct vcpu_svm {
++	struct kvm_vcpu vcpu;
++	struct vmcb *vmcb;
++	unsigned long vmcb_pa;
++	struct svm_cpu_data *svm_data;
++	uint64_t asid_generation;
 +
-+		if (orig_ret_address != trampoline_address)
-+			/*
-+			 * This is the real return address. Any other
-+			 * instances associated with this task are for
-+			 * other calls deeper on the call stack
-+			 */
-+			break;
-+	}
++	unsigned long db_regs[NUM_DB_REGS];
 +
-+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
++	u64 next_rip;
 +
-+	spin_unlock_irqrestore(&kretprobe_lock, flags);
++	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
++	u64 host_gs_base;
++	unsigned long host_cr2;
++	unsigned long host_db_regs[NUM_DB_REGS];
++	unsigned long host_dr6;
++	unsigned long host_dr7;
++};
 +
-+	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
-+		hlist_del(&ri->hlist);
-+		kfree(ri);
-+	}
-+	return (void *)orig_ret_address;
-+}
++#endif
++
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+new file mode 100644
+index 0000000..2cbee94
+--- /dev/null
++++ b/arch/x86/kvm/lapic.c
+@@ -0,0 +1,1154 @@
 +
 +/*
-+ * Called after single-stepping.  p->addr is the address of the
-+ * instruction whose first byte has been replaced by the "int 3"
-+ * instruction.  To avoid the SMP problems that can occur when we
-+ * temporarily put back the original opcode to single-step, we
-+ * single-stepped a copy of the instruction.  The address of this
-+ * copy is p->ainsn.insn.
-+ *
-+ * This function prepares to return from the post-single-step
-+ * interrupt.  We have to fix up the stack as follows:
++ * Local APIC virtualization
 + *
-+ * 0) Except in the case of absolute or indirect jump or call instructions,
-+ * the new ip is relative to the copied instruction.  We need to make
-+ * it relative to the original instruction.
++ * Copyright (C) 2006 Qumranet, Inc.
++ * Copyright (C) 2007 Novell
++ * Copyright (C) 2007 Intel
 + *
-+ * 1) If the single-stepped instruction was pushfl, then the TF and IF
-+ * flags are set in the just-pushed flags, and may need to be cleared.
++ * Authors:
++ *   Dor Laor <dor.laor at qumranet.com>
++ *   Gregory Haskins <ghaskins at novell.com>
++ *   Yaozu (Eddie) Dong <eddie.dong at intel.com>
 + *
-+ * 2) If the single-stepped instruction was a call, the return address
-+ * that is atop the stack is the address following the copied instruction.
-+ * We need to make it the address following the original instruction.
++ * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 + *
-+ * If this is the first time we've single-stepped the instruction at
-+ * this probepoint, and the instruction is boostable, boost it: add a
-+ * jump instruction after the copied instruction, that jumps to the next
-+ * instruction after the probepoint.
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
 + */
-+static void __kprobes resume_execution(struct kprobe *p,
-+		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
++
++#include <linux/kvm_host.h>
++#include <linux/kvm.h>
++#include <linux/mm.h>
++#include <linux/highmem.h>
++#include <linux/smp.h>
++#include <linux/hrtimer.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <asm/processor.h>
++#include <asm/msr.h>
++#include <asm/page.h>
++#include <asm/current.h>
++#include <asm/apicdef.h>
++#include <asm/atomic.h>
++#include <asm/div64.h>
++#include "irq.h"
++
++#define PRId64 "d"
++#define PRIx64 "llx"
++#define PRIu64 "u"
++#define PRIo64 "o"
++
++#define APIC_BUS_CYCLE_NS 1
++
++/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
++#define apic_debug(fmt, arg...)
++
++#define APIC_LVT_NUM			6
++/* 14 is the version for Xeon and Pentium 8.4.8*/
++#define APIC_VERSION			(0x14UL | ((APIC_LVT_NUM - 1) << 16))
++#define LAPIC_MMIO_LENGTH		(1 << 12)
++/* the following defines are not in apicdef.h */
++#define APIC_SHORT_MASK			0xc0000
++#define APIC_DEST_NOSHORT		0x0
++#define APIC_DEST_MASK			0x800
++#define MAX_APIC_VECTOR			256
++
++#define VEC_POS(v) ((v) & (32 - 1))
++#define REG_POS(v) (((v) >> 5) << 4)
++
++static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
 +{
-+	unsigned long *tos = stack_addr(regs);
-+	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
-+	unsigned long orig_ip = (unsigned long)p->addr;
-+	kprobe_opcode_t *insn = p->ainsn.insn;
++	return *((u32 *) (apic->regs + reg_off));
++}
 +
-+	/*skip the REX prefix*/
-+	if (is_REX_prefix(insn))
-+		insn++;
++static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
++{
++	*((u32 *) (apic->regs + reg_off)) = val;
++}
 +
-+	regs->flags &= ~X86_EFLAGS_TF;
-+	switch (*insn) {
-+	case 0x9c:	/* pushfl */
-+		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
-+		*tos |= kcb->kprobe_old_flags;
-+		break;
-+	case 0xc2:	/* iret/ret/lret */
-+	case 0xc3:
-+	case 0xca:
-+	case 0xcb:
-+	case 0xcf:
-+	case 0xea:	/* jmp absolute -- ip is correct */
-+		/* ip is already adjusted, no more changes required */
-+		p->ainsn.boostable = 1;
-+		goto no_change;
-+	case 0xe8:	/* call relative - Fix return addr */
-+		*tos = orig_ip + (*tos - copy_ip);
-+		break;
-+#ifdef CONFIG_X86_32
-+	case 0x9a:	/* call absolute -- same as call absolute, indirect */
-+		*tos = orig_ip + (*tos - copy_ip);
-+		goto no_change;
-+#endif
-+	case 0xff:
-+		if ((insn[1] & 0x30) == 0x10) {
-+			/*
-+			 * call absolute, indirect
-+			 * Fix return addr; ip is correct.
-+			 * But this is not boostable
-+			 */
-+			*tos = orig_ip + (*tos - copy_ip);
-+			goto no_change;
-+		} else if (((insn[1] & 0x31) == 0x20) ||
-+			   ((insn[1] & 0x31) == 0x21)) {
-+			/*
-+			 * jmp near and far, absolute indirect
-+			 * ip is correct. And this is boostable
-+			 */
-+			p->ainsn.boostable = 1;
-+			goto no_change;
-+		}
-+	default:
-+		break;
-+	}
++static inline int apic_test_and_set_vector(int vec, void *bitmap)
++{
++	return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
++}
 +
-+	if (p->ainsn.boostable == 0) {
-+		if ((regs->ip > copy_ip) &&
-+		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
-+			/*
-+			 * These instructions can be executed directly if it
-+			 * jumps back to correct address.
-+			 */
-+			set_jmp_op((void *)regs->ip,
-+				   (void *)orig_ip + (regs->ip - copy_ip));
-+			p->ainsn.boostable = 1;
-+		} else {
-+			p->ainsn.boostable = -1;
-+		}
-+	}
++static inline int apic_test_and_clear_vector(int vec, void *bitmap)
++{
++	return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
++}
 +
-+	regs->ip += orig_ip - copy_ip;
++static inline void apic_set_vector(int vec, void *bitmap)
++{
++	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
++}
 +
-+no_change:
-+	restore_btf();
++static inline void apic_clear_vector(int vec, void *bitmap)
++{
++	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 +}
 +
-+/*
-+ * Interrupts are disabled on entry as trap1 is an interrupt gate and they
-+ * remain disabled thoroughout this function.
-+ */
-+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
++static inline int apic_hw_enabled(struct kvm_lapic *apic)
 +{
-+	struct kprobe *cur = kprobe_running();
-+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++	return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
++}
 +
-+	if (!cur)
-+		return 0;
++static inline int  apic_sw_enabled(struct kvm_lapic *apic)
++{
++	return apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
++}
 +
-+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
-+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
-+		cur->post_handler(cur, regs, 0);
-+	}
++static inline int apic_enabled(struct kvm_lapic *apic)
++{
++	return apic_sw_enabled(apic) &&	apic_hw_enabled(apic);
++}
 +
-+	resume_execution(cur, regs, kcb);
-+	regs->flags |= kcb->kprobe_saved_flags;
-+	trace_hardirqs_fixup_flags(regs->flags);
++#define LVT_MASK	\
++	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
 +
-+	/* Restore back the original saved kprobes variables and continue. */
-+	if (kcb->kprobe_status == KPROBE_REENTER) {
-+		restore_previous_kprobe(kcb);
-+		goto out;
-+	}
-+	reset_current_kprobe();
-+out:
-+	preempt_enable_no_resched();
++#define LINT_MASK	\
++	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
++	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 +
-+	/*
-+	 * if somebody else is singlestepping across a probe point, flags
-+	 * will have TF set, in which case, continue the remaining processing
-+	 * of do_debug, as if this is not a probe hit.
-+	 */
-+	if (regs->flags & X86_EFLAGS_TF)
++static inline int kvm_apic_id(struct kvm_lapic *apic)
++{
++	return (apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
++}
++
++static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
++{
++	return !(apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
++}
++
++static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
++{
++	return apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
++}
++
++static inline int apic_lvtt_period(struct kvm_lapic *apic)
++{
++	return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC;
++}
++
++static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
++	LVT_MASK | APIC_LVT_TIMER_PERIODIC,	/* LVTT */
++	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
++	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
++	LINT_MASK, LINT_MASK,	/* LVT0-1 */
++	LVT_MASK		/* LVTERR */
++};
++
++static int find_highest_vector(void *bitmap)
++{
++	u32 *word = bitmap;
++	int word_offset = MAX_APIC_VECTOR >> 5;
++
++	while ((word_offset != 0) && (word[(--word_offset) << 2] == 0))
++		continue;
++
++	if (likely(!word_offset && !word[0]))
++		return -1;
++	else
++		return fls(word[word_offset << 2]) - 1 + (word_offset << 5);
++}
++
++static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
++{
++	return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
++}
++
++static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
++{
++	apic_clear_vector(vec, apic->regs + APIC_IRR);
++}
++
++static inline int apic_find_highest_irr(struct kvm_lapic *apic)
++{
++	int result;
++
++	result = find_highest_vector(apic->regs + APIC_IRR);
++	ASSERT(result == -1 || result >= 16);
++
++	return result;
++}
++
++int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++	int highest_irr;
++
++	if (!apic)
 +		return 0;
++	highest_irr = apic_find_highest_irr(apic);
 +
-+	return 1;
++	return highest_irr;
 +}
++EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 +
-+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
++int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 +{
-+	struct kprobe *cur = kprobe_running();
-+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++	struct kvm_lapic *apic = vcpu->arch.apic;
 +
-+	switch (kcb->kprobe_status) {
-+	case KPROBE_HIT_SS:
-+	case KPROBE_REENTER:
-+		/*
-+		 * We are here because the instruction being single
-+		 * stepped caused a page fault. We reset the current
-+		 * kprobe and the ip points back to the probe address
-+		 * and allow the page fault handler to continue as a
-+		 * normal page fault.
-+		 */
-+		regs->ip = (unsigned long)cur->addr;
-+		regs->flags |= kcb->kprobe_old_flags;
-+		if (kcb->kprobe_status == KPROBE_REENTER)
-+			restore_previous_kprobe(kcb);
++	if (!apic_test_and_set_irr(vec, apic)) {
++		/* a new pending irq is set in IRR */
++		if (trig)
++			apic_set_vector(vec, apic->regs + APIC_TMR);
 +		else
-+			reset_current_kprobe();
-+		preempt_enable_no_resched();
++			apic_clear_vector(vec, apic->regs + APIC_TMR);
++		kvm_vcpu_kick(apic->vcpu);
++		return 1;
++	}
++	return 0;
++}
++
++static inline int apic_find_highest_isr(struct kvm_lapic *apic)
++{
++	int result;
++
++	result = find_highest_vector(apic->regs + APIC_ISR);
++	ASSERT(result == -1 || result >= 16);
++
++	return result;
++}
++
++static void apic_update_ppr(struct kvm_lapic *apic)
++{
++	u32 tpr, isrv, ppr;
++	int isr;
++
++	tpr = apic_get_reg(apic, APIC_TASKPRI);
++	isr = apic_find_highest_isr(apic);
++	isrv = (isr != -1) ? isr : 0;
++
++	if ((tpr & 0xf0) >= (isrv & 0xf0))
++		ppr = tpr & 0xff;
++	else
++		ppr = isrv & 0xf0;
++
++	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
++		   apic, ppr, isr, isrv);
++
++	apic_set_reg(apic, APIC_PROCPRI, ppr);
++}
++
++static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
++{
++	apic_set_reg(apic, APIC_TASKPRI, tpr);
++	apic_update_ppr(apic);
++}
++
++int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
++{
++	return kvm_apic_id(apic) == dest;
++}
++
++int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
++{
++	int result = 0;
++	u8 logical_id;
++
++	logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR));
++
++	switch (apic_get_reg(apic, APIC_DFR)) {
++	case APIC_DFR_FLAT:
++		if (logical_id & mda)
++			result = 1;
++		break;
++	case APIC_DFR_CLUSTER:
++		if (((logical_id >> 4) == (mda >> 0x4))
++		    && (logical_id & mda & 0xf))
++			result = 1;
 +		break;
-+	case KPROBE_HIT_ACTIVE:
-+	case KPROBE_HIT_SSDONE:
-+		/*
-+		 * We increment the nmissed count for accounting,
-+		 * we can also use npre/npostfault count for accounting
-+		 * these specific fault cases.
-+		 */
-+		kprobes_inc_nmissed_count(cur);
++	default:
++		printk(KERN_WARNING "Bad DFR vcpu %d: %08x\n",
++		       apic->vcpu->vcpu_id, apic_get_reg(apic, APIC_DFR));
++		break;
++	}
 +
-+		/*
-+		 * We come here because instructions in the pre/post
-+		 * handler caused the page_fault, this could happen
-+		 * if handler tries to access user space by
-+		 * copy_from_user(), get_user() etc. Let the
-+		 * user-specified handler try to fix it first.
-+		 */
-+		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
-+			return 1;
++	return result;
++}
 +
-+		/*
-+		 * In case the user-specified fault handler returned
-+		 * zero, try to fix up.
-+		 */
-+		if (fixup_exception(regs))
-+			return 1;
++static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
++			   int short_hand, int dest, int dest_mode)
++{
++	int result = 0;
++	struct kvm_lapic *target = vcpu->arch.apic;
 +
-+		/*
-+		 * fixup routine could not handle it,
-+		 * Let do_page_fault() fix it.
-+		 */
++	apic_debug("target %p, source %p, dest 0x%x, "
++		   "dest_mode 0x%x, short_hand 0x%x",
++		   target, source, dest, dest_mode, short_hand);
++
++	ASSERT(!target);
++	switch (short_hand) {
++	case APIC_DEST_NOSHORT:
++		if (dest_mode == 0) {
++			/* Physical mode. */
++			if ((dest == 0xFF) || (dest == kvm_apic_id(target)))
++				result = 1;
++		} else
++			/* Logical mode. */
++			result = kvm_apic_match_logical_addr(target, dest);
++		break;
++	case APIC_DEST_SELF:
++		if (target == source)
++			result = 1;
++		break;
++	case APIC_DEST_ALLINC:
++		result = 1;
++		break;
++	case APIC_DEST_ALLBUT:
++		if (target != source)
++			result = 1;
 +		break;
 +	default:
++		printk(KERN_WARNING "Bad dest shorthand value %x\n",
++		       short_hand);
 +		break;
 +	}
-+	return 0;
++
++	return result;
 +}
 +
 +/*
-+ * Wrapper routine for handling exceptions.
++ * Add a pending IRQ into lapic.
++ * Return 1 if successfully added and 0 if discarded.
 + */
-+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-+				       unsigned long val, void *data)
++static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
++			     int vector, int level, int trig_mode)
 +{
-+	struct die_args *args = data;
-+	int ret = NOTIFY_DONE;
++	int orig_irr, result = 0;
++	struct kvm_vcpu *vcpu = apic->vcpu;
 +
-+	if (args->regs && user_mode_vm(args->regs))
-+		return ret;
++	switch (delivery_mode) {
++	case APIC_DM_FIXED:
++	case APIC_DM_LOWEST:
++		/* FIXME add logic for vcpu on reset */
++		if (unlikely(!apic_enabled(apic)))
++			break;
 +
-+	switch (val) {
-+	case DIE_INT3:
-+		if (kprobe_handler(args->regs))
-+			ret = NOTIFY_STOP;
++		orig_irr = apic_test_and_set_irr(vector, apic);
++		if (orig_irr && trig_mode) {
++			apic_debug("level trig mode repeatedly for vector %d",
++				   vector);
++			break;
++		}
++
++		if (trig_mode) {
++			apic_debug("level trig mode for vector %d", vector);
++			apic_set_vector(vector, apic->regs + APIC_TMR);
++		} else
++			apic_clear_vector(vector, apic->regs + APIC_TMR);
++
++		if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
++			kvm_vcpu_kick(vcpu);
++		else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
++			vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
++			if (waitqueue_active(&vcpu->wq))
++				wake_up_interruptible(&vcpu->wq);
++		}
++
++		result = (orig_irr == 0);
 +		break;
-+	case DIE_DEBUG:
-+		if (post_kprobe_handler(args->regs))
-+			ret = NOTIFY_STOP;
++
++	case APIC_DM_REMRD:
++		printk(KERN_DEBUG "Ignoring delivery mode 3\n");
 +		break;
-+	case DIE_GPF:
-+		/*
-+		 * To be potentially processing a kprobe fault and to
-+		 * trust the result from kprobe_running(), we have
-+		 * be non-preemptible.
-+		 */
-+		if (!preemptible() && kprobe_running() &&
-+		    kprobe_fault_handler(args->regs, args->trapnr))
-+			ret = NOTIFY_STOP;
++
++	case APIC_DM_SMI:
++		printk(KERN_DEBUG "Ignoring guest SMI\n");
++		break;
++	case APIC_DM_NMI:
++		printk(KERN_DEBUG "Ignoring guest NMI\n");
++		break;
++
++	case APIC_DM_INIT:
++		if (level) {
++			if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
++				printk(KERN_DEBUG
++				       "INIT on a runnable vcpu %d\n",
++				       vcpu->vcpu_id);
++			vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
++			kvm_vcpu_kick(vcpu);
++		} else {
++			printk(KERN_DEBUG
++			       "Ignoring de-assert INIT to vcpu %d\n",
++			       vcpu->vcpu_id);
++		}
++
++		break;
++
++	case APIC_DM_STARTUP:
++		printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
++		       vcpu->vcpu_id, vector);
++		if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
++			vcpu->arch.sipi_vector = vector;
++			vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
++			if (waitqueue_active(&vcpu->wq))
++				wake_up_interruptible(&vcpu->wq);
++		}
 +		break;
++
 +	default:
++		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
++		       delivery_mode);
 +		break;
 +	}
-+	return ret;
++	return result;
 +}
 +
-+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
++static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
++				       unsigned long bitmap)
 +{
-+	struct jprobe *jp = container_of(p, struct jprobe, kp);
-+	unsigned long addr;
-+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++	int last;
++	int next;
++	struct kvm_lapic *apic = NULL;
 +
-+	kcb->jprobe_saved_regs = *regs;
-+	kcb->jprobe_saved_sp = stack_addr(regs);
-+	addr = (unsigned long)(kcb->jprobe_saved_sp);
++	last = kvm->arch.round_robin_prev_vcpu;
++	next = last;
++
++	do {
++		if (++next == KVM_MAX_VCPUS)
++			next = 0;
++		if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
++			continue;
++		apic = kvm->vcpus[next]->arch.apic;
++		if (apic && apic_enabled(apic))
++			break;
++		apic = NULL;
++	} while (next != last);
++	kvm->arch.round_robin_prev_vcpu = next;
++
++	if (!apic)
++		printk(KERN_DEBUG "vcpu not ready for apic_round_robin\n");
++
++	return apic;
++}
++
++struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
++		unsigned long bitmap)
++{
++	struct kvm_lapic *apic;
++
++	apic = kvm_apic_round_robin(kvm, vector, bitmap);
++	if (apic)
++		return apic->vcpu;
++	return NULL;
++}
++
++static void apic_set_eoi(struct kvm_lapic *apic)
++{
++	int vector = apic_find_highest_isr(apic);
 +
 +	/*
-+	 * As Linus pointed out, gcc assumes that the callee
-+	 * owns the argument space and could overwrite it, e.g.
-+	 * tailcall optimization. So, to be absolutely safe
-+	 * we also save and restore enough stack bytes to cover
-+	 * the argument area.
++	 * Not every EOI write has a corresponding ISR bit set;
++	 * one example is when the kernel checks the timer in setup_IO_APIC
 +	 */
-+	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
-+	       MIN_STACK_SIZE(addr));
-+	regs->flags &= ~X86_EFLAGS_IF;
-+	trace_hardirqs_off();
-+	regs->ip = (unsigned long)(jp->entry);
-+	return 1;
++	if (vector == -1)
++		return;
++
++	apic_clear_vector(vector, apic->regs + APIC_ISR);
++	apic_update_ppr(apic);
++
++	if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
++		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector);
 +}
 +
-+void __kprobes jprobe_return(void)
++static void apic_send_ipi(struct kvm_lapic *apic)
 +{
-+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++	u32 icr_low = apic_get_reg(apic, APIC_ICR);
++	u32 icr_high = apic_get_reg(apic, APIC_ICR2);
 +
-+	asm volatile (
-+#ifdef CONFIG_X86_64
-+			"       xchg   %%rbx,%%rsp	\n"
-+#else
-+			"       xchgl   %%ebx,%%esp	\n"
-+#endif
-+			"       int3			\n"
-+			"       .globl jprobe_return_end\n"
-+			"       jprobe_return_end:	\n"
-+			"       nop			\n"::"b"
-+			(kcb->jprobe_saved_sp):"memory");
++	unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
++	unsigned int short_hand = icr_low & APIC_SHORT_MASK;
++	unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
++	unsigned int level = icr_low & APIC_INT_ASSERT;
++	unsigned int dest_mode = icr_low & APIC_DEST_MASK;
++	unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
++	unsigned int vector = icr_low & APIC_VECTOR_MASK;
++
++	struct kvm_vcpu *target;
++	struct kvm_vcpu *vcpu;
++	unsigned long lpr_map = 0;
++	int i;
++
++	apic_debug("icr_high 0x%x, icr_low 0x%x, "
++		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
++		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
++		   icr_high, icr_low, short_hand, dest,
++		   trig_mode, level, dest_mode, delivery_mode, vector);
++
++	for (i = 0; i < KVM_MAX_VCPUS; i++) {
++		vcpu = apic->vcpu->kvm->vcpus[i];
++		if (!vcpu)
++			continue;
++
++		if (vcpu->arch.apic &&
++		    apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
++			if (delivery_mode == APIC_DM_LOWEST)
++				set_bit(vcpu->vcpu_id, &lpr_map);
++			else
++				__apic_accept_irq(vcpu->arch.apic, delivery_mode,
++						  vector, level, trig_mode);
++		}
++	}
++
++	if (delivery_mode == APIC_DM_LOWEST) {
++		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
++		if (target != NULL)
++			__apic_accept_irq(target->arch.apic, delivery_mode,
++					  vector, level, trig_mode);
++	}
 +}
 +
-+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
++static u32 apic_get_tmcct(struct kvm_lapic *apic)
 +{
-+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-+	u8 *addr = (u8 *) (regs->ip - 1);
-+	struct jprobe *jp = container_of(p, struct jprobe, kp);
++	u64 counter_passed;
++	ktime_t passed, now;
++	u32 tmcct;
 +
-+	if ((addr > (u8 *) jprobe_return) &&
-+	    (addr < (u8 *) jprobe_return_end)) {
-+		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
-+			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
-+			printk(KERN_ERR
-+			       "current sp %p does not match saved sp %p\n",
-+			       stack_addr(regs), kcb->jprobe_saved_sp);
-+			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
-+			show_registers(saved_regs);
-+			printk(KERN_ERR "Current registers\n");
-+			show_registers(regs);
-+			BUG();
++	ASSERT(apic != NULL);
++
++	now = apic->timer.dev.base->get_time();
++	tmcct = apic_get_reg(apic, APIC_TMICT);
++
++	/* if initial count is 0, current count should also be 0 */
++	if (tmcct == 0)
++		return 0;
++
++	if (unlikely(ktime_to_ns(now) <=
++		ktime_to_ns(apic->timer.last_update))) {
++		/* Wrap around */
++		passed = ktime_add(( {
++				    (ktime_t) {
++				    .tv64 = KTIME_MAX -
++				    (apic->timer.last_update).tv64}; }
++				   ), now);
++		apic_debug("time elapsed\n");
++	} else
++		passed = ktime_sub(now, apic->timer.last_update);
++
++	counter_passed = div64_64(ktime_to_ns(passed),
++				  (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
++
++	if (counter_passed > tmcct) {
++		if (unlikely(!apic_lvtt_period(apic))) {
++			/* one-shot timers stick at 0 until reset */
++			tmcct = 0;
++		} else {
++			/*
++			 * periodic timers reset to APIC_TMICT when they
++			 * hit 0. The while loop simulates this happening N
++			 * times. (counter_passed %= tmcct) would also work,
++			 * but might be slower or not work on 32-bit??
++			 */
++			while (counter_passed > tmcct)
++				counter_passed -= tmcct;
++			tmcct -= counter_passed;
 +		}
-+		*regs = kcb->jprobe_saved_regs;
-+		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
-+		       kcb->jprobes_stack,
-+		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
-+		preempt_enable_no_resched();
-+		return 1;
++	} else {
++		tmcct -= counter_passed;
 +	}
-+	return 0;
++
++	return tmcct;
 +}
 +
-+int __init arch_init_kprobes(void)
++static void __report_tpr_access(struct kvm_lapic *apic, bool write)
 +{
-+	return 0;
++	struct kvm_vcpu *vcpu = apic->vcpu;
++	struct kvm_run *run = vcpu->run;
++
++	set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
++	kvm_x86_ops->cache_regs(vcpu);
++	run->tpr_access.rip = vcpu->arch.rip;
++	run->tpr_access.is_write = write;
 +}
 +
-+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
++static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
 +{
-+	return 0;
++	if (apic->vcpu->arch.tpr_access_reporting)
++		__report_tpr_access(apic, write);
 +}
-diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
-deleted file mode 100644
-index 3a020f7..0000000
---- a/arch/x86/kernel/kprobes_32.c
-+++ /dev/null
-@@ -1,756 +0,0 @@
--/*
-- *  Kernel Probes (KProbes)
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, write to the Free Software
-- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-- *
-- * Copyright (C) IBM Corporation, 2002, 2004
-- *
-- * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna at in.ibm.com> Kernel
-- *		Probes initial implementation ( includes contributions from
-- *		Rusty Russell).
-- * 2004-July	Suparna Bhattacharya <suparna at in.ibm.com> added jumper probes
-- *		interface to access function arguments.
-- * 2005-May	Hien Nguyen <hien at us.ibm.com>, Jim Keniston
-- *		<jkenisto at us.ibm.com> and Prasanna S Panchamukhi
-- *		<prasanna at in.ibm.com> added function-return probes.
-- */
--
--#include <linux/kprobes.h>
--#include <linux/ptrace.h>
--#include <linux/preempt.h>
--#include <linux/kdebug.h>
--#include <asm/cacheflush.h>
--#include <asm/desc.h>
--#include <asm/uaccess.h>
--#include <asm/alternative.h>
--
--void jprobe_return_end(void);
--
--DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
--DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
--
--struct kretprobe_blackpoint kretprobe_blacklist[] = {
--	{"__switch_to", }, /* This function switches only current task, but
--			     doesn't switch kernel stack.*/
--	{NULL, NULL}	/* Terminator */
--};
--const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
--
--/* insert a jmp code */
--static __always_inline void set_jmp_op(void *from, void *to)
--{
--	struct __arch_jmp_op {
--		char op;
--		long raddr;
--	} __attribute__((packed)) *jop;
--	jop = (struct __arch_jmp_op *)from;
--	jop->raddr = (long)(to) - ((long)(from) + 5);
--	jop->op = RELATIVEJUMP_INSTRUCTION;
--}
--
--/*
-- * returns non-zero if opcodes can be boosted.
-- */
--static __always_inline int can_boost(kprobe_opcode_t *opcodes)
--{
--#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
--	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
--	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
--	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
--	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
--	 << (row % 32))
--	/*
--	 * Undefined/reserved opcodes, conditional jump, Opcode Extension
--	 * Groups, and some special opcodes can not be boost.
--	 */
--	static const unsigned long twobyte_is_boostable[256 / 32] = {
--		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
--		/*      -------------------------------         */
--		W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
--		W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
--		W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
--		W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
--		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
--		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
--		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
--		W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
--		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
--		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
--		W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
--		W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
--		W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
--		W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
--		W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
--		W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0)  /* f0 */
--		/*      -------------------------------         */
--		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
--	};
--#undef W
--	kprobe_opcode_t opcode;
--	kprobe_opcode_t *orig_opcodes = opcodes;
--retry:
--	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
--		return 0;
--	opcode = *(opcodes++);
--
--	/* 2nd-byte opcode */
--	if (opcode == 0x0f) {
--		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
--			return 0;
--		return test_bit(*opcodes, twobyte_is_boostable);
--	}
--
--	switch (opcode & 0xf0) {
--	case 0x60:
--		if (0x63 < opcode && opcode < 0x67)
--			goto retry; /* prefixes */
--		/* can't boost Address-size override and bound */
--		return (opcode != 0x62 && opcode != 0x67);
--	case 0x70:
--		return 0; /* can't boost conditional jump */
--	case 0xc0:
--		/* can't boost software-interruptions */
--		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
--	case 0xd0:
--		/* can boost AA* and XLAT */
--		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
--	case 0xe0:
--		/* can boost in/out and absolute jmps */
--		return ((opcode & 0x04) || opcode == 0xea);
--	case 0xf0:
--		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
--			goto retry; /* lock/rep(ne) prefix */
--		/* clear and set flags can be boost */
--		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
--	default:
--		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
--			goto retry; /* prefixes */
--		/* can't boost CS override and call */
--		return (opcode != 0x2e && opcode != 0x9a);
--	}
--}
--
--/*
-- * returns non-zero if opcode modifies the interrupt flag.
-- */
--static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
--{
--	switch (opcode) {
--	case 0xfa:		/* cli */
--	case 0xfb:		/* sti */
--	case 0xcf:		/* iret/iretd */
--	case 0x9d:		/* popf/popfd */
--		return 1;
--	}
--	return 0;
--}
--
--int __kprobes arch_prepare_kprobe(struct kprobe *p)
--{
--	/* insn: must be on special executable page on i386. */
--	p->ainsn.insn = get_insn_slot();
--	if (!p->ainsn.insn)
--		return -ENOMEM;
--
--	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
--	p->opcode = *p->addr;
--	if (can_boost(p->addr)) {
--		p->ainsn.boostable = 0;
--	} else {
--		p->ainsn.boostable = -1;
--	}
--	return 0;
--}
--
--void __kprobes arch_arm_kprobe(struct kprobe *p)
--{
--	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
--}
--
--void __kprobes arch_disarm_kprobe(struct kprobe *p)
--{
--	text_poke(p->addr, &p->opcode, 1);
--}
--
--void __kprobes arch_remove_kprobe(struct kprobe *p)
--{
--	mutex_lock(&kprobe_mutex);
--	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
--	mutex_unlock(&kprobe_mutex);
--}
--
--static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
--{
--	kcb->prev_kprobe.kp = kprobe_running();
--	kcb->prev_kprobe.status = kcb->kprobe_status;
--	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
--	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
--}
--
--static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
--{
--	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
--	kcb->kprobe_status = kcb->prev_kprobe.status;
--	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
--	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
--}
--
--static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
--				struct kprobe_ctlblk *kcb)
--{
--	__get_cpu_var(current_kprobe) = p;
--	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
--		= (regs->eflags & (TF_MASK | IF_MASK));
--	if (is_IF_modifier(p->opcode))
--		kcb->kprobe_saved_eflags &= ~IF_MASK;
--}
--
--static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
--{
--	regs->eflags |= TF_MASK;
--	regs->eflags &= ~IF_MASK;
--	/*single step inline if the instruction is an int3*/
--	if (p->opcode == BREAKPOINT_INSTRUCTION)
--		regs->eip = (unsigned long)p->addr;
--	else
--		regs->eip = (unsigned long)p->ainsn.insn;
--}
--
--/* Called with kretprobe_lock held */
--void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
--				      struct pt_regs *regs)
--{
--	unsigned long *sara = (unsigned long *)&regs->esp;
--
--	ri->ret_addr = (kprobe_opcode_t *) *sara;
--
--	/* Replace the return addr with trampoline addr */
--	*sara = (unsigned long) &kretprobe_trampoline;
--}
--
--/*
-- * Interrupts are disabled on entry as trap3 is an interrupt gate and they
-- * remain disabled thorough out this function.
-- */
--static int __kprobes kprobe_handler(struct pt_regs *regs)
--{
--	struct kprobe *p;
--	int ret = 0;
--	kprobe_opcode_t *addr;
--	struct kprobe_ctlblk *kcb;
--
--	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
--
--	/*
--	 * We don't want to be preempted for the entire
--	 * duration of kprobe processing
--	 */
--	preempt_disable();
--	kcb = get_kprobe_ctlblk();
--
--	/* Check we're not actually recursing */
--	if (kprobe_running()) {
--		p = get_kprobe(addr);
--		if (p) {
--			if (kcb->kprobe_status == KPROBE_HIT_SS &&
--				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
--				regs->eflags &= ~TF_MASK;
--				regs->eflags |= kcb->kprobe_saved_eflags;
--				goto no_kprobe;
--			}
--			/* We have reentered the kprobe_handler(), since
--			 * another probe was hit while within the handler.
--			 * We here save the original kprobes variables and
--			 * just single step on the instruction of the new probe
--			 * without calling any user handlers.
--			 */
--			save_previous_kprobe(kcb);
--			set_current_kprobe(p, regs, kcb);
--			kprobes_inc_nmissed_count(p);
--			prepare_singlestep(p, regs);
--			kcb->kprobe_status = KPROBE_REENTER;
--			return 1;
--		} else {
--			if (*addr != BREAKPOINT_INSTRUCTION) {
--			/* The breakpoint instruction was removed by
--			 * another cpu right after we hit, no further
--			 * handling of this interrupt is appropriate
--			 */
--				regs->eip -= sizeof(kprobe_opcode_t);
--				ret = 1;
--				goto no_kprobe;
--			}
--			p = __get_cpu_var(current_kprobe);
--			if (p->break_handler && p->break_handler(p, regs)) {
--				goto ss_probe;
--			}
--		}
--		goto no_kprobe;
--	}
--
--	p = get_kprobe(addr);
--	if (!p) {
--		if (*addr != BREAKPOINT_INSTRUCTION) {
--			/*
--			 * The breakpoint instruction was removed right
--			 * after we hit it.  Another cpu has removed
--			 * either a probepoint or a debugger breakpoint
--			 * at this address.  In either case, no further
--			 * handling of this interrupt is appropriate.
--			 * Back up over the (now missing) int3 and run
--			 * the original instruction.
--			 */
--			regs->eip -= sizeof(kprobe_opcode_t);
--			ret = 1;
--		}
--		/* Not one of ours: let kernel handle it */
--		goto no_kprobe;
--	}
--
--	set_current_kprobe(p, regs, kcb);
--	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
--
--	if (p->pre_handler && p->pre_handler(p, regs))
--		/* handler has already set things up, so skip ss setup */
--		return 1;
--
--ss_probe:
--#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
--	if (p->ainsn.boostable == 1 && !p->post_handler){
--		/* Boost up -- we can execute copied instructions directly */
--		reset_current_kprobe();
--		regs->eip = (unsigned long)p->ainsn.insn;
--		preempt_enable_no_resched();
--		return 1;
--	}
--#endif
--	prepare_singlestep(p, regs);
--	kcb->kprobe_status = KPROBE_HIT_SS;
--	return 1;
--
--no_kprobe:
--	preempt_enable_no_resched();
--	return ret;
--}
--
--/*
-- * For function-return probes, init_kprobes() establishes a probepoint
-- * here. When a retprobed function returns, this probe is hit and
-- * trampoline_probe_handler() runs, calling the kretprobe's handler.
-- */
-- void __kprobes kretprobe_trampoline_holder(void)
-- {
--	asm volatile ( ".global kretprobe_trampoline\n"
--			"kretprobe_trampoline: \n"
--			"	pushf\n"
--			/* skip cs, eip, orig_eax */
--			"	subl $12, %esp\n"
--			"	pushl %fs\n"
--			"	pushl %ds\n"
--			"	pushl %es\n"
--			"	pushl %eax\n"
--			"	pushl %ebp\n"
--			"	pushl %edi\n"
--			"	pushl %esi\n"
--			"	pushl %edx\n"
--			"	pushl %ecx\n"
--			"	pushl %ebx\n"
--			"	movl %esp, %eax\n"
--			"	call trampoline_handler\n"
--			/* move eflags to cs */
--			"	movl 52(%esp), %edx\n"
--			"	movl %edx, 48(%esp)\n"
--			/* save true return address on eflags */
--			"	movl %eax, 52(%esp)\n"
--			"	popl %ebx\n"
--			"	popl %ecx\n"
--			"	popl %edx\n"
--			"	popl %esi\n"
--			"	popl %edi\n"
--			"	popl %ebp\n"
--			"	popl %eax\n"
--			/* skip eip, orig_eax, es, ds, fs */
--			"	addl $20, %esp\n"
--			"	popf\n"
--			"	ret\n");
--}
--
--/*
-- * Called from kretprobe_trampoline
-- */
--fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
--{
--	struct kretprobe_instance *ri = NULL;
--	struct hlist_head *head, empty_rp;
--	struct hlist_node *node, *tmp;
--	unsigned long flags, orig_ret_address = 0;
--	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
--
--	INIT_HLIST_HEAD(&empty_rp);
--	spin_lock_irqsave(&kretprobe_lock, flags);
--	head = kretprobe_inst_table_head(current);
--	/* fixup registers */
--	regs->xcs = __KERNEL_CS | get_kernel_rpl();
--	regs->eip = trampoline_address;
--	regs->orig_eax = 0xffffffff;
--
--	/*
--	 * It is possible to have multiple instances associated with a given
--	 * task either because an multiple functions in the call path
--	 * have a return probe installed on them, and/or more then one return
--	 * return probe was registered for a target function.
--	 *
--	 * We can handle this because:
--	 *     - instances are always inserted at the head of the list
--	 *     - when multiple return probes are registered for the same
--	 *       function, the first instance's ret_addr will point to the
--	 *       real return address, and all the rest will point to
--	 *       kretprobe_trampoline
--	 */
--	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
--		if (ri->task != current)
--			/* another task is sharing our hash bucket */
--			continue;
--
--		if (ri->rp && ri->rp->handler){
--			__get_cpu_var(current_kprobe) = &ri->rp->kp;
--			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
--			ri->rp->handler(ri, regs);
--			__get_cpu_var(current_kprobe) = NULL;
--		}
--
--		orig_ret_address = (unsigned long)ri->ret_addr;
--		recycle_rp_inst(ri, &empty_rp);
--
--		if (orig_ret_address != trampoline_address)
--			/*
--			 * This is the real return address. Any other
--			 * instances associated with this task are for
--			 * other calls deeper on the call stack
--			 */
--			break;
--	}
--
--	kretprobe_assert(ri, orig_ret_address, trampoline_address);
--	spin_unlock_irqrestore(&kretprobe_lock, flags);
--
--	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
--		hlist_del(&ri->hlist);
--		kfree(ri);
--	}
--	return (void*)orig_ret_address;
--}
--
--/*
-- * Called after single-stepping.  p->addr is the address of the
-- * instruction whose first byte has been replaced by the "int 3"
-- * instruction.  To avoid the SMP problems that can occur when we
-- * temporarily put back the original opcode to single-step, we
-- * single-stepped a copy of the instruction.  The address of this
-- * copy is p->ainsn.insn.
-- *
-- * This function prepares to return from the post-single-step
-- * interrupt.  We have to fix up the stack as follows:
-- *
-- * 0) Except in the case of absolute or indirect jump or call instructions,
-- * the new eip is relative to the copied instruction.  We need to make
-- * it relative to the original instruction.
-- *
-- * 1) If the single-stepped instruction was pushfl, then the TF and IF
-- * flags are set in the just-pushed eflags, and may need to be cleared.
-- *
-- * 2) If the single-stepped instruction was a call, the return address
-- * that is atop the stack is the address following the copied instruction.
-- * We need to make it the address following the original instruction.
-- *
-- * This function also checks instruction size for preparing direct execution.
-- */
--static void __kprobes resume_execution(struct kprobe *p,
--		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
--{
--	unsigned long *tos = (unsigned long *)&regs->esp;
--	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
--	unsigned long orig_eip = (unsigned long)p->addr;
--
--	regs->eflags &= ~TF_MASK;
--	switch (p->ainsn.insn[0]) {
--	case 0x9c:		/* pushfl */
--		*tos &= ~(TF_MASK | IF_MASK);
--		*tos |= kcb->kprobe_old_eflags;
--		break;
--	case 0xc2:		/* iret/ret/lret */
--	case 0xc3:
--	case 0xca:
--	case 0xcb:
--	case 0xcf:
--	case 0xea:		/* jmp absolute -- eip is correct */
--		/* eip is already adjusted, no more changes required */
--		p->ainsn.boostable = 1;
--		goto no_change;
--	case 0xe8:		/* call relative - Fix return addr */
--		*tos = orig_eip + (*tos - copy_eip);
--		break;
--	case 0x9a:		/* call absolute -- same as call absolute, indirect */
--		*tos = orig_eip + (*tos - copy_eip);
--		goto no_change;
--	case 0xff:
--		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
--			/*
--			 * call absolute, indirect
--			 * Fix return addr; eip is correct.
--			 * But this is not boostable
--			 */
--			*tos = orig_eip + (*tos - copy_eip);
--			goto no_change;
--		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
--			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
--			/* eip is correct. And this is boostable */
--			p->ainsn.boostable = 1;
--			goto no_change;
--		}
--	default:
--		break;
--	}
--
--	if (p->ainsn.boostable == 0) {
--		if ((regs->eip > copy_eip) &&
--		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
--			/*
--			 * This instruction can be executed directly if it
--			 * jumps back to the correct address.
--			 */
--			set_jmp_op((void *)regs->eip,
--				   (void *)orig_eip + (regs->eip - copy_eip));
--			p->ainsn.boostable = 1;
--		} else {
--			p->ainsn.boostable = -1;
--		}
--	}
--
--	regs->eip = orig_eip + (regs->eip - copy_eip);
--
--no_change:
--	return;
--}
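
The final eip rebase in resume_execution() is plain pointer arithmetic: whatever distance the single step advanced inside the copied slot is re-applied at the original location. A minimal sketch of that fix-up, using hypothetical addresses (none of these values come from the patch):

	static unsigned long rebase_ip(unsigned long orig_ip, unsigned long copy_ip,
				       unsigned long ip_after_step)
	{
		/* e.g. 0xc0123450 + (0xc09f0002 - 0xc09f0000) == 0xc0123452 */
		return orig_ip + (ip_after_step - copy_ip);
	}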
--
--/*
-- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
-- * remain disabled throughout this function.
-- */
--static int __kprobes post_kprobe_handler(struct pt_regs *regs)
--{
--	struct kprobe *cur = kprobe_running();
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--
--	if (!cur)
--		return 0;
--
--	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
--		kcb->kprobe_status = KPROBE_HIT_SSDONE;
--		cur->post_handler(cur, regs, 0);
--	}
--
--	resume_execution(cur, regs, kcb);
--	regs->eflags |= kcb->kprobe_saved_eflags;
--	trace_hardirqs_fixup_flags(regs->eflags);
--
--	/* Restore the original saved kprobes variables and continue. */
--	if (kcb->kprobe_status == KPROBE_REENTER) {
--		restore_previous_kprobe(kcb);
--		goto out;
--	}
--	reset_current_kprobe();
--out:
--	preempt_enable_no_resched();
--
--	/*
--	 * if somebody else is singlestepping across a probe point, eflags
--	 * will have TF set, in which case, continue the remaining processing
--	 * of do_debug, as if this is not a probe hit.
--	 */
--	if (regs->eflags & TF_MASK)
--		return 0;
--
--	return 1;
--}
--
--int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
--{
--	struct kprobe *cur = kprobe_running();
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--
--	switch(kcb->kprobe_status) {
--	case KPROBE_HIT_SS:
--	case KPROBE_REENTER:
--		/*
--		 * We are here because the instruction being single
--		 * stepped caused a page fault. We reset the current
--		 * kprobe and the eip points back to the probe address
--		 * and allow the page fault handler to continue as a
--		 * normal page fault.
--		 */
--		regs->eip = (unsigned long)cur->addr;
--		regs->eflags |= kcb->kprobe_old_eflags;
--		if (kcb->kprobe_status == KPROBE_REENTER)
--			restore_previous_kprobe(kcb);
--		else
--			reset_current_kprobe();
--		preempt_enable_no_resched();
--		break;
--	case KPROBE_HIT_ACTIVE:
--	case KPROBE_HIT_SSDONE:
--		/*
--		 * We increment the nmissed count for accounting;
--		 * we can also use npre/npostfault count for accounting
--		 * these specific fault cases.
--		 */
--		kprobes_inc_nmissed_count(cur);
--
--		/*
--		 * We come here because instructions in the pre/post
--		 * handler caused the page_fault, this could happen
--		 * if handler tries to access user space by
--		 * copy_from_user(), get_user() etc. Let the
--		 * user-specified handler try to fix it first.
--		 */
--		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
--			return 1;
--
--		/*
--		 * In case the user-specified fault handler returned
--		 * zero, try to fix up.
--		 */
--		if (fixup_exception(regs))
--			return 1;
--
--		/*
--		 * fixup_exception() could not handle it,
--		 * Let do_page_fault() fix it.
--		 */
--		break;
--	default:
--		break;
--	}
--	return 0;
--}
--
--/*
-- * Wrapper routine for handling exceptions.
-- */
--int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
--				       unsigned long val, void *data)
--{
--	struct die_args *args = (struct die_args *)data;
--	int ret = NOTIFY_DONE;
--
--	if (args->regs && user_mode_vm(args->regs))
--		return ret;
--
--	switch (val) {
--	case DIE_INT3:
--		if (kprobe_handler(args->regs))
--			ret = NOTIFY_STOP;
--		break;
--	case DIE_DEBUG:
--		if (post_kprobe_handler(args->regs))
--			ret = NOTIFY_STOP;
--		break;
--	case DIE_GPF:
--		/* kprobe_running() needs smp_processor_id() */
--		preempt_disable();
--		if (kprobe_running() &&
--		    kprobe_fault_handler(args->regs, args->trapnr))
--			ret = NOTIFY_STOP;
--		preempt_enable();
--		break;
--	default:
--		break;
--	}
--	return ret;
--}
--
--int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
--{
--	struct jprobe *jp = container_of(p, struct jprobe, kp);
--	unsigned long addr;
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--
--	kcb->jprobe_saved_regs = *regs;
--	kcb->jprobe_saved_esp = &regs->esp;
--	addr = (unsigned long)(kcb->jprobe_saved_esp);
--
--	/*
--	 * TBD: As Linus pointed out, gcc assumes that the callee
--	 * owns the argument space and could overwrite it, e.g.
--	 * tailcall optimization. So, to be absolutely safe
--	 * we also save and restore enough stack bytes to cover
--	 * the argument area.
--	 */
--	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
--			MIN_STACK_SIZE(addr));
--	regs->eflags &= ~IF_MASK;
--	trace_hardirqs_off();
--	regs->eip = (unsigned long)(jp->entry);
--	return 1;
--}
--
--void __kprobes jprobe_return(void)
--{
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--
--	asm volatile ("       xchgl   %%ebx,%%esp     \n"
--		      "       int3			\n"
--		      "       .globl jprobe_return_end	\n"
--		      "       jprobe_return_end:	\n"
--		      "       nop			\n"::"b"
--		      (kcb->jprobe_saved_esp):"memory");
--}
--
--int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
--{
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--	u8 *addr = (u8 *) (regs->eip - 1);
--	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
--	struct jprobe *jp = container_of(p, struct jprobe, kp);
--
--	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
--		if (&regs->esp != kcb->jprobe_saved_esp) {
--			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
--			printk("current esp %p does not match saved esp %p\n",
--			       &regs->esp, kcb->jprobe_saved_esp);
--			printk("Saved registers for jprobe %p\n", jp);
--			show_registers(saved_regs);
--			printk("Current registers\n");
--			show_registers(regs);
--			BUG();
--		}
--		*regs = kcb->jprobe_saved_regs;
--		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
--		       MIN_STACK_SIZE(stack_addr));
--		preempt_enable_no_resched();
--		return 1;
--	}
--	return 0;
--}
--
--int __kprobes arch_trampoline_kprobe(struct kprobe *p)
--{
--	return 0;
--}
--
--int __init arch_init_kprobes(void)
--{
--	return 0;
--}
-diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
-deleted file mode 100644
-index 5df19a9..0000000
---- a/arch/x86/kernel/kprobes_64.c
-+++ /dev/null
-@@ -1,749 +0,0 @@
--/*
-- *  Kernel Probes (KProbes)
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, write to the Free Software
-- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-- *
-- * Copyright (C) IBM Corporation, 2002, 2004
-- *
-- * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna at in.ibm.com> Kernel
-- *		Probes initial implementation ( includes contributions from
-- *		Rusty Russell).
-- * 2004-July	Suparna Bhattacharya <suparna at in.ibm.com> added jumper probes
-- *		interface to access function arguments.
-- * 2004-Oct	Jim Keniston <kenistoj at us.ibm.com> and Prasanna S Panchamukhi
-- *		<prasanna at in.ibm.com> adapted for x86_64
-- * 2005-Mar	Roland McGrath <roland at redhat.com>
-- *		Fixed to handle %rip-relative addressing mode correctly.
-- * 2005-May     Rusty Lynch <rusty.lynch at intel.com>
-- *              Added function return probes functionality
-- */
--
--#include <linux/kprobes.h>
--#include <linux/ptrace.h>
--#include <linux/string.h>
--#include <linux/slab.h>
--#include <linux/preempt.h>
--#include <linux/module.h>
--#include <linux/kdebug.h>
--
--#include <asm/pgtable.h>
--#include <asm/uaccess.h>
--#include <asm/alternative.h>
--
--void jprobe_return_end(void);
--static void __kprobes arch_copy_kprobe(struct kprobe *p);
--
--DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
--DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
--
--struct kretprobe_blackpoint kretprobe_blacklist[] = {
--	{"__switch_to", }, /* This function switches only current task, but
--			      doesn't switch kernel stack.*/
--	{NULL, NULL}	/* Terminator */
--};
--const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
--
--/*
-- * returns non-zero if opcode modifies the interrupt flag.
-- */
--static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
--{
--	switch (*insn) {
--	case 0xfa:		/* cli */
--	case 0xfb:		/* sti */
--	case 0xcf:		/* iret/iretd */
--	case 0x9d:		/* popf/popfd */
--		return 1;
--	}
--
--	if (*insn  >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
--		return 1;
--	return 0;
--}
--
--int __kprobes arch_prepare_kprobe(struct kprobe *p)
--{
--	/* insn: must be on special executable page on x86_64. */
--	p->ainsn.insn = get_insn_slot();
--	if (!p->ainsn.insn) {
--		return -ENOMEM;
--	}
--	arch_copy_kprobe(p);
--	return 0;
--}
--
--/*
-- * Determine if the instruction uses the %rip-relative addressing mode.
-- * If it does, return the address of the 32-bit displacement word.
-- * If not, return null.
-- */
--static s32 __kprobes *is_riprel(u8 *insn)
--{
--#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
--	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
--	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
--	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
--	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
--	 << (row % 64))
--	static const u64 onebyte_has_modrm[256 / 64] = {
--		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
--		/*      -------------------------------         */
--		W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 00 */
--		W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 10 */
--		W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 20 */
--		W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0), /* 30 */
--		W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */
--		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 50 */
--		W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)| /* 60 */
--		W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 70 */
--		W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */
--		W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 90 */
--		W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* a0 */
--		W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* b0 */
--		W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)| /* c0 */
--		W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* d0 */
--		W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* e0 */
--		W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1)  /* f0 */
--		/*      -------------------------------         */
--		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
--	};
--	static const u64 twobyte_has_modrm[256 / 64] = {
--		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
--		/*      -------------------------------         */
--		W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)| /* 0f */
--		W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)| /* 1f */
--		W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)| /* 2f */
--		W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 3f */
--		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 4f */
--		W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 5f */
--		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 6f */
--		W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1), /* 7f */
--		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 8f */
--		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 9f */
--		W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)| /* af */
--		W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1), /* bf */
--		W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)| /* cf */
--		W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* df */
--		W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* ef */
--		W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0)  /* ff */
--		/*      -------------------------------         */
--		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
--	};
--#undef	W
--	int need_modrm;
--
--	/* Skip legacy instruction prefixes.  */
--	while (1) {
--		switch (*insn) {
--		case 0x66:
--		case 0x67:
--		case 0x2e:
--		case 0x3e:
--		case 0x26:
--		case 0x64:
--		case 0x65:
--		case 0x36:
--		case 0xf0:
--		case 0xf3:
--		case 0xf2:
--			++insn;
--			continue;
--		}
--		break;
--	}
--
--	/* Skip REX instruction prefix.  */
--	if ((*insn & 0xf0) == 0x40)
--		++insn;
--
--	if (*insn == 0x0f) {	/* Two-byte opcode.  */
--		++insn;
--		need_modrm = test_bit(*insn, twobyte_has_modrm);
--	} else {		/* One-byte opcode.  */
--		need_modrm = test_bit(*insn, onebyte_has_modrm);
--	}
--
--	if (need_modrm) {
--		u8 modrm = *++insn;
--		if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
--			/* Displacement follows ModRM byte.  */
--			return (s32 *) ++insn;
--		}
--	}
--
--	/* No %rip-relative addressing mode here.  */
--	return NULL;
--}
--
--static void __kprobes arch_copy_kprobe(struct kprobe *p)
--{
--	s32 *ripdisp;
--	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
--	ripdisp = is_riprel(p->ainsn.insn);
--	if (ripdisp) {
--		/*
--		 * The copied instruction uses the %rip-relative
--		 * addressing mode.  Adjust the displacement for the
--		 * difference between the original location of this
--		 * instruction and the location of the copy that will
--		 * actually be run.  The tricky bit here is making sure
--		 * that the sign extension happens correctly in this
--		 * calculation, since we need a signed 32-bit result to
--		 * be sign-extended to 64 bits when it's added to the
--		 * %rip value and yield the same 64-bit result that the
--		 * sign-extension of the original signed 32-bit
--		 * displacement would have given.
--		 */
--		s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
--		BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
--		*ripdisp = disp;
--	}
--	p->opcode = *p->addr;
--}
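
The displacement rewrite in arch_copy_kprobe() keeps the copied instruction aimed at the same absolute target. Because %rip-relative operands resolve against the address of the next instruction, and original and copy have identical lengths, the length term cancels out of the algebra. A hedged sketch of the same calculation (helper name and values are invented for illustration):

	static long riprel_fixup(unsigned long orig, unsigned long copy, int old_disp)
	{
		/* target = orig + insn_len + old_disp must equal
		 * copy + insn_len + new_disp, hence
		 * new_disp = orig + old_disp - copy  (insn_len cancels).
		 * The result must still fit in a signed 32-bit field,
		 * which is what the BUG_ON() above verifies.
		 */
		return (long)(orig + old_disp - copy);
	}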
--
--void __kprobes arch_arm_kprobe(struct kprobe *p)
--{
--	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
--}
--
--void __kprobes arch_disarm_kprobe(struct kprobe *p)
--{
--	text_poke(p->addr, &p->opcode, 1);
--}
--
--void __kprobes arch_remove_kprobe(struct kprobe *p)
--{
--	mutex_lock(&kprobe_mutex);
--	free_insn_slot(p->ainsn.insn, 0);
--	mutex_unlock(&kprobe_mutex);
--}
--
--static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
--{
--	kcb->prev_kprobe.kp = kprobe_running();
--	kcb->prev_kprobe.status = kcb->kprobe_status;
--	kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
--	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
--}
--
--static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
--{
--	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
--	kcb->kprobe_status = kcb->prev_kprobe.status;
--	kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
--	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
--}
--
--static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
--				struct kprobe_ctlblk *kcb)
--{
--	__get_cpu_var(current_kprobe) = p;
--	kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
--		= (regs->eflags & (TF_MASK | IF_MASK));
--	if (is_IF_modifier(p->ainsn.insn))
--		kcb->kprobe_saved_rflags &= ~IF_MASK;
--}
--
--static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
--{
--	regs->eflags |= TF_MASK;
--	regs->eflags &= ~IF_MASK;
--	/* single step inline if the instruction is an int3 */
--	if (p->opcode == BREAKPOINT_INSTRUCTION)
--		regs->rip = (unsigned long)p->addr;
--	else
--		regs->rip = (unsigned long)p->ainsn.insn;
--}
--
--/* Called with kretprobe_lock held */
--void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
--				      struct pt_regs *regs)
--{
--	unsigned long *sara = (unsigned long *)regs->rsp;
--
--	ri->ret_addr = (kprobe_opcode_t *) *sara;
--	/* Replace the return addr with trampoline addr */
--	*sara = (unsigned long) &kretprobe_trampoline;
--}
--
--int __kprobes kprobe_handler(struct pt_regs *regs)
--{
--	struct kprobe *p;
--	int ret = 0;
--	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
--	struct kprobe_ctlblk *kcb;
--
--	/*
--	 * We don't want to be preempted for the entire
--	 * duration of kprobe processing
--	 */
--	preempt_disable();
--	kcb = get_kprobe_ctlblk();
--
--	/* Check we're not actually recursing */
--	if (kprobe_running()) {
--		p = get_kprobe(addr);
--		if (p) {
--			if (kcb->kprobe_status == KPROBE_HIT_SS &&
--				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
--				regs->eflags &= ~TF_MASK;
--				regs->eflags |= kcb->kprobe_saved_rflags;
--				goto no_kprobe;
--			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
--				/* TODO: Provide re-entrancy from
--				 * post_kprobes_handler() and avoid exception
--				 * stack corruption while single-stepping on
--				 * the instruction of the new probe.
--				 */
--				arch_disarm_kprobe(p);
--				regs->rip = (unsigned long)p->addr;
--				reset_current_kprobe();
--				ret = 1;
--			} else {
--				/* We have reentered the kprobe_handler(), since
--				 * another probe was hit while within the
--				 * handler. We here save the original kprobe
--				 * variables and just single step on instruction
--				 * of the new probe without calling any user
--				 * handlers.
--				 */
--				save_previous_kprobe(kcb);
--				set_current_kprobe(p, regs, kcb);
--				kprobes_inc_nmissed_count(p);
--				prepare_singlestep(p, regs);
--				kcb->kprobe_status = KPROBE_REENTER;
--				return 1;
--			}
--		} else {
--			if (*addr != BREAKPOINT_INSTRUCTION) {
--			/* The breakpoint instruction was removed by
--			 * another cpu right after we hit it; no further
--			 * handling of this interrupt is appropriate.
--			 */
--				regs->rip = (unsigned long)addr;
--				ret = 1;
--				goto no_kprobe;
--			}
--			p = __get_cpu_var(current_kprobe);
--			if (p->break_handler && p->break_handler(p, regs)) {
--				goto ss_probe;
--			}
--		}
--		goto no_kprobe;
--	}
--
--	p = get_kprobe(addr);
--	if (!p) {
--		if (*addr != BREAKPOINT_INSTRUCTION) {
--			/*
--			 * The breakpoint instruction was removed right
--			 * after we hit it.  Another cpu has removed
--			 * either a probepoint or a debugger breakpoint
--			 * at this address.  In either case, no further
--			 * handling of this interrupt is appropriate.
--			 * Back up over the (now missing) int3 and run
--			 * the original instruction.
--			 */
--			regs->rip = (unsigned long)addr;
--			ret = 1;
--		}
--		/* Not one of ours: let kernel handle it */
--		goto no_kprobe;
--	}
--
--	set_current_kprobe(p, regs, kcb);
--	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
--
--	if (p->pre_handler && p->pre_handler(p, regs))
--		/* handler has already set things up, so skip ss setup */
--		return 1;
--
--ss_probe:
--	prepare_singlestep(p, regs);
--	kcb->kprobe_status = KPROBE_HIT_SS;
--	return 1;
--
--no_kprobe:
--	preempt_enable_no_resched();
--	return ret;
--}
--
--/*
-- * For function-return probes, init_kprobes() establishes a probepoint
-- * here. When a retprobed function returns, this probe is hit and
-- * trampoline_probe_handler() runs, calling the kretprobe's handler.
-- */
-- void kretprobe_trampoline_holder(void)
-- {
-- 	asm volatile (  ".global kretprobe_trampoline\n"
-- 			"kretprobe_trampoline: \n"
-- 			"nop\n");
-- }
--
--/*
-- * Called when we hit the probe point at kretprobe_trampoline
-- */
--int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
--{
--	struct kretprobe_instance *ri = NULL;
--	struct hlist_head *head, empty_rp;
--	struct hlist_node *node, *tmp;
--	unsigned long flags, orig_ret_address = 0;
--	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
--
--	INIT_HLIST_HEAD(&empty_rp);
--	spin_lock_irqsave(&kretprobe_lock, flags);
--	head = kretprobe_inst_table_head(current);
--
--	/*
--	 * It is possible to have multiple instances associated with a given
--	 * task either because multiple functions in the call path
--	 * have a return probe installed on them, and/or more than one
--	 * return probe was registered for a target function.
--	 *
--	 * We can handle this because:
--	 *     - instances are always inserted at the head of the list
--	 *     - when multiple return probes are registered for the same
--	 *       function, the first instance's ret_addr will point to the
--	 *       real return address, and all the rest will point to
--	 *       kretprobe_trampoline
--	 */
--	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
--		if (ri->task != current)
--			/* another task is sharing our hash bucket */
--			continue;
--
--		if (ri->rp && ri->rp->handler)
--			ri->rp->handler(ri, regs);
--
--		orig_ret_address = (unsigned long)ri->ret_addr;
--		recycle_rp_inst(ri, &empty_rp);
--
--		if (orig_ret_address != trampoline_address)
--			/*
--			 * This is the real return address. Any other
--			 * instances associated with this task are for
--			 * other calls deeper on the call stack
--			 */
--			break;
--	}
--
--	kretprobe_assert(ri, orig_ret_address, trampoline_address);
--	regs->rip = orig_ret_address;
--
--	reset_current_kprobe();
--	spin_unlock_irqrestore(&kretprobe_lock, flags);
--	preempt_enable_no_resched();
--
--	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
--		hlist_del(&ri->hlist);
--		kfree(ri);
--	}
--	/*
--	 * By returning a non-zero value, we are telling
--	 * kprobe_handler() that we don't want the post_handler
--	 * to run (and have re-enabled preemption)
--	 */
--	return 1;
--}
--
--/*
-- * Called after single-stepping.  p->addr is the address of the
-- * instruction whose first byte has been replaced by the "int 3"
-- * instruction.  To avoid the SMP problems that can occur when we
-- * temporarily put back the original opcode to single-step, we
-- * single-stepped a copy of the instruction.  The address of this
-- * copy is p->ainsn.insn.
-- *
-- * This function prepares to return from the post-single-step
-- * interrupt.  We have to fix up the stack as follows:
-- *
-- * 0) Except in the case of absolute or indirect jump or call instructions,
-- * the new rip is relative to the copied instruction.  We need to make
-- * it relative to the original instruction.
-- *
-- * 1) If the single-stepped instruction was pushfl, then the TF and IF
-- * flags are set in the just-pushed eflags, and may need to be cleared.
-- *
-- * 2) If the single-stepped instruction was a call, the return address
-- * that is atop the stack is the address following the copied instruction.
-- * We need to make it the address following the original instruction.
-- */
--static void __kprobes resume_execution(struct kprobe *p,
--		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
--{
--	unsigned long *tos = (unsigned long *)regs->rsp;
--	unsigned long copy_rip = (unsigned long)p->ainsn.insn;
--	unsigned long orig_rip = (unsigned long)p->addr;
--	kprobe_opcode_t *insn = p->ainsn.insn;
--
--	/*skip the REX prefix*/
--	if (*insn >= 0x40 && *insn <= 0x4f)
--		insn++;
--
--	regs->eflags &= ~TF_MASK;
--	switch (*insn) {
--	case 0x9c:	/* pushfl */
--		*tos &= ~(TF_MASK | IF_MASK);
--		*tos |= kcb->kprobe_old_rflags;
--		break;
--	case 0xc2:	/* iret/ret/lret */
--	case 0xc3:
--	case 0xca:
--	case 0xcb:
--	case 0xcf:
--	case 0xea:	/* jmp absolute -- ip is correct */
--		/* ip is already adjusted, no more changes required */
--		goto no_change;
--	case 0xe8:	/* call relative - Fix return addr */
--		*tos = orig_rip + (*tos - copy_rip);
--		break;
--	case 0xff:
--		if ((insn[1] & 0x30) == 0x10) {
--			/* call absolute, indirect */
--			/* Fix return addr; ip is correct. */
--			*tos = orig_rip + (*tos - copy_rip);
--			goto no_change;
--		} else if (((insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
--			   ((insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
--			/* ip is correct. */
--			goto no_change;
--		}
--	default:
--		break;
--	}
--
--	regs->rip = orig_rip + (regs->rip - copy_rip);
--no_change:
--
--	return;
--}
--
--int __kprobes post_kprobe_handler(struct pt_regs *regs)
--{
--	struct kprobe *cur = kprobe_running();
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--
--	if (!cur)
--		return 0;
--
--	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
--		kcb->kprobe_status = KPROBE_HIT_SSDONE;
--		cur->post_handler(cur, regs, 0);
--	}
--
--	resume_execution(cur, regs, kcb);
--	regs->eflags |= kcb->kprobe_saved_rflags;
--	trace_hardirqs_fixup_flags(regs->eflags);
--
--	/* Restore the original saved kprobes variables and continue. */
--	if (kcb->kprobe_status == KPROBE_REENTER) {
--		restore_previous_kprobe(kcb);
--		goto out;
--	}
--	reset_current_kprobe();
--out:
--	preempt_enable_no_resched();
--
--	/*
--	 * if somebody else is singlestepping across a probe point, eflags
--	 * will have TF set, in which case, continue the remaining processing
--	 * of do_debug, as if this is not a probe hit.
--	 */
--	if (regs->eflags & TF_MASK)
--		return 0;
--
--	return 1;
--}
--
--int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
--{
--	struct kprobe *cur = kprobe_running();
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--	const struct exception_table_entry *fixup;
--
--	switch(kcb->kprobe_status) {
--	case KPROBE_HIT_SS:
--	case KPROBE_REENTER:
--		/*
--		 * We are here because the instruction being single
--		 * stepped caused a page fault. We reset the current
--		 * kprobe and the rip points back to the probe address
--		 * and allow the page fault handler to continue as a
--		 * normal page fault.
--		 */
--		regs->rip = (unsigned long)cur->addr;
--		regs->eflags |= kcb->kprobe_old_rflags;
--		if (kcb->kprobe_status == KPROBE_REENTER)
--			restore_previous_kprobe(kcb);
--		else
--			reset_current_kprobe();
--		preempt_enable_no_resched();
--		break;
--	case KPROBE_HIT_ACTIVE:
--	case KPROBE_HIT_SSDONE:
--		/*
--		 * We increment the nmissed count for accounting;
--		 * we can also use npre/npostfault count for accounting
--		 * these specific fault cases.
--		 */
--		kprobes_inc_nmissed_count(cur);
--
--		/*
--		 * We come here because instructions in the pre/post
--		 * handler caused the page_fault, this could happen
--		 * if handler tries to access user space by
--		 * copy_from_user(), get_user() etc. Let the
--		 * user-specified handler try to fix it first.
--		 */
--		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
--			return 1;
--
--		/*
--		 * In case the user-specified fault handler returned
--		 * zero, try to fix up.
--		 */
--		fixup = search_exception_tables(regs->rip);
--		if (fixup) {
--			regs->rip = fixup->fixup;
--			return 1;
--		}
--
--		/*
--		 * fixup() could not handle it,
--		 * Let do_page_fault() fix it.
--		 */
--		break;
--	default:
--		break;
--	}
--	return 0;
--}
--
--/*
-- * Wrapper routine for handling exceptions.
-- */
--int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
--				       unsigned long val, void *data)
--{
--	struct die_args *args = (struct die_args *)data;
--	int ret = NOTIFY_DONE;
--
--	if (args->regs && user_mode(args->regs))
--		return ret;
--
--	switch (val) {
--	case DIE_INT3:
--		if (kprobe_handler(args->regs))
--			ret = NOTIFY_STOP;
--		break;
--	case DIE_DEBUG:
--		if (post_kprobe_handler(args->regs))
--			ret = NOTIFY_STOP;
--		break;
--	case DIE_GPF:
--		/* kprobe_running() needs smp_processor_id() */
--		preempt_disable();
--		if (kprobe_running() &&
--		    kprobe_fault_handler(args->regs, args->trapnr))
--			ret = NOTIFY_STOP;
--		preempt_enable();
--		break;
--	default:
--		break;
--	}
--	return ret;
--}
--
--int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
--{
--	struct jprobe *jp = container_of(p, struct jprobe, kp);
--	unsigned long addr;
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--
--	kcb->jprobe_saved_regs = *regs;
--	kcb->jprobe_saved_rsp = (long *) regs->rsp;
--	addr = (unsigned long)(kcb->jprobe_saved_rsp);
--	/*
--	 * As Linus pointed out, gcc assumes that the callee
--	 * owns the argument space and could overwrite it, e.g.
--	 * tailcall optimization. So, to be absolutely safe
--	 * we also save and restore enough stack bytes to cover
--	 * the argument area.
--	 */
--	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
--			MIN_STACK_SIZE(addr));
--	regs->eflags &= ~IF_MASK;
--	trace_hardirqs_off();
--	regs->rip = (unsigned long)(jp->entry);
--	return 1;
--}
--
--void __kprobes jprobe_return(void)
--{
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--
--	asm volatile ("       xchg   %%rbx,%%rsp     \n"
--		      "       int3			\n"
--		      "       .globl jprobe_return_end	\n"
--		      "       jprobe_return_end:	\n"
--		      "       nop			\n"::"b"
--		      (kcb->jprobe_saved_rsp):"memory");
--}
--
--int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
--{
--	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
--	u8 *addr = (u8 *) (regs->rip - 1);
--	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
--	struct jprobe *jp = container_of(p, struct jprobe, kp);
--
--	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
--		if ((unsigned long *)regs->rsp != kcb->jprobe_saved_rsp) {
--			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
--			printk("current rsp %p does not match saved rsp %p\n",
--			       (long *)regs->rsp, kcb->jprobe_saved_rsp);
--			printk("Saved registers for jprobe %p\n", jp);
--			show_registers(saved_regs);
--			printk("Current registers\n");
--			show_registers(regs);
--			BUG();
--		}
--		*regs = kcb->jprobe_saved_regs;
--		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
--		       MIN_STACK_SIZE(stack_addr));
--		preempt_enable_no_resched();
--		return 1;
--	}
--	return 0;
--}
--
--static struct kprobe trampoline_p = {
--	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
--	.pre_handler = trampoline_probe_handler
--};
--
--int __init arch_init_kprobes(void)
--{
--	return register_kprobe(&trampoline_p);
--}
--
--int __kprobes arch_trampoline_kprobe(struct kprobe *p)
--{
--	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
--		return 1;
--
--	return 0;
--}
-diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
++
++static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
++{
++	u32 val = 0;
++
++	if (offset >= LAPIC_MMIO_LENGTH)
++		return 0;
++
++	switch (offset) {
++	case APIC_ARBPRI:
++		printk(KERN_WARNING "Access APIC ARBPRI register "
++		       "which is for P6\n");
++		break;
++
++	case APIC_TMCCT:	/* Timer CCR */
++		val = apic_get_tmcct(apic);
++		break;
++
++	case APIC_TASKPRI:
++		report_tpr_access(apic, false);
++		/* fall thru */
++	default:
++		apic_update_ppr(apic);
++		val = apic_get_reg(apic, offset);
++		break;
++	}
++
++	return val;
++}
++
++static void apic_mmio_read(struct kvm_io_device *this,
++			   gpa_t address, int len, void *data)
++{
++	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
++	unsigned int offset = address - apic->base_address;
++	unsigned char alignment = offset & 0xf;
++	u32 result;
++
++	if ((alignment + len) > 4) {
++		printk(KERN_ERR "KVM_APIC_READ: alignment error %lx %d",
++		       (unsigned long)address, len);
++		return;
++	}
++	result = __apic_read(apic, offset & ~0xf);
++
++	switch (len) {
++	case 1:
++	case 2:
++	case 4:
++		memcpy(data, (char *)&result + alignment, len);
++		break;
++	default:
++		printk(KERN_ERR "Local APIC read with len = %x, "
++		       "should be 1,2, or 4 instead\n", len);
++		break;
++	}
++}
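
As a concrete illustration of the alignment handling above, consider a hypothetical 2-byte guest read at APIC offset 0x32 (values invented, little-endian host assumed):

	unsigned int offset = 0x32, len = 2;
	unsigned char alignment = offset & 0xf;          /* == 2, and 2 + 2 <= 4: allowed   */
	unsigned int result = 0xdeadbeef;                /* stand-in for __apic_read(apic, 0x30) */
	unsigned short out;
	memcpy(&out, (char *)&result + alignment, len);  /* out == 0xdead on little-endian  */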
++
++static void update_divide_count(struct kvm_lapic *apic)
++{
++	u32 tmp1, tmp2, tdcr;
++
++	tdcr = apic_get_reg(apic, APIC_TDCR);
++	tmp1 = tdcr & 0xf;
++	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
++	apic->timer.divide_count = 0x1 << (tmp2 & 0x7);
++
++	apic_debug("timer divide count is 0x%x\n",
++				   apic->timer.divide_count);
++}
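
The divide-count decode above reassembles the architectural divide value from APIC_TDCR bits 0, 1 and 3 and maps it to a power of two. A few hypothetical TDCR values worked through the same expressions:

	/* tdcr = 0x3 (0011b): tmp1 = 0x3, tmp2 = (3 | 0) + 1 = 4, count = 1 << 4 = 16        */
	/* tdcr = 0xb (1011b): tmp1 = 0xb, tmp2 = (3 | 4) + 1 = 8, count = 1 << (8 & 7) = 1   */
	/* tdcr = 0xa (1010b): tmp1 = 0xa, tmp2 = (2 | 4) + 1 = 7, count = 1 << 7 = 128       */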
++
++static void start_apic_timer(struct kvm_lapic *apic)
++{
++	ktime_t now = apic->timer.dev.base->get_time();
++
++	apic->timer.last_update = now;
++
++	apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
++		    APIC_BUS_CYCLE_NS * apic->timer.divide_count;
++	atomic_set(&apic->timer.pending, 0);
++	hrtimer_start(&apic->timer.dev,
++		      ktime_add_ns(now, apic->timer.period),
++		      HRTIMER_MODE_ABS);
++
++	apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
++			   PRIx64 ", "
++			   "timer initial count 0x%x, period %lldns, "
++			   "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__,
++			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
++			   apic_get_reg(apic, APIC_TMICT),
++			   apic->timer.period,
++			   ktime_to_ns(ktime_add_ns(now,
++					apic->timer.period)));
++}
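
The period programmed into the hrtimer is simply TMICT scaled by the bus-cycle length and the divider, which makes a quick manual sanity check possible. Hypothetical numbers, assuming APIC_BUS_CYCLE_NS is 1 ns:

	/* TMICT = 1000000, divide_count = 16, APIC_BUS_CYCLE_NS = 1 (all hypothetical) */
	long long period = 1000000LL * 1 * 16;   /* 16,000,000 ns: the timer fires in 16 ms */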
++
++static void apic_mmio_write(struct kvm_io_device *this,
++			    gpa_t address, int len, const void *data)
++{
++	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
++	unsigned int offset = address - apic->base_address;
++	unsigned char alignment = offset & 0xf;
++	u32 val;
++
++	/*
++	 * APIC registers must be aligned on a 128-bit boundary.
++	 * 32/64/128-bit registers must be accessed with 32-bit reads and writes.
++	 * Refer to SDM 8.4.1.
++	 */
++	if (len != 4 || alignment) {
++		if (printk_ratelimit())
++			printk(KERN_ERR "apic write: bad size=%d %lx\n",
++			       len, (long)address);
++		return;
++	}
++
++	val = *(u32 *) data;
++
++	/* too common printing */
++	if (offset != APIC_EOI)
++		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
++			   "0x%x\n", __FUNCTION__, offset, len, val);
++
++	offset &= 0xff0;
++
++	switch (offset) {
++	case APIC_ID:		/* Local APIC ID */
++		apic_set_reg(apic, APIC_ID, val);
++		break;
++
++	case APIC_TASKPRI:
++		report_tpr_access(apic, true);
++		apic_set_tpr(apic, val & 0xff);
++		break;
++
++	case APIC_EOI:
++		apic_set_eoi(apic);
++		break;
++
++	case APIC_LDR:
++		apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
++		break;
++
++	case APIC_DFR:
++		apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
++		break;
++
++	case APIC_SPIV:
++		apic_set_reg(apic, APIC_SPIV, val & 0x3ff);
++		if (!(val & APIC_SPIV_APIC_ENABLED)) {
++			int i;
++			u32 lvt_val;
++
++			for (i = 0; i < APIC_LVT_NUM; i++) {
++				lvt_val = apic_get_reg(apic,
++						       APIC_LVTT + 0x10 * i);
++				apic_set_reg(apic, APIC_LVTT + 0x10 * i,
++					     lvt_val | APIC_LVT_MASKED);
++			}
++			atomic_set(&apic->timer.pending, 0);
++
++		}
++		break;
++
++	case APIC_ICR:
++		/* No delay here, so we always clear the pending bit */
++		apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
++		apic_send_ipi(apic);
++		break;
++
++	case APIC_ICR2:
++		apic_set_reg(apic, APIC_ICR2, val & 0xff000000);
++		break;
++
++	case APIC_LVTT:
++	case APIC_LVTTHMR:
++	case APIC_LVTPC:
++	case APIC_LVT0:
++	case APIC_LVT1:
++	case APIC_LVTERR:
++		/* TODO: Check vector */
++		if (!apic_sw_enabled(apic))
++			val |= APIC_LVT_MASKED;
++
++		val &= apic_lvt_mask[(offset - APIC_LVTT) >> 4];
++		apic_set_reg(apic, offset, val);
++
++		break;
++
++	case APIC_TMICT:
++		hrtimer_cancel(&apic->timer.dev);
++		apic_set_reg(apic, APIC_TMICT, val);
++		start_apic_timer(apic);
++		return;
++
++	case APIC_TDCR:
++		if (val & 4)
++			printk(KERN_ERR "KVM_WRITE:TDCR %x\n", val);
++		apic_set_reg(apic, APIC_TDCR, val);
++		update_divide_count(apic);
++		break;
++
++	default:
++		apic_debug("Local APIC Write to read-only register %x\n",
++			   offset);
++		break;
++	}
++
++}
++
++static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
++{
++	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
++	int ret = 0;
++
++
++	if (apic_hw_enabled(apic) &&
++	    (addr >= apic->base_address) &&
++	    (addr < (apic->base_address + LAPIC_MMIO_LENGTH)))
++		ret = 1;
++
++	return ret;
++}
++
++void kvm_free_lapic(struct kvm_vcpu *vcpu)
++{
++	if (!vcpu->arch.apic)
++		return;
++
++	hrtimer_cancel(&vcpu->arch.apic->timer.dev);
++
++	if (vcpu->arch.apic->regs_page)
++		__free_page(vcpu->arch.apic->regs_page);
++
++	kfree(vcpu->arch.apic);
++}
++
++/*
++ *----------------------------------------------------------------------
++ * LAPIC interface
++ *----------------------------------------------------------------------
++ */
++
++void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	if (!apic)
++		return;
++	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
++		     | (apic_get_reg(apic, APIC_TASKPRI) & 4));
++}
++
++u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++	u64 tpr;
++
++	if (!apic)
++		return 0;
++	tpr = (u64) apic_get_reg(apic, APIC_TASKPRI);
++
++	return (tpr & 0xf0) >> 4;
++}
++EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
++
++void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	if (!apic) {
++		value |= MSR_IA32_APICBASE_BSP;
++		vcpu->arch.apic_base = value;
++		return;
++	}
++	if (apic->vcpu->vcpu_id)
++		value &= ~MSR_IA32_APICBASE_BSP;
++
++	vcpu->arch.apic_base = value;
++	apic->base_address = apic->vcpu->arch.apic_base &
++			     MSR_IA32_APICBASE_BASE;
++
++	/* with FSB delivery interrupt, we can restart APIC functionality */
++	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
++		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
++
++}
++
++u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.apic_base;
++}
++EXPORT_SYMBOL_GPL(kvm_lapic_get_base);
++
++void kvm_lapic_reset(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic;
++	int i;
++
++	apic_debug("%s\n", __FUNCTION__);
++
++	ASSERT(vcpu);
++	apic = vcpu->arch.apic;
++	ASSERT(apic != NULL);
++
++	/* Stop the timer in case it's a reset to an active apic */
++	hrtimer_cancel(&apic->timer.dev);
++
++	apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
++	apic_set_reg(apic, APIC_LVR, APIC_VERSION);
++
++	for (i = 0; i < APIC_LVT_NUM; i++)
++		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
++	apic_set_reg(apic, APIC_LVT0,
++		     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
++
++	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
++	apic_set_reg(apic, APIC_SPIV, 0xff);
++	apic_set_reg(apic, APIC_TASKPRI, 0);
++	apic_set_reg(apic, APIC_LDR, 0);
++	apic_set_reg(apic, APIC_ESR, 0);
++	apic_set_reg(apic, APIC_ICR, 0);
++	apic_set_reg(apic, APIC_ICR2, 0);
++	apic_set_reg(apic, APIC_TDCR, 0);
++	apic_set_reg(apic, APIC_TMICT, 0);
++	for (i = 0; i < 8; i++) {
++		apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
++		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
++		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
++	}
++	update_divide_count(apic);
++	atomic_set(&apic->timer.pending, 0);
++	if (vcpu->vcpu_id == 0)
++		vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
++	apic_update_ppr(apic);
++
++	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
++		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
++		   vcpu, kvm_apic_id(apic),
++		   vcpu->arch.apic_base, apic->base_address);
++}
++EXPORT_SYMBOL_GPL(kvm_lapic_reset);
++
++int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++	int ret = 0;
++
++	if (!apic)
++		return 0;
++	ret = apic_enabled(apic);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
++
++/*
++ *----------------------------------------------------------------------
++ * timer interface
++ *----------------------------------------------------------------------
++ */
++
++/* TODO: make sure __apic_timer_fn runs in current pCPU */
++static int __apic_timer_fn(struct kvm_lapic *apic)
++{
++	int result = 0;
++	wait_queue_head_t *q = &apic->vcpu->wq;
++
++	atomic_inc(&apic->timer.pending);
++	if (waitqueue_active(q)) {
++		apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
++		wake_up_interruptible(q);
++	}
++	if (apic_lvtt_period(apic)) {
++		result = 1;
++		apic->timer.dev.expires = ktime_add_ns(
++					apic->timer.dev.expires,
++					apic->timer.period);
++	}
++	return result;
++}
++
++static int __inject_apic_timer_irq(struct kvm_lapic *apic)
++{
++	int vector;
++
++	vector = apic_lvt_vector(apic, APIC_LVTT);
++	return __apic_accept_irq(apic, APIC_DM_FIXED, vector, 1, 0);
++}
++
++static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
++{
++	struct kvm_lapic *apic;
++	int restart_timer = 0;
++
++	apic = container_of(data, struct kvm_lapic, timer.dev);
++
++	restart_timer = __apic_timer_fn(apic);
++
++	if (restart_timer)
++		return HRTIMER_RESTART;
++	else
++		return HRTIMER_NORESTART;
++}
++
++int kvm_create_lapic(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic;
++
++	ASSERT(vcpu != NULL);
++	apic_debug("apic_init %d\n", vcpu->vcpu_id);
++
++	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
++	if (!apic)
++		goto nomem;
++
++	vcpu->arch.apic = apic;
++
++	apic->regs_page = alloc_page(GFP_KERNEL);
++	if (apic->regs_page == NULL) {
++		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
++		       vcpu->vcpu_id);
++		goto nomem_free_apic;
++	}
++	apic->regs = page_address(apic->regs_page);
++	memset(apic->regs, 0, PAGE_SIZE);
++	apic->vcpu = vcpu;
++
++	hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++	apic->timer.dev.function = apic_timer_fn;
++	apic->base_address = APIC_DEFAULT_PHYS_BASE;
++	vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
++
++	kvm_lapic_reset(vcpu);
++	apic->dev.read = apic_mmio_read;
++	apic->dev.write = apic_mmio_write;
++	apic->dev.in_range = apic_mmio_range;
++	apic->dev.private = apic;
++
++	return 0;
++nomem_free_apic:
++	kfree(apic);
++nomem:
++	return -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(kvm_create_lapic);
++
++int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++	int highest_irr;
++
++	if (!apic || !apic_enabled(apic))
++		return -1;
++
++	apic_update_ppr(apic);
++	highest_irr = apic_find_highest_irr(apic);
++	if ((highest_irr == -1) ||
++	    ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)))
++		return -1;
++	return highest_irr;
++}
++
++int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
++{
++	u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
++	int r = 0;
++
++	if (vcpu->vcpu_id == 0) {
++		if (!apic_hw_enabled(vcpu->arch.apic))
++			r = 1;
++		if ((lvt0 & APIC_LVT_MASKED) == 0 &&
++		    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
++			r = 1;
++	}
++	return r;
++}
++
++void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	if (apic && apic_lvt_enabled(apic, APIC_LVTT) &&
++		atomic_read(&apic->timer.pending) > 0) {
++		if (__inject_apic_timer_irq(apic))
++			atomic_dec(&apic->timer.pending);
++	}
++}
++
++void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
++		apic->timer.last_update = ktime_add_ns(
++				apic->timer.last_update,
++				apic->timer.period);
++}
++
++int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
++{
++	int vector = kvm_apic_has_interrupt(vcpu);
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	if (vector == -1)
++		return -1;
++
++	apic_set_vector(vector, apic->regs + APIC_ISR);
++	apic_update_ppr(apic);
++	apic_clear_irr(vector, apic);
++	return vector;
++}
++
++void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	apic->base_address = vcpu->arch.apic_base &
++			     MSR_IA32_APICBASE_BASE;
++	apic_set_reg(apic, APIC_LVR, APIC_VERSION);
++	apic_update_ppr(apic);
++	hrtimer_cancel(&apic->timer.dev);
++	update_divide_count(apic);
++	start_apic_timer(apic);
++}
++
++void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++	struct hrtimer *timer;
++
++	if (!apic)
++		return;
++
++	timer = &apic->timer.dev;
++	if (hrtimer_cancel(timer))
++		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
++}
++
++void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
++{
++	u32 data;
++	void *vapic;
++
++	if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
++		return;
++
++	vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
++	data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
++	kunmap_atomic(vapic, KM_USER0);
++
++	apic_set_tpr(vcpu->arch.apic, data & 0xff);
++}
++
++void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
++{
++	u32 data, tpr;
++	int max_irr, max_isr;
++	struct kvm_lapic *apic;
++	void *vapic;
++
++	if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
++		return;
++
++	apic = vcpu->arch.apic;
++	tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
++	max_irr = apic_find_highest_irr(apic);
++	if (max_irr < 0)
++		max_irr = 0;
++	max_isr = apic_find_highest_isr(apic);
++	if (max_isr < 0)
++		max_isr = 0;
++	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
++
++	vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
++	*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
++	kunmap_atomic(vapic, KM_USER0);
++}
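
The 32-bit word written to the vapic page packs three fields: the task priority in byte 0, the upper nibble of the highest in-service vector in byte 1, and the highest pending (IRR) vector in byte 3. Worked with hypothetical vector numbers:

	/* tpr = 0x20, max_isr = 0x31, max_irr = 0x41 (hypothetical) */
	unsigned int data = (0x20 & 0xff) | ((0x31 & 0xf0) << 8) | (0x41 << 24);
	/* data == 0x41003020 */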
++
++void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
++{
++	if (!irqchip_in_kernel(vcpu->kvm))
++		return;
++
++	vcpu->arch.apic->vapic_addr = vapic_addr;
++}
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
 new file mode 100644
-index 0000000..8a7660c
+index 0000000..676c396
 --- /dev/null
-+++ b/arch/x86/kernel/ldt.c
-@@ -0,0 +1,260 @@
++++ b/arch/x86/kvm/lapic.h
+@@ -0,0 +1,50 @@
++#ifndef __KVM_X86_LAPIC_H
++#define __KVM_X86_LAPIC_H
++
++#include "iodev.h"
++
++#include <linux/kvm_host.h>
++
++struct kvm_lapic {
++	unsigned long base_address;
++	struct kvm_io_device dev;
++	struct {
++		atomic_t pending;
++		s64 period;	/* unit: ns */
++		u32 divide_count;
++		ktime_t last_update;
++		struct hrtimer dev;
++	} timer;
++	struct kvm_vcpu *vcpu;
++	struct page *regs_page;
++	void *regs;
++	gpa_t vapic_addr;
++	struct page *vapic_page;
++};
++int kvm_create_lapic(struct kvm_vcpu *vcpu);
++void kvm_free_lapic(struct kvm_vcpu *vcpu);
++
++int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
++int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
++int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
++void kvm_lapic_reset(struct kvm_vcpu *vcpu);
++u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
++void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
++void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
++
++int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
++int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
++int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
++
++u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
++void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
++void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
++int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
++int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
++void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
++
++void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
++void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
++void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
++
++#endif
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+new file mode 100644
+index 0000000..8efdcdb
+--- /dev/null
++++ b/arch/x86/kvm/mmu.c
+@@ -0,0 +1,1885 @@
 +/*
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-+ * Copyright (C) 2002 Andi Kleen
++ * Kernel-based Virtual Machine driver for Linux
++ *
++ * This module enables machines with Intel VT-x extensions to run virtual
++ * machines without emulation or binary translation.
++ *
++ * MMU support
++ *
++ * Copyright (C) 2006 Qumranet, Inc.
++ *
++ * Authors:
++ *   Yaniv Kamay  <yaniv at qumranet.com>
++ *   Avi Kivity   <avi at qumranet.com>
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
 + *
-+ * This handles calls from both 32bit and 64bit mode.
 + */
 +
-+#include <linux/errno.h>
-+#include <linux/sched.h>
++#include "vmx.h"
++#include "mmu.h"
++
++#include <linux/kvm_host.h>
++#include <linux/types.h>
 +#include <linux/string.h>
 +#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/vmalloc.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/swap.h>
 +
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/mmu_context.h>
++#include <asm/page.h>
++#include <asm/cmpxchg.h>
++#include <asm/io.h>
 +
-+#ifdef CONFIG_SMP
-+static void flush_ldt(void *null)
++#undef MMU_DEBUG
++
++#undef AUDIT
++
++#ifdef AUDIT
++static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
++#else
++static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
++#endif
++
++#ifdef MMU_DEBUG
++
++#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
++#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
++
++#else
++
++#define pgprintk(x...) do { } while (0)
++#define rmap_printk(x...) do { } while (0)
++
++#endif
++
++#if defined(MMU_DEBUG) || defined(AUDIT)
++static int dbg = 1;
++#endif
++
++#ifndef MMU_DEBUG
++#define ASSERT(x) do { } while (0)
++#else
++#define ASSERT(x)							\
++	if (!(x)) {							\
++		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
++		       __FILE__, __LINE__, #x);				\
++	}
++#endif
++
++#define PT64_PT_BITS 9
++#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
++#define PT32_PT_BITS 10
++#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
++
++#define PT_WRITABLE_SHIFT 1
++
++#define PT_PRESENT_MASK (1ULL << 0)
++#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
++#define PT_USER_MASK (1ULL << 2)
++#define PT_PWT_MASK (1ULL << 3)
++#define PT_PCD_MASK (1ULL << 4)
++#define PT_ACCESSED_MASK (1ULL << 5)
++#define PT_DIRTY_MASK (1ULL << 6)
++#define PT_PAGE_SIZE_MASK (1ULL << 7)
++#define PT_PAT_MASK (1ULL << 7)
++#define PT_GLOBAL_MASK (1ULL << 8)
++#define PT64_NX_SHIFT 63
++#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
++
++#define PT_PAT_SHIFT 7
++#define PT_DIR_PAT_SHIFT 12
++#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
++
++#define PT32_DIR_PSE36_SIZE 4
++#define PT32_DIR_PSE36_SHIFT 13
++#define PT32_DIR_PSE36_MASK \
++	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
++
++
++#define PT_FIRST_AVAIL_BITS_SHIFT 9
++#define PT64_SECOND_AVAIL_BITS_SHIFT 52
++
++#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
++
++#define VALID_PAGE(x) ((x) != INVALID_PAGE)
++
++#define PT64_LEVEL_BITS 9
++
++#define PT64_LEVEL_SHIFT(level) \
++		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
++
++#define PT64_LEVEL_MASK(level) \
++		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
++
++#define PT64_INDEX(address, level)\
++	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
++
++
++#define PT32_LEVEL_BITS 10
++
++#define PT32_LEVEL_SHIFT(level) \
++		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
++
++#define PT32_LEVEL_MASK(level) \
++		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
++
++#define PT32_INDEX(address, level)\
++	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
++
++
++#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
++#define PT64_DIR_BASE_ADDR_MASK \
++	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
++
++#define PT32_BASE_ADDR_MASK PAGE_MASK
++#define PT32_DIR_BASE_ADDR_MASK \
++	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
++
++#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
++			| PT64_NX_MASK)
++
++#define PFERR_PRESENT_MASK (1U << 0)
++#define PFERR_WRITE_MASK (1U << 1)
++#define PFERR_USER_MASK (1U << 2)
++#define PFERR_FETCH_MASK (1U << 4)
++
++#define PT64_ROOT_LEVEL 4
++#define PT32_ROOT_LEVEL 2
++#define PT32E_ROOT_LEVEL 3
++
++#define PT_DIRECTORY_LEVEL 2
++#define PT_PAGE_TABLE_LEVEL 1
++
++#define RMAP_EXT 4
++
++#define ACC_EXEC_MASK    1
++#define ACC_WRITE_MASK   PT_WRITABLE_MASK
++#define ACC_USER_MASK    PT_USER_MASK
++#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
++
++struct kvm_rmap_desc {
++	u64 *shadow_ptes[RMAP_EXT];
++	struct kvm_rmap_desc *more;
++};
++
++static struct kmem_cache *pte_chain_cache;
++static struct kmem_cache *rmap_desc_cache;
++static struct kmem_cache *mmu_page_header_cache;
++
++static u64 __read_mostly shadow_trap_nonpresent_pte;
++static u64 __read_mostly shadow_notrap_nonpresent_pte;
++
++void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 +{
-+	if (current->active_mm)
-+		load_LDT(&current->active_mm->context);
++	shadow_trap_nonpresent_pte = trap_pte;
++	shadow_notrap_nonpresent_pte = notrap_pte;
 +}
-+#endif
++EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 +
-+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++static int is_write_protection(struct kvm_vcpu *vcpu)
 +{
-+	void *oldldt, *newldt;
-+	int oldsize;
++	return vcpu->arch.cr0 & X86_CR0_WP;
++}
 +
-+	if (mincount <= pc->size)
-+		return 0;
-+	oldsize = pc->size;
-+	mincount = (mincount + 511) & (~511);
-+	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
-+		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
-+	else
-+		newldt = (void *)__get_free_page(GFP_KERNEL);
++static int is_cpuid_PSE36(void)
++{
++	return 1;
++}
 +
-+	if (!newldt)
-+		return -ENOMEM;
++static int is_nx(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.shadow_efer & EFER_NX;
++}
 +
-+	if (oldsize)
-+		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
-+	oldldt = pc->ldt;
-+	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
-+	       (mincount - oldsize) * LDT_ENTRY_SIZE);
++static int is_present_pte(unsigned long pte)
++{
++	return pte & PT_PRESENT_MASK;
++}
 +
-+#ifdef CONFIG_X86_64
-+	/* CHECKME: Do we really need this ? */
-+	wmb();
-+#endif
-+	pc->ldt = newldt;
-+	wmb();
-+	pc->size = mincount;
-+	wmb();
++static int is_shadow_present_pte(u64 pte)
++{
++	pte &= ~PT_SHADOW_IO_MARK;
++	return pte != shadow_trap_nonpresent_pte
++		&& pte != shadow_notrap_nonpresent_pte;
++}
 +
-+	if (reload) {
-+#ifdef CONFIG_SMP
-+		cpumask_t mask;
++static int is_writeble_pte(unsigned long pte)
++{
++	return pte & PT_WRITABLE_MASK;
++}
 +
-+		preempt_disable();
-+		load_LDT(pc);
-+		mask = cpumask_of_cpu(smp_processor_id());
-+		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+			smp_call_function(flush_ldt, NULL, 1, 1);
-+		preempt_enable();
++static int is_dirty_pte(unsigned long pte)
++{
++	return pte & PT_DIRTY_MASK;
++}
++
++static int is_io_pte(unsigned long pte)
++{
++	return pte & PT_SHADOW_IO_MARK;
++}
++
++static int is_rmap_pte(u64 pte)
++{
++	return pte != shadow_trap_nonpresent_pte
++		&& pte != shadow_notrap_nonpresent_pte;
++}
++
++static gfn_t pse36_gfn_delta(u32 gpte)
++{
++	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
++
++	return (gpte & PT32_DIR_PSE36_MASK) << shift;
++}
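
With PT32_DIR_PSE36_SHIFT = 13 and a 12-bit PAGE_SHIFT, the shift above evaluates to 32 - 13 - 12 = 7, so the four PSE-36 bits (13..16) of a large-page PDE land in bits 20..23 of the gfn, i.e. physical-address bits 32..35 once the gfn is scaled by the page size. A hypothetical PDE value, worked through:

	unsigned int gpte = (1u << 14) | (1u << 16);    /* PSE-36 bits 14 and 16 set          */
	/* (gpte & PT32_DIR_PSE36_MASK) << 7 == (1 << 21) | (1 << 23),
	 * which become physical-address bits 33 and 35 after gfn << PAGE_SHIFT. */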
++
++static void set_shadow_pte(u64 *sptep, u64 spte)
++{
++#ifdef CONFIG_X86_64
++	set_64bit((unsigned long *)sptep, spte);
 +#else
-+		load_LDT(pc);
++	set_64bit((unsigned long long *)sptep, spte);
 +#endif
-+	}
-+	if (oldsize) {
-+		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(oldldt);
-+		else
-+			put_page(virt_to_page(oldldt));
++}
++
++static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
++				  struct kmem_cache *base_cache, int min)
++{
++	void *obj;
++
++	if (cache->nobjs >= min)
++		return 0;
++	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
++		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
++		if (!obj)
++			return -ENOMEM;
++		cache->objects[cache->nobjs++] = obj;
 +	}
 +	return 0;
 +}
 +
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 +{
-+	int err = alloc_ldt(new, old->size, 0);
++	while (mc->nobjs)
++		kfree(mc->objects[--mc->nobjs]);
++}
 +
-+	if (err < 0)
-+		return err;
-+	memcpy(new->ldt, old->ldt, old->size * LDT_ENTRY_SIZE);
++static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
++				       int min)
++{
++	struct page *page;
++
++	if (cache->nobjs >= min)
++		return 0;
++	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
++		page = alloc_page(GFP_KERNEL);
++		if (!page)
++			return -ENOMEM;
++		set_page_private(page, 0);
++		cache->objects[cache->nobjs++] = page_address(page);
++	}
 +	return 0;
 +}
 +
++static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
++{
++	while (mc->nobjs)
++		free_page((unsigned long)mc->objects[--mc->nobjs]);
++}
++
++static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
++{
++	int r;
++
++	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
++				   pte_chain_cache, 4);
++	if (r)
++		goto out;
++	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
++				   rmap_desc_cache, 1);
++	if (r)
++		goto out;
++	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
++	if (r)
++		goto out;
++	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
++				   mmu_page_header_cache, 4);
++out:
++	return r;
++}
++
++static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
++{
++	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
++	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
++	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
++	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
++}
++
++static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
++				    size_t size)
++{
++	void *p;
++
++	BUG_ON(!mc->nobjs);
++	p = mc->objects[--mc->nobjs];
++	memset(p, 0, size);
++	return p;
++}
++
++static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
++{
++	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
++				      sizeof(struct kvm_pte_chain));
++}
++
++static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
++{
++	kfree(pc);
++}
++
++static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
++{
++	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
++				      sizeof(struct kvm_rmap_desc));
++}
++
++static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
++{
++	kfree(rd);
++}
++
 +/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
++ * Take gfn and return the reverse mapping to it.
++ * Note: gfn must be unaliased before this function gets called.
 + */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++
++static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
 +{
-+	struct mm_struct *old_mm;
-+	int retval = 0;
++	struct kvm_memory_slot *slot;
 +
-+	mutex_init(&mm->context.lock);
-+	mm->context.size = 0;
-+	old_mm = current->mm;
-+	if (old_mm && old_mm->context.size > 0) {
-+		mutex_lock(&old_mm->context.lock);
-+		retval = copy_ldt(&mm->context, &old_mm->context);
-+		mutex_unlock(&old_mm->context.lock);
-+	}
-+	return retval;
++	slot = gfn_to_memslot(kvm, gfn);
++	return &slot->rmap[gfn - slot->base_gfn];
 +}
 +
 +/*
-+ * No need to lock the MM as we are the last user
++ * Reverse mapping data structures:
 + *
-+ * 64bit: Don't touch the LDT register - we're already in the next thread.
++ * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
++ * that points to page_address(page).
++ *
++ * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
++ * containing more mappings.
 + */
-+void destroy_context(struct mm_struct *mm)
++static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 +{
-+	if (mm->context.size) {
-+#ifdef CONFIG_X86_32
-+		/* CHECKME: Can this ever happen ? */
-+		if (mm == current->active_mm)
-+			clear_LDT();
-+#endif
-+		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(mm->context.ldt);
++	struct kvm_mmu_page *sp;
++	struct kvm_rmap_desc *desc;
++	unsigned long *rmapp;
++	int i;
++
++	if (!is_rmap_pte(*spte))
++		return;
++	gfn = unalias_gfn(vcpu->kvm, gfn);
++	sp = page_header(__pa(spte));
++	sp->gfns[spte - sp->spt] = gfn;
++	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
++	if (!*rmapp) {
++		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
++		*rmapp = (unsigned long)spte;
++	} else if (!(*rmapp & 1)) {
++		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
++		desc = mmu_alloc_rmap_desc(vcpu);
++		desc->shadow_ptes[0] = (u64 *)*rmapp;
++		desc->shadow_ptes[1] = spte;
++		*rmapp = (unsigned long)desc | 1;
++	} else {
++		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
++		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
++		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
++			desc = desc->more;
++		if (desc->shadow_ptes[RMAP_EXT-1]) {
++			desc->more = mmu_alloc_rmap_desc(vcpu);
++			desc = desc->more;
++		}
++		for (i = 0; desc->shadow_ptes[i]; ++i)
++			;
++		desc->shadow_ptes[i] = spte;
++	}
++}
++
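The bit-zero tagging described in the comment above packs either a single spte pointer or a tagged descriptor pointer into one unsigned long. A minimal user-space sketch of that 0 -> 1 -> many progression might look as follows; the RMAP_EXT value, struct layout and function names here are simplified assumptions for illustration only, and chaining via ->more is left out.

#include <stdio.h>
#include <stdlib.h>

#define RMAP_EXT 4			/* assumption: small fixed fan-out */

struct rmap_desc {
	unsigned long long *sptes[RMAP_EXT];
	struct rmap_desc *more;		/* overflow chain, unused in this sketch */
};

/* Mirror the 0 -> 1 -> many transitions of rmap_add() above. */
static void rmap_add_sketch(unsigned long *rmapp, unsigned long long *spte)
{
	struct rmap_desc *desc;
	int i;

	if (!*rmapp) {
		*rmapp = (unsigned long)spte;		/* 0 -> 1: plain pointer */
	} else if (!(*rmapp & 1)) {
		desc = calloc(1, sizeof(*desc));	/* 1 -> many: descriptor */
		desc->sptes[0] = (unsigned long long *)*rmapp;
		desc->sptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;	/* tag bit zero */
	} else {
		desc = (struct rmap_desc *)(*rmapp & ~1ul);
		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
			;
		if (i < RMAP_EXT)
			desc->sptes[i] = spte;		/* the real code chains via ->more */
	}
}

int main(void)
{
	unsigned long rmap = 0;
	unsigned long long spte1 = 0, spte2 = 0;

	rmap_add_sketch(&rmap, &spte1);
	printf("one mapping, tag bit %lu\n", rmap & 1);	/* prints 0 */
	rmap_add_sketch(&rmap, &spte2);
	printf("many mappings, tag bit %lu\n", rmap & 1);	/* prints 1 */
	return 0;
}
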
++static void rmap_desc_remove_entry(unsigned long *rmapp,
++				   struct kvm_rmap_desc *desc,
++				   int i,
++				   struct kvm_rmap_desc *prev_desc)
++{
++	int j;
++
++	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
++		;
++	desc->shadow_ptes[i] = desc->shadow_ptes[j];
++	desc->shadow_ptes[j] = NULL;
++	if (j != 0)
++		return;
++	if (!prev_desc && !desc->more)
++		*rmapp = (unsigned long)desc->shadow_ptes[0];
++	else
++		if (prev_desc)
++			prev_desc->more = desc->more;
 +		else
-+			put_page(virt_to_page(mm->context.ldt));
-+		mm->context.size = 0;
++			*rmapp = (unsigned long)desc->more | 1;
++	mmu_free_rmap_desc(desc);
++}
++
++static void rmap_remove(struct kvm *kvm, u64 *spte)
++{
++	struct kvm_rmap_desc *desc;
++	struct kvm_rmap_desc *prev_desc;
++	struct kvm_mmu_page *sp;
++	struct page *page;
++	unsigned long *rmapp;
++	int i;
++
++	if (!is_rmap_pte(*spte))
++		return;
++	sp = page_header(__pa(spte));
++	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
++	mark_page_accessed(page);
++	if (is_writeble_pte(*spte))
++		kvm_release_page_dirty(page);
++	else
++		kvm_release_page_clean(page);
++	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
++	if (!*rmapp) {
++		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
++		BUG();
++	} else if (!(*rmapp & 1)) {
++		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
++		if ((u64 *)*rmapp != spte) {
++			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
++			       spte, *spte);
++			BUG();
++		}
++		*rmapp = 0;
++	} else {
++		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
++		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
++		prev_desc = NULL;
++		while (desc) {
++			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
++				if (desc->shadow_ptes[i] == spte) {
++					rmap_desc_remove_entry(rmapp,
++							       desc, i,
++							       prev_desc);
++					return;
++				}
++			prev_desc = desc;
++			desc = desc->more;
++		}
++		BUG();
 +	}
 +}
 +
-+static int read_ldt(void __user *ptr, unsigned long bytecount)
++static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 +{
-+	int err;
-+	unsigned long size;
-+	struct mm_struct *mm = current->mm;
++	struct kvm_rmap_desc *desc;
++	struct kvm_rmap_desc *prev_desc;
++	u64 *prev_spte;
++	int i;
 +
-+	if (!mm->context.size)
-+		return 0;
-+	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
-+		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
++	if (!*rmapp)
++		return NULL;
++	else if (!(*rmapp & 1)) {
++		if (!spte)
++			return (u64 *)*rmapp;
++		return NULL;
++	}
++	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
++	prev_desc = NULL;
++	prev_spte = NULL;
++	while (desc) {
++		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
++			if (prev_spte == spte)
++				return desc->shadow_ptes[i];
++			prev_spte = desc->shadow_ptes[i];
++		}
++		desc = desc->more;
++	}
++	return NULL;
++}
 +
-+	mutex_lock(&mm->context.lock);
-+	size = mm->context.size * LDT_ENTRY_SIZE;
-+	if (size > bytecount)
-+		size = bytecount;
++static void rmap_write_protect(struct kvm *kvm, u64 gfn)
++{
++	unsigned long *rmapp;
++	u64 *spte;
++	int write_protected = 0;
 +
-+	err = 0;
-+	if (copy_to_user(ptr, mm->context.ldt, size))
-+		err = -EFAULT;
-+	mutex_unlock(&mm->context.lock);
-+	if (err < 0)
-+		goto error_return;
-+	if (size != bytecount) {
-+		/* zero-fill the rest */
-+		if (clear_user(ptr + size, bytecount - size) != 0) {
-+			err = -EFAULT;
-+			goto error_return;
++	gfn = unalias_gfn(kvm, gfn);
++	rmapp = gfn_to_rmap(kvm, gfn);
++
++	spte = rmap_next(kvm, rmapp, NULL);
++	while (spte) {
++		BUG_ON(!spte);
++		BUG_ON(!(*spte & PT_PRESENT_MASK));
++		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
++		if (is_writeble_pte(*spte)) {
++			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
++			write_protected = 1;
 +		}
++		spte = rmap_next(kvm, rmapp, spte);
 +	}
-+	return bytecount;
-+error_return:
-+	return err;
++	if (write_protected)
++		kvm_flush_remote_tlbs(kvm);
 +}
 +
-+static int read_default_ldt(void __user *ptr, unsigned long bytecount)
++#ifdef MMU_DEBUG
++static int is_empty_shadow_page(u64 *spt)
 +{
-+	/* CHECKME: Can we use _one_ random number ? */
-+#ifdef CONFIG_X86_32
-+	unsigned long size = 5 * sizeof(struct desc_struct);
-+#else
-+	unsigned long size = 128;
++	u64 *pos;
++	u64 *end;
++
++	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
++		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
++			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
++			       pos, *pos);
++			return 0;
++		}
++	return 1;
++}
 +#endif
-+	if (bytecount > size)
-+		bytecount = size;
-+	if (clear_user(ptr, bytecount))
-+		return -EFAULT;
-+	return bytecount;
++
++static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
++{
++	ASSERT(is_empty_shadow_page(sp->spt));
++	list_del(&sp->link);
++	__free_page(virt_to_page(sp->spt));
++	__free_page(virt_to_page(sp->gfns));
++	kfree(sp);
++	++kvm->arch.n_free_mmu_pages;
 +}
 +
-+static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
++static unsigned kvm_page_table_hashfn(gfn_t gfn)
 +{
-+	struct mm_struct *mm = current->mm;
-+	struct desc_struct ldt;
-+	int error;
-+	struct user_desc ldt_info;
++	return gfn;
++}
 +
-+	error = -EINVAL;
-+	if (bytecount != sizeof(ldt_info))
-+		goto out;
-+	error = -EFAULT;
-+	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
-+		goto out;
++static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
++					       u64 *parent_pte)
++{
++	struct kvm_mmu_page *sp;
 +
-+	error = -EINVAL;
-+	if (ldt_info.entry_number >= LDT_ENTRIES)
-+		goto out;
-+	if (ldt_info.contents == 3) {
-+		if (oldmode)
-+			goto out;
-+		if (ldt_info.seg_not_present == 0)
-+			goto out;
++	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
++	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
++	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
++	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
++	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
++	ASSERT(is_empty_shadow_page(sp->spt));
++	sp->slot_bitmap = 0;
++	sp->multimapped = 0;
++	sp->parent_pte = parent_pte;
++	--vcpu->kvm->arch.n_free_mmu_pages;
++	return sp;
++}
++
++static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
++				    struct kvm_mmu_page *sp, u64 *parent_pte)
++{
++	struct kvm_pte_chain *pte_chain;
++	struct hlist_node *node;
++	int i;
++
++	if (!parent_pte)
++		return;
++	if (!sp->multimapped) {
++		u64 *old = sp->parent_pte;
++
++		if (!old) {
++			sp->parent_pte = parent_pte;
++			return;
++		}
++		sp->multimapped = 1;
++		pte_chain = mmu_alloc_pte_chain(vcpu);
++		INIT_HLIST_HEAD(&sp->parent_ptes);
++		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
++		pte_chain->parent_ptes[0] = old;
++	}
++	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
++		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
++			continue;
++		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
++			if (!pte_chain->parent_ptes[i]) {
++				pte_chain->parent_ptes[i] = parent_pte;
++				return;
++			}
 +	}
++	pte_chain = mmu_alloc_pte_chain(vcpu);
++	BUG_ON(!pte_chain);
++	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
++	pte_chain->parent_ptes[0] = parent_pte;
++}
 +
-+	mutex_lock(&mm->context.lock);
-+	if (ldt_info.entry_number >= mm->context.size) {
-+		error = alloc_ldt(&current->mm->context,
-+				  ldt_info.entry_number + 1, 1);
-+		if (error < 0)
-+			goto out_unlock;
++static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
++				       u64 *parent_pte)
++{
++	struct kvm_pte_chain *pte_chain;
++	struct hlist_node *node;
++	int i;
++
++	if (!sp->multimapped) {
++		BUG_ON(sp->parent_pte != parent_pte);
++		sp->parent_pte = NULL;
++		return;
 +	}
++	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
++		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
++			if (!pte_chain->parent_ptes[i])
++				break;
++			if (pte_chain->parent_ptes[i] != parent_pte)
++				continue;
++			while (i + 1 < NR_PTE_CHAIN_ENTRIES
++				&& pte_chain->parent_ptes[i + 1]) {
++				pte_chain->parent_ptes[i]
++					= pte_chain->parent_ptes[i + 1];
++				++i;
++			}
++			pte_chain->parent_ptes[i] = NULL;
++			if (i == 0) {
++				hlist_del(&pte_chain->link);
++				mmu_free_pte_chain(pte_chain);
++				if (hlist_empty(&sp->parent_ptes)) {
++					sp->multimapped = 0;
++					sp->parent_pte = NULL;
++				}
++			}
++			return;
++		}
++	BUG();
++}
 +
-+	/* Allow LDTs to be cleared by the user. */
-+	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+		if (oldmode || LDT_empty(&ldt_info)) {
-+			memset(&ldt, 0, sizeof(ldt));
-+			goto install;
++static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
++{
++	unsigned index;
++	struct hlist_head *bucket;
++	struct kvm_mmu_page *sp;
++	struct hlist_node *node;
++
++	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
++	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
++	bucket = &kvm->arch.mmu_page_hash[index];
++	hlist_for_each_entry(sp, node, bucket, hash_link)
++		if (sp->gfn == gfn && !sp->role.metaphysical) {
++			pgprintk("%s: found role %x\n",
++				 __FUNCTION__, sp->role.word);
++			return sp;
++		}
++	return NULL;
++}
++
++static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
++					     gfn_t gfn,
++					     gva_t gaddr,
++					     unsigned level,
++					     int metaphysical,
++					     unsigned access,
++					     u64 *parent_pte,
++					     bool *new_page)
++{
++	union kvm_mmu_page_role role;
++	unsigned index;
++	unsigned quadrant;
++	struct hlist_head *bucket;
++	struct kvm_mmu_page *sp;
++	struct hlist_node *node;
++
++	role.word = 0;
++	role.glevels = vcpu->arch.mmu.root_level;
++	role.level = level;
++	role.metaphysical = metaphysical;
++	role.access = access;
++	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
++		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
++		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
++		role.quadrant = quadrant;
++	}
++	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
++		 gfn, role.word);
++	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
++	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
++	hlist_for_each_entry(sp, node, bucket, hash_link)
++		if (sp->gfn == gfn && sp->role.word == role.word) {
++			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
++			pgprintk("%s: found\n", __FUNCTION__);
++			return sp;
++		}
++	++vcpu->kvm->stat.mmu_cache_miss;
++	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
++	if (!sp)
++		return sp;
++	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
++	sp->gfn = gfn;
++	sp->role = role;
++	hlist_add_head(&sp->hash_link, bucket);
++	vcpu->arch.mmu.prefetch_page(vcpu, sp);
++	if (!metaphysical)
++		rmap_write_protect(vcpu->kvm, gfn);
++	if (new_page)
++		*new_page = 1;
++	return sp;
++}
++
++static void kvm_mmu_page_unlink_children(struct kvm *kvm,
++					 struct kvm_mmu_page *sp)
++{
++	unsigned i;
++	u64 *pt;
++	u64 ent;
++
++	pt = sp->spt;
++
++	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
++		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
++			if (is_shadow_present_pte(pt[i]))
++				rmap_remove(kvm, &pt[i]);
++			pt[i] = shadow_trap_nonpresent_pte;
 +		}
++		kvm_flush_remote_tlbs(kvm);
++		return;
 +	}
 +
-+	fill_ldt(&ldt, &ldt_info);
-+	if (oldmode)
-+		ldt.avl = 0;
++	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
++		ent = pt[i];
 +
-+	/* Install the new entry ...  */
-+install:
-+	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
-+	error = 0;
++		pt[i] = shadow_trap_nonpresent_pte;
++		if (!is_shadow_present_pte(ent))
++			continue;
++		ent &= PT64_BASE_ADDR_MASK;
++		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
++	}
++	kvm_flush_remote_tlbs(kvm);
++}
 +
-+out_unlock:
-+	mutex_unlock(&mm->context.lock);
-+out:
-+	return error;
++static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
++{
++	mmu_page_remove_parent_pte(sp, parent_pte);
 +}
 +
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr,
-+			      unsigned long bytecount)
++static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 +{
-+	int ret = -ENOSYS;
++	int i;
 +
-+	switch (func) {
-+	case 0:
-+		ret = read_ldt(ptr, bytecount);
-+		break;
-+	case 1:
-+		ret = write_ldt(ptr, bytecount, 1);
-+		break;
-+	case 2:
-+		ret = read_default_ldt(ptr, bytecount);
-+		break;
-+	case 0x11:
-+		ret = write_ldt(ptr, bytecount, 0);
-+		break;
-+	}
-+	return ret;
++	for (i = 0; i < KVM_MAX_VCPUS; ++i)
++		if (kvm->vcpus[i])
++			kvm->vcpus[i]->arch.last_pte_updated = NULL;
 +}
-diff --git a/arch/x86/kernel/ldt_32.c b/arch/x86/kernel/ldt_32.c
-deleted file mode 100644
-index 9ff90a2..0000000
---- a/arch/x86/kernel/ldt_32.c
-+++ /dev/null
-@@ -1,248 +0,0 @@
--/*
-- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-- * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-- */
--
--#include <linux/errno.h>
--#include <linux/sched.h>
--#include <linux/string.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/vmalloc.h>
--#include <linux/slab.h>
--
--#include <asm/uaccess.h>
--#include <asm/system.h>
--#include <asm/ldt.h>
--#include <asm/desc.h>
--#include <asm/mmu_context.h>
--
--#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
--static void flush_ldt(void *null)
--{
--	if (current->active_mm)
--		load_LDT(&current->active_mm->context);
--}
--#endif
--
--static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
--{
--	void *oldldt;
--	void *newldt;
--	int oldsize;
--
--	if (mincount <= pc->size)
--		return 0;
--	oldsize = pc->size;
--	mincount = (mincount+511)&(~511);
--	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
--		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
--	else
--		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
--
--	if (!newldt)
--		return -ENOMEM;
--
--	if (oldsize)
--		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
--	oldldt = pc->ldt;
--	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
--	pc->ldt = newldt;
--	wmb();
--	pc->size = mincount;
--	wmb();
--
--	if (reload) {
--#ifdef CONFIG_SMP
--		cpumask_t mask;
--		preempt_disable();
--		load_LDT(pc);
--		mask = cpumask_of_cpu(smp_processor_id());
--		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
--			smp_call_function(flush_ldt, NULL, 1, 1);
--		preempt_enable();
--#else
--		load_LDT(pc);
--#endif
--	}
--	if (oldsize) {
--		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
--			vfree(oldldt);
--		else
--			kfree(oldldt);
--	}
--	return 0;
--}
--
--static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
--{
--	int err = alloc_ldt(new, old->size, 0);
--	if (err < 0)
--		return err;
--	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
--	return 0;
--}
--
--/*
-- * we do not have to muck with descriptors here, that is
-- * done in switch_mm() as needed.
-- */
--int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
--{
--	struct mm_struct * old_mm;
--	int retval = 0;
--
--	mutex_init(&mm->context.lock);
--	mm->context.size = 0;
--	old_mm = current->mm;
--	if (old_mm && old_mm->context.size > 0) {
--		mutex_lock(&old_mm->context.lock);
--		retval = copy_ldt(&mm->context, &old_mm->context);
--		mutex_unlock(&old_mm->context.lock);
--	}
--	return retval;
--}
--
--/*
-- * No need to lock the MM as we are the last user
-- */
--void destroy_context(struct mm_struct *mm)
--{
--	if (mm->context.size) {
--		if (mm == current->active_mm)
--			clear_LDT();
--		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
--			vfree(mm->context.ldt);
--		else
--			kfree(mm->context.ldt);
--		mm->context.size = 0;
--	}
--}
--
--static int read_ldt(void __user * ptr, unsigned long bytecount)
--{
--	int err;
--	unsigned long size;
--	struct mm_struct * mm = current->mm;
--
--	if (!mm->context.size)
--		return 0;
--	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
--		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
--
--	mutex_lock(&mm->context.lock);
--	size = mm->context.size*LDT_ENTRY_SIZE;
--	if (size > bytecount)
--		size = bytecount;
--
--	err = 0;
--	if (copy_to_user(ptr, mm->context.ldt, size))
--		err = -EFAULT;
--	mutex_unlock(&mm->context.lock);
--	if (err < 0)
--		goto error_return;
--	if (size != bytecount) {
--		/* zero-fill the rest */
--		if (clear_user(ptr+size, bytecount-size) != 0) {
--			err = -EFAULT;
--			goto error_return;
--		}
--	}
--	return bytecount;
--error_return:
--	return err;
--}
--
--static int read_default_ldt(void __user * ptr, unsigned long bytecount)
--{
--	int err;
--	unsigned long size;
--
--	err = 0;
--	size = 5*sizeof(struct desc_struct);
--	if (size > bytecount)
--		size = bytecount;
--
--	err = size;
--	if (clear_user(ptr, size))
--		err = -EFAULT;
--
--	return err;
--}
--
--static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
--{
--	struct mm_struct * mm = current->mm;
--	__u32 entry_1, entry_2;
--	int error;
--	struct user_desc ldt_info;
--
--	error = -EINVAL;
--	if (bytecount != sizeof(ldt_info))
--		goto out;
--	error = -EFAULT; 	
--	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
--		goto out;
--
--	error = -EINVAL;
--	if (ldt_info.entry_number >= LDT_ENTRIES)
--		goto out;
--	if (ldt_info.contents == 3) {
--		if (oldmode)
--			goto out;
--		if (ldt_info.seg_not_present == 0)
--			goto out;
--	}
--
--	mutex_lock(&mm->context.lock);
--	if (ldt_info.entry_number >= mm->context.size) {
--		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
--		if (error < 0)
--			goto out_unlock;
--	}
--
--   	/* Allow LDTs to be cleared by the user. */
--   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
--		if (oldmode || LDT_empty(&ldt_info)) {
--			entry_1 = 0;
--			entry_2 = 0;
--			goto install;
--		}
--	}
--
--	entry_1 = LDT_entry_a(&ldt_info);
--	entry_2 = LDT_entry_b(&ldt_info);
--	if (oldmode)
--		entry_2 &= ~(1 << 20);
--
--	/* Install the new entry ...  */
--install:
--	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, entry_1, entry_2);
--	error = 0;
--
--out_unlock:
--	mutex_unlock(&mm->context.lock);
--out:
--	return error;
--}
--
--asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
--{
--	int ret = -ENOSYS;
--
--	switch (func) {
--	case 0:
--		ret = read_ldt(ptr, bytecount);
--		break;
--	case 1:
--		ret = write_ldt(ptr, bytecount, 1);
--		break;
--	case 2:
--		ret = read_default_ldt(ptr, bytecount);
--		break;
--	case 0x11:
--		ret = write_ldt(ptr, bytecount, 0);
--		break;
--	}
--	return ret;
--}
-diff --git a/arch/x86/kernel/ldt_64.c b/arch/x86/kernel/ldt_64.c
-deleted file mode 100644
-index 60e57ab..0000000
---- a/arch/x86/kernel/ldt_64.c
-+++ /dev/null
-@@ -1,250 +0,0 @@
--/*
-- * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-- * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-- * Copyright (C) 2002 Andi Kleen
-- * 
-- * This handles calls from both 32bit and 64bit mode.
-- */
--
--#include <linux/errno.h>
--#include <linux/sched.h>
--#include <linux/string.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/vmalloc.h>
--#include <linux/slab.h>
--
--#include <asm/uaccess.h>
--#include <asm/system.h>
--#include <asm/ldt.h>
--#include <asm/desc.h>
--#include <asm/proto.h>
--
--#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
--static void flush_ldt(void *null)
--{
--	if (current->active_mm)
--               load_LDT(&current->active_mm->context);
--}
--#endif
--
--static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
--{
--	void *oldldt;
--	void *newldt;
--	unsigned oldsize;
--
--	if (mincount <= (unsigned)pc->size)
--		return 0;
--	oldsize = pc->size;
--	mincount = (mincount+511)&(~511);
--	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
--		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
--	else
--		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
--
--	if (!newldt)
--		return -ENOMEM;
--
--	if (oldsize)
--		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
--	oldldt = pc->ldt;
--	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
--	wmb();
--	pc->ldt = newldt;
--	wmb();
--	pc->size = mincount;
--	wmb();
--	if (reload) {
--#ifdef CONFIG_SMP
--		cpumask_t mask;
--
--		preempt_disable();
--		mask = cpumask_of_cpu(smp_processor_id());
--		load_LDT(pc);
--		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
--			smp_call_function(flush_ldt, NULL, 1, 1);
--		preempt_enable();
--#else
--		load_LDT(pc);
--#endif
--	}
--	if (oldsize) {
--		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
--			vfree(oldldt);
--		else
--			kfree(oldldt);
--	}
--	return 0;
--}
--
--static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
--{
--	int err = alloc_ldt(new, old->size, 0);
--	if (err < 0)
--		return err;
--	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
--	return 0;
--}
--
--/*
-- * we do not have to muck with descriptors here, that is
-- * done in switch_mm() as needed.
-- */
--int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
--{
--	struct mm_struct * old_mm;
--	int retval = 0;
--
--	mutex_init(&mm->context.lock);
--	mm->context.size = 0;
--	old_mm = current->mm;
--	if (old_mm && old_mm->context.size > 0) {
--		mutex_lock(&old_mm->context.lock);
--		retval = copy_ldt(&mm->context, &old_mm->context);
--		mutex_unlock(&old_mm->context.lock);
--	}
--	return retval;
--}
--
--/*
-- * 
-- * Don't touch the LDT register - we're already in the next thread.
-- */
--void destroy_context(struct mm_struct *mm)
--{
--	if (mm->context.size) {
--		if ((unsigned)mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
--			vfree(mm->context.ldt);
--		else
--			kfree(mm->context.ldt);
--		mm->context.size = 0;
--	}
--}
--
--static int read_ldt(void __user * ptr, unsigned long bytecount)
--{
--	int err;
--	unsigned long size;
--	struct mm_struct * mm = current->mm;
--
--	if (!mm->context.size)
--		return 0;
--	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
--		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
--
--	mutex_lock(&mm->context.lock);
--	size = mm->context.size*LDT_ENTRY_SIZE;
--	if (size > bytecount)
--		size = bytecount;
--
--	err = 0;
--	if (copy_to_user(ptr, mm->context.ldt, size))
--		err = -EFAULT;
--	mutex_unlock(&mm->context.lock);
--	if (err < 0)
--		goto error_return;
--	if (size != bytecount) {
--		/* zero-fill the rest */
--		if (clear_user(ptr+size, bytecount-size) != 0) {
--			err = -EFAULT;
--			goto error_return;
--		}
--	}
--	return bytecount;
--error_return:
--	return err;
--}
--
--static int read_default_ldt(void __user * ptr, unsigned long bytecount)
--{
--	/* Arbitrary number */ 
--	/* x86-64 default LDT is all zeros */
--	if (bytecount > 128) 
--		bytecount = 128; 	
--	if (clear_user(ptr, bytecount))
--		return -EFAULT;
--	return bytecount; 
--}
--
--static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
--{
--	struct task_struct *me = current;
--	struct mm_struct * mm = me->mm;
--	__u32 entry_1, entry_2, *lp;
--	int error;
--	struct user_desc ldt_info;
--
--	error = -EINVAL;
--
--	if (bytecount != sizeof(ldt_info))
--		goto out;
--	error = -EFAULT; 	
--	if (copy_from_user(&ldt_info, ptr, bytecount))
--		goto out;
--
--	error = -EINVAL;
--	if (ldt_info.entry_number >= LDT_ENTRIES)
--		goto out;
--	if (ldt_info.contents == 3) {
--		if (oldmode)
--			goto out;
--		if (ldt_info.seg_not_present == 0)
--			goto out;
--	}
--
--	mutex_lock(&mm->context.lock);
--	if (ldt_info.entry_number >= (unsigned)mm->context.size) {
--		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
--		if (error < 0)
--			goto out_unlock;
--	}
--
--	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
--
--   	/* Allow LDTs to be cleared by the user. */
--   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
--		if (oldmode || LDT_empty(&ldt_info)) {
--			entry_1 = 0;
--			entry_2 = 0;
--			goto install;
--		}
--	}
--
--	entry_1 = LDT_entry_a(&ldt_info);
--	entry_2 = LDT_entry_b(&ldt_info);
--	if (oldmode)
--		entry_2 &= ~(1 << 20);
--
--	/* Install the new entry ...  */
--install:
--	*lp	= entry_1;
--	*(lp+1)	= entry_2;
--	error = 0;
--
--out_unlock:
--	mutex_unlock(&mm->context.lock);
--out:
--	return error;
--}
--
--asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
--{
--	int ret = -ENOSYS;
--
--	switch (func) {
--	case 0:
--		ret = read_ldt(ptr, bytecount);
--		break;
--	case 1:
--		ret = write_ldt(ptr, bytecount, 1);
--		break;
--	case 2:
--		ret = read_default_ldt(ptr, bytecount);
--		break;
--	case 0x11:
--		ret = write_ldt(ptr, bytecount, 0);
--		break;
--	}
--	return ret;
--}
-diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
-index 11b935f..c1cfd60 100644
---- a/arch/x86/kernel/machine_kexec_32.c
-+++ b/arch/x86/kernel/machine_kexec_32.c
-@@ -32,7 +32,7 @@ static u32 kexec_pte1[1024] PAGE_ALIGNED;
- 
- static void set_idt(void *newidt, __u16 limit)
- {
--	struct Xgt_desc_struct curidt;
-+	struct desc_ptr curidt;
- 
- 	/* ia32 supports unaliged loads & stores */
- 	curidt.size    = limit;
-@@ -44,7 +44,7 @@ static void set_idt(void *newidt, __u16 limit)
- 
- static void set_gdt(void *newgdt, __u16 limit)
- {
--	struct Xgt_desc_struct curgdt;
-+	struct desc_ptr curgdt;
- 
- 	/* ia32 supports unaligned loads & stores */
- 	curgdt.size    = limit;
-diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
-index aa3d2c8..a1fef42 100644
---- a/arch/x86/kernel/machine_kexec_64.c
-+++ b/arch/x86/kernel/machine_kexec_64.c
-@@ -234,10 +234,5 @@ NORET_TYPE void machine_kexec(struct kimage *image)
- void arch_crash_save_vmcoreinfo(void)
- {
- 	VMCOREINFO_SYMBOL(init_level4_pgt);
--
--#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
--	VMCOREINFO_SYMBOL(node_data);
--	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
--#endif
- }
- 
-diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
-index 3960ab7..219f86e 100644
---- a/arch/x86/kernel/mfgpt_32.c
-+++ b/arch/x86/kernel/mfgpt_32.c
-@@ -63,6 +63,21 @@ static int __init mfgpt_disable(char *s)
- }
- __setup("nomfgpt", mfgpt_disable);
- 
-+/* Reset the MFGPT timers. This is required by some broken BIOSes which already
-+ * do the same and leave the system in an unstable state. TinyBIOS 0.98 is
-+ * affected at least (0.99 is OK with MFGPT workaround left to off).
-+ */
-+static int __init mfgpt_fix(char *s)
++
++static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 +{
-+	u32 val, dummy;
++	u64 *parent_pte;
 +
-+	/* The following udocumented bit resets the MFGPT timers */
-+	val = 0xFF; dummy = 0;
-+	wrmsr(0x5140002B, val, dummy);
-+	return 1;
++	++kvm->stat.mmu_shadow_zapped;
++	while (sp->multimapped || sp->parent_pte) {
++		if (!sp->multimapped)
++			parent_pte = sp->parent_pte;
++		else {
++			struct kvm_pte_chain *chain;
++
++			chain = container_of(sp->parent_ptes.first,
++					     struct kvm_pte_chain, link);
++			parent_pte = chain->parent_ptes[0];
++		}
++		BUG_ON(!parent_pte);
++		kvm_mmu_put_page(sp, parent_pte);
++		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
++	}
++	kvm_mmu_page_unlink_children(kvm, sp);
++	if (!sp->root_count) {
++		hlist_del(&sp->hash_link);
++		kvm_mmu_free_page(kvm, sp);
++	} else
++		list_move(&sp->link, &kvm->arch.active_mmu_pages);
++	kvm_mmu_reset_last_pte_updated(kvm);
 +}
-+__setup("mfgptfix", mfgpt_fix);
 +
- /*
-  * Check whether any MFGPTs are available for the kernel to use.  In most
-  * cases, firmware that uses AMD's VSA code will claim all timers during
-diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
-index 09c3152..6ff447f 100644
---- a/arch/x86/kernel/microcode.c
-+++ b/arch/x86/kernel/microcode.c
-@@ -244,8 +244,8 @@ static int microcode_sanity_check(void *mc)
- 		return 0;
- 	/* check extended signature checksum */
- 	for (i = 0; i < ext_sigcount; i++) {
--		ext_sig = (struct extended_signature *)((void *)ext_header
--			+ EXT_HEADER_SIZE + EXT_SIGNATURE_SIZE * i);
-+		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
-+			  EXT_SIGNATURE_SIZE * i;
- 		sum = orig_sum
- 			- (mc_header->sig + mc_header->pf + mc_header->cksum)
- 			+ (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
-@@ -279,11 +279,9 @@ static int get_maching_microcode(void *mc, int cpu)
- 	if (total_size <= get_datasize(mc_header) + MC_HEADER_SIZE)
- 		return 0;
- 
--	ext_header = (struct extended_sigtable *)(mc +
--			get_datasize(mc_header) + MC_HEADER_SIZE);
-+	ext_header = mc + get_datasize(mc_header) + MC_HEADER_SIZE;
- 	ext_sigcount = ext_header->count;
--	ext_sig = (struct extended_signature *)((void *)ext_header
--			+ EXT_HEADER_SIZE);
-+	ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
- 	for (i = 0; i < ext_sigcount; i++) {
- 		if (microcode_update_match(cpu, mc_header,
- 				ext_sig->sig, ext_sig->pf))
-@@ -436,7 +434,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
- 		return -EINVAL;
- 	}
- 
--	lock_cpu_hotplug();
-+	get_online_cpus();
- 	mutex_lock(&microcode_mutex);
- 
- 	user_buffer = (void __user *) buf;
-@@ -447,7 +445,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
- 		ret = (ssize_t)len;
- 
- 	mutex_unlock(&microcode_mutex);
--	unlock_cpu_hotplug();
-+	put_online_cpus();
- 
- 	return ret;
- }
-@@ -539,7 +537,7 @@ static int cpu_request_microcode(int cpu)
- 		pr_debug("ucode data file %s load failed\n", name);
- 		return error;
- 	}
--	buf = (void *)firmware->data;
-+	buf = firmware->data;
- 	size = firmware->size;
- 	while ((offset = get_next_ucode_from_buffer(&mc, buf, size, offset))
- 			> 0) {
-@@ -658,14 +656,14 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
- 
- 		old = current->cpus_allowed;
- 
--		lock_cpu_hotplug();
-+		get_online_cpus();
- 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
- 
- 		mutex_lock(&microcode_mutex);
- 		if (uci->valid)
- 			err = cpu_request_microcode(cpu);
- 		mutex_unlock(&microcode_mutex);
--		unlock_cpu_hotplug();
-+		put_online_cpus();
- 		set_cpus_allowed(current, old);
- 	}
- 	if (err)
-@@ -817,9 +815,9 @@ static int __init microcode_init (void)
- 		return PTR_ERR(microcode_pdev);
- 	}
- 
--	lock_cpu_hotplug();
-+	get_online_cpus();
- 	error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver);
--	unlock_cpu_hotplug();
-+	put_online_cpus();
- 	if (error) {
- 		microcode_dev_exit();
- 		platform_device_unregister(microcode_pdev);
-@@ -839,9 +837,9 @@ static void __exit microcode_exit (void)
- 
- 	unregister_hotcpu_notifier(&mc_cpu_notifier);
- 
--	lock_cpu_hotplug();
-+	get_online_cpus();
- 	sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
--	unlock_cpu_hotplug();
-+	put_online_cpus();
- 
- 	platform_device_unregister(microcode_pdev);
- }
-diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c
-index 7a05a7f..67009cd 100644
---- a/arch/x86/kernel/mpparse_32.c
-+++ b/arch/x86/kernel/mpparse_32.c
-@@ -68,7 +68,7 @@ unsigned int def_to_bigsmp = 0;
- /* Processor that is doing the boot up */
- unsigned int boot_cpu_physical_apicid = -1U;
- /* Internal processor count */
--unsigned int __cpuinitdata num_processors;
-+unsigned int num_processors;
- 
- /* Bitmask of physically existing CPUs */
- physid_mask_t phys_cpu_present_map;
-@@ -258,7 +258,7 @@ static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
- 	if (!(m->mpc_flags & MPC_APIC_USABLE))
- 		return;
- 
--	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
-+	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
- 		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
- 	if (nr_ioapics >= MAX_IO_APICS) {
- 		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
-@@ -405,9 +405,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
- 
- 	mps_oem_check(mpc, oem, str);
- 
--	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
-+	printk("APIC at: 0x%X\n", mpc->mpc_lapic);
- 
--	/* 
++/*
++ * Changing the number of mmu pages allocated to the vm
++ * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
++ */
++void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
++{
 +	/*
- 	 * Save the local APIC address (it might be non-default) -- but only
- 	 * if we're not using ACPI.
- 	 */
-@@ -721,7 +721,7 @@ static int __init smp_scan_config (unsigned long base, unsigned long length)
- 	unsigned long *bp = phys_to_virt(base);
- 	struct intel_mp_floating *mpf;
- 
--	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+	printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length);
- 	if (sizeof(*mpf) != 16)
- 		printk("Error: MPF size\n");
- 
-@@ -734,8 +734,8 @@ static int __init smp_scan_config (unsigned long base, unsigned long length)
- 				|| (mpf->mpf_specification == 4)) ) {
- 
- 			smp_found_config = 1;
--			printk(KERN_INFO "found SMP MP-table at %08lx\n",
--						virt_to_phys(mpf));
-+			printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
-+				mpf, virt_to_phys(mpf));
- 			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
- 			if (mpf->mpf_physptr) {
- 				/*
-@@ -918,14 +918,14 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
- 	 */
- 	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
- 	mp_ioapic_routing[idx].gsi_base = gsi_base;
--	mp_ioapic_routing[idx].gsi_end = gsi_base + 
-+	mp_ioapic_routing[idx].gsi_end = gsi_base +
- 		io_apic_get_redir_entries(idx);
- 
--	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
--		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
--		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
--		mp_ioapic_routing[idx].gsi_base,
--		mp_ioapic_routing[idx].gsi_end);
-+	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
-+	       "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
-+	       mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+	       mp_ioapic_routing[idx].gsi_base,
-+	       mp_ioapic_routing[idx].gsi_end);
- }
- 
- void __init
-@@ -1041,15 +1041,16 @@ void __init mp_config_acpi_legacy_irqs (void)
- }
- 
- #define MAX_GSI_NUM	4096
-+#define IRQ_COMPRESSION_START	64
- 
- int mp_register_gsi(u32 gsi, int triggering, int polarity)
- {
- 	int ioapic = -1;
- 	int ioapic_pin = 0;
- 	int idx, bit = 0;
--	static int pci_irq = 16;
-+	static int pci_irq = IRQ_COMPRESSION_START;
- 	/*
--	 * Mapping between Global System Interrups, which
-+	 * Mapping between Global System Interrupts, which
- 	 * represent all possible interrupts, and IRQs
- 	 * assigned to actual devices.
- 	 */
-@@ -1086,12 +1087,16 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
- 	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
- 		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
- 			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
--		return gsi_to_irq[gsi];
-+		return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
- 	}
- 
- 	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
- 
--	if (triggering == ACPI_LEVEL_SENSITIVE) {
++	 * If we set the number of mmu pages to be smaller than the number
++	 * of active pages, we must free some mmu pages before we change
++	 * the value.
++	 */
++
++	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
++	    kvm_nr_mmu_pages) {
++		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
++				       - kvm->arch.n_free_mmu_pages;
++
++		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
++			struct kvm_mmu_page *page;
++
++			page = container_of(kvm->arch.active_mmu_pages.prev,
++					    struct kvm_mmu_page, link);
++			kvm_mmu_zap_page(kvm, page);
++			n_used_mmu_pages--;
++		}
++		kvm->arch.n_free_mmu_pages = 0;
++	}
++	else
++		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
++					 - kvm->arch.n_alloc_mmu_pages;
++
++	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
++}
++
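The accounting in kvm_mmu_change_mmu_pages() above can be illustrated with made-up numbers: shrinking below the number of pages currently in use zaps the excess and leaves the free counter at zero, while growing simply credits the difference. A rough stand-alone sketch follows; the field names echo the patch, but the struct, the function name and all values are invented for illustration, and zapping is reduced to a counter adjustment.

#include <stdio.h>

struct mmu_counters {
	unsigned int n_alloc_mmu_pages;
	unsigned int n_free_mmu_pages;
};

/* Same arithmetic as above, with page zapping reduced to bookkeeping. */
static void change_mmu_pages_sketch(struct mmu_counters *c, unsigned int target)
{
	unsigned int used = c->n_alloc_mmu_pages - c->n_free_mmu_pages;

	if (used > target)
		c->n_free_mmu_pages = 0;	/* (used - target) pages get zapped */
	else
		c->n_free_mmu_pages += target - c->n_alloc_mmu_pages;
	c->n_alloc_mmu_pages = target;
}

int main(void)
{
	struct mmu_counters c = { .n_alloc_mmu_pages = 1024, .n_free_mmu_pages = 200 };

	change_mmu_pages_sketch(&c, 512);	/* 824 in use > 512: shrink */
	printf("alloc=%u free=%u\n", c.n_alloc_mmu_pages, c.n_free_mmu_pages);	/* 512 0 */
	change_mmu_pages_sketch(&c, 2048);	/* grow: free gains 2048 - 512 */
	printf("alloc=%u free=%u\n", c.n_alloc_mmu_pages, c.n_free_mmu_pages);	/* 2048 1536 */
	return 0;
}
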
++static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
++{
++	unsigned index;
++	struct hlist_head *bucket;
++	struct kvm_mmu_page *sp;
++	struct hlist_node *node, *n;
++	int r;
++
++	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
++	r = 0;
++	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
++	bucket = &kvm->arch.mmu_page_hash[index];
++	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
++		if (sp->gfn == gfn && !sp->role.metaphysical) {
++			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
++				 sp->role.word);
++			kvm_mmu_zap_page(kvm, sp);
++			r = 1;
++		}
++	return r;
++}
++
++static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
++{
++	struct kvm_mmu_page *sp;
++
++	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
++		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
++		kvm_mmu_zap_page(kvm, sp);
++	}
++}
++
++static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
++{
++	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
++	struct kvm_mmu_page *sp = page_header(__pa(pte));
++
++	__set_bit(slot, &sp->slot_bitmap);
++}
++
++struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
++{
++	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
++
++	if (gpa == UNMAPPED_GVA)
++		return NULL;
++	return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
++}
++
++static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
++			 unsigned pt_access, unsigned pte_access,
++			 int user_fault, int write_fault, int dirty,
++			 int *ptwrite, gfn_t gfn, struct page *page)
++{
++	u64 spte;
++	int was_rmapped = is_rmap_pte(*shadow_pte);
++	int was_writeble = is_writeble_pte(*shadow_pte);
++
++	pgprintk("%s: spte %llx access %x write_fault %d"
++		 " user_fault %d gfn %lx\n",
++		 __FUNCTION__, *shadow_pte, pt_access,
++		 write_fault, user_fault, gfn);
++
 +	/*
-+	 * For GSI >= 64, use IRQ compression
++	 * We don't set the accessed bit, since we sometimes want to see
++	 * whether the guest actually used the pte (in order to detect
++	 * demand paging).
 +	 */
-+	if ((gsi >= IRQ_COMPRESSION_START)
-+		&& (triggering == ACPI_LEVEL_SENSITIVE)) {
- 		/*
- 		 * For PCI devices assign IRQs in order, avoiding gaps
- 		 * due to unused I/O APIC pins.
-diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
-index ef4aab1..72ab140 100644
---- a/arch/x86/kernel/mpparse_64.c
-+++ b/arch/x86/kernel/mpparse_64.c
-@@ -60,14 +60,18 @@ unsigned int boot_cpu_id = -1U;
- EXPORT_SYMBOL(boot_cpu_id);
- 
- /* Internal processor count */
--unsigned int num_processors __cpuinitdata = 0;
-+unsigned int num_processors;
- 
- unsigned disabled_cpus __cpuinitdata;
- 
- /* Bitmask of physically existing CPUs */
- physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
- 
--u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
-+				= { [0 ... NR_CPUS-1] = BAD_APICID };
-+void *x86_bios_cpu_apicid_early_ptr;
-+DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
-+EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
- 
- 
- /*
-@@ -118,24 +122,22 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
- 	physid_set(m->mpc_apicid, phys_cpu_present_map);
-  	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-  		/*
-- 		 * bios_cpu_apicid is required to have processors listed
-+		 * x86_bios_cpu_apicid is required to have processors listed
-  		 * in same order as logical cpu numbers. Hence the first
-  		 * entry is BSP, and so on.
-  		 */
- 		cpu = 0;
-  	}
--	bios_cpu_apicid[cpu] = m->mpc_apicid;
--	/*
--	 * We get called early in the the start_kernel initialization
--	 * process when the per_cpu data area is not yet setup, so we
--	 * use a static array that is removed after the per_cpu data
--	 * area is created.
--	 */
--	if (x86_cpu_to_apicid_ptr) {
--		u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr;
--		x86_cpu_to_apicid[cpu] = m->mpc_apicid;
-+	/* are we being called early in kernel startup? */
-+	if (x86_cpu_to_apicid_early_ptr) {
-+		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
-+		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
++	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
++	if (!dirty)
++		pte_access &= ~ACC_WRITE_MASK;
++	if (!(pte_access & ACC_EXEC_MASK))
++		spte |= PT64_NX_MASK;
 +
-+		cpu_to_apicid[cpu] = m->mpc_apicid;
-+		bios_cpu_apicid[cpu] = m->mpc_apicid;
- 	} else {
- 		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
-+		per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
- 	}
- 
- 	cpu_set(cpu, cpu_possible_map);
-diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
-index ee6eba4..21f6e3c 100644
---- a/arch/x86/kernel/msr.c
-+++ b/arch/x86/kernel/msr.c
-@@ -155,15 +155,15 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
- 
- 	switch (action) {
- 	case CPU_UP_PREPARE:
--	case CPU_UP_PREPARE_FROZEN:
- 		err = msr_device_create(cpu);
- 		break;
- 	case CPU_UP_CANCELED:
--	case CPU_UP_CANCELED_FROZEN:
- 	case CPU_DEAD:
--	case CPU_DEAD_FROZEN:
- 		msr_device_destroy(cpu);
- 		break;
-+	case CPU_UP_CANCELED_FROZEN:
-+		destroy_suspended_device(msr_class, MKDEV(MSR_MAJOR, cpu));
-+		break;
- 	}
- 	return err ? NOTIFY_BAD : NOTIFY_OK;
- }
-diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
-index 852db29..edd4136 100644
---- a/arch/x86/kernel/nmi_32.c
-+++ b/arch/x86/kernel/nmi_32.c
-@@ -51,13 +51,13 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
- 
- static int endflag __initdata = 0;
- 
-+#ifdef CONFIG_SMP
- /* The performance counters used by NMI_LOCAL_APIC don't trigger when
-  * the CPU is idle. To make sure the NMI watchdog really ticks on all
-  * CPUs during the test make them busy.
-  */
- static __init void nmi_cpu_busy(void *data)
- {
--#ifdef CONFIG_SMP
- 	local_irq_enable_in_hardirq();
- 	/* Intentionally don't use cpu_relax here. This is
- 	   to make sure that the performance counter really ticks,
-@@ -67,8 +67,8 @@ static __init void nmi_cpu_busy(void *data)
- 	   care if they get somewhat less cycles. */
- 	while (endflag == 0)
- 		mb();
--#endif
- }
++	spte |= PT_PRESENT_MASK;
++	if (pte_access & ACC_USER_MASK)
++		spte |= PT_USER_MASK;
++
++	if (is_error_page(page)) {
++		set_shadow_pte(shadow_pte,
++			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
++		kvm_release_page_clean(page);
++		return;
++	}
++
++	spte |= page_to_phys(page);
++
++	if ((pte_access & ACC_WRITE_MASK)
++	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
++		struct kvm_mmu_page *shadow;
++
++		spte |= PT_WRITABLE_MASK;
++		if (user_fault) {
++			mmu_unshadow(vcpu->kvm, gfn);
++			goto unshadowed;
++		}
++
++		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
++		if (shadow) {
++			pgprintk("%s: found shadow page for %lx, marking ro\n",
++				 __FUNCTION__, gfn);
++			pte_access &= ~ACC_WRITE_MASK;
++			if (is_writeble_pte(spte)) {
++				spte &= ~PT_WRITABLE_MASK;
++				kvm_x86_ops->tlb_flush(vcpu);
++			}
++			if (write_fault)
++				*ptwrite = 1;
++		}
++	}
++
++unshadowed:
++
++	if (pte_access & ACC_WRITE_MASK)
++		mark_page_dirty(vcpu->kvm, gfn);
++
++	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
++	set_shadow_pte(shadow_pte, spte);
++	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
++	if (!was_rmapped) {
++		rmap_add(vcpu, shadow_pte, gfn);
++		if (!is_rmap_pte(*shadow_pte))
++			kvm_release_page_clean(page);
++	} else {
++		if (was_writeble)
++			kvm_release_page_dirty(page);
++		else
++			kvm_release_page_clean(page);
++	}
++	if (!ptwrite || !*ptwrite)
++		vcpu->arch.last_pte_updated = shadow_pte;
++}
++
++static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
++{
++}
++
++static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
++			   gfn_t gfn, struct page *page)
++{
++	int level = PT32E_ROOT_LEVEL;
++	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
++	int pt_write = 0;
++
++	for (; ; level--) {
++		u32 index = PT64_INDEX(v, level);
++		u64 *table;
++
++		ASSERT(VALID_PAGE(table_addr));
++		table = __va(table_addr);
++
++		if (level == 1) {
++			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
++				     0, write, 1, &pt_write, gfn, page);
++			return pt_write || is_io_pte(table[index]);
++		}
++
++		if (table[index] == shadow_trap_nonpresent_pte) {
++			struct kvm_mmu_page *new_table;
++			gfn_t pseudo_gfn;
++
++			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
++				>> PAGE_SHIFT;
++			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
++						     v, level - 1,
++						     1, ACC_ALL, &table[index],
++						     NULL);
++			if (!new_table) {
++				pgprintk("nonpaging_map: ENOMEM\n");
++				kvm_release_page_clean(page);
++				return -ENOMEM;
++			}
++
++			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
++				| PT_WRITABLE_MASK | PT_USER_MASK;
++		}
++		table_addr = table[index] & PT64_BASE_ADDR_MASK;
++	}
++}
++
++static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
++{
++	int r;
++
++	struct page *page;
++
++	down_read(&current->mm->mmap_sem);
++	page = gfn_to_page(vcpu->kvm, gfn);
++
++	spin_lock(&vcpu->kvm->mmu_lock);
++	kvm_mmu_free_some_pages(vcpu);
++	r = __nonpaging_map(vcpu, v, write, gfn, page);
++	spin_unlock(&vcpu->kvm->mmu_lock);
++
++	up_read(&current->mm->mmap_sem);
++
++	return r;
++}
++
++
++static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
++				    struct kvm_mmu_page *sp)
++{
++	int i;
++
++	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
++		sp->spt[i] = shadow_trap_nonpresent_pte;
++}
++
++static void mmu_free_roots(struct kvm_vcpu *vcpu)
++{
++	int i;
++	struct kvm_mmu_page *sp;
++
++	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
++		return;
++	spin_lock(&vcpu->kvm->mmu_lock);
++#ifdef CONFIG_X86_64
++	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
++		hpa_t root = vcpu->arch.mmu.root_hpa;
++
++		sp = page_header(root);
++		--sp->root_count;
++		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
++		spin_unlock(&vcpu->kvm->mmu_lock);
++		return;
++	}
 +#endif
- 
- static int __init check_nmi_watchdog(void)
- {
-@@ -87,11 +87,13 @@ static int __init check_nmi_watchdog(void)
- 
- 	printk(KERN_INFO "Testing NMI watchdog ... ");
- 
-+#ifdef CONFIG_SMP
- 	if (nmi_watchdog == NMI_LOCAL_APIC)
- 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
++	for (i = 0; i < 4; ++i) {
++		hpa_t root = vcpu->arch.mmu.pae_root[i];
++
++		if (root) {
++			root &= PT64_BASE_ADDR_MASK;
++			sp = page_header(root);
++			--sp->root_count;
++		}
++		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
++	}
++	spin_unlock(&vcpu->kvm->mmu_lock);
++	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
++}
++
++static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
++{
++	int i;
++	gfn_t root_gfn;
++	struct kvm_mmu_page *sp;
++
++	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
++
++#ifdef CONFIG_X86_64
++	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
++		hpa_t root = vcpu->arch.mmu.root_hpa;
++
++		ASSERT(!VALID_PAGE(root));
++		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
++				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
++		root = __pa(sp->spt);
++		++sp->root_count;
++		vcpu->arch.mmu.root_hpa = root;
++		return;
++	}
 +#endif
- 
- 	for_each_possible_cpu(cpu)
--		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
-+		prev_nmi_count[cpu] = nmi_count(cpu);
- 	local_irq_enable();
- 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
- 
-@@ -176,7 +178,7 @@ static int lapic_nmi_resume(struct sys_device *dev)
- 
- 
- static struct sysdev_class nmi_sysclass = {
--	set_kset_name("lapic_nmi"),
-+	.name		= "lapic_nmi",
- 	.resume		= lapic_nmi_resume,
- 	.suspend	= lapic_nmi_suspend,
- };
-@@ -237,10 +239,10 @@ void acpi_nmi_disable(void)
- 		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
- }
- 
--void setup_apic_nmi_watchdog (void *unused)
-+void setup_apic_nmi_watchdog(void *unused)
- {
- 	if (__get_cpu_var(wd_enabled))
-- 		return;
++	for (i = 0; i < 4; ++i) {
++		hpa_t root = vcpu->arch.mmu.pae_root[i];
++
++		ASSERT(!VALID_PAGE(root));
++		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
++			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
++				vcpu->arch.mmu.pae_root[i] = 0;
++				continue;
++			}
++			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
++		} else if (vcpu->arch.mmu.root_level == 0)
++			root_gfn = 0;
++		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
++				      PT32_ROOT_LEVEL, !is_paging(vcpu),
++				      ACC_ALL, NULL, NULL);
++		root = __pa(sp->spt);
++		++sp->root_count;
++		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
++	}
++	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
++}
++
++static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
++{
++	return vaddr;
++}
++
++static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
++				u32 error_code)
++{
++	gfn_t gfn;
++	int r;
++
++	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
++	r = mmu_topup_memory_caches(vcpu);
++	if (r)
++		return r;
++
++	ASSERT(vcpu);
++	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
++
++	gfn = gva >> PAGE_SHIFT;
++
++	return nonpaging_map(vcpu, gva & PAGE_MASK,
++			     error_code & PFERR_WRITE_MASK, gfn);
++}
++
++static void nonpaging_free(struct kvm_vcpu *vcpu)
++{
++	mmu_free_roots(vcpu);
++}
++
++static int nonpaging_init_context(struct kvm_vcpu *vcpu)
++{
++	struct kvm_mmu *context = &vcpu->arch.mmu;
++
++	context->new_cr3 = nonpaging_new_cr3;
++	context->page_fault = nonpaging_page_fault;
++	context->gva_to_gpa = nonpaging_gva_to_gpa;
++	context->free = nonpaging_free;
++	context->prefetch_page = nonpaging_prefetch_page;
++	context->root_level = 0;
++	context->shadow_root_level = PT32E_ROOT_LEVEL;
++	context->root_hpa = INVALID_PAGE;
++	return 0;
++}
++
++void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
++{
++	++vcpu->stat.tlb_flush;
++	kvm_x86_ops->tlb_flush(vcpu);
++}
++
++static void paging_new_cr3(struct kvm_vcpu *vcpu)
++{
++	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
++	mmu_free_roots(vcpu);
++}
++
++static void inject_page_fault(struct kvm_vcpu *vcpu,
++			      u64 addr,
++			      u32 err_code)
++{
++	kvm_inject_page_fault(vcpu, addr, err_code);
++}
++
++static void paging_free(struct kvm_vcpu *vcpu)
++{
++	nonpaging_free(vcpu);
++}
++
++#define PTTYPE 64
++#include "paging_tmpl.h"
++#undef PTTYPE
++
++#define PTTYPE 32
++#include "paging_tmpl.h"
++#undef PTTYPE
++
++static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
++{
++	struct kvm_mmu *context = &vcpu->arch.mmu;
++
++	ASSERT(is_pae(vcpu));
++	context->new_cr3 = paging_new_cr3;
++	context->page_fault = paging64_page_fault;
++	context->gva_to_gpa = paging64_gva_to_gpa;
++	context->prefetch_page = paging64_prefetch_page;
++	context->free = paging_free;
++	context->root_level = level;
++	context->shadow_root_level = level;
++	context->root_hpa = INVALID_PAGE;
++	return 0;
++}
++
++static int paging64_init_context(struct kvm_vcpu *vcpu)
++{
++	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
++}
++
++static int paging32_init_context(struct kvm_vcpu *vcpu)
++{
++	struct kvm_mmu *context = &vcpu->arch.mmu;
++
++	context->new_cr3 = paging_new_cr3;
++	context->page_fault = paging32_page_fault;
++	context->gva_to_gpa = paging32_gva_to_gpa;
++	context->free = paging_free;
++	context->prefetch_page = paging32_prefetch_page;
++	context->root_level = PT32_ROOT_LEVEL;
++	context->shadow_root_level = PT32E_ROOT_LEVEL;
++	context->root_hpa = INVALID_PAGE;
++	return 0;
++}
++
++static int paging32E_init_context(struct kvm_vcpu *vcpu)
++{
++	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
++}
++
++static int init_kvm_mmu(struct kvm_vcpu *vcpu)
++{
++	ASSERT(vcpu);
++	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
++
++	if (!is_paging(vcpu))
++		return nonpaging_init_context(vcpu);
++	else if (is_long_mode(vcpu))
++		return paging64_init_context(vcpu);
++	else if (is_pae(vcpu))
++		return paging32E_init_context(vcpu);
++	else
++		return paging32_init_context(vcpu);
++}
++
++static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
++{
++	ASSERT(vcpu);
++	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
++		vcpu->arch.mmu.free(vcpu);
++		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
++	}
++}
++
++int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
++{
++	destroy_kvm_mmu(vcpu);
++	return init_kvm_mmu(vcpu);
++}
++EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
++
++int kvm_mmu_load(struct kvm_vcpu *vcpu)
++{
++	int r;
++
++	r = mmu_topup_memory_caches(vcpu);
++	if (r)
++		goto out;
++	spin_lock(&vcpu->kvm->mmu_lock);
++	kvm_mmu_free_some_pages(vcpu);
++	mmu_alloc_roots(vcpu);
++	spin_unlock(&vcpu->kvm->mmu_lock);
++	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
++	kvm_mmu_flush_tlb(vcpu);
++out:
++	return r;
++}
++EXPORT_SYMBOL_GPL(kvm_mmu_load);
++
++void kvm_mmu_unload(struct kvm_vcpu *vcpu)
++{
++	mmu_free_roots(vcpu);
++}
++
++static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
++				  struct kvm_mmu_page *sp,
++				  u64 *spte)
++{
++	u64 pte;
++	struct kvm_mmu_page *child;
++
++	pte = *spte;
++	if (is_shadow_present_pte(pte)) {
++		if (sp->role.level == PT_PAGE_TABLE_LEVEL)
++			rmap_remove(vcpu->kvm, spte);
++		else {
++			child = page_header(pte & PT64_BASE_ADDR_MASK);
++			mmu_page_remove_parent_pte(child, spte);
++		}
++	}
++	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
++}
++
++static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
++				  struct kvm_mmu_page *sp,
++				  u64 *spte,
++				  const void *new, int bytes,
++				  int offset_in_pte)
++{
++	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
++		++vcpu->kvm->stat.mmu_pde_zapped;
 +		return;
- 
- 	/* cheap hack to support suspend/resume */
- 	/* if cpu0 is not active neither should the other cpus */
-@@ -329,7 +331,7 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
- 	unsigned int sum;
- 	int touched = 0;
- 	int cpu = smp_processor_id();
--	int rc=0;
-+	int rc = 0;
- 
- 	/* check for other users first */
- 	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
-index 4253c4e..fb99484 100644
---- a/arch/x86/kernel/nmi_64.c
-+++ b/arch/x86/kernel/nmi_64.c
-@@ -39,7 +39,7 @@ static cpumask_t backtrace_mask = CPU_MASK_NONE;
-  *  0: the lapic NMI watchdog is disabled, but can be enabled
-  */
- atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
--int panic_on_timeout;
-+static int panic_on_timeout;
- 
- unsigned int nmi_watchdog = NMI_DEFAULT;
- static unsigned int nmi_hz = HZ;
-@@ -78,22 +78,22 @@ static __init void nmi_cpu_busy(void *data)
- }
- #endif
- 
--int __init check_nmi_watchdog (void)
-+int __init check_nmi_watchdog(void)
- {
--	int *counts;
-+	int *prev_nmi_count;
- 	int cpu;
- 
--	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED)) 
-+	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
- 		return 0;
- 
- 	if (!atomic_read(&nmi_active))
- 		return 0;
- 
--	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
--	if (!counts)
-+	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
-+	if (!prev_nmi_count)
- 		return -1;
- 
--	printk(KERN_INFO "testing NMI watchdog ... ");
-+	printk(KERN_INFO "Testing NMI watchdog ... ");
- 
- #ifdef CONFIG_SMP
- 	if (nmi_watchdog == NMI_LOCAL_APIC)
-@@ -101,30 +101,29 @@ int __init check_nmi_watchdog (void)
- #endif
- 
- 	for (cpu = 0; cpu < NR_CPUS; cpu++)
--		counts[cpu] = cpu_pda(cpu)->__nmi_count;
-+		prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
- 	local_irq_enable();
- 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
- 
- 	for_each_online_cpu(cpu) {
- 		if (!per_cpu(wd_enabled, cpu))
- 			continue;
--		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
-+		if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
- 			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
- 			       "appears to be stuck (%d->%d)!\n",
--			       cpu,
--			       counts[cpu],
--			       cpu_pda(cpu)->__nmi_count);
-+				cpu,
-+				prev_nmi_count[cpu],
-+				cpu_pda(cpu)->__nmi_count);
- 			per_cpu(wd_enabled, cpu) = 0;
- 			atomic_dec(&nmi_active);
- 		}
- 	}
-+	endflag = 1;
- 	if (!atomic_read(&nmi_active)) {
--		kfree(counts);
-+		kfree(prev_nmi_count);
- 		atomic_set(&nmi_active, -1);
--		endflag = 1;
- 		return -1;
- 	}
--	endflag = 1;
- 	printk("OK.\n");
- 
- 	/* now that we know it works we can reduce NMI frequency to
-@@ -132,11 +131,11 @@ int __init check_nmi_watchdog (void)
- 	if (nmi_watchdog == NMI_LOCAL_APIC)
- 		nmi_hz = lapic_adjust_nmi_hz(1);
- 
--	kfree(counts);
-+	kfree(prev_nmi_count);
- 	return 0;
- }
- 
--int __init setup_nmi_watchdog(char *str)
-+static int __init setup_nmi_watchdog(char *str)
- {
- 	int nmi;
- 
-@@ -159,34 +158,6 @@ int __init setup_nmi_watchdog(char *str)
- 
- __setup("nmi_watchdog=", setup_nmi_watchdog);
- 
--
--static void __acpi_nmi_disable(void *__unused)
--{
--	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
--}
--
--/*
-- * Disable timer based NMIs on all CPUs:
-- */
--void acpi_nmi_disable(void)
--{
--	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
--		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
--}
--
--static void __acpi_nmi_enable(void *__unused)
--{
--	apic_write(APIC_LVT0, APIC_DM_NMI);
--}
--
--/*
-- * Enable timer based NMIs on all CPUs:
-- */
--void acpi_nmi_enable(void)
--{
--	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
--		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
--}
- #ifdef CONFIG_PM
- 
- static int nmi_pm_active; /* nmi_active before suspend */
-@@ -211,13 +182,13 @@ static int lapic_nmi_resume(struct sys_device *dev)
- }
- 
- static struct sysdev_class nmi_sysclass = {
--	set_kset_name("lapic_nmi"),
-+	.name		= "lapic_nmi",
- 	.resume		= lapic_nmi_resume,
- 	.suspend	= lapic_nmi_suspend,
- };
- 
- static struct sys_device device_lapic_nmi = {
--	.id		= 0,
-+	.id	= 0,
- 	.cls	= &nmi_sysclass,
- };
- 
-@@ -231,7 +202,7 @@ static int __init init_lapic_nmi_sysfs(void)
- 	if (nmi_watchdog != NMI_LOCAL_APIC)
- 		return 0;
- 
--	if ( atomic_read(&nmi_active) < 0 )
-+	if (atomic_read(&nmi_active) < 0)
- 		return 0;
- 
- 	error = sysdev_class_register(&nmi_sysclass);
-@@ -244,9 +215,37 @@ late_initcall(init_lapic_nmi_sysfs);
- 
- #endif	/* CONFIG_PM */
- 
-+static void __acpi_nmi_enable(void *__unused)
++	}
++
++	++vcpu->kvm->stat.mmu_pte_updated;
++	if (sp->role.glevels == PT32_ROOT_LEVEL)
++		paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
++	else
++		paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
++}
++
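++/*
++ * A remote TLB flush is only needed when a previously present spte is
++ * torn down, retargeted to another frame, or loses permissions;
++ * newly created entries, or ones that only gain rights, get away with
++ * a local flush (see mmu_pte_write_flush_tlb() below).
++ */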
++static bool need_remote_flush(u64 old, u64 new)
++{
++	if (!is_shadow_present_pte(old))
++		return false;
++	if (!is_shadow_present_pte(new))
++		return true;
++	if ((old ^ new) & PT64_BASE_ADDR_MASK)
++		return true;
++	old ^= PT64_NX_MASK;
++	new ^= PT64_NX_MASK;
++	return (old & ~new & PT64_PERM_MASK) != 0;
++}
++
++static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
++{
++	if (need_remote_flush(old, new))
++		kvm_flush_remote_tlbs(vcpu->kvm);
++	else
++		kvm_mmu_flush_tlb(vcpu);
++}
++
++static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
++{
++	u64 *spte = vcpu->arch.last_pte_updated;
++
++	return !!(spte && (*spte & PT_ACCESSED_MASK));
++}
++
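++/*
++ * Speculatively decode the guest pte being written (reassembling the
++ * two 32-bit halves a PAE guest writes separately) and pin the page
++ * it points to, so mmu_pte_write_new_pte() can consume it while
++ * mmu_lock is held.  The reference is dropped at the end of
++ * kvm_mmu_pte_write().
++ */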
++static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
++					  const u8 *new, int bytes)
++{
++	gfn_t gfn;
++	int r;
++	u64 gpte = 0;
++
++	if (bytes != 4 && bytes != 8)
++		return;
++
++	/*
++	 * Assume that the pte write is on a page table of the same type
++	 * as the current vcpu paging mode.  This is nearly always true
++	 * (might be false while changing modes).  Note it is verified later
++	 * by update_pte().
++	 */
++	if (is_pae(vcpu)) {
++		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
++		if ((bytes == 4) && (gpa % 4 == 0)) {
++			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
++			if (r)
++				return;
++			memcpy((void *)&gpte + (gpa % 8), new, 4);
++		} else if ((bytes == 8) && (gpa % 8 == 0)) {
++			memcpy((void *)&gpte, new, 8);
++		}
++	} else {
++		if ((bytes == 4) && (gpa % 4 == 0))
++			memcpy((void *)&gpte, new, 4);
++	}
++	if (!is_present_pte(gpte))
++		return;
++	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
++	vcpu->arch.update_pte.gfn = gfn;
++	vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
++}
++
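++/*
++ * Emulated write to a write-protected guest page table.  Zap the
++ * affected sptes and, where the new gpte could be decoded above,
++ * install a replacement.  Misaligned writes and pages written too
++ * often ("flooded") cause the whole shadow page to be zapped instead.
++ */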
++void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
++		       const u8 *new, int bytes)
++{
++	gfn_t gfn = gpa >> PAGE_SHIFT;
++	struct kvm_mmu_page *sp;
++	struct hlist_node *node, *n;
++	struct hlist_head *bucket;
++	unsigned index;
++	u64 entry;
++	u64 *spte;
++	unsigned offset = offset_in_page(gpa);
++	unsigned pte_size;
++	unsigned page_offset;
++	unsigned misaligned;
++	unsigned quadrant;
++	int level;
++	int flooded = 0;
++	int npte;
++
++	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
++	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
++	spin_lock(&vcpu->kvm->mmu_lock);
++	kvm_mmu_free_some_pages(vcpu);
++	++vcpu->kvm->stat.mmu_pte_write;
++	kvm_mmu_audit(vcpu, "pre pte write");
++	if (gfn == vcpu->arch.last_pt_write_gfn
++	    && !last_updated_pte_accessed(vcpu)) {
++		++vcpu->arch.last_pt_write_count;
++		if (vcpu->arch.last_pt_write_count >= 3)
++			flooded = 1;
++	} else {
++		vcpu->arch.last_pt_write_gfn = gfn;
++		vcpu->arch.last_pt_write_count = 1;
++		vcpu->arch.last_pte_updated = NULL;
++	}
++	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
++	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
++	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
++		if (sp->gfn != gfn || sp->role.metaphysical)
++			continue;
++		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
++		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
++		misaligned |= bytes < 4;
++		if (misaligned || flooded) {
++			/*
++			 * Misaligned accesses are too much trouble to fix
++			 * up; also, they usually indicate a page is not used
++			 * as a page table.
++			 *
++			 * If we're seeing too many writes to a page,
++			 * it may no longer be a page table, or we may be
++			 * forking, in which case it is better to unmap the
++			 * page.
++			 */
++			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
++				 gpa, bytes, sp->role.word);
++			kvm_mmu_zap_page(vcpu->kvm, sp);
++			++vcpu->kvm->stat.mmu_flooded;
++			continue;
++		}
++		page_offset = offset;
++		level = sp->role.level;
++		npte = 1;
++		if (sp->role.glevels == PT32_ROOT_LEVEL) {
++			page_offset <<= 1;	/* 32->64 */
++			/*
++			 * A 32-bit pde maps 4MB while the shadow pdes map
++			 * only 2MB.  So we need to double the offset again
++			 * and zap two pdes instead of one.
++			 */
++			if (level == PT32_ROOT_LEVEL) {
++				page_offset &= ~7; /* kill rounding error */
++				page_offset <<= 1;
++				npte = 2;
++			}
++			quadrant = page_offset >> PAGE_SHIFT;
++			page_offset &= ~PAGE_MASK;
++			if (quadrant != sp->role.quadrant)
++				continue;
++		}
++		spte = &sp->spt[page_offset / sizeof(*spte)];
++		while (npte--) {
++			entry = *spte;
++			mmu_pte_write_zap_pte(vcpu, sp, spte);
++			mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
++					      page_offset & (pte_size - 1));
++			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++			++spte;
++		}
++	}
++	kvm_mmu_audit(vcpu, "post pte write");
++	spin_unlock(&vcpu->kvm->mmu_lock);
++	if (vcpu->arch.update_pte.page) {
++		kvm_release_page_clean(vcpu->arch.update_pte.page);
++		vcpu->arch.update_pte.page = NULL;
++	}
++}
++
++int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
++{
++	gpa_t gpa;
++	int r;
++
++	down_read(&current->mm->mmap_sem);
++	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
++	up_read(&current->mm->mmap_sem);
++
++	spin_lock(&vcpu->kvm->mmu_lock);
++	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
++	spin_unlock(&vcpu->kvm->mmu_lock);
++	return r;
++}
++
++void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
++{
++	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
++		struct kvm_mmu_page *sp;
++
++		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
++				  struct kvm_mmu_page, link);
++		kvm_mmu_zap_page(vcpu->kvm, sp);
++		++vcpu->kvm->stat.mmu_recycled;
++	}
++}
++
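++/*
++ * Common page fault entry point.  The mode-specific handler tries to
++ * fix the fault itself; if it reports that emulation is needed, the
++ * faulting instruction is fed to the emulator, and MMIO accesses are
++ * turned into an exit to userspace (return value 0).
++ */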
++int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
++{
++	int r;
++	enum emulation_result er;
++
++	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
++	if (r < 0)
++		goto out;
++
++	if (!r) {
++		r = 1;
++		goto out;
++	}
++
++	r = mmu_topup_memory_caches(vcpu);
++	if (r)
++		goto out;
++
++	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
++
++	switch (er) {
++	case EMULATE_DONE:
++		return 1;
++	case EMULATE_DO_MMIO:
++		++vcpu->stat.mmio_exits;
++		return 0;
++	case EMULATE_FAIL:
++		kvm_report_emulation_failure(vcpu, "pagetable");
++		return 1;
++	default:
++		BUG();
++	}
++out:
++	return r;
++}
++EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
++
++static void free_mmu_pages(struct kvm_vcpu *vcpu)
++{
++	struct kvm_mmu_page *sp;
++
++	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
++		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
++				  struct kvm_mmu_page, link);
++		kvm_mmu_zap_page(vcpu->kvm, sp);
++	}
++	free_page((unsigned long)vcpu->arch.mmu.pae_root);
++}
++
++static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
++{
++	struct page *page;
++	int i;
++
++	ASSERT(vcpu);
++
++	if (vcpu->kvm->arch.n_requested_mmu_pages)
++		vcpu->kvm->arch.n_free_mmu_pages =
++					vcpu->kvm->arch.n_requested_mmu_pages;
++	else
++		vcpu->kvm->arch.n_free_mmu_pages =
++					vcpu->kvm->arch.n_alloc_mmu_pages;
++	/*
++	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
++	 * Therefore we need to allocate shadow page tables in the first
++	 * 4GB of memory, which happens to fit the DMA32 zone.
++	 */
++	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
++	if (!page)
++		goto error_1;
++	vcpu->arch.mmu.pae_root = page_address(page);
++	for (i = 0; i < 4; ++i)
++		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
++
++	return 0;
++
++error_1:
++	free_mmu_pages(vcpu);
++	return -ENOMEM;
++}
++
++int kvm_mmu_create(struct kvm_vcpu *vcpu)
++{
++	ASSERT(vcpu);
++	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
++
++	return alloc_mmu_pages(vcpu);
++}
++
++int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 +{
-+	apic_write(APIC_LVT0, APIC_DM_NMI);
++	ASSERT(vcpu);
++	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
++
++	return init_kvm_mmu(vcpu);
 +}
 +
-+/*
-+ * Enable timer based NMIs on all CPUs:
-+ */
-+void acpi_nmi_enable(void)
++void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 +{
-+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-+		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
++	ASSERT(vcpu);
++
++	destroy_kvm_mmu(vcpu);
++	free_mmu_pages(vcpu);
++	mmu_free_memory_caches(vcpu);
 +}
 +
-+static void __acpi_nmi_disable(void *__unused)
++void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 +{
-+	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
++	struct kvm_mmu_page *sp;
++
++	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
++		int i;
++		u64 *pt;
++
++		if (!test_bit(slot, &sp->slot_bitmap))
++			continue;
++
++		pt = sp->spt;
++		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
++			/* avoid RMW */
++			if (pt[i] & PT_WRITABLE_MASK)
++				pt[i] &= ~PT_WRITABLE_MASK;
++	}
++}
++
++void kvm_mmu_zap_all(struct kvm *kvm)
++{
++	struct kvm_mmu_page *sp, *node;
++
++	spin_lock(&kvm->mmu_lock);
++	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
++		kvm_mmu_zap_page(kvm, sp);
++	spin_unlock(&kvm->mmu_lock);
++
++	kvm_flush_remote_tlbs(kvm);
++}
++
++void kvm_mmu_module_exit(void)
++{
++	if (pte_chain_cache)
++		kmem_cache_destroy(pte_chain_cache);
++	if (rmap_desc_cache)
++		kmem_cache_destroy(rmap_desc_cache);
++	if (mmu_page_header_cache)
++		kmem_cache_destroy(mmu_page_header_cache);
++}
++
++int kvm_mmu_module_init(void)
++{
++	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
++					    sizeof(struct kvm_pte_chain),
++					    0, 0, NULL);
++	if (!pte_chain_cache)
++		goto nomem;
++	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
++					    sizeof(struct kvm_rmap_desc),
++					    0, 0, NULL);
++	if (!rmap_desc_cache)
++		goto nomem;
++
++	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
++						  sizeof(struct kvm_mmu_page),
++						  0, 0, NULL);
++	if (!mmu_page_header_cache)
++		goto nomem;
++
++	return 0;
++
++nomem:
++	kvm_mmu_module_exit();
++	return -ENOMEM;
 +}
 +
 +/*
-+ * Disable timer based NMIs on all CPUs:
++ * Calculate the number of mmu pages needed for kvm.
 + */
-+void acpi_nmi_disable(void)
++unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 +{
-+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-+		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
++	int i;
++	unsigned int nr_mmu_pages;
++	unsigned int  nr_pages = 0;
++
++	for (i = 0; i < kvm->nmemslots; i++)
++		nr_pages += kvm->memslots[i].npages;
++
++	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
++	nr_mmu_pages = max(nr_mmu_pages,
++			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
++
++	return nr_mmu_pages;
 +}
 +
- void setup_apic_nmi_watchdog(void *unused)
- {
--	if (__get_cpu_var(wd_enabled) == 1)
-+	if (__get_cpu_var(wd_enabled))
- 		return;
- 
- 	/* cheap hack to support suspend/resume */
-@@ -311,8 +310,9 @@ void touch_nmi_watchdog(void)
- 		}
- 	}
- 
-- 	touch_softlockup_watchdog();
-+	touch_softlockup_watchdog();
- }
-+EXPORT_SYMBOL(touch_nmi_watchdog);
- 
- int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
- {
-@@ -479,4 +479,3 @@ void __trigger_all_cpu_backtrace(void)
- 
- EXPORT_SYMBOL(nmi_active);
- EXPORT_SYMBOL(nmi_watchdog);
--EXPORT_SYMBOL(touch_nmi_watchdog);
-diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
-index 9000d82..e65281b 100644
---- a/arch/x86/kernel/numaq_32.c
-+++ b/arch/x86/kernel/numaq_32.c
-@@ -82,7 +82,7 @@ static int __init numaq_tsc_disable(void)
- {
- 	if (num_online_nodes() > 1) {
- 		printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
--		tsc_disable = 1;
-+		setup_clear_cpu_cap(X86_FEATURE_TSC);
- 	}
- 	return 0;
- }
-diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
++#ifdef AUDIT
++
++static const char *audit_msg;
++
++static gva_t canonicalize(gva_t gva)
++{
++#ifdef CONFIG_X86_64
++	gva = (long long)(gva << 16) >> 16;
++#endif
++	return gva;
++}
++
++static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
++				gva_t va, int level)
++{
++	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
++	int i;
++	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
++
++	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
++		u64 ent = pt[i];
++
++		if (ent == shadow_trap_nonpresent_pte)
++			continue;
++
++		va = canonicalize(va);
++		if (level > 1) {
++			if (ent == shadow_notrap_nonpresent_pte)
++				printk(KERN_ERR "audit: (%s) nontrapping pte"
++				       " in nonleaf level: levels %d gva %lx"
++				       " level %d pte %llx\n", audit_msg,
++				       vcpu->arch.mmu.root_level, va, level, ent);
++
++			audit_mappings_page(vcpu, ent, va, level - 1);
++		} else {
++			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
++			struct page *page = gpa_to_page(vcpu, gpa);
++			hpa_t hpa = page_to_phys(page);
++
++			if (is_shadow_present_pte(ent)
++			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
++				printk(KERN_ERR "xx audit error: (%s) levels %d"
++				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
++				       audit_msg, vcpu->arch.mmu.root_level,
++				       va, gpa, hpa, ent,
++				       is_shadow_present_pte(ent));
++			else if (ent == shadow_notrap_nonpresent_pte
++				 && !is_error_hpa(hpa))
++				printk(KERN_ERR "audit: (%s) notrap shadow,"
++				       " valid guest gva %lx\n", audit_msg, va);
++			kvm_release_page_clean(page);
++
++		}
++	}
++}
++
++static void audit_mappings(struct kvm_vcpu *vcpu)
++{
++	unsigned i;
++
++	if (vcpu->arch.mmu.root_level == 4)
++		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
++	else
++		for (i = 0; i < 4; ++i)
++			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
++				audit_mappings_page(vcpu,
++						    vcpu->arch.mmu.pae_root[i],
++						    i << 30,
++						    2);
++}
++
++static int count_rmaps(struct kvm_vcpu *vcpu)
++{
++	int nmaps = 0;
++	int i, j, k;
++
++	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
++		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
++		struct kvm_rmap_desc *d;
++
++		for (j = 0; j < m->npages; ++j) {
++			unsigned long *rmapp = &m->rmap[j];
++
++			if (!*rmapp)
++				continue;
++			if (!(*rmapp & 1)) {
++				++nmaps;
++				continue;
++			}
++			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
++			while (d) {
++				for (k = 0; k < RMAP_EXT; ++k)
++					if (d->shadow_ptes[k])
++						++nmaps;
++					else
++						break;
++				d = d->more;
++			}
++		}
++	}
++	return nmaps;
++}
++
++static int count_writable_mappings(struct kvm_vcpu *vcpu)
++{
++	int nmaps = 0;
++	struct kvm_mmu_page *sp;
++	int i;
++
++	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
++		u64 *pt = sp->spt;
++
++		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
++			continue;
++
++		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
++			u64 ent = pt[i];
++
++			if (!(ent & PT_PRESENT_MASK))
++				continue;
++			if (!(ent & PT_WRITABLE_MASK))
++				continue;
++			++nmaps;
++		}
++	}
++	return nmaps;
++}
++
++static void audit_rmap(struct kvm_vcpu *vcpu)
++{
++	int n_rmap = count_rmaps(vcpu);
++	int n_actual = count_writable_mappings(vcpu);
++
++	if (n_rmap != n_actual)
++		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
++		       __FUNCTION__, audit_msg, n_rmap, n_actual);
++}
++
++static void audit_write_protection(struct kvm_vcpu *vcpu)
++{
++	struct kvm_mmu_page *sp;
++	struct kvm_memory_slot *slot;
++	unsigned long *rmapp;
++	gfn_t gfn;
++
++	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
++		if (sp->role.metaphysical)
++			continue;
++
++		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
++		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
++		rmapp = &slot->rmap[gfn - slot->base_gfn];
++		if (*rmapp)
++			printk(KERN_ERR "%s: (%s) shadow page has writable"
++			       " mappings: gfn %lx role %x\n",
++			       __FUNCTION__, audit_msg, sp->gfn,
++			       sp->role.word);
++	}
++}
++
++static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
++{
++	int olddbg = dbg;
++
++	dbg = 0;
++	audit_msg = msg;
++	audit_rmap(vcpu);
++	audit_write_protection(vcpu);
++	audit_mappings(vcpu);
++	dbg = olddbg;
++}
++
++#endif
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
 new file mode 100644
-index 0000000..075962c
+index 0000000..1fce19e
 --- /dev/null
-+++ b/arch/x86/kernel/paravirt.c
-@@ -0,0 +1,440 @@
-+/*  Paravirtualization interfaces
-+    Copyright (C) 2006 Rusty Russell IBM Corporation
++++ b/arch/x86/kvm/mmu.h
+@@ -0,0 +1,44 @@
++#ifndef __KVM_X86_MMU_H
++#define __KVM_X86_MMU_H
 +
-+    This program is free software; you can redistribute it and/or modify
-+    it under the terms of the GNU General Public License as published by
-+    the Free Software Foundation; either version 2 of the License, or
-+    (at your option) any later version.
++#include <linux/kvm_host.h>
 +
-+    This program is distributed in the hope that it will be useful,
-+    but WITHOUT ANY WARRANTY; without even the implied warranty of
-+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+    GNU General Public License for more details.
++static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
++{
++	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
++		__kvm_mmu_free_some_pages(vcpu);
++}
 +
-+    You should have received a copy of the GNU General Public License
-+    along with this program; if not, write to the Free Software
-+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
++{
++	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
++		return 0;
 +
-+    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
-+*/
++	return kvm_mmu_load(vcpu);
++}
++
++static inline int is_long_mode(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_X86_64
++	return vcpu->arch.shadow_efer & EFER_LME;
++#else
++	return 0;
++#endif
++}
++
++static inline int is_pae(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.cr4 & X86_CR4_PAE;
++}
++
++static inline int is_pse(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.cr4 & X86_CR4_PSE;
++}
++
++static inline int is_paging(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.cr0 & X86_CR0_PG;
++}
++
++#endif
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+new file mode 100644
+index 0000000..03ba860
+--- /dev/null
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -0,0 +1,484 @@
++/*
++ * Kernel-based Virtual Machine driver for Linux
++ *
++ * This module enables machines with Intel VT-x extensions to run virtual
++ * machines without emulation or binary translation.
++ *
++ * MMU support
++ *
++ * Copyright (C) 2006 Qumranet, Inc.
++ *
++ * Authors:
++ *   Yaniv Kamay  <yaniv at qumranet.com>
++ *   Avi Kivity   <avi at qumranet.com>
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
++ *
++ */
++
++/*
++ * We need the mmu code to access both 32-bit and 64-bit guest ptes,
++ * so the code in this file is compiled twice, once per pte size.
++ */
++
++#if PTTYPE == 64
++	#define pt_element_t u64
++	#define guest_walker guest_walker64
++	#define FNAME(name) paging##64_##name
++	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
++	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
++	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
++	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
++	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
++	#define PT_LEVEL_BITS PT64_LEVEL_BITS
++	#ifdef CONFIG_X86_64
++	#define PT_MAX_FULL_LEVELS 4
++	#define CMPXCHG cmpxchg
++	#else
++	#define CMPXCHG cmpxchg64
++	#define PT_MAX_FULL_LEVELS 2
++	#endif
++#elif PTTYPE == 32
++	#define pt_element_t u32
++	#define guest_walker guest_walker32
++	#define FNAME(name) paging##32_##name
++	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
++	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
++	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
++	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
++	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
++	#define PT_LEVEL_BITS PT32_LEVEL_BITS
++	#define PT_MAX_FULL_LEVELS 2
++	#define CMPXCHG cmpxchg
++#else
++	#error Invalid PTTYPE value
++#endif
++
++#define gpte_to_gfn FNAME(gpte_to_gfn)
++#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
++
++/*
++ * The guest_walker structure emulates the behavior of the hardware page
++ * table walker.
++ */
++struct guest_walker {
++	int level;
++	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
++	pt_element_t ptes[PT_MAX_FULL_LEVELS];
++	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
++	unsigned pt_access;
++	unsigned pte_access;
++	gfn_t gfn;
++	u32 error_code;
++};
++
++static gfn_t gpte_to_gfn(pt_element_t gpte)
++{
++	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
++}
++
++static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
++{
++	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
++}
++
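++/*
++ * Atomically update a guest pte (used below to set the accessed and
++ * dirty bits): map the guest page table page, cmpxchg the entry, and
++ * report whether the guest changed it under us so the caller can
++ * restart the walk.
++ */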
++static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
++			 gfn_t table_gfn, unsigned index,
++			 pt_element_t orig_pte, pt_element_t new_pte)
++{
++	pt_element_t ret;
++	pt_element_t *table;
++	struct page *page;
++
++	page = gfn_to_page(kvm, table_gfn);
++	table = kmap_atomic(page, KM_USER0);
++
++	ret = CMPXCHG(&table[index], orig_pte, new_pte);
++
++	kunmap_atomic(table, KM_USER0);
++
++	kvm_release_page_dirty(page);
++
++	return (ret != orig_pte);
++}
++
++static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
++{
++	unsigned access;
++
++	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
++#if PTTYPE == 64
++	if (is_nx(vcpu))
++		access &= ~(gpte >> PT64_NX_SHIFT);
++#endif
++	return access;
++}
++
++/*
++ * Fetch a guest pte for a guest virtual address
++ */
++static int FNAME(walk_addr)(struct guest_walker *walker,
++			    struct kvm_vcpu *vcpu, gva_t addr,
++			    int write_fault, int user_fault, int fetch_fault)
++{
++	pt_element_t pte;
++	gfn_t table_gfn;
++	unsigned index, pt_access, pte_access;
++	gpa_t pte_gpa;
++
++	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
++walk:
++	walker->level = vcpu->arch.mmu.root_level;
++	pte = vcpu->arch.cr3;
++#if PTTYPE == 64
++	if (!is_long_mode(vcpu)) {
++		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
++		if (!is_present_pte(pte))
++			goto not_present;
++		--walker->level;
++	}
++#endif
++	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
++	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
++
++	pt_access = ACC_ALL;
++
++	for (;;) {
++		index = PT_INDEX(addr, walker->level);
++
++		table_gfn = gpte_to_gfn(pte);
++		pte_gpa = gfn_to_gpa(table_gfn);
++		pte_gpa += index * sizeof(pt_element_t);
++		walker->table_gfn[walker->level - 1] = table_gfn;
++		walker->pte_gpa[walker->level - 1] = pte_gpa;
++		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
++			 walker->level - 1, table_gfn);
++
++		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
++
++		if (!is_present_pte(pte))
++			goto not_present;
++
++		if (write_fault && !is_writeble_pte(pte))
++			if (user_fault || is_write_protection(vcpu))
++				goto access_error;
++
++		if (user_fault && !(pte & PT_USER_MASK))
++			goto access_error;
++
++#if PTTYPE == 64
++		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
++			goto access_error;
++#endif
++
++		if (!(pte & PT_ACCESSED_MASK)) {
++			mark_page_dirty(vcpu->kvm, table_gfn);
++			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
++			    index, pte, pte|PT_ACCESSED_MASK))
++				goto walk;
++			pte |= PT_ACCESSED_MASK;
++		}
++
++		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
++
++		walker->ptes[walker->level - 1] = pte;
++
++		if (walker->level == PT_PAGE_TABLE_LEVEL) {
++			walker->gfn = gpte_to_gfn(pte);
++			break;
++		}
++
++		if (walker->level == PT_DIRECTORY_LEVEL
++		    && (pte & PT_PAGE_SIZE_MASK)
++		    && (PTTYPE == 64 || is_pse(vcpu))) {
++			walker->gfn = gpte_to_gfn_pde(pte);
++			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
++			if (PTTYPE == 32 && is_cpuid_PSE36())
++				walker->gfn += pse36_gfn_delta(pte);
++			break;
++		}
++
++		pt_access = pte_access;
++		--walker->level;
++	}
++
++	if (write_fault && !is_dirty_pte(pte)) {
++		bool ret;
++
++		mark_page_dirty(vcpu->kvm, table_gfn);
++		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
++			    pte|PT_DIRTY_MASK);
++		if (ret)
++			goto walk;
++		pte |= PT_DIRTY_MASK;
++		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
++		walker->ptes[walker->level - 1] = pte;
++	}
++
++	walker->pt_access = pt_access;
++	walker->pte_access = pte_access;
++	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
++		 __FUNCTION__, (u64)pte, pt_access, pte_access);
++	return 1;
++
++not_present:
++	walker->error_code = 0;
++	goto err;
++
++access_error:
++	walker->error_code = PFERR_PRESENT_MASK;
++
++err:
++	if (write_fault)
++		walker->error_code |= PFERR_WRITE_MASK;
++	if (user_fault)
++		walker->error_code |= PFERR_USER_MASK;
++	if (fetch_fault)
++		walker->error_code |= PFERR_FETCH_MASK;
++	return 0;
++}
++
++static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
++			      u64 *spte, const void *pte, int bytes,
++			      int offset_in_pte)
++{
++	pt_element_t gpte;
++	unsigned pte_access;
++	struct page *npage;
++
++	gpte = *(const pt_element_t *)pte;
++	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
++		if (!offset_in_pte && !is_present_pte(gpte))
++			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
++		return;
++	}
++	if (bytes < sizeof(pt_element_t))
++		return;
++	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
++	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
++	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
++		return;
++	npage = vcpu->arch.update_pte.page;
++	if (!npage)
++		return;
++	get_page(npage);
++	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
++		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
++}
++
++/*
++ * Fetch a shadow pte for a specific level in the paging hierarchy.
++ */
++static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
++			 struct guest_walker *walker,
++			 int user_fault, int write_fault, int *ptwrite,
++			 struct page *page)
++{
++	hpa_t shadow_addr;
++	int level;
++	u64 *shadow_ent;
++	unsigned access = walker->pt_access;
++
++	if (!is_present_pte(walker->ptes[walker->level - 1]))
++		return NULL;
++
++	shadow_addr = vcpu->arch.mmu.root_hpa;
++	level = vcpu->arch.mmu.shadow_root_level;
++	if (level == PT32E_ROOT_LEVEL) {
++		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
++		shadow_addr &= PT64_BASE_ADDR_MASK;
++		--level;
++	}
++
++	for (; ; level--) {
++		u32 index = SHADOW_PT_INDEX(addr, level);
++		struct kvm_mmu_page *shadow_page;
++		u64 shadow_pte;
++		int metaphysical;
++		gfn_t table_gfn;
++		bool new_page = 0;
++
++		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
++		if (level == PT_PAGE_TABLE_LEVEL)
++			break;
++		if (is_shadow_present_pte(*shadow_ent)) {
++			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
++			continue;
++		}
++
++		if (level - 1 == PT_PAGE_TABLE_LEVEL
++		    && walker->level == PT_DIRECTORY_LEVEL) {
++			metaphysical = 1;
++			if (!is_dirty_pte(walker->ptes[level - 1]))
++				access &= ~ACC_WRITE_MASK;
++			table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
++		} else {
++			metaphysical = 0;
++			table_gfn = walker->table_gfn[level - 2];
++		}
++		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
++					       metaphysical, access,
++					       shadow_ent, &new_page);
++		if (new_page && !metaphysical) {
++			int r;
++			pt_element_t curr_pte;
++			r = kvm_read_guest_atomic(vcpu->kvm,
++						  walker->pte_gpa[level - 2],
++						  &curr_pte, sizeof(curr_pte));
++			if (r || curr_pte != walker->ptes[level - 2]) {
++				kvm_release_page_clean(page);
++				return NULL;
++			}
++		}
++		shadow_addr = __pa(shadow_page->spt);
++		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
++			| PT_WRITABLE_MASK | PT_USER_MASK;
++		*shadow_ent = shadow_pte;
++	}
++
++	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
++		     user_fault, write_fault,
++		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
++		     ptwrite, walker->gfn, page);
++
++	return shadow_ent;
++}
++
++/*
++ * Page fault handler.  There are several causes for a page fault:
++ *   - there is no shadow pte for the guest pte
++ *   - write access through a shadow pte marked read only so that we can set
++ *     the dirty bit
++ *   - write access to a shadow pte marked read only so we can update the page
++ *     dirty bitmap, when userspace requests it
++ *   - mmio access; in this case we will never install a present shadow pte
++ *   - normal guest page fault due to the guest pte marked not present, not
++ *     writable, or not executable
++ *
++ *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
++ *           a negative value on error.
++ */
++static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
++			       u32 error_code)
++{
++	int write_fault = error_code & PFERR_WRITE_MASK;
++	int user_fault = error_code & PFERR_USER_MASK;
++	int fetch_fault = error_code & PFERR_FETCH_MASK;
++	struct guest_walker walker;
++	u64 *shadow_pte;
++	int write_pt = 0;
++	int r;
++	struct page *page;
++
++	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
++	kvm_mmu_audit(vcpu, "pre page fault");
++
++	r = mmu_topup_memory_caches(vcpu);
++	if (r)
++		return r;
++
++	down_read(&current->mm->mmap_sem);
++	/*
++	 * Walk the guest page tables for the faulting address.
++	 */
++	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
++			     fetch_fault);
++
++	/*
++	 * The page is not mapped by the guest.  Let the guest handle it.
++	 */
++	if (!r) {
++		pgprintk("%s: guest page fault\n", __FUNCTION__);
++		inject_page_fault(vcpu, addr, walker.error_code);
++		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
++		up_read(&current->mm->mmap_sem);
++		return 0;
++	}
++
++	page = gfn_to_page(vcpu->kvm, walker.gfn);
++
++	spin_lock(&vcpu->kvm->mmu_lock);
++	kvm_mmu_free_some_pages(vcpu);
++	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
++				  &write_pt, page);
++	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
++		 shadow_pte, *shadow_pte, write_pt);
++
++	if (!write_pt)
++		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
++
++	/*
++	 * mmio: emulate if accessible, otherwise it's a guest fault.
++	 */
++	if (shadow_pte && is_io_pte(*shadow_pte)) {
++		spin_unlock(&vcpu->kvm->mmu_lock);
++		up_read(&current->mm->mmap_sem);
++		return 1;
++	}
++
++	++vcpu->stat.pf_fixed;
++	kvm_mmu_audit(vcpu, "post page fault (fixed)");
++	spin_unlock(&vcpu->kvm->mmu_lock);
++	up_read(&current->mm->mmap_sem);
++
++	return write_pt;
++}
++
++static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
++{
++	struct guest_walker walker;
++	gpa_t gpa = UNMAPPED_GVA;
++	int r;
++
++	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
++
++	if (r) {
++		gpa = gfn_to_gpa(walker.gfn);
++		gpa |= vaddr & ~PAGE_MASK;
++	}
++
++	return gpa;
++}
++
++static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
++				 struct kvm_mmu_page *sp)
++{
++	int i, offset = 0, r = 0;
++	pt_element_t pt;
++
++	if (sp->role.metaphysical
++	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
++		nonpaging_prefetch_page(vcpu, sp);
++		return;
++	}
++
++	if (PTTYPE == 32)
++		offset = sp->role.quadrant << PT64_LEVEL_BITS;
++
++	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
++		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
++		pte_gpa += (i+offset) * sizeof(pt_element_t);
++
++		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
++					  sizeof(pt_element_t));
++		if (r || is_present_pte(pt))
++			sp->spt[i] = shadow_trap_nonpresent_pte;
++		else
++			sp->spt[i] = shadow_notrap_nonpresent_pte;
++	}
++}
++
++#undef pt_element_t
++#undef guest_walker
++#undef FNAME
++#undef PT_BASE_ADDR_MASK
++#undef PT_INDEX
++#undef SHADOW_PT_INDEX
++#undef PT_LEVEL_MASK
++#undef PT_DIR_BASE_ADDR_MASK
++#undef PT_LEVEL_BITS
++#undef PT_MAX_FULL_LEVELS
++#undef gpte_to_gfn
++#undef gpte_to_gfn_pde
++#undef CMPXCHG
+diff --git a/arch/x86/kvm/segment_descriptor.h b/arch/x86/kvm/segment_descriptor.h
+new file mode 100644
+index 0000000..56fc4c8
+--- /dev/null
++++ b/arch/x86/kvm/segment_descriptor.h
+@@ -0,0 +1,29 @@
++#ifndef __SEGMENT_DESCRIPTOR_H
++#define __SEGMENT_DESCRIPTOR_H
++
++struct segment_descriptor {
++	u16 limit_low;
++	u16 base_low;
++	u8  base_mid;
++	u8  type : 4;
++	u8  system : 1;
++	u8  dpl : 2;
++	u8  present : 1;
++	u8  limit_high : 4;
++	u8  avl : 1;
++	u8  long_mode : 1;
++	u8  default_op : 1;
++	u8  granularity : 1;
++	u8  base_high;
++} __attribute__((packed));
++
++#ifdef CONFIG_X86_64
++/* LDT or TSS descriptor in the GDT. 16 bytes. */
++struct segment_descriptor_64 {
++	struct segment_descriptor s;
++	u32 base_higher;
++	u32 pad_zero;
++};
++
++#endif
++#endif
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+new file mode 100644
+index 0000000..de755cb
+--- /dev/null
++++ b/arch/x86/kvm/svm.c
+@@ -0,0 +1,1731 @@
++/*
++ * Kernel-based Virtual Machine driver for Linux
++ *
++ * AMD SVM support
++ *
++ * Copyright (C) 2006 Qumranet, Inc.
++ *
++ * Authors:
++ *   Yaniv Kamay  <yaniv at qumranet.com>
++ *   Avi Kivity   <avi at qumranet.com>
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
++ *
++ */
++#include <linux/kvm_host.h>
++
++#include "kvm_svm.h"
++#include "irq.h"
++#include "mmu.h"
 +
-+#include <linux/errno.h>
 +#include <linux/module.h>
-+#include <linux/efi.h>
-+#include <linux/bcd.h>
++#include <linux/kernel.h>
++#include <linux/vmalloc.h>
 +#include <linux/highmem.h>
++#include <linux/sched.h>
 +
-+#include <asm/bug.h>
-+#include <asm/paravirt.h>
 +#include <asm/desc.h>
-+#include <asm/setup.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/time.h>
-+#include <asm/irq.h>
-+#include <asm/delay.h>
-+#include <asm/fixmap.h>
-+#include <asm/apic.h>
-+#include <asm/tlbflush.h>
-+#include <asm/timer.h>
 +
-+/* nop stub */
-+void _paravirt_nop(void)
++MODULE_AUTHOR("Qumranet");
++MODULE_LICENSE("GPL");
++
++#define IOPM_ALLOC_ORDER 2
++#define MSRPM_ALLOC_ORDER 1
++
++#define DB_VECTOR 1
++#define UD_VECTOR 6
++#define GP_VECTOR 13
++
++#define DR7_GD_MASK (1 << 13)
++#define DR6_BD_MASK (1 << 13)
++
++#define SEG_TYPE_LDT 2
++#define SEG_TYPE_BUSY_TSS16 3
++
++#define SVM_FEATURE_NPT  (1 << 0)
++#define SVM_FEATURE_LBRV (1 << 1)
++#define SVM_DEATURE_SVML (1 << 2)
++
++static void kvm_reput_irq(struct vcpu_svm *svm);
++
++static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 +{
++	return container_of(vcpu, struct vcpu_svm, vcpu);
 +}
 +
-+static void __init default_banner(void)
++unsigned long iopm_base;
++unsigned long msrpm_base;
++
++struct kvm_ldttss_desc {
++	u16 limit0;
++	u16 base0;
++	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
++	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
++	u32 base3;
++	u32 zero1;
++} __attribute__((packed));
++
++struct svm_cpu_data {
++	int cpu;
++
++	u64 asid_generation;
++	u32 max_asid;
++	u32 next_asid;
++	struct kvm_ldttss_desc *tss_desc;
++
++	struct page *save_area;
++};
++
++static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
++static uint32_t svm_features;
++
++struct svm_init_data {
++	int cpu;
++	int r;
++};
++
++static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
++
++#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
++#define MSRS_RANGE_SIZE 2048
++#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
++
++#define MAX_INST_SIZE 15
++
++static inline u32 svm_has(u32 feat)
 +{
-+	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
-+	       pv_info.name);
++	return svm_features & feat;
 +}
 +
-+char *memory_setup(void)
++static inline u8 pop_irq(struct kvm_vcpu *vcpu)
 +{
-+	return pv_init_ops.memory_setup();
++	int word_index = __ffs(vcpu->arch.irq_summary);
++	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
++	int irq = word_index * BITS_PER_LONG + bit_index;
++
++	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
++	if (!vcpu->arch.irq_pending[word_index])
++		clear_bit(word_index, &vcpu->arch.irq_summary);
++	return irq;
 +}
 +
-+/* Simple instruction patching code. */
-+#define DEF_NATIVE(ops, name, code)					\
-+	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
-+	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
++static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
++{
++	set_bit(irq, vcpu->arch.irq_pending);
++	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
++}
 +
-+/* Undefined instruction for dealing with missing ops pointers. */
-+static const unsigned char ud2a[] = { 0x0f, 0x0b };
++static inline void clgi(void)
++{
++	asm volatile (SVM_CLGI);
++}
 +
-+unsigned paravirt_patch_nop(void)
++static inline void stgi(void)
++{
++	asm volatile (SVM_STGI);
++}
++
++static inline void invlpga(unsigned long addr, u32 asid)
++{
++	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
++}
++
++static inline unsigned long kvm_read_cr2(void)
++{
++	unsigned long cr2;
++
++	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
++	return cr2;
++}
++
++static inline void kvm_write_cr2(unsigned long val)
++{
++	asm volatile ("mov %0, %%cr2" :: "r" (val));
++}
++
++static inline unsigned long read_dr6(void)
++{
++	unsigned long dr6;
++
++	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
++	return dr6;
++}
++
++static inline void write_dr6(unsigned long val)
++{
++	asm volatile ("mov %0, %%dr6" :: "r" (val));
++}
++
++static inline unsigned long read_dr7(void)
++{
++	unsigned long dr7;
++
++	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
++	return dr7;
++}
++
++static inline void write_dr7(unsigned long val)
++{
++	asm volatile ("mov %0, %%dr7" :: "r" (val));
++}
++
++static inline void force_new_asid(struct kvm_vcpu *vcpu)
++{
++	to_svm(vcpu)->asid_generation--;
++}
++
++static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
++{
++	force_new_asid(vcpu);
++}
++
++static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
++{
++	if (!(efer & EFER_LMA))
++		efer &= ~EFER_LME;
++
++	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
++	vcpu->arch.shadow_efer = efer;
++}
++
++static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
++				bool has_error_code, u32 error_code)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	svm->vmcb->control.event_inj = nr
++		| SVM_EVTINJ_VALID
++		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
++		| SVM_EVTINJ_TYPE_EXEPT;
++	svm->vmcb->control.event_inj_err = error_code;
++}
++
++static bool svm_exception_injected(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
++}
++
++static int is_external_interrupt(u32 info)
++{
++	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
++	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
++}
++
++static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	if (!svm->next_rip) {
++		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
++		return;
++	}
++	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
++		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
++		       __FUNCTION__,
++		       svm->vmcb->save.rip,
++		       svm->next_rip);
++
++	vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
++	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
++
++	vcpu->arch.interrupt_window_open = 1;
++}
++
++static int has_svm(void)
++{
++	uint32_t eax, ebx, ecx, edx;
++
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
++		printk(KERN_INFO "has_svm: not amd\n");
++		return 0;
++	}
++
++	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
++	if (eax < SVM_CPUID_FUNC) {
++		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
++		return 0;
++	}
++
++	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
++	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
++		printk(KERN_DEBUG "has_svm: svm not available\n");
++		return 0;
++	}
++	return 1;
++}
++
++static void svm_hardware_disable(void *garbage)
++{
++	struct svm_cpu_data *svm_data
++		= per_cpu(svm_data, raw_smp_processor_id());
++
++	if (svm_data) {
++		uint64_t efer;
++
++		wrmsrl(MSR_VM_HSAVE_PA, 0);
++		rdmsrl(MSR_EFER, efer);
++		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
++		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
++		__free_page(svm_data->save_area);
++		kfree(svm_data);
++	}
++}
++
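++/*
++ * Per-cpu enable path: reads the SVM cpuid leaf for ASID limits and
++ * features, sets EFER.SVME and points MSR_VM_HSAVE_PA at this cpu's
++ * host save area; svm_hardware_disable() undoes both MSR writes.
++ */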
++static void svm_hardware_enable(void *garbage)
++{
++
++	struct svm_cpu_data *svm_data;
++	uint64_t efer;
++#ifdef CONFIG_X86_64
++	struct desc_ptr gdt_descr;
++#else
++	struct desc_ptr gdt_descr;
++#endif
++	struct desc_struct *gdt;
++	int me = raw_smp_processor_id();
++
++	if (!has_svm()) {
++		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
++		return;
++	}
++	svm_data = per_cpu(svm_data, me);
++
++	if (!svm_data) {
++		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
++		       me);
++		return;
++	}
++
++	svm_data->asid_generation = 1;
++	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
++	svm_data->next_asid = svm_data->max_asid + 1;
++	svm_features = cpuid_edx(SVM_CPUID_FUNC);
++
++	asm volatile ("sgdt %0" : "=m"(gdt_descr));
++	gdt = (struct desc_struct *)gdt_descr.address;
++	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
++
++	rdmsrl(MSR_EFER, efer);
++	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);
++
++	wrmsrl(MSR_VM_HSAVE_PA,
++	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
++}
++
++static int svm_cpu_init(int cpu)
 +{
++	struct svm_cpu_data *svm_data;
++	int r;
++
++	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
++	if (!svm_data)
++		return -ENOMEM;
++	svm_data->cpu = cpu;
++	svm_data->save_area = alloc_page(GFP_KERNEL);
++	r = -ENOMEM;
++	if (!svm_data->save_area)
++		goto err_1;
++
++	per_cpu(svm_data, cpu) = svm_data;
++
 +	return 0;
++
++err_1:
++	kfree(svm_data);
++	return r;
++
 +}
 +
-+unsigned paravirt_patch_ignore(unsigned len)
++static void set_msr_interception(u32 *msrpm, unsigned msr,
++				 int read, int write)
 +{
-+	return len;
++	int i;
++
++	for (i = 0; i < NUM_MSR_MAPS; i++) {
++		if (msr >= msrpm_ranges[i] &&
++		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
++			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
++					  msrpm_ranges[i]) * 2;
++
++			u32 *base = msrpm + (msr_offset / 32);
++			u32 msr_shift = msr_offset % 32;
++			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
++			*base = (*base & ~(0x3 << msr_shift)) |
++				(mask << msr_shift);
++			return;
++		}
++	}
++	BUG();
 +}
 +
-+struct branch {
-+	unsigned char opcode;
-+	u32 delta;
-+} __attribute__((packed));
++static __init int svm_hardware_setup(void)
++{
++	int cpu;
++	struct page *iopm_pages;
++	struct page *msrpm_pages;
++	void *iopm_va, *msrpm_va;
++	int r;
 +
-+unsigned paravirt_patch_call(void *insnbuf,
-+			     const void *target, u16 tgt_clobbers,
-+			     unsigned long addr, u16 site_clobbers,
-+			     unsigned len)
++	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
++
++	if (!iopm_pages)
++		return -ENOMEM;
++
++	iopm_va = page_address(iopm_pages);
++	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
++	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
++	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
++
++
++	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
++
++	r = -ENOMEM;
++	if (!msrpm_pages)
++		goto err_1;
++
++	msrpm_va = page_address(msrpm_pages);
++	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
++	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
++
++#ifdef CONFIG_X86_64
++	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
++	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
++	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
++	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
++	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
++	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
++#endif
++	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
++	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
++	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
++	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
++
++	for_each_online_cpu(cpu) {
++		r = svm_cpu_init(cpu);
++		if (r)
++			goto err_2;
++	}
++	return 0;
++
++err_2:
++	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
++	msrpm_base = 0;
++err_1:
++	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
++	iopm_base = 0;
++	return r;
++}
++
++static __exit void svm_hardware_unsetup(void)
 +{
-+	struct branch *b = insnbuf;
-+	unsigned long delta = (unsigned long)target - (addr+5);
++	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
++	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
++	iopm_base = msrpm_base = 0;
++}
++
++static void init_seg(struct vmcb_seg *seg)
++{
++	seg->selector = 0;
++	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
++		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
++	seg->limit = 0xffff;
++	seg->base = 0;
++}
++
++static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
++{
++	seg->selector = 0;
++	seg->attrib = SVM_SELECTOR_P_MASK | type;
++	seg->limit = 0xffff;
++	seg->base = 0;
++}
++
++static void init_vmcb(struct vmcb *vmcb)
++{
++	struct vmcb_control_area *control = &vmcb->control;
++	struct vmcb_save_area *save = &vmcb->save;
++
++	control->intercept_cr_read = 	INTERCEPT_CR0_MASK |
++					INTERCEPT_CR3_MASK |
++					INTERCEPT_CR4_MASK |
++					INTERCEPT_CR8_MASK;
++
++	control->intercept_cr_write = 	INTERCEPT_CR0_MASK |
++					INTERCEPT_CR3_MASK |
++					INTERCEPT_CR4_MASK |
++					INTERCEPT_CR8_MASK;
++
++	control->intercept_dr_read = 	INTERCEPT_DR0_MASK |
++					INTERCEPT_DR1_MASK |
++					INTERCEPT_DR2_MASK |
++					INTERCEPT_DR3_MASK;
++
++	control->intercept_dr_write = 	INTERCEPT_DR0_MASK |
++					INTERCEPT_DR1_MASK |
++					INTERCEPT_DR2_MASK |
++					INTERCEPT_DR3_MASK |
++					INTERCEPT_DR5_MASK |
++					INTERCEPT_DR7_MASK;
++
++	control->intercept_exceptions = (1 << PF_VECTOR) |
++					(1 << UD_VECTOR);
++
++
++	control->intercept = 	(1ULL << INTERCEPT_INTR) |
++				(1ULL << INTERCEPT_NMI) |
++				(1ULL << INTERCEPT_SMI) |
++		/*
++		 * selective cr0 intercept bug?
++		 *    	0:   0f 22 d8                mov    %eax,%cr3
++		 *	3:   0f 20 c0                mov    %cr0,%eax
++		 *	6:   0d 00 00 00 80          or     $0x80000000,%eax
++		 *	b:   0f 22 c0                mov    %eax,%cr0
++		 * set cr3 ->interception
++		 * get cr0 ->interception
++		 * set cr0 -> no interception
++		 */
++		/*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
++				(1ULL << INTERCEPT_CPUID) |
++				(1ULL << INTERCEPT_INVD) |
++				(1ULL << INTERCEPT_HLT) |
++				(1ULL << INTERCEPT_INVLPGA) |
++				(1ULL << INTERCEPT_IOIO_PROT) |
++				(1ULL << INTERCEPT_MSR_PROT) |
++				(1ULL << INTERCEPT_TASK_SWITCH) |
++				(1ULL << INTERCEPT_SHUTDOWN) |
++				(1ULL << INTERCEPT_VMRUN) |
++				(1ULL << INTERCEPT_VMMCALL) |
++				(1ULL << INTERCEPT_VMLOAD) |
++				(1ULL << INTERCEPT_VMSAVE) |
++				(1ULL << INTERCEPT_STGI) |
++				(1ULL << INTERCEPT_CLGI) |
++				(1ULL << INTERCEPT_SKINIT) |
++				(1ULL << INTERCEPT_WBINVD) |
++				(1ULL << INTERCEPT_MONITOR) |
++				(1ULL << INTERCEPT_MWAIT);
++
++	control->iopm_base_pa = iopm_base;
++	control->msrpm_base_pa = msrpm_base;
++	control->tsc_offset = 0;
++	control->int_ctl = V_INTR_MASKING_MASK;
++
++	init_seg(&save->es);
++	init_seg(&save->ss);
++	init_seg(&save->ds);
++	init_seg(&save->fs);
++	init_seg(&save->gs);
++
++	save->cs.selector = 0xf000;
++	/* Executable/Readable Code Segment */
++	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
++		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
++	save->cs.limit = 0xffff;
++	/*
++	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
++	 * be consistent with it.
++	 *
++	 * Replace when we have real mode working for vmx.
++	 */
++	save->cs.base = 0xf0000;
 +
-+	if (tgt_clobbers & ~site_clobbers)
-+		return len;	/* target would clobber too much for this site */
-+	if (len < 5)
-+		return len;	/* call too long for patch site */
++	save->gdtr.limit = 0xffff;
++	save->idtr.limit = 0xffff;
 +
-+	b->opcode = 0xe8; /* call */
-+	b->delta = delta;
-+	BUILD_BUG_ON(sizeof(*b) != 5);
++	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
++	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
 +
-+	return 5;
++	save->efer = MSR_EFER_SVME_MASK;
++	save->dr6 = 0xffff0ff0;
++	save->dr7 = 0x400;
++	save->rflags = 2;
++	save->rip = 0x0000fff0;
++
++	/*
++	 * The architectural cr0 value at cpu init is 0x60000010 (caches
++	 * disabled); we enable the cpu cache by default.  The orderly way
++	 * would be to enable the cache from the BIOS.
++	 */
++	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
++	save->cr4 = X86_CR4_PAE;
++	/* rdx = ?? */
 +}
 +
-+unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
-+			    unsigned long addr, unsigned len)
++static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 +{
-+	struct branch *b = insnbuf;
-+	unsigned long delta = (unsigned long)target - (addr+5);
++	struct vcpu_svm *svm = to_svm(vcpu);
 +
-+	if (len < 5)
-+		return len;	/* call too long for patch site */
++	init_vmcb(svm->vmcb);
 +
-+	b->opcode = 0xe9;	/* jmp */
-+	b->delta = delta;
++	if (vcpu->vcpu_id != 0) {
++		svm->vmcb->save.rip = 0;
++		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
++		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
++	}
 +
-+	return 5;
++	return 0;
 +}
 +
-+/* Neat trick to map patch type back to the call within the
-+ * corresponding structure. */
-+static void *get_call_destination(u8 type)
++static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 +{
-+	struct paravirt_patch_template tmpl = {
-+		.pv_init_ops = pv_init_ops,
-+		.pv_time_ops = pv_time_ops,
-+		.pv_cpu_ops = pv_cpu_ops,
-+		.pv_irq_ops = pv_irq_ops,
-+		.pv_apic_ops = pv_apic_ops,
-+		.pv_mmu_ops = pv_mmu_ops,
-+	};
-+	return *((void **)&tmpl + type);
++	struct vcpu_svm *svm;
++	struct page *page;
++	int err;
++
++	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
++	if (!svm) {
++		err = -ENOMEM;
++		goto out;
++	}
++
++	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
++	if (err)
++		goto free_svm;
++
++	page = alloc_page(GFP_KERNEL);
++	if (!page) {
++		err = -ENOMEM;
++		goto uninit;
++	}
++
++	svm->vmcb = page_address(page);
++	clear_page(svm->vmcb);
++	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
++	svm->asid_generation = 0;
++	memset(svm->db_regs, 0, sizeof(svm->db_regs));
++	init_vmcb(svm->vmcb);
++
++	fx_init(&svm->vcpu);
++	svm->vcpu.fpu_active = 1;
++	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
++	if (svm->vcpu.vcpu_id == 0)
++		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
++
++	return &svm->vcpu;
++
++uninit:
++	kvm_vcpu_uninit(&svm->vcpu);
++free_svm:
++	kmem_cache_free(kvm_vcpu_cache, svm);
++out:
++	return ERR_PTR(err);
 +}
 +
-+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
-+				unsigned long addr, unsigned len)
++static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 +{
-+	void *opfunc = get_call_destination(type);
-+	unsigned ret;
++	struct vcpu_svm *svm = to_svm(vcpu);
 +
-+	if (opfunc == NULL)
-+		/* If there's no function, patch it with a ud2a (BUG) */
-+		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-+	else if (opfunc == paravirt_nop)
-+		/* If the operation is a nop, then nop the callsite */
-+		ret = paravirt_patch_nop();
-+	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-+		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret))
-+		/* If operation requires a jmp, then jmp */
-+		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
-+	else
-+		/* Otherwise call the function; assume target could
-+		   clobber any caller-save reg */
-+		ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
-+					  addr, clobbers, len);
++	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
++	kvm_vcpu_uninit(vcpu);
++	kmem_cache_free(kvm_vcpu_cache, svm);
++}
 +
-+	return ret;
++static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++	int i;
++
++	if (unlikely(cpu != vcpu->cpu)) {
++		u64 tsc_this, delta;
++
++		/*
++		 * Make sure that the guest sees a monotonically
++		 * increasing TSC.
++		 */
++		rdtscll(tsc_this);
++		delta = vcpu->arch.host_tsc - tsc_this;
++		svm->vmcb->control.tsc_offset += delta;
++		vcpu->cpu = cpu;
++		kvm_migrate_apic_timer(vcpu);
++	}
++
++	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
++		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 +}
 +
-+unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
-+			      const char *start, const char *end)
++static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 +{
-+	unsigned insn_len = end - start;
++	struct vcpu_svm *svm = to_svm(vcpu);
++	int i;
 +
-+	if (insn_len > len || start == NULL)
-+		insn_len = len;
-+	else
-+		memcpy(insnbuf, start, insn_len);
++	++vcpu->stat.host_state_reload;
++	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
++		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 +
-+	return insn_len;
++	rdtscll(vcpu->arch.host_tsc);
 +}
 +
-+void init_IRQ(void)
++static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
 +{
-+	pv_irq_ops.init_IRQ();
 +}
 +
-+static void native_flush_tlb(void)
++static void svm_cache_regs(struct kvm_vcpu *vcpu)
 +{
-+	__native_flush_tlb();
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
++	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
++	vcpu->arch.rip = svm->vmcb->save.rip;
 +}
 +
-+/*
-+ * Global pages have to be flushed a bit differently. Not a real
-+ * performance problem because this does not happen often.
-+ */
-+static void native_flush_tlb_global(void)
++static void svm_decache_regs(struct kvm_vcpu *vcpu)
 +{
-+	__native_flush_tlb_global();
++	struct vcpu_svm *svm = to_svm(vcpu);
++	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
++	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
++	svm->vmcb->save.rip = vcpu->arch.rip;
 +}
 +
-+static void native_flush_tlb_single(unsigned long addr)
++static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 +{
-+	__native_flush_tlb_single(addr);
++	return to_svm(vcpu)->vmcb->save.rflags;
 +}
 +
-+/* These are in entry.S */
-+extern void native_iret(void);
-+extern void native_irq_enable_syscall_ret(void);
++static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
++{
++	to_svm(vcpu)->vmcb->save.rflags = rflags;
++}
 +
-+static int __init print_banner(void)
++static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
 +{
-+	pv_init_ops.banner();
++	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
++
++	switch (seg) {
++	case VCPU_SREG_CS: return &save->cs;
++	case VCPU_SREG_DS: return &save->ds;
++	case VCPU_SREG_ES: return &save->es;
++	case VCPU_SREG_FS: return &save->fs;
++	case VCPU_SREG_GS: return &save->gs;
++	case VCPU_SREG_SS: return &save->ss;
++	case VCPU_SREG_TR: return &save->tr;
++	case VCPU_SREG_LDTR: return &save->ldtr;
++	}
++	BUG();
++	return NULL;
++}
++
++static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
++{
++	struct vmcb_seg *s = svm_seg(vcpu, seg);
++
++	return s->base;
++}
++
++static void svm_get_segment(struct kvm_vcpu *vcpu,
++			    struct kvm_segment *var, int seg)
++{
++	struct vmcb_seg *s = svm_seg(vcpu, seg);
++
++	var->base = s->base;
++	var->limit = s->limit;
++	var->selector = s->selector;
++	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
++	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
++	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
++	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
++	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
++	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
++	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
++	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
++	var->unusable = !var->present;
++}
++
++static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	dt->limit = svm->vmcb->save.idtr.limit;
++	dt->base = svm->vmcb->save.idtr.base;
++}
++
++static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	svm->vmcb->save.idtr.limit = dt->limit;
++	svm->vmcb->save.idtr.base = dt->base;
++}
++
++static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	dt->limit = svm->vmcb->save.gdtr.limit;
++	dt->base = svm->vmcb->save.gdtr.base;
++}
++
++static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	svm->vmcb->save.gdtr.limit = dt->limit;
++	svm->vmcb->save.gdtr.base = dt->base;
++}
++
++static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
++{
++}
++
++static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++#ifdef CONFIG_X86_64
++	if (vcpu->arch.shadow_efer & EFER_LME) {
++		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
++			vcpu->arch.shadow_efer |= EFER_LMA;
++			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
++		}
++
++		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
++			vcpu->arch.shadow_efer &= ~EFER_LMA;
++			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
++		}
++	}
++#endif
++	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
++		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
++		vcpu->fpu_active = 1;
++	}
++
++	vcpu->arch.cr0 = cr0;
++	cr0 |= X86_CR0_PG | X86_CR0_WP;
++	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
++	svm->vmcb->save.cr0 = cr0;
++}
++
++static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
++{
++	vcpu->arch.cr4 = cr4;
++	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
++}
++
++static void svm_set_segment(struct kvm_vcpu *vcpu,
++			    struct kvm_segment *var, int seg)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++	struct vmcb_seg *s = svm_seg(vcpu, seg);
++
++	s->base = var->base;
++	s->limit = var->limit;
++	s->selector = var->selector;
++	if (var->unusable)
++		s->attrib = 0;
++	else {
++		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
++		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
++		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
++		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
++		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
++		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
++		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
++		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
++	}
++	if (seg == VCPU_SREG_CS)
++		svm->vmcb->save.cpl
++			= (svm->vmcb->save.cs.attrib
++			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
++
++}
++
++/* FIXME:
++
++	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
++	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
++
++*/
++
++static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
++{
++	return -EOPNOTSUPP;
++}
++
++static int svm_get_irq(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++	u32 exit_int_info = svm->vmcb->control.exit_int_info;
++
++	if (is_external_interrupt(exit_int_info))
++		return exit_int_info & SVM_EVTINJ_VEC_MASK;
++	return -1;
++}
++
++static void load_host_msrs(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_X86_64
++	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
++#endif
++}
++
++static void save_host_msrs(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_X86_64
++	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
++#endif
++}
++
++static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
++{
++	if (svm_data->next_asid > svm_data->max_asid) {
++		++svm_data->asid_generation;
++		svm_data->next_asid = 1;
++		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
++	}
++
++	svm->vcpu.cpu = svm_data->cpu;
++	svm->asid_generation = svm_data->asid_generation;
++	svm->vmcb->control.asid = svm_data->next_asid++;
++}
++
++static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
++{
++	return to_svm(vcpu)->db_regs[dr];
++}
++
++static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
++		       int *exception)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	*exception = 0;
++
++	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
++		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
++		svm->vmcb->save.dr6 |= DR6_BD_MASK;
++		*exception = DB_VECTOR;
++		return;
++	}
++
++	switch (dr) {
++	case 0 ... 3:
++		svm->db_regs[dr] = value;
++		return;
++	case 4 ... 5:
++		if (vcpu->arch.cr4 & X86_CR4_DE) {
++			*exception = UD_VECTOR;
++			return;
++		}
++	case 7: {
++		if (value & ~((1ULL << 32) - 1)) {
++			*exception = GP_VECTOR;
++			return;
++		}
++		svm->vmcb->save.dr7 = value;
++		return;
++	}
++	default:
++		printk(KERN_DEBUG "%s: unexpected dr %u\n",
++		       __FUNCTION__, dr);
++		*exception = UD_VECTOR;
++		return;
++	}
++}
++
++static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	u32 exit_int_info = svm->vmcb->control.exit_int_info;
++	struct kvm *kvm = svm->vcpu.kvm;
++	u64 fault_address;
++	u32 error_code;
++
++	if (!irqchip_in_kernel(kvm) &&
++		is_external_interrupt(exit_int_info))
++		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
++
++	fault_address  = svm->vmcb->control.exit_info_2;
++	error_code = svm->vmcb->control.exit_info_1;
++	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
++}
++
++static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	int er;
++
++	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
++	if (er != EMULATE_DONE)
++		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
++	return 1;
++}
++
++static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
++	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
++		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
++	svm->vcpu.fpu_active = 1;
++
++	return 1;
++}
++
++static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	/*
++	 * VMCB is undefined after a SHUTDOWN intercept
++	 * so reinitialize it.
++	 */
++	clear_page(svm->vmcb);
++	init_vmcb(svm->vmcb);
++
++	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
 +	return 0;
 +}
-+core_initcall(print_banner);
 +
-+static struct resource reserve_ioports = {
-+	.start = 0,
-+	.end = IO_SPACE_LIMIT,
-+	.name = "paravirt-ioport",
-+	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
-+};
++static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
++	int size, down, in, string, rep;
++	unsigned port;
 +
-+static struct resource reserve_iomem = {
-+	.start = 0,
-+	.end = -1,
-+	.name = "paravirt-iomem",
-+	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
++	++svm->vcpu.stat.io_exits;
++
++	svm->next_rip = svm->vmcb->control.exit_info_2;
++
++	string = (io_info & SVM_IOIO_STR_MASK) != 0;
++
++	if (string) {
++		if (emulate_instruction(&svm->vcpu,
++					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
++			return 0;
++		return 1;
++	}
++
++	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
++	port = io_info >> 16;
++	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
++	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
++	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
++
++	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
++}
++
++static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	return 1;
++}
++
++static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	svm->next_rip = svm->vmcb->save.rip + 1;
++	skip_emulated_instruction(&svm->vcpu);
++	return kvm_emulate_halt(&svm->vcpu);
++}
++
++static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	svm->next_rip = svm->vmcb->save.rip + 3;
++	skip_emulated_instruction(&svm->vcpu);
++	kvm_emulate_hypercall(&svm->vcpu);
++	return 1;
++}
++
++static int invalid_op_interception(struct vcpu_svm *svm,
++				   struct kvm_run *kvm_run)
++{
++	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
++	return 1;
++}
++
++static int task_switch_interception(struct vcpu_svm *svm,
++				    struct kvm_run *kvm_run)
++{
++	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
++	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
++	return 0;
++}
++
++static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	svm->next_rip = svm->vmcb->save.rip + 2;
++	kvm_emulate_cpuid(&svm->vcpu);
++	return 1;
++}
++
++static int emulate_on_interception(struct vcpu_svm *svm,
++				   struct kvm_run *kvm_run)
++{
++	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
++		pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
++	return 1;
++}
++
++static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
++	if (irqchip_in_kernel(svm->vcpu.kvm))
++		return 1;
++	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
++	return 0;
++}
++
++static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	switch (ecx) {
++	case MSR_IA32_TIME_STAMP_COUNTER: {
++		u64 tsc;
++
++		rdtscll(tsc);
++		*data = svm->vmcb->control.tsc_offset + tsc;
++		break;
++	}
++	case MSR_K6_STAR:
++		*data = svm->vmcb->save.star;
++		break;
++#ifdef CONFIG_X86_64
++	case MSR_LSTAR:
++		*data = svm->vmcb->save.lstar;
++		break;
++	case MSR_CSTAR:
++		*data = svm->vmcb->save.cstar;
++		break;
++	case MSR_KERNEL_GS_BASE:
++		*data = svm->vmcb->save.kernel_gs_base;
++		break;
++	case MSR_SYSCALL_MASK:
++		*data = svm->vmcb->save.sfmask;
++		break;
++#endif
++	case MSR_IA32_SYSENTER_CS:
++		*data = svm->vmcb->save.sysenter_cs;
++		break;
++	case MSR_IA32_SYSENTER_EIP:
++		*data = svm->vmcb->save.sysenter_eip;
++		break;
++	case MSR_IA32_SYSENTER_ESP:
++		*data = svm->vmcb->save.sysenter_esp;
++		break;
++	default:
++		return kvm_get_msr_common(vcpu, ecx, data);
++	}
++	return 0;
++}
++
++static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
++	u64 data;
++
++	if (svm_get_msr(&svm->vcpu, ecx, &data))
++		kvm_inject_gp(&svm->vcpu, 0);
++	else {
++		svm->vmcb->save.rax = data & 0xffffffff;
++		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
++		svm->next_rip = svm->vmcb->save.rip + 2;
++		skip_emulated_instruction(&svm->vcpu);
++	}
++	return 1;
++}
++
++static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	switch (ecx) {
++	case MSR_IA32_TIME_STAMP_COUNTER: {
++		u64 tsc;
++
++		rdtscll(tsc);
++		svm->vmcb->control.tsc_offset = data - tsc;
++		break;
++	}
++	case MSR_K6_STAR:
++		svm->vmcb->save.star = data;
++		break;
++#ifdef CONFIG_X86_64
++	case MSR_LSTAR:
++		svm->vmcb->save.lstar = data;
++		break;
++	case MSR_CSTAR:
++		svm->vmcb->save.cstar = data;
++		break;
++	case MSR_KERNEL_GS_BASE:
++		svm->vmcb->save.kernel_gs_base = data;
++		break;
++	case MSR_SYSCALL_MASK:
++		svm->vmcb->save.sfmask = data;
++		break;
++#endif
++	case MSR_IA32_SYSENTER_CS:
++		svm->vmcb->save.sysenter_cs = data;
++		break;
++	case MSR_IA32_SYSENTER_EIP:
++		svm->vmcb->save.sysenter_eip = data;
++		break;
++	case MSR_IA32_SYSENTER_ESP:
++		svm->vmcb->save.sysenter_esp = data;
++		break;
++	case MSR_K7_EVNTSEL0:
++	case MSR_K7_EVNTSEL1:
++	case MSR_K7_EVNTSEL2:
++	case MSR_K7_EVNTSEL3:
++		/*
++		 * Only writing 0 to the performance counters is supported for
++		 * now, to keep Windows happy. This should be replaced by real
++		 * performance counter emulation later.
++		 */
++		if (data != 0)
++			goto unhandled;
++		break;
++	default:
++	unhandled:
++		return kvm_set_msr_common(vcpu, ecx, data);
++	}
++	return 0;
++}
++
++static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
++	u64 data = (svm->vmcb->save.rax & -1u)
++		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
++	svm->next_rip = svm->vmcb->save.rip + 2;
++	if (svm_set_msr(&svm->vcpu, ecx, data))
++		kvm_inject_gp(&svm->vcpu, 0);
++	else
++		skip_emulated_instruction(&svm->vcpu);
++	return 1;
++}
++
++static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
++{
++	if (svm->vmcb->control.exit_info_1)
++		return wrmsr_interception(svm, kvm_run);
++	else
++		return rdmsr_interception(svm, kvm_run);
++}
++
++static int interrupt_window_interception(struct vcpu_svm *svm,
++				   struct kvm_run *kvm_run)
++{
++	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
++	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
++	/*
++	 * If user space is waiting to inject interrupts, exit as soon as
++	 * possible.
++	 */
++	if (kvm_run->request_interrupt_window &&
++	    !svm->vcpu.arch.irq_summary) {
++		++svm->vcpu.stat.irq_window_exits;
++		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
++		return 0;
++	}
++
++	return 1;
++}
++
++static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
++				      struct kvm_run *kvm_run) = {
++	[SVM_EXIT_READ_CR0]           		= emulate_on_interception,
++	[SVM_EXIT_READ_CR3]           		= emulate_on_interception,
++	[SVM_EXIT_READ_CR4]           		= emulate_on_interception,
++	[SVM_EXIT_READ_CR8]           		= emulate_on_interception,
++	/* for now: */
++	[SVM_EXIT_WRITE_CR0]          		= emulate_on_interception,
++	[SVM_EXIT_WRITE_CR3]          		= emulate_on_interception,
++	[SVM_EXIT_WRITE_CR4]          		= emulate_on_interception,
++	[SVM_EXIT_WRITE_CR8]          		= cr8_write_interception,
++	[SVM_EXIT_READ_DR0] 			= emulate_on_interception,
++	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
++	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
++	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
++	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
++	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
++	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
++	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
++	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
++	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
++	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
++	[SVM_EXIT_EXCP_BASE + PF_VECTOR] 	= pf_interception,
++	[SVM_EXIT_EXCP_BASE + NM_VECTOR] 	= nm_interception,
++	[SVM_EXIT_INTR] 			= nop_on_interception,
++	[SVM_EXIT_NMI]				= nop_on_interception,
++	[SVM_EXIT_SMI]				= nop_on_interception,
++	[SVM_EXIT_INIT]				= nop_on_interception,
++	[SVM_EXIT_VINTR]			= interrupt_window_interception,
++	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
++	[SVM_EXIT_CPUID]			= cpuid_interception,
++	[SVM_EXIT_INVD]                         = emulate_on_interception,
++	[SVM_EXIT_HLT]				= halt_interception,
++	[SVM_EXIT_INVLPG]			= emulate_on_interception,
++	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
++	[SVM_EXIT_IOIO] 		  	= io_interception,
++	[SVM_EXIT_MSR]				= msr_interception,
++	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
++	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
++	[SVM_EXIT_VMRUN]			= invalid_op_interception,
++	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
++	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
++	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
++	[SVM_EXIT_STGI]				= invalid_op_interception,
++	[SVM_EXIT_CLGI]				= invalid_op_interception,
++	[SVM_EXIT_SKINIT]			= invalid_op_interception,
++	[SVM_EXIT_WBINVD]                       = emulate_on_interception,
++	[SVM_EXIT_MONITOR]			= invalid_op_interception,
++	[SVM_EXIT_MWAIT]			= invalid_op_interception,
 +};
 +
-+/*
-+ * Reserve the whole legacy IO space to prevent any legacy drivers
-+ * from wasting time probing for their hardware.  This is a fairly
-+ * brute-force approach to disabling all non-virtual drivers.
-+ *
-+ * Note that this must be called very early to have any effect.
-+ */
-+int paravirt_disable_iospace(void)
++
++static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 +{
-+	int ret;
++	struct vcpu_svm *svm = to_svm(vcpu);
++	u32 exit_code = svm->vmcb->control.exit_code;
 +
-+	ret = request_resource(&ioport_resource, &reserve_ioports);
-+	if (ret == 0) {
-+		ret = request_resource(&iomem_resource, &reserve_iomem);
-+		if (ret)
-+			release_resource(&reserve_ioports);
++	kvm_reput_irq(svm);
++
++	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
++		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
++		kvm_run->fail_entry.hardware_entry_failure_reason
++			= svm->vmcb->control.exit_code;
++		return 0;
 +	}
 +
-+	return ret;
++	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
++	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
++		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
++		       "exit_code 0x%x\n",
++		       __FUNCTION__, svm->vmcb->control.exit_int_info,
++		       exit_code);
++
++	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
++	    || !svm_exit_handlers[exit_code]) {
++		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
++		kvm_run->hw.hardware_exit_reason = exit_code;
++		return 0;
++	}
++
++	return svm_exit_handlers[exit_code](svm, kvm_run);
 +}
 +
-+static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
++static void reload_tss(struct kvm_vcpu *vcpu)
++{
++	int cpu = raw_smp_processor_id();
 +
-+static inline void enter_lazy(enum paravirt_lazy_mode mode)
++	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
++	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
++	load_TR_desc();
++}
++
++static void pre_svm_run(struct vcpu_svm *svm)
 +{
-+	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
-+	BUG_ON(preemptible());
++	int cpu = raw_smp_processor_id();
 +
-+	__get_cpu_var(paravirt_lazy_mode) = mode;
++	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
++
++	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
++	if (svm->vcpu.cpu != cpu ||
++	    svm->asid_generation != svm_data->asid_generation)
++		new_asid(svm, svm_data);
 +}
 +
-+void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
++
++static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 +{
-+	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
-+	BUG_ON(preemptible());
++	struct vmcb_control_area *control;
 +
-+	__get_cpu_var(paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
++	control = &svm->vmcb->control;
++	control->int_vector = irq;
++	control->int_ctl &= ~V_INTR_PRIO_MASK;
++	control->int_ctl |= V_IRQ_MASK |
++		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 +}
 +
-+void paravirt_enter_lazy_mmu(void)
++static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
 +{
-+	enter_lazy(PARAVIRT_LAZY_MMU);
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	svm_inject_irq(svm, irq);
 +}
 +
-+void paravirt_leave_lazy_mmu(void)
++static void svm_intr_assist(struct kvm_vcpu *vcpu)
 +{
-+	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
++	struct vcpu_svm *svm = to_svm(vcpu);
++	struct vmcb *vmcb = svm->vmcb;
++	int intr_vector = -1;
++
++	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
++	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
++		intr_vector = vmcb->control.exit_int_info &
++			      SVM_EVTINJ_VEC_MASK;
++		vmcb->control.exit_int_info = 0;
++		svm_inject_irq(svm, intr_vector);
++		return;
++	}
++
++	if (vmcb->control.int_ctl & V_IRQ_MASK)
++		return;
++
++	if (!kvm_cpu_has_interrupt(vcpu))
++		return;
++
++	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
++	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
++	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
++		/* unable to deliver irq, set pending irq */
++		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
++		svm_inject_irq(svm, 0x0);
++		return;
++	}
++	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
++	intr_vector = kvm_cpu_get_interrupt(vcpu);
++	svm_inject_irq(svm, intr_vector);
++	kvm_timer_intr_post(vcpu, intr_vector);
 +}
 +
-+void paravirt_enter_lazy_cpu(void)
++static void kvm_reput_irq(struct vcpu_svm *svm)
 +{
-+	enter_lazy(PARAVIRT_LAZY_CPU);
++	struct vmcb_control_area *control = &svm->vmcb->control;
++
++	if ((control->int_ctl & V_IRQ_MASK)
++	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
++		control->int_ctl &= ~V_IRQ_MASK;
++		push_irq(&svm->vcpu, control->int_vector);
++	}
++
++	svm->vcpu.arch.interrupt_window_open =
++		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
 +}
 +
-+void paravirt_leave_lazy_cpu(void)
++static void svm_do_inject_vector(struct vcpu_svm *svm)
 +{
-+	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
++	struct kvm_vcpu *vcpu = &svm->vcpu;
++	int word_index = __ffs(vcpu->arch.irq_summary);
++	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
++	int irq = word_index * BITS_PER_LONG + bit_index;
++
++	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
++	if (!vcpu->arch.irq_pending[word_index])
++		clear_bit(word_index, &vcpu->arch.irq_summary);
++	svm_inject_irq(svm, irq);
 +}
 +
-+enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
++static void do_interrupt_requests(struct kvm_vcpu *vcpu,
++				       struct kvm_run *kvm_run)
 +{
-+	return __get_cpu_var(paravirt_lazy_mode);
++	struct vcpu_svm *svm = to_svm(vcpu);
++	struct vmcb_control_area *control = &svm->vmcb->control;
++
++	svm->vcpu.arch.interrupt_window_open =
++		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
++		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
++
++	if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
++		/*
++		 * Interrupts are enabled and not blocked by sti or mov ss; inject.
++		 */
++		svm_do_inject_vector(svm);
++
++	/*
++	 * Interrupts blocked.  Wait for unblock.
++	 */
++	if (!svm->vcpu.arch.interrupt_window_open &&
++	    (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
++		control->intercept |= 1ULL << INTERCEPT_VINTR;
++	 else
++		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
 +}
 +
-+struct pv_info pv_info = {
-+	.name = "bare hardware",
-+	.paravirt_enabled = 0,
-+	.kernel_rpl = 0,
-+	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
-+};
++static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
++{
++	return 0;
++}
 +
-+struct pv_init_ops pv_init_ops = {
-+	.patch = native_patch,
-+	.banner = default_banner,
-+	.arch_setup = paravirt_nop,
-+	.memory_setup = machine_specific_memory_setup,
++static void save_db_regs(unsigned long *db_regs)
++{
++	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
++	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
++	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
++	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
++}
++
++static void load_db_regs(unsigned long *db_regs)
++{
++	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
++	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
++	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
++	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
++}
++
++static void svm_flush_tlb(struct kvm_vcpu *vcpu)
++{
++	force_new_asid(vcpu);
++}
++
++static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
++{
++}
++
++static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++	u16 fs_selector;
++	u16 gs_selector;
++	u16 ldt_selector;
++
++	pre_svm_run(svm);
++
++	save_host_msrs(vcpu);
++	fs_selector = read_fs();
++	gs_selector = read_gs();
++	ldt_selector = read_ldt();
++	svm->host_cr2 = kvm_read_cr2();
++	svm->host_dr6 = read_dr6();
++	svm->host_dr7 = read_dr7();
++	svm->vmcb->save.cr2 = vcpu->arch.cr2;
++
++	if (svm->vmcb->save.dr7 & 0xff) {
++		write_dr7(0);
++		save_db_regs(svm->host_db_regs);
++		load_db_regs(svm->db_regs);
++	}
++
++	clgi();
++
++	local_irq_enable();
++
++	asm volatile (
++#ifdef CONFIG_X86_64
++		"push %%rbp; \n\t"
++#else
++		"push %%ebp; \n\t"
++#endif
++
++#ifdef CONFIG_X86_64
++		"mov %c[rbx](%[svm]), %%rbx \n\t"
++		"mov %c[rcx](%[svm]), %%rcx \n\t"
++		"mov %c[rdx](%[svm]), %%rdx \n\t"
++		"mov %c[rsi](%[svm]), %%rsi \n\t"
++		"mov %c[rdi](%[svm]), %%rdi \n\t"
++		"mov %c[rbp](%[svm]), %%rbp \n\t"
++		"mov %c[r8](%[svm]),  %%r8  \n\t"
++		"mov %c[r9](%[svm]),  %%r9  \n\t"
++		"mov %c[r10](%[svm]), %%r10 \n\t"
++		"mov %c[r11](%[svm]), %%r11 \n\t"
++		"mov %c[r12](%[svm]), %%r12 \n\t"
++		"mov %c[r13](%[svm]), %%r13 \n\t"
++		"mov %c[r14](%[svm]), %%r14 \n\t"
++		"mov %c[r15](%[svm]), %%r15 \n\t"
++#else
++		"mov %c[rbx](%[svm]), %%ebx \n\t"
++		"mov %c[rcx](%[svm]), %%ecx \n\t"
++		"mov %c[rdx](%[svm]), %%edx \n\t"
++		"mov %c[rsi](%[svm]), %%esi \n\t"
++		"mov %c[rdi](%[svm]), %%edi \n\t"
++		"mov %c[rbp](%[svm]), %%ebp \n\t"
++#endif
++
++#ifdef CONFIG_X86_64
++		/* Enter guest mode */
++		"push %%rax \n\t"
++		"mov %c[vmcb](%[svm]), %%rax \n\t"
++		SVM_VMLOAD "\n\t"
++		SVM_VMRUN "\n\t"
++		SVM_VMSAVE "\n\t"
++		"pop %%rax \n\t"
++#else
++		/* Enter guest mode */
++		"push %%eax \n\t"
++		"mov %c[vmcb](%[svm]), %%eax \n\t"
++		SVM_VMLOAD "\n\t"
++		SVM_VMRUN "\n\t"
++		SVM_VMSAVE "\n\t"
++		"pop %%eax \n\t"
++#endif
++
++		/* Save guest registers, load host registers */
++#ifdef CONFIG_X86_64
++		"mov %%rbx, %c[rbx](%[svm]) \n\t"
++		"mov %%rcx, %c[rcx](%[svm]) \n\t"
++		"mov %%rdx, %c[rdx](%[svm]) \n\t"
++		"mov %%rsi, %c[rsi](%[svm]) \n\t"
++		"mov %%rdi, %c[rdi](%[svm]) \n\t"
++		"mov %%rbp, %c[rbp](%[svm]) \n\t"
++		"mov %%r8,  %c[r8](%[svm]) \n\t"
++		"mov %%r9,  %c[r9](%[svm]) \n\t"
++		"mov %%r10, %c[r10](%[svm]) \n\t"
++		"mov %%r11, %c[r11](%[svm]) \n\t"
++		"mov %%r12, %c[r12](%[svm]) \n\t"
++		"mov %%r13, %c[r13](%[svm]) \n\t"
++		"mov %%r14, %c[r14](%[svm]) \n\t"
++		"mov %%r15, %c[r15](%[svm]) \n\t"
++
++		"pop  %%rbp; \n\t"
++#else
++		"mov %%ebx, %c[rbx](%[svm]) \n\t"
++		"mov %%ecx, %c[rcx](%[svm]) \n\t"
++		"mov %%edx, %c[rdx](%[svm]) \n\t"
++		"mov %%esi, %c[rsi](%[svm]) \n\t"
++		"mov %%edi, %c[rdi](%[svm]) \n\t"
++		"mov %%ebp, %c[rbp](%[svm]) \n\t"
++
++		"pop  %%ebp; \n\t"
++#endif
++		:
++		: [svm]"a"(svm),
++		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
++		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
++		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
++		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
++		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
++		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
++		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
++#ifdef CONFIG_X86_64
++		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
++		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
++		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
++		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
++		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
++		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
++		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
++		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
++#endif
++		: "cc", "memory"
++#ifdef CONFIG_X86_64
++		, "rbx", "rcx", "rdx", "rsi", "rdi"
++		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
++#else
++		, "ebx", "ecx", "edx" , "esi", "edi"
++#endif
++		);
++
++	if ((svm->vmcb->save.dr7 & 0xff))
++		load_db_regs(svm->host_db_regs);
++
++	vcpu->arch.cr2 = svm->vmcb->save.cr2;
++
++	write_dr6(svm->host_dr6);
++	write_dr7(svm->host_dr7);
++	kvm_write_cr2(svm->host_cr2);
++
++	load_fs(fs_selector);
++	load_gs(gs_selector);
++	load_ldt(ldt_selector);
++	load_host_msrs(vcpu);
++
++	reload_tss(vcpu);
++
++	local_irq_disable();
++
++	stgi();
++
++	svm->next_rip = 0;
++}
++
++static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	svm->vmcb->save.cr3 = root;
++	force_new_asid(vcpu);
++
++	if (vcpu->fpu_active) {
++		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
++		svm->vmcb->save.cr0 |= X86_CR0_TS;
++		vcpu->fpu_active = 0;
++	}
++}
++
++static int is_disabled(void)
++{
++	u64 vm_cr;
++
++	rdmsrl(MSR_VM_CR, vm_cr);
++	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
++		return 1;
++
++	return 0;
++}
++
++static void
++svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
++{
++	/*
++	 * Patch in the VMMCALL instruction:
++	 */
++	hypercall[0] = 0x0f;
++	hypercall[1] = 0x01;
++	hypercall[2] = 0xd9;
++}
++
++static void svm_check_processor_compat(void *rtn)
++{
++	*(int *)rtn = 0;
++}
++
++static bool svm_cpu_has_accelerated_tpr(void)
++{
++	return false;
++}
++
++static struct kvm_x86_ops svm_x86_ops = {
++	.cpu_has_kvm_support = has_svm,
++	.disabled_by_bios = is_disabled,
++	.hardware_setup = svm_hardware_setup,
++	.hardware_unsetup = svm_hardware_unsetup,
++	.check_processor_compatibility = svm_check_processor_compat,
++	.hardware_enable = svm_hardware_enable,
++	.hardware_disable = svm_hardware_disable,
++	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
++
++	.vcpu_create = svm_create_vcpu,
++	.vcpu_free = svm_free_vcpu,
++	.vcpu_reset = svm_vcpu_reset,
++
++	.prepare_guest_switch = svm_prepare_guest_switch,
++	.vcpu_load = svm_vcpu_load,
++	.vcpu_put = svm_vcpu_put,
++	.vcpu_decache = svm_vcpu_decache,
++
++	.set_guest_debug = svm_guest_debug,
++	.get_msr = svm_get_msr,
++	.set_msr = svm_set_msr,
++	.get_segment_base = svm_get_segment_base,
++	.get_segment = svm_get_segment,
++	.set_segment = svm_set_segment,
++	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
++	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
++	.set_cr0 = svm_set_cr0,
++	.set_cr3 = svm_set_cr3,
++	.set_cr4 = svm_set_cr4,
++	.set_efer = svm_set_efer,
++	.get_idt = svm_get_idt,
++	.set_idt = svm_set_idt,
++	.get_gdt = svm_get_gdt,
++	.set_gdt = svm_set_gdt,
++	.get_dr = svm_get_dr,
++	.set_dr = svm_set_dr,
++	.cache_regs = svm_cache_regs,
++	.decache_regs = svm_decache_regs,
++	.get_rflags = svm_get_rflags,
++	.set_rflags = svm_set_rflags,
++
++	.tlb_flush = svm_flush_tlb,
++
++	.run = svm_vcpu_run,
++	.handle_exit = handle_exit,
++	.skip_emulated_instruction = skip_emulated_instruction,
++	.patch_hypercall = svm_patch_hypercall,
++	.get_irq = svm_get_irq,
++	.set_irq = svm_set_irq,
++	.queue_exception = svm_queue_exception,
++	.exception_injected = svm_exception_injected,
++	.inject_pending_irq = svm_intr_assist,
++	.inject_pending_vectors = do_interrupt_requests,
++
++	.set_tss_addr = svm_set_tss_addr,
++};
++
++static int __init svm_init(void)
++{
++	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
++			      THIS_MODULE);
++}
++
++static void __exit svm_exit(void)
++{
++	kvm_exit();
++}
++
++module_init(svm_init)
++module_exit(svm_exit)
+diff --git a/arch/x86/kvm/svm.h b/arch/x86/kvm/svm.h
+new file mode 100644
+index 0000000..5fd5049
+--- /dev/null
++++ b/arch/x86/kvm/svm.h
+@@ -0,0 +1,325 @@
++#ifndef __SVM_H
++#define __SVM_H
++
++enum {
++	INTERCEPT_INTR,
++	INTERCEPT_NMI,
++	INTERCEPT_SMI,
++	INTERCEPT_INIT,
++	INTERCEPT_VINTR,
++	INTERCEPT_SELECTIVE_CR0,
++	INTERCEPT_STORE_IDTR,
++	INTERCEPT_STORE_GDTR,
++	INTERCEPT_STORE_LDTR,
++	INTERCEPT_STORE_TR,
++	INTERCEPT_LOAD_IDTR,
++	INTERCEPT_LOAD_GDTR,
++	INTERCEPT_LOAD_LDTR,
++	INTERCEPT_LOAD_TR,
++	INTERCEPT_RDTSC,
++	INTERCEPT_RDPMC,
++	INTERCEPT_PUSHF,
++	INTERCEPT_POPF,
++	INTERCEPT_CPUID,
++	INTERCEPT_RSM,
++	INTERCEPT_IRET,
++	INTERCEPT_INTn,
++	INTERCEPT_INVD,
++	INTERCEPT_PAUSE,
++	INTERCEPT_HLT,
++	INTERCEPT_INVLPG,
++	INTERCEPT_INVLPGA,
++	INTERCEPT_IOIO_PROT,
++	INTERCEPT_MSR_PROT,
++	INTERCEPT_TASK_SWITCH,
++	INTERCEPT_FERR_FREEZE,
++	INTERCEPT_SHUTDOWN,
++	INTERCEPT_VMRUN,
++	INTERCEPT_VMMCALL,
++	INTERCEPT_VMLOAD,
++	INTERCEPT_VMSAVE,
++	INTERCEPT_STGI,
++	INTERCEPT_CLGI,
++	INTERCEPT_SKINIT,
++	INTERCEPT_RDTSCP,
++	INTERCEPT_ICEBP,
++	INTERCEPT_WBINVD,
++	INTERCEPT_MONITOR,
++	INTERCEPT_MWAIT,
++	INTERCEPT_MWAIT_COND,
++};
++
++
++struct __attribute__ ((__packed__)) vmcb_control_area {
++	u16 intercept_cr_read;
++	u16 intercept_cr_write;
++	u16 intercept_dr_read;
++	u16 intercept_dr_write;
++	u32 intercept_exceptions;
++	u64 intercept;
++	u8 reserved_1[44];
++	u64 iopm_base_pa;
++	u64 msrpm_base_pa;
++	u64 tsc_offset;
++	u32 asid;
++	u8 tlb_ctl;
++	u8 reserved_2[3];
++	u32 int_ctl;
++	u32 int_vector;
++	u32 int_state;
++	u8 reserved_3[4];
++	u32 exit_code;
++	u32 exit_code_hi;
++	u64 exit_info_1;
++	u64 exit_info_2;
++	u32 exit_int_info;
++	u32 exit_int_info_err;
++	u64 nested_ctl;
++	u8 reserved_4[16];
++	u32 event_inj;
++	u32 event_inj_err;
++	u64 nested_cr3;
++	u64 lbr_ctl;
++	u8 reserved_5[832];
++};
++
++
++#define TLB_CONTROL_DO_NOTHING 0
++#define TLB_CONTROL_FLUSH_ALL_ASID 1
++
++#define V_TPR_MASK 0x0f
++
++#define V_IRQ_SHIFT 8
++#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
++
++#define V_INTR_PRIO_SHIFT 16
++#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
++
++#define V_IGN_TPR_SHIFT 20
++#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
++
++#define V_INTR_MASKING_SHIFT 24
++#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
++
++#define SVM_INTERRUPT_SHADOW_MASK 1
++
++#define SVM_IOIO_STR_SHIFT 2
++#define SVM_IOIO_REP_SHIFT 3
++#define SVM_IOIO_SIZE_SHIFT 4
++#define SVM_IOIO_ASIZE_SHIFT 7
++
++#define SVM_IOIO_TYPE_MASK 1
++#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
++#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
++#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
++#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
++
++struct __attribute__ ((__packed__)) vmcb_seg {
++	u16 selector;
++	u16 attrib;
++	u32 limit;
++	u64 base;
++};
++
++struct __attribute__ ((__packed__)) vmcb_save_area {
++	struct vmcb_seg es;
++	struct vmcb_seg cs;
++	struct vmcb_seg ss;
++	struct vmcb_seg ds;
++	struct vmcb_seg fs;
++	struct vmcb_seg gs;
++	struct vmcb_seg gdtr;
++	struct vmcb_seg ldtr;
++	struct vmcb_seg idtr;
++	struct vmcb_seg tr;
++	u8 reserved_1[43];
++	u8 cpl;
++	u8 reserved_2[4];
++	u64 efer;
++	u8 reserved_3[112];
++	u64 cr4;
++	u64 cr3;
++	u64 cr0;
++	u64 dr7;
++	u64 dr6;
++	u64 rflags;
++	u64 rip;
++	u8 reserved_4[88];
++	u64 rsp;
++	u8 reserved_5[24];
++	u64 rax;
++	u64 star;
++	u64 lstar;
++	u64 cstar;
++	u64 sfmask;
++	u64 kernel_gs_base;
++	u64 sysenter_cs;
++	u64 sysenter_esp;
++	u64 sysenter_eip;
++	u64 cr2;
++	u8 reserved_6[32];
++	u64 g_pat;
++	u64 dbgctl;
++	u64 br_from;
++	u64 br_to;
++	u64 last_excp_from;
++	u64 last_excp_to;
++};
++
++struct __attribute__ ((__packed__)) vmcb {
++	struct vmcb_control_area control;
++	struct vmcb_save_area save;
++};
++
++#define SVM_CPUID_FEATURE_SHIFT 2
++#define SVM_CPUID_FUNC 0x8000000a
++
++#define MSR_EFER_SVME_MASK (1ULL << 12)
++#define MSR_VM_CR       0xc0010114
++#define MSR_VM_HSAVE_PA 0xc0010117ULL
++
++#define SVM_VM_CR_SVM_DISABLE 4
++
++#define SVM_SELECTOR_S_SHIFT 4
++#define SVM_SELECTOR_DPL_SHIFT 5
++#define SVM_SELECTOR_P_SHIFT 7
++#define SVM_SELECTOR_AVL_SHIFT 8
++#define SVM_SELECTOR_L_SHIFT 9
++#define SVM_SELECTOR_DB_SHIFT 10
++#define SVM_SELECTOR_G_SHIFT 11
++
++#define SVM_SELECTOR_TYPE_MASK (0xf)
++#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
++#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
++#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
++#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
++#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
++#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
++#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)
++
++#define SVM_SELECTOR_WRITE_MASK (1 << 1)
++#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
++#define SVM_SELECTOR_CODE_MASK (1 << 3)
++
++#define INTERCEPT_CR0_MASK 1
++#define INTERCEPT_CR3_MASK (1 << 3)
++#define INTERCEPT_CR4_MASK (1 << 4)
++#define INTERCEPT_CR8_MASK (1 << 8)
++
++#define INTERCEPT_DR0_MASK 1
++#define INTERCEPT_DR1_MASK (1 << 1)
++#define INTERCEPT_DR2_MASK (1 << 2)
++#define INTERCEPT_DR3_MASK (1 << 3)
++#define INTERCEPT_DR4_MASK (1 << 4)
++#define INTERCEPT_DR5_MASK (1 << 5)
++#define INTERCEPT_DR6_MASK (1 << 6)
++#define INTERCEPT_DR7_MASK (1 << 7)
++
++#define SVM_EVTINJ_VEC_MASK 0xff
++
++#define SVM_EVTINJ_TYPE_SHIFT 8
++#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
++
++#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
++#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
++#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
++#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
++
++#define SVM_EVTINJ_VALID (1 << 31)
++#define SVM_EVTINJ_VALID_ERR (1 << 11)
++
++#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
++
++#define	SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
++#define	SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
++#define	SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
++#define	SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
++
++#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
++#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
++
++#define	SVM_EXIT_READ_CR0 	0x000
++#define	SVM_EXIT_READ_CR3 	0x003
++#define	SVM_EXIT_READ_CR4 	0x004
++#define	SVM_EXIT_READ_CR8 	0x008
++#define	SVM_EXIT_WRITE_CR0 	0x010
++#define	SVM_EXIT_WRITE_CR3 	0x013
++#define	SVM_EXIT_WRITE_CR4 	0x014
++#define	SVM_EXIT_WRITE_CR8 	0x018
++#define	SVM_EXIT_READ_DR0 	0x020
++#define	SVM_EXIT_READ_DR1 	0x021
++#define	SVM_EXIT_READ_DR2 	0x022
++#define	SVM_EXIT_READ_DR3 	0x023
++#define	SVM_EXIT_READ_DR4 	0x024
++#define	SVM_EXIT_READ_DR5 	0x025
++#define	SVM_EXIT_READ_DR6 	0x026
++#define	SVM_EXIT_READ_DR7 	0x027
++#define	SVM_EXIT_WRITE_DR0 	0x030
++#define	SVM_EXIT_WRITE_DR1 	0x031
++#define	SVM_EXIT_WRITE_DR2 	0x032
++#define	SVM_EXIT_WRITE_DR3 	0x033
++#define	SVM_EXIT_WRITE_DR4 	0x034
++#define	SVM_EXIT_WRITE_DR5 	0x035
++#define	SVM_EXIT_WRITE_DR6 	0x036
++#define	SVM_EXIT_WRITE_DR7 	0x037
++#define SVM_EXIT_EXCP_BASE      0x040
++#define SVM_EXIT_INTR		0x060
++#define SVM_EXIT_NMI		0x061
++#define SVM_EXIT_SMI		0x062
++#define SVM_EXIT_INIT		0x063
++#define SVM_EXIT_VINTR		0x064
++#define SVM_EXIT_CR0_SEL_WRITE	0x065
++#define SVM_EXIT_IDTR_READ	0x066
++#define SVM_EXIT_GDTR_READ	0x067
++#define SVM_EXIT_LDTR_READ	0x068
++#define SVM_EXIT_TR_READ	0x069
++#define SVM_EXIT_IDTR_WRITE	0x06a
++#define SVM_EXIT_GDTR_WRITE	0x06b
++#define SVM_EXIT_LDTR_WRITE	0x06c
++#define SVM_EXIT_TR_WRITE	0x06d
++#define SVM_EXIT_RDTSC		0x06e
++#define SVM_EXIT_RDPMC		0x06f
++#define SVM_EXIT_PUSHF		0x070
++#define SVM_EXIT_POPF		0x071
++#define SVM_EXIT_CPUID		0x072
++#define SVM_EXIT_RSM		0x073
++#define SVM_EXIT_IRET		0x074
++#define SVM_EXIT_SWINT		0x075
++#define SVM_EXIT_INVD		0x076
++#define SVM_EXIT_PAUSE		0x077
++#define SVM_EXIT_HLT		0x078
++#define SVM_EXIT_INVLPG		0x079
++#define SVM_EXIT_INVLPGA	0x07a
++#define SVM_EXIT_IOIO		0x07b
++#define SVM_EXIT_MSR		0x07c
++#define SVM_EXIT_TASK_SWITCH	0x07d
++#define SVM_EXIT_FERR_FREEZE	0x07e
++#define SVM_EXIT_SHUTDOWN	0x07f
++#define SVM_EXIT_VMRUN		0x080
++#define SVM_EXIT_VMMCALL	0x081
++#define SVM_EXIT_VMLOAD		0x082
++#define SVM_EXIT_VMSAVE		0x083
++#define SVM_EXIT_STGI		0x084
++#define SVM_EXIT_CLGI		0x085
++#define SVM_EXIT_SKINIT		0x086
++#define SVM_EXIT_RDTSCP		0x087
++#define SVM_EXIT_ICEBP		0x088
++#define SVM_EXIT_WBINVD		0x089
++#define SVM_EXIT_MONITOR	0x08a
++#define SVM_EXIT_MWAIT		0x08b
++#define SVM_EXIT_MWAIT_COND	0x08c
++#define SVM_EXIT_NPF  		0x400
++
++#define SVM_EXIT_ERR		-1
++
++#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */
++
++#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
++#define SVM_VMRUN  ".byte 0x0f, 0x01, 0xd8"
++#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
++#define SVM_CLGI   ".byte 0x0f, 0x01, 0xdd"
++#define SVM_STGI   ".byte 0x0f, 0x01, 0xdc"
++#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
++
++#endif
++
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+new file mode 100644
+index 0000000..ad36447
+--- /dev/null
++++ b/arch/x86/kvm/vmx.c
+@@ -0,0 +1,2679 @@
++/*
++ * Kernel-based Virtual Machine driver for Linux
++ *
++ * This module enables machines with Intel VT-x extensions to run virtual
++ * machines without emulation or binary translation.
++ *
++ * Copyright (C) 2006 Qumranet, Inc.
++ *
++ * Authors:
++ *   Avi Kivity   <avi at qumranet.com>
++ *   Yaniv Kamay  <yaniv at qumranet.com>
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
++ *
++ */
++
++#include "irq.h"
++#include "vmx.h"
++#include "segment_descriptor.h"
++#include "mmu.h"
++
++#include <linux/kvm_host.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/highmem.h>
++#include <linux/sched.h>
++#include <linux/moduleparam.h>
++
++#include <asm/io.h>
++#include <asm/desc.h>
++
++MODULE_AUTHOR("Qumranet");
++MODULE_LICENSE("GPL");
++
++static int bypass_guest_pf = 1;
++module_param(bypass_guest_pf, bool, 0);
++
++struct vmcs {
++	u32 revision_id;
++	u32 abort;
++	char data[0];
++};
++
++struct vcpu_vmx {
++	struct kvm_vcpu       vcpu;
++	int                   launched;
++	u8                    fail;
++	u32                   idt_vectoring_info;
++	struct kvm_msr_entry *guest_msrs;
++	struct kvm_msr_entry *host_msrs;
++	int                   nmsrs;
++	int                   save_nmsrs;
++	int                   msr_offset_efer;
++#ifdef CONFIG_X86_64
++	int                   msr_offset_kernel_gs_base;
++#endif
++	struct vmcs          *vmcs;
++	struct {
++		int           loaded;
++		u16           fs_sel, gs_sel, ldt_sel;
++		int           gs_ldt_reload_needed;
++		int           fs_reload_needed;
++		int           guest_efer_loaded;
++	} host_state;
++	struct {
++		struct {
++			bool pending;
++			u8 vector;
++			unsigned rip;
++		} irq;
++	} rmode;
 +};
 +
-+struct pv_time_ops pv_time_ops = {
-+	.time_init = hpet_time_init,
-+	.get_wallclock = native_get_wallclock,
-+	.set_wallclock = native_set_wallclock,
-+	.sched_clock = native_sched_clock,
-+	.get_cpu_khz = native_calculate_cpu_khz,
++static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
++{
++	return container_of(vcpu, struct vcpu_vmx, vcpu);
++}
++
++static int init_rmode_tss(struct kvm *kvm);
++
++static DEFINE_PER_CPU(struct vmcs *, vmxarea);
++static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
++
++static struct page *vmx_io_bitmap_a;
++static struct page *vmx_io_bitmap_b;
++
++static struct vmcs_config {
++	int size;
++	int order;
++	u32 revision_id;
++	u32 pin_based_exec_ctrl;
++	u32 cpu_based_exec_ctrl;
++	u32 cpu_based_2nd_exec_ctrl;
++	u32 vmexit_ctrl;
++	u32 vmentry_ctrl;
++} vmcs_config;
++
++#define VMX_SEGMENT_FIELD(seg)					\
++	[VCPU_SREG_##seg] = {                                   \
++		.selector = GUEST_##seg##_SELECTOR,		\
++		.base = GUEST_##seg##_BASE,		   	\
++		.limit = GUEST_##seg##_LIMIT,		   	\
++		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
++	}
++
++static struct kvm_vmx_segment_field {
++	unsigned selector;
++	unsigned base;
++	unsigned limit;
++	unsigned ar_bytes;
++} kvm_vmx_segment_fields[] = {
++	VMX_SEGMENT_FIELD(CS),
++	VMX_SEGMENT_FIELD(DS),
++	VMX_SEGMENT_FIELD(ES),
++	VMX_SEGMENT_FIELD(FS),
++	VMX_SEGMENT_FIELD(GS),
++	VMX_SEGMENT_FIELD(SS),
++	VMX_SEGMENT_FIELD(TR),
++	VMX_SEGMENT_FIELD(LDTR),
 +};
 +
-+struct pv_irq_ops pv_irq_ops = {
-+	.init_IRQ = native_init_IRQ,
-+	.save_fl = native_save_fl,
-+	.restore_fl = native_restore_fl,
-+	.irq_disable = native_irq_disable,
-+	.irq_enable = native_irq_enable,
-+	.safe_halt = native_safe_halt,
-+	.halt = native_halt,
++/*
++ * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
++ * away by decrementing the array size.
++ */
++static const u32 vmx_msr_index[] = {
++#ifdef CONFIG_X86_64
++	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
++#endif
++	MSR_EFER, MSR_K6_STAR,
 +};
++#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 +
-+struct pv_cpu_ops pv_cpu_ops = {
-+	.cpuid = native_cpuid,
-+	.get_debugreg = native_get_debugreg,
-+	.set_debugreg = native_set_debugreg,
-+	.clts = native_clts,
-+	.read_cr0 = native_read_cr0,
-+	.write_cr0 = native_write_cr0,
-+	.read_cr4 = native_read_cr4,
-+	.read_cr4_safe = native_read_cr4_safe,
-+	.write_cr4 = native_write_cr4,
++static void load_msrs(struct kvm_msr_entry *e, int n)
++{
++	int i;
++
++	for (i = 0; i < n; ++i)
++		wrmsrl(e[i].index, e[i].data);
++}
++
++static void save_msrs(struct kvm_msr_entry *e, int n)
++{
++	int i;
++
++	for (i = 0; i < n; ++i)
++		rdmsrl(e[i].index, e[i].data);
++}
++
++static inline int is_page_fault(u32 intr_info)
++{
++	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
++			     INTR_INFO_VALID_MASK)) ==
++		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
++}
++
++static inline int is_no_device(u32 intr_info)
++{
++	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
++			     INTR_INFO_VALID_MASK)) ==
++		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
++}
++
++static inline int is_invalid_opcode(u32 intr_info)
++{
++	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
++			     INTR_INFO_VALID_MASK)) ==
++		(INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
++}
++
++static inline int is_external_interrupt(u32 intr_info)
++{
++	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
++		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
++}
++
++static inline int cpu_has_vmx_tpr_shadow(void)
++{
++	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
++}
++
++static inline int vm_need_tpr_shadow(struct kvm *kvm)
++{
++	return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
++}
++
++static inline int cpu_has_secondary_exec_ctrls(void)
++{
++	return (vmcs_config.cpu_based_exec_ctrl &
++		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
++}
++
++static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
++{
++	return (vmcs_config.cpu_based_2nd_exec_ctrl &
++		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
++}
++
++static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
++{
++	return ((cpu_has_vmx_virtualize_apic_accesses()) &&
++		(irqchip_in_kernel(kvm)));
++}
++
++static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
++{
++	int i;
++
++	for (i = 0; i < vmx->nmsrs; ++i)
++		if (vmx->guest_msrs[i].index == msr)
++			return i;
++	return -1;
++}
++
++static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
++{
++	int i;
++
++	i = __find_msr_index(vmx, msr);
++	if (i >= 0)
++		return &vmx->guest_msrs[i];
++	return NULL;
++}
++
++static void vmcs_clear(struct vmcs *vmcs)
++{
++	u64 phys_addr = __pa(vmcs);
++	u8 error;
++
++	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
++		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
++		      : "cc", "memory");
++	if (error)
++		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
++		       vmcs, phys_addr);
++}
++
++static void __vcpu_clear(void *arg)
++{
++	struct vcpu_vmx *vmx = arg;
++	int cpu = raw_smp_processor_id();
++
++	if (vmx->vcpu.cpu == cpu)
++		vmcs_clear(vmx->vmcs);
++	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
++		per_cpu(current_vmcs, cpu) = NULL;
++	rdtscll(vmx->vcpu.arch.host_tsc);
++}
++
++static void vcpu_clear(struct vcpu_vmx *vmx)
++{
++	if (vmx->vcpu.cpu == -1)
++		return;
++	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
++	vmx->launched = 0;
++}
++
++static unsigned long vmcs_readl(unsigned long field)
++{
++	unsigned long value;
++
++	asm volatile (ASM_VMX_VMREAD_RDX_RAX
++		      : "=a"(value) : "d"(field) : "cc");
++	return value;
++}
++
++static u16 vmcs_read16(unsigned long field)
++{
++	return vmcs_readl(field);
++}
++
++static u32 vmcs_read32(unsigned long field)
++{
++	return vmcs_readl(field);
++}
++
++static u64 vmcs_read64(unsigned long field)
++{
 +#ifdef CONFIG_X86_64
-+	.read_cr8 = native_read_cr8,
-+	.write_cr8 = native_write_cr8,
++	return vmcs_readl(field);
++#else
++	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
 +#endif
-+	.wbinvd = native_wbinvd,
-+	.read_msr = native_read_msr_safe,
-+	.write_msr = native_write_msr_safe,
-+	.read_tsc = native_read_tsc,
-+	.read_pmc = native_read_pmc,
-+	.read_tscp = native_read_tscp,
-+	.load_tr_desc = native_load_tr_desc,
-+	.set_ldt = native_set_ldt,
-+	.load_gdt = native_load_gdt,
-+	.load_idt = native_load_idt,
-+	.store_gdt = native_store_gdt,
-+	.store_idt = native_store_idt,
-+	.store_tr = native_store_tr,
-+	.load_tls = native_load_tls,
-+	.write_ldt_entry = native_write_ldt_entry,
-+	.write_gdt_entry = native_write_gdt_entry,
-+	.write_idt_entry = native_write_idt_entry,
-+	.load_sp0 = native_load_sp0,
++}
 +
-+	.irq_enable_syscall_ret = native_irq_enable_syscall_ret,
-+	.iret = native_iret,
-+	.swapgs = native_swapgs,
++static noinline void vmwrite_error(unsigned long field, unsigned long value)
++{
++	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
++	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
++	dump_stack();
++}
 +
-+	.set_iopl_mask = native_set_iopl_mask,
-+	.io_delay = native_io_delay,
++static void vmcs_writel(unsigned long field, unsigned long value)
++{
++	u8 error;
 +
-+	.lazy_mode = {
-+		.enter = paravirt_nop,
-+		.leave = paravirt_nop,
-+	},
-+};
++	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
++		       : "=q"(error) : "a"(value), "d"(field) : "cc");
++	if (unlikely(error))
++		vmwrite_error(field, value);
++}
 +
-+struct pv_apic_ops pv_apic_ops = {
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	.apic_write = native_apic_write,
-+	.apic_write_atomic = native_apic_write_atomic,
-+	.apic_read = native_apic_read,
-+	.setup_boot_clock = setup_boot_APIC_clock,
-+	.setup_secondary_clock = setup_secondary_APIC_clock,
-+	.startup_ipi_hook = paravirt_nop,
++static void vmcs_write16(unsigned long field, u16 value)
++{
++	vmcs_writel(field, value);
++}
++
++static void vmcs_write32(unsigned long field, u32 value)
++{
++	vmcs_writel(field, value);
++}
++
++static void vmcs_write64(unsigned long field, u64 value)
++{
++#ifdef CONFIG_X86_64
++	vmcs_writel(field, value);
++#else
++	vmcs_writel(field, value);
++	asm volatile ("");
++	vmcs_writel(field+1, value >> 32);
 +#endif
-+};
++}
 +
-+struct pv_mmu_ops pv_mmu_ops = {
++static void vmcs_clear_bits(unsigned long field, u32 mask)
++{
++	vmcs_writel(field, vmcs_readl(field) & ~mask);
++}
++
++static void vmcs_set_bits(unsigned long field, u32 mask)
++{
++	vmcs_writel(field, vmcs_readl(field) | mask);
++}
++
++static void update_exception_bitmap(struct kvm_vcpu *vcpu)
++{
++	u32 eb;
++
++	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
++	if (!vcpu->fpu_active)
++		eb |= 1u << NM_VECTOR;
++	if (vcpu->guest_debug.enabled)
++		eb |= 1u << 1;
++	if (vcpu->arch.rmode.active)
++		eb = ~0;
++	vmcs_write32(EXCEPTION_BITMAP, eb);
++}
++
++static void reload_tss(void)
++{
 +#ifndef CONFIG_X86_64
-+	.pagetable_setup_start = native_pagetable_setup_start,
-+	.pagetable_setup_done = native_pagetable_setup_done,
++
++	/*
++	 * VT restores TR but not its size.  Useless.
++	 */
++	struct descriptor_table gdt;
++	struct segment_descriptor *descs;
++
++	get_gdt(&gdt);
++	descs = (void *)gdt.base;
++	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++	load_TR_desc();
 +#endif
++}
 +
-+	.read_cr2 = native_read_cr2,
-+	.write_cr2 = native_write_cr2,
-+	.read_cr3 = native_read_cr3,
-+	.write_cr3 = native_write_cr3,
++static void load_transition_efer(struct vcpu_vmx *vmx)
++{
++	int efer_offset = vmx->msr_offset_efer;
++	u64 host_efer = vmx->host_msrs[efer_offset].data;
++	u64 guest_efer = vmx->guest_msrs[efer_offset].data;
++	u64 ignore_bits;
 +
-+	.flush_tlb_user = native_flush_tlb,
-+	.flush_tlb_kernel = native_flush_tlb_global,
-+	.flush_tlb_single = native_flush_tlb_single,
-+	.flush_tlb_others = native_flush_tlb_others,
++	if (efer_offset < 0)
++		return;
++	/*
++	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
++	 * outside long mode
++	 */
++	ignore_bits = EFER_NX | EFER_SCE;
++#ifdef CONFIG_X86_64
++	ignore_bits |= EFER_LMA | EFER_LME;
++	/* SCE is meaningful only in long mode on Intel */
++	if (guest_efer & EFER_LMA)
++		ignore_bits &= ~(u64)EFER_SCE;
++#endif
++	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
++		return;
 +
-+	.alloc_pt = paravirt_nop,
-+	.alloc_pd = paravirt_nop,
-+	.alloc_pd_clone = paravirt_nop,
-+	.release_pt = paravirt_nop,
-+	.release_pd = paravirt_nop,
++	vmx->host_state.guest_efer_loaded = 1;
++	guest_efer &= ~ignore_bits;
++	guest_efer |= host_efer & ignore_bits;
++	wrmsrl(MSR_EFER, guest_efer);
++	vmx->vcpu.stat.efer_reload++;
++}
 +
-+	.set_pte = native_set_pte,
-+	.set_pte_at = native_set_pte_at,
-+	.set_pmd = native_set_pmd,
-+	.pte_update = paravirt_nop,
-+	.pte_update_defer = paravirt_nop,
++static void reload_host_efer(struct vcpu_vmx *vmx)
++{
++	if (vmx->host_state.guest_efer_loaded) {
++		vmx->host_state.guest_efer_loaded = 0;
++		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
++	}
++}
 +
-+#ifdef CONFIG_HIGHPTE
-+	.kmap_atomic_pte = kmap_atomic,
++static void vmx_save_host_state(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++
++	if (vmx->host_state.loaded)
++		return;
++
++	vmx->host_state.loaded = 1;
++	/*
++	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
++	 * allow segment selectors with cpl > 0 or ti == 1.
++	 */
++	vmx->host_state.ldt_sel = read_ldt();
++	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
++	vmx->host_state.fs_sel = read_fs();
++	if (!(vmx->host_state.fs_sel & 7)) {
++		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
++		vmx->host_state.fs_reload_needed = 0;
++	} else {
++		vmcs_write16(HOST_FS_SELECTOR, 0);
++		vmx->host_state.fs_reload_needed = 1;
++	}
++	vmx->host_state.gs_sel = read_gs();
++	if (!(vmx->host_state.gs_sel & 7))
++		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
++	else {
++		vmcs_write16(HOST_GS_SELECTOR, 0);
++		vmx->host_state.gs_ldt_reload_needed = 1;
++	}
++
++#ifdef CONFIG_X86_64
++	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
++	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
++#else
++	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
++	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
 +#endif
 +
-+#if PAGETABLE_LEVELS >= 3
-+#ifdef CONFIG_X86_PAE
-+	.set_pte_atomic = native_set_pte_atomic,
-+	.set_pte_present = native_set_pte_present,
-+	.pte_clear = native_pte_clear,
-+	.pmd_clear = native_pmd_clear,
++#ifdef CONFIG_X86_64
++	if (is_long_mode(&vmx->vcpu))
++		save_msrs(vmx->host_msrs +
++			  vmx->msr_offset_kernel_gs_base, 1);
++
 +#endif
-+	.set_pud = native_set_pud,
-+	.pmd_val = native_pmd_val,
-+	.make_pmd = native_make_pmd,
++	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
++	load_transition_efer(vmx);
++}
 +
-+#if PAGETABLE_LEVELS == 4
-+	.pud_val = native_pud_val,
-+	.make_pud = native_make_pud,
-+	.set_pgd = native_set_pgd,
++static void vmx_load_host_state(struct vcpu_vmx *vmx)
++{
++	unsigned long flags;
++
++	if (!vmx->host_state.loaded)
++		return;
++
++	++vmx->vcpu.stat.host_state_reload;
++	vmx->host_state.loaded = 0;
++	if (vmx->host_state.fs_reload_needed)
++		load_fs(vmx->host_state.fs_sel);
++	if (vmx->host_state.gs_ldt_reload_needed) {
++		load_ldt(vmx->host_state.ldt_sel);
++		/*
++		 * If we have to reload gs, we must take care to
++		 * preserve our gs base.
++		 */
++		local_irq_save(flags);
++		load_gs(vmx->host_state.gs_sel);
++#ifdef CONFIG_X86_64
++		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 +#endif
-+#endif /* PAGETABLE_LEVELS >= 3 */
++		local_irq_restore(flags);
++	}
++	reload_tss();
++	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
++	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
++	reload_host_efer(vmx);
++}
 +
-+	.pte_val = native_pte_val,
-+	.pgd_val = native_pgd_val,
++/*
++ * Switches to specified vcpu, until a matching vcpu_put(), but assumes
++ * vcpu mutex is already taken.
++ */
++static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	u64 phys_addr = __pa(vmx->vmcs);
++	u64 tsc_this, delta;
 +
-+	.make_pte = native_make_pte,
-+	.make_pgd = native_make_pgd,
++	if (vcpu->cpu != cpu) {
++		vcpu_clear(vmx);
++		kvm_migrate_apic_timer(vcpu);
++	}
 +
-+	.dup_mmap = paravirt_nop,
-+	.exit_mmap = paravirt_nop,
-+	.activate_mm = paravirt_nop,
++	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
++		u8 error;
 +
-+	.lazy_mode = {
-+		.enter = paravirt_nop,
-+		.leave = paravirt_nop,
-+	},
-+};
++		per_cpu(current_vmcs, cpu) = vmx->vmcs;
++		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
++			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
++			      : "cc");
++		if (error)
++			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
++			       vmx->vmcs, phys_addr);
++	}
 +
-+EXPORT_SYMBOL_GPL(pv_time_ops);
-+EXPORT_SYMBOL    (pv_cpu_ops);
-+EXPORT_SYMBOL    (pv_mmu_ops);
-+EXPORT_SYMBOL_GPL(pv_apic_ops);
-+EXPORT_SYMBOL_GPL(pv_info);
-+EXPORT_SYMBOL    (pv_irq_ops);
-diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
-deleted file mode 100644
-index f500079..0000000
---- a/arch/x86/kernel/paravirt_32.c
-+++ /dev/null
-@@ -1,472 +0,0 @@
--/*  Paravirtualization interfaces
--    Copyright (C) 2006 Rusty Russell IBM Corporation
--
--    This program is free software; you can redistribute it and/or modify
--    it under the terms of the GNU General Public License as published by
--    the Free Software Foundation; either version 2 of the License, or
--    (at your option) any later version.
--
--    This program is distributed in the hope that it will be useful,
--    but WITHOUT ANY WARRANTY; without even the implied warranty of
--    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
--    GNU General Public License for more details.
--
--    You should have received a copy of the GNU General Public License
--    along with this program; if not, write to the Free Software
--    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
--*/
--#include <linux/errno.h>
--#include <linux/module.h>
--#include <linux/efi.h>
--#include <linux/bcd.h>
--#include <linux/highmem.h>
--
--#include <asm/bug.h>
--#include <asm/paravirt.h>
--#include <asm/desc.h>
--#include <asm/setup.h>
--#include <asm/arch_hooks.h>
--#include <asm/time.h>
--#include <asm/irq.h>
--#include <asm/delay.h>
--#include <asm/fixmap.h>
--#include <asm/apic.h>
--#include <asm/tlbflush.h>
--#include <asm/timer.h>
--
--/* nop stub */
--void _paravirt_nop(void)
--{
--}
--
--static void __init default_banner(void)
--{
--	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
--	       pv_info.name);
--}
--
--char *memory_setup(void)
--{
--	return pv_init_ops.memory_setup();
--}
--
--/* Simple instruction patching code. */
--#define DEF_NATIVE(ops, name, code)					\
--	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
--	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
--
--DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
--DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
--DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
--DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
--DEF_NATIVE(pv_cpu_ops, iret, "iret");
--DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
--DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
--DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
--DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
--DEF_NATIVE(pv_cpu_ops, clts, "clts");
--DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
--
--/* Undefined instruction for dealing with missing ops pointers. */
--static const unsigned char ud2a[] = { 0x0f, 0x0b };
--
--static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
--			     unsigned long addr, unsigned len)
--{
--	const unsigned char *start, *end;
--	unsigned ret;
--
--	switch(type) {
--#define SITE(ops, x)						\
--	case PARAVIRT_PATCH(ops.x):				\
--		start = start_##ops##_##x;			\
--		end = end_##ops##_##x;				\
--		goto patch_site
--
--	SITE(pv_irq_ops, irq_disable);
--	SITE(pv_irq_ops, irq_enable);
--	SITE(pv_irq_ops, restore_fl);
--	SITE(pv_irq_ops, save_fl);
--	SITE(pv_cpu_ops, iret);
--	SITE(pv_cpu_ops, irq_enable_sysexit);
--	SITE(pv_mmu_ops, read_cr2);
--	SITE(pv_mmu_ops, read_cr3);
--	SITE(pv_mmu_ops, write_cr3);
--	SITE(pv_cpu_ops, clts);
--	SITE(pv_cpu_ops, read_tsc);
--#undef SITE
--
--	patch_site:
--		ret = paravirt_patch_insns(ibuf, len, start, end);
--		break;
--
--	default:
--		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
--		break;
--	}
--
--	return ret;
--}
--
--unsigned paravirt_patch_nop(void)
--{
--	return 0;
--}
--
--unsigned paravirt_patch_ignore(unsigned len)
--{
--	return len;
--}
--
--struct branch {
--	unsigned char opcode;
--	u32 delta;
--} __attribute__((packed));
--
--unsigned paravirt_patch_call(void *insnbuf,
--			     const void *target, u16 tgt_clobbers,
--			     unsigned long addr, u16 site_clobbers,
--			     unsigned len)
--{
--	struct branch *b = insnbuf;
--	unsigned long delta = (unsigned long)target - (addr+5);
--
--	if (tgt_clobbers & ~site_clobbers)
--		return len;	/* target would clobber too much for this site */
--	if (len < 5)
--		return len;	/* call too long for patch site */
--
--	b->opcode = 0xe8; /* call */
--	b->delta = delta;
--	BUILD_BUG_ON(sizeof(*b) != 5);
--
--	return 5;
--}
--
--unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
--			    unsigned long addr, unsigned len)
--{
--	struct branch *b = insnbuf;
--	unsigned long delta = (unsigned long)target - (addr+5);
--
--	if (len < 5)
--		return len;	/* call too long for patch site */
--
--	b->opcode = 0xe9;	/* jmp */
--	b->delta = delta;
--
--	return 5;
--}
--
--/* Neat trick to map patch type back to the call within the
-- * corresponding structure. */
--static void *get_call_destination(u8 type)
--{
--	struct paravirt_patch_template tmpl = {
--		.pv_init_ops = pv_init_ops,
--		.pv_time_ops = pv_time_ops,
--		.pv_cpu_ops = pv_cpu_ops,
--		.pv_irq_ops = pv_irq_ops,
--		.pv_apic_ops = pv_apic_ops,
--		.pv_mmu_ops = pv_mmu_ops,
--	};
--	return *((void **)&tmpl + type);
--}
--
--unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
--				unsigned long addr, unsigned len)
--{
--	void *opfunc = get_call_destination(type);
--	unsigned ret;
--
--	if (opfunc == NULL)
--		/* If there's no function, patch it with a ud2a (BUG) */
--		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
--	else if (opfunc == paravirt_nop)
--		/* If the operation is a nop, then nop the callsite */
--		ret = paravirt_patch_nop();
--	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
--		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
--		/* If operation requires a jmp, then jmp */
--		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
--	else
--		/* Otherwise call the function; assume target could
--		   clobber any caller-save reg */
--		ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
--					  addr, clobbers, len);
--
--	return ret;
--}
--
--unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
--			      const char *start, const char *end)
--{
--	unsigned insn_len = end - start;
--
--	if (insn_len > len || start == NULL)
--		insn_len = len;
--	else
--		memcpy(insnbuf, start, insn_len);
--
--	return insn_len;
--}
--
--void init_IRQ(void)
--{
--	pv_irq_ops.init_IRQ();
--}
--
--static void native_flush_tlb(void)
--{
--	__native_flush_tlb();
--}
--
--/*
-- * Global pages have to be flushed a bit differently. Not a real
-- * performance problem because this does not happen often.
-- */
--static void native_flush_tlb_global(void)
--{
--	__native_flush_tlb_global();
--}
--
--static void native_flush_tlb_single(unsigned long addr)
--{
--	__native_flush_tlb_single(addr);
--}
--
--/* These are in entry.S */
--extern void native_iret(void);
--extern void native_irq_enable_sysexit(void);
--
--static int __init print_banner(void)
--{
--	pv_init_ops.banner();
--	return 0;
--}
--core_initcall(print_banner);
--
--static struct resource reserve_ioports = {
--	.start = 0,
--	.end = IO_SPACE_LIMIT,
--	.name = "paravirt-ioport",
--	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
--};
--
--static struct resource reserve_iomem = {
--	.start = 0,
--	.end = -1,
--	.name = "paravirt-iomem",
--	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
--};
--
--/*
-- * Reserve the whole legacy IO space to prevent any legacy drivers
-- * from wasting time probing for their hardware.  This is a fairly
-- * brute-force approach to disabling all non-virtual drivers.
-- *
-- * Note that this must be called very early to have any effect.
-- */
--int paravirt_disable_iospace(void)
--{
--	int ret;
--
--	ret = request_resource(&ioport_resource, &reserve_ioports);
--	if (ret == 0) {
--		ret = request_resource(&iomem_resource, &reserve_iomem);
--		if (ret)
--			release_resource(&reserve_ioports);
--	}
--
--	return ret;
--}
--
--static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
--
--static inline void enter_lazy(enum paravirt_lazy_mode mode)
--{
--	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
--	BUG_ON(preemptible());
--
--	x86_write_percpu(paravirt_lazy_mode, mode);
--}
--
--void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
--{
--	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
--	BUG_ON(preemptible());
--
--	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
--}
--
--void paravirt_enter_lazy_mmu(void)
--{
--	enter_lazy(PARAVIRT_LAZY_MMU);
--}
--
--void paravirt_leave_lazy_mmu(void)
--{
--	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
--}
--
--void paravirt_enter_lazy_cpu(void)
--{
--	enter_lazy(PARAVIRT_LAZY_CPU);
--}
--
--void paravirt_leave_lazy_cpu(void)
--{
--	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
--}
--
--enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
--{
--	return x86_read_percpu(paravirt_lazy_mode);
--}
--
--struct pv_info pv_info = {
--	.name = "bare hardware",
--	.paravirt_enabled = 0,
--	.kernel_rpl = 0,
--	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
--};
--
--struct pv_init_ops pv_init_ops = {
--	.patch = native_patch,
--	.banner = default_banner,
--	.arch_setup = paravirt_nop,
--	.memory_setup = machine_specific_memory_setup,
--};
--
--struct pv_time_ops pv_time_ops = {
--	.time_init = hpet_time_init,
--	.get_wallclock = native_get_wallclock,
--	.set_wallclock = native_set_wallclock,
--	.sched_clock = native_sched_clock,
--	.get_cpu_khz = native_calculate_cpu_khz,
--};
--
--struct pv_irq_ops pv_irq_ops = {
--	.init_IRQ = native_init_IRQ,
--	.save_fl = native_save_fl,
--	.restore_fl = native_restore_fl,
--	.irq_disable = native_irq_disable,
--	.irq_enable = native_irq_enable,
--	.safe_halt = native_safe_halt,
--	.halt = native_halt,
--};
--
--struct pv_cpu_ops pv_cpu_ops = {
--	.cpuid = native_cpuid,
--	.get_debugreg = native_get_debugreg,
--	.set_debugreg = native_set_debugreg,
--	.clts = native_clts,
--	.read_cr0 = native_read_cr0,
--	.write_cr0 = native_write_cr0,
--	.read_cr4 = native_read_cr4,
--	.read_cr4_safe = native_read_cr4_safe,
--	.write_cr4 = native_write_cr4,
--	.wbinvd = native_wbinvd,
--	.read_msr = native_read_msr_safe,
--	.write_msr = native_write_msr_safe,
--	.read_tsc = native_read_tsc,
--	.read_pmc = native_read_pmc,
--	.load_tr_desc = native_load_tr_desc,
--	.set_ldt = native_set_ldt,
--	.load_gdt = native_load_gdt,
--	.load_idt = native_load_idt,
--	.store_gdt = native_store_gdt,
--	.store_idt = native_store_idt,
--	.store_tr = native_store_tr,
--	.load_tls = native_load_tls,
--	.write_ldt_entry = write_dt_entry,
--	.write_gdt_entry = write_dt_entry,
--	.write_idt_entry = write_dt_entry,
--	.load_esp0 = native_load_esp0,
--
--	.irq_enable_sysexit = native_irq_enable_sysexit,
--	.iret = native_iret,
--
--	.set_iopl_mask = native_set_iopl_mask,
--	.io_delay = native_io_delay,
--
--	.lazy_mode = {
--		.enter = paravirt_nop,
--		.leave = paravirt_nop,
--	},
--};
--
--struct pv_apic_ops pv_apic_ops = {
--#ifdef CONFIG_X86_LOCAL_APIC
--	.apic_write = native_apic_write,
--	.apic_write_atomic = native_apic_write_atomic,
--	.apic_read = native_apic_read,
--	.setup_boot_clock = setup_boot_APIC_clock,
--	.setup_secondary_clock = setup_secondary_APIC_clock,
--	.startup_ipi_hook = paravirt_nop,
--#endif
--};
--
--struct pv_mmu_ops pv_mmu_ops = {
--	.pagetable_setup_start = native_pagetable_setup_start,
--	.pagetable_setup_done = native_pagetable_setup_done,
--
--	.read_cr2 = native_read_cr2,
--	.write_cr2 = native_write_cr2,
--	.read_cr3 = native_read_cr3,
--	.write_cr3 = native_write_cr3,
--
--	.flush_tlb_user = native_flush_tlb,
--	.flush_tlb_kernel = native_flush_tlb_global,
--	.flush_tlb_single = native_flush_tlb_single,
--	.flush_tlb_others = native_flush_tlb_others,
--
--	.alloc_pt = paravirt_nop,
--	.alloc_pd = paravirt_nop,
--	.alloc_pd_clone = paravirt_nop,
--	.release_pt = paravirt_nop,
--	.release_pd = paravirt_nop,
--
--	.set_pte = native_set_pte,
--	.set_pte_at = native_set_pte_at,
--	.set_pmd = native_set_pmd,
--	.pte_update = paravirt_nop,
--	.pte_update_defer = paravirt_nop,
--
--#ifdef CONFIG_HIGHPTE
--	.kmap_atomic_pte = kmap_atomic,
--#endif
--
--#ifdef CONFIG_X86_PAE
--	.set_pte_atomic = native_set_pte_atomic,
--	.set_pte_present = native_set_pte_present,
--	.set_pud = native_set_pud,
--	.pte_clear = native_pte_clear,
--	.pmd_clear = native_pmd_clear,
--
--	.pmd_val = native_pmd_val,
--	.make_pmd = native_make_pmd,
--#endif
--
--	.pte_val = native_pte_val,
--	.pgd_val = native_pgd_val,
--
--	.make_pte = native_make_pte,
--	.make_pgd = native_make_pgd,
--
--	.dup_mmap = paravirt_nop,
--	.exit_mmap = paravirt_nop,
--	.activate_mm = paravirt_nop,
--
--	.lazy_mode = {
--		.enter = paravirt_nop,
--		.leave = paravirt_nop,
--	},
--};
--
--EXPORT_SYMBOL_GPL(pv_time_ops);
--EXPORT_SYMBOL    (pv_cpu_ops);
--EXPORT_SYMBOL    (pv_mmu_ops);
--EXPORT_SYMBOL_GPL(pv_apic_ops);
--EXPORT_SYMBOL_GPL(pv_info);
--EXPORT_SYMBOL    (pv_irq_ops);
-diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
-new file mode 100644
-index 0000000..82fc5fc
---- /dev/null
-+++ b/arch/x86/kernel/paravirt_patch_32.c
-@@ -0,0 +1,49 @@
-+#include <asm/paravirt.h>
++	if (vcpu->cpu != cpu) {
++		struct descriptor_table dt;
++		unsigned long sysenter_esp;
++
++		vcpu->cpu = cpu;
++		/*
++		 * Linux uses per-cpu TSS and GDT, so set these when switching
++		 * processors.
++		 */
++		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
++		get_gdt(&dt);
++		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
++
++		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
++		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
++
++		/*
++	 * Make sure the time stamp counter is monotonic.
++		 */
++		rdtscll(tsc_this);
++		delta = vcpu->arch.host_tsc - tsc_this;
++		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
++	}
++}
++
++static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
++{
++	vmx_load_host_state(to_vmx(vcpu));
++}
++
++static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
++{
++	if (vcpu->fpu_active)
++		return;
++	vcpu->fpu_active = 1;
++	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
++	if (vcpu->arch.cr0 & X86_CR0_TS)
++		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
++	update_exception_bitmap(vcpu);
++}
++
++static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
++{
++	if (!vcpu->fpu_active)
++		return;
++	vcpu->fpu_active = 0;
++	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
++	update_exception_bitmap(vcpu);
++}
++
++static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
++{
++	vcpu_clear(to_vmx(vcpu));
++}
++
++static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
++{
++	return vmcs_readl(GUEST_RFLAGS);
++}
++
++static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
++{
++	if (vcpu->arch.rmode.active)
++		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
++	vmcs_writel(GUEST_RFLAGS, rflags);
++}
++
++static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
++{
++	unsigned long rip;
++	u32 interruptibility;
++
++	rip = vmcs_readl(GUEST_RIP);
++	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
++	vmcs_writel(GUEST_RIP, rip);
++
++	/*
++	 * We emulated an instruction, so temporary interrupt blocking
++	 * should be removed, if set.
++	 */
++	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
++	if (interruptibility & 3)
++		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
++			     interruptibility & ~3);
++	vcpu->arch.interrupt_window_open = 1;
++}
++
++static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
++				bool has_error_code, u32 error_code)
++{
++	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
++		     nr | INTR_TYPE_EXCEPTION
++		     | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
++		     | INTR_INFO_VALID_MASK);
++	if (has_error_code)
++		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
++}
++
++static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++
++	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
++}
++
++/*
++ * Swap MSR entry in host/guest MSR entry array.
++ */
++#ifdef CONFIG_X86_64
++static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
++{
++	struct kvm_msr_entry tmp;
++
++	tmp = vmx->guest_msrs[to];
++	vmx->guest_msrs[to] = vmx->guest_msrs[from];
++	vmx->guest_msrs[from] = tmp;
++	tmp = vmx->host_msrs[to];
++	vmx->host_msrs[to] = vmx->host_msrs[from];
++	vmx->host_msrs[from] = tmp;
++}
++#endif
++
++/*
++ * Set up the vmcs to automatically save and restore system
++ * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
++ * mode, as fiddling with msrs is very expensive.
++ */
++static void setup_msrs(struct vcpu_vmx *vmx)
++{
++	int save_nmsrs;
++
++	save_nmsrs = 0;
++#ifdef CONFIG_X86_64
++	if (is_long_mode(&vmx->vcpu)) {
++		int index;
++
++		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
++		if (index >= 0)
++			move_msr_up(vmx, index, save_nmsrs++);
++		index = __find_msr_index(vmx, MSR_LSTAR);
++		if (index >= 0)
++			move_msr_up(vmx, index, save_nmsrs++);
++		index = __find_msr_index(vmx, MSR_CSTAR);
++		if (index >= 0)
++			move_msr_up(vmx, index, save_nmsrs++);
++		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
++		if (index >= 0)
++			move_msr_up(vmx, index, save_nmsrs++);
++		/*
++		 * MSR_K6_STAR is only needed on long mode guests, and only
++		 * if efer.sce is enabled.
++		 */
++		index = __find_msr_index(vmx, MSR_K6_STAR);
++		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
++			move_msr_up(vmx, index, save_nmsrs++);
++	}
++#endif
++	vmx->save_nmsrs = save_nmsrs;
++
++#ifdef CONFIG_X86_64
++	vmx->msr_offset_kernel_gs_base =
++		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
++#endif
++	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
++}
++
++/*
++ * reads and returns guest's timestamp counter "register"
++ * guest_tsc = host_tsc + tsc_offset    -- 21.3
++ */
++static u64 guest_read_tsc(void)
++{
++	u64 host_tsc, tsc_offset;
++
++	rdtscll(host_tsc);
++	tsc_offset = vmcs_read64(TSC_OFFSET);
++	return host_tsc + tsc_offset;
++}
++
++/*
++ * writes 'guest_tsc' into guest's timestamp counter "register"
++ * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
++ */
++static void guest_write_tsc(u64 guest_tsc)
++{
++	u64 host_tsc;
++
++	rdtscll(host_tsc);
++	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
++}
++
++/*
++ * Reads an msr value (of 'msr_index') into 'pdata'.
++ * Returns 0 on success, non-0 otherwise.
++ * Assumes vcpu_load() was already called.
++ */
++static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
++{
++	u64 data;
++	struct kvm_msr_entry *msr;
++
++	if (!pdata) {
++		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
++		return -EINVAL;
++	}
++
++	switch (msr_index) {
++#ifdef CONFIG_X86_64
++	case MSR_FS_BASE:
++		data = vmcs_readl(GUEST_FS_BASE);
++		break;
++	case MSR_GS_BASE:
++		data = vmcs_readl(GUEST_GS_BASE);
++		break;
++	case MSR_EFER:
++		return kvm_get_msr_common(vcpu, msr_index, pdata);
++#endif
++	case MSR_IA32_TIME_STAMP_COUNTER:
++		data = guest_read_tsc();
++		break;
++	case MSR_IA32_SYSENTER_CS:
++		data = vmcs_read32(GUEST_SYSENTER_CS);
++		break;
++	case MSR_IA32_SYSENTER_EIP:
++		data = vmcs_readl(GUEST_SYSENTER_EIP);
++		break;
++	case MSR_IA32_SYSENTER_ESP:
++		data = vmcs_readl(GUEST_SYSENTER_ESP);
++		break;
++	default:
++		msr = find_msr_entry(to_vmx(vcpu), msr_index);
++		if (msr) {
++			data = msr->data;
++			break;
++		}
++		return kvm_get_msr_common(vcpu, msr_index, pdata);
++	}
++
++	*pdata = data;
++	return 0;
++}
++
++/*
++ * Writes msr value into the appropriate "register".
++ * Returns 0 on success, non-0 otherwise.
++ * Assumes vcpu_load() was already called.
++ */
++static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	struct kvm_msr_entry *msr;
++	int ret = 0;
++
++	switch (msr_index) {
++#ifdef CONFIG_X86_64
++	case MSR_EFER:
++		ret = kvm_set_msr_common(vcpu, msr_index, data);
++		if (vmx->host_state.loaded) {
++			reload_host_efer(vmx);
++			load_transition_efer(vmx);
++		}
++		break;
++	case MSR_FS_BASE:
++		vmcs_writel(GUEST_FS_BASE, data);
++		break;
++	case MSR_GS_BASE:
++		vmcs_writel(GUEST_GS_BASE, data);
++		break;
++#endif
++	case MSR_IA32_SYSENTER_CS:
++		vmcs_write32(GUEST_SYSENTER_CS, data);
++		break;
++	case MSR_IA32_SYSENTER_EIP:
++		vmcs_writel(GUEST_SYSENTER_EIP, data);
++		break;
++	case MSR_IA32_SYSENTER_ESP:
++		vmcs_writel(GUEST_SYSENTER_ESP, data);
++		break;
++	case MSR_IA32_TIME_STAMP_COUNTER:
++		guest_write_tsc(data);
++		break;
++	default:
++		msr = find_msr_entry(vmx, msr_index);
++		if (msr) {
++			msr->data = data;
++			if (vmx->host_state.loaded)
++				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
++			break;
++		}
++		ret = kvm_set_msr_common(vcpu, msr_index, data);
++	}
++
++	return ret;
++}
++
++/*
++ * Sync the rsp and rip registers into the vcpu structure.  This allows
++ * registers to be accessed by indexing vcpu->arch.regs.
++ */
++static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
++{
++	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
++	vcpu->arch.rip = vmcs_readl(GUEST_RIP);
++}
++
++/*
++ * Syncs rsp and rip back into the vmcs.  Should be called after possible
++ * modification.
++ */
++static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
++{
++	vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
++	vmcs_writel(GUEST_RIP, vcpu->arch.rip);
++}
++
++static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
++{
++	unsigned long dr7 = 0x400;
++	int old_singlestep;
++
++	old_singlestep = vcpu->guest_debug.singlestep;
++
++	vcpu->guest_debug.enabled = dbg->enabled;
++	if (vcpu->guest_debug.enabled) {
++		int i;
++
++		dr7 |= 0x200;  /* exact */
++		for (i = 0; i < 4; ++i) {
++			if (!dbg->breakpoints[i].enabled)
++				continue;
++			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
++			dr7 |= 2 << (i*2);    /* global enable */
++			dr7 |= 0 << (i*4+16); /* execution breakpoint */
++		}
++
++		vcpu->guest_debug.singlestep = dbg->singlestep;
++	} else
++		vcpu->guest_debug.singlestep = 0;
++
++	if (old_singlestep && !vcpu->guest_debug.singlestep) {
++		unsigned long flags;
++
++		flags = vmcs_readl(GUEST_RFLAGS);
++		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
++		vmcs_writel(GUEST_RFLAGS, flags);
++	}
++
++	update_exception_bitmap(vcpu);
++	vmcs_writel(GUEST_DR7, dr7);
++
++	return 0;
++}
++
++static int vmx_get_irq(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	u32 idtv_info_field;
++
++	idtv_info_field = vmx->idt_vectoring_info;
++	if (idtv_info_field & INTR_INFO_VALID_MASK) {
++		if (is_external_interrupt(idtv_info_field))
++			return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
++		else
++			printk(KERN_DEBUG "pending exception: not handled yet\n");
++	}
++	return -1;
++}
 +
-+DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
-+DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-+DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
-+DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
-+DEF_NATIVE(pv_cpu_ops, iret, "iret");
-+DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "sti; sysexit");
-+DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
-+DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
-+DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
-+DEF_NATIVE(pv_cpu_ops, clts, "clts");
-+DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
++static __init int cpu_has_kvm_support(void)
++{
++	unsigned long ecx = cpuid_ecx(1);
++	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
++}
 +
-+unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
-+		      unsigned long addr, unsigned len)
++static __init int vmx_disabled_by_bios(void)
 +{
-+	const unsigned char *start, *end;
-+	unsigned ret;
++	u64 msr;
 +
-+#define PATCH_SITE(ops, x)					\
-+		case PARAVIRT_PATCH(ops.x):			\
-+			start = start_##ops##_##x;		\
-+			end = end_##ops##_##x;			\
-+			goto patch_site
-+	switch(type) {
-+		PATCH_SITE(pv_irq_ops, irq_disable);
-+		PATCH_SITE(pv_irq_ops, irq_enable);
-+		PATCH_SITE(pv_irq_ops, restore_fl);
-+		PATCH_SITE(pv_irq_ops, save_fl);
-+		PATCH_SITE(pv_cpu_ops, iret);
-+		PATCH_SITE(pv_cpu_ops, irq_enable_syscall_ret);
-+		PATCH_SITE(pv_mmu_ops, read_cr2);
-+		PATCH_SITE(pv_mmu_ops, read_cr3);
-+		PATCH_SITE(pv_mmu_ops, write_cr3);
-+		PATCH_SITE(pv_cpu_ops, clts);
-+		PATCH_SITE(pv_cpu_ops, read_tsc);
++	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
++	return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
++		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
++	    == MSR_IA32_FEATURE_CONTROL_LOCKED;
++	/* locked but not enabled */
++}
 +
-+	patch_site:
-+		ret = paravirt_patch_insns(ibuf, len, start, end);
-+		break;
++static void hardware_enable(void *garbage)
++{
++	int cpu = raw_smp_processor_id();
++	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
++	u64 old;
 +
-+	default:
-+		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
-+		break;
++	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
++	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
++		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
++	    != (MSR_IA32_FEATURE_CONTROL_LOCKED |
++		MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
++		/* enable and lock */
++		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
++		       MSR_IA32_FEATURE_CONTROL_LOCKED |
++		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
++	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
++	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
++		      : "memory", "cc");
++}
++
++static void hardware_disable(void *garbage)
++{
++	asm volatile (ASM_VMX_VMXOFF : : : "cc");
++}
++
++static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
++				      u32 msr, u32 *result)
++{
++	u32 vmx_msr_low, vmx_msr_high;
++	u32 ctl = ctl_min | ctl_opt;
++
++	rdmsr(msr, vmx_msr_low, vmx_msr_high);
++
++	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
++	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
++
++	/* Ensure minimum (required) set of control bits are supported. */
++	if (ctl_min & ~ctl)
++		return -EIO;
++
++	*result = ctl;
++	return 0;
++}
++
++static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
++{
++	u32 vmx_msr_low, vmx_msr_high;
++	u32 min, opt;
++	u32 _pin_based_exec_control = 0;
++	u32 _cpu_based_exec_control = 0;
++	u32 _cpu_based_2nd_exec_control = 0;
++	u32 _vmexit_control = 0;
++	u32 _vmentry_control = 0;
++
++	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
++	opt = 0;
++	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
++				&_pin_based_exec_control) < 0)
++		return -EIO;
++
++	min = CPU_BASED_HLT_EXITING |
++#ifdef CONFIG_X86_64
++	      CPU_BASED_CR8_LOAD_EXITING |
++	      CPU_BASED_CR8_STORE_EXITING |
++#endif
++	      CPU_BASED_USE_IO_BITMAPS |
++	      CPU_BASED_MOV_DR_EXITING |
++	      CPU_BASED_USE_TSC_OFFSETING;
++	opt = CPU_BASED_TPR_SHADOW |
++	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
++	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
++				&_cpu_based_exec_control) < 0)
++		return -EIO;
++#ifdef CONFIG_X86_64
++	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
++		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
++					   ~CPU_BASED_CR8_STORE_EXITING;
++#endif
++	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
++		min = 0;
++		opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
++			SECONDARY_EXEC_WBINVD_EXITING;
++		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
++					&_cpu_based_2nd_exec_control) < 0)
++			return -EIO;
 +	}
-+#undef PATCH_SITE
-+	return ret;
++#ifndef CONFIG_X86_64
++	if (!(_cpu_based_2nd_exec_control &
++				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
++		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
++#endif
++
++	min = 0;
++#ifdef CONFIG_X86_64
++	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
++#endif
++	opt = 0;
++	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
++				&_vmexit_control) < 0)
++		return -EIO;
++
++	min = opt = 0;
++	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
++				&_vmentry_control) < 0)
++		return -EIO;
++
++	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
++
++	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
++	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
++		return -EIO;
++
++#ifdef CONFIG_X86_64
++	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
++	if (vmx_msr_high & (1u<<16))
++		return -EIO;
++#endif
++
++	/* Require Write-Back (WB) memory type for VMCS accesses. */
++	if (((vmx_msr_high >> 18) & 15) != 6)
++		return -EIO;
++
++	vmcs_conf->size = vmx_msr_high & 0x1fff;
++	vmcs_conf->order = get_order(vmcs_config.size);
++	vmcs_conf->revision_id = vmx_msr_low;
++
++	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
++	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
++	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
++	vmcs_conf->vmexit_ctrl         = _vmexit_control;
++	vmcs_conf->vmentry_ctrl        = _vmentry_control;
++
++	return 0;
 +}
-diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
-new file mode 100644
-index 0000000..7d904e1
---- /dev/null
-+++ b/arch/x86/kernel/paravirt_patch_64.c
-@@ -0,0 +1,57 @@
-+#include <asm/paravirt.h>
-+#include <asm/asm-offsets.h>
-+#include <linux/stringify.h>
 +
-+DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
-+DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-+DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
-+DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-+DEF_NATIVE(pv_cpu_ops, iret, "iretq");
-+DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
-+DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
-+DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-+DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
-+DEF_NATIVE(pv_cpu_ops, clts, "clts");
-+DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
++static struct vmcs *alloc_vmcs_cpu(int cpu)
++{
++	int node = cpu_to_node(cpu);
++	struct page *pages;
++	struct vmcs *vmcs;
 +
-+/* the three commands give us more control to how to return from a syscall */
-+DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "movq %gs:" __stringify(pda_oldrsp) ", %rsp; swapgs; sysretq;");
-+DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
++	pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
++	if (!pages)
++		return NULL;
++	vmcs = page_address(pages);
++	memset(vmcs, 0, vmcs_config.size);
++	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
++	return vmcs;
++}
 +
-+unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
-+		      unsigned long addr, unsigned len)
++static struct vmcs *alloc_vmcs(void)
 +{
-+	const unsigned char *start, *end;
-+	unsigned ret;
++	return alloc_vmcs_cpu(raw_smp_processor_id());
++}
 +
-+#define PATCH_SITE(ops, x)					\
-+		case PARAVIRT_PATCH(ops.x):			\
-+			start = start_##ops##_##x;		\
-+			end = end_##ops##_##x;			\
-+			goto patch_site
-+	switch(type) {
-+		PATCH_SITE(pv_irq_ops, restore_fl);
-+		PATCH_SITE(pv_irq_ops, save_fl);
-+		PATCH_SITE(pv_irq_ops, irq_enable);
-+		PATCH_SITE(pv_irq_ops, irq_disable);
-+		PATCH_SITE(pv_cpu_ops, iret);
-+		PATCH_SITE(pv_cpu_ops, irq_enable_syscall_ret);
-+		PATCH_SITE(pv_cpu_ops, swapgs);
-+		PATCH_SITE(pv_mmu_ops, read_cr2);
-+		PATCH_SITE(pv_mmu_ops, read_cr3);
-+		PATCH_SITE(pv_mmu_ops, write_cr3);
-+		PATCH_SITE(pv_cpu_ops, clts);
-+		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
-+		PATCH_SITE(pv_cpu_ops, wbinvd);
++static void free_vmcs(struct vmcs *vmcs)
++{
++	free_pages((unsigned long)vmcs, vmcs_config.order);
++}
 +
-+	patch_site:
-+		ret = paravirt_patch_insns(ibuf, len, start, end);
-+		break;
++static void free_kvm_area(void)
++{
++	int cpu;
 +
-+	default:
-+		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
-+		break;
++	for_each_online_cpu(cpu)
++		free_vmcs(per_cpu(vmxarea, cpu));
++}
++
++static __init int alloc_kvm_area(void)
++{
++	int cpu;
++
++	for_each_online_cpu(cpu) {
++		struct vmcs *vmcs;
++
++		vmcs = alloc_vmcs_cpu(cpu);
++		if (!vmcs) {
++			free_kvm_area();
++			return -ENOMEM;
++		}
++
++		per_cpu(vmxarea, cpu) = vmcs;
 +	}
-+#undef PATCH_SITE
++	return 0;
++}
++
++static __init int hardware_setup(void)
++{
++	if (setup_vmcs_config(&vmcs_config) < 0)
++		return -EIO;
++	return alloc_kvm_area();
++}
++
++static __exit void hardware_unsetup(void)
++{
++	free_kvm_area();
++}
++
++static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
++{
++	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
++
++	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
++		vmcs_write16(sf->selector, save->selector);
++		vmcs_writel(sf->base, save->base);
++		vmcs_write32(sf->limit, save->limit);
++		vmcs_write32(sf->ar_bytes, save->ar);
++	} else {
++		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
++			<< AR_DPL_SHIFT;
++		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
++	}
++}
++
++static void enter_pmode(struct kvm_vcpu *vcpu)
++{
++	unsigned long flags;
++
++	vcpu->arch.rmode.active = 0;
++
++	vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
++	vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
++	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);
++
++	flags = vmcs_readl(GUEST_RFLAGS);
++	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
++	flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
++	vmcs_writel(GUEST_RFLAGS, flags);
++
++	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
++			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
++
++	update_exception_bitmap(vcpu);
++
++	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
++	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
++	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
++	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
++
++	vmcs_write16(GUEST_SS_SELECTOR, 0);
++	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
++
++	vmcs_write16(GUEST_CS_SELECTOR,
++		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
++	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
++}
++
++static gva_t rmode_tss_base(struct kvm *kvm)
++{
++	if (!kvm->arch.tss_addr) {
++		gfn_t base_gfn = kvm->memslots[0].base_gfn +
++				 kvm->memslots[0].npages - 3;
++		return base_gfn << PAGE_SHIFT;
++	}
++	return kvm->arch.tss_addr;
++}
++
++static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
++{
++	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
++
++	save->selector = vmcs_read16(sf->selector);
++	save->base = vmcs_readl(sf->base);
++	save->limit = vmcs_read32(sf->limit);
++	save->ar = vmcs_read32(sf->ar_bytes);
++	vmcs_write16(sf->selector, save->base >> 4);
++	vmcs_write32(sf->base, save->base & 0xfffff);
++	vmcs_write32(sf->limit, 0xffff);
++	vmcs_write32(sf->ar_bytes, 0xf3);
++}
++
++static void enter_rmode(struct kvm_vcpu *vcpu)
++{
++	unsigned long flags;
++
++	vcpu->arch.rmode.active = 1;
++
++	vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
++	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
++
++	vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
++	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
++
++	vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
++	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
++
++	flags = vmcs_readl(GUEST_RFLAGS);
++	vcpu->arch.rmode.save_iopl
++		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
++
++	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
++
++	vmcs_writel(GUEST_RFLAGS, flags);
++	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
++	update_exception_bitmap(vcpu);
++
++	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
++	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
++	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
++
++	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
++	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
++	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
++		vmcs_writel(GUEST_CS_BASE, 0xf0000);
++	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
++
++	fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
++	fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
++	fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
++	fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
++
++	kvm_mmu_reset_context(vcpu);
++	init_rmode_tss(vcpu->kvm);
++}
++
++#ifdef CONFIG_X86_64
++
++static void enter_lmode(struct kvm_vcpu *vcpu)
++{
++	u32 guest_tr_ar;
++
++	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
++	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
++		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
++		       __FUNCTION__);
++		vmcs_write32(GUEST_TR_AR_BYTES,
++			     (guest_tr_ar & ~AR_TYPE_MASK)
++			     | AR_TYPE_BUSY_64_TSS);
++	}
++
++	vcpu->arch.shadow_efer |= EFER_LMA;
++
++	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
++	vmcs_write32(VM_ENTRY_CONTROLS,
++		     vmcs_read32(VM_ENTRY_CONTROLS)
++		     | VM_ENTRY_IA32E_MODE);
++}
++
++static void exit_lmode(struct kvm_vcpu *vcpu)
++{
++	vcpu->arch.shadow_efer &= ~EFER_LMA;
++
++	vmcs_write32(VM_ENTRY_CONTROLS,
++		     vmcs_read32(VM_ENTRY_CONTROLS)
++		     & ~VM_ENTRY_IA32E_MODE);
++}
++
++#endif
++
++static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
++{
++	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
++	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
++}
++
++static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
++{
++	vmx_fpu_deactivate(vcpu);
++
++	if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
++		enter_pmode(vcpu);
++
++	if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
++		enter_rmode(vcpu);
++
++#ifdef CONFIG_X86_64
++	if (vcpu->arch.shadow_efer & EFER_LME) {
++		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
++			enter_lmode(vcpu);
++		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
++			exit_lmode(vcpu);
++	}
++#endif
++
++	vmcs_writel(CR0_READ_SHADOW, cr0);
++	vmcs_writel(GUEST_CR0,
++		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
++	vcpu->arch.cr0 = cr0;
++
++	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
++		vmx_fpu_activate(vcpu);
++}
++
++static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
++{
++	vmcs_writel(GUEST_CR3, cr3);
++	if (vcpu->arch.cr0 & X86_CR0_PE)
++		vmx_fpu_deactivate(vcpu);
++}
++
++static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
++{
++	vmcs_writel(CR4_READ_SHADOW, cr4);
++	vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ?
++		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
++	vcpu->arch.cr4 = cr4;
++}
++
++#ifdef CONFIG_X86_64
++
++static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
++
++	vcpu->arch.shadow_efer = efer;
++	if (efer & EFER_LMA) {
++		vmcs_write32(VM_ENTRY_CONTROLS,
++				     vmcs_read32(VM_ENTRY_CONTROLS) |
++				     VM_ENTRY_IA32E_MODE);
++		msr->data = efer;
++
++	} else {
++		vmcs_write32(VM_ENTRY_CONTROLS,
++				     vmcs_read32(VM_ENTRY_CONTROLS) &
++				     ~VM_ENTRY_IA32E_MODE);
++
++		msr->data = efer & ~EFER_LME;
++	}
++	setup_msrs(vmx);
++}
++
++#endif
++
++static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
++{
++	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
++
++	return vmcs_readl(sf->base);
++}
++
++static void vmx_get_segment(struct kvm_vcpu *vcpu,
++			    struct kvm_segment *var, int seg)
++{
++	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
++	u32 ar;
++
++	var->base = vmcs_readl(sf->base);
++	var->limit = vmcs_read32(sf->limit);
++	var->selector = vmcs_read16(sf->selector);
++	ar = vmcs_read32(sf->ar_bytes);
++	if (ar & AR_UNUSABLE_MASK)
++		ar = 0;
++	var->type = ar & 15;
++	var->s = (ar >> 4) & 1;
++	var->dpl = (ar >> 5) & 3;
++	var->present = (ar >> 7) & 1;
++	var->avl = (ar >> 12) & 1;
++	var->l = (ar >> 13) & 1;
++	var->db = (ar >> 14) & 1;
++	var->g = (ar >> 15) & 1;
++	var->unusable = (ar >> 16) & 1;
++}
++
++static u32 vmx_segment_access_rights(struct kvm_segment *var)
++{
++	u32 ar;
++
++	if (var->unusable)
++		ar = 1 << 16;
++	else {
++		ar = var->type & 15;
++		ar |= (var->s & 1) << 4;
++		ar |= (var->dpl & 3) << 5;
++		ar |= (var->present & 1) << 7;
++		ar |= (var->avl & 1) << 12;
++		ar |= (var->l & 1) << 13;
++		ar |= (var->db & 1) << 14;
++		ar |= (var->g & 1) << 15;
++	}
++	if (ar == 0) /* a 0 value means unusable */
++		ar = AR_UNUSABLE_MASK;
++
++	return ar;
++}
++
++static void vmx_set_segment(struct kvm_vcpu *vcpu,
++			    struct kvm_segment *var, int seg)
++{
++	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
++	u32 ar;
++
++	if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
++		vcpu->arch.rmode.tr.selector = var->selector;
++		vcpu->arch.rmode.tr.base = var->base;
++		vcpu->arch.rmode.tr.limit = var->limit;
++		vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
++		return;
++	}
++	vmcs_writel(sf->base, var->base);
++	vmcs_write32(sf->limit, var->limit);
++	vmcs_write16(sf->selector, var->selector);
++	if (vcpu->arch.rmode.active && var->s) {
++		/*
++		 * Hack real-mode segments into vm86 compatibility.
++		 */
++		if (var->base == 0xffff0000 && var->selector == 0xf000)
++			vmcs_writel(sf->base, 0xf0000);
++		ar = 0xf3;
++	} else
++		ar = vmx_segment_access_rights(var);
++	vmcs_write32(sf->ar_bytes, ar);
++}
++
++static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
++{
++	u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
++
++	*db = (ar >> 14) & 1;
++	*l = (ar >> 13) & 1;
++}
++
++static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
++{
++	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
++	dt->base = vmcs_readl(GUEST_IDTR_BASE);
++}
++
++static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
++{
++	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
++	vmcs_writel(GUEST_IDTR_BASE, dt->base);
++}
++
++static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
++{
++	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
++	dt->base = vmcs_readl(GUEST_GDTR_BASE);
++}
++
++static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
++{
++	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
++	vmcs_writel(GUEST_GDTR_BASE, dt->base);
++}
++
++static int init_rmode_tss(struct kvm *kvm)
++{
++	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
++	u16 data = 0;
++	int ret = 0;
++	int r;
++
++	down_read(&current->mm->mmap_sem);
++	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
++	if (r < 0)
++		goto out;
++	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
++	r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
++	if (r < 0)
++		goto out;
++	r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
++	if (r < 0)
++		goto out;
++	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
++	if (r < 0)
++		goto out;
++	data = ~0;
++	r = kvm_write_guest_page(kvm, fn, &data,
++				 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
++				 sizeof(u8));
++	if (r < 0)
++		goto out;
++
++	ret = 1;
++out:
++	up_read(&current->mm->mmap_sem);
 +	return ret;
 +}
-diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
-index 6bf1f71..21f34db 100644
---- a/arch/x86/kernel/pci-calgary_64.c
-+++ b/arch/x86/kernel/pci-calgary_64.c
-@@ -30,7 +30,6 @@
- #include <linux/spinlock.h>
- #include <linux/string.h>
- #include <linux/dma-mapping.h>
--#include <linux/init.h>
- #include <linux/bitops.h>
- #include <linux/pci_ids.h>
- #include <linux/pci.h>
-@@ -183,7 +182,7 @@ static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
- 
- /* enable this to stress test the chip's TCE cache */
- #ifdef CONFIG_IOMMU_DEBUG
--int debugging __read_mostly = 1;
-+static int debugging = 1;
- 
- static inline unsigned long verify_bit_range(unsigned long* bitmap,
- 	int expected, unsigned long start, unsigned long end)
-@@ -202,7 +201,7 @@ static inline unsigned long verify_bit_range(unsigned long* bitmap,
- 	return ~0UL;
- }
- #else /* debugging is disabled */
--int debugging __read_mostly = 0;
-+static int debugging;
- 
- static inline unsigned long verify_bit_range(unsigned long* bitmap,
- 	int expected, unsigned long start, unsigned long end)
-diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
-index 5552d23..a82473d 100644
---- a/arch/x86/kernel/pci-dma_64.c
-+++ b/arch/x86/kernel/pci-dma_64.c
-@@ -13,7 +13,6 @@
- #include <asm/calgary.h>
- 
- int iommu_merge __read_mostly = 0;
--EXPORT_SYMBOL(iommu_merge);
- 
- dma_addr_t bad_dma_address __read_mostly;
- EXPORT_SYMBOL(bad_dma_address);
-@@ -230,7 +229,7 @@ EXPORT_SYMBOL(dma_set_mask);
-  * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
-  * documentation.
-  */
--__init int iommu_setup(char *p)
-+static __init int iommu_setup(char *p)
- {
- 	iommu_merge = 1;
- 
-diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
-index 06bcba5..4d5cc71 100644
---- a/arch/x86/kernel/pci-gart_64.c
-+++ b/arch/x86/kernel/pci-gart_64.c
-@@ -1,12 +1,12 @@
- /*
-  * Dynamic DMA mapping support for AMD Hammer.
-- * 
-+ *
-  * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
-  * This allows to use PCI devices that only support 32bit addresses on systems
-- * with more than 4GB. 
-+ * with more than 4GB.
-  *
-  * See Documentation/DMA-mapping.txt for the interface specification.
-- * 
-+ *
-  * Copyright 2002 Andi Kleen, SuSE Labs.
-  * Subject to the GNU General Public License v2 only.
-  */
-@@ -37,23 +37,26 @@
- #include <asm/k8.h>
- 
- static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
--static unsigned long iommu_size; 	/* size of remapping area bytes */
-+static unsigned long iommu_size;	/* size of remapping area bytes */
- static unsigned long iommu_pages;	/* .. and in pages */
- 
--static u32 *iommu_gatt_base; 		/* Remapping table */
-+static u32 *iommu_gatt_base;		/* Remapping table */
- 
--/* If this is disabled the IOMMU will use an optimized flushing strategy
--   of only flushing when an mapping is reused. With it true the GART is flushed 
--   for every mapping. Problem is that doing the lazy flush seems to trigger
--   bugs with some popular PCI cards, in particular 3ware (but has been also
--   also seen with Qlogic at least). */
++
++static void seg_setup(int seg)
++{
++	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
++
++	vmcs_write16(sf->selector, 0);
++	vmcs_writel(sf->base, 0);
++	vmcs_write32(sf->limit, 0xffff);
++	vmcs_write32(sf->ar_bytes, 0x93);
++}
++
++static int alloc_apic_access_page(struct kvm *kvm)
++{
++	struct kvm_userspace_memory_region kvm_userspace_mem;
++	int r = 0;
++
++	down_write(&current->mm->mmap_sem);
++	if (kvm->arch.apic_access_page)
++		goto out;
++	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
++	kvm_userspace_mem.flags = 0;
++	kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
++	kvm_userspace_mem.memory_size = PAGE_SIZE;
++	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
++	if (r)
++		goto out;
++	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
++out:
++	up_write(&current->mm->mmap_sem);
++	return r;
++}
++
 +/*
-+ * If this is disabled the IOMMU will use an optimized flushing strategy
-+ * of only flushing when an mapping is reused. With it true the GART is
-+ * flushed for every mapping. Problem is that doing the lazy flush seems
-+ * to trigger bugs with some popular PCI cards, in particular 3ware (but
-+ * has been also also seen with Qlogic at least).
++ * Sets up the vmcs for emulated real mode.
 + */
- int iommu_fullflush = 1;
- 
--/* Allocation bitmap for the remapping area */ 
-+/* Allocation bitmap for the remapping area: */
- static DEFINE_SPINLOCK(iommu_bitmap_lock);
--static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
-+/* Guarded by iommu_bitmap_lock: */
-+static unsigned long *iommu_gart_bitmap;
- 
--static u32 gart_unmapped_entry; 
-+static u32 gart_unmapped_entry;
- 
- #define GPTE_VALID    1
- #define GPTE_COHERENT 2
-@@ -61,10 +64,10 @@ static u32 gart_unmapped_entry;
- 	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
- #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
- 
--#define to_pages(addr,size) \
-+#define to_pages(addr, size) \
- 	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
- 
--#define EMERGENCY_PAGES 32 /* = 128KB */ 
-+#define EMERGENCY_PAGES 32 /* = 128KB */
- 
- #ifdef CONFIG_AGP
- #define AGPEXTERN extern
-@@ -77,130 +80,152 @@ AGPEXTERN int agp_memory_reserved;
- AGPEXTERN __u32 *agp_gatt_table;
- 
- static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
--static int need_flush; 		/* global flush state. set for each gart wrap */
-+static int need_flush;		/* global flush state. set for each gart wrap */
- 
--static unsigned long alloc_iommu(int size) 
--{ 	
-+static unsigned long alloc_iommu(int size)
++static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 +{
- 	unsigned long offset, flags;
- 
--	spin_lock_irqsave(&iommu_bitmap_lock, flags);	
--	offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
-+	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-+	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
-+					iommu_pages, size);
- 	if (offset == -1) {
- 		need_flush = 1;
--		offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
-+		offset = find_next_zero_string(iommu_gart_bitmap, 0,
-+						iommu_pages, size);
- 	}
--	if (offset != -1) { 
--		set_bit_string(iommu_gart_bitmap, offset, size); 
--		next_bit = offset+size; 
--		if (next_bit >= iommu_pages) { 
-+	if (offset != -1) {
-+		set_bit_string(iommu_gart_bitmap, offset, size);
-+		next_bit = offset+size;
-+		if (next_bit >= iommu_pages) {
- 			next_bit = 0;
- 			need_flush = 1;
--		} 
--	} 
-+		}
++	u32 host_sysenter_cs;
++	u32 junk;
++	unsigned long a;
++	struct descriptor_table dt;
++	int i;
++	unsigned long kvm_vmx_return;
++	u32 exec_control;
++
++	/* I/O */
++	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
++	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
++
++	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
++
++	/* Control */
++	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
++		vmcs_config.pin_based_exec_ctrl);
++
++	exec_control = vmcs_config.cpu_based_exec_ctrl;
++	if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
++		exec_control &= ~CPU_BASED_TPR_SHADOW;
++#ifdef CONFIG_X86_64
++		exec_control |= CPU_BASED_CR8_STORE_EXITING |
++				CPU_BASED_CR8_LOAD_EXITING;
++#endif
 +	}
- 	if (iommu_fullflush)
- 		need_flush = 1;
--	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);      
-+	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
++	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
 +
- 	return offset;
--} 
++	if (cpu_has_secondary_exec_ctrls()) {
++		exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
++		if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
++			exec_control &=
++				~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
++		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
++	}
++
++	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
++	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
++	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
++
++	vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
++	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
++	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
++
++	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
++	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
++	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
++	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
++	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
++	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
++#ifdef CONFIG_X86_64
++	rdmsrl(MSR_FS_BASE, a);
++	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
++	rdmsrl(MSR_GS_BASE, a);
++	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
++#else
++	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
++	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
++#endif
++
++	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
++
++	get_idt(&dt);
++	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
++
++	asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
++	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
++	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
++	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
++	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
++
++	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
++	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
++	rdmsrl(MSR_IA32_SYSENTER_ESP, a);
++	vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
++	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
++	vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
++
++	for (i = 0; i < NR_VMX_MSR; ++i) {
++		u32 index = vmx_msr_index[i];
++		u32 data_low, data_high;
++		u64 data;
++		int j = vmx->nmsrs;
++
++		if (rdmsr_safe(index, &data_low, &data_high) < 0)
++			continue;
++		if (wrmsr_safe(index, data_low, data_high) < 0)
++			continue;
++		data = data_low | ((u64)data_high << 32);
++		vmx->host_msrs[j].index = index;
++		vmx->host_msrs[j].reserved = 0;
++		vmx->host_msrs[j].data = data;
++		vmx->guest_msrs[j] = vmx->host_msrs[j];
++		++vmx->nmsrs;
++	}
++
++	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
++
++	/* 22.2.1, 20.8.1 */
++	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
++
++	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
++	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
++
++	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
++		if (alloc_apic_access_page(vmx->vcpu.kvm) != 0)
++			return -ENOMEM;
++
++	return 0;
 +}
- 
- static void free_iommu(unsigned long offset, int size)
--{ 
++
++static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 +{
- 	unsigned long flags;
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	u64 msr;
++	int ret;
 +
- 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
- 	__clear_bit_string(iommu_gart_bitmap, offset, size);
- 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
--} 
++	if (!init_rmode_tss(vmx->vcpu.kvm)) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	vmx->vcpu.arch.rmode.active = 0;
++
++	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
++	set_cr8(&vmx->vcpu, 0);
++	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
++	if (vmx->vcpu.vcpu_id == 0)
++		msr |= MSR_IA32_APICBASE_BSP;
++	kvm_set_apic_base(&vmx->vcpu, msr);
++
++	fx_init(&vmx->vcpu);
++
++	/*
++	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
++	 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
++	 */
++	if (vmx->vcpu.vcpu_id == 0) {
++		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
++		vmcs_writel(GUEST_CS_BASE, 0x000f0000);
++	} else {
++		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
++		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
++	}
++	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
++	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
++
++	seg_setup(VCPU_SREG_DS);
++	seg_setup(VCPU_SREG_ES);
++	seg_setup(VCPU_SREG_FS);
++	seg_setup(VCPU_SREG_GS);
++	seg_setup(VCPU_SREG_SS);
++
++	vmcs_write16(GUEST_TR_SELECTOR, 0);
++	vmcs_writel(GUEST_TR_BASE, 0);
++	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
++	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
++
++	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
++	vmcs_writel(GUEST_LDTR_BASE, 0);
++	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
++	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
++
++	vmcs_write32(GUEST_SYSENTER_CS, 0);
++	vmcs_writel(GUEST_SYSENTER_ESP, 0);
++	vmcs_writel(GUEST_SYSENTER_EIP, 0);
++
++	vmcs_writel(GUEST_RFLAGS, 0x02);
++	if (vmx->vcpu.vcpu_id == 0)
++		vmcs_writel(GUEST_RIP, 0xfff0);
++	else
++		vmcs_writel(GUEST_RIP, 0);
++	vmcs_writel(GUEST_RSP, 0);
++
++	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
++	vmcs_writel(GUEST_DR7, 0x400);
++
++	vmcs_writel(GUEST_GDTR_BASE, 0);
++	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
++
++	vmcs_writel(GUEST_IDTR_BASE, 0);
++	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
++
++	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
++	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
++	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
++
++	guest_write_tsc(0);
++
++	/* Special registers */
++	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
++
++	setup_msrs(vmx);
++
++	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
++
++	if (cpu_has_vmx_tpr_shadow()) {
++		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
++		if (vm_need_tpr_shadow(vmx->vcpu.kvm))
++			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
++				page_to_phys(vmx->vcpu.arch.apic->regs_page));
++		vmcs_write32(TPR_THRESHOLD, 0);
++	}
++
++	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
++		vmcs_write64(APIC_ACCESS_ADDR,
++			     page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
++
++	vmx->vcpu.arch.cr0 = 0x60000010;
++	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
++	vmx_set_cr4(&vmx->vcpu, 0);
++#ifdef CONFIG_X86_64
++	vmx_set_efer(&vmx->vcpu, 0);
++#endif
++	vmx_fpu_activate(&vmx->vcpu);
++	update_exception_bitmap(&vmx->vcpu);
++
++	return 0;
++
++out:
++	return ret;
 +}
- 
--/* 
-+/*
-  * Use global flush state to avoid races with multiple flushers.
-  */
- static void flush_gart(void)
--{ 
++
++static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 +{
- 	unsigned long flags;
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
 +
- 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
- 	if (need_flush) {
- 		k8_flush_garts();
- 		need_flush = 0;
--	} 
++	if (vcpu->arch.rmode.active) {
++		vmx->rmode.irq.pending = true;
++		vmx->rmode.irq.vector = irq;
++		vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
++		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
++			     irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
++		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
++		vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
++		return;
 +	}
- 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
--} 
++	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
++			irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 +}
- 
- #ifdef CONFIG_IOMMU_LEAK
- 
--#define SET_LEAK(x) if (iommu_leak_tab) \
--			iommu_leak_tab[x] = __builtin_return_address(0);
--#define CLEAR_LEAK(x) if (iommu_leak_tab) \
--			iommu_leak_tab[x] = NULL;
-+#define SET_LEAK(x)							\
-+	do {								\
-+		if (iommu_leak_tab)					\
-+			iommu_leak_tab[x] = __builtin_return_address(0);\
-+	} while (0)
 +
-+#define CLEAR_LEAK(x)							\
-+	do {								\
-+		if (iommu_leak_tab)					\
-+			iommu_leak_tab[x] = NULL;			\
-+	} while (0)
- 
- /* Debugging aid for drivers that don't free their IOMMU tables */
--static void **iommu_leak_tab; 
-+static void **iommu_leak_tab;
- static int leak_trace;
- static int iommu_leak_pages = 20;
++static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
++{
++	int word_index = __ffs(vcpu->arch.irq_summary);
++	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
++	int irq = word_index * BITS_PER_LONG + bit_index;
 +
- static void dump_leak(void)
- {
- 	int i;
--	static int dump; 
--	if (dump || !iommu_leak_tab) return;
-+	static int dump;
++	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
++	if (!vcpu->arch.irq_pending[word_index])
++		clear_bit(word_index, &vcpu->arch.irq_summary);
++	vmx_inject_irq(vcpu, irq);
++}
 +
-+	if (dump || !iommu_leak_tab)
-+		return;
- 	dump = 1;
--	show_stack(NULL,NULL);
--	/* Very crude. dump some from the end of the table too */ 
--	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages); 
--	for (i = 0; i < iommu_leak_pages; i+=2) {
--		printk("%lu: ", iommu_pages-i);
--		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
--		printk("%c", (i+1)%2 == 0 ? '\n' : ' '); 
--	} 
--	printk("\n");
-+	show_stack(NULL, NULL);
 +
-+	/* Very crude. dump some from the end of the table too */
-+	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
-+	       iommu_leak_pages);
-+	for (i = 0; i < iommu_leak_pages; i += 2) {
-+		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
-+		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
-+		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
++static void do_interrupt_requests(struct kvm_vcpu *vcpu,
++				       struct kvm_run *kvm_run)
++{
++	u32 cpu_based_vm_exec_control;
++
++	vcpu->arch.interrupt_window_open =
++		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
++		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
++
++	if (vcpu->arch.interrupt_window_open &&
++	    vcpu->arch.irq_summary &&
++	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
++		/*
++		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
++		 */
++		kvm_do_inject_irq(vcpu);
++
++	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
++	if (!vcpu->arch.interrupt_window_open &&
++	    (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
++		/*
++		 * Interrupts blocked.  Wait for unblock.
++		 */
++		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
++	else
++		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
++	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
++}
++
++static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
++{
++	int ret;
++	struct kvm_userspace_memory_region tss_mem = {
++		.slot = 8,
++		.guest_phys_addr = addr,
++		.memory_size = PAGE_SIZE * 3,
++		.flags = 0,
++	};
++
++	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
++	if (ret)
++		return ret;
++	kvm->arch.tss_addr = addr;
++	return 0;
++}
++
++static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
++{
++	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
++
++	set_debugreg(dbg->bp[0], 0);
++	set_debugreg(dbg->bp[1], 1);
++	set_debugreg(dbg->bp[2], 2);
++	set_debugreg(dbg->bp[3], 3);
++
++	if (dbg->singlestep) {
++		unsigned long flags;
++
++		flags = vmcs_readl(GUEST_RFLAGS);
++		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
++		vmcs_writel(GUEST_RFLAGS, flags);
 +	}
-+	printk(KERN_DEBUG "\n");
- }
- #else
--#define SET_LEAK(x)
--#define CLEAR_LEAK(x)
-+# define SET_LEAK(x)
-+# define CLEAR_LEAK(x)
- #endif
- 
- static void iommu_full(struct device *dev, size_t size, int dir)
- {
--	/* 
++}
++
++static int handle_rmode_exception(struct kvm_vcpu *vcpu,
++				  int vec, u32 err_code)
++{
++	if (!vcpu->arch.rmode.active)
++		return 0;
++
 +	/*
- 	 * Ran out of IOMMU space for this operation. This is very bad.
- 	 * Unfortunately the drivers cannot handle this operation properly.
--	 * Return some non mapped prereserved space in the aperture and 
-+	 * Return some non mapped prereserved space in the aperture and
- 	 * let the Northbridge deal with it. This will result in garbage
- 	 * in the IO operation. When the size exceeds the prereserved space
--	 * memory corruption will occur or random memory will be DMAed 
-+	 * memory corruption will occur or random memory will be DMAed
- 	 * out. Hopefully no network devices use single mappings that big.
--	 */ 
--	
--	printk(KERN_ERR 
--  "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
--	       size, dev->bus_id);
++	 * Instruction with address size override prefix opcode 0x67
++	 * Cause the #SS fault with 0 error code in VM86 mode.
 +	 */
++	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
++		if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
++			return 1;
++	return 0;
++}
 +
-+	printk(KERN_ERR
-+		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
-+		size, dev->bus_id);
- 
- 	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
- 		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- 			panic("PCI-DMA: Memory would be corrupted\n");
--		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL) 
--			panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
--	} 
--
-+		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+			panic(KERN_ERR
-+				"PCI-DMA: Random memory would be DMAed\n");
++static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	u32 intr_info, error_code;
++	unsigned long cr2, rip;
++	u32 vect_info;
++	enum emulation_result er;
++
++	vect_info = vmx->idt_vectoring_info;
++	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
++
++	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
++						!is_page_fault(intr_info))
++		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
++		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
++
++	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
++		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
++		set_bit(irq, vcpu->arch.irq_pending);
++		set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
 +	}
- #ifdef CONFIG_IOMMU_LEAK
--	dump_leak(); 
-+	dump_leak();
- #endif
--} 
++
++	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
++		return 1;  /* already handled by vmx_vcpu_run() */
++
++	if (is_no_device(intr_info)) {
++		vmx_fpu_activate(vcpu);
++		return 1;
++	}
++
++	if (is_invalid_opcode(intr_info)) {
++		er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
++		if (er != EMULATE_DONE)
++			kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
++	}
++
++	error_code = 0;
++	rip = vmcs_readl(GUEST_RIP);
++	if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
++		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
++	if (is_page_fault(intr_info)) {
++		cr2 = vmcs_readl(EXIT_QUALIFICATION);
++		return kvm_mmu_page_fault(vcpu, cr2, error_code);
++	}
++
++	if (vcpu->arch.rmode.active &&
++	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
++								error_code)) {
++		if (vcpu->arch.halt_request) {
++			vcpu->arch.halt_request = 0;
++			return kvm_emulate_halt(vcpu);
++		}
++		return 1;
++	}
++
++	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
++	    (INTR_TYPE_EXCEPTION | 1)) {
++		kvm_run->exit_reason = KVM_EXIT_DEBUG;
++		return 0;
++	}
++	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
++	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
++	kvm_run->ex.error_code = error_code;
++	return 0;
 +}
- 
--static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
--{ 
-+static inline int
-+need_iommu(struct device *dev, unsigned long addr, size_t size)
-+{
- 	u64 mask = *dev->dma_mask;
- 	int high = addr + size > mask;
- 	int mmu = high;
--	if (force_iommu) 
--		mmu = 1; 
--	return mmu; 
 +
-+	if (force_iommu)
-+		mmu = 1;
++static int handle_external_interrupt(struct kvm_vcpu *vcpu,
++				     struct kvm_run *kvm_run)
++{
++	++vcpu->stat.irq_exits;
++	return 1;
++}
 +
-+	return mmu;
- }
- 
--static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
--{ 
-+static inline int
-+nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
++static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 +{
- 	u64 mask = *dev->dma_mask;
- 	int high = addr + size > mask;
- 	int mmu = high;
--	return mmu; 
++	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
++	return 0;
++}
 +
-+	return mmu;
- }
- 
- /* Map a single continuous physical area into the IOMMU.
-@@ -208,13 +233,14 @@ static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t
-  */
- static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
- 				size_t size, int dir)
--{ 
++static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 +{
- 	unsigned long npages = to_pages(phys_mem, size);
- 	unsigned long iommu_page = alloc_iommu(npages);
- 	int i;
++	unsigned long exit_qualification;
++	int size, down, in, string, rep;
++	unsigned port;
 +
- 	if (iommu_page == -1) {
- 		if (!nonforced_iommu(dev, phys_mem, size))
--			return phys_mem; 
-+			return phys_mem;
- 		if (panic_on_overflow)
- 			panic("dma_map_area overflow %lu bytes\n", size);
- 		iommu_full(dev, size, dir);
-@@ -229,35 +255,39 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
- 	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
- }
- 
--static dma_addr_t gart_map_simple(struct device *dev, char *buf,
--				 size_t size, int dir)
-+static dma_addr_t
-+gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
- {
- 	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
++	++vcpu->stat.io_exits;
++	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
++	string = (exit_qualification & 16) != 0;
 +
- 	flush_gart();
++	if (string) {
++		if (emulate_instruction(vcpu,
++					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
++			return 0;
++		return 1;
++	}
 +
- 	return map;
- }
- 
- /* Map a single area into the IOMMU */
--static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
-+static dma_addr_t
-+gart_map_single(struct device *dev, void *addr, size_t size, int dir)
- {
- 	unsigned long phys_mem, bus;
- 
- 	if (!dev)
- 		dev = &fallback_dev;
- 
--	phys_mem = virt_to_phys(addr); 
-+	phys_mem = virt_to_phys(addr);
- 	if (!need_iommu(dev, phys_mem, size))
--		return phys_mem; 
-+		return phys_mem;
- 
- 	bus = gart_map_simple(dev, addr, size, dir);
--	return bus; 
++	size = (exit_qualification & 7) + 1;
++	in = (exit_qualification & 8) != 0;
++	down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
++	rep = (exit_qualification & 32) != 0;
++	port = exit_qualification >> 16;
 +
-+	return bus;
- }
- 
- /*
-  * Free a DMA mapping.
-  */
- static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
--		      size_t size, int direction)
-+			      size_t size, int direction)
- {
- 	unsigned long iommu_page;
- 	int npages;
-@@ -266,6 +296,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
- 	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
- 	    dma_addr >= iommu_bus_base + iommu_size)
- 		return;
++	return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
++}
 +
- 	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
- 	npages = to_pages(dma_addr, size);
- 	for (i = 0; i < npages; i++) {
-@@ -278,7 +309,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
- /*
-  * Wrapper for pci_unmap_single working with scatterlists.
-  */
--static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 +static void
-+gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
- {
- 	struct scatterlist *s;
- 	int i;
-@@ -303,12 +335,13 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
- 
- 	for_each_sg(sg, s, nents, i) {
- 		unsigned long addr = sg_phys(s);
--		if (nonforced_iommu(dev, addr, s->length)) { 
++vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
++{
++	/*
++	 * Patch in the VMCALL instruction:
++	 */
++	hypercall[0] = 0x0f;
++	hypercall[1] = 0x01;
++	hypercall[2] = 0xc1;
++}
 +
-+		if (nonforced_iommu(dev, addr, s->length)) {
- 			addr = dma_map_area(dev, addr, s->length, dir);
--			if (addr == bad_dma_address) { 
--				if (i > 0) 
-+			if (addr == bad_dma_address) {
-+				if (i > 0)
- 					gart_unmap_sg(dev, sg, i, dir);
--				nents = 0; 
-+				nents = 0;
- 				sg[0].dma_length = 0;
- 				break;
- 			}
-@@ -317,15 +350,16 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
- 		s->dma_length = s->length;
- 	}
- 	flush_gart();
++static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	unsigned long exit_qualification;
++	int cr;
++	int reg;
 +
- 	return nents;
- }
- 
- /* Map multiple scatterlist entries continuous into the first. */
- static int __dma_map_cont(struct scatterlist *start, int nelems,
--		      struct scatterlist *sout, unsigned long pages)
-+			  struct scatterlist *sout, unsigned long pages)
- {
- 	unsigned long iommu_start = alloc_iommu(pages);
--	unsigned long iommu_page = iommu_start; 
-+	unsigned long iommu_page = iommu_start;
- 	struct scatterlist *s;
- 	int i;
- 
-@@ -335,32 +369,33 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
- 	for_each_sg(start, s, nelems, i) {
- 		unsigned long pages, addr;
- 		unsigned long phys_addr = s->dma_address;
--		
++	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
++	cr = exit_qualification & 15;
++	reg = (exit_qualification >> 8) & 15;
++	switch ((exit_qualification >> 4) & 3) {
++	case 0: /* mov to cr */
++		switch (cr) {
++		case 0:
++			vcpu_load_rsp_rip(vcpu);
++			set_cr0(vcpu, vcpu->arch.regs[reg]);
++			skip_emulated_instruction(vcpu);
++			return 1;
++		case 3:
++			vcpu_load_rsp_rip(vcpu);
++			set_cr3(vcpu, vcpu->arch.regs[reg]);
++			skip_emulated_instruction(vcpu);
++			return 1;
++		case 4:
++			vcpu_load_rsp_rip(vcpu);
++			set_cr4(vcpu, vcpu->arch.regs[reg]);
++			skip_emulated_instruction(vcpu);
++			return 1;
++		case 8:
++			vcpu_load_rsp_rip(vcpu);
++			set_cr8(vcpu, vcpu->arch.regs[reg]);
++			skip_emulated_instruction(vcpu);
++			if (irqchip_in_kernel(vcpu->kvm))
++				return 1;
++			kvm_run->exit_reason = KVM_EXIT_SET_TPR;
++			return 0;
++		};
++		break;
++	case 2: /* clts */
++		vcpu_load_rsp_rip(vcpu);
++		vmx_fpu_deactivate(vcpu);
++		vcpu->arch.cr0 &= ~X86_CR0_TS;
++		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
++		vmx_fpu_activate(vcpu);
++		skip_emulated_instruction(vcpu);
++		return 1;
++	case 1: /*mov from cr*/
++		switch (cr) {
++		case 3:
++			vcpu_load_rsp_rip(vcpu);
++			vcpu->arch.regs[reg] = vcpu->arch.cr3;
++			vcpu_put_rsp_rip(vcpu);
++			skip_emulated_instruction(vcpu);
++			return 1;
++		case 8:
++			vcpu_load_rsp_rip(vcpu);
++			vcpu->arch.regs[reg] = get_cr8(vcpu);
++			vcpu_put_rsp_rip(vcpu);
++			skip_emulated_instruction(vcpu);
++			return 1;
++		}
++		break;
++	case 3: /* lmsw */
++		lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
 +
- 		BUG_ON(s != start && s->offset);
- 		if (s == start) {
- 			sout->dma_address = iommu_bus_base;
- 			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
- 			sout->dma_length = s->length;
--		} else { 
--			sout->dma_length += s->length; 
-+		} else {
-+			sout->dma_length += s->length;
- 		}
- 
- 		addr = phys_addr;
--		pages = to_pages(s->offset, s->length); 
--		while (pages--) { 
--			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); 
-+		pages = to_pages(s->offset, s->length);
-+		while (pages--) {
-+			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
- 			SET_LEAK(iommu_page);
- 			addr += PAGE_SIZE;
- 			iommu_page++;
- 		}
--	} 
--	BUG_ON(iommu_page - iommu_start != pages);	
++		skip_emulated_instruction(vcpu);
++		return 1;
++	default:
++		break;
 +	}
-+	BUG_ON(iommu_page - iommu_start != pages);
++	kvm_run->exit_reason = 0;
++	pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
++	       (int)(exit_qualification >> 4) & 3, cr);
++	return 0;
++}
 +
- 	return 0;
- }
- 
--static inline int dma_map_cont(struct scatterlist *start, int nelems,
--		      struct scatterlist *sout,
--		      unsigned long pages, int need)
-+static inline int
-+dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
-+	     unsigned long pages, int need)
- {
- 	if (!need) {
- 		BUG_ON(nelems != 1);
-@@ -370,22 +405,19 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems,
- 	}
- 	return __dma_map_cont(start, nelems, sout, pages);
- }
--		
++static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	unsigned long exit_qualification;
++	unsigned long val;
++	int dr, reg;
 +
- /*
-  * DMA map all entries in a scatterlist.
-- * Merge chunks that have page aligned sizes into a continuous mapping. 
-+ * Merge chunks that have page aligned sizes into a continuous mapping.
-  */
--static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
--			int dir)
-+static int
-+gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
- {
--	int i;
--	int out;
--	int start;
--	unsigned long pages = 0;
--	int need = 0, nextneed;
- 	struct scatterlist *s, *ps, *start_sg, *sgmap;
-+	int need = 0, nextneed, i, out, start;
-+	unsigned long pages = 0;
- 
--	if (nents == 0) 
-+	if (nents == 0)
- 		return 0;
- 
- 	if (!dev)
-@@ -397,15 +429,19 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- 	ps = NULL; /* shut up gcc */
- 	for_each_sg(sg, s, nents, i) {
- 		dma_addr_t addr = sg_phys(s);
++	/*
++	 * FIXME: this code assumes the host is debugging the guest.
++	 *        need to deal with guest debugging itself too.
++	 */
++	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
++	dr = exit_qualification & 7;
++	reg = (exit_qualification >> 8) & 15;
++	vcpu_load_rsp_rip(vcpu);
++	if (exit_qualification & 16) {
++		/* mov from dr */
++		switch (dr) {
++		case 6:
++			val = 0xffff0ff0;
++			break;
++		case 7:
++			val = 0x400;
++			break;
++		default:
++			val = 0;
++		}
++		vcpu->arch.regs[reg] = val;
++	} else {
++		/* mov to dr */
++	}
++	vcpu_put_rsp_rip(vcpu);
++	skip_emulated_instruction(vcpu);
++	return 1;
++}
 +
- 		s->dma_address = addr;
--		BUG_ON(s->length == 0); 
-+		BUG_ON(s->length == 0);
- 
--		nextneed = need_iommu(dev, addr, s->length); 
-+		nextneed = need_iommu(dev, addr, s->length);
- 
- 		/* Handle the previous not yet processed entries */
- 		if (i > start) {
--			/* Can only merge when the last chunk ends on a page 
--			   boundary and the new one doesn't have an offset. */
-+			/*
-+			 * Can only merge when the last chunk ends on a
-+			 * page boundary and the new one doesn't have an
-+			 * offset.
-+			 */
- 			if (!iommu_merge || !nextneed || !need || s->offset ||
- 			    (ps->offset + ps->length) % PAGE_SIZE) {
- 				if (dma_map_cont(start_sg, i - start, sgmap,
-@@ -436,6 +472,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- error:
- 	flush_gart();
- 	gart_unmap_sg(dev, sg, out, dir);
++static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	kvm_emulate_cpuid(vcpu);
++	return 1;
++}
 +
- 	/* When it was forced or merged try again in a dumb way */
- 	if (force_iommu || iommu_merge) {
- 		out = dma_map_sg_nonforce(dev, sg, nents, dir);
-@@ -444,64 +481,68 @@ error:
- 	}
- 	if (panic_on_overflow)
- 		panic("dma_map_sg: overflow on %lu pages\n", pages);
++static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
++	u64 data;
 +
- 	iommu_full(dev, pages << PAGE_SHIFT, dir);
- 	for_each_sg(sg, s, nents, i)
- 		s->dma_address = bad_dma_address;
- 	return 0;
--} 
++	if (vmx_get_msr(vcpu, ecx, &data)) {
++		kvm_inject_gp(vcpu, 0);
++		return 1;
++	}
++
++	/* FIXME: handling of bits 32:63 of rax, rdx */
++	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
++	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
++	skip_emulated_instruction(vcpu);
++	return 1;
 +}
- 
- static int no_agp;
- 
- static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
--{ 
--	unsigned long a; 
--	if (!iommu_size) { 
--		iommu_size = aper_size; 
--		if (!no_agp) 
--			iommu_size /= 2; 
--	} 
--
--	a = aper + iommu_size; 
++
++static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 +{
-+	unsigned long a;
++	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
++	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
++		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 +
-+	if (!iommu_size) {
-+		iommu_size = aper_size;
-+		if (!no_agp)
-+			iommu_size /= 2;
++	if (vmx_set_msr(vcpu, ecx, data) != 0) {
++		kvm_inject_gp(vcpu, 0);
++		return 1;
 +	}
 +
-+	a = aper + iommu_size;
- 	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;
- 
--	if (iommu_size < 64*1024*1024) 
-+	if (iommu_size < 64*1024*1024) {
- 		printk(KERN_WARNING
--  "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20); 
--	
-+			"PCI-DMA: Warning: Small IOMMU %luMB."
-+			" Consider increasing the AGP aperture in BIOS\n",
-+				iommu_size >> 20);
++	skip_emulated_instruction(vcpu);
++	return 1;
++}
++
++static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
++				      struct kvm_run *kvm_run)
++{
++	return 1;
++}
++
++static int handle_interrupt_window(struct kvm_vcpu *vcpu,
++				   struct kvm_run *kvm_run)
++{
++	u32 cpu_based_vm_exec_control;
++
++	/* clear pending irq */
++	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
++	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
++	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
++	/*
++	 * If the user space waits to inject interrupts, exit as soon as
++	 * possible
++	 */
++	if (kvm_run->request_interrupt_window &&
++	    !vcpu->arch.irq_summary) {
++		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
++		++vcpu->stat.irq_window_exits;
++		return 0;
 +	}
++	return 1;
++}
 +
- 	return iommu_size;
--} 
++static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	skip_emulated_instruction(vcpu);
++	return kvm_emulate_halt(vcpu);
 +}
- 
--static __init unsigned read_aperture(struct pci_dev *dev, u32 *size) 
--{ 
--	unsigned aper_size = 0, aper_base_32;
-+static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
++
++static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 +{
-+	unsigned aper_size = 0, aper_base_32, aper_order;
- 	u64 aper_base;
--	unsigned aper_order;
- 
--	pci_read_config_dword(dev, 0x94, &aper_base_32); 
-+	pci_read_config_dword(dev, 0x94, &aper_base_32);
- 	pci_read_config_dword(dev, 0x90, &aper_order);
--	aper_order = (aper_order >> 1) & 7;	
-+	aper_order = (aper_order >> 1) & 7;
- 
--	aper_base = aper_base_32 & 0x7fff; 
-+	aper_base = aper_base_32 & 0x7fff;
- 	aper_base <<= 25;
- 
--	aper_size = (32 * 1024 * 1024) << aper_order; 
--       if (aper_base + aper_size > 0x100000000UL || !aper_size)
-+	aper_size = (32 * 1024 * 1024) << aper_order;
-+	if (aper_base + aper_size > 0x100000000UL || !aper_size)
- 		aper_base = 0;
- 
- 	*size = aper_size;
- 	return aper_base;
--} 
++	skip_emulated_instruction(vcpu);
++	kvm_emulate_hypercall(vcpu);
++	return 1;
 +}
- 
--/* 
++
++static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	skip_emulated_instruction(vcpu);
++	/* TODO: Add support for VT-d/pass-through device */
++	return 1;
++}
++
++static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	u64 exit_qualification;
++	enum emulation_result er;
++	unsigned long offset;
++
++	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
++	offset = exit_qualification & 0xffful;
++
++	er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
++
++	if (er !=  EMULATE_DONE) {
++		printk(KERN_ERR
++		       "Fail to handle apic access vmexit! Offset is 0x%lx\n",
++		       offset);
++		return -ENOTSUPP;
++	}
++	return 1;
++}
++
 +/*
-  * Private Northbridge GATT initialization in case we cannot use the
-- * AGP driver for some reason.  
-+ * AGP driver for some reason.
-  */
- static __init int init_k8_gatt(struct agp_kern_info *info)
--{ 
++ * The exit handlers return 1 if the exit was handled fully and guest execution
++ * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
++ * to be done to userspace and return 0.
++ */
++static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
++				      struct kvm_run *kvm_run) = {
++	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
++	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
++	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
++	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
++	[EXIT_REASON_CR_ACCESS]               = handle_cr,
++	[EXIT_REASON_DR_ACCESS]               = handle_dr,
++	[EXIT_REASON_CPUID]                   = handle_cpuid,
++	[EXIT_REASON_MSR_READ]                = handle_rdmsr,
++	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
++	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
++	[EXIT_REASON_HLT]                     = handle_halt,
++	[EXIT_REASON_VMCALL]                  = handle_vmcall,
++	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
++	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
++	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
++};
++
++static const int kvm_vmx_max_exit_handlers =
++	ARRAY_SIZE(kvm_vmx_exit_handlers);
++
++/*
++ * The guest has exited.  See if we can fix it or if we need userspace
++ * assistance.
++ */
++static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
++{
++	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	u32 vectoring_info = vmx->idt_vectoring_info;
++
++	if (unlikely(vmx->fail)) {
++		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
++		kvm_run->fail_entry.hardware_entry_failure_reason
++			= vmcs_read32(VM_INSTRUCTION_ERROR);
++		return 0;
++	}
++
++	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
++				exit_reason != EXIT_REASON_EXCEPTION_NMI)
++		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
++		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
++	if (exit_reason < kvm_vmx_max_exit_handlers
++	    && kvm_vmx_exit_handlers[exit_reason])
++		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
++	else {
++		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
++		kvm_run->hw.hardware_exit_reason = exit_reason;
++	}
++	return 0;
++}
++
++static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 +{
-+	unsigned aper_size, gatt_size, new_aper_size;
-+	unsigned aper_base, new_aper_base;
- 	struct pci_dev *dev;
- 	void *gatt;
--	unsigned aper_base, new_aper_base;
--	unsigned aper_size, gatt_size, new_aper_size;
- 	int i;
- 
- 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
-@@ -509,75 +550,75 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
- 	dev = NULL;
- 	for (i = 0; i < num_k8_northbridges; i++) {
- 		dev = k8_northbridges[i];
--		new_aper_base = read_aperture(dev, &new_aper_size); 
--		if (!new_aper_base) 
--			goto nommu; 
--		
--		if (!aper_base) { 
-+		new_aper_base = read_aperture(dev, &new_aper_size);
-+		if (!new_aper_base)
-+			goto nommu;
++}
 +
-+		if (!aper_base) {
- 			aper_size = new_aper_size;
- 			aper_base = new_aper_base;
--		} 
--		if (aper_size != new_aper_size || aper_base != new_aper_base) 
++static void update_tpr_threshold(struct kvm_vcpu *vcpu)
++{
++	int max_irr, tpr;
++
++	if (!vm_need_tpr_shadow(vcpu->kvm))
++		return;
++
++	if (!kvm_lapic_enabled(vcpu) ||
++	    ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
++		vmcs_write32(TPR_THRESHOLD, 0);
++		return;
++	}
++
++	tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
++	vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
++}
++
++static void enable_irq_window(struct kvm_vcpu *vcpu)
++{
++	u32 cpu_based_vm_exec_control;
++
++	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
++	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
++	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
++}
++
++static void vmx_intr_assist(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	u32 idtv_info_field, intr_info_field;
++	int has_ext_irq, interrupt_window_open;
++	int vector;
++
++	update_tpr_threshold(vcpu);
++
++	has_ext_irq = kvm_cpu_has_interrupt(vcpu);
++	intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
++	idtv_info_field = vmx->idt_vectoring_info;
++	if (intr_info_field & INTR_INFO_VALID_MASK) {
++		if (idtv_info_field & INTR_INFO_VALID_MASK) {
++			/* TODO: fault when IDT_Vectoring */
++			if (printk_ratelimit())
++				printk(KERN_ERR "Fault when IDT_Vectoring\n");
 +		}
-+		if (aper_size != new_aper_size || aper_base != new_aper_base)
- 			goto nommu;
- 	}
- 	if (!aper_base)
--		goto nommu; 
-+		goto nommu;
- 	info->aper_base = aper_base;
--	info->aper_size = aper_size>>20; 
-+	info->aper_size = aper_size >> 20;
- 
--	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); 
--	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size)); 
--	if (!gatt) 
-+	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
-+	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
-+	if (!gatt)
- 		panic("Cannot allocate GATT table");
--	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
-+	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
- 		panic("Could not set GART PTEs to uncacheable pages");
--	global_flush_tlb();
- 
--	memset(gatt, 0, gatt_size); 
-+	memset(gatt, 0, gatt_size);
- 	agp_gatt_table = gatt;
- 
- 	for (i = 0; i < num_k8_northbridges; i++) {
--		u32 ctl; 
--		u32 gatt_reg; 
-+		u32 gatt_reg;
-+		u32 ctl;
- 
- 		dev = k8_northbridges[i];
--		gatt_reg = __pa(gatt) >> 12; 
--		gatt_reg <<= 4; 
-+		gatt_reg = __pa(gatt) >> 12;
-+		gatt_reg <<= 4;
- 		pci_write_config_dword(dev, 0x98, gatt_reg);
--		pci_read_config_dword(dev, 0x90, &ctl); 
-+		pci_read_config_dword(dev, 0x90, &ctl);
- 
- 		ctl |= 1;
- 		ctl &= ~((1<<4) | (1<<5));
- 
--		pci_write_config_dword(dev, 0x90, ctl); 
-+		pci_write_config_dword(dev, 0x90, ctl);
- 	}
- 	flush_gart();
--	
--	printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10); 
++		if (has_ext_irq)
++			enable_irq_window(vcpu);
++		return;
++	}
++	if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
++		if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
++		    == INTR_TYPE_EXT_INTR
++		    && vcpu->arch.rmode.active) {
++			u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
 +
-+	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
-+	       aper_base, aper_size>>10);
- 	return 0;
- 
-  nommu:
-- 	/* Should not happen anymore */
-+	/* Should not happen anymore */
- 	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
- 	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
--	return -1; 
--} 
-+	return -1;
++			vmx_inject_irq(vcpu, vect);
++			if (unlikely(has_ext_irq))
++				enable_irq_window(vcpu);
++			return;
++		}
++
++		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
++		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
++				vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
++
++		if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
++			vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
++				vmcs_read32(IDT_VECTORING_ERROR_CODE));
++		if (unlikely(has_ext_irq))
++			enable_irq_window(vcpu);
++		return;
++	}
++	if (!has_ext_irq)
++		return;
++	interrupt_window_open =
++		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
++		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
++	if (interrupt_window_open) {
++		vector = kvm_cpu_get_interrupt(vcpu);
++		vmx_inject_irq(vcpu, vector);
++		kvm_timer_intr_post(vcpu, vector);
++	} else
++		enable_irq_window(vcpu);
 +}
- 
- extern int agp_amd64_init(void);
- 
- static const struct dma_mapping_ops gart_dma_ops = {
--	.mapping_error = NULL,
--	.map_single = gart_map_single,
--	.map_simple = gart_map_simple,
--	.unmap_single = gart_unmap_single,
--	.sync_single_for_cpu = NULL,
--	.sync_single_for_device = NULL,
--	.sync_single_range_for_cpu = NULL,
--	.sync_single_range_for_device = NULL,
--	.sync_sg_for_cpu = NULL,
--	.sync_sg_for_device = NULL,
--	.map_sg = gart_map_sg,
--	.unmap_sg = gart_unmap_sg,
-+	.mapping_error			= NULL,
-+	.map_single			= gart_map_single,
-+	.map_simple			= gart_map_simple,
-+	.unmap_single			= gart_unmap_single,
-+	.sync_single_for_cpu		= NULL,
-+	.sync_single_for_device		= NULL,
-+	.sync_single_range_for_cpu	= NULL,
-+	.sync_single_range_for_device	= NULL,
-+	.sync_sg_for_cpu		= NULL,
-+	.sync_sg_for_device		= NULL,
-+	.map_sg				= gart_map_sg,
-+	.unmap_sg			= gart_unmap_sg,
- };
- 
- void gart_iommu_shutdown(void)
-@@ -588,23 +629,23 @@ void gart_iommu_shutdown(void)
- 	if (no_agp && (dma_ops != &gart_dma_ops))
- 		return;
- 
--        for (i = 0; i < num_k8_northbridges; i++) {
--                u32 ctl;
-+	for (i = 0; i < num_k8_northbridges; i++) {
-+		u32 ctl;
- 
--                dev = k8_northbridges[i];
--                pci_read_config_dword(dev, 0x90, &ctl);
-+		dev = k8_northbridges[i];
-+		pci_read_config_dword(dev, 0x90, &ctl);
- 
--                ctl &= ~1;
-+		ctl &= ~1;
- 
--                pci_write_config_dword(dev, 0x90, ctl);
--        }
-+		pci_write_config_dword(dev, 0x90, ctl);
++
++/*
++ * Failure to inject an interrupt should give us the information
++ * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
++ * when fetching the interrupt redirection bitmap in the real-mode
++ * tss, this doesn't happen.  So we do it ourselves.
++ */
++static void fixup_rmode_irq(struct vcpu_vmx *vmx)
++{
++	vmx->rmode.irq.pending = 0;
++	if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
++		return;
++	vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
++	if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
++		vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
++		vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
++		return;
 +	}
- }
- 
- void __init gart_iommu_init(void)
--{ 
++	vmx->idt_vectoring_info =
++		VECTORING_INFO_VALID_MASK
++		| INTR_TYPE_EXT_INTR
++		| vmx->rmode.irq.vector;
++}
++
++static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 +{
- 	struct agp_kern_info info;
--	unsigned long aper_size;
- 	unsigned long iommu_start;
-+	unsigned long aper_size;
- 	unsigned long scratch;
- 	long i;
- 
-@@ -614,14 +655,14 @@ void __init gart_iommu_init(void)
- 	}
- 
- #ifndef CONFIG_AGP_AMD64
--	no_agp = 1; 
-+	no_agp = 1;
- #else
- 	/* Makefile puts PCI initialization via subsys_initcall first. */
- 	/* Add other K8 AGP bridge drivers here */
--	no_agp = no_agp || 
--		(agp_amd64_init() < 0) || 
-+	no_agp = no_agp ||
-+		(agp_amd64_init() < 0) ||
- 		(agp_copy_info(agp_bridge, &info) < 0);
--#endif	
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	u32 intr_info;
++
++	/*
++	 * Loading guest fpu may have cleared host cr0.ts
++	 */
++	vmcs_writel(HOST_CR0, read_cr0());
++
++	asm(
++		/* Store host registers */
++#ifdef CONFIG_X86_64
++		"push %%rdx; push %%rbp;"
++		"push %%rcx \n\t"
++#else
++		"push %%edx; push %%ebp;"
++		"push %%ecx \n\t"
 +#endif
- 
- 	if (swiotlb)
- 		return;
-@@ -643,77 +684,78 @@ void __init gart_iommu_init(void)
- 	}
- 
- 	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
--	aper_size = info.aper_size * 1024 * 1024;	
--	iommu_size = check_iommu_size(info.aper_base, aper_size); 
--	iommu_pages = iommu_size >> PAGE_SHIFT; 
--
--	iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL, 
--						    get_order(iommu_pages/8)); 
--	if (!iommu_gart_bitmap) 
--		panic("Cannot allocate iommu bitmap\n"); 
-+	aper_size = info.aper_size * 1024 * 1024;
-+	iommu_size = check_iommu_size(info.aper_base, aper_size);
-+	iommu_pages = iommu_size >> PAGE_SHIFT;
++		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
++		/* Check if vmlaunch of vmresume is needed */
++		"cmpl $0, %c[launched](%0) \n\t"
++		/* Load guest registers.  Don't clobber flags. */
++#ifdef CONFIG_X86_64
++		"mov %c[cr2](%0), %%rax \n\t"
++		"mov %%rax, %%cr2 \n\t"
++		"mov %c[rax](%0), %%rax \n\t"
++		"mov %c[rbx](%0), %%rbx \n\t"
++		"mov %c[rdx](%0), %%rdx \n\t"
++		"mov %c[rsi](%0), %%rsi \n\t"
++		"mov %c[rdi](%0), %%rdi \n\t"
++		"mov %c[rbp](%0), %%rbp \n\t"
++		"mov %c[r8](%0),  %%r8  \n\t"
++		"mov %c[r9](%0),  %%r9  \n\t"
++		"mov %c[r10](%0), %%r10 \n\t"
++		"mov %c[r11](%0), %%r11 \n\t"
++		"mov %c[r12](%0), %%r12 \n\t"
++		"mov %c[r13](%0), %%r13 \n\t"
++		"mov %c[r14](%0), %%r14 \n\t"
++		"mov %c[r15](%0), %%r15 \n\t"
++		"mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */
++#else
++		"mov %c[cr2](%0), %%eax \n\t"
++		"mov %%eax,   %%cr2 \n\t"
++		"mov %c[rax](%0), %%eax \n\t"
++		"mov %c[rbx](%0), %%ebx \n\t"
++		"mov %c[rdx](%0), %%edx \n\t"
++		"mov %c[rsi](%0), %%esi \n\t"
++		"mov %c[rdi](%0), %%edi \n\t"
++		"mov %c[rbp](%0), %%ebp \n\t"
++		"mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */
++#endif
++		/* Enter guest mode */
++		"jne .Llaunched \n\t"
++		ASM_VMX_VMLAUNCH "\n\t"
++		"jmp .Lkvm_vmx_return \n\t"
++		".Llaunched: " ASM_VMX_VMRESUME "\n\t"
++		".Lkvm_vmx_return: "
++		/* Save guest registers, load host registers, keep flags */
++#ifdef CONFIG_X86_64
++		"xchg %0,     (%%rsp) \n\t"
++		"mov %%rax, %c[rax](%0) \n\t"
++		"mov %%rbx, %c[rbx](%0) \n\t"
++		"pushq (%%rsp); popq %c[rcx](%0) \n\t"
++		"mov %%rdx, %c[rdx](%0) \n\t"
++		"mov %%rsi, %c[rsi](%0) \n\t"
++		"mov %%rdi, %c[rdi](%0) \n\t"
++		"mov %%rbp, %c[rbp](%0) \n\t"
++		"mov %%r8,  %c[r8](%0) \n\t"
++		"mov %%r9,  %c[r9](%0) \n\t"
++		"mov %%r10, %c[r10](%0) \n\t"
++		"mov %%r11, %c[r11](%0) \n\t"
++		"mov %%r12, %c[r12](%0) \n\t"
++		"mov %%r13, %c[r13](%0) \n\t"
++		"mov %%r14, %c[r14](%0) \n\t"
++		"mov %%r15, %c[r15](%0) \n\t"
++		"mov %%cr2, %%rax   \n\t"
++		"mov %%rax, %c[cr2](%0) \n\t"
++
++		"pop  %%rbp; pop  %%rbp; pop  %%rdx \n\t"
++#else
++		"xchg %0, (%%esp) \n\t"
++		"mov %%eax, %c[rax](%0) \n\t"
++		"mov %%ebx, %c[rbx](%0) \n\t"
++		"pushl (%%esp); popl %c[rcx](%0) \n\t"
++		"mov %%edx, %c[rdx](%0) \n\t"
++		"mov %%esi, %c[rsi](%0) \n\t"
++		"mov %%edi, %c[rdi](%0) \n\t"
++		"mov %%ebp, %c[rbp](%0) \n\t"
++		"mov %%cr2, %%eax  \n\t"
++		"mov %%eax, %c[cr2](%0) \n\t"
++
++		"pop %%ebp; pop %%ebp; pop %%edx \n\t"
++#endif
++		"setbe %c[fail](%0) \n\t"
++	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
++		[launched]"i"(offsetof(struct vcpu_vmx, launched)),
++		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
++		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
++		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
++		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
++		[rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
++		[rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
++		[rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
++		[rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
++#ifdef CONFIG_X86_64
++		[r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
++		[r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
++		[r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
++		[r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
++		[r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
++		[r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
++		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
++		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
++#endif
++		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
++	      : "cc", "memory"
++#ifdef CONFIG_X86_64
++		, "rbx", "rdi", "rsi"
++		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
++#else
++		, "ebx", "edi", "rsi"
++#endif
++	      );
 +
-+	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
-+						      get_order(iommu_pages/8));
-+	if (!iommu_gart_bitmap)
-+		panic("Cannot allocate iommu bitmap\n");
- 	memset(iommu_gart_bitmap, 0, iommu_pages/8);
- 
- #ifdef CONFIG_IOMMU_LEAK
--	if (leak_trace) { 
--		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL, 
-+	if (leak_trace) {
-+		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
- 				  get_order(iommu_pages*sizeof(void *)));
--		if (iommu_leak_tab) 
--			memset(iommu_leak_tab, 0, iommu_pages * 8); 
-+		if (iommu_leak_tab)
-+			memset(iommu_leak_tab, 0, iommu_pages * 8);
- 		else
--			printk("PCI-DMA: Cannot allocate leak trace area\n"); 
--	} 
-+			printk(KERN_DEBUG
-+			       "PCI-DMA: Cannot allocate leak trace area\n");
++	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
++	if (vmx->rmode.irq.pending)
++		fixup_rmode_irq(vmx);
++
++	vcpu->arch.interrupt_window_open =
++		(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
++
++	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
++	vmx->launched = 1;
++
++	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
++
++	/* We need to handle NMIs before interrupts are enabled */
++	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
++		asm("int $2");
++}
++
++static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++
++	if (vmx->vmcs) {
++		on_each_cpu(__vcpu_clear, vmx, 0, 1);
++		free_vmcs(vmx->vmcs);
++		vmx->vmcs = NULL;
 +	}
- #endif
- 
--	/* 
++}
++
++static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++
++	vmx_free_vmcs(vcpu);
++	kfree(vmx->host_msrs);
++	kfree(vmx->guest_msrs);
++	kvm_vcpu_uninit(vcpu);
++	kmem_cache_free(kvm_vcpu_cache, vmx);
++}
++
++static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
++{
++	int err;
++	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
++	int cpu;
++
++	if (!vmx)
++		return ERR_PTR(-ENOMEM);
++
++	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
++	if (err)
++		goto free_vcpu;
++
++	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
++	if (!vmx->guest_msrs) {
++		err = -ENOMEM;
++		goto uninit_vcpu;
++	}
++
++	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
++	if (!vmx->host_msrs)
++		goto free_guest_msrs;
++
++	vmx->vmcs = alloc_vmcs();
++	if (!vmx->vmcs)
++		goto free_msrs;
++
++	vmcs_clear(vmx->vmcs);
++
++	cpu = get_cpu();
++	vmx_vcpu_load(&vmx->vcpu, cpu);
++	err = vmx_vcpu_setup(vmx);
++	vmx_vcpu_put(&vmx->vcpu);
++	put_cpu();
++	if (err)
++		goto free_vmcs;
++
++	return &vmx->vcpu;
++
++free_vmcs:
++	free_vmcs(vmx->vmcs);
++free_msrs:
++	kfree(vmx->host_msrs);
++free_guest_msrs:
++	kfree(vmx->guest_msrs);
++uninit_vcpu:
++	kvm_vcpu_uninit(&vmx->vcpu);
++free_vcpu:
++	kmem_cache_free(kvm_vcpu_cache, vmx);
++	return ERR_PTR(err);
++}
++
++static void __init vmx_check_processor_compat(void *rtn)
++{
++	struct vmcs_config vmcs_conf;
++
++	*(int *)rtn = 0;
++	if (setup_vmcs_config(&vmcs_conf) < 0)
++		*(int *)rtn = -EIO;
++	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
++		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
++				smp_processor_id());
++		*(int *)rtn = -EIO;
++	}
++}
++
++static struct kvm_x86_ops vmx_x86_ops = {
++	.cpu_has_kvm_support = cpu_has_kvm_support,
++	.disabled_by_bios = vmx_disabled_by_bios,
++	.hardware_setup = hardware_setup,
++	.hardware_unsetup = hardware_unsetup,
++	.check_processor_compatibility = vmx_check_processor_compat,
++	.hardware_enable = hardware_enable,
++	.hardware_disable = hardware_disable,
++	.cpu_has_accelerated_tpr = cpu_has_vmx_virtualize_apic_accesses,
++
++	.vcpu_create = vmx_create_vcpu,
++	.vcpu_free = vmx_free_vcpu,
++	.vcpu_reset = vmx_vcpu_reset,
++
++	.prepare_guest_switch = vmx_save_host_state,
++	.vcpu_load = vmx_vcpu_load,
++	.vcpu_put = vmx_vcpu_put,
++	.vcpu_decache = vmx_vcpu_decache,
++
++	.set_guest_debug = set_guest_debug,
++	.guest_debug_pre = kvm_guest_debug_pre,
++	.get_msr = vmx_get_msr,
++	.set_msr = vmx_set_msr,
++	.get_segment_base = vmx_get_segment_base,
++	.get_segment = vmx_get_segment,
++	.set_segment = vmx_set_segment,
++	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
++	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
++	.set_cr0 = vmx_set_cr0,
++	.set_cr3 = vmx_set_cr3,
++	.set_cr4 = vmx_set_cr4,
++#ifdef CONFIG_X86_64
++	.set_efer = vmx_set_efer,
++#endif
++	.get_idt = vmx_get_idt,
++	.set_idt = vmx_set_idt,
++	.get_gdt = vmx_get_gdt,
++	.set_gdt = vmx_set_gdt,
++	.cache_regs = vcpu_load_rsp_rip,
++	.decache_regs = vcpu_put_rsp_rip,
++	.get_rflags = vmx_get_rflags,
++	.set_rflags = vmx_set_rflags,
++
++	.tlb_flush = vmx_flush_tlb,
++
++	.run = vmx_vcpu_run,
++	.handle_exit = kvm_handle_exit,
++	.skip_emulated_instruction = skip_emulated_instruction,
++	.patch_hypercall = vmx_patch_hypercall,
++	.get_irq = vmx_get_irq,
++	.set_irq = vmx_inject_irq,
++	.queue_exception = vmx_queue_exception,
++	.exception_injected = vmx_exception_injected,
++	.inject_pending_irq = vmx_intr_assist,
++	.inject_pending_vectors = do_interrupt_requests,
++
++	.set_tss_addr = vmx_set_tss_addr,
++};
++
++static int __init vmx_init(void)
++{
++	void *iova;
++	int r;
++
++	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
++	if (!vmx_io_bitmap_a)
++		return -ENOMEM;
++
++	vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
++	if (!vmx_io_bitmap_b) {
++		r = -ENOMEM;
++		goto out;
++	}
++
 +	/*
- 	 * Out of IOMMU space handling.
--	 * Reserve some invalid pages at the beginning of the GART. 
--	 */ 
--	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 
-+	 * Reserve some invalid pages at the beginning of the GART.
++	 * Allow direct access to the PC debug port (it is often used for I/O
++	 * delays, but the vmexits simply slow things down).
 +	 */
-+	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
- 
--	agp_memory_reserved = iommu_size;	
-+	agp_memory_reserved = iommu_size;
- 	printk(KERN_INFO
- 	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
--	       iommu_size>>20); 
-+	       iommu_size >> 20);
- 
--	iommu_start = aper_size - iommu_size;	
--	iommu_bus_base = info.aper_base + iommu_start; 
-+	iommu_start = aper_size - iommu_size;
-+	iommu_bus_base = info.aper_base + iommu_start;
- 	bad_dma_address = iommu_bus_base;
- 	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
- 
--	/* 
-+	/*
- 	 * Unmap the IOMMU part of the GART. The alias of the page is
- 	 * always mapped with cache enabled and there is no full cache
- 	 * coherency across the GART remapping. The unmapping avoids
- 	 * automatic prefetches from the CPU allocating cache lines in
- 	 * there. All CPU accesses are done via the direct mapping to
- 	 * the backing memory. The GART address is only used by PCI
--	 * devices. 
-+	 * devices.
- 	 */
- 	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
- 
--	/* 
--	 * Try to workaround a bug (thanks to BenH) 
--	 * Set unmapped entries to a scratch page instead of 0. 
-+	/*
-+	 * Try to workaround a bug (thanks to BenH)
-+	 * Set unmapped entries to a scratch page instead of 0.
- 	 * Any prefetches that hit unmapped entries won't get an bus abort
- 	 * then.
- 	 */
--	scratch = get_zeroed_page(GFP_KERNEL); 
--	if (!scratch) 
-+	scratch = get_zeroed_page(GFP_KERNEL);
-+	if (!scratch)
- 		panic("Cannot allocate iommu scratch page");
- 	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
--	for (i = EMERGENCY_PAGES; i < iommu_pages; i++) 
-+	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
- 		iommu_gatt_base[i] = gart_unmapped_entry;
- 
- 	flush_gart();
- 	dma_ops = &gart_dma_ops;
--} 
++	iova = kmap(vmx_io_bitmap_a);
++	memset(iova, 0xff, PAGE_SIZE);
++	clear_bit(0x80, iova);
++	kunmap(vmx_io_bitmap_a);
++
++	iova = kmap(vmx_io_bitmap_b);
++	memset(iova, 0xff, PAGE_SIZE);
++	kunmap(vmx_io_bitmap_b);
++
++	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
++	if (r)
++		goto out1;
++
++	if (bypass_guest_pf)
++		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
++
++	return 0;
++
++out1:
++	__free_page(vmx_io_bitmap_b);
++out:
++	__free_page(vmx_io_bitmap_a);
++	return r;
 +}
- 
- void __init gart_parse_options(char *p)
- {
- 	int arg;
- 
- #ifdef CONFIG_IOMMU_LEAK
--	if (!strncmp(p,"leak",4)) {
-+	if (!strncmp(p, "leak", 4)) {
- 		leak_trace = 1;
- 		p += 4;
- 		if (*p == '=') ++p;
-@@ -723,18 +765,18 @@ void __init gart_parse_options(char *p)
- #endif
- 	if (isdigit(*p) && get_option(&p, &arg))
- 		iommu_size = arg;
--	if (!strncmp(p, "fullflush",8))
-+	if (!strncmp(p, "fullflush", 8))
- 		iommu_fullflush = 1;
--	if (!strncmp(p, "nofullflush",11))
-+	if (!strncmp(p, "nofullflush", 11))
- 		iommu_fullflush = 0;
--	if (!strncmp(p,"noagp",5))
-+	if (!strncmp(p, "noagp", 5))
- 		no_agp = 1;
--	if (!strncmp(p, "noaperture",10))
-+	if (!strncmp(p, "noaperture", 10))
- 		fix_aperture = 0;
- 	/* duplicated from pci-dma.c */
--	if (!strncmp(p,"force",5))
-+	if (!strncmp(p, "force", 5))
- 		gart_iommu_aperture_allowed = 1;
--	if (!strncmp(p,"allowed",7))
-+	if (!strncmp(p, "allowed", 7))
- 		gart_iommu_aperture_allowed = 1;
- 	if (!strncmp(p, "memaper", 7)) {
- 		fallback_aper_force = 1;
-diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
-index 102866d..82a0a67 100644
---- a/arch/x86/kernel/pci-swiotlb_64.c
-+++ b/arch/x86/kernel/pci-swiotlb_64.c
-@@ -10,7 +10,6 @@
- #include <asm/dma.h>
- 
- int swiotlb __read_mostly;
--EXPORT_SYMBOL(swiotlb);
- 
- const struct dma_mapping_ops swiotlb_dma_ops = {
- 	.mapping_error = swiotlb_dma_mapping_error,
-diff --git a/arch/x86/kernel/pmtimer_64.c b/arch/x86/kernel/pmtimer_64.c
-index ae8f912..b112406 100644
---- a/arch/x86/kernel/pmtimer_64.c
-+++ b/arch/x86/kernel/pmtimer_64.c
-@@ -19,13 +19,13 @@
- #include <linux/time.h>
- #include <linux/init.h>
- #include <linux/cpumask.h>
-+#include <linux/acpi_pmtmr.h>
 +
- #include <asm/io.h>
- #include <asm/proto.h>
- #include <asm/msr.h>
- #include <asm/vsyscall.h>
- 
--#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
--
- static inline u32 cyc2us(u32 cycles)
- {
- 	/* The Power Management Timer ticks at 3.579545 ticks per microsecond.
-diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index 46d391d..968371a 100644
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -55,6 +55,7 @@
- 
- #include <asm/tlbflush.h>
- #include <asm/cpu.h>
-+#include <asm/kdebug.h>
- 
- asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
- 
-@@ -74,7 +75,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number);
-  */
- unsigned long thread_saved_pc(struct task_struct *tsk)
- {
--	return ((unsigned long *)tsk->thread.esp)[3];
-+	return ((unsigned long *)tsk->thread.sp)[3];
- }
- 
- /*
-@@ -113,10 +114,19 @@ void default_idle(void)
- 		smp_mb();
- 
- 		local_irq_disable();
--		if (!need_resched())
-+		if (!need_resched()) {
-+			ktime_t t0, t1;
-+			u64 t0n, t1n;
++static void __exit vmx_exit(void)
++{
++	__free_page(vmx_io_bitmap_b);
++	__free_page(vmx_io_bitmap_a);
 +
-+			t0 = ktime_get();
-+			t0n = ktime_to_ns(t0);
- 			safe_halt();	/* enables interrupts racelessly */
--		else
--			local_irq_enable();
-+			local_irq_disable();
-+			t1 = ktime_get();
-+			t1n = ktime_to_ns(t1);
-+			sched_clock_idle_wakeup_event(t1n - t0n);
-+		}
-+		local_irq_enable();
- 		current_thread_info()->status |= TS_POLLING;
- 	} else {
- 		/* loop is done by the caller */
-@@ -132,7 +142,7 @@ EXPORT_SYMBOL(default_idle);
-  * to poll the ->work.need_resched flag instead of waiting for the
-  * cross-CPU IPI to arrive. Use this option with caution.
-  */
--static void poll_idle (void)
-+static void poll_idle(void)
- {
- 	cpu_relax();
- }
-@@ -188,6 +198,9 @@ void cpu_idle(void)
- 			rmb();
- 			idle = pm_idle;
- 
-+			if (rcu_pending(cpu))
-+				rcu_check_callbacks(cpu, 0);
++	kvm_exit();
++}
 +
- 			if (!idle)
- 				idle = default_idle;
- 
-@@ -255,13 +268,13 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
-  * New with Core Duo processors, MWAIT can take some hints based on CPU
-  * capability.
-  */
--void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
-+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
- {
- 	if (!need_resched()) {
- 		__monitor((void *)&current_thread_info()->flags, 0, 0);
- 		smp_mb();
- 		if (!need_resched())
--			__mwait(eax, ecx);
-+			__mwait(ax, cx);
- 	}
- }
- 
-@@ -272,19 +285,37 @@ static void mwait_idle(void)
- 	mwait_idle_with_hints(0, 0);
- }
- 
-+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
++module_init(vmx_init)
++module_exit(vmx_exit)
+diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
+new file mode 100644
+index 0000000..d52ae8d
+--- /dev/null
++++ b/arch/x86/kvm/vmx.h
+@@ -0,0 +1,324 @@
++#ifndef VMX_H
++#define VMX_H
++
++/*
++ * vmx.h: VMX Architecture related definitions
++ * Copyright (c) 2004, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
++ * Place - Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * A few random additions are:
++ * Copyright (C) 2006 Qumranet
++ *    Avi Kivity <avi at qumranet.com>
++ *    Yaniv Kamay <yaniv at qumranet.com>
++ *
++ */
++
++/*
++ * Definitions of Primary Processor-Based VM-Execution Controls.
++ */
++#define CPU_BASED_VIRTUAL_INTR_PENDING          0x00000004
++#define CPU_BASED_USE_TSC_OFFSETING             0x00000008
++#define CPU_BASED_HLT_EXITING                   0x00000080
++#define CPU_BASED_INVLPG_EXITING                0x00000200
++#define CPU_BASED_MWAIT_EXITING                 0x00000400
++#define CPU_BASED_RDPMC_EXITING                 0x00000800
++#define CPU_BASED_RDTSC_EXITING                 0x00001000
++#define CPU_BASED_CR8_LOAD_EXITING              0x00080000
++#define CPU_BASED_CR8_STORE_EXITING             0x00100000
++#define CPU_BASED_TPR_SHADOW                    0x00200000
++#define CPU_BASED_MOV_DR_EXITING                0x00800000
++#define CPU_BASED_UNCOND_IO_EXITING             0x01000000
++#define CPU_BASED_USE_IO_BITMAPS                0x02000000
++#define CPU_BASED_USE_MSR_BITMAPS               0x10000000
++#define CPU_BASED_MONITOR_EXITING               0x20000000
++#define CPU_BASED_PAUSE_EXITING                 0x40000000
++#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS   0x80000000
++/*
++ * Definitions of Secondary Processor-Based VM-Execution Controls.
++ */
++#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
++#define SECONDARY_EXEC_WBINVD_EXITING		0x00000040
++
++
++#define PIN_BASED_EXT_INTR_MASK                 0x00000001
++#define PIN_BASED_NMI_EXITING                   0x00000008
++#define PIN_BASED_VIRTUAL_NMIS                  0x00000020
++
++#define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
++#define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
++
++#define VM_ENTRY_IA32E_MODE                     0x00000200
++#define VM_ENTRY_SMM                            0x00000400
++#define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
++
++/* VMCS Encodings */
++enum vmcs_field {
++	GUEST_ES_SELECTOR               = 0x00000800,
++	GUEST_CS_SELECTOR               = 0x00000802,
++	GUEST_SS_SELECTOR               = 0x00000804,
++	GUEST_DS_SELECTOR               = 0x00000806,
++	GUEST_FS_SELECTOR               = 0x00000808,
++	GUEST_GS_SELECTOR               = 0x0000080a,
++	GUEST_LDTR_SELECTOR             = 0x0000080c,
++	GUEST_TR_SELECTOR               = 0x0000080e,
++	HOST_ES_SELECTOR                = 0x00000c00,
++	HOST_CS_SELECTOR                = 0x00000c02,
++	HOST_SS_SELECTOR                = 0x00000c04,
++	HOST_DS_SELECTOR                = 0x00000c06,
++	HOST_FS_SELECTOR                = 0x00000c08,
++	HOST_GS_SELECTOR                = 0x00000c0a,
++	HOST_TR_SELECTOR                = 0x00000c0c,
++	IO_BITMAP_A                     = 0x00002000,
++	IO_BITMAP_A_HIGH                = 0x00002001,
++	IO_BITMAP_B                     = 0x00002002,
++	IO_BITMAP_B_HIGH                = 0x00002003,
++	MSR_BITMAP                      = 0x00002004,
++	MSR_BITMAP_HIGH                 = 0x00002005,
++	VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
++	VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
++	VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
++	VM_EXIT_MSR_LOAD_ADDR_HIGH      = 0x00002009,
++	VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
++	VM_ENTRY_MSR_LOAD_ADDR_HIGH     = 0x0000200b,
++	TSC_OFFSET                      = 0x00002010,
++	TSC_OFFSET_HIGH                 = 0x00002011,
++	VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
++	VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
++	APIC_ACCESS_ADDR		= 0x00002014,
++	APIC_ACCESS_ADDR_HIGH		= 0x00002015,
++	VMCS_LINK_POINTER               = 0x00002800,
++	VMCS_LINK_POINTER_HIGH          = 0x00002801,
++	GUEST_IA32_DEBUGCTL             = 0x00002802,
++	GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
++	PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
++	CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
++	EXCEPTION_BITMAP                = 0x00004004,
++	PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
++	PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
++	CR3_TARGET_COUNT                = 0x0000400a,
++	VM_EXIT_CONTROLS                = 0x0000400c,
++	VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
++	VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
++	VM_ENTRY_CONTROLS               = 0x00004012,
++	VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
++	VM_ENTRY_INTR_INFO_FIELD        = 0x00004016,
++	VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
++	VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
++	TPR_THRESHOLD                   = 0x0000401c,
++	SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
++	VM_INSTRUCTION_ERROR            = 0x00004400,
++	VM_EXIT_REASON                  = 0x00004402,
++	VM_EXIT_INTR_INFO               = 0x00004404,
++	VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
++	IDT_VECTORING_INFO_FIELD        = 0x00004408,
++	IDT_VECTORING_ERROR_CODE        = 0x0000440a,
++	VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
++	VMX_INSTRUCTION_INFO            = 0x0000440e,
++	GUEST_ES_LIMIT                  = 0x00004800,
++	GUEST_CS_LIMIT                  = 0x00004802,
++	GUEST_SS_LIMIT                  = 0x00004804,
++	GUEST_DS_LIMIT                  = 0x00004806,
++	GUEST_FS_LIMIT                  = 0x00004808,
++	GUEST_GS_LIMIT                  = 0x0000480a,
++	GUEST_LDTR_LIMIT                = 0x0000480c,
++	GUEST_TR_LIMIT                  = 0x0000480e,
++	GUEST_GDTR_LIMIT                = 0x00004810,
++	GUEST_IDTR_LIMIT                = 0x00004812,
++	GUEST_ES_AR_BYTES               = 0x00004814,
++	GUEST_CS_AR_BYTES               = 0x00004816,
++	GUEST_SS_AR_BYTES               = 0x00004818,
++	GUEST_DS_AR_BYTES               = 0x0000481a,
++	GUEST_FS_AR_BYTES               = 0x0000481c,
++	GUEST_GS_AR_BYTES               = 0x0000481e,
++	GUEST_LDTR_AR_BYTES             = 0x00004820,
++	GUEST_TR_AR_BYTES               = 0x00004822,
++	GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
++	GUEST_ACTIVITY_STATE            = 0X00004826,
++	GUEST_SYSENTER_CS               = 0x0000482A,
++	HOST_IA32_SYSENTER_CS           = 0x00004c00,
++	CR0_GUEST_HOST_MASK             = 0x00006000,
++	CR4_GUEST_HOST_MASK             = 0x00006002,
++	CR0_READ_SHADOW                 = 0x00006004,
++	CR4_READ_SHADOW                 = 0x00006006,
++	CR3_TARGET_VALUE0               = 0x00006008,
++	CR3_TARGET_VALUE1               = 0x0000600a,
++	CR3_TARGET_VALUE2               = 0x0000600c,
++	CR3_TARGET_VALUE3               = 0x0000600e,
++	EXIT_QUALIFICATION              = 0x00006400,
++	GUEST_LINEAR_ADDRESS            = 0x0000640a,
++	GUEST_CR0                       = 0x00006800,
++	GUEST_CR3                       = 0x00006802,
++	GUEST_CR4                       = 0x00006804,
++	GUEST_ES_BASE                   = 0x00006806,
++	GUEST_CS_BASE                   = 0x00006808,
++	GUEST_SS_BASE                   = 0x0000680a,
++	GUEST_DS_BASE                   = 0x0000680c,
++	GUEST_FS_BASE                   = 0x0000680e,
++	GUEST_GS_BASE                   = 0x00006810,
++	GUEST_LDTR_BASE                 = 0x00006812,
++	GUEST_TR_BASE                   = 0x00006814,
++	GUEST_GDTR_BASE                 = 0x00006816,
++	GUEST_IDTR_BASE                 = 0x00006818,
++	GUEST_DR7                       = 0x0000681a,
++	GUEST_RSP                       = 0x0000681c,
++	GUEST_RIP                       = 0x0000681e,
++	GUEST_RFLAGS                    = 0x00006820,
++	GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
++	GUEST_SYSENTER_ESP              = 0x00006824,
++	GUEST_SYSENTER_EIP              = 0x00006826,
++	HOST_CR0                        = 0x00006c00,
++	HOST_CR3                        = 0x00006c02,
++	HOST_CR4                        = 0x00006c04,
++	HOST_FS_BASE                    = 0x00006c06,
++	HOST_GS_BASE                    = 0x00006c08,
++	HOST_TR_BASE                    = 0x00006c0a,
++	HOST_GDTR_BASE                  = 0x00006c0c,
++	HOST_IDTR_BASE                  = 0x00006c0e,
++	HOST_IA32_SYSENTER_ESP          = 0x00006c10,
++	HOST_IA32_SYSENTER_EIP          = 0x00006c12,
++	HOST_RSP                        = 0x00006c14,
++	HOST_RIP                        = 0x00006c16,
++};
++
++#define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000
++
++#define EXIT_REASON_EXCEPTION_NMI       0
++#define EXIT_REASON_EXTERNAL_INTERRUPT  1
++#define EXIT_REASON_TRIPLE_FAULT        2
++
++#define EXIT_REASON_PENDING_INTERRUPT   7
++
++#define EXIT_REASON_TASK_SWITCH         9
++#define EXIT_REASON_CPUID               10
++#define EXIT_REASON_HLT                 12
++#define EXIT_REASON_INVLPG              14
++#define EXIT_REASON_RDPMC               15
++#define EXIT_REASON_RDTSC               16
++#define EXIT_REASON_VMCALL              18
++#define EXIT_REASON_VMCLEAR             19
++#define EXIT_REASON_VMLAUNCH            20
++#define EXIT_REASON_VMPTRLD             21
++#define EXIT_REASON_VMPTRST             22
++#define EXIT_REASON_VMREAD              23
++#define EXIT_REASON_VMRESUME            24
++#define EXIT_REASON_VMWRITE             25
++#define EXIT_REASON_VMOFF               26
++#define EXIT_REASON_VMON                27
++#define EXIT_REASON_CR_ACCESS           28
++#define EXIT_REASON_DR_ACCESS           29
++#define EXIT_REASON_IO_INSTRUCTION      30
++#define EXIT_REASON_MSR_READ            31
++#define EXIT_REASON_MSR_WRITE           32
++#define EXIT_REASON_MWAIT_INSTRUCTION   36
++#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
++#define EXIT_REASON_APIC_ACCESS         44
++#define EXIT_REASON_WBINVD		54
++
++/*
++ * Interruption-information format
++ */
++#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
++#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
++#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
++#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
++
++#define VECTORING_INFO_VECTOR_MASK           	INTR_INFO_VECTOR_MASK
++#define VECTORING_INFO_TYPE_MASK        	INTR_INFO_INTR_TYPE_MASK
++#define VECTORING_INFO_DELIEVER_CODE_MASK    	INTR_INFO_DELIEVER_CODE_MASK
++#define VECTORING_INFO_VALID_MASK       	INTR_INFO_VALID_MASK
++
++#define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
++#define INTR_TYPE_EXCEPTION             (3 << 8) /* processor exception */
++#define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
++
++/*
++ * Exit Qualifications for MOV for Control Register Access
++ */
++#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control reg.*/
++#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
++#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose reg. */
++#define LMSW_SOURCE_DATA_SHIFT 16
++#define LMSW_SOURCE_DATA  (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
++#define REG_EAX                         (0 << 8)
++#define REG_ECX                         (1 << 8)
++#define REG_EDX                         (2 << 8)
++#define REG_EBX                         (3 << 8)
++#define REG_ESP                         (4 << 8)
++#define REG_EBP                         (5 << 8)
++#define REG_ESI                         (6 << 8)
++#define REG_EDI                         (7 << 8)
++#define REG_R8                         (8 << 8)
++#define REG_R9                         (9 << 8)
++#define REG_R10                        (10 << 8)
++#define REG_R11                        (11 << 8)
++#define REG_R12                        (12 << 8)
++#define REG_R13                        (13 << 8)
++#define REG_R14                        (14 << 8)
++#define REG_R15                        (15 << 8)
++
++/*
++ * Exit Qualifications for MOV for Debug Register Access
++ */
++#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug reg. */
++#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
++#define TYPE_MOV_TO_DR                  (0 << 4)
++#define TYPE_MOV_FROM_DR                (1 << 4)
++#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose reg. */
++
++
++/* segment AR */
++#define SEGMENT_AR_L_MASK (1 << 13)
++
++#define AR_TYPE_ACCESSES_MASK 1
++#define AR_TYPE_READABLE_MASK (1 << 1)
++#define AR_TYPE_WRITEABLE_MASK (1 << 2)
++#define AR_TYPE_CODE_MASK (1 << 3)
++#define AR_TYPE_MASK 0x0f
++#define AR_TYPE_BUSY_64_TSS 11
++#define AR_TYPE_BUSY_32_TSS 11
++#define AR_TYPE_BUSY_16_TSS 3
++#define AR_TYPE_LDT 2
++
++#define AR_UNUSABLE_MASK (1 << 16)
++#define AR_S_MASK (1 << 4)
++#define AR_P_MASK (1 << 7)
++#define AR_L_MASK (1 << 13)
++#define AR_DB_MASK (1 << 14)
++#define AR_G_MASK (1 << 15)
++#define AR_DPL_SHIFT 5
++#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)
++
++#define AR_RESERVD_MASK 0xfffe0f00
++
++#define MSR_IA32_VMX_BASIC                      0x480
++#define MSR_IA32_VMX_PINBASED_CTLS              0x481
++#define MSR_IA32_VMX_PROCBASED_CTLS             0x482
++#define MSR_IA32_VMX_EXIT_CTLS                  0x483
++#define MSR_IA32_VMX_ENTRY_CTLS                 0x484
++#define MSR_IA32_VMX_MISC                       0x485
++#define MSR_IA32_VMX_CR0_FIXED0                 0x486
++#define MSR_IA32_VMX_CR0_FIXED1                 0x487
++#define MSR_IA32_VMX_CR4_FIXED0                 0x488
++#define MSR_IA32_VMX_CR4_FIXED1                 0x489
++#define MSR_IA32_VMX_VMCS_ENUM                  0x48a
++#define MSR_IA32_VMX_PROCBASED_CTLS2            0x48b
++
++#define MSR_IA32_FEATURE_CONTROL                0x3a
++#define MSR_IA32_FEATURE_CONTROL_LOCKED         0x1
++#define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED  0x4
++
++#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT	9
++
++#endif
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+new file mode 100644
+index 0000000..8f94a0b
+--- /dev/null
++++ b/arch/x86/kvm/x86.c
+@@ -0,0 +1,3287 @@
++/*
++ * Kernel-based Virtual Machine driver for Linux
++ *
++ * derived from drivers/kvm/kvm_main.c
++ *
++ * Copyright (C) 2006 Qumranet, Inc.
++ *
++ * Authors:
++ *   Avi Kivity   <avi at qumranet.com>
++ *   Yaniv Kamay  <yaniv at qumranet.com>
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
++ *
++ */
++
++#include <linux/kvm_host.h>
++#include "segment_descriptor.h"
++#include "irq.h"
++#include "mmu.h"
++
++#include <linux/kvm.h>
++#include <linux/fs.h>
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/mman.h>
++#include <linux/highmem.h>
++
++#include <asm/uaccess.h>
++#include <asm/msr.h>
++
++#define MAX_IO_MSRS 256
++#define CR0_RESERVED_BITS						\
++	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
++			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
++			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
++#define CR4_RESERVED_BITS						\
++	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
++			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
++			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
++			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
++
++#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
++#define EFER_RESERVED_BITS 0xfffffffffffff2fe
++
++#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
++#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
++
++struct kvm_x86_ops *kvm_x86_ops;
++
++struct kvm_stats_debugfs_item debugfs_entries[] = {
++	{ "pf_fixed", VCPU_STAT(pf_fixed) },
++	{ "pf_guest", VCPU_STAT(pf_guest) },
++	{ "tlb_flush", VCPU_STAT(tlb_flush) },
++	{ "invlpg", VCPU_STAT(invlpg) },
++	{ "exits", VCPU_STAT(exits) },
++	{ "io_exits", VCPU_STAT(io_exits) },
++	{ "mmio_exits", VCPU_STAT(mmio_exits) },
++	{ "signal_exits", VCPU_STAT(signal_exits) },
++	{ "irq_window", VCPU_STAT(irq_window_exits) },
++	{ "halt_exits", VCPU_STAT(halt_exits) },
++	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
++	{ "request_irq", VCPU_STAT(request_irq_exits) },
++	{ "irq_exits", VCPU_STAT(irq_exits) },
++	{ "host_state_reload", VCPU_STAT(host_state_reload) },
++	{ "efer_reload", VCPU_STAT(efer_reload) },
++	{ "fpu_reload", VCPU_STAT(fpu_reload) },
++	{ "insn_emulation", VCPU_STAT(insn_emulation) },
++	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
++	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
++	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
++	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
++	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
++	{ "mmu_flooded", VM_STAT(mmu_flooded) },
++	{ "mmu_recycled", VM_STAT(mmu_recycled) },
++	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
++	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
++	{ NULL }
++};
++
++
++unsigned long segment_base(u16 selector)
++{
++	struct descriptor_table gdt;
++	struct segment_descriptor *d;
++	unsigned long table_base;
++	unsigned long v;
++
++	if (selector == 0)
++		return 0;
++
++	asm("sgdt %0" : "=m"(gdt));
++	table_base = gdt.base;
++
++	if (selector & 4) {           /* from ldt */
++		u16 ldt_selector;
++
++		asm("sldt %0" : "=g"(ldt_selector));
++		table_base = segment_base(ldt_selector);
++	}
++	d = (struct segment_descriptor *)(table_base + (selector & ~7));
++	v = d->base_low | ((unsigned long)d->base_mid << 16) |
++		((unsigned long)d->base_high << 24);
++#ifdef CONFIG_X86_64
++	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
++		v |= ((unsigned long) \
++		      ((struct segment_descriptor_64 *)d)->base_higher) << 32;
++#endif
++	return v;
++}
++EXPORT_SYMBOL_GPL(segment_base);
++
++u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
 +{
-+	if (force_mwait)
-+		return 1;
-+	/* Any C1 states supported? */
-+	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
++	if (irqchip_in_kernel(vcpu->kvm))
++		return vcpu->arch.apic_base;
++	else
++		return vcpu->arch.apic_base;
 +}
++EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 +
- void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
- {
--	if (cpu_has(c, X86_FEATURE_MWAIT)) {
--		printk("monitor/mwait feature present.\n");
-+	static int selected;
++void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
++{
++	/* TODO: reserve bits check */
++	if (irqchip_in_kernel(vcpu->kvm))
++		kvm_lapic_set_base(vcpu, data);
++	else
++		vcpu->arch.apic_base = data;
++}
++EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 +
-+	if (selected)
++void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
++{
++	WARN_ON(vcpu->arch.exception.pending);
++	vcpu->arch.exception.pending = true;
++	vcpu->arch.exception.has_error_code = false;
++	vcpu->arch.exception.nr = nr;
++}
++EXPORT_SYMBOL_GPL(kvm_queue_exception);
++
++void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
++			   u32 error_code)
++{
++	++vcpu->stat.pf_guest;
++	if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
++		printk(KERN_DEBUG "kvm: inject_page_fault:"
++		       " double fault 0x%lx\n", addr);
++		vcpu->arch.exception.nr = DF_VECTOR;
++		vcpu->arch.exception.error_code = 0;
 +		return;
-+#ifdef CONFIG_X86_SMP
-+	if (pm_idle == poll_idle && smp_num_siblings > 1) {
-+		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
-+			" performance may degrade.\n");
 +	}
-+#endif
-+	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- 		/*
- 		 * Skip, if setup has overridden idle.
- 		 * One CPU supports mwait => All CPUs supports mwait
- 		 */
- 		if (!pm_idle) {
--			printk("using mwait in idle threads.\n");
-+			printk(KERN_INFO "using mwait in idle threads.\n");
- 			pm_idle = mwait_idle;
- 		}
- 	}
-+	selected = 1;
- }
- 
- static int __init idle_setup(char *str)
-@@ -292,10 +323,6 @@ static int __init idle_setup(char *str)
- 	if (!strcmp(str, "poll")) {
- 		printk("using polling idle threads.\n");
- 		pm_idle = poll_idle;
--#ifdef CONFIG_X86_SMP
--		if (smp_num_siblings > 1)
--			printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
--#endif
- 	} else if (!strcmp(str, "mwait"))
- 		force_mwait = 1;
- 	else
-@@ -310,15 +337,15 @@ void __show_registers(struct pt_regs *regs, int all)
- {
- 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
- 	unsigned long d0, d1, d2, d3, d6, d7;
--	unsigned long esp;
-+	unsigned long sp;
- 	unsigned short ss, gs;
- 
- 	if (user_mode_vm(regs)) {
--		esp = regs->esp;
--		ss = regs->xss & 0xffff;
-+		sp = regs->sp;
-+		ss = regs->ss & 0xffff;
- 		savesegment(gs, gs);
- 	} else {
--		esp = (unsigned long) (&regs->esp);
-+		sp = (unsigned long) (&regs->sp);
- 		savesegment(ss, ss);
- 		savesegment(gs, gs);
- 	}
-@@ -331,17 +358,17 @@ void __show_registers(struct pt_regs *regs, int all)
- 			init_utsname()->version);
- 
- 	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
--			0xffff & regs->xcs, regs->eip, regs->eflags,
-+			0xffff & regs->cs, regs->ip, regs->flags,
- 			smp_processor_id());
--	print_symbol("EIP is at %s\n", regs->eip);
-+	print_symbol("EIP is at %s\n", regs->ip);
- 
- 	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
--		regs->eax, regs->ebx, regs->ecx, regs->edx);
-+		regs->ax, regs->bx, regs->cx, regs->dx);
- 	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
--		regs->esi, regs->edi, regs->ebp, esp);
-+		regs->si, regs->di, regs->bp, sp);
- 	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
--	       regs->xds & 0xffff, regs->xes & 0xffff,
--	       regs->xfs & 0xffff, gs, ss);
-+	       regs->ds & 0xffff, regs->es & 0xffff,
-+	       regs->fs & 0xffff, gs, ss);
- 
- 	if (!all)
- 		return;
-@@ -369,12 +396,12 @@ void __show_registers(struct pt_regs *regs, int all)
- void show_regs(struct pt_regs *regs)
- {
- 	__show_registers(regs, 1);
--	show_trace(NULL, regs, &regs->esp);
-+	show_trace(NULL, regs, &regs->sp, regs->bp);
- }
- 
- /*
-- * This gets run with %ebx containing the
-- * function to call, and %edx containing
-+ * This gets run with %bx containing the
-+ * function to call, and %dx containing
-  * the "args".
-  */
- extern void kernel_thread_helper(void);
-@@ -388,16 +415,16 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
- 
- 	memset(&regs, 0, sizeof(regs));
- 
--	regs.ebx = (unsigned long) fn;
--	regs.edx = (unsigned long) arg;
-+	regs.bx = (unsigned long) fn;
-+	regs.dx = (unsigned long) arg;
- 
--	regs.xds = __USER_DS;
--	regs.xes = __USER_DS;
--	regs.xfs = __KERNEL_PERCPU;
--	regs.orig_eax = -1;
--	regs.eip = (unsigned long) kernel_thread_helper;
--	regs.xcs = __KERNEL_CS | get_kernel_rpl();
--	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
-+	regs.ds = __USER_DS;
-+	regs.es = __USER_DS;
-+	regs.fs = __KERNEL_PERCPU;
-+	regs.orig_ax = -1;
-+	regs.ip = (unsigned long) kernel_thread_helper;
-+	regs.cs = __KERNEL_CS | get_kernel_rpl();
-+	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
- 
- 	/* Ok, create the new process.. */
- 	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-@@ -435,7 +462,12 @@ void flush_thread(void)
- {
- 	struct task_struct *tsk = current;
- 
--	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
-+	tsk->thread.debugreg0 = 0;
-+	tsk->thread.debugreg1 = 0;
-+	tsk->thread.debugreg2 = 0;
-+	tsk->thread.debugreg3 = 0;
-+	tsk->thread.debugreg6 = 0;
-+	tsk->thread.debugreg7 = 0;
- 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
- 	clear_tsk_thread_flag(tsk, TIF_DEBUG);
- 	/*
-@@ -460,7 +492,7 @@ void prepare_to_copy(struct task_struct *tsk)
- 	unlazy_fpu(tsk);
- }
- 
--int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
- 	unsigned long unused,
- 	struct task_struct * p, struct pt_regs * regs)
- {
-@@ -470,15 +502,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
- 
- 	childregs = task_pt_regs(p);
- 	*childregs = *regs;
--	childregs->eax = 0;
--	childregs->esp = esp;
-+	childregs->ax = 0;
-+	childregs->sp = sp;
- 
--	p->thread.esp = (unsigned long) childregs;
--	p->thread.esp0 = (unsigned long) (childregs+1);
-+	p->thread.sp = (unsigned long) childregs;
-+	p->thread.sp0 = (unsigned long) (childregs+1);
- 
--	p->thread.eip = (unsigned long) ret_from_fork;
-+	p->thread.ip = (unsigned long) ret_from_fork;
- 
--	savesegment(gs,p->thread.gs);
-+	savesegment(gs, p->thread.gs);
- 
- 	tsk = current;
- 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
-@@ -491,32 +523,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
- 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
- 	}
- 
-+	err = 0;
++	vcpu->arch.cr2 = addr;
++	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
++}
 +
- 	/*
- 	 * Set a new TLS for the child thread?
- 	 */
--	if (clone_flags & CLONE_SETTLS) {
--		struct desc_struct *desc;
--		struct user_desc info;
--		int idx;
--
--		err = -EFAULT;
--		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
--			goto out;
--		err = -EINVAL;
--		if (LDT_empty(&info))
--			goto out;
--
--		idx = info.entry_number;
--		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
--			goto out;
--
--		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
--		desc->a = LDT_entry_a(&info);
--		desc->b = LDT_entry_b(&info);
--	}
-+	if (clone_flags & CLONE_SETTLS)
-+		err = do_set_thread_area(p, -1,
-+			(struct user_desc __user *)childregs->si, 0);
- 
--	err = 0;
-- out:
- 	if (err && p->thread.io_bitmap_ptr) {
- 		kfree(p->thread.io_bitmap_ptr);
- 		p->thread.io_bitmap_max = 0;
-@@ -529,62 +544,52 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
-  */
- void dump_thread(struct pt_regs * regs, struct user * dump)
- {
--	int i;
-+	u16 gs;
- 
- /* changed the size calculations - should hopefully work better. lbt */
- 	dump->magic = CMAGIC;
- 	dump->start_code = 0;
--	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
-+	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
- 	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- 	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
- 	dump->u_dsize -= dump->u_tsize;
- 	dump->u_ssize = 0;
--	for (i = 0; i < 8; i++)
--		dump->u_debugreg[i] = current->thread.debugreg[i];  
-+	dump->u_debugreg[0] = current->thread.debugreg0;
-+	dump->u_debugreg[1] = current->thread.debugreg1;
-+	dump->u_debugreg[2] = current->thread.debugreg2;
-+	dump->u_debugreg[3] = current->thread.debugreg3;
-+	dump->u_debugreg[4] = 0;
-+	dump->u_debugreg[5] = 0;
-+	dump->u_debugreg[6] = current->thread.debugreg6;
-+	dump->u_debugreg[7] = current->thread.debugreg7;
- 
- 	if (dump->start_stack < TASK_SIZE)
- 		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
- 
--	dump->regs.ebx = regs->ebx;
--	dump->regs.ecx = regs->ecx;
--	dump->regs.edx = regs->edx;
--	dump->regs.esi = regs->esi;
--	dump->regs.edi = regs->edi;
--	dump->regs.ebp = regs->ebp;
--	dump->regs.eax = regs->eax;
--	dump->regs.ds = regs->xds;
--	dump->regs.es = regs->xes;
--	dump->regs.fs = regs->xfs;
--	savesegment(gs,dump->regs.gs);
--	dump->regs.orig_eax = regs->orig_eax;
--	dump->regs.eip = regs->eip;
--	dump->regs.cs = regs->xcs;
--	dump->regs.eflags = regs->eflags;
--	dump->regs.esp = regs->esp;
--	dump->regs.ss = regs->xss;
-+	dump->regs.bx = regs->bx;
-+	dump->regs.cx = regs->cx;
-+	dump->regs.dx = regs->dx;
-+	dump->regs.si = regs->si;
-+	dump->regs.di = regs->di;
-+	dump->regs.bp = regs->bp;
-+	dump->regs.ax = regs->ax;
-+	dump->regs.ds = (u16)regs->ds;
-+	dump->regs.es = (u16)regs->es;
-+	dump->regs.fs = (u16)regs->fs;
-+	savesegment(gs,gs);
-+	dump->regs.orig_ax = regs->orig_ax;
-+	dump->regs.ip = regs->ip;
-+	dump->regs.cs = (u16)regs->cs;
-+	dump->regs.flags = regs->flags;
-+	dump->regs.sp = regs->sp;
-+	dump->regs.ss = (u16)regs->ss;
- 
- 	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
- }
- EXPORT_SYMBOL(dump_thread);
- 
--/* 
-- * Capture the user space registers if the task is not running (in user space)
-- */
--int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
--{
--	struct pt_regs ptregs = *task_pt_regs(tsk);
--	ptregs.xcs &= 0xffff;
--	ptregs.xds &= 0xffff;
--	ptregs.xes &= 0xffff;
--	ptregs.xss &= 0xffff;
--
--	elf_core_copy_regs(regs, &ptregs);
--
--	return 1;
--}
--
- #ifdef CONFIG_SECCOMP
--void hard_disable_TSC(void)
-+static void hard_disable_TSC(void)
- {
- 	write_cr4(read_cr4() | X86_CR4_TSD);
- }
-@@ -599,7 +604,7 @@ void disable_TSC(void)
- 		hard_disable_TSC();
- 	preempt_enable();
- }
--void hard_enable_TSC(void)
-+static void hard_enable_TSC(void)
- {
- 	write_cr4(read_cr4() & ~X86_CR4_TSD);
- }
-@@ -609,18 +614,32 @@ static noinline void
- __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
- 		 struct tss_struct *tss)
- {
--	struct thread_struct *next;
-+	struct thread_struct *prev, *next;
-+	unsigned long debugctl;
- 
-+	prev = &prev_p->thread;
- 	next = &next_p->thread;
- 
-+	debugctl = prev->debugctlmsr;
-+	if (next->ds_area_msr != prev->ds_area_msr) {
-+		/* we clear debugctl to make sure DS
-+		 * is not in use when we change it */
-+		debugctl = 0;
-+		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
-+		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
++void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
++{
++	WARN_ON(vcpu->arch.exception.pending);
++	vcpu->arch.exception.pending = true;
++	vcpu->arch.exception.has_error_code = true;
++	vcpu->arch.exception.nr = nr;
++	vcpu->arch.exception.error_code = error_code;
++}
++EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
++
++static void __queue_exception(struct kvm_vcpu *vcpu)
++{
++	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
++				     vcpu->arch.exception.has_error_code,
++				     vcpu->arch.exception.error_code);
++}
++
++/*
++ * Load the pae pdptrs.  Return true is they are all valid.
++ */
++int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
++{
++	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
++	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
++	int i;
++	int ret;
++	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
++
++	down_read(&current->mm->mmap_sem);
++	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
++				  offset * sizeof(u64), sizeof(pdpte));
++	if (ret < 0) {
++		ret = 0;
++		goto out;
++	}
++	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
++		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
++			ret = 0;
++			goto out;
++		}
 +	}
++	ret = 1;
 +
-+	if (next->debugctlmsr != debugctl)
-+		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);
++	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
++out:
++	up_read(&current->mm->mmap_sem);
 +
- 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
--		set_debugreg(next->debugreg[0], 0);
--		set_debugreg(next->debugreg[1], 1);
--		set_debugreg(next->debugreg[2], 2);
--		set_debugreg(next->debugreg[3], 3);
-+		set_debugreg(next->debugreg0, 0);
-+		set_debugreg(next->debugreg1, 1);
-+		set_debugreg(next->debugreg2, 2);
-+		set_debugreg(next->debugreg3, 3);
- 		/* no 4 and 5 */
--		set_debugreg(next->debugreg[6], 6);
--		set_debugreg(next->debugreg[7], 7);
-+		set_debugreg(next->debugreg6, 6);
-+		set_debugreg(next->debugreg7, 7);
- 	}
- 
- #ifdef CONFIG_SECCOMP
-@@ -634,6 +653,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
- 	}
- #endif
- 
-+	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
-+		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
++	return ret;
++}
 +
-+	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
-+		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
++static bool pdptrs_changed(struct kvm_vcpu *vcpu)
++{
++	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
++	bool changed = true;
++	int r;
 +
++	if (is_long_mode(vcpu) || !is_pae(vcpu))
++		return false;
 +
- 	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
- 		/*
- 		 * Disable the bitmap via an invalid offset. We still cache
-@@ -687,11 +713,11 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-  * More important, however, is the fact that this allows us much
-  * more flexibility.
-  *
-- * The return value (in %eax) will be the "prev" task after
-+ * The return value (in %ax) will be the "prev" task after
-  * the task-switch, and shows up in ret_from_fork in entry.S,
-  * for example.
-  */
--struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- {
- 	struct thread_struct *prev = &prev_p->thread,
- 				 *next = &next_p->thread;
-@@ -710,7 +736,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
- 	/*
- 	 * Reload esp0.
- 	 */
--	load_esp0(tss, next);
-+	load_sp0(tss, next);
- 
- 	/*
- 	 * Save away %gs. No need to save %fs, as it was saved on the
-@@ -774,7 +800,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
- 
- asmlinkage int sys_fork(struct pt_regs regs)
- {
--	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
- }
- 
- asmlinkage int sys_clone(struct pt_regs regs)
-@@ -783,12 +809,12 @@ asmlinkage int sys_clone(struct pt_regs regs)
- 	unsigned long newsp;
- 	int __user *parent_tidptr, *child_tidptr;
- 
--	clone_flags = regs.ebx;
--	newsp = regs.ecx;
--	parent_tidptr = (int __user *)regs.edx;
--	child_tidptr = (int __user *)regs.edi;
-+	clone_flags = regs.bx;
-+	newsp = regs.cx;
-+	parent_tidptr = (int __user *)regs.dx;
-+	child_tidptr = (int __user *)regs.di;
- 	if (!newsp)
--		newsp = regs.esp;
-+		newsp = regs.sp;
- 	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
- }
- 
-@@ -804,7 +830,7 @@ asmlinkage int sys_clone(struct pt_regs regs)
-  */
- asmlinkage int sys_vfork(struct pt_regs regs)
- {
--	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
- }
- 
- /*
-@@ -815,18 +841,15 @@ asmlinkage int sys_execve(struct pt_regs regs)
- 	int error;
- 	char * filename;
- 
--	filename = getname((char __user *) regs.ebx);
-+	filename = getname((char __user *) regs.bx);
- 	error = PTR_ERR(filename);
- 	if (IS_ERR(filename))
- 		goto out;
- 	error = do_execve(filename,
--			(char __user * __user *) regs.ecx,
--			(char __user * __user *) regs.edx,
-+			(char __user * __user *) regs.cx,
-+			(char __user * __user *) regs.dx,
- 			&regs);
- 	if (error == 0) {
--		task_lock(current);
--		current->ptrace &= ~PT_DTRACE;
--		task_unlock(current);
- 		/* Make sure we don't return using sysenter.. */
- 		set_thread_flag(TIF_IRET);
- 	}
-@@ -840,145 +863,37 @@ out:
- 
- unsigned long get_wchan(struct task_struct *p)
- {
--	unsigned long ebp, esp, eip;
-+	unsigned long bp, sp, ip;
- 	unsigned long stack_page;
- 	int count = 0;
- 	if (!p || p == current || p->state == TASK_RUNNING)
- 		return 0;
- 	stack_page = (unsigned long)task_stack_page(p);
--	esp = p->thread.esp;
--	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
-+	sp = p->thread.sp;
-+	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
- 		return 0;
--	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
--	ebp = *(unsigned long *) esp;
-+	/* include/asm-i386/system.h:switch_to() pushes bp last. */
-+	bp = *(unsigned long *) sp;
- 	do {
--		if (ebp < stack_page || ebp > top_ebp+stack_page)
-+		if (bp < stack_page || bp > top_ebp+stack_page)
- 			return 0;
--		eip = *(unsigned long *) (ebp+4);
--		if (!in_sched_functions(eip))
--			return eip;
--		ebp = *(unsigned long *) ebp;
-+		ip = *(unsigned long *) (bp+4);
-+		if (!in_sched_functions(ip))
-+			return ip;
-+		bp = *(unsigned long *) bp;
- 	} while (count++ < 16);
- 	return 0;
- }
- 
--/*
-- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
-- */
--static int get_free_idx(void)
--{
--	struct thread_struct *t = &current->thread;
--	int idx;
--
--	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
--		if (desc_empty(t->tls_array + idx))
--			return idx + GDT_ENTRY_TLS_MIN;
--	return -ESRCH;
--}
--
--/*
-- * Set a given TLS descriptor:
-- */
--asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
--{
--	struct thread_struct *t = &current->thread;
--	struct user_desc info;
--	struct desc_struct *desc;
--	int cpu, idx;
--
--	if (copy_from_user(&info, u_info, sizeof(info)))
--		return -EFAULT;
--	idx = info.entry_number;
--
--	/*
--	 * index -1 means the kernel should try to find and
--	 * allocate an empty descriptor:
--	 */
--	if (idx == -1) {
--		idx = get_free_idx();
--		if (idx < 0)
--			return idx;
--		if (put_user(idx, &u_info->entry_number))
--			return -EFAULT;
--	}
--
--	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
--		return -EINVAL;
--
--	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
--
--	/*
--	 * We must not get preempted while modifying the TLS.
--	 */
--	cpu = get_cpu();
--
--	if (LDT_empty(&info)) {
--		desc->a = 0;
--		desc->b = 0;
--	} else {
--		desc->a = LDT_entry_a(&info);
--		desc->b = LDT_entry_b(&info);
--	}
--	load_TLS(t, cpu);
--
--	put_cpu();
--
--	return 0;
--}
--
--/*
-- * Get the current Thread-Local Storage area:
-- */
--
--#define GET_BASE(desc) ( \
--	(((desc)->a >> 16) & 0x0000ffff) | \
--	(((desc)->b << 16) & 0x00ff0000) | \
--	( (desc)->b        & 0xff000000)   )
--
--#define GET_LIMIT(desc) ( \
--	((desc)->a & 0x0ffff) | \
--	 ((desc)->b & 0xf0000) )
--	
--#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
--#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
--#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
--#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
--#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
--#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
--
--asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
--{
--	struct user_desc info;
--	struct desc_struct *desc;
--	int idx;
--
--	if (get_user(idx, &u_info->entry_number))
--		return -EFAULT;
--	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
--		return -EINVAL;
--
--	memset(&info, 0, sizeof(info));
--
--	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
--
--	info.entry_number = idx;
--	info.base_addr = GET_BASE(desc);
--	info.limit = GET_LIMIT(desc);
--	info.seg_32bit = GET_32BIT(desc);
--	info.contents = GET_CONTENTS(desc);
--	info.read_exec_only = !GET_WRITABLE(desc);
--	info.limit_in_pages = GET_LIMIT_PAGES(desc);
--	info.seg_not_present = !GET_PRESENT(desc);
--	info.useable = GET_USEABLE(desc);
--
--	if (copy_to_user(u_info, &info, sizeof(info)))
--		return -EFAULT;
--	return 0;
--}
--
- unsigned long arch_align_stack(unsigned long sp)
- {
- 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- 		sp -= get_random_int() % 8192;
- 	return sp & ~0xf;
- }
++	down_read(&current->mm->mmap_sem);
++	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
++	if (r < 0)
++		goto out;
++	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
++out:
++	up_read(&current->mm->mmap_sem);
 +
-+unsigned long arch_randomize_brk(struct mm_struct *mm)
-+{
-+	unsigned long range_end = mm->brk + 0x02000000;
-+	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
++	return changed;
 +}
-diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
-index ab79e1d..137a861 100644
---- a/arch/x86/kernel/process_64.c
-+++ b/arch/x86/kernel/process_64.c
-@@ -3,7 +3,7 @@
-  *
-  *  Pentium III FXSR, SSE support
-  *	Gareth Hughes <gareth at valinux.com>, May 2000
-- * 
-+ *
-  *  X86-64 port
-  *	Andi Kleen.
-  *
-@@ -19,19 +19,19 @@
- #include <linux/cpu.h>
- #include <linux/errno.h>
- #include <linux/sched.h>
-+#include <linux/fs.h>
- #include <linux/kernel.h>
- #include <linux/mm.h>
--#include <linux/fs.h>
- #include <linux/elfcore.h>
- #include <linux/smp.h>
- #include <linux/slab.h>
- #include <linux/user.h>
--#include <linux/module.h>
- #include <linux/a.out.h>
- #include <linux/interrupt.h>
-+#include <linux/utsname.h>
- #include <linux/delay.h>
-+#include <linux/module.h>
- #include <linux/ptrace.h>
--#include <linux/utsname.h>
- #include <linux/random.h>
- #include <linux/notifier.h>
- #include <linux/kprobes.h>
-@@ -72,13 +72,6 @@ void idle_notifier_register(struct notifier_block *n)
- {
- 	atomic_notifier_chain_register(&idle_notifier, n);
- }
--EXPORT_SYMBOL_GPL(idle_notifier_register);
--
--void idle_notifier_unregister(struct notifier_block *n)
--{
--	atomic_notifier_chain_unregister(&idle_notifier, n);
--}
--EXPORT_SYMBOL(idle_notifier_unregister);
- 
- void enter_idle(void)
- {
-@@ -106,7 +99,7 @@ void exit_idle(void)
-  * We use this if we don't have any better
-  * idle routine..
-  */
--static void default_idle(void)
-+void default_idle(void)
- {
- 	current_thread_info()->status &= ~TS_POLLING;
- 	/*
-@@ -116,11 +109,18 @@ static void default_idle(void)
- 	smp_mb();
- 	local_irq_disable();
- 	if (!need_resched()) {
--		/* Enables interrupts one instruction before HLT.
--		   x86 special cases this so there is no race. */
--		safe_halt();
--	} else
--		local_irq_enable();
-+		ktime_t t0, t1;
-+		u64 t0n, t1n;
 +
-+		t0 = ktime_get();
-+		t0n = ktime_to_ns(t0);
-+		safe_halt();	/* enables interrupts racelessly */
-+		local_irq_disable();
-+		t1 = ktime_get();
-+		t1n = ktime_to_ns(t1);
-+		sched_clock_idle_wakeup_event(t1n - t0n);
++void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
++{
++	if (cr0 & CR0_RESERVED_BITS) {
++		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
++		       cr0, vcpu->arch.cr0);
++		kvm_inject_gp(vcpu, 0);
++		return;
 +	}
-+	local_irq_enable();
- 	current_thread_info()->status |= TS_POLLING;
- }
- 
-@@ -129,54 +129,12 @@ static void default_idle(void)
-  * to poll the ->need_resched flag instead of waiting for the
-  * cross-CPU IPI to arrive. Use this option with caution.
-  */
--static void poll_idle (void)
-+static void poll_idle(void)
- {
- 	local_irq_enable();
- 	cpu_relax();
- }
- 
--static void do_nothing(void *unused)
--{
--}
--
--void cpu_idle_wait(void)
--{
--	unsigned int cpu, this_cpu = get_cpu();
--	cpumask_t map, tmp = current->cpus_allowed;
--
--	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
--	put_cpu();
--
--	cpus_clear(map);
--	for_each_online_cpu(cpu) {
--		per_cpu(cpu_idle_state, cpu) = 1;
--		cpu_set(cpu, map);
--	}
--
--	__get_cpu_var(cpu_idle_state) = 0;
--
--	wmb();
--	do {
--		ssleep(1);
--		for_each_online_cpu(cpu) {
--			if (cpu_isset(cpu, map) &&
--					!per_cpu(cpu_idle_state, cpu))
--				cpu_clear(cpu, map);
--		}
--		cpus_and(map, map, cpu_online_map);
--		/*
--		 * We waited 1 sec, if a CPU still did not call idle
--		 * it may be because it is in idle and not waking up
--		 * because it has nothing to do.
--		 * Give all the remaining CPUS a kick.
--		 */
--		smp_call_function_mask(map, do_nothing, 0, 0);
--	} while (!cpus_empty(map));
--
--	set_cpus_allowed(current, tmp);
--}
--EXPORT_SYMBOL_GPL(cpu_idle_wait);
--
- #ifdef CONFIG_HOTPLUG_CPU
- DECLARE_PER_CPU(int, cpu_state);
- 
-@@ -207,19 +165,18 @@ static inline void play_dead(void)
-  * low exit latency (ie sit in a loop waiting for
-  * somebody to say that they'd like to reschedule)
-  */
--void cpu_idle (void)
-+void cpu_idle(void)
- {
- 	current_thread_info()->status |= TS_POLLING;
- 	/* endless idle loop with no priority at all */
- 	while (1) {
-+		tick_nohz_stop_sched_tick();
- 		while (!need_resched()) {
- 			void (*idle)(void);
- 
- 			if (__get_cpu_var(cpu_idle_state))
- 				__get_cpu_var(cpu_idle_state) = 0;
- 
--			tick_nohz_stop_sched_tick();
--
- 			rmb();
- 			idle = pm_idle;
- 			if (!idle)
-@@ -247,6 +204,47 @@ void cpu_idle (void)
- 	}
- }
- 
-+static void do_nothing(void *unused)
++
++	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
++		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
++		kvm_inject_gp(vcpu, 0);
++		return;
++	}
++
++	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
++		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
++		       "and a clear PE flag\n");
++		kvm_inject_gp(vcpu, 0);
++		return;
++	}
++
++	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
++#ifdef CONFIG_X86_64
++		if ((vcpu->arch.shadow_efer & EFER_LME)) {
++			int cs_db, cs_l;
++
++			if (!is_pae(vcpu)) {
++				printk(KERN_DEBUG "set_cr0: #GP, start paging "
++				       "in long mode while PAE is disabled\n");
++				kvm_inject_gp(vcpu, 0);
++				return;
++			}
++			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
++			if (cs_l) {
++				printk(KERN_DEBUG "set_cr0: #GP, start paging "
++				       "in long mode while CS.L == 1\n");
++				kvm_inject_gp(vcpu, 0);
++				return;
++
++			}
++		} else
++#endif
++		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
++			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
++			       "reserved bits\n");
++			kvm_inject_gp(vcpu, 0);
++			return;
++		}
++
++	}
++
++	kvm_x86_ops->set_cr0(vcpu, cr0);
++	vcpu->arch.cr0 = cr0;
++
++	kvm_mmu_reset_context(vcpu);
++	return;
++}
++EXPORT_SYMBOL_GPL(set_cr0);
++
++void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 +{
++	set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
 +}
++EXPORT_SYMBOL_GPL(lmsw);
 +
-+void cpu_idle_wait(void)
++void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 +{
-+	unsigned int cpu, this_cpu = get_cpu();
-+	cpumask_t map, tmp = current->cpus_allowed;
++	if (cr4 & CR4_RESERVED_BITS) {
++		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
++		kvm_inject_gp(vcpu, 0);
++		return;
++	}
 +
-+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+	put_cpu();
++	if (is_long_mode(vcpu)) {
++		if (!(cr4 & X86_CR4_PAE)) {
++			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
++			       "in long mode\n");
++			kvm_inject_gp(vcpu, 0);
++			return;
++		}
++	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
++		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
++		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
++		kvm_inject_gp(vcpu, 0);
++		return;
++	}
 +
-+	cpus_clear(map);
-+	for_each_online_cpu(cpu) {
-+		per_cpu(cpu_idle_state, cpu) = 1;
-+		cpu_set(cpu, map);
++	if (cr4 & X86_CR4_VMXE) {
++		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
++		kvm_inject_gp(vcpu, 0);
++		return;
 +	}
++	kvm_x86_ops->set_cr4(vcpu, cr4);
++	vcpu->arch.cr4 = cr4;
++	kvm_mmu_reset_context(vcpu);
++}
++EXPORT_SYMBOL_GPL(set_cr4);
 +
-+	__get_cpu_var(cpu_idle_state) = 0;
++void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
++{
++	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
++		kvm_mmu_flush_tlb(vcpu);
++		return;
++	}
 +
-+	wmb();
-+	do {
-+		ssleep(1);
-+		for_each_online_cpu(cpu) {
-+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-+				cpu_clear(cpu, map);
++	if (is_long_mode(vcpu)) {
++		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
++			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
++			kvm_inject_gp(vcpu, 0);
++			return;
++		}
++	} else {
++		if (is_pae(vcpu)) {
++			if (cr3 & CR3_PAE_RESERVED_BITS) {
++				printk(KERN_DEBUG
++				       "set_cr3: #GP, reserved bits\n");
++				kvm_inject_gp(vcpu, 0);
++				return;
++			}
++			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
++				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
++				       "reserved bits\n");
++				kvm_inject_gp(vcpu, 0);
++				return;
++			}
 +		}
-+		cpus_and(map, map, cpu_online_map);
 +		/*
-+		 * We waited 1 sec, if a CPU still did not call idle
-+		 * it may be because it is in idle and not waking up
-+		 * because it has nothing to do.
-+		 * Give all the remaining CPUS a kick.
++		 * We don't check reserved bits in nonpae mode, because
++		 * this isn't enforced, and VMware depends on this.
 +		 */
-+		smp_call_function_mask(map, do_nothing, 0, 0);
-+	} while (!cpus_empty(map));
++	}
 +
-+	set_cpus_allowed(current, tmp);
++	down_read(&current->mm->mmap_sem);
++	/*
++	 * Does the new cr3 value map to physical memory? (Note, we
++	 * catch an invalid cr3 even in real-mode, because it would
++	 * cause trouble later on when we turn on paging anyway.)
++	 *
++	 * A real CPU would silently accept an invalid cr3 and would
++	 * attempt to use it - with largely undefined (and often hard
++	 * to debug) behavior on the guest side.
++	 */
++	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
++		kvm_inject_gp(vcpu, 0);
++	else {
++		vcpu->arch.cr3 = cr3;
++		vcpu->arch.mmu.new_cr3(vcpu);
++	}
++	up_read(&current->mm->mmap_sem);
 +}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
++EXPORT_SYMBOL_GPL(set_cr3);
 +
- /*
-  * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
-  * which can obviate IPI to trigger checking of need_resched.
-@@ -257,13 +255,13 @@ void cpu_idle (void)
-  * New with Core Duo processors, MWAIT can take some hints based on CPU
-  * capability.
-  */
--void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
-+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
- {
- 	if (!need_resched()) {
- 		__monitor((void *)&current_thread_info()->flags, 0, 0);
- 		smp_mb();
- 		if (!need_resched())
--			__mwait(eax, ecx);
-+			__mwait(ax, cx);
- 	}
- }
- 
-@@ -282,25 +280,41 @@ static void mwait_idle(void)
- 	}
- }
- 
++void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
++{
++	if (cr8 & CR8_RESERVED_BITS) {
++		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
++		kvm_inject_gp(vcpu, 0);
++		return;
++	}
++	if (irqchip_in_kernel(vcpu->kvm))
++		kvm_lapic_set_tpr(vcpu, cr8);
++	else
++		vcpu->arch.cr8 = cr8;
++}
++EXPORT_SYMBOL_GPL(set_cr8);
 +
-+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
++unsigned long get_cr8(struct kvm_vcpu *vcpu)
 +{
-+	if (force_mwait)
-+		return 1;
-+	/* Any C1 states supported? */
-+	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
++	if (irqchip_in_kernel(vcpu->kvm))
++		return kvm_lapic_get_cr8(vcpu);
++	else
++		return vcpu->arch.cr8;
 +}
++EXPORT_SYMBOL_GPL(get_cr8);
 +
- void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
- {
--	static int printed;
--	if (cpu_has(c, X86_FEATURE_MWAIT)) {
-+	static int selected;
++/*
++ * List of msr numbers which we expose to userspace through KVM_GET_MSRS
++ * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
++ *
++ * This list is modified at module load time to reflect the
++ * capabilities of the host cpu.
++ */
++static u32 msrs_to_save[] = {
++	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
++	MSR_K6_STAR,
++#ifdef CONFIG_X86_64
++	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
++#endif
++	MSR_IA32_TIME_STAMP_COUNTER,
++};
 +
-+	if (selected)
++static unsigned num_msrs_to_save;
++
++static u32 emulated_msrs[] = {
++	MSR_IA32_MISC_ENABLE,
++};
++
++#ifdef CONFIG_X86_64
++
++static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
++{
++	if (efer & EFER_RESERVED_BITS) {
++		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
++		       efer);
++		kvm_inject_gp(vcpu, 0);
 +		return;
-+#ifdef CONFIG_X86_SMP
-+	if (pm_idle == poll_idle && smp_num_siblings > 1) {
-+		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
-+			" performance may degrade.\n");
 +	}
-+#endif
-+	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- 		/*
- 		 * Skip, if setup has overridden idle.
- 		 * One CPU supports mwait => All CPUs supports mwait
- 		 */
- 		if (!pm_idle) {
--			if (!printed) {
--				printk(KERN_INFO "using mwait in idle threads.\n");
--				printed = 1;
--			}
-+			printk(KERN_INFO "using mwait in idle threads.\n");
- 			pm_idle = mwait_idle;
- 		}
- 	}
-+	selected = 1;
- }
- 
--static int __init idle_setup (char *str)
-+static int __init idle_setup(char *str)
- {
- 	if (!strcmp(str, "poll")) {
- 		printk("using polling idle threads.\n");
-@@ -315,13 +329,13 @@ static int __init idle_setup (char *str)
- }
- early_param("idle", idle_setup);
- 
--/* Prints also some state that isn't saved in the pt_regs */ 
-+/* Prints also some state that isn't saved in the pt_regs */
- void __show_regs(struct pt_regs * regs)
- {
- 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
- 	unsigned long d0, d1, d2, d3, d6, d7;
--	unsigned int fsindex,gsindex;
--	unsigned int ds,cs,es; 
-+	unsigned int fsindex, gsindex;
-+	unsigned int ds, cs, es;
- 
- 	printk("\n");
- 	print_modules();
-@@ -330,16 +344,16 @@ void __show_regs(struct pt_regs * regs)
- 		init_utsname()->release,
- 		(int)strcspn(init_utsname()->version, " "),
- 		init_utsname()->version);
--	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
--	printk_address(regs->rip); 
--	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp,
--		regs->eflags);
-+	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
-+	printk_address(regs->ip, 1);
-+	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->sp,
-+		regs->flags);
- 	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
--	       regs->rax, regs->rbx, regs->rcx);
-+	       regs->ax, regs->bx, regs->cx);
- 	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
--	       regs->rdx, regs->rsi, regs->rdi); 
-+	       regs->dx, regs->si, regs->di);
- 	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
--	       regs->rbp, regs->r8, regs->r9); 
-+	       regs->bp, regs->r8, regs->r9);
- 	printk("R10: %016lx R11: %016lx R12: %016lx\n",
- 	       regs->r10, regs->r11, regs->r12); 
- 	printk("R13: %016lx R14: %016lx R15: %016lx\n",
-@@ -379,7 +393,7 @@ void show_regs(struct pt_regs *regs)
- {
- 	printk("CPU %d:", smp_processor_id());
- 	__show_regs(regs);
--	show_trace(NULL, regs, (void *)(regs + 1));
-+	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
- }
- 
- /*
-@@ -390,7 +404,7 @@ void exit_thread(void)
- 	struct task_struct *me = current;
- 	struct thread_struct *t = &me->thread;
- 
--	if (me->thread.io_bitmap_ptr) { 
-+	if (me->thread.io_bitmap_ptr) {
- 		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
- 
- 		kfree(t->io_bitmap_ptr);
-@@ -426,7 +440,7 @@ void flush_thread(void)
- 	tsk->thread.debugreg3 = 0;
- 	tsk->thread.debugreg6 = 0;
- 	tsk->thread.debugreg7 = 0;
--	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
-+	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
- 	/*
- 	 * Forget coprocessor state..
- 	 */
-@@ -449,26 +463,21 @@ void release_thread(struct task_struct *dead_task)
- 
- static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
- {
--	struct user_desc ud = { 
-+	struct user_desc ud = {
- 		.base_addr = addr,
- 		.limit = 0xfffff,
- 		.seg_32bit = 1,
- 		.limit_in_pages = 1,
- 		.useable = 1,
- 	};
--	struct n_desc_struct *desc = (void *)t->thread.tls_array;
-+	struct desc_struct *desc = t->thread.tls_array;
- 	desc += tls;
--	desc->a = LDT_entry_a(&ud); 
--	desc->b = LDT_entry_b(&ud); 
-+	fill_ldt(desc, &ud);
- }
- 
- static inline u32 read_32bit_tls(struct task_struct *t, int tls)
- {
--	struct desc_struct *desc = (void *)t->thread.tls_array;
--	desc += tls;
--	return desc->base0 | 
--		(((u32)desc->base1) << 16) | 
--		(((u32)desc->base2) << 24);
-+	return get_desc_base(&t->thread.tls_array[tls]);
- }
- 
- /*
-@@ -480,7 +489,7 @@ void prepare_to_copy(struct task_struct *tsk)
- 	unlazy_fpu(tsk);
- }
- 
--int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
- 		unsigned long unused,
- 	struct task_struct * p, struct pt_regs * regs)
- {
-@@ -492,14 +501,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
- 			(THREAD_SIZE + task_stack_page(p))) - 1;
- 	*childregs = *regs;
- 
--	childregs->rax = 0;
--	childregs->rsp = rsp;
--	if (rsp == ~0UL)
--		childregs->rsp = (unsigned long)childregs;
-+	childregs->ax = 0;
-+	childregs->sp = sp;
-+	if (sp == ~0UL)
-+		childregs->sp = (unsigned long)childregs;
- 
--	p->thread.rsp = (unsigned long) childregs;
--	p->thread.rsp0 = (unsigned long) (childregs+1);
--	p->thread.userrsp = me->thread.userrsp; 
-+	p->thread.sp = (unsigned long) childregs;
-+	p->thread.sp0 = (unsigned long) (childregs+1);
-+	p->thread.usersp = me->thread.usersp;
- 
- 	set_tsk_thread_flag(p, TIF_FORK);
- 
-@@ -520,7 +529,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
- 		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
- 				IO_BITMAP_BYTES);
- 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
--	} 
++
++	if (is_paging(vcpu)
++	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
++		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
++		kvm_inject_gp(vcpu, 0);
++		return;
 +	}
- 
- 	/*
- 	 * Set a new TLS for the child thread?
-@@ -528,7 +537,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
- 	if (clone_flags & CLONE_SETTLS) {
- #ifdef CONFIG_IA32_EMULATION
- 		if (test_thread_flag(TIF_IA32))
--			err = ia32_child_tls(p, childregs); 
-+			err = do_set_thread_area(p, -1,
-+				(struct user_desc __user *)childregs->si, 0);
- 		else 			
- #endif	 
- 			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
-@@ -547,17 +557,30 @@ out:
- /*
-  * This special macro can be used to load a debugging register
-  */
--#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
-+#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
- 
- static inline void __switch_to_xtra(struct task_struct *prev_p,
--			     	    struct task_struct *next_p,
--			     	    struct tss_struct *tss)
-+				    struct task_struct *next_p,
-+				    struct tss_struct *tss)
- {
- 	struct thread_struct *prev, *next;
-+	unsigned long debugctl;
- 
- 	prev = &prev_p->thread,
- 	next = &next_p->thread;
- 
-+	debugctl = prev->debugctlmsr;
-+	if (next->ds_area_msr != prev->ds_area_msr) {
-+		/* we clear debugctl to make sure DS
-+		 * is not in use when we change it */
-+		debugctl = 0;
-+		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
-+		wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
++
++	kvm_x86_ops->set_efer(vcpu, efer);
++
++	efer &= ~EFER_LMA;
++	efer |= vcpu->arch.shadow_efer & EFER_LMA;
++
++	vcpu->arch.shadow_efer = efer;
++}
++
++#endif
++
++/*
++ * Writes msr value into the appropriate "register".
++ * Returns 0 on success, non-0 otherwise.
++ * Assumes vcpu_load() was already called.
++ */
++int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
++{
++	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
++}
++
++/*
++ * Adapt set_msr() to msr_io()'s calling convention
++ */
++static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
++{
++	return kvm_set_msr(vcpu, index, *data);
++}
++
++
++int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
++{
++	switch (msr) {
++#ifdef CONFIG_X86_64
++	case MSR_EFER:
++		set_efer(vcpu, data);
++		break;
++#endif
++	case MSR_IA32_MC0_STATUS:
++		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
++		       __FUNCTION__, data);
++		break;
++	case MSR_IA32_MCG_STATUS:
++		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
++			__FUNCTION__, data);
++		break;
++	case MSR_IA32_UCODE_REV:
++	case MSR_IA32_UCODE_WRITE:
++	case 0x200 ... 0x2ff: /* MTRRs */
++		break;
++	case MSR_IA32_APICBASE:
++		kvm_set_apic_base(vcpu, data);
++		break;
++	case MSR_IA32_MISC_ENABLE:
++		vcpu->arch.ia32_misc_enable_msr = data;
++		break;
++	default:
++		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
++		return 1;
 +	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_msr_common);
 +
-+	if (next->debugctlmsr != debugctl)
-+		wrmsrl(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr);
 +
- 	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
- 		loaddebug(next, 0);
- 		loaddebug(next, 1);
-@@ -581,12 +604,18 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
- 		 */
- 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
- 	}
++/*
++ * Reads an msr value (of 'msr_index') into 'pdata'.
++ * Returns 0 on success, non-0 otherwise.
++ * Assumes vcpu_load() was already called.
++ */
++int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
++{
++	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
++}
 +
-+	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
-+		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
++int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++{
++	u64 data;
 +
-+	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
-+		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
- }
- 
- /*
-  *	switch_to(x,y) should switch tasks from x to y.
-  *
-- * This could still be optimized: 
-+ * This could still be optimized:
-  * - fold all the options into a flag word and test it with a single test.
-  * - could test fs/gs bitsliced
-  *
-@@ -597,7 +626,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- {
- 	struct thread_struct *prev = &prev_p->thread,
- 				 *next = &next_p->thread;
--	int cpu = smp_processor_id();  
-+	int cpu = smp_processor_id();
- 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
- 
- 	/* we're going to use this soon, after a few expensive things */
-@@ -607,7 +636,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- 	/*
- 	 * Reload esp0, LDT and the page table pointer:
- 	 */
--	tss->rsp0 = next->rsp0;
-+	load_sp0(tss, next);
- 
- 	/* 
- 	 * Switch DS and ES.
-@@ -666,8 +695,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- 	/* 
- 	 * Switch the PDA and FPU contexts.
- 	 */
--	prev->userrsp = read_pda(oldrsp); 
--	write_pda(oldrsp, next->userrsp); 
-+	prev->usersp = read_pda(oldrsp);
-+	write_pda(oldrsp, next->usersp);
- 	write_pda(pcurrent, next_p); 
- 
- 	write_pda(kernelstack,
-@@ -684,8 +713,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- 	/*
- 	 * Now maybe reload the debug registers and handle I/O bitmaps
- 	 */
--	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
--	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
-+	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
-+		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
- 		__switch_to_xtra(prev_p, next_p, tss);
- 
- 	/* If the task has used fpu the last 5 timeslices, just do a full
-@@ -700,7 +729,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- /*
-  * sys_execve() executes a new program.
-  */
--asmlinkage 
-+asmlinkage
- long sys_execve(char __user *name, char __user * __user *argv,
- 		char __user * __user *envp, struct pt_regs regs)
- {
-@@ -712,11 +741,6 @@ long sys_execve(char __user *name, char __user * __user *argv,
- 	if (IS_ERR(filename)) 
- 		return error;
- 	error = do_execve(filename, argv, envp, &regs); 
--	if (error == 0) {
--		task_lock(current);
--		current->ptrace &= ~PT_DTRACE;
--		task_unlock(current);
--	}
- 	putname(filename);
- 	return error;
- }
-@@ -726,18 +750,18 @@ void set_personality_64bit(void)
- 	/* inherit personality from parent */
- 
- 	/* Make sure to be in 64bit mode */
--	clear_thread_flag(TIF_IA32); 
-+	clear_thread_flag(TIF_IA32);
- 
- 	/* TBD: overwrites user setup. Should have two bits.
- 	   But 64bit processes have always behaved this way,
- 	   so it's not too bad. The main problem is just that
--   	   32bit childs are affected again. */
-+	   32bit childs are affected again. */
- 	current->personality &= ~READ_IMPLIES_EXEC;
- }
- 
- asmlinkage long sys_fork(struct pt_regs *regs)
- {
--	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
-+	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
- }
- 
- asmlinkage long
-@@ -745,7 +769,7 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
- 	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
- {
- 	if (!newsp)
--		newsp = regs->rsp;
-+		newsp = regs->sp;
- 	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
- }
- 
-@@ -761,29 +785,29 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
-  */
- asmlinkage long sys_vfork(struct pt_regs *regs)
- {
--	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
- 		    NULL, NULL);
- }
- 
- unsigned long get_wchan(struct task_struct *p)
- {
- 	unsigned long stack;
--	u64 fp,rip;
-+	u64 fp,ip;
- 	int count = 0;
- 
- 	if (!p || p == current || p->state==TASK_RUNNING)
- 		return 0; 
- 	stack = (unsigned long)task_stack_page(p);
--	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
-+	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
- 		return 0;
--	fp = *(u64 *)(p->thread.rsp);
-+	fp = *(u64 *)(p->thread.sp);
- 	do { 
- 		if (fp < (unsigned long)stack ||
- 		    fp > (unsigned long)stack+THREAD_SIZE)
- 			return 0; 
--		rip = *(u64 *)(fp+8); 
--		if (!in_sched_functions(rip))
--			return rip; 
-+		ip = *(u64 *)(fp+8);
-+		if (!in_sched_functions(ip))
-+			return ip;
- 		fp = *(u64 *)fp; 
- 	} while (count++ < 16); 
- 	return 0;
-@@ -824,19 +848,19 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
- 		/* Not strictly needed for fs, but do it for symmetry
- 		   with gs */
- 		if (addr >= TASK_SIZE_OF(task))
--			return -EPERM; 
-+			return -EPERM;
- 		cpu = get_cpu();
--		/* handle small bases via the GDT because that's faster to 
-+		/* handle small bases via the GDT because that's faster to
- 		   switch. */
--		if (addr <= 0xffffffff) { 
-+		if (addr <= 0xffffffff) {
- 			set_32bit_tls(task, FS_TLS, addr);
--			if (doit) { 
--				load_TLS(&task->thread, cpu); 
-+			if (doit) {
-+				load_TLS(&task->thread, cpu);
- 				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
- 			}
- 			task->thread.fsindex = FS_TLS_SEL;
- 			task->thread.fs = 0;
--		} else { 
-+		} else {
- 			task->thread.fsindex = 0;
- 			task->thread.fs = addr;
- 			if (doit) {
-@@ -848,24 +872,24 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
- 		}
- 		put_cpu();
- 		break;
--	case ARCH_GET_FS: { 
--		unsigned long base; 
-+	case ARCH_GET_FS: {
-+		unsigned long base;
- 		if (task->thread.fsindex == FS_TLS_SEL)
- 			base = read_32bit_tls(task, FS_TLS);
- 		else if (doit)
- 			rdmsrl(MSR_FS_BASE, base);
- 		else
- 			base = task->thread.fs;
--		ret = put_user(base, (unsigned long __user *)addr); 
--		break; 
-+		ret = put_user(base, (unsigned long __user *)addr);
++	switch (msr) {
++	case 0xc0010010: /* SYSCFG */
++	case 0xc0010015: /* HWCR */
++	case MSR_IA32_PLATFORM_ID:
++	case MSR_IA32_P5_MC_ADDR:
++	case MSR_IA32_P5_MC_TYPE:
++	case MSR_IA32_MC0_CTL:
++	case MSR_IA32_MCG_STATUS:
++	case MSR_IA32_MCG_CAP:
++	case MSR_IA32_MC0_MISC:
++	case MSR_IA32_MC0_MISC+4:
++	case MSR_IA32_MC0_MISC+8:
++	case MSR_IA32_MC0_MISC+12:
++	case MSR_IA32_MC0_MISC+16:
++	case MSR_IA32_UCODE_REV:
++	case MSR_IA32_PERF_STATUS:
++	case MSR_IA32_EBL_CR_POWERON:
++		/* MTRR registers */
++	case 0xfe:
++	case 0x200 ... 0x2ff:
++		data = 0;
 +		break;
- 	}
--	case ARCH_GET_GS: { 
-+	case ARCH_GET_GS: {
- 		unsigned long base;
- 		unsigned gsindex;
- 		if (task->thread.gsindex == GS_TLS_SEL)
- 			base = read_32bit_tls(task, GS_TLS);
- 		else if (doit) {
-- 			asm("movl %%gs,%0" : "=r" (gsindex));
-+			asm("movl %%gs,%0" : "=r" (gsindex));
- 			if (gsindex)
- 				rdmsrl(MSR_KERNEL_GS_BASE, base);
- 			else
-@@ -873,39 +897,21 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
- 		}
- 		else
- 			base = task->thread.gs;
--		ret = put_user(base, (unsigned long __user *)addr); 
-+		ret = put_user(base, (unsigned long __user *)addr);
- 		break;
- 	}
- 
- 	default:
- 		ret = -EINVAL;
- 		break;
--	} 
++	case 0xcd: /* fsb frequency */
++		data = 3;
++		break;
++	case MSR_IA32_APICBASE:
++		data = kvm_get_apic_base(vcpu);
++		break;
++	case MSR_IA32_MISC_ENABLE:
++		data = vcpu->arch.ia32_misc_enable_msr;
++		break;
++#ifdef CONFIG_X86_64
++	case MSR_EFER:
++		data = vcpu->arch.shadow_efer;
++		break;
++#endif
++	default:
++		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
++		return 1;
 +	}
- 
--	return ret;	
--} 
-+	return ret;
++	*pdata = data;
++	return 0;
 +}
- 
- long sys_arch_prctl(int code, unsigned long addr)
- {
- 	return do_arch_prctl(current, code, addr);
--} 
--
--/* 
-- * Capture the user space registers if the task is not running (in user space)
-- */
--int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
--{
--	struct pt_regs *pp, ptregs;
--
--	pp = task_pt_regs(tsk);
--
--	ptregs = *pp; 
--	ptregs.cs &= 0xffff;
--	ptregs.ss &= 0xffff;
--
--	elf_core_copy_regs(regs, &ptregs);
-- 
--	return 1;
- }
- 
- unsigned long arch_align_stack(unsigned long sp)
-@@ -914,3 +920,9 @@ unsigned long arch_align_stack(unsigned long sp)
- 		sp -= get_random_int() % 8192;
- 	return sp & ~0xf;
- }
++EXPORT_SYMBOL_GPL(kvm_get_msr_common);
 +
-+unsigned long arch_randomize_brk(struct mm_struct *mm)
++/*
++ * Read or write a bunch of msrs. All parameters are kernel addresses.
++ *
++ * @return number of msrs set successfully.
++ */
++static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
++		    struct kvm_msr_entry *entries,
++		    int (*do_msr)(struct kvm_vcpu *vcpu,
++				  unsigned index, u64 *data))
 +{
-+	unsigned long range_end = mm->brk + 0x02000000;
-+	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
++	int i;
++
++	vcpu_load(vcpu);
++
++	for (i = 0; i < msrs->nmsrs; ++i)
++		if (do_msr(vcpu, entries[i].index, &entries[i].data))
++			break;
++
++	vcpu_put(vcpu);
++
++	return i;
 +}
-diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
-new file mode 100644
-index 0000000..96286df
---- /dev/null
-+++ b/arch/x86/kernel/ptrace.c
-@@ -0,0 +1,1545 @@
-+/* By Ross Biro 1/23/92 */
++
 +/*
-+ * Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ * Read or write a bunch of msrs. Parameters are user addresses.
 + *
-+ * BTS tracing
-+ *	Markus Metzger <markus.t.metzger at intel.com>, Dec 2007
++ * @return number of msrs set successfully.
 + */
++static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
++		  int (*do_msr)(struct kvm_vcpu *vcpu,
++				unsigned index, u64 *data),
++		  int writeback)
++{
++	struct kvm_msrs msrs;
++	struct kvm_msr_entry *entries;
++	int r, n;
++	unsigned size;
 +
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/errno.h>
-+#include <linux/ptrace.h>
-+#include <linux/regset.h>
-+#include <linux/user.h>
-+#include <linux/elf.h>
-+#include <linux/security.h>
-+#include <linux/audit.h>
-+#include <linux/seccomp.h>
-+#include <linux/signal.h>
++	r = -EFAULT;
++	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
++		goto out;
 +
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/debugreg.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/prctl.h>
-+#include <asm/proto.h>
-+#include <asm/ds.h>
++	r = -E2BIG;
++	if (msrs.nmsrs >= MAX_IO_MSRS)
++		goto out;
 +
-+#include "tls.h"
++	r = -ENOMEM;
++	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
++	entries = vmalloc(size);
++	if (!entries)
++		goto out;
 +
-+enum x86_regset {
-+	REGSET_GENERAL,
-+	REGSET_FP,
-+	REGSET_XFP,
-+	REGSET_TLS,
-+};
++	r = -EFAULT;
++	if (copy_from_user(entries, user_msrs->entries, size))
++		goto out_free;
++
++	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
++	if (r < 0)
++		goto out_free;
++
++	r = -EFAULT;
++	if (writeback && copy_to_user(user_msrs->entries, entries, size))
++		goto out_free;
++
++	r = n;
++
++out_free:
++	vfree(entries);
++out:
++	return r;
++}
 +
 +/*
-+ * does not yet catch signals sent when the child dies.
-+ * in exit.c or in signal.c.
++ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
++ * cached on it.
 + */
++void decache_vcpus_on_cpu(int cpu)
++{
++	struct kvm *vm;
++	struct kvm_vcpu *vcpu;
++	int i;
++
++	spin_lock(&kvm_lock);
++	list_for_each_entry(vm, &vm_list, vm_list)
++		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
++			vcpu = vm->vcpus[i];
++			if (!vcpu)
++				continue;
++			/*
++			 * If the vcpu is locked, then it is running on some
++			 * other cpu and therefore it is not cached on the
++			 * cpu in question.
++			 *
++			 * If it's not locked, check the last cpu it executed
++			 * on.
++			 */
++			if (mutex_trylock(&vcpu->mutex)) {
++				if (vcpu->cpu == cpu) {
++					kvm_x86_ops->vcpu_decache(vcpu);
++					vcpu->cpu = -1;
++				}
++				mutex_unlock(&vcpu->mutex);
++			}
++		}
++	spin_unlock(&kvm_lock);
++}
++
++int kvm_dev_ioctl_check_extension(long ext)
++{
++	int r;
++
++	switch (ext) {
++	case KVM_CAP_IRQCHIP:
++	case KVM_CAP_HLT:
++	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
++	case KVM_CAP_USER_MEMORY:
++	case KVM_CAP_SET_TSS_ADDR:
++	case KVM_CAP_EXT_CPUID:
++		r = 1;
++		break;
++	case KVM_CAP_VAPIC:
++		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
++		break;
++	default:
++		r = 0;
++		break;
++	}
++	return r;
++
++}
++
++long kvm_arch_dev_ioctl(struct file *filp,
++			unsigned int ioctl, unsigned long arg)
++{
++	void __user *argp = (void __user *)arg;
++	long r;
++
++	switch (ioctl) {
++	case KVM_GET_MSR_INDEX_LIST: {
++		struct kvm_msr_list __user *user_msr_list = argp;
++		struct kvm_msr_list msr_list;
++		unsigned n;
++
++		r = -EFAULT;
++		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
++			goto out;
++		n = msr_list.nmsrs;
++		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
++		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
++			goto out;
++		r = -E2BIG;
++		if (n < num_msrs_to_save)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
++				 num_msrs_to_save * sizeof(u32)))
++			goto out;
++		if (copy_to_user(user_msr_list->indices
++				 + num_msrs_to_save * sizeof(u32),
++				 &emulated_msrs,
++				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
++			goto out;
++		r = 0;
++		break;
++	}
++	default:
++		r = -EINVAL;
++	}
++out:
++	return r;
++}
++
++void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
++{
++	kvm_x86_ops->vcpu_load(vcpu, cpu);
++}
++
++void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
++{
++	kvm_x86_ops->vcpu_put(vcpu);
++	kvm_put_guest_fpu(vcpu);
++}
++
++static int is_efer_nx(void)
++{
++	u64 efer;
++
++	rdmsrl(MSR_EFER, efer);
++	return efer & EFER_NX;
++}
++
++static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
++{
++	int i;
++	struct kvm_cpuid_entry2 *e, *entry;
++
++	entry = NULL;
++	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
++		e = &vcpu->arch.cpuid_entries[i];
++		if (e->function == 0x80000001) {
++			entry = e;
++			break;
++		}
++	}
++	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
++		entry->edx &= ~(1 << 20);
++		printk(KERN_INFO "kvm: guest NX capability removed\n");
++	}
++}
++
++/* when an old userspace process fills a new kernel module */
++static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
++				    struct kvm_cpuid *cpuid,
++				    struct kvm_cpuid_entry __user *entries)
++{
++	int r, i;
++	struct kvm_cpuid_entry *cpuid_entries;
++
++	r = -E2BIG;
++	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
++		goto out;
++	r = -ENOMEM;
++	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
++	if (!cpuid_entries)
++		goto out;
++	r = -EFAULT;
++	if (copy_from_user(cpuid_entries, entries,
++			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
++		goto out_free;
++	for (i = 0; i < cpuid->nent; i++) {
++		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
++		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
++		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
++		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
++		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
++		vcpu->arch.cpuid_entries[i].index = 0;
++		vcpu->arch.cpuid_entries[i].flags = 0;
++		vcpu->arch.cpuid_entries[i].padding[0] = 0;
++		vcpu->arch.cpuid_entries[i].padding[1] = 0;
++		vcpu->arch.cpuid_entries[i].padding[2] = 0;
++	}
++	vcpu->arch.cpuid_nent = cpuid->nent;
++	cpuid_fix_nx_cap(vcpu);
++	r = 0;
++
++out_free:
++	vfree(cpuid_entries);
++out:
++	return r;
++}
++
++static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
++				    struct kvm_cpuid2 *cpuid,
++				    struct kvm_cpuid_entry2 __user *entries)
++{
++	int r;
++
++	r = -E2BIG;
++	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
++		goto out;
++	r = -EFAULT;
++	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
++			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
++		goto out;
++	vcpu->arch.cpuid_nent = cpuid->nent;
++	return 0;
++
++out:
++	return r;
++}
++
++static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
++				    struct kvm_cpuid2 *cpuid,
++				    struct kvm_cpuid_entry2 __user *entries)
++{
++	int r;
++
++	r = -E2BIG;
++	if (cpuid->nent < vcpu->arch.cpuid_nent)
++		goto out;
++	r = -EFAULT;
++	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
++			   vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
++		goto out;
++	return 0;
++
++out:
++	cpuid->nent = vcpu->arch.cpuid_nent;
++	return r;
++}
++
++static inline u32 bit(int bitno)
++{
++	return 1 << (bitno & 31);
++}
++
++static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
++			  u32 index)
++{
++	entry->function = function;
++	entry->index = index;
++	cpuid_count(entry->function, entry->index,
++		&entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
++	entry->flags = 0;
++}
++
++static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
++			 u32 index, int *nent, int maxnent)
++{
++	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
++		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
++		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
++		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
++		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
++		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
++		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
++		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
++		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
++		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
++	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
++		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
++		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
++		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
++		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
++		bit(X86_FEATURE_PGE) |
++		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
++		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
++		bit(X86_FEATURE_SYSCALL) |
++		(bit(X86_FEATURE_NX) && is_efer_nx()) |
++#ifdef CONFIG_X86_64
++		bit(X86_FEATURE_LM) |
++#endif
++		bit(X86_FEATURE_MMXEXT) |
++		bit(X86_FEATURE_3DNOWEXT) |
++		bit(X86_FEATURE_3DNOW);
++	const u32 kvm_supported_word3_x86_features =
++		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
++	const u32 kvm_supported_word6_x86_features =
++		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);
++
++	/* all func 2 cpuid_count() should be called on the same cpu */
++	get_cpu();
++	do_cpuid_1_ent(entry, function, index);
++	++*nent;
++
++	switch (function) {
++	case 0:
++		entry->eax = min(entry->eax, (u32)0xb);
++		break;
++	case 1:
++		entry->edx &= kvm_supported_word0_x86_features;
++		entry->ecx &= kvm_supported_word3_x86_features;
++		break;
++	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
++	 * may return different values. This forces us to get_cpu() before
++	 * issuing the first command, and also to emulate this annoying behavior
++	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
++	case 2: {
++		int t, times = entry->eax & 0xff;
++
++		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
++		for (t = 1; t < times && *nent < maxnent; ++t) {
++			do_cpuid_1_ent(&entry[t], function, 0);
++			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
++			++*nent;
++		}
++		break;
++	}
++	/* function 4 and 0xb have additional index. */
++	case 4: {
++		int index, cache_type;
++
++		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++		/* read more entries until cache_type is zero */
++		for (index = 1; *nent < maxnent; ++index) {
++			cache_type = entry[index - 1].eax & 0x1f;
++			if (!cache_type)
++				break;
++			do_cpuid_1_ent(&entry[index], function, index);
++			entry[index].flags |=
++			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++			++*nent;
++		}
++		break;
++	}
++	case 0xb: {
++		int index, level_type;
++
++		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++		/* read more entries until level_type is zero */
++		for (index = 1; *nent < maxnent; ++index) {
++			level_type = entry[index - 1].ecx & 0xff;
++			if (!level_type)
++				break;
++			do_cpuid_1_ent(&entry[index], function, index);
++			entry[index].flags |=
++			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++			++*nent;
++		}
++		break;
++	}
++	case 0x80000000:
++		entry->eax = min(entry->eax, 0x8000001a);
++		break;
++	case 0x80000001:
++		entry->edx &= kvm_supported_word1_x86_features;
++		entry->ecx &= kvm_supported_word6_x86_features;
++		break;
++	}
++	put_cpu();
++}
++
++static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm,
++				    struct kvm_cpuid2 *cpuid,
++				    struct kvm_cpuid_entry2 __user *entries)
++{
++	struct kvm_cpuid_entry2 *cpuid_entries;
++	int limit, nent = 0, r = -E2BIG;
++	u32 func;
++
++	if (cpuid->nent < 1)
++		goto out;
++	r = -ENOMEM;
++	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
++	if (!cpuid_entries)
++		goto out;
++
++	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
++	limit = cpuid_entries[0].eax;
++	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
++		do_cpuid_ent(&cpuid_entries[nent], func, 0,
++				&nent, cpuid->nent);
++	r = -E2BIG;
++	if (nent >= cpuid->nent)
++		goto out_free;
++
++	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
++	limit = cpuid_entries[nent - 1].eax;
++	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
++		do_cpuid_ent(&cpuid_entries[nent], func, 0,
++			       &nent, cpuid->nent);
++	r = -EFAULT;
++	if (copy_to_user(entries, cpuid_entries,
++			nent * sizeof(struct kvm_cpuid_entry2)))
++		goto out_free;
++	cpuid->nent = nent;
++	r = 0;
++
++out_free:
++	vfree(cpuid_entries);
++out:
++	return r;
++}
++
++static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
++				    struct kvm_lapic_state *s)
++{
++	vcpu_load(vcpu);
++	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
++	vcpu_put(vcpu);
++
++	return 0;
++}
++
++static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
++				    struct kvm_lapic_state *s)
++{
++	vcpu_load(vcpu);
++	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
++	kvm_apic_post_state_restore(vcpu);
++	vcpu_put(vcpu);
++
++	return 0;
++}
++
++static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
++				    struct kvm_interrupt *irq)
++{
++	if (irq->irq < 0 || irq->irq >= 256)
++		return -EINVAL;
++	if (irqchip_in_kernel(vcpu->kvm))
++		return -ENXIO;
++	vcpu_load(vcpu);
++
++	set_bit(irq->irq, vcpu->arch.irq_pending);
++	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
++
++	vcpu_put(vcpu);
++
++	return 0;
++}
++
++static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
++					   struct kvm_tpr_access_ctl *tac)
++{
++	if (tac->flags)
++		return -EINVAL;
++	vcpu->arch.tpr_access_reporting = !!tac->enabled;
++	return 0;
++}
++
++long kvm_arch_vcpu_ioctl(struct file *filp,
++			 unsigned int ioctl, unsigned long arg)
++{
++	struct kvm_vcpu *vcpu = filp->private_data;
++	void __user *argp = (void __user *)arg;
++	int r;
++
++	switch (ioctl) {
++	case KVM_GET_LAPIC: {
++		struct kvm_lapic_state lapic;
++
++		memset(&lapic, 0, sizeof lapic);
++		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
++		if (r)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(argp, &lapic, sizeof lapic))
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_SET_LAPIC: {
++		struct kvm_lapic_state lapic;
++
++		r = -EFAULT;
++		if (copy_from_user(&lapic, argp, sizeof lapic))
++			goto out;
++		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
++		if (r)
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_INTERRUPT: {
++		struct kvm_interrupt irq;
++
++		r = -EFAULT;
++		if (copy_from_user(&irq, argp, sizeof irq))
++			goto out;
++		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
++		if (r)
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_SET_CPUID: {
++		struct kvm_cpuid __user *cpuid_arg = argp;
++		struct kvm_cpuid cpuid;
++
++		r = -EFAULT;
++		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
++			goto out;
++		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
++		if (r)
++			goto out;
++		break;
++	}
++	case KVM_SET_CPUID2: {
++		struct kvm_cpuid2 __user *cpuid_arg = argp;
++		struct kvm_cpuid2 cpuid;
++
++		r = -EFAULT;
++		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
++			goto out;
++		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
++				cpuid_arg->entries);
++		if (r)
++			goto out;
++		break;
++	}
++	case KVM_GET_CPUID2: {
++		struct kvm_cpuid2 __user *cpuid_arg = argp;
++		struct kvm_cpuid2 cpuid;
++
++		r = -EFAULT;
++		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
++			goto out;
++		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
++				cpuid_arg->entries);
++		if (r)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_GET_MSRS:
++		r = msr_io(vcpu, argp, kvm_get_msr, 1);
++		break;
++	case KVM_SET_MSRS:
++		r = msr_io(vcpu, argp, do_set_msr, 0);
++		break;
++	case KVM_TPR_ACCESS_REPORTING: {
++		struct kvm_tpr_access_ctl tac;
++
++		r = -EFAULT;
++		if (copy_from_user(&tac, argp, sizeof tac))
++			goto out;
++		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
++		if (r)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(argp, &tac, sizeof tac))
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_SET_VAPIC_ADDR: {
++		struct kvm_vapic_addr va;
++
++		r = -EINVAL;
++		if (!irqchip_in_kernel(vcpu->kvm))
++			goto out;
++		r = -EFAULT;
++		if (copy_from_user(&va, argp, sizeof va))
++			goto out;
++		r = 0;
++		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++		break;
++	}
++	default:
++		r = -EINVAL;
++	}
++out:
++	return r;
++}
++
++static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
++{
++	int ret;
++
++	if (addr > (unsigned int)(-3 * PAGE_SIZE))
++		return -1;
++	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
++	return ret;
++}
++
++static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
++					  u32 kvm_nr_mmu_pages)
++{
++	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
++		return -EINVAL;
++
++	down_write(&current->mm->mmap_sem);
++
++	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
++	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
++
++	up_write(&current->mm->mmap_sem);
++	return 0;
++}
++
++static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
++{
++	return kvm->arch.n_alloc_mmu_pages;
++}
++
++gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
++{
++	int i;
++	struct kvm_mem_alias *alias;
++
++	for (i = 0; i < kvm->arch.naliases; ++i) {
++		alias = &kvm->arch.aliases[i];
++		if (gfn >= alias->base_gfn
++		    && gfn < alias->base_gfn + alias->npages)
++			return alias->target_gfn + gfn - alias->base_gfn;
++	}
++	return gfn;
++}
 +
 +/*
-+ * Determines which flags the user has access to [1 = access, 0 = no access].
++ * Set a new alias region.  Aliases map a portion of physical memory into
++ * another portion.  This is useful for memory windows, for example the PC
++ * VGA region.
 + */
-+#define FLAG_MASK_32		((unsigned long)			\
-+				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
-+				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
-+				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
-+				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
-+				  X86_EFLAGS_RF | X86_EFLAGS_AC))
++static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
++					 struct kvm_memory_alias *alias)
++{
++	int r, n;
++	struct kvm_mem_alias *p;
++
++	r = -EINVAL;
++	/* General sanity checks */
++	if (alias->memory_size & (PAGE_SIZE - 1))
++		goto out;
++	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
++		goto out;
++	if (alias->slot >= KVM_ALIAS_SLOTS)
++		goto out;
++	if (alias->guest_phys_addr + alias->memory_size
++	    < alias->guest_phys_addr)
++		goto out;
++	if (alias->target_phys_addr + alias->memory_size
++	    < alias->target_phys_addr)
++		goto out;
++
++	down_write(&current->mm->mmap_sem);
++
++	p = &kvm->arch.aliases[alias->slot];
++	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
++	p->npages = alias->memory_size >> PAGE_SHIFT;
++	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
++
++	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
++		if (kvm->arch.aliases[n - 1].npages)
++			break;
++	kvm->arch.naliases = n;
++
++	kvm_mmu_zap_all(kvm);
++
++	up_write(&current->mm->mmap_sem);
++
++	return 0;
++
++out:
++	return r;
++}
++
++static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
++{
++	int r;
++
++	r = 0;
++	switch (chip->chip_id) {
++	case KVM_IRQCHIP_PIC_MASTER:
++		memcpy(&chip->chip.pic,
++			&pic_irqchip(kvm)->pics[0],
++			sizeof(struct kvm_pic_state));
++		break;
++	case KVM_IRQCHIP_PIC_SLAVE:
++		memcpy(&chip->chip.pic,
++			&pic_irqchip(kvm)->pics[1],
++			sizeof(struct kvm_pic_state));
++		break;
++	case KVM_IRQCHIP_IOAPIC:
++		memcpy(&chip->chip.ioapic,
++			ioapic_irqchip(kvm),
++			sizeof(struct kvm_ioapic_state));
++		break;
++	default:
++		r = -EINVAL;
++		break;
++	}
++	return r;
++}
++
++static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
++{
++	int r;
++
++	r = 0;
++	switch (chip->chip_id) {
++	case KVM_IRQCHIP_PIC_MASTER:
++		memcpy(&pic_irqchip(kvm)->pics[0],
++			&chip->chip.pic,
++			sizeof(struct kvm_pic_state));
++		break;
++	case KVM_IRQCHIP_PIC_SLAVE:
++		memcpy(&pic_irqchip(kvm)->pics[1],
++			&chip->chip.pic,
++			sizeof(struct kvm_pic_state));
++		break;
++	case KVM_IRQCHIP_IOAPIC:
++		memcpy(ioapic_irqchip(kvm),
++			&chip->chip.ioapic,
++			sizeof(struct kvm_ioapic_state));
++		break;
++	default:
++		r = -EINVAL;
++		break;
++	}
++	kvm_pic_update_irq(pic_irqchip(kvm));
++	return r;
++}
 +
 +/*
-+ * Determines whether a value may be installed in a segment register.
++ * Get (and clear) the dirty memory log for a memory slot.
 + */
-+static inline bool invalid_selector(u16 value)
++int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
++				      struct kvm_dirty_log *log)
 +{
-+	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
++	int r;
++	int n;
++	struct kvm_memory_slot *memslot;
++	int is_dirty = 0;
++
++	down_write(&current->mm->mmap_sem);
++
++	r = kvm_get_dirty_log(kvm, log, &is_dirty);
++	if (r)
++		goto out;
++
++	/* If nothing is dirty, don't bother messing with page tables. */
++	if (is_dirty) {
++		kvm_mmu_slot_remove_write_access(kvm, log->slot);
++		kvm_flush_remote_tlbs(kvm);
++		memslot = &kvm->memslots[log->slot];
++		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++		memset(memslot->dirty_bitmap, 0, n);
++	}
++	r = 0;
++out:
++	up_write(&current->mm->mmap_sem);
++	return r;
 +}
 +
-+#ifdef CONFIG_X86_32
++long kvm_arch_vm_ioctl(struct file *filp,
++		       unsigned int ioctl, unsigned long arg)
++{
++	struct kvm *kvm = filp->private_data;
++	void __user *argp = (void __user *)arg;
++	int r = -EINVAL;
 +
-+#define FLAG_MASK		FLAG_MASK_32
++	switch (ioctl) {
++	case KVM_SET_TSS_ADDR:
++		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
++		if (r < 0)
++			goto out;
++		break;
++	case KVM_SET_MEMORY_REGION: {
++		struct kvm_memory_region kvm_mem;
++		struct kvm_userspace_memory_region kvm_userspace_mem;
 +
-+static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
++		r = -EFAULT;
++		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
++			goto out;
++		kvm_userspace_mem.slot = kvm_mem.slot;
++		kvm_userspace_mem.flags = kvm_mem.flags;
++		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
++		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
++		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
++		if (r)
++			goto out;
++		break;
++	}
++	case KVM_SET_NR_MMU_PAGES:
++		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
++		if (r)
++			goto out;
++		break;
++	case KVM_GET_NR_MMU_PAGES:
++		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
++		break;
++	case KVM_SET_MEMORY_ALIAS: {
++		struct kvm_memory_alias alias;
++
++		r = -EFAULT;
++		if (copy_from_user(&alias, argp, sizeof alias))
++			goto out;
++		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
++		if (r)
++			goto out;
++		break;
++	}
++	case KVM_CREATE_IRQCHIP:
++		r = -ENOMEM;
++		kvm->arch.vpic = kvm_create_pic(kvm);
++		if (kvm->arch.vpic) {
++			r = kvm_ioapic_init(kvm);
++			if (r) {
++				kfree(kvm->arch.vpic);
++				kvm->arch.vpic = NULL;
++				goto out;
++			}
++		} else
++			goto out;
++		break;
++	case KVM_IRQ_LINE: {
++		struct kvm_irq_level irq_event;
++
++		r = -EFAULT;
++		if (copy_from_user(&irq_event, argp, sizeof irq_event))
++			goto out;
++		if (irqchip_in_kernel(kvm)) {
++			mutex_lock(&kvm->lock);
++			if (irq_event.irq < 16)
++				kvm_pic_set_irq(pic_irqchip(kvm),
++					irq_event.irq,
++					irq_event.level);
++			kvm_ioapic_set_irq(kvm->arch.vioapic,
++					irq_event.irq,
++					irq_event.level);
++			mutex_unlock(&kvm->lock);
++			r = 0;
++		}
++		break;
++	}
++	case KVM_GET_IRQCHIP: {
++		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
++		struct kvm_irqchip chip;
++
++		r = -EFAULT;
++		if (copy_from_user(&chip, argp, sizeof chip))
++			goto out;
++		r = -ENXIO;
++		if (!irqchip_in_kernel(kvm))
++			goto out;
++		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
++		if (r)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(argp, &chip, sizeof chip))
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_SET_IRQCHIP: {
++		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
++		struct kvm_irqchip chip;
++
++		r = -EFAULT;
++		if (copy_from_user(&chip, argp, sizeof chip))
++			goto out;
++		r = -ENXIO;
++		if (!irqchip_in_kernel(kvm))
++			goto out;
++		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
++		if (r)
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_GET_SUPPORTED_CPUID: {
++		struct kvm_cpuid2 __user *cpuid_arg = argp;
++		struct kvm_cpuid2 cpuid;
++
++		r = -EFAULT;
++		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
++			goto out;
++		r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid,
++			cpuid_arg->entries);
++		if (r)
++			goto out;
++
++		r = -EFAULT;
++		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
++			goto out;
++		r = 0;
++		break;
++	}
++	default:
++		;
++	}
++out:
++	return r;
++}
++
++static void kvm_init_msr_list(void)
 +{
-+	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
-+	regno >>= 2;
-+	if (regno > FS)
-+		--regno;
-+	return &regs->bx + regno;
++	u32 dummy[2];
++	unsigned i, j;
++
++	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
++		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
++			continue;
++		if (j < i)
++			msrs_to_save[j] = msrs_to_save[i];
++		j++;
++	}
++	num_msrs_to_save = j;
 +}
 +
-+static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
++/*
++ * Only the apic needs an MMIO device hook, so shortcut now..
++ */
++static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
++						gpa_t addr)
++{
++	struct kvm_io_device *dev;
++
++	if (vcpu->arch.apic) {
++		dev = &vcpu->arch.apic->dev;
++		if (dev->in_range(dev, addr))
++			return dev;
++	}
++	return NULL;
++}
++
++
++static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
++						gpa_t addr)
++{
++	struct kvm_io_device *dev;
++
++	dev = vcpu_find_pervcpu_dev(vcpu, addr);
++	if (dev == NULL)
++		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
++	return dev;
++}
++
++int emulator_read_std(unsigned long addr,
++			     void *val,
++			     unsigned int bytes,
++			     struct kvm_vcpu *vcpu)
++{
++	void *data = val;
++	int r = X86EMUL_CONTINUE;
++
++	down_read(&current->mm->mmap_sem);
++	while (bytes) {
++		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++		unsigned offset = addr & (PAGE_SIZE-1);
++		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
++		int ret;
++
++		if (gpa == UNMAPPED_GVA) {
++			r = X86EMUL_PROPAGATE_FAULT;
++			goto out;
++		}
++		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
++		if (ret < 0) {
++			r = X86EMUL_UNHANDLEABLE;
++			goto out;
++		}
++
++		bytes -= tocopy;
++		data += tocopy;
++		addr += tocopy;
++	}
++out:
++	up_read(&current->mm->mmap_sem);
++	return r;
++}
++EXPORT_SYMBOL_GPL(emulator_read_std);
++
++static int emulator_read_emulated(unsigned long addr,
++				  void *val,
++				  unsigned int bytes,
++				  struct kvm_vcpu *vcpu)
 +{
++	struct kvm_io_device *mmio_dev;
++	gpa_t                 gpa;
++
++	if (vcpu->mmio_read_completed) {
++		memcpy(val, vcpu->mmio_data, bytes);
++		vcpu->mmio_read_completed = 0;
++		return X86EMUL_CONTINUE;
++	}
++
++	down_read(&current->mm->mmap_sem);
++	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++	up_read(&current->mm->mmap_sem);
++
++	/* For APIC access vmexit */
++	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
++		goto mmio;
++
++	if (emulator_read_std(addr, val, bytes, vcpu)
++			== X86EMUL_CONTINUE)
++		return X86EMUL_CONTINUE;
++	if (gpa == UNMAPPED_GVA)
++		return X86EMUL_PROPAGATE_FAULT;
++
++mmio:
 +	/*
-+	 * Returning the value truncates it to 16 bits.
++	 * Is this MMIO handled locally?
 +	 */
-+	unsigned int retval;
-+	if (offset != offsetof(struct user_regs_struct, gs))
-+		retval = *pt_regs_access(task_pt_regs(task), offset);
-+	else {
-+		retval = task->thread.gs;
-+		if (task == current)
-+			savesegment(gs, retval);
++	mutex_lock(&vcpu->kvm->lock);
++	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
++	if (mmio_dev) {
++		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
++		mutex_unlock(&vcpu->kvm->lock);
++		return X86EMUL_CONTINUE;
 +	}
-+	return retval;
++	mutex_unlock(&vcpu->kvm->lock);
++
++	vcpu->mmio_needed = 1;
++	vcpu->mmio_phys_addr = gpa;
++	vcpu->mmio_size = bytes;
++	vcpu->mmio_is_write = 0;
++
++	return X86EMUL_UNHANDLEABLE;
 +}
 +
-+static int set_segment_reg(struct task_struct *task,
-+			   unsigned long offset, u16 value)
++static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
++			       const void *val, int bytes)
++{
++	int ret;
++
++	down_read(&current->mm->mmap_sem);
++	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
++	if (ret < 0) {
++		up_read(&current->mm->mmap_sem);
++		return 0;
++	}
++	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
++	up_read(&current->mm->mmap_sem);
++	return 1;
++}
++
++static int emulator_write_emulated_onepage(unsigned long addr,
++					   const void *val,
++					   unsigned int bytes,
++					   struct kvm_vcpu *vcpu)
 +{
++	struct kvm_io_device *mmio_dev;
++	gpa_t                 gpa;
++
++	down_read(&current->mm->mmap_sem);
++	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++	up_read(&current->mm->mmap_sem);
++
++	if (gpa == UNMAPPED_GVA) {
++		kvm_inject_page_fault(vcpu, addr, 2);
++		return X86EMUL_PROPAGATE_FAULT;
++	}
++
++	/* For APIC access vmexit */
++	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
++		goto mmio;
++
++	if (emulator_write_phys(vcpu, gpa, val, bytes))
++		return X86EMUL_CONTINUE;
++
++mmio:
 +	/*
-+	 * The value argument was already truncated to 16 bits.
++	 * Is this MMIO handled locally?
 +	 */
-+	if (invalid_selector(value))
-+		return -EIO;
++	mutex_lock(&vcpu->kvm->lock);
++	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
++	if (mmio_dev) {
++		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
++		mutex_unlock(&vcpu->kvm->lock);
++		return X86EMUL_CONTINUE;
++	}
++	mutex_unlock(&vcpu->kvm->lock);
 +
-+	if (offset != offsetof(struct user_regs_struct, gs))
-+		*pt_regs_access(task_pt_regs(task), offset) = value;
-+	else {
-+		task->thread.gs = value;
-+		if (task == current)
-+			/*
-+			 * The user-mode %gs is not affected by
-+			 * kernel entry, so we must update the CPU.
-+			 */
-+			loadsegment(gs, value);
++	vcpu->mmio_needed = 1;
++	vcpu->mmio_phys_addr = gpa;
++	vcpu->mmio_size = bytes;
++	vcpu->mmio_is_write = 1;
++	memcpy(vcpu->mmio_data, val, bytes);
++
++	return X86EMUL_CONTINUE;
++}
++
++int emulator_write_emulated(unsigned long addr,
++				   const void *val,
++				   unsigned int bytes,
++				   struct kvm_vcpu *vcpu)
++{
++	/* Crossing a page boundary? */
++	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
++		int rc, now;
++
++		now = -addr & ~PAGE_MASK;
++		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
++		if (rc != X86EMUL_CONTINUE)
++			return rc;
++		addr += now;
++		val += now;
++		bytes -= now;
 +	}
++	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
++}
++EXPORT_SYMBOL_GPL(emulator_write_emulated);
 +
-+	return 0;
++static int emulator_cmpxchg_emulated(unsigned long addr,
++				     const void *old,
++				     const void *new,
++				     unsigned int bytes,
++				     struct kvm_vcpu *vcpu)
++{
++	static int reported;
++
++	if (!reported) {
++		reported = 1;
++		printk(KERN_WARNING "kvm: emulating exchange as write\n");
++	}
++#ifndef CONFIG_X86_64
++	/* a guest's cmpxchg8b has to be emulated atomically */
++	if (bytes == 8) {
++		gpa_t gpa;
++		struct page *page;
++		char *addr;
++		u64 val;
++
++		down_read(&current->mm->mmap_sem);
++		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++
++		if (gpa == UNMAPPED_GVA ||
++		   (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
++			goto emul_write;
++
++		if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
++			goto emul_write;
++
++		val = *(u64 *)new;
++		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
++		addr = kmap_atomic(page, KM_USER0);
++		set_64bit((u64 *)(addr + offset_in_page(gpa)), val);
++		kunmap_atomic(addr, KM_USER0);
++		kvm_release_page_dirty(page);
++	emul_write:
++		up_read(&current->mm->mmap_sem);
++	}
++#endif
++
++	return emulator_write_emulated(addr, new, bytes, vcpu);
 +}
 +
-+static unsigned long debugreg_addr_limit(struct task_struct *task)
++static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 +{
-+	return TASK_SIZE - 3;
++	return kvm_x86_ops->get_segment_base(vcpu, seg);
 +}
 +
-+#else  /* CONFIG_X86_64 */
++int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
++{
++	return X86EMUL_CONTINUE;
++}
 +
-+#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)
++int emulate_clts(struct kvm_vcpu *vcpu)
++{
++	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
++	return X86EMUL_CONTINUE;
++}
 +
-+static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
++int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 +{
-+	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
-+	return &regs->r15 + (offset / sizeof(regs->r15));
++	struct kvm_vcpu *vcpu = ctxt->vcpu;
++
++	switch (dr) {
++	case 0 ... 3:
++		*dest = kvm_x86_ops->get_dr(vcpu, dr);
++		return X86EMUL_CONTINUE;
++	default:
++		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
++		return X86EMUL_UNHANDLEABLE;
++	}
 +}
 +
-+static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
++int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 +{
-+	/*
-+	 * Returning the value truncates it to 16 bits.
-+	 */
-+	unsigned int seg;
++	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
++	int exception;
 +
-+	switch (offset) {
-+	case offsetof(struct user_regs_struct, fs):
-+		if (task == current) {
-+			/* Older gas can't assemble movq %?s,%r?? */
-+			asm("movl %%fs,%0" : "=r" (seg));
-+			return seg;
++	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
++	if (exception) {
++		/* FIXME: better handling */
++		return X86EMUL_UNHANDLEABLE;
++	}
++	return X86EMUL_CONTINUE;
++}
++
++void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
++{
++	static int reported;
++	u8 opcodes[4];
++	unsigned long rip = vcpu->arch.rip;
++	unsigned long rip_linear;
++
++	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
++
++	if (reported)
++		return;
++
++	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
++
++	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
++	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
++	reported = 1;
++}
++EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
++
++struct x86_emulate_ops emulate_ops = {
++	.read_std            = emulator_read_std,
++	.read_emulated       = emulator_read_emulated,
++	.write_emulated      = emulator_write_emulated,
++	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
++};
++
++int emulate_instruction(struct kvm_vcpu *vcpu,
++			struct kvm_run *run,
++			unsigned long cr2,
++			u16 error_code,
++			int emulation_type)
++{
++	int r;
++	struct decode_cache *c;
++
++	vcpu->arch.mmio_fault_cr2 = cr2;
++	kvm_x86_ops->cache_regs(vcpu);
++
++	vcpu->mmio_is_write = 0;
++	vcpu->arch.pio.string = 0;
++
++	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
++		int cs_db, cs_l;
++		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
++
++		vcpu->arch.emulate_ctxt.vcpu = vcpu;
++		vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
++		vcpu->arch.emulate_ctxt.mode =
++			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
++			? X86EMUL_MODE_REAL : cs_l
++			? X86EMUL_MODE_PROT64 :	cs_db
++			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
++
++		if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
++			vcpu->arch.emulate_ctxt.cs_base = 0;
++			vcpu->arch.emulate_ctxt.ds_base = 0;
++			vcpu->arch.emulate_ctxt.es_base = 0;
++			vcpu->arch.emulate_ctxt.ss_base = 0;
++		} else {
++			vcpu->arch.emulate_ctxt.cs_base =
++					get_segment_base(vcpu, VCPU_SREG_CS);
++			vcpu->arch.emulate_ctxt.ds_base =
++					get_segment_base(vcpu, VCPU_SREG_DS);
++			vcpu->arch.emulate_ctxt.es_base =
++					get_segment_base(vcpu, VCPU_SREG_ES);
++			vcpu->arch.emulate_ctxt.ss_base =
++					get_segment_base(vcpu, VCPU_SREG_SS);
++		}
++
++		vcpu->arch.emulate_ctxt.gs_base =
++					get_segment_base(vcpu, VCPU_SREG_GS);
++		vcpu->arch.emulate_ctxt.fs_base =
++					get_segment_base(vcpu, VCPU_SREG_FS);
++
++		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
++
++		/* Reject instructions other than VMCALL/VMMCALL when
++		 * trying to emulate an invalid opcode */
++		c = &vcpu->arch.emulate_ctxt.decode;
++		if ((emulation_type & EMULTYPE_TRAP_UD) &&
++		    (!(c->twobyte && c->b == 0x01 &&
++		      (c->modrm_reg == 0 || c->modrm_reg == 3) &&
++		       c->modrm_mod == 3 && c->modrm_rm == 1)))
++			return EMULATE_FAIL;
++
++		++vcpu->stat.insn_emulation;
++		if (r)  {
++			++vcpu->stat.insn_emulation_fail;
++			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
++				return EMULATE_DONE;
++			return EMULATE_FAIL;
 +		}
-+		return task->thread.fsindex;
-+	case offsetof(struct user_regs_struct, gs):
-+		if (task == current) {
-+			asm("movl %%gs,%0" : "=r" (seg));
-+			return seg;
++	}
++
++	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
++
++	if (vcpu->arch.pio.string)
++		return EMULATE_DO_MMIO;
++
++	if ((r || vcpu->mmio_is_write) && run) {
++		run->exit_reason = KVM_EXIT_MMIO;
++		run->mmio.phys_addr = vcpu->mmio_phys_addr;
++		memcpy(run->mmio.data, vcpu->mmio_data, 8);
++		run->mmio.len = vcpu->mmio_size;
++		run->mmio.is_write = vcpu->mmio_is_write;
++	}
++
++	if (r) {
++		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
++			return EMULATE_DONE;
++		if (!vcpu->mmio_needed) {
++			kvm_report_emulation_failure(vcpu, "mmio");
++			return EMULATE_FAIL;
 +		}
-+		return task->thread.gsindex;
-+	case offsetof(struct user_regs_struct, ds):
-+		if (task == current) {
-+			asm("movl %%ds,%0" : "=r" (seg));
-+			return seg;
++		return EMULATE_DO_MMIO;
++	}
++
++	kvm_x86_ops->decache_regs(vcpu);
++	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
++
++	if (vcpu->mmio_is_write) {
++		vcpu->mmio_needed = 0;
++		return EMULATE_DO_MMIO;
++	}
++
++	return EMULATE_DONE;
++}
++EXPORT_SYMBOL_GPL(emulate_instruction);
++
++static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
++		if (vcpu->arch.pio.guest_pages[i]) {
++			kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
++			vcpu->arch.pio.guest_pages[i] = NULL;
 +		}
-+		return task->thread.ds;
-+	case offsetof(struct user_regs_struct, es):
-+		if (task == current) {
-+			asm("movl %%es,%0" : "=r" (seg));
-+			return seg;
++}
++
++static int pio_copy_data(struct kvm_vcpu *vcpu)
++{
++	void *p = vcpu->arch.pio_data;
++	void *q;
++	unsigned bytes;
++	int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
++
++	q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
++		 PAGE_KERNEL);
++	if (!q) {
++		free_pio_guest_pages(vcpu);
++		return -ENOMEM;
++	}
++	q += vcpu->arch.pio.guest_page_offset;
++	bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
++	if (vcpu->arch.pio.in)
++		memcpy(q, p, bytes);
++	else
++		memcpy(p, q, bytes);
++	q -= vcpu->arch.pio.guest_page_offset;
++	vunmap(q);
++	free_pio_guest_pages(vcpu);
++	return 0;
++}
++
++int complete_pio(struct kvm_vcpu *vcpu)
++{
++	struct kvm_pio_request *io = &vcpu->arch.pio;
++	long delta;
++	int r;
++
++	kvm_x86_ops->cache_regs(vcpu);
++
++	if (!io->string) {
++		if (io->in)
++			memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
++			       io->size);
++	} else {
++		if (io->in) {
++			r = pio_copy_data(vcpu);
++			if (r) {
++				kvm_x86_ops->cache_regs(vcpu);
++				return r;
++			}
 +		}
-+		return task->thread.es;
 +
-+	case offsetof(struct user_regs_struct, cs):
-+	case offsetof(struct user_regs_struct, ss):
-+		break;
++		delta = 1;
++		if (io->rep) {
++			delta *= io->cur_count;
++			/*
++			 * The size of the register should really depend on
++			 * current address size.
++			 */
++			vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
++		}
++		if (io->down)
++			delta = -delta;
++		delta *= io->size;
++		if (io->in)
++			vcpu->arch.regs[VCPU_REGS_RDI] += delta;
++		else
++			vcpu->arch.regs[VCPU_REGS_RSI] += delta;
 +	}
-+	return *pt_regs_access(task_pt_regs(task), offset);
++
++	kvm_x86_ops->decache_regs(vcpu);
++
++	io->count -= io->cur_count;
++	io->cur_count = 0;
++
++	return 0;
 +}
 +
-+static int set_segment_reg(struct task_struct *task,
-+			   unsigned long offset, u16 value)
++static void kernel_pio(struct kvm_io_device *pio_dev,
++		       struct kvm_vcpu *vcpu,
++		       void *pd)
 +{
-+	/*
-+	 * The value argument was already truncated to 16 bits.
-+	 */
-+	if (invalid_selector(value))
-+		return -EIO;
++	/* TODO: String I/O for in kernel device */
 +
-+	switch (offset) {
-+	case offsetof(struct user_regs_struct,fs):
++	mutex_lock(&vcpu->kvm->lock);
++	if (vcpu->arch.pio.in)
++		kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
++				  vcpu->arch.pio.size,
++				  pd);
++	else
++		kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
++				   vcpu->arch.pio.size,
++				   pd);
++	mutex_unlock(&vcpu->kvm->lock);
++}
++
++static void pio_string_write(struct kvm_io_device *pio_dev,
++			     struct kvm_vcpu *vcpu)
++{
++	struct kvm_pio_request *io = &vcpu->arch.pio;
++	void *pd = vcpu->arch.pio_data;
++	int i;
++
++	mutex_lock(&vcpu->kvm->lock);
++	for (i = 0; i < io->cur_count; i++) {
++		kvm_iodevice_write(pio_dev, io->port,
++				   io->size,
++				   pd);
++		pd += io->size;
++	}
++	mutex_unlock(&vcpu->kvm->lock);
++}
++
++static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
++					       gpa_t addr)
++{
++	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
++}
++
++int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
++		  int size, unsigned port)
++{
++	struct kvm_io_device *pio_dev;
++
++	vcpu->run->exit_reason = KVM_EXIT_IO;
++	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
++	vcpu->run->io.size = vcpu->arch.pio.size = size;
++	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
++	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
++	vcpu->run->io.port = vcpu->arch.pio.port = port;
++	vcpu->arch.pio.in = in;
++	vcpu->arch.pio.string = 0;
++	vcpu->arch.pio.down = 0;
++	vcpu->arch.pio.guest_page_offset = 0;
++	vcpu->arch.pio.rep = 0;
++
++	kvm_x86_ops->cache_regs(vcpu);
++	memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
++	kvm_x86_ops->decache_regs(vcpu);
++
++	kvm_x86_ops->skip_emulated_instruction(vcpu);
++
++	pio_dev = vcpu_find_pio_dev(vcpu, port);
++	if (pio_dev) {
++		kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
++		complete_pio(vcpu);
++		return 1;
++	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_emulate_pio);
++
++int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
++		  int size, unsigned long count, int down,
++		  gva_t address, int rep, unsigned port)
++{
++	unsigned now, in_page;
++	int i, ret = 0;
++	int nr_pages = 1;
++	struct page *page;
++	struct kvm_io_device *pio_dev;
++
++	vcpu->run->exit_reason = KVM_EXIT_IO;
++	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
++	vcpu->run->io.size = vcpu->arch.pio.size = size;
++	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
++	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
++	vcpu->run->io.port = vcpu->arch.pio.port = port;
++	vcpu->arch.pio.in = in;
++	vcpu->arch.pio.string = 1;
++	vcpu->arch.pio.down = down;
++	vcpu->arch.pio.guest_page_offset = offset_in_page(address);
++	vcpu->arch.pio.rep = rep;
++
++	if (!count) {
++		kvm_x86_ops->skip_emulated_instruction(vcpu);
++		return 1;
++	}
++
++	if (!down)
++		in_page = PAGE_SIZE - offset_in_page(address);
++	else
++		in_page = offset_in_page(address) + size;
++	now = min(count, (unsigned long)in_page / size);
++	if (!now) {
 +		/*
-+		 * If this is setting fs as for normal 64-bit use but
-+		 * setting fs_base has implicitly changed it, leave it.
++		 * String I/O straddles page boundary.  Pin two guest pages
++		 * so that we satisfy atomicity constraints.  Do just one
++		 * transaction to avoid complexity.
 +		 */
-+		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
-+		     task->thread.fs != 0) ||
-+		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
-+		     task->thread.fs == 0))
-+			break;
-+		task->thread.fsindex = value;
-+		if (task == current)
-+			loadsegment(fs, task->thread.fsindex);
-+		break;
-+	case offsetof(struct user_regs_struct,gs):
++		nr_pages = 2;
++		now = 1;
++	}
++	if (down) {
 +		/*
-+		 * If this is setting gs as for normal 64-bit use but
-+		 * setting gs_base has implicitly changed it, leave it.
++		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
 +		 */
-+		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
-+		     task->thread.gs != 0) ||
-+		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
-+		     task->thread.gs == 0))
-+			break;
-+		task->thread.gsindex = value;
-+		if (task == current)
-+			load_gs_index(task->thread.gsindex);
++		pr_unimpl(vcpu, "guest string pio down\n");
++		kvm_inject_gp(vcpu, 0);
++		return 1;
++	}
++	vcpu->run->io.count = now;
++	vcpu->arch.pio.cur_count = now;
++
++	if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
++		kvm_x86_ops->skip_emulated_instruction(vcpu);
++
++	for (i = 0; i < nr_pages; ++i) {
++		down_read(&current->mm->mmap_sem);
++		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
++		vcpu->arch.pio.guest_pages[i] = page;
++		up_read(&current->mm->mmap_sem);
++		if (!page) {
++			kvm_inject_gp(vcpu, 0);
++			free_pio_guest_pages(vcpu);
++			return 1;
++		}
++	}
++
++	pio_dev = vcpu_find_pio_dev(vcpu, port);
++	if (!vcpu->arch.pio.in) {
++		/* string PIO write */
++		ret = pio_copy_data(vcpu);
++		if (ret >= 0 && pio_dev) {
++			pio_string_write(pio_dev, vcpu);
++			complete_pio(vcpu);
++			if (vcpu->arch.pio.count == 0)
++				ret = 1;
++		}
++	} else if (pio_dev)
++		pr_unimpl(vcpu, "no string pio read support yet, "
++		       "port %x size %d count %ld\n",
++			port, size, count);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
++
++int kvm_arch_init(void *opaque)
++{
++	int r;
++	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
++
++	if (kvm_x86_ops) {
++		printk(KERN_ERR "kvm: already loaded the other module\n");
++		r = -EEXIST;
++		goto out;
++	}
++
++	if (!ops->cpu_has_kvm_support()) {
++		printk(KERN_ERR "kvm: no hardware support\n");
++		r = -EOPNOTSUPP;
++		goto out;
++	}
++	if (ops->disabled_by_bios()) {
++		printk(KERN_ERR "kvm: disabled by bios\n");
++		r = -EOPNOTSUPP;
++		goto out;
++	}
++
++	r = kvm_mmu_module_init();
++	if (r)
++		goto out;
++
++	kvm_init_msr_list();
++
++	kvm_x86_ops = ops;
++	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
++	return 0;
++
++out:
++	return r;
++}
++
++void kvm_arch_exit(void)
++{
++	kvm_x86_ops = NULL;
++	kvm_mmu_module_exit();
++}
++
++int kvm_emulate_halt(struct kvm_vcpu *vcpu)
++{
++	++vcpu->stat.halt_exits;
++	if (irqchip_in_kernel(vcpu->kvm)) {
++		vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
++		kvm_vcpu_block(vcpu);
++		if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
++			return -EINTR;
++		return 1;
++	} else {
++		vcpu->run->exit_reason = KVM_EXIT_HLT;
++		return 0;
++	}
++}
++EXPORT_SYMBOL_GPL(kvm_emulate_halt);
++
++int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
++{
++	unsigned long nr, a0, a1, a2, a3, ret;
++
++	kvm_x86_ops->cache_regs(vcpu);
++
++	nr = vcpu->arch.regs[VCPU_REGS_RAX];
++	a0 = vcpu->arch.regs[VCPU_REGS_RBX];
++	a1 = vcpu->arch.regs[VCPU_REGS_RCX];
++	a2 = vcpu->arch.regs[VCPU_REGS_RDX];
++	a3 = vcpu->arch.regs[VCPU_REGS_RSI];
++
++	if (!is_long_mode(vcpu)) {
++		nr &= 0xFFFFFFFF;
++		a0 &= 0xFFFFFFFF;
++		a1 &= 0xFFFFFFFF;
++		a2 &= 0xFFFFFFFF;
++		a3 &= 0xFFFFFFFF;
++	}
++
++	switch (nr) {
++	case KVM_HC_VAPIC_POLL_IRQ:
++		ret = 0;
 +		break;
-+	case offsetof(struct user_regs_struct,ds):
-+		task->thread.ds = value;
-+		if (task == current)
-+			loadsegment(ds, task->thread.ds);
++	default:
++		ret = -KVM_ENOSYS;
 +		break;
-+	case offsetof(struct user_regs_struct,es):
-+		task->thread.es = value;
-+		if (task == current)
-+			loadsegment(es, task->thread.es);
++	}
++	vcpu->arch.regs[VCPU_REGS_RAX] = ret;
++	kvm_x86_ops->decache_regs(vcpu);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
++
++int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
++{
++	char instruction[3];
++	int ret = 0;
++
++
++	/*
++	 * Blow out the MMU to ensure that no other VCPU has an active mapping
++	 * to ensure that the updated hypercall appears atomically across all
++	 * VCPUs.
++	 */
++	kvm_mmu_zap_all(vcpu->kvm);
++
++	kvm_x86_ops->cache_regs(vcpu);
++	kvm_x86_ops->patch_hypercall(vcpu, instruction);
++	if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
++	    != X86EMUL_CONTINUE)
++		ret = -EFAULT;
++
++	return ret;
++}
++
++static u64 mk_cr_64(u64 curr_cr, u32 new_val)
++{
++	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
++}
++
++void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
++{
++	struct descriptor_table dt = { limit, base };
++
++	kvm_x86_ops->set_gdt(vcpu, &dt);
++}
++
++void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
++{
++	struct descriptor_table dt = { limit, base };
++
++	kvm_x86_ops->set_idt(vcpu, &dt);
++}
++
++void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
++		   unsigned long *rflags)
++{
++	lmsw(vcpu, msw);
++	*rflags = kvm_x86_ops->get_rflags(vcpu);
++}
++
++unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
++{
++	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
++	switch (cr) {
++	case 0:
++		return vcpu->arch.cr0;
++	case 2:
++		return vcpu->arch.cr2;
++	case 3:
++		return vcpu->arch.cr3;
++	case 4:
++		return vcpu->arch.cr4;
++	case 8:
++		return get_cr8(vcpu);
++	default:
++		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
++		return 0;
++	}
++}
++
++void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
++		     unsigned long *rflags)
++{
++	switch (cr) {
++	case 0:
++		set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
++		*rflags = kvm_x86_ops->get_rflags(vcpu);
++		break;
++	case 2:
++		vcpu->arch.cr2 = val;
++		break;
++	case 3:
++		set_cr3(vcpu, val);
++		break;
++	case 4:
++		set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
++		break;
++	case 8:
++		set_cr8(vcpu, val & 0xfUL);
 +		break;
++	default:
++		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
++	}
++}
++
++static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
++{
++	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
++	int j, nent = vcpu->arch.cpuid_nent;
++
++	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
++	/* when no next entry is found, the current entry[i] is reselected */
++	for (j = i + 1; j == i; j = (j + 1) % nent) {
++		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
++		if (ej->function == e->function) {
++			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
++			return j;
++		}
++	}
++	return 0; /* silence gcc, even though control never reaches here */
++}
++
++/* find an entry with matching function, matching index (if needed), and that
++ * should be read next (if it's stateful) */
++static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
++	u32 function, u32 index)
++{
++	if (e->function != function)
++		return 0;
++	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
++		return 0;
++	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
++		!(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
++		return 0;
++	return 1;
++}
++
++void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
++{
++	int i;
++	u32 function, index;
++	struct kvm_cpuid_entry2 *e, *best;
 +
++	kvm_x86_ops->cache_regs(vcpu);
++	function = vcpu->arch.regs[VCPU_REGS_RAX];
++	index = vcpu->arch.regs[VCPU_REGS_RCX];
++	vcpu->arch.regs[VCPU_REGS_RAX] = 0;
++	vcpu->arch.regs[VCPU_REGS_RBX] = 0;
++	vcpu->arch.regs[VCPU_REGS_RCX] = 0;
++	vcpu->arch.regs[VCPU_REGS_RDX] = 0;
++	best = NULL;
++	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
++		e = &vcpu->arch.cpuid_entries[i];
++		if (is_matching_cpuid_entry(e, function, index)) {
++			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
++				move_to_next_stateful_cpuid_entry(vcpu, i);
++			best = e;
++			break;
++		}
 +		/*
-+		 * Can't actually change these in 64-bit mode.
++		 * Both basic or both extended?
 +		 */
-+	case offsetof(struct user_regs_struct,cs):
-+#ifdef CONFIG_IA32_EMULATION
-+		if (test_tsk_thread_flag(task, TIF_IA32))
-+			task_pt_regs(task)->cs = value;
-+#endif
-+		break;
-+	case offsetof(struct user_regs_struct,ss):
-+#ifdef CONFIG_IA32_EMULATION
-+		if (test_tsk_thread_flag(task, TIF_IA32))
-+			task_pt_regs(task)->ss = value;
-+#endif
-+		break;
++		if (((e->function ^ function) & 0x80000000) == 0)
++			if (!best || e->function > best->function)
++				best = e;
++	}
++	if (best) {
++		vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
++		vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
++		vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
++		vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
++	}
++	kvm_x86_ops->decache_regs(vcpu);
++	kvm_x86_ops->skip_emulated_instruction(vcpu);
++}
++EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
++
++/*
++ * Check if userspace requested an interrupt window, and that the
++ * interrupt window is open.
++ *
++ * No need to exit to userspace if we already have an interrupt queued.
++ */
++static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
++					  struct kvm_run *kvm_run)
++{
++	return (!vcpu->arch.irq_summary &&
++		kvm_run->request_interrupt_window &&
++		vcpu->arch.interrupt_window_open &&
++		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
++}
++
++static void post_kvm_run_save(struct kvm_vcpu *vcpu,
++			      struct kvm_run *kvm_run)
++{
++	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
++	kvm_run->cr8 = get_cr8(vcpu);
++	kvm_run->apic_base = kvm_get_apic_base(vcpu);
++	if (irqchip_in_kernel(vcpu->kvm))
++		kvm_run->ready_for_interrupt_injection = 1;
++	else
++		kvm_run->ready_for_interrupt_injection =
++					(vcpu->arch.interrupt_window_open &&
++					 vcpu->arch.irq_summary == 0);
++}
++
++static void vapic_enter(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++	struct page *page;
++
++	if (!apic || !apic->vapic_addr)
++		return;
++
++	down_read(&current->mm->mmap_sem);
++	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
++	vcpu->arch.apic->vapic_page = page;
++	up_read(&current->mm->mmap_sem);
++}
++
++static void vapic_exit(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	if (!apic || !apic->vapic_addr)
++		return;
++
++	kvm_release_page_dirty(apic->vapic_page);
++	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
++}
++
++static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++	int r;
++
++	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
++		pr_debug("vcpu %d received sipi with vector # %x\n",
++		       vcpu->vcpu_id, vcpu->arch.sipi_vector);
++		kvm_lapic_reset(vcpu);
++		r = kvm_x86_ops->vcpu_reset(vcpu);
++		if (r)
++			return r;
++		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
++	}
++
++	vapic_enter(vcpu);
++
++preempted:
++	if (vcpu->guest_debug.enabled)
++		kvm_x86_ops->guest_debug_pre(vcpu);
++
++again:
++	r = kvm_mmu_reload(vcpu);
++	if (unlikely(r))
++		goto out;
++
++	if (vcpu->requests) {
++		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
++			__kvm_migrate_apic_timer(vcpu);
++		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
++				       &vcpu->requests)) {
++			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
++			r = 0;
++			goto out;
++		}
++	}
++
++	kvm_inject_pending_timer_irqs(vcpu);
++
++	preempt_disable();
++
++	kvm_x86_ops->prepare_guest_switch(vcpu);
++	kvm_load_guest_fpu(vcpu);
++
++	local_irq_disable();
++
++	if (need_resched()) {
++		local_irq_enable();
++		preempt_enable();
++		r = 1;
++		goto out;
++	}
++
++	if (signal_pending(current)) {
++		local_irq_enable();
++		preempt_enable();
++		r = -EINTR;
++		kvm_run->exit_reason = KVM_EXIT_INTR;
++		++vcpu->stat.signal_exits;
++		goto out;
++	}
++
++	if (vcpu->arch.exception.pending)
++		__queue_exception(vcpu);
++	else if (irqchip_in_kernel(vcpu->kvm))
++		kvm_x86_ops->inject_pending_irq(vcpu);
++	else
++		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
++
++	kvm_lapic_sync_to_vapic(vcpu);
++
++	vcpu->guest_mode = 1;
++	kvm_guest_enter();
++
++	if (vcpu->requests)
++		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
++			kvm_x86_ops->tlb_flush(vcpu);
++
++	kvm_x86_ops->run(vcpu, kvm_run);
++
++	vcpu->guest_mode = 0;
++	local_irq_enable();
++
++	++vcpu->stat.exits;
++
++	/*
++	 * We must have an instruction between local_irq_enable() and
++	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
++	 * the interrupt shadow.  The stat.exits increment will do nicely.
++	 * But we need to prevent reordering, hence this barrier():
++	 */
++	barrier();
++
++	kvm_guest_exit();
++
++	preempt_enable();
++
++	/*
++	 * Profile KVM exit RIPs:
++	 */
++	if (unlikely(prof_on == KVM_PROFILING)) {
++		kvm_x86_ops->cache_regs(vcpu);
++		profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
++	}
++
++	if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
++		vcpu->arch.exception.pending = false;
++
++	kvm_lapic_sync_from_vapic(vcpu);
++
++	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
++
++	if (r > 0) {
++		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
++			r = -EINTR;
++			kvm_run->exit_reason = KVM_EXIT_INTR;
++			++vcpu->stat.request_irq_exits;
++			goto out;
++		}
++		if (!need_resched())
++			goto again;
 +	}
 +
-+	return 0;
++out:
++	if (r > 0) {
++		kvm_resched(vcpu);
++		goto preempted;
++	}
++
++	post_kvm_run_save(vcpu, kvm_run);
++
++	vapic_exit(vcpu);
++
++	return r;
 +}
 +
-+static unsigned long debugreg_addr_limit(struct task_struct *task)
++int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 +{
-+#ifdef CONFIG_IA32_EMULATION
-+	if (test_tsk_thread_flag(task, TIF_IA32))
-+		return IA32_PAGE_OFFSET - 3;
++	int r;
++	sigset_t sigsaved;
++
++	vcpu_load(vcpu);
++
++	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
++		kvm_vcpu_block(vcpu);
++		vcpu_put(vcpu);
++		return -EAGAIN;
++	}
++
++	if (vcpu->sigset_active)
++		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++
++	/* re-sync apic's tpr */
++	if (!irqchip_in_kernel(vcpu->kvm))
++		set_cr8(vcpu, kvm_run->cr8);
++
++	if (vcpu->arch.pio.cur_count) {
++		r = complete_pio(vcpu);
++		if (r)
++			goto out;
++	}
++#if CONFIG_HAS_IOMEM
++	if (vcpu->mmio_needed) {
++		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
++		vcpu->mmio_read_completed = 1;
++		vcpu->mmio_needed = 0;
++		r = emulate_instruction(vcpu, kvm_run,
++					vcpu->arch.mmio_fault_cr2, 0,
++					EMULTYPE_NO_DECODE);
++		if (r == EMULATE_DO_MMIO) {
++			/*
++			 * Read-modify-write.  Back to userspace.
++			 */
++			r = 0;
++			goto out;
++		}
++	}
 +#endif
-+	return TASK_SIZE64 - 7;
-+}
++	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
++		kvm_x86_ops->cache_regs(vcpu);
++		vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
++		kvm_x86_ops->decache_regs(vcpu);
++	}
 +
-+#endif	/* CONFIG_X86_32 */
++	r = __vcpu_run(vcpu, kvm_run);
 +
-+static unsigned long get_flags(struct task_struct *task)
++out:
++	if (vcpu->sigset_active)
++		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++
++	vcpu_put(vcpu);
++	return r;
++}
++
++int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 +{
-+	unsigned long retval = task_pt_regs(task)->flags;
++	vcpu_load(vcpu);
++
++	kvm_x86_ops->cache_regs(vcpu);
++
++	regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
++	regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
++	regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
++	regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
++	regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
++	regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
++	regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
++	regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
++#ifdef CONFIG_X86_64
++	regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
++	regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
++	regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
++	regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
++	regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
++	regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
++	regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
++	regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
++#endif
++
++	regs->rip = vcpu->arch.rip;
++	regs->rflags = kvm_x86_ops->get_rflags(vcpu);
 +
 +	/*
-+	 * If the debugger set TF, hide it from the readout.
++	 * Don't leak debug flags in case they were set for guest debugging
 +	 */
-+	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
-+		retval &= ~X86_EFLAGS_TF;
++	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
++		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
 +
-+	return retval;
++	vcpu_put(vcpu);
++
++	return 0;
 +}
 +
-+static int set_flags(struct task_struct *task, unsigned long value)
++int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 +{
-+	struct pt_regs *regs = task_pt_regs(task);
++	vcpu_load(vcpu);
 +
-+	/*
-+	 * If the user value contains TF, mark that
-+	 * it was not "us" (the debugger) that set it.
-+	 * If not, make sure it stays set if we had.
-+	 */
-+	if (value & X86_EFLAGS_TF)
-+		clear_tsk_thread_flag(task, TIF_FORCED_TF);
-+	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
-+		value |= X86_EFLAGS_TF;
++	vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
++	vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
++	vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
++	vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
++	vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
++	vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
++	vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
++	vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
++#ifdef CONFIG_X86_64
++	vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
++	vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
++	vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
++	vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
++	vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
++	vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
++	vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
++	vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
++#endif
 +
-+	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
++	vcpu->arch.rip = regs->rip;
++	kvm_x86_ops->set_rflags(vcpu, regs->rflags);
++
++	kvm_x86_ops->decache_regs(vcpu);
++
++	vcpu_put(vcpu);
 +
 +	return 0;
 +}
 +
-+static int putreg(struct task_struct *child,
-+		  unsigned long offset, unsigned long value)
++static void get_segment(struct kvm_vcpu *vcpu,
++			struct kvm_segment *var, int seg)
 +{
-+	switch (offset) {
-+	case offsetof(struct user_regs_struct, cs):
-+	case offsetof(struct user_regs_struct, ds):
-+	case offsetof(struct user_regs_struct, es):
-+	case offsetof(struct user_regs_struct, fs):
-+	case offsetof(struct user_regs_struct, gs):
-+	case offsetof(struct user_regs_struct, ss):
-+		return set_segment_reg(child, offset, value);
++	return kvm_x86_ops->get_segment(vcpu, var, seg);
++}
 +
-+	case offsetof(struct user_regs_struct, flags):
-+		return set_flags(child, value);
++void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
++{
++	struct kvm_segment cs;
 +
-+#ifdef CONFIG_X86_64
-+	case offsetof(struct user_regs_struct,fs_base):
-+		if (value >= TASK_SIZE_OF(child))
-+			return -EIO;
-+		/*
-+		 * When changing the segment base, use do_arch_prctl
-+		 * to set either thread.fs or thread.fsindex and the
-+		 * corresponding GDT slot.
-+		 */
-+		if (child->thread.fs != value)
-+			return do_arch_prctl(child, ARCH_SET_FS, value);
-+		return 0;
-+	case offsetof(struct user_regs_struct,gs_base):
-+		/*
-+		 * Exactly the same here as the %fs handling above.
-+		 */
-+		if (value >= TASK_SIZE_OF(child))
-+			return -EIO;
-+		if (child->thread.gs != value)
-+			return do_arch_prctl(child, ARCH_SET_GS, value);
-+		return 0;
-+#endif
-+	}
++	get_segment(vcpu, &cs, VCPU_SREG_CS);
++	*db = cs.db;
++	*l = cs.l;
++}
++EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
++
++int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
++				  struct kvm_sregs *sregs)
++{
++	struct descriptor_table dt;
++	int pending_vec;
++
++	vcpu_load(vcpu);
++
++	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
++	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
++	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
++	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
++	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
++	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
++
++	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
++	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
++
++	kvm_x86_ops->get_idt(vcpu, &dt);
++	sregs->idt.limit = dt.limit;
++	sregs->idt.base = dt.base;
++	kvm_x86_ops->get_gdt(vcpu, &dt);
++	sregs->gdt.limit = dt.limit;
++	sregs->gdt.base = dt.base;
++
++	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
++	sregs->cr0 = vcpu->arch.cr0;
++	sregs->cr2 = vcpu->arch.cr2;
++	sregs->cr3 = vcpu->arch.cr3;
++	sregs->cr4 = vcpu->arch.cr4;
++	sregs->cr8 = get_cr8(vcpu);
++	sregs->efer = vcpu->arch.shadow_efer;
++	sregs->apic_base = kvm_get_apic_base(vcpu);
++
++	if (irqchip_in_kernel(vcpu->kvm)) {
++		memset(sregs->interrupt_bitmap, 0,
++		       sizeof sregs->interrupt_bitmap);
++		pending_vec = kvm_x86_ops->get_irq(vcpu);
++		if (pending_vec >= 0)
++			set_bit(pending_vec,
++				(unsigned long *)sregs->interrupt_bitmap);
++	} else
++		memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
++		       sizeof sregs->interrupt_bitmap);
++
++	vcpu_put(vcpu);
 +
-+	*pt_regs_access(task_pt_regs(child), offset) = value;
 +	return 0;
 +}
 +
-+static unsigned long getreg(struct task_struct *task, unsigned long offset)
++static void set_segment(struct kvm_vcpu *vcpu,
++			struct kvm_segment *var, int seg)
 +{
-+	switch (offset) {
-+	case offsetof(struct user_regs_struct, cs):
-+	case offsetof(struct user_regs_struct, ds):
-+	case offsetof(struct user_regs_struct, es):
-+	case offsetof(struct user_regs_struct, fs):
-+	case offsetof(struct user_regs_struct, gs):
-+	case offsetof(struct user_regs_struct, ss):
-+		return get_segment_reg(task, offset);
++	return kvm_x86_ops->set_segment(vcpu, var, seg);
++}
 +
-+	case offsetof(struct user_regs_struct, flags):
-+		return get_flags(task);
++int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
++				  struct kvm_sregs *sregs)
++{
++	int mmu_reset_needed = 0;
++	int i, pending_vec, max_bits;
++	struct descriptor_table dt;
++
++	vcpu_load(vcpu);
++
++	dt.limit = sregs->idt.limit;
++	dt.base = sregs->idt.base;
++	kvm_x86_ops->set_idt(vcpu, &dt);
++	dt.limit = sregs->gdt.limit;
++	dt.base = sregs->gdt.base;
++	kvm_x86_ops->set_gdt(vcpu, &dt);
++
++	vcpu->arch.cr2 = sregs->cr2;
++	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
++	vcpu->arch.cr3 = sregs->cr3;
++
++	set_cr8(vcpu, sregs->cr8);
 +
++	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
 +#ifdef CONFIG_X86_64
-+	case offsetof(struct user_regs_struct, fs_base): {
-+		/*
-+		 * do_arch_prctl may have used a GDT slot instead of
-+		 * the MSR.  To userland, it appears the same either
-+		 * way, except the %fs segment selector might not be 0.
-+		 */
-+		unsigned int seg = task->thread.fsindex;
-+		if (task->thread.fs != 0)
-+			return task->thread.fs;
-+		if (task == current)
-+			asm("movl %%fs,%0" : "=r" (seg));
-+		if (seg != FS_TLS_SEL)
-+			return 0;
-+		return get_desc_base(&task->thread.tls_array[FS_TLS]);
-+	}
-+	case offsetof(struct user_regs_struct, gs_base): {
-+		/*
-+		 * Exactly the same here as the %fs handling above.
-+		 */
-+		unsigned int seg = task->thread.gsindex;
-+		if (task->thread.gs != 0)
-+			return task->thread.gs;
-+		if (task == current)
-+			asm("movl %%gs,%0" : "=r" (seg));
-+		if (seg != GS_TLS_SEL)
-+			return 0;
-+		return get_desc_base(&task->thread.tls_array[GS_TLS]);
-+	}
++	kvm_x86_ops->set_efer(vcpu, sregs->efer);
 +#endif
-+	}
++	kvm_set_apic_base(vcpu, sregs->apic_base);
 +
-+	return *pt_regs_access(task_pt_regs(task), offset);
-+}
++	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 +
-+static int genregs_get(struct task_struct *target,
-+		       const struct user_regset *regset,
-+		       unsigned int pos, unsigned int count,
-+		       void *kbuf, void __user *ubuf)
-+{
-+	if (kbuf) {
-+		unsigned long *k = kbuf;
-+		while (count > 0) {
-+			*k++ = getreg(target, pos);
-+			count -= sizeof(*k);
-+			pos += sizeof(*k);
-+		}
++	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
++	vcpu->arch.cr0 = sregs->cr0;
++	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
++
++	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
++	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
++	if (!is_long_mode(vcpu) && is_pae(vcpu))
++		load_pdptrs(vcpu, vcpu->arch.cr3);
++
++	if (mmu_reset_needed)
++		kvm_mmu_reset_context(vcpu);
++
++	if (!irqchip_in_kernel(vcpu->kvm)) {
++		memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
++		       sizeof vcpu->arch.irq_pending);
++		vcpu->arch.irq_summary = 0;
++		for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
++			if (vcpu->arch.irq_pending[i])
++				__set_bit(i, &vcpu->arch.irq_summary);
 +	} else {
-+		unsigned long __user *u = ubuf;
-+		while (count > 0) {
-+			if (__put_user(getreg(target, pos), u++))
-+				return -EFAULT;
-+			count -= sizeof(*u);
-+			pos += sizeof(*u);
++		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
++		pending_vec = find_first_bit(
++			(const unsigned long *)sregs->interrupt_bitmap,
++			max_bits);
++		/* Only pending external irq is handled here */
++		if (pending_vec < max_bits) {
++			kvm_x86_ops->set_irq(vcpu, pending_vec);
++			pr_debug("Set back pending irq %d\n",
++				 pending_vec);
 +		}
 +	}
 +
++	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
++	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
++	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
++	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
++	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
++	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
++
++	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
++	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
++
++	vcpu_put(vcpu);
++
 +	return 0;
 +}
 +
-+static int genregs_set(struct task_struct *target,
-+		       const struct user_regset *regset,
-+		       unsigned int pos, unsigned int count,
-+		       const void *kbuf, const void __user *ubuf)
++int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
++				    struct kvm_debug_guest *dbg)
 +{
-+	int ret = 0;
-+	if (kbuf) {
-+		const unsigned long *k = kbuf;
-+		while (count > 0 && !ret) {
-+			ret = putreg(target, pos, *k++);
-+			count -= sizeof(*k);
-+			pos += sizeof(*k);
-+		}
-+	} else {
-+		const unsigned long  __user *u = ubuf;
-+		while (count > 0 && !ret) {
-+			unsigned long word;
-+			ret = __get_user(word, u++);
-+			if (ret)
-+				break;
-+			ret = putreg(target, pos, word);
-+			count -= sizeof(*u);
-+			pos += sizeof(*u);
-+		}
-+	}
-+	return ret;
++	int r;
++
++	vcpu_load(vcpu);
++
++	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
++
++	vcpu_put(vcpu);
++
++	return r;
 +}
 +
 +/*
-+ * This function is trivial and will be inlined by the compiler.
-+ * Having it separates the implementation details of debug
-+ * registers from the interface details of ptrace.
++ * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
++ * we have asm/x86/processor.h
 + */
-+static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
++struct fxsave {
++	u16	cwd;
++	u16	swd;
++	u16	twd;
++	u16	fop;
++	u64	rip;
++	u64	rdp;
++	u32	mxcsr;
++	u32	mxcsr_mask;
++	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
++#ifdef CONFIG_X86_64
++	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
++#else
++	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
++#endif
++};
++
++/*
++ * Translate a guest virtual address to a guest physical address.
++ */
++int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
++				    struct kvm_translation *tr)
 +{
-+	switch (n) {
-+	case 0:		return child->thread.debugreg0;
-+	case 1:		return child->thread.debugreg1;
-+	case 2:		return child->thread.debugreg2;
-+	case 3:		return child->thread.debugreg3;
-+	case 6:		return child->thread.debugreg6;
-+	case 7:		return child->thread.debugreg7;
-+	}
++	unsigned long vaddr = tr->linear_address;
++	gpa_t gpa;
++
++	vcpu_load(vcpu);
++	down_read(&current->mm->mmap_sem);
++	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
++	up_read(&current->mm->mmap_sem);
++	tr->physical_address = gpa;
++	tr->valid = gpa != UNMAPPED_GVA;
++	tr->writeable = 1;
++	tr->usermode = 0;
++	vcpu_put(vcpu);
++
 +	return 0;
 +}
 +
-+static int ptrace_set_debugreg(struct task_struct *child,
-+			       int n, unsigned long data)
++int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 +{
-+	int i;
++	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
 +
-+	if (unlikely(n == 4 || n == 5))
-+		return -EIO;
++	vcpu_load(vcpu);
 +
-+	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
-+		return -EIO;
++	memcpy(fpu->fpr, fxsave->st_space, 128);
++	fpu->fcw = fxsave->cwd;
++	fpu->fsw = fxsave->swd;
++	fpu->ftwx = fxsave->twd;
++	fpu->last_opcode = fxsave->fop;
++	fpu->last_ip = fxsave->rip;
++	fpu->last_dp = fxsave->rdp;
++	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
 +
-+	switch (n) {
-+	case 0:		child->thread.debugreg0 = data; break;
-+	case 1:		child->thread.debugreg1 = data; break;
-+	case 2:		child->thread.debugreg2 = data; break;
-+	case 3:		child->thread.debugreg3 = data; break;
++	vcpu_put(vcpu);
 +
-+	case 6:
-+		if ((data & ~0xffffffffUL) != 0)
-+			return -EIO;
-+		child->thread.debugreg6 = data;
-+		break;
++	return 0;
++}
 +
-+	case 7:
-+		/*
-+		 * Sanity-check data. Take one half-byte at once with
-+		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
-+		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
-+		 * 2 and 3 are LENi. Given a list of invalid values,
-+		 * we do mask |= 1 << invalid_value, so that
-+		 * (mask >> check) & 1 is a correct test for invalid
-+		 * values.
-+		 *
-+		 * R/Wi contains the type of the breakpoint /
-+		 * watchpoint, LENi contains the length of the watched
-+		 * data in the watchpoint case.
-+		 *
-+		 * The invalid values are:
-+		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
-+		 * - R/Wi == 0x10 (break on I/O reads or writes), so
-+		 *   mask |= 0x4444.
-+		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
-+		 *   0x1110.
-+		 *
-+		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
-+		 *
-+		 * See the Intel Manual "System Programming Guide",
-+		 * 15.2.4
-+		 *
-+		 * Note that LENi == 0x10 is defined on x86_64 in long
-+		 * mode (i.e. even for 32-bit userspace software, but
-+		 * 64-bit kernel), so the x86_64 mask value is 0x5454.
-+		 * See the AMD manual no. 24593 (AMD64 System Programming)
-+		 */
-+#ifdef CONFIG_X86_32
-+#define	DR7_MASK	0x5f54
-+#else
-+#define	DR7_MASK	0x5554
-+#endif
-+		data &= ~DR_CONTROL_RESERVED;
-+		for (i = 0; i < 4; i++)
-+			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
-+				return -EIO;
-+		child->thread.debugreg7 = data;
-+		if (data)
-+			set_tsk_thread_flag(child, TIF_DEBUG);
-+		else
-+			clear_tsk_thread_flag(child, TIF_DEBUG);
-+		break;
-+	}
++int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
++{
++	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
++
++	vcpu_load(vcpu);
++
++	memcpy(fxsave->st_space, fpu->fpr, 128);
++	fxsave->cwd = fpu->fcw;
++	fxsave->swd = fpu->fsw;
++	fxsave->twd = fpu->ftwx;
++	fxsave->fop = fpu->last_opcode;
++	fxsave->rip = fpu->last_ip;
++	fxsave->rdp = fpu->last_dp;
++	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
++
++	vcpu_put(vcpu);
 +
 +	return 0;
 +}
 +
-+static int ptrace_bts_get_size(struct task_struct *child)
++void fx_init(struct kvm_vcpu *vcpu)
 +{
-+	if (!child->thread.ds_area_msr)
-+		return -ENXIO;
++	unsigned after_mxcsr_mask;
 +
-+	return ds_get_bts_index((void *)child->thread.ds_area_msr);
++	/* Initialize guest FPU by resetting ours and saving into guest's */
++	preempt_disable();
++	fx_save(&vcpu->arch.host_fx_image);
++	fpu_init();
++	fx_save(&vcpu->arch.guest_fx_image);
++	fx_restore(&vcpu->arch.host_fx_image);
++	preempt_enable();
++
++	vcpu->arch.cr0 |= X86_CR0_ET;
++	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
++	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
++	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
++	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
 +}
++EXPORT_SYMBOL_GPL(fx_init);
 +
-+static int ptrace_bts_read_record(struct task_struct *child,
-+				  long index,
-+				  struct bts_struct __user *out)
++void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 +{
-+	struct bts_struct ret;
-+	int retval;
-+	int bts_end;
-+	int bts_index;
++	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
++		return;
 +
-+	if (!child->thread.ds_area_msr)
-+		return -ENXIO;
++	vcpu->guest_fpu_loaded = 1;
++	fx_save(&vcpu->arch.host_fx_image);
++	fx_restore(&vcpu->arch.guest_fx_image);
++}
++EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 +
-+	if (index < 0)
-+		return -EINVAL;
++void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
++{
++	if (!vcpu->guest_fpu_loaded)
++		return;
 +
-+	bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
-+	if (bts_end <= index)
-+		return -EINVAL;
++	vcpu->guest_fpu_loaded = 0;
++	fx_save(&vcpu->arch.guest_fx_image);
++	fx_restore(&vcpu->arch.host_fx_image);
++	++vcpu->stat.fpu_reload;
++}
++EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 +
-+	/* translate the ptrace bts index into the ds bts index */
-+	bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr);
-+	bts_index -= (index + 1);
-+	if (bts_index < 0)
-+		bts_index += bts_end;
++void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
++{
++	kvm_x86_ops->vcpu_free(vcpu);
++}
 +
-+	retval = ds_read_bts((void *)child->thread.ds_area_msr,
-+			     bts_index, &ret);
-+	if (retval < 0)
-+		return retval;
++struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
++						unsigned int id)
++{
++	return kvm_x86_ops->vcpu_create(kvm, id);
++}
 +
-+	if (copy_to_user(out, &ret, sizeof(ret)))
-+		return -EFAULT;
++int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
++{
++	int r;
 +
-+	return sizeof(ret);
++	/* We do fxsave: this must be aligned. */
++	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
++
++	vcpu_load(vcpu);
++	r = kvm_arch_vcpu_reset(vcpu);
++	if (r == 0)
++		r = kvm_mmu_setup(vcpu);
++	vcpu_put(vcpu);
++	if (r < 0)
++		goto free_vcpu;
++
++	return 0;
++free_vcpu:
++	kvm_x86_ops->vcpu_free(vcpu);
++	return r;
 +}
 +
-+static int ptrace_bts_write_record(struct task_struct *child,
-+				   const struct bts_struct *in)
++void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 +{
-+	int retval;
++	vcpu_load(vcpu);
++	kvm_mmu_unload(vcpu);
++	vcpu_put(vcpu);
 +
-+	if (!child->thread.ds_area_msr)
-+		return -ENXIO;
++	kvm_x86_ops->vcpu_free(vcpu);
++}
 +
-+	retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
-+	if (retval)
-+		return retval;
++int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
++{
++	return kvm_x86_ops->vcpu_reset(vcpu);
++}
 +
-+	return sizeof(*in);
++void kvm_arch_hardware_enable(void *garbage)
++{
++	kvm_x86_ops->hardware_enable(garbage);
 +}
 +
-+static int ptrace_bts_clear(struct task_struct *child)
++void kvm_arch_hardware_disable(void *garbage)
 +{
-+	if (!child->thread.ds_area_msr)
-+		return -ENXIO;
++	kvm_x86_ops->hardware_disable(garbage);
++}
 +
-+	return ds_clear((void *)child->thread.ds_area_msr);
++int kvm_arch_hardware_setup(void)
++{
++	return kvm_x86_ops->hardware_setup();
 +}
 +
-+static int ptrace_bts_drain(struct task_struct *child,
-+			    long size,
-+			    struct bts_struct __user *out)
++void kvm_arch_hardware_unsetup(void)
 +{
-+	int end, i;
-+	void *ds = (void *)child->thread.ds_area_msr;
++	kvm_x86_ops->hardware_unsetup();
++}
 +
-+	if (!ds)
-+		return -ENXIO;
++void kvm_arch_check_processor_compat(void *rtn)
++{
++	kvm_x86_ops->check_processor_compatibility(rtn);
++}
 +
-+	end = ds_get_bts_index(ds);
-+	if (end <= 0)
-+		return end;
++int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
++{
++	struct page *page;
++	struct kvm *kvm;
++	int r;
 +
-+	if (size < (end * sizeof(struct bts_struct)))
-+		return -EIO;
++	BUG_ON(vcpu->kvm == NULL);
++	kvm = vcpu->kvm;
 +
-+	for (i = 0; i < end; i++, out++) {
-+		struct bts_struct ret;
-+		int retval;
++	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
++	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
++		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
++	else
++		vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
++
++	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++	if (!page) {
++		r = -ENOMEM;
++		goto fail;
++	}
++	vcpu->arch.pio_data = page_address(page);
 +
-+		retval = ds_read_bts(ds, i, &ret);
-+		if (retval < 0)
-+			return retval;
++	r = kvm_mmu_create(vcpu);
++	if (r < 0)
++		goto fail_free_pio_data;
 +
-+		if (copy_to_user(out, &ret, sizeof(ret)))
-+			return -EFAULT;
++	if (irqchip_in_kernel(kvm)) {
++		r = kvm_create_lapic(vcpu);
++		if (r < 0)
++			goto fail_mmu_destroy;
 +	}
 +
-+	ds_clear(ds);
++	return 0;
 +
-+	return end;
++fail_mmu_destroy:
++	kvm_mmu_destroy(vcpu);
++fail_free_pio_data:
++	free_page((unsigned long)vcpu->arch.pio_data);
++fail:
++	return r;
 +}
 +
-+static int ptrace_bts_realloc(struct task_struct *child,
-+			      int size, int reduce_size)
++void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 +{
-+	unsigned long rlim, vm;
-+	int ret, old_size;
++	kvm_free_lapic(vcpu);
++	kvm_mmu_destroy(vcpu);
++	free_page((unsigned long)vcpu->arch.pio_data);
++}
 +
-+	if (size < 0)
-+		return -EINVAL;
++struct  kvm *kvm_arch_create_vm(void)
++{
++	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
 +
-+	old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
-+	if (old_size < 0)
-+		return old_size;
++	if (!kvm)
++		return ERR_PTR(-ENOMEM);
 +
-+	ret = ds_free((void **)&child->thread.ds_area_msr);
-+	if (ret < 0)
-+		goto out;
++	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 +
-+	size >>= PAGE_SHIFT;
-+	old_size >>= PAGE_SHIFT;
++	return kvm;
++}
 +
-+	current->mm->total_vm  -= old_size;
-+	current->mm->locked_vm -= old_size;
++static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
++{
++	vcpu_load(vcpu);
++	kvm_mmu_unload(vcpu);
++	vcpu_put(vcpu);
++}
 +
-+	if (size == 0)
-+		goto out;
++static void kvm_free_vcpus(struct kvm *kvm)
++{
++	unsigned int i;
 +
-+	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-+	vm = current->mm->total_vm  + size;
-+	if (rlim < vm) {
-+		ret = -ENOMEM;
++	/*
++	 * Unpin any mmu pages first.
++	 */
++	for (i = 0; i < KVM_MAX_VCPUS; ++i)
++		if (kvm->vcpus[i])
++			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
++	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
++		if (kvm->vcpus[i]) {
++			kvm_arch_vcpu_free(kvm->vcpus[i]);
++			kvm->vcpus[i] = NULL;
++		}
++	}
 +
-+		if (!reduce_size)
-+			goto out;
++}
 +
-+		size = rlim - current->mm->total_vm;
-+		if (size <= 0)
-+			goto out;
-+	}
++void kvm_arch_destroy_vm(struct kvm *kvm)
++{
++	kfree(kvm->arch.vpic);
++	kfree(kvm->arch.vioapic);
++	kvm_free_vcpus(kvm);
++	kvm_free_physmem(kvm);
++	kfree(kvm);
++}
 +
-+	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-+	vm = current->mm->locked_vm  + size;
-+	if (rlim < vm) {
-+		ret = -ENOMEM;
++int kvm_arch_set_memory_region(struct kvm *kvm,
++				struct kvm_userspace_memory_region *mem,
++				struct kvm_memory_slot old,
++				int user_alloc)
++{
++	int npages = mem->memory_size >> PAGE_SHIFT;
++	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 +
-+		if (!reduce_size)
-+			goto out;
++	/*To keep backward compatibility with older userspace,
++	 *x86 needs to handle !user_alloc case.
++	 */
++	if (!user_alloc) {
++		if (npages && !old.rmap) {
++			memslot->userspace_addr = do_mmap(NULL, 0,
++						     npages * PAGE_SIZE,
++						     PROT_READ | PROT_WRITE,
++						     MAP_SHARED | MAP_ANONYMOUS,
++						     0);
 +
-+		size = rlim - current->mm->locked_vm;
-+		if (size <= 0)
-+			goto out;
++			if (IS_ERR((void *)memslot->userspace_addr))
++				return PTR_ERR((void *)memslot->userspace_addr);
++		} else {
++			if (!old.user_alloc && old.rmap) {
++				int ret;
++
++				ret = do_munmap(current->mm, old.userspace_addr,
++						old.npages * PAGE_SIZE);
++				if (ret < 0)
++					printk(KERN_WARNING
++				       "kvm_vm_ioctl_set_memory_region: "
++				       "failed to munmap memory\n");
++			}
++		}
 +	}
 +
-+	ret = ds_allocate((void **)&child->thread.ds_area_msr,
-+			  size << PAGE_SHIFT);
-+	if (ret < 0)
-+		goto out;
++	if (!kvm->arch.n_requested_mmu_pages) {
++		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
++		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
++	}
 +
-+	current->mm->total_vm  += size;
-+	current->mm->locked_vm += size;
++	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
++	kvm_flush_remote_tlbs(kvm);
 +
-+out:
-+	if (child->thread.ds_area_msr)
-+		set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
-+	else
-+		clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
++	return 0;
++}
 +
-+	return ret;
++int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
++	       || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
 +}
 +
-+static int ptrace_bts_config(struct task_struct *child,
-+			     long cfg_size,
-+			     const struct ptrace_bts_config __user *ucfg)
++static void vcpu_kick_intr(void *info)
 +{
-+	struct ptrace_bts_config cfg;
-+	int bts_size, ret = 0;
-+	void *ds;
++#ifdef DEBUG
++	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
++	printk(KERN_DEBUG "vcpu_kick_intr %p \n", vcpu);
++#endif
++}
 +
-+	if (cfg_size < sizeof(cfg))
-+		return -EIO;
++void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
++{
++	int ipi_pcpu = vcpu->cpu;
 +
-+	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
-+		return -EFAULT;
++	if (waitqueue_active(&vcpu->wq)) {
++		wake_up_interruptible(&vcpu->wq);
++		++vcpu->stat.halt_wakeup;
++	}
++	if (vcpu->guest_mode)
++		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
++}
+diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
+new file mode 100644
+index 0000000..7958600
+--- /dev/null
++++ b/arch/x86/kvm/x86_emulate.c
+@@ -0,0 +1,1912 @@
++/******************************************************************************
++ * x86_emulate.c
++ *
++ * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
++ *
++ * Copyright (c) 2005 Keir Fraser
++ *
++ * Linux coding style, mod r/m decoder, segment base fixes, real-mode
++ * privileged instructions:
++ *
++ * Copyright (C) 2006 Qumranet
++ *
++ *   Avi Kivity <avi at qumranet.com>
++ *   Yaniv Kamay <yaniv at qumranet.com>
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
++ *
++ * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
++ */
 +
-+	if ((int)cfg.size < 0)
-+		return -EINVAL;
++#ifndef __KERNEL__
++#include <stdio.h>
++#include <stdint.h>
++#include <public/xen.h>
++#define DPRINTF(_f, _a ...) printf(_f , ## _a)
++#else
++#include <linux/kvm_host.h>
++#define DPRINTF(x...) do {} while (0)
++#endif
++#include <linux/module.h>
++#include <asm/kvm_x86_emulate.h>
 +
-+	bts_size = 0;
-+	ds = (void *)child->thread.ds_area_msr;
-+	if (ds) {
-+		bts_size = ds_get_bts_size(ds);
-+		if (bts_size < 0)
-+			return bts_size;
-+	}
-+	cfg.size = PAGE_ALIGN(cfg.size);
++/*
++ * Opcode effective-address decode tables.
++ * Note that we only emulate instructions that have at least one memory
++ * operand (excluding implicit stack references). We assume that stack
++ * references and instruction fetches will never occur in special memory
++ * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
++ * not be handled.
++ */
++
++/* Operand sizes: 8-bit operands or specified/overridden size. */
++#define ByteOp      (1<<0)	/* 8-bit operands. */
++/* Destination operand type. */
++#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
++#define DstReg      (2<<1)	/* Register operand. */
++#define DstMem      (3<<1)	/* Memory operand. */
++#define DstMask     (3<<1)
++/* Source operand type. */
++#define SrcNone     (0<<3)	/* No source operand. */
++#define SrcImplicit (0<<3)	/* Source operand is implicit in the opcode. */
++#define SrcReg      (1<<3)	/* Register operand. */
++#define SrcMem      (2<<3)	/* Memory operand. */
++#define SrcMem16    (3<<3)	/* Memory operand (16-bit). */
++#define SrcMem32    (4<<3)	/* Memory operand (32-bit). */
++#define SrcImm      (5<<3)	/* Immediate operand. */
++#define SrcImmByte  (6<<3)	/* 8-bit sign-extended immediate operand. */
++#define SrcMask     (7<<3)
++/* Generic ModRM decode. */
++#define ModRM       (1<<6)
++/* Destination is only written; never read. */
++#define Mov         (1<<7)
++#define BitOp       (1<<8)
++#define MemAbs      (1<<9)      /* Memory operand is absolute displacement */
++#define String      (1<<10)     /* String instruction (rep capable) */
++#define Stack       (1<<11)     /* Stack instruction (push/pop) */
++
++static u16 opcode_table[256] = {
++	/* 0x00 - 0x07 */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
++	0, 0, 0, 0,
++	/* 0x08 - 0x0F */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
++	0, 0, 0, 0,
++	/* 0x10 - 0x17 */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
++	0, 0, 0, 0,
++	/* 0x18 - 0x1F */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
++	0, 0, 0, 0,
++	/* 0x20 - 0x27 */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
++	SrcImmByte, SrcImm, 0, 0,
++	/* 0x28 - 0x2F */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
++	0, 0, 0, 0,
++	/* 0x30 - 0x37 */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
++	0, 0, 0, 0,
++	/* 0x38 - 0x3F */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
++	0, 0, 0, 0,
++	/* 0x40 - 0x47 */
++	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
++	/* 0x48 - 0x4F */
++	DstReg, DstReg, DstReg, DstReg,	DstReg, DstReg, DstReg, DstReg,
++	/* 0x50 - 0x57 */
++	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
++	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
++	/* 0x58 - 0x5F */
++	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
++	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
++	/* 0x60 - 0x67 */
++	0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
++	0, 0, 0, 0,
++	/* 0x68 - 0x6F */
++	0, 0, ImplicitOps | Mov | Stack, 0,
++	SrcNone  | ByteOp  | ImplicitOps, SrcNone  | ImplicitOps, /* insb, insw/insd */
++	SrcNone  | ByteOp  | ImplicitOps, SrcNone  | ImplicitOps, /* outsb, outsw/outsd */
++	/* 0x70 - 0x77 */
++	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
++	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
++	/* 0x78 - 0x7F */
++	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
++	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
++	/* 0x80 - 0x87 */
++	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
++	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
++	/* 0x88 - 0x8F */
++	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
++	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	0, ModRM | DstReg, 0, DstMem | SrcNone | ModRM | Mov | Stack,
++	/* 0x90 - 0x9F */
++	0, 0, 0, 0, 0, 0, 0, 0,
++	0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
++	/* 0xA0 - 0xA7 */
++	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
++	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
++	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
++	ByteOp | ImplicitOps | String, ImplicitOps | String,
++	/* 0xA8 - 0xAF */
++	0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
++	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
++	ByteOp | ImplicitOps | String, ImplicitOps | String,
++	/* 0xB0 - 0xBF */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0xC0 - 0xC7 */
++	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
++	0, ImplicitOps | Stack, 0, 0,
++	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
++	/* 0xC8 - 0xCF */
++	0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0xD0 - 0xD7 */
++	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
++	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
++	0, 0, 0, 0,
++	/* 0xD8 - 0xDF */
++	0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0xE0 - 0xE7 */
++	0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0xE8 - 0xEF */
++	ImplicitOps | Stack, SrcImm|ImplicitOps, 0, SrcImmByte|ImplicitOps,
++	0, 0, 0, 0,
++	/* 0xF0 - 0xF7 */
++	0, 0, 0, 0,
++	ImplicitOps, ImplicitOps,
++	ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
++	/* 0xF8 - 0xFF */
++	ImplicitOps, 0, ImplicitOps, ImplicitOps,
++	0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
++};
++
++static u16 twobyte_table[256] = {
++	/* 0x00 - 0x0F */
++	0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
++	ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
++	/* 0x10 - 0x1F */
++	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
++	/* 0x20 - 0x2F */
++	ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
++	0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0x30 - 0x3F */
++	ImplicitOps, 0, ImplicitOps, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0x40 - 0x47 */
++	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	/* 0x48 - 0x4F */
++	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
++	/* 0x50 - 0x5F */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0x60 - 0x6F */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0x70 - 0x7F */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0x80 - 0x8F */
++	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
++	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
++	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
++	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
++	/* 0x90 - 0x9F */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0xA0 - 0xA7 */
++	0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0,
++	/* 0xA8 - 0xAF */
++	0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0,
++	/* 0xB0 - 0xB7 */
++	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
++	    DstMem | SrcReg | ModRM | BitOp,
++	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
++	    DstReg | SrcMem16 | ModRM | Mov,
++	/* 0xB8 - 0xBF */
++	0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp,
++	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
++	    DstReg | SrcMem16 | ModRM | Mov,
++	/* 0xC0 - 0xCF */
++	0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
++	0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0xD0 - 0xDF */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0xE0 - 0xEF */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++	/* 0xF0 - 0xFF */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
++};
++
++/* EFLAGS bit definitions. */
++#define EFLG_OF (1<<11)
++#define EFLG_DF (1<<10)
++#define EFLG_SF (1<<7)
++#define EFLG_ZF (1<<6)
++#define EFLG_AF (1<<4)
++#define EFLG_PF (1<<2)
++#define EFLG_CF (1<<0)
++
++/*
++ * Instruction emulation:
++ * Most instructions are emulated directly via a fragment of inline assembly
++ * code. This allows us to save/restore EFLAGS and thus very easily pick up
++ * any modified flags.
++ */
++
++#if defined(CONFIG_X86_64)
++#define _LO32 "k"		/* force 32-bit operand */
++#define _STK  "%%rsp"		/* stack pointer */
++#elif defined(__i386__)
++#define _LO32 ""		/* force 32-bit operand */
++#define _STK  "%%esp"		/* stack pointer */
++#endif
++
++/*
++ * These EFLAGS bits are restored from saved value during emulation, and
++ * any changes are written back to the saved value after emulation.
++ */
++#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
++
++/* Before executing instruction: restore necessary bits in EFLAGS. */
++#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
++	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
++	"movl %"_sav",%"_LO32 _tmp"; "                                  \
++	"push %"_tmp"; "                                                \
++	"push %"_tmp"; "                                                \
++	"movl %"_msk",%"_LO32 _tmp"; "                                  \
++	"andl %"_LO32 _tmp",("_STK"); "                                 \
++	"pushf; "                                                       \
++	"notl %"_LO32 _tmp"; "                                          \
++	"andl %"_LO32 _tmp",("_STK"); "                                 \
++	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
++	"pop  %"_tmp"; "                                                \
++	"orl  %"_LO32 _tmp",("_STK"); "                                 \
++	"popf; "                                                        \
++	"pop  %"_sav"; "
++
++/* After executing instruction: write-back necessary bits in EFLAGS. */
++#define _POST_EFLAGS(_sav, _msk, _tmp) \
++	/* _sav |= EFLAGS & _msk; */		\
++	"pushf; "				\
++	"pop  %"_tmp"; "			\
++	"andl %"_msk",%"_LO32 _tmp"; "		\
++	"orl  %"_LO32 _tmp",%"_sav"; "
++
++/* Raw emulation: instruction has two explicit operands. */
++#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
++	do { 								    \
++		unsigned long _tmp;					    \
++									    \
++		switch ((_dst).bytes) {					    \
++		case 2:							    \
++			__asm__ __volatile__ (				    \
++				_PRE_EFLAGS("0", "4", "2")		    \
++				_op"w %"_wx"3,%1; "			    \
++				_POST_EFLAGS("0", "4", "2")		    \
++				: "=m" (_eflags), "=m" ((_dst).val),        \
++				  "=&r" (_tmp)				    \
++				: _wy ((_src).val), "i" (EFLAGS_MASK));     \
++			break;						    \
++		case 4:							    \
++			__asm__ __volatile__ (				    \
++				_PRE_EFLAGS("0", "4", "2")		    \
++				_op"l %"_lx"3,%1; "			    \
++				_POST_EFLAGS("0", "4", "2")		    \
++				: "=m" (_eflags), "=m" ((_dst).val),	    \
++				  "=&r" (_tmp)				    \
++				: _ly ((_src).val), "i" (EFLAGS_MASK));     \
++			break;						    \
++		case 8:							    \
++			__emulate_2op_8byte(_op, _src, _dst,		    \
++					    _eflags, _qx, _qy);		    \
++			break;						    \
++		}							    \
++	} while (0)
 +
-+	if (bts_size != cfg.size) {
-+		ret = ptrace_bts_realloc(child, cfg.size,
-+					 cfg.flags & PTRACE_BTS_O_CUT_SIZE);
-+		if (ret < 0)
-+			goto errout;
++#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
++	do {								     \
++		unsigned long _tmp;					     \
++		switch ((_dst).bytes) {				             \
++		case 1:							     \
++			__asm__ __volatile__ (				     \
++				_PRE_EFLAGS("0", "4", "2")		     \
++				_op"b %"_bx"3,%1; "			     \
++				_POST_EFLAGS("0", "4", "2")		     \
++				: "=m" (_eflags), "=m" ((_dst).val),	     \
++				  "=&r" (_tmp)				     \
++				: _by ((_src).val), "i" (EFLAGS_MASK));      \
++			break;						     \
++		default:						     \
++			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
++					     _wx, _wy, _lx, _ly, _qx, _qy);  \
++			break;						     \
++		}							     \
++	} while (0)
 +
-+		ds = (void *)child->thread.ds_area_msr;
-+	}
++/* Source operand is byte-sized and may be restricted to just %cl. */
++#define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
++	__emulate_2op(_op, _src, _dst, _eflags,				\
++		      "b", "c", "b", "c", "b", "c", "b", "c")
++
++/* Source operand is byte, word, long or quad sized. */
++#define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
++	__emulate_2op(_op, _src, _dst, _eflags,				\
++		      "b", "q", "w", "r", _LO32, "r", "", "r")
++
++/* Source operand is word, long or quad sized. */
++#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
++	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
++			     "w", "r", _LO32, "r", "", "r")
 +
-+	if (cfg.flags & PTRACE_BTS_O_SIGNAL)
-+		ret = ds_set_overflow(ds, DS_O_SIGNAL);
-+	else
-+		ret = ds_set_overflow(ds, DS_O_WRAP);
-+	if (ret < 0)
-+		goto errout;
++/* Instruction has only one explicit operand (no source operand). */
++#define emulate_1op(_op, _dst, _eflags)                                    \
++	do {								\
++		unsigned long _tmp;					\
++									\
++		switch ((_dst).bytes) {				        \
++		case 1:							\
++			__asm__ __volatile__ (				\
++				_PRE_EFLAGS("0", "3", "2")		\
++				_op"b %1; "				\
++				_POST_EFLAGS("0", "3", "2")		\
++				: "=m" (_eflags), "=m" ((_dst).val),	\
++				  "=&r" (_tmp)				\
++				: "i" (EFLAGS_MASK));			\
++			break;						\
++		case 2:							\
++			__asm__ __volatile__ (				\
++				_PRE_EFLAGS("0", "3", "2")		\
++				_op"w %1; "				\
++				_POST_EFLAGS("0", "3", "2")		\
++				: "=m" (_eflags), "=m" ((_dst).val),	\
++				  "=&r" (_tmp)				\
++				: "i" (EFLAGS_MASK));			\
++			break;						\
++		case 4:							\
++			__asm__ __volatile__ (				\
++				_PRE_EFLAGS("0", "3", "2")		\
++				_op"l %1; "				\
++				_POST_EFLAGS("0", "3", "2")		\
++				: "=m" (_eflags), "=m" ((_dst).val),	\
++				  "=&r" (_tmp)				\
++				: "i" (EFLAGS_MASK));			\
++			break;						\
++		case 8:							\
++			__emulate_1op_8byte(_op, _dst, _eflags);	\
++			break;						\
++		}							\
++	} while (0)
 +
-+	if (cfg.flags & PTRACE_BTS_O_TRACE)
-+		child->thread.debugctlmsr |= ds_debugctl_mask();
-+	else
-+		child->thread.debugctlmsr &= ~ds_debugctl_mask();
++/* Emulate an instruction with quadword operands (x86/64 only). */
++#if defined(CONFIG_X86_64)
++#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy)           \
++	do {								  \
++		__asm__ __volatile__ (					  \
++			_PRE_EFLAGS("0", "4", "2")			  \
++			_op"q %"_qx"3,%1; "				  \
++			_POST_EFLAGS("0", "4", "2")			  \
++			: "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \
++			: _qy ((_src).val), "i" (EFLAGS_MASK));		\
++	} while (0)
 +
-+	if (cfg.flags & PTRACE_BTS_O_SCHED)
-+		set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
-+	else
-+		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
++#define __emulate_1op_8byte(_op, _dst, _eflags)                           \
++	do {								  \
++		__asm__ __volatile__ (					  \
++			_PRE_EFLAGS("0", "3", "2")			  \
++			_op"q %1; "					  \
++			_POST_EFLAGS("0", "3", "2")			  \
++			: "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \
++			: "i" (EFLAGS_MASK));				  \
++	} while (0)
 +
-+	ret = sizeof(cfg);
++#elif defined(__i386__)
++#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy)
++#define __emulate_1op_8byte(_op, _dst, _eflags)
++#endif				/* __i386__ */
++
++/* Fetch next part of the instruction being emulated. */
++#define insn_fetch(_type, _size, _eip)                                  \
++({	unsigned long _x;						\
++	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
++	if (rc != 0)							\
++		goto done;						\
++	(_eip) += (_size);						\
++	(_type)_x;							\
++})
 +
-+out:
-+	if (child->thread.debugctlmsr)
-+		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-+	else
-+		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
++/* Access/update address held in a register, based on addressing mode. */
++#define address_mask(reg)						\
++	((c->ad_bytes == sizeof(unsigned long)) ? 			\
++		(reg) :	((reg) & ((1UL << (c->ad_bytes << 3)) - 1)))
++#define register_address(base, reg)                                     \
++	((base) + address_mask(reg))
++#define register_address_increment(reg, inc)                            \
++	do {								\
++		/* signed type ensures sign extension to long */        \
++		int _inc = (inc);					\
++		if (c->ad_bytes == sizeof(unsigned long))		\
++			(reg) += _inc;					\
++		else							\
++			(reg) = ((reg) & 				\
++				 ~((1UL << (c->ad_bytes << 3)) - 1)) |	\
++				(((reg) + _inc) &			\
++				 ((1UL << (c->ad_bytes << 3)) - 1));	\
++	} while (0)
 +
-+	return ret;
++#define JMP_REL(rel) 							\
++	do {								\
++		register_address_increment(c->eip, rel);		\
++	} while (0)
 +
-+errout:
-+	child->thread.debugctlmsr &= ~ds_debugctl_mask();
-+	clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
-+	goto out;
++static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
++			      struct x86_emulate_ops *ops,
++			      unsigned long linear, u8 *dest)
++{
++	struct fetch_cache *fc = &ctxt->decode.fetch;
++	int rc;
++	int size;
++
++	if (linear < fc->start || linear >= fc->end) {
++		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
++		rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
++		if (rc)
++			return rc;
++		fc->start = linear;
++		fc->end = linear + size;
++	}
++	*dest = fc->data[linear - fc->start];
++	return 0;
 +}
 +
-+static int ptrace_bts_status(struct task_struct *child,
-+			     long cfg_size,
-+			     struct ptrace_bts_config __user *ucfg)
++static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
++			 struct x86_emulate_ops *ops,
++			 unsigned long eip, void *dest, unsigned size)
 +{
-+	void *ds = (void *)child->thread.ds_area_msr;
-+	struct ptrace_bts_config cfg;
++	int rc = 0;
 +
-+	if (cfg_size < sizeof(cfg))
-+		return -EIO;
++	eip += ctxt->cs_base;
++	while (size--) {
++		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
++		if (rc)
++			return rc;
++	}
++	return 0;
++}
 +
-+	memset(&cfg, 0, sizeof(cfg));
++/*
++ * Given the 'reg' portion of a ModRM byte, and a register block, return a
++ * pointer into the block that addresses the relevant register.
++ * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
++ */
++static void *decode_register(u8 modrm_reg, unsigned long *regs,
++			     int highbyte_regs)
++{
++	void *p;
 +
-+	if (ds) {
-+		cfg.size = ds_get_bts_size(ds);
++	p = &regs[modrm_reg];
++	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
++		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
++	return p;
++}
 +
-+		if (ds_get_overflow(ds) == DS_O_SIGNAL)
-+			cfg.flags |= PTRACE_BTS_O_SIGNAL;
++static int read_descriptor(struct x86_emulate_ctxt *ctxt,
++			   struct x86_emulate_ops *ops,
++			   void *ptr,
++			   u16 *size, unsigned long *address, int op_bytes)
++{
++	int rc;
 +
-+		if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
-+		    child->thread.debugctlmsr & ds_debugctl_mask())
-+			cfg.flags |= PTRACE_BTS_O_TRACE;
++	if (op_bytes == 2)
++		op_bytes = 3;
++	*address = 0;
++	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
++			   ctxt->vcpu);
++	if (rc)
++		return rc;
++	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
++			   ctxt->vcpu);
++	return rc;
++}
 +
-+		if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
-+			cfg.flags |= PTRACE_BTS_O_SCHED;
++static int test_cc(unsigned int condition, unsigned int flags)
++{
++	int rc = 0;
++
++	switch ((condition & 15) >> 1) {
++	case 0: /* o */
++		rc |= (flags & EFLG_OF);
++		break;
++	case 1: /* b/c/nae */
++		rc |= (flags & EFLG_CF);
++		break;
++	case 2: /* z/e */
++		rc |= (flags & EFLG_ZF);
++		break;
++	case 3: /* be/na */
++		rc |= (flags & (EFLG_CF|EFLG_ZF));
++		break;
++	case 4: /* s */
++		rc |= (flags & EFLG_SF);
++		break;
++	case 5: /* p/pe */
++		rc |= (flags & EFLG_PF);
++		break;
++	case 7: /* le/ng */
++		rc |= (flags & EFLG_ZF);
++		/* fall through */
++	case 6: /* l/nge */
++		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
++		break;
 +	}
 +
-+	cfg.bts_size = sizeof(struct bts_struct);
++	/* Odd condition identifiers (lsb == 1) have inverted sense. */
++	return (!!rc ^ (condition & 1));
++}
 +
-+	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
-+		return -EFAULT;
++static void decode_register_operand(struct operand *op,
++				    struct decode_cache *c,
++				    int inhibit_bytereg)
++{
++	unsigned reg = c->modrm_reg;
++	int highbyte_regs = c->rex_prefix == 0;
 +
-+	return sizeof(cfg);
++	if (!(c->d & ModRM))
++		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
++	op->type = OP_REG;
++	if ((c->d & ByteOp) && !inhibit_bytereg) {
++		op->ptr = decode_register(reg, c->regs, highbyte_regs);
++		op->val = *(u8 *)op->ptr;
++		op->bytes = 1;
++	} else {
++		op->ptr = decode_register(reg, c->regs, 0);
++		op->bytes = c->op_bytes;
++		switch (op->bytes) {
++		case 2:
++			op->val = *(u16 *)op->ptr;
++			break;
++		case 4:
++			op->val = *(u32 *)op->ptr;
++			break;
++		case 8:
++			op->val = *(u64 *) op->ptr;
++			break;
++		}
++	}
++	op->orig_val = op->val;
 +}
 +
-+void ptrace_bts_take_timestamp(struct task_struct *tsk,
-+			       enum bts_qualifier qualifier)
++static int decode_modrm(struct x86_emulate_ctxt *ctxt,
++			struct x86_emulate_ops *ops)
 +{
-+	struct bts_struct rec = {
-+		.qualifier = qualifier,
-+		.variant.jiffies = jiffies_64
-+	};
++	struct decode_cache *c = &ctxt->decode;
++	u8 sib;
++	int index_reg = 0, base_reg = 0, scale, rip_relative = 0;
++	int rc = 0;
 +
-+	ptrace_bts_write_record(tsk, &rec);
++	if (c->rex_prefix) {
++		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
++		index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
++	c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
++	}
++
++	c->modrm = insn_fetch(u8, 1, c->eip);
++	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
++	c->modrm_reg |= (c->modrm & 0x38) >> 3;
++	c->modrm_rm |= (c->modrm & 0x07);
++	c->modrm_ea = 0;
++	c->use_modrm_ea = 1;
++
++	if (c->modrm_mod == 3) {
++		c->modrm_val = *(unsigned long *)
++			decode_register(c->modrm_rm, c->regs, c->d & ByteOp);
++		return rc;
++	}
++
++	if (c->ad_bytes == 2) {
++		unsigned bx = c->regs[VCPU_REGS_RBX];
++		unsigned bp = c->regs[VCPU_REGS_RBP];
++		unsigned si = c->regs[VCPU_REGS_RSI];
++		unsigned di = c->regs[VCPU_REGS_RDI];
++
++		/* 16-bit ModR/M decode. */
++		switch (c->modrm_mod) {
++		case 0:
++			if (c->modrm_rm == 6)
++				c->modrm_ea += insn_fetch(u16, 2, c->eip);
++			break;
++		case 1:
++			c->modrm_ea += insn_fetch(s8, 1, c->eip);
++			break;
++		case 2:
++			c->modrm_ea += insn_fetch(u16, 2, c->eip);
++			break;
++		}
++		switch (c->modrm_rm) {
++		case 0:
++			c->modrm_ea += bx + si;
++			break;
++		case 1:
++			c->modrm_ea += bx + di;
++			break;
++		case 2:
++			c->modrm_ea += bp + si;
++			break;
++		case 3:
++			c->modrm_ea += bp + di;
++			break;
++		case 4:
++			c->modrm_ea += si;
++			break;
++		case 5:
++			c->modrm_ea += di;
++			break;
++		case 6:
++			if (c->modrm_mod != 0)
++				c->modrm_ea += bp;
++			break;
++		case 7:
++			c->modrm_ea += bx;
++			break;
++		}
++		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
++		    (c->modrm_rm == 6 && c->modrm_mod != 0))
++			if (!c->override_base)
++				c->override_base = &ctxt->ss_base;
++		c->modrm_ea = (u16)c->modrm_ea;
++	} else {
++		/* 32/64-bit ModR/M decode. */
++		switch (c->modrm_rm) {
++		case 4:
++		case 12:
++			sib = insn_fetch(u8, 1, c->eip);
++			index_reg |= (sib >> 3) & 7;
++			base_reg |= sib & 7;
++			scale = sib >> 6;
++
++			switch (base_reg) {
++			case 5:
++				if (c->modrm_mod != 0)
++					c->modrm_ea += c->regs[base_reg];
++				else
++					c->modrm_ea +=
++						insn_fetch(s32, 4, c->eip);
++				break;
++			default:
++				c->modrm_ea += c->regs[base_reg];
++			}
++			switch (index_reg) {
++			case 4:
++				break;
++			default:
++				c->modrm_ea += c->regs[index_reg] << scale;
++			}
++			break;
++		case 5:
++			if (c->modrm_mod != 0)
++				c->modrm_ea += c->regs[c->modrm_rm];
++			else if (ctxt->mode == X86EMUL_MODE_PROT64)
++				rip_relative = 1;
++			break;
++		default:
++			c->modrm_ea += c->regs[c->modrm_rm];
++			break;
++		}
++		switch (c->modrm_mod) {
++		case 0:
++			if (c->modrm_rm == 5)
++				c->modrm_ea += insn_fetch(s32, 4, c->eip);
++			break;
++		case 1:
++			c->modrm_ea += insn_fetch(s8, 1, c->eip);
++			break;
++		case 2:
++			c->modrm_ea += insn_fetch(s32, 4, c->eip);
++			break;
++		}
++	}
++	if (rip_relative) {
++		c->modrm_ea += c->eip;
++		switch (c->d & SrcMask) {
++		case SrcImmByte:
++			c->modrm_ea += 1;
++			break;
++		case SrcImm:
++			if (c->d & ByteOp)
++				c->modrm_ea += 1;
++			else
++				if (c->op_bytes == 8)
++					c->modrm_ea += 4;
++				else
++					c->modrm_ea += c->op_bytes;
++		}
++	}
++done:
++	return rc;
 +}
 +
-+/*
-+ * Called by kernel/ptrace.c when detaching..
-+ *
-+ * Make sure the single step bit is not set.
-+ */
-+void ptrace_disable(struct task_struct *child)
++static int decode_abs(struct x86_emulate_ctxt *ctxt,
++		      struct x86_emulate_ops *ops)
 +{
-+	user_disable_single_step(child);
-+#ifdef TIF_SYSCALL_EMU
-+	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
-+#endif
-+	if (child->thread.ds_area_msr) {
-+		ptrace_bts_realloc(child, 0, 0);
-+		child->thread.debugctlmsr &= ~ds_debugctl_mask();
-+		if (!child->thread.debugctlmsr)
-+			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-+		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
++	struct decode_cache *c = &ctxt->decode;
++	int rc = 0;
++
++	switch (c->ad_bytes) {
++	case 2:
++		c->modrm_ea = insn_fetch(u16, 2, c->eip);
++		break;
++	case 4:
++		c->modrm_ea = insn_fetch(u32, 4, c->eip);
++		break;
++	case 8:
++		c->modrm_ea = insn_fetch(u64, 8, c->eip);
++		break;
 +	}
++done:
++	return rc;
 +}
 +
-+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-+static const struct user_regset_view user_x86_32_view; /* Initialized below. */
-+#endif
-+
-+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
++int
++x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 +{
-+	int ret;
-+	unsigned long __user *datap = (unsigned long __user *)data;
++	struct decode_cache *c = &ctxt->decode;
++	int rc = 0;
++	int mode = ctxt->mode;
++	int def_op_bytes, def_ad_bytes;
 +
-+	switch (request) {
-+	/* read the word at location addr in the USER area. */
-+	case PTRACE_PEEKUSR: {
-+		unsigned long tmp;
++	/* Shadow copy of register state. Committed on successful emulation. */
 +
-+		ret = -EIO;
-+		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-+		    addr >= sizeof(struct user))
++	memset(c, 0, sizeof(struct decode_cache));
++	c->eip = ctxt->vcpu->arch.rip;
++	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
++
++	switch (mode) {
++	case X86EMUL_MODE_REAL:
++	case X86EMUL_MODE_PROT16:
++		def_op_bytes = def_ad_bytes = 2;
++		break;
++	case X86EMUL_MODE_PROT32:
++		def_op_bytes = def_ad_bytes = 4;
++		break;
++#ifdef CONFIG_X86_64
++	case X86EMUL_MODE_PROT64:
++		def_op_bytes = 4;
++		def_ad_bytes = 8;
++		break;
++#endif
++	default:
++		return -1;
++	}
++
++	c->op_bytes = def_op_bytes;
++	c->ad_bytes = def_ad_bytes;
++
++	/* Legacy prefixes. */
++	for (;;) {
++		switch (c->b = insn_fetch(u8, 1, c->eip)) {
++		case 0x66:	/* operand-size override */
++			/* switch between 2/4 bytes */
++			c->op_bytes = def_op_bytes ^ 6;
++			break;
++		case 0x67:	/* address-size override */
++			if (mode == X86EMUL_MODE_PROT64)
++				/* switch between 4/8 bytes */
++				c->ad_bytes = def_ad_bytes ^ 12;
++			else
++				/* switch between 2/4 bytes */
++				c->ad_bytes = def_ad_bytes ^ 6;
++			break;
++		case 0x2e:	/* CS override */
++			c->override_base = &ctxt->cs_base;
++			break;
++		case 0x3e:	/* DS override */
++			c->override_base = &ctxt->ds_base;
++			break;
++		case 0x26:	/* ES override */
++			c->override_base = &ctxt->es_base;
++			break;
++		case 0x64:	/* FS override */
++			c->override_base = &ctxt->fs_base;
++			break;
++		case 0x65:	/* GS override */
++			c->override_base = &ctxt->gs_base;
 +			break;
++		case 0x36:	/* SS override */
++			c->override_base = &ctxt->ss_base;
++			break;
++		case 0x40 ... 0x4f: /* REX */
++			if (mode != X86EMUL_MODE_PROT64)
++				goto done_prefixes;
++			c->rex_prefix = c->b;
++			continue;
++		case 0xf0:	/* LOCK */
++			c->lock_prefix = 1;
++			break;
++		case 0xf2:	/* REPNE/REPNZ */
++			c->rep_prefix = REPNE_PREFIX;
++			break;
++		case 0xf3:	/* REP/REPE/REPZ */
++			c->rep_prefix = REPE_PREFIX;
++			break;
++		default:
++			goto done_prefixes;
++		}
 +
-+		tmp = 0;  /* Default return condition */
-+		if (addr < sizeof(struct user_regs_struct))
-+			tmp = getreg(child, addr);
-+		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
-+			 addr <= offsetof(struct user, u_debugreg[7])) {
-+			addr -= offsetof(struct user, u_debugreg[0]);
-+			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
++		/* Any legacy prefix after a REX prefix nullifies its effect. */
++
++		c->rex_prefix = 0;
++	}
++
++done_prefixes:
++
++	/* REX prefix. */
++	if (c->rex_prefix)
++		if (c->rex_prefix & 8)
++			c->op_bytes = 8;	/* REX.W */
++
++	/* Opcode byte(s). */
++	c->d = opcode_table[c->b];
++	if (c->d == 0) {
++		/* Two-byte opcode? */
++		if (c->b == 0x0f) {
++			c->twobyte = 1;
++			c->b = insn_fetch(u8, 1, c->eip);
++			c->d = twobyte_table[c->b];
++		}
++
++		/* Unrecognised? */
++		if (c->d == 0) {
++			DPRINTF("Cannot emulate %02x\n", c->b);
++			return -1;
 +		}
-+		ret = put_user(tmp, datap);
-+		break;
 +	}
 +
-+	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
-+		ret = -EIO;
-+		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-+		    addr >= sizeof(struct user))
++	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
++		c->op_bytes = 8;
++
++	/* ModRM and SIB bytes. */
++	if (c->d & ModRM)
++		rc = decode_modrm(ctxt, ops);
++	else if (c->d & MemAbs)
++		rc = decode_abs(ctxt, ops);
++	if (rc)
++		goto done;
++
++	if (!c->override_base)
++		c->override_base = &ctxt->ds_base;
++	if (mode == X86EMUL_MODE_PROT64 &&
++	    c->override_base != &ctxt->fs_base &&
++	    c->override_base != &ctxt->gs_base)
++		c->override_base = NULL;
++
++	if (c->override_base)
++		c->modrm_ea += *c->override_base;
++
++	if (c->ad_bytes != 8)
++		c->modrm_ea = (u32)c->modrm_ea;
++	/*
++	 * Decode and fetch the source operand: register, memory
++	 * or immediate.
++	 */
++	switch (c->d & SrcMask) {
++	case SrcNone:
++		break;
++	case SrcReg:
++		decode_register_operand(&c->src, c, 0);
++		break;
++	case SrcMem16:
++		c->src.bytes = 2;
++		goto srcmem_common;
++	case SrcMem32:
++		c->src.bytes = 4;
++		goto srcmem_common;
++	case SrcMem:
++		c->src.bytes = (c->d & ByteOp) ? 1 :
++							   c->op_bytes;
++		/* Don't fetch the address for invlpg: it could be unmapped. */
++		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
++			break;
++	srcmem_common:
++		/*
++		 * For instructions with a ModR/M byte, switch to register
++		 * access if Mod = 3.
++		 */
++		if ((c->d & ModRM) && c->modrm_mod == 3) {
++			c->src.type = OP_REG;
++			break;
++		}
++		c->src.type = OP_MEM;
++		break;
++	case SrcImm:
++		c->src.type = OP_IMM;
++		c->src.ptr = (unsigned long *)c->eip;
++		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
++		if (c->src.bytes == 8)
++			c->src.bytes = 4;
++		/* NB. Immediates are sign-extended as necessary. */
++		switch (c->src.bytes) {
++		case 1:
++			c->src.val = insn_fetch(s8, 1, c->eip);
++			break;
++		case 2:
++			c->src.val = insn_fetch(s16, 2, c->eip);
++			break;
++		case 4:
++			c->src.val = insn_fetch(s32, 4, c->eip);
 +			break;
++		}
++		break;
++	case SrcImmByte:
++		c->src.type = OP_IMM;
++		c->src.ptr = (unsigned long *)c->eip;
++		c->src.bytes = 1;
++		c->src.val = insn_fetch(s8, 1, c->eip);
++		break;
++	}
 +
-+		if (addr < sizeof(struct user_regs_struct))
-+			ret = putreg(child, addr, data);
-+		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
-+			 addr <= offsetof(struct user, u_debugreg[7])) {
-+			addr -= offsetof(struct user, u_debugreg[0]);
-+			ret = ptrace_set_debugreg(child,
-+						  addr / sizeof(data), data);
++	/* Decode and fetch the destination operand: register or memory. */
++	switch (c->d & DstMask) {
++	case ImplicitOps:
++		/* Special instructions do their own operand decoding. */
++		return 0;
++	case DstReg:
++		decode_register_operand(&c->dst, c,
++			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
++		break;
++	case DstMem:
++		if ((c->d & ModRM) && c->modrm_mod == 3) {
++			c->dst.type = OP_REG;
++			break;
 +		}
++		c->dst.type = OP_MEM;
 +		break;
++	}
 +
-+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
-+		return copy_regset_to_user(child,
-+					   task_user_regset_view(current),
-+					   REGSET_GENERAL,
-+					   0, sizeof(struct user_regs_struct),
-+					   datap);
++done:
++	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
++}
 +
-+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
-+		return copy_regset_from_user(child,
-+					     task_user_regset_view(current),
-+					     REGSET_GENERAL,
-+					     0, sizeof(struct user_regs_struct),
-+					     datap);
++static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
++{
++	struct decode_cache *c = &ctxt->decode;
 +
-+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
-+		return copy_regset_to_user(child,
-+					   task_user_regset_view(current),
-+					   REGSET_FP,
-+					   0, sizeof(struct user_i387_struct),
-+					   datap);
++	c->dst.type  = OP_MEM;
++	c->dst.bytes = c->op_bytes;
++	c->dst.val = c->src.val;
++	register_address_increment(c->regs[VCPU_REGS_RSP], -c->op_bytes);
++	c->dst.ptr = (void *) register_address(ctxt->ss_base,
++					       c->regs[VCPU_REGS_RSP]);
++}
 +
-+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
-+		return copy_regset_from_user(child,
-+					     task_user_regset_view(current),
-+					     REGSET_FP,
-+					     0, sizeof(struct user_i387_struct),
-+					     datap);
++static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
++				struct x86_emulate_ops *ops)
++{
++	struct decode_cache *c = &ctxt->decode;
++	int rc;
 +
-+#ifdef CONFIG_X86_32
-+	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
-+		return copy_regset_to_user(child, &user_x86_32_view,
-+					   REGSET_XFP,
-+					   0, sizeof(struct user_fxsr_struct),
-+					   datap);
++	rc = ops->read_std(register_address(ctxt->ss_base,
++					    c->regs[VCPU_REGS_RSP]),
++			   &c->dst.val, c->dst.bytes, ctxt->vcpu);
++	if (rc != 0)
++		return rc;
 +
-+	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
-+		return copy_regset_from_user(child, &user_x86_32_view,
-+					     REGSET_XFP,
-+					     0, sizeof(struct user_fxsr_struct),
-+					     datap);
-+#endif
++	register_address_increment(c->regs[VCPU_REGS_RSP], c->dst.bytes);
 +
-+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-+	case PTRACE_GET_THREAD_AREA:
-+		if (addr < 0)
-+			return -EIO;
-+		ret = do_get_thread_area(child, addr,
-+					 (struct user_desc __user *) data);
-+		break;
++	return 0;
++}
 +
-+	case PTRACE_SET_THREAD_AREA:
-+		if (addr < 0)
-+			return -EIO;
-+		ret = do_set_thread_area(child, addr,
-+					 (struct user_desc __user *) data, 0);
++static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
++{
++	struct decode_cache *c = &ctxt->decode;
++	switch (c->modrm_reg) {
++	case 0:	/* rol */
++		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
 +		break;
-+#endif
-+
-+#ifdef CONFIG_X86_64
-+		/* normal 64bit interface to access TLS data.
-+		   Works just like arch_prctl, except that the arguments
-+		   are reversed. */
-+	case PTRACE_ARCH_PRCTL:
-+		ret = do_arch_prctl(child, data, addr);
++	case 1:	/* ror */
++		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
 +		break;
-+#endif
-+
-+	case PTRACE_BTS_CONFIG:
-+		ret = ptrace_bts_config
-+			(child, data, (struct ptrace_bts_config __user *)addr);
++	case 2:	/* rcl */
++		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
 +		break;
-+
-+	case PTRACE_BTS_STATUS:
-+		ret = ptrace_bts_status
-+			(child, data, (struct ptrace_bts_config __user *)addr);
++	case 3:	/* rcr */
++		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
 +		break;
-+
-+	case PTRACE_BTS_SIZE:
-+		ret = ptrace_bts_get_size(child);
++	case 4:	/* sal/shl */
++	case 6:	/* sal/shl */
++		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
 +		break;
-+
-+	case PTRACE_BTS_GET:
-+		ret = ptrace_bts_read_record
-+			(child, data, (struct bts_struct __user *) addr);
++	case 5:	/* shr */
++		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
 +		break;
-+
-+	case PTRACE_BTS_CLEAR:
-+		ret = ptrace_bts_clear(child);
++	case 7:	/* sar */
++		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
 +		break;
++	}
++}
 +
-+	case PTRACE_BTS_DRAIN:
-+		ret = ptrace_bts_drain
-+			(child, data, (struct bts_struct __user *) addr);
-+		break;
++static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
++			       struct x86_emulate_ops *ops)
++{
++	struct decode_cache *c = &ctxt->decode;
++	int rc = 0;
 +
++	switch (c->modrm_reg) {
++	case 0 ... 1:	/* test */
++		/*
++		 * Special case in Grp3: test has an immediate
++		 * source operand.
++		 */
++		c->src.type = OP_IMM;
++		c->src.ptr = (unsigned long *)c->eip;
++		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
++		if (c->src.bytes == 8)
++			c->src.bytes = 4;
++		switch (c->src.bytes) {
++		case 1:
++			c->src.val = insn_fetch(s8, 1, c->eip);
++			break;
++		case 2:
++			c->src.val = insn_fetch(s16, 2, c->eip);
++			break;
++		case 4:
++			c->src.val = insn_fetch(s32, 4, c->eip);
++			break;
++		}
++		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
++		break;
++	case 2:	/* not */
++		c->dst.val = ~c->dst.val;
++		break;
++	case 3:	/* neg */
++		emulate_1op("neg", c->dst, ctxt->eflags);
++		break;
 +	default:
-+		ret = ptrace_request(child, request, addr, data);
++		DPRINTF("Cannot emulate %02x\n", c->b);
++		rc = X86EMUL_UNHANDLEABLE;
 +		break;
 +	}
-+
-+	return ret;
++done:
++	return rc;
 +}
 +
-+#ifdef CONFIG_IA32_EMULATION
++static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
++			       struct x86_emulate_ops *ops)
++{
++	struct decode_cache *c = &ctxt->decode;
++	int rc;
 +
-+#include <linux/compat.h>
-+#include <linux/syscalls.h>
-+#include <asm/ia32.h>
-+#include <asm/user32.h>
++	switch (c->modrm_reg) {
++	case 0:	/* inc */
++		emulate_1op("inc", c->dst, ctxt->eflags);
++		break;
++	case 1:	/* dec */
++		emulate_1op("dec", c->dst, ctxt->eflags);
++		break;
++	case 4: /* jmp abs */
++		if (c->b == 0xff)
++			c->eip = c->dst.val;
++		else {
++			DPRINTF("Cannot emulate %02x\n", c->b);
++			return X86EMUL_UNHANDLEABLE;
++		}
++		break;
++	case 6:	/* push */
 +
-+#define R32(l,q)							\
-+	case offsetof(struct user32, regs.l):				\
-+		regs->q = value; break
++		/* 64-bit mode: PUSH always pushes a 64-bit operand. */
 +
-+#define SEG32(rs)							\
-+	case offsetof(struct user32, regs.rs):				\
-+		return set_segment_reg(child,				\
-+				       offsetof(struct user_regs_struct, rs), \
-+				       value);				\
-+		break
++		if (ctxt->mode == X86EMUL_MODE_PROT64) {
++			c->dst.bytes = 8;
++			rc = ops->read_std((unsigned long)c->dst.ptr,
++					   &c->dst.val, 8, ctxt->vcpu);
++			if (rc != 0)
++				return rc;
++		}
++		register_address_increment(c->regs[VCPU_REGS_RSP],
++					   -c->dst.bytes);
++		rc = ops->write_emulated(register_address(ctxt->ss_base,
++				    c->regs[VCPU_REGS_RSP]), &c->dst.val,
++				    c->dst.bytes, ctxt->vcpu);
++		if (rc != 0)
++			return rc;
++		c->dst.type = OP_NONE;
++		break;
++	default:
++		DPRINTF("Cannot emulate %02x\n", c->b);
++		return X86EMUL_UNHANDLEABLE;
++	}
++	return 0;
++}
 +
-+static int putreg32(struct task_struct *child, unsigned regno, u32 value)
++static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
++			       struct x86_emulate_ops *ops,
++			       unsigned long memop)
 +{
-+	struct pt_regs *regs = task_pt_regs(child);
++	struct decode_cache *c = &ctxt->decode;
++	u64 old, new;
++	int rc;
 +
-+	switch (regno) {
++	rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
++	if (rc != 0)
++		return rc;
 +
-+	SEG32(cs);
-+	SEG32(ds);
-+	SEG32(es);
-+	SEG32(fs);
-+	SEG32(gs);
-+	SEG32(ss);
++	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
++	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
 +
-+	R32(ebx, bx);
-+	R32(ecx, cx);
-+	R32(edx, dx);
-+	R32(edi, di);
-+	R32(esi, si);
-+	R32(ebp, bp);
-+	R32(eax, ax);
-+	R32(orig_eax, orig_ax);
-+	R32(eip, ip);
-+	R32(esp, sp);
++		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
++		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
++		ctxt->eflags &= ~EFLG_ZF;
 +
-+	case offsetof(struct user32, regs.eflags):
-+		return set_flags(child, value);
++	} else {
++		new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
++		       (u32) c->regs[VCPU_REGS_RBX];
 +
-+	case offsetof(struct user32, u_debugreg[0]) ...
-+		offsetof(struct user32, u_debugreg[7]):
-+		regno -= offsetof(struct user32, u_debugreg[0]);
-+		return ptrace_set_debugreg(child, regno / 4, value);
++		rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
++		if (rc != 0)
++			return rc;
++		ctxt->eflags |= EFLG_ZF;
++	}
++	return 0;
++}
 +
-+	default:
-+		if (regno > sizeof(struct user32) || (regno & 3))
-+			return -EIO;
++static inline int writeback(struct x86_emulate_ctxt *ctxt,
++			    struct x86_emulate_ops *ops)
++{
++	int rc;
++	struct decode_cache *c = &ctxt->decode;
 +
-+		/*
-+		 * Other dummy fields in the virtual user structure
-+		 * are ignored
++	switch (c->dst.type) {
++	case OP_REG:
++		/* The 4-byte case *is* correct:
++		 * in 64-bit mode we zero-extend.
 +		 */
++		switch (c->dst.bytes) {
++		case 1:
++			*(u8 *)c->dst.ptr = (u8)c->dst.val;
++			break;
++		case 2:
++			*(u16 *)c->dst.ptr = (u16)c->dst.val;
++			break;
++		case 4:
++			*c->dst.ptr = (u32)c->dst.val;
++			break;	/* 64b: zero-ext */
++		case 8:
++			*c->dst.ptr = c->dst.val;
++			break;
++		}
++		break;
++	case OP_MEM:
++		if (c->lock_prefix)
++			rc = ops->cmpxchg_emulated(
++					(unsigned long)c->dst.ptr,
++					&c->dst.orig_val,
++					&c->dst.val,
++					c->dst.bytes,
++					ctxt->vcpu);
++		else
++			rc = ops->write_emulated(
++					(unsigned long)c->dst.ptr,
++					&c->dst.val,
++					c->dst.bytes,
++					ctxt->vcpu);
++		if (rc != 0)
++			return rc;
++		break;
++	case OP_NONE:
++		/* no writeback */
++		break;
++	default:
 +		break;
 +	}
 +	return 0;
 +}
 +
-+#undef R32
-+#undef SEG32
++int
++x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
++{
++	unsigned long memop = 0;
++	u64 msr_data;
++	unsigned long saved_eip = 0;
++	struct decode_cache *c = &ctxt->decode;
++	int rc = 0;
 +
-+#define R32(l,q)							\
-+	case offsetof(struct user32, regs.l):				\
-+		*val = regs->q; break
++	/* Shadow copy of register state. Committed on successful emulation.
++	 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
++	 * modify them.
++	 */
 +
-+#define SEG32(rs)							\
-+	case offsetof(struct user32, regs.rs):				\
-+		*val = get_segment_reg(child,				\
-+				       offsetof(struct user_regs_struct, rs)); \
-+		break
++	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
++	saved_eip = c->eip;
 +
-+static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
-+{
-+	struct pt_regs *regs = task_pt_regs(child);
++	if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
++		memop = c->modrm_ea;
 +
-+	switch (regno) {
++	if (c->rep_prefix && (c->d & String)) {
++		/* All REP prefixes have the same first termination condition */
++		if (c->regs[VCPU_REGS_RCX] == 0) {
++			ctxt->vcpu->arch.rip = c->eip;
++			goto done;
++		}
++		/* The second termination condition only applies for REPE
++		 * and REPNE. Test if the repeat string operation prefix is
++		 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
++		 * corresponding termination condition according to:
++		 * 	- if REPE/REPZ and ZF = 0 then done
++		 * 	- if REPNE/REPNZ and ZF = 1 then done
++		 */
++		if ((c->b == 0xa6) || (c->b == 0xa7) ||
++				(c->b == 0xae) || (c->b == 0xaf)) {
++			if ((c->rep_prefix == REPE_PREFIX) &&
++				((ctxt->eflags & EFLG_ZF) == 0)) {
++					ctxt->vcpu->arch.rip = c->eip;
++					goto done;
++			}
++			if ((c->rep_prefix == REPNE_PREFIX) &&
++				((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
++				ctxt->vcpu->arch.rip = c->eip;
++				goto done;
++			}
++		}
++		c->regs[VCPU_REGS_RCX]--;
++		c->eip = ctxt->vcpu->arch.rip;
++	}
 +
-+	SEG32(ds);
-+	SEG32(es);
-+	SEG32(fs);
-+	SEG32(gs);
++	if (c->src.type == OP_MEM) {
++		c->src.ptr = (unsigned long *)memop;
++		c->src.val = 0;
++		rc = ops->read_emulated((unsigned long)c->src.ptr,
++					&c->src.val,
++					c->src.bytes,
++					ctxt->vcpu);
++		if (rc != 0)
++			goto done;
++		c->src.orig_val = c->src.val;
++	}
 +
-+	R32(cs, cs);
-+	R32(ss, ss);
-+	R32(ebx, bx);
-+	R32(ecx, cx);
-+	R32(edx, dx);
-+	R32(edi, di);
-+	R32(esi, si);
-+	R32(ebp, bp);
-+	R32(eax, ax);
-+	R32(orig_eax, orig_ax);
-+	R32(eip, ip);
-+	R32(esp, sp);
++	if ((c->d & DstMask) == ImplicitOps)
++		goto special_insn;
 +
-+	case offsetof(struct user32, regs.eflags):
-+		*val = get_flags(child);
-+		break;
 +
-+	case offsetof(struct user32, u_debugreg[0]) ...
-+		offsetof(struct user32, u_debugreg[7]):
-+		regno -= offsetof(struct user32, u_debugreg[0]);
-+		*val = ptrace_get_debugreg(child, regno / 4);
-+		break;
++	if (c->dst.type == OP_MEM) {
++		c->dst.ptr = (unsigned long *)memop;
++		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
++		c->dst.val = 0;
++		if (c->d & BitOp) {
++			unsigned long mask = ~(c->dst.bytes * 8 - 1);
 +
-+	default:
-+		if (regno > sizeof(struct user32) || (regno & 3))
-+			return -EIO;
++			c->dst.ptr = (void *)c->dst.ptr +
++						   (c->src.val & mask) / 8;
++		}
++		if (!(c->d & Mov) &&
++				   /* optimisation - avoid slow emulated read */
++		    ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
++					   &c->dst.val,
++					  c->dst.bytes, ctxt->vcpu)) != 0))
++			goto done;
++	}
++	c->dst.orig_val = c->dst.val;
++
++special_insn:
++
++	if (c->twobyte)
++		goto twobyte_insn;
++
++	switch (c->b) {
++	case 0x00 ... 0x05:
++	      add:		/* add */
++		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x08 ... 0x0d:
++	      or:		/* or */
++		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x10 ... 0x15:
++	      adc:		/* adc */
++		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x18 ... 0x1d:
++	      sbb:		/* sbb */
++		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x20 ... 0x23:
++	      and:		/* and */
++		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x24:              /* and al imm8 */
++		c->dst.type = OP_REG;
++		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
++		c->dst.val = *(u8 *)c->dst.ptr;
++		c->dst.bytes = 1;
++		c->dst.orig_val = c->dst.val;
++		goto and;
++	case 0x25:              /* and ax imm16, or eax imm32 */
++		c->dst.type = OP_REG;
++		c->dst.bytes = c->op_bytes;
++		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
++		if (c->op_bytes == 2)
++			c->dst.val = *(u16 *)c->dst.ptr;
++		else
++			c->dst.val = *(u32 *)c->dst.ptr;
++		c->dst.orig_val = c->dst.val;
++		goto and;
++	case 0x28 ... 0x2d:
++	      sub:		/* sub */
++		emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x30 ... 0x35:
++	      xor:		/* xor */
++		emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x38 ... 0x3d:
++	      cmp:		/* cmp */
++		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x40 ... 0x47: /* inc r16/r32 */
++		emulate_1op("inc", c->dst, ctxt->eflags);
++		break;
++	case 0x48 ... 0x4f: /* dec r16/r32 */
++		emulate_1op("dec", c->dst, ctxt->eflags);
++		break;
++	case 0x50 ... 0x57:  /* push reg */
++		c->dst.type  = OP_MEM;
++		c->dst.bytes = c->op_bytes;
++		c->dst.val = c->src.val;
++		register_address_increment(c->regs[VCPU_REGS_RSP],
++					   -c->op_bytes);
++		c->dst.ptr = (void *) register_address(
++			ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
++		break;
++	case 0x58 ... 0x5f: /* pop reg */
++	pop_instruction:
++		if ((rc = ops->read_std(register_address(ctxt->ss_base,
++			c->regs[VCPU_REGS_RSP]), c->dst.ptr,
++			c->op_bytes, ctxt->vcpu)) != 0)
++			goto done;
 +
++		register_address_increment(c->regs[VCPU_REGS_RSP],
++					   c->op_bytes);
++		c->dst.type = OP_NONE;	/* Disable writeback. */
++		break;
++	case 0x63:		/* movsxd */
++		if (ctxt->mode != X86EMUL_MODE_PROT64)
++			goto cannot_emulate;
++		c->dst.val = (s32) c->src.val;
++		break;
++	case 0x6a: /* push imm8 */
++		c->src.val = 0L;
++		c->src.val = insn_fetch(s8, 1, c->eip);
++		emulate_push(ctxt);
++		break;
++	case 0x6c:		/* insb */
++	case 0x6d:		/* insw/insd */
++		 if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
++				1,
++				(c->d & ByteOp) ? 1 : c->op_bytes,
++				c->rep_prefix ?
++				address_mask(c->regs[VCPU_REGS_RCX]) : 1,
++				(ctxt->eflags & EFLG_DF),
++				register_address(ctxt->es_base,
++						 c->regs[VCPU_REGS_RDI]),
++				c->rep_prefix,
++				c->regs[VCPU_REGS_RDX]) == 0) {
++			c->eip = saved_eip;
++			return -1;
++		}
++		return 0;
++	case 0x6e:		/* outsb */
++	case 0x6f:		/* outsw/outsd */
++		if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
++				0,
++				(c->d & ByteOp) ? 1 : c->op_bytes,
++				c->rep_prefix ?
++				address_mask(c->regs[VCPU_REGS_RCX]) : 1,
++				(ctxt->eflags & EFLG_DF),
++				register_address(c->override_base ?
++							*c->override_base :
++							ctxt->ds_base,
++						 c->regs[VCPU_REGS_RSI]),
++				c->rep_prefix,
++				c->regs[VCPU_REGS_RDX]) == 0) {
++			c->eip = saved_eip;
++			return -1;
++		}
++		return 0;
++	case 0x70 ... 0x7f: /* jcc (short) */ {
++		int rel = insn_fetch(s8, 1, c->eip);
++
++		if (test_cc(c->b, ctxt->eflags))
++			JMP_REL(rel);
++		break;
++	}
++	case 0x80 ... 0x83:	/* Grp1 */
++		switch (c->modrm_reg) {
++		case 0:
++			goto add;
++		case 1:
++			goto or;
++		case 2:
++			goto adc;
++		case 3:
++			goto sbb;
++		case 4:
++			goto and;
++		case 5:
++			goto sub;
++		case 6:
++			goto xor;
++		case 7:
++			goto cmp;
++		}
++		break;
++	case 0x84 ... 0x85:
++		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0x86 ... 0x87:	/* xchg */
++		/* Write back the register source. */
++		switch (c->dst.bytes) {
++		case 1:
++			*(u8 *) c->src.ptr = (u8) c->dst.val;
++			break;
++		case 2:
++			*(u16 *) c->src.ptr = (u16) c->dst.val;
++			break;
++		case 4:
++			*c->src.ptr = (u32) c->dst.val;
++			break;	/* 64b reg: zero-extend */
++		case 8:
++			*c->src.ptr = c->dst.val;
++			break;
++		}
 +		/*
-+		 * Other dummy fields in the virtual user structure
-+		 * are ignored
++		 * Write back the memory destination with implicit LOCK
++		 * prefix.
 +		 */
-+		*val = 0;
++		c->dst.val = c->src.val;
++		c->lock_prefix = 1;
++		break;
++	case 0x88 ... 0x8b:	/* mov */
++		goto mov;
++	case 0x8d: /* lea r16/r32, m */
++		c->dst.val = c->modrm_val;
++		break;
++	case 0x8f:		/* pop (sole member of Grp1a) */
++		rc = emulate_grp1a(ctxt, ops);
++		if (rc != 0)
++			goto done;
++		break;
++	case 0x9c: /* pushf */
++		c->src.val =  (unsigned long) ctxt->eflags;
++		emulate_push(ctxt);
++		break;
++	case 0x9d: /* popf */
++		c->dst.ptr = (unsigned long *) &ctxt->eflags;
++		goto pop_instruction;
++	case 0xa0 ... 0xa1:	/* mov */
++		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
++		c->dst.val = c->src.val;
++		break;
++	case 0xa2 ... 0xa3:	/* mov */
++		c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
++		break;
++	case 0xa4 ... 0xa5:	/* movs */
++		c->dst.type = OP_MEM;
++		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
++		c->dst.ptr = (unsigned long *)register_address(
++						   ctxt->es_base,
++						   c->regs[VCPU_REGS_RDI]);
++		if ((rc = ops->read_emulated(register_address(
++		      c->override_base ? *c->override_base :
++					ctxt->ds_base,
++					c->regs[VCPU_REGS_RSI]),
++					&c->dst.val,
++					c->dst.bytes, ctxt->vcpu)) != 0)
++			goto done;
++		register_address_increment(c->regs[VCPU_REGS_RSI],
++				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
++							   : c->dst.bytes);
++		register_address_increment(c->regs[VCPU_REGS_RDI],
++				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
++							   : c->dst.bytes);
++		break;
++	case 0xa6 ... 0xa7:	/* cmps */
++		c->src.type = OP_NONE; /* Disable writeback. */
++		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
++		c->src.ptr = (unsigned long *)register_address(
++				c->override_base ? *c->override_base :
++						   ctxt->ds_base,
++						   c->regs[VCPU_REGS_RSI]);
++		if ((rc = ops->read_emulated((unsigned long)c->src.ptr,
++						&c->src.val,
++						c->src.bytes,
++						ctxt->vcpu)) != 0)
++			goto done;
++
++		c->dst.type = OP_NONE; /* Disable writeback. */
++		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
++		c->dst.ptr = (unsigned long *)register_address(
++						   ctxt->es_base,
++						   c->regs[VCPU_REGS_RDI]);
++		if ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
++						&c->dst.val,
++						c->dst.bytes,
++						ctxt->vcpu)) != 0)
++			goto done;
++
++		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
++
++		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
++
++		register_address_increment(c->regs[VCPU_REGS_RSI],
++				       (ctxt->eflags & EFLG_DF) ? -c->src.bytes
++								  : c->src.bytes);
++		register_address_increment(c->regs[VCPU_REGS_RDI],
++				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
++								  : c->dst.bytes);
++
++		break;
++	case 0xaa ... 0xab:	/* stos */
++		c->dst.type = OP_MEM;
++		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
++		c->dst.ptr = (unsigned long *)register_address(
++						   ctxt->es_base,
++						   c->regs[VCPU_REGS_RDI]);
++		c->dst.val = c->regs[VCPU_REGS_RAX];
++		register_address_increment(c->regs[VCPU_REGS_RDI],
++				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
++							   : c->dst.bytes);
++		break;
++	case 0xac ... 0xad:	/* lods */
++		c->dst.type = OP_REG;
++		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
++		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
++		if ((rc = ops->read_emulated(register_address(
++				c->override_base ? *c->override_base :
++						   ctxt->ds_base,
++						 c->regs[VCPU_REGS_RSI]),
++						 &c->dst.val,
++						 c->dst.bytes,
++						 ctxt->vcpu)) != 0)
++			goto done;
++		register_address_increment(c->regs[VCPU_REGS_RSI],
++				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
++							   : c->dst.bytes);
++		break;
++	case 0xae ... 0xaf:	/* scas */
++		DPRINTF("Urk! I don't handle SCAS.\n");
++		goto cannot_emulate;
++	case 0xc0 ... 0xc1:
++		emulate_grp2(ctxt);
++		break;
++	case 0xc3: /* ret */
++		c->dst.ptr = &c->eip;
++		goto pop_instruction;
++	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
++	mov:
++		c->dst.val = c->src.val;
++		break;
++	case 0xd0 ... 0xd1:	/* Grp2 */
++		c->src.val = 1;
++		emulate_grp2(ctxt);
++		break;
++	case 0xd2 ... 0xd3:	/* Grp2 */
++		c->src.val = c->regs[VCPU_REGS_RCX];
++		emulate_grp2(ctxt);
++		break;
++	case 0xe8: /* call (near) */ {
++		long int rel;
++		switch (c->op_bytes) {
++		case 2:
++			rel = insn_fetch(s16, 2, c->eip);
++			break;
++		case 4:
++			rel = insn_fetch(s32, 4, c->eip);
++			break;
++		default:
++			DPRINTF("Call: Invalid op_bytes\n");
++			goto cannot_emulate;
++		}
++		c->src.val = (unsigned long) c->eip;
++		JMP_REL(rel);
++		c->op_bytes = c->ad_bytes;
++		emulate_push(ctxt);
++		break;
++	}
++	case 0xe9: /* jmp rel */
++	case 0xeb: /* jmp rel short */
++		JMP_REL(c->src.val);
++		c->dst.type = OP_NONE; /* Disable writeback. */
++		break;
++	case 0xf4:              /* hlt */
++		ctxt->vcpu->arch.halt_request = 1;
++		goto done;
++	case 0xf5:	/* cmc */
++		/* complement carry flag from eflags reg */
++		ctxt->eflags ^= EFLG_CF;
++		c->dst.type = OP_NONE;	/* Disable writeback. */
++		break;
++	case 0xf6 ... 0xf7:	/* Grp3 */
++		rc = emulate_grp3(ctxt, ops);
++		if (rc != 0)
++			goto done;
 +		break;
++	case 0xf8: /* clc */
++		ctxt->eflags &= ~EFLG_CF;
++		c->dst.type = OP_NONE;	/* Disable writeback. */
++		break;
++	case 0xfa: /* cli */
++		ctxt->eflags &= ~X86_EFLAGS_IF;
++		c->dst.type = OP_NONE;	/* Disable writeback. */
++		break;
++	case 0xfb: /* sti */
++		ctxt->eflags |= X86_EFLAGS_IF;
++		c->dst.type = OP_NONE;	/* Disable writeback. */
++		break;
++	case 0xfe ... 0xff:	/* Grp4/Grp5 */
++		rc = emulate_grp45(ctxt, ops);
++		if (rc != 0)
++			goto done;
++		break;
++	}
++
++writeback:
++	rc = writeback(ctxt, ops);
++	if (rc != 0)
++		goto done;
++
++	/* Commit shadow register state. */
++	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
++	ctxt->vcpu->arch.rip = c->eip;
++
++done:
++	if (rc == X86EMUL_UNHANDLEABLE) {
++		c->eip = saved_eip;
++		return -1;
 +	}
 +	return 0;
-+}
 +
-+#undef R32
-+#undef SEG32
++twobyte_insn:
++	switch (c->b) {
++	case 0x01: /* lgdt, lidt, lmsw */
++		switch (c->modrm_reg) {
++			u16 size;
++			unsigned long address;
++
++		case 0: /* vmcall */
++			if (c->modrm_mod != 3 || c->modrm_rm != 1)
++				goto cannot_emulate;
 +
-+static int genregs32_get(struct task_struct *target,
-+			 const struct user_regset *regset,
-+			 unsigned int pos, unsigned int count,
-+			 void *kbuf, void __user *ubuf)
-+{
-+	if (kbuf) {
-+		compat_ulong_t *k = kbuf;
-+		while (count > 0) {
-+			getreg32(target, pos, k++);
-+			count -= sizeof(*k);
-+			pos += sizeof(*k);
++			rc = kvm_fix_hypercall(ctxt->vcpu);
++			if (rc)
++				goto done;
++
++			kvm_emulate_hypercall(ctxt->vcpu);
++			break;
++		case 2: /* lgdt */
++			rc = read_descriptor(ctxt, ops, c->src.ptr,
++					     &size, &address, c->op_bytes);
++			if (rc)
++				goto done;
++			realmode_lgdt(ctxt->vcpu, size, address);
++			break;
++		case 3: /* lidt/vmmcall */
++			if (c->modrm_mod == 3 && c->modrm_rm == 1) {
++				rc = kvm_fix_hypercall(ctxt->vcpu);
++				if (rc)
++					goto done;
++				kvm_emulate_hypercall(ctxt->vcpu);
++			} else {
++				rc = read_descriptor(ctxt, ops, c->src.ptr,
++						     &size, &address,
++						     c->op_bytes);
++				if (rc)
++					goto done;
++				realmode_lidt(ctxt->vcpu, size, address);
++			}
++			break;
++		case 4: /* smsw */
++			if (c->modrm_mod != 3)
++				goto cannot_emulate;
++			*(u16 *)&c->regs[c->modrm_rm]
++				= realmode_get_cr(ctxt->vcpu, 0);
++			break;
++		case 6: /* lmsw */
++			if (c->modrm_mod != 3)
++				goto cannot_emulate;
++			realmode_lmsw(ctxt->vcpu, (u16)c->modrm_val,
++						  &ctxt->eflags);
++			break;
++		case 7: /* invlpg*/
++			emulate_invlpg(ctxt->vcpu, memop);
++			break;
++		default:
++			goto cannot_emulate;
 +		}
-+	} else {
-+		compat_ulong_t __user *u = ubuf;
-+		while (count > 0) {
-+			compat_ulong_t word;
-+			getreg32(target, pos, &word);
-+			if (__put_user(word, u++))
-+				return -EFAULT;
-+			count -= sizeof(*u);
-+			pos += sizeof(*u);
++		/* Disable writeback. */
++		c->dst.type = OP_NONE;
++		break;
++	case 0x06:
++		emulate_clts(ctxt->vcpu);
++		c->dst.type = OP_NONE;
++		break;
++	case 0x08:		/* invd */
++	case 0x09:		/* wbinvd */
++	case 0x0d:		/* GrpP (prefetch) */
++	case 0x18:		/* Grp16 (prefetch/nop) */
++		c->dst.type = OP_NONE;
++		break;
++	case 0x20: /* mov cr, reg */
++		if (c->modrm_mod != 3)
++			goto cannot_emulate;
++		c->regs[c->modrm_rm] =
++				realmode_get_cr(ctxt->vcpu, c->modrm_reg);
++		c->dst.type = OP_NONE;	/* no writeback */
++		break;
++	case 0x21: /* mov from dr to reg */
++		if (c->modrm_mod != 3)
++			goto cannot_emulate;
++		rc = emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
++		if (rc)
++			goto cannot_emulate;
++		c->dst.type = OP_NONE;	/* no writeback */
++		break;
++	case 0x22: /* mov reg, cr */
++		if (c->modrm_mod != 3)
++			goto cannot_emulate;
++		realmode_set_cr(ctxt->vcpu,
++				c->modrm_reg, c->modrm_val, &ctxt->eflags);
++		c->dst.type = OP_NONE;
++		break;
++	case 0x23: /* mov from reg to dr */
++		if (c->modrm_mod != 3)
++			goto cannot_emulate;
++		rc = emulator_set_dr(ctxt, c->modrm_reg,
++				     c->regs[c->modrm_rm]);
++		if (rc)
++			goto cannot_emulate;
++		c->dst.type = OP_NONE;	/* no writeback */
++		break;
++	case 0x30:
++		/* wrmsr */
++		msr_data = (u32)c->regs[VCPU_REGS_RAX]
++			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
++		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
++		if (rc) {
++			kvm_inject_gp(ctxt->vcpu, 0);
++			c->eip = ctxt->vcpu->arch.rip;
++		}
++		rc = X86EMUL_CONTINUE;
++		c->dst.type = OP_NONE;
++		break;
++	case 0x32:
++		/* rdmsr */
++		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
++		if (rc) {
++			kvm_inject_gp(ctxt->vcpu, 0);
++			c->eip = ctxt->vcpu->arch.rip;
++		} else {
++			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
++			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
++		}
++		rc = X86EMUL_CONTINUE;
++		c->dst.type = OP_NONE;
++		break;
++	case 0x40 ... 0x4f:	/* cmov */
++		c->dst.val = c->dst.orig_val = c->src.val;
++		if (!test_cc(c->b, ctxt->eflags))
++			c->dst.type = OP_NONE; /* no writeback */
++		break;
++	case 0x80 ... 0x8f: /* jnz rel, etc*/ {
++		long int rel;
++
++		switch (c->op_bytes) {
++		case 2:
++			rel = insn_fetch(s16, 2, c->eip);
++			break;
++		case 4:
++			rel = insn_fetch(s32, 4, c->eip);
++			break;
++		case 8:
++			rel = insn_fetch(s64, 8, c->eip);
++			break;
++		default:
++			DPRINTF("jnz: Invalid op_bytes\n");
++			goto cannot_emulate;
++		}
++		if (test_cc(c->b, ctxt->eflags))
++			JMP_REL(rel);
++		c->dst.type = OP_NONE;
++		break;
++	}
++	case 0xa3:
++	      bt:		/* bt */
++		c->dst.type = OP_NONE;
++		/* only subword offset */
++		c->src.val &= (c->dst.bytes << 3) - 1;
++		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0xab:
++	      bts:		/* bts */
++		/* only subword offset */
++		c->src.val &= (c->dst.bytes << 3) - 1;
++		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0xb0 ... 0xb1:	/* cmpxchg */
++		/*
++		 * Save real source value, then compare EAX against
++		 * destination.
++		 */
++		c->src.orig_val = c->src.val;
++		c->src.val = c->regs[VCPU_REGS_RAX];
++		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
++		if (ctxt->eflags & EFLG_ZF) {
++			/* Success: write back to memory. */
++			c->dst.val = c->src.orig_val;
++		} else {
++			/* Failure: write the value we saw to EAX. */
++			c->dst.type = OP_REG;
++			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
++		}
++		break;
++	case 0xb3:
++	      btr:		/* btr */
++		/* only subword offset */
++		c->src.val &= (c->dst.bytes << 3) - 1;
++		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0xb6 ... 0xb7:	/* movzx */
++		c->dst.bytes = c->op_bytes;
++		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
++						       : (u16) c->src.val;
++		break;
++	case 0xba:		/* Grp8 */
++		switch (c->modrm_reg & 3) {
++		case 0:
++			goto bt;
++		case 1:
++			goto bts;
++		case 2:
++			goto btr;
++		case 3:
++			goto btc;
 +		}
++		break;
++	case 0xbb:
++	      btc:		/* btc */
++		/* only subword offset */
++		c->src.val &= (c->dst.bytes << 3) - 1;
++		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
++		break;
++	case 0xbe ... 0xbf:	/* movsx */
++		c->dst.bytes = c->op_bytes;
++		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
++							(s16) c->src.val;
++		break;
++	case 0xc3:		/* movnti */
++		c->dst.bytes = c->op_bytes;
++		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
++							(u64) c->src.val;
++		break;
++	case 0xc7:		/* Grp9 (cmpxchg8b) */
++		rc = emulate_grp9(ctxt, ops, memop);
++		if (rc != 0)
++			goto done;
++		c->dst.type = OP_NONE;
++		break;
 +	}
++	goto writeback;
 +
-+	return 0;
++cannot_emulate:
++	DPRINTF("Cannot emulate %02x\n", c->b);
++	c->eip = saved_eip;
++	return -1;
++}
+diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
+index 19626ac..964dfa3 100644
+--- a/arch/x86/lguest/Kconfig
++++ b/arch/x86/lguest/Kconfig
+@@ -1,6 +1,7 @@
+ config LGUEST_GUEST
+ 	bool "Lguest guest support"
+ 	select PARAVIRT
++	depends on X86_32
+ 	depends on !X86_PAE
+ 	depends on !(X86_VISWS || X86_VOYAGER)
+ 	select VIRTIO
+diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
+index 92c5611..5afdde4 100644
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -67,6 +67,7 @@
+ #include <asm/mce.h>
+ #include <asm/io.h>
+ #include <asm/i387.h>
++#include <asm/reboot.h>		/* for struct machine_ops */
+ 
+ /*G:010 Welcome to the Guest!
+  *
+@@ -175,8 +176,8 @@ static void lguest_leave_lazy_mode(void)
+  * check there when it wants to deliver an interrupt.
+  */
+ 
+-/* save_flags() is expected to return the processor state (ie. "eflags").  The
+- * eflags word contains all kind of stuff, but in practice Linux only cares
++/* save_flags() is expected to return the processor state (ie. "flags").  The
++ * flags word contains all kind of stuff, but in practice Linux only cares
+  * about the interrupt flag.  Our "save_flags()" just returns that. */
+ static unsigned long save_fl(void)
+ {
+@@ -217,19 +218,20 @@ static void irq_enable(void)
+  * address of the handler, and... well, who cares?  The Guest just asks the
+  * Host to make the change anyway, because the Host controls the real IDT.
+  */
+-static void lguest_write_idt_entry(struct desc_struct *dt,
+-				   int entrynum, u32 low, u32 high)
++static void lguest_write_idt_entry(gate_desc *dt,
++				   int entrynum, const gate_desc *g)
+ {
++	u32 *desc = (u32 *)g;
+ 	/* Keep the local copy up to date. */
+-	write_dt_entry(dt, entrynum, low, high);
++	native_write_idt_entry(dt, entrynum, g);
+ 	/* Tell Host about this new entry. */
+-	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, low, high);
++	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
+ }
+ 
+ /* Changing to a different IDT is very rare: we keep the IDT up-to-date every
+  * time it is written, so we can simply loop through all entries and tell the
+  * Host about them. */
+-static void lguest_load_idt(const struct Xgt_desc_struct *desc)
++static void lguest_load_idt(const struct desc_ptr *desc)
+ {
+ 	unsigned int i;
+ 	struct desc_struct *idt = (void *)desc->address;
+@@ -252,7 +254,7 @@ static void lguest_load_idt(const struct Xgt_desc_struct *desc)
+  * hypercall and use that repeatedly to load a new IDT.  I don't think it
+  * really matters, but wouldn't it be nice if they were the same?
+  */
+-static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
++static void lguest_load_gdt(const struct desc_ptr *desc)
+ {
+ 	BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
+ 	hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
+@@ -261,10 +263,10 @@ static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
+ /* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
+  * then tell the Host to reload the entire thing.  This operation is so rare
+  * that this naive implementation is reasonable. */
+-static void lguest_write_gdt_entry(struct desc_struct *dt,
+-				   int entrynum, u32 low, u32 high)
++static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
++				   const void *desc, int type)
+ {
+-	write_dt_entry(dt, entrynum, low, high);
++	native_write_gdt_entry(dt, entrynum, desc, type);
+ 	hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
+ }
+ 
+@@ -323,30 +325,30 @@ static void lguest_load_tr_desc(void)
+  * anyone (including userspace) can just use the raw "cpuid" instruction and
+  * the Host won't even notice since it isn't privileged.  So we try not to get
+  * too worked up about it. */
+-static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
+-			 unsigned int *ecx, unsigned int *edx)
++static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
++			 unsigned int *cx, unsigned int *dx)
+ {
+-	int function = *eax;
++	int function = *ax;
+ 
+-	native_cpuid(eax, ebx, ecx, edx);
++	native_cpuid(ax, bx, cx, dx);
+ 	switch (function) {
+ 	case 1:	/* Basic feature request. */
+ 		/* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
+-		*ecx &= 0x00002201;
++		*cx &= 0x00002201;
+ 		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
+-		*edx &= 0x07808101;
++		*dx &= 0x07808101;
+ 		/* The Host can do a nice optimization if it knows that the
+ 		 * kernel mappings (addresses above 0xC0000000 or whatever
+ 		 * PAGE_OFFSET is set to) haven't changed.  But Linux calls
+ 		 * flush_tlb_user() for both user and kernel mappings unless
+ 		 * the Page Global Enable (PGE) feature bit is set. */
+-		*edx |= 0x00002000;
++		*dx |= 0x00002000;
+ 		break;
+ 	case 0x80000000:
+ 		/* Futureproof this a little: if they ask how much extended
+ 		 * processor information there is, limit it to known fields. */
+-		if (*eax > 0x80000008)
+-			*eax = 0x80000008;
++		if (*ax > 0x80000008)
++			*ax = 0x80000008;
+ 		break;
+ 	}
+ }
+@@ -755,10 +757,10 @@ static void lguest_time_init(void)
+  * segment), the privilege level (we're privilege level 1, the Host is 0 and
+  * will not tolerate us trying to use that), the stack pointer, and the number
+  * of pages in the stack. */
+-static void lguest_load_esp0(struct tss_struct *tss,
++static void lguest_load_sp0(struct tss_struct *tss,
+ 				     struct thread_struct *thread)
+ {
+-	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->esp0,
++	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0,
+ 		   THREAD_SIZE/PAGE_SIZE);
+ }
+ 
+@@ -788,11 +790,11 @@ static void lguest_wbinvd(void)
+  * code qualifies for Advanced.  It will also never interrupt anything.  It
+  * does, however, allow us to get through the Linux boot code. */
+ #ifdef CONFIG_X86_LOCAL_APIC
+-static void lguest_apic_write(unsigned long reg, unsigned long v)
++static void lguest_apic_write(unsigned long reg, u32 v)
+ {
+ }
+ 
+-static unsigned long lguest_apic_read(unsigned long reg)
++static u32 lguest_apic_read(unsigned long reg)
+ {
+ 	return 0;
+ }
+@@ -812,7 +814,7 @@ static void lguest_safe_halt(void)
+  * rather than virtual addresses, so we use __pa() here. */
+ static void lguest_power_off(void)
+ {
+-	hcall(LHCALL_CRASH, __pa("Power down"), 0, 0);
++	hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0);
+ }
+ 
+ /*
+@@ -822,7 +824,7 @@ static void lguest_power_off(void)
+  */
+ static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
+ {
+-	hcall(LHCALL_CRASH, __pa(p), 0, 0);
++	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0);
+ 	/* The hcall won't return, but to keep gcc happy, we're "done". */
+ 	return NOTIFY_DONE;
+ }
+@@ -926,6 +928,11 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
+ 	return insn_len;
+ }
+ 
++static void lguest_restart(char *reason)
++{
++	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0);
 +}
 +
-+static int genregs32_set(struct task_struct *target,
-+			 const struct user_regset *regset,
-+			 unsigned int pos, unsigned int count,
-+			 const void *kbuf, const void __user *ubuf)
+ /*G:030 Once we get to lguest_init(), we know we're a Guest.  The pv_ops
+  * structures in the kernel provide points for (almost) every routine we have
+  * to override to avoid privileged instructions. */
+@@ -957,7 +964,7 @@ __init void lguest_init(void)
+ 	pv_cpu_ops.cpuid = lguest_cpuid;
+ 	pv_cpu_ops.load_idt = lguest_load_idt;
+ 	pv_cpu_ops.iret = lguest_iret;
+-	pv_cpu_ops.load_esp0 = lguest_load_esp0;
++	pv_cpu_ops.load_sp0 = lguest_load_sp0;
+ 	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
+ 	pv_cpu_ops.set_ldt = lguest_set_ldt;
+ 	pv_cpu_ops.load_tls = lguest_load_tls;
+@@ -1059,6 +1066,7 @@ __init void lguest_init(void)
+ 	 * the Guest routine to power off. */
+ 	pm_power_off = lguest_power_off;
+ 
++	machine_ops.restart = lguest_restart;
+ 	/* Now we're set up, call start_kernel() in init/main.c and we proceed
+ 	 * to boot as normal.  It never returns. */
+ 	start_kernel();
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index 329da27..4876182 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -1,5 +1,27 @@
++#
++# Makefile for x86 specific library files.
++#
++
++obj-$(CONFIG_SMP) := msr-on-cpu.o
++
++lib-y := delay_$(BITS).o
++lib-y += usercopy_$(BITS).o getuser_$(BITS).o putuser_$(BITS).o
++lib-y += memcpy_$(BITS).o
++
+ ifeq ($(CONFIG_X86_32),y)
+-include ${srctree}/arch/x86/lib/Makefile_32
++        lib-y += checksum_32.o
++        lib-y += strstr_32.o
++        lib-y += bitops_32.o semaphore_32.o string_32.o
++
++        lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
+ else
+-include ${srctree}/arch/x86/lib/Makefile_64
++        obj-y += io_64.o iomap_copy_64.o
++
++        CFLAGS_csum-partial_64.o := -funroll-loops
++
++        lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
++        lib-y += thunk_64.o clear_page_64.o copy_page_64.o
++        lib-y += bitstr_64.o bitops_64.o
++        lib-y += memmove_64.o memset_64.o
++        lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
+ endif
+diff --git a/arch/x86/lib/Makefile_32 b/arch/x86/lib/Makefile_32
+deleted file mode 100644
+index 98d1f1e..0000000
+--- a/arch/x86/lib/Makefile_32
++++ /dev/null
+@@ -1,11 +0,0 @@
+-#
+-# Makefile for i386-specific library files..
+-#
+-
+-
+-lib-y = checksum_32.o delay_32.o usercopy_32.o getuser_32.o putuser_32.o memcpy_32.o strstr_32.o \
+-	bitops_32.o semaphore_32.o string_32.o
+-
+-lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
+-
+-obj-$(CONFIG_SMP)	+= msr-on-cpu.o
+diff --git a/arch/x86/lib/Makefile_64 b/arch/x86/lib/Makefile_64
+deleted file mode 100644
+index bbabad3..0000000
+--- a/arch/x86/lib/Makefile_64
++++ /dev/null
+@@ -1,13 +0,0 @@
+-#
+-# Makefile for x86_64-specific library files.
+-#
+-
+-CFLAGS_csum-partial_64.o := -funroll-loops
+-
+-obj-y := io_64.o iomap_copy_64.o
+-obj-$(CONFIG_SMP)	+= msr-on-cpu.o
+-
+-lib-y := csum-partial_64.o csum-copy_64.o csum-wrappers_64.o delay_64.o \
+-	usercopy_64.o getuser_64.o putuser_64.o  \
+-	thunk_64.o clear_page_64.o copy_page_64.o bitstr_64.o bitops_64.o
+-lib-y += memcpy_64.o memmove_64.o memset_64.o copy_user_64.o rwlock_64.o copy_user_nocache_64.o
+diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
+index 8ac51b8..37756b6 100644
+--- a/arch/x86/lib/memcpy_32.c
++++ b/arch/x86/lib/memcpy_32.c
+@@ -34,8 +34,8 @@ void *memmove(void *dest, const void *src, size_t n)
+ 			"cld"
+ 			: "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ 			:"0" (n),
+-			 "1" (n-1+(const char *)src),
+-			 "2" (n-1+(char *)dest)
++			 "1" (n-1+src),
++			 "2" (n-1+dest)
+ 			:"memory");
+ 	}
+ 	return dest;
+diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c
+index 751ebae..80175e4 100644
+--- a/arch/x86/lib/memmove_64.c
++++ b/arch/x86/lib/memmove_64.c
+@@ -11,8 +11,8 @@ void *memmove(void * dest,const void *src,size_t count)
+ 	if (dest < src) { 
+ 		return memcpy(dest,src,count);
+ 	} else {
+-		char *p = (char *) dest + count;
+-		char *s = (char *) src + count;
++		char *p = dest + count;
++		const char *s = src + count;
+ 		while (count--)
+ 			*--p = *--s;
+ 	}
+diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
+index 444fba4..3899bd3 100644
+--- a/arch/x86/lib/semaphore_32.S
++++ b/arch/x86/lib/semaphore_32.S
+@@ -29,7 +29,7 @@
+  * registers (%eax, %edx and %ecx) except %eax which is either a return
+  * value or just clobbered..
+  */
+-	.section .sched.text
++	.section .sched.text, "ax"
+ ENTRY(__down_failed)
+ 	CFI_STARTPROC
+ 	FRAME
+@@ -49,7 +49,7 @@ ENTRY(__down_failed)
+ 	ENDFRAME
+ 	ret
+ 	CFI_ENDPROC
+-	END(__down_failed)
++	ENDPROC(__down_failed)
+ 
+ ENTRY(__down_failed_interruptible)
+ 	CFI_STARTPROC
+@@ -70,7 +70,7 @@ ENTRY(__down_failed_interruptible)
+ 	ENDFRAME
+ 	ret
+ 	CFI_ENDPROC
+-	END(__down_failed_interruptible)
++	ENDPROC(__down_failed_interruptible)
+ 
+ ENTRY(__down_failed_trylock)
+ 	CFI_STARTPROC
+@@ -91,7 +91,7 @@ ENTRY(__down_failed_trylock)
+ 	ENDFRAME
+ 	ret
+ 	CFI_ENDPROC
+-	END(__down_failed_trylock)
++	ENDPROC(__down_failed_trylock)
+ 
+ ENTRY(__up_wakeup)
+ 	CFI_STARTPROC
+@@ -112,7 +112,7 @@ ENTRY(__up_wakeup)
+ 	ENDFRAME
+ 	ret
+ 	CFI_ENDPROC
+-	END(__up_wakeup)
++	ENDPROC(__up_wakeup)
+ 
+ /*
+  * rw spinlock fallbacks
+@@ -132,7 +132,7 @@ ENTRY(__write_lock_failed)
+ 	ENDFRAME
+ 	ret
+ 	CFI_ENDPROC
+-	END(__write_lock_failed)
++	ENDPROC(__write_lock_failed)
+ 
+ ENTRY(__read_lock_failed)
+ 	CFI_STARTPROC
+@@ -148,7 +148,7 @@ ENTRY(__read_lock_failed)
+ 	ENDFRAME
+ 	ret
+ 	CFI_ENDPROC
+-	END(__read_lock_failed)
++	ENDPROC(__read_lock_failed)
+ 
+ #endif
+ 
+@@ -170,7 +170,7 @@ ENTRY(call_rwsem_down_read_failed)
+ 	CFI_ADJUST_CFA_OFFSET -4
+ 	ret
+ 	CFI_ENDPROC
+-	END(call_rwsem_down_read_failed)
++	ENDPROC(call_rwsem_down_read_failed)
+ 
+ ENTRY(call_rwsem_down_write_failed)
+ 	CFI_STARTPROC
+@@ -182,7 +182,7 @@ ENTRY(call_rwsem_down_write_failed)
+ 	CFI_ADJUST_CFA_OFFSET -4
+ 	ret
+ 	CFI_ENDPROC
+-	END(call_rwsem_down_write_failed)
++	ENDPROC(call_rwsem_down_write_failed)
+ 
+ ENTRY(call_rwsem_wake)
+ 	CFI_STARTPROC
+@@ -196,7 +196,7 @@ ENTRY(call_rwsem_wake)
+ 	CFI_ADJUST_CFA_OFFSET -4
+ 1:	ret
+ 	CFI_ENDPROC
+-	END(call_rwsem_wake)
++	ENDPROC(call_rwsem_wake)
+ 
+ /* Fix up special calling conventions */
+ ENTRY(call_rwsem_downgrade_wake)
+@@ -214,6 +214,6 @@ ENTRY(call_rwsem_downgrade_wake)
+ 	CFI_ADJUST_CFA_OFFSET -4
+ 	ret
+ 	CFI_ENDPROC
+-	END(call_rwsem_downgrade_wake)
++	ENDPROC(call_rwsem_downgrade_wake)
+ 
+ #endif
+diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
+index 6ea73f3..8b92d42 100644
+--- a/arch/x86/lib/thunk_64.S
++++ b/arch/x86/lib/thunk_64.S
+@@ -33,7 +33,7 @@
+ 	.endm
+ 	
+ 
+-	.section .sched.text
++	.section .sched.text, "ax"
+ #ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
+ 	thunk rwsem_down_read_failed_thunk,rwsem_down_read_failed
+ 	thunk rwsem_down_write_failed_thunk,rwsem_down_write_failed
+diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
+index 8bab2b2..9c4ffd5 100644
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -817,6 +817,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
+ #endif
+ 	return n;
+ }
++EXPORT_SYMBOL(__copy_from_user_ll_nocache);
+ 
+ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
+ 					unsigned long n)
+@@ -831,6 +832,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+ #endif
+ 	return n;
+ }
++EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+ 
+ /**
+  * copy_to_user: - Copy a block of data into user space.
+diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
+new file mode 100644
+index 0000000..1faac81
+--- /dev/null
++++ b/arch/x86/mach-rdc321x/Makefile
+@@ -0,0 +1,5 @@
++#
++# Makefile for the RDC321x specific parts of the kernel
++#
++obj-$(CONFIG_X86_RDC321X)        := gpio.o platform.o wdt.o
++
+diff --git a/arch/x86/mach-rdc321x/gpio.c b/arch/x86/mach-rdc321x/gpio.c
+new file mode 100644
+index 0000000..0312691
+--- /dev/null
++++ b/arch/x86/mach-rdc321x/gpio.c
+@@ -0,0 +1,91 @@
++/*
++ *  Copyright (C) 2007, OpenWrt.org, Florian Fainelli <florian at openwrt.org>
++ *  	RDC321x architecture specific GPIO support
++ *
++ *  This program is free software; you can redistribute  it and/or modify it
++ *  under  the terms of  the GNU General  Public License as published by the
++ *  Free Software Foundation;  either version 2 of the  License, or (at your
++ *  option) any later version.
++ */
++
++#include <linux/autoconf.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/types.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++
++#include <asm/mach-rdc321x/rdc321x_defs.h>
++
++static inline int rdc_gpio_is_valid(unsigned gpio)
 +{
-+	int ret = 0;
-+	if (kbuf) {
-+		const compat_ulong_t *k = kbuf;
-+		while (count > 0 && !ret) {
-+			ret = putreg(target, pos, *k++);
-+			count -= sizeof(*k);
-+			pos += sizeof(*k);
-+		}
-+	} else {
-+		const compat_ulong_t __user *u = ubuf;
-+		while (count > 0 && !ret) {
-+			compat_ulong_t word;
-+			ret = __get_user(word, u++);
-+			if (ret)
-+				break;
-+			ret = putreg(target, pos, word);
-+			count -= sizeof(*u);
-+			pos += sizeof(*u);
-+		}
-+	}
-+	return ret;
++	return (gpio <= RDC_MAX_GPIO);
 +}
 +
-+static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
++static unsigned int rdc_gpio_read(unsigned gpio)
 +{
-+	siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
-+	compat_siginfo_t __user *si32 = compat_ptr(data);
-+	siginfo_t ssi;
-+	int ret;
++	unsigned int val;
 +
-+	if (request == PTRACE_SETSIGINFO) {
-+		memset(&ssi, 0, sizeof(siginfo_t));
-+		ret = copy_siginfo_from_user32(&ssi, si32);
-+		if (ret)
-+			return ret;
-+		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
-+			return -EFAULT;
-+	}
-+	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
-+	if (ret)
-+		return ret;
-+	if (request == PTRACE_GETSIGINFO) {
-+		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
-+			return -EFAULT;
-+		ret = copy_siginfo_to_user32(si32, &ssi);
-+	}
-+	return ret;
++	val = 0x80000000 | (7 << 11) | ((gpio&0x20?0x84:0x48));
++	outl(val, RDC3210_CFGREG_ADDR);
++	udelay(10);
++	val = inl(RDC3210_CFGREG_DATA);
++	val |= (0x1 << (gpio & 0x1F));
++	outl(val, RDC3210_CFGREG_DATA);
++	udelay(10);
++	val = 0x80000000 | (7 << 11) | ((gpio&0x20?0x88:0x4C));
++	outl(val, RDC3210_CFGREG_ADDR);
++	udelay(10);
++	val = inl(RDC3210_CFGREG_DATA);
++
++	return val;
 +}
 +
-+asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
++static void rdc_gpio_write(unsigned int val)
 +{
-+	struct task_struct *child;
-+	struct pt_regs *childregs;
-+	void __user *datap = compat_ptr(data);
-+	int ret;
-+	__u32 val;
-+
-+	switch (request) {
-+	case PTRACE_TRACEME:
-+	case PTRACE_ATTACH:
-+	case PTRACE_KILL:
-+	case PTRACE_CONT:
-+	case PTRACE_SINGLESTEP:
-+	case PTRACE_SINGLEBLOCK:
-+	case PTRACE_DETACH:
-+	case PTRACE_SYSCALL:
-+	case PTRACE_OLDSETOPTIONS:
-+	case PTRACE_SETOPTIONS:
-+	case PTRACE_SET_THREAD_AREA:
-+	case PTRACE_GET_THREAD_AREA:
-+	case PTRACE_BTS_CONFIG:
-+	case PTRACE_BTS_STATUS:
-+	case PTRACE_BTS_SIZE:
-+	case PTRACE_BTS_GET:
-+	case PTRACE_BTS_CLEAR:
-+	case PTRACE_BTS_DRAIN:
-+		return sys_ptrace(request, pid, addr, data);
++	if (val) {
++		outl(val, RDC3210_CFGREG_DATA);
++		udelay(10);
++	}
++}
 +
-+	default:
++int rdc_gpio_get_value(unsigned gpio)
++{
++	if (rdc_gpio_is_valid(gpio))
++		return (int)rdc_gpio_read(gpio);
++	else
 +		return -EINVAL;
++}
++EXPORT_SYMBOL(rdc_gpio_get_value);
 +
-+	case PTRACE_PEEKTEXT:
-+	case PTRACE_PEEKDATA:
-+	case PTRACE_POKEDATA:
-+	case PTRACE_POKETEXT:
-+	case PTRACE_POKEUSR:
-+	case PTRACE_PEEKUSR:
-+	case PTRACE_GETREGS:
-+	case PTRACE_SETREGS:
-+	case PTRACE_SETFPREGS:
-+	case PTRACE_GETFPREGS:
-+	case PTRACE_SETFPXREGS:
-+	case PTRACE_GETFPXREGS:
-+	case PTRACE_GETEVENTMSG:
-+		break;
++void rdc_gpio_set_value(unsigned gpio, int value)
++{
++	unsigned int val;
 +
-+	case PTRACE_SETSIGINFO:
-+	case PTRACE_GETSIGINFO:
-+		return ptrace32_siginfo(request, pid, addr, data);
-+	}
++	if (!rdc_gpio_is_valid(gpio))
++		return;
 +
-+	child = ptrace_get_task_struct(pid);
-+	if (IS_ERR(child))
-+		return PTR_ERR(child);
++	val = rdc_gpio_read(gpio);
 +
-+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-+	if (ret < 0)
-+		goto out;
++	if (value)
++		val &= ~(0x1 << (gpio & 0x1F));
++	else
++		val |= (0x1 << (gpio & 0x1F));
 +
-+	childregs = task_pt_regs(child);
++	rdc_gpio_write(val);
++}
++EXPORT_SYMBOL(rdc_gpio_set_value);
 +
-+	switch (request) {
-+	case PTRACE_PEEKUSR:
-+		ret = getreg32(child, addr, &val);
-+		if (ret == 0)
-+			ret = put_user(val, (__u32 __user *)datap);
-+		break;
++int rdc_gpio_direction_input(unsigned gpio)
++{
++	return 0;
++}
++EXPORT_SYMBOL(rdc_gpio_direction_input);
 +
-+	case PTRACE_POKEUSR:
-+		ret = putreg32(child, addr, data);
-+		break;
++int rdc_gpio_direction_output(unsigned gpio, int value)
++{
++	return 0;
++}
++EXPORT_SYMBOL(rdc_gpio_direction_output);
 +
-+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
-+		return copy_regset_to_user(child, &user_x86_32_view,
-+					   REGSET_GENERAL,
-+					   0, sizeof(struct user_regs_struct32),
-+					   datap);
 +
-+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
-+		return copy_regset_from_user(child, &user_x86_32_view,
-+					     REGSET_GENERAL, 0,
-+					     sizeof(struct user_regs_struct32),
-+					     datap);
+diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c
+new file mode 100644
+index 0000000..dda6024
+--- /dev/null
++++ b/arch/x86/mach-rdc321x/platform.c
+@@ -0,0 +1,68 @@
++/*
++ *  Generic RDC321x platform devices
++ *
++ *  Copyright (C) 2007 Florian Fainelli <florian at openwrt.org>
++ *
++ *  This program is free software; you can redistribute it and/or
++ *  modify it under the terms of the GNU General Public License
++ *  as published by the Free Software Foundation; either version 2
++ *  of the License, or (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *  GNU General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the
++ *  Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
++ *  Boston, MA  02110-1301, USA.
++ *
++ */
 +
-+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
-+		return copy_regset_to_user(child, &user_x86_32_view,
-+					   REGSET_FP, 0,
-+					   sizeof(struct user_i387_ia32_struct),
-+					   datap);
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/version.h>
++#include <linux/leds.h>
 +
-+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
-+		return copy_regset_from_user(
-+			child, &user_x86_32_view, REGSET_FP,
-+			0, sizeof(struct user_i387_ia32_struct), datap);
++#include <asm/gpio.h>
 +
-+	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
-+		return copy_regset_to_user(child, &user_x86_32_view,
-+					   REGSET_XFP, 0,
-+					   sizeof(struct user32_fxsr_struct),
-+					   datap);
++/* LEDS */
++static struct gpio_led default_leds[] = {
++	{ .name = "rdc:dmz", .gpio = 1, },
++};
 +
-+	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
-+		return copy_regset_from_user(child, &user_x86_32_view,
-+					     REGSET_XFP, 0,
-+					     sizeof(struct user32_fxsr_struct),
-+					     datap);
++static struct gpio_led_platform_data rdc321x_led_data = {
++	.num_leds = ARRAY_SIZE(default_leds),
++	.leds = default_leds,
++};
 +
-+	default:
-+		return compat_ptrace_request(child, request, addr, data);
++static struct platform_device rdc321x_leds = {
++	.name = "leds-gpio",
++	.id = -1,
++	.dev = {
++		.platform_data = &rdc321x_led_data,
 +	}
++};
 +
-+ out:
-+	put_task_struct(child);
-+	return ret;
++/* Watchdog */
++static struct platform_device rdc321x_wdt = {
++	.name = "rdc321x-wdt",
++	.id = -1,
++	.num_resources = 0,
++};
++
++static struct platform_device *rdc321x_devs[] = {
++	&rdc321x_leds,
++	&rdc321x_wdt
++};
++
++static int __init rdc_board_setup(void)
++{
++	return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
 +}
 +
-+#endif	/* CONFIG_IA32_EMULATION */
++arch_initcall(rdc_board_setup);
+diff --git a/arch/x86/mach-rdc321x/wdt.c b/arch/x86/mach-rdc321x/wdt.c
+new file mode 100644
+index 0000000..ec5625a
+--- /dev/null
++++ b/arch/x86/mach-rdc321x/wdt.c
+@@ -0,0 +1,275 @@
++/*
++ * RDC321x watchdog driver
++ *
++ * Copyright (C) 2007 Florian Fainelli <florian at openwrt.org>
++ *
++ * This driver is highly inspired from the cpu5_wdt driver
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ */
 +
-+#ifdef CONFIG_X86_64
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/ioport.h>
++#include <linux/timer.h>
++#include <linux/completion.h>
++#include <linux/jiffies.h>
++#include <linux/platform_device.h>
++#include <linux/watchdog.h>
++#include <linux/io.h>
++#include <linux/uaccess.h>
 +
-+static const struct user_regset x86_64_regsets[] = {
-+	[REGSET_GENERAL] = {
-+		.core_note_type = NT_PRSTATUS,
-+		.n = sizeof(struct user_regs_struct) / sizeof(long),
-+		.size = sizeof(long), .align = sizeof(long),
-+		.get = genregs_get, .set = genregs_set
-+	},
-+	[REGSET_FP] = {
-+		.core_note_type = NT_PRFPREG,
-+		.n = sizeof(struct user_i387_struct) / sizeof(long),
-+		.size = sizeof(long), .align = sizeof(long),
-+		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
-+	},
-+};
++#include <asm/mach-rdc321x/rdc321x_defs.h>
 +
-+static const struct user_regset_view user_x86_64_view = {
-+	.name = "x86_64", .e_machine = EM_X86_64,
-+	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
-+};
++#define RDC_WDT_MASK	0x80000000 /* Mask */
++#define RDC_WDT_EN	0x00800000 /* Enable bit */
++#define RDC_WDT_WTI	0x00200000 /* Generate CPU reset/NMI/WDT on timeout */
++#define RDC_WDT_RST	0x00100000 /* Reset bit */
++#define RDC_WDT_WIF	0x00040000 /* WDT IRQ Flag */
++#define RDC_WDT_IRT	0x00000100 /* IRQ Routing table */
++#define RDC_WDT_CNT	0x00000001 /* WDT count */
 +
-+#else  /* CONFIG_X86_32 */
++#define RDC_CLS_TMR	0x80003844 /* Clear timer */
 +
-+#define user_regs_struct32	user_regs_struct
-+#define genregs32_get		genregs_get
-+#define genregs32_set		genregs_set
++#define RDC_WDT_INTERVAL	(HZ/10+1)
 +
-+#endif	/* CONFIG_X86_64 */
++int nowayout = WATCHDOG_NOWAYOUT;
++module_param(nowayout, int, 0);
++MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 +
-+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-+static const struct user_regset x86_32_regsets[] = {
-+	[REGSET_GENERAL] = {
-+		.core_note_type = NT_PRSTATUS,
-+		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
-+		.size = sizeof(u32), .align = sizeof(u32),
-+		.get = genregs32_get, .set = genregs32_set
-+	},
-+	[REGSET_FP] = {
-+		.core_note_type = NT_PRFPREG,
-+		.n = sizeof(struct user_i387_struct) / sizeof(u32),
-+		.size = sizeof(u32), .align = sizeof(u32),
-+		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
-+	},
-+	[REGSET_XFP] = {
-+		.core_note_type = NT_PRXFPREG,
-+		.n = sizeof(struct user_i387_struct) / sizeof(u32),
-+		.size = sizeof(u32), .align = sizeof(u32),
-+		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
-+	},
-+	[REGSET_TLS] = {
-+		.core_note_type = NT_386_TLS,
-+		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
-+		.size = sizeof(struct user_desc),
-+		.align = sizeof(struct user_desc),
-+		.active = regset_tls_active,
-+		.get = regset_tls_get, .set = regset_tls_set
-+	},
-+};
++static int ticks = 1000;
 +
-+static const struct user_regset_view user_x86_32_view = {
-+	.name = "i386", .e_machine = EM_386,
-+	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
-+};
-+#endif
++/* some device data */
++
++static struct {
++	struct completion stop;
++	volatile int running;
++	struct timer_list timer;
++	volatile int queue;
++	int default_ticks;
++	unsigned long inuse;
++} rdc321x_wdt_device;
++
++/* generic helper functions */
++
++static void rdc321x_wdt_trigger(unsigned long unused)
++{
++	if (rdc321x_wdt_device.running)
++		ticks--;
++
++	/* keep watchdog alive */
++	outl(RDC_WDT_EN|inl(RDC3210_CFGREG_DATA), RDC3210_CFGREG_DATA);
++
++	/* requeue?? */
++	if (rdc321x_wdt_device.queue && ticks)
++		mod_timer(&rdc321x_wdt_device.timer,
++				jiffies + RDC_WDT_INTERVAL);
++	else {
++		/* ticks doesn't matter anyway */
++		complete(&rdc321x_wdt_device.stop);
++	}
 +
-+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-+{
-+#ifdef CONFIG_IA32_EMULATION
-+	if (test_tsk_thread_flag(task, TIF_IA32))
-+#endif
-+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-+		return &user_x86_32_view;
-+#endif
-+#ifdef CONFIG_X86_64
-+	return &user_x86_64_view;
-+#endif
 +}
 +
-+#ifdef CONFIG_X86_32
++static void rdc321x_wdt_reset(void)
++{
++	ticks = rdc321x_wdt_device.default_ticks;
++}
 +
-+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
++static void rdc321x_wdt_start(void)
 +{
-+	struct siginfo info;
++	if (!rdc321x_wdt_device.queue) {
++		rdc321x_wdt_device.queue = 1;
 +
-+	tsk->thread.trap_no = 1;
-+	tsk->thread.error_code = error_code;
++		/* Clear the timer */
++		outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR);
 +
-+	memset(&info, 0, sizeof(info));
-+	info.si_signo = SIGTRAP;
-+	info.si_code = TRAP_BRKPT;
++		/* Enable watchdog and set the timeout to 81.92 us */
++		outl(RDC_WDT_EN|RDC_WDT_CNT, RDC3210_CFGREG_DATA);
 +
-+	/* User-mode ip? */
-+	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
++		mod_timer(&rdc321x_wdt_device.timer,
++				jiffies + RDC_WDT_INTERVAL);
++	}
 +
-+	/* Send us the fake SIGTRAP */
-+	force_sig_info(SIGTRAP, &info, tsk);
++	/* if process dies, counter is not decremented */
++	rdc321x_wdt_device.running++;
 +}
 +
-+/* notification of system call entry/exit
-+ * - triggered by current->work.syscall_trace
-+ */
-+__attribute__((regparm(3)))
-+int do_syscall_trace(struct pt_regs *regs, int entryexit)
++static int rdc321x_wdt_stop(void)
 +{
-+	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
-+	/*
-+	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
-+	 * interception
-+	 */
-+	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
-+	int ret = 0;
++	if (rdc321x_wdt_device.running)
++		rdc321x_wdt_device.running = 0;
 +
-+	/* do the secure computing check first */
-+	if (!entryexit)
-+		secure_computing(regs->orig_ax);
++	ticks = rdc321x_wdt_device.default_ticks;
 +
-+	if (unlikely(current->audit_context)) {
-+		if (entryexit)
-+			audit_syscall_exit(AUDITSC_RESULT(regs->ax),
-+						regs->ax);
-+		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
-+		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
-+		 * not used, entry.S will call us only on syscall exit, not
-+		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
-+		 * calling send_sigtrap() on syscall entry.
-+		 *
-+		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
-+		 * is_singlestep is false, despite his name, so we will still do
-+		 * the correct thing.
-+		 */
-+		else if (is_singlestep)
-+			goto out;
-+	}
++	return -EIO;
++}
 +
-+	if (!(current->ptrace & PT_PTRACED))
-+		goto out;
++/* filesystem operations */
 +
-+	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
-+	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
-+	 * here. We have to check this and return */
-+	if (is_sysemu && entryexit)
-+		return 0;
++static int rdc321x_wdt_open(struct inode *inode, struct file *file)
++{
++	if (test_and_set_bit(0, &rdc321x_wdt_device.inuse))
++		return -EBUSY;
 +
-+	/* Fake a debug trap */
-+	if (is_singlestep)
-+		send_sigtrap(current, regs, 0);
++	return nonseekable_open(inode, file);
++}
 +
-+ 	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
-+		goto out;
++static int rdc321x_wdt_release(struct inode *inode, struct file *file)
++{
++	clear_bit(0, &rdc321x_wdt_device.inuse);
++	return 0;
++}
 +
-+	/* the 0x80 provides a way for the tracing parent to distinguish
-+	   between a syscall stop and SIGTRAP delivery */
-+	/* Note that the debugger could change the result of test_thread_flag!*/
-+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));
++static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file,
++				unsigned int cmd, unsigned long arg)
++{
++	void __user *argp = (void __user *)arg;
++	unsigned int value;
++	static struct watchdog_info ident = {
++		.options = WDIOF_CARDRESET,
++		.identity = "RDC321x WDT",
++	};
 +
-+	/*
-+	 * this isn't the same as continuing with a signal, but it will do
-+	 * for normal use.  strace only continues with a signal if the
-+	 * stopping signal is not SIGTRAP.  -brl
-+	 */
-+	if (current->exit_code) {
-+		send_sig(current->exit_code, current, 1);
-+		current->exit_code = 0;
++	switch (cmd) {
++	case WDIOC_KEEPALIVE:
++		rdc321x_wdt_reset();
++		break;
++	case WDIOC_GETSTATUS:
++		/* Read the value from the DATA register */
++		value = inl(RDC3210_CFGREG_DATA);
++		if (copy_to_user(argp, &value, sizeof(int)))
++			return -EFAULT;
++		break;
++	case WDIOC_GETSUPPORT:
++		if (copy_to_user(argp, &ident, sizeof(ident)))
++			return -EFAULT;
++		break;
++	case WDIOC_SETOPTIONS:
++		if (copy_from_user(&value, argp, sizeof(int)))
++			return -EFAULT;
++		switch (value) {
++		case WDIOS_ENABLECARD:
++			rdc321x_wdt_start();
++			break;
++		case WDIOS_DISABLECARD:
++			return rdc321x_wdt_stop();
++		default:
++			return -EINVAL;
++		}
++		break;
++	default:
++		return -ENOTTY;
 +	}
-+	ret = is_sysemu;
-+out:
-+	if (unlikely(current->audit_context) && !entryexit)
-+		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
-+				    regs->bx, regs->cx, regs->dx, regs->si);
-+	if (ret == 0)
-+		return 0;
++	return 0;
++}
 +
-+	regs->orig_ax = -1; /* force skip of syscall restarting */
-+	if (unlikely(current->audit_context))
-+		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
-+	return 1;
++static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf,
++				size_t count, loff_t *ppos)
++{
++	if (!count)
++		return -EIO;
++
++	rdc321x_wdt_reset();
++
++	return count;
 +}
 +
-+#else  /* CONFIG_X86_64 */
++static const struct file_operations rdc321x_wdt_fops = {
++	.owner		= THIS_MODULE,
++	.llseek		= no_llseek,
++	.ioctl		= rdc321x_wdt_ioctl,
++	.open		= rdc321x_wdt_open,
++	.write		= rdc321x_wdt_write,
++	.release	= rdc321x_wdt_release,
++};
 +
-+static void syscall_trace(struct pt_regs *regs)
-+{
++static struct miscdevice rdc321x_wdt_misc = {
++	.minor	= WATCHDOG_MINOR,
++	.name	= "watchdog",
++	.fops	= &rdc321x_wdt_fops,
++};
 +
-+#if 0
-+	printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
-+	       current->comm,
-+	       regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
-+	       current_thread_info()->flags, current->ptrace);
-+#endif
++static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
++{
++	int err;
 +
-+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-+				? 0x80 : 0));
-+	/*
-+	 * this isn't the same as continuing with a signal, but it will do
-+	 * for normal use.  strace only continues with a signal if the
-+	 * stopping signal is not SIGTRAP.  -brl
-+	 */
-+	if (current->exit_code) {
-+		send_sig(current->exit_code, current, 1);
-+		current->exit_code = 0;
++	err = misc_register(&rdc321x_wdt_misc);
++	if (err < 0) {
++		printk(KERN_ERR PFX "watchdog misc_register failed\n");
++		return err;
 +	}
++
++	/* Reset the watchdog */
++	outl(RDC_WDT_RST, RDC3210_CFGREG_DATA);
++
++	init_completion(&rdc321x_wdt_device.stop);
++	rdc321x_wdt_device.queue = 0;
++
++	clear_bit(0, &rdc321x_wdt_device.inuse);
++
++	setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
++
++	rdc321x_wdt_device.default_ticks = ticks;
++
++	printk(KERN_INFO PFX "watchdog init success\n");
++
++	return 0;
 +}
 +
-+asmlinkage void syscall_trace_enter(struct pt_regs *regs)
++static int rdc321x_wdt_remove(struct platform_device *pdev)
 +{
-+	/* do the secure computing check first */
-+	secure_computing(regs->orig_ax);
++	if (rdc321x_wdt_device.queue) {
++		rdc321x_wdt_device.queue = 0;
++		wait_for_completion(&rdc321x_wdt_device.stop);
++	}
 +
-+	if (test_thread_flag(TIF_SYSCALL_TRACE)
-+	    && (current->ptrace & PT_PTRACED))
-+		syscall_trace(regs);
++	misc_deregister(&rdc321x_wdt_misc);
 +
-+	if (unlikely(current->audit_context)) {
-+		if (test_thread_flag(TIF_IA32)) {
-+			audit_syscall_entry(AUDIT_ARCH_I386,
-+					    regs->orig_ax,
-+					    regs->bx, regs->cx,
-+					    regs->dx, regs->si);
-+		} else {
-+			audit_syscall_entry(AUDIT_ARCH_X86_64,
-+					    regs->orig_ax,
-+					    regs->di, regs->si,
-+					    regs->dx, regs->r10);
-+		}
-+	}
++	return 0;
 +}
 +
-+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
++static struct platform_driver rdc321x_wdt_driver = {
++	.probe = rdc321x_wdt_probe,
++	.remove = rdc321x_wdt_remove,
++	.driver = {
++		.owner = THIS_MODULE,
++		.name = "rdc321x-wdt",
++	},
++};
++
++static int __init rdc321x_wdt_init(void)
 +{
-+	if (unlikely(current->audit_context))
-+		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
++	return platform_driver_register(&rdc321x_wdt_driver);
++}
 +
-+	if ((test_thread_flag(TIF_SYSCALL_TRACE)
-+	     || test_thread_flag(TIF_SINGLESTEP))
-+	    && (current->ptrace & PT_PTRACED))
-+		syscall_trace(regs);
++static void __exit rdc321x_wdt_exit(void)
++{
++	platform_driver_unregister(&rdc321x_wdt_driver);
 +}
 +
-+#endif	/* CONFIG_X86_32 */
-diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
-deleted file mode 100644
-index ff5431c..0000000
---- a/arch/x86/kernel/ptrace_32.c
-+++ /dev/null
-@@ -1,717 +0,0 @@
--/* By Ross Biro 1/23/92 */
--/*
-- * Pentium III FXSR, SSE support
-- *	Gareth Hughes <gareth at valinux.com>, May 2000
-- */
--
--#include <linux/kernel.h>
--#include <linux/sched.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/errno.h>
--#include <linux/ptrace.h>
--#include <linux/user.h>
--#include <linux/security.h>
--#include <linux/audit.h>
--#include <linux/seccomp.h>
--#include <linux/signal.h>
--
--#include <asm/uaccess.h>
--#include <asm/pgtable.h>
--#include <asm/system.h>
--#include <asm/processor.h>
--#include <asm/i387.h>
--#include <asm/debugreg.h>
--#include <asm/ldt.h>
--#include <asm/desc.h>
--
--/*
-- * does not yet catch signals sent when the child dies.
-- * in exit.c or in signal.c.
-- */
--
--/*
-- * Determines which flags the user has access to [1 = access, 0 = no access].
-- * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9).
-- * Also masks reserved bits (31-22, 15, 5, 3, 1).
-- */
--#define FLAG_MASK 0x00050dd5
--
--/* set's the trap flag. */
--#define TRAP_FLAG 0x100
--
--/*
-- * Offset of eflags on child stack..
-- */
--#define EFL_OFFSET offsetof(struct pt_regs, eflags)
--
--static inline struct pt_regs *get_child_regs(struct task_struct *task)
--{
--	void *stack_top = (void *)task->thread.esp0;
--	return stack_top - sizeof(struct pt_regs);
--}
--
--/*
-- * This routine will get a word off of the processes privileged stack.
-- * the offset is bytes into the pt_regs structure on the stack.
-- * This routine assumes that all the privileged stacks are in our
-- * data space.
-- */   
--static inline int get_stack_long(struct task_struct *task, int offset)
--{
--	unsigned char *stack;
--
--	stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
--	stack += offset;
--	return (*((int *)stack));
--}
--
--/*
-- * This routine will put a word on the processes privileged stack.
-- * the offset is bytes into the pt_regs structure on the stack.
-- * This routine assumes that all the privileged stacks are in our
-- * data space.
-- */
--static inline int put_stack_long(struct task_struct *task, int offset,
--	unsigned long data)
--{
--	unsigned char * stack;
--
--	stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
--	stack += offset;
--	*(unsigned long *) stack = data;
--	return 0;
--}
--
--static int putreg(struct task_struct *child,
--	unsigned long regno, unsigned long value)
--{
--	switch (regno >> 2) {
--		case GS:
--			if (value && (value & 3) != 3)
--				return -EIO;
--			child->thread.gs = value;
--			return 0;
--		case DS:
--		case ES:
--		case FS:
--			if (value && (value & 3) != 3)
--				return -EIO;
--			value &= 0xffff;
--			break;
--		case SS:
--		case CS:
--			if ((value & 3) != 3)
--				return -EIO;
--			value &= 0xffff;
--			break;
--		case EFL:
--			value &= FLAG_MASK;
--			value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
--			break;
--	}
--	if (regno > FS*4)
--		regno -= 1*4;
--	put_stack_long(child, regno, value);
--	return 0;
--}
--
--static unsigned long getreg(struct task_struct *child,
--	unsigned long regno)
--{
--	unsigned long retval = ~0UL;
--
--	switch (regno >> 2) {
--		case GS:
--			retval = child->thread.gs;
--			break;
--		case DS:
--		case ES:
--		case FS:
--		case SS:
--		case CS:
--			retval = 0xffff;
--			/* fall through */
--		default:
--			if (regno > FS*4)
--				regno -= 1*4;
--			retval &= get_stack_long(child, regno);
--	}
--	return retval;
--}
--
--#define LDT_SEGMENT 4
--
--static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs)
--{
--	unsigned long addr, seg;
--
--	addr = regs->eip;
--	seg = regs->xcs & 0xffff;
--	if (regs->eflags & VM_MASK) {
--		addr = (addr & 0xffff) + (seg << 4);
--		return addr;
--	}
--
--	/*
--	 * We'll assume that the code segments in the GDT
--	 * are all zero-based. That is largely true: the
--	 * TLS segments are used for data, and the PNPBIOS
--	 * and APM bios ones we just ignore here.
--	 */
--	if (seg & LDT_SEGMENT) {
--		u32 *desc;
--		unsigned long base;
--
--		seg &= ~7UL;
--
--		mutex_lock(&child->mm->context.lock);
--		if (unlikely((seg >> 3) >= child->mm->context.size))
--			addr = -1L; /* bogus selector, access would fault */
--		else {
--			desc = child->mm->context.ldt + seg;
--			base = ((desc[0] >> 16) |
--				((desc[1] & 0xff) << 16) |
--				(desc[1] & 0xff000000));
--
--			/* 16-bit code segment? */
--			if (!((desc[1] >> 22) & 1))
--				addr &= 0xffff;
--			addr += base;
--		}
--		mutex_unlock(&child->mm->context.lock);
--	}
--	return addr;
--}
--
--static inline int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
--{
--	int i, copied;
--	unsigned char opcode[15];
--	unsigned long addr = convert_eip_to_linear(child, regs);
--
--	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
--	for (i = 0; i < copied; i++) {
--		switch (opcode[i]) {
--		/* popf and iret */
--		case 0x9d: case 0xcf:
--			return 1;
--		/* opcode and address size prefixes */
--		case 0x66: case 0x67:
--			continue;
--		/* irrelevant prefixes (segment overrides and repeats) */
--		case 0x26: case 0x2e:
--		case 0x36: case 0x3e:
--		case 0x64: case 0x65:
--		case 0xf0: case 0xf2: case 0xf3:
--			continue;
--
--		/*
--		 * pushf: NOTE! We should probably not let
--		 * the user see the TF bit being set. But
--		 * it's more pain than it's worth to avoid
--		 * it, and a debugger could emulate this
--		 * all in user space if it _really_ cares.
--		 */
--		case 0x9c:
--		default:
--			return 0;
--		}
--	}
--	return 0;
--}
--
--static void set_singlestep(struct task_struct *child)
--{
--	struct pt_regs *regs = get_child_regs(child);
--
--	/*
--	 * Always set TIF_SINGLESTEP - this guarantees that 
--	 * we single-step system calls etc..  This will also
--	 * cause us to set TF when returning to user mode.
--	 */
--	set_tsk_thread_flag(child, TIF_SINGLESTEP);
--
--	/*
--	 * If TF was already set, don't do anything else
--	 */
--	if (regs->eflags & TRAP_FLAG)
--		return;
--
--	/* Set TF on the kernel stack.. */
--	regs->eflags |= TRAP_FLAG;
--
--	/*
--	 * ..but if TF is changed by the instruction we will trace,
--	 * don't mark it as being "us" that set it, so that we
--	 * won't clear it by hand later.
--	 */
--	if (is_setting_trap_flag(child, regs))
--		return;
--	
--	child->ptrace |= PT_DTRACE;
--}
--
--static void clear_singlestep(struct task_struct *child)
--{
--	/* Always clear TIF_SINGLESTEP... */
--	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
--
--	/* But touch TF only if it was set by us.. */
--	if (child->ptrace & PT_DTRACE) {
--		struct pt_regs *regs = get_child_regs(child);
--		regs->eflags &= ~TRAP_FLAG;
--		child->ptrace &= ~PT_DTRACE;
--	}
--}
--
--/*
-- * Called by kernel/ptrace.c when detaching..
-- *
-- * Make sure the single step bit is not set.
-- */
--void ptrace_disable(struct task_struct *child)
--{ 
--	clear_singlestep(child);
--	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
--}
--
--/*
-- * Perform get_thread_area on behalf of the traced child.
-- */
--static int
--ptrace_get_thread_area(struct task_struct *child,
--		       int idx, struct user_desc __user *user_desc)
--{
--	struct user_desc info;
--	struct desc_struct *desc;
--
--/*
-- * Get the current Thread-Local Storage area:
-- */
--
--#define GET_BASE(desc) ( \
--	(((desc)->a >> 16) & 0x0000ffff) | \
--	(((desc)->b << 16) & 0x00ff0000) | \
--	( (desc)->b        & 0xff000000)   )
--
--#define GET_LIMIT(desc) ( \
--	((desc)->a & 0x0ffff) | \
--	 ((desc)->b & 0xf0000) )
--
--#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
--#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
--#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
--#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
--#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
--#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
--
--	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
--		return -EINVAL;
--
--	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
--
--	info.entry_number = idx;
--	info.base_addr = GET_BASE(desc);
--	info.limit = GET_LIMIT(desc);
--	info.seg_32bit = GET_32BIT(desc);
--	info.contents = GET_CONTENTS(desc);
--	info.read_exec_only = !GET_WRITABLE(desc);
--	info.limit_in_pages = GET_LIMIT_PAGES(desc);
--	info.seg_not_present = !GET_PRESENT(desc);
--	info.useable = GET_USEABLE(desc);
--
--	if (copy_to_user(user_desc, &info, sizeof(info)))
--		return -EFAULT;
--
--	return 0;
--}
--
--/*
-- * Perform set_thread_area on behalf of the traced child.
-- */
--static int
--ptrace_set_thread_area(struct task_struct *child,
--		       int idx, struct user_desc __user *user_desc)
--{
--	struct user_desc info;
--	struct desc_struct *desc;
--
--	if (copy_from_user(&info, user_desc, sizeof(info)))
--		return -EFAULT;
--
--	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
--		return -EINVAL;
--
--	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
--	if (LDT_empty(&info)) {
--		desc->a = 0;
--		desc->b = 0;
--	} else {
--		desc->a = LDT_entry_a(&info);
--		desc->b = LDT_entry_b(&info);
--	}
--
--	return 0;
--}
--
--long arch_ptrace(struct task_struct *child, long request, long addr, long data)
--{
--	struct user * dummy = NULL;
--	int i, ret;
--	unsigned long __user *datap = (unsigned long __user *)data;
--
--	switch (request) {
--	/* when I and D space are separate, these will need to be fixed. */
--	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
--	case PTRACE_PEEKDATA:
--		ret = generic_ptrace_peekdata(child, addr, data);
--		break;
--
--	/* read the word at location addr in the USER area. */
--	case PTRACE_PEEKUSR: {
--		unsigned long tmp;
--
--		ret = -EIO;
--		if ((addr & 3) || addr < 0 || 
--		    addr > sizeof(struct user) - 3)
--			break;
--
--		tmp = 0;  /* Default return condition */
--		if(addr < FRAME_SIZE*sizeof(long))
--			tmp = getreg(child, addr);
--		if(addr >= (long) &dummy->u_debugreg[0] &&
--		   addr <= (long) &dummy->u_debugreg[7]){
--			addr -= (long) &dummy->u_debugreg[0];
--			addr = addr >> 2;
--			tmp = child->thread.debugreg[addr];
--		}
--		ret = put_user(tmp, datap);
--		break;
--	}
--
--	/* when I and D space are separate, this will have to be fixed. */
--	case PTRACE_POKETEXT: /* write the word at location addr. */
--	case PTRACE_POKEDATA:
--		ret = generic_ptrace_pokedata(child, addr, data);
--		break;
--
--	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
--		ret = -EIO;
--		if ((addr & 3) || addr < 0 || 
--		    addr > sizeof(struct user) - 3)
--			break;
--
--		if (addr < FRAME_SIZE*sizeof(long)) {
--			ret = putreg(child, addr, data);
--			break;
--		}
--		/* We need to be very careful here.  We implicitly
--		   want to modify a portion of the task_struct, and we
--		   have to be selective about what portions we allow someone
--		   to modify. */
--
--		  ret = -EIO;
--		  if(addr >= (long) &dummy->u_debugreg[0] &&
--		     addr <= (long) &dummy->u_debugreg[7]){
--
--			  if(addr == (long) &dummy->u_debugreg[4]) break;
--			  if(addr == (long) &dummy->u_debugreg[5]) break;
--			  if(addr < (long) &dummy->u_debugreg[4] &&
--			     ((unsigned long) data) >= TASK_SIZE-3) break;
--			  
--			  /* Sanity-check data. Take one half-byte at once with
--			   * check = (val >> (16 + 4*i)) & 0xf. It contains the
--			   * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
--			   * 2 and 3 are LENi. Given a list of invalid values,
--			   * we do mask |= 1 << invalid_value, so that
--			   * (mask >> check) & 1 is a correct test for invalid
--			   * values.
--			   *
--			   * R/Wi contains the type of the breakpoint /
--			   * watchpoint, LENi contains the length of the watched
--			   * data in the watchpoint case.
--			   *
--			   * The invalid values are:
--			   * - LENi == 0x10 (undefined), so mask |= 0x0f00.
--			   * - R/Wi == 0x10 (break on I/O reads or writes), so
--			   *   mask |= 0x4444.
--			   * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
--			   *   0x1110.
--			   *
--			   * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
--			   *
--			   * See the Intel Manual "System Programming Guide",
--			   * 15.2.4
--			   *
--			   * Note that LENi == 0x10 is defined on x86_64 in long
--			   * mode (i.e. even for 32-bit userspace software, but
--			   * 64-bit kernel), so the x86_64 mask value is 0x5454.
--			   * See the AMD manual no. 24593 (AMD64 System
--			   * Programming)*/
--
--			  if(addr == (long) &dummy->u_debugreg[7]) {
--				  data &= ~DR_CONTROL_RESERVED;
--				  for(i=0; i<4; i++)
--					  if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
--						  goto out_tsk;
--				  if (data)
--					  set_tsk_thread_flag(child, TIF_DEBUG);
--				  else
--					  clear_tsk_thread_flag(child, TIF_DEBUG);
--			  }
--			  addr -= (long) &dummy->u_debugreg;
--			  addr = addr >> 2;
--			  child->thread.debugreg[addr] = data;
--			  ret = 0;
--		  }
--		  break;
--
--	case PTRACE_SYSEMU: /* continue and stop at next syscall, which will not be executed */
--	case PTRACE_SYSCALL:	/* continue and stop at next (return from) syscall */
--	case PTRACE_CONT:	/* restart after signal. */
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		if (request == PTRACE_SYSEMU) {
--			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
--			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		} else if (request == PTRACE_SYSCALL) {
--			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
--		} else {
--			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
--			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		}
--		child->exit_code = data;
--		/* make sure the single step bit is not set. */
--		clear_singlestep(child);
--		wake_up_process(child);
--		ret = 0;
--		break;
--
--/*
-- * make the child exit.  Best I can do is send it a sigkill. 
-- * perhaps it should be put in the status that it wants to 
-- * exit.
-- */
--	case PTRACE_KILL:
--		ret = 0;
--		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
--			break;
--		child->exit_code = SIGKILL;
--		/* make sure the single step bit is not set. */
--		clear_singlestep(child);
--		wake_up_process(child);
--		break;
--
--	case PTRACE_SYSEMU_SINGLESTEP: /* Same as SYSEMU, but singlestep if not syscall */
--	case PTRACE_SINGLESTEP:	/* set the trap flag. */
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--
--		if (request == PTRACE_SYSEMU_SINGLESTEP)
--			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
--		else
--			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
--
--		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
--		set_singlestep(child);
--		child->exit_code = data;
--		/* give it a chance to run. */
--		wake_up_process(child);
--		ret = 0;
--		break;
--
--	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
--	  	if (!access_ok(VERIFY_WRITE, datap, FRAME_SIZE*sizeof(long))) {
--			ret = -EIO;
--			break;
--		}
--		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
--			__put_user(getreg(child, i), datap);
--			datap++;
--		}
--		ret = 0;
--		break;
--	}
--
--	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
--		unsigned long tmp;
--	  	if (!access_ok(VERIFY_READ, datap, FRAME_SIZE*sizeof(long))) {
--			ret = -EIO;
--			break;
--		}
--		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
--			__get_user(tmp, datap);
--			putreg(child, i, tmp);
--			datap++;
--		}
--		ret = 0;
--		break;
--	}
--
--	case PTRACE_GETFPREGS: { /* Get the child FPU state. */
--		if (!access_ok(VERIFY_WRITE, datap,
--			       sizeof(struct user_i387_struct))) {
--			ret = -EIO;
--			break;
--		}
--		ret = 0;
--		if (!tsk_used_math(child))
--			init_fpu(child);
--		get_fpregs((struct user_i387_struct __user *)data, child);
--		break;
--	}
--
--	case PTRACE_SETFPREGS: { /* Set the child FPU state. */
--		if (!access_ok(VERIFY_READ, datap,
--			       sizeof(struct user_i387_struct))) {
--			ret = -EIO;
--			break;
--		}
--		set_stopped_child_used_math(child);
--		set_fpregs(child, (struct user_i387_struct __user *)data);
--		ret = 0;
--		break;
--	}
--
--	case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
--		if (!access_ok(VERIFY_WRITE, datap,
--			       sizeof(struct user_fxsr_struct))) {
--			ret = -EIO;
--			break;
--		}
--		if (!tsk_used_math(child))
--			init_fpu(child);
--		ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
--		break;
--	}
--
--	case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
--		if (!access_ok(VERIFY_READ, datap,
--			       sizeof(struct user_fxsr_struct))) {
--			ret = -EIO;
--			break;
--		}
--		set_stopped_child_used_math(child);
--		ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
--		break;
--	}
--
--	case PTRACE_GET_THREAD_AREA:
--		ret = ptrace_get_thread_area(child, addr,
--					(struct user_desc __user *) data);
--		break;
--
--	case PTRACE_SET_THREAD_AREA:
--		ret = ptrace_set_thread_area(child, addr,
--					(struct user_desc __user *) data);
--		break;
--
--	default:
--		ret = ptrace_request(child, request, addr, data);
--		break;
--	}
-- out_tsk:
--	return ret;
--}
--
--void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
--{
--	struct siginfo info;
--
--	tsk->thread.trap_no = 1;
--	tsk->thread.error_code = error_code;
--
--	memset(&info, 0, sizeof(info));
--	info.si_signo = SIGTRAP;
--	info.si_code = TRAP_BRKPT;
--
--	/* User-mode eip? */
--	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
--
--	/* Send us the fake SIGTRAP */
--	force_sig_info(SIGTRAP, &info, tsk);
--}
--
--/* notification of system call entry/exit
-- * - triggered by current->work.syscall_trace
-- */
--__attribute__((regparm(3)))
--int do_syscall_trace(struct pt_regs *regs, int entryexit)
--{
--	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
--	/*
--	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
--	 * interception
--	 */
--	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
--	int ret = 0;
--
--	/* do the secure computing check first */
--	if (!entryexit)
--		secure_computing(regs->orig_eax);
--
--	if (unlikely(current->audit_context)) {
--		if (entryexit)
--			audit_syscall_exit(AUDITSC_RESULT(regs->eax),
--						regs->eax);
--		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
--		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
--		 * not used, entry.S will call us only on syscall exit, not
--		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
--		 * calling send_sigtrap() on syscall entry.
--		 *
--		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
--		 * is_singlestep is false, despite his name, so we will still do
--		 * the correct thing.
--		 */
--		else if (is_singlestep)
--			goto out;
--	}
--
--	if (!(current->ptrace & PT_PTRACED))
--		goto out;
--
--	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
--	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
--	 * here. We have to check this and return */
--	if (is_sysemu && entryexit)
--		return 0;
--
--	/* Fake a debug trap */
--	if (is_singlestep)
--		send_sigtrap(current, regs, 0);
--
-- 	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
--		goto out;
--
--	/* the 0x80 provides a way for the tracing parent to distinguish
--	   between a syscall stop and SIGTRAP delivery */
--	/* Note that the debugger could change the result of test_thread_flag!*/
--	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));
--
--	/*
--	 * this isn't the same as continuing with a signal, but it will do
--	 * for normal use.  strace only continues with a signal if the
--	 * stopping signal is not SIGTRAP.  -brl
--	 */
--	if (current->exit_code) {
--		send_sig(current->exit_code, current, 1);
--		current->exit_code = 0;
--	}
--	ret = is_sysemu;
--out:
--	if (unlikely(current->audit_context) && !entryexit)
--		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
--				    regs->ebx, regs->ecx, regs->edx, regs->esi);
--	if (ret == 0)
--		return 0;
--
--	regs->orig_eax = -1; /* force skip of syscall restarting */
--	if (unlikely(current->audit_context))
--		audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
--	return 1;
--}
-diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
-deleted file mode 100644
-index 607085f..0000000
---- a/arch/x86/kernel/ptrace_64.c
-+++ /dev/null
-@@ -1,621 +0,0 @@
--/* By Ross Biro 1/23/92 */
--/*
-- * Pentium III FXSR, SSE support
-- *	Gareth Hughes <gareth at valinux.com>, May 2000
-- * 
-- * x86-64 port 2000-2002 Andi Kleen
-- */
--
--#include <linux/kernel.h>
--#include <linux/sched.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/errno.h>
--#include <linux/ptrace.h>
--#include <linux/user.h>
--#include <linux/security.h>
--#include <linux/audit.h>
--#include <linux/seccomp.h>
--#include <linux/signal.h>
--
--#include <asm/uaccess.h>
--#include <asm/pgtable.h>
--#include <asm/system.h>
--#include <asm/processor.h>
--#include <asm/i387.h>
--#include <asm/debugreg.h>
--#include <asm/ldt.h>
--#include <asm/desc.h>
--#include <asm/proto.h>
--#include <asm/ia32.h>
--
--/*
-- * does not yet catch signals sent when the child dies.
-- * in exit.c or in signal.c.
-- */
--
--/*
-- * Determines which flags the user has access to [1 = access, 0 = no access].
-- * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
-- * Also masks reserved bits (63-22, 15, 5, 3, 1).
-- */
--#define FLAG_MASK 0x54dd5UL
--
--/* set's the trap flag. */
--#define TRAP_FLAG 0x100UL
--
--/*
-- * eflags and offset of eflags on child stack..
-- */
--#define EFLAGS offsetof(struct pt_regs, eflags)
--#define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
--
--/*
-- * this routine will get a word off of the processes privileged stack. 
-- * the offset is how far from the base addr as stored in the TSS.  
-- * this routine assumes that all the privileged stacks are in our
-- * data space.
-- */   
--static inline unsigned long get_stack_long(struct task_struct *task, int offset)
--{
--	unsigned char *stack;
--
--	stack = (unsigned char *)task->thread.rsp0;
--	stack += offset;
--	return (*((unsigned long *)stack));
--}
--
--/*
-- * this routine will put a word on the processes privileged stack. 
-- * the offset is how far from the base addr as stored in the TSS.  
-- * this routine assumes that all the privileged stacks are in our
-- * data space.
-- */
--static inline long put_stack_long(struct task_struct *task, int offset,
--	unsigned long data)
--{
--	unsigned char * stack;
--
--	stack = (unsigned char *) task->thread.rsp0;
--	stack += offset;
--	*(unsigned long *) stack = data;
--	return 0;
--}
--
--#define LDT_SEGMENT 4
--
--unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
--{
--	unsigned long addr, seg;
--
--	addr = regs->rip;
--	seg = regs->cs & 0xffff;
--
--	/*
--	 * We'll assume that the code segments in the GDT
--	 * are all zero-based. That is largely true: the
--	 * TLS segments are used for data, and the PNPBIOS
--	 * and APM bios ones we just ignore here.
--	 */
--	if (seg & LDT_SEGMENT) {
--		u32 *desc;
--		unsigned long base;
--
--		seg &= ~7UL;
--
--		mutex_lock(&child->mm->context.lock);
--		if (unlikely((seg >> 3) >= child->mm->context.size))
--			addr = -1L; /* bogus selector, access would fault */
--		else {
--			desc = child->mm->context.ldt + seg;
--			base = ((desc[0] >> 16) |
--				((desc[1] & 0xff) << 16) |
--				(desc[1] & 0xff000000));
--
--			/* 16-bit code segment? */
--			if (!((desc[1] >> 22) & 1))
--				addr &= 0xffff;
--			addr += base;
--		}
--		mutex_unlock(&child->mm->context.lock);
--	}
--
--	return addr;
--}
--
--static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
--{
--	int i, copied;
--	unsigned char opcode[15];
--	unsigned long addr = convert_rip_to_linear(child, regs);
--
--	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
--	for (i = 0; i < copied; i++) {
--		switch (opcode[i]) {
--		/* popf and iret */
--		case 0x9d: case 0xcf:
--			return 1;
--
--			/* CHECKME: 64 65 */
--
--		/* opcode and address size prefixes */
--		case 0x66: case 0x67:
--			continue;
--		/* irrelevant prefixes (segment overrides and repeats) */
--		case 0x26: case 0x2e:
--		case 0x36: case 0x3e:
--		case 0x64: case 0x65:
--		case 0xf2: case 0xf3:
--			continue;
--
--		case 0x40 ... 0x4f:
--			if (regs->cs != __USER_CS)
--				/* 32-bit mode: register increment */
--				return 0;
--			/* 64-bit mode: REX prefix */
--			continue;
--
--			/* CHECKME: f2, f3 */
--
--		/*
--		 * pushf: NOTE! We should probably not let
--		 * the user see the TF bit being set. But
--		 * it's more pain than it's worth to avoid
--		 * it, and a debugger could emulate this
--		 * all in user space if it _really_ cares.
--		 */
--		case 0x9c:
--		default:
--			return 0;
--		}
--	}
--	return 0;
--}
--
--static void set_singlestep(struct task_struct *child)
--{
--	struct pt_regs *regs = task_pt_regs(child);
--
--	/*
--	 * Always set TIF_SINGLESTEP - this guarantees that
--	 * we single-step system calls etc..  This will also
--	 * cause us to set TF when returning to user mode.
--	 */
--	set_tsk_thread_flag(child, TIF_SINGLESTEP);
--
--	/*
--	 * If TF was already set, don't do anything else
--	 */
--	if (regs->eflags & TRAP_FLAG)
--		return;
--
--	/* Set TF on the kernel stack.. */
--	regs->eflags |= TRAP_FLAG;
--
--	/*
--	 * ..but if TF is changed by the instruction we will trace,
--	 * don't mark it as being "us" that set it, so that we
--	 * won't clear it by hand later.
--	 */
--	if (is_setting_trap_flag(child, regs))
--		return;
--
--	child->ptrace |= PT_DTRACE;
--}
--
--static void clear_singlestep(struct task_struct *child)
--{
--	/* Always clear TIF_SINGLESTEP... */
--	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
--
--	/* But touch TF only if it was set by us.. */
--	if (child->ptrace & PT_DTRACE) {
--		struct pt_regs *regs = task_pt_regs(child);
--		regs->eflags &= ~TRAP_FLAG;
--		child->ptrace &= ~PT_DTRACE;
--	}
--}
--
--/*
-- * Called by kernel/ptrace.c when detaching..
-- *
-- * Make sure the single step bit is not set.
-- */
--void ptrace_disable(struct task_struct *child)
--{ 
--	clear_singlestep(child);
--}
--
--static int putreg(struct task_struct *child,
--	unsigned long regno, unsigned long value)
--{
--	unsigned long tmp; 
--	
--	switch (regno) {
--		case offsetof(struct user_regs_struct,fs):
--			if (value && (value & 3) != 3)
--				return -EIO;
--			child->thread.fsindex = value & 0xffff; 
--			return 0;
--		case offsetof(struct user_regs_struct,gs):
--			if (value && (value & 3) != 3)
--				return -EIO;
--			child->thread.gsindex = value & 0xffff;
--			return 0;
--		case offsetof(struct user_regs_struct,ds):
--			if (value && (value & 3) != 3)
--				return -EIO;
--			child->thread.ds = value & 0xffff;
--			return 0;
--		case offsetof(struct user_regs_struct,es): 
--			if (value && (value & 3) != 3)
--				return -EIO;
--			child->thread.es = value & 0xffff;
--			return 0;
--		case offsetof(struct user_regs_struct,ss):
--			if ((value & 3) != 3)
--				return -EIO;
--			value &= 0xffff;
--			return 0;
--		case offsetof(struct user_regs_struct,fs_base):
--			if (value >= TASK_SIZE_OF(child))
--				return -EIO;
--			child->thread.fs = value;
--			return 0;
--		case offsetof(struct user_regs_struct,gs_base):
--			if (value >= TASK_SIZE_OF(child))
--				return -EIO;
--			child->thread.gs = value;
--			return 0;
--		case offsetof(struct user_regs_struct, eflags):
--			value &= FLAG_MASK;
--			tmp = get_stack_long(child, EFL_OFFSET); 
--			tmp &= ~FLAG_MASK; 
--			value |= tmp;
--			break;
--		case offsetof(struct user_regs_struct,cs): 
--			if ((value & 3) != 3)
--				return -EIO;
--			value &= 0xffff;
--			break;
--	}
--	put_stack_long(child, regno - sizeof(struct pt_regs), value);
--	return 0;
--}
--
--static unsigned long getreg(struct task_struct *child, unsigned long regno)
--{
--	unsigned long val;
--	switch (regno) {
--		case offsetof(struct user_regs_struct, fs):
--			return child->thread.fsindex;
--		case offsetof(struct user_regs_struct, gs):
--			return child->thread.gsindex;
--		case offsetof(struct user_regs_struct, ds):
--			return child->thread.ds;
--		case offsetof(struct user_regs_struct, es):
--			return child->thread.es; 
--		case offsetof(struct user_regs_struct, fs_base):
--			return child->thread.fs;
--		case offsetof(struct user_regs_struct, gs_base):
--			return child->thread.gs;
--		default:
--			regno = regno - sizeof(struct pt_regs);
--			val = get_stack_long(child, regno);
--			if (test_tsk_thread_flag(child, TIF_IA32))
--				val &= 0xffffffff;
--			return val;
--	}
--
--}
--
--long arch_ptrace(struct task_struct *child, long request, long addr, long data)
--{
--	long i, ret;
--	unsigned ui;
--
--	switch (request) {
--	/* when I and D space are separate, these will need to be fixed. */
--	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
--	case PTRACE_PEEKDATA:
--		ret = generic_ptrace_peekdata(child, addr, data);
--		break;
--
--	/* read the word at location addr in the USER area. */
--	case PTRACE_PEEKUSR: {
--		unsigned long tmp;
--
--		ret = -EIO;
--		if ((addr & 7) ||
--		    addr > sizeof(struct user) - 7)
--			break;
--
--		switch (addr) { 
--		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
--			tmp = getreg(child, addr);
--			break;
--		case offsetof(struct user, u_debugreg[0]):
--			tmp = child->thread.debugreg0;
--			break;
--		case offsetof(struct user, u_debugreg[1]):
--			tmp = child->thread.debugreg1;
--			break;
--		case offsetof(struct user, u_debugreg[2]):
--			tmp = child->thread.debugreg2;
--			break;
--		case offsetof(struct user, u_debugreg[3]):
--			tmp = child->thread.debugreg3;
--			break;
--		case offsetof(struct user, u_debugreg[6]):
--			tmp = child->thread.debugreg6;
--			break;
--		case offsetof(struct user, u_debugreg[7]):
--			tmp = child->thread.debugreg7;
--			break;
--		default:
--			tmp = 0;
--			break;
--		}
--		ret = put_user(tmp,(unsigned long __user *) data);
--		break;
--	}
--
--	/* when I and D space are separate, this will have to be fixed. */
--	case PTRACE_POKETEXT: /* write the word at location addr. */
--	case PTRACE_POKEDATA:
--		ret = generic_ptrace_pokedata(child, addr, data);
--		break;
--
--	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
--	{
--		int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
--		ret = -EIO;
--		if ((addr & 7) ||
--		    addr > sizeof(struct user) - 7)
--			break;
--
--		switch (addr) { 
--		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
--			ret = putreg(child, addr, data);
--			break;
--		/* Disallows to set a breakpoint into the vsyscall */
--		case offsetof(struct user, u_debugreg[0]):
--			if (data >= TASK_SIZE_OF(child) - dsize) break;
--			child->thread.debugreg0 = data;
--			ret = 0;
--			break;
--		case offsetof(struct user, u_debugreg[1]):
--			if (data >= TASK_SIZE_OF(child) - dsize) break;
--			child->thread.debugreg1 = data;
--			ret = 0;
--			break;
--		case offsetof(struct user, u_debugreg[2]):
--			if (data >= TASK_SIZE_OF(child) - dsize) break;
--			child->thread.debugreg2 = data;
--			ret = 0;
--			break;
--		case offsetof(struct user, u_debugreg[3]):
--			if (data >= TASK_SIZE_OF(child) - dsize) break;
--			child->thread.debugreg3 = data;
--			ret = 0;
--			break;
--		case offsetof(struct user, u_debugreg[6]):
--				  if (data >> 32)
--				break; 
--			child->thread.debugreg6 = data;
--			ret = 0;
--			break;
--		case offsetof(struct user, u_debugreg[7]):
--			/* See arch/i386/kernel/ptrace.c for an explanation of
--			 * this awkward check.*/
--			data &= ~DR_CONTROL_RESERVED;
--			for(i=0; i<4; i++)
--				if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
--					break;
--			if (i == 4) {
--			  child->thread.debugreg7 = data;
--			  if (data)
--			  	set_tsk_thread_flag(child, TIF_DEBUG);
--			  else
--			  	clear_tsk_thread_flag(child, TIF_DEBUG);
--			  ret = 0;
--		  	}
--		  break;
--		}
--		break;
--	}
--	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
--	case PTRACE_CONT:    /* restart after signal. */
--
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		if (request == PTRACE_SYSCALL)
--			set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
--		else
--			clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
--		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
--		child->exit_code = data;
--		/* make sure the single step bit is not set. */
--		clear_singlestep(child);
--		wake_up_process(child);
--		ret = 0;
--		break;
--
--#ifdef CONFIG_IA32_EMULATION
--		/* This makes only sense with 32bit programs. Allow a
--		   64bit debugger to fully examine them too. Better
--		   don't use it against 64bit processes, use
--		   PTRACE_ARCH_PRCTL instead. */
--	case PTRACE_SET_THREAD_AREA: {
--		struct user_desc __user *p;
--		int old; 
--		p = (struct user_desc __user *)data;
--		get_user(old,  &p->entry_number); 
--		put_user(addr, &p->entry_number);
--		ret = do_set_thread_area(&child->thread, p);
--		put_user(old,  &p->entry_number); 
--		break;
--	case PTRACE_GET_THREAD_AREA:
--		p = (struct user_desc __user *)data;
--		get_user(old,  &p->entry_number); 
--		put_user(addr, &p->entry_number);
--		ret = do_get_thread_area(&child->thread, p);
--		put_user(old,  &p->entry_number); 
--		break;
--	} 
--#endif
--		/* normal 64bit interface to access TLS data. 
--		   Works just like arch_prctl, except that the arguments
--		   are reversed. */
--	case PTRACE_ARCH_PRCTL: 
--		ret = do_arch_prctl(child, data, addr);
--		break;
--
--/*
-- * make the child exit.  Best I can do is send it a sigkill. 
-- * perhaps it should be put in the status that it wants to 
-- * exit.
-- */
--	case PTRACE_KILL:
--		ret = 0;
--		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
--			break;
--		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
--		child->exit_code = SIGKILL;
--		/* make sure the single step bit is not set. */
--		clear_singlestep(child);
--		wake_up_process(child);
--		break;
--
--	case PTRACE_SINGLESTEP:    /* set the trap flag. */
--		ret = -EIO;
--		if (!valid_signal(data))
--			break;
--		clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
--		set_singlestep(child);
--		child->exit_code = data;
--		/* give it a chance to run. */
--		wake_up_process(child);
--		ret = 0;
--		break;
--
--	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
--	  	if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
--			       sizeof(struct user_regs_struct))) {
--			ret = -EIO;
--			break;
--		}
--		ret = 0;
--		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
--			ret |= __put_user(getreg(child, ui),(unsigned long __user *) data);
--			data += sizeof(long);
--		}
--		break;
--	}
--
--	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
--		unsigned long tmp;
--	  	if (!access_ok(VERIFY_READ, (unsigned __user *)data,
--			       sizeof(struct user_regs_struct))) {
--			ret = -EIO;
--			break;
--		}
--		ret = 0;
--		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
--			ret = __get_user(tmp, (unsigned long __user *) data);
--			if (ret)
--				break;
--			ret = putreg(child, ui, tmp);
--			if (ret)
--				break;
--			data += sizeof(long);
--		}
--		break;
--	}
--
--	case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
--		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
--			       sizeof(struct user_i387_struct))) {
--			ret = -EIO;
--			break;
--		}
--		ret = get_fpregs((struct user_i387_struct __user *)data, child);
--		break;
--	}
--
--	case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
--		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
--			       sizeof(struct user_i387_struct))) {
--			ret = -EIO;
--			break;
--		}
--		set_stopped_child_used_math(child);
--		ret = set_fpregs(child, (struct user_i387_struct __user *)data);
--		break;
--	}
--
--	default:
--		ret = ptrace_request(child, request, addr, data);
--		break;
--	}
--	return ret;
--}
--
--static void syscall_trace(struct pt_regs *regs)
--{
--
--#if 0
--	printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
--	       current->comm,
--	       regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
--	       current_thread_info()->flags, current->ptrace); 
--#endif
--
--	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
--				? 0x80 : 0));
--	/*
--	 * this isn't the same as continuing with a signal, but it will do
--	 * for normal use.  strace only continues with a signal if the
--	 * stopping signal is not SIGTRAP.  -brl
--	 */
--	if (current->exit_code) {
--		send_sig(current->exit_code, current, 1);
--		current->exit_code = 0;
--	}
--}
--
--asmlinkage void syscall_trace_enter(struct pt_regs *regs)
--{
--	/* do the secure computing check first */
--	secure_computing(regs->orig_rax);
--
--	if (test_thread_flag(TIF_SYSCALL_TRACE)
--	    && (current->ptrace & PT_PTRACED))
--		syscall_trace(regs);
++module_init(rdc321x_wdt_init);
++module_exit(rdc321x_wdt_exit);
++
++MODULE_AUTHOR("Florian Fainelli <florian at openwrt.org>");
++MODULE_DESCRIPTION("RDC321x watchdog driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+diff --git a/arch/x86/mach-visws/mpparse.c b/arch/x86/mach-visws/mpparse.c
+index f3c74fa..2a8456a 100644
+--- a/arch/x86/mach-visws/mpparse.c
++++ b/arch/x86/mach-visws/mpparse.c
+@@ -36,19 +36,19 @@ unsigned int __initdata maxcpus = NR_CPUS;
+ 
+ static void __init MP_processor_info (struct mpc_config_processor *m)
+ {
+- 	int ver, logical_apicid;
++	int ver, logical_apicid;
+ 	physid_mask_t apic_cpus;
+- 	
++
+ 	if (!(m->mpc_cpuflag & CPU_ENABLED))
+ 		return;
+ 
+ 	logical_apicid = m->mpc_apicid;
+-	printk(KERN_INFO "%sCPU #%d %ld:%ld APIC version %d\n",
+-		m->mpc_cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "",
+-		m->mpc_apicid,
+-		(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
+-		(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
+-		m->mpc_apicver);
++	printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n",
++	       m->mpc_cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "",
++	       m->mpc_apicid,
++	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
++	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
++	       m->mpc_apicver);
+ 
+ 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
+ 		boot_cpu_physical_apicid = m->mpc_apicid;
+diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
+index 3bef977..5ae5466 100644
+--- a/arch/x86/mach-voyager/setup.c
++++ b/arch/x86/mach-voyager/setup.c
+@@ -37,14 +37,14 @@ void __init pre_setup_arch_hook(void)
+ {
+ 	/* Voyagers run their CPUs from independent clocks, so disable
+ 	 * the TSC code because we can't sync them */
+-	tsc_disable = 1;
++	setup_clear_cpu_cap(X86_FEATURE_TSC);
+ }
+ 
+ void __init trap_init_hook(void)
+ {
+ }
+ 
+-static struct irqaction irq0  = {
++static struct irqaction irq0 = {
+ 	.handler = timer_interrupt,
+ 	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
+ 	.mask = CPU_MASK_NONE,
+@@ -59,44 +59,47 @@ void __init time_init_hook(void)
+ 
+ /* Hook for machine specific memory setup. */
+ 
+-char * __init machine_specific_memory_setup(void)
++char *__init machine_specific_memory_setup(void)
+ {
+ 	char *who;
+ 
+ 	who = "NOT VOYAGER";
+ 
+-	if(voyager_level == 5) {
++	if (voyager_level == 5) {
+ 		__u32 addr, length;
+ 		int i;
+ 
+ 		who = "Voyager-SUS";
+ 
+ 		e820.nr_map = 0;
+-		for(i=0; voyager_memory_detect(i, &addr, &length); i++) {
++		for (i = 0; voyager_memory_detect(i, &addr, &length); i++) {
+ 			add_memory_region(addr, length, E820_RAM);
+ 		}
+ 		return who;
+-	} else if(voyager_level == 4) {
++	} else if (voyager_level == 4) {
+ 		__u32 tom;
+-		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8;
++		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
+ 		/* select the DINO config space */
+ 		outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
+ 		/* Read DINO top of memory register */
+ 		tom = ((inb(catbase + 0x4) & 0xf0) << 16)
+-			+ ((inb(catbase + 0x5) & 0x7f) << 24);
++		    + ((inb(catbase + 0x5) & 0x7f) << 24);
+ 
+-		if(inb(catbase) != VOYAGER_DINO) {
+-			printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
+-			tom = (boot_params.screen_info.ext_mem_k)<<10;
++		if (inb(catbase) != VOYAGER_DINO) {
++			printk(KERN_ERR
++			       "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
++			tom = (boot_params.screen_info.ext_mem_k) << 10;
+ 		}
+ 		who = "Voyager-TOM";
+ 		add_memory_region(0, 0x9f000, E820_RAM);
+ 		/* map from 1M to top of memory */
+-		add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM);
++		add_memory_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024,
++				  E820_RAM);
+ 		/* FIXME: Should check the ASICs to see if I need to
+ 		 * take out the 8M window.  Just do it at the moment
+ 		 * */
+-		add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED);
++		add_memory_region(8 * 1024 * 1024, 8 * 1024 * 1024,
++				  E820_RESERVED);
+ 		return who;
+ 	}
+ 
+@@ -114,8 +117,7 @@ char * __init machine_specific_memory_setup(void)
+ 		unsigned long mem_size;
+ 
+ 		/* compare results from other methods and take the greater */
+-		if (boot_params.alt_mem_k
+-		    < boot_params.screen_info.ext_mem_k) {
++		if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
+ 			mem_size = boot_params.screen_info.ext_mem_k;
+ 			who = "BIOS-88";
+ 		} else {
+@@ -126,6 +128,6 @@ char * __init machine_specific_memory_setup(void)
+ 		e820.nr_map = 0;
+ 		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+ 		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
+-  	}
++	}
+ 	return who;
+ }
+diff --git a/arch/x86/mach-voyager/voyager_basic.c b/arch/x86/mach-voyager/voyager_basic.c
+index 9b77b39..6a949e4 100644
+--- a/arch/x86/mach-voyager/voyager_basic.c
++++ b/arch/x86/mach-voyager/voyager_basic.c
+@@ -35,7 +35,7 @@
+ /*
+  * Power off function, if any
+  */
+-void (*pm_power_off)(void);
++void (*pm_power_off) (void);
+ EXPORT_SYMBOL(pm_power_off);
+ 
+ int voyager_level = 0;
+@@ -43,39 +43,38 @@ int voyager_level = 0;
+ struct voyager_SUS *voyager_SUS = NULL;
+ 
+ #ifdef CONFIG_SMP
+-static void
+-voyager_dump(int dummy1, struct tty_struct *dummy3)
++static void voyager_dump(int dummy1, struct tty_struct *dummy3)
+ {
+ 	/* get here via a sysrq */
+ 	voyager_smp_dump();
+ }
+ 
+ static struct sysrq_key_op sysrq_voyager_dump_op = {
+-	.handler	= voyager_dump,
+-	.help_msg	= "Voyager",
+-	.action_msg	= "Dump Voyager Status",
++	.handler = voyager_dump,
++	.help_msg = "Voyager",
++	.action_msg = "Dump Voyager Status",
+ };
+ #endif
+ 
+-void
+-voyager_detect(struct voyager_bios_info *bios)
++void voyager_detect(struct voyager_bios_info *bios)
+ {
+-	if(bios->len != 0xff) {
+-		int class = (bios->class_1 << 8) 
+-			| (bios->class_2 & 0xff);
++	if (bios->len != 0xff) {
++		int class = (bios->class_1 << 8)
++		    | (bios->class_2 & 0xff);
+ 
+ 		printk("Voyager System detected.\n"
+ 		       "        Class %x, Revision %d.%d\n",
+ 		       class, bios->major, bios->minor);
+-		if(class == VOYAGER_LEVEL4) 
++		if (class == VOYAGER_LEVEL4)
+ 			voyager_level = 4;
+-		else if(class < VOYAGER_LEVEL5_AND_ABOVE)
++		else if (class < VOYAGER_LEVEL5_AND_ABOVE)
+ 			voyager_level = 3;
+ 		else
+ 			voyager_level = 5;
+ 		printk("        Architecture Level %d\n", voyager_level);
+-		if(voyager_level < 4)
+-			printk("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n");
++		if (voyager_level < 4)
++			printk
++			    ("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n");
+ 		/* install the power off handler */
+ 		pm_power_off = voyager_power_off;
+ #ifdef CONFIG_SMP
+@@ -86,15 +85,13 @@ voyager_detect(struct voyager_bios_info *bios)
+ 	}
+ }
+ 
+-void
+-voyager_system_interrupt(int cpl, void *dev_id)
++void voyager_system_interrupt(int cpl, void *dev_id)
+ {
+ 	printk("Voyager: detected system interrupt\n");
+ }
+ 
+ /* Routine to read information from the extended CMOS area */
+-__u8
+-voyager_extended_cmos_read(__u16 addr)
++__u8 voyager_extended_cmos_read(__u16 addr)
+ {
+ 	outb(addr & 0xff, 0x74);
+ 	outb((addr >> 8) & 0xff, 0x75);
+@@ -108,12 +105,11 @@ voyager_extended_cmos_read(__u16 addr)
+ 
+ typedef struct ClickMap {
+ 	struct Entry {
+-		__u32	Address;
+-		__u32	Length;
++		__u32 Address;
++		__u32 Length;
+ 	} Entry[CLICK_ENTRIES];
+ } ClickMap_t;
+ 
 -
--	if (unlikely(current->audit_context)) {
--		if (test_thread_flag(TIF_IA32)) {
--			audit_syscall_entry(AUDIT_ARCH_I386,
--					    regs->orig_rax,
--					    regs->rbx, regs->rcx,
--					    regs->rdx, regs->rsi);
--		} else {
--			audit_syscall_entry(AUDIT_ARCH_X86_64,
--					    regs->orig_rax,
--					    regs->rdi, regs->rsi,
--					    regs->rdx, regs->r10);
--		}
--	}
--}
+ /* This routine is pretty much an awful hack to read the bios clickmap by
+  * mapping it into page 0.  There are usually three regions in the map:
+  * 	Base Memory
+@@ -122,8 +118,7 @@ typedef struct ClickMap {
+  *
+  * Returns are 0 for failure and 1 for success on extracting region.
+  */
+-int __init
+-voyager_memory_detect(int region, __u32 *start, __u32 *length)
++int __init voyager_memory_detect(int region, __u32 * start, __u32 * length)
+ {
+ 	int i;
+ 	int retval = 0;
+@@ -132,13 +127,14 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
+ 	unsigned long map_addr;
+ 	unsigned long old;
+ 
+-	if(region >= CLICK_ENTRIES) {
++	if (region >= CLICK_ENTRIES) {
+ 		printk("Voyager: Illegal ClickMap region %d\n", region);
+ 		return 0;
+ 	}
+ 
+-	for(i = 0; i < sizeof(cmos); i++)
+-		cmos[i] = voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i);
++	for (i = 0; i < sizeof(cmos); i++)
++		cmos[i] =
++		    voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i);
+ 
+ 	map_addr = *(unsigned long *)cmos;
+ 
+@@ -147,10 +143,10 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
+ 	pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
+ 	local_flush_tlb();
+ 	/* now clear everything out but page 0 */
+-	map = (ClickMap_t *)(map_addr & (~PAGE_MASK));
++	map = (ClickMap_t *) (map_addr & (~PAGE_MASK));
+ 
+ 	/* zero length is the end of the clickmap */
+-	if(map->Entry[region].Length != 0) {
++	if (map->Entry[region].Length != 0) {
+ 		*length = map->Entry[region].Length * CLICK_SIZE;
+ 		*start = map->Entry[region].Address;
+ 		retval = 1;
+@@ -165,10 +161,9 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
+ /* voyager specific handling code for timer interrupts.  Used to hand
+  * off the timer tick to the SMP code, since the VIC doesn't have an
+  * internal timer (The QIC does, but that's another story). */
+-void
+-voyager_timer_interrupt(void)
++void voyager_timer_interrupt(void)
+ {
+-	if((jiffies & 0x3ff) == 0) {
++	if ((jiffies & 0x3ff) == 0) {
+ 
+ 		/* There seems to be something flaky in either
+ 		 * hardware or software that is resetting the timer 0
+@@ -186,18 +181,20 @@ voyager_timer_interrupt(void)
+ 		__u16 val;
+ 
+ 		spin_lock(&i8253_lock);
+-		
++
+ 		outb_p(0x00, 0x43);
+ 		val = inb_p(0x40);
+ 		val |= inb(0x40) << 8;
+ 		spin_unlock(&i8253_lock);
+ 
+-		if(val > LATCH) {
+-			printk("\nVOYAGER: countdown timer value too high (%d), resetting\n\n", val);
++		if (val > LATCH) {
++			printk
++			    ("\nVOYAGER: countdown timer value too high (%d), resetting\n\n",
++			     val);
+ 			spin_lock(&i8253_lock);
+-			outb(0x34,0x43);
+-			outb_p(LATCH & 0xff , 0x40);	/* LSB */
+-			outb(LATCH >> 8 , 0x40);	/* MSB */
++			outb(0x34, 0x43);
++			outb_p(LATCH & 0xff, 0x40);	/* LSB */
++			outb(LATCH >> 8, 0x40);	/* MSB */
+ 			spin_unlock(&i8253_lock);
+ 		}
+ 	}
+@@ -206,14 +203,13 @@ voyager_timer_interrupt(void)
+ #endif
+ }
+ 
+-void
+-voyager_power_off(void)
++void voyager_power_off(void)
+ {
+ 	printk("VOYAGER Power Off\n");
+ 
+-	if(voyager_level == 5) {
++	if (voyager_level == 5) {
+ 		voyager_cat_power_off();
+-	} else if(voyager_level == 4) {
++	} else if (voyager_level == 4) {
+ 		/* This doesn't apparently work on most L4 machines,
+ 		 * but the specs say to do this to get automatic power
+ 		 * off.  Unfortunately, if it doesn't power off the
+@@ -222,10 +218,8 @@ voyager_power_off(void)
+ #if 0
+ 		int port;
+ 
+-	  
+ 		/* enable the voyager Configuration Space */
+-		outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, 
+-		     VOYAGER_MC_SETUP);
++		outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, VOYAGER_MC_SETUP);
+ 		/* the port for the power off flag is an offset from the
+ 		   floating base */
+ 		port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21;
+@@ -235,62 +229,57 @@ voyager_power_off(void)
+ 	}
+ 	/* and wait for it to happen */
+ 	local_irq_disable();
+-	for(;;)
++	for (;;)
+ 		halt();
+ }
+ 
+ /* copied from process.c */
+-static inline void
+-kb_wait(void)
++static inline void kb_wait(void)
+ {
+ 	int i;
+ 
+-	for (i=0; i<0x10000; i++)
++	for (i = 0; i < 0x10000; i++)
+ 		if ((inb_p(0x64) & 0x02) == 0)
+ 			break;
+ }
+ 
+-void
+-machine_shutdown(void)
++void machine_shutdown(void)
+ {
+ 	/* Architecture specific shutdown needed before a kexec */
+ }
+ 
+-void
+-machine_restart(char *cmd)
++void machine_restart(char *cmd)
+ {
+ 	printk("Voyager Warm Restart\n");
+ 	kb_wait();
+ 
+-	if(voyager_level == 5) {
++	if (voyager_level == 5) {
+ 		/* write magic values to the RTC to inform system that
+ 		 * shutdown is beginning */
+ 		outb(0x8f, 0x70);
+-		outb(0x5 , 0x71);
+-		
++		outb(0x5, 0x71);
++
+ 		udelay(50);
+-		outb(0xfe,0x64);         /* pull reset low */
+-	} else if(voyager_level == 4) {
+-		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8;
++		outb(0xfe, 0x64);	/* pull reset low */
++	} else if (voyager_level == 4) {
++		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
+ 		__u8 basebd = inb(VOYAGER_MC_SETUP);
+-		
++
+ 		outb(basebd | 0x08, VOYAGER_MC_SETUP);
+ 		outb(0x02, catbase + 0x21);
+ 	}
+ 	local_irq_disable();
+-	for(;;)
++	for (;;)
+ 		halt();
+ }
+ 
+-void
+-machine_emergency_restart(void)
++void machine_emergency_restart(void)
+ {
+ 	/*for now, just hook this to a warm restart */
+ 	machine_restart(NULL);
+ }
+ 
+-void
+-mca_nmi_hook(void)
++void mca_nmi_hook(void)
+ {
+ 	__u8 dumpval __maybe_unused = inb(0xf823);
+ 	__u8 swnmi __maybe_unused = inb(0xf813);
+@@ -301,8 +290,8 @@ mca_nmi_hook(void)
+ 	/* clear swnmi */
+ 	outb(0xff, 0xf813);
+ 	/* tell SUS to ignore dump */
+-	if(voyager_level == 5 && voyager_SUS != NULL) {
+-		if(voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) {
++	if (voyager_level == 5 && voyager_SUS != NULL) {
++		if (voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) {
+ 			voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND;
+ 			voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS;
+ 			udelay(1000);
+@@ -310,15 +299,14 @@ mca_nmi_hook(void)
+ 			voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS;
+ 		}
+ 	}
+-	printk(KERN_ERR "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n", smp_processor_id());
++	printk(KERN_ERR
++	       "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n",
++	       smp_processor_id());
+ 	show_stack(NULL, NULL);
+ 	show_state();
+ }
+ 
 -
--asmlinkage void syscall_trace_leave(struct pt_regs *regs)
--{
--	if (unlikely(current->audit_context))
--		audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);
 -
--	if ((test_thread_flag(TIF_SYSCALL_TRACE)
--	     || test_thread_flag(TIF_SINGLESTEP))
--	    && (current->ptrace & PT_PTRACED))
--		syscall_trace(regs);
--}
-diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
-index fab30e1..150ba29 100644
---- a/arch/x86/kernel/quirks.c
-+++ b/arch/x86/kernel/quirks.c
-@@ -162,6 +162,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
- 			 ich_force_enable_hpet);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
- 			 ich_force_enable_hpet);
-+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
-+			 ich_force_enable_hpet);
+-void
+-machine_halt(void)
++void machine_halt(void)
+ {
+ 	/* treat a halt like a power off */
+ 	machine_power_off();
+diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c
+index 2132ca6..17a7904 100644
+--- a/arch/x86/mach-voyager/voyager_cat.c
++++ b/arch/x86/mach-voyager/voyager_cat.c
+@@ -39,34 +39,32 @@
+ #define CAT_DATA	(sspb + 0xd)
  
+ /* the internal cat functions */
+-static void cat_pack(__u8 *msg, __u16 start_bit, __u8 *data, 
+-		     __u16 num_bits);
+-static void cat_unpack(__u8 *msg, __u16 start_bit, __u8 *data,
++static void cat_pack(__u8 * msg, __u16 start_bit, __u8 * data, __u16 num_bits);
++static void cat_unpack(__u8 * msg, __u16 start_bit, __u8 * data,
+ 		       __u16 num_bits);
+-static void cat_build_header(__u8 *header, const __u16 len, 
++static void cat_build_header(__u8 * header, const __u16 len,
+ 			     const __u16 smallest_reg_bits,
+ 			     const __u16 longest_reg_bits);
+-static int cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp,
++static int cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp,
+ 			__u8 reg, __u8 op);
+-static int cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp,
+-		       __u8 reg, __u8 *value);
+-static int cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes,
++static int cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp,
++		       __u8 reg, __u8 * value);
++static int cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes,
+ 			__u8 pad_bits);
+-static int cat_write(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
++static int cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
+ 		     __u8 value);
+-static int cat_read(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
+-		    __u8 *value);
+-static int cat_subread(voyager_module_t *modp, voyager_asic_t *asicp,
++static int cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
++		    __u8 * value);
++static int cat_subread(voyager_module_t * modp, voyager_asic_t * asicp,
+ 		       __u16 offset, __u16 len, void *buf);
+-static int cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp,
++static int cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
+ 			__u8 reg, __u8 value);
+-static int cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp);
+-static int cat_connect(voyager_module_t *modp, voyager_asic_t *asicp);
++static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp);
++static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp);
  
- static struct pci_dev *cached_dev;
-diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
-new file mode 100644
-index 0000000..5818dc2
---- /dev/null
-+++ b/arch/x86/kernel/reboot.c
-@@ -0,0 +1,451 @@
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/reboot.h>
-+#include <linux/init.h>
-+#include <linux/pm.h>
-+#include <linux/efi.h>
-+#include <acpi/reboot.h>
-+#include <asm/io.h>
-+#include <asm/apic.h>
-+#include <asm/desc.h>
-+#include <asm/hpet.h>
-+#include <asm/reboot_fixups.h>
-+#include <asm/reboot.h>
-+
-+#ifdef CONFIG_X86_32
-+# include <linux/dmi.h>
-+# include <linux/ctype.h>
-+# include <linux/mc146818rtc.h>
-+# include <asm/pgtable.h>
-+#else
-+# include <asm/iommu.h>
-+#endif
-+
-+/*
-+ * Power off function, if any
-+ */
-+void (*pm_power_off)(void);
-+EXPORT_SYMBOL(pm_power_off);
-+
-+static long no_idt[3];
-+static int reboot_mode;
-+enum reboot_type reboot_type = BOOT_KBD;
-+int reboot_force;
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
-+static int reboot_cpu = -1;
-+#endif
-+
-+/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old]
-+   warm   Don't set the cold reboot flag
-+   cold   Set the cold reboot flag
-+   bios   Reboot by jumping through the BIOS (only for X86_32)
-+   smp    Reboot by executing reset on BSP or other CPU (only for X86_32)
-+   triple Force a triple fault (init)
-+   kbd    Use the keyboard controller. cold reset (default)
-+   acpi   Use the RESET_REG in the FADT
-+   efi    Use efi reset_system runtime service
-+   force  Avoid anything that could hang.
-+ */
-+static int __init reboot_setup(char *str)
-+{
-+	for (;;) {
-+		switch (*str) {
-+		case 'w':
-+			reboot_mode = 0x1234;
-+			break;
-+
-+		case 'c':
-+			reboot_mode = 0;
-+			break;
-+
-+#ifdef CONFIG_X86_32
-+#ifdef CONFIG_SMP
-+		case 's':
-+			if (isdigit(*(str+1))) {
-+				reboot_cpu = (int) (*(str+1) - '0');
-+				if (isdigit(*(str+2)))
-+					reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
-+			}
-+				/* we will leave sorting out the final value
-+				   when we are ready to reboot, since we might not
-+				   have set up boot_cpu_id or smp_num_cpu */
-+			break;
-+#endif /* CONFIG_SMP */
-+
-+		case 'b':
-+#endif
-+		case 'a':
-+		case 'k':
-+		case 't':
-+		case 'e':
-+			reboot_type = *str;
-+			break;
-+
-+		case 'f':
-+			reboot_force = 1;
-+			break;
-+		}
-+
-+		str = strchr(str, ',');
-+		if (str)
-+			str++;
-+		else
-+			break;
-+	}
-+	return 1;
-+}
-+
-+__setup("reboot=", reboot_setup);
-+
-+
-+#ifdef CONFIG_X86_32
-+/*
-+ * Reboot options and system auto-detection code provided by
-+ * Dell Inc. so their systems "just work". :-)
-+ */
-+
-+/*
-+ * Some machines require the "reboot=b"  commandline option,
-+ * this quirk makes that automatic.
-+ */
-+static int __init set_bios_reboot(const struct dmi_system_id *d)
-+{
-+	if (reboot_type != BOOT_BIOS) {
-+		reboot_type = BOOT_BIOS;
-+		printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
-+	}
-+	return 0;
-+}
-+
-+static struct dmi_system_id __initdata reboot_dmi_table[] = {
-+	{	/* Handle problems with rebooting on Dell E520's */
-+		.callback = set_bios_reboot,
-+		.ident = "Dell E520",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
-+		},
-+	},
-+	{	/* Handle problems with rebooting on Dell 1300's */
-+		.callback = set_bios_reboot,
-+		.ident = "Dell PowerEdge 1300",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
-+		},
-+	},
-+	{	/* Handle problems with rebooting on Dell 300's */
-+		.callback = set_bios_reboot,
-+		.ident = "Dell PowerEdge 300",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
-+		},
-+	},
-+	{       /* Handle problems with rebooting on Dell Optiplex 745's SFF*/
-+		.callback = set_bios_reboot,
-+		.ident = "Dell OptiPlex 745",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
-+			DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
-+		},
-+	},
-+	{	/* Handle problems with rebooting on Dell 2400's */
-+		.callback = set_bios_reboot,
-+		.ident = "Dell PowerEdge 2400",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
-+		},
-+	},
-+	{	/* Handle problems with rebooting on HP laptops */
-+		.callback = set_bios_reboot,
-+		.ident = "HP Compaq Laptop",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
-+		},
-+	},
-+	{ }
-+};
-+
-+static int __init reboot_init(void)
-+{
-+	dmi_check_system(reboot_dmi_table);
-+	return 0;
-+}
-+core_initcall(reboot_init);
-+
-+/* The following code and data reboots the machine by switching to real
-+   mode and jumping to the BIOS reset entry point, as if the CPU has
-+   really been reset.  The previous version asked the keyboard
-+   controller to pulse the CPU reset line, which is more thorough, but
-+   doesn't work with at least one type of 486 motherboard.  It is easy
-+   to stop this code working; hence the copious comments. */
-+static unsigned long long
-+real_mode_gdt_entries [3] =
-+{
-+	0x0000000000000000ULL,	/* Null descriptor */
-+	0x00009a000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
-+	0x000092000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
-+};
-+
-+static struct desc_ptr
-+real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
-+real_mode_idt = { 0x3ff, 0 };
-+
-+/* This is 16-bit protected mode code to disable paging and the cache,
-+   switch to real mode and jump to the BIOS reset code.
-+
-+   The instruction that switches to real mode by writing to CR0 must be
-+   followed immediately by a far jump instruction, which set CS to a
-+   valid value for real mode, and flushes the prefetch queue to avoid
-+   running instructions that have already been decoded in protected
-+   mode.
-+
-+   Clears all the flags except ET, especially PG (paging), PE
-+   (protected-mode enable) and TS (task switch for coprocessor state
-+   save).  Flushes the TLB after paging has been disabled.  Sets CD and
-+   NW, to disable the cache on a 486, and invalidates the cache.  This
-+   is more like the state of a 486 after reset.  I don't know if
-+   something else should be done for other chips.
-+
-+   More could be done here to set up the registers as if a CPU reset had
-+   occurred; hopefully real BIOSs don't assume much. */
-+static unsigned char real_mode_switch [] =
-+{
-+	0x66, 0x0f, 0x20, 0xc0,			/*    movl  %cr0,%eax        */
-+	0x66, 0x83, 0xe0, 0x11,			/*    andl  $0x00000011,%eax */
-+	0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,	/*    orl   $0x60000000,%eax */
-+	0x66, 0x0f, 0x22, 0xc0,			/*    movl  %eax,%cr0        */
-+	0x66, 0x0f, 0x22, 0xd8,			/*    movl  %eax,%cr3        */
-+	0x66, 0x0f, 0x20, 0xc3,			/*    movl  %cr0,%ebx        */
-+	0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60,	/*    andl  $0x60000000,%ebx */
-+	0x74, 0x02,				/*    jz    f                */
-+	0x0f, 0x09,				/*    wbinvd                 */
-+	0x24, 0x10,				/* f: andb  $0x10,al         */
-+	0x66, 0x0f, 0x22, 0xc0			/*    movl  %eax,%cr0        */
-+};
-+static unsigned char jump_to_bios [] =
-+{
-+	0xea, 0x00, 0x00, 0xff, 0xff		/*    ljmp  $0xffff,$0x0000  */
-+};
-+
-+/*
-+ * Switch to real mode and then execute the code
-+ * specified by the code and length parameters.
-+ * We assume that length will aways be less that 100!
-+ */
-+void machine_real_restart(unsigned char *code, int length)
-+{
-+	local_irq_disable();
+-static inline const char *
+-cat_module_name(int module_id)
++static inline const char *cat_module_name(int module_id)
+ {
+-	switch(module_id) {
++	switch (module_id) {
+ 	case 0x10:
+ 		return "Processor Slot 0";
+ 	case 0x11:
+@@ -105,14 +103,14 @@ voyager_module_t *voyager_cat_list;
+ 
+ /* the I/O port assignments for the VIC and QIC */
+ static struct resource vic_res = {
+-	.name	= "Voyager Interrupt Controller",
+-	.start	= 0xFC00,
+-	.end	= 0xFC6F
++	.name = "Voyager Interrupt Controller",
++	.start = 0xFC00,
++	.end = 0xFC6F
+ };
+ static struct resource qic_res = {
+-	.name	= "Quad Interrupt Controller",
+-	.start	= 0xFC70,
+-	.end	= 0xFCFF
++	.name = "Quad Interrupt Controller",
++	.start = 0xFC70,
++	.end = 0xFCFF
+ };
+ 
+ /* This function is used to pack a data bit stream inside a message.
+@@ -120,7 +118,7 @@ static struct resource qic_res = {
+  * Note: This function assumes that any unused bit in the data stream
+  * is set to zero so that the ors will work correctly */
+ static void
+-cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
++cat_pack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
+ {
+ 	/* compute initial shift needed */
+ 	const __u16 offset = start_bit % BITS_PER_BYTE;
+@@ -130,7 +128,7 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
+ 	int i;
+ 
+ 	/* adjust if we have more than a byte of residue */
+-	if(residue >= BITS_PER_BYTE) {
++	if (residue >= BITS_PER_BYTE) {
+ 		residue -= BITS_PER_BYTE;
+ 		len++;
+ 	}
+@@ -138,24 +136,25 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
+ 	/* clear out the bits.  We assume here that if len==0 then
+ 	 * residue >= offset.  This is always true for the catbus
+ 	 * operations */
+-	msg[byte] &= 0xff << (BITS_PER_BYTE - offset); 
++	msg[byte] &= 0xff << (BITS_PER_BYTE - offset);
+ 	msg[byte++] |= data[0] >> offset;
+-	if(len == 0)
++	if (len == 0)
+ 		return;
+-	for(i = 1; i < len; i++)
+-		msg[byte++] = (data[i-1] << (BITS_PER_BYTE - offset))
+-			| (data[i] >> offset);
+-	if(residue != 0) {
++	for (i = 1; i < len; i++)
++		msg[byte++] = (data[i - 1] << (BITS_PER_BYTE - offset))
++		    | (data[i] >> offset);
++	if (residue != 0) {
+ 		__u8 mask = 0xff >> residue;
+-		__u8 last_byte = data[i-1] << (BITS_PER_BYTE - offset)
+-			| (data[i] >> offset);
+-		
++		__u8 last_byte = data[i - 1] << (BITS_PER_BYTE - offset)
++		    | (data[i] >> offset);
 +
-+	/* Write zero to CMOS register number 0x0f, which the BIOS POST
-+	   routine will recognize as telling it to do a proper reboot.  (Well
-+	   that's what this book in front of me says -- it may only apply to
-+	   the Phoenix BIOS though, it's not clear).  At the same time,
-+	   disable NMIs by setting the top bit in the CMOS address register,
-+	   as we're about to do peculiar things to the CPU.  I'm not sure if
-+	   `outb_p' is needed instead of just `outb'.  Use it to be on the
-+	   safe side.  (Yes, CMOS_WRITE does outb_p's. -  Paul G.)
-+	 */
-+	spin_lock(&rtc_lock);
-+	CMOS_WRITE(0x00, 0x8f);
-+	spin_unlock(&rtc_lock);
+ 		last_byte &= ~mask;
+ 		msg[byte] &= mask;
+ 		msg[byte] |= last_byte;
+ 	}
+ 	return;
+ }
 +
-+	/* Remap the kernel at virtual address zero, as well as offset zero
-+	   from the kernel segment.  This assumes the kernel segment starts at
-+	   virtual address PAGE_OFFSET. */
-+	memcpy(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
-+		sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
+ /* unpack the data again (same arguments as cat_pack()). data buffer
+  * must be zero populated.
+  *
+@@ -163,7 +162,7 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
+  * data (starting at bit 0 in data).
+  */
+ static void
+-cat_unpack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
++cat_unpack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
+ {
+ 	/* compute initial shift needed */
+ 	const __u16 offset = start_bit % BITS_PER_BYTE;
+@@ -172,97 +171,97 @@ cat_unpack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
+ 	__u16 byte = start_bit / BITS_PER_BYTE;
+ 	int i;
+ 
+-	if(last_bits != 0)
++	if (last_bits != 0)
+ 		len++;
+ 
+ 	/* special case: want < 8 bits from msg and we can get it from
+ 	 * a single byte of the msg */
+-	if(len == 0 && BITS_PER_BYTE - offset >= num_bits) {
++	if (len == 0 && BITS_PER_BYTE - offset >= num_bits) {
+ 		data[0] = msg[byte] << offset;
+ 		data[0] &= 0xff >> (BITS_PER_BYTE - num_bits);
+ 		return;
+ 	}
+-	for(i = 0; i < len; i++) {
++	for (i = 0; i < len; i++) {
+ 		/* this annoying if has to be done just in case a read of
+ 		 * msg one beyond the array causes a panic */
+-		if(offset != 0) {
++		if (offset != 0) {
+ 			data[i] = msg[byte++] << offset;
+ 			data[i] |= msg[byte] >> (BITS_PER_BYTE - offset);
+-		}
+-		else {
++		} else {
+ 			data[i] = msg[byte++];
+ 		}
+ 	}
+ 	/* do we need to truncate the final byte */
+-	if(last_bits != 0) {
+-		data[i-1] &= 0xff << (BITS_PER_BYTE - last_bits);
++	if (last_bits != 0) {
++		data[i - 1] &= 0xff << (BITS_PER_BYTE - last_bits);
+ 	}
+ 	return;
+ }
+ 
+ static void
+-cat_build_header(__u8 *header, const __u16 len, const __u16 smallest_reg_bits,
++cat_build_header(__u8 * header, const __u16 len, const __u16 smallest_reg_bits,
+ 		 const __u16 longest_reg_bits)
+ {
+ 	int i;
+ 	__u16 start_bit = (smallest_reg_bits - 1) % BITS_PER_BYTE;
+ 	__u8 *last_byte = &header[len - 1];
+ 
+-	if(start_bit == 0)
++	if (start_bit == 0)
+ 		start_bit = 1;	/* must have at least one bit in the hdr */
+-	
+-	for(i=0; i < len; i++)
 +
-+	/*
-+	 * Use `swapper_pg_dir' as our page directory.
-+	 */
-+	load_cr3(swapper_pg_dir);
++	for (i = 0; i < len; i++)
+ 		header[i] = 0;
+ 
+-	for(i = start_bit; i > 0; i--)
++	for (i = start_bit; i > 0; i--)
+ 		*last_byte = ((*last_byte) << 1) + 1;
+ 
+ }
+ 
+ static int
+-cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op)
++cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 op)
+ {
+ 	__u8 parity, inst, inst_buf[4] = { 0 };
+ 	__u8 iseq[VOYAGER_MAX_SCAN_PATH], hseq[VOYAGER_MAX_REG_SIZE];
+ 	__u16 ibytes, hbytes, padbits;
+ 	int i;
+-	
 +
-+	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
-+	   this on booting to tell it to "Bypass memory test (also warm
-+	   boot)".  This seems like a fairly standard thing that gets set by
-+	   REBOOT.COM programs, and the previous reset routine did this
-+	   too. */
-+	*((unsigned short *)0x472) = reboot_mode;
+ 	/* 
+ 	 * Parity is the parity of the register number + 1 (READ_REGISTER
+ 	 * and WRITE_REGISTER always add '1' to the number of bits == 1)
+ 	 */
+-	parity = (__u8)(1 + (reg & 0x01) +
+-	         ((__u8)(reg & 0x02) >> 1) +
+-	         ((__u8)(reg & 0x04) >> 2) +
+-	         ((__u8)(reg & 0x08) >> 3)) % 2;
++	parity = (__u8) (1 + (reg & 0x01) +
++			 ((__u8) (reg & 0x02) >> 1) +
++			 ((__u8) (reg & 0x04) >> 2) +
++			 ((__u8) (reg & 0x08) >> 3)) % 2;
+ 
+ 	inst = ((parity << 7) | (reg << 2) | op);
+ 
+ 	outb(VOYAGER_CAT_IRCYC, CAT_CMD);
+-	if(!modp->scan_path_connected) {
+-		if(asicp->asic_id != VOYAGER_CAT_ID) {
+-			printk("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n");
++	if (!modp->scan_path_connected) {
++		if (asicp->asic_id != VOYAGER_CAT_ID) {
++			printk
++			    ("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n");
+ 			return 1;
+ 		}
+ 		outb(VOYAGER_CAT_HEADER, CAT_DATA);
+ 		outb(inst, CAT_DATA);
+-		if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
++		if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
+ 			CDEBUG(("VOYAGER CAT: cat_sendinst failed to get CAT_HEADER\n"));
+ 			return 1;
+ 		}
+ 		return 0;
+ 	}
+ 	ibytes = modp->inst_bits / BITS_PER_BYTE;
+-	if((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) {
++	if ((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) {
+ 		padbits = BITS_PER_BYTE - padbits;
+ 		ibytes++;
+ 	}
+ 	hbytes = modp->largest_reg / BITS_PER_BYTE;
+-	if(modp->largest_reg % BITS_PER_BYTE)
++	if (modp->largest_reg % BITS_PER_BYTE)
+ 		hbytes++;
+ 	CDEBUG(("cat_sendinst: ibytes=%d, hbytes=%d\n", ibytes, hbytes));
+ 	/* initialise the instruction sequence to 0xff */
+-	for(i=0; i < ibytes + hbytes; i++)
++	for (i = 0; i < ibytes + hbytes; i++)
+ 		iseq[i] = 0xff;
+ 	cat_build_header(hseq, hbytes, modp->smallest_reg, modp->largest_reg);
+ 	cat_pack(iseq, modp->inst_bits, hseq, hbytes * BITS_PER_BYTE);
+@@ -271,11 +270,11 @@ cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op)
+ 	cat_pack(iseq, asicp->bit_location, inst_buf, asicp->ireg_length);
+ #ifdef VOYAGER_CAT_DEBUG
+ 	printk("ins = 0x%x, iseq: ", inst);
+-	for(i=0; i< ibytes + hbytes; i++)
++	for (i = 0; i < ibytes + hbytes; i++)
+ 		printk("0x%x ", iseq[i]);
+ 	printk("\n");
+ #endif
+-	if(cat_shiftout(iseq, ibytes, hbytes, padbits)) {
++	if (cat_shiftout(iseq, ibytes, hbytes, padbits)) {
+ 		CDEBUG(("VOYAGER CAT: cat_sendinst: cat_shiftout failed\n"));
+ 		return 1;
+ 	}
+@@ -284,72 +283,74 @@ cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op)
+ }
+ 
+ static int
+-cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, 
+-	    __u8 *value)
++cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
++	    __u8 * value)
+ {
+-	if(!modp->scan_path_connected) {
+-		if(asicp->asic_id != VOYAGER_CAT_ID) {
++	if (!modp->scan_path_connected) {
++		if (asicp->asic_id != VOYAGER_CAT_ID) {
+ 			CDEBUG(("VOYAGER CAT: ERROR: cat_getdata to CAT asic with scan path connected\n"));
+ 			return 1;
+ 		}
+-		if(reg > VOYAGER_SUBADDRHI) 
++		if (reg > VOYAGER_SUBADDRHI)
+ 			outb(VOYAGER_CAT_RUN, CAT_CMD);
+ 		outb(VOYAGER_CAT_DRCYC, CAT_CMD);
+ 		outb(VOYAGER_CAT_HEADER, CAT_DATA);
+ 		*value = inb(CAT_DATA);
+ 		outb(0xAA, CAT_DATA);
+-		if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
++		if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
+ 			CDEBUG(("cat_getdata: failed to get VOYAGER_CAT_HEADER\n"));
+ 			return 1;
+ 		}
+ 		return 0;
+-	}
+-	else {
+-		__u16 sbits = modp->num_asics -1 + asicp->ireg_length;
++	} else {
++		__u16 sbits = modp->num_asics - 1 + asicp->ireg_length;
+ 		__u16 sbytes = sbits / BITS_PER_BYTE;
+ 		__u16 tbytes;
+-		__u8 string[VOYAGER_MAX_SCAN_PATH], trailer[VOYAGER_MAX_REG_SIZE];
++		__u8 string[VOYAGER_MAX_SCAN_PATH],
++		    trailer[VOYAGER_MAX_REG_SIZE];
+ 		__u8 padbits;
+ 		int i;
+-		
 +
-+	/* For the switch to real mode, copy some code to low memory.  It has
-+	   to be in the first 64k because it is running in 16-bit mode, and it
-+	   has to have the same physical and virtual address, because it turns
-+	   off paging.  Copy it near the end of the first page, out of the way
-+	   of BIOS variables. */
-+	memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
-+		real_mode_switch, sizeof (real_mode_switch));
-+	memcpy((void *)(0x1000 - 100), code, length);
+ 		outb(VOYAGER_CAT_DRCYC, CAT_CMD);
+ 
+-		if((padbits = sbits % BITS_PER_BYTE) != 0) {
++		if ((padbits = sbits % BITS_PER_BYTE) != 0) {
+ 			padbits = BITS_PER_BYTE - padbits;
+ 			sbytes++;
+ 		}
+ 		tbytes = asicp->ireg_length / BITS_PER_BYTE;
+-		if(asicp->ireg_length % BITS_PER_BYTE)
++		if (asicp->ireg_length % BITS_PER_BYTE)
+ 			tbytes++;
+ 		CDEBUG(("cat_getdata: tbytes = %d, sbytes = %d, padbits = %d\n",
+-			tbytes,	sbytes, padbits));
++			tbytes, sbytes, padbits));
+ 		cat_build_header(trailer, tbytes, 1, asicp->ireg_length);
+ 
+-		
+-		for(i = tbytes - 1; i >= 0; i--) {
++		for (i = tbytes - 1; i >= 0; i--) {
+ 			outb(trailer[i], CAT_DATA);
+ 			string[sbytes + i] = inb(CAT_DATA);
+ 		}
+ 
+-		for(i = sbytes - 1; i >= 0; i--) {
++		for (i = sbytes - 1; i >= 0; i--) {
+ 			outb(0xaa, CAT_DATA);
+ 			string[i] = inb(CAT_DATA);
+ 		}
+ 		*value = 0;
+-		cat_unpack(string, padbits + (tbytes * BITS_PER_BYTE) + asicp->asic_location, value, asicp->ireg_length);
++		cat_unpack(string,
++			   padbits + (tbytes * BITS_PER_BYTE) +
++			   asicp->asic_location, value, asicp->ireg_length);
+ #ifdef VOYAGER_CAT_DEBUG
+ 		printk("value=0x%x, string: ", *value);
+-		for(i=0; i< tbytes+sbytes; i++)
++		for (i = 0; i < tbytes + sbytes; i++)
+ 			printk("0x%x ", string[i]);
+ 		printk("\n");
+ #endif
+-		
 +
-+	/* Set up the IDT for real mode. */
-+	load_idt(&real_mode_idt);
+ 		/* sanity check the rest of the return */
+-		for(i=0; i < tbytes; i++) {
++		for (i = 0; i < tbytes; i++) {
+ 			__u8 input = 0;
+ 
+-			cat_unpack(string, padbits + (i * BITS_PER_BYTE), &input, BITS_PER_BYTE);
+-			if(trailer[i] != input) {
++			cat_unpack(string, padbits + (i * BITS_PER_BYTE),
++				   &input, BITS_PER_BYTE);
++			if (trailer[i] != input) {
+ 				CDEBUG(("cat_getdata: failed to sanity check rest of ret(%d) 0x%x != 0x%x\n", i, input, trailer[i]));
+ 				return 1;
+ 			}
+@@ -360,14 +361,14 @@ cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
+ }
+ 
+ static int
+-cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
++cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
+ {
+ 	int i;
+-	
+-	for(i = data_bytes + header_bytes - 1; i >= header_bytes; i--)
 +
-+	/* Set up a GDT from which we can load segment descriptors for real
-+	   mode.  The GDT is not used in real mode; it is just needed here to
-+	   prepare the descriptors. */
-+	load_gdt(&real_mode_gdt);
++	for (i = data_bytes + header_bytes - 1; i >= header_bytes; i--)
+ 		outb(data[i], CAT_DATA);
+ 
+-	for(i = header_bytes - 1; i >= 0; i--) {
++	for (i = header_bytes - 1; i >= 0; i--) {
+ 		__u8 header = 0;
+ 		__u8 input;
+ 
+@@ -376,7 +377,7 @@ cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
+ 		CDEBUG(("cat_shiftout: returned 0x%x\n", input));
+ 		cat_unpack(data, ((data_bytes + i) * BITS_PER_BYTE) - pad_bits,
+ 			   &header, BITS_PER_BYTE);
+-		if(input != header) {
++		if (input != header) {
+ 			CDEBUG(("VOYAGER CAT: cat_shiftout failed to return header 0x%x != 0x%x\n", input, header));
+ 			return 1;
+ 		}
+@@ -385,57 +386,57 @@ cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
+ }
+ 
+ static int
+-cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp, 
++cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
+ 	     __u8 reg, __u8 value)
+ {
+ 	outb(VOYAGER_CAT_DRCYC, CAT_CMD);
+-	if(!modp->scan_path_connected) {
+-		if(asicp->asic_id != VOYAGER_CAT_ID) {
++	if (!modp->scan_path_connected) {
++		if (asicp->asic_id != VOYAGER_CAT_ID) {
+ 			CDEBUG(("VOYAGER CAT: ERROR: scan path disconnected when asic != CAT\n"));
+ 			return 1;
+ 		}
+ 		outb(VOYAGER_CAT_HEADER, CAT_DATA);
+ 		outb(value, CAT_DATA);
+-		if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
++		if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
+ 			CDEBUG(("cat_senddata: failed to get correct header response to sent data\n"));
+ 			return 1;
+ 		}
+-		if(reg > VOYAGER_SUBADDRHI) {
++		if (reg > VOYAGER_SUBADDRHI) {
+ 			outb(VOYAGER_CAT_RUN, CAT_CMD);
+ 			outb(VOYAGER_CAT_END, CAT_CMD);
+ 			outb(VOYAGER_CAT_RUN, CAT_CMD);
+ 		}
+-		
 +
-+	/* Load the data segment registers, and thus the descriptors ready for
-+	   real mode.  The base address of each segment is 0x100, 16 times the
-+	   selector value being loaded here.  This is so that the segment
-+	   registers don't have to be reloaded after switching to real mode:
-+	   the values are consistent for real mode operation already. */
-+	__asm__ __volatile__ ("movl $0x0010,%%eax\n"
-+				"\tmovl %%eax,%%ds\n"
-+				"\tmovl %%eax,%%es\n"
-+				"\tmovl %%eax,%%fs\n"
-+				"\tmovl %%eax,%%gs\n"
-+				"\tmovl %%eax,%%ss" : : : "eax");
+ 		return 0;
+-	}
+-	else {
++	} else {
+ 		__u16 hbytes = asicp->ireg_length / BITS_PER_BYTE;
+-		__u16 dbytes = (modp->num_asics - 1 + asicp->ireg_length)/BITS_PER_BYTE;
+-		__u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH], 
+-			hseq[VOYAGER_MAX_REG_SIZE];
++		__u16 dbytes =
++		    (modp->num_asics - 1 + asicp->ireg_length) / BITS_PER_BYTE;
++		__u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH],
++		    hseq[VOYAGER_MAX_REG_SIZE];
+ 		int i;
+ 
+-		if((padbits = (modp->num_asics - 1 
+-			       + asicp->ireg_length) % BITS_PER_BYTE) != 0) {
++		if ((padbits = (modp->num_asics - 1
++				+ asicp->ireg_length) % BITS_PER_BYTE) != 0) {
+ 			padbits = BITS_PER_BYTE - padbits;
+ 			dbytes++;
+ 		}
+-		if(asicp->ireg_length % BITS_PER_BYTE)
++		if (asicp->ireg_length % BITS_PER_BYTE)
+ 			hbytes++;
+-		
 +
-+	/* Jump to the 16-bit code that we copied earlier.  It disables paging
-+	   and the cache, switches to real mode, and jumps to the BIOS reset
-+	   entry point. */
-+	__asm__ __volatile__ ("ljmp $0x0008,%0"
-+				:
-+				: "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
-+}
-+#ifdef CONFIG_APM_MODULE
-+EXPORT_SYMBOL(machine_real_restart);
-+#endif
+ 		cat_build_header(hseq, hbytes, 1, asicp->ireg_length);
+-		
+-		for(i = 0; i < dbytes + hbytes; i++)
 +
-+#endif /* CONFIG_X86_32 */
++		for (i = 0; i < dbytes + hbytes; i++)
+ 			dseq[i] = 0xff;
+ 		CDEBUG(("cat_senddata: dbytes=%d, hbytes=%d, padbits=%d\n",
+ 			dbytes, hbytes, padbits));
+ 		cat_pack(dseq, modp->num_asics - 1 + asicp->ireg_length,
+ 			 hseq, hbytes * BITS_PER_BYTE);
+-		cat_pack(dseq, asicp->asic_location, &value, 
++		cat_pack(dseq, asicp->asic_location, &value,
+ 			 asicp->ireg_length);
+ #ifdef VOYAGER_CAT_DEBUG
+ 		printk("dseq ");
+-		for(i=0; i<hbytes+dbytes; i++) {
++		for (i = 0; i < hbytes + dbytes; i++) {
+ 			printk("0x%x ", dseq[i]);
+ 		}
+ 		printk("\n");
+@@ -445,121 +446,125 @@ cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp,
+ }
+ 
+ static int
+-cat_write(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
+-	 __u8 value)
++cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 value)
+ {
+-	if(cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG))
++	if (cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG))
+ 		return 1;
+ 	return cat_senddata(modp, asicp, reg, value);
+ }
+ 
+ static int
+-cat_read(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
+-	 __u8 *value)
++cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
++	 __u8 * value)
+ {
+-	if(cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG))
++	if (cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG))
+ 		return 1;
+ 	return cat_getdata(modp, asicp, reg, value);
+ }
+ 
+ static int
+-cat_subaddrsetup(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
++cat_subaddrsetup(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
+ 		 __u16 len)
+ {
+ 	__u8 val;
+ 
+-	if(len > 1) {
++	if (len > 1) {
+ 		/* set auto increment */
+ 		__u8 newval;
+-		
+-		if(cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) {
 +
-+static inline void kb_wait(void)
-+{
-+	int i;
++		if (cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) {
+ 			CDEBUG(("cat_subaddrsetup: read of VOYAGER_AUTO_INC_REG failed\n"));
+ 			return 1;
+ 		}
+-		CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n", val));
++		CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n",
++			val));
+ 		newval = val | VOYAGER_AUTO_INC;
+-		if(newval != val) {
+-			if(cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, val)) {
++		if (newval != val) {
++			if (cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, val)) {
+ 				CDEBUG(("cat_subaddrsetup: write to VOYAGER_AUTO_INC_REG failed\n"));
+ 				return 1;
+ 			}
+ 		}
+ 	}
+-	if(cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8)(offset &0xff))) {
++	if (cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8) (offset & 0xff))) {
+ 		CDEBUG(("cat_subaddrsetup: write to SUBADDRLO failed\n"));
+ 		return 1;
+ 	}
+-	if(asicp->subaddr > VOYAGER_SUBADDR_LO) {
+-		if(cat_write(modp, asicp, VOYAGER_SUBADDRHI, (__u8)(offset >> 8))) {
++	if (asicp->subaddr > VOYAGER_SUBADDR_LO) {
++		if (cat_write
++		    (modp, asicp, VOYAGER_SUBADDRHI, (__u8) (offset >> 8))) {
+ 			CDEBUG(("cat_subaddrsetup: write to SUBADDRHI failed\n"));
+ 			return 1;
+ 		}
+ 		cat_read(modp, asicp, VOYAGER_SUBADDRHI, &val);
+-		CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset, val));
++		CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset,
++			val));
+ 	}
+ 	cat_read(modp, asicp, VOYAGER_SUBADDRLO, &val);
+ 	CDEBUG(("cat_subaddrsetup: offset = %d, lo = %d\n", offset, val));
+ 	return 0;
+ }
+-		
 +
-+	for (i = 0; i < 0x10000; i++) {
-+		if ((inb(0x64) & 0x02) == 0)
-+			break;
-+		udelay(2);
-+	}
-+}
+ static int
+-cat_subwrite(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
+-	    __u16 len, void *buf)
++cat_subwrite(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
++	     __u16 len, void *buf)
+ {
+ 	int i, retval;
+ 
+ 	/* FIXME: need special actions for VOYAGER_CAT_ID here */
+-	if(asicp->asic_id == VOYAGER_CAT_ID) {
++	if (asicp->asic_id == VOYAGER_CAT_ID) {
+ 		CDEBUG(("cat_subwrite: ATTEMPT TO WRITE TO CAT ASIC\n"));
+ 		/* FIXME -- This is supposed to be handled better
+ 		 * There is a problem writing to the cat asic in the
+ 		 * PSI.  The 30us delay seems to work, though */
+ 		udelay(30);
+ 	}
+-		
+-	if((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
 +
-+void machine_emergency_restart(void)
-+{
-+	int i;
++	if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
+ 		printk("cat_subwrite: cat_subaddrsetup FAILED\n");
+ 		return retval;
+ 	}
+-	
+-	if(cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) {
 +
-+	/* Tell the BIOS if we want cold or warm reboot */
-+	*((unsigned short *)__va(0x472)) = reboot_mode;
++	if (cat_sendinst
++	    (modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) {
+ 		printk("cat_subwrite: cat_sendinst FAILED\n");
+ 		return 1;
+ 	}
+-	for(i = 0; i < len; i++) {
+-		if(cat_senddata(modp, asicp, 0xFF, ((__u8 *)buf)[i])) {
+-			printk("cat_subwrite: cat_sendata element at %d FAILED\n", i);
++	for (i = 0; i < len; i++) {
++		if (cat_senddata(modp, asicp, 0xFF, ((__u8 *) buf)[i])) {
++			printk
++			    ("cat_subwrite: cat_sendata element at %d FAILED\n",
++			     i);
+ 			return 1;
+ 		}
+ 	}
+ 	return 0;
+ }
+ static int
+-cat_subread(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
++cat_subread(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
+ 	    __u16 len, void *buf)
+ {
+ 	int i, retval;
+ 
+-	if((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
++	if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
+ 		CDEBUG(("cat_subread: cat_subaddrsetup FAILED\n"));
+ 		return retval;
+ 	}
+ 
+-	if(cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) {
++	if (cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) {
+ 		CDEBUG(("cat_subread: cat_sendinst failed\n"));
+ 		return 1;
+ 	}
+-	for(i = 0; i < len; i++) {
+-		if(cat_getdata(modp, asicp, 0xFF,
+-			       &((__u8 *)buf)[i])) {
+-			CDEBUG(("cat_subread: cat_getdata element %d failed\n", i));
++	for (i = 0; i < len; i++) {
++		if (cat_getdata(modp, asicp, 0xFF, &((__u8 *) buf)[i])) {
++			CDEBUG(("cat_subread: cat_getdata element %d failed\n",
++				i));
+ 			return 1;
+ 		}
+ 	}
+ 	return 0;
+ }
+ 
+-
+ /* buffer for storing EPROM data read in during initialisation */
+ static __initdata __u8 eprom_buf[0xFFFF];
+ static voyager_module_t *voyager_initial_module;
+@@ -568,8 +573,7 @@ static voyager_module_t *voyager_initial_module;
+  * boot cpu *after* all memory initialisation has been done (so we can
+  * use kmalloc) but before smp initialisation, so we can probe the SMP
+  * configuration and pick up necessary information.  */
+-void __init
+-voyager_cat_init(void)
++void __init voyager_cat_init(void)
+ {
+ 	voyager_module_t **modpp = &voyager_initial_module;
+ 	voyager_asic_t **asicpp;
+@@ -578,27 +582,29 @@ voyager_cat_init(void)
+ 	unsigned long qic_addr = 0;
+ 	__u8 qabc_data[0x20];
+ 	__u8 num_submodules, val;
+-	voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *)&eprom_buf[0];
+-	
++	voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *) & eprom_buf[0];
 +
-+	for (;;) {
-+		/* Could also try the reset bit in the Hammer NB */
-+		switch (reboot_type) {
-+		case BOOT_KBD:
-+			for (i = 0; i < 10; i++) {
-+				kb_wait();
-+				udelay(50);
-+				outb(0xfe, 0x64); /* pulse reset low */
-+				udelay(50);
-+			}
+ 	__u8 cmos[4];
+ 	unsigned long addr;
+-	
 +
-+		case BOOT_TRIPLE:
-+			load_idt((const struct desc_ptr *)&no_idt);
-+			__asm__ __volatile__("int3");
+ 	/* initiallise the SUS mailbox */
+-	for(i=0; i<sizeof(cmos); i++)
++	for (i = 0; i < sizeof(cmos); i++)
+ 		cmos[i] = voyager_extended_cmos_read(VOYAGER_DUMP_LOCATION + i);
+ 	addr = *(unsigned long *)cmos;
+-	if((addr & 0xff000000) != 0xff000000) {
+-		printk(KERN_ERR "Voyager failed to get SUS mailbox (addr = 0x%lx\n", addr);
++	if ((addr & 0xff000000) != 0xff000000) {
++		printk(KERN_ERR
++		       "Voyager failed to get SUS mailbox (addr = 0x%lx\n",
++		       addr);
+ 	} else {
+ 		static struct resource res;
+-		
 +
-+			reboot_type = BOOT_KBD;
-+			break;
+ 		res.name = "voyager SUS";
+ 		res.start = addr;
+-		res.end = addr+0x3ff;
+-		
++		res.end = addr + 0x3ff;
 +
-+#ifdef CONFIG_X86_32
-+		case BOOT_BIOS:
-+			machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
+ 		request_resource(&iomem_resource, &res);
+ 		voyager_SUS = (struct voyager_SUS *)
+-			ioremap(addr, 0x400);
++		    ioremap(addr, 0x400);
+ 		printk(KERN_NOTICE "Voyager SUS mailbox version 0x%x\n",
+ 		       voyager_SUS->SUS_version);
+ 		voyager_SUS->kernel_version = VOYAGER_MAILBOX_VERSION;
+@@ -609,8 +615,6 @@ voyager_cat_init(void)
+ 	voyager_extended_vic_processors = 0;
+ 	voyager_quad_processors = 0;
+ 
+-
+-
+ 	printk("VOYAGER: beginning CAT bus probe\n");
+ 	/* set up the SuperSet Port Block which tells us where the
+ 	 * CAT communication port is */
+@@ -618,14 +622,14 @@ voyager_cat_init(void)
+ 	VDEBUG(("VOYAGER DEBUG: sspb = 0x%x\n", sspb));
+ 
+ 	/* now find out if were 8 slot or normal */
+-	if((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER)
+-	   == EIGHT_SLOT_IDENTIFIER) {
++	if ((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER)
++	    == EIGHT_SLOT_IDENTIFIER) {
+ 		voyager_8slot = 1;
+-		printk(KERN_NOTICE "Voyager: Eight slot 51xx configuration detected\n");
++		printk(KERN_NOTICE
++		       "Voyager: Eight slot 51xx configuration detected\n");
+ 	}
+ 
+-	for(i = VOYAGER_MIN_MODULE;
+-	    i <= VOYAGER_MAX_MODULE; i++) {
++	for (i = VOYAGER_MIN_MODULE; i <= VOYAGER_MAX_MODULE; i++) {
+ 		__u8 input;
+ 		int asic;
+ 		__u16 eprom_size;
+@@ -643,21 +647,21 @@ voyager_cat_init(void)
+ 		outb(0xAA, CAT_DATA);
+ 		input = inb(CAT_DATA);
+ 		outb(VOYAGER_CAT_END, CAT_CMD);
+-		if(input != VOYAGER_CAT_HEADER) {
++		if (input != VOYAGER_CAT_HEADER) {
+ 			continue;
+ 		}
+ 		CDEBUG(("VOYAGER DEBUG: found module id 0x%x, %s\n", i,
+ 			cat_module_name(i)));
+-		*modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL); /*&voyager_module_storage[cat_count++];*/
+-		if(*modpp == NULL) {
++		*modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL);	/*&voyager_module_storage[cat_count++]; */
++		if (*modpp == NULL) {
+ 			printk("**WARNING** kmalloc failure in cat_init\n");
+ 			continue;
+ 		}
+ 		memset(*modpp, 0, sizeof(voyager_module_t));
+ 		/* need temporary asic for cat_subread.  It will be
+ 		 * filled in correctly later */
+-		(*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count];*/
+-		if((*modpp)->asic == NULL) {
++		(*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL);	/*&voyager_asic_storage[asic_count]; */
++		if ((*modpp)->asic == NULL) {
+ 			printk("**WARNING** kmalloc failure in cat_init\n");
+ 			continue;
+ 		}
+@@ -666,47 +670,52 @@ voyager_cat_init(void)
+ 		(*modpp)->asic->subaddr = VOYAGER_SUBADDR_HI;
+ 		(*modpp)->module_addr = i;
+ 		(*modpp)->scan_path_connected = 0;
+-		if(i == VOYAGER_PSI) {
++		if (i == VOYAGER_PSI) {
+ 			/* Exception leg for modules with no EEPROM */
+ 			printk("Module \"%s\"\n", cat_module_name(i));
+ 			continue;
+ 		}
+-			       
 +
-+			reboot_type = BOOT_KBD;
-+			break;
-+#endif
+ 		CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
+ 		outb(VOYAGER_CAT_RUN, CAT_CMD);
+ 		cat_disconnect(*modpp, (*modpp)->asic);
+-		if(cat_subread(*modpp, (*modpp)->asic,
+-			       VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
+-			       &eprom_size)) {
+-			printk("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n", i);
++		if (cat_subread(*modpp, (*modpp)->asic,
++				VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
++				&eprom_size)) {
++			printk
++			    ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
++			     i);
+ 			outb(VOYAGER_CAT_END, CAT_CMD);
+ 			continue;
+ 		}
+-		if(eprom_size > sizeof(eprom_buf)) {
+-			printk("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x.  Need %d\n", i, eprom_size);
++		if (eprom_size > sizeof(eprom_buf)) {
++			printk
++			    ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x.  Need %d\n",
++			     i, eprom_size);
+ 			outb(VOYAGER_CAT_END, CAT_CMD);
+ 			continue;
+ 		}
+ 		outb(VOYAGER_CAT_END, CAT_CMD);
+ 		outb(VOYAGER_CAT_RUN, CAT_CMD);
+-		CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i, eprom_size));
+-		if(cat_subread(*modpp, (*modpp)->asic, 0, 
+-			       eprom_size, eprom_buf)) {
++		CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
++			eprom_size));
++		if (cat_subread
++		    (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
+ 			outb(VOYAGER_CAT_END, CAT_CMD);
+ 			continue;
+ 		}
+ 		outb(VOYAGER_CAT_END, CAT_CMD);
+ 		printk("Module \"%s\", version 0x%x, tracer 0x%x, asics %d\n",
+ 		       cat_module_name(i), eprom_hdr->version_id,
+-		       *((__u32 *)eprom_hdr->tracer),  eprom_hdr->num_asics);
++		       *((__u32 *) eprom_hdr->tracer), eprom_hdr->num_asics);
+ 		(*modpp)->ee_size = eprom_hdr->ee_size;
+ 		(*modpp)->num_asics = eprom_hdr->num_asics;
+ 		asicpp = &((*modpp)->asic);
+ 		sp_offset = eprom_hdr->scan_path_offset;
+ 		/* All we really care about are the Quad cards.  We
+-                 * identify them because they are in a processor slot
+-                 * and have only four asics */
+-		if((i < 0x10 || (i>=0x14 && i < 0x1c) || i>0x1f)) {
++		 * identify them because they are in a processor slot
++		 * and have only four asics */
++		if ((i < 0x10 || (i >= 0x14 && i < 0x1c) || i > 0x1f)) {
+ 			modpp = &((*modpp)->next);
+ 			continue;
+ 		}
+@@ -717,16 +726,17 @@ voyager_cat_init(void)
+ 			 &num_submodules);
+ 		/* lowest two bits, active low */
+ 		num_submodules = ~(0xfc | num_submodules);
+-		CDEBUG(("VOYAGER CAT: %d submodules present\n", num_submodules));
+-		if(num_submodules == 0) {
++		CDEBUG(("VOYAGER CAT: %d submodules present\n",
++			num_submodules));
++		if (num_submodules == 0) {
+ 			/* fill in the dyadic extended processors */
+ 			__u8 cpu = i & 0x07;
+ 
+ 			printk("Module \"%s\": Dyadic Processor Card\n",
+ 			       cat_module_name(i));
+-			voyager_extended_vic_processors |= (1<<cpu);
++			voyager_extended_vic_processors |= (1 << cpu);
+ 			cpu += 4;
+-			voyager_extended_vic_processors |= (1<<cpu);
++			voyager_extended_vic_processors |= (1 << cpu);
+ 			outb(VOYAGER_CAT_END, CAT_CMD);
+ 			continue;
+ 		}
+@@ -740,28 +750,32 @@ voyager_cat_init(void)
+ 		cat_write(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, val);
+ 
+ 		outb(VOYAGER_CAT_END, CAT_CMD);
+-			 
+ 
+ 		CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
+ 		outb(VOYAGER_CAT_RUN, CAT_CMD);
+ 		cat_disconnect(*modpp, (*modpp)->asic);
+-		if(cat_subread(*modpp, (*modpp)->asic,
+-			       VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
+-			       &eprom_size)) {
+-			printk("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n", i);
++		if (cat_subread(*modpp, (*modpp)->asic,
++				VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
++				&eprom_size)) {
++			printk
++			    ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
++			     i);
+ 			outb(VOYAGER_CAT_END, CAT_CMD);
+ 			continue;
+ 		}
+-		if(eprom_size > sizeof(eprom_buf)) {
+-			printk("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x.  Need %d\n", i, eprom_size);
++		if (eprom_size > sizeof(eprom_buf)) {
++			printk
++			    ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x.  Need %d\n",
++			     i, eprom_size);
+ 			outb(VOYAGER_CAT_END, CAT_CMD);
+ 			continue;
+ 		}
+ 		outb(VOYAGER_CAT_END, CAT_CMD);
+ 		outb(VOYAGER_CAT_RUN, CAT_CMD);
+-		CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i, eprom_size));
+-		if(cat_subread(*modpp, (*modpp)->asic, 0, 
+-			       eprom_size, eprom_buf)) {
++		CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
++			eprom_size));
++		if (cat_subread
++		    (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
+ 			outb(VOYAGER_CAT_END, CAT_CMD);
+ 			continue;
+ 		}
+@@ -773,30 +787,35 @@ voyager_cat_init(void)
+ 		sp_offset = eprom_hdr->scan_path_offset;
+ 		/* get rid of the dummy CAT asic and read the real one */
+ 		kfree((*modpp)->asic);
+-		for(asic=0; asic < (*modpp)->num_asics; asic++) {
++		for (asic = 0; asic < (*modpp)->num_asics; asic++) {
+ 			int j;
+-			voyager_asic_t *asicp = *asicpp 
+-				= kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
++			voyager_asic_t *asicp = *asicpp = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL);	/*&voyager_asic_storage[asic_count++]; */
+ 			voyager_sp_table_t *sp_table;
+ 			voyager_at_t *asic_table;
+ 			voyager_jtt_t *jtag_table;
+ 
+-			if(asicp == NULL) {
+-				printk("**WARNING** kmalloc failure in cat_init\n");
++			if (asicp == NULL) {
++				printk
++				    ("**WARNING** kmalloc failure in cat_init\n");
+ 				continue;
+ 			}
+ 			asicpp = &(asicp->next);
+ 			asicp->asic_location = asic;
+-			sp_table = (voyager_sp_table_t *)(eprom_buf + sp_offset);
++			sp_table =
++			    (voyager_sp_table_t *) (eprom_buf + sp_offset);
+ 			asicp->asic_id = sp_table->asic_id;
+-			asic_table = (voyager_at_t *)(eprom_buf + sp_table->asic_data_offset);
+-			for(j=0; j<4; j++)
++			asic_table =
++			    (voyager_at_t *) (eprom_buf +
++					      sp_table->asic_data_offset);
++			for (j = 0; j < 4; j++)
+ 				asicp->jtag_id[j] = asic_table->jtag_id[j];
+-			jtag_table = (voyager_jtt_t *)(eprom_buf + asic_table->jtag_offset);
++			jtag_table =
++			    (voyager_jtt_t *) (eprom_buf +
++					       asic_table->jtag_offset);
+ 			asicp->ireg_length = jtag_table->ireg_len;
+ 			asicp->bit_location = (*modpp)->inst_bits;
+ 			(*modpp)->inst_bits += asicp->ireg_length;
+-			if(asicp->ireg_length > (*modpp)->largest_reg)
++			if (asicp->ireg_length > (*modpp)->largest_reg)
+ 				(*modpp)->largest_reg = asicp->ireg_length;
+ 			if (asicp->ireg_length < (*modpp)->smallest_reg ||
+ 			    (*modpp)->smallest_reg == 0)
+@@ -804,15 +823,13 @@ voyager_cat_init(void)
+ 			CDEBUG(("asic 0x%x, ireg_length=%d, bit_location=%d\n",
+ 				asicp->asic_id, asicp->ireg_length,
+ 				asicp->bit_location));
+-			if(asicp->asic_id == VOYAGER_QUAD_QABC) {
++			if (asicp->asic_id == VOYAGER_QUAD_QABC) {
+ 				CDEBUG(("VOYAGER CAT: QABC ASIC found\n"));
+ 				qabc_asic = asicp;
+ 			}
+ 			sp_offset += sizeof(voyager_sp_table_t);
+ 		}
+-		CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n",
+-			(*modpp)->inst_bits, (*modpp)->largest_reg,
+-			(*modpp)->smallest_reg));
++		CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n", (*modpp)->inst_bits, (*modpp)->largest_reg, (*modpp)->smallest_reg));
+ 		/* OK, now we have the QUAD ASICs set up, use them.
+ 		 * we need to:
+ 		 *
+@@ -828,10 +845,11 @@ voyager_cat_init(void)
+ 		qic_addr = qabc_data[5] << 8;
+ 		qic_addr = (qic_addr | qabc_data[6]) << 8;
+ 		qic_addr = (qic_addr | qabc_data[7]) << 8;
+-		printk("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n",
+-		       cat_module_name(i), qic_addr, qabc_data[8]);
++		printk
++		    ("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n",
++		     cat_module_name(i), qic_addr, qabc_data[8]);
+ #if 0				/* plumbing fails---FIXME */
+-		if((qabc_data[8] & 0xf0) == 0) {
++		if ((qabc_data[8] & 0xf0) == 0) {
+ 			/* FIXME: 32 way 8 CPU slot monster cannot be
+ 			 * plumbed this way---need to check for it */
+ 
+@@ -842,94 +860,97 @@ voyager_cat_init(void)
+ #ifdef VOYAGER_CAT_DEBUG
+ 			/* verify plumbing */
+ 			cat_subread(*modpp, qabc_asic, 8, 1, &qabc_data[8]);
+-			if((qabc_data[8] & 0xf0) == 0) {
+-				CDEBUG(("PLUMBING FAILED: 0x%x\n", qabc_data[8]));
++			if ((qabc_data[8] & 0xf0) == 0) {
++				CDEBUG(("PLUMBING FAILED: 0x%x\n",
++					qabc_data[8]));
+ 			}
+ #endif
+ 		}
+ #endif
+ 
+ 		{
+-			struct resource *res = kzalloc(sizeof(struct resource),GFP_KERNEL);
++			struct resource *res =
++			    kzalloc(sizeof(struct resource), GFP_KERNEL);
+ 			res->name = kmalloc(128, GFP_KERNEL);
+-			sprintf((char *)res->name, "Voyager %s Quad CPI", cat_module_name(i));
++			sprintf((char *)res->name, "Voyager %s Quad CPI",
++				cat_module_name(i));
+ 			res->start = qic_addr;
+ 			res->end = qic_addr + 0x3ff;
+ 			request_resource(&iomem_resource, res);
+ 		}
+ 
+ 		qic_addr = (unsigned long)ioremap(qic_addr, 0x400);
+-				
+-		for(j = 0; j < 4; j++) {
 +
-+		case BOOT_ACPI:
-+			acpi_reboot();
-+			reboot_type = BOOT_KBD;
-+			break;
++		for (j = 0; j < 4; j++) {
+ 			__u8 cpu;
+ 
+-			if(voyager_8slot) {
++			if (voyager_8slot) {
+ 				/* 8 slot has a different mapping,
+ 				 * each slot has only one vic line, so
+ 				 * 1 cpu in each slot must be < 8 */
+-				cpu = (i & 0x07) + j*8;
++				cpu = (i & 0x07) + j * 8;
+ 			} else {
+-				cpu = (i & 0x03) + j*4;
++				cpu = (i & 0x03) + j * 4;
+ 			}
+-			if( (qabc_data[8] & (1<<j))) {
+-				voyager_extended_vic_processors |= (1<<cpu);
++			if ((qabc_data[8] & (1 << j))) {
++				voyager_extended_vic_processors |= (1 << cpu);
+ 			}
+-			if(qabc_data[8] & (1<<(j+4)) ) {
++			if (qabc_data[8] & (1 << (j + 4))) {
+ 				/* Second SET register plumbed: Quad
+ 				 * card has two VIC connected CPUs.
+ 				 * Secondary cannot be booted as a VIC
+ 				 * CPU */
+-				voyager_extended_vic_processors |= (1<<cpu);
+-				voyager_allowed_boot_processors &= (~(1<<cpu));
++				voyager_extended_vic_processors |= (1 << cpu);
++				voyager_allowed_boot_processors &=
++				    (~(1 << cpu));
+ 			}
+ 
+-			voyager_quad_processors |= (1<<cpu);
++			voyager_quad_processors |= (1 << cpu);
+ 			voyager_quad_cpi_addr[cpu] = (struct voyager_qic_cpi *)
+-				(qic_addr+(j<<8));
++			    (qic_addr + (j << 8));
+ 			CDEBUG(("CPU%d: CPI address 0x%lx\n", cpu,
+ 				(unsigned long)voyager_quad_cpi_addr[cpu]));
+ 		}
+ 		outb(VOYAGER_CAT_END, CAT_CMD);
+ 
+-		
+-		
+ 		*asicpp = NULL;
+ 		modpp = &((*modpp)->next);
+ 	}
+ 	*modpp = NULL;
+-	printk("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n", voyager_extended_vic_processors, voyager_quad_processors, voyager_allowed_boot_processors);
++	printk
++	    ("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n",
++	     voyager_extended_vic_processors, voyager_quad_processors,
++	     voyager_allowed_boot_processors);
+ 	request_resource(&ioport_resource, &vic_res);
+-	if(voyager_quad_processors)
++	if (voyager_quad_processors)
+ 		request_resource(&ioport_resource, &qic_res);
+ 	/* set up the front power switch */
+ }
+ 
+-int
+-voyager_cat_readb(__u8 module, __u8 asic, int reg)
++int voyager_cat_readb(__u8 module, __u8 asic, int reg)
+ {
+ 	return 0;
+ }
+ 
+-static int
+-cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp) 
++static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp)
+ {
+ 	__u8 val;
+ 	int err = 0;
+ 
+-	if(!modp->scan_path_connected)
++	if (!modp->scan_path_connected)
+ 		return 0;
+-	if(asicp->asic_id != VOYAGER_CAT_ID) {
++	if (asicp->asic_id != VOYAGER_CAT_ID) {
+ 		CDEBUG(("cat_disconnect: ASIC is not CAT\n"));
+ 		return 1;
+ 	}
+ 	err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
+-	if(err) {
++	if (err) {
+ 		CDEBUG(("cat_disconnect: failed to read SCANPATH\n"));
+ 		return err;
+ 	}
+ 	val &= VOYAGER_DISCONNECT_ASIC;
+ 	err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
+-	if(err) {
++	if (err) {
+ 		CDEBUG(("cat_disconnect: failed to write SCANPATH\n"));
+ 		return err;
+ 	}
+@@ -940,27 +961,26 @@ cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp)
+ 	return 0;
+ }
+ 
+-static int
+-cat_connect(voyager_module_t *modp, voyager_asic_t *asicp) 
++static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp)
+ {
+ 	__u8 val;
+ 	int err = 0;
+ 
+-	if(modp->scan_path_connected)
++	if (modp->scan_path_connected)
+ 		return 0;
+-	if(asicp->asic_id != VOYAGER_CAT_ID) {
++	if (asicp->asic_id != VOYAGER_CAT_ID) {
+ 		CDEBUG(("cat_connect: ASIC is not CAT\n"));
+ 		return 1;
+ 	}
+ 
+ 	err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
+-	if(err) {
++	if (err) {
+ 		CDEBUG(("cat_connect: failed to read SCANPATH\n"));
+ 		return err;
+ 	}
+ 	val |= VOYAGER_CONNECT_ASIC;
+ 	err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
+-	if(err) {
++	if (err) {
+ 		CDEBUG(("cat_connect: failed to write SCANPATH\n"));
+ 		return err;
+ 	}
+@@ -971,11 +991,10 @@ cat_connect(voyager_module_t *modp, voyager_asic_t *asicp)
+ 	return 0;
+ }
+ 
+-void
+-voyager_cat_power_off(void)
++void voyager_cat_power_off(void)
+ {
+ 	/* Power the machine off by writing to the PSI over the CAT
+-         * bus */
++	 * bus */
+ 	__u8 data;
+ 	voyager_module_t psi = { 0 };
+ 	voyager_asic_t psi_asic = { 0 };
+@@ -1009,8 +1028,7 @@ voyager_cat_power_off(void)
+ 
+ struct voyager_status voyager_status = { 0 };
+ 
+-void
+-voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
++void voyager_cat_psi(__u8 cmd, __u16 reg, __u8 * data)
+ {
+ 	voyager_module_t psi = { 0 };
+ 	voyager_asic_t psi_asic = { 0 };
+@@ -1027,7 +1045,7 @@ voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
+ 	outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
+ 	outb(VOYAGER_CAT_RUN, CAT_CMD);
+ 	cat_disconnect(&psi, &psi_asic);
+-	switch(cmd) {
++	switch (cmd) {
+ 	case VOYAGER_PSI_READ:
+ 		cat_read(&psi, &psi_asic, reg, data);
+ 		break;
+@@ -1047,8 +1065,7 @@ voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
+ 	outb(VOYAGER_CAT_END, CAT_CMD);
+ }
+ 
+-void
+-voyager_cat_do_common_interrupt(void)
++void voyager_cat_do_common_interrupt(void)
+ {
+ 	/* This is caused either by a memory parity error or something
+ 	 * in the PSI */
+@@ -1057,7 +1074,7 @@ voyager_cat_do_common_interrupt(void)
+ 	voyager_asic_t psi_asic = { 0 };
+ 	struct voyager_psi psi_reg;
+ 	int i;
+- re_read:
++      re_read:
+ 	psi.asic = &psi_asic;
+ 	psi.asic->asic_id = VOYAGER_CAT_ID;
+ 	psi.asic->subaddr = VOYAGER_SUBADDR_HI;
+@@ -1072,43 +1089,45 @@ voyager_cat_do_common_interrupt(void)
+ 	cat_disconnect(&psi, &psi_asic);
+ 	/* Read the status.  NOTE: Need to read *all* the PSI regs here
+ 	 * otherwise the cmn int will be reasserted */
+-	for(i = 0; i < sizeof(psi_reg.regs); i++) {
+-		cat_read(&psi, &psi_asic, i, &((__u8 *)&psi_reg.regs)[i]);
++	for (i = 0; i < sizeof(psi_reg.regs); i++) {
++		cat_read(&psi, &psi_asic, i, &((__u8 *) & psi_reg.regs)[i]);
+ 	}
+ 	outb(VOYAGER_CAT_END, CAT_CMD);
+-	if((psi_reg.regs.checkbit & 0x02) == 0) {
++	if ((psi_reg.regs.checkbit & 0x02) == 0) {
+ 		psi_reg.regs.checkbit |= 0x02;
+ 		cat_write(&psi, &psi_asic, 5, psi_reg.regs.checkbit);
+ 		printk("VOYAGER RE-READ PSI\n");
+ 		goto re_read;
+ 	}
+ 	outb(VOYAGER_CAT_RUN, CAT_CMD);
+-	for(i = 0; i < sizeof(psi_reg.subregs); i++) {
++	for (i = 0; i < sizeof(psi_reg.subregs); i++) {
+ 		/* This looks strange, but the PSI doesn't do auto increment
+ 		 * correctly */
+-		cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i, 
+-			    1, &((__u8 *)&psi_reg.subregs)[i]); 
++		cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i,
++			    1, &((__u8 *) & psi_reg.subregs)[i]);
+ 	}
+ 	outb(VOYAGER_CAT_END, CAT_CMD);
+ #ifdef VOYAGER_CAT_DEBUG
+ 	printk("VOYAGER PSI: ");
+-	for(i=0; i<sizeof(psi_reg.regs); i++)
+-		printk("%02x ", ((__u8 *)&psi_reg.regs)[i]);
++	for (i = 0; i < sizeof(psi_reg.regs); i++)
++		printk("%02x ", ((__u8 *) & psi_reg.regs)[i]);
+ 	printk("\n           ");
+-	for(i=0; i<sizeof(psi_reg.subregs); i++)
+-		printk("%02x ", ((__u8 *)&psi_reg.subregs)[i]);
++	for (i = 0; i < sizeof(psi_reg.subregs); i++)
++		printk("%02x ", ((__u8 *) & psi_reg.subregs)[i]);
+ 	printk("\n");
+ #endif
+-	if(psi_reg.regs.intstatus & PSI_MON) {
++	if (psi_reg.regs.intstatus & PSI_MON) {
+ 		/* switch off or power fail */
+ 
+-		if(psi_reg.subregs.supply & PSI_SWITCH_OFF) {
+-			if(voyager_status.switch_off) {
+-				printk(KERN_ERR "Voyager front panel switch turned off again---Immediate power off!\n");
++		if (psi_reg.subregs.supply & PSI_SWITCH_OFF) {
++			if (voyager_status.switch_off) {
++				printk(KERN_ERR
++				       "Voyager front panel switch turned off again---Immediate power off!\n");
+ 				voyager_cat_power_off();
+ 				/* not reached */
+ 			} else {
+-				printk(KERN_ERR "Voyager front panel switch turned off\n");
++				printk(KERN_ERR
++				       "Voyager front panel switch turned off\n");
+ 				voyager_status.switch_off = 1;
+ 				voyager_status.request_from_kernel = 1;
+ 				wake_up_process(voyager_thread);
+@@ -1127,7 +1146,7 @@ voyager_cat_do_common_interrupt(void)
+ 
+ 			VDEBUG(("Voyager ac fail reg 0x%x\n",
+ 				psi_reg.subregs.ACfail));
+-			if((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) {
++			if ((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) {
+ 				/* No further update */
+ 				return;
+ 			}
+@@ -1135,20 +1154,20 @@ voyager_cat_do_common_interrupt(void)
+ 			/* Don't bother trying to find out who failed.
+ 			 * FIXME: This probably makes the code incorrect on
+ 			 * anything other than a 345x */
+-			for(i=0; i< 5; i++) {
+-				if( psi_reg.subregs.ACfail &(1<<i)) {
++			for (i = 0; i < 5; i++) {
++				if (psi_reg.subregs.ACfail & (1 << i)) {
+ 					break;
+ 				}
+ 			}
+ 			printk(KERN_NOTICE "AC FAIL IN SUPPLY %d\n", i);
+ #endif
+ 			/* DON'T do this: it shuts down the AC PSI 
+-			outb(VOYAGER_CAT_RUN, CAT_CMD);
+-			data = PSI_MASK_MASK | i;
+-			cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK,
+-				     1, &data);
+-			outb(VOYAGER_CAT_END, CAT_CMD);
+-			*/
++			   outb(VOYAGER_CAT_RUN, CAT_CMD);
++			   data = PSI_MASK_MASK | i;
++			   cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK,
++			   1, &data);
++			   outb(VOYAGER_CAT_END, CAT_CMD);
++			 */
+ 			printk(KERN_ERR "Voyager AC power failure\n");
+ 			outb(VOYAGER_CAT_RUN, CAT_CMD);
+ 			data = PSI_COLD_START;
+@@ -1159,16 +1178,16 @@ voyager_cat_do_common_interrupt(void)
+ 			voyager_status.request_from_kernel = 1;
+ 			wake_up_process(voyager_thread);
+ 		}
+-		
+-		
+-	} else if(psi_reg.regs.intstatus & PSI_FAULT) {
 +
++	} else if (psi_reg.regs.intstatus & PSI_FAULT) {
+ 		/* Major fault! */
+-		printk(KERN_ERR "Voyager PSI Detected major fault, immediate power off!\n");
++		printk(KERN_ERR
++		       "Voyager PSI Detected major fault, immediate power off!\n");
+ 		voyager_cat_power_off();
+ 		/* not reached */
+-	} else if(psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM
+-					    | PSI_CURRENT | PSI_DVM
+-					    | PSI_PSCFAULT | PSI_STAT_CHG)) {
++	} else if (psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM
++					     | PSI_CURRENT | PSI_DVM
++					     | PSI_PSCFAULT | PSI_STAT_CHG)) {
+ 		/* other psi fault */
+ 
+ 		printk(KERN_WARNING "Voyager PSI status 0x%x\n", data);
+diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
+index 88124dd..dffa786 100644
+--- a/arch/x86/mach-voyager/voyager_smp.c
++++ b/arch/x86/mach-voyager/voyager_smp.c
+@@ -32,7 +32,8 @@
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
+ 
+ /* CPU IRQ affinity -- set to all ones initially */
+-static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1]  = ~0UL };
++static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
++	{[0 ... NR_CPUS-1]  = ~0UL };
+ 
+ /* per CPU data structure (for /proc/cpuinfo et al), visible externally
+  * indexed physically */
+@@ -76,7 +77,6 @@ EXPORT_SYMBOL(cpu_online_map);
+  * by scheduler but indexed physically */
+ cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+ 
+-
+ /* The internal functions */
+ static void send_CPI(__u32 cpuset, __u8 cpi);
+ static void ack_CPI(__u8 cpi);
+@@ -101,94 +101,86 @@ int hard_smp_processor_id(void);
+ int safe_smp_processor_id(void);
+ 
+ /* Inline functions */
+-static inline void
+-send_one_QIC_CPI(__u8 cpu, __u8 cpi)
++static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
+ {
+ 	voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
+-		(smp_processor_id() << 16) + cpi;
++	    (smp_processor_id() << 16) + cpi;
+ }
+ 
+-static inline void
+-send_QIC_CPI(__u32 cpuset, __u8 cpi)
++static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
+ {
+ 	int cpu;
+ 
+ 	for_each_online_cpu(cpu) {
+-		if(cpuset & (1<<cpu)) {
++		if (cpuset & (1 << cpu)) {
+ #ifdef VOYAGER_DEBUG
+-			if(!cpu_isset(cpu, cpu_online_map))
+-				VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu));
++			if (!cpu_isset(cpu, cpu_online_map))
++				VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
++					"cpu_online_map\n",
++					hard_smp_processor_id(), cpi, cpu));
+ #endif
+ 			send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
+ 		}
+ 	}
+ }
+ 
+-static inline void
+-wrapper_smp_local_timer_interrupt(void)
++static inline void wrapper_smp_local_timer_interrupt(void)
+ {
+ 	irq_enter();
+ 	smp_local_timer_interrupt();
+ 	irq_exit();
+ }
+ 
+-static inline void
+-send_one_CPI(__u8 cpu, __u8 cpi)
++static inline void send_one_CPI(__u8 cpu, __u8 cpi)
+ {
+-	if(voyager_quad_processors & (1<<cpu))
++	if (voyager_quad_processors & (1 << cpu))
+ 		send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
+ 	else
+-		send_CPI(1<<cpu, cpi);
++		send_CPI(1 << cpu, cpi);
+ }
+ 
+-static inline void
+-send_CPI_allbutself(__u8 cpi)
++static inline void send_CPI_allbutself(__u8 cpi)
+ {
+ 	__u8 cpu = smp_processor_id();
+ 	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
+ 	send_CPI(mask, cpi);
+ }
+ 
+-static inline int
+-is_cpu_quad(void)
++static inline int is_cpu_quad(void)
+ {
+ 	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
+ 	return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
+ }
+ 
+-static inline int
+-is_cpu_extended(void)
++static inline int is_cpu_extended(void)
+ {
+ 	__u8 cpu = hard_smp_processor_id();
+ 
+-	return(voyager_extended_vic_processors & (1<<cpu));
++	return (voyager_extended_vic_processors & (1 << cpu));
+ }
+ 
+-static inline int
+-is_cpu_vic_boot(void)
++static inline int is_cpu_vic_boot(void)
+ {
+ 	__u8 cpu = hard_smp_processor_id();
+ 
+-	return(voyager_extended_vic_processors
+-	       & voyager_allowed_boot_processors & (1<<cpu));
++	return (voyager_extended_vic_processors
++		& voyager_allowed_boot_processors & (1 << cpu));
+ }
+ 
+-
+-static inline void
+-ack_CPI(__u8 cpi)
++static inline void ack_CPI(__u8 cpi)
+ {
+-	switch(cpi) {
++	switch (cpi) {
+ 	case VIC_CPU_BOOT_CPI:
+-		if(is_cpu_quad() && !is_cpu_vic_boot())
++		if (is_cpu_quad() && !is_cpu_vic_boot())
+ 			ack_QIC_CPI(cpi);
+ 		else
+ 			ack_VIC_CPI(cpi);
+ 		break;
+ 	case VIC_SYS_INT:
+-	case VIC_CMN_INT: 
++	case VIC_CMN_INT:
+ 		/* These are slightly strange.  Even on the Quad card,
+ 		 * They are vectored as VIC CPIs */
+-		if(is_cpu_quad())
++		if (is_cpu_quad())
+ 			ack_special_QIC_CPI(cpi);
+ 		else
+ 			ack_VIC_CPI(cpi);
+@@ -205,11 +197,11 @@ ack_CPI(__u8 cpi)
+  * 8259 IRQs except that masks and things must be kept per processor
+  */
+ static struct irq_chip vic_chip = {
+-	.name		= "VIC",
+-	.startup	= startup_vic_irq,
+-	.mask		= mask_vic_irq,
+-	.unmask		= unmask_vic_irq,
+-	.set_affinity	= set_vic_irq_affinity,
++	.name = "VIC",
++	.startup = startup_vic_irq,
++	.mask = mask_vic_irq,
++	.unmask = unmask_vic_irq,
++	.set_affinity = set_vic_irq_affinity,
+ };
+ 
+ /* used to count up as CPUs are brought on line (starts at 0) */
+@@ -223,7 +215,7 @@ static __u32 trampoline_base;
+ /* The per cpu profile stuff - used in smp_local_timer_interrupt */
+ static DEFINE_PER_CPU(int, prof_multiplier) = 1;
+ static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
+-static DEFINE_PER_CPU(int, prof_counter) =  1;
++static DEFINE_PER_CPU(int, prof_counter) = 1;
+ 
+ /* the map used to check if a CPU has booted */
+ static __u32 cpu_booted_map;
+@@ -235,7 +227,6 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
+ /* This is for the new dynamic CPU boot code */
+ cpumask_t cpu_callin_map = CPU_MASK_NONE;
+ cpumask_t cpu_callout_map = CPU_MASK_NONE;
+-EXPORT_SYMBOL(cpu_callout_map);
+ cpumask_t cpu_possible_map = CPU_MASK_NONE;
+ EXPORT_SYMBOL(cpu_possible_map);
+ 
+@@ -246,9 +237,9 @@ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
+ static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
+ 
+ /* Lock for enable/disable of VIC interrupts */
+-static  __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
++static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
+ 
+-/* The boot processor is correctly set up in PC mode when it 
++/* The boot processor is correctly set up in PC mode when it
+  * comes up, but the secondaries need their master/slave 8259
+  * pairs initializing correctly */
+ 
+@@ -262,8 +253,7 @@ static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
+ static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
+ 
+ /* debugging routine to read the isr of the cpu's pic */
+-static inline __u16
+-vic_read_isr(void)
++static inline __u16 vic_read_isr(void)
+ {
+ 	__u16 isr;
+ 
+@@ -275,17 +265,16 @@ vic_read_isr(void)
+ 	return isr;
+ }
+ 
+-static __init void
+-qic_setup(void)
++static __init void qic_setup(void)
+ {
+-	if(!is_cpu_quad()) {
++	if (!is_cpu_quad()) {
+ 		/* not a quad, no setup */
+ 		return;
+ 	}
+ 	outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
+ 	outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
+-	
+-	if(is_cpu_extended()) {
 +
-+		case BOOT_EFI:
-+			if (efi_enabled)
-+				efi.reset_system(reboot_mode ? EFI_RESET_WARM : EFI_RESET_COLD,
-+						 EFI_SUCCESS, 0, NULL);
++	if (is_cpu_extended()) {
+ 		/* the QIC duplicate of the VIC base register */
+ 		outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
+ 		outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);
+@@ -295,8 +284,7 @@ qic_setup(void)
+ 	}
+ }
+ 
+-static __init void
+-vic_setup_pic(void)
++static __init void vic_setup_pic(void)
+ {
+ 	outb(1, VIC_REDIRECT_REGISTER_1);
+ 	/* clear the claim registers for dynamic routing */
+@@ -333,7 +321,7 @@ vic_setup_pic(void)
+ 
+ 	/* ICW2: slave vector base */
+ 	outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);
+-	
 +
-+			reboot_type = BOOT_KBD;
-+			break;
-+		}
-+	}
-+}
+ 	/* ICW3: slave ID */
+ 	outb(0x02, 0xA1);
+ 
+@@ -341,19 +329,18 @@ vic_setup_pic(void)
+ 	outb(0x01, 0xA1);
+ }
+ 
+-static void
+-do_quad_bootstrap(void)
++static void do_quad_bootstrap(void)
+ {
+-	if(is_cpu_quad() && is_cpu_vic_boot()) {
++	if (is_cpu_quad() && is_cpu_vic_boot()) {
+ 		int i;
+ 		unsigned long flags;
+ 		__u8 cpuid = hard_smp_processor_id();
+ 
+ 		local_irq_save(flags);
+ 
+-		for(i = 0; i<4; i++) {
++		for (i = 0; i < 4; i++) {
+ 			/* FIXME: this would be >>3 &0x7 on the 32 way */
+-			if(((cpuid >> 2) & 0x03) == i)
++			if (((cpuid >> 2) & 0x03) == i)
+ 				/* don't lower our own mask! */
+ 				continue;
+ 
+@@ -368,12 +355,10 @@ do_quad_bootstrap(void)
+ 	}
+ }
+ 
+-
+ /* Set up all the basic stuff: read the SMP config and make all the
+  * SMP information reflect only the boot cpu.  All others will be
+  * brought on-line later. */
+-void __init 
+-find_smp_config(void)
++void __init find_smp_config(void)
+ {
+ 	int i;
+ 
+@@ -382,24 +367,31 @@ find_smp_config(void)
+ 	printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
+ 
+ 	/* initialize the CPU structures (moved from smp_boot_cpus) */
+-	for(i=0; i<NR_CPUS; i++) {
++	for (i = 0; i < NR_CPUS; i++) {
+ 		cpu_irq_affinity[i] = ~0;
+ 	}
+ 	cpu_online_map = cpumask_of_cpu(boot_cpu_id);
+ 
+ 	/* The boot CPU must be extended */
+-	voyager_extended_vic_processors = 1<<boot_cpu_id;
++	voyager_extended_vic_processors = 1 << boot_cpu_id;
+ 	/* initially, all of the first 8 CPUs can boot */
+ 	voyager_allowed_boot_processors = 0xff;
+ 	/* set up everything for just this CPU, we can alter
+ 	 * this as we start the other CPUs later */
+ 	/* now get the CPU disposition from the extended CMOS */
+-	cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
+-	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
+-	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
+-	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
++	cpus_addr(phys_cpu_present_map)[0] =
++	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
++	cpus_addr(phys_cpu_present_map)[0] |=
++	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
++	cpus_addr(phys_cpu_present_map)[0] |=
++	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
++				       2) << 16;
++	cpus_addr(phys_cpu_present_map)[0] |=
++	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
++				       3) << 24;
+ 	cpu_possible_map = phys_cpu_present_map;
+-	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
++	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
++	       cpus_addr(phys_cpu_present_map)[0]);
+ 	/* Here we set up the VIC to enable SMP */
+ 	/* enable the CPIs by writing the base vector to their register */
+ 	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
+@@ -427,8 +419,7 @@ find_smp_config(void)
+ /*
+  *	The bootstrap kernel entry code has set these up. Save them
+  *	for a given CPU, id is physical */
+-void __init
+-smp_store_cpu_info(int id)
++void __init smp_store_cpu_info(int id)
+ {
+ 	struct cpuinfo_x86 *c = &cpu_data(id);
+ 
+@@ -438,21 +429,19 @@ smp_store_cpu_info(int id)
+ }
+ 
+ /* set up the trampoline and return the physical address of the code */
+-static __u32 __init
+-setup_trampoline(void)
++static __u32 __init setup_trampoline(void)
+ {
+ 	/* these two are global symbols in trampoline.S */
+ 	extern const __u8 trampoline_end[];
+ 	extern const __u8 trampoline_data[];
+ 
+-	memcpy((__u8 *)trampoline_base, trampoline_data,
++	memcpy((__u8 *) trampoline_base, trampoline_data,
+ 	       trampoline_end - trampoline_data);
+-	return virt_to_phys((__u8 *)trampoline_base);
++	return virt_to_phys((__u8 *) trampoline_base);
+ }
+ 
+ /* Routine initially called when a non-boot CPU is brought online */
+-static void __init
+-start_secondary(void *unused)
++static void __init start_secondary(void *unused)
+ {
+ 	__u8 cpuid = hard_smp_processor_id();
+ 	/* external functions not defined in the headers */
+@@ -464,17 +453,18 @@ start_secondary(void *unused)
+ 	ack_CPI(VIC_CPU_BOOT_CPI);
+ 
+ 	/* setup the 8259 master slave pair belonging to this CPU ---
+-         * we won't actually receive any until the boot CPU
+-         * relinquishes it's static routing mask */
++	 * we won't actually receive any until the boot CPU
++	 * relinquishes it's static routing mask */
+ 	vic_setup_pic();
+ 
+ 	qic_setup();
+ 
+-	if(is_cpu_quad() && !is_cpu_vic_boot()) {
++	if (is_cpu_quad() && !is_cpu_vic_boot()) {
+ 		/* clear the boot CPI */
+ 		__u8 dummy;
+ 
+-		dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
++		dummy =
++		    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
+ 		printk("read dummy %d\n", dummy);
+ 	}
+ 
+@@ -516,7 +506,6 @@ start_secondary(void *unused)
+ 	cpu_idle();
+ }
+ 
+-
+ /* Routine to kick start the given CPU and wait for it to report ready
+  * (or timeout in startup).  When this routine returns, the requested
+  * CPU is either fully running and configured or known to be dead.
+@@ -524,29 +513,28 @@ start_secondary(void *unused)
+  * We call this routine sequentially 1 CPU at a time, so no need for
+  * locking */
+ 
+-static void __init
+-do_boot_cpu(__u8 cpu)
++static void __init do_boot_cpu(__u8 cpu)
+ {
+ 	struct task_struct *idle;
+ 	int timeout;
+ 	unsigned long flags;
+-	int quad_boot = (1<<cpu) & voyager_quad_processors 
+-		& ~( voyager_extended_vic_processors
+-		     & voyager_allowed_boot_processors);
++	int quad_boot = (1 << cpu) & voyager_quad_processors
++	    & ~(voyager_extended_vic_processors
++		& voyager_allowed_boot_processors);
+ 
+ 	/* This is an area in head.S which was used to set up the
+ 	 * initial kernel stack.  We need to alter this to give the
+ 	 * booting CPU a new stack (taken from its idle process) */
+ 	extern struct {
+-		__u8 *esp;
++		__u8 *sp;
+ 		unsigned short ss;
+ 	} stack_start;
+ 	/* This is the format of the CPI IDT gate (in real mode) which
+ 	 * we're hijacking to boot the CPU */
+-	union 	IDTFormat {
++	union IDTFormat {
+ 		struct seg {
+-			__u16	Offset;
+-			__u16	Segment;
++			__u16 Offset;
++			__u16 Segment;
+ 		} idt;
+ 		__u32 val;
+ 	} hijack_source;
+@@ -565,37 +553,44 @@ do_boot_cpu(__u8 cpu)
+ 	alternatives_smp_switch(1);
+ 
+ 	idle = fork_idle(cpu);
+-	if(IS_ERR(idle))
++	if (IS_ERR(idle))
+ 		panic("failed fork for CPU%d", cpu);
+-	idle->thread.eip = (unsigned long) start_secondary;
++	idle->thread.ip = (unsigned long)start_secondary;
+ 	/* init_tasks (in sched.c) is indexed logically */
+-	stack_start.esp = (void *) idle->thread.esp;
++	stack_start.sp = (void *)idle->thread.sp;
+ 
+ 	init_gdt(cpu);
+- 	per_cpu(current_task, cpu) = idle;
++	per_cpu(current_task, cpu) = idle;
+ 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ 	irq_ctx_init(cpu);
+ 
+ 	/* Note: Don't modify initial ss override */
+-	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, 
++	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
+ 		(unsigned long)hijack_source.val, hijack_source.idt.Segment,
+-		hijack_source.idt.Offset, stack_start.esp));
++		hijack_source.idt.Offset, stack_start.sp));
+ 
+ 	/* init lowmem identity mapping */
+ 	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+ 			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
+ 	flush_tlb_all();
+ 
+-	if(quad_boot) {
++	if (quad_boot) {
+ 		printk("CPU %d: non extended Quad boot\n", cpu);
+-		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4);
++		hijack_vector =
++		    (__u32 *)
++		    phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
+ 		*hijack_vector = hijack_source.val;
+ 	} else {
+ 		printk("CPU%d: extended VIC boot\n", cpu);
+-		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4);
++		hijack_vector =
++		    (__u32 *)
++		    phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
+ 		*hijack_vector = hijack_source.val;
+ 		/* VIC errata, may also receive interrupt at this address */
+-		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4);
++		hijack_vector =
++		    (__u32 *)
++		    phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
++				  VIC_DEFAULT_CPI_BASE) * 4);
+ 		*hijack_vector = hijack_source.val;
+ 	}
+ 	/* All non-boot CPUs start with interrupts fully masked.  Need
+@@ -603,73 +598,76 @@ do_boot_cpu(__u8 cpu)
+ 	 * this in the VIC by masquerading as the processor we're
+ 	 * about to boot and lowering its interrupt mask */
+ 	local_irq_save(flags);
+-	if(quad_boot) {
++	if (quad_boot) {
+ 		send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
+ 	} else {
+ 		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
+ 		/* here we're altering registers belonging to `cpu' */
+-		
 +
-+void machine_shutdown(void)
-+{
-+	/* Stop the cpus and apics */
-+#ifdef CONFIG_SMP
-+	int reboot_cpu_id;
+ 		outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
+ 		/* now go back to our original identity */
+ 		outb(boot_cpu_id, VIC_PROCESSOR_ID);
+ 
+ 		/* and boot the CPU */
+ 
+-		send_CPI((1<<cpu), VIC_CPU_BOOT_CPI);
++		send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
+ 	}
+ 	cpu_booted_map = 0;
+ 	local_irq_restore(flags);
+ 
+ 	/* now wait for it to become ready (or timeout) */
+-	for(timeout = 0; timeout < 50000; timeout++) {
+-		if(cpu_booted_map)
++	for (timeout = 0; timeout < 50000; timeout++) {
++		if (cpu_booted_map)
+ 			break;
+ 		udelay(100);
+ 	}
+ 	/* reset the page table */
+ 	zap_low_mappings();
+-	  
 +
-+	/* The boot cpu is always logical cpu 0 */
-+	reboot_cpu_id = 0;
+ 	if (cpu_booted_map) {
+ 		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
+ 			cpu, smp_processor_id()));
+-	
 +
-+#ifdef CONFIG_X86_32
-+	/* See if there has been given a command line override */
-+	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-+		cpu_isset(reboot_cpu, cpu_online_map))
-+		reboot_cpu_id = reboot_cpu;
-+#endif
+ 		printk("CPU%d: ", cpu);
+ 		print_cpu_info(&cpu_data(cpu));
+ 		wmb();
+ 		cpu_set(cpu, cpu_callout_map);
+ 		cpu_set(cpu, cpu_present_map);
+-	}
+-	else {
++	} else {
+ 		printk("CPU%d FAILED TO BOOT: ", cpu);
+-		if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5)
++		if (*
++		    ((volatile unsigned char *)phys_to_virt(start_phys_address))
++		    == 0xA5)
+ 			printk("Stuck.\n");
+ 		else
+ 			printk("Not responding.\n");
+-		
 +
-+	/* Make certain the cpu I'm about to reboot on is online */
-+	if (!cpu_isset(reboot_cpu_id, cpu_online_map))
-+		reboot_cpu_id = smp_processor_id();
+ 		cpucount--;
+ 	}
+ }
+ 
+-void __init
+-smp_boot_cpus(void)
++void __init smp_boot_cpus(void)
+ {
+ 	int i;
+ 
+ 	/* CAT BUS initialisation must be done after the memory */
+ 	/* FIXME: The L4 has a catbus too, it just needs to be
+ 	 * accessed in a totally different way */
+-	if(voyager_level == 5) {
++	if (voyager_level == 5) {
+ 		voyager_cat_init();
+ 
+ 		/* now that the cat has probed the Voyager System Bus, sanity
+ 		 * check the cpu map */
+-		if( ((voyager_quad_processors | voyager_extended_vic_processors)
+-		     & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
++		if (((voyager_quad_processors | voyager_extended_vic_processors)
++		     & cpus_addr(phys_cpu_present_map)[0]) !=
++		    cpus_addr(phys_cpu_present_map)[0]) {
+ 			/* should panic */
+-			printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
++			printk("\n\n***WARNING*** "
++			       "Sanity check of CPU present map FAILED\n");
+ 		}
+-	} else if(voyager_level == 4)
+-		voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];
++	} else if (voyager_level == 4)
++		voyager_extended_vic_processors =
++		    cpus_addr(phys_cpu_present_map)[0];
+ 
+ 	/* this sets up the idle task to run on the current cpu */
+ 	voyager_extended_cpus = 1;
+@@ -678,14 +676,14 @@ smp_boot_cpus(void)
+ 	//global_irq_holder = boot_cpu_id;
+ 
+ 	/* FIXME: Need to do something about this but currently only works
+-	 * on CPUs with a tsc which none of mine have. 
+-	smp_tune_scheduling();
++	 * on CPUs with a tsc which none of mine have.
++	 smp_tune_scheduling();
+ 	 */
+ 	smp_store_cpu_info(boot_cpu_id);
+ 	printk("CPU%d: ", boot_cpu_id);
+ 	print_cpu_info(&cpu_data(boot_cpu_id));
+ 
+-	if(is_cpu_quad()) {
++	if (is_cpu_quad()) {
+ 		/* booting on a Quad CPU */
+ 		printk("VOYAGER SMP: Boot CPU is Quad\n");
+ 		qic_setup();
+@@ -697,11 +695,11 @@ smp_boot_cpus(void)
+ 
+ 	cpu_set(boot_cpu_id, cpu_online_map);
+ 	cpu_set(boot_cpu_id, cpu_callout_map);
+-	
+-	/* loop over all the extended VIC CPUs and boot them.  The 
 +
-+	/* Make certain I only run on the appropriate processor */
-+	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
++	/* loop over all the extended VIC CPUs and boot them.  The
+ 	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
+-	for(i = 0; i < NR_CPUS; i++) {
+-		if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
++	for (i = 0; i < NR_CPUS; i++) {
++		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
+ 			continue;
+ 		do_boot_cpu(i);
+ 		/* This udelay seems to be needed for the Quad boots
+@@ -715,25 +713,26 @@ smp_boot_cpus(void)
+ 		for (i = 0; i < NR_CPUS; i++)
+ 			if (cpu_isset(i, cpu_online_map))
+ 				bogosum += cpu_data(i).loops_per_jiffy;
+-		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+-			cpucount+1,
+-			bogosum/(500000/HZ),
+-			(bogosum/(5000/HZ))%100);
++		printk(KERN_INFO "Total of %d processors activated "
++		       "(%lu.%02lu BogoMIPS).\n",
++		       cpucount + 1, bogosum / (500000 / HZ),
++		       (bogosum / (5000 / HZ)) % 100);
+ 	}
+ 	voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
+-	printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus);
++	printk("VOYAGER: Extended (interrupt handling CPUs): "
++	       "%d, non-extended: %d\n", voyager_extended_cpus,
++	       num_booting_cpus() - voyager_extended_cpus);
+ 	/* that's it, switch to symmetric mode */
+ 	outb(0, VIC_PRIORITY_REGISTER);
+ 	outb(0, VIC_CLAIM_REGISTER_0);
+ 	outb(0, VIC_CLAIM_REGISTER_1);
+-	
 +
-+	/* O.K Now that I'm on the appropriate processor,
-+	 * stop all of the others.
+ 	VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
+ }
+ 
+ /* Reload the secondary CPUs task structure (this function does not
+  * return ) */
+-void __init 
+-initialize_secondary(void)
++void __init initialize_secondary(void)
+ {
+ #if 0
+ 	// AC kernels only
+@@ -745,11 +744,9 @@ initialize_secondary(void)
+ 	 * basically just the stack pointer and the eip.
+ 	 */
+ 
+-	asm volatile(
+-		"movl %0,%%esp\n\t"
+-		"jmp *%1"
+-		:
+-		:"r" (current->thread.esp),"r" (current->thread.eip));
++	asm volatile ("movl %0,%%esp\n\t"
++		      "jmp *%1"::"r" (current->thread.sp),
++		      "r"(current->thread.ip));
+ }
+ 
+ /* handle a Voyager SYS_INT -- If we don't, the base board will
+@@ -758,25 +755,23 @@ initialize_secondary(void)
+  * System interrupts occur because some problem was detected on the
+  * various busses.  To find out what you have to probe all the
+  * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
+-fastcall void
+-smp_vic_sys_interrupt(struct pt_regs *regs)
++void smp_vic_sys_interrupt(struct pt_regs *regs)
+ {
+ 	ack_CPI(VIC_SYS_INT);
+-	printk("Voyager SYSTEM INTERRUPT\n");	
++	printk("Voyager SYSTEM INTERRUPT\n");
+ }
+ 
+ /* Handle a voyager CMN_INT; These interrupts occur either because of
+  * a system status change or because a single bit memory error
+  * occurred.  FIXME: At the moment, ignore all this. */
+-fastcall void
+-smp_vic_cmn_interrupt(struct pt_regs *regs)
++void smp_vic_cmn_interrupt(struct pt_regs *regs)
+ {
+ 	static __u8 in_cmn_int = 0;
+ 	static DEFINE_SPINLOCK(cmn_int_lock);
+ 
+ 	/* common ints are broadcast, so make sure we only do this once */
+ 	_raw_spin_lock(&cmn_int_lock);
+-	if(in_cmn_int)
++	if (in_cmn_int)
+ 		goto unlock_end;
+ 
+ 	in_cmn_int++;
+@@ -784,12 +779,12 @@ smp_vic_cmn_interrupt(struct pt_regs *regs)
+ 
+ 	VDEBUG(("Voyager COMMON INTERRUPT\n"));
+ 
+-	if(voyager_level == 5)
++	if (voyager_level == 5)
+ 		voyager_cat_do_common_interrupt();
+ 
+ 	_raw_spin_lock(&cmn_int_lock);
+ 	in_cmn_int = 0;
+- unlock_end:
++      unlock_end:
+ 	_raw_spin_unlock(&cmn_int_lock);
+ 	ack_CPI(VIC_CMN_INT);
+ }
+@@ -797,26 +792,23 @@ smp_vic_cmn_interrupt(struct pt_regs *regs)
+ /*
+  * Reschedule call back. Nothing to do, all the work is done
+  * automatically when we return from the interrupt.  */
+-static void
+-smp_reschedule_interrupt(void)
++static void smp_reschedule_interrupt(void)
+ {
+ 	/* do nothing */
+ }
+ 
+-static struct mm_struct * flush_mm;
++static struct mm_struct *flush_mm;
+ static unsigned long flush_va;
+ static DEFINE_SPINLOCK(tlbstate_lock);
+-#define FLUSH_ALL	0xffffffff
+ 
+ /*
+- * We cannot call mmdrop() because we are in interrupt context, 
++ * We cannot call mmdrop() because we are in interrupt context,
+  * instead update mm->cpu_vm_mask.
+  *
+  * We need to reload %cr3 since the page tables may be going
+  * away from under us..
+  */
+-static inline void
+-leave_mm (unsigned long cpu)
++static inline void voyager_leave_mm(unsigned long cpu)
+ {
+ 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+ 		BUG();
+@@ -824,12 +816,10 @@ leave_mm (unsigned long cpu)
+ 	load_cr3(swapper_pg_dir);
+ }
+ 
+-
+ /*
+  * Invalidate call-back
+  */
+-static void 
+-smp_invalidate_interrupt(void)
++static void smp_invalidate_interrupt(void)
+ {
+ 	__u8 cpu = smp_processor_id();
+ 
+@@ -837,18 +827,18 @@ smp_invalidate_interrupt(void)
+ 		return;
+ 	/* This will flood messages.  Don't uncomment unless you see
+ 	 * Problems with cross cpu invalidation
+-	VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
+-		smp_processor_id()));
+-	*/
++	 VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
++	 smp_processor_id()));
 +	 */
-+	smp_send_stop();
-+#endif
-+
-+	lapic_shutdown();
-+
-+#ifdef CONFIG_X86_IO_APIC
-+	disable_IO_APIC();
-+#endif
-+
-+#ifdef CONFIG_HPET_TIMER
-+	hpet_disable();
-+#endif
-+
-+#ifdef CONFIG_X86_64
-+	pci_iommu_shutdown();
-+#endif
-+}
-+
-+void machine_restart(char *__unused)
-+{
-+	printk("machine restart\n");
-+
-+	if (!reboot_force)
-+		machine_shutdown();
-+	machine_emergency_restart();
-+}
+ 
+ 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+ 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
+-			if (flush_va == FLUSH_ALL)
++			if (flush_va == TLB_FLUSH_ALL)
+ 				local_flush_tlb();
+ 			else
+ 				__flush_tlb_one(flush_va);
+ 		} else
+-			leave_mm(cpu);
++			voyager_leave_mm(cpu);
+ 	}
+ 	smp_mb__before_clear_bit();
+ 	clear_bit(cpu, &smp_invalidate_needed);
+@@ -857,11 +847,10 @@ smp_invalidate_interrupt(void)
+ 
+ /* All the new flush operations for 2.4 */
+ 
+-
+ /* This routine is called with a physical cpu mask */
+ static void
+-voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+-			  unsigned long va)
++voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
++			 unsigned long va)
+ {
+ 	int stuck = 50000;
+ 
+@@ -875,7 +864,7 @@ voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ 		BUG();
+ 
+ 	spin_lock(&tlbstate_lock);
+-	
 +
-+void machine_halt(void)
-+{
-+}
+ 	flush_mm = mm;
+ 	flush_va = va;
+ 	atomic_set_mask(cpumask, &smp_invalidate_needed);
+@@ -887,23 +876,23 @@ voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ 
+ 	while (smp_invalidate_needed) {
+ 		mb();
+-		if(--stuck == 0) {
+-			printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id());
++		if (--stuck == 0) {
++			printk("***WARNING*** Stuck doing invalidate CPI "
++			       "(CPU%d)\n", smp_processor_id());
+ 			break;
+ 		}
+ 	}
+ 
+ 	/* Uncomment only to debug invalidation problems
+-	VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
+-	*/
++	   VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
++	 */
+ 
+ 	flush_mm = NULL;
+ 	flush_va = 0;
+ 	spin_unlock(&tlbstate_lock);
+ }
+ 
+-void
+-flush_tlb_current_task(void)
++void flush_tlb_current_task(void)
+ {
+ 	struct mm_struct *mm = current->mm;
+ 	unsigned long cpu_mask;
+@@ -913,14 +902,12 @@ flush_tlb_current_task(void)
+ 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
+ 	local_flush_tlb();
+ 	if (cpu_mask)
+-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ 
+ 	preempt_enable();
+ }
+ 
+-
+-void
+-flush_tlb_mm (struct mm_struct * mm)
++void flush_tlb_mm(struct mm_struct *mm)
+ {
+ 	unsigned long cpu_mask;
+ 
+@@ -932,15 +919,15 @@ flush_tlb_mm (struct mm_struct * mm)
+ 		if (current->mm)
+ 			local_flush_tlb();
+ 		else
+-			leave_mm(smp_processor_id());
++			voyager_leave_mm(smp_processor_id());
+ 	}
+ 	if (cpu_mask)
+-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ 
+ 	preempt_enable();
+ }
+ 
+-void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+ {
+ 	struct mm_struct *mm = vma->vm_mm;
+ 	unsigned long cpu_mask;
+@@ -949,10 +936,10 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+ 
+ 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
+ 	if (current->active_mm == mm) {
+-		if(current->mm)
++		if (current->mm)
+ 			__flush_tlb_one(va);
+-		 else
+-		 	leave_mm(smp_processor_id());
++		else
++			voyager_leave_mm(smp_processor_id());
+ 	}
+ 
+ 	if (cpu_mask)
+@@ -960,21 +947,21 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+ 
+ 	preempt_enable();
+ }
 +
-+void machine_power_off(void)
-+{
-+	if (pm_power_off) {
-+		if (!reboot_force)
-+			machine_shutdown();
-+		pm_power_off();
-+	}
-+}
+ EXPORT_SYMBOL(flush_tlb_page);
+ 
+ /* enable the requested IRQs */
+-static void
+-smp_enable_irq_interrupt(void)
++static void smp_enable_irq_interrupt(void)
+ {
+ 	__u8 irq;
+ 	__u8 cpu = get_cpu();
+ 
+ 	VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
+-	       vic_irq_enable_mask[cpu]));
++		vic_irq_enable_mask[cpu]));
+ 
+ 	spin_lock(&vic_irq_lock);
+-	for(irq = 0; irq < 16; irq++) {
+-		if(vic_irq_enable_mask[cpu] & (1<<irq))
++	for (irq = 0; irq < 16; irq++) {
++		if (vic_irq_enable_mask[cpu] & (1 << irq))
+ 			enable_local_vic_irq(irq);
+ 	}
+ 	vic_irq_enable_mask[cpu] = 0;
+@@ -982,17 +969,16 @@ smp_enable_irq_interrupt(void)
+ 
+ 	put_cpu_no_resched();
+ }
+-	
 +
-+struct machine_ops machine_ops = {
-+	.power_off = machine_power_off,
-+	.shutdown = machine_shutdown,
-+	.emergency_restart = machine_emergency_restart,
-+	.restart = machine_restart,
-+	.halt = machine_halt
-+};
-diff --git a/arch/x86/kernel/reboot_32.c b/arch/x86/kernel/reboot_32.c
-deleted file mode 100644
-index bb1a0f8..0000000
---- a/arch/x86/kernel/reboot_32.c
-+++ /dev/null
-@@ -1,413 +0,0 @@
--#include <linux/mm.h>
--#include <linux/module.h>
--#include <linux/delay.h>
--#include <linux/init.h>
--#include <linux/interrupt.h>
--#include <linux/mc146818rtc.h>
--#include <linux/efi.h>
--#include <linux/dmi.h>
--#include <linux/ctype.h>
--#include <linux/pm.h>
--#include <linux/reboot.h>
--#include <asm/uaccess.h>
--#include <asm/apic.h>
--#include <asm/hpet.h>
--#include <asm/desc.h>
--#include "mach_reboot.h"
--#include <asm/reboot_fixups.h>
--#include <asm/reboot.h>
+ /*
+  *	CPU halt call-back
+  */
+-static void
+-smp_stop_cpu_function(void *dummy)
++static void smp_stop_cpu_function(void *dummy)
+ {
+ 	VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
+ 	cpu_clear(smp_processor_id(), cpu_online_map);
+ 	local_irq_disable();
+-	for(;;)
++	for (;;)
+ 		halt();
+ }
+ 
+@@ -1006,14 +992,13 @@ struct call_data_struct {
+ 	int wait;
+ };
+ 
+-static struct call_data_struct * call_data;
++static struct call_data_struct *call_data;
+ 
+ /* execute a thread on a new CPU.  The function to be called must be
+  * previously set up.  This is used to schedule a function for
+  * execution on all CPUs - set up the function then broadcast a
+  * function_interrupt CPI to come here on each CPU */
+-static void
+-smp_call_function_interrupt(void)
++static void smp_call_function_interrupt(void)
+ {
+ 	void (*func) (void *info) = call_data->func;
+ 	void *info = call_data->info;
+@@ -1027,16 +1012,17 @@ smp_call_function_interrupt(void)
+ 	 * about to execute the function
+ 	 */
+ 	mb();
+-	if(!test_and_clear_bit(cpu, &call_data->started)) {
++	if (!test_and_clear_bit(cpu, &call_data->started)) {
+ 		/* If the bit wasn't set, this could be a replay */
+-		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion with no call pending\n", cpu);
++		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
++		       " with no call pending\n", cpu);
+ 		return;
+ 	}
+ 	/*
+ 	 * At this point the info structure may be out of scope unless wait==1
+ 	 */
+ 	irq_enter();
+-	(*func)(info);
++	(*func) (info);
+ 	__get_cpu_var(irq_stat).irq_call_count++;
+ 	irq_exit();
+ 	if (wait) {
+@@ -1046,14 +1032,13 @@ smp_call_function_interrupt(void)
+ }
+ 
+ static int
+-voyager_smp_call_function_mask (cpumask_t cpumask,
+-				void (*func) (void *info), void *info,
+-				int wait)
++voyager_smp_call_function_mask(cpumask_t cpumask,
++			       void (*func) (void *info), void *info, int wait)
+ {
+ 	struct call_data_struct data;
+ 	u32 mask = cpus_addr(cpumask)[0];
+ 
+-	mask &= ~(1<<smp_processor_id());
++	mask &= ~(1 << smp_processor_id());
+ 
+ 	if (!mask)
+ 		return 0;
+@@ -1093,7 +1078,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
+  * so we use the system clock to interrupt one processor, which in
+  * turn, broadcasts a timer CPI to all the others --- we receive that
+  * CPI here.  We don't use this actually for counting so losing
+- * ticks doesn't matter 
++ * ticks doesn't matter
+  *
+  * FIXME: For those CPUs which actually have a local APIC, we could
+  * try to use it to trigger this interrupt instead of having to
+@@ -1101,8 +1086,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
+  * no local APIC, so I can't do this
+  *
+  * This function is currently a placeholder and is unused in the code */
+-fastcall void 
+-smp_apic_timer_interrupt(struct pt_regs *regs)
++void smp_apic_timer_interrupt(struct pt_regs *regs)
+ {
+ 	struct pt_regs *old_regs = set_irq_regs(regs);
+ 	wrapper_smp_local_timer_interrupt();
+@@ -1110,8 +1094,7 @@ smp_apic_timer_interrupt(struct pt_regs *regs)
+ }
+ 
+ /* All of the QUAD interrupt GATES */
+-fastcall void
+-smp_qic_timer_interrupt(struct pt_regs *regs)
++void smp_qic_timer_interrupt(struct pt_regs *regs)
+ {
+ 	struct pt_regs *old_regs = set_irq_regs(regs);
+ 	ack_QIC_CPI(QIC_TIMER_CPI);
+@@ -1119,127 +1102,112 @@ smp_qic_timer_interrupt(struct pt_regs *regs)
+ 	set_irq_regs(old_regs);
+ }
+ 
+-fastcall void
+-smp_qic_invalidate_interrupt(struct pt_regs *regs)
++void smp_qic_invalidate_interrupt(struct pt_regs *regs)
+ {
+ 	ack_QIC_CPI(QIC_INVALIDATE_CPI);
+ 	smp_invalidate_interrupt();
+ }
+ 
+-fastcall void
+-smp_qic_reschedule_interrupt(struct pt_regs *regs)
++void smp_qic_reschedule_interrupt(struct pt_regs *regs)
+ {
+ 	ack_QIC_CPI(QIC_RESCHEDULE_CPI);
+ 	smp_reschedule_interrupt();
+ }
+ 
+-fastcall void
+-smp_qic_enable_irq_interrupt(struct pt_regs *regs)
++void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
+ {
+ 	ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
+ 	smp_enable_irq_interrupt();
+ }
+ 
+-fastcall void
+-smp_qic_call_function_interrupt(struct pt_regs *regs)
++void smp_qic_call_function_interrupt(struct pt_regs *regs)
+ {
+ 	ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
+ 	smp_call_function_interrupt();
+ }
+ 
+-fastcall void
+-smp_vic_cpi_interrupt(struct pt_regs *regs)
++void smp_vic_cpi_interrupt(struct pt_regs *regs)
+ {
+ 	struct pt_regs *old_regs = set_irq_regs(regs);
+ 	__u8 cpu = smp_processor_id();
+ 
+-	if(is_cpu_quad())
++	if (is_cpu_quad())
+ 		ack_QIC_CPI(VIC_CPI_LEVEL0);
+ 	else
+ 		ack_VIC_CPI(VIC_CPI_LEVEL0);
+ 
+-	if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
++	if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
+ 		wrapper_smp_local_timer_interrupt();
+-	if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
++	if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
+ 		smp_invalidate_interrupt();
+-	if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
++	if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
+ 		smp_reschedule_interrupt();
+-	if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
++	if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
+ 		smp_enable_irq_interrupt();
+-	if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
++	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
+ 		smp_call_function_interrupt();
+ 	set_irq_regs(old_regs);
+ }
+ 
+-static void
+-do_flush_tlb_all(void* info)
++static void do_flush_tlb_all(void *info)
+ {
+ 	unsigned long cpu = smp_processor_id();
+ 
+ 	__flush_tlb_all();
+ 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
+-		leave_mm(cpu);
++		voyager_leave_mm(cpu);
+ }
+ 
 -
--/*
-- * Power off function, if any
-- */
--void (*pm_power_off)(void);
--EXPORT_SYMBOL(pm_power_off);
+ /* flush the TLB of every active CPU in the system */
+-void
+-flush_tlb_all(void)
++void flush_tlb_all(void)
+ {
+ 	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
+ }
+ 
+ /* used to set up the trampoline for other CPUs when the memory manager
+  * is sorted out */
+-void __init
+-smp_alloc_memory(void)
++void __init smp_alloc_memory(void)
+ {
+-	trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE);
+-	if(__pa(trampoline_base) >= 0x93000)
++	trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE);
++	if (__pa(trampoline_base) >= 0x93000)
+ 		BUG();
+ }
+ 
+ /* send a reschedule CPI to one CPU by physical CPU number*/
+-static void
+-voyager_smp_send_reschedule(int cpu)
++static void voyager_smp_send_reschedule(int cpu)
+ {
+ 	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
+ }
+ 
 -
--static int reboot_mode;
--static int reboot_thru_bios;
+-int
+-hard_smp_processor_id(void)
++int hard_smp_processor_id(void)
+ {
+ 	__u8 i;
+ 	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
+-	if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
++	if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
+ 		return cpumask & 0x1F;
+ 
+-	for(i = 0; i < 8; i++) {
+-		if(cpumask & (1<<i))
++	for (i = 0; i < 8; i++) {
++		if (cpumask & (1 << i))
+ 			return i;
+ 	}
+ 	printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
+ 	return 0;
+ }
+ 
+-int
+-safe_smp_processor_id(void)
++int safe_smp_processor_id(void)
+ {
+ 	return hard_smp_processor_id();
+ }
+ 
+ /* broadcast a halt to all other CPUs */
+-static void
+-voyager_smp_send_stop(void)
++static void voyager_smp_send_stop(void)
+ {
+ 	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
+ }
+ 
+ /* this function is triggered in time.c when a clock tick fires
+  * we need to re-broadcast the tick to all CPUs */
+-void
+-smp_vic_timer_interrupt(void)
++void smp_vic_timer_interrupt(void)
+ {
+ 	send_CPI_allbutself(VIC_TIMER_CPI);
+ 	smp_local_timer_interrupt();
+@@ -1253,8 +1221,7 @@ smp_vic_timer_interrupt(void)
+  * multiplier is 1 and it can be changed by writing the new multiplier
+  * value into /proc/profile.
+  */
+-void
+-smp_local_timer_interrupt(void)
++void smp_local_timer_interrupt(void)
+ {
+ 	int cpu = smp_processor_id();
+ 	long weight;
+@@ -1269,18 +1236,18 @@ smp_local_timer_interrupt(void)
+ 		 *
+ 		 * Interrupts are already masked off at this point.
+ 		 */
+-		per_cpu(prof_counter,cpu) = per_cpu(prof_multiplier, cpu);
++		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
+ 		if (per_cpu(prof_counter, cpu) !=
+-					per_cpu(prof_old_multiplier, cpu)) {
++		    per_cpu(prof_old_multiplier, cpu)) {
+ 			/* FIXME: need to update the vic timer tick here */
+ 			per_cpu(prof_old_multiplier, cpu) =
+-						per_cpu(prof_counter, cpu);
++			    per_cpu(prof_counter, cpu);
+ 		}
+ 
+ 		update_process_times(user_mode_vm(get_irq_regs()));
+ 	}
+ 
+-	if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
++	if (((1 << cpu) & voyager_extended_vic_processors) == 0)
+ 		/* only extended VIC processors participate in
+ 		 * interrupt distribution */
+ 		return;
+@@ -1296,12 +1263,12 @@ smp_local_timer_interrupt(void)
+ 	 * we can take more than 100K local irqs per second on a 100 MHz P5.
+ 	 */
+ 
+-	if((++vic_tick[cpu] & 0x7) != 0)
++	if ((++vic_tick[cpu] & 0x7) != 0)
+ 		return;
+ 	/* get here every 16 ticks (about every 1/6 of a second) */
+ 
+ 	/* Change our priority to give someone else a chance at getting
+-         * the IRQ. The algorithm goes like this:
++	 * the IRQ. The algorithm goes like this:
+ 	 *
+ 	 * In the VIC, the dynamically routed interrupt is always
+ 	 * handled by the lowest priority eligible (i.e. receiving
+@@ -1325,18 +1292,18 @@ smp_local_timer_interrupt(void)
+ 	 * affinity code since we now try to even up the interrupt
+ 	 * counts when an affinity binding is keeping them on a
+ 	 * particular CPU*/
+-	weight = (vic_intr_count[cpu]*voyager_extended_cpus
++	weight = (vic_intr_count[cpu] * voyager_extended_cpus
+ 		  - vic_intr_total) >> 4;
+ 	weight += 4;
+-	if(weight > 7)
++	if (weight > 7)
+ 		weight = 7;
+-	if(weight < 0)
++	if (weight < 0)
+ 		weight = 0;
+-	
+-	outb((__u8)weight, VIC_PRIORITY_REGISTER);
++
++	outb((__u8) weight, VIC_PRIORITY_REGISTER);
+ 
+ #ifdef VOYAGER_DEBUG
+-	if((vic_tick[cpu] & 0xFFF) == 0) {
++	if ((vic_tick[cpu] & 0xFFF) == 0) {
+ 		/* print this message roughly every 25 secs */
+ 		printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
+ 		       cpu, vic_tick[cpu], weight);
+@@ -1345,15 +1312,14 @@ smp_local_timer_interrupt(void)
+ }
+ 
+ /* setup the profiling timer */
+-int 
+-setup_profiling_timer(unsigned int multiplier)
++int setup_profiling_timer(unsigned int multiplier)
+ {
+ 	int i;
+ 
+-	if ( (!multiplier))
++	if ((!multiplier))
+ 		return -EINVAL;
+ 
+-	/* 
++	/*
+ 	 * Set the new multiplier for each CPU. CPUs don't start using the
+ 	 * new values until the next timer interrupt in which they do process
+ 	 * accounting.
+@@ -1367,15 +1333,13 @@ setup_profiling_timer(unsigned int multiplier)
+ /* This is a bit of a mess, but forced on us by the genirq changes
+  * there's no genirq handler that really does what voyager wants
+  * so hack it up with the simple IRQ handler */
+-static void fastcall
+-handle_vic_irq(unsigned int irq, struct irq_desc *desc)
++static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ 	before_handle_vic_irq(irq);
+ 	handle_simple_irq(irq, desc);
+ 	after_handle_vic_irq(irq);
+ }
+ 
 -
--#ifdef CONFIG_SMP
--static int reboot_cpu = -1;
--#endif
--static int __init reboot_setup(char *str)
--{
--	while(1) {
--		switch (*str) {
--		case 'w': /* "warm" reboot (no memory testing etc) */
--			reboot_mode = 0x1234;
--			break;
--		case 'c': /* "cold" reboot (with memory testing etc) */
--			reboot_mode = 0x0;
--			break;
--		case 'b': /* "bios" reboot by jumping through the BIOS */
--			reboot_thru_bios = 1;
--			break;
--		case 'h': /* "hard" reboot by toggling RESET and/or crashing the CPU */
--			reboot_thru_bios = 0;
--			break;
--#ifdef CONFIG_SMP
--		case 's': /* "smp" reboot by executing reset on BSP or other CPU*/
--			if (isdigit(*(str+1))) {
--				reboot_cpu = (int) (*(str+1) - '0');
--				if (isdigit(*(str+2)))
--					reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
--			}
--				/* we will leave sorting out the final value 
--				when we are ready to reboot, since we might not
-- 				have set up boot_cpu_id or smp_num_cpu */
--			break;
--#endif
+ /*  The CPIs are handled in the per cpu 8259s, so they must be
+  *  enabled to be received: FIX: enabling the CPIs in the early
+  *  boot sequence interferes with bug checking; enable them later
+@@ -1385,13 +1349,12 @@ handle_vic_irq(unsigned int irq, struct irq_desc *desc)
+ #define QIC_SET_GATE(cpi, vector) \
+ 	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
+ 
+-void __init
+-smp_intr_init(void)
++void __init smp_intr_init(void)
+ {
+ 	int i;
+ 
+ 	/* initialize the per cpu irq mask to all disabled */
+-	for(i = 0; i < NR_CPUS; i++)
++	for (i = 0; i < NR_CPUS; i++)
+ 		vic_irq_mask[i] = 0xFFFF;
+ 
+ 	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
+@@ -1404,42 +1367,40 @@ smp_intr_init(void)
+ 	QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
+ 	QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
+ 	QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);
+-	
+ 
+-	/* now put the VIC descriptor into the first 48 IRQs 
++	/* now put the VIC descriptor into the first 48 IRQs
+ 	 *
+ 	 * This is for later: first 16 correspond to PC IRQs; next 16
+ 	 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
+-	for(i = 0; i < 48; i++)
++	for (i = 0; i < 48; i++)
+ 		set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
+ }
+ 
+ /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
+  * processor to receive CPI */
+-static void
+-send_CPI(__u32 cpuset, __u8 cpi)
++static void send_CPI(__u32 cpuset, __u8 cpi)
+ {
+ 	int cpu;
+ 	__u32 quad_cpuset = (cpuset & voyager_quad_processors);
+ 
+-	if(cpi < VIC_START_FAKE_CPI) {
+-		/* fake CPI are only used for booting, so send to the 
++	if (cpi < VIC_START_FAKE_CPI) {
++		/* fake CPI are only used for booting, so send to the
+ 		 * extended quads as well---Quads must be VIC booted */
+-		outb((__u8)(cpuset), VIC_CPI_Registers[cpi]);
++		outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
+ 		return;
+ 	}
+-	if(quad_cpuset)
++	if (quad_cpuset)
+ 		send_QIC_CPI(quad_cpuset, cpi);
+ 	cpuset &= ~quad_cpuset;
+ 	cpuset &= 0xff;		/* only first 8 CPUs vaild for VIC CPI */
+-	if(cpuset == 0)
++	if (cpuset == 0)
+ 		return;
+ 	for_each_online_cpu(cpu) {
+-		if(cpuset & (1<<cpu))
++		if (cpuset & (1 << cpu))
+ 			set_bit(cpi, &vic_cpi_mailbox[cpu]);
+ 	}
+-	if(cpuset)
+-		outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
++	if (cpuset)
++		outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
+ }
+ 
+ /* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
+@@ -1448,20 +1409,19 @@ send_CPI(__u32 cpuset, __u8 cpi)
+  * DON'T make this inline otherwise the cache line read will be
+  * optimised away
+  * */
+-static int
+-ack_QIC_CPI(__u8 cpi) {
++static int ack_QIC_CPI(__u8 cpi)
++{
+ 	__u8 cpu = hard_smp_processor_id();
+ 
+ 	cpi &= 7;
+ 
+-	outb(1<<cpi, QIC_INTERRUPT_CLEAR1);
++	outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
+ 	return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
+ }
+ 
+-static void
+-ack_special_QIC_CPI(__u8 cpi)
++static void ack_special_QIC_CPI(__u8 cpi)
+ {
+-	switch(cpi) {
++	switch (cpi) {
+ 	case VIC_CMN_INT:
+ 		outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
+ 		break;
+@@ -1474,8 +1434,7 @@ ack_special_QIC_CPI(__u8 cpi)
+ }
+ 
+ /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
+-static void
+-ack_VIC_CPI(__u8 cpi)
++static void ack_VIC_CPI(__u8 cpi)
+ {
+ #ifdef VOYAGER_DEBUG
+ 	unsigned long flags;
+@@ -1484,17 +1443,17 @@ ack_VIC_CPI(__u8 cpi)
+ 
+ 	local_irq_save(flags);
+ 	isr = vic_read_isr();
+-	if((isr & (1<<(cpi &7))) == 0) {
++	if ((isr & (1 << (cpi & 7))) == 0) {
+ 		printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
+ 	}
+ #endif
+ 	/* send specific EOI; the two system interrupts have
+ 	 * bit 4 set for a separate vector but behave as the
+ 	 * corresponding 3 bit intr */
+-	outb_p(0x60|(cpi & 7),0x20);
++	outb_p(0x60 | (cpi & 7), 0x20);
+ 
+ #ifdef VOYAGER_DEBUG
+-	if((vic_read_isr() & (1<<(cpi &7))) != 0) {
++	if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
+ 		printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
+ 	}
+ 	local_irq_restore(flags);
+@@ -1502,12 +1461,11 @@ ack_VIC_CPI(__u8 cpi)
+ }
+ 
+ /* cribbed with thanks from irq.c */
+-#define __byte(x,y) 	(((unsigned char *)&(y))[x])
++#define __byte(x,y)	(((unsigned char *)&(y))[x])
+ #define cached_21(cpu)	(__byte(0,vic_irq_mask[cpu]))
+ #define cached_A1(cpu)	(__byte(1,vic_irq_mask[cpu]))
+ 
+-static unsigned int
+-startup_vic_irq(unsigned int irq)
++static unsigned int startup_vic_irq(unsigned int irq)
+ {
+ 	unmask_vic_irq(irq);
+ 
+@@ -1535,13 +1493,12 @@ startup_vic_irq(unsigned int irq)
+  *    broadcast an Interrupt enable CPI which causes all other CPUs to
+  *    adjust their masks accordingly.  */
+ 
+-static void
+-unmask_vic_irq(unsigned int irq)
++static void unmask_vic_irq(unsigned int irq)
+ {
+ 	/* linux doesn't to processor-irq affinity, so enable on
+ 	 * all CPUs we know about */
+ 	int cpu = smp_processor_id(), real_cpu;
+-	__u16 mask = (1<<irq);
++	__u16 mask = (1 << irq);
+ 	__u32 processorList = 0;
+ 	unsigned long flags;
+ 
+@@ -1549,78 +1506,72 @@ unmask_vic_irq(unsigned int irq)
+ 		irq, cpu, cpu_irq_affinity[cpu]));
+ 	spin_lock_irqsave(&vic_irq_lock, flags);
+ 	for_each_online_cpu(real_cpu) {
+-		if(!(voyager_extended_vic_processors & (1<<real_cpu)))
++		if (!(voyager_extended_vic_processors & (1 << real_cpu)))
+ 			continue;
+-		if(!(cpu_irq_affinity[real_cpu] & mask)) {
++		if (!(cpu_irq_affinity[real_cpu] & mask)) {
+ 			/* irq has no affinity for this CPU, ignore */
+ 			continue;
+ 		}
+-		if(real_cpu == cpu) {
++		if (real_cpu == cpu) {
+ 			enable_local_vic_irq(irq);
 -		}
--		if((str = strchr(str,',')) != NULL)
--			str++;
--		else
--			break;
+-		else if(vic_irq_mask[real_cpu] & mask) {
++		} else if (vic_irq_mask[real_cpu] & mask) {
+ 			vic_irq_enable_mask[real_cpu] |= mask;
+-			processorList |= (1<<real_cpu);
++			processorList |= (1 << real_cpu);
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(&vic_irq_lock, flags);
+-	if(processorList)
++	if (processorList)
+ 		send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
+ }
+ 
+-static void
+-mask_vic_irq(unsigned int irq)
++static void mask_vic_irq(unsigned int irq)
+ {
+ 	/* lazy disable, do nothing */
+ }
+ 
+-static void
+-enable_local_vic_irq(unsigned int irq)
++static void enable_local_vic_irq(unsigned int irq)
+ {
+ 	__u8 cpu = smp_processor_id();
+ 	__u16 mask = ~(1 << irq);
+ 	__u16 old_mask = vic_irq_mask[cpu];
+ 
+ 	vic_irq_mask[cpu] &= mask;
+-	if(vic_irq_mask[cpu] == old_mask)
++	if (vic_irq_mask[cpu] == old_mask)
+ 		return;
+ 
+ 	VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
+ 		irq, cpu));
+ 
+ 	if (irq & 8) {
+-		outb_p(cached_A1(cpu),0xA1);
++		outb_p(cached_A1(cpu), 0xA1);
+ 		(void)inb_p(0xA1);
 -	}
--	return 1;
--}
--
--__setup("reboot=", reboot_setup);
--
--/*
-- * Reboot options and system auto-detection code provided by
-- * Dell Inc. so their systems "just work". :-)
-- */
--
--/*
-- * Some machines require the "reboot=b"  commandline option, this quirk makes that automatic.
-- */
--static int __init set_bios_reboot(const struct dmi_system_id *d)
--{
--	if (!reboot_thru_bios) {
--		reboot_thru_bios = 1;
--		printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
+-	else {
+-		outb_p(cached_21(cpu),0x21);
++	} else {
++		outb_p(cached_21(cpu), 0x21);
+ 		(void)inb_p(0x21);
+ 	}
+ }
+ 
+-static void
+-disable_local_vic_irq(unsigned int irq)
++static void disable_local_vic_irq(unsigned int irq)
+ {
+ 	__u8 cpu = smp_processor_id();
+ 	__u16 mask = (1 << irq);
+ 	__u16 old_mask = vic_irq_mask[cpu];
+ 
+-	if(irq == 7)
++	if (irq == 7)
+ 		return;
+ 
+ 	vic_irq_mask[cpu] |= mask;
+-	if(old_mask == vic_irq_mask[cpu])
++	if (old_mask == vic_irq_mask[cpu])
+ 		return;
+ 
+ 	VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
+ 		irq, cpu));
+ 
+ 	if (irq & 8) {
+-		outb_p(cached_A1(cpu),0xA1);
++		outb_p(cached_A1(cpu), 0xA1);
+ 		(void)inb_p(0xA1);
 -	}
--	return 0;
--}
--
--static struct dmi_system_id __initdata reboot_dmi_table[] = {
--	{	/* Handle problems with rebooting on Dell E520's */
--		.callback = set_bios_reboot,
--		.ident = "Dell E520",
--		.matches = {
--			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
--			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
--		},
--	},
--	{	/* Handle problems with rebooting on Dell 1300's */
--		.callback = set_bios_reboot,
--		.ident = "Dell PowerEdge 1300",
--		.matches = {
--			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
--		},
--	},
--	{	/* Handle problems with rebooting on Dell 300's */
--		.callback = set_bios_reboot,
--		.ident = "Dell PowerEdge 300",
--		.matches = {
--			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
--		},
--	},
--	{       /* Handle problems with rebooting on Dell Optiplex 745's SFF*/
--		.callback = set_bios_reboot,
--		.ident = "Dell OptiPlex 745",
--		.matches = {
--			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
--			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
--			DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
--		},
--	},
--	{	/* Handle problems with rebooting on Dell 2400's */
--		.callback = set_bios_reboot,
--		.ident = "Dell PowerEdge 2400",
--		.matches = {
--			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
--		},
--	},
--	{	/* Handle problems with rebooting on HP laptops */
--		.callback = set_bios_reboot,
--		.ident = "HP Compaq Laptop",
--		.matches = {
--			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
--		},
--	},
--	{ }
--};
--
--static int __init reboot_init(void)
--{
--	dmi_check_system(reboot_dmi_table);
--	return 0;
--}
--
--core_initcall(reboot_init);
--
--/* The following code and data reboots the machine by switching to real
--   mode and jumping to the BIOS reset entry point, as if the CPU has
--   really been reset.  The previous version asked the keyboard
--   controller to pulse the CPU reset line, which is more thorough, but
--   doesn't work with at least one type of 486 motherboard.  It is easy
--   to stop this code working; hence the copious comments. */
--
--static unsigned long long
--real_mode_gdt_entries [3] =
--{
--	0x0000000000000000ULL,	/* Null descriptor */
--	0x00009a000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
--	0x000092000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
--};
--
--static struct Xgt_desc_struct
--real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
--real_mode_idt = { 0x3ff, 0 },
--no_idt = { 0, 0 };
--
--
--/* This is 16-bit protected mode code to disable paging and the cache,
--   switch to real mode and jump to the BIOS reset code.
--
--   The instruction that switches to real mode by writing to CR0 must be
--   followed immediately by a far jump instruction, which set CS to a
--   valid value for real mode, and flushes the prefetch queue to avoid
--   running instructions that have already been decoded in protected
--   mode.
--
--   Clears all the flags except ET, especially PG (paging), PE
--   (protected-mode enable) and TS (task switch for coprocessor state
--   save).  Flushes the TLB after paging has been disabled.  Sets CD and
--   NW, to disable the cache on a 486, and invalidates the cache.  This
--   is more like the state of a 486 after reset.  I don't know if
--   something else should be done for other chips.
--
--   More could be done here to set up the registers as if a CPU reset had
--   occurred; hopefully real BIOSs don't assume much. */
--
--static unsigned char real_mode_switch [] =
--{
--	0x66, 0x0f, 0x20, 0xc0,			/*    movl  %cr0,%eax        */
--	0x66, 0x83, 0xe0, 0x11,			/*    andl  $0x00000011,%eax */
--	0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,	/*    orl   $0x60000000,%eax */
--	0x66, 0x0f, 0x22, 0xc0,			/*    movl  %eax,%cr0        */
--	0x66, 0x0f, 0x22, 0xd8,			/*    movl  %eax,%cr3        */
--	0x66, 0x0f, 0x20, 0xc3,			/*    movl  %cr0,%ebx        */
--	0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60,	/*    andl  $0x60000000,%ebx */
--	0x74, 0x02,				/*    jz    f                */
--	0x0f, 0x09,				/*    wbinvd                 */
--	0x24, 0x10,				/* f: andb  $0x10,al         */
--	0x66, 0x0f, 0x22, 0xc0			/*    movl  %eax,%cr0        */
--};
--static unsigned char jump_to_bios [] =
--{
--	0xea, 0x00, 0x00, 0xff, 0xff		/*    ljmp  $0xffff,$0x0000  */
--};
--
--/*
-- * Switch to real mode and then execute the code
-- * specified by the code and length parameters.
-- * We assume that length will aways be less that 100!
-- */
--void machine_real_restart(unsigned char *code, int length)
--{
--	local_irq_disable();
--
--	/* Write zero to CMOS register number 0x0f, which the BIOS POST
--	   routine will recognize as telling it to do a proper reboot.  (Well
--	   that's what this book in front of me says -- it may only apply to
--	   the Phoenix BIOS though, it's not clear).  At the same time,
--	   disable NMIs by setting the top bit in the CMOS address register,
--	   as we're about to do peculiar things to the CPU.  I'm not sure if
--	   `outb_p' is needed instead of just `outb'.  Use it to be on the
--	   safe side.  (Yes, CMOS_WRITE does outb_p's. -  Paul G.)
--	 */
--
--	spin_lock(&rtc_lock);
--	CMOS_WRITE(0x00, 0x8f);
--	spin_unlock(&rtc_lock);
--
--	/* Remap the kernel at virtual address zero, as well as offset zero
--	   from the kernel segment.  This assumes the kernel segment starts at
--	   virtual address PAGE_OFFSET. */
--
--	memcpy (swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
--		sizeof (swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
--
--	/*
--	 * Use `swapper_pg_dir' as our page directory.
--	 */
--	load_cr3(swapper_pg_dir);
--
--	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
--	   this on booting to tell it to "Bypass memory test (also warm
--	   boot)".  This seems like a fairly standard thing that gets set by
--	   REBOOT.COM programs, and the previous reset routine did this
--	   too. */
--
--	*((unsigned short *)0x472) = reboot_mode;
--
--	/* For the switch to real mode, copy some code to low memory.  It has
--	   to be in the first 64k because it is running in 16-bit mode, and it
--	   has to have the same physical and virtual address, because it turns
--	   off paging.  Copy it near the end of the first page, out of the way
--	   of BIOS variables. */
+-	else {
+-		outb_p(cached_21(cpu),0x21);
++	} else {
++		outb_p(cached_21(cpu), 0x21);
+ 		(void)inb_p(0x21);
+ 	}
+ }
+@@ -1631,8 +1582,7 @@ disable_local_vic_irq(unsigned int irq)
+  * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
+  * this interrupt actually comes in, then we mask and ack here to push
+  * the interrupt off to another CPU */
+-static void
+-before_handle_vic_irq(unsigned int irq)
++static void before_handle_vic_irq(unsigned int irq)
+ {
+ 	irq_desc_t *desc = irq_desc + irq;
+ 	__u8 cpu = smp_processor_id();
+@@ -1641,16 +1591,16 @@ before_handle_vic_irq(unsigned int irq)
+ 	vic_intr_total++;
+ 	vic_intr_count[cpu]++;
+ 
+-	if(!(cpu_irq_affinity[cpu] & (1<<irq))) {
++	if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
+ 		/* The irq is not in our affinity mask, push it off
+ 		 * onto another CPU */
+-		VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n",
+-			irq, cpu));
++		VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
++			"on cpu %d\n", irq, cpu));
+ 		disable_local_vic_irq(irq);
+ 		/* set IRQ_INPROGRESS to prevent the handler in irq.c from
+ 		 * actually calling the interrupt routine */
+ 		desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
+-	} else if(desc->status & IRQ_DISABLED) {
++	} else if (desc->status & IRQ_DISABLED) {
+ 		/* Damn, the interrupt actually arrived, do the lazy
+ 		 * disable thing. The interrupt routine in irq.c will
+ 		 * not handle a IRQ_DISABLED interrupt, so nothing more
+@@ -1667,8 +1617,7 @@ before_handle_vic_irq(unsigned int irq)
+ }
+ 
+ /* Finish the VIC interrupt: basically mask */
+-static void
+-after_handle_vic_irq(unsigned int irq)
++static void after_handle_vic_irq(unsigned int irq)
+ {
+ 	irq_desc_t *desc = irq_desc + irq;
+ 
+@@ -1685,11 +1634,11 @@ after_handle_vic_irq(unsigned int irq)
+ #ifdef VOYAGER_DEBUG
+ 		/* DEBUG: before we ack, check what's in progress */
+ 		isr = vic_read_isr();
+-		if((isr & (1<<irq) && !(status & IRQ_REPLAY)) == 0) {
++		if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
+ 			int i;
+ 			__u8 cpu = smp_processor_id();
+ 			__u8 real_cpu;
+-			int mask; /* Um... initialize me??? --RR */
++			int mask;	/* Um... initialize me??? --RR */
+ 
+ 			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
+ 			       cpu, irq);
+@@ -1698,9 +1647,10 @@ after_handle_vic_irq(unsigned int irq)
+ 				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
+ 				     VIC_PROCESSOR_ID);
+ 				isr = vic_read_isr();
+-				if(isr & (1<<irq)) {
+-					printk("VOYAGER SMP: CPU%d ack irq %d\n",
+-					       real_cpu, irq);
++				if (isr & (1 << irq)) {
++					printk
++					    ("VOYAGER SMP: CPU%d ack irq %d\n",
++					     real_cpu, irq);
+ 					ack_vic_irq(irq);
+ 				}
+ 				outb(cpu, VIC_PROCESSOR_ID);
+@@ -1711,7 +1661,7 @@ after_handle_vic_irq(unsigned int irq)
+ 		 * receipt by another CPU so everything must be in
+ 		 * order here  */
+ 		ack_vic_irq(irq);
+-		if(status & IRQ_REPLAY) {
++		if (status & IRQ_REPLAY) {
+ 			/* replay is set if we disable the interrupt
+ 			 * in the before_handle_vic_irq() routine, so
+ 			 * clear the in progress bit here to allow the
+@@ -1720,9 +1670,9 @@ after_handle_vic_irq(unsigned int irq)
+ 		}
+ #ifdef VOYAGER_DEBUG
+ 		isr = vic_read_isr();
+-		if((isr & (1<<irq)) != 0)
+-			printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n",
+-			       irq, isr);
++		if ((isr & (1 << irq)) != 0)
++			printk("VOYAGER SMP: after_handle_vic_irq() after "
++			       "ack irq=%d, isr=0x%x\n", irq, isr);
+ #endif /* VOYAGER_DEBUG */
+ 	}
+ 	_raw_spin_unlock(&vic_irq_lock);
+@@ -1731,7 +1681,6 @@ after_handle_vic_irq(unsigned int irq)
+ 	 * may be intercepted by another CPU if reasserted */
+ }
+ 
 -
--	memcpy ((void *) (0x1000 - sizeof (real_mode_switch) - 100),
--		real_mode_switch, sizeof (real_mode_switch));
--	memcpy ((void *) (0x1000 - 100), code, length);
+ /* Linux processor - interrupt affinity manipulations.
+  *
+  * For each processor, we maintain a 32 bit irq affinity mask.
+@@ -1748,8 +1697,7 @@ after_handle_vic_irq(unsigned int irq)
+  * change the mask and then do an interrupt enable CPI to re-enable on
+  * the selected processors */
+ 
+-void
+-set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
++void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+ {
+ 	/* Only extended processors handle interrupts */
+ 	unsigned long real_mask;
+@@ -1757,13 +1705,13 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+ 	int cpu;
+ 
+ 	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+-	
+-	if(cpus_addr(mask)[0] == 0)
++
++	if (cpus_addr(mask)[0] == 0)
+ 		/* can't have no CPUs to accept the interrupt -- extremely
+ 		 * bad things will happen */
+ 		return;
+ 
+-	if(irq == 0)
++	if (irq == 0)
+ 		/* can't change the affinity of the timer IRQ.  This
+ 		 * is due to the constraint in the voyager
+ 		 * architecture that the CPI also comes in on and IRQ
+@@ -1772,7 +1720,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+ 		 * will no-longer be able to accept VIC CPIs */
+ 		return;
+ 
+-	if(irq >= 32) 
++	if (irq >= 32)
+ 		/* You can only have 32 interrupts in a voyager system
+ 		 * (and 32 only if you have a secondary microchannel
+ 		 * bus) */
+@@ -1780,8 +1728,8 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+ 
+ 	for_each_online_cpu(cpu) {
+ 		unsigned long cpu_mask = 1 << cpu;
+-		
+-		if(cpu_mask & real_mask) {
++
++		if (cpu_mask & real_mask) {
+ 			/* enable the interrupt for this cpu */
+ 			cpu_irq_affinity[cpu] |= irq_mask;
+ 		} else {
+@@ -1800,25 +1748,23 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+ 	unmask_vic_irq(irq);
+ }
+ 
+-static void
+-ack_vic_irq(unsigned int irq)
++static void ack_vic_irq(unsigned int irq)
+ {
+ 	if (irq & 8) {
+-		outb(0x62,0x20);	/* Specific EOI to cascade */
+-		outb(0x60|(irq & 7),0xA0);
++		outb(0x62, 0x20);	/* Specific EOI to cascade */
++		outb(0x60 | (irq & 7), 0xA0);
+ 	} else {
+-		outb(0x60 | (irq & 7),0x20);
++		outb(0x60 | (irq & 7), 0x20);
+ 	}
+ }
+ 
+ /* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
+  * but are not vectored by it.  This means that the 8259 mask must be
+  * lowered to receive them */
+-static __init void
+-vic_enable_cpi(void)
++static __init void vic_enable_cpi(void)
+ {
+ 	__u8 cpu = smp_processor_id();
+-	
++
+ 	/* just take a copy of the current mask (nop for boot cpu) */
+ 	vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];
+ 
+@@ -1827,7 +1773,7 @@ vic_enable_cpi(void)
+ 	/* for sys int and cmn int */
+ 	enable_local_vic_irq(7);
+ 
+-	if(is_cpu_quad()) {
++	if (is_cpu_quad()) {
+ 		outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
+ 		outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
+ 		VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
+@@ -1838,8 +1784,7 @@ vic_enable_cpi(void)
+ 		cpu, vic_irq_mask[cpu]));
+ }
+ 
+-void
+-voyager_smp_dump()
++void voyager_smp_dump()
+ {
+ 	int old_cpu = smp_processor_id(), cpu;
+ 
+@@ -1865,10 +1810,10 @@ voyager_smp_dump()
+ 		       cpu, vic_irq_mask[cpu], imr, irr, isr);
+ #if 0
+ 		/* These lines are put in to try to unstick an un ack'd irq */
+-		if(isr != 0) {
++		if (isr != 0) {
+ 			int irq;
+-			for(irq=0; irq<16; irq++) {
+-				if(isr & (1<<irq)) {
++			for (irq = 0; irq < 16; irq++) {
++				if (isr & (1 << irq)) {
+ 					printk("\tCPU%d: ack irq %d\n",
+ 					       cpu, irq);
+ 					local_irq_save(flags);
+@@ -1884,17 +1829,15 @@ voyager_smp_dump()
+ 	}
+ }
+ 
+-void
+-smp_voyager_power_off(void *dummy)
++void smp_voyager_power_off(void *dummy)
+ {
+-	if(smp_processor_id() == boot_cpu_id) 
++	if (smp_processor_id() == boot_cpu_id)
+ 		voyager_power_off();
+ 	else
+ 		smp_stop_cpu_function(NULL);
+ }
+ 
+-static void __init
+-voyager_smp_prepare_cpus(unsigned int max_cpus)
++static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
+ {
+ 	/* FIXME: ignore max_cpus for now */
+ 	smp_boot_cpus();
+@@ -1911,8 +1854,7 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
+ 	cpu_set(smp_processor_id(), cpu_present_map);
+ }
+ 
+-static int __cpuinit
+-voyager_cpu_up(unsigned int cpu)
++static int __cpuinit voyager_cpu_up(unsigned int cpu)
+ {
+ 	/* This only works at boot for x86.  See "rewrite" above. */
+ 	if (cpu_isset(cpu, smp_commenced_mask))
+@@ -1928,14 +1870,12 @@ voyager_cpu_up(unsigned int cpu)
+ 	return 0;
+ }
+ 
+-static void __init
+-voyager_smp_cpus_done(unsigned int max_cpus)
++static void __init voyager_smp_cpus_done(unsigned int max_cpus)
+ {
+ 	zap_low_mappings();
+ }
+ 
+-void __init
+-smp_setup_processor_id(void)
++void __init smp_setup_processor_id(void)
+ {
+ 	current_thread_info()->cpu = hard_smp_processor_id();
+ 	x86_write_percpu(cpu_number, hard_smp_processor_id());
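
Reading aid (not part of the patch): the voyager_smp.c hunks above are almost entirely Lindent-style cleanups -- fastcall dropped from the CPI handlers, "if(" becomes "if (", return types joined onto the function name -- plus two small renames: the file-local leave_mm() becomes voyager_leave_mm() and the private FLUSH_ALL constant is replaced by the generic TLB_FLUSH_ALL. A minimal sketch of the lazy-TLB idea behind the renamed helper, built only from what the hunks show (the cpu_vm_mask update is summarized in a comment):

	/* In interrupt context we cannot mmdrop() the old mm, so a lazily
	 * attached CPU just stops referencing it: drop this CPU from the
	 * mm's cpu_vm_mask and reload a kernel-only page directory. */
	static inline void voyager_leave_mm(unsigned long cpu)
	{
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
			BUG();	/* only legal while in the lazy TLB state */
		/* ... clear this CPU in the old mm's cpu_vm_mask ... */
		load_cr3(swapper_pg_dir);
	}
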
+diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c
+index 50f9366..c69c931 100644
+--- a/arch/x86/mach-voyager/voyager_thread.c
++++ b/arch/x86/mach-voyager/voyager_thread.c
+@@ -30,12 +30,10 @@
+ #include <asm/mtrr.h>
+ #include <asm/msr.h>
+ 
 -
--	/* Set up the IDT for real mode. */
+ struct task_struct *voyager_thread;
+ static __u8 set_timeout;
+ 
+-static int
+-execute(const char *string)
++static int execute(const char *string)
+ {
+ 	int ret;
+ 
+@@ -52,48 +50,48 @@ execute(const char *string)
+ 		NULL,
+ 	};
+ 
+-	if ((ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) {
+-		printk(KERN_ERR "Voyager failed to run \"%s\": %i\n",
+-		       string, ret);
++	if ((ret =
++	     call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) {
++		printk(KERN_ERR "Voyager failed to run \"%s\": %i\n", string,
++		       ret);
+ 	}
+ 	return ret;
+ }
+ 
+-static void
+-check_from_kernel(void)
++static void check_from_kernel(void)
+ {
+-	if(voyager_status.switch_off) {
+-		
++	if (voyager_status.switch_off) {
++
+ 		/* FIXME: This should be configurable via proc */
+ 		execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
+-	} else if(voyager_status.power_fail) {
++	} else if (voyager_status.power_fail) {
+ 		VDEBUG(("Voyager daemon detected AC power failure\n"));
+-		
++
+ 		/* FIXME: This should be configureable via proc */
+ 		execute("umask 600; echo F > /etc/powerstatus; kill -PWR 1");
+ 		set_timeout = 1;
+ 	}
+ }
+ 
+-static void
+-check_continuing_condition(void)
++static void check_continuing_condition(void)
+ {
+-	if(voyager_status.power_fail) {
++	if (voyager_status.power_fail) {
+ 		__u8 data;
+-		voyager_cat_psi(VOYAGER_PSI_SUBREAD, 
++		voyager_cat_psi(VOYAGER_PSI_SUBREAD,
+ 				VOYAGER_PSI_AC_FAIL_REG, &data);
+-		if((data & 0x1f) == 0) {
++		if ((data & 0x1f) == 0) {
+ 			/* all power restored */
+-			printk(KERN_NOTICE "VOYAGER AC power restored, cancelling shutdown\n");
++			printk(KERN_NOTICE
++			       "VOYAGER AC power restored, cancelling shutdown\n");
+ 			/* FIXME: should be user configureable */
+-			execute("umask 600; echo O > /etc/powerstatus; kill -PWR 1");
++			execute
++			    ("umask 600; echo O > /etc/powerstatus; kill -PWR 1");
+ 			set_timeout = 0;
+ 		}
+ 	}
+ }
+ 
+-static int
+-thread(void *unused)
++static int thread(void *unused)
+ {
+ 	printk(KERN_NOTICE "Voyager starting monitor thread\n");
+ 
+@@ -102,7 +100,7 @@ thread(void *unused)
+ 		schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT);
+ 
+ 		VDEBUG(("Voyager Daemon awoken\n"));
+-		if(voyager_status.request_from_kernel == 0) {
++		if (voyager_status.request_from_kernel == 0) {
+ 			/* probably awoken from timeout */
+ 			check_continuing_condition();
+ 		} else {
+@@ -112,20 +110,18 @@ thread(void *unused)
+ 	}
+ }
+ 
+-static int __init
+-voyager_thread_start(void)
++static int __init voyager_thread_start(void)
+ {
+ 	voyager_thread = kthread_run(thread, NULL, "kvoyagerd");
+ 	if (IS_ERR(voyager_thread)) {
+-		printk(KERN_ERR "Voyager: Failed to create system monitor thread.\n");
++		printk(KERN_ERR
++		       "Voyager: Failed to create system monitor thread.\n");
+ 		return PTR_ERR(voyager_thread);
+ 	}
+ 	return 0;
+ }
+ 
 -
--	load_idt(&real_mode_idt);
+-static void __exit
+-voyager_thread_stop(void)
++static void __exit voyager_thread_stop(void)
+ {
+ 	kthread_stop(voyager_thread);
+ }
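
Reading aid (not part of the patch): voyager_thread.c above is likewise a pure reformat; the monitor thread still reacts to power-fail and switch-off events by running shell commands through execute(). The argv/envp values below are illustrative assumptions -- only the call_usermodehelper() call and UMH_WAIT_PROC are visible in the hunk:

	/* Sketch of the pattern execute() wraps: run a shell command and
	 * wait for it to finish before returning its exit status. */
	static int run_command(const char *string)
	{
		char *argv[] = { "/bin/bash", "-c", (char *)string, NULL };
		static char *envp[] = {
			"HOME=/", "TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL
		};

		return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	}
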
+diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
+index a1b0d22..59d353d 100644
+--- a/arch/x86/math-emu/errors.c
++++ b/arch/x86/math-emu/errors.c
+@@ -33,45 +33,41 @@
+ #undef PRINT_MESSAGES
+ /* */
+ 
 -
--	/* Set up a GDT from which we can load segment descriptors for real
--	   mode.  The GDT is not used in real mode; it is just needed here to
--	   prepare the descriptors. */
+ #if 0
+ void Un_impl(void)
+ {
+-  u_char byte1, FPU_modrm;
+-  unsigned long address = FPU_ORIG_EIP;
 -
--	load_gdt(&real_mode_gdt);
+-  RE_ENTRANT_CHECK_OFF;
+-  /* No need to check access_ok(), we have previously fetched these bytes. */
+-  printk("Unimplemented FPU Opcode at eip=%p : ", (void __user *) address);
+-  if ( FPU_CS == __USER_CS )
+-    {
+-      while ( 1 )
+-	{
+-	  FPU_get_user(byte1, (u_char __user *) address);
+-	  if ( (byte1 & 0xf8) == 0xd8 ) break;
+-	  printk("[%02x]", byte1);
+-	  address++;
++	u_char byte1, FPU_modrm;
++	unsigned long address = FPU_ORIG_EIP;
++
++	RE_ENTRANT_CHECK_OFF;
++	/* No need to check access_ok(), we have previously fetched these bytes. */
++	printk("Unimplemented FPU Opcode at eip=%p : ", (void __user *)address);
++	if (FPU_CS == __USER_CS) {
++		while (1) {
++			FPU_get_user(byte1, (u_char __user *) address);
++			if ((byte1 & 0xf8) == 0xd8)
++				break;
++			printk("[%02x]", byte1);
++			address++;
++		}
++		printk("%02x ", byte1);
++		FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
++
++		if (FPU_modrm >= 0300)
++			printk("%02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8,
++			       FPU_modrm & 7);
++		else
++			printk("/%d\n", (FPU_modrm >> 3) & 7);
++	} else {
++		printk("cs selector = %04x\n", FPU_CS);
+ 	}
+-      printk("%02x ", byte1);
+-      FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
+-      
+-      if (FPU_modrm >= 0300)
+-	printk("%02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8, FPU_modrm & 7);
+-      else
+-	printk("/%d\n", (FPU_modrm >> 3) & 7);
+-    }
+-  else
+-    {
+-      printk("cs selector = %04x\n", FPU_CS);
+-    }
 -
--	/* Load the data segment registers, and thus the descriptors ready for
--	   real mode.  The base address of each segment is 0x100, 16 times the
--	   selector value being loaded here.  This is so that the segment
--	   registers don't have to be reloaded after switching to real mode:
--	   the values are consistent for real mode operation already. */
+-  RE_ENTRANT_CHECK_ON;
 -
--	__asm__ __volatile__ ("movl $0x0010,%%eax\n"
--				"\tmovl %%eax,%%ds\n"
--				"\tmovl %%eax,%%es\n"
--				"\tmovl %%eax,%%fs\n"
--				"\tmovl %%eax,%%gs\n"
--				"\tmovl %%eax,%%ss" : : : "eax");
+-  EXCEPTION(EX_Invalid);
+ 
+-}
+-#endif  /*  0  */
++	RE_ENTRANT_CHECK_ON;
+ 
++	EXCEPTION(EX_Invalid);
++
++}
++#endif /*  0  */
+ 
+ /*
+    Called for opcodes which are illegal and which are known to result in a
+@@ -79,139 +75,152 @@ void Un_impl(void)
+    */
+ void FPU_illegal(void)
+ {
+-  math_abort(FPU_info,SIGILL);
++	math_abort(FPU_info, SIGILL);
+ }
+ 
 -
--	/* Jump to the 16-bit code that we copied earlier.  It disables paging
--	   and the cache, switches to real mode, and jumps to the BIOS reset
--	   entry point. */
 -
--	__asm__ __volatile__ ("ljmp $0x0008,%0"
--				:
--				: "i" ((void *) (0x1000 - sizeof (real_mode_switch) - 100)));
--}
--#ifdef CONFIG_APM_MODULE
--EXPORT_SYMBOL(machine_real_restart);
--#endif
+ void FPU_printall(void)
+ {
+-  int i;
+-  static const char *tag_desc[] = { "Valid", "Zero", "ERROR", "Empty",
+-                              "DeNorm", "Inf", "NaN" };
+-  u_char byte1, FPU_modrm;
+-  unsigned long address = FPU_ORIG_EIP;
 -
--static void native_machine_shutdown(void)
--{
--#ifdef CONFIG_SMP
--	int reboot_cpu_id;
+-  RE_ENTRANT_CHECK_OFF;
+-  /* No need to check access_ok(), we have previously fetched these bytes. */
+-  printk("At %p:", (void *) address);
+-  if ( FPU_CS == __USER_CS )
+-    {
++	int i;
++	static const char *tag_desc[] = { "Valid", "Zero", "ERROR", "Empty",
++		"DeNorm", "Inf", "NaN"
++	};
++	u_char byte1, FPU_modrm;
++	unsigned long address = FPU_ORIG_EIP;
++
++	RE_ENTRANT_CHECK_OFF;
++	/* No need to check access_ok(), we have previously fetched these bytes. */
++	printk("At %p:", (void *)address);
++	if (FPU_CS == __USER_CS) {
+ #define MAX_PRINTED_BYTES 20
+-      for ( i = 0; i < MAX_PRINTED_BYTES; i++ )
+-	{
+-	  FPU_get_user(byte1, (u_char __user *) address);
+-	  if ( (byte1 & 0xf8) == 0xd8 )
+-	    {
+-	      printk(" %02x", byte1);
+-	      break;
+-	    }
+-	  printk(" [%02x]", byte1);
+-	  address++;
+-	}
+-      if ( i == MAX_PRINTED_BYTES )
+-	printk(" [more..]\n");
+-      else
+-	{
+-	  FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
+-	  
+-	  if (FPU_modrm >= 0300)
+-	    printk(" %02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8, FPU_modrm & 7);
+-	  else
+-	    printk(" /%d, mod=%d rm=%d\n",
+-		   (FPU_modrm >> 3) & 7, (FPU_modrm >> 6) & 3, FPU_modrm & 7);
++		for (i = 0; i < MAX_PRINTED_BYTES; i++) {
++			FPU_get_user(byte1, (u_char __user *) address);
++			if ((byte1 & 0xf8) == 0xd8) {
++				printk(" %02x", byte1);
++				break;
++			}
++			printk(" [%02x]", byte1);
++			address++;
++		}
++		if (i == MAX_PRINTED_BYTES)
++			printk(" [more..]\n");
++		else {
++			FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
++
++			if (FPU_modrm >= 0300)
++				printk(" %02x (%02x+%d)\n", FPU_modrm,
++				       FPU_modrm & 0xf8, FPU_modrm & 7);
++			else
++				printk(" /%d, mod=%d rm=%d\n",
++				       (FPU_modrm >> 3) & 7,
++				       (FPU_modrm >> 6) & 3, FPU_modrm & 7);
++		}
++	} else {
++		printk("%04x\n", FPU_CS);
+ 	}
+-    }
+-  else
+-    {
+-      printk("%04x\n", FPU_CS);
+-    }
+ 
+-  partial_status = status_word();
++	partial_status = status_word();
+ 
+ #ifdef DEBUGGING
+-if ( partial_status & SW_Backward )    printk("SW: backward compatibility\n");
+-if ( partial_status & SW_C3 )          printk("SW: condition bit 3\n");
+-if ( partial_status & SW_C2 )          printk("SW: condition bit 2\n");
+-if ( partial_status & SW_C1 )          printk("SW: condition bit 1\n");
+-if ( partial_status & SW_C0 )          printk("SW: condition bit 0\n");
+-if ( partial_status & SW_Summary )     printk("SW: exception summary\n");
+-if ( partial_status & SW_Stack_Fault ) printk("SW: stack fault\n");
+-if ( partial_status & SW_Precision )   printk("SW: loss of precision\n");
+-if ( partial_status & SW_Underflow )   printk("SW: underflow\n");
+-if ( partial_status & SW_Overflow )    printk("SW: overflow\n");
+-if ( partial_status & SW_Zero_Div )    printk("SW: divide by zero\n");
+-if ( partial_status & SW_Denorm_Op )   printk("SW: denormalized operand\n");
+-if ( partial_status & SW_Invalid )     printk("SW: invalid operation\n");
++	if (partial_status & SW_Backward)
++		printk("SW: backward compatibility\n");
++	if (partial_status & SW_C3)
++		printk("SW: condition bit 3\n");
++	if (partial_status & SW_C2)
++		printk("SW: condition bit 2\n");
++	if (partial_status & SW_C1)
++		printk("SW: condition bit 1\n");
++	if (partial_status & SW_C0)
++		printk("SW: condition bit 0\n");
++	if (partial_status & SW_Summary)
++		printk("SW: exception summary\n");
++	if (partial_status & SW_Stack_Fault)
++		printk("SW: stack fault\n");
++	if (partial_status & SW_Precision)
++		printk("SW: loss of precision\n");
++	if (partial_status & SW_Underflow)
++		printk("SW: underflow\n");
++	if (partial_status & SW_Overflow)
++		printk("SW: overflow\n");
++	if (partial_status & SW_Zero_Div)
++		printk("SW: divide by zero\n");
++	if (partial_status & SW_Denorm_Op)
++		printk("SW: denormalized operand\n");
++	if (partial_status & SW_Invalid)
++		printk("SW: invalid operation\n");
+ #endif /* DEBUGGING */
+ 
+-  printk(" SW: b=%d st=%ld es=%d sf=%d cc=%d%d%d%d ef=%d%d%d%d%d%d\n",
+-	 partial_status & 0x8000 ? 1 : 0,   /* busy */
+-	 (partial_status & 0x3800) >> 11,   /* stack top pointer */
+-	 partial_status & 0x80 ? 1 : 0,     /* Error summary status */
+-	 partial_status & 0x40 ? 1 : 0,     /* Stack flag */
+-	 partial_status & SW_C3?1:0, partial_status & SW_C2?1:0, /* cc */
+-	 partial_status & SW_C1?1:0, partial_status & SW_C0?1:0, /* cc */
+-	 partial_status & SW_Precision?1:0, partial_status & SW_Underflow?1:0,
+-	 partial_status & SW_Overflow?1:0, partial_status & SW_Zero_Div?1:0,
+-	 partial_status & SW_Denorm_Op?1:0, partial_status & SW_Invalid?1:0);
+-  
+-printk(" CW: ic=%d rc=%ld%ld pc=%ld%ld iem=%d     ef=%d%d%d%d%d%d\n",
+-	 control_word & 0x1000 ? 1 : 0,
+-	 (control_word & 0x800) >> 11, (control_word & 0x400) >> 10,
+-	 (control_word & 0x200) >> 9, (control_word & 0x100) >> 8,
+-	 control_word & 0x80 ? 1 : 0,
+-	 control_word & SW_Precision?1:0, control_word & SW_Underflow?1:0,
+-	 control_word & SW_Overflow?1:0, control_word & SW_Zero_Div?1:0,
+-	 control_word & SW_Denorm_Op?1:0, control_word & SW_Invalid?1:0);
 -
--	/* The boot cpu is always logical cpu 0 */
--	reboot_cpu_id = 0;
+-  for ( i = 0; i < 8; i++ )
+-    {
+-      FPU_REG *r = &st(i);
+-      u_char tagi = FPU_gettagi(i);
+-      switch (tagi)
+-	{
+-	case TAG_Empty:
+-	  continue;
+-	  break;
+-	case TAG_Zero:
+-	case TAG_Special:
+-	  tagi = FPU_Special(r);
+-	case TAG_Valid:
+-	  printk("st(%d)  %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
+-		 getsign(r) ? '-' : '+',
+-		 (long)(r->sigh >> 16),
+-		 (long)(r->sigh & 0xFFFF),
+-		 (long)(r->sigl >> 16),
+-		 (long)(r->sigl & 0xFFFF),
+-		 exponent(r) - EXP_BIAS + 1);
+-	  break;
+-	default:
+-	  printk("Whoops! Error in errors.c: tag%d is %d ", i, tagi);
+-	  continue;
+-	  break;
++	printk(" SW: b=%d st=%d es=%d sf=%d cc=%d%d%d%d ef=%d%d%d%d%d%d\n", partial_status & 0x8000 ? 1 : 0,	/* busy */
++	       (partial_status & 0x3800) >> 11,	/* stack top pointer */
++	       partial_status & 0x80 ? 1 : 0,	/* Error summary status */
++	       partial_status & 0x40 ? 1 : 0,	/* Stack flag */
++	       partial_status & SW_C3 ? 1 : 0, partial_status & SW_C2 ? 1 : 0,	/* cc */
++	       partial_status & SW_C1 ? 1 : 0, partial_status & SW_C0 ? 1 : 0,	/* cc */
++	       partial_status & SW_Precision ? 1 : 0,
++	       partial_status & SW_Underflow ? 1 : 0,
++	       partial_status & SW_Overflow ? 1 : 0,
++	       partial_status & SW_Zero_Div ? 1 : 0,
++	       partial_status & SW_Denorm_Op ? 1 : 0,
++	       partial_status & SW_Invalid ? 1 : 0);
++
++	printk(" CW: ic=%d rc=%d%d pc=%d%d iem=%d     ef=%d%d%d%d%d%d\n",
++	       control_word & 0x1000 ? 1 : 0,
++	       (control_word & 0x800) >> 11, (control_word & 0x400) >> 10,
++	       (control_word & 0x200) >> 9, (control_word & 0x100) >> 8,
++	       control_word & 0x80 ? 1 : 0,
++	       control_word & SW_Precision ? 1 : 0,
++	       control_word & SW_Underflow ? 1 : 0,
++	       control_word & SW_Overflow ? 1 : 0,
++	       control_word & SW_Zero_Div ? 1 : 0,
++	       control_word & SW_Denorm_Op ? 1 : 0,
++	       control_word & SW_Invalid ? 1 : 0);
++
++	for (i = 0; i < 8; i++) {
++		FPU_REG *r = &st(i);
++		u_char tagi = FPU_gettagi(i);
++		switch (tagi) {
++		case TAG_Empty:
++			continue;
++			break;
++		case TAG_Zero:
++		case TAG_Special:
++			tagi = FPU_Special(r);
++		case TAG_Valid:
++			printk("st(%d)  %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
++			       getsign(r) ? '-' : '+',
++			       (long)(r->sigh >> 16),
++			       (long)(r->sigh & 0xFFFF),
++			       (long)(r->sigl >> 16),
++			       (long)(r->sigl & 0xFFFF),
++			       exponent(r) - EXP_BIAS + 1);
++			break;
++		default:
++			printk("Whoops! Error in errors.c: tag%d is %d ", i,
++			       tagi);
++			continue;
++			break;
++		}
++		printk("%s\n", tag_desc[(int)(unsigned)tagi]);
+ 	}
+-      printk("%s\n", tag_desc[(int) (unsigned) tagi]);
+-    }
+ 
+-  RE_ENTRANT_CHECK_ON;
++	RE_ENTRANT_CHECK_ON;
+ 
+ }
+ 
+ static struct {
+-  int type;
+-  const char *name;
++	int type;
++	const char *name;
+ } exception_names[] = {
+-  { EX_StackOver, "stack overflow" },
+-  { EX_StackUnder, "stack underflow" },
+-  { EX_Precision, "loss of precision" },
+-  { EX_Underflow, "underflow" },
+-  { EX_Overflow, "overflow" },
+-  { EX_ZeroDiv, "divide by zero" },
+-  { EX_Denormal, "denormalized operand" },
+-  { EX_Invalid, "invalid operation" },
+-  { EX_INTERNAL, "INTERNAL BUG in "FPU_VERSION },
+-  { 0, NULL }
++	{
++	EX_StackOver, "stack overflow"}, {
++	EX_StackUnder, "stack underflow"}, {
++	EX_Precision, "loss of precision"}, {
++	EX_Underflow, "underflow"}, {
++	EX_Overflow, "overflow"}, {
++	EX_ZeroDiv, "divide by zero"}, {
++	EX_Denormal, "denormalized operand"}, {
++	EX_Invalid, "invalid operation"}, {
++	EX_INTERNAL, "INTERNAL BUG in " FPU_VERSION}, {
++	0, NULL}
+ };
+ 
+ /*
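
Reading aid (not part of the patch): Lindent reflows the exception_names initializer above into an unusual brace-per-entry layout; the data is unchanged. The same table in conventional kernel style, for comparison only:

	static struct {
		int type;
		const char *name;
	} exception_names[] = {
		{ EX_StackOver,  "stack overflow" },
		{ EX_StackUnder, "stack underflow" },
		{ EX_Precision,  "loss of precision" },
		{ EX_Underflow,  "underflow" },
		{ EX_Overflow,   "overflow" },
		{ EX_ZeroDiv,    "divide by zero" },
		{ EX_Denormal,   "denormalized operand" },
		{ EX_Invalid,    "invalid operation" },
		{ EX_INTERNAL,   "INTERNAL BUG in " FPU_VERSION },
		{ 0, NULL }
	};
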
+@@ -295,445 +304,386 @@ static struct {
+ 
+ asmlinkage void FPU_exception(int n)
+ {
+-  int i, int_type;
 -
--	/* See if there has been given a command line override */
--	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
--		cpu_isset(reboot_cpu, cpu_online_map)) {
--		reboot_cpu_id = reboot_cpu;
+-  int_type = 0;         /* Needed only to stop compiler warnings */
+-  if ( n & EX_INTERNAL )
+-    {
+-      int_type = n - EX_INTERNAL;
+-      n = EX_INTERNAL;
+-      /* Set lots of exception bits! */
+-      partial_status |= (SW_Exc_Mask | SW_Summary | SW_Backward);
+-    }
+-  else
+-    {
+-      /* Extract only the bits which we use to set the status word */
+-      n &= (SW_Exc_Mask);
+-      /* Set the corresponding exception bit */
+-      partial_status |= n;
+-      /* Set summary bits iff exception isn't masked */
+-      if ( partial_status & ~control_word & CW_Exceptions )
+-	partial_status |= (SW_Summary | SW_Backward);
+-      if ( n & (SW_Stack_Fault | EX_Precision) )
+-	{
+-	  if ( !(n & SW_C1) )
+-	    /* This bit distinguishes over- from underflow for a stack fault,
+-	       and roundup from round-down for precision loss. */
+-	    partial_status &= ~SW_C1;
++	int i, int_type;
++
++	int_type = 0;		/* Needed only to stop compiler warnings */
++	if (n & EX_INTERNAL) {
++		int_type = n - EX_INTERNAL;
++		n = EX_INTERNAL;
++		/* Set lots of exception bits! */
++		partial_status |= (SW_Exc_Mask | SW_Summary | SW_Backward);
++	} else {
++		/* Extract only the bits which we use to set the status word */
++		n &= (SW_Exc_Mask);
++		/* Set the corresponding exception bit */
++		partial_status |= n;
++		/* Set summary bits iff exception isn't masked */
++		if (partial_status & ~control_word & CW_Exceptions)
++			partial_status |= (SW_Summary | SW_Backward);
++		if (n & (SW_Stack_Fault | EX_Precision)) {
++			if (!(n & SW_C1))
++				/* This bit distinguishes over- from underflow for a stack fault,
++				   and roundup from round-down for precision loss. */
++				partial_status &= ~SW_C1;
++		}
+ 	}
+-    }
+ 
+-  RE_ENTRANT_CHECK_OFF;
+-  if ( (~control_word & n & CW_Exceptions) || (n == EX_INTERNAL) )
+-    {
++	RE_ENTRANT_CHECK_OFF;
++	if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) {
+ #ifdef PRINT_MESSAGES
+-      /* My message from the sponsor */
+-      printk(FPU_VERSION" "__DATE__" (C) W. Metzenthen.\n");
++		/* My message from the sponsor */
++		printk(FPU_VERSION " " __DATE__ " (C) W. Metzenthen.\n");
+ #endif /* PRINT_MESSAGES */
+-      
+-      /* Get a name string for error reporting */
+-      for (i=0; exception_names[i].type; i++)
+-	if ( (exception_names[i].type & n) == exception_names[i].type )
+-	  break;
+-      
+-      if (exception_names[i].type)
+-	{
++
++		/* Get a name string for error reporting */
++		for (i = 0; exception_names[i].type; i++)
++			if ((exception_names[i].type & n) ==
++			    exception_names[i].type)
++				break;
++
++		if (exception_names[i].type) {
+ #ifdef PRINT_MESSAGES
+-	  printk("FP Exception: %s!\n", exception_names[i].name);
++			printk("FP Exception: %s!\n", exception_names[i].name);
+ #endif /* PRINT_MESSAGES */
 -	}
--
--	/* Make certain the cpu I'm rebooting on is online */
--	if (!cpu_isset(reboot_cpu_id, cpu_online_map)) {
--		reboot_cpu_id = smp_processor_id();
+-      else
+-	printk("FPU emulator: Unknown Exception: 0x%04x!\n", n);
+-      
+-      if ( n == EX_INTERNAL )
+-	{
+-	  printk("FPU emulator: Internal error type 0x%04x\n", int_type);
+-	  FPU_printall();
 -	}
++		} else
++			printk("FPU emulator: Unknown Exception: 0x%04x!\n", n);
++
++		if (n == EX_INTERNAL) {
++			printk("FPU emulator: Internal error type 0x%04x\n",
++			       int_type);
++			FPU_printall();
++		}
+ #ifdef PRINT_MESSAGES
+-      else
+-	FPU_printall();
++		else
++			FPU_printall();
+ #endif /* PRINT_MESSAGES */
+ 
+-      /*
+-       * The 80486 generates an interrupt on the next non-control FPU
+-       * instruction. So we need some means of flagging it.
+-       * We use the ES (Error Summary) bit for this.
+-       */
+-    }
+-  RE_ENTRANT_CHECK_ON;
++		/*
++		 * The 80486 generates an interrupt on the next non-control FPU
++		 * instruction. So we need some means of flagging it.
++		 * We use the ES (Error Summary) bit for this.
++		 */
++	}
++	RE_ENTRANT_CHECK_ON;
+ 
+ #ifdef __DEBUG__
+-  math_abort(FPU_info,SIGFPE);
++	math_abort(FPU_info, SIGFPE);
+ #endif /* __DEBUG__ */
+ 
+ }
+ 
 -
--	/* Make certain I only run on the appropriate processor */
--	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
+ /* Real operation attempted on a NaN. */
+ /* Returns < 0 if the exception is unmasked */
+ int real_1op_NaN(FPU_REG *a)
+ {
+-  int signalling, isNaN;
 -
--	/* O.K. Now that I'm on the appropriate processor, stop
--	 * all of the others, and disable their local APICs.
--	 */
+-  isNaN = (exponent(a) == EXP_OVER) && (a->sigh & 0x80000000);
 -
--	smp_send_stop();
--#endif /* CONFIG_SMP */
+-  /* The default result for the case of two "equal" NaNs (signs may
+-     differ) is chosen to reproduce 80486 behaviour */
+-  signalling = isNaN && !(a->sigh & 0x40000000);
 -
--	lapic_shutdown();
+-  if ( !signalling )
+-    {
+-      if ( !isNaN )  /* pseudo-NaN, or other unsupported? */
+-	{
+-	  if ( control_word & CW_Invalid )
+-	    {
+-	      /* Masked response */
+-	      reg_copy(&CONST_QNaN, a);
+-	    }
+-	  EXCEPTION(EX_Invalid);
+-	  return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
++	int signalling, isNaN;
++
++	isNaN = (exponent(a) == EXP_OVER) && (a->sigh & 0x80000000);
++
++	/* The default result for the case of two "equal" NaNs (signs may
++	   differ) is chosen to reproduce 80486 behaviour */
++	signalling = isNaN && !(a->sigh & 0x40000000);
++
++	if (!signalling) {
++		if (!isNaN) {	/* pseudo-NaN, or other unsupported? */
++			if (control_word & CW_Invalid) {
++				/* Masked response */
++				reg_copy(&CONST_QNaN, a);
++			}
++			EXCEPTION(EX_Invalid);
++			return (!(control_word & CW_Invalid) ? FPU_Exception :
++				0) | TAG_Special;
++		}
++		return TAG_Special;
+ 	}
+-      return TAG_Special;
+-    }
+ 
+-  if ( control_word & CW_Invalid )
+-    {
+-      /* The masked response */
+-      if ( !(a->sigh & 0x80000000) )  /* pseudo-NaN ? */
+-	{
+-	  reg_copy(&CONST_QNaN, a);
++	if (control_word & CW_Invalid) {
++		/* The masked response */
++		if (!(a->sigh & 0x80000000)) {	/* pseudo-NaN ? */
++			reg_copy(&CONST_QNaN, a);
++		}
++		/* ensure a Quiet NaN */
++		a->sigh |= 0x40000000;
+ 	}
+-      /* ensure a Quiet NaN */
+-      a->sigh |= 0x40000000;
+-    }
+ 
+-  EXCEPTION(EX_Invalid);
++	EXCEPTION(EX_Invalid);
+ 
+-  return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
++	return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
+ }
+ 
 -
--#ifdef CONFIG_X86_IO_APIC
--	disable_IO_APIC();
--#endif
--#ifdef CONFIG_HPET_TIMER
--	hpet_disable();
--#endif
--}
+ /* Real operation attempted on two operands, one a NaN. */
+ /* Returns < 0 if the exception is unmasked */
+ int real_2op_NaN(FPU_REG const *b, u_char tagb,
+-		 int deststnr,
+-		 FPU_REG const *defaultNaN)
++		 int deststnr, FPU_REG const *defaultNaN)
+ {
+-  FPU_REG *dest = &st(deststnr);
+-  FPU_REG const *a = dest;
+-  u_char taga = FPU_gettagi(deststnr);
+-  FPU_REG const *x;
+-  int signalling, unsupported;
 -
--void __attribute__((weak)) mach_reboot_fixups(void)
--{
--}
+-  if ( taga == TAG_Special )
+-    taga = FPU_Special(a);
+-  if ( tagb == TAG_Special )
+-    tagb = FPU_Special(b);
 -
--static void native_machine_emergency_restart(void)
--{
--	if (!reboot_thru_bios) {
--		if (efi_enabled) {
--			efi.reset_system(EFI_RESET_COLD, EFI_SUCCESS, 0, NULL);
--			load_idt(&no_idt);
--			__asm__ __volatile__("int3");
--		}
--		/* rebooting needs to touch the page at absolute addr 0 */
--		*((unsigned short *)__va(0x472)) = reboot_mode;
--		for (;;) {
--			mach_reboot_fixups(); /* for board specific fixups */
--			mach_reboot();
--			/* That didn't work - force a triple fault.. */
--			load_idt(&no_idt);
--			__asm__ __volatile__("int3");
--		}
+-  /* TW_NaN is also used for unsupported data types. */
+-  unsupported = ((taga == TW_NaN)
+-		 && !((exponent(a) == EXP_OVER) && (a->sigh & 0x80000000)))
+-    || ((tagb == TW_NaN)
+-	&& !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)));
+-  if ( unsupported )
+-    {
+-      if ( control_word & CW_Invalid )
+-	{
+-	  /* Masked response */
+-	  FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
 -	}
--	if (efi_enabled)
--		efi.reset_system(EFI_RESET_WARM, EFI_SUCCESS, 0, NULL);
+-      EXCEPTION(EX_Invalid);
+-      return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
+-    }
 -
--	machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
--}
+-  if (taga == TW_NaN)
+-    {
+-      x = a;
+-      if (tagb == TW_NaN)
+-	{
+-	  signalling = !(a->sigh & b->sigh & 0x40000000);
+-	  if ( significand(b) > significand(a) )
+-	    x = b;
+-	  else if ( significand(b) == significand(a) )
+-	    {
+-	      /* The default result for the case of two "equal" NaNs (signs may
+-		 differ) is chosen to reproduce 80486 behaviour */
+-	      x = defaultNaN;
+-	    }
+-	}
+-      else
+-	{
+-	  /* return the quiet version of the NaN in a */
+-	  signalling = !(a->sigh & 0x40000000);
++	FPU_REG *dest = &st(deststnr);
++	FPU_REG const *a = dest;
++	u_char taga = FPU_gettagi(deststnr);
++	FPU_REG const *x;
++	int signalling, unsupported;
++
++	if (taga == TAG_Special)
++		taga = FPU_Special(a);
++	if (tagb == TAG_Special)
++		tagb = FPU_Special(b);
++
++	/* TW_NaN is also used for unsupported data types. */
++	unsupported = ((taga == TW_NaN)
++		       && !((exponent(a) == EXP_OVER)
++			    && (a->sigh & 0x80000000)))
++	    || ((tagb == TW_NaN)
++		&& !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)));
++	if (unsupported) {
++		if (control_word & CW_Invalid) {
++			/* Masked response */
++			FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
++		}
++		EXCEPTION(EX_Invalid);
++		return (!(control_word & CW_Invalid) ? FPU_Exception : 0) |
++		    TAG_Special;
+ 	}
+-    }
+-  else
++
++	if (taga == TW_NaN) {
++		x = a;
++		if (tagb == TW_NaN) {
++			signalling = !(a->sigh & b->sigh & 0x40000000);
++			if (significand(b) > significand(a))
++				x = b;
++			else if (significand(b) == significand(a)) {
++				/* The default result for the case of two "equal" NaNs (signs may
++				   differ) is chosen to reproduce 80486 behaviour */
++				x = defaultNaN;
++			}
++		} else {
++			/* return the quiet version of the NaN in a */
++			signalling = !(a->sigh & 0x40000000);
++		}
++	} else
+ #ifdef PARANOID
+-    if (tagb == TW_NaN)
++	if (tagb == TW_NaN)
+ #endif /* PARANOID */
+-    {
+-      signalling = !(b->sigh & 0x40000000);
+-      x = b;
+-    }
++	{
++		signalling = !(b->sigh & 0x40000000);
++		x = b;
++	}
+ #ifdef PARANOID
+-  else
+-    {
+-      signalling = 0;
+-      EXCEPTION(EX_INTERNAL|0x113);
+-      x = &CONST_QNaN;
+-    }
++	else {
++		signalling = 0;
++		EXCEPTION(EX_INTERNAL | 0x113);
++		x = &CONST_QNaN;
++	}
+ #endif /* PARANOID */
+ 
+-  if ( (!signalling) || (control_word & CW_Invalid) )
+-    {
+-      if ( ! x )
+-	x = b;
++	if ((!signalling) || (control_word & CW_Invalid)) {
++		if (!x)
++			x = b;
+ 
+-      if ( !(x->sigh & 0x80000000) )  /* pseudo-NaN ? */
+-	x = &CONST_QNaN;
++		if (!(x->sigh & 0x80000000))	/* pseudo-NaN ? */
++			x = &CONST_QNaN;
+ 
+-      FPU_copy_to_regi(x, TAG_Special, deststnr);
++		FPU_copy_to_regi(x, TAG_Special, deststnr);
+ 
+-      if ( !signalling )
+-	return TAG_Special;
++		if (!signalling)
++			return TAG_Special;
+ 
+-      /* ensure a Quiet NaN */
+-      dest->sigh |= 0x40000000;
+-    }
++		/* ensure a Quiet NaN */
++		dest->sigh |= 0x40000000;
++	}
+ 
+-  EXCEPTION(EX_Invalid);
++	EXCEPTION(EX_Invalid);
+ 
+-  return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
++	return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
+ }
+ 
 -
--static void native_machine_restart(char * __unused)
--{
--	machine_shutdown();
--	machine_emergency_restart();
+ /* Invalid arith operation on Valid registers */
+ /* Returns < 0 if the exception is unmasked */
+ asmlinkage int arith_invalid(int deststnr)
+ {
+ 
+-  EXCEPTION(EX_Invalid);
+-  
+-  if ( control_word & CW_Invalid )
+-    {
+-      /* The masked response */
+-      FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
+-    }
+-  
+-  return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Valid;
++	EXCEPTION(EX_Invalid);
+ 
 -}
++	if (control_word & CW_Invalid) {
++		/* The masked response */
++		FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
++	}
+ 
++	return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Valid;
++
++}
+ 
+ /* Divide a finite number by zero */
+ asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
+ {
+-  FPU_REG *dest = &st(deststnr);
+-  int tag = TAG_Valid;
++	FPU_REG *dest = &st(deststnr);
++	int tag = TAG_Valid;
++
++	if (control_word & CW_ZeroDiv) {
++		/* The masked response */
++		FPU_copy_to_regi(&CONST_INF, TAG_Special, deststnr);
++		setsign(dest, sign);
++		tag = TAG_Special;
++	}
+ 
+-  if ( control_word & CW_ZeroDiv )
+-    {
+-      /* The masked response */
+-      FPU_copy_to_regi(&CONST_INF, TAG_Special, deststnr);
+-      setsign(dest, sign);
+-      tag = TAG_Special;
+-    }
+- 
+-  EXCEPTION(EX_ZeroDiv);
++	EXCEPTION(EX_ZeroDiv);
+ 
+-  return (!(control_word & CW_ZeroDiv) ? FPU_Exception : 0) | tag;
++	return (!(control_word & CW_ZeroDiv) ? FPU_Exception : 0) | tag;
+ 
+ }
+ 
 -
--static void native_machine_halt(void)
--{
--}
+ /* This may be called often, so keep it lean */
+ int set_precision_flag(int flags)
+ {
+-  if ( control_word & CW_Precision )
+-    {
+-      partial_status &= ~(SW_C1 & flags);
+-      partial_status |= flags;   /* The masked response */
+-      return 0;
+-    }
+-  else
+-    {
+-      EXCEPTION(flags);
+-      return 1;
+-    }
++	if (control_word & CW_Precision) {
++		partial_status &= ~(SW_C1 & flags);
++		partial_status |= flags;	/* The masked response */
++		return 0;
++	} else {
++		EXCEPTION(flags);
++		return 1;
++	}
+ }
+ 
 -
--static void native_machine_power_off(void)
--{
--	if (pm_power_off) {
--		machine_shutdown();
--		pm_power_off();
--	}
--}
+ /* This may be called often, so keep it lean */
+ asmlinkage void set_precision_flag_up(void)
+ {
+-  if ( control_word & CW_Precision )
+-    partial_status |= (SW_Precision | SW_C1);   /* The masked response */
+-  else
+-    EXCEPTION(EX_Precision | SW_C1);
++	if (control_word & CW_Precision)
++		partial_status |= (SW_Precision | SW_C1);	/* The masked response */
++	else
++		EXCEPTION(EX_Precision | SW_C1);
+ }
+ 
 -
+ /* This may be called often, so keep it lean */
+ asmlinkage void set_precision_flag_down(void)
+ {
+-  if ( control_word & CW_Precision )
+-    {   /* The masked response */
+-      partial_status &= ~SW_C1;
+-      partial_status |= SW_Precision;
+-    }
+-  else
+-    EXCEPTION(EX_Precision);
++	if (control_word & CW_Precision) {	/* The masked response */
++		partial_status &= ~SW_C1;
++		partial_status |= SW_Precision;
++	} else
++		EXCEPTION(EX_Precision);
+ }
+ 
 -
--struct machine_ops machine_ops = {
--	.power_off = native_machine_power_off,
--	.shutdown = native_machine_shutdown,
--	.emergency_restart = native_machine_emergency_restart,
--	.restart = native_machine_restart,
--	.halt = native_machine_halt,
--};
+ asmlinkage int denormal_operand(void)
+ {
+-  if ( control_word & CW_Denormal )
+-    {   /* The masked response */
+-      partial_status |= SW_Denorm_Op;
+-      return TAG_Special;
+-    }
+-  else
+-    {
+-      EXCEPTION(EX_Denormal);
+-      return TAG_Special | FPU_Exception;
+-    }
++	if (control_word & CW_Denormal) {	/* The masked response */
++		partial_status |= SW_Denorm_Op;
++		return TAG_Special;
++	} else {
++		EXCEPTION(EX_Denormal);
++		return TAG_Special | FPU_Exception;
++	}
+ }
+ 
 -
--void machine_power_off(void)
--{
--	machine_ops.power_off();
--}
+ asmlinkage int arith_overflow(FPU_REG *dest)
+ {
+-  int tag = TAG_Valid;
++	int tag = TAG_Valid;
+ 
+-  if ( control_word & CW_Overflow )
+-    {
+-      /* The masked response */
++	if (control_word & CW_Overflow) {
++		/* The masked response */
+ /* ###### The response here depends upon the rounding mode */
+-      reg_copy(&CONST_INF, dest);
+-      tag = TAG_Special;
+-    }
+-  else
+-    {
+-      /* Subtract the magic number from the exponent */
+-      addexponent(dest, (-3 * (1 << 13)));
+-    }
 -
--void machine_shutdown(void)
--{
--	machine_ops.shutdown();
--}
+-  EXCEPTION(EX_Overflow);
+-  if ( control_word & CW_Overflow )
+-    {
+-      /* The overflow exception is masked. */
+-      /* By definition, precision is lost.
+-	 The roundup bit (C1) is also set because we have
+-	 "rounded" upwards to Infinity. */
+-      EXCEPTION(EX_Precision | SW_C1);
+-      return tag;
+-    }
 -
--void machine_emergency_restart(void)
--{
--	machine_ops.emergency_restart();
+-  return tag;
++		reg_copy(&CONST_INF, dest);
++		tag = TAG_Special;
++	} else {
++		/* Subtract the magic number from the exponent */
++		addexponent(dest, (-3 * (1 << 13)));
++	}
+ 
 -}
++	EXCEPTION(EX_Overflow);
++	if (control_word & CW_Overflow) {
++		/* The overflow exception is masked. */
++		/* By definition, precision is lost.
++		   The roundup bit (C1) is also set because we have
++		   "rounded" upwards to Infinity. */
++		EXCEPTION(EX_Precision | SW_C1);
++		return tag;
++	}
++
++	return tag;
+ 
++}
+ 
+ asmlinkage int arith_underflow(FPU_REG *dest)
+ {
+-  int tag = TAG_Valid;
 -
--void machine_restart(char *cmd)
--{
--	machine_ops.restart(cmd);
--}
+-  if ( control_word & CW_Underflow )
+-    {
+-      /* The masked response */
+-      if ( exponent16(dest) <= EXP_UNDER - 63 )
+-	{
+-	  reg_copy(&CONST_Z, dest);
+-	  partial_status &= ~SW_C1;       /* Round down. */
+-	  tag = TAG_Zero;
++	int tag = TAG_Valid;
++
++	if (control_word & CW_Underflow) {
++		/* The masked response */
++		if (exponent16(dest) <= EXP_UNDER - 63) {
++			reg_copy(&CONST_Z, dest);
++			partial_status &= ~SW_C1;	/* Round down. */
++			tag = TAG_Zero;
++		} else {
++			stdexp(dest);
++		}
++	} else {
++		/* Add the magic number to the exponent. */
++		addexponent(dest, (3 * (1 << 13)) + EXTENDED_Ebias);
+ 	}
+-      else
+-	{
+-	  stdexp(dest);
++
++	EXCEPTION(EX_Underflow);
++	if (control_word & CW_Underflow) {
++		/* The underflow exception is masked. */
++		EXCEPTION(EX_Precision);
++		return tag;
+ 	}
+-    }
+-  else
+-    {
+-      /* Add the magic number to the exponent. */
+-      addexponent(dest, (3 * (1 << 13)) + EXTENDED_Ebias);
+-    }
 -
--void machine_halt(void)
--{
--	machine_ops.halt();
--}
-diff --git a/arch/x86/kernel/reboot_64.c b/arch/x86/kernel/reboot_64.c
-deleted file mode 100644
-index 53620a9..0000000
---- a/arch/x86/kernel/reboot_64.c
-+++ /dev/null
-@@ -1,176 +0,0 @@
--/* Various gunk just to reboot the machine. */ 
--#include <linux/module.h>
--#include <linux/reboot.h>
--#include <linux/init.h>
--#include <linux/smp.h>
--#include <linux/kernel.h>
--#include <linux/ctype.h>
--#include <linux/string.h>
--#include <linux/pm.h>
--#include <linux/kdebug.h>
--#include <linux/sched.h>
--#include <asm/io.h>
--#include <asm/delay.h>
--#include <asm/desc.h>
--#include <asm/hw_irq.h>
--#include <asm/system.h>
--#include <asm/pgtable.h>
--#include <asm/tlbflush.h>
--#include <asm/apic.h>
--#include <asm/hpet.h>
--#include <asm/gart.h>
+-  EXCEPTION(EX_Underflow);
+-  if ( control_word & CW_Underflow )
+-    {
+-      /* The underflow exception is masked. */
+-      EXCEPTION(EX_Precision);
+-      return tag;
+-    }
 -
--/*
-- * Power off function, if any
-- */
--void (*pm_power_off)(void);
--EXPORT_SYMBOL(pm_power_off);
+-  return tag;
+ 
+-}
++	return tag;
+ 
++}
+ 
+ void FPU_stack_overflow(void)
+ {
+ 
+- if ( control_word & CW_Invalid )
+-    {
+-      /* The masked response */
+-      top--;
+-      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
+-    }
++	if (control_word & CW_Invalid) {
++		/* The masked response */
++		top--;
++		FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
++	}
+ 
+-  EXCEPTION(EX_StackOver);
++	EXCEPTION(EX_StackOver);
+ 
+-  return;
++	return;
+ 
+ }
+ 
 -
--static long no_idt[3];
--static enum { 
--	BOOT_TRIPLE = 't',
--	BOOT_KBD = 'k'
--} reboot_type = BOOT_KBD;
--static int reboot_mode = 0;
--int reboot_force;
+ void FPU_stack_underflow(void)
+ {
+ 
+- if ( control_word & CW_Invalid )
+-    {
+-      /* The masked response */
+-      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
+-    }
++	if (control_word & CW_Invalid) {
++		/* The masked response */
++		FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
++	}
+ 
+-  EXCEPTION(EX_StackUnder);
++	EXCEPTION(EX_StackUnder);
+ 
+-  return;
++	return;
+ 
+ }
+ 
 -
--/* reboot=t[riple] | k[bd] [, [w]arm | [c]old]
--   warm   Don't set the cold reboot flag
--   cold   Set the cold reboot flag
--   triple Force a triple fault (init)
--   kbd    Use the keyboard controller. cold reset (default)
--   force  Avoid anything that could hang.
-- */ 
--static int __init reboot_setup(char *str)
--{
--	for (;;) {
--		switch (*str) {
--		case 'w': 
--			reboot_mode = 0x1234;
--			break;
+ void FPU_stack_underflow_i(int i)
+ {
+ 
+- if ( control_word & CW_Invalid )
+-    {
+-      /* The masked response */
+-      FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
+-    }
++	if (control_word & CW_Invalid) {
++		/* The masked response */
++		FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
++	}
+ 
+-  EXCEPTION(EX_StackUnder);
++	EXCEPTION(EX_StackUnder);
+ 
+-  return;
++	return;
+ 
+ }
+ 
 -
--		case 'c':
--			reboot_mode = 0;
--			break;
+ void FPU_stack_underflow_pop(int i)
+ {
+ 
+- if ( control_word & CW_Invalid )
+-    {
+-      /* The masked response */
+-      FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
+-      FPU_pop();
+-    }
++	if (control_word & CW_Invalid) {
++		/* The masked response */
++		FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
++		FPU_pop();
++	}
+ 
+-  EXCEPTION(EX_StackUnder);
++	EXCEPTION(EX_StackUnder);
+ 
+-  return;
++	return;
+ 
+ }
 -
--		case 't':
--		case 'b':
--		case 'k':
--			reboot_type = *str;
--			break;
--		case 'f':
--			reboot_force = 1;
--			break;
--		}
--		if((str = strchr(str,',')) != NULL)
--			str++;
--		else
--			break;
--	}
--	return 1;
--}
+diff --git a/arch/x86/math-emu/exception.h b/arch/x86/math-emu/exception.h
+index b463f21..67f43a4 100644
+--- a/arch/x86/math-emu/exception.h
++++ b/arch/x86/math-emu/exception.h
+@@ -9,7 +9,6 @@
+ #ifndef _EXCEPTION_H_
+ #define _EXCEPTION_H_
+ 
 -
--__setup("reboot=", reboot_setup);
+ #ifdef __ASSEMBLY__
+ #define	Const_(x)	$##x
+ #else
+@@ -20,8 +19,8 @@
+ #include "fpu_emu.h"
+ #endif /* SW_C1 */
+ 
+-#define FPU_BUSY        Const_(0x8000)   /* FPU busy bit (8087 compatibility) */
+-#define EX_ErrorSummary Const_(0x0080)   /* Error summary status */
++#define FPU_BUSY        Const_(0x8000)	/* FPU busy bit (8087 compatibility) */
++#define EX_ErrorSummary Const_(0x0080)	/* Error summary status */
+ /* Special exceptions: */
+ #define	EX_INTERNAL	Const_(0x8000)	/* Internal error in wm-FPU-emu */
+ #define EX_StackOver	Const_(0x0041|SW_C1)	/* stack overflow */
+@@ -34,11 +33,9 @@
+ #define EX_Denormal	Const_(0x0002)	/* denormalized operand */
+ #define EX_Invalid	Const_(0x0001)	/* invalid operation */
+ 
 -
--static inline void kb_wait(void)
--{
--	int i;
+ #define PRECISION_LOST_UP    Const_((EX_Precision | SW_C1))
+ #define PRECISION_LOST_DOWN  Const_(EX_Precision)
+ 
 -
--	for (i=0; i<0x10000; i++)
--		if ((inb_p(0x64) & 0x02) == 0)
--			break;
--}
+ #ifndef __ASSEMBLY__
+ 
+ #ifdef DEBUG
+@@ -48,6 +45,6 @@
+ #define	EXCEPTION(x)	FPU_exception(x)
+ #endif
+ 
+-#endif /* __ASSEMBLY__ */ 
++#endif /* __ASSEMBLY__ */
+ 
+ #endif /* _EXCEPTION_H_ */
+diff --git a/arch/x86/math-emu/fpu_arith.c b/arch/x86/math-emu/fpu_arith.c
+index 6972dec..aeab24e 100644
+--- a/arch/x86/math-emu/fpu_arith.c
++++ b/arch/x86/math-emu/fpu_arith.c
+@@ -15,160 +15,138 @@
+ #include "control_w.h"
+ #include "status_w.h"
+ 
 -
--void machine_shutdown(void)
--{
--	unsigned long flags;
+ void fadd__(void)
+ {
+-  /* fadd st,st(i) */
+-  int i = FPU_rm;
+-  clear_C1();
+-  FPU_add(&st(i), FPU_gettagi(i), 0, control_word);
++	/* fadd st,st(i) */
++	int i = FPU_rm;
++	clear_C1();
++	FPU_add(&st(i), FPU_gettagi(i), 0, control_word);
+ }
+ 
 -
--	/* Stop the cpus and apics */
--#ifdef CONFIG_SMP
--	int reboot_cpu_id;
+ void fmul__(void)
+ {
+-  /* fmul st,st(i) */
+-  int i = FPU_rm;
+-  clear_C1();
+-  FPU_mul(&st(i), FPU_gettagi(i), 0, control_word);
++	/* fmul st,st(i) */
++	int i = FPU_rm;
++	clear_C1();
++	FPU_mul(&st(i), FPU_gettagi(i), 0, control_word);
+ }
+ 
 -
--	/* The boot cpu is always logical cpu 0 */
--	reboot_cpu_id = 0;
 -
--	/* Make certain the cpu I'm about to reboot on is online */
--	if (!cpu_isset(reboot_cpu_id, cpu_online_map)) {
--		reboot_cpu_id = smp_processor_id();
--	}
+ void fsub__(void)
+ {
+-  /* fsub st,st(i) */
+-  clear_C1();
+-  FPU_sub(0, FPU_rm, control_word);
++	/* fsub st,st(i) */
++	clear_C1();
++	FPU_sub(0, FPU_rm, control_word);
+ }
+ 
 -
--	/* Make certain I only run on the appropriate processor */
--	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
+ void fsubr_(void)
+ {
+-  /* fsubr st,st(i) */
+-  clear_C1();
+-  FPU_sub(REV, FPU_rm, control_word);
++	/* fsubr st,st(i) */
++	clear_C1();
++	FPU_sub(REV, FPU_rm, control_word);
+ }
+ 
 -
--	/* O.K Now that I'm on the appropriate processor,
--	 * stop all of the others.
--	 */
--	smp_send_stop();
--#endif
+ void fdiv__(void)
+ {
+-  /* fdiv st,st(i) */
+-  clear_C1();
+-  FPU_div(0, FPU_rm, control_word);
++	/* fdiv st,st(i) */
++	clear_C1();
++	FPU_div(0, FPU_rm, control_word);
+ }
+ 
 -
--	local_irq_save(flags);
+ void fdivr_(void)
+ {
+-  /* fdivr st,st(i) */
+-  clear_C1();
+-  FPU_div(REV, FPU_rm, control_word);
++	/* fdivr st,st(i) */
++	clear_C1();
++	FPU_div(REV, FPU_rm, control_word);
+ }
+ 
 -
--#ifndef CONFIG_SMP
--	disable_local_APIC();
--#endif
 -
--	disable_IO_APIC();
+ void fadd_i(void)
+ {
+-  /* fadd st(i),st */
+-  int i = FPU_rm;
+-  clear_C1();
+-  FPU_add(&st(i), FPU_gettagi(i), i, control_word);
++	/* fadd st(i),st */
++	int i = FPU_rm;
++	clear_C1();
++	FPU_add(&st(i), FPU_gettagi(i), i, control_word);
+ }
+ 
 -
--#ifdef CONFIG_HPET_TIMER
--	hpet_disable();
--#endif
--	local_irq_restore(flags);
+ void fmul_i(void)
+ {
+-  /* fmul st(i),st */
+-  clear_C1();
+-  FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word);
++	/* fmul st(i),st */
++	clear_C1();
++	FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word);
+ }
+ 
 -
--	pci_iommu_shutdown();
--}
+ void fsubri(void)
+ {
+-  /* fsubr st(i),st */
+-  clear_C1();
+-  FPU_sub(DEST_RM, FPU_rm, control_word);
++	/* fsubr st(i),st */
++	clear_C1();
++	FPU_sub(DEST_RM, FPU_rm, control_word);
+ }
+ 
 -
--void machine_emergency_restart(void)
--{
--	int i;
+ void fsub_i(void)
+ {
+-  /* fsub st(i),st */
+-  clear_C1();
+-  FPU_sub(REV|DEST_RM, FPU_rm, control_word);
++	/* fsub st(i),st */
++	clear_C1();
++	FPU_sub(REV | DEST_RM, FPU_rm, control_word);
+ }
+ 
 -
--	/* Tell the BIOS if we want cold or warm reboot */
--	*((unsigned short *)__va(0x472)) = reboot_mode;
--       
--	for (;;) {
--		/* Could also try the reset bit in the Hammer NB */
--		switch (reboot_type) { 
--		case BOOT_KBD:
--		for (i=0; i<10; i++) {
--			kb_wait();
--			udelay(50);
--			outb(0xfe,0x64);         /* pulse reset low */
--			udelay(50);
--		}
+ void fdivri(void)
+ {
+-  /* fdivr st(i),st */
+-  clear_C1();
+-  FPU_div(DEST_RM, FPU_rm, control_word);
++	/* fdivr st(i),st */
++	clear_C1();
++	FPU_div(DEST_RM, FPU_rm, control_word);
+ }
+ 
 -
--		case BOOT_TRIPLE: 
--			load_idt((const struct desc_ptr *)&no_idt);
--			__asm__ __volatile__("int3");
+ void fdiv_i(void)
+ {
+-  /* fdiv st(i),st */
+-  clear_C1();
+-  FPU_div(REV|DEST_RM, FPU_rm, control_word);
++	/* fdiv st(i),st */
++	clear_C1();
++	FPU_div(REV | DEST_RM, FPU_rm, control_word);
+ }
+ 
 -
--			reboot_type = BOOT_KBD;
--			break;
--		}      
--	}      
--}
 -
--void machine_restart(char * __unused)
--{
--	printk("machine restart\n");
+ void faddp_(void)
+ {
+-  /* faddp st(i),st */
+-  int i = FPU_rm;
+-  clear_C1();
+-  if ( FPU_add(&st(i), FPU_gettagi(i), i, control_word) >= 0 )
+-    FPU_pop();
++	/* faddp st(i),st */
++	int i = FPU_rm;
++	clear_C1();
++	if (FPU_add(&st(i), FPU_gettagi(i), i, control_word) >= 0)
++		FPU_pop();
+ }
+ 
 -
--	if (!reboot_force) {
--		machine_shutdown();
--	}
--	machine_emergency_restart();
--}
+ void fmulp_(void)
+ {
+-  /* fmulp st(i),st */
+-  clear_C1();
+-  if ( FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word) >= 0 )
+-    FPU_pop();
++	/* fmulp st(i),st */
++	clear_C1();
++	if (FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word) >= 0)
++		FPU_pop();
+ }
+ 
 -
--void machine_halt(void)
--{
--}
 -
--void machine_power_off(void)
--{
--	if (pm_power_off) {
--		if (!reboot_force) {
--			machine_shutdown();
--		}
--		pm_power_off();
--	}
--}
+ void fsubrp(void)
+ {
+-  /* fsubrp st(i),st */
+-  clear_C1();
+-  if ( FPU_sub(DEST_RM, FPU_rm, control_word) >= 0 )
+-    FPU_pop();
++	/* fsubrp st(i),st */
++	clear_C1();
++	if (FPU_sub(DEST_RM, FPU_rm, control_word) >= 0)
++		FPU_pop();
+ }
+ 
 -
-diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
-index f452726..dec0b5e 100644
---- a/arch/x86/kernel/reboot_fixups_32.c
-+++ b/arch/x86/kernel/reboot_fixups_32.c
-@@ -30,6 +30,19 @@ static void cs5536_warm_reset(struct pci_dev *dev)
- 	udelay(50); /* shouldn't get here but be safe and spin a while */
+ void fsubp_(void)
+ {
+-  /* fsubp st(i),st */
+-  clear_C1();
+-  if ( FPU_sub(REV|DEST_RM, FPU_rm, control_word) >= 0 )
+-    FPU_pop();
++	/* fsubp st(i),st */
++	clear_C1();
++	if (FPU_sub(REV | DEST_RM, FPU_rm, control_word) >= 0)
++		FPU_pop();
  }
  
-+static void rdc321x_reset(struct pci_dev *dev)
-+{
-+	unsigned i;
-+	/* Voluntary reset the watchdog timer */
-+	outl(0x80003840, 0xCF8);
-+	/* Generate a CPU reset on next tick */
-+	i = inl(0xCFC);
-+	/* Use the minimum timer resolution */
-+	i |= 0x1600;
-+	outl(i, 0xCFC);
-+	outb(1, 0x92);
-+}
-+
- struct device_fixup {
- 	unsigned int vendor;
- 	unsigned int device;
-@@ -40,6 +53,7 @@ static struct device_fixup fixups_table[] = {
- { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
- { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
-+{ PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030, rdc321x_reset },
- };
+-
+ void fdivrp(void)
+ {
+-  /* fdivrp st(i),st */
+-  clear_C1();
+-  if ( FPU_div(DEST_RM, FPU_rm, control_word) >= 0 )
+-    FPU_pop();
++	/* fdivrp st(i),st */
++	clear_C1();
++	if (FPU_div(DEST_RM, FPU_rm, control_word) >= 0)
++		FPU_pop();
+ }
  
- /*
-diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
-new file mode 100644
-index 0000000..eb9b1a1
---- /dev/null
-+++ b/arch/x86/kernel/rtc.c
-@@ -0,0 +1,204 @@
-+/*
-+ * RTC related functions
-+ */
-+#include <linux/acpi.h>
-+#include <linux/bcd.h>
-+#include <linux/mc146818rtc.h>
-+
-+#include <asm/time.h>
-+#include <asm/vsyscall.h>
-+
-+#ifdef CONFIG_X86_32
-+# define CMOS_YEARS_OFFS 1900
-+/*
-+ * This is a special lock that is owned by the CPU and holds the index
-+ * register we are working with.  It is required for NMI access to the
-+ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
-+ */
-+volatile unsigned long cmos_lock = 0;
-+EXPORT_SYMBOL(cmos_lock);
-+#else
-+/*
-+ * x86-64 systems only exists since 2002.
-+ * This will work up to Dec 31, 2100
-+ */
-+# define CMOS_YEARS_OFFS 2000
-+#endif
-+
-+DEFINE_SPINLOCK(rtc_lock);
-+EXPORT_SYMBOL(rtc_lock);
-+
-+/*
-+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
-+ * called 500 ms after the second nowtime has started, because when
-+ * nowtime is written into the registers of the CMOS clock, it will
-+ * jump to the next second precisely 500 ms later. Check the Motorola
-+ * MC146818A or Dallas DS12887 data sheet for details.
-+ *
-+ * BUG: This routine does not handle hour overflow properly; it just
-+ *      sets the minutes. Usually you'll only notice that after reboot!
-+ */
-+int mach_set_rtc_mmss(unsigned long nowtime)
-+{
-+	int retval = 0;
-+	int real_seconds, real_minutes, cmos_minutes;
-+	unsigned char save_control, save_freq_select;
-+
-+	 /* tell the clock it's being set */
-+	save_control = CMOS_READ(RTC_CONTROL);
-+	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-+
-+	/* stop and reset prescaler */
-+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-+
-+	cmos_minutes = CMOS_READ(RTC_MINUTES);
-+	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-+		BCD_TO_BIN(cmos_minutes);
-+
-+	/*
-+	 * since we're only adjusting minutes and seconds,
-+	 * don't interfere with hour overflow. This avoids
-+	 * messing with unknown time zones but requires your
-+	 * RTC not to be off by more than 15 minutes
-+	 */
-+	real_seconds = nowtime % 60;
-+	real_minutes = nowtime / 60;
-+	/* correct for half hour time zone */
-+	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
-+		real_minutes += 30;
-+	real_minutes %= 60;
-+
-+	if (abs(real_minutes - cmos_minutes) < 30) {
-+		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-+			BIN_TO_BCD(real_seconds);
-+			BIN_TO_BCD(real_minutes);
-+		}
-+		CMOS_WRITE(real_seconds,RTC_SECONDS);
-+		CMOS_WRITE(real_minutes,RTC_MINUTES);
-+	} else {
-+		printk(KERN_WARNING
-+		       "set_rtc_mmss: can't update from %d to %d\n",
-+		       cmos_minutes, real_minutes);
-+		retval = -1;
-+	}
-+
-+	/* The following flags have to be released exactly in this order,
-+	 * otherwise the DS12887 (popular MC146818A clone with integrated
-+	 * battery and quartz) will not reset the oscillator and will not
-+	 * update precisely 500 ms later. You won't find this mentioned in
-+	 * the Dallas Semiconductor data sheets, but who believes data
-+	 * sheets anyway ...                           -- Markus Kuhn
-+	 */
-+	CMOS_WRITE(save_control, RTC_CONTROL);
-+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+
-+	return retval;
-+}
-+
-+unsigned long mach_get_cmos_time(void)
-+{
-+	unsigned int year, mon, day, hour, min, sec, century = 0;
-+
-+	/*
-+	 * If UIP is clear, then we have >= 244 microseconds before
-+	 * RTC registers will be updated.  Spec sheet says that this
-+	 * is the reliable way to read RTC - registers. If UIP is set
-+	 * then the register access might be invalid.
-+	 */
-+	while ((CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
-+		cpu_relax();
-+
-+	sec = CMOS_READ(RTC_SECONDS);
-+	min = CMOS_READ(RTC_MINUTES);
-+	hour = CMOS_READ(RTC_HOURS);
-+	day = CMOS_READ(RTC_DAY_OF_MONTH);
-+	mon = CMOS_READ(RTC_MONTH);
-+	year = CMOS_READ(RTC_YEAR);
-+
-+#if defined(CONFIG_ACPI) && defined(CONFIG_X86_64)
-+	/* CHECKME: Is this really 64bit only ??? */
-+	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
-+	    acpi_gbl_FADT.century)
-+		century = CMOS_READ(acpi_gbl_FADT.century);
-+#endif
-+
-+	if (RTC_ALWAYS_BCD || !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)) {
-+		BCD_TO_BIN(sec);
-+		BCD_TO_BIN(min);
-+		BCD_TO_BIN(hour);
-+		BCD_TO_BIN(day);
-+		BCD_TO_BIN(mon);
-+		BCD_TO_BIN(year);
-+	}
-+
-+	if (century) {
-+		BCD_TO_BIN(century);
-+		year += century * 100;
-+		printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
-+	} else {
-+		year += CMOS_YEARS_OFFS;
-+		if (year < 1970)
-+			year += 100;
-+	}
-+
-+	return mktime(year, mon, day, hour, min, sec);
-+}
-+
-+/* Routines for accessing the CMOS RAM/RTC. */
-+unsigned char rtc_cmos_read(unsigned char addr)
-+{
-+	unsigned char val;
-+
-+	lock_cmos_prefix(addr);
-+	outb_p(addr, RTC_PORT(0));
-+	val = inb_p(RTC_PORT(1));
-+	lock_cmos_suffix(addr);
-+	return val;
-+}
-+EXPORT_SYMBOL(rtc_cmos_read);
-+
-+void rtc_cmos_write(unsigned char val, unsigned char addr)
-+{
-+	lock_cmos_prefix(addr);
-+	outb_p(addr, RTC_PORT(0));
-+	outb_p(val, RTC_PORT(1));
-+	lock_cmos_suffix(addr);
-+}
-+EXPORT_SYMBOL(rtc_cmos_write);
-+
-+static int set_rtc_mmss(unsigned long nowtime)
-+{
-+	int retval;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&rtc_lock, flags);
-+	retval = set_wallclock(nowtime);
-+	spin_unlock_irqrestore(&rtc_lock, flags);
-+
-+	return retval;
-+}
-+
-+/* not static: needed by APM */
-+unsigned long read_persistent_clock(void)
-+{
-+	unsigned long retval, flags;
-+
-+	spin_lock_irqsave(&rtc_lock, flags);
-+	retval = get_wallclock();
-+	spin_unlock_irqrestore(&rtc_lock, flags);
-+
-+	return retval;
-+}
-+
-+int update_persistent_clock(struct timespec now)
-+{
-+	return set_rtc_mmss(now.tv_sec);
-+}
-+
-+unsigned long long native_read_tsc(void)
-+{
-+	return __native_read_tsc();
-+}
-+EXPORT_SYMBOL(native_read_tsc);
-+
-diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
-index 3558ac7..309366f 100644
---- a/arch/x86/kernel/setup64.c
-+++ b/arch/x86/kernel/setup64.c
-@@ -24,7 +24,11 @@
- #include <asm/sections.h>
- #include <asm/setup.h>
+-
+ void fdivp_(void)
+ {
+-  /* fdivp st(i),st */
+-  clear_C1();
+-  if ( FPU_div(REV|DEST_RM, FPU_rm, control_word) >= 0 )
+-    FPU_pop();
++	/* fdivp st(i),st */
++	clear_C1();
++	if (FPU_div(REV | DEST_RM, FPU_rm, control_word) >= 0)
++		FPU_pop();
+ }
+diff --git a/arch/x86/math-emu/fpu_asm.h b/arch/x86/math-emu/fpu_asm.h
+index 9ba1241..955b932 100644
+--- a/arch/x86/math-emu/fpu_asm.h
++++ b/arch/x86/math-emu/fpu_asm.h
+@@ -14,7 +14,6 @@
  
-+#ifndef CONFIG_DEBUG_BOOT_PARAMS
- struct boot_params __initdata boot_params;
-+#else
-+struct boot_params boot_params;
-+#endif
+ #define	EXCEPTION	FPU_exception
  
- cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
+-
+ #define PARAM1	8(%ebp)
+ #define	PARAM2	12(%ebp)
+ #define	PARAM3	16(%ebp)
+diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
+index 20886cf..491e737 100644
+--- a/arch/x86/math-emu/fpu_aux.c
++++ b/arch/x86/math-emu/fpu_aux.c
+@@ -16,34 +16,34 @@
+ #include "status_w.h"
+ #include "control_w.h"
  
-@@ -37,6 +41,8 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
- char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
+-
+ static void fnop(void)
+ {
+ }
  
- unsigned long __supported_pte_mask __read_mostly = ~0UL;
-+EXPORT_SYMBOL_GPL(__supported_pte_mask);
-+
- static int do_not_nx __cpuinitdata = 0;
+ static void fclex(void)
+ {
+-  partial_status &= ~(SW_Backward|SW_Summary|SW_Stack_Fault|SW_Precision|
+-		   SW_Underflow|SW_Overflow|SW_Zero_Div|SW_Denorm_Op|
+-		   SW_Invalid);
+-  no_ip_update = 1;
++	partial_status &=
++	    ~(SW_Backward | SW_Summary | SW_Stack_Fault | SW_Precision |
++	      SW_Underflow | SW_Overflow | SW_Zero_Div | SW_Denorm_Op |
++	      SW_Invalid);
++	no_ip_update = 1;
+ }
  
- /* noexec=on|off
-@@ -80,6 +86,43 @@ static int __init nonx32_setup(char *str)
- __setup("noexec32=", nonx32_setup);
+ /* Needs to be externally visible */
+ void finit(void)
+ {
+-  control_word = 0x037f;
+-  partial_status = 0;
+-  top = 0;            /* We don't keep top in the status word internally. */
+-  fpu_tag_word = 0xffff;
+-  /* The behaviour is different from that detailed in
+-     Section 15.1.6 of the Intel manual */
+-  operand_address.offset = 0;
+-  operand_address.selector = 0;
+-  instruction_address.offset = 0;
+-  instruction_address.selector = 0;
+-  instruction_address.opcode = 0;
+-  no_ip_update = 1;
++	control_word = 0x037f;
++	partial_status = 0;
++	top = 0;		/* We don't keep top in the status word internally. */
++	fpu_tag_word = 0xffff;
++	/* The behaviour is different from that detailed in
++	   Section 15.1.6 of the Intel manual */
++	operand_address.offset = 0;
++	operand_address.selector = 0;
++	instruction_address.offset = 0;
++	instruction_address.selector = 0;
++	instruction_address.opcode = 0;
++	no_ip_update = 1;
+ }
  
  /*
-+ * Copy data used in early init routines from the initial arrays to the
-+ * per cpu data areas.  These arrays then become expendable and the
-+ * *_early_ptr's are zeroed indicating that the static arrays are gone.
-+ */
-+static void __init setup_per_cpu_maps(void)
-+{
-+	int cpu;
-+
-+	for_each_possible_cpu(cpu) {
-+#ifdef CONFIG_SMP
-+		if (per_cpu_offset(cpu)) {
-+#endif
-+			per_cpu(x86_cpu_to_apicid, cpu) =
-+						x86_cpu_to_apicid_init[cpu];
-+			per_cpu(x86_bios_cpu_apicid, cpu) =
-+						x86_bios_cpu_apicid_init[cpu];
-+#ifdef CONFIG_NUMA
-+			per_cpu(x86_cpu_to_node_map, cpu) =
-+						x86_cpu_to_node_map_init[cpu];
-+#endif
-+#ifdef CONFIG_SMP
-+		}
-+		else
-+			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
-+									cpu);
-+#endif
-+	}
-+
-+	/* indicate the early static arrays will soon be gone */
-+	x86_cpu_to_apicid_early_ptr = NULL;
-+	x86_bios_cpu_apicid_early_ptr = NULL;
-+#ifdef CONFIG_NUMA
-+	x86_cpu_to_node_map_early_ptr = NULL;
-+#endif
-+}
-+
-+/*
-  * Great future plan:
-  * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
-  * Always point %gs to its beginning
-@@ -100,18 +143,21 @@ void __init setup_per_cpu_areas(void)
- 	for_each_cpu_mask (i, cpu_possible_map) {
- 		char *ptr;
- 
--		if (!NODE_DATA(cpu_to_node(i))) {
-+		if (!NODE_DATA(early_cpu_to_node(i))) {
- 			printk("cpu with no node %d, num_online_nodes %d\n",
- 			       i, num_online_nodes());
- 			ptr = alloc_bootmem_pages(size);
- 		} else { 
--			ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
-+			ptr = alloc_bootmem_pages_node(NODE_DATA(early_cpu_to_node(i)), size);
- 		}
- 		if (!ptr)
- 			panic("Cannot allocate cpu data for CPU %d\n", i);
- 		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
- 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
- 	}
-+
-+	/* setup percpu data maps early */
-+	setup_per_cpu_maps();
- } 
+@@ -54,151 +54,134 @@ void finit(void)
+ #define fsetpm fnop
  
- void pda_init(int cpu)
-@@ -169,7 +215,8 @@ void syscall_init(void)
- #endif
+ static FUNC const finit_table[] = {
+-  feni, fdisi, fclex, finit,
+-  fsetpm, FPU_illegal, FPU_illegal, FPU_illegal
++	feni, fdisi, fclex, finit,
++	fsetpm, FPU_illegal, FPU_illegal, FPU_illegal
+ };
  
- 	/* Flags to clear on syscall */
--	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
-+	wrmsrl(MSR_SYSCALL_MASK,
-+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+ void finit_(void)
+ {
+-  (finit_table[FPU_rm])();
++	(finit_table[FPU_rm]) ();
  }
  
- void __cpuinit check_efer(void)
-@@ -227,7 +274,7 @@ void __cpuinit cpu_init (void)
- 	 * and set up the GDT descriptor:
- 	 */
- 	if (cpu)
-- 		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
-+		memcpy(get_cpu_gdt_table(cpu), cpu_gdt_table, GDT_SIZE);
- 
- 	cpu_gdt_descr[cpu].size = GDT_SIZE;
- 	load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
-@@ -257,10 +304,10 @@ void __cpuinit cpu_init (void)
- 				      v, cpu); 
- 		}
- 		estacks += PAGE_SIZE << order[v];
--		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
-+		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
- 	}
- 
--	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
- 	/*
- 	 * <= is required because the CPU will access up to
- 	 * 8 bits beyond the end of the IO permission bitmap.
-diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
-index 9c24b45..62adc5f 100644
---- a/arch/x86/kernel/setup_32.c
-+++ b/arch/x86/kernel/setup_32.c
-@@ -44,9 +44,12 @@
- #include <linux/crash_dump.h>
- #include <linux/dmi.h>
- #include <linux/pfn.h>
-+#include <linux/pci.h>
-+#include <linux/init_ohci1394_dma.h>
- 
- #include <video/edid.h>
- 
-+#include <asm/mtrr.h>
- #include <asm/apic.h>
- #include <asm/e820.h>
- #include <asm/mpspec.h>
-@@ -67,14 +70,83 @@
-    address, and must not be in the .bss segment! */
- unsigned long init_pg_tables_end __initdata = ~0UL;
- 
--int disable_pse __cpuinitdata = 0;
 -
- /*
-  * Machine setup..
-  */
--extern struct resource code_resource;
--extern struct resource data_resource;
--extern struct resource bss_resource;
-+static struct resource data_resource = {
-+	.name	= "Kernel data",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource code_resource = {
-+	.name	= "Kernel code",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource bss_resource = {
-+	.name	= "Kernel bss",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource video_ram_resource = {
-+	.name	= "Video RAM area",
-+	.start	= 0xa0000,
-+	.end	= 0xbffff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource standard_io_resources[] = { {
-+	.name	= "dma1",
-+	.start	= 0x0000,
-+	.end	= 0x001f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "pic1",
-+	.start	= 0x0020,
-+	.end	= 0x0021,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name   = "timer0",
-+	.start	= 0x0040,
-+	.end    = 0x0043,
-+	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name   = "timer1",
-+	.start  = 0x0050,
-+	.end    = 0x0053,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "keyboard",
-+	.start	= 0x0060,
-+	.end	= 0x006f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "dma page reg",
-+	.start	= 0x0080,
-+	.end	= 0x008f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "pic2",
-+	.start	= 0x00a0,
-+	.end	= 0x00a1,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "dma2",
-+	.start	= 0x00c0,
-+	.end	= 0x00df,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "fpu",
-+	.start	= 0x00f0,
-+	.end	= 0x00ff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+} };
- 
- /* cpu data as detected by the assembly code in head.S */
- struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-@@ -116,13 +188,17 @@ extern int root_mountflags;
- 
- unsigned long saved_videomode;
- 
--#define RAMDISK_IMAGE_START_MASK  	0x07FF
-+#define RAMDISK_IMAGE_START_MASK	0x07FF
- #define RAMDISK_PROMPT_FLAG		0x8000
--#define RAMDISK_LOAD_FLAG		0x4000	
-+#define RAMDISK_LOAD_FLAG		0x4000
+ static void fstsw_ax(void)
+ {
+-  *(short *) &FPU_EAX = status_word();
+-  no_ip_update = 1;
++	*(short *)&FPU_EAX = status_word();
++	no_ip_update = 1;
+ }
  
- static char __initdata command_line[COMMAND_LINE_SIZE];
+ static FUNC const fstsw_table[] = {
+-  fstsw_ax, FPU_illegal, FPU_illegal, FPU_illegal,
+-  FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
++	fstsw_ax, FPU_illegal, FPU_illegal, FPU_illegal,
++	FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
+ };
  
-+#ifndef CONFIG_DEBUG_BOOT_PARAMS
- struct boot_params __initdata boot_params;
-+#else
-+struct boot_params boot_params;
-+#endif
+ void fstsw_(void)
+ {
+-  (fstsw_table[FPU_rm])();
++	(fstsw_table[FPU_rm]) ();
+ }
  
- #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
- struct edd edd;
-@@ -166,8 +242,7 @@ static int __init parse_mem(char *arg)
- 		return -EINVAL;
+-
+ static FUNC const fp_nop_table[] = {
+-  fnop, FPU_illegal, FPU_illegal, FPU_illegal,
+-  FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
++	fnop, FPU_illegal, FPU_illegal, FPU_illegal,
++	FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
+ };
  
- 	if (strcmp(arg, "nopentium") == 0) {
--		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
--		disable_pse = 1;
-+		setup_clear_cpu_cap(X86_FEATURE_PSE);
- 	} else {
- 		/* If the user specifies memory size, we
- 		 * limit the BIOS-provided memory map to
-@@ -176,7 +251,7 @@ static int __init parse_mem(char *arg)
- 		 * trim the existing memory map.
- 		 */
- 		unsigned long long mem_size;
-- 
-+
- 		mem_size = memparse(arg, &arg);
- 		limit_regions(mem_size);
- 		user_defined_memmap = 1;
-@@ -315,7 +390,7 @@ static void __init reserve_ebda_region(void)
- 	unsigned int addr;
- 	addr = get_bios_ebda();
- 	if (addr)
--		reserve_bootmem(addr, PAGE_SIZE);	
-+		reserve_bootmem(addr, PAGE_SIZE);
+ void fp_nop(void)
+ {
+-  (fp_nop_table[FPU_rm])();
++	(fp_nop_table[FPU_rm]) ();
  }
  
- #ifndef CONFIG_NEED_MULTIPLE_NODES
-@@ -420,6 +495,100 @@ static inline void __init reserve_crashkernel(void)
- {}
- #endif
- 
-+#ifdef CONFIG_BLK_DEV_INITRD
-+
-+static bool do_relocate_initrd = false;
-+
-+static void __init reserve_initrd(void)
-+{
-+	unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
-+	unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-+	unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
-+	unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;
-+	unsigned long ramdisk_here;
-+
-+	initrd_start = 0;
-+
-+	if (!boot_params.hdr.type_of_loader ||
-+	    !ramdisk_image || !ramdisk_size)
-+		return;		/* No initrd provided by bootloader */
-+
-+	if (ramdisk_end < ramdisk_image) {
-+		printk(KERN_ERR "initrd wraps around end of memory, "
-+		       "disabling initrd\n");
-+		return;
-+	}
-+	if (ramdisk_size >= end_of_lowmem/2) {
-+		printk(KERN_ERR "initrd too large to handle, "
-+		       "disabling initrd\n");
-+		return;
-+	}
-+	if (ramdisk_end <= end_of_lowmem) {
-+		/* All in lowmem, easy case */
-+		reserve_bootmem(ramdisk_image, ramdisk_size);
-+		initrd_start = ramdisk_image + PAGE_OFFSET;
-+		initrd_end = initrd_start+ramdisk_size;
-+		return;
-+	}
-+
-+	/* We need to move the initrd down into lowmem */
-+	ramdisk_here = (end_of_lowmem - ramdisk_size) & PAGE_MASK;
-+
-+	/* Note: this includes all the lowmem currently occupied by
-+	   the initrd, we rely on that fact to keep the data intact. */
-+	reserve_bootmem(ramdisk_here, ramdisk_size);
-+	initrd_start = ramdisk_here + PAGE_OFFSET;
-+	initrd_end   = initrd_start + ramdisk_size;
-+
-+	do_relocate_initrd = true;
-+}
-+
-+#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
-+
-+static void __init relocate_initrd(void)
-+{
-+	unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
-+	unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-+	unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;
-+	unsigned long ramdisk_here;
-+	unsigned long slop, clen, mapaddr;
-+	char *p, *q;
+-
+ void fld_i_(void)
+ {
+-  FPU_REG *st_new_ptr;
+-  int i;
+-  u_char tag;
+-
+-  if ( STACK_OVERFLOW )
+-    { FPU_stack_overflow(); return; }
+-
+-  /* fld st(i) */
+-  i = FPU_rm;
+-  if ( NOT_EMPTY(i) )
+-    {
+-      reg_copy(&st(i), st_new_ptr);
+-      tag = FPU_gettagi(i);
+-      push();
+-      FPU_settag0(tag);
+-    }
+-  else
+-    {
+-      if ( control_word & CW_Invalid )
+-	{
+-	  /* The masked response */
+-	  FPU_stack_underflow();
++	FPU_REG *st_new_ptr;
++	int i;
++	u_char tag;
 +
-+	if (!do_relocate_initrd)
++	if (STACK_OVERFLOW) {
++		FPU_stack_overflow();
 +		return;
-+
-+	ramdisk_here = initrd_start - PAGE_OFFSET;
-+
-+	q = (char *)initrd_start;
-+
-+	/* Copy any lowmem portion of the initrd */
-+	if (ramdisk_image < end_of_lowmem) {
-+		clen = end_of_lowmem - ramdisk_image;
-+		p = (char *)__va(ramdisk_image);
-+		memcpy(q, p, clen);
-+		q += clen;
-+		ramdisk_image += clen;
-+		ramdisk_size  -= clen;
-+	}
-+
-+	/* Copy the highmem portion of the initrd */
-+	while (ramdisk_size) {
-+		slop = ramdisk_image & ~PAGE_MASK;
-+		clen = ramdisk_size;
-+		if (clen > MAX_MAP_CHUNK-slop)
-+			clen = MAX_MAP_CHUNK-slop;
-+		mapaddr = ramdisk_image & PAGE_MASK;
-+		p = early_ioremap(mapaddr, clen+slop);
-+		memcpy(q, p+slop, clen);
-+		early_iounmap(p, clen+slop);
-+		q += clen;
-+		ramdisk_image += clen;
-+		ramdisk_size  -= clen;
+ 	}
+-      else
+-	EXCEPTION(EX_StackUnder);
+-    }
+ 
+-}
++	/* fld st(i) */
++	i = FPU_rm;
++	if (NOT_EMPTY(i)) {
++		reg_copy(&st(i), st_new_ptr);
++		tag = FPU_gettagi(i);
++		push();
++		FPU_settag0(tag);
++	} else {
++		if (control_word & CW_Invalid) {
++			/* The masked response */
++			FPU_stack_underflow();
++		} else
++			EXCEPTION(EX_StackUnder);
 +	}
+ 
 +}
-+
-+#endif /* CONFIG_BLK_DEV_INITRD */
-+
- void __init setup_bootmem_allocator(void)
+ 
+ void fxch_i(void)
  {
- 	unsigned long bootmap_size;
-@@ -475,26 +644,10 @@ void __init setup_bootmem_allocator(void)
- 	 */
- 	find_smp_config();
- #endif
--	numa_kva_reserve();
- #ifdef CONFIG_BLK_DEV_INITRD
--	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
--		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
--		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
--		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
--		unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;
+-  /* fxch st(i) */
+-  FPU_REG t;
+-  int i = FPU_rm;
+-  FPU_REG *st0_ptr = &st(0), *sti_ptr = &st(i);
+-  long tag_word = fpu_tag_word;
+-  int regnr = top & 7, regnri = ((regnr + i) & 7);
+-  u_char st0_tag = (tag_word >> (regnr*2)) & 3;
+-  u_char sti_tag = (tag_word >> (regnri*2)) & 3;
 -
--		if (ramdisk_end <= end_of_lowmem) {
--			reserve_bootmem(ramdisk_image, ramdisk_size);
--			initrd_start = ramdisk_image + PAGE_OFFSET;
--			initrd_end = initrd_start+ramdisk_size;
--		} else {
--			printk(KERN_ERR "initrd extends beyond end of memory "
--			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
--			       ramdisk_end, end_of_lowmem);
--			initrd_start = 0;
--		}
+-  if ( st0_tag == TAG_Empty )
+-    {
+-      if ( sti_tag == TAG_Empty )
+-	{
+-	  FPU_stack_underflow();
+-	  FPU_stack_underflow_i(i);
+-	  return;
++	/* fxch st(i) */
++	FPU_REG t;
++	int i = FPU_rm;
++	FPU_REG *st0_ptr = &st(0), *sti_ptr = &st(i);
++	long tag_word = fpu_tag_word;
++	int regnr = top & 7, regnri = ((regnr + i) & 7);
++	u_char st0_tag = (tag_word >> (regnr * 2)) & 3;
++	u_char sti_tag = (tag_word >> (regnri * 2)) & 3;
++
++	if (st0_tag == TAG_Empty) {
++		if (sti_tag == TAG_Empty) {
++			FPU_stack_underflow();
++			FPU_stack_underflow_i(i);
++			return;
++		}
++		if (control_word & CW_Invalid) {
++			/* Masked response */
++			FPU_copy_to_reg0(sti_ptr, sti_tag);
++		}
++		FPU_stack_underflow_i(i);
++		return;
+ 	}
+-      if ( control_word & CW_Invalid )
+-	{
+-	  /* Masked response */
+-	  FPU_copy_to_reg0(sti_ptr, sti_tag);
++	if (sti_tag == TAG_Empty) {
++		if (control_word & CW_Invalid) {
++			/* Masked response */
++			FPU_copy_to_regi(st0_ptr, st0_tag, i);
++		}
++		FPU_stack_underflow();
++		return;
+ 	}
+-      FPU_stack_underflow_i(i);
+-      return;
+-    }
+-  if ( sti_tag == TAG_Empty )
+-    {
+-      if ( control_word & CW_Invalid )
+-	{
+-	  /* Masked response */
+-	  FPU_copy_to_regi(st0_ptr, st0_tag, i);
 -	}
-+	reserve_initrd();
- #endif
-+	numa_kva_reserve();
- 	reserve_crashkernel();
- }
- 
-@@ -545,17 +698,11 @@ void __init setup_arch(char **cmdline_p)
- 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
- 	pre_setup_arch_hook();
- 	early_cpu_init();
-+	early_ioremap_init();
- 
--	/*
--	 * FIXME: This isn't an official loader_type right
--	 * now but does currently work with elilo.
--	 * If we were configured as an EFI kernel, check to make
--	 * sure that we were loaded correctly from elilo and that
--	 * the system table is valid.  If not, then initialize normally.
--	 */
- #ifdef CONFIG_EFI
--	if ((boot_params.hdr.type_of_loader == 0x50) &&
--	    boot_params.efi_info.efi_systab)
-+	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
-+		     "EL32", 4))
- 		efi_enabled = 1;
- #endif
+-      FPU_stack_underflow();
+-      return;
+-    }
+-  clear_C1();
+-
+-  reg_copy(st0_ptr, &t);
+-  reg_copy(sti_ptr, st0_ptr);
+-  reg_copy(&t, sti_ptr);
+-
+-  tag_word &= ~(3 << (regnr*2)) & ~(3 << (regnri*2));
+-  tag_word |= (sti_tag << (regnr*2)) | (st0_tag << (regnri*2));
+-  fpu_tag_word = tag_word;
+-}
++	clear_C1();
  
-@@ -579,12 +726,9 @@ void __init setup_arch(char **cmdline_p)
- 	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
- #endif
- 	ARCH_SETUP
--	if (efi_enabled)
--		efi_init();
--	else {
--		printk(KERN_INFO "BIOS-provided physical RAM map:\n");
--		print_memory_map(memory_setup());
--	}
++	reg_copy(st0_ptr, &t);
++	reg_copy(sti_ptr, st0_ptr);
++	reg_copy(&t, sti_ptr);
 +
-+	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+	print_memory_map(memory_setup());
- 
- 	copy_edd();
- 
-@@ -612,8 +756,16 @@ void __init setup_arch(char **cmdline_p)
- 	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
- 	*cmdline_p = command_line;
++	tag_word &= ~(3 << (regnr * 2)) & ~(3 << (regnri * 2));
++	tag_word |= (sti_tag << (regnr * 2)) | (st0_tag << (regnri * 2));
++	fpu_tag_word = tag_word;
++}
  
-+	if (efi_enabled)
-+		efi_init();
-+
- 	max_low_pfn = setup_memory();
+ void ffree_(void)
+ {
+-  /* ffree st(i) */
+-  FPU_settagi(FPU_rm, TAG_Empty);
++	/* ffree st(i) */
++	FPU_settagi(FPU_rm, TAG_Empty);
+ }
  
-+	/* update e820 for memory not covered by WB MTRRs */
-+	mtrr_bp_init();
-+	if (mtrr_trim_uncached_memory(max_pfn))
-+		max_low_pfn = setup_memory();
-+
- #ifdef CONFIG_VMI
- 	/*
- 	 * Must be after max_low_pfn is determined, and before kernel
-@@ -636,6 +788,16 @@ void __init setup_arch(char **cmdline_p)
- 	smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
- #endif
- 	paging_init();
-+
-+	/*
-+	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
-+	 */
-+
-+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-+	if (init_ohci1394_dma_early)
-+		init_ohci1394_dma_on_all_controllers();
-+#endif
-+
- 	remapped_pgdat_init();
- 	sparse_init();
- 	zone_sizes_init();
-@@ -644,15 +806,19 @@ void __init setup_arch(char **cmdline_p)
- 	 * NOTE: at this point the bootmem allocator is fully available.
- 	 */
+-
+ void ffreep(void)
+ {
+-  /* ffree st(i) + pop - unofficial code */
+-  FPU_settagi(FPU_rm, TAG_Empty);
+-  FPU_pop();
++	/* ffree st(i) + pop - unofficial code */
++	FPU_settagi(FPU_rm, TAG_Empty);
++	FPU_pop();
+ }
  
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	relocate_initrd();
-+#endif
-+
- 	paravirt_post_allocator_init();
+-
+ void fst_i_(void)
+ {
+-  /* fst st(i) */
+-  FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
++	/* fst st(i) */
++	FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
+ }
  
- 	dmi_scan_machine();
+-
+ void fstp_i(void)
+ {
+-  /* fstp st(i) */
+-  FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
+-  FPU_pop();
++	/* fstp st(i) */
++	FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
++	FPU_pop();
+ }
+-
+diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
+index 65120f5..4dae511 100644
+--- a/arch/x86/math-emu/fpu_emu.h
++++ b/arch/x86/math-emu/fpu_emu.h
+@@ -7,7 +7,6 @@
+  |                                                                           |
+  +---------------------------------------------------------------------------*/
  
-+	io_delay_init();
-+
- #ifdef CONFIG_X86_GENERICARCH
- 	generic_apic_probe();
--#endif	
--	if (efi_enabled)
--		efi_map_memmap();
-+#endif
+-
+ #ifndef _FPU_EMU_H_
+ #define _FPU_EMU_H_
  
- #ifdef CONFIG_ACPI
- 	/*
-@@ -661,9 +827,7 @@ void __init setup_arch(char **cmdline_p)
- 	acpi_boot_table_init();
+@@ -28,15 +27,15 @@
  #endif
  
--#ifdef CONFIG_PCI
- 	early_quirks();
--#endif
+ #define EXP_BIAS	Const(0)
+-#define EXP_OVER	Const(0x4000)    /* smallest invalid large exponent */
+-#define	EXP_UNDER	Const(-0x3fff)   /* largest invalid small exponent */
+-#define EXP_WAY_UNDER   Const(-0x6000)   /* Below the smallest denormal, but
+-					    still a 16 bit nr. */
++#define EXP_OVER	Const(0x4000)	/* smallest invalid large exponent */
++#define	EXP_UNDER	Const(-0x3fff)	/* largest invalid small exponent */
++#define EXP_WAY_UNDER   Const(-0x6000)	/* Below the smallest denormal, but
++					   still a 16 bit nr. */
+ #define EXP_Infinity    EXP_OVER
+ #define EXP_NaN         EXP_OVER
  
- #ifdef CONFIG_ACPI
- 	acpi_boot_init();
-@@ -692,3 +856,26 @@ void __init setup_arch(char **cmdline_p)
- #endif
- #endif
- }
-+
-+/*
-+ * Request address space for all standard resources
-+ *
-+ * This is called just before pcibios_init(), which is also a
-+ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
-+ */
-+static int __init request_standard_resources(void)
-+{
-+	int i;
-+
-+	printk(KERN_INFO "Setting up standard PCI resources\n");
-+	init_iomem_resources(&code_resource, &data_resource, &bss_resource);
-+
-+	request_resource(&iomem_resource, &video_ram_resource);
-+
-+	/* request I/O space for devices used on all i[345]86 PCs */
-+	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
-+		request_resource(&ioport_resource, &standard_io_resources[i]);
-+	return 0;
-+}
-+
-+subsys_initcall(request_standard_resources);
-diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
-index 30d94d1..77fb87b 100644
---- a/arch/x86/kernel/setup_64.c
-+++ b/arch/x86/kernel/setup_64.c
-@@ -30,6 +30,7 @@
- #include <linux/crash_dump.h>
- #include <linux/root_dev.h>
- #include <linux/pci.h>
-+#include <linux/efi.h>
- #include <linux/acpi.h>
- #include <linux/kallsyms.h>
- #include <linux/edd.h>
-@@ -39,10 +40,13 @@
- #include <linux/dmi.h>
- #include <linux/dma-mapping.h>
- #include <linux/ctype.h>
-+#include <linux/uaccess.h>
-+#include <linux/init_ohci1394_dma.h>
+ #define EXTENDED_Ebias Const(0x3fff)
+-#define EXTENDED_Emin (-0x3ffe)  /* smallest valid exponent */
++#define EXTENDED_Emin (-0x3ffe)	/* smallest valid exponent */
  
- #include <asm/mtrr.h>
- #include <asm/uaccess.h>
- #include <asm/system.h>
-+#include <asm/vsyscall.h>
- #include <asm/io.h>
- #include <asm/smp.h>
- #include <asm/msr.h>
-@@ -50,6 +54,7 @@
- #include <video/edid.h>
- #include <asm/e820.h>
- #include <asm/dma.h>
-+#include <asm/gart.h>
- #include <asm/mpspec.h>
- #include <asm/mmu_context.h>
- #include <asm/proto.h>
-@@ -59,6 +64,15 @@
- #include <asm/sections.h>
- #include <asm/dmi.h>
- #include <asm/cacheflush.h>
-+#include <asm/mce.h>
-+#include <asm/ds.h>
-+#include <asm/topology.h>
-+
-+#ifdef CONFIG_PARAVIRT
-+#include <asm/paravirt.h>
-+#else
-+#define ARCH_SETUP
-+#endif
+ #define SIGN_POS	Const(0)
+ #define SIGN_NEG	Const(0x80)
+@@ -44,10 +43,9 @@
+ #define SIGN_Positive	Const(0)
+ #define SIGN_Negative	Const(0x8000)
  
- /*
-  * Machine setup..
-@@ -67,6 +81,8 @@
- struct cpuinfo_x86 boot_cpu_data __read_mostly;
- EXPORT_SYMBOL(boot_cpu_data);
+-
+ /* Keep the order TAG_Valid, TAG_Zero, TW_Denormal */
+ /* The following fold to 2 (Special) in the Tag Word */
+-#define TW_Denormal     Const(4)        /* De-normal */
++#define TW_Denormal     Const(4)	/* De-normal */
+ #define TW_Infinity	Const(5)	/* + or - infinity */
+ #define	TW_NaN		Const(6)	/* Not a Number */
+ #define	TW_Unsupported	Const(7)	/* Not supported by an 80486 */
+@@ -67,14 +65,13 @@
+ #define DEST_RM         0x20
+ #define LOADED          0x40
  
-+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
-+
- unsigned long mmu_cr4_features;
+-#define FPU_Exception   Const(0x80000000)   /* Added to tag returns. */
+-
++#define FPU_Exception   Const(0x80000000)	/* Added to tag returns. */
  
- /* Boot loader ID as an integer, for the benefit of proc_dointvec */
-@@ -76,7 +92,7 @@ unsigned long saved_video_mode;
+ #ifndef __ASSEMBLY__
  
- int force_mwait __cpuinitdata;
+ #include "fpu_system.h"
  
--/* 
-+/*
-  * Early DMI memory
-  */
- int dmi_alloc_index;
-@@ -122,25 +138,27 @@ struct resource standard_io_resources[] = {
+-#include <asm/sigcontext.h>   /* for struct _fpstate */
++#include <asm/sigcontext.h>	/* for struct _fpstate */
+ #include <asm/math_emu.h>
+ #include <linux/linkage.h>
  
- #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
+@@ -112,30 +109,33 @@ extern u_char emulating;
+ #define PREFIX_DEFAULT 7
  
--struct resource data_resource = {
-+static struct resource data_resource = {
- 	.name = "Kernel data",
- 	.start = 0,
- 	.end = 0,
- 	.flags = IORESOURCE_RAM,
- };
--struct resource code_resource = {
-+static struct resource code_resource = {
- 	.name = "Kernel code",
- 	.start = 0,
- 	.end = 0,
- 	.flags = IORESOURCE_RAM,
+ struct address {
+-  unsigned int offset;
+-  unsigned int selector:16;
+-  unsigned int opcode:11;
+-  unsigned int empty:5;
++	unsigned int offset;
++	unsigned int selector:16;
++	unsigned int opcode:11;
++	unsigned int empty:5;
  };
--struct resource bss_resource = {
-+static struct resource bss_resource = {
- 	.name = "Kernel bss",
- 	.start = 0,
- 	.end = 0,
- 	.flags = IORESOURCE_RAM,
+ struct fpu__reg {
+-  unsigned sigl;
+-  unsigned sigh;
+-  short exp;
++	unsigned sigl;
++	unsigned sigh;
++	short exp;
  };
  
-+static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
-+
- #ifdef CONFIG_PROC_VMCORE
- /* elfcorehdr= specifies the location of elf core header
-  * stored by the crashed kernel. This option will be passed
-@@ -166,12 +184,12 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
- 	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
- 	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
- 	if (bootmap == -1L)
--		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
-+		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
- 	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
- 	e820_register_active_regions(0, start_pfn, end_pfn);
- 	free_bootmem_with_active_regions(0, end_pfn);
- 	reserve_bootmem(bootmap, bootmap_size);
--} 
-+}
- #endif
+-typedef void (*FUNC)(void);
++typedef void (*FUNC) (void);
+ typedef struct fpu__reg FPU_REG;
+-typedef void (*FUNC_ST0)(FPU_REG *st0_ptr, u_char st0_tag);
+-typedef struct { u_char address_size, operand_size, segment; }
+-        overrides;
++typedef void (*FUNC_ST0) (FPU_REG *st0_ptr, u_char st0_tag);
++typedef struct {
++	u_char address_size, operand_size, segment;
++} overrides;
+ /* This structure is 32 bits: */
+-typedef struct { overrides override;
+-		 u_char default_mode; } fpu_addr_modes;
++typedef struct {
++	overrides override;
++	u_char default_mode;
++} fpu_addr_modes;
+ /* PROTECTED has a restricted meaning in the emulator; it is used
+    to signal that the emulator needs to do special things to ensure
+    that protection is respected in a segmented model. */
+ #define PROTECTED 4
+-#define SIXTEEN   1         /* We rely upon this being 1 (true) */
++#define SIXTEEN   1		/* We rely upon this being 1 (true) */
+ #define VM86      SIXTEEN
+ #define PM16      (SIXTEEN | PROTECTED)
+ #define SEG32     PROTECTED
+@@ -168,8 +168,8 @@ extern u_char const data_sizes_16[32];
  
- #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-@@ -205,7 +223,8 @@ static void __init reserve_crashkernel(void)
- 	unsigned long long crash_size, crash_base;
- 	int ret;
+ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
+ {
+-  *(short *)&(y->exp) = *(const short *)&(x->exp); 
+-  *(long long *)&(y->sigl) = *(const long long *)&(x->sigl);
++	*(short *)&(y->exp) = *(const short *)&(x->exp);
++	*(long long *)&(y->sigl) = *(const long long *)&(x->sigl);
+ }
  
--	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
-+	free_mem =
-+		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
+ #define exponent(x)  (((*(short *)&((x)->exp)) & 0x7fff) - EXTENDED_Ebias)
+@@ -184,27 +184,26 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
  
- 	ret = parse_crashkernel(boot_command_line, free_mem,
- 			&crash_size, &crash_base);
-@@ -229,33 +248,21 @@ static inline void __init reserve_crashkernel(void)
- {}
- #endif
+ #define significand(x) ( ((unsigned long long *)&((x)->sigl))[0] )
  
--#define EBDA_ADDR_POINTER 0x40E
--
--unsigned __initdata ebda_addr;
--unsigned __initdata ebda_size;
--
--static void discover_ebda(void)
-+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
-+void __attribute__((weak)) __init memory_setup(void)
- {
--	/*
--	 * there is a real-mode segmented pointer pointing to the 
--	 * 4K EBDA area at 0x40E
--	 */
--	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
--	ebda_addr <<= 4;
--
--	ebda_size = *(unsigned short *)__va(ebda_addr);
 -
--	/* Round EBDA up to pages */
--	if (ebda_size == 0)
--		ebda_size = 1;
--	ebda_size <<= 10;
--	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
--	if (ebda_size > 64*1024)
--		ebda_size = 64*1024;
-+       machine_specific_memory_setup();
- }
+ /*----- Prototypes for functions written in assembler -----*/
+ /* extern void reg_move(FPU_REG *a, FPU_REG *b); */
  
-+/*
-+ * setup_arch - architecture-specific boot-time initializations
-+ *
-+ * Note: On x86_64, fixmaps are ready for use even before this is called.
-+ */
- void __init setup_arch(char **cmdline_p)
- {
-+	unsigned i;
-+
- 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
+ asmlinkage int FPU_normalize(FPU_REG *x);
+ asmlinkage int FPU_normalize_nuo(FPU_REG *x);
+ asmlinkage int FPU_u_sub(FPU_REG const *arg1, FPU_REG const *arg2,
+-			 FPU_REG *answ, unsigned int control_w, u_char sign,
++			 FPU_REG * answ, unsigned int control_w, u_char sign,
+ 			 int expa, int expb);
+ asmlinkage int FPU_u_mul(FPU_REG const *arg1, FPU_REG const *arg2,
+-			 FPU_REG *answ, unsigned int control_w, u_char sign,
++			 FPU_REG * answ, unsigned int control_w, u_char sign,
+ 			 int expon);
+ asmlinkage int FPU_u_div(FPU_REG const *arg1, FPU_REG const *arg2,
+-			 FPU_REG *answ, unsigned int control_w, u_char sign);
++			 FPU_REG * answ, unsigned int control_w, u_char sign);
+ asmlinkage int FPU_u_add(FPU_REG const *arg1, FPU_REG const *arg2,
+-			 FPU_REG *answ, unsigned int control_w, u_char sign,
++			 FPU_REG * answ, unsigned int control_w, u_char sign,
+ 			 int expa, int expb);
+ asmlinkage int wm_sqrt(FPU_REG *n, int dummy1, int dummy2,
+ 		       unsigned int control_w, u_char sign);
+-asmlinkage unsigned	FPU_shrx(void *l, unsigned x);
+-asmlinkage unsigned	FPU_shrxs(void *v, unsigned x);
++asmlinkage unsigned FPU_shrx(void *l, unsigned x);
++asmlinkage unsigned FPU_shrxs(void *v, unsigned x);
+ asmlinkage unsigned long FPU_div_small(unsigned long long *x, unsigned long y);
+ asmlinkage int FPU_round(FPU_REG *arg, unsigned int extent, int dummy,
+ 			 unsigned int control_w, u_char sign);
+diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
+index 1853524..760baee 100644
+--- a/arch/x86/math-emu/fpu_entry.c
++++ b/arch/x86/math-emu/fpu_entry.c
+@@ -25,10 +25,11 @@
+  +---------------------------------------------------------------------------*/
  
- 	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
-@@ -269,7 +276,15 @@ void __init setup_arch(char **cmdline_p)
- 	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
- 	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
- #endif
--	setup_memory_region();
-+#ifdef CONFIG_EFI
-+	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
-+		     "EL64", 4))
-+		efi_enabled = 1;
-+#endif
-+
-+	ARCH_SETUP
-+
-+	memory_setup();
- 	copy_edd();
+ #include <linux/signal.h>
+-#include <linux/ptrace.h>
++#include <linux/regset.h>
  
- 	if (!boot_params.hdr.root_flags)
-@@ -293,27 +308,47 @@ void __init setup_arch(char **cmdline_p)
+ #include <asm/uaccess.h>
+ #include <asm/desc.h>
++#include <asm/user.h>
  
- 	parse_early_param();
+ #include "fpu_system.h"
+ #include "fpu_emu.h"
+@@ -36,726 +37,727 @@
+ #include "control_w.h"
+ #include "status_w.h"
  
-+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-+	if (init_ohci1394_dma_early)
-+		init_ohci1394_dma_on_all_controllers();
-+#endif
-+
- 	finish_e820_parsing();
+-#define __BAD__ FPU_illegal   /* Illegal on an 80486, causes SIGILL */
++#define __BAD__ FPU_illegal	/* Illegal on an 80486, causes SIGILL */
  
-+	early_gart_iommu_check();
-+
- 	e820_register_active_regions(0, 0, -1UL);
- 	/*
- 	 * partially used pages are not usable - thus
- 	 * we are rounding upwards:
- 	 */
- 	end_pfn = e820_end_of_ram();
-+	/* update e820 for memory not covered by WB MTRRs */
-+	mtrr_bp_init();
-+	if (mtrr_trim_uncached_memory(end_pfn)) {
-+		e820_register_active_regions(0, 0, -1UL);
-+		end_pfn = e820_end_of_ram();
-+	}
-+
- 	num_physpages = end_pfn;
+-#ifndef NO_UNDOC_CODE    /* Un-documented FPU op-codes supported by default. */
++#ifndef NO_UNDOC_CODE		/* Un-documented FPU op-codes supported by default. */
  
- 	check_efer();
+ /* WARNING: These codes are not documented by Intel in their 80486 manual
+    and may not work on FPU clones or later Intel FPUs. */
  
--	discover_ebda();
--
- 	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
-+	if (efi_enabled)
-+		efi_init();
+ /* Changes to support the un-doc codes provided by Linus Torvalds. */
  
- 	dmi_scan_machine();
+-#define _d9_d8_ fstp_i    /* unofficial code (19) */
+-#define _dc_d0_ fcom_st   /* unofficial code (14) */
+-#define _dc_d8_ fcompst   /* unofficial code (1c) */
+-#define _dd_c8_ fxch_i    /* unofficial code (0d) */
+-#define _de_d0_ fcompst   /* unofficial code (16) */
+-#define _df_c0_ ffreep    /* unofficial code (07) ffree + pop */
+-#define _df_c8_ fxch_i    /* unofficial code (0f) */
+-#define _df_d0_ fstp_i    /* unofficial code (17) */
+-#define _df_d8_ fstp_i    /* unofficial code (1f) */
++#define _d9_d8_ fstp_i		/* unofficial code (19) */
++#define _dc_d0_ fcom_st		/* unofficial code (14) */
++#define _dc_d8_ fcompst		/* unofficial code (1c) */
++#define _dd_c8_ fxch_i		/* unofficial code (0d) */
++#define _de_d0_ fcompst		/* unofficial code (16) */
++#define _df_c0_ ffreep		/* unofficial code (07) ffree + pop */
++#define _df_c8_ fxch_i		/* unofficial code (0f) */
++#define _df_d0_ fstp_i		/* unofficial code (17) */
++#define _df_d8_ fstp_i		/* unofficial code (1f) */
  
-+	io_delay_init();
-+
- #ifdef CONFIG_SMP
--	/* setup to use the static apicid table during kernel startup */
--	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
-+	/* setup to use the early static init tables during kernel startup */
-+	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
-+	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
-+#ifdef CONFIG_NUMA
-+	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
-+#endif
- #endif
+ static FUNC const st_instr_table[64] = {
+-  fadd__,   fld_i_,     __BAD__, __BAD__, fadd_i,  ffree_,  faddp_,  _df_c0_,
+-  fmul__,   fxch_i,     __BAD__, __BAD__, fmul_i,  _dd_c8_, fmulp_,  _df_c8_,
+-  fcom_st,  fp_nop,     __BAD__, __BAD__, _dc_d0_, fst_i_,  _de_d0_, _df_d0_,
+-  fcompst,  _d9_d8_,    __BAD__, __BAD__, _dc_d8_, fstp_i,  fcompp,  _df_d8_,
+-  fsub__,   FPU_etc,    __BAD__, finit_,  fsubri,  fucom_,  fsubrp,  fstsw_,
+-  fsubr_,   fconst,     fucompp, __BAD__, fsub_i,  fucomp,  fsubp_,  __BAD__,
+-  fdiv__,   FPU_triga,  __BAD__, __BAD__, fdivri,  __BAD__, fdivrp,  __BAD__,
+-  fdivr_,   FPU_trigb,  __BAD__, __BAD__, fdiv_i,  __BAD__, fdivp_,  __BAD__,
++	fadd__, fld_i_, __BAD__, __BAD__, fadd_i, ffree_, faddp_, _df_c0_,
++	fmul__, fxch_i, __BAD__, __BAD__, fmul_i, _dd_c8_, fmulp_, _df_c8_,
++	fcom_st, fp_nop, __BAD__, __BAD__, _dc_d0_, fst_i_, _de_d0_, _df_d0_,
++	fcompst, _d9_d8_, __BAD__, __BAD__, _dc_d8_, fstp_i, fcompp, _df_d8_,
++	fsub__, FPU_etc, __BAD__, finit_, fsubri, fucom_, fsubrp, fstsw_,
++	fsubr_, fconst, fucompp, __BAD__, fsub_i, fucomp, fsubp_, __BAD__,
++	fdiv__, FPU_triga, __BAD__, __BAD__, fdivri, __BAD__, fdivrp, __BAD__,
++	fdivr_, FPU_trigb, __BAD__, __BAD__, fdiv_i, __BAD__, fdivp_, __BAD__,
+ };
  
- #ifdef CONFIG_ACPI
-@@ -340,48 +375,26 @@ void __init setup_arch(char **cmdline_p)
- #endif
+-#else     /* Support only documented FPU op-codes */
++#else /* Support only documented FPU op-codes */
  
- #ifdef CONFIG_NUMA
--	numa_initmem_init(0, end_pfn); 
-+	numa_initmem_init(0, end_pfn);
- #else
- 	contig_initmem_init(0, end_pfn);
- #endif
+ static FUNC const st_instr_table[64] = {
+-  fadd__,   fld_i_,     __BAD__, __BAD__, fadd_i,  ffree_,  faddp_,  __BAD__,
+-  fmul__,   fxch_i,     __BAD__, __BAD__, fmul_i,  __BAD__, fmulp_,  __BAD__,
+-  fcom_st,  fp_nop,     __BAD__, __BAD__, __BAD__, fst_i_,  __BAD__, __BAD__,
+-  fcompst,  __BAD__,    __BAD__, __BAD__, __BAD__, fstp_i,  fcompp,  __BAD__,
+-  fsub__,   FPU_etc,    __BAD__, finit_,  fsubri,  fucom_,  fsubrp,  fstsw_,
+-  fsubr_,   fconst,     fucompp, __BAD__, fsub_i,  fucomp,  fsubp_,  __BAD__,
+-  fdiv__,   FPU_triga,  __BAD__, __BAD__, fdivri,  __BAD__, fdivrp,  __BAD__,
+-  fdivr_,   FPU_trigb,  __BAD__, __BAD__, fdiv_i,  __BAD__, fdivp_,  __BAD__,
++	fadd__, fld_i_, __BAD__, __BAD__, fadd_i, ffree_, faddp_, __BAD__,
++	fmul__, fxch_i, __BAD__, __BAD__, fmul_i, __BAD__, fmulp_, __BAD__,
++	fcom_st, fp_nop, __BAD__, __BAD__, __BAD__, fst_i_, __BAD__, __BAD__,
++	fcompst, __BAD__, __BAD__, __BAD__, __BAD__, fstp_i, fcompp, __BAD__,
++	fsub__, FPU_etc, __BAD__, finit_, fsubri, fucom_, fsubrp, fstsw_,
++	fsubr_, fconst, fucompp, __BAD__, fsub_i, fucomp, fsubp_, __BAD__,
++	fdiv__, FPU_triga, __BAD__, __BAD__, fdivri, __BAD__, fdivrp, __BAD__,
++	fdivr_, FPU_trigb, __BAD__, __BAD__, fdiv_i, __BAD__, fdivp_, __BAD__,
+ };
  
--	/* Reserve direct mapping */
--	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
--				(table_end - table_start) << PAGE_SHIFT);
--
--	/* reserve kernel */
--	reserve_bootmem_generic(__pa_symbol(&_text),
--				__pa_symbol(&_end) - __pa_symbol(&_text));
-+	early_res_to_bootmem();
+ #endif /* NO_UNDOC_CODE */
  
-+#ifdef CONFIG_ACPI_SLEEP
- 	/*
--	 * reserve physical page 0 - it's a special BIOS page on many boxes,
--	 * enabling clean reboots, SMP operation, laptop functions.
-+	 * Reserve low memory region for sleep support.
- 	 */
--	reserve_bootmem_generic(0, PAGE_SIZE);
 -
--	/* reserve ebda region */
--	if (ebda_addr)
--		reserve_bootmem_generic(ebda_addr, ebda_size);
--#ifdef CONFIG_NUMA
--	/* reserve nodemap region */
--	if (nodemap_addr)
--		reserve_bootmem_generic(nodemap_addr, nodemap_size);
-+       acpi_reserve_bootmem();
- #endif
+-#define _NONE_ 0   /* Take no special action */
+-#define _REG0_ 1   /* Need to check for not empty st(0) */
+-#define _REGI_ 2   /* Need to check for not empty st(0) and st(rm) */
+-#define _REGi_ 0   /* Uses st(rm) */
+-#define _PUSH_ 3   /* Need to check for space to push onto stack */
+-#define _null_ 4   /* Function illegal or not implemented */
+-#define _REGIi 5   /* Uses st(0) and st(rm), result to st(rm) */
+-#define _REGIp 6   /* Uses st(0) and st(rm), result to st(rm) then pop */
+-#define _REGIc 0   /* Compare st(0) and st(rm) */
+-#define _REGIn 0   /* Uses st(0) and st(rm), but handle checks later */
++#define _NONE_ 0		/* Take no special action */
++#define _REG0_ 1		/* Need to check for not empty st(0) */
++#define _REGI_ 2		/* Need to check for not empty st(0) and st(rm) */
++#define _REGi_ 0		/* Uses st(rm) */
++#define _PUSH_ 3		/* Need to check for space to push onto stack */
++#define _null_ 4		/* Function illegal or not implemented */
++#define _REGIi 5		/* Uses st(0) and st(rm), result to st(rm) */
++#define _REGIp 6		/* Uses st(0) and st(rm), result to st(rm) then pop */
++#define _REGIc 0		/* Compare st(0) and st(rm) */
++#define _REGIn 0		/* Uses st(0) and st(rm), but handle checks later */
  
--#ifdef CONFIG_SMP
--	/* Reserve SMP trampoline */
--	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
--#endif
-+	if (efi_enabled)
-+		efi_reserve_bootmem();
+ #ifndef NO_UNDOC_CODE
  
--#ifdef CONFIG_ACPI_SLEEP
-        /*
--        * Reserve low memory region for sleep support.
--        */
--       acpi_reserve_bootmem();
--#endif
--	/*
--	 * Find and reserve possible boot-time SMP configuration:
--	 */
-+	* Find and reserve possible boot-time SMP configuration:
-+	*/
- 	find_smp_config();
- #ifdef CONFIG_BLK_DEV_INITRD
- 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
-@@ -395,6 +408,8 @@ void __init setup_arch(char **cmdline_p)
- 			initrd_start = ramdisk_image + PAGE_OFFSET;
- 			initrd_end = initrd_start+ramdisk_size;
- 		} else {
-+			/* Assumes everything on node 0 */
-+			free_bootmem(ramdisk_image, ramdisk_size);
- 			printk(KERN_ERR "initrd extends beyond end of memory "
- 			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- 			       ramdisk_end, end_of_mem);
-@@ -404,17 +419,10 @@ void __init setup_arch(char **cmdline_p)
- #endif
- 	reserve_crashkernel();
- 	paging_init();
-+	map_vsyscall();
+ /* Un-documented FPU op-codes supported by default. (see above) */
  
--#ifdef CONFIG_PCI
- 	early_quirks();
--#endif
+ static u_char const type_table[64] = {
+-  _REGI_, _NONE_, _null_, _null_, _REGIi, _REGi_, _REGIp, _REGi_,
+-  _REGI_, _REGIn, _null_, _null_, _REGIi, _REGI_, _REGIp, _REGI_,
+-  _REGIc, _NONE_, _null_, _null_, _REGIc, _REG0_, _REGIc, _REG0_,
+-  _REGIc, _REG0_, _null_, _null_, _REGIc, _REG0_, _REGIc, _REG0_,
+-  _REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
+-  _REGI_, _NONE_, _REGIc, _null_, _REGIi, _REGIc, _REGIp, _null_,
+-  _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
+-  _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_
++	_REGI_, _NONE_, _null_, _null_, _REGIi, _REGi_, _REGIp, _REGi_,
++	_REGI_, _REGIn, _null_, _null_, _REGIi, _REGI_, _REGIp, _REGI_,
++	_REGIc, _NONE_, _null_, _null_, _REGIc, _REG0_, _REGIc, _REG0_,
++	_REGIc, _REG0_, _null_, _null_, _REGIc, _REG0_, _REGIc, _REG0_,
++	_REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
++	_REGI_, _NONE_, _REGIc, _null_, _REGIi, _REGIc, _REGIp, _null_,
++	_REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
++	_REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_
+ };
  
--	/*
--	 * set this early, so we dont allocate cpu0
--	 * if MADT list doesnt list BSP first
--	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
--	 */
--	cpu_set(0, cpu_present_map);
- #ifdef CONFIG_ACPI
- 	/*
- 	 * Read APIC and some other early information from ACPI tables.
-@@ -430,25 +438,24 @@ void __init setup_arch(char **cmdline_p)
- 	if (smp_found_config)
- 		get_smp_config();
- 	init_apic_mappings();
-+	ioapic_init_mappings();
+-#else     /* Support only documented FPU op-codes */
++#else /* Support only documented FPU op-codes */
  
- 	/*
- 	 * We trust e820 completely. No explicit ROM probing in memory.
-- 	 */
--	e820_reserve_resources(); 
-+	 */
-+	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
- 	e820_mark_nosave_regions();
+ static u_char const type_table[64] = {
+-  _REGI_, _NONE_, _null_, _null_, _REGIi, _REGi_, _REGIp, _null_,
+-  _REGI_, _REGIn, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
+-  _REGIc, _NONE_, _null_, _null_, _null_, _REG0_, _null_, _null_,
+-  _REGIc, _null_, _null_, _null_, _null_, _REG0_, _REGIc, _null_,
+-  _REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
+-  _REGI_, _NONE_, _REGIc, _null_, _REGIi, _REGIc, _REGIp, _null_,
+-  _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
+-  _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_
++	_REGI_, _NONE_, _null_, _null_, _REGIi, _REGi_, _REGIp, _null_,
++	_REGI_, _REGIn, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
++	_REGIc, _NONE_, _null_, _null_, _null_, _REG0_, _null_, _null_,
++	_REGIc, _null_, _null_, _null_, _null_, _REG0_, _REGIc, _null_,
++	_REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
++	_REGI_, _NONE_, _REGIc, _null_, _REGIi, _REGIc, _REGIp, _null_,
++	_REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
++	_REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_
+ };
  
--	{
--	unsigned i;
- 	/* request I/O space for devices used on all i[345]86 PCs */
- 	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
- 		request_resource(&ioport_resource, &standard_io_resources[i]);
--	}
+ #endif /* NO_UNDOC_CODE */
  
- 	e820_setup_gap();
+-
+ #ifdef RE_ENTRANT_CHECKING
+-u_char emulating=0;
++u_char emulating = 0;
+ #endif /* RE_ENTRANT_CHECKING */
  
- #ifdef CONFIG_VT
- #if defined(CONFIG_VGA_CONSOLE)
--	conswitchp = &vga_con;
-+	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
-+		conswitchp = &vga_con;
- #elif defined(CONFIG_DUMMY_CONSOLE)
- 	conswitchp = &dummy_con;
- #endif
-@@ -479,9 +486,10 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+-static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
+-			overrides *override);
++static int valid_prefix(u_char *Byte, u_char __user ** fpu_eip,
++			overrides * override);
  
- 	if (n >= 0x80000005) {
- 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
--		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
--			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
--		c->x86_cache_size=(ecx>>24)+(edx>>24);
-+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
-+		       "D cache %dK (%d bytes/line)\n",
-+		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+		c->x86_cache_size = (ecx>>24) + (edx>>24);
- 		/* On K8 L1 TLB is inclusive, so don't count it */
- 		c->x86_tlbsize = 0;
- 	}
-@@ -495,11 +503,8 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
- 		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
- 		c->x86_cache_size, ecx & 0xFF);
- 	}
+ asmlinkage void math_emulate(long arg)
+ {
+-  u_char  FPU_modrm, byte1;
+-  unsigned short code;
+-  fpu_addr_modes addr_modes;
+-  int unmasked;
+-  FPU_REG loaded_data;
+-  FPU_REG *st0_ptr;
+-  u_char	  loaded_tag, st0_tag;
+-  void __user *data_address;
+-  struct address data_sel_off;
+-  struct address entry_sel_off;
+-  unsigned long code_base = 0;
+-  unsigned long code_limit = 0;  /* Initialized to stop compiler warnings */
+-  struct desc_struct code_descriptor;
++	u_char FPU_modrm, byte1;
++	unsigned short code;
++	fpu_addr_modes addr_modes;
++	int unmasked;
++	FPU_REG loaded_data;
++	FPU_REG *st0_ptr;
++	u_char loaded_tag, st0_tag;
++	void __user *data_address;
++	struct address data_sel_off;
++	struct address entry_sel_off;
++	unsigned long code_base = 0;
++	unsigned long code_limit = 0;	/* Initialized to stop compiler warnings */
++	struct desc_struct code_descriptor;
+ 
+ #ifdef RE_ENTRANT_CHECKING
+-  if ( emulating )
+-    {
+-      printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
+-    }
+-  RE_ENTRANT_CHECK_ON;
++	if (emulating) {
++		printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
++	}
++	RE_ENTRANT_CHECK_ON;
+ #endif /* RE_ENTRANT_CHECKING */
+ 
+-  if (!used_math())
+-    {
+-      finit();
+-      set_used_math();
+-    }
 -
--	if (n >= 0x80000007)
--		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
- 	if (n >= 0x80000008) {
--		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
-+		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
- 		c->x86_virt_bits = (eax >> 8) & 0xff;
- 		c->x86_phys_bits = eax & 0xff;
+-  SETUP_DATA_AREA(arg);
+-
+-  FPU_ORIG_EIP = FPU_EIP;
+-
+-  if ( (FPU_EFLAGS & 0x00020000) != 0 )
+-    {
+-      /* Virtual 8086 mode */
+-      addr_modes.default_mode = VM86;
+-      FPU_EIP += code_base = FPU_CS << 4;
+-      code_limit = code_base + 0xffff;  /* Assumes code_base <= 0xffff0000 */
+-    }
+-  else if ( FPU_CS == __USER_CS && FPU_DS == __USER_DS )
+-    {
+-      addr_modes.default_mode = 0;
+-    }
+-  else if ( FPU_CS == __KERNEL_CS )
+-    {
+-      printk("math_emulate: %04x:%08lx\n",FPU_CS,FPU_EIP);
+-      panic("Math emulation needed in kernel");
+-    }
+-  else
+-    {
+-
+-      if ( (FPU_CS & 4) != 4 )   /* Must be in the LDT */
+-	{
+-	  /* Can only handle segmented addressing via the LDT
+-	     for now, and it must be 16 bit */
+-	  printk("FPU emulator: Unsupported addressing mode\n");
+-	  math_abort(FPU_info, SIGILL);
++	if (!used_math()) {
++		finit();
++		set_used_math();
  	}
-@@ -508,14 +513,15 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
- #ifdef CONFIG_NUMA
- static int nearby_node(int apicid)
- {
--	int i;
-+	int i, node;
+ 
+-      code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+-      if ( SEG_D_SIZE(code_descriptor) )
+-	{
+-	  /* The above test may be wrong, the book is not clear */
+-	  /* Segmented 32 bit protected mode */
+-	  addr_modes.default_mode = SEG32;
++	SETUP_DATA_AREA(arg);
 +
- 	for (i = apicid - 1; i >= 0; i--) {
--		int node = apicid_to_node[i];
-+		node = apicid_to_node[i];
- 		if (node != NUMA_NO_NODE && node_online(node))
- 			return node;
- 	}
- 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
--		int node = apicid_to_node[i];
-+		node = apicid_to_node[i];
- 		if (node != NUMA_NO_NODE && node_online(node))
- 			return node;
- 	}
-@@ -527,7 +533,7 @@ static int nearby_node(int apicid)
-  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
-  * Assumes number of cores is a power of two.
-  */
--static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
-+static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
- {
- #ifdef CONFIG_SMP
- 	unsigned bits;
-@@ -536,7 +542,54 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
- 	int node = 0;
- 	unsigned apicid = hard_smp_processor_id();
- #endif
--	unsigned ecx = cpuid_ecx(0x80000008);
-+	bits = c->x86_coreid_bits;
++	FPU_ORIG_EIP = FPU_EIP;
 +
-+	/* Low order bits define the core id (index of core in socket) */
-+	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
-+	/* Convert the APIC ID into the socket ID */
-+	c->phys_proc_id = phys_pkg_id(bits);
++	if ((FPU_EFLAGS & 0x00020000) != 0) {
++		/* Virtual 8086 mode */
++		addr_modes.default_mode = VM86;
++		FPU_EIP += code_base = FPU_CS << 4;
++		code_limit = code_base + 0xffff;	/* Assumes code_base <= 0xffff0000 */
++	} else if (FPU_CS == __USER_CS && FPU_DS == __USER_DS) {
++		addr_modes.default_mode = 0;
++	} else if (FPU_CS == __KERNEL_CS) {
++		printk("math_emulate: %04x:%08lx\n", FPU_CS, FPU_EIP);
++		panic("Math emulation needed in kernel");
++	} else {
 +
-+#ifdef CONFIG_NUMA
-+	node = c->phys_proc_id;
-+	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-+		node = apicid_to_node[apicid];
-+	if (!node_online(node)) {
-+		/* Two possibilities here:
-+		   - The CPU is missing memory and no node was created.
-+		   In that case try picking one from a nearby CPU
-+		   - The APIC IDs differ from the HyperTransport node IDs
-+		   which the K8 northbridge parsing fills in.
-+		   Assume they are all increased by a constant offset,
-+		   but in the same order as the HT nodeids.
-+		   If that doesn't result in a usable node fall back to the
-+		   path for the previous case.  */
++		if ((FPU_CS & 4) != 4) {	/* Must be in the LDT */
++			/* Can only handle segmented addressing via the LDT
++			   for now, and it must be 16 bit */
++			printk("FPU emulator: Unsupported addressing mode\n");
++			math_abort(FPU_info, SIGILL);
++		}
 +
-+		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
++		code_descriptor = LDT_DESCRIPTOR(FPU_CS);
++		if (SEG_D_SIZE(code_descriptor)) {
++			/* The above test may be wrong, the book is not clear */
++			/* Segmented 32 bit protected mode */
++			addr_modes.default_mode = SEG32;
++		} else {
++			/* 16 bit protected mode */
++			addr_modes.default_mode = PM16;
++		}
++		FPU_EIP += code_base = SEG_BASE_ADDR(code_descriptor);
++		code_limit = code_base
++		    + (SEG_LIMIT(code_descriptor) +
++		       1) * SEG_GRANULARITY(code_descriptor)
++		    - 1;
++		if (code_limit < code_base)
++			code_limit = 0xffffffff;
+ 	}
+-      else
+-	{
+-	  /* 16 bit protected mode */
+-	  addr_modes.default_mode = PM16;
 +
-+		if (ht_nodeid >= 0 &&
-+		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-+			node = apicid_to_node[ht_nodeid];
-+		/* Pick a nearby node */
-+		if (!node_online(node))
-+			node = nearby_node(apicid);
-+	}
-+	numa_set_node(cpu, node);
++	FPU_lookahead = !(FPU_EFLAGS & X86_EFLAGS_TF);
 +
-+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-+#endif
-+#endif
-+}
++	if (!valid_prefix(&byte1, (u_char __user **) & FPU_EIP,
++			  &addr_modes.override)) {
++		RE_ENTRANT_CHECK_OFF;
++		printk
++		    ("FPU emulator: Unknown prefix byte 0x%02x, probably due to\n"
++		     "FPU emulator: self-modifying code! (emulation impossible)\n",
++		     byte1);
++		RE_ENTRANT_CHECK_ON;
++		EXCEPTION(EX_INTERNAL | 0x126);
++		math_abort(FPU_info, SIGILL);
+ 	}
+-      FPU_EIP += code_base = SEG_BASE_ADDR(code_descriptor);
+-      code_limit = code_base
+-	+ (SEG_LIMIT(code_descriptor)+1) * SEG_GRANULARITY(code_descriptor)
+-	  - 1;
+-      if ( code_limit < code_base ) code_limit = 0xffffffff;
+-    }
+-
+-  FPU_lookahead = 1;
+-  if (current->ptrace & PT_PTRACED)
+-    FPU_lookahead = 0;
+-
+-  if ( !valid_prefix(&byte1, (u_char __user **)&FPU_EIP,
+-		     &addr_modes.override) )
+-    {
+-      RE_ENTRANT_CHECK_OFF;
+-      printk("FPU emulator: Unknown prefix byte 0x%02x, probably due to\n"
+-	     "FPU emulator: self-modifying code! (emulation impossible)\n",
+-	     byte1);
+-      RE_ENTRANT_CHECK_ON;
+-      EXCEPTION(EX_INTERNAL|0x126);
+-      math_abort(FPU_info,SIGILL);
+-    }
+-
+-do_another_FPU_instruction:
+-
+-  no_ip_update = 0;
+-
+-  FPU_EIP++;  /* We have fetched the prefix and first code bytes. */
+-
+-  if ( addr_modes.default_mode )
+-    {
+-      /* This checks for the minimum instruction bytes.
+-	 We also need to check any extra (address mode) code access. */
+-      if ( FPU_EIP > code_limit )
+-	math_abort(FPU_info,SIGSEGV);
+-    }
+-
+-  if ( (byte1 & 0xf8) != 0xd8 )
+-    {
+-      if ( byte1 == FWAIT_OPCODE )
+-	{
+-	  if (partial_status & SW_Summary)
+-	    goto do_the_FPU_interrupt;
+-	  else
+-	    goto FPU_fwait_done;
 +
-+static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_SMP
-+	unsigned bits, ecx;
++      do_another_FPU_instruction:
 +
-+	/* Multi core CPU? */
-+	if (c->extended_cpuid_level < 0x80000008)
-+		return;
++	no_ip_update = 0;
 +
-+	ecx = cpuid_ecx(0x80000008);
- 
- 	c->x86_max_cores = (ecx & 0xff) + 1;
- 
-@@ -549,37 +602,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
- 			bits++;
++	FPU_EIP++;		/* We have fetched the prefix and first code bytes. */
++
++	if (addr_modes.default_mode) {
++		/* This checks for the minimum instruction bytes.
++		   We also need to check any extra (address mode) code access. */
++		if (FPU_EIP > code_limit)
++			math_abort(FPU_info, SIGSEGV);
  	}
- 
--	/* Low order bits define the core id (index of core in socket) */
--	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
--	/* Convert the APIC ID into the socket ID */
--	c->phys_proc_id = phys_pkg_id(bits);
--
--#ifdef CONFIG_NUMA
--  	node = c->phys_proc_id;
-- 	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-- 		node = apicid_to_node[apicid];
-- 	if (!node_online(node)) {
-- 		/* Two possibilities here:
-- 		   - The CPU is missing memory and no node was created.
-- 		   In that case try picking one from a nearby CPU
-- 		   - The APIC IDs differ from the HyperTransport node IDs
-- 		   which the K8 northbridge parsing fills in.
-- 		   Assume they are all increased by a constant offset,
-- 		   but in the same order as the HT nodeids.
-- 		   If that doesn't result in a usable node fall back to the
-- 		   path for the previous case.  */
--		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
-- 		if (ht_nodeid >= 0 &&
-- 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-- 			node = apicid_to_node[ht_nodeid];
-- 		/* Pick a nearby node */
-- 		if (!node_online(node))
-- 			node = nearby_node(apicid);
-- 	}
--	numa_set_node(cpu, node);
-+	c->x86_coreid_bits = bits;
- 
--	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
--#endif
- #endif
- }
- 
-@@ -595,8 +619,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
- /* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
- static __cpuinit int amd_apic_timer_broken(void)
- {
--	u32 lo, hi;
--	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
-+	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
 +
- 	switch (eax & CPUID_XFAM) {
- 	case CPUID_XFAM_K8:
- 		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
-@@ -614,6 +638,15 @@ static __cpuinit int amd_apic_timer_broken(void)
- 	return 0;
- }
++	if ((byte1 & 0xf8) != 0xd8) {
++		if (byte1 == FWAIT_OPCODE) {
++			if (partial_status & SW_Summary)
++				goto do_the_FPU_interrupt;
++			else
++				goto FPU_fwait_done;
++		}
+ #ifdef PARANOID
+-      EXCEPTION(EX_INTERNAL|0x128);
+-      math_abort(FPU_info,SIGILL);
++		EXCEPTION(EX_INTERNAL | 0x128);
++		math_abort(FPU_info, SIGILL);
+ #endif /* PARANOID */
+-    }
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_code_access_ok(1);
+-  FPU_get_user(FPU_modrm, (u_char __user *) FPU_EIP);
+-  RE_ENTRANT_CHECK_ON;
+-  FPU_EIP++;
+-
+-  if (partial_status & SW_Summary)
+-    {
+-      /* Ignore the error for now if the current instruction is a no-wait
+-	 control instruction */
+-      /* The 80486 manual contradicts itself on this topic,
+-	 but a real 80486 uses the following instructions:
+-	 fninit, fnstenv, fnsave, fnstsw, fnstenv, fnclex.
+-       */
+-      code = (FPU_modrm << 8) | byte1;
+-      if ( ! ( (((code & 0xf803) == 0xe003) ||    /* fnclex, fninit, fnstsw */
+-		(((code & 0x3003) == 0x3001) &&   /* fnsave, fnstcw, fnstenv,
+-						     fnstsw */
+-		 ((code & 0xc000) != 0xc000))) ) )
+-	{
+-	  /*
+-	   *  We need to simulate the action of the kernel to FPU
+-	   *  interrupts here.
+-	   */
+-	do_the_FPU_interrupt:
+-
+-	  FPU_EIP = FPU_ORIG_EIP;	/* Point to current FPU instruction. */
+-
+-	  RE_ENTRANT_CHECK_OFF;
+-	  current->thread.trap_no = 16;
+-	  current->thread.error_code = 0;
+-	  send_sig(SIGFPE, current, 1);
+-	  return;
+-	}
+-    }
+-
+-  entry_sel_off.offset = FPU_ORIG_EIP;
+-  entry_sel_off.selector = FPU_CS;
+-  entry_sel_off.opcode = (byte1 << 8) | FPU_modrm;
+-
+-  FPU_rm = FPU_modrm & 7;
+-
+-  if ( FPU_modrm < 0300 )
+-    {
+-      /* All of these instructions use the mod/rm byte to get a data address */
+-
+-      if ( (addr_modes.default_mode & SIXTEEN)
+-	  ^ (addr_modes.override.address_size == ADDR_SIZE_PREFIX) )
+-	data_address = FPU_get_address_16(FPU_modrm, &FPU_EIP, &data_sel_off,
+-					  addr_modes);
+-      else
+-	data_address = FPU_get_address(FPU_modrm, &FPU_EIP, &data_sel_off,
+-				       addr_modes);
+-
+-      if ( addr_modes.default_mode )
+-	{
+-	  if ( FPU_EIP-1 > code_limit )
+-	    math_abort(FPU_info,SIGSEGV);
+ 	}
  
-+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
-+{
-+	early_init_amd_mc(c);
+-      if ( !(byte1 & 1) )
+-	{
+-	  unsigned short status1 = partial_status;
+-
+-	  st0_ptr = &st(0);
+-	  st0_tag = FPU_gettag0();
+-
+-	  /* Stack underflow has priority */
+-	  if ( NOT_EMPTY_ST0 )
+-	    {
+-	      if ( addr_modes.default_mode & PROTECTED )
+-		{
+-		  /* This table works for 16 and 32 bit protected mode */
+-		  if ( access_limit < data_sizes_16[(byte1 >> 1) & 3] )
+-		    math_abort(FPU_info,SIGSEGV);
++	RE_ENTRANT_CHECK_OFF;
++	FPU_code_access_ok(1);
++	FPU_get_user(FPU_modrm, (u_char __user *) FPU_EIP);
++	RE_ENTRANT_CHECK_ON;
++	FPU_EIP++;
 +
-+ 	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-+	if (c->x86_power & (1<<8))
-+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-+}
++	if (partial_status & SW_Summary) {
++		/* Ignore the error for now if the current instruction is a no-wait
++		   control instruction */
++		/* The 80486 manual contradicts itself on this topic,
++		   but a real 80486 uses the following instructions:
++		   fninit, fnstenv, fnsave, fnstsw, fnstenv, fnclex.
++		 */
++		code = (FPU_modrm << 8) | byte1;
++		if (!((((code & 0xf803) == 0xe003) ||	/* fnclex, fninit, fnstsw */
++		       (((code & 0x3003) == 0x3001) &&	/* fnsave, fnstcw, fnstenv,
++							   fnstsw */
++			((code & 0xc000) != 0xc000))))) {
++			/*
++			 *  We need to simulate the action of the kernel to FPU
++			 *  interrupts here.
++			 */
++		      do_the_FPU_interrupt:
 +
- static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- {
- 	unsigned level;
-@@ -624,7 +657,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- 	/*
- 	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
- 	 * bit 6 of msr C001_0015
-- 	 *
-+	 *
- 	 * Errata 63 for SH-B3 steppings
- 	 * Errata 122 for all steppings (F+ have it disabled by default)
- 	 */
-@@ -637,35 +670,32 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- 
- 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
- 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
--	clear_bit(0*32+31, &c->x86_capability);
--	
-+	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);
++			FPU_EIP = FPU_ORIG_EIP;	/* Point to current FPU instruction. */
 +
- 	/* On C+ stepping K8 rep microcode works well for copy/memset */
- 	level = cpuid_eax(1);
--	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
--		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-+	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
-+			     level >= 0x0f58))
-+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
- 	if (c->x86 == 0x10 || c->x86 == 0x11)
--		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
- 
- 	/* Enable workaround for FXSAVE leak */
- 	if (c->x86 >= 6)
--		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
-+		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
- 
- 	level = get_model_name(c);
- 	if (!level) {
--		switch (c->x86) { 
-+		switch (c->x86) {
- 		case 15:
- 			/* Should distinguish Models here, but this is only
- 			   a fallback anyways. */
- 			strcpy(c->x86_model_id, "Hammer");
--			break; 
--		} 
--	} 
-+			break;
-+		}
++			RE_ENTRANT_CHECK_OFF;
++			current->thread.trap_no = 16;
++			current->thread.error_code = 0;
++			send_sig(SIGFPE, current, 1);
++			return;
+ 		}
 +	}
- 	display_cacheinfo(c);
  
--	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
--	if (c->x86_power & (1<<8))
--		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
--
- 	/* Multi core CPU? */
- 	if (c->extended_cpuid_level >= 0x80000008)
- 		amd_detect_cmp(c);
-@@ -677,41 +707,38 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- 		num_cache_leaves = 3;
+-	      unmasked = 0;  /* Do this here to stop compiler warnings. */
+-	      switch ( (byte1 >> 1) & 3 )
+-		{
+-		case 0:
+-		  unmasked = FPU_load_single((float __user *)data_address,
+-					     &loaded_data);
+-		  loaded_tag = unmasked & 0xff;
+-		  unmasked &= ~0xff;
+-		  break;
+-		case 1:
+-		  loaded_tag = FPU_load_int32((long __user *)data_address, &loaded_data);
+-		  break;
+-		case 2:
+-		  unmasked = FPU_load_double((double __user *)data_address,
+-					     &loaded_data);
+-		  loaded_tag = unmasked & 0xff;
+-		  unmasked &= ~0xff;
+-		  break;
+-		case 3:
+-		default:  /* Used here to suppress gcc warnings. */
+-		  loaded_tag = FPU_load_int16((short __user *)data_address, &loaded_data);
+-		  break;
+-		}
++	entry_sel_off.offset = FPU_ORIG_EIP;
++	entry_sel_off.selector = FPU_CS;
++	entry_sel_off.opcode = (byte1 << 8) | FPU_modrm;
  
- 	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
--		set_bit(X86_FEATURE_K8, &c->x86_capability);
+-	      /* No more access to user memory, it is safe
+-		 to use static data now */
 -
--	/* RDTSC can be speculated around */
--	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-+		set_cpu_cap(c, X86_FEATURE_K8);
+-	      /* NaN operands have the next priority. */
+-	      /* We have to delay looking at st(0) until after
+-		 loading the data, because that data might contain an SNaN */
+-	      if ( ((st0_tag == TAG_Special) && isNaN(st0_ptr)) ||
+-		  ((loaded_tag == TAG_Special) && isNaN(&loaded_data)) )
+-		{
+-		  /* Restore the status word; we might have loaded a
+-		     denormal. */
+-		  partial_status = status1;
+-		  if ( (FPU_modrm & 0x30) == 0x10 )
+-		    {
+-		      /* fcom or fcomp */
+-		      EXCEPTION(EX_Invalid);
+-		      setcc(SW_C3 | SW_C2 | SW_C0);
+-		      if ( (FPU_modrm & 0x08) && (control_word & CW_Invalid) )
+-			FPU_pop();             /* fcomp, masked, so we pop. */
+-		    }
+-		  else
+-		    {
+-		      if ( loaded_tag == TAG_Special )
+-			loaded_tag = FPU_Special(&loaded_data);
+-#ifdef PECULIAR_486
+-		      /* This is not really needed, but gives behaviour
+-			 identical to an 80486 */
+-		      if ( (FPU_modrm & 0x28) == 0x20 )
+-			/* fdiv or fsub */
+-			real_2op_NaN(&loaded_data, loaded_tag, 0, &loaded_data);
+-		      else
+-#endif /* PECULIAR_486 */ 
+-			/* fadd, fdivr, fmul, or fsubr */
+-			real_2op_NaN(&loaded_data, loaded_tag, 0, st0_ptr);
+-		    }
+-		  goto reg_mem_instr_done;
+-		}
++	FPU_rm = FPU_modrm & 7;
  
--	/* Family 10 doesn't support C states in MWAIT so don't use it */
--	if (c->x86 == 0x10 && !force_mwait)
--		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
-+	/* MFENCE stops RDTSC speculation */
-+	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+-	      if ( unmasked && !((FPU_modrm & 0x30) == 0x10) )
+-		{
+-		  /* Is not a comparison instruction. */
+-		  if ( (FPU_modrm & 0x38) == 0x38 )
+-		    {
+-		      /* fdivr */
+-		      if ( (st0_tag == TAG_Zero) &&
+-			   ((loaded_tag == TAG_Valid)
+-			    || (loaded_tag == TAG_Special
+-				&& isdenormal(&loaded_data))) )
+-			{
+-			  if ( FPU_divide_by_zero(0, getsign(&loaded_data))
+-			       < 0 )
+-			    {
+-			      /* We use the fact here that the unmasked
+-				 exception in the loaded data was for a
+-				 denormal operand */
+-			      /* Restore the state of the denormal op bit */
+-			      partial_status &= ~SW_Denorm_Op;
+-			      partial_status |= status1 & SW_Denorm_Op;
+-			    }
+-			  else
+-			    setsign(st0_ptr, getsign(&loaded_data));
+-			}
+-		    }
+-		  goto reg_mem_instr_done;
+-		}
++	if (FPU_modrm < 0300) {
++		/* All of these instructions use the mod/rm byte to get a data address */
  
- 	if (amd_apic_timer_broken())
- 		disable_apic_timer = 1;
- }
+-	      switch ( (FPU_modrm >> 3) & 7 )
+-		{
+-		case 0:         /* fadd */
+-		  clear_C1();
+-		  FPU_add(&loaded_data, loaded_tag, 0, control_word);
+-		  break;
+-		case 1:         /* fmul */
+-		  clear_C1();
+-		  FPU_mul(&loaded_data, loaded_tag, 0, control_word);
+-		  break;
+-		case 2:         /* fcom */
+-		  FPU_compare_st_data(&loaded_data, loaded_tag);
+-		  break;
+-		case 3:         /* fcomp */
+-		  if ( !FPU_compare_st_data(&loaded_data, loaded_tag)
+-		       && !unmasked )
+-		    FPU_pop();
+-		  break;
+-		case 4:         /* fsub */
+-		  clear_C1();
+-		  FPU_sub(LOADED|loaded_tag, (int)&loaded_data, control_word);
+-		  break;
+-		case 5:         /* fsubr */
+-		  clear_C1();
+-		  FPU_sub(REV|LOADED|loaded_tag, (int)&loaded_data, control_word);
+-		  break;
+-		case 6:         /* fdiv */
+-		  clear_C1();
+-		  FPU_div(LOADED|loaded_tag, (int)&loaded_data, control_word);
+-		  break;
+-		case 7:         /* fdivr */
+-		  clear_C1();
+-		  if ( st0_tag == TAG_Zero )
+-		    partial_status = status1;  /* Undo any denorm tag,
+-						  zero-divide has priority. */
+-		  FPU_div(REV|LOADED|loaded_tag, (int)&loaded_data, control_word);
+-		  break;
++		if ((addr_modes.default_mode & SIXTEEN)
++		    ^ (addr_modes.override.address_size == ADDR_SIZE_PREFIX))
++			data_address =
++			    FPU_get_address_16(FPU_modrm, &FPU_EIP,
++					       &data_sel_off, addr_modes);
++		else
++			data_address =
++			    FPU_get_address(FPU_modrm, &FPU_EIP, &data_sel_off,
++					    addr_modes);
++
++		if (addr_modes.default_mode) {
++			if (FPU_EIP - 1 > code_limit)
++				math_abort(FPU_info, SIGSEGV);
+ 		}
+-	    }
+-	  else
+-	    {
+-	      if ( (FPU_modrm & 0x30) == 0x10 )
+-		{
+-		  /* The instruction is fcom or fcomp */
+-		  EXCEPTION(EX_StackUnder);
+-		  setcc(SW_C3 | SW_C2 | SW_C0);
+-		  if ( (FPU_modrm & 0x08) && (control_word & CW_Invalid) )
+-		    FPU_pop();             /* fcomp */
++
++		if (!(byte1 & 1)) {
++			unsigned short status1 = partial_status;
++
++			st0_ptr = &st(0);
++			st0_tag = FPU_gettag0();
++
++			/* Stack underflow has priority */
++			if (NOT_EMPTY_ST0) {
++				if (addr_modes.default_mode & PROTECTED) {
++					/* This table works for 16 and 32 bit protected mode */
++					if (access_limit <
++					    data_sizes_16[(byte1 >> 1) & 3])
++						math_abort(FPU_info, SIGSEGV);
++				}
++
++				unmasked = 0;	/* Do this here to stop compiler warnings. */
++				switch ((byte1 >> 1) & 3) {
++				case 0:
++					unmasked =
++					    FPU_load_single((float __user *)
++							    data_address,
++							    &loaded_data);
++					loaded_tag = unmasked & 0xff;
++					unmasked &= ~0xff;
++					break;
++				case 1:
++					loaded_tag =
++					    FPU_load_int32((long __user *)
++							   data_address,
++							   &loaded_data);
++					break;
++				case 2:
++					unmasked =
++					    FPU_load_double((double __user *)
++							    data_address,
++							    &loaded_data);
++					loaded_tag = unmasked & 0xff;
++					unmasked &= ~0xff;
++					break;
++				case 3:
++				default:	/* Used here to suppress gcc warnings. */
++					loaded_tag =
++					    FPU_load_int16((short __user *)
++							   data_address,
++							   &loaded_data);
++					break;
++				}
++
++				/* No more access to user memory, it is safe
++				   to use static data now */
++
++				/* NaN operands have the next priority. */
++				/* We have to delay looking at st(0) until after
++				   loading the data, because that data might contain an SNaN */
++				if (((st0_tag == TAG_Special) && isNaN(st0_ptr))
++				    || ((loaded_tag == TAG_Special)
++					&& isNaN(&loaded_data))) {
++					/* Restore the status word; we might have loaded a
++					   denormal. */
++					partial_status = status1;
++					if ((FPU_modrm & 0x30) == 0x10) {
++						/* fcom or fcomp */
++						EXCEPTION(EX_Invalid);
++						setcc(SW_C3 | SW_C2 | SW_C0);
++						if ((FPU_modrm & 0x08)
++						    && (control_word &
++							CW_Invalid))
++							FPU_pop();	/* fcomp, masked, so we pop. */
++					} else {
++						if (loaded_tag == TAG_Special)
++							loaded_tag =
++							    FPU_Special
++							    (&loaded_data);
++#ifdef PECULIAR_486
++						/* This is not really needed, but gives behaviour
++						   identical to an 80486 */
++						if ((FPU_modrm & 0x28) == 0x20)
++							/* fdiv or fsub */
++							real_2op_NaN
++							    (&loaded_data,
++							     loaded_tag, 0,
++							     &loaded_data);
++						else
++#endif /* PECULIAR_486 */
++							/* fadd, fdivr, fmul, or fsubr */
++							real_2op_NaN
++							    (&loaded_data,
++							     loaded_tag, 0,
++							     st0_ptr);
++					}
++					goto reg_mem_instr_done;
++				}
++
++				if (unmasked && !((FPU_modrm & 0x30) == 0x10)) {
++					/* Is not a comparison instruction. */
++					if ((FPU_modrm & 0x38) == 0x38) {
++						/* fdivr */
++						if ((st0_tag == TAG_Zero) &&
++						    ((loaded_tag == TAG_Valid)
++						     || (loaded_tag ==
++							 TAG_Special
++							 &&
++							 isdenormal
++							 (&loaded_data)))) {
++							if (FPU_divide_by_zero
++							    (0,
++							     getsign
++							     (&loaded_data))
++							    < 0) {
++								/* We use the fact here that the unmasked
++								   exception in the loaded data was for a
++								   denormal operand */
++								/* Restore the state of the denormal op bit */
++								partial_status
++								    &=
++								    ~SW_Denorm_Op;
++								partial_status
++								    |=
++								    status1 &
++								    SW_Denorm_Op;
++							} else
++								setsign(st0_ptr,
++									getsign
++									(&loaded_data));
++						}
++					}
++					goto reg_mem_instr_done;
++				}
++
++				switch ((FPU_modrm >> 3) & 7) {
++				case 0:	/* fadd */
++					clear_C1();
++					FPU_add(&loaded_data, loaded_tag, 0,
++						control_word);
++					break;
++				case 1:	/* fmul */
++					clear_C1();
++					FPU_mul(&loaded_data, loaded_tag, 0,
++						control_word);
++					break;
++				case 2:	/* fcom */
++					FPU_compare_st_data(&loaded_data,
++							    loaded_tag);
++					break;
++				case 3:	/* fcomp */
++					if (!FPU_compare_st_data
++					    (&loaded_data, loaded_tag)
++					    && !unmasked)
++						FPU_pop();
++					break;
++				case 4:	/* fsub */
++					clear_C1();
++					FPU_sub(LOADED | loaded_tag,
++						(int)&loaded_data,
++						control_word);
++					break;
++				case 5:	/* fsubr */
++					clear_C1();
++					FPU_sub(REV | LOADED | loaded_tag,
++						(int)&loaded_data,
++						control_word);
++					break;
++				case 6:	/* fdiv */
++					clear_C1();
++					FPU_div(LOADED | loaded_tag,
++						(int)&loaded_data,
++						control_word);
++					break;
++				case 7:	/* fdivr */
++					clear_C1();
++					if (st0_tag == TAG_Zero)
++						partial_status = status1;	/* Undo any denorm tag,
++										   zero-divide has priority. */
++					FPU_div(REV | LOADED | loaded_tag,
++						(int)&loaded_data,
++						control_word);
++					break;
++				}
++			} else {
++				if ((FPU_modrm & 0x30) == 0x10) {
++					/* The instruction is fcom or fcomp */
++					EXCEPTION(EX_StackUnder);
++					setcc(SW_C3 | SW_C2 | SW_C0);
++					if ((FPU_modrm & 0x08)
++					    && (control_word & CW_Invalid))
++						FPU_pop();	/* fcomp */
++				} else
++					FPU_stack_underflow();
++			}
++		      reg_mem_instr_done:
++			operand_address = data_sel_off;
++		} else {
++			if (!(no_ip_update =
++			      FPU_load_store(((FPU_modrm & 0x38) | (byte1 & 6))
++					     >> 1, addr_modes, data_address))) {
++				operand_address = data_sel_off;
++			}
+ 		}
+-	      else
+-		FPU_stack_underflow();
+-	    }
+-	reg_mem_instr_done:
+-	  operand_address = data_sel_off;
+-	}
+-      else
+-	{
+-	  if ( !(no_ip_update =
+-		 FPU_load_store(((FPU_modrm & 0x38) | (byte1 & 6)) >> 1,
+-				addr_modes, data_address)) )
+-	    {
+-	      operand_address = data_sel_off;
+-	    }
+-	}
  
--static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
- {
- #ifdef CONFIG_SMP
--	u32 	eax, ebx, ecx, edx;
--	int 	index_msb, core_bits;
-+	u32 eax, ebx, ecx, edx;
-+	int index_msb, core_bits;
+-    }
+-  else
+-    {
+-      /* None of these instructions access user memory */
+-      u_char instr_index = (FPU_modrm & 0x38) | (byte1 & 7);
++	} else {
++		/* None of these instructions access user memory */
++		u_char instr_index = (FPU_modrm & 0x38) | (byte1 & 7);
  
- 	cpuid(1, &eax, &ebx, &ecx, &edx);
+ #ifdef PECULIAR_486
+-      /* This is supposed to be undefined, but a real 80486 seems
+-	 to do this: */
+-      operand_address.offset = 0;
+-      operand_address.selector = FPU_DS;
++		/* This is supposed to be undefined, but a real 80486 seems
++		   to do this: */
++		operand_address.offset = 0;
++		operand_address.selector = FPU_DS;
+ #endif /* PECULIAR_486 */
  
+-      st0_ptr = &st(0);
+-      st0_tag = FPU_gettag0();
+-      switch ( type_table[(int) instr_index] )
+-	{
+-	case _NONE_:   /* also _REGIc: _REGIn */
+-	  break;
+-	case _REG0_:
+-	  if ( !NOT_EMPTY_ST0 )
+-	    {
+-	      FPU_stack_underflow();
+-	      goto FPU_instruction_done;
+-	    }
+-	  break;
+-	case _REGIi:
+-	  if ( !NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm) )
+-	    {
+-	      FPU_stack_underflow_i(FPU_rm);
+-	      goto FPU_instruction_done;
+-	    }
+-	  break;
+-	case _REGIp:
+-	  if ( !NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm) )
+-	    {
+-	      FPU_stack_underflow_pop(FPU_rm);
+-	      goto FPU_instruction_done;
+-	    }
+-	  break;
+-	case _REGI_:
+-	  if ( !NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm) )
+-	    {
+-	      FPU_stack_underflow();
+-	      goto FPU_instruction_done;
+-	    }
+-	  break;
+-	case _PUSH_:     /* Only used by the fld st(i) instruction */
+-	  break;
+-	case _null_:
+-	  FPU_illegal();
+-	  goto FPU_instruction_done;
+-	default:
+-	  EXCEPTION(EX_INTERNAL|0x111);
+-	  goto FPU_instruction_done;
+-	}
+-      (*st_instr_table[(int) instr_index])();
++		st0_ptr = &st(0);
++		st0_tag = FPU_gettag0();
++		switch (type_table[(int)instr_index]) {
++		case _NONE_:	/* also _REGIc: _REGIn */
++			break;
++		case _REG0_:
++			if (!NOT_EMPTY_ST0) {
++				FPU_stack_underflow();
++				goto FPU_instruction_done;
++			}
++			break;
++		case _REGIi:
++			if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
++				FPU_stack_underflow_i(FPU_rm);
++				goto FPU_instruction_done;
++			}
++			break;
++		case _REGIp:
++			if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
++				FPU_stack_underflow_pop(FPU_rm);
++				goto FPU_instruction_done;
++			}
++			break;
++		case _REGI_:
++			if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
++				FPU_stack_underflow();
++				goto FPU_instruction_done;
++			}
++			break;
++		case _PUSH_:	/* Only used by the fld st(i) instruction */
++			break;
++		case _null_:
++			FPU_illegal();
++			goto FPU_instruction_done;
++		default:
++			EXCEPTION(EX_INTERNAL | 0x111);
++			goto FPU_instruction_done;
++		}
++		(*st_instr_table[(int)instr_index]) ();
  
- 	if (!cpu_has(c, X86_FEATURE_HT))
- 		return;
-- 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
- 		goto out;
+-FPU_instruction_done:
+-      ;
+-    }
++	      FPU_instruction_done:
++		;
++	}
  
- 	smp_num_siblings = (ebx & 0xff0000) >> 16;
+-  if ( ! no_ip_update )
+-    instruction_address = entry_sel_off;
++	if (!no_ip_update)
++		instruction_address = entry_sel_off;
  
- 	if (smp_num_siblings == 1) {
- 		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
--	} else if (smp_num_siblings > 1 ) {
-+	} else if (smp_num_siblings > 1) {
+-FPU_fwait_done:
++      FPU_fwait_done:
  
- 		if (smp_num_siblings > NR_CPUS) {
--			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
-+			printk(KERN_WARNING "CPU: Unsupported number of "
-+			       "siblings %d", smp_num_siblings);
- 			smp_num_siblings = 1;
- 			return;
- 		}
-@@ -721,7 +748,7 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+ #ifdef DEBUG
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_printall();
+-  RE_ENTRANT_CHECK_ON;
++	RE_ENTRANT_CHECK_OFF;
++	FPU_printall();
++	RE_ENTRANT_CHECK_ON;
+ #endif /* DEBUG */
  
- 		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+-  if (FPU_lookahead && !need_resched())
+-    {
+-      FPU_ORIG_EIP = FPU_EIP - code_base;
+-      if ( valid_prefix(&byte1, (u_char __user **)&FPU_EIP,
+-			&addr_modes.override) )
+-	goto do_another_FPU_instruction;
+-    }
++	if (FPU_lookahead && !need_resched()) {
++		FPU_ORIG_EIP = FPU_EIP - code_base;
++		if (valid_prefix(&byte1, (u_char __user **) & FPU_EIP,
++				 &addr_modes.override))
++			goto do_another_FPU_instruction;
++	}
  
--		index_msb = get_count_order(smp_num_siblings) ;
-+		index_msb = get_count_order(smp_num_siblings);
+-  if ( addr_modes.default_mode )
+-    FPU_EIP -= code_base;
++	if (addr_modes.default_mode)
++		FPU_EIP -= code_base;
  
- 		core_bits = get_count_order(c->x86_max_cores);
+-  RE_ENTRANT_CHECK_OFF;
++	RE_ENTRANT_CHECK_OFF;
+ }
  
-@@ -730,8 +757,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
- 	}
- out:
- 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
--		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
--		printk(KERN_INFO  "CPU: Processor Core ID: %d\n", c->cpu_core_id);
-+		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-+		       c->phys_proc_id);
-+		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-+		       c->cpu_core_id);
- 	}
+-
+ /* Support for prefix bytes is not yet complete. To properly handle
+    all prefix bytes, further changes are needed in the emulator code
+    which accesses user address space. Access to separate segments is
+    important for msdos emulation. */
+ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
+-			overrides *override)
++			overrides * override)
+ {
+-  u_char byte;
+-  u_char __user *ip = *fpu_eip;
+-
+-  *override = (overrides) { 0, 0, PREFIX_DEFAULT };       /* defaults */
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_code_access_ok(1);
+-  FPU_get_user(byte, ip);
+-  RE_ENTRANT_CHECK_ON;
+-
+-  while ( 1 )
+-    {
+-      switch ( byte )
+-	{
+-	case ADDR_SIZE_PREFIX:
+-	  override->address_size = ADDR_SIZE_PREFIX;
+-	  goto do_next_byte;
+-
+-	case OP_SIZE_PREFIX:
+-	  override->operand_size = OP_SIZE_PREFIX;
+-	  goto do_next_byte;
+-
+-	case PREFIX_CS:
+-	  override->segment = PREFIX_CS_;
+-	  goto do_next_byte;
+-	case PREFIX_ES:
+-	  override->segment = PREFIX_ES_;
+-	  goto do_next_byte;
+-	case PREFIX_SS:
+-	  override->segment = PREFIX_SS_;
+-	  goto do_next_byte;
+-	case PREFIX_FS:
+-	  override->segment = PREFIX_FS_;
+-	  goto do_next_byte;
+-	case PREFIX_GS:
+-	  override->segment = PREFIX_GS_;
+-	  goto do_next_byte;
+-	case PREFIX_DS:
+-	  override->segment = PREFIX_DS_;
+-	  goto do_next_byte;
++	u_char byte;
++	u_char __user *ip = *fpu_eip;
++
++	*override = (overrides) {
++	0, 0, PREFIX_DEFAULT};	/* defaults */
++
++	RE_ENTRANT_CHECK_OFF;
++	FPU_code_access_ok(1);
++	FPU_get_user(byte, ip);
++	RE_ENTRANT_CHECK_ON;
++
++	while (1) {
++		switch (byte) {
++		case ADDR_SIZE_PREFIX:
++			override->address_size = ADDR_SIZE_PREFIX;
++			goto do_next_byte;
++
++		case OP_SIZE_PREFIX:
++			override->operand_size = OP_SIZE_PREFIX;
++			goto do_next_byte;
++
++		case PREFIX_CS:
++			override->segment = PREFIX_CS_;
++			goto do_next_byte;
++		case PREFIX_ES:
++			override->segment = PREFIX_ES_;
++			goto do_next_byte;
++		case PREFIX_SS:
++			override->segment = PREFIX_SS_;
++			goto do_next_byte;
++		case PREFIX_FS:
++			override->segment = PREFIX_FS_;
++			goto do_next_byte;
++		case PREFIX_GS:
++			override->segment = PREFIX_GS_;
++			goto do_next_byte;
++		case PREFIX_DS:
++			override->segment = PREFIX_DS_;
++			goto do_next_byte;
  
- #endif
-@@ -773,28 +802,39 @@ static void srat_detect_node(void)
- #endif
- }
+ /* lock is not a valid prefix for FPU instructions,
+    let the cpu handle it to generate a SIGILL. */
+ /*	case PREFIX_LOCK: */
  
-+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
-+{
-+	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-+	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+}
+-	  /* rep.. prefixes have no meaning for FPU instructions */
+-	case PREFIX_REPE:
+-	case PREFIX_REPNE:
+-
+-	do_next_byte:
+-	  ip++;
+-	  RE_ENTRANT_CHECK_OFF;
+-	  FPU_code_access_ok(1);
+-	  FPU_get_user(byte, ip);
+-	  RE_ENTRANT_CHECK_ON;
+-	  break;
+-	case FWAIT_OPCODE:
+-	  *Byte = byte;
+-	  return 1;
+-	default:
+-	  if ( (byte & 0xf8) == 0xd8 )
+-	    {
+-	      *Byte = byte;
+-	      *fpu_eip = ip;
+-	      return 1;
+-	    }
+-	  else
+-	    {
+-	      /* Not a valid sequence of prefix bytes followed by
+-		 an FPU instruction. */
+-	      *Byte = byte;  /* Needed for error message. */
+-	      return 0;
+-	    }
++			/* rep.. prefixes have no meaning for FPU instructions */
++		case PREFIX_REPE:
++		case PREFIX_REPNE:
 +
- static void __cpuinit init_intel(struct cpuinfo_x86 *c)
- {
- 	/* Cache sizes */
- 	unsigned n;
- 
- 	init_intel_cacheinfo(c);
--	if (c->cpuid_level > 9 ) {
-+	if (c->cpuid_level > 9) {
- 		unsigned eax = cpuid_eax(10);
- 		/* Check for version and the number of counters */
- 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
--			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
-+			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
- 	}
- 
- 	if (cpu_has_ds) {
- 		unsigned int l1, l2;
- 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
- 		if (!(l1 & (1<<11)))
--			set_bit(X86_FEATURE_BTS, c->x86_capability);
-+			set_cpu_cap(c, X86_FEATURE_BTS);
- 		if (!(l1 & (1<<12)))
--			set_bit(X86_FEATURE_PEBS, c->x86_capability);
-+			set_cpu_cap(c, X86_FEATURE_PEBS);
++		      do_next_byte:
++			ip++;
++			RE_ENTRANT_CHECK_OFF;
++			FPU_code_access_ok(1);
++			FPU_get_user(byte, ip);
++			RE_ENTRANT_CHECK_ON;
++			break;
++		case FWAIT_OPCODE:
++			*Byte = byte;
++			return 1;
++		default:
++			if ((byte & 0xf8) == 0xd8) {
++				*Byte = byte;
++				*fpu_eip = ip;
++				return 1;
++			} else {
++				/* Not a valid sequence of prefix bytes followed by
++				   an FPU instruction. */
++				*Byte = byte;	/* Needed for error message. */
++				return 0;
++			}
++		}
  	}
- 
-+
-+	if (cpu_has_bts)
-+		ds_init_intel(c);
-+
- 	n = c->extended_cpuid_level;
- 	if (n >= 0x80000008) {
- 		unsigned eax = cpuid_eax(0x80000008);
-@@ -811,14 +851,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
- 		c->x86_cache_alignment = c->x86_clflush_size * 2;
- 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
- 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
--		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- 	if (c->x86 == 6)
--		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
--	if (c->x86 == 15)
--		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
--	else
--		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-- 	c->x86_max_cores = intel_num_cpu_cores(c);
-+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-+	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-+	c->x86_max_cores = intel_num_cpu_cores(c);
- 
- 	srat_detect_node();
- }
-@@ -835,18 +872,12 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
- 		c->x86_vendor = X86_VENDOR_UNKNOWN;
+-    }
  }
  
--struct cpu_model_info {
--	int vendor;
--	int family;
--	char *model_names[16];
--};
 -
- /* Do some early cpuid on the boot CPU to get some parameter that are
-    needed before check_bugs. Everything advanced is in identify_cpu
-    below. */
--void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
-+static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+-void math_abort(struct info * info, unsigned int signal)
++void math_abort(struct info *info, unsigned int signal)
  {
--	u32 tfms;
-+	u32 tfms, xlvl;
- 
- 	c->loops_per_jiffy = loops_per_jiffy;
- 	c->x86_cache_size = -1;
-@@ -857,6 +888,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
- 	c->x86_clflush_size = 64;
- 	c->x86_cache_alignment = c->x86_clflush_size;
- 	c->x86_max_cores = 1;
-+	c->x86_coreid_bits = 0;
- 	c->extended_cpuid_level = 0;
- 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+ 	FPU_EIP = FPU_ORIG_EIP;
+ 	current->thread.trap_no = 16;
+ 	current->thread.error_code = 0;
+-	send_sig(signal,current,1);
++	send_sig(signal, current, 1);
+ 	RE_ENTRANT_CHECK_OFF;
+-	__asm__("movl %0,%%esp ; ret": :"g" (((long) info)-4));
++      __asm__("movl %0,%%esp ; ret": :"g"(((long)info) - 4));
+ #ifdef PARANOID
+-      printk("ERROR: wm-FPU-emu math_abort failed!\n");
++	printk("ERROR: wm-FPU-emu math_abort failed!\n");
+ #endif /* PARANOID */
+ }
  
-@@ -865,7 +897,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
- 	      (unsigned int *)&c->x86_vendor_id[0],
- 	      (unsigned int *)&c->x86_vendor_id[8],
- 	      (unsigned int *)&c->x86_vendor_id[4]);
--		
-+
- 	get_cpu_vendor(c);
+-
+-
+ #define S387 ((struct i387_soft_struct *)s387)
+ #define sstatus_word() \
+   ((S387->swd & ~SW_Top & 0xffff) | ((S387->ftop << SW_Top_Shift) & SW_Top))
  
- 	/* Initialize the standard set of capabilities */
-@@ -883,7 +915,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
- 			c->x86 += (tfms >> 20) & 0xff;
- 		if (c->x86 >= 0x6)
- 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
--		if (c->x86_capability[0] & (1<<19)) 
-+		if (c->x86_capability[0] & (1<<19))
- 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
- 	} else {
- 		/* Have CPUID level 0 only - unheard of */
-@@ -893,18 +925,6 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
- #ifdef CONFIG_SMP
- 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
- #endif
--}
+-int restore_i387_soft(void *s387, struct _fpstate __user *buf)
++int fpregs_soft_set(struct task_struct *target,
++		    const struct user_regset *regset,
++		    unsigned int pos, unsigned int count,
++		    const void *kbuf, const void __user *ubuf)
+ {
+-  u_char __user *d = (u_char __user *)buf;
+-  int offset, other, i, tags, regnr, tag, newtop;
 -
--/*
-- * This does the hard work of actually picking apart the CPU stuff...
-- */
--void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
--{
--	int i;
--	u32 xlvl;
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ, d, 7*4 + 8*10);
+-  if (__copy_from_user(&S387->cwd, d, 7*4))
+-    return -1;
+-  RE_ENTRANT_CHECK_ON;
 -
--	early_identify_cpu(c);
+-  d += 7*4;
 -
- 	/* AMD-defined flags: level 0x80000001 */
- 	xlvl = cpuid_eax(0x80000000);
- 	c->extended_cpuid_level = xlvl;
-@@ -925,6 +945,30 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- 			c->x86_capability[2] = cpuid_edx(0x80860001);
- 	}
- 
-+	c->extended_cpuid_level = cpuid_eax(0x80000000);
-+	if (c->extended_cpuid_level >= 0x80000007)
-+		c->x86_power = cpuid_edx(0x80000007);
+-  S387->ftop = (S387->swd >> SW_Top_Shift) & 7;
+-  offset = (S387->ftop & 7) * 10;
+-  other = 80 - offset;
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  /* Copy all registers in stack order. */
+-  if (__copy_from_user(((u_char *)&S387->st_space)+offset, d, other))
+-    return -1;
+-  if ( offset )
+-    if (__copy_from_user((u_char *)&S387->st_space, d+other, offset))
+-      return -1;
+-  RE_ENTRANT_CHECK_ON;
+-
+-  /* The tags may need to be corrected now. */
+-  tags = S387->twd;
+-  newtop = S387->ftop;
+-  for ( i = 0; i < 8; i++ )
+-    {
+-      regnr = (i+newtop) & 7;
+-      if ( ((tags >> ((regnr & 7)*2)) & 3) != TAG_Empty )
+-	{
+-	  /* The loaded data over-rides all other cases. */
+-	  tag = FPU_tagof((FPU_REG *)((u_char *)S387->st_space + 10*regnr));
+-	  tags &= ~(3 << (regnr*2));
+-	  tags |= (tag & 3) << (regnr*2);
++	struct i387_soft_struct *s387 = &target->thread.i387.soft;
++	void *space = s387->st_space;
++	int ret;
++	int offset, other, i, tags, regnr, tag, newtop;
 +
-+	switch (c->x86_vendor) {
-+	case X86_VENDOR_AMD:
-+		early_init_amd(c);
-+		break;
-+	case X86_VENDOR_INTEL:
-+		early_init_intel(c);
-+		break;
-+	}
++	RE_ENTRANT_CHECK_OFF;
++	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, s387, 0,
++				 offsetof(struct i387_soft_struct, st_space));
++	RE_ENTRANT_CHECK_ON;
 +
-+}
++	if (ret)
++		return ret;
 +
-+/*
-+ * This does the hard work of actually picking apart the CPU stuff...
-+ */
-+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-+{
-+	int i;
++	S387->ftop = (S387->swd >> SW_Top_Shift) & 7;
++	offset = (S387->ftop & 7) * 10;
++	other = 80 - offset;
 +
-+	early_identify_cpu(c);
++	RE_ENTRANT_CHECK_OFF;
 +
- 	init_scattered_cpuid_features(c);
- 
- 	c->apicid = phys_pkg_id(0);
-@@ -954,8 +998,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- 		break;
++	/* Copy all registers in stack order. */
++	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
++				 space + offset, 0, other);
++	if (!ret && offset)
++		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
++					 space, 0, offset);
++
++	RE_ENTRANT_CHECK_ON;
++
++	/* The tags may need to be corrected now. */
++	tags = S387->twd;
++	newtop = S387->ftop;
++	for (i = 0; i < 8; i++) {
++		regnr = (i + newtop) & 7;
++		if (((tags >> ((regnr & 7) * 2)) & 3) != TAG_Empty) {
++			/* The loaded data over-rides all other cases. */
++			tag =
++			    FPU_tagof((FPU_REG *) ((u_char *) S387->st_space +
++						   10 * regnr));
++			tags &= ~(3 << (regnr * 2));
++			tags |= (tag & 3) << (regnr * 2);
++		}
  	}
+-    }
+-  S387->twd = tags;
++	S387->twd = tags;
  
--	select_idle_routine(c);
--	detect_ht(c); 
-+	detect_ht(c);
+-  return 0;
++	return ret;
+ }
  
- 	/*
- 	 * On SMP, boot_cpu_data holds the common feature set between
-@@ -965,32 +1008,56 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- 	 */
- 	if (c != &boot_cpu_data) {
- 		/* AND the already accumulated flags with these */
--		for (i = 0 ; i < NCAPINTS ; i++)
-+		for (i = 0; i < NCAPINTS; i++)
- 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
- 	}
+-
+-int save_i387_soft(void *s387, struct _fpstate __user * buf)
++int fpregs_soft_get(struct task_struct *target,
++		    const struct user_regset *regset,
++		    unsigned int pos, unsigned int count,
++		    void *kbuf, void __user *ubuf)
+ {
+-  u_char __user *d = (u_char __user *)buf;
+-  int offset = (S387->ftop & 7) * 10, other = 80 - offset;
++	struct i387_soft_struct *s387 = &target->thread.i387.soft;
++	const void *space = s387->st_space;
++	int ret;
++	int offset = (S387->ftop & 7) * 10, other = 80 - offset;
++
++	RE_ENTRANT_CHECK_OFF;
  
-+	/* Clear all flags overriden by options */
-+	for (i = 0; i < NCAPINTS; i++)
-+		c->x86_capability[i] ^= cleared_cpu_caps[i];
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_WRITE, d, 7*4 + 8*10);
+ #ifdef PECULIAR_486
+-  S387->cwd &= ~0xe080;
+-  /* An 80486 sets nearly all of the reserved bits to 1. */
+-  S387->cwd |= 0xffff0040;
+-  S387->swd = sstatus_word() | 0xffff0000;
+-  S387->twd |= 0xffff0000;
+-  S387->fcs &= ~0xf8000000;
+-  S387->fos |= 0xffff0000;
++	S387->cwd &= ~0xe080;
++	/* An 80486 sets nearly all of the reserved bits to 1. */
++	S387->cwd |= 0xffff0040;
++	S387->swd = sstatus_word() | 0xffff0000;
++	S387->twd |= 0xffff0000;
++	S387->fcs &= ~0xf8000000;
++	S387->fos |= 0xffff0000;
+ #endif /* PECULIAR_486 */
+-  if (__copy_to_user(d, &S387->cwd, 7*4))
+-    return -1;
+-  RE_ENTRANT_CHECK_ON;
+-
+-  d += 7*4;
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  /* Copy all registers in stack order. */
+-  if (__copy_to_user(d, ((u_char *)&S387->st_space)+offset, other))
+-    return -1;
+-  if ( offset )
+-    if (__copy_to_user(d+other, (u_char *)&S387->st_space, offset))
+-      return -1;
+-  RE_ENTRANT_CHECK_ON;
+-
+-  return 1;
 +
- #ifdef CONFIG_X86_MCE
- 	mcheck_init(c);
- #endif
-+	select_idle_routine(c);
++	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, s387, 0,
++				  offsetof(struct i387_soft_struct, st_space));
 +
- 	if (c != &boot_cpu_data)
- 		mtrr_ap_init();
- #ifdef CONFIG_NUMA
- 	numa_add_cpu(smp_processor_id());
- #endif
++	/* Copy all registers in stack order. */
++	if (!ret)
++		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
++					  space + offset, 0, other);
++	if (!ret)
++		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
++					  space, 0, offset);
 +
-+}
++	RE_ENTRANT_CHECK_ON;
 +
-+static __init int setup_noclflush(char *arg)
-+{
-+	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
-+	return 1;
++	return ret;
  }
-- 
-+__setup("noclflush", setup_noclflush);
+diff --git a/arch/x86/math-emu/fpu_etc.c b/arch/x86/math-emu/fpu_etc.c
+index e3b5d46..233e5af 100644
+--- a/arch/x86/math-emu/fpu_etc.c
++++ b/arch/x86/math-emu/fpu_etc.c
+@@ -16,128 +16,115 @@
+ #include "status_w.h"
+ #include "reg_constant.h"
  
- void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+-
+ static void fchs(FPU_REG *st0_ptr, u_char st0tag)
  {
- 	if (c->x86_model_id[0])
--		printk("%s", c->x86_model_id);
-+		printk(KERN_INFO "%s", c->x86_model_id);
- 
--	if (c->x86_mask || c->cpuid_level >= 0) 
--		printk(" stepping %02x\n", c->x86_mask);
-+	if (c->x86_mask || c->cpuid_level >= 0)
-+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
- 	else
--		printk("\n");
-+		printk(KERN_CONT "\n");
+-  if ( st0tag ^ TAG_Empty )
+-    {
+-      signbyte(st0_ptr) ^= SIGN_NEG;
+-      clear_C1();
+-    }
+-  else
+-    FPU_stack_underflow();
++	if (st0tag ^ TAG_Empty) {
++		signbyte(st0_ptr) ^= SIGN_NEG;
++		clear_C1();
++	} else
++		FPU_stack_underflow();
  }
  
-+static __init int setup_disablecpuid(char *arg)
-+{
-+	int bit;
-+	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
-+		setup_clear_cpu_cap(bit);
-+	else
-+		return 0;
-+	return 1;
-+}
-+__setup("clearcpuid=", setup_disablecpuid);
-+
- /*
-  *	Get CPU information for use by the procfs.
-  */
-@@ -998,9 +1065,9 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
- static int show_cpuinfo(struct seq_file *m, void *v)
+-
+ static void fabs(FPU_REG *st0_ptr, u_char st0tag)
  {
- 	struct cpuinfo_x86 *c = v;
--	int cpu = 0;
-+	int cpu = 0, i;
- 
--	/* 
-+	/*
- 	 * These flag bits must match the definitions in <asm/cpufeature.h>.
- 	 * NULL means this bit is undefined or reserved; either way it doesn't
- 	 * have meaning as far as Linux is concerned.  Note that it's important
-@@ -1010,10 +1077,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
- 	 */
- 	static const char *const x86_cap_flags[] = {
- 		/* Intel-defined */
--	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
--	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
--	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
--	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
-+		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-+		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-+		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-+		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
- 
- 		/* AMD-defined */
- 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-@@ -1080,34 +1147,35 @@ static int show_cpuinfo(struct seq_file *m, void *v)
- 	cpu = c->cpu_index;
- #endif
- 
--	seq_printf(m,"processor\t: %u\n"
--		     "vendor_id\t: %s\n"
--		     "cpu family\t: %d\n"
--		     "model\t\t: %d\n"
--		     "model name\t: %s\n",
--		     (unsigned)cpu,
--		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
--		     c->x86,
--		     (int)c->x86_model,
--		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
--	
-+	seq_printf(m, "processor\t: %u\n"
-+		   "vendor_id\t: %s\n"
-+		   "cpu family\t: %d\n"
-+		   "model\t\t: %d\n"
-+		   "model name\t: %s\n",
-+		   (unsigned)cpu,
-+		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-+		   c->x86,
-+		   (int)c->x86_model,
-+		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
-+
- 	if (c->x86_mask || c->cpuid_level >= 0)
- 		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
- 	else
- 		seq_printf(m, "stepping\t: unknown\n");
--	
--	if (cpu_has(c,X86_FEATURE_TSC)) {
-+
-+	if (cpu_has(c, X86_FEATURE_TSC)) {
- 		unsigned int freq = cpufreq_quick_get((unsigned)cpu);
-+
- 		if (!freq)
- 			freq = cpu_khz;
- 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
--			     freq / 1000, (freq % 1000));
-+			   freq / 1000, (freq % 1000));
- 	}
- 
- 	/* Cache size */
--	if (c->x86_cache_size >= 0) 
-+	if (c->x86_cache_size >= 0)
- 		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
--	
-+
- #ifdef CONFIG_SMP
- 	if (smp_num_siblings * c->x86_max_cores > 1) {
- 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-@@ -1116,48 +1184,43 @@ static int show_cpuinfo(struct seq_file *m, void *v)
- 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
- 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
- 	}
--#endif	
-+#endif
- 
- 	seq_printf(m,
--	        "fpu\t\t: yes\n"
--	        "fpu_exception\t: yes\n"
--	        "cpuid level\t: %d\n"
--	        "wp\t\t: yes\n"
--	        "flags\t\t:",
-+		   "fpu\t\t: yes\n"
-+		   "fpu_exception\t: yes\n"
-+		   "cpuid level\t: %d\n"
-+		   "wp\t\t: yes\n"
-+		   "flags\t\t:",
- 		   c->cpuid_level);
- 
--	{ 
--		int i; 
--		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
--			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
--				seq_printf(m, " %s", x86_cap_flags[i]);
--	}
--		
-+	for (i = 0; i < 32*NCAPINTS; i++)
-+		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
-+			seq_printf(m, " %s", x86_cap_flags[i]);
-+
- 	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
- 		   c->loops_per_jiffy/(500000/HZ),
- 		   (c->loops_per_jiffy/(5000/HZ)) % 100);
- 
--	if (c->x86_tlbsize > 0) 
-+	if (c->x86_tlbsize > 0)
- 		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
- 	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
- 	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
- 
--	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
-+	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
- 		   c->x86_phys_bits, c->x86_virt_bits);
+-  if ( st0tag ^ TAG_Empty )
+-    {
+-      setpositive(st0_ptr);
+-      clear_C1();
+-    }
+-  else
+-    FPU_stack_underflow();
++	if (st0tag ^ TAG_Empty) {
++		setpositive(st0_ptr);
++		clear_C1();
++	} else
++		FPU_stack_underflow();
+ }
  
- 	seq_printf(m, "power management:");
+-
+ static void ftst_(FPU_REG *st0_ptr, u_char st0tag)
+ {
+-  switch (st0tag)
+-    {
+-    case TAG_Zero:
+-      setcc(SW_C3);
+-      break;
+-    case TAG_Valid:
+-      if (getsign(st0_ptr) == SIGN_POS)
+-        setcc(0);
+-      else
+-        setcc(SW_C0);
+-      break;
+-    case TAG_Special:
+-      switch ( FPU_Special(st0_ptr) )
 -	{
--		unsigned i;
--		for (i = 0; i < 32; i++) 
--			if (c->x86_power & (1 << i)) {
--				if (i < ARRAY_SIZE(x86_power_flags) &&
--					x86_power_flags[i])
--					seq_printf(m, "%s%s",
--						x86_power_flags[i][0]?" ":"",
--						x86_power_flags[i]);
--				else
--					seq_printf(m, " [%d]", i);
--			}
-+	for (i = 0; i < 32; i++) {
-+		if (c->x86_power & (1 << i)) {
-+			if (i < ARRAY_SIZE(x86_power_flags) &&
-+			    x86_power_flags[i])
-+				seq_printf(m, "%s%s",
-+					   x86_power_flags[i][0]?" ":"",
-+					   x86_power_flags[i]);
+-	case TW_Denormal:
+-	  if (getsign(st0_ptr) == SIGN_POS)
+-	    setcc(0);
+-	  else
+-	    setcc(SW_C0);
+-	  if ( denormal_operand() < 0 )
+-	    {
+-#ifdef PECULIAR_486
+-	      /* This is weird! */
+-	      if (getsign(st0_ptr) == SIGN_POS)
++	switch (st0tag) {
++	case TAG_Zero:
+ 		setcc(SW_C3);
++		break;
++	case TAG_Valid:
++		if (getsign(st0_ptr) == SIGN_POS)
++			setcc(0);
++		else
++			setcc(SW_C0);
++		break;
++	case TAG_Special:
++		switch (FPU_Special(st0_ptr)) {
++		case TW_Denormal:
++			if (getsign(st0_ptr) == SIGN_POS)
++				setcc(0);
 +			else
-+				seq_printf(m, " [%d]", i);
++				setcc(SW_C0);
++			if (denormal_operand() < 0) {
++#ifdef PECULIAR_486
++				/* This is weird! */
++				if (getsign(st0_ptr) == SIGN_POS)
++					setcc(SW_C3);
+ #endif /* PECULIAR_486 */
+-	      return;
+-	    }
+-	  break;
+-	case TW_NaN:
+-	  setcc(SW_C0|SW_C2|SW_C3);   /* Operand is not comparable */ 
+-	  EXCEPTION(EX_Invalid);
+-	  break;
+-	case TW_Infinity:
+-	  if (getsign(st0_ptr) == SIGN_POS)
+-	    setcc(0);
+-	  else
+-	    setcc(SW_C0);
+-	  break;
+-	default:
+-	  setcc(SW_C0|SW_C2|SW_C3);   /* Operand is not comparable */ 
+-	  EXCEPTION(EX_INTERNAL|0x14);
+-	  break;
++				return;
++			}
++			break;
++		case TW_NaN:
++			setcc(SW_C0 | SW_C2 | SW_C3);	/* Operand is not comparable */
++			EXCEPTION(EX_Invalid);
++			break;
++		case TW_Infinity:
++			if (getsign(st0_ptr) == SIGN_POS)
++				setcc(0);
++			else
++				setcc(SW_C0);
++			break;
++		default:
++			setcc(SW_C0 | SW_C2 | SW_C3);	/* Operand is not comparable */
++			EXCEPTION(EX_INTERNAL | 0x14);
++			break;
 +		}
++		break;
++	case TAG_Empty:
++		setcc(SW_C0 | SW_C2 | SW_C3);
++		EXCEPTION(EX_StackUnder);
++		break;
  	}
+-      break;
+-    case TAG_Empty:
+-      setcc(SW_C0|SW_C2|SW_C3);
+-      EXCEPTION(EX_StackUnder);
+-      break;
+-    }
+ }
  
- 	seq_printf(m, "\n\n");
-@@ -1184,8 +1247,8 @@ static void c_stop(struct seq_file *m, void *v)
+-
+ static void fxam(FPU_REG *st0_ptr, u_char st0tag)
  {
+-  int c = 0;
+-  switch (st0tag)
+-    {
+-    case TAG_Empty:
+-      c = SW_C3|SW_C0;
+-      break;
+-    case TAG_Zero:
+-      c = SW_C3;
+-      break;
+-    case TAG_Valid:
+-      c = SW_C2;
+-      break;
+-    case TAG_Special:
+-      switch ( FPU_Special(st0_ptr) )
+-	{
+-	case TW_Denormal:
+-	  c = SW_C2|SW_C3;  /* Denormal */
+-	  break;
+-	case TW_NaN:
+-	  /* We also use NaN for unsupported types. */
+-	  if ( (st0_ptr->sigh & 0x80000000) && (exponent(st0_ptr) == EXP_OVER) )
+-	    c = SW_C0;
+-	  break;
+-	case TW_Infinity:
+-	  c = SW_C2|SW_C0;
+-	  break;
++	int c = 0;
++	switch (st0tag) {
++	case TAG_Empty:
++		c = SW_C3 | SW_C0;
++		break;
++	case TAG_Zero:
++		c = SW_C3;
++		break;
++	case TAG_Valid:
++		c = SW_C2;
++		break;
++	case TAG_Special:
++		switch (FPU_Special(st0_ptr)) {
++		case TW_Denormal:
++			c = SW_C2 | SW_C3;	/* Denormal */
++			break;
++		case TW_NaN:
++			/* We also use NaN for unsupported types. */
++			if ((st0_ptr->sigh & 0x80000000)
++			    && (exponent(st0_ptr) == EXP_OVER))
++				c = SW_C0;
++			break;
++		case TW_Infinity:
++			c = SW_C2 | SW_C0;
++			break;
++		}
+ 	}
+-    }
+-  if ( getsign(st0_ptr) == SIGN_NEG )
+-    c |= SW_C1;
+-  setcc(c);
++	if (getsign(st0_ptr) == SIGN_NEG)
++		c |= SW_C1;
++	setcc(c);
  }
  
--struct seq_operations cpuinfo_op = {
--	.start =c_start,
-+const struct seq_operations cpuinfo_op = {
-+	.start = c_start,
- 	.next =	c_next,
- 	.stop =	c_stop,
- 	.show =	show_cpuinfo,
-diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
-index 9bdd830..caee1f0 100644
---- a/arch/x86/kernel/signal_32.c
-+++ b/arch/x86/kernel/signal_32.c
-@@ -23,6 +23,7 @@
- #include <asm/ucontext.h>
- #include <asm/uaccess.h>
- #include <asm/i387.h>
-+#include <asm/vdso.h>
- #include "sigframe_32.h"
+-
+ static FUNC_ST0 const fp_etc_table[] = {
+-  fchs, fabs, (FUNC_ST0)FPU_illegal, (FUNC_ST0)FPU_illegal,
+-  ftst_, fxam, (FUNC_ST0)FPU_illegal, (FUNC_ST0)FPU_illegal
++	fchs, fabs, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal,
++	ftst_, fxam, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal
+ };
  
- #define DEBUG_SIG 0
-@@ -81,14 +82,14 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
+ void FPU_etc(void)
+ {
+-  (fp_etc_table[FPU_rm])(&st(0), FPU_gettag0());
++	(fp_etc_table[FPU_rm]) (&st(0), FPU_gettag0());
  }
+diff --git a/arch/x86/math-emu/fpu_proto.h b/arch/x86/math-emu/fpu_proto.h
+index 37a8a7f..aa49b6a 100644
+--- a/arch/x86/math-emu/fpu_proto.h
++++ b/arch/x86/math-emu/fpu_proto.h
+@@ -66,7 +66,7 @@ extern int FPU_Special(FPU_REG const *ptr);
+ extern int isNaN(FPU_REG const *ptr);
+ extern void FPU_pop(void);
+ extern int FPU_empty_i(int stnr);
+-extern int FPU_stackoverflow(FPU_REG **st_new_ptr);
++extern int FPU_stackoverflow(FPU_REG ** st_new_ptr);
+ extern void FPU_copy_to_regi(FPU_REG const *r, u_char tag, int stnr);
+ extern void FPU_copy_to_reg1(FPU_REG const *r, u_char tag);
+ extern void FPU_copy_to_reg0(FPU_REG const *r, u_char tag);
+@@ -75,21 +75,23 @@ extern void FPU_triga(void);
+ extern void FPU_trigb(void);
+ /* get_address.c */
+ extern void __user *FPU_get_address(u_char FPU_modrm, unsigned long *fpu_eip,
+-			 struct address *addr, fpu_addr_modes addr_modes);
++				    struct address *addr,
++				    fpu_addr_modes addr_modes);
+ extern void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
+-			    struct address *addr, fpu_addr_modes addr_modes);
++				       struct address *addr,
++				       fpu_addr_modes addr_modes);
+ /* load_store.c */
+ extern int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
+-			    void __user *data_address);
++			  void __user * data_address);
+ /* poly_2xm1.c */
+-extern int poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result);
++extern int poly_2xm1(u_char sign, FPU_REG * arg, FPU_REG *result);
+ /* poly_atan.c */
+-extern void poly_atan(FPU_REG *st0_ptr, u_char st0_tag, FPU_REG *st1_ptr,
++extern void poly_atan(FPU_REG * st0_ptr, u_char st0_tag, FPU_REG *st1_ptr,
+ 		      u_char st1_tag);
+ /* poly_l2.c */
+ extern void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign);
+ extern int poly_l2p1(u_char s0, u_char s1, FPU_REG *r0, FPU_REG *r1,
+-		     FPU_REG *d);
++		     FPU_REG * d);
+ /* poly_sin.c */
+ extern void poly_sine(FPU_REG *st0_ptr);
+ extern void poly_cos(FPU_REG *st0_ptr);
+@@ -117,10 +119,13 @@ extern int FPU_load_int32(long __user *_s, FPU_REG *loaded_data);
+ extern int FPU_load_int16(short __user *_s, FPU_REG *loaded_data);
+ extern int FPU_load_bcd(u_char __user *s);
+ extern int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag,
+-			      long double __user *d);
+-extern int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat);
+-extern int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single);
+-extern int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d);
++			      long double __user * d);
++extern int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag,
++			    double __user * dfloat);
++extern int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag,
++			    float __user * single);
++extern int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag,
++			   long long __user * d);
+ extern int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d);
+ extern int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d);
+ extern int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d);
+@@ -137,4 +142,3 @@ extern int FPU_div(int flags, int regrm, int control_w);
+ /* reg_convert.c */
+ extern int FPU_to_exp16(FPU_REG const *a, FPU_REG *x);
+ #endif /* _FPU_PROTO_H */
+-
+diff --git a/arch/x86/math-emu/fpu_tags.c b/arch/x86/math-emu/fpu_tags.c
+index cb436fe..d9c657c 100644
+--- a/arch/x86/math-emu/fpu_tags.c
++++ b/arch/x86/math-emu/fpu_tags.c
+@@ -14,114 +14,102 @@
+ #include "fpu_system.h"
+ #include "exception.h"
  
- asmlinkage int
--sys_sigaltstack(unsigned long ebx)
-+sys_sigaltstack(unsigned long bx)
+-
+ void FPU_pop(void)
  {
- 	/* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
--	struct pt_regs *regs = (struct pt_regs *)&ebx;
--	const stack_t __user *uss = (const stack_t __user *)ebx;
--	stack_t __user *uoss = (stack_t __user *)regs->ecx;
-+	struct pt_regs *regs = (struct pt_regs *)&bx;
-+	const stack_t __user *uss = (const stack_t __user *)bx;
-+	stack_t __user *uoss = (stack_t __user *)regs->cx;
- 
--	return do_sigaltstack(uss, uoss, regs->esp);
-+	return do_sigaltstack(uss, uoss, regs->sp);
+-  fpu_tag_word |= 3 << ((top & 7)*2);
+-  top++;
++	fpu_tag_word |= 3 << ((top & 7) * 2);
++	top++;
  }
  
- 
-@@ -109,12 +110,12 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
- #define COPY_SEG(seg)							\
- 	{ unsigned short tmp;						\
- 	  err |= __get_user(tmp, &sc->seg);				\
--	  regs->x##seg = tmp; }
-+	  regs->seg = tmp; }
- 
- #define COPY_SEG_STRICT(seg)						\
- 	{ unsigned short tmp;						\
- 	  err |= __get_user(tmp, &sc->seg);				\
--	  regs->x##seg = tmp|3; }
-+	  regs->seg = tmp|3; }
- 
- #define GET_SEG(seg)							\
- 	{ unsigned short tmp;						\
-@@ -130,22 +131,22 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
- 	COPY_SEG(fs);
- 	COPY_SEG(es);
- 	COPY_SEG(ds);
--	COPY(edi);
--	COPY(esi);
--	COPY(ebp);
--	COPY(esp);
--	COPY(ebx);
--	COPY(edx);
--	COPY(ecx);
--	COPY(eip);
-+	COPY(di);
-+	COPY(si);
-+	COPY(bp);
-+	COPY(sp);
-+	COPY(bx);
-+	COPY(dx);
-+	COPY(cx);
-+	COPY(ip);
- 	COPY_SEG_STRICT(cs);
- 	COPY_SEG_STRICT(ss);
- 	
- 	{
- 		unsigned int tmpflags;
--		err |= __get_user(tmpflags, &sc->eflags);
--		regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
--		regs->orig_eax = -1;		/* disable syscall checks */
-+		err |= __get_user(tmpflags, &sc->flags);
-+		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-+		regs->orig_ax = -1;		/* disable syscall checks */
- 	}
- 
- 	{
-@@ -164,7 +165,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
- 		}
- 	}
- 
--	err |= __get_user(*peax, &sc->eax);
-+	err |= __get_user(*peax, &sc->ax);
- 	return err;
- 
- badframe:
-@@ -174,9 +175,9 @@ badframe:
- asmlinkage int sys_sigreturn(unsigned long __unused)
+-
+ int FPU_gettag0(void)
  {
- 	struct pt_regs *regs = (struct pt_regs *) &__unused;
--	struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
-+	struct sigframe __user *frame = (struct sigframe __user *)(regs->sp - 8);
- 	sigset_t set;
--	int eax;
-+	int ax;
- 
- 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- 		goto badframe;
-@@ -192,17 +193,20 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
- 	recalc_sigpending();
- 	spin_unlock_irq(&current->sighand->siglock);
- 	
--	if (restore_sigcontext(regs, &frame->sc, &eax))
-+	if (restore_sigcontext(regs, &frame->sc, &ax))
- 		goto badframe;
--	return eax;
-+	return ax;
- 
- badframe:
--	if (show_unhandled_signals && printk_ratelimit())
--		printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx"
--		       " esp:%lx oeax:%lx\n",
-+	if (show_unhandled_signals && printk_ratelimit()) {
-+		printk("%s%s[%d] bad frame in sigreturn frame:%p ip:%lx"
-+		       " sp:%lx oeax:%lx",
- 		    task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
--		    current->comm, task_pid_nr(current), frame, regs->eip,
--		    regs->esp, regs->orig_eax);
-+		    current->comm, task_pid_nr(current), frame, regs->ip,
-+		    regs->sp, regs->orig_ax);
-+		print_vma_addr(" in ", regs->ip);
-+		printk("\n");
-+	}
+-  return (fpu_tag_word >> ((top & 7)*2)) & 3;
++	return (fpu_tag_word >> ((top & 7) * 2)) & 3;
+ }
  
- 	force_sig(SIGSEGV, current);
- 	return 0;
-@@ -211,9 +215,9 @@ badframe:
- asmlinkage int sys_rt_sigreturn(unsigned long __unused)
+-
+ int FPU_gettagi(int stnr)
  {
- 	struct pt_regs *regs = (struct pt_regs *) &__unused;
--	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
-+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->sp - 4);
- 	sigset_t set;
--	int eax;
-+	int ax;
- 
- 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- 		goto badframe;
-@@ -226,13 +230,13 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
- 	recalc_sigpending();
- 	spin_unlock_irq(&current->sighand->siglock);
- 	
--	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
-+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
- 		goto badframe;
- 
--	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
-+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
- 		goto badframe;
- 
--	return eax;
-+	return ax;
+-  return (fpu_tag_word >> (((top+stnr) & 7)*2)) & 3;
++	return (fpu_tag_word >> (((top + stnr) & 7) * 2)) & 3;
+ }
  
- badframe:
- 	force_sig(SIGSEGV, current);
-@@ -249,27 +253,27 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
+-
+ int FPU_gettag(int regnr)
  {
- 	int tmp, err = 0;
- 
--	err |= __put_user(regs->xfs, (unsigned int __user *)&sc->fs);
-+	err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
- 	savesegment(gs, tmp);
- 	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
- 
--	err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
--	err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
--	err |= __put_user(regs->edi, &sc->edi);
--	err |= __put_user(regs->esi, &sc->esi);
--	err |= __put_user(regs->ebp, &sc->ebp);
--	err |= __put_user(regs->esp, &sc->esp);
--	err |= __put_user(regs->ebx, &sc->ebx);
--	err |= __put_user(regs->edx, &sc->edx);
--	err |= __put_user(regs->ecx, &sc->ecx);
--	err |= __put_user(regs->eax, &sc->eax);
-+	err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
-+	err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
-+	err |= __put_user(regs->di, &sc->di);
-+	err |= __put_user(regs->si, &sc->si);
-+	err |= __put_user(regs->bp, &sc->bp);
-+	err |= __put_user(regs->sp, &sc->sp);
-+	err |= __put_user(regs->bx, &sc->bx);
-+	err |= __put_user(regs->dx, &sc->dx);
-+	err |= __put_user(regs->cx, &sc->cx);
-+	err |= __put_user(regs->ax, &sc->ax);
- 	err |= __put_user(current->thread.trap_no, &sc->trapno);
- 	err |= __put_user(current->thread.error_code, &sc->err);
--	err |= __put_user(regs->eip, &sc->eip);
--	err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
--	err |= __put_user(regs->eflags, &sc->eflags);
--	err |= __put_user(regs->esp, &sc->esp_at_signal);
--	err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
-+	err |= __put_user(regs->ip, &sc->ip);
-+	err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
-+	err |= __put_user(regs->flags, &sc->flags);
-+	err |= __put_user(regs->sp, &sc->sp_at_signal);
-+	err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
+-  return (fpu_tag_word >> ((regnr & 7)*2)) & 3;
++	return (fpu_tag_word >> ((regnr & 7) * 2)) & 3;
+ }
  
- 	tmp = save_i387(fpstate);
- 	if (tmp < 0)
-@@ -290,29 +294,36 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
- static inline void __user *
- get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+-
+ void FPU_settag0(int tag)
  {
--	unsigned long esp;
-+	unsigned long sp;
- 
- 	/* Default to using normal stack */
--	esp = regs->esp;
-+	sp = regs->sp;
-+
-+	/*
-+	 * If we are on the alternate signal stack and would overflow it, don't.
-+	 * Return an always-bogus address instead so we will die with SIGSEGV.
-+	 */
-+	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
-+		return (void __user *) -1L;
- 
- 	/* This is the X/Open sanctioned signal stack switching.  */
- 	if (ka->sa.sa_flags & SA_ONSTACK) {
--		if (sas_ss_flags(esp) == 0)
--			esp = current->sas_ss_sp + current->sas_ss_size;
-+		if (sas_ss_flags(sp) == 0)
-+			sp = current->sas_ss_sp + current->sas_ss_size;
- 	}
- 
- 	/* This is the legacy signal stack switching. */
--	else if ((regs->xss & 0xffff) != __USER_DS &&
-+	else if ((regs->ss & 0xffff) != __USER_DS &&
- 		 !(ka->sa.sa_flags & SA_RESTORER) &&
- 		 ka->sa.sa_restorer) {
--		esp = (unsigned long) ka->sa.sa_restorer;
-+		sp = (unsigned long) ka->sa.sa_restorer;
- 	}
- 
--	esp -= frame_size;
-+	sp -= frame_size;
- 	/* Align the stack pointer according to the i386 ABI,
- 	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
--	esp = ((esp + 4) & -16ul) - 4;
--	return (void __user *) esp;
-+	sp = ((sp + 4) & -16ul) - 4;
-+	return (void __user *) sp;
+-  int regnr = top;
+-  regnr &= 7;
+-  fpu_tag_word &= ~(3 << (regnr*2));
+-  fpu_tag_word |= (tag & 3) << (regnr*2);
++	int regnr = top;
++	regnr &= 7;
++	fpu_tag_word &= ~(3 << (regnr * 2));
++	fpu_tag_word |= (tag & 3) << (regnr * 2);
  }
  
- /* These symbols are defined with the addresses in the vsyscall page.
-@@ -355,9 +366,9 @@ static int setup_frame(int sig, struct k_sigaction *ka,
- 	}
- 
- 	if (current->binfmt->hasvdso)
--		restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
-+		restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
- 	else
--		restorer = (void *)&frame->retcode;
-+		restorer = &frame->retcode;
- 	if (ka->sa.sa_flags & SA_RESTORER)
- 		restorer = ka->sa.sa_restorer;
- 
-@@ -379,16 +390,16 @@ static int setup_frame(int sig, struct k_sigaction *ka,
- 		goto give_sigsegv;
- 
- 	/* Set up registers for signal handler */
--	regs->esp = (unsigned long) frame;
--	regs->eip = (unsigned long) ka->sa.sa_handler;
--	regs->eax = (unsigned long) sig;
--	regs->edx = (unsigned long) 0;
--	regs->ecx = (unsigned long) 0;
-+	regs->sp = (unsigned long) frame;
-+	regs->ip = (unsigned long) ka->sa.sa_handler;
-+	regs->ax = (unsigned long) sig;
-+	regs->dx = (unsigned long) 0;
-+	regs->cx = (unsigned long) 0;
- 
--	regs->xds = __USER_DS;
--	regs->xes = __USER_DS;
--	regs->xss = __USER_DS;
--	regs->xcs = __USER_CS;
-+	regs->ds = __USER_DS;
-+	regs->es = __USER_DS;
-+	regs->ss = __USER_DS;
-+	regs->cs = __USER_CS;
- 
- 	/*
- 	 * Clear TF when entering the signal handler, but
-@@ -396,13 +407,13 @@ static int setup_frame(int sig, struct k_sigaction *ka,
- 	 * The tracer may want to single-step inside the
- 	 * handler too.
- 	 */
--	regs->eflags &= ~TF_MASK;
-+	regs->flags &= ~TF_MASK;
- 	if (test_thread_flag(TIF_SINGLESTEP))
- 		ptrace_notify(SIGTRAP);
- 
- #if DEBUG_SIG
- 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
--		current->comm, current->pid, frame, regs->eip, frame->pretcode);
-+		current->comm, current->pid, frame, regs->ip, frame->pretcode);
- #endif
- 
- 	return 0;
-@@ -442,7 +453,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 	err |= __put_user(0, &frame->uc.uc_flags);
- 	err |= __put_user(0, &frame->uc.uc_link);
- 	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
--	err |= __put_user(sas_ss_flags(regs->esp),
-+	err |= __put_user(sas_ss_flags(regs->sp),
- 			  &frame->uc.uc_stack.ss_flags);
- 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- 	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
-@@ -452,13 +463,13 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 		goto give_sigsegv;
- 
- 	/* Set up to return from userspace.  */
--	restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
-+	restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
- 	if (ka->sa.sa_flags & SA_RESTORER)
- 		restorer = ka->sa.sa_restorer;
- 	err |= __put_user(restorer, &frame->pretcode);
- 	 
- 	/*
--	 * This is movl $,%eax ; int $0x80
-+	 * This is movl $,%ax ; int $0x80
- 	 *
- 	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
- 	 * reasons and because gdb uses it as a signature to notice
-@@ -472,16 +483,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 		goto give_sigsegv;
- 
- 	/* Set up registers for signal handler */
--	regs->esp = (unsigned long) frame;
--	regs->eip = (unsigned long) ka->sa.sa_handler;
--	regs->eax = (unsigned long) usig;
--	regs->edx = (unsigned long) &frame->info;
--	regs->ecx = (unsigned long) &frame->uc;
-+	regs->sp = (unsigned long) frame;
-+	regs->ip = (unsigned long) ka->sa.sa_handler;
-+	regs->ax = (unsigned long) usig;
-+	regs->dx = (unsigned long) &frame->info;
-+	regs->cx = (unsigned long) &frame->uc;
- 
--	regs->xds = __USER_DS;
--	regs->xes = __USER_DS;
--	regs->xss = __USER_DS;
--	regs->xcs = __USER_CS;
-+	regs->ds = __USER_DS;
-+	regs->es = __USER_DS;
-+	regs->ss = __USER_DS;
-+	regs->cs = __USER_CS;
- 
- 	/*
- 	 * Clear TF when entering the signal handler, but
-@@ -489,13 +500,13 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 	 * The tracer may want to single-step inside the
- 	 * handler too.
- 	 */
--	regs->eflags &= ~TF_MASK;
-+	regs->flags &= ~TF_MASK;
- 	if (test_thread_flag(TIF_SINGLESTEP))
- 		ptrace_notify(SIGTRAP);
- 
- #if DEBUG_SIG
- 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
--		current->comm, current->pid, frame, regs->eip, frame->pretcode);
-+		current->comm, current->pid, frame, regs->ip, frame->pretcode);
- #endif
- 
- 	return 0;
-@@ -516,35 +527,33 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
- 	int ret;
- 
- 	/* Are we from a system call? */
--	if (regs->orig_eax >= 0) {
-+	if (regs->orig_ax >= 0) {
- 		/* If so, check system call restarting.. */
--		switch (regs->eax) {
-+		switch (regs->ax) {
- 		        case -ERESTART_RESTARTBLOCK:
- 			case -ERESTARTNOHAND:
--				regs->eax = -EINTR;
-+				regs->ax = -EINTR;
- 				break;
- 
- 			case -ERESTARTSYS:
- 				if (!(ka->sa.sa_flags & SA_RESTART)) {
--					regs->eax = -EINTR;
-+					regs->ax = -EINTR;
- 					break;
- 				}
- 			/* fallthrough */
- 			case -ERESTARTNOINTR:
--				regs->eax = regs->orig_eax;
--				regs->eip -= 2;
-+				regs->ax = regs->orig_ax;
-+				regs->ip -= 2;
- 		}
- 	}
- 
- 	/*
--	 * If TF is set due to a debugger (PT_DTRACE), clear the TF flag so
--	 * that register information in the sigcontext is correct.
-+	 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
-+	 * flag so that register information in the sigcontext is correct.
- 	 */
--	if (unlikely(regs->eflags & TF_MASK)
--	    && likely(current->ptrace & PT_DTRACE)) {
--		current->ptrace &= ~PT_DTRACE;
--		regs->eflags &= ~TF_MASK;
--	}
-+	if (unlikely(regs->flags & X86_EFLAGS_TF) &&
-+	    likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
-+		regs->flags &= ~X86_EFLAGS_TF;
- 
- 	/* Set up the stack frame */
- 	if (ka->sa.sa_flags & SA_SIGINFO)
-@@ -569,7 +578,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-  * want to handle. Thus you cannot kill init even with a SIGKILL even by
-  * mistake.
-  */
--static void fastcall do_signal(struct pt_regs *regs)
-+static void do_signal(struct pt_regs *regs)
+-
+ void FPU_settagi(int stnr, int tag)
  {
- 	siginfo_t info;
- 	int signr;
-@@ -599,8 +608,8 @@ static void fastcall do_signal(struct pt_regs *regs)
- 		 * have been cleared if the watchpoint triggered
- 		 * inside the kernel.
- 		 */
--		if (unlikely(current->thread.debugreg[7]))
--			set_debugreg(current->thread.debugreg[7], 7);
-+		if (unlikely(current->thread.debugreg7))
-+			set_debugreg(current->thread.debugreg7, 7);
- 
- 		/* Whee!  Actually deliver the signal.  */
- 		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-@@ -616,19 +625,19 @@ static void fastcall do_signal(struct pt_regs *regs)
- 	}
- 
- 	/* Did we come from a system call? */
--	if (regs->orig_eax >= 0) {
-+	if (regs->orig_ax >= 0) {
- 		/* Restart the system call - no handlers present */
--		switch (regs->eax) {
-+		switch (regs->ax) {
- 		case -ERESTARTNOHAND:
- 		case -ERESTARTSYS:
- 		case -ERESTARTNOINTR:
--			regs->eax = regs->orig_eax;
--			regs->eip -= 2;
-+			regs->ax = regs->orig_ax;
-+			regs->ip -= 2;
- 			break;
+-  int regnr = stnr+top;
+-  regnr &= 7;
+-  fpu_tag_word &= ~(3 << (regnr*2));
+-  fpu_tag_word |= (tag & 3) << (regnr*2);
++	int regnr = stnr + top;
++	regnr &= 7;
++	fpu_tag_word &= ~(3 << (regnr * 2));
++	fpu_tag_word |= (tag & 3) << (regnr * 2);
+ }
  
- 		case -ERESTART_RESTARTBLOCK:
--			regs->eax = __NR_restart_syscall;
--			regs->eip -= 2;
-+			regs->ax = __NR_restart_syscall;
-+			regs->ip -= 2;
- 			break;
- 		}
- 	}
-@@ -651,13 +660,16 @@ void do_notify_resume(struct pt_regs *regs, void *_unused,
+-
+ void FPU_settag(int regnr, int tag)
  {
- 	/* Pending single-step? */
- 	if (thread_info_flags & _TIF_SINGLESTEP) {
--		regs->eflags |= TF_MASK;
-+		regs->flags |= TF_MASK;
- 		clear_thread_flag(TIF_SINGLESTEP);
- 	}
+-  regnr &= 7;
+-  fpu_tag_word &= ~(3 << (regnr*2));
+-  fpu_tag_word |= (tag & 3) << (regnr*2);
++	regnr &= 7;
++	fpu_tag_word &= ~(3 << (regnr * 2));
++	fpu_tag_word |= (tag & 3) << (regnr * 2);
+ }
  
- 	/* deal with pending signal delivery */
- 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
- 		do_signal(regs);
+-
+ int FPU_Special(FPU_REG const *ptr)
+ {
+-  int exp = exponent(ptr);
+-
+-  if ( exp == EXP_BIAS+EXP_UNDER )
+-    return TW_Denormal;
+-  else if ( exp != EXP_BIAS+EXP_OVER )
+-    return TW_NaN;
+-  else if ( (ptr->sigh == 0x80000000) && (ptr->sigl == 0) )
+-    return TW_Infinity;
+-  return TW_NaN;
++	int exp = exponent(ptr);
 +
-+	if (thread_info_flags & _TIF_HRTICK_RESCHED)
-+		hrtick_resched();
- 	
- 	clear_thread_flag(TIF_IRET);
++	if (exp == EXP_BIAS + EXP_UNDER)
++		return TW_Denormal;
++	else if (exp != EXP_BIAS + EXP_OVER)
++		return TW_NaN;
++	else if ((ptr->sigh == 0x80000000) && (ptr->sigl == 0))
++		return TW_Infinity;
++	return TW_NaN;
  }
-diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
-index ab086b0..7347bb1 100644
---- a/arch/x86/kernel/signal_64.c
-+++ b/arch/x86/kernel/signal_64.c
-@@ -39,7 +39,7 @@ asmlinkage long
- sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- 		struct pt_regs *regs)
+ 
+-
+ int isNaN(FPU_REG const *ptr)
  {
--	return do_sigaltstack(uss, uoss, regs->rsp);
-+	return do_sigaltstack(uss, uoss, regs->sp);
+-  return ( (exponent(ptr) == EXP_BIAS+EXP_OVER)
+-	   && !((ptr->sigh == 0x80000000) && (ptr->sigl == 0)) );
++	return ((exponent(ptr) == EXP_BIAS + EXP_OVER)
++		&& !((ptr->sigh == 0x80000000) && (ptr->sigl == 0)));
  }
  
- 
-@@ -64,8 +64,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
- 
- #define COPY(x)		err |= __get_user(regs->x, &sc->x)
- 
--	COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx);
--	COPY(rdx); COPY(rcx); COPY(rip);
-+	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
-+	COPY(dx); COPY(cx); COPY(ip);
- 	COPY(r8);
- 	COPY(r9);
- 	COPY(r10);
-@@ -86,9 +86,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
- 
- 	{
- 		unsigned int tmpflags;
--		err |= __get_user(tmpflags, &sc->eflags);
--		regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
--		regs->orig_rax = -1;		/* disable syscall checks */
-+		err |= __get_user(tmpflags, &sc->flags);
-+		regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5);
-+		regs->orig_ax = -1;		/* disable syscall checks */
- 	}
- 
- 	{
-@@ -108,7 +108,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
- 		}
- 	}
- 
--	err |= __get_user(*prax, &sc->rax);
-+	err |= __get_user(*prax, &sc->ax);
- 	return err;
- 
- badframe:
-@@ -119,9 +119,9 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+-
+ int FPU_empty_i(int stnr)
  {
- 	struct rt_sigframe __user *frame;
- 	sigset_t set;
--	unsigned long eax;
-+	unsigned long ax;
- 
--	frame = (struct rt_sigframe __user *)(regs->rsp - 8);
-+	frame = (struct rt_sigframe __user *)(regs->sp - 8);
- 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) {
- 		goto badframe;
- 	} 
-@@ -135,17 +135,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
- 	recalc_sigpending();
- 	spin_unlock_irq(&current->sighand->siglock);
- 	
--	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
-+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
- 		goto badframe;
- 
- #ifdef DEBUG_SIG
--	printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs->rip,regs->rsp,frame,eax);
-+	printk("%d sigreturn ip:%lx sp:%lx frame:%p ax:%lx\n",current->pid,regs->ip,regs->sp,frame,ax);
- #endif
- 
--	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->rsp) == -EFAULT)
-+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
- 		goto badframe;
- 
--	return eax;
-+	return ax;
- 
- badframe:
- 	signal_fault(regs,frame,"sigreturn");
-@@ -165,14 +165,14 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
- 	err |= __put_user(0, &sc->gs);
- 	err |= __put_user(0, &sc->fs);
+-  int regnr = (top+stnr) & 7;
++	int regnr = (top + stnr) & 7;
  
--	err |= __put_user(regs->rdi, &sc->rdi);
--	err |= __put_user(regs->rsi, &sc->rsi);
--	err |= __put_user(regs->rbp, &sc->rbp);
--	err |= __put_user(regs->rsp, &sc->rsp);
--	err |= __put_user(regs->rbx, &sc->rbx);
--	err |= __put_user(regs->rdx, &sc->rdx);
--	err |= __put_user(regs->rcx, &sc->rcx);
--	err |= __put_user(regs->rax, &sc->rax);
-+	err |= __put_user(regs->di, &sc->di);
-+	err |= __put_user(regs->si, &sc->si);
-+	err |= __put_user(regs->bp, &sc->bp);
-+	err |= __put_user(regs->sp, &sc->sp);
-+	err |= __put_user(regs->bx, &sc->bx);
-+	err |= __put_user(regs->dx, &sc->dx);
-+	err |= __put_user(regs->cx, &sc->cx);
-+	err |= __put_user(regs->ax, &sc->ax);
- 	err |= __put_user(regs->r8, &sc->r8);
- 	err |= __put_user(regs->r9, &sc->r9);
- 	err |= __put_user(regs->r10, &sc->r10);
-@@ -183,8 +183,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
- 	err |= __put_user(regs->r15, &sc->r15);
- 	err |= __put_user(me->thread.trap_no, &sc->trapno);
- 	err |= __put_user(me->thread.error_code, &sc->err);
--	err |= __put_user(regs->rip, &sc->rip);
--	err |= __put_user(regs->eflags, &sc->eflags);
-+	err |= __put_user(regs->ip, &sc->ip);
-+	err |= __put_user(regs->flags, &sc->flags);
- 	err |= __put_user(mask, &sc->oldmask);
- 	err |= __put_user(me->thread.cr2, &sc->cr2);
+-  return ((fpu_tag_word >> (regnr*2)) & 3) == TAG_Empty;
++	return ((fpu_tag_word >> (regnr * 2)) & 3) == TAG_Empty;
+ }
  
-@@ -198,18 +198,18 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
- static void __user *
- get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
+-
+-int FPU_stackoverflow(FPU_REG **st_new_ptr)
++int FPU_stackoverflow(FPU_REG ** st_new_ptr)
  {
--	unsigned long rsp;
-+	unsigned long sp;
- 
- 	/* Default to using normal stack - redzone*/
--	rsp = regs->rsp - 128;
-+	sp = regs->sp - 128;
- 
- 	/* This is the X/Open sanctioned signal stack switching.  */
- 	if (ka->sa.sa_flags & SA_ONSTACK) {
--		if (sas_ss_flags(rsp) == 0)
--			rsp = current->sas_ss_sp + current->sas_ss_size;
-+		if (sas_ss_flags(sp) == 0)
-+			sp = current->sas_ss_sp + current->sas_ss_size;
- 	}
+-  *st_new_ptr = &st(-1);
++	*st_new_ptr = &st(-1);
  
--	return (void __user *)round_down(rsp - size, 16); 
-+	return (void __user *)round_down(sp - size, 16);
+-  return ((fpu_tag_word >> (((top - 1) & 7)*2)) & 3) != TAG_Empty;
++	return ((fpu_tag_word >> (((top - 1) & 7) * 2)) & 3) != TAG_Empty;
  }
  
- static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-@@ -246,7 +246,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 	err |= __put_user(0, &frame->uc.uc_flags);
- 	err |= __put_user(0, &frame->uc.uc_link);
- 	err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
--	err |= __put_user(sas_ss_flags(regs->rsp),
-+	err |= __put_user(sas_ss_flags(regs->sp),
- 			  &frame->uc.uc_stack.ss_flags);
- 	err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
- 	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
-@@ -271,21 +271,21 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 		goto give_sigsegv;
- 
- #ifdef DEBUG_SIG
--	printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax);
-+	printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax);
- #endif
- 
- 	/* Set up registers for signal handler */
--	regs->rdi = sig;
-+	regs->di = sig;
- 	/* In case the signal handler was declared without prototypes */ 
--	regs->rax = 0;	
-+	regs->ax = 0;
+-
+ void FPU_copy_to_regi(FPU_REG const *r, u_char tag, int stnr)
+ {
+-  reg_copy(r, &st(stnr));
+-  FPU_settagi(stnr, tag);
++	reg_copy(r, &st(stnr));
++	FPU_settagi(stnr, tag);
+ }
  
- 	/* This also works for non SA_SIGINFO handlers because they expect the
- 	   next argument after the signal number on the stack. */
--	regs->rsi = (unsigned long)&frame->info; 
--	regs->rdx = (unsigned long)&frame->uc; 
--	regs->rip = (unsigned long) ka->sa.sa_handler;
-+	regs->si = (unsigned long)&frame->info;
-+	regs->dx = (unsigned long)&frame->uc;
-+	regs->ip = (unsigned long) ka->sa.sa_handler;
+ void FPU_copy_to_reg1(FPU_REG const *r, u_char tag)
+ {
+-  reg_copy(r, &st(1));
+-  FPU_settagi(1, tag);
++	reg_copy(r, &st(1));
++	FPU_settagi(1, tag);
+ }
  
--	regs->rsp = (unsigned long)frame;
-+	regs->sp = (unsigned long)frame;
+ void FPU_copy_to_reg0(FPU_REG const *r, u_char tag)
+ {
+-  int regnr = top;
+-  regnr &= 7;
++	int regnr = top;
++	regnr &= 7;
  
- 	/* Set up the CS register to run signal handlers in 64-bit mode,
- 	   even if the handler happens to be interrupting 32-bit code. */
-@@ -295,12 +295,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 	   see include/asm-x86_64/uaccess.h for details. */
- 	set_fs(USER_DS);
+-  reg_copy(r, &st(0));
++	reg_copy(r, &st(0));
  
--	regs->eflags &= ~TF_MASK;
-+	regs->flags &= ~X86_EFLAGS_TF;
- 	if (test_thread_flag(TIF_SINGLESTEP))
- 		ptrace_notify(SIGTRAP);
- #ifdef DEBUG_SIG
- 	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n",
--		current->comm, current->pid, frame, regs->rip, frame->pretcode);
-+		current->comm, current->pid, frame, regs->ip, frame->pretcode);
- #endif
+-  fpu_tag_word &= ~(3 << (regnr*2));
+-  fpu_tag_word |= (tag & 3) << (regnr*2);
++	fpu_tag_word &= ~(3 << (regnr * 2));
++	fpu_tag_word |= (tag & 3) << (regnr * 2);
+ }
+diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
+index 403cbde..ecd0668 100644
+--- a/arch/x86/math-emu/fpu_trig.c
++++ b/arch/x86/math-emu/fpu_trig.c
+@@ -15,11 +15,10 @@
+ #include "fpu_emu.h"
+ #include "status_w.h"
+ #include "control_w.h"
+-#include "reg_constant.h"	
++#include "reg_constant.h"
  
- 	return 0;
-@@ -321,44 +321,40 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
- 	int ret;
+ static void rem_kernel(unsigned long long st0, unsigned long long *y,
+-		       unsigned long long st1,
+-		       unsigned long long q, int n);
++		       unsigned long long st1, unsigned long long q, int n);
  
- #ifdef DEBUG_SIG
--	printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n",
-+	printk("handle_signal pid:%d sig:%lu ip:%lx sp:%lx regs=%p\n",
- 		current->pid, sig,
--		regs->rip, regs->rsp, regs);
-+		regs->ip, regs->sp, regs);
- #endif
+ #define BETTER_THAN_486
  
- 	/* Are we from a system call? */
--	if ((long)regs->orig_rax >= 0) {
-+	if ((long)regs->orig_ax >= 0) {
- 		/* If so, check system call restarting.. */
--		switch (regs->rax) {
-+		switch (regs->ax) {
- 		        case -ERESTART_RESTARTBLOCK:
- 			case -ERESTARTNOHAND:
--				regs->rax = -EINTR;
-+				regs->ax = -EINTR;
- 				break;
+@@ -33,788 +32,706 @@ static void rem_kernel(unsigned long long st0, unsigned long long *y,
+    precision of the result sometimes degrades to about 63.9 bits */
+ static int trig_arg(FPU_REG *st0_ptr, int even)
+ {
+-  FPU_REG tmp;
+-  u_char tmptag;
+-  unsigned long long q;
+-  int old_cw = control_word, saved_status = partial_status;
+-  int tag, st0_tag = TAG_Valid;
+-
+-  if ( exponent(st0_ptr) >= 63 )
+-    {
+-      partial_status |= SW_C2;     /* Reduction incomplete. */
+-      return -1;
+-    }
+-
+-  control_word &= ~CW_RC;
+-  control_word |= RC_CHOP;
+-
+-  setpositive(st0_ptr);
+-  tag = FPU_u_div(st0_ptr, &CONST_PI2, &tmp, PR_64_BITS | RC_CHOP | 0x3f,
+-		  SIGN_POS);
+-
+-  FPU_round_to_int(&tmp, tag);  /* Fortunately, this can't overflow
+-				   to 2^64 */
+-  q = significand(&tmp);
+-  if ( q )
+-    {
+-      rem_kernel(significand(st0_ptr),
+-		 &significand(&tmp),
+-		 significand(&CONST_PI2),
+-		 q, exponent(st0_ptr) - exponent(&CONST_PI2));
+-      setexponent16(&tmp, exponent(&CONST_PI2));
+-      st0_tag = FPU_normalize(&tmp);
+-      FPU_copy_to_reg0(&tmp, st0_tag);
+-    }
+-
+-  if ( (even && !(q & 1)) || (!even && (q & 1)) )
+-    {
+-      st0_tag = FPU_sub(REV|LOADED|TAG_Valid, (int)&CONST_PI2, FULL_PRECISION);
++	FPU_REG tmp;
++	u_char tmptag;
++	unsigned long long q;
++	int old_cw = control_word, saved_status = partial_status;
++	int tag, st0_tag = TAG_Valid;
++
++	if (exponent(st0_ptr) >= 63) {
++		partial_status |= SW_C2;	/* Reduction incomplete. */
++		return -1;
++	}
  
- 			case -ERESTARTSYS:
- 				if (!(ka->sa.sa_flags & SA_RESTART)) {
--					regs->rax = -EINTR;
-+					regs->ax = -EINTR;
- 					break;
- 				}
- 				/* fallthrough */
- 			case -ERESTARTNOINTR:
--				regs->rax = regs->orig_rax;
--				regs->rip -= 2;
-+				regs->ax = regs->orig_ax;
-+				regs->ip -= 2;
- 				break;
- 		}
+-#ifdef BETTER_THAN_486
+-      /* So far, the results are exact but based upon a 64 bit
+-	 precision approximation to pi/2. The technique used
+-	 now is equivalent to using an approximation to pi/2 which
+-	 is accurate to about 128 bits. */
+-      if ( (exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64) || (q > 1) )
+-	{
+-	  /* This code gives the effect of having pi/2 to better than
+-	     128 bits precision. */
+-
+-	  significand(&tmp) = q + 1;
+-	  setexponent16(&tmp, 63);
+-	  FPU_normalize(&tmp);
+-	  tmptag =
+-	    FPU_u_mul(&CONST_PI2extra, &tmp, &tmp, FULL_PRECISION, SIGN_POS,
+-		      exponent(&CONST_PI2extra) + exponent(&tmp));
+-	  setsign(&tmp, getsign(&CONST_PI2extra));
+-	  st0_tag = FPU_add(&tmp, tmptag, 0, FULL_PRECISION);
+-	  if ( signnegative(st0_ptr) )
+-	    {
+-	      /* CONST_PI2extra is negative, so the result of the addition
+-		 can be negative. This means that the argument is actually
+-		 in a different quadrant. The correction is always < pi/2,
+-		 so it can't overflow into yet another quadrant. */
+-	      setpositive(st0_ptr);
+-	      q++;
+-	    }
++	control_word &= ~CW_RC;
++	control_word |= RC_CHOP;
++
++	setpositive(st0_ptr);
++	tag = FPU_u_div(st0_ptr, &CONST_PI2, &tmp, PR_64_BITS | RC_CHOP | 0x3f,
++			SIGN_POS);
++
++	FPU_round_to_int(&tmp, tag);	/* Fortunately, this can't overflow
++					   to 2^64 */
++	q = significand(&tmp);
++	if (q) {
++		rem_kernel(significand(st0_ptr),
++			   &significand(&tmp),
++			   significand(&CONST_PI2),
++			   q, exponent(st0_ptr) - exponent(&CONST_PI2));
++		setexponent16(&tmp, exponent(&CONST_PI2));
++		st0_tag = FPU_normalize(&tmp);
++		FPU_copy_to_reg0(&tmp, st0_tag);
+ 	}
++
++	if ((even && !(q & 1)) || (!even && (q & 1))) {
++		st0_tag =
++		    FPU_sub(REV | LOADED | TAG_Valid, (int)&CONST_PI2,
++			    FULL_PRECISION);
++
++#ifdef BETTER_THAN_486
++		/* So far, the results are exact but based upon a 64 bit
++		   precision approximation to pi/2. The technique used
++		   now is equivalent to using an approximation to pi/2 which
++		   is accurate to about 128 bits. */
++		if ((exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64)
++		    || (q > 1)) {
++			/* This code gives the effect of having pi/2 to better than
++			   128 bits precision. */
++
++			significand(&tmp) = q + 1;
++			setexponent16(&tmp, 63);
++			FPU_normalize(&tmp);
++			tmptag =
++			    FPU_u_mul(&CONST_PI2extra, &tmp, &tmp,
++				      FULL_PRECISION, SIGN_POS,
++				      exponent(&CONST_PI2extra) +
++				      exponent(&tmp));
++			setsign(&tmp, getsign(&CONST_PI2extra));
++			st0_tag = FPU_add(&tmp, tmptag, 0, FULL_PRECISION);
++			if (signnegative(st0_ptr)) {
++				/* CONST_PI2extra is negative, so the result of the addition
++				   can be negative. This means that the argument is actually
++				   in a different quadrant. The correction is always < pi/2,
++				   so it can't overflow into yet another quadrant. */
++				setpositive(st0_ptr);
++				q++;
++			}
++		}
+ #endif /* BETTER_THAN_486 */
+-    }
++	}
+ #ifdef BETTER_THAN_486
+-  else
+-    {
+-      /* So far, the results are exact but based upon a 64 bit
+-	 precision approximation to pi/2. The technique used
+-	 now is equivalent to using an approximation to pi/2 which
+-	 is accurate to about 128 bits. */
+-      if ( ((q > 0) && (exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64))
+-	   || (q > 1) )
+-	{
+-	  /* This code gives the effect of having p/2 to better than
+-	     128 bits precision. */
+-
+-	  significand(&tmp) = q;
+-	  setexponent16(&tmp, 63);
+-	  FPU_normalize(&tmp);         /* This must return TAG_Valid */
+-	  tmptag = FPU_u_mul(&CONST_PI2extra, &tmp, &tmp, FULL_PRECISION,
+-			     SIGN_POS,
+-			     exponent(&CONST_PI2extra) + exponent(&tmp));
+-	  setsign(&tmp, getsign(&CONST_PI2extra));
+-	  st0_tag = FPU_sub(LOADED|(tmptag & 0x0f), (int)&tmp,
+-			    FULL_PRECISION);
+-	  if ( (exponent(st0_ptr) == exponent(&CONST_PI2)) &&
+-	      ((st0_ptr->sigh > CONST_PI2.sigh)
+-	       || ((st0_ptr->sigh == CONST_PI2.sigh)
+-		   && (st0_ptr->sigl > CONST_PI2.sigl))) )
+-	    {
+-	      /* CONST_PI2extra is negative, so the result of the
+-		 subtraction can be larger than pi/2. This means
+-		 that the argument is actually in a different quadrant.
+-		 The correction is always < pi/2, so it can't overflow
+-		 into yet another quadrant. */
+-	      st0_tag = FPU_sub(REV|LOADED|TAG_Valid, (int)&CONST_PI2,
+-				FULL_PRECISION);
+-	      q++;
+-	    }
++	else {
++		/* So far, the results are exact but based upon a 64 bit
++		   precision approximation to pi/2. The technique used
++		   now is equivalent to using an approximation to pi/2 which
++		   is accurate to about 128 bits. */
++		if (((q > 0)
++		     && (exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64))
++		    || (q > 1)) {
++			/* This code gives the effect of having p/2 to better than
++			   128 bits precision. */
++
++			significand(&tmp) = q;
++			setexponent16(&tmp, 63);
++			FPU_normalize(&tmp);	/* This must return TAG_Valid */
++			tmptag =
++			    FPU_u_mul(&CONST_PI2extra, &tmp, &tmp,
++				      FULL_PRECISION, SIGN_POS,
++				      exponent(&CONST_PI2extra) +
++				      exponent(&tmp));
++			setsign(&tmp, getsign(&CONST_PI2extra));
++			st0_tag = FPU_sub(LOADED | (tmptag & 0x0f), (int)&tmp,
++					  FULL_PRECISION);
++			if ((exponent(st0_ptr) == exponent(&CONST_PI2)) &&
++			    ((st0_ptr->sigh > CONST_PI2.sigh)
++			     || ((st0_ptr->sigh == CONST_PI2.sigh)
++				 && (st0_ptr->sigl > CONST_PI2.sigl)))) {
++				/* CONST_PI2extra is negative, so the result of the
++				   subtraction can be larger than pi/2. This means
++				   that the argument is actually in a different quadrant.
++				   The correction is always < pi/2, so it can't overflow
++				   into yet another quadrant. */
++				st0_tag =
++				    FPU_sub(REV | LOADED | TAG_Valid,
++					    (int)&CONST_PI2, FULL_PRECISION);
++				q++;
++			}
++		}
  	}
+-    }
+ #endif /* BETTER_THAN_486 */
  
- 	/*
--	 * If TF is set due to a debugger (PT_DTRACE), clear the TF
--	 * flag so that register information in the sigcontext is
--	 * correct.
-+	 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
-+	 * flag so that register information in the sigcontext is correct.
- 	 */
--	if (unlikely(regs->eflags & TF_MASK)) {
--		if (likely(current->ptrace & PT_DTRACE)) {
--			current->ptrace &= ~PT_DTRACE;
--			regs->eflags &= ~TF_MASK;
--		}
--	}
-+	if (unlikely(regs->flags & X86_EFLAGS_TF) &&
-+	    likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
-+		regs->flags &= ~X86_EFLAGS_TF;
+-  FPU_settag0(st0_tag);
+-  control_word = old_cw;
+-  partial_status = saved_status & ~SW_C2;     /* Reduction complete. */
++	FPU_settag0(st0_tag);
++	control_word = old_cw;
++	partial_status = saved_status & ~SW_C2;	/* Reduction complete. */
  
- #ifdef CONFIG_IA32_EMULATION
- 	if (test_thread_flag(TIF_IA32)) {
-@@ -430,21 +426,21 @@ static void do_signal(struct pt_regs *regs)
- 	}
+-  return (q & 3) | even;
++	return (q & 3) | even;
+ }
  
- 	/* Did we come from a system call? */
--	if ((long)regs->orig_rax >= 0) {
-+	if ((long)regs->orig_ax >= 0) {
- 		/* Restart the system call - no handlers present */
--		long res = regs->rax;
-+		long res = regs->ax;
- 		switch (res) {
- 		case -ERESTARTNOHAND:
- 		case -ERESTARTSYS:
- 		case -ERESTARTNOINTR:
--			regs->rax = regs->orig_rax;
--			regs->rip -= 2;
-+			regs->ax = regs->orig_ax;
-+			regs->ip -= 2;
- 			break;
- 		case -ERESTART_RESTARTBLOCK:
--			regs->rax = test_thread_flag(TIF_IA32) ?
-+			regs->ax = test_thread_flag(TIF_IA32) ?
- 					__NR_ia32_restart_syscall :
- 					__NR_restart_syscall;
--			regs->rip -= 2;
-+			regs->ip -= 2;
- 			break;
- 		}
- 	}
-@@ -461,13 +457,13 @@ void
- do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
+-
+ /* Convert a long to register */
+ static void convert_l2reg(long const *arg, int deststnr)
  {
- #ifdef DEBUG_SIG
--	printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%p pending:%x\n",
--	       thread_info_flags, regs->rip, regs->rsp, __builtin_return_address(0),signal_pending(current)); 
-+	printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n",
-+	       thread_info_flags, regs->ip, regs->sp, __builtin_return_address(0),signal_pending(current));
- #endif
- 	       
- 	/* Pending single-step? */
- 	if (thread_info_flags & _TIF_SINGLESTEP) {
--		regs->eflags |= TF_MASK;
-+		regs->flags |= X86_EFLAGS_TF;
- 		clear_thread_flag(TIF_SINGLESTEP);
- 	}
+-  int tag;
+-  long num = *arg;
+-  u_char sign;
+-  FPU_REG *dest = &st(deststnr);
+-
+-  if (num == 0)
+-    {
+-      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
+-      return;
+-    }
+-
+-  if (num > 0)
+-    { sign = SIGN_POS; }
+-  else
+-    { num = -num; sign = SIGN_NEG; }
+-
+-  dest->sigh = num;
+-  dest->sigl = 0;
+-  setexponent16(dest, 31);
+-  tag = FPU_normalize(dest);
+-  FPU_settagi(deststnr, tag);
+-  setsign(dest, sign);
+-  return;
+-}
++	int tag;
++	long num = *arg;
++	u_char sign;
++	FPU_REG *dest = &st(deststnr);
  
-@@ -480,14 +476,20 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
- 	/* deal with pending signal delivery */
- 	if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
- 		do_signal(regs);
++	if (num == 0) {
++		FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
++		return;
++	}
 +
-+	if (thread_info_flags & _TIF_HRTICK_RESCHED)
-+		hrtick_resched();
++	if (num > 0) {
++		sign = SIGN_POS;
++	} else {
++		num = -num;
++		sign = SIGN_NEG;
++	}
++
++	dest->sigh = num;
++	dest->sigl = 0;
++	setexponent16(dest, 31);
++	tag = FPU_normalize(dest);
++	FPU_settagi(deststnr, tag);
++	setsign(dest, sign);
++	return;
++}
+ 
+ static void single_arg_error(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+-  if ( st0_tag == TAG_Empty )
+-    FPU_stack_underflow();  /* Puts a QNaN in st(0) */
+-  else if ( st0_tag == TW_NaN )
+-    real_1op_NaN(st0_ptr);       /* return with a NaN in st(0) */
++	if (st0_tag == TAG_Empty)
++		FPU_stack_underflow();	/* Puts a QNaN in st(0) */
++	else if (st0_tag == TW_NaN)
++		real_1op_NaN(st0_ptr);	/* return with a NaN in st(0) */
+ #ifdef PARANOID
+-  else
+-    EXCEPTION(EX_INTERNAL|0x0112);
++	else
++		EXCEPTION(EX_INTERNAL | 0x0112);
+ #endif /* PARANOID */
  }
  
- void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
- { 
- 	struct task_struct *me = current; 
--	if (show_unhandled_signals && printk_ratelimit())
--		printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
--	       me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax); 
-+	if (show_unhandled_signals && printk_ratelimit()) {
-+		printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
-+	       me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax);
-+		print_vma_addr(" in ", regs->ip);
-+		printk("\n");
+-
+ static void single_arg_2_error(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+-  int isNaN;
+-
+-  switch ( st0_tag )
+-    {
+-    case TW_NaN:
+-      isNaN = (exponent(st0_ptr) == EXP_OVER) && (st0_ptr->sigh & 0x80000000);
+-      if ( isNaN && !(st0_ptr->sigh & 0x40000000) )   /* Signaling ? */
+-	{
+-	  EXCEPTION(EX_Invalid);
+-	  if ( control_word & CW_Invalid )
+-	    {
+-	      /* The masked response */
+-	      /* Convert to a QNaN */
+-	      st0_ptr->sigh |= 0x40000000;
+-	      push();
+-	      FPU_copy_to_reg0(st0_ptr, TAG_Special);
+-	    }
+-	}
+-      else if ( isNaN )
+-	{
+-	  /* A QNaN */
+-	  push();
+-	  FPU_copy_to_reg0(st0_ptr, TAG_Special);
+-	}
+-      else
+-	{
+-	  /* pseudoNaN or other unsupported */
+-	  EXCEPTION(EX_Invalid);
+-	  if ( control_word & CW_Invalid )
+-	    {
+-	      /* The masked response */
+-	      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
+-	      push();
+-	      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
+-	    }
+-	}
+-      break;              /* return with a NaN in st(0) */
++	int isNaN;
++
++	switch (st0_tag) {
++	case TW_NaN:
++		isNaN = (exponent(st0_ptr) == EXP_OVER)
++		    && (st0_ptr->sigh & 0x80000000);
++		if (isNaN && !(st0_ptr->sigh & 0x40000000)) {	/* Signaling ? */
++			EXCEPTION(EX_Invalid);
++			if (control_word & CW_Invalid) {
++				/* The masked response */
++				/* Convert to a QNaN */
++				st0_ptr->sigh |= 0x40000000;
++				push();
++				FPU_copy_to_reg0(st0_ptr, TAG_Special);
++			}
++		} else if (isNaN) {
++			/* A QNaN */
++			push();
++			FPU_copy_to_reg0(st0_ptr, TAG_Special);
++		} else {
++			/* pseudoNaN or other unsupported */
++			EXCEPTION(EX_Invalid);
++			if (control_word & CW_Invalid) {
++				/* The masked response */
++				FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
++				push();
++				FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
++			}
++		}
++		break;		/* return with a NaN in st(0) */
+ #ifdef PARANOID
+-    default:
+-      EXCEPTION(EX_INTERNAL|0x0112);
++	default:
++		EXCEPTION(EX_INTERNAL | 0x0112);
+ #endif /* PARANOID */
+-    }
 +	}
- 
- 	force_sig(SIGSEGV, me); 
- } 
-diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
-index fcaa026..dc0cde9 100644
---- a/arch/x86/kernel/smp_32.c
-+++ b/arch/x86/kernel/smp_32.c
-@@ -159,7 +159,7 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector)
- 	apic_write_around(APIC_ICR, cfg);
  }
  
--void fastcall send_IPI_self(int vector)
-+void send_IPI_self(int vector)
- {
- 	__send_IPI_shortcut(APIC_DEST_SELF, vector);
- }
-@@ -223,7 +223,7 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
- 	 */ 
+-
+ /*---------------------------------------------------------------------------*/
  
- 	local_irq_save(flags);
--	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
-+	for_each_possible_cpu(query_cpu) {
- 		if (cpu_isset(query_cpu, mask)) {
- 			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
- 					      vector);
-@@ -256,13 +256,14 @@ static DEFINE_SPINLOCK(tlbstate_lock);
-  * We need to reload %cr3 since the page tables may be going
-  * away from under us..
-  */
--void leave_mm(unsigned long cpu)
-+void leave_mm(int cpu)
+ static void f2xm1(FPU_REG *st0_ptr, u_char tag)
  {
- 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
- 		BUG();
- 	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
- 	load_cr3(swapper_pg_dir);
- }
-+EXPORT_SYMBOL_GPL(leave_mm);
- 
- /*
-  *
-@@ -310,7 +311,7 @@ void leave_mm(unsigned long cpu)
-  * 2) Leave the mm if we are in the lazy tlb mode.
-  */
+-  FPU_REG a;
++	FPU_REG a;
  
--fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
-+void smp_invalidate_interrupt(struct pt_regs *regs)
- {
- 	unsigned long cpu;
+-  clear_C1();
++	clear_C1();
  
-@@ -638,13 +639,13 @@ static void native_smp_send_stop(void)
-  * all the work is done automatically when
-  * we return from the interrupt.
-  */
--fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
-+void smp_reschedule_interrupt(struct pt_regs *regs)
- {
- 	ack_APIC_irq();
- 	__get_cpu_var(irq_stat).irq_resched_count++;
- }
+-  if ( tag == TAG_Valid )
+-    {
+-      /* For an 80486 FPU, the result is undefined if the arg is >= 1.0 */
+-      if ( exponent(st0_ptr) < 0 )
+-	{
+-	denormal_arg:
++	if (tag == TAG_Valid) {
++		/* For an 80486 FPU, the result is undefined if the arg is >= 1.0 */
++		if (exponent(st0_ptr) < 0) {
++		      denormal_arg:
  
--fastcall void smp_call_function_interrupt(struct pt_regs *regs)
-+void smp_call_function_interrupt(struct pt_regs *regs)
- {
- 	void (*func) (void *info) = call_data->func;
- 	void *info = call_data->info;
-@@ -675,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id)
- {
- 	int i;
+-	  FPU_to_exp16(st0_ptr, &a);
++			FPU_to_exp16(st0_ptr, &a);
  
--	for (i = 0; i < NR_CPUS; i++) {
-+	for_each_possible_cpu(i) {
- 		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
- 			return i;
+-	  /* poly_2xm1(x) requires 0 < st(0) < 1. */
+-	  poly_2xm1(getsign(st0_ptr), &a, st0_ptr);
++			/* poly_2xm1(x) requires 0 < st(0) < 1. */
++			poly_2xm1(getsign(st0_ptr), &a, st0_ptr);
++		}
++		set_precision_flag_up();	/* 80486 appears to always do this */
++		return;
  	}
-diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
-index 03fa6ed..2fd74b0 100644
---- a/arch/x86/kernel/smp_64.c
-+++ b/arch/x86/kernel/smp_64.c
-@@ -29,7 +29,7 @@
- #include <asm/idle.h>
- 
- /*
-- *	Smarter SMP flushing macros. 
-+ *	Smarter SMP flushing macros.
-  *		c/o Linus Torvalds.
-  *
-  *	These mean you can really definitely utterly forget about
-@@ -37,15 +37,15 @@
-  *
-  *	Optimizations Manfred Spraul <manfred at colorfullife.com>
-  *
-- * 	More scalable flush, from Andi Kleen
-+ *	More scalable flush, from Andi Kleen
-  *
-- * 	To avoid global state use 8 different call vectors.
-- * 	Each CPU uses a specific vector to trigger flushes on other
-- * 	CPUs. Depending on the received vector the target CPUs look into
-+ *	To avoid global state use 8 different call vectors.
-+ *	Each CPU uses a specific vector to trigger flushes on other
-+ *	CPUs. Depending on the received vector the target CPUs look into
-  *	the right per cpu variable for the flush data.
-  *
-- * 	With more than 8 CPUs they are hashed to the 8 available
-- * 	vectors. The limited global vector space forces us to this right now.
-+ *	With more than 8 CPUs they are hashed to the 8 available
-+ *	vectors. The limited global vector space forces us to this right now.
-  *	In future when interrupts are split into per CPU domains this could be
-  *	fixed, at the cost of triggering multiple IPIs in some cases.
-  */
-@@ -55,7 +55,6 @@ union smp_flush_state {
- 		cpumask_t flush_cpumask;
- 		struct mm_struct *flush_mm;
- 		unsigned long flush_va;
--#define FLUSH_ALL	-1ULL
- 		spinlock_t tlbstate_lock;
- 	};
- 	char pad[SMP_CACHE_BYTES];
-@@ -67,16 +66,17 @@ union smp_flush_state {
- static DEFINE_PER_CPU(union smp_flush_state, flush_state);
+-      set_precision_flag_up();   /* 80486 appears to always do this */
+-      return;
+-    }
  
- /*
-- * We cannot call mmdrop() because we are in interrupt context, 
-+ * We cannot call mmdrop() because we are in interrupt context,
-  * instead update mm->cpu_vm_mask.
-  */
--static inline void leave_mm(int cpu)
-+void leave_mm(int cpu)
- {
- 	if (read_pda(mmu_state) == TLBSTATE_OK)
- 		BUG();
- 	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
- 	load_cr3(swapper_pg_dir);
- }
-+EXPORT_SYMBOL_GPL(leave_mm);
+-  if ( tag == TAG_Zero )
+-    return;
++	if (tag == TAG_Zero)
++		return;
  
- /*
-  *
-@@ -85,25 +85,25 @@ static inline void leave_mm(int cpu)
-  * 1) switch_mm() either 1a) or 1b)
-  * 1a) thread switch to a different mm
-  * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
-- * 	Stop ipi delivery for the old mm. This is not synchronized with
-- * 	the other cpus, but smp_invalidate_interrupt ignore flush ipis
-- * 	for the wrong mm, and in the worst case we perform a superfluous
-- * 	tlb flush.
-+ *	Stop ipi delivery for the old mm. This is not synchronized with
-+ *	the other cpus, but smp_invalidate_interrupt ignore flush ipis
-+ *	for the wrong mm, and in the worst case we perform a superfluous
-+ *	tlb flush.
-  * 1a2) set cpu mmu_state to TLBSTATE_OK
-- * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-  *	was in lazy tlb mode.
-  * 1a3) update cpu active_mm
-- * 	Now cpu0 accepts tlb flushes for the new mm.
-+ *	Now cpu0 accepts tlb flushes for the new mm.
-  * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
-- * 	Now the other cpus will send tlb flush ipis.
-+ *	Now the other cpus will send tlb flush ipis.
-  * 1a4) change cr3.
-  * 1b) thread switch without mm change
-  *	cpu active_mm is correct, cpu0 already handles
-  *	flush ipis.
-  * 1b1) set cpu mmu_state to TLBSTATE_OK
-  * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-- * 	Atomically set the bit [other cpus will start sending flush ipis],
-- * 	and test the bit.
-+ *	Atomically set the bit [other cpus will start sending flush ipis],
-+ *	and test the bit.
-  * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-  * 2) switch %%esp, ie current
-  *
-@@ -137,12 +137,12 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
- 	 * orig_rax contains the negated interrupt vector.
- 	 * Use that to determine where the sender put the data.
- 	 */
--	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
-+	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
- 	f = &per_cpu(flush_state, sender);
+-  if ( tag == TAG_Special )
+-    tag = FPU_Special(st0_ptr);
++	if (tag == TAG_Special)
++		tag = FPU_Special(st0_ptr);
  
- 	if (!cpu_isset(cpu, f->flush_cpumask))
- 		goto out;
--		/* 
-+		/*
- 		 * This was a BUG() but until someone can quote me the
- 		 * line from the intel manual that guarantees an IPI to
- 		 * multiple CPUs is retried _only_ on the erroring CPUs
-@@ -150,10 +150,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
- 		 *
- 		 * BUG();
- 		 */
--		 
-+
- 	if (f->flush_mm == read_pda(active_mm)) {
- 		if (read_pda(mmu_state) == TLBSTATE_OK) {
--			if (f->flush_va == FLUSH_ALL)
-+			if (f->flush_va == TLB_FLUSH_ALL)
- 				local_flush_tlb();
- 			else
- 				__flush_tlb_one(f->flush_va);
-@@ -166,19 +166,22 @@ out:
- 	add_pda(irq_tlb_count, 1);
+-  switch ( tag )
+-    {
+-    case TW_Denormal:
+-      if ( denormal_operand() < 0 )
+-	return;
+-      goto denormal_arg;
+-    case TW_Infinity:
+-      if ( signnegative(st0_ptr) )
+-	{
+-	  /* -infinity gives -1 (p16-10) */
+-	  FPU_copy_to_reg0(&CONST_1, TAG_Valid);
+-	  setnegative(st0_ptr);
++	switch (tag) {
++	case TW_Denormal:
++		if (denormal_operand() < 0)
++			return;
++		goto denormal_arg;
++	case TW_Infinity:
++		if (signnegative(st0_ptr)) {
++			/* -infinity gives -1 (p16-10) */
++			FPU_copy_to_reg0(&CONST_1, TAG_Valid);
++			setnegative(st0_ptr);
++		}
++		return;
++	default:
++		single_arg_error(st0_ptr, tag);
+ 	}
+-      return;
+-    default:
+-      single_arg_error(st0_ptr, tag);
+-    }
  }
  
--static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
--						unsigned long va)
-+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-+			     unsigned long va)
- {
- 	int sender;
- 	union smp_flush_state *f;
-+	cpumask_t cpumask = *cpumaskp;
- 
- 	/* Caller has disabled preemption */
- 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
- 	f = &per_cpu(flush_state, sender);
- 
--	/* Could avoid this lock when
--	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
--	   probably not worth checking this for a cache-hot lock. */
-+	/*
-+	 * Could avoid this lock when
-+	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-+	 * probably not worth checking this for a cache-hot lock.
-+	 */
- 	spin_lock(&f->tlbstate_lock);
- 
- 	f->flush_mm = mm;
-@@ -202,14 +205,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
- int __cpuinit init_smp_flush(void)
+-
+ static void fptan(FPU_REG *st0_ptr, u_char st0_tag)
  {
- 	int i;
+-  FPU_REG *st_new_ptr;
+-  int q;
+-  u_char arg_sign = getsign(st0_ptr);
+-
+-  /* Stack underflow has higher priority */
+-  if ( st0_tag == TAG_Empty )
+-    {
+-      FPU_stack_underflow();  /* Puts a QNaN in st(0) */
+-      if ( control_word & CW_Invalid )
+-	{
+-	  st_new_ptr = &st(-1);
+-	  push();
+-	  FPU_stack_underflow();  /* Puts a QNaN in the new st(0) */
++	FPU_REG *st_new_ptr;
++	int q;
++	u_char arg_sign = getsign(st0_ptr);
 +
- 	for_each_cpu_mask(i, cpu_possible_map) {
- 		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
++	/* Stack underflow has higher priority */
++	if (st0_tag == TAG_Empty) {
++		FPU_stack_underflow();	/* Puts a QNaN in st(0) */
++		if (control_word & CW_Invalid) {
++			st_new_ptr = &st(-1);
++			push();
++			FPU_stack_underflow();	/* Puts a QNaN in the new st(0) */
++		}
++		return;
  	}
- 	return 0;
- }
+-      return;
+-    }
 -
- core_initcall(init_smp_flush);
--	
+-  if ( STACK_OVERFLOW )
+-    { FPU_stack_overflow(); return; }
+-
+-  if ( st0_tag == TAG_Valid )
+-    {
+-      if ( exponent(st0_ptr) > -40 )
+-	{
+-	  if ( (q = trig_arg(st0_ptr, 0)) == -1 )
+-	    {
+-	      /* Operand is out of range */
+-	      return;
+-	    }
+-
+-	  poly_tan(st0_ptr);
+-	  setsign(st0_ptr, (q & 1) ^ (arg_sign != 0));
+-	  set_precision_flag_up();  /* We do not really know if up or down */
 +
- void flush_tlb_current_task(void)
- {
- 	struct mm_struct *mm = current->mm;
-@@ -221,10 +224,9 @@ void flush_tlb_current_task(void)
++	if (STACK_OVERFLOW) {
++		FPU_stack_overflow();
++		return;
+ 	}
+-      else
+-	{
+-	  /* For a small arg, the result == the argument */
+-	  /* Underflow may happen */
  
- 	local_flush_tlb();
- 	if (!cpus_empty(cpu_mask))
--		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
- 	preempt_enable();
- }
--EXPORT_SYMBOL(flush_tlb_current_task);
+-	denormal_arg:
++	if (st0_tag == TAG_Valid) {
++		if (exponent(st0_ptr) > -40) {
++			if ((q = trig_arg(st0_ptr, 0)) == -1) {
++				/* Operand is out of range */
++				return;
++			}
++
++			poly_tan(st0_ptr);
++			setsign(st0_ptr, (q & 1) ^ (arg_sign != 0));
++			set_precision_flag_up();	/* We do not really know if up or down */
++		} else {
++			/* For a small arg, the result == the argument */
++			/* Underflow may happen */
++
++		      denormal_arg:
++
++			FPU_to_exp16(st0_ptr, st0_ptr);
  
- void flush_tlb_mm (struct mm_struct * mm)
- {
-@@ -241,11 +243,10 @@ void flush_tlb_mm (struct mm_struct * mm)
- 			leave_mm(smp_processor_id());
+-	  FPU_to_exp16(st0_ptr, st0_ptr);
+-      
+-	  st0_tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
+-	  FPU_settag0(st0_tag);
++			st0_tag =
++			    FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
++			FPU_settag0(st0_tag);
++		}
++		push();
++		FPU_copy_to_reg0(&CONST_1, TAG_Valid);
++		return;
  	}
- 	if (!cpus_empty(cpu_mask))
--		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
- 
- 	preempt_enable();
- }
--EXPORT_SYMBOL(flush_tlb_mm);
+-      push();
+-      FPU_copy_to_reg0(&CONST_1, TAG_Valid);
+-      return;
+-    }
+-
+-  if ( st0_tag == TAG_Zero )
+-    {
+-      push();
+-      FPU_copy_to_reg0(&CONST_1, TAG_Valid);
+-      setcc(0);
+-      return;
+-    }
+-
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-
+-  if ( st0_tag == TW_Denormal )
+-    {
+-      if ( denormal_operand() < 0 )
+-	return;
  
- void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
- {
-@@ -259,8 +260,8 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
- 	if (current->active_mm == mm) {
- 		if(current->mm)
- 			__flush_tlb_one(va);
--		 else
--		 	leave_mm(smp_processor_id());
-+		else
-+			leave_mm(smp_processor_id());
+-      goto denormal_arg;
+-    }
+-
+-  if ( st0_tag == TW_Infinity )
+-    {
+-      /* The 80486 treats infinity as an invalid operand */
+-      if ( arith_invalid(0) >= 0 )
+-	{
+-	  st_new_ptr = &st(-1);
+-	  push();
+-	  arith_invalid(0);
++	if (st0_tag == TAG_Zero) {
++		push();
++		FPU_copy_to_reg0(&CONST_1, TAG_Valid);
++		setcc(0);
++		return;
++	}
++
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
++
++	if (st0_tag == TW_Denormal) {
++		if (denormal_operand() < 0)
++			return;
++
++		goto denormal_arg;
  	}
+-      return;
+-    }
  
- 	if (!cpus_empty(cpu_mask))
-@@ -268,7 +269,6 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
+-  single_arg_2_error(st0_ptr, st0_tag);
+-}
++	if (st0_tag == TW_Infinity) {
++		/* The 80486 treats infinity as an invalid operand */
++		if (arith_invalid(0) >= 0) {
++			st_new_ptr = &st(-1);
++			push();
++			arith_invalid(0);
++		}
++		return;
++	}
  
- 	preempt_enable();
- }
--EXPORT_SYMBOL(flush_tlb_page);
++	single_arg_2_error(st0_ptr, st0_tag);
++}
  
- static void do_flush_tlb_all(void* info)
+ static void fxtract(FPU_REG *st0_ptr, u_char st0_tag)
  {
-@@ -325,11 +325,9 @@ void unlock_ipi_call_lock(void)
-  * this function sends a 'generic call function' IPI to all other CPU
-  * of the system defined in the mask.
-  */
+-  FPU_REG *st_new_ptr;
+-  u_char sign;
+-  register FPU_REG *st1_ptr = st0_ptr;  /* anticipate */
 -
--static int
--__smp_call_function_mask(cpumask_t mask,
--			 void (*func)(void *), void *info,
--			 int wait)
-+static int __smp_call_function_mask(cpumask_t mask,
-+				    void (*func)(void *), void *info,
-+				    int wait)
- {
- 	struct call_data_struct data;
- 	cpumask_t allbutself;
-@@ -417,11 +415,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
-  */
- 
- int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
--	int nonatomic, int wait)
-+			      int nonatomic, int wait)
- {
- 	/* prevent preemption and reschedule on another processor */
--	int ret;
--	int me = get_cpu();
-+	int ret, me = get_cpu();
+-  if ( STACK_OVERFLOW )
+-    {  FPU_stack_overflow(); return; }
+-
+-  clear_C1();
+-
+-  if ( st0_tag == TAG_Valid )
+-    {
+-      long e;
+-
+-      push();
+-      sign = getsign(st1_ptr);
+-      reg_copy(st1_ptr, st_new_ptr);
+-      setexponent16(st_new_ptr, exponent(st_new_ptr));
+-
+-    denormal_arg:
+-
+-      e = exponent16(st_new_ptr);
+-      convert_l2reg(&e, 1);
+-      setexponentpos(st_new_ptr, 0);
+-      setsign(st_new_ptr, sign);
+-      FPU_settag0(TAG_Valid);       /* Needed if arg was a denormal */
+-      return;
+-    }
+-  else if ( st0_tag == TAG_Zero )
+-    {
+-      sign = getsign(st0_ptr);
+-
+-      if ( FPU_divide_by_zero(0, SIGN_NEG) < 0 )
+-	return;
++	FPU_REG *st_new_ptr;
++	u_char sign;
++	register FPU_REG *st1_ptr = st0_ptr;	/* anticipate */
  
- 	/* Can deadlock when called with interrupts disabled */
- 	WARN_ON(irqs_disabled());
-@@ -471,9 +468,9 @@ static void stop_this_cpu(void *dummy)
- 	 */
- 	cpu_clear(smp_processor_id(), cpu_online_map);
- 	disable_local_APIC();
--	for (;;) 
-+	for (;;)
- 		halt();
--} 
-+}
+-      push();
+-      FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
+-      setsign(st_new_ptr, sign);
+-      return;
+-    }
++	if (STACK_OVERFLOW) {
++		FPU_stack_overflow();
++		return;
++	}
  
- void smp_send_stop(void)
- {
-diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
-index 4ea80cb..5787a0c 100644
---- a/arch/x86/kernel/smpboot_32.c
-+++ b/arch/x86/kernel/smpboot_32.c
-@@ -83,7 +83,6 @@ EXPORT_SYMBOL(cpu_online_map);
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
++	clear_C1();
  
- cpumask_t cpu_callin_map;
- cpumask_t cpu_callout_map;
--EXPORT_SYMBOL(cpu_callout_map);
- cpumask_t cpu_possible_map;
- EXPORT_SYMBOL(cpu_possible_map);
- static cpumask_t smp_commenced_mask;
-@@ -92,15 +91,10 @@ static cpumask_t smp_commenced_mask;
- DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
- EXPORT_PER_CPU_SYMBOL(cpu_info);
+-  if ( st0_tag == TW_Denormal )
+-    {
+-      if (denormal_operand() < 0 )
+-	return;
++	if (st0_tag == TAG_Valid) {
++		long e;
  
--/*
-- * The following static array is used during kernel startup
-- * and the x86_cpu_to_apicid_ptr contains the address of the
-- * array during this time.  Is it zeroed when the per_cpu
-- * data area is removed.
-- */
-+/* which logical CPU number maps to which CPU (physical APIC ID) */
- u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
- 			{ [0 ... NR_CPUS-1] = BAD_APICID };
--void *x86_cpu_to_apicid_ptr;
-+void *x86_cpu_to_apicid_early_ptr;
- DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
- EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+-      push();
+-      sign = getsign(st1_ptr);
+-      FPU_to_exp16(st1_ptr, st_new_ptr);
+-      goto denormal_arg;
+-    }
+-  else if ( st0_tag == TW_Infinity )
+-    {
+-      sign = getsign(st0_ptr);
+-      setpositive(st0_ptr);
+-      push();
+-      FPU_copy_to_reg0(&CONST_INF, TAG_Special);
+-      setsign(st_new_ptr, sign);
+-      return;
+-    }
+-  else if ( st0_tag == TW_NaN )
+-    {
+-      if ( real_1op_NaN(st0_ptr) < 0 )
+-	return;
++		push();
++		sign = getsign(st1_ptr);
++		reg_copy(st1_ptr, st_new_ptr);
++		setexponent16(st_new_ptr, exponent(st_new_ptr));
++
++	      denormal_arg:
++
++		e = exponent16(st_new_ptr);
++		convert_l2reg(&e, 1);
++		setexponentpos(st_new_ptr, 0);
++		setsign(st_new_ptr, sign);
++		FPU_settag0(TAG_Valid);	/* Needed if arg was a denormal */
++		return;
++	} else if (st0_tag == TAG_Zero) {
++		sign = getsign(st0_ptr);
++
++		if (FPU_divide_by_zero(0, SIGN_NEG) < 0)
++			return;
  
-@@ -113,7 +107,6 @@ u8 apicid_2_node[MAX_APICID];
- extern const unsigned char trampoline_data [];
- extern const unsigned char trampoline_end  [];
- static unsigned char *trampoline_base;
--static int trampoline_exec;
+-      push();
+-      FPU_copy_to_reg0(st0_ptr, TAG_Special);
+-      return;
+-    }
+-  else if ( st0_tag == TAG_Empty )
+-    {
+-      /* Is this the correct behaviour? */
+-      if ( control_word & EX_Invalid )
+-	{
+-	  FPU_stack_underflow();
+-	  push();
+-	  FPU_stack_underflow();
++		push();
++		FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
++		setsign(st_new_ptr, sign);
++		return;
++	}
++
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
++
++	if (st0_tag == TW_Denormal) {
++		if (denormal_operand() < 0)
++			return;
++
++		push();
++		sign = getsign(st1_ptr);
++		FPU_to_exp16(st1_ptr, st_new_ptr);
++		goto denormal_arg;
++	} else if (st0_tag == TW_Infinity) {
++		sign = getsign(st0_ptr);
++		setpositive(st0_ptr);
++		push();
++		FPU_copy_to_reg0(&CONST_INF, TAG_Special);
++		setsign(st_new_ptr, sign);
++		return;
++	} else if (st0_tag == TW_NaN) {
++		if (real_1op_NaN(st0_ptr) < 0)
++			return;
++
++		push();
++		FPU_copy_to_reg0(st0_ptr, TAG_Special);
++		return;
++	} else if (st0_tag == TAG_Empty) {
++		/* Is this the correct behaviour? */
++		if (control_word & EX_Invalid) {
++			FPU_stack_underflow();
++			push();
++			FPU_stack_underflow();
++		} else
++			EXCEPTION(EX_StackUnder);
+ 	}
+-      else
+-	EXCEPTION(EX_StackUnder);
+-    }
+ #ifdef PARANOID
+-  else
+-    EXCEPTION(EX_INTERNAL | 0x119);
++	else
++		EXCEPTION(EX_INTERNAL | 0x119);
+ #endif /* PARANOID */
+ }
  
- static void map_cpu_to_logical_apicid(void);
+-
+ static void fdecstp(void)
+ {
+-  clear_C1();
+-  top--;
++	clear_C1();
++	top--;
+ }
  
-@@ -138,17 +131,13 @@ static unsigned long __cpuinit setup_trampoline(void)
-  */
- void __init smp_alloc_memory(void)
+ static void fincstp(void)
  {
--	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
-+	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
- 	/*
- 	 * Has to be in very low memory so we can execute
- 	 * real-mode AP code.
- 	 */
- 	if (__pa(trampoline_base) >= 0x9F000)
- 		BUG();
--	/*
--	 * Make the SMP trampoline executable:
--	 */
--	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
+-  clear_C1();
+-  top++;
++	clear_C1();
++	top++;
  }
  
- /*
-@@ -405,7 +394,7 @@ static void __cpuinit start_secondary(void *unused)
- 	setup_secondary_clock();
- 	if (nmi_watchdog == NMI_IO_APIC) {
- 		disable_8259A_irq(0);
--		enable_NMI_through_LVT0(NULL);
-+		enable_NMI_through_LVT0();
- 		enable_8259A_irq(0);
- 	}
- 	/*
-@@ -448,38 +437,38 @@ void __devinit initialize_secondary(void)
+-
+ static void fsqrt_(FPU_REG *st0_ptr, u_char st0_tag)
  {
- 	/*
- 	 * We don't actually need to load the full TSS,
--	 * basically just the stack pointer and the eip.
-+	 * basically just the stack pointer and the ip.
- 	 */
+-  int expon;
+-
+-  clear_C1();
+-
+-  if ( st0_tag == TAG_Valid )
+-    {
+-      u_char tag;
+-      
+-      if (signnegative(st0_ptr))
+-	{
+-	  arith_invalid(0);  /* sqrt(negative) is invalid */
+-	  return;
+-	}
++	int expon;
++
++	clear_C1();
  
- 	asm volatile(
- 		"movl %0,%%esp\n\t"
- 		"jmp *%1"
- 		:
--		:"m" (current->thread.esp),"m" (current->thread.eip));
-+		:"m" (current->thread.sp),"m" (current->thread.ip));
- }
+-      /* make st(0) in  [1.0 .. 4.0) */
+-      expon = exponent(st0_ptr);
+-
+-    denormal_arg:
+-
+-      setexponent16(st0_ptr, (expon & 1));
+-
+-      /* Do the computation, the sign of the result will be positive. */
+-      tag = wm_sqrt(st0_ptr, 0, 0, control_word, SIGN_POS);
+-      addexponent(st0_ptr, expon >> 1);
+-      FPU_settag0(tag);
+-      return;
+-    }
+-
+-  if ( st0_tag == TAG_Zero )
+-    return;
+-
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-
+-  if ( st0_tag == TW_Infinity )
+-    {
+-      if ( signnegative(st0_ptr) )
+-	arith_invalid(0);  /* sqrt(-Infinity) is invalid */
+-      return;
+-    }
+-  else if ( st0_tag == TW_Denormal )
+-    {
+-      if (signnegative(st0_ptr))
+-	{
+-	  arith_invalid(0);  /* sqrt(negative) is invalid */
+-	  return;
++	if (st0_tag == TAG_Valid) {
++		u_char tag;
++
++		if (signnegative(st0_ptr)) {
++			arith_invalid(0);	/* sqrt(negative) is invalid */
++			return;
++		}
++
++		/* make st(0) in  [1.0 .. 4.0) */
++		expon = exponent(st0_ptr);
++
++	      denormal_arg:
++
++		setexponent16(st0_ptr, (expon & 1));
++
++		/* Do the computation, the sign of the result will be positive. */
++		tag = wm_sqrt(st0_ptr, 0, 0, control_word, SIGN_POS);
++		addexponent(st0_ptr, expon >> 1);
++		FPU_settag0(tag);
++		return;
+ 	}
  
- /* Static state in head.S used to set up a CPU */
- extern struct {
--	void * esp;
-+	void * sp;
- 	unsigned short ss;
- } stack_start;
+-      if ( denormal_operand() < 0 )
+-	return;
++	if (st0_tag == TAG_Zero)
++		return;
  
- #ifdef CONFIG_NUMA
+-      FPU_to_exp16(st0_ptr, st0_ptr);
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
  
- /* which logical CPUs are on which nodes */
--cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
-+cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
- 				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
--EXPORT_SYMBOL(node_2_cpu_mask);
-+EXPORT_SYMBOL(node_to_cpumask_map);
- /* which node each logical CPU is on */
--int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
--EXPORT_SYMBOL(cpu_2_node);
-+int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
-+EXPORT_SYMBOL(cpu_to_node_map);
+-      expon = exponent16(st0_ptr);
++	if (st0_tag == TW_Infinity) {
++		if (signnegative(st0_ptr))
++			arith_invalid(0);	/* sqrt(-Infinity) is invalid */
++		return;
++	} else if (st0_tag == TW_Denormal) {
++		if (signnegative(st0_ptr)) {
++			arith_invalid(0);	/* sqrt(negative) is invalid */
++			return;
++		}
  
- /* set up a mapping between cpu and node. */
- static inline void map_cpu_to_node(int cpu, int node)
- {
- 	printk("Mapping cpu %d to node %d\n", cpu, node);
--	cpu_set(cpu, node_2_cpu_mask[node]);
--	cpu_2_node[cpu] = node;
-+	cpu_set(cpu, node_to_cpumask_map[node]);
-+	cpu_to_node_map[cpu] = node;
- }
+-      goto denormal_arg;
+-    }
++		if (denormal_operand() < 0)
++			return;
  
- /* undo a mapping between cpu and node. */
-@@ -489,8 +478,8 @@ static inline void unmap_cpu_to_node(int cpu)
+-  single_arg_error(st0_ptr, st0_tag);
++		FPU_to_exp16(st0_ptr, st0_ptr);
  
- 	printk("Unmapping cpu %d from all nodes\n", cpu);
- 	for (node = 0; node < MAX_NUMNODES; node ++)
--		cpu_clear(cpu, node_2_cpu_mask[node]);
--	cpu_2_node[cpu] = 0;
-+		cpu_clear(cpu, node_to_cpumask_map[node]);
-+	cpu_to_node_map[cpu] = 0;
- }
- #else /* !CONFIG_NUMA */
+-}
++		expon = exponent16(st0_ptr);
++
++		goto denormal_arg;
++	}
  
-@@ -668,7 +657,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
- 	 * target processor state.
- 	 */
- 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
--		         (unsigned long) stack_start.esp);
-+		         (unsigned long) stack_start.sp);
++	single_arg_error(st0_ptr, st0_tag);
++
++}
  
- 	/*
- 	 * Run STARTUP IPI loop.
-@@ -754,7 +743,7 @@ static inline struct task_struct * __cpuinit alloc_idle_task(int cpu)
- 		/* initialize thread_struct.  we really want to avoid destroy
- 		 * idle tread
- 		 */
--		idle->thread.esp = (unsigned long)task_pt_regs(idle);
-+		idle->thread.sp = (unsigned long)task_pt_regs(idle);
- 		init_idle(idle, cpu);
- 		return idle;
- 	}
-@@ -799,7 +788,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
-  	per_cpu(current_task, cpu) = idle;
- 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ static void frndint_(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+-  int flags, tag;
++	int flags, tag;
  
--	idle->thread.eip = (unsigned long) start_secondary;
-+	idle->thread.ip = (unsigned long) start_secondary;
- 	/* start_eip had better be page-aligned! */
- 	start_eip = setup_trampoline();
+-  if ( st0_tag == TAG_Valid )
+-    {
+-      u_char sign;
++	if (st0_tag == TAG_Valid) {
++		u_char sign;
  
-@@ -807,9 +796,9 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
- 	alternatives_smp_switch(1);
+-    denormal_arg:
++	      denormal_arg:
  
- 	/* So we see what's up   */
--	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
-+	printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
- 	/* Stack for startup_32 can be just as for start_secondary onwards */
--	stack_start.esp = (void *) idle->thread.esp;
-+	stack_start.sp = (void *) idle->thread.sp;
+-      sign = getsign(st0_ptr);
++		sign = getsign(st0_ptr);
  
- 	irq_ctx_init(cpu);
+-      if (exponent(st0_ptr) > 63)
+-	return;
++		if (exponent(st0_ptr) > 63)
++			return;
++
++		if (st0_tag == TW_Denormal) {
++			if (denormal_operand() < 0)
++				return;
++		}
++
++		/* Fortunately, this can't overflow to 2^64 */
++		if ((flags = FPU_round_to_int(st0_ptr, st0_tag)))
++			set_precision_flag(flags);
  
-@@ -1091,7 +1080,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
- 	 * Allow the user to impress friends.
- 	 */
- 	Dprintk("Before bogomips.\n");
--	for (cpu = 0; cpu < NR_CPUS; cpu++)
-+	for_each_possible_cpu(cpu)
- 		if (cpu_isset(cpu, cpu_callout_map))
- 			bogosum += cpu_data(cpu).loops_per_jiffy;
- 	printk(KERN_INFO
-@@ -1122,7 +1111,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
- 	 * construct cpu_sibling_map, so that we can tell sibling CPUs
- 	 * efficiently.
- 	 */
--	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+	for_each_possible_cpu(cpu) {
- 		cpus_clear(per_cpu(cpu_sibling_map, cpu));
- 		cpus_clear(per_cpu(cpu_core_map, cpu));
+-      if ( st0_tag == TW_Denormal )
+-	{
+-	  if (denormal_operand() < 0 )
+-	    return;
++		setexponent16(st0_ptr, 63);
++		tag = FPU_normalize(st0_ptr);
++		setsign(st0_ptr, sign);
++		FPU_settag0(tag);
++		return;
  	}
-@@ -1296,12 +1285,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
- 	setup_ioapic_dest();
- #endif
- 	zap_low_mappings();
--#ifndef CONFIG_HOTPLUG_CPU
--	/*
--	 * Disable executability of the SMP trampoline:
--	 */
--	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
--#endif
- }
  
- void __init smp_intr_init(void)
-diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
-index aaf4e12..cc64b80 100644
---- a/arch/x86/kernel/smpboot_64.c
-+++ b/arch/x86/kernel/smpboot_64.c
-@@ -65,7 +65,7 @@ int smp_num_siblings = 1;
- EXPORT_SYMBOL(smp_num_siblings);
+-      /* Fortunately, this can't overflow to 2^64 */
+-      if ( (flags = FPU_round_to_int(st0_ptr, st0_tag)) )
+-	set_precision_flag(flags);
+-
+-      setexponent16(st0_ptr, 63);
+-      tag = FPU_normalize(st0_ptr);
+-      setsign(st0_ptr, sign);
+-      FPU_settag0(tag);
+-      return;
+-    }
+-
+-  if ( st0_tag == TAG_Zero )
+-    return;
+-
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-
+-  if ( st0_tag == TW_Denormal )
+-    goto denormal_arg;
+-  else if ( st0_tag == TW_Infinity )
+-    return;
+-  else
+-    single_arg_error(st0_ptr, st0_tag);
+-}
++	if (st0_tag == TAG_Zero)
++		return;
  
- /* Last level cache ID of each logical CPU */
--DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
-+DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
++
++	if (st0_tag == TW_Denormal)
++		goto denormal_arg;
++	else if (st0_tag == TW_Infinity)
++		return;
++	else
++		single_arg_error(st0_ptr, st0_tag);
++}
  
- /* Bitmask of currently online CPUs */
- cpumask_t cpu_online_map __read_mostly;
-@@ -78,8 +78,6 @@ EXPORT_SYMBOL(cpu_online_map);
-  */
- cpumask_t cpu_callin_map;
- cpumask_t cpu_callout_map;
--EXPORT_SYMBOL(cpu_callout_map);
+ static int fsin(FPU_REG *st0_ptr, u_char tag)
+ {
+-  u_char arg_sign = getsign(st0_ptr);
 -
- cpumask_t cpu_possible_map;
- EXPORT_SYMBOL(cpu_possible_map);
- 
-@@ -113,10 +111,20 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
-  * a new thread. Also avoids complicated thread destroy functionality
-  * for idle threads.
-  */
-+#ifdef CONFIG_HOTPLUG_CPU
-+/*
-+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
-+ * removed after init for !CONFIG_HOTPLUG_CPU.
-+ */
-+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-+#define get_idle_for_cpu(x)     (per_cpu(idle_thread_array, x))
-+#define set_idle_for_cpu(x,p)   (per_cpu(idle_thread_array, x) = (p))
-+#else
- struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+-  if ( tag == TAG_Valid )
+-    {
+-      int q;
 -
- #define get_idle_for_cpu(x)     (idle_thread_array[(x)])
- #define set_idle_for_cpu(x,p)   (idle_thread_array[(x)] = (p))
-+#endif
+-      if ( exponent(st0_ptr) > -40 )
+-	{
+-	  if ( (q = trig_arg(st0_ptr, 0)) == -1 )
+-	    {
+-	      /* Operand is out of range */
+-	      return 1;
+-	    }
+-
+-	  poly_sine(st0_ptr);
+-	  
+-	  if (q & 2)
+-	    changesign(st0_ptr);
+-
+-	  setsign(st0_ptr, getsign(st0_ptr) ^ arg_sign);
+-
+-	  /* We do not really know if up or down */
+-	  set_precision_flag_up();
+-	  return 0;
++	u_char arg_sign = getsign(st0_ptr);
 +
++	if (tag == TAG_Valid) {
++		int q;
++
++		if (exponent(st0_ptr) > -40) {
++			if ((q = trig_arg(st0_ptr, 0)) == -1) {
++				/* Operand is out of range */
++				return 1;
++			}
++
++			poly_sine(st0_ptr);
++
++			if (q & 2)
++				changesign(st0_ptr);
++
++			setsign(st0_ptr, getsign(st0_ptr) ^ arg_sign);
++
++			/* We do not really know if up or down */
++			set_precision_flag_up();
++			return 0;
++		} else {
++			/* For a small arg, the result == the argument */
++			set_precision_flag_up();	/* Must be up. */
++			return 0;
++		}
+ 	}
+-      else
+-	{
+-	  /* For a small arg, the result == the argument */
+-	  set_precision_flag_up();  /* Must be up. */
+-	  return 0;
++
++	if (tag == TAG_Zero) {
++		setcc(0);
++		return 0;
+ 	}
+-    }
+-
+-  if ( tag == TAG_Zero )
+-    {
+-      setcc(0);
+-      return 0;
+-    }
+-
+-  if ( tag == TAG_Special )
+-    tag = FPU_Special(st0_ptr);
+-
+-  if ( tag == TW_Denormal )
+-    {
+-      if ( denormal_operand() < 0 )
+-	return 1;
+-
+-      /* For a small arg, the result == the argument */
+-      /* Underflow may happen */
+-      FPU_to_exp16(st0_ptr, st0_ptr);
+-      
+-      tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
+-
+-      FPU_settag0(tag);
+-
+-      return 0;
+-    }
+-  else if ( tag == TW_Infinity )
+-    {
+-      /* The 80486 treats infinity as an invalid operand */
+-      arith_invalid(0);
+-      return 1;
+-    }
+-  else
+-    {
+-      single_arg_error(st0_ptr, tag);
+-      return 1;
+-    }
+-}
  
- /*
-  * Currently trivial. Write the real->protected mode
-@@ -212,6 +220,7 @@ void __cpuinit smp_callin(void)
- 
- 	Dprintk("CALLIN, before setup_local_APIC().\n");
- 	setup_local_APIC();
-+	end_local_APIC_setup();
++	if (tag == TAG_Special)
++		tag = FPU_Special(st0_ptr);
++
++	if (tag == TW_Denormal) {
++		if (denormal_operand() < 0)
++			return 1;
++
++		/* For a small arg, the result == the argument */
++		/* Underflow may happen */
++		FPU_to_exp16(st0_ptr, st0_ptr);
++
++		tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
++
++		FPU_settag0(tag);
++
++		return 0;
++	} else if (tag == TW_Infinity) {
++		/* The 80486 treats infinity as an invalid operand */
++		arith_invalid(0);
++		return 1;
++	} else {
++		single_arg_error(st0_ptr, tag);
++		return 1;
++	}
++}
  
- 	/*
- 	 * Get our bogomips.
-@@ -338,7 +347,7 @@ void __cpuinit start_secondary(void)
+ static int f_cos(FPU_REG *st0_ptr, u_char tag)
+ {
+-  u_char st0_sign;
+-
+-  st0_sign = getsign(st0_ptr);
+-
+-  if ( tag == TAG_Valid )
+-    {
+-      int q;
+-
+-      if ( exponent(st0_ptr) > -40 )
+-	{
+-	  if ( (exponent(st0_ptr) < 0)
+-	      || ((exponent(st0_ptr) == 0)
+-		  && (significand(st0_ptr) <= 0xc90fdaa22168c234LL)) )
+-	    {
+-	      poly_cos(st0_ptr);
+-
+-	      /* We do not really know if up or down */
+-	      set_precision_flag_down();
+-	  
+-	      return 0;
+-	    }
+-	  else if ( (q = trig_arg(st0_ptr, FCOS)) != -1 )
+-	    {
+-	      poly_sine(st0_ptr);
+-
+-	      if ((q+1) & 2)
+-		changesign(st0_ptr);
+-
+-	      /* We do not really know if up or down */
+-	      set_precision_flag_down();
+-	  
+-	      return 0;
+-	    }
+-	  else
+-	    {
+-	      /* Operand is out of range */
+-	      return 1;
+-	    }
+-	}
+-      else
+-	{
+-	denormal_arg:
++	u_char st0_sign;
++
++	st0_sign = getsign(st0_ptr);
  
- 	if (nmi_watchdog == NMI_IO_APIC) {
- 		disable_8259A_irq(0);
--		enable_NMI_through_LVT0(NULL);
-+		enable_NMI_through_LVT0();
- 		enable_8259A_irq(0);
+-	  setcc(0);
+-	  FPU_copy_to_reg0(&CONST_1, TAG_Valid);
++	if (tag == TAG_Valid) {
++		int q;
++
++		if (exponent(st0_ptr) > -40) {
++			if ((exponent(st0_ptr) < 0)
++			    || ((exponent(st0_ptr) == 0)
++				&& (significand(st0_ptr) <=
++				    0xc90fdaa22168c234LL))) {
++				poly_cos(st0_ptr);
++
++				/* We do not really know if up or down */
++				set_precision_flag_down();
++
++				return 0;
++			} else if ((q = trig_arg(st0_ptr, FCOS)) != -1) {
++				poly_sine(st0_ptr);
++
++				if ((q + 1) & 2)
++					changesign(st0_ptr);
++
++				/* We do not really know if up or down */
++				set_precision_flag_down();
++
++				return 0;
++			} else {
++				/* Operand is out of range */
++				return 1;
++			}
++		} else {
++		      denormal_arg:
++
++			setcc(0);
++			FPU_copy_to_reg0(&CONST_1, TAG_Valid);
+ #ifdef PECULIAR_486
+-	  set_precision_flag_down();  /* 80486 appears to do this. */
++			set_precision_flag_down();	/* 80486 appears to do this. */
+ #else
+-	  set_precision_flag_up();  /* Must be up. */
++			set_precision_flag_up();	/* Must be up. */
+ #endif /* PECULIAR_486 */
+-	  return 0;
++			return 0;
++		}
++	} else if (tag == TAG_Zero) {
++		FPU_copy_to_reg0(&CONST_1, TAG_Valid);
++		setcc(0);
++		return 0;
  	}
+-    }
+-  else if ( tag == TAG_Zero )
+-    {
+-      FPU_copy_to_reg0(&CONST_1, TAG_Valid);
+-      setcc(0);
+-      return 0;
+-    }
+-
+-  if ( tag == TAG_Special )
+-    tag = FPU_Special(st0_ptr);
+-
+-  if ( tag == TW_Denormal )
+-    {
+-      if ( denormal_operand() < 0 )
+-	return 1;
+-
+-      goto denormal_arg;
+-    }
+-  else if ( tag == TW_Infinity )
+-    {
+-      /* The 80486 treats infinity as an invalid operand */
+-      arith_invalid(0);
+-      return 1;
+-    }
+-  else
+-    {
+-      single_arg_error(st0_ptr, tag);  /* requires st0_ptr == &st(0) */
+-      return 1;
+-    }
+-}
  
-@@ -370,7 +379,7 @@ void __cpuinit start_secondary(void)
++	if (tag == TAG_Special)
++		tag = FPU_Special(st0_ptr);
++
++	if (tag == TW_Denormal) {
++		if (denormal_operand() < 0)
++			return 1;
++
++		goto denormal_arg;
++	} else if (tag == TW_Infinity) {
++		/* The 80486 treats infinity as an invalid operand */
++		arith_invalid(0);
++		return 1;
++	} else {
++		single_arg_error(st0_ptr, tag);	/* requires st0_ptr == &st(0) */
++		return 1;
++	}
++}
  
- 	unlock_ipi_call_lock();
+ static void fcos(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+-  f_cos(st0_ptr, st0_tag);
++	f_cos(st0_ptr, st0_tag);
+ }
  
--	setup_secondary_APIC_clock();
-+	setup_secondary_clock();
+-
+ static void fsincos(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+-  FPU_REG *st_new_ptr;
+-  FPU_REG arg;
+-  u_char tag;
+-
+-  /* Stack underflow has higher priority */
+-  if ( st0_tag == TAG_Empty )
+-    {
+-      FPU_stack_underflow();  /* Puts a QNaN in st(0) */
+-      if ( control_word & CW_Invalid )
+-	{
+-	  st_new_ptr = &st(-1);
+-	  push();
+-	  FPU_stack_underflow();  /* Puts a QNaN in the new st(0) */
++	FPU_REG *st_new_ptr;
++	FPU_REG arg;
++	u_char tag;
++
++	/* Stack underflow has higher priority */
++	if (st0_tag == TAG_Empty) {
++		FPU_stack_underflow();	/* Puts a QNaN in st(0) */
++		if (control_word & CW_Invalid) {
++			st_new_ptr = &st(-1);
++			push();
++			FPU_stack_underflow();	/* Puts a QNaN in the new st(0) */
++		}
++		return;
+ 	}
+-      return;
+-    }
+-
+-  if ( STACK_OVERFLOW )
+-    { FPU_stack_overflow(); return; }
+-
+-  if ( st0_tag == TAG_Special )
+-    tag = FPU_Special(st0_ptr);
+-  else
+-    tag = st0_tag;
+-
+-  if ( tag == TW_NaN )
+-    {
+-      single_arg_2_error(st0_ptr, TW_NaN);
+-      return;
+-    }
+-  else if ( tag == TW_Infinity )
+-    {
+-      /* The 80486 treats infinity as an invalid operand */
+-      if ( arith_invalid(0) >= 0 )
+-	{
+-	  /* Masked response */
+-	  push();
+-	  arith_invalid(0);
++
++	if (STACK_OVERFLOW) {
++		FPU_stack_overflow();
++		return;
+ 	}
+-      return;
+-    }
+-
+-  reg_copy(st0_ptr, &arg);
+-  if ( !fsin(st0_ptr, st0_tag) )
+-    {
+-      push();
+-      FPU_copy_to_reg0(&arg, st0_tag);
+-      f_cos(&st(0), st0_tag);
+-    }
+-  else
+-    {
+-      /* An error, so restore st(0) */
+-      FPU_copy_to_reg0(&arg, st0_tag);
+-    }
+-}
  
- 	cpu_idle();
++	if (st0_tag == TAG_Special)
++		tag = FPU_Special(st0_ptr);
++	else
++		tag = st0_tag;
++
++	if (tag == TW_NaN) {
++		single_arg_2_error(st0_ptr, TW_NaN);
++		return;
++	} else if (tag == TW_Infinity) {
++		/* The 80486 treats infinity as an invalid operand */
++		if (arith_invalid(0) >= 0) {
++			/* Masked response */
++			push();
++			arith_invalid(0);
++		}
++		return;
++	}
++
++	reg_copy(st0_ptr, &arg);
++	if (!fsin(st0_ptr, st0_tag)) {
++		push();
++		FPU_copy_to_reg0(&arg, st0_tag);
++		f_cos(&st(0), st0_tag);
++	} else {
++		/* An error, so restore st(0) */
++		FPU_copy_to_reg0(&arg, st0_tag);
++	}
++}
+ 
+ /*---------------------------------------------------------------------------*/
+ /* The following all require two arguments: st(0) and st(1) */
+@@ -826,1020 +743,901 @@ static void fsincos(FPU_REG *st0_ptr, u_char st0_tag)
+    result must be zero.
+  */
+ static void rem_kernel(unsigned long long st0, unsigned long long *y,
+-		       unsigned long long st1,
+-		       unsigned long long q, int n)
++		       unsigned long long st1, unsigned long long q, int n)
+ {
+-  int dummy;
+-  unsigned long long x;
+-
+-  x = st0 << n;
+-
+-  /* Do the required multiplication and subtraction in the one operation */
+-
+-  /* lsw x -= lsw st1 * lsw q */
+-  asm volatile ("mull %4; subl %%eax,%0; sbbl %%edx,%1"
+-		:"=m" (((unsigned *)&x)[0]), "=m" (((unsigned *)&x)[1]),
+-		"=a" (dummy)
+-		:"2" (((unsigned *)&st1)[0]), "m" (((unsigned *)&q)[0])
+-		:"%dx");
+-  /* msw x -= msw st1 * lsw q */
+-  asm volatile ("mull %3; subl %%eax,%0"
+-		:"=m" (((unsigned *)&x)[1]), "=a" (dummy)
+-		:"1" (((unsigned *)&st1)[1]), "m" (((unsigned *)&q)[0])
+-		:"%dx");
+-  /* msw x -= lsw st1 * msw q */
+-  asm volatile ("mull %3; subl %%eax,%0"
+-		:"=m" (((unsigned *)&x)[1]), "=a" (dummy)
+-		:"1" (((unsigned *)&st1)[0]), "m" (((unsigned *)&q)[1])
+-		:"%dx");
+-
+-  *y = x;
++	int dummy;
++	unsigned long long x;
++
++	x = st0 << n;
++
++	/* Do the required multiplication and subtraction in the one operation */
++
++	/* lsw x -= lsw st1 * lsw q */
++	asm volatile ("mull %4; subl %%eax,%0; sbbl %%edx,%1":"=m"
++		      (((unsigned *)&x)[0]), "=m"(((unsigned *)&x)[1]),
++		      "=a"(dummy)
++		      :"2"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[0])
++		      :"%dx");
++	/* msw x -= msw st1 * lsw q */
++	asm volatile ("mull %3; subl %%eax,%0":"=m" (((unsigned *)&x)[1]),
++		      "=a"(dummy)
++		      :"1"(((unsigned *)&st1)[1]), "m"(((unsigned *)&q)[0])
++		      :"%dx");
++	/* msw x -= lsw st1 * msw q */
++	asm volatile ("mull %3; subl %%eax,%0":"=m" (((unsigned *)&x)[1]),
++		      "=a"(dummy)
++		      :"1"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[1])
++		      :"%dx");
++
++	*y = x;
  }
-@@ -384,19 +393,20 @@ static void inquire_remote_apic(int apicid)
- 	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
- 	char *names[] = { "ID", "VERSION", "SPIV" };
- 	int timeout;
--	unsigned int status;
-+	u32 status;
  
- 	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
+-
+ /* Remainder of st(0) / st(1) */
+ /* This routine produces exact results, i.e. there is never any
+    rounding or truncation, etc of the result. */
+ static void do_fprem(FPU_REG *st0_ptr, u_char st0_tag, int round)
+ {
+-  FPU_REG *st1_ptr = &st(1);
+-  u_char st1_tag = FPU_gettagi(1);
+-
+-  if ( !((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid)) )
+-    {
+-      FPU_REG tmp, st0, st1;
+-      u_char st0_sign, st1_sign;
+-      u_char tmptag;
+-      int tag;
+-      int old_cw;
+-      int expdif;
+-      long long q;
+-      unsigned short saved_status;
+-      int cc;
+-
+-    fprem_valid:
+-      /* Convert registers for internal use. */
+-      st0_sign = FPU_to_exp16(st0_ptr, &st0);
+-      st1_sign = FPU_to_exp16(st1_ptr, &st1);
+-      expdif = exponent16(&st0) - exponent16(&st1);
+-
+-      old_cw = control_word;
+-      cc = 0;
+-
+-      /* We want the status following the denorm tests, but don't want
+-	 the status changed by the arithmetic operations. */
+-      saved_status = partial_status;
+-      control_word &= ~CW_RC;
+-      control_word |= RC_CHOP;
+-
+-      if ( expdif < 64 )
+-	{
+-	  /* This should be the most common case */
+-
+-	  if ( expdif > -2 )
+-	    {
+-	      u_char sign = st0_sign ^ st1_sign;
+-	      tag = FPU_u_div(&st0, &st1, &tmp,
+-			      PR_64_BITS | RC_CHOP | 0x3f,
+-			      sign);
+-	      setsign(&tmp, sign);
+-
+-	      if ( exponent(&tmp) >= 0 )
+-		{
+-		  FPU_round_to_int(&tmp, tag);  /* Fortunately, this can't
+-						   overflow to 2^64 */
+-		  q = significand(&tmp);
+-
+-		  rem_kernel(significand(&st0),
+-			     &significand(&tmp),
+-			     significand(&st1),
+-			     q, expdif);
+-
+-		  setexponent16(&tmp, exponent16(&st1));
+-		}
+-	      else
+-		{
+-		  reg_copy(&st0, &tmp);
+-		  q = 0;
+-		}
+-
+-	      if ( (round == RC_RND) && (tmp.sigh & 0xc0000000) )
+-		{
+-		  /* We may need to subtract st(1) once more,
+-		     to get a result <= 1/2 of st(1). */
+-		  unsigned long long x;
+-		  expdif = exponent16(&st1) - exponent16(&tmp);
+-		  if ( expdif <= 1 )
+-		    {
+-		      if ( expdif == 0 )
+-			x = significand(&st1) - significand(&tmp);
+-		      else /* expdif is 1 */
+-			x = (significand(&st1) << 1) - significand(&tmp);
+-		      if ( (x < significand(&tmp)) ||
+-			  /* or equi-distant (from 0 & st(1)) and q is odd */
+-			  ((x == significand(&tmp)) && (q & 1) ) )
+-			{
+-			  st0_sign = ! st0_sign;
+-			  significand(&tmp) = x;
+-			  q++;
++	FPU_REG *st1_ptr = &st(1);
++	u_char st1_tag = FPU_gettagi(1);
++
++	if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
++		FPU_REG tmp, st0, st1;
++		u_char st0_sign, st1_sign;
++		u_char tmptag;
++		int tag;
++		int old_cw;
++		int expdif;
++		long long q;
++		unsigned short saved_status;
++		int cc;
++
++	      fprem_valid:
++		/* Convert registers for internal use. */
++		st0_sign = FPU_to_exp16(st0_ptr, &st0);
++		st1_sign = FPU_to_exp16(st1_ptr, &st1);
++		expdif = exponent16(&st0) - exponent16(&st1);
++
++		old_cw = control_word;
++		cc = 0;
++
++		/* We want the status following the denorm tests, but don't want
++		   the status changed by the arithmetic operations. */
++		saved_status = partial_status;
++		control_word &= ~CW_RC;
++		control_word |= RC_CHOP;
++
++		if (expdif < 64) {
++			/* This should be the most common case */
++
++			if (expdif > -2) {
++				u_char sign = st0_sign ^ st1_sign;
++				tag = FPU_u_div(&st0, &st1, &tmp,
++						PR_64_BITS | RC_CHOP | 0x3f,
++						sign);
++				setsign(&tmp, sign);
++
++				if (exponent(&tmp) >= 0) {
++					FPU_round_to_int(&tmp, tag);	/* Fortunately, this can't
++									   overflow to 2^64 */
++					q = significand(&tmp);
++
++					rem_kernel(significand(&st0),
++						   &significand(&tmp),
++						   significand(&st1),
++						   q, expdif);
++
++					setexponent16(&tmp, exponent16(&st1));
++				} else {
++					reg_copy(&st0, &tmp);
++					q = 0;
++				}
++
++				if ((round == RC_RND)
++				    && (tmp.sigh & 0xc0000000)) {
++					/* We may need to subtract st(1) once more,
++					   to get a result <= 1/2 of st(1). */
++					unsigned long long x;
++					expdif =
++					    exponent16(&st1) - exponent16(&tmp);
++					if (expdif <= 1) {
++						if (expdif == 0)
++							x = significand(&st1) -
++							    significand(&tmp);
++						else	/* expdif is 1 */
++							x = (significand(&st1)
++							     << 1) -
++							    significand(&tmp);
++						if ((x < significand(&tmp)) ||
++						    /* or equi-distant (from 0 & st(1)) and q is odd */
++						    ((x == significand(&tmp))
++						     && (q & 1))) {
++							st0_sign = !st0_sign;
++							significand(&tmp) = x;
++							q++;
++						}
++					}
++				}
++
++				if (q & 4)
++					cc |= SW_C0;
++				if (q & 2)
++					cc |= SW_C3;
++				if (q & 1)
++					cc |= SW_C1;
++			} else {
++				control_word = old_cw;
++				setcc(0);
++				return;
+ 			}
+-		    }
+-		}
+-
+-	      if (q & 4) cc |= SW_C0;
+-	      if (q & 2) cc |= SW_C3;
+-	      if (q & 1) cc |= SW_C1;
+-	    }
+-	  else
+-	    {
+-	      control_word = old_cw;
+-	      setcc(0);
+-	      return;
+-	    }
+-	}
+-      else
+-	{
+-	  /* There is a large exponent difference ( >= 64 ) */
+-	  /* To make much sense, the code in this section should
+-	     be done at high precision. */
+-	  int exp_1, N;
+-	  u_char sign;
+-
+-	  /* prevent overflow here */
+-	  /* N is 'a number between 32 and 63' (p26-113) */
+-	  reg_copy(&st0, &tmp);
+-	  tmptag = st0_tag;
+-	  N = (expdif & 0x0000001f) + 32;  /* This choice gives results
+-					      identical to an AMD 486 */
+-	  setexponent16(&tmp, N);
+-	  exp_1 = exponent16(&st1);
+-	  setexponent16(&st1, 0);
+-	  expdif -= N;
+-
+-	  sign = getsign(&tmp) ^ st1_sign;
+-	  tag = FPU_u_div(&tmp, &st1, &tmp, PR_64_BITS | RC_CHOP | 0x3f,
+-			  sign);
+-	  setsign(&tmp, sign);
+-
+-	  FPU_round_to_int(&tmp, tag);  /* Fortunately, this can't
+-					   overflow to 2^64 */
+-
+-	  rem_kernel(significand(&st0),
+-		     &significand(&tmp),
+-		     significand(&st1),
+-		     significand(&tmp),
+-		     exponent(&tmp)
+-		     ); 
+-	  setexponent16(&tmp, exp_1 + expdif);
+-
+-	  /* It is possible for the operation to be complete here.
+-	     What does the IEEE standard say? The Intel 80486 manual
+-	     implies that the operation will never be completed at this
+-	     point, and the behaviour of a real 80486 confirms this.
+-	   */
+-	  if ( !(tmp.sigh | tmp.sigl) )
+-	    {
+-	      /* The result is zero */
+-	      control_word = old_cw;
+-	      partial_status = saved_status;
+-	      FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
+-	      setsign(&st0, st0_sign);
++		} else {
++			/* There is a large exponent difference ( >= 64 ) */
++			/* To make much sense, the code in this section should
++			   be done at high precision. */
++			int exp_1, N;
++			u_char sign;
++
++			/* prevent overflow here */
++			/* N is 'a number between 32 and 63' (p26-113) */
++			reg_copy(&st0, &tmp);
++			tmptag = st0_tag;
++			N = (expdif & 0x0000001f) + 32;	/* This choice gives results
++							   identical to an AMD 486 */
++			setexponent16(&tmp, N);
++			exp_1 = exponent16(&st1);
++			setexponent16(&st1, 0);
++			expdif -= N;
++
++			sign = getsign(&tmp) ^ st1_sign;
++			tag =
++			    FPU_u_div(&tmp, &st1, &tmp,
++				      PR_64_BITS | RC_CHOP | 0x3f, sign);
++			setsign(&tmp, sign);
++
++			FPU_round_to_int(&tmp, tag);	/* Fortunately, this can't
++							   overflow to 2^64 */
++
++			rem_kernel(significand(&st0),
++				   &significand(&tmp),
++				   significand(&st1),
++				   significand(&tmp), exponent(&tmp)
++			    );
++			setexponent16(&tmp, exp_1 + expdif);
++
++			/* It is possible for the operation to be complete here.
++			   What does the IEEE standard say? The Intel 80486 manual
++			   implies that the operation will never be completed at this
++			   point, and the behaviour of a real 80486 confirms this.
++			 */
++			if (!(tmp.sigh | tmp.sigl)) {
++				/* The result is zero */
++				control_word = old_cw;
++				partial_status = saved_status;
++				FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
++				setsign(&st0, st0_sign);
+ #ifdef PECULIAR_486
+-	      setcc(SW_C2);
++				setcc(SW_C2);
+ #else
+-	      setcc(0);
++				setcc(0);
+ #endif /* PECULIAR_486 */
+-	      return;
+-	    }
+-	  cc = SW_C2;
+-	}
++				return;
++			}
++			cc = SW_C2;
++		}
  
- 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
--		printk("... APIC #%d %s: ", apicid, names[i]);
-+		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
+-      control_word = old_cw;
+-      partial_status = saved_status;
+-      tag = FPU_normalize_nuo(&tmp);
+-      reg_copy(&tmp, st0_ptr);
+-
+-      /* The only condition to be looked for is underflow,
+-	 and it can occur here only if underflow is unmasked. */
+-      if ( (exponent16(&tmp) <= EXP_UNDER) && (tag != TAG_Zero)
+-	  && !(control_word & CW_Underflow) )
+-	{
+-	  setcc(cc);
+-	  tag = arith_underflow(st0_ptr);
+-	  setsign(st0_ptr, st0_sign);
+-	  FPU_settag0(tag);
+-	  return;
+-	}
+-      else if ( (exponent16(&tmp) > EXP_UNDER) || (tag == TAG_Zero) )
+-	{
+-	  stdexp(st0_ptr);
+-	  setsign(st0_ptr, st0_sign);
+-	}
+-      else
+-	{
+-	  tag = FPU_round(st0_ptr, 0, 0, FULL_PRECISION, st0_sign);
+-	}
+-      FPU_settag0(tag);
+-      setcc(cc);
++		control_word = old_cw;
++		partial_status = saved_status;
++		tag = FPU_normalize_nuo(&tmp);
++		reg_copy(&tmp, st0_ptr);
++
++		/* The only condition to be looked for is underflow,
++		   and it can occur here only if underflow is unmasked. */
++		if ((exponent16(&tmp) <= EXP_UNDER) && (tag != TAG_Zero)
++		    && !(control_word & CW_Underflow)) {
++			setcc(cc);
++			tag = arith_underflow(st0_ptr);
++			setsign(st0_ptr, st0_sign);
++			FPU_settag0(tag);
++			return;
++		} else if ((exponent16(&tmp) > EXP_UNDER) || (tag == TAG_Zero)) {
++			stdexp(st0_ptr);
++			setsign(st0_ptr, st0_sign);
++		} else {
++			tag =
++			    FPU_round(st0_ptr, 0, 0, FULL_PRECISION, st0_sign);
++		}
++		FPU_settag0(tag);
++		setcc(cc);
  
- 		/*
- 		 * Wait for idle.
- 		 */
- 		status = safe_apic_wait_icr_idle();
- 		if (status)
--			printk("a previous APIC delivery may have failed\n");
-+			printk(KERN_CONT
-+			       "a previous APIC delivery may have failed\n");
+-      return;
+-    }
++		return;
++	}
  
- 		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
- 		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
-@@ -410,10 +420,10 @@ static void inquire_remote_apic(int apicid)
- 		switch (status) {
- 		case APIC_ICR_RR_VALID:
- 			status = apic_read(APIC_RRR);
--			printk("%08x\n", status);
-+			printk(KERN_CONT "%08x\n", status);
- 			break;
- 		default:
--			printk("failed\n");
-+			printk(KERN_CONT "failed\n");
- 		}
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-  if ( st1_tag == TAG_Special )
+-    st1_tag = FPU_Special(st1_ptr);
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
++	if (st1_tag == TAG_Special)
++		st1_tag = FPU_Special(st1_ptr);
+ 
+-  if ( ((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
++	if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
+ 	    || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
+-	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal)) )
+-    {
+-      if ( denormal_operand() < 0 )
+-	return;
+-      goto fprem_valid;
+-    }
+-  else if ( (st0_tag == TAG_Empty) || (st1_tag == TAG_Empty) )
+-    {
+-      FPU_stack_underflow();
+-      return;
+-    }
+-  else if ( st0_tag == TAG_Zero )
+-    {
+-      if ( st1_tag == TAG_Valid )
+-	{
+-	  setcc(0); return;
+-	}
+-      else if ( st1_tag == TW_Denormal )
+-	{
+-	  if ( denormal_operand() < 0 )
+-	    return;
+-	  setcc(0); return;
+-	}
+-      else if ( st1_tag == TAG_Zero )
+-	{ arith_invalid(0); return; } /* fprem(?,0) always invalid */
+-      else if ( st1_tag == TW_Infinity )
+-	{ setcc(0); return; }
+-    }
+-  else if ( (st0_tag == TAG_Valid) || (st0_tag == TW_Denormal) )
+-    {
+-      if ( st1_tag == TAG_Zero )
+-	{
+-	  arith_invalid(0); /* fprem(Valid,Zero) is invalid */
+-	  return;
+-	}
+-      else if ( st1_tag != TW_NaN )
+-	{
+-	  if ( ((st0_tag == TW_Denormal) || (st1_tag == TW_Denormal))
+-	       && (denormal_operand() < 0) )
+-	    return;
+-
+-	  if ( st1_tag == TW_Infinity )
+-	    {
+-	      /* fprem(Valid,Infinity) is o.k. */
+-	      setcc(0); return;
+-	    }
+-	}
+-    }
+-  else if ( st0_tag == TW_Infinity )
+-    {
+-      if ( st1_tag != TW_NaN )
+-	{
+-	  arith_invalid(0); /* fprem(Infinity,?) is invalid */
+-	  return;
++	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
++		if (denormal_operand() < 0)
++			return;
++		goto fprem_valid;
++	} else if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
++		FPU_stack_underflow();
++		return;
++	} else if (st0_tag == TAG_Zero) {
++		if (st1_tag == TAG_Valid) {
++			setcc(0);
++			return;
++		} else if (st1_tag == TW_Denormal) {
++			if (denormal_operand() < 0)
++				return;
++			setcc(0);
++			return;
++		} else if (st1_tag == TAG_Zero) {
++			arith_invalid(0);
++			return;
++		} /* fprem(?,0) always invalid */
++		else if (st1_tag == TW_Infinity) {
++			setcc(0);
++			return;
++		}
++	} else if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
++		if (st1_tag == TAG_Zero) {
++			arith_invalid(0);	/* fprem(Valid,Zero) is invalid */
++			return;
++		} else if (st1_tag != TW_NaN) {
++			if (((st0_tag == TW_Denormal)
++			     || (st1_tag == TW_Denormal))
++			    && (denormal_operand() < 0))
++				return;
++
++			if (st1_tag == TW_Infinity) {
++				/* fprem(Valid,Infinity) is o.k. */
++				setcc(0);
++				return;
++			}
++		}
++	} else if (st0_tag == TW_Infinity) {
++		if (st1_tag != TW_NaN) {
++			arith_invalid(0);	/* fprem(Infinity,?) is invalid */
++			return;
++		}
  	}
- }
-@@ -466,7 +476,7 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
- 	 */
- 	Dprintk("#startup loops: %d.\n", num_starts);
- 
--	maxlvt = get_maxlvt();
-+	maxlvt = lapic_get_maxlvt();
- 
- 	for (j = 1; j <= num_starts; j++) {
- 		Dprintk("Sending STARTUP #%d.\n",j);
-@@ -577,7 +587,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
- 	c_idle.idle = get_idle_for_cpu(cpu);
+-    }
  
- 	if (c_idle.idle) {
--		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
-+		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
- 			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
- 		init_idle(c_idle.idle, cpu);
- 		goto do_rest;
-@@ -613,8 +623,8 @@ do_rest:
+-  /* One of the registers must contain a NaN if we got here. */
++	/* One of the registers must contain a NaN if we got here. */
  
- 	start_rip = setup_trampoline();
+ #ifdef PARANOID
+-  if ( (st0_tag != TW_NaN) && (st1_tag != TW_NaN) )
+-      EXCEPTION(EX_INTERNAL | 0x118);
++	if ((st0_tag != TW_NaN) && (st1_tag != TW_NaN))
++		EXCEPTION(EX_INTERNAL | 0x118);
+ #endif /* PARANOID */
  
--	init_rsp = c_idle.idle->thread.rsp;
--	per_cpu(init_tss,cpu).rsp0 = init_rsp;
-+	init_rsp = c_idle.idle->thread.sp;
-+	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
- 	initial_code = start_secondary;
- 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+-  real_2op_NaN(st1_ptr, st1_tag, 0, st1_ptr);
++	real_2op_NaN(st1_ptr, st1_tag, 0, st1_ptr);
  
-@@ -691,7 +701,7 @@ do_rest:
- 	}
- 	if (boot_error) {
- 		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
--		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-+		clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
- 		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
- 		cpu_clear(cpu, cpu_present_map);
- 		cpu_clear(cpu, cpu_possible_map);
-@@ -841,24 +851,16 @@ static int __init smp_sanity_check(unsigned max_cpus)
- 	return 0;
  }
  
--/*
-- * Copy apicid's found by MP_processor_info from initial array to the per cpu
-- * data area.  The x86_cpu_to_apicid_init array is then expendable and the
-- * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no
-- * longer available.
-- */
--void __init smp_set_apicids(void)
-+static void __init smp_cpu_index_default(void)
+-
+ /* ST(1) <- ST(1) * log ST;  pop ST */
+ static void fyl2x(FPU_REG *st0_ptr, u_char st0_tag)
  {
--	int cpu;
-+	int i;
-+	struct cpuinfo_x86 *c;
+-  FPU_REG *st1_ptr = &st(1), exponent;
+-  u_char st1_tag = FPU_gettagi(1);
+-  u_char sign;
+-  int e, tag;
+-
+-  clear_C1();
+-
+-  if ( (st0_tag == TAG_Valid) && (st1_tag == TAG_Valid) )
+-    {
+-    both_valid:
+-      /* Both regs are Valid or Denormal */
+-      if ( signpositive(st0_ptr) )
+-	{
+-	  if ( st0_tag == TW_Denormal )
+-	    FPU_to_exp16(st0_ptr, st0_ptr);
+-	  else
+-	    /* Convert st(0) for internal use. */
+-	    setexponent16(st0_ptr, exponent(st0_ptr));
+-
+-	  if ( (st0_ptr->sigh == 0x80000000) && (st0_ptr->sigl == 0) )
+-	    {
+-	      /* Special case. The result can be precise. */
+-	      u_char esign;
+-	      e = exponent16(st0_ptr);
+-	      if ( e >= 0 )
+-		{
+-		  exponent.sigh = e;
+-		  esign = SIGN_POS;
+-		}
+-	      else
+-		{
+-		  exponent.sigh = -e;
+-		  esign = SIGN_NEG;
++	FPU_REG *st1_ptr = &st(1), exponent;
++	u_char st1_tag = FPU_gettagi(1);
++	u_char sign;
++	int e, tag;
++
++	clear_C1();
++
++	if ((st0_tag == TAG_Valid) && (st1_tag == TAG_Valid)) {
++	      both_valid:
++		/* Both regs are Valid or Denormal */
++		if (signpositive(st0_ptr)) {
++			if (st0_tag == TW_Denormal)
++				FPU_to_exp16(st0_ptr, st0_ptr);
++			else
++				/* Convert st(0) for internal use. */
++				setexponent16(st0_ptr, exponent(st0_ptr));
++
++			if ((st0_ptr->sigh == 0x80000000)
++			    && (st0_ptr->sigl == 0)) {
++				/* Special case. The result can be precise. */
++				u_char esign;
++				e = exponent16(st0_ptr);
++				if (e >= 0) {
++					exponent.sigh = e;
++					esign = SIGN_POS;
++				} else {
++					exponent.sigh = -e;
++					esign = SIGN_NEG;
++				}
++				exponent.sigl = 0;
++				setexponent16(&exponent, 31);
++				tag = FPU_normalize_nuo(&exponent);
++				stdexp(&exponent);
++				setsign(&exponent, esign);
++				tag =
++				    FPU_mul(&exponent, tag, 1, FULL_PRECISION);
++				if (tag >= 0)
++					FPU_settagi(1, tag);
++			} else {
++				/* The usual case */
++				sign = getsign(st1_ptr);
++				if (st1_tag == TW_Denormal)
++					FPU_to_exp16(st1_ptr, st1_ptr);
++				else
++					/* Convert st(1) for internal use. */
++					setexponent16(st1_ptr,
++						      exponent(st1_ptr));
++				poly_l2(st0_ptr, st1_ptr, sign);
++			}
++		} else {
++			/* negative */
++			if (arith_invalid(1) < 0)
++				return;
+ 		}
+-	      exponent.sigl = 0;
+-	      setexponent16(&exponent, 31);
+-	      tag = FPU_normalize_nuo(&exponent);
+-	      stdexp(&exponent);
+-	      setsign(&exponent, esign);
+-	      tag = FPU_mul(&exponent, tag, 1, FULL_PRECISION);
+-	      if ( tag >= 0 )
+-		FPU_settagi(1, tag);
+-	    }
+-	  else
+-	    {
+-	      /* The usual case */
+-	      sign = getsign(st1_ptr);
+-	      if ( st1_tag == TW_Denormal )
+-		FPU_to_exp16(st1_ptr, st1_ptr);
+-	      else
+-		/* Convert st(1) for internal use. */
+-		setexponent16(st1_ptr, exponent(st1_ptr));
+-	      poly_l2(st0_ptr, st1_ptr, sign);
+-	    }
+-	}
+-      else
+-	{
+-	  /* negative */
+-	  if ( arith_invalid(1) < 0 )
+-	    return;
+-	}
  
--	for_each_cpu_mask(cpu, cpu_possible_map) {
--		if (per_cpu_offset(cpu))
--			per_cpu(x86_cpu_to_apicid, cpu) =
--						x86_cpu_to_apicid_init[cpu];
-+	for_each_cpu_mask(i, cpu_possible_map) {
-+		c = &cpu_data(i);
-+		/* mark all to hotplug */
-+		c->cpu_index = NR_CPUS;
- 	}
+-      FPU_pop();
 -
--	/* indicate the static array will be going away soon */
--	x86_cpu_to_apicid_ptr = NULL;
- }
+-      return;
+-    }
+-
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-  if ( st1_tag == TAG_Special )
+-    st1_tag = FPU_Special(st1_ptr);
+-
+-  if ( (st0_tag == TAG_Empty) || (st1_tag == TAG_Empty) )
+-    {
+-      FPU_stack_underflow_pop(1);
+-      return;
+-    }
+-  else if ( (st0_tag <= TW_Denormal) && (st1_tag <= TW_Denormal) )
+-    {
+-      if ( st0_tag == TAG_Zero )
+-	{
+-	  if ( st1_tag == TAG_Zero )
+-	    {
+-	      /* Both args zero is invalid */
+-	      if ( arith_invalid(1) < 0 )
+-		return;
+-	    }
+-	  else
+-	    {
+-	      u_char sign;
+-	      sign = getsign(st1_ptr)^SIGN_NEG;
+-	      if ( FPU_divide_by_zero(1, sign) < 0 )
+-		return;
++		FPU_pop();
  
- /*
-@@ -868,9 +870,9 @@ void __init smp_set_apicids(void)
- void __init smp_prepare_cpus(unsigned int max_cpus)
- {
- 	nmi_watchdog_default();
-+	smp_cpu_index_default();
- 	current_cpu_data = boot_cpu_data;
- 	current_thread_info()->cpu = 0;  /* needed? */
--	smp_set_apicids();
- 	set_cpu_sibling_map(0);
+-	      setsign(st1_ptr, sign);
+-	    }
+-	}
+-      else if ( st1_tag == TAG_Zero )
+-	{
+-	  /* st(1) contains zero, st(0) valid <> 0 */
+-	  /* Zero is the valid answer */
+-	  sign = getsign(st1_ptr);
+-	  
+-	  if ( signnegative(st0_ptr) )
+-	    {
+-	      /* log(negative) */
+-	      if ( arith_invalid(1) < 0 )
+ 		return;
+-	    }
+-	  else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
+-	  else
+-	    {
+-	      if ( exponent(st0_ptr) < 0 )
+-		sign ^= SIGN_NEG;
+-
+-	      FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
+-	      setsign(st1_ptr, sign);
+-	    }
+ 	}
+-      else
+-	{
+-	  /* One or both operands are denormals. */
+-	  if ( denormal_operand() < 0 )
+-	    return;
+-	  goto both_valid;
+-	}
+-    }
+-  else if ( (st0_tag == TW_NaN) || (st1_tag == TW_NaN) )
+-    {
+-      if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
+-	return;
+-    }
+-  /* One or both arg must be an infinity */
+-  else if ( st0_tag == TW_Infinity )
+-    {
+-      if ( (signnegative(st0_ptr)) || (st1_tag == TAG_Zero) )
+-	{
+-	  /* log(-infinity) or 0*log(infinity) */
+-	  if ( arith_invalid(1) < 0 )
+-	    return;
+-	}
+-      else
+-	{
+-	  u_char sign = getsign(st1_ptr);
  
- 	if (smp_sanity_check(max_cpus) < 0) {
-@@ -885,6 +887,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
- 	 */
- 	setup_local_APIC();
+-	  if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
++	if (st1_tag == TAG_Special)
++		st1_tag = FPU_Special(st1_ptr);
  
-+	/*
-+	 * Enable IO APIC before setting up error vector
-+	 */
-+	if (!skip_ioapic_setup && nr_ioapics)
-+		enable_IO_APIC();
-+	end_local_APIC_setup();
+-	  FPU_copy_to_reg1(&CONST_INF, TAG_Special);
+-	  setsign(st1_ptr, sign);
+-	}
+-    }
+-  /* st(1) must be infinity here */
+-  else if ( ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal))
+-	    && ( signpositive(st0_ptr) ) )
+-    {
+-      if ( exponent(st0_ptr) >= 0 )
+-	{
+-	  if ( (exponent(st0_ptr) == 0) &&
+-	      (st0_ptr->sigh == 0x80000000) &&
+-	      (st0_ptr->sigl == 0) )
+-	    {
+-	      /* st(0) holds 1.0 */
+-	      /* infinity*log(1) */
+-	      if ( arith_invalid(1) < 0 )
++	if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
++		FPU_stack_underflow_pop(1);
+ 		return;
+-	    }
+-	  /* else st(0) is positive and > 1.0 */
++	} else if ((st0_tag <= TW_Denormal) && (st1_tag <= TW_Denormal)) {
++		if (st0_tag == TAG_Zero) {
++			if (st1_tag == TAG_Zero) {
++				/* Both args zero is invalid */
++				if (arith_invalid(1) < 0)
++					return;
++			} else {
++				u_char sign;
++				sign = getsign(st1_ptr) ^ SIGN_NEG;
++				if (FPU_divide_by_zero(1, sign) < 0)
++					return;
 +
- 	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
- 		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
- 		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
-@@ -903,7 +912,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
- 	 * Set up local APIC timer on boot CPU.
- 	 */
- 
--	setup_boot_APIC_clock();
-+	setup_boot_clock();
- }
++				setsign(st1_ptr, sign);
++			}
++		} else if (st1_tag == TAG_Zero) {
++			/* st(1) contains zero, st(0) valid <> 0 */
++			/* Zero is the valid answer */
++			sign = getsign(st1_ptr);
++
++			if (signnegative(st0_ptr)) {
++				/* log(negative) */
++				if (arith_invalid(1) < 0)
++					return;
++			} else if ((st0_tag == TW_Denormal)
++				   && (denormal_operand() < 0))
++				return;
++			else {
++				if (exponent(st0_ptr) < 0)
++					sign ^= SIGN_NEG;
++
++				FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
++				setsign(st1_ptr, sign);
++			}
++		} else {
++			/* One or both operands are denormals. */
++			if (denormal_operand() < 0)
++				return;
++			goto both_valid;
++		}
++	} else if ((st0_tag == TW_NaN) || (st1_tag == TW_NaN)) {
++		if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
++			return;
++	}
++	/* One or both arg must be an infinity */
++	else if (st0_tag == TW_Infinity) {
++		if ((signnegative(st0_ptr)) || (st1_tag == TAG_Zero)) {
++			/* log(-infinity) or 0*log(infinity) */
++			if (arith_invalid(1) < 0)
++				return;
++		} else {
++			u_char sign = getsign(st1_ptr);
++
++			if ((st1_tag == TW_Denormal)
++			    && (denormal_operand() < 0))
++				return;
++
++			FPU_copy_to_reg1(&CONST_INF, TAG_Special);
++			setsign(st1_ptr, sign);
++		}
+ 	}
+-      else
+-	{
+-	  /* st(0) is positive and < 1.0 */
++	/* st(1) must be infinity here */
++	else if (((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal))
++		 && (signpositive(st0_ptr))) {
++		if (exponent(st0_ptr) >= 0) {
++			if ((exponent(st0_ptr) == 0) &&
++			    (st0_ptr->sigh == 0x80000000) &&
++			    (st0_ptr->sigl == 0)) {
++				/* st(0) holds 1.0 */
++				/* infinity*log(1) */
++				if (arith_invalid(1) < 0)
++					return;
++			}
++			/* else st(0) is positive and > 1.0 */
++		} else {
++			/* st(0) is positive and < 1.0 */
  
- /*
-@@ -912,7 +921,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
- void __init smp_prepare_boot_cpu(void)
- {
- 	int me = smp_processor_id();
--	cpu_set(me, cpu_online_map);
-+	/* already set me in cpu_online_map in boot_cpu_init() */
- 	cpu_set(me, cpu_callout_map);
- 	per_cpu(cpu_state, me) = CPU_ONLINE;
- }
-@@ -1016,7 +1025,7 @@ void remove_cpu_from_maps(void)
+-	  if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
++			if ((st0_tag == TW_Denormal)
++			    && (denormal_operand() < 0))
++				return;
  
- 	cpu_clear(cpu, cpu_callout_map);
- 	cpu_clear(cpu, cpu_callin_map);
--	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-+	clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
- 	clear_node_cpumask(cpu);
- }
+-	  changesign(st1_ptr);
+-	}
+-    }
+-  else
+-    {
+-      /* st(0) must be zero or negative */
+-      if ( st0_tag == TAG_Zero )
+-	{
+-	  /* This should be invalid, but a real 80486 is happy with it. */
++			changesign(st1_ptr);
++		}
++	} else {
++		/* st(0) must be zero or negative */
++		if (st0_tag == TAG_Zero) {
++			/* This should be invalid, but a real 80486 is happy with it. */
  
-diff --git a/arch/x86/kernel/smpcommon_32.c b/arch/x86/kernel/smpcommon_32.c
-index bbfe85a..8bc38af 100644
---- a/arch/x86/kernel/smpcommon_32.c
-+++ b/arch/x86/kernel/smpcommon_32.c
-@@ -14,10 +14,11 @@ __cpuinit void init_gdt(int cpu)
- {
- 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ #ifndef PECULIAR_486
+-	  sign = getsign(st1_ptr);
+-	  if ( FPU_divide_by_zero(1, sign) < 0 )
+-	    return;
++			sign = getsign(st1_ptr);
++			if (FPU_divide_by_zero(1, sign) < 0)
++				return;
+ #endif /* PECULIAR_486 */
  
--	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
--			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
-+	pack_descriptor(&gdt[GDT_ENTRY_PERCPU],
- 			__per_cpu_offset[cpu], 0xFFFFF,
--			0x80 | DESCTYPE_S | 0x2, 0x8);
-+			0x2 | DESCTYPE_S, 0x8);
-+
-+	gdt[GDT_ENTRY_PERCPU].s = 1;
+-	  changesign(st1_ptr);
++			changesign(st1_ptr);
++		} else if (arith_invalid(1) < 0)	/* log(negative) */
++			return;
+ 	}
+-      else if ( arith_invalid(1) < 0 )	  /* log(negative) */
+-	return;
+-    }
  
- 	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
- 	per_cpu(cpu_number, cpu) = cpu;
-diff --git a/arch/x86/kernel/srat_32.c b/arch/x86/kernel/srat_32.c
-index 2a8713e..2bf6903 100644
---- a/arch/x86/kernel/srat_32.c
-+++ b/arch/x86/kernel/srat_32.c
-@@ -57,8 +57,6 @@ static struct node_memory_chunk_s node_memory_chunk[MAXCHUNKS];
- static int num_memory_chunks;		/* total number of memory chunks */
- static u8 __initdata apicid_to_pxm[MAX_APICID];
+-  FPU_pop();
++	FPU_pop();
+ }
  
--extern void * boot_ioremap(unsigned long, unsigned long);
 -
- /* Identify CPU proximity domains */
- static void __init parse_cpu_affinity_structure(char *p)
+ static void fpatan(FPU_REG *st0_ptr, u_char st0_tag)
  {
-@@ -299,7 +297,7 @@ int __init get_memcfg_from_srat(void)
- 	}
+-  FPU_REG *st1_ptr = &st(1);
+-  u_char st1_tag = FPU_gettagi(1);
+-  int tag;
++	FPU_REG *st1_ptr = &st(1);
++	u_char st1_tag = FPU_gettagi(1);
++	int tag;
  
- 	rsdt = (struct acpi_table_rsdt *)
--	    boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
-+	    early_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
+-  clear_C1();
+-  if ( !((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid)) )
+-    {
+-    valid_atan:
++	clear_C1();
++	if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
++	      valid_atan:
  
- 	if (!rsdt) {
- 		printk(KERN_WARNING
-@@ -339,11 +337,11 @@ int __init get_memcfg_from_srat(void)
- 	for (i = 0; i < tables; i++) {
- 		/* Map in header, then map in full table length. */
- 		header = (struct acpi_table_header *)
--			boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
-+			early_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
- 		if (!header)
- 			break;
- 		header = (struct acpi_table_header *)
--			boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
-+			early_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
- 		if (!header)
- 			break;
+-      poly_atan(st0_ptr, st0_tag, st1_ptr, st1_tag);
++		poly_atan(st0_ptr, st0_tag, st1_ptr, st1_tag);
  
-diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
-index 6fa6cf0..02f0f61 100644
---- a/arch/x86/kernel/stacktrace.c
-+++ b/arch/x86/kernel/stacktrace.c
-@@ -22,9 +22,23 @@ static int save_stack_stack(void *data, char *name)
- 	return -1;
- }
+-      FPU_pop();
++		FPU_pop();
  
--static void save_stack_address(void *data, unsigned long addr)
-+static void save_stack_address(void *data, unsigned long addr, int reliable)
-+{
-+	struct stack_trace *trace = data;
-+	if (trace->skip > 0) {
-+		trace->skip--;
+-      return;
+-    }
 +		return;
 +	}
-+	if (trace->nr_entries < trace->max_entries)
-+		trace->entries[trace->nr_entries++] = addr;
-+}
-+
-+static void
-+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
- {
- 	struct stack_trace *trace = (struct stack_trace *)data;
-+	if (in_sched_functions(addr))
-+		return;
- 	if (trace->skip > 0) {
- 		trace->skip--;
- 		return;
-@@ -40,13 +54,26 @@ static const struct stacktrace_ops save_stack_ops = {
- 	.address = save_stack_address,
- };
  
-+static const struct stacktrace_ops save_stack_ops_nosched = {
-+	.warning = save_stack_warning,
-+	.warning_symbol = save_stack_warning_symbol,
-+	.stack = save_stack_stack,
-+	.address = save_stack_address_nosched,
-+};
-+
- /*
-  * Save stack-backtrace addresses into a stack_trace buffer.
-  */
- void save_stack_trace(struct stack_trace *trace)
- {
--	dump_trace(current, NULL, NULL, &save_stack_ops, trace);
-+	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
-+	if (trace->nr_entries < trace->max_entries)
-+		trace->entries[trace->nr_entries++] = ULONG_MAX;
-+}
-+
-+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-+{
-+	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
- 	if (trace->nr_entries < trace->max_entries)
- 		trace->entries[trace->nr_entries++] = ULONG_MAX;
- }
--EXPORT_SYMBOL(save_stack_trace);
-diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
-new file mode 100644
-index 0000000..2ef1a5f
---- /dev/null
-+++ b/arch/x86/kernel/step.c
-@@ -0,0 +1,203 @@
-+/*
-+ * x86 single-step support code, common to 32-bit and 64-bit.
-+ */
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/ptrace.h>
-+
-+unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
-+{
-+	unsigned long addr, seg;
-+
-+	addr = regs->ip;
-+	seg = regs->cs & 0xffff;
-+	if (v8086_mode(regs)) {
-+		addr = (addr & 0xffff) + (seg << 4);
-+		return addr;
-+	}
-+
-+	/*
-+	 * We'll assume that the code segments in the GDT
-+	 * are all zero-based. That is largely true: the
-+	 * TLS segments are used for data, and the PNPBIOS
-+	 * and APM bios ones we just ignore here.
-+	 */
-+	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
-+		u32 *desc;
-+		unsigned long base;
-+
-+		seg &= ~7UL;
-+
-+		mutex_lock(&child->mm->context.lock);
-+		if (unlikely((seg >> 3) >= child->mm->context.size))
-+			addr = -1L; /* bogus selector, access would fault */
-+		else {
-+			desc = child->mm->context.ldt + seg;
-+			base = ((desc[0] >> 16) |
-+				((desc[1] & 0xff) << 16) |
-+				(desc[1] & 0xff000000));
-+
-+			/* 16-bit code segment? */
-+			if (!((desc[1] >> 22) & 1))
-+				addr &= 0xffff;
-+			addr += base;
-+		}
-+		mutex_unlock(&child->mm->context.lock);
-+	}
-+
-+	return addr;
-+}
-+
-+static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
-+{
-+	int i, copied;
-+	unsigned char opcode[15];
-+	unsigned long addr = convert_ip_to_linear(child, regs);
-+
-+	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
-+	for (i = 0; i < copied; i++) {
-+		switch (opcode[i]) {
-+		/* popf and iret */
-+		case 0x9d: case 0xcf:
-+			return 1;
-+
-+			/* CHECKME: 64 65 */
-+
-+		/* opcode and address size prefixes */
-+		case 0x66: case 0x67:
-+			continue;
-+		/* irrelevant prefixes (segment overrides and repeats) */
-+		case 0x26: case 0x2e:
-+		case 0x36: case 0x3e:
-+		case 0x64: case 0x65:
-+		case 0xf0: case 0xf2: case 0xf3:
-+			continue;
-+
-+#ifdef CONFIG_X86_64
-+		case 0x40 ... 0x4f:
-+			if (regs->cs != __USER_CS)
-+				/* 32-bit mode: register increment */
-+				return 0;
-+			/* 64-bit mode: REX prefix */
-+			continue;
-+#endif
-+
-+			/* CHECKME: f2, f3 */
-+
-+		/*
-+		 * pushf: NOTE! We should probably not let
-+		 * the user see the TF bit being set. But
-+		 * it's more pain than it's worth to avoid
-+		 * it, and a debugger could emulate this
-+		 * all in user space if it _really_ cares.
-+		 */
-+		case 0x9c:
-+		default:
-+			return 0;
-+		}
-+	}
-+	return 0;
-+}
-+
-+/*
-+ * Enable single-stepping.  Return nonzero if user mode is not using TF itself.
-+ */
-+static int enable_single_step(struct task_struct *child)
-+{
-+	struct pt_regs *regs = task_pt_regs(child);
-+
-+	/*
-+	 * Always set TIF_SINGLESTEP - this guarantees that
-+	 * we single-step system calls etc..  This will also
-+	 * cause us to set TF when returning to user mode.
-+	 */
-+	set_tsk_thread_flag(child, TIF_SINGLESTEP);
-+
-+	/*
-+	 * If TF was already set, don't do anything else
-+	 */
-+	if (regs->flags & X86_EFLAGS_TF)
-+		return 0;
-+
-+	/* Set TF on the kernel stack.. */
-+	regs->flags |= X86_EFLAGS_TF;
-+
-+	/*
-+	 * ..but if TF is changed by the instruction we will trace,
-+	 * don't mark it as being "us" that set it, so that we
-+	 * won't clear it by hand later.
-+	 */
-+	if (is_setting_trap_flag(child, regs))
-+		return 0;
-+
-+	set_tsk_thread_flag(child, TIF_FORCED_TF);
-+
-+	return 1;
-+}
-+
-+/*
-+ * Install this value in MSR_IA32_DEBUGCTLMSR whenever child is running.
-+ */
-+static void write_debugctlmsr(struct task_struct *child, unsigned long val)
-+{
-+	child->thread.debugctlmsr = val;
-+
-+	if (child != current)
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-  if ( st1_tag == TAG_Special )
+-    st1_tag = FPU_Special(st1_ptr);
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
++	if (st1_tag == TAG_Special)
++		st1_tag = FPU_Special(st1_ptr);
+ 
+-  if ( ((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
++	if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
+ 	    || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
+-	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal)) )
+-    {
+-      if ( denormal_operand() < 0 )
+-	return;
++	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
++		if (denormal_operand() < 0)
++			return;
+ 
+-      goto valid_atan;
+-    }
+-  else if ( (st0_tag == TAG_Empty) || (st1_tag == TAG_Empty) )
+-    {
+-      FPU_stack_underflow_pop(1);
+-      return;
+-    }
+-  else if ( (st0_tag == TW_NaN) || (st1_tag == TW_NaN) )
+-    {
+-      if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) >= 0 )
+-	  FPU_pop();
+-      return;
+-    }
+-  else if ( (st0_tag == TW_Infinity) || (st1_tag == TW_Infinity) )
+-    {
+-      u_char sign = getsign(st1_ptr);
+-      if ( st0_tag == TW_Infinity )
+-	{
+-	  if ( st1_tag == TW_Infinity )
+-	    {
+-	      if ( signpositive(st0_ptr) )
+-		{
+-		  FPU_copy_to_reg1(&CONST_PI4, TAG_Valid);
+-		}
+-	      else
+-		{
+-		  setpositive(st1_ptr);
+-		  tag = FPU_u_add(&CONST_PI4, &CONST_PI2, st1_ptr,
+-				  FULL_PRECISION, SIGN_POS,
+-				  exponent(&CONST_PI4), exponent(&CONST_PI2));
+-		  if ( tag >= 0 )
+-		    FPU_settagi(1, tag);
+-		}
+-	    }
+-	  else
+-	    {
+-	      if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
++		goto valid_atan;
++	} else if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
++		FPU_stack_underflow_pop(1);
 +		return;
++	} else if ((st0_tag == TW_NaN) || (st1_tag == TW_NaN)) {
++		if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) >= 0)
++			FPU_pop();
+ 		return;
++	} else if ((st0_tag == TW_Infinity) || (st1_tag == TW_Infinity)) {
++		u_char sign = getsign(st1_ptr);
++		if (st0_tag == TW_Infinity) {
++			if (st1_tag == TW_Infinity) {
++				if (signpositive(st0_ptr)) {
++					FPU_copy_to_reg1(&CONST_PI4, TAG_Valid);
++				} else {
++					setpositive(st1_ptr);
++					tag =
++					    FPU_u_add(&CONST_PI4, &CONST_PI2,
++						      st1_ptr, FULL_PRECISION,
++						      SIGN_POS,
++						      exponent(&CONST_PI4),
++						      exponent(&CONST_PI2));
++					if (tag >= 0)
++						FPU_settagi(1, tag);
++				}
++			} else {
++				if ((st1_tag == TW_Denormal)
++				    && (denormal_operand() < 0))
++					return;
 +
-+	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
-+}
-+
-+/*
-+ * Enable single or block step.
-+ */
-+static void enable_step(struct task_struct *child, bool block)
-+{
-+	/*
-+	 * Make sure block stepping (BTF) is not enabled unless it should be.
-+	 * Note that we don't try to worry about any is_setting_trap_flag()
-+	 * instructions after the first when using block stepping.
-+	 * So noone should try to use debugger block stepping in a program
-+	 * that uses user-mode single stepping itself.
-+	 */
-+	if (enable_single_step(child) && block) {
-+		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-+		write_debugctlmsr(child,
-+				  child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
-+	} else {
-+	    write_debugctlmsr(child,
-+			      child->thread.debugctlmsr & ~TIF_DEBUGCTLMSR);
-+
-+	    if (!child->thread.debugctlmsr)
-+		    clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-+	}
-+}
-+
-+void user_enable_single_step(struct task_struct *child)
-+{
-+	enable_step(child, 0);
-+}
-+
-+void user_enable_block_step(struct task_struct *child)
-+{
-+	enable_step(child, 1);
-+}
-+
-+void user_disable_single_step(struct task_struct *child)
-+{
-+	/*
-+	 * Make sure block stepping (BTF) is disabled.
-+	 */
-+	write_debugctlmsr(child,
-+			  child->thread.debugctlmsr & ~TIF_DEBUGCTLMSR);
-+
-+	if (!child->thread.debugctlmsr)
-+		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
++				if (signpositive(st0_ptr)) {
++					FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
++					setsign(st1_ptr, sign);	/* An 80486 preserves the sign */
++					FPU_pop();
++					return;
++				} else {
++					FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
++				}
++			}
++		} else {
++			/* st(1) is infinity, st(0) not infinity */
++			if ((st0_tag == TW_Denormal)
++			    && (denormal_operand() < 0))
++				return;
+ 
+-	      if ( signpositive(st0_ptr) )
+-		{
+-		  FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
+-		  setsign(st1_ptr, sign);   /* An 80486 preserves the sign */
+-		  FPU_pop();
+-		  return;
++			FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
+ 		}
+-	      else
+-		{
+-		  FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
++		setsign(st1_ptr, sign);
++	} else if (st1_tag == TAG_Zero) {
++		/* st(0) must be valid or zero */
++		u_char sign = getsign(st1_ptr);
 +
-+	/* Always clear TIF_SINGLESTEP... */
-+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
++		if ((st0_tag == TW_Denormal) && (denormal_operand() < 0))
++			return;
 +
-+	/* But touch TF only if it was set by us.. */
-+	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
-+		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
-+}
-diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
-index 2e5efaa..0919951 100644
---- a/arch/x86/kernel/suspend_64.c
-+++ b/arch/x86/kernel/suspend_64.c
-@@ -17,9 +17,26 @@
- /* References to section boundaries */
- extern const void __nosave_begin, __nosave_end;
++		if (signpositive(st0_ptr)) {
++			/* An 80486 preserves the sign */
++			FPU_pop();
++			return;
+ 		}
+-	    }
+-	}
+-      else
+-	{
+-	  /* st(1) is infinity, st(0) not infinity */
+-	  if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
  
-+static void fix_processor_context(void);
-+
- struct saved_context saved_context;
+-	  FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
+-	}
+-      setsign(st1_ptr, sign);
+-    }
+-  else if ( st1_tag == TAG_Zero )
+-    {
+-      /* st(0) must be valid or zero */
+-      u_char sign = getsign(st1_ptr);
+-
+-      if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	return;
++		FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
++		setsign(st1_ptr, sign);
++	} else if (st0_tag == TAG_Zero) {
++		/* st(1) must be TAG_Valid here */
++		u_char sign = getsign(st1_ptr);
  
--void __save_processor_state(struct saved_context *ctxt)
-+/**
-+ *	__save_processor_state - save CPU registers before creating a
-+ *		hibernation image and before restoring the memory state from it
-+ *	@ctxt - structure to store the registers contents in
-+ *
-+ *	NOTE: If there is a CPU register the modification of which by the
-+ *	boot kernel (ie. the kernel used for loading the hibernation image)
-+ *	might affect the operations of the restored target kernel (ie. the one
-+ *	saved in the hibernation image), then its contents must be saved by this
-+ *	function.  In other words, if kernel A is hibernated and different
-+ *	kernel B is used for loading the hibernation image into memory, the
-+ *	kernel A's __save_processor_state() function must save all registers
-+ *	needed by kernel A, so that it can operate correctly after the resume
-+ *	regardless of what kernel B does in the meantime.
-+ */
-+static void __save_processor_state(struct saved_context *ctxt)
- {
- 	kernel_fpu_begin();
+-      if ( signpositive(st0_ptr) )
+-	{
+-	  /* An 80486 preserves the sign */
+-	  FPU_pop();
+-	  return;
+-	}
++		if ((st1_tag == TW_Denormal) && (denormal_operand() < 0))
++			return;
  
-@@ -69,7 +86,12 @@ static void do_fpu_end(void)
- 	kernel_fpu_end();
+-      FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
+-      setsign(st1_ptr, sign);
+-    }
+-  else if ( st0_tag == TAG_Zero )
+-    {
+-      /* st(1) must be TAG_Valid here */
+-      u_char sign = getsign(st1_ptr);
+-
+-      if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	return;
+-
+-      FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
+-      setsign(st1_ptr, sign);
+-    }
++		FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
++		setsign(st1_ptr, sign);
++	}
+ #ifdef PARANOID
+-  else
+-    EXCEPTION(EX_INTERNAL | 0x125);
++	else
++		EXCEPTION(EX_INTERNAL | 0x125);
+ #endif /* PARANOID */
+ 
+-  FPU_pop();
+-  set_precision_flag_up();  /* We do not really know if up or down */
++	FPU_pop();
++	set_precision_flag_up();	/* We do not really know if up or down */
  }
  
--void __restore_processor_state(struct saved_context *ctxt)
-+/**
-+ *	__restore_processor_state - restore the contents of CPU registers saved
-+ *		by __save_processor_state()
-+ *	@ctxt - structure to load the registers contents from
-+ */
-+static void __restore_processor_state(struct saved_context *ctxt)
+-
+ static void fprem(FPU_REG *st0_ptr, u_char st0_tag)
  {
- 	/*
- 	 * control registers
-@@ -113,14 +135,14 @@ void restore_processor_state(void)
- 	__restore_processor_state(&saved_context);
+-  do_fprem(st0_ptr, st0_tag, RC_CHOP);
++	do_fprem(st0_ptr, st0_tag, RC_CHOP);
  }
  
--void fix_processor_context(void)
-+static void fix_processor_context(void)
+-
+ static void fprem1(FPU_REG *st0_ptr, u_char st0_tag)
  {
- 	int cpu = smp_processor_id();
- 	struct tss_struct *t = &per_cpu(init_tss, cpu);
+-  do_fprem(st0_ptr, st0_tag, RC_RND);
++	do_fprem(st0_ptr, st0_tag, RC_RND);
+ }
  
- 	set_tss_desc(cpu,t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+-
+ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+-  u_char sign, sign1;
+-  FPU_REG *st1_ptr = &st(1), a, b;
+-  u_char st1_tag = FPU_gettagi(1);
++	u_char sign, sign1;
++	FPU_REG *st1_ptr = &st(1), a, b;
++	u_char st1_tag = FPU_gettagi(1);
  
--	cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;
-+	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
+-  clear_C1();
+-  if ( !((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid)) )
+-    {
+-    valid_yl2xp1:
++	clear_C1();
++	if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
++	      valid_yl2xp1:
  
- 	syscall_init();                         /* This sets MSR_*STAR and related */
- 	load_TR_desc();				/* This does ltr */
-diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
-index 72f9521..aeb9a4d 100644
---- a/arch/x86/kernel/suspend_asm_64.S
-+++ b/arch/x86/kernel/suspend_asm_64.S
-@@ -18,13 +18,13 @@
+-      sign = getsign(st0_ptr);
+-      sign1 = getsign(st1_ptr);
++		sign = getsign(st0_ptr);
++		sign1 = getsign(st1_ptr);
  
- ENTRY(swsusp_arch_suspend)
- 	movq	$saved_context, %rax
--	movq	%rsp, pt_regs_rsp(%rax)
--	movq	%rbp, pt_regs_rbp(%rax)
--	movq	%rsi, pt_regs_rsi(%rax)
--	movq	%rdi, pt_regs_rdi(%rax)
--	movq	%rbx, pt_regs_rbx(%rax)
--	movq	%rcx, pt_regs_rcx(%rax)
--	movq	%rdx, pt_regs_rdx(%rax)
-+	movq	%rsp, pt_regs_sp(%rax)
-+	movq	%rbp, pt_regs_bp(%rax)
-+	movq	%rsi, pt_regs_si(%rax)
-+	movq	%rdi, pt_regs_di(%rax)
-+	movq	%rbx, pt_regs_bx(%rax)
-+	movq	%rcx, pt_regs_cx(%rax)
-+	movq	%rdx, pt_regs_dx(%rax)
- 	movq	%r8, pt_regs_r8(%rax)
- 	movq	%r9, pt_regs_r9(%rax)
- 	movq	%r10, pt_regs_r10(%rax)
-@@ -34,7 +34,7 @@ ENTRY(swsusp_arch_suspend)
- 	movq	%r14, pt_regs_r14(%rax)
- 	movq	%r15, pt_regs_r15(%rax)
- 	pushfq
--	popq	pt_regs_eflags(%rax)
-+	popq	pt_regs_flags(%rax)
+-      FPU_to_exp16(st0_ptr, &a);
+-      FPU_to_exp16(st1_ptr, &b);
++		FPU_to_exp16(st0_ptr, &a);
++		FPU_to_exp16(st1_ptr, &b);
  
- 	/* save the address of restore_registers */
- 	movq	$restore_registers, %rax
-@@ -115,13 +115,13 @@ ENTRY(restore_registers)
+-      if ( poly_l2p1(sign, sign1, &a, &b, st1_ptr) )
+-	return;
++		if (poly_l2p1(sign, sign1, &a, &b, st1_ptr))
++			return;
  
- 	/* We don't restore %rax, it must be 0 anyway */
- 	movq	$saved_context, %rax
--	movq	pt_regs_rsp(%rax), %rsp
--	movq	pt_regs_rbp(%rax), %rbp
--	movq	pt_regs_rsi(%rax), %rsi
--	movq	pt_regs_rdi(%rax), %rdi
--	movq	pt_regs_rbx(%rax), %rbx
--	movq	pt_regs_rcx(%rax), %rcx
--	movq	pt_regs_rdx(%rax), %rdx
-+	movq	pt_regs_sp(%rax), %rsp
-+	movq	pt_regs_bp(%rax), %rbp
-+	movq	pt_regs_si(%rax), %rsi
-+	movq	pt_regs_di(%rax), %rdi
-+	movq	pt_regs_bx(%rax), %rbx
-+	movq	pt_regs_cx(%rax), %rcx
-+	movq	pt_regs_dx(%rax), %rdx
- 	movq	pt_regs_r8(%rax), %r8
- 	movq	pt_regs_r9(%rax), %r9
- 	movq	pt_regs_r10(%rax), %r10
-@@ -130,7 +130,7 @@ ENTRY(restore_registers)
- 	movq	pt_regs_r13(%rax), %r13
- 	movq	pt_regs_r14(%rax), %r14
- 	movq	pt_regs_r15(%rax), %r15
--	pushq	pt_regs_eflags(%rax)
-+	pushq	pt_regs_flags(%rax)
- 	popfq
+-      FPU_pop();
+-      return;
+-    }
++		FPU_pop();
++		return;
++	}
  
- 	xorq	%rax, %rax
-diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index 907942e..bd802a5 100644
---- a/arch/x86/kernel/sys_x86_64.c
-+++ b/arch/x86/kernel/sys_x86_64.c
-@@ -12,6 +12,7 @@
- #include <linux/file.h>
- #include <linux/utsname.h>
- #include <linux/personality.h>
-+#include <linux/random.h>
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-  if ( st1_tag == TAG_Special )
+-    st1_tag = FPU_Special(st1_ptr);
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
++	if (st1_tag == TAG_Special)
++		st1_tag = FPU_Special(st1_ptr);
  
- #include <asm/uaccess.h>
- #include <asm/ia32.h>
-@@ -65,6 +66,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
- 			   unsigned long *end)
- {
- 	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
-+		unsigned long new_begin;
- 		/* This is usually used needed to map code in small
- 		   model, so it needs to be in the first 31bit. Limit
- 		   it to that.  This means we need to move the
-@@ -74,6 +76,11 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
- 		   of playground for now. -AK */ 
- 		*begin = 0x40000000; 
- 		*end = 0x80000000;		
-+		if (current->flags & PF_RANDOMIZE) {
-+			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
-+			if (new_begin)
-+				*begin = new_begin;
-+		}
- 	} else {
- 		*begin = TASK_UNMAPPED_BASE;
- 		*end = TASK_SIZE; 
-@@ -143,6 +150,97 @@ full_search:
- 	}
- }
+-  if ( ((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
++	if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
+ 	    || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
+-	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal)) )
+-    {
+-      if ( denormal_operand() < 0 )
+-	return;
+-
+-      goto valid_yl2xp1;
+-    }
+-  else if ( (st0_tag == TAG_Empty) | (st1_tag == TAG_Empty) )
+-    {
+-      FPU_stack_underflow_pop(1);
+-      return;
+-    }
+-  else if ( st0_tag == TAG_Zero )
+-    {
+-      switch ( st1_tag )
+-	{
+-	case TW_Denormal:
+-	  if ( denormal_operand() < 0 )
+-	    return;
+-
+-	case TAG_Zero:
+-	case TAG_Valid:
+-	  setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
+-	  FPU_copy_to_reg1(st0_ptr, st0_tag);
+-	  break;
+-
+-	case TW_Infinity:
+-	  /* Infinity*log(1) */
+-	  if ( arith_invalid(1) < 0 )
+-	    return;
+-	  break;
++	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
++		if (denormal_operand() < 0)
++			return;
  
+-	case TW_NaN:
+-	  if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
+-	    return;
+-	  break;
+-
+-	default:
++		goto valid_yl2xp1;
++	} else if ((st0_tag == TAG_Empty) | (st1_tag == TAG_Empty)) {
++		FPU_stack_underflow_pop(1);
++		return;
++	} else if (st0_tag == TAG_Zero) {
++		switch (st1_tag) {
++		case TW_Denormal:
++			if (denormal_operand() < 0)
++				return;
 +
-+unsigned long
-+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-+			  const unsigned long len, const unsigned long pgoff,
-+			  const unsigned long flags)
-+{
-+	struct vm_area_struct *vma;
-+	struct mm_struct *mm = current->mm;
-+	unsigned long addr = addr0;
-+
-+	/* requested length too big for entire address space */
-+	if (len > TASK_SIZE)
-+		return -ENOMEM;
-+
-+	if (flags & MAP_FIXED)
-+		return addr;
-+
-+	/* for MAP_32BIT mappings we force the legact mmap base */
-+	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
-+		goto bottomup;
-+
-+	/* requesting a specific address */
-+	if (addr) {
-+		addr = PAGE_ALIGN(addr);
-+		vma = find_vma(mm, addr);
-+		if (TASK_SIZE - len >= addr &&
-+				(!vma || addr + len <= vma->vm_start))
-+			return addr;
-+	}
-+
-+	/* check if free_area_cache is useful for us */
-+	if (len <= mm->cached_hole_size) {
-+		mm->cached_hole_size = 0;
-+		mm->free_area_cache = mm->mmap_base;
-+	}
-+
-+	/* either no address requested or can't fit in requested address hole */
-+	addr = mm->free_area_cache;
-+
-+	/* make sure it can fit in the remaining address space */
-+	if (addr > len) {
-+		vma = find_vma(mm, addr-len);
-+		if (!vma || addr <= vma->vm_start)
-+			/* remember the address as a hint for next time */
-+			return (mm->free_area_cache = addr-len);
-+	}
-+
-+	if (mm->mmap_base < len)
-+		goto bottomup;
-+
-+	addr = mm->mmap_base-len;
-+
-+	do {
-+		/*
-+		 * Lookup failure means no vma is above this address,
-+		 * else if new region fits below vma->vm_start,
-+		 * return with success:
-+		 */
-+		vma = find_vma(mm, addr);
-+		if (!vma || addr+len <= vma->vm_start)
-+			/* remember the address as a hint for next time */
-+			return (mm->free_area_cache = addr);
-+
-+		/* remember the largest hole we saw so far */
-+		if (addr + mm->cached_hole_size < vma->vm_start)
-+			mm->cached_hole_size = vma->vm_start - addr;
-+
-+		/* try just below the current vma->vm_start */
-+		addr = vma->vm_start-len;
-+	} while (len < vma->vm_start);
-+
-+bottomup:
-+	/*
-+	 * A failed mmap() very likely causes application failure,
-+	 * so fall back to the bottom-up function here. This scenario
-+	 * can happen with large stack limits and large mmap()
-+	 * allocations.
-+	 */
-+	mm->cached_hole_size = ~0UL;
-+	mm->free_area_cache = TASK_UNMAPPED_BASE;
-+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-+	/*
-+	 * Restore the topdown base:
-+	 */
-+	mm->free_area_cache = mm->mmap_base;
-+	mm->cached_hole_size = ~0UL;
++		case TAG_Zero:
++		case TAG_Valid:
++			setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
++			FPU_copy_to_reg1(st0_ptr, st0_tag);
++			break;
 +
-+	return addr;
-+}
++		case TW_Infinity:
++			/* Infinity*log(1) */
++			if (arith_invalid(1) < 0)
++				return;
++			break;
 +
++		case TW_NaN:
++			if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
++				return;
++			break;
 +
- asmlinkage long sys_uname(struct new_utsname __user * name)
- {
- 	int err;
-diff --git a/arch/x86/kernel/sysenter_32.c b/arch/x86/kernel/sysenter_32.c
-deleted file mode 100644
-index 5a2d951..0000000
---- a/arch/x86/kernel/sysenter_32.c
-+++ /dev/null
-@@ -1,346 +0,0 @@
--/*
-- * (C) Copyright 2002 Linus Torvalds
-- * Portions based on the vdso-randomization code from exec-shield:
-- * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
-- *
-- * This file contains the needed initializations to support sysenter.
-- */
--
--#include <linux/init.h>
--#include <linux/smp.h>
--#include <linux/thread_info.h>
--#include <linux/sched.h>
--#include <linux/gfp.h>
--#include <linux/string.h>
--#include <linux/elf.h>
--#include <linux/mm.h>
--#include <linux/err.h>
--#include <linux/module.h>
--
--#include <asm/cpufeature.h>
--#include <asm/msr.h>
--#include <asm/pgtable.h>
--#include <asm/unistd.h>
--#include <asm/elf.h>
--#include <asm/tlbflush.h>
--
--enum {
--	VDSO_DISABLED = 0,
--	VDSO_ENABLED = 1,
--	VDSO_COMPAT = 2,
--};
--
--#ifdef CONFIG_COMPAT_VDSO
--#define VDSO_DEFAULT	VDSO_COMPAT
--#else
--#define VDSO_DEFAULT	VDSO_ENABLED
--#endif
--
--/*
-- * Should the kernel map a VDSO page into processes and pass its
-- * address down to glibc upon exec()?
-- */
--unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
--
--EXPORT_SYMBOL_GPL(vdso_enabled);
--
--static int __init vdso_setup(char *s)
--{
--	vdso_enabled = simple_strtoul(s, NULL, 0);
--
--	return 1;
--}
--
--__setup("vdso=", vdso_setup);
--
--extern asmlinkage void sysenter_entry(void);
--
--static __init void reloc_symtab(Elf32_Ehdr *ehdr,
--				unsigned offset, unsigned size)
--{
--	Elf32_Sym *sym = (void *)ehdr + offset;
--	unsigned nsym = size / sizeof(*sym);
--	unsigned i;
--
--	for(i = 0; i < nsym; i++, sym++) {
--		if (sym->st_shndx == SHN_UNDEF ||
--		    sym->st_shndx == SHN_ABS)
--			continue;  /* skip */
--
--		if (sym->st_shndx > SHN_LORESERVE) {
--			printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
--			       sym->st_shndx);
--			continue;
--		}
--
--		switch(ELF_ST_TYPE(sym->st_info)) {
--		case STT_OBJECT:
--		case STT_FUNC:
--		case STT_SECTION:
--		case STT_FILE:
--			sym->st_value += VDSO_HIGH_BASE;
--		}
++		default:
+ #ifdef PARANOID
+-	  EXCEPTION(EX_INTERNAL | 0x116);
+-	  return;
++			EXCEPTION(EX_INTERNAL | 0x116);
++			return;
+ #endif /* PARANOID */
+-	  break;
 -	}
--}
--
--static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
--{
--	Elf32_Dyn *dyn = (void *)ehdr + offset;
--
--	for(; dyn->d_tag != DT_NULL; dyn++)
--		switch(dyn->d_tag) {
--		case DT_PLTGOT:
--		case DT_HASH:
--		case DT_STRTAB:
--		case DT_SYMTAB:
--		case DT_RELA:
--		case DT_INIT:
--		case DT_FINI:
--		case DT_REL:
--		case DT_DEBUG:
--		case DT_JMPREL:
--		case DT_VERSYM:
--		case DT_VERDEF:
--		case DT_VERNEED:
--		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
--			/* definitely pointers needing relocation */
--			dyn->d_un.d_ptr += VDSO_HIGH_BASE;
--			break;
--
--		case DT_ENCODING ... OLD_DT_LOOS-1:
--		case DT_LOOS ... DT_HIOS-1:
--			/* Tags above DT_ENCODING are pointers if
--			   they're even */
--			if (dyn->d_tag >= DT_ENCODING &&
--			    (dyn->d_tag & 1) == 0)
--				dyn->d_un.d_ptr += VDSO_HIGH_BASE;
--			break;
--
--		case DT_VERDEFNUM:
--		case DT_VERNEEDNUM:
--		case DT_FLAGS_1:
--		case DT_RELACOUNT:
--		case DT_RELCOUNT:
--		case DT_VALRNGLO ... DT_VALRNGHI:
--			/* definitely not pointers */
--			break;
--
--		case OLD_DT_LOOS ... DT_LOOS-1:
--		case DT_HIOS ... DT_VALRNGLO-1:
--		default:
--			if (dyn->d_tag > DT_ENCODING)
--				printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
--				       dyn->d_tag);
--			break;
+-    }
+-  else if ( (st0_tag == TAG_Valid) || (st0_tag == TW_Denormal) )
+-    {
+-      switch ( st1_tag )
+-	{
+-	case TAG_Zero:
+-	  if ( signnegative(st0_ptr) )
+-	    {
+-	      if ( exponent(st0_ptr) >= 0 )
+-		{
+-		  /* st(0) holds <= -1.0 */
+-#ifdef PECULIAR_486   /* Stupid 80486 doesn't worry about log(negative). */
+-		  changesign(st1_ptr);
++			break;
++		}
++	} else if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
++		switch (st1_tag) {
++		case TAG_Zero:
++			if (signnegative(st0_ptr)) {
++				if (exponent(st0_ptr) >= 0) {
++					/* st(0) holds <= -1.0 */
++#ifdef PECULIAR_486		/* Stupid 80486 doesn't worry about log(negative). */
++					changesign(st1_ptr);
+ #else
+-		  if ( arith_invalid(1) < 0 )
+-		    return;
++					if (arith_invalid(1) < 0)
++						return;
+ #endif /* PECULIAR_486 */
 -		}
--}
--
--static __init void relocate_vdso(Elf32_Ehdr *ehdr)
--{
--	Elf32_Phdr *phdr;
--	Elf32_Shdr *shdr;
--	int i;
--
--	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
--	       !elf_check_arch(ehdr) ||
--	       ehdr->e_type != ET_DYN);
--
--	ehdr->e_entry += VDSO_HIGH_BASE;
+-	      else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-		return;
+-	      else
+-		changesign(st1_ptr);
+-	    }
+-	  else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
+-	  break;
 -
--	/* rebase phdrs */
--	phdr = (void *)ehdr + ehdr->e_phoff;
--	for (i = 0; i < ehdr->e_phnum; i++) {
--		phdr[i].p_vaddr += VDSO_HIGH_BASE;
+-	case TW_Infinity:
+-	  if ( signnegative(st0_ptr) )
+-	    {
+-	      if ( (exponent(st0_ptr) >= 0) &&
+-		  !((st0_ptr->sigh == 0x80000000) &&
+-		    (st0_ptr->sigl == 0)) )
+-		{
+-		  /* st(0) holds < -1.0 */
+-#ifdef PECULIAR_486   /* Stupid 80486 doesn't worry about log(negative). */
+-		  changesign(st1_ptr);
++				} else if ((st0_tag == TW_Denormal)
++					   && (denormal_operand() < 0))
++					return;
++				else
++					changesign(st1_ptr);
++			} else if ((st0_tag == TW_Denormal)
++				   && (denormal_operand() < 0))
++				return;
++			break;
++
++		case TW_Infinity:
++			if (signnegative(st0_ptr)) {
++				if ((exponent(st0_ptr) >= 0) &&
++				    !((st0_ptr->sigh == 0x80000000) &&
++				      (st0_ptr->sigl == 0))) {
++					/* st(0) holds < -1.0 */
++#ifdef PECULIAR_486		/* Stupid 80486 doesn't worry about log(negative). */
++					changesign(st1_ptr);
+ #else
+-		  if ( arith_invalid(1) < 0 ) return;
++					if (arith_invalid(1) < 0)
++						return;
+ #endif /* PECULIAR_486 */
++				} else if ((st0_tag == TW_Denormal)
++					   && (denormal_operand() < 0))
++					return;
++				else
++					changesign(st1_ptr);
++			} else if ((st0_tag == TW_Denormal)
++				   && (denormal_operand() < 0))
++				return;
++			break;
++
++		case TW_NaN:
++			if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
++				return;
+ 		}
+-	      else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-		return;
+-	      else
+-		changesign(st1_ptr);
+-	    }
+-	  else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
+-	  break;
 -
--		/* relocate dynamic stuff */
--		if (phdr[i].p_type == PT_DYNAMIC)
--			reloc_dyn(ehdr, phdr[i].p_offset);
+-	case TW_NaN:
+-	  if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
+-	    return;
 -	}
--
--	/* rebase sections */
--	shdr = (void *)ehdr + ehdr->e_shoff;
--	for(i = 0; i < ehdr->e_shnum; i++) {
--		if (!(shdr[i].sh_flags & SHF_ALLOC))
--			continue;
--
--		shdr[i].sh_addr += VDSO_HIGH_BASE;
--
--		if (shdr[i].sh_type == SHT_SYMTAB ||
--		    shdr[i].sh_type == SHT_DYNSYM)
--			reloc_symtab(ehdr, shdr[i].sh_offset,
--				     shdr[i].sh_size);
+ 
+-    }
+-  else if ( st0_tag == TW_NaN )
+-    {
+-      if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
+-	return;
+-    }
+-  else if ( st0_tag == TW_Infinity )
+-    {
+-      if ( st1_tag == TW_NaN )
+-	{
+-	  if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
+-	    return;
 -	}
--}
--
--void enable_sep_cpu(void)
--{
--	int cpu = get_cpu();
--	struct tss_struct *tss = &per_cpu(init_tss, cpu);
--
--	if (!boot_cpu_has(X86_FEATURE_SEP)) {
--		put_cpu();
--		return;
+-      else if ( signnegative(st0_ptr) )
+-	{
++	} else if (st0_tag == TW_NaN) {
++		if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
++			return;
++	} else if (st0_tag == TW_Infinity) {
++		if (st1_tag == TW_NaN) {
++			if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
++				return;
++		} else if (signnegative(st0_ptr)) {
+ #ifndef PECULIAR_486
+-	  /* This should have higher priority than denormals, but... */
+-	  if ( arith_invalid(1) < 0 )  /* log(-infinity) */
+-	    return;
++			/* This should have higher priority than denormals, but... */
++			if (arith_invalid(1) < 0)	/* log(-infinity) */
++				return;
+ #endif /* PECULIAR_486 */
+-	  if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
++			if ((st1_tag == TW_Denormal)
++			    && (denormal_operand() < 0))
++				return;
+ #ifdef PECULIAR_486
+-	  /* Denormal operands actually get higher priority */
+-	  if ( arith_invalid(1) < 0 )  /* log(-infinity) */
+-	    return;
++			/* Denormal operands actually get higher priority */
++			if (arith_invalid(1) < 0)	/* log(-infinity) */
++				return;
+ #endif /* PECULIAR_486 */
 -	}
--
--	tss->x86_tss.ss1 = __KERNEL_CS;
--	tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
--	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
--	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
--	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
--	put_cpu();	
--}
--
--static struct vm_area_struct gate_vma;
--
--static int __init gate_vma_init(void)
--{
--	gate_vma.vm_mm = NULL;
--	gate_vma.vm_start = FIXADDR_USER_START;
--	gate_vma.vm_end = FIXADDR_USER_END;
--	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
--	gate_vma.vm_page_prot = __P101;
--	/*
--	 * Make sure the vDSO gets into every core dump.
--	 * Dumping its contents makes post-mortem fully interpretable later
--	 * without matching up the same kernel and hardware config to see
--	 * what PC values meant.
--	 */
--	gate_vma.vm_flags |= VM_ALWAYSDUMP;
--	return 0;
--}
--
--/*
-- * These symbols are defined by vsyscall.o to mark the bounds
-- * of the ELF DSO images included therein.
-- */
--extern const char vsyscall_int80_start, vsyscall_int80_end;
--extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
--static struct page *syscall_pages[1];
--
--static void map_compat_vdso(int map)
--{
--	static int vdso_mapped;
--
--	if (map == vdso_mapped)
--		return;
--
--	vdso_mapped = map;
--
--	__set_fixmap(FIX_VDSO, page_to_pfn(syscall_pages[0]) << PAGE_SHIFT,
--		     map ? PAGE_READONLY_EXEC : PAGE_NONE);
--
--	/* flush stray tlbs */
--	flush_tlb_all();
--}
--
--int __init sysenter_setup(void)
--{
--	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
--	const void *vsyscall;
--	size_t vsyscall_len;
--
--	syscall_pages[0] = virt_to_page(syscall_page);
--
--	gate_vma_init();
--
--	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
--
--	if (!boot_cpu_has(X86_FEATURE_SEP)) {
--		vsyscall = &vsyscall_int80_start;
--		vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start;
--	} else {
--		vsyscall = &vsyscall_sysenter_start;
--		vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start;
+-      else if ( st1_tag == TAG_Zero )
+-	{
+-	  /* log(infinity) */
+-	  if ( arith_invalid(1) < 0 )
+-	    return;
 -	}
+-	
+-      /* st(1) must be valid here. */
++		} else if (st1_tag == TAG_Zero) {
++			/* log(infinity) */
++			if (arith_invalid(1) < 0)
++				return;
++		}
+ 
+-      else if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	return;
++		/* st(1) must be valid here. */
++
++		else if ((st1_tag == TW_Denormal) && (denormal_operand() < 0))
++			return;
+ 
+-      /* The Manual says that log(Infinity) is invalid, but a real
+-	 80486 sensibly says that it is o.k. */
+-      else
+-	{
+-	  u_char sign = getsign(st1_ptr);
+-	  FPU_copy_to_reg1(&CONST_INF, TAG_Special);
+-	  setsign(st1_ptr, sign);
++		/* The Manual says that log(Infinity) is invalid, but a real
++		   80486 sensibly says that it is o.k. */
++		else {
++			u_char sign = getsign(st1_ptr);
++			FPU_copy_to_reg1(&CONST_INF, TAG_Special);
++			setsign(st1_ptr, sign);
++		}
+ 	}
+-    }
+ #ifdef PARANOID
+-  else
+-    {
+-      EXCEPTION(EX_INTERNAL | 0x117);
+-      return;
+-    }
++	else {
++		EXCEPTION(EX_INTERNAL | 0x117);
++		return;
++	}
+ #endif /* PARANOID */
+ 
+-  FPU_pop();
+-  return;
++	FPU_pop();
++	return;
+ 
+ }
+ 
 -
--	memcpy(syscall_page, vsyscall, vsyscall_len);
--	relocate_vdso(syscall_page);
--
--	return 0;
--}
--
--/* Defined in vsyscall-sysenter.S */
--extern void SYSENTER_RETURN;
+ static void fscale(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+-  FPU_REG *st1_ptr = &st(1);
+-  u_char st1_tag = FPU_gettagi(1);
+-  int old_cw = control_word;
+-  u_char sign = getsign(st0_ptr);
 -
--/* Setup a VMA at program startup for the vsyscall page */
--int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
--{
--	struct mm_struct *mm = current->mm;
--	unsigned long addr;
--	int ret = 0;
--	bool compat;
+-  clear_C1();
+-  if ( !((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid)) )
+-    {
+-      long scale;
+-      FPU_REG tmp;
 -
--	down_write(&mm->mmap_sem);
+-      /* Convert register for internal use. */
+-      setexponent16(st0_ptr, exponent(st0_ptr));
 -
--	/* Test compat mode once here, in case someone
--	   changes it via sysctl */
--	compat = (vdso_enabled == VDSO_COMPAT);
+-    valid_scale:
 -
--	map_compat_vdso(compat);
+-      if ( exponent(st1_ptr) > 30 )
+-	{
+-	  /* 2^31 is far too large, would require 2^(2^30) or 2^(-2^30) */
 -
--	if (compat)
--		addr = VDSO_HIGH_BASE;
--	else {
--		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
--		if (IS_ERR_VALUE(addr)) {
--			ret = addr;
--			goto up_fail;
--		}
+-	  if ( signpositive(st1_ptr) )
+-	    {
+-	      EXCEPTION(EX_Overflow);
+-	      FPU_copy_to_reg0(&CONST_INF, TAG_Special);
+-	    }
+-	  else
+-	    {
+-	      EXCEPTION(EX_Underflow);
+-	      FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
+-	    }
+-	  setsign(st0_ptr, sign);
+-	  return;
+-	}
 -
--		/*
--		 * MAYWRITE to allow gdb to COW and set breakpoints
--		 *
--		 * Make sure the vDSO gets into every core dump.
--		 * Dumping its contents makes post-mortem fully
--		 * interpretable later without matching up the same
--		 * kernel and hardware config to see what PC values
--		 * meant.
--		 */
--		ret = install_special_mapping(mm, addr, PAGE_SIZE,
--					      VM_READ|VM_EXEC|
--					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
--					      VM_ALWAYSDUMP,
--					      syscall_pages);
+-      control_word &= ~CW_RC;
+-      control_word |= RC_CHOP;
+-      reg_copy(st1_ptr, &tmp);
+-      FPU_round_to_int(&tmp, st1_tag);      /* This can never overflow here */
+-      control_word = old_cw;
+-      scale = signnegative(st1_ptr) ? -tmp.sigl : tmp.sigl;
+-      scale += exponent16(st0_ptr);
 -
--		if (ret)
--			goto up_fail;
--	}
+-      setexponent16(st0_ptr, scale);
 -
--	current->mm->context.vdso = (void *)addr;
--	current_thread_info()->sysenter_return =
--		(void *)VDSO_SYM(&SYSENTER_RETURN);
+-      /* Use FPU_round() to properly detect under/overflow etc */
+-      FPU_round(st0_ptr, 0, 0, control_word, sign);
 -
--  up_fail:
--	up_write(&mm->mmap_sem);
+-      return;
+-    }
 -
--	return ret;
--}
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-  if ( st1_tag == TAG_Special )
+-    st1_tag = FPU_Special(st1_ptr);
 -
--const char *arch_vma_name(struct vm_area_struct *vma)
--{
--	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
--		return "[vdso]";
--	return NULL;
--}
+-  if ( (st0_tag == TAG_Valid) || (st0_tag == TW_Denormal) )
+-    {
+-      switch ( st1_tag )
+-	{
+-	case TAG_Valid:
+-	  /* st(0) must be a denormal */
+-	  if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
 -
--struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
--{
--	struct mm_struct *mm = tsk->mm;
+-	  FPU_to_exp16(st0_ptr, st0_ptr);  /* Will not be left on stack */
+-	  goto valid_scale;
 -
--	/* Check to see if this task was created in compat vdso mode */
--	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
--		return &gate_vma;
--	return NULL;
--}
+-	case TAG_Zero:
+-	  if ( st0_tag == TW_Denormal )
+-	    denormal_operand();
+-	  return;
 -
--int in_gate_area(struct task_struct *task, unsigned long addr)
--{
--	const struct vm_area_struct *vma = get_gate_vma(task);
+-	case TW_Denormal:
+-	  denormal_operand();
+-	  return;
 -
--	return vma && addr >= vma->vm_start && addr < vma->vm_end;
--}
+-	case TW_Infinity:
+-	  if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
+-	    return;
 -
--int in_gate_area_no_task(unsigned long addr)
--{
--	return 0;
--}
-diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c
-new file mode 100644
-index 0000000..6d7ef11
---- /dev/null
-+++ b/arch/x86/kernel/test_nx.c
-@@ -0,0 +1,176 @@
-+/*
-+ * test_nx.c: functional test for NX functionality
-+ *
-+ * (C) Copyright 2008 Intel Corporation
-+ * Author: Arjan van de Ven <arjan at linux.intel.com>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; version 2
-+ * of the License.
-+ */
-+#include <linux/module.h>
-+#include <linux/sort.h>
-+#include <asm/uaccess.h>
-+
-+extern int rodata_test_data;
-+
-+/*
-+ * This file checks 4 things:
-+ * 1) Check if the stack is not executable
-+ * 2) Check if kmalloc memory is not executable
-+ * 3) Check if the .rodata section is not executable
-+ * 4) Check if the .data section of a module is not executable
-+ *
-+ * To do this, the test code tries to execute memory in stack/kmalloc/etc,
-+ * and then checks if the expected trap happens.
-+ *
-+ * Sadly, this implies having a dynamic exception handling table entry.
-+ * ... which can be done (and will make Rusty cry)... but it can only
-+ * be done in a stand-alone module with only 1 entry total.
-+ * (otherwise we'd have to sort and that's just too messy)
-+ */
-+
-+
-+
-+/*
-+ * We want to set up an exception handling point on our stack,
-+ * which means a variable value. This function is rather dirty
-+ * and walks the exception table of the module, looking for a magic
-+ * marker and replaces it with a specific function.
-+ */
-+static void fudze_exception_table(void *marker, void *new)
-+{
-+	struct module *mod = THIS_MODULE;
-+	struct exception_table_entry *extable;
-+
-+	/*
-+	 * Note: This module has only 1 exception table entry,
-+	 * so searching and sorting is not needed. If that changes,
-+	 * this would be the place to search and re-sort the exception
-+	 * table.
-+	 */
-+	if (mod->num_exentries > 1) {
-+		printk(KERN_ERR "test_nx: too many exception table entries!\n");
-+		printk(KERN_ERR "test_nx: test results are not reliable.\n");
-+		return;
-+	}
-+	extable = (struct exception_table_entry *)mod->extable;
-+	extable[0].insn = (unsigned long)new;
-+}
-+
-+
-+/*
-+ * exception tables get their symbols translated so we need
-+ * to use a fake function to put in there, which we can then
-+ * replace at runtime.
-+ */
-+void foo_label(void);
-+
-+/*
-+ * returns 0 for not-executable, negative for executable
-+ *
-+ * Note: we cannot allow this function to be inlined, because
-+ * that would give us more than 1 exception table entry.
-+ * This in turn would break the assumptions above.
-+ */
-+static noinline int test_address(void *address)
-+{
-+	unsigned long result;
-+
-+	/* Set up an exception table entry for our address */
-+	fudze_exception_table(&foo_label, address);
-+	result = 1;
-+	asm volatile(
-+		"foo_label:\n"
-+		"0:	call *%[fake_code]\n"
-+		"1:\n"
-+		".section .fixup,\"ax\"\n"
-+		"2:	mov %[zero], %[rslt]\n"
-+		"	ret\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"       .align 8\n"
-+		"	.quad 0b\n"
-+		"	.quad 2b\n"
-+		".previous\n"
-+		: [rslt] "=r" (result)
-+		: [fake_code] "r" (address), [zero] "r" (0UL), "0" (result)
-+	);
-+	/* change the exception table back for the next round */
-+	fudze_exception_table(address, &foo_label);
-+
-+	if (result)
-+		return -ENODEV;
-+	return 0;
-+}
-+
-+static unsigned char test_data = 0xC3; /* 0xC3 is the opcode for "ret" */
-+
-+static int test_NX(void)
-+{
-+	int ret = 0;
-+	/* 0xC3 is the opcode for "ret" */
-+	char stackcode[] = {0xC3, 0x90, 0 };
-+	char *heap;
-+
-+	test_data = 0xC3;
+-	  if ( signpositive(st1_ptr) )
+-	    FPU_copy_to_reg0(&CONST_INF, TAG_Special);
+-	  else
+-	    FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
+-	  setsign(st0_ptr, sign);
+-	  return;
++	FPU_REG *st1_ptr = &st(1);
++	u_char st1_tag = FPU_gettagi(1);
++	int old_cw = control_word;
++	u_char sign = getsign(st0_ptr);
 +
-+	printk(KERN_INFO "Testing NX protection\n");
++	clear_C1();
++	if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
++		long scale;
++		FPU_REG tmp;
 +
-+	/* Test 1: check if the stack is not executable */
-+	if (test_address(&stackcode)) {
-+		printk(KERN_ERR "test_nx: stack was executable\n");
-+		ret = -ENODEV;
-+	}
++		/* Convert register for internal use. */
++		setexponent16(st0_ptr, exponent(st0_ptr));
 +
++	      valid_scale:
 +
-+	/* Test 2: Check if the heap is executable */
-+	heap = kmalloc(64, GFP_KERNEL);
-+	if (!heap)
-+		return -ENOMEM;
-+	heap[0] = 0xC3; /* opcode for "ret" */
++		if (exponent(st1_ptr) > 30) {
++			/* 2^31 is far too large, would require 2^(2^30) or 2^(-2^30) */
 +
-+	if (test_address(heap)) {
-+		printk(KERN_ERR "test_nx: heap was executable\n");
-+		ret = -ENODEV;
-+	}
-+	kfree(heap);
++			if (signpositive(st1_ptr)) {
++				EXCEPTION(EX_Overflow);
++				FPU_copy_to_reg0(&CONST_INF, TAG_Special);
++			} else {
++				EXCEPTION(EX_Underflow);
++				FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
++			}
++			setsign(st0_ptr, sign);
++			return;
++		}
+ 
+-	case TW_NaN:
+-	  real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
+-	  return;
+-	}
+-    }
+-  else if ( st0_tag == TAG_Zero )
+-    {
+-      switch ( st1_tag )
+-	{
+-	case TAG_Valid:
+-	case TAG_Zero:
+-	  return;
++		control_word &= ~CW_RC;
++		control_word |= RC_CHOP;
++		reg_copy(st1_ptr, &tmp);
++		FPU_round_to_int(&tmp, st1_tag);	/* This can never overflow here */
++		control_word = old_cw;
++		scale = signnegative(st1_ptr) ? -tmp.sigl : tmp.sigl;
++		scale += exponent16(st0_ptr);
+ 
+-	case TW_Denormal:
+-	  denormal_operand();
+-	  return;
++		setexponent16(st0_ptr, scale);
+ 
+-	case TW_Infinity:
+-	  if ( signpositive(st1_ptr) )
+-	    arith_invalid(0); /* Zero scaled by +Infinity */
+-	  return;
++		/* Use FPU_round() to properly detect under/overflow etc */
++		FPU_round(st0_ptr, 0, 0, control_word, sign);
+ 
+-	case TW_NaN:
+-	  real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
+-	  return;
++		return;
+ 	}
+-    }
+-  else if ( st0_tag == TW_Infinity )
+-    {
+-      switch ( st1_tag )
+-	{
+-	case TAG_Valid:
+-	case TAG_Zero:
+-	  return;
+-
+-	case TW_Denormal:
+-	  denormal_operand();
+-	  return;
+ 
+-	case TW_Infinity:
+-	  if ( signnegative(st1_ptr) )
+-	    arith_invalid(0); /* Infinity scaled by -Infinity */
+-	  return;
+-
+-	case TW_NaN:
+-	  real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
+-	  return;
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
++	if (st1_tag == TAG_Special)
++		st1_tag = FPU_Special(st1_ptr);
 +
-+	/*
-+	 * The following 2 tests currently fail, this needs to get fixed
-+	 * Until then, don't run them to avoid too many people getting scared
-+	 * by the error message
-+	 */
-+#if 0
++	if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
++		switch (st1_tag) {
++		case TAG_Valid:
++			/* st(0) must be a denormal */
++			if ((st0_tag == TW_Denormal)
++			    && (denormal_operand() < 0))
++				return;
 +
-+#ifdef CONFIG_DEBUG_RODATA
-+	/* Test 3: Check if the .rodata section is executable */
-+	if (rodata_test_data != 0xC3) {
-+		printk(KERN_ERR "test_nx: .rodata marker has invalid value\n");
-+		ret = -ENODEV;
-+	} else if (test_address(&rodata_test_data)) {
-+		printk(KERN_ERR "test_nx: .rodata section is executable\n");
-+		ret = -ENODEV;
-+	}
-+#endif
++			FPU_to_exp16(st0_ptr, st0_ptr);	/* Will not be left on stack */
++			goto valid_scale;
 +
-+	/* Test 4: Check if the .data section of a module is executable */
-+	if (test_address(&test_data)) {
-+		printk(KERN_ERR "test_nx: .data section is executable\n");
-+		ret = -ENODEV;
-+	}
++		case TAG_Zero:
++			if (st0_tag == TW_Denormal)
++				denormal_operand();
++			return;
 +
-+#endif
-+	return 0;
-+}
++		case TW_Denormal:
++			denormal_operand();
++			return;
 +
-+static void test_exit(void)
-+{
-+}
++		case TW_Infinity:
++			if ((st0_tag == TW_Denormal)
++			    && (denormal_operand() < 0))
++				return;
 +
-+module_init(test_NX);
-+module_exit(test_exit);
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Testcase for the NX infrastructure");
-+MODULE_AUTHOR("Arjan van de Ven <arjan at linux.intel.com>");
-diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
-new file mode 100644
-index 0000000..4c16377
---- /dev/null
-+++ b/arch/x86/kernel/test_rodata.c
-@@ -0,0 +1,86 @@
-+/*
-+ * test_rodata.c: functional test for mark_rodata_ro function
-+ *
-+ * (C) Copyright 2008 Intel Corporation
-+ * Author: Arjan van de Ven <arjan at linux.intel.com>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; version 2
-+ * of the License.
-+ */
-+#include <linux/module.h>
-+#include <asm/sections.h>
-+extern int rodata_test_data;
++			if (signpositive(st1_ptr))
++				FPU_copy_to_reg0(&CONST_INF, TAG_Special);
++			else
++				FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
++			setsign(st0_ptr, sign);
++			return;
 +
-+int rodata_test(void)
-+{
-+	unsigned long result;
-+	unsigned long start, end;
++		case TW_NaN:
++			real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
++			return;
++		}
++	} else if (st0_tag == TAG_Zero) {
++		switch (st1_tag) {
++		case TAG_Valid:
++		case TAG_Zero:
++			return;
 +
-+	/* test 1: read the value */
-+	/* If this test fails, some previous testrun has clobbered the state */
-+	if (!rodata_test_data) {
-+		printk(KERN_ERR "rodata_test: test 1 fails (start data)\n");
-+		return -ENODEV;
-+	}
++		case TW_Denormal:
++			denormal_operand();
++			return;
 +
-+	/* test 2: write to the variable; this should fault */
-+	/*
-+	 * If this test fails, we managed to overwrite the data
-+	 *
-+	 * This is written in assembly to be able to catch the
-+	 * exception that is supposed to happen in the correct
-+	 * case
-+	 */
++		case TW_Infinity:
++			if (signpositive(st1_ptr))
++				arith_invalid(0);	/* Zero scaled by +Infinity */
++			return;
 +
-+	result = 1;
-+	asm volatile(
-+		"0:	mov %[zero],(%[rodata_test])\n"
-+		"	mov %[zero], %[rslt]\n"
-+		"1:\n"
-+		".section .fixup,\"ax\"\n"
-+		"2:	jmp 1b\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"       .align 16\n"
-+#ifdef CONFIG_X86_32
-+		"	.long 0b,2b\n"
-+#else
-+		"	.quad 0b,2b\n"
-+#endif
-+		".previous"
-+		: [rslt] "=r" (result)
-+		: [rodata_test] "r" (&rodata_test_data), [zero] "r" (0UL)
-+	);
++		case TW_NaN:
++			real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
++			return;
++		}
++	} else if (st0_tag == TW_Infinity) {
++		switch (st1_tag) {
++		case TAG_Valid:
++		case TAG_Zero:
++			return;
 +
++		case TW_Denormal:
++			denormal_operand();
++			return;
 +
-+	if (!result) {
-+		printk(KERN_ERR "rodata_test: test data was not read only\n");
-+		return -ENODEV;
-+	}
++		case TW_Infinity:
++			if (signnegative(st1_ptr))
++				arith_invalid(0);	/* Infinity scaled by -Infinity */
++			return;
 +
-+	/* test 3: check the value hasn't changed */
-+	/* If this test fails, we managed to overwrite the data */
-+	if (!rodata_test_data) {
-+		printk(KERN_ERR "rodata_test: Test 3 failes (end data)\n");
-+		return -ENODEV;
-+	}
-+	/* test 4: check if the rodata section is 4Kb aligned */
-+	start = (unsigned long)__start_rodata;
-+	end = (unsigned long)__end_rodata;
-+	if (start & (PAGE_SIZE - 1)) {
-+		printk(KERN_ERR "rodata_test: .rodata is not 4k aligned\n");
-+		return -ENODEV;
-+	}
-+	if (end & (PAGE_SIZE - 1)) {
-+		printk(KERN_ERR "rodata_test: .rodata end is not 4k aligned\n");
-+		return -ENODEV;
++		case TW_NaN:
++			real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
++			return;
++		}
++	} else if (st0_tag == TW_NaN) {
++		if (st1_tag != TAG_Empty) {
++			real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
++			return;
++		}
+ 	}
+-    }
+-  else if ( st0_tag == TW_NaN )
+-    {
+-      if ( st1_tag != TAG_Empty )
+-	{ real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr); return; }
+-    }
+-
+ #ifdef PARANOID
+-  if ( !((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) )
+-    {
+-      EXCEPTION(EX_INTERNAL | 0x115);
+-      return;
+-    }
++	if (!((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty))) {
++		EXCEPTION(EX_INTERNAL | 0x115);
++		return;
 +	}
-+
-+	return 0;
-+}
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Testcase for the DEBUG_RODATA infrastructure");
-+MODULE_AUTHOR("Arjan van de Ven <arjan at linux.intel.com>");
-diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
-index 8a322c9..1a89e93 100644
---- a/arch/x86/kernel/time_32.c
-+++ b/arch/x86/kernel/time_32.c
-@@ -28,98 +28,20 @@
-  *	serialize accesses to xtime/lost_ticks).
-  */
+ #endif
  
--#include <linux/errno.h>
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/param.h>
--#include <linux/string.h>
--#include <linux/mm.h>
-+#include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/time.h>
--#include <linux/delay.h>
--#include <linux/init.h>
--#include <linux/smp.h>
--#include <linux/module.h>
--#include <linux/sysdev.h>
--#include <linux/bcd.h>
--#include <linux/efi.h>
- #include <linux/mca.h>
+-  /* At least one of st(0), st(1) must be empty */
+-  FPU_stack_underflow();
++	/* At least one of st(0), st(1) must be empty */
++	FPU_stack_underflow();
+ 
+ }
  
--#include <asm/io.h>
--#include <asm/smp.h>
--#include <asm/irq.h>
--#include <asm/msr.h>
--#include <asm/delay.h>
--#include <asm/mpspec.h>
--#include <asm/uaccess.h>
--#include <asm/processor.h>
--#include <asm/timer.h>
--#include <asm/time.h>
--
--#include "mach_time.h"
--
--#include <linux/timex.h>
--
--#include <asm/hpet.h>
--
- #include <asm/arch_hooks.h>
--
--#include "io_ports.h"
 -
--#include <asm/i8259.h>
-+#include <asm/hpet.h>
-+#include <asm/time.h>
+ /*---------------------------------------------------------------------------*/
  
- #include "do_timer.h"
+ static FUNC_ST0 const trig_table_a[] = {
+-  f2xm1, fyl2x, fptan, fpatan,
+-  fxtract, fprem1, (FUNC_ST0)fdecstp, (FUNC_ST0)fincstp
++	f2xm1, fyl2x, fptan, fpatan,
++	fxtract, fprem1, (FUNC_ST0) fdecstp, (FUNC_ST0) fincstp
+ };
  
- unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
- EXPORT_SYMBOL(cpu_khz);
+ void FPU_triga(void)
+ {
+-  (trig_table_a[FPU_rm])(&st(0), FPU_gettag0());
++	(trig_table_a[FPU_rm]) (&st(0), FPU_gettag0());
+ }
  
--DEFINE_SPINLOCK(rtc_lock);
--EXPORT_SYMBOL(rtc_lock);
--
--/*
-- * This is a special lock that is owned by the CPU and holds the index
-- * register we are working with.  It is required for NMI access to the
-- * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
-- */
--volatile unsigned long cmos_lock = 0;
--EXPORT_SYMBOL(cmos_lock);
--
--/* Routines for accessing the CMOS RAM/RTC. */
--unsigned char rtc_cmos_read(unsigned char addr)
--{
--	unsigned char val;
--	lock_cmos_prefix(addr);
--	outb_p(addr, RTC_PORT(0));
--	val = inb_p(RTC_PORT(1));
--	lock_cmos_suffix(addr);
--	return val;
--}
--EXPORT_SYMBOL(rtc_cmos_read);
--
--void rtc_cmos_write(unsigned char val, unsigned char addr)
--{
--	lock_cmos_prefix(addr);
--	outb_p(addr, RTC_PORT(0));
--	outb_p(val, RTC_PORT(1));
--	lock_cmos_suffix(addr);
--}
--EXPORT_SYMBOL(rtc_cmos_write);
--
--static int set_rtc_mmss(unsigned long nowtime)
--{
--	int retval;
--	unsigned long flags;
--
--	/* gets recalled with irq locally disabled */
--	/* XXX - does irqsave resolve this? -johnstul */
--	spin_lock_irqsave(&rtc_lock, flags);
--	retval = set_wallclock(nowtime);
--	spin_unlock_irqrestore(&rtc_lock, flags);
--
--	return retval;
--}
--
 -
- int timer_ack;
- 
- unsigned long profile_pc(struct pt_regs *regs)
-@@ -127,17 +49,17 @@ unsigned long profile_pc(struct pt_regs *regs)
- 	unsigned long pc = instruction_pointer(regs);
- 
- #ifdef CONFIG_SMP
--	if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs) &&
-+	if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->cs) &&
- 	    in_lock_functions(pc)) {
- #ifdef CONFIG_FRAME_POINTER
--		return *(unsigned long *)(regs->ebp + 4);
-+		return *(unsigned long *)(regs->bp + 4);
- #else
--		unsigned long *sp = (unsigned long *)&regs->esp;
-+		unsigned long *sp = (unsigned long *)&regs->sp;
+-static FUNC_ST0 const trig_table_b[] =
+-  {
+-    fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0)fsin, fcos
+-  };
++static FUNC_ST0 const trig_table_b[] = {
++	fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0) fsin, fcos
++};
  
- 		/* Return address is either directly at stack pointer
--		   or above a saved eflags. Eflags has bits 22-31 zero,
-+		   or above a saved flags. Eflags has bits 22-31 zero,
- 		   kernel addresses don't. */
-- 		if (sp[0] >> 22)
-+		if (sp[0] >> 22)
- 			return sp[0];
- 		if (sp[1] >> 22)
- 			return sp[1];
-@@ -193,26 +115,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
- 	return IRQ_HANDLED;
+ void FPU_trigb(void)
+ {
+-  (trig_table_b[FPU_rm])(&st(0), FPU_gettag0());
++	(trig_table_b[FPU_rm]) (&st(0), FPU_gettag0());
  }
+diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
+index 2e2c51a..d701e2b 100644
+--- a/arch/x86/math-emu/get_address.c
++++ b/arch/x86/math-emu/get_address.c
+@@ -17,7 +17,6 @@
+  |    other processes using the emulator while swapping is in progress.      |
+  +---------------------------------------------------------------------------*/
  
--/* not static: needed by APM */
--unsigned long read_persistent_clock(void)
--{
--	unsigned long retval;
--	unsigned long flags;
--
--	spin_lock_irqsave(&rtc_lock, flags);
--
--	retval = get_wallclock();
--
--	spin_unlock_irqrestore(&rtc_lock, flags);
--
--	return retval;
--}
--
--int update_persistent_clock(struct timespec now)
--{
--	return set_rtc_mmss(now.tv_sec);
--}
 -
- extern void (*late_time_init)(void);
- /* Duplicate of time_init() below, with hpet_enable part added */
- void __init hpet_time_init(void)
-diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
-index 368b194..0380795 100644
---- a/arch/x86/kernel/time_64.c
-+++ b/arch/x86/kernel/time_64.c
-@@ -11,43 +11,18 @@
-  *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
-  */
+ #include <linux/stddef.h>
  
--#include <linux/kernel.h>
--#include <linux/sched.h>
--#include <linux/interrupt.h>
-+#include <linux/clockchips.h>
- #include <linux/init.h>
--#include <linux/mc146818rtc.h>
--#include <linux/time.h>
--#include <linux/ioport.h>
-+#include <linux/interrupt.h>
- #include <linux/module.h>
--#include <linux/device.h>
--#include <linux/sysdev.h>
--#include <linux/bcd.h>
--#include <linux/notifier.h>
--#include <linux/cpu.h>
--#include <linux/kallsyms.h>
--#include <linux/acpi.h>
--#include <linux/clockchips.h>
-+#include <linux/time.h>
+ #include <asm/uaccess.h>
+@@ -27,31 +26,30 @@
+ #include "exception.h"
+ #include "fpu_emu.h"
  
--#ifdef CONFIG_ACPI
--#include <acpi/achware.h>	/* for PM timer frequency */
--#include <acpi/acpi_bus.h>
--#endif
- #include <asm/i8253.h>
--#include <asm/pgtable.h>
--#include <asm/vsyscall.h>
--#include <asm/timex.h>
--#include <asm/proto.h>
--#include <asm/hpet.h>
--#include <asm/sections.h>
--#include <linux/hpet.h>
--#include <asm/apic.h>
- #include <asm/hpet.h>
--#include <asm/mpspec.h>
- #include <asm/nmi.h>
- #include <asm/vgtod.h>
 -
--DEFINE_SPINLOCK(rtc_lock);
--EXPORT_SYMBOL(rtc_lock);
-+#include <asm/time.h>
-+#include <asm/timer.h>
+ #define FPU_WRITE_BIT 0x10
  
- volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+ static int reg_offset[] = {
+-	offsetof(struct info,___eax),
+-	offsetof(struct info,___ecx),
+-	offsetof(struct info,___edx),
+-	offsetof(struct info,___ebx),
+-	offsetof(struct info,___esp),
+-	offsetof(struct info,___ebp),
+-	offsetof(struct info,___esi),
+-	offsetof(struct info,___edi)
++	offsetof(struct info, ___eax),
++	offsetof(struct info, ___ecx),
++	offsetof(struct info, ___edx),
++	offsetof(struct info, ___ebx),
++	offsetof(struct info, ___esp),
++	offsetof(struct info, ___ebp),
++	offsetof(struct info, ___esi),
++	offsetof(struct info, ___edi)
+ };
  
-@@ -56,10 +31,10 @@ unsigned long profile_pc(struct pt_regs *regs)
- 	unsigned long pc = instruction_pointer(regs);
+ #define REG_(x) (*(long *)(reg_offset[(x)]+(u_char *) FPU_info))
  
- 	/* Assume the lock function has either no stack frame or a copy
--	   of eflags from PUSHF
-+	   of flags from PUSHF
- 	   Eflags always has bits 22 and up cleared unlike kernel addresses. */
- 	if (!user_mode(regs) && in_lock_functions(pc)) {
--		unsigned long *sp = (unsigned long *)regs->rsp;
-+		unsigned long *sp = (unsigned long *)regs->sp;
- 		if (sp[0] >> 22)
- 			return sp[0];
- 		if (sp[1] >> 22)
-@@ -69,82 +44,6 @@ unsigned long profile_pc(struct pt_regs *regs)
- }
- EXPORT_SYMBOL(profile_pc);
+ static int reg_offset_vm86[] = {
+-	offsetof(struct info,___cs),
+-	offsetof(struct info,___vm86_ds),
+-	offsetof(struct info,___vm86_es),
+-	offsetof(struct info,___vm86_fs),
+-	offsetof(struct info,___vm86_gs),
+-	offsetof(struct info,___ss),
+-	offsetof(struct info,___vm86_ds)
+-      };
++	offsetof(struct info, ___cs),
++	offsetof(struct info, ___vm86_ds),
++	offsetof(struct info, ___vm86_es),
++	offsetof(struct info, ___vm86_fs),
++	offsetof(struct info, ___vm86_gs),
++	offsetof(struct info, ___ss),
++	offsetof(struct info, ___vm86_ds)
++};
+ 
+ #define VM86_REG_(x) (*(unsigned short *) \
+ 		      (reg_offset_vm86[((unsigned)x)]+(u_char *) FPU_info))
+@@ -60,158 +58,141 @@ static int reg_offset_vm86[] = {
+ #define ___GS ___ds
+ 
+ static int reg_offset_pm[] = {
+-	offsetof(struct info,___cs),
+-	offsetof(struct info,___ds),
+-	offsetof(struct info,___es),
+-	offsetof(struct info,___fs),
+-	offsetof(struct info,___GS),
+-	offsetof(struct info,___ss),
+-	offsetof(struct info,___ds)
+-      };
++	offsetof(struct info, ___cs),
++	offsetof(struct info, ___ds),
++	offsetof(struct info, ___es),
++	offsetof(struct info, ___fs),
++	offsetof(struct info, ___GS),
++	offsetof(struct info, ___ss),
++	offsetof(struct info, ___ds)
++};
+ 
+ #define PM_REG_(x) (*(unsigned short *) \
+ 		      (reg_offset_pm[((unsigned)x)]+(u_char *) FPU_info))
  
--/*
-- * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
-- * ms after the second nowtime has started, because when nowtime is written
-- * into the registers of the CMOS clock, it will jump to the next second
-- * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
-- * sheet for details.
-- */
--
--static int set_rtc_mmss(unsigned long nowtime)
--{
--	int retval = 0;
--	int real_seconds, real_minutes, cmos_minutes;
--	unsigned char control, freq_select;
--	unsigned long flags;
--
--/*
-- * set_rtc_mmss is called when irqs are enabled, so disable irqs here
-- */
--	spin_lock_irqsave(&rtc_lock, flags);
--/*
-- * Tell the clock it's being set and stop it.
-- */
--	control = CMOS_READ(RTC_CONTROL);
--	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);
--
--	freq_select = CMOS_READ(RTC_FREQ_SELECT);
--	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);
--
--	cmos_minutes = CMOS_READ(RTC_MINUTES);
--		BCD_TO_BIN(cmos_minutes);
--
--/*
-- * since we're only adjusting minutes and seconds, don't interfere with hour
-- * overflow. This avoids messing with unknown time zones but requires your RTC
-- * not to be off by more than 15 minutes. Since we're calling it only when
-- * our clock is externally synchronized using NTP, this shouldn't be a problem.
-- */
--
--	real_seconds = nowtime % 60;
--	real_minutes = nowtime / 60;
--	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
--		real_minutes += 30;		/* correct for half hour time zone */
--	real_minutes %= 60;
 -
--	if (abs(real_minutes - cmos_minutes) >= 30) {
--		printk(KERN_WARNING "time.c: can't update CMOS clock "
--		       "from %d to %d\n", cmos_minutes, real_minutes);
--		retval = -1;
--	} else {
--		BIN_TO_BCD(real_seconds);
--		BIN_TO_BCD(real_minutes);
--		CMOS_WRITE(real_seconds, RTC_SECONDS);
--		CMOS_WRITE(real_minutes, RTC_MINUTES);
--	}
+ /* Decode the SIB byte. This function assumes mod != 0 */
+ static int sib(int mod, unsigned long *fpu_eip)
+ {
+-  u_char ss,index,base;
+-  long offset;
 -
--/*
-- * The following flags have to be released exactly in this order, otherwise the
-- * DS12887 (popular MC146818A clone with integrated battery and quartz) will
-- * not reset the oscillator and will not update precisely 500 ms later. You
-- * won't find this mentioned in the Dallas Semiconductor data sheets, but who
-- * believes data sheets anyway ... -- Markus Kuhn
-- */
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_code_access_ok(1);
+-  FPU_get_user(base, (u_char __user *) (*fpu_eip));   /* The SIB byte */
+-  RE_ENTRANT_CHECK_ON;
+-  (*fpu_eip)++;
+-  ss = base >> 6;
+-  index = (base >> 3) & 7;
+-  base &= 7;
 -
--	CMOS_WRITE(control, RTC_CONTROL);
--	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
+-  if ((mod == 0) && (base == 5))
+-    offset = 0;              /* No base register */
+-  else
+-    offset = REG_(base);
 -
--	spin_unlock_irqrestore(&rtc_lock, flags);
+-  if (index == 4)
+-    {
+-      /* No index register */
+-      /* A non-zero ss is illegal */
+-      if ( ss )
+-	EXCEPTION(EX_Invalid);
+-    }
+-  else
+-    {
+-      offset += (REG_(index)) << ss;
+-    }
 -
--	return retval;
--}
+-  if (mod == 1)
+-    {
+-      /* 8 bit signed displacement */
+-      long displacement;
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_code_access_ok(1);
+-      FPU_get_user(displacement, (signed char __user *) (*fpu_eip));
+-      offset += displacement;
+-      RE_ENTRANT_CHECK_ON;
+-      (*fpu_eip)++;
+-    }
+-  else if (mod == 2 || base == 5) /* The second condition also has mod==0 */
+-    {
+-      /* 32 bit displacement */
+-      long displacement;
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_code_access_ok(4);
+-      FPU_get_user(displacement, (long __user *) (*fpu_eip));
+-      offset += displacement;
+-      RE_ENTRANT_CHECK_ON;
+-      (*fpu_eip) += 4;
+-    }
 -
--int update_persistent_clock(struct timespec now)
--{
--	return set_rtc_mmss(now.tv_sec);
+-  return offset;
 -}
--
- static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
++	u_char ss, index, base;
++	long offset;
++
++	RE_ENTRANT_CHECK_OFF;
++	FPU_code_access_ok(1);
++	FPU_get_user(base, (u_char __user *) (*fpu_eip));	/* The SIB byte */
++	RE_ENTRANT_CHECK_ON;
++	(*fpu_eip)++;
++	ss = base >> 6;
++	index = (base >> 3) & 7;
++	base &= 7;
++
++	if ((mod == 0) && (base == 5))
++		offset = 0;	/* No base register */
++	else
++		offset = REG_(base);
++
++	if (index == 4) {
++		/* No index register */
++		/* A non-zero ss is illegal */
++		if (ss)
++			EXCEPTION(EX_Invalid);
++	} else {
++		offset += (REG_(index)) << ss;
++	}
++
++	if (mod == 1) {
++		/* 8 bit signed displacement */
++		long displacement;
++		RE_ENTRANT_CHECK_OFF;
++		FPU_code_access_ok(1);
++		FPU_get_user(displacement, (signed char __user *)(*fpu_eip));
++		offset += displacement;
++		RE_ENTRANT_CHECK_ON;
++		(*fpu_eip)++;
++	} else if (mod == 2 || base == 5) {	/* The second condition also has mod==0 */
++		/* 32 bit displacement */
++		long displacement;
++		RE_ENTRANT_CHECK_OFF;
++		FPU_code_access_ok(4);
++		FPU_get_user(displacement, (long __user *)(*fpu_eip));
++		offset += displacement;
++		RE_ENTRANT_CHECK_ON;
++		(*fpu_eip) += 4;
++	}
+ 
++	return offset;
++}
+ 
+-static unsigned long vm86_segment(u_char segment,
+-				  struct address *addr)
++static unsigned long vm86_segment(u_char segment, struct address *addr)
  {
- 	add_pda(irq0_irqs, 1);
-@@ -154,67 +53,10 @@ static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
- 	return IRQ_HANDLED;
+-  segment--;
++	segment--;
+ #ifdef PARANOID
+-  if ( segment > PREFIX_SS_ )
+-    {
+-      EXCEPTION(EX_INTERNAL|0x130);
+-      math_abort(FPU_info,SIGSEGV);
+-    }
++	if (segment > PREFIX_SS_) {
++		EXCEPTION(EX_INTERNAL | 0x130);
++		math_abort(FPU_info, SIGSEGV);
++	}
+ #endif /* PARANOID */
+-  addr->selector = VM86_REG_(segment);
+-  return (unsigned long)VM86_REG_(segment) << 4;
++	addr->selector = VM86_REG_(segment);
++	return (unsigned long)VM86_REG_(segment) << 4;
  }
  
--unsigned long read_persistent_clock(void)
--{
--	unsigned int year, mon, day, hour, min, sec;
--	unsigned long flags;
--	unsigned century = 0;
--
--	spin_lock_irqsave(&rtc_lock, flags);
--	/*
--	 * if UIP is clear, then we have >= 244 microseconds before RTC
--	 * registers will be updated.  Spec sheet says that this is the
--	 * reliable way to read RTC - registers invalid (off bus) during update
--	 */
--	while ((CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
--		cpu_relax();
--
--
--	/* now read all RTC registers while stable with interrupts disabled */
--	sec = CMOS_READ(RTC_SECONDS);
--	min = CMOS_READ(RTC_MINUTES);
--	hour = CMOS_READ(RTC_HOURS);
--	day = CMOS_READ(RTC_DAY_OF_MONTH);
--	mon = CMOS_READ(RTC_MONTH);
--	year = CMOS_READ(RTC_YEAR);
--#ifdef CONFIG_ACPI
--	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
--				acpi_gbl_FADT.century)
--		century = CMOS_READ(acpi_gbl_FADT.century);
--#endif
--	spin_unlock_irqrestore(&rtc_lock, flags);
--
--	/*
--	 * We know that x86-64 always uses BCD format, no need to check the
--	 * config register.
--	 */
 -
--	BCD_TO_BIN(sec);
--	BCD_TO_BIN(min);
--	BCD_TO_BIN(hour);
--	BCD_TO_BIN(day);
--	BCD_TO_BIN(mon);
--	BCD_TO_BIN(year);
+ /* This should work for 16 and 32 bit protected mode. */
+ static long pm_address(u_char FPU_modrm, u_char segment,
+ 		       struct address *addr, long offset)
+-{ 
+-  struct desc_struct descriptor;
+-  unsigned long base_address, limit, address, seg_top;
++{
++	struct desc_struct descriptor;
++	unsigned long base_address, limit, address, seg_top;
+ 
+-  segment--;
++	segment--;
+ 
+ #ifdef PARANOID
+-  /* segment is unsigned, so this also detects if segment was 0: */
+-  if ( segment > PREFIX_SS_ )
+-    {
+-      EXCEPTION(EX_INTERNAL|0x132);
+-      math_abort(FPU_info,SIGSEGV);
+-    }
++	/* segment is unsigned, so this also detects if segment was 0: */
++	if (segment > PREFIX_SS_) {
++		EXCEPTION(EX_INTERNAL | 0x132);
++		math_abort(FPU_info, SIGSEGV);
++	}
+ #endif /* PARANOID */
+ 
+-  switch ( segment )
+-    {
+-      /* gs isn't used by the kernel, so it still has its
+-	 user-space value. */
+-    case PREFIX_GS_-1:
+-      /* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
+-      savesegment(gs, addr->selector);
+-      break;
+-    default:
+-      addr->selector = PM_REG_(segment);
+-    }
 -
--	if (century) {
--		BCD_TO_BIN(century);
--		year += century * 100;
--		printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
--	} else {
--		/*
--		 * x86-64 systems only exists since 2002.
--		 * This will work up to Dec 31, 2100
--		 */
--		year += 2000;
--	}
+-  descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+-  base_address = SEG_BASE_ADDR(descriptor);
+-  address = base_address + offset;
+-  limit = base_address
+-	+ (SEG_LIMIT(descriptor)+1) * SEG_GRANULARITY(descriptor) - 1;
+-  if ( limit < base_address ) limit = 0xffffffff;
 -
--	return mktime(year, mon, day, hour, min, sec);
+-  if ( SEG_EXPAND_DOWN(descriptor) )
+-    {
+-      if ( SEG_G_BIT(descriptor) )
+-	seg_top = 0xffffffff;
+-      else
+-	{
+-	  seg_top = base_address + (1 << 20);
+-	  if ( seg_top < base_address ) seg_top = 0xffffffff;
++	switch (segment) {
++		/* gs isn't used by the kernel, so it still has its
++		   user-space value. */
++	case PREFIX_GS_ - 1:
++		/* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
++		savesegment(gs, addr->selector);
++		break;
++	default:
++		addr->selector = PM_REG_(segment);
+ 	}
+-      access_limit =
+-	(address <= limit) || (address >= seg_top) ? 0 :
+-	  ((seg_top-address) >= 255 ? 255 : seg_top-address);
+-    }
+-  else
+-    {
+-      access_limit =
+-	(address > limit) || (address < base_address) ? 0 :
+-	  ((limit-address) >= 254 ? 255 : limit-address+1);
+-    }
+-  if ( SEG_EXECUTE_ONLY(descriptor) ||
+-      (!SEG_WRITE_PERM(descriptor) && (FPU_modrm & FPU_WRITE_BIT)) )
+-    {
+-      access_limit = 0;
+-    }
+-  return address;
 -}
--
- /* calibrate_cpu is used on systems with fixed rate TSCs to determine
-  * processor frequency */
- #define TICK_COUNT 100000000
--static unsigned int __init tsc_calibrate_cpu_khz(void)
-+unsigned long __init native_calculate_cpu_khz(void)
- {
- 	int tsc_start, tsc_now;
- 	int i, no_ctr_free;
-@@ -241,7 +83,7 @@ static unsigned int __init tsc_calibrate_cpu_khz(void)
- 	rdtscl(tsc_start);
- 	do {
- 		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
--		tsc_now = get_cycles_sync();
-+		tsc_now = get_cycles();
- 	} while ((tsc_now - tsc_start) < TICK_COUNT);
- 
- 	local_irq_restore(flags);
-@@ -264,20 +106,22 @@ static struct irqaction irq0 = {
- 	.name		= "timer"
- };
- 
--void __init time_init(void)
-+void __init hpet_time_init(void)
- {
- 	if (!hpet_enable())
- 		setup_pit_timer();
  
- 	setup_irq(0, &irq0);
++	descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
++	base_address = SEG_BASE_ADDR(descriptor);
++	address = base_address + offset;
++	limit = base_address
++	    + (SEG_LIMIT(descriptor) + 1) * SEG_GRANULARITY(descriptor) - 1;
++	if (limit < base_address)
++		limit = 0xffffffff;
++
++	if (SEG_EXPAND_DOWN(descriptor)) {
++		if (SEG_G_BIT(descriptor))
++			seg_top = 0xffffffff;
++		else {
++			seg_top = base_address + (1 << 20);
++			if (seg_top < base_address)
++				seg_top = 0xffffffff;
++		}
++		access_limit =
++		    (address <= limit) || (address >= seg_top) ? 0 :
++		    ((seg_top - address) >= 255 ? 255 : seg_top - address);
++	} else {
++		access_limit =
++		    (address > limit) || (address < base_address) ? 0 :
++		    ((limit - address) >= 254 ? 255 : limit - address + 1);
++	}
++	if (SEG_EXECUTE_ONLY(descriptor) ||
++	    (!SEG_WRITE_PERM(descriptor) && (FPU_modrm & FPU_WRITE_BIT))) {
++		access_limit = 0;
++	}
++	return address;
 +}
  
-+void __init time_init(void)
-+{
- 	tsc_calibrate();
+ /*
+        MOD R/M byte:  MOD == 3 has a special use for the FPU
+@@ -221,7 +202,6 @@ static long pm_address(u_char FPU_modrm, u_char segment,
+        .....   .........   .........
+         MOD    OPCODE(2)     R/M
  
- 	cpu_khz = tsc_khz;
- 	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
--		boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
--		boot_cpu_data.x86 == 16)
--		cpu_khz = tsc_calibrate_cpu_khz();
-+		(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
-+		cpu_khz = calculate_cpu_khz();
+-
+        SIB byte
  
- 	if (unsynchronized_tsc())
- 		mark_tsc_unstable("TSCs unsynchronized");
-@@ -290,4 +134,5 @@ void __init time_init(void)
- 	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
- 		cpu_khz / 1000, cpu_khz % 1000);
- 	init_tsc_clocksource();
-+	late_time_init = choose_time_init();
- }
-diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
-new file mode 100644
-index 0000000..6dfd4e7
---- /dev/null
-+++ b/arch/x86/kernel/tls.c
-@@ -0,0 +1,213 @@
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/user.h>
-+#include <linux/regset.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/desc.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/processor.h>
-+#include <asm/proto.h>
-+
-+#include "tls.h"
-+
-+/*
-+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
-+ */
-+static int get_free_idx(void)
-+{
-+	struct thread_struct *t = &current->thread;
-+	int idx;
-+
-+	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-+		if (desc_empty(&t->tls_array[idx]))
-+			return idx + GDT_ENTRY_TLS_MIN;
-+	return -ESRCH;
-+}
-+
-+static void set_tls_desc(struct task_struct *p, int idx,
-+			 const struct user_desc *info, int n)
+        7   6   5   4   3   2   1   0
+@@ -231,208 +211,194 @@ static long pm_address(u_char FPU_modrm, u_char segment,
+ */
+ 
+ void __user *FPU_get_address(u_char FPU_modrm, unsigned long *fpu_eip,
+-		  struct address *addr,
+-		  fpu_addr_modes addr_modes)
++			     struct address *addr, fpu_addr_modes addr_modes)
 +{
-+	struct thread_struct *t = &p->thread;
-+	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
-+	int cpu;
-+
-+	/*
-+	 * We must not get preempted while modifying the TLS.
-+	 */
-+	cpu = get_cpu();
++	u_char mod;
++	unsigned rm = FPU_modrm & 7;
++	long *cpu_reg_ptr;
++	int address = 0;	/* Initialized just to stop compiler warnings. */
 +
-+	while (n-- > 0) {
-+		if (LDT_empty(info))
-+			desc->a = desc->b = 0;
-+		else
-+			fill_ldt(desc, info);
-+		++info;
-+		++desc;
++	/* Memory accessed via the cs selector is write protected
++	   in `non-segmented' 32 bit protected mode. */
++	if (!addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
++	    && (addr_modes.override.segment == PREFIX_CS_)) {
++		math_abort(FPU_info, SIGSEGV);
 +	}
 +
-+	if (t == &current->thread)
-+		load_TLS(t, cpu);
-+
-+	put_cpu();
-+}
-+
-+/*
-+ * Set a given TLS descriptor:
-+ */
-+int do_set_thread_area(struct task_struct *p, int idx,
-+		       struct user_desc __user *u_info,
-+		       int can_allocate)
-+{
-+	struct user_desc info;
-+
-+	if (copy_from_user(&info, u_info, sizeof(info)))
-+		return -EFAULT;
++	addr->selector = FPU_DS;	/* Default, for 32 bit non-segmented mode. */
 +
-+	if (idx == -1)
-+		idx = info.entry_number;
++	mod = (FPU_modrm >> 6) & 3;
 +
-+	/*
-+	 * index -1 means the kernel should try to find and
-+	 * allocate an empty descriptor:
-+	 */
-+	if (idx == -1 && can_allocate) {
-+		idx = get_free_idx();
-+		if (idx < 0)
-+			return idx;
-+		if (put_user(idx, &u_info->entry_number))
-+			return -EFAULT;
++	if (rm == 4 && mod != 3) {
++		address = sib(mod, fpu_eip);
++	} else {
++		cpu_reg_ptr = &REG_(rm);
++		switch (mod) {
++		case 0:
++			if (rm == 5) {
++				/* Special case: disp32 */
++				RE_ENTRANT_CHECK_OFF;
++				FPU_code_access_ok(4);
++				FPU_get_user(address,
++					     (unsigned long __user
++					      *)(*fpu_eip));
++				(*fpu_eip) += 4;
++				RE_ENTRANT_CHECK_ON;
++				addr->offset = address;
++				return (void __user *)address;
++			} else {
++				address = *cpu_reg_ptr;	/* Just return the contents
++							   of the cpu register */
++				addr->offset = address;
++				return (void __user *)address;
++			}
++		case 1:
++			/* 8 bit signed displacement */
++			RE_ENTRANT_CHECK_OFF;
++			FPU_code_access_ok(1);
++			FPU_get_user(address, (signed char __user *)(*fpu_eip));
++			RE_ENTRANT_CHECK_ON;
++			(*fpu_eip)++;
++			break;
++		case 2:
++			/* 32 bit displacement */
++			RE_ENTRANT_CHECK_OFF;
++			FPU_code_access_ok(4);
++			FPU_get_user(address, (long __user *)(*fpu_eip));
++			(*fpu_eip) += 4;
++			RE_ENTRANT_CHECK_ON;
++			break;
++		case 3:
++			/* Not legal for the FPU */
++			EXCEPTION(EX_Invalid);
++		}
++		address += *cpu_reg_ptr;
 +	}
 +
-+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+		return -EINVAL;
-+
-+	set_tls_desc(p, idx, &info, 1);
-+
-+	return 0;
-+}
-+
-+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-+{
-+	return do_set_thread_area(current, -1, u_info, 1);
-+}
-+
-+
-+/*
-+ * Get the current Thread-Local Storage area:
-+ */
-+
-+static void fill_user_desc(struct user_desc *info, int idx,
-+			   const struct desc_struct *desc)
-+
-+{
-+	memset(info, 0, sizeof(*info));
-+	info->entry_number = idx;
-+	info->base_addr = get_desc_base(desc);
-+	info->limit = get_desc_limit(desc);
-+	info->seg_32bit = desc->d;
-+	info->contents = desc->type >> 2;
-+	info->read_exec_only = !(desc->type & 2);
-+	info->limit_in_pages = desc->g;
-+	info->seg_not_present = !desc->p;
-+	info->useable = desc->avl;
-+#ifdef CONFIG_X86_64
-+	info->lm = desc->l;
-+#endif
-+}
-+
-+int do_get_thread_area(struct task_struct *p, int idx,
-+		       struct user_desc __user *u_info)
-+{
-+	struct user_desc info;
-+
-+	if (idx == -1 && get_user(idx, &u_info->entry_number))
-+		return -EFAULT;
-+
-+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+		return -EINVAL;
-+
-+	fill_user_desc(&info, idx,
-+		       &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);
-+
-+	if (copy_to_user(u_info, &info, sizeof(info)))
-+		return -EFAULT;
-+	return 0;
-+}
++	addr->offset = address;
 +
-+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-+{
-+	return do_get_thread_area(current, -1, u_info);
-+}
++	switch (addr_modes.default_mode) {
++	case 0:
++		break;
++	case VM86:
++		address += vm86_segment(addr_modes.override.segment, addr);
++		break;
++	case PM16:
++	case SEG32:
++		address = pm_address(FPU_modrm, addr_modes.override.segment,
++				     addr, address);
++		break;
++	default:
++		EXCEPTION(EX_INTERNAL | 0x133);
++	}
 +
-+int regset_tls_active(struct task_struct *target,
-+		      const struct user_regset *regset)
-+{
-+	struct thread_struct *t = &target->thread;
-+	int n = GDT_ENTRY_TLS_ENTRIES;
-+	while (n > 0 && desc_empty(&t->tls_array[n - 1]))
-+		--n;
-+	return n;
++	return (void __user *)address;
 +}
 +
-+int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
-+		   unsigned int pos, unsigned int count,
-+		   void *kbuf, void __user *ubuf)
-+{
-+	const struct desc_struct *tls;
++void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
++				struct address *addr, fpu_addr_modes addr_modes)
+ {
+-  u_char mod;
+-  unsigned rm = FPU_modrm & 7;
+-  long *cpu_reg_ptr;
+-  int address = 0;     /* Initialized just to stop compiler warnings. */
+-
+-  /* Memory accessed via the cs selector is write protected
+-     in `non-segmented' 32 bit protected mode. */
+-  if ( !addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
+-      && (addr_modes.override.segment == PREFIX_CS_) )
+-    {
+-      math_abort(FPU_info,SIGSEGV);
+-    }
+-
+-  addr->selector = FPU_DS;   /* Default, for 32 bit non-segmented mode. */
+-
+-  mod = (FPU_modrm >> 6) & 3;
+-
+-  if (rm == 4 && mod != 3)
+-    {
+-      address = sib(mod, fpu_eip);
+-    }
+-  else
+-    {
+-      cpu_reg_ptr = & REG_(rm);
+-      switch (mod)
+-	{
++	u_char mod;
++	unsigned rm = FPU_modrm & 7;
++	int address = 0;	/* Default used for mod == 0 */
 +
-+	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
-+	    (pos % sizeof(struct user_desc)) != 0 ||
-+	    (count % sizeof(struct user_desc)) != 0)
-+		return -EINVAL;
++	/* Memory accessed via the cs selector is write protected
++	   in `non-segmented' 32 bit protected mode. */
++	if (!addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
++	    && (addr_modes.override.segment == PREFIX_CS_)) {
++		math_abort(FPU_info, SIGSEGV);
++	}
 +
-+	pos /= sizeof(struct user_desc);
-+	count /= sizeof(struct user_desc);
++	addr->selector = FPU_DS;	/* Default, for 32 bit non-segmented mode. */
 +
-+	tls = &target->thread.tls_array[pos];
++	mod = (FPU_modrm >> 6) & 3;
 +
-+	if (kbuf) {
-+		struct user_desc *info = kbuf;
-+		while (count-- > 0)
-+			fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
-+				       tls++);
-+	} else {
-+		struct user_desc __user *u_info = ubuf;
-+		while (count-- > 0) {
-+			struct user_desc info;
-+			fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
-+			if (__copy_to_user(u_info++, &info, sizeof(info)))
-+				return -EFAULT;
++	switch (mod) {
+ 	case 0:
+-	  if (rm == 5)
+-	    {
+-	      /* Special case: disp32 */
+-	      RE_ENTRANT_CHECK_OFF;
+-	      FPU_code_access_ok(4);
+-	      FPU_get_user(address, (unsigned long __user *) (*fpu_eip));
+-	      (*fpu_eip) += 4;
+-	      RE_ENTRANT_CHECK_ON;
+-	      addr->offset = address;
+-	      return (void __user *) address;
+-	    }
+-	  else
+-	    {
+-	      address = *cpu_reg_ptr;  /* Just return the contents
+-					  of the cpu register */
+-	      addr->offset = address;
+-	      return (void __user *) address;
+-	    }
++		if (rm == 6) {
++			/* Special case: disp16 */
++			RE_ENTRANT_CHECK_OFF;
++			FPU_code_access_ok(2);
++			FPU_get_user(address,
++				     (unsigned short __user *)(*fpu_eip));
++			(*fpu_eip) += 2;
++			RE_ENTRANT_CHECK_ON;
++			goto add_segment;
 +		}
++		break;
+ 	case 1:
+-	  /* 8 bit signed displacement */
+-	  RE_ENTRANT_CHECK_OFF;
+-	  FPU_code_access_ok(1);
+-	  FPU_get_user(address, (signed char __user *) (*fpu_eip));
+-	  RE_ENTRANT_CHECK_ON;
+-	  (*fpu_eip)++;
+-	  break;
++		/* 8 bit signed displacement */
++		RE_ENTRANT_CHECK_OFF;
++		FPU_code_access_ok(1);
++		FPU_get_user(address, (signed char __user *)(*fpu_eip));
++		RE_ENTRANT_CHECK_ON;
++		(*fpu_eip)++;
++		break;
+ 	case 2:
+-	  /* 32 bit displacement */
+-	  RE_ENTRANT_CHECK_OFF;
+-	  FPU_code_access_ok(4);
+-	  FPU_get_user(address, (long __user *) (*fpu_eip));
+-	  (*fpu_eip) += 4;
+-	  RE_ENTRANT_CHECK_ON;
+-	  break;
++		/* 16 bit displacement */
++		RE_ENTRANT_CHECK_OFF;
++		FPU_code_access_ok(2);
++		FPU_get_user(address, (unsigned short __user *)(*fpu_eip));
++		(*fpu_eip) += 2;
++		RE_ENTRANT_CHECK_ON;
++		break;
+ 	case 3:
+-	  /* Not legal for the FPU */
+-	  EXCEPTION(EX_Invalid);
++		/* Not legal for the FPU */
++		EXCEPTION(EX_Invalid);
++		break;
 +	}
++	switch (rm) {
++	case 0:
++		address += FPU_info->___ebx + FPU_info->___esi;
++		break;
++	case 1:
++		address += FPU_info->___ebx + FPU_info->___edi;
++		break;
++	case 2:
++		address += FPU_info->___ebp + FPU_info->___esi;
++		if (addr_modes.override.segment == PREFIX_DEFAULT)
++			addr_modes.override.segment = PREFIX_SS_;
++		break;
++	case 3:
++		address += FPU_info->___ebp + FPU_info->___edi;
++		if (addr_modes.override.segment == PREFIX_DEFAULT)
++			addr_modes.override.segment = PREFIX_SS_;
++		break;
++	case 4:
++		address += FPU_info->___esi;
++		break;
++	case 5:
++		address += FPU_info->___edi;
++		break;
++	case 6:
++		address += FPU_info->___ebp;
++		if (addr_modes.override.segment == PREFIX_DEFAULT)
++			addr_modes.override.segment = PREFIX_SS_;
++		break;
++	case 7:
++		address += FPU_info->___ebx;
++		break;
+ 	}
+-      address += *cpu_reg_ptr;
+-    }
+-
+-  addr->offset = address;
+-
+-  switch ( addr_modes.default_mode )
+-    {
+-    case 0:
+-      break;
+-    case VM86:
+-      address += vm86_segment(addr_modes.override.segment, addr);
+-      break;
+-    case PM16:
+-    case SEG32:
+-      address = pm_address(FPU_modrm, addr_modes.override.segment,
+-			   addr, address);
+-      break;
+-    default:
+-      EXCEPTION(EX_INTERNAL|0x133);
+-    }
+-
+-  return (void __user *)address;
+-}
+ 
++      add_segment:
++	address &= 0xffff;
+ 
+-void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
+-		     struct address *addr,
+-		     fpu_addr_modes addr_modes)
+-{
+-  u_char mod;
+-  unsigned rm = FPU_modrm & 7;
+-  int address = 0;     /* Default used for mod == 0 */
+-
+-  /* Memory accessed via the cs selector is write protected
+-     in `non-segmented' 32 bit protected mode. */
+-  if ( !addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
+-      && (addr_modes.override.segment == PREFIX_CS_) )
+-    {
+-      math_abort(FPU_info,SIGSEGV);
+-    }
+-
+-  addr->selector = FPU_DS;   /* Default, for 32 bit non-segmented mode. */
+-
+-  mod = (FPU_modrm >> 6) & 3;
+-
+-  switch (mod)
+-    {
+-    case 0:
+-      if (rm == 6)
+-	{
+-	  /* Special case: disp16 */
+-	  RE_ENTRANT_CHECK_OFF;
+-	  FPU_code_access_ok(2);
+-	  FPU_get_user(address, (unsigned short __user *) (*fpu_eip));
+-	  (*fpu_eip) += 2;
+-	  RE_ENTRANT_CHECK_ON;
+-	  goto add_segment;
++	addr->offset = address;
 +
-+	return 0;
-+}
-+
-+int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
-+		   unsigned int pos, unsigned int count,
-+		   const void *kbuf, const void __user *ubuf)
-+{
-+	struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
-+	const struct user_desc *info;
-+
-+	if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
-+	    (pos % sizeof(struct user_desc)) != 0 ||
-+	    (count % sizeof(struct user_desc)) != 0)
-+		return -EINVAL;
-+
-+	if (kbuf)
-+		info = kbuf;
-+	else if (__copy_from_user(infobuf, ubuf, count))
-+		return -EFAULT;
-+	else
-+		info = infobuf;
-+
-+	set_tls_desc(target,
-+		     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
-+		     info, count / sizeof(struct user_desc));
-+
-+	return 0;
-+}
-diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
-new file mode 100644
-index 0000000..2f083a2
---- /dev/null
-+++ b/arch/x86/kernel/tls.h
-@@ -0,0 +1,21 @@
-+/*
-+ * Internal declarations for x86 TLS implementation functions.
-+ *
-+ * Copyright (C) 2007 Red Hat, Inc.  All rights reserved.
-+ *
-+ * This copyrighted material is made available to anyone wishing to use,
-+ * modify, copy, or redistribute it subject to the terms and conditions
-+ * of the GNU General Public License v.2.
-+ *
-+ * Red Hat Author: Roland McGrath.
-+ */
-+
-+#ifndef _ARCH_X86_KERNEL_TLS_H
-+
-+#include <linux/regset.h>
-+
-+extern user_regset_active_fn regset_tls_active;
-+extern user_regset_get_fn regset_tls_get;
-+extern user_regset_set_fn regset_tls_set;
++	switch (addr_modes.default_mode) {
++	case 0:
++		break;
++	case VM86:
++		address += vm86_segment(addr_modes.override.segment, addr);
++		break;
++	case PM16:
++	case SEG32:
++		address = pm_address(FPU_modrm, addr_modes.override.segment,
++				     addr, address);
++		break;
++	default:
++		EXCEPTION(EX_INTERNAL | 0x131);
+ 	}
+-      break;
+-    case 1:
+-      /* 8 bit signed displacement */
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_code_access_ok(1);
+-      FPU_get_user(address, (signed char __user *) (*fpu_eip));
+-      RE_ENTRANT_CHECK_ON;
+-      (*fpu_eip)++;
+-      break;
+-    case 2:
+-      /* 16 bit displacement */
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_code_access_ok(2);
+-      FPU_get_user(address, (unsigned short __user *) (*fpu_eip));
+-      (*fpu_eip) += 2;
+-      RE_ENTRANT_CHECK_ON;
+-      break;
+-    case 3:
+-      /* Not legal for the FPU */
+-      EXCEPTION(EX_Invalid);
+-      break;
+-    }
+-  switch ( rm )
+-    {
+-    case 0:
+-      address += FPU_info->___ebx + FPU_info->___esi;
+-      break;
+-    case 1:
+-      address += FPU_info->___ebx + FPU_info->___edi;
+-      break;
+-    case 2:
+-      address += FPU_info->___ebp + FPU_info->___esi;
+-      if ( addr_modes.override.segment == PREFIX_DEFAULT )
+-	addr_modes.override.segment = PREFIX_SS_;
+-      break;
+-    case 3:
+-      address += FPU_info->___ebp + FPU_info->___edi;
+-      if ( addr_modes.override.segment == PREFIX_DEFAULT )
+-	addr_modes.override.segment = PREFIX_SS_;
+-      break;
+-    case 4:
+-      address += FPU_info->___esi;
+-      break;
+-    case 5:
+-      address += FPU_info->___edi;
+-      break;
+-    case 6:
+-      address += FPU_info->___ebp;
+-      if ( addr_modes.override.segment == PREFIX_DEFAULT )
+-	addr_modes.override.segment = PREFIX_SS_;
+-      break;
+-    case 7:
+-      address += FPU_info->___ebx;
+-      break;
+-    }
+-
+- add_segment:
+-  address &= 0xffff;
+-
+-  addr->offset = address;
+-
+-  switch ( addr_modes.default_mode )
+-    {
+-    case 0:
+-      break;
+-    case VM86:
+-      address += vm86_segment(addr_modes.override.segment, addr);
+-      break;
+-    case PM16:
+-    case SEG32:
+-      address = pm_address(FPU_modrm, addr_modes.override.segment,
+-			   addr, address);
+-      break;
+-    default:
+-      EXCEPTION(EX_INTERNAL|0x131);
+-    }
+-
+-  return (void __user *)address ;
 +
-+#endif	/* _ARCH_X86_KERNEL_TLS_H */
-diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
-index 7e16d67..78cbb65 100644
---- a/arch/x86/kernel/topology.c
-+++ b/arch/x86/kernel/topology.c
-@@ -31,9 +31,10 @@
- #include <linux/mmzone.h>
- #include <asm/cpu.h>
++	return (void __user *)address;
+ }
+diff --git a/arch/x86/math-emu/load_store.c b/arch/x86/math-emu/load_store.c
+index eebd6fb..2931ff3 100644
+--- a/arch/x86/math-emu/load_store.c
++++ b/arch/x86/math-emu/load_store.c
+@@ -26,247 +26,257 @@
+ #include "status_w.h"
+ #include "control_w.h"
  
--static struct i386_cpu cpu_devices[NR_CPUS];
-+static DEFINE_PER_CPU(struct x86_cpu, cpu_devices);
+-
+-#define _NONE_ 0   /* st0_ptr etc not needed */
+-#define _REG0_ 1   /* Will be storing st(0) */
+-#define _PUSH_ 3   /* Need to check for space to push onto stack */
+-#define _null_ 4   /* Function illegal or not implemented */
++#define _NONE_ 0		/* st0_ptr etc not needed */
++#define _REG0_ 1		/* Will be storing st(0) */
++#define _PUSH_ 3		/* Need to check for space to push onto stack */
++#define _null_ 4		/* Function illegal or not implemented */
+ 
+ #define pop_0()	{ FPU_settag0(TAG_Empty); top++; }
  
--int __cpuinit arch_register_cpu(int num)
-+#ifdef CONFIG_HOTPLUG_CPU
-+int arch_register_cpu(int num)
- {
- 	/*
- 	 * CPU0 cannot be offlined due to several
-@@ -44,21 +45,23 @@ int __cpuinit arch_register_cpu(int num)
- 	 * Also certain PCI quirks require not to enable hotplug control
- 	 * for all CPU's.
- 	 */
--#ifdef CONFIG_HOTPLUG_CPU
- 	if (num)
--		cpu_devices[num].cpu.hotpluggable = 1;
--#endif
 -
--	return register_cpu(&cpu_devices[num].cpu, num);
-+		per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
-+	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
- }
-+EXPORT_SYMBOL(arch_register_cpu);
+ static u_char const type_table[32] = {
+-  _PUSH_, _PUSH_, _PUSH_, _PUSH_,
+-  _null_, _null_, _null_, _null_,
+-  _REG0_, _REG0_, _REG0_, _REG0_,
+-  _REG0_, _REG0_, _REG0_, _REG0_,
+-  _NONE_, _null_, _NONE_, _PUSH_,
+-  _NONE_, _PUSH_, _null_, _PUSH_,
+-  _NONE_, _null_, _NONE_, _REG0_,
+-  _NONE_, _REG0_, _NONE_, _REG0_
+-  };
++	_PUSH_, _PUSH_, _PUSH_, _PUSH_,
++	_null_, _null_, _null_, _null_,
++	_REG0_, _REG0_, _REG0_, _REG0_,
++	_REG0_, _REG0_, _REG0_, _REG0_,
++	_NONE_, _null_, _NONE_, _PUSH_,
++	_NONE_, _PUSH_, _null_, _PUSH_,
++	_NONE_, _null_, _NONE_, _REG0_,
++	_NONE_, _REG0_, _NONE_, _REG0_
++};
  
--#ifdef CONFIG_HOTPLUG_CPU
- void arch_unregister_cpu(int num)
+ u_char const data_sizes_16[32] = {
+-  4,  4,  8,  2,  0,  0,  0,  0,
+-  4,  4,  8,  2,  4,  4,  8,  2,
+-  14, 0, 94, 10,  2, 10,  0,  8,  
+-  14, 0, 94, 10,  2, 10,  2,  8
++	4, 4, 8, 2, 0, 0, 0, 0,
++	4, 4, 8, 2, 4, 4, 8, 2,
++	14, 0, 94, 10, 2, 10, 0, 8,
++	14, 0, 94, 10, 2, 10, 2, 8
+ };
+ 
+ static u_char const data_sizes_32[32] = {
+-  4,  4,  8,  2,  0,  0,  0,  0,
+-  4,  4,  8,  2,  4,  4,  8,  2,
+-  28, 0,108, 10,  2, 10,  0,  8,  
+-  28, 0,108, 10,  2, 10,  2,  8
++	4, 4, 8, 2, 0, 0, 0, 0,
++	4, 4, 8, 2, 4, 4, 8, 2,
++	28, 0, 108, 10, 2, 10, 0, 8,
++	28, 0, 108, 10, 2, 10, 2, 8
+ };
+ 
+ int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
+-		     void __user *data_address)
++		   void __user * data_address)
  {
--	return unregister_cpu(&cpu_devices[num].cpu);
-+	return unregister_cpu(&per_cpu(cpu_devices, num).cpu);
- }
--EXPORT_SYMBOL(arch_register_cpu);
- EXPORT_SYMBOL(arch_unregister_cpu);
-+#else
-+int arch_register_cpu(int num)
-+{
-+	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
-+}
-+EXPORT_SYMBOL(arch_register_cpu);
- #endif /*CONFIG_HOTPLUG_CPU*/
+-  FPU_REG loaded_data;
+-  FPU_REG *st0_ptr;
+-  u_char st0_tag = TAG_Empty;  /* This is just to stop a gcc warning. */
+-  u_char loaded_tag;
++	FPU_REG loaded_data;
++	FPU_REG *st0_ptr;
++	u_char st0_tag = TAG_Empty;	/* This is just to stop a gcc warning. */
++	u_char loaded_tag;
  
- static int __init topology_init(void)
-diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
-index 02d1e1e..3cf7297 100644
---- a/arch/x86/kernel/traps_32.c
-+++ b/arch/x86/kernel/traps_32.c
-@@ -76,7 +76,8 @@ char ignore_fpu_irq = 0;
-  * F0 0F bug workaround.. We have a special link segment
-  * for this.
-  */
--struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-+gate_desc idt_table[256]
-+	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+-  st0_ptr = NULL;    /* Initialized just to stop compiler warnings. */
++	st0_ptr = NULL;		/* Initialized just to stop compiler warnings. */
  
- asmlinkage void divide_error(void);
- asmlinkage void debug(void);
-@@ -101,6 +102,34 @@ asmlinkage void machine_check(void);
- int kstack_depth_to_print = 24;
- static unsigned int code_bytes = 64;
+-  if ( addr_modes.default_mode & PROTECTED )
+-    {
+-      if ( addr_modes.default_mode == SEG32 )
+-	{
+-	  if ( access_limit < data_sizes_32[type] )
+-	    math_abort(FPU_info,SIGSEGV);
+-	}
+-      else if ( addr_modes.default_mode == PM16 )
+-	{
+-	  if ( access_limit < data_sizes_16[type] )
+-	    math_abort(FPU_info,SIGSEGV);
+-	}
++	if (addr_modes.default_mode & PROTECTED) {
++		if (addr_modes.default_mode == SEG32) {
++			if (access_limit < data_sizes_32[type])
++				math_abort(FPU_info, SIGSEGV);
++		} else if (addr_modes.default_mode == PM16) {
++			if (access_limit < data_sizes_16[type])
++				math_abort(FPU_info, SIGSEGV);
++		}
+ #ifdef PARANOID
+-      else
+-	EXCEPTION(EX_INTERNAL|0x140);
++		else
++			EXCEPTION(EX_INTERNAL | 0x140);
+ #endif /* PARANOID */
+-    }
++	}
  
-+void printk_address(unsigned long address, int reliable)
-+{
-+#ifdef CONFIG_KALLSYMS
-+	unsigned long offset = 0, symsize;
-+	const char *symname;
-+	char *modname;
-+	char *delim = ":";
-+	char namebuf[128];
-+	char reliab[4] = "";
+-  switch ( type_table[type] )
+-    {
+-    case _NONE_:
+-      break;
+-    case _REG0_:
+-      st0_ptr = &st(0);       /* Some of these instructions pop after
+-				 storing */
+-      st0_tag = FPU_gettag0();
+-      break;
+-    case _PUSH_:
+-      {
+-	if ( FPU_gettagi(-1) != TAG_Empty )
+-	  { FPU_stack_overflow(); return 0; }
+-	top--;
+-	st0_ptr = &st(0);
+-      }
+-      break;
+-    case _null_:
+-      FPU_illegal();
+-      return 0;
++	switch (type_table[type]) {
++	case _NONE_:
++		break;
++	case _REG0_:
++		st0_ptr = &st(0);	/* Some of these instructions pop after
++					   storing */
++		st0_tag = FPU_gettag0();
++		break;
++	case _PUSH_:
++		{
++			if (FPU_gettagi(-1) != TAG_Empty) {
++				FPU_stack_overflow();
++				return 0;
++			}
++			top--;
++			st0_ptr = &st(0);
++		}
++		break;
++	case _null_:
++		FPU_illegal();
++		return 0;
+ #ifdef PARANOID
+-    default:
+-      EXCEPTION(EX_INTERNAL|0x141);
+-      return 0;
++	default:
++		EXCEPTION(EX_INTERNAL | 0x141);
++		return 0;
+ #endif /* PARANOID */
+-    }
+-
+-  switch ( type )
+-    {
+-    case 000:       /* fld m32real */
+-      clear_C1();
+-      loaded_tag = FPU_load_single((float __user *)data_address, &loaded_data);
+-      if ( (loaded_tag == TAG_Special)
+-	   && isNaN(&loaded_data)
+-	   && (real_1op_NaN(&loaded_data) < 0) )
+-	{
+-	  top++;
+-	  break;
+-	}
+-      FPU_copy_to_reg0(&loaded_data, loaded_tag);
+-      break;
+-    case 001:      /* fild m32int */
+-      clear_C1();
+-      loaded_tag = FPU_load_int32((long __user *)data_address, &loaded_data);
+-      FPU_copy_to_reg0(&loaded_data, loaded_tag);
+-      break;
+-    case 002:      /* fld m64real */
+-      clear_C1();
+-      loaded_tag = FPU_load_double((double __user *)data_address, &loaded_data);
+-      if ( (loaded_tag == TAG_Special)
+-	   && isNaN(&loaded_data)
+-	   && (real_1op_NaN(&loaded_data) < 0) )
+-	{
+-	  top++;
+-	  break;
+ 	}
+-      FPU_copy_to_reg0(&loaded_data, loaded_tag);
+-      break;
+-    case 003:      /* fild m16int */
+-      clear_C1();
+-      loaded_tag = FPU_load_int16((short __user *)data_address, &loaded_data);
+-      FPU_copy_to_reg0(&loaded_data, loaded_tag);
+-      break;
+-    case 010:      /* fst m32real */
+-      clear_C1();
+-      FPU_store_single(st0_ptr, st0_tag, (float __user *)data_address);
+-      break;
+-    case 011:      /* fist m32int */
+-      clear_C1();
+-      FPU_store_int32(st0_ptr, st0_tag, (long __user *)data_address);
+-      break;
+-    case 012:     /* fst m64real */
+-      clear_C1();
+-      FPU_store_double(st0_ptr, st0_tag, (double __user *)data_address);
+-      break;
+-    case 013:     /* fist m16int */
+-      clear_C1();
+-      FPU_store_int16(st0_ptr, st0_tag, (short __user *)data_address);
+-      break;
+-    case 014:     /* fstp m32real */
+-      clear_C1();
+-      if ( FPU_store_single(st0_ptr, st0_tag, (float __user *)data_address) )
+-	pop_0();  /* pop only if the number was actually stored
+-		     (see the 80486 manual p16-28) */
+-      break;
+-    case 015:     /* fistp m32int */
+-      clear_C1();
+-      if ( FPU_store_int32(st0_ptr, st0_tag, (long __user *)data_address) )
+-	pop_0();  /* pop only if the number was actually stored
+-		     (see the 80486 manual p16-28) */
+-      break;
+-    case 016:     /* fstp m64real */
+-      clear_C1();
+-      if ( FPU_store_double(st0_ptr, st0_tag, (double __user *)data_address) )
+-	pop_0();  /* pop only if the number was actually stored
+-		     (see the 80486 manual p16-28) */
+-      break;
+-    case 017:     /* fistp m16int */
+-      clear_C1();
+-      if ( FPU_store_int16(st0_ptr, st0_tag, (short __user *)data_address) )
+-	pop_0();  /* pop only if the number was actually stored
+-		     (see the 80486 manual p16-28) */
+-      break;
+-    case 020:     /* fldenv  m14/28byte */
+-      fldenv(addr_modes, (u_char __user *)data_address);
+-      /* Ensure that the values just loaded are not changed by
+-	 fix-up operations. */
+-      return 1;
+-    case 022:     /* frstor m94/108byte */
+-      frstor(addr_modes, (u_char __user *)data_address);
+-      /* Ensure that the values just loaded are not changed by
+-	 fix-up operations. */
+-      return 1;
+-    case 023:     /* fbld m80dec */
+-      clear_C1();
+-      loaded_tag = FPU_load_bcd((u_char __user *)data_address);
+-      FPU_settag0(loaded_tag);
+-      break;
+-    case 024:     /* fldcw */
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_READ, data_address, 2);
+-      FPU_get_user(control_word, (unsigned short __user *) data_address);
+-      RE_ENTRANT_CHECK_ON;
+-      if ( partial_status & ~control_word & CW_Exceptions )
+-	partial_status |= (SW_Summary | SW_Backward);
+-      else
+-	partial_status &= ~(SW_Summary | SW_Backward);
 +
-+	symname = kallsyms_lookup(address, &symsize, &offset,
-+					&modname, namebuf);
-+	if (!symname) {
-+		printk(" [<%08lx>]\n", address);
-+		return;
++	switch (type) {
++	case 000:		/* fld m32real */
++		clear_C1();
++		loaded_tag =
++		    FPU_load_single((float __user *)data_address, &loaded_data);
++		if ((loaded_tag == TAG_Special)
++		    && isNaN(&loaded_data)
++		    && (real_1op_NaN(&loaded_data) < 0)) {
++			top++;
++			break;
++		}
++		FPU_copy_to_reg0(&loaded_data, loaded_tag);
++		break;
++	case 001:		/* fild m32int */
++		clear_C1();
++		loaded_tag =
++		    FPU_load_int32((long __user *)data_address, &loaded_data);
++		FPU_copy_to_reg0(&loaded_data, loaded_tag);
++		break;
++	case 002:		/* fld m64real */
++		clear_C1();
++		loaded_tag =
++		    FPU_load_double((double __user *)data_address,
++				    &loaded_data);
++		if ((loaded_tag == TAG_Special)
++		    && isNaN(&loaded_data)
++		    && (real_1op_NaN(&loaded_data) < 0)) {
++			top++;
++			break;
++		}
++		FPU_copy_to_reg0(&loaded_data, loaded_tag);
++		break;
++	case 003:		/* fild m16int */
++		clear_C1();
++		loaded_tag =
++		    FPU_load_int16((short __user *)data_address, &loaded_data);
++		FPU_copy_to_reg0(&loaded_data, loaded_tag);
++		break;
++	case 010:		/* fst m32real */
++		clear_C1();
++		FPU_store_single(st0_ptr, st0_tag,
++				 (float __user *)data_address);
++		break;
++	case 011:		/* fist m32int */
++		clear_C1();
++		FPU_store_int32(st0_ptr, st0_tag, (long __user *)data_address);
++		break;
++	case 012:		/* fst m64real */
++		clear_C1();
++		FPU_store_double(st0_ptr, st0_tag,
++				 (double __user *)data_address);
++		break;
++	case 013:		/* fist m16int */
++		clear_C1();
++		FPU_store_int16(st0_ptr, st0_tag, (short __user *)data_address);
++		break;
++	case 014:		/* fstp m32real */
++		clear_C1();
++		if (FPU_store_single
++		    (st0_ptr, st0_tag, (float __user *)data_address))
++			pop_0();	/* pop only if the number was actually stored
++					   (see the 80486 manual p16-28) */
++		break;
++	case 015:		/* fistp m32int */
++		clear_C1();
++		if (FPU_store_int32
++		    (st0_ptr, st0_tag, (long __user *)data_address))
++			pop_0();	/* pop only if the number was actually stored
++					   (see the 80486 manual p16-28) */
++		break;
++	case 016:		/* fstp m64real */
++		clear_C1();
++		if (FPU_store_double
++		    (st0_ptr, st0_tag, (double __user *)data_address))
++			pop_0();	/* pop only if the number was actually stored
++					   (see the 80486 manual p16-28) */
++		break;
++	case 017:		/* fistp m16int */
++		clear_C1();
++		if (FPU_store_int16
++		    (st0_ptr, st0_tag, (short __user *)data_address))
++			pop_0();	/* pop only if the number was actually stored
++					   (see the 80486 manual p16-28) */
++		break;
++	case 020:		/* fldenv  m14/28byte */
++		fldenv(addr_modes, (u_char __user *) data_address);
++		/* Ensure that the values just loaded are not changed by
++		   fix-up operations. */
++		return 1;
++	case 022:		/* frstor m94/108byte */
++		frstor(addr_modes, (u_char __user *) data_address);
++		/* Ensure that the values just loaded are not changed by
++		   fix-up operations. */
++		return 1;
++	case 023:		/* fbld m80dec */
++		clear_C1();
++		loaded_tag = FPU_load_bcd((u_char __user *) data_address);
++		FPU_settag0(loaded_tag);
++		break;
++	case 024:		/* fldcw */
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_READ, data_address, 2);
++		FPU_get_user(control_word,
++			     (unsigned short __user *)data_address);
++		RE_ENTRANT_CHECK_ON;
++		if (partial_status & ~control_word & CW_Exceptions)
++			partial_status |= (SW_Summary | SW_Backward);
++		else
++			partial_status &= ~(SW_Summary | SW_Backward);
+ #ifdef PECULIAR_486
+-      control_word |= 0x40;  /* An 80486 appears to always set this bit */
++		control_word |= 0x40;	/* An 80486 appears to always set this bit */
+ #endif /* PECULIAR_486 */
+-      return 1;
+-    case 025:      /* fld m80real */
+-      clear_C1();
+-      loaded_tag = FPU_load_extended((long double __user *)data_address, 0);
+-      FPU_settag0(loaded_tag);
+-      break;
+-    case 027:      /* fild m64int */
+-      clear_C1();
+-      loaded_tag = FPU_load_int64((long long __user *)data_address);
+-      if (loaded_tag == TAG_Error)
++		return 1;
++	case 025:		/* fld m80real */
++		clear_C1();
++		loaded_tag =
++		    FPU_load_extended((long double __user *)data_address, 0);
++		FPU_settag0(loaded_tag);
++		break;
++	case 027:		/* fild m64int */
++		clear_C1();
++		loaded_tag = FPU_load_int64((long long __user *)data_address);
++		if (loaded_tag == TAG_Error)
++			return 0;
++		FPU_settag0(loaded_tag);
++		break;
++	case 030:		/* fstenv  m14/28byte */
++		fstenv(addr_modes, (u_char __user *) data_address);
++		return 1;
++	case 032:		/* fsave */
++		fsave(addr_modes, (u_char __user *) data_address);
++		return 1;
++	case 033:		/* fbstp m80dec */
++		clear_C1();
++		if (FPU_store_bcd
++		    (st0_ptr, st0_tag, (u_char __user *) data_address))
++			pop_0();	/* pop only if the number was actually stored
++					   (see the 80486 manual p16-28) */
++		break;
++	case 034:		/* fstcw m16int */
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_WRITE, data_address, 2);
++		FPU_put_user(control_word,
++			     (unsigned short __user *)data_address);
++		RE_ENTRANT_CHECK_ON;
++		return 1;
++	case 035:		/* fstp m80real */
++		clear_C1();
++		if (FPU_store_extended
++		    (st0_ptr, st0_tag, (long double __user *)data_address))
++			pop_0();	/* pop only if the number was actually stored
++					   (see the 80486 manual p16-28) */
++		break;
++	case 036:		/* fstsw m2byte */
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_WRITE, data_address, 2);
++		FPU_put_user(status_word(),
++			     (unsigned short __user *)data_address);
++		RE_ENTRANT_CHECK_ON;
++		return 1;
++	case 037:		/* fistp m64int */
++		clear_C1();
++		if (FPU_store_int64
++		    (st0_ptr, st0_tag, (long long __user *)data_address))
++			pop_0();	/* pop only if the number was actually stored
++					   (see the 80486 manual p16-28) */
++		break;
 +	}
-+	if (!reliable)
-+		strcpy(reliab, "? ");
-+
-+	if (!modname)
-+		modname = delim = "";
-+	printk(" [<%08lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
-+		address, reliab, delim, modname, delim, symname, offset, symsize);
-+#else
-+	printk(" [<%08lx>]\n", address);
-+#endif
-+}
-+
- static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned size)
- {
- 	return	p > (void *)tinfo &&
-@@ -114,48 +143,35 @@ struct stack_frame {
- };
- 
- static inline unsigned long print_context_stack(struct thread_info *tinfo,
--				unsigned long *stack, unsigned long ebp,
-+				unsigned long *stack, unsigned long bp,
- 				const struct stacktrace_ops *ops, void *data)
- {
--#ifdef	CONFIG_FRAME_POINTER
--	struct stack_frame *frame = (struct stack_frame *)ebp;
--	while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) {
--		struct stack_frame *next;
--		unsigned long addr;
-+	struct stack_frame *frame = (struct stack_frame *)bp;
- 
--		addr = frame->return_address;
--		ops->address(data, addr);
--		/*
--		 * break out of recursive entries (such as
--		 * end_of_stack_stop_unwind_function). Also,
--		 * we can never allow a frame pointer to
--		 * move downwards!
--		 */
--		next = frame->next_frame;
--		if (next <= frame)
--			break;
--		frame = next;
--	}
--#else
- 	while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
- 		unsigned long addr;
- 
--		addr = *stack++;
--		if (__kernel_text_address(addr))
--			ops->address(data, addr);
-+		addr = *stack;
-+		if (__kernel_text_address(addr)) {
-+			if ((unsigned long) stack == bp + 4) {
-+				ops->address(data, addr, 1);
-+				frame = frame->next_frame;
-+				bp = (unsigned long) frame;
-+			} else {
-+				ops->address(data, addr, bp == 0);
-+			}
-+		}
-+		stack++;
- 	}
--#endif
--	return ebp;
-+	return bp;
+ 	return 0;
+-      FPU_settag0(loaded_tag);
+-      break;
+-    case 030:     /* fstenv  m14/28byte */
+-      fstenv(addr_modes, (u_char __user *)data_address);
+-      return 1;
+-    case 032:      /* fsave */
+-      fsave(addr_modes, (u_char __user *)data_address);
+-      return 1;
+-    case 033:      /* fbstp m80dec */
+-      clear_C1();
+-      if ( FPU_store_bcd(st0_ptr, st0_tag, (u_char __user *)data_address) )
+-	pop_0();  /* pop only if the number was actually stored
+-		     (see the 80486 manual p16-28) */
+-      break;
+-    case 034:      /* fstcw m16int */
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_WRITE,data_address,2);
+-      FPU_put_user(control_word, (unsigned short __user *) data_address);
+-      RE_ENTRANT_CHECK_ON;
+-      return 1;
+-    case 035:      /* fstp m80real */
+-      clear_C1();
+-      if ( FPU_store_extended(st0_ptr, st0_tag, (long double __user *)data_address) )
+-	pop_0();  /* pop only if the number was actually stored
+-		     (see the 80486 manual p16-28) */
+-      break;
+-    case 036:      /* fstsw m2byte */
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_WRITE,data_address,2);
+-      FPU_put_user(status_word(),(unsigned short __user *) data_address);
+-      RE_ENTRANT_CHECK_ON;
+-      return 1;
+-    case 037:      /* fistp m64int */
+-      clear_C1();
+-      if ( FPU_store_int64(st0_ptr, st0_tag, (long long __user *)data_address) )
+-	pop_0();  /* pop only if the number was actually stored
+-		     (see the 80486 manual p16-28) */
+-      break;
+-    }
+-  return 0;
  }
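
The FPU_load_store() hunk above is table-driven: a 5-bit instruction type indexes parallel tables giving the stack action and the operand size, and the size is checked against the segment access limit before any user memory is touched. A small sketch of that pattern, with assumed names and a truncated table purely for illustration:

/*
 * Sketch only (not the emulator's tables): type indexes an action table and
 * a size table; the size is validated against an access limit first, the
 * action then tells the caller whether a stack slot must be reserved.
 */
#include <stdio.h>

enum action { ACT_NONE, ACT_REG0, ACT_PUSH, ACT_ILLEGAL };

static const enum action act_table[8] = {
	ACT_PUSH, ACT_PUSH, ACT_PUSH, ACT_PUSH,		/* fld/fild m32/m64/m16 */
	ACT_ILLEGAL, ACT_ILLEGAL, ACT_ILLEGAL, ACT_ILLEGAL,
};
static const unsigned size_table[8] = { 4, 4, 8, 2, 0, 0, 0, 0 };

static int check_and_dispatch(unsigned type, unsigned access_limit)
{
	if (type >= 8 || act_table[type] == ACT_ILLEGAL)
		return -1;			/* FPU_illegal() analogue */
	if (access_limit < size_table[type])
		return -2;			/* math_abort(..., SIGSEGV) analogue */
	return act_table[type] == ACT_PUSH;	/* caller reserves a register-stack slot */
}

int main(void)
{
	printf("type 0, limit 16: %d\n", check_and_dispatch(0, 16));
	printf("type 2, limit 4:  %d\n", check_and_dispatch(2, 4));
	return 0;
}
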
+diff --git a/arch/x86/math-emu/poly.h b/arch/x86/math-emu/poly.h
+index 4db7981..168eb44 100644
+--- a/arch/x86/math-emu/poly.h
++++ b/arch/x86/math-emu/poly.h
+@@ -21,9 +21,9 @@
+    allows. 9-byte would probably be sufficient.
+    */
+ typedef struct {
+-  unsigned long lsw;
+-  unsigned long midw;
+-  unsigned long msw;
++	unsigned long lsw;
++	unsigned long midw;
++	unsigned long msw;
+ } Xsig;
  
- #define MSG(msg) ops->warning(data, msg)
+ asmlinkage void mul64(unsigned long long const *a, unsigned long long const *b,
+@@ -49,7 +49,6 @@ asmlinkage void div_Xsig(Xsig *x1, const Xsig *x2, const Xsig *dest);
+ /* Macro to access the 8 ms bytes of an Xsig as a long long */
+ #define XSIG_LL(x)         (*(unsigned long long *)&x.midw)
  
- void dump_trace(struct task_struct *task, struct pt_regs *regs,
--	        unsigned long *stack,
-+		unsigned long *stack, unsigned long bp,
- 		const struct stacktrace_ops *ops, void *data)
- {
--	unsigned long ebp = 0;
 -
- 	if (!task)
- 		task = current;
- 
-@@ -163,17 +179,17 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- 		unsigned long dummy;
- 		stack = &dummy;
- 		if (task != current)
--			stack = (unsigned long *)task->thread.esp;
-+			stack = (unsigned long *)task->thread.sp;
- 	}
- 
- #ifdef CONFIG_FRAME_POINTER
--	if (!ebp) {
-+	if (!bp) {
- 		if (task == current) {
--			/* Grab ebp right from our regs */
--			asm ("movl %%ebp, %0" : "=r" (ebp) : );
-+			/* Grab bp right from our regs */
-+			asm ("movl %%ebp, %0" : "=r" (bp) : );
- 		} else {
--			/* ebp is the last reg pushed by switch_to */
--			ebp = *(unsigned long *) task->thread.esp;
-+			/* bp is the last reg pushed by switch_to */
-+			bp = *(unsigned long *) task->thread.sp;
- 		}
- 	}
- #endif
-@@ -182,7 +198,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- 		struct thread_info *context;
- 		context = (struct thread_info *)
- 			((unsigned long)stack & (~(THREAD_SIZE - 1)));
--		ebp = print_context_stack(context, stack, ebp, ops, data);
-+		bp = print_context_stack(context, stack, bp, ops, data);
- 		/* Should be after the line below, but somewhere
- 		   in early boot context comes out corrupted and we
- 		   can't reference it -AK */
-@@ -217,9 +233,11 @@ static int print_trace_stack(void *data, char *name)
  /*
-  * Print one address/symbol entries per line.
-  */
--static void print_trace_address(void *data, unsigned long addr)
-+static void print_trace_address(void *data, unsigned long addr, int reliable)
- {
- 	printk("%s [<%08lx>] ", (char *)data, addr);
-+	if (!reliable)
-+		printk("? ");
- 	print_symbol("%s\n", addr);
- 	touch_nmi_watchdog();
- }
-@@ -233,32 +251,32 @@ static const struct stacktrace_ops print_trace_ops = {
- 
- static void
- show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
--		   unsigned long * stack, char *log_lvl)
-+		unsigned long *stack, unsigned long bp, char *log_lvl)
- {
--	dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
-+	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
- 	printk("%s =======================\n", log_lvl);
- }
- 
- void show_trace(struct task_struct *task, struct pt_regs *regs,
--		unsigned long * stack)
-+		unsigned long *stack, unsigned long bp)
+    Need to run gcc with optimizations on to get these to
+    actually be in-line.
+@@ -63,59 +62,53 @@ asmlinkage void div_Xsig(Xsig *x1, const Xsig *x2, const Xsig *dest);
+ static inline unsigned long mul_32_32(const unsigned long arg1,
+ 				      const unsigned long arg2)
  {
--	show_trace_log_lvl(task, regs, stack, "");
-+	show_trace_log_lvl(task, regs, stack, bp, "");
+-  int retval;
+-  asm volatile ("mull %2; movl %%edx,%%eax" \
+-		:"=a" (retval) \
+-		:"0" (arg1), "g" (arg2) \
+-		:"dx");
+-  return retval;
++	int retval;
++	asm volatile ("mull %2; movl %%edx,%%eax":"=a" (retval)
++		      :"0"(arg1), "g"(arg2)
++		      :"dx");
++	return retval;
  }
  
- static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
--			       unsigned long *esp, char *log_lvl)
-+		       unsigned long *sp, unsigned long bp, char *log_lvl)
+-
+ /* Add the 12 byte Xsig x2 to Xsig dest, with no checks for overflow. */
+ static inline void add_Xsig_Xsig(Xsig *dest, const Xsig *x2)
  {
- 	unsigned long *stack;
- 	int i;
- 
--	if (esp == NULL) {
-+	if (sp == NULL) {
- 		if (task)
--			esp = (unsigned long*)task->thread.esp;
-+			sp = (unsigned long*)task->thread.sp;
- 		else
--			esp = (unsigned long *)&esp;
-+			sp = (unsigned long *)&sp;
- 	}
- 
--	stack = esp;
-+	stack = sp;
- 	for(i = 0; i < kstack_depth_to_print; i++) {
- 		if (kstack_end(stack))
- 			break;
-@@ -267,13 +285,13 @@ static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- 		printk("%08lx ", *stack++);
- 	}
- 	printk("\n%sCall Trace:\n", log_lvl);
--	show_trace_log_lvl(task, regs, esp, log_lvl);
-+	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
+-  asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
+-                "movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
+-                "movl 4(%%esi),%%eax; adcl %%eax,4(%%edi);\n"
+-                "movl 8(%%esi),%%eax; adcl %%eax,8(%%edi);\n"
+-                 :"=g" (*dest):"g" (dest), "g" (x2)
+-                 :"ax","si","di");
++	asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
++		      "movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
++		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%edi);\n"
++		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%edi);\n":"=g"
++		      (*dest):"g"(dest), "g"(x2)
++		      :"ax", "si", "di");
  }
  
--void show_stack(struct task_struct *task, unsigned long *esp)
-+void show_stack(struct task_struct *task, unsigned long *sp)
+-
+ /* Add the 12 byte Xsig x2 to Xsig dest, adjust exp if overflow occurs. */
+ /* Note: the constraints in the asm statement didn't always work properly
+    with gcc 2.5.8.  Changing from using edi to using ecx got around the
+    problem, but keep fingers crossed! */
+ static inline void add_two_Xsig(Xsig *dest, const Xsig *x2, long int *exp)
  {
- 	printk("       ");
--	show_stack_log_lvl(task, NULL, esp, "");
-+	show_stack_log_lvl(task, NULL, sp, 0, "");
+-  asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
+-                "movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
+-                "movl 4(%%esi),%%eax; adcl %%eax,4(%%ecx);\n"
+-                "movl 8(%%esi),%%eax; adcl %%eax,8(%%ecx);\n"
+-                "jnc 0f;\n"
+-		"rcrl 8(%%ecx); rcrl 4(%%ecx); rcrl (%%ecx)\n"
+-                "movl %4,%%ecx; incl (%%ecx)\n"
+-                "movl $1,%%eax; jmp 1f;\n"
+-                "0: xorl %%eax,%%eax;\n"
+-                "1:\n"
+-		:"=g" (*exp), "=g" (*dest)
+-		:"g" (dest), "g" (x2), "g" (exp)
+-		:"cx","si","ax");
++	asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
++		      "movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
++		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%ecx);\n"
++		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%ecx);\n"
++		      "jnc 0f;\n"
++		      "rcrl 8(%%ecx); rcrl 4(%%ecx); rcrl (%%ecx)\n"
++		      "movl %4,%%ecx; incl (%%ecx)\n"
++		      "movl $1,%%eax; jmp 1f;\n"
++		      "0: xorl %%eax,%%eax;\n" "1:\n":"=g" (*exp), "=g"(*dest)
++		      :"g"(dest), "g"(x2), "g"(exp)
++		      :"cx", "si", "ax");
  }
  
- /*
-@@ -282,13 +300,19 @@ void show_stack(struct task_struct *task, unsigned long *esp)
- void dump_stack(void)
+-
+ /* Negate (subtract from 1.0) the 12 byte Xsig */
+ /* This is faster in a loop on my 386 than using the "neg" instruction. */
+ static inline void negate_Xsig(Xsig *x)
  {
- 	unsigned long stack;
-+	unsigned long bp = 0;
-+
-+#ifdef CONFIG_FRAME_POINTER
-+	if (!bp)
-+		asm("movl %%ebp, %0" : "=r" (bp):);
-+#endif
- 
- 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
- 		current->pid, current->comm, print_tainted(),
- 		init_utsname()->release,
- 		(int)strcspn(init_utsname()->version, " "),
- 		init_utsname()->version);
--	show_trace(current, NULL, &stack);
-+	show_trace(current, NULL, &stack, bp);
+-  asm volatile("movl %1,%%esi;\n"
+-               "xorl %%ecx,%%ecx;\n"
+-               "movl %%ecx,%%eax; subl (%%esi),%%eax; movl %%eax,(%%esi);\n"
+-               "movl %%ecx,%%eax; sbbl 4(%%esi),%%eax; movl %%eax,4(%%esi);\n"
+-               "movl %%ecx,%%eax; sbbl 8(%%esi),%%eax; movl %%eax,8(%%esi);\n"
+-               :"=g" (*x):"g" (x):"si","ax","cx");
++	asm volatile ("movl %1,%%esi;\n"
++		      "xorl %%ecx,%%ecx;\n"
++		      "movl %%ecx,%%eax; subl (%%esi),%%eax; movl %%eax,(%%esi);\n"
++		      "movl %%ecx,%%eax; sbbl 4(%%esi),%%eax; movl %%eax,4(%%esi);\n"
++		      "movl %%ecx,%%eax; sbbl 8(%%esi),%%eax; movl %%eax,8(%%esi);\n":"=g"
++		      (*x):"g"(x):"si", "ax", "cx");
  }
  
- EXPORT_SYMBOL(dump_stack);
-@@ -307,30 +331,30 @@ void show_registers(struct pt_regs *regs)
- 	 * time of the fault..
- 	 */
- 	if (!user_mode_vm(regs)) {
--		u8 *eip;
-+		u8 *ip;
- 		unsigned int code_prologue = code_bytes * 43 / 64;
- 		unsigned int code_len = code_bytes;
- 		unsigned char c;
+ #endif /* _POLY_H */
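
The mul_32_32() helper reindented above uses "mull" and keeps only %edx, i.e. the high 32 bits of a 32x32-bit unsigned multiply, which is the multiply step for the 0.32 fixed-point significands used by the Xsig code. A portable sketch of the same value (function name here is illustrative, not part of the emulator):

/*
 * Sketch: high 32 bits of an unsigned 32x32 multiply via a 64-bit
 * intermediate; the inline asm above computes the same thing in registers.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t mul_32_32_hi(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);
}

int main(void)
{
	/* 0.5 * 0.5 in 0.32 fixed point is 0.25 -> 0x40000000 */
	printf("0x%08x\n", mul_32_32_hi(0x80000000u, 0x80000000u));
	return 0;
}
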
+diff --git a/arch/x86/math-emu/poly_2xm1.c b/arch/x86/math-emu/poly_2xm1.c
+index 9766ad5..b00e9e1 100644
+--- a/arch/x86/math-emu/poly_2xm1.c
++++ b/arch/x86/math-emu/poly_2xm1.c
+@@ -17,21 +17,19 @@
+ #include "control_w.h"
+ #include "poly.h"
  
- 		printk("\n" KERN_EMERG "Stack: ");
--		show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);
-+		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+-
+ #define	HIPOWER	11
+-static const unsigned long long lterms[HIPOWER] =
+-{
+-  0x0000000000000000LL,  /* This term done separately as 12 bytes */
+-  0xf5fdeffc162c7543LL,
+-  0x1c6b08d704a0bfa6LL,
+-  0x0276556df749cc21LL,
+-  0x002bb0ffcf14f6b8LL,
+-  0x0002861225ef751cLL,
+-  0x00001ffcbfcd5422LL,
+-  0x00000162c005d5f1LL,
+-  0x0000000da96ccb1bLL,
+-  0x0000000078d1b897LL,
+-  0x000000000422b029LL
++static const unsigned long long lterms[HIPOWER] = {
++	0x0000000000000000LL,	/* This term done separately as 12 bytes */
++	0xf5fdeffc162c7543LL,
++	0x1c6b08d704a0bfa6LL,
++	0x0276556df749cc21LL,
++	0x002bb0ffcf14f6b8LL,
++	0x0002861225ef751cLL,
++	0x00001ffcbfcd5422LL,
++	0x00000162c005d5f1LL,
++	0x0000000da96ccb1bLL,
++	0x0000000078d1b897LL,
++	0x000000000422b029LL
+ };
  
- 		printk(KERN_EMERG "Code: ");
+ static const Xsig hiterm = MK_XSIG(0xb17217f7, 0xd1cf79ab, 0xc8a39194);
+@@ -45,112 +43,103 @@ static const Xsig shiftterm2 = MK_XSIG(0xb504f333, 0xf9de6484, 0x597d89b3);
+ static const Xsig shiftterm3 = MK_XSIG(0xd744fcca, 0xd69d6af4, 0x39a68bb9);
  
--		eip = (u8 *)regs->eip - code_prologue;
--		if (eip < (u8 *)PAGE_OFFSET ||
--			probe_kernel_address(eip, c)) {
-+		ip = (u8 *)regs->ip - code_prologue;
-+		if (ip < (u8 *)PAGE_OFFSET ||
-+			probe_kernel_address(ip, c)) {
- 			/* try starting at EIP */
--			eip = (u8 *)regs->eip;
-+			ip = (u8 *)regs->ip;
- 			code_len = code_len - code_prologue + 1;
- 		}
--		for (i = 0; i < code_len; i++, eip++) {
--			if (eip < (u8 *)PAGE_OFFSET ||
--				probe_kernel_address(eip, c)) {
-+		for (i = 0; i < code_len; i++, ip++) {
-+			if (ip < (u8 *)PAGE_OFFSET ||
-+				probe_kernel_address(ip, c)) {
- 				printk(" Bad EIP value.");
- 				break;
- 			}
--			if (eip == (u8 *)regs->eip)
-+			if (ip == (u8 *)regs->ip)
- 				printk("<%02x> ", c);
- 			else
- 				printk("%02x ", c);
-@@ -339,18 +363,57 @@ void show_registers(struct pt_regs *regs)
- 	printk("\n");
- }	
+ static const Xsig *shiftterm[] = { &shiftterm0, &shiftterm1,
+-				     &shiftterm2, &shiftterm3 };
+-
++	&shiftterm2, &shiftterm3
++};
  
--int is_valid_bugaddr(unsigned long eip)
-+int is_valid_bugaddr(unsigned long ip)
+ /*--- poly_2xm1() -----------------------------------------------------------+
+  | Requires st(0) which is TAG_Valid and < 1.                                |
+  +---------------------------------------------------------------------------*/
+-int	poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result)
++int poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result)
  {
- 	unsigned short ud2;
- 
--	if (eip < PAGE_OFFSET)
-+	if (ip < PAGE_OFFSET)
- 		return 0;
--	if (probe_kernel_address((unsigned short *)eip, ud2))
-+	if (probe_kernel_address((unsigned short *)ip, ud2))
- 		return 0;
+-  long int              exponent, shift;
+-  unsigned long long    Xll;
+-  Xsig                  accumulator, Denom, argSignif;
+-  u_char                tag;
++	long int exponent, shift;
++	unsigned long long Xll;
++	Xsig accumulator, Denom, argSignif;
++	u_char tag;
  
- 	return ud2 == 0x0b0f;
- }
+-  exponent = exponent16(arg);
++	exponent = exponent16(arg);
  
-+static int die_counter;
-+
-+int __kprobes __die(const char * str, struct pt_regs * regs, long err)
-+{
-+	unsigned long sp;
-+	unsigned short ss;
-+
-+	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
-+#ifdef CONFIG_PREEMPT
-+	printk("PREEMPT ");
-+#endif
-+#ifdef CONFIG_SMP
-+	printk("SMP ");
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+	printk("DEBUG_PAGEALLOC");
-+#endif
-+	printk("\n");
-+
-+	if (notify_die(DIE_OOPS, str, regs, err,
-+				current->thread.trap_no, SIGSEGV) !=
-+			NOTIFY_STOP) {
-+		show_registers(regs);
-+		/* Executive summary in case the oops scrolled away */
-+		sp = (unsigned long) (&regs->sp);
-+		savesegment(ss, ss);
-+		if (user_mode(regs)) {
-+			sp = regs->sp;
-+			ss = regs->ss & 0xffff;
-+		}
-+		printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
-+		print_symbol("%s", regs->ip);
-+		printk(" SS:ESP %04x:%08lx\n", ss, sp);
-+		return 0;
-+	} else {
+ #ifdef PARANOID
+-  if ( exponent >= 0 )    	/* Don't want a |number| >= 1.0 */
+-    {
+-      /* Number negative, too large, or not Valid. */
+-      EXCEPTION(EX_INTERNAL|0x127);
+-      return 1;
+-    }
++	if (exponent >= 0) {	/* Don't want a |number| >= 1.0 */
++		/* Number negative, too large, or not Valid. */
++		EXCEPTION(EX_INTERNAL | 0x127);
 +		return 1;
 +	}
-+}
-+
- /*
-  * This is gone through when something in the kernel has done something bad and
-  * is about to be terminated.
-@@ -366,7 +429,6 @@ void die(const char * str, struct pt_regs * regs, long err)
- 		.lock_owner =		-1,
- 		.lock_owner_depth =	0
- 	};
--	static int die_counter;
- 	unsigned long flags;
- 
- 	oops_enter();
-@@ -382,43 +444,13 @@ void die(const char * str, struct pt_regs * regs, long err)
- 		raw_local_irq_save(flags);
- 
- 	if (++die.lock_owner_depth < 3) {
--		unsigned long esp;
--		unsigned short ss;
-+		report_bug(regs->ip, regs);
+ #endif /* PARANOID */
  
--		report_bug(regs->eip, regs);
+-  argSignif.lsw = 0;
+-  XSIG_LL(argSignif) = Xll = significand(arg);
 -
--		printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
--		       ++die_counter);
--#ifdef CONFIG_PREEMPT
--		printk("PREEMPT ");
--#endif
--#ifdef CONFIG_SMP
--		printk("SMP ");
--#endif
--#ifdef CONFIG_DEBUG_PAGEALLOC
--		printk("DEBUG_PAGEALLOC");
--#endif
--		printk("\n");
+-  if ( exponent == -1 )
+-    {
+-      shift = (argSignif.msw & 0x40000000) ? 3 : 2;
+-      /* subtract 0.5 or 0.75 */
+-      exponent -= 2;
+-      XSIG_LL(argSignif) <<= 2;
+-      Xll <<= 2;
+-    }
+-  else if ( exponent == -2 )
+-    {
+-      shift = 1;
+-      /* subtract 0.25 */
+-      exponent--;
+-      XSIG_LL(argSignif) <<= 1;
+-      Xll <<= 1;
+-    }
+-  else
+-    shift = 0;
 -
--		if (notify_die(DIE_OOPS, str, regs, err,
--					current->thread.trap_no, SIGSEGV) !=
--				NOTIFY_STOP) {
--			show_registers(regs);
--			/* Executive summary in case the oops scrolled away */
--			esp = (unsigned long) (&regs->esp);
--			savesegment(ss, ss);
--			if (user_mode(regs)) {
--				esp = regs->esp;
--				ss = regs->xss & 0xffff;
--			}
--			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
--			print_symbol("%s", regs->eip);
--			printk(" SS:ESP %04x:%08lx\n", ss, esp);
--		}
--		else
-+		if (__die(str, regs, err))
- 			regs = NULL;
--  	} else
-+	} else {
- 		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
+-  if ( exponent < -2 )
+-    {
+-      /* Shift the argument right by the required places. */
+-      if ( FPU_shrx(&Xll, -2-exponent) >= 0x80000000U )
+-	Xll++;	/* round up */
+-    }
+-
+-  accumulator.lsw = accumulator.midw = accumulator.msw = 0;
+-  polynomial_Xsig(&accumulator, &Xll, lterms, HIPOWER-1);
+-  mul_Xsig_Xsig(&accumulator, &argSignif);
+-  shr_Xsig(&accumulator, 3);
+-
+-  mul_Xsig_Xsig(&argSignif, &hiterm);   /* The leading term */
+-  add_two_Xsig(&accumulator, &argSignif, &exponent);
+-
+-  if ( shift )
+-    {
+-      /* The argument is large, use the identity:
+-	 f(x+a) = f(a) * (f(x) + 1) - 1;
+-	 */
+-      shr_Xsig(&accumulator, - exponent);
+-      accumulator.msw |= 0x80000000;      /* add 1.0 */
+-      mul_Xsig_Xsig(&accumulator, shiftterm[shift]);
+-      accumulator.msw &= 0x3fffffff;      /* subtract 1.0 */
+-      exponent = 1;
+-    }
+-
+-  if ( sign != SIGN_POS )
+-    {
+-      /* The argument is negative, use the identity:
+-	     f(-x) = -f(x) / (1 + f(x))
+-	 */
+-      Denom.lsw = accumulator.lsw;
+-      XSIG_LL(Denom) = XSIG_LL(accumulator);
+-      if ( exponent < 0 )
+-	shr_Xsig(&Denom, - exponent);
+-      else if ( exponent > 0 )
+-	{
+-	  /* exponent must be 1 here */
+-	  XSIG_LL(Denom) <<= 1;
+-	  if ( Denom.lsw & 0x80000000 )
+-	    XSIG_LL(Denom) |= 1;
+-	  (Denom.lsw) <<= 1;
++	argSignif.lsw = 0;
++	XSIG_LL(argSignif) = Xll = significand(arg);
++
++	if (exponent == -1) {
++		shift = (argSignif.msw & 0x40000000) ? 3 : 2;
++		/* subtract 0.5 or 0.75 */
++		exponent -= 2;
++		XSIG_LL(argSignif) <<= 2;
++		Xll <<= 2;
++	} else if (exponent == -2) {
++		shift = 1;
++		/* subtract 0.25 */
++		exponent--;
++		XSIG_LL(argSignif) <<= 1;
++		Xll <<= 1;
++	} else
++		shift = 0;
++
++	if (exponent < -2) {
++		/* Shift the argument right by the required places. */
++		if (FPU_shrx(&Xll, -2 - exponent) >= 0x80000000U)
++			Xll++;	/* round up */
 +	}
- 
- 	bust_spinlocks(0);
- 	die.lock_owner = -1;
-@@ -454,7 +486,7 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
- {
- 	struct task_struct *tsk = current;
- 
--	if (regs->eflags & VM_MASK) {
-+	if (regs->flags & VM_MASK) {
- 		if (vm86)
- 			goto vm86_trap;
- 		goto trap_signal;
-@@ -500,7 +532,7 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
- }
- 
- #define DO_ERROR(trapnr, signr, str, name) \
--fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+void do_##name(struct pt_regs * regs, long error_code) \
- { \
- 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
- 						== NOTIFY_STOP) \
-@@ -509,7 +541,7 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
- }
- 
- #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
--fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+void do_##name(struct pt_regs * regs, long error_code) \
- { \
- 	siginfo_t info; \
- 	if (irq) \
-@@ -525,7 +557,7 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
- }
- 
- #define DO_VM86_ERROR(trapnr, signr, str, name) \
--fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+void do_##name(struct pt_regs * regs, long error_code) \
- { \
- 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
- 						== NOTIFY_STOP) \
-@@ -534,7 +566,7 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
- }
- 
- #define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
--fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+void do_##name(struct pt_regs * regs, long error_code) \
- { \
- 	siginfo_t info; \
- 	info.si_signo = signr; \
-@@ -548,13 +580,13 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
- 	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
- }
- 
--DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
-+DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->ip)
- #ifndef CONFIG_KPROBES
- DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
- #endif
- DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
- DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
--DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0)
-+DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
- DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
- DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
- DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
-@@ -562,7 +594,7 @@ DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
- DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
- DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
- 
--fastcall void __kprobes do_general_protection(struct pt_regs * regs,
-+void __kprobes do_general_protection(struct pt_regs * regs,
- 					      long error_code)
- {
- 	int cpu = get_cpu();
-@@ -596,7 +628,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
- 	}
- 	put_cpu();
- 
--	if (regs->eflags & VM_MASK)
-+	if (regs->flags & VM_MASK)
- 		goto gp_in_vm86;
- 
- 	if (!user_mode(regs))
-@@ -605,11 +637,14 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
- 	current->thread.error_code = error_code;
- 	current->thread.trap_no = 13;
- 	if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
--	    printk_ratelimit())
-+	    printk_ratelimit()) {
- 		printk(KERN_INFO
--		    "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
-+		    "%s[%d] general protection ip:%lx sp:%lx error:%lx",
- 		    current->comm, task_pid_nr(current),
--		    regs->eip, regs->esp, error_code);
-+		    regs->ip, regs->sp, error_code);
-+		print_vma_addr(" in ", regs->ip);
-+		printk("\n");
++
++	accumulator.lsw = accumulator.midw = accumulator.msw = 0;
++	polynomial_Xsig(&accumulator, &Xll, lterms, HIPOWER - 1);
++	mul_Xsig_Xsig(&accumulator, &argSignif);
++	shr_Xsig(&accumulator, 3);
++
++	mul_Xsig_Xsig(&argSignif, &hiterm);	/* The leading term */
++	add_two_Xsig(&accumulator, &argSignif, &exponent);
++
++	if (shift) {
++		/* The argument is large, use the identity:
++		   f(x+a) = f(a) * (f(x) + 1) - 1;
++		 */
++		shr_Xsig(&accumulator, -exponent);
++		accumulator.msw |= 0x80000000;	/* add 1.0 */
++		mul_Xsig_Xsig(&accumulator, shiftterm[shift]);
++		accumulator.msw &= 0x3fffffff;	/* subtract 1.0 */
++		exponent = 1;
 +	}
- 
- 	force_sig(SIGSEGV, current);
- 	return;
-@@ -705,8 +740,8 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
- 	*/
- 	bust_spinlocks(1);
- 	printk(KERN_EMERG "%s", msg);
--	printk(" on CPU%d, eip %08lx, registers:\n",
--		smp_processor_id(), regs->eip);
-+	printk(" on CPU%d, ip %08lx, registers:\n",
-+		smp_processor_id(), regs->ip);
- 	show_registers(regs);
- 	console_silent();
- 	spin_unlock(&nmi_print_lock);
-@@ -763,7 +798,7 @@ static __kprobes void default_do_nmi(struct pt_regs * regs)
- 
- static int ignore_nmis;
- 
--fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
-+__kprobes void do_nmi(struct pt_regs * regs, long error_code)
- {
- 	int cpu;
- 
-@@ -792,7 +827,7 @@ void restart_nmi(void)
- }
- 
- #ifdef CONFIG_KPROBES
--fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
-+void __kprobes do_int3(struct pt_regs *regs, long error_code)
- {
- 	trace_hardirqs_fixup();
- 
-@@ -828,7 +863,7 @@ fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
-  * find every occurrence of the TF bit that could be saved away even
-  * by user code)
-  */
--fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
-+void __kprobes do_debug(struct pt_regs * regs, long error_code)
- {
- 	unsigned int condition;
- 	struct task_struct *tsk = current;
-@@ -837,24 +872,30 @@ fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
- 
- 	get_debugreg(condition, 6);
- 
-+	/*
-+	 * The processor cleared BTF, so don't mark that we need it set.
-+	 */
-+	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
-+	tsk->thread.debugctlmsr = 0;
 +
- 	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
- 					SIGTRAP) == NOTIFY_STOP)
- 		return;
- 	/* It's safe to allow irq's after DR6 has been saved */
--	if (regs->eflags & X86_EFLAGS_IF)
-+	if (regs->flags & X86_EFLAGS_IF)
- 		local_irq_enable();
- 
- 	/* Mask out spurious debug traps due to lazy DR7 setting */
- 	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
--		if (!tsk->thread.debugreg[7])
-+		if (!tsk->thread.debugreg7)
- 			goto clear_dr7;
++	if (sign != SIGN_POS) {
++		/* The argument is negative, use the identity:
++		   f(-x) = -f(x) / (1 + f(x))
++		 */
++		Denom.lsw = accumulator.lsw;
++		XSIG_LL(Denom) = XSIG_LL(accumulator);
++		if (exponent < 0)
++			shr_Xsig(&Denom, -exponent);
++		else if (exponent > 0) {
++			/* exponent must be 1 here */
++			XSIG_LL(Denom) <<= 1;
++			if (Denom.lsw & 0x80000000)
++				XSIG_LL(Denom) |= 1;
++			(Denom.lsw) <<= 1;
++		}
++		Denom.msw |= 0x80000000;	/* add 1.0 */
++		div_Xsig(&accumulator, &Denom, &accumulator);
  	}
+-      Denom.msw |= 0x80000000;      /* add 1.0 */
+-      div_Xsig(&accumulator, &Denom, &accumulator);
+-    }
  
--	if (regs->eflags & VM_MASK)
-+	if (regs->flags & VM_MASK)
- 		goto debug_vm86;
- 
- 	/* Save debug status register where ptrace can see it */
--	tsk->thread.debugreg[6] = condition;
-+	tsk->thread.debugreg6 = condition;
+-  /* Convert to 64 bit signed-compatible */
+-  exponent += round_Xsig(&accumulator);
++	/* Convert to 64 bit signed-compatible */
++	exponent += round_Xsig(&accumulator);
  
- 	/*
- 	 * Single-stepping through TF: make sure we ignore any events in
-@@ -886,7 +927,7 @@ debug_vm86:
+-  result = &st(0);
+-  significand(result) = XSIG_LL(accumulator);
+-  setexponent16(result, exponent);
++	result = &st(0);
++	significand(result) = XSIG_LL(accumulator);
++	setexponent16(result, exponent);
  
- clear_TF_reenable:
- 	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
--	regs->eflags &= ~TF_MASK;
-+	regs->flags &= ~TF_MASK;
- 	return;
- }
+-  tag = FPU_round(result, 1, 0, FULL_PRECISION, sign);
++	tag = FPU_round(result, 1, 0, FULL_PRECISION, sign);
  
-@@ -895,7 +936,7 @@ clear_TF_reenable:
-  * the correct behaviour even in the presence of the asynchronous
-  * IRQ13 behaviour
-  */
--void math_error(void __user *eip)
-+void math_error(void __user *ip)
- {
- 	struct task_struct * task;
- 	siginfo_t info;
-@@ -911,7 +952,7 @@ void math_error(void __user *eip)
- 	info.si_signo = SIGFPE;
- 	info.si_errno = 0;
- 	info.si_code = __SI_FAULT;
--	info.si_addr = eip;
-+	info.si_addr = ip;
- 	/*
- 	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
- 	 * status.  0x3f is the exception bits in these regs, 0x200 is the
-@@ -954,13 +995,13 @@ void math_error(void __user *eip)
- 	force_sig_info(SIGFPE, &info, task);
- }
+-  setsign(result, sign);
+-  FPU_settag0(tag);
++	setsign(result, sign);
++	FPU_settag0(tag);
  
--fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
-+void do_coprocessor_error(struct pt_regs * regs, long error_code)
- {
- 	ignore_fpu_irq = 1;
--	math_error((void __user *)regs->eip);
-+	math_error((void __user *)regs->ip);
- }
+-  return 0;
++	return 0;
  
--static void simd_math_error(void __user *eip)
-+static void simd_math_error(void __user *ip)
- {
- 	struct task_struct * task;
- 	siginfo_t info;
-@@ -976,7 +1017,7 @@ static void simd_math_error(void __user *eip)
- 	info.si_signo = SIGFPE;
- 	info.si_errno = 0;
- 	info.si_code = __SI_FAULT;
--	info.si_addr = eip;
-+	info.si_addr = ip;
- 	/*
- 	 * The SIMD FPU exceptions are handled a little differently, as there
- 	 * is only a single status/control register.  Thus, to determine which
-@@ -1008,19 +1049,19 @@ static void simd_math_error(void __user *eip)
- 	force_sig_info(SIGFPE, &info, task);
  }
+diff --git a/arch/x86/math-emu/poly_atan.c b/arch/x86/math-emu/poly_atan.c
+index 82f7029..20c28e5 100644
+--- a/arch/x86/math-emu/poly_atan.c
++++ b/arch/x86/math-emu/poly_atan.c
+@@ -18,28 +18,25 @@
+ #include "control_w.h"
+ #include "poly.h"
  
--fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
-+void do_simd_coprocessor_error(struct pt_regs * regs,
- 					  long error_code)
- {
- 	if (cpu_has_xmm) {
- 		/* Handle SIMD FPU exceptions on PIII+ processors. */
- 		ignore_fpu_irq = 1;
--		simd_math_error((void __user *)regs->eip);
-+		simd_math_error((void __user *)regs->ip);
- 	} else {
- 		/*
- 		 * Handle strange cache flush from user space exception
- 		 * in all other cases.  This is undocumented behaviour.
- 		 */
--		if (regs->eflags & VM_MASK) {
-+		if (regs->flags & VM_MASK) {
- 			handle_vm86_fault((struct kernel_vm86_regs *)regs,
- 					  error_code);
- 			return;
-@@ -1032,7 +1073,7 @@ fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
- 	}
- }
+-
+ #define	HIPOWERon	6	/* odd poly, negative terms */
+-static const unsigned long long oddnegterms[HIPOWERon] =
+-{
+-  0x0000000000000000LL, /* Dummy (not for - 1.0) */
+-  0x015328437f756467LL,
+-  0x0005dda27b73dec6LL,
+-  0x0000226bf2bfb91aLL,
+-  0x000000ccc439c5f7LL,
+-  0x0000000355438407LL
+-} ;
++static const unsigned long long oddnegterms[HIPOWERon] = {
++	0x0000000000000000LL,	/* Dummy (not for - 1.0) */
++	0x015328437f756467LL,
++	0x0005dda27b73dec6LL,
++	0x0000226bf2bfb91aLL,
++	0x000000ccc439c5f7LL,
++	0x0000000355438407LL
++};
  
--fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
-+void do_spurious_interrupt_bug(struct pt_regs * regs,
- 					  long error_code)
- {
- #if 0
-@@ -1041,7 +1082,7 @@ fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
- #endif
- }
+ #define	HIPOWERop	6	/* odd poly, positive terms */
+-static const unsigned long long oddplterms[HIPOWERop] =
+-{
++static const unsigned long long oddplterms[HIPOWERop] = {
+ /*  0xaaaaaaaaaaaaaaabLL,  transferred to fixedpterm[] */
+-  0x0db55a71875c9ac2LL,
+-  0x0029fce2d67880b0LL,
+-  0x0000dfd3908b4596LL,
+-  0x00000550fd61dab4LL,
+-  0x0000001c9422b3f9LL,
+-  0x000000003e3301e1LL
++	0x0db55a71875c9ac2LL,
++	0x0029fce2d67880b0LL,
++	0x0000dfd3908b4596LL,
++	0x00000550fd61dab4LL,
++	0x0000001c9422b3f9LL,
++	0x000000003e3301e1LL
+ };
  
--fastcall unsigned long patch_espfix_desc(unsigned long uesp,
-+unsigned long patch_espfix_desc(unsigned long uesp,
- 					  unsigned long kesp)
- {
- 	struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
-@@ -1095,51 +1136,17 @@ asmlinkage void math_emulate(long arg)
+ static const unsigned long long denomterm = 0xebd9b842c5c53a0eLL;
+@@ -48,182 +45,164 @@ static const Xsig fixedpterm = MK_XSIG(0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa);
  
- #endif /* CONFIG_MATH_EMULATION */
+ static const Xsig pi_signif = MK_XSIG(0xc90fdaa2, 0x2168c234, 0xc4c6628b);
  
--/*
-- * This needs to use 'idt_table' rather than 'idt', and
-- * thus use the _nonmapped_ version of the IDT, as the
-- * Pentium F0 0F bugfix can have resulted in the mapped
-- * IDT being write-protected.
-- */
--void set_intr_gate(unsigned int n, void *addr)
--{
--	_set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS);
--}
--
--/*
-- * This routine sets up an interrupt gate at directory privilege level 3.
-- */
--static inline void set_system_intr_gate(unsigned int n, void *addr)
--{
--	_set_gate(n, DESCTYPE_INT | DESCTYPE_DPL3, addr, __KERNEL_CS);
--}
 -
--static void __init set_trap_gate(unsigned int n, void *addr)
--{
--	_set_gate(n, DESCTYPE_TRAP, addr, __KERNEL_CS);
--}
+ /*--- poly_atan() -----------------------------------------------------------+
+  |                                                                           |
+  +---------------------------------------------------------------------------*/
+-void	poly_atan(FPU_REG *st0_ptr, u_char st0_tag,
+-		  FPU_REG *st1_ptr, u_char st1_tag)
++void poly_atan(FPU_REG *st0_ptr, u_char st0_tag,
++	       FPU_REG *st1_ptr, u_char st1_tag)
+ {
+-  u_char	transformed, inverted,
+-                sign1, sign2;
+-  int           exponent;
+-  long int   	dummy_exp;
+-  Xsig          accumulator, Numer, Denom, accumulatore, argSignif,
+-                argSq, argSqSq;
+-  u_char        tag;
+-  
+-  sign1 = getsign(st0_ptr);
+-  sign2 = getsign(st1_ptr);
+-  if ( st0_tag == TAG_Valid )
+-    {
+-      exponent = exponent(st0_ptr);
+-    }
+-  else
+-    {
+-      /* This gives non-compatible stack contents... */
+-      FPU_to_exp16(st0_ptr, st0_ptr);
+-      exponent = exponent16(st0_ptr);
+-    }
+-  if ( st1_tag == TAG_Valid )
+-    {
+-      exponent -= exponent(st1_ptr);
+-    }
+-  else
+-    {
+-      /* This gives non-compatible stack contents... */
+-      FPU_to_exp16(st1_ptr, st1_ptr);
+-      exponent -= exponent16(st1_ptr);
+-    }
 -
--static void __init set_system_gate(unsigned int n, void *addr)
--{
--	_set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS);
--}
+-  if ( (exponent < 0) || ((exponent == 0) &&
+-			  ((st0_ptr->sigh < st1_ptr->sigh) ||
+-			   ((st0_ptr->sigh == st1_ptr->sigh) &&
+-			    (st0_ptr->sigl < st1_ptr->sigl))) ) )
+-    {
+-      inverted = 1;
+-      Numer.lsw = Denom.lsw = 0;
+-      XSIG_LL(Numer) = significand(st0_ptr);
+-      XSIG_LL(Denom) = significand(st1_ptr);
+-    }
+-  else
+-    {
+-      inverted = 0;
+-      exponent = -exponent;
+-      Numer.lsw = Denom.lsw = 0;
+-      XSIG_LL(Numer) = significand(st1_ptr);
+-      XSIG_LL(Denom) = significand(st0_ptr);
+-     }
+-  div_Xsig(&Numer, &Denom, &argSignif);
+-  exponent += norm_Xsig(&argSignif);
 -
--static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
--{
--	_set_gate(n, DESCTYPE_TASK, (void *)0, (gdt_entry<<3));
--}
+-  if ( (exponent >= -1)
+-      || ((exponent == -2) && (argSignif.msw > 0xd413ccd0)) )
+-    {
+-      /* The argument is greater than sqrt(2)-1 (=0.414213562...) */
+-      /* Convert the argument by an identity for atan */
+-      transformed = 1;
 -
- 
- void __init trap_init(void)
- {
- 	int i;
- 
- #ifdef CONFIG_EISA
--	void __iomem *p = ioremap(0x0FFFD9, 4);
-+	void __iomem *p = early_ioremap(0x0FFFD9, 4);
- 	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
- 		EISA_bus = 1;
- 	}
--	iounmap(p);
-+	early_iounmap(p, 4);
- #endif
- 
- #ifdef CONFIG_X86_LOCAL_APIC
-diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
-index cc68b92..efc66df 100644
---- a/arch/x86/kernel/traps_64.c
-+++ b/arch/x86/kernel/traps_64.c
-@@ -74,22 +74,24 @@ asmlinkage void alignment_check(void);
- asmlinkage void machine_check(void);
- asmlinkage void spurious_interrupt_bug(void);
- 
-+static unsigned int code_bytes = 64;
+-      if ( exponent >= 0 )
+-	{
++	u_char transformed, inverted, sign1, sign2;
++	int exponent;
++	long int dummy_exp;
++	Xsig accumulator, Numer, Denom, accumulatore, argSignif, argSq, argSqSq;
++	u_char tag;
 +
- static inline void conditional_sti(struct pt_regs *regs)
- {
--	if (regs->eflags & X86_EFLAGS_IF)
-+	if (regs->flags & X86_EFLAGS_IF)
- 		local_irq_enable();
- }
- 
- static inline void preempt_conditional_sti(struct pt_regs *regs)
- {
- 	preempt_disable();
--	if (regs->eflags & X86_EFLAGS_IF)
-+	if (regs->flags & X86_EFLAGS_IF)
- 		local_irq_enable();
- }
- 
- static inline void preempt_conditional_cli(struct pt_regs *regs)
- {
--	if (regs->eflags & X86_EFLAGS_IF)
-+	if (regs->flags & X86_EFLAGS_IF)
- 		local_irq_disable();
- 	/* Make sure to not schedule here because we could be running
- 	   on an exception stack. */
-@@ -98,14 +100,15 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
- 
- int kstack_depth_to_print = 12;
- 
--#ifdef CONFIG_KALLSYMS
--void printk_address(unsigned long address)
-+void printk_address(unsigned long address, int reliable)
- {
-+#ifdef CONFIG_KALLSYMS
- 	unsigned long offset = 0, symsize;
- 	const char *symname;
- 	char *modname;
- 	char *delim = ":";
--	char namebuf[128];
-+	char namebuf[KSYM_NAME_LEN];
-+	char reliab[4] = "";
- 
- 	symname = kallsyms_lookup(address, &symsize, &offset,
- 					&modname, namebuf);
-@@ -113,17 +116,17 @@ void printk_address(unsigned long address)
- 		printk(" [<%016lx>]\n", address);
- 		return;
- 	}
-+	if (!reliable)
-+		strcpy(reliab, "? ");
++	sign1 = getsign(st0_ptr);
++	sign2 = getsign(st1_ptr);
++	if (st0_tag == TAG_Valid) {
++		exponent = exponent(st0_ptr);
++	} else {
++		/* This gives non-compatible stack contents... */
++		FPU_to_exp16(st0_ptr, st0_ptr);
++		exponent = exponent16(st0_ptr);
++	}
++	if (st1_tag == TAG_Valid) {
++		exponent -= exponent(st1_ptr);
++	} else {
++		/* This gives non-compatible stack contents... */
++		FPU_to_exp16(st1_ptr, st1_ptr);
++		exponent -= exponent16(st1_ptr);
++	}
 +
- 	if (!modname)
--		modname = delim = ""; 		
--	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
--		address, delim, modname, delim, symname, offset, symsize);
--}
-+		modname = delim = "";
-+	printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
-+		address, reliab, delim, modname, delim, symname, offset, symsize);
- #else
--void printk_address(unsigned long address)
--{
- 	printk(" [<%016lx>]\n", address);
--}
- #endif
-+}
- 
- static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
- 					unsigned *usedp, char **idp)
-@@ -208,14 +211,53 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
-  */
- 
--static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
-+static inline int valid_stack_ptr(struct thread_info *tinfo,
-+			void *p, unsigned int size, void *end)
-+{
-+	void *t = tinfo;
-+	if (end) {
-+		if (p < end && p >= (end-THREAD_SIZE))
-+			return 1;
-+		else
-+			return 0;
++	if ((exponent < 0) || ((exponent == 0) &&
++			       ((st0_ptr->sigh < st1_ptr->sigh) ||
++				((st0_ptr->sigh == st1_ptr->sigh) &&
++				 (st0_ptr->sigl < st1_ptr->sigl))))) {
++		inverted = 1;
++		Numer.lsw = Denom.lsw = 0;
++		XSIG_LL(Numer) = significand(st0_ptr);
++		XSIG_LL(Denom) = significand(st1_ptr);
++	} else {
++		inverted = 0;
++		exponent = -exponent;
++		Numer.lsw = Denom.lsw = 0;
++		XSIG_LL(Numer) = significand(st1_ptr);
++		XSIG_LL(Denom) = significand(st0_ptr);
 +	}
-+	return p > t && p < t + THREAD_SIZE - size;
-+}
++	div_Xsig(&Numer, &Denom, &argSignif);
++	exponent += norm_Xsig(&argSignif);
 +
-+/* The form of the top of the frame on the stack */
-+struct stack_frame {
-+	struct stack_frame *next_frame;
-+	unsigned long return_address;
-+};
++	if ((exponent >= -1)
++	    || ((exponent == -2) && (argSignif.msw > 0xd413ccd0))) {
++		/* The argument is greater than sqrt(2)-1 (=0.414213562...) */
++		/* Convert the argument by an identity for atan */
++		transformed = 1;
 +
++		if (exponent >= 0) {
+ #ifdef PARANOID
+-	  if ( !( (exponent == 0) && 
+-		 (argSignif.lsw == 0) && (argSignif.midw == 0) &&
+-		 (argSignif.msw == 0x80000000) ) )
+-	    {
+-	      EXCEPTION(EX_INTERNAL|0x104);  /* There must be a logic error */
+-	      return;
+-	    }
++			if (!((exponent == 0) &&
++			      (argSignif.lsw == 0) && (argSignif.midw == 0) &&
++			      (argSignif.msw == 0x80000000))) {
++				EXCEPTION(EX_INTERNAL | 0x104);	/* There must be a logic error */
++				return;
++			}
+ #endif /* PARANOID */
+-	  argSignif.msw = 0;   /* Make the transformed arg -> 0.0 */
++			argSignif.msw = 0;	/* Make the transformed arg -> 0.0 */
++		} else {
++			Numer.lsw = Denom.lsw = argSignif.lsw;
++			XSIG_LL(Numer) = XSIG_LL(Denom) = XSIG_LL(argSignif);
 +
-+static inline unsigned long print_context_stack(struct thread_info *tinfo,
-+				unsigned long *stack, unsigned long bp,
-+				const struct stacktrace_ops *ops, void *data,
-+				unsigned long *end)
- {
--	void *t = (void *)tinfo;
--        return p > t && p < t + THREAD_SIZE - 3;
-+	struct stack_frame *frame = (struct stack_frame *)bp;
++			if (exponent < -1)
++				shr_Xsig(&Numer, -1 - exponent);
++			negate_Xsig(&Numer);
 +
-+	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
-+		unsigned long addr;
++			shr_Xsig(&Denom, -exponent);
++			Denom.msw |= 0x80000000;
 +
-+		addr = *stack;
-+		if (__kernel_text_address(addr)) {
-+			if ((unsigned long) stack == bp + 8) {
-+				ops->address(data, addr, 1);
-+				frame = frame->next_frame;
-+				bp = (unsigned long) frame;
-+			} else {
-+				ops->address(data, addr, bp == 0);
-+			}
-+		}
-+		stack++;
-+	}
-+	return bp;
- }
- 
- void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
--		unsigned long *stack,
-+		unsigned long *stack, unsigned long bp,
- 		const struct stacktrace_ops *ops, void *data)
- {
- 	const unsigned cpu = get_cpu();
-@@ -225,36 +267,28 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
- 
- 	if (!tsk)
- 		tsk = current;
-+	tinfo = task_thread_info(tsk);
- 
- 	if (!stack) {
- 		unsigned long dummy;
- 		stack = &dummy;
- 		if (tsk && tsk != current)
--			stack = (unsigned long *)tsk->thread.rsp;
-+			stack = (unsigned long *)tsk->thread.sp;
- 	}
- 
--	/*
--	 * Print function call entries within a stack. 'cond' is the
--	 * "end of stackframe" condition, that the 'stack++'
--	 * iteration will eventually trigger.
--	 */
--#define HANDLE_STACK(cond) \
--	do while (cond) { \
--		unsigned long addr = *stack++; \
--		/* Use unlocked access here because except for NMIs	\
--		   we should be already protected against module unloads */ \
--		if (__kernel_text_address(addr)) { \
--			/* \
--			 * If the address is either in the text segment of the \
--			 * kernel, or in the region which contains vmalloc'ed \
--			 * memory, it *may* be the address of a calling \
--			 * routine; if so, print it so that someone tracing \
--			 * down the cause of the crash will be able to figure \
--			 * out the call path that was taken. \
--			 */ \
--			ops->address(data, addr);   \
--		} \
--	} while (0)
-+#ifdef CONFIG_FRAME_POINTER
-+	if (!bp) {
-+		if (tsk == current) {
-+			/* Grab bp right from our regs */
-+			asm("movq %%rbp, %0" : "=r" (bp):);
-+		} else {
-+			/* bp is the last reg pushed by switch_to */
-+			bp = *(unsigned long *) tsk->thread.sp;
++			div_Xsig(&Numer, &Denom, &argSignif);
++
++			exponent = -1 + norm_Xsig(&argSignif);
 +		}
++	} else {
++		transformed = 0;
 +	}
-+#endif
 +
++	argSq.lsw = argSignif.lsw;
++	argSq.midw = argSignif.midw;
++	argSq.msw = argSignif.msw;
++	mul_Xsig_Xsig(&argSq, &argSq);
 +
- 
- 	/*
- 	 * Print function call entries in all stacks, starting at the
-@@ -270,7 +304,9 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
- 		if (estack_end) {
- 			if (ops->stack(data, id) < 0)
- 				break;
--			HANDLE_STACK (stack < estack_end);
++	argSqSq.lsw = argSq.lsw;
++	argSqSq.midw = argSq.midw;
++	argSqSq.msw = argSq.msw;
++	mul_Xsig_Xsig(&argSqSq, &argSqSq);
 +
-+			bp = print_context_stack(tinfo, stack, bp, ops,
-+							data, estack_end);
- 			ops->stack(data, "<EOE>");
- 			/*
- 			 * We link to the next stack via the
-@@ -288,7 +324,8 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
- 			if (stack >= irqstack && stack < irqstack_end) {
- 				if (ops->stack(data, "IRQ") < 0)
- 					break;
--				HANDLE_STACK (stack < irqstack_end);
-+				bp = print_context_stack(tinfo, stack, bp,
-+						ops, data, irqstack_end);
- 				/*
- 				 * We link to the next stack (which would be
- 				 * the process stack normally) the last
-@@ -306,9 +343,7 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
- 	/*
- 	 * This handles the process stack:
- 	 */
--	tinfo = task_thread_info(tsk);
--	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
--#undef HANDLE_STACK
-+	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
- 	put_cpu();
- }
- EXPORT_SYMBOL(dump_trace);
-@@ -331,10 +366,10 @@ static int print_trace_stack(void *data, char *name)
- 	return 0;
- }
- 
--static void print_trace_address(void *data, unsigned long addr)
-+static void print_trace_address(void *data, unsigned long addr, int reliable)
- {
- 	touch_nmi_watchdog();
--	printk_address(addr);
-+	printk_address(addr, reliable);
- }
- 
- static const struct stacktrace_ops print_trace_ops = {
-@@ -345,15 +380,17 @@ static const struct stacktrace_ops print_trace_ops = {
- };
- 
- void
--show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
-+show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
-+		unsigned long bp)
- {
- 	printk("\nCall Trace:\n");
--	dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
-+	dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL);
- 	printk("\n");
- }
- 
- static void
--_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
-+_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
-+							unsigned long bp)
- {
- 	unsigned long *stack;
- 	int i;
-@@ -364,14 +401,14 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
- 	// debugging aid: "show_stack(NULL, NULL);" prints the
- 	// back trace for this cpu.
- 
--	if (rsp == NULL) {
-+	if (sp == NULL) {
- 		if (tsk)
--			rsp = (unsigned long *)tsk->thread.rsp;
-+			sp = (unsigned long *)tsk->thread.sp;
- 		else
--			rsp = (unsigned long *)&rsp;
-+			sp = (unsigned long *)&sp;
- 	}
- 
--	stack = rsp;
-+	stack = sp;
- 	for(i=0; i < kstack_depth_to_print; i++) {
- 		if (stack >= irqstack && stack <= irqstack_end) {
- 			if (stack == irqstack_end) {
-@@ -387,12 +424,12 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
- 		printk(" %016lx", *stack++);
- 		touch_nmi_watchdog();
- 	}
--	show_trace(tsk, regs, rsp);
-+	show_trace(tsk, regs, sp, bp);
- }
- 
--void show_stack(struct task_struct *tsk, unsigned long * rsp)
-+void show_stack(struct task_struct *tsk, unsigned long * sp)
- {
--	_show_stack(tsk, NULL, rsp);
-+	_show_stack(tsk, NULL, sp, 0);
- }
- 
- /*
-@@ -401,13 +438,19 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
- void dump_stack(void)
- {
- 	unsigned long dummy;
-+	unsigned long bp = 0;
++	accumulatore.lsw = argSq.lsw;
++	XSIG_LL(accumulatore) = XSIG_LL(argSq);
 +
-+#ifdef CONFIG_FRAME_POINTER
-+	if (!bp)
-+		asm("movq %%rbp, %0" : "=r" (bp):);
-+#endif
- 
- 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
- 		current->pid, current->comm, print_tainted(),
- 		init_utsname()->release,
- 		(int)strcspn(init_utsname()->version, " "),
- 		init_utsname()->version);
--	show_trace(NULL, NULL, &dummy);
-+	show_trace(NULL, NULL, &dummy, bp);
- }
- 
- EXPORT_SYMBOL(dump_stack);
-@@ -415,12 +458,15 @@ EXPORT_SYMBOL(dump_stack);
- void show_registers(struct pt_regs *regs)
- {
- 	int i;
--	int in_kernel = !user_mode(regs);
--	unsigned long rsp;
-+	unsigned long sp;
- 	const int cpu = smp_processor_id();
- 	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
-+	u8 *ip;
-+	unsigned int code_prologue = code_bytes * 43 / 64;
-+	unsigned int code_len = code_bytes;
- 
--	rsp = regs->rsp;
-+	sp = regs->sp;
-+	ip = (u8 *) regs->ip - code_prologue;
- 	printk("CPU %d ", cpu);
- 	__show_regs(regs);
- 	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
-@@ -430,45 +476,43 @@ void show_registers(struct pt_regs *regs)
- 	 * When in-kernel, we also print out the stack and code at the
- 	 * time of the fault..
- 	 */
--	if (in_kernel) {
-+	if (!user_mode(regs)) {
-+		unsigned char c;
- 		printk("Stack: ");
--		_show_stack(NULL, regs, (unsigned long*)rsp);
--
--		printk("\nCode: ");
--		if (regs->rip < PAGE_OFFSET)
--			goto bad;
--
--		for (i=0; i<20; i++) {
--			unsigned char c;
--			if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
--bad:
-+		_show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
-+		printk("\n");
++	shr_Xsig(&argSq, 2 * (-1 - exponent - 1));
++	shr_Xsig(&argSqSq, 4 * (-1 - exponent - 1));
 +
-+		printk(KERN_EMERG "Code: ");
-+		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
-+			/* try starting at RIP */
-+			ip = (u8 *) regs->ip;
-+			code_len = code_len - code_prologue + 1;
-+		}
-+		for (i = 0; i < code_len; i++, ip++) {
-+			if (ip < (u8 *)PAGE_OFFSET ||
-+					probe_kernel_address(ip, c)) {
- 				printk(" Bad RIP value.");
- 				break;
- 			}
--			printk("%02x ", c);
-+			if (ip == (u8 *)regs->ip)
-+				printk("<%02x> ", c);
-+			else
-+				printk("%02x ", c);
- 		}
- 	}
- 	printk("\n");
- }	
- 
--int is_valid_bugaddr(unsigned long rip)
-+int is_valid_bugaddr(unsigned long ip)
- {
- 	unsigned short ud2;
- 
--	if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
-+	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
- 		return 0;
- 
- 	return ud2 == 0x0b0f;
- }
- 
--#ifdef CONFIG_BUG
--void out_of_line_bug(void)
--{ 
--	BUG(); 
--} 
--EXPORT_SYMBOL(out_of_line_bug);
--#endif
--
- static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
- static int die_owner = -1;
- static unsigned int die_nest_count;
-@@ -496,7 +540,7 @@ unsigned __kprobes long oops_begin(void)
- 	return flags;
- }
- 
--void __kprobes oops_end(unsigned long flags)
-+void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
- { 
- 	die_owner = -1;
- 	bust_spinlocks(0);
-@@ -505,12 +549,17 @@ void __kprobes oops_end(unsigned long flags)
- 		/* Nest count reaches zero, release the lock. */
- 		__raw_spin_unlock(&die_lock);
- 	raw_local_irq_restore(flags);
-+	if (!regs) {
-+		oops_exit();
-+		return;
++	/* Now have argSq etc with binary point at the left
++	   .1xxxxxxxx */
++
++	/* Do the basic fixed point polynomial evaluation */
++	accumulator.msw = accumulator.midw = accumulator.lsw = 0;
++	polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq),
++			oddplterms, HIPOWERop - 1);
++	mul64_Xsig(&accumulator, &XSIG_LL(argSq));
++	negate_Xsig(&accumulator);
++	polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq), oddnegterms,
++			HIPOWERon - 1);
++	negate_Xsig(&accumulator);
++	add_two_Xsig(&accumulator, &fixedpterm, &dummy_exp);
++
++	mul64_Xsig(&accumulatore, &denomterm);
++	shr_Xsig(&accumulatore, 1 + 2 * (-1 - exponent));
++	accumulatore.msw |= 0x80000000;
++
++	div_Xsig(&accumulator, &accumulatore, &accumulator);
++
++	mul_Xsig_Xsig(&accumulator, &argSignif);
++	mul_Xsig_Xsig(&accumulator, &argSq);
++
++	shr_Xsig(&accumulator, 3);
++	negate_Xsig(&accumulator);
++	add_Xsig_Xsig(&accumulator, &argSignif);
++
++	if (transformed) {
++		/* compute pi/4 - accumulator */
++		shr_Xsig(&accumulator, -1 - exponent);
++		negate_Xsig(&accumulator);
++		add_Xsig_Xsig(&accumulator, &pi_signif);
++		exponent = -1;
 +	}
- 	if (panic_on_oops)
- 		panic("Fatal exception");
- 	oops_exit();
-+	do_exit(signr);
- }
- 
--void __kprobes __die(const char * str, struct pt_regs * regs, long err)
-+int __kprobes __die(const char * str, struct pt_regs * regs, long err)
- {
- 	static int die_counter;
- 	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
-@@ -524,15 +573,17 @@ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
- 	printk("DEBUG_PAGEALLOC");
- #endif
- 	printk("\n");
--	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
-+	if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
-+		return 1;
- 	show_registers(regs);
- 	add_taint(TAINT_DIE);
- 	/* Executive summary in case the oops scrolled away */
- 	printk(KERN_ALERT "RIP ");
--	printk_address(regs->rip); 
--	printk(" RSP <%016lx>\n", regs->rsp); 
-+	printk_address(regs->ip, 1);
-+	printk(" RSP <%016lx>\n", regs->sp);
- 	if (kexec_should_crash(current))
- 		crash_kexec(regs);
-+	return 0;
- }
- 
- void die(const char * str, struct pt_regs * regs, long err)
-@@ -540,11 +591,11 @@ void die(const char * str, struct pt_regs * regs, long err)
- 	unsigned long flags = oops_begin();
- 
- 	if (!user_mode(regs))
--		report_bug(regs->rip, regs);
-+		report_bug(regs->ip, regs);
- 
--	__die(str, regs, err);
--	oops_end(flags);
--	do_exit(SIGSEGV); 
-+	if (__die(str, regs, err))
-+		regs = NULL;
-+	oops_end(flags, regs, SIGSEGV);
- }
- 
- void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
-@@ -561,10 +612,10 @@ void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
- 		crash_kexec(regs);
- 	if (do_panic || panic_on_oops)
- 		panic("Non maskable interrupt");
--	oops_end(flags);
-+	oops_end(flags, NULL, SIGBUS);
- 	nmi_exit();
- 	local_irq_enable();
--	do_exit(SIGSEGV);
-+	do_exit(SIGBUS);
- }
- 
- static void __kprobes do_trap(int trapnr, int signr, char *str,
-@@ -588,11 +639,14 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
- 		tsk->thread.trap_no = trapnr;
- 
- 		if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
--		    printk_ratelimit())
-+		    printk_ratelimit()) {
- 			printk(KERN_INFO
--			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
-+			       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
- 			       tsk->comm, tsk->pid, str,
--			       regs->rip, regs->rsp, error_code); 
-+			       regs->ip, regs->sp, error_code);
-+			print_vma_addr(" in ", regs->ip);
-+			printk("\n");
-+		}
- 
- 		if (info)
- 			force_sig_info(signr, info, tsk);
-@@ -602,19 +656,12 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
- 	}
- 
- 
--	/* kernel trap */ 
--	{	     
--		const struct exception_table_entry *fixup;
--		fixup = search_exception_tables(regs->rip);
--		if (fixup)
--			regs->rip = fixup->fixup;
--		else {
--			tsk->thread.error_code = error_code;
--			tsk->thread.trap_no = trapnr;
--			die(str, regs, error_code);
--		}
--		return;
-+	if (!fixup_exception(regs)) {
-+		tsk->thread.error_code = error_code;
-+		tsk->thread.trap_no = trapnr;
-+		die(str, regs, error_code);
++
++	if (inverted) {
++		/* compute pi/2 - accumulator */
++		shr_Xsig(&accumulator, -exponent);
++		negate_Xsig(&accumulator);
++		add_Xsig_Xsig(&accumulator, &pi_signif);
++		exponent = 0;
  	}
-+	return;
- }
- 
- #define DO_ERROR(trapnr, signr, str, name) \
-@@ -643,10 +690,10 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
- 	do_trap(trapnr, signr, str, regs, error_code, &info); \
- }
- 
--DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
-+DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->ip)
- DO_ERROR( 4, SIGSEGV, "overflow", overflow)
- DO_ERROR( 5, SIGSEGV, "bounds", bounds)
--DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
-+DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
- DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
- DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
- DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-@@ -694,32 +741,28 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
- 		tsk->thread.trap_no = 13;
- 
- 		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
--		    printk_ratelimit())
-+		    printk_ratelimit()) {
- 			printk(KERN_INFO
--		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
-+		       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
- 			       tsk->comm, tsk->pid,
--			       regs->rip, regs->rsp, error_code); 
-+			       regs->ip, regs->sp, error_code);
-+			print_vma_addr(" in ", regs->ip);
-+			printk("\n");
-+		}
- 
- 		force_sig(SIGSEGV, tsk);
- 		return;
- 	} 
- 
--	/* kernel gp */
+-      else
 -	{
--		const struct exception_table_entry *fixup;
--		fixup = search_exception_tables(regs->rip);
--		if (fixup) {
--			regs->rip = fixup->fixup;
--			return;
--		}
-+	if (fixup_exception(regs))
-+		return;
- 
--		tsk->thread.error_code = error_code;
--		tsk->thread.trap_no = 13;
--		if (notify_die(DIE_GPF, "general protection fault", regs,
--					error_code, 13, SIGSEGV) == NOTIFY_STOP)
--			return;
--		die("general protection fault", regs, error_code);
--	}
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 13;
-+	if (notify_die(DIE_GPF, "general protection fault", regs,
-+				error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+		return;
-+	die("general protection fault", regs, error_code);
- }
- 
- static __kprobes void
-@@ -832,15 +875,15 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
- {
- 	struct pt_regs *regs = eregs;
- 	/* Did already sync */
--	if (eregs == (struct pt_regs *)eregs->rsp)
-+	if (eregs == (struct pt_regs *)eregs->sp)
- 		;
- 	/* Exception from user space */
- 	else if (user_mode(eregs))
- 		regs = task_pt_regs(current);
- 	/* Exception from kernel and interrupts are enabled. Move to
-  	   kernel process stack. */
--	else if (eregs->eflags & X86_EFLAGS_IF)
--		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
-+	else if (eregs->flags & X86_EFLAGS_IF)
-+		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
- 	if (eregs != regs)
- 		*regs = *eregs;
- 	return regs;
-@@ -858,6 +901,12 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
- 
- 	get_debugreg(condition, 6);
- 
-+	/*
-+	 * The processor cleared BTF, so don't mark that we need it set.
-+	 */
-+	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
-+	tsk->thread.debugctlmsr = 0;
-+
- 	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
- 						SIGTRAP) == NOTIFY_STOP)
- 		return;
-@@ -873,27 +922,14 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
- 
- 	tsk->thread.debugreg6 = condition;
- 
--	/* Mask out spurious TF errors due to lazy TF clearing */
+-	  Numer.lsw = Denom.lsw = argSignif.lsw;
+-	  XSIG_LL(Numer) = XSIG_LL(Denom) = XSIG_LL(argSignif);
+-
+-	  if ( exponent < -1 )
+-	    shr_Xsig(&Numer, -1-exponent);
+-	  negate_Xsig(&Numer);
+-      
+-	  shr_Xsig(&Denom, -exponent);
+-	  Denom.msw |= 0x80000000;
+-      
+-	  div_Xsig(&Numer, &Denom, &argSignif);
+-
+-	  exponent = -1 + norm_Xsig(&argSignif);
 +
-+	/*
-+	 * Single-stepping through TF: make sure we ignore any events in
-+	 * kernel space (but re-enable TF when returning to user mode).
-+	 */
- 	if (condition & DR_STEP) {
--		/*
--		 * The TF error should be masked out only if the current
--		 * process is not traced and if the TRAP flag has been set
--		 * previously by a tracing process (condition detected by
--		 * the PT_DTRACE flag); remember that the i386 TRAP flag
--		 * can be modified by the process itself in user mode,
--		 * allowing programs to debug themselves without the ptrace()
--		 * interface.
--		 */
-                 if (!user_mode(regs))
-                        goto clear_TF_reenable;
--		/*
--		 * Was the TF flag set by a debugger? If so, clear it now,
--		 * so that register information is correct.
--		 */
--		if (tsk->ptrace & PT_DTRACE) {
--			regs->eflags &= ~TF_MASK;
--			tsk->ptrace &= ~PT_DTRACE;
--		}
++	if (sign1) {
++		/* compute pi - accumulator */
++		shr_Xsig(&accumulator, 1 - exponent);
++		negate_Xsig(&accumulator);
++		add_Xsig_Xsig(&accumulator, &pi_signif);
++		exponent = 1;
  	}
- 
- 	/* Ok, finally something we can handle */
-@@ -902,7 +938,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
- 	info.si_signo = SIGTRAP;
- 	info.si_errno = 0;
- 	info.si_code = TRAP_BRKPT;
--	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
-+	info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
- 	force_sig_info(SIGTRAP, &info, tsk);
- 
- clear_dr7:
-@@ -912,18 +948,15 @@ clear_dr7:
- 
- clear_TF_reenable:
- 	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
--	regs->eflags &= ~TF_MASK;
-+	regs->flags &= ~X86_EFLAGS_TF;
- 	preempt_conditional_cli(regs);
- }
- 
- static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
- {
--	const struct exception_table_entry *fixup;
--	fixup = search_exception_tables(regs->rip);
--	if (fixup) {
--		regs->rip = fixup->fixup;
-+	if (fixup_exception(regs))
- 		return 1;
--	}
-+
- 	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
- 	/* Illegal floating point operation in the kernel */
- 	current->thread.trap_no = trapnr;
-@@ -938,7 +971,7 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-  */
- asmlinkage void do_coprocessor_error(struct pt_regs *regs)
- {
--	void __user *rip = (void __user *)(regs->rip);
-+	void __user *ip = (void __user *)(regs->ip);
- 	struct task_struct * task;
- 	siginfo_t info;
- 	unsigned short cwd, swd;
-@@ -958,7 +991,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
- 	info.si_signo = SIGFPE;
- 	info.si_errno = 0;
- 	info.si_code = __SI_FAULT;
--	info.si_addr = rip;
-+	info.si_addr = ip;
- 	/*
- 	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
- 	 * status.  0x3f is the exception bits in these regs, 0x200 is the
-@@ -1007,7 +1040,7 @@ asmlinkage void bad_intr(void)
- 
- asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
- {
--	void __user *rip = (void __user *)(regs->rip);
-+	void __user *ip = (void __user *)(regs->ip);
- 	struct task_struct * task;
- 	siginfo_t info;
- 	unsigned short mxcsr;
-@@ -1027,7 +1060,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
- 	info.si_signo = SIGFPE;
- 	info.si_errno = 0;
- 	info.si_code = __SI_FAULT;
--	info.si_addr = rip;
-+	info.si_addr = ip;
- 	/*
- 	 * The SIMD FPU exceptions are handled a little differently, as there
- 	 * is only a single status/control register.  Thus, to determine which
-@@ -1089,6 +1122,7 @@ asmlinkage void math_state_restore(void)
- 	task_thread_info(me)->status |= TS_USEDFPU;
- 	me->fpu_counter++;
- }
-+EXPORT_SYMBOL_GPL(math_state_restore);
- 
- void __init trap_init(void)
- {
-@@ -1144,3 +1178,14 @@ static int __init kstack_setup(char *s)
- 	return 0;
- }
- early_param("kstack", kstack_setup);
+-    }
+-  else
+-    {
+-      transformed = 0;
+-    }
+-
+-  argSq.lsw = argSignif.lsw; argSq.midw = argSignif.midw;
+-  argSq.msw = argSignif.msw;
+-  mul_Xsig_Xsig(&argSq, &argSq);
+-  
+-  argSqSq.lsw = argSq.lsw; argSqSq.midw = argSq.midw; argSqSq.msw = argSq.msw;
+-  mul_Xsig_Xsig(&argSqSq, &argSqSq);
+-
+-  accumulatore.lsw = argSq.lsw;
+-  XSIG_LL(accumulatore) = XSIG_LL(argSq);
+-
+-  shr_Xsig(&argSq, 2*(-1-exponent-1));
+-  shr_Xsig(&argSqSq, 4*(-1-exponent-1));
+-
+-  /* Now have argSq etc with binary point at the left
+-     .1xxxxxxxx */
+-
+-  /* Do the basic fixed point polynomial evaluation */
+-  accumulator.msw = accumulator.midw = accumulator.lsw = 0;
+-  polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq),
+-		   oddplterms, HIPOWERop-1);
+-  mul64_Xsig(&accumulator, &XSIG_LL(argSq));
+-  negate_Xsig(&accumulator);
+-  polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq), oddnegterms, HIPOWERon-1);
+-  negate_Xsig(&accumulator);
+-  add_two_Xsig(&accumulator, &fixedpterm, &dummy_exp);
+-
+-  mul64_Xsig(&accumulatore, &denomterm);
+-  shr_Xsig(&accumulatore, 1 + 2*(-1-exponent));
+-  accumulatore.msw |= 0x80000000;
+-
+-  div_Xsig(&accumulator, &accumulatore, &accumulator);
+-
+-  mul_Xsig_Xsig(&accumulator, &argSignif);
+-  mul_Xsig_Xsig(&accumulator, &argSq);
+-
+-  shr_Xsig(&accumulator, 3);
+-  negate_Xsig(&accumulator);
+-  add_Xsig_Xsig(&accumulator, &argSignif);
+-
+-  if ( transformed )
+-    {
+-      /* compute pi/4 - accumulator */
+-      shr_Xsig(&accumulator, -1-exponent);
+-      negate_Xsig(&accumulator);
+-      add_Xsig_Xsig(&accumulator, &pi_signif);
+-      exponent = -1;
+-    }
+-
+-  if ( inverted )
+-    {
+-      /* compute pi/2 - accumulator */
+-      shr_Xsig(&accumulator, -exponent);
+-      negate_Xsig(&accumulator);
+-      add_Xsig_Xsig(&accumulator, &pi_signif);
+-      exponent = 0;
+-    }
+-
+-  if ( sign1 )
+-    {
+-      /* compute pi - accumulator */
+-      shr_Xsig(&accumulator, 1 - exponent);
+-      negate_Xsig(&accumulator);
+-      add_Xsig_Xsig(&accumulator, &pi_signif);
+-      exponent = 1;
+-    }
+-
+-  exponent += round_Xsig(&accumulator);
+-
+-  significand(st1_ptr) = XSIG_LL(accumulator);
+-  setexponent16(st1_ptr, exponent);
+-
+-  tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign2);
+-  FPU_settagi(1, tag);
+-
+-  set_precision_flag_up();  /* We do not really know if up or down,
+-			       use this as the default. */
 +
++	exponent += round_Xsig(&accumulator);
 +
-+static int __init code_bytes_setup(char *s)
-+{
-+	code_bytes = simple_strtoul(s, NULL, 0);
-+	if (code_bytes > 8192)
-+		code_bytes = 8192;
++	significand(st1_ptr) = XSIG_LL(accumulator);
++	setexponent16(st1_ptr, exponent);
 +
-+	return 1;
-+}
-+__setup("code_bytes=", code_bytes_setup);
-diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
-index 9ebc0da..43517e3 100644
---- a/arch/x86/kernel/tsc_32.c
-+++ b/arch/x86/kernel/tsc_32.c
-@@ -5,6 +5,7 @@
- #include <linux/jiffies.h>
- #include <linux/init.h>
- #include <linux/dmi.h>
-+#include <linux/percpu.h>
++	tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign2);
++	FPU_settagi(1, tag);
++
++	set_precision_flag_up();	/* We do not really know if up or down,
++					   use this as the default. */
  
- #include <asm/delay.h>
- #include <asm/tsc.h>
-@@ -23,8 +24,6 @@ static int tsc_enabled;
- unsigned int tsc_khz;
- EXPORT_SYMBOL_GPL(tsc_khz);
+ }
+diff --git a/arch/x86/math-emu/poly_l2.c b/arch/x86/math-emu/poly_l2.c
+index dd00e1d..8e2ff4b 100644
+--- a/arch/x86/math-emu/poly_l2.c
++++ b/arch/x86/math-emu/poly_l2.c
+@@ -10,7 +10,6 @@
+  |                                                                           |
+  +---------------------------------------------------------------------------*/
  
--int tsc_disable;
--
- #ifdef CONFIG_X86_TSC
- static int __init tsc_setup(char *str)
- {
-@@ -39,8 +38,7 @@ static int __init tsc_setup(char *str)
-  */
- static int __init tsc_setup(char *str)
- {
--	tsc_disable = 1;
 -
-+	setup_clear_cpu_cap(X86_FEATURE_TSC);
- 	return 1;
- }
- #endif
-@@ -80,13 +78,31 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
-  *
-  *			-johnstul at us.ibm.com "math is hard, lets go shopping!"
-  */
--unsigned long cyc2ns_scale __read_mostly;
+ #include "exception.h"
+ #include "reg_constant.h"
+ #include "fpu_emu.h"
+@@ -18,184 +17,163 @@
+ #include "control_w.h"
+ #include "poly.h"
  
--#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-+DEFINE_PER_CPU(unsigned long, cyc2ns);
+-
+ static void log2_kernel(FPU_REG const *arg, u_char argsign,
+-			Xsig *accum_result, long int *expon);
+-
++			Xsig * accum_result, long int *expon);
  
--static inline void set_cyc2ns_scale(unsigned long cpu_khz)
-+static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+ /*--- poly_l2() -------------------------------------------------------------+
+  |   Base 2 logarithm by a polynomial approximation.                         |
+  +---------------------------------------------------------------------------*/
+-void	poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign)
++void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign)
  {
--	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
-+	unsigned long flags, prev_scale, *scale;
-+	unsigned long long tsc_now, ns_now;
-+
-+	local_irq_save(flags);
-+	sched_clock_idle_sleep_event();
+-  long int	       exponent, expon, expon_expon;
+-  Xsig                 accumulator, expon_accum, yaccum;
+-  u_char		       sign, argsign;
+-  FPU_REG              x;
+-  int                  tag;
+-
+-  exponent = exponent16(st0_ptr);
+-
+-  /* From st0_ptr, make a number > sqrt(2)/2 and < sqrt(2) */
+-  if ( st0_ptr->sigh > (unsigned)0xb504f334 )
+-    {
+-      /* Treat as  sqrt(2)/2 < st0_ptr < 1 */
+-      significand(&x) = - significand(st0_ptr);
+-      setexponent16(&x, -1);
+-      exponent++;
+-      argsign = SIGN_NEG;
+-    }
+-  else
+-    {
+-      /* Treat as  1 <= st0_ptr < sqrt(2) */
+-      x.sigh = st0_ptr->sigh - 0x80000000;
+-      x.sigl = st0_ptr->sigl;
+-      setexponent16(&x, 0);
+-      argsign = SIGN_POS;
+-    }
+-  tag = FPU_normalize_nuo(&x);
+-
+-  if ( tag == TAG_Zero )
+-    {
+-      expon = 0;
+-      accumulator.msw = accumulator.midw = accumulator.lsw = 0;
+-    }
+-  else
+-    {
+-      log2_kernel(&x, argsign, &accumulator, &expon);
+-    }
+-
+-  if ( exponent < 0 )
+-    {
+-      sign = SIGN_NEG;
+-      exponent = -exponent;
+-    }
+-  else
+-    sign = SIGN_POS;
+-  expon_accum.msw = exponent; expon_accum.midw = expon_accum.lsw = 0;
+-  if ( exponent )
+-    {
+-      expon_expon = 31 + norm_Xsig(&expon_accum);
+-      shr_Xsig(&accumulator, expon_expon - expon);
+-
+-      if ( sign ^ argsign )
+-	negate_Xsig(&accumulator);
+-      add_Xsig_Xsig(&accumulator, &expon_accum);
+-    }
+-  else
+-    {
+-      expon_expon = expon;
+-      sign = argsign;
+-    }
+-
+-  yaccum.lsw = 0; XSIG_LL(yaccum) = significand(st1_ptr);
+-  mul_Xsig_Xsig(&accumulator, &yaccum);
+-
+-  expon_expon += round_Xsig(&accumulator);
+-
+-  if ( accumulator.msw == 0 )
+-    {
+-      FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
+-      return;
+-    }
+-
+-  significand(st1_ptr) = XSIG_LL(accumulator);
+-  setexponent16(st1_ptr, expon_expon + exponent16(st1_ptr) + 1);
+-
+-  tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign ^ st1_sign);
+-  FPU_settagi(1, tag);
+-
+-  set_precision_flag_up();  /* 80486 appears to always do this */
+-
+-  return;
++	long int exponent, expon, expon_expon;
++	Xsig accumulator, expon_accum, yaccum;
++	u_char sign, argsign;
++	FPU_REG x;
++	int tag;
 +
-+	scale = &per_cpu(cyc2ns, cpu);
++	exponent = exponent16(st0_ptr);
 +
-+	rdtscll(tsc_now);
-+	ns_now = __cycles_2_ns(tsc_now);
++	/* From st0_ptr, make a number > sqrt(2)/2 and < sqrt(2) */
++	if (st0_ptr->sigh > (unsigned)0xb504f334) {
++		/* Treat as  sqrt(2)/2 < st0_ptr < 1 */
++		significand(&x) = -significand(st0_ptr);
++		setexponent16(&x, -1);
++		exponent++;
++		argsign = SIGN_NEG;
++	} else {
++		/* Treat as  1 <= st0_ptr < sqrt(2) */
++		x.sigh = st0_ptr->sigh - 0x80000000;
++		x.sigl = st0_ptr->sigl;
++		setexponent16(&x, 0);
++		argsign = SIGN_POS;
++	}
++	tag = FPU_normalize_nuo(&x);
+ 
+-}
++	if (tag == TAG_Zero) {
++		expon = 0;
++		accumulator.msw = accumulator.midw = accumulator.lsw = 0;
++	} else {
++		log2_kernel(&x, argsign, &accumulator, &expon);
++	}
 +
-+	prev_scale = *scale;
-+	if (cpu_khz)
-+		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
++	if (exponent < 0) {
++		sign = SIGN_NEG;
++		exponent = -exponent;
++	} else
++		sign = SIGN_POS;
++	expon_accum.msw = exponent;
++	expon_accum.midw = expon_accum.lsw = 0;
++	if (exponent) {
++		expon_expon = 31 + norm_Xsig(&expon_accum);
++		shr_Xsig(&accumulator, expon_expon - expon);
 +
-+	/*
-+	 * Start smoothly with the new frequency:
-+	 */
-+	sched_clock_idle_wakeup_event(0);
-+	local_irq_restore(flags);
- }
- 
- /*
-@@ -239,7 +255,9 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
- 						ref_freq, freq->new);
- 			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
- 				tsc_khz = cpu_khz;
--				set_cyc2ns_scale(cpu_khz);
-+				preempt_disable();
-+				set_cyc2ns_scale(cpu_khz, smp_processor_id());
-+				preempt_enable();
- 				/*
- 				 * TSC based sched_clock turns
- 				 * to junk w/ cpufreq
-@@ -333,6 +351,11 @@ __cpuinit int unsynchronized_tsc(void)
- {
- 	if (!cpu_has_tsc || tsc_unstable)
- 		return 1;
++		if (sign ^ argsign)
++			negate_Xsig(&accumulator);
++		add_Xsig_Xsig(&accumulator, &expon_accum);
++	} else {
++		expon_expon = expon;
++		sign = argsign;
++	}
 +
-+	/* Anything with constant TSC should be synchronized */
-+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-+		return 0;
++	yaccum.lsw = 0;
++	XSIG_LL(yaccum) = significand(st1_ptr);
++	mul_Xsig_Xsig(&accumulator, &yaccum);
 +
- 	/*
- 	 * Intel systems are normally all synchronized.
- 	 * Exceptions must mark TSC as unstable:
-@@ -367,7 +390,9 @@ static inline void check_geode_tsc_reliable(void) { }
- 
- void __init tsc_init(void)
- {
--	if (!cpu_has_tsc || tsc_disable)
-+	int cpu;
++	expon_expon += round_Xsig(&accumulator);
 +
-+	if (!cpu_has_tsc)
- 		goto out_no_tsc;
- 
- 	cpu_khz = calculate_cpu_khz();
-@@ -380,7 +405,15 @@ void __init tsc_init(void)
- 				(unsigned long)cpu_khz / 1000,
- 				(unsigned long)cpu_khz % 1000);
- 
--	set_cyc2ns_scale(cpu_khz);
-+	/*
-+	 * Secondary CPUs do not run through tsc_init(), so set up
-+	 * all the scale factors for all CPUs, assuming the same
-+	 * speed as the bootup CPU. (cpufreq notifiers will fix this
-+	 * up if their speed diverges)
-+	 */
-+	for_each_possible_cpu(cpu)
-+		set_cyc2ns_scale(cpu_khz, cpu);
++	if (accumulator.msw == 0) {
++		FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
++		return;
++	}
 +
- 	use_tsc_delay();
- 
- 	/* Check and install the TSC clocksource */
-@@ -403,10 +436,5 @@ void __init tsc_init(void)
- 	return;
- 
- out_no_tsc:
--	/*
--	 * Set the tsc_disable flag if there's no TSC support, this
--	 * makes it a fast flag for the kernel to see whether it
--	 * should be using the TSC.
--	 */
--	tsc_disable = 1;
-+	setup_clear_cpu_cap(X86_FEATURE_TSC);
- }
-diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
-index 9c70af4..947554d 100644
---- a/arch/x86/kernel/tsc_64.c
-+++ b/arch/x86/kernel/tsc_64.c
-@@ -10,6 +10,7 @@
- 
- #include <asm/hpet.h>
- #include <asm/timex.h>
-+#include <asm/timer.h>
- 
- static int notsc __initdata = 0;
- 
-@@ -18,19 +19,51 @@ EXPORT_SYMBOL(cpu_khz);
- unsigned int tsc_khz;
- EXPORT_SYMBOL(tsc_khz);
- 
--static unsigned int cyc2ns_scale __read_mostly;
-+/* Accelerators for sched_clock()
-+ * convert from cycles(64bits) => nanoseconds (64bits)
-+ *  basic equation:
-+ *		ns = cycles / (freq / ns_per_sec)
-+ *		ns = cycles * (ns_per_sec / freq)
-+ *		ns = cycles * (10^9 / (cpu_khz * 10^3))
-+ *		ns = cycles * (10^6 / cpu_khz)
-+ *
-+ *	Then we use scaling math (suggested by george at mvista.com) to get:
-+ *		ns = cycles * (10^6 * SC / cpu_khz) / SC
-+ *		ns = cycles * cyc2ns_scale / SC
-+ *
-+ *	And since SC is a constant power of two, we can convert the div
-+ *  into a shift.
-+ *
-+ *  We can use khz divisor instead of mhz to keep a better precision, since
-+ *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
-+ *  (mathieu.desnoyers at polymtl.ca)
-+ *
-+ *			-johnstul at us.ibm.com "math is hard, lets go shopping!"
-+ */
-+DEFINE_PER_CPU(unsigned long, cyc2ns);
- 
--static inline void set_cyc2ns_scale(unsigned long khz)
-+static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
- {
--	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
--}
-+	unsigned long flags, prev_scale, *scale;
-+	unsigned long long tsc_now, ns_now;
++	significand(st1_ptr) = XSIG_LL(accumulator);
++	setexponent16(st1_ptr, expon_expon + exponent16(st1_ptr) + 1);
  
--static unsigned long long cycles_2_ns(unsigned long long cyc)
--{
--	return (cyc * cyc2ns_scale) >> NS_SCALE;
-+	local_irq_save(flags);
-+	sched_clock_idle_sleep_event();
-+
-+	scale = &per_cpu(cyc2ns, cpu);
++	tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign ^ st1_sign);
++	FPU_settagi(1, tag);
 +
-+	rdtscll(tsc_now);
-+	ns_now = __cycles_2_ns(tsc_now);
++	set_precision_flag_up();	/* 80486 appears to always do this */
 +
-+	prev_scale = *scale;
-+	if (cpu_khz)
-+		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
++	return;
 +
-+	sched_clock_idle_wakeup_event(0);
-+	local_irq_restore(flags);
- }
- 
--unsigned long long sched_clock(void)
-+unsigned long long native_sched_clock(void)
- {
- 	unsigned long a = 0;
- 
-@@ -44,12 +77,27 @@ unsigned long long sched_clock(void)
- 	return cycles_2_ns(a);
- }
- 
-+/* We need to define a real function for sched_clock, to override the
-+   weak default version */
-+#ifdef CONFIG_PARAVIRT
-+unsigned long long sched_clock(void)
-+{
-+	return paravirt_sched_clock();
 +}
-+#else
-+unsigned long long
-+sched_clock(void) __attribute__((alias("native_sched_clock")));
-+#endif
-+
-+
- static int tsc_unstable;
  
--inline int check_tsc_unstable(void)
-+int check_tsc_unstable(void)
+ /*--- poly_l2p1() -----------------------------------------------------------+
+  |   Base 2 logarithm by a polynomial approximation.                         |
+  |   log2(x+1)                                                               |
+  +---------------------------------------------------------------------------*/
+-int	poly_l2p1(u_char sign0, u_char sign1,
+-		  FPU_REG *st0_ptr, FPU_REG *st1_ptr, FPU_REG *dest)
++int poly_l2p1(u_char sign0, u_char sign1,
++	      FPU_REG * st0_ptr, FPU_REG * st1_ptr, FPU_REG * dest)
  {
- 	return tsc_unstable;
- }
-+EXPORT_SYMBOL_GPL(check_tsc_unstable);
-+
- #ifdef CONFIG_CPU_FREQ
+-  u_char             	tag;
+-  long int        	exponent;
+-  Xsig              	accumulator, yaccum;
++	u_char tag;
++	long int exponent;
++	Xsig accumulator, yaccum;
  
- /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
-@@ -100,7 +148,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- 			mark_tsc_unstable("cpufreq changes");
- 	}
+-  if ( exponent16(st0_ptr) < 0 )
+-    {
+-      log2_kernel(st0_ptr, sign0, &accumulator, &exponent);
++	if (exponent16(st0_ptr) < 0) {
++		log2_kernel(st0_ptr, sign0, &accumulator, &exponent);
  
--	set_cyc2ns_scale(tsc_khz_ref);
-+	preempt_disable();
-+	set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
-+	preempt_enable();
+-      yaccum.lsw = 0;
+-      XSIG_LL(yaccum) = significand(st1_ptr);
+-      mul_Xsig_Xsig(&accumulator, &yaccum);
++		yaccum.lsw = 0;
++		XSIG_LL(yaccum) = significand(st1_ptr);
++		mul_Xsig_Xsig(&accumulator, &yaccum);
  
- 	return 0;
- }
-@@ -133,12 +183,12 @@ static unsigned long __init tsc_read_refs(unsigned long *pm,
- 	int i;
+-      exponent += round_Xsig(&accumulator);
++		exponent += round_Xsig(&accumulator);
  
- 	for (i = 0; i < MAX_RETRIES; i++) {
--		t1 = get_cycles_sync();
-+		t1 = get_cycles();
- 		if (hpet)
- 			*hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
- 		else
- 			*pm = acpi_pm_read_early();
--		t2 = get_cycles_sync();
-+		t2 = get_cycles();
- 		if ((t2 - t1) < SMI_TRESHOLD)
- 			return t2;
- 	}
-@@ -151,7 +201,7 @@ static unsigned long __init tsc_read_refs(unsigned long *pm,
- void __init tsc_calibrate(void)
- {
- 	unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2;
--	int hpet = is_hpet_enabled();
-+	int hpet = is_hpet_enabled(), cpu;
+-      exponent += exponent16(st1_ptr) + 1;
+-      if ( exponent < EXP_WAY_UNDER ) exponent = EXP_WAY_UNDER;
++		exponent += exponent16(st1_ptr) + 1;
++		if (exponent < EXP_WAY_UNDER)
++			exponent = EXP_WAY_UNDER;
  
- 	local_irq_save(flags);
+-      significand(dest) = XSIG_LL(accumulator);
+-      setexponent16(dest, exponent);
++		significand(dest) = XSIG_LL(accumulator);
++		setexponent16(dest, exponent);
  
-@@ -162,9 +212,9 @@ void __init tsc_calibrate(void)
- 	outb(0xb0, 0x43);
- 	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
- 	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
--	tr1 = get_cycles_sync();
-+	tr1 = get_cycles();
- 	while ((inb(0x61) & 0x20) == 0);
--	tr2 = get_cycles_sync();
-+	tr2 = get_cycles();
+-      tag = FPU_round(dest, 1, 0, FULL_PRECISION, sign0 ^ sign1);
+-      FPU_settagi(1, tag);
++		tag = FPU_round(dest, 1, 0, FULL_PRECISION, sign0 ^ sign1);
++		FPU_settagi(1, tag);
  
- 	tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
+-      if ( tag == TAG_Valid )
+-	set_precision_flag_up();   /* 80486 appears to always do this */
+-    }
+-  else
+-    {
+-      /* The magnitude of st0_ptr is far too large. */
++		if (tag == TAG_Valid)
++			set_precision_flag_up();	/* 80486 appears to always do this */
++	} else {
++		/* The magnitude of st0_ptr is far too large. */
  
-@@ -206,7 +256,9 @@ void __init tsc_calibrate(void)
- 	}
+-      if ( sign0 != SIGN_POS )
+-	{
+-	  /* Trying to get the log of a negative number. */
+-#ifdef PECULIAR_486   /* Stupid 80486 doesn't worry about log(negative). */
+-	  changesign(st1_ptr);
++		if (sign0 != SIGN_POS) {
++			/* Trying to get the log of a negative number. */
++#ifdef PECULIAR_486		/* Stupid 80486 doesn't worry about log(negative). */
++			changesign(st1_ptr);
+ #else
+-	  if ( arith_invalid(1) < 0 )
+-	    return 1;
++			if (arith_invalid(1) < 0)
++				return 1;
+ #endif /* PECULIAR_486 */
+-	}
++		}
  
- 	tsc_khz = tsc2 / tsc1;
--	set_cyc2ns_scale(tsc_khz);
-+
-+	for_each_possible_cpu(cpu)
-+		set_cyc2ns_scale(tsc_khz, cpu);
- }
+-      /* 80486 appears to do this */
+-      if ( sign0 == SIGN_NEG )
+-	set_precision_flag_down();
+-      else
+-	set_precision_flag_up();
+-    }
++		/* 80486 appears to do this */
++		if (sign0 == SIGN_NEG)
++			set_precision_flag_down();
++		else
++			set_precision_flag_up();
++	}
  
- /*
-@@ -222,17 +274,9 @@ __cpuinit int unsynchronized_tsc(void)
- 	if (apic_is_clustered_box())
- 		return 1;
- #endif
--	/* Most intel systems have synchronized TSCs except for
--	   multi node systems */
--	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
--#ifdef CONFIG_ACPI
--		/* But TSC doesn't tick in C3 so don't use it there */
--		if (acpi_gbl_FADT.header.length > 0 &&
--		    acpi_gbl_FADT.C3latency < 1000)
--			return 1;
--#endif
-+
-+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
- 		return 0;
--	}
+-  if ( exponent(dest) <= EXP_UNDER )
+-    EXCEPTION(EX_Underflow);
++	if (exponent(dest) <= EXP_UNDER)
++		EXCEPTION(EX_Underflow);
  
- 	/* Assume multi socket systems are not synchronized */
- 	return num_present_cpus() > 1;
-@@ -250,13 +294,13 @@ __setup("notsc", notsc_setup);
- /* clock source code: */
- static cycle_t read_tsc(void)
- {
--	cycle_t ret = (cycle_t)get_cycles_sync();
-+	cycle_t ret = (cycle_t)get_cycles();
- 	return ret;
- }
+-  return 0;
++	return 0;
  
- static cycle_t __vsyscall_fn vread_tsc(void)
- {
--	cycle_t ret = (cycle_t)get_cycles_sync();
-+	cycle_t ret = (cycle_t)vget_cycles();
- 	return ret;
  }
  
-diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
-index 9125efe..0577825 100644
---- a/arch/x86/kernel/tsc_sync.c
-+++ b/arch/x86/kernel/tsc_sync.c
-@@ -46,7 +46,7 @@ static __cpuinit void check_tsc_warp(void)
- 	cycles_t start, now, prev, end;
- 	int i;
+-
+-
+-
+ #undef HIPOWER
+ #define	HIPOWER	10
+-static const unsigned long long logterms[HIPOWER] =
+-{
+-  0x2a8eca5705fc2ef0LL,
+-  0xf6384ee1d01febceLL,
+-  0x093bb62877cdf642LL,
+-  0x006985d8a9ec439bLL,
+-  0x0005212c4f55a9c8LL,
+-  0x00004326a16927f0LL,
+-  0x0000038d1d80a0e7LL,
+-  0x0000003141cc80c6LL,
+-  0x00000002b1668c9fLL,
+-  0x000000002c7a46aaLL
++static const unsigned long long logterms[HIPOWER] = {
++	0x2a8eca5705fc2ef0LL,
++	0xf6384ee1d01febceLL,
++	0x093bb62877cdf642LL,
++	0x006985d8a9ec439bLL,
++	0x0005212c4f55a9c8LL,
++	0x00004326a16927f0LL,
++	0x0000038d1d80a0e7LL,
++	0x0000003141cc80c6LL,
++	0x00000002b1668c9fLL,
++	0x000000002c7a46aaLL
+ };
  
--	start = get_cycles_sync();
-+	start = get_cycles();
- 	/*
- 	 * The measurement runs for 20 msecs:
- 	 */
-@@ -61,18 +61,18 @@ static __cpuinit void check_tsc_warp(void)
- 		 */
- 		__raw_spin_lock(&sync_lock);
- 		prev = last_tsc;
--		now = get_cycles_sync();
-+		now = get_cycles();
- 		last_tsc = now;
- 		__raw_spin_unlock(&sync_lock);
+ static const unsigned long leadterm = 0xb8000000;
  
- 		/*
- 		 * Be nice every now and then (and also check whether
--		 * measurement is done [we also insert a 100 million
-+		 * measurement is done [we also insert a 10 million
- 		 * loops safety exit, so we dont lock up in case the
- 		 * TSC readout is totally broken]):
- 		 */
- 		if (unlikely(!(i & 7))) {
--			if (now > end || i > 100000000)
-+			if (now > end || i > 10000000)
- 				break;
- 			cpu_relax();
- 			touch_nmi_watchdog();
-@@ -87,7 +87,11 @@ static __cpuinit void check_tsc_warp(void)
- 			nr_warps++;
- 			__raw_spin_unlock(&sync_lock);
- 		}
 -
-+	}
-+	if (!(now-start)) {
-+		printk("Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
-+			now-start, end-start);
-+		WARN_ON(1);
+ /*--- log2_kernel() ---------------------------------------------------------+
+  |   Base 2 logarithm by a polynomial approximation.                         |
+  |   log2(x+1)                                                               |
+@@ -203,70 +181,64 @@ static const unsigned long leadterm = 0xb8000000;
+ static void log2_kernel(FPU_REG const *arg, u_char argsign, Xsig *accum_result,
+ 			long int *expon)
+ {
+-  long int             exponent, adj;
+-  unsigned long long   Xsq;
+-  Xsig                 accumulator, Numer, Denom, argSignif, arg_signif;
+-
+-  exponent = exponent16(arg);
+-  Numer.lsw = Denom.lsw = 0;
+-  XSIG_LL(Numer) = XSIG_LL(Denom) = significand(arg);
+-  if ( argsign == SIGN_POS )
+-    {
+-      shr_Xsig(&Denom, 2 - (1 + exponent));
+-      Denom.msw |= 0x80000000;
+-      div_Xsig(&Numer, &Denom, &argSignif);
+-    }
+-  else
+-    {
+-      shr_Xsig(&Denom, 1 - (1 + exponent));
+-      negate_Xsig(&Denom);
+-      if ( Denom.msw & 0x80000000 )
+-	{
+-	  div_Xsig(&Numer, &Denom, &argSignif);
+-	  exponent ++;
+-	}
+-      else
+-	{
+-	  /* Denom must be 1.0 */
+-	  argSignif.lsw = Numer.lsw; argSignif.midw = Numer.midw;
+-	  argSignif.msw = Numer.msw;
++	long int exponent, adj;
++	unsigned long long Xsq;
++	Xsig accumulator, Numer, Denom, argSignif, arg_signif;
++
++	exponent = exponent16(arg);
++	Numer.lsw = Denom.lsw = 0;
++	XSIG_LL(Numer) = XSIG_LL(Denom) = significand(arg);
++	if (argsign == SIGN_POS) {
++		shr_Xsig(&Denom, 2 - (1 + exponent));
++		Denom.msw |= 0x80000000;
++		div_Xsig(&Numer, &Denom, &argSignif);
++	} else {
++		shr_Xsig(&Denom, 1 - (1 + exponent));
++		negate_Xsig(&Denom);
++		if (Denom.msw & 0x80000000) {
++			div_Xsig(&Numer, &Denom, &argSignif);
++			exponent++;
++		} else {
++			/* Denom must be 1.0 */
++			argSignif.lsw = Numer.lsw;
++			argSignif.midw = Numer.midw;
++			argSignif.msw = Numer.msw;
++		}
  	}
- }
- 
-@@ -129,24 +133,24 @@ void __cpuinit check_tsc_sync_source(int cpu)
- 	while (atomic_read(&stop_count) != cpus-1)
- 		cpu_relax();
+-    }
  
--	/*
--	 * Reset it - just in case we boot another CPU later:
--	 */
--	atomic_set(&start_count, 0);
--
- 	if (nr_warps) {
- 		printk("\n");
- 		printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs,"
- 				    " turning off TSC clock.\n", max_warp);
- 		mark_tsc_unstable("check_tsc_sync_source failed");
--		nr_warps = 0;
--		max_warp = 0;
--		last_tsc = 0;
- 	} else {
- 		printk(" passed.\n");
+ #ifndef PECULIAR_486
+-  /* Should check here that  |local_arg|  is within the valid range */
+-  if ( exponent >= -2 )
+-    {
+-      if ( (exponent > -2) ||
+-	  (argSignif.msw > (unsigned)0xafb0ccc0) )
+-	{
+-	  /* The argument is too large */
++	/* Should check here that  |local_arg|  is within the valid range */
++	if (exponent >= -2) {
++		if ((exponent > -2) || (argSignif.msw > (unsigned)0xafb0ccc0)) {
++			/* The argument is too large */
++		}
  	}
+-    }
+ #endif /* PECULIAR_486 */
  
- 	/*
-+	 * Reset it - just in case we boot another CPU later:
-+	 */
-+	atomic_set(&start_count, 0);
-+	nr_warps = 0;
-+	max_warp = 0;
-+	last_tsc = 0;
+-  arg_signif.lsw = argSignif.lsw; XSIG_LL(arg_signif) = XSIG_LL(argSignif);
+-  adj = norm_Xsig(&argSignif);
+-  accumulator.lsw = argSignif.lsw; XSIG_LL(accumulator) = XSIG_LL(argSignif);
+-  mul_Xsig_Xsig(&accumulator, &accumulator);
+-  shr_Xsig(&accumulator, 2*(-1 - (1 + exponent + adj)));
+-  Xsq = XSIG_LL(accumulator);
+-  if ( accumulator.lsw & 0x80000000 )
+-    Xsq++;
+-
+-  accumulator.msw = accumulator.midw = accumulator.lsw = 0;
+-  /* Do the basic fixed point polynomial evaluation */
+-  polynomial_Xsig(&accumulator, &Xsq, logterms, HIPOWER-1);
+-
+-  mul_Xsig_Xsig(&accumulator, &argSignif);
+-  shr_Xsig(&accumulator, 6 - adj);
+-
+-  mul32_Xsig(&arg_signif, leadterm);
+-  add_two_Xsig(&accumulator, &arg_signif, &exponent);
+-
+-  *expon = exponent + 1;
+-  accum_result->lsw = accumulator.lsw;
+-  accum_result->midw = accumulator.midw;
+-  accum_result->msw = accumulator.msw;
++	arg_signif.lsw = argSignif.lsw;
++	XSIG_LL(arg_signif) = XSIG_LL(argSignif);
++	adj = norm_Xsig(&argSignif);
++	accumulator.lsw = argSignif.lsw;
++	XSIG_LL(accumulator) = XSIG_LL(argSignif);
++	mul_Xsig_Xsig(&accumulator, &accumulator);
++	shr_Xsig(&accumulator, 2 * (-1 - (1 + exponent + adj)));
++	Xsq = XSIG_LL(accumulator);
++	if (accumulator.lsw & 0x80000000)
++		Xsq++;
 +
-+	/*
- 	 * Let the target continue with the bootup:
- 	 */
- 	atomic_inc(&stop_count);
-diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
-index 157e4be..738c210 100644
---- a/arch/x86/kernel/vm86_32.c
-+++ b/arch/x86/kernel/vm86_32.c
-@@ -70,10 +70,10 @@
- /*
-  * 8- and 16-bit register defines..
-  */
--#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
--#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
--#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
--#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))
-+#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
-+#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
-+#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
-+#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
- 
- /*
-  * virtual flags (16 and 32-bit versions)
-@@ -93,12 +93,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
- {
- 	int ret = 0;
- 
--	/* kernel_vm86_regs is missing xgs, so copy everything up to
-+	/* kernel_vm86_regs is missing gs, so copy everything up to
- 	   (but not including) orig_eax, and then rest including orig_eax. */
--	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
--	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
-+	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
-+	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
- 			    sizeof(struct kernel_vm86_regs) -
--			    offsetof(struct kernel_vm86_regs, pt.orig_eax));
-+			    offsetof(struct kernel_vm86_regs, pt.orig_ax));
++	accumulator.msw = accumulator.midw = accumulator.lsw = 0;
++	/* Do the basic fixed point polynomial evaluation */
++	polynomial_Xsig(&accumulator, &Xsq, logterms, HIPOWER - 1);
++
++	mul_Xsig_Xsig(&accumulator, &argSignif);
++	shr_Xsig(&accumulator, 6 - adj);
++
++	mul32_Xsig(&arg_signif, leadterm);
++	add_two_Xsig(&accumulator, &arg_signif, &exponent);
++
++	*expon = exponent + 1;
++	accum_result->lsw = accumulator.lsw;
++	accum_result->midw = accumulator.midw;
++	accum_result->msw = accumulator.msw;
  
- 	return ret;
  }
-@@ -110,18 +110,17 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
- {
- 	int ret = 0;
+diff --git a/arch/x86/math-emu/poly_sin.c b/arch/x86/math-emu/poly_sin.c
+index a36313f..b862039 100644
+--- a/arch/x86/math-emu/poly_sin.c
++++ b/arch/x86/math-emu/poly_sin.c
+@@ -11,7 +11,6 @@
+  |                                                                           |
+  +---------------------------------------------------------------------------*/
  
--	/* copy eax-xfs inclusive */
--	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
--	/* copy orig_eax-__gsh+extra */
--	ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
-+	/* copy ax-fs inclusive */
-+	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
-+	/* copy orig_ax-__gsh+extra */
-+	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
- 			      sizeof(struct kernel_vm86_regs) -
--			      offsetof(struct kernel_vm86_regs, pt.orig_eax) +
-+			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
- 			      extra);
- 	return ret;
- }
+-
+ #include "exception.h"
+ #include "reg_constant.h"
+ #include "fpu_emu.h"
+@@ -19,379 +18,361 @@
+ #include "control_w.h"
+ #include "poly.h"
  
--struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
--struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
-+struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
- {
- 	struct tss_struct *tss;
- 	struct pt_regs *ret;
-@@ -138,7 +137,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
- 		printk("no vm86_info: BAD\n");
- 		do_exit(SIGSEGV);
- 	}
--	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
-+	set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
- 	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
- 	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
- 	if (tmp) {
-@@ -147,15 +146,15 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
- 	}
+-
+ #define	N_COEFF_P	4
+ #define	N_COEFF_N	4
  
- 	tss = &per_cpu(init_tss, get_cpu());
--	current->thread.esp0 = current->thread.saved_esp0;
-+	current->thread.sp0 = current->thread.saved_sp0;
- 	current->thread.sysenter_cs = __KERNEL_CS;
--	load_esp0(tss, &current->thread);
--	current->thread.saved_esp0 = 0;
-+	load_sp0(tss, &current->thread);
-+	current->thread.saved_sp0 = 0;
- 	put_cpu();
+-static const unsigned long long pos_terms_l[N_COEFF_P] =
+-{
+-  0xaaaaaaaaaaaaaaabLL,
+-  0x00d00d00d00cf906LL,
+-  0x000006b99159a8bbLL,
+-  0x000000000d7392e6LL
++static const unsigned long long pos_terms_l[N_COEFF_P] = {
++	0xaaaaaaaaaaaaaaabLL,
++	0x00d00d00d00cf906LL,
++	0x000006b99159a8bbLL,
++	0x000000000d7392e6LL
+ };
  
- 	ret = KVM86->regs32;
+-static const unsigned long long neg_terms_l[N_COEFF_N] =
+-{
+-  0x2222222222222167LL,
+-  0x0002e3bc74aab624LL,
+-  0x0000000b09229062LL,
+-  0x00000000000c7973LL
++static const unsigned long long neg_terms_l[N_COEFF_N] = {
++	0x2222222222222167LL,
++	0x0002e3bc74aab624LL,
++	0x0000000b09229062LL,
++	0x00000000000c7973LL
+ };
  
--	ret->xfs = current->thread.saved_fs;
-+	ret->fs = current->thread.saved_fs;
- 	loadsegment(gs, current->thread.saved_gs);
+-
+-
+ #define	N_COEFF_PH	4
+ #define	N_COEFF_NH	4
+-static const unsigned long long pos_terms_h[N_COEFF_PH] =
+-{
+-  0x0000000000000000LL,
+-  0x05b05b05b05b0406LL,
+-  0x000049f93edd91a9LL,
+-  0x00000000c9c9ed62LL
++static const unsigned long long pos_terms_h[N_COEFF_PH] = {
++	0x0000000000000000LL,
++	0x05b05b05b05b0406LL,
++	0x000049f93edd91a9LL,
++	0x00000000c9c9ed62LL
+ };
  
- 	return ret;
-@@ -197,7 +196,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
+-static const unsigned long long neg_terms_h[N_COEFF_NH] =
+-{
+-  0xaaaaaaaaaaaaaa98LL,
+-  0x001a01a01a019064LL,
+-  0x0000008f76c68a77LL,
+-  0x0000000000d58f5eLL
++static const unsigned long long neg_terms_h[N_COEFF_NH] = {
++	0xaaaaaaaaaaaaaa98LL,
++	0x001a01a01a019064LL,
++	0x0000008f76c68a77LL,
++	0x0000000000d58f5eLL
+ };
  
- asmlinkage int sys_vm86old(struct pt_regs regs)
+-
+ /*--- poly_sine() -----------------------------------------------------------+
+  |                                                                           |
+  +---------------------------------------------------------------------------*/
+-void	poly_sine(FPU_REG *st0_ptr)
++void poly_sine(FPU_REG *st0_ptr)
  {
--	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
-+	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
- 	struct kernel_vm86_struct info; /* declare this _on top_,
- 					 * this avoids wasting of stack space.
- 					 * This remains on the stack until we
-@@ -207,7 +206,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
- 	int tmp, ret = -EPERM;
- 
- 	tsk = current;
--	if (tsk->thread.saved_esp0)
-+	if (tsk->thread.saved_sp0)
- 		goto out;
- 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
- 				       offsetof(struct kernel_vm86_struct, vm86plus) -
-@@ -237,12 +236,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
- 	struct vm86plus_struct __user *v86;
- 
- 	tsk = current;
--	switch (regs.ebx) {
-+	switch (regs.bx) {
- 		case VM86_REQUEST_IRQ:
- 		case VM86_FREE_IRQ:
- 		case VM86_GET_IRQ_BITS:
- 		case VM86_GET_AND_RESET_IRQ:
--			ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
-+			ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
- 			goto out;
- 		case VM86_PLUS_INSTALL_CHECK:
- 			/* NOTE: on old vm86 stuff this will return the error
-@@ -256,9 +255,9 @@ asmlinkage int sys_vm86(struct pt_regs regs)
+-  int                 exponent, echange;
+-  Xsig                accumulator, argSqrd, argTo4;
+-  unsigned long       fix_up, adj;
+-  unsigned long long  fixed_arg;
+-  FPU_REG	      result;
++	int exponent, echange;
++	Xsig accumulator, argSqrd, argTo4;
++	unsigned long fix_up, adj;
++	unsigned long long fixed_arg;
++	FPU_REG result;
  
- 	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
- 	ret = -EPERM;
--	if (tsk->thread.saved_esp0)
-+	if (tsk->thread.saved_sp0)
- 		goto out;
--	v86 = (struct vm86plus_struct __user *)regs.ecx;
-+	v86 = (struct vm86plus_struct __user *)regs.cx;
- 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
- 				       offsetof(struct kernel_vm86_struct, regs32) -
- 				       sizeof(info.regs));
-@@ -281,23 +280,23 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
- /*
-  * make sure the vm86() system call doesn't try to do anything silly
-  */
--	info->regs.pt.xds = 0;
--	info->regs.pt.xes = 0;
--	info->regs.pt.xfs = 0;
-+	info->regs.pt.ds = 0;
-+	info->regs.pt.es = 0;
-+	info->regs.pt.fs = 0;
+-  exponent = exponent(st0_ptr);
++	exponent = exponent(st0_ptr);
  
- /* we are clearing gs later just before "jmp resume_userspace",
-  * because it is not saved/restored.
-  */
+-  accumulator.lsw = accumulator.midw = accumulator.msw = 0;
++	accumulator.lsw = accumulator.midw = accumulator.msw = 0;
  
- /*
-- * The eflags register is also special: we cannot trust that the user
-+ * The flags register is also special: we cannot trust that the user
-  * has set it up safely, so this makes sure interrupt etc flags are
-  * inherited from protected mode.
-  */
-- 	VEFLAGS = info->regs.pt.eflags;
--	info->regs.pt.eflags &= SAFE_MASK;
--	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
--	info->regs.pt.eflags |= VM_MASK;
-+	VEFLAGS = info->regs.pt.flags;
-+	info->regs.pt.flags &= SAFE_MASK;
-+	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
-+	info->regs.pt.flags |= VM_MASK;
+-  /* Split into two ranges, for arguments below and above 1.0 */
+-  /* The boundary between upper and lower is approx 0.88309101259 */
+-  if ( (exponent < -1) || ((exponent == -1) && (st0_ptr->sigh <= 0xe21240aa)) )
+-    {
+-      /* The argument is <= 0.88309101259 */
++	/* Split into two ranges, for arguments below and above 1.0 */
++	/* The boundary between upper and lower is approx 0.88309101259 */
++	if ((exponent < -1)
++	    || ((exponent == -1) && (st0_ptr->sigh <= 0xe21240aa))) {
++		/* The argument is <= 0.88309101259 */
  
- 	switch (info->cpu_type) {
- 		case CPU_286:
-@@ -315,18 +314,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
- 	}
+-      argSqrd.msw = st0_ptr->sigh; argSqrd.midw = st0_ptr->sigl; argSqrd.lsw = 0;
+-      mul64_Xsig(&argSqrd, &significand(st0_ptr));
+-      shr_Xsig(&argSqrd, 2*(-1-exponent));
+-      argTo4.msw = argSqrd.msw; argTo4.midw = argSqrd.midw;
+-      argTo4.lsw = argSqrd.lsw;
+-      mul_Xsig_Xsig(&argTo4, &argTo4);
++		argSqrd.msw = st0_ptr->sigh;
++		argSqrd.midw = st0_ptr->sigl;
++		argSqrd.lsw = 0;
++		mul64_Xsig(&argSqrd, &significand(st0_ptr));
++		shr_Xsig(&argSqrd, 2 * (-1 - exponent));
++		argTo4.msw = argSqrd.msw;
++		argTo4.midw = argSqrd.midw;
++		argTo4.lsw = argSqrd.lsw;
++		mul_Xsig_Xsig(&argTo4, &argTo4);
  
- /*
-- * Save old state, set default return value (%eax) to 0
-+ * Save old state, set default return value (%ax) to 0
-  */
--	info->regs32->eax = 0;
--	tsk->thread.saved_esp0 = tsk->thread.esp0;
--	tsk->thread.saved_fs = info->regs32->xfs;
-+	info->regs32->ax = 0;
-+	tsk->thread.saved_sp0 = tsk->thread.sp0;
-+	tsk->thread.saved_fs = info->regs32->fs;
- 	savesegment(gs, tsk->thread.saved_gs);
+-      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
+-		      N_COEFF_N-1);
+-      mul_Xsig_Xsig(&accumulator, &argSqrd);
+-      negate_Xsig(&accumulator);
++		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
++				N_COEFF_N - 1);
++		mul_Xsig_Xsig(&accumulator, &argSqrd);
++		negate_Xsig(&accumulator);
  
- 	tss = &per_cpu(init_tss, get_cpu());
--	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
-+	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
- 	if (cpu_has_sep)
- 		tsk->thread.sysenter_cs = 0;
--	load_esp0(tss, &tsk->thread);
-+	load_sp0(tss, &tsk->thread);
- 	put_cpu();
+-      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
+-		      N_COEFF_P-1);
++		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
++				N_COEFF_P - 1);
  
- 	tsk->thread.screen_bitmap = info->screen_bitmap;
-@@ -352,7 +351,7 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
- 	struct pt_regs * regs32;
+-      shr_Xsig(&accumulator, 2);    /* Divide by four */
+-      accumulator.msw |= 0x80000000;  /* Add 1.0 */
++		shr_Xsig(&accumulator, 2);	/* Divide by four */
++		accumulator.msw |= 0x80000000;	/* Add 1.0 */
  
- 	regs32 = save_v86_state(regs16);
--	regs32->eax = retval;
-+	regs32->ax = retval;
- 	__asm__ __volatile__("movl %0,%%esp\n\t"
- 		"movl %1,%%ebp\n\t"
- 		"jmp resume_userspace"
-@@ -373,30 +372,30 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
+-      mul64_Xsig(&accumulator, &significand(st0_ptr));
+-      mul64_Xsig(&accumulator, &significand(st0_ptr));
+-      mul64_Xsig(&accumulator, &significand(st0_ptr));
++		mul64_Xsig(&accumulator, &significand(st0_ptr));
++		mul64_Xsig(&accumulator, &significand(st0_ptr));
++		mul64_Xsig(&accumulator, &significand(st0_ptr));
  
- static inline void clear_TF(struct kernel_vm86_regs * regs)
- {
--	regs->pt.eflags &= ~TF_MASK;
-+	regs->pt.flags &= ~TF_MASK;
- }
+-      /* Divide by four, FPU_REG compatible, etc */
+-      exponent = 3*exponent;
++		/* Divide by four, FPU_REG compatible, etc */
++		exponent = 3 * exponent;
  
- static inline void clear_AC(struct kernel_vm86_regs * regs)
- {
--	regs->pt.eflags &= ~AC_MASK;
-+	regs->pt.flags &= ~AC_MASK;
- }
+-      /* The minimum exponent difference is 3 */
+-      shr_Xsig(&accumulator, exponent(st0_ptr) - exponent);
++		/* The minimum exponent difference is 3 */
++		shr_Xsig(&accumulator, exponent(st0_ptr) - exponent);
  
- /* It is correct to call set_IF(regs) from the set_vflags_*
-  * functions. However someone forgot to call clear_IF(regs)
-  * in the opposite case.
-  * After the command sequence CLI PUSHF STI POPF you should
-- * end up with interrups disabled, but you ended up with
-+ * end up with interrupts disabled, but you ended up with
-  * interrupts enabled.
-  *  ( I was testing my own changes, but the only bug I
-  *    could find was in a function I had not changed. )
-  * [KD]
-  */
+-      negate_Xsig(&accumulator);
+-      XSIG_LL(accumulator) += significand(st0_ptr);
++		negate_Xsig(&accumulator);
++		XSIG_LL(accumulator) += significand(st0_ptr);
  
--static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
-+static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
- {
--	set_flags(VEFLAGS, eflags, current->thread.v86mask);
--	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
--	if (eflags & IF_MASK)
-+	set_flags(VEFLAGS, flags, current->thread.v86mask);
-+	set_flags(regs->pt.flags, flags, SAFE_MASK);
-+	if (flags & IF_MASK)
- 		set_IF(regs);
- 	else
- 		clear_IF(regs);
-@@ -405,7 +404,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
- static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
- {
- 	set_flags(VFLAGS, flags, current->thread.v86mask);
--	set_flags(regs->pt.eflags, flags, SAFE_MASK);
-+	set_flags(regs->pt.flags, flags, SAFE_MASK);
- 	if (flags & IF_MASK)
- 		set_IF(regs);
- 	else
-@@ -414,7 +413,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
+-      echange = round_Xsig(&accumulator);
++		echange = round_Xsig(&accumulator);
  
- static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
- {
--	unsigned long flags = regs->pt.eflags & RETURN_MASK;
-+	unsigned long flags = regs->pt.flags & RETURN_MASK;
+-      setexponentpos(&result, exponent(st0_ptr) + echange);
+-    }
+-  else
+-    {
+-      /* The argument is > 0.88309101259 */
+-      /* We use sin(st(0)) = cos(pi/2-st(0)) */
++		setexponentpos(&result, exponent(st0_ptr) + echange);
++	} else {
++		/* The argument is > 0.88309101259 */
++		/* We use sin(st(0)) = cos(pi/2-st(0)) */
  
- 	if (VEFLAGS & VIF_MASK)
- 		flags |= IF_MASK;
-@@ -518,7 +517,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
- 	unsigned long __user *intr_ptr;
- 	unsigned long segoffs;
+-      fixed_arg = significand(st0_ptr);
++		fixed_arg = significand(st0_ptr);
  
--	if (regs->pt.xcs == BIOSSEG)
-+	if (regs->pt.cs == BIOSSEG)
- 		goto cannot_handle;
- 	if (is_revectored(i, &KVM86->int_revectored))
- 		goto cannot_handle;
-@@ -530,9 +529,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
- 	if ((segoffs >> 16) == BIOSSEG)
- 		goto cannot_handle;
- 	pushw(ssp, sp, get_vflags(regs), cannot_handle);
--	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
-+	pushw(ssp, sp, regs->pt.cs, cannot_handle);
- 	pushw(ssp, sp, IP(regs), cannot_handle);
--	regs->pt.xcs = segoffs >> 16;
-+	regs->pt.cs = segoffs >> 16;
- 	SP(regs) -= 6;
- 	IP(regs) = segoffs & 0xffff;
- 	clear_TF(regs);
-@@ -549,7 +548,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
- 	if (VMPI.is_vm86pus) {
- 		if ( (trapno==3) || (trapno==1) )
- 			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
--		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
-+		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
- 		return 0;
- 	}
- 	if (trapno !=1)
-@@ -585,10 +584,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
- 		handle_vm86_trap(regs, 0, 1); \
- 	return; } while (0)
+-      if ( exponent == 0 )
+-	{
+-	  /* The argument is >= 1.0 */
++		if (exponent == 0) {
++			/* The argument is >= 1.0 */
  
--	orig_flags = *(unsigned short *)&regs->pt.eflags;
-+	orig_flags = *(unsigned short *)&regs->pt.flags;
+-	  /* Put the binary point at the left. */
+-	  fixed_arg <<= 1;
+-	}
+-      /* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
+-      fixed_arg = 0x921fb54442d18469LL - fixed_arg;
+-      /* There is a special case which arises due to rounding, to fix here. */
+-      if ( fixed_arg == 0xffffffffffffffffLL )
+-	fixed_arg = 0;
++			/* Put the binary point at the left. */
++			fixed_arg <<= 1;
++		}
++		/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
++		fixed_arg = 0x921fb54442d18469LL - fixed_arg;
++		/* There is a special case which arises due to rounding, to fix here. */
++		if (fixed_arg == 0xffffffffffffffffLL)
++			fixed_arg = 0;
  
--	csp = (unsigned char __user *) (regs->pt.xcs << 4);
--	ssp = (unsigned char __user *) (regs->pt.xss << 4);
-+	csp = (unsigned char __user *) (regs->pt.cs << 4);
-+	ssp = (unsigned char __user *) (regs->pt.ss << 4);
- 	sp = SP(regs);
- 	ip = IP(regs);
+-      XSIG_LL(argSqrd) = fixed_arg; argSqrd.lsw = 0;
+-      mul64_Xsig(&argSqrd, &fixed_arg);
++		XSIG_LL(argSqrd) = fixed_arg;
++		argSqrd.lsw = 0;
++		mul64_Xsig(&argSqrd, &fixed_arg);
  
-@@ -675,7 +674,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
- 			SP(regs) += 6;
- 		}
- 		IP(regs) = newip;
--		regs->pt.xcs = newcs;
-+		regs->pt.cs = newcs;
- 		CHECK_IF_IN_TRAP;
- 		if (data32) {
- 			set_vflags_long(newflags, regs);
-diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
-index f02bad6..4525bc2 100644
---- a/arch/x86/kernel/vmi_32.c
-+++ b/arch/x86/kernel/vmi_32.c
-@@ -62,7 +62,10 @@ static struct {
- 	void (*cpuid)(void /* non-c */);
- 	void (*_set_ldt)(u32 selector);
- 	void (*set_tr)(u32 selector);
--	void (*set_kernel_stack)(u32 selector, u32 esp0);
-+	void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
-+	void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
-+	void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
-+	void (*set_kernel_stack)(u32 selector, u32 sp0);
- 	void (*allocate_page)(u32, u32, u32, u32, u32);
- 	void (*release_page)(u32, u32);
- 	void (*set_pte)(pte_t, pte_t *, unsigned);
-@@ -88,13 +91,13 @@ struct vmi_timer_ops vmi_timer_ops;
- #define IRQ_PATCH_DISABLE  5
+-      XSIG_LL(argTo4) = XSIG_LL(argSqrd); argTo4.lsw = argSqrd.lsw;
+-      mul_Xsig_Xsig(&argTo4, &argTo4);
++		XSIG_LL(argTo4) = XSIG_LL(argSqrd);
++		argTo4.lsw = argSqrd.lsw;
++		mul_Xsig_Xsig(&argTo4, &argTo4);
  
- static inline void patch_offset(void *insnbuf,
--				unsigned long eip, unsigned long dest)
-+				unsigned long ip, unsigned long dest)
- {
--        *(unsigned long *)(insnbuf+1) = dest-eip-5;
-+        *(unsigned long *)(insnbuf+1) = dest-ip-5;
- }
+-      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
+-		      N_COEFF_NH-1);
+-      mul_Xsig_Xsig(&accumulator, &argSqrd);
+-      negate_Xsig(&accumulator);
++		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
++				N_COEFF_NH - 1);
++		mul_Xsig_Xsig(&accumulator, &argSqrd);
++		negate_Xsig(&accumulator);
  
- static unsigned patch_internal(int call, unsigned len, void *insnbuf,
--			       unsigned long eip)
-+			       unsigned long ip)
- {
- 	u64 reloc;
- 	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
-@@ -103,13 +106,13 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
- 		case VMI_RELOCATION_CALL_REL:
- 			BUG_ON(len < 5);
- 			*(char *)insnbuf = MNEM_CALL;
--			patch_offset(insnbuf, eip, (unsigned long)rel->eip);
-+			patch_offset(insnbuf, ip, (unsigned long)rel->eip);
- 			return 5;
+-      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
+-		      N_COEFF_PH-1);
+-      negate_Xsig(&accumulator);
++		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
++				N_COEFF_PH - 1);
++		negate_Xsig(&accumulator);
  
- 		case VMI_RELOCATION_JUMP_REL:
- 			BUG_ON(len < 5);
- 			*(char *)insnbuf = MNEM_JMP;
--			patch_offset(insnbuf, eip, (unsigned long)rel->eip);
-+			patch_offset(insnbuf, ip, (unsigned long)rel->eip);
- 			return 5;
+-      mul64_Xsig(&accumulator, &fixed_arg);
+-      mul64_Xsig(&accumulator, &fixed_arg);
++		mul64_Xsig(&accumulator, &fixed_arg);
++		mul64_Xsig(&accumulator, &fixed_arg);
  
- 		case VMI_RELOCATION_NOP:
-@@ -131,25 +134,25 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
-  * sequence.  The callee does nop padding for us.
-  */
- static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
--			  unsigned long eip, unsigned len)
-+			  unsigned long ip, unsigned len)
- {
- 	switch (type) {
- 		case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
- 			return patch_internal(VMI_CALL_DisableInterrupts, len,
--					      insns, eip);
-+					      insns, ip);
- 		case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
- 			return patch_internal(VMI_CALL_EnableInterrupts, len,
--					      insns, eip);
-+					      insns, ip);
- 		case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
- 			return patch_internal(VMI_CALL_SetInterruptMask, len,
--					      insns, eip);
-+					      insns, ip);
- 		case PARAVIRT_PATCH(pv_irq_ops.save_fl):
- 			return patch_internal(VMI_CALL_GetInterruptMask, len,
--					      insns, eip);
-+					      insns, ip);
- 		case PARAVIRT_PATCH(pv_cpu_ops.iret):
--			return patch_internal(VMI_CALL_IRET, len, insns, eip);
--		case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
--			return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
-+			return patch_internal(VMI_CALL_IRET, len, insns, ip);
-+		case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
-+			return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
- 		default:
- 			break;
- 	}
-@@ -157,36 +160,36 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
- }
+-      shr_Xsig(&accumulator, 3);
+-      negate_Xsig(&accumulator);
++		shr_Xsig(&accumulator, 3);
++		negate_Xsig(&accumulator);
  
- /* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
--static void vmi_cpuid(unsigned int *eax, unsigned int *ebx,
--                               unsigned int *ecx, unsigned int *edx)
-+static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
-+                               unsigned int *cx, unsigned int *dx)
- {
- 	int override = 0;
--	if (*eax == 1)
-+	if (*ax == 1)
- 		override = 1;
-         asm volatile ("call *%6"
--                      : "=a" (*eax),
--                        "=b" (*ebx),
--                        "=c" (*ecx),
--                        "=d" (*edx)
--                      : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid));
-+                      : "=a" (*ax),
-+                        "=b" (*bx),
-+                        "=c" (*cx),
-+                        "=d" (*dx)
-+                      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
- 	if (override) {
- 		if (disable_pse)
--			*edx &= ~X86_FEATURE_PSE;
-+			*dx &= ~X86_FEATURE_PSE;
- 		if (disable_pge)
--			*edx &= ~X86_FEATURE_PGE;
-+			*dx &= ~X86_FEATURE_PGE;
- 		if (disable_sep)
--			*edx &= ~X86_FEATURE_SEP;
-+			*dx &= ~X86_FEATURE_SEP;
- 		if (disable_tsc)
--			*edx &= ~X86_FEATURE_TSC;
-+			*dx &= ~X86_FEATURE_TSC;
- 		if (disable_mtrr)
--			*edx &= ~X86_FEATURE_MTRR;
-+			*dx &= ~X86_FEATURE_MTRR;
- 	}
- }
+-      add_Xsig_Xsig(&accumulator, &argSqrd);
++		add_Xsig_Xsig(&accumulator, &argSqrd);
  
- static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
- {
- 	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
--		write_gdt_entry(gdt, nr, new->a, new->b);
-+		write_gdt_entry(gdt, nr, new, 0);
- }
+-      shr_Xsig(&accumulator, 1);
++		shr_Xsig(&accumulator, 1);
  
- static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
-@@ -200,12 +203,12 @@ static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
- static void vmi_set_ldt(const void *addr, unsigned entries)
- {
- 	unsigned cpu = smp_processor_id();
--	u32 low, high;
-+	struct desc_struct desc;
+-      accumulator.lsw |= 1;  /* A zero accumulator here would cause problems */
+-      negate_Xsig(&accumulator);
++		accumulator.lsw |= 1;	/* A zero accumulator here would cause problems */
++		negate_Xsig(&accumulator);
  
--	pack_descriptor(&low, &high, (unsigned long)addr,
-+	pack_descriptor(&desc, (unsigned long)addr,
- 			entries * sizeof(struct desc_struct) - 1,
--			DESCTYPE_LDT, 0);
--	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high);
-+			DESC_LDT, 0);
-+	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
- 	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
- }
+-      /* The basic computation is complete. Now fix the answer to
+-	 compensate for the error due to the approximation used for
+-	 pi/2
+-	 */
++		/* The basic computation is complete. Now fix the answer to
++		   compensate for the error due to the approximation used for
++		   pi/2
++		 */
  
-@@ -214,17 +217,37 @@ static void vmi_set_tr(void)
- 	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
- }
+-      /* This has an exponent of -65 */
+-      fix_up = 0x898cc517;
+-      /* The fix-up needs to be improved for larger args */
+-      if ( argSqrd.msw & 0xffc00000 )
+-	{
+-	  /* Get about 32 bit precision in these: */
+-	  fix_up -= mul_32_32(0x898cc517, argSqrd.msw) / 6;
+-	}
+-      fix_up = mul_32_32(fix_up, LL_MSW(fixed_arg));
++		/* This has an exponent of -65 */
++		fix_up = 0x898cc517;
++		/* The fix-up needs to be improved for larger args */
++		if (argSqrd.msw & 0xffc00000) {
++			/* Get about 32 bit precision in these: */
++			fix_up -= mul_32_32(0x898cc517, argSqrd.msw) / 6;
++		}
++		fix_up = mul_32_32(fix_up, LL_MSW(fixed_arg));
  
--static void vmi_load_esp0(struct tss_struct *tss,
-+static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
-+{
-+	u32 *idt_entry = (u32 *)g;
-+	vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[2]);
-+}
-+
-+static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
-+				const void *desc, int type)
-+{
-+	u32 *gdt_entry = (u32 *)desc;
-+	vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[2]);
-+}
-+
-+static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
-+				const void *desc)
-+{
-+	u32 *ldt_entry = (u32 *)desc;
-+	vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[2]);
-+}
-+
-+static void vmi_load_sp0(struct tss_struct *tss,
- 				   struct thread_struct *thread)
- {
--	tss->x86_tss.esp0 = thread->esp0;
-+	tss->x86_tss.sp0 = thread->sp0;
+-      adj = accumulator.lsw;    /* temp save */
+-      accumulator.lsw -= fix_up;
+-      if ( accumulator.lsw > adj )
+-	XSIG_LL(accumulator) --;
++		adj = accumulator.lsw;	/* temp save */
++		accumulator.lsw -= fix_up;
++		if (accumulator.lsw > adj)
++			XSIG_LL(accumulator)--;
  
- 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
- 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
- 		tss->x86_tss.ss1 = thread->sysenter_cs;
- 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
- 	}
--	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0);
-+	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
- }
+-      echange = round_Xsig(&accumulator);
++		echange = round_Xsig(&accumulator);
  
- static void vmi_flush_tlb_user(void)
-@@ -375,7 +398,7 @@ static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
- 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
- }
+-      setexponentpos(&result, echange - 1);
+-    }
++		setexponentpos(&result, echange - 1);
++	}
  
--static void vmi_allocate_pd(u32 pfn)
-+static void vmi_allocate_pd(struct mm_struct *mm, u32 pfn)
- {
-  	/*
- 	 * This call comes in very early, before mem_map is setup.
-@@ -452,7 +475,7 @@ static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep
- static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
- {
- #ifdef CONFIG_X86_PAE
--	const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
-+	const pte_t pte = { .pte = pmdval.pmd };
- 	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
- #else
- 	const pte_t pte = { pmdval.pud.pgd.pgd };
-@@ -485,21 +508,21 @@ static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t
- static void vmi_set_pud(pud_t *pudp, pud_t pudval)
- {
- 	/* Um, eww */
--	const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
-+	const pte_t pte = { .pte = pudval.pgd.pgd };
- 	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
- 	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
- }
+-  significand(&result) = XSIG_LL(accumulator);
+-  setsign(&result, getsign(st0_ptr));
+-  FPU_copy_to_reg0(&result, TAG_Valid);
++	significand(&result) = XSIG_LL(accumulator);
++	setsign(&result, getsign(st0_ptr));
++	FPU_copy_to_reg0(&result, TAG_Valid);
  
- static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
--	const pte_t pte = { 0 };
-+	const pte_t pte = { .pte = 0 };
- 	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
- 	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
- }
+ #ifdef PARANOID
+-  if ( (exponent(&result) >= 0)
+-      && (significand(&result) > 0x8000000000000000LL) )
+-    {
+-      EXCEPTION(EX_INTERNAL|0x150);
+-    }
++	if ((exponent(&result) >= 0)
++	    && (significand(&result) > 0x8000000000000000LL)) {
++		EXCEPTION(EX_INTERNAL | 0x150);
++	}
+ #endif /* PARANOID */
  
- static void vmi_pmd_clear(pmd_t *pmd)
- {
--	const pte_t pte = { 0 };
-+	const pte_t pte = { .pte = 0 };
- 	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
- 	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
  }
-@@ -790,10 +813,13 @@ static inline int __init activate_vmi(void)
- 	para_fill(pv_cpu_ops.store_idt, GetIDT);
- 	para_fill(pv_cpu_ops.store_tr, GetTR);
- 	pv_cpu_ops.load_tls = vmi_load_tls;
--	para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
--	para_fill(pv_cpu_ops.write_gdt_entry, WriteGDTEntry);
--	para_fill(pv_cpu_ops.write_idt_entry, WriteIDTEntry);
--	para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
-+	para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
-+		  write_ldt_entry, WriteLDTEntry);
-+	para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
-+		  write_gdt_entry, WriteGDTEntry);
-+	para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
-+		  write_idt_entry, WriteIDTEntry);
-+	para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
- 	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
- 	para_fill(pv_cpu_ops.io_delay, IODelay);
- 
-@@ -870,7 +896,7 @@ static inline int __init activate_vmi(void)
- 	 * the backend.  They are performance critical anyway, so requiring
- 	 * a patch is not a big problem.
- 	 */
--	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
-+	pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
- 	pv_cpu_ops.iret = (void *)0xbadbab0;
- 
- #ifdef CONFIG_SMP
-@@ -963,19 +989,19 @@ static int __init parse_vmi(char *arg)
- 		return -EINVAL;
- 
- 	if (!strcmp(arg, "disable_pge")) {
--		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
-+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
- 		disable_pge = 1;
- 	} else if (!strcmp(arg, "disable_pse")) {
--		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
- 		disable_pse = 1;
- 	} else if (!strcmp(arg, "disable_sep")) {
--		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
-+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
- 		disable_sep = 1;
- 	} else if (!strcmp(arg, "disable_tsc")) {
--		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
- 		disable_tsc = 1;
- 	} else if (!strcmp(arg, "disable_mtrr")) {
--		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
-+		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
- 		disable_mtrr = 1;
- 	} else if (!strcmp(arg, "disable_timer")) {
- 		disable_vmi_timer = 1;
-diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
-index b1b5ab0..a2b0307 100644
---- a/arch/x86/kernel/vmiclock_32.c
-+++ b/arch/x86/kernel/vmiclock_32.c
-@@ -35,7 +35,6 @@
- #include <asm/i8253.h>
- 
- #include <irq_vectors.h>
--#include "io_ports.h"
- 
- #define VMI_ONESHOT  (VMI_ALARM_IS_ONESHOT  | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
- #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
-@@ -238,7 +237,7 @@ static void __devinit vmi_time_init_clockevent(void)
- void __init vmi_time_init(void)
- {
- 	/* Disable PIT: BIOSes start PIT CH0 with 18.2hz peridic. */
--	outb_p(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
-+	outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
- 
- 	vmi_time_init_clockevent();
- 	setup_irq(0, &vmi_clock_action);
-diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
-index 7d72cce..f1148ac 100644
---- a/arch/x86/kernel/vmlinux_32.lds.S
-+++ b/arch/x86/kernel/vmlinux_32.lds.S
-@@ -8,12 +8,6 @@
-  * put it inside the section definition.
-  */
- 
--/* Don't define absolute symbols until and unless you know that symbol
-- * value is should remain constant even if kernel image is relocated
-- * at run time. Absolute symbols are not relocated. If symbol value should
-- * change if kernel is relocated, make the symbol section relative and
-- * put it inside the section definition.
-- */
- #define LOAD_OFFSET __PAGE_OFFSET
- 
- #include <asm-generic/vmlinux.lds.h>
-@@ -44,6 +38,8 @@ SECTIONS
- 
-   /* read-only */
-   .text : AT(ADDR(.text) - LOAD_OFFSET) {
-+	. = ALIGN(4096); /* not really needed, already page aligned */
-+	*(.text.page_aligned)
- 	TEXT_TEXT
- 	SCHED_TEXT
- 	LOCK_TEXT
-@@ -131,10 +127,12 @@ SECTIONS
-   .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-   	__init_begin = .;
- 	_sinittext = .;
--	*(.init.text)
-+	INIT_TEXT
- 	_einittext = .;
-   }
--  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
-+  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
-+	INIT_DATA
-+  }
-   . = ALIGN(16);
-   .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
-   	__setup_start = .;
-@@ -169,8 +167,12 @@ SECTIONS
-   }
-   /* .exit.text is discard at runtime, not link time, to deal with references
-      from .altinstructions and .eh_frame */
--  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
--  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
-+  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
-+	EXIT_TEXT
-+  }
-+  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
-+	EXIT_DATA
-+  }
- #if defined(CONFIG_BLK_DEV_INITRD)
-   . = ALIGN(4096);
-   .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
-index ba8ea97..0992b99 100644
---- a/arch/x86/kernel/vmlinux_64.lds.S
-+++ b/arch/x86/kernel/vmlinux_64.lds.S
-@@ -37,16 +37,15 @@ SECTIONS
- 	KPROBES_TEXT
- 	*(.fixup)
- 	*(.gnu.warning)
--	} :text = 0x9090
--  				/* out-of-line lock text */
--  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
--
--  _etext = .;			/* End of text section */
-+	_etext = .;			/* End of text section */
-+  } :text = 0x9090
- 
-   . = ALIGN(16);		/* Exception table */
--  __start___ex_table = .;
--  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
--  __stop___ex_table = .;
-+  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
-+  	__start___ex_table = .;
-+	 *(__ex_table)
-+  	__stop___ex_table = .;
-+  }
- 
-   NOTES :text :note
- 
-@@ -155,12 +154,15 @@ SECTIONS
-   __init_begin = .;
-   .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
- 	_sinittext = .;
--	*(.init.text)
-+	INIT_TEXT
- 	_einittext = .;
-   }
--  __initdata_begin = .;
--  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
--  __initdata_end = .;
-+  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
-+	__initdata_begin = .;
-+	INIT_DATA
-+	__initdata_end = .;
-+   }
-+
-   . = ALIGN(16);
-   __setup_start = .;
-   .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
-@@ -176,6 +178,14 @@ SECTIONS
-   }
-   __con_initcall_end = .;
-   SECURITY_INIT
-+
-+  . = ALIGN(8);
-+  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
-+  __parainstructions = .;
-+       *(.parainstructions)
-+  __parainstructions_end = .;
-+  }
-+
-   . = ALIGN(8);
-   __alt_instructions = .;
-   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
-@@ -187,8 +197,12 @@ SECTIONS
-   }
-   /* .exit.text is discard at runtime, not link time, to deal with references
-      from .altinstructions and .eh_frame */
--  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
--  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
-+  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
-+	EXIT_TEXT
-+  }
-+  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
-+	EXIT_DATA
-+  }
- 
- /* vdso blob that is mapped into user space */
-   vdso_start = . ;
-diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
-index 414caf0..d971210 100644
---- a/arch/x86/kernel/vsmp_64.c
-+++ b/arch/x86/kernel/vsmp_64.c
-@@ -25,21 +25,24 @@ static int __init vsmp_init(void)
- 		return 0;
- 
- 	/* Check if we are running on a ScaleMP vSMP box */
--	if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) != PCI_VENDOR_ID_SCALEMP) ||
--	    (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) != PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
-+	if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) !=
-+	     PCI_VENDOR_ID_SCALEMP) ||
-+	    (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) !=
-+	     PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
- 		return 0;
- 
- 	/* set vSMP magic bits to indicate vSMP capable kernel */
- 	address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
- 	cap = readl(address);
- 	ctl = readl(address + 4);
--	printk("vSMP CTL: capabilities:0x%08x  control:0x%08x\n", cap, ctl);
-+	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x  control:0x%08x\n",
-+	       cap, ctl);
- 	if (cap & ctl & (1 << 4)) {
- 		/* Turn on vSMP IRQ fastpath handling (see system.h) */
- 		ctl &= ~(1 << 4);
- 		writel(ctl, address + 4);
- 		ctl = readl(address + 4);
--		printk("vSMP CTL: control set to:0x%08x\n", ctl);
-+		printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
- 	}
  
- 	iounmap(address);
-diff --git a/arch/x86/kernel/vsyscall-int80_32.S b/arch/x86/kernel/vsyscall-int80_32.S
-deleted file mode 100644
-index 103cab6..0000000
---- a/arch/x86/kernel/vsyscall-int80_32.S
-+++ /dev/null
-@@ -1,53 +0,0 @@
--/*
-- * Code for the vsyscall page.  This version uses the old int $0x80 method.
-- *
-- * NOTE:
-- * 1) __kernel_vsyscall _must_ be first in this page.
-- * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
-- *    for details.
-- */
--
--	.text
--	.globl __kernel_vsyscall
--	.type __kernel_vsyscall,@function
--__kernel_vsyscall:
--.LSTART_vsyscall:
--	int $0x80
--	ret
--.LEND_vsyscall:
--	.size __kernel_vsyscall,.-.LSTART_vsyscall
--	.previous
--
--	.section .eh_frame,"a", at progbits
--.LSTARTFRAMEDLSI:
--	.long .LENDCIEDLSI-.LSTARTCIEDLSI
--.LSTARTCIEDLSI:
--	.long 0			/* CIE ID */
--	.byte 1			/* Version number */
--	.string "zR"		/* NUL-terminated augmentation string */
--	.uleb128 1		/* Code alignment factor */
--	.sleb128 -4		/* Data alignment factor */
--	.byte 8			/* Return address register column */
--	.uleb128 1		/* Augmentation value length */
--	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
--	.byte 0x0c		/* DW_CFA_def_cfa */
--	.uleb128 4
--	.uleb128 4
--	.byte 0x88		/* DW_CFA_offset, column 0x8 */
--	.uleb128 1
--	.align 4
--.LENDCIEDLSI:
--	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
--.LSTARTFDEDLSI:
--	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
--	.long .LSTART_vsyscall-.	/* PC-relative start address */
--	.long .LEND_vsyscall-.LSTART_vsyscall
--	.uleb128 0
--	.align 4
--.LENDFDEDLSI:
--	.previous
--
--/*
-- * Get the common code for the sigreturn entry points.
-- */
--#include "vsyscall-sigreturn_32.S"
-diff --git a/arch/x86/kernel/vsyscall-note_32.S b/arch/x86/kernel/vsyscall-note_32.S
-deleted file mode 100644
-index fcf376a..0000000
---- a/arch/x86/kernel/vsyscall-note_32.S
-+++ /dev/null
-@@ -1,45 +0,0 @@
--/*
-- * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
-- * Here we can supply some information useful to userland.
-- */
--
--#include <linux/version.h>
--#include <linux/elfnote.h>
--
--/* Ideally this would use UTS_NAME, but using a quoted string here
--   doesn't work. Remember to change this when changing the
--   kernel's name. */
--ELFNOTE_START(Linux, 0, "a")
--	.long LINUX_VERSION_CODE
--ELFNOTE_END
--
--#ifdef CONFIG_XEN
--/*
-- * Add a special note telling glibc's dynamic linker a fake hardware
-- * flavor that it will use to choose the search path for libraries in the
-- * same way it uses real hardware capabilities like "mmx".
-- * We supply "nosegneg" as the fake capability, to indicate that we
-- * do not like negative offsets in instructions using segment overrides,
-- * since we implement those inefficiently.  This makes it possible to
-- * install libraries optimized to avoid those access patterns in someplace
-- * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
-- * corresponding to the bits here is needed to make ldconfig work right.
-- * It should contain:
-- *	hwcap 1 nosegneg
-- * to match the mapping of bit to name that we give here.
-- *
-- * At runtime, the fake hardware feature will be considered to be present
-- * if its bit is set in the mask word.  So, we start with the mask 0, and
-- * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
-- */
--
--#include "../../x86/xen/vdso.h"	/* Defines VDSO_NOTE_NONEGSEG_BIT.  */
--
--	.globl VDSO_NOTE_MASK
--ELFNOTE_START(GNU, 2, "a")
--	.long 1			/* ncaps */
--VDSO_NOTE_MASK:
--	.long 0			/* mask */
--	.byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg"	/* bit, name */
--ELFNOTE_END
--#endif
-diff --git a/arch/x86/kernel/vsyscall-sigreturn_32.S b/arch/x86/kernel/vsyscall-sigreturn_32.S
-deleted file mode 100644
-index a92262f..0000000
---- a/arch/x86/kernel/vsyscall-sigreturn_32.S
-+++ /dev/null
-@@ -1,143 +0,0 @@
--/*
-- * Common code for the sigreturn entry points on the vsyscall page.
-- * So far this code is the same for both int80 and sysenter versions.
-- * This file is #include'd by vsyscall-*.S to define them after the
-- * vsyscall entry point.  The kernel assumes that the addresses of these
-- * routines are constant for all vsyscall implementations.
-- */
--
--#include <asm/unistd.h>
--#include <asm/asm-offsets.h>
--
--
--/* XXX
--   Should these be named "_sigtramp" or something?
--*/
 -
--	.text
--	.org __kernel_vsyscall+32,0x90
--	.globl __kernel_sigreturn
--	.type __kernel_sigreturn,@function
--__kernel_sigreturn:
--.LSTART_sigreturn:
--	popl %eax		/* XXX does this mean it needs unwind info? */
--	movl $__NR_sigreturn, %eax
--	int $0x80
--.LEND_sigreturn:
--	.size __kernel_sigreturn,.-.LSTART_sigreturn
--
--	.balign 32
--	.globl __kernel_rt_sigreturn
--	.type __kernel_rt_sigreturn, at function
--__kernel_rt_sigreturn:
--.LSTART_rt_sigreturn:
--	movl $__NR_rt_sigreturn, %eax
--	int $0x80
--.LEND_rt_sigreturn:
--	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
--	.balign 32
--	.previous
 -
--	.section .eh_frame,"a", at progbits
--.LSTARTFRAMEDLSI1:
--	.long .LENDCIEDLSI1-.LSTARTCIEDLSI1
--.LSTARTCIEDLSI1:
--	.long 0			/* CIE ID */
--	.byte 1			/* Version number */
--	.string "zRS"		/* NUL-terminated augmentation string */
--	.uleb128 1		/* Code alignment factor */
--	.sleb128 -4		/* Data alignment factor */
--	.byte 8			/* Return address register column */
--	.uleb128 1		/* Augmentation value length */
--	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
--	.byte 0			/* DW_CFA_nop */
--	.align 4
--.LENDCIEDLSI1:
--	.long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */
--.LSTARTFDEDLSI1:
--	.long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */
--	/* HACK: The dwarf2 unwind routines will subtract 1 from the
--	   return address to get an address in the middle of the
--	   presumed call instruction.  Since we didn't get here via
--	   a call, we need to include the nop before the real start
--	   to make up for it.  */
--	.long .LSTART_sigreturn-1-.	/* PC-relative start address */
--	.long .LEND_sigreturn-.LSTART_sigreturn+1
--	.uleb128 0			/* Augmentation */
--	/* What follows are the instructions for the table generation.
--	   We record the locations of each register saved.  This is
--	   complicated by the fact that the "CFA" is always assumed to
--	   be the value of the stack pointer in the caller.  This means
--	   that we must define the CFA of this body of code to be the
--	   saved value of the stack pointer in the sigcontext.  Which
--	   also means that there is no fixed relation to the other 
--	   saved registers, which means that we must use DW_CFA_expression
--	   to compute their addresses.  It also means that when we 
--	   adjust the stack with the popl, we have to do it all over again.  */
+ /*--- poly_cos() ------------------------------------------------------------+
+  |                                                                           |
+  +---------------------------------------------------------------------------*/
+-void	poly_cos(FPU_REG *st0_ptr)
++void poly_cos(FPU_REG *st0_ptr)
+ {
+-  FPU_REG	      result;
+-  long int            exponent, exp2, echange;
+-  Xsig                accumulator, argSqrd, fix_up, argTo4;
+-  unsigned long long  fixed_arg;
++	FPU_REG result;
++	long int exponent, exp2, echange;
++	Xsig accumulator, argSqrd, fix_up, argTo4;
++	unsigned long long fixed_arg;
+ 
+ #ifdef PARANOID
+-  if ( (exponent(st0_ptr) > 0)
+-      || ((exponent(st0_ptr) == 0)
+-	  && (significand(st0_ptr) > 0xc90fdaa22168c234LL)) )
+-    {
+-      EXCEPTION(EX_Invalid);
+-      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
+-      return;
+-    }
+-#endif /* PARANOID */
 -
--#define do_cfa_expr(offset)						\
--	.byte 0x0f;			/* DW_CFA_def_cfa_expression */	\
--	.uleb128 1f-0f;			/*   length */			\
--0:	.byte 0x74;			/*     DW_OP_breg4 */		\
--	.sleb128 offset;		/*      offset */		\
--	.byte 0x06;			/*     DW_OP_deref */		\
--1:
+-  exponent = exponent(st0_ptr);
 -
--#define do_expr(regno, offset)						\
--	.byte 0x10;			/* DW_CFA_expression */		\
--	.uleb128 regno;			/*   regno */			\
--	.uleb128 1f-0f;			/*   length */			\
--0:	.byte 0x74;			/*     DW_OP_breg4 */		\
--	.sleb128 offset;		/*       offset */		\
--1:
+-  accumulator.lsw = accumulator.midw = accumulator.msw = 0;
 -
--	do_cfa_expr(SIGCONTEXT_esp+4)
--	do_expr(0, SIGCONTEXT_eax+4)
--	do_expr(1, SIGCONTEXT_ecx+4)
--	do_expr(2, SIGCONTEXT_edx+4)
--	do_expr(3, SIGCONTEXT_ebx+4)
--	do_expr(5, SIGCONTEXT_ebp+4)
--	do_expr(6, SIGCONTEXT_esi+4)
--	do_expr(7, SIGCONTEXT_edi+4)
--	do_expr(8, SIGCONTEXT_eip+4)
+-  if ( (exponent < -1) || ((exponent == -1) && (st0_ptr->sigh <= 0xb00d6f54)) )
+-    {
+-      /* arg is < 0.687705 */
 -
--	.byte 0x42	/* DW_CFA_advance_loc 2 -- nop; popl eax. */
+-      argSqrd.msw = st0_ptr->sigh; argSqrd.midw = st0_ptr->sigl;
+-      argSqrd.lsw = 0;
+-      mul64_Xsig(&argSqrd, &significand(st0_ptr));
 -
--	do_cfa_expr(SIGCONTEXT_esp)
--	do_expr(0, SIGCONTEXT_eax)
--	do_expr(1, SIGCONTEXT_ecx)
--	do_expr(2, SIGCONTEXT_edx)
--	do_expr(3, SIGCONTEXT_ebx)
--	do_expr(5, SIGCONTEXT_ebp)
--	do_expr(6, SIGCONTEXT_esi)
--	do_expr(7, SIGCONTEXT_edi)
--	do_expr(8, SIGCONTEXT_eip)
+-      if ( exponent < -1 )
+-	{
+-	  /* shift the argument right by the required places */
+-	  shr_Xsig(&argSqrd, 2*(-1-exponent));
++	if ((exponent(st0_ptr) > 0)
++	    || ((exponent(st0_ptr) == 0)
++		&& (significand(st0_ptr) > 0xc90fdaa22168c234LL))) {
++		EXCEPTION(EX_Invalid);
++		FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
++		return;
+ 	}
++#endif /* PARANOID */
+ 
+-      argTo4.msw = argSqrd.msw; argTo4.midw = argSqrd.midw;
+-      argTo4.lsw = argSqrd.lsw;
+-      mul_Xsig_Xsig(&argTo4, &argTo4);
 -
--	.align 4
--.LENDFDEDLSI1:
+-      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
+-		      N_COEFF_NH-1);
+-      mul_Xsig_Xsig(&accumulator, &argSqrd);
+-      negate_Xsig(&accumulator);
 -
--	.long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */
--.LSTARTFDEDLSI2:
--	.long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */
--	/* HACK: See above wrt unwind library assumptions.  */
--	.long .LSTART_rt_sigreturn-1-.	/* PC-relative start address */
--	.long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
--	.uleb128 0			/* Augmentation */
--	/* What follows are the instructions for the table generation.
--	   We record the locations of each register saved.  This is
--	   slightly less complicated than the above, since we don't
--	   modify the stack pointer in the process.  */
+-      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
+-		      N_COEFF_PH-1);
+-      negate_Xsig(&accumulator);
 -
--	do_cfa_expr(RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_esp)
--	do_expr(0, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_eax)
--	do_expr(1, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_ecx)
--	do_expr(2, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_edx)
--	do_expr(3, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_ebx)
--	do_expr(5, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_ebp)
--	do_expr(6, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_esi)
--	do_expr(7, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_edi)
--	do_expr(8, RT_SIGFRAME_sigcontext-4 + SIGCONTEXT_eip)
+-      mul64_Xsig(&accumulator, &significand(st0_ptr));
+-      mul64_Xsig(&accumulator, &significand(st0_ptr));
+-      shr_Xsig(&accumulator, -2*(1+exponent));
 -
--	.align 4
--.LENDFDEDLSI2:
--	.previous
-diff --git a/arch/x86/kernel/vsyscall-sysenter_32.S b/arch/x86/kernel/vsyscall-sysenter_32.S
-deleted file mode 100644
-index ed879bf..0000000
---- a/arch/x86/kernel/vsyscall-sysenter_32.S
-+++ /dev/null
-@@ -1,122 +0,0 @@
--/*
-- * Code for the vsyscall page.  This version uses the sysenter instruction.
-- *
-- * NOTE:
-- * 1) __kernel_vsyscall _must_ be first in this page.
-- * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
-- *    for details.
-- */
+-      shr_Xsig(&accumulator, 3);
+-      negate_Xsig(&accumulator);
 -
--/*
-- * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
-- * %ecx itself for arg2. The pushing is because the sysexit instruction
-- * (found in entry.S) requires that we clobber %ecx with the desired %esp.
-- * User code might expect that %ecx is unclobbered though, as it would be
-- * for returning via the iret instruction, so we must push and pop.
-- *
-- * The caller puts arg3 in %edx, which the sysexit instruction requires
-- * for %eip. Thus, exactly as for arg2, we must push and pop.
-- *
-- * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
-- * instruction clobbers %esp, the user's %esp won't even survive entry
-- * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
-- * arg6 from the stack.
-- *
-- * You can not use this vsyscall for the clone() syscall because the
-- * three dwords on the parent stack do not get copied to the child.
-- */
--	.text
--	.globl __kernel_vsyscall
--	.type __kernel_vsyscall,@function
--__kernel_vsyscall:
--.LSTART_vsyscall:
--	push %ecx
--.Lpush_ecx:
--	push %edx
--.Lpush_edx:
--	push %ebp
--.Lenter_kernel:
--	movl %esp,%ebp
--	sysenter
+-      add_Xsig_Xsig(&accumulator, &argSqrd);
 -
--	/* 7: align return point with nop's to make disassembly easier */
--	.space 7,0x90
+-      shr_Xsig(&accumulator, 1);
 -
--	/* 14: System call restart point is here! (SYSENTER_RETURN-2) */
--	jmp .Lenter_kernel
--	/* 16: System call normal return point is here! */
--	.globl SYSENTER_RETURN	/* Symbol used by sysenter.c  */
--SYSENTER_RETURN:
--	pop %ebp
--.Lpop_ebp:
--	pop %edx
--.Lpop_edx:
--	pop %ecx
--.Lpop_ecx:
--	ret
--.LEND_vsyscall:
--	.size __kernel_vsyscall,.-.LSTART_vsyscall
--	.previous
+-      /* It doesn't matter if accumulator is all zero here, the
+-	 following code will work ok */
+-      negate_Xsig(&accumulator);
 -
--	.section .eh_frame,"a",@progbits
--.LSTARTFRAMEDLSI:
--	.long .LENDCIEDLSI-.LSTARTCIEDLSI
--.LSTARTCIEDLSI:
--	.long 0			/* CIE ID */
--	.byte 1			/* Version number */
--	.string "zR"		/* NUL-terminated augmentation string */
--	.uleb128 1		/* Code alignment factor */
--	.sleb128 -4		/* Data alignment factor */
--	.byte 8			/* Return address register column */
--	.uleb128 1		/* Augmentation value length */
--	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
--	.byte 0x0c		/* DW_CFA_def_cfa */
--	.uleb128 4
--	.uleb128 4
--	.byte 0x88		/* DW_CFA_offset, column 0x8 */
--	.uleb128 1
--	.align 4
--.LENDCIEDLSI:
--	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
--.LSTARTFDEDLSI:
--	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
--	.long .LSTART_vsyscall-.	/* PC-relative start address */
--	.long .LEND_vsyscall-.LSTART_vsyscall
--	.uleb128 0
--	/* What follows are the instructions for the table generation.
--	   We have to record all changes of the stack pointer.  */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpush_ecx-.LSTART_vsyscall
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x08		/* RA at offset 8 now */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpush_edx-.Lpush_ecx
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x0c		/* RA at offset 12 now */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lenter_kernel-.Lpush_edx
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x10		/* RA at offset 16 now */
--	.byte 0x85, 0x04	/* DW_CFA_offset %ebp -16 */
--	/* Finally the epilogue.  */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpop_ebp-.Lenter_kernel
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x0c		/* RA at offset 12 now */
--	.byte 0xc5		/* DW_CFA_restore %ebp */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpop_edx-.Lpop_ebp
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x08		/* RA at offset 8 now */
--	.byte 0x04		/* DW_CFA_advance_loc4 */
--	.long .Lpop_ecx-.Lpop_edx
--	.byte 0x0e		/* DW_CFA_def_cfa_offset */
--	.byte 0x04		/* RA at offset 4 now */
--	.align 4
--.LENDFDEDLSI:
--	.previous
+-      if ( accumulator.lsw & 0x80000000 )
+-	XSIG_LL(accumulator) ++;
+-      if ( accumulator.msw == 0 )
+-	{
+-	  /* The result is 1.0 */
+-	  FPU_copy_to_reg0(&CONST_1, TAG_Valid);
+-	  return;
+-	}
+-      else
+-	{
+-	  significand(&result) = XSIG_LL(accumulator);
+-      
+-	  /* will be a valid positive nr with expon = -1 */
+-	  setexponentpos(&result, -1);
+-	}
+-    }
+-  else
+-    {
+-      fixed_arg = significand(st0_ptr);
 -
--/*
-- * Get the common code for the sigreturn entry points.
-- */
--#include "vsyscall-sigreturn_32.S"
-diff --git a/arch/x86/kernel/vsyscall_32.S b/arch/x86/kernel/vsyscall_32.S
-deleted file mode 100644
-index a5ab3dc..0000000
---- a/arch/x86/kernel/vsyscall_32.S
-+++ /dev/null
-@@ -1,15 +0,0 @@
--#include <linux/init.h>
+-      if ( exponent == 0 )
+-	{
+-	  /* The argument is >= 1.0 */
 -
--__INITDATA
+-	  /* Put the binary point at the left. */
+-	  fixed_arg <<= 1;
+-	}
+-      /* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
+-      fixed_arg = 0x921fb54442d18469LL - fixed_arg;
+-      /* There is a special case which arises due to rounding, to fix here. */
+-      if ( fixed_arg == 0xffffffffffffffffLL )
+-	fixed_arg = 0;
 -
--	.globl vsyscall_int80_start, vsyscall_int80_end
--vsyscall_int80_start:
--	.incbin "arch/x86/kernel/vsyscall-int80_32.so"
--vsyscall_int80_end:
+-      exponent = -1;
+-      exp2 = -1;
 -
--	.globl vsyscall_sysenter_start, vsyscall_sysenter_end
--vsyscall_sysenter_start:
--	.incbin "arch/x86/kernel/vsyscall-sysenter_32.so"
--vsyscall_sysenter_end:
+-      /* A shift is needed here only for a narrow range of arguments,
+-	 i.e. for fixed_arg approx 2^-32, but we pick up more... */
+-      if ( !(LL_MSW(fixed_arg) & 0xffff0000) )
+-	{
+-	  fixed_arg <<= 16;
+-	  exponent -= 16;
+-	  exp2 -= 16;
+-	}
 -
--__FINIT
-diff --git a/arch/x86/kernel/vsyscall_32.lds.S b/arch/x86/kernel/vsyscall_32.lds.S
-deleted file mode 100644
-index 4a8b0ed..0000000
---- a/arch/x86/kernel/vsyscall_32.lds.S
-+++ /dev/null
-@@ -1,67 +0,0 @@
--/*
-- * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
-- * object prelinked to its virtual address, and with only one read-only
-- * segment (that fits in one page).  This script controls its layout.
-- */
--#include <asm/asm-offsets.h>
+-      XSIG_LL(argSqrd) = fixed_arg; argSqrd.lsw = 0;
+-      mul64_Xsig(&argSqrd, &fixed_arg);
 -
--SECTIONS
--{
--  . = VDSO_PRELINK_asm + SIZEOF_HEADERS;
+-      if ( exponent < -1 )
+-	{
+-	  /* shift the argument right by the required places */
+-	  shr_Xsig(&argSqrd, 2*(-1-exponent));
+-	}
 -
--  .hash           : { *(.hash) }		:text
--  .gnu.hash       : { *(.gnu.hash) }
--  .dynsym         : { *(.dynsym) }
--  .dynstr         : { *(.dynstr) }
--  .gnu.version    : { *(.gnu.version) }
--  .gnu.version_d  : { *(.gnu.version_d) }
--  .gnu.version_r  : { *(.gnu.version_r) }
+-      argTo4.msw = argSqrd.msw; argTo4.midw = argSqrd.midw;
+-      argTo4.lsw = argSqrd.lsw;
+-      mul_Xsig_Xsig(&argTo4, &argTo4);
 -
--  /* This linker script is used both with -r and with -shared.
--     For the layouts to match, we need to skip more than enough
--     space for the dynamic symbol table et al.  If this amount
--     is insufficient, ld -shared will barf.  Just increase it here.  */
--  . = VDSO_PRELINK_asm + 0x400;
+-      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
+-		      N_COEFF_N-1);
+-      mul_Xsig_Xsig(&accumulator, &argSqrd);
+-      negate_Xsig(&accumulator);
 -
--  .text           : { *(.text) }		:text =0x90909090
--  .note		  : { *(.note.*) }		:text :note
--  .eh_frame_hdr   : { *(.eh_frame_hdr) }	:text :eh_frame_hdr
--  .eh_frame       : { KEEP (*(.eh_frame)) }	:text
--  .dynamic        : { *(.dynamic) }		:text :dynamic
--  .useless        : {
--  	*(.got.plt) *(.got)
--	*(.data .data.* .gnu.linkonce.d.*)
--	*(.dynbss)
--	*(.bss .bss.* .gnu.linkonce.b.*)
--  }						:text
--}
+-      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
+-		      N_COEFF_P-1);
 -
--/*
-- * We must supply the ELF program headers explicitly to get just one
-- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
-- */
--PHDRS
--{
--  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
--  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
--  note PT_NOTE FLAGS(4); /* PF_R */
--  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
--}
+-      shr_Xsig(&accumulator, 2);    /* Divide by four */
+-      accumulator.msw |= 0x80000000;  /* Add 1.0 */
 -
--/*
-- * This controls what symbols we export from the DSO.
-- */
--VERSION
--{
--  LINUX_2.5 {
--    global:
--    	__kernel_vsyscall;
--    	__kernel_sigreturn;
--    	__kernel_rt_sigreturn;
+-      mul64_Xsig(&accumulator, &fixed_arg);
+-      mul64_Xsig(&accumulator, &fixed_arg);
+-      mul64_Xsig(&accumulator, &fixed_arg);
 -
--    local: *;
--  };
--}
+-      /* Divide by four, FPU_REG compatible, etc */
+-      exponent = 3*exponent;
 -
--/* The ELF entry point can be used to set the AT_SYSINFO value.  */
--ENTRY(__kernel_vsyscall);
-diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
-index ad4005c..3f82427 100644
---- a/arch/x86/kernel/vsyscall_64.c
-+++ b/arch/x86/kernel/vsyscall_64.c
-@@ -43,7 +43,7 @@
- #include <asm/vgtod.h>
- 
- #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
--#define __syscall_clobber "r11","rcx","memory"
-+#define __syscall_clobber "r11","cx","memory"
- #define __pa_vsymbol(x)			\
- 	({unsigned long v;  		\
- 	extern char __vsyscall_0; 	\
-@@ -190,7 +190,7 @@ time_t __vsyscall(1) vtime(time_t *t)
- long __vsyscall(2)
- vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
- {
--	unsigned int dummy, p;
-+	unsigned int p;
- 	unsigned long j = 0;
- 
- 	/* Fast cache - only recompute value once per jiffies and avoid
-@@ -205,7 +205,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
- 		p = tcache->blob[1];
- 	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
- 		/* Load per CPU data from RDTSCP */
--		rdtscp(dummy, dummy, p);
-+		native_read_tscp(&p);
- 	} else {
- 		/* Load per CPU data from GDT */
- 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
-@@ -297,7 +297,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
- 	/* Store cpu number in limit so that it can be loaded quickly
- 	   in user space in vgetcpu.
- 	   12 bits for the CPU and 8 bits for the node. */
--	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
-+	d = (unsigned long *)(get_cpu_gdt_table(cpu) + GDT_ENTRY_PER_CPU);
- 	*d = 0x0f40000000000ULL;
- 	*d |= cpu;
- 	*d |= (node & 0xf) << 12;
-@@ -319,7 +319,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
- 	return NOTIFY_DONE;
- }
- 
--static void __init map_vsyscall(void)
-+void __init map_vsyscall(void)
- {
- 	extern char __vsyscall_0;
- 	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
-@@ -335,7 +335,6 @@ static int __init vsyscall_init(void)
- 	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
- 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
- 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
--	map_vsyscall();
- #ifdef CONFIG_SYSCTL
- 	register_sysctl_table(kernel_root_table2);
- #endif
-diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
-index 77c25b3..a66e9c1 100644
---- a/arch/x86/kernel/x8664_ksyms_64.c
-+++ b/arch/x86/kernel/x8664_ksyms_64.c
-@@ -8,6 +8,7 @@
- #include <asm/processor.h>
- #include <asm/uaccess.h>
- #include <asm/pgtable.h>
-+#include <asm/desc.h>
- 
- EXPORT_SYMBOL(kernel_thread);
- 
-@@ -34,13 +35,6 @@ EXPORT_SYMBOL(__copy_from_user_inatomic);
- EXPORT_SYMBOL(copy_page);
- EXPORT_SYMBOL(clear_page);
- 
--#ifdef CONFIG_SMP
--extern void  __write_lock_failed(rwlock_t *rw);
--extern void  __read_lock_failed(rwlock_t *rw);
--EXPORT_SYMBOL(__write_lock_failed);
--EXPORT_SYMBOL(__read_lock_failed);
--#endif
+-      /* The minimum exponent difference is 3 */
+-      shr_Xsig(&accumulator, exp2 - exponent);
 -
- /* Export string functions. We normally rely on gcc builtin for most of these,
-    but gcc sometimes decides not to inline them. */    
- #undef memcpy
-@@ -60,3 +54,8 @@ EXPORT_SYMBOL(init_level4_pgt);
- EXPORT_SYMBOL(load_gs_index);
- 
- EXPORT_SYMBOL(_proxy_pda);
+-      negate_Xsig(&accumulator);
+-      XSIG_LL(accumulator) += fixed_arg;
+-
+-      /* The basic computation is complete. Now fix the answer to
+-	 compensate for the error due to the approximation used for
+-	 pi/2
+-	 */
+-
+-      /* This has an exponent of -65 */
+-      XSIG_LL(fix_up) = 0x898cc51701b839a2ll;
+-      fix_up.lsw = 0;
+-
+-      /* The fix-up needs to be improved for larger args */
+-      if ( argSqrd.msw & 0xffc00000 )
+-	{
+-	  /* Get about 32 bit precision in these: */
+-	  fix_up.msw -= mul_32_32(0x898cc517, argSqrd.msw) / 2;
+-	  fix_up.msw += mul_32_32(0x898cc517, argTo4.msw) / 24;
++	exponent = exponent(st0_ptr);
 +
-+#ifdef CONFIG_PARAVIRT
-+/* Virtualized guests may want to use it */
-+EXPORT_SYMBOL_GPL(cpu_gdt_descr);
-+#endif
-diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
-index 19626ac..964dfa3 100644
---- a/arch/x86/lguest/Kconfig
-+++ b/arch/x86/lguest/Kconfig
-@@ -1,6 +1,7 @@
- config LGUEST_GUEST
- 	bool "Lguest guest support"
- 	select PARAVIRT
-+	depends on X86_32
- 	depends on !X86_PAE
- 	depends on !(X86_VISWS || X86_VOYAGER)
- 	select VIRTIO
-diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
-index 92c5611..a633737 100644
---- a/arch/x86/lguest/boot.c
-+++ b/arch/x86/lguest/boot.c
-@@ -175,8 +175,8 @@ static void lguest_leave_lazy_mode(void)
-  * check there when it wants to deliver an interrupt.
-  */
- 
--/* save_flags() is expected to return the processor state (ie. "eflags").  The
-- * eflags word contains all kind of stuff, but in practice Linux only cares
-+/* save_flags() is expected to return the processor state (ie. "flags").  The
-+ * flags word contains all kind of stuff, but in practice Linux only cares
-  * about the interrupt flag.  Our "save_flags()" just returns that. */
- static unsigned long save_fl(void)
- {
-@@ -217,19 +217,20 @@ static void irq_enable(void)
-  * address of the handler, and... well, who cares?  The Guest just asks the
-  * Host to make the change anyway, because the Host controls the real IDT.
-  */
--static void lguest_write_idt_entry(struct desc_struct *dt,
--				   int entrynum, u32 low, u32 high)
-+static void lguest_write_idt_entry(gate_desc *dt,
-+				   int entrynum, const gate_desc *g)
- {
-+	u32 *desc = (u32 *)g;
- 	/* Keep the local copy up to date. */
--	write_dt_entry(dt, entrynum, low, high);
-+	native_write_idt_entry(dt, entrynum, g);
- 	/* Tell Host about this new entry. */
--	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, low, high);
-+	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
- }
- 
- /* Changing to a different IDT is very rare: we keep the IDT up-to-date every
-  * time it is written, so we can simply loop through all entries and tell the
-  * Host about them. */
--static void lguest_load_idt(const struct Xgt_desc_struct *desc)
-+static void lguest_load_idt(const struct desc_ptr *desc)
- {
- 	unsigned int i;
- 	struct desc_struct *idt = (void *)desc->address;
-@@ -252,7 +253,7 @@ static void lguest_load_idt(const struct Xgt_desc_struct *desc)
-  * hypercall and use that repeatedly to load a new IDT.  I don't think it
-  * really matters, but wouldn't it be nice if they were the same?
-  */
--static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
-+static void lguest_load_gdt(const struct desc_ptr *desc)
- {
- 	BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
- 	hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
-@@ -261,10 +262,10 @@ static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
- /* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
-  * then tell the Host to reload the entire thing.  This operation is so rare
-  * that this naive implementation is reasonable. */
--static void lguest_write_gdt_entry(struct desc_struct *dt,
--				   int entrynum, u32 low, u32 high)
-+static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
-+				   const void *desc, int type)
- {
--	write_dt_entry(dt, entrynum, low, high);
-+	native_write_gdt_entry(dt, entrynum, desc, type);
- 	hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
- }
- 
-@@ -323,30 +324,30 @@ static void lguest_load_tr_desc(void)
-  * anyone (including userspace) can just use the raw "cpuid" instruction and
-  * the Host won't even notice since it isn't privileged.  So we try not to get
-  * too worked up about it. */
--static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
--			 unsigned int *ecx, unsigned int *edx)
-+static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
-+			 unsigned int *cx, unsigned int *dx)
- {
--	int function = *eax;
-+	int function = *ax;
- 
--	native_cpuid(eax, ebx, ecx, edx);
-+	native_cpuid(ax, bx, cx, dx);
- 	switch (function) {
- 	case 1:	/* Basic feature request. */
- 		/* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
--		*ecx &= 0x00002201;
-+		*cx &= 0x00002201;
- 		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
--		*edx &= 0x07808101;
-+		*dx &= 0x07808101;
- 		/* The Host can do a nice optimization if it knows that the
- 		 * kernel mappings (addresses above 0xC0000000 or whatever
- 		 * PAGE_OFFSET is set to) haven't changed.  But Linux calls
- 		 * flush_tlb_user() for both user and kernel mappings unless
- 		 * the Page Global Enable (PGE) feature bit is set. */
--		*edx |= 0x00002000;
-+		*dx |= 0x00002000;
- 		break;
- 	case 0x80000000:
- 		/* Futureproof this a little: if they ask how much extended
- 		 * processor information there is, limit it to known fields. */
--		if (*eax > 0x80000008)
--			*eax = 0x80000008;
-+		if (*ax > 0x80000008)
-+			*ax = 0x80000008;
- 		break;
- 	}
- }
-@@ -755,10 +756,10 @@ static void lguest_time_init(void)
-  * segment), the privilege level (we're privilege level 1, the Host is 0 and
-  * will not tolerate us trying to use that), the stack pointer, and the number
-  * of pages in the stack. */
--static void lguest_load_esp0(struct tss_struct *tss,
-+static void lguest_load_sp0(struct tss_struct *tss,
- 				     struct thread_struct *thread)
- {
--	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->esp0,
-+	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0,
- 		   THREAD_SIZE/PAGE_SIZE);
- }
- 
-@@ -788,11 +789,11 @@ static void lguest_wbinvd(void)
-  * code qualifies for Advanced.  It will also never interrupt anything.  It
-  * does, however, allow us to get through the Linux boot code. */
- #ifdef CONFIG_X86_LOCAL_APIC
--static void lguest_apic_write(unsigned long reg, unsigned long v)
-+static void lguest_apic_write(unsigned long reg, u32 v)
- {
- }
- 
--static unsigned long lguest_apic_read(unsigned long reg)
-+static u32 lguest_apic_read(unsigned long reg)
- {
- 	return 0;
- }
-@@ -957,7 +958,7 @@ __init void lguest_init(void)
- 	pv_cpu_ops.cpuid = lguest_cpuid;
- 	pv_cpu_ops.load_idt = lguest_load_idt;
- 	pv_cpu_ops.iret = lguest_iret;
--	pv_cpu_ops.load_esp0 = lguest_load_esp0;
-+	pv_cpu_ops.load_sp0 = lguest_load_sp0;
- 	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
- 	pv_cpu_ops.set_ldt = lguest_set_ldt;
- 	pv_cpu_ops.load_tls = lguest_load_tls;
-diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
-index 329da27..4876182 100644
---- a/arch/x86/lib/Makefile
-+++ b/arch/x86/lib/Makefile
-@@ -1,5 +1,27 @@
-+#
-+# Makefile for x86 specific library files.
-+#
++	accumulator.lsw = accumulator.midw = accumulator.msw = 0;
 +
-+obj-$(CONFIG_SMP) := msr-on-cpu.o
++	if ((exponent < -1)
++	    || ((exponent == -1) && (st0_ptr->sigh <= 0xb00d6f54))) {
++		/* arg is < 0.687705 */
 +
-+lib-y := delay_$(BITS).o
-+lib-y += usercopy_$(BITS).o getuser_$(BITS).o putuser_$(BITS).o
-+lib-y += memcpy_$(BITS).o
++		argSqrd.msw = st0_ptr->sigh;
++		argSqrd.midw = st0_ptr->sigl;
++		argSqrd.lsw = 0;
++		mul64_Xsig(&argSqrd, &significand(st0_ptr));
 +
- ifeq ($(CONFIG_X86_32),y)
--include ${srctree}/arch/x86/lib/Makefile_32
-+        lib-y += checksum_32.o
-+        lib-y += strstr_32.o
-+        lib-y += bitops_32.o semaphore_32.o string_32.o
++		if (exponent < -1) {
++			/* shift the argument right by the required places */
++			shr_Xsig(&argSqrd, 2 * (-1 - exponent));
++		}
 +
-+        lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
- else
--include ${srctree}/arch/x86/lib/Makefile_64
-+        obj-y += io_64.o iomap_copy_64.o
++		argTo4.msw = argSqrd.msw;
++		argTo4.midw = argSqrd.midw;
++		argTo4.lsw = argSqrd.lsw;
++		mul_Xsig_Xsig(&argTo4, &argTo4);
 +
-+        CFLAGS_csum-partial_64.o := -funroll-loops
++		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
++				N_COEFF_NH - 1);
++		mul_Xsig_Xsig(&accumulator, &argSqrd);
++		negate_Xsig(&accumulator);
 +
-+        lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
-+        lib-y += thunk_64.o clear_page_64.o copy_page_64.o
-+        lib-y += bitstr_64.o bitops_64.o
-+        lib-y += memmove_64.o memset_64.o
-+        lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
- endif
-diff --git a/arch/x86/lib/Makefile_32 b/arch/x86/lib/Makefile_32
-deleted file mode 100644
-index 98d1f1e..0000000
---- a/arch/x86/lib/Makefile_32
-+++ /dev/null
-@@ -1,11 +0,0 @@
--#
--# Makefile for i386-specific library files..
--#
--
--
--lib-y = checksum_32.o delay_32.o usercopy_32.o getuser_32.o putuser_32.o memcpy_32.o strstr_32.o \
--	bitops_32.o semaphore_32.o string_32.o
--
--lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
++		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
++				N_COEFF_PH - 1);
++		negate_Xsig(&accumulator);
++
++		mul64_Xsig(&accumulator, &significand(st0_ptr));
++		mul64_Xsig(&accumulator, &significand(st0_ptr));
++		shr_Xsig(&accumulator, -2 * (1 + exponent));
++
++		shr_Xsig(&accumulator, 3);
++		negate_Xsig(&accumulator);
++
++		add_Xsig_Xsig(&accumulator, &argSqrd);
++
++		shr_Xsig(&accumulator, 1);
++
++		/* It doesn't matter if accumulator is all zero here, the
++		   following code will work ok */
++		negate_Xsig(&accumulator);
++
++		if (accumulator.lsw & 0x80000000)
++			XSIG_LL(accumulator)++;
++		if (accumulator.msw == 0) {
++			/* The result is 1.0 */
++			FPU_copy_to_reg0(&CONST_1, TAG_Valid);
++			return;
++		} else {
++			significand(&result) = XSIG_LL(accumulator);
++
++			/* will be a valid positive nr with expon = -1 */
++			setexponentpos(&result, -1);
++		}
++	} else {
++		fixed_arg = significand(st0_ptr);
++
++		if (exponent == 0) {
++			/* The argument is >= 1.0 */
++
++			/* Put the binary point at the left. */
++			fixed_arg <<= 1;
++		}
++		/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
++		fixed_arg = 0x921fb54442d18469LL - fixed_arg;
++		/* There is a special case which arises due to rounding, to fix here. */
++		if (fixed_arg == 0xffffffffffffffffLL)
++			fixed_arg = 0;
++
++		exponent = -1;
++		exp2 = -1;
++
++		/* A shift is needed here only for a narrow range of arguments,
++		   i.e. for fixed_arg approx 2^-32, but we pick up more... */
++		if (!(LL_MSW(fixed_arg) & 0xffff0000)) {
++			fixed_arg <<= 16;
++			exponent -= 16;
++			exp2 -= 16;
++		}
++
++		XSIG_LL(argSqrd) = fixed_arg;
++		argSqrd.lsw = 0;
++		mul64_Xsig(&argSqrd, &fixed_arg);
++
++		if (exponent < -1) {
++			/* shift the argument right by the required places */
++			shr_Xsig(&argSqrd, 2 * (-1 - exponent));
++		}
++
++		argTo4.msw = argSqrd.msw;
++		argTo4.midw = argSqrd.midw;
++		argTo4.lsw = argSqrd.lsw;
++		mul_Xsig_Xsig(&argTo4, &argTo4);
++
++		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
++				N_COEFF_N - 1);
++		mul_Xsig_Xsig(&accumulator, &argSqrd);
++		negate_Xsig(&accumulator);
++
++		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
++				N_COEFF_P - 1);
++
++		shr_Xsig(&accumulator, 2);	/* Divide by four */
++		accumulator.msw |= 0x80000000;	/* Add 1.0 */
++
++		mul64_Xsig(&accumulator, &fixed_arg);
++		mul64_Xsig(&accumulator, &fixed_arg);
++		mul64_Xsig(&accumulator, &fixed_arg);
++
++		/* Divide by four, FPU_REG compatible, etc */
++		exponent = 3 * exponent;
++
++		/* The minimum exponent difference is 3 */
++		shr_Xsig(&accumulator, exp2 - exponent);
++
++		negate_Xsig(&accumulator);
++		XSIG_LL(accumulator) += fixed_arg;
++
++		/* The basic computation is complete. Now fix the answer to
++		   compensate for the error due to the approximation used for
++		   pi/2
++		 */
++
++		/* This has an exponent of -65 */
++		XSIG_LL(fix_up) = 0x898cc51701b839a2ll;
++		fix_up.lsw = 0;
++
++		/* The fix-up needs to be improved for larger args */
++		if (argSqrd.msw & 0xffc00000) {
++			/* Get about 32 bit precision in these: */
++			fix_up.msw -= mul_32_32(0x898cc517, argSqrd.msw) / 2;
++			fix_up.msw += mul_32_32(0x898cc517, argTo4.msw) / 24;
++		}
++
++		exp2 += norm_Xsig(&accumulator);
++		shr_Xsig(&accumulator, 1);	/* Prevent overflow */
++		exp2++;
++		shr_Xsig(&fix_up, 65 + exp2);
++
++		add_Xsig_Xsig(&accumulator, &fix_up);
++
++		echange = round_Xsig(&accumulator);
++
++		setexponentpos(&result, exp2 + echange);
++		significand(&result) = XSIG_LL(accumulator);
+ 	}
+ 
+-      exp2 += norm_Xsig(&accumulator);
+-      shr_Xsig(&accumulator, 1); /* Prevent overflow */
+-      exp2++;
+-      shr_Xsig(&fix_up, 65 + exp2);
 -
--obj-$(CONFIG_SMP)	+= msr-on-cpu.o
-diff --git a/arch/x86/lib/Makefile_64 b/arch/x86/lib/Makefile_64
-deleted file mode 100644
-index bbabad3..0000000
---- a/arch/x86/lib/Makefile_64
-+++ /dev/null
-@@ -1,13 +0,0 @@
--#
--# Makefile for x86_64-specific library files.
--#
+-      add_Xsig_Xsig(&accumulator, &fix_up);
 -
--CFLAGS_csum-partial_64.o := -funroll-loops
+-      echange = round_Xsig(&accumulator);
 -
--obj-y := io_64.o iomap_copy_64.o
--obj-$(CONFIG_SMP)	+= msr-on-cpu.o
+-      setexponentpos(&result, exp2 + echange);
+-      significand(&result) = XSIG_LL(accumulator);
+-    }
 -
--lib-y := csum-partial_64.o csum-copy_64.o csum-wrappers_64.o delay_64.o \
--	usercopy_64.o getuser_64.o putuser_64.o  \
--	thunk_64.o clear_page_64.o copy_page_64.o bitstr_64.o bitops_64.o
--lib-y += memcpy_64.o memmove_64.o memset_64.o copy_user_64.o rwlock_64.o copy_user_nocache_64.o
-diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
-index 8ac51b8..37756b6 100644
---- a/arch/x86/lib/memcpy_32.c
-+++ b/arch/x86/lib/memcpy_32.c
-@@ -34,8 +34,8 @@ void *memmove(void *dest, const void *src, size_t n)
- 			"cld"
- 			: "=&c" (d0), "=&S" (d1), "=&D" (d2)
- 			:"0" (n),
--			 "1" (n-1+(const char *)src),
--			 "2" (n-1+(char *)dest)
-+			 "1" (n-1+src),
-+			 "2" (n-1+dest)
- 			:"memory");
- 	}
- 	return dest;
-diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c
-index 751ebae..80175e4 100644
---- a/arch/x86/lib/memmove_64.c
-+++ b/arch/x86/lib/memmove_64.c
-@@ -11,8 +11,8 @@ void *memmove(void * dest,const void *src,size_t count)
- 	if (dest < src) { 
- 		return memcpy(dest,src,count);
- 	} else {
--		char *p = (char *) dest + count;
--		char *s = (char *) src + count;
-+		char *p = dest + count;
-+		const char *s = src + count;
- 		while (count--)
- 			*--p = *--s;
- 	}
-diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
-index 444fba4..3899bd3 100644
---- a/arch/x86/lib/semaphore_32.S
-+++ b/arch/x86/lib/semaphore_32.S
-@@ -29,7 +29,7 @@
-  * registers (%eax, %edx and %ecx) except %eax whish is either a return
-  * value or just clobbered..
-  */
--	.section .sched.text
-+	.section .sched.text, "ax"
- ENTRY(__down_failed)
- 	CFI_STARTPROC
- 	FRAME
-@@ -49,7 +49,7 @@ ENTRY(__down_failed)
- 	ENDFRAME
- 	ret
- 	CFI_ENDPROC
--	END(__down_failed)
-+	ENDPROC(__down_failed)
- 
- ENTRY(__down_failed_interruptible)
- 	CFI_STARTPROC
-@@ -70,7 +70,7 @@ ENTRY(__down_failed_interruptible)
- 	ENDFRAME
- 	ret
- 	CFI_ENDPROC
--	END(__down_failed_interruptible)
-+	ENDPROC(__down_failed_interruptible)
+-  FPU_copy_to_reg0(&result, TAG_Valid);
++	FPU_copy_to_reg0(&result, TAG_Valid);
  
- ENTRY(__down_failed_trylock)
- 	CFI_STARTPROC
-@@ -91,7 +91,7 @@ ENTRY(__down_failed_trylock)
- 	ENDFRAME
- 	ret
- 	CFI_ENDPROC
--	END(__down_failed_trylock)
-+	ENDPROC(__down_failed_trylock)
+ #ifdef PARANOID
+-  if ( (exponent(&result) >= 0)
+-      && (significand(&result) > 0x8000000000000000LL) )
+-    {
+-      EXCEPTION(EX_INTERNAL|0x151);
+-    }
++	if ((exponent(&result) >= 0)
++	    && (significand(&result) > 0x8000000000000000LL)) {
++		EXCEPTION(EX_INTERNAL | 0x151);
++	}
+ #endif /* PARANOID */
  
- ENTRY(__up_wakeup)
- 	CFI_STARTPROC
-@@ -112,7 +112,7 @@ ENTRY(__up_wakeup)
- 	ENDFRAME
- 	ret
- 	CFI_ENDPROC
--	END(__up_wakeup)
-+	ENDPROC(__up_wakeup)
+ }
+diff --git a/arch/x86/math-emu/poly_tan.c b/arch/x86/math-emu/poly_tan.c
+index 8df3e03..1875763 100644
+--- a/arch/x86/math-emu/poly_tan.c
++++ b/arch/x86/math-emu/poly_tan.c
+@@ -17,206 +17,196 @@
+ #include "control_w.h"
+ #include "poly.h"
  
- /*
-  * rw spinlock fallbacks
-@@ -132,7 +132,7 @@ ENTRY(__write_lock_failed)
- 	ENDFRAME
- 	ret
- 	CFI_ENDPROC
--	END(__write_lock_failed)
-+	ENDPROC(__write_lock_failed)
+-
+ #define	HiPOWERop	3	/* odd poly, positive terms */
+-static const unsigned long long oddplterm[HiPOWERop] =
+-{
+-  0x0000000000000000LL,
+-  0x0051a1cf08fca228LL,
+-  0x0000000071284ff7LL
++static const unsigned long long oddplterm[HiPOWERop] = {
++	0x0000000000000000LL,
++	0x0051a1cf08fca228LL,
++	0x0000000071284ff7LL
+ };
  
- ENTRY(__read_lock_failed)
- 	CFI_STARTPROC
-@@ -148,7 +148,7 @@ ENTRY(__read_lock_failed)
- 	ENDFRAME
- 	ret
- 	CFI_ENDPROC
--	END(__read_lock_failed)
-+	ENDPROC(__read_lock_failed)
+ #define	HiPOWERon	2	/* odd poly, negative terms */
+-static const unsigned long long oddnegterm[HiPOWERon] =
+-{
+-   0x1291a9a184244e80LL,
+-   0x0000583245819c21LL
++static const unsigned long long oddnegterm[HiPOWERon] = {
++	0x1291a9a184244e80LL,
++	0x0000583245819c21LL
+ };
  
- #endif
+ #define	HiPOWERep	2	/* even poly, positive terms */
+-static const unsigned long long evenplterm[HiPOWERep] =
+-{
+-  0x0e848884b539e888LL,
+-  0x00003c7f18b887daLL
++static const unsigned long long evenplterm[HiPOWERep] = {
++	0x0e848884b539e888LL,
++	0x00003c7f18b887daLL
+ };
  
-@@ -170,7 +170,7 @@ ENTRY(call_rwsem_down_read_failed)
- 	CFI_ADJUST_CFA_OFFSET -4
- 	ret
- 	CFI_ENDPROC
--	END(call_rwsem_down_read_failed)
-+	ENDPROC(call_rwsem_down_read_failed)
+ #define	HiPOWERen	2	/* even poly, negative terms */
+-static const unsigned long long evennegterm[HiPOWERen] =
+-{
+-  0xf1f0200fd51569ccLL,
+-  0x003afb46105c4432LL
++static const unsigned long long evennegterm[HiPOWERen] = {
++	0xf1f0200fd51569ccLL,
++	0x003afb46105c4432LL
+ };
  
- ENTRY(call_rwsem_down_write_failed)
- 	CFI_STARTPROC
-@@ -182,7 +182,7 @@ ENTRY(call_rwsem_down_write_failed)
- 	CFI_ADJUST_CFA_OFFSET -4
- 	ret
- 	CFI_ENDPROC
--	END(call_rwsem_down_write_failed)
-+	ENDPROC(call_rwsem_down_write_failed)
+ static const unsigned long long twothirds = 0xaaaaaaaaaaaaaaabLL;
  
- ENTRY(call_rwsem_wake)
- 	CFI_STARTPROC
-@@ -196,7 +196,7 @@ ENTRY(call_rwsem_wake)
- 	CFI_ADJUST_CFA_OFFSET -4
- 1:	ret
- 	CFI_ENDPROC
--	END(call_rwsem_wake)
-+	ENDPROC(call_rwsem_wake)
+-
+ /*--- poly_tan() ------------------------------------------------------------+
+  |                                                                           |
+  +---------------------------------------------------------------------------*/
+-void	poly_tan(FPU_REG *st0_ptr)
++void poly_tan(FPU_REG *st0_ptr)
+ {
+-  long int    		exponent;
+-  int                   invert;
+-  Xsig                  argSq, argSqSq, accumulatoro, accumulatore, accum,
+-                        argSignif, fix_up;
+-  unsigned long         adj;
++	long int exponent;
++	int invert;
++	Xsig argSq, argSqSq, accumulatoro, accumulatore, accum,
++	    argSignif, fix_up;
++	unsigned long adj;
  
- /* Fix up special calling conventions */
- ENTRY(call_rwsem_downgrade_wake)
-@@ -214,6 +214,6 @@ ENTRY(call_rwsem_downgrade_wake)
- 	CFI_ADJUST_CFA_OFFSET -4
- 	ret
- 	CFI_ENDPROC
--	END(call_rwsem_downgrade_wake)
-+	ENDPROC(call_rwsem_downgrade_wake)
+-  exponent = exponent(st0_ptr);
++	exponent = exponent(st0_ptr);
  
- #endif
-diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
-index 6ea73f3..8b92d42 100644
---- a/arch/x86/lib/thunk_64.S
-+++ b/arch/x86/lib/thunk_64.S
-@@ -33,7 +33,7 @@
- 	.endm
- 	
+ #ifdef PARANOID
+-  if ( signnegative(st0_ptr) )	/* Can't hack a number < 0.0 */
+-    { arith_invalid(0); return; }  /* Need a positive number */
++	if (signnegative(st0_ptr)) {	/* Can't hack a number < 0.0 */
++		arith_invalid(0);
++		return;
++	}			/* Need a positive number */
+ #endif /* PARANOID */
  
--	.section .sched.text
-+	.section .sched.text, "ax"
- #ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
- 	thunk rwsem_down_read_failed_thunk,rwsem_down_read_failed
- 	thunk rwsem_down_write_failed_thunk,rwsem_down_write_failed
-diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
-new file mode 100644
-index 0000000..1faac81
---- /dev/null
-+++ b/arch/x86/mach-rdc321x/Makefile
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for the RDC321x specific parts of the kernel
-+#
-+obj-$(CONFIG_X86_RDC321X)        := gpio.o platform.o wdt.o
+-  /* Split the problem into two domains, smaller and larger than pi/4 */
+-  if ( (exponent == 0) || ((exponent == -1) && (st0_ptr->sigh > 0xc90fdaa2)) )
+-    {
+-      /* The argument is greater than (approx) pi/4 */
+-      invert = 1;
+-      accum.lsw = 0;
+-      XSIG_LL(accum) = significand(st0_ptr);
+- 
+-      if ( exponent == 0 )
+-	{
+-	  /* The argument is >= 1.0 */
+-	  /* Put the binary point at the left. */
+-	  XSIG_LL(accum) <<= 1;
+-	}
+-      /* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
+-      XSIG_LL(accum) = 0x921fb54442d18469LL - XSIG_LL(accum);
+-      /* This is a special case which arises due to rounding. */
+-      if ( XSIG_LL(accum) == 0xffffffffffffffffLL )
+-	{
+-	  FPU_settag0(TAG_Valid);
+-	  significand(st0_ptr) = 0x8a51e04daabda360LL;
+-	  setexponent16(st0_ptr, (0x41 + EXTENDED_Ebias) | SIGN_Negative);
+-	  return;
++	/* Split the problem into two domains, smaller and larger than pi/4 */
++	if ((exponent == 0)
++	    || ((exponent == -1) && (st0_ptr->sigh > 0xc90fdaa2))) {
++		/* The argument is greater than (approx) pi/4 */
++		invert = 1;
++		accum.lsw = 0;
++		XSIG_LL(accum) = significand(st0_ptr);
 +
-diff --git a/arch/x86/mach-rdc321x/gpio.c b/arch/x86/mach-rdc321x/gpio.c
-new file mode 100644
-index 0000000..0312691
---- /dev/null
-+++ b/arch/x86/mach-rdc321x/gpio.c
-@@ -0,0 +1,91 @@
-+/*
-+ *  Copyright (C) 2007, OpenWrt.org, Florian Fainelli <florian@openwrt.org>
-+ *  	RDC321x architecture specific GPIO support
-+ *
-+ *  This program is free software; you can redistribute  it and/or modify it
-+ *  under  the terms of  the GNU General  Public License as published by the
-+ *  Free Software Foundation;  either version 2 of the  License, or (at your
-+ *  option) any later version.
-+ */
++		if (exponent == 0) {
++			/* The argument is >= 1.0 */
++			/* Put the binary point at the left. */
++			XSIG_LL(accum) <<= 1;
++		}
++		/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
++		XSIG_LL(accum) = 0x921fb54442d18469LL - XSIG_LL(accum);
++		/* This is a special case which arises due to rounding. */
++		if (XSIG_LL(accum) == 0xffffffffffffffffLL) {
++			FPU_settag0(TAG_Valid);
++			significand(st0_ptr) = 0x8a51e04daabda360LL;
++			setexponent16(st0_ptr,
++				      (0x41 + EXTENDED_Ebias) | SIGN_Negative);
++			return;
++		}
 +
-+#include <linux/autoconf.h>
-+#include <linux/init.h>
-+#include <linux/io.h>
-+#include <linux/types.h>
-+#include <linux/module.h>
-+#include <linux/delay.h>
++		argSignif.lsw = accum.lsw;
++		XSIG_LL(argSignif) = XSIG_LL(accum);
++		exponent = -1 + norm_Xsig(&argSignif);
++	} else {
++		invert = 0;
++		argSignif.lsw = 0;
++		XSIG_LL(accum) = XSIG_LL(argSignif) = significand(st0_ptr);
 +
-+#include <asm/mach-rdc321x/rdc321x_defs.h>
++		if (exponent < -1) {
++			/* shift the argument right by the required places */
++			if (FPU_shrx(&XSIG_LL(accum), -1 - exponent) >=
++			    0x80000000U)
++				XSIG_LL(accum)++;	/* round up */
++		}
+ 	}
+ 
+-      argSignif.lsw = accum.lsw;
+-      XSIG_LL(argSignif) = XSIG_LL(accum);
+-      exponent = -1 + norm_Xsig(&argSignif);
+-    }
+-  else
+-    {
+-      invert = 0;
+-      argSignif.lsw = 0;
+-      XSIG_LL(accum) = XSIG_LL(argSignif) = significand(st0_ptr);
+- 
+-      if ( exponent < -1 )
+-	{
+-	  /* shift the argument right by the required places */
+-	  if ( FPU_shrx(&XSIG_LL(accum), -1-exponent) >= 0x80000000U )
+-	    XSIG_LL(accum) ++;	/* round up */
+-	}
+-    }
+-
+-  XSIG_LL(argSq) = XSIG_LL(accum); argSq.lsw = accum.lsw;
+-  mul_Xsig_Xsig(&argSq, &argSq);
+-  XSIG_LL(argSqSq) = XSIG_LL(argSq); argSqSq.lsw = argSq.lsw;
+-  mul_Xsig_Xsig(&argSqSq, &argSqSq);
+-
+-  /* Compute the negative terms for the numerator polynomial */
+-  accumulatoro.msw = accumulatoro.midw = accumulatoro.lsw = 0;
+-  polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddnegterm, HiPOWERon-1);
+-  mul_Xsig_Xsig(&accumulatoro, &argSq);
+-  negate_Xsig(&accumulatoro);
+-  /* Add the positive terms */
+-  polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddplterm, HiPOWERop-1);
+-
+-  
+-  /* Compute the positive terms for the denominator polynomial */
+-  accumulatore.msw = accumulatore.midw = accumulatore.lsw = 0;
+-  polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evenplterm, HiPOWERep-1);
+-  mul_Xsig_Xsig(&accumulatore, &argSq);
+-  negate_Xsig(&accumulatore);
+-  /* Add the negative terms */
+-  polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evennegterm, HiPOWERen-1);
+-  /* Multiply by arg^2 */
+-  mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
+-  mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
+-  /* de-normalize and divide by 2 */
+-  shr_Xsig(&accumulatore, -2*(1+exponent) + 1);
+-  negate_Xsig(&accumulatore);      /* This does 1 - accumulator */
+-
+-  /* Now find the ratio. */
+-  if ( accumulatore.msw == 0 )
+-    {
+-      /* accumulatoro must contain 1.0 here, (actually, 0) but it
+-	 really doesn't matter what value we use because it will
+-	 have negligible effect in later calculations
+-	 */
+-      XSIG_LL(accum) = 0x8000000000000000LL;
+-      accum.lsw = 0;
+-    }
+-  else
+-    {
+-      div_Xsig(&accumulatoro, &accumulatore, &accum);
+-    }
+-
+-  /* Multiply by 1/3 * arg^3 */
+-  mul64_Xsig(&accum, &XSIG_LL(argSignif));
+-  mul64_Xsig(&accum, &XSIG_LL(argSignif));
+-  mul64_Xsig(&accum, &XSIG_LL(argSignif));
+-  mul64_Xsig(&accum, &twothirds);
+-  shr_Xsig(&accum, -2*(exponent+1));
+-
+-  /* tan(arg) = arg + accum */
+-  add_two_Xsig(&accum, &argSignif, &exponent);
+-
+-  if ( invert )
+-    {
+-      /* We now have the value of tan(pi_2 - arg) where pi_2 is an
+-	 approximation for pi/2
+-	 */
+-      /* The next step is to fix the answer to compensate for the
+-	 error due to the approximation used for pi/2
+-	 */
+-
+-      /* This is (approx) delta, the error in our approx for pi/2
+-	 (see above). It has an exponent of -65
+-	 */
+-      XSIG_LL(fix_up) = 0x898cc51701b839a2LL;
+-      fix_up.lsw = 0;
+-
+-      if ( exponent == 0 )
+-	adj = 0xffffffff;   /* We want approx 1.0 here, but
+-			       this is close enough. */
+-      else if ( exponent > -30 )
+-	{
+-	  adj = accum.msw >> -(exponent+1);      /* tan */
+-	  adj = mul_32_32(adj, adj);             /* tan^2 */
++	XSIG_LL(argSq) = XSIG_LL(accum);
++	argSq.lsw = accum.lsw;
++	mul_Xsig_Xsig(&argSq, &argSq);
++	XSIG_LL(argSqSq) = XSIG_LL(argSq);
++	argSqSq.lsw = argSq.lsw;
++	mul_Xsig_Xsig(&argSqSq, &argSqSq);
 +
-+static inline int rdc_gpio_is_valid(unsigned gpio)
-+{
-+	return (gpio <= RDC_MAX_GPIO);
-+}
++	/* Compute the negative terms for the numerator polynomial */
++	accumulatoro.msw = accumulatoro.midw = accumulatoro.lsw = 0;
++	polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddnegterm,
++			HiPOWERon - 1);
++	mul_Xsig_Xsig(&accumulatoro, &argSq);
++	negate_Xsig(&accumulatoro);
++	/* Add the positive terms */
++	polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddplterm,
++			HiPOWERop - 1);
 +
-+static unsigned int rdc_gpio_read(unsigned gpio)
-+{
-+	unsigned int val;
++	/* Compute the positive terms for the denominator polynomial */
++	accumulatore.msw = accumulatore.midw = accumulatore.lsw = 0;
++	polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evenplterm,
++			HiPOWERep - 1);
++	mul_Xsig_Xsig(&accumulatore, &argSq);
++	negate_Xsig(&accumulatore);
++	/* Add the negative terms */
++	polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evennegterm,
++			HiPOWERen - 1);
++	/* Multiply by arg^2 */
++	mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
++	mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
++	/* de-normalize and divide by 2 */
++	shr_Xsig(&accumulatore, -2 * (1 + exponent) + 1);
++	negate_Xsig(&accumulatore);	/* This does 1 - accumulator */
 +
-+	val = 0x80000000 | (7 << 11) | ((gpio&0x20?0x84:0x48));
-+	outl(val, RDC3210_CFGREG_ADDR);
-+	udelay(10);
-+	val = inl(RDC3210_CFGREG_DATA);
-+	val |= (0x1 << (gpio & 0x1F));
-+	outl(val, RDC3210_CFGREG_DATA);
-+	udelay(10);
-+	val = 0x80000000 | (7 << 11) | ((gpio&0x20?0x88:0x4C));
-+	outl(val, RDC3210_CFGREG_ADDR);
-+	udelay(10);
-+	val = inl(RDC3210_CFGREG_DATA);
++	/* Now find the ratio. */
++	if (accumulatore.msw == 0) {
++		/* accumulatoro must contain 1.0 here, (actually, 0) but it
++		   really doesn't matter what value we use because it will
++		   have negligible effect in later calculations
++		 */
++		XSIG_LL(accum) = 0x8000000000000000LL;
++		accum.lsw = 0;
++	} else {
++		div_Xsig(&accumulatoro, &accumulatore, &accum);
+ 	}
+-      else
+-	adj = 0;
+-      adj = mul_32_32(0x898cc517, adj);          /* delta * tan^2 */
+-
+-      fix_up.msw += adj;
+-      if ( !(fix_up.msw & 0x80000000) )   /* did fix_up overflow ? */
+-	{
+-	  /* Yes, we need to add an msb */
+-	  shr_Xsig(&fix_up, 1);
+-	  fix_up.msw |= 0x80000000;
+-	  shr_Xsig(&fix_up, 64 + exponent);
 +
-+	return val;
-+}
++	/* Multiply by 1/3 * arg^3 */
++	mul64_Xsig(&accum, &XSIG_LL(argSignif));
++	mul64_Xsig(&accum, &XSIG_LL(argSignif));
++	mul64_Xsig(&accum, &XSIG_LL(argSignif));
++	mul64_Xsig(&accum, &twothirds);
++	shr_Xsig(&accum, -2 * (exponent + 1));
 +
-+static void rdc_gpio_write(unsigned int val)
-+{
-+	if (val) {
-+		outl(val, RDC3210_CFGREG_DATA);
-+		udelay(10);
-+	}
-+}
++	/* tan(arg) = arg + accum */
++	add_two_Xsig(&accum, &argSignif, &exponent);
 +
-+int rdc_gpio_get_value(unsigned gpio)
-+{
-+	if (rdc_gpio_is_valid(gpio))
-+		return (int)rdc_gpio_read(gpio);
-+	else
-+		return -EINVAL;
-+}
-+EXPORT_SYMBOL(rdc_gpio_get_value);
++	if (invert) {
++		/* We now have the value of tan(pi_2 - arg) where pi_2 is an
++		   approximation for pi/2
++		 */
++		/* The next step is to fix the answer to compensate for the
++		   error due to the approximation used for pi/2
++		 */
 +
-+void rdc_gpio_set_value(unsigned gpio, int value)
-+{
-+	unsigned int val;
++		/* This is (approx) delta, the error in our approx for pi/2
++		   (see above). It has an exponent of -65
++		 */
++		XSIG_LL(fix_up) = 0x898cc51701b839a2LL;
++		fix_up.lsw = 0;
 +
-+	if (!rdc_gpio_is_valid(gpio))
-+		return;
++		if (exponent == 0)
++			adj = 0xffffffff;	/* We want approx 1.0 here, but
++						   this is close enough. */
++		else if (exponent > -30) {
++			adj = accum.msw >> -(exponent + 1);	/* tan */
++			adj = mul_32_32(adj, adj);	/* tan^2 */
++		} else
++			adj = 0;
++		adj = mul_32_32(0x898cc517, adj);	/* delta * tan^2 */
 +
-+	val = rdc_gpio_read(gpio);
++		fix_up.msw += adj;
++		if (!(fix_up.msw & 0x80000000)) {	/* did fix_up overflow ? */
++			/* Yes, we need to add an msb */
++			shr_Xsig(&fix_up, 1);
++			fix_up.msw |= 0x80000000;
++			shr_Xsig(&fix_up, 64 + exponent);
++		} else
++			shr_Xsig(&fix_up, 65 + exponent);
 +
-+	if (value)
-+		val &= ~(0x1 << (gpio & 0x1F));
-+	else
-+		val |= (0x1 << (gpio & 0x1F));
++		add_two_Xsig(&accum, &fix_up, &exponent);
 +
-+	rdc_gpio_write(val);
-+}
-+EXPORT_SYMBOL(rdc_gpio_set_value);
++		/* accum now contains tan(pi/2 - arg).
++		   Use tan(arg) = 1.0 / tan(pi/2 - arg)
++		 */
++		accumulatoro.lsw = accumulatoro.midw = 0;
++		accumulatoro.msw = 0x80000000;
++		div_Xsig(&accumulatoro, &accum, &accum);
++		exponent = -exponent - 1;
+ 	}
+-      else
+-	shr_Xsig(&fix_up, 65 + exponent);
+-
+-      add_two_Xsig(&accum, &fix_up, &exponent);
+-
+-      /* accum now contains tan(pi/2 - arg).
+-	 Use tan(arg) = 1.0 / tan(pi/2 - arg)
+-	 */
+-      accumulatoro.lsw = accumulatoro.midw = 0;
+-      accumulatoro.msw = 0x80000000;
+-      div_Xsig(&accumulatoro, &accum, &accum);
+-      exponent = - exponent - 1;
+-    }
+-
+-  /* Transfer the result */
+-  round_Xsig(&accum);
+-  FPU_settag0(TAG_Valid);
+-  significand(st0_ptr) = XSIG_LL(accum);
+-  setexponent16(st0_ptr, exponent + EXTENDED_Ebias);  /* Result is positive. */
 +
-+int rdc_gpio_direction_input(unsigned gpio)
-+{
-+	return 0;
-+}
-+EXPORT_SYMBOL(rdc_gpio_direction_input);
++	/* Transfer the result */
++	round_Xsig(&accum);
++	FPU_settag0(TAG_Valid);
++	significand(st0_ptr) = XSIG_LL(accum);
++	setexponent16(st0_ptr, exponent + EXTENDED_Ebias);	/* Result is positive. */
+ 
+ }
+diff --git a/arch/x86/math-emu/reg_add_sub.c b/arch/x86/math-emu/reg_add_sub.c
+index 7cd3b37..deea48b 100644
+--- a/arch/x86/math-emu/reg_add_sub.c
++++ b/arch/x86/math-emu/reg_add_sub.c
+@@ -27,7 +27,7 @@
+ static
+ int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa,
+ 		     FPU_REG const *b, u_char tagb, u_char signb,
+-		     FPU_REG *dest, int deststnr, int control_w);
++		     FPU_REG * dest, int deststnr, int control_w);
+ 
+ /*
+   Operates on st(0) and st(n), or on st(0) and temporary data.
+@@ -35,340 +35,299 @@ int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa,
+   */
+ int FPU_add(FPU_REG const *b, u_char tagb, int deststnr, int control_w)
+ {
+-  FPU_REG *a = &st(0);
+-  FPU_REG *dest = &st(deststnr);
+-  u_char signb = getsign(b);
+-  u_char taga = FPU_gettag0();
+-  u_char signa = getsign(a);
+-  u_char saved_sign = getsign(dest);
+-  int diff, tag, expa, expb;
+-  
+-  if ( !(taga | tagb) )
+-    {
+-      expa = exponent(a);
+-      expb = exponent(b);
+-
+-    valid_add:
+-      /* Both registers are valid */
+-      if (!(signa ^ signb))
+-	{
+-	  /* signs are the same */
+-	  tag = FPU_u_add(a, b, dest, control_w, signa, expa, expb);
+-	}
+-      else
+-	{
+-	  /* The signs are different, so do a subtraction */
+-	  diff = expa - expb;
+-	  if (!diff)
+-	    {
+-	      diff = a->sigh - b->sigh;  /* This works only if the ms bits
+-					    are identical. */
+-	      if (!diff)
+-		{
+-		  diff = a->sigl > b->sigl;
+-		  if (!diff)
+-		    diff = -(a->sigl < b->sigl);
++	FPU_REG *a = &st(0);
++	FPU_REG *dest = &st(deststnr);
++	u_char signb = getsign(b);
++	u_char taga = FPU_gettag0();
++	u_char signa = getsign(a);
++	u_char saved_sign = getsign(dest);
++	int diff, tag, expa, expb;
 +
-+int rdc_gpio_direction_output(unsigned gpio, int value)
-+{
-+	return 0;
-+}
-+EXPORT_SYMBOL(rdc_gpio_direction_output);
++	if (!(taga | tagb)) {
++		expa = exponent(a);
++		expb = exponent(b);
 +
++	      valid_add:
++		/* Both registers are valid */
++		if (!(signa ^ signb)) {
++			/* signs are the same */
++			tag =
++			    FPU_u_add(a, b, dest, control_w, signa, expa, expb);
++		} else {
++			/* The signs are different, so do a subtraction */
++			diff = expa - expb;
++			if (!diff) {
++				diff = a->sigh - b->sigh;	/* This works only if the ms bits
++								   are identical. */
++				if (!diff) {
++					diff = a->sigl > b->sigl;
++					if (!diff)
++						diff = -(a->sigl < b->sigl);
++				}
++			}
 +
-diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c
-new file mode 100644
-index 0000000..dda6024
---- /dev/null
-+++ b/arch/x86/mach-rdc321x/platform.c
-@@ -0,0 +1,68 @@
-+/*
-+ *  Generic RDC321x platform devices
-+ *
-+ *  Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
-+ *
-+ *  This program is free software; you can redistribute it and/or
-+ *  modify it under the terms of the GNU General Public License
-+ *  as published by the Free Software Foundation; either version 2
-+ *  of the License, or (at your option) any later version.
-+ *
-+ *  This program is distributed in the hope that it will be useful,
-+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *  GNU General Public License for more details.
-+ *
-+ *  You should have received a copy of the GNU General Public License
-+ *  along with this program; if not, write to the
-+ *  Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
-+ *  Boston, MA  02110-1301, USA.
-+ *
-+ */
++			if (diff > 0) {
++				tag =
++				    FPU_u_sub(a, b, dest, control_w, signa,
++					      expa, expb);
++			} else if (diff < 0) {
++				tag =
++				    FPU_u_sub(b, a, dest, control_w, signb,
++					      expb, expa);
++			} else {
++				FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
++				/* sign depends upon rounding mode */
++				setsign(dest, ((control_w & CW_RC) != RC_DOWN)
++					? SIGN_POS : SIGN_NEG);
++				return TAG_Zero;
++			}
+ 		}
+-	    }
+-      
+-	  if (diff > 0)
+-	    {
+-	      tag = FPU_u_sub(a, b, dest, control_w, signa, expa, expb);
+-	    }
+-	  else if ( diff < 0 )
+-	    {
+-	      tag = FPU_u_sub(b, a, dest, control_w, signb, expb, expa);
+-	    }
+-	  else
+-	    {
+-	      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
+-	      /* sign depends upon rounding mode */
+-	      setsign(dest, ((control_w & CW_RC) != RC_DOWN)
+-		      ? SIGN_POS : SIGN_NEG);
+-	      return TAG_Zero;
+-	    }
+-	}
+ 
+-      if ( tag < 0 )
+-	{
+-	  setsign(dest, saved_sign);
+-	  return tag;
++		if (tag < 0) {
++			setsign(dest, saved_sign);
++			return tag;
++		}
++		FPU_settagi(deststnr, tag);
++		return tag;
+ 	}
+-      FPU_settagi(deststnr, tag);
+-      return tag;
+-    }
+ 
+-  if ( taga == TAG_Special )
+-    taga = FPU_Special(a);
+-  if ( tagb == TAG_Special )
+-    tagb = FPU_Special(b);
++	if (taga == TAG_Special)
++		taga = FPU_Special(a);
++	if (tagb == TAG_Special)
++		tagb = FPU_Special(b);
+ 
+-  if ( ((taga == TAG_Valid) && (tagb == TW_Denormal))
++	if (((taga == TAG_Valid) && (tagb == TW_Denormal))
+ 	    || ((taga == TW_Denormal) && (tagb == TAG_Valid))
+-	    || ((taga == TW_Denormal) && (tagb == TW_Denormal)) )
+-    {
+-      FPU_REG x, y;
++	    || ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
++		FPU_REG x, y;
 +
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/list.h>
-+#include <linux/device.h>
-+#include <linux/platform_device.h>
-+#include <linux/version.h>
-+#include <linux/leds.h>
++		if (denormal_operand() < 0)
++			return FPU_Exception;
 +
-+#include <asm/gpio.h>
++		FPU_to_exp16(a, &x);
++		FPU_to_exp16(b, &y);
++		a = &x;
++		b = &y;
++		expa = exponent16(a);
++		expb = exponent16(b);
++		goto valid_add;
++	}
+ 
+-      if ( denormal_operand() < 0 )
+-	return FPU_Exception;
++	if ((taga == TW_NaN) || (tagb == TW_NaN)) {
++		if (deststnr == 0)
++			return real_2op_NaN(b, tagb, deststnr, a);
++		else
++			return real_2op_NaN(a, taga, deststnr, a);
++	}
+ 
+-      FPU_to_exp16(a, &x);
+-      FPU_to_exp16(b, &y);
+-      a = &x;
+-      b = &y;
+-      expa = exponent16(a);
+-      expb = exponent16(b);
+-      goto valid_add;
+-    }
+-
+-  if ( (taga == TW_NaN) || (tagb == TW_NaN) )
+-    {
+-      if ( deststnr == 0 )
+-	return real_2op_NaN(b, tagb, deststnr, a);
+-      else
+-	return real_2op_NaN(a, taga, deststnr, a);
+-    }
+-
+-  return add_sub_specials(a, taga, signa, b, tagb, signb,
+-			  dest, deststnr, control_w);
++	return add_sub_specials(a, taga, signa, b, tagb, signb,
++				dest, deststnr, control_w);
+ }
+ 
+-
+ /* Subtract b from a.  (a-b) -> dest */
+ int FPU_sub(int flags, int rm, int control_w)
+ {
+-  FPU_REG const *a, *b;
+-  FPU_REG *dest;
+-  u_char taga, tagb, signa, signb, saved_sign, sign;
+-  int diff, tag = 0, expa, expb, deststnr;
+-
+-  a = &st(0);
+-  taga = FPU_gettag0();
+-
+-  deststnr = 0;
+-  if ( flags & LOADED )
+-    {
+-      b = (FPU_REG *)rm;
+-      tagb = flags & 0x0f;
+-    }
+-  else
+-    {
+-      b = &st(rm);
+-      tagb = FPU_gettagi(rm);
+-
+-      if ( flags & DEST_RM )
+-	deststnr = rm;
+-    }
+-
+-  signa = getsign(a);
+-  signb = getsign(b);
+-
+-  if ( flags & REV )
+-    {
+-      signa ^= SIGN_NEG;
+-      signb ^= SIGN_NEG;
+-    }
+-
+-  dest = &st(deststnr);
+-  saved_sign = getsign(dest);
+-
+-  if ( !(taga | tagb) )
+-    {
+-      expa = exponent(a);
+-      expb = exponent(b);
+-
+-    valid_subtract:
+-      /* Both registers are valid */
+-
+-      diff = expa - expb;
+-
+-      if (!diff)
+-	{
+-	  diff = a->sigh - b->sigh;  /* Works only if ms bits are identical */
+-	  if (!diff)
+-	    {
+-	      diff = a->sigl > b->sigl;
+-	      if (!diff)
+-		diff = -(a->sigl < b->sigl);
+-	    }
++	FPU_REG const *a, *b;
++	FPU_REG *dest;
++	u_char taga, tagb, signa, signb, saved_sign, sign;
++	int diff, tag = 0, expa, expb, deststnr;
 +
-+/* LEDS */
-+static struct gpio_led default_leds[] = {
-+	{ .name = "rdc:dmz", .gpio = 1, },
-+};
++	a = &st(0);
++	taga = FPU_gettag0();
 +
-+static struct gpio_led_platform_data rdc321x_led_data = {
-+	.num_leds = ARRAY_SIZE(default_leds),
-+	.leds = default_leds,
-+};
++	deststnr = 0;
++	if (flags & LOADED) {
++		b = (FPU_REG *) rm;
++		tagb = flags & 0x0f;
++	} else {
++		b = &st(rm);
++		tagb = FPU_gettagi(rm);
 +
-+static struct platform_device rdc321x_leds = {
-+	.name = "leds-gpio",
-+	.id = -1,
-+	.dev = {
-+		.platform_data = &rdc321x_led_data,
++		if (flags & DEST_RM)
++			deststnr = rm;
+ 	}
+ 
+-      switch ( (((int)signa)*2 + signb) / SIGN_NEG )
+-	{
+-	case 0: /* P - P */
+-	case 3: /* N - N */
+-	  if (diff > 0)
+-	    {
+-	      /* |a| > |b| */
+-	      tag = FPU_u_sub(a, b, dest, control_w, signa, expa, expb);
+-	    }
+-	  else if ( diff == 0 )
+-	    {
+-	      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
+-
+-	      /* sign depends upon rounding mode */
+-	      setsign(dest, ((control_w & CW_RC) != RC_DOWN)
+-		? SIGN_POS : SIGN_NEG);
+-	      return TAG_Zero;
+-	    }
+-	  else
+-	    {
+-	      sign = signa ^ SIGN_NEG;
+-	      tag = FPU_u_sub(b, a, dest, control_w, sign, expb, expa);
+-	    }
+-	  break;
+-	case 1: /* P - N */
+-	  tag = FPU_u_add(a, b, dest, control_w, SIGN_POS, expa, expb);
+-	  break;
+-	case 2: /* N - P */
+-	  tag = FPU_u_add(a, b, dest, control_w, SIGN_NEG, expa, expb);
+-	  break;
++	signa = getsign(a);
++	signb = getsign(b);
++
++	if (flags & REV) {
++		signa ^= SIGN_NEG;
++		signb ^= SIGN_NEG;
 +	}
-+};
 +
-+/* Watchdog */
-+static struct platform_device rdc321x_wdt = {
-+	.name = "rdc321x-wdt",
-+	.id = -1,
-+	.num_resources = 0,
-+};
++	dest = &st(deststnr);
++	saved_sign = getsign(dest);
 +
-+static struct platform_device *rdc321x_devs[] = {
-+	&rdc321x_leds,
-+	&rdc321x_wdt
-+};
++	if (!(taga | tagb)) {
++		expa = exponent(a);
++		expb = exponent(b);
 +
-+static int __init rdc_board_setup(void)
-+{
-+	return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
-+}
++	      valid_subtract:
++		/* Both registers are valid */
 +
-+arch_initcall(rdc_board_setup);
-diff --git a/arch/x86/mach-rdc321x/wdt.c b/arch/x86/mach-rdc321x/wdt.c
-new file mode 100644
-index 0000000..ec5625a
---- /dev/null
-+++ b/arch/x86/mach-rdc321x/wdt.c
-@@ -0,0 +1,275 @@
-+/*
-+ * RDC321x watchdog driver
-+ *
-+ * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
-+ *
-+ * This driver is highly inspired from the cpu5_wdt driver
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-+ *
-+ */
++		diff = expa - expb;
 +
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/types.h>
-+#include <linux/errno.h>
-+#include <linux/miscdevice.h>
-+#include <linux/fs.h>
-+#include <linux/init.h>
-+#include <linux/ioport.h>
-+#include <linux/timer.h>
-+#include <linux/completion.h>
-+#include <linux/jiffies.h>
-+#include <linux/platform_device.h>
-+#include <linux/watchdog.h>
-+#include <linux/io.h>
-+#include <linux/uaccess.h>
++		if (!diff) {
++			diff = a->sigh - b->sigh;	/* Works only if ms bits are identical */
++			if (!diff) {
++				diff = a->sigl > b->sigl;
++				if (!diff)
++					diff = -(a->sigl < b->sigl);
++			}
++		}
 +
-+#include <asm/mach-rdc321x/rdc321x_defs.h>
++		switch ((((int)signa) * 2 + signb) / SIGN_NEG) {
++		case 0:	/* P - P */
++		case 3:	/* N - N */
++			if (diff > 0) {
++				/* |a| > |b| */
++				tag =
++				    FPU_u_sub(a, b, dest, control_w, signa,
++					      expa, expb);
++			} else if (diff == 0) {
++				FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
 +
-+#define RDC_WDT_MASK	0x80000000 /* Mask */
-+#define RDC_WDT_EN	0x00800000 /* Enable bit */
-+#define RDC_WDT_WTI	0x00200000 /* Generate CPU reset/NMI/WDT on timeout */
-+#define RDC_WDT_RST	0x00100000 /* Reset bit */
-+#define RDC_WDT_WIF	0x00040000 /* WDT IRQ Flag */
-+#define RDC_WDT_IRT	0x00000100 /* IRQ Routing table */
-+#define RDC_WDT_CNT	0x00000001 /* WDT count */
++				/* sign depends upon rounding mode */
++				setsign(dest, ((control_w & CW_RC) != RC_DOWN)
++					? SIGN_POS : SIGN_NEG);
++				return TAG_Zero;
++			} else {
++				sign = signa ^ SIGN_NEG;
++				tag =
++				    FPU_u_sub(b, a, dest, control_w, sign, expb,
++					      expa);
++			}
++			break;
++		case 1:	/* P - N */
++			tag =
++			    FPU_u_add(a, b, dest, control_w, SIGN_POS, expa,
++				      expb);
++			break;
++		case 2:	/* N - P */
++			tag =
++			    FPU_u_add(a, b, dest, control_w, SIGN_NEG, expa,
++				      expb);
++			break;
+ #ifdef PARANOID
+-	default:
+-	  EXCEPTION(EX_INTERNAL|0x111);
+-	  return -1;
++		default:
++			EXCEPTION(EX_INTERNAL | 0x111);
++			return -1;
+ #endif
++		}
++		if (tag < 0) {
++			setsign(dest, saved_sign);
++			return tag;
++		}
++		FPU_settagi(deststnr, tag);
++		return tag;
+ 	}
+-      if ( tag < 0 )
+-	{
+-	  setsign(dest, saved_sign);
+-	  return tag;
+-	}
+-      FPU_settagi(deststnr, tag);
+-      return tag;
+-    }
+ 
+-  if ( taga == TAG_Special )
+-    taga = FPU_Special(a);
+-  if ( tagb == TAG_Special )
+-    tagb = FPU_Special(b);
++	if (taga == TAG_Special)
++		taga = FPU_Special(a);
++	if (tagb == TAG_Special)
++		tagb = FPU_Special(b);
+ 
+-  if ( ((taga == TAG_Valid) && (tagb == TW_Denormal))
++	if (((taga == TAG_Valid) && (tagb == TW_Denormal))
+ 	    || ((taga == TW_Denormal) && (tagb == TAG_Valid))
+-	    || ((taga == TW_Denormal) && (tagb == TW_Denormal)) )
+-    {
+-      FPU_REG x, y;
++	    || ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
++		FPU_REG x, y;
+ 
+-      if ( denormal_operand() < 0 )
+-	return FPU_Exception;
++		if (denormal_operand() < 0)
++			return FPU_Exception;
 +
-+#define RDC_CLS_TMR	0x80003844 /* Clear timer */
++		FPU_to_exp16(a, &x);
++		FPU_to_exp16(b, &y);
++		a = &x;
++		b = &y;
++		expa = exponent16(a);
++		expb = exponent16(b);
+ 
+-      FPU_to_exp16(a, &x);
+-      FPU_to_exp16(b, &y);
+-      a = &x;
+-      b = &y;
+-      expa = exponent16(a);
+-      expb = exponent16(b);
+-
+-      goto valid_subtract;
+-    }
+-
+-  if ( (taga == TW_NaN) || (tagb == TW_NaN) )
+-    {
+-      FPU_REG const *d1, *d2;
+-      if ( flags & REV )
+-	{
+-	  d1 = b;
+-	  d2 = a;
++		goto valid_subtract;
+ 	}
+-      else
+-	{
+-	  d1 = a;
+-	  d2 = b;
 +
-+#define RDC_WDT_INTERVAL	(HZ/10+1)
++	if ((taga == TW_NaN) || (tagb == TW_NaN)) {
++		FPU_REG const *d1, *d2;
++		if (flags & REV) {
++			d1 = b;
++			d2 = a;
++		} else {
++			d1 = a;
++			d2 = b;
++		}
++		if (flags & LOADED)
++			return real_2op_NaN(b, tagb, deststnr, d1);
++		if (flags & DEST_RM)
++			return real_2op_NaN(a, taga, deststnr, d2);
++		else
++			return real_2op_NaN(b, tagb, deststnr, d2);
+ 	}
+-      if ( flags & LOADED )
+-	return real_2op_NaN(b, tagb, deststnr, d1);
+-      if ( flags & DEST_RM )
+-	return real_2op_NaN(a, taga, deststnr, d2);
+-      else
+-	return real_2op_NaN(b, tagb, deststnr, d2);
+-    }
+-
+-    return add_sub_specials(a, taga, signa, b, tagb, signb ^ SIGN_NEG,
+-			    dest, deststnr, control_w);
+-}
+ 
++	return add_sub_specials(a, taga, signa, b, tagb, signb ^ SIGN_NEG,
++				dest, deststnr, control_w);
++}
+ 
+ static
+ int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa,
+ 		     FPU_REG const *b, u_char tagb, u_char signb,
+-		     FPU_REG *dest, int deststnr, int control_w)
++		     FPU_REG * dest, int deststnr, int control_w)
+ {
+-  if ( ((taga == TW_Denormal) || (tagb == TW_Denormal))
+-       && (denormal_operand() < 0) )
+-    return FPU_Exception;
+-
+-  if (taga == TAG_Zero)
+-    {
+-      if (tagb == TAG_Zero)
+-	{
+-	  /* Both are zero, result will be zero. */
+-	  u_char different_signs = signa ^ signb;
+-
+-	  FPU_copy_to_regi(a, TAG_Zero, deststnr);
+-	  if ( different_signs )
+-	    {
+-	      /* Signs are different. */
+-	      /* Sign of answer depends upon rounding mode. */
+-	      setsign(dest, ((control_w & CW_RC) != RC_DOWN)
+-		      ? SIGN_POS : SIGN_NEG);
+-	    }
+-	  else
+-	    setsign(dest, signa);  /* signa may differ from the sign of a. */
+-	  return TAG_Zero;
+-	}
+-      else
+-	{
+-	  reg_copy(b, dest);
+-	  if ( (tagb == TW_Denormal) && (b->sigh & 0x80000000) )
+-	    {
+-	      /* A pseudoDenormal, convert it. */
+-	      addexponent(dest, 1);
+-	      tagb = TAG_Valid;
+-	    }
+-	  else if ( tagb > TAG_Empty )
+-	    tagb = TAG_Special;
+-	  setsign(dest, signb);  /* signb may differ from the sign of b. */
+-	  FPU_settagi(deststnr, tagb);
+-	  return tagb;
+-	}
+-    }
+-  else if (tagb == TAG_Zero)
+-    {
+-      reg_copy(a, dest);
+-      if ( (taga == TW_Denormal) && (a->sigh & 0x80000000) )
+-	{
+-	  /* A pseudoDenormal */
+-	  addexponent(dest, 1);
+-	  taga = TAG_Valid;
+-	}
+-      else if ( taga > TAG_Empty )
+-	taga = TAG_Special;
+-      setsign(dest, signa);  /* signa may differ from the sign of a. */
+-      FPU_settagi(deststnr, taga);
+-      return taga;
+-    }
+-  else if (taga == TW_Infinity)
+-    {
+-      if ( (tagb != TW_Infinity) || (signa == signb) )
+-	{
+-	  FPU_copy_to_regi(a, TAG_Special, deststnr);
+-	  setsign(dest, signa);  /* signa may differ from the sign of a. */
+-	  return taga;
++	if (((taga == TW_Denormal) || (tagb == TW_Denormal))
++	    && (denormal_operand() < 0))
++		return FPU_Exception;
 +
-+int nowayout = WATCHDOG_NOWAYOUT;
-+module_param(nowayout, int, 0);
-+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
++	if (taga == TAG_Zero) {
++		if (tagb == TAG_Zero) {
++			/* Both are zero, result will be zero. */
++			u_char different_signs = signa ^ signb;
 +
-+static int ticks = 1000;
++			FPU_copy_to_regi(a, TAG_Zero, deststnr);
++			if (different_signs) {
++				/* Signs are different. */
++				/* Sign of answer depends upon rounding mode. */
++				setsign(dest, ((control_w & CW_RC) != RC_DOWN)
++					? SIGN_POS : SIGN_NEG);
++			} else
++				setsign(dest, signa);	/* signa may differ from the sign of a. */
++			return TAG_Zero;
++		} else {
++			reg_copy(b, dest);
++			if ((tagb == TW_Denormal) && (b->sigh & 0x80000000)) {
++				/* A pseudoDenormal, convert it. */
++				addexponent(dest, 1);
++				tagb = TAG_Valid;
++			} else if (tagb > TAG_Empty)
++				tagb = TAG_Special;
++			setsign(dest, signb);	/* signb may differ from the sign of b. */
++			FPU_settagi(deststnr, tagb);
++			return tagb;
++		}
++	} else if (tagb == TAG_Zero) {
++		reg_copy(a, dest);
++		if ((taga == TW_Denormal) && (a->sigh & 0x80000000)) {
++			/* A pseudoDenormal */
++			addexponent(dest, 1);
++			taga = TAG_Valid;
++		} else if (taga > TAG_Empty)
++			taga = TAG_Special;
++		setsign(dest, signa);	/* signa may differ from the sign of a. */
++		FPU_settagi(deststnr, taga);
++		return taga;
++	} else if (taga == TW_Infinity) {
++		if ((tagb != TW_Infinity) || (signa == signb)) {
++			FPU_copy_to_regi(a, TAG_Special, deststnr);
++			setsign(dest, signa);	/* signa may differ from the sign of a. */
++			return taga;
++		}
++		/* Infinity-Infinity is undefined. */
++		return arith_invalid(deststnr);
++	} else if (tagb == TW_Infinity) {
++		FPU_copy_to_regi(b, TAG_Special, deststnr);
++		setsign(dest, signb);	/* signb may differ from the sign of b. */
++		return tagb;
+ 	}
+-      /* Infinity-Infinity is undefined. */
+-      return arith_invalid(deststnr);
+-    }
+-  else if (tagb == TW_Infinity)
+-    {
+-      FPU_copy_to_regi(b, TAG_Special, deststnr);
+-      setsign(dest, signb);  /* signb may differ from the sign of b. */
+-      return tagb;
+-    }
+-
+ #ifdef PARANOID
+-  EXCEPTION(EX_INTERNAL|0x101);
++	EXCEPTION(EX_INTERNAL | 0x101);
+ #endif
+ 
+-  return FPU_Exception;
++	return FPU_Exception;
+ }
+-
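
For reference, FPU_sub() above folds the two operand sign bytes into one
small case index with (((int)signa) * 2 + signb) / SIGN_NEG.  A minimal
standalone sketch of that dispatch, assuming the SIGN_POS = 0x00 /
SIGN_NEG = 0x80 encodings from fpu_emu.h (treat those values as an
assumption here):

	/* Sketch of the sign-pair dispatch used in FPU_sub(). */
	#include <stdio.h>

	#define SIGN_POS 0x00	/* assumed, as in fpu_emu.h */
	#define SIGN_NEG 0x80	/* assumed, as in fpu_emu.h */

	static const char *sub_case(unsigned char signa, unsigned char signb)
	{
		/* Same expression as the kernel code: maps the sign pair to 0..3. */
		switch ((((int)signa) * 2 + signb) / SIGN_NEG) {
		case 0: return "P - P: magnitude subtract";
		case 1: return "P - N: magnitude add, positive result";
		case 2: return "N - P: magnitude add, negative result";
		case 3: return "N - N: magnitude subtract";
		}
		return "unreachable";
	}

	int main(void)
	{
		unsigned char signs[2] = { SIGN_POS, SIGN_NEG };

		for (int i = 0; i < 2; i++)
			for (int j = 0; j < 2; j++)
				printf("signa=0x%02x signb=0x%02x -> %s\n",
				       signs[i], signs[j],
				       sub_case(signs[i], signs[j]));
		return 0;
	}
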
+diff --git a/arch/x86/math-emu/reg_compare.c b/arch/x86/math-emu/reg_compare.c
+index f37c5b5..ecce55f 100644
+--- a/arch/x86/math-emu/reg_compare.c
++++ b/arch/x86/math-emu/reg_compare.c
+@@ -20,362 +20,331 @@
+ #include "control_w.h"
+ #include "status_w.h"
+ 
+-
+ static int compare(FPU_REG const *b, int tagb)
+ {
+-  int diff, exp0, expb;
+-  u_char	  	st0_tag;
+-  FPU_REG  	*st0_ptr;
+-  FPU_REG	x, y;
+-  u_char		st0_sign, signb = getsign(b);
+-
+-  st0_ptr = &st(0);
+-  st0_tag = FPU_gettag0();
+-  st0_sign = getsign(st0_ptr);
+-
+-  if ( tagb == TAG_Special )
+-    tagb = FPU_Special(b);
+-  if ( st0_tag == TAG_Special )
+-    st0_tag = FPU_Special(st0_ptr);
+-
+-  if ( ((st0_tag != TAG_Valid) && (st0_tag != TW_Denormal))
+-       || ((tagb != TAG_Valid) && (tagb != TW_Denormal)) )
+-    {
+-      if ( st0_tag == TAG_Zero )
+-	{
+-	  if ( tagb == TAG_Zero ) return COMP_A_eq_B;
+-	  if ( tagb == TAG_Valid )
+-	    return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
+-	  if ( tagb == TW_Denormal )
+-	    return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
+-	    | COMP_Denormal;
+-	}
+-      else if ( tagb == TAG_Zero )
+-	{
+-	  if ( st0_tag == TAG_Valid )
+-	    return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
+-	  if ( st0_tag == TW_Denormal )
+-	    return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
+-	    | COMP_Denormal;
++	int diff, exp0, expb;
++	u_char st0_tag;
++	FPU_REG *st0_ptr;
++	FPU_REG x, y;
++	u_char st0_sign, signb = getsign(b);
 +
-+/* some device data */
++	st0_ptr = &st(0);
++	st0_tag = FPU_gettag0();
++	st0_sign = getsign(st0_ptr);
 +
-+static struct {
-+	struct completion stop;
-+	volatile int running;
-+	struct timer_list timer;
-+	volatile int queue;
-+	int default_ticks;
-+	unsigned long inuse;
-+} rdc321x_wdt_device;
++	if (tagb == TAG_Special)
++		tagb = FPU_Special(b);
++	if (st0_tag == TAG_Special)
++		st0_tag = FPU_Special(st0_ptr);
 +
-+/* generic helper functions */
++	if (((st0_tag != TAG_Valid) && (st0_tag != TW_Denormal))
++	    || ((tagb != TAG_Valid) && (tagb != TW_Denormal))) {
++		if (st0_tag == TAG_Zero) {
++			if (tagb == TAG_Zero)
++				return COMP_A_eq_B;
++			if (tagb == TAG_Valid)
++				return ((signb ==
++					 SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
++			if (tagb == TW_Denormal)
++				return ((signb ==
++					 SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
++				    | COMP_Denormal;
++		} else if (tagb == TAG_Zero) {
++			if (st0_tag == TAG_Valid)
++				return ((st0_sign ==
++					 SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
++			if (st0_tag == TW_Denormal)
++				return ((st0_sign ==
++					 SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
++				    | COMP_Denormal;
++		}
 +
-+static void rdc321x_wdt_trigger(unsigned long unused)
-+{
-+	if (rdc321x_wdt_device.running)
-+		ticks--;
++		if (st0_tag == TW_Infinity) {
++			if ((tagb == TAG_Valid) || (tagb == TAG_Zero))
++				return ((st0_sign ==
++					 SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
++			else if (tagb == TW_Denormal)
++				return ((st0_sign ==
++					 SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
++				    | COMP_Denormal;
++			else if (tagb == TW_Infinity) {
++				/* The 80486 book says that infinities can be equal! */
++				return (st0_sign == signb) ? COMP_A_eq_B :
++				    ((st0_sign ==
++				      SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
++			}
++			/* Fall through to the NaN code */
++		} else if (tagb == TW_Infinity) {
++			if ((st0_tag == TAG_Valid) || (st0_tag == TAG_Zero))
++				return ((signb ==
++					 SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
++			if (st0_tag == TW_Denormal)
++				return ((signb ==
++					 SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
++				    | COMP_Denormal;
++			/* Fall through to the NaN code */
++		}
 +
-+	/* keep watchdog alive */
-+	outl(RDC_WDT_EN|inl(RDC3210_CFGREG_DATA), RDC3210_CFGREG_DATA);
++		/* The only possibility now should be that one of the arguments
++		   is a NaN */
++		if ((st0_tag == TW_NaN) || (tagb == TW_NaN)) {
++			int signalling = 0, unsupported = 0;
++			if (st0_tag == TW_NaN) {
++				signalling =
++				    (st0_ptr->sigh & 0xc0000000) == 0x80000000;
++				unsupported = !((exponent(st0_ptr) == EXP_OVER)
++						&& (st0_ptr->
++						    sigh & 0x80000000));
++			}
++			if (tagb == TW_NaN) {
++				signalling |=
++				    (b->sigh & 0xc0000000) == 0x80000000;
++				unsupported |= !((exponent(b) == EXP_OVER)
++						 && (b->sigh & 0x80000000));
++			}
++			if (signalling || unsupported)
++				return COMP_No_Comp | COMP_SNaN | COMP_NaN;
++			else
++				/* Neither is a signaling NaN */
++				return COMP_No_Comp | COMP_NaN;
++		}
 +
-+	/* requeue?? */
-+	if (rdc321x_wdt_device.queue && ticks)
-+		mod_timer(&rdc321x_wdt_device.timer,
-+				jiffies + RDC_WDT_INTERVAL);
-+	else {
-+		/* ticks doesn't matter anyway */
-+		complete(&rdc321x_wdt_device.stop);
++		EXCEPTION(EX_Invalid);
+ 	}
+ 
+-      if ( st0_tag == TW_Infinity )
+-	{
+-	  if ( (tagb == TAG_Valid) || (tagb == TAG_Zero) )
+-	    return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
+-	  else if ( tagb == TW_Denormal )
+-	    return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
+-	      | COMP_Denormal;
+-	  else if ( tagb == TW_Infinity )
+-	    {
+-	      /* The 80486 book says that infinities can be equal! */
+-	      return (st0_sign == signb) ? COMP_A_eq_B :
+-		((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
+-	    }
+-	  /* Fall through to the NaN code */
+-	}
+-      else if ( tagb == TW_Infinity )
+-	{
+-	  if ( (st0_tag == TAG_Valid) || (st0_tag == TAG_Zero) )
+-	    return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
+-	  if ( st0_tag == TW_Denormal )
+-	    return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
+-		| COMP_Denormal;
+-	  /* Fall through to the NaN code */
++	if (st0_sign != signb) {
++		return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
++		    | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
++		       COMP_Denormal : 0);
+ 	}
+ 
+-      /* The only possibility now should be that one of the arguments
+-	 is a NaN */
+-      if ( (st0_tag == TW_NaN) || (tagb == TW_NaN) )
+-	{
+-	  int signalling = 0, unsupported = 0;
+-	  if ( st0_tag == TW_NaN )
+-	    {
+-	      signalling = (st0_ptr->sigh & 0xc0000000) == 0x80000000;
+-	      unsupported = !((exponent(st0_ptr) == EXP_OVER)
+-			      && (st0_ptr->sigh & 0x80000000));
+-	    }
+-	  if ( tagb == TW_NaN )
+-	    {
+-	      signalling |= (b->sigh & 0xc0000000) == 0x80000000;
+-	      unsupported |= !((exponent(b) == EXP_OVER)
+-			       && (b->sigh & 0x80000000));
+-	    }
+-	  if ( signalling || unsupported )
+-	    return COMP_No_Comp | COMP_SNaN | COMP_NaN;
+-	  else
+-	    /* Neither is a signaling NaN */
+-	    return COMP_No_Comp | COMP_NaN;
++	if ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) {
++		FPU_to_exp16(st0_ptr, &x);
++		FPU_to_exp16(b, &y);
++		st0_ptr = &x;
++		b = &y;
++		exp0 = exponent16(st0_ptr);
++		expb = exponent16(b);
++	} else {
++		exp0 = exponent(st0_ptr);
++		expb = exponent(b);
+ 	}
+-      
+-      EXCEPTION(EX_Invalid);
+-    }
+-  
+-  if (st0_sign != signb)
+-    {
+-      return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
+-	| ( ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
+-	    COMP_Denormal : 0);
+-    }
+-
+-  if ( (st0_tag == TW_Denormal) || (tagb == TW_Denormal) )
+-    {
+-      FPU_to_exp16(st0_ptr, &x);
+-      FPU_to_exp16(b, &y);
+-      st0_ptr = &x;
+-      b = &y;
+-      exp0 = exponent16(st0_ptr);
+-      expb = exponent16(b);
+-    }
+-  else
+-    {
+-      exp0 = exponent(st0_ptr);
+-      expb = exponent(b);
+-    }
+ 
+ #ifdef PARANOID
+-  if (!(st0_ptr->sigh & 0x80000000)) EXCEPTION(EX_Invalid);
+-  if (!(b->sigh & 0x80000000)) EXCEPTION(EX_Invalid);
++	if (!(st0_ptr->sigh & 0x80000000))
++		EXCEPTION(EX_Invalid);
++	if (!(b->sigh & 0x80000000))
++		EXCEPTION(EX_Invalid);
+ #endif /* PARANOID */
+ 
+-  diff = exp0 - expb;
+-  if ( diff == 0 )
+-    {
+-      diff = st0_ptr->sigh - b->sigh;  /* Works only if ms bits are
+-					      identical */
+-      if ( diff == 0 )
+-	{
+-	diff = st0_ptr->sigl > b->sigl;
+-	if ( diff == 0 )
+-	  diff = -(st0_ptr->sigl < b->sigl);
++	diff = exp0 - expb;
++	if (diff == 0) {
++		diff = st0_ptr->sigh - b->sigh;	/* Works only if ms bits are
++						   identical */
++		if (diff == 0) {
++			diff = st0_ptr->sigl > b->sigl;
++			if (diff == 0)
++				diff = -(st0_ptr->sigl < b->sigl);
++		}
+ 	}
+-    }
+-
+-  if ( diff > 0 )
+-    {
+-      return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
+-	| ( ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
+-	    COMP_Denormal : 0);
+-    }
+-  if ( diff < 0 )
+-    {
+-      return ((st0_sign == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
+-	| ( ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
+-	    COMP_Denormal : 0);
+-    }
+-
+-  return COMP_A_eq_B
+-    | ( ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
+-	COMP_Denormal : 0);
+ 
+-}
++	if (diff > 0) {
++		return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
++		    | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
++		       COMP_Denormal : 0);
 +	}
-+
-+}
-+
-+static void rdc321x_wdt_reset(void)
-+{
-+	ticks = rdc321x_wdt_device.default_ticks;
-+}
-+
-+static void rdc321x_wdt_start(void)
-+{
-+	if (!rdc321x_wdt_device.queue) {
-+		rdc321x_wdt_device.queue = 1;
-+
-+		/* Clear the timer */
-+		outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR);
-+
-+		/* Enable watchdog and set the timeout to 81.92 us */
-+		outl(RDC_WDT_EN|RDC_WDT_CNT, RDC3210_CFGREG_DATA);
-+
-+		mod_timer(&rdc321x_wdt_device.timer,
-+				jiffies + RDC_WDT_INTERVAL);
++	if (diff < 0) {
++		return ((st0_sign == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
++		    | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
++		       COMP_Denormal : 0);
 +	}
+ 
++	return COMP_A_eq_B
++	    | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
++	       COMP_Denormal : 0);
 +
-+	/* if process dies, counter is not decremented */
-+	rdc321x_wdt_device.running++;
-+}
-+
-+static int rdc321x_wdt_stop(void)
-+{
-+	if (rdc321x_wdt_device.running)
-+		rdc321x_wdt_device.running = 0;
-+
-+	ticks = rdc321x_wdt_device.default_ticks;
-+
-+	return -EIO;
-+}
-+
-+/* filesystem operations */
-+
-+static int rdc321x_wdt_open(struct inode *inode, struct file *file)
-+{
-+	if (test_and_set_bit(0, &rdc321x_wdt_device.inuse))
-+		return -EBUSY;
-+
-+	return nonseekable_open(inode, file);
-+}
-+
-+static int rdc321x_wdt_release(struct inode *inode, struct file *file)
-+{
-+	clear_bit(0, &rdc321x_wdt_device.inuse);
-+	return 0;
 +}
+ 
+ /* This function requires that st(0) is not empty */
+ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
+ {
+-  int f = 0, c;
+-
+-  c = compare(loaded_data, loaded_tag);
+-
+-  if (c & COMP_NaN)
+-    {
+-      EXCEPTION(EX_Invalid);
+-      f = SW_C3 | SW_C2 | SW_C0;
+-    }
+-  else
+-    switch (c & 7)
+-      {
+-      case COMP_A_lt_B:
+-	f = SW_C0;
+-	break;
+-      case COMP_A_eq_B:
+-	f = SW_C3;
+-	break;
+-      case COMP_A_gt_B:
+-	f = 0;
+-	break;
+-      case COMP_No_Comp:
+-	f = SW_C3 | SW_C2 | SW_C0;
+-	break;
++	int f = 0, c;
 +
-+static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file,
-+				unsigned int cmd, unsigned long arg)
-+{
-+	void __user *argp = (void __user *)arg;
-+	unsigned int value;
-+	static struct watchdog_info ident = {
-+		.options = WDIOF_CARDRESET,
-+		.identity = "RDC321x WDT",
-+	};
++	c = compare(loaded_data, loaded_tag);
 +
-+	switch (cmd) {
-+	case WDIOC_KEEPALIVE:
-+		rdc321x_wdt_reset();
-+		break;
-+	case WDIOC_GETSTATUS:
-+		/* Read the value from the DATA register */
-+		value = inl(RDC3210_CFGREG_DATA);
-+		if (copy_to_user(argp, &value, sizeof(int)))
-+			return -EFAULT;
-+		break;
-+	case WDIOC_GETSUPPORT:
-+		if (copy_to_user(argp, &ident, sizeof(ident)))
-+			return -EFAULT;
-+		break;
-+	case WDIOC_SETOPTIONS:
-+		if (copy_from_user(&value, argp, sizeof(int)))
-+			return -EFAULT;
-+		switch (value) {
-+		case WDIOS_ENABLECARD:
-+			rdc321x_wdt_start();
++	if (c & COMP_NaN) {
++		EXCEPTION(EX_Invalid);
++		f = SW_C3 | SW_C2 | SW_C0;
++	} else
++		switch (c & 7) {
++		case COMP_A_lt_B:
++			f = SW_C0;
 +			break;
-+		case WDIOS_DISABLECARD:
-+			return rdc321x_wdt_stop();
++		case COMP_A_eq_B:
++			f = SW_C3;
++			break;
++		case COMP_A_gt_B:
++			f = 0;
++			break;
++		case COMP_No_Comp:
++			f = SW_C3 | SW_C2 | SW_C0;
++			break;
+ #ifdef PARANOID
+-      default:
+-	EXCEPTION(EX_INTERNAL|0x121);
+-	f = SW_C3 | SW_C2 | SW_C0;
+-	break;
 +		default:
-+			return -EINVAL;
++			EXCEPTION(EX_INTERNAL | 0x121);
++			f = SW_C3 | SW_C2 | SW_C0;
++			break;
+ #endif /* PARANOID */
+-      }
+-  setcc(f);
+-  if (c & COMP_Denormal)
+-    {
+-      return denormal_operand() < 0;
+-    }
+-  return 0;
 +		}
-+		break;
-+	default:
-+		return -ENOTTY;
++	setcc(f);
++	if (c & COMP_Denormal) {
++		return denormal_operand() < 0;
 +	}
 +	return 0;
-+}
-+
-+static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf,
-+				size_t count, loff_t *ppos)
-+{
-+	if (!count)
-+		return -EIO;
-+
-+	rdc321x_wdt_reset();
-+
-+	return count;
-+}
-+
-+static const struct file_operations rdc321x_wdt_fops = {
-+	.owner		= THIS_MODULE,
-+	.llseek		= no_llseek,
-+	.ioctl		= rdc321x_wdt_ioctl,
-+	.open		= rdc321x_wdt_open,
-+	.write		= rdc321x_wdt_write,
-+	.release	= rdc321x_wdt_release,
-+};
-+
-+static struct miscdevice rdc321x_wdt_misc = {
-+	.minor	= WATCHDOG_MINOR,
-+	.name	= "watchdog",
-+	.fops	= &rdc321x_wdt_fops,
-+};
-+
-+static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
-+{
-+	int err;
+ }
+ 
+-
+ static int compare_st_st(int nr)
+ {
+-  int f = 0, c;
+-  FPU_REG *st_ptr;
+-
+-  if ( !NOT_EMPTY(0) || !NOT_EMPTY(nr) )
+-    {
+-      setcc(SW_C3 | SW_C2 | SW_C0);
+-      /* Stack fault */
+-      EXCEPTION(EX_StackUnder);
+-      return !(control_word & CW_Invalid);
+-    }
+-
+-  st_ptr = &st(nr);
+-  c = compare(st_ptr, FPU_gettagi(nr));
+-  if (c & COMP_NaN)
+-    {
+-      setcc(SW_C3 | SW_C2 | SW_C0);
+-      EXCEPTION(EX_Invalid);
+-      return !(control_word & CW_Invalid);
+-    }
+-  else
+-    switch (c & 7)
+-      {
+-      case COMP_A_lt_B:
+-	f = SW_C0;
+-	break;
+-      case COMP_A_eq_B:
+-	f = SW_C3;
+-	break;
+-      case COMP_A_gt_B:
+-	f = 0;
+-	break;
+-      case COMP_No_Comp:
+-	f = SW_C3 | SW_C2 | SW_C0;
+-	break;
++	int f = 0, c;
++	FPU_REG *st_ptr;
 +
-+	err = misc_register(&rdc321x_wdt_misc);
-+	if (err < 0) {
-+		printk(KERN_ERR PFX "watchdog misc_register failed\n");
-+		return err;
++	if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
++		setcc(SW_C3 | SW_C2 | SW_C0);
++		/* Stack fault */
++		EXCEPTION(EX_StackUnder);
++		return !(control_word & CW_Invalid);
 +	}
 +
-+	/* Reset the watchdog */
-+	outl(RDC_WDT_RST, RDC3210_CFGREG_DATA);
-+
-+	init_completion(&rdc321x_wdt_device.stop);
-+	rdc321x_wdt_device.queue = 0;
-+
-+	clear_bit(0, &rdc321x_wdt_device.inuse);
-+
-+	setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
-+
-+	rdc321x_wdt_device.default_ticks = ticks;
-+
-+	printk(KERN_INFO PFX "watchdog init success\n");
-+
-+	return 0;
-+}
-+
-+static int rdc321x_wdt_remove(struct platform_device *pdev)
-+{
-+	if (rdc321x_wdt_device.queue) {
-+		rdc321x_wdt_device.queue = 0;
-+		wait_for_completion(&rdc321x_wdt_device.stop);
++	st_ptr = &st(nr);
++	c = compare(st_ptr, FPU_gettagi(nr));
++	if (c & COMP_NaN) {
++		setcc(SW_C3 | SW_C2 | SW_C0);
++		EXCEPTION(EX_Invalid);
++		return !(control_word & CW_Invalid);
++	} else
++		switch (c & 7) {
++		case COMP_A_lt_B:
++			f = SW_C0;
++			break;
++		case COMP_A_eq_B:
++			f = SW_C3;
++			break;
++		case COMP_A_gt_B:
++			f = 0;
++			break;
++		case COMP_No_Comp:
++			f = SW_C3 | SW_C2 | SW_C0;
++			break;
+ #ifdef PARANOID
+-      default:
+-	EXCEPTION(EX_INTERNAL|0x122);
+-	f = SW_C3 | SW_C2 | SW_C0;
+-	break;
++		default:
++			EXCEPTION(EX_INTERNAL | 0x122);
++			f = SW_C3 | SW_C2 | SW_C0;
++			break;
+ #endif /* PARANOID */
+-      }
+-  setcc(f);
+-  if (c & COMP_Denormal)
+-    {
+-      return denormal_operand() < 0;
+-    }
+-  return 0;
++		}
++	setcc(f);
++	if (c & COMP_Denormal) {
++		return denormal_operand() < 0;
 +	}
-+
-+	misc_deregister(&rdc321x_wdt_misc);
-+
 +	return 0;
-+}
-+
-+static struct platform_driver rdc321x_wdt_driver = {
-+	.probe = rdc321x_wdt_probe,
-+	.remove = rdc321x_wdt_remove,
-+	.driver = {
-+		.owner = THIS_MODULE,
-+		.name = "rdc321x-wdt",
-+	},
-+};
-+
-+static int __init rdc321x_wdt_init(void)
-+{
-+	return platform_driver_register(&rdc321x_wdt_driver);
-+}
-+
-+static void __exit rdc321x_wdt_exit(void)
-+{
-+	platform_driver_unregister(&rdc321x_wdt_driver);
-+}
-+
-+module_init(rdc321x_wdt_init);
-+module_exit(rdc321x_wdt_exit);
-+
-+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-+MODULE_DESCRIPTION("RDC321x watchdog driver");
-+MODULE_LICENSE("GPL");
-+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
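
The rdc321x-wdt driver dropped from this patch registers the standard
/dev/watchdog misc device, so it is driven with the generic
linux/watchdog.h ioctls.  A minimal userspace sketch (the device path
and the 10 second ping interval are illustrative assumptions):

	/* Minimal userspace client for a /dev/watchdog style driver such as
	 * rdc321x-wdt.  Device path and ping interval are assumptions. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		struct watchdog_info ident;
		int flags;
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0) {
			perror("open /dev/watchdog");
			return 1;
		}

		/* Query driver capabilities (rdc321x-wdt reports WDIOF_CARDRESET). */
		if (ioctl(fd, WDIOC_GETSUPPORT, &ident) == 0)
			printf("watchdog identity: %s\n", ident.identity);

		for (int i = 0; i < 5; i++) {
			/* Keep the board alive; a plain write() works as well. */
			ioctl(fd, WDIOC_KEEPALIVE, 0);
			sleep(10);
		}

		/* Try to disarm before closing (honoured unless nowayout is set). */
		flags = WDIOS_DISABLECARD;
		ioctl(fd, WDIOC_SETOPTIONS, &flags);
		close(fd);
		return 0;
	}
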
-diff --git a/arch/x86/mach-visws/mpparse.c b/arch/x86/mach-visws/mpparse.c
-index f3c74fa..2a8456a 100644
---- a/arch/x86/mach-visws/mpparse.c
-+++ b/arch/x86/mach-visws/mpparse.c
-@@ -36,19 +36,19 @@ unsigned int __initdata maxcpus = NR_CPUS;
+ }
  
- static void __init MP_processor_info (struct mpc_config_processor *m)
+-
+ static int compare_u_st_st(int nr)
  {
-- 	int ver, logical_apicid;
-+	int ver, logical_apicid;
- 	physid_mask_t apic_cpus;
-- 	
+-  int f = 0, c;
+-  FPU_REG *st_ptr;
+-
+-  if ( !NOT_EMPTY(0) || !NOT_EMPTY(nr) )
+-    {
+-      setcc(SW_C3 | SW_C2 | SW_C0);
+-      /* Stack fault */
+-      EXCEPTION(EX_StackUnder);
+-      return !(control_word & CW_Invalid);
+-    }
+-
+-  st_ptr = &st(nr);
+-  c = compare(st_ptr, FPU_gettagi(nr));
+-  if (c & COMP_NaN)
+-    {
+-      setcc(SW_C3 | SW_C2 | SW_C0);
+-      if (c & COMP_SNaN)       /* This is the only difference between
+-				  un-ordered and ordinary comparisons */
+-	{
+-	  EXCEPTION(EX_Invalid);
+-	  return !(control_word & CW_Invalid);
++	int f = 0, c;
++	FPU_REG *st_ptr;
 +
- 	if (!(m->mpc_cpuflag & CPU_ENABLED))
- 		return;
++	if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
++		setcc(SW_C3 | SW_C2 | SW_C0);
++		/* Stack fault */
++		EXCEPTION(EX_StackUnder);
++		return !(control_word & CW_Invalid);
+ 	}
+-      return 0;
+-    }
+-  else
+-    switch (c & 7)
+-      {
+-      case COMP_A_lt_B:
+-	f = SW_C0;
+-	break;
+-      case COMP_A_eq_B:
+-	f = SW_C3;
+-	break;
+-      case COMP_A_gt_B:
+-	f = 0;
+-	break;
+-      case COMP_No_Comp:
+-	f = SW_C3 | SW_C2 | SW_C0;
+-	break;
++
++	st_ptr = &st(nr);
++	c = compare(st_ptr, FPU_gettagi(nr));
++	if (c & COMP_NaN) {
++		setcc(SW_C3 | SW_C2 | SW_C0);
++		if (c & COMP_SNaN) {	/* This is the only difference between
++					   un-ordered and ordinary comparisons */
++			EXCEPTION(EX_Invalid);
++			return !(control_word & CW_Invalid);
++		}
++		return 0;
++	} else
++		switch (c & 7) {
++		case COMP_A_lt_B:
++			f = SW_C0;
++			break;
++		case COMP_A_eq_B:
++			f = SW_C3;
++			break;
++		case COMP_A_gt_B:
++			f = 0;
++			break;
++		case COMP_No_Comp:
++			f = SW_C3 | SW_C2 | SW_C0;
++			break;
+ #ifdef PARANOID
+-      default:
+-	EXCEPTION(EX_INTERNAL|0x123);
+-	f = SW_C3 | SW_C2 | SW_C0;
+-	break;
+-#endif /* PARANOID */ 
+-      }
+-  setcc(f);
+-  if (c & COMP_Denormal)
+-    {
+-      return denormal_operand() < 0;
+-    }
+-  return 0;
++		default:
++			EXCEPTION(EX_INTERNAL | 0x123);
++			f = SW_C3 | SW_C2 | SW_C0;
++			break;
++#endif /* PARANOID */
++		}
++	setcc(f);
++	if (c & COMP_Denormal) {
++		return denormal_operand() < 0;
++	}
++	return 0;
+ }
  
- 	logical_apicid = m->mpc_apicid;
--	printk(KERN_INFO "%sCPU #%d %ld:%ld APIC version %d\n",
--		m->mpc_cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "",
--		m->mpc_apicid,
--		(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
--		(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
--		m->mpc_apicver);
-+	printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n",
-+	       m->mpc_cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "",
-+	       m->mpc_apicid,
-+	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
-+	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
-+	       m->mpc_apicver);
+ /*---------------------------------------------------------------------------*/
  
- 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
- 		boot_cpu_physical_apicid = m->mpc_apicid;
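
The printk change above (%ld -> %u) matches the operand types: the
masked mpc_cpufeature fields are plain unsigned int expressions.  A
small sketch of the extraction, with mask values assumed from
mpspec_def.h:

	/* Why %u fits MP_processor_info(): the masked fields are unsigned ints. */
	#include <stdio.h>

	#define CPU_FAMILY_MASK 0x0F00	/* assumed, as in mpspec_def.h */
	#define CPU_MODEL_MASK  0x00F0	/* assumed, as in mpspec_def.h */

	int main(void)
	{
		unsigned int mpc_cpufeature = 0x0651;	/* example: family 6, model 5 */

		unsigned int family = (mpc_cpufeature & CPU_FAMILY_MASK) >> 8;
		unsigned int model  = (mpc_cpufeature & CPU_MODEL_MASK) >> 4;

		/* %u matches the unsigned int operands; %ld would need longs. */
		printf("CPU %u:%u\n", family, model);
		return 0;
	}
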
-diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
-index 3bef977..5ae5466 100644
---- a/arch/x86/mach-voyager/setup.c
-+++ b/arch/x86/mach-voyager/setup.c
-@@ -37,14 +37,14 @@ void __init pre_setup_arch_hook(void)
+ void fcom_st(void)
  {
- 	/* Voyagers run their CPUs from independent clocks, so disable
- 	 * the TSC code because we can't sync them */
--	tsc_disable = 1;
-+	setup_clear_cpu_cap(X86_FEATURE_TSC);
+-  /* fcom st(i) */
+-  compare_st_st(FPU_rm);
++	/* fcom st(i) */
++	compare_st_st(FPU_rm);
  }
  
- void __init trap_init_hook(void)
+-
+ void fcompst(void)
  {
+-  /* fcomp st(i) */
+-  if ( !compare_st_st(FPU_rm) )
+-    FPU_pop();
++	/* fcomp st(i) */
++	if (!compare_st_st(FPU_rm))
++		FPU_pop();
  }
  
--static struct irqaction irq0  = {
-+static struct irqaction irq0 = {
- 	.handler = timer_interrupt,
- 	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
- 	.mask = CPU_MASK_NONE,
-@@ -59,44 +59,47 @@ void __init time_init_hook(void)
+-
+ void fcompp(void)
+ {
+-  /* fcompp */
+-  if (FPU_rm != 1)
+-    {
+-      FPU_illegal();
+-      return;
+-    }
+-  if ( !compare_st_st(1) )
+-      poppop();
++	/* fcompp */
++	if (FPU_rm != 1) {
++		FPU_illegal();
++		return;
++	}
++	if (!compare_st_st(1))
++		poppop();
+ }
  
- /* Hook for machine specific memory setup. */
+-
+ void fucom_(void)
+ {
+-  /* fucom st(i) */
+-  compare_u_st_st(FPU_rm);
++	/* fucom st(i) */
++	compare_u_st_st(FPU_rm);
  
--char * __init machine_specific_memory_setup(void)
-+char *__init machine_specific_memory_setup(void)
+ }
+ 
+-
+ void fucomp(void)
  {
- 	char *who;
+-  /* fucomp st(i) */
+-  if ( !compare_u_st_st(FPU_rm) )
+-    FPU_pop();
++	/* fucomp st(i) */
++	if (!compare_u_st_st(FPU_rm))
++		FPU_pop();
+ }
  
- 	who = "NOT VOYAGER";
+-
+ void fucompp(void)
+ {
+-  /* fucompp */
+-  if (FPU_rm == 1)
+-    {
+-      if ( !compare_u_st_st(1) )
+-	poppop();
+-    }
+-  else
+-    FPU_illegal();
++	/* fucompp */
++	if (FPU_rm == 1) {
++		if (!compare_u_st_st(1))
++			poppop();
++	} else
++		FPU_illegal();
+ }
+diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c
+index a850158..04869e6 100644
+--- a/arch/x86/math-emu/reg_constant.c
++++ b/arch/x86/math-emu/reg_constant.c
+@@ -16,29 +16,28 @@
+ #include "reg_constant.h"
+ #include "control_w.h"
  
--	if(voyager_level == 5) {
-+	if (voyager_level == 5) {
- 		__u32 addr, length;
- 		int i;
+-
+ #define MAKE_REG(s,e,l,h) { l, h, \
+                             ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
  
- 		who = "Voyager-SUS";
+-FPU_REG const CONST_1    = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
++FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
+ #if 0
+-FPU_REG const CONST_2    = MAKE_REG(POS, 1, 0x00000000, 0x80000000);
++FPU_REG const CONST_2 = MAKE_REG(POS, 1, 0x00000000, 0x80000000);
+ FPU_REG const CONST_HALF = MAKE_REG(POS, -1, 0x00000000, 0x80000000);
+-#endif  /*  0  */
+-static FPU_REG const CONST_L2T  = MAKE_REG(POS, 1, 0xcd1b8afe, 0xd49a784b);
+-static FPU_REG const CONST_L2E  = MAKE_REG(POS, 0, 0x5c17f0bc, 0xb8aa3b29);
+-FPU_REG const CONST_PI   = MAKE_REG(POS, 1, 0x2168c235, 0xc90fdaa2);
+-FPU_REG const CONST_PI2  = MAKE_REG(POS, 0, 0x2168c235, 0xc90fdaa2);
+-FPU_REG const CONST_PI4  = MAKE_REG(POS, -1, 0x2168c235, 0xc90fdaa2);
+-static FPU_REG const CONST_LG2  = MAKE_REG(POS, -2, 0xfbcff799, 0x9a209a84);
+-static FPU_REG const CONST_LN2  = MAKE_REG(POS, -1, 0xd1cf79ac, 0xb17217f7);
++#endif /*  0  */
++static FPU_REG const CONST_L2T = MAKE_REG(POS, 1, 0xcd1b8afe, 0xd49a784b);
++static FPU_REG const CONST_L2E = MAKE_REG(POS, 0, 0x5c17f0bc, 0xb8aa3b29);
++FPU_REG const CONST_PI = MAKE_REG(POS, 1, 0x2168c235, 0xc90fdaa2);
++FPU_REG const CONST_PI2 = MAKE_REG(POS, 0, 0x2168c235, 0xc90fdaa2);
++FPU_REG const CONST_PI4 = MAKE_REG(POS, -1, 0x2168c235, 0xc90fdaa2);
++static FPU_REG const CONST_LG2 = MAKE_REG(POS, -2, 0xfbcff799, 0x9a209a84);
++static FPU_REG const CONST_LN2 = MAKE_REG(POS, -1, 0xd1cf79ac, 0xb17217f7);
  
- 		e820.nr_map = 0;
--		for(i=0; voyager_memory_detect(i, &addr, &length); i++) {
-+		for (i = 0; voyager_memory_detect(i, &addr, &length); i++) {
- 			add_memory_region(addr, length, E820_RAM);
- 		}
- 		return who;
--	} else if(voyager_level == 4) {
-+	} else if (voyager_level == 4) {
- 		__u32 tom;
--		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8;
-+		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
- 		/* select the DINO config space */
- 		outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
- 		/* Read DINO top of memory register */
- 		tom = ((inb(catbase + 0x4) & 0xf0) << 16)
--			+ ((inb(catbase + 0x5) & 0x7f) << 24);
-+		    + ((inb(catbase + 0x5) & 0x7f) << 24);
+ /* Extra bits to take pi/2 to more than 128 bits precision. */
+ FPU_REG const CONST_PI2extra = MAKE_REG(NEG, -66,
+-					 0xfc8f8cbb, 0xece675d1);
++					0xfc8f8cbb, 0xece675d1);
  
--		if(inb(catbase) != VOYAGER_DINO) {
--			printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
--			tom = (boot_params.screen_info.ext_mem_k)<<10;
-+		if (inb(catbase) != VOYAGER_DINO) {
-+			printk(KERN_ERR
-+			       "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
-+			tom = (boot_params.screen_info.ext_mem_k) << 10;
- 		}
- 		who = "Voyager-TOM";
- 		add_memory_region(0, 0x9f000, E820_RAM);
- 		/* map from 1M to top of memory */
--		add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM);
-+		add_memory_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024,
-+				  E820_RAM);
- 		/* FIXME: Should check the ASICs to see if I need to
- 		 * take out the 8M window.  Just do it at the moment
- 		 * */
--		add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED);
-+		add_memory_region(8 * 1024 * 1024, 8 * 1024 * 1024,
-+				  E820_RESERVED);
- 		return who;
- 	}
+ /* Only the sign (and tag) is used in internal zeroes */
+-FPU_REG const CONST_Z    = MAKE_REG(POS, EXP_UNDER, 0x0, 0x0);
++FPU_REG const CONST_Z = MAKE_REG(POS, EXP_UNDER, 0x0, 0x0);
  
-@@ -114,8 +117,7 @@ char * __init machine_specific_memory_setup(void)
- 		unsigned long mem_size;
+ /* Only the sign and significand (and tag) are used in internal NaNs */
+ /* The 80486 never generates one of these 
+@@ -48,24 +47,22 @@ FPU_REG const CONST_SNAN = MAKE_REG(POS, EXP_OVER, 0x00000001, 0x80000000);
+ FPU_REG const CONST_QNaN = MAKE_REG(NEG, EXP_OVER, 0x00000000, 0xC0000000);
  
- 		/* compare results from other methods and take the greater */
--		if (boot_params.alt_mem_k
--		    < boot_params.screen_info.ext_mem_k) {
-+		if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
- 			mem_size = boot_params.screen_info.ext_mem_k;
- 			who = "BIOS-88";
- 		} else {
-@@ -126,6 +128,6 @@ char * __init machine_specific_memory_setup(void)
- 		e820.nr_map = 0;
- 		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
- 		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
--  	}
+ /* Only the sign (and tag) is used in internal infinities */
+-FPU_REG const CONST_INF  = MAKE_REG(POS, EXP_OVER, 0x00000000, 0x80000000);
+-
++FPU_REG const CONST_INF = MAKE_REG(POS, EXP_OVER, 0x00000000, 0x80000000);
+ 
+ static void fld_const(FPU_REG const *c, int adj, u_char tag)
+ {
+-  FPU_REG *st_new_ptr;
+-
+-  if ( STACK_OVERFLOW )
+-    {
+-      FPU_stack_overflow();
+-      return;
+-    }
+-  push();
+-  reg_copy(c, st_new_ptr);
+-  st_new_ptr->sigl += adj;  /* For all our fldxxx constants, we don't need to
+-			       borrow or carry. */
+-  FPU_settag0(tag);
+-  clear_C1();
++	FPU_REG *st_new_ptr;
++
++	if (STACK_OVERFLOW) {
++		FPU_stack_overflow();
++		return;
 +	}
- 	return who;
++	push();
++	reg_copy(c, st_new_ptr);
++	st_new_ptr->sigl += adj;	/* For all our fldxxx constants, we don't need to
++					   borrow or carry. */
++	FPU_settag0(tag);
++	clear_C1();
  }
-diff --git a/arch/x86/mach-voyager/voyager_basic.c b/arch/x86/mach-voyager/voyager_basic.c
-index 9b77b39..6a949e4 100644
---- a/arch/x86/mach-voyager/voyager_basic.c
-+++ b/arch/x86/mach-voyager/voyager_basic.c
-@@ -35,7 +35,7 @@
- /*
-  * Power off function, if any
-  */
--void (*pm_power_off)(void);
-+void (*pm_power_off) (void);
- EXPORT_SYMBOL(pm_power_off);
  
- int voyager_level = 0;
-@@ -43,39 +43,38 @@ int voyager_level = 0;
- struct voyager_SUS *voyager_SUS = NULL;
+ /* A fast way to find out whether x is one of RC_DOWN or RC_CHOP
+@@ -75,46 +72,46 @@ static void fld_const(FPU_REG const *c, int adj, u_char tag)
  
- #ifdef CONFIG_SMP
--static void
--voyager_dump(int dummy1, struct tty_struct *dummy3)
-+static void voyager_dump(int dummy1, struct tty_struct *dummy3)
+ static void fld1(int rc)
  {
- 	/* get here via a sysrq */
- 	voyager_smp_dump();
+-  fld_const(&CONST_1, 0, TAG_Valid);
++	fld_const(&CONST_1, 0, TAG_Valid);
  }
  
- static struct sysrq_key_op sysrq_voyager_dump_op = {
--	.handler	= voyager_dump,
--	.help_msg	= "Voyager",
--	.action_msg	= "Dump Voyager Status",
-+	.handler = voyager_dump,
-+	.help_msg = "Voyager",
-+	.action_msg = "Dump Voyager Status",
- };
- #endif
- 
--void
--voyager_detect(struct voyager_bios_info *bios)
-+void voyager_detect(struct voyager_bios_info *bios)
+ static void fldl2t(int rc)
  {
--	if(bios->len != 0xff) {
--		int class = (bios->class_1 << 8) 
--			| (bios->class_2 & 0xff);
-+	if (bios->len != 0xff) {
-+		int class = (bios->class_1 << 8)
-+		    | (bios->class_2 & 0xff);
- 
- 		printk("Voyager System detected.\n"
- 		       "        Class %x, Revision %d.%d\n",
- 		       class, bios->major, bios->minor);
--		if(class == VOYAGER_LEVEL4) 
-+		if (class == VOYAGER_LEVEL4)
- 			voyager_level = 4;
--		else if(class < VOYAGER_LEVEL5_AND_ABOVE)
-+		else if (class < VOYAGER_LEVEL5_AND_ABOVE)
- 			voyager_level = 3;
- 		else
- 			voyager_level = 5;
- 		printk("        Architecture Level %d\n", voyager_level);
--		if(voyager_level < 4)
--			printk("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n");
-+		if (voyager_level < 4)
-+			printk
-+			    ("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n");
- 		/* install the power off handler */
- 		pm_power_off = voyager_power_off;
- #ifdef CONFIG_SMP
-@@ -86,15 +85,13 @@ voyager_detect(struct voyager_bios_info *bios)
- 	}
+-  fld_const(&CONST_L2T, (rc == RC_UP) ? 1 : 0, TAG_Valid);
++	fld_const(&CONST_L2T, (rc == RC_UP) ? 1 : 0, TAG_Valid);
  }
  
--void
--voyager_system_interrupt(int cpl, void *dev_id)
-+void voyager_system_interrupt(int cpl, void *dev_id)
+ static void fldl2e(int rc)
  {
- 	printk("Voyager: detected system interrupt\n");
+-  fld_const(&CONST_L2E, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
++	fld_const(&CONST_L2E, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
  }
  
- /* Routine to read information from the extended CMOS area */
--__u8
--voyager_extended_cmos_read(__u16 addr)
-+__u8 voyager_extended_cmos_read(__u16 addr)
+ static void fldpi(int rc)
  {
- 	outb(addr & 0xff, 0x74);
- 	outb((addr >> 8) & 0xff, 0x75);
-@@ -108,12 +105,11 @@ voyager_extended_cmos_read(__u16 addr)
+-  fld_const(&CONST_PI, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
++	fld_const(&CONST_PI, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
+ }
  
- typedef struct ClickMap {
- 	struct Entry {
--		__u32	Address;
--		__u32	Length;
-+		__u32 Address;
-+		__u32 Length;
- 	} Entry[CLICK_ENTRIES];
- } ClickMap_t;
+ static void fldlg2(int rc)
+ {
+-  fld_const(&CONST_LG2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
++	fld_const(&CONST_LG2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
+ }
  
--
- /* This routine is pretty much an awful hack to read the bios clickmap by
-  * mapping it into page 0.  There are usually three regions in the map:
-  * 	Base Memory
-@@ -122,8 +118,7 @@ typedef struct ClickMap {
-  *
-  * Returns are 0 for failure and 1 for success on extracting region.
-  */
--int __init
--voyager_memory_detect(int region, __u32 *start, __u32 *length)
-+int __init voyager_memory_detect(int region, __u32 * start, __u32 * length)
+ static void fldln2(int rc)
  {
- 	int i;
- 	int retval = 0;
-@@ -132,13 +127,14 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
- 	unsigned long map_addr;
- 	unsigned long old;
+-  fld_const(&CONST_LN2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
++	fld_const(&CONST_LN2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
+ }
  
--	if(region >= CLICK_ENTRIES) {
-+	if (region >= CLICK_ENTRIES) {
- 		printk("Voyager: Illegal ClickMap region %d\n", region);
- 		return 0;
- 	}
+ static void fldz(int rc)
+ {
+-  fld_const(&CONST_Z, 0, TAG_Zero);
++	fld_const(&CONST_Z, 0, TAG_Zero);
+ }
  
--	for(i = 0; i < sizeof(cmos); i++)
--		cmos[i] = voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i);
-+	for (i = 0; i < sizeof(cmos); i++)
-+		cmos[i] =
-+		    voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i);
+-typedef void (*FUNC_RC)(int);
++typedef void (*FUNC_RC) (int);
  
- 	map_addr = *(unsigned long *)cmos;
+ static FUNC_RC constants_table[] = {
+-  fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC)FPU_illegal
++	fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC) FPU_illegal
+ };
  
-@@ -147,10 +143,10 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
- 	pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
- 	local_flush_tlb();
- 	/* now clear everything out but page 0 */
--	map = (ClickMap_t *)(map_addr & (~PAGE_MASK));
-+	map = (ClickMap_t *) (map_addr & (~PAGE_MASK));
+ void fconst(void)
+ {
+-  (constants_table[FPU_rm])(control_word & CW_RC);
++	(constants_table[FPU_rm]) (control_word & CW_RC);
+ }
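
The MAKE_REG() constants above store a 64-bit significand (sigh:sigl,
integer bit in bit 63) plus an unbiased exponent.  A quick sketch that
decodes two of them back into doubles to sanity-check the encoding (the
layout reading is an assumption based on the macro, not stated in the
patch):

	/* Decode MAKE_REG(sign, exp, sigl, sigh) constants.  Build: cc -std=c99 -lm */
	#include <math.h>
	#include <stdio.h>
	#include <stdint.h>

	static double decode(int exp, uint32_t sigl, uint32_t sigh)
	{
		double sig = (double)sigh * 4294967296.0 + (double)sigl;	/* sigh:sigl */

		return ldexp(sig, exp - 63);	/* put the binary point below bit 63 */
	}

	int main(void)
	{
		/* CONST_PI  = MAKE_REG(POS,  1, 0x2168c235, 0xc90fdaa2) */
		printf("CONST_PI  ~ %.15f\n", decode(1, 0x2168c235, 0xc90fdaa2));
		/* CONST_LN2 = MAKE_REG(POS, -1, 0xd1cf79ac, 0xb17217f7) */
		printf("CONST_LN2 ~ %.15f\n", decode(-1, 0xd1cf79ac, 0xb17217f7));
		return 0;
	}
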
+diff --git a/arch/x86/math-emu/reg_convert.c b/arch/x86/math-emu/reg_convert.c
+index 45a2587..1080607 100644
+--- a/arch/x86/math-emu/reg_convert.c
++++ b/arch/x86/math-emu/reg_convert.c
+@@ -13,41 +13,34 @@
+ #include "exception.h"
+ #include "fpu_emu.h"
  
- 	/* zero length is the end of the clickmap */
--	if(map->Entry[region].Length != 0) {
-+	if (map->Entry[region].Length != 0) {
- 		*length = map->Entry[region].Length * CLICK_SIZE;
- 		*start = map->Entry[region].Address;
- 		retval = 1;
-@@ -165,10 +161,9 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
- /* voyager specific handling code for timer interrupts.  Used to hand
-  * off the timer tick to the SMP code, since the VIC doesn't have an
-  * internal timer (The QIC does, but that's another story). */
--void
--voyager_timer_interrupt(void)
-+void voyager_timer_interrupt(void)
+-
+ int FPU_to_exp16(FPU_REG const *a, FPU_REG *x)
  {
--	if((jiffies & 0x3ff) == 0) {
-+	if ((jiffies & 0x3ff) == 0) {
+-  int sign = getsign(a);
+-
+-  *(long long *)&(x->sigl) = *(const long long *)&(a->sigl);
+-
+-  /* Set up the exponent as a 16 bit quantity. */
+-  setexponent16(x, exponent(a));
+-
+-  if ( exponent16(x) == EXP_UNDER )
+-    {
+-      /* The number is a de-normal or pseudodenormal. */
+-      /* We only deal with the significand and exponent. */
+-
+-      if (x->sigh & 0x80000000)
+-	{
+-	  /* Is a pseudodenormal. */
+-	  /* This is non-80486 behaviour because the number
+-	     loses its 'denormal' identity. */
+-	  addexponent(x, 1);
+-	}
+-      else
+-	{
+-	  /* Is a denormal. */
+-	  addexponent(x, 1);
+-	  FPU_normalize_nuo(x);
++	int sign = getsign(a);
++
++	*(long long *)&(x->sigl) = *(const long long *)&(a->sigl);
++
++	/* Set up the exponent as a 16 bit quantity. */
++	setexponent16(x, exponent(a));
++
++	if (exponent16(x) == EXP_UNDER) {
++		/* The number is a de-normal or pseudodenormal. */
++		/* We only deal with the significand and exponent. */
++
++		if (x->sigh & 0x80000000) {
++			/* Is a pseudodenormal. */
++			/* This is non-80486 behaviour because the number
++			   loses its 'denormal' identity. */
++			addexponent(x, 1);
++		} else {
++			/* Is a denormal. */
++			addexponent(x, 1);
++			FPU_normalize_nuo(x);
++		}
+ 	}
+-    }
  
- 		/* There seems to be something flaky in either
- 		 * hardware or software that is resetting the timer 0
-@@ -186,18 +181,20 @@ voyager_timer_interrupt(void)
- 		__u16 val;
+-  if ( !(x->sigh & 0x80000000) )
+-    {
+-      EXCEPTION(EX_INTERNAL | 0x180);
+-    }
++	if (!(x->sigh & 0x80000000)) {
++		EXCEPTION(EX_INTERNAL | 0x180);
++	}
  
- 		spin_lock(&i8253_lock);
--		
+-  return sign;
++	return sign;
+ }
+-
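
FPU_to_exp16() above hands true denormals to FPU_normalize_nuo(), which
shifts the significand left until its integer bit is set while adjusting
the exponent.  A standalone sketch of that step (simplified struct, not
the kernel's FPU_REG):

	/* Normalize a 64-bit significand so its top bit (bit 63) is set. */
	#include <stdio.h>
	#include <stdint.h>

	struct reg {
		uint64_t sig;	/* significand, integer bit expected in bit 63 */
		int exp;	/* exponent, kept in a 16-bit-sized range */
	};

	static void normalize(struct reg *r)
	{
		if (r->sig == 0)
			return;		/* a true zero has no normalized form */

		while (!(r->sig & (1ULL << 63))) {
			r->sig <<= 1;
			r->exp--;
		}
	}

	int main(void)
	{
		struct reg r = { .sig = 0x0000000080000000ULL, .exp = 0 };

		normalize(&r);
		/* prints sig=0x8000000000000000 exp=-32 */
		printf("sig=%#llx exp=%d\n", (unsigned long long)r.sig, r.exp);
		return 0;
	}
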
+diff --git a/arch/x86/math-emu/reg_divide.c b/arch/x86/math-emu/reg_divide.c
+index 5cee7ff..6827012 100644
+--- a/arch/x86/math-emu/reg_divide.c
++++ b/arch/x86/math-emu/reg_divide.c
+@@ -26,182 +26,157 @@
+   */
+ int FPU_div(int flags, int rm, int control_w)
+ {
+-  FPU_REG x, y;
+-  FPU_REG const *a, *b, *st0_ptr, *st_ptr;
+-  FPU_REG *dest;
+-  u_char taga, tagb, signa, signb, sign, saved_sign;
+-  int tag, deststnr;
+-
+-  if ( flags & DEST_RM )
+-    deststnr = rm;
+-  else
+-    deststnr = 0;
+-
+-  if ( flags & REV )
+-    {
+-      b = &st(0);
+-      st0_ptr = b;
+-      tagb = FPU_gettag0();
+-      if ( flags & LOADED )
+-	{
+-	  a = (FPU_REG *)rm;
+-	  taga = flags & 0x0f;
++	FPU_REG x, y;
++	FPU_REG const *a, *b, *st0_ptr, *st_ptr;
++	FPU_REG *dest;
++	u_char taga, tagb, signa, signb, sign, saved_sign;
++	int tag, deststnr;
 +
- 		outb_p(0x00, 0x43);
- 		val = inb_p(0x40);
- 		val |= inb(0x40) << 8;
- 		spin_unlock(&i8253_lock);
- 
--		if(val > LATCH) {
--			printk("\nVOYAGER: countdown timer value too high (%d), resetting\n\n", val);
-+		if (val > LATCH) {
-+			printk
-+			    ("\nVOYAGER: countdown timer value too high (%d), resetting\n\n",
-+			     val);
- 			spin_lock(&i8253_lock);
--			outb(0x34,0x43);
--			outb_p(LATCH & 0xff , 0x40);	/* LSB */
--			outb(LATCH >> 8 , 0x40);	/* MSB */
-+			outb(0x34, 0x43);
-+			outb_p(LATCH & 0xff, 0x40);	/* LSB */
-+			outb(LATCH >> 8, 0x40);	/* MSB */
- 			spin_unlock(&i8253_lock);
- 		}
++	if (flags & DEST_RM)
++		deststnr = rm;
++	else
++		deststnr = 0;
++
++	if (flags & REV) {
++		b = &st(0);
++		st0_ptr = b;
++		tagb = FPU_gettag0();
++		if (flags & LOADED) {
++			a = (FPU_REG *) rm;
++			taga = flags & 0x0f;
++		} else {
++			a = &st(rm);
++			st_ptr = a;
++			taga = FPU_gettagi(rm);
++		}
++	} else {
++		a = &st(0);
++		st0_ptr = a;
++		taga = FPU_gettag0();
++		if (flags & LOADED) {
++			b = (FPU_REG *) rm;
++			tagb = flags & 0x0f;
++		} else {
++			b = &st(rm);
++			st_ptr = b;
++			tagb = FPU_gettagi(rm);
++		}
  	}
-@@ -206,14 +203,13 @@ voyager_timer_interrupt(void)
- #endif
- }
+-      else
+-	{
+-	  a = &st(rm);
+-	  st_ptr = a;
+-	  taga = FPU_gettagi(rm);
+-	}
+-    }
+-  else
+-    {
+-      a = &st(0);
+-      st0_ptr = a;
+-      taga = FPU_gettag0();
+-      if ( flags & LOADED )
+-	{
+-	  b = (FPU_REG *)rm;
+-	  tagb = flags & 0x0f;
+-	}
+-      else
+-	{
+-	  b = &st(rm);
+-	  st_ptr = b;
+-	  tagb = FPU_gettagi(rm);
+-	}
+-    }
  
--void
--voyager_power_off(void)
-+void voyager_power_off(void)
- {
- 	printk("VOYAGER Power Off\n");
+-  signa = getsign(a);
+-  signb = getsign(b);
++	signa = getsign(a);
++	signb = getsign(b);
  
--	if(voyager_level == 5) {
-+	if (voyager_level == 5) {
- 		voyager_cat_power_off();
--	} else if(voyager_level == 4) {
-+	} else if (voyager_level == 4) {
- 		/* This doesn't apparently work on most L4 machines,
- 		 * but the specs say to do this to get automatic power
- 		 * off.  Unfortunately, if it doesn't power off the
-@@ -222,10 +218,8 @@ voyager_power_off(void)
- #if 0
- 		int port;
+-  sign = signa ^ signb;
++	sign = signa ^ signb;
  
--	  
- 		/* enable the voyager Configuration Space */
--		outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, 
--		     VOYAGER_MC_SETUP);
-+		outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, VOYAGER_MC_SETUP);
- 		/* the port for the power off flag is an offset from the
- 		   floating base */
- 		port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21;
-@@ -235,62 +229,57 @@ voyager_power_off(void)
- 	}
- 	/* and wait for it to happen */
- 	local_irq_disable();
--	for(;;)
-+	for (;;)
- 		halt();
- }
+-  dest = &st(deststnr);
+-  saved_sign = getsign(dest);
++	dest = &st(deststnr);
++	saved_sign = getsign(dest);
  
- /* copied from process.c */
--static inline void
--kb_wait(void)
-+static inline void kb_wait(void)
- {
- 	int i;
+-  if ( !(taga | tagb) )
+-    {
+-      /* Both regs Valid, this should be the most common case. */
+-      reg_copy(a, &x);
+-      reg_copy(b, &y);
+-      setpositive(&x);
+-      setpositive(&y);
+-      tag = FPU_u_div(&x, &y, dest, control_w, sign);
++	if (!(taga | tagb)) {
++		/* Both regs Valid, this should be the most common case. */
++		reg_copy(a, &x);
++		reg_copy(b, &y);
++		setpositive(&x);
++		setpositive(&y);
++		tag = FPU_u_div(&x, &y, dest, control_w, sign);
  
--	for (i=0; i<0x10000; i++)
-+	for (i = 0; i < 0x10000; i++)
- 		if ((inb_p(0x64) & 0x02) == 0)
- 			break;
- }
+-      if ( tag < 0 )
+-	return tag;
++		if (tag < 0)
++			return tag;
  
--void
--machine_shutdown(void)
-+void machine_shutdown(void)
- {
- 	/* Architecture specific shutdown needed before a kexec */
- }
+-      FPU_settagi(deststnr, tag);
+-      return tag;
+-    }
++		FPU_settagi(deststnr, tag);
++		return tag;
++	}
  
--void
--machine_restart(char *cmd)
-+void machine_restart(char *cmd)
- {
- 	printk("Voyager Warm Restart\n");
- 	kb_wait();
+-  if ( taga == TAG_Special )
+-    taga = FPU_Special(a);
+-  if ( tagb == TAG_Special )
+-    tagb = FPU_Special(b);
++	if (taga == TAG_Special)
++		taga = FPU_Special(a);
++	if (tagb == TAG_Special)
++		tagb = FPU_Special(b);
  
--	if(voyager_level == 5) {
-+	if (voyager_level == 5) {
- 		/* write magic values to the RTC to inform system that
- 		 * shutdown is beginning */
- 		outb(0x8f, 0x70);
--		outb(0x5 , 0x71);
--		
-+		outb(0x5, 0x71);
+-  if ( ((taga == TAG_Valid) && (tagb == TW_Denormal))
++	if (((taga == TAG_Valid) && (tagb == TW_Denormal))
+ 	    || ((taga == TW_Denormal) && (tagb == TAG_Valid))
+-	    || ((taga == TW_Denormal) && (tagb == TW_Denormal)) )
+-    {
+-      if ( denormal_operand() < 0 )
+-	return FPU_Exception;
+-
+-      FPU_to_exp16(a, &x);
+-      FPU_to_exp16(b, &y);
+-      tag = FPU_u_div(&x, &y, dest, control_w, sign);
+-      if ( tag < 0 )
+-	return tag;
+-
+-      FPU_settagi(deststnr, tag);
+-      return tag;
+-    }
+-  else if ( (taga <= TW_Denormal) && (tagb <= TW_Denormal) )
+-    {
+-      if ( tagb != TAG_Zero )
+-	{
+-	  /* Want to find Zero/Valid */
+-	  if ( tagb == TW_Denormal )
+-	    {
+-	      if ( denormal_operand() < 0 )
+-		return FPU_Exception;
+-	    }
+-
+-	  /* The result is zero. */
+-	  FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
+-	  setsign(dest, sign);
+-	  return TAG_Zero;
++	    || ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
++		if (denormal_operand() < 0)
++			return FPU_Exception;
 +
- 		udelay(50);
--		outb(0xfe,0x64);         /* pull reset low */
--	} else if(voyager_level == 4) {
--		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8;
-+		outb(0xfe, 0x64);	/* pull reset low */
-+	} else if (voyager_level == 4) {
-+		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
- 		__u8 basebd = inb(VOYAGER_MC_SETUP);
--		
++		FPU_to_exp16(a, &x);
++		FPU_to_exp16(b, &y);
++		tag = FPU_u_div(&x, &y, dest, control_w, sign);
++		if (tag < 0)
++			return tag;
 +
- 		outb(basebd | 0x08, VOYAGER_MC_SETUP);
- 		outb(0x02, catbase + 0x21);
++		FPU_settagi(deststnr, tag);
++		return tag;
++	} else if ((taga <= TW_Denormal) && (tagb <= TW_Denormal)) {
++		if (tagb != TAG_Zero) {
++			/* Want to find Zero/Valid */
++			if (tagb == TW_Denormal) {
++				if (denormal_operand() < 0)
++					return FPU_Exception;
++			}
++
++			/* The result is zero. */
++			FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
++			setsign(dest, sign);
++			return TAG_Zero;
++		}
++		/* We have an exception condition, either 0/0 or Valid/Zero. */
++		if (taga == TAG_Zero) {
++			/* 0/0 */
++			return arith_invalid(deststnr);
++		}
++		/* Valid/Zero */
++		return FPU_divide_by_zero(deststnr, sign);
  	}
- 	local_irq_disable();
--	for(;;)
-+	for (;;)
- 		halt();
+-      /* We have an exception condition, either 0/0 or Valid/Zero. */
+-      if ( taga == TAG_Zero )
+-	{
+-	  /* 0/0 */
+-	  return arith_invalid(deststnr);
++	/* Must have infinities, NaNs, etc */
++	else if ((taga == TW_NaN) || (tagb == TW_NaN)) {
++		if (flags & LOADED)
++			return real_2op_NaN((FPU_REG *) rm, flags & 0x0f, 0,
++					    st0_ptr);
++
++		if (flags & DEST_RM) {
++			int tag;
++			tag = FPU_gettag0();
++			if (tag == TAG_Special)
++				tag = FPU_Special(st0_ptr);
++			return real_2op_NaN(st0_ptr, tag, rm,
++					    (flags & REV) ? st0_ptr : &st(rm));
++		} else {
++			int tag;
++			tag = FPU_gettagi(rm);
++			if (tag == TAG_Special)
++				tag = FPU_Special(&st(rm));
++			return real_2op_NaN(&st(rm), tag, 0,
++					    (flags & REV) ? st0_ptr : &st(rm));
++		}
++	} else if (taga == TW_Infinity) {
++		if (tagb == TW_Infinity) {
++			/* infinity/infinity */
++			return arith_invalid(deststnr);
++		} else {
++			/* tagb must be Valid or Zero */
++			if ((tagb == TW_Denormal) && (denormal_operand() < 0))
++				return FPU_Exception;
++
++			/* Infinity divided by Zero or Valid does
++			   not raise an exception, but returns Infinity */
++			FPU_copy_to_regi(a, TAG_Special, deststnr);
++			setsign(dest, sign);
++			return taga;
++		}
++	} else if (tagb == TW_Infinity) {
++		if ((taga == TW_Denormal) && (denormal_operand() < 0))
++			return FPU_Exception;
++
++		/* The result is zero. */
++		FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
++		setsign(dest, sign);
++		return TAG_Zero;
+ 	}
+-      /* Valid/Zero */
+-      return FPU_divide_by_zero(deststnr, sign);
+-    }
+-  /* Must have infinities, NaNs, etc */
+-  else if ( (taga == TW_NaN) || (tagb == TW_NaN) )
+-    {
+-      if ( flags & LOADED )
+-	return real_2op_NaN((FPU_REG *)rm, flags & 0x0f, 0, st0_ptr);
+-
+-      if ( flags & DEST_RM )
+-	{
+-	  int tag;
+-	  tag = FPU_gettag0();
+-	  if ( tag == TAG_Special )
+-	    tag = FPU_Special(st0_ptr);
+-	  return real_2op_NaN(st0_ptr, tag, rm, (flags & REV) ? st0_ptr : &st(rm));
+-	}
+-      else
+-	{
+-	  int tag;
+-	  tag = FPU_gettagi(rm);
+-	  if ( tag == TAG_Special )
+-	    tag = FPU_Special(&st(rm));
+-	  return real_2op_NaN(&st(rm), tag, 0, (flags & REV) ? st0_ptr : &st(rm));
+-	}
+-    }
+-  else if (taga == TW_Infinity)
+-    {
+-      if (tagb == TW_Infinity)
+-	{
+-	  /* infinity/infinity */
+-	  return arith_invalid(deststnr);
+-	}
+-      else
+-	{
+-	  /* tagb must be Valid or Zero */
+-	  if ( (tagb == TW_Denormal) && (denormal_operand() < 0) )
+-	    return FPU_Exception;
+-	  
+-	  /* Infinity divided by Zero or Valid does
+-	     not raise and exception, but returns Infinity */
+-	  FPU_copy_to_regi(a, TAG_Special, deststnr);
+-	  setsign(dest, sign);
+-	  return taga;
+-	}
+-    }
+-  else if (tagb == TW_Infinity)
+-    {
+-      if ( (taga == TW_Denormal) && (denormal_operand() < 0) )
+-	return FPU_Exception;
+-
+-      /* The result is zero. */
+-      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
+-      setsign(dest, sign);
+-      return TAG_Zero;
+-    }
+ #ifdef PARANOID
+-  else
+-    {
+-      EXCEPTION(EX_INTERNAL|0x102);
+-      return FPU_Exception;
+-    }
+-#endif /* PARANOID */ 
++	else {
++		EXCEPTION(EX_INTERNAL | 0x102);
++		return FPU_Exception;
++	}
++#endif /* PARANOID */
+ 
+ 	return 0;
  }
+diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
+index e976cae..799d4af 100644
+--- a/arch/x86/math-emu/reg_ld_str.c
++++ b/arch/x86/math-emu/reg_ld_str.c
+@@ -27,1084 +27,938 @@
+ #include "control_w.h"
+ #include "status_w.h"
  
--void
--machine_emergency_restart(void)
-+void machine_emergency_restart(void)
+-
+-#define DOUBLE_Emax 1023         /* largest valid exponent */
++#define DOUBLE_Emax 1023	/* largest valid exponent */
+ #define DOUBLE_Ebias 1023
+-#define DOUBLE_Emin (-1022)      /* smallest valid exponent */
++#define DOUBLE_Emin (-1022)	/* smallest valid exponent */
+ 
+-#define SINGLE_Emax 127          /* largest valid exponent */
++#define SINGLE_Emax 127		/* largest valid exponent */
+ #define SINGLE_Ebias 127
+-#define SINGLE_Emin (-126)       /* smallest valid exponent */
+-
++#define SINGLE_Emin (-126)	/* smallest valid exponent */
+ 
+ static u_char normalize_no_excep(FPU_REG *r, int exp, int sign)
  {
- 	/*for now, just hook this to a warm restart */
- 	machine_restart(NULL);
+-  u_char tag;
++	u_char tag;
+ 
+-  setexponent16(r, exp);
++	setexponent16(r, exp);
+ 
+-  tag = FPU_normalize_nuo(r);
+-  stdexp(r);
+-  if ( sign )
+-    setnegative(r);
++	tag = FPU_normalize_nuo(r);
++	stdexp(r);
++	if (sign)
++		setnegative(r);
+ 
+-  return tag;
++	return tag;
  }
  
--void
--mca_nmi_hook(void)
-+void mca_nmi_hook(void)
+-
+ int FPU_tagof(FPU_REG *ptr)
  {
- 	__u8 dumpval __maybe_unused = inb(0xf823);
- 	__u8 swnmi __maybe_unused = inb(0xf813);
-@@ -301,8 +290,8 @@ mca_nmi_hook(void)
- 	/* clear swnmi */
- 	outb(0xff, 0xf813);
- 	/* tell SUS to ignore dump */
--	if(voyager_level == 5 && voyager_SUS != NULL) {
--		if(voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) {
-+	if (voyager_level == 5 && voyager_SUS != NULL) {
-+		if (voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) {
- 			voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND;
- 			voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS;
- 			udelay(1000);
-@@ -310,15 +299,14 @@ mca_nmi_hook(void)
- 			voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS;
- 		}
+-  int exp;
+-
+-  exp = exponent16(ptr) & 0x7fff;
+-  if ( exp == 0 )
+-    {
+-      if ( !(ptr->sigh | ptr->sigl) )
+-	{
+-	  return TAG_Zero;
++	int exp;
++
++	exp = exponent16(ptr) & 0x7fff;
++	if (exp == 0) {
++		if (!(ptr->sigh | ptr->sigl)) {
++			return TAG_Zero;
++		}
++		/* The number is a de-normal or pseudodenormal. */
++		return TAG_Special;
++	}
++
++	if (exp == 0x7fff) {
++		/* Is an Infinity, a NaN, or an unsupported data type. */
++		return TAG_Special;
  	}
--	printk(KERN_ERR "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n", smp_processor_id());
-+	printk(KERN_ERR
-+	       "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n",
-+	       smp_processor_id());
- 	show_stack(NULL, NULL);
- 	show_state();
- }
- 
+-      /* The number is a de-normal or pseudodenormal. */
+-      return TAG_Special;
+-    }
+-
+-  if ( exp == 0x7fff )
+-    {
+-      /* Is an Infinity, a NaN, or an unsupported data type. */
+-      return TAG_Special;
+-    }
 -
+-  if ( !(ptr->sigh & 0x80000000) )
+-    {
+-      /* Unsupported data type. */
+-      /* Valid numbers have the ms bit set to 1. */
+-      /* Unnormal. */
+-      return TAG_Special;
+-    }
 -
--void
--machine_halt(void)
-+void machine_halt(void)
- {
- 	/* treat a halt like a power off */
- 	machine_power_off();
-diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c
-index 2132ca6..17a7904 100644
---- a/arch/x86/mach-voyager/voyager_cat.c
-+++ b/arch/x86/mach-voyager/voyager_cat.c
-@@ -39,34 +39,32 @@
- #define CAT_DATA	(sspb + 0xd)
+-  return TAG_Valid;
+-}
  
- /* the internal cat functions */
--static void cat_pack(__u8 *msg, __u16 start_bit, __u8 *data, 
--		     __u16 num_bits);
--static void cat_unpack(__u8 *msg, __u16 start_bit, __u8 *data,
-+static void cat_pack(__u8 * msg, __u16 start_bit, __u8 * data, __u16 num_bits);
-+static void cat_unpack(__u8 * msg, __u16 start_bit, __u8 * data,
- 		       __u16 num_bits);
--static void cat_build_header(__u8 *header, const __u16 len, 
-+static void cat_build_header(__u8 * header, const __u16 len,
- 			     const __u16 smallest_reg_bits,
- 			     const __u16 longest_reg_bits);
--static int cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp,
-+static int cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp,
- 			__u8 reg, __u8 op);
--static int cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp,
--		       __u8 reg, __u8 *value);
--static int cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes,
-+static int cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp,
-+		       __u8 reg, __u8 * value);
-+static int cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes,
- 			__u8 pad_bits);
--static int cat_write(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
-+static int cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
- 		     __u8 value);
--static int cat_read(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
--		    __u8 *value);
--static int cat_subread(voyager_module_t *modp, voyager_asic_t *asicp,
-+static int cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
-+		    __u8 * value);
-+static int cat_subread(voyager_module_t * modp, voyager_asic_t * asicp,
- 		       __u16 offset, __u16 len, void *buf);
--static int cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp,
-+static int cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
- 			__u8 reg, __u8 value);
--static int cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp);
--static int cat_connect(voyager_module_t *modp, voyager_asic_t *asicp);
-+static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp);
-+static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp);
++	if (!(ptr->sigh & 0x80000000)) {
++		/* Unsupported data type. */
++		/* Valid numbers have the ms bit set to 1. */
++		/* Unnormal. */
++		return TAG_Special;
++	}
++
++	return TAG_Valid;
++}
  
--static inline const char *
--cat_module_name(int module_id)
-+static inline const char *cat_module_name(int module_id)
+ /* Get a long double from user memory */
+ int FPU_load_extended(long double __user *s, int stnr)
  {
--	switch(module_id) {
-+	switch (module_id) {
- 	case 0x10:
- 		return "Processor Slot 0";
- 	case 0x11:
-@@ -105,14 +103,14 @@ voyager_module_t *voyager_cat_list;
+-  FPU_REG *sti_ptr = &st(stnr);
++	FPU_REG *sti_ptr = &st(stnr);
  
- /* the I/O port assignments for the VIC and QIC */
- static struct resource vic_res = {
--	.name	= "Voyager Interrupt Controller",
--	.start	= 0xFC00,
--	.end	= 0xFC6F
-+	.name = "Voyager Interrupt Controller",
-+	.start = 0xFC00,
-+	.end = 0xFC6F
- };
- static struct resource qic_res = {
--	.name	= "Quad Interrupt Controller",
--	.start	= 0xFC70,
--	.end	= 0xFCFF
-+	.name = "Quad Interrupt Controller",
-+	.start = 0xFC70,
-+	.end = 0xFCFF
- };
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ, s, 10);
+-  __copy_from_user(sti_ptr, s, 10);
+-  RE_ENTRANT_CHECK_ON;
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_READ, s, 10);
++	__copy_from_user(sti_ptr, s, 10);
++	RE_ENTRANT_CHECK_ON;
  
- /* This function is used to pack a data bit stream inside a message.
-@@ -120,7 +118,7 @@ static struct resource qic_res = {
-  * Note: This function assumes that any unused bit in the data stream
-  * is set to zero so that the ors will work correctly */
- static void
--cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
-+cat_pack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
- {
- 	/* compute initial shift needed */
- 	const __u16 offset = start_bit % BITS_PER_BYTE;
-@@ -130,7 +128,7 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
- 	int i;
+-  return FPU_tagof(sti_ptr);
++	return FPU_tagof(sti_ptr);
+ }
  
- 	/* adjust if we have more than a byte of residue */
--	if(residue >= BITS_PER_BYTE) {
-+	if (residue >= BITS_PER_BYTE) {
- 		residue -= BITS_PER_BYTE;
- 		len++;
- 	}
-@@ -138,24 +136,25 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
- 	/* clear out the bits.  We assume here that if len==0 then
- 	 * residue >= offset.  This is always true for the catbus
- 	 * operations */
--	msg[byte] &= 0xff << (BITS_PER_BYTE - offset); 
-+	msg[byte] &= 0xff << (BITS_PER_BYTE - offset);
- 	msg[byte++] |= data[0] >> offset;
--	if(len == 0)
-+	if (len == 0)
- 		return;
--	for(i = 1; i < len; i++)
--		msg[byte++] = (data[i-1] << (BITS_PER_BYTE - offset))
--			| (data[i] >> offset);
--	if(residue != 0) {
-+	for (i = 1; i < len; i++)
-+		msg[byte++] = (data[i - 1] << (BITS_PER_BYTE - offset))
-+		    | (data[i] >> offset);
-+	if (residue != 0) {
- 		__u8 mask = 0xff >> residue;
--		__u8 last_byte = data[i-1] << (BITS_PER_BYTE - offset)
--			| (data[i] >> offset);
--		
-+		__u8 last_byte = data[i - 1] << (BITS_PER_BYTE - offset)
-+		    | (data[i] >> offset);
+-
+ /* Get a double from user memory */
+ int FPU_load_double(double __user *dfloat, FPU_REG *loaded_data)
+ {
+-  int exp, tag, negative;
+-  unsigned m64, l64;
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ, dfloat, 8);
+-  FPU_get_user(m64, 1 + (unsigned long __user *) dfloat);
+-  FPU_get_user(l64, (unsigned long __user *) dfloat);
+-  RE_ENTRANT_CHECK_ON;
+-
+-  negative = (m64 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
+-  exp = ((m64 & 0x7ff00000) >> 20) - DOUBLE_Ebias + EXTENDED_Ebias;
+-  m64 &= 0xfffff;
+-  if ( exp > DOUBLE_Emax + EXTENDED_Ebias )
+-    {
+-      /* Infinity or NaN */
+-      if ((m64 == 0) && (l64 == 0))
+-	{
+-	  /* +- infinity */
+-	  loaded_data->sigh = 0x80000000;
+-	  loaded_data->sigl = 0x00000000;
+-	  exp = EXP_Infinity + EXTENDED_Ebias;
+-	  tag = TAG_Special;
+-	}
+-      else
+-	{
+-	  /* Must be a signaling or quiet NaN */
+-	  exp = EXP_NaN + EXTENDED_Ebias;
+-	  loaded_data->sigh = (m64 << 11) | 0x80000000;
+-	  loaded_data->sigh |= l64 >> 21;
+-	  loaded_data->sigl = l64 << 11;
+-	  tag = TAG_Special;    /* The calling function must look for NaNs */
+-	}
+-    }
+-  else if ( exp < DOUBLE_Emin + EXTENDED_Ebias )
+-    {
+-      /* Zero or de-normal */
+-      if ((m64 == 0) && (l64 == 0))
+-	{
+-	  /* Zero */
+-	  reg_copy(&CONST_Z, loaded_data);
+-	  exp = 0;
+-	  tag = TAG_Zero;
+-	}
+-      else
+-	{
+-	  /* De-normal */
+-	  loaded_data->sigh = m64 << 11;
+-	  loaded_data->sigh |= l64 >> 21;
+-	  loaded_data->sigl = l64 << 11;
+-
+-	  return normalize_no_excep(loaded_data, DOUBLE_Emin, negative)
+-	    | (denormal_operand() < 0 ? FPU_Exception : 0);
+-	}
+-    }
+-  else
+-    {
+-      loaded_data->sigh = (m64 << 11) | 0x80000000;
+-      loaded_data->sigh |= l64 >> 21;
+-      loaded_data->sigl = l64 << 11;
++	int exp, tag, negative;
++	unsigned m64, l64;
++
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_READ, dfloat, 8);
++	FPU_get_user(m64, 1 + (unsigned long __user *)dfloat);
++	FPU_get_user(l64, (unsigned long __user *)dfloat);
++	RE_ENTRANT_CHECK_ON;
 +
- 		last_byte &= ~mask;
- 		msg[byte] &= mask;
- 		msg[byte] |= last_byte;
- 	}
- 	return;
- }
++	negative = (m64 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
++	exp = ((m64 & 0x7ff00000) >> 20) - DOUBLE_Ebias + EXTENDED_Ebias;
++	m64 &= 0xfffff;
++	if (exp > DOUBLE_Emax + EXTENDED_Ebias) {
++		/* Infinity or NaN */
++		if ((m64 == 0) && (l64 == 0)) {
++			/* +- infinity */
++			loaded_data->sigh = 0x80000000;
++			loaded_data->sigl = 0x00000000;
++			exp = EXP_Infinity + EXTENDED_Ebias;
++			tag = TAG_Special;
++		} else {
++			/* Must be a signaling or quiet NaN */
++			exp = EXP_NaN + EXTENDED_Ebias;
++			loaded_data->sigh = (m64 << 11) | 0x80000000;
++			loaded_data->sigh |= l64 >> 21;
++			loaded_data->sigl = l64 << 11;
++			tag = TAG_Special;	/* The calling function must look for NaNs */
++		}
++	} else if (exp < DOUBLE_Emin + EXTENDED_Ebias) {
++		/* Zero or de-normal */
++		if ((m64 == 0) && (l64 == 0)) {
++			/* Zero */
++			reg_copy(&CONST_Z, loaded_data);
++			exp = 0;
++			tag = TAG_Zero;
++		} else {
++			/* De-normal */
++			loaded_data->sigh = m64 << 11;
++			loaded_data->sigh |= l64 >> 21;
++			loaded_data->sigl = l64 << 11;
 +
- /* unpack the data again (same arguments as cat_pack()). data buffer
-  * must be zero populated.
-  *
-@@ -163,7 +162,7 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
-  * data (starting at bit 0 in data).
-  */
- static void
--cat_unpack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
-+cat_unpack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
- {
- 	/* compute initial shift needed */
- 	const __u16 offset = start_bit % BITS_PER_BYTE;
-@@ -172,97 +171,97 @@ cat_unpack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
- 	__u16 byte = start_bit / BITS_PER_BYTE;
- 	int i;
++			return normalize_no_excep(loaded_data, DOUBLE_Emin,
++						  negative)
++			    | (denormal_operand() < 0 ? FPU_Exception : 0);
++		}
++	} else {
++		loaded_data->sigh = (m64 << 11) | 0x80000000;
++		loaded_data->sigh |= l64 >> 21;
++		loaded_data->sigl = l64 << 11;
  
--	if(last_bits != 0)
-+	if (last_bits != 0)
- 		len++;
+-      tag = TAG_Valid;
+-    }
++		tag = TAG_Valid;
++	}
  
- 	/* special case: want < 8 bits from msg and we can get it from
- 	 * a single byte of the msg */
--	if(len == 0 && BITS_PER_BYTE - offset >= num_bits) {
-+	if (len == 0 && BITS_PER_BYTE - offset >= num_bits) {
- 		data[0] = msg[byte] << offset;
- 		data[0] &= 0xff >> (BITS_PER_BYTE - num_bits);
- 		return;
- 	}
--	for(i = 0; i < len; i++) {
-+	for (i = 0; i < len; i++) {
- 		/* this annoying if has to be done just in case a read of
- 		 * msg one beyond the array causes a panic */
--		if(offset != 0) {
-+		if (offset != 0) {
- 			data[i] = msg[byte++] << offset;
- 			data[i] |= msg[byte] >> (BITS_PER_BYTE - offset);
--		}
--		else {
-+		} else {
- 			data[i] = msg[byte++];
- 		}
- 	}
- 	/* do we need to truncate the final byte */
--	if(last_bits != 0) {
--		data[i-1] &= 0xff << (BITS_PER_BYTE - last_bits);
-+	if (last_bits != 0) {
-+		data[i - 1] &= 0xff << (BITS_PER_BYTE - last_bits);
- 	}
- 	return;
+-  setexponent16(loaded_data, exp | negative);
++	setexponent16(loaded_data, exp | negative);
+ 
+-  return tag;
++	return tag;
  }
  
- static void
--cat_build_header(__u8 *header, const __u16 len, const __u16 smallest_reg_bits,
-+cat_build_header(__u8 * header, const __u16 len, const __u16 smallest_reg_bits,
- 		 const __u16 longest_reg_bits)
+-
+ /* Get a float from user memory */
+ int FPU_load_single(float __user *single, FPU_REG *loaded_data)
  {
- 	int i;
- 	__u16 start_bit = (smallest_reg_bits - 1) % BITS_PER_BYTE;
- 	__u8 *last_byte = &header[len - 1];
- 
--	if(start_bit == 0)
-+	if (start_bit == 0)
- 		start_bit = 1;	/* must have at least one bit in the hdr */
--	
--	for(i=0; i < len; i++)
+-  unsigned m32;
+-  int exp, tag, negative;
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ, single, 4);
+-  FPU_get_user(m32, (unsigned long __user *) single);
+-  RE_ENTRANT_CHECK_ON;
+-
+-  negative = (m32 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
+-
+-  if (!(m32 & 0x7fffffff))
+-    {
+-      /* Zero */
+-      reg_copy(&CONST_Z, loaded_data);
+-      addexponent(loaded_data, negative);
+-      return TAG_Zero;
+-    }
+-  exp = ((m32 & 0x7f800000) >> 23) - SINGLE_Ebias + EXTENDED_Ebias;
+-  m32 = (m32 & 0x7fffff) << 8;
+-  if ( exp < SINGLE_Emin + EXTENDED_Ebias )
+-    {
+-      /* De-normals */
+-      loaded_data->sigh = m32;
+-      loaded_data->sigl = 0;
+-
+-      return normalize_no_excep(loaded_data, SINGLE_Emin, negative)
+-	| (denormal_operand() < 0 ? FPU_Exception : 0);
+-    }
+-  else if ( exp > SINGLE_Emax + EXTENDED_Ebias )
+-    {
+-    /* Infinity or NaN */
+-      if ( m32 == 0 )
+-	{
+-	  /* +- infinity */
+-	  loaded_data->sigh = 0x80000000;
+-	  loaded_data->sigl = 0x00000000;
+-	  exp = EXP_Infinity + EXTENDED_Ebias;
+-	  tag = TAG_Special;
++	unsigned m32;
++	int exp, tag, negative;
 +
-+	for (i = 0; i < len; i++)
- 		header[i] = 0;
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_READ, single, 4);
++	FPU_get_user(m32, (unsigned long __user *)single);
++	RE_ENTRANT_CHECK_ON;
++
++	negative = (m32 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
++
++	if (!(m32 & 0x7fffffff)) {
++		/* Zero */
++		reg_copy(&CONST_Z, loaded_data);
++		addexponent(loaded_data, negative);
++		return TAG_Zero;
+ 	}
+-      else
+-	{
+-	  /* Must be a signaling or quiet NaN */
+-	  exp = EXP_NaN + EXTENDED_Ebias;
+-	  loaded_data->sigh = m32 | 0x80000000;
+-	  loaded_data->sigl = 0;
+-	  tag = TAG_Special;  /* The calling function must look for NaNs */
++	exp = ((m32 & 0x7f800000) >> 23) - SINGLE_Ebias + EXTENDED_Ebias;
++	m32 = (m32 & 0x7fffff) << 8;
++	if (exp < SINGLE_Emin + EXTENDED_Ebias) {
++		/* De-normals */
++		loaded_data->sigh = m32;
++		loaded_data->sigl = 0;
++
++		return normalize_no_excep(loaded_data, SINGLE_Emin, negative)
++		    | (denormal_operand() < 0 ? FPU_Exception : 0);
++	} else if (exp > SINGLE_Emax + EXTENDED_Ebias) {
++		/* Infinity or NaN */
++		if (m32 == 0) {
++			/* +- infinity */
++			loaded_data->sigh = 0x80000000;
++			loaded_data->sigl = 0x00000000;
++			exp = EXP_Infinity + EXTENDED_Ebias;
++			tag = TAG_Special;
++		} else {
++			/* Must be a signaling or quiet NaN */
++			exp = EXP_NaN + EXTENDED_Ebias;
++			loaded_data->sigh = m32 | 0x80000000;
++			loaded_data->sigl = 0;
++			tag = TAG_Special;	/* The calling function must look for NaNs */
++		}
++	} else {
++		loaded_data->sigh = m32 | 0x80000000;
++		loaded_data->sigl = 0;
++		tag = TAG_Valid;
+ 	}
+-    }
+-  else
+-    {
+-      loaded_data->sigh = m32 | 0x80000000;
+-      loaded_data->sigl = 0;
+-      tag = TAG_Valid;
+-    }
  
--	for(i = start_bit; i > 0; i--)
-+	for (i = start_bit; i > 0; i--)
- 		*last_byte = ((*last_byte) << 1) + 1;
+-  setexponent16(loaded_data, exp | negative);  /* Set the sign. */
++	setexponent16(loaded_data, exp | negative);	/* Set the sign. */
  
+-  return tag;
++	return tag;
  }
  
- static int
--cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op)
-+cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 op)
+-
+ /* Get a long long from user memory */
+ int FPU_load_int64(long long __user *_s)
  {
- 	__u8 parity, inst, inst_buf[4] = { 0 };
- 	__u8 iseq[VOYAGER_MAX_SCAN_PATH], hseq[VOYAGER_MAX_REG_SIZE];
- 	__u16 ibytes, hbytes, padbits;
- 	int i;
--	
+-  long long s;
+-  int sign;
+-  FPU_REG *st0_ptr = &st(0);
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ, _s, 8);
+-  if (copy_from_user(&s,_s,8))
+-    FPU_abort;
+-  RE_ENTRANT_CHECK_ON;
+-
+-  if (s == 0)
+-    {
+-      reg_copy(&CONST_Z, st0_ptr);
+-      return TAG_Zero;
+-    }
+-
+-  if (s > 0)
+-    sign = SIGN_Positive;
+-  else
+-  {
+-    s = -s;
+-    sign = SIGN_Negative;
+-  }
+-
+-  significand(st0_ptr) = s;
+-
+-  return normalize_no_excep(st0_ptr, 63, sign);
+-}
++	long long s;
++	int sign;
++	FPU_REG *st0_ptr = &st(0);
 +
- 	/* 
- 	 * Parity is the parity of the register number + 1 (READ_REGISTER
- 	 * and WRITE_REGISTER always add '1' to the number of bits == 1)
- 	 */
--	parity = (__u8)(1 + (reg & 0x01) +
--	         ((__u8)(reg & 0x02) >> 1) +
--	         ((__u8)(reg & 0x04) >> 2) +
--	         ((__u8)(reg & 0x08) >> 3)) % 2;
-+	parity = (__u8) (1 + (reg & 0x01) +
-+			 ((__u8) (reg & 0x02) >> 1) +
-+			 ((__u8) (reg & 0x04) >> 2) +
-+			 ((__u8) (reg & 0x08) >> 3)) % 2;
- 
- 	inst = ((parity << 7) | (reg << 2) | op);
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_READ, _s, 8);
++	if (copy_from_user(&s, _s, 8))
++		FPU_abort;
++	RE_ENTRANT_CHECK_ON;
++
++	if (s == 0) {
++		reg_copy(&CONST_Z, st0_ptr);
++		return TAG_Zero;
++	}
++
++	if (s > 0)
++		sign = SIGN_Positive;
++	else {
++		s = -s;
++		sign = SIGN_Negative;
++	}
  
- 	outb(VOYAGER_CAT_IRCYC, CAT_CMD);
--	if(!modp->scan_path_connected) {
--		if(asicp->asic_id != VOYAGER_CAT_ID) {
--			printk("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n");
-+	if (!modp->scan_path_connected) {
-+		if (asicp->asic_id != VOYAGER_CAT_ID) {
-+			printk
-+			    ("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n");
- 			return 1;
- 		}
- 		outb(VOYAGER_CAT_HEADER, CAT_DATA);
- 		outb(inst, CAT_DATA);
--		if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
-+		if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
- 			CDEBUG(("VOYAGER CAT: cat_sendinst failed to get CAT_HEADER\n"));
- 			return 1;
- 		}
- 		return 0;
- 	}
- 	ibytes = modp->inst_bits / BITS_PER_BYTE;
--	if((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) {
-+	if ((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) {
- 		padbits = BITS_PER_BYTE - padbits;
- 		ibytes++;
- 	}
- 	hbytes = modp->largest_reg / BITS_PER_BYTE;
--	if(modp->largest_reg % BITS_PER_BYTE)
-+	if (modp->largest_reg % BITS_PER_BYTE)
- 		hbytes++;
- 	CDEBUG(("cat_sendinst: ibytes=%d, hbytes=%d\n", ibytes, hbytes));
- 	/* initialise the instruction sequence to 0xff */
--	for(i=0; i < ibytes + hbytes; i++)
-+	for (i = 0; i < ibytes + hbytes; i++)
- 		iseq[i] = 0xff;
- 	cat_build_header(hseq, hbytes, modp->smallest_reg, modp->largest_reg);
- 	cat_pack(iseq, modp->inst_bits, hseq, hbytes * BITS_PER_BYTE);
-@@ -271,11 +270,11 @@ cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op)
- 	cat_pack(iseq, asicp->bit_location, inst_buf, asicp->ireg_length);
- #ifdef VOYAGER_CAT_DEBUG
- 	printk("ins = 0x%x, iseq: ", inst);
--	for(i=0; i< ibytes + hbytes; i++)
-+	for (i = 0; i < ibytes + hbytes; i++)
- 		printk("0x%x ", iseq[i]);
- 	printk("\n");
- #endif
--	if(cat_shiftout(iseq, ibytes, hbytes, padbits)) {
-+	if (cat_shiftout(iseq, ibytes, hbytes, padbits)) {
- 		CDEBUG(("VOYAGER CAT: cat_sendinst: cat_shiftout failed\n"));
- 		return 1;
- 	}
-@@ -284,72 +283,74 @@ cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op)
- }
++	significand(st0_ptr) = s;
++
++	return normalize_no_excep(st0_ptr, 63, sign);
++}
  
- static int
--cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, 
--	    __u8 *value)
-+cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
-+	    __u8 * value)
+ /* Get a long from user memory */
+ int FPU_load_int32(long __user *_s, FPU_REG *loaded_data)
  {
--	if(!modp->scan_path_connected) {
--		if(asicp->asic_id != VOYAGER_CAT_ID) {
-+	if (!modp->scan_path_connected) {
-+		if (asicp->asic_id != VOYAGER_CAT_ID) {
- 			CDEBUG(("VOYAGER CAT: ERROR: cat_getdata to CAT asic with scan path connected\n"));
- 			return 1;
- 		}
--		if(reg > VOYAGER_SUBADDRHI) 
-+		if (reg > VOYAGER_SUBADDRHI)
- 			outb(VOYAGER_CAT_RUN, CAT_CMD);
- 		outb(VOYAGER_CAT_DRCYC, CAT_CMD);
- 		outb(VOYAGER_CAT_HEADER, CAT_DATA);
- 		*value = inb(CAT_DATA);
- 		outb(0xAA, CAT_DATA);
--		if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
-+		if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
- 			CDEBUG(("cat_getdata: failed to get VOYAGER_CAT_HEADER\n"));
- 			return 1;
- 		}
- 		return 0;
--	}
--	else {
--		__u16 sbits = modp->num_asics -1 + asicp->ireg_length;
-+	} else {
-+		__u16 sbits = modp->num_asics - 1 + asicp->ireg_length;
- 		__u16 sbytes = sbits / BITS_PER_BYTE;
- 		__u16 tbytes;
--		__u8 string[VOYAGER_MAX_SCAN_PATH], trailer[VOYAGER_MAX_REG_SIZE];
-+		__u8 string[VOYAGER_MAX_SCAN_PATH],
-+		    trailer[VOYAGER_MAX_REG_SIZE];
- 		__u8 padbits;
- 		int i;
--		
-+
- 		outb(VOYAGER_CAT_DRCYC, CAT_CMD);
+-  long s;
+-  int negative;
++	long s;
++	int negative;
  
--		if((padbits = sbits % BITS_PER_BYTE) != 0) {
-+		if ((padbits = sbits % BITS_PER_BYTE) != 0) {
- 			padbits = BITS_PER_BYTE - padbits;
- 			sbytes++;
- 		}
- 		tbytes = asicp->ireg_length / BITS_PER_BYTE;
--		if(asicp->ireg_length % BITS_PER_BYTE)
-+		if (asicp->ireg_length % BITS_PER_BYTE)
- 			tbytes++;
- 		CDEBUG(("cat_getdata: tbytes = %d, sbytes = %d, padbits = %d\n",
--			tbytes,	sbytes, padbits));
-+			tbytes, sbytes, padbits));
- 		cat_build_header(trailer, tbytes, 1, asicp->ireg_length);
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ, _s, 4);
+-  FPU_get_user(s, _s);
+-  RE_ENTRANT_CHECK_ON;
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_READ, _s, 4);
++	FPU_get_user(s, _s);
++	RE_ENTRANT_CHECK_ON;
  
--		
--		for(i = tbytes - 1; i >= 0; i--) {
-+		for (i = tbytes - 1; i >= 0; i--) {
- 			outb(trailer[i], CAT_DATA);
- 			string[sbytes + i] = inb(CAT_DATA);
- 		}
+-  if (s == 0)
+-    { reg_copy(&CONST_Z, loaded_data); return TAG_Zero; }
++	if (s == 0) {
++		reg_copy(&CONST_Z, loaded_data);
++		return TAG_Zero;
++	}
  
--		for(i = sbytes - 1; i >= 0; i--) {
-+		for (i = sbytes - 1; i >= 0; i--) {
- 			outb(0xaa, CAT_DATA);
- 			string[i] = inb(CAT_DATA);
- 		}
- 		*value = 0;
--		cat_unpack(string, padbits + (tbytes * BITS_PER_BYTE) + asicp->asic_location, value, asicp->ireg_length);
-+		cat_unpack(string,
-+			   padbits + (tbytes * BITS_PER_BYTE) +
-+			   asicp->asic_location, value, asicp->ireg_length);
- #ifdef VOYAGER_CAT_DEBUG
- 		printk("value=0x%x, string: ", *value);
--		for(i=0; i< tbytes+sbytes; i++)
-+		for (i = 0; i < tbytes + sbytes; i++)
- 			printk("0x%x ", string[i]);
- 		printk("\n");
- #endif
--		
-+
- 		/* sanity check the rest of the return */
--		for(i=0; i < tbytes; i++) {
-+		for (i = 0; i < tbytes; i++) {
- 			__u8 input = 0;
+-  if (s > 0)
+-    negative = SIGN_Positive;
+-  else
+-    {
+-      s = -s;
+-      negative = SIGN_Negative;
+-    }
++	if (s > 0)
++		negative = SIGN_Positive;
++	else {
++		s = -s;
++		negative = SIGN_Negative;
++	}
  
--			cat_unpack(string, padbits + (i * BITS_PER_BYTE), &input, BITS_PER_BYTE);
--			if(trailer[i] != input) {
-+			cat_unpack(string, padbits + (i * BITS_PER_BYTE),
-+				   &input, BITS_PER_BYTE);
-+			if (trailer[i] != input) {
- 				CDEBUG(("cat_getdata: failed to sanity check rest of ret(%d) 0x%x != 0x%x\n", i, input, trailer[i]));
- 				return 1;
- 			}
-@@ -360,14 +361,14 @@ cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
+-  loaded_data->sigh = s;
+-  loaded_data->sigl = 0;
++	loaded_data->sigh = s;
++	loaded_data->sigl = 0;
+ 
+-  return normalize_no_excep(loaded_data, 31, negative);
++	return normalize_no_excep(loaded_data, 31, negative);
  }
  
- static int
--cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
-+cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
+-
+ /* Get a short from user memory */
+ int FPU_load_int16(short __user *_s, FPU_REG *loaded_data)
  {
- 	int i;
--	
--	for(i = data_bytes + header_bytes - 1; i >= header_bytes; i--)
-+
-+	for (i = data_bytes + header_bytes - 1; i >= header_bytes; i--)
- 		outb(data[i], CAT_DATA);
+-  int s, negative;
++	int s, negative;
  
--	for(i = header_bytes - 1; i >= 0; i--) {
-+	for (i = header_bytes - 1; i >= 0; i--) {
- 		__u8 header = 0;
- 		__u8 input;
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ, _s, 2);
+-  /* Cast as short to get the sign extended. */
+-  FPU_get_user(s, _s);
+-  RE_ENTRANT_CHECK_ON;
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_READ, _s, 2);
++	/* Cast as short to get the sign extended. */
++	FPU_get_user(s, _s);
++	RE_ENTRANT_CHECK_ON;
  
-@@ -376,7 +377,7 @@ cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
- 		CDEBUG(("cat_shiftout: returned 0x%x\n", input));
- 		cat_unpack(data, ((data_bytes + i) * BITS_PER_BYTE) - pad_bits,
- 			   &header, BITS_PER_BYTE);
--		if(input != header) {
-+		if (input != header) {
- 			CDEBUG(("VOYAGER CAT: cat_shiftout failed to return header 0x%x != 0x%x\n", input, header));
- 			return 1;
- 		}
-@@ -385,57 +386,57 @@ cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
+-  if (s == 0)
+-    { reg_copy(&CONST_Z, loaded_data); return TAG_Zero; }
++	if (s == 0) {
++		reg_copy(&CONST_Z, loaded_data);
++		return TAG_Zero;
++	}
+ 
+-  if (s > 0)
+-    negative = SIGN_Positive;
+-  else
+-    {
+-      s = -s;
+-      negative = SIGN_Negative;
+-    }
++	if (s > 0)
++		negative = SIGN_Positive;
++	else {
++		s = -s;
++		negative = SIGN_Negative;
++	}
+ 
+-  loaded_data->sigh = s << 16;
+-  loaded_data->sigl = 0;
++	loaded_data->sigh = s << 16;
++	loaded_data->sigl = 0;
+ 
+-  return normalize_no_excep(loaded_data, 15, negative);
++	return normalize_no_excep(loaded_data, 15, negative);
  }
  
- static int
--cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp, 
-+cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
- 	     __u8 reg, __u8 value)
+-
+ /* Get a packed bcd array from user memory */
+ int FPU_load_bcd(u_char __user *s)
  {
- 	outb(VOYAGER_CAT_DRCYC, CAT_CMD);
--	if(!modp->scan_path_connected) {
--		if(asicp->asic_id != VOYAGER_CAT_ID) {
-+	if (!modp->scan_path_connected) {
-+		if (asicp->asic_id != VOYAGER_CAT_ID) {
- 			CDEBUG(("VOYAGER CAT: ERROR: scan path disconnected when asic != CAT\n"));
- 			return 1;
- 		}
- 		outb(VOYAGER_CAT_HEADER, CAT_DATA);
- 		outb(value, CAT_DATA);
--		if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
-+		if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
- 			CDEBUG(("cat_senddata: failed to get correct header response to sent data\n"));
- 			return 1;
- 		}
--		if(reg > VOYAGER_SUBADDRHI) {
-+		if (reg > VOYAGER_SUBADDRHI) {
- 			outb(VOYAGER_CAT_RUN, CAT_CMD);
- 			outb(VOYAGER_CAT_END, CAT_CMD);
- 			outb(VOYAGER_CAT_RUN, CAT_CMD);
- 		}
--		
+-  FPU_REG *st0_ptr = &st(0);
+-  int pos;
+-  u_char bcd;
+-  long long l=0;
+-  int sign;
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ, s, 10);
+-  RE_ENTRANT_CHECK_ON;
+-  for ( pos = 8; pos >= 0; pos--)
+-    {
+-      l *= 10;
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_get_user(bcd, s+pos);
+-      RE_ENTRANT_CHECK_ON;
+-      l += bcd >> 4;
+-      l *= 10;
+-      l += bcd & 0x0f;
+-    }
+- 
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_get_user(sign, s+9);
+-  sign = sign & 0x80 ? SIGN_Negative : SIGN_Positive;
+-  RE_ENTRANT_CHECK_ON;
+-
+-  if ( l == 0 )
+-    {
+-      reg_copy(&CONST_Z, st0_ptr);
+-      addexponent(st0_ptr, sign);   /* Set the sign. */
+-      return TAG_Zero;
+-    }
+-  else
+-    {
+-      significand(st0_ptr) = l;
+-      return normalize_no_excep(st0_ptr, 63, sign);
+-    }
++	FPU_REG *st0_ptr = &st(0);
++	int pos;
++	u_char bcd;
++	long long l = 0;
++	int sign;
 +
- 		return 0;
--	}
--	else {
-+	} else {
- 		__u16 hbytes = asicp->ireg_length / BITS_PER_BYTE;
--		__u16 dbytes = (modp->num_asics - 1 + asicp->ireg_length)/BITS_PER_BYTE;
--		__u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH], 
--			hseq[VOYAGER_MAX_REG_SIZE];
-+		__u16 dbytes =
-+		    (modp->num_asics - 1 + asicp->ireg_length) / BITS_PER_BYTE;
-+		__u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH],
-+		    hseq[VOYAGER_MAX_REG_SIZE];
- 		int i;
- 
--		if((padbits = (modp->num_asics - 1 
--			       + asicp->ireg_length) % BITS_PER_BYTE) != 0) {
-+		if ((padbits = (modp->num_asics - 1
-+				+ asicp->ireg_length) % BITS_PER_BYTE) != 0) {
- 			padbits = BITS_PER_BYTE - padbits;
- 			dbytes++;
- 		}
--		if(asicp->ireg_length % BITS_PER_BYTE)
-+		if (asicp->ireg_length % BITS_PER_BYTE)
- 			hbytes++;
--		
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_READ, s, 10);
++	RE_ENTRANT_CHECK_ON;
++	for (pos = 8; pos >= 0; pos--) {
++		l *= 10;
++		RE_ENTRANT_CHECK_OFF;
++		FPU_get_user(bcd, s + pos);
++		RE_ENTRANT_CHECK_ON;
++		l += bcd >> 4;
++		l *= 10;
++		l += bcd & 0x0f;
++	}
 +
- 		cat_build_header(hseq, hbytes, 1, asicp->ireg_length);
--		
--		for(i = 0; i < dbytes + hbytes; i++)
++	RE_ENTRANT_CHECK_OFF;
++	FPU_get_user(sign, s + 9);
++	sign = sign & 0x80 ? SIGN_Negative : SIGN_Positive;
++	RE_ENTRANT_CHECK_ON;
 +
-+		for (i = 0; i < dbytes + hbytes; i++)
- 			dseq[i] = 0xff;
- 		CDEBUG(("cat_senddata: dbytes=%d, hbytes=%d, padbits=%d\n",
- 			dbytes, hbytes, padbits));
- 		cat_pack(dseq, modp->num_asics - 1 + asicp->ireg_length,
- 			 hseq, hbytes * BITS_PER_BYTE);
--		cat_pack(dseq, asicp->asic_location, &value, 
-+		cat_pack(dseq, asicp->asic_location, &value,
- 			 asicp->ireg_length);
- #ifdef VOYAGER_CAT_DEBUG
- 		printk("dseq ");
--		for(i=0; i<hbytes+dbytes; i++) {
-+		for (i = 0; i < hbytes + dbytes; i++) {
- 			printk("0x%x ", dseq[i]);
- 		}
- 		printk("\n");
-@@ -445,121 +446,125 @@ cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp,
- }
- 
- static int
--cat_write(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
--	 __u8 value)
-+cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 value)
- {
--	if(cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG))
-+	if (cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG))
- 		return 1;
- 	return cat_senddata(modp, asicp, reg, value);
++	if (l == 0) {
++		reg_copy(&CONST_Z, st0_ptr);
++		addexponent(st0_ptr, sign);	/* Set the sign. */
++		return TAG_Zero;
++	} else {
++		significand(st0_ptr) = l;
++		return normalize_no_excep(st0_ptr, 63, sign);
++	}
  }
  
- static int
--cat_read(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
--	 __u8 *value)
-+cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
-+	 __u8 * value)
- {
--	if(cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG))
-+	if (cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG))
- 		return 1;
- 	return cat_getdata(modp, asicp, reg, value);
- }
+ /*===========================================================================*/
  
- static int
--cat_subaddrsetup(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
-+cat_subaddrsetup(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
- 		 __u16 len)
+ /* Put a long double into user memory */
+-int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag, long double __user *d)
++int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag,
++		       long double __user * d)
  {
- 	__u8 val;
- 
--	if(len > 1) {
-+	if (len > 1) {
- 		/* set auto increment */
- 		__u8 newval;
--		
--		if(cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) {
-+
-+		if (cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) {
- 			CDEBUG(("cat_subaddrsetup: read of VOYAGER_AUTO_INC_REG failed\n"));
- 			return 1;
- 		}
--		CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n", val));
-+		CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n",
-+			val));
- 		newval = val | VOYAGER_AUTO_INC;
--		if(newval != val) {
--			if(cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, val)) {
-+		if (newval != val) {
-+			if (cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, val)) {
- 				CDEBUG(("cat_subaddrsetup: write to VOYAGER_AUTO_INC_REG failed\n"));
- 				return 1;
- 			}
- 		}
- 	}
--	if(cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8)(offset &0xff))) {
-+	if (cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8) (offset & 0xff))) {
- 		CDEBUG(("cat_subaddrsetup: write to SUBADDRLO failed\n"));
- 		return 1;
- 	}
--	if(asicp->subaddr > VOYAGER_SUBADDR_LO) {
--		if(cat_write(modp, asicp, VOYAGER_SUBADDRHI, (__u8)(offset >> 8))) {
-+	if (asicp->subaddr > VOYAGER_SUBADDR_LO) {
-+		if (cat_write
-+		    (modp, asicp, VOYAGER_SUBADDRHI, (__u8) (offset >> 8))) {
- 			CDEBUG(("cat_subaddrsetup: write to SUBADDRHI failed\n"));
- 			return 1;
- 		}
- 		cat_read(modp, asicp, VOYAGER_SUBADDRHI, &val);
--		CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset, val));
-+		CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset,
-+			val));
- 	}
- 	cat_read(modp, asicp, VOYAGER_SUBADDRLO, &val);
- 	CDEBUG(("cat_subaddrsetup: offset = %d, lo = %d\n", offset, val));
- 	return 0;
- }
--		
+-  /*
+-    The only exception raised by an attempt to store to an
+-    extended format is the Invalid Stack exception, i.e.
+-    attempting to store from an empty register.
+-   */
+-
+-  if ( st0_tag != TAG_Empty )
+-    {
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_WRITE, d, 10);
+-
+-      FPU_put_user(st0_ptr->sigl, (unsigned long __user *) d);
+-      FPU_put_user(st0_ptr->sigh, (unsigned long __user *) ((u_char __user *)d + 4));
+-      FPU_put_user(exponent16(st0_ptr), (unsigned short __user *) ((u_char __user *)d + 8));
+-      RE_ENTRANT_CHECK_ON;
+-
+-      return 1;
+-    }
+-
+-  /* Empty register (stack underflow) */
+-  EXCEPTION(EX_StackUnder);
+-  if ( control_word & CW_Invalid )
+-    {
+-      /* The masked response */
+-      /* Put out the QNaN indefinite */
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_WRITE,d,10);
+-      FPU_put_user(0, (unsigned long __user *) d);
+-      FPU_put_user(0xc0000000, 1 + (unsigned long __user *) d);
+-      FPU_put_user(0xffff, 4 + (short __user *) d);
+-      RE_ENTRANT_CHECK_ON;
+-      return 1;
+-    }
+-  else
+-    return 0;
++	/*
++	   The only exception raised by an attempt to store to an
++	   extended format is the Invalid Stack exception, i.e.
++	   attempting to store from an empty register.
++	 */
 +
- static int
--cat_subwrite(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
--	    __u16 len, void *buf)
-+cat_subwrite(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
-+	     __u16 len, void *buf)
- {
- 	int i, retval;
- 
- 	/* FIXME: need special actions for VOYAGER_CAT_ID here */
--	if(asicp->asic_id == VOYAGER_CAT_ID) {
-+	if (asicp->asic_id == VOYAGER_CAT_ID) {
- 		CDEBUG(("cat_subwrite: ATTEMPT TO WRITE TO CAT ASIC\n"));
- 		/* FIXME -- This is supposed to be handled better
- 		 * There is a problem writing to the cat asic in the
- 		 * PSI.  The 30us delay seems to work, though */
- 		udelay(30);
- 	}
--		
--	if((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
++	if (st0_tag != TAG_Empty) {
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_WRITE, d, 10);
 +
-+	if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
- 		printk("cat_subwrite: cat_subaddrsetup FAILED\n");
- 		return retval;
- 	}
--	
--	if(cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) {
++		FPU_put_user(st0_ptr->sigl, (unsigned long __user *)d);
++		FPU_put_user(st0_ptr->sigh,
++			     (unsigned long __user *)((u_char __user *) d + 4));
++		FPU_put_user(exponent16(st0_ptr),
++			     (unsigned short __user *)((u_char __user *) d +
++						       8));
++		RE_ENTRANT_CHECK_ON;
 +
-+	if (cat_sendinst
-+	    (modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) {
- 		printk("cat_subwrite: cat_sendinst FAILED\n");
- 		return 1;
- 	}
--	for(i = 0; i < len; i++) {
--		if(cat_senddata(modp, asicp, 0xFF, ((__u8 *)buf)[i])) {
--			printk("cat_subwrite: cat_sendata element at %d FAILED\n", i);
-+	for (i = 0; i < len; i++) {
-+		if (cat_senddata(modp, asicp, 0xFF, ((__u8 *) buf)[i])) {
-+			printk
-+			    ("cat_subwrite: cat_sendata element at %d FAILED\n",
-+			     i);
- 			return 1;
- 		}
- 	}
- 	return 0;
- }
- static int
--cat_subread(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
-+cat_subread(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
- 	    __u16 len, void *buf)
- {
- 	int i, retval;
++		return 1;
++	}
  
--	if((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
-+	if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
- 		CDEBUG(("cat_subread: cat_subaddrsetup FAILED\n"));
- 		return retval;
- 	}
+-}
++	/* Empty register (stack underflow) */
++	EXCEPTION(EX_StackUnder);
++	if (control_word & CW_Invalid) {
++		/* The masked response */
++		/* Put out the QNaN indefinite */
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_WRITE, d, 10);
++		FPU_put_user(0, (unsigned long __user *)d);
++		FPU_put_user(0xc0000000, 1 + (unsigned long __user *)d);
++		FPU_put_user(0xffff, 4 + (short __user *)d);
++		RE_ENTRANT_CHECK_ON;
++		return 1;
++	} else
++		return 0;
  
--	if(cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) {
-+	if (cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) {
- 		CDEBUG(("cat_subread: cat_sendinst failed\n"));
- 		return 1;
- 	}
--	for(i = 0; i < len; i++) {
--		if(cat_getdata(modp, asicp, 0xFF,
--			       &((__u8 *)buf)[i])) {
--			CDEBUG(("cat_subread: cat_getdata element %d failed\n", i));
-+	for (i = 0; i < len; i++) {
-+		if (cat_getdata(modp, asicp, 0xFF, &((__u8 *) buf)[i])) {
-+			CDEBUG(("cat_subread: cat_getdata element %d failed\n",
-+				i));
- 			return 1;
- 		}
- 	}
- 	return 0;
- }
++}
  
--
- /* buffer for storing EPROM data read in during initialisation */
- static __initdata __u8 eprom_buf[0xFFFF];
- static voyager_module_t *voyager_initial_module;
-@@ -568,8 +573,7 @@ static voyager_module_t *voyager_initial_module;
-  * boot cpu *after* all memory initialisation has been done (so we can
-  * use kmalloc) but before smp initialisation, so we can probe the SMP
-  * configuration and pick up necessary information.  */
--void __init
--voyager_cat_init(void)
-+void __init voyager_cat_init(void)
+ /* Put a double into user memory */
+ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat)
  {
- 	voyager_module_t **modpp = &voyager_initial_module;
- 	voyager_asic_t **asicpp;
-@@ -578,27 +582,29 @@ voyager_cat_init(void)
- 	unsigned long qic_addr = 0;
- 	__u8 qabc_data[0x20];
- 	__u8 num_submodules, val;
--	voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *)&eprom_buf[0];
--	
-+	voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *) & eprom_buf[0];
+-  unsigned long l[2];
+-  unsigned long increment = 0;	/* avoid gcc warnings */
+-  int precision_loss;
+-  int exp;
+-  FPU_REG tmp;
++	unsigned long l[2];
++	unsigned long increment = 0;	/* avoid gcc warnings */
++	int precision_loss;
++	int exp;
++	FPU_REG tmp;
+ 
+-  if ( st0_tag == TAG_Valid )
+-    {
+-      reg_copy(st0_ptr, &tmp);
+-      exp = exponent(&tmp);
++	if (st0_tag == TAG_Valid) {
++		reg_copy(st0_ptr, &tmp);
++		exp = exponent(&tmp);
+ 
+-      if ( exp < DOUBLE_Emin )     /* It may be a denormal */
+-	{
+-	  addexponent(&tmp, -DOUBLE_Emin + 52);  /* largest exp to be 51 */
++		if (exp < DOUBLE_Emin) {	/* It may be a denormal */
++			addexponent(&tmp, -DOUBLE_Emin + 52);	/* largest exp to be 51 */
+ 
+-	denormal_arg:
++		      denormal_arg:
+ 
+-	  if ( (precision_loss = FPU_round_to_int(&tmp, st0_tag)) )
+-	    {
++			if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
+ #ifdef PECULIAR_486
+-	      /* Did it round to a non-denormal ? */
+-	      /* This behaviour might be regarded as peculiar, it appears
+-		 that the 80486 rounds to the dest precision, then
+-		 converts to decide underflow. */
+-	      if ( !((tmp.sigh == 0x00100000) && (tmp.sigl == 0) &&
+-		  (st0_ptr->sigl & 0x000007ff)) )
++				/* Did it round to a non-denormal ? */
++				/* This behaviour might be regarded as peculiar, it appears
++				   that the 80486 rounds to the dest precision, then
++				   converts to decide underflow. */
++				if (!
++				    ((tmp.sigh == 0x00100000) && (tmp.sigl == 0)
++				     && (st0_ptr->sigl & 0x000007ff)))
+ #endif /* PECULIAR_486 */
+-		{
+-		  EXCEPTION(EX_Underflow);
+-		  /* This is a special case: see sec 16.2.5.1 of
+-		     the 80486 book */
+-		  if ( !(control_word & CW_Underflow) )
+-		    return 0;
+-		}
+-	      EXCEPTION(precision_loss);
+-	      if ( !(control_word & CW_Precision) )
+-		return 0;
+-	    }
+-	  l[0] = tmp.sigl;
+-	  l[1] = tmp.sigh;
+-	}
+-      else
+-	{
+-	  if ( tmp.sigl & 0x000007ff )
+-	    {
+-	      precision_loss = 1;
+-	      switch (control_word & CW_RC)
+-		{
+-		case RC_RND:
+-		  /* Rounding can get a little messy.. */
+-		  increment = ((tmp.sigl & 0x7ff) > 0x400) |  /* nearest */
+-		    ((tmp.sigl & 0xc00) == 0xc00);            /* odd -> even */
+-		  break;
+-		case RC_DOWN:   /* towards -infinity */
+-		  increment = signpositive(&tmp) ? 0 : tmp.sigl & 0x7ff;
+-		  break;
+-		case RC_UP:     /* towards +infinity */
+-		  increment = signpositive(&tmp) ? tmp.sigl & 0x7ff : 0;
+-		  break;
+-		case RC_CHOP:
+-		  increment = 0;
+-		  break;
+-		}
+-	  
+-	      /* Truncate the mantissa */
+-	      tmp.sigl &= 0xfffff800;
+-	  
+-	      if ( increment )
+-		{
+-		  if ( tmp.sigl >= 0xfffff800 )
+-		    {
+-		      /* the sigl part overflows */
+-		      if ( tmp.sigh == 0xffffffff )
+-			{
+-			  /* The sigh part overflows */
+-			  tmp.sigh = 0x80000000;
+-			  exp++;
+-			  if (exp >= EXP_OVER)
+-			    goto overflow;
++				{
++					EXCEPTION(EX_Underflow);
++					/* This is a special case: see sec 16.2.5.1 of
++					   the 80486 book */
++					if (!(control_word & CW_Underflow))
++						return 0;
++				}
++				EXCEPTION(precision_loss);
++				if (!(control_word & CW_Precision))
++					return 0;
+ 			}
+-		      else
+-			{
+-			  tmp.sigh ++;
++			l[0] = tmp.sigl;
++			l[1] = tmp.sigh;
++		} else {
++			if (tmp.sigl & 0x000007ff) {
++				precision_loss = 1;
++				switch (control_word & CW_RC) {
++				case RC_RND:
++					/* Rounding can get a little messy.. */
++					increment = ((tmp.sigl & 0x7ff) > 0x400) |	/* nearest */
++					    ((tmp.sigl & 0xc00) == 0xc00);	/* odd -> even */
++					break;
++				case RC_DOWN:	/* towards -infinity */
++					increment =
++					    signpositive(&tmp) ? 0 : tmp.
++					    sigl & 0x7ff;
++					break;
++				case RC_UP:	/* towards +infinity */
++					increment =
++					    signpositive(&tmp) ? tmp.
++					    sigl & 0x7ff : 0;
++					break;
++				case RC_CHOP:
++					increment = 0;
++					break;
++				}
 +
- 	__u8 cmos[4];
- 	unsigned long addr;
--	
++				/* Truncate the mantissa */
++				tmp.sigl &= 0xfffff800;
 +
- 	/* initiallise the SUS mailbox */
--	for(i=0; i<sizeof(cmos); i++)
-+	for (i = 0; i < sizeof(cmos); i++)
- 		cmos[i] = voyager_extended_cmos_read(VOYAGER_DUMP_LOCATION + i);
- 	addr = *(unsigned long *)cmos;
--	if((addr & 0xff000000) != 0xff000000) {
--		printk(KERN_ERR "Voyager failed to get SUS mailbox (addr = 0x%lx\n", addr);
-+	if ((addr & 0xff000000) != 0xff000000) {
-+		printk(KERN_ERR
-+		       "Voyager failed to get SUS mailbox (addr = 0x%lx\n",
-+		       addr);
- 	} else {
- 		static struct resource res;
--		
++				if (increment) {
++					if (tmp.sigl >= 0xfffff800) {
++						/* the sigl part overflows */
++						if (tmp.sigh == 0xffffffff) {
++							/* The sigh part overflows */
++							tmp.sigh = 0x80000000;
++							exp++;
++							if (exp >= EXP_OVER)
++								goto overflow;
++						} else {
++							tmp.sigh++;
++						}
++						tmp.sigl = 0x00000000;
++					} else {
++						/* We only need to increment sigl */
++						tmp.sigl += 0x00000800;
++					}
++				}
++			} else
++				precision_loss = 0;
 +
- 		res.name = "voyager SUS";
- 		res.start = addr;
--		res.end = addr+0x3ff;
--		
-+		res.end = addr + 0x3ff;
++			l[0] = (tmp.sigl >> 11) | (tmp.sigh << 21);
++			l[1] = ((tmp.sigh >> 11) & 0xfffff);
 +
- 		request_resource(&iomem_resource, &res);
- 		voyager_SUS = (struct voyager_SUS *)
--			ioremap(addr, 0x400);
-+		    ioremap(addr, 0x400);
- 		printk(KERN_NOTICE "Voyager SUS mailbox version 0x%x\n",
- 		       voyager_SUS->SUS_version);
- 		voyager_SUS->kernel_version = VOYAGER_MAILBOX_VERSION;
-@@ -609,8 +615,6 @@ voyager_cat_init(void)
- 	voyager_extended_vic_processors = 0;
- 	voyager_quad_processors = 0;
- 
++			if (exp > DOUBLE_Emax) {
++			      overflow:
++				EXCEPTION(EX_Overflow);
++				if (!(control_word & CW_Overflow))
++					return 0;
++				set_precision_flag_up();
++				if (!(control_word & CW_Precision))
++					return 0;
++
++				/* This is a special case: see sec 16.2.5.1 of the 80486 book */
++				/* Overflow to infinity */
++				l[0] = 0x00000000;	/* Set to */
++				l[1] = 0x7ff00000;	/* + INF */
++			} else {
++				if (precision_loss) {
++					if (increment)
++						set_precision_flag_up();
++					else
++						set_precision_flag_down();
++				}
++				/* Add the exponent */
++				l[1] |= (((exp + DOUBLE_Ebias) & 0x7ff) << 20);
+ 			}
+-		      tmp.sigl = 0x00000000;
+-		    }
+-		  else
+-		    {
+-		      /* We only need to increment sigl */
+-		      tmp.sigl += 0x00000800;
+-		    }
+-		}
+-	    }
+-	  else
+-	    precision_loss = 0;
+-	  
+-	  l[0] = (tmp.sigl >> 11) | (tmp.sigh << 21);
+-	  l[1] = ((tmp.sigh >> 11) & 0xfffff);
 -
+-	  if ( exp > DOUBLE_Emax )
+-	    {
+-	    overflow:
+-	      EXCEPTION(EX_Overflow);
+-	      if ( !(control_word & CW_Overflow) )
+-		return 0;
+-	      set_precision_flag_up();
+-	      if ( !(control_word & CW_Precision) )
+-		return 0;
 -
- 	printk("VOYAGER: beginning CAT bus probe\n");
- 	/* set up the SuperSet Port Block which tells us where the
- 	 * CAT communication port is */
-@@ -618,14 +622,14 @@ voyager_cat_init(void)
- 	VDEBUG(("VOYAGER DEBUG: sspb = 0x%x\n", sspb));
- 
- 	/* now find out if were 8 slot or normal */
--	if((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER)
--	   == EIGHT_SLOT_IDENTIFIER) {
-+	if ((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER)
-+	    == EIGHT_SLOT_IDENTIFIER) {
- 		voyager_8slot = 1;
--		printk(KERN_NOTICE "Voyager: Eight slot 51xx configuration detected\n");
-+		printk(KERN_NOTICE
-+		       "Voyager: Eight slot 51xx configuration detected\n");
- 	}
- 
--	for(i = VOYAGER_MIN_MODULE;
--	    i <= VOYAGER_MAX_MODULE; i++) {
-+	for (i = VOYAGER_MIN_MODULE; i <= VOYAGER_MAX_MODULE; i++) {
- 		__u8 input;
- 		int asic;
- 		__u16 eprom_size;
-@@ -643,21 +647,21 @@ voyager_cat_init(void)
- 		outb(0xAA, CAT_DATA);
- 		input = inb(CAT_DATA);
- 		outb(VOYAGER_CAT_END, CAT_CMD);
--		if(input != VOYAGER_CAT_HEADER) {
-+		if (input != VOYAGER_CAT_HEADER) {
- 			continue;
- 		}
- 		CDEBUG(("VOYAGER DEBUG: found module id 0x%x, %s\n", i,
- 			cat_module_name(i)));
--		*modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL); /*&voyager_module_storage[cat_count++];*/
--		if(*modpp == NULL) {
-+		*modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL);	/*&voyager_module_storage[cat_count++]; */
-+		if (*modpp == NULL) {
- 			printk("**WARNING** kmalloc failure in cat_init\n");
- 			continue;
- 		}
- 		memset(*modpp, 0, sizeof(voyager_module_t));
- 		/* need temporary asic for cat_subread.  It will be
- 		 * filled in correctly later */
--		(*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count];*/
--		if((*modpp)->asic == NULL) {
-+		(*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL);	/*&voyager_asic_storage[asic_count]; */
-+		if ((*modpp)->asic == NULL) {
- 			printk("**WARNING** kmalloc failure in cat_init\n");
- 			continue;
+-	      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
+-	      /* Overflow to infinity */
+-	      l[0] = 0x00000000;	/* Set to */
+-	      l[1] = 0x7ff00000;	/* + INF */
+-	    }
+-	  else
+-	    {
+-	      if ( precision_loss )
+-		{
+-		  if ( increment )
+-		    set_precision_flag_up();
+-		  else
+-		    set_precision_flag_down();
  		}
-@@ -666,47 +670,52 @@ voyager_cat_init(void)
- 		(*modpp)->asic->subaddr = VOYAGER_SUBADDR_HI;
- 		(*modpp)->module_addr = i;
- 		(*modpp)->scan_path_connected = 0;
--		if(i == VOYAGER_PSI) {
-+		if (i == VOYAGER_PSI) {
- 			/* Exception leg for modules with no EEPROM */
- 			printk("Module \"%s\"\n", cat_module_name(i));
- 			continue;
+-	      /* Add the exponent */
+-	      l[1] |= (((exp+DOUBLE_Ebias) & 0x7ff) << 20);
+-	    }
+-	}
+-    }
+-  else if (st0_tag == TAG_Zero)
+-    {
+-      /* Number is zero */
+-      l[0] = 0;
+-      l[1] = 0;
+-    }
+-  else if ( st0_tag == TAG_Special )
+-    {
+-      st0_tag = FPU_Special(st0_ptr);
+-      if ( st0_tag == TW_Denormal )
+-	{
+-	  /* A denormal will always underflow. */
++	} else if (st0_tag == TAG_Zero) {
++		/* Number is zero */
++		l[0] = 0;
++		l[1] = 0;
++	} else if (st0_tag == TAG_Special) {
++		st0_tag = FPU_Special(st0_ptr);
++		if (st0_tag == TW_Denormal) {
++			/* A denormal will always underflow. */
+ #ifndef PECULIAR_486
+-	  /* An 80486 is supposed to be able to generate
+-	     a denormal exception here, but... */
+-	  /* Underflow has priority. */
+-	  if ( control_word & CW_Underflow )
+-	    denormal_operand();
++			/* An 80486 is supposed to be able to generate
++			   a denormal exception here, but... */
++			/* Underflow has priority. */
++			if (control_word & CW_Underflow)
++				denormal_operand();
+ #endif /* PECULIAR_486 */
+-	  reg_copy(st0_ptr, &tmp);
+-	  goto denormal_arg;
+-	}
+-      else if (st0_tag == TW_Infinity)
+-	{
+-	  l[0] = 0;
+-	  l[1] = 0x7ff00000;
+-	}
+-      else if (st0_tag == TW_NaN)
+-	{
+-	  /* Is it really a NaN ? */
+-	  if ( (exponent(st0_ptr) == EXP_OVER)
+-	       && (st0_ptr->sigh & 0x80000000) )
+-	    {
+-	      /* See if we can get a valid NaN from the FPU_REG */
+-	      l[0] = (st0_ptr->sigl >> 11) | (st0_ptr->sigh << 21);
+-	      l[1] = ((st0_ptr->sigh >> 11) & 0xfffff);
+-	      if ( !(st0_ptr->sigh & 0x40000000) )
+-		{
+-		  /* It is a signalling NaN */
+-		  EXCEPTION(EX_Invalid);
+-		  if ( !(control_word & CW_Invalid) )
+-		    return 0;
+-		  l[1] |= (0x40000000 >> 11);
++			reg_copy(st0_ptr, &tmp);
++			goto denormal_arg;
++		} else if (st0_tag == TW_Infinity) {
++			l[0] = 0;
++			l[1] = 0x7ff00000;
++		} else if (st0_tag == TW_NaN) {
++			/* Is it really a NaN ? */
++			if ((exponent(st0_ptr) == EXP_OVER)
++			    && (st0_ptr->sigh & 0x80000000)) {
++				/* See if we can get a valid NaN from the FPU_REG */
++				l[0] =
++				    (st0_ptr->sigl >> 11) | (st0_ptr->
++							     sigh << 21);
++				l[1] = ((st0_ptr->sigh >> 11) & 0xfffff);
++				if (!(st0_ptr->sigh & 0x40000000)) {
++					/* It is a signalling NaN */
++					EXCEPTION(EX_Invalid);
++					if (!(control_word & CW_Invalid))
++						return 0;
++					l[1] |= (0x40000000 >> 11);
++				}
++				l[1] |= 0x7ff00000;
++			} else {
++				/* It is an unsupported data type */
++				EXCEPTION(EX_Invalid);
++				if (!(control_word & CW_Invalid))
++					return 0;
++				l[0] = 0;
++				l[1] = 0xfff80000;
++			}
  		}
--			       
+-	      l[1] |= 0x7ff00000;
+-	    }
+-	  else
+-	    {
+-	      /* It is an unsupported data type */
+-	      EXCEPTION(EX_Invalid);
+-	      if ( !(control_word & CW_Invalid) )
+-		return 0;
+-	      l[0] = 0;
+-	      l[1] = 0xfff80000;
+-	    }
++	} else if (st0_tag == TAG_Empty) {
++		/* Empty register (stack underflow) */
++		EXCEPTION(EX_StackUnder);
++		if (control_word & CW_Invalid) {
++			/* The masked response */
++			/* Put out the QNaN indefinite */
++			RE_ENTRANT_CHECK_OFF;
++			FPU_access_ok(VERIFY_WRITE, dfloat, 8);
++			FPU_put_user(0, (unsigned long __user *)dfloat);
++			FPU_put_user(0xfff80000,
++				     1 + (unsigned long __user *)dfloat);
++			RE_ENTRANT_CHECK_ON;
++			return 1;
++		} else
++			return 0;
+ 	}
+-    }
+-  else if ( st0_tag == TAG_Empty )
+-    {
+-      /* Empty register (stack underflow) */
+-      EXCEPTION(EX_StackUnder);
+-      if ( control_word & CW_Invalid )
+-	{
+-	  /* The masked response */
+-	  /* Put out the QNaN indefinite */
+-	  RE_ENTRANT_CHECK_OFF;
+-	  FPU_access_ok(VERIFY_WRITE,dfloat,8);
+-	  FPU_put_user(0, (unsigned long __user *) dfloat);
+-	  FPU_put_user(0xfff80000, 1 + (unsigned long __user *) dfloat);
+-	  RE_ENTRANT_CHECK_ON;
+-	  return 1;
+-	}
+-      else
+-	return 0;
+-    }
+-  if ( getsign(st0_ptr) )
+-    l[1] |= 0x80000000;
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_WRITE,dfloat,8);
+-  FPU_put_user(l[0], (unsigned long __user *)dfloat);
+-  FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat);
+-  RE_ENTRANT_CHECK_ON;
+-
+-  return 1;
+-}
++	if (getsign(st0_ptr))
++		l[1] |= 0x80000000;
+ 
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_WRITE, dfloat, 8);
++	FPU_put_user(l[0], (unsigned long __user *)dfloat);
++	FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat);
++	RE_ENTRANT_CHECK_ON;
 +
- 		CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
- 		outb(VOYAGER_CAT_RUN, CAT_CMD);
- 		cat_disconnect(*modpp, (*modpp)->asic);
--		if(cat_subread(*modpp, (*modpp)->asic,
--			       VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
--			       &eprom_size)) {
--			printk("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n", i);
-+		if (cat_subread(*modpp, (*modpp)->asic,
-+				VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
-+				&eprom_size)) {
-+			printk
-+			    ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
-+			     i);
- 			outb(VOYAGER_CAT_END, CAT_CMD);
- 			continue;
- 		}
--		if(eprom_size > sizeof(eprom_buf)) {
--			printk("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x.  Need %d\n", i, eprom_size);
-+		if (eprom_size > sizeof(eprom_buf)) {
-+			printk
-+			    ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x.  Need %d\n",
-+			     i, eprom_size);
- 			outb(VOYAGER_CAT_END, CAT_CMD);
- 			continue;
- 		}
- 		outb(VOYAGER_CAT_END, CAT_CMD);
- 		outb(VOYAGER_CAT_RUN, CAT_CMD);
--		CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i, eprom_size));
--		if(cat_subread(*modpp, (*modpp)->asic, 0, 
--			       eprom_size, eprom_buf)) {
-+		CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
-+			eprom_size));
-+		if (cat_subread
-+		    (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
- 			outb(VOYAGER_CAT_END, CAT_CMD);
- 			continue;
- 		}
- 		outb(VOYAGER_CAT_END, CAT_CMD);
- 		printk("Module \"%s\", version 0x%x, tracer 0x%x, asics %d\n",
- 		       cat_module_name(i), eprom_hdr->version_id,
--		       *((__u32 *)eprom_hdr->tracer),  eprom_hdr->num_asics);
-+		       *((__u32 *) eprom_hdr->tracer), eprom_hdr->num_asics);
- 		(*modpp)->ee_size = eprom_hdr->ee_size;
- 		(*modpp)->num_asics = eprom_hdr->num_asics;
- 		asicpp = &((*modpp)->asic);
- 		sp_offset = eprom_hdr->scan_path_offset;
- 		/* All we really care about are the Quad cards.  We
--                 * identify them because they are in a processor slot
--                 * and have only four asics */
--		if((i < 0x10 || (i>=0x14 && i < 0x1c) || i>0x1f)) {
-+		 * identify them because they are in a processor slot
-+		 * and have only four asics */
-+		if ((i < 0x10 || (i >= 0x14 && i < 0x1c) || i > 0x1f)) {
- 			modpp = &((*modpp)->next);
- 			continue;
- 		}
-@@ -717,16 +726,17 @@ voyager_cat_init(void)
- 			 &num_submodules);
- 		/* lowest two bits, active low */
- 		num_submodules = ~(0xfc | num_submodules);
--		CDEBUG(("VOYAGER CAT: %d submodules present\n", num_submodules));
--		if(num_submodules == 0) {
-+		CDEBUG(("VOYAGER CAT: %d submodules present\n",
-+			num_submodules));
-+		if (num_submodules == 0) {
- 			/* fill in the dyadic extended processors */
- 			__u8 cpu = i & 0x07;
++	return 1;
++}
  
- 			printk("Module \"%s\": Dyadic Processor Card\n",
- 			       cat_module_name(i));
--			voyager_extended_vic_processors |= (1<<cpu);
-+			voyager_extended_vic_processors |= (1 << cpu);
- 			cpu += 4;
--			voyager_extended_vic_processors |= (1<<cpu);
-+			voyager_extended_vic_processors |= (1 << cpu);
- 			outb(VOYAGER_CAT_END, CAT_CMD);
- 			continue;
- 		}
-@@ -740,28 +750,32 @@ voyager_cat_init(void)
- 		cat_write(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, val);
+ /* Put a float into user memory */
+ int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single)
+ {
+-  long templ = 0;
+-  unsigned long increment = 0;     	/* avoid gcc warnings */
+-  int precision_loss;
+-  int exp;
+-  FPU_REG tmp;
++	long templ = 0;
++	unsigned long increment = 0;	/* avoid gcc warnings */
++	int precision_loss;
++	int exp;
++	FPU_REG tmp;
  
- 		outb(VOYAGER_CAT_END, CAT_CMD);
--			 
+-  if ( st0_tag == TAG_Valid )
+-    {
++	if (st0_tag == TAG_Valid) {
  
- 		CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
- 		outb(VOYAGER_CAT_RUN, CAT_CMD);
- 		cat_disconnect(*modpp, (*modpp)->asic);
--		if(cat_subread(*modpp, (*modpp)->asic,
--			       VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
--			       &eprom_size)) {
--			printk("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n", i);
-+		if (cat_subread(*modpp, (*modpp)->asic,
-+				VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
-+				&eprom_size)) {
-+			printk
-+			    ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
-+			     i);
- 			outb(VOYAGER_CAT_END, CAT_CMD);
- 			continue;
- 		}
--		if(eprom_size > sizeof(eprom_buf)) {
--			printk("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x.  Need %d\n", i, eprom_size);
-+		if (eprom_size > sizeof(eprom_buf)) {
-+			printk
-+			    ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x.  Need %d\n",
-+			     i, eprom_size);
- 			outb(VOYAGER_CAT_END, CAT_CMD);
- 			continue;
- 		}
- 		outb(VOYAGER_CAT_END, CAT_CMD);
- 		outb(VOYAGER_CAT_RUN, CAT_CMD);
--		CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i, eprom_size));
--		if(cat_subread(*modpp, (*modpp)->asic, 0, 
--			       eprom_size, eprom_buf)) {
-+		CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
-+			eprom_size));
-+		if (cat_subread
-+		    (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
- 			outb(VOYAGER_CAT_END, CAT_CMD);
- 			continue;
- 		}
-@@ -773,30 +787,35 @@ voyager_cat_init(void)
- 		sp_offset = eprom_hdr->scan_path_offset;
- 		/* get rid of the dummy CAT asic and read the real one */
- 		kfree((*modpp)->asic);
--		for(asic=0; asic < (*modpp)->num_asics; asic++) {
-+		for (asic = 0; asic < (*modpp)->num_asics; asic++) {
- 			int j;
--			voyager_asic_t *asicp = *asicpp 
--				= kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
-+			voyager_asic_t *asicp = *asicpp = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL);	/*&voyager_asic_storage[asic_count++]; */
- 			voyager_sp_table_t *sp_table;
- 			voyager_at_t *asic_table;
- 			voyager_jtt_t *jtag_table;
+-      reg_copy(st0_ptr, &tmp);
+-      exp = exponent(&tmp);
++		reg_copy(st0_ptr, &tmp);
++		exp = exponent(&tmp);
  
--			if(asicp == NULL) {
--				printk("**WARNING** kmalloc failure in cat_init\n");
-+			if (asicp == NULL) {
-+				printk
-+				    ("**WARNING** kmalloc failure in cat_init\n");
- 				continue;
- 			}
- 			asicpp = &(asicp->next);
- 			asicp->asic_location = asic;
--			sp_table = (voyager_sp_table_t *)(eprom_buf + sp_offset);
-+			sp_table =
-+			    (voyager_sp_table_t *) (eprom_buf + sp_offset);
- 			asicp->asic_id = sp_table->asic_id;
--			asic_table = (voyager_at_t *)(eprom_buf + sp_table->asic_data_offset);
--			for(j=0; j<4; j++)
-+			asic_table =
-+			    (voyager_at_t *) (eprom_buf +
-+					      sp_table->asic_data_offset);
-+			for (j = 0; j < 4; j++)
- 				asicp->jtag_id[j] = asic_table->jtag_id[j];
--			jtag_table = (voyager_jtt_t *)(eprom_buf + asic_table->jtag_offset);
-+			jtag_table =
-+			    (voyager_jtt_t *) (eprom_buf +
-+					       asic_table->jtag_offset);
- 			asicp->ireg_length = jtag_table->ireg_len;
- 			asicp->bit_location = (*modpp)->inst_bits;
- 			(*modpp)->inst_bits += asicp->ireg_length;
--			if(asicp->ireg_length > (*modpp)->largest_reg)
-+			if (asicp->ireg_length > (*modpp)->largest_reg)
- 				(*modpp)->largest_reg = asicp->ireg_length;
- 			if (asicp->ireg_length < (*modpp)->smallest_reg ||
- 			    (*modpp)->smallest_reg == 0)
-@@ -804,15 +823,13 @@ voyager_cat_init(void)
- 			CDEBUG(("asic 0x%x, ireg_length=%d, bit_location=%d\n",
- 				asicp->asic_id, asicp->ireg_length,
- 				asicp->bit_location));
--			if(asicp->asic_id == VOYAGER_QUAD_QABC) {
-+			if (asicp->asic_id == VOYAGER_QUAD_QABC) {
- 				CDEBUG(("VOYAGER CAT: QABC ASIC found\n"));
- 				qabc_asic = asicp;
- 			}
- 			sp_offset += sizeof(voyager_sp_table_t);
- 		}
--		CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n",
--			(*modpp)->inst_bits, (*modpp)->largest_reg,
--			(*modpp)->smallest_reg));
-+		CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n", (*modpp)->inst_bits, (*modpp)->largest_reg, (*modpp)->smallest_reg));
- 		/* OK, now we have the QUAD ASICs set up, use them.
- 		 * we need to:
- 		 *
-@@ -828,10 +845,11 @@ voyager_cat_init(void)
- 		qic_addr = qabc_data[5] << 8;
- 		qic_addr = (qic_addr | qabc_data[6]) << 8;
- 		qic_addr = (qic_addr | qabc_data[7]) << 8;
--		printk("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n",
--		       cat_module_name(i), qic_addr, qabc_data[8]);
-+		printk
-+		    ("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n",
-+		     cat_module_name(i), qic_addr, qabc_data[8]);
- #if 0				/* plumbing fails---FIXME */
--		if((qabc_data[8] & 0xf0) == 0) {
-+		if ((qabc_data[8] & 0xf0) == 0) {
- 			/* FIXME: 32 way 8 CPU slot monster cannot be
- 			 * plumbed this way---need to check for it */
+-      if ( exp < SINGLE_Emin )
+-	{
+-	  addexponent(&tmp, -SINGLE_Emin + 23);  /* largest exp to be 22 */
++		if (exp < SINGLE_Emin) {
++			addexponent(&tmp, -SINGLE_Emin + 23);	/* largest exp to be 22 */
  
-@@ -842,94 +860,97 @@ voyager_cat_init(void)
- #ifdef VOYAGER_CAT_DEBUG
- 			/* verify plumbing */
- 			cat_subread(*modpp, qabc_asic, 8, 1, &qabc_data[8]);
--			if((qabc_data[8] & 0xf0) == 0) {
--				CDEBUG(("PLUMBING FAILED: 0x%x\n", qabc_data[8]));
-+			if ((qabc_data[8] & 0xf0) == 0) {
-+				CDEBUG(("PLUMBING FAILED: 0x%x\n",
-+					qabc_data[8]));
- 			}
- #endif
- 		}
- #endif
+-	denormal_arg:
++		      denormal_arg:
  
- 		{
--			struct resource *res = kzalloc(sizeof(struct resource),GFP_KERNEL);
-+			struct resource *res =
-+			    kzalloc(sizeof(struct resource), GFP_KERNEL);
- 			res->name = kmalloc(128, GFP_KERNEL);
--			sprintf((char *)res->name, "Voyager %s Quad CPI", cat_module_name(i));
-+			sprintf((char *)res->name, "Voyager %s Quad CPI",
-+				cat_module_name(i));
- 			res->start = qic_addr;
- 			res->end = qic_addr + 0x3ff;
- 			request_resource(&iomem_resource, res);
+-	  if ( (precision_loss = FPU_round_to_int(&tmp, st0_tag)) )
+-	    {
++			if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
+ #ifdef PECULIAR_486
+-	      /* Did it round to a non-denormal ? */
+-	      /* This behaviour might be regarded as peculiar, it appears
+-		 that the 80486 rounds to the dest precision, then
+-		 converts to decide underflow. */
+-	      if ( !((tmp.sigl == 0x00800000) &&
+-		  ((st0_ptr->sigh & 0x000000ff) || st0_ptr->sigl)) )
++				/* Did it round to a non-denormal ? */
++				/* This behaviour might be regarded as peculiar, it appears
++				   that the 80486 rounds to the dest precision, then
++				   converts to decide underflow. */
++				if (!((tmp.sigl == 0x00800000) &&
++				      ((st0_ptr->sigh & 0x000000ff)
++				       || st0_ptr->sigl)))
+ #endif /* PECULIAR_486 */
+-		{
+-		  EXCEPTION(EX_Underflow);
+-		  /* This is a special case: see sec 16.2.5.1 of
+-		     the 80486 book */
+-		  if ( !(control_word & CW_Underflow) )
+-		    return 0;
+-		}
+-	      EXCEPTION(precision_loss);
+-	      if ( !(control_word & CW_Precision) )
+-		return 0;
+-	    }
+-	  templ = tmp.sigl;
+-      }
+-      else
+-	{
+-	  if ( tmp.sigl | (tmp.sigh & 0x000000ff) )
+-	    {
+-	      unsigned long sigh = tmp.sigh;
+-	      unsigned long sigl = tmp.sigl;
+-	      
+-	      precision_loss = 1;
+-	      switch (control_word & CW_RC)
+-		{
+-		case RC_RND:
+-		  increment = ((sigh & 0xff) > 0x80)       /* more than half */
+-		    || (((sigh & 0xff) == 0x80) && sigl)   /* more than half */
+-		    || ((sigh & 0x180) == 0x180);        /* round to even */
+-		  break;
+-		case RC_DOWN:   /* towards -infinity */
+-		  increment = signpositive(&tmp)
+-		    ? 0 : (sigl | (sigh & 0xff));
+-		  break;
+-		case RC_UP:     /* towards +infinity */
+-		  increment = signpositive(&tmp)
+-		    ? (sigl | (sigh & 0xff)) : 0;
+-		  break;
+-		case RC_CHOP:
+-		  increment = 0;
+-		  break;
+-		}
+-	  
+-	      /* Truncate part of the mantissa */
+-	      tmp.sigl = 0;
+-	  
+-	      if (increment)
+-		{
+-		  if ( sigh >= 0xffffff00 )
+-		    {
+-		      /* The sigh part overflows */
+-		      tmp.sigh = 0x80000000;
+-		      exp++;
+-		      if ( exp >= EXP_OVER )
+-			goto overflow;
+-		    }
+-		  else
+-		    {
+-		      tmp.sigh &= 0xffffff00;
+-		      tmp.sigh += 0x100;
+-		    }
+-		}
+-	      else
+-		{
+-		  tmp.sigh &= 0xffffff00;  /* Finish the truncation */
+-		}
+-	    }
+-	  else
+-	    precision_loss = 0;
+-      
+-	  templ = (tmp.sigh >> 8) & 0x007fffff;
+-
+-	  if ( exp > SINGLE_Emax )
+-	    {
+-	    overflow:
+-	      EXCEPTION(EX_Overflow);
+-	      if ( !(control_word & CW_Overflow) )
+-		return 0;
+-	      set_precision_flag_up();
+-	      if ( !(control_word & CW_Precision) )
+-		return 0;
+-
+-	      /* This is a special case: see sec 16.2.5.1 of the 80486 book. */
+-	      /* Masked response is overflow to infinity. */
+-	      templ = 0x7f800000;
+-	    }
+-	  else
+-	    {
+-	      if ( precision_loss )
+-		{
+-		  if ( increment )
+-		    set_precision_flag_up();
+-		  else
+-		    set_precision_flag_down();
++				{
++					EXCEPTION(EX_Underflow);
++					/* This is a special case: see sec 16.2.5.1 of
++					   the 80486 book */
++					if (!(control_word & CW_Underflow))
++						return 0;
++				}
++				EXCEPTION(precision_loss);
++				if (!(control_word & CW_Precision))
++					return 0;
++			}
++			templ = tmp.sigl;
++		} else {
++			if (tmp.sigl | (tmp.sigh & 0x000000ff)) {
++				unsigned long sigh = tmp.sigh;
++				unsigned long sigl = tmp.sigl;
++
++				precision_loss = 1;
++				switch (control_word & CW_RC) {
++				case RC_RND:
++					increment = ((sigh & 0xff) > 0x80)	/* more than half */
++					    ||(((sigh & 0xff) == 0x80) && sigl)	/* more than half */
++					    ||((sigh & 0x180) == 0x180);	/* round to even */
++					break;
++				case RC_DOWN:	/* towards -infinity */
++					increment = signpositive(&tmp)
++					    ? 0 : (sigl | (sigh & 0xff));
++					break;
++				case RC_UP:	/* towards +infinity */
++					increment = signpositive(&tmp)
++					    ? (sigl | (sigh & 0xff)) : 0;
++					break;
++				case RC_CHOP:
++					increment = 0;
++					break;
++				}
++
++				/* Truncate part of the mantissa */
++				tmp.sigl = 0;
++
++				if (increment) {
++					if (sigh >= 0xffffff00) {
++						/* The sigh part overflows */
++						tmp.sigh = 0x80000000;
++						exp++;
++						if (exp >= EXP_OVER)
++							goto overflow;
++					} else {
++						tmp.sigh &= 0xffffff00;
++						tmp.sigh += 0x100;
++					}
++				} else {
++					tmp.sigh &= 0xffffff00;	/* Finish the truncation */
++				}
++			} else
++				precision_loss = 0;
++
++			templ = (tmp.sigh >> 8) & 0x007fffff;
++
++			if (exp > SINGLE_Emax) {
++			      overflow:
++				EXCEPTION(EX_Overflow);
++				if (!(control_word & CW_Overflow))
++					return 0;
++				set_precision_flag_up();
++				if (!(control_word & CW_Precision))
++					return 0;
++
++				/* This is a special case: see sec 16.2.5.1 of the 80486 book. */
++				/* Masked response is overflow to infinity. */
++				templ = 0x7f800000;
++			} else {
++				if (precision_loss) {
++					if (increment)
++						set_precision_flag_up();
++					else
++						set_precision_flag_down();
++				}
++				/* Add the exponent */
++				templ |= ((exp + SINGLE_Ebias) & 0xff) << 23;
++			}
  		}
- 
- 		qic_addr = (unsigned long)ioremap(qic_addr, 0x400);
--				
--		for(j = 0; j < 4; j++) {
+-	      /* Add the exponent */
+-	      templ |= ((exp+SINGLE_Ebias) & 0xff) << 23;
+-	    }
+-	}
+-    }
+-  else if (st0_tag == TAG_Zero)
+-    {
+-      templ = 0;
+-    }
+-  else if ( st0_tag == TAG_Special )
+-    {
+-      st0_tag = FPU_Special(st0_ptr);
+-      if (st0_tag == TW_Denormal)
+-	{
+-	  reg_copy(st0_ptr, &tmp);
+-
+-	  /* A denormal will always underflow. */
++	} else if (st0_tag == TAG_Zero) {
++		templ = 0;
++	} else if (st0_tag == TAG_Special) {
++		st0_tag = FPU_Special(st0_ptr);
++		if (st0_tag == TW_Denormal) {
++			reg_copy(st0_ptr, &tmp);
 +
-+		for (j = 0; j < 4; j++) {
- 			__u8 cpu;
- 
--			if(voyager_8slot) {
-+			if (voyager_8slot) {
- 				/* 8 slot has a different mapping,
- 				 * each slot has only one vic line, so
- 				 * 1 cpu in each slot must be < 8 */
--				cpu = (i & 0x07) + j*8;
-+				cpu = (i & 0x07) + j * 8;
- 			} else {
--				cpu = (i & 0x03) + j*4;
-+				cpu = (i & 0x03) + j * 4;
- 			}
--			if( (qabc_data[8] & (1<<j))) {
--				voyager_extended_vic_processors |= (1<<cpu);
-+			if ((qabc_data[8] & (1 << j))) {
-+				voyager_extended_vic_processors |= (1 << cpu);
- 			}
--			if(qabc_data[8] & (1<<(j+4)) ) {
-+			if (qabc_data[8] & (1 << (j + 4))) {
- 				/* Second SET register plumbed: Quad
- 				 * card has two VIC connected CPUs.
- 				 * Secondary cannot be booted as a VIC
- 				 * CPU */
--				voyager_extended_vic_processors |= (1<<cpu);
--				voyager_allowed_boot_processors &= (~(1<<cpu));
-+				voyager_extended_vic_processors |= (1 << cpu);
-+				voyager_allowed_boot_processors &=
-+				    (~(1 << cpu));
- 			}
- 
--			voyager_quad_processors |= (1<<cpu);
-+			voyager_quad_processors |= (1 << cpu);
- 			voyager_quad_cpi_addr[cpu] = (struct voyager_qic_cpi *)
--				(qic_addr+(j<<8));
-+			    (qic_addr + (j << 8));
- 			CDEBUG(("CPU%d: CPI address 0x%lx\n", cpu,
- 				(unsigned long)voyager_quad_cpi_addr[cpu]));
++			/* A denormal will always underflow. */
+ #ifndef PECULIAR_486
+-	  /* An 80486 is supposed to be able to generate
+-	     a denormal exception here, but... */
+-	  /* Underflow has priority. */
+-	  if ( control_word & CW_Underflow )
+-	    denormal_operand();
+-#endif /* PECULIAR_486 */ 
+-	  goto denormal_arg;
+-	}
+-      else if (st0_tag == TW_Infinity)
+-	{
+-	  templ = 0x7f800000;
+-	}
+-      else if (st0_tag == TW_NaN)
+-	{
+-	  /* Is it really a NaN ? */
+-	  if ( (exponent(st0_ptr) == EXP_OVER) && (st0_ptr->sigh & 0x80000000) )
+-	    {
+-	      /* See if we can get a valid NaN from the FPU_REG */
+-	      templ = st0_ptr->sigh >> 8;
+-	      if ( !(st0_ptr->sigh & 0x40000000) )
+-		{
+-		  /* It is a signalling NaN */
+-		  EXCEPTION(EX_Invalid);
+-		  if ( !(control_word & CW_Invalid) )
+-		    return 0;
+-		  templ |= (0x40000000 >> 8);
++			/* An 80486 is supposed to be able to generate
++			   a denormal exception here, but... */
++			/* Underflow has priority. */
++			if (control_word & CW_Underflow)
++				denormal_operand();
++#endif /* PECULIAR_486 */
++			goto denormal_arg;
++		} else if (st0_tag == TW_Infinity) {
++			templ = 0x7f800000;
++		} else if (st0_tag == TW_NaN) {
++			/* Is it really a NaN ? */
++			if ((exponent(st0_ptr) == EXP_OVER)
++			    && (st0_ptr->sigh & 0x80000000)) {
++				/* See if we can get a valid NaN from the FPU_REG */
++				templ = st0_ptr->sigh >> 8;
++				if (!(st0_ptr->sigh & 0x40000000)) {
++					/* It is a signalling NaN */
++					EXCEPTION(EX_Invalid);
++					if (!(control_word & CW_Invalid))
++						return 0;
++					templ |= (0x40000000 >> 8);
++				}
++				templ |= 0x7f800000;
++			} else {
++				/* It is an unsupported data type */
++				EXCEPTION(EX_Invalid);
++				if (!(control_word & CW_Invalid))
++					return 0;
++				templ = 0xffc00000;
++			}
  		}
- 		outb(VOYAGER_CAT_END, CAT_CMD);
- 
--		
--		
- 		*asicpp = NULL;
- 		modpp = &((*modpp)->next);
+-	      templ |= 0x7f800000;
+-	    }
+-	  else
+-	    {
+-	      /* It is an unsupported data type */
+-	      EXCEPTION(EX_Invalid);
+-	      if ( !(control_word & CW_Invalid) )
+-		return 0;
+-	      templ = 0xffc00000;
+-	    }
+-	}
+ #ifdef PARANOID
+-      else
+-	{
+-	  EXCEPTION(EX_INTERNAL|0x164);
+-	  return 0;
+-	}
++		else {
++			EXCEPTION(EX_INTERNAL | 0x164);
++			return 0;
++		}
+ #endif
+-    }
+-  else if ( st0_tag == TAG_Empty )
+-    {
+-      /* Empty register (stack underflow) */
+-      EXCEPTION(EX_StackUnder);
+-      if ( control_word & EX_Invalid )
+-	{
+-	  /* The masked response */
+-	  /* Put out the QNaN indefinite */
+-	  RE_ENTRANT_CHECK_OFF;
+-	  FPU_access_ok(VERIFY_WRITE,single,4);
+-	  FPU_put_user(0xffc00000, (unsigned long __user *) single);
+-	  RE_ENTRANT_CHECK_ON;
+-	  return 1;
++	} else if (st0_tag == TAG_Empty) {
++		/* Empty register (stack underflow) */
++		EXCEPTION(EX_StackUnder);
++		if (control_word & EX_Invalid) {
++			/* The masked response */
++			/* Put out the QNaN indefinite */
++			RE_ENTRANT_CHECK_OFF;
++			FPU_access_ok(VERIFY_WRITE, single, 4);
++			FPU_put_user(0xffc00000,
++				     (unsigned long __user *)single);
++			RE_ENTRANT_CHECK_ON;
++			return 1;
++		} else
++			return 0;
  	}
- 	*modpp = NULL;
--	printk("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n", voyager_extended_vic_processors, voyager_quad_processors, voyager_allowed_boot_processors);
-+	printk
-+	    ("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n",
-+	     voyager_extended_vic_processors, voyager_quad_processors,
-+	     voyager_allowed_boot_processors);
- 	request_resource(&ioport_resource, &vic_res);
--	if(voyager_quad_processors)
-+	if (voyager_quad_processors)
- 		request_resource(&ioport_resource, &qic_res);
- 	/* set up the front power switch */
- }
- 
--int
--voyager_cat_readb(__u8 module, __u8 asic, int reg)
-+int voyager_cat_readb(__u8 module, __u8 asic, int reg)
- {
- 	return 0;
- }
+-      else
+-	return 0;
+-    }
+ #ifdef PARANOID
+-  else
+-    {
+-      EXCEPTION(EX_INTERNAL|0x163);
+-      return 0;
+-    }
++	else {
++		EXCEPTION(EX_INTERNAL | 0x163);
++		return 0;
++	}
+ #endif
+-  if ( getsign(st0_ptr) )
+-    templ |= 0x80000000;
++	if (getsign(st0_ptr))
++		templ |= 0x80000000;
  
--static int
--cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp) 
-+static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp)
- {
- 	__u8 val;
- 	int err = 0;
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_WRITE,single,4);
+-  FPU_put_user(templ,(unsigned long __user *) single);
+-  RE_ENTRANT_CHECK_ON;
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_WRITE, single, 4);
++	FPU_put_user(templ, (unsigned long __user *)single);
++	RE_ENTRANT_CHECK_ON;
  
--	if(!modp->scan_path_connected)
-+	if (!modp->scan_path_connected)
- 		return 0;
--	if(asicp->asic_id != VOYAGER_CAT_ID) {
-+	if (asicp->asic_id != VOYAGER_CAT_ID) {
- 		CDEBUG(("cat_disconnect: ASIC is not CAT\n"));
- 		return 1;
- 	}
- 	err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
--	if(err) {
-+	if (err) {
- 		CDEBUG(("cat_disconnect: failed to read SCANPATH\n"));
- 		return err;
- 	}
- 	val &= VOYAGER_DISCONNECT_ASIC;
- 	err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
--	if(err) {
-+	if (err) {
- 		CDEBUG(("cat_disconnect: failed to write SCANPATH\n"));
- 		return err;
- 	}
-@@ -940,27 +961,26 @@ cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp)
- 	return 0;
+-  return 1;
++	return 1;
  }
  
--static int
--cat_connect(voyager_module_t *modp, voyager_asic_t *asicp) 
-+static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp)
+-
+ /* Put a long long into user memory */
+ int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d)
  {
- 	__u8 val;
- 	int err = 0;
- 
--	if(modp->scan_path_connected)
-+	if (modp->scan_path_connected)
- 		return 0;
--	if(asicp->asic_id != VOYAGER_CAT_ID) {
-+	if (asicp->asic_id != VOYAGER_CAT_ID) {
- 		CDEBUG(("cat_connect: ASIC is not CAT\n"));
- 		return 1;
- 	}
- 
- 	err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
--	if(err) {
-+	if (err) {
- 		CDEBUG(("cat_connect: failed to read SCANPATH\n"));
- 		return err;
+-  FPU_REG t;
+-  long long tll;
+-  int precision_loss;
+-
+-  if ( st0_tag == TAG_Empty )
+-    {
+-      /* Empty register (stack underflow) */
+-      EXCEPTION(EX_StackUnder);
+-      goto invalid_operand;
+-    }
+-  else if ( st0_tag == TAG_Special )
+-    {
+-      st0_tag = FPU_Special(st0_ptr);
+-      if ( (st0_tag == TW_Infinity) ||
+-	   (st0_tag == TW_NaN) )
+-	{
+-	  EXCEPTION(EX_Invalid);
+-	  goto invalid_operand;
++	FPU_REG t;
++	long long tll;
++	int precision_loss;
++
++	if (st0_tag == TAG_Empty) {
++		/* Empty register (stack underflow) */
++		EXCEPTION(EX_StackUnder);
++		goto invalid_operand;
++	} else if (st0_tag == TAG_Special) {
++		st0_tag = FPU_Special(st0_ptr);
++		if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
++			EXCEPTION(EX_Invalid);
++			goto invalid_operand;
++		}
  	}
- 	val |= VOYAGER_CONNECT_ASIC;
- 	err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
--	if(err) {
-+	if (err) {
- 		CDEBUG(("cat_connect: failed to write SCANPATH\n"));
- 		return err;
+-    }
+-
+-  reg_copy(st0_ptr, &t);
+-  precision_loss = FPU_round_to_int(&t, st0_tag);
+-  ((long *)&tll)[0] = t.sigl;
+-  ((long *)&tll)[1] = t.sigh;
+-  if ( (precision_loss == 1) ||
+-      ((t.sigh & 0x80000000) &&
+-       !((t.sigh == 0x80000000) && (t.sigl == 0) &&
+-	 signnegative(&t))) )
+-    {
+-      EXCEPTION(EX_Invalid);
+-      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
+-    invalid_operand:
+-      if ( control_word & EX_Invalid )
+-	{
+-	  /* Produce something like QNaN "indefinite" */
+-	  tll = 0x8000000000000000LL;
++
++	reg_copy(st0_ptr, &t);
++	precision_loss = FPU_round_to_int(&t, st0_tag);
++	((long *)&tll)[0] = t.sigl;
++	((long *)&tll)[1] = t.sigh;
++	if ((precision_loss == 1) ||
++	    ((t.sigh & 0x80000000) &&
++	     !((t.sigh == 0x80000000) && (t.sigl == 0) && signnegative(&t)))) {
++		EXCEPTION(EX_Invalid);
++		/* This is a special case: see sec 16.2.5.1 of the 80486 book */
++	      invalid_operand:
++		if (control_word & EX_Invalid) {
++			/* Produce something like QNaN "indefinite" */
++			tll = 0x8000000000000000LL;
++		} else
++			return 0;
++	} else {
++		if (precision_loss)
++			set_precision_flag(precision_loss);
++		if (signnegative(&t))
++			tll = -tll;
  	}
-@@ -971,11 +991,10 @@ cat_connect(voyager_module_t *modp, voyager_asic_t *asicp)
- 	return 0;
- }
- 
--void
--voyager_cat_power_off(void)
-+void voyager_cat_power_off(void)
- {
- 	/* Power the machine off by writing to the PSI over the CAT
--         * bus */
-+	 * bus */
- 	__u8 data;
- 	voyager_module_t psi = { 0 };
- 	voyager_asic_t psi_asic = { 0 };
-@@ -1009,8 +1028,7 @@ voyager_cat_power_off(void)
- 
- struct voyager_status voyager_status = { 0 };
+-      else
+-	return 0;
+-    }
+-  else
+-    {
+-      if ( precision_loss )
+-	set_precision_flag(precision_loss);
+-      if ( signnegative(&t) )
+-	tll = - tll;
+-    }
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_WRITE,d,8);
+-  if (copy_to_user(d, &tll, 8))
+-    FPU_abort;
+-  RE_ENTRANT_CHECK_ON;
+-
+-  return 1;
+-}
  
--void
--voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
-+void voyager_cat_psi(__u8 cmd, __u16 reg, __u8 * data)
- {
- 	voyager_module_t psi = { 0 };
- 	voyager_asic_t psi_asic = { 0 };
-@@ -1027,7 +1045,7 @@ voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
- 	outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
- 	outb(VOYAGER_CAT_RUN, CAT_CMD);
- 	cat_disconnect(&psi, &psi_asic);
--	switch(cmd) {
-+	switch (cmd) {
- 	case VOYAGER_PSI_READ:
- 		cat_read(&psi, &psi_asic, reg, data);
- 		break;
-@@ -1047,8 +1065,7 @@ voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
- 	outb(VOYAGER_CAT_END, CAT_CMD);
- }
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_WRITE, d, 8);
++	if (copy_to_user(d, &tll, 8))
++		FPU_abort;
++	RE_ENTRANT_CHECK_ON;
++
++	return 1;
++}
  
--void
--voyager_cat_do_common_interrupt(void)
-+void voyager_cat_do_common_interrupt(void)
+ /* Put a long into user memory */
+ int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d)
  {
- 	/* This is caused either by a memory parity error or something
- 	 * in the PSI */
-@@ -1057,7 +1074,7 @@ voyager_cat_do_common_interrupt(void)
- 	voyager_asic_t psi_asic = { 0 };
- 	struct voyager_psi psi_reg;
- 	int i;
-- re_read:
-+      re_read:
- 	psi.asic = &psi_asic;
- 	psi.asic->asic_id = VOYAGER_CAT_ID;
- 	psi.asic->subaddr = VOYAGER_SUBADDR_HI;
-@@ -1072,43 +1089,45 @@ voyager_cat_do_common_interrupt(void)
- 	cat_disconnect(&psi, &psi_asic);
- 	/* Read the status.  NOTE: Need to read *all* the PSI regs here
- 	 * otherwise the cmn int will be reasserted */
--	for(i = 0; i < sizeof(psi_reg.regs); i++) {
--		cat_read(&psi, &psi_asic, i, &((__u8 *)&psi_reg.regs)[i]);
-+	for (i = 0; i < sizeof(psi_reg.regs); i++) {
-+		cat_read(&psi, &psi_asic, i, &((__u8 *) & psi_reg.regs)[i]);
- 	}
- 	outb(VOYAGER_CAT_END, CAT_CMD);
--	if((psi_reg.regs.checkbit & 0x02) == 0) {
-+	if ((psi_reg.regs.checkbit & 0x02) == 0) {
- 		psi_reg.regs.checkbit |= 0x02;
- 		cat_write(&psi, &psi_asic, 5, psi_reg.regs.checkbit);
- 		printk("VOYAGER RE-READ PSI\n");
- 		goto re_read;
+-  FPU_REG t;
+-  int precision_loss;
+-
+-  if ( st0_tag == TAG_Empty )
+-    {
+-      /* Empty register (stack underflow) */
+-      EXCEPTION(EX_StackUnder);
+-      goto invalid_operand;
+-    }
+-  else if ( st0_tag == TAG_Special )
+-    {
+-      st0_tag = FPU_Special(st0_ptr);
+-      if ( (st0_tag == TW_Infinity) ||
+-	   (st0_tag == TW_NaN) )
+-	{
+-	  EXCEPTION(EX_Invalid);
+-	  goto invalid_operand;
++	FPU_REG t;
++	int precision_loss;
++
++	if (st0_tag == TAG_Empty) {
++		/* Empty register (stack underflow) */
++		EXCEPTION(EX_StackUnder);
++		goto invalid_operand;
++	} else if (st0_tag == TAG_Special) {
++		st0_tag = FPU_Special(st0_ptr);
++		if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
++			EXCEPTION(EX_Invalid);
++			goto invalid_operand;
++		}
  	}
- 	outb(VOYAGER_CAT_RUN, CAT_CMD);
--	for(i = 0; i < sizeof(psi_reg.subregs); i++) {
-+	for (i = 0; i < sizeof(psi_reg.subregs); i++) {
- 		/* This looks strange, but the PSI doesn't do auto increment
- 		 * correctly */
--		cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i, 
--			    1, &((__u8 *)&psi_reg.subregs)[i]); 
-+		cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i,
-+			    1, &((__u8 *) & psi_reg.subregs)[i]);
+-    }
+-
+-  reg_copy(st0_ptr, &t);
+-  precision_loss = FPU_round_to_int(&t, st0_tag);
+-  if (t.sigh ||
+-      ((t.sigl & 0x80000000) &&
+-       !((t.sigl == 0x80000000) && signnegative(&t))) )
+-    {
+-      EXCEPTION(EX_Invalid);
+-      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
+-    invalid_operand:
+-      if ( control_word & EX_Invalid )
+-	{
+-	  /* Produce something like QNaN "indefinite" */
+-	  t.sigl = 0x80000000;
++
++	reg_copy(st0_ptr, &t);
++	precision_loss = FPU_round_to_int(&t, st0_tag);
++	if (t.sigh ||
++	    ((t.sigl & 0x80000000) &&
++	     !((t.sigl == 0x80000000) && signnegative(&t)))) {
++		EXCEPTION(EX_Invalid);
++		/* This is a special case: see sec 16.2.5.1 of the 80486 book */
++	      invalid_operand:
++		if (control_word & EX_Invalid) {
++			/* Produce something like QNaN "indefinite" */
++			t.sigl = 0x80000000;
++		} else
++			return 0;
++	} else {
++		if (precision_loss)
++			set_precision_flag(precision_loss);
++		if (signnegative(&t))
++			t.sigl = -(long)t.sigl;
  	}
- 	outb(VOYAGER_CAT_END, CAT_CMD);
- #ifdef VOYAGER_CAT_DEBUG
- 	printk("VOYAGER PSI: ");
--	for(i=0; i<sizeof(psi_reg.regs); i++)
--		printk("%02x ", ((__u8 *)&psi_reg.regs)[i]);
-+	for (i = 0; i < sizeof(psi_reg.regs); i++)
-+		printk("%02x ", ((__u8 *) & psi_reg.regs)[i]);
- 	printk("\n           ");
--	for(i=0; i<sizeof(psi_reg.subregs); i++)
--		printk("%02x ", ((__u8 *)&psi_reg.subregs)[i]);
-+	for (i = 0; i < sizeof(psi_reg.subregs); i++)
-+		printk("%02x ", ((__u8 *) & psi_reg.subregs)[i]);
- 	printk("\n");
- #endif
--	if(psi_reg.regs.intstatus & PSI_MON) {
-+	if (psi_reg.regs.intstatus & PSI_MON) {
- 		/* switch off or power fail */
- 
--		if(psi_reg.subregs.supply & PSI_SWITCH_OFF) {
--			if(voyager_status.switch_off) {
--				printk(KERN_ERR "Voyager front panel switch turned off again---Immediate power off!\n");
-+		if (psi_reg.subregs.supply & PSI_SWITCH_OFF) {
-+			if (voyager_status.switch_off) {
-+				printk(KERN_ERR
-+				       "Voyager front panel switch turned off again---Immediate power off!\n");
- 				voyager_cat_power_off();
- 				/* not reached */
- 			} else {
--				printk(KERN_ERR "Voyager front panel switch turned off\n");
-+				printk(KERN_ERR
-+				       "Voyager front panel switch turned off\n");
- 				voyager_status.switch_off = 1;
- 				voyager_status.request_from_kernel = 1;
- 				wake_up_process(voyager_thread);
-@@ -1127,7 +1146,7 @@ voyager_cat_do_common_interrupt(void)
+-      else
+-	return 0;
+-    }
+-  else
+-    {
+-      if ( precision_loss )
+-	set_precision_flag(precision_loss);
+-      if ( signnegative(&t) )
+-	t.sigl = -(long)t.sigl;
+-    }
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_WRITE,d,4);
+-  FPU_put_user(t.sigl, (unsigned long __user *) d);
+-  RE_ENTRANT_CHECK_ON;
+-
+-  return 1;
+-}
  
- 			VDEBUG(("Voyager ac fail reg 0x%x\n",
- 				psi_reg.subregs.ACfail));
--			if((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) {
-+			if ((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) {
- 				/* No further update */
- 				return;
- 			}
-@@ -1135,20 +1154,20 @@ voyager_cat_do_common_interrupt(void)
- 			/* Don't bother trying to find out who failed.
- 			 * FIXME: This probably makes the code incorrect on
- 			 * anything other than a 345x */
--			for(i=0; i< 5; i++) {
--				if( psi_reg.subregs.ACfail &(1<<i)) {
-+			for (i = 0; i < 5; i++) {
-+				if (psi_reg.subregs.ACfail & (1 << i)) {
- 					break;
- 				}
- 			}
- 			printk(KERN_NOTICE "AC FAIL IN SUPPLY %d\n", i);
- #endif
- 			/* DON'T do this: it shuts down the AC PSI 
--			outb(VOYAGER_CAT_RUN, CAT_CMD);
--			data = PSI_MASK_MASK | i;
--			cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK,
--				     1, &data);
--			outb(VOYAGER_CAT_END, CAT_CMD);
--			*/
-+			   outb(VOYAGER_CAT_RUN, CAT_CMD);
-+			   data = PSI_MASK_MASK | i;
-+			   cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK,
-+			   1, &data);
-+			   outb(VOYAGER_CAT_END, CAT_CMD);
-+			 */
- 			printk(KERN_ERR "Voyager AC power failure\n");
- 			outb(VOYAGER_CAT_RUN, CAT_CMD);
- 			data = PSI_COLD_START;
-@@ -1159,16 +1178,16 @@ voyager_cat_do_common_interrupt(void)
- 			voyager_status.request_from_kernel = 1;
- 			wake_up_process(voyager_thread);
- 		}
--		
--		
--	} else if(psi_reg.regs.intstatus & PSI_FAULT) {
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_WRITE, d, 4);
++	FPU_put_user(t.sigl, (unsigned long __user *)d);
++	RE_ENTRANT_CHECK_ON;
 +
-+	} else if (psi_reg.regs.intstatus & PSI_FAULT) {
- 		/* Major fault! */
--		printk(KERN_ERR "Voyager PSI Detected major fault, immediate power off!\n");
-+		printk(KERN_ERR
-+		       "Voyager PSI Detected major fault, immediate power off!\n");
- 		voyager_cat_power_off();
- 		/* not reached */
--	} else if(psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM
--					    | PSI_CURRENT | PSI_DVM
--					    | PSI_PSCFAULT | PSI_STAT_CHG)) {
-+	} else if (psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM
-+					     | PSI_CURRENT | PSI_DVM
-+					     | PSI_PSCFAULT | PSI_STAT_CHG)) {
- 		/* other psi fault */
- 
- 		printk(KERN_WARNING "Voyager PSI status 0x%x\n", data);
-diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
-index 88124dd..dffa786 100644
---- a/arch/x86/mach-voyager/voyager_smp.c
-+++ b/arch/x86/mach-voyager/voyager_smp.c
-@@ -32,7 +32,8 @@
- DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
- 
- /* CPU IRQ affinity -- set to all ones initially */
--static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1]  = ~0UL };
-+static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
-+	{[0 ... NR_CPUS-1]  = ~0UL };
- 
- /* per CPU data structure (for /proc/cpuinfo et al), visible externally
-  * indexed physically */
-@@ -76,7 +77,6 @@ EXPORT_SYMBOL(cpu_online_map);
-  * by scheduler but indexed physically */
- cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
++	return 1;
++}
  
+ /* Put a short into user memory */
+ int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d)
+ {
+-  FPU_REG t;
+-  int precision_loss;
 -
- /* The internal functions */
- static void send_CPI(__u32 cpuset, __u8 cpi);
- static void ack_CPI(__u8 cpi);
-@@ -101,94 +101,86 @@ int hard_smp_processor_id(void);
- int safe_smp_processor_id(void);
+-  if ( st0_tag == TAG_Empty )
+-    {
+-      /* Empty register (stack underflow) */
+-      EXCEPTION(EX_StackUnder);
+-      goto invalid_operand;
+-    }
+-  else if ( st0_tag == TAG_Special )
+-    {
+-      st0_tag = FPU_Special(st0_ptr);
+-      if ( (st0_tag == TW_Infinity) ||
+-	   (st0_tag == TW_NaN) )
+-	{
+-	  EXCEPTION(EX_Invalid);
+-	  goto invalid_operand;
++	FPU_REG t;
++	int precision_loss;
++
++	if (st0_tag == TAG_Empty) {
++		/* Empty register (stack underflow) */
++		EXCEPTION(EX_StackUnder);
++		goto invalid_operand;
++	} else if (st0_tag == TAG_Special) {
++		st0_tag = FPU_Special(st0_ptr);
++		if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
++			EXCEPTION(EX_Invalid);
++			goto invalid_operand;
++		}
+ 	}
+-    }
+-
+-  reg_copy(st0_ptr, &t);
+-  precision_loss = FPU_round_to_int(&t, st0_tag);
+-  if (t.sigh ||
+-      ((t.sigl & 0xffff8000) &&
+-       !((t.sigl == 0x8000) && signnegative(&t))) )
+-    {
+-      EXCEPTION(EX_Invalid);
+-      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
+-    invalid_operand:
+-      if ( control_word & EX_Invalid )
+-	{
+-	  /* Produce something like QNaN "indefinite" */
+-	  t.sigl = 0x8000;
++
++	reg_copy(st0_ptr, &t);
++	precision_loss = FPU_round_to_int(&t, st0_tag);
++	if (t.sigh ||
++	    ((t.sigl & 0xffff8000) &&
++	     !((t.sigl == 0x8000) && signnegative(&t)))) {
++		EXCEPTION(EX_Invalid);
++		/* This is a special case: see sec 16.2.5.1 of the 80486 book */
++	      invalid_operand:
++		if (control_word & EX_Invalid) {
++			/* Produce something like QNaN "indefinite" */
++			t.sigl = 0x8000;
++		} else
++			return 0;
++	} else {
++		if (precision_loss)
++			set_precision_flag(precision_loss);
++		if (signnegative(&t))
++			t.sigl = -t.sigl;
+ 	}
+-      else
+-	return 0;
+-    }
+-  else
+-    {
+-      if ( precision_loss )
+-	set_precision_flag(precision_loss);
+-      if ( signnegative(&t) )
+-	t.sigl = -t.sigl;
+-    }
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_WRITE,d,2);
+-  FPU_put_user((short)t.sigl, d);
+-  RE_ENTRANT_CHECK_ON;
+-
+-  return 1;
+-}
  
- /* Inline functions */
--static inline void
--send_one_QIC_CPI(__u8 cpu, __u8 cpi)
-+static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
- {
- 	voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
--		(smp_processor_id() << 16) + cpi;
-+	    (smp_processor_id() << 16) + cpi;
- }
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_WRITE, d, 2);
++	FPU_put_user((short)t.sigl, d);
++	RE_ENTRANT_CHECK_ON;
++
++	return 1;
++}
  
--static inline void
--send_QIC_CPI(__u32 cpuset, __u8 cpi)
-+static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
+ /* Put a packed bcd array into user memory */
+ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
  {
- 	int cpu;
- 
- 	for_each_online_cpu(cpu) {
--		if(cpuset & (1<<cpu)) {
-+		if (cpuset & (1 << cpu)) {
- #ifdef VOYAGER_DEBUG
--			if(!cpu_isset(cpu, cpu_online_map))
--				VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu));
-+			if (!cpu_isset(cpu, cpu_online_map))
-+				VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
-+					"cpu_online_map\n",
-+					hard_smp_processor_id(), cpi, cpu));
- #endif
- 			send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
- 		}
+-  FPU_REG t;
+-  unsigned long long ll;
+-  u_char b;
+-  int i, precision_loss;
+-  u_char sign = (getsign(st0_ptr) == SIGN_NEG) ? 0x80 : 0;
+-
+-  if ( st0_tag == TAG_Empty )
+-    {
+-      /* Empty register (stack underflow) */
+-      EXCEPTION(EX_StackUnder);
+-      goto invalid_operand;
+-    }
+-  else if ( st0_tag == TAG_Special )
+-    {
+-      st0_tag = FPU_Special(st0_ptr);
+-      if ( (st0_tag == TW_Infinity) ||
+-	   (st0_tag == TW_NaN) )
+-	{
+-	  EXCEPTION(EX_Invalid);
+-	  goto invalid_operand;
++	FPU_REG t;
++	unsigned long long ll;
++	u_char b;
++	int i, precision_loss;
++	u_char sign = (getsign(st0_ptr) == SIGN_NEG) ? 0x80 : 0;
++
++	if (st0_tag == TAG_Empty) {
++		/* Empty register (stack underflow) */
++		EXCEPTION(EX_StackUnder);
++		goto invalid_operand;
++	} else if (st0_tag == TAG_Special) {
++		st0_tag = FPU_Special(st0_ptr);
++		if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
++			EXCEPTION(EX_Invalid);
++			goto invalid_operand;
++		}
++	}
++
++	reg_copy(st0_ptr, &t);
++	precision_loss = FPU_round_to_int(&t, st0_tag);
++	ll = significand(&t);
++
++	/* Check for overflow, by comparing with 999999999999999999 decimal. */
++	if ((t.sigh > 0x0de0b6b3) ||
++	    ((t.sigh == 0x0de0b6b3) && (t.sigl > 0xa763ffff))) {
++		EXCEPTION(EX_Invalid);
++		/* This is a special case: see sec 16.2.5.1 of the 80486 book */
++	      invalid_operand:
++		if (control_word & CW_Invalid) {
++			/* Produce the QNaN "indefinite" */
++			RE_ENTRANT_CHECK_OFF;
++			FPU_access_ok(VERIFY_WRITE, d, 10);
++			for (i = 0; i < 7; i++)
++				FPU_put_user(0, d + i);	/* These bytes "undefined" */
++			FPU_put_user(0xc0, d + 7);	/* This byte "undefined" */
++			FPU_put_user(0xff, d + 8);
++			FPU_put_user(0xff, d + 9);
++			RE_ENTRANT_CHECK_ON;
++			return 1;
++		} else
++			return 0;
++	} else if (precision_loss) {
++		/* Precision loss doesn't stop the data transfer */
++		set_precision_flag(precision_loss);
  	}
+-    }
+-
+-  reg_copy(st0_ptr, &t);
+-  precision_loss = FPU_round_to_int(&t, st0_tag);
+-  ll = significand(&t);
+-
+-  /* Check for overflow, by comparing with 999999999999999999 decimal. */
+-  if ( (t.sigh > 0x0de0b6b3) ||
+-      ((t.sigh == 0x0de0b6b3) && (t.sigl > 0xa763ffff)) )
+-    {
+-      EXCEPTION(EX_Invalid);
+-      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
+-    invalid_operand:
+-      if ( control_word & CW_Invalid )
+-	{
+-	  /* Produce the QNaN "indefinite" */
+-	  RE_ENTRANT_CHECK_OFF;
+-	  FPU_access_ok(VERIFY_WRITE,d,10);
+-	  for ( i = 0; i < 7; i++)
+-	    FPU_put_user(0, d+i); /* These bytes "undefined" */
+-	  FPU_put_user(0xc0, d+7); /* This byte "undefined" */
+-	  FPU_put_user(0xff, d+8);
+-	  FPU_put_user(0xff, d+9);
+-	  RE_ENTRANT_CHECK_ON;
+-	  return 1;
++
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_WRITE, d, 10);
++	RE_ENTRANT_CHECK_ON;
++	for (i = 0; i < 9; i++) {
++		b = FPU_div_small(&ll, 10);
++		b |= (FPU_div_small(&ll, 10)) << 4;
++		RE_ENTRANT_CHECK_OFF;
++		FPU_put_user(b, d + i);
++		RE_ENTRANT_CHECK_ON;
+ 	}
+-      else
+-	return 0;
+-    }
+-  else if ( precision_loss )
+-    {
+-      /* Precision loss doesn't stop the data transfer */
+-      set_precision_flag(precision_loss);
+-    }
+-
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_WRITE,d,10);
+-  RE_ENTRANT_CHECK_ON;
+-  for ( i = 0; i < 9; i++)
+-    {
+-      b = FPU_div_small(&ll, 10);
+-      b |= (FPU_div_small(&ll, 10)) << 4;
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_put_user(b, d+i);
+-      RE_ENTRANT_CHECK_ON;
+-    }
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_put_user(sign, d+9);
+-  RE_ENTRANT_CHECK_ON;
+-
+-  return 1;
++	RE_ENTRANT_CHECK_OFF;
++	FPU_put_user(sign, d + 9);
++	RE_ENTRANT_CHECK_ON;
++
++	return 1;
  }
  
--static inline void
--wrapper_smp_local_timer_interrupt(void)
-+static inline void wrapper_smp_local_timer_interrupt(void)
- {
- 	irq_enter();
- 	smp_local_timer_interrupt();
- 	irq_exit();
- }
- 
--static inline void
--send_one_CPI(__u8 cpu, __u8 cpi)
-+static inline void send_one_CPI(__u8 cpu, __u8 cpi)
+ /*===========================================================================*/
+@@ -1119,59 +973,56 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
+    largest possible value */
+ int FPU_round_to_int(FPU_REG *r, u_char tag)
  {
--	if(voyager_quad_processors & (1<<cpu))
-+	if (voyager_quad_processors & (1 << cpu))
- 		send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
- 	else
--		send_CPI(1<<cpu, cpi);
-+		send_CPI(1 << cpu, cpi);
- }
+-  u_char     very_big;
+-  unsigned eax;
+-
+-  if (tag == TAG_Zero)
+-    {
+-      /* Make sure that zero is returned */
+-      significand(r) = 0;
+-      return 0;        /* o.k. */
+-    }
+-
+-  if (exponent(r) > 63)
+-    {
+-      r->sigl = r->sigh = ~0;      /* The largest representable number */
+-      return 1;        /* overflow */
+-    }
+-
+-  eax = FPU_shrxs(&r->sigl, 63 - exponent(r));
+-  very_big = !(~(r->sigh) | ~(r->sigl));  /* test for 0xfff...fff */
++	u_char very_big;
++	unsigned eax;
++
++	if (tag == TAG_Zero) {
++		/* Make sure that zero is returned */
++		significand(r) = 0;
++		return 0;	/* o.k. */
++	}
++
++	if (exponent(r) > 63) {
++		r->sigl = r->sigh = ~0;	/* The largest representable number */
++		return 1;	/* overflow */
++	}
++
++	eax = FPU_shrxs(&r->sigl, 63 - exponent(r));
++	very_big = !(~(r->sigh) | ~(r->sigl));	/* test for 0xfff...fff */
+ #define	half_or_more	(eax & 0x80000000)
+ #define	frac_part	(eax)
+ #define more_than_half  ((eax & 0x80000001) == 0x80000001)
+-  switch (control_word & CW_RC)
+-    {
+-    case RC_RND:
+-      if ( more_than_half               	/* nearest */
+-	  || (half_or_more && (r->sigl & 1)) )	/* odd -> even */
+-	{
+-	  if ( very_big ) return 1;        /* overflow */
+-	  significand(r) ++;
+-	  return PRECISION_LOST_UP;
+-	}
+-      break;
+-    case RC_DOWN:
+-      if (frac_part && getsign(r))
+-	{
+-	  if ( very_big ) return 1;        /* overflow */
+-	  significand(r) ++;
+-	  return PRECISION_LOST_UP;
+-	}
+-      break;
+-    case RC_UP:
+-      if (frac_part && !getsign(r))
+-	{
+-	  if ( very_big ) return 1;        /* overflow */
+-	  significand(r) ++;
+-	  return PRECISION_LOST_UP;
++	switch (control_word & CW_RC) {
++	case RC_RND:
++		if (more_than_half	/* nearest */
++		    || (half_or_more && (r->sigl & 1))) {	/* odd -> even */
++			if (very_big)
++				return 1;	/* overflow */
++			significand(r)++;
++			return PRECISION_LOST_UP;
++		}
++		break;
++	case RC_DOWN:
++		if (frac_part && getsign(r)) {
++			if (very_big)
++				return 1;	/* overflow */
++			significand(r)++;
++			return PRECISION_LOST_UP;
++		}
++		break;
++	case RC_UP:
++		if (frac_part && !getsign(r)) {
++			if (very_big)
++				return 1;	/* overflow */
++			significand(r)++;
++			return PRECISION_LOST_UP;
++		}
++		break;
++	case RC_CHOP:
++		break;
+ 	}
+-      break;
+-    case RC_CHOP:
+-      break;
+-    }
  
--static inline void
--send_CPI_allbutself(__u8 cpi)
-+static inline void send_CPI_allbutself(__u8 cpi)
- {
- 	__u8 cpu = smp_processor_id();
- 	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
- 	send_CPI(mask, cpi);
- }
+-  return eax ? PRECISION_LOST_DOWN : 0;
++	return eax ? PRECISION_LOST_DOWN : 0;
  
--static inline int
--is_cpu_quad(void)
-+static inline int is_cpu_quad(void)
- {
- 	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
- 	return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
  }
  
--static inline int
--is_cpu_extended(void)
-+static inline int is_cpu_extended(void)
- {
- 	__u8 cpu = hard_smp_processor_id();
- 
--	return(voyager_extended_vic_processors & (1<<cpu));
-+	return (voyager_extended_vic_processors & (1 << cpu));
- }
+@@ -1179,197 +1030,195 @@ int FPU_round_to_int(FPU_REG *r, u_char tag)
  
--static inline int
--is_cpu_vic_boot(void)
-+static inline int is_cpu_vic_boot(void)
+ u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s)
  {
- 	__u8 cpu = hard_smp_processor_id();
- 
--	return(voyager_extended_vic_processors
--	       & voyager_allowed_boot_processors & (1<<cpu));
-+	return (voyager_extended_vic_processors
-+		& voyager_allowed_boot_processors & (1 << cpu));
- }
- 
+-  unsigned short tag_word = 0;
+-  u_char tag;
+-  int i;
 -
--static inline void
--ack_CPI(__u8 cpi)
-+static inline void ack_CPI(__u8 cpi)
- {
--	switch(cpi) {
-+	switch (cpi) {
- 	case VIC_CPU_BOOT_CPI:
--		if(is_cpu_quad() && !is_cpu_vic_boot())
-+		if (is_cpu_quad() && !is_cpu_vic_boot())
- 			ack_QIC_CPI(cpi);
- 		else
- 			ack_VIC_CPI(cpi);
- 		break;
- 	case VIC_SYS_INT:
--	case VIC_CMN_INT: 
-+	case VIC_CMN_INT:
- 		/* These are slightly strange.  Even on the Quad card,
- 		 * They are vectored as VIC CPIs */
--		if(is_cpu_quad())
-+		if (is_cpu_quad())
- 			ack_special_QIC_CPI(cpi);
- 		else
- 			ack_VIC_CPI(cpi);
-@@ -205,11 +197,11 @@ ack_CPI(__u8 cpi)
-  * 8259 IRQs except that masks and things must be kept per processor
-  */
- static struct irq_chip vic_chip = {
--	.name		= "VIC",
--	.startup	= startup_vic_irq,
--	.mask		= mask_vic_irq,
--	.unmask		= unmask_vic_irq,
--	.set_affinity	= set_vic_irq_affinity,
-+	.name = "VIC",
-+	.startup = startup_vic_irq,
-+	.mask = mask_vic_irq,
-+	.unmask = unmask_vic_irq,
-+	.set_affinity = set_vic_irq_affinity,
- };
- 
- /* used to count up as CPUs are brought on line (starts at 0) */
-@@ -223,7 +215,7 @@ static __u32 trampoline_base;
- /* The per cpu profile stuff - used in smp_local_timer_interrupt */
- static DEFINE_PER_CPU(int, prof_multiplier) = 1;
- static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
--static DEFINE_PER_CPU(int, prof_counter) =  1;
-+static DEFINE_PER_CPU(int, prof_counter) = 1;
- 
- /* the map used to check if a CPU has booted */
- static __u32 cpu_booted_map;
-@@ -235,7 +227,6 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
- /* This is for the new dynamic CPU boot code */
- cpumask_t cpu_callin_map = CPU_MASK_NONE;
- cpumask_t cpu_callout_map = CPU_MASK_NONE;
--EXPORT_SYMBOL(cpu_callout_map);
- cpumask_t cpu_possible_map = CPU_MASK_NONE;
- EXPORT_SYMBOL(cpu_possible_map);
- 
-@@ -246,9 +237,9 @@ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
- static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
- 
- /* Lock for enable/disable of VIC interrupts */
--static  __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
-+static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
- 
--/* The boot processor is correctly set up in PC mode when it 
-+/* The boot processor is correctly set up in PC mode when it
-  * comes up, but the secondaries need their master/slave 8259
-  * pairs initializing correctly */
- 
-@@ -262,8 +253,7 @@ static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
- static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
- 
- /* debugging routine to read the isr of the cpu's pic */
--static inline __u16
--vic_read_isr(void)
-+static inline __u16 vic_read_isr(void)
- {
- 	__u16 isr;
- 
-@@ -275,17 +265,16 @@ vic_read_isr(void)
- 	return isr;
- }
- 
--static __init void
--qic_setup(void)
-+static __init void qic_setup(void)
- {
--	if(!is_cpu_quad()) {
-+	if (!is_cpu_quad()) {
- 		/* not a quad, no setup */
- 		return;
- 	}
- 	outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
- 	outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
--	
--	if(is_cpu_extended()) {
+-  if ( (addr_modes.default_mode == VM86) ||
+-      ((addr_modes.default_mode == PM16)
+-      ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX)) )
+-    {
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_READ, s, 0x0e);
+-      FPU_get_user(control_word, (unsigned short __user *) s);
+-      FPU_get_user(partial_status, (unsigned short __user *) (s+2));
+-      FPU_get_user(tag_word, (unsigned short __user *) (s+4));
+-      FPU_get_user(instruction_address.offset, (unsigned short __user *) (s+6));
+-      FPU_get_user(instruction_address.selector, (unsigned short __user *) (s+8));
+-      FPU_get_user(operand_address.offset, (unsigned short __user *) (s+0x0a));
+-      FPU_get_user(operand_address.selector, (unsigned short __user *) (s+0x0c));
+-      RE_ENTRANT_CHECK_ON;
+-      s += 0x0e;
+-      if ( addr_modes.default_mode == VM86 )
+-	{
+-	  instruction_address.offset
+-	    += (instruction_address.selector & 0xf000) << 4;
+-	  operand_address.offset += (operand_address.selector & 0xf000) << 4;
++	unsigned short tag_word = 0;
++	u_char tag;
++	int i;
 +
-+	if (is_cpu_extended()) {
- 		/* the QIC duplicate of the VIC base register */
- 		outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
- 		outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);
-@@ -295,8 +284,7 @@ qic_setup(void)
++	if ((addr_modes.default_mode == VM86) ||
++	    ((addr_modes.default_mode == PM16)
++	     ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) {
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_READ, s, 0x0e);
++		FPU_get_user(control_word, (unsigned short __user *)s);
++		FPU_get_user(partial_status, (unsigned short __user *)(s + 2));
++		FPU_get_user(tag_word, (unsigned short __user *)(s + 4));
++		FPU_get_user(instruction_address.offset,
++			     (unsigned short __user *)(s + 6));
++		FPU_get_user(instruction_address.selector,
++			     (unsigned short __user *)(s + 8));
++		FPU_get_user(operand_address.offset,
++			     (unsigned short __user *)(s + 0x0a));
++		FPU_get_user(operand_address.selector,
++			     (unsigned short __user *)(s + 0x0c));
++		RE_ENTRANT_CHECK_ON;
++		s += 0x0e;
++		if (addr_modes.default_mode == VM86) {
++			instruction_address.offset
++			    += (instruction_address.selector & 0xf000) << 4;
++			operand_address.offset +=
++			    (operand_address.selector & 0xf000) << 4;
++		}
++	} else {
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_READ, s, 0x1c);
++		FPU_get_user(control_word, (unsigned short __user *)s);
++		FPU_get_user(partial_status, (unsigned short __user *)(s + 4));
++		FPU_get_user(tag_word, (unsigned short __user *)(s + 8));
++		FPU_get_user(instruction_address.offset,
++			     (unsigned long __user *)(s + 0x0c));
++		FPU_get_user(instruction_address.selector,
++			     (unsigned short __user *)(s + 0x10));
++		FPU_get_user(instruction_address.opcode,
++			     (unsigned short __user *)(s + 0x12));
++		FPU_get_user(operand_address.offset,
++			     (unsigned long __user *)(s + 0x14));
++		FPU_get_user(operand_address.selector,
++			     (unsigned long __user *)(s + 0x18));
++		RE_ENTRANT_CHECK_ON;
++		s += 0x1c;
  	}
- }
- 
--static __init void
--vic_setup_pic(void)
-+static __init void vic_setup_pic(void)
- {
- 	outb(1, VIC_REDIRECT_REGISTER_1);
- 	/* clear the claim registers for dynamic routing */
-@@ -333,7 +321,7 @@ vic_setup_pic(void)
+-    }
+-  else
+-    {
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_READ, s, 0x1c);
+-      FPU_get_user(control_word, (unsigned short __user *) s);
+-      FPU_get_user(partial_status, (unsigned short __user *) (s+4));
+-      FPU_get_user(tag_word, (unsigned short __user *) (s+8));
+-      FPU_get_user(instruction_address.offset, (unsigned long __user *) (s+0x0c));
+-      FPU_get_user(instruction_address.selector, (unsigned short __user *) (s+0x10));
+-      FPU_get_user(instruction_address.opcode, (unsigned short __user *) (s+0x12));
+-      FPU_get_user(operand_address.offset, (unsigned long __user *) (s+0x14));
+-      FPU_get_user(operand_address.selector, (unsigned long __user *) (s+0x18));
+-      RE_ENTRANT_CHECK_ON;
+-      s += 0x1c;
+-    }
  
- 	/* ICW2: slave vector base */
- 	outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);
--	
+ #ifdef PECULIAR_486
+-  control_word &= ~0xe080;
+-#endif /* PECULIAR_486 */ 
+-
+-  top = (partial_status >> SW_Top_Shift) & 7;
+-
+-  if ( partial_status & ~control_word & CW_Exceptions )
+-    partial_status |= (SW_Summary | SW_Backward);
+-  else
+-    partial_status &= ~(SW_Summary | SW_Backward);
+-
+-  for ( i = 0; i < 8; i++ )
+-    {
+-      tag = tag_word & 3;
+-      tag_word >>= 2;
+-
+-      if ( tag == TAG_Empty )
+-	/* New tag is empty.  Accept it */
+-	FPU_settag(i, TAG_Empty);
+-      else if ( FPU_gettag(i) == TAG_Empty )
+-	{
+-	  /* Old tag is empty and new tag is not empty.  New tag is determined
+-	     by old reg contents */
+-	  if ( exponent(&fpu_register(i)) == - EXTENDED_Ebias )
+-	    {
+-	      if ( !(fpu_register(i).sigl | fpu_register(i).sigh) )
+-		FPU_settag(i, TAG_Zero);
+-	      else
+-		FPU_settag(i, TAG_Special);
+-	    }
+-	  else if ( exponent(&fpu_register(i)) == 0x7fff - EXTENDED_Ebias )
+-	    {
+-	      FPU_settag(i, TAG_Special);
+-	    }
+-	  else if ( fpu_register(i).sigh & 0x80000000 )
+-	    FPU_settag(i, TAG_Valid);
+-	  else
+-	    FPU_settag(i, TAG_Special);   /* An Un-normal */
+-  	}
+-      /* Else old tag is not empty and new tag is not empty.  Old tag
+-	 remains correct */
+-    }
+-
+-  return s;
+-}
++	control_word &= ~0xe080;
++#endif /* PECULIAR_486 */
 +
- 	/* ICW3: slave ID */
- 	outb(0x02, 0xA1);
++	top = (partial_status >> SW_Top_Shift) & 7;
++
++	if (partial_status & ~control_word & CW_Exceptions)
++		partial_status |= (SW_Summary | SW_Backward);
++	else
++		partial_status &= ~(SW_Summary | SW_Backward);
++
++	for (i = 0; i < 8; i++) {
++		tag = tag_word & 3;
++		tag_word >>= 2;
++
++		if (tag == TAG_Empty)
++			/* New tag is empty.  Accept it */
++			FPU_settag(i, TAG_Empty);
++		else if (FPU_gettag(i) == TAG_Empty) {
++			/* Old tag is empty and new tag is not empty.  New tag is determined
++			   by old reg contents */
++			if (exponent(&fpu_register(i)) == -EXTENDED_Ebias) {
++				if (!
++				    (fpu_register(i).sigl | fpu_register(i).
++				     sigh))
++					FPU_settag(i, TAG_Zero);
++				else
++					FPU_settag(i, TAG_Special);
++			} else if (exponent(&fpu_register(i)) ==
++				   0x7fff - EXTENDED_Ebias) {
++				FPU_settag(i, TAG_Special);
++			} else if (fpu_register(i).sigh & 0x80000000)
++				FPU_settag(i, TAG_Valid);
++			else
++				FPU_settag(i, TAG_Special);	/* An Un-normal */
++		}
++		/* Else old tag is not empty and new tag is not empty.  Old tag
++		   remains correct */
++	}
  
-@@ -341,19 +329,18 @@ vic_setup_pic(void)
- 	outb(0x01, 0xA1);
- }
++	return s;
++}
  
--static void
--do_quad_bootstrap(void)
-+static void do_quad_bootstrap(void)
+ void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
  {
--	if(is_cpu_quad() && is_cpu_vic_boot()) {
-+	if (is_cpu_quad() && is_cpu_vic_boot()) {
- 		int i;
- 		unsigned long flags;
- 		__u8 cpuid = hard_smp_processor_id();
- 
- 		local_irq_save(flags);
- 
--		for(i = 0; i<4; i++) {
-+		for (i = 0; i < 4; i++) {
- 			/* FIXME: this would be >>3 &0x7 on the 32 way */
--			if(((cpuid >> 2) & 0x03) == i)
-+			if (((cpuid >> 2) & 0x03) == i)
- 				/* don't lower our own mask! */
- 				continue;
+-  int i, regnr;
+-  u_char __user *s = fldenv(addr_modes, data_address);
+-  int offset = (top & 7) * 10, other = 80 - offset;
+-
+-  /* Copy all registers in stack order. */
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_READ,s,80);
+-  __copy_from_user(register_base+offset, s, other);
+-  if ( offset )
+-    __copy_from_user(register_base, s+other, offset);
+-  RE_ENTRANT_CHECK_ON;
+-
+-  for ( i = 0; i < 8; i++ )
+-    {
+-      regnr = (i+top) & 7;
+-      if ( FPU_gettag(regnr) != TAG_Empty )
+-	/* The loaded data over-rides all other cases. */
+-	FPU_settag(regnr, FPU_tagof(&st(i)));
+-    }
++	int i, regnr;
++	u_char __user *s = fldenv(addr_modes, data_address);
++	int offset = (top & 7) * 10, other = 80 - offset;
++
++	/* Copy all registers in stack order. */
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_READ, s, 80);
++	__copy_from_user(register_base + offset, s, other);
++	if (offset)
++		__copy_from_user(register_base, s + other, offset);
++	RE_ENTRANT_CHECK_ON;
++
++	for (i = 0; i < 8; i++) {
++		regnr = (i + top) & 7;
++		if (FPU_gettag(regnr) != TAG_Empty)
++			/* The loaded data over-rides all other cases. */
++			FPU_settag(regnr, FPU_tagof(&st(i)));
++	}
  
-@@ -368,12 +355,10 @@ do_quad_bootstrap(void)
- 	}
  }
  
 -
- /* Set up all the basic stuff: read the SMP config and make all the
-  * SMP information reflect only the boot cpu.  All others will be
-  * brought on-line later. */
--void __init 
--find_smp_config(void)
-+void __init find_smp_config(void)
+ u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
  {
- 	int i;
- 
-@@ -382,24 +367,31 @@ find_smp_config(void)
- 	printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
+-  if ( (addr_modes.default_mode == VM86) ||
+-      ((addr_modes.default_mode == PM16)
+-      ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX)) )
+-    {
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_WRITE,d,14);
++	if ((addr_modes.default_mode == VM86) ||
++	    ((addr_modes.default_mode == PM16)
++	     ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) {
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_WRITE, d, 14);
+ #ifdef PECULIAR_486
+-      FPU_put_user(control_word & ~0xe080, (unsigned long __user *) d);
++		FPU_put_user(control_word & ~0xe080, (unsigned long __user *)d);
+ #else
+-      FPU_put_user(control_word, (unsigned short __user *) d);
++		FPU_put_user(control_word, (unsigned short __user *)d);
+ #endif /* PECULIAR_486 */
+-      FPU_put_user(status_word(), (unsigned short __user *) (d+2));
+-      FPU_put_user(fpu_tag_word, (unsigned short __user *) (d+4));
+-      FPU_put_user(instruction_address.offset, (unsigned short __user *) (d+6));
+-      FPU_put_user(operand_address.offset, (unsigned short __user *) (d+0x0a));
+-      if ( addr_modes.default_mode == VM86 )
+-	{
+-	  FPU_put_user((instruction_address.offset & 0xf0000) >> 4,
+-		      (unsigned short __user *) (d+8));
+-	  FPU_put_user((operand_address.offset & 0xf0000) >> 4,
+-		      (unsigned short __user *) (d+0x0c));
+-	}
+-      else
+-	{
+-	  FPU_put_user(instruction_address.selector, (unsigned short __user *) (d+8));
+-	  FPU_put_user(operand_address.selector, (unsigned short __user *) (d+0x0c));
+-	}
+-      RE_ENTRANT_CHECK_ON;
+-      d += 0x0e;
+-    }
+-  else
+-    {
+-      RE_ENTRANT_CHECK_OFF;
+-      FPU_access_ok(VERIFY_WRITE, d, 7*4);
++		FPU_put_user(status_word(), (unsigned short __user *)(d + 2));
++		FPU_put_user(fpu_tag_word, (unsigned short __user *)(d + 4));
++		FPU_put_user(instruction_address.offset,
++			     (unsigned short __user *)(d + 6));
++		FPU_put_user(operand_address.offset,
++			     (unsigned short __user *)(d + 0x0a));
++		if (addr_modes.default_mode == VM86) {
++			FPU_put_user((instruction_address.
++				      offset & 0xf0000) >> 4,
++				     (unsigned short __user *)(d + 8));
++			FPU_put_user((operand_address.offset & 0xf0000) >> 4,
++				     (unsigned short __user *)(d + 0x0c));
++		} else {
++			FPU_put_user(instruction_address.selector,
++				     (unsigned short __user *)(d + 8));
++			FPU_put_user(operand_address.selector,
++				     (unsigned short __user *)(d + 0x0c));
++		}
++		RE_ENTRANT_CHECK_ON;
++		d += 0x0e;
++	} else {
++		RE_ENTRANT_CHECK_OFF;
++		FPU_access_ok(VERIFY_WRITE, d, 7 * 4);
+ #ifdef PECULIAR_486
+-      control_word &= ~0xe080;
+-      /* An 80486 sets nearly all of the reserved bits to 1. */
+-      control_word |= 0xffff0040;
+-      partial_status = status_word() | 0xffff0000;
+-      fpu_tag_word |= 0xffff0000;
+-      I387.soft.fcs &= ~0xf8000000;
+-      I387.soft.fos |= 0xffff0000;
++		control_word &= ~0xe080;
++		/* An 80486 sets nearly all of the reserved bits to 1. */
++		control_word |= 0xffff0040;
++		partial_status = status_word() | 0xffff0000;
++		fpu_tag_word |= 0xffff0000;
++		I387.soft.fcs &= ~0xf8000000;
++		I387.soft.fos |= 0xffff0000;
+ #endif /* PECULIAR_486 */
+-      if (__copy_to_user(d, &control_word, 7*4))
+-	FPU_abort;
+-      RE_ENTRANT_CHECK_ON;
+-      d += 0x1c;
+-    }
+-  
+-  control_word |= CW_Exceptions;
+-  partial_status &= ~(SW_Summary | SW_Backward);
+-
+-  return d;
+-}
++		if (__copy_to_user(d, &control_word, 7 * 4))
++			FPU_abort;
++		RE_ENTRANT_CHECK_ON;
++		d += 0x1c;
++	}
  
- 	/* initialize the CPU structures (moved from smp_boot_cpus) */
--	for(i=0; i<NR_CPUS; i++) {
-+	for (i = 0; i < NR_CPUS; i++) {
- 		cpu_irq_affinity[i] = ~0;
- 	}
- 	cpu_online_map = cpumask_of_cpu(boot_cpu_id);
++	control_word |= CW_Exceptions;
++	partial_status &= ~(SW_Summary | SW_Backward);
++
++	return d;
++}
  
- 	/* The boot CPU must be extended */
--	voyager_extended_vic_processors = 1<<boot_cpu_id;
-+	voyager_extended_vic_processors = 1 << boot_cpu_id;
- 	/* initially, all of the first 8 CPUs can boot */
- 	voyager_allowed_boot_processors = 0xff;
- 	/* set up everything for just this CPU, we can alter
- 	 * this as we start the other CPUs later */
- 	/* now get the CPU disposition from the extended CMOS */
--	cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
--	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
--	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
--	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
-+	cpus_addr(phys_cpu_present_map)[0] =
-+	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
-+	cpus_addr(phys_cpu_present_map)[0] |=
-+	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
-+	cpus_addr(phys_cpu_present_map)[0] |=
-+	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
-+				       2) << 16;
-+	cpus_addr(phys_cpu_present_map)[0] |=
-+	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
-+				       3) << 24;
- 	cpu_possible_map = phys_cpu_present_map;
--	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
-+	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
-+	       cpus_addr(phys_cpu_present_map)[0]);
- 	/* Here we set up the VIC to enable SMP */
- 	/* enable the CPIs by writing the base vector to their register */
- 	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
-@@ -427,8 +419,7 @@ find_smp_config(void)
- /*
-  *	The bootstrap kernel entry code has set these up. Save them
-  *	for a given CPU, id is physical */
--void __init
--smp_store_cpu_info(int id)
-+void __init smp_store_cpu_info(int id)
+ void fsave(fpu_addr_modes addr_modes, u_char __user *data_address)
  {
- 	struct cpuinfo_x86 *c = &cpu_data(id);
+-  u_char __user *d;
+-  int offset = (top & 7) * 10, other = 80 - offset;
++	u_char __user *d;
++	int offset = (top & 7) * 10, other = 80 - offset;
  
-@@ -438,21 +429,19 @@ smp_store_cpu_info(int id)
- }
+-  d = fstenv(addr_modes, data_address);
++	d = fstenv(addr_modes, data_address);
  
- /* set up the trampoline and return the physical address of the code */
--static __u32 __init
--setup_trampoline(void)
-+static __u32 __init setup_trampoline(void)
- {
- 	/* these two are global symbols in trampoline.S */
- 	extern const __u8 trampoline_end[];
- 	extern const __u8 trampoline_data[];
+-  RE_ENTRANT_CHECK_OFF;
+-  FPU_access_ok(VERIFY_WRITE,d,80);
++	RE_ENTRANT_CHECK_OFF;
++	FPU_access_ok(VERIFY_WRITE, d, 80);
  
--	memcpy((__u8 *)trampoline_base, trampoline_data,
-+	memcpy((__u8 *) trampoline_base, trampoline_data,
- 	       trampoline_end - trampoline_data);
--	return virt_to_phys((__u8 *)trampoline_base);
-+	return virt_to_phys((__u8 *) trampoline_base);
- }
+-  /* Copy all registers in stack order. */
+-  if (__copy_to_user(d, register_base+offset, other))
+-    FPU_abort;
+-  if ( offset )
+-    if (__copy_to_user(d+other, register_base, offset))
+-      FPU_abort;
+-  RE_ENTRANT_CHECK_ON;
++	/* Copy all registers in stack order. */
++	if (__copy_to_user(d, register_base + offset, other))
++		FPU_abort;
++	if (offset)
++		if (__copy_to_user(d + other, register_base, offset))
++			FPU_abort;
++	RE_ENTRANT_CHECK_ON;
  
- /* Routine initially called when a non-boot CPU is brought online */
--static void __init
--start_secondary(void *unused)
-+static void __init start_secondary(void *unused)
- {
- 	__u8 cpuid = hard_smp_processor_id();
- 	/* external functions not defined in the headers */
-@@ -464,17 +453,18 @@ start_secondary(void *unused)
- 	ack_CPI(VIC_CPU_BOOT_CPI);
+-  finit();
++	finit();
+ }
  
- 	/* setup the 8259 master slave pair belonging to this CPU ---
--         * we won't actually receive any until the boot CPU
--         * relinquishes it's static routing mask */
-+	 * we won't actually receive any until the boot CPU
-+	 * relinquishes it's static routing mask */
- 	vic_setup_pic();
+ /*===========================================================================*/
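For reference, the frstor()/fldenv() hunks above rebuild the per-register tags from the 16-bit x87 tag word, two bits per register in stack order. A minimal standalone sketch of just that decoding step, with illustrative constants and names rather than the emulator's own definitions:

#include <stdio.h>

/* Illustrative 2-bit tag values; the emulator defines its own TAG_* constants. */
enum tag { TAG_VALID = 0, TAG_ZERO = 1, TAG_SPECIAL = 2, TAG_EMPTY = 3 };

/* Decode a 16-bit x87 tag word: register i owns bits [2*i+1 : 2*i]. */
static void decode_tag_word(unsigned short tag_word, enum tag tags[8])
{
	for (int i = 0; i < 8; i++) {
		tags[i] = (enum tag)(tag_word & 3);
		tag_word >>= 2;
	}
}

int main(void)
{
	enum tag tags[8];

	decode_tag_word(0xffff, tags);	/* all registers empty, as after FINIT */
	for (int i = 0; i < 8; i++)
		printf("st(%d): %d\n", i, tags[i]);
	return 0;
}

The reindented kernel loop additionally cross-checks the old register contents before accepting a non-empty tag; the sketch only shows the bit extraction.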
+diff --git a/arch/x86/math-emu/reg_mul.c b/arch/x86/math-emu/reg_mul.c
+index 40f50b6..36c37f7 100644
+--- a/arch/x86/math-emu/reg_mul.c
++++ b/arch/x86/math-emu/reg_mul.c
+@@ -20,7 +20,6 @@
+ #include "reg_constant.h"
+ #include "fpu_system.h"
  
- 	qic_setup();
+-
+ /*
+   Multiply two registers to give a register result.
+   The sources are st(deststnr) and (b,tagb,signb).
+@@ -29,104 +28,88 @@
+ /* This routine must be called with non-empty source registers */
+ int FPU_mul(FPU_REG const *b, u_char tagb, int deststnr, int control_w)
+ {
+-  FPU_REG *a = &st(deststnr);
+-  FPU_REG *dest = a;
+-  u_char taga = FPU_gettagi(deststnr);
+-  u_char saved_sign = getsign(dest);
+-  u_char sign = (getsign(a) ^ getsign(b));
+-  int tag;
+-
++	FPU_REG *a = &st(deststnr);
++	FPU_REG *dest = a;
++	u_char taga = FPU_gettagi(deststnr);
++	u_char saved_sign = getsign(dest);
++	u_char sign = (getsign(a) ^ getsign(b));
++	int tag;
  
--	if(is_cpu_quad() && !is_cpu_vic_boot()) {
-+	if (is_cpu_quad() && !is_cpu_vic_boot()) {
- 		/* clear the boot CPI */
- 		__u8 dummy;
+-  if ( !(taga | tagb) )
+-    {
+-      /* Both regs Valid, this should be the most common case. */
++	if (!(taga | tagb)) {
++		/* Both regs Valid, this should be the most common case. */
  
--		dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
-+		dummy =
-+		    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
- 		printk("read dummy %d\n", dummy);
+-      tag = FPU_u_mul(a, b, dest, control_w, sign, exponent(a) + exponent(b));
+-      if ( tag < 0 )
+-	{
+-	  setsign(dest, saved_sign);
+-	  return tag;
++		tag =
++		    FPU_u_mul(a, b, dest, control_w, sign,
++			      exponent(a) + exponent(b));
++		if (tag < 0) {
++			setsign(dest, saved_sign);
++			return tag;
++		}
++		FPU_settagi(deststnr, tag);
++		return tag;
  	}
+-      FPU_settagi(deststnr, tag);
+-      return tag;
+-    }
  
-@@ -516,7 +506,6 @@ start_secondary(void *unused)
- 	cpu_idle();
- }
+-  if ( taga == TAG_Special )
+-    taga = FPU_Special(a);
+-  if ( tagb == TAG_Special )
+-    tagb = FPU_Special(b);
++	if (taga == TAG_Special)
++		taga = FPU_Special(a);
++	if (tagb == TAG_Special)
++		tagb = FPU_Special(b);
  
+-  if ( ((taga == TAG_Valid) && (tagb == TW_Denormal))
++	if (((taga == TAG_Valid) && (tagb == TW_Denormal))
+ 	    || ((taga == TW_Denormal) && (tagb == TAG_Valid))
+-	    || ((taga == TW_Denormal) && (tagb == TW_Denormal)) )
+-    {
+-      FPU_REG x, y;
+-      if ( denormal_operand() < 0 )
+-	return FPU_Exception;
 -
- /* Routine to kick start the given CPU and wait for it to report ready
-  * (or timeout in startup).  When this routine returns, the requested
-  * CPU is either fully running and configured or known to be dead.
-@@ -524,29 +513,28 @@ start_secondary(void *unused)
-  * We call this routine sequentially 1 CPU at a time, so no need for
-  * locking */
- 
--static void __init
--do_boot_cpu(__u8 cpu)
-+static void __init do_boot_cpu(__u8 cpu)
- {
- 	struct task_struct *idle;
- 	int timeout;
- 	unsigned long flags;
--	int quad_boot = (1<<cpu) & voyager_quad_processors 
--		& ~( voyager_extended_vic_processors
--		     & voyager_allowed_boot_processors);
-+	int quad_boot = (1 << cpu) & voyager_quad_processors
-+	    & ~(voyager_extended_vic_processors
-+		& voyager_allowed_boot_processors);
- 
- 	/* This is an area in head.S which was used to set up the
- 	 * initial kernel stack.  We need to alter this to give the
- 	 * booting CPU a new stack (taken from its idle process) */
- 	extern struct {
--		__u8 *esp;
-+		__u8 *sp;
- 		unsigned short ss;
- 	} stack_start;
- 	/* This is the format of the CPI IDT gate (in real mode) which
- 	 * we're hijacking to boot the CPU */
--	union 	IDTFormat {
-+	union IDTFormat {
- 		struct seg {
--			__u16	Offset;
--			__u16	Segment;
-+			__u16 Offset;
-+			__u16 Segment;
- 		} idt;
- 		__u32 val;
- 	} hijack_source;
-@@ -565,37 +553,44 @@ do_boot_cpu(__u8 cpu)
- 	alternatives_smp_switch(1);
- 
- 	idle = fork_idle(cpu);
--	if(IS_ERR(idle))
-+	if (IS_ERR(idle))
- 		panic("failed fork for CPU%d", cpu);
--	idle->thread.eip = (unsigned long) start_secondary;
-+	idle->thread.ip = (unsigned long)start_secondary;
- 	/* init_tasks (in sched.c) is indexed logically */
--	stack_start.esp = (void *) idle->thread.esp;
-+	stack_start.sp = (void *)idle->thread.sp;
+-      FPU_to_exp16(a, &x);
+-      FPU_to_exp16(b, &y);
+-      tag = FPU_u_mul(&x, &y, dest, control_w, sign,
+-		      exponent16(&x) + exponent16(&y));
+-      if ( tag < 0 )
+-	{
+-	  setsign(dest, saved_sign);
+-	  return tag;
+-	}
+-      FPU_settagi(deststnr, tag);
+-      return tag;
+-    }
+-  else if ( (taga <= TW_Denormal) && (tagb <= TW_Denormal) )
+-    {
+-      if ( ((tagb == TW_Denormal) || (taga == TW_Denormal))
+-	   && (denormal_operand() < 0) )
+-	return FPU_Exception;
++	    || ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
++		FPU_REG x, y;
++		if (denormal_operand() < 0)
++			return FPU_Exception;
  
- 	init_gdt(cpu);
-- 	per_cpu(current_task, cpu) = idle;
-+	per_cpu(current_task, cpu) = idle;
- 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
- 	irq_ctx_init(cpu);
+-      /* Must have either both arguments == zero, or
+-	 one valid and the other zero.
+-	 The result is therefore zero. */
+-      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
+-      /* The 80486 book says that the answer is +0, but a real
+-	 80486 behaves this way.
+-	 IEEE-754 apparently says it should be this way. */
+-      setsign(dest, sign);
+-      return TAG_Zero;
+-    }
+-      /* Must have infinities, NaNs, etc */
+-  else if ( (taga == TW_NaN) || (tagb == TW_NaN) )
+-    {
+-      return real_2op_NaN(b, tagb, deststnr, &st(0));
+-    }
+-  else if ( ((taga == TW_Infinity) && (tagb == TAG_Zero))
+-	    || ((tagb == TW_Infinity) && (taga == TAG_Zero)) )
+-    {
+-      return arith_invalid(deststnr);  /* Zero*Infinity is invalid */
+-    }
+-  else if ( ((taga == TW_Denormal) || (tagb == TW_Denormal))
+-	    && (denormal_operand() < 0) )
+-    {
+-      return FPU_Exception;
+-    }
+-  else if (taga == TW_Infinity)
+-    {
+-      FPU_copy_to_regi(a, TAG_Special, deststnr);
+-      setsign(dest, sign);
+-      return TAG_Special;
+-    }
+-  else if (tagb == TW_Infinity)
+-    {
+-      FPU_copy_to_regi(b, TAG_Special, deststnr);
+-      setsign(dest, sign);
+-      return TAG_Special;
+-    }
++		FPU_to_exp16(a, &x);
++		FPU_to_exp16(b, &y);
++		tag = FPU_u_mul(&x, &y, dest, control_w, sign,
++				exponent16(&x) + exponent16(&y));
++		if (tag < 0) {
++			setsign(dest, saved_sign);
++			return tag;
++		}
++		FPU_settagi(deststnr, tag);
++		return tag;
++	} else if ((taga <= TW_Denormal) && (tagb <= TW_Denormal)) {
++		if (((tagb == TW_Denormal) || (taga == TW_Denormal))
++		    && (denormal_operand() < 0))
++			return FPU_Exception;
  
- 	/* Note: Don't modify initial ss override */
--	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, 
-+	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
- 		(unsigned long)hijack_source.val, hijack_source.idt.Segment,
--		hijack_source.idt.Offset, stack_start.esp));
-+		hijack_source.idt.Offset, stack_start.sp));
++		/* Must have either both arguments == zero, or
++		   one valid and the other zero.
++		   The result is therefore zero. */
++		FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
++		/* The 80486 book says that the answer is +0, but a real
++		   80486 behaves this way.
++		   IEEE-754 apparently says it should be this way. */
++		setsign(dest, sign);
++		return TAG_Zero;
++	}
++	/* Must have infinities, NaNs, etc */
++	else if ((taga == TW_NaN) || (tagb == TW_NaN)) {
++		return real_2op_NaN(b, tagb, deststnr, &st(0));
++	} else if (((taga == TW_Infinity) && (tagb == TAG_Zero))
++		   || ((tagb == TW_Infinity) && (taga == TAG_Zero))) {
++		return arith_invalid(deststnr);	/* Zero*Infinity is invalid */
++	} else if (((taga == TW_Denormal) || (tagb == TW_Denormal))
++		   && (denormal_operand() < 0)) {
++		return FPU_Exception;
++	} else if (taga == TW_Infinity) {
++		FPU_copy_to_regi(a, TAG_Special, deststnr);
++		setsign(dest, sign);
++		return TAG_Special;
++	} else if (tagb == TW_Infinity) {
++		FPU_copy_to_regi(b, TAG_Special, deststnr);
++		setsign(dest, sign);
++		return TAG_Special;
++	}
+ #ifdef PARANOID
+-  else
+-    {
+-      EXCEPTION(EX_INTERNAL|0x102);
+-      return FPU_Exception;
+-    }
+-#endif /* PARANOID */ 
++	else {
++		EXCEPTION(EX_INTERNAL | 0x102);
++		return FPU_Exception;
++	}
++#endif /* PARANOID */
  
- 	/* init lowmem identity mapping */
- 	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
- 			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
- 	flush_tlb_all();
+ 	return 0;
+ }
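FPU_mul() above computes the result sign as the XOR of the operand signs and then dispatches on the operand classes: NaNs propagate, zero times infinity is an invalid operation, an infinity otherwise dominates, and a zero operand yields a signed zero. A small sketch of that case analysis under the same rules (the types and names here are illustrative only, not the emulator's):

#include <stdio.h>

enum cls { C_VALID, C_ZERO, C_INF, C_NAN, C_INVALID };

struct val { enum cls cls; int sign; /* 0 = +, 1 = - */ };

/* Class and sign of a*b, following the same case order as FPU_mul(). */
static struct val mul_class(struct val a, struct val b)
{
	struct val r = { C_VALID, a.sign ^ b.sign };

	if (a.cls == C_NAN || b.cls == C_NAN)
		r.cls = C_NAN;				/* NaN propagates */
	else if ((a.cls == C_INF && b.cls == C_ZERO) ||
		 (b.cls == C_INF && a.cls == C_ZERO))
		r.cls = C_INVALID;			/* 0 * inf is invalid */
	else if (a.cls == C_INF || b.cls == C_INF)
		r.cls = C_INF;
	else if (a.cls == C_ZERO || b.cls == C_ZERO)
		r.cls = C_ZERO;
	return r;
}

int main(void)
{
	struct val zero = { C_ZERO, 0 }, minf = { C_INF, 1 };
	struct val r = mul_class(zero, minf);

	printf("class=%d sign=%d\n", r.cls, r.sign);	/* invalid operation */
	return 0;
}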
+diff --git a/arch/x86/math-emu/status_w.h b/arch/x86/math-emu/status_w.h
+index 59e7330..54a3f22 100644
+--- a/arch/x86/math-emu/status_w.h
++++ b/arch/x86/math-emu/status_w.h
+@@ -10,7 +10,7 @@
+ #ifndef _STATUS_H_
+ #define _STATUS_H_
  
--	if(quad_boot) {
-+	if (quad_boot) {
- 		printk("CPU %d: non extended Quad boot\n", cpu);
--		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4);
-+		hijack_vector =
-+		    (__u32 *)
-+		    phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
- 		*hijack_vector = hijack_source.val;
- 	} else {
- 		printk("CPU%d: extended VIC boot\n", cpu);
--		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4);
-+		hijack_vector =
-+		    (__u32 *)
-+		    phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
- 		*hijack_vector = hijack_source.val;
- 		/* VIC errata, may also receive interrupt at this address */
--		hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4);
-+		hijack_vector =
-+		    (__u32 *)
-+		    phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
-+				  VIC_DEFAULT_CPI_BASE) * 4);
- 		*hijack_vector = hijack_source.val;
- 	}
- 	/* All non-boot CPUs start with interrupts fully masked.  Need
-@@ -603,73 +598,76 @@ do_boot_cpu(__u8 cpu)
- 	 * this in the VIC by masquerading as the processor we're
- 	 * about to boot and lowering its interrupt mask */
- 	local_irq_save(flags);
--	if(quad_boot) {
-+	if (quad_boot) {
- 		send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
- 	} else {
- 		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
- 		/* here we're altering registers belonging to `cpu' */
--		
-+
- 		outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
- 		/* now go back to our original identity */
- 		outb(boot_cpu_id, VIC_PROCESSOR_ID);
+-#include "fpu_emu.h"    /* for definition of PECULIAR_486 */
++#include "fpu_emu.h"		/* for definition of PECULIAR_486 */
  
- 		/* and boot the CPU */
+ #ifdef __ASSEMBLY__
+ #define	Const__(x)	$##x
+@@ -34,7 +34,7 @@
+ #define SW_Denorm_Op   	Const__(0x0002)	/* denormalized operand */
+ #define SW_Invalid     	Const__(0x0001)	/* invalid operation */
  
--		send_CPI((1<<cpu), VIC_CPU_BOOT_CPI);
-+		send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
- 	}
- 	cpu_booted_map = 0;
- 	local_irq_restore(flags);
+-#define SW_Exc_Mask     Const__(0x27f)  /* Status word exception bit mask */
++#define SW_Exc_Mask     Const__(0x27f)	/* Status word exception bit mask */
  
- 	/* now wait for it to become ready (or timeout) */
--	for(timeout = 0; timeout < 50000; timeout++) {
--		if(cpu_booted_map)
-+	for (timeout = 0; timeout < 50000; timeout++) {
-+		if (cpu_booted_map)
- 			break;
- 		udelay(100);
- 	}
- 	/* reset the page table */
- 	zap_low_mappings();
--	  
-+
- 	if (cpu_booted_map) {
- 		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
- 			cpu, smp_processor_id()));
--	
-+
- 		printk("CPU%d: ", cpu);
- 		print_cpu_info(&cpu_data(cpu));
- 		wmb();
- 		cpu_set(cpu, cpu_callout_map);
- 		cpu_set(cpu, cpu_present_map);
--	}
--	else {
-+	} else {
- 		printk("CPU%d FAILED TO BOOT: ", cpu);
--		if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5)
-+		if (*
-+		    ((volatile unsigned char *)phys_to_virt(start_phys_address))
-+		    == 0xA5)
- 			printk("Stuck.\n");
- 		else
- 			printk("Not responding.\n");
--		
-+
- 		cpucount--;
- 	}
- }
+ #ifndef __ASSEMBLY__
  
--void __init
--smp_boot_cpus(void)
-+void __init smp_boot_cpus(void)
+@@ -50,8 +50,8 @@
+   ((partial_status & ~SW_Top & 0xffff) | ((top << SW_Top_Shift) & SW_Top))
+ static inline void setcc(int cc)
  {
- 	int i;
- 
- 	/* CAT BUS initialisation must be done after the memory */
- 	/* FIXME: The L4 has a catbus too, it just needs to be
- 	 * accessed in a totally different way */
--	if(voyager_level == 5) {
-+	if (voyager_level == 5) {
- 		voyager_cat_init();
- 
- 		/* now that the cat has probed the Voyager System Bus, sanity
- 		 * check the cpu map */
--		if( ((voyager_quad_processors | voyager_extended_vic_processors)
--		     & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
-+		if (((voyager_quad_processors | voyager_extended_vic_processors)
-+		     & cpus_addr(phys_cpu_present_map)[0]) !=
-+		    cpus_addr(phys_cpu_present_map)[0]) {
- 			/* should panic */
--			printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
-+			printk("\n\n***WARNING*** "
-+			       "Sanity check of CPU present map FAILED\n");
- 		}
--	} else if(voyager_level == 4)
--		voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];
-+	} else if (voyager_level == 4)
-+		voyager_extended_vic_processors =
-+		    cpus_addr(phys_cpu_present_map)[0];
+-	partial_status &= ~(SW_C0|SW_C1|SW_C2|SW_C3);
+-	partial_status |= (cc) & (SW_C0|SW_C1|SW_C2|SW_C3);
++	partial_status &= ~(SW_C0 | SW_C1 | SW_C2 | SW_C3);
++	partial_status |= (cc) & (SW_C0 | SW_C1 | SW_C2 | SW_C3);
+ }
  
- 	/* this sets up the idle task to run on the current cpu */
- 	voyager_extended_cpus = 1;
-@@ -678,14 +676,14 @@ smp_boot_cpus(void)
- 	//global_irq_holder = boot_cpu_id;
+ #ifdef PECULIAR_486
+diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32
+index 362b4ad..c36ae88 100644
+--- a/arch/x86/mm/Makefile_32
++++ b/arch/x86/mm/Makefile_32
+@@ -2,9 +2,8 @@
+ # Makefile for the linux i386-specific parts of the memory manager.
+ #
  
- 	/* FIXME: Need to do something about this but currently only works
--	 * on CPUs with a tsc which none of mine have. 
--	smp_tune_scheduling();
-+	 * on CPUs with a tsc which none of mine have.
-+	 smp_tune_scheduling();
- 	 */
- 	smp_store_cpu_info(boot_cpu_id);
- 	printk("CPU%d: ", boot_cpu_id);
- 	print_cpu_info(&cpu_data(boot_cpu_id));
+-obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap_32.o extable_32.o pageattr_32.o mmap_32.o
++obj-y	:= init_32.o pgtable_32.o fault.o ioremap.o extable.o pageattr.o mmap.o
  
--	if(is_cpu_quad()) {
-+	if (is_cpu_quad()) {
- 		/* booting on a Quad CPU */
- 		printk("VOYAGER SMP: Boot CPU is Quad\n");
- 		qic_setup();
-@@ -697,11 +695,11 @@ smp_boot_cpus(void)
+ obj-$(CONFIG_NUMA) += discontig_32.o
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_HIGHMEM) += highmem_32.o
+-obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap_32.o
+diff --git a/arch/x86/mm/Makefile_64 b/arch/x86/mm/Makefile_64
+index 6bcb479..688c8c2 100644
+--- a/arch/x86/mm/Makefile_64
++++ b/arch/x86/mm/Makefile_64
+@@ -2,9 +2,8 @@
+ # Makefile for the linux x86_64-specific parts of the memory manager.
+ #
  
- 	cpu_set(boot_cpu_id, cpu_online_map);
- 	cpu_set(boot_cpu_id, cpu_callout_map);
+-obj-y	 := init_64.o fault_64.o ioremap_64.o extable_64.o pageattr_64.o mmap_64.o
++obj-y	 := init_64.o fault.o ioremap.o extable.o pageattr.o mmap.o
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_NUMA) += numa_64.o
+ obj-$(CONFIG_K8_NUMA) += k8topology_64.o
+ obj-$(CONFIG_ACPI_NUMA) += srat_64.o
+-
+diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
+deleted file mode 100644
+index f14da2a..0000000
+--- a/arch/x86/mm/boot_ioremap_32.c
++++ /dev/null
+@@ -1,100 +0,0 @@
+-/*
+- * arch/i386/mm/boot_ioremap.c
+- * 
+- * Re-map functions for early boot-time before paging_init() when the 
+- * boot-time pagetables are still in use
+- *
+- * Written by Dave Hansen <haveblue at us.ibm.com>
+- */
+-
+-
+-/*
+- * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
+- * keeps that from happening.  If anyone has a better way, I'm listening.
+- *
+- * boot_pte_t is defined only if this all works correctly
+- */
+-
+-#undef CONFIG_X86_PAE
+-#undef CONFIG_PARAVIRT
+-#include <asm/page.h>
+-#include <asm/pgtable.h>
+-#include <asm/tlbflush.h>
+-#include <linux/init.h>
+-#include <linux/stddef.h>
+-
+-/* 
+- * I'm cheating here.  It is known that the two boot PTE pages are 
+- * allocated next to each other.  I'm pretending that they're just
+- * one big array. 
+- */
+-
+-#define BOOT_PTE_PTRS (PTRS_PER_PTE*2)
+-
+-static unsigned long boot_pte_index(unsigned long vaddr) 
+-{
+-	return __pa(vaddr) >> PAGE_SHIFT;
+-}
+-
+-static inline boot_pte_t* boot_vaddr_to_pte(void *address)
+-{
+-	boot_pte_t* boot_pg = (boot_pte_t*)pg0;
+-	return &boot_pg[boot_pte_index((unsigned long)address)];
+-}
+-
+-/*
+- * This is only for a caller who is clever enough to page-align
+- * phys_addr and virtual_source, and who also has a preference
+- * about which virtual address from which to steal ptes
+- */
+-static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages, 
+-		    void* virtual_source)
+-{
+-	boot_pte_t* pte;
+-	int i;
+-	char *vaddr = virtual_source;
+-
+-	pte = boot_vaddr_to_pte(virtual_source);
+-	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
+-		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
+-		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
+-	}
+-}
+-
+-/* the virtual space we're going to remap comes from this array */
+-#define BOOT_IOREMAP_PAGES 4
+-#define BOOT_IOREMAP_SIZE (BOOT_IOREMAP_PAGES*PAGE_SIZE)
+-static __initdata char boot_ioremap_space[BOOT_IOREMAP_SIZE]
+-		       __attribute__ ((aligned (PAGE_SIZE)));
+-
+-/*
+- * This only applies to things which need to ioremap before paging_init()
+- * bt_ioremap() and plain ioremap() are both useless at this point.
+- * 
+- * When used, we're still using the boot-time pagetables, which only
+- * have 2 PTE pages mapping the first 8MB
+- *
+- * There is no unmap.  The boot-time PTE pages aren't used after boot.
+- * If you really want the space back, just remap it yourself.
+- * boot_ioremap(&ioremap_space-PAGE_OFFSET, BOOT_IOREMAP_SIZE)
+- */
+-__init void* boot_ioremap(unsigned long phys_addr, unsigned long size)
+-{
+-	unsigned long last_addr, offset;
+-	unsigned int nrpages;
 -	
--	/* loop over all the extended VIC CPUs and boot them.  The 
-+
-+	/* loop over all the extended VIC CPUs and boot them.  The
- 	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
--	for(i = 0; i < NR_CPUS; i++) {
--		if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
-+	for (i = 0; i < NR_CPUS; i++) {
-+		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
- 			continue;
- 		do_boot_cpu(i);
- 		/* This udelay seems to be needed for the Quad boots
-@@ -715,25 +713,26 @@ smp_boot_cpus(void)
- 		for (i = 0; i < NR_CPUS; i++)
- 			if (cpu_isset(i, cpu_online_map))
- 				bogosum += cpu_data(i).loops_per_jiffy;
--		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
--			cpucount+1,
--			bogosum/(500000/HZ),
--			(bogosum/(5000/HZ))%100);
-+		printk(KERN_INFO "Total of %d processors activated "
-+		       "(%lu.%02lu BogoMIPS).\n",
-+		       cpucount + 1, bogosum / (500000 / HZ),
-+		       (bogosum / (5000 / HZ)) % 100);
- 	}
- 	voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
--	printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus);
-+	printk("VOYAGER: Extended (interrupt handling CPUs): "
-+	       "%d, non-extended: %d\n", voyager_extended_cpus,
-+	       num_booting_cpus() - voyager_extended_cpus);
- 	/* that's it, switch to symmetric mode */
- 	outb(0, VIC_PRIORITY_REGISTER);
- 	outb(0, VIC_CLAIM_REGISTER_0);
- 	outb(0, VIC_CLAIM_REGISTER_1);
+-	last_addr = phys_addr + size - 1;
+-
+-	/* page align the requested address */
+-	offset = phys_addr & ~PAGE_MASK;
+-	phys_addr &= PAGE_MASK;
+-	size = PAGE_ALIGN(last_addr) - phys_addr;
 -	
-+
- 	VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
- }
+-	nrpages = size >> PAGE_SHIFT;
+-	if (nrpages > BOOT_IOREMAP_PAGES)
+-		return NULL;
+-	
+-	__boot_ioremap(phys_addr, nrpages, boot_ioremap_space);
+-
+-	return &boot_ioremap_space[offset];
+-}
+diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
+index 13a474d..04b1d20 100644
+--- a/arch/x86/mm/discontig_32.c
++++ b/arch/x86/mm/discontig_32.c
+@@ -32,6 +32,7 @@
+ #include <linux/kexec.h>
+ #include <linux/pfn.h>
+ #include <linux/swap.h>
++#include <linux/acpi.h>
  
- /* Reload the secondary CPUs task structure (this function does not
-  * return ) */
--void __init 
--initialize_secondary(void)
-+void __init initialize_secondary(void)
- {
- #if 0
- 	// AC kernels only
-@@ -745,11 +744,9 @@ initialize_secondary(void)
- 	 * basically just the stack pointer and the eip.
- 	 */
+ #include <asm/e820.h>
+ #include <asm/setup.h>
+@@ -103,14 +104,10 @@ extern unsigned long highend_pfn, highstart_pfn;
  
--	asm volatile(
--		"movl %0,%%esp\n\t"
--		"jmp *%1"
--		:
--		:"r" (current->thread.esp),"r" (current->thread.eip));
-+	asm volatile ("movl %0,%%esp\n\t"
-+		      "jmp *%1"::"r" (current->thread.sp),
-+		      "r"(current->thread.ip));
+ #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
+ 
+-static unsigned long node_remap_start_pfn[MAX_NUMNODES];
+ unsigned long node_remap_size[MAX_NUMNODES];
+-static unsigned long node_remap_offset[MAX_NUMNODES];
+ static void *node_remap_start_vaddr[MAX_NUMNODES];
+ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+ 
+-static void *node_remap_end_vaddr[MAX_NUMNODES];
+-static void *node_remap_alloc_vaddr[MAX_NUMNODES];
+ static unsigned long kva_start_pfn;
+ static unsigned long kva_pages;
+ /*
+@@ -167,6 +164,22 @@ static void __init allocate_pgdat(int nid)
+ 	}
  }
  
- /* handle a Voyager SYS_INT -- If we don't, the base board will
-@@ -758,25 +755,23 @@ initialize_secondary(void)
-  * System interrupts occur because some problem was detected on the
-  * various busses.  To find out what you have to probe all the
-  * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
--fastcall void
--smp_vic_sys_interrupt(struct pt_regs *regs)
-+void smp_vic_sys_interrupt(struct pt_regs *regs)
++#ifdef CONFIG_DISCONTIGMEM
++/*
++ * In the discontig memory model, a portion of the kernel virtual area (KVA)
++ * is reserved and portions of nodes are mapped using it. This is to allow
++ * node-local memory to be allocated for structures that would normally require
++ * ZONE_NORMAL. The memory is allocated with alloc_remap() and callers
++ * should be prepared to allocate from the bootmem allocator instead. This KVA
++ * mechanism is incompatible with SPARSEMEM as it makes assumptions about the
++ * layout of memory that are broken if alloc_remap() succeeds for some of the
++ * map and fails for others
++ */
++static unsigned long node_remap_start_pfn[MAX_NUMNODES];
++static void *node_remap_end_vaddr[MAX_NUMNODES];
++static void *node_remap_alloc_vaddr[MAX_NUMNODES];
++static unsigned long node_remap_offset[MAX_NUMNODES];
++
+ void *alloc_remap(int nid, unsigned long size)
  {
- 	ack_CPI(VIC_SYS_INT);
--	printk("Voyager SYSTEM INTERRUPT\n");	
-+	printk("Voyager SYSTEM INTERRUPT\n");
+ 	void *allocation = node_remap_alloc_vaddr[nid];
+@@ -263,11 +276,46 @@ static unsigned long calculate_numa_remap_pages(void)
+ 	return reserve_pages;
  }
  
- /* Handle a voyager CMN_INT; These interrupts occur either because of
-  * a system status change or because a single bit memory error
-  * occurred.  FIXME: At the moment, ignore all this. */
--fastcall void
--smp_vic_cmn_interrupt(struct pt_regs *regs)
-+void smp_vic_cmn_interrupt(struct pt_regs *regs)
++static void init_remap_allocator(int nid)
++{
++	node_remap_start_vaddr[nid] = pfn_to_kaddr(
++			kva_start_pfn + node_remap_offset[nid]);
++	node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
++		(node_remap_size[nid] * PAGE_SIZE);
++	node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
++		ALIGN(sizeof(pg_data_t), PAGE_SIZE);
++
++	printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
++		(ulong) node_remap_start_vaddr[nid],
++		(ulong) pfn_to_kaddr(highstart_pfn
++		   + node_remap_offset[nid] + node_remap_size[nid]));
++}
++#else
++void *alloc_remap(int nid, unsigned long size)
++{
++	return NULL;
++}
++
++static unsigned long calculate_numa_remap_pages(void)
++{
++	return 0;
++}
++
++static void init_remap_allocator(int nid)
++{
++}
++
++void __init remap_numa_kva(void)
++{
++}
++#endif /* CONFIG_DISCONTIGMEM */
++
+ extern void setup_bootmem_allocator(void);
+ unsigned long __init setup_memory(void)
  {
- 	static __u8 in_cmn_int = 0;
- 	static DEFINE_SPINLOCK(cmn_int_lock);
+ 	int nid;
+ 	unsigned long system_start_pfn, system_max_low_pfn;
++	unsigned long wasted_pages;
  
- 	/* common ints are broadcast, so make sure we only do this once */
- 	_raw_spin_lock(&cmn_int_lock);
--	if(in_cmn_int)
-+	if (in_cmn_int)
- 		goto unlock_end;
+ 	/*
+ 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
+@@ -288,11 +336,18 @@ unsigned long __init setup_memory(void)
  
- 	in_cmn_int++;
-@@ -784,12 +779,12 @@ smp_vic_cmn_interrupt(struct pt_regs *regs)
+ #ifdef CONFIG_BLK_DEV_INITRD
+ 	/* Numa kva area is below the initrd */
+-	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image)
+-		kva_start_pfn = PFN_DOWN(boot_params.hdr.ramdisk_image)
++	if (initrd_start)
++		kva_start_pfn = PFN_DOWN(initrd_start - PAGE_OFFSET)
+ 			- kva_pages;
+ #endif
+-	kva_start_pfn -= kva_start_pfn & (PTRS_PER_PTE-1);
++
++	/*
++	 * We waste pages past at the end of the KVA for no good reason other
++	 * than how it is located. This is bad.
++	 */
++	wasted_pages = kva_start_pfn & (PTRS_PER_PTE-1);
++	kva_start_pfn -= wasted_pages;
++	kva_pages += wasted_pages;
  
- 	VDEBUG(("Voyager COMMON INTERRUPT\n"));
+ 	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
+ 	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
+@@ -318,19 +373,9 @@ unsigned long __init setup_memory(void)
+ 	printk("Low memory ends at vaddr %08lx\n",
+ 			(ulong) pfn_to_kaddr(max_low_pfn));
+ 	for_each_online_node(nid) {
+-		node_remap_start_vaddr[nid] = pfn_to_kaddr(
+-				kva_start_pfn + node_remap_offset[nid]);
+-		/* Init the node remap allocator */
+-		node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
+-			(node_remap_size[nid] * PAGE_SIZE);
+-		node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
+-			ALIGN(sizeof(pg_data_t), PAGE_SIZE);
++		init_remap_allocator(nid);
  
--	if(voyager_level == 5)
-+	if (voyager_level == 5)
- 		voyager_cat_do_common_interrupt();
+ 		allocate_pgdat(nid);
+-		printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
+-			(ulong) node_remap_start_vaddr[nid],
+-			(ulong) pfn_to_kaddr(highstart_pfn
+-			   + node_remap_offset[nid] + node_remap_size[nid]));
+ 	}
+ 	printk("High memory starts at vaddr %08lx\n",
+ 			(ulong) pfn_to_kaddr(highstart_pfn));
+@@ -345,7 +390,8 @@ unsigned long __init setup_memory(void)
  
- 	_raw_spin_lock(&cmn_int_lock);
- 	in_cmn_int = 0;
-- unlock_end:
-+      unlock_end:
- 	_raw_spin_unlock(&cmn_int_lock);
- 	ack_CPI(VIC_CMN_INT);
- }
-@@ -797,26 +792,23 @@ smp_vic_cmn_interrupt(struct pt_regs *regs)
- /*
-  * Reschedule call back. Nothing to do, all the work is done
-  * automatically when we return from the interrupt.  */
--static void
--smp_reschedule_interrupt(void)
-+static void smp_reschedule_interrupt(void)
+ void __init numa_kva_reserve(void)
  {
- 	/* do nothing */
+-	reserve_bootmem(PFN_PHYS(kva_start_pfn),PFN_PHYS(kva_pages));
++	if (kva_pages)
++		reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages));
  }
  
--static struct mm_struct * flush_mm;
-+static struct mm_struct *flush_mm;
- static unsigned long flush_va;
- static DEFINE_SPINLOCK(tlbstate_lock);
--#define FLUSH_ALL	0xffffffff
- 
- /*
-- * We cannot call mmdrop() because we are in interrupt context, 
-+ * We cannot call mmdrop() because we are in interrupt context,
-  * instead update mm->cpu_vm_mask.
-  *
-  * We need to reload %cr3 since the page tables may be going
-  * away from under us..
-  */
--static inline void
--leave_mm (unsigned long cpu)
-+static inline void voyager_leave_mm(unsigned long cpu)
- {
- 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
- 		BUG();
-@@ -824,12 +816,10 @@ leave_mm (unsigned long cpu)
- 	load_cr3(swapper_pg_dir);
- }
+ void __init zone_sizes_init(void)
+@@ -430,3 +476,29 @@ int memory_add_physaddr_to_nid(u64 addr)
  
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+ #endif
++
++#ifndef CONFIG_HAVE_ARCH_PARSE_SRAT
++/*
++ * XXX FIXME: Make SLIT table parsing available to 32-bit NUMA
++ *
++ * These stub functions are needed to compile 32-bit NUMA when SRAT is
++ * not set. There are functions in srat_64.c for parsing this table
++ * and it may be possible to make them common functions.
++ */
++void acpi_numa_slit_init (struct acpi_table_slit *slit)
++{
++	printk(KERN_INFO "ACPI: No support for parsing SLIT table\n");
++}
++
++void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa)
++{
++}
++
++void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma)
++{
++}
++
++void acpi_numa_arch_fixup(void)
++{
++}
++#endif /* CONFIG_HAVE_ARCH_PARSE_SRAT */
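The alloc_remap() path touched above (now paired with stubs for the non-DISCONTIGMEM case) hands out node-local chunks from the pre-mapped KVA window by advancing a per-node cursor and returning NULL when the window is exhausted, at which point callers fall back to the bootmem allocator. A minimal sketch of that bump-allocation pattern, independent of the kernel's per-node data structures (all names below are hypothetical):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define WINDOW_SIZE 4096		/* stand-in for one node's remap window */

static char window[WINDOW_SIZE];
static char *cursor = window;
static char *window_end = window + WINDOW_SIZE;

/* Round size up to a power-of-two boundary, as the kernel aligns its requests. */
static size_t align_up(size_t size, size_t align)
{
	return (size + align - 1) & ~(align - 1);
}

/* Bump allocator: hand back the cursor and advance it, or NULL when full. */
static void *bump_alloc(size_t size)
{
	char *p = cursor;

	size = align_up(size, 64);
	if (size > (size_t)(window_end - p))
		return NULL;		/* caller falls back to another allocator */
	cursor = p + size;
	memset(p, 0, size);
	return p;
}

int main(void)
{
	void *a = bump_alloc(100);	/* fits in the window */
	void *b = bump_alloc(5000);	/* too big: NULL, use the fallback */

	printf("a=%p b=%p\n", a, b);
	return 0;
}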
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
+new file mode 100644
+index 0000000..7e8db53
+--- /dev/null
++++ b/arch/x86/mm/extable.c
+@@ -0,0 +1,62 @@
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <asm/uaccess.h>
++
++
++int fixup_exception(struct pt_regs *regs)
++{
++	const struct exception_table_entry *fixup;
++
++#ifdef CONFIG_PNPBIOS
++	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
++		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
++		extern u32 pnp_bios_is_utter_crap;
++		pnp_bios_is_utter_crap = 1;
++		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
++		__asm__ volatile(
++			"movl %0, %%esp\n\t"
++			"jmp *%1\n\t"
++			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
++		panic("do_trap: can't hit this");
++	}
++#endif
++
++	fixup = search_exception_tables(regs->ip);
++	if (fixup) {
++		regs->ip = fixup->fixup;
++		return 1;
++	}
++
++	return 0;
++}
++
++#ifdef CONFIG_X86_64
++/*
++ * Need to defined our own search_extable on X86_64 to work around
++ * a B stepping K8 bug.
++ */
++const struct exception_table_entry *
++search_extable(const struct exception_table_entry *first,
++	       const struct exception_table_entry *last,
++	       unsigned long value)
++{
++	/* B stepping K8 bug */
++	if ((value >> 32) == 0)
++		value |= 0xffffffffUL << 32;
++
++	while (first <= last) {
++		const struct exception_table_entry *mid;
++		long diff;
++
++		mid = (last - first) / 2 + first;
++		diff = mid->insn - value;
++		if (diff == 0)
++			return mid;
++		else if (diff < 0)
++			first = mid+1;
++		else
++			last = mid-1;
++	}
++	return NULL;
++}
++#endif
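The new arch/x86/mm/extable.c above keeps the 64-bit search_extable() workaround: fault addresses with clear upper bits are sign-extended before the usual binary search over the exception table, which is sorted by faulting instruction address. A standalone sketch of that lookup (the struct layout below is illustrative, not the kernel's exception_table_entry):

#include <stddef.h>
#include <stdio.h>

struct ex_entry {
	unsigned long insn;	/* faulting instruction address */
	unsigned long fixup;	/* address to resume execution at */
};

/* Binary search over entries sorted by ->insn, as search_extable() does. */
static const struct ex_entry *find_fixup(const struct ex_entry *table,
					 size_t n, unsigned long value)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (table[mid].insn == value)
			return &table[mid];
		else if (table[mid].insn < value)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;
}

int main(void)
{
	static const struct ex_entry table[] = {
		{ 0x1000, 0x5000 }, { 0x2000, 0x6000 }, { 0x3000, 0x7000 },
	};
	const struct ex_entry *e = find_fixup(table, 3, 0x2000);

	printf("fixup=%#lx\n", e ? e->fixup : 0UL);
	return 0;
}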
+diff --git a/arch/x86/mm/extable_32.c b/arch/x86/mm/extable_32.c
+deleted file mode 100644
+index 0ce4f22..0000000
+--- a/arch/x86/mm/extable_32.c
++++ /dev/null
+@@ -1,35 +0,0 @@
+-/*
+- * linux/arch/i386/mm/extable.c
+- */
+-
+-#include <linux/module.h>
+-#include <linux/spinlock.h>
+-#include <asm/uaccess.h>
+-
+-int fixup_exception(struct pt_regs *regs)
+-{
+-	const struct exception_table_entry *fixup;
+-
+-#ifdef CONFIG_PNPBIOS
+-	if (unlikely(SEGMENT_IS_PNP_CODE(regs->xcs)))
+-	{
+-		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+-		extern u32 pnp_bios_is_utter_crap;
+-		pnp_bios_is_utter_crap = 1;
+-		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
+-		__asm__ volatile(
+-			"movl %0, %%esp\n\t"
+-			"jmp *%1\n\t"
+-			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
+-		panic("do_trap: can't hit this");
+-	}
+-#endif
+-
+-	fixup = search_exception_tables(regs->eip);
+-	if (fixup) {
+-		regs->eip = fixup->fixup;
+-		return 1;
+-	}
+-
+-	return 0;
+-}
+diff --git a/arch/x86/mm/extable_64.c b/arch/x86/mm/extable_64.c
+deleted file mode 100644
+index 79ac6e7..0000000
+--- a/arch/x86/mm/extable_64.c
++++ /dev/null
+@@ -1,34 +0,0 @@
+-/*
+- * linux/arch/x86_64/mm/extable.c
+- */
+-
+-#include <linux/module.h>
+-#include <linux/spinlock.h>
+-#include <linux/init.h>
+-#include <asm/uaccess.h>
+-
+-/* Simple binary search */
+-const struct exception_table_entry *
+-search_extable(const struct exception_table_entry *first,
+-	       const struct exception_table_entry *last,
+-	       unsigned long value)
+-{
+-	/* Work around a B stepping K8 bug */
+-	if ((value >> 32) == 0)
+-		value |= 0xffffffffUL << 32; 
+-
+-        while (first <= last) {
+-		const struct exception_table_entry *mid;
+-		long diff;
+-
+-		mid = (last - first) / 2 + first;
+-		diff = mid->insn - value;
+-                if (diff == 0)
+-                        return mid;
+-                else if (diff < 0)
+-                        first = mid+1;
+-                else
+-                        last = mid-1;
+-        }
+-        return NULL;
+-}
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+new file mode 100644
+index 0000000..e28cc52
+--- /dev/null
++++ b/arch/x86/mm/fault.c
+@@ -0,0 +1,986 @@
++/*
++ *  Copyright (C) 1995  Linus Torvalds
++ *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h>		/* For unblank_screen() */
++#include <linux/compiler.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>		/* for max_low_pfn */
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++#include <linux/uaccess.h>
++#include <linux/kdebug.h>
++
++#include <asm/system.h>
++#include <asm/desc.h>
++#include <asm/segment.h>
++#include <asm/pgalloc.h>
++#include <asm/smp.h>
++#include <asm/tlbflush.h>
++#include <asm/proto.h>
++#include <asm-generic/sections.h>
++
++/*
++ * Page fault error code bits
++ *	bit 0 == 0 means no page found, 1 means protection fault
++ *	bit 1 == 0 means read, 1 means write
++ *	bit 2 == 0 means kernel, 1 means user-mode
++ *	bit 3 == 1 means use of reserved bit detected
++ *	bit 4 == 1 means fault was an instruction fetch
++ */
++#define PF_PROT		(1<<0)
++#define PF_WRITE	(1<<1)
++#define PF_USER		(1<<2)
++#define PF_RSVD		(1<<3)
++#define PF_INSTR	(1<<4)
++
++static inline int notify_page_fault(struct pt_regs *regs)
++{
++#ifdef CONFIG_KPROBES
++	int ret = 0;
++
++	/* kprobe_running() needs smp_processor_id() */
++#ifdef CONFIG_X86_32
++	if (!user_mode_vm(regs)) {
++#else
++	if (!user_mode(regs)) {
++#endif
++		preempt_disable();
++		if (kprobe_running() && kprobe_fault_handler(regs, 14))
++			ret = 1;
++		preempt_enable();
++	}
++
++	return ret;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * X86_32
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ *
++ * X86_64
++ * Sometimes the CPU reports invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ *
++ * Opcode checker based on code by Richard Brunner
++ */
++static int is_prefetch(struct pt_regs *regs, unsigned long addr,
++		       unsigned long error_code)
++{
++	unsigned char *instr;
++	int scan_more = 1;
++	int prefetch = 0;
++	unsigned char *max_instr;
++
++#ifdef CONFIG_X86_32
++	if (!(__supported_pte_mask & _PAGE_NX))
++		return 0;
++#endif
++
++	/* If it was a exec fault on NX page, ignore */
++	if (error_code & PF_INSTR)
++		return 0;
++
++	instr = (unsigned char *)convert_ip_to_linear(current, regs);
++	max_instr = instr + 15;
++
++	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
++		return 0;
++
++	while (scan_more && instr < max_instr) {
++		unsigned char opcode;
++		unsigned char instr_hi;
++		unsigned char instr_lo;
++
++		if (probe_kernel_address(instr, opcode))
++			break;
++
++		instr_hi = opcode & 0xf0;
++		instr_lo = opcode & 0x0f;
++		instr++;
++
++		switch (instr_hi) {
++		case 0x20:
++		case 0x30:
++			/*
++			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
++			 * In X86_64 long mode, the CPU will signal invalid
++			 * opcode if some of these prefixes are present so
++			 * X86_64 will never get here anyway
++			 */
++			scan_more = ((instr_lo & 7) == 0x6);
++			break;
++#ifdef CONFIG_X86_64
++		case 0x40:
++			/*
++			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
++			 * Need to figure out under what instruction mode the
++			 * instruction was issued. Could check the LDT for lm,
++			 * but for now it's good enough to assume that long
++			 * mode only uses well known segments or kernel.
++			 */
++			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
++			break;
++#endif
++		case 0x60:
++			/* 0x64 thru 0x67 are valid prefixes in all modes. */
++			scan_more = (instr_lo & 0xC) == 0x4;
++			break;
++		case 0xF0:
++			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
++			scan_more = !instr_lo || (instr_lo>>1) == 1;
++			break;
++		case 0x00:
++			/* Prefetch instruction is 0x0F0D or 0x0F18 */
++			scan_more = 0;
++
++			if (probe_kernel_address(instr, opcode))
++				break;
++			prefetch = (instr_lo == 0xF) &&
++				(opcode == 0x0D || opcode == 0x18);
++			break;
++		default:
++			scan_more = 0;
++			break;
++		}
++	}
++	return prefetch;
++}
++
++static void force_sig_info_fault(int si_signo, int si_code,
++	unsigned long address, struct task_struct *tsk)
++{
++	siginfo_t info;
++
++	info.si_signo = si_signo;
++	info.si_errno = 0;
++	info.si_code = si_code;
++	info.si_addr = (void __user *)address;
++	force_sig_info(si_signo, &info, tsk);
++}
++
++#ifdef CONFIG_X86_64
++static int bad_address(void *p)
++{
++	unsigned long dummy;
++	return probe_kernel_address((unsigned long *)p, dummy);
++}
++#endif
++
++void dump_pagetable(unsigned long address)
++{
++#ifdef CONFIG_X86_32
++	__typeof__(pte_val(__pte(0))) page;
++
++	page = read_cr3();
++	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
++#ifdef CONFIG_X86_PAE
++	printk("*pdpt = %016Lx ", page);
++	if ((page >> PAGE_SHIFT) < max_low_pfn
++	    && page & _PAGE_PRESENT) {
++		page &= PAGE_MASK;
++		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
++		                                         & (PTRS_PER_PMD - 1)];
++		printk(KERN_CONT "*pde = %016Lx ", page);
++		page &= ~_PAGE_NX;
++	}
++#else
++	printk("*pde = %08lx ", page);
++#endif
++
++	/*
++	 * We must not directly access the pte in the highpte
++	 * case if the page table is located in highmem.
++	 * And let's rather not kmap-atomic the pte, just in case
++	 * it's allocated already.
++	 */
++	if ((page >> PAGE_SHIFT) < max_low_pfn
++	    && (page & _PAGE_PRESENT)
++	    && !(page & _PAGE_PSE)) {
++		page &= PAGE_MASK;
++		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
++		                                         & (PTRS_PER_PTE - 1)];
++		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
++	}
++
++	printk("\n");
++#else /* CONFIG_X86_64 */
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	pgd = (pgd_t *)read_cr3();
++
++	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
++	pgd += pgd_index(address);
++	if (bad_address(pgd)) goto bad;
++	printk("PGD %lx ", pgd_val(*pgd));
++	if (!pgd_present(*pgd)) goto ret;
++
++	pud = pud_offset(pgd, address);
++	if (bad_address(pud)) goto bad;
++	printk("PUD %lx ", pud_val(*pud));
++	if (!pud_present(*pud))	goto ret;
++
++	pmd = pmd_offset(pud, address);
++	if (bad_address(pmd)) goto bad;
++	printk("PMD %lx ", pmd_val(*pmd));
++	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
++
++	pte = pte_offset_kernel(pmd, address);
++	if (bad_address(pte)) goto bad;
++	printk("PTE %lx", pte_val(*pte));
++ret:
++	printk("\n");
++	return;
++bad:
++	printk("BAD\n");
++#endif
++}
++
++#ifdef CONFIG_X86_32
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++{
++	unsigned index = pgd_index(address);
++	pgd_t *pgd_k;
++	pud_t *pud, *pud_k;
++	pmd_t *pmd, *pmd_k;
++
++	pgd += index;
++	pgd_k = init_mm.pgd + index;
++
++	if (!pgd_present(*pgd_k))
++		return NULL;
++
++	/*
++	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
++	 * and redundant with the set_pmd() on non-PAE. As would
++	 * set_pud.
++	 */
++
++	pud = pud_offset(pgd, address);
++	pud_k = pud_offset(pgd_k, address);
++	if (!pud_present(*pud_k))
++		return NULL;
++
++	pmd = pmd_offset(pud, address);
++	pmd_k = pmd_offset(pud_k, address);
++	if (!pmd_present(*pmd_k))
++		return NULL;
++	if (!pmd_present(*pmd)) {
++		set_pmd(pmd, *pmd_k);
++		arch_flush_lazy_mmu_mode();
++	} else
++		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++	return pmd_k;
++}
++#endif
++
++#ifdef CONFIG_X86_64
++static const char errata93_warning[] =
++KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
++KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
++KERN_ERR "******* Please consider a BIOS update.\n"
++KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
++#endif
++
++/* Workaround for K8 erratum #93 & buggy BIOS.
++   BIOS SMM functions are required to use a specific workaround
++   to avoid corruption of the 64bit RIP register on C stepping K8.
++   A lot of BIOS that didn't get tested properly miss this.
++   The OS sees this as a page fault with the upper 32bits of RIP cleared.
++   Try to work around it here.
++   Note we only handle faults in kernel here.
++   Does nothing for X86_32
++ */
++static int is_errata93(struct pt_regs *regs, unsigned long address)
++{
++#ifdef CONFIG_X86_64
++	static int warned;
++	if (address != regs->ip)
++		return 0;
++	if ((address >> 32) != 0)
++		return 0;
++	address |= 0xffffffffUL << 32;
++	if ((address >= (u64)_stext && address <= (u64)_etext) ||
++	    (address >= MODULES_VADDR && address <= MODULES_END)) {
++		if (!warned) {
++			printk(errata93_warning);
++			warned = 1;
++		}
++		regs->ip = address;
++		return 1;
++	}
++#endif
++	return 0;
++}
++
++/*
++ * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal
++ * addresses >4GB.  We catch this in the page fault handler because these
++ * addresses are not reachable. Just detect this case and return.  Any code
++ * segment in LDT is compatibility mode.
++ */
++static int is_errata100(struct pt_regs *regs, unsigned long address)
++{
++#ifdef CONFIG_X86_64
++	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
++	    (address >> 32))
++		return 1;
++#endif
++	return 0;
++}
++
++void do_invalid_op(struct pt_regs *, unsigned long);
++
++static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
++{
++#ifdef CONFIG_X86_F00F_BUG
++	unsigned long nr;
++	/*
++	 * Pentium F0 0F C7 C8 bug workaround.
++	 */
++	if (boot_cpu_data.f00f_bug) {
++		nr = (address - idt_descr.address) >> 3;
++
++		if (nr == 6) {
++			do_invalid_op(regs, 0);
++			return 1;
++		}
++	}
++#endif
++	return 0;
++}
++
++static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
++			    unsigned long address)
++{
++#ifdef CONFIG_X86_32
++	if (!oops_may_print())
++		return;
++#endif
++
++#ifdef CONFIG_X86_PAE
++	if (error_code & PF_INSTR) {
++		int level;
++		pte_t *pte = lookup_address(address, &level);
++
++		if (pte && pte_present(*pte) && !pte_exec(*pte))
++			printk(KERN_CRIT "kernel tried to execute "
++				"NX-protected page - exploit attempt? "
++				"(uid: %d)\n", current->uid);
++	}
++#endif
++
++	printk(KERN_ALERT "BUG: unable to handle kernel ");
++	if (address < PAGE_SIZE)
++		printk(KERN_CONT "NULL pointer dereference");
++	else
++		printk(KERN_CONT "paging request");
++#ifdef CONFIG_X86_32
++	printk(KERN_CONT " at %08lx\n", address);
++#else
++	printk(KERN_CONT " at %016lx\n", address);
++#endif
++	printk(KERN_ALERT "IP:");
++	printk_address(regs->ip, 1);
++	dump_pagetable(address);
++}
++
++#ifdef CONFIG_X86_64
++static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
++				 unsigned long error_code)
++{
++	unsigned long flags = oops_begin();
++	struct task_struct *tsk;
++
++	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
++	       current->comm, address);
++	dump_pagetable(address);
++	tsk = current;
++	tsk->thread.cr2 = address;
++	tsk->thread.trap_no = 14;
++	tsk->thread.error_code = error_code;
++	if (__die("Bad pagetable", regs, error_code))
++		regs = NULL;
++	oops_end(flags, regs, SIGKILL);
++}
++#endif
++
++/*
++ * Handle a spurious fault caused by a stale TLB entry.  This allows
++ * us to lazily refresh the TLB when increasing the permissions of a
++ * kernel page (RO -> RW or NX -> X).  Doing it eagerly is very
++ * expensive since that implies doing a full cross-processor TLB
++ * flush, even if no stale TLB entries exist on other processors.
++ * There are no security implications to leaving a stale TLB when
++ * increasing the permissions on a page.
++ */
++static int spurious_fault(unsigned long address,
++			  unsigned long error_code)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	/* Reserved-bit violation or user access to kernel space? */
++	if (error_code & (PF_USER | PF_RSVD))
++		return 0;
++
++	pgd = init_mm.pgd + pgd_index(address);
++	if (!pgd_present(*pgd))
++		return 0;
++
++	pud = pud_offset(pgd, address);
++	if (!pud_present(*pud))
++		return 0;
++
++	pmd = pmd_offset(pud, address);
++	if (!pmd_present(*pmd))
++		return 0;
++
++	pte = pte_offset_kernel(pmd, address);
++	if (!pte_present(*pte))
++		return 0;
++
++	if ((error_code & PF_WRITE) && !pte_write(*pte))
++		return 0;
++	if ((error_code & PF_INSTR) && !pte_exec(*pte))
++		return 0;
++
++	return 1;
++}
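/*
 * A stand-alone sketch of the permission comparison spurious_fault() performs
 * above.  The real function first walks pgd/pud/pmd/pte to confirm the
 * mapping is present; this model skips the walk and only shows the decision:
 * a kernel fault with no reserved-bit error is spurious (a stale TLB entry
 * left by a lazy RO->RW or NX->X promotion) exactly when the current page
 * tables already allow the access.  pte_writable/pte_executable are stand-ins
 * for the kernel's pte_write()/pte_exec() helpers.
 */
#include <stdbool.h>
#include <stdio.h>

#define PF_WRITE	(1u << 1)
#define PF_USER		(1u << 2)
#define PF_RSVD		(1u << 3)
#define PF_INSTR	(1u << 4)

static bool fault_is_spurious(unsigned int ec, bool pte_writable, bool pte_executable)
{
	if (ec & (PF_USER | PF_RSVD))
		return false;			/* user access or reserved bit: real fault */
	if ((ec & PF_WRITE) && !pte_writable)
		return false;			/* write still forbidden by the pte */
	if ((ec & PF_INSTR) && !pte_executable)
		return false;			/* execute still forbidden by the pte */
	return true;				/* pte already grants it: stale TLB only */
}

int main(void)
{
	printf("%d\n", fault_is_spurious(PF_WRITE, true, false));	/* 1: RO->RW already done */
	printf("%d\n", fault_is_spurious(PF_WRITE, false, false));	/* 0: write really denied */
	return 0;
}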
++
++/*
++ * X86_32
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * X86_64
++ * Handle a fault on the vmalloc area
++ *
++ * This assumes no large pages in there.
++ */
++static int vmalloc_fault(unsigned long address)
++{
++#ifdef CONFIG_X86_32
++	unsigned long pgd_paddr;
++	pmd_t *pmd_k;
++	pte_t *pte_k;
++	/*
++	 * Synchronize this task's top level page-table
++	 * with the 'reference' page table.
++	 *
++	 * Do _not_ use "current" here. We might be inside
++	 * an interrupt in the middle of a task switch..
++	 */
++	pgd_paddr = read_cr3();
++	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++	if (!pmd_k)
++		return -1;
++	pte_k = pte_offset_kernel(pmd_k, address);
++	if (!pte_present(*pte_k))
++		return -1;
++	return 0;
++#else
++	pgd_t *pgd, *pgd_ref;
++	pud_t *pud, *pud_ref;
++	pmd_t *pmd, *pmd_ref;
++	pte_t *pte, *pte_ref;
++
++	/* Copy kernel mappings over when needed. This can also
++	   happen within a race in page table update. In the later
++	   case just flush. */
++
++	pgd = pgd_offset(current->mm ?: &init_mm, address);
++	pgd_ref = pgd_offset_k(address);
++	if (pgd_none(*pgd_ref))
++		return -1;
++	if (pgd_none(*pgd))
++		set_pgd(pgd, *pgd_ref);
++	else
++		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++
++	/* Below here mismatches are bugs because these lower tables
++	   are shared */
++
++	pud = pud_offset(pgd, address);
++	pud_ref = pud_offset(pgd_ref, address);
++	if (pud_none(*pud_ref))
++		return -1;
++	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
++		BUG();
++	pmd = pmd_offset(pud, address);
++	pmd_ref = pmd_offset(pud_ref, address);
++	if (pmd_none(*pmd_ref))
++		return -1;
++	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
++		BUG();
++	pte_ref = pte_offset_kernel(pmd_ref, address);
++	if (!pte_present(*pte_ref))
++		return -1;
++	pte = pte_offset_kernel(pmd, address);
++	/* Don't use pte_page here, because the mappings can point
++	   outside mem_map, and the NUMA hash lookup cannot handle
++	   that. */
++	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
++		BUG();
++	return 0;
++#endif
++}
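/*
 * A toy model of the lazy-sync idea behind the 32-bit branch of
 * vmalloc_fault() above: each task has its own top-level table, and a missing
 * kernel-space entry is copied on demand from the reference table (init_mm)
 * the first time that task faults on it.  The plain arrays here are
 * stand-ins for real page-table pages.
 */
#include <stdio.h>

#define ENTRIES 8

static long reference_pgd[ENTRIES] = { 0, 0, 0, 0, 11, 12, 13, 14 };	/* "init_mm" */
static long task_pgd[ENTRIES];						/* per-task  */

static int sync_one(int idx)
{
	if (!reference_pgd[idx])
		return -1;			/* not mapped in the reference either */
	task_pgd[idx] = reference_pgd[idx];	/* copy the missing kernel entry      */
	return 0;
}

int main(void)
{
	int idx = 5;				/* pretend this top-level slot faulted */

	if (!task_pgd[idx] && sync_one(idx) == 0)
		printf("synced slot %d -> %ld\n", idx, task_pgd[idx]);
	return 0;
}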
++
++int show_unhandled_signals = 1;
++
++/*
++ * This routine handles page faults.  It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ */
++#ifdef CONFIG_X86_64
++asmlinkage
++#endif
++void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
++{
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	struct vm_area_struct *vma;
++	unsigned long address;
++	int write, si_code;
++	int fault;
++#ifdef CONFIG_X86_64
++	unsigned long flags;
++#endif
++
++	/*
++	 * We can fault from pretty much anywhere, with unknown IRQ state.
++	 */
++	trace_hardirqs_fixup();
++
++	tsk = current;
++	mm = tsk->mm;
++	prefetchw(&mm->mmap_sem);
++
++	/* get the address */
++	address = read_cr2();
++
++	si_code = SEGV_MAPERR;
++
++	if (notify_page_fault(regs))
++		return;
++
++	/*
++	 * We fault-in kernel-space virtual memory on-demand. The
++	 * 'reference' page table is init_mm.pgd.
++	 *
++	 * NOTE! We MUST NOT take any locks for this case. We may
++	 * be in an interrupt or a critical region, and should
++	 * only copy the information from the master page table,
++	 * nothing more.
++	 *
++	 * This verifies that the fault happens in kernel space
++	 * (error_code & 4) == 0, and that the fault was not a
++	 * protection error (error_code & 9) == 0.
++	 */
++#ifdef CONFIG_X86_32
++	if (unlikely(address >= TASK_SIZE)) {
++		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
++		    vmalloc_fault(address) >= 0)
++			return;
++
++		/* Can handle a stale RO->RW TLB */
++		if (spurious_fault(address, error_code))
++			return;
++
++		/*
++		 * Don't take the mm semaphore here. If we fixup a prefetch
++		 * fault we could otherwise deadlock.
++		 */
++		goto bad_area_nosemaphore;
++	}
++
++	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
++	   fault has been handled. */
++	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
++		local_irq_enable();
++
++	/*
++	 * If we're in an interrupt, have no user context or are running in an
++	 * atomic region then we must not take the fault.
++	 */
++	if (in_atomic() || !mm)
++		goto bad_area_nosemaphore;
++#else /* CONFIG_X86_64 */
++	if (unlikely(address >= TASK_SIZE64)) {
++		/*
++		 * Don't check for the module range here: its PML4
++		 * is always initialized because it's shared with the main
++		 * kernel text. Only vmalloc may need PML4 syncups.
++		 */
++		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
++		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
++			if (vmalloc_fault(address) >= 0)
++				return;
++		}
++
++		/* Can handle a stale RO->RW TLB */
++		if (spurious_fault(address, error_code))
++			return;
++
++		/*
++		 * Don't take the mm semaphore here. If we fixup a prefetch
++		 * fault we could otherwise deadlock.
++		 */
++		goto bad_area_nosemaphore;
++	}
++	if (likely(regs->flags & X86_EFLAGS_IF))
++		local_irq_enable();
++
++	if (unlikely(error_code & PF_RSVD))
++		pgtable_bad(address, regs, error_code);
++
++	/*
++	 * If we're in an interrupt, have no user context or are running in an
++	 * atomic region then we must not take the fault.
++	 */
++	if (unlikely(in_atomic() || !mm))
++		goto bad_area_nosemaphore;
++
++	/*
++	 * User-mode registers count as a user access even for any
++	 * potential system fault or CPU buglet.
++	 */
++	if (user_mode_vm(regs))
++		error_code |= PF_USER;
++again:
++#endif
++	/* When running in the kernel we expect faults to occur only to
++	 * addresses in user space.  All other faults represent errors in the
++	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
++	 * erroneous fault occurring in a code path which already holds mmap_sem
++	 * we will deadlock attempting to validate the fault against the
++	 * address space.  Luckily the kernel only validly references user
++	 * space from well defined areas of code, which are listed in the
++	 * exceptions table.
++	 *
++	 * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++	 * Attempt to lock the address space, if we cannot we then validate the
++	 * source.  If this is invalid we can skip the address space check,
++	 * thus avoiding the deadlock.
++	 */
++	if (!down_read_trylock(&mm->mmap_sem)) {
++		if ((error_code & PF_USER) == 0 &&
++		    !search_exception_tables(regs->ip))
++			goto bad_area_nosemaphore;
++		down_read(&mm->mmap_sem);
++	}
++
++	vma = find_vma(mm, address);
++	if (!vma)
++		goto bad_area;
++	if (vma->vm_start <= address)
++		goto good_area;
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		goto bad_area;
++	if (error_code & PF_USER) {
++		/*
++		 * Accessing the stack below %sp is always a bug.
++		 * The large cushion allows instructions like enter
++		 * and pusha to work.  ("enter $65535,$31" pushes
++		 * 32 pointers and then decrements %sp by 65535.)
++		 */
++		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
++			goto bad_area;
++	}
++	if (expand_stack(vma, address))
++		goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++	si_code = SEGV_ACCERR;
++	write = 0;
++	switch (error_code & (PF_PROT|PF_WRITE)) {
++	default:	/* 3: write, present */
++		/* fall through */
++	case PF_WRITE:		/* write, not present */
++		if (!(vma->vm_flags & VM_WRITE))
++			goto bad_area;
++		write++;
++		break;
++	case PF_PROT:		/* read, present */
++		goto bad_area;
++	case 0:			/* read, not present */
++		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
++			goto bad_area;
++	}
++
++#ifdef CONFIG_X86_32
++survive:
++#endif
++	/*
++	 * If for any reason at all we couldn't handle the fault,
++	 * make sure we exit gracefully rather than endlessly redo
++	 * the fault.
++	 */
++	fault = handle_mm_fault(mm, vma, address, write);
++	if (unlikely(fault & VM_FAULT_ERROR)) {
++		if (fault & VM_FAULT_OOM)
++			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGBUS)
++			goto do_sigbus;
++		BUG();
++	}
++	if (fault & VM_FAULT_MAJOR)
++		tsk->maj_flt++;
++	else
++		tsk->min_flt++;
++
++#ifdef CONFIG_X86_32
++	/*
++	 * Did it hit the DOS screen memory VA from vm86 mode?
++	 */
++	if (v8086_mode(regs)) {
++		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++		if (bit < 32)
++			tsk->thread.screen_bitmap |= 1 << bit;
++	}
++#endif
++	up_read(&mm->mmap_sem);
++	return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++	up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++	/* User mode accesses just cause a SIGSEGV */
++	if (error_code & PF_USER) {
++		/*
++		 * It's possible to have interrupts off here.
++		 */
++		local_irq_enable();
++
++		/*
++		 * Valid to do another page fault here because this one came
++		 * from user space.
++		 */
++		if (is_prefetch(regs, address, error_code))
++			return;
++
++		if (is_errata100(regs, address))
++			return;
++
++		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
++		    printk_ratelimit()) {
++			printk(
++#ifdef CONFIG_X86_32
++			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
++#else
++			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
++#endif
++			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
++			tsk->comm, task_pid_nr(tsk), address, regs->ip,
++			regs->sp, error_code);
++			print_vma_addr(" in ", regs->ip);
++			printk("\n");
++		}
++
++		tsk->thread.cr2 = address;
++		/* Kernel addresses are always protection faults */
++		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++		tsk->thread.trap_no = 14;
++		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++		return;
++	}
++
++	if (is_f00f_bug(regs, address))
++		return;
++
++no_context:
++	/* Are we prepared to handle this kernel fault?  */
++	if (fixup_exception(regs))
++		return;
++
++	/*
++	 * X86_32
++	 * Valid to do another page fault here, because if this fault
++	 * had been triggered by is_prefetch fixup_exception would have
++	 * handled it.
++	 *
++	 * X86_64
++	 * Hall of shame of CPU/BIOS bugs.
++	 */
++	if (is_prefetch(regs, address, error_code))
++		return;
++
++	if (is_errata93(regs, address))
++		return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++#ifdef CONFIG_X86_32
++	bust_spinlocks(1);
++#else
++	flags = oops_begin();
++#endif
++
++	show_fault_oops(regs, error_code, address);
++
++	tsk->thread.cr2 = address;
++	tsk->thread.trap_no = 14;
++	tsk->thread.error_code = error_code;
++
++#ifdef CONFIG_X86_32
++	die("Oops", regs, error_code);
++	bust_spinlocks(0);
++	do_exit(SIGKILL);
++#else
++	if (__die("Oops", regs, error_code))
++		regs = NULL;
++	/* Executive summary in case the body of the oops scrolled away */
++	printk(KERN_EMERG "CR2: %016lx\n", address);
++	oops_end(flags, regs, SIGKILL);
++#endif
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++	up_read(&mm->mmap_sem);
++	if (is_global_init(tsk)) {
++		yield();
++#ifdef CONFIG_X86_32
++		down_read(&mm->mmap_sem);
++		goto survive;
++#else
++		goto again;
++#endif
++	}
++
++	printk("VM: killing process %s\n", tsk->comm);
++	if (error_code & PF_USER)
++		do_group_exit(SIGKILL);
++	goto no_context;
++
++do_sigbus:
++	up_read(&mm->mmap_sem);
++
++	/* Kernel mode? Handle exceptions or die */
++	if (!(error_code & PF_USER))
++		goto no_context;
++#ifdef CONFIG_X86_32
++	/* User space => ok to do another page fault */
++	if (is_prefetch(regs, address, error_code))
++		return;
++#endif
++	tsk->thread.cr2 = address;
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 14;
++	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++}
++
++DEFINE_SPINLOCK(pgd_lock);
++LIST_HEAD(pgd_list);
++
++void vmalloc_sync_all(void)
++{
++#ifdef CONFIG_X86_32
++	/*
++	 * Note that races in the updates of insync and start aren't
++	 * problematic: insync can only get set bits added, and updates to
++	 * start are only improving performance (without affecting correctness
++	 * if undone).
++	 */
++	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++	static unsigned long start = TASK_SIZE;
++	unsigned long address;
++
++	if (SHARED_KERNEL_PMD)
++		return;
++
++	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
++		if (!test_bit(pgd_index(address), insync)) {
++			unsigned long flags;
++			struct page *page;
++
++			spin_lock_irqsave(&pgd_lock, flags);
++			list_for_each_entry(page, &pgd_list, lru) {
++				if (!vmalloc_sync_one(page_address(page),
++						      address))
++					break;
++			}
++			spin_unlock_irqrestore(&pgd_lock, flags);
++			if (!page)
++				set_bit(pgd_index(address), insync);
++		}
++		if (address == start && test_bit(pgd_index(address), insync))
++			start = address + PGDIR_SIZE;
++	}
++#else /* CONFIG_X86_64 */
++	/*
++	 * Note that races in the updates of insync and start aren't
++	 * problematic: insync can only get set bits added, and updates to
++	 * start are only improving performance (without affecting correctness
++	 * if undone).
++	 */
++	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++	static unsigned long start = VMALLOC_START & PGDIR_MASK;
++	unsigned long address;
++
++	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
++		if (!test_bit(pgd_index(address), insync)) {
++			const pgd_t *pgd_ref = pgd_offset_k(address);
++			struct page *page;
++
++			if (pgd_none(*pgd_ref))
++				continue;
++			spin_lock(&pgd_lock);
++			list_for_each_entry(page, &pgd_list, lru) {
++				pgd_t *pgd;
++				pgd = (pgd_t *)page_address(page) + pgd_index(address);
++				if (pgd_none(*pgd))
++					set_pgd(pgd, *pgd_ref);
++				else
++					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++			}
++			spin_unlock(&pgd_lock);
++			set_bit(pgd_index(address), insync);
++		}
++		if (address == start)
++			start = address + PGDIR_SIZE;
++	}
++	/* Check that there is no need to do the same for the modules area. */
++	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
++	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
++				(__START_KERNEL & PGDIR_MASK)));
++#endif
++}
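/*
 * A minimal, stand-alone sketch of the hardware error-code layout the unified
 * fault handler above keys off; the bits match the PF_* defines removed from
 * fault_64.c below and the comment block removed from fault_32.c.  This is a
 * user-space illustration only; build with any C compiler and pass an error
 * code as argv[1].
 */
#include <stdio.h>
#include <stdlib.h>

#define PF_PROT		(1UL << 0)	/* 0: page not present, 1: protection fault */
#define PF_WRITE	(1UL << 1)	/* 0: read access,      1: write access     */
#define PF_USER		(1UL << 2)	/* 0: kernel mode,      1: user mode        */
#define PF_RSVD		(1UL << 3)	/* 1: reserved bit set in a paging entry    */
#define PF_INSTR	(1UL << 4)	/* 1: fault was an instruction fetch        */

int main(int argc, char **argv)
{
	unsigned long ec = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;

	printf("error_code %#lx: %s %s from %s mode%s%s\n", ec,
	       ec & PF_PROT  ? "protection fault" : "not-present fault",
	       ec & PF_WRITE ? "on write" : "on read",
	       ec & PF_USER  ? "user" : "kernel",
	       ec & PF_RSVD  ? ", reserved bit set" : "",
	       ec & PF_INSTR ? ", instruction fetch" : "");
	return 0;
}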
+diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
+deleted file mode 100644
+index a2273d4..0000000
+--- a/arch/x86/mm/fault_32.c
++++ /dev/null
+@@ -1,659 +0,0 @@
+-/*
+- *  linux/arch/i386/mm/fault.c
+- *
+- *  Copyright (C) 1995  Linus Torvalds
+- */
+-
+-#include <linux/signal.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/errno.h>
+-#include <linux/string.h>
+-#include <linux/types.h>
+-#include <linux/ptrace.h>
+-#include <linux/mman.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/interrupt.h>
+-#include <linux/init.h>
+-#include <linux/tty.h>
+-#include <linux/vt_kern.h>		/* For unblank_screen() */
+-#include <linux/highmem.h>
+-#include <linux/bootmem.h>		/* for max_low_pfn */
+-#include <linux/vmalloc.h>
+-#include <linux/module.h>
+-#include <linux/kprobes.h>
+-#include <linux/uaccess.h>
+-#include <linux/kdebug.h>
+-#include <linux/kprobes.h>
+-
+-#include <asm/system.h>
+-#include <asm/desc.h>
+-#include <asm/segment.h>
+-
+-extern void die(const char *,struct pt_regs *,long);
+-
+-#ifdef CONFIG_KPROBES
+-static inline int notify_page_fault(struct pt_regs *regs)
+-{
+-	int ret = 0;
+-
+-	/* kprobe_running() needs smp_processor_id() */
+-	if (!user_mode_vm(regs)) {
+-		preempt_disable();
+-		if (kprobe_running() && kprobe_fault_handler(regs, 14))
+-			ret = 1;
+-		preempt_enable();
+-	}
+-
+-	return ret;
+-}
+-#else
+-static inline int notify_page_fault(struct pt_regs *regs)
+-{
+-	return 0;
+-}
+-#endif
+-
+-/*
+- * Return EIP plus the CS segment base.  The segment limit is also
+- * adjusted, clamped to the kernel/user address space (whichever is
+- * appropriate), and returned in *eip_limit.
+- *
+- * The segment is checked, because it might have been changed by another
+- * task between the original faulting instruction and here.
+- *
+- * If CS is no longer a valid code segment, or if EIP is beyond the
+- * limit, or if it is a kernel address when CS is not a kernel segment,
+- * then the returned value will be greater than *eip_limit.
+- * 
+- * This is slow, but is very rarely executed.
+- */
+-static inline unsigned long get_segment_eip(struct pt_regs *regs,
+-					    unsigned long *eip_limit)
+-{
+-	unsigned long eip = regs->eip;
+-	unsigned seg = regs->xcs & 0xffff;
+-	u32 seg_ar, seg_limit, base, *desc;
+-
+-	/* Unlikely, but must come before segment checks. */
+-	if (unlikely(regs->eflags & VM_MASK)) {
+-		base = seg << 4;
+-		*eip_limit = base + 0xffff;
+-		return base + (eip & 0xffff);
+-	}
+-
+-	/* The standard kernel/user address space limit. */
+-	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
+-	
+-	/* By far the most common cases. */
+-	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
+-		return eip;
+-
+-	/* Check the segment exists, is within the current LDT/GDT size,
+-	   that kernel/user (ring 0..3) has the appropriate privilege,
+-	   that it's a code segment, and get the limit. */
+-	__asm__ ("larl %3,%0; lsll %3,%1"
+-		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
+-	if ((~seg_ar & 0x9800) || eip > seg_limit) {
+-		*eip_limit = 0;
+-		return 1;	 /* So that returned eip > *eip_limit. */
+-	}
+-
+-	/* Get the GDT/LDT descriptor base. 
+-	   When you look for races in this code remember that
+-	   LDT and other horrors are only used in user space. */
+-	if (seg & (1<<2)) {
+-		/* Must lock the LDT while reading it. */
+-		mutex_lock(&current->mm->context.lock);
+-		desc = current->mm->context.ldt;
+-		desc = (void *)desc + (seg & ~7);
+-	} else {
+-		/* Must disable preemption while reading the GDT. */
+- 		desc = (u32 *)get_cpu_gdt_table(get_cpu());
+-		desc = (void *)desc + (seg & ~7);
+-	}
+-
+-	/* Decode the code segment base from the descriptor */
+-	base = get_desc_base((unsigned long *)desc);
+-
+-	if (seg & (1<<2)) { 
+-		mutex_unlock(&current->mm->context.lock);
+-	} else
+-		put_cpu();
+-
+-	/* Adjust EIP and segment limit, and clamp at the kernel limit.
+-	   It's legitimate for segments to wrap at 0xffffffff. */
+-	seg_limit += base;
+-	if (seg_limit < *eip_limit && seg_limit >= base)
+-		*eip_limit = seg_limit;
+-	return eip + base;
+-}
+-
+-/* 
+- * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+- * Check that here and ignore it.
+- */
+-static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
+-{ 
+-	unsigned long limit;
+-	unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
+-	int scan_more = 1;
+-	int prefetch = 0; 
+-	int i;
+-
+-	for (i = 0; scan_more && i < 15; i++) { 
+-		unsigned char opcode;
+-		unsigned char instr_hi;
+-		unsigned char instr_lo;
+-
+-		if (instr > (unsigned char *)limit)
+-			break;
+-		if (probe_kernel_address(instr, opcode))
+-			break; 
+-
+-		instr_hi = opcode & 0xf0; 
+-		instr_lo = opcode & 0x0f; 
+-		instr++;
+-
+-		switch (instr_hi) { 
+-		case 0x20:
+-		case 0x30:
+-			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
+-			scan_more = ((instr_lo & 7) == 0x6);
+-			break;
+-			
+-		case 0x60:
+-			/* 0x64 thru 0x67 are valid prefixes in all modes. */
+-			scan_more = (instr_lo & 0xC) == 0x4;
+-			break;		
+-		case 0xF0:
+-			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
+-			scan_more = !instr_lo || (instr_lo>>1) == 1;
+-			break;			
+-		case 0x00:
+-			/* Prefetch instruction is 0x0F0D or 0x0F18 */
+-			scan_more = 0;
+-			if (instr > (unsigned char *)limit)
+-				break;
+-			if (probe_kernel_address(instr, opcode))
+-				break;
+-			prefetch = (instr_lo == 0xF) &&
+-				(opcode == 0x0D || opcode == 0x18);
+-			break;			
+-		default:
+-			scan_more = 0;
+-			break;
+-		} 
+-	}
+-	return prefetch;
+-}
+-
+-static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+-			      unsigned long error_code)
+-{
+-	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+-		     boot_cpu_data.x86 >= 6)) {
+-		/* Catch an obscure case of prefetch inside an NX page. */
+-		if (nx_enabled && (error_code & 16))
+-			return 0;
+-		return __is_prefetch(regs, addr);
+-	}
+-	return 0;
+-} 
+-
+-static noinline void force_sig_info_fault(int si_signo, int si_code,
+-	unsigned long address, struct task_struct *tsk)
+-{
+-	siginfo_t info;
+-
+-	info.si_signo = si_signo;
+-	info.si_errno = 0;
+-	info.si_code = si_code;
+-	info.si_addr = (void __user *)address;
+-	force_sig_info(si_signo, &info, tsk);
+-}
+-
+-fastcall void do_invalid_op(struct pt_regs *, unsigned long);
+-
+-static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+-{
+-	unsigned index = pgd_index(address);
+-	pgd_t *pgd_k;
+-	pud_t *pud, *pud_k;
+-	pmd_t *pmd, *pmd_k;
+-
+-	pgd += index;
+-	pgd_k = init_mm.pgd + index;
+-
+-	if (!pgd_present(*pgd_k))
+-		return NULL;
+-
+-	/*
+-	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
+-	 * and redundant with the set_pmd() on non-PAE. As would
+-	 * set_pud.
+-	 */
+-
+-	pud = pud_offset(pgd, address);
+-	pud_k = pud_offset(pgd_k, address);
+-	if (!pud_present(*pud_k))
+-		return NULL;
+-
+-	pmd = pmd_offset(pud, address);
+-	pmd_k = pmd_offset(pud_k, address);
+-	if (!pmd_present(*pmd_k))
+-		return NULL;
+-	if (!pmd_present(*pmd)) {
+-		set_pmd(pmd, *pmd_k);
+-		arch_flush_lazy_mmu_mode();
+-	} else
+-		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+-	return pmd_k;
+-}
+-
+-/*
+- * Handle a fault on the vmalloc or module mapping area
+- *
+- * This assumes no large pages in there.
+- */
+-static inline int vmalloc_fault(unsigned long address)
+-{
+-	unsigned long pgd_paddr;
+-	pmd_t *pmd_k;
+-	pte_t *pte_k;
+-	/*
+-	 * Synchronize this task's top level page-table
+-	 * with the 'reference' page table.
+-	 *
+-	 * Do _not_ use "current" here. We might be inside
+-	 * an interrupt in the middle of a task switch..
+-	 */
+-	pgd_paddr = read_cr3();
+-	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+-	if (!pmd_k)
+-		return -1;
+-	pte_k = pte_offset_kernel(pmd_k, address);
+-	if (!pte_present(*pte_k))
+-		return -1;
+-	return 0;
+-}
+-
+-int show_unhandled_signals = 1;
+-
+-/*
+- * This routine handles page faults.  It determines the address,
+- * and the problem, and then passes it off to one of the appropriate
+- * routines.
+- *
+- * error_code:
+- *	bit 0 == 0 means no page found, 1 means protection fault
+- *	bit 1 == 0 means read, 1 means write
+- *	bit 2 == 0 means kernel, 1 means user-mode
+- *	bit 3 == 1 means use of reserved bit detected
+- *	bit 4 == 1 means fault was an instruction fetch
+- */
+-fastcall void __kprobes do_page_fault(struct pt_regs *regs,
+-				      unsigned long error_code)
+-{
+-	struct task_struct *tsk;
+-	struct mm_struct *mm;
+-	struct vm_area_struct * vma;
+-	unsigned long address;
+-	int write, si_code;
+-	int fault;
+-
+-	/*
+-	 * We can fault from pretty much anywhere, with unknown IRQ state.
+-	 */
+-	trace_hardirqs_fixup();
+-
+-	/* get the address */
+-        address = read_cr2();
+-
+-	tsk = current;
+-
+-	si_code = SEGV_MAPERR;
+-
+-	/*
+-	 * We fault-in kernel-space virtual memory on-demand. The
+-	 * 'reference' page table is init_mm.pgd.
+-	 *
+-	 * NOTE! We MUST NOT take any locks for this case. We may
+-	 * be in an interrupt or a critical region, and should
+-	 * only copy the information from the master page table,
+-	 * nothing more.
+-	 *
+-	 * This verifies that the fault happens in kernel space
+-	 * (error_code & 4) == 0, and that the fault was not a
+-	 * protection error (error_code & 9) == 0.
+-	 */
+-	if (unlikely(address >= TASK_SIZE)) {
+-		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
+-			return;
+-		if (notify_page_fault(regs))
+-			return;
+-		/*
+-		 * Don't take the mm semaphore here. If we fixup a prefetch
+-		 * fault we could otherwise deadlock.
+-		 */
+-		goto bad_area_nosemaphore;
+-	}
+-
+-	if (notify_page_fault(regs))
+-		return;
+-
+-	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
+-	   fault has been handled. */
+-	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+-		local_irq_enable();
+-
+-	mm = tsk->mm;
+-
+-	/*
+-	 * If we're in an interrupt, have no user context or are running in an
+-	 * atomic region then we must not take the fault..
+-	 */
+-	if (in_atomic() || !mm)
+-		goto bad_area_nosemaphore;
+-
+-	/* When running in the kernel we expect faults to occur only to
+-	 * addresses in user space.  All other faults represent errors in the
+-	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
+-	 * erroneous fault occurring in a code path which already holds mmap_sem
+-	 * we will deadlock attempting to validate the fault against the
+-	 * address space.  Luckily the kernel only validly references user
+-	 * space from well defined areas of code, which are listed in the
+-	 * exceptions table.
+-	 *
+-	 * As the vast majority of faults will be valid we will only perform
+-	 * the source reference check when there is a possibility of a deadlock.
+-	 * Attempt to lock the address space, if we cannot we then validate the
+-	 * source.  If this is invalid we can skip the address space check,
+-	 * thus avoiding the deadlock.
+-	 */
+-	if (!down_read_trylock(&mm->mmap_sem)) {
+-		if ((error_code & 4) == 0 &&
+-		    !search_exception_tables(regs->eip))
+-			goto bad_area_nosemaphore;
+-		down_read(&mm->mmap_sem);
+-	}
+-
+-	vma = find_vma(mm, address);
+-	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (error_code & 4) {
+-		/*
+-		 * Accessing the stack below %esp is always a bug.
+-		 * The large cushion allows instructions like enter
+-		 * and pusha to work.  ("enter $65535,$31" pushes
+-		 * 32 pointers and then decrements %esp by 65535.)
+-		 */
+-		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
+-			goto bad_area;
+-	}
+-	if (expand_stack(vma, address))
+-		goto bad_area;
+-/*
+- * Ok, we have a good vm_area for this memory access, so
+- * we can handle it..
+- */
+-good_area:
+-	si_code = SEGV_ACCERR;
+-	write = 0;
+-	switch (error_code & 3) {
+-		default:	/* 3: write, present */
+-				/* fall through */
+-		case 2:		/* write, not present */
+-			if (!(vma->vm_flags & VM_WRITE))
+-				goto bad_area;
+-			write++;
+-			break;
+-		case 1:		/* read, present */
+-			goto bad_area;
+-		case 0:		/* read, not present */
+-			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+-				goto bad_area;
+-	}
+-
+- survive:
+-	/*
+-	 * If for any reason at all we couldn't handle the fault,
+-	 * make sure we exit gracefully rather than endlessly redo
+-	 * the fault.
+-	 */
+-	fault = handle_mm_fault(mm, vma, address, write);
+-	if (unlikely(fault & VM_FAULT_ERROR)) {
+-		if (fault & VM_FAULT_OOM)
+-			goto out_of_memory;
+-		else if (fault & VM_FAULT_SIGBUS)
+-			goto do_sigbus;
+-		BUG();
+-	}
+-	if (fault & VM_FAULT_MAJOR)
+-		tsk->maj_flt++;
+-	else
+-		tsk->min_flt++;
+-
+-	/*
+-	 * Did it hit the DOS screen memory VA from vm86 mode?
+-	 */
+-	if (regs->eflags & VM_MASK) {
+-		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
+-		if (bit < 32)
+-			tsk->thread.screen_bitmap |= 1 << bit;
+-	}
+-	up_read(&mm->mmap_sem);
+-	return;
+-
+-/*
+- * Something tried to access memory that isn't in our memory map..
+- * Fix it, but check if it's kernel or user first..
+- */
+-bad_area:
+-	up_read(&mm->mmap_sem);
+-
+-bad_area_nosemaphore:
+-	/* User mode accesses just cause a SIGSEGV */
+-	if (error_code & 4) {
+-		/*
+-		 * It's possible to have interrupts off here.
+-		 */
+-		local_irq_enable();
+-
+-		/* 
+-		 * Valid to do another page fault here because this one came 
+-		 * from user space.
+-		 */
+-		if (is_prefetch(regs, address, error_code))
+-			return;
+-
+-		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+-		    printk_ratelimit()) {
+-			printk("%s%s[%d]: segfault at %08lx eip %08lx "
+-			    "esp %08lx error %lx\n",
+-			    task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+-			    tsk->comm, task_pid_nr(tsk), address, regs->eip,
+-			    regs->esp, error_code);
+-		}
+-		tsk->thread.cr2 = address;
+-		/* Kernel addresses are always protection faults */
+-		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+-		tsk->thread.trap_no = 14;
+-		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+-		return;
+-	}
+-
+-#ifdef CONFIG_X86_F00F_BUG
+-	/*
+-	 * Pentium F0 0F C7 C8 bug workaround.
+-	 */
+-	if (boot_cpu_data.f00f_bug) {
+-		unsigned long nr;
+-		
+-		nr = (address - idt_descr.address) >> 3;
+-
+-		if (nr == 6) {
+-			do_invalid_op(regs, 0);
+-			return;
+-		}
+-	}
+-#endif
+-
+-no_context:
+-	/* Are we prepared to handle this kernel fault?  */
+-	if (fixup_exception(regs))
+-		return;
+-
+-	/* 
+-	 * Valid to do another page fault here, because if this fault
+-	 * had been triggered by is_prefetch fixup_exception would have 
+-	 * handled it.
+-	 */
+- 	if (is_prefetch(regs, address, error_code))
+- 		return;
+-
+-/*
+- * Oops. The kernel tried to access some bad page. We'll have to
+- * terminate things with extreme prejudice.
+- */
+-
+-	bust_spinlocks(1);
+-
+-	if (oops_may_print()) {
+-		__typeof__(pte_val(__pte(0))) page;
+-
+-#ifdef CONFIG_X86_PAE
+-		if (error_code & 16) {
+-			pte_t *pte = lookup_address(address);
+-
+-			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+-				printk(KERN_CRIT "kernel tried to execute "
+-					"NX-protected page - exploit attempt? "
+-					"(uid: %d)\n", current->uid);
+-		}
+-#endif
+-		if (address < PAGE_SIZE)
+-			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
+-					"pointer dereference");
+-		else
+-			printk(KERN_ALERT "BUG: unable to handle kernel paging"
+-					" request");
+-		printk(" at virtual address %08lx\n",address);
+-		printk(KERN_ALERT "printing eip: %08lx ", regs->eip);
+-
+-		page = read_cr3();
+-		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+-#ifdef CONFIG_X86_PAE
+-		printk("*pdpt = %016Lx ", page);
+-		if ((page >> PAGE_SHIFT) < max_low_pfn
+-		    && page & _PAGE_PRESENT) {
+-			page &= PAGE_MASK;
+-			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
+-			                                         & (PTRS_PER_PMD - 1)];
+-			printk(KERN_CONT "*pde = %016Lx ", page);
+-			page &= ~_PAGE_NX;
+-		}
+-#else
+-		printk("*pde = %08lx ", page);
+-#endif
+-
+-		/*
+-		 * We must not directly access the pte in the highpte
+-		 * case if the page table is located in highmem.
+-		 * And let's rather not kmap-atomic the pte, just in case
+-		 * it's allocated already.
+-		 */
+-		if ((page >> PAGE_SHIFT) < max_low_pfn
+-		    && (page & _PAGE_PRESENT)
+-		    && !(page & _PAGE_PSE)) {
+-			page &= PAGE_MASK;
+-			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
+-			                                         & (PTRS_PER_PTE - 1)];
+-			printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
+-		}
+-
+-		printk("\n");
+-	}
+-
+-	tsk->thread.cr2 = address;
+-	tsk->thread.trap_no = 14;
+-	tsk->thread.error_code = error_code;
+-	die("Oops", regs, error_code);
+-	bust_spinlocks(0);
+-	do_exit(SIGKILL);
+-
+-/*
+- * We ran out of memory, or some other thing happened to us that made
+- * us unable to handle the page fault gracefully.
+- */
+-out_of_memory:
+-	up_read(&mm->mmap_sem);
+-	if (is_global_init(tsk)) {
+-		yield();
+-		down_read(&mm->mmap_sem);
+-		goto survive;
+-	}
+-	printk("VM: killing process %s\n", tsk->comm);
+-	if (error_code & 4)
+-		do_group_exit(SIGKILL);
+-	goto no_context;
+-
+-do_sigbus:
+-	up_read(&mm->mmap_sem);
+-
+-	/* Kernel mode? Handle exceptions or die */
+-	if (!(error_code & 4))
+-		goto no_context;
+-
+-	/* User space => ok to do another page fault */
+-	if (is_prefetch(regs, address, error_code))
+-		return;
+-
+-	tsk->thread.cr2 = address;
+-	tsk->thread.error_code = error_code;
+-	tsk->thread.trap_no = 14;
+-	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
+-}
+-
+-void vmalloc_sync_all(void)
+-{
+-	/*
+-	 * Note that races in the updates of insync and start aren't
+-	 * problematic: insync can only get set bits added, and updates to
+-	 * start are only improving performance (without affecting correctness
+-	 * if undone).
+-	 */
+-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+-	static unsigned long start = TASK_SIZE;
+-	unsigned long address;
+-
+-	if (SHARED_KERNEL_PMD)
+-		return;
+-
+-	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
+-	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+-		if (!test_bit(pgd_index(address), insync)) {
+-			unsigned long flags;
+-			struct page *page;
+-
+-			spin_lock_irqsave(&pgd_lock, flags);
+-			for (page = pgd_list; page; page =
+-					(struct page *)page->index)
+-				if (!vmalloc_sync_one(page_address(page),
+-								address)) {
+-					BUG_ON(page != pgd_list);
+-					break;
+-				}
+-			spin_unlock_irqrestore(&pgd_lock, flags);
+-			if (!page)
+-				set_bit(pgd_index(address), insync);
+-		}
+-		if (address == start && test_bit(pgd_index(address), insync))
+-			start = address + PGDIR_SIZE;
+-	}
+-}
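/*
 * The 32-bit handler deleted above tested raw masks (error_code & 4,
 * error_code & 3, error_code & 0x0d, error_code & 16) where the unified
 * fault.c spells the same bits as PF_* names.  This small sketch only records
 * that correspondence; the negative-array-size typedefs fail to compile if
 * any pairing is wrong.
 */
#include <stdio.h>

#define PF_PROT		(1 << 0)
#define PF_WRITE	(1 << 1)
#define PF_USER		(1 << 2)
#define PF_RSVD		(1 << 3)
#define PF_INSTR	(1 << 4)

typedef char user_bit_matches[(0x04 == PF_USER) ? 1 : -1];
typedef char rw_switch_matches[(0x03 == (PF_PROT | PF_WRITE)) ? 1 : -1];
typedef char vmalloc_mask_matches[(0x0d == (PF_RSVD | PF_USER | PF_PROT)) ? 1 : -1];
typedef char nx_check_matches[(0x10 == PF_INSTR) ? 1 : -1];

int main(void)
{
	printf("user=%#x rw=%#x vmalloc=%#x nx=%#x\n",
	       PF_USER, PF_PROT | PF_WRITE,
	       PF_RSVD | PF_USER | PF_PROT, PF_INSTR);
	return 0;
}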
+diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
+deleted file mode 100644
+index 0e26230..0000000
+--- a/arch/x86/mm/fault_64.c
++++ /dev/null
+@@ -1,623 +0,0 @@
+-/*
+- *  linux/arch/x86-64/mm/fault.c
+- *
+- *  Copyright (C) 1995  Linus Torvalds
+- *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
+- */
+-
+-#include <linux/signal.h>
+-#include <linux/sched.h>
+-#include <linux/kernel.h>
+-#include <linux/errno.h>
+-#include <linux/string.h>
+-#include <linux/types.h>
+-#include <linux/ptrace.h>
+-#include <linux/mman.h>
+-#include <linux/mm.h>
+-#include <linux/smp.h>
+-#include <linux/interrupt.h>
+-#include <linux/init.h>
+-#include <linux/tty.h>
+-#include <linux/vt_kern.h>		/* For unblank_screen() */
+-#include <linux/compiler.h>
+-#include <linux/vmalloc.h>
+-#include <linux/module.h>
+-#include <linux/kprobes.h>
+-#include <linux/uaccess.h>
+-#include <linux/kdebug.h>
+-#include <linux/kprobes.h>
+-
+-#include <asm/system.h>
+-#include <asm/pgalloc.h>
+-#include <asm/smp.h>
+-#include <asm/tlbflush.h>
+-#include <asm/proto.h>
+-#include <asm-generic/sections.h>
 -
- /*
-  * Invalidate call-back
-  */
--static void 
--smp_invalidate_interrupt(void)
-+static void smp_invalidate_interrupt(void)
- {
- 	__u8 cpu = smp_processor_id();
- 
-@@ -837,18 +827,18 @@ smp_invalidate_interrupt(void)
- 		return;
- 	/* This will flood messages.  Don't uncomment unless you see
- 	 * Problems with cross cpu invalidation
--	VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
--		smp_processor_id()));
--	*/
-+	 VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
-+	 smp_processor_id()));
-+	 */
- 
- 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
- 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
--			if (flush_va == FLUSH_ALL)
-+			if (flush_va == TLB_FLUSH_ALL)
- 				local_flush_tlb();
- 			else
- 				__flush_tlb_one(flush_va);
- 		} else
--			leave_mm(cpu);
-+			voyager_leave_mm(cpu);
- 	}
- 	smp_mb__before_clear_bit();
- 	clear_bit(cpu, &smp_invalidate_needed);
-@@ -857,11 +847,10 @@ smp_invalidate_interrupt(void)
- 
- /* All the new flush operations for 2.4 */
- 
+-/* Page fault error code bits */
+-#define PF_PROT	(1<<0)		/* or no page found */
+-#define PF_WRITE	(1<<1)
+-#define PF_USER	(1<<2)
+-#define PF_RSVD	(1<<3)
+-#define PF_INSTR	(1<<4)
 -
- /* This routine is called with a physical cpu mask */
- static void
--voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
--			  unsigned long va)
-+voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
-+			 unsigned long va)
- {
- 	int stuck = 50000;
- 
-@@ -875,7 +864,7 @@ voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
- 		BUG();
- 
- 	spin_lock(&tlbstate_lock);
--	
-+
- 	flush_mm = mm;
- 	flush_va = va;
- 	atomic_set_mask(cpumask, &smp_invalidate_needed);
-@@ -887,23 +876,23 @@ voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
- 
- 	while (smp_invalidate_needed) {
- 		mb();
--		if(--stuck == 0) {
--			printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id());
-+		if (--stuck == 0) {
-+			printk("***WARNING*** Stuck doing invalidate CPI "
-+			       "(CPU%d)\n", smp_processor_id());
- 			break;
- 		}
- 	}
- 
- 	/* Uncomment only to debug invalidation problems
--	VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
--	*/
-+	   VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
-+	 */
- 
- 	flush_mm = NULL;
- 	flush_va = 0;
- 	spin_unlock(&tlbstate_lock);
- }
- 
--void
--flush_tlb_current_task(void)
-+void flush_tlb_current_task(void)
- {
- 	struct mm_struct *mm = current->mm;
- 	unsigned long cpu_mask;
-@@ -913,14 +902,12 @@ flush_tlb_current_task(void)
- 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
- 	local_flush_tlb();
- 	if (cpu_mask)
--		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
- 
- 	preempt_enable();
- }
- 
+-#ifdef CONFIG_KPROBES
+-static inline int notify_page_fault(struct pt_regs *regs)
+-{
+-	int ret = 0;
 -
--void
--flush_tlb_mm (struct mm_struct * mm)
-+void flush_tlb_mm(struct mm_struct *mm)
- {
- 	unsigned long cpu_mask;
- 
-@@ -932,15 +919,15 @@ flush_tlb_mm (struct mm_struct * mm)
- 		if (current->mm)
- 			local_flush_tlb();
- 		else
--			leave_mm(smp_processor_id());
-+			voyager_leave_mm(smp_processor_id());
- 	}
- 	if (cpu_mask)
--		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
- 
- 	preempt_enable();
- }
- 
--void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
- {
- 	struct mm_struct *mm = vma->vm_mm;
- 	unsigned long cpu_mask;
-@@ -949,10 +936,10 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
- 
- 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
- 	if (current->active_mm == mm) {
--		if(current->mm)
-+		if (current->mm)
- 			__flush_tlb_one(va);
--		 else
--		 	leave_mm(smp_processor_id());
-+		else
-+			voyager_leave_mm(smp_processor_id());
- 	}
- 
- 	if (cpu_mask)
-@@ -960,21 +947,21 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
- 
- 	preempt_enable();
- }
-+
- EXPORT_SYMBOL(flush_tlb_page);
- 
- /* enable the requested IRQs */
--static void
--smp_enable_irq_interrupt(void)
-+static void smp_enable_irq_interrupt(void)
- {
- 	__u8 irq;
- 	__u8 cpu = get_cpu();
- 
- 	VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
--	       vic_irq_enable_mask[cpu]));
-+		vic_irq_enable_mask[cpu]));
- 
- 	spin_lock(&vic_irq_lock);
--	for(irq = 0; irq < 16; irq++) {
--		if(vic_irq_enable_mask[cpu] & (1<<irq))
-+	for (irq = 0; irq < 16; irq++) {
-+		if (vic_irq_enable_mask[cpu] & (1 << irq))
- 			enable_local_vic_irq(irq);
- 	}
- 	vic_irq_enable_mask[cpu] = 0;
-@@ -982,17 +969,16 @@ smp_enable_irq_interrupt(void)
- 
- 	put_cpu_no_resched();
- }
--	
-+
- /*
-  *	CPU halt call-back
-  */
--static void
--smp_stop_cpu_function(void *dummy)
-+static void smp_stop_cpu_function(void *dummy)
- {
- 	VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
- 	cpu_clear(smp_processor_id(), cpu_online_map);
- 	local_irq_disable();
--	for(;;)
-+	for (;;)
- 		halt();
- }
- 
-@@ -1006,14 +992,13 @@ struct call_data_struct {
- 	int wait;
- };
- 
--static struct call_data_struct * call_data;
-+static struct call_data_struct *call_data;
- 
- /* execute a thread on a new CPU.  The function to be called must be
-  * previously set up.  This is used to schedule a function for
-  * execution on all CPUs - set up the function then broadcast a
-  * function_interrupt CPI to come here on each CPU */
--static void
--smp_call_function_interrupt(void)
-+static void smp_call_function_interrupt(void)
- {
- 	void (*func) (void *info) = call_data->func;
- 	void *info = call_data->info;
-@@ -1027,16 +1012,17 @@ smp_call_function_interrupt(void)
- 	 * about to execute the function
- 	 */
- 	mb();
--	if(!test_and_clear_bit(cpu, &call_data->started)) {
-+	if (!test_and_clear_bit(cpu, &call_data->started)) {
- 		/* If the bit wasn't set, this could be a replay */
--		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion with no call pending\n", cpu);
-+		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
-+		       " with no call pending\n", cpu);
- 		return;
- 	}
- 	/*
- 	 * At this point the info structure may be out of scope unless wait==1
- 	 */
- 	irq_enter();
--	(*func)(info);
-+	(*func) (info);
- 	__get_cpu_var(irq_stat).irq_call_count++;
- 	irq_exit();
- 	if (wait) {
-@@ -1046,14 +1032,13 @@ smp_call_function_interrupt(void)
- }
- 
- static int
--voyager_smp_call_function_mask (cpumask_t cpumask,
--				void (*func) (void *info), void *info,
--				int wait)
-+voyager_smp_call_function_mask(cpumask_t cpumask,
-+			       void (*func) (void *info), void *info, int wait)
- {
- 	struct call_data_struct data;
- 	u32 mask = cpus_addr(cpumask)[0];
- 
--	mask &= ~(1<<smp_processor_id());
-+	mask &= ~(1 << smp_processor_id());
- 
- 	if (!mask)
- 		return 0;
-@@ -1093,7 +1078,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
-  * so we use the system clock to interrupt one processor, which in
-  * turn, broadcasts a timer CPI to all the others --- we receive that
-  * CPI here.  We don't use this actually for counting so losing
-- * ticks doesn't matter 
-+ * ticks doesn't matter
-  *
-  * FIXME: For those CPUs which actually have a local APIC, we could
-  * try to use it to trigger this interrupt instead of having to
-@@ -1101,8 +1086,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
-  * no local APIC, so I can't do this
-  *
-  * This function is currently a placeholder and is unused in the code */
--fastcall void 
--smp_apic_timer_interrupt(struct pt_regs *regs)
-+void smp_apic_timer_interrupt(struct pt_regs *regs)
- {
- 	struct pt_regs *old_regs = set_irq_regs(regs);
- 	wrapper_smp_local_timer_interrupt();
-@@ -1110,8 +1094,7 @@ smp_apic_timer_interrupt(struct pt_regs *regs)
- }
- 
- /* All of the QUAD interrupt GATES */
--fastcall void
--smp_qic_timer_interrupt(struct pt_regs *regs)
-+void smp_qic_timer_interrupt(struct pt_regs *regs)
- {
- 	struct pt_regs *old_regs = set_irq_regs(regs);
- 	ack_QIC_CPI(QIC_TIMER_CPI);
-@@ -1119,127 +1102,112 @@ smp_qic_timer_interrupt(struct pt_regs *regs)
- 	set_irq_regs(old_regs);
- }
- 
--fastcall void
--smp_qic_invalidate_interrupt(struct pt_regs *regs)
-+void smp_qic_invalidate_interrupt(struct pt_regs *regs)
- {
- 	ack_QIC_CPI(QIC_INVALIDATE_CPI);
- 	smp_invalidate_interrupt();
- }
- 
--fastcall void
--smp_qic_reschedule_interrupt(struct pt_regs *regs)
-+void smp_qic_reschedule_interrupt(struct pt_regs *regs)
- {
- 	ack_QIC_CPI(QIC_RESCHEDULE_CPI);
- 	smp_reschedule_interrupt();
- }
- 
--fastcall void
--smp_qic_enable_irq_interrupt(struct pt_regs *regs)
-+void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
- {
- 	ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
- 	smp_enable_irq_interrupt();
- }
- 
--fastcall void
--smp_qic_call_function_interrupt(struct pt_regs *regs)
-+void smp_qic_call_function_interrupt(struct pt_regs *regs)
- {
- 	ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
- 	smp_call_function_interrupt();
- }
- 
--fastcall void
--smp_vic_cpi_interrupt(struct pt_regs *regs)
-+void smp_vic_cpi_interrupt(struct pt_regs *regs)
- {
- 	struct pt_regs *old_regs = set_irq_regs(regs);
- 	__u8 cpu = smp_processor_id();
- 
--	if(is_cpu_quad())
-+	if (is_cpu_quad())
- 		ack_QIC_CPI(VIC_CPI_LEVEL0);
- 	else
- 		ack_VIC_CPI(VIC_CPI_LEVEL0);
- 
--	if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
-+	if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
- 		wrapper_smp_local_timer_interrupt();
--	if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
-+	if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
- 		smp_invalidate_interrupt();
--	if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
-+	if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
- 		smp_reschedule_interrupt();
--	if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
-+	if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
- 		smp_enable_irq_interrupt();
--	if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
-+	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
- 		smp_call_function_interrupt();
- 	set_irq_regs(old_regs);
- }
- 
--static void
--do_flush_tlb_all(void* info)
-+static void do_flush_tlb_all(void *info)
- {
- 	unsigned long cpu = smp_processor_id();
- 
- 	__flush_tlb_all();
- 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
--		leave_mm(cpu);
-+		voyager_leave_mm(cpu);
- }
- 
+-	/* kprobe_running() needs smp_processor_id() */
+-	if (!user_mode(regs)) {
+-		preempt_disable();
+-		if (kprobe_running() && kprobe_fault_handler(regs, 14))
+-			ret = 1;
+-		preempt_enable();
+-	}
 -
- /* flush the TLB of every active CPU in the system */
--void
--flush_tlb_all(void)
-+void flush_tlb_all(void)
- {
- 	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
- }
- 
- /* used to set up the trampoline for other CPUs when the memory manager
-  * is sorted out */
--void __init
--smp_alloc_memory(void)
-+void __init smp_alloc_memory(void)
- {
--	trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE);
--	if(__pa(trampoline_base) >= 0x93000)
-+	trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE);
-+	if (__pa(trampoline_base) >= 0x93000)
- 		BUG();
- }
- 
- /* send a reschedule CPI to one CPU by physical CPU number*/
--static void
--voyager_smp_send_reschedule(int cpu)
-+static void voyager_smp_send_reschedule(int cpu)
- {
- 	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
- }
- 
+-	return ret;
+-}
+-#else
+-static inline int notify_page_fault(struct pt_regs *regs)
+-{
+-	return 0;
+-}
+-#endif
 -
--int
--hard_smp_processor_id(void)
-+int hard_smp_processor_id(void)
- {
- 	__u8 i;
- 	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
--	if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
-+	if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
- 		return cpumask & 0x1F;
- 
--	for(i = 0; i < 8; i++) {
--		if(cpumask & (1<<i))
-+	for (i = 0; i < 8; i++) {
-+		if (cpumask & (1 << i))
- 			return i;
- 	}
- 	printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
- 	return 0;
- }
- 
--int
--safe_smp_processor_id(void)
-+int safe_smp_processor_id(void)
- {
- 	return hard_smp_processor_id();
- }
- 
- /* broadcast a halt to all other CPUs */
--static void
--voyager_smp_send_stop(void)
-+static void voyager_smp_send_stop(void)
- {
- 	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
- }
- 
- /* this function is triggered in time.c when a clock tick fires
-  * we need to re-broadcast the tick to all CPUs */
--void
--smp_vic_timer_interrupt(void)
-+void smp_vic_timer_interrupt(void)
- {
- 	send_CPI_allbutself(VIC_TIMER_CPI);
- 	smp_local_timer_interrupt();
-@@ -1253,8 +1221,7 @@ smp_vic_timer_interrupt(void)
-  * multiplier is 1 and it can be changed by writing the new multiplier
-  * value into /proc/profile.
-  */
--void
--smp_local_timer_interrupt(void)
-+void smp_local_timer_interrupt(void)
- {
- 	int cpu = smp_processor_id();
- 	long weight;
-@@ -1269,18 +1236,18 @@ smp_local_timer_interrupt(void)
- 		 *
- 		 * Interrupts are already masked off at this point.
- 		 */
--		per_cpu(prof_counter,cpu) = per_cpu(prof_multiplier, cpu);
-+		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
- 		if (per_cpu(prof_counter, cpu) !=
--					per_cpu(prof_old_multiplier, cpu)) {
-+		    per_cpu(prof_old_multiplier, cpu)) {
- 			/* FIXME: need to update the vic timer tick here */
- 			per_cpu(prof_old_multiplier, cpu) =
--						per_cpu(prof_counter, cpu);
-+			    per_cpu(prof_counter, cpu);
- 		}
- 
- 		update_process_times(user_mode_vm(get_irq_regs()));
- 	}
- 
--	if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
-+	if (((1 << cpu) & voyager_extended_vic_processors) == 0)
- 		/* only extended VIC processors participate in
- 		 * interrupt distribution */
- 		return;
-@@ -1296,12 +1263,12 @@ smp_local_timer_interrupt(void)
- 	 * we can take more than 100K local irqs per second on a 100 MHz P5.
- 	 */
- 
--	if((++vic_tick[cpu] & 0x7) != 0)
-+	if ((++vic_tick[cpu] & 0x7) != 0)
- 		return;
- 	/* get here every 16 ticks (about every 1/6 of a second) */
- 
- 	/* Change our priority to give someone else a chance at getting
--         * the IRQ. The algorithm goes like this:
-+	 * the IRQ. The algorithm goes like this:
- 	 *
- 	 * In the VIC, the dynamically routed interrupt is always
- 	 * handled by the lowest priority eligible (i.e. receiving
-@@ -1325,18 +1292,18 @@ smp_local_timer_interrupt(void)
- 	 * affinity code since we now try to even up the interrupt
- 	 * counts when an affinity binding is keeping them on a
- 	 * particular CPU*/
--	weight = (vic_intr_count[cpu]*voyager_extended_cpus
-+	weight = (vic_intr_count[cpu] * voyager_extended_cpus
- 		  - vic_intr_total) >> 4;
- 	weight += 4;
--	if(weight > 7)
-+	if (weight > 7)
- 		weight = 7;
--	if(weight < 0)
-+	if (weight < 0)
- 		weight = 0;
--	
--	outb((__u8)weight, VIC_PRIORITY_REGISTER);
-+
-+	outb((__u8) weight, VIC_PRIORITY_REGISTER);
- 
- #ifdef VOYAGER_DEBUG
--	if((vic_tick[cpu] & 0xFFF) == 0) {
-+	if ((vic_tick[cpu] & 0xFFF) == 0) {
- 		/* print this message roughly every 25 secs */
- 		printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
- 		       cpu, vic_tick[cpu], weight);
-@@ -1345,15 +1312,14 @@ smp_local_timer_interrupt(void)
- }
- 
- /* setup the profiling timer */
--int 
--setup_profiling_timer(unsigned int multiplier)
-+int setup_profiling_timer(unsigned int multiplier)
- {
- 	int i;
- 
--	if ( (!multiplier))
-+	if ((!multiplier))
- 		return -EINVAL;
- 
--	/* 
-+	/*
- 	 * Set the new multiplier for each CPU. CPUs don't start using the
- 	 * new values until the next timer interrupt in which they do process
- 	 * accounting.
-@@ -1367,15 +1333,13 @@ setup_profiling_timer(unsigned int multiplier)
- /* This is a bit of a mess, but forced on us by the genirq changes
-  * there's no genirq handler that really does what voyager wants
-  * so hack it up with the simple IRQ handler */
--static void fastcall
--handle_vic_irq(unsigned int irq, struct irq_desc *desc)
-+static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
- {
- 	before_handle_vic_irq(irq);
- 	handle_simple_irq(irq, desc);
- 	after_handle_vic_irq(irq);
- }
- 
+-/* Sometimes the CPU reports invalid exceptions on prefetch.
+-   Check that here and ignore.
+-   Opcode checker based on code by Richard Brunner */
+-static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+-				unsigned long error_code)
+-{ 
+-	unsigned char *instr;
+-	int scan_more = 1;
+-	int prefetch = 0; 
+-	unsigned char *max_instr;
 -
- /*  The CPIs are handled in the per cpu 8259s, so they must be
-  *  enabled to be received: FIX: enabling the CPIs in the early
-  *  boot sequence interferes with bug checking; enable them later
-@@ -1385,13 +1349,12 @@ handle_vic_irq(unsigned int irq, struct irq_desc *desc)
- #define QIC_SET_GATE(cpi, vector) \
- 	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
- 
--void __init
--smp_intr_init(void)
-+void __init smp_intr_init(void)
- {
- 	int i;
- 
- 	/* initialize the per cpu irq mask to all disabled */
--	for(i = 0; i < NR_CPUS; i++)
-+	for (i = 0; i < NR_CPUS; i++)
- 		vic_irq_mask[i] = 0xFFFF;
- 
- 	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
-@@ -1404,42 +1367,40 @@ smp_intr_init(void)
- 	QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
- 	QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
- 	QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);
+-	/* If it was a exec fault ignore */
+-	if (error_code & PF_INSTR)
+-		return 0;
 -	
- 
--	/* now put the VIC descriptor into the first 48 IRQs 
-+	/* now put the VIC descriptor into the first 48 IRQs
- 	 *
- 	 * This is for later: first 16 correspond to PC IRQs; next 16
- 	 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
--	for(i = 0; i < 48; i++)
-+	for (i = 0; i < 48; i++)
- 		set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
- }
- 
- /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
-  * processor to receive CPI */
--static void
--send_CPI(__u32 cpuset, __u8 cpi)
-+static void send_CPI(__u32 cpuset, __u8 cpi)
- {
- 	int cpu;
- 	__u32 quad_cpuset = (cpuset & voyager_quad_processors);
- 
--	if(cpi < VIC_START_FAKE_CPI) {
--		/* fake CPI are only used for booting, so send to the 
-+	if (cpi < VIC_START_FAKE_CPI) {
-+		/* fake CPI are only used for booting, so send to the
- 		 * extended quads as well---Quads must be VIC booted */
--		outb((__u8)(cpuset), VIC_CPI_Registers[cpi]);
-+		outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
- 		return;
- 	}
--	if(quad_cpuset)
-+	if (quad_cpuset)
- 		send_QIC_CPI(quad_cpuset, cpi);
- 	cpuset &= ~quad_cpuset;
- 	cpuset &= 0xff;		/* only first 8 CPUs vaild for VIC CPI */
--	if(cpuset == 0)
-+	if (cpuset == 0)
- 		return;
- 	for_each_online_cpu(cpu) {
--		if(cpuset & (1<<cpu))
-+		if (cpuset & (1 << cpu))
- 			set_bit(cpi, &vic_cpi_mailbox[cpu]);
- 	}
--	if(cpuset)
--		outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
-+	if (cpuset)
-+		outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
- }
- 
- /* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
-@@ -1448,20 +1409,19 @@ send_CPI(__u32 cpuset, __u8 cpi)
-  * DON'T make this inline otherwise the cache line read will be
-  * optimised away
-  * */
--static int
--ack_QIC_CPI(__u8 cpi) {
-+static int ack_QIC_CPI(__u8 cpi)
-+{
- 	__u8 cpu = hard_smp_processor_id();
- 
- 	cpi &= 7;
- 
--	outb(1<<cpi, QIC_INTERRUPT_CLEAR1);
-+	outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
- 	return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
- }
- 
--static void
--ack_special_QIC_CPI(__u8 cpi)
-+static void ack_special_QIC_CPI(__u8 cpi)
- {
--	switch(cpi) {
-+	switch (cpi) {
- 	case VIC_CMN_INT:
- 		outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
- 		break;
-@@ -1474,8 +1434,7 @@ ack_special_QIC_CPI(__u8 cpi)
- }
- 
- /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
--static void
--ack_VIC_CPI(__u8 cpi)
-+static void ack_VIC_CPI(__u8 cpi)
- {
- #ifdef VOYAGER_DEBUG
- 	unsigned long flags;
-@@ -1484,17 +1443,17 @@ ack_VIC_CPI(__u8 cpi)
- 
- 	local_irq_save(flags);
- 	isr = vic_read_isr();
--	if((isr & (1<<(cpi &7))) == 0) {
-+	if ((isr & (1 << (cpi & 7))) == 0) {
- 		printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
- 	}
- #endif
- 	/* send specific EOI; the two system interrupts have
- 	 * bit 4 set for a separate vector but behave as the
- 	 * corresponding 3 bit intr */
--	outb_p(0x60|(cpi & 7),0x20);
-+	outb_p(0x60 | (cpi & 7), 0x20);
- 
- #ifdef VOYAGER_DEBUG
--	if((vic_read_isr() & (1<<(cpi &7))) != 0) {
-+	if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
- 		printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
- 	}
- 	local_irq_restore(flags);
-@@ -1502,12 +1461,11 @@ ack_VIC_CPI(__u8 cpi)
- }
- 
- /* cribbed with thanks from irq.c */
--#define __byte(x,y) 	(((unsigned char *)&(y))[x])
-+#define __byte(x,y)	(((unsigned char *)&(y))[x])
- #define cached_21(cpu)	(__byte(0,vic_irq_mask[cpu]))
- #define cached_A1(cpu)	(__byte(1,vic_irq_mask[cpu]))
- 
--static unsigned int
--startup_vic_irq(unsigned int irq)
-+static unsigned int startup_vic_irq(unsigned int irq)
- {
- 	unmask_vic_irq(irq);
- 
-@@ -1535,13 +1493,12 @@ startup_vic_irq(unsigned int irq)
-  *    broadcast an Interrupt enable CPI which causes all other CPUs to
-  *    adjust their masks accordingly.  */
- 
--static void
--unmask_vic_irq(unsigned int irq)
-+static void unmask_vic_irq(unsigned int irq)
- {
- 	/* linux doesn't to processor-irq affinity, so enable on
- 	 * all CPUs we know about */
- 	int cpu = smp_processor_id(), real_cpu;
--	__u16 mask = (1<<irq);
-+	__u16 mask = (1 << irq);
- 	__u32 processorList = 0;
- 	unsigned long flags;
- 
-@@ -1549,78 +1506,72 @@ unmask_vic_irq(unsigned int irq)
- 		irq, cpu, cpu_irq_affinity[cpu]));
- 	spin_lock_irqsave(&vic_irq_lock, flags);
- 	for_each_online_cpu(real_cpu) {
--		if(!(voyager_extended_vic_processors & (1<<real_cpu)))
-+		if (!(voyager_extended_vic_processors & (1 << real_cpu)))
- 			continue;
--		if(!(cpu_irq_affinity[real_cpu] & mask)) {
-+		if (!(cpu_irq_affinity[real_cpu] & mask)) {
- 			/* irq has no affinity for this CPU, ignore */
- 			continue;
- 		}
--		if(real_cpu == cpu) {
-+		if (real_cpu == cpu) {
- 			enable_local_vic_irq(irq);
--		}
--		else if(vic_irq_mask[real_cpu] & mask) {
-+		} else if (vic_irq_mask[real_cpu] & mask) {
- 			vic_irq_enable_mask[real_cpu] |= mask;
--			processorList |= (1<<real_cpu);
-+			processorList |= (1 << real_cpu);
- 		}
- 	}
- 	spin_unlock_irqrestore(&vic_irq_lock, flags);
--	if(processorList)
-+	if (processorList)
- 		send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
- }
- 
--static void
--mask_vic_irq(unsigned int irq)
-+static void mask_vic_irq(unsigned int irq)
- {
- 	/* lazy disable, do nothing */
- }
- 
--static void
--enable_local_vic_irq(unsigned int irq)
-+static void enable_local_vic_irq(unsigned int irq)
- {
- 	__u8 cpu = smp_processor_id();
- 	__u16 mask = ~(1 << irq);
- 	__u16 old_mask = vic_irq_mask[cpu];
- 
- 	vic_irq_mask[cpu] &= mask;
--	if(vic_irq_mask[cpu] == old_mask)
-+	if (vic_irq_mask[cpu] == old_mask)
- 		return;
- 
- 	VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
- 		irq, cpu));
- 
- 	if (irq & 8) {
--		outb_p(cached_A1(cpu),0xA1);
-+		outb_p(cached_A1(cpu), 0xA1);
- 		(void)inb_p(0xA1);
+-	instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
+-	max_instr = instr + 15;
+-
+-	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
+-		return 0;
+-
+-	while (scan_more && instr < max_instr) { 
+-		unsigned char opcode;
+-		unsigned char instr_hi;
+-		unsigned char instr_lo;
+-
+-		if (probe_kernel_address(instr, opcode))
+-			break; 
+-
+-		instr_hi = opcode & 0xf0; 
+-		instr_lo = opcode & 0x0f; 
+-		instr++;
+-
+-		switch (instr_hi) { 
+-		case 0x20:
+-		case 0x30:
+-			/* Values 0x26,0x2E,0x36,0x3E are valid x86
+-			   prefixes.  In long mode, the CPU will signal
+-			   invalid opcode if some of these prefixes are
+-			   present so we will never get here anyway */
+-			scan_more = ((instr_lo & 7) == 0x6);
+-			break;
+-			
+-		case 0x40:
+-			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
+-			   Need to figure out under what instruction mode the
+-			   instruction was issued ... */
+-			/* Could check the LDT for lm, but for now it's good
+-			   enough to assume that long mode only uses well known
+-			   segments or kernel. */
+-			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
+-			break;
+-			
+-		case 0x60:
+-			/* 0x64 thru 0x67 are valid prefixes in all modes. */
+-			scan_more = (instr_lo & 0xC) == 0x4;
+-			break;		
+-		case 0xF0:
+-			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
+-			scan_more = !instr_lo || (instr_lo>>1) == 1;
+-			break;			
+-		case 0x00:
+-			/* Prefetch instruction is 0x0F0D or 0x0F18 */
+-			scan_more = 0;
+-			if (probe_kernel_address(instr, opcode))
+-				break;
+-			prefetch = (instr_lo == 0xF) &&
+-				(opcode == 0x0D || opcode == 0x18);
+-			break;			
+-		default:
+-			scan_more = 0;
+-			break;
+-		} 
 -	}
--	else {
--		outb_p(cached_21(cpu),0x21);
-+	} else {
-+		outb_p(cached_21(cpu), 0x21);
- 		(void)inb_p(0x21);
- 	}
- }
- 
--static void
--disable_local_vic_irq(unsigned int irq)
-+static void disable_local_vic_irq(unsigned int irq)
- {
- 	__u8 cpu = smp_processor_id();
- 	__u16 mask = (1 << irq);
- 	__u16 old_mask = vic_irq_mask[cpu];
- 
--	if(irq == 7)
-+	if (irq == 7)
- 		return;
- 
- 	vic_irq_mask[cpu] |= mask;
--	if(old_mask == vic_irq_mask[cpu])
-+	if (old_mask == vic_irq_mask[cpu])
- 		return;
- 
- 	VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
- 		irq, cpu));
- 
- 	if (irq & 8) {
--		outb_p(cached_A1(cpu),0xA1);
-+		outb_p(cached_A1(cpu), 0xA1);
- 		(void)inb_p(0xA1);
+-	return prefetch;
+-}
+-
+-static int bad_address(void *p) 
+-{ 
+-	unsigned long dummy;
+-	return probe_kernel_address((unsigned long *)p, dummy);
+-} 
+-
+-void dump_pagetable(unsigned long address)
+-{
+-	pgd_t *pgd;
+-	pud_t *pud;
+-	pmd_t *pmd;
+-	pte_t *pte;
+-
+-	pgd = (pgd_t *)read_cr3();
+-
+-	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); 
+-	pgd += pgd_index(address);
+-	if (bad_address(pgd)) goto bad;
+-	printk("PGD %lx ", pgd_val(*pgd));
+-	if (!pgd_present(*pgd)) goto ret; 
+-
+-	pud = pud_offset(pgd, address);
+-	if (bad_address(pud)) goto bad;
+-	printk("PUD %lx ", pud_val(*pud));
+-	if (!pud_present(*pud))	goto ret;
+-
+-	pmd = pmd_offset(pud, address);
+-	if (bad_address(pmd)) goto bad;
+-	printk("PMD %lx ", pmd_val(*pmd));
+-	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
+-
+-	pte = pte_offset_kernel(pmd, address);
+-	if (bad_address(pte)) goto bad;
+-	printk("PTE %lx", pte_val(*pte)); 
+-ret:
+-	printk("\n");
+-	return;
+-bad:
+-	printk("BAD\n");
+-}
+-
+-static const char errata93_warning[] = 
+-KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
+-KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
+-KERN_ERR "******* Please consider a BIOS update.\n"
+-KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
+-
+-/* Workaround for K8 erratum #93 & buggy BIOS.
+-   BIOS SMM functions are required to use a specific workaround
+-   to avoid corruption of the 64bit RIP register on C stepping K8. 
+-   A lot of BIOS that didn't get tested properly miss this. 
+-   The OS sees this as a page fault with the upper 32bits of RIP cleared.
+-   Try to work around it here.
+-   Note we only handle faults in kernel here. */
+-
+-static int is_errata93(struct pt_regs *regs, unsigned long address) 
+-{
+-	static int warned;
+-	if (address != regs->rip)
+-		return 0;
+-	if ((address >> 32) != 0) 
+-		return 0;
+-	address |= 0xffffffffUL << 32;
+-	if ((address >= (u64)_stext && address <= (u64)_etext) || 
+-	    (address >= MODULES_VADDR && address <= MODULES_END)) { 
+-		if (!warned) {
+-			printk(errata93_warning); 		
+-			warned = 1;
+-		}
+-		regs->rip = address;
+-		return 1;
 -	}
--	else {
--		outb_p(cached_21(cpu),0x21);
-+	} else {
-+		outb_p(cached_21(cpu), 0x21);
- 		(void)inb_p(0x21);
- 	}
- }
-@@ -1631,8 +1582,7 @@ disable_local_vic_irq(unsigned int irq)
-  * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
-  * this interrupt actually comes in, then we mask and ack here to push
-  * the interrupt off to another CPU */
--static void
--before_handle_vic_irq(unsigned int irq)
-+static void before_handle_vic_irq(unsigned int irq)
- {
- 	irq_desc_t *desc = irq_desc + irq;
- 	__u8 cpu = smp_processor_id();
-@@ -1641,16 +1591,16 @@ before_handle_vic_irq(unsigned int irq)
- 	vic_intr_total++;
- 	vic_intr_count[cpu]++;
- 
--	if(!(cpu_irq_affinity[cpu] & (1<<irq))) {
-+	if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
- 		/* The irq is not in our affinity mask, push it off
- 		 * onto another CPU */
--		VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n",
--			irq, cpu));
-+		VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
-+			"on cpu %d\n", irq, cpu));
- 		disable_local_vic_irq(irq);
- 		/* set IRQ_INPROGRESS to prevent the handler in irq.c from
- 		 * actually calling the interrupt routine */
- 		desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
--	} else if(desc->status & IRQ_DISABLED) {
-+	} else if (desc->status & IRQ_DISABLED) {
- 		/* Damn, the interrupt actually arrived, do the lazy
- 		 * disable thing. The interrupt routine in irq.c will
- 		 * not handle a IRQ_DISABLED interrupt, so nothing more
-@@ -1667,8 +1617,7 @@ before_handle_vic_irq(unsigned int irq)
- }
- 
- /* Finish the VIC interrupt: basically mask */
--static void
--after_handle_vic_irq(unsigned int irq)
-+static void after_handle_vic_irq(unsigned int irq)
- {
- 	irq_desc_t *desc = irq_desc + irq;
- 
-@@ -1685,11 +1634,11 @@ after_handle_vic_irq(unsigned int irq)
- #ifdef VOYAGER_DEBUG
- 		/* DEBUG: before we ack, check what's in progress */
- 		isr = vic_read_isr();
--		if((isr & (1<<irq) && !(status & IRQ_REPLAY)) == 0) {
-+		if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
- 			int i;
- 			__u8 cpu = smp_processor_id();
- 			__u8 real_cpu;
--			int mask; /* Um... initialize me??? --RR */
-+			int mask;	/* Um... initialize me??? --RR */
- 
- 			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
- 			       cpu, irq);
-@@ -1698,9 +1647,10 @@ after_handle_vic_irq(unsigned int irq)
- 				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
- 				     VIC_PROCESSOR_ID);
- 				isr = vic_read_isr();
--				if(isr & (1<<irq)) {
--					printk("VOYAGER SMP: CPU%d ack irq %d\n",
--					       real_cpu, irq);
-+				if (isr & (1 << irq)) {
-+					printk
-+					    ("VOYAGER SMP: CPU%d ack irq %d\n",
-+					     real_cpu, irq);
- 					ack_vic_irq(irq);
- 				}
- 				outb(cpu, VIC_PROCESSOR_ID);
-@@ -1711,7 +1661,7 @@ after_handle_vic_irq(unsigned int irq)
- 		 * receipt by another CPU so everything must be in
- 		 * order here  */
- 		ack_vic_irq(irq);
--		if(status & IRQ_REPLAY) {
-+		if (status & IRQ_REPLAY) {
- 			/* replay is set if we disable the interrupt
- 			 * in the before_handle_vic_irq() routine, so
- 			 * clear the in progress bit here to allow the
-@@ -1720,9 +1670,9 @@ after_handle_vic_irq(unsigned int irq)
- 		}
- #ifdef VOYAGER_DEBUG
- 		isr = vic_read_isr();
--		if((isr & (1<<irq)) != 0)
--			printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n",
--			       irq, isr);
-+		if ((isr & (1 << irq)) != 0)
-+			printk("VOYAGER SMP: after_handle_vic_irq() after "
-+			       "ack irq=%d, isr=0x%x\n", irq, isr);
- #endif /* VOYAGER_DEBUG */
- 	}
- 	_raw_spin_unlock(&vic_irq_lock);
-@@ -1731,7 +1681,6 @@ after_handle_vic_irq(unsigned int irq)
- 	 * may be intercepted by another CPU if reasserted */
- }
- 
+-	return 0;
+-} 
 -
- /* Linux processor - interrupt affinity manipulations.
-  *
-  * For each processor, we maintain a 32 bit irq affinity mask.
-@@ -1748,8 +1697,7 @@ after_handle_vic_irq(unsigned int irq)
-  * change the mask and then do an interrupt enable CPI to re-enable on
-  * the selected processors */
- 
--void
--set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
-+void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
- {
- 	/* Only extended processors handle interrupts */
- 	unsigned long real_mask;
-@@ -1757,13 +1705,13 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
- 	int cpu;
- 
- 	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
--	
--	if(cpus_addr(mask)[0] == 0)
-+
-+	if (cpus_addr(mask)[0] == 0)
- 		/* can't have no CPUs to accept the interrupt -- extremely
- 		 * bad things will happen */
- 		return;
- 
--	if(irq == 0)
-+	if (irq == 0)
- 		/* can't change the affinity of the timer IRQ.  This
- 		 * is due to the constraint in the voyager
- 		 * architecture that the CPI also comes in on and IRQ
-@@ -1772,7 +1720,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
- 		 * will no-longer be able to accept VIC CPIs */
- 		return;
- 
--	if(irq >= 32) 
-+	if (irq >= 32)
- 		/* You can only have 32 interrupts in a voyager system
- 		 * (and 32 only if you have a secondary microchannel
- 		 * bus) */
-@@ -1780,8 +1728,8 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
- 
- 	for_each_online_cpu(cpu) {
- 		unsigned long cpu_mask = 1 << cpu;
--		
--		if(cpu_mask & real_mask) {
-+
-+		if (cpu_mask & real_mask) {
- 			/* enable the interrupt for this cpu */
- 			cpu_irq_affinity[cpu] |= irq_mask;
- 		} else {
-@@ -1800,25 +1748,23 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
- 	unmask_vic_irq(irq);
- }
- 
--static void
--ack_vic_irq(unsigned int irq)
-+static void ack_vic_irq(unsigned int irq)
- {
- 	if (irq & 8) {
--		outb(0x62,0x20);	/* Specific EOI to cascade */
--		outb(0x60|(irq & 7),0xA0);
-+		outb(0x62, 0x20);	/* Specific EOI to cascade */
-+		outb(0x60 | (irq & 7), 0xA0);
- 	} else {
--		outb(0x60 | (irq & 7),0x20);
-+		outb(0x60 | (irq & 7), 0x20);
- 	}
- }
- 
- /* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
-  * but are not vectored by it.  This means that the 8259 mask must be
-  * lowered to receive them */
--static __init void
--vic_enable_cpi(void)
-+static __init void vic_enable_cpi(void)
- {
- 	__u8 cpu = smp_processor_id();
--	
-+
- 	/* just take a copy of the current mask (nop for boot cpu) */
- 	vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];
- 
-@@ -1827,7 +1773,7 @@ vic_enable_cpi(void)
- 	/* for sys int and cmn int */
- 	enable_local_vic_irq(7);
- 
--	if(is_cpu_quad()) {
-+	if (is_cpu_quad()) {
- 		outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
- 		outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
- 		VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
-@@ -1838,8 +1784,7 @@ vic_enable_cpi(void)
- 		cpu, vic_irq_mask[cpu]));
- }
- 
--void
--voyager_smp_dump()
-+void voyager_smp_dump()
- {
- 	int old_cpu = smp_processor_id(), cpu;
- 
-@@ -1865,10 +1810,10 @@ voyager_smp_dump()
- 		       cpu, vic_irq_mask[cpu], imr, irr, isr);
- #if 0
- 		/* These lines are put in to try to unstick an un ack'd irq */
--		if(isr != 0) {
-+		if (isr != 0) {
- 			int irq;
--			for(irq=0; irq<16; irq++) {
--				if(isr & (1<<irq)) {
-+			for (irq = 0; irq < 16; irq++) {
-+				if (isr & (1 << irq)) {
- 					printk("\tCPU%d: ack irq %d\n",
- 					       cpu, irq);
- 					local_irq_save(flags);
-@@ -1884,17 +1829,15 @@ voyager_smp_dump()
- 	}
- }
- 
--void
--smp_voyager_power_off(void *dummy)
-+void smp_voyager_power_off(void *dummy)
- {
--	if(smp_processor_id() == boot_cpu_id) 
-+	if (smp_processor_id() == boot_cpu_id)
- 		voyager_power_off();
- 	else
- 		smp_stop_cpu_function(NULL);
- }
- 
--static void __init
--voyager_smp_prepare_cpus(unsigned int max_cpus)
-+static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
- {
- 	/* FIXME: ignore max_cpus for now */
- 	smp_boot_cpus();
-@@ -1911,8 +1854,7 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
- 	cpu_set(smp_processor_id(), cpu_present_map);
- }
- 
--static int __cpuinit
--voyager_cpu_up(unsigned int cpu)
-+static int __cpuinit voyager_cpu_up(unsigned int cpu)
- {
- 	/* This only works at boot for x86.  See "rewrite" above. */
- 	if (cpu_isset(cpu, smp_commenced_mask))
-@@ -1928,14 +1870,12 @@ voyager_cpu_up(unsigned int cpu)
- 	return 0;
- }
- 
--static void __init
--voyager_smp_cpus_done(unsigned int max_cpus)
-+static void __init voyager_smp_cpus_done(unsigned int max_cpus)
- {
- 	zap_low_mappings();
- }
- 
--void __init
--smp_setup_processor_id(void)
-+void __init smp_setup_processor_id(void)
- {
- 	current_thread_info()->cpu = hard_smp_processor_id();
- 	x86_write_percpu(cpu_number, hard_smp_processor_id());
-diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c
-index 50f9366..c69c931 100644
---- a/arch/x86/mach-voyager/voyager_thread.c
-+++ b/arch/x86/mach-voyager/voyager_thread.c
-@@ -30,12 +30,10 @@
- #include <asm/mtrr.h>
- #include <asm/msr.h>
- 
+-static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
+-				 unsigned long error_code)
+-{
+-	unsigned long flags = oops_begin();
+-	struct task_struct *tsk;
 -
- struct task_struct *voyager_thread;
- static __u8 set_timeout;
- 
--static int
--execute(const char *string)
-+static int execute(const char *string)
- {
- 	int ret;
- 
-@@ -52,48 +50,48 @@ execute(const char *string)
- 		NULL,
- 	};
- 
--	if ((ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) {
--		printk(KERN_ERR "Voyager failed to run \"%s\": %i\n",
--		       string, ret);
-+	if ((ret =
-+	     call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) {
-+		printk(KERN_ERR "Voyager failed to run \"%s\": %i\n", string,
-+		       ret);
- 	}
- 	return ret;
- }
- 
--static void
--check_from_kernel(void)
-+static void check_from_kernel(void)
- {
--	if(voyager_status.switch_off) {
--		
-+	if (voyager_status.switch_off) {
-+
- 		/* FIXME: This should be configurable via proc */
- 		execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
--	} else if(voyager_status.power_fail) {
-+	} else if (voyager_status.power_fail) {
- 		VDEBUG(("Voyager daemon detected AC power failure\n"));
--		
-+
- 		/* FIXME: This should be configureable via proc */
- 		execute("umask 600; echo F > /etc/powerstatus; kill -PWR 1");
- 		set_timeout = 1;
- 	}
- }
- 
--static void
--check_continuing_condition(void)
-+static void check_continuing_condition(void)
- {
--	if(voyager_status.power_fail) {
-+	if (voyager_status.power_fail) {
- 		__u8 data;
--		voyager_cat_psi(VOYAGER_PSI_SUBREAD, 
-+		voyager_cat_psi(VOYAGER_PSI_SUBREAD,
- 				VOYAGER_PSI_AC_FAIL_REG, &data);
--		if((data & 0x1f) == 0) {
-+		if ((data & 0x1f) == 0) {
- 			/* all power restored */
--			printk(KERN_NOTICE "VOYAGER AC power restored, cancelling shutdown\n");
-+			printk(KERN_NOTICE
-+			       "VOYAGER AC power restored, cancelling shutdown\n");
- 			/* FIXME: should be user configureable */
--			execute("umask 600; echo O > /etc/powerstatus; kill -PWR 1");
-+			execute
-+			    ("umask 600; echo O > /etc/powerstatus; kill -PWR 1");
- 			set_timeout = 0;
- 		}
- 	}
- }
- 
--static int
--thread(void *unused)
-+static int thread(void *unused)
- {
- 	printk(KERN_NOTICE "Voyager starting monitor thread\n");
- 
-@@ -102,7 +100,7 @@ thread(void *unused)
- 		schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT);
- 
- 		VDEBUG(("Voyager Daemon awoken\n"));
--		if(voyager_status.request_from_kernel == 0) {
-+		if (voyager_status.request_from_kernel == 0) {
- 			/* probably awoken from timeout */
- 			check_continuing_condition();
- 		} else {
-@@ -112,20 +110,18 @@ thread(void *unused)
- 	}
- }
- 
--static int __init
--voyager_thread_start(void)
-+static int __init voyager_thread_start(void)
- {
- 	voyager_thread = kthread_run(thread, NULL, "kvoyagerd");
- 	if (IS_ERR(voyager_thread)) {
--		printk(KERN_ERR "Voyager: Failed to create system monitor thread.\n");
-+		printk(KERN_ERR
-+		       "Voyager: Failed to create system monitor thread.\n");
- 		return PTR_ERR(voyager_thread);
- 	}
- 	return 0;
- }
- 
+-	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
+-	       current->comm, address);
+-	dump_pagetable(address);
+-	tsk = current;
+-	tsk->thread.cr2 = address;
+-	tsk->thread.trap_no = 14;
+-	tsk->thread.error_code = error_code;
+-	__die("Bad pagetable", regs, error_code);
+-	oops_end(flags);
+-	do_exit(SIGKILL);
+-}
 -
--static void __exit
--voyager_thread_stop(void)
-+static void __exit voyager_thread_stop(void)
- {
- 	kthread_stop(voyager_thread);
- }
-diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
-index a1b0d22..59d353d 100644
---- a/arch/x86/math-emu/errors.c
-+++ b/arch/x86/math-emu/errors.c
-@@ -33,45 +33,41 @@
- #undef PRINT_MESSAGES
- /* */
- 
+-/*
+- * Handle a fault on the vmalloc area
+- *
+- * This assumes no large pages in there.
+- */
+-static int vmalloc_fault(unsigned long address)
+-{
+-	pgd_t *pgd, *pgd_ref;
+-	pud_t *pud, *pud_ref;
+-	pmd_t *pmd, *pmd_ref;
+-	pte_t *pte, *pte_ref;
 -
- #if 0
- void Un_impl(void)
- {
--  u_char byte1, FPU_modrm;
--  unsigned long address = FPU_ORIG_EIP;
+-	/* Copy kernel mappings over when needed. This can also
+-	   happen within a race in page table update. In the later
+-	   case just flush. */
 -
--  RE_ENTRANT_CHECK_OFF;
--  /* No need to check access_ok(), we have previously fetched these bytes. */
--  printk("Unimplemented FPU Opcode at eip=%p : ", (void __user *) address);
--  if ( FPU_CS == __USER_CS )
--    {
--      while ( 1 )
--	{
--	  FPU_get_user(byte1, (u_char __user *) address);
--	  if ( (byte1 & 0xf8) == 0xd8 ) break;
--	  printk("[%02x]", byte1);
--	  address++;
-+	u_char byte1, FPU_modrm;
-+	unsigned long address = FPU_ORIG_EIP;
-+
-+	RE_ENTRANT_CHECK_OFF;
-+	/* No need to check access_ok(), we have previously fetched these bytes. */
-+	printk("Unimplemented FPU Opcode at eip=%p : ", (void __user *)address);
-+	if (FPU_CS == __USER_CS) {
-+		while (1) {
-+			FPU_get_user(byte1, (u_char __user *) address);
-+			if ((byte1 & 0xf8) == 0xd8)
-+				break;
-+			printk("[%02x]", byte1);
-+			address++;
-+		}
-+		printk("%02x ", byte1);
-+		FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
-+
-+		if (FPU_modrm >= 0300)
-+			printk("%02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8,
-+			       FPU_modrm & 7);
-+		else
-+			printk("/%d\n", (FPU_modrm >> 3) & 7);
-+	} else {
-+		printk("cs selector = %04x\n", FPU_CS);
- 	}
--      printk("%02x ", byte1);
--      FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
--      
--      if (FPU_modrm >= 0300)
--	printk("%02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8, FPU_modrm & 7);
--      else
--	printk("/%d\n", (FPU_modrm >> 3) & 7);
--    }
--  else
--    {
--      printk("cs selector = %04x\n", FPU_CS);
--    }
+-	pgd = pgd_offset(current->mm ?: &init_mm, address);
+-	pgd_ref = pgd_offset_k(address);
+-	if (pgd_none(*pgd_ref))
+-		return -1;
+-	if (pgd_none(*pgd))
+-		set_pgd(pgd, *pgd_ref);
+-	else
+-		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 -
--  RE_ENTRANT_CHECK_ON;
+-	/* Below here mismatches are bugs because these lower tables
+-	   are shared */
 -
--  EXCEPTION(EX_Invalid);
- 
+-	pud = pud_offset(pgd, address);
+-	pud_ref = pud_offset(pgd_ref, address);
+-	if (pud_none(*pud_ref))
+-		return -1;
+-	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+-		BUG();
+-	pmd = pmd_offset(pud, address);
+-	pmd_ref = pmd_offset(pud_ref, address);
+-	if (pmd_none(*pmd_ref))
+-		return -1;
+-	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+-		BUG();
+-	pte_ref = pte_offset_kernel(pmd_ref, address);
+-	if (!pte_present(*pte_ref))
+-		return -1;
+-	pte = pte_offset_kernel(pmd, address);
+-	/* Don't use pte_page here, because the mappings can point
+-	   outside mem_map, and the NUMA hash lookup cannot handle
+-	   that. */
+-	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
+-		BUG();
+-	return 0;
 -}
--#endif  /*  0  */
-+	RE_ENTRANT_CHECK_ON;
- 
-+	EXCEPTION(EX_Invalid);
-+
-+}
-+#endif /*  0  */
- 
- /*
-    Called for opcodes which are illegal and which are known to result in a
-@@ -79,139 +75,152 @@ void Un_impl(void)
-    */
- void FPU_illegal(void)
- {
--  math_abort(FPU_info,SIGILL);
-+	math_abort(FPU_info, SIGILL);
- }
- 
 -
+-int show_unhandled_signals = 1;
 -
- void FPU_printall(void)
- {
--  int i;
--  static const char *tag_desc[] = { "Valid", "Zero", "ERROR", "Empty",
--                              "DeNorm", "Inf", "NaN" };
--  u_char byte1, FPU_modrm;
--  unsigned long address = FPU_ORIG_EIP;
+-/*
+- * This routine handles page faults.  It determines the address,
+- * and the problem, and then passes it off to one of the appropriate
+- * routines.
+- */
+-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+-					unsigned long error_code)
+-{
+-	struct task_struct *tsk;
+-	struct mm_struct *mm;
+-	struct vm_area_struct * vma;
+-	unsigned long address;
+-	const struct exception_table_entry *fixup;
+-	int write, fault;
+-	unsigned long flags;
+-	siginfo_t info;
 -
--  RE_ENTRANT_CHECK_OFF;
--  /* No need to check access_ok(), we have previously fetched these bytes. */
--  printk("At %p:", (void *) address);
--  if ( FPU_CS == __USER_CS )
--    {
-+	int i;
-+	static const char *tag_desc[] = { "Valid", "Zero", "ERROR", "Empty",
-+		"DeNorm", "Inf", "NaN"
-+	};
-+	u_char byte1, FPU_modrm;
-+	unsigned long address = FPU_ORIG_EIP;
-+
-+	RE_ENTRANT_CHECK_OFF;
-+	/* No need to check access_ok(), we have previously fetched these bytes. */
-+	printk("At %p:", (void *)address);
-+	if (FPU_CS == __USER_CS) {
- #define MAX_PRINTED_BYTES 20
--      for ( i = 0; i < MAX_PRINTED_BYTES; i++ )
--	{
--	  FPU_get_user(byte1, (u_char __user *) address);
--	  if ( (byte1 & 0xf8) == 0xd8 )
--	    {
--	      printk(" %02x", byte1);
--	      break;
--	    }
--	  printk(" [%02x]", byte1);
--	  address++;
+-	/*
+-	 * We can fault from pretty much anywhere, with unknown IRQ state.
+-	 */
+-	trace_hardirqs_fixup();
+-
+-	tsk = current;
+-	mm = tsk->mm;
+-	prefetchw(&mm->mmap_sem);
+-
+-	/* get the address */
+-	address = read_cr2();
+-
+-	info.si_code = SEGV_MAPERR;
+-
+-
+-	/*
+-	 * We fault-in kernel-space virtual memory on-demand. The
+-	 * 'reference' page table is init_mm.pgd.
+-	 *
+-	 * NOTE! We MUST NOT take any locks for this case. We may
+-	 * be in an interrupt or a critical region, and should
+-	 * only copy the information from the master page table,
+-	 * nothing more.
+-	 *
+-	 * This verifies that the fault happens in kernel space
+-	 * (error_code & 4) == 0, and that the fault was not a
+-	 * protection error (error_code & 9) == 0.
+-	 */
+-	if (unlikely(address >= TASK_SIZE64)) {
+-		/*
+-		 * Don't check for the module range here: its PML4
+-		 * is always initialized because it's shared with the main
+-		 * kernel text. Only vmalloc may need PML4 syncups.
+-		 */
+-		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
+-		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
+-			if (vmalloc_fault(address) >= 0)
+-				return;
+-		}
+-		if (notify_page_fault(regs))
+-			return;
+-		/*
+-		 * Don't take the mm semaphore here. If we fixup a prefetch
+-		 * fault we could otherwise deadlock.
+-		 */
+-		goto bad_area_nosemaphore;
 -	}
--      if ( i == MAX_PRINTED_BYTES )
--	printk(" [more..]\n");
--      else
--	{
--	  FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
--	  
--	  if (FPU_modrm >= 0300)
--	    printk(" %02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8, FPU_modrm & 7);
--	  else
--	    printk(" /%d, mod=%d rm=%d\n",
--		   (FPU_modrm >> 3) & 7, (FPU_modrm >> 6) & 3, FPU_modrm & 7);
-+		for (i = 0; i < MAX_PRINTED_BYTES; i++) {
-+			FPU_get_user(byte1, (u_char __user *) address);
-+			if ((byte1 & 0xf8) == 0xd8) {
-+				printk(" %02x", byte1);
-+				break;
-+			}
-+			printk(" [%02x]", byte1);
-+			address++;
-+		}
-+		if (i == MAX_PRINTED_BYTES)
-+			printk(" [more..]\n");
-+		else {
-+			FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
-+
-+			if (FPU_modrm >= 0300)
-+				printk(" %02x (%02x+%d)\n", FPU_modrm,
-+				       FPU_modrm & 0xf8, FPU_modrm & 7);
-+			else
-+				printk(" /%d, mod=%d rm=%d\n",
-+				       (FPU_modrm >> 3) & 7,
-+				       (FPU_modrm >> 6) & 3, FPU_modrm & 7);
-+		}
-+	} else {
-+		printk("%04x\n", FPU_CS);
- 	}
--    }
--  else
--    {
--      printk("%04x\n", FPU_CS);
--    }
- 
--  partial_status = status_word();
-+	partial_status = status_word();
- 
- #ifdef DEBUGGING
--if ( partial_status & SW_Backward )    printk("SW: backward compatibility\n");
--if ( partial_status & SW_C3 )          printk("SW: condition bit 3\n");
--if ( partial_status & SW_C2 )          printk("SW: condition bit 2\n");
--if ( partial_status & SW_C1 )          printk("SW: condition bit 1\n");
--if ( partial_status & SW_C0 )          printk("SW: condition bit 0\n");
--if ( partial_status & SW_Summary )     printk("SW: exception summary\n");
--if ( partial_status & SW_Stack_Fault ) printk("SW: stack fault\n");
--if ( partial_status & SW_Precision )   printk("SW: loss of precision\n");
--if ( partial_status & SW_Underflow )   printk("SW: underflow\n");
--if ( partial_status & SW_Overflow )    printk("SW: overflow\n");
--if ( partial_status & SW_Zero_Div )    printk("SW: divide by zero\n");
--if ( partial_status & SW_Denorm_Op )   printk("SW: denormalized operand\n");
--if ( partial_status & SW_Invalid )     printk("SW: invalid operation\n");
-+	if (partial_status & SW_Backward)
-+		printk("SW: backward compatibility\n");
-+	if (partial_status & SW_C3)
-+		printk("SW: condition bit 3\n");
-+	if (partial_status & SW_C2)
-+		printk("SW: condition bit 2\n");
-+	if (partial_status & SW_C1)
-+		printk("SW: condition bit 1\n");
-+	if (partial_status & SW_C0)
-+		printk("SW: condition bit 0\n");
-+	if (partial_status & SW_Summary)
-+		printk("SW: exception summary\n");
-+	if (partial_status & SW_Stack_Fault)
-+		printk("SW: stack fault\n");
-+	if (partial_status & SW_Precision)
-+		printk("SW: loss of precision\n");
-+	if (partial_status & SW_Underflow)
-+		printk("SW: underflow\n");
-+	if (partial_status & SW_Overflow)
-+		printk("SW: overflow\n");
-+	if (partial_status & SW_Zero_Div)
-+		printk("SW: divide by zero\n");
-+	if (partial_status & SW_Denorm_Op)
-+		printk("SW: denormalized operand\n");
-+	if (partial_status & SW_Invalid)
-+		printk("SW: invalid operation\n");
- #endif /* DEBUGGING */
- 
--  printk(" SW: b=%d st=%ld es=%d sf=%d cc=%d%d%d%d ef=%d%d%d%d%d%d\n",
--	 partial_status & 0x8000 ? 1 : 0,   /* busy */
--	 (partial_status & 0x3800) >> 11,   /* stack top pointer */
--	 partial_status & 0x80 ? 1 : 0,     /* Error summary status */
--	 partial_status & 0x40 ? 1 : 0,     /* Stack flag */
--	 partial_status & SW_C3?1:0, partial_status & SW_C2?1:0, /* cc */
--	 partial_status & SW_C1?1:0, partial_status & SW_C0?1:0, /* cc */
--	 partial_status & SW_Precision?1:0, partial_status & SW_Underflow?1:0,
--	 partial_status & SW_Overflow?1:0, partial_status & SW_Zero_Div?1:0,
--	 partial_status & SW_Denorm_Op?1:0, partial_status & SW_Invalid?1:0);
--  
--printk(" CW: ic=%d rc=%ld%ld pc=%ld%ld iem=%d     ef=%d%d%d%d%d%d\n",
--	 control_word & 0x1000 ? 1 : 0,
--	 (control_word & 0x800) >> 11, (control_word & 0x400) >> 10,
--	 (control_word & 0x200) >> 9, (control_word & 0x100) >> 8,
--	 control_word & 0x80 ? 1 : 0,
--	 control_word & SW_Precision?1:0, control_word & SW_Underflow?1:0,
--	 control_word & SW_Overflow?1:0, control_word & SW_Zero_Div?1:0,
--	 control_word & SW_Denorm_Op?1:0, control_word & SW_Invalid?1:0);
 -
--  for ( i = 0; i < 8; i++ )
--    {
--      FPU_REG *r = &st(i);
--      u_char tagi = FPU_gettagi(i);
--      switch (tagi)
--	{
--	case TAG_Empty:
--	  continue;
--	  break;
--	case TAG_Zero:
--	case TAG_Special:
--	  tagi = FPU_Special(r);
--	case TAG_Valid:
--	  printk("st(%d)  %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
--		 getsign(r) ? '-' : '+',
--		 (long)(r->sigh >> 16),
--		 (long)(r->sigh & 0xFFFF),
--		 (long)(r->sigl >> 16),
--		 (long)(r->sigl & 0xFFFF),
--		 exponent(r) - EXP_BIAS + 1);
--	  break;
--	default:
--	  printk("Whoops! Error in errors.c: tag%d is %d ", i, tagi);
--	  continue;
--	  break;
-+	printk(" SW: b=%d st=%d es=%d sf=%d cc=%d%d%d%d ef=%d%d%d%d%d%d\n", partial_status & 0x8000 ? 1 : 0,	/* busy */
-+	       (partial_status & 0x3800) >> 11,	/* stack top pointer */
-+	       partial_status & 0x80 ? 1 : 0,	/* Error summary status */
-+	       partial_status & 0x40 ? 1 : 0,	/* Stack flag */
-+	       partial_status & SW_C3 ? 1 : 0, partial_status & SW_C2 ? 1 : 0,	/* cc */
-+	       partial_status & SW_C1 ? 1 : 0, partial_status & SW_C0 ? 1 : 0,	/* cc */
-+	       partial_status & SW_Precision ? 1 : 0,
-+	       partial_status & SW_Underflow ? 1 : 0,
-+	       partial_status & SW_Overflow ? 1 : 0,
-+	       partial_status & SW_Zero_Div ? 1 : 0,
-+	       partial_status & SW_Denorm_Op ? 1 : 0,
-+	       partial_status & SW_Invalid ? 1 : 0);
-+
-+	printk(" CW: ic=%d rc=%d%d pc=%d%d iem=%d     ef=%d%d%d%d%d%d\n",
-+	       control_word & 0x1000 ? 1 : 0,
-+	       (control_word & 0x800) >> 11, (control_word & 0x400) >> 10,
-+	       (control_word & 0x200) >> 9, (control_word & 0x100) >> 8,
-+	       control_word & 0x80 ? 1 : 0,
-+	       control_word & SW_Precision ? 1 : 0,
-+	       control_word & SW_Underflow ? 1 : 0,
-+	       control_word & SW_Overflow ? 1 : 0,
-+	       control_word & SW_Zero_Div ? 1 : 0,
-+	       control_word & SW_Denorm_Op ? 1 : 0,
-+	       control_word & SW_Invalid ? 1 : 0);
-+
-+	for (i = 0; i < 8; i++) {
-+		FPU_REG *r = &st(i);
-+		u_char tagi = FPU_gettagi(i);
-+		switch (tagi) {
-+		case TAG_Empty:
-+			continue;
-+			break;
-+		case TAG_Zero:
-+		case TAG_Special:
-+			tagi = FPU_Special(r);
-+		case TAG_Valid:
-+			printk("st(%d)  %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
-+			       getsign(r) ? '-' : '+',
-+			       (long)(r->sigh >> 16),
-+			       (long)(r->sigh & 0xFFFF),
-+			       (long)(r->sigl >> 16),
-+			       (long)(r->sigl & 0xFFFF),
-+			       exponent(r) - EXP_BIAS + 1);
-+			break;
-+		default:
-+			printk("Whoops! Error in errors.c: tag%d is %d ", i,
-+			       tagi);
-+			continue;
-+			break;
-+		}
-+		printk("%s\n", tag_desc[(int)(unsigned)tagi]);
- 	}
--      printk("%s\n", tag_desc[(int) (unsigned) tagi]);
--    }
- 
--  RE_ENTRANT_CHECK_ON;
-+	RE_ENTRANT_CHECK_ON;
- 
- }
- 
- static struct {
--  int type;
--  const char *name;
-+	int type;
-+	const char *name;
- } exception_names[] = {
--  { EX_StackOver, "stack overflow" },
--  { EX_StackUnder, "stack underflow" },
--  { EX_Precision, "loss of precision" },
--  { EX_Underflow, "underflow" },
--  { EX_Overflow, "overflow" },
--  { EX_ZeroDiv, "divide by zero" },
--  { EX_Denormal, "denormalized operand" },
--  { EX_Invalid, "invalid operation" },
--  { EX_INTERNAL, "INTERNAL BUG in "FPU_VERSION },
--  { 0, NULL }
-+	{
-+	EX_StackOver, "stack overflow"}, {
-+	EX_StackUnder, "stack underflow"}, {
-+	EX_Precision, "loss of precision"}, {
-+	EX_Underflow, "underflow"}, {
-+	EX_Overflow, "overflow"}, {
-+	EX_ZeroDiv, "divide by zero"}, {
-+	EX_Denormal, "denormalized operand"}, {
-+	EX_Invalid, "invalid operation"}, {
-+	EX_INTERNAL, "INTERNAL BUG in " FPU_VERSION}, {
-+	0, NULL}
- };
- 
- /*
-@@ -295,445 +304,386 @@ static struct {
- 
- asmlinkage void FPU_exception(int n)
- {
--  int i, int_type;
+-	if (notify_page_fault(regs))
+-		return;
 -
--  int_type = 0;         /* Needed only to stop compiler warnings */
--  if ( n & EX_INTERNAL )
--    {
--      int_type = n - EX_INTERNAL;
--      n = EX_INTERNAL;
--      /* Set lots of exception bits! */
--      partial_status |= (SW_Exc_Mask | SW_Summary | SW_Backward);
--    }
--  else
--    {
--      /* Extract only the bits which we use to set the status word */
--      n &= (SW_Exc_Mask);
--      /* Set the corresponding exception bit */
--      partial_status |= n;
--      /* Set summary bits iff exception isn't masked */
--      if ( partial_status & ~control_word & CW_Exceptions )
--	partial_status |= (SW_Summary | SW_Backward);
--      if ( n & (SW_Stack_Fault | EX_Precision) )
--	{
--	  if ( !(n & SW_C1) )
--	    /* This bit distinguishes over- from underflow for a stack fault,
--	       and roundup from round-down for precision loss. */
--	    partial_status &= ~SW_C1;
-+	int i, int_type;
-+
-+	int_type = 0;		/* Needed only to stop compiler warnings */
-+	if (n & EX_INTERNAL) {
-+		int_type = n - EX_INTERNAL;
-+		n = EX_INTERNAL;
-+		/* Set lots of exception bits! */
-+		partial_status |= (SW_Exc_Mask | SW_Summary | SW_Backward);
-+	} else {
-+		/* Extract only the bits which we use to set the status word */
-+		n &= (SW_Exc_Mask);
-+		/* Set the corresponding exception bit */
-+		partial_status |= n;
-+		/* Set summary bits iff exception isn't masked */
-+		if (partial_status & ~control_word & CW_Exceptions)
-+			partial_status |= (SW_Summary | SW_Backward);
-+		if (n & (SW_Stack_Fault | EX_Precision)) {
-+			if (!(n & SW_C1))
-+				/* This bit distinguishes over- from underflow for a stack fault,
-+				   and roundup from round-down for precision loss. */
-+				partial_status &= ~SW_C1;
-+		}
- 	}
--    }
- 
--  RE_ENTRANT_CHECK_OFF;
--  if ( (~control_word & n & CW_Exceptions) || (n == EX_INTERNAL) )
--    {
-+	RE_ENTRANT_CHECK_OFF;
-+	if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) {
- #ifdef PRINT_MESSAGES
--      /* My message from the sponsor */
--      printk(FPU_VERSION" "__DATE__" (C) W. Metzenthen.\n");
-+		/* My message from the sponsor */
-+		printk(FPU_VERSION " " __DATE__ " (C) W. Metzenthen.\n");
- #endif /* PRINT_MESSAGES */
--      
--      /* Get a name string for error reporting */
--      for (i=0; exception_names[i].type; i++)
--	if ( (exception_names[i].type & n) == exception_names[i].type )
--	  break;
--      
--      if (exception_names[i].type)
--	{
-+
-+		/* Get a name string for error reporting */
-+		for (i = 0; exception_names[i].type; i++)
-+			if ((exception_names[i].type & n) ==
-+			    exception_names[i].type)
-+				break;
-+
-+		if (exception_names[i].type) {
- #ifdef PRINT_MESSAGES
--	  printk("FP Exception: %s!\n", exception_names[i].name);
-+			printk("FP Exception: %s!\n", exception_names[i].name);
- #endif /* PRINT_MESSAGES */
+-	if (likely(regs->eflags & X86_EFLAGS_IF))
+-		local_irq_enable();
+-
+-	if (unlikely(error_code & PF_RSVD))
+-		pgtable_bad(address, regs, error_code);
+-
+-	/*
+-	 * If we're in an interrupt or have no user
+-	 * context, we must not take the fault..
+-	 */
+-	if (unlikely(in_atomic() || !mm))
+-		goto bad_area_nosemaphore;
+-
+-	/*
+-	 * User-mode registers count as a user access even for any
+-	 * potential system fault or CPU buglet.
+-	 */
+-	if (user_mode_vm(regs))
+-		error_code |= PF_USER;
+-
+- again:
+-	/* When running in the kernel we expect faults to occur only to
+-	 * addresses in user space.  All other faults represent errors in the
+-	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
+-	 * erroneous fault occurring in a code path which already holds mmap_sem
+-	 * we will deadlock attempting to validate the fault against the
+-	 * address space.  Luckily the kernel only validly references user
+-	 * space from well defined areas of code, which are listed in the
+-	 * exceptions table.
+-	 *
+-	 * As the vast majority of faults will be valid we will only perform
+-	 * the source reference check when there is a possibility of a deadlock.
+-	 * Attempt to lock the address space, if we cannot we then validate the
+-	 * source.  If this is invalid we can skip the address space check,
+-	 * thus avoiding the deadlock.
+-	 */
+-	if (!down_read_trylock(&mm->mmap_sem)) {
+-		if ((error_code & PF_USER) == 0 &&
+-		    !search_exception_tables(regs->rip))
+-			goto bad_area_nosemaphore;
+-		down_read(&mm->mmap_sem);
 -	}
--      else
--	printk("FPU emulator: Unknown Exception: 0x%04x!\n", n);
--      
--      if ( n == EX_INTERNAL )
--	{
--	  printk("FPU emulator: Internal error type 0x%04x\n", int_type);
--	  FPU_printall();
+-
+-	vma = find_vma(mm, address);
+-	if (!vma)
+-		goto bad_area;
+-	if (likely(vma->vm_start <= address))
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (error_code & 4) {
+-		/* Allow userspace just enough access below the stack pointer
+-		 * to let the 'enter' instruction work.
+-		 */
+-		if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
+-			goto bad_area;
 -	}
-+		} else
-+			printk("FPU emulator: Unknown Exception: 0x%04x!\n", n);
-+
-+		if (n == EX_INTERNAL) {
-+			printk("FPU emulator: Internal error type 0x%04x\n",
-+			       int_type);
-+			FPU_printall();
-+		}
- #ifdef PRINT_MESSAGES
--      else
--	FPU_printall();
-+		else
-+			FPU_printall();
- #endif /* PRINT_MESSAGES */
- 
--      /*
--       * The 80486 generates an interrupt on the next non-control FPU
--       * instruction. So we need some means of flagging it.
--       * We use the ES (Error Summary) bit for this.
--       */
--    }
--  RE_ENTRANT_CHECK_ON;
-+		/*
-+		 * The 80486 generates an interrupt on the next non-control FPU
-+		 * instruction. So we need some means of flagging it.
-+		 * We use the ES (Error Summary) bit for this.
-+		 */
-+	}
-+	RE_ENTRANT_CHECK_ON;
- 
- #ifdef __DEBUG__
--  math_abort(FPU_info,SIGFPE);
-+	math_abort(FPU_info, SIGFPE);
- #endif /* __DEBUG__ */
- 
- }
- 
+-	if (expand_stack(vma, address))
+-		goto bad_area;
+-/*
+- * Ok, we have a good vm_area for this memory access, so
+- * we can handle it..
+- */
+-good_area:
+-	info.si_code = SEGV_ACCERR;
+-	write = 0;
+-	switch (error_code & (PF_PROT|PF_WRITE)) {
+-		default:	/* 3: write, present */
+-			/* fall through */
+-		case PF_WRITE:		/* write, not present */
+-			if (!(vma->vm_flags & VM_WRITE))
+-				goto bad_area;
+-			write++;
+-			break;
+-		case PF_PROT:		/* read, present */
+-			goto bad_area;
+-		case 0:			/* read, not present */
+-			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+-				goto bad_area;
+-	}
+-
+-	/*
+-	 * If for any reason at all we couldn't handle the fault,
+-	 * make sure we exit gracefully rather than endlessly redo
+-	 * the fault.
+-	 */
+-	fault = handle_mm_fault(mm, vma, address, write);
+-	if (unlikely(fault & VM_FAULT_ERROR)) {
+-		if (fault & VM_FAULT_OOM)
+-			goto out_of_memory;
+-		else if (fault & VM_FAULT_SIGBUS)
+-			goto do_sigbus;
+-		BUG();
+-	}
+-	if (fault & VM_FAULT_MAJOR)
+-		tsk->maj_flt++;
+-	else
+-		tsk->min_flt++;
+-	up_read(&mm->mmap_sem);
+-	return;
+-
+-/*
+- * Something tried to access memory that isn't in our memory map..
+- * Fix it, but check if it's kernel or user first..
+- */
+-bad_area:
+-	up_read(&mm->mmap_sem);
+-
+-bad_area_nosemaphore:
+-	/* User mode accesses just cause a SIGSEGV */
+-	if (error_code & PF_USER) {
+-
+-		/*
+-		 * It's possible to have interrupts off here.
+-		 */
+-		local_irq_enable();
+-
+-		if (is_prefetch(regs, address, error_code))
+-			return;
+-
+-		/* Work around K8 erratum #100 K8 in compat mode
+-		   occasionally jumps to illegal addresses >4GB.  We
+-		   catch this here in the page fault handler because
+-		   these addresses are not reachable. Just detect this
+-		   case and return.  Any code segment in LDT is
+-		   compatibility mode. */
+-		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
+-		    (address >> 32))
+-			return;
 -
- /* Real operation attempted on a NaN. */
- /* Returns < 0 if the exception is unmasked */
- int real_1op_NaN(FPU_REG *a)
- {
--  int signalling, isNaN;
+-		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+-		    printk_ratelimit()) {
+-			printk(
+-		       "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
+-					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
+-					tsk->comm, tsk->pid, address, regs->rip,
+-					regs->rsp, error_code);
+-		}
+-       
+-		tsk->thread.cr2 = address;
+-		/* Kernel addresses are always protection faults */
+-		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+-		tsk->thread.trap_no = 14;
+-		info.si_signo = SIGSEGV;
+-		info.si_errno = 0;
+-		/* info.si_code has been set above */
+-		info.si_addr = (void __user *)address;
+-		force_sig_info(SIGSEGV, &info, tsk);
+-		return;
+-	}
 -
--  isNaN = (exponent(a) == EXP_OVER) && (a->sigh & 0x80000000);
+-no_context:
+-	
+-	/* Are we prepared to handle this kernel fault?  */
+-	fixup = search_exception_tables(regs->rip);
+-	if (fixup) {
+-		regs->rip = fixup->fixup;
+-		return;
+-	}
 -
--  /* The default result for the case of two "equal" NaNs (signs may
--     differ) is chosen to reproduce 80486 behaviour */
--  signalling = isNaN && !(a->sigh & 0x40000000);
+-	/* 
+-	 * Hall of shame of CPU/BIOS bugs.
+-	 */
 -
--  if ( !signalling )
--    {
--      if ( !isNaN )  /* pseudo-NaN, or other unsupported? */
--	{
--	  if ( control_word & CW_Invalid )
--	    {
--	      /* Masked response */
--	      reg_copy(&CONST_QNaN, a);
--	    }
--	  EXCEPTION(EX_Invalid);
--	  return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
-+	int signalling, isNaN;
-+
-+	isNaN = (exponent(a) == EXP_OVER) && (a->sigh & 0x80000000);
-+
-+	/* The default result for the case of two "equal" NaNs (signs may
-+	   differ) is chosen to reproduce 80486 behaviour */
-+	signalling = isNaN && !(a->sigh & 0x40000000);
-+
-+	if (!signalling) {
-+		if (!isNaN) {	/* pseudo-NaN, or other unsupported? */
-+			if (control_word & CW_Invalid) {
-+				/* Masked response */
-+				reg_copy(&CONST_QNaN, a);
-+			}
-+			EXCEPTION(EX_Invalid);
-+			return (!(control_word & CW_Invalid) ? FPU_Exception :
-+				0) | TAG_Special;
-+		}
-+		return TAG_Special;
- 	}
--      return TAG_Special;
--    }
- 
--  if ( control_word & CW_Invalid )
--    {
--      /* The masked response */
--      if ( !(a->sigh & 0x80000000) )  /* pseudo-NaN ? */
--	{
--	  reg_copy(&CONST_QNaN, a);
-+	if (control_word & CW_Invalid) {
-+		/* The masked response */
-+		if (!(a->sigh & 0x80000000)) {	/* pseudo-NaN ? */
-+			reg_copy(&CONST_QNaN, a);
-+		}
-+		/* ensure a Quiet NaN */
-+		a->sigh |= 0x40000000;
- 	}
--      /* ensure a Quiet NaN */
--      a->sigh |= 0x40000000;
--    }
- 
--  EXCEPTION(EX_Invalid);
-+	EXCEPTION(EX_Invalid);
- 
--  return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
-+	return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
- }
- 
+- 	if (is_prefetch(regs, address, error_code))
+- 		return;
 -
- /* Real operation attempted on two operands, one a NaN. */
- /* Returns < 0 if the exception is unmasked */
- int real_2op_NaN(FPU_REG const *b, u_char tagb,
--		 int deststnr,
--		 FPU_REG const *defaultNaN)
-+		 int deststnr, FPU_REG const *defaultNaN)
- {
--  FPU_REG *dest = &st(deststnr);
--  FPU_REG const *a = dest;
--  u_char taga = FPU_gettagi(deststnr);
--  FPU_REG const *x;
--  int signalling, unsupported;
+-	if (is_errata93(regs, address))
+-		return; 
 -
--  if ( taga == TAG_Special )
--    taga = FPU_Special(a);
--  if ( tagb == TAG_Special )
--    tagb = FPU_Special(b);
+-/*
+- * Oops. The kernel tried to access some bad page. We'll have to
+- * terminate things with extreme prejudice.
+- */
 -
--  /* TW_NaN is also used for unsupported data types. */
--  unsupported = ((taga == TW_NaN)
--		 && !((exponent(a) == EXP_OVER) && (a->sigh & 0x80000000)))
--    || ((tagb == TW_NaN)
--	&& !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)));
--  if ( unsupported )
--    {
--      if ( control_word & CW_Invalid )
--	{
--	  /* Masked response */
--	  FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
+-	flags = oops_begin();
+-
+-	if (address < PAGE_SIZE)
+-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+-	else
+-		printk(KERN_ALERT "Unable to handle kernel paging request");
+-	printk(" at %016lx RIP: \n" KERN_ALERT,address);
+-	printk_address(regs->rip);
+-	dump_pagetable(address);
+-	tsk->thread.cr2 = address;
+-	tsk->thread.trap_no = 14;
+-	tsk->thread.error_code = error_code;
+-	__die("Oops", regs, error_code);
+-	/* Executive summary in case the body of the oops scrolled away */
+-	printk(KERN_EMERG "CR2: %016lx\n", address);
+-	oops_end(flags);
+-	do_exit(SIGKILL);
+-
+-/*
+- * We ran out of memory, or some other thing happened to us that made
+- * us unable to handle the page fault gracefully.
+- */
+-out_of_memory:
+-	up_read(&mm->mmap_sem);
+-	if (is_global_init(current)) {
+-		yield();
+-		goto again;
 -	}
--      EXCEPTION(EX_Invalid);
--      return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
--    }
+-	printk("VM: killing process %s\n", tsk->comm);
+-	if (error_code & 4)
+-		do_group_exit(SIGKILL);
+-	goto no_context;
 -
--  if (taga == TW_NaN)
--    {
--      x = a;
--      if (tagb == TW_NaN)
--	{
--	  signalling = !(a->sigh & b->sigh & 0x40000000);
--	  if ( significand(b) > significand(a) )
--	    x = b;
--	  else if ( significand(b) == significand(a) )
--	    {
--	      /* The default result for the case of two "equal" NaNs (signs may
--		 differ) is chosen to reproduce 80486 behaviour */
--	      x = defaultNaN;
--	    }
+-do_sigbus:
+-	up_read(&mm->mmap_sem);
+-
+-	/* Kernel mode? Handle exceptions or die */
+-	if (!(error_code & PF_USER))
+-		goto no_context;
+-
+-	tsk->thread.cr2 = address;
+-	tsk->thread.error_code = error_code;
+-	tsk->thread.trap_no = 14;
+-	info.si_signo = SIGBUS;
+-	info.si_errno = 0;
+-	info.si_code = BUS_ADRERR;
+-	info.si_addr = (void __user *)address;
+-	force_sig_info(SIGBUS, &info, tsk);
+-	return;
+-}
+-
+-DEFINE_SPINLOCK(pgd_lock);
+-LIST_HEAD(pgd_list);
+-
+-void vmalloc_sync_all(void)
+-{
+-	/* Note that races in the updates of insync and start aren't 
+-	   problematic:
+-	   insync can only get set bits added, and updates to start are only
+-	   improving performance (without affecting correctness if undone). */
+-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+-	static unsigned long start = VMALLOC_START & PGDIR_MASK;
+-	unsigned long address;
+-
+-	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+-		if (!test_bit(pgd_index(address), insync)) {
+-			const pgd_t *pgd_ref = pgd_offset_k(address);
+-			struct page *page;
+-
+-			if (pgd_none(*pgd_ref))
+-				continue;
+-			spin_lock(&pgd_lock);
+-			list_for_each_entry(page, &pgd_list, lru) {
+-				pgd_t *pgd;
+-				pgd = (pgd_t *)page_address(page) + pgd_index(address);
+-				if (pgd_none(*pgd))
+-					set_pgd(pgd, *pgd_ref);
+-				else
+-					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+-			}
+-			spin_unlock(&pgd_lock);
+-			set_bit(pgd_index(address), insync);
+-		}
+-		if (address == start)
+-			start = address + PGDIR_SIZE;
 -	}
--      else
--	{
--	  /* return the quiet version of the NaN in a */
--	  signalling = !(a->sigh & 0x40000000);
-+	FPU_REG *dest = &st(deststnr);
-+	FPU_REG const *a = dest;
-+	u_char taga = FPU_gettagi(deststnr);
-+	FPU_REG const *x;
-+	int signalling, unsupported;
-+
-+	if (taga == TAG_Special)
-+		taga = FPU_Special(a);
-+	if (tagb == TAG_Special)
-+		tagb = FPU_Special(b);
+-	/* Check that there is no need to do the same for the modules area. */
+-	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
+-	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == 
+-				(__START_KERNEL & PGDIR_MASK)));
+-}
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index 1c3bf95..3d936f2 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -18,6 +18,49 @@ void kunmap(struct page *page)
+ 	kunmap_high(page);
+ }
+ 
++static void debug_kmap_atomic_prot(enum km_type type)
++{
++#ifdef CONFIG_DEBUG_HIGHMEM
++	static unsigned warn_count = 10;
 +
-+	/* TW_NaN is also used for unsupported data types. */
-+	unsupported = ((taga == TW_NaN)
-+		       && !((exponent(a) == EXP_OVER)
-+			    && (a->sigh & 0x80000000)))
-+	    || ((tagb == TW_NaN)
-+		&& !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)));
-+	if (unsupported) {
-+		if (control_word & CW_Invalid) {
-+			/* Masked response */
-+			FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
-+		}
-+		EXCEPTION(EX_Invalid);
-+		return (!(control_word & CW_Invalid) ? FPU_Exception : 0) |
-+		    TAG_Special;
- 	}
--    }
--  else
++	if (unlikely(warn_count == 0))
++		return;
 +
-+	if (taga == TW_NaN) {
-+		x = a;
-+		if (tagb == TW_NaN) {
-+			signalling = !(a->sigh & b->sigh & 0x40000000);
-+			if (significand(b) > significand(a))
-+				x = b;
-+			else if (significand(b) == significand(a)) {
-+				/* The default result for the case of two "equal" NaNs (signs may
-+				   differ) is chosen to reproduce 80486 behaviour */
-+				x = defaultNaN;
++	if (unlikely(in_interrupt())) {
++		if (in_irq()) {
++			if (type != KM_IRQ0 && type != KM_IRQ1 &&
++			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
++			    type != KM_BOUNCE_READ) {
++				WARN_ON(1);
++				warn_count--;
++			}
++		} else if (!irqs_disabled()) {	/* softirq */
++			if (type != KM_IRQ0 && type != KM_IRQ1 &&
++			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
++			    type != KM_SKB_SUNRPC_DATA &&
++			    type != KM_SKB_DATA_SOFTIRQ &&
++			    type != KM_BOUNCE_READ) {
++				WARN_ON(1);
++				warn_count--;
 +			}
-+		} else {
-+			/* return the quiet version of the NaN in a */
-+			signalling = !(a->sigh & 0x40000000);
 +		}
-+	} else
- #ifdef PARANOID
--    if (tagb == TW_NaN)
-+	if (tagb == TW_NaN)
- #endif /* PARANOID */
--    {
--      signalling = !(b->sigh & 0x40000000);
--      x = b;
--    }
-+	{
-+		signalling = !(b->sigh & 0x40000000);
-+		x = b;
 +	}
- #ifdef PARANOID
--  else
--    {
--      signalling = 0;
--      EXCEPTION(EX_INTERNAL|0x113);
--      x = &CONST_QNaN;
--    }
-+	else {
-+		signalling = 0;
-+		EXCEPTION(EX_INTERNAL | 0x113);
-+		x = &CONST_QNaN;
++
++	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
++			type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
++		if (!irqs_disabled()) {
++			WARN_ON(1);
++			warn_count--;
++		}
++	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
++		if (irq_count() == 0 && !irqs_disabled()) {
++			WARN_ON(1);
++			warn_count--;
++		}
 +	}
- #endif /* PARANOID */
- 
--  if ( (!signalling) || (control_word & CW_Invalid) )
--    {
--      if ( ! x )
--	x = b;
-+	if ((!signalling) || (control_word & CW_Invalid)) {
-+		if (!x)
-+			x = b;
- 
--      if ( !(x->sigh & 0x80000000) )  /* pseudo-NaN ? */
--	x = &CONST_QNaN;
-+		if (!(x->sigh & 0x80000000))	/* pseudo-NaN ? */
-+			x = &CONST_QNaN;
++#endif
++}
++
+ /*
+  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+  * no global lock is needed and because the kmap code must perform a global TLB
+@@ -30,8 +73,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+ {
+ 	enum fixed_addresses idx;
+ 	unsigned long vaddr;
+-
+ 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++
++	debug_kmap_atomic_prot(type);
++
+ 	pagefault_disable();
  
--      FPU_copy_to_regi(x, TAG_Special, deststnr);
-+		FPU_copy_to_regi(x, TAG_Special, deststnr);
+ 	if (!PageHighMem(page))
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index 6c06d9c..4fbafb4 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -15,6 +15,7 @@
+ #include <asm/mman.h>
+ #include <asm/tlb.h>
+ #include <asm/tlbflush.h>
++#include <asm/pgalloc.h>
  
--      if ( !signalling )
--	return TAG_Special;
-+		if (!signalling)
-+			return TAG_Special;
+ static unsigned long page_table_shareable(struct vm_area_struct *svma,
+ 				struct vm_area_struct *vma,
+@@ -88,7 +89,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
  
--      /* ensure a Quiet NaN */
--      dest->sigh |= 0x40000000;
--    }
-+		/* ensure a Quiet NaN */
-+		dest->sigh |= 0x40000000;
-+	}
+ 	spin_lock(&mm->page_table_lock);
+ 	if (pud_none(*pud))
+-		pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK);
++		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
+ 	else
+ 		put_page(virt_to_page(spte));
+ 	spin_unlock(&mm->page_table_lock);
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 3c76d19..da524fb 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -27,7 +27,6 @@
+ #include <linux/bootmem.h>
+ #include <linux/slab.h>
+ #include <linux/proc_fs.h>
+-#include <linux/efi.h>
+ #include <linux/memory_hotplug.h>
+ #include <linux/initrd.h>
+ #include <linux/cpumask.h>
+@@ -40,8 +39,10 @@
+ #include <asm/fixmap.h>
+ #include <asm/e820.h>
+ #include <asm/apic.h>
++#include <asm/bugs.h>
+ #include <asm/tlb.h>
+ #include <asm/tlbflush.h>
++#include <asm/pgalloc.h>
+ #include <asm/sections.h>
+ #include <asm/paravirt.h>
  
--  EXCEPTION(EX_Invalid);
-+	EXCEPTION(EX_Invalid);
+@@ -50,7 +51,7 @@ unsigned int __VMALLOC_RESERVE = 128 << 20;
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+ unsigned long highstart_pfn, highend_pfn;
  
--  return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
-+	return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
- }
+-static int noinline do_test_wp_bit(void);
++static noinline int do_test_wp_bit(void);
  
--
- /* Invalid arith operation on Valid registers */
- /* Returns < 0 if the exception is unmasked */
- asmlinkage int arith_invalid(int deststnr)
+ /*
+  * Creates a middle page table and puts a pointer to it in the
+@@ -61,26 +62,26 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
  {
- 
--  EXCEPTION(EX_Invalid);
--  
--  if ( control_word & CW_Invalid )
--    {
--      /* The masked response */
--      FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
--    }
--  
--  return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Valid;
-+	EXCEPTION(EX_Invalid);
- 
--}
-+	if (control_word & CW_Invalid) {
-+		/* The masked response */
-+		FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
-+	}
- 
-+	return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Valid;
+ 	pud_t *pud;
+ 	pmd_t *pmd_table;
+-		
 +
-+}
+ #ifdef CONFIG_X86_PAE
+ 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
+ 		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
  
- /* Divide a finite number by zero */
- asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
- {
--  FPU_REG *dest = &st(deststnr);
--  int tag = TAG_Valid;
-+	FPU_REG *dest = &st(deststnr);
-+	int tag = TAG_Valid;
+-		paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
++		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+ 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+ 		pud = pud_offset(pgd, 0);
+-		if (pmd_table != pmd_offset(pud, 0))
+-			BUG();
++		BUG_ON(pmd_table != pmd_offset(pud, 0));
+ 	}
+ #endif
+ 	pud = pud_offset(pgd, 0);
+ 	pmd_table = pmd_offset(pud, 0);
 +
-+	if (control_word & CW_ZeroDiv) {
-+		/* The masked response */
-+		FPU_copy_to_regi(&CONST_INF, TAG_Special, deststnr);
-+		setsign(dest, sign);
-+		tag = TAG_Special;
-+	}
- 
--  if ( control_word & CW_ZeroDiv )
--    {
--      /* The masked response */
--      FPU_copy_to_regi(&CONST_INF, TAG_Special, deststnr);
--      setsign(dest, sign);
--      tag = TAG_Special;
--    }
-- 
--  EXCEPTION(EX_ZeroDiv);
-+	EXCEPTION(EX_ZeroDiv);
- 
--  return (!(control_word & CW_ZeroDiv) ? FPU_Exception : 0) | tag;
-+	return (!(control_word & CW_ZeroDiv) ? FPU_Exception : 0) | tag;
- 
+ 	return pmd_table;
  }
  
--
- /* This may be called often, so keep it lean */
- int set_precision_flag(int flags)
+ /*
+  * Create a page table and place a pointer to it in a middle page
+- * directory entry.
++ * directory entry:
+  */
+ static pte_t * __init one_page_table_init(pmd_t *pmd)
  {
--  if ( control_word & CW_Precision )
--    {
--      partial_status &= ~(SW_C1 & flags);
--      partial_status |= flags;   /* The masked response */
--      return 0;
--    }
--  else
--    {
--      EXCEPTION(flags);
--      return 1;
--    }
-+	if (control_word & CW_Precision) {
-+		partial_status &= ~(SW_C1 & flags);
-+		partial_status |= flags;	/* The masked response */
-+		return 0;
-+	} else {
-+		EXCEPTION(flags);
-+		return 1;
-+	}
- }
+@@ -90,9 +91,10 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ 		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
+ #endif
+-		if (!page_table)
++		if (!page_table) {
+ 			page_table =
+ 				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
++		}
  
--
- /* This may be called often, so keep it lean */
- asmlinkage void set_precision_flag_up(void)
- {
--  if ( control_word & CW_Precision )
--    partial_status |= (SW_Precision | SW_C1);   /* The masked response */
--  else
--    EXCEPTION(EX_Precision | SW_C1);
-+	if (control_word & CW_Precision)
-+		partial_status |= (SW_Precision | SW_C1);	/* The masked response */
-+	else
-+		EXCEPTION(EX_Precision | SW_C1);
+ 		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
+ 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+@@ -103,22 +105,21 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
  }
  
+ /*
+- * This function initializes a certain range of kernel virtual memory 
++ * This function initializes a certain range of kernel virtual memory
+  * with new bootmem page tables, everywhere page tables are missing in
+  * the given range.
+- */
 -
- /* This may be called often, so keep it lean */
- asmlinkage void set_precision_flag_down(void)
+-/*
+- * NOTE: The pagetables are allocated contiguous on the physical space 
+- * so we can cache the place of the first one and move around without 
++ *
++ * NOTE: The pagetables are allocated contiguous on the physical space
++ * so we can cache the place of the first one and move around without
+  * checking the pgd every time.
+  */
+-static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++static void __init
++page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
  {
--  if ( control_word & CW_Precision )
--    {   /* The masked response */
--      partial_status &= ~SW_C1;
--      partial_status |= SW_Precision;
--    }
--  else
--    EXCEPTION(EX_Precision);
-+	if (control_word & CW_Precision) {	/* The masked response */
-+		partial_status &= ~SW_C1;
-+		partial_status |= SW_Precision;
-+	} else
-+		EXCEPTION(EX_Precision);
- }
+-	pgd_t *pgd;
+-	pmd_t *pmd;
+ 	int pgd_idx, pmd_idx;
+ 	unsigned long vaddr;
++	pgd_t *pgd;
++	pmd_t *pmd;
  
--
- asmlinkage int denormal_operand(void)
- {
--  if ( control_word & CW_Denormal )
--    {   /* The masked response */
--      partial_status |= SW_Denorm_Op;
--      return TAG_Special;
--    }
--  else
--    {
--      EXCEPTION(EX_Denormal);
--      return TAG_Special | FPU_Exception;
--    }
-+	if (control_word & CW_Denormal) {	/* The masked response */
-+		partial_status |= SW_Denorm_Op;
-+		return TAG_Special;
-+	} else {
-+		EXCEPTION(EX_Denormal);
-+		return TAG_Special | FPU_Exception;
-+	}
+ 	vaddr = start;
+ 	pgd_idx = pgd_index(vaddr);
+@@ -128,7 +129,8 @@ static void __init page_table_range_init (unsigned long start, unsigned long end
+ 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+ 		pmd = one_md_table_init(pgd);
+ 		pmd = pmd + pmd_index(vaddr);
+-		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
++							pmd++, pmd_idx++) {
+ 			one_page_table_init(pmd);
+ 
+ 			vaddr += PMD_SIZE;
+@@ -145,17 +147,17 @@ static inline int is_kernel_text(unsigned long addr)
  }
  
--
- asmlinkage int arith_overflow(FPU_REG *dest)
+ /*
+- * This maps the physical memory to kernel virtual address space, a total 
+- * of max_low_pfn pages, by creating page tables starting from address 
+- * PAGE_OFFSET.
++ * This maps the physical memory to kernel virtual address space, a total
++ * of max_low_pfn pages, by creating page tables starting from address
++ * PAGE_OFFSET:
+  */
+ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
  {
--  int tag = TAG_Valid;
-+	int tag = TAG_Valid;
++	int pgd_idx, pmd_idx, pte_ofs;
+ 	unsigned long pfn;
+ 	pgd_t *pgd;
+ 	pmd_t *pmd;
+ 	pte_t *pte;
+-	int pgd_idx, pmd_idx, pte_ofs;
  
--  if ( control_word & CW_Overflow )
--    {
--      /* The masked response */
-+	if (control_word & CW_Overflow) {
-+		/* The masked response */
- /* ###### The response here depends upon the rounding mode */
--      reg_copy(&CONST_INF, dest);
--      tag = TAG_Special;
--    }
--  else
--    {
--      /* Subtract the magic number from the exponent */
--      addexponent(dest, (-3 * (1 << 13)));
--    }
--
--  EXCEPTION(EX_Overflow);
--  if ( control_word & CW_Overflow )
--    {
--      /* The overflow exception is masked. */
--      /* By definition, precision is lost.
--	 The roundup bit (C1) is also set because we have
--	 "rounded" upwards to Infinity. */
--      EXCEPTION(EX_Precision | SW_C1);
--      return tag;
--    }
--
--  return tag;
-+		reg_copy(&CONST_INF, dest);
-+		tag = TAG_Special;
-+	} else {
-+		/* Subtract the magic number from the exponent */
-+		addexponent(dest, (-3 * (1 << 13)));
-+	}
+ 	pgd_idx = pgd_index(PAGE_OFFSET);
+ 	pgd = pgd_base + pgd_idx;
+@@ -165,29 +167,43 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
+ 		pmd = one_md_table_init(pgd);
+ 		if (pfn >= max_low_pfn)
+ 			continue;
+-		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
+-			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
  
--}
-+	EXCEPTION(EX_Overflow);
-+	if (control_word & CW_Overflow) {
-+		/* The overflow exception is masked. */
-+		/* By definition, precision is lost.
-+		   The roundup bit (C1) is also set because we have
-+		   "rounded" upwards to Infinity. */
-+		EXCEPTION(EX_Precision | SW_C1);
-+		return tag;
-+	}
+-			/* Map with big pages if possible, otherwise create normal page tables. */
++		for (pmd_idx = 0;
++		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
++		     pmd++, pmd_idx++) {
++			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
 +
-+	return tag;
- 
-+}
++			/*
++			 * Map with big pages if possible, otherwise
++			 * create normal page tables:
++			 */
+ 			if (cpu_has_pse) {
+-				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
+-				if (is_kernel_text(address) || is_kernel_text(address2))
+-					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+-				else
+-					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++				unsigned int addr2;
++				pgprot_t prot = PAGE_KERNEL_LARGE;
++
++				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
++					PAGE_OFFSET + PAGE_SIZE-1;
++
++				if (is_kernel_text(addr) ||
++				    is_kernel_text(addr2))
++					prot = PAGE_KERNEL_LARGE_EXEC;
++
++				set_pmd(pmd, pfn_pmd(pfn, prot));
  
- asmlinkage int arith_underflow(FPU_REG *dest)
- {
--  int tag = TAG_Valid;
+ 				pfn += PTRS_PER_PTE;
+-			} else {
+-				pte = one_page_table_init(pmd);
 -
--  if ( control_word & CW_Underflow )
--    {
--      /* The masked response */
--      if ( exponent16(dest) <= EXP_UNDER - 63 )
--	{
--	  reg_copy(&CONST_Z, dest);
--	  partial_status &= ~SW_C1;       /* Round down. */
--	  tag = TAG_Zero;
-+	int tag = TAG_Valid;
+-				for (pte_ofs = 0;
+-				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
+-				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+-					if (is_kernel_text(address))
+-						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+-					else
+-						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+-				}
++				continue;
++			}
++			pte = one_page_table_init(pmd);
 +
-+	if (control_word & CW_Underflow) {
-+		/* The masked response */
-+		if (exponent16(dest) <= EXP_UNDER - 63) {
-+			reg_copy(&CONST_Z, dest);
-+			partial_status &= ~SW_C1;	/* Round down. */
-+			tag = TAG_Zero;
-+		} else {
-+			stdexp(dest);
-+		}
-+	} else {
-+		/* Add the magic number to the exponent. */
-+		addexponent(dest, (3 * (1 << 13)) + EXTENDED_Ebias);
- 	}
--      else
--	{
--	  stdexp(dest);
++			for (pte_ofs = 0;
++			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
++			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
++				pgprot_t prot = PAGE_KERNEL;
 +
-+	EXCEPTION(EX_Underflow);
-+	if (control_word & CW_Underflow) {
-+		/* The underflow exception is masked. */
-+		EXCEPTION(EX_Precision);
-+		return tag;
++				if (is_kernel_text(addr))
++					prot = PAGE_KERNEL_EXEC;
++
++				set_pte(pte, pfn_pte(pfn, prot));
+ 			}
+ 		}
  	}
--    }
--  else
--    {
--      /* Add the magic number to the exponent. */
--      addexponent(dest, (3 * (1 << 13)) + EXTENDED_Ebias);
--    }
+@@ -200,57 +216,23 @@ static inline int page_kills_ppro(unsigned long pagenr)
+ 	return 0;
+ }
+ 
+-int page_is_ram(unsigned long pagenr)
+-{
+-	int i;
+-	unsigned long addr, end;
 -
--  EXCEPTION(EX_Underflow);
--  if ( control_word & CW_Underflow )
--    {
--      /* The underflow exception is masked. */
--      EXCEPTION(EX_Precision);
--      return tag;
--    }
+-	if (efi_enabled) {
+-		efi_memory_desc_t *md;
+-		void *p;
 -
--  return tag;
- 
+-		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+-			md = p;
+-			if (!is_available_memory(md))
+-				continue;
+-			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+-			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
+-
+-			if ((pagenr >= addr) && (pagenr < end))
+-				return 1;
+-		}
+-		return 0;
+-	}
+-
+-	for (i = 0; i < e820.nr_map; i++) {
+-
+-		if (e820.map[i].type != E820_RAM)	/* not usable memory */
+-			continue;
+-		/*
+-		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
+-		 *	are not. Notably the 640->1Mb area. We need a sanity
+-		 *	check here.
+-		 */
+-		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+-		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
+-		if  ((pagenr >= addr) && (pagenr < end))
+-			return 1;
+-	}
+-	return 0;
 -}
-+	return tag;
+-
+ #ifdef CONFIG_HIGHMEM
+ pte_t *kmap_pte;
+ pgprot_t kmap_prot;
  
+-#define kmap_get_fixmap_pte(vaddr)					\
+-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
++{
++	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
++			vaddr), vaddr), vaddr);
 +}
  
- void FPU_stack_overflow(void)
+ static void __init kmap_init(void)
  {
+ 	unsigned long kmap_vstart;
  
-- if ( control_word & CW_Invalid )
--    {
--      /* The masked response */
--      top--;
--      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
--    }
-+	if (control_word & CW_Invalid) {
-+		/* The masked response */
-+		top--;
-+		FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
-+	}
- 
--  EXCEPTION(EX_StackOver);
-+	EXCEPTION(EX_StackOver);
- 
--  return;
-+	return;
+-	/* cache the first kmap pte */
++	/*
++	 * Cache the first kmap pte:
++	 */
+ 	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ 	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
  
- }
+@@ -259,11 +241,11 @@ static void __init kmap_init(void)
  
--
- void FPU_stack_underflow(void)
+ static void __init permanent_kmaps_init(pgd_t *pgd_base)
  {
++	unsigned long vaddr;
+ 	pgd_t *pgd;
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+ 	pte_t *pte;
+-	unsigned long vaddr;
  
-- if ( control_word & CW_Invalid )
--    {
--      /* The masked response */
--      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
--    }
-+	if (control_word & CW_Invalid) {
-+		/* The masked response */
-+		FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
-+	}
+ 	vaddr = PKMAP_BASE;
+ 	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+@@ -272,7 +254,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
+ 	pud = pud_offset(pgd, vaddr);
+ 	pmd = pmd_offset(pud, vaddr);
+ 	pte = pte_offset_kernel(pmd, vaddr);
+-	pkmap_page_table = pte;	
++	pkmap_page_table = pte;
+ }
  
--  EXCEPTION(EX_StackUnder);
-+	EXCEPTION(EX_StackUnder);
+ static void __meminit free_new_highpage(struct page *page)
+@@ -291,7 +273,8 @@ void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
+ 		SetPageReserved(page);
+ }
  
--  return;
-+	return;
+-static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++static int __meminit
++add_one_highpage_hotplug(struct page *page, unsigned long pfn)
+ {
+ 	free_new_highpage(page);
+ 	totalram_pages++;
+@@ -299,6 +282,7 @@ static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long p
+ 	max_mapnr = max(pfn, max_mapnr);
+ #endif
+ 	num_physpages++;
++
+ 	return 0;
+ }
  
+@@ -306,7 +290,7 @@ static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long p
+  * Not currently handling the NUMA case.
+  * Assuming single node and all memory that
+  * has been added dynamically that would be
+- * onlined here is in HIGHMEM
++ * onlined here is in HIGHMEM.
+  */
+ void __meminit online_page(struct page *page)
+ {
+@@ -314,13 +298,11 @@ void __meminit online_page(struct page *page)
+ 	add_one_highpage_hotplug(page, page_to_pfn(page));
  }
  
 -
- void FPU_stack_underflow_i(int i)
+-#ifdef CONFIG_NUMA
+-extern void set_highmem_pages_init(int);
+-#else
++#ifndef CONFIG_NUMA
+ static void __init set_highmem_pages_init(int bad_ppro)
  {
+ 	int pfn;
++
+ 	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
+ 		/*
+ 		 * Holes under sparsemem might not have no mem_map[]:
+@@ -330,23 +312,18 @@ static void __init set_highmem_pages_init(int bad_ppro)
+ 	}
+ 	totalram_pages += totalhigh_pages;
+ }
+-#endif /* CONFIG_FLATMEM */
++#endif /* !CONFIG_NUMA */
  
-- if ( control_word & CW_Invalid )
--    {
--      /* The masked response */
--      FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
--    }
-+	if (control_word & CW_Invalid) {
-+		/* The masked response */
-+		FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
-+	}
+ #else
+-#define kmap_init() do { } while (0)
+-#define permanent_kmaps_init(pgd_base) do { } while (0)
+-#define set_highmem_pages_init(bad_ppro) do { } while (0)
++# define kmap_init()				do { } while (0)
++# define permanent_kmaps_init(pgd_base)		do { } while (0)
++# define set_highmem_pages_init(bad_ppro)	do { } while (0)
+ #endif /* CONFIG_HIGHMEM */
  
--  EXCEPTION(EX_StackUnder);
-+	EXCEPTION(EX_StackUnder);
+-unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
+ EXPORT_SYMBOL(__PAGE_KERNEL);
+-unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
  
--  return;
-+	return;
+-#ifdef CONFIG_NUMA
+-extern void __init remap_numa_kva(void);
+-#else
+-#define remap_numa_kva() do {} while (0)
+-#endif
++pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
  
+ void __init native_pagetable_setup_start(pgd_t *base)
+ {
+@@ -372,7 +349,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
+ 	memset(&base[USER_PTRS_PER_PGD], 0,
+ 	       KERNEL_PGD_PTRS * sizeof(pgd_t));
+ #else
+-	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
++	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
+ #endif
  }
  
--
- void FPU_stack_underflow_pop(int i)
+@@ -410,10 +387,10 @@ void __init native_pagetable_setup_done(pgd_t *base)
+  * be partially populated, and so it avoids stomping on any existing
+  * mappings.
+  */
+-static void __init pagetable_init (void)
++static void __init pagetable_init(void)
  {
+-	unsigned long vaddr, end;
+ 	pgd_t *pgd_base = swapper_pg_dir;
++	unsigned long vaddr, end;
  
-- if ( control_word & CW_Invalid )
--    {
--      /* The masked response */
--      FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
--      FPU_pop();
--    }
-+	if (control_word & CW_Invalid) {
-+		/* The masked response */
-+		FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
-+		FPU_pop();
-+	}
+ 	paravirt_pagetable_setup_start(pgd_base);
  
--  EXCEPTION(EX_StackUnder);
-+	EXCEPTION(EX_StackUnder);
+@@ -435,9 +412,11 @@ static void __init pagetable_init (void)
+ 	 * Fixed mappings, only the page table structure has to be
+ 	 * created - mappings will be set by set_fixmap():
+ 	 */
++	early_ioremap_clear();
+ 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
+ 	page_table_range_init(vaddr, end, pgd_base);
++	early_ioremap_reset();
  
--  return;
-+	return;
+ 	permanent_kmaps_init(pgd_base);
  
- }
--
-diff --git a/arch/x86/math-emu/exception.h b/arch/x86/math-emu/exception.h
-index b463f21..67f43a4 100644
---- a/arch/x86/math-emu/exception.h
-+++ b/arch/x86/math-emu/exception.h
-@@ -9,7 +9,6 @@
- #ifndef _EXCEPTION_H_
- #define _EXCEPTION_H_
+@@ -450,7 +429,7 @@ static void __init pagetable_init (void)
+  * driver might have split up a kernel 4MB mapping.
+  */
+ char __nosavedata swsusp_pg_dir[PAGE_SIZE]
+-	__attribute__ ((aligned (PAGE_SIZE)));
++	__attribute__ ((aligned(PAGE_SIZE)));
  
--
- #ifdef __ASSEMBLY__
- #define	Const_(x)	$##x
- #else
-@@ -20,8 +19,8 @@
- #include "fpu_emu.h"
- #endif /* SW_C1 */
+ static inline void save_pg_dir(void)
+ {
+@@ -462,7 +441,7 @@ static inline void save_pg_dir(void)
+ }
+ #endif
  
--#define FPU_BUSY        Const_(0x8000)   /* FPU busy bit (8087 compatibility) */
--#define EX_ErrorSummary Const_(0x0080)   /* Error summary status */
-+#define FPU_BUSY        Const_(0x8000)	/* FPU busy bit (8087 compatibility) */
-+#define EX_ErrorSummary Const_(0x0080)	/* Error summary status */
- /* Special exceptions: */
- #define	EX_INTERNAL	Const_(0x8000)	/* Internal error in wm-FPU-emu */
- #define EX_StackOver	Const_(0x0041|SW_C1)	/* stack overflow */
-@@ -34,11 +33,9 @@
- #define EX_Denormal	Const_(0x0002)	/* denormalized operand */
- #define EX_Invalid	Const_(0x0001)	/* invalid operation */
+-void zap_low_mappings (void)
++void zap_low_mappings(void)
+ {
+ 	int i;
  
--
- #define PRECISION_LOST_UP    Const_((EX_Precision | SW_C1))
- #define PRECISION_LOST_DOWN  Const_(EX_Precision)
+@@ -474,22 +453,24 @@ void zap_low_mappings (void)
+ 	 * Note that "pgd_clear()" doesn't do it for
+ 	 * us, because pgd_clear() is a no-op on i386.
+ 	 */
+-	for (i = 0; i < USER_PTRS_PER_PGD; i++)
++	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+ #ifdef CONFIG_X86_PAE
+ 		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
+ #else
+ 		set_pgd(swapper_pg_dir+i, __pgd(0));
+ #endif
++	}
+ 	flush_tlb_all();
+ }
  
--
- #ifndef __ASSEMBLY__
+-int nx_enabled = 0;
++int nx_enabled;
++
++pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
++EXPORT_SYMBOL_GPL(__supported_pte_mask);
  
- #ifdef DEBUG
-@@ -48,6 +45,6 @@
- #define	EXCEPTION(x)	FPU_exception(x)
- #endif
+ #ifdef CONFIG_X86_PAE
  
--#endif /* __ASSEMBLY__ */ 
-+#endif /* __ASSEMBLY__ */
+-static int disable_nx __initdata = 0;
+-u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
+-EXPORT_SYMBOL_GPL(__supported_pte_mask);
++static int disable_nx __initdata;
  
- #endif /* _EXCEPTION_H_ */
-diff --git a/arch/x86/math-emu/fpu_arith.c b/arch/x86/math-emu/fpu_arith.c
-index 6972dec..aeab24e 100644
---- a/arch/x86/math-emu/fpu_arith.c
-+++ b/arch/x86/math-emu/fpu_arith.c
-@@ -15,160 +15,138 @@
- #include "control_w.h"
- #include "status_w.h"
+ /*
+  * noexec = on|off
+@@ -506,11 +487,14 @@ static int __init noexec_setup(char *str)
+ 			__supported_pte_mask |= _PAGE_NX;
+ 			disable_nx = 0;
+ 		}
+-	} else if (!strcmp(str,"off")) {
+-		disable_nx = 1;
+-		__supported_pte_mask &= ~_PAGE_NX;
+-	} else
+-		return -EINVAL;
++	} else {
++		if (!strcmp(str, "off")) {
++			disable_nx = 1;
++			__supported_pte_mask &= ~_PAGE_NX;
++		} else {
++			return -EINVAL;
++		}
++	}
  
--
- void fadd__(void)
- {
--  /* fadd st,st(i) */
--  int i = FPU_rm;
--  clear_C1();
--  FPU_add(&st(i), FPU_gettagi(i), 0, control_word);
-+	/* fadd st,st(i) */
-+	int i = FPU_rm;
-+	clear_C1();
-+	FPU_add(&st(i), FPU_gettagi(i), 0, control_word);
+ 	return 0;
  }
+@@ -522,6 +506,7 @@ static void __init set_nx(void)
  
--
- void fmul__(void)
- {
--  /* fmul st,st(i) */
--  int i = FPU_rm;
--  clear_C1();
--  FPU_mul(&st(i), FPU_gettagi(i), 0, control_word);
-+	/* fmul st,st(i) */
-+	int i = FPU_rm;
-+	clear_C1();
-+	FPU_mul(&st(i), FPU_gettagi(i), 0, control_word);
+ 	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+ 		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++
+ 		if ((v[3] & (1 << 20)) && !disable_nx) {
+ 			rdmsr(MSR_EFER, l, h);
+ 			l |= EFER_NX;
+@@ -531,35 +516,6 @@ static void __init set_nx(void)
+ 		}
+ 	}
  }
- 
--
 -
- void fsub__(void)
- {
--  /* fsub st,st(i) */
--  clear_C1();
--  FPU_sub(0, FPU_rm, control_word);
-+	/* fsub st,st(i) */
-+	clear_C1();
-+	FPU_sub(0, FPU_rm, control_word);
- }
- 
+-/*
+- * Enables/disables executability of a given kernel page and
+- * returns the previous setting.
+- */
+-int __init set_kernel_exec(unsigned long vaddr, int enable)
+-{
+-	pte_t *pte;
+-	int ret = 1;
 -
- void fsubr_(void)
- {
--  /* fsubr st,st(i) */
--  clear_C1();
--  FPU_sub(REV, FPU_rm, control_word);
-+	/* fsubr st,st(i) */
-+	clear_C1();
-+	FPU_sub(REV, FPU_rm, control_word);
- }
- 
+-	if (!nx_enabled)
+-		goto out;
 -
- void fdiv__(void)
- {
--  /* fdiv st,st(i) */
--  clear_C1();
--  FPU_div(0, FPU_rm, control_word);
-+	/* fdiv st,st(i) */
-+	clear_C1();
-+	FPU_div(0, FPU_rm, control_word);
- }
- 
+-	pte = lookup_address(vaddr);
+-	BUG_ON(!pte);
 -
- void fdivr_(void)
- {
--  /* fdivr st,st(i) */
--  clear_C1();
--  FPU_div(REV, FPU_rm, control_word);
-+	/* fdivr st,st(i) */
-+	clear_C1();
-+	FPU_div(REV, FPU_rm, control_word);
- }
- 
+-	if (!pte_exec_kernel(*pte))
+-		ret = 0;
 -
+-	if (enable)
+-		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+-	else
+-		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
+-	pte_update_defer(&init_mm, vaddr, pte);
+-	__flush_tlb_all();
+-out:
+-	return ret;
+-}
 -
- void fadd_i(void)
- {
--  /* fadd st(i),st */
--  int i = FPU_rm;
--  clear_C1();
--  FPU_add(&st(i), FPU_gettagi(i), i, control_word);
-+	/* fadd st(i),st */
-+	int i = FPU_rm;
-+	clear_C1();
-+	FPU_add(&st(i), FPU_gettagi(i), i, control_word);
- }
+ #endif
  
+ /*
+@@ -574,9 +530,8 @@ void __init paging_init(void)
+ #ifdef CONFIG_X86_PAE
+ 	set_nx();
+ 	if (nx_enabled)
+-		printk("NX (Execute Disable) protection: active\n");
++		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+ #endif
 -
- void fmul_i(void)
- {
--  /* fmul st(i),st */
--  clear_C1();
--  FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word);
-+	/* fmul st(i),st */
-+	clear_C1();
-+	FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word);
- }
+ 	pagetable_init();
  
+ 	load_cr3(swapper_pg_dir);
+@@ -600,10 +555,10 @@ void __init paging_init(void)
+  * used to involve black magic jumps to work around some nasty CPU bugs,
+  * but fortunately the switch to using exceptions got rid of all that.
+  */
 -
- void fsubri(void)
+ static void __init test_wp_bit(void)
  {
--  /* fsubr st(i),st */
--  clear_C1();
--  FPU_sub(DEST_RM, FPU_rm, control_word);
-+	/* fsubr st(i),st */
-+	clear_C1();
-+	FPU_sub(DEST_RM, FPU_rm, control_word);
- }
+-	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
++	printk(KERN_INFO
++  "Checking if this processor honours the WP bit even in supervisor mode...");
  
--
- void fsub_i(void)
- {
--  /* fsub st(i),st */
--  clear_C1();
--  FPU_sub(REV|DEST_RM, FPU_rm, control_word);
-+	/* fsub st(i),st */
-+	clear_C1();
-+	FPU_sub(REV | DEST_RM, FPU_rm, control_word);
- }
+ 	/* Any page-aligned address will do, the test is non-destructive */
+ 	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
+@@ -611,47 +566,46 @@ static void __init test_wp_bit(void)
+ 	clear_fixmap(FIX_WP_TEST);
  
--
- void fdivri(void)
- {
--  /* fdivr st(i),st */
--  clear_C1();
--  FPU_div(DEST_RM, FPU_rm, control_word);
-+	/* fdivr st(i),st */
-+	clear_C1();
-+	FPU_div(DEST_RM, FPU_rm, control_word);
+ 	if (!boot_cpu_data.wp_works_ok) {
+-		printk("No.\n");
++		printk(KERN_CONT "No.\n");
+ #ifdef CONFIG_X86_WP_WORKS_OK
+-		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++		panic(
++  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
+ #endif
+ 	} else {
+-		printk("Ok.\n");
++		printk(KERN_CONT "Ok.\n");
+ 	}
  }
  
--
- void fdiv_i(void)
- {
--  /* fdiv st(i),st */
--  clear_C1();
--  FPU_div(REV|DEST_RM, FPU_rm, control_word);
-+	/* fdiv st(i),st */
-+	clear_C1();
-+	FPU_div(REV | DEST_RM, FPU_rm, control_word);
- }
+-static struct kcore_list kcore_mem, kcore_vmalloc; 
++static struct kcore_list kcore_mem, kcore_vmalloc;
  
--
--
- void faddp_(void)
+ void __init mem_init(void)
  {
--  /* faddp st(i),st */
--  int i = FPU_rm;
--  clear_C1();
--  if ( FPU_add(&st(i), FPU_gettagi(i), i, control_word) >= 0 )
--    FPU_pop();
-+	/* faddp st(i),st */
-+	int i = FPU_rm;
-+	clear_C1();
-+	if (FPU_add(&st(i), FPU_gettagi(i), i, control_word) >= 0)
-+		FPU_pop();
- }
+-	extern int ppro_with_ram_bug(void);
+ 	int codesize, reservedpages, datasize, initsize;
+-	int tmp;
+-	int bad_ppro;
++	int tmp, bad_ppro;
  
--
- void fmulp_(void)
- {
--  /* fmulp st(i),st */
--  clear_C1();
--  if ( FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word) >= 0 )
--    FPU_pop();
-+	/* fmulp st(i),st */
-+	clear_C1();
-+	if (FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word) >= 0)
-+		FPU_pop();
+ #ifdef CONFIG_FLATMEM
+ 	BUG_ON(!mem_map);
+ #endif
+-	
+ 	bad_ppro = ppro_with_ram_bug();
+ 
+ #ifdef CONFIG_HIGHMEM
+ 	/* check that fixmap and pkmap do not overlap */
+-	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+-		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++		printk(KERN_ERR
++			"fixmap and kmap areas overlap - this will crash\n");
+ 		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+-				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
++				FIXADDR_START);
+ 		BUG();
+ 	}
+ #endif
+- 
+ 	/* this will put all low memory onto the freelists */
+ 	totalram_pages += free_all_bootmem();
+ 
+ 	reservedpages = 0;
+ 	for (tmp = 0; tmp < max_low_pfn; tmp++)
+ 		/*
+-		 * Only count reserved RAM pages
++		 * Only count reserved RAM pages:
+ 		 */
+ 		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
+ 			reservedpages++;
+@@ -662,11 +616,12 @@ void __init mem_init(void)
+ 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
+ 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+ 
+-	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
+-	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
++	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ 		   VMALLOC_END-VMALLOC_START);
+ 
+-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
++			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
+ 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ 		num_physpages << (PAGE_SHIFT-10),
+ 		codesize >> 10,
+@@ -677,45 +632,46 @@ void __init mem_init(void)
+ 	       );
+ 
+ #if 1 /* double-sanity-check paranoia */
+-	printk("virtual kernel memory layout:\n"
+-	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
++	printk(KERN_INFO "virtual kernel memory layout:\n"
++		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+ #ifdef CONFIG_HIGHMEM
+-	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
++		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+ #endif
+-	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+-	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+-	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+-	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+-	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+-	       FIXADDR_START, FIXADDR_TOP,
+-	       (FIXADDR_TOP - FIXADDR_START) >> 10,
++		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
++		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
++		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
++		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
++		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
++		FIXADDR_START, FIXADDR_TOP,
++		(FIXADDR_TOP - FIXADDR_START) >> 10,
+ 
+ #ifdef CONFIG_HIGHMEM
+-	       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+-	       (LAST_PKMAP*PAGE_SIZE) >> 10,
++		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
++		(LAST_PKMAP*PAGE_SIZE) >> 10,
+ #endif
+ 
+-	       VMALLOC_START, VMALLOC_END,
+-	       (VMALLOC_END - VMALLOC_START) >> 20,
++		VMALLOC_START, VMALLOC_END,
++		(VMALLOC_END - VMALLOC_START) >> 20,
+ 
+-	       (unsigned long)__va(0), (unsigned long)high_memory,
+-	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
++		(unsigned long)__va(0), (unsigned long)high_memory,
++		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
+ 
+-	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
+-	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
++		(unsigned long)&__init_begin, (unsigned long)&__init_end,
++		((unsigned long)&__init_end -
++		 (unsigned long)&__init_begin) >> 10,
+ 
+-	       (unsigned long)&_etext, (unsigned long)&_edata,
+-	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++		(unsigned long)&_etext, (unsigned long)&_edata,
++		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+ 
+-	       (unsigned long)&_text, (unsigned long)&_etext,
+-	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
++		(unsigned long)&_text, (unsigned long)&_etext,
++		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+ 
+ #ifdef CONFIG_HIGHMEM
+-	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
+-	BUG_ON(VMALLOC_END                     > PKMAP_BASE);
++	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
++	BUG_ON(VMALLOC_END				> PKMAP_BASE);
+ #endif
+-	BUG_ON(VMALLOC_START                   > VMALLOC_END);
+-	BUG_ON((unsigned long)high_memory      > VMALLOC_START);
++	BUG_ON(VMALLOC_START				> VMALLOC_END);
++	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
+ #endif /* double-sanity-check paranoia */
+ 
+ #ifdef CONFIG_X86_PAE
+@@ -746,49 +702,38 @@ int arch_add_memory(int nid, u64 start, u64 size)
+ 
+ 	return __add_pages(zone, start_pfn, nr_pages);
  }
+-
+ #endif
  
+-struct kmem_cache *pmd_cache;
 -
+-void __init pgtable_cache_init(void)
+-{
+-	if (PTRS_PER_PMD > 1)
+-		pmd_cache = kmem_cache_create("pmd",
+-					      PTRS_PER_PMD*sizeof(pmd_t),
+-					      PTRS_PER_PMD*sizeof(pmd_t),
+-					      SLAB_PANIC,
+-					      pmd_ctor);
+-}
 -
- void fsubrp(void)
+ /*
+  * This function cannot be __init, since exceptions don't work in that
+  * section.  Put this after the callers, so that it cannot be inlined.
+  */
+-static int noinline do_test_wp_bit(void)
++static noinline int do_test_wp_bit(void)
  {
--  /* fsubrp st(i),st */
--  clear_C1();
--  if ( FPU_sub(DEST_RM, FPU_rm, control_word) >= 0 )
--    FPU_pop();
-+	/* fsubrp st(i),st */
-+	clear_C1();
-+	if (FPU_sub(DEST_RM, FPU_rm, control_word) >= 0)
-+		FPU_pop();
- }
+ 	char tmp_reg;
+ 	int flag;
  
--
- void fsubp_(void)
- {
--  /* fsubp st(i),st */
--  clear_C1();
--  if ( FPU_sub(REV|DEST_RM, FPU_rm, control_word) >= 0 )
--    FPU_pop();
-+	/* fsubp st(i),st */
-+	clear_C1();
-+	if (FPU_sub(REV | DEST_RM, FPU_rm, control_word) >= 0)
-+		FPU_pop();
+ 	__asm__ __volatile__(
+-		"	movb %0,%1	\n"
+-		"1:	movb %1,%0	\n"
+-		"	xorl %2,%2	\n"
++		"	movb %0, %1	\n"
++		"1:	movb %1, %0	\n"
++		"	xorl %2, %2	\n"
+ 		"2:			\n"
+-		".section __ex_table,\"a\"\n"
++		".section __ex_table, \"a\"\n"
+ 		"	.align 4	\n"
+-		"	.long 1b,2b	\n"
++		"	.long 1b, 2b	\n"
+ 		".previous		\n"
+ 		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
+ 		 "=q" (tmp_reg),
+ 		 "=r" (flag)
+ 		:"2" (1)
+ 		:"memory");
+-	
++
+ 	return flag;
  }
  
--
- void fdivrp(void)
+ #ifdef CONFIG_DEBUG_RODATA
++const int rodata_test_data = 0xC3;
++EXPORT_SYMBOL_GPL(rodata_test_data);
+ 
+ void mark_rodata_ro(void)
  {
--  /* fdivrp st(i),st */
--  clear_C1();
--  if ( FPU_div(DEST_RM, FPU_rm, control_word) >= 0 )
--    FPU_pop();
-+	/* fdivrp st(i),st */
-+	clear_C1();
-+	if (FPU_div(DEST_RM, FPU_rm, control_word) >= 0)
-+		FPU_pop();
+@@ -801,32 +746,58 @@ void mark_rodata_ro(void)
+ 	if (num_possible_cpus() <= 1)
+ #endif
+ 	{
+-		change_page_attr(virt_to_page(start),
+-		                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
+-		printk("Write protecting the kernel text: %luk\n", size >> 10);
++		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
++		printk(KERN_INFO "Write protecting the kernel text: %luk\n",
++			size >> 10);
++
++#ifdef CONFIG_CPA_DEBUG
++		printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
++			start, start+size);
++		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
++
++		printk(KERN_INFO "Testing CPA: write protecting again\n");
++		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
++#endif
+ 	}
+ #endif
+ 	start += size;
+ 	size = (unsigned long)__end_rodata - start;
+-	change_page_attr(virt_to_page(start),
+-	                 size >> PAGE_SHIFT, PAGE_KERNEL_RO);
+-	printk("Write protecting the kernel read-only data: %luk\n",
+-	       size >> 10);
++	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
++	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
++		size >> 10);
++	rodata_test();
+ 
+-	/*
+-	 * change_page_attr() requires a global_flush_tlb() call after it.
+-	 * We do this after the printk so that if something went wrong in the
+-	 * change, the printk gets out at least to give a better debug hint
+-	 * of who is the culprit.
+-	 */
+-	global_flush_tlb();
++#ifdef CONFIG_CPA_DEBUG
++	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
++	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
++
++	printk(KERN_INFO "Testing CPA: write protecting again\n");
++	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
++#endif
  }
+ #endif
  
--
- void fdivp_(void)
+ void free_init_pages(char *what, unsigned long begin, unsigned long end)
  {
--  /* fdivp st(i),st */
--  clear_C1();
--  if ( FPU_div(REV|DEST_RM, FPU_rm, control_word) >= 0 )
--    FPU_pop();
-+	/* fdivp st(i),st */
-+	clear_C1();
-+	if (FPU_div(REV | DEST_RM, FPU_rm, control_word) >= 0)
-+		FPU_pop();
- }
-diff --git a/arch/x86/math-emu/fpu_asm.h b/arch/x86/math-emu/fpu_asm.h
-index 9ba1241..955b932 100644
---- a/arch/x86/math-emu/fpu_asm.h
-+++ b/arch/x86/math-emu/fpu_asm.h
-@@ -14,7 +14,6 @@
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	/*
++	 * If debugging page accesses then do not free this memory but
++	 * mark them not present - any buggy init-section access will
++	 * create a kernel page fault:
++	 */
++	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
++		begin, PAGE_ALIGN(end));
++	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
++#else
+ 	unsigned long addr;
  
- #define	EXCEPTION	FPU_exception
++	/*
++	 * We just marked the kernel text read only above, now that
++	 * we are going to free part of that, we need to make that
++	 * writeable first.
++	 */
++	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
++
+ 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ 		ClearPageReserved(virt_to_page(addr));
+ 		init_page_count(virt_to_page(addr));
+@@ -835,6 +806,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ 		totalram_pages++;
+ 	}
+ 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
++#endif
+ }
  
+ void free_initmem(void)
+@@ -850,4 +822,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
+ 	free_init_pages("initrd memory", start, end);
+ }
+ #endif
 -
- #define PARAM1	8(%ebp)
- #define	PARAM2	12(%ebp)
- #define	PARAM3	16(%ebp)
-diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
-index 20886cf..491e737 100644
---- a/arch/x86/math-emu/fpu_aux.c
-+++ b/arch/x86/math-emu/fpu_aux.c
-@@ -16,34 +16,34 @@
- #include "status_w.h"
- #include "control_w.h"
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 0f9c8c8..cc50a13 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -43,12 +43,10 @@
+ #include <asm/proto.h>
+ #include <asm/smp.h>
+ #include <asm/sections.h>
++#include <asm/kdebug.h>
++#include <asm/numa.h>
  
+-#ifndef Dprintk
+-#define Dprintk(x...)
+-#endif
 -
- static void fnop(void)
- {
- }
+-const struct dma_mapping_ops* dma_ops;
++const struct dma_mapping_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
  
- static void fclex(void)
+ static unsigned long dma_reserve __initdata;
+@@ -65,22 +63,26 @@ void show_mem(void)
  {
--  partial_status &= ~(SW_Backward|SW_Summary|SW_Stack_Fault|SW_Precision|
--		   SW_Underflow|SW_Overflow|SW_Zero_Div|SW_Denorm_Op|
--		   SW_Invalid);
--  no_ip_update = 1;
-+	partial_status &=
-+	    ~(SW_Backward | SW_Summary | SW_Stack_Fault | SW_Precision |
-+	      SW_Underflow | SW_Overflow | SW_Zero_Div | SW_Denorm_Op |
-+	      SW_Invalid);
-+	no_ip_update = 1;
- }
+ 	long i, total = 0, reserved = 0;
+ 	long shared = 0, cached = 0;
+-	pg_data_t *pgdat;
+ 	struct page *page;
++	pg_data_t *pgdat;
  
- /* Needs to be externally visible */
- void finit(void)
- {
--  control_word = 0x037f;
--  partial_status = 0;
--  top = 0;            /* We don't keep top in the status word internally. */
--  fpu_tag_word = 0xffff;
--  /* The behaviour is different from that detailed in
--     Section 15.1.6 of the Intel manual */
--  operand_address.offset = 0;
--  operand_address.selector = 0;
--  instruction_address.offset = 0;
--  instruction_address.selector = 0;
--  instruction_address.opcode = 0;
--  no_ip_update = 1;
-+	control_word = 0x037f;
-+	partial_status = 0;
-+	top = 0;		/* We don't keep top in the status word internally. */
-+	fpu_tag_word = 0xffff;
-+	/* The behaviour is different from that detailed in
-+	   Section 15.1.6 of the Intel manual */
-+	operand_address.offset = 0;
-+	operand_address.selector = 0;
-+	instruction_address.offset = 0;
-+	instruction_address.selector = 0;
-+	instruction_address.opcode = 0;
-+	no_ip_update = 1;
+ 	printk(KERN_INFO "Mem-info:\n");
+ 	show_free_areas();
+-	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++	printk(KERN_INFO "Free swap:       %6ldkB\n",
++		nr_swap_pages << (PAGE_SHIFT-10));
+ 
+ 	for_each_online_pgdat(pgdat) {
+-               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+-			/* this loop can take a while with 256 GB and 4k pages
+-			   so update the NMI watchdog */
+-			if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) {
++		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++			/*
++			 * This loop can take a while with 256 GB and
++			 * 4k pages so defer the NMI watchdog:
++			 */
++			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+ 				touch_nmi_watchdog();
+-			}
++
+ 			if (!pfn_valid(pgdat->node_start_pfn + i))
+ 				continue;
++
+ 			page = pfn_to_page(pgdat->node_start_pfn + i);
+ 			total++;
+ 			if (PageReserved(page))
+@@ -89,51 +91,58 @@ void show_mem(void)
+ 				cached++;
+ 			else if (page_count(page))
+ 				shared += page_count(page) - 1;
+-               }
++		}
+ 	}
+-	printk(KERN_INFO "%lu pages of RAM\n", total);
+-	printk(KERN_INFO "%lu reserved pages\n",reserved);
+-	printk(KERN_INFO "%lu pages shared\n",shared);
+-	printk(KERN_INFO "%lu pages swap cached\n",cached);
++	printk(KERN_INFO "%lu pages of RAM\n",		total);
++	printk(KERN_INFO "%lu reserved pages\n",	reserved);
++	printk(KERN_INFO "%lu pages shared\n",		shared);
++	printk(KERN_INFO "%lu pages swap cached\n",	cached);
  }
  
- /*
-@@ -54,151 +54,134 @@ void finit(void)
- #define fsetpm fnop
+ int after_bootmem;
  
- static FUNC const finit_table[] = {
--  feni, fdisi, fclex, finit,
--  fsetpm, FPU_illegal, FPU_illegal, FPU_illegal
-+	feni, fdisi, fclex, finit,
-+	fsetpm, FPU_illegal, FPU_illegal, FPU_illegal
- };
+ static __init void *spp_getpage(void)
+-{ 
++{
+ 	void *ptr;
++
+ 	if (after_bootmem)
+-		ptr = (void *) get_zeroed_page(GFP_ATOMIC); 
++		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
+ 	else
+ 		ptr = alloc_bootmem_pages(PAGE_SIZE);
+-	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
+-		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
  
- void finit_(void)
- {
--  (finit_table[FPU_rm])();
-+	(finit_table[FPU_rm]) ();
- }
+-	Dprintk("spp_getpage %p\n", ptr);
++	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
++		panic("set_pte_phys: cannot allocate page data %s\n",
++			after_bootmem ? "after bootmem" : "");
++	}
++
++	pr_debug("spp_getpage %p\n", ptr);
++
+ 	return ptr;
+-} 
++}
  
--
- static void fstsw_ax(void)
+-static __init void set_pte_phys(unsigned long vaddr,
+-			 unsigned long phys, pgprot_t prot)
++static __init void
++set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
  {
--  *(short *) &FPU_EAX = status_word();
--  no_ip_update = 1;
-+	*(short *)&FPU_EAX = status_word();
-+	no_ip_update = 1;
- }
+ 	pgd_t *pgd;
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+ 	pte_t *pte, new_pte;
  
- static FUNC const fstsw_table[] = {
--  fstsw_ax, FPU_illegal, FPU_illegal, FPU_illegal,
--  FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
-+	fstsw_ax, FPU_illegal, FPU_illegal, FPU_illegal,
-+	FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
- };
+-	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++	pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);
  
- void fstsw_(void)
- {
--  (fstsw_table[FPU_rm])();
-+	(fstsw_table[FPU_rm]) ();
+ 	pgd = pgd_offset_k(vaddr);
+ 	if (pgd_none(*pgd)) {
+-		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++		printk(KERN_ERR
++			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
+ 		return;
+ 	}
+ 	pud = pud_offset(pgd, vaddr);
+ 	if (pud_none(*pud)) {
+-		pmd = (pmd_t *) spp_getpage(); 
++		pmd = (pmd_t *) spp_getpage();
+ 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+ 		if (pmd != pmd_offset(pud, 0)) {
+-			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
++				pmd, pmd_offset(pud, 0));
+ 			return;
+ 		}
+ 	}
+@@ -142,7 +151,7 @@ static __init void set_pte_phys(unsigned long vaddr,
+ 		pte = (pte_t *) spp_getpage();
+ 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+ 		if (pte != pte_offset_kernel(pmd, 0)) {
+-			printk("PAGETABLE BUG #02!\n");
++			printk(KERN_ERR "PAGETABLE BUG #02!\n");
+ 			return;
+ 		}
+ 	}
+@@ -162,33 +171,35 @@ static __init void set_pte_phys(unsigned long vaddr,
  }
  
--
- static FUNC const fp_nop_table[] = {
--  fnop, FPU_illegal, FPU_illegal, FPU_illegal,
--  FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
-+	fnop, FPU_illegal, FPU_illegal, FPU_illegal,
-+	FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
- };
- 
- void fp_nop(void)
+ /* NOTE: this is meant to be run only at boot */
+-void __init 
+-__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++void __init
++__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
  {
--  (fp_nop_table[FPU_rm])();
-+	(fp_nop_table[FPU_rm]) ();
- }
+ 	unsigned long address = __fix_to_virt(idx);
  
--
- void fld_i_(void)
- {
--  FPU_REG *st_new_ptr;
--  int i;
--  u_char tag;
--
--  if ( STACK_OVERFLOW )
--    { FPU_stack_overflow(); return; }
--
--  /* fld st(i) */
--  i = FPU_rm;
--  if ( NOT_EMPTY(i) )
--    {
--      reg_copy(&st(i), st_new_ptr);
--      tag = FPU_gettagi(i);
--      push();
--      FPU_settag0(tag);
--    }
--  else
--    {
--      if ( control_word & CW_Invalid )
--	{
--	  /* The masked response */
--	  FPU_stack_underflow();
-+	FPU_REG *st_new_ptr;
-+	int i;
-+	u_char tag;
-+
-+	if (STACK_OVERFLOW) {
-+		FPU_stack_overflow();
-+		return;
+ 	if (idx >= __end_of_fixed_addresses) {
+-		printk("Invalid __set_fixmap\n");
++		printk(KERN_ERR "Invalid __set_fixmap\n");
+ 		return;
  	}
--      else
--	EXCEPTION(EX_StackUnder);
--    }
+ 	set_pte_phys(address, phys, prot);
+ }
  
--}
-+	/* fld st(i) */
-+	i = FPU_rm;
-+	if (NOT_EMPTY(i)) {
-+		reg_copy(&st(i), st_new_ptr);
-+		tag = FPU_gettagi(i);
-+		push();
-+		FPU_settag0(tag);
-+	} else {
-+		if (control_word & CW_Invalid) {
-+			/* The masked response */
-+			FPU_stack_underflow();
-+		} else
-+			EXCEPTION(EX_StackUnder);
-+	}
+-unsigned long __meminitdata table_start, table_end;
++static unsigned long __initdata table_start;
++static unsigned long __meminitdata table_end;
  
-+}
+ static __meminit void *alloc_low_page(unsigned long *phys)
+-{ 
++{
+ 	unsigned long pfn = table_end++;
+ 	void *adr;
  
- void fxch_i(void)
- {
--  /* fxch st(i) */
--  FPU_REG t;
--  int i = FPU_rm;
--  FPU_REG *st0_ptr = &st(0), *sti_ptr = &st(i);
--  long tag_word = fpu_tag_word;
--  int regnr = top & 7, regnri = ((regnr + i) & 7);
--  u_char st0_tag = (tag_word >> (regnr*2)) & 3;
--  u_char sti_tag = (tag_word >> (regnri*2)) & 3;
--
--  if ( st0_tag == TAG_Empty )
--    {
--      if ( sti_tag == TAG_Empty )
--	{
--	  FPU_stack_underflow();
--	  FPU_stack_underflow_i(i);
--	  return;
-+	/* fxch st(i) */
-+	FPU_REG t;
-+	int i = FPU_rm;
-+	FPU_REG *st0_ptr = &st(0), *sti_ptr = &st(i);
-+	long tag_word = fpu_tag_word;
-+	int regnr = top & 7, regnri = ((regnr + i) & 7);
-+	u_char st0_tag = (tag_word >> (regnr * 2)) & 3;
-+	u_char sti_tag = (tag_word >> (regnri * 2)) & 3;
+ 	if (after_bootmem) {
+ 		adr = (void *)get_zeroed_page(GFP_ATOMIC);
+ 		*phys = __pa(adr);
 +
-+	if (st0_tag == TAG_Empty) {
-+		if (sti_tag == TAG_Empty) {
-+			FPU_stack_underflow();
-+			FPU_stack_underflow_i(i);
-+			return;
-+		}
-+		if (control_word & CW_Invalid) {
-+			/* Masked response */
-+			FPU_copy_to_reg0(sti_ptr, sti_tag);
-+		}
-+		FPU_stack_underflow_i(i);
-+		return;
- 	}
--      if ( control_word & CW_Invalid )
--	{
--	  /* Masked response */
--	  FPU_copy_to_reg0(sti_ptr, sti_tag);
-+	if (sti_tag == TAG_Empty) {
-+		if (control_word & CW_Invalid) {
-+			/* Masked response */
-+			FPU_copy_to_regi(st0_ptr, st0_tag, i);
-+		}
-+		FPU_stack_underflow();
-+		return;
+ 		return adr;
  	}
--      FPU_stack_underflow_i(i);
--      return;
--    }
--  if ( sti_tag == TAG_Empty )
--    {
--      if ( control_word & CW_Invalid )
--	{
--	  /* Masked response */
--	  FPU_copy_to_regi(st0_ptr, st0_tag, i);
--	}
--      FPU_stack_underflow();
--      return;
--    }
--  clear_C1();
--
--  reg_copy(st0_ptr, &t);
--  reg_copy(sti_ptr, st0_ptr);
--  reg_copy(&t, sti_ptr);
--
--  tag_word &= ~(3 << (regnr*2)) & ~(3 << (regnri*2));
--  tag_word |= (sti_tag << (regnr*2)) | (st0_tag << (regnri*2));
--  fpu_tag_word = tag_word;
--}
-+	clear_C1();
  
-+	reg_copy(st0_ptr, &t);
-+	reg_copy(sti_ptr, st0_ptr);
-+	reg_copy(&t, sti_ptr);
-+
-+	tag_word &= ~(3 << (regnr * 2)) & ~(3 << (regnri * 2));
-+	tag_word |= (sti_tag << (regnr * 2)) | (st0_tag << (regnri * 2));
-+	fpu_tag_word = tag_word;
+-	if (pfn >= end_pfn) 
+-		panic("alloc_low_page: ran out of memory"); 
++	if (pfn >= end_pfn)
++		panic("alloc_low_page: ran out of memory");
+ 
+ 	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
+ 	memset(adr, 0, PAGE_SIZE);
+@@ -197,44 +208,49 @@ static __meminit void *alloc_low_page(unsigned long *phys)
+ }
+ 
+ static __meminit void unmap_low_page(void *adr)
+-{ 
+-
++{
+ 	if (after_bootmem)
+ 		return;
+ 
+ 	early_iounmap(adr, PAGE_SIZE);
+-} 
 +}
  
- void ffree_(void)
+ /* Must run before zap_low_mappings */
+ __meminit void *early_ioremap(unsigned long addr, unsigned long size)
  {
--  /* ffree st(i) */
--  FPU_settagi(FPU_rm, TAG_Empty);
-+	/* ffree st(i) */
-+	FPU_settagi(FPU_rm, TAG_Empty);
+-	unsigned long vaddr;
+ 	pmd_t *pmd, *last_pmd;
++	unsigned long vaddr;
+ 	int i, pmds;
+ 
+ 	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
+ 	vaddr = __START_KERNEL_map;
+ 	pmd = level2_kernel_pgt;
+ 	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
++
+ 	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
+ 		for (i = 0; i < pmds; i++) {
+ 			if (pmd_present(pmd[i]))
+-				goto next;
++				goto continue_outer_loop;
+ 		}
+ 		vaddr += addr & ~PMD_MASK;
+ 		addr &= PMD_MASK;
++
+ 		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
+-			set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
+-		__flush_tlb();
++			set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
++		__flush_tlb_all();
++
+ 		return (void *)vaddr;
+-	next:
++continue_outer_loop:
+ 		;
+ 	}
+-	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
++	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
++
+ 	return NULL;
  }
  
--
- void ffreep(void)
+-/* To avoid virtual aliases later */
++/*
++ * To avoid virtual aliases later:
++ */
+ __meminit void early_iounmap(void *addr, unsigned long size)
  {
--  /* ffree st(i) + pop - unofficial code */
--  FPU_settagi(FPU_rm, TAG_Empty);
--  FPU_pop();
-+	/* ffree st(i) + pop - unofficial code */
-+	FPU_settagi(FPU_rm, TAG_Empty);
-+	FPU_pop();
+ 	unsigned long vaddr;
+@@ -244,9 +260,11 @@ __meminit void early_iounmap(void *addr, unsigned long size)
+ 	vaddr = (unsigned long)addr;
+ 	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
+ 	pmd = level2_kernel_pgt + pmd_index(vaddr);
++
+ 	for (i = 0; i < pmds; i++)
+ 		pmd_clear(pmd + i);
+-	__flush_tlb();
++
++	__flush_tlb_all();
  }
  
--
- void fst_i_(void)
+ static void __meminit
+@@ -259,16 +277,17 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+ 		pmd_t *pmd = pmd_page + pmd_index(address);
+ 
+ 		if (address >= end) {
+-			if (!after_bootmem)
++			if (!after_bootmem) {
+ 				for (; i < PTRS_PER_PMD; i++, pmd++)
+ 					set_pmd(pmd, __pmd(0));
++			}
+ 			break;
+ 		}
+ 
+ 		if (pmd_val(*pmd))
+ 			continue;
+ 
+-		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
++		entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
+ 		entry &= __supported_pte_mask;
+ 		set_pmd(pmd, __pmd(entry));
+ 	}
+@@ -277,19 +296,19 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+ static void __meminit
+ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
  {
--  /* fst st(i) */
--  FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
-+	/* fst st(i) */
-+	FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
+-	pmd_t *pmd = pmd_offset(pud,0);
++	pmd_t *pmd = pmd_offset(pud, 0);
+ 	spin_lock(&init_mm.page_table_lock);
+ 	phys_pmd_init(pmd, address, end);
+ 	spin_unlock(&init_mm.page_table_lock);
+ 	__flush_tlb_all();
  }
  
+-static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+-{ 
++static void __meminit
++phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
++{
+ 	int i = pud_index(addr);
+ 
 -
- void fstp_i(void)
+-	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
++	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
+ 		unsigned long pmd_phys;
+ 		pud_t *pud = pud_page + pud_index(addr);
+ 		pmd_t *pmd;
+@@ -297,10 +316,11 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
+ 		if (addr >= end)
+ 			break;
+ 
+-		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
+-			set_pud(pud, __pud(0)); 
++		if (!after_bootmem &&
++				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
++			set_pud(pud, __pud(0));
+ 			continue;
+-		} 
++		}
+ 
+ 		if (pud_val(*pud)) {
+ 			phys_pmd_update(pud, addr, end);
+@@ -308,14 +328,16 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
+ 		}
+ 
+ 		pmd = alloc_low_page(&pmd_phys);
++
+ 		spin_lock(&init_mm.page_table_lock);
+ 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
+ 		phys_pmd_init(pmd, addr, end);
+ 		spin_unlock(&init_mm.page_table_lock);
++
+ 		unmap_low_page(pmd);
+ 	}
+-	__flush_tlb();
+-} 
++	__flush_tlb_all();
++}
+ 
+ static void __init find_early_table_space(unsigned long end)
  {
--  /* fstp st(i) */
--  FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
--  FPU_pop();
-+	/* fstp st(i) */
-+	FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
-+	FPU_pop();
+@@ -326,14 +348,23 @@ static void __init find_early_table_space(unsigned long end)
+ 	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
+ 		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+ 
+- 	/* RED-PEN putting page tables only on node 0 could
+- 	   cause a hotspot and fill up ZONE_DMA. The page tables
+- 	   need roughly 0.5KB per GB. */
+- 	start = 0x8000;
+- 	table_start = find_e820_area(start, end, tables);
++	/*
++	 * RED-PEN putting page tables only on node 0 could
++	 * cause a hotspot and fill up ZONE_DMA. The page tables
++	 * need roughly 0.5KB per GB.
++	 */
++	start = 0x8000;
++	table_start = find_e820_area(start, end, tables);
+ 	if (table_start == -1UL)
+ 		panic("Cannot find space for the kernel page tables");
+ 
++	/*
++	 * When you have a lot of RAM like 256GB, early_table will not fit
++	 * into 0x8000 range, find_e820_area() will find area after kernel
++	 * bss but the table_start is not page aligned, so need to round it
++	 * up to avoid overlap with bss:
++	 */
++	table_start = round_up(table_start, PAGE_SIZE);
+ 	table_start >>= PAGE_SHIFT;
+ 	table_end = table_start;
+ 
+@@ -342,20 +373,23 @@ static void __init find_early_table_space(unsigned long end)
+ 		(table_start << PAGE_SHIFT) + tables);
  }
--
-diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
-index 65120f5..4dae511 100644
---- a/arch/x86/math-emu/fpu_emu.h
-+++ b/arch/x86/math-emu/fpu_emu.h
-@@ -7,7 +7,6 @@
-  |                                                                           |
-  +---------------------------------------------------------------------------*/
  
--
- #ifndef _FPU_EMU_H_
- #define _FPU_EMU_H_
+-/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
+-   This runs before bootmem is initialized and gets pages directly from the 
+-   physical memory. To access them they are temporarily mapped. */
++/*
++ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
++ * This runs before bootmem is initialized and gets pages directly from
++ * the physical memory. To access them they are temporarily mapped.
++ */
+ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+-{ 
+-	unsigned long next; 
++{
++	unsigned long next;
  
-@@ -28,15 +27,15 @@
- #endif
+-	Dprintk("init_memory_mapping\n");
++	pr_debug("init_memory_mapping\n");
  
- #define EXP_BIAS	Const(0)
--#define EXP_OVER	Const(0x4000)    /* smallest invalid large exponent */
--#define	EXP_UNDER	Const(-0x3fff)   /* largest invalid small exponent */
--#define EXP_WAY_UNDER   Const(-0x6000)   /* Below the smallest denormal, but
--					    still a 16 bit nr. */
-+#define EXP_OVER	Const(0x4000)	/* smallest invalid large exponent */
-+#define	EXP_UNDER	Const(-0x3fff)	/* largest invalid small exponent */
-+#define EXP_WAY_UNDER   Const(-0x6000)	/* Below the smallest denormal, but
-+					   still a 16 bit nr. */
- #define EXP_Infinity    EXP_OVER
- #define EXP_NaN         EXP_OVER
+-	/* 
++	/*
+ 	 * Find space for the kernel direct mapping tables.
+-	 * Later we should allocate these tables in the local node of the memory
+-	 * mapped.  Unfortunately this is done currently before the nodes are 
+-	 * discovered.
++	 *
++	 * Later we should allocate these tables in the local node of the
++	 * memory mapped. Unfortunately this is done currently before the
++	 * nodes are discovered.
+ 	 */
+ 	if (!after_bootmem)
+ 		find_early_table_space(end);
+@@ -364,8 +398,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+ 	end = (unsigned long)__va(end);
  
- #define EXTENDED_Ebias Const(0x3fff)
--#define EXTENDED_Emin (-0x3ffe)  /* smallest valid exponent */
-+#define EXTENDED_Emin (-0x3ffe)	/* smallest valid exponent */
+ 	for (; start < end; start = next) {
+-		unsigned long pud_phys; 
+ 		pgd_t *pgd = pgd_offset_k(start);
++		unsigned long pud_phys;
+ 		pud_t *pud;
  
- #define SIGN_POS	Const(0)
- #define SIGN_NEG	Const(0x80)
-@@ -44,10 +43,9 @@
- #define SIGN_Positive	Const(0)
- #define SIGN_Negative	Const(0x8000)
+ 		if (after_bootmem)
+@@ -374,23 +408,26 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+ 			pud = alloc_low_page(&pud_phys);
  
--
- /* Keep the order TAG_Valid, TAG_Zero, TW_Denormal */
- /* The following fold to 2 (Special) in the Tag Word */
--#define TW_Denormal     Const(4)        /* De-normal */
-+#define TW_Denormal     Const(4)	/* De-normal */
- #define TW_Infinity	Const(5)	/* + or - infinity */
- #define	TW_NaN		Const(6)	/* Not a Number */
- #define	TW_Unsupported	Const(7)	/* Not supported by an 80486 */
-@@ -67,14 +65,13 @@
- #define DEST_RM         0x20
- #define LOADED          0x40
+ 		next = start + PGDIR_SIZE;
+-		if (next > end) 
+-			next = end; 
++		if (next > end)
++			next = end;
+ 		phys_pud_init(pud, __pa(start), __pa(next));
+ 		if (!after_bootmem)
+ 			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+ 		unmap_low_page(pud);
+-	} 
++	}
  
--#define FPU_Exception   Const(0x80000000)   /* Added to tag returns. */
--
-+#define FPU_Exception   Const(0x80000000)	/* Added to tag returns. */
+ 	if (!after_bootmem)
+ 		mmu_cr4_features = read_cr4();
+ 	__flush_tlb_all();
++
++	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+ }
  
- #ifndef __ASSEMBLY__
+ #ifndef CONFIG_NUMA
+ void __init paging_init(void)
+ {
+ 	unsigned long max_zone_pfns[MAX_NR_ZONES];
++
+ 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+ 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+ 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+@@ -402,39 +439,48 @@ void __init paging_init(void)
+ }
+ #endif
  
- #include "fpu_system.h"
+-/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
+-   from the CPU leading to inconsistent cache lines. address and size
+-   must be aligned to 2MB boundaries. 
+-   Does nothing when the mapping doesn't exist. */
+-void __init clear_kernel_mapping(unsigned long address, unsigned long size) 
++/*
++ * Unmap a kernel mapping if it exists. This is useful to avoid
++ * prefetches from the CPU leading to inconsistent cache lines.
++ * address and size must be aligned to 2MB boundaries.
++ * Does nothing when the mapping doesn't exist.
++ */
++void __init clear_kernel_mapping(unsigned long address, unsigned long size)
+ {
+ 	unsigned long end = address + size;
  
--#include <asm/sigcontext.h>   /* for struct _fpstate */
-+#include <asm/sigcontext.h>	/* for struct _fpstate */
- #include <asm/math_emu.h>
- #include <linux/linkage.h>
+ 	BUG_ON(address & ~LARGE_PAGE_MASK);
+-	BUG_ON(size & ~LARGE_PAGE_MASK); 
+-	
+-	for (; address < end; address += LARGE_PAGE_SIZE) { 
++	BUG_ON(size & ~LARGE_PAGE_MASK);
++
++	for (; address < end; address += LARGE_PAGE_SIZE) {
+ 		pgd_t *pgd = pgd_offset_k(address);
+ 		pud_t *pud;
+ 		pmd_t *pmd;
++
+ 		if (pgd_none(*pgd))
+ 			continue;
++
+ 		pud = pud_offset(pgd, address);
+ 		if (pud_none(*pud))
+-			continue; 
++			continue;
++
+ 		pmd = pmd_offset(pud, address);
+ 		if (!pmd || pmd_none(*pmd))
+-			continue; 
+-		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) { 
+-			/* Could handle this, but it should not happen currently. */
+-			printk(KERN_ERR 
+-	       "clear_kernel_mapping: mapping has been split. will leak memory\n"); 
+-			pmd_ERROR(*pmd); 
++			continue;
++
++		if (!(pmd_val(*pmd) & _PAGE_PSE)) {
++			/*
++			 * Could handle this, but it should not happen
++			 * currently:
++			 */
++			printk(KERN_ERR "clear_kernel_mapping: "
++				"mapping has been split. will leak memory\n");
++			pmd_ERROR(*pmd);
+ 		}
+-		set_pmd(pmd, __pmd(0)); 		
++		set_pmd(pmd, __pmd(0));
+ 	}
+ 	__flush_tlb_all();
+-} 
++}
  
-@@ -112,30 +109,33 @@ extern u_char emulating;
- #define PREFIX_DEFAULT 7
+ /*
+  * Memory hotplug specific functions
+@@ -461,16 +507,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
+ 	unsigned long nr_pages = size >> PAGE_SHIFT;
+ 	int ret;
  
- struct address {
--  unsigned int offset;
--  unsigned int selector:16;
--  unsigned int opcode:11;
--  unsigned int empty:5;
-+	unsigned int offset;
-+	unsigned int selector:16;
-+	unsigned int opcode:11;
-+	unsigned int empty:5;
- };
- struct fpu__reg {
--  unsigned sigl;
--  unsigned sigh;
--  short exp;
-+	unsigned sigl;
-+	unsigned sigh;
-+	short exp;
- };
+-	init_memory_mapping(start, (start + size -1));
++	init_memory_mapping(start, start + size-1);
  
--typedef void (*FUNC)(void);
-+typedef void (*FUNC) (void);
- typedef struct fpu__reg FPU_REG;
--typedef void (*FUNC_ST0)(FPU_REG *st0_ptr, u_char st0_tag);
--typedef struct { u_char address_size, operand_size, segment; }
--        overrides;
-+typedef void (*FUNC_ST0) (FPU_REG *st0_ptr, u_char st0_tag);
-+typedef struct {
-+	u_char address_size, operand_size, segment;
-+} overrides;
- /* This structure is 32 bits: */
--typedef struct { overrides override;
--		 u_char default_mode; } fpu_addr_modes;
-+typedef struct {
-+	overrides override;
-+	u_char default_mode;
-+} fpu_addr_modes;
- /* PROTECTED has a restricted meaning in the emulator; it is used
-    to signal that the emulator needs to do special things to ensure
-    that protection is respected in a segmented model. */
- #define PROTECTED 4
--#define SIXTEEN   1         /* We rely upon this being 1 (true) */
-+#define SIXTEEN   1		/* We rely upon this being 1 (true) */
- #define VM86      SIXTEEN
- #define PM16      (SIXTEEN | PROTECTED)
- #define SEG32     PROTECTED
-@@ -168,8 +168,8 @@ extern u_char const data_sizes_16[32];
+ 	ret = __add_pages(zone, start_pfn, nr_pages);
+-	if (ret)
+-		goto error;
++	WARN_ON(1);
  
- static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
- {
--  *(short *)&(y->exp) = *(const short *)&(x->exp); 
--  *(long long *)&(y->sigl) = *(const long long *)&(x->sigl);
-+	*(short *)&(y->exp) = *(const short *)&(x->exp);
-+	*(long long *)&(y->sigl) = *(const long long *)&(x->sigl);
+ 	return ret;
+-error:
+-	printk("%s: Problem encountered in __add_pages!\n", __func__);
+-	return ret;
  }
+ EXPORT_SYMBOL_GPL(arch_add_memory);
  
- #define exponent(x)  (((*(short *)&((x)->exp)) & 0x7fff) - EXTENDED_Ebias)
-@@ -184,27 +184,26 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
+@@ -484,36 +526,8 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
  
- #define significand(x) ( ((unsigned long long *)&((x)->sigl))[0] )
+ #endif /* CONFIG_MEMORY_HOTPLUG */
  
+-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+-/*
+- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+- * just online the pages.
+- */
+-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+-{
+-	int err = -EIO;
+-	unsigned long pfn;
+-	unsigned long total = 0, mem = 0;
+-	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+-		if (pfn_valid(pfn)) {
+-			online_page(pfn_to_page(pfn));
+-			err = 0;
+-			mem++;
+-		}
+-		total++;
+-	}
+-	if (!err) {
+-		z->spanned_pages += total;
+-		z->present_pages += mem;
+-		z->zone_pgdat->node_spanned_pages += total;
+-		z->zone_pgdat->node_present_pages += mem;
+-	}
+-	return err;
+-}
+-#endif
 -
- /*----- Prototypes for functions written in assembler -----*/
- /* extern void reg_move(FPU_REG *a, FPU_REG *b); */
- 
- asmlinkage int FPU_normalize(FPU_REG *x);
- asmlinkage int FPU_normalize_nuo(FPU_REG *x);
- asmlinkage int FPU_u_sub(FPU_REG const *arg1, FPU_REG const *arg2,
--			 FPU_REG *answ, unsigned int control_w, u_char sign,
-+			 FPU_REG * answ, unsigned int control_w, u_char sign,
- 			 int expa, int expb);
- asmlinkage int FPU_u_mul(FPU_REG const *arg1, FPU_REG const *arg2,
--			 FPU_REG *answ, unsigned int control_w, u_char sign,
-+			 FPU_REG * answ, unsigned int control_w, u_char sign,
- 			 int expon);
- asmlinkage int FPU_u_div(FPU_REG const *arg1, FPU_REG const *arg2,
--			 FPU_REG *answ, unsigned int control_w, u_char sign);
-+			 FPU_REG * answ, unsigned int control_w, u_char sign);
- asmlinkage int FPU_u_add(FPU_REG const *arg1, FPU_REG const *arg2,
--			 FPU_REG *answ, unsigned int control_w, u_char sign,
-+			 FPU_REG * answ, unsigned int control_w, u_char sign,
- 			 int expa, int expb);
- asmlinkage int wm_sqrt(FPU_REG *n, int dummy1, int dummy2,
- 		       unsigned int control_w, u_char sign);
--asmlinkage unsigned	FPU_shrx(void *l, unsigned x);
--asmlinkage unsigned	FPU_shrxs(void *v, unsigned x);
-+asmlinkage unsigned FPU_shrx(void *l, unsigned x);
-+asmlinkage unsigned FPU_shrxs(void *v, unsigned x);
- asmlinkage unsigned long FPU_div_small(unsigned long long *x, unsigned long y);
- asmlinkage int FPU_round(FPU_REG *arg, unsigned int extent, int dummy,
- 			 unsigned int control_w, u_char sign);
-diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
-index 1853524..760baee 100644
---- a/arch/x86/math-emu/fpu_entry.c
-+++ b/arch/x86/math-emu/fpu_entry.c
-@@ -25,10 +25,11 @@
-  +---------------------------------------------------------------------------*/
+-static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
+-			 kcore_vsyscall;
++static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
++			 kcore_modules, kcore_vsyscall;
  
- #include <linux/signal.h>
--#include <linux/ptrace.h>
-+#include <linux/regset.h>
+ void __init mem_init(void)
+ {
+@@ -521,8 +535,15 @@ void __init mem_init(void)
  
- #include <asm/uaccess.h>
- #include <asm/desc.h>
-+#include <asm/user.h>
+ 	pci_iommu_alloc();
  
- #include "fpu_system.h"
- #include "fpu_emu.h"
-@@ -36,726 +37,727 @@
- #include "control_w.h"
- #include "status_w.h"
+-	/* clear the zero-page */
+-	memset(empty_zero_page, 0, PAGE_SIZE);
++	/* clear_bss() already clear the empty_zero_page */
++
++	/* temporary debugging - double check it's true: */
++	{
++		int i;
++
++		for (i = 0; i < 1024; i++)
++			WARN_ON_ONCE(empty_zero_page[i]);
++	}
  
--#define __BAD__ FPU_illegal   /* Illegal on an 80486, causes SIGILL */
-+#define __BAD__ FPU_illegal	/* Illegal on an 80486, causes SIGILL */
+ 	reservedpages = 0;
  
--#ifndef NO_UNDOC_CODE    /* Un-documented FPU op-codes supported by default. */
-+#ifndef NO_UNDOC_CODE		/* Un-documented FPU op-codes supported by default. */
+@@ -534,7 +555,6 @@ void __init mem_init(void)
+ #endif
+ 	reservedpages = end_pfn - totalram_pages -
+ 					absent_pages_in_range(0, end_pfn);
+-
+ 	after_bootmem = 1;
  
- /* WARNING: These codes are not documented by Intel in their 80486 manual
-    and may not work on FPU clones or later Intel FPUs. */
+ 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
+@@ -542,15 +562,16 @@ void __init mem_init(void)
+ 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
  
- /* Changes to support the un-doc codes provided by Linus Torvalds. */
+ 	/* Register memory areas for /proc/kcore */
+-	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
+-	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
++	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ 		   VMALLOC_END-VMALLOC_START);
+ 	kclist_add(&kcore_kernel, &_stext, _end - _stext);
+ 	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
+-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, 
++	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
+ 				 VSYSCALL_END - VSYSCALL_START);
  
--#define _d9_d8_ fstp_i    /* unofficial code (19) */
--#define _dc_d0_ fcom_st   /* unofficial code (14) */
--#define _dc_d8_ fcompst   /* unofficial code (1c) */
--#define _dd_c8_ fxch_i    /* unofficial code (0d) */
--#define _de_d0_ fcompst   /* unofficial code (16) */
--#define _df_c0_ ffreep    /* unofficial code (07) ffree + pop */
--#define _df_c8_ fxch_i    /* unofficial code (0f) */
--#define _df_d0_ fstp_i    /* unofficial code (17) */
--#define _df_d8_ fstp_i    /* unofficial code (1f) */
-+#define _d9_d8_ fstp_i		/* unofficial code (19) */
-+#define _dc_d0_ fcom_st		/* unofficial code (14) */
-+#define _dc_d8_ fcompst		/* unofficial code (1c) */
-+#define _dd_c8_ fxch_i		/* unofficial code (0d) */
-+#define _de_d0_ fcompst		/* unofficial code (16) */
-+#define _df_c0_ ffreep		/* unofficial code (07) ffree + pop */
-+#define _df_c8_ fxch_i		/* unofficial code (0f) */
-+#define _df_d0_ fstp_i		/* unofficial code (17) */
-+#define _df_d8_ fstp_i		/* unofficial code (1f) */
+-	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
++	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
++				"%ldk reserved, %ldk data, %ldk init)\n",
+ 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ 		end_pfn << (PAGE_SHIFT-10),
+ 		codesize >> 10,
+@@ -566,19 +587,27 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ 	if (begin >= end)
+ 		return;
  
- static FUNC const st_instr_table[64] = {
--  fadd__,   fld_i_,     __BAD__, __BAD__, fadd_i,  ffree_,  faddp_,  _df_c0_,
--  fmul__,   fxch_i,     __BAD__, __BAD__, fmul_i,  _dd_c8_, fmulp_,  _df_c8_,
--  fcom_st,  fp_nop,     __BAD__, __BAD__, _dc_d0_, fst_i_,  _de_d0_, _df_d0_,
--  fcompst,  _d9_d8_,    __BAD__, __BAD__, _dc_d8_, fstp_i,  fcompp,  _df_d8_,
--  fsub__,   FPU_etc,    __BAD__, finit_,  fsubri,  fucom_,  fsubrp,  fstsw_,
--  fsubr_,   fconst,     fucompp, __BAD__, fsub_i,  fucomp,  fsubp_,  __BAD__,
--  fdiv__,   FPU_triga,  __BAD__, __BAD__, fdivri,  __BAD__, fdivrp,  __BAD__,
--  fdivr_,   FPU_trigb,  __BAD__, __BAD__, fdiv_i,  __BAD__, fdivp_,  __BAD__,
-+	fadd__, fld_i_, __BAD__, __BAD__, fadd_i, ffree_, faddp_, _df_c0_,
-+	fmul__, fxch_i, __BAD__, __BAD__, fmul_i, _dd_c8_, fmulp_, _df_c8_,
-+	fcom_st, fp_nop, __BAD__, __BAD__, _dc_d0_, fst_i_, _de_d0_, _df_d0_,
-+	fcompst, _d9_d8_, __BAD__, __BAD__, _dc_d8_, fstp_i, fcompp, _df_d8_,
-+	fsub__, FPU_etc, __BAD__, finit_, fsubri, fucom_, fsubrp, fstsw_,
-+	fsubr_, fconst, fucompp, __BAD__, fsub_i, fucomp, fsubp_, __BAD__,
-+	fdiv__, FPU_triga, __BAD__, __BAD__, fdivri, __BAD__, fdivrp, __BAD__,
-+	fdivr_, FPU_trigb, __BAD__, __BAD__, fdiv_i, __BAD__, fdivp_, __BAD__,
- };
++	/*
++	 * If debugging page accesses then do not free this memory but
++	 * mark them not present - any buggy init-section access will
++	 * create a kernel page fault:
++	 */
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
++		begin, PAGE_ALIGN(end));
++	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
++#else
+ 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
++
+ 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ 		ClearPageReserved(virt_to_page(addr));
+ 		init_page_count(virt_to_page(addr));
+ 		memset((void *)(addr & ~(PAGE_SIZE-1)),
+ 			POISON_FREE_INITMEM, PAGE_SIZE);
+-		if (addr >= __START_KERNEL_map)
+-			change_page_attr_addr(addr, 1, __pgprot(0));
+ 		free_page(addr);
+ 		totalram_pages++;
+ 	}
+-	if (addr > __START_KERNEL_map)
+-		global_flush_tlb();
++#endif
+ }
  
--#else     /* Support only documented FPU op-codes */
-+#else /* Support only documented FPU op-codes */
+ void free_initmem(void)
+@@ -589,6 +618,8 @@ void free_initmem(void)
+ }
  
- static FUNC const st_instr_table[64] = {
--  fadd__,   fld_i_,     __BAD__, __BAD__, fadd_i,  ffree_,  faddp_,  __BAD__,
--  fmul__,   fxch_i,     __BAD__, __BAD__, fmul_i,  __BAD__, fmulp_,  __BAD__,
--  fcom_st,  fp_nop,     __BAD__, __BAD__, __BAD__, fst_i_,  __BAD__, __BAD__,
--  fcompst,  __BAD__,    __BAD__, __BAD__, __BAD__, fstp_i,  fcompp,  __BAD__,
--  fsub__,   FPU_etc,    __BAD__, finit_,  fsubri,  fucom_,  fsubrp,  fstsw_,
--  fsubr_,   fconst,     fucompp, __BAD__, fsub_i,  fucomp,  fsubp_,  __BAD__,
--  fdiv__,   FPU_triga,  __BAD__, __BAD__, fdivri,  __BAD__, fdivrp,  __BAD__,
--  fdivr_,   FPU_trigb,  __BAD__, __BAD__, fdiv_i,  __BAD__, fdivp_,  __BAD__,
-+	fadd__, fld_i_, __BAD__, __BAD__, fadd_i, ffree_, faddp_, __BAD__,
-+	fmul__, fxch_i, __BAD__, __BAD__, fmul_i, __BAD__, fmulp_, __BAD__,
-+	fcom_st, fp_nop, __BAD__, __BAD__, __BAD__, fst_i_, __BAD__, __BAD__,
-+	fcompst, __BAD__, __BAD__, __BAD__, __BAD__, fstp_i, fcompp, __BAD__,
-+	fsub__, FPU_etc, __BAD__, finit_, fsubri, fucom_, fsubrp, fstsw_,
-+	fsubr_, fconst, fucompp, __BAD__, fsub_i, fucomp, fsubp_, __BAD__,
-+	fdiv__, FPU_triga, __BAD__, __BAD__, fdivri, __BAD__, fdivrp, __BAD__,
-+	fdivr_, FPU_trigb, __BAD__, __BAD__, fdiv_i, __BAD__, fdivp_, __BAD__,
- };
+ #ifdef CONFIG_DEBUG_RODATA
++const int rodata_test_data = 0xC3;
++EXPORT_SYMBOL_GPL(rodata_test_data);
  
- #endif /* NO_UNDOC_CODE */
+ void mark_rodata_ro(void)
+ {
+@@ -603,25 +634,27 @@ void mark_rodata_ro(void)
+ #ifdef CONFIG_KPROBES
+ 	start = (unsigned long)__start_rodata;
+ #endif
+-	
++
+ 	end = (unsigned long)__end_rodata;
+ 	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
+ 	end &= PAGE_MASK;
+ 	if (end <= start)
+ 		return;
  
--
--#define _NONE_ 0   /* Take no special action */
--#define _REG0_ 1   /* Need to check for not empty st(0) */
--#define _REGI_ 2   /* Need to check for not empty st(0) and st(rm) */
--#define _REGi_ 0   /* Uses st(rm) */
--#define _PUSH_ 3   /* Need to check for space to push onto stack */
--#define _null_ 4   /* Function illegal or not implemented */
--#define _REGIi 5   /* Uses st(0) and st(rm), result to st(rm) */
--#define _REGIp 6   /* Uses st(0) and st(rm), result to st(rm) then pop */
--#define _REGIc 0   /* Compare st(0) and st(rm) */
--#define _REGIn 0   /* Uses st(0) and st(rm), but handle checks later */
-+#define _NONE_ 0		/* Take no special action */
-+#define _REG0_ 1		/* Need to check for not empty st(0) */
-+#define _REGI_ 2		/* Need to check for not empty st(0) and st(rm) */
-+#define _REGi_ 0		/* Uses st(rm) */
-+#define _PUSH_ 3		/* Need to check for space to push onto stack */
-+#define _null_ 4		/* Function illegal or not implemented */
-+#define _REGIi 5		/* Uses st(0) and st(rm), result to st(rm) */
-+#define _REGIp 6		/* Uses st(0) and st(rm), result to st(rm) then pop */
-+#define _REGIc 0		/* Compare st(0) and st(rm) */
-+#define _REGIn 0		/* Uses st(0) and st(rm), but handle checks later */
+-	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
++	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
  
- #ifndef NO_UNDOC_CODE
+ 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ 	       (end - start) >> 10);
  
- /* Un-documented FPU op-codes supported by default. (see above) */
+-	/*
+-	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
+-	 * We do this after the printk so that if something went wrong in the
+-	 * change, the printk gets out at least to give a better debug hint
+-	 * of who is the culprit.
+-	 */
+-	global_flush_tlb();
++	rodata_test();
++
++#ifdef CONFIG_CPA_DEBUG
++	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
++	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
++
++	printk(KERN_INFO "Testing CPA: again\n");
++	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
++#endif
+ }
+ #endif
  
- static u_char const type_table[64] = {
--  _REGI_, _NONE_, _null_, _null_, _REGIi, _REGi_, _REGIp, _REGi_,
--  _REGI_, _REGIn, _null_, _null_, _REGIi, _REGI_, _REGIp, _REGI_,
--  _REGIc, _NONE_, _null_, _null_, _REGIc, _REG0_, _REGIc, _REG0_,
--  _REGIc, _REG0_, _null_, _null_, _REGIc, _REG0_, _REGIc, _REG0_,
--  _REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
--  _REGI_, _NONE_, _REGIc, _null_, _REGIi, _REGIc, _REGIp, _null_,
--  _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
--  _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_
-+	_REGI_, _NONE_, _null_, _null_, _REGIi, _REGi_, _REGIp, _REGi_,
-+	_REGI_, _REGIn, _null_, _null_, _REGIi, _REGI_, _REGIp, _REGI_,
-+	_REGIc, _NONE_, _null_, _null_, _REGIc, _REG0_, _REGIc, _REG0_,
-+	_REGIc, _REG0_, _null_, _null_, _REGIc, _REG0_, _REGIc, _REG0_,
-+	_REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
-+	_REGI_, _NONE_, _REGIc, _null_, _REGIi, _REGIc, _REGIp, _null_,
-+	_REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
-+	_REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_
- };
+@@ -632,17 +665,21 @@ void free_initrd_mem(unsigned long start, unsigned long end)
+ }
+ #endif
  
--#else     /* Support only documented FPU op-codes */
-+#else /* Support only documented FPU op-codes */
+-void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
+-{ 
++void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
++{
+ #ifdef CONFIG_NUMA
+ 	int nid = phys_to_nid(phys);
+ #endif
+ 	unsigned long pfn = phys >> PAGE_SHIFT;
++
+ 	if (pfn >= end_pfn) {
+-		/* This can happen with kdump kernels when accessing firmware
+-		   tables. */
++		/*
++		 * This can happen with kdump kernels when accessing
++		 * firmware tables:
++		 */
+ 		if (pfn < end_pfn_map)
+ 			return;
++
+ 		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
+ 				phys, len);
+ 		return;
+@@ -650,9 +687,9 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
  
- static u_char const type_table[64] = {
--  _REGI_, _NONE_, _null_, _null_, _REGIi, _REGi_, _REGIp, _null_,
--  _REGI_, _REGIn, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
--  _REGIc, _NONE_, _null_, _null_, _null_, _REG0_, _null_, _null_,
--  _REGIc, _null_, _null_, _null_, _null_, _REG0_, _REGIc, _null_,
--  _REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
--  _REGI_, _NONE_, _REGIc, _null_, _REGIi, _REGIc, _REGIp, _null_,
--  _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
--  _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_
-+	_REGI_, _NONE_, _null_, _null_, _REGIi, _REGi_, _REGIp, _null_,
-+	_REGI_, _REGIn, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
-+	_REGIc, _NONE_, _null_, _null_, _null_, _REG0_, _null_, _null_,
-+	_REGIc, _null_, _null_, _null_, _null_, _REG0_, _REGIc, _null_,
-+	_REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
-+	_REGI_, _NONE_, _REGIc, _null_, _REGIi, _REGIc, _REGIp, _null_,
-+	_REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
-+	_REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_
- };
+ 	/* Should check here against the e820 map to avoid double free */
+ #ifdef CONFIG_NUMA
+-  	reserve_bootmem_node(NODE_DATA(nid), phys, len);
+-#else       		
+-	reserve_bootmem(phys, len);    
++	reserve_bootmem_node(NODE_DATA(nid), phys, len);
++#else
++	reserve_bootmem(phys, len);
+ #endif
+ 	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
+ 		dma_reserve += len / PAGE_SIZE;
+@@ -660,46 +697,49 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
+ 	}
+ }
  
- #endif /* NO_UNDOC_CODE */
+-int kern_addr_valid(unsigned long addr) 
+-{ 
++int kern_addr_valid(unsigned long addr)
++{
+ 	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
+-       pgd_t *pgd;
+-       pud_t *pud;
+-       pmd_t *pmd;
+-       pte_t *pte;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
  
--
- #ifdef RE_ENTRANT_CHECKING
--u_char emulating=0;
-+u_char emulating = 0;
- #endif /* RE_ENTRANT_CHECKING */
+ 	if (above != 0 && above != -1UL)
+-		return 0; 
+-	
++		return 0;
++
+ 	pgd = pgd_offset_k(addr);
+ 	if (pgd_none(*pgd))
+ 		return 0;
  
--static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
--			overrides *override);
-+static int valid_prefix(u_char *Byte, u_char __user ** fpu_eip,
-+			overrides * override);
+ 	pud = pud_offset(pgd, addr);
+ 	if (pud_none(*pud))
+-		return 0; 
++		return 0;
  
- asmlinkage void math_emulate(long arg)
- {
--  u_char  FPU_modrm, byte1;
--  unsigned short code;
--  fpu_addr_modes addr_modes;
--  int unmasked;
--  FPU_REG loaded_data;
--  FPU_REG *st0_ptr;
--  u_char	  loaded_tag, st0_tag;
--  void __user *data_address;
--  struct address data_sel_off;
--  struct address entry_sel_off;
--  unsigned long code_base = 0;
--  unsigned long code_limit = 0;  /* Initialized to stop compiler warnings */
--  struct desc_struct code_descriptor;
-+	u_char FPU_modrm, byte1;
-+	unsigned short code;
-+	fpu_addr_modes addr_modes;
-+	int unmasked;
-+	FPU_REG loaded_data;
-+	FPU_REG *st0_ptr;
-+	u_char loaded_tag, st0_tag;
-+	void __user *data_address;
-+	struct address data_sel_off;
-+	struct address entry_sel_off;
-+	unsigned long code_base = 0;
-+	unsigned long code_limit = 0;	/* Initialized to stop compiler warnings */
-+	struct desc_struct code_descriptor;
+ 	pmd = pmd_offset(pud, addr);
+ 	if (pmd_none(*pmd))
+ 		return 0;
++
+ 	if (pmd_large(*pmd))
+ 		return pfn_valid(pmd_pfn(*pmd));
  
- #ifdef RE_ENTRANT_CHECKING
--  if ( emulating )
--    {
--      printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
--    }
--  RE_ENTRANT_CHECK_ON;
-+	if (emulating) {
-+		printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
-+	}
-+	RE_ENTRANT_CHECK_ON;
- #endif /* RE_ENTRANT_CHECKING */
+ 	pte = pte_offset_kernel(pmd, addr);
+ 	if (pte_none(*pte))
+ 		return 0;
++
+ 	return pfn_valid(pte_pfn(*pte));
+ }
  
--  if (!used_math())
--    {
--      finit();
--      set_used_math();
--    }
--
--  SETUP_DATA_AREA(arg);
--
--  FPU_ORIG_EIP = FPU_EIP;
--
--  if ( (FPU_EFLAGS & 0x00020000) != 0 )
--    {
--      /* Virtual 8086 mode */
--      addr_modes.default_mode = VM86;
--      FPU_EIP += code_base = FPU_CS << 4;
--      code_limit = code_base + 0xffff;  /* Assumes code_base <= 0xffff0000 */
--    }
--  else if ( FPU_CS == __USER_CS && FPU_DS == __USER_DS )
--    {
--      addr_modes.default_mode = 0;
--    }
--  else if ( FPU_CS == __KERNEL_CS )
--    {
--      printk("math_emulate: %04x:%08lx\n",FPU_CS,FPU_EIP);
--      panic("Math emulation needed in kernel");
--    }
--  else
--    {
+-/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
+-   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
+-   not need special handling anymore. */
 -
--      if ( (FPU_CS & 4) != 4 )   /* Must be in the LDT */
--	{
--	  /* Can only handle segmented addressing via the LDT
--	     for now, and it must be 16 bit */
--	  printk("FPU emulator: Unsupported addressing mode\n");
--	  math_abort(FPU_info, SIGILL);
-+	if (!used_math()) {
-+		finit();
-+		set_used_math();
- 	}
++/*
++ * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
++ * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
++ * not need special handling anymore:
++ */
+ static struct vm_area_struct gate_vma = {
+-	.vm_start = VSYSCALL_START,
+-	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
+-	.vm_page_prot = PAGE_READONLY_EXEC,
+-	.vm_flags = VM_READ | VM_EXEC
++	.vm_start	= VSYSCALL_START,
++	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
++	.vm_page_prot	= PAGE_READONLY_EXEC,
++	.vm_flags	= VM_READ | VM_EXEC
+ };
  
--      code_descriptor = LDT_DESCRIPTOR(FPU_CS);
--      if ( SEG_D_SIZE(code_descriptor) )
--	{
--	  /* The above test may be wrong, the book is not clear */
--	  /* Segmented 32 bit protected mode */
--	  addr_modes.default_mode = SEG32;
-+	SETUP_DATA_AREA(arg);
+ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+@@ -714,14 +754,17 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+ int in_gate_area(struct task_struct *task, unsigned long addr)
+ {
+ 	struct vm_area_struct *vma = get_gate_vma(task);
 +
-+	FPU_ORIG_EIP = FPU_EIP;
+ 	if (!vma)
+ 		return 0;
 +
-+	if ((FPU_EFLAGS & 0x00020000) != 0) {
-+		/* Virtual 8086 mode */
-+		addr_modes.default_mode = VM86;
-+		FPU_EIP += code_base = FPU_CS << 4;
-+		code_limit = code_base + 0xffff;	/* Assumes code_base <= 0xffff0000 */
-+	} else if (FPU_CS == __USER_CS && FPU_DS == __USER_DS) {
-+		addr_modes.default_mode = 0;
-+	} else if (FPU_CS == __KERNEL_CS) {
-+		printk("math_emulate: %04x:%08lx\n", FPU_CS, FPU_EIP);
-+		panic("Math emulation needed in kernel");
-+	} else {
+ 	return (addr >= vma->vm_start) && (addr < vma->vm_end);
+ }
+ 
+-/* Use this when you have no reliable task/vma, typically from interrupt
+- * context.  It is less reliable than using the task's vma and may give
+- * false positives.
++/*
++ * Use this when you have no reliable task/vma, typically from interrupt
++ * context. It is less reliable than using the task's vma and may give
++ * false positives:
+  */
+ int in_gate_area_no_task(unsigned long addr)
+ {
+@@ -741,8 +784,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+ /*
+  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
+  */
+-int __meminit vmemmap_populate(struct page *start_page,
+-						unsigned long size, int node)
++int __meminit
++vmemmap_populate(struct page *start_page, unsigned long size, int node)
+ {
+ 	unsigned long addr = (unsigned long)start_page;
+ 	unsigned long end = (unsigned long)(start_page + size);
+@@ -757,6 +800,7 @@ int __meminit vmemmap_populate(struct page *start_page,
+ 		pgd = vmemmap_pgd_populate(addr, node);
+ 		if (!pgd)
+ 			return -ENOMEM;
 +
-+		if ((FPU_CS & 4) != 4) {	/* Must be in the LDT */
-+			/* Can only handle segmented addressing via the LDT
-+			   for now, and it must be 16 bit */
-+			printk("FPU emulator: Unsupported addressing mode\n");
-+			math_abort(FPU_info, SIGILL);
-+		}
+ 		pud = vmemmap_pud_populate(pgd, addr, node);
+ 		if (!pud)
+ 			return -ENOMEM;
+@@ -764,20 +808,22 @@ int __meminit vmemmap_populate(struct page *start_page,
+ 		pmd = pmd_offset(pud, addr);
+ 		if (pmd_none(*pmd)) {
+ 			pte_t entry;
+-			void *p = vmemmap_alloc_block(PMD_SIZE, node);
++			void *p;
 +
-+		code_descriptor = LDT_DESCRIPTOR(FPU_CS);
-+		if (SEG_D_SIZE(code_descriptor)) {
-+			/* The above test may be wrong, the book is not clear */
-+			/* Segmented 32 bit protected mode */
-+			addr_modes.default_mode = SEG32;
++			p = vmemmap_alloc_block(PMD_SIZE, node);
+ 			if (!p)
+ 				return -ENOMEM;
+ 
+-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+-			mk_pte_huge(entry);
++			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
++							PAGE_KERNEL_LARGE);
+ 			set_pmd(pmd, __pmd(pte_val(entry)));
+ 
+ 			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
+ 				addr, addr + PMD_SIZE - 1, p, node);
+-		} else
 +		} else {
-+			/* 16 bit protected mode */
-+			addr_modes.default_mode = PM16;
+ 			vmemmap_verify((pte_t *)pmd, node, addr, next);
 +		}
-+		FPU_EIP += code_base = SEG_BASE_ADDR(code_descriptor);
-+		code_limit = code_base
-+		    + (SEG_LIMIT(code_descriptor) +
-+		       1) * SEG_GRANULARITY(code_descriptor)
-+		    - 1;
-+		if (code_limit < code_base)
-+			code_limit = 0xffffffff;
  	}
--      else
--	{
--	  /* 16 bit protected mode */
--	  addr_modes.default_mode = PM16;
+-
+ 	return 0;
+ }
+ #endif
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+new file mode 100644
+index 0000000..ed79572
+--- /dev/null
++++ b/arch/x86/mm/ioremap.c
+@@ -0,0 +1,501 @@
++/*
++ * Re-map IO memory to kernel address space so that we can access it.
++ * This is needed for high PCI addresses that aren't mapped in the
++ * 640k-1MB IO memory area on PC's
++ *
++ * (C) Copyright 1995 1996 Linus Torvalds
++ */
 +
-+	FPU_lookahead = !(FPU_EFLAGS & X86_EFLAGS_TF);
++#include <linux/bootmem.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
 +
-+	if (!valid_prefix(&byte1, (u_char __user **) & FPU_EIP,
-+			  &addr_modes.override)) {
-+		RE_ENTRANT_CHECK_OFF;
-+		printk
-+		    ("FPU emulator: Unknown prefix byte 0x%02x, probably due to\n"
-+		     "FPU emulator: self-modifying code! (emulation impossible)\n",
-+		     byte1);
-+		RE_ENTRANT_CHECK_ON;
-+		EXCEPTION(EX_INTERNAL | 0x126);
-+		math_abort(FPU_info, SIGILL);
- 	}
--      FPU_EIP += code_base = SEG_BASE_ADDR(code_descriptor);
--      code_limit = code_base
--	+ (SEG_LIMIT(code_descriptor)+1) * SEG_GRANULARITY(code_descriptor)
--	  - 1;
--      if ( code_limit < code_base ) code_limit = 0xffffffff;
--    }
++#include <asm/cacheflush.h>
++#include <asm/e820.h>
++#include <asm/fixmap.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
++#include <asm/pgalloc.h>
++
++enum ioremap_mode {
++	IOR_MODE_UNCACHED,
++	IOR_MODE_CACHED,
++};
++
++#ifdef CONFIG_X86_64
++
++unsigned long __phys_addr(unsigned long x)
++{
++	if (x >= __START_KERNEL_map)
++		return x - __START_KERNEL_map + phys_base;
++	return x - PAGE_OFFSET;
++}
++EXPORT_SYMBOL(__phys_addr);
++
++#endif
++
++int page_is_ram(unsigned long pagenr)
++{
++	unsigned long addr, end;
++	int i;
++
++	for (i = 0; i < e820.nr_map; i++) {
++		/*
++		 * Not usable memory:
++		 */
++		if (e820.map[i].type != E820_RAM)
++			continue;
++		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
++		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
++
++		/*
++		 * Sanity check: Some BIOSen report areas as RAM that
++		 * are not. Notably the 640->1Mb area, which is the
++		 * PCI BIOS area.
++		 */
++		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
++		    end < (BIOS_END >> PAGE_SHIFT))
++			continue;
++
++		if ((pagenr >= addr) && (pagenr < end))
++			return 1;
++	}
++	return 0;
++}
++
++/*
++ * Fix up the linear direct mapping of the kernel to avoid cache attribute
++ * conflicts.
++ */
++static int ioremap_change_attr(unsigned long paddr, unsigned long size,
++			       enum ioremap_mode mode)
++{
++	unsigned long vaddr = (unsigned long)__va(paddr);
++	unsigned long nrpages = size >> PAGE_SHIFT;
++	int err, level;
++
++	/* No change for pages after the last mapping */
++	if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
++		return 0;
++
++	/*
++	 * If there is no identity map for this address,
++	 * change_page_attr_addr is unnecessary
++	 */
++	if (!lookup_address(vaddr, &level))
++		return 0;
++
++	switch (mode) {
++	case IOR_MODE_UNCACHED:
++	default:
++		err = set_memory_uc(vaddr, nrpages);
++		break;
++	case IOR_MODE_CACHED:
++		err = set_memory_wb(vaddr, nrpages);
++		break;
++	}
++
++	return err;
++}
++
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
++			       enum ioremap_mode mode)
++{
++	void __iomem *addr;
++	struct vm_struct *area;
++	unsigned long offset, last_addr;
++	pgprot_t prot;
++
++	/* Don't allow wraparound or zero size */
++	last_addr = phys_addr + size - 1;
++	if (!size || last_addr < phys_addr)
++		return NULL;
++
++	/*
++	 * Don't remap the low PCI/ISA area, it's always mapped..
++	 */
++	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++		return (__force void __iomem *)phys_to_virt(phys_addr);
++
++	/*
++	 * Don't allow anybody to remap normal RAM that we're using..
++	 */
++	for (offset = phys_addr >> PAGE_SHIFT; offset < max_pfn_mapped &&
++	     (offset << PAGE_SHIFT) < last_addr; offset++) {
++		if (page_is_ram(offset))
++			return NULL;
++	}
++
++	switch (mode) {
++	case IOR_MODE_UNCACHED:
++	default:
++		prot = PAGE_KERNEL_NOCACHE;
++		break;
++	case IOR_MODE_CACHED:
++		prot = PAGE_KERNEL;
++		break;
++	}
++
++	/*
++	 * Mappings have to be page-aligned
++	 */
++	offset = phys_addr & ~PAGE_MASK;
++	phys_addr &= PAGE_MASK;
++	size = PAGE_ALIGN(last_addr+1) - phys_addr;
++
++	/*
++	 * Ok, go for it..
++	 */
++	area = get_vm_area(size, VM_IOREMAP);
++	if (!area)
++		return NULL;
++	area->phys_addr = phys_addr;
++	addr = (void __iomem *) area->addr;
++	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
++			       phys_addr, prot)) {
++		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
++		return NULL;
++	}
++
++	if (ioremap_change_attr(phys_addr, size, mode) < 0) {
++		vunmap(addr);
++		return NULL;
++	}
++
++	return (void __iomem *) (offset + (char __iomem *)addr);
++}
++
++/**
++ * ioremap_nocache     -   map bus memory into CPU space
++ * @offset:    bus address of the memory
++ * @size:      size of the resource to map
++ *
++ * ioremap_nocache performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address.
++ *
++ * This version of ioremap ensures that the memory is marked uncachable
++ * on the CPU as well as honouring existing caching rules from things like
++ * the PCI bus. Note that there are other caches and buffers on many
++ * busses. In particular driver authors should read up on PCI writes
++ *
++ * It's useful if some control registers are in such an area and
++ * write combining or read caching is not desirable:
++ *
++ * Must be freed with iounmap.
++ */
++void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
++{
++	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
++}
++EXPORT_SYMBOL(ioremap_nocache);
++
++void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
++{
++	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
++}
++EXPORT_SYMBOL(ioremap_cache);
++
++/**
++ * iounmap - Free a IO remapping
++ * @addr: virtual address from ioremap_*
++ *
++ * Caller must ensure there is only one unmapping for the same pointer.
++ */
++void iounmap(volatile void __iomem *addr)
++{
++	struct vm_struct *p, *o;
++
++	if ((void __force *)addr <= high_memory)
++		return;
++
++	/*
++	 * __ioremap special-cases the PCI/ISA range by not instantiating a
++	 * vm_area and by simply returning an address into the kernel mapping
++	 * of ISA space.   So handle that here.
++	 */
++	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
++	    addr < phys_to_virt(ISA_END_ADDRESS))
++		return;
++
++	addr = (volatile void __iomem *)
++		(PAGE_MASK & (unsigned long __force)addr);
++
++	/* Use the vm area unlocked, assuming the caller
++	   ensures there isn't another iounmap for the same address
++	   in parallel. Reuse of the virtual address is prevented by
++	   leaving it in the global lists until we're done with it.
++	   cpa takes care of the direct mappings. */
++	read_lock(&vmlist_lock);
++	for (p = vmlist; p; p = p->next) {
++		if (p->addr == addr)
++			break;
++	}
++	read_unlock(&vmlist_lock);
++
++	if (!p) {
++		printk(KERN_ERR "iounmap: bad address %p\n", addr);
++		dump_stack();
++		return;
++	}
++
++	/* Reset the direct mapping. Can block */
++	ioremap_change_attr(p->phys_addr, p->size, IOR_MODE_CACHED);
++
++	/* Finally remove it */
++	o = remove_vm_area((void *)addr);
++	BUG_ON(p != o || o == NULL);
++	kfree(p);
++}
++EXPORT_SYMBOL(iounmap);
++
++#ifdef CONFIG_X86_32
++
++int __initdata early_ioremap_debug;
++
++static int __init early_ioremap_debug_setup(char *str)
++{
++	early_ioremap_debug = 1;
++
++	return 0;
++}
++early_param("early_ioremap_debug", early_ioremap_debug_setup);
++
++static __initdata int after_paging_init;
++static __initdata unsigned long bm_pte[1024]
++				__attribute__((aligned(PAGE_SIZE)));
++
++static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
++{
++	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
++}
++
++static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
++{
++	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
++}
++
++void __init early_ioremap_init(void)
++{
++	unsigned long *pgd;
++
++	if (early_ioremap_debug)
++		printk(KERN_INFO "early_ioremap_init()\n");
++
++	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
++	*pgd = __pa(bm_pte) | _PAGE_TABLE;
++	memset(bm_pte, 0, sizeof(bm_pte));
++	/*
++	 * The boot-ioremap range spans multiple pgds, for which
++	 * we are not prepared:
++	 */
++	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
++		WARN_ON(1);
++		printk(KERN_WARNING "pgd %p != %p\n",
++		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
++		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
++		       fix_to_virt(FIX_BTMAP_BEGIN));
++		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
++		       fix_to_virt(FIX_BTMAP_END));
++
++		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
++		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
++		       FIX_BTMAP_BEGIN);
++	}
++}
++
++void __init early_ioremap_clear(void)
++{
++	unsigned long *pgd;
++
++	if (early_ioremap_debug)
++		printk(KERN_INFO "early_ioremap_clear()\n");
++
++	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
++	*pgd = 0;
++	paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT);
++	__flush_tlb_all();
++}
++
++void __init early_ioremap_reset(void)
++{
++	enum fixed_addresses idx;
++	unsigned long *pte, phys, addr;
++
++	after_paging_init = 1;
++	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
++		addr = fix_to_virt(idx);
++		pte = early_ioremap_pte(addr);
++		if (!*pte & _PAGE_PRESENT) {
++			phys = *pte & PAGE_MASK;
++			set_fixmap(idx, phys);
++		}
++	}
++}
++
++static void __init __early_set_fixmap(enum fixed_addresses idx,
++				   unsigned long phys, pgprot_t flags)
++{
++	unsigned long *pte, addr = __fix_to_virt(idx);
++
++	if (idx >= __end_of_fixed_addresses) {
++		BUG();
++		return;
++	}
++	pte = early_ioremap_pte(addr);
++	if (pgprot_val(flags))
++		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
++	else
++		*pte = 0;
++	__flush_tlb_one(addr);
++}
++
++static inline void __init early_set_fixmap(enum fixed_addresses idx,
++					unsigned long phys)
++{
++	if (after_paging_init)
++		set_fixmap(idx, phys);
++	else
++		__early_set_fixmap(idx, phys, PAGE_KERNEL);
++}
++
++static inline void __init early_clear_fixmap(enum fixed_addresses idx)
++{
++	if (after_paging_init)
++		clear_fixmap(idx);
++	else
++		__early_set_fixmap(idx, 0, __pgprot(0));
++}
++
++
++int __initdata early_ioremap_nested;
++
++static int __init check_early_ioremap_leak(void)
++{
++	if (!early_ioremap_nested)
++		return 0;
++
++	printk(KERN_WARNING
++	       "Debug warning: early ioremap leak of %d areas detected.\n",
++	       early_ioremap_nested);
++	printk(KERN_WARNING
++	       "please boot with early_ioremap_debug and report the dmesg.\n");
++	WARN_ON(1);
++
++	return 1;
++}
++late_initcall(check_early_ioremap_leak);
++
++void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
++{
++	unsigned long offset, last_addr;
++	unsigned int nrpages, nesting;
++	enum fixed_addresses idx0, idx;
++
++	WARN_ON(system_state != SYSTEM_BOOTING);
++
++	nesting = early_ioremap_nested;
++	if (early_ioremap_debug) {
++		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
++		       phys_addr, size, nesting);
++		dump_stack();
++	}
++
++	/* Don't allow wraparound or zero size */
++	last_addr = phys_addr + size - 1;
++	if (!size || last_addr < phys_addr) {
++		WARN_ON(1);
++		return NULL;
++	}
++
++	if (nesting >= FIX_BTMAPS_NESTING) {
++		WARN_ON(1);
++		return NULL;
++	}
++	early_ioremap_nested++;
++	/*
++	 * Mappings have to be page-aligned
++	 */
++	offset = phys_addr & ~PAGE_MASK;
++	phys_addr &= PAGE_MASK;
++	size = PAGE_ALIGN(last_addr) - phys_addr;
++
++	/*
++	 * Mappings have to fit in the FIX_BTMAP area.
++	 */
++	nrpages = size >> PAGE_SHIFT;
++	if (nrpages > NR_FIX_BTMAPS) {
++		WARN_ON(1);
++		return NULL;
++	}
++
++	/*
++	 * Ok, go for it..
++	 */
++	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
++	idx = idx0;
++	while (nrpages > 0) {
++		early_set_fixmap(idx, phys_addr);
++		phys_addr += PAGE_SIZE;
++		--idx;
++		--nrpages;
++	}
++	if (early_ioremap_debug)
++		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
++
++	return (void *) (offset + fix_to_virt(idx0));
++}
++
++void __init early_iounmap(void *addr, unsigned long size)
++{
++	unsigned long virt_addr;
++	unsigned long offset;
++	unsigned int nrpages;
++	enum fixed_addresses idx;
++	unsigned int nesting;
++
++	nesting = --early_ioremap_nested;
++	WARN_ON(nesting < 0);
++
++	if (early_ioremap_debug) {
++		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
++		       size, nesting);
++		dump_stack();
++	}
++
++	virt_addr = (unsigned long)addr;
++	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
++		WARN_ON(1);
++		return;
++	}
++	offset = virt_addr & ~PAGE_MASK;
++	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
++
++	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
++	while (nrpages > 0) {
++		early_clear_fixmap(idx);
++		--idx;
++		--nrpages;
++	}
++}
++
++void __this_fixmap_does_not_exist(void)
++{
++	WARN_ON(1);
++}
++
++#endif /* CONFIG_X86_32 */
+diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
+deleted file mode 100644
+index 0b27831..0000000
+--- a/arch/x86/mm/ioremap_32.c
++++ /dev/null
+@@ -1,274 +0,0 @@
+-/*
+- * arch/i386/mm/ioremap.c
+- *
+- * Re-map IO memory to kernel address space so that we can access it.
+- * This is needed for high PCI addresses that aren't mapped in the
+- * 640k-1MB IO memory area on PC's
+- *
+- * (C) Copyright 1995 1996 Linus Torvalds
+- */
 -
--  FPU_lookahead = 1;
--  if (current->ptrace & PT_PTRACED)
--    FPU_lookahead = 0;
+-#include <linux/vmalloc.h>
+-#include <linux/init.h>
+-#include <linux/slab.h>
+-#include <linux/module.h>
+-#include <linux/io.h>
+-#include <asm/fixmap.h>
+-#include <asm/cacheflush.h>
+-#include <asm/tlbflush.h>
+-#include <asm/pgtable.h>
 -
--  if ( !valid_prefix(&byte1, (u_char __user **)&FPU_EIP,
--		     &addr_modes.override) )
--    {
--      RE_ENTRANT_CHECK_OFF;
--      printk("FPU emulator: Unknown prefix byte 0x%02x, probably due to\n"
--	     "FPU emulator: self-modifying code! (emulation impossible)\n",
--	     byte1);
--      RE_ENTRANT_CHECK_ON;
--      EXCEPTION(EX_INTERNAL|0x126);
--      math_abort(FPU_info,SIGILL);
--    }
+-#define ISA_START_ADDRESS	0xa0000
+-#define ISA_END_ADDRESS		0x100000
 -
--do_another_FPU_instruction:
+-/*
+- * Generic mapping function (not visible outside):
+- */
 -
--  no_ip_update = 0;
+-/*
+- * Remap an arbitrary physical address space into the kernel virtual
+- * address space. Needed when the kernel wants to access high addresses
+- * directly.
+- *
+- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+- * have to convert them into an offset in a page-aligned mapping, but the
+- * caller shouldn't need to know that small detail.
+- */
+-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+-{
+-	void __iomem * addr;
+-	struct vm_struct * area;
+-	unsigned long offset, last_addr;
+-	pgprot_t prot;
 -
--  FPU_EIP++;  /* We have fetched the prefix and first code bytes. */
+-	/* Don't allow wraparound or zero size */
+-	last_addr = phys_addr + size - 1;
+-	if (!size || last_addr < phys_addr)
+-		return NULL;
 -
--  if ( addr_modes.default_mode )
--    {
--      /* This checks for the minimum instruction bytes.
--	 We also need to check any extra (address mode) code access. */
--      if ( FPU_EIP > code_limit )
--	math_abort(FPU_info,SIGSEGV);
--    }
+-	/*
+-	 * Don't remap the low PCI/ISA area, it's always mapped..
+-	 */
+-	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+-		return (void __iomem *) phys_to_virt(phys_addr);
 -
--  if ( (byte1 & 0xf8) != 0xd8 )
--    {
--      if ( byte1 == FWAIT_OPCODE )
--	{
--	  if (partial_status & SW_Summary)
--	    goto do_the_FPU_interrupt;
--	  else
--	    goto FPU_fwait_done;
-+
-+      do_another_FPU_instruction:
-+
-+	no_ip_update = 0;
-+
-+	FPU_EIP++;		/* We have fetched the prefix and first code bytes. */
-+
-+	if (addr_modes.default_mode) {
-+		/* This checks for the minimum instruction bytes.
-+		   We also need to check any extra (address mode) code access. */
-+		if (FPU_EIP > code_limit)
-+			math_abort(FPU_info, SIGSEGV);
- 	}
-+
-+	if ((byte1 & 0xf8) != 0xd8) {
-+		if (byte1 == FWAIT_OPCODE) {
-+			if (partial_status & SW_Summary)
-+				goto do_the_FPU_interrupt;
-+			else
-+				goto FPU_fwait_done;
-+		}
- #ifdef PARANOID
--      EXCEPTION(EX_INTERNAL|0x128);
--      math_abort(FPU_info,SIGILL);
-+		EXCEPTION(EX_INTERNAL | 0x128);
-+		math_abort(FPU_info, SIGILL);
- #endif /* PARANOID */
--    }
+-	/*
+-	 * Don't allow anybody to remap normal RAM that we're using..
+-	 */
+-	if (phys_addr <= virt_to_phys(high_memory - 1)) {
+-		char *t_addr, *t_end;
+-		struct page *page;
+-
+-		t_addr = __va(phys_addr);
+-		t_end = t_addr + (size - 1);
+-	   
+-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+-			if(!PageReserved(page))
+-				return NULL;
+-	}
+-
+-	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
+-			| _PAGE_ACCESSED | flags);
+-
+-	/*
+-	 * Mappings have to be page-aligned
+-	 */
+-	offset = phys_addr & ~PAGE_MASK;
+-	phys_addr &= PAGE_MASK;
+-	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+-
+-	/*
+-	 * Ok, go for it..
+-	 */
+-	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
+-	if (!area)
+-		return NULL;
+-	area->phys_addr = phys_addr;
+-	addr = (void __iomem *) area->addr;
+-	if (ioremap_page_range((unsigned long) addr,
+-			(unsigned long) addr + size, phys_addr, prot)) {
+-		vunmap((void __force *) addr);
+-		return NULL;
+-	}
+-	return (void __iomem *) (offset + (char __iomem *)addr);
+-}
+-EXPORT_SYMBOL(__ioremap);
+-
+-/**
+- * ioremap_nocache     -   map bus memory into CPU space
+- * @offset:    bus address of the memory
+- * @size:      size of the resource to map
+- *
+- * ioremap_nocache performs a platform specific sequence of operations to
+- * make bus memory CPU accessible via the readb/readw/readl/writeb/
+- * writew/writel functions and the other mmio helpers. The returned
+- * address is not guaranteed to be usable directly as a virtual
+- * address. 
+- *
+- * This version of ioremap ensures that the memory is marked uncachable
+- * on the CPU as well as honouring existing caching rules from things like
+- * the PCI bus. Note that there are other caches and buffers on many 
+- * busses. In particular driver authors should read up on PCI writes
+- *
+- * It's useful if some control registers are in such an area and
+- * write combining or read caching is not desirable:
+- * 
+- * Must be freed with iounmap.
+- */
+-
+-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+-{
+-	unsigned long last_addr;
+-	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
+-	if (!p) 
+-		return p; 
+-
+-	/* Guaranteed to be > phys_addr, as per __ioremap() */
+-	last_addr = phys_addr + size - 1;
+-
+-	if (last_addr < virt_to_phys(high_memory) - 1) {
+-		struct page *ppage = virt_to_page(__va(phys_addr));		
+-		unsigned long npages;
+-
+-		phys_addr &= PAGE_MASK;
+-
+-		/* This might overflow and become zero.. */
+-		last_addr = PAGE_ALIGN(last_addr);
+-
+-		/* .. but that's ok, because modulo-2**n arithmetic will make
+-	 	* the page-aligned "last - first" come out right.
+-	 	*/
+-		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
+-
+-		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
+-			iounmap(p); 
+-			p = NULL;
+-		}
+-		global_flush_tlb();
+-	}
+-
+-	return p;					
+-}
+-EXPORT_SYMBOL(ioremap_nocache);
+-
+-/**
+- * iounmap - Free a IO remapping
+- * @addr: virtual address from ioremap_*
+- *
+- * Caller must ensure there is only one unmapping for the same pointer.
+- */
+-void iounmap(volatile void __iomem *addr)
+-{
+-	struct vm_struct *p, *o;
+-
+-	if ((void __force *)addr <= high_memory)
+-		return;
+-
+-	/*
+-	 * __ioremap special-cases the PCI/ISA range by not instantiating a
+-	 * vm_area and by simply returning an address into the kernel mapping
+-	 * of ISA space.   So handle that here.
+-	 */
+-	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
+-			addr < phys_to_virt(ISA_END_ADDRESS))
+-		return;
+-
+-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+-
+-	/* Use the vm area unlocked, assuming the caller
+-	   ensures there isn't another iounmap for the same address
+-	   in parallel. Reuse of the virtual address is prevented by
+-	   leaving it in the global lists until we're done with it.
+-	   cpa takes care of the direct mappings. */
+-	read_lock(&vmlist_lock);
+-	for (p = vmlist; p; p = p->next) {
+-		if (p->addr == addr)
+-			break;
+-	}
+-	read_unlock(&vmlist_lock);
+-
+-	if (!p) {
+-		printk("iounmap: bad address %p\n", addr);
+-		dump_stack();
+-		return;
+-	}
+-
+-	/* Reset the direct mapping. Can block */
+-	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
+-		change_page_attr(virt_to_page(__va(p->phys_addr)),
+-				 get_vm_area_size(p) >> PAGE_SHIFT,
+-				 PAGE_KERNEL);
+-		global_flush_tlb();
+-	} 
+-
+-	/* Finally remove it */
+-	o = remove_vm_area((void *)addr);
+-	BUG_ON(p != o || o == NULL);
+-	kfree(p); 
+-}
+-EXPORT_SYMBOL(iounmap);
+-
+-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+-{
+-	unsigned long offset, last_addr;
+-	unsigned int nrpages;
+-	enum fixed_addresses idx;
+-
+-	/* Don't allow wraparound or zero size */
+-	last_addr = phys_addr + size - 1;
+-	if (!size || last_addr < phys_addr)
+-		return NULL;
+-
+-	/*
+-	 * Don't remap the low PCI/ISA area, it's always mapped..
+-	 */
+-	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+-		return phys_to_virt(phys_addr);
+-
+-	/*
+-	 * Mappings have to be page-aligned
+-	 */
+-	offset = phys_addr & ~PAGE_MASK;
+-	phys_addr &= PAGE_MASK;
+-	size = PAGE_ALIGN(last_addr) - phys_addr;
+-
+-	/*
+-	 * Mappings have to fit in the FIX_BTMAP area.
+-	 */
+-	nrpages = size >> PAGE_SHIFT;
+-	if (nrpages > NR_FIX_BTMAPS)
+-		return NULL;
+-
+-	/*
+-	 * Ok, go for it..
+-	 */
+-	idx = FIX_BTMAP_BEGIN;
+-	while (nrpages > 0) {
+-		set_fixmap(idx, phys_addr);
+-		phys_addr += PAGE_SIZE;
+-		--idx;
+-		--nrpages;
+-	}
+-	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+-}
+-
+-void __init bt_iounmap(void *addr, unsigned long size)
+-{
+-	unsigned long virt_addr;
+-	unsigned long offset;
+-	unsigned int nrpages;
+-	enum fixed_addresses idx;
+-
+-	virt_addr = (unsigned long)addr;
+-	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+-		return;
+-	offset = virt_addr & ~PAGE_MASK;
+-	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+-
+-	idx = FIX_BTMAP_BEGIN;
+-	while (nrpages > 0) {
+-		clear_fixmap(idx);
+-		--idx;
+-		--nrpages;
+-	}
+-}
+diff --git a/arch/x86/mm/ioremap_64.c b/arch/x86/mm/ioremap_64.c
+deleted file mode 100644
+index 6cac90a..0000000
+--- a/arch/x86/mm/ioremap_64.c
++++ /dev/null
+@@ -1,210 +0,0 @@
+-/*
+- * arch/x86_64/mm/ioremap.c
+- *
+- * Re-map IO memory to kernel address space so that we can access it.
+- * This is needed for high PCI addresses that aren't mapped in the
+- * 640k-1MB IO memory area on PC's
+- *
+- * (C) Copyright 1995 1996 Linus Torvalds
+- */
+-
+-#include <linux/vmalloc.h>
+-#include <linux/init.h>
+-#include <linux/slab.h>
+-#include <linux/module.h>
+-#include <linux/io.h>
+-
+-#include <asm/pgalloc.h>
+-#include <asm/fixmap.h>
+-#include <asm/tlbflush.h>
+-#include <asm/cacheflush.h>
+-#include <asm/proto.h>
+-
+-unsigned long __phys_addr(unsigned long x)
+-{
+-	if (x >= __START_KERNEL_map)
+-		return x - __START_KERNEL_map + phys_base;
+-	return x - PAGE_OFFSET;
+-}
+-EXPORT_SYMBOL(__phys_addr);
+-
+-#define ISA_START_ADDRESS      0xa0000
+-#define ISA_END_ADDRESS                0x100000
+-
+-/*
+- * Fix up the linear direct mapping of the kernel to avoid cache attribute
+- * conflicts.
+- */
+-static int
+-ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+-					unsigned long flags)
+-{
+-	int err = 0;
+-	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+-		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+-		unsigned long vaddr = (unsigned long) __va(phys_addr);
+-
+-		/*
+- 		 * Must use a address here and not struct page because the phys addr
+-		 * can be a in hole between nodes and not have an memmap entry.
+-		 */
+-		err = change_page_attr_addr(vaddr,npages,__pgprot(__PAGE_KERNEL|flags));
+-		if (!err)
+-			global_flush_tlb();
+-	}
+-	return err;
+-}
+-
+-/*
+- * Generic mapping function
+- */
+-
+-/*
+- * Remap an arbitrary physical address space into the kernel virtual
+- * address space. Needed when the kernel wants to access high addresses
+- * directly.
+- *
+- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+- * have to convert them into an offset in a page-aligned mapping, but the
+- * caller shouldn't need to know that small detail.
+- */
+-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+-{
+-	void * addr;
+-	struct vm_struct * area;
+-	unsigned long offset, last_addr;
+-	pgprot_t pgprot;
+-
+-	/* Don't allow wraparound or zero size */
+-	last_addr = phys_addr + size - 1;
+-	if (!size || last_addr < phys_addr)
+-		return NULL;
+-
+-	/*
+-	 * Don't remap the low PCI/ISA area, it's always mapped..
+-	 */
+-	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+-		return (__force void __iomem *)phys_to_virt(phys_addr);
+-
+-#ifdef CONFIG_FLATMEM
+-	/*
+-	 * Don't allow anybody to remap normal RAM that we're using..
+-	 */
+-	if (last_addr < virt_to_phys(high_memory)) {
+-		char *t_addr, *t_end;
+- 		struct page *page;
+-
+-		t_addr = __va(phys_addr);
+-		t_end = t_addr + (size - 1);
+-	   
+-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+-			if(!PageReserved(page))
+-				return NULL;
+-	}
+-#endif
+-
+-	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
+-			  | _PAGE_DIRTY | _PAGE_ACCESSED | flags);
+-	/*
+-	 * Mappings have to be page-aligned
+-	 */
+-	offset = phys_addr & ~PAGE_MASK;
+-	phys_addr &= PAGE_MASK;
+-	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+-
+-	/*
+-	 * Ok, go for it..
+-	 */
+-	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
+-	if (!area)
+-		return NULL;
+-	area->phys_addr = phys_addr;
+-	addr = area->addr;
+-	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+-			       phys_addr, pgprot)) {
+-		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
+-		return NULL;
+-	}
+-	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
+-		area->flags &= 0xffffff;
+-		vunmap(addr);
+-		return NULL;
+-	}
+-	return (__force void __iomem *) (offset + (char *)addr);
+-}
+-EXPORT_SYMBOL(__ioremap);
+-
+-/**
+- * ioremap_nocache     -   map bus memory into CPU space
+- * @offset:    bus address of the memory
+- * @size:      size of the resource to map
+- *
+- * ioremap_nocache performs a platform specific sequence of operations to
+- * make bus memory CPU accessible via the readb/readw/readl/writeb/
+- * writew/writel functions and the other mmio helpers. The returned
+- * address is not guaranteed to be usable directly as a virtual
+- * address. 
+- *
+- * This version of ioremap ensures that the memory is marked uncachable
+- * on the CPU as well as honouring existing caching rules from things like
+- * the PCI bus. Note that there are other caches and buffers on many 
+- * busses. In particular driver authors should read up on PCI writes
+- *
+- * It's useful if some control registers are in such an area and
+- * write combining or read caching is not desirable:
+- * 
+- * Must be freed with iounmap.
+- */
 -
--  RE_ENTRANT_CHECK_OFF;
--  FPU_code_access_ok(1);
--  FPU_get_user(FPU_modrm, (u_char __user *) FPU_EIP);
--  RE_ENTRANT_CHECK_ON;
--  FPU_EIP++;
+-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+-{
+-	return __ioremap(phys_addr, size, _PAGE_PCD);
+-}
+-EXPORT_SYMBOL(ioremap_nocache);
 -
--  if (partial_status & SW_Summary)
--    {
--      /* Ignore the error for now if the current instruction is a no-wait
--	 control instruction */
--      /* The 80486 manual contradicts itself on this topic,
--	 but a real 80486 uses the following instructions:
--	 fninit, fnstenv, fnsave, fnstsw, fnstenv, fnclex.
--       */
--      code = (FPU_modrm << 8) | byte1;
--      if ( ! ( (((code & 0xf803) == 0xe003) ||    /* fnclex, fninit, fnstsw */
--		(((code & 0x3003) == 0x3001) &&   /* fnsave, fnstcw, fnstenv,
--						     fnstsw */
--		 ((code & 0xc000) != 0xc000))) ) )
--	{
--	  /*
--	   *  We need to simulate the action of the kernel to FPU
--	   *  interrupts here.
--	   */
--	do_the_FPU_interrupt:
+-/**
+- * iounmap - Free a IO remapping
+- * @addr: virtual address from ioremap_*
+- *
+- * Caller must ensure there is only one unmapping for the same pointer.
+- */
+-void iounmap(volatile void __iomem *addr)
+-{
+-	struct vm_struct *p, *o;
 -
--	  FPU_EIP = FPU_ORIG_EIP;	/* Point to current FPU instruction. */
+-	if (addr <= high_memory) 
+-		return; 
+-	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
+-		addr < phys_to_virt(ISA_END_ADDRESS))
+-		return;
 -
--	  RE_ENTRANT_CHECK_OFF;
--	  current->thread.trap_no = 16;
--	  current->thread.error_code = 0;
--	  send_sig(SIGFPE, current, 1);
--	  return;
+-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+-	/* Use the vm area unlocked, assuming the caller
+-	   ensures there isn't another iounmap for the same address
+-	   in parallel. Reuse of the virtual address is prevented by
+-	   leaving it in the global lists until we're done with it.
+-	   cpa takes care of the direct mappings. */
+-	read_lock(&vmlist_lock);
+-	for (p = vmlist; p; p = p->next) {
+-		if (p->addr == addr)
+-			break;
 -	}
--    }
--
--  entry_sel_off.offset = FPU_ORIG_EIP;
--  entry_sel_off.selector = FPU_CS;
--  entry_sel_off.opcode = (byte1 << 8) | FPU_modrm;
+-	read_unlock(&vmlist_lock);
 -
--  FPU_rm = FPU_modrm & 7;
+-	if (!p) {
+-		printk("iounmap: bad address %p\n", addr);
+-		dump_stack();
+-		return;
+-	}
 -
--  if ( FPU_modrm < 0300 )
--    {
--      /* All of these instructions use the mod/rm byte to get a data address */
+-	/* Reset the direct mapping. Can block */
+-	if (p->flags >> 20)
+-		ioremap_change_attr(p->phys_addr, p->size, 0);
 -
--      if ( (addr_modes.default_mode & SIXTEEN)
--	  ^ (addr_modes.override.address_size == ADDR_SIZE_PREFIX) )
--	data_address = FPU_get_address_16(FPU_modrm, &FPU_EIP, &data_sel_off,
--					  addr_modes);
--      else
--	data_address = FPU_get_address(FPU_modrm, &FPU_EIP, &data_sel_off,
--				       addr_modes);
+-	/* Finally remove it */
+-	o = remove_vm_area((void *)addr);
+-	BUG_ON(p != o || o == NULL);
+-	kfree(p); 
+-}
+-EXPORT_SYMBOL(iounmap);
 -
--      if ( addr_modes.default_mode )
--	{
--	  if ( FPU_EIP-1 > code_limit )
--	    math_abort(FPU_info,SIGSEGV);
- 	}
+diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
+index a96006f..7a2ebce 100644
+--- a/arch/x86/mm/k8topology_64.c
++++ b/arch/x86/mm/k8topology_64.c
+@@ -1,9 +1,9 @@
+-/* 
++/*
+  * AMD K8 NUMA support.
+  * Discover the memory map and associated nodes.
+- * 
++ *
+  * This version reads it directly from the K8 northbridge.
+- * 
++ *
+  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
+  */
+ #include <linux/kernel.h>
+@@ -22,132 +22,135 @@
  
--      if ( !(byte1 & 1) )
--	{
--	  unsigned short status1 = partial_status;
+ static __init int find_northbridge(void)
+ {
+-	int num; 
++	int num;
+ 
+-	for (num = 0; num < 32; num++) { 
++	for (num = 0; num < 32; num++) {
+ 		u32 header;
+-		
+-		header = read_pci_config(0, num, 0, 0x00);  
+-		if (header != (PCI_VENDOR_ID_AMD | (0x1100<<16)))
+-			continue; 	
 -
--	  st0_ptr = &st(0);
--	  st0_tag = FPU_gettag0();
+-		header = read_pci_config(0, num, 1, 0x00); 
+-		if (header != (PCI_VENDOR_ID_AMD | (0x1101<<16)))
+-			continue;	
+-		return num; 
+-	} 
 -
--	  /* Stack underflow has priority */
--	  if ( NOT_EMPTY_ST0 )
--	    {
--	      if ( addr_modes.default_mode & PROTECTED )
--		{
--		  /* This table works for 16 and 32 bit protected mode */
--		  if ( access_limit < data_sizes_16[(byte1 >> 1) & 3] )
--		    math_abort(FPU_info,SIGSEGV);
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_code_access_ok(1);
-+	FPU_get_user(FPU_modrm, (u_char __user *) FPU_EIP);
-+	RE_ENTRANT_CHECK_ON;
-+	FPU_EIP++;
-+
-+	if (partial_status & SW_Summary) {
-+		/* Ignore the error for now if the current instruction is a no-wait
-+		   control instruction */
-+		/* The 80486 manual contradicts itself on this topic,
-+		   but a real 80486 uses the following instructions:
-+		   fninit, fnstenv, fnsave, fnstsw, fnstenv, fnclex.
-+		 */
-+		code = (FPU_modrm << 8) | byte1;
-+		if (!((((code & 0xf803) == 0xe003) ||	/* fnclex, fninit, fnstsw */
-+		       (((code & 0x3003) == 0x3001) &&	/* fnsave, fnstcw, fnstenv,
-+							   fnstsw */
-+			((code & 0xc000) != 0xc000))))) {
-+			/*
-+			 *  We need to simulate the action of the kernel to FPU
-+			 *  interrupts here.
-+			 */
-+		      do_the_FPU_interrupt:
+-	return -1; 	
 +
-+			FPU_EIP = FPU_ORIG_EIP;	/* Point to current FPU instruction. */
++		header = read_pci_config(0, num, 0, 0x00);
++		if (header != (PCI_VENDOR_ID_AMD | (0x1100<<16)) &&
++			header != (PCI_VENDOR_ID_AMD | (0x1200<<16)) &&
++			header != (PCI_VENDOR_ID_AMD | (0x1300<<16)))
++			continue;
 +
-+			RE_ENTRANT_CHECK_OFF;
-+			current->thread.trap_no = 16;
-+			current->thread.error_code = 0;
-+			send_sig(SIGFPE, current, 1);
-+			return;
- 		}
++		header = read_pci_config(0, num, 1, 0x00);
++		if (header != (PCI_VENDOR_ID_AMD | (0x1101<<16)) &&
++			header != (PCI_VENDOR_ID_AMD | (0x1201<<16)) &&
++			header != (PCI_VENDOR_ID_AMD | (0x1301<<16)))
++			continue;
++		return num;
 +	}
++
++	return -1;
+ }
  
--	      unmasked = 0;  /* Do this here to stop compiler warnings. */
--	      switch ( (byte1 >> 1) & 3 )
--		{
--		case 0:
--		  unmasked = FPU_load_single((float __user *)data_address,
--					     &loaded_data);
--		  loaded_tag = unmasked & 0xff;
--		  unmasked &= ~0xff;
--		  break;
--		case 1:
--		  loaded_tag = FPU_load_int32((long __user *)data_address, &loaded_data);
--		  break;
--		case 2:
--		  unmasked = FPU_load_double((double __user *)data_address,
--					     &loaded_data);
--		  loaded_tag = unmasked & 0xff;
--		  unmasked &= ~0xff;
--		  break;
--		case 3:
--		default:  /* Used here to suppress gcc warnings. */
--		  loaded_tag = FPU_load_int16((short __user *)data_address, &loaded_data);
--		  break;
--		}
-+	entry_sel_off.offset = FPU_ORIG_EIP;
-+	entry_sel_off.selector = FPU_CS;
-+	entry_sel_off.opcode = (byte1 << 8) | FPU_modrm;
+ int __init k8_scan_nodes(unsigned long start, unsigned long end)
+-{ 
++{
+ 	unsigned long prevbase;
+ 	struct bootnode nodes[8];
+-	int nodeid, i, j, nb;
++	int nodeid, i, nb;
+ 	unsigned char nodeids[8];
+ 	int found = 0;
+ 	u32 reg;
+ 	unsigned numnodes;
+-	unsigned num_cores;
++	unsigned cores;
++	unsigned bits;
++	int j;
  
--	      /* No more access to user memory, it is safe
--		 to use static data now */
+ 	if (!early_pci_allowed())
+ 		return -1;
+ 
+-	nb = find_northbridge(); 
+-	if (nb < 0) 
++	nb = find_northbridge();
++	if (nb < 0)
+ 		return nb;
+ 
+-	printk(KERN_INFO "Scanning NUMA topology in Northbridge %d\n", nb); 
 -
--	      /* NaN operands have the next priority. */
--	      /* We have to delay looking at st(0) until after
--		 loading the data, because that data might contain an SNaN */
--	      if ( ((st0_tag == TAG_Special) && isNaN(st0_ptr)) ||
--		  ((loaded_tag == TAG_Special) && isNaN(&loaded_data)) )
--		{
--		  /* Restore the status word; we might have loaded a
--		     denormal. */
--		  partial_status = status1;
--		  if ( (FPU_modrm & 0x30) == 0x10 )
--		    {
--		      /* fcom or fcomp */
--		      EXCEPTION(EX_Invalid);
--		      setcc(SW_C3 | SW_C2 | SW_C0);
--		      if ( (FPU_modrm & 0x08) && (control_word & CW_Invalid) )
--			FPU_pop();             /* fcomp, masked, so we pop. */
--		    }
--		  else
--		    {
--		      if ( loaded_tag == TAG_Special )
--			loaded_tag = FPU_Special(&loaded_data);
--#ifdef PECULIAR_486
--		      /* This is not really needed, but gives behaviour
--			 identical to an 80486 */
--		      if ( (FPU_modrm & 0x28) == 0x20 )
--			/* fdiv or fsub */
--			real_2op_NaN(&loaded_data, loaded_tag, 0, &loaded_data);
--		      else
--#endif /* PECULIAR_486 */ 
--			/* fadd, fdivr, fmul, or fsubr */
--			real_2op_NaN(&loaded_data, loaded_tag, 0, st0_ptr);
--		    }
--		  goto reg_mem_instr_done;
--		}
-+	FPU_rm = FPU_modrm & 7;
+-	num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+-	printk(KERN_INFO "CPU has %d num_cores\n", num_cores);
++	printk(KERN_INFO "Scanning NUMA topology in Northbridge %d\n", nb);
  
--	      if ( unmasked && !((FPU_modrm & 0x30) == 0x10) )
--		{
--		  /* Is not a comparison instruction. */
--		  if ( (FPU_modrm & 0x38) == 0x38 )
--		    {
--		      /* fdivr */
--		      if ( (st0_tag == TAG_Zero) &&
--			   ((loaded_tag == TAG_Valid)
--			    || (loaded_tag == TAG_Special
--				&& isdenormal(&loaded_data))) )
--			{
--			  if ( FPU_divide_by_zero(0, getsign(&loaded_data))
--			       < 0 )
--			    {
--			      /* We use the fact here that the unmasked
--				 exception in the loaded data was for a
--				 denormal operand */
--			      /* Restore the state of the denormal op bit */
--			      partial_status &= ~SW_Denorm_Op;
--			      partial_status |= status1 & SW_Denorm_Op;
--			    }
--			  else
--			    setsign(st0_ptr, getsign(&loaded_data));
--			}
--		    }
--		  goto reg_mem_instr_done;
--		}
-+	if (FPU_modrm < 0300) {
-+		/* All of these instructions use the mod/rm byte to get a data address */
+-	reg = read_pci_config(0, nb, 0, 0x60); 
++	reg = read_pci_config(0, nb, 0, 0x60);
+ 	numnodes = ((reg >> 4) & 0xF) + 1;
+ 	if (numnodes <= 1)
+ 		return -1;
  
--	      switch ( (FPU_modrm >> 3) & 7 )
--		{
--		case 0:         /* fadd */
--		  clear_C1();
--		  FPU_add(&loaded_data, loaded_tag, 0, control_word);
--		  break;
--		case 1:         /* fmul */
--		  clear_C1();
--		  FPU_mul(&loaded_data, loaded_tag, 0, control_word);
--		  break;
--		case 2:         /* fcom */
--		  FPU_compare_st_data(&loaded_data, loaded_tag);
--		  break;
--		case 3:         /* fcomp */
--		  if ( !FPU_compare_st_data(&loaded_data, loaded_tag)
--		       && !unmasked )
--		    FPU_pop();
--		  break;
--		case 4:         /* fsub */
--		  clear_C1();
--		  FPU_sub(LOADED|loaded_tag, (int)&loaded_data, control_word);
--		  break;
--		case 5:         /* fsubr */
--		  clear_C1();
--		  FPU_sub(REV|LOADED|loaded_tag, (int)&loaded_data, control_word);
--		  break;
--		case 6:         /* fdiv */
--		  clear_C1();
--		  FPU_div(LOADED|loaded_tag, (int)&loaded_data, control_word);
--		  break;
--		case 7:         /* fdivr */
--		  clear_C1();
--		  if ( st0_tag == TAG_Zero )
--		    partial_status = status1;  /* Undo any denorm tag,
--						  zero-divide has priority. */
--		  FPU_div(REV|LOADED|loaded_tag, (int)&loaded_data, control_word);
--		  break;
-+		if ((addr_modes.default_mode & SIXTEEN)
-+		    ^ (addr_modes.override.address_size == ADDR_SIZE_PREFIX))
-+			data_address =
-+			    FPU_get_address_16(FPU_modrm, &FPU_EIP,
-+					       &data_sel_off, addr_modes);
-+		else
-+			data_address =
-+			    FPU_get_address(FPU_modrm, &FPU_EIP, &data_sel_off,
-+					    addr_modes);
+ 	printk(KERN_INFO "Number of nodes %d\n", numnodes);
+ 
+-	memset(&nodes,0,sizeof(nodes)); 
++	memset(&nodes, 0, sizeof(nodes));
+ 	prevbase = 0;
+-	for (i = 0; i < 8; i++) { 
+-		unsigned long base,limit; 
++	for (i = 0; i < 8; i++) {
++		unsigned long base, limit;
+ 		u32 nodeid;
+-		
 +
-+		if (addr_modes.default_mode) {
-+			if (FPU_EIP - 1 > code_limit)
-+				math_abort(FPU_info, SIGSEGV);
+ 		base = read_pci_config(0, nb, 1, 0x40 + i*8);
+ 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
+ 
+-		nodeid = limit & 7; 
++		nodeid = limit & 7;
+ 		nodeids[i] = nodeid;
+-		if ((base & 3) == 0) { 
++		if ((base & 3) == 0) {
+ 			if (i < numnodes)
+-				printk("Skipping disabled node %d\n", i); 
++				printk("Skipping disabled node %d\n", i);
+ 			continue;
+-		} 
++		}
+ 		if (nodeid >= numnodes) {
+ 			printk("Ignoring excess node %d (%lx:%lx)\n", nodeid,
+-			       base, limit); 
++			       base, limit);
+ 			continue;
+-		} 
++		}
+ 
+-		if (!limit) { 
+-			printk(KERN_INFO "Skipping node entry %d (base %lx)\n", i,
+-			       base);
++		if (!limit) {
++			printk(KERN_INFO "Skipping node entry %d (base %lx)\n",
++			       i, base);
+ 			continue;
  		}
--	    }
--	  else
--	    {
--	      if ( (FPU_modrm & 0x30) == 0x10 )
--		{
--		  /* The instruction is fcom or fcomp */
--		  EXCEPTION(EX_StackUnder);
--		  setcc(SW_C3 | SW_C2 | SW_C0);
--		  if ( (FPU_modrm & 0x08) && (control_word & CW_Invalid) )
--		    FPU_pop();             /* fcomp */
-+
-+		if (!(byte1 & 1)) {
-+			unsigned short status1 = partial_status;
-+
-+			st0_ptr = &st(0);
-+			st0_tag = FPU_gettag0();
+ 		if ((base >> 8) & 3 || (limit >> 8) & 3) {
+-			printk(KERN_ERR "Node %d using interleaving mode %lx/%lx\n", 
+-			       nodeid, (base>>8)&3, (limit>>8) & 3); 
+-			return -1; 
+-		}	
++			printk(KERN_ERR "Node %d using interleaving mode %lx/%lx\n",
++			       nodeid, (base>>8)&3, (limit>>8) & 3);
++			return -1;
++		}
+ 		if (node_isset(nodeid, node_possible_map)) {
+-			printk(KERN_INFO "Node %d already present. Skipping\n", 
++			printk(KERN_INFO "Node %d already present. Skipping\n",
+ 			       nodeid);
+ 			continue;
+ 		}
+ 
+-		limit >>= 16; 
+-		limit <<= 24; 
++		limit >>= 16;
++		limit <<= 24;
+ 		limit |= (1<<24)-1;
+ 		limit++;
+ 
+ 		if (limit > end_pfn << PAGE_SHIFT)
+ 			limit = end_pfn << PAGE_SHIFT;
+ 		if (limit <= base)
+-			continue; 
+-			
++			continue;
 +
-+			/* Stack underflow has priority */
-+			if (NOT_EMPTY_ST0) {
-+				if (addr_modes.default_mode & PROTECTED) {
-+					/* This table works for 16 and 32 bit protected mode */
-+					if (access_limit <
-+					    data_sizes_16[(byte1 >> 1) & 3])
-+						math_abort(FPU_info, SIGSEGV);
-+				}
+ 		base >>= 16;
+-		base <<= 24; 
+-
+-		if (base < start) 
+-			base = start; 
+-		if (limit > end) 
+-			limit = end; 
+-		if (limit == base) { 
+-			printk(KERN_ERR "Empty node %d\n", nodeid); 
+-			continue; 
++		base <<= 24;
 +
-+				unmasked = 0;	/* Do this here to stop compiler warnings. */
-+				switch ((byte1 >> 1) & 3) {
-+				case 0:
-+					unmasked =
-+					    FPU_load_single((float __user *)
-+							    data_address,
-+							    &loaded_data);
-+					loaded_tag = unmasked & 0xff;
-+					unmasked &= ~0xff;
-+					break;
-+				case 1:
-+					loaded_tag =
-+					    FPU_load_int32((long __user *)
-+							   data_address,
-+							   &loaded_data);
-+					break;
-+				case 2:
-+					unmasked =
-+					    FPU_load_double((double __user *)
-+							    data_address,
-+							    &loaded_data);
-+					loaded_tag = unmasked & 0xff;
-+					unmasked &= ~0xff;
-+					break;
-+				case 3:
-+				default:	/* Used here to suppress gcc warnings. */
-+					loaded_tag =
-+					    FPU_load_int16((short __user *)
-+							   data_address,
-+							   &loaded_data);
-+					break;
-+				}
++		if (base < start)
++			base = start;
++		if (limit > end)
++			limit = end;
++		if (limit == base) {
++			printk(KERN_ERR "Empty node %d\n", nodeid);
++			continue;
+ 		}
+-		if (limit < base) { 
++		if (limit < base) {
+ 			printk(KERN_ERR "Node %d bogus settings %lx-%lx.\n",
+-			       nodeid, base, limit); 			       
++			       nodeid, base, limit);
+ 			continue;
+-		} 
+-		
++		}
 +
-+				/* No more access to user memory, it is safe
-+				   to use static data now */
+ 		/* Could sort here, but pun for now. Should not happen anyroads. */
+-		if (prevbase > base) { 
++		if (prevbase > base) {
+ 			printk(KERN_ERR "Node map not sorted %lx,%lx\n",
+-			       prevbase,base);
++			       prevbase, base);
+ 			return -1;
+ 		}
+-			
+-		printk(KERN_INFO "Node %d MemBase %016lx Limit %016lx\n", 
+-		       nodeid, base, limit); 
+-		
 +
-+				/* NaN operands have the next priority. */
-+				/* We have to delay looking at st(0) until after
-+				   loading the data, because that data might contain an SNaN */
-+				if (((st0_tag == TAG_Special) && isNaN(st0_ptr))
-+				    || ((loaded_tag == TAG_Special)
-+					&& isNaN(&loaded_data))) {
-+					/* Restore the status word; we might have loaded a
-+					   denormal. */
-+					partial_status = status1;
-+					if ((FPU_modrm & 0x30) == 0x10) {
-+						/* fcom or fcomp */
-+						EXCEPTION(EX_Invalid);
-+						setcc(SW_C3 | SW_C2 | SW_C0);
-+						if ((FPU_modrm & 0x08)
-+						    && (control_word &
-+							CW_Invalid))
-+							FPU_pop();	/* fcomp, masked, so we pop. */
-+					} else {
-+						if (loaded_tag == TAG_Special)
-+							loaded_tag =
-+							    FPU_Special
-+							    (&loaded_data);
-+#ifdef PECULIAR_486
-+						/* This is not really needed, but gives behaviour
-+						   identical to an 80486 */
-+						if ((FPU_modrm & 0x28) == 0x20)
-+							/* fdiv or fsub */
-+							real_2op_NaN
-+							    (&loaded_data,
-+							     loaded_tag, 0,
-+							     &loaded_data);
-+						else
-+#endif /* PECULIAR_486 */
-+							/* fadd, fdivr, fmul, or fsubr */
-+							real_2op_NaN
-+							    (&loaded_data,
-+							     loaded_tag, 0,
-+							     st0_ptr);
-+					}
-+					goto reg_mem_instr_done;
-+				}
++		printk(KERN_INFO "Node %d MemBase %016lx Limit %016lx\n",
++		       nodeid, base, limit);
 +
-+				if (unmasked && !((FPU_modrm & 0x30) == 0x10)) {
-+					/* Is not a comparison instruction. */
-+					if ((FPU_modrm & 0x38) == 0x38) {
-+						/* fdivr */
-+						if ((st0_tag == TAG_Zero) &&
-+						    ((loaded_tag == TAG_Valid)
-+						     || (loaded_tag ==
-+							 TAG_Special
-+							 &&
-+							 isdenormal
-+							 (&loaded_data)))) {
-+							if (FPU_divide_by_zero
-+							    (0,
-+							     getsign
-+							     (&loaded_data))
-+							    < 0) {
-+								/* We use the fact here that the unmasked
-+								   exception in the loaded data was for a
-+								   denormal operand */
-+								/* Restore the state of the denormal op bit */
-+								partial_status
-+								    &=
-+								    ~SW_Denorm_Op;
-+								partial_status
-+								    |=
-+								    status1 &
-+								    SW_Denorm_Op;
-+							} else
-+								setsign(st0_ptr,
-+									getsign
-+									(&loaded_data));
-+						}
-+					}
-+					goto reg_mem_instr_done;
-+				}
+ 		found++;
+-		
+-		nodes[nodeid].start = base; 
 +
-+				switch ((FPU_modrm >> 3) & 7) {
-+				case 0:	/* fadd */
-+					clear_C1();
-+					FPU_add(&loaded_data, loaded_tag, 0,
-+						control_word);
-+					break;
-+				case 1:	/* fmul */
-+					clear_C1();
-+					FPU_mul(&loaded_data, loaded_tag, 0,
-+						control_word);
-+					break;
-+				case 2:	/* fcom */
-+					FPU_compare_st_data(&loaded_data,
-+							    loaded_tag);
-+					break;
-+				case 3:	/* fcomp */
-+					if (!FPU_compare_st_data
-+					    (&loaded_data, loaded_tag)
-+					    && !unmasked)
-+						FPU_pop();
-+					break;
-+				case 4:	/* fsub */
-+					clear_C1();
-+					FPU_sub(LOADED | loaded_tag,
-+						(int)&loaded_data,
-+						control_word);
-+					break;
-+				case 5:	/* fsubr */
-+					clear_C1();
-+					FPU_sub(REV | LOADED | loaded_tag,
-+						(int)&loaded_data,
-+						control_word);
-+					break;
-+				case 6:	/* fdiv */
-+					clear_C1();
-+					FPU_div(LOADED | loaded_tag,
-+						(int)&loaded_data,
-+						control_word);
-+					break;
-+				case 7:	/* fdivr */
-+					clear_C1();
-+					if (st0_tag == TAG_Zero)
-+						partial_status = status1;	/* Undo any denorm tag,
-+										   zero-divide has priority. */
-+					FPU_div(REV | LOADED | loaded_tag,
-+						(int)&loaded_data,
-+						control_word);
-+					break;
-+				}
-+			} else {
-+				if ((FPU_modrm & 0x30) == 0x10) {
-+					/* The instruction is fcom or fcomp */
-+					EXCEPTION(EX_StackUnder);
-+					setcc(SW_C3 | SW_C2 | SW_C0);
-+					if ((FPU_modrm & 0x08)
-+					    && (control_word & CW_Invalid))
-+						FPU_pop();	/* fcomp */
-+				} else
-+					FPU_stack_underflow();
-+			}
-+		      reg_mem_instr_done:
-+			operand_address = data_sel_off;
-+		} else {
-+			if (!(no_ip_update =
-+			      FPU_load_store(((FPU_modrm & 0x38) | (byte1 & 6))
-+					     >> 1, addr_modes, data_address))) {
-+				operand_address = data_sel_off;
-+			}
- 		}
--	      else
--		FPU_stack_underflow();
--	    }
--	reg_mem_instr_done:
--	  operand_address = data_sel_off;
--	}
--      else
--	{
--	  if ( !(no_ip_update =
--		 FPU_load_store(((FPU_modrm & 0x38) | (byte1 & 6)) >> 1,
--				addr_modes, data_address)) )
--	    {
--	      operand_address = data_sel_off;
--	    }
--	}
- 
--    }
--  else
--    {
--      /* None of these instructions access user memory */
--      u_char instr_index = (FPU_modrm & 0x38) | (byte1 & 7);
-+	} else {
-+		/* None of these instructions access user memory */
-+		u_char instr_index = (FPU_modrm & 0x38) | (byte1 & 7);
- 
- #ifdef PECULIAR_486
--      /* This is supposed to be undefined, but a real 80486 seems
--	 to do this: */
--      operand_address.offset = 0;
--      operand_address.selector = FPU_DS;
-+		/* This is supposed to be undefined, but a real 80486 seems
-+		   to do this: */
-+		operand_address.offset = 0;
-+		operand_address.selector = FPU_DS;
- #endif /* PECULIAR_486 */
- 
--      st0_ptr = &st(0);
--      st0_tag = FPU_gettag0();
--      switch ( type_table[(int) instr_index] )
--	{
--	case _NONE_:   /* also _REGIc: _REGIn */
--	  break;
--	case _REG0_:
--	  if ( !NOT_EMPTY_ST0 )
--	    {
--	      FPU_stack_underflow();
--	      goto FPU_instruction_done;
--	    }
--	  break;
--	case _REGIi:
--	  if ( !NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm) )
--	    {
--	      FPU_stack_underflow_i(FPU_rm);
--	      goto FPU_instruction_done;
--	    }
--	  break;
--	case _REGIp:
--	  if ( !NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm) )
--	    {
--	      FPU_stack_underflow_pop(FPU_rm);
--	      goto FPU_instruction_done;
--	    }
--	  break;
--	case _REGI_:
--	  if ( !NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm) )
--	    {
--	      FPU_stack_underflow();
--	      goto FPU_instruction_done;
--	    }
--	  break;
--	case _PUSH_:     /* Only used by the fld st(i) instruction */
--	  break;
--	case _null_:
--	  FPU_illegal();
--	  goto FPU_instruction_done;
--	default:
--	  EXCEPTION(EX_INTERNAL|0x111);
--	  goto FPU_instruction_done;
--	}
--      (*st_instr_table[(int) instr_index])();
-+		st0_ptr = &st(0);
-+		st0_tag = FPU_gettag0();
-+		switch (type_table[(int)instr_index]) {
-+		case _NONE_:	/* also _REGIc: _REGIn */
-+			break;
-+		case _REG0_:
-+			if (!NOT_EMPTY_ST0) {
-+				FPU_stack_underflow();
-+				goto FPU_instruction_done;
-+			}
-+			break;
-+		case _REGIi:
-+			if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
-+				FPU_stack_underflow_i(FPU_rm);
-+				goto FPU_instruction_done;
-+			}
-+			break;
-+		case _REGIp:
-+			if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
-+				FPU_stack_underflow_pop(FPU_rm);
-+				goto FPU_instruction_done;
-+			}
-+			break;
-+		case _REGI_:
-+			if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
-+				FPU_stack_underflow();
-+				goto FPU_instruction_done;
-+			}
-+			break;
-+		case _PUSH_:	/* Only used by the fld st(i) instruction */
-+			break;
-+		case _null_:
-+			FPU_illegal();
-+			goto FPU_instruction_done;
-+		default:
-+			EXCEPTION(EX_INTERNAL | 0x111);
-+			goto FPU_instruction_done;
-+		}
-+		(*st_instr_table[(int)instr_index]) ();
++		nodes[nodeid].start = base;
+ 		nodes[nodeid].end = limit;
+ 		e820_register_active_regions(nodeid,
+ 				nodes[nodeid].start >> PAGE_SHIFT,
+@@ -156,27 +159,31 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
+ 		prevbase = base;
  
--FPU_instruction_done:
--      ;
--    }
-+	      FPU_instruction_done:
-+		;
+ 		node_set(nodeid, node_possible_map);
+-	} 
 +	}
  
--  if ( ! no_ip_update )
--    instruction_address = entry_sel_off;
-+	if (!no_ip_update)
-+		instruction_address = entry_sel_off;
- 
--FPU_fwait_done:
-+      FPU_fwait_done:
- 
- #ifdef DEBUG
--  RE_ENTRANT_CHECK_OFF;
--  FPU_printall();
--  RE_ENTRANT_CHECK_ON;
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_printall();
-+	RE_ENTRANT_CHECK_ON;
- #endif /* DEBUG */
+ 	if (!found)
+-		return -1; 
++		return -1;
  
--  if (FPU_lookahead && !need_resched())
--    {
--      FPU_ORIG_EIP = FPU_EIP - code_base;
--      if ( valid_prefix(&byte1, (u_char __user **)&FPU_EIP,
--			&addr_modes.override) )
--	goto do_another_FPU_instruction;
--    }
-+	if (FPU_lookahead && !need_resched()) {
-+		FPU_ORIG_EIP = FPU_EIP - code_base;
-+		if (valid_prefix(&byte1, (u_char __user **) & FPU_EIP,
-+				 &addr_modes.override))
-+			goto do_another_FPU_instruction;
+ 	memnode_shift = compute_hash_shift(nodes, 8);
+-	if (memnode_shift < 0) { 
+-		printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n"); 
+-		return -1; 
+-	} 
+-	printk(KERN_INFO "Using node hash shift of %d\n", memnode_shift); 
++	if (memnode_shift < 0) {
++		printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n");
++		return -1;
 +	}
++	printk(KERN_INFO "Using node hash shift of %d\n", memnode_shift);
++
++	/* use the coreid bits from early_identify_cpu */
++	bits = boot_cpu_data.x86_coreid_bits;
++	cores = (1<<bits);
  
--  if ( addr_modes.default_mode )
--    FPU_EIP -= code_base;
-+	if (addr_modes.default_mode)
-+		FPU_EIP -= code_base;
- 
--  RE_ENTRANT_CHECK_OFF;
-+	RE_ENTRANT_CHECK_OFF;
- }
+ 	for (i = 0; i < 8; i++) {
+-		if (nodes[i].start != nodes[i].end) { 
++		if (nodes[i].start != nodes[i].end) {
+ 			nodeid = nodeids[i];
+-			for (j = 0; j < num_cores; j++)
+-				apicid_to_node[(nodeid * num_cores) + j] = i;
+-			setup_node_bootmem(i, nodes[i].start, nodes[i].end); 
+-		} 
++			for (j = 0; j < cores; j++)
++				apicid_to_node[(nodeid << bits) + j] = i;
++			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
++		}
+ 	}
  
--
- /* Support for prefix bytes is not yet complete. To properly handle
-    all prefix bytes, further changes are needed in the emulator code
-    which accesses user address space. Access to separate segments is
-    important for msdos emulation. */
- static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
--			overrides *override)
-+			overrides * override)
- {
--  u_char byte;
--  u_char __user *ip = *fpu_eip;
--
--  *override = (overrides) { 0, 0, PREFIX_DEFAULT };       /* defaults */
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_code_access_ok(1);
--  FPU_get_user(byte, ip);
--  RE_ENTRANT_CHECK_ON;
--
--  while ( 1 )
--    {
--      switch ( byte )
--	{
--	case ADDR_SIZE_PREFIX:
--	  override->address_size = ADDR_SIZE_PREFIX;
--	  goto do_next_byte;
--
--	case OP_SIZE_PREFIX:
--	  override->operand_size = OP_SIZE_PREFIX;
--	  goto do_next_byte;
--
--	case PREFIX_CS:
--	  override->segment = PREFIX_CS_;
--	  goto do_next_byte;
--	case PREFIX_ES:
--	  override->segment = PREFIX_ES_;
--	  goto do_next_byte;
--	case PREFIX_SS:
--	  override->segment = PREFIX_SS_;
--	  goto do_next_byte;
--	case PREFIX_FS:
--	  override->segment = PREFIX_FS_;
--	  goto do_next_byte;
--	case PREFIX_GS:
--	  override->segment = PREFIX_GS_;
--	  goto do_next_byte;
--	case PREFIX_DS:
--	  override->segment = PREFIX_DS_;
--	  goto do_next_byte;
-+	u_char byte;
-+	u_char __user *ip = *fpu_eip;
+ 	numa_init_array();
+ 	return 0;
+-} 
++}
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+new file mode 100644
+index 0000000..56fe712
+--- /dev/null
++++ b/arch/x86/mm/mmap.c
+@@ -0,0 +1,123 @@
++/*
++ * Flexible mmap layout support
++ *
++ * Based on code by Ingo Molnar and Andi Kleen, copyrighted
++ * as follows:
++ *
++ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
++ * All Rights Reserved.
++ * Copyright 2005 Andi Kleen, SUSE Labs.
++ * Copyright 2007 Jiri Kosina, SUSE Labs.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ */
 +
-+	*override = (overrides) {
-+	0, 0, PREFIX_DEFAULT};	/* defaults */
++#include <linux/personality.h>
++#include <linux/mm.h>
++#include <linux/random.h>
++#include <linux/limits.h>
++#include <linux/sched.h>
 +
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_code_access_ok(1);
-+	FPU_get_user(byte, ip);
-+	RE_ENTRANT_CHECK_ON;
++/*
++ * Top of mmap area (just below the process stack).
++ *
++ * Leave an at least ~128 MB hole.
++ */
++#define MIN_GAP (128*1024*1024)
++#define MAX_GAP (TASK_SIZE/6*5)
 +
-+	while (1) {
-+		switch (byte) {
-+		case ADDR_SIZE_PREFIX:
-+			override->address_size = ADDR_SIZE_PREFIX;
-+			goto do_next_byte;
++/*
++ * True on X86_32 or when emulating IA32 on X86_64
++ */
++static int mmap_is_ia32(void)
++{
++#ifdef CONFIG_X86_32
++	return 1;
++#endif
++#ifdef CONFIG_IA32_EMULATION
++	if (test_thread_flag(TIF_IA32))
++		return 1;
++#endif
++	return 0;
++}
 +
-+		case OP_SIZE_PREFIX:
-+			override->operand_size = OP_SIZE_PREFIX;
-+			goto do_next_byte;
++static int mmap_is_legacy(void)
++{
++	if (current->personality & ADDR_COMPAT_LAYOUT)
++		return 1;
 +
-+		case PREFIX_CS:
-+			override->segment = PREFIX_CS_;
-+			goto do_next_byte;
-+		case PREFIX_ES:
-+			override->segment = PREFIX_ES_;
-+			goto do_next_byte;
-+		case PREFIX_SS:
-+			override->segment = PREFIX_SS_;
-+			goto do_next_byte;
-+		case PREFIX_FS:
-+			override->segment = PREFIX_FS_;
-+			goto do_next_byte;
-+		case PREFIX_GS:
-+			override->segment = PREFIX_GS_;
-+			goto do_next_byte;
-+		case PREFIX_DS:
-+			override->segment = PREFIX_DS_;
-+			goto do_next_byte;
- 
- /* lock is not a valid prefix for FPU instructions,
-    let the cpu handle it to generate a SIGILL. */
- /*	case PREFIX_LOCK: */
- 
--	  /* rep.. prefixes have no meaning for FPU instructions */
--	case PREFIX_REPE:
--	case PREFIX_REPNE:
--
--	do_next_byte:
--	  ip++;
--	  RE_ENTRANT_CHECK_OFF;
--	  FPU_code_access_ok(1);
--	  FPU_get_user(byte, ip);
--	  RE_ENTRANT_CHECK_ON;
--	  break;
--	case FWAIT_OPCODE:
--	  *Byte = byte;
--	  return 1;
--	default:
--	  if ( (byte & 0xf8) == 0xd8 )
--	    {
--	      *Byte = byte;
--	      *fpu_eip = ip;
--	      return 1;
--	    }
--	  else
--	    {
--	      /* Not a valid sequence of prefix bytes followed by
--		 an FPU instruction. */
--	      *Byte = byte;  /* Needed for error message. */
--	      return 0;
--	    }
-+			/* rep.. prefixes have no meaning for FPU instructions */
-+		case PREFIX_REPE:
-+		case PREFIX_REPNE:
++	if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
++		return 1;
 +
-+		      do_next_byte:
-+			ip++;
-+			RE_ENTRANT_CHECK_OFF;
-+			FPU_code_access_ok(1);
-+			FPU_get_user(byte, ip);
-+			RE_ENTRANT_CHECK_ON;
-+			break;
-+		case FWAIT_OPCODE:
-+			*Byte = byte;
-+			return 1;
-+		default:
-+			if ((byte & 0xf8) == 0xd8) {
-+				*Byte = byte;
-+				*fpu_eip = ip;
-+				return 1;
-+			} else {
-+				/* Not a valid sequence of prefix bytes followed by
-+				   an FPU instruction. */
-+				*Byte = byte;	/* Needed for error message. */
-+				return 0;
-+			}
-+		}
- 	}
--    }
- }
- 
++	return sysctl_legacy_va_layout;
++}
++
++static unsigned long mmap_rnd(void)
++{
++	unsigned long rnd = 0;
++
++	/*
++	*  8 bits of randomness in 32bit mmaps, 20 address space bits
++	* 28 bits of randomness in 64bit mmaps, 40 address space bits
++	*/
++	if (current->flags & PF_RANDOMIZE) {
++		if (mmap_is_ia32())
++			rnd = (long)get_random_int() % (1<<8);
++		else
++			rnd = (long)(get_random_int() % (1<<28));
++	}
++	return rnd << PAGE_SHIFT;
++}
++
++static unsigned long mmap_base(void)
++{
++	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
++
++	if (gap < MIN_GAP)
++		gap = MIN_GAP;
++	else if (gap > MAX_GAP)
++		gap = MAX_GAP;
++
++	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
++}
++
++/*
++ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
++ * does, but not when emulating X86_32
++ */
++static unsigned long mmap_legacy_base(void)
++{
++	if (mmap_is_ia32())
++		return TASK_UNMAPPED_BASE;
++	else
++		return TASK_UNMAPPED_BASE + mmap_rnd();
++}
++
++/*
++ * This function, called very early during the creation of a new
++ * process VM image, sets up which VM layout function to use:
++ */
++void arch_pick_mmap_layout(struct mm_struct *mm)
++{
++	if (mmap_is_legacy()) {
++		mm->mmap_base = mmap_legacy_base();
++		mm->get_unmapped_area = arch_get_unmapped_area;
++		mm->unmap_area = arch_unmap_area;
++	} else {
++		mm->mmap_base = mmap_base();
++		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
++		mm->unmap_area = arch_unmap_area_topdown;
++	}
++}
+diff --git a/arch/x86/mm/mmap_32.c b/arch/x86/mm/mmap_32.c
+deleted file mode 100644
+index 552e084..0000000
+--- a/arch/x86/mm/mmap_32.c
++++ /dev/null
+@@ -1,77 +0,0 @@
+-/*
+- *  linux/arch/i386/mm/mmap.c
+- *
+- *  flexible mmap layout support
+- *
+- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+- *
+- *
+- * Started by Ingo Molnar <mingo at elte.hu>
+- */
 -
--void math_abort(struct info * info, unsigned int signal)
-+void math_abort(struct info *info, unsigned int signal)
- {
- 	FPU_EIP = FPU_ORIG_EIP;
- 	current->thread.trap_no = 16;
- 	current->thread.error_code = 0;
--	send_sig(signal,current,1);
-+	send_sig(signal, current, 1);
- 	RE_ENTRANT_CHECK_OFF;
--	__asm__("movl %0,%%esp ; ret": :"g" (((long) info)-4));
-+      __asm__("movl %0,%%esp ; ret": :"g"(((long)info) - 4));
- #ifdef PARANOID
--      printk("ERROR: wm-FPU-emu math_abort failed!\n");
-+	printk("ERROR: wm-FPU-emu math_abort failed!\n");
- #endif /* PARANOID */
- }
- 
+-#include <linux/personality.h>
+-#include <linux/mm.h>
+-#include <linux/random.h>
+-#include <linux/sched.h>
 -
+-/*
+- * Top of mmap area (just below the process stack).
+- *
+- * Leave an at least ~128 MB hole.
+- */
+-#define MIN_GAP (128*1024*1024)
+-#define MAX_GAP (TASK_SIZE/6*5)
 -
- #define S387 ((struct i387_soft_struct *)s387)
- #define sstatus_word() \
-   ((S387->swd & ~SW_Top & 0xffff) | ((S387->ftop << SW_Top_Shift) & SW_Top))
- 
--int restore_i387_soft(void *s387, struct _fpstate __user *buf)
-+int fpregs_soft_set(struct task_struct *target,
-+		    const struct user_regset *regset,
-+		    unsigned int pos, unsigned int count,
-+		    const void *kbuf, const void __user *ubuf)
- {
--  u_char __user *d = (u_char __user *)buf;
--  int offset, other, i, tags, regnr, tag, newtop;
+-static inline unsigned long mmap_base(struct mm_struct *mm)
+-{
+-	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+-	unsigned long random_factor = 0;
 -
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ, d, 7*4 + 8*10);
--  if (__copy_from_user(&S387->cwd, d, 7*4))
--    return -1;
--  RE_ENTRANT_CHECK_ON;
+-	if (current->flags & PF_RANDOMIZE)
+-		random_factor = get_random_int() % (1024*1024);
 -
--  d += 7*4;
+-	if (gap < MIN_GAP)
+-		gap = MIN_GAP;
+-	else if (gap > MAX_GAP)
+-		gap = MAX_GAP;
 -
--  S387->ftop = (S387->swd >> SW_Top_Shift) & 7;
--  offset = (S387->ftop & 7) * 10;
--  other = 80 - offset;
+-	return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
+-}
 -
--  RE_ENTRANT_CHECK_OFF;
--  /* Copy all registers in stack order. */
--  if (__copy_from_user(((u_char *)&S387->st_space)+offset, d, other))
--    return -1;
--  if ( offset )
--    if (__copy_from_user((u_char *)&S387->st_space, d+other, offset))
--      return -1;
--  RE_ENTRANT_CHECK_ON;
+-/*
+- * This function, called very early during the creation of a new
+- * process VM image, sets up which VM layout function to use:
+- */
+-void arch_pick_mmap_layout(struct mm_struct *mm)
+-{
+-	/*
+-	 * Fall back to the standard layout if the personality
+-	 * bit is set, or if the expected stack growth is unlimited:
+-	 */
+-	if (sysctl_legacy_va_layout ||
+-			(current->personality & ADDR_COMPAT_LAYOUT) ||
+-			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
+-		mm->mmap_base = TASK_UNMAPPED_BASE;
+-		mm->get_unmapped_area = arch_get_unmapped_area;
+-		mm->unmap_area = arch_unmap_area;
+-	} else {
+-		mm->mmap_base = mmap_base(mm);
+-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+-		mm->unmap_area = arch_unmap_area_topdown;
+-	}
+-}
+diff --git a/arch/x86/mm/mmap_64.c b/arch/x86/mm/mmap_64.c
+deleted file mode 100644
+index 80bba0d..0000000
+--- a/arch/x86/mm/mmap_64.c
++++ /dev/null
+@@ -1,29 +0,0 @@
+-/* Copyright 2005 Andi Kleen, SuSE Labs.
+- * Licensed under GPL, v.2
+- */
+-#include <linux/mm.h>
+-#include <linux/sched.h>
+-#include <linux/random.h>
+-#include <asm/ia32.h>
 -
--  /* The tags may need to be corrected now. */
--  tags = S387->twd;
--  newtop = S387->ftop;
--  for ( i = 0; i < 8; i++ )
--    {
--      regnr = (i+newtop) & 7;
--      if ( ((tags >> ((regnr & 7)*2)) & 3) != TAG_Empty )
--	{
--	  /* The loaded data over-rides all other cases. */
--	  tag = FPU_tagof((FPU_REG *)((u_char *)S387->st_space + 10*regnr));
--	  tags &= ~(3 << (regnr*2));
--	  tags |= (tag & 3) << (regnr*2);
-+	struct i387_soft_struct *s387 = &target->thread.i387.soft;
-+	void *space = s387->st_space;
-+	int ret;
-+	int offset, other, i, tags, regnr, tag, newtop;
-+
-+	RE_ENTRANT_CHECK_OFF;
-+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, s387, 0,
-+				 offsetof(struct i387_soft_struct, st_space));
-+	RE_ENTRANT_CHECK_ON;
-+
-+	if (ret)
-+		return ret;
-+
-+	S387->ftop = (S387->swd >> SW_Top_Shift) & 7;
-+	offset = (S387->ftop & 7) * 10;
-+	other = 80 - offset;
-+
-+	RE_ENTRANT_CHECK_OFF;
+-/* Notebook: move the mmap code from sys_x86_64.c over here. */
+-
+-void arch_pick_mmap_layout(struct mm_struct *mm)
+-{
+-#ifdef CONFIG_IA32_EMULATION
+-	if (current_thread_info()->flags & _TIF_IA32)
+-		return ia32_pick_mmap_layout(mm);
+-#endif
+-	mm->mmap_base = TASK_UNMAPPED_BASE;
+-	if (current->flags & PF_RANDOMIZE) {
+-		/* Add 28bit randomness which is about 40bits of address space
+-		   because mmap base has to be page aligned.
+- 		   or ~1/128 of the total user VM
+-	   	   (total user address space is 47bits) */
+-		unsigned rnd = get_random_int() & 0xfffffff;
+-		mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
+-	}
+-	mm->get_unmapped_area = arch_get_unmapped_area;
+-	mm->unmap_area = arch_unmap_area;
+-}
+-
+diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
+index 3d6926b..dc3b1f7 100644
+--- a/arch/x86/mm/numa_64.c
++++ b/arch/x86/mm/numa_64.c
+@@ -1,7 +1,7 @@
+-/* 
++/*
+  * Generic VM initialization for x86-64 NUMA setups.
+  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
+- */ 
++ */
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/string.h>
+@@ -11,35 +11,45 @@
+ #include <linux/ctype.h>
+ #include <linux/module.h>
+ #include <linux/nodemask.h>
++#include <linux/sched.h>
+ 
+ #include <asm/e820.h>
+ #include <asm/proto.h>
+ #include <asm/dma.h>
+ #include <asm/numa.h>
+ #include <asm/acpi.h>
++#include <asm/k8.h>
+ 
+ #ifndef Dprintk
+ #define Dprintk(x...)
+ #endif
+ 
+ struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
++EXPORT_SYMBOL(node_data);
 +
-+	/* Copy all registers in stack order. */
-+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-+				 space + offset, 0, other);
-+	if (!ret && offset)
-+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-+					 space, 0, offset);
+ bootmem_data_t plat_node_bdata[MAX_NUMNODES];
+ 
+ struct memnode memnode;
+ 
+-unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
++int x86_cpu_to_node_map_init[NR_CPUS] = {
+ 	[0 ... NR_CPUS-1] = NUMA_NO_NODE
+ };
+-unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
+- 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
++void *x86_cpu_to_node_map_early_ptr;
++DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
++EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);
++EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
 +
-+	RE_ENTRANT_CHECK_ON;
++s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
++	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+ };
+-cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
 +
-+	/* The tags may need to be corrected now. */
-+	tags = S387->twd;
-+	newtop = S387->ftop;
-+	for (i = 0; i < 8; i++) {
-+		regnr = (i + newtop) & 7;
-+		if (((tags >> ((regnr & 7) * 2)) & 3) != TAG_Empty) {
-+			/* The loaded data over-rides all other cases. */
-+			tag =
-+			    FPU_tagof((FPU_REG *) ((u_char *) S387->st_space +
-+						   10 * regnr));
-+			tags &= ~(3 << (regnr * 2));
-+			tags |= (tag & 3) << (regnr * 2);
-+		}
- 	}
--    }
--  S387->twd = tags;
-+	S387->twd = tags;
++cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
++EXPORT_SYMBOL(node_to_cpumask_map);
  
--  return 0;
-+	return ret;
- }
+ int numa_off __initdata;
+ unsigned long __initdata nodemap_addr;
+ unsigned long __initdata nodemap_size;
  
 -
--int save_i387_soft(void *s387, struct _fpstate __user * buf)
-+int fpregs_soft_get(struct task_struct *target,
-+		    const struct user_regset *regset,
-+		    unsigned int pos, unsigned int count,
-+		    void *kbuf, void __user *ubuf)
+ /*
+  * Given a shift value, try to populate memnodemap[]
+  * Returns :
+@@ -47,14 +57,13 @@ unsigned long __initdata nodemap_size;
+  * 0 if memnodmap[] too small (of shift too small)
+  * -1 if node overlap or lost ram (shift too big)
+  */
+-static int __init
+-populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
++static int __init populate_memnodemap(const struct bootnode *nodes,
++				      int numnodes, int shift)
  {
--  u_char __user *d = (u_char __user *)buf;
--  int offset = (S387->ftop & 7) * 10, other = 80 - offset;
-+	struct i387_soft_struct *s387 = &target->thread.i387.soft;
-+	const void *space = s387->st_space;
-+	int ret;
-+	int offset = (S387->ftop & 7) * 10, other = 80 - offset;
-+
-+	RE_ENTRANT_CHECK_OFF;
+-	int i; 
+-	int res = -1;
+ 	unsigned long addr, end;
++	int i, res = -1;
  
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_WRITE, d, 7*4 + 8*10);
- #ifdef PECULIAR_486
--  S387->cwd &= ~0xe080;
--  /* An 80486 sets nearly all of the reserved bits to 1. */
--  S387->cwd |= 0xffff0040;
--  S387->swd = sstatus_word() | 0xffff0000;
--  S387->twd |= 0xffff0000;
--  S387->fcs &= ~0xf8000000;
--  S387->fos |= 0xffff0000;
-+	S387->cwd &= ~0xe080;
-+	/* An 80486 sets nearly all of the reserved bits to 1. */
-+	S387->cwd |= 0xffff0040;
-+	S387->swd = sstatus_word() | 0xffff0000;
-+	S387->twd |= 0xffff0000;
-+	S387->fcs &= ~0xf8000000;
-+	S387->fos |= 0xffff0000;
- #endif /* PECULIAR_486 */
--  if (__copy_to_user(d, &S387->cwd, 7*4))
--    return -1;
--  RE_ENTRANT_CHECK_ON;
--
--  d += 7*4;
--
--  RE_ENTRANT_CHECK_OFF;
--  /* Copy all registers in stack order. */
--  if (__copy_to_user(d, ((u_char *)&S387->st_space)+offset, other))
--    return -1;
--  if ( offset )
--    if (__copy_to_user(d+other, (u_char *)&S387->st_space, offset))
--      return -1;
--  RE_ENTRANT_CHECK_ON;
--
--  return 1;
-+
-+	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, s387, 0,
-+				  offsetof(struct i387_soft_struct, st_space));
-+
-+	/* Copy all registers in stack order. */
-+	if (!ret)
-+		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-+					  space + offset, 0, other);
-+	if (!ret)
-+		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-+					  space, 0, offset);
-+
-+	RE_ENTRANT_CHECK_ON;
-+
-+	return ret;
+-	memset(memnodemap, 0xff, memnodemapsize);
++	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
+ 	for (i = 0; i < numnodes; i++) {
+ 		addr = nodes[i].start;
+ 		end = nodes[i].end;
+@@ -63,13 +72,13 @@ populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
+ 		if ((end >> shift) >= memnodemapsize)
+ 			return 0;
+ 		do {
+-			if (memnodemap[addr >> shift] != 0xff)
++			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
+ 				return -1;
+ 			memnodemap[addr >> shift] = i;
+ 			addr += (1UL << shift);
+ 		} while (addr < end);
+ 		res = 1;
+-	} 
++	}
+ 	return res;
  }
-diff --git a/arch/x86/math-emu/fpu_etc.c b/arch/x86/math-emu/fpu_etc.c
-index e3b5d46..233e5af 100644
---- a/arch/x86/math-emu/fpu_etc.c
-+++ b/arch/x86/math-emu/fpu_etc.c
-@@ -16,128 +16,115 @@
- #include "status_w.h"
- #include "reg_constant.h"
  
--
- static void fchs(FPU_REG *st0_ptr, u_char st0tag)
+@@ -78,12 +87,12 @@ static int __init allocate_cachealigned_memnodemap(void)
+ 	unsigned long pad, pad_addr;
+ 
+ 	memnodemap = memnode.embedded_map;
+-	if (memnodemapsize <= 48)
++	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
+ 		return 0;
+ 
+ 	pad = L1_CACHE_BYTES - 1;
+ 	pad_addr = 0x8000;
+-	nodemap_size = pad + memnodemapsize;
++	nodemap_size = pad + sizeof(s16) * memnodemapsize;
+ 	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
+ 				      nodemap_size);
+ 	if (nodemap_addr == -1UL) {
+@@ -94,6 +103,7 @@ static int __init allocate_cachealigned_memnodemap(void)
+ 	}
+ 	pad_addr = (nodemap_addr + pad) & ~pad;
+ 	memnodemap = phys_to_virt(pad_addr);
++	reserve_early(nodemap_addr, nodemap_addr + nodemap_size);
+ 
+ 	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
+ 	       nodemap_addr, nodemap_addr + nodemap_size);
+@@ -104,8 +114,8 @@ static int __init allocate_cachealigned_memnodemap(void)
+  * The LSB of all start and end addresses in the node map is the value of the
+  * maximum possible shift.
+  */
+-static int __init
+-extract_lsb_from_nodes (const struct bootnode *nodes, int numnodes)
++static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
++					 int numnodes)
  {
--  if ( st0tag ^ TAG_Empty )
--    {
--      signbyte(st0_ptr) ^= SIGN_NEG;
--      clear_C1();
--    }
--  else
--    FPU_stack_underflow();
-+	if (st0tag ^ TAG_Empty) {
-+		signbyte(st0_ptr) ^= SIGN_NEG;
-+		clear_C1();
-+	} else
-+		FPU_stack_underflow();
+ 	int i, nodes_used = 0;
+ 	unsigned long start, end;
+@@ -140,51 +150,50 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
+ 		shift);
+ 
+ 	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
+-		printk(KERN_INFO
+-	"Your memory is not aligned you need to rebuild your kernel "
+-	"with a bigger NODEMAPSIZE shift=%d\n",
+-			shift);
++		printk(KERN_INFO "Your memory is not aligned you need to "
++		       "rebuild your kernel with a bigger NODEMAPSIZE "
++		       "shift=%d\n", shift);
+ 		return -1;
+ 	}
+ 	return shift;
  }
  
--
- static void fabs(FPU_REG *st0_ptr, u_char st0tag)
+-#ifdef CONFIG_SPARSEMEM
+ int early_pfn_to_nid(unsigned long pfn)
  {
--  if ( st0tag ^ TAG_Empty )
--    {
--      setpositive(st0_ptr);
--      clear_C1();
--    }
--  else
--    FPU_stack_underflow();
-+	if (st0tag ^ TAG_Empty) {
-+		setpositive(st0_ptr);
-+		clear_C1();
-+	} else
-+		FPU_stack_underflow();
+ 	return phys_to_nid(pfn << PAGE_SHIFT);
  }
+-#endif
  
--
- static void ftst_(FPU_REG *st0_ptr, u_char st0tag)
+-static void * __init
+-early_node_mem(int nodeid, unsigned long start, unsigned long end,
+-	      unsigned long size)
++static void * __init early_node_mem(int nodeid, unsigned long start,
++				    unsigned long end, unsigned long size)
  {
--  switch (st0tag)
--    {
--    case TAG_Zero:
--      setcc(SW_C3);
--      break;
--    case TAG_Valid:
--      if (getsign(st0_ptr) == SIGN_POS)
--        setcc(0);
--      else
--        setcc(SW_C0);
--      break;
--    case TAG_Special:
--      switch ( FPU_Special(st0_ptr) )
--	{
--	case TW_Denormal:
--	  if (getsign(st0_ptr) == SIGN_POS)
--	    setcc(0);
--	  else
--	    setcc(SW_C0);
--	  if ( denormal_operand() < 0 )
--	    {
--#ifdef PECULIAR_486
--	      /* This is weird! */
--	      if (getsign(st0_ptr) == SIGN_POS)
-+	switch (st0tag) {
-+	case TAG_Zero:
- 		setcc(SW_C3);
-+		break;
-+	case TAG_Valid:
-+		if (getsign(st0_ptr) == SIGN_POS)
-+			setcc(0);
-+		else
-+			setcc(SW_C0);
-+		break;
-+	case TAG_Special:
-+		switch (FPU_Special(st0_ptr)) {
-+		case TW_Denormal:
-+			if (getsign(st0_ptr) == SIGN_POS)
-+				setcc(0);
-+			else
-+				setcc(SW_C0);
-+			if (denormal_operand() < 0) {
-+#ifdef PECULIAR_486
-+				/* This is weird! */
-+				if (getsign(st0_ptr) == SIGN_POS)
-+					setcc(SW_C3);
- #endif /* PECULIAR_486 */
--	      return;
--	    }
--	  break;
--	case TW_NaN:
--	  setcc(SW_C0|SW_C2|SW_C3);   /* Operand is not comparable */ 
--	  EXCEPTION(EX_Invalid);
--	  break;
--	case TW_Infinity:
--	  if (getsign(st0_ptr) == SIGN_POS)
--	    setcc(0);
--	  else
--	    setcc(SW_C0);
--	  break;
--	default:
--	  setcc(SW_C0|SW_C2|SW_C3);   /* Operand is not comparable */ 
--	  EXCEPTION(EX_INTERNAL|0x14);
--	  break;
-+				return;
-+			}
-+			break;
-+		case TW_NaN:
-+			setcc(SW_C0 | SW_C2 | SW_C3);	/* Operand is not comparable */
-+			EXCEPTION(EX_Invalid);
-+			break;
-+		case TW_Infinity:
-+			if (getsign(st0_ptr) == SIGN_POS)
-+				setcc(0);
-+			else
-+				setcc(SW_C0);
-+			break;
-+		default:
-+			setcc(SW_C0 | SW_C2 | SW_C3);	/* Operand is not comparable */
-+			EXCEPTION(EX_INTERNAL | 0x14);
-+			break;
-+		}
-+		break;
-+	case TAG_Empty:
-+		setcc(SW_C0 | SW_C2 | SW_C3);
-+		EXCEPTION(EX_StackUnder);
-+		break;
+ 	unsigned long mem = find_e820_area(start, end, size);
+ 	void *ptr;
++
+ 	if (mem != -1L)
+ 		return __va(mem);
+ 	ptr = __alloc_bootmem_nopanic(size,
+ 				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
+ 	if (ptr == NULL) {
+ 		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
+-			size, nodeid);
++		       size, nodeid);
+ 		return NULL;
  	}
--      break;
--    case TAG_Empty:
--      setcc(SW_C0|SW_C2|SW_C3);
--      EXCEPTION(EX_StackUnder);
--      break;
--    }
+ 	return ptr;
  }
  
--
- static void fxam(FPU_REG *st0_ptr, u_char st0tag)
- {
--  int c = 0;
--  switch (st0tag)
--    {
--    case TAG_Empty:
--      c = SW_C3|SW_C0;
--      break;
--    case TAG_Zero:
--      c = SW_C3;
--      break;
--    case TAG_Valid:
--      c = SW_C2;
--      break;
--    case TAG_Special:
--      switch ( FPU_Special(st0_ptr) )
--	{
--	case TW_Denormal:
--	  c = SW_C2|SW_C3;  /* Denormal */
--	  break;
--	case TW_NaN:
--	  /* We also use NaN for unsupported types. */
--	  if ( (st0_ptr->sigh & 0x80000000) && (exponent(st0_ptr) == EXP_OVER) )
--	    c = SW_C0;
--	  break;
--	case TW_Infinity:
--	  c = SW_C2|SW_C0;
--	  break;
-+	int c = 0;
-+	switch (st0tag) {
-+	case TAG_Empty:
-+		c = SW_C3 | SW_C0;
-+		break;
-+	case TAG_Zero:
-+		c = SW_C3;
-+		break;
-+	case TAG_Valid:
-+		c = SW_C2;
-+		break;
-+	case TAG_Special:
-+		switch (FPU_Special(st0_ptr)) {
-+		case TW_Denormal:
-+			c = SW_C2 | SW_C3;	/* Denormal */
-+			break;
-+		case TW_NaN:
-+			/* We also use NaN for unsupported types. */
-+			if ((st0_ptr->sigh & 0x80000000)
-+			    && (exponent(st0_ptr) == EXP_OVER))
-+				c = SW_C0;
-+			break;
-+		case TW_Infinity:
-+			c = SW_C2 | SW_C0;
-+			break;
-+		}
+ /* Initialize bootmem allocator for a node */
+-void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
+-{ 
+-	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start; 
+-	unsigned long nodedata_phys;
++void __init setup_node_bootmem(int nodeid, unsigned long start,
++			       unsigned long end)
++{
++	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size;
++	unsigned long bootmap_start, nodedata_phys;
+ 	void *bootmap;
+ 	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
+ 
+-	start = round_up(start, ZONE_ALIGN); 
++	start = round_up(start, ZONE_ALIGN);
+ 
+-	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
++	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
++	       start, end);
+ 
+ 	start_pfn = start >> PAGE_SHIFT;
+ 	end_pfn = end >> PAGE_SHIFT;
+@@ -200,75 +209,55 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
+ 	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
+ 
+ 	/* Find a place for the bootmem map */
+-	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 
++	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+ 	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+ 	bootmap = early_node_mem(nodeid, bootmap_start, end,
+ 					bootmap_pages<<PAGE_SHIFT);
+ 	if (bootmap == NULL)  {
+ 		if (nodedata_phys < start || nodedata_phys >= end)
+-			free_bootmem((unsigned long)node_data[nodeid],pgdat_size);
++			free_bootmem((unsigned long)node_data[nodeid],
++				     pgdat_size);
+ 		node_data[nodeid] = NULL;
+ 		return;
  	}
--    }
--  if ( getsign(st0_ptr) == SIGN_NEG )
--    c |= SW_C1;
--  setcc(c);
-+	if (getsign(st0_ptr) == SIGN_NEG)
-+		c |= SW_C1;
-+	setcc(c);
- }
+ 	bootmap_start = __pa(bootmap);
+-	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages); 
+-	
++	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);
++
+ 	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
+-					 bootmap_start >> PAGE_SHIFT, 
+-					 start_pfn, end_pfn); 
++					 bootmap_start >> PAGE_SHIFT,
++					 start_pfn, end_pfn);
  
--
- static FUNC_ST0 const fp_etc_table[] = {
--  fchs, fabs, (FUNC_ST0)FPU_illegal, (FUNC_ST0)FPU_illegal,
--  ftst_, fxam, (FUNC_ST0)FPU_illegal, (FUNC_ST0)FPU_illegal
-+	fchs, fabs, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal,
-+	ftst_, fxam, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal
- };
+ 	free_bootmem_with_active_regions(nodeid, end);
  
- void FPU_etc(void)
- {
--  (fp_etc_table[FPU_rm])(&st(0), FPU_gettag0());
-+	(fp_etc_table[FPU_rm]) (&st(0), FPU_gettag0());
- }
-diff --git a/arch/x86/math-emu/fpu_proto.h b/arch/x86/math-emu/fpu_proto.h
-index 37a8a7f..aa49b6a 100644
---- a/arch/x86/math-emu/fpu_proto.h
-+++ b/arch/x86/math-emu/fpu_proto.h
-@@ -66,7 +66,7 @@ extern int FPU_Special(FPU_REG const *ptr);
- extern int isNaN(FPU_REG const *ptr);
- extern void FPU_pop(void);
- extern int FPU_empty_i(int stnr);
--extern int FPU_stackoverflow(FPU_REG **st_new_ptr);
-+extern int FPU_stackoverflow(FPU_REG ** st_new_ptr);
- extern void FPU_copy_to_regi(FPU_REG const *r, u_char tag, int stnr);
- extern void FPU_copy_to_reg1(FPU_REG const *r, u_char tag);
- extern void FPU_copy_to_reg0(FPU_REG const *r, u_char tag);
-@@ -75,21 +75,23 @@ extern void FPU_triga(void);
- extern void FPU_trigb(void);
- /* get_address.c */
- extern void __user *FPU_get_address(u_char FPU_modrm, unsigned long *fpu_eip,
--			 struct address *addr, fpu_addr_modes addr_modes);
-+				    struct address *addr,
-+				    fpu_addr_modes addr_modes);
- extern void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
--			    struct address *addr, fpu_addr_modes addr_modes);
-+				       struct address *addr,
-+				       fpu_addr_modes addr_modes);
- /* load_store.c */
- extern int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
--			    void __user *data_address);
-+			  void __user * data_address);
- /* poly_2xm1.c */
--extern int poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result);
-+extern int poly_2xm1(u_char sign, FPU_REG * arg, FPU_REG *result);
- /* poly_atan.c */
--extern void poly_atan(FPU_REG *st0_ptr, u_char st0_tag, FPU_REG *st1_ptr,
-+extern void poly_atan(FPU_REG * st0_ptr, u_char st0_tag, FPU_REG *st1_ptr,
- 		      u_char st1_tag);
- /* poly_l2.c */
- extern void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign);
- extern int poly_l2p1(u_char s0, u_char s1, FPU_REG *r0, FPU_REG *r1,
--		     FPU_REG *d);
-+		     FPU_REG * d);
- /* poly_sin.c */
- extern void poly_sine(FPU_REG *st0_ptr);
- extern void poly_cos(FPU_REG *st0_ptr);
-@@ -117,10 +119,13 @@ extern int FPU_load_int32(long __user *_s, FPU_REG *loaded_data);
- extern int FPU_load_int16(short __user *_s, FPU_REG *loaded_data);
- extern int FPU_load_bcd(u_char __user *s);
- extern int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag,
--			      long double __user *d);
--extern int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat);
--extern int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single);
--extern int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d);
-+			      long double __user * d);
-+extern int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag,
-+			    double __user * dfloat);
-+extern int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag,
-+			    float __user * single);
-+extern int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag,
-+			   long long __user * d);
- extern int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d);
- extern int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d);
- extern int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d);
-@@ -137,4 +142,3 @@ extern int FPU_div(int flags, int regrm, int control_w);
- /* reg_convert.c */
- extern int FPU_to_exp16(FPU_REG const *a, FPU_REG *x);
- #endif /* _FPU_PROTO_H */
+-	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size); 
+-	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
++	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
++	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
++			     bootmap_pages<<PAGE_SHIFT);
+ #ifdef CONFIG_ACPI_NUMA
+ 	srat_reserve_add_area(nodeid);
+ #endif
+ 	node_set_online(nodeid);
+-} 
 -
-diff --git a/arch/x86/math-emu/fpu_tags.c b/arch/x86/math-emu/fpu_tags.c
-index cb436fe..d9c657c 100644
---- a/arch/x86/math-emu/fpu_tags.c
-+++ b/arch/x86/math-emu/fpu_tags.c
-@@ -14,114 +14,102 @@
- #include "fpu_system.h"
- #include "exception.h"
- 
+-/* Initialize final allocator for a zone */
+-void __init setup_node_zones(int nodeid)
+-{ 
+-	unsigned long start_pfn, end_pfn, memmapsize, limit;
 -
- void FPU_pop(void)
- {
--  fpu_tag_word |= 3 << ((top & 7)*2);
--  top++;
-+	fpu_tag_word |= 3 << ((top & 7) * 2);
-+	top++;
- }
- 
+- 	start_pfn = node_start_pfn(nodeid);
+- 	end_pfn = node_end_pfn(nodeid);
 -
- int FPU_gettag0(void)
- {
--  return (fpu_tag_word >> ((top & 7)*2)) & 3;
-+	return (fpu_tag_word >> ((top & 7) * 2)) & 3;
- }
- 
+-	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
+-		nodeid, start_pfn, end_pfn);
 -
- int FPU_gettagi(int stnr)
- {
--  return (fpu_tag_word >> (((top+stnr) & 7)*2)) & 3;
-+	return (fpu_tag_word >> (((top + stnr) & 7) * 2)) & 3;
- }
+-	/* Try to allocate mem_map at end to not fill up precious <4GB
+-	   memory. */
+-	memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
+-	limit = end_pfn << PAGE_SHIFT;
+-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+-	NODE_DATA(nodeid)->node_mem_map = 
+-		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata, 
+-				memmapsize, SMP_CACHE_BYTES, 
+-				round_down(limit - memmapsize, PAGE_SIZE), 
+-				limit);
+-#endif
+-} 
++}
  
--
- int FPU_gettag(int regnr)
++/*
++ * There are unfortunately some poorly designed mainboards around that
++ * only connect memory to a single CPU. This breaks the 1:1 cpu->node
++ * mapping. To avoid this fill in the mapping for all possible CPUs,
++ * as the number of CPUs is not known yet. We round robin the existing
++ * nodes.
++ */
+ void __init numa_init_array(void)
  {
--  return (fpu_tag_word >> ((regnr & 7)*2)) & 3;
-+	return (fpu_tag_word >> ((regnr & 7) * 2)) & 3;
- }
- 
+ 	int rr, i;
+-	/* There are unfortunately some poorly designed mainboards around
+-	   that only connect memory to a single CPU. This breaks the 1:1 cpu->node
+-	   mapping. To avoid this fill in the mapping for all possible
+-	   CPUs, as the number of CPUs is not known yet. 
+-	   We round robin the existing nodes. */
++
+ 	rr = first_node(node_online_map);
+ 	for (i = 0; i < NR_CPUS; i++) {
+-		if (cpu_to_node(i) != NUMA_NO_NODE)
++		if (early_cpu_to_node(i) != NUMA_NO_NODE)
+ 			continue;
+- 		numa_set_node(i, rr);
++		numa_set_node(i, rr);
+ 		rr = next_node(rr, node_online_map);
+ 		if (rr == MAX_NUMNODES)
+ 			rr = first_node(node_online_map);
+ 	}
 -
- void FPU_settag0(int tag)
- {
--  int regnr = top;
--  regnr &= 7;
--  fpu_tag_word &= ~(3 << (regnr*2));
--  fpu_tag_word |= (tag & 3) << (regnr*2);
-+	int regnr = top;
-+	regnr &= 7;
-+	fpu_tag_word &= ~(3 << (regnr * 2));
-+	fpu_tag_word |= (tag & 3) << (regnr * 2);
  }
  
--
- void FPU_settagi(int stnr, int tag)
- {
--  int regnr = stnr+top;
--  regnr &= 7;
--  fpu_tag_word &= ~(3 << (regnr*2));
--  fpu_tag_word |= (tag & 3) << (regnr*2);
-+	int regnr = stnr + top;
-+	regnr &= 7;
-+	fpu_tag_word &= ~(3 << (regnr * 2));
-+	fpu_tag_word |= (tag & 3) << (regnr * 2);
- }
+ #ifdef CONFIG_NUMA_EMU
+@@ -276,15 +265,17 @@ void __init numa_init_array(void)
+ char *cmdline __initdata;
  
--
- void FPU_settag(int regnr, int tag)
+ /*
+- * Setups up nid to range from addr to addr + size.  If the end boundary is
+- * greater than max_addr, then max_addr is used instead.  The return value is 0
+- * if there is additional memory left for allocation past addr and -1 otherwise.
+- * addr is adjusted to be at the end of the node.
++ * Setups up nid to range from addr to addr + size.  If the end
++ * boundary is greater than max_addr, then max_addr is used instead.
++ * The return value is 0 if there is additional memory left for
++ * allocation past addr and -1 otherwise.  addr is adjusted to be at
++ * the end of the node.
+  */
+ static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
+ 				   u64 size, u64 max_addr)
  {
--  regnr &= 7;
--  fpu_tag_word &= ~(3 << (regnr*2));
--  fpu_tag_word |= (tag & 3) << (regnr*2);
-+	regnr &= 7;
-+	fpu_tag_word &= ~(3 << (regnr * 2));
-+	fpu_tag_word |= (tag & 3) << (regnr * 2);
- }
+ 	int ret = 0;
++
+ 	nodes[nid].start = *addr;
+ 	*addr += size;
+ 	if (*addr >= max_addr) {
+@@ -335,6 +326,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
  
--
- int FPU_Special(FPU_REG const *ptr)
+ 	for (i = node_start; i < num_nodes + node_start; i++) {
+ 		u64 end = *addr + size;
++
+ 		if (i < big)
+ 			end += FAKE_NODE_MIN_SIZE;
+ 		/*
+@@ -380,14 +372,9 @@ static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
+ static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
  {
--  int exp = exponent(ptr);
--
--  if ( exp == EXP_BIAS+EXP_UNDER )
--    return TW_Denormal;
--  else if ( exp != EXP_BIAS+EXP_OVER )
--    return TW_NaN;
--  else if ( (ptr->sigh == 0x80000000) && (ptr->sigl == 0) )
--    return TW_Infinity;
--  return TW_NaN;
-+	int exp = exponent(ptr);
+ 	struct bootnode nodes[MAX_NUMNODES];
+-	u64 addr = start_pfn << PAGE_SHIFT;
++	u64 size, addr = start_pfn << PAGE_SHIFT;
+ 	u64 max_addr = end_pfn << PAGE_SHIFT;
+-	int num_nodes = 0;
+-	int coeff_flag;
+-	int coeff = -1;
+-	int num = 0;
+-	u64 size;
+-	int i;
++	int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;
+ 
+ 	memset(&nodes, 0, sizeof(nodes));
+ 	/*
+@@ -395,8 +382,9 @@ static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
+ 	 * system RAM into N fake nodes.
+ 	 */
+ 	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
+-		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
+-						simple_strtol(cmdline, NULL, 0));
++		long n = simple_strtol(cmdline, NULL, 0);
 +
-+	if (exp == EXP_BIAS + EXP_UNDER)
-+		return TW_Denormal;
-+	else if (exp != EXP_BIAS + EXP_OVER)
-+		return TW_NaN;
-+	else if ((ptr->sigh == 0x80000000) && (ptr->sigl == 0))
-+		return TW_Infinity;
-+	return TW_NaN;
++		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
+ 		if (num_nodes < 0)
+ 			return num_nodes;
+ 		goto out;
+@@ -483,46 +471,47 @@ out:
+ 	for_each_node_mask(i, node_possible_map) {
+ 		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+ 						nodes[i].end >> PAGE_SHIFT);
+- 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
++		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+ 	}
+ 	acpi_fake_nodes(nodes, num_nodes);
+- 	numa_init_array();
+- 	return 0;
++	numa_init_array();
++	return 0;
  }
+ #endif /* CONFIG_NUMA_EMU */
  
--
- int isNaN(FPU_REG const *ptr)
- {
--  return ( (exponent(ptr) == EXP_BIAS+EXP_OVER)
--	   && !((ptr->sigh == 0x80000000) && (ptr->sigl == 0)) );
-+	return ((exponent(ptr) == EXP_BIAS + EXP_OVER)
-+		&& !((ptr->sigh == 0x80000000) && (ptr->sigl == 0)));
- }
+ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
+-{ 
++{
+ 	int i;
  
--
- int FPU_empty_i(int stnr)
- {
--  int regnr = (top+stnr) & 7;
-+	int regnr = (top + stnr) & 7;
+ 	nodes_clear(node_possible_map);
  
--  return ((fpu_tag_word >> (regnr*2)) & 3) == TAG_Empty;
-+	return ((fpu_tag_word >> (regnr * 2)) & 3) == TAG_Empty;
- }
+ #ifdef CONFIG_NUMA_EMU
+ 	if (cmdline && !numa_emulation(start_pfn, end_pfn))
+- 		return;
++		return;
+ 	nodes_clear(node_possible_map);
+ #endif
  
--
--int FPU_stackoverflow(FPU_REG **st_new_ptr)
-+int FPU_stackoverflow(FPU_REG ** st_new_ptr)
- {
--  *st_new_ptr = &st(-1);
-+	*st_new_ptr = &st(-1);
+ #ifdef CONFIG_ACPI_NUMA
+ 	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
+ 					  end_pfn << PAGE_SHIFT))
+- 		return;
++		return;
+ 	nodes_clear(node_possible_map);
+ #endif
  
--  return ((fpu_tag_word >> (((top - 1) & 7)*2)) & 3) != TAG_Empty;
-+	return ((fpu_tag_word >> (((top - 1) & 7) * 2)) & 3) != TAG_Empty;
- }
+ #ifdef CONFIG_K8_NUMA
+-	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
++	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
++					end_pfn<<PAGE_SHIFT))
+ 		return;
+ 	nodes_clear(node_possible_map);
+ #endif
+ 	printk(KERN_INFO "%s\n",
+ 	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
  
--
- void FPU_copy_to_regi(FPU_REG const *r, u_char tag, int stnr)
- {
--  reg_copy(r, &st(stnr));
--  FPU_settagi(stnr, tag);
-+	reg_copy(r, &st(stnr));
-+	FPU_settagi(stnr, tag);
+-	printk(KERN_INFO "Faking a node at %016lx-%016lx\n", 
++	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
+ 	       start_pfn << PAGE_SHIFT,
+-	       end_pfn << PAGE_SHIFT); 
+-		/* setup dummy node covering all memory */ 
+-	memnode_shift = 63; 
++	       end_pfn << PAGE_SHIFT);
++	/* setup dummy node covering all memory */
++	memnode_shift = 63;
+ 	memnodemap = memnode.embedded_map;
+ 	memnodemap[0] = 0;
+ 	nodes_clear(node_online_map);
+@@ -530,36 +519,48 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
+ 	node_set(0, node_possible_map);
+ 	for (i = 0; i < NR_CPUS; i++)
+ 		numa_set_node(i, 0);
+-	node_to_cpumask[0] = cpumask_of_cpu(0);
++	/* cpumask_of_cpu() may not be available during early startup */
++	memset(&node_to_cpumask_map[0], 0, sizeof(node_to_cpumask_map[0]));
++	cpu_set(0, node_to_cpumask_map[0]);
+ 	e820_register_active_regions(0, start_pfn, end_pfn);
+ 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
  }
  
- void FPU_copy_to_reg1(FPU_REG const *r, u_char tag)
+ __cpuinit void numa_add_cpu(int cpu)
  {
--  reg_copy(r, &st(1));
--  FPU_settagi(1, tag);
-+	reg_copy(r, &st(1));
-+	FPU_settagi(1, tag);
- }
+-	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
+-} 
++	set_bit(cpu,
++		(unsigned long *)&node_to_cpumask_map[early_cpu_to_node(cpu)]);
++}
  
- void FPU_copy_to_reg0(FPU_REG const *r, u_char tag)
+ void __cpuinit numa_set_node(int cpu, int node)
  {
--  int regnr = top;
--  regnr &= 7;
-+	int regnr = top;
-+	regnr &= 7;
- 
--  reg_copy(r, &st(0));
-+	reg_copy(r, &st(0));
- 
--  fpu_tag_word &= ~(3 << (regnr*2));
--  fpu_tag_word |= (tag & 3) << (regnr*2);
-+	fpu_tag_word &= ~(3 << (regnr * 2));
-+	fpu_tag_word |= (tag & 3) << (regnr * 2);
++	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
++
+ 	cpu_pda(cpu)->nodenumber = node;
+-	cpu_to_node(cpu) = node;
++
++	if(cpu_to_node_map)
++		cpu_to_node_map[cpu] = node;
++	else if(per_cpu_offset(cpu))
++		per_cpu(x86_cpu_to_node_map, cpu) = node;
++	else
++		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
  }
-diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
-index 403cbde..ecd0668 100644
---- a/arch/x86/math-emu/fpu_trig.c
-+++ b/arch/x86/math-emu/fpu_trig.c
-@@ -15,11 +15,10 @@
- #include "fpu_emu.h"
- #include "status_w.h"
- #include "control_w.h"
--#include "reg_constant.h"	
-+#include "reg_constant.h"
  
- static void rem_kernel(unsigned long long st0, unsigned long long *y,
--		       unsigned long long st1,
--		       unsigned long long q, int n);
-+		       unsigned long long st1, unsigned long long q, int n);
+-unsigned long __init numa_free_all_bootmem(void) 
+-{ 
+-	int i;
++unsigned long __init numa_free_all_bootmem(void)
++{
+ 	unsigned long pages = 0;
+-	for_each_online_node(i) {
++	int i;
++
++	for_each_online_node(i)
+ 		pages += free_all_bootmem_node(NODE_DATA(i));
+-	}
++
+ 	return pages;
+-} 
++}
  
- #define BETTER_THAN_486
+ void __init paging_init(void)
+-{ 
+-	int i;
++{
+ 	unsigned long max_zone_pfns[MAX_NR_ZONES];
++
+ 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+ 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+ 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+@@ -568,32 +569,27 @@ void __init paging_init(void)
+ 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ 	sparse_init();
  
-@@ -33,788 +32,706 @@ static void rem_kernel(unsigned long long st0, unsigned long long *y,
-    precision of the result sometimes degrades to about 63.9 bits */
- static int trig_arg(FPU_REG *st0_ptr, int even)
- {
--  FPU_REG tmp;
--  u_char tmptag;
--  unsigned long long q;
--  int old_cw = control_word, saved_status = partial_status;
--  int tag, st0_tag = TAG_Valid;
--
--  if ( exponent(st0_ptr) >= 63 )
--    {
--      partial_status |= SW_C2;     /* Reduction incomplete. */
--      return -1;
--    }
--
--  control_word &= ~CW_RC;
--  control_word |= RC_CHOP;
--
--  setpositive(st0_ptr);
--  tag = FPU_u_div(st0_ptr, &CONST_PI2, &tmp, PR_64_BITS | RC_CHOP | 0x3f,
--		  SIGN_POS);
+-	for_each_online_node(i) {
+-		setup_node_zones(i); 
+-	}
 -
--  FPU_round_to_int(&tmp, tag);  /* Fortunately, this can't overflow
--				   to 2^64 */
--  q = significand(&tmp);
--  if ( q )
--    {
--      rem_kernel(significand(st0_ptr),
--		 &significand(&tmp),
--		 significand(&CONST_PI2),
--		 q, exponent(st0_ptr) - exponent(&CONST_PI2));
--      setexponent16(&tmp, exponent(&CONST_PI2));
--      st0_tag = FPU_normalize(&tmp);
--      FPU_copy_to_reg0(&tmp, st0_tag);
--    }
+ 	free_area_init_nodes(max_zone_pfns);
+-} 
++}
+ 
+ static __init int numa_setup(char *opt)
+-{ 
++{
+ 	if (!opt)
+ 		return -EINVAL;
+-	if (!strncmp(opt,"off",3))
++	if (!strncmp(opt, "off", 3))
+ 		numa_off = 1;
+ #ifdef CONFIG_NUMA_EMU
+ 	if (!strncmp(opt, "fake=", 5))
+ 		cmdline = opt + 5;
+ #endif
+ #ifdef CONFIG_ACPI_NUMA
+- 	if (!strncmp(opt,"noacpi",6))
+- 		acpi_numa = -1;
+-	if (!strncmp(opt,"hotadd=", 7))
++	if (!strncmp(opt, "noacpi", 6))
++		acpi_numa = -1;
++	if (!strncmp(opt, "hotadd=", 7))
+ 		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
+ #endif
+ 	return 0;
+-} 
 -
--  if ( (even && !(q & 1)) || (!even && (q & 1)) )
--    {
--      st0_tag = FPU_sub(REV|LOADED|TAG_Valid, (int)&CONST_PI2, FULL_PRECISION);
-+	FPU_REG tmp;
-+	u_char tmptag;
-+	unsigned long long q;
-+	int old_cw = control_word, saved_status = partial_status;
-+	int tag, st0_tag = TAG_Valid;
++}
+ early_param("numa", numa_setup);
+ 
+ /*
+@@ -611,38 +607,16 @@ early_param("numa", numa_setup);
+ void __init init_cpu_to_node(void)
+ {
+ 	int i;
+- 	for (i = 0; i < NR_CPUS; i++) {
+-		u8 apicid = x86_cpu_to_apicid_init[i];
 +
-+	if (exponent(st0_ptr) >= 63) {
-+		partial_status |= SW_C2;	/* Reduction incomplete. */
-+		return -1;
-+	}
++	for (i = 0; i < NR_CPUS; i++) {
++		u16 apicid = x86_cpu_to_apicid_init[i];
++
+ 		if (apicid == BAD_APICID)
+ 			continue;
+ 		if (apicid_to_node[apicid] == NUMA_NO_NODE)
+ 			continue;
+-		numa_set_node(i,apicid_to_node[apicid]);
++		numa_set_node(i, apicid_to_node[apicid]);
+ 	}
+ }
  
--#ifdef BETTER_THAN_486
--      /* So far, the results are exact but based upon a 64 bit
--	 precision approximation to pi/2. The technique used
--	 now is equivalent to using an approximation to pi/2 which
--	 is accurate to about 128 bits. */
--      if ( (exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64) || (q > 1) )
--	{
--	  /* This code gives the effect of having pi/2 to better than
--	     128 bits precision. */
+-EXPORT_SYMBOL(cpu_to_node);
+-EXPORT_SYMBOL(node_to_cpumask);
+-EXPORT_SYMBOL(memnode);
+-EXPORT_SYMBOL(node_data);
 -
--	  significand(&tmp) = q + 1;
--	  setexponent16(&tmp, 63);
--	  FPU_normalize(&tmp);
--	  tmptag =
--	    FPU_u_mul(&CONST_PI2extra, &tmp, &tmp, FULL_PRECISION, SIGN_POS,
--		      exponent(&CONST_PI2extra) + exponent(&tmp));
--	  setsign(&tmp, getsign(&CONST_PI2extra));
--	  st0_tag = FPU_add(&tmp, tmptag, 0, FULL_PRECISION);
--	  if ( signnegative(st0_ptr) )
--	    {
--	      /* CONST_PI2extra is negative, so the result of the addition
--		 can be negative. This means that the argument is actually
--		 in a different quadrant. The correction is always < pi/2,
--		 so it can't overflow into yet another quadrant. */
--	      setpositive(st0_ptr);
--	      q++;
--	    }
-+	control_word &= ~CW_RC;
-+	control_word |= RC_CHOP;
+-#ifdef CONFIG_DISCONTIGMEM
+-/*
+- * Functions to convert PFNs from/to per node page addresses.
+- * These are out of line because they are quite big.
+- * They could be all tuned by pre caching more state.
+- * Should do that.
+- */
+ 
+-int pfn_valid(unsigned long pfn)
+-{
+-	unsigned nid;
+-	if (pfn >= num_physpages)
+-		return 0;
+-	nid = pfn_to_nid(pfn);
+-	if (nid == 0xff)
+-		return 0;
+-	return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid);
+-}
+-EXPORT_SYMBOL(pfn_valid);
+-#endif
+diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
+new file mode 100644
+index 0000000..06353d4
+--- /dev/null
++++ b/arch/x86/mm/pageattr-test.c
+@@ -0,0 +1,224 @@
++/*
++ * self test for change_page_attr.
++ *
++ * Clears the global bit on random pages in the direct mapping, then reverts
++ * and compares page tables forwards and afterwards.
++ */
++#include <linux/bootmem.h>
++#include <linux/random.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/mm.h>
 +
-+	setpositive(st0_ptr);
-+	tag = FPU_u_div(st0_ptr, &CONST_PI2, &tmp, PR_64_BITS | RC_CHOP | 0x3f,
-+			SIGN_POS);
++#include <asm/cacheflush.h>
++#include <asm/pgtable.h>
++#include <asm/kdebug.h>
 +
-+	FPU_round_to_int(&tmp, tag);	/* Fortunately, this can't overflow
-+					   to 2^64 */
-+	q = significand(&tmp);
-+	if (q) {
-+		rem_kernel(significand(st0_ptr),
-+			   &significand(&tmp),
-+			   significand(&CONST_PI2),
-+			   q, exponent(st0_ptr) - exponent(&CONST_PI2));
-+		setexponent16(&tmp, exponent(&CONST_PI2));
-+		st0_tag = FPU_normalize(&tmp);
-+		FPU_copy_to_reg0(&tmp, st0_tag);
- 	}
++enum {
++	NTEST			= 4000,
++#ifdef CONFIG_X86_64
++	LPS			= (1 << PMD_SHIFT),
++#elif defined(CONFIG_X86_PAE)
++	LPS			= (1 << PMD_SHIFT),
++#else
++	LPS			= (1 << 22),
++#endif
++	GPS			= (1<<30)
++};
 +
-+	if ((even && !(q & 1)) || (!even && (q & 1))) {
-+		st0_tag =
-+		    FPU_sub(REV | LOADED | TAG_Valid, (int)&CONST_PI2,
-+			    FULL_PRECISION);
++struct split_state {
++	long lpg, gpg, spg, exec;
++	long min_exec, max_exec;
++};
 +
-+#ifdef BETTER_THAN_486
-+		/* So far, the results are exact but based upon a 64 bit
-+		   precision approximation to pi/2. The technique used
-+		   now is equivalent to using an approximation to pi/2 which
-+		   is accurate to about 128 bits. */
-+		if ((exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64)
-+		    || (q > 1)) {
-+			/* This code gives the effect of having pi/2 to better than
-+			   128 bits precision. */
++static __init int print_split(struct split_state *s)
++{
++	long i, expected, missed = 0;
++	int printed = 0;
++	int err = 0;
 +
-+			significand(&tmp) = q + 1;
-+			setexponent16(&tmp, 63);
-+			FPU_normalize(&tmp);
-+			tmptag =
-+			    FPU_u_mul(&CONST_PI2extra, &tmp, &tmp,
-+				      FULL_PRECISION, SIGN_POS,
-+				      exponent(&CONST_PI2extra) +
-+				      exponent(&tmp));
-+			setsign(&tmp, getsign(&CONST_PI2extra));
-+			st0_tag = FPU_add(&tmp, tmptag, 0, FULL_PRECISION);
-+			if (signnegative(st0_ptr)) {
-+				/* CONST_PI2extra is negative, so the result of the addition
-+				   can be negative. This means that the argument is actually
-+				   in a different quadrant. The correction is always < pi/2,
-+				   so it can't overflow into yet another quadrant. */
-+				setpositive(st0_ptr);
-+				q++;
++	s->lpg = s->gpg = s->spg = s->exec = 0;
++	s->min_exec = ~0UL;
++	s->max_exec = 0;
++	for (i = 0; i < max_pfn_mapped; ) {
++		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
++		int level;
++		pte_t *pte;
++
++		pte = lookup_address(addr, &level);
++		if (!pte) {
++			if (!printed) {
++				dump_pagetable(addr);
++				printk(KERN_INFO "CPA %lx no pte level %d\n",
++					addr, level);
++				printed = 1;
 +			}
++			missed++;
++			i++;
++			continue;
 +		}
- #endif /* BETTER_THAN_486 */
--    }
-+	}
- #ifdef BETTER_THAN_486
--  else
--    {
--      /* So far, the results are exact but based upon a 64 bit
--	 precision approximation to pi/2. The technique used
--	 now is equivalent to using an approximation to pi/2 which
--	 is accurate to about 128 bits. */
--      if ( ((q > 0) && (exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64))
--	   || (q > 1) )
--	{
--	  /* This code gives the effect of having p/2 to better than
--	     128 bits precision. */
--
--	  significand(&tmp) = q;
--	  setexponent16(&tmp, 63);
--	  FPU_normalize(&tmp);         /* This must return TAG_Valid */
--	  tmptag = FPU_u_mul(&CONST_PI2extra, &tmp, &tmp, FULL_PRECISION,
--			     SIGN_POS,
--			     exponent(&CONST_PI2extra) + exponent(&tmp));
--	  setsign(&tmp, getsign(&CONST_PI2extra));
--	  st0_tag = FPU_sub(LOADED|(tmptag & 0x0f), (int)&tmp,
--			    FULL_PRECISION);
--	  if ( (exponent(st0_ptr) == exponent(&CONST_PI2)) &&
--	      ((st0_ptr->sigh > CONST_PI2.sigh)
--	       || ((st0_ptr->sigh == CONST_PI2.sigh)
--		   && (st0_ptr->sigl > CONST_PI2.sigl))) )
--	    {
--	      /* CONST_PI2extra is negative, so the result of the
--		 subtraction can be larger than pi/2. This means
--		 that the argument is actually in a different quadrant.
--		 The correction is always < pi/2, so it can't overflow
--		 into yet another quadrant. */
--	      st0_tag = FPU_sub(REV|LOADED|TAG_Valid, (int)&CONST_PI2,
--				FULL_PRECISION);
--	      q++;
--	    }
-+	else {
-+		/* So far, the results are exact but based upon a 64 bit
-+		   precision approximation to pi/2. The technique used
-+		   now is equivalent to using an approximation to pi/2 which
-+		   is accurate to about 128 bits. */
-+		if (((q > 0)
-+		     && (exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64))
-+		    || (q > 1)) {
-+			/* This code gives the effect of having p/2 to better than
-+			   128 bits precision. */
 +
-+			significand(&tmp) = q;
-+			setexponent16(&tmp, 63);
-+			FPU_normalize(&tmp);	/* This must return TAG_Valid */
-+			tmptag =
-+			    FPU_u_mul(&CONST_PI2extra, &tmp, &tmp,
-+				      FULL_PRECISION, SIGN_POS,
-+				      exponent(&CONST_PI2extra) +
-+				      exponent(&tmp));
-+			setsign(&tmp, getsign(&CONST_PI2extra));
-+			st0_tag = FPU_sub(LOADED | (tmptag & 0x0f), (int)&tmp,
-+					  FULL_PRECISION);
-+			if ((exponent(st0_ptr) == exponent(&CONST_PI2)) &&
-+			    ((st0_ptr->sigh > CONST_PI2.sigh)
-+			     || ((st0_ptr->sigh == CONST_PI2.sigh)
-+				 && (st0_ptr->sigl > CONST_PI2.sigl)))) {
-+				/* CONST_PI2extra is negative, so the result of the
-+				   subtraction can be larger than pi/2. This means
-+				   that the argument is actually in a different quadrant.
-+				   The correction is always < pi/2, so it can't overflow
-+				   into yet another quadrant. */
-+				st0_tag =
-+				    FPU_sub(REV | LOADED | TAG_Valid,
-+					    (int)&CONST_PI2, FULL_PRECISION);
-+				q++;
++		if (level == PG_LEVEL_1G && sizeof(long) == 8) {
++			s->gpg++;
++			i += GPS/PAGE_SIZE;
++		} else if (level == PG_LEVEL_2M) {
++			if (!(pte_val(*pte) & _PAGE_PSE)) {
++				printk(KERN_ERR
++					"%lx level %d but not PSE %Lx\n",
++					addr, level, (u64)pte_val(*pte));
++				err = 1;
 +			}
++			s->lpg++;
++			i += LPS/PAGE_SIZE;
++		} else {
++			s->spg++;
++			i++;
++		}
++		if (!(pte_val(*pte) & _PAGE_NX)) {
++			s->exec++;
++			if (addr < s->min_exec)
++				s->min_exec = addr;
++			if (addr > s->max_exec)
++				s->max_exec = addr;
 +		}
- 	}
--    }
- #endif /* BETTER_THAN_486 */
- 
--  FPU_settag0(st0_tag);
--  control_word = old_cw;
--  partial_status = saved_status & ~SW_C2;     /* Reduction complete. */
-+	FPU_settag0(st0_tag);
-+	control_word = old_cw;
-+	partial_status = saved_status & ~SW_C2;	/* Reduction complete. */
- 
--  return (q & 3) | even;
-+	return (q & 3) | even;
- }
- 
--
- /* Convert a long to register */
- static void convert_l2reg(long const *arg, int deststnr)
- {
--  int tag;
--  long num = *arg;
--  u_char sign;
--  FPU_REG *dest = &st(deststnr);
--
--  if (num == 0)
--    {
--      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
--      return;
--    }
--
--  if (num > 0)
--    { sign = SIGN_POS; }
--  else
--    { num = -num; sign = SIGN_NEG; }
--
--  dest->sigh = num;
--  dest->sigl = 0;
--  setexponent16(dest, 31);
--  tag = FPU_normalize(dest);
--  FPU_settagi(deststnr, tag);
--  setsign(dest, sign);
--  return;
--}
-+	int tag;
-+	long num = *arg;
-+	u_char sign;
-+	FPU_REG *dest = &st(deststnr);
- 
-+	if (num == 0) {
-+		FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
-+		return;
 +	}
++	printk(KERN_INFO
++		"CPA mapping 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
++		s->spg, s->lpg, s->gpg, s->exec,
++		s->min_exec != ~0UL ? s->min_exec : 0, s->max_exec, missed);
 +
-+	if (num > 0) {
-+		sign = SIGN_POS;
-+	} else {
-+		num = -num;
-+		sign = SIGN_NEG;
++	expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
++	if (expected != i) {
++		printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n",
++			max_pfn_mapped, expected);
++		return 1;
 +	}
-+
-+	dest->sigh = num;
-+	dest->sigl = 0;
-+	setexponent16(dest, 31);
-+	tag = FPU_normalize(dest);
-+	FPU_settagi(deststnr, tag);
-+	setsign(dest, sign);
-+	return;
++	return err;
 +}
- 
- static void single_arg_error(FPU_REG *st0_ptr, u_char st0_tag)
- {
--  if ( st0_tag == TAG_Empty )
--    FPU_stack_underflow();  /* Puts a QNaN in st(0) */
--  else if ( st0_tag == TW_NaN )
--    real_1op_NaN(st0_ptr);       /* return with a NaN in st(0) */
-+	if (st0_tag == TAG_Empty)
-+		FPU_stack_underflow();	/* Puts a QNaN in st(0) */
-+	else if (st0_tag == TW_NaN)
-+		real_1op_NaN(st0_ptr);	/* return with a NaN in st(0) */
- #ifdef PARANOID
--  else
--    EXCEPTION(EX_INTERNAL|0x0112);
-+	else
-+		EXCEPTION(EX_INTERNAL | 0x0112);
- #endif /* PARANOID */
- }
- 
--
- static void single_arg_2_error(FPU_REG *st0_ptr, u_char st0_tag)
- {
--  int isNaN;
--
--  switch ( st0_tag )
--    {
--    case TW_NaN:
--      isNaN = (exponent(st0_ptr) == EXP_OVER) && (st0_ptr->sigh & 0x80000000);
--      if ( isNaN && !(st0_ptr->sigh & 0x40000000) )   /* Signaling ? */
--	{
--	  EXCEPTION(EX_Invalid);
--	  if ( control_word & CW_Invalid )
--	    {
--	      /* The masked response */
--	      /* Convert to a QNaN */
--	      st0_ptr->sigh |= 0x40000000;
--	      push();
--	      FPU_copy_to_reg0(st0_ptr, TAG_Special);
--	    }
--	}
--      else if ( isNaN )
--	{
--	  /* A QNaN */
--	  push();
--	  FPU_copy_to_reg0(st0_ptr, TAG_Special);
--	}
--      else
--	{
--	  /* pseudoNaN or other unsupported */
--	  EXCEPTION(EX_Invalid);
--	  if ( control_word & CW_Invalid )
--	    {
--	      /* The masked response */
--	      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
--	      push();
--	      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
--	    }
--	}
--      break;              /* return with a NaN in st(0) */
-+	int isNaN;
 +
-+	switch (st0_tag) {
-+	case TW_NaN:
-+		isNaN = (exponent(st0_ptr) == EXP_OVER)
-+		    && (st0_ptr->sigh & 0x80000000);
-+		if (isNaN && !(st0_ptr->sigh & 0x40000000)) {	/* Signaling ? */
-+			EXCEPTION(EX_Invalid);
-+			if (control_word & CW_Invalid) {
-+				/* The masked response */
-+				/* Convert to a QNaN */
-+				st0_ptr->sigh |= 0x40000000;
-+				push();
-+				FPU_copy_to_reg0(st0_ptr, TAG_Special);
++static unsigned long __initdata addr[NTEST];
++static unsigned int __initdata len[NTEST];
++
++/* Change the global bit on random pages in the direct mapping */
++static __init int exercise_pageattr(void)
++{
++	struct split_state sa, sb, sc;
++	unsigned long *bm;
++	pte_t *pte, pte0;
++	int failed = 0;
++	int level;
++	int i, k;
++	int err;
++
++	printk(KERN_INFO "CPA exercising pageattr\n");
++
++	bm = vmalloc((max_pfn_mapped + 7) / 8);
++	if (!bm) {
++		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
++		return -ENOMEM;
++	}
++	memset(bm, 0, (max_pfn_mapped + 7) / 8);
++
++	failed += print_split(&sa);
++	srandom32(100);
++
++	for (i = 0; i < NTEST; i++) {
++		unsigned long pfn = random32() % max_pfn_mapped;
++
++		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
++		len[i] = random32() % 100;
++		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
++
++		if (len[i] == 0)
++			len[i] = 1;
++
++		pte = NULL;
++		pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */
++
++		for (k = 0; k < len[i]; k++) {
++			pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
++			if (!pte || pgprot_val(pte_pgprot(*pte)) == 0) {
++				addr[i] = 0;
++				break;
 +			}
-+		} else if (isNaN) {
-+			/* A QNaN */
-+			push();
-+			FPU_copy_to_reg0(st0_ptr, TAG_Special);
-+		} else {
-+			/* pseudoNaN or other unsupported */
-+			EXCEPTION(EX_Invalid);
-+			if (control_word & CW_Invalid) {
-+				/* The masked response */
-+				FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
-+				push();
-+				FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
++			if (k == 0) {
++				pte0 = *pte;
++			} else {
++				if (pgprot_val(pte_pgprot(*pte)) !=
++					pgprot_val(pte_pgprot(pte0))) {
++					len[i] = k;
++					break;
++				}
 +			}
++			if (test_bit(pfn + k, bm)) {
++				len[i] = k;
++				break;
++			}
++			__set_bit(pfn + k, bm);
 +		}
-+		break;		/* return with a NaN in st(0) */
- #ifdef PARANOID
--    default:
--      EXCEPTION(EX_INTERNAL|0x0112);
-+	default:
-+		EXCEPTION(EX_INTERNAL | 0x0112);
- #endif /* PARANOID */
--    }
-+	}
- }
- 
--
- /*---------------------------------------------------------------------------*/
- 
- static void f2xm1(FPU_REG *st0_ptr, u_char tag)
- {
--  FPU_REG a;
-+	FPU_REG a;
- 
--  clear_C1();
-+	clear_C1();
- 
--  if ( tag == TAG_Valid )
--    {
--      /* For an 80486 FPU, the result is undefined if the arg is >= 1.0 */
--      if ( exponent(st0_ptr) < 0 )
--	{
--	denormal_arg:
-+	if (tag == TAG_Valid) {
-+		/* For an 80486 FPU, the result is undefined if the arg is >= 1.0 */
-+		if (exponent(st0_ptr) < 0) {
-+		      denormal_arg:
- 
--	  FPU_to_exp16(st0_ptr, &a);
-+			FPU_to_exp16(st0_ptr, &a);
- 
--	  /* poly_2xm1(x) requires 0 < st(0) < 1. */
--	  poly_2xm1(getsign(st0_ptr), &a, st0_ptr);
-+			/* poly_2xm1(x) requires 0 < st(0) < 1. */
-+			poly_2xm1(getsign(st0_ptr), &a, st0_ptr);
-+		}
-+		set_precision_flag_up();	/* 80486 appears to always do this */
-+		return;
- 	}
--      set_precision_flag_up();   /* 80486 appears to always do this */
--      return;
--    }
- 
--  if ( tag == TAG_Zero )
--    return;
-+	if (tag == TAG_Zero)
-+		return;
- 
--  if ( tag == TAG_Special )
--    tag = FPU_Special(st0_ptr);
-+	if (tag == TAG_Special)
-+		tag = FPU_Special(st0_ptr);
- 
--  switch ( tag )
--    {
--    case TW_Denormal:
--      if ( denormal_operand() < 0 )
--	return;
--      goto denormal_arg;
--    case TW_Infinity:
--      if ( signnegative(st0_ptr) )
--	{
--	  /* -infinity gives -1 (p16-10) */
--	  FPU_copy_to_reg0(&CONST_1, TAG_Valid);
--	  setnegative(st0_ptr);
-+	switch (tag) {
-+	case TW_Denormal:
-+		if (denormal_operand() < 0)
-+			return;
-+		goto denormal_arg;
-+	case TW_Infinity:
-+		if (signnegative(st0_ptr)) {
-+			/* -infinity gives -1 (p16-10) */
-+			FPU_copy_to_reg0(&CONST_1, TAG_Valid);
-+			setnegative(st0_ptr);
++		if (!addr[i] || !pte || !k) {
++			addr[i] = 0;
++			continue;
 +		}
-+		return;
-+	default:
-+		single_arg_error(st0_ptr, tag);
- 	}
--      return;
--    default:
--      single_arg_error(st0_ptr, tag);
--    }
- }
- 
--
- static void fptan(FPU_REG *st0_ptr, u_char st0_tag)
- {
--  FPU_REG *st_new_ptr;
--  int q;
--  u_char arg_sign = getsign(st0_ptr);
--
--  /* Stack underflow has higher priority */
--  if ( st0_tag == TAG_Empty )
--    {
--      FPU_stack_underflow();  /* Puts a QNaN in st(0) */
--      if ( control_word & CW_Invalid )
--	{
--	  st_new_ptr = &st(-1);
--	  push();
--	  FPU_stack_underflow();  /* Puts a QNaN in the new st(0) */
-+	FPU_REG *st_new_ptr;
-+	int q;
-+	u_char arg_sign = getsign(st0_ptr);
 +
-+	/* Stack underflow has higher priority */
-+	if (st0_tag == TAG_Empty) {
-+		FPU_stack_underflow();	/* Puts a QNaN in st(0) */
-+		if (control_word & CW_Invalid) {
-+			st_new_ptr = &st(-1);
-+			push();
-+			FPU_stack_underflow();	/* Puts a QNaN in the new st(0) */
++		err = change_page_attr_clear(addr[i], len[i],
++					       __pgprot(_PAGE_GLOBAL));
++		if (err < 0) {
++			printk(KERN_ERR "CPA %d failed %d\n", i, err);
++			failed++;
 +		}
-+		return;
- 	}
--      return;
--    }
--
--  if ( STACK_OVERFLOW )
--    { FPU_stack_overflow(); return; }
--
--  if ( st0_tag == TAG_Valid )
--    {
--      if ( exponent(st0_ptr) > -40 )
--	{
--	  if ( (q = trig_arg(st0_ptr, 0)) == -1 )
--	    {
--	      /* Operand is out of range */
--	      return;
--	    }
--
--	  poly_tan(st0_ptr);
--	  setsign(st0_ptr, (q & 1) ^ (arg_sign != 0));
--	  set_precision_flag_up();  /* We do not really know if up or down */
 +
-+	if (STACK_OVERFLOW) {
-+		FPU_stack_overflow();
-+		return;
- 	}
--      else
--	{
--	  /* For a small arg, the result == the argument */
--	  /* Underflow may happen */
- 
--	denormal_arg:
-+	if (st0_tag == TAG_Valid) {
-+		if (exponent(st0_ptr) > -40) {
-+			if ((q = trig_arg(st0_ptr, 0)) == -1) {
-+				/* Operand is out of range */
-+				return;
-+			}
++		pte = lookup_address(addr[i], &level);
++		if (!pte || pte_global(*pte) || pte_huge(*pte)) {
++			printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i],
++				pte ? (u64)pte_val(*pte) : 0ULL);
++			failed++;
++		}
++		if (level != PG_LEVEL_4K) {
++			printk(KERN_ERR "CPA %lx: unexpected level %d\n",
++				addr[i], level);
++			failed++;
++		}
 +
-+			poly_tan(st0_ptr);
-+			setsign(st0_ptr, (q & 1) ^ (arg_sign != 0));
-+			set_precision_flag_up();	/* We do not really know if up or down */
-+		} else {
-+			/* For a small arg, the result == the argument */
-+			/* Underflow may happen */
++	}
++	vfree(bm);
 +
-+		      denormal_arg:
++	failed += print_split(&sb);
 +
-+			FPU_to_exp16(st0_ptr, st0_ptr);
- 
--	  FPU_to_exp16(st0_ptr, st0_ptr);
--      
--	  st0_tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
--	  FPU_settag0(st0_tag);
-+			st0_tag =
-+			    FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
-+			FPU_settag0(st0_tag);
++	printk(KERN_INFO "CPA reverting everything\n");
++	for (i = 0; i < NTEST; i++) {
++		if (!addr[i])
++			continue;
++		pte = lookup_address(addr[i], &level);
++		if (!pte) {
++			printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]);
++			failed++;
++			continue;
 +		}
-+		push();
-+		FPU_copy_to_reg0(&CONST_1, TAG_Valid);
-+		return;
- 	}
--      push();
--      FPU_copy_to_reg0(&CONST_1, TAG_Valid);
--      return;
--    }
--
--  if ( st0_tag == TAG_Zero )
--    {
--      push();
--      FPU_copy_to_reg0(&CONST_1, TAG_Valid);
--      setcc(0);
--      return;
--    }
--
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
--
--  if ( st0_tag == TW_Denormal )
--    {
--      if ( denormal_operand() < 0 )
--	return;
- 
--      goto denormal_arg;
--    }
--
--  if ( st0_tag == TW_Infinity )
--    {
--      /* The 80486 treats infinity as an invalid operand */
--      if ( arith_invalid(0) >= 0 )
--	{
--	  st_new_ptr = &st(-1);
--	  push();
--	  arith_invalid(0);
-+	if (st0_tag == TAG_Zero) {
-+		push();
-+		FPU_copy_to_reg0(&CONST_1, TAG_Valid);
-+		setcc(0);
-+		return;
++		err = change_page_attr_set(addr[i], len[i],
++					     __pgprot(_PAGE_GLOBAL));
++		if (err < 0) {
++			printk(KERN_ERR "CPA reverting failed: %d\n", err);
++			failed++;
++		}
++		pte = lookup_address(addr[i], &level);
++		if (!pte || !pte_global(*pte)) {
++			printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n",
++				addr[i], pte ? (u64)pte_val(*pte) : 0ULL);
++			failed++;
++		}
++
 +	}
 +
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
++	failed += print_split(&sc);
 +
-+	if (st0_tag == TW_Denormal) {
-+		if (denormal_operand() < 0)
-+			return;
++	if (failed) {
++		printk(KERN_ERR "CPA selftests NOT PASSED. Please report.\n");
++		WARN_ON(1);
++	} else {
++		printk(KERN_INFO "CPA selftests PASSED\n");
++	}
 +
-+		goto denormal_arg;
- 	}
--      return;
--    }
- 
--  single_arg_2_error(st0_ptr, st0_tag);
--}
-+	if (st0_tag == TW_Infinity) {
-+		/* The 80486 treats infinity as an invalid operand */
-+		if (arith_invalid(0) >= 0) {
-+			st_new_ptr = &st(-1);
-+			push();
-+			arith_invalid(0);
++	return 0;
++}
++module_init(exercise_pageattr);
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+new file mode 100644
+index 0000000..1cc6607
+--- /dev/null
++++ b/arch/x86/mm/pageattr.c
+@@ -0,0 +1,564 @@
++/*
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ * Thanks to Ben LaHaise for precious feedback.
++ */
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++
++#include <asm/e820.h>
++#include <asm/processor.h>
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/uaccess.h>
++#include <asm/pgalloc.h>
++
++static inline int
++within(unsigned long addr, unsigned long start, unsigned long end)
++{
++	return addr >= start && addr < end;
++}
++
++/*
++ * Flushing functions
++ */
++
++/**
++ * clflush_cache_range - flush a cache range with clflush
++ * @addr:	virtual start address
++ * @size:	number of bytes to flush
++ *
++ * clflush is an unordered instruction which needs fencing with mfence
++ * to avoid ordering issues.
++ */
++void clflush_cache_range(void *vaddr, unsigned int size)
++{
++	void *vend = vaddr + size - 1;
++
++	mb();
++
++	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
++		clflush(vaddr);
++	/*
++	 * Flush any possible final partial cacheline:
++	 */
++	clflush(vend);
++
++	mb();
++}
++
++static void __cpa_flush_all(void *arg)
++{
++	/*
++	 * Flush all to work around Errata in early athlons regarding
++	 * large page flushing.
++	 */
++	__flush_tlb_all();
++
++	if (boot_cpu_data.x86_model >= 4)
++		wbinvd();
++}
++
++static void cpa_flush_all(void)
++{
++	BUG_ON(irqs_disabled());
++
++	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
++}
++
++static void __cpa_flush_range(void *arg)
++{
++	/*
++	 * We could optimize that further and do individual per page
++	 * tlb invalidates for a low number of pages. Caveat: we must
++	 * flush the high aliases on 64bit as well.
++	 */
++	__flush_tlb_all();
++}
++
++static void cpa_flush_range(unsigned long start, int numpages)
++{
++	unsigned int i, level;
++	unsigned long addr;
++
++	BUG_ON(irqs_disabled());
++	WARN_ON(PAGE_ALIGN(start) != start);
++
++	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
++
++	/*
++	 * We only need to flush on one CPU,
++	 * clflush is a MESI-coherent instruction that
++	 * will cause all other CPUs to flush the same
++	 * cachelines:
++	 */
++	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
++		pte_t *pte = lookup_address(addr, &level);
++
++		/*
++		 * Only flush present addresses:
++		 */
++		if (pte && pte_present(*pte))
++			clflush_cache_range((void *) addr, PAGE_SIZE);
++	}
++}
++
++/*
++ * Certain areas of memory on x86 require very specific protection flags,
++ * for example the BIOS area or kernel text. Callers don't always get this
++ * right (again, ioremap() on BIOS memory is not uncommon) so this function
++ * checks and fixes these known static required protection bits.
++ */
++static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
++{
++	pgprot_t forbidden = __pgprot(0);
++
++	/*
++	 * The BIOS area between 640k and 1Mb needs to be executable for
++	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
++	 */
++	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
++		pgprot_val(forbidden) |= _PAGE_NX;
++
++	/*
++	 * The kernel text needs to be executable for obvious reasons
++	 * Does not cover __inittext since that is gone later on
++	 */
++	if (within(address, (unsigned long)_text, (unsigned long)_etext))
++		pgprot_val(forbidden) |= _PAGE_NX;
++
++#ifdef CONFIG_DEBUG_RODATA
++	/* The .rodata section needs to be read-only */
++	if (within(address, (unsigned long)__start_rodata,
++				(unsigned long)__end_rodata))
++		pgprot_val(forbidden) |= _PAGE_RW;
++#endif
++
++	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
++
++	return prot;
++}
++
++pte_t *lookup_address(unsigned long address, int *level)
++{
++	pgd_t *pgd = pgd_offset_k(address);
++	pud_t *pud;
++	pmd_t *pmd;
++
++	*level = PG_LEVEL_NONE;
++
++	if (pgd_none(*pgd))
++		return NULL;
++	pud = pud_offset(pgd, address);
++	if (pud_none(*pud))
++		return NULL;
++	pmd = pmd_offset(pud, address);
++	if (pmd_none(*pmd))
++		return NULL;
++
++	*level = PG_LEVEL_2M;
++	if (pmd_large(*pmd))
++		return (pte_t *)pmd;
++
++	*level = PG_LEVEL_4K;
++	return pte_offset_kernel(pmd, address);
++}
++
++static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
++{
++	/* change init_mm */
++	set_pte_atomic(kpte, pte);
++#ifdef CONFIG_X86_32
++	if (!SHARED_KERNEL_PMD) {
++		struct page *page;
++
++		list_for_each_entry(page, &pgd_list, lru) {
++			pgd_t *pgd;
++			pud_t *pud;
++			pmd_t *pmd;
++
++			pgd = (pgd_t *)page_address(page) + pgd_index(address);
++			pud = pud_offset(pgd, address);
++			pmd = pmd_offset(pud, address);
++			set_pte_atomic((pte_t *)pmd, pte);
 +		}
-+		return;
 +	}
- 
-+	single_arg_2_error(st0_ptr, st0_tag);
++#endif
 +}
- 
- static void fxtract(FPU_REG *st0_ptr, u_char st0_tag)
- {
--  FPU_REG *st_new_ptr;
--  u_char sign;
--  register FPU_REG *st1_ptr = st0_ptr;  /* anticipate */
--
--  if ( STACK_OVERFLOW )
--    {  FPU_stack_overflow(); return; }
--
--  clear_C1();
--
--  if ( st0_tag == TAG_Valid )
--    {
--      long e;
--
--      push();
--      sign = getsign(st1_ptr);
--      reg_copy(st1_ptr, st_new_ptr);
--      setexponent16(st_new_ptr, exponent(st_new_ptr));
--
--    denormal_arg:
--
--      e = exponent16(st_new_ptr);
--      convert_l2reg(&e, 1);
--      setexponentpos(st_new_ptr, 0);
--      setsign(st_new_ptr, sign);
--      FPU_settag0(TAG_Valid);       /* Needed if arg was a denormal */
--      return;
--    }
--  else if ( st0_tag == TAG_Zero )
--    {
--      sign = getsign(st0_ptr);
--
--      if ( FPU_divide_by_zero(0, SIGN_NEG) < 0 )
--	return;
-+	FPU_REG *st_new_ptr;
-+	u_char sign;
-+	register FPU_REG *st1_ptr = st0_ptr;	/* anticipate */
- 
--      push();
--      FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
--      setsign(st_new_ptr, sign);
--      return;
--    }
-+	if (STACK_OVERFLOW) {
-+		FPU_stack_overflow();
-+		return;
++
++static int split_large_page(pte_t *kpte, unsigned long address)
++{
++	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
++	gfp_t gfp_flags = GFP_KERNEL;
++	unsigned long flags;
++	unsigned long addr;
++	pte_t *pbase, *tmp;
++	struct page *base;
++	unsigned int i, level;
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	gfp_flags = __GFP_HIGH | __GFP_NOFAIL | __GFP_NOWARN;
++	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
++#endif
++	base = alloc_pages(gfp_flags, 0);
++	if (!base)
++		return -ENOMEM;
++
++	spin_lock_irqsave(&pgd_lock, flags);
++	/*
++	 * Check for races, another CPU might have split this page
++	 * up for us already:
++	 */
++	tmp = lookup_address(address, &level);
++	if (tmp != kpte) {
++		WARN_ON_ONCE(1);
++		goto out_unlock;
 +	}
- 
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
-+	clear_C1();
- 
--  if ( st0_tag == TW_Denormal )
--    {
--      if (denormal_operand() < 0 )
--	return;
-+	if (st0_tag == TAG_Valid) {
-+		long e;
- 
--      push();
--      sign = getsign(st1_ptr);
--      FPU_to_exp16(st1_ptr, st_new_ptr);
--      goto denormal_arg;
--    }
--  else if ( st0_tag == TW_Infinity )
--    {
--      sign = getsign(st0_ptr);
--      setpositive(st0_ptr);
--      push();
--      FPU_copy_to_reg0(&CONST_INF, TAG_Special);
--      setsign(st_new_ptr, sign);
--      return;
--    }
--  else if ( st0_tag == TW_NaN )
--    {
--      if ( real_1op_NaN(st0_ptr) < 0 )
--	return;
-+		push();
-+		sign = getsign(st1_ptr);
-+		reg_copy(st1_ptr, st_new_ptr);
-+		setexponent16(st_new_ptr, exponent(st_new_ptr));
 +
-+	      denormal_arg:
++	address = __pa(address);
++	addr = address & LARGE_PAGE_MASK;
++	pbase = (pte_t *)page_address(base);
++#ifdef CONFIG_X86_32
++	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
++#endif
 +
-+		e = exponent16(st_new_ptr);
-+		convert_l2reg(&e, 1);
-+		setexponentpos(st_new_ptr, 0);
-+		setsign(st_new_ptr, sign);
-+		FPU_settag0(TAG_Valid);	/* Needed if arg was a denormal */
-+		return;
-+	} else if (st0_tag == TAG_Zero) {
-+		sign = getsign(st0_ptr);
++	pgprot_val(ref_prot) &= ~_PAGE_NX;
++	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
++		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
 +
-+		if (FPU_divide_by_zero(0, SIGN_NEG) < 0)
-+			return;
- 
--      push();
--      FPU_copy_to_reg0(st0_ptr, TAG_Special);
--      return;
--    }
--  else if ( st0_tag == TAG_Empty )
--    {
--      /* Is this the correct behaviour? */
--      if ( control_word & EX_Invalid )
--	{
--	  FPU_stack_underflow();
--	  push();
--	  FPU_stack_underflow();
-+		push();
-+		FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
-+		setsign(st_new_ptr, sign);
-+		return;
++	/*
++	 * Install the new, split up pagetable. Important detail here:
++	 *
++	 * On Intel the NX bit of all levels must be cleared to make a
++	 * page executable. See section 4.13.2 of Intel 64 and IA-32
++	 * Architectures Software Developer's Manual).
++	 */
++	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
++	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
++	base = NULL;
++
++out_unlock:
++	spin_unlock_irqrestore(&pgd_lock, flags);
++
++	if (base)
++		__free_pages(base, 0);
++
++	return 0;
++}
++
++static int
++__change_page_attr(unsigned long address, unsigned long pfn,
++		   pgprot_t mask_set, pgprot_t mask_clr)
++{
++	struct page *kpte_page;
++	int level, err = 0;
++	pte_t *kpte;
++
++#ifdef CONFIG_X86_32
++	BUG_ON(pfn > max_low_pfn);
++#endif
++
++repeat:
++	kpte = lookup_address(address, &level);
++	if (!kpte)
++		return -EINVAL;
++
++	kpte_page = virt_to_page(kpte);
++	BUG_ON(PageLRU(kpte_page));
++	BUG_ON(PageCompound(kpte_page));
++
++	if (level == PG_LEVEL_4K) {
++		pgprot_t new_prot = pte_pgprot(*kpte);
++		pte_t new_pte, old_pte = *kpte;
++
++		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
++		pgprot_val(new_prot) |= pgprot_val(mask_set);
++
++		new_prot = static_protections(new_prot, address);
++
++		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
++		BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));
++
++		set_pte_atomic(kpte, new_pte);
++	} else {
++		err = split_large_page(kpte, address);
++		if (!err)
++			goto repeat;
 +	}
++	return err;
++}
 +
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
++/**
++ * change_page_attr_addr - Change page table attributes in linear mapping
++ * @address: Virtual address in linear mapping.
++ * @prot:    New page table attribute (PAGE_*)
++ *
++ * Change page attributes of a page in the direct mapping. This is a variant
++ * of change_page_attr() that also works on memory holes that do not have
++ * mem_map entry (pfn_valid() is false).
++ *
++ * See change_page_attr() documentation for more details.
++ *
++ * Modules and drivers should use the set_memory_* APIs instead.
++ */
 +
-+	if (st0_tag == TW_Denormal) {
-+		if (denormal_operand() < 0)
-+			return;
++#define HIGH_MAP_START	__START_KERNEL_map
++#define HIGH_MAP_END	(__START_KERNEL_map + KERNEL_TEXT_SIZE)
 +
-+		push();
-+		sign = getsign(st1_ptr);
-+		FPU_to_exp16(st1_ptr, st_new_ptr);
-+		goto denormal_arg;
-+	} else if (st0_tag == TW_Infinity) {
-+		sign = getsign(st0_ptr);
-+		setpositive(st0_ptr);
-+		push();
-+		FPU_copy_to_reg0(&CONST_INF, TAG_Special);
-+		setsign(st_new_ptr, sign);
++static int
++change_page_attr_addr(unsigned long address, pgprot_t mask_set,
++		      pgprot_t mask_clr)
++{
++	unsigned long phys_addr = __pa(address);
++	unsigned long pfn = phys_addr >> PAGE_SHIFT;
++	int err;
++
++#ifdef CONFIG_X86_64
++	/*
++	 * If we are inside the high mapped kernel range, then we
++	 * fixup the low mapping first. __va() returns the virtual
++	 * address in the linear mapping:
++	 */
++	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
++		address = (unsigned long) __va(phys_addr);
++#endif
++
++	err = __change_page_attr(address, pfn, mask_set, mask_clr);
++	if (err)
++		return err;
++
++#ifdef CONFIG_X86_64
++	/*
++	 * If the physical address is inside the kernel map, we need
++	 * to touch the high mapped kernel as well:
++	 */
++	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
++		/*
++		 * Calc the high mapping address. See __phys_addr()
++		 * for the non obvious details.
++		 */
++		address = phys_addr + HIGH_MAP_START - phys_base;
++		/* Make sure the kernel mappings stay executable */
++		pgprot_val(mask_clr) |= _PAGE_NX;
++
++		/*
++		 * Our high aliases are imprecise, because we check
++		 * everything between 0 and KERNEL_TEXT_SIZE, so do
++		 * not propagate lookup failures back to users:
++		 */
++		__change_page_attr(address, pfn, mask_set, mask_clr);
++	}
++#endif
++	return err;
++}
++
++static int __change_page_attr_set_clr(unsigned long addr, int numpages,
++				      pgprot_t mask_set, pgprot_t mask_clr)
++{
++	unsigned int i;
++	int ret;
++
++	for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) {
++		ret = change_page_attr_addr(addr, mask_set, mask_clr);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static int change_page_attr_set_clr(unsigned long addr, int numpages,
++				    pgprot_t mask_set, pgprot_t mask_clr)
++{
++	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
++					     mask_clr);
++
++	/*
++	 * On success we use clflush, when the CPU supports it to
++	 * avoid the wbindv. If the CPU does not support it and in the
++	 * error case we fall back to cpa_flush_all (which uses
++	 * wbindv):
++	 */
++	if (!ret && cpu_has_clflush)
++		cpa_flush_range(addr, numpages);
++	else
++		cpa_flush_all();
++
++	return ret;
++}
++
++static inline int change_page_attr_set(unsigned long addr, int numpages,
++				       pgprot_t mask)
++{
++	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
++}
++
++static inline int change_page_attr_clear(unsigned long addr, int numpages,
++					 pgprot_t mask)
++{
++	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
++
++}
++
++int set_memory_uc(unsigned long addr, int numpages)
++{
++	return change_page_attr_set(addr, numpages,
++				    __pgprot(_PAGE_PCD | _PAGE_PWT));
++}
++EXPORT_SYMBOL(set_memory_uc);
++
++int set_memory_wb(unsigned long addr, int numpages)
++{
++	return change_page_attr_clear(addr, numpages,
++				      __pgprot(_PAGE_PCD | _PAGE_PWT));
++}
++EXPORT_SYMBOL(set_memory_wb);
++
++int set_memory_x(unsigned long addr, int numpages)
++{
++	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
++}
++EXPORT_SYMBOL(set_memory_x);
++
++int set_memory_nx(unsigned long addr, int numpages)
++{
++	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
++}
++EXPORT_SYMBOL(set_memory_nx);
++
++int set_memory_ro(unsigned long addr, int numpages)
++{
++	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
++}
++
++int set_memory_rw(unsigned long addr, int numpages)
++{
++	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
++}
++
++int set_memory_np(unsigned long addr, int numpages)
++{
++	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
++}
++
++int set_pages_uc(struct page *page, int numpages)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++
++	return set_memory_uc(addr, numpages);
++}
++EXPORT_SYMBOL(set_pages_uc);
++
++int set_pages_wb(struct page *page, int numpages)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++
++	return set_memory_wb(addr, numpages);
++}
++EXPORT_SYMBOL(set_pages_wb);
++
++int set_pages_x(struct page *page, int numpages)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++
++	return set_memory_x(addr, numpages);
++}
++EXPORT_SYMBOL(set_pages_x);
++
++int set_pages_nx(struct page *page, int numpages)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++
++	return set_memory_nx(addr, numpages);
++}
++EXPORT_SYMBOL(set_pages_nx);
++
++int set_pages_ro(struct page *page, int numpages)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++
++	return set_memory_ro(addr, numpages);
++}
++
++int set_pages_rw(struct page *page, int numpages)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++
++	return set_memory_rw(addr, numpages);
++}
++
++
++#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
++static inline int __change_page_attr_set(unsigned long addr, int numpages,
++					 pgprot_t mask)
++{
++	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
++}
++
++static inline int __change_page_attr_clear(unsigned long addr, int numpages,
++					   pgprot_t mask)
++{
++	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
++}
++#endif
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++
++static int __set_pages_p(struct page *page, int numpages)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++
++	return __change_page_attr_set(addr, numpages,
++				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
++}
++
++static int __set_pages_np(struct page *page, int numpages)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++
++	return __change_page_attr_clear(addr, numpages,
++					__pgprot(_PAGE_PRESENT));
++}
++
++void kernel_map_pages(struct page *page, int numpages, int enable)
++{
++	if (PageHighMem(page))
 +		return;
-+	} else if (st0_tag == TW_NaN) {
-+		if (real_1op_NaN(st0_ptr) < 0)
-+			return;
++	if (!enable) {
++		debug_check_no_locks_freed(page_address(page),
++					   numpages * PAGE_SIZE);
++	}
 +
-+		push();
-+		FPU_copy_to_reg0(st0_ptr, TAG_Special);
++	/*
++	 * If page allocator is not up yet then do not call c_p_a():
++	 */
++	if (!debug_pagealloc_enabled)
 +		return;
-+	} else if (st0_tag == TAG_Empty) {
-+		/* Is this the correct behaviour? */
-+		if (control_word & EX_Invalid) {
-+			FPU_stack_underflow();
-+			push();
-+			FPU_stack_underflow();
-+		} else
-+			EXCEPTION(EX_StackUnder);
- 	}
--      else
--	EXCEPTION(EX_StackUnder);
--    }
- #ifdef PARANOID
--  else
--    EXCEPTION(EX_INTERNAL | 0x119);
++
++	/*
++	 * The return value is ignored - the calls cannot fail,
++	 * large pages are disabled at boot time:
++	 */
++	if (enable)
++		__set_pages_p(page, numpages);
 +	else
-+		EXCEPTION(EX_INTERNAL | 0x119);
- #endif /* PARANOID */
- }
- 
++		__set_pages_np(page, numpages);
++
++	/*
++	 * We should perform an IPI and flush all tlbs,
++	 * but that can deadlock, so flush only the current cpu:
++	 */
++	__flush_tlb_all();
++}
++#endif
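
For context: with CONFIG_DEBUG_PAGEALLOC the page allocator drives this hook so that
freed pages are unmapped and stray accesses fault immediately. Roughly (paraphrased
from mm/page_alloc.c of this era; illustrative only, not part of the patch):

	/* on free (__free_pages_ok and friends): unmap the pages */
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	/* on allocation (prep_new_page): map them back in */
	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);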
++
++/*
++ * The testcases use internal knowledge of the implementation that shouldn't
++ * be exposed to the rest of the kernel. Include these directly here.
++ */
++#ifdef CONFIG_CPA_DEBUG
++#include "pageattr-test.c"
++#endif
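
For reviewers, a minimal usage sketch of the new set_memory_*() interface added above
(illustrative only, not part of the patch; it assumes a lowmem buffer from
__get_free_pages() and that the declarations end up in <asm/cacheflush.h>):

	#include <linux/gfp.h>
	#include <asm/cacheflush.h>

	static unsigned long buf;

	static int example_map_uncached(void)
	{
		buf = __get_free_pages(GFP_KERNEL, 2);	/* 4 contiguous lowmem pages */
		if (!buf)
			return -ENOMEM;
		/* takes a kernel virtual address and a page count */
		return set_memory_uc(buf, 4);
	}

	static void example_unmap_uncached(void)
	{
		set_memory_wb(buf, 4);		/* restore write-back caching */
		free_pages(buf, 2);
	}

Unlike the old change_page_attr()/global_flush_tlb() pair removed below, the cache
and TLB flushing happens inside the call itself.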
+diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
+deleted file mode 100644
+index 260073c..0000000
+--- a/arch/x86/mm/pageattr_32.c
++++ /dev/null
+@@ -1,278 +0,0 @@
+-/* 
+- * Copyright 2002 Andi Kleen, SuSE Labs. 
+- * Thanks to Ben LaHaise for precious feedback.
+- */ 
 -
- static void fdecstp(void)
- {
--  clear_C1();
--  top--;
-+	clear_C1();
-+	top--;
- }
- 
- static void fincstp(void)
- {
--  clear_C1();
--  top++;
-+	clear_C1();
-+	top++;
- }
- 
+-#include <linux/mm.h>
+-#include <linux/sched.h>
+-#include <linux/highmem.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <asm/uaccess.h>
+-#include <asm/processor.h>
+-#include <asm/tlbflush.h>
+-#include <asm/pgalloc.h>
+-#include <asm/sections.h>
+-
+-static DEFINE_SPINLOCK(cpa_lock);
+-static struct list_head df_list = LIST_HEAD_INIT(df_list);
+-
+-
+-pte_t *lookup_address(unsigned long address) 
+-{ 
+-	pgd_t *pgd = pgd_offset_k(address);
+-	pud_t *pud;
+-	pmd_t *pmd;
+-	if (pgd_none(*pgd))
+-		return NULL;
+-	pud = pud_offset(pgd, address);
+-	if (pud_none(*pud))
+-		return NULL;
+-	pmd = pmd_offset(pud, address);
+-	if (pmd_none(*pmd))
+-		return NULL;
+-	if (pmd_large(*pmd))
+-		return (pte_t *)pmd;
+-        return pte_offset_kernel(pmd, address);
+-} 
+-
+-static struct page *split_large_page(unsigned long address, pgprot_t prot,
+-					pgprot_t ref_prot)
+-{ 
+-	int i; 
+-	unsigned long addr;
+-	struct page *base;
+-	pte_t *pbase;
+-
+-	spin_unlock_irq(&cpa_lock);
+-	base = alloc_pages(GFP_KERNEL, 0);
+-	spin_lock_irq(&cpa_lock);
+-	if (!base) 
+-		return NULL;
+-
+-	/*
+-	 * page_private is used to track the number of entries in
+-	 * the page table page that have non standard attributes.
+-	 */
+-	SetPagePrivate(base);
+-	page_private(base) = 0;
+-
+-	address = __pa(address);
+-	addr = address & LARGE_PAGE_MASK; 
+-	pbase = (pte_t *)page_address(base);
+-	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
+-	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+-               set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
+-                                          addr == address ? prot : ref_prot));
+-	}
+-	return base;
+-} 
+-
+-static void cache_flush_page(struct page *p)
+-{ 
+-	void *adr = page_address(p);
+-	int i;
+-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+-		clflush(adr+i);
+-}
+-
+-static void flush_kernel_map(void *arg)
+-{
+-	struct list_head *lh = (struct list_head *)arg;
+-	struct page *p;
+-
+-	/* High level code is not ready for clflush yet */
+-	if (0 && cpu_has_clflush) {
+-		list_for_each_entry (p, lh, lru)
+-			cache_flush_page(p);
+-	} else if (boot_cpu_data.x86_model >= 4)
+-		wbinvd();
+-
+-	/* Flush all to work around Errata in early athlons regarding 
+-	 * large page flushing. 
+-	 */
+-	__flush_tlb_all(); 	
+-}
+-
+-static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) 
+-{ 
+-	struct page *page;
+-	unsigned long flags;
+-
+-	set_pte_atomic(kpte, pte); 	/* change init_mm */
+-	if (SHARED_KERNEL_PMD)
+-		return;
+-
+-	spin_lock_irqsave(&pgd_lock, flags);
+-	for (page = pgd_list; page; page = (struct page *)page->index) {
+-		pgd_t *pgd;
+-		pud_t *pud;
+-		pmd_t *pmd;
+-		pgd = (pgd_t *)page_address(page) + pgd_index(address);
+-		pud = pud_offset(pgd, address);
+-		pmd = pmd_offset(pud, address);
+-		set_pte_atomic((pte_t *)pmd, pte);
+-	}
+-	spin_unlock_irqrestore(&pgd_lock, flags);
+-}
+-
+-/* 
+- * No more special protections in this 2/4MB area - revert to a
+- * large page again. 
+- */
+-static inline void revert_page(struct page *kpte_page, unsigned long address)
+-{
+-	pgprot_t ref_prot;
+-	pte_t *linear;
+-
+-	ref_prot =
+-	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+-		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
+-
+-	linear = (pte_t *)
+-		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
+-	set_pmd_pte(linear,  address,
+-		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
+-			    ref_prot));
+-}
+-
+-static inline void save_page(struct page *kpte_page)
+-{
+-	if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
+-		list_add(&kpte_page->lru, &df_list);
+-}
+-
+-static int
+-__change_page_attr(struct page *page, pgprot_t prot)
+-{ 
+-	pte_t *kpte; 
+-	unsigned long address;
+-	struct page *kpte_page;
+-
+-	BUG_ON(PageHighMem(page));
+-	address = (unsigned long)page_address(page);
+-
+-	kpte = lookup_address(address);
+-	if (!kpte)
+-		return -EINVAL;
+-	kpte_page = virt_to_page(kpte);
+-	BUG_ON(PageLRU(kpte_page));
+-	BUG_ON(PageCompound(kpte_page));
+-
+-	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
+-		if (!pte_huge(*kpte)) {
+-			set_pte_atomic(kpte, mk_pte(page, prot)); 
+-		} else {
+-			pgprot_t ref_prot;
+-			struct page *split;
+-
+-			ref_prot =
+-			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+-				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
+-			split = split_large_page(address, prot, ref_prot);
+-			if (!split)
+-				return -ENOMEM;
+-			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
+-			kpte_page = split;
+-		}
+-		page_private(kpte_page)++;
+-	} else if (!pte_huge(*kpte)) {
+-		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
+-		BUG_ON(page_private(kpte_page) == 0);
+-		page_private(kpte_page)--;
+-	} else
+-		BUG();
+-
+-	/*
+-	 * If the pte was reserved, it means it was created at boot
+-	 * time (not via split_large_page) and in turn we must not
+-	 * replace it with a largepage.
+-	 */
+-
+-	save_page(kpte_page);
+-	if (!PageReserved(kpte_page)) {
+-		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
+-			paravirt_release_pt(page_to_pfn(kpte_page));
+-			revert_page(kpte_page, address);
+-		}
+-	}
+-	return 0;
+-} 
+-
+-static inline void flush_map(struct list_head *l)
+-{
+-	on_each_cpu(flush_kernel_map, l, 1, 1);
+-}
+-
+-/*
+- * Change the page attributes of an page in the linear mapping.
+- *
+- * This should be used when a page is mapped with a different caching policy
+- * than write-back somewhere - some CPUs do not like it when mappings with
+- * different caching policies exist. This changes the page attributes of the
+- * in kernel linear mapping too.
+- * 
+- * The caller needs to ensure that there are no conflicting mappings elsewhere.
+- * This function only deals with the kernel linear map.
+- * 
+- * Caller must call global_flush_tlb() after this.
+- */
+-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+-{
+-	int err = 0; 
+-	int i; 
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cpa_lock, flags);
+-	for (i = 0; i < numpages; i++, page++) { 
+-		err = __change_page_attr(page, prot);
+-		if (err) 
+-			break; 
+-	} 	
+-	spin_unlock_irqrestore(&cpa_lock, flags);
+-	return err;
+-}
 -
- static void fsqrt_(FPU_REG *st0_ptr, u_char st0_tag)
- {
--  int expon;
+-void global_flush_tlb(void)
+-{
+-	struct list_head l;
+-	struct page *pg, *next;
 -
--  clear_C1();
+-	BUG_ON(irqs_disabled());
 -
--  if ( st0_tag == TAG_Valid )
--    {
--      u_char tag;
--      
--      if (signnegative(st0_ptr))
--	{
--	  arith_invalid(0);  /* sqrt(negative) is invalid */
--	  return;
+-	spin_lock_irq(&cpa_lock);
+-	list_replace_init(&df_list, &l);
+-	spin_unlock_irq(&cpa_lock);
+-	flush_map(&l);
+-	list_for_each_entry_safe(pg, next, &l, lru) {
+-		list_del(&pg->lru);
+-		clear_bit(PG_arch_1, &pg->flags);
+-		if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
+-			continue;
+-		ClearPagePrivate(pg);
+-		__free_page(pg);
 -	}
-+	int expon;
-+
-+	clear_C1();
- 
--      /* make st(0) in  [1.0 .. 4.0) */
--      expon = exponent(st0_ptr);
+-}
 -
--    denormal_arg:
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+-void kernel_map_pages(struct page *page, int numpages, int enable)
+-{
+-	if (PageHighMem(page))
+-		return;
+-	if (!enable)
+-		debug_check_no_locks_freed(page_address(page),
+-					   numpages * PAGE_SIZE);
 -
--      setexponent16(st0_ptr, (expon & 1));
+-	/* the return value is ignored - the calls cannot fail,
+-	 * large pages are disabled at boot time.
+-	 */
+-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+-	/* we should perform an IPI and flush all tlbs,
+-	 * but that can deadlock->flush only current cpu.
+-	 */
+-	__flush_tlb_all();
+-}
+-#endif
 -
--      /* Do the computation, the sign of the result will be positive. */
--      tag = wm_sqrt(st0_ptr, 0, 0, control_word, SIGN_POS);
--      addexponent(st0_ptr, expon >> 1);
--      FPU_settag0(tag);
--      return;
--    }
+-EXPORT_SYMBOL(change_page_attr);
+-EXPORT_SYMBOL(global_flush_tlb);
+diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
+deleted file mode 100644
+index c40afba..0000000
+--- a/arch/x86/mm/pageattr_64.c
++++ /dev/null
+@@ -1,255 +0,0 @@
+-/* 
+- * Copyright 2002 Andi Kleen, SuSE Labs. 
+- * Thanks to Ben LaHaise for precious feedback.
+- */ 
 -
--  if ( st0_tag == TAG_Zero )
--    return;
+-#include <linux/mm.h>
+-#include <linux/sched.h>
+-#include <linux/highmem.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <asm/uaccess.h>
+-#include <asm/processor.h>
+-#include <asm/tlbflush.h>
+-#include <asm/io.h>
 -
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
+-pte_t *lookup_address(unsigned long address)
+-{ 
+-	pgd_t *pgd = pgd_offset_k(address);
+-	pud_t *pud;
+-	pmd_t *pmd;
+-	pte_t *pte;
+-	if (pgd_none(*pgd))
+-		return NULL;
+-	pud = pud_offset(pgd, address);
+-	if (!pud_present(*pud))
+-		return NULL; 
+-	pmd = pmd_offset(pud, address);
+-	if (!pmd_present(*pmd))
+-		return NULL; 
+-	if (pmd_large(*pmd))
+-		return (pte_t *)pmd;
+-	pte = pte_offset_kernel(pmd, address);
+-	if (pte && !pte_present(*pte))
+-		pte = NULL; 
+-	return pte;
+-} 
 -
--  if ( st0_tag == TW_Infinity )
--    {
--      if ( signnegative(st0_ptr) )
--	arith_invalid(0);  /* sqrt(-Infinity) is invalid */
--      return;
--    }
--  else if ( st0_tag == TW_Denormal )
--    {
--      if (signnegative(st0_ptr))
--	{
--	  arith_invalid(0);  /* sqrt(negative) is invalid */
--	  return;
-+	if (st0_tag == TAG_Valid) {
-+		u_char tag;
-+
-+		if (signnegative(st0_ptr)) {
-+			arith_invalid(0);	/* sqrt(negative) is invalid */
-+			return;
-+		}
-+
-+		/* make st(0) in  [1.0 .. 4.0) */
-+		expon = exponent(st0_ptr);
-+
-+	      denormal_arg:
-+
-+		setexponent16(st0_ptr, (expon & 1));
-+
-+		/* Do the computation, the sign of the result will be positive. */
-+		tag = wm_sqrt(st0_ptr, 0, 0, control_word, SIGN_POS);
-+		addexponent(st0_ptr, expon >> 1);
-+		FPU_settag0(tag);
-+		return;
- 	}
- 
--      if ( denormal_operand() < 0 )
--	return;
-+	if (st0_tag == TAG_Zero)
-+		return;
- 
--      FPU_to_exp16(st0_ptr, st0_ptr);
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
- 
--      expon = exponent16(st0_ptr);
-+	if (st0_tag == TW_Infinity) {
-+		if (signnegative(st0_ptr))
-+			arith_invalid(0);	/* sqrt(-Infinity) is invalid */
-+		return;
-+	} else if (st0_tag == TW_Denormal) {
-+		if (signnegative(st0_ptr)) {
-+			arith_invalid(0);	/* sqrt(negative) is invalid */
-+			return;
-+		}
- 
--      goto denormal_arg;
--    }
-+		if (denormal_operand() < 0)
-+			return;
- 
--  single_arg_error(st0_ptr, st0_tag);
-+		FPU_to_exp16(st0_ptr, st0_ptr);
- 
+-static struct page *split_large_page(unsigned long address, pgprot_t prot,
+-				     pgprot_t ref_prot)
+-{ 
+-	int i; 
+-	unsigned long addr;
+-	struct page *base = alloc_pages(GFP_KERNEL, 0);
+-	pte_t *pbase;
+-	if (!base) 
+-		return NULL;
+-	/*
+-	 * page_private is used to track the number of entries in
+-	 * the page table page have non standard attributes.
+-	 */
+-	SetPagePrivate(base);
+-	page_private(base) = 0;
+-
+-	address = __pa(address);
+-	addr = address & LARGE_PAGE_MASK; 
+-	pbase = (pte_t *)page_address(base);
+-	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+-		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
+-				   addr == address ? prot : ref_prot);
+-	}
+-	return base;
+-} 
+-
+-void clflush_cache_range(void *adr, int size)
+-{
+-	int i;
+-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
+-		clflush(adr+i);
 -}
-+		expon = exponent16(st0_ptr);
-+
-+		goto denormal_arg;
-+	}
- 
-+	single_arg_error(st0_ptr, st0_tag);
-+
-+}
- 
- static void frndint_(FPU_REG *st0_ptr, u_char st0_tag)
- {
--  int flags, tag;
-+	int flags, tag;
- 
--  if ( st0_tag == TAG_Valid )
--    {
--      u_char sign;
-+	if (st0_tag == TAG_Valid) {
-+		u_char sign;
- 
--    denormal_arg:
-+	      denormal_arg:
- 
--      sign = getsign(st0_ptr);
-+		sign = getsign(st0_ptr);
- 
--      if (exponent(st0_ptr) > 63)
--	return;
-+		if (exponent(st0_ptr) > 63)
-+			return;
-+
-+		if (st0_tag == TW_Denormal) {
-+			if (denormal_operand() < 0)
-+				return;
-+		}
-+
-+		/* Fortunately, this can't overflow to 2^64 */
-+		if ((flags = FPU_round_to_int(st0_ptr, st0_tag)))
-+			set_precision_flag(flags);
- 
--      if ( st0_tag == TW_Denormal )
--	{
--	  if (denormal_operand() < 0 )
--	    return;
-+		setexponent16(st0_ptr, 63);
-+		tag = FPU_normalize(st0_ptr);
-+		setsign(st0_ptr, sign);
-+		FPU_settag0(tag);
-+		return;
- 	}
- 
--      /* Fortunately, this can't overflow to 2^64 */
--      if ( (flags = FPU_round_to_int(st0_ptr, st0_tag)) )
--	set_precision_flag(flags);
 -
--      setexponent16(st0_ptr, 63);
--      tag = FPU_normalize(st0_ptr);
--      setsign(st0_ptr, sign);
--      FPU_settag0(tag);
--      return;
--    }
+-static void flush_kernel_map(void *arg)
+-{
+-	struct list_head *l = (struct list_head *)arg;
+-	struct page *pg;
 -
--  if ( st0_tag == TAG_Zero )
--    return;
+-	/* When clflush is available always use it because it is
+-	   much cheaper than WBINVD. */
+-	/* clflush is still broken. Disable for now. */
+-	if (1 || !cpu_has_clflush)
+-		asm volatile("wbinvd" ::: "memory");
+-	else list_for_each_entry(pg, l, lru) {
+-		void *adr = page_address(pg);
+-		clflush_cache_range(adr, PAGE_SIZE);
+-	}
+-	__flush_tlb_all();
+-}
 -
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
+-static inline void flush_map(struct list_head *l)
+-{	
+-	on_each_cpu(flush_kernel_map, l, 1, 1);
+-}
 -
--  if ( st0_tag == TW_Denormal )
--    goto denormal_arg;
--  else if ( st0_tag == TW_Infinity )
--    return;
--  else
--    single_arg_error(st0_ptr, st0_tag);
+-static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
+-
+-static inline void save_page(struct page *fpage)
+-{
+-	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
+-		list_add(&fpage->lru, &deferred_pages);
 -}
-+	if (st0_tag == TAG_Zero)
-+		return;
- 
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
-+
-+	if (st0_tag == TW_Denormal)
-+		goto denormal_arg;
-+	else if (st0_tag == TW_Infinity)
-+		return;
-+	else
-+		single_arg_error(st0_ptr, st0_tag);
-+}
- 
- static int fsin(FPU_REG *st0_ptr, u_char tag)
- {
--  u_char arg_sign = getsign(st0_ptr);
 -
--  if ( tag == TAG_Valid )
--    {
--      int q;
+-/* 
+- * No more special protections in this 2/4MB area - revert to a
+- * large page again. 
+- */
+-static void revert_page(unsigned long address, pgprot_t ref_prot)
+-{
+-	pgd_t *pgd;
+-	pud_t *pud;
+-	pmd_t *pmd;
+-	pte_t large_pte;
+-	unsigned long pfn;
 -
--      if ( exponent(st0_ptr) > -40 )
--	{
--	  if ( (q = trig_arg(st0_ptr, 0)) == -1 )
--	    {
--	      /* Operand is out of range */
--	      return 1;
--	    }
+-	pgd = pgd_offset_k(address);
+-	BUG_ON(pgd_none(*pgd));
+-	pud = pud_offset(pgd,address);
+-	BUG_ON(pud_none(*pud));
+-	pmd = pmd_offset(pud, address);
+-	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
+-	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
+-	large_pte = pfn_pte(pfn, ref_prot);
+-	large_pte = pte_mkhuge(large_pte);
+-	set_pte((pte_t *)pmd, large_pte);
+-}      
 -
--	  poly_sine(st0_ptr);
--	  
--	  if (q & 2)
--	    changesign(st0_ptr);
+-static int
+-__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
+-				   pgprot_t ref_prot)
+-{ 
+-	pte_t *kpte; 
+-	struct page *kpte_page;
+-	pgprot_t ref_prot2;
 -
--	  setsign(st0_ptr, getsign(st0_ptr) ^ arg_sign);
+-	kpte = lookup_address(address);
+-	if (!kpte) return 0;
+-	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
+-	BUG_ON(PageLRU(kpte_page));
+-	BUG_ON(PageCompound(kpte_page));
+-	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
+-		if (!pte_huge(*kpte)) {
+-			set_pte(kpte, pfn_pte(pfn, prot));
+-		} else {
+- 			/*
+-			 * split_large_page will take the reference for this
+-			 * change_page_attr on the split page.
+- 			 */
+-			struct page *split;
+-			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
+-			split = split_large_page(address, prot, ref_prot2);
+-			if (!split)
+-				return -ENOMEM;
+-			pgprot_val(ref_prot2) &= ~_PAGE_NX;
+-			set_pte(kpte, mk_pte(split, ref_prot2));
+-			kpte_page = split;
+-		}
+-		page_private(kpte_page)++;
+-	} else if (!pte_huge(*kpte)) {
+-		set_pte(kpte, pfn_pte(pfn, ref_prot));
+-		BUG_ON(page_private(kpte_page) == 0);
+-		page_private(kpte_page)--;
+-	} else
+-		BUG();
 -
--	  /* We do not really know if up or down */
--	  set_precision_flag_up();
--	  return 0;
-+	u_char arg_sign = getsign(st0_ptr);
-+
-+	if (tag == TAG_Valid) {
-+		int q;
-+
-+		if (exponent(st0_ptr) > -40) {
-+			if ((q = trig_arg(st0_ptr, 0)) == -1) {
-+				/* Operand is out of range */
-+				return 1;
-+			}
-+
-+			poly_sine(st0_ptr);
-+
-+			if (q & 2)
-+				changesign(st0_ptr);
-+
-+			setsign(st0_ptr, getsign(st0_ptr) ^ arg_sign);
-+
-+			/* We do not really know if up or down */
-+			set_precision_flag_up();
-+			return 0;
-+		} else {
-+			/* For a small arg, the result == the argument */
-+			set_precision_flag_up();	/* Must be up. */
-+			return 0;
-+		}
- 	}
--      else
--	{
--	  /* For a small arg, the result == the argument */
--	  set_precision_flag_up();  /* Must be up. */
--	  return 0;
-+
-+	if (tag == TAG_Zero) {
-+		setcc(0);
-+		return 0;
- 	}
--    }
+-	/* on x86-64 the direct mapping set at boot is not using 4k pages */
+- 	BUG_ON(PageReserved(kpte_page));
 -
--  if ( tag == TAG_Zero )
--    {
--      setcc(0);
--      return 0;
--    }
+-	save_page(kpte_page);
+-	if (page_private(kpte_page) == 0)
+-		revert_page(address, ref_prot);
+-	return 0;
+-} 
 -
--  if ( tag == TAG_Special )
--    tag = FPU_Special(st0_ptr);
+-/*
+- * Change the page attributes of an page in the linear mapping.
+- *
+- * This should be used when a page is mapped with a different caching policy
+- * than write-back somewhere - some CPUs do not like it when mappings with
+- * different caching policies exist. This changes the page attributes of the
+- * in kernel linear mapping too.
+- * 
+- * The caller needs to ensure that there are no conflicting mappings elsewhere.
+- * This function only deals with the kernel linear map.
+- * 
+- * Caller must call global_flush_tlb() after this.
+- */
+-int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
+-{
+-	int err = 0, kernel_map = 0;
+-	int i; 
 -
--  if ( tag == TW_Denormal )
--    {
--      if ( denormal_operand() < 0 )
--	return 1;
+-	if (address >= __START_KERNEL_map
+-	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
+-		address = (unsigned long)__va(__pa(address));
+-		kernel_map = 1;
+-	}
 -
--      /* For a small arg, the result == the argument */
--      /* Underflow may happen */
--      FPU_to_exp16(st0_ptr, st0_ptr);
--      
--      tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
+-	down_write(&init_mm.mmap_sem);
+-	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
+-		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 -
--      FPU_settag0(tag);
+-		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+-			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+-			if (err)
+-				break;
+-		}
+-		/* Handle kernel mapping too which aliases part of the
+-		 * lowmem */
+-		if (__pa(address) < KERNEL_TEXT_SIZE) {
+-			unsigned long addr2;
+-			pgprot_t prot2;
+-			addr2 = __START_KERNEL_map + __pa(address);
+-			/* Make sure the kernel mappings stay executable */
+-			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+-			err = __change_page_attr(addr2, pfn, prot2,
+-						 PAGE_KERNEL_EXEC);
+-		} 
+-	} 	
+-	up_write(&init_mm.mmap_sem); 
+-	return err;
+-}
 -
--      return 0;
--    }
--  else if ( tag == TW_Infinity )
--    {
--      /* The 80486 treats infinity as an invalid operand */
--      arith_invalid(0);
--      return 1;
--    }
--  else
--    {
--      single_arg_error(st0_ptr, tag);
--      return 1;
--    }
+-/* Don't call this for MMIO areas that may not have a mem_map entry */
+-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+-{
+-	unsigned long addr = (unsigned long)page_address(page);
+-	return change_page_attr_addr(addr, numpages, prot);
 -}
- 
-+	if (tag == TAG_Special)
-+		tag = FPU_Special(st0_ptr);
-+
-+	if (tag == TW_Denormal) {
-+		if (denormal_operand() < 0)
-+			return 1;
-+
-+		/* For a small arg, the result == the argument */
-+		/* Underflow may happen */
-+		FPU_to_exp16(st0_ptr, st0_ptr);
-+
-+		tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
-+
-+		FPU_settag0(tag);
-+
-+		return 0;
-+	} else if (tag == TW_Infinity) {
-+		/* The 80486 treats infinity as an invalid operand */
-+		arith_invalid(0);
-+		return 1;
-+	} else {
-+		single_arg_error(st0_ptr, tag);
-+		return 1;
-+	}
-+}
- 
- static int f_cos(FPU_REG *st0_ptr, u_char tag)
- {
--  u_char st0_sign;
 -
--  st0_sign = getsign(st0_ptr);
+-void global_flush_tlb(void)
+-{ 
+-	struct page *pg, *next;
+-	struct list_head l;
 -
--  if ( tag == TAG_Valid )
--    {
--      int q;
+-	/*
+-	 * Write-protect the semaphore, to exclude two contexts
+-	 * doing a list_replace_init() call in parallel and to
+-	 * exclude new additions to the deferred_pages list:
+-	 */
+-	down_write(&init_mm.mmap_sem);
+-	list_replace_init(&deferred_pages, &l);
+-	up_write(&init_mm.mmap_sem);
 -
--      if ( exponent(st0_ptr) > -40 )
--	{
--	  if ( (exponent(st0_ptr) < 0)
--	      || ((exponent(st0_ptr) == 0)
--		  && (significand(st0_ptr) <= 0xc90fdaa22168c234LL)) )
--	    {
--	      poly_cos(st0_ptr);
+-	flush_map(&l);
 -
--	      /* We do not really know if up or down */
--	      set_precision_flag_down();
--	  
--	      return 0;
--	    }
--	  else if ( (q = trig_arg(st0_ptr, FCOS)) != -1 )
--	    {
--	      poly_sine(st0_ptr);
+-	list_for_each_entry_safe(pg, next, &l, lru) {
+-		list_del(&pg->lru);
+-		clear_bit(PG_arch_1, &pg->flags);
+-		if (page_private(pg) != 0)
+-			continue;
+-		ClearPagePrivate(pg);
+-		__free_page(pg);
+-	} 
+-} 
 -
--	      if ((q+1) & 2)
--		changesign(st0_ptr);
+-EXPORT_SYMBOL(change_page_attr);
+-EXPORT_SYMBOL(global_flush_tlb);
+diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
+index be61a1d..2ae5999 100644
+--- a/arch/x86/mm/pgtable_32.c
++++ b/arch/x86/mm/pgtable_32.c
+@@ -195,11 +195,6 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+ 	return pte;
+ }
+ 
+-void pmd_ctor(struct kmem_cache *cache, void *pmd)
+-{
+-	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+-}
 -
--	      /* We do not really know if up or down */
--	      set_precision_flag_down();
--	  
--	      return 0;
--	    }
--	  else
--	    {
--	      /* Operand is out of range */
--	      return 1;
--	    }
--	}
--      else
--	{
--	denormal_arg:
-+	u_char st0_sign;
+ /*
+  * List of all pgd's needed for non-PAE so it can invalidate entries
+  * in both cached and uncached pgd's; not needed for PAE since the
+@@ -210,27 +205,18 @@ void pmd_ctor(struct kmem_cache *cache, void *pmd)
+  * vmalloc faults work because attached pagetables are never freed.
+  * -- wli
+  */
+-DEFINE_SPINLOCK(pgd_lock);
+-struct page *pgd_list;
+-
+ static inline void pgd_list_add(pgd_t *pgd)
+ {
+ 	struct page *page = virt_to_page(pgd);
+-	page->index = (unsigned long)pgd_list;
+-	if (pgd_list)
+-		set_page_private(pgd_list, (unsigned long)&page->index);
+-	pgd_list = page;
+-	set_page_private(page, (unsigned long)&pgd_list);
 +
-+	st0_sign = getsign(st0_ptr);
++	list_add(&page->lru, &pgd_list);
+ }
  
--	  setcc(0);
--	  FPU_copy_to_reg0(&CONST_1, TAG_Valid);
-+	if (tag == TAG_Valid) {
-+		int q;
-+
-+		if (exponent(st0_ptr) > -40) {
-+			if ((exponent(st0_ptr) < 0)
-+			    || ((exponent(st0_ptr) == 0)
-+				&& (significand(st0_ptr) <=
-+				    0xc90fdaa22168c234LL))) {
-+				poly_cos(st0_ptr);
+ static inline void pgd_list_del(pgd_t *pgd)
+ {
+-	struct page *next, **pprev, *page = virt_to_page(pgd);
+-	next = (struct page *)page->index;
+-	pprev = (struct page **)page_private(page);
+-	*pprev = next;
+-	if (next)
+-		set_page_private(next, (unsigned long)pprev);
++	struct page *page = virt_to_page(pgd);
 +
-+				/* We do not really know if up or down */
-+				set_precision_flag_down();
++	list_del(&page->lru);
+ }
+ 
+ 
+@@ -285,7 +271,6 @@ static void pgd_dtor(void *pgd)
+ 	if (SHARED_KERNEL_PMD)
+ 		return;
+ 
+-	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
+ 	spin_lock_irqsave(&pgd_lock, flags);
+ 	pgd_list_del(pgd);
+ 	spin_unlock_irqrestore(&pgd_lock, flags);
+@@ -294,77 +279,96 @@ static void pgd_dtor(void *pgd)
+ #define UNSHARED_PTRS_PER_PGD				\
+ 	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
+ 
+-/* If we allocate a pmd for part of the kernel address space, then
+-   make sure its initialized with the appropriate kernel mappings.
+-   Otherwise use a cached zeroed pmd.  */
+-static pmd_t *pmd_cache_alloc(int idx)
++#ifdef CONFIG_X86_PAE
++/*
++ * Mop up any pmd pages which may still be attached to the pgd.
++ * Normally they will be freed by munmap/exit_mmap, but any pmd we
++ * preallocate which never got a corresponding vma will need to be
++ * freed manually.
++ */
++static void pgd_mop_up_pmds(pgd_t *pgdp)
+ {
+-	pmd_t *pmd;
++	int i;
+ 
+-	if (idx >= USER_PTRS_PER_PGD) {
+-		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
++	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
++		pgd_t pgd = pgdp[i];
+ 
+-		if (pmd)
+-			memcpy(pmd,
+-			       (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
++		if (pgd_val(pgd) != 0) {
++			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
 +
-+				return 0;
-+			} else if ((q = trig_arg(st0_ptr, FCOS)) != -1) {
-+				poly_sine(st0_ptr);
++			pgdp[i] = native_make_pgd(0);
 +
-+				if ((q + 1) & 2)
-+					changesign(st0_ptr);
++			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
++			pmd_free(pmd);
++		}
++	}
++}
 +
-+				/* We do not really know if up or down */
-+				set_precision_flag_down();
++/*
++ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
++ * updating the top-level pagetable entries to guarantee the
++ * processor notices the update.  Since this is expensive, and
++ * all 4 top-level entries are used almost immediately in a
++ * new process's life, we just pre-populate them here.
++ *
++ * Also, if we're in a paravirt environment where the kernel pmd is
++ * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
++ * and initialize the kernel pmds here.
++ */
++static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
++{
++	pud_t *pud;
++	unsigned long addr;
++	int i;
 +
-+				return 0;
-+			} else {
-+				/* Operand is out of range */
-+				return 1;
-+			}
-+		} else {
-+		      denormal_arg:
++	pud = pud_offset(pgd, 0);
++ 	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
++	     i++, pud++, addr += PUD_SIZE) {
++		pmd_t *pmd = pmd_alloc_one(mm, addr);
 +
-+			setcc(0);
-+			FPU_copy_to_reg0(&CONST_1, TAG_Valid);
- #ifdef PECULIAR_486
--	  set_precision_flag_down();  /* 80486 appears to do this. */
-+			set_precision_flag_down();	/* 80486 appears to do this. */
- #else
--	  set_precision_flag_up();  /* Must be up. */
-+			set_precision_flag_up();	/* Must be up. */
- #endif /* PECULIAR_486 */
--	  return 0;
++		if (!pmd) {
++			pgd_mop_up_pmds(pgd);
 +			return 0;
 +		}
-+	} else if (tag == TAG_Zero) {
-+		FPU_copy_to_reg0(&CONST_1, TAG_Valid);
-+		setcc(0);
-+		return 0;
- 	}
--    }
--  else if ( tag == TAG_Zero )
--    {
--      FPU_copy_to_reg0(&CONST_1, TAG_Valid);
--      setcc(0);
--      return 0;
--    }
--
--  if ( tag == TAG_Special )
--    tag = FPU_Special(st0_ptr);
--
--  if ( tag == TW_Denormal )
--    {
--      if ( denormal_operand() < 0 )
--	return 1;
--
--      goto denormal_arg;
--    }
--  else if ( tag == TW_Infinity )
--    {
--      /* The 80486 treats infinity as an invalid operand */
--      arith_invalid(0);
--      return 1;
--    }
--  else
--    {
--      single_arg_error(st0_ptr, tag);  /* requires st0_ptr == &st(0) */
--      return 1;
--    }
--}
- 
-+	if (tag == TAG_Special)
-+		tag = FPU_Special(st0_ptr);
 +
-+	if (tag == TW_Denormal) {
-+		if (denormal_operand() < 0)
-+			return 1;
-+
-+		goto denormal_arg;
-+	} else if (tag == TW_Infinity) {
-+		/* The 80486 treats infinity as an invalid operand */
-+		arith_invalid(0);
-+		return 1;
-+	} else {
-+		single_arg_error(st0_ptr, tag);	/* requires st0_ptr == &st(0) */
-+		return 1;
++		if (i >= USER_PTRS_PER_PGD)
++			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+ 			       sizeof(pmd_t) * PTRS_PER_PMD);
+-	} else
+-		pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+ 
+-	return pmd;
++		pud_populate(mm, pud, pmd);
 +	}
++
++	return 1;
 +}
++#else  /* !CONFIG_X86_PAE */
++/* No need to prepopulate any pagetable entries in non-PAE modes. */
++static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
++{
++	return 1;
+ }
  
- static void fcos(FPU_REG *st0_ptr, u_char st0_tag)
+-static void pmd_cache_free(pmd_t *pmd, int idx)
++static void pgd_mop_up_pmds(pgd_t *pgd)
  {
--  f_cos(st0_ptr, st0_tag);
-+	f_cos(st0_ptr, st0_tag);
+-	if (idx >= USER_PTRS_PER_PGD)
+-		free_page((unsigned long)pmd);
+-	else
+-		kmem_cache_free(pmd_cache, pmd);
  }
++#endif	/* CONFIG_X86_PAE */
  
--
- static void fsincos(FPU_REG *st0_ptr, u_char st0_tag)
+ pgd_t *pgd_alloc(struct mm_struct *mm)
  {
--  FPU_REG *st_new_ptr;
--  FPU_REG arg;
--  u_char tag;
--
--  /* Stack underflow has higher priority */
--  if ( st0_tag == TAG_Empty )
--    {
--      FPU_stack_underflow();  /* Puts a QNaN in st(0) */
--      if ( control_word & CW_Invalid )
--	{
--	  st_new_ptr = &st(-1);
--	  push();
--	  FPU_stack_underflow();  /* Puts a QNaN in the new st(0) */
-+	FPU_REG *st_new_ptr;
-+	FPU_REG arg;
-+	u_char tag;
-+
-+	/* Stack underflow has higher priority */
-+	if (st0_tag == TAG_Empty) {
-+		FPU_stack_underflow();	/* Puts a QNaN in st(0) */
-+		if (control_word & CW_Invalid) {
-+			st_new_ptr = &st(-1);
-+			push();
-+			FPU_stack_underflow();	/* Puts a QNaN in the new st(0) */
-+		}
-+		return;
- 	}
--      return;
--    }
--
--  if ( STACK_OVERFLOW )
--    { FPU_stack_overflow(); return; }
+-	int i;
+ 	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
+ 
+-	if (PTRS_PER_PMD == 1 || !pgd)
+-		return pgd;
++	mm->pgd = pgd;		/* so that alloc_pd can use it */
+ 
+- 	for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
+-		pmd_t *pmd = pmd_cache_alloc(i);
 -
--  if ( st0_tag == TAG_Special )
--    tag = FPU_Special(st0_ptr);
--  else
--    tag = st0_tag;
+-		if (!pmd)
+-			goto out_oom;
 -
--  if ( tag == TW_NaN )
--    {
--      single_arg_2_error(st0_ptr, TW_NaN);
--      return;
--    }
--  else if ( tag == TW_Infinity )
--    {
--      /* The 80486 treats infinity as an invalid operand */
--      if ( arith_invalid(0) >= 0 )
--	{
--	  /* Masked response */
--	  push();
--	  arith_invalid(0);
-+
-+	if (STACK_OVERFLOW) {
-+		FPU_stack_overflow();
-+		return;
+-		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
+-		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
++	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
++		quicklist_free(0, pgd_dtor, pgd);
++		pgd = NULL;
  	}
--      return;
--    }
--
--  reg_copy(st0_ptr, &arg);
--  if ( !fsin(st0_ptr, st0_tag) )
--    {
--      push();
--      FPU_copy_to_reg0(&arg, st0_tag);
--      f_cos(&st(0), st0_tag);
--    }
--  else
--    {
--      /* An error, so restore st(0) */
--      FPU_copy_to_reg0(&arg, st0_tag);
--    }
--}
+-	return pgd;
  
-+	if (st0_tag == TAG_Special)
-+		tag = FPU_Special(st0_ptr);
-+	else
-+		tag = st0_tag;
-+
-+	if (tag == TW_NaN) {
-+		single_arg_2_error(st0_ptr, TW_NaN);
-+		return;
-+	} else if (tag == TW_Infinity) {
-+		/* The 80486 treats infinity as an invalid operand */
-+		if (arith_invalid(0) >= 0) {
-+			/* Masked response */
-+			push();
-+			arith_invalid(0);
-+		}
-+		return;
-+	}
-+
-+	reg_copy(st0_ptr, &arg);
-+	if (!fsin(st0_ptr, st0_tag)) {
-+		push();
-+		FPU_copy_to_reg0(&arg, st0_tag);
-+		f_cos(&st(0), st0_tag);
-+	} else {
-+		/* An error, so restore st(0) */
-+		FPU_copy_to_reg0(&arg, st0_tag);
-+	}
-+}
+-out_oom:
+-	for (i--; i >= 0; i--) {
+-		pgd_t pgdent = pgd[i];
+-		void* pmd = (void *)__va(pgd_val(pgdent)-1);
+-		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+-		pmd_cache_free(pmd, i);
+-	}
+-	quicklist_free(0, pgd_dtor, pgd);
+-	return NULL;
++	return pgd;
+ }
  
- /*---------------------------------------------------------------------------*/
- /* The following all require two arguments: st(0) and st(1) */
-@@ -826,1020 +743,901 @@ static void fsincos(FPU_REG *st0_ptr, u_char st0_tag)
-    result must be zero.
-  */
- static void rem_kernel(unsigned long long st0, unsigned long long *y,
--		       unsigned long long st1,
--		       unsigned long long q, int n)
-+		       unsigned long long st1, unsigned long long q, int n)
+ void pgd_free(pgd_t *pgd)
  {
--  int dummy;
--  unsigned long long x;
--
--  x = st0 << n;
--
--  /* Do the required multiplication and subtraction in the one operation */
--
--  /* lsw x -= lsw st1 * lsw q */
--  asm volatile ("mull %4; subl %%eax,%0; sbbl %%edx,%1"
--		:"=m" (((unsigned *)&x)[0]), "=m" (((unsigned *)&x)[1]),
--		"=a" (dummy)
--		:"2" (((unsigned *)&st1)[0]), "m" (((unsigned *)&q)[0])
--		:"%dx");
--  /* msw x -= msw st1 * lsw q */
--  asm volatile ("mull %3; subl %%eax,%0"
--		:"=m" (((unsigned *)&x)[1]), "=a" (dummy)
--		:"1" (((unsigned *)&st1)[1]), "m" (((unsigned *)&q)[0])
--		:"%dx");
--  /* msw x -= lsw st1 * msw q */
--  asm volatile ("mull %3; subl %%eax,%0"
--		:"=m" (((unsigned *)&x)[1]), "=a" (dummy)
--		:"1" (((unsigned *)&st1)[0]), "m" (((unsigned *)&q)[1])
--		:"%dx");
+-	int i;
 -
--  *y = x;
-+	int dummy;
-+	unsigned long long x;
-+
-+	x = st0 << n;
-+
-+	/* Do the required multiplication and subtraction in the one operation */
-+
-+	/* lsw x -= lsw st1 * lsw q */
-+	asm volatile ("mull %4; subl %%eax,%0; sbbl %%edx,%1":"=m"
-+		      (((unsigned *)&x)[0]), "=m"(((unsigned *)&x)[1]),
-+		      "=a"(dummy)
-+		      :"2"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[0])
-+		      :"%dx");
-+	/* msw x -= msw st1 * lsw q */
-+	asm volatile ("mull %3; subl %%eax,%0":"=m" (((unsigned *)&x)[1]),
-+		      "=a"(dummy)
-+		      :"1"(((unsigned *)&st1)[1]), "m"(((unsigned *)&q)[0])
-+		      :"%dx");
-+	/* msw x -= lsw st1 * msw q */
-+	asm volatile ("mull %3; subl %%eax,%0":"=m" (((unsigned *)&x)[1]),
-+		      "=a"(dummy)
-+		      :"1"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[1])
-+		      :"%dx");
-+
-+	*y = x;
+-	/* in the PAE case user pgd entries are overwritten before usage */
+-	if (PTRS_PER_PMD > 1)
+-		for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
+-			pgd_t pgdent = pgd[i];
+-			void* pmd = (void *)__va(pgd_val(pgdent)-1);
+-			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+-			pmd_cache_free(pmd, i);
+-		}
+-	/* in the non-PAE case, free_pgtables() clears user pgd entries */
++	pgd_mop_up_pmds(pgd);
+ 	quicklist_free(0, pgd_dtor, pgd);
  }
  
--
- /* Remainder of st(0) / st(1) */
- /* This routine produces exact results, i.e. there is never any
-    rounding or truncation, etc of the result. */
- static void do_fprem(FPU_REG *st0_ptr, u_char st0_tag, int round)
+@@ -372,4 +376,3 @@ void check_pgt_cache(void)
  {
--  FPU_REG *st1_ptr = &st(1);
--  u_char st1_tag = FPU_gettagi(1);
--
--  if ( !((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid)) )
--    {
--      FPU_REG tmp, st0, st1;
--      u_char st0_sign, st1_sign;
--      u_char tmptag;
--      int tag;
--      int old_cw;
--      int expdif;
--      long long q;
--      unsigned short saved_status;
--      int cc;
--
--    fprem_valid:
--      /* Convert registers for internal use. */
--      st0_sign = FPU_to_exp16(st0_ptr, &st0);
--      st1_sign = FPU_to_exp16(st1_ptr, &st1);
--      expdif = exponent16(&st0) - exponent16(&st1);
--
--      old_cw = control_word;
--      cc = 0;
--
--      /* We want the status following the denorm tests, but don't want
--	 the status changed by the arithmetic operations. */
--      saved_status = partial_status;
--      control_word &= ~CW_RC;
--      control_word |= RC_CHOP;
--
--      if ( expdif < 64 )
--	{
--	  /* This should be the most common case */
--
--	  if ( expdif > -2 )
--	    {
--	      u_char sign = st0_sign ^ st1_sign;
--	      tag = FPU_u_div(&st0, &st1, &tmp,
--			      PR_64_BITS | RC_CHOP | 0x3f,
--			      sign);
--	      setsign(&tmp, sign);
--
--	      if ( exponent(&tmp) >= 0 )
--		{
--		  FPU_round_to_int(&tmp, tag);  /* Fortunately, this can't
--						   overflow to 2^64 */
--		  q = significand(&tmp);
--
--		  rem_kernel(significand(&st0),
--			     &significand(&tmp),
--			     significand(&st1),
--			     q, expdif);
--
--		  setexponent16(&tmp, exponent16(&st1));
--		}
--	      else
--		{
--		  reg_copy(&st0, &tmp);
--		  q = 0;
--		}
+ 	quicklist_trim(0, pgd_dtor, 25, 16);
+ }
 -
--	      if ( (round == RC_RND) && (tmp.sigh & 0xc0000000) )
--		{
--		  /* We may need to subtract st(1) once more,
--		     to get a result <= 1/2 of st(1). */
--		  unsigned long long x;
--		  expdif = exponent16(&st1) - exponent16(&tmp);
--		  if ( expdif <= 1 )
--		    {
--		      if ( expdif == 0 )
--			x = significand(&st1) - significand(&tmp);
--		      else /* expdif is 1 */
--			x = (significand(&st1) << 1) - significand(&tmp);
--		      if ( (x < significand(&tmp)) ||
--			  /* or equi-distant (from 0 & st(1)) and q is odd */
--			  ((x == significand(&tmp)) && (q & 1) ) )
--			{
--			  st0_sign = ! st0_sign;
--			  significand(&tmp) = x;
--			  q++;
-+	FPU_REG *st1_ptr = &st(1);
-+	u_char st1_tag = FPU_gettagi(1);
-+
-+	if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
-+		FPU_REG tmp, st0, st1;
-+		u_char st0_sign, st1_sign;
-+		u_char tmptag;
-+		int tag;
-+		int old_cw;
-+		int expdif;
-+		long long q;
-+		unsigned short saved_status;
-+		int cc;
-+
-+	      fprem_valid:
-+		/* Convert registers for internal use. */
-+		st0_sign = FPU_to_exp16(st0_ptr, &st0);
-+		st1_sign = FPU_to_exp16(st1_ptr, &st1);
-+		expdif = exponent16(&st0) - exponent16(&st1);
-+
-+		old_cw = control_word;
-+		cc = 0;
-+
-+		/* We want the status following the denorm tests, but don't want
-+		   the status changed by the arithmetic operations. */
-+		saved_status = partial_status;
-+		control_word &= ~CW_RC;
-+		control_word |= RC_CHOP;
-+
-+		if (expdif < 64) {
-+			/* This should be the most common case */
-+
-+			if (expdif > -2) {
-+				u_char sign = st0_sign ^ st1_sign;
-+				tag = FPU_u_div(&st0, &st1, &tmp,
-+						PR_64_BITS | RC_CHOP | 0x3f,
-+						sign);
-+				setsign(&tmp, sign);
-+
-+				if (exponent(&tmp) >= 0) {
-+					FPU_round_to_int(&tmp, tag);	/* Fortunately, this can't
-+									   overflow to 2^64 */
-+					q = significand(&tmp);
-+
-+					rem_kernel(significand(&st0),
-+						   &significand(&tmp),
-+						   significand(&st1),
-+						   q, expdif);
-+
-+					setexponent16(&tmp, exponent16(&st1));
-+				} else {
-+					reg_copy(&st0, &tmp);
-+					q = 0;
-+				}
-+
-+				if ((round == RC_RND)
-+				    && (tmp.sigh & 0xc0000000)) {
-+					/* We may need to subtract st(1) once more,
-+					   to get a result <= 1/2 of st(1). */
-+					unsigned long long x;
-+					expdif =
-+					    exponent16(&st1) - exponent16(&tmp);
-+					if (expdif <= 1) {
-+						if (expdif == 0)
-+							x = significand(&st1) -
-+							    significand(&tmp);
-+						else	/* expdif is 1 */
-+							x = (significand(&st1)
-+							     << 1) -
-+							    significand(&tmp);
-+						if ((x < significand(&tmp)) ||
-+						    /* or equi-distant (from 0 & st(1)) and q is odd */
-+						    ((x == significand(&tmp))
-+						     && (q & 1))) {
-+							st0_sign = !st0_sign;
-+							significand(&tmp) = x;
-+							q++;
-+						}
-+					}
-+				}
+diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
+index ea85172..65416f8 100644
+--- a/arch/x86/mm/srat_64.c
++++ b/arch/x86/mm/srat_64.c
+@@ -130,6 +130,9 @@ void __init
+ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+ {
+ 	int pxm, node;
++	int apic_id;
 +
-+				if (q & 4)
-+					cc |= SW_C0;
-+				if (q & 2)
-+					cc |= SW_C3;
-+				if (q & 1)
-+					cc |= SW_C1;
-+			} else {
-+				control_word = old_cw;
-+				setcc(0);
-+				return;
- 			}
--		    }
--		}
--
--	      if (q & 4) cc |= SW_C0;
--	      if (q & 2) cc |= SW_C3;
--	      if (q & 1) cc |= SW_C1;
--	    }
--	  else
--	    {
--	      control_word = old_cw;
--	      setcc(0);
--	      return;
--	    }
--	}
--      else
--	{
--	  /* There is a large exponent difference ( >= 64 ) */
--	  /* To make much sense, the code in this section should
--	     be done at high precision. */
--	  int exp_1, N;
--	  u_char sign;
--
--	  /* prevent overflow here */
--	  /* N is 'a number between 32 and 63' (p26-113) */
--	  reg_copy(&st0, &tmp);
--	  tmptag = st0_tag;
--	  N = (expdif & 0x0000001f) + 32;  /* This choice gives results
--					      identical to an AMD 486 */
--	  setexponent16(&tmp, N);
--	  exp_1 = exponent16(&st1);
--	  setexponent16(&st1, 0);
--	  expdif -= N;
--
--	  sign = getsign(&tmp) ^ st1_sign;
--	  tag = FPU_u_div(&tmp, &st1, &tmp, PR_64_BITS | RC_CHOP | 0x3f,
--			  sign);
--	  setsign(&tmp, sign);
--
--	  FPU_round_to_int(&tmp, tag);  /* Fortunately, this can't
--					   overflow to 2^64 */
++	apic_id = pa->apic_id;
+ 	if (srat_disabled())
+ 		return;
+ 	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
+@@ -145,68 +148,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+ 		bad_srat();
+ 		return;
+ 	}
+-	apicid_to_node[pa->apic_id] = node;
++	apicid_to_node[apic_id] = node;
+ 	acpi_numa = 1;
+ 	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
+-	       pxm, pa->apic_id, node);
+-}
 -
--	  rem_kernel(significand(&st0),
--		     &significand(&tmp),
--		     significand(&st1),
--		     significand(&tmp),
--		     exponent(&tmp)
--		     ); 
--	  setexponent16(&tmp, exp_1 + expdif);
+-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+-/*
+- * Protect against too large hotadd areas that would fill up memory.
+- */
+-static int hotadd_enough_memory(struct bootnode *nd)
+-{
+-	static unsigned long allocated;
+-	static unsigned long last_area_end;
+-	unsigned long pages = (nd->end - nd->start) >> PAGE_SHIFT;
+-	long mem = pages * sizeof(struct page);
+-	unsigned long addr;
+-	unsigned long allowed;
+-	unsigned long oldpages = pages;
 -
--	  /* It is possible for the operation to be complete here.
--	     What does the IEEE standard say? The Intel 80486 manual
--	     implies that the operation will never be completed at this
--	     point, and the behaviour of a real 80486 confirms this.
--	   */
--	  if ( !(tmp.sigh | tmp.sigl) )
--	    {
--	      /* The result is zero */
--	      control_word = old_cw;
--	      partial_status = saved_status;
--	      FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
--	      setsign(&st0, st0_sign);
-+		} else {
-+			/* There is a large exponent difference ( >= 64 ) */
-+			/* To make much sense, the code in this section should
-+			   be done at high precision. */
-+			int exp_1, N;
-+			u_char sign;
-+
-+			/* prevent overflow here */
-+			/* N is 'a number between 32 and 63' (p26-113) */
-+			reg_copy(&st0, &tmp);
-+			tmptag = st0_tag;
-+			N = (expdif & 0x0000001f) + 32;	/* This choice gives results
-+							   identical to an AMD 486 */
-+			setexponent16(&tmp, N);
-+			exp_1 = exponent16(&st1);
-+			setexponent16(&st1, 0);
-+			expdif -= N;
-+
-+			sign = getsign(&tmp) ^ st1_sign;
-+			tag =
-+			    FPU_u_div(&tmp, &st1, &tmp,
-+				      PR_64_BITS | RC_CHOP | 0x3f, sign);
-+			setsign(&tmp, sign);
-+
-+			FPU_round_to_int(&tmp, tag);	/* Fortunately, this can't
-+							   overflow to 2^64 */
-+
-+			rem_kernel(significand(&st0),
-+				   &significand(&tmp),
-+				   significand(&st1),
-+				   significand(&tmp), exponent(&tmp)
-+			    );
-+			setexponent16(&tmp, exp_1 + expdif);
-+
-+			/* It is possible for the operation to be complete here.
-+			   What does the IEEE standard say? The Intel 80486 manual
-+			   implies that the operation will never be completed at this
-+			   point, and the behaviour of a real 80486 confirms this.
-+			 */
-+			if (!(tmp.sigh | tmp.sigl)) {
-+				/* The result is zero */
-+				control_word = old_cw;
-+				partial_status = saved_status;
-+				FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
-+				setsign(&st0, st0_sign);
- #ifdef PECULIAR_486
--	      setcc(SW_C2);
-+				setcc(SW_C2);
- #else
--	      setcc(0);
-+				setcc(0);
- #endif /* PECULIAR_486 */
--	      return;
--	    }
--	  cc = SW_C2;
+-	if (mem < 0)
+-		return 0;
+-	allowed = (end_pfn - absent_pages_in_range(0, end_pfn)) * PAGE_SIZE;
+-	allowed = (allowed / 100) * hotadd_percent;
+-	if (allocated + mem > allowed) {
+-		unsigned long range;
+-		/* Give them at least part of their hotadd memory upto hotadd_percent
+-		   It would be better to spread the limit out
+-		   over multiple hotplug areas, but that is too complicated
+-		   right now */
+-		if (allocated >= allowed)
+-			return 0;
+-		range = allowed - allocated;
+-		pages = (range / PAGE_SIZE);
+-		mem = pages * sizeof(struct page);
+-		nd->end = nd->start + range;
 -	}
-+				return;
-+			}
-+			cc = SW_C2;
-+		}
- 
--      control_word = old_cw;
--      partial_status = saved_status;
--      tag = FPU_normalize_nuo(&tmp);
--      reg_copy(&tmp, st0_ptr);
+-	/* Not completely fool proof, but a good sanity check */
+-	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
+-	if (addr == -1UL)
+-		return 0;
+-	if (pages != oldpages)
+-		printk(KERN_NOTICE "SRAT: Hotadd area limited to %lu bytes\n",
+-			pages << PAGE_SHIFT);
+-	last_area_end = addr + mem;
+-	allocated += mem;
+-	return 1;
+-}
 -
--      /* The only condition to be looked for is underflow,
--	 and it can occur here only if underflow is unmasked. */
--      if ( (exponent16(&tmp) <= EXP_UNDER) && (tag != TAG_Zero)
--	  && !(control_word & CW_Underflow) )
--	{
--	  setcc(cc);
--	  tag = arith_underflow(st0_ptr);
--	  setsign(st0_ptr, st0_sign);
--	  FPU_settag0(tag);
--	  return;
--	}
--      else if ( (exponent16(&tmp) > EXP_UNDER) || (tag == TAG_Zero) )
--	{
--	  stdexp(st0_ptr);
--	  setsign(st0_ptr, st0_sign);
--	}
--      else
--	{
--	  tag = FPU_round(st0_ptr, 0, 0, FULL_PRECISION, st0_sign);
--	}
--      FPU_settag0(tag);
--      setcc(cc);
-+		control_word = old_cw;
-+		partial_status = saved_status;
-+		tag = FPU_normalize_nuo(&tmp);
-+		reg_copy(&tmp, st0_ptr);
-+
-+		/* The only condition to be looked for is underflow,
-+		   and it can occur here only if underflow is unmasked. */
-+		if ((exponent16(&tmp) <= EXP_UNDER) && (tag != TAG_Zero)
-+		    && !(control_word & CW_Underflow)) {
-+			setcc(cc);
-+			tag = arith_underflow(st0_ptr);
-+			setsign(st0_ptr, st0_sign);
-+			FPU_settag0(tag);
-+			return;
-+		} else if ((exponent16(&tmp) > EXP_UNDER) || (tag == TAG_Zero)) {
-+			stdexp(st0_ptr);
-+			setsign(st0_ptr, st0_sign);
-+		} else {
-+			tag =
-+			    FPU_round(st0_ptr, 0, 0, FULL_PRECISION, st0_sign);
-+		}
-+		FPU_settag0(tag);
-+		setcc(cc);
+-static int update_end_of_memory(unsigned long end)
+-{
+-	found_add_area = 1;
+-	if ((end >> PAGE_SHIFT) > end_pfn)
+-		end_pfn = end >> PAGE_SHIFT;
+-	return 1;
++	       pxm, apic_id, node);
+ }
  
--      return;
--    }
-+		return;
-+	}
+-static inline int save_add_info(void)
+-{
+-	return hotadd_percent > 0;
+-}
+-#else
+ int update_end_of_memory(unsigned long end) {return -1;}
+ static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
+ #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+@@ -214,10 +161,9 @@ static inline int save_add_info(void) {return 1;}
+ #else
+ static inline int save_add_info(void) {return 0;}
+ #endif
+-#endif
+ /*
+  * Update nodes_add and decide if to include add are in the zone.
+- * Both SPARSE and RESERVE need nodes_add infomation.
++ * Both SPARSE and RESERVE need nodes_add information.
+  * This code supports one contiguous hot add area per node.
+  */
+ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
+@@ -377,7 +323,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
+ 	return 1;
+ }
  
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
--  if ( st1_tag == TAG_Special )
--    st1_tag = FPU_Special(st1_ptr);
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
-+	if (st1_tag == TAG_Special)
-+		st1_tag = FPU_Special(st1_ptr);
+-static void unparse_node(int node)
++static void __init unparse_node(int node)
+ {
+ 	int i;
+ 	node_clear(node, nodes_parsed);
+@@ -400,7 +346,12 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
+ 	/* First clean up the node list */
+ 	for (i = 0; i < MAX_NUMNODES; i++) {
+ 		cutoff_node(i, start, end);
+-		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
++		/*
++		 * don't confuse VM with a node that doesn't have the
++		 * minimum memory.
++		 */
++		if (nodes[i].end &&
++			(nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
+ 			unparse_node(i);
+ 			node_set_offline(i);
+ 		}
+@@ -431,9 +382,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
+ 			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
  
--  if ( ((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
-+	if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
- 	    || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
--	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal)) )
--    {
--      if ( denormal_operand() < 0 )
--	return;
--      goto fprem_valid;
--    }
--  else if ( (st0_tag == TAG_Empty) || (st1_tag == TAG_Empty) )
--    {
--      FPU_stack_underflow();
--      return;
--    }
--  else if ( st0_tag == TAG_Zero )
--    {
--      if ( st1_tag == TAG_Valid )
--	{
--	  setcc(0); return;
--	}
--      else if ( st1_tag == TW_Denormal )
--	{
--	  if ( denormal_operand() < 0 )
--	    return;
--	  setcc(0); return;
--	}
--      else if ( st1_tag == TAG_Zero )
--	{ arith_invalid(0); return; } /* fprem(?,0) always invalid */
--      else if ( st1_tag == TW_Infinity )
--	{ setcc(0); return; }
--    }
--  else if ( (st0_tag == TAG_Valid) || (st0_tag == TW_Denormal) )
--    {
--      if ( st1_tag == TAG_Zero )
--	{
--	  arith_invalid(0); /* fprem(Valid,Zero) is invalid */
--	  return;
--	}
--      else if ( st1_tag != TW_NaN )
--	{
--	  if ( ((st0_tag == TW_Denormal) || (st1_tag == TW_Denormal))
--	       && (denormal_operand() < 0) )
--	    return;
--
--	  if ( st1_tag == TW_Infinity )
--	    {
--	      /* fprem(Valid,Infinity) is o.k. */
--	      setcc(0); return;
--	    }
--	}
--    }
--  else if ( st0_tag == TW_Infinity )
--    {
--      if ( st1_tag != TW_NaN )
--	{
--	  arith_invalid(0); /* fprem(Infinity,?) is invalid */
--	  return;
-+	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
-+		if (denormal_operand() < 0)
-+			return;
-+		goto fprem_valid;
-+	} else if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
-+		FPU_stack_underflow();
-+		return;
-+	} else if (st0_tag == TAG_Zero) {
-+		if (st1_tag == TAG_Valid) {
-+			setcc(0);
-+			return;
-+		} else if (st1_tag == TW_Denormal) {
-+			if (denormal_operand() < 0)
-+				return;
-+			setcc(0);
-+			return;
-+		} else if (st1_tag == TAG_Zero) {
-+			arith_invalid(0);
-+			return;
-+		} /* fprem(?,0) always invalid */
-+		else if (st1_tag == TW_Infinity) {
-+			setcc(0);
-+			return;
-+		}
-+	} else if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
-+		if (st1_tag == TAG_Zero) {
-+			arith_invalid(0);	/* fprem(Valid,Zero) is invalid */
-+			return;
-+		} else if (st1_tag != TW_NaN) {
-+			if (((st0_tag == TW_Denormal)
-+			     || (st1_tag == TW_Denormal))
-+			    && (denormal_operand() < 0))
-+				return;
+ 	for (i = 0; i < NR_CPUS; i++) {
+-		if (cpu_to_node(i) == NUMA_NO_NODE)
++		int node = early_cpu_to_node(i);
 +
-+			if (st1_tag == TW_Infinity) {
-+				/* fprem(Valid,Infinity) is o.k. */
-+				setcc(0);
-+				return;
-+			}
-+		}
-+	} else if (st0_tag == TW_Infinity) {
-+		if (st1_tag != TW_NaN) {
-+			arith_invalid(0);	/* fprem(Infinity,?) is invalid */
-+			return;
-+		}
++		if (node == NUMA_NO_NODE)
+ 			continue;
+-		if (!node_isset(cpu_to_node(i), node_possible_map))
++		if (!node_isset(node, node_possible_map))
+ 			numa_set_node(i, NUMA_NO_NODE);
  	}
--    }
+ 	numa_init_array();
+@@ -441,6 +394,12 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
+ }
  
--  /* One of the registers must contain a NaN if we got here. */
-+	/* One of the registers must contain a NaN if we got here. */
+ #ifdef CONFIG_NUMA_EMU
++static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
++	[0 ... MAX_NUMNODES-1] = PXM_INVAL
++};
++static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
++	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
++};
+ static int __init find_node_by_addr(unsigned long addr)
+ {
+ 	int ret = NUMA_NO_NODE;
+@@ -457,7 +416,7 @@ static int __init find_node_by_addr(unsigned long addr)
+ 			break;
+ 		}
+ 	}
+-	return i;
++	return ret;
+ }
  
- #ifdef PARANOID
--  if ( (st0_tag != TW_NaN) && (st1_tag != TW_NaN) )
--      EXCEPTION(EX_INTERNAL | 0x118);
-+	if ((st0_tag != TW_NaN) && (st1_tag != TW_NaN))
-+		EXCEPTION(EX_INTERNAL | 0x118);
- #endif /* PARANOID */
+ /*
+@@ -471,12 +430,6 @@ static int __init find_node_by_addr(unsigned long addr)
+ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
+ {
+ 	int i, j;
+-	int fake_node_to_pxm_map[MAX_NUMNODES] = {
+-		[0 ... MAX_NUMNODES-1] = PXM_INVAL
+-	};
+-	unsigned char fake_apicid_to_node[MAX_LOCAL_APIC] = {
+-		[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+-	};
  
--  real_2op_NaN(st1_ptr, st1_tag, 0, st1_ptr);
-+	real_2op_NaN(st1_ptr, st1_tag, 0, st1_ptr);
+ 	printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
+ 			 "topology.\n");
+diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
+index 0ed046a..e2095cb 100644
+--- a/arch/x86/oprofile/backtrace.c
++++ b/arch/x86/oprofile/backtrace.c
+@@ -32,7 +32,7 @@ static int backtrace_stack(void *data, char *name)
+ 	return 0;
+ }
+ 
+-static void backtrace_address(void *data, unsigned long addr)
++static void backtrace_address(void *data, unsigned long addr, int reliable)
+ {
+ 	unsigned int *depth = data;
+ 
+@@ -48,7 +48,7 @@ static struct stacktrace_ops backtrace_ops = {
+ };
+ 
+ struct frame_head {
+-	struct frame_head *ebp;
++	struct frame_head *bp;
+ 	unsigned long ret;
+ } __attribute__((packed));
+ 
+@@ -67,21 +67,21 @@ dump_user_backtrace(struct frame_head * head)
+ 
+ 	/* frame pointers should strictly progress back up the stack
+ 	 * (towards higher addresses) */
+-	if (head >= bufhead[0].ebp)
++	if (head >= bufhead[0].bp)
+ 		return NULL;
+ 
+-	return bufhead[0].ebp;
++	return bufhead[0].bp;
+ }
+ 
+ void
+ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
+ {
+ 	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+-	unsigned long stack = stack_pointer(regs);
++	unsigned long stack = kernel_trap_sp(regs);
+ 
+ 	if (!user_mode_vm(regs)) {
+ 		if (depth)
+-			dump_trace(NULL, regs, (unsigned long *)stack,
++			dump_trace(NULL, regs, (unsigned long *)stack, 0,
+ 				   &backtrace_ops, &depth);
+ 		return;
+ 	}
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index 944bbcd..1f11cf0 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -18,11 +18,11 @@
+ #include <asm/nmi.h>
+ #include <asm/msr.h>
+ #include <asm/apic.h>
+- 
++
+ #include "op_counter.h"
+ #include "op_x86_model.h"
+ 
+-static struct op_x86_model_spec const * model;
++static struct op_x86_model_spec const *model;
+ static struct op_msrs cpu_msrs[NR_CPUS];
+ static unsigned long saved_lvtpc[NR_CPUS];
  
+@@ -41,7 +41,6 @@ static int nmi_suspend(struct sys_device *dev, pm_message_t state)
+ 	return 0;
  }
  
 -
- /* ST(1) <- ST(1) * log ST;  pop ST */
- static void fyl2x(FPU_REG *st0_ptr, u_char st0_tag)
+ static int nmi_resume(struct sys_device *dev)
  {
--  FPU_REG *st1_ptr = &st(1), exponent;
--  u_char st1_tag = FPU_gettagi(1);
--  u_char sign;
--  int e, tag;
+ 	if (nmi_enabled == 1)
+@@ -49,29 +48,27 @@ static int nmi_resume(struct sys_device *dev)
+ 	return 0;
+ }
+ 
 -
--  clear_C1();
+ static struct sysdev_class oprofile_sysclass = {
+-	set_kset_name("oprofile"),
++	.name		= "oprofile",
+ 	.resume		= nmi_resume,
+ 	.suspend	= nmi_suspend,
+ };
+ 
 -
--  if ( (st0_tag == TAG_Valid) && (st1_tag == TAG_Valid) )
--    {
--    both_valid:
--      /* Both regs are Valid or Denormal */
--      if ( signpositive(st0_ptr) )
--	{
--	  if ( st0_tag == TW_Denormal )
--	    FPU_to_exp16(st0_ptr, st0_ptr);
--	  else
--	    /* Convert st(0) for internal use. */
--	    setexponent16(st0_ptr, exponent(st0_ptr));
+ static struct sys_device device_oprofile = {
+ 	.id	= 0,
+ 	.cls	= &oprofile_sysclass,
+ };
+ 
 -
--	  if ( (st0_ptr->sigh == 0x80000000) && (st0_ptr->sigl == 0) )
--	    {
--	      /* Special case. The result can be precise. */
--	      u_char esign;
--	      e = exponent16(st0_ptr);
--	      if ( e >= 0 )
--		{
--		  exponent.sigh = e;
--		  esign = SIGN_POS;
--		}
--	      else
--		{
--		  exponent.sigh = -e;
--		  esign = SIGN_NEG;
-+	FPU_REG *st1_ptr = &st(1), exponent;
-+	u_char st1_tag = FPU_gettagi(1);
-+	u_char sign;
-+	int e, tag;
-+
-+	clear_C1();
-+
-+	if ((st0_tag == TAG_Valid) && (st1_tag == TAG_Valid)) {
-+	      both_valid:
-+		/* Both regs are Valid or Denormal */
-+		if (signpositive(st0_ptr)) {
-+			if (st0_tag == TW_Denormal)
-+				FPU_to_exp16(st0_ptr, st0_ptr);
-+			else
-+				/* Convert st(0) for internal use. */
-+				setexponent16(st0_ptr, exponent(st0_ptr));
+ static int __init init_sysfs(void)
+ {
+ 	int error;
+-	if (!(error = sysdev_class_register(&oprofile_sysclass)))
 +
-+			if ((st0_ptr->sigh == 0x80000000)
-+			    && (st0_ptr->sigl == 0)) {
-+				/* Special case. The result can be precise. */
-+				u_char esign;
-+				e = exponent16(st0_ptr);
-+				if (e >= 0) {
-+					exponent.sigh = e;
-+					esign = SIGN_POS;
-+				} else {
-+					exponent.sigh = -e;
-+					esign = SIGN_NEG;
-+				}
-+				exponent.sigl = 0;
-+				setexponent16(&exponent, 31);
-+				tag = FPU_normalize_nuo(&exponent);
-+				stdexp(&exponent);
-+				setsign(&exponent, esign);
-+				tag =
-+				    FPU_mul(&exponent, tag, 1, FULL_PRECISION);
-+				if (tag >= 0)
-+					FPU_settagi(1, tag);
-+			} else {
-+				/* The usual case */
-+				sign = getsign(st1_ptr);
-+				if (st1_tag == TW_Denormal)
-+					FPU_to_exp16(st1_ptr, st1_ptr);
-+				else
-+					/* Convert st(1) for internal use. */
-+					setexponent16(st1_ptr,
-+						      exponent(st1_ptr));
-+				poly_l2(st0_ptr, st1_ptr, sign);
-+			}
-+		} else {
-+			/* negative */
-+			if (arith_invalid(1) < 0)
-+				return;
++	error = sysdev_class_register(&oprofile_sysclass);
++	if (!error)
+ 		error = sysdev_register(&device_oprofile);
+ 	return error;
+ }
+ 
+-
+ static void exit_sysfs(void)
+ {
+ 	sysdev_unregister(&device_oprofile);
+@@ -90,7 +87,7 @@ static int profile_exceptions_notify(struct notifier_block *self,
+ 	int ret = NOTIFY_DONE;
+ 	int cpu = smp_processor_id();
+ 
+-	switch(val) {
++	switch (val) {
+ 	case DIE_NMI:
+ 		if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
+ 			ret = NOTIFY_STOP;
+@@ -101,24 +98,24 @@ static int profile_exceptions_notify(struct notifier_block *self,
+ 	return ret;
+ }
+ 
+-static void nmi_cpu_save_registers(struct op_msrs * msrs)
++static void nmi_cpu_save_registers(struct op_msrs *msrs)
+ {
+ 	unsigned int const nr_ctrs = model->num_counters;
+-	unsigned int const nr_ctrls = model->num_controls; 
+-	struct op_msr * counters = msrs->counters;
+-	struct op_msr * controls = msrs->controls;
++	unsigned int const nr_ctrls = model->num_controls;
++	struct op_msr *counters = msrs->counters;
++	struct op_msr *controls = msrs->controls;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < nr_ctrs; ++i) {
+-		if (counters[i].addr){
++		if (counters[i].addr) {
+ 			rdmsr(counters[i].addr,
+ 				counters[i].saved.low,
+ 				counters[i].saved.high);
  		}
--	      exponent.sigl = 0;
--	      setexponent16(&exponent, 31);
--	      tag = FPU_normalize_nuo(&exponent);
--	      stdexp(&exponent);
--	      setsign(&exponent, esign);
--	      tag = FPU_mul(&exponent, tag, 1, FULL_PRECISION);
--	      if ( tag >= 0 )
--		FPU_settagi(1, tag);
--	    }
--	  else
--	    {
--	      /* The usual case */
--	      sign = getsign(st1_ptr);
--	      if ( st1_tag == TW_Denormal )
--		FPU_to_exp16(st1_ptr, st1_ptr);
--	      else
--		/* Convert st(1) for internal use. */
--		setexponent16(st1_ptr, exponent(st1_ptr));
--	      poly_l2(st0_ptr, st1_ptr, sign);
--	    }
--	}
--      else
--	{
--	  /* negative */
--	  if ( arith_invalid(1) < 0 )
--	    return;
--	}
+ 	}
+- 
++
+ 	for (i = 0; i < nr_ctrls; ++i) {
+-		if (controls[i].addr){
++		if (controls[i].addr) {
+ 			rdmsr(controls[i].addr,
+ 				controls[i].saved.low,
+ 				controls[i].saved.high);
+@@ -126,15 +123,13 @@ static void nmi_cpu_save_registers(struct op_msrs * msrs)
+ 	}
+ }
  
--      FPU_pop();
 -
--      return;
--    }
+-static void nmi_save_registers(void * dummy)
++static void nmi_save_registers(void *dummy)
+ {
+ 	int cpu = smp_processor_id();
+-	struct op_msrs * msrs = &cpu_msrs[cpu];
++	struct op_msrs *msrs = &cpu_msrs[cpu];
+ 	nmi_cpu_save_registers(msrs);
+ }
+ 
 -
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
--  if ( st1_tag == TAG_Special )
--    st1_tag = FPU_Special(st1_ptr);
+ static void free_msrs(void)
+ {
+ 	int i;
+@@ -146,7 +141,6 @@ static void free_msrs(void)
+ 	}
+ }
+ 
 -
--  if ( (st0_tag == TAG_Empty) || (st1_tag == TAG_Empty) )
--    {
--      FPU_stack_underflow_pop(1);
--      return;
--    }
--  else if ( (st0_tag <= TW_Denormal) && (st1_tag <= TW_Denormal) )
--    {
--      if ( st0_tag == TAG_Zero )
--	{
--	  if ( st1_tag == TAG_Zero )
--	    {
--	      /* Both args zero is invalid */
--	      if ( arith_invalid(1) < 0 )
--		return;
--	    }
--	  else
--	    {
--	      u_char sign;
--	      sign = getsign(st1_ptr)^SIGN_NEG;
--	      if ( FPU_divide_by_zero(1, sign) < 0 )
--		return;
-+		FPU_pop();
+ static int allocate_msrs(void)
+ {
+ 	int success = 1;
+@@ -173,11 +167,10 @@ static int allocate_msrs(void)
+ 	return success;
+ }
  
--	      setsign(st1_ptr, sign);
--	    }
--	}
--      else if ( st1_tag == TAG_Zero )
--	{
--	  /* st(1) contains zero, st(0) valid <> 0 */
--	  /* Zero is the valid answer */
--	  sign = getsign(st1_ptr);
--	  
--	  if ( signnegative(st0_ptr) )
--	    {
--	      /* log(negative) */
--	      if ( arith_invalid(1) < 0 )
- 		return;
--	    }
--	  else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
--	  else
--	    {
--	      if ( exponent(st0_ptr) < 0 )
--		sign ^= SIGN_NEG;
 -
--	      FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
--	      setsign(st1_ptr, sign);
--	    }
+-static void nmi_cpu_setup(void * dummy)
++static void nmi_cpu_setup(void *dummy)
+ {
+ 	int cpu = smp_processor_id();
+-	struct op_msrs * msrs = &cpu_msrs[cpu];
++	struct op_msrs *msrs = &cpu_msrs[cpu];
+ 	spin_lock(&oprofilefs_lock);
+ 	model->setup_ctrs(msrs);
+ 	spin_unlock(&oprofilefs_lock);
+@@ -193,13 +186,14 @@ static struct notifier_block profile_exceptions_nb = {
+ 
+ static int nmi_setup(void)
+ {
+-	int err=0;
++	int err = 0;
+ 	int cpu;
+ 
+ 	if (!allocate_msrs())
+ 		return -ENOMEM;
+ 
+-	if ((err = register_die_notifier(&profile_exceptions_nb))){
++	err = register_die_notifier(&profile_exceptions_nb);
++	if (err) {
+ 		free_msrs();
+ 		return err;
+ 	}
+@@ -210,7 +204,7 @@ static int nmi_setup(void)
+ 
+ 	/* Assume saved/restored counters are the same on all CPUs */
+ 	model->fill_in_addresses(&cpu_msrs[0]);
+-	for_each_possible_cpu (cpu) {
++	for_each_possible_cpu(cpu) {
+ 		if (cpu != 0) {
+ 			memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
+ 				sizeof(struct op_msr) * model->num_counters);
+@@ -226,39 +220,37 @@ static int nmi_setup(void)
+ 	return 0;
+ }
+ 
+-
+-static void nmi_restore_registers(struct op_msrs * msrs)
++static void nmi_restore_registers(struct op_msrs *msrs)
+ {
+ 	unsigned int const nr_ctrs = model->num_counters;
+-	unsigned int const nr_ctrls = model->num_controls; 
+-	struct op_msr * counters = msrs->counters;
+-	struct op_msr * controls = msrs->controls;
++	unsigned int const nr_ctrls = model->num_controls;
++	struct op_msr *counters = msrs->counters;
++	struct op_msr *controls = msrs->controls;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < nr_ctrls; ++i) {
+-		if (controls[i].addr){
++		if (controls[i].addr) {
+ 			wrmsr(controls[i].addr,
+ 				controls[i].saved.low,
+ 				controls[i].saved.high);
+ 		}
+ 	}
+- 
++
+ 	for (i = 0; i < nr_ctrs; ++i) {
+-		if (counters[i].addr){
++		if (counters[i].addr) {
+ 			wrmsr(counters[i].addr,
+ 				counters[i].saved.low,
+ 				counters[i].saved.high);
+ 		}
  	}
--      else
--	{
--	  /* One or both operands are denormals. */
--	  if ( denormal_operand() < 0 )
--	    return;
--	  goto both_valid;
--	}
--    }
--  else if ( (st0_tag == TW_NaN) || (st1_tag == TW_NaN) )
--    {
--      if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
--	return;
--    }
--  /* One or both arg must be an infinity */
--  else if ( st0_tag == TW_Infinity )
--    {
--      if ( (signnegative(st0_ptr)) || (st1_tag == TAG_Zero) )
--	{
--	  /* log(-infinity) or 0*log(infinity) */
--	  if ( arith_invalid(1) < 0 )
--	    return;
--	}
--      else
--	{
--	  u_char sign = getsign(st1_ptr);
+ }
+- 
  
--	  if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
-+	if (st1_tag == TAG_Special)
-+		st1_tag = FPU_Special(st1_ptr);
+-static void nmi_cpu_shutdown(void * dummy)
++static void nmi_cpu_shutdown(void *dummy)
+ {
+ 	unsigned int v;
+ 	int cpu = smp_processor_id();
+-	struct op_msrs * msrs = &cpu_msrs[cpu];
+- 
++	struct op_msrs *msrs = &cpu_msrs[cpu];
++
+ 	/* restoring APIC_LVTPC can trigger an apic error because the delivery
+ 	 * mode and vector nr combination can be illegal. That's by design: on
+ 	 * power on apic lvt contain a zero vector nr which are legal only for
+@@ -271,7 +263,6 @@ static void nmi_cpu_shutdown(void * dummy)
+ 	nmi_restore_registers(msrs);
+ }
  
--	  FPU_copy_to_reg1(&CONST_INF, TAG_Special);
--	  setsign(st1_ptr, sign);
--	}
--    }
--  /* st(1) must be infinity here */
--  else if ( ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal))
--	    && ( signpositive(st0_ptr) ) )
--    {
--      if ( exponent(st0_ptr) >= 0 )
--	{
--	  if ( (exponent(st0_ptr) == 0) &&
--	      (st0_ptr->sigh == 0x80000000) &&
--	      (st0_ptr->sigl == 0) )
--	    {
--	      /* st(0) holds 1.0 */
--	      /* infinity*log(1) */
--	      if ( arith_invalid(1) < 0 )
-+	if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
-+		FPU_stack_underflow_pop(1);
- 		return;
--	    }
--	  /* else st(0) is positive and > 1.0 */
-+	} else if ((st0_tag <= TW_Denormal) && (st1_tag <= TW_Denormal)) {
-+		if (st0_tag == TAG_Zero) {
-+			if (st1_tag == TAG_Zero) {
-+				/* Both args zero is invalid */
-+				if (arith_invalid(1) < 0)
-+					return;
-+			} else {
-+				u_char sign;
-+				sign = getsign(st1_ptr) ^ SIGN_NEG;
-+				if (FPU_divide_by_zero(1, sign) < 0)
-+					return;
+- 
+ static void nmi_shutdown(void)
+ {
+ 	nmi_enabled = 0;
+@@ -281,45 +272,40 @@ static void nmi_shutdown(void)
+ 	free_msrs();
+ }
+ 
+- 
+-static void nmi_cpu_start(void * dummy)
++static void nmi_cpu_start(void *dummy)
+ {
+-	struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
++	struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
+ 	model->start(msrs);
+ }
+- 
+ 
+ static int nmi_start(void)
+ {
+ 	on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+ 	return 0;
+ }
+- 
+- 
+-static void nmi_cpu_stop(void * dummy)
 +
-+				setsign(st1_ptr, sign);
-+			}
-+		} else if (st1_tag == TAG_Zero) {
-+			/* st(1) contains zero, st(0) valid <> 0 */
-+			/* Zero is the valid answer */
-+			sign = getsign(st1_ptr);
++static void nmi_cpu_stop(void *dummy)
+ {
+-	struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
++	struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
+ 	model->stop(msrs);
+ }
+- 
+- 
 +
-+			if (signnegative(st0_ptr)) {
-+				/* log(negative) */
-+				if (arith_invalid(1) < 0)
-+					return;
-+			} else if ((st0_tag == TW_Denormal)
-+				   && (denormal_operand() < 0))
-+				return;
-+			else {
-+				if (exponent(st0_ptr) < 0)
-+					sign ^= SIGN_NEG;
+ static void nmi_stop(void)
+ {
+ 	on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+ }
+ 
+-
+ struct op_counter_config counter_config[OP_MAX_COUNTER];
+ 
+-static int nmi_create_files(struct super_block * sb, struct dentry * root)
++static int nmi_create_files(struct super_block *sb, struct dentry *root)
+ {
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < model->num_counters; ++i) {
+-		struct dentry * dir;
++		struct dentry *dir;
+ 		char buf[4];
+- 
+- 		/* quick little hack to _not_ expose a counter if it is not
 +
-+				FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
-+				setsign(st1_ptr, sign);
-+			}
-+		} else {
-+			/* One or both operands are denormals. */
-+			if (denormal_operand() < 0)
-+				return;
-+			goto both_valid;
-+		}
-+	} else if ((st0_tag == TW_NaN) || (st1_tag == TW_NaN)) {
-+		if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
-+			return;
-+	}
-+	/* One or both arg must be an infinity */
-+	else if (st0_tag == TW_Infinity) {
-+		if ((signnegative(st0_ptr)) || (st1_tag == TAG_Zero)) {
-+			/* log(-infinity) or 0*log(infinity) */
-+			if (arith_invalid(1) < 0)
-+				return;
-+		} else {
-+			u_char sign = getsign(st1_ptr);
++		/* quick little hack to _not_ expose a counter if it is not
+ 		 * available for use.  This should protect userspace app.
+ 		 * NOTE:  assumes 1:1 mapping here (that counters are organized
+ 		 *        sequentially in their struct assignment).
+@@ -329,21 +315,21 @@ static int nmi_create_files(struct super_block * sb, struct dentry * root)
+ 
+ 		snprintf(buf,  sizeof(buf), "%d", i);
+ 		dir = oprofilefs_mkdir(sb, root, buf);
+-		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); 
+-		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event); 
+-		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count); 
+-		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); 
+-		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); 
+-		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); 
++		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
++		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
++		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
++		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
++		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
++		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+ 	}
+ 
+ 	return 0;
+ }
+- 
 +
-+			if ((st1_tag == TW_Denormal)
-+			    && (denormal_operand() < 0))
-+				return;
+ static int p4force;
+ module_param(p4force, int, 0);
+- 
+-static int __init p4_init(char ** cpu_type)
 +
-+			FPU_copy_to_reg1(&CONST_INF, TAG_Special);
-+			setsign(st1_ptr, sign);
-+		}
++static int __init p4_init(char **cpu_type)
+ {
+ 	__u8 cpu_model = boot_cpu_data.x86_model;
+ 
+@@ -356,15 +342,15 @@ static int __init p4_init(char ** cpu_type)
+ 	return 1;
+ #else
+ 	switch (smp_num_siblings) {
+-		case 1:
+-			*cpu_type = "i386/p4";
+-			model = &op_p4_spec;
+-			return 1;
+-
+-		case 2:
+-			*cpu_type = "i386/p4-ht";
+-			model = &op_p4_ht2_spec;
+-			return 1;
++	case 1:
++		*cpu_type = "i386/p4";
++		model = &op_p4_spec;
++		return 1;
++
++	case 2:
++		*cpu_type = "i386/p4-ht";
++		model = &op_p4_ht2_spec;
++		return 1;
  	}
--      else
--	{
--	  /* st(0) is positive and < 1.0 */
-+	/* st(1) must be infinity here */
-+	else if (((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal))
-+		 && (signpositive(st0_ptr))) {
-+		if (exponent(st0_ptr) >= 0) {
-+			if ((exponent(st0_ptr) == 0) &&
-+			    (st0_ptr->sigh == 0x80000000) &&
-+			    (st0_ptr->sigl == 0)) {
-+				/* st(0) holds 1.0 */
-+				/* infinity*log(1) */
-+				if (arith_invalid(1) < 0)
-+					return;
-+			}
-+			/* else st(0) is positive and > 1.0 */
-+		} else {
-+			/* st(0) is positive and < 1.0 */
+ #endif
  
--	  if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
-+			if ((st0_tag == TW_Denormal)
-+			    && (denormal_operand() < 0))
-+				return;
+@@ -373,8 +359,7 @@ static int __init p4_init(char ** cpu_type)
+ 	return 0;
+ }
  
--	  changesign(st1_ptr);
--	}
--    }
--  else
--    {
--      /* st(0) must be zero or negative */
--      if ( st0_tag == TAG_Zero )
--	{
--	  /* This should be invalid, but a real 80486 is happy with it. */
-+			changesign(st1_ptr);
-+		}
-+	} else {
-+		/* st(0) must be zero or negative */
-+		if (st0_tag == TAG_Zero) {
-+			/* This should be invalid, but a real 80486 is happy with it. */
+-
+-static int __init ppro_init(char ** cpu_type)
++static int __init ppro_init(char **cpu_type)
+ {
+ 	__u8 cpu_model = boot_cpu_data.x86_model;
  
- #ifndef PECULIAR_486
--	  sign = getsign(st1_ptr);
--	  if ( FPU_divide_by_zero(1, sign) < 0 )
--	    return;
-+			sign = getsign(st1_ptr);
-+			if (FPU_divide_by_zero(1, sign) < 0)
-+				return;
- #endif /* PECULIAR_486 */
+@@ -409,52 +394,52 @@ int __init op_nmi_init(struct oprofile_operations *ops)
  
--	  changesign(st1_ptr);
-+			changesign(st1_ptr);
-+		} else if (arith_invalid(1) < 0)	/* log(negative) */
-+			return;
+ 	if (!cpu_has_apic)
+ 		return -ENODEV;
+- 
++
+ 	switch (vendor) {
+-		case X86_VENDOR_AMD:
+-			/* Needs to be at least an Athlon (or hammer in 32bit mode) */
++	case X86_VENDOR_AMD:
++		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
+ 
+-			switch (family) {
+-			default:
++		switch (family) {
++		default:
++			return -ENODEV;
++		case 6:
++			model = &op_athlon_spec;
++			cpu_type = "i386/athlon";
++			break;
++		case 0xf:
++			model = &op_athlon_spec;
++			/* Actually it could be i386/hammer too, but give
++			 user space an consistent name. */
++			cpu_type = "x86-64/hammer";
++			break;
++		case 0x10:
++			model = &op_athlon_spec;
++			cpu_type = "x86-64/family10";
++			break;
++		}
++		break;
++
++	case X86_VENDOR_INTEL:
++		switch (family) {
++			/* Pentium IV */
++		case 0xf:
++			if (!p4_init(&cpu_type))
+ 				return -ENODEV;
+-			case 6:
+-				model = &op_athlon_spec;
+-				cpu_type = "i386/athlon";
+-				break;
+-			case 0xf:
+-				model = &op_athlon_spec;
+-				/* Actually it could be i386/hammer too, but give
+-				   user space an consistent name. */
+-				cpu_type = "x86-64/hammer";
+-				break;
+-			case 0x10:
+-				model = &op_athlon_spec;
+-				cpu_type = "x86-64/family10";
+-				break;
+-			}
+ 			break;
+- 
+-		case X86_VENDOR_INTEL:
+-			switch (family) {
+-				/* Pentium IV */
+-				case 0xf:
+-					if (!p4_init(&cpu_type))
+-						return -ENODEV;
+-					break;
+-
+-				/* A P6-class processor */
+-				case 6:
+-					if (!ppro_init(&cpu_type))
+-						return -ENODEV;
+-					break;
+-
+-				default:
+-					return -ENODEV;
+-			}
++
++			/* A P6-class processor */
++		case 6:
++			if (!ppro_init(&cpu_type))
++				return -ENODEV;
+ 			break;
+ 
+ 		default:
+ 			return -ENODEV;
++		}
++		break;
++
++	default:
++		return -ENODEV;
  	}
--      else if ( arith_invalid(1) < 0 )	  /* log(negative) */
--	return;
--    }
  
--  FPU_pop();
-+	FPU_pop();
+ 	init_sysfs();
+@@ -469,7 +454,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
+ 	return 0;
  }
  
 -
- static void fpatan(FPU_REG *st0_ptr, u_char st0_tag)
+ void op_nmi_exit(void)
  {
--  FPU_REG *st1_ptr = &st(1);
--  u_char st1_tag = FPU_gettagi(1);
--  int tag;
-+	FPU_REG *st1_ptr = &st(1);
-+	u_char st1_tag = FPU_gettagi(1);
-+	int tag;
+ 	if (using_nmi)
+diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
+index 8627463..52deabc 100644
+--- a/arch/x86/pci/common.c
++++ b/arch/x86/pci/common.c
+@@ -109,6 +109,19 @@ static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
+ 	}
+ }
  
--  clear_C1();
--  if ( !((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid)) )
--    {
--    valid_atan:
-+	clear_C1();
-+	if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
-+	      valid_atan:
++static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
++{
++	struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];
++
++	if (rom_r->parent)
++		return;
++	if (rom_r->start)
++		/* we deal with BIOS assigned ROM later */
++		return;
++	if (!(pci_probe & PCI_ASSIGN_ROMS))
++		rom_r->start = rom_r->end = rom_r->flags = 0;
++}
++
+ /*
+  *  Called after each bus is probed, but before its children
+  *  are examined.
+@@ -116,8 +129,12 @@ static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
  
--      poly_atan(st0_ptr, st0_tag, st1_ptr, st1_tag);
-+		poly_atan(st0_ptr, st0_tag, st1_ptr, st1_tag);
+ void __devinit  pcibios_fixup_bus(struct pci_bus *b)
+ {
++	struct pci_dev *dev;
++
+ 	pcibios_fixup_ghosts(b);
+ 	pci_read_bridge_bases(b);
++	list_for_each_entry(dev, &b->devices, bus_list)
++		pcibios_fixup_device_resources(dev);
+ }
  
--      FPU_pop();
-+		FPU_pop();
+ /*
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 6cff66d..cb63007 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -19,7 +19,7 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d)
  
--      return;
--    }
-+		return;
-+	}
+ 	printk(KERN_WARNING "PCI: Searching for i450NX host bridges on %s\n", pci_name(d));
+ 	reg = 0xd0;
+-	for(pxb=0; pxb<2; pxb++) {
++	for(pxb = 0; pxb < 2; pxb++) {
+ 		pci_read_config_byte(d, reg++, &busno);
+ 		pci_read_config_byte(d, reg++, &suba);
+ 		pci_read_config_byte(d, reg++, &subb);
+@@ -56,7 +56,7 @@ static void __devinit  pci_fixup_umc_ide(struct pci_dev *d)
+ 	int i;
  
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
--  if ( st1_tag == TAG_Special )
--    st1_tag = FPU_Special(st1_ptr);
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
-+	if (st1_tag == TAG_Special)
-+		st1_tag = FPU_Special(st1_ptr);
+ 	printk(KERN_WARNING "PCI: Fixing base address flags for device %s\n", pci_name(d));
+-	for(i=0; i<4; i++)
++	for(i = 0; i < 4; i++)
+ 		d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
+@@ -127,7 +127,7 @@ static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
+ 		   NB latency to zero */
+ 		pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);
  
--  if ( ((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
-+	if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
- 	    || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
--	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal)) )
--    {
--      if ( denormal_operand() < 0 )
--	return;
-+	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
-+		if (denormal_operand() < 0)
-+			return;
+-		where = 0x95; /* the memory write queue timer register is 
++		where = 0x95; /* the memory write queue timer register is
+ 				different for the KT266x's: 0x95 not 0x55 */
+ 	} else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
+ 			(d->revision == VIA_8363_KL133_REVISION_ID ||
+@@ -230,7 +230,7 @@ static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int wh
  
--      goto valid_atan;
--    }
--  else if ( (st0_tag == TAG_Empty) || (st1_tag == TAG_Empty) )
--    {
--      FPU_stack_underflow_pop(1);
--      return;
--    }
--  else if ( (st0_tag == TW_NaN) || (st1_tag == TW_NaN) )
--    {
--      if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) >= 0 )
--	  FPU_pop();
--      return;
--    }
--  else if ( (st0_tag == TW_Infinity) || (st1_tag == TW_Infinity) )
--    {
--      u_char sign = getsign(st1_ptr);
--      if ( st0_tag == TW_Infinity )
--	{
--	  if ( st1_tag == TW_Infinity )
--	    {
--	      if ( signpositive(st0_ptr) )
--		{
--		  FPU_copy_to_reg1(&CONST_PI4, TAG_Valid);
--		}
--	      else
--		{
--		  setpositive(st1_ptr);
--		  tag = FPU_u_add(&CONST_PI4, &CONST_PI2, st1_ptr,
--				  FULL_PRECISION, SIGN_POS,
--				  exponent(&CONST_PI4), exponent(&CONST_PI2));
--		  if ( tag >= 0 )
--		    FPU_settagi(1, tag);
--		}
--	    }
--	  else
--	    {
--	      if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
-+		goto valid_atan;
-+	} else if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
-+		FPU_stack_underflow_pop(1);
-+		return;
-+	} else if ((st0_tag == TW_NaN) || (st1_tag == TW_NaN)) {
-+		if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) >= 0)
-+			FPU_pop();
- 		return;
-+	} else if ((st0_tag == TW_Infinity) || (st1_tag == TW_Infinity)) {
-+		u_char sign = getsign(st1_ptr);
-+		if (st0_tag == TW_Infinity) {
-+			if (st1_tag == TW_Infinity) {
-+				if (signpositive(st0_ptr)) {
-+					FPU_copy_to_reg1(&CONST_PI4, TAG_Valid);
-+				} else {
-+					setpositive(st1_ptr);
-+					tag =
-+					    FPU_u_add(&CONST_PI4, &CONST_PI2,
-+						      st1_ptr, FULL_PRECISION,
-+						      SIGN_POS,
-+						      exponent(&CONST_PI4),
-+						      exponent(&CONST_PI2));
-+					if (tag >= 0)
-+						FPU_settagi(1, tag);
-+				}
-+			} else {
-+				if ((st1_tag == TW_Denormal)
-+				    && (denormal_operand() < 0))
-+					return;
+ 	if ((offset) && (where == offset))
+ 		value = value & 0xfffffffc;
+-	
 +
-+				if (signpositive(st0_ptr)) {
-+					FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
-+					setsign(st1_ptr, sign);	/* An 80486 preserves the sign */
-+					FPU_pop();
-+					return;
-+				} else {
-+					FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
-+				}
-+			}
-+		} else {
-+			/* st(1) is infinity, st(0) not infinity */
-+			if ((st0_tag == TW_Denormal)
-+			    && (denormal_operand() < 0))
-+				return;
+ 	return raw_pci_ops->write(0, bus->number, devfn, where, size, value);
+ }
  
--	      if ( signpositive(st0_ptr) )
--		{
--		  FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
--		  setsign(st1_ptr, sign);   /* An 80486 preserves the sign */
--		  FPU_pop();
--		  return;
-+			FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
+@@ -271,8 +271,8 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
+ 		 * after hot-remove, the pbus->devices is empty and this code
+ 		 * will set the offsets to zero and the bus ops to parent's bus
+ 		 * ops, which is unmodified.
+-	 	 */
+-		for (i= GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
++		 */
++		for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
+ 			quirk_aspm_offset[i] = 0;
+ 
+ 		pbus->ops = pbus->parent->ops;
+@@ -286,17 +286,17 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
+ 		list_for_each_entry(dev, &pbus->devices, bus_list) {
+ 			/* There are 0 to 8 devices attached to this bus */
+ 			cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP);
+-			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)]= cap_base + 0x10;
++			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10;
  		}
--	      else
--		{
--		  FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
-+		setsign(st1_ptr, sign);
-+	} else if (st1_tag == TAG_Zero) {
-+		/* st(0) must be valid or zero */
-+		u_char sign = getsign(st1_ptr);
+ 		pbus->ops = &quirk_pcie_aspm_ops;
+ 	}
+ }
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA,	pcie_rootport_aspm_quirk );
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA1,	pcie_rootport_aspm_quirk );
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PB,	pcie_rootport_aspm_quirk );
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PB1,	pcie_rootport_aspm_quirk );
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PC,	pcie_rootport_aspm_quirk );
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PC1,	pcie_rootport_aspm_quirk );
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA,	pcie_rootport_aspm_quirk);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA1,	pcie_rootport_aspm_quirk);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PB,	pcie_rootport_aspm_quirk);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PB1,	pcie_rootport_aspm_quirk);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PC,	pcie_rootport_aspm_quirk);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PC1,	pcie_rootport_aspm_quirk);
+ 
+ /*
+  * Fixup to mark boot BIOS video selected by BIOS before it changes
+@@ -336,8 +336,8 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
+ 		 * PCI header type NORMAL.
+ 		 */
+ 		if (bridge
+-		    &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+-		       ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) {
++		    && ((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE)
++		       || (bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) {
+ 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+ 						&config);
+ 			if (!(config & PCI_BRIDGE_CTL_VGA))
+diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
+index 88d8f5c..ed07ce6 100644
+--- a/arch/x86/pci/irq.c
++++ b/arch/x86/pci/irq.c
+@@ -200,6 +200,7 @@ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+ {
+ 	static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
+ 
++	WARN_ON_ONCE(pirq >= 16);
+ 	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
+ }
+ 
+@@ -207,7 +208,8 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
+ {
+ 	static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
+ 	unsigned int val = irqmap[irq];
+-		
 +
-+		if ((st0_tag == TW_Denormal) && (denormal_operand() < 0))
-+			return;
++	WARN_ON_ONCE(pirq >= 16);
+ 	if (val) {
+ 		write_config_nybble(router, 0x48, pirq-1, val);
+ 		return 1;
+@@ -257,12 +259,16 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
+ static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+ {
+ 	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
 +
-+		if (signpositive(st0_ptr)) {
-+			/* An 80486 preserves the sign */
-+			FPU_pop();
-+			return;
- 		}
--	    }
--	}
--      else
--	{
--	  /* st(1) is infinity, st(0) not infinity */
--	  if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
++	WARN_ON_ONCE(pirq >= 5);
+ 	return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
+ }
  
--	  FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
--	}
--      setsign(st1_ptr, sign);
--    }
--  else if ( st1_tag == TAG_Zero )
--    {
--      /* st(0) must be valid or zero */
--      u_char sign = getsign(st1_ptr);
--
--      if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--	return;
-+		FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
-+		setsign(st1_ptr, sign);
-+	} else if (st0_tag == TAG_Zero) {
-+		/* st(1) must be TAG_Valid here */
-+		u_char sign = getsign(st1_ptr);
+ static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+ {
+ 	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
++
++	WARN_ON_ONCE(pirq >= 5);
+ 	write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
+ 	return 1;
+ }
+@@ -275,12 +281,16 @@ static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq
+ static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+ {
+ 	static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++
++	WARN_ON_ONCE(pirq >= 4);
+ 	return read_config_nybble(router,0x43, pirqmap[pirq-1]);
+ }
  
--      if ( signpositive(st0_ptr) )
--	{
--	  /* An 80486 preserves the sign */
--	  FPU_pop();
--	  return;
--	}
-+		if ((st1_tag == TW_Denormal) && (denormal_operand() < 0))
-+			return;
+ static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+ {
+ 	static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++
++	WARN_ON_ONCE(pirq >= 4);
+ 	write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
+ 	return 1;
+ }
+@@ -419,6 +429,7 @@ static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
  
--      FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
--      setsign(st1_ptr, sign);
--    }
--  else if ( st0_tag == TAG_Zero )
--    {
--      /* st(1) must be TAG_Valid here */
--      u_char sign = getsign(st1_ptr);
--
--      if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
--	return;
--
--      FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
--      setsign(st1_ptr, sign);
--    }
-+		FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
-+		setsign(st1_ptr, sign);
-+	}
- #ifdef PARANOID
--  else
--    EXCEPTION(EX_INTERNAL | 0x125);
-+	else
-+		EXCEPTION(EX_INTERNAL | 0x125);
- #endif /* PARANOID */
+ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+ {
++	WARN_ON_ONCE(pirq >= 9);
+ 	if (pirq > 8) {
+ 		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
+ 		return 0;
+@@ -428,6 +439,7 @@ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  
--  FPU_pop();
--  set_precision_flag_up();  /* We do not really know if up or down */
-+	FPU_pop();
-+	set_precision_flag_up();	/* We do not really know if up or down */
+ static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+ {
++	WARN_ON_ONCE(pirq >= 9);
+ 	if (pirq > 8) {
+ 		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
+ 		return 0;
+@@ -449,14 +461,14 @@ static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
+  */
+ static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+ {
+-	outb_p(pirq, 0xc00);
++	outb(pirq, 0xc00);
+ 	return inb(0xc01) & 0xf;
  }
  
--
- static void fprem(FPU_REG *st0_ptr, u_char st0_tag)
+ static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  {
--  do_fprem(st0_ptr, st0_tag, RC_CHOP);
-+	do_fprem(st0_ptr, st0_tag, RC_CHOP);
+-	outb_p(pirq, 0xc00);
+-	outb_p(irq, 0xc01);
++	outb(pirq, 0xc00);
++	outb(irq, 0xc01);
+ 	return 1;
  }
  
--
- static void fprem1(FPU_REG *st0_ptr, u_char st0_tag)
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 998fd3e..efcf620 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -19,7 +19,7 @@ unsigned long saved_context_esp, saved_context_ebp;
+ unsigned long saved_context_esi, saved_context_edi;
+ unsigned long saved_context_eflags;
+ 
+-void __save_processor_state(struct saved_context *ctxt)
++static void __save_processor_state(struct saved_context *ctxt)
  {
--  do_fprem(st0_ptr, st0_tag, RC_RND);
-+	do_fprem(st0_ptr, st0_tag, RC_RND);
+ 	mtrr_save_fixed_ranges(NULL);
+ 	kernel_fpu_begin();
+@@ -74,19 +74,19 @@ static void fix_processor_context(void)
+ 	/*
+ 	 * Now maybe reload the debug registers
+ 	 */
+-	if (current->thread.debugreg[7]){
+-		set_debugreg(current->thread.debugreg[0], 0);
+-		set_debugreg(current->thread.debugreg[1], 1);
+-		set_debugreg(current->thread.debugreg[2], 2);
+-		set_debugreg(current->thread.debugreg[3], 3);
++	if (current->thread.debugreg7) {
++		set_debugreg(current->thread.debugreg0, 0);
++		set_debugreg(current->thread.debugreg1, 1);
++		set_debugreg(current->thread.debugreg2, 2);
++		set_debugreg(current->thread.debugreg3, 3);
+ 		/* no 4 and 5 */
+-		set_debugreg(current->thread.debugreg[6], 6);
+-		set_debugreg(current->thread.debugreg[7], 7);
++		set_debugreg(current->thread.debugreg6, 6);
++		set_debugreg(current->thread.debugreg7, 7);
+ 	}
+ 
  }
  
--
- static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
+-void __restore_processor_state(struct saved_context *ctxt)
++static void __restore_processor_state(struct saved_context *ctxt)
  {
--  u_char sign, sign1;
--  FPU_REG *st1_ptr = &st(1), a, b;
--  u_char st1_tag = FPU_gettagi(1);
-+	u_char sign, sign1;
-+	FPU_REG *st1_ptr = &st(1), a, b;
-+	u_char st1_tag = FPU_gettagi(1);
+ 	/*
+ 	 * control registers
+diff --git a/arch/x86/vdso/.gitignore b/arch/x86/vdso/.gitignore
+index f8b69d8..60274d5 100644
+--- a/arch/x86/vdso/.gitignore
++++ b/arch/x86/vdso/.gitignore
+@@ -1 +1,6 @@
+ vdso.lds
++vdso-syms.lds
++vdso32-syms.lds
++vdso32-syscall-syms.lds
++vdso32-sysenter-syms.lds
++vdso32-int80-syms.lds
+diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
+index e7bff0f..d28dda5 100644
+--- a/arch/x86/vdso/Makefile
++++ b/arch/x86/vdso/Makefile
+@@ -1,39 +1,37 @@
+ #
+-# x86-64 vDSO.
++# Building vDSO images for x86.
+ #
  
--  clear_C1();
--  if ( !((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid)) )
--    {
--    valid_yl2xp1:
-+	clear_C1();
-+	if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
-+	      valid_yl2xp1:
++VDSO64-$(CONFIG_X86_64)		:= y
++VDSO32-$(CONFIG_X86_32)		:= y
++VDSO32-$(CONFIG_COMPAT)		:= y
++
++vdso-install-$(VDSO64-y)	+= vdso.so
++vdso-install-$(VDSO32-y)	+= $(vdso32-y:=.so)
++
++
+ # files to link into the vdso
+-# vdso-start.o has to be first
+-vobjs-y := vdso-start.o vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
++vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
  
--      sign = getsign(st0_ptr);
--      sign1 = getsign(st1_ptr);
-+		sign = getsign(st0_ptr);
-+		sign1 = getsign(st1_ptr);
+ # files to link into kernel
+-obj-y := vma.o vdso.o vdso-syms.o
++obj-$(VDSO64-y)			+= vma.o vdso.o
++obj-$(VDSO32-y)			+= vdso32.o vdso32-setup.o
  
--      FPU_to_exp16(st0_ptr, &a);
--      FPU_to_exp16(st1_ptr, &b);
-+		FPU_to_exp16(st0_ptr, &a);
-+		FPU_to_exp16(st1_ptr, &b);
+ vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
  
--      if ( poly_l2p1(sign, sign1, &a, &b, st1_ptr) )
--	return;
-+		if (poly_l2p1(sign, sign1, &a, &b, st1_ptr))
-+			return;
+ $(obj)/vdso.o: $(obj)/vdso.so
  
--      FPU_pop();
--      return;
--    }
-+		FPU_pop();
-+		return;
-+	}
+-targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) vdso-syms.o
+-
+-# The DSO images are built using a special linker script.
+-quiet_cmd_syscall = SYSCALL $@
+-      cmd_syscall = $(CC) -m elf_x86_64 -nostdlib $(SYSCFLAGS_$(@F)) \
+-		          -Wl,-T,$(filter-out FORCE,$^) -o $@
++targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
  
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
--  if ( st1_tag == TAG_Special )
--    st1_tag = FPU_Special(st1_ptr);
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
-+	if (st1_tag == TAG_Special)
-+		st1_tag = FPU_Special(st1_ptr);
+ export CPPFLAGS_vdso.lds += -P -C
  
--  if ( ((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
-+	if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
- 	    || ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
--	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal)) )
--    {
--      if ( denormal_operand() < 0 )
--	return;
--
--      goto valid_yl2xp1;
--    }
--  else if ( (st0_tag == TAG_Empty) | (st1_tag == TAG_Empty) )
--    {
--      FPU_stack_underflow_pop(1);
--      return;
--    }
--  else if ( st0_tag == TAG_Zero )
--    {
--      switch ( st1_tag )
--	{
--	case TW_Denormal:
--	  if ( denormal_operand() < 0 )
--	    return;
--
--	case TAG_Zero:
--	case TAG_Valid:
--	  setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
--	  FPU_copy_to_reg1(st0_ptr, st0_tag);
--	  break;
--
--	case TW_Infinity:
--	  /* Infinity*log(1) */
--	  if ( arith_invalid(1) < 0 )
--	    return;
--	  break;
-+	    || ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
-+		if (denormal_operand() < 0)
-+			return;
+-vdso-flags = -fPIC -shared -Wl,-soname=linux-vdso.so.1 \
+-		 $(call ld-option, -Wl$(comma)--hash-style=sysv) \
+-		-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+-SYSCFLAGS_vdso.so = $(vdso-flags)
+-SYSCFLAGS_vdso.so.dbg = $(vdso-flags)
++VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
++		      	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
  
--	case TW_NaN:
--	  if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
--	    return;
--	  break;
+ $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
+ 
+-$(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE
 -
--	default:
-+		goto valid_yl2xp1;
-+	} else if ((st0_tag == TAG_Empty) | (st1_tag == TAG_Empty)) {
-+		FPU_stack_underflow_pop(1);
-+		return;
-+	} else if (st0_tag == TAG_Zero) {
-+		switch (st1_tag) {
-+		case TW_Denormal:
-+			if (denormal_operand() < 0)
-+				return;
+ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+-	$(call if_changed,syscall)
++	$(call if_changed,vdso)
+ 
+ $(obj)/%.so: OBJCOPYFLAGS := -S
+ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+@@ -41,24 +39,96 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ 
+ CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
+ 
+-$(obj)/vclock_gettime.o: KBUILD_CFLAGS = $(CFL)
+-$(obj)/vgetcpu.o: KBUILD_CFLAGS = $(CFL)
++$(vobjs): KBUILD_CFLAGS = $(CFL)
 +
-+		case TAG_Zero:
-+		case TAG_Valid:
-+			setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
-+			FPU_copy_to_reg1(st0_ptr, st0_tag);
-+			break;
++targets += vdso-syms.lds
++obj-$(VDSO64-y)			+= vdso-syms.lds
 +
-+		case TW_Infinity:
-+			/* Infinity*log(1) */
-+			if (arith_invalid(1) < 0)
-+				return;
-+			break;
++#
++# Match symbols in the DSO that look like VDSO*; produce a file of constants.
++#
++sed-vdsosym := -e 's/^00*/0/' \
++	-e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
++quiet_cmd_vdsosym = VDSOSYM $@
++      cmd_vdsosym = $(NM) $< | sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
 +
-+		case TW_NaN:
-+			if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
-+				return;
-+			break;
++$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
++	$(call if_changed,vdsosym)
 +
-+		default:
- #ifdef PARANOID
--	  EXCEPTION(EX_INTERNAL | 0x116);
--	  return;
-+			EXCEPTION(EX_INTERNAL | 0x116);
-+			return;
- #endif /* PARANOID */
--	  break;
--	}
--    }
--  else if ( (st0_tag == TAG_Valid) || (st0_tag == TW_Denormal) )
--    {
--      switch ( st1_tag )
--	{
--	case TAG_Zero:
--	  if ( signnegative(st0_ptr) )
--	    {
--	      if ( exponent(st0_ptr) >= 0 )
--		{
--		  /* st(0) holds <= -1.0 */
--#ifdef PECULIAR_486   /* Stupid 80486 doesn't worry about log(negative). */
--		  changesign(st1_ptr);
-+			break;
-+		}
-+	} else if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
-+		switch (st1_tag) {
-+		case TAG_Zero:
-+			if (signnegative(st0_ptr)) {
-+				if (exponent(st0_ptr) >= 0) {
-+					/* st(0) holds <= -1.0 */
-+#ifdef PECULIAR_486		/* Stupid 80486 doesn't worry about log(negative). */
-+					changesign(st1_ptr);
- #else
--		  if ( arith_invalid(1) < 0 )
--		    return;
-+					if (arith_invalid(1) < 0)
-+						return;
- #endif /* PECULIAR_486 */
--		}
--	      else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--		return;
--	      else
--		changesign(st1_ptr);
--	    }
--	  else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
--	  break;
--
--	case TW_Infinity:
--	  if ( signnegative(st0_ptr) )
--	    {
--	      if ( (exponent(st0_ptr) >= 0) &&
--		  !((st0_ptr->sigh == 0x80000000) &&
--		    (st0_ptr->sigl == 0)) )
--		{
--		  /* st(0) holds < -1.0 */
--#ifdef PECULIAR_486   /* Stupid 80486 doesn't worry about log(negative). */
--		  changesign(st1_ptr);
-+				} else if ((st0_tag == TW_Denormal)
-+					   && (denormal_operand() < 0))
-+					return;
-+				else
-+					changesign(st1_ptr);
-+			} else if ((st0_tag == TW_Denormal)
-+				   && (denormal_operand() < 0))
-+				return;
-+			break;
++#
++# Build multiple 32-bit vDSO images to choose from at boot time.
++#
++obj-$(VDSO32-y)			+= vdso32-syms.lds
++vdso32.so-$(CONFIG_X86_32)	+= int80
++vdso32.so-$(CONFIG_COMPAT)	+= syscall
++vdso32.so-$(VDSO32-y)		+= sysenter
 +
-+		case TW_Infinity:
-+			if (signnegative(st0_ptr)) {
-+				if ((exponent(st0_ptr) >= 0) &&
-+				    !((st0_ptr->sigh == 0x80000000) &&
-+				      (st0_ptr->sigl == 0))) {
-+					/* st(0) holds < -1.0 */
-+#ifdef PECULIAR_486		/* Stupid 80486 doesn't worry about log(negative). */
-+					changesign(st1_ptr);
- #else
--		  if ( arith_invalid(1) < 0 ) return;
-+					if (arith_invalid(1) < 0)
-+						return;
- #endif /* PECULIAR_486 */
-+				} else if ((st0_tag == TW_Denormal)
-+					   && (denormal_operand() < 0))
-+					return;
-+				else
-+					changesign(st1_ptr);
-+			} else if ((st0_tag == TW_Denormal)
-+				   && (denormal_operand() < 0))
-+				return;
-+			break;
++CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
++VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
 +
-+		case TW_NaN:
-+			if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
-+				return;
- 		}
--	      else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--		return;
--	      else
--		changesign(st1_ptr);
--	    }
--	  else if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
--	  break;
--
--	case TW_NaN:
--	  if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
--	    return;
--	}
++# This makes sure the $(obj) subdirectory exists even though vdso32/
++# is not a kbuild sub-make subdirectory.
++override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
  
--    }
--  else if ( st0_tag == TW_NaN )
--    {
--      if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
--	return;
--    }
--  else if ( st0_tag == TW_Infinity )
--    {
--      if ( st1_tag == TW_NaN )
--	{
--	  if ( real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0 )
--	    return;
--	}
--      else if ( signnegative(st0_ptr) )
--	{
-+	} else if (st0_tag == TW_NaN) {
-+		if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
-+			return;
-+	} else if (st0_tag == TW_Infinity) {
-+		if (st1_tag == TW_NaN) {
-+			if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
-+				return;
-+		} else if (signnegative(st0_ptr)) {
- #ifndef PECULIAR_486
--	  /* This should have higher priority than denormals, but... */
--	  if ( arith_invalid(1) < 0 )  /* log(-infinity) */
--	    return;
-+			/* This should have higher priority than denormals, but... */
-+			if (arith_invalid(1) < 0)	/* log(-infinity) */
-+				return;
- #endif /* PECULIAR_486 */
--	  if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
-+			if ((st1_tag == TW_Denormal)
-+			    && (denormal_operand() < 0))
-+				return;
- #ifdef PECULIAR_486
--	  /* Denormal operands actually get higher priority */
--	  if ( arith_invalid(1) < 0 )  /* log(-infinity) */
--	    return;
-+			/* Denormal operands actually get higher priority */
-+			if (arith_invalid(1) < 0)	/* log(-infinity) */
-+				return;
- #endif /* PECULIAR_486 */
--	}
--      else if ( st1_tag == TAG_Zero )
--	{
--	  /* log(infinity) */
--	  if ( arith_invalid(1) < 0 )
--	    return;
--	}
--	
--      /* st(1) must be valid here. */
-+		} else if (st1_tag == TAG_Zero) {
-+			/* log(infinity) */
-+			if (arith_invalid(1) < 0)
-+				return;
-+		}
+-# We also create a special relocatable object that should mirror the symbol
+-# table and layout of the linked DSO.  With ld -R we can then refer to
+-# these symbols in the kernel code rather than hand-coded addresses.
+-extra-y += vdso-syms.o
+-$(obj)/built-in.o: $(obj)/vdso-syms.o
+-$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
++targets += vdso32/vdso32.lds
++targets += $(vdso32.so-y:%=vdso32-%.so.dbg) $(vdso32.so-y:%=vdso32-%.so)
++targets += vdso32/note.o $(vdso32.so-y:%=vdso32/%.o)
  
--      else if ( (st1_tag == TW_Denormal) && (denormal_operand() < 0) )
--	return;
-+		/* st(1) must be valid here. */
-+
-+		else if ((st1_tag == TW_Denormal) && (denormal_operand() < 0))
-+			return;
+-SYSCFLAGS_vdso-syms.o = -r -d
+-$(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE
+-	$(call if_changed,syscall)
++extra-y	+= $(vdso32.so-y:%=vdso32-%.so)
  
--      /* The Manual says that log(Infinity) is invalid, but a real
--	 80486 sensibly says that it is o.k. */
--      else
--	{
--	  u_char sign = getsign(st1_ptr);
--	  FPU_copy_to_reg1(&CONST_INF, TAG_Special);
--	  setsign(st1_ptr, sign);
-+		/* The Manual says that log(Infinity) is invalid, but a real
-+		   80486 sensibly says that it is o.k. */
-+		else {
-+			u_char sign = getsign(st1_ptr);
-+			FPU_copy_to_reg1(&CONST_INF, TAG_Special);
-+			setsign(st1_ptr, sign);
-+		}
- 	}
--    }
- #ifdef PARANOID
--  else
--    {
--      EXCEPTION(EX_INTERNAL | 0x117);
--      return;
--    }
-+	else {
-+		EXCEPTION(EX_INTERNAL | 0x117);
-+		return;
-+	}
- #endif /* PARANOID */
++$(obj)/vdso32.o: $(vdso32.so-y:%=$(obj)/vdso32-%.so)
++
++KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
++$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
++$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): asflags-$(CONFIG_X86_64) += -m32
++
++$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
++					 $(obj)/vdso32/vdso32.lds \
++					 $(obj)/vdso32/note.o \
++					 $(obj)/vdso32/%.o
++	$(call if_changed,vdso)
++
++# Make vdso32-*-syms.lds from each image, and then make sure they match.
++# The only difference should be that some do not define VDSO32_SYSENTER_RETURN.
++
++targets += vdso32-syms.lds $(vdso32.so-y:%=vdso32-%-syms.lds)
++
++quiet_cmd_vdso32sym = VDSOSYM $@
++define cmd_vdso32sym
++	if LC_ALL=C sort -u $(filter-out FORCE,$^) > $(@D)/.tmp_$(@F) && \
++	   $(foreach H,$(filter-out FORCE,$^),\
++		     if grep -q VDSO32_SYSENTER_RETURN $H; \
++		     then diff -u $(@D)/.tmp_$(@F) $H; \
++		     else sed /VDSO32_SYSENTER_RETURN/d $(@D)/.tmp_$(@F) | \
++			  diff -u - $H; fi &&) : ;\
++	then mv -f $(@D)/.tmp_$(@F) $@; \
++	else rm -f $(@D)/.tmp_$(@F); exit 1; \
++	fi
++endef
++
++$(obj)/vdso32-syms.lds: $(vdso32.so-y:%=$(obj)/vdso32-%-syms.lds) FORCE
++	$(call if_changed,vdso32sym)
++
++#
++# The DSO images are built using a special linker script.
++#
++quiet_cmd_vdso = VDSO    $@
++      cmd_vdso = $(CC) -nostdlib -o $@ \
++		       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
++		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
++
++VDSO_LDFLAGS = -fPIC -shared $(call ld-option, -Wl$(comma)--hash-style=sysv)
++
++#
++# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
++#
+ quiet_cmd_vdso_install = INSTALL $@
+       cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+-vdso.so:
++$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
+ 	@mkdir -p $(MODLIB)/vdso
+ 	$(call cmd,vdso_install)
  
--  FPU_pop();
--  return;
-+	FPU_pop();
-+	return;
+-vdso_install: vdso.so
++PHONY += vdso_install $(vdso-install-y)
++vdso_install: $(vdso-install-y)
++
++clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80*
+diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
+index 5b54cdf..23476c2 100644
+--- a/arch/x86/vdso/vclock_gettime.c
++++ b/arch/x86/vdso/vclock_gettime.c
+@@ -19,7 +19,6 @@
+ #include <asm/hpet.h>
+ #include <asm/unistd.h>
+ #include <asm/io.h>
+-#include <asm/vgtod.h>
+ #include "vextern.h"
  
- }
+ #define gtod vdso_vsyscall_gtod_data
+diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
+new file mode 100644
+index 0000000..634a2cf
+--- /dev/null
++++ b/arch/x86/vdso/vdso-layout.lds.S
+@@ -0,0 +1,64 @@
++/*
++ * Linker script for vDSO.  This is an ELF shared object prelinked to
++ * its virtual address, and with only one read-only segment.
++ * This script controls its layout.
++ */
++
++SECTIONS
++{
++	. = VDSO_PRELINK + SIZEOF_HEADERS;
++
++	.hash		: { *(.hash) }			:text
++	.gnu.hash	: { *(.gnu.hash) }
++	.dynsym		: { *(.dynsym) }
++	.dynstr		: { *(.dynstr) }
++	.gnu.version	: { *(.gnu.version) }
++	.gnu.version_d	: { *(.gnu.version_d) }
++	.gnu.version_r	: { *(.gnu.version_r) }
++
++	.note		: { *(.note.*) }		:text	:note
++
++	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
++	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
++
++	.dynamic	: { *(.dynamic) }		:text	:dynamic
++
++	.rodata		: { *(.rodata*) }		:text
++	.data		: {
++	      *(.data*)
++	      *(.sdata*)
++	      *(.got.plt) *(.got)
++	      *(.gnu.linkonce.d.*)
++	      *(.bss*)
++	      *(.dynbss*)
++	      *(.gnu.linkonce.b.*)
++	}
++
++	.altinstructions	: { *(.altinstructions) }
++	.altinstr_replacement	: { *(.altinstr_replacement) }
++
++	/*
++	 * Align the actual code well away from the non-instruction data.
++	 * This is the best thing for the I-cache.
++	 */
++	. = ALIGN(0x100);
++
++	.text		: { *(.text*) }			:text	=0x90909090
++}
++
++/*
++ * Very old versions of ld do not recognize this name token; use the constant.
++ */
++#define PT_GNU_EH_FRAME	0x6474e550
++
++/*
++ * We must supply the ELF program headers explicitly to get just one
++ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
++ */
++PHDRS
++{
++	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
++	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
++	note		PT_NOTE		FLAGS(4);		/* PF_R */
++	eh_frame_hdr	PT_GNU_EH_FRAME;
++}
+diff --git a/arch/x86/vdso/vdso-start.S b/arch/x86/vdso/vdso-start.S
+deleted file mode 100644
+index 2dc2cdb..0000000
+--- a/arch/x86/vdso/vdso-start.S
++++ /dev/null
+@@ -1,2 +0,0 @@
+-	.globl vdso_kernel_start
+-vdso_kernel_start:
+diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
+index 667d324..4e5dd3b 100644
+--- a/arch/x86/vdso/vdso.lds.S
++++ b/arch/x86/vdso/vdso.lds.S
+@@ -1,79 +1,37 @@
+ /*
+- * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
+- * object prelinked to its virtual address, and with only one read-only
+- * segment (that fits in one page).  This script controls its layout.
++ * Linker script for 64-bit vDSO.
++ * We #include the file to define the layout details.
++ * Here we only choose the prelinked virtual address.
++ *
++ * This file defines the version script giving the user-exported symbols in
++ * the DSO.  We can define local symbols here called VDSO* to make their
++ * values visible using the asm-x86/vdso.h macros from the kernel proper.
+  */
+-#include <asm/asm-offsets.h>
+-#include "voffset.h"
  
+ #define VDSO_PRELINK 0xffffffffff700000
 -
- static void fscale(FPU_REG *st0_ptr, u_char st0_tag)
- {
--  FPU_REG *st1_ptr = &st(1);
--  u_char st1_tag = FPU_gettagi(1);
--  int old_cw = control_word;
--  u_char sign = getsign(st0_ptr);
--
--  clear_C1();
--  if ( !((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid)) )
--    {
--      long scale;
--      FPU_REG tmp;
--
--      /* Convert register for internal use. */
--      setexponent16(st0_ptr, exponent(st0_ptr));
--
--    valid_scale:
--
--      if ( exponent(st1_ptr) > 30 )
--	{
--	  /* 2^31 is far too large, would require 2^(2^30) or 2^(-2^30) */
--
--	  if ( signpositive(st1_ptr) )
--	    {
--	      EXCEPTION(EX_Overflow);
--	      FPU_copy_to_reg0(&CONST_INF, TAG_Special);
--	    }
--	  else
--	    {
--	      EXCEPTION(EX_Underflow);
--	      FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
--	    }
--	  setsign(st0_ptr, sign);
--	  return;
--	}
--
--      control_word &= ~CW_RC;
--      control_word |= RC_CHOP;
--      reg_copy(st1_ptr, &tmp);
--      FPU_round_to_int(&tmp, st1_tag);      /* This can never overflow here */
--      control_word = old_cw;
--      scale = signnegative(st1_ptr) ? -tmp.sigl : tmp.sigl;
--      scale += exponent16(st0_ptr);
--
--      setexponent16(st0_ptr, scale);
--
--      /* Use FPU_round() to properly detect under/overflow etc */
--      FPU_round(st0_ptr, 0, 0, control_word, sign);
--
--      return;
--    }
--
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
--  if ( st1_tag == TAG_Special )
--    st1_tag = FPU_Special(st1_ptr);
--
--  if ( (st0_tag == TAG_Valid) || (st0_tag == TW_Denormal) )
--    {
--      switch ( st1_tag )
--	{
--	case TAG_Valid:
--	  /* st(0) must be a denormal */
--	  if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
+-SECTIONS
+-{
+-  . = VDSO_PRELINK + SIZEOF_HEADERS;
 -
--	  FPU_to_exp16(st0_ptr, st0_ptr);  /* Will not be left on stack */
--	  goto valid_scale;
+-  .hash           : { *(.hash) }		:text
+-  .gnu.hash       : { *(.gnu.hash) }
+-  .dynsym         : { *(.dynsym) }
+-  .dynstr         : { *(.dynstr) }
+-  .gnu.version    : { *(.gnu.version) }
+-  .gnu.version_d  : { *(.gnu.version_d) }
+-  .gnu.version_r  : { *(.gnu.version_r) }
 -
--	case TAG_Zero:
--	  if ( st0_tag == TW_Denormal )
--	    denormal_operand();
--	  return;
+-  /* This linker script is used both with -r and with -shared.
+-     For the layouts to match, we need to skip more than enough
+-     space for the dynamic symbol table et al.  If this amount
+-     is insufficient, ld -shared will barf.  Just increase it here.  */
+-  . = VDSO_PRELINK + VDSO_TEXT_OFFSET;
 -
--	case TW_Denormal:
--	  denormal_operand();
--	  return;
+-  .text           : { *(.text*) }		:text
+-  .rodata         : { *(.rodata*) }		:text
+-  .data		  : {
+-	*(.data*)
+-	*(.sdata*)
+-	*(.bss*)
+-	*(.dynbss*)
+-  }						:text
 -
--	case TW_Infinity:
--	  if ( (st0_tag == TW_Denormal) && (denormal_operand() < 0) )
--	    return;
+-  .altinstructions : { *(.altinstructions) }		:text
+-  .altinstr_replacement  : { *(.altinstr_replacement) }	:text
 -
--	  if ( signpositive(st1_ptr) )
--	    FPU_copy_to_reg0(&CONST_INF, TAG_Special);
--	  else
--	    FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
--	  setsign(st0_ptr, sign);
--	  return;
-+	FPU_REG *st1_ptr = &st(1);
-+	u_char st1_tag = FPU_gettagi(1);
-+	int old_cw = control_word;
-+	u_char sign = getsign(st0_ptr);
+-  .note		  : { *(.note.*) }		:text :note
+-  .eh_frame_hdr   : { *(.eh_frame_hdr) }	:text :eh_frame_hdr
+-  .eh_frame       : { KEEP (*(.eh_frame)) }	:text
+-  .dynamic        : { *(.dynamic) }		:text :dynamic
+-  .useless        : {
+-  	*(.got.plt) *(.got)
+-	*(.gnu.linkonce.d.*)
+-	*(.gnu.linkonce.b.*)
+-  }						:text
+-}
++#include "vdso-layout.lds.S"
+ 
+ /*
+- * We must supply the ELF program headers explicitly to get just one
+- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
++ * This controls what userland symbols we export from the vDSO.
+  */
+-PHDRS
+-{
+-  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+-  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+-  note PT_NOTE FLAGS(4); /* PF_R */
+-  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
++VERSION {
++	LINUX_2.6 {
++	global:
++		clock_gettime;
++		__vdso_clock_gettime;
++		gettimeofday;
++		__vdso_gettimeofday;
++		getcpu;
++		__vdso_getcpu;
++	local: *;
++	};
+ }
+ 
++VDSO64_PRELINK = VDSO_PRELINK;
++
+ /*
+- * This controls what symbols we export from the DSO.
++ * Define VDSO64_x for each VEXTERN(x), for use via VDSO64_SYMBOL.
+  */
+-VERSION
+-{
+-  LINUX_2.6 {
+-    global:
+-	clock_gettime;
+-	__vdso_clock_gettime;
+-	gettimeofday;
+-	__vdso_gettimeofday;
+-	getcpu;
+-	__vdso_getcpu;
+-    local: *;
+-  };
+-}
++#define VEXTERN(x)	VDSO64_ ## x = vdso_ ## x;
++#include "vextern.h"
++#undef	VEXTERN
+diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
+new file mode 100644
+index 0000000..348f134
+--- /dev/null
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -0,0 +1,444 @@
++/*
++ * (C) Copyright 2002 Linus Torvalds
++ * Portions based on the vdso-randomization code from exec-shield:
++ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
++ *
++ * This file contains the needed initializations to support sysenter.
++ */
++
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/thread_info.h>
++#include <linux/sched.h>
++#include <linux/gfp.h>
++#include <linux/string.h>
++#include <linux/elf.h>
++#include <linux/mm.h>
++#include <linux/err.h>
++#include <linux/module.h>
++
++#include <asm/cpufeature.h>
++#include <asm/msr.h>
++#include <asm/pgtable.h>
++#include <asm/unistd.h>
++#include <asm/elf.h>
++#include <asm/tlbflush.h>
++#include <asm/vdso.h>
++#include <asm/proto.h>
++
++enum {
++	VDSO_DISABLED = 0,
++	VDSO_ENABLED = 1,
++	VDSO_COMPAT = 2,
++};
++
++#ifdef CONFIG_COMPAT_VDSO
++#define VDSO_DEFAULT	VDSO_COMPAT
++#else
++#define VDSO_DEFAULT	VDSO_ENABLED
++#endif
++
++#ifdef CONFIG_X86_64
++#define vdso_enabled			sysctl_vsyscall32
++#define arch_setup_additional_pages	syscall32_setup_pages
++#endif
++
++/*
++ * This is the difference between the prelinked addresses in the vDSO images
++ * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
++ * in the user address space.
++ */
++#define VDSO_ADDR_ADJUST	(VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
++
++/*
++ * Should the kernel map a VDSO page into processes and pass its
++ * address down to glibc upon exec()?
++ */
++unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
++
++static int __init vdso_setup(char *s)
++{
++	vdso_enabled = simple_strtoul(s, NULL, 0);
++
++	return 1;
++}
++
++/*
++ * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
++ * behavior on both 64-bit and 32-bit kernels.
++ * On 32-bit kernels, vdso=[012] means the same thing.
++ */
++__setup("vdso32=", vdso_setup);
++
++#ifdef CONFIG_X86_32
++__setup_param("vdso=", vdso32_setup, vdso_setup, 0);
++
++EXPORT_SYMBOL_GPL(vdso_enabled);
++#endif
++
++static __init void reloc_symtab(Elf32_Ehdr *ehdr,
++				unsigned offset, unsigned size)
++{
++	Elf32_Sym *sym = (void *)ehdr + offset;
++	unsigned nsym = size / sizeof(*sym);
++	unsigned i;
++
++	for(i = 0; i < nsym; i++, sym++) {
++		if (sym->st_shndx == SHN_UNDEF ||
++		    sym->st_shndx == SHN_ABS)
++			continue;  /* skip */
++
++		if (sym->st_shndx > SHN_LORESERVE) {
++			printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
++			       sym->st_shndx);
++			continue;
++		}
++
++		switch(ELF_ST_TYPE(sym->st_info)) {
++		case STT_OBJECT:
++		case STT_FUNC:
++		case STT_SECTION:
++		case STT_FILE:
++			sym->st_value += VDSO_ADDR_ADJUST;
++		}
++	}
++}
++
++static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
++{
++	Elf32_Dyn *dyn = (void *)ehdr + offset;
++
++	for(; dyn->d_tag != DT_NULL; dyn++)
++		switch(dyn->d_tag) {
++		case DT_PLTGOT:
++		case DT_HASH:
++		case DT_STRTAB:
++		case DT_SYMTAB:
++		case DT_RELA:
++		case DT_INIT:
++		case DT_FINI:
++		case DT_REL:
++		case DT_DEBUG:
++		case DT_JMPREL:
++		case DT_VERSYM:
++		case DT_VERDEF:
++		case DT_VERNEED:
++		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
++			/* definitely pointers needing relocation */
++			dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
++			break;
++
++		case DT_ENCODING ... OLD_DT_LOOS-1:
++		case DT_LOOS ... DT_HIOS-1:
++			/* Tags above DT_ENCODING are pointers if
++			   they're even */
++			if (dyn->d_tag >= DT_ENCODING &&
++			    (dyn->d_tag & 1) == 0)
++				dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
++			break;
++
++		case DT_VERDEFNUM:
++		case DT_VERNEEDNUM:
++		case DT_FLAGS_1:
++		case DT_RELACOUNT:
++		case DT_RELCOUNT:
++		case DT_VALRNGLO ... DT_VALRNGHI:
++			/* definitely not pointers */
++			break;
++
++		case OLD_DT_LOOS ... DT_LOOS-1:
++		case DT_HIOS ... DT_VALRNGLO-1:
++		default:
++			if (dyn->d_tag > DT_ENCODING)
++				printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
++				       dyn->d_tag);
++			break;
++		}
++}
++
++static __init void relocate_vdso(Elf32_Ehdr *ehdr)
++{
++	Elf32_Phdr *phdr;
++	Elf32_Shdr *shdr;
++	int i;
++
++	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
++	       !elf_check_arch_ia32(ehdr) ||
++	       ehdr->e_type != ET_DYN);
++
++	ehdr->e_entry += VDSO_ADDR_ADJUST;
++
++	/* rebase phdrs */
++	phdr = (void *)ehdr + ehdr->e_phoff;
++	for (i = 0; i < ehdr->e_phnum; i++) {
++		phdr[i].p_vaddr += VDSO_ADDR_ADJUST;
++
++		/* relocate dynamic stuff */
++		if (phdr[i].p_type == PT_DYNAMIC)
++			reloc_dyn(ehdr, phdr[i].p_offset);
++	}
++
++	/* rebase sections */
++	shdr = (void *)ehdr + ehdr->e_shoff;
++	for(i = 0; i < ehdr->e_shnum; i++) {
++		if (!(shdr[i].sh_flags & SHF_ALLOC))
++			continue;
++
++		shdr[i].sh_addr += VDSO_ADDR_ADJUST;
++
++		if (shdr[i].sh_type == SHT_SYMTAB ||
++		    shdr[i].sh_type == SHT_DYNSYM)
++			reloc_symtab(ehdr, shdr[i].sh_offset,
++				     shdr[i].sh_size);
++	}
++}
++
++/*
++ * These symbols are defined by vdso32.S to mark the bounds
++ * of the ELF DSO images included therein.
++ */
++extern const char vdso32_default_start, vdso32_default_end;
++extern const char vdso32_sysenter_start, vdso32_sysenter_end;
++static struct page *vdso32_pages[1];
++
++#ifdef CONFIG_X86_64
++
++static int use_sysenter __read_mostly = -1;
++
++#define	vdso32_sysenter()	(use_sysenter > 0)
++
++/* May not be __init: called during resume */
++void syscall32_cpu_init(void)
++{
++	if (use_sysenter < 0)
++		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
++
++	/* Load these always in case some future AMD CPU supports
++	   SYSENTER from compat mode too. */
++	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
++	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
++	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
++
++	wrmsrl(MSR_CSTAR, ia32_cstar_target);
++}
++
++#define compat_uses_vma		1
++
++static inline void map_compat_vdso(int map)
++{
++}
++
++#else  /* CONFIG_X86_32 */
++
++#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SEP))
++
++void enable_sep_cpu(void)
++{
++	int cpu = get_cpu();
++	struct tss_struct *tss = &per_cpu(init_tss, cpu);
++
++	if (!boot_cpu_has(X86_FEATURE_SEP)) {
++		put_cpu();
++		return;
++	}
++
++	tss->x86_tss.ss1 = __KERNEL_CS;
++	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
++	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
++	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
++	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
++	put_cpu();	
++}
++
++static struct vm_area_struct gate_vma;
++
++static int __init gate_vma_init(void)
++{
++	gate_vma.vm_mm = NULL;
++	gate_vma.vm_start = FIXADDR_USER_START;
++	gate_vma.vm_end = FIXADDR_USER_END;
++	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
++	gate_vma.vm_page_prot = __P101;
++	/*
++	 * Make sure the vDSO gets into every core dump.
++	 * Dumping its contents makes post-mortem fully interpretable later
++	 * without matching up the same kernel and hardware config to see
++	 * what PC values meant.
++	 */
++	gate_vma.vm_flags |= VM_ALWAYSDUMP;
++	return 0;
++}
++
++#define compat_uses_vma		0
++
++static void map_compat_vdso(int map)
++{
++	static int vdso_mapped;
++
++	if (map == vdso_mapped)
++		return;
++
++	vdso_mapped = map;
++
++	__set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
++		     map ? PAGE_READONLY_EXEC : PAGE_NONE);
++
++	/* flush stray tlbs */
++	flush_tlb_all();
++}
++
++#endif	/* CONFIG_X86_64 */
++
++int __init sysenter_setup(void)
++{
++	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
++	const void *vsyscall;
++	size_t vsyscall_len;
++
++	vdso32_pages[0] = virt_to_page(syscall_page);
++
++#ifdef CONFIG_X86_32
++	gate_vma_init();
++
++	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
++#endif
++
++	if (!vdso32_sysenter()) {
++		vsyscall = &vdso32_default_start;
++		vsyscall_len = &vdso32_default_end - &vdso32_default_start;
++	} else {
++		vsyscall = &vdso32_sysenter_start;
++		vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
++	}
++
++	memcpy(syscall_page, vsyscall, vsyscall_len);
++	relocate_vdso(syscall_page);
++
++	return 0;
++}
++
++/* Setup a VMA at program startup for the vsyscall page */
++int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
++{
++	struct mm_struct *mm = current->mm;
++	unsigned long addr;
++	int ret = 0;
++	bool compat;
++
++	down_write(&mm->mmap_sem);
++
++	/* Test compat mode once here, in case someone
++	   changes it via sysctl */
++	compat = (vdso_enabled == VDSO_COMPAT);
++
++	map_compat_vdso(compat);
++
++	if (compat)
++		addr = VDSO_HIGH_BASE;
++	else {
++		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++		if (IS_ERR_VALUE(addr)) {
++			ret = addr;
++			goto up_fail;
++		}
++	}
++
++	if (compat_uses_vma || !compat) {
++		/*
++		 * MAYWRITE to allow gdb to COW and set breakpoints
++		 *
++		 * Make sure the vDSO gets into every core dump.
++		 * Dumping its contents makes post-mortem fully
++		 * interpretable later without matching up the same
++		 * kernel and hardware config to see what PC values
++		 * meant.
++		 */
++		ret = install_special_mapping(mm, addr, PAGE_SIZE,
++					      VM_READ|VM_EXEC|
++					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
++					      VM_ALWAYSDUMP,
++					      vdso32_pages);
++
++		if (ret)
++			goto up_fail;
++	}
++
++	current->mm->context.vdso = (void *)addr;
++	current_thread_info()->sysenter_return =
++		VDSO32_SYMBOL(addr, SYSENTER_RETURN);
++
++  up_fail:
++	up_write(&mm->mmap_sem);
++
++	return ret;
++}
++
++#ifdef CONFIG_X86_64
++
++__initcall(sysenter_setup);
++
++#ifdef CONFIG_SYSCTL
++/* Register vsyscall32 into the ABI table */
++#include <linux/sysctl.h>
++
++static ctl_table abi_table2[] = {
++	{
++		.procname	= "vsyscall32",
++		.data		= &sysctl_vsyscall32,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec
++	},
++	{}
++};
++
++static ctl_table abi_root_table2[] = {
++	{
++		.ctl_name = CTL_ABI,
++		.procname = "abi",
++		.mode = 0555,
++		.child = abi_table2
++	},
++	{}
++};
++
++static __init int ia32_binfmt_init(void)
++{
++	register_sysctl_table(abi_root_table2);
++	return 0;
++}
++__initcall(ia32_binfmt_init);
++#endif
++
++#else  /* CONFIG_X86_32 */
++
++const char *arch_vma_name(struct vm_area_struct *vma)
++{
++	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++		return "[vdso]";
++	return NULL;
++}
++
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++	struct mm_struct *mm = tsk->mm;
++
++	/* Check to see if this task was created in compat vdso mode */
++	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
++		return &gate_vma;
++	return NULL;
++}
++
++int in_gate_area(struct task_struct *task, unsigned long addr)
++{
++	const struct vm_area_struct *vma = get_gate_vma(task);
++
++	return vma && addr >= vma->vm_start && addr < vma->vm_end;
++}
++
++int in_gate_area_no_task(unsigned long addr)
++{
++	return 0;
++}
++
++#endif	/* CONFIG_X86_64 */
+diff --git a/arch/x86/vdso/vdso32.S b/arch/x86/vdso/vdso32.S
+new file mode 100644
+index 0000000..1e36f72
+--- /dev/null
++++ b/arch/x86/vdso/vdso32.S
+@@ -0,0 +1,19 @@
++#include <linux/init.h>
++
++__INITDATA
++
++	.globl vdso32_default_start, vdso32_default_end
++vdso32_default_start:
++#ifdef CONFIG_X86_32
++	.incbin "arch/x86/vdso/vdso32-int80.so"
++#else
++	.incbin "arch/x86/vdso/vdso32-syscall.so"
++#endif
++vdso32_default_end:
++
++	.globl vdso32_sysenter_start, vdso32_sysenter_end
++vdso32_sysenter_start:
++	.incbin "arch/x86/vdso/vdso32-sysenter.so"
++vdso32_sysenter_end:
 +
-+	clear_C1();
-+	if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
-+		long scale;
-+		FPU_REG tmp;
++__FINIT
+diff --git a/arch/x86/vdso/vdso32/.gitignore b/arch/x86/vdso/vdso32/.gitignore
+new file mode 100644
+index 0000000..e45fba9
+--- /dev/null
++++ b/arch/x86/vdso/vdso32/.gitignore
+@@ -0,0 +1 @@
++vdso32.lds
+diff --git a/arch/x86/vdso/vdso32/int80.S b/arch/x86/vdso/vdso32/int80.S
+new file mode 100644
+index 0000000..b15b7c0
+--- /dev/null
++++ b/arch/x86/vdso/vdso32/int80.S
+@@ -0,0 +1,56 @@
++/*
++ * Code for the vDSO.  This version uses the old int $0x80 method.
++ *
++ * First get the common code for the sigreturn entry points.
++ * This must come first.
++ */
++#include "sigreturn.S"
 +
-+		/* Convert register for internal use. */
-+		setexponent16(st0_ptr, exponent(st0_ptr));
++	.text
++	.globl __kernel_vsyscall
++	.type __kernel_vsyscall,@function
++	ALIGN
++__kernel_vsyscall:
++.LSTART_vsyscall:
++	int $0x80
++	ret
++.LEND_vsyscall:
++	.size __kernel_vsyscall,.-.LSTART_vsyscall
++	.previous
 +
-+	      valid_scale:
++	.section .eh_frame,"a",@progbits
++.LSTARTFRAMEDLSI:
++	.long .LENDCIEDLSI-.LSTARTCIEDLSI
++.LSTARTCIEDLSI:
++	.long 0			/* CIE ID */
++	.byte 1			/* Version number */
++	.string "zR"		/* NUL-terminated augmentation string */
++	.uleb128 1		/* Code alignment factor */
++	.sleb128 -4		/* Data alignment factor */
++	.byte 8			/* Return address register column */
++	.uleb128 1		/* Augmentation value length */
++	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
++	.byte 0x0c		/* DW_CFA_def_cfa */
++	.uleb128 4
++	.uleb128 4
++	.byte 0x88		/* DW_CFA_offset, column 0x8 */
++	.uleb128 1
++	.align 4
++.LENDCIEDLSI:
++	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
++.LSTARTFDEDLSI:
++	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
++	.long .LSTART_vsyscall-.	/* PC-relative start address */
++	.long .LEND_vsyscall-.LSTART_vsyscall
++	.uleb128 0
++	.align 4
++.LENDFDEDLSI:
++	.previous
 +
-+		if (exponent(st1_ptr) > 30) {
-+			/* 2^31 is far too large, would require 2^(2^30) or 2^(-2^30) */
++	/*
++	 * Pad out the segment to match the size of the sysenter.S version.
++	 */
++VDSO32_vsyscall_eh_frame_size = 0x40
++	.section .data,"aw",@progbits
++	.space VDSO32_vsyscall_eh_frame_size-(.LENDFDEDLSI-.LSTARTFRAMEDLSI), 0
++	.previous
+diff --git a/arch/x86/vdso/vdso32/note.S b/arch/x86/vdso/vdso32/note.S
+new file mode 100644
+index 0000000..c83f257
+--- /dev/null
++++ b/arch/x86/vdso/vdso32/note.S
+@@ -0,0 +1,44 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ */
 +
-+			if (signpositive(st1_ptr)) {
-+				EXCEPTION(EX_Overflow);
-+				FPU_copy_to_reg0(&CONST_INF, TAG_Special);
-+			} else {
-+				EXCEPTION(EX_Underflow);
-+				FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
-+			}
-+			setsign(st0_ptr, sign);
-+			return;
-+		}
- 
--	case TW_NaN:
--	  real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
--	  return;
--	}
--    }
--  else if ( st0_tag == TAG_Zero )
--    {
--      switch ( st1_tag )
--	{
--	case TAG_Valid:
--	case TAG_Zero:
--	  return;
-+		control_word &= ~CW_RC;
-+		control_word |= RC_CHOP;
-+		reg_copy(st1_ptr, &tmp);
-+		FPU_round_to_int(&tmp, st1_tag);	/* This can never overflow here */
-+		control_word = old_cw;
-+		scale = signnegative(st1_ptr) ? -tmp.sigl : tmp.sigl;
-+		scale += exponent16(st0_ptr);
- 
--	case TW_Denormal:
--	  denormal_operand();
--	  return;
-+		setexponent16(st0_ptr, scale);
- 
--	case TW_Infinity:
--	  if ( signpositive(st1_ptr) )
--	    arith_invalid(0); /* Zero scaled by +Infinity */
--	  return;
-+		/* Use FPU_round() to properly detect under/overflow etc */
-+		FPU_round(st0_ptr, 0, 0, control_word, sign);
- 
--	case TW_NaN:
--	  real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
--	  return;
-+		return;
- 	}
--    }
--  else if ( st0_tag == TW_Infinity )
--    {
--      switch ( st1_tag )
--	{
--	case TAG_Valid:
--	case TAG_Zero:
--	  return;
--
--	case TW_Denormal:
--	  denormal_operand();
--	  return;
- 
--	case TW_Infinity:
--	  if ( signnegative(st1_ptr) )
--	    arith_invalid(0); /* Infinity scaled by -Infinity */
--	  return;
--
--	case TW_NaN:
--	  real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
--	  return;
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
-+	if (st1_tag == TAG_Special)
-+		st1_tag = FPU_Special(st1_ptr);
++#include <linux/version.h>
++#include <linux/elfnote.h>
 +
-+	if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
-+		switch (st1_tag) {
-+		case TAG_Valid:
-+			/* st(0) must be a denormal */
-+			if ((st0_tag == TW_Denormal)
-+			    && (denormal_operand() < 0))
-+				return;
++/* Ideally this would use UTS_NAME, but using a quoted string here
++   doesn't work. Remember to change this when changing the
++   kernel's name. */
++ELFNOTE_START(Linux, 0, "a")
++	.long LINUX_VERSION_CODE
++ELFNOTE_END
 +
-+			FPU_to_exp16(st0_ptr, st0_ptr);	/* Will not be left on stack */
-+			goto valid_scale;
++#ifdef CONFIG_XEN
++/*
++ * Add a special note telling glibc's dynamic linker a fake hardware
++ * flavor that it will use to choose the search path for libraries in the
++ * same way it uses real hardware capabilities like "mmx".
++ * We supply "nosegneg" as the fake capability, to indicate that we
++ * do not like negative offsets in instructions using segment overrides,
++ * since we implement those inefficiently.  This makes it possible to
++ * install libraries optimized to avoid those access patterns in someplace
++ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
++ * corresponding to the bits here is needed to make ldconfig work right.
++ * It should contain:
++ *	hwcap 1 nosegneg
++ * to match the mapping of bit to name that we give here.
++ *
++ * At runtime, the fake hardware feature will be considered to be present
++ * if its bit is set in the mask word.  So, we start with the mask 0, and
++ * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
++ */
 +
-+		case TAG_Zero:
-+			if (st0_tag == TW_Denormal)
-+				denormal_operand();
-+			return;
++#include "../../xen/vdso.h"	/* Defines VDSO_NOTE_NONEGSEG_BIT.  */
 +
-+		case TW_Denormal:
-+			denormal_operand();
-+			return;
++ELFNOTE_START(GNU, 2, "a")
++	.long 1			/* ncaps */
++VDSO32_NOTE_MASK:		/* Symbol used by arch/x86/xen/setup.c */
++	.long 0			/* mask */
++	.byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg"	/* bit, name */
++ELFNOTE_END
++#endif
+diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
+new file mode 100644
+index 0000000..31776d0
+--- /dev/null
++++ b/arch/x86/vdso/vdso32/sigreturn.S
+@@ -0,0 +1,144 @@
++/*
++ * Common code for the sigreturn entry points in vDSO images.
++ * So far this code is the same for both int80 and sysenter versions.
++ * This file is #include'd by int80.S et al to define them first thing.
++ * The kernel assumes that the addresses of these routines are constant
++ * for all vDSO implementations.
++ */
 +
-+		case TW_Infinity:
-+			if ((st0_tag == TW_Denormal)
-+			    && (denormal_operand() < 0))
-+				return;
++#include <linux/linkage.h>
++#include <asm/unistd_32.h>
++#include <asm/asm-offsets.h>
 +
-+			if (signpositive(st1_ptr))
-+				FPU_copy_to_reg0(&CONST_INF, TAG_Special);
-+			else
-+				FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
-+			setsign(st0_ptr, sign);
-+			return;
++#ifndef SYSCALL_ENTER_KERNEL
++#define	SYSCALL_ENTER_KERNEL	int $0x80
++#endif
 +
-+		case TW_NaN:
-+			real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
-+			return;
-+		}
-+	} else if (st0_tag == TAG_Zero) {
-+		switch (st1_tag) {
-+		case TAG_Valid:
-+		case TAG_Zero:
-+			return;
++	.text
++	.globl __kernel_sigreturn
++	.type __kernel_sigreturn,@function
++	ALIGN
++__kernel_sigreturn:
++.LSTART_sigreturn:
++	popl %eax		/* XXX does this mean it needs unwind info? */
++	movl $__NR_sigreturn, %eax
++	SYSCALL_ENTER_KERNEL
++.LEND_sigreturn:
++	nop
++	.size __kernel_sigreturn,.-.LSTART_sigreturn
 +
-+		case TW_Denormal:
-+			denormal_operand();
-+			return;
++	.globl __kernel_rt_sigreturn
++	.type __kernel_rt_sigreturn,@function
++	ALIGN
++__kernel_rt_sigreturn:
++.LSTART_rt_sigreturn:
++	movl $__NR_rt_sigreturn, %eax
++	SYSCALL_ENTER_KERNEL
++.LEND_rt_sigreturn:
++	nop
++	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
++	.previous
 +
-+		case TW_Infinity:
-+			if (signpositive(st1_ptr))
-+				arith_invalid(0);	/* Zero scaled by +Infinity */
-+			return;
++	.section .eh_frame,"a",@progbits
++.LSTARTFRAMEDLSI1:
++	.long .LENDCIEDLSI1-.LSTARTCIEDLSI1
++.LSTARTCIEDLSI1:
++	.long 0			/* CIE ID */
++	.byte 1			/* Version number */
++	.string "zRS"		/* NUL-terminated augmentation string */
++	.uleb128 1		/* Code alignment factor */
++	.sleb128 -4		/* Data alignment factor */
++	.byte 8			/* Return address register column */
++	.uleb128 1		/* Augmentation value length */
++	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
++	.byte 0			/* DW_CFA_nop */
++	.align 4
++.LENDCIEDLSI1:
++	.long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */
++.LSTARTFDEDLSI1:
++	.long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */
++	/* HACK: The dwarf2 unwind routines will subtract 1 from the
++	   return address to get an address in the middle of the
++	   presumed call instruction.  Since we didn't get here via
++	   a call, we need to include the nop before the real start
++	   to make up for it.  */
++	.long .LSTART_sigreturn-1-.	/* PC-relative start address */
++	.long .LEND_sigreturn-.LSTART_sigreturn+1
++	.uleb128 0			/* Augmentation */
++	/* What follows are the instructions for the table generation.
++	   We record the locations of each register saved.  This is
++	   complicated by the fact that the "CFA" is always assumed to
++	   be the value of the stack pointer in the caller.  This means
++	   that we must define the CFA of this body of code to be the
++	   saved value of the stack pointer in the sigcontext.  Which
++	   also means that there is no fixed relation to the other
++	   saved registers, which means that we must use DW_CFA_expression
++	   to compute their addresses.  It also means that when we
++	   adjust the stack with the popl, we have to do it all over again.  */
 +
-+		case TW_NaN:
-+			real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
-+			return;
-+		}
-+	} else if (st0_tag == TW_Infinity) {
-+		switch (st1_tag) {
-+		case TAG_Valid:
-+		case TAG_Zero:
-+			return;
++#define do_cfa_expr(offset)						\
++	.byte 0x0f;			/* DW_CFA_def_cfa_expression */	\
++	.uleb128 1f-0f;			/*   length */			\
++0:	.byte 0x74;			/*     DW_OP_breg4 */		\
++	.sleb128 offset;		/*      offset */		\
++	.byte 0x06;			/*     DW_OP_deref */		\
++1:
 +
-+		case TW_Denormal:
-+			denormal_operand();
-+			return;
++#define do_expr(regno, offset)						\
++	.byte 0x10;			/* DW_CFA_expression */		\
++	.uleb128 regno;			/*   regno */			\
++	.uleb128 1f-0f;			/*   length */			\
++0:	.byte 0x74;			/*     DW_OP_breg4 */		\
++	.sleb128 offset;		/*       offset */		\
++1:
 +
-+		case TW_Infinity:
-+			if (signnegative(st1_ptr))
-+				arith_invalid(0);	/* Infinity scaled by -Infinity */
-+			return;
++	do_cfa_expr(IA32_SIGCONTEXT_sp+4)
++	do_expr(0, IA32_SIGCONTEXT_ax+4)
++	do_expr(1, IA32_SIGCONTEXT_cx+4)
++	do_expr(2, IA32_SIGCONTEXT_dx+4)
++	do_expr(3, IA32_SIGCONTEXT_bx+4)
++	do_expr(5, IA32_SIGCONTEXT_bp+4)
++	do_expr(6, IA32_SIGCONTEXT_si+4)
++	do_expr(7, IA32_SIGCONTEXT_di+4)
++	do_expr(8, IA32_SIGCONTEXT_ip+4)
 +
-+		case TW_NaN:
-+			real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
-+			return;
-+		}
-+	} else if (st0_tag == TW_NaN) {
-+		if (st1_tag != TAG_Empty) {
-+			real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
-+			return;
-+		}
- 	}
--    }
--  else if ( st0_tag == TW_NaN )
--    {
--      if ( st1_tag != TAG_Empty )
--	{ real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr); return; }
--    }
--
- #ifdef PARANOID
--  if ( !((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) )
--    {
--      EXCEPTION(EX_INTERNAL | 0x115);
--      return;
--    }
-+	if (!((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty))) {
-+		EXCEPTION(EX_INTERNAL | 0x115);
-+		return;
-+	}
- #endif
- 
--  /* At least one of st(0), st(1) must be empty */
--  FPU_stack_underflow();
-+	/* At least one of st(0), st(1) must be empty */
-+	FPU_stack_underflow();
- 
- }
- 
--
- /*---------------------------------------------------------------------------*/
- 
- static FUNC_ST0 const trig_table_a[] = {
--  f2xm1, fyl2x, fptan, fpatan,
--  fxtract, fprem1, (FUNC_ST0)fdecstp, (FUNC_ST0)fincstp
-+	f2xm1, fyl2x, fptan, fpatan,
-+	fxtract, fprem1, (FUNC_ST0) fdecstp, (FUNC_ST0) fincstp
- };
- 
- void FPU_triga(void)
- {
--  (trig_table_a[FPU_rm])(&st(0), FPU_gettag0());
-+	(trig_table_a[FPU_rm]) (&st(0), FPU_gettag0());
- }
- 
--
--static FUNC_ST0 const trig_table_b[] =
--  {
--    fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0)fsin, fcos
--  };
-+static FUNC_ST0 const trig_table_b[] = {
-+	fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0) fsin, fcos
-+};
- 
- void FPU_trigb(void)
- {
--  (trig_table_b[FPU_rm])(&st(0), FPU_gettag0());
-+	(trig_table_b[FPU_rm]) (&st(0), FPU_gettag0());
- }
-diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
-index 2e2c51a..d701e2b 100644
---- a/arch/x86/math-emu/get_address.c
-+++ b/arch/x86/math-emu/get_address.c
-@@ -17,7 +17,6 @@
-  |    other processes using the emulator while swapping is in progress.      |
-  +---------------------------------------------------------------------------*/
- 
--
- #include <linux/stddef.h>
- 
- #include <asm/uaccess.h>
-@@ -27,31 +26,30 @@
- #include "exception.h"
- #include "fpu_emu.h"
- 
--
- #define FPU_WRITE_BIT 0x10
- 
- static int reg_offset[] = {
--	offsetof(struct info,___eax),
--	offsetof(struct info,___ecx),
--	offsetof(struct info,___edx),
--	offsetof(struct info,___ebx),
--	offsetof(struct info,___esp),
--	offsetof(struct info,___ebp),
--	offsetof(struct info,___esi),
--	offsetof(struct info,___edi)
-+	offsetof(struct info, ___eax),
-+	offsetof(struct info, ___ecx),
-+	offsetof(struct info, ___edx),
-+	offsetof(struct info, ___ebx),
-+	offsetof(struct info, ___esp),
-+	offsetof(struct info, ___ebp),
-+	offsetof(struct info, ___esi),
-+	offsetof(struct info, ___edi)
- };
- 
- #define REG_(x) (*(long *)(reg_offset[(x)]+(u_char *) FPU_info))
- 
- static int reg_offset_vm86[] = {
--	offsetof(struct info,___cs),
--	offsetof(struct info,___vm86_ds),
--	offsetof(struct info,___vm86_es),
--	offsetof(struct info,___vm86_fs),
--	offsetof(struct info,___vm86_gs),
--	offsetof(struct info,___ss),
--	offsetof(struct info,___vm86_ds)
--      };
-+	offsetof(struct info, ___cs),
-+	offsetof(struct info, ___vm86_ds),
-+	offsetof(struct info, ___vm86_es),
-+	offsetof(struct info, ___vm86_fs),
-+	offsetof(struct info, ___vm86_gs),
-+	offsetof(struct info, ___ss),
-+	offsetof(struct info, ___vm86_ds)
-+};
- 
- #define VM86_REG_(x) (*(unsigned short *) \
- 		      (reg_offset_vm86[((unsigned)x)]+(u_char *) FPU_info))
-@@ -60,158 +58,141 @@ static int reg_offset_vm86[] = {
- #define ___GS ___ds
- 
- static int reg_offset_pm[] = {
--	offsetof(struct info,___cs),
--	offsetof(struct info,___ds),
--	offsetof(struct info,___es),
--	offsetof(struct info,___fs),
--	offsetof(struct info,___GS),
--	offsetof(struct info,___ss),
--	offsetof(struct info,___ds)
--      };
-+	offsetof(struct info, ___cs),
-+	offsetof(struct info, ___ds),
-+	offsetof(struct info, ___es),
-+	offsetof(struct info, ___fs),
-+	offsetof(struct info, ___GS),
-+	offsetof(struct info, ___ss),
-+	offsetof(struct info, ___ds)
-+};
- 
- #define PM_REG_(x) (*(unsigned short *) \
- 		      (reg_offset_pm[((unsigned)x)]+(u_char *) FPU_info))
- 
--
- /* Decode the SIB byte. This function assumes mod != 0 */
- static int sib(int mod, unsigned long *fpu_eip)
- {
--  u_char ss,index,base;
--  long offset;
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_code_access_ok(1);
--  FPU_get_user(base, (u_char __user *) (*fpu_eip));   /* The SIB byte */
--  RE_ENTRANT_CHECK_ON;
--  (*fpu_eip)++;
--  ss = base >> 6;
--  index = (base >> 3) & 7;
--  base &= 7;
--
--  if ((mod == 0) && (base == 5))
--    offset = 0;              /* No base register */
--  else
--    offset = REG_(base);
--
--  if (index == 4)
--    {
--      /* No index register */
--      /* A non-zero ss is illegal */
--      if ( ss )
--	EXCEPTION(EX_Invalid);
--    }
--  else
--    {
--      offset += (REG_(index)) << ss;
--    }
--
--  if (mod == 1)
--    {
--      /* 8 bit signed displacement */
--      long displacement;
--      RE_ENTRANT_CHECK_OFF;
--      FPU_code_access_ok(1);
--      FPU_get_user(displacement, (signed char __user *) (*fpu_eip));
--      offset += displacement;
--      RE_ENTRANT_CHECK_ON;
--      (*fpu_eip)++;
--    }
--  else if (mod == 2 || base == 5) /* The second condition also has mod==0 */
--    {
--      /* 32 bit displacement */
--      long displacement;
--      RE_ENTRANT_CHECK_OFF;
--      FPU_code_access_ok(4);
--      FPU_get_user(displacement, (long __user *) (*fpu_eip));
--      offset += displacement;
--      RE_ENTRANT_CHECK_ON;
--      (*fpu_eip) += 4;
--    }
--
--  return offset;
--}
-+	u_char ss, index, base;
-+	long offset;
++	.byte 0x42	/* DW_CFA_advance_loc 2 -- nop; popl eax. */
 +
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_code_access_ok(1);
-+	FPU_get_user(base, (u_char __user *) (*fpu_eip));	/* The SIB byte */
-+	RE_ENTRANT_CHECK_ON;
-+	(*fpu_eip)++;
-+	ss = base >> 6;
-+	index = (base >> 3) & 7;
-+	base &= 7;
++	do_cfa_expr(IA32_SIGCONTEXT_sp)
++	do_expr(0, IA32_SIGCONTEXT_ax)
++	do_expr(1, IA32_SIGCONTEXT_cx)
++	do_expr(2, IA32_SIGCONTEXT_dx)
++	do_expr(3, IA32_SIGCONTEXT_bx)
++	do_expr(5, IA32_SIGCONTEXT_bp)
++	do_expr(6, IA32_SIGCONTEXT_si)
++	do_expr(7, IA32_SIGCONTEXT_di)
++	do_expr(8, IA32_SIGCONTEXT_ip)
 +
-+	if ((mod == 0) && (base == 5))
-+		offset = 0;	/* No base register */
-+	else
-+		offset = REG_(base);
++	.align 4
++.LENDFDEDLSI1:
 +
-+	if (index == 4) {
-+		/* No index register */
-+		/* A non-zero ss is illegal */
-+		if (ss)
-+			EXCEPTION(EX_Invalid);
-+	} else {
-+		offset += (REG_(index)) << ss;
-+	}
++	.long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */
++.LSTARTFDEDLSI2:
++	.long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */
++	/* HACK: See above wrt unwind library assumptions.  */
++	.long .LSTART_rt_sigreturn-1-.	/* PC-relative start address */
++	.long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
++	.uleb128 0			/* Augmentation */
++	/* What follows are the instructions for the table generation.
++	   We record the locations of each register saved.  This is
++	   slightly less complicated than the above, since we don't
++	   modify the stack pointer in the process.  */
 +
-+	if (mod == 1) {
-+		/* 8 bit signed displacement */
-+		long displacement;
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_code_access_ok(1);
-+		FPU_get_user(displacement, (signed char __user *)(*fpu_eip));
-+		offset += displacement;
-+		RE_ENTRANT_CHECK_ON;
-+		(*fpu_eip)++;
-+	} else if (mod == 2 || base == 5) {	/* The second condition also has mod==0 */
-+		/* 32 bit displacement */
-+		long displacement;
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_code_access_ok(4);
-+		FPU_get_user(displacement, (long __user *)(*fpu_eip));
-+		offset += displacement;
-+		RE_ENTRANT_CHECK_ON;
-+		(*fpu_eip) += 4;
-+	}
- 
-+	return offset;
-+}
- 
--static unsigned long vm86_segment(u_char segment,
--				  struct address *addr)
-+static unsigned long vm86_segment(u_char segment, struct address *addr)
- {
--  segment--;
-+	segment--;
- #ifdef PARANOID
--  if ( segment > PREFIX_SS_ )
--    {
--      EXCEPTION(EX_INTERNAL|0x130);
--      math_abort(FPU_info,SIGSEGV);
--    }
-+	if (segment > PREFIX_SS_) {
-+		EXCEPTION(EX_INTERNAL | 0x130);
-+		math_abort(FPU_info, SIGSEGV);
-+	}
- #endif /* PARANOID */
--  addr->selector = VM86_REG_(segment);
--  return (unsigned long)VM86_REG_(segment) << 4;
-+	addr->selector = VM86_REG_(segment);
-+	return (unsigned long)VM86_REG_(segment) << 4;
- }
- 
--
- /* This should work for 16 and 32 bit protected mode. */
- static long pm_address(u_char FPU_modrm, u_char segment,
- 		       struct address *addr, long offset)
--{ 
--  struct desc_struct descriptor;
--  unsigned long base_address, limit, address, seg_top;
-+{
-+	struct desc_struct descriptor;
-+	unsigned long base_address, limit, address, seg_top;
- 
--  segment--;
-+	segment--;
- 
- #ifdef PARANOID
--  /* segment is unsigned, so this also detects if segment was 0: */
--  if ( segment > PREFIX_SS_ )
--    {
--      EXCEPTION(EX_INTERNAL|0x132);
--      math_abort(FPU_info,SIGSEGV);
--    }
-+	/* segment is unsigned, so this also detects if segment was 0: */
-+	if (segment > PREFIX_SS_) {
-+		EXCEPTION(EX_INTERNAL | 0x132);
-+		math_abort(FPU_info, SIGSEGV);
-+	}
- #endif /* PARANOID */
- 
--  switch ( segment )
--    {
--      /* gs isn't used by the kernel, so it still has its
--	 user-space value. */
--    case PREFIX_GS_-1:
--      /* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
--      savesegment(gs, addr->selector);
--      break;
--    default:
--      addr->selector = PM_REG_(segment);
--    }
--
--  descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
--  base_address = SEG_BASE_ADDR(descriptor);
--  address = base_address + offset;
--  limit = base_address
--	+ (SEG_LIMIT(descriptor)+1) * SEG_GRANULARITY(descriptor) - 1;
--  if ( limit < base_address ) limit = 0xffffffff;
--
--  if ( SEG_EXPAND_DOWN(descriptor) )
--    {
--      if ( SEG_G_BIT(descriptor) )
--	seg_top = 0xffffffff;
--      else
--	{
--	  seg_top = base_address + (1 << 20);
--	  if ( seg_top < base_address ) seg_top = 0xffffffff;
-+	switch (segment) {
-+		/* gs isn't used by the kernel, so it still has its
-+		   user-space value. */
-+	case PREFIX_GS_ - 1:
-+		/* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
-+		savesegment(gs, addr->selector);
-+		break;
-+	default:
-+		addr->selector = PM_REG_(segment);
- 	}
--      access_limit =
--	(address <= limit) || (address >= seg_top) ? 0 :
--	  ((seg_top-address) >= 255 ? 255 : seg_top-address);
--    }
--  else
--    {
--      access_limit =
--	(address > limit) || (address < base_address) ? 0 :
--	  ((limit-address) >= 254 ? 255 : limit-address+1);
--    }
--  if ( SEG_EXECUTE_ONLY(descriptor) ||
--      (!SEG_WRITE_PERM(descriptor) && (FPU_modrm & FPU_WRITE_BIT)) )
--    {
--      access_limit = 0;
--    }
--  return address;
--}
- 
-+	descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
-+	base_address = SEG_BASE_ADDR(descriptor);
-+	address = base_address + offset;
-+	limit = base_address
-+	    + (SEG_LIMIT(descriptor) + 1) * SEG_GRANULARITY(descriptor) - 1;
-+	if (limit < base_address)
-+		limit = 0xffffffff;
++	do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp)
++	do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax)
++	do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx)
++	do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx)
++	do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx)
++	do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp)
++	do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si)
++	do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di)
++	do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip)
 +
-+	if (SEG_EXPAND_DOWN(descriptor)) {
-+		if (SEG_G_BIT(descriptor))
-+			seg_top = 0xffffffff;
-+		else {
-+			seg_top = base_address + (1 << 20);
-+			if (seg_top < base_address)
-+				seg_top = 0xffffffff;
-+		}
-+		access_limit =
-+		    (address <= limit) || (address >= seg_top) ? 0 :
-+		    ((seg_top - address) >= 255 ? 255 : seg_top - address);
-+	} else {
-+		access_limit =
-+		    (address > limit) || (address < base_address) ? 0 :
-+		    ((limit - address) >= 254 ? 255 : limit - address + 1);
-+	}
-+	if (SEG_EXECUTE_ONLY(descriptor) ||
-+	    (!SEG_WRITE_PERM(descriptor) && (FPU_modrm & FPU_WRITE_BIT))) {
-+		access_limit = 0;
-+	}
-+	return address;
-+}
- 
- /*
-        MOD R/M byte:  MOD == 3 has a special use for the FPU
-@@ -221,7 +202,6 @@ static long pm_address(u_char FPU_modrm, u_char segment,
-        .....   .........   .........
-         MOD    OPCODE(2)     R/M
- 
--
-        SIB byte
- 
-        7   6   5   4   3   2   1   0
-@@ -231,208 +211,194 @@ static long pm_address(u_char FPU_modrm, u_char segment,
- */
- 
- void __user *FPU_get_address(u_char FPU_modrm, unsigned long *fpu_eip,
--		  struct address *addr,
--		  fpu_addr_modes addr_modes)
-+			     struct address *addr, fpu_addr_modes addr_modes)
-+{
-+	u_char mod;
-+	unsigned rm = FPU_modrm & 7;
-+	long *cpu_reg_ptr;
-+	int address = 0;	/* Initialized just to stop compiler warnings. */
++	.align 4
++.LENDFDEDLSI2:
++	.previous
+diff --git a/arch/x86/vdso/vdso32/syscall.S b/arch/x86/vdso/vdso32/syscall.S
+new file mode 100644
+index 0000000..5415b56
+--- /dev/null
++++ b/arch/x86/vdso/vdso32/syscall.S
+@@ -0,0 +1,77 @@
++/*
++ * Code for the vDSO.  This version uses the syscall instruction.
++ *
++ * First get the common code for the sigreturn entry points.
++ * This must come first.
++ */
++#define SYSCALL_ENTER_KERNEL	syscall
++#include "sigreturn.S"
 +
-+	/* Memory accessed via the cs selector is write protected
-+	   in `non-segmented' 32 bit protected mode. */
-+	if (!addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
-+	    && (addr_modes.override.segment == PREFIX_CS_)) {
-+		math_abort(FPU_info, SIGSEGV);
-+	}
++#include <asm/segment.h>
 +
-+	addr->selector = FPU_DS;	/* Default, for 32 bit non-segmented mode. */
++	.text
++	.globl __kernel_vsyscall
++	.type __kernel_vsyscall,@function
++	ALIGN
++__kernel_vsyscall:
++.LSTART_vsyscall:
++	push	%ebp
++.Lpush_ebp:
++	movl	%ecx, %ebp
++	syscall
++	movl	$__USER32_DS, %ecx
++	movl	%ecx, %ss
++	movl	%ebp, %ecx
++	popl	%ebp
++.Lpop_ebp:
++	ret
++.LEND_vsyscall:
++	.size __kernel_vsyscall,.-.LSTART_vsyscall
 +
-+	mod = (FPU_modrm >> 6) & 3;
++	.section .eh_frame,"a",@progbits
++.LSTARTFRAME:
++	.long .LENDCIE-.LSTARTCIE
++.LSTARTCIE:
++	.long 0			/* CIE ID */
++	.byte 1			/* Version number */
++	.string "zR"		/* NUL-terminated augmentation string */
++	.uleb128 1		/* Code alignment factor */
++	.sleb128 -4		/* Data alignment factor */
++	.byte 8			/* Return address register column */
++	.uleb128 1		/* Augmentation value length */
++	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
++	.byte 0x0c		/* DW_CFA_def_cfa */
++	.uleb128 4
++	.uleb128 4
++	.byte 0x88		/* DW_CFA_offset, column 0x8 */
++	.uleb128 1
++	.align 4
++.LENDCIE:
 +
-+	if (rm == 4 && mod != 3) {
-+		address = sib(mod, fpu_eip);
-+	} else {
-+		cpu_reg_ptr = &REG_(rm);
-+		switch (mod) {
-+		case 0:
-+			if (rm == 5) {
-+				/* Special case: disp32 */
-+				RE_ENTRANT_CHECK_OFF;
-+				FPU_code_access_ok(4);
-+				FPU_get_user(address,
-+					     (unsigned long __user
-+					      *)(*fpu_eip));
-+				(*fpu_eip) += 4;
-+				RE_ENTRANT_CHECK_ON;
-+				addr->offset = address;
-+				return (void __user *)address;
-+			} else {
-+				address = *cpu_reg_ptr;	/* Just return the contents
-+							   of the cpu register */
-+				addr->offset = address;
-+				return (void __user *)address;
-+			}
-+		case 1:
-+			/* 8 bit signed displacement */
-+			RE_ENTRANT_CHECK_OFF;
-+			FPU_code_access_ok(1);
-+			FPU_get_user(address, (signed char __user *)(*fpu_eip));
-+			RE_ENTRANT_CHECK_ON;
-+			(*fpu_eip)++;
-+			break;
-+		case 2:
-+			/* 32 bit displacement */
-+			RE_ENTRANT_CHECK_OFF;
-+			FPU_code_access_ok(4);
-+			FPU_get_user(address, (long __user *)(*fpu_eip));
-+			(*fpu_eip) += 4;
-+			RE_ENTRANT_CHECK_ON;
-+			break;
-+		case 3:
-+			/* Not legal for the FPU */
-+			EXCEPTION(EX_Invalid);
-+		}
-+		address += *cpu_reg_ptr;
-+	}
++	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
++.LSTARTFDE1:
++	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
++	.long .LSTART_vsyscall-.	/* PC-relative start address */
++	.long .LEND_vsyscall-.LSTART_vsyscall
++	.uleb128 0			/* Augmentation length */
++	/* What follows are the instructions for the table generation.
++	   We have to record all changes of the stack pointer.  */
++	.byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
++	.byte 0x0e		/* DW_CFA_def_cfa_offset */
++	.uleb128 8
++	.byte 0x85, 0x02	/* DW_CFA_offset %ebp -8 */
++	.byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
++	.byte 0xc5		/* DW_CFA_restore %ebp */
++	.byte 0x0e		/* DW_CFA_def_cfa_offset */
++	.uleb128 4
++	.align 4
++.LENDFDE1:
++	.previous
 +
-+	addr->offset = address;
++	/*
++	 * Pad out the segment to match the size of the sysenter.S version.
++	 */
++VDSO32_vsyscall_eh_frame_size = 0x40
++	.section .data,"aw",@progbits
++	.space VDSO32_vsyscall_eh_frame_size-(.LENDFDE1-.LSTARTFRAME), 0
++	.previous
+diff --git a/arch/x86/vdso/vdso32/sysenter.S b/arch/x86/vdso/vdso32/sysenter.S
+new file mode 100644
+index 0000000..e2800af
+--- /dev/null
++++ b/arch/x86/vdso/vdso32/sysenter.S
+@@ -0,0 +1,116 @@
++/*
++ * Code for the vDSO.  This version uses the sysenter instruction.
++ *
++ * First get the common code for the sigreturn entry points.
++ * This must come first.
++ */
++#include "sigreturn.S"
 +
-+	switch (addr_modes.default_mode) {
-+	case 0:
-+		break;
-+	case VM86:
-+		address += vm86_segment(addr_modes.override.segment, addr);
-+		break;
-+	case PM16:
-+	case SEG32:
-+		address = pm_address(FPU_modrm, addr_modes.override.segment,
-+				     addr, address);
-+		break;
-+	default:
-+		EXCEPTION(EX_INTERNAL | 0x133);
-+	}
++/*
++ * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
++ * %ecx itself for arg2. The pushing is because the sysexit instruction
++ * (found in entry.S) requires that we clobber %ecx with the desired %esp.
++ * User code might expect that %ecx is unclobbered though, as it would be
++ * for returning via the iret instruction, so we must push and pop.
++ *
++ * The caller puts arg3 in %edx, which the sysexit instruction requires
++ * for %eip. Thus, exactly as for arg2, we must push and pop.
++ *
++ * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
++ * instruction clobbers %esp, the user's %esp won't even survive entry
++ * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
++ * arg6 from the stack.
++ *
++ * You can not use this vsyscall for the clone() syscall because the
++ * three words on the parent stack do not get copied to the child.
++ */
++	.text
++	.globl __kernel_vsyscall
++	.type __kernel_vsyscall,@function
++	ALIGN
++__kernel_vsyscall:
++.LSTART_vsyscall:
++	push %ecx
++.Lpush_ecx:
++	push %edx
++.Lpush_edx:
++	push %ebp
++.Lenter_kernel:
++	movl %esp,%ebp
++	sysenter
 +
-+	return (void __user *)address;
-+}
++	/* 7: align return point with nop's to make disassembly easier */
++	.space 7,0x90
 +
-+void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
-+				struct address *addr, fpu_addr_modes addr_modes)
- {
--  u_char mod;
--  unsigned rm = FPU_modrm & 7;
--  long *cpu_reg_ptr;
--  int address = 0;     /* Initialized just to stop compiler warnings. */
--
--  /* Memory accessed via the cs selector is write protected
--     in `non-segmented' 32 bit protected mode. */
--  if ( !addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
--      && (addr_modes.override.segment == PREFIX_CS_) )
--    {
--      math_abort(FPU_info,SIGSEGV);
--    }
--
--  addr->selector = FPU_DS;   /* Default, for 32 bit non-segmented mode. */
--
--  mod = (FPU_modrm >> 6) & 3;
--
--  if (rm == 4 && mod != 3)
--    {
--      address = sib(mod, fpu_eip);
--    }
--  else
--    {
--      cpu_reg_ptr = & REG_(rm);
--      switch (mod)
--	{
-+	u_char mod;
-+	unsigned rm = FPU_modrm & 7;
-+	int address = 0;	/* Default used for mod == 0 */
++	/* 14: System call restart point is here! (SYSENTER_RETURN-2) */
++	jmp .Lenter_kernel
++	/* 16: System call normal return point is here! */
++VDSO32_SYSENTER_RETURN:	/* Symbol used by sysenter.c via vdso32-syms.h */
++	pop %ebp
++.Lpop_ebp:
++	pop %edx
++.Lpop_edx:
++	pop %ecx
++.Lpop_ecx:
++	ret
++.LEND_vsyscall:
++	.size __kernel_vsyscall,.-.LSTART_vsyscall
++	.previous
 +
-+	/* Memory accessed via the cs selector is write protected
-+	   in `non-segmented' 32 bit protected mode. */
-+	if (!addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
-+	    && (addr_modes.override.segment == PREFIX_CS_)) {
-+		math_abort(FPU_info, SIGSEGV);
-+	}
++	.section .eh_frame,"a",@progbits
++.LSTARTFRAMEDLSI:
++	.long .LENDCIEDLSI-.LSTARTCIEDLSI
++.LSTARTCIEDLSI:
++	.long 0			/* CIE ID */
++	.byte 1			/* Version number */
++	.string "zR"		/* NUL-terminated augmentation string */
++	.uleb128 1		/* Code alignment factor */
++	.sleb128 -4		/* Data alignment factor */
++	.byte 8			/* Return address register column */
++	.uleb128 1		/* Augmentation value length */
++	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
++	.byte 0x0c		/* DW_CFA_def_cfa */
++	.uleb128 4
++	.uleb128 4
++	.byte 0x88		/* DW_CFA_offset, column 0x8 */
++	.uleb128 1
++	.align 4
++.LENDCIEDLSI:
++	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
++.LSTARTFDEDLSI:
++	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
++	.long .LSTART_vsyscall-.	/* PC-relative start address */
++	.long .LEND_vsyscall-.LSTART_vsyscall
++	.uleb128 0
++	/* What follows are the instructions for the table generation.
++	   We have to record all changes of the stack pointer.  */
++	.byte 0x40 + (.Lpush_ecx-.LSTART_vsyscall) /* DW_CFA_advance_loc */
++	.byte 0x0e		/* DW_CFA_def_cfa_offset */
++	.byte 0x08		/* RA at offset 8 now */
++	.byte 0x40 + (.Lpush_edx-.Lpush_ecx) /* DW_CFA_advance_loc */
++	.byte 0x0e		/* DW_CFA_def_cfa_offset */
++	.byte 0x0c		/* RA at offset 12 now */
++	.byte 0x40 + (.Lenter_kernel-.Lpush_edx) /* DW_CFA_advance_loc */
++	.byte 0x0e		/* DW_CFA_def_cfa_offset */
++	.byte 0x10		/* RA at offset 16 now */
++	.byte 0x85, 0x04	/* DW_CFA_offset %ebp -16 */
++	/* Finally the epilogue.  */
++	.byte 0x40 + (.Lpop_ebp-.Lenter_kernel)	/* DW_CFA_advance_loc */
++	.byte 0x0e		/* DW_CFA_def_cfa_offset */
++	.byte 0x0c		/* RA at offset 12 now */
++	.byte 0xc5		/* DW_CFA_restore %ebp */
++	.byte 0x40 + (.Lpop_edx-.Lpop_ebp) /* DW_CFA_advance_loc */
++	.byte 0x0e		/* DW_CFA_def_cfa_offset */
++	.byte 0x08		/* RA at offset 8 now */
++	.byte 0x40 + (.Lpop_ecx-.Lpop_edx) /* DW_CFA_advance_loc */
++	.byte 0x0e		/* DW_CFA_def_cfa_offset */
++	.byte 0x04		/* RA at offset 4 now */
++	.align 4
++.LENDFDEDLSI:
++	.previous
 +
-+	addr->selector = FPU_DS;	/* Default, for 32 bit non-segmented mode. */
++	/*
++	 * Emit a symbol with the size of this .eh_frame data,
++	 * to verify it matches the other versions.
++	 */
++VDSO32_vsyscall_eh_frame_size = (.LENDFDEDLSI-.LSTARTFRAMEDLSI)
+diff --git a/arch/x86/vdso/vdso32/vdso32.lds.S b/arch/x86/vdso/vdso32/vdso32.lds.S
+new file mode 100644
+index 0000000..976124b
+--- /dev/null
++++ b/arch/x86/vdso/vdso32/vdso32.lds.S
+@@ -0,0 +1,37 @@
++/*
++ * Linker script for 32-bit vDSO.
++ * We #include the file to define the layout details.
++ * Here we only choose the prelinked virtual address.
++ *
++ * This file defines the version script giving the user-exported symbols in
++ * the DSO.  We can define local symbols here called VDSO* to make their
++ * values visible using the asm-x86/vdso.h macros from the kernel proper.
++ */
 +
-+	mod = (FPU_modrm >> 6) & 3;
++#define VDSO_PRELINK 0
++#include "../vdso-layout.lds.S"
 +
-+	switch (mod) {
- 	case 0:
--	  if (rm == 5)
--	    {
--	      /* Special case: disp32 */
--	      RE_ENTRANT_CHECK_OFF;
--	      FPU_code_access_ok(4);
--	      FPU_get_user(address, (unsigned long __user *) (*fpu_eip));
--	      (*fpu_eip) += 4;
--	      RE_ENTRANT_CHECK_ON;
--	      addr->offset = address;
--	      return (void __user *) address;
--	    }
--	  else
--	    {
--	      address = *cpu_reg_ptr;  /* Just return the contents
--					  of the cpu register */
--	      addr->offset = address;
--	      return (void __user *) address;
--	    }
-+		if (rm == 6) {
-+			/* Special case: disp16 */
-+			RE_ENTRANT_CHECK_OFF;
-+			FPU_code_access_ok(2);
-+			FPU_get_user(address,
-+				     (unsigned short __user *)(*fpu_eip));
-+			(*fpu_eip) += 2;
-+			RE_ENTRANT_CHECK_ON;
-+			goto add_segment;
-+		}
-+		break;
- 	case 1:
--	  /* 8 bit signed displacement */
--	  RE_ENTRANT_CHECK_OFF;
--	  FPU_code_access_ok(1);
--	  FPU_get_user(address, (signed char __user *) (*fpu_eip));
--	  RE_ENTRANT_CHECK_ON;
--	  (*fpu_eip)++;
--	  break;
-+		/* 8 bit signed displacement */
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_code_access_ok(1);
-+		FPU_get_user(address, (signed char __user *)(*fpu_eip));
-+		RE_ENTRANT_CHECK_ON;
-+		(*fpu_eip)++;
-+		break;
- 	case 2:
--	  /* 32 bit displacement */
--	  RE_ENTRANT_CHECK_OFF;
--	  FPU_code_access_ok(4);
--	  FPU_get_user(address, (long __user *) (*fpu_eip));
--	  (*fpu_eip) += 4;
--	  RE_ENTRANT_CHECK_ON;
--	  break;
-+		/* 16 bit displacement */
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_code_access_ok(2);
-+		FPU_get_user(address, (unsigned short __user *)(*fpu_eip));
-+		(*fpu_eip) += 2;
-+		RE_ENTRANT_CHECK_ON;
-+		break;
- 	case 3:
--	  /* Not legal for the FPU */
--	  EXCEPTION(EX_Invalid);
-+		/* Not legal for the FPU */
-+		EXCEPTION(EX_Invalid);
-+		break;
-+	}
-+	switch (rm) {
-+	case 0:
-+		address += FPU_info->___ebx + FPU_info->___esi;
-+		break;
-+	case 1:
-+		address += FPU_info->___ebx + FPU_info->___edi;
-+		break;
-+	case 2:
-+		address += FPU_info->___ebp + FPU_info->___esi;
-+		if (addr_modes.override.segment == PREFIX_DEFAULT)
-+			addr_modes.override.segment = PREFIX_SS_;
-+		break;
-+	case 3:
-+		address += FPU_info->___ebp + FPU_info->___edi;
-+		if (addr_modes.override.segment == PREFIX_DEFAULT)
-+			addr_modes.override.segment = PREFIX_SS_;
-+		break;
-+	case 4:
-+		address += FPU_info->___esi;
-+		break;
-+	case 5:
-+		address += FPU_info->___edi;
-+		break;
-+	case 6:
-+		address += FPU_info->___ebp;
-+		if (addr_modes.override.segment == PREFIX_DEFAULT)
-+			addr_modes.override.segment = PREFIX_SS_;
-+		break;
-+	case 7:
-+		address += FPU_info->___ebx;
-+		break;
- 	}
--      address += *cpu_reg_ptr;
--    }
--
--  addr->offset = address;
--
--  switch ( addr_modes.default_mode )
--    {
--    case 0:
--      break;
--    case VM86:
--      address += vm86_segment(addr_modes.override.segment, addr);
--      break;
--    case PM16:
--    case SEG32:
--      address = pm_address(FPU_modrm, addr_modes.override.segment,
--			   addr, address);
--      break;
--    default:
--      EXCEPTION(EX_INTERNAL|0x133);
--    }
--
--  return (void __user *)address;
--}
++/* The ELF entry point can be used to set the AT_SYSINFO value.  */
++ENTRY(__kernel_vsyscall);
++
++/*
++ * This controls what userland symbols we export from the vDSO.
++ */
++VERSION
++{
++	LINUX_2.5 {
++	global:
++		__kernel_vsyscall;
++		__kernel_sigreturn;
++		__kernel_rt_sigreturn;
++	local: *;
++	};
++}
++
++/*
++ * Symbols we define here called VDSO* get their values into vdso32-syms.h.
++ */
++VDSO32_PRELINK		= VDSO_PRELINK;
++VDSO32_vsyscall		= __kernel_vsyscall;
++VDSO32_sigreturn	= __kernel_sigreturn;
++VDSO32_rt_sigreturn	= __kernel_rt_sigreturn;
+diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
+index 3b1ae1a..c8097f1 100644
+--- a/arch/x86/vdso/vgetcpu.c
++++ b/arch/x86/vdso/vgetcpu.c
+@@ -15,11 +15,11 @@
  
-+      add_segment:
-+	address &= 0xffff;
+ long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+ {
+-	unsigned int dummy, p;
++	unsigned int p;
  
--void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
--		     struct address *addr,
--		     fpu_addr_modes addr_modes)
--{
--  u_char mod;
--  unsigned rm = FPU_modrm & 7;
--  int address = 0;     /* Default used for mod == 0 */
--
--  /* Memory accessed via the cs selector is write protected
--     in `non-segmented' 32 bit protected mode. */
--  if ( !addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
--      && (addr_modes.override.segment == PREFIX_CS_) )
--    {
--      math_abort(FPU_info,SIGSEGV);
--    }
--
--  addr->selector = FPU_DS;   /* Default, for 32 bit non-segmented mode. */
--
--  mod = (FPU_modrm >> 6) & 3;
+ 	if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+ 		/* Load per CPU data from RDTSCP */
+-		rdtscp(dummy, dummy, p);
++		native_read_tscp(&p);
+ 	} else {
+ 		/* Load per CPU data from GDT */
+ 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
+index ff9333e..3fdd514 100644
+--- a/arch/x86/vdso/vma.c
++++ b/arch/x86/vdso/vma.c
+@@ -11,23 +11,20 @@
+ #include <asm/vsyscall.h>
+ #include <asm/vgtod.h>
+ #include <asm/proto.h>
+-#include "voffset.h"
++#include <asm/vdso.h>
+ 
+-int vdso_enabled = 1;
 -
--  switch (mod)
--    {
--    case 0:
--      if (rm == 6)
--	{
--	  /* Special case: disp16 */
--	  RE_ENTRANT_CHECK_OFF;
--	  FPU_code_access_ok(2);
--	  FPU_get_user(address, (unsigned short __user *) (*fpu_eip));
--	  (*fpu_eip) += 2;
--	  RE_ENTRANT_CHECK_ON;
--	  goto add_segment;
-+	addr->offset = address;
+-#define VEXTERN(x) extern typeof(__ ## x) *vdso_ ## x;
+-#include "vextern.h"
++#include "vextern.h"		/* Just for VMAGIC.  */
+ #undef VEXTERN
+ 
+-extern char vdso_kernel_start[], vdso_start[], vdso_end[];
++int vdso_enabled = 1;
 +
-+	switch (addr_modes.default_mode) {
-+	case 0:
-+		break;
-+	case VM86:
-+		address += vm86_segment(addr_modes.override.segment, addr);
-+		break;
-+	case PM16:
-+	case SEG32:
-+		address = pm_address(FPU_modrm, addr_modes.override.segment,
-+				     addr, address);
-+		break;
-+	default:
-+		EXCEPTION(EX_INTERNAL | 0x131);
++extern char vdso_start[], vdso_end[];
+ extern unsigned short vdso_sync_cpuid;
+ 
+ struct page **vdso_pages;
+ 
+-static inline void *var_ref(void *vbase, char *var, char *name)
++static inline void *var_ref(void *p, char *name)
+ {
+-	unsigned offset = var - &vdso_kernel_start[0] + VDSO_TEXT_OFFSET;
+-	void *p = vbase + offset;
+ 	if (*(void **)p != (void *)VMAGIC) {
+ 		printk("VDSO: variable %s broken\n", name);
+ 		vdso_enabled = 0;
+@@ -62,9 +59,8 @@ static int __init init_vdso_vars(void)
+ 		vdso_enabled = 0;
  	}
--      break;
--    case 1:
--      /* 8 bit signed displacement */
--      RE_ENTRANT_CHECK_OFF;
--      FPU_code_access_ok(1);
--      FPU_get_user(address, (signed char __user *) (*fpu_eip));
--      RE_ENTRANT_CHECK_ON;
--      (*fpu_eip)++;
--      break;
--    case 2:
--      /* 16 bit displacement */
--      RE_ENTRANT_CHECK_OFF;
--      FPU_code_access_ok(2);
--      FPU_get_user(address, (unsigned short __user *) (*fpu_eip));
--      (*fpu_eip) += 2;
--      RE_ENTRANT_CHECK_ON;
--      break;
--    case 3:
--      /* Not legal for the FPU */
--      EXCEPTION(EX_Invalid);
--      break;
--    }
--  switch ( rm )
--    {
--    case 0:
--      address += FPU_info->___ebx + FPU_info->___esi;
--      break;
--    case 1:
--      address += FPU_info->___ebx + FPU_info->___edi;
--      break;
--    case 2:
--      address += FPU_info->___ebp + FPU_info->___esi;
--      if ( addr_modes.override.segment == PREFIX_DEFAULT )
--	addr_modes.override.segment = PREFIX_SS_;
--      break;
--    case 3:
--      address += FPU_info->___ebp + FPU_info->___edi;
--      if ( addr_modes.override.segment == PREFIX_DEFAULT )
--	addr_modes.override.segment = PREFIX_SS_;
--      break;
--    case 4:
--      address += FPU_info->___esi;
--      break;
--    case 5:
--      address += FPU_info->___edi;
--      break;
--    case 6:
--      address += FPU_info->___ebp;
--      if ( addr_modes.override.segment == PREFIX_DEFAULT )
--	addr_modes.override.segment = PREFIX_SS_;
--      break;
--    case 7:
--      address += FPU_info->___ebx;
--      break;
--    }
--
-- add_segment:
--  address &= 0xffff;
--
--  addr->offset = address;
--
--  switch ( addr_modes.default_mode )
--    {
--    case 0:
--      break;
--    case VM86:
--      address += vm86_segment(addr_modes.override.segment, addr);
--      break;
--    case PM16:
--    case SEG32:
--      address = pm_address(FPU_modrm, addr_modes.override.segment,
--			   addr, address);
--      break;
--    default:
--      EXCEPTION(EX_INTERNAL|0x131);
--    }
--
--  return (void __user *)address ;
-+
-+	return (void __user *)address;
+ 
+-#define V(x) *(typeof(x) *) var_ref(vbase, (char *)RELOC_HIDE(&x, 0), #x)
+ #define VEXTERN(x) \
+-	V(vdso_ ## x) = &__ ## x;
++	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
+ #include "vextern.h"
+ #undef VEXTERN
+ 	return 0;
+diff --git a/arch/x86/vdso/voffset.h b/arch/x86/vdso/voffset.h
+deleted file mode 100644
+index 4af67c7..0000000
+--- a/arch/x86/vdso/voffset.h
++++ /dev/null
+@@ -1 +0,0 @@
+-#define VDSO_TEXT_OFFSET 0x600
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index fbfa55c..4d5f264 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -5,6 +5,7 @@
+ config XEN
+ 	bool "Xen guest support"
+ 	select PARAVIRT
++	depends on X86_32
+ 	depends on X86_CMPXCHG && X86_TSC && !NEED_MULTIPLE_NODES && !(X86_VISWS || X86_VOYAGER)
+ 	help
+ 	  This is the Linux Xen port.  Enabling this will allow the
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 79ad152..de647bc 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -141,8 +141,8 @@ static void __init xen_banner(void)
+ 	printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
  }
-diff --git a/arch/x86/math-emu/load_store.c b/arch/x86/math-emu/load_store.c
-index eebd6fb..2931ff3 100644
---- a/arch/x86/math-emu/load_store.c
-+++ b/arch/x86/math-emu/load_store.c
-@@ -26,247 +26,257 @@
- #include "status_w.h"
- #include "control_w.h"
  
--
--#define _NONE_ 0   /* st0_ptr etc not needed */
--#define _REG0_ 1   /* Will be storing st(0) */
--#define _PUSH_ 3   /* Need to check for space to push onto stack */
--#define _null_ 4   /* Function illegal or not implemented */
-+#define _NONE_ 0		/* st0_ptr etc not needed */
-+#define _REG0_ 1		/* Will be storing st(0) */
-+#define _PUSH_ 3		/* Need to check for space to push onto stack */
-+#define _null_ 4		/* Function illegal or not implemented */
+-static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
+-		      unsigned int *ecx, unsigned int *edx)
++static void xen_cpuid(unsigned int *ax, unsigned int *bx,
++		      unsigned int *cx, unsigned int *dx)
+ {
+ 	unsigned maskedx = ~0;
  
- #define pop_0()	{ FPU_settag0(TAG_Empty); top++; }
+@@ -150,18 +150,18 @@ static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
+ 	 * Mask out inconvenient features, to try and disable as many
+ 	 * unsupported kernel subsystems as possible.
+ 	 */
+-	if (*eax == 1)
++	if (*ax == 1)
+ 		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
+ 			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
+ 			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */
  
--
- static u_char const type_table[32] = {
--  _PUSH_, _PUSH_, _PUSH_, _PUSH_,
--  _null_, _null_, _null_, _null_,
--  _REG0_, _REG0_, _REG0_, _REG0_,
--  _REG0_, _REG0_, _REG0_, _REG0_,
--  _NONE_, _null_, _NONE_, _PUSH_,
--  _NONE_, _PUSH_, _null_, _PUSH_,
--  _NONE_, _null_, _NONE_, _REG0_,
--  _NONE_, _REG0_, _NONE_, _REG0_
--  };
-+	_PUSH_, _PUSH_, _PUSH_, _PUSH_,
-+	_null_, _null_, _null_, _null_,
-+	_REG0_, _REG0_, _REG0_, _REG0_,
-+	_REG0_, _REG0_, _REG0_, _REG0_,
-+	_NONE_, _null_, _NONE_, _PUSH_,
-+	_NONE_, _PUSH_, _null_, _PUSH_,
-+	_NONE_, _null_, _NONE_, _REG0_,
-+	_NONE_, _REG0_, _NONE_, _REG0_
-+};
+ 	asm(XEN_EMULATE_PREFIX "cpuid"
+-		: "=a" (*eax),
+-		  "=b" (*ebx),
+-		  "=c" (*ecx),
+-		  "=d" (*edx)
+-		: "0" (*eax), "2" (*ecx));
+-	*edx &= maskedx;
++		: "=a" (*ax),
++		  "=b" (*bx),
++		  "=c" (*cx),
++		  "=d" (*dx)
++		: "0" (*ax), "2" (*cx));
++	*dx &= maskedx;
+ }
  
- u_char const data_sizes_16[32] = {
--  4,  4,  8,  2,  0,  0,  0,  0,
--  4,  4,  8,  2,  4,  4,  8,  2,
--  14, 0, 94, 10,  2, 10,  0,  8,  
--  14, 0, 94, 10,  2, 10,  2,  8
-+	4, 4, 8, 2, 0, 0, 0, 0,
-+	4, 4, 8, 2, 4, 4, 8, 2,
-+	14, 0, 94, 10, 2, 10, 0, 8,
-+	14, 0, 94, 10, 2, 10, 2, 8
- };
+ static void xen_set_debugreg(int reg, unsigned long val)
+@@ -275,19 +275,12 @@ static unsigned long xen_store_tr(void)
  
- static u_char const data_sizes_32[32] = {
--  4,  4,  8,  2,  0,  0,  0,  0,
--  4,  4,  8,  2,  4,  4,  8,  2,
--  28, 0,108, 10,  2, 10,  0,  8,  
--  28, 0,108, 10,  2, 10,  2,  8
-+	4, 4, 8, 2, 0, 0, 0, 0,
-+	4, 4, 8, 2, 4, 4, 8, 2,
-+	28, 0, 108, 10, 2, 10, 0, 8,
-+	28, 0, 108, 10, 2, 10, 2, 8
- };
+ static void xen_set_ldt(const void *addr, unsigned entries)
+ {
+-	unsigned long linear_addr = (unsigned long)addr;
+ 	struct mmuext_op *op;
+ 	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
  
- int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
--		     void __user *data_address)
-+		   void __user * data_address)
+ 	op = mcs.args;
+ 	op->cmd = MMUEXT_SET_LDT;
+-	if (linear_addr) {
+-		/* ldt my be vmalloced, use arbitrary_virt_to_machine */
+-		xmaddr_t maddr;
+-		maddr = arbitrary_virt_to_machine((unsigned long)addr);
+-		linear_addr = (unsigned long)maddr.maddr;
+-	}
+-	op->arg1.linear_addr = linear_addr;
++	op->arg1.linear_addr = (unsigned long)addr;
+ 	op->arg2.nr_ents = entries;
+ 
+ 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+@@ -295,7 +288,7 @@ static void xen_set_ldt(const void *addr, unsigned entries)
+ 	xen_mc_issue(PARAVIRT_LAZY_CPU);
+ }
+ 
+-static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
++static void xen_load_gdt(const struct desc_ptr *dtr)
  {
--  FPU_REG loaded_data;
--  FPU_REG *st0_ptr;
--  u_char st0_tag = TAG_Empty;  /* This is just to stop a gcc warning. */
--  u_char loaded_tag;
-+	FPU_REG loaded_data;
-+	FPU_REG *st0_ptr;
-+	u_char st0_tag = TAG_Empty;	/* This is just to stop a gcc warning. */
-+	u_char loaded_tag;
+ 	unsigned long *frames;
+ 	unsigned long va = dtr->address;
+@@ -357,11 +350,11 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
+ }
  
--  st0_ptr = NULL;    /* Initialized just to stop compiler warnings. */
-+	st0_ptr = NULL;		/* Initialized just to stop compiler warnings. */
+ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
+-				u32 low, u32 high)
++				const void *ptr)
+ {
+ 	unsigned long lp = (unsigned long)&dt[entrynum];
+ 	xmaddr_t mach_lp = virt_to_machine(lp);
+-	u64 entry = (u64)high << 32 | low;
++	u64 entry = *(u64 *)ptr;
  
--  if ( addr_modes.default_mode & PROTECTED )
--    {
--      if ( addr_modes.default_mode == SEG32 )
--	{
--	  if ( access_limit < data_sizes_32[type] )
--	    math_abort(FPU_info,SIGSEGV);
--	}
--      else if ( addr_modes.default_mode == PM16 )
--	{
--	  if ( access_limit < data_sizes_16[type] )
--	    math_abort(FPU_info,SIGSEGV);
--	}
-+	if (addr_modes.default_mode & PROTECTED) {
-+		if (addr_modes.default_mode == SEG32) {
-+			if (access_limit < data_sizes_32[type])
-+				math_abort(FPU_info, SIGSEGV);
-+		} else if (addr_modes.default_mode == PM16) {
-+			if (access_limit < data_sizes_16[type])
-+				math_abort(FPU_info, SIGSEGV);
-+		}
- #ifdef PARANOID
--      else
--	EXCEPTION(EX_INTERNAL|0x140);
-+		else
-+			EXCEPTION(EX_INTERNAL | 0x140);
- #endif /* PARANOID */
--    }
-+	}
+ 	preempt_disable();
  
--  switch ( type_table[type] )
--    {
--    case _NONE_:
--      break;
--    case _REG0_:
--      st0_ptr = &st(0);       /* Some of these instructions pop after
--				 storing */
--      st0_tag = FPU_gettag0();
--      break;
--    case _PUSH_:
--      {
--	if ( FPU_gettagi(-1) != TAG_Empty )
--	  { FPU_stack_overflow(); return 0; }
--	top--;
--	st0_ptr = &st(0);
--      }
--      break;
--    case _null_:
--      FPU_illegal();
--      return 0;
-+	switch (type_table[type]) {
-+	case _NONE_:
-+		break;
-+	case _REG0_:
-+		st0_ptr = &st(0);	/* Some of these instructions pop after
-+					   storing */
-+		st0_tag = FPU_gettag0();
-+		break;
-+	case _PUSH_:
-+		{
-+			if (FPU_gettagi(-1) != TAG_Empty) {
-+				FPU_stack_overflow();
-+				return 0;
-+			}
-+			top--;
-+			st0_ptr = &st(0);
-+		}
-+		break;
-+	case _null_:
-+		FPU_illegal();
-+		return 0;
- #ifdef PARANOID
--    default:
--      EXCEPTION(EX_INTERNAL|0x141);
--      return 0;
-+	default:
-+		EXCEPTION(EX_INTERNAL | 0x141);
-+		return 0;
- #endif /* PARANOID */
--    }
--
--  switch ( type )
--    {
--    case 000:       /* fld m32real */
--      clear_C1();
--      loaded_tag = FPU_load_single((float __user *)data_address, &loaded_data);
--      if ( (loaded_tag == TAG_Special)
--	   && isNaN(&loaded_data)
--	   && (real_1op_NaN(&loaded_data) < 0) )
--	{
--	  top++;
--	  break;
--	}
--      FPU_copy_to_reg0(&loaded_data, loaded_tag);
--      break;
--    case 001:      /* fild m32int */
--      clear_C1();
--      loaded_tag = FPU_load_int32((long __user *)data_address, &loaded_data);
--      FPU_copy_to_reg0(&loaded_data, loaded_tag);
--      break;
--    case 002:      /* fld m64real */
--      clear_C1();
--      loaded_tag = FPU_load_double((double __user *)data_address, &loaded_data);
--      if ( (loaded_tag == TAG_Special)
--	   && isNaN(&loaded_data)
--	   && (real_1op_NaN(&loaded_data) < 0) )
--	{
--	  top++;
--	  break;
- 	}
--      FPU_copy_to_reg0(&loaded_data, loaded_tag);
--      break;
--    case 003:      /* fild m16int */
--      clear_C1();
--      loaded_tag = FPU_load_int16((short __user *)data_address, &loaded_data);
--      FPU_copy_to_reg0(&loaded_data, loaded_tag);
--      break;
--    case 010:      /* fst m32real */
--      clear_C1();
--      FPU_store_single(st0_ptr, st0_tag, (float __user *)data_address);
--      break;
--    case 011:      /* fist m32int */
--      clear_C1();
--      FPU_store_int32(st0_ptr, st0_tag, (long __user *)data_address);
--      break;
--    case 012:     /* fst m64real */
--      clear_C1();
--      FPU_store_double(st0_ptr, st0_tag, (double __user *)data_address);
--      break;
--    case 013:     /* fist m16int */
--      clear_C1();
--      FPU_store_int16(st0_ptr, st0_tag, (short __user *)data_address);
--      break;
--    case 014:     /* fstp m32real */
--      clear_C1();
--      if ( FPU_store_single(st0_ptr, st0_tag, (float __user *)data_address) )
--	pop_0();  /* pop only if the number was actually stored
--		     (see the 80486 manual p16-28) */
--      break;
--    case 015:     /* fistp m32int */
--      clear_C1();
--      if ( FPU_store_int32(st0_ptr, st0_tag, (long __user *)data_address) )
--	pop_0();  /* pop only if the number was actually stored
--		     (see the 80486 manual p16-28) */
--      break;
--    case 016:     /* fstp m64real */
--      clear_C1();
--      if ( FPU_store_double(st0_ptr, st0_tag, (double __user *)data_address) )
--	pop_0();  /* pop only if the number was actually stored
--		     (see the 80486 manual p16-28) */
--      break;
--    case 017:     /* fistp m16int */
--      clear_C1();
--      if ( FPU_store_int16(st0_ptr, st0_tag, (short __user *)data_address) )
--	pop_0();  /* pop only if the number was actually stored
--		     (see the 80486 manual p16-28) */
--      break;
--    case 020:     /* fldenv  m14/28byte */
--      fldenv(addr_modes, (u_char __user *)data_address);
--      /* Ensure that the values just loaded are not changed by
--	 fix-up operations. */
--      return 1;
--    case 022:     /* frstor m94/108byte */
--      frstor(addr_modes, (u_char __user *)data_address);
--      /* Ensure that the values just loaded are not changed by
--	 fix-up operations. */
--      return 1;
--    case 023:     /* fbld m80dec */
--      clear_C1();
--      loaded_tag = FPU_load_bcd((u_char __user *)data_address);
--      FPU_settag0(loaded_tag);
--      break;
--    case 024:     /* fldcw */
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_READ, data_address, 2);
--      FPU_get_user(control_word, (unsigned short __user *) data_address);
--      RE_ENTRANT_CHECK_ON;
--      if ( partial_status & ~control_word & CW_Exceptions )
--	partial_status |= (SW_Summary | SW_Backward);
--      else
--	partial_status &= ~(SW_Summary | SW_Backward);
-+
-+	switch (type) {
-+	case 000:		/* fld m32real */
-+		clear_C1();
-+		loaded_tag =
-+		    FPU_load_single((float __user *)data_address, &loaded_data);
-+		if ((loaded_tag == TAG_Special)
-+		    && isNaN(&loaded_data)
-+		    && (real_1op_NaN(&loaded_data) < 0)) {
-+			top++;
-+			break;
-+		}
-+		FPU_copy_to_reg0(&loaded_data, loaded_tag);
-+		break;
-+	case 001:		/* fild m32int */
-+		clear_C1();
-+		loaded_tag =
-+		    FPU_load_int32((long __user *)data_address, &loaded_data);
-+		FPU_copy_to_reg0(&loaded_data, loaded_tag);
-+		break;
-+	case 002:		/* fld m64real */
-+		clear_C1();
-+		loaded_tag =
-+		    FPU_load_double((double __user *)data_address,
-+				    &loaded_data);
-+		if ((loaded_tag == TAG_Special)
-+		    && isNaN(&loaded_data)
-+		    && (real_1op_NaN(&loaded_data) < 0)) {
-+			top++;
-+			break;
-+		}
-+		FPU_copy_to_reg0(&loaded_data, loaded_tag);
-+		break;
-+	case 003:		/* fild m16int */
-+		clear_C1();
-+		loaded_tag =
-+		    FPU_load_int16((short __user *)data_address, &loaded_data);
-+		FPU_copy_to_reg0(&loaded_data, loaded_tag);
-+		break;
-+	case 010:		/* fst m32real */
-+		clear_C1();
-+		FPU_store_single(st0_ptr, st0_tag,
-+				 (float __user *)data_address);
-+		break;
-+	case 011:		/* fist m32int */
-+		clear_C1();
-+		FPU_store_int32(st0_ptr, st0_tag, (long __user *)data_address);
-+		break;
-+	case 012:		/* fst m64real */
-+		clear_C1();
-+		FPU_store_double(st0_ptr, st0_tag,
-+				 (double __user *)data_address);
-+		break;
-+	case 013:		/* fist m16int */
-+		clear_C1();
-+		FPU_store_int16(st0_ptr, st0_tag, (short __user *)data_address);
-+		break;
-+	case 014:		/* fstp m32real */
-+		clear_C1();
-+		if (FPU_store_single
-+		    (st0_ptr, st0_tag, (float __user *)data_address))
-+			pop_0();	/* pop only if the number was actually stored
-+					   (see the 80486 manual p16-28) */
-+		break;
-+	case 015:		/* fistp m32int */
-+		clear_C1();
-+		if (FPU_store_int32
-+		    (st0_ptr, st0_tag, (long __user *)data_address))
-+			pop_0();	/* pop only if the number was actually stored
-+					   (see the 80486 manual p16-28) */
-+		break;
-+	case 016:		/* fstp m64real */
-+		clear_C1();
-+		if (FPU_store_double
-+		    (st0_ptr, st0_tag, (double __user *)data_address))
-+			pop_0();	/* pop only if the number was actually stored
-+					   (see the 80486 manual p16-28) */
-+		break;
-+	case 017:		/* fistp m16int */
-+		clear_C1();
-+		if (FPU_store_int16
-+		    (st0_ptr, st0_tag, (short __user *)data_address))
-+			pop_0();	/* pop only if the number was actually stored
-+					   (see the 80486 manual p16-28) */
-+		break;
-+	case 020:		/* fldenv  m14/28byte */
-+		fldenv(addr_modes, (u_char __user *) data_address);
-+		/* Ensure that the values just loaded are not changed by
-+		   fix-up operations. */
-+		return 1;
-+	case 022:		/* frstor m94/108byte */
-+		frstor(addr_modes, (u_char __user *) data_address);
-+		/* Ensure that the values just loaded are not changed by
-+		   fix-up operations. */
-+		return 1;
-+	case 023:		/* fbld m80dec */
-+		clear_C1();
-+		loaded_tag = FPU_load_bcd((u_char __user *) data_address);
-+		FPU_settag0(loaded_tag);
-+		break;
-+	case 024:		/* fldcw */
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_READ, data_address, 2);
-+		FPU_get_user(control_word,
-+			     (unsigned short __user *)data_address);
-+		RE_ENTRANT_CHECK_ON;
-+		if (partial_status & ~control_word & CW_Exceptions)
-+			partial_status |= (SW_Summary | SW_Backward);
-+		else
-+			partial_status &= ~(SW_Summary | SW_Backward);
- #ifdef PECULIAR_486
--      control_word |= 0x40;  /* An 80486 appears to always set this bit */
-+		control_word |= 0x40;	/* An 80486 appears to always set this bit */
- #endif /* PECULIAR_486 */
--      return 1;
--    case 025:      /* fld m80real */
--      clear_C1();
--      loaded_tag = FPU_load_extended((long double __user *)data_address, 0);
--      FPU_settag0(loaded_tag);
--      break;
--    case 027:      /* fild m64int */
--      clear_C1();
--      loaded_tag = FPU_load_int64((long long __user *)data_address);
--      if (loaded_tag == TAG_Error)
-+		return 1;
-+	case 025:		/* fld m80real */
-+		clear_C1();
-+		loaded_tag =
-+		    FPU_load_extended((long double __user *)data_address, 0);
-+		FPU_settag0(loaded_tag);
-+		break;
-+	case 027:		/* fild m64int */
-+		clear_C1();
-+		loaded_tag = FPU_load_int64((long long __user *)data_address);
-+		if (loaded_tag == TAG_Error)
-+			return 0;
-+		FPU_settag0(loaded_tag);
-+		break;
-+	case 030:		/* fstenv  m14/28byte */
-+		fstenv(addr_modes, (u_char __user *) data_address);
-+		return 1;
-+	case 032:		/* fsave */
-+		fsave(addr_modes, (u_char __user *) data_address);
-+		return 1;
-+	case 033:		/* fbstp m80dec */
-+		clear_C1();
-+		if (FPU_store_bcd
-+		    (st0_ptr, st0_tag, (u_char __user *) data_address))
-+			pop_0();	/* pop only if the number was actually stored
-+					   (see the 80486 manual p16-28) */
-+		break;
-+	case 034:		/* fstcw m16int */
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_WRITE, data_address, 2);
-+		FPU_put_user(control_word,
-+			     (unsigned short __user *)data_address);
-+		RE_ENTRANT_CHECK_ON;
-+		return 1;
-+	case 035:		/* fstp m80real */
-+		clear_C1();
-+		if (FPU_store_extended
-+		    (st0_ptr, st0_tag, (long double __user *)data_address))
-+			pop_0();	/* pop only if the number was actually stored
-+					   (see the 80486 manual p16-28) */
-+		break;
-+	case 036:		/* fstsw m2byte */
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_WRITE, data_address, 2);
-+		FPU_put_user(status_word(),
-+			     (unsigned short __user *)data_address);
-+		RE_ENTRANT_CHECK_ON;
-+		return 1;
-+	case 037:		/* fistp m64int */
-+		clear_C1();
-+		if (FPU_store_int64
-+		    (st0_ptr, st0_tag, (long long __user *)data_address))
-+			pop_0();	/* pop only if the number was actually stored
-+					   (see the 80486 manual p16-28) */
-+		break;
-+	}
- 	return 0;
--      FPU_settag0(loaded_tag);
--      break;
--    case 030:     /* fstenv  m14/28byte */
--      fstenv(addr_modes, (u_char __user *)data_address);
--      return 1;
--    case 032:      /* fsave */
--      fsave(addr_modes, (u_char __user *)data_address);
--      return 1;
--    case 033:      /* fbstp m80dec */
--      clear_C1();
--      if ( FPU_store_bcd(st0_ptr, st0_tag, (u_char __user *)data_address) )
--	pop_0();  /* pop only if the number was actually stored
--		     (see the 80486 manual p16-28) */
--      break;
--    case 034:      /* fstcw m16int */
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_WRITE,data_address,2);
--      FPU_put_user(control_word, (unsigned short __user *) data_address);
--      RE_ENTRANT_CHECK_ON;
--      return 1;
--    case 035:      /* fstp m80real */
--      clear_C1();
--      if ( FPU_store_extended(st0_ptr, st0_tag, (long double __user *)data_address) )
--	pop_0();  /* pop only if the number was actually stored
--		     (see the 80486 manual p16-28) */
--      break;
--    case 036:      /* fstsw m2byte */
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_WRITE,data_address,2);
--      FPU_put_user(status_word(),(unsigned short __user *) data_address);
--      RE_ENTRANT_CHECK_ON;
--      return 1;
--    case 037:      /* fistp m64int */
--      clear_C1();
--      if ( FPU_store_int64(st0_ptr, st0_tag, (long long __user *)data_address) )
--	pop_0();  /* pop only if the number was actually stored
--		     (see the 80486 manual p16-28) */
--      break;
--    }
--  return 0;
+@@ -395,12 +388,11 @@ static int cvt_gate_to_trap(int vector, u32 low, u32 high,
+ }
+ 
+ /* Locations of each CPU's IDT */
+-static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);
++static DEFINE_PER_CPU(struct desc_ptr, idt_desc);
+ 
+ /* Set an IDT entry.  If the entry is part of the current IDT, then
+    also update Xen. */
+-static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
+-				u32 low, u32 high)
++static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
+ {
+ 	unsigned long p = (unsigned long)&dt[entrynum];
+ 	unsigned long start, end;
+@@ -412,14 +404,15 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
+ 
+ 	xen_mc_flush();
+ 
+-	write_dt_entry(dt, entrynum, low, high);
++	native_write_idt_entry(dt, entrynum, g);
+ 
+ 	if (p >= start && (p + 8) <= end) {
+ 		struct trap_info info[2];
++		u32 *desc = (u32 *)g;
+ 
+ 		info[1].address = 0;
+ 
+-		if (cvt_gate_to_trap(entrynum, low, high, &info[0]))
++		if (cvt_gate_to_trap(entrynum, desc[0], desc[1], &info[0]))
+ 			if (HYPERVISOR_set_trap_table(info))
+ 				BUG();
+ 	}
+@@ -427,7 +420,7 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
+ 	preempt_enable();
  }
-diff --git a/arch/x86/math-emu/poly.h b/arch/x86/math-emu/poly.h
-index 4db7981..168eb44 100644
---- a/arch/x86/math-emu/poly.h
-+++ b/arch/x86/math-emu/poly.h
-@@ -21,9 +21,9 @@
-    allows. 9-byte would probably be sufficient.
-    */
- typedef struct {
--  unsigned long lsw;
--  unsigned long midw;
--  unsigned long msw;
-+	unsigned long lsw;
-+	unsigned long midw;
-+	unsigned long msw;
- } Xsig;
  
- asmlinkage void mul64(unsigned long long const *a, unsigned long long const *b,
-@@ -49,7 +49,6 @@ asmlinkage void div_Xsig(Xsig *x1, const Xsig *x2, const Xsig *dest);
- /* Macro to access the 8 ms bytes of an Xsig as a long long */
- #define XSIG_LL(x)         (*(unsigned long long *)&x.midw)
+-static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
++static void xen_convert_trap_info(const struct desc_ptr *desc,
+ 				  struct trap_info *traps)
+ {
+ 	unsigned in, out, count;
+@@ -446,7 +439,7 @@ static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
  
--
- /*
-    Need to run gcc with optimizations on to get these to
-    actually be in-line.
-@@ -63,59 +62,53 @@ asmlinkage void div_Xsig(Xsig *x1, const Xsig *x2, const Xsig *dest);
- static inline unsigned long mul_32_32(const unsigned long arg1,
- 				      const unsigned long arg2)
+ void xen_copy_trap_info(struct trap_info *traps)
  {
--  int retval;
--  asm volatile ("mull %2; movl %%edx,%%eax" \
--		:"=a" (retval) \
--		:"0" (arg1), "g" (arg2) \
--		:"dx");
--  return retval;
-+	int retval;
-+	asm volatile ("mull %2; movl %%edx,%%eax":"=a" (retval)
-+		      :"0"(arg1), "g"(arg2)
-+		      :"dx");
-+	return retval;
+-	const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc);
++	const struct desc_ptr *desc = &__get_cpu_var(idt_desc);
+ 
+ 	xen_convert_trap_info(desc, traps);
+ }
+@@ -454,7 +447,7 @@ void xen_copy_trap_info(struct trap_info *traps)
+ /* Load a new IDT into Xen.  In principle this can be per-CPU, so we
+    hold a spinlock to protect the static traps[] array (static because
+    it avoids allocation, and saves stack space). */
+-static void xen_load_idt(const struct Xgt_desc_struct *desc)
++static void xen_load_idt(const struct desc_ptr *desc)
+ {
+ 	static DEFINE_SPINLOCK(lock);
+ 	static struct trap_info traps[257];
+@@ -475,22 +468,21 @@ static void xen_load_idt(const struct Xgt_desc_struct *desc)
+ /* Write a GDT descriptor entry.  Ignore LDT descriptors, since
+    they're handled differently. */
+ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
+-				u32 low, u32 high)
++				const void *desc, int type)
+ {
+ 	preempt_disable();
+ 
+-	switch ((high >> 8) & 0xff) {
+-	case DESCTYPE_LDT:
+-	case DESCTYPE_TSS:
++	switch (type) {
++	case DESC_LDT:
++	case DESC_TSS:
+ 		/* ignore */
+ 		break;
+ 
+ 	default: {
+ 		xmaddr_t maddr = virt_to_machine(&dt[entry]);
+-		u64 desc = (u64)high << 32 | low;
+ 
+ 		xen_mc_flush();
+-		if (HYPERVISOR_update_descriptor(maddr.maddr, desc))
++		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
+ 			BUG();
+ 	}
+ 
+@@ -499,11 +491,11 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
+ 	preempt_enable();
  }
  
--
- /* Add the 12 byte Xsig x2 to Xsig dest, with no checks for overflow. */
- static inline void add_Xsig_Xsig(Xsig *dest, const Xsig *x2)
+-static void xen_load_esp0(struct tss_struct *tss,
++static void xen_load_sp0(struct tss_struct *tss,
+ 			  struct thread_struct *thread)
  {
--  asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
--                "movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
--                "movl 4(%%esi),%%eax; adcl %%eax,4(%%edi);\n"
--                "movl 8(%%esi),%%eax; adcl %%eax,8(%%edi);\n"
--                 :"=g" (*dest):"g" (dest), "g" (x2)
--                 :"ax","si","di");
-+	asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
-+		      "movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
-+		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%edi);\n"
-+		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%edi);\n":"=g"
-+		      (*dest):"g"(dest), "g"(x2)
-+		      :"ax", "si", "di");
+ 	struct multicall_space mcs = xen_mc_entry(0);
+-	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
++	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
+ 	xen_mc_issue(PARAVIRT_LAZY_CPU);
  }
  
--
- /* Add the 12 byte Xsig x2 to Xsig dest, adjust exp if overflow occurs. */
- /* Note: the constraints in the asm statement didn't always work properly
-    with gcc 2.5.8.  Changing from using edi to using ecx got around the
-    problem, but keep fingers crossed! */
- static inline void add_two_Xsig(Xsig *dest, const Xsig *x2, long int *exp)
+@@ -521,12 +513,12 @@ static void xen_io_delay(void)
+ }
+ 
+ #ifdef CONFIG_X86_LOCAL_APIC
+-static unsigned long xen_apic_read(unsigned long reg)
++static u32 xen_apic_read(unsigned long reg)
  {
--  asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
--                "movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
--                "movl 4(%%esi),%%eax; adcl %%eax,4(%%ecx);\n"
--                "movl 8(%%esi),%%eax; adcl %%eax,8(%%ecx);\n"
--                "jnc 0f;\n"
--		"rcrl 8(%%ecx); rcrl 4(%%ecx); rcrl (%%ecx)\n"
--                "movl %4,%%ecx; incl (%%ecx)\n"
--                "movl $1,%%eax; jmp 1f;\n"
--                "0: xorl %%eax,%%eax;\n"
--                "1:\n"
--		:"=g" (*exp), "=g" (*dest)
--		:"g" (dest), "g" (x2), "g" (exp)
--		:"cx","si","ax");
-+	asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
-+		      "movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
-+		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%ecx);\n"
-+		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%ecx);\n"
-+		      "jnc 0f;\n"
-+		      "rcrl 8(%%ecx); rcrl 4(%%ecx); rcrl (%%ecx)\n"
-+		      "movl %4,%%ecx; incl (%%ecx)\n"
-+		      "movl $1,%%eax; jmp 1f;\n"
-+		      "0: xorl %%eax,%%eax;\n" "1:\n":"=g" (*exp), "=g"(*dest)
-+		      :"g"(dest), "g"(x2), "g"(exp)
-+		      :"cx", "si", "ax");
+ 	return 0;
  }
  
--
- /* Negate (subtract from 1.0) the 12 byte Xsig */
- /* This is faster in a loop on my 386 than using the "neg" instruction. */
- static inline void negate_Xsig(Xsig *x)
+-static void xen_apic_write(unsigned long reg, unsigned long val)
++static void xen_apic_write(unsigned long reg, u32 val)
  {
--  asm volatile("movl %1,%%esi;\n"
--               "xorl %%ecx,%%ecx;\n"
--               "movl %%ecx,%%eax; subl (%%esi),%%eax; movl %%eax,(%%esi);\n"
--               "movl %%ecx,%%eax; sbbl 4(%%esi),%%eax; movl %%eax,4(%%esi);\n"
--               "movl %%ecx,%%eax; sbbl 8(%%esi),%%eax; movl %%eax,8(%%esi);\n"
--               :"=g" (*x):"g" (x):"si","ax","cx");
-+	asm volatile ("movl %1,%%esi;\n"
-+		      "xorl %%ecx,%%ecx;\n"
-+		      "movl %%ecx,%%eax; subl (%%esi),%%eax; movl %%eax,(%%esi);\n"
-+		      "movl %%ecx,%%eax; sbbl 4(%%esi),%%eax; movl %%eax,4(%%esi);\n"
-+		      "movl %%ecx,%%eax; sbbl 8(%%esi),%%eax; movl %%eax,8(%%esi);\n":"=g"
-+		      (*x):"g"(x):"si", "ax", "cx");
+ 	/* Warn to see if there's any stray references */
+ 	WARN_ON(1);
+@@ -666,6 +658,13 @@ static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
+ 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
  }
  
- #endif /* _POLY_H */
-diff --git a/arch/x86/math-emu/poly_2xm1.c b/arch/x86/math-emu/poly_2xm1.c
-index 9766ad5..b00e9e1 100644
---- a/arch/x86/math-emu/poly_2xm1.c
-+++ b/arch/x86/math-emu/poly_2xm1.c
-@@ -17,21 +17,19 @@
- #include "control_w.h"
- #include "poly.h"
++/* Early release_pt assumes that all pts are pinned, since there's
++   only init_mm and anything attached to that is pinned. */
++static void xen_release_pt_init(u32 pfn)
++{
++	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
++}
++
+ static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
+ {
+ 	struct mmuext_op op;
+@@ -677,7 +676,7 @@ static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
  
--
- #define	HIPOWER	11
--static const unsigned long long lterms[HIPOWER] =
--{
--  0x0000000000000000LL,  /* This term done separately as 12 bytes */
--  0xf5fdeffc162c7543LL,
--  0x1c6b08d704a0bfa6LL,
--  0x0276556df749cc21LL,
--  0x002bb0ffcf14f6b8LL,
--  0x0002861225ef751cLL,
--  0x00001ffcbfcd5422LL,
--  0x00000162c005d5f1LL,
--  0x0000000da96ccb1bLL,
--  0x0000000078d1b897LL,
--  0x000000000422b029LL
-+static const unsigned long long lterms[HIPOWER] = {
-+	0x0000000000000000LL,	/* This term done separately as 12 bytes */
-+	0xf5fdeffc162c7543LL,
-+	0x1c6b08d704a0bfa6LL,
-+	0x0276556df749cc21LL,
-+	0x002bb0ffcf14f6b8LL,
-+	0x0002861225ef751cLL,
-+	0x00001ffcbfcd5422LL,
-+	0x00000162c005d5f1LL,
-+	0x0000000da96ccb1bLL,
-+	0x0000000078d1b897LL,
-+	0x000000000422b029LL
- };
+ /* This needs to make sure the new pte page is pinned iff its being
+    attached to a pinned pagetable. */
+-static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
++static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
+ {
+ 	struct page *page = pfn_to_page(pfn);
  
- static const Xsig hiterm = MK_XSIG(0xb17217f7, 0xd1cf79ab, 0xc8a39194);
-@@ -45,112 +43,103 @@ static const Xsig shiftterm2 = MK_XSIG(0xb504f333, 0xf9de6484, 0x597d89b3);
- static const Xsig shiftterm3 = MK_XSIG(0xd744fcca, 0xd69d6af4, 0x39a68bb9);
+@@ -686,7 +685,7 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
  
- static const Xsig *shiftterm[] = { &shiftterm0, &shiftterm1,
--				     &shiftterm2, &shiftterm3 };
--
-+	&shiftterm2, &shiftterm3
-+};
+ 		if (!PageHighMem(page)) {
+ 			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+-			pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
++			pin_pagetable_pfn(level, pfn);
+ 		} else
+ 			/* make sure there are no stray mappings of
+ 			   this page */
+@@ -694,6 +693,16 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+ 	}
+ }
  
- /*--- poly_2xm1() -----------------------------------------------------------+
-  | Requires st(0) which is TAG_Valid and < 1.                                |
-  +---------------------------------------------------------------------------*/
--int	poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result)
-+int poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result)
++static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
++{
++	xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L1_TABLE);
++}
++
++static void xen_alloc_pd(struct mm_struct *mm, u32 pfn)
++{
++	xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L2_TABLE);
++}
++
+ /* This should never happen until we're OK to use struct page */
+ static void xen_release_pt(u32 pfn)
  {
--  long int              exponent, shift;
--  unsigned long long    Xll;
--  Xsig                  accumulator, Denom, argSignif;
--  u_char                tag;
-+	long int exponent, shift;
-+	unsigned long long Xll;
-+	Xsig accumulator, Denom, argSignif;
-+	u_char tag;
+@@ -796,6 +805,9 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
+ 	/* This will work as long as patching hasn't happened yet
+ 	   (which it hasn't) */
+ 	pv_mmu_ops.alloc_pt = xen_alloc_pt;
++	pv_mmu_ops.alloc_pd = xen_alloc_pd;
++	pv_mmu_ops.release_pt = xen_release_pt;
++	pv_mmu_ops.release_pd = xen_release_pt;
+ 	pv_mmu_ops.set_pte = xen_set_pte;
  
--  exponent = exponent16(arg);
-+	exponent = exponent16(arg);
+ 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+@@ -953,7 +965,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+ 	.read_pmc = native_read_pmc,
  
- #ifdef PARANOID
--  if ( exponent >= 0 )    	/* Don't want a |number| >= 1.0 */
--    {
--      /* Number negative, too large, or not Valid. */
--      EXCEPTION(EX_INTERNAL|0x127);
--      return 1;
--    }
-+	if (exponent >= 0) {	/* Don't want a |number| >= 1.0 */
-+		/* Number negative, too large, or not Valid. */
-+		EXCEPTION(EX_INTERNAL | 0x127);
-+		return 1;
+ 	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
+-	.irq_enable_sysexit = NULL,  /* never called */
++	.irq_enable_syscall_ret = NULL,  /* never called */
+ 
+ 	.load_tr_desc = paravirt_nop,
+ 	.set_ldt = xen_set_ldt,
+@@ -968,7 +980,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+ 	.write_ldt_entry = xen_write_ldt_entry,
+ 	.write_gdt_entry = xen_write_gdt_entry,
+ 	.write_idt_entry = xen_write_idt_entry,
+-	.load_esp0 = xen_load_esp0,
++	.load_sp0 = xen_load_sp0,
+ 
+ 	.set_iopl_mask = xen_set_iopl_mask,
+ 	.io_delay = xen_io_delay,
+@@ -1019,10 +1031,10 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
+ 	.pte_update_defer = paravirt_nop,
+ 
+ 	.alloc_pt = xen_alloc_pt_init,
+-	.release_pt = xen_release_pt,
+-	.alloc_pd = paravirt_nop,
++	.release_pt = xen_release_pt_init,
++	.alloc_pd = xen_alloc_pt_init,
+ 	.alloc_pd_clone = paravirt_nop,
+-	.release_pd = paravirt_nop,
++	.release_pd = xen_release_pt_init,
+ 
+ #ifdef CONFIG_HIGHPTE
+ 	.kmap_atomic_pte = xen_kmap_atomic_pte,
+diff --git a/arch/x86/xen/events.c b/arch/x86/xen/events.c
+index 6d1da58..dcf613e 100644
+--- a/arch/x86/xen/events.c
++++ b/arch/x86/xen/events.c
+@@ -465,7 +465,7 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+  * a bitset of words which contain pending event bits.  The second
+  * level is a bitset of pending events themselves.
+  */
+-fastcall void xen_evtchn_do_upcall(struct pt_regs *regs)
++void xen_evtchn_do_upcall(struct pt_regs *regs)
+ {
+ 	int cpu = get_cpu();
+ 	struct shared_info *s = HYPERVISOR_shared_info;
+@@ -487,7 +487,7 @@ fastcall void xen_evtchn_do_upcall(struct pt_regs *regs)
+ 			int irq = evtchn_to_irq[port];
+ 
+ 			if (irq != -1) {
+-				regs->orig_eax = ~irq;
++				regs->orig_ax = ~irq;
+ 				do_IRQ(regs);
+ 			}
+ 		}
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 0ac6c5d..45aa771 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -58,7 +58,8 @@
+ 
+ xmaddr_t arbitrary_virt_to_machine(unsigned long address)
+ {
+-	pte_t *pte = lookup_address(address);
++	int level;
++	pte_t *pte = lookup_address(address, &level);
+ 	unsigned offset = address & PAGE_MASK;
+ 
+ 	BUG_ON(pte == NULL);
+@@ -70,8 +71,9 @@ void make_lowmem_page_readonly(void *vaddr)
+ {
+ 	pte_t *pte, ptev;
+ 	unsigned long address = (unsigned long)vaddr;
++	int level;
+ 
+-	pte = lookup_address(address);
++	pte = lookup_address(address, &level);
+ 	BUG_ON(pte == NULL);
+ 
+ 	ptev = pte_wrprotect(*pte);
+@@ -84,8 +86,9 @@ void make_lowmem_page_readwrite(void *vaddr)
+ {
+ 	pte_t *pte, ptev;
+ 	unsigned long address = (unsigned long)vaddr;
++	int level;
+ 
+-	pte = lookup_address(address);
++	pte = lookup_address(address, &level);
+ 	BUG_ON(pte == NULL);
+ 
+ 	ptev = pte_mkwrite(*pte);
+@@ -241,12 +244,12 @@ unsigned long long xen_pgd_val(pgd_t pgd)
+ 
+ pte_t xen_make_pte(unsigned long long pte)
+ {
+-	if (pte & 1)
++	if (pte & _PAGE_PRESENT) {
+ 		pte = phys_to_machine(XPADDR(pte)).maddr;
++		pte &= ~(_PAGE_PCD | _PAGE_PWT);
 +	}
- #endif /* PARANOID */
  
--  argSignif.lsw = 0;
--  XSIG_LL(argSignif) = Xll = significand(arg);
--
--  if ( exponent == -1 )
--    {
--      shift = (argSignif.msw & 0x40000000) ? 3 : 2;
--      /* subtract 0.5 or 0.75 */
--      exponent -= 2;
--      XSIG_LL(argSignif) <<= 2;
--      Xll <<= 2;
--    }
--  else if ( exponent == -2 )
--    {
--      shift = 1;
--      /* subtract 0.25 */
--      exponent--;
--      XSIG_LL(argSignif) <<= 1;
--      Xll <<= 1;
--    }
--  else
--    shift = 0;
--
--  if ( exponent < -2 )
--    {
--      /* Shift the argument right by the required places. */
--      if ( FPU_shrx(&Xll, -2-exponent) >= 0x80000000U )
--	Xll++;	/* round up */
--    }
--
--  accumulator.lsw = accumulator.midw = accumulator.msw = 0;
--  polynomial_Xsig(&accumulator, &Xll, lterms, HIPOWER-1);
--  mul_Xsig_Xsig(&accumulator, &argSignif);
--  shr_Xsig(&accumulator, 3);
--
--  mul_Xsig_Xsig(&argSignif, &hiterm);   /* The leading term */
--  add_two_Xsig(&accumulator, &argSignif, &exponent);
+-	pte &= ~_PAGE_PCD;
 -
--  if ( shift )
--    {
--      /* The argument is large, use the identity:
--	 f(x+a) = f(a) * (f(x) + 1) - 1;
--	 */
--      shr_Xsig(&accumulator, - exponent);
--      accumulator.msw |= 0x80000000;      /* add 1.0 */
--      mul_Xsig_Xsig(&accumulator, shiftterm[shift]);
--      accumulator.msw &= 0x3fffffff;      /* subtract 1.0 */
--      exponent = 1;
--    }
+-	return (pte_t){ pte, pte >> 32 };
++	return (pte_t){ .pte = pte };
+ }
+ 
+ pmd_t xen_make_pmd(unsigned long long pmd)
+@@ -290,10 +293,10 @@ unsigned long xen_pgd_val(pgd_t pgd)
+ 
+ pte_t xen_make_pte(unsigned long pte)
+ {
+-	if (pte & _PAGE_PRESENT)
++	if (pte & _PAGE_PRESENT) {
+ 		pte = phys_to_machine(XPADDR(pte)).maddr;
 -
--  if ( sign != SIGN_POS )
--    {
--      /* The argument is negative, use the identity:
--	     f(-x) = -f(x) / (1 + f(x))
--	 */
--      Denom.lsw = accumulator.lsw;
--      XSIG_LL(Denom) = XSIG_LL(accumulator);
--      if ( exponent < 0 )
--	shr_Xsig(&Denom, - exponent);
--      else if ( exponent > 0 )
--	{
--	  /* exponent must be 1 here */
--	  XSIG_LL(Denom) <<= 1;
--	  if ( Denom.lsw & 0x80000000 )
--	    XSIG_LL(Denom) |= 1;
--	  (Denom.lsw) <<= 1;
-+	argSignif.lsw = 0;
-+	XSIG_LL(argSignif) = Xll = significand(arg);
-+
-+	if (exponent == -1) {
-+		shift = (argSignif.msw & 0x40000000) ? 3 : 2;
-+		/* subtract 0.5 or 0.75 */
-+		exponent -= 2;
-+		XSIG_LL(argSignif) <<= 2;
-+		Xll <<= 2;
-+	} else if (exponent == -2) {
-+		shift = 1;
-+		/* subtract 0.25 */
-+		exponent--;
-+		XSIG_LL(argSignif) <<= 1;
-+		Xll <<= 1;
-+	} else
-+		shift = 0;
-+
-+	if (exponent < -2) {
-+		/* Shift the argument right by the required places. */
-+		if (FPU_shrx(&Xll, -2 - exponent) >= 0x80000000U)
-+			Xll++;	/* round up */
-+	}
-+
-+	accumulator.lsw = accumulator.midw = accumulator.msw = 0;
-+	polynomial_Xsig(&accumulator, &Xll, lterms, HIPOWER - 1);
-+	mul_Xsig_Xsig(&accumulator, &argSignif);
-+	shr_Xsig(&accumulator, 3);
-+
-+	mul_Xsig_Xsig(&argSignif, &hiterm);	/* The leading term */
-+	add_two_Xsig(&accumulator, &argSignif, &exponent);
-+
-+	if (shift) {
-+		/* The argument is large, use the identity:
-+		   f(x+a) = f(a) * (f(x) + 1) - 1;
-+		 */
-+		shr_Xsig(&accumulator, -exponent);
-+		accumulator.msw |= 0x80000000;	/* add 1.0 */
-+		mul_Xsig_Xsig(&accumulator, shiftterm[shift]);
-+		accumulator.msw &= 0x3fffffff;	/* subtract 1.0 */
-+		exponent = 1;
+-	pte &= ~_PAGE_PCD;
++		pte &= ~(_PAGE_PCD | _PAGE_PWT);
 +	}
+ 
+ 	return (pte_t){ pte };
+ }
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index f84e772..3bad477 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -10,6 +10,7 @@
+ #include <linux/pm.h>
+ 
+ #include <asm/elf.h>
++#include <asm/vdso.h>
+ #include <asm/e820.h>
+ #include <asm/setup.h>
+ #include <asm/xen/hypervisor.h>
+@@ -59,12 +60,10 @@ static void xen_idle(void)
+ /*
+  * Set the bit indicating "nosegneg" library variants should be used.
+  */
+-static void fiddle_vdso(void)
++static void __init fiddle_vdso(void)
+ {
+-	extern u32 VDSO_NOTE_MASK; /* See ../kernel/vsyscall-note.S.  */
+-	extern char vsyscall_int80_start;
+-	u32 *mask = (u32 *) ((unsigned long) &VDSO_NOTE_MASK - VDSO_PRELINK +
+-			     &vsyscall_int80_start);
++	extern const char vdso32_default_start;
++	u32 *mask = VDSO32_SYMBOL(&vdso32_default_start, NOTE_MASK);
+ 	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
+ }
+ 
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index c1b131b..aafc544 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -146,7 +146,7 @@ void __init xen_smp_prepare_boot_cpu(void)
+ 	   old memory can be recycled */
+ 	make_lowmem_page_readwrite(&per_cpu__gdt_page);
+ 
+-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
++	for_each_possible_cpu(cpu) {
+ 		cpus_clear(per_cpu(cpu_sibling_map, cpu));
+ 		/*
+ 		 * cpu_core_map lives in a per cpu area that is cleared
+@@ -163,7 +163,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
+ {
+ 	unsigned cpu;
+ 
+-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
++	for_each_possible_cpu(cpu) {
+ 		cpus_clear(per_cpu(cpu_sibling_map, cpu));
+ 		/*
+ 		 * cpu_core_ map will be zeroed when the per
+@@ -239,10 +239,10 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+ 	ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);
+ 
+ 	ctxt->user_regs.cs = __KERNEL_CS;
+-	ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
++	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
+ 
+ 	ctxt->kernel_ss = __KERNEL_DS;
+-	ctxt->kernel_sp = idle->thread.esp0;
++	ctxt->kernel_sp = idle->thread.sp0;
+ 
+ 	ctxt->event_callback_cs     = __KERNEL_CS;
+ 	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index d083ff5..b3721fd 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -592,7 +592,7 @@ __init void xen_time_init(void)
+ 	set_normalized_timespec(&wall_to_monotonic,
+ 				-xtime.tv_sec, -xtime.tv_nsec);
+ 
+-	tsc_disable = 0;
++	setup_force_cpu_cap(X86_FEATURE_TSC);
+ 
+ 	xen_setup_timer(cpu);
+ 	xen_setup_cpu_clockevents();
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index f8d6937..288d587 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -4,16 +4,18 @@
+ #ifdef CONFIG_XEN
+ 
+ #include <linux/elfnote.h>
++#include <linux/init.h>
+ #include <asm/boot.h>
+ #include <xen/interface/elfnote.h>
+ 
+-.pushsection .init.text
++	__INIT
+ ENTRY(startup_xen)
+ 	movl %esi,xen_start_info
+ 	cld
+ 	movl $(init_thread_union+THREAD_SIZE),%esp
+ 	jmp xen_start_kernel
+-.popsection
 +
-+	if (sign != SIGN_POS) {
-+		/* The argument is negative, use the identity:
-+		   f(-x) = -f(x) / (1 + f(x))
-+		 */
-+		Denom.lsw = accumulator.lsw;
-+		XSIG_LL(Denom) = XSIG_LL(accumulator);
-+		if (exponent < 0)
-+			shr_Xsig(&Denom, -exponent);
-+		else if (exponent > 0) {
-+			/* exponent must be 1 here */
-+			XSIG_LL(Denom) <<= 1;
-+			if (Denom.lsw & 0x80000000)
-+				XSIG_LL(Denom) |= 1;
-+			(Denom.lsw) <<= 1;
-+		}
-+		Denom.msw |= 0x80000000;	/* add 1.0 */
-+		div_Xsig(&accumulator, &Denom, &accumulator);
- 	}
--      Denom.msw |= 0x80000000;      /* add 1.0 */
--      div_Xsig(&accumulator, &Denom, &accumulator);
--    }
++	__FINIT
  
--  /* Convert to 64 bit signed-compatible */
--  exponent += round_Xsig(&accumulator);
-+	/* Convert to 64 bit signed-compatible */
-+	exponent += round_Xsig(&accumulator);
+ .pushsection .bss.page_aligned
+ 	.align PAGE_SIZE_asm
+diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
+index ac4ed52..7d0f55a 100644
+--- a/arch/xtensa/kernel/vmlinux.lds.S
++++ b/arch/xtensa/kernel/vmlinux.lds.S
+@@ -136,13 +136,13 @@ SECTIONS
+   __init_begin = .;
+   .init.text : {
+   	_sinittext = .;
+-	*(.init.literal) *(.init.text)
++	*(.init.literal) INIT_TEXT
+ 	_einittext = .;
+   }
  
--  result = &st(0);
--  significand(result) = XSIG_LL(accumulator);
--  setexponent16(result, exponent);
-+	result = &st(0);
-+	significand(result) = XSIG_LL(accumulator);
-+	setexponent16(result, exponent);
+   .init.data :
+   {
+-    *(.init.data)
++    INIT_DATA
+     . = ALIGN(0x4);
+     __tagtable_begin = .;
+     *(.taglist)
+@@ -278,8 +278,9 @@ SECTIONS
+   /* Sections to be discarded */
+   /DISCARD/ :
+   {
+-  	*(.exit.literal .exit.text)
+-  	*(.exit.data)
++	*(.exit.literal)
++	EXIT_TEXT
++	EXIT_DATA
+         *(.exitcall.exit)
+   }
  
--  tag = FPU_round(result, 1, 0, FULL_PRECISION, sign);
-+	tag = FPU_round(result, 1, 0, FULL_PRECISION, sign);
+diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
+index 10aec22..64e304a 100644
+--- a/arch/xtensa/mm/Makefile
++++ b/arch/xtensa/mm/Makefile
+@@ -1,9 +1,5 @@
+ #
+ # Makefile for the Linux/Xtensa-specific parts of the memory manager.
+ #
+-# Note! Dependencies are done automagically by 'make dep', which also
+-# removes any old dependencies. DON'T put your own dependencies here
+-# unless it's something special (ie not a .c file).
+-#
  
--  setsign(result, sign);
--  FPU_settag0(tag);
-+	setsign(result, sign);
-+	FPU_settag0(tag);
+ obj-y	 := init.o fault.o tlb.o misc.o cache.o
+diff --git a/arch/xtensa/platform-iss/Makefile b/arch/xtensa/platform-iss/Makefile
+index 5b394e9..af96e31 100644
+--- a/arch/xtensa/platform-iss/Makefile
++++ b/arch/xtensa/platform-iss/Makefile
+@@ -3,11 +3,6 @@
+ # Makefile for the Xtensa Instruction Set Simulator (ISS)
+ # "prom monitor" library routines under Linux.
+ #
+-# Note! Dependencies are done automagically by 'make dep', which also
+-# removes any old dependencies. DON'T put your own dependencies here
+-# unless it's something special (ie not a .c file).
+-#
+-# Note 2! The CFLAGS definitions are in the main makefile...
  
--  return 0;
-+	return 0;
+ obj-y			= io.o console.o setup.o network.o
+ 
+diff --git a/block/Makefile b/block/Makefile
+index 8261081..5a43c7d 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -2,7 +2,9 @@
+ # Makefile for the kernel block layer
+ #
+ 
+-obj-$(CONFIG_BLOCK) := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
++obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
++			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
++			blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o
+ 
+ obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
+ obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
+diff --git a/block/as-iosched.c b/block/as-iosched.c
+index cb5e53b..9603684 100644
+--- a/block/as-iosched.c
++++ b/block/as-iosched.c
+@@ -170,9 +170,11 @@ static void free_as_io_context(struct as_io_context *aic)
  
+ static void as_trim(struct io_context *ioc)
+ {
++	spin_lock(&ioc->lock);
+ 	if (ioc->aic)
+ 		free_as_io_context(ioc->aic);
+ 	ioc->aic = NULL;
++	spin_unlock(&ioc->lock);
  }
-diff --git a/arch/x86/math-emu/poly_atan.c b/arch/x86/math-emu/poly_atan.c
-index 82f7029..20c28e5 100644
---- a/arch/x86/math-emu/poly_atan.c
-+++ b/arch/x86/math-emu/poly_atan.c
-@@ -18,28 +18,25 @@
- #include "control_w.h"
- #include "poly.h"
  
--
- #define	HIPOWERon	6	/* odd poly, negative terms */
--static const unsigned long long oddnegterms[HIPOWERon] =
--{
--  0x0000000000000000LL, /* Dummy (not for - 1.0) */
--  0x015328437f756467LL,
--  0x0005dda27b73dec6LL,
--  0x0000226bf2bfb91aLL,
--  0x000000ccc439c5f7LL,
--  0x0000000355438407LL
--} ;
-+static const unsigned long long oddnegterms[HIPOWERon] = {
-+	0x0000000000000000LL,	/* Dummy (not for - 1.0) */
-+	0x015328437f756467LL,
-+	0x0005dda27b73dec6LL,
-+	0x0000226bf2bfb91aLL,
-+	0x000000ccc439c5f7LL,
-+	0x0000000355438407LL
-+};
+ /* Called when the task exits */
+@@ -462,7 +464,9 @@ static void as_antic_timeout(unsigned long data)
+ 	spin_lock_irqsave(q->queue_lock, flags);
+ 	if (ad->antic_status == ANTIC_WAIT_REQ
+ 			|| ad->antic_status == ANTIC_WAIT_NEXT) {
+-		struct as_io_context *aic = ad->io_context->aic;
++		struct as_io_context *aic;
++		spin_lock(&ad->io_context->lock);
++		aic = ad->io_context->aic;
  
- #define	HIPOWERop	6	/* odd poly, positive terms */
--static const unsigned long long oddplterms[HIPOWERop] =
--{
-+static const unsigned long long oddplterms[HIPOWERop] = {
- /*  0xaaaaaaaaaaaaaaabLL,  transferred to fixedpterm[] */
--  0x0db55a71875c9ac2LL,
--  0x0029fce2d67880b0LL,
--  0x0000dfd3908b4596LL,
--  0x00000550fd61dab4LL,
--  0x0000001c9422b3f9LL,
--  0x000000003e3301e1LL
-+	0x0db55a71875c9ac2LL,
-+	0x0029fce2d67880b0LL,
-+	0x0000dfd3908b4596LL,
-+	0x00000550fd61dab4LL,
-+	0x0000001c9422b3f9LL,
-+	0x000000003e3301e1LL
- };
+ 		ad->antic_status = ANTIC_FINISHED;
+ 		kblockd_schedule_work(&ad->antic_work);
+@@ -475,6 +479,7 @@ static void as_antic_timeout(unsigned long data)
+ 			/* process not "saved" by a cooperating request */
+ 			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
+ 		}
++		spin_unlock(&ad->io_context->lock);
+ 	}
+ 	spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+@@ -635,9 +640,11 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
  
- static const unsigned long long denomterm = 0xebd9b842c5c53a0eLL;
-@@ -48,182 +45,164 @@ static const Xsig fixedpterm = MK_XSIG(0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa);
+ 	ioc = ad->io_context;
+ 	BUG_ON(!ioc);
++	spin_lock(&ioc->lock);
  
- static const Xsig pi_signif = MK_XSIG(0xc90fdaa2, 0x2168c234, 0xc4c6628b);
+ 	if (rq && ioc == RQ_IOC(rq)) {
+ 		/* request from same process */
++		spin_unlock(&ioc->lock);
+ 		return 1;
+ 	}
  
+@@ -646,20 +653,25 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
+ 		 * In this situation status should really be FINISHED,
+ 		 * however the timer hasn't had the chance to run yet.
+ 		 */
++		spin_unlock(&ioc->lock);
+ 		return 1;
+ 	}
+ 
+ 	aic = ioc->aic;
+-	if (!aic)
++	if (!aic) {
++		spin_unlock(&ioc->lock);
+ 		return 0;
++	}
+ 
+ 	if (atomic_read(&aic->nr_queued) > 0) {
+ 		/* process has more requests queued */
++		spin_unlock(&ioc->lock);
+ 		return 1;
+ 	}
+ 
+ 	if (atomic_read(&aic->nr_dispatched) > 0) {
+ 		/* process has more requests dispatched */
++		spin_unlock(&ioc->lock);
+ 		return 1;
+ 	}
+ 
+@@ -680,6 +692,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
+ 		}
+ 
+ 		as_update_iohist(ad, aic, rq);
++		spin_unlock(&ioc->lock);
+ 		return 1;
+ 	}
+ 
+@@ -688,20 +701,27 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
+ 		if (aic->ttime_samples == 0)
+ 			ad->exit_prob = (7*ad->exit_prob + 256)/8;
+ 
+-		if (ad->exit_no_coop > 128)
++		if (ad->exit_no_coop > 128) {
++			spin_unlock(&ioc->lock);
+ 			return 1;
++		}
+ 	}
+ 
+ 	if (aic->ttime_samples == 0) {
+-		if (ad->new_ttime_mean > ad->antic_expire)
++		if (ad->new_ttime_mean > ad->antic_expire) {
++			spin_unlock(&ioc->lock);
+ 			return 1;
+-		if (ad->exit_prob * ad->exit_no_coop > 128*256)
++		}
++		if (ad->exit_prob * ad->exit_no_coop > 128*256) {
++			spin_unlock(&ioc->lock);
+ 			return 1;
++		}
+ 	} else if (aic->ttime_mean > ad->antic_expire) {
+ 		/* the process thinks too much between requests */
++		spin_unlock(&ioc->lock);
+ 		return 1;
+ 	}
 -
- /*--- poly_atan() -----------------------------------------------------------+
-  |                                                                           |
-  +---------------------------------------------------------------------------*/
--void	poly_atan(FPU_REG *st0_ptr, u_char st0_tag,
--		  FPU_REG *st1_ptr, u_char st1_tag)
-+void poly_atan(FPU_REG *st0_ptr, u_char st0_tag,
-+	       FPU_REG *st1_ptr, u_char st1_tag)
- {
--  u_char	transformed, inverted,
--                sign1, sign2;
--  int           exponent;
--  long int   	dummy_exp;
--  Xsig          accumulator, Numer, Denom, accumulatore, argSignif,
--                argSq, argSqSq;
--  u_char        tag;
--  
--  sign1 = getsign(st0_ptr);
--  sign2 = getsign(st1_ptr);
--  if ( st0_tag == TAG_Valid )
--    {
--      exponent = exponent(st0_ptr);
--    }
--  else
--    {
--      /* This gives non-compatible stack contents... */
--      FPU_to_exp16(st0_ptr, st0_ptr);
--      exponent = exponent16(st0_ptr);
--    }
--  if ( st1_tag == TAG_Valid )
--    {
--      exponent -= exponent(st1_ptr);
--    }
--  else
--    {
--      /* This gives non-compatible stack contents... */
--      FPU_to_exp16(st1_ptr, st1_ptr);
--      exponent -= exponent16(st1_ptr);
--    }
--
--  if ( (exponent < 0) || ((exponent == 0) &&
--			  ((st0_ptr->sigh < st1_ptr->sigh) ||
--			   ((st0_ptr->sigh == st1_ptr->sigh) &&
--			    (st0_ptr->sigl < st1_ptr->sigl))) ) )
--    {
--      inverted = 1;
--      Numer.lsw = Denom.lsw = 0;
--      XSIG_LL(Numer) = significand(st0_ptr);
--      XSIG_LL(Denom) = significand(st1_ptr);
--    }
--  else
--    {
--      inverted = 0;
--      exponent = -exponent;
--      Numer.lsw = Denom.lsw = 0;
--      XSIG_LL(Numer) = significand(st1_ptr);
--      XSIG_LL(Denom) = significand(st0_ptr);
--     }
--  div_Xsig(&Numer, &Denom, &argSignif);
--  exponent += norm_Xsig(&argSignif);
--
--  if ( (exponent >= -1)
--      || ((exponent == -2) && (argSignif.msw > 0xd413ccd0)) )
--    {
--      /* The argument is greater than sqrt(2)-1 (=0.414213562...) */
--      /* Convert the argument by an identity for atan */
--      transformed = 1;
--
--      if ( exponent >= 0 )
--	{
-+	u_char transformed, inverted, sign1, sign2;
-+	int exponent;
-+	long int dummy_exp;
-+	Xsig accumulator, Numer, Denom, accumulatore, argSignif, argSq, argSqSq;
-+	u_char tag;
++	spin_unlock(&ioc->lock);
+ 	return 0;
+ }
+ 
+@@ -1255,7 +1275,13 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
+ 			 * Don't copy here but swap, because when anext is
+ 			 * removed below, it must contain the unused context
+ 			 */
+-			swap_io_context(&rioc, &nioc);
++			if (rioc != nioc) {
++				double_spin_lock(&rioc->lock, &nioc->lock,
++								rioc < nioc);
++				swap_io_context(&rioc, &nioc);
++				double_spin_unlock(&rioc->lock, &nioc->lock,
++								rioc < nioc);
++			}
+ 		}
+ 	}
+ 
+diff --git a/block/blk-barrier.c b/block/blk-barrier.c
+new file mode 100644
+index 0000000..5f74fec
+--- /dev/null
++++ b/block/blk-barrier.c
+@@ -0,0 +1,319 @@
++/*
++ * Functions related to barrier IO handling
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
 +
-+	sign1 = getsign(st0_ptr);
-+	sign2 = getsign(st1_ptr);
-+	if (st0_tag == TAG_Valid) {
-+		exponent = exponent(st0_ptr);
-+	} else {
-+		/* This gives non-compatible stack contents... */
-+		FPU_to_exp16(st0_ptr, st0_ptr);
-+		exponent = exponent16(st0_ptr);
++#include "blk.h"
++
++/**
++ * blk_queue_ordered - does this queue support ordered writes
++ * @q:        the request queue
++ * @ordered:  one of QUEUE_ORDERED_*
++ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
++ *
++ * Description:
++ *   For journalled file systems, doing ordered writes on a commit
++ *   block instead of explicitly doing wait_on_buffer (which is bad
++ *   for performance) can be a big win. Block drivers supporting this
++ *   feature should call this function and indicate so.
++ *
++ **/
++int blk_queue_ordered(struct request_queue *q, unsigned ordered,
++		      prepare_flush_fn *prepare_flush_fn)
++{
++	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
++	    prepare_flush_fn == NULL) {
++		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
++		return -EINVAL;
 +	}
-+	if (st1_tag == TAG_Valid) {
-+		exponent -= exponent(st1_ptr);
-+	} else {
-+		/* This gives non-compatible stack contents... */
-+		FPU_to_exp16(st1_ptr, st1_ptr);
-+		exponent -= exponent16(st1_ptr);
++
++	if (ordered != QUEUE_ORDERED_NONE &&
++	    ordered != QUEUE_ORDERED_DRAIN &&
++	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
++	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
++	    ordered != QUEUE_ORDERED_TAG &&
++	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
++	    ordered != QUEUE_ORDERED_TAG_FUA) {
++		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
++		return -EINVAL;
 +	}
 +
-+	if ((exponent < 0) || ((exponent == 0) &&
-+			       ((st0_ptr->sigh < st1_ptr->sigh) ||
-+				((st0_ptr->sigh == st1_ptr->sigh) &&
-+				 (st0_ptr->sigl < st1_ptr->sigl))))) {
-+		inverted = 1;
-+		Numer.lsw = Denom.lsw = 0;
-+		XSIG_LL(Numer) = significand(st0_ptr);
-+		XSIG_LL(Denom) = significand(st1_ptr);
++	q->ordered = ordered;
++	q->next_ordered = ordered;
++	q->prepare_flush_fn = prepare_flush_fn;
++
++	return 0;
++}
++
++EXPORT_SYMBOL(blk_queue_ordered);
++
++/*
++ * Cache flushing for ordered writes handling
++ */
++inline unsigned blk_ordered_cur_seq(struct request_queue *q)
++{
++	if (!q->ordseq)
++		return 0;
++	return 1 << ffz(q->ordseq);
++}
++
++unsigned blk_ordered_req_seq(struct request *rq)
++{
++	struct request_queue *q = rq->q;
++
++	BUG_ON(q->ordseq == 0);
++
++	if (rq == &q->pre_flush_rq)
++		return QUEUE_ORDSEQ_PREFLUSH;
++	if (rq == &q->bar_rq)
++		return QUEUE_ORDSEQ_BAR;
++	if (rq == &q->post_flush_rq)
++		return QUEUE_ORDSEQ_POSTFLUSH;
++
++	/*
++	 * !fs requests don't need to follow barrier ordering.  Always
++	 * put them at the front.  This fixes the following deadlock.
++	 *
++	 * http://thread.gmane.org/gmane.linux.kernel/537473
++	 */
++	if (!blk_fs_request(rq))
++		return QUEUE_ORDSEQ_DRAIN;
++
++	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
++	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
++		return QUEUE_ORDSEQ_DRAIN;
++	else
++		return QUEUE_ORDSEQ_DONE;
++}
++
++void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
++{
++	struct request *rq;
++
++	if (error && !q->orderr)
++		q->orderr = error;
++
++	BUG_ON(q->ordseq & seq);
++	q->ordseq |= seq;
++
++	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
++		return;
++
++	/*
++	 * Okay, sequence complete.
++	 */
++	q->ordseq = 0;
++	rq = q->orig_bar_rq;
++
++	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
++		BUG();
++}
++
++static void pre_flush_end_io(struct request *rq, int error)
++{
++	elv_completed_request(rq->q, rq);
++	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
++}
++
++static void bar_end_io(struct request *rq, int error)
++{
++	elv_completed_request(rq->q, rq);
++	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
++}
++
++static void post_flush_end_io(struct request *rq, int error)
++{
++	elv_completed_request(rq->q, rq);
++	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
++}
++
++static void queue_flush(struct request_queue *q, unsigned which)
++{
++	struct request *rq;
++	rq_end_io_fn *end_io;
++
++	if (which == QUEUE_ORDERED_PREFLUSH) {
++		rq = &q->pre_flush_rq;
++		end_io = pre_flush_end_io;
 +	} else {
-+		inverted = 0;
-+		exponent = -exponent;
-+		Numer.lsw = Denom.lsw = 0;
-+		XSIG_LL(Numer) = significand(st1_ptr);
-+		XSIG_LL(Denom) = significand(st0_ptr);
++		rq = &q->post_flush_rq;
++		end_io = post_flush_end_io;
 +	}
-+	div_Xsig(&Numer, &Denom, &argSignif);
-+	exponent += norm_Xsig(&argSignif);
 +
-+	if ((exponent >= -1)
-+	    || ((exponent == -2) && (argSignif.msw > 0xd413ccd0))) {
-+		/* The argument is greater than sqrt(2)-1 (=0.414213562...) */
-+		/* Convert the argument by an identity for atan */
-+		transformed = 1;
++	rq->cmd_flags = REQ_HARDBARRIER;
++	rq_init(q, rq);
++	rq->elevator_private = NULL;
++	rq->elevator_private2 = NULL;
++	rq->rq_disk = q->bar_rq.rq_disk;
++	rq->end_io = end_io;
++	q->prepare_flush_fn(q, rq);
 +
-+		if (exponent >= 0) {
- #ifdef PARANOID
--	  if ( !( (exponent == 0) && 
--		 (argSignif.lsw == 0) && (argSignif.midw == 0) &&
--		 (argSignif.msw == 0x80000000) ) )
--	    {
--	      EXCEPTION(EX_INTERNAL|0x104);  /* There must be a logic error */
--	      return;
--	    }
-+			if (!((exponent == 0) &&
-+			      (argSignif.lsw == 0) && (argSignif.midw == 0) &&
-+			      (argSignif.msw == 0x80000000))) {
-+				EXCEPTION(EX_INTERNAL | 0x104);	/* There must be a logic error */
-+				return;
-+			}
- #endif /* PARANOID */
--	  argSignif.msw = 0;   /* Make the transformed arg -> 0.0 */
-+			argSignif.msw = 0;	/* Make the transformed arg -> 0.0 */
-+		} else {
-+			Numer.lsw = Denom.lsw = argSignif.lsw;
-+			XSIG_LL(Numer) = XSIG_LL(Denom) = XSIG_LL(argSignif);
++	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
++}
 +
-+			if (exponent < -1)
-+				shr_Xsig(&Numer, -1 - exponent);
-+			negate_Xsig(&Numer);
++static inline struct request *start_ordered(struct request_queue *q,
++					    struct request *rq)
++{
++	q->orderr = 0;
++	q->ordered = q->next_ordered;
++	q->ordseq |= QUEUE_ORDSEQ_STARTED;
 +
-+			shr_Xsig(&Denom, -exponent);
-+			Denom.msw |= 0x80000000;
++	/*
++	 * Prep proxy barrier request.
++	 */
++	blkdev_dequeue_request(rq);
++	q->orig_bar_rq = rq;
++	rq = &q->bar_rq;
++	rq->cmd_flags = 0;
++	rq_init(q, rq);
++	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
++		rq->cmd_flags |= REQ_RW;
++	if (q->ordered & QUEUE_ORDERED_FUA)
++		rq->cmd_flags |= REQ_FUA;
++	rq->elevator_private = NULL;
++	rq->elevator_private2 = NULL;
++	init_request_from_bio(rq, q->orig_bar_rq->bio);
++	rq->end_io = bar_end_io;
 +
-+			div_Xsig(&Numer, &Denom, &argSignif);
++	/*
++	 * Queue ordered sequence.  As we stack them at the head, we
++	 * need to queue in reverse order.  Note that we rely on that
++	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
++	 * request gets inbetween ordered sequence. If this request is
++	 * an empty barrier, we don't need to do a postflush ever since
++	 * there will be no data written between the pre and post flush.
++	 * Hence a single flush will suffice.
++	 */
++	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
++		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
++	else
++		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 +
-+			exponent = -1 + norm_Xsig(&argSignif);
++	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
++
++	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
++		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
++		rq = &q->pre_flush_rq;
++	} else
++		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
++
++	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
++		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
++	else
++		rq = NULL;
++
++	return rq;
++}
++
++int blk_do_ordered(struct request_queue *q, struct request **rqp)
++{
++	struct request *rq = *rqp;
++	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
++
++	if (!q->ordseq) {
++		if (!is_barrier)
++			return 1;
++
++		if (q->next_ordered != QUEUE_ORDERED_NONE) {
++			*rqp = start_ordered(q, rq);
++			return 1;
++		} else {
++			/*
++			 * This can happen when the queue switches to
++			 * ORDERED_NONE while this request is on it.
++			 */
++			blkdev_dequeue_request(rq);
++			if (__blk_end_request(rq, -EOPNOTSUPP,
++					      blk_rq_bytes(rq)))
++				BUG();
++			*rqp = NULL;
++			return 0;
 +		}
++	}
++
++	/*
++	 * Ordered sequence in progress
++	 */
++
++	/* Special requests are not subject to ordering rules. */
++	if (!blk_fs_request(rq) &&
++	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
++		return 1;
++
++	if (q->ordered & QUEUE_ORDERED_TAG) {
++		/* Ordered by tag.  Blocking the next barrier is enough. */
++		if (is_barrier && rq != &q->bar_rq)
++			*rqp = NULL;
 +	} else {
-+		transformed = 0;
++		/* Ordered by draining.  Wait for turn. */
++		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
++		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
++			*rqp = NULL;
 +	}
 +
-+	argSq.lsw = argSignif.lsw;
-+	argSq.midw = argSignif.midw;
-+	argSq.msw = argSignif.msw;
-+	mul_Xsig_Xsig(&argSq, &argSq);
++	return 1;
++}
 +
-+	argSqSq.lsw = argSq.lsw;
-+	argSqSq.midw = argSq.midw;
-+	argSqSq.msw = argSq.msw;
-+	mul_Xsig_Xsig(&argSqSq, &argSqSq);
++static void bio_end_empty_barrier(struct bio *bio, int err)
++{
++	if (err)
++		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 +
-+	accumulatore.lsw = argSq.lsw;
-+	XSIG_LL(accumulatore) = XSIG_LL(argSq);
++	complete(bio->bi_private);
++}
 +
-+	shr_Xsig(&argSq, 2 * (-1 - exponent - 1));
-+	shr_Xsig(&argSqSq, 4 * (-1 - exponent - 1));
++/**
++ * blkdev_issue_flush - queue a flush
++ * @bdev:	blockdev to issue flush for
++ * @error_sector:	error sector
++ *
++ * Description:
++ *    Issue a flush for the block device in question. Caller can supply
++ *    room for storing the error offset in case of a flush error, if they
++ *    wish to.  Caller must run wait_for_completion() on its own.
++ */
++int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
++{
++	DECLARE_COMPLETION_ONSTACK(wait);
++	struct request_queue *q;
++	struct bio *bio;
++	int ret;
 +
-+	/* Now have argSq etc with binary point at the left
-+	   .1xxxxxxxx */
++	if (bdev->bd_disk == NULL)
++		return -ENXIO;
 +
-+	/* Do the basic fixed point polynomial evaluation */
-+	accumulator.msw = accumulator.midw = accumulator.lsw = 0;
-+	polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq),
-+			oddplterms, HIPOWERop - 1);
-+	mul64_Xsig(&accumulator, &XSIG_LL(argSq));
-+	negate_Xsig(&accumulator);
-+	polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq), oddnegterms,
-+			HIPOWERon - 1);
-+	negate_Xsig(&accumulator);
-+	add_two_Xsig(&accumulator, &fixedpterm, &dummy_exp);
++	q = bdev_get_queue(bdev);
++	if (!q)
++		return -ENXIO;
 +
-+	mul64_Xsig(&accumulatore, &denomterm);
-+	shr_Xsig(&accumulatore, 1 + 2 * (-1 - exponent));
-+	accumulatore.msw |= 0x80000000;
++	bio = bio_alloc(GFP_KERNEL, 0);
++	if (!bio)
++		return -ENOMEM;
 +
-+	div_Xsig(&accumulator, &accumulatore, &accumulator);
++	bio->bi_end_io = bio_end_empty_barrier;
++	bio->bi_private = &wait;
++	bio->bi_bdev = bdev;
++	submit_bio(1 << BIO_RW_BARRIER, bio);
 +
-+	mul_Xsig_Xsig(&accumulator, &argSignif);
-+	mul_Xsig_Xsig(&accumulator, &argSq);
++	wait_for_completion(&wait);
 +
-+	shr_Xsig(&accumulator, 3);
-+	negate_Xsig(&accumulator);
-+	add_Xsig_Xsig(&accumulator, &argSignif);
++	/*
++	 * The driver must store the error location in ->bi_sector, if
++	 * it supports it. For non-stacked drivers, this should be copied
++	 * from rq->sector.
++	 */
++	if (error_sector)
++		*error_sector = bio->bi_sector;
 +
-+	if (transformed) {
-+		/* compute pi/4 - accumulator */
-+		shr_Xsig(&accumulator, -1 - exponent);
-+		negate_Xsig(&accumulator);
-+		add_Xsig_Xsig(&accumulator, &pi_signif);
-+		exponent = -1;
++	ret = 0;
++	if (!bio_flagged(bio, BIO_UPTODATE))
++		ret = -EIO;
++
++	bio_put(bio);
++	return ret;
++}
++
++EXPORT_SYMBOL(blkdev_issue_flush);
+diff --git a/block/blk-core.c b/block/blk-core.c
+new file mode 100644
+index 0000000..8ff9944
+--- /dev/null
++++ b/block/blk-core.c
+@@ -0,0 +1,2034 @@
++/*
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
++ * Elevator latency, (C) 2000  Andrea Arcangeli <andrea at suse.de> SuSE
++ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe at suse.de>
++ * kernel-doc documentation started by NeilBrown <neilb at cse.unsw.edu.au> -  July2000
++ * bio rewrite, highmem i/o, etc, Jens Axboe <axboe at suse.de> - may 2001
++ */
++
++/*
++ * This handles all read/write requests to block devices
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/backing-dev.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/highmem.h>
++#include <linux/mm.h>
++#include <linux/kernel_stat.h>
++#include <linux/string.h>
++#include <linux/init.h>
++#include <linux/completion.h>
++#include <linux/slab.h>
++#include <linux/swap.h>
++#include <linux/writeback.h>
++#include <linux/task_io_accounting_ops.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/blktrace_api.h>
++#include <linux/fault-inject.h>
++
++#include "blk.h"
++
++static int __make_request(struct request_queue *q, struct bio *bio);
++
++/*
++ * For the allocated request tables
++ */
++struct kmem_cache *request_cachep;
++
++/*
++ * For queue allocation
++ */
++struct kmem_cache *blk_requestq_cachep = NULL;
++
++/*
++ * Controlling structure to kblockd
++ */
++static struct workqueue_struct *kblockd_workqueue;
++
++static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
++
++static void drive_stat_acct(struct request *rq, int new_io)
++{
++	int rw = rq_data_dir(rq);
++
++	if (!blk_fs_request(rq) || !rq->rq_disk)
++		return;
++
++	if (!new_io) {
++		__disk_stat_inc(rq->rq_disk, merges[rw]);
++	} else {
++		disk_round_stats(rq->rq_disk);
++		rq->rq_disk->in_flight++;
 +	}
++}
 +
-+	if (inverted) {
-+		/* compute pi/2 - accumulator */
-+		shr_Xsig(&accumulator, -exponent);
-+		negate_Xsig(&accumulator);
-+		add_Xsig_Xsig(&accumulator, &pi_signif);
-+		exponent = 0;
- 	}
--      else
--	{
--	  Numer.lsw = Denom.lsw = argSignif.lsw;
--	  XSIG_LL(Numer) = XSIG_LL(Denom) = XSIG_LL(argSignif);
--
--	  if ( exponent < -1 )
--	    shr_Xsig(&Numer, -1-exponent);
--	  negate_Xsig(&Numer);
--      
--	  shr_Xsig(&Denom, -exponent);
--	  Denom.msw |= 0x80000000;
--      
--	  div_Xsig(&Numer, &Denom, &argSignif);
--
--	  exponent = -1 + norm_Xsig(&argSignif);
++void blk_queue_congestion_threshold(struct request_queue *q)
++{
++	int nr;
 +
-+	if (sign1) {
-+		/* compute pi - accumulator */
-+		shr_Xsig(&accumulator, 1 - exponent);
-+		negate_Xsig(&accumulator);
-+		add_Xsig_Xsig(&accumulator, &pi_signif);
-+		exponent = 1;
- 	}
--    }
--  else
--    {
--      transformed = 0;
--    }
--
--  argSq.lsw = argSignif.lsw; argSq.midw = argSignif.midw;
--  argSq.msw = argSignif.msw;
--  mul_Xsig_Xsig(&argSq, &argSq);
--  
--  argSqSq.lsw = argSq.lsw; argSqSq.midw = argSq.midw; argSqSq.msw = argSq.msw;
--  mul_Xsig_Xsig(&argSqSq, &argSqSq);
--
--  accumulatore.lsw = argSq.lsw;
--  XSIG_LL(accumulatore) = XSIG_LL(argSq);
--
--  shr_Xsig(&argSq, 2*(-1-exponent-1));
--  shr_Xsig(&argSqSq, 4*(-1-exponent-1));
--
--  /* Now have argSq etc with binary point at the left
--     .1xxxxxxxx */
--
--  /* Do the basic fixed point polynomial evaluation */
--  accumulator.msw = accumulator.midw = accumulator.lsw = 0;
--  polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq),
--		   oddplterms, HIPOWERop-1);
--  mul64_Xsig(&accumulator, &XSIG_LL(argSq));
--  negate_Xsig(&accumulator);
--  polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq), oddnegterms, HIPOWERon-1);
--  negate_Xsig(&accumulator);
--  add_two_Xsig(&accumulator, &fixedpterm, &dummy_exp);
--
--  mul64_Xsig(&accumulatore, &denomterm);
--  shr_Xsig(&accumulatore, 1 + 2*(-1-exponent));
--  accumulatore.msw |= 0x80000000;
--
--  div_Xsig(&accumulator, &accumulatore, &accumulator);
--
--  mul_Xsig_Xsig(&accumulator, &argSignif);
--  mul_Xsig_Xsig(&accumulator, &argSq);
--
--  shr_Xsig(&accumulator, 3);
--  negate_Xsig(&accumulator);
--  add_Xsig_Xsig(&accumulator, &argSignif);
--
--  if ( transformed )
--    {
--      /* compute pi/4 - accumulator */
--      shr_Xsig(&accumulator, -1-exponent);
--      negate_Xsig(&accumulator);
--      add_Xsig_Xsig(&accumulator, &pi_signif);
--      exponent = -1;
--    }
--
--  if ( inverted )
--    {
--      /* compute pi/2 - accumulator */
--      shr_Xsig(&accumulator, -exponent);
--      negate_Xsig(&accumulator);
--      add_Xsig_Xsig(&accumulator, &pi_signif);
--      exponent = 0;
--    }
--
--  if ( sign1 )
--    {
--      /* compute pi - accumulator */
--      shr_Xsig(&accumulator, 1 - exponent);
--      negate_Xsig(&accumulator);
--      add_Xsig_Xsig(&accumulator, &pi_signif);
--      exponent = 1;
--    }
--
--  exponent += round_Xsig(&accumulator);
--
--  significand(st1_ptr) = XSIG_LL(accumulator);
--  setexponent16(st1_ptr, exponent);
--
--  tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign2);
--  FPU_settagi(1, tag);
--
--  set_precision_flag_up();  /* We do not really know if up or down,
--			       use this as the default. */
++	nr = q->nr_requests - (q->nr_requests / 8) + 1;
++	if (nr > q->nr_requests)
++		nr = q->nr_requests;
++	q->nr_congestion_on = nr;
 +
-+	exponent += round_Xsig(&accumulator);
++	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
++	if (nr < 1)
++		nr = 1;
++	q->nr_congestion_off = nr;
++}
 +
-+	significand(st1_ptr) = XSIG_LL(accumulator);
-+	setexponent16(st1_ptr, exponent);
++/**
++ * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
++ * @bdev:	device
++ *
++ * Locates the passed device's request queue and returns the address of its
++ * backing_dev_info
++ *
++ * Will return NULL if the request queue cannot be located.
++ */
++struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
++{
++	struct backing_dev_info *ret = NULL;
++	struct request_queue *q = bdev_get_queue(bdev);
 +
-+	tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign2);
-+	FPU_settagi(1, tag);
++	if (q)
++		ret = &q->backing_dev_info;
++	return ret;
++}
++EXPORT_SYMBOL(blk_get_backing_dev_info);
 +
-+	set_precision_flag_up();	/* We do not really know if up or down,
-+					   use this as the default. */
- 
- }
-diff --git a/arch/x86/math-emu/poly_l2.c b/arch/x86/math-emu/poly_l2.c
-index dd00e1d..8e2ff4b 100644
---- a/arch/x86/math-emu/poly_l2.c
-+++ b/arch/x86/math-emu/poly_l2.c
-@@ -10,7 +10,6 @@
-  |                                                                           |
-  +---------------------------------------------------------------------------*/
- 
--
- #include "exception.h"
- #include "reg_constant.h"
- #include "fpu_emu.h"
-@@ -18,184 +17,163 @@
- #include "control_w.h"
- #include "poly.h"
- 
--
- static void log2_kernel(FPU_REG const *arg, u_char argsign,
--			Xsig *accum_result, long int *expon);
--
-+			Xsig * accum_result, long int *expon);
- 
- /*--- poly_l2() -------------------------------------------------------------+
-  |   Base 2 logarithm by a polynomial approximation.                         |
-  +---------------------------------------------------------------------------*/
--void	poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign)
-+void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign)
- {
--  long int	       exponent, expon, expon_expon;
--  Xsig                 accumulator, expon_accum, yaccum;
--  u_char		       sign, argsign;
--  FPU_REG              x;
--  int                  tag;
--
--  exponent = exponent16(st0_ptr);
--
--  /* From st0_ptr, make a number > sqrt(2)/2 and < sqrt(2) */
--  if ( st0_ptr->sigh > (unsigned)0xb504f334 )
--    {
--      /* Treat as  sqrt(2)/2 < st0_ptr < 1 */
--      significand(&x) = - significand(st0_ptr);
--      setexponent16(&x, -1);
--      exponent++;
--      argsign = SIGN_NEG;
--    }
--  else
--    {
--      /* Treat as  1 <= st0_ptr < sqrt(2) */
--      x.sigh = st0_ptr->sigh - 0x80000000;
--      x.sigl = st0_ptr->sigl;
--      setexponent16(&x, 0);
--      argsign = SIGN_POS;
--    }
--  tag = FPU_normalize_nuo(&x);
--
--  if ( tag == TAG_Zero )
--    {
--      expon = 0;
--      accumulator.msw = accumulator.midw = accumulator.lsw = 0;
--    }
--  else
--    {
--      log2_kernel(&x, argsign, &accumulator, &expon);
--    }
--
--  if ( exponent < 0 )
--    {
--      sign = SIGN_NEG;
--      exponent = -exponent;
--    }
--  else
--    sign = SIGN_POS;
--  expon_accum.msw = exponent; expon_accum.midw = expon_accum.lsw = 0;
--  if ( exponent )
--    {
--      expon_expon = 31 + norm_Xsig(&expon_accum);
--      shr_Xsig(&accumulator, expon_expon - expon);
--
--      if ( sign ^ argsign )
--	negate_Xsig(&accumulator);
--      add_Xsig_Xsig(&accumulator, &expon_accum);
--    }
--  else
--    {
--      expon_expon = expon;
--      sign = argsign;
--    }
--
--  yaccum.lsw = 0; XSIG_LL(yaccum) = significand(st1_ptr);
--  mul_Xsig_Xsig(&accumulator, &yaccum);
--
--  expon_expon += round_Xsig(&accumulator);
--
--  if ( accumulator.msw == 0 )
--    {
--      FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
--      return;
--    }
--
--  significand(st1_ptr) = XSIG_LL(accumulator);
--  setexponent16(st1_ptr, expon_expon + exponent16(st1_ptr) + 1);
--
--  tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign ^ st1_sign);
--  FPU_settagi(1, tag);
--
--  set_precision_flag_up();  /* 80486 appears to always do this */
--
--  return;
-+	long int exponent, expon, expon_expon;
-+	Xsig accumulator, expon_accum, yaccum;
-+	u_char sign, argsign;
-+	FPU_REG x;
-+	int tag;
++void rq_init(struct request_queue *q, struct request *rq)
++{
++	INIT_LIST_HEAD(&rq->queuelist);
++	INIT_LIST_HEAD(&rq->donelist);
 +
-+	exponent = exponent16(st0_ptr);
++	rq->errors = 0;
++	rq->bio = rq->biotail = NULL;
++	INIT_HLIST_NODE(&rq->hash);
++	RB_CLEAR_NODE(&rq->rb_node);
++	rq->ioprio = 0;
++	rq->buffer = NULL;
++	rq->ref_count = 1;
++	rq->q = q;
++	rq->special = NULL;
++	rq->data_len = 0;
++	rq->data = NULL;
++	rq->nr_phys_segments = 0;
++	rq->sense = NULL;
++	rq->end_io = NULL;
++	rq->end_io_data = NULL;
++	rq->completion_data = NULL;
++	rq->next_rq = NULL;
++}
 +
-+	/* From st0_ptr, make a number > sqrt(2)/2 and < sqrt(2) */
-+	if (st0_ptr->sigh > (unsigned)0xb504f334) {
-+		/* Treat as  sqrt(2)/2 < st0_ptr < 1 */
-+		significand(&x) = -significand(st0_ptr);
-+		setexponent16(&x, -1);
-+		exponent++;
-+		argsign = SIGN_NEG;
++static void req_bio_endio(struct request *rq, struct bio *bio,
++			  unsigned int nbytes, int error)
++{
++	struct request_queue *q = rq->q;
++
++	if (&q->bar_rq != rq) {
++		if (error)
++			clear_bit(BIO_UPTODATE, &bio->bi_flags);
++		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
++			error = -EIO;
++
++		if (unlikely(nbytes > bio->bi_size)) {
++			printk("%s: want %u bytes done, only %u left\n",
++			       __FUNCTION__, nbytes, bio->bi_size);
++			nbytes = bio->bi_size;
++		}
++
++		bio->bi_size -= nbytes;
++		bio->bi_sector += (nbytes >> 9);
++		if (bio->bi_size == 0)
++			bio_endio(bio, error);
 +	} else {
-+		/* Treat as  1 <= st0_ptr < sqrt(2) */
-+		x.sigh = st0_ptr->sigh - 0x80000000;
-+		x.sigl = st0_ptr->sigl;
-+		setexponent16(&x, 0);
-+		argsign = SIGN_POS;
++
++		/*
++		 * Okay, this is the barrier request in progress, just
++		 * record the error;
++		 */
++		if (error && !q->orderr)
++			q->orderr = error;
 +	}
-+	tag = FPU_normalize_nuo(&x);
- 
--}
-+	if (tag == TAG_Zero) {
-+		expon = 0;
-+		accumulator.msw = accumulator.midw = accumulator.lsw = 0;
++}
++
++void blk_dump_rq_flags(struct request *rq, char *msg)
++{
++	int bit;
++
++	printk("%s: dev %s: type=%x, flags=%x\n", msg,
++		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
++		rq->cmd_flags);
++
++	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
++						       rq->nr_sectors,
++						       rq->current_nr_sectors);
++	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
++
++	if (blk_pc_request(rq)) {
++		printk("cdb: ");
++		for (bit = 0; bit < sizeof(rq->cmd); bit++)
++			printk("%02x ", rq->cmd[bit]);
++		printk("\n");
++	}
++}
++
++EXPORT_SYMBOL(blk_dump_rq_flags);
++
++/*
++ * "plug" the device if there are no outstanding requests: this will
++ * force the transfer to start only after we have put all the requests
++ * on the list.
++ *
++ * This is called with interrupts off and no requests on the queue and
++ * with the queue lock held.
++ */
++void blk_plug_device(struct request_queue *q)
++{
++	WARN_ON(!irqs_disabled());
++
++	/*
++	 * don't plug a stopped queue, it must be paired with blk_start_queue()
++	 * which will restart the queueing
++	 */
++	if (blk_queue_stopped(q))
++		return;
++
++	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
++		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
++		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
++	}
++}
++
++EXPORT_SYMBOL(blk_plug_device);
++
++/*
++ * remove the queue from the plugged list, if present. called with
++ * queue lock held and interrupts disabled.
++ */
++int blk_remove_plug(struct request_queue *q)
++{
++	WARN_ON(!irqs_disabled());
++
++	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
++		return 0;
++
++	del_timer(&q->unplug_timer);
++	return 1;
++}
++
++EXPORT_SYMBOL(blk_remove_plug);
++
++/*
++ * remove the plug and let it rip..
++ */
++void __generic_unplug_device(struct request_queue *q)
++{
++	if (unlikely(blk_queue_stopped(q)))
++		return;
++
++	if (!blk_remove_plug(q))
++		return;
++
++	q->request_fn(q);
++}
++EXPORT_SYMBOL(__generic_unplug_device);
++
++/**
++ * generic_unplug_device - fire a request queue
++ * @q:    The &struct request_queue in question
++ *
++ * Description:
++ *   Linux uses plugging to build bigger requests queues before letting
++ *   the device have at them. If a queue is plugged, the I/O scheduler
++ *   is still adding and merging requests on the queue. Once the queue
++ *   gets unplugged, the request_fn defined for the queue is invoked and
++ *   transfers started.
++ **/
++void generic_unplug_device(struct request_queue *q)
++{
++	spin_lock_irq(q->queue_lock);
++	__generic_unplug_device(q);
++	spin_unlock_irq(q->queue_lock);
++}
++EXPORT_SYMBOL(generic_unplug_device);
++
++static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
++				   struct page *page)
++{
++	struct request_queue *q = bdi->unplug_io_data;
++
++	blk_unplug(q);
++}
++
++void blk_unplug_work(struct work_struct *work)
++{
++	struct request_queue *q =
++		container_of(work, struct request_queue, unplug_work);
++
++	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
++				q->rq.count[READ] + q->rq.count[WRITE]);
++
++	q->unplug_fn(q);
++}
++
++void blk_unplug_timeout(unsigned long data)
++{
++	struct request_queue *q = (struct request_queue *)data;
++
++	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
++				q->rq.count[READ] + q->rq.count[WRITE]);
++
++	kblockd_schedule_work(&q->unplug_work);
++}
++
++void blk_unplug(struct request_queue *q)
++{
++	/*
++	 * devices don't necessarily have an ->unplug_fn defined
++	 */
++	if (q->unplug_fn) {
++		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
++					q->rq.count[READ] + q->rq.count[WRITE]);
++
++		q->unplug_fn(q);
++	}
++}
++EXPORT_SYMBOL(blk_unplug);
++
++/**
++ * blk_start_queue - restart a previously stopped queue
++ * @q:    The &struct request_queue in question
++ *
++ * Description:
++ *   blk_start_queue() will clear the stop flag on the queue, and call
++ *   the request_fn for the queue if it was in a stopped state when
++ *   entered. Also see blk_stop_queue(). Queue lock must be held.
++ **/
++void blk_start_queue(struct request_queue *q)
++{
++	WARN_ON(!irqs_disabled());
++
++	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
++
++	/*
++	 * one level of recursion is ok and is much faster than kicking
++	 * the unplug handling
++	 */
++	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
++		q->request_fn(q);
++		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
 +	} else {
-+		log2_kernel(&x, argsign, &accumulator, &expon);
++		blk_plug_device(q);
++		kblockd_schedule_work(&q->unplug_work);
++	}
++}
++
++EXPORT_SYMBOL(blk_start_queue);
++
++/**
++ * blk_stop_queue - stop a queue
++ * @q:    The &struct request_queue in question
++ *
++ * Description:
++ *   The Linux block layer assumes that a block driver will consume all
++ *   entries on the request queue when the request_fn strategy is called.
++ *   Often this will not happen, because of hardware limitations (queue
++ *   depth settings). If a device driver gets a 'queue full' response,
++ *   or if it simply chooses not to queue more I/O at one point, it can
++ *   call this function to prevent the request_fn from being called until
++ *   the driver has signalled it's ready to go again. This happens by calling
++ *   blk_start_queue() to restart queue operations. Queue lock must be held.
++ **/
++void blk_stop_queue(struct request_queue *q)
++{
++	blk_remove_plug(q);
++	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
++}
++EXPORT_SYMBOL(blk_stop_queue);
++
++/**
++ * blk_sync_queue - cancel any pending callbacks on a queue
++ * @q: the queue
++ *
++ * Description:
++ *     The block layer may perform asynchronous callback activity
++ *     on a queue, such as calling the unplug function after a timeout.
++ *     A block device may call blk_sync_queue to ensure that any
++ *     such activity is cancelled, thus allowing it to release resources
++ *     that the callbacks might use. The caller must already have made sure
++ *     that its ->make_request_fn will not re-add plugging prior to calling
++ *     this function.
++ *
++ */
++void blk_sync_queue(struct request_queue *q)
++{
++	del_timer_sync(&q->unplug_timer);
++	kblockd_flush_work(&q->unplug_work);
++}
++EXPORT_SYMBOL(blk_sync_queue);
++
++/**
++ * blk_run_queue - run a single device queue
++ * @q:	The queue to run
++ */
++void blk_run_queue(struct request_queue *q)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(q->queue_lock, flags);
++	blk_remove_plug(q);
++
++	/*
++	 * Only recurse once to avoid overrunning the stack, let the unplug
++	 * handling reinvoke the handler shortly if we already got there.
++	 */
++	if (!elv_queue_empty(q)) {
++		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
++			q->request_fn(q);
++			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
++		} else {
++			blk_plug_device(q);
++			kblockd_schedule_work(&q->unplug_work);
++		}
 +	}
 +
-+	if (exponent < 0) {
-+		sign = SIGN_NEG;
-+		exponent = -exponent;
-+	} else
-+		sign = SIGN_POS;
-+	expon_accum.msw = exponent;
-+	expon_accum.midw = expon_accum.lsw = 0;
-+	if (exponent) {
-+		expon_expon = 31 + norm_Xsig(&expon_accum);
-+		shr_Xsig(&accumulator, expon_expon - expon);
++	spin_unlock_irqrestore(q->queue_lock, flags);
++}
++EXPORT_SYMBOL(blk_run_queue);
++
++void blk_put_queue(struct request_queue *q)
++{
++	kobject_put(&q->kobj);
++}
++EXPORT_SYMBOL(blk_put_queue);
++
++void blk_cleanup_queue(struct request_queue * q)
++{
++	mutex_lock(&q->sysfs_lock);
++	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
++	mutex_unlock(&q->sysfs_lock);
++
++	if (q->elevator)
++		elevator_exit(q->elevator);
++
++	blk_put_queue(q);
++}
++
++EXPORT_SYMBOL(blk_cleanup_queue);
++
++static int blk_init_free_list(struct request_queue *q)
++{
++	struct request_list *rl = &q->rq;
++
++	rl->count[READ] = rl->count[WRITE] = 0;
++	rl->starved[READ] = rl->starved[WRITE] = 0;
++	rl->elvpriv = 0;
++	init_waitqueue_head(&rl->wait[READ]);
++	init_waitqueue_head(&rl->wait[WRITE]);
++
++	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
++				mempool_free_slab, request_cachep, q->node);
++
++	if (!rl->rq_pool)
++		return -ENOMEM;
 +
-+		if (sign ^ argsign)
-+			negate_Xsig(&accumulator);
-+		add_Xsig_Xsig(&accumulator, &expon_accum);
-+	} else {
-+		expon_expon = expon;
-+		sign = argsign;
-+	}
++	return 0;
++}
 +
-+	yaccum.lsw = 0;
-+	XSIG_LL(yaccum) = significand(st1_ptr);
-+	mul_Xsig_Xsig(&accumulator, &yaccum);
++struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
++{
++	return blk_alloc_queue_node(gfp_mask, -1);
++}
++EXPORT_SYMBOL(blk_alloc_queue);
 +
-+	expon_expon += round_Xsig(&accumulator);
++struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
++{
++	struct request_queue *q;
++	int err;
 +
-+	if (accumulator.msw == 0) {
-+		FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
-+		return;
++	q = kmem_cache_alloc_node(blk_requestq_cachep,
++				gfp_mask | __GFP_ZERO, node_id);
++	if (!q)
++		return NULL;
++
++	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
++	q->backing_dev_info.unplug_io_data = q;
++	err = bdi_init(&q->backing_dev_info);
++	if (err) {
++		kmem_cache_free(blk_requestq_cachep, q);
++		return NULL;
 +	}
 +
-+	significand(st1_ptr) = XSIG_LL(accumulator);
-+	setexponent16(st1_ptr, expon_expon + exponent16(st1_ptr) + 1);
- 
-+	tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign ^ st1_sign);
-+	FPU_settagi(1, tag);
++	init_timer(&q->unplug_timer);
 +
-+	set_precision_flag_up();	/* 80486 appears to always do this */
++	kobject_init(&q->kobj, &blk_queue_ktype);
 +
-+	return;
++	mutex_init(&q->sysfs_lock);
 +
++	return q;
 +}
- 
- /*--- poly_l2p1() -----------------------------------------------------------+
-  |   Base 2 logarithm by a polynomial approximation.                         |
-  |   log2(x+1)                                                               |
-  +---------------------------------------------------------------------------*/
--int	poly_l2p1(u_char sign0, u_char sign1,
--		  FPU_REG *st0_ptr, FPU_REG *st1_ptr, FPU_REG *dest)
-+int poly_l2p1(u_char sign0, u_char sign1,
-+	      FPU_REG * st0_ptr, FPU_REG * st1_ptr, FPU_REG * dest)
- {
--  u_char             	tag;
--  long int        	exponent;
--  Xsig              	accumulator, yaccum;
-+	u_char tag;
-+	long int exponent;
-+	Xsig accumulator, yaccum;
- 
--  if ( exponent16(st0_ptr) < 0 )
--    {
--      log2_kernel(st0_ptr, sign0, &accumulator, &exponent);
-+	if (exponent16(st0_ptr) < 0) {
-+		log2_kernel(st0_ptr, sign0, &accumulator, &exponent);
- 
--      yaccum.lsw = 0;
--      XSIG_LL(yaccum) = significand(st1_ptr);
--      mul_Xsig_Xsig(&accumulator, &yaccum);
-+		yaccum.lsw = 0;
-+		XSIG_LL(yaccum) = significand(st1_ptr);
-+		mul_Xsig_Xsig(&accumulator, &yaccum);
- 
--      exponent += round_Xsig(&accumulator);
-+		exponent += round_Xsig(&accumulator);
- 
--      exponent += exponent16(st1_ptr) + 1;
--      if ( exponent < EXP_WAY_UNDER ) exponent = EXP_WAY_UNDER;
-+		exponent += exponent16(st1_ptr) + 1;
-+		if (exponent < EXP_WAY_UNDER)
-+			exponent = EXP_WAY_UNDER;
- 
--      significand(dest) = XSIG_LL(accumulator);
--      setexponent16(dest, exponent);
-+		significand(dest) = XSIG_LL(accumulator);
-+		setexponent16(dest, exponent);
- 
--      tag = FPU_round(dest, 1, 0, FULL_PRECISION, sign0 ^ sign1);
--      FPU_settagi(1, tag);
-+		tag = FPU_round(dest, 1, 0, FULL_PRECISION, sign0 ^ sign1);
-+		FPU_settagi(1, tag);
- 
--      if ( tag == TAG_Valid )
--	set_precision_flag_up();   /* 80486 appears to always do this */
--    }
--  else
--    {
--      /* The magnitude of st0_ptr is far too large. */
-+		if (tag == TAG_Valid)
-+			set_precision_flag_up();	/* 80486 appears to always do this */
-+	} else {
-+		/* The magnitude of st0_ptr is far too large. */
- 
--      if ( sign0 != SIGN_POS )
--	{
--	  /* Trying to get the log of a negative number. */
--#ifdef PECULIAR_486   /* Stupid 80486 doesn't worry about log(negative). */
--	  changesign(st1_ptr);
-+		if (sign0 != SIGN_POS) {
-+			/* Trying to get the log of a negative number. */
-+#ifdef PECULIAR_486		/* Stupid 80486 doesn't worry about log(negative). */
-+			changesign(st1_ptr);
- #else
--	  if ( arith_invalid(1) < 0 )
--	    return 1;
-+			if (arith_invalid(1) < 0)
-+				return 1;
- #endif /* PECULIAR_486 */
--	}
-+		}
- 
--      /* 80486 appears to do this */
--      if ( sign0 == SIGN_NEG )
--	set_precision_flag_down();
--      else
--	set_precision_flag_up();
--    }
-+		/* 80486 appears to do this */
-+		if (sign0 == SIGN_NEG)
-+			set_precision_flag_down();
-+		else
-+			set_precision_flag_up();
++EXPORT_SYMBOL(blk_alloc_queue_node);
++
++/**
++ * blk_init_queue  - prepare a request queue for use with a block device
++ * @rfn:  The function to be called to process requests that have been
++ *        placed on the queue.
++ * @lock: Request queue spin lock
++ *
++ * Description:
++ *    If a block device wishes to use the standard request handling procedures,
++ *    which sorts requests and coalesces adjacent requests, then it must
++ *    call blk_init_queue().  The function @rfn will be called when there
++ *    are requests on the queue that need to be processed.  If the device
++ *    supports plugging, then @rfn may not be called immediately when requests
++ *    are available on the queue, but may be called at some time later instead.
++ *    Plugged queues are generally unplugged when a buffer belonging to one
++ *    of the requests on the queue is needed, or due to memory pressure.
++ *
++ *    @rfn is not required, or even expected, to remove all requests off the
++ *    queue, but only as many as it can handle at a time.  If it does leave
++ *    requests on the queue, it is responsible for arranging that the requests
++ *    get dealt with eventually.
++ *
++ *    The queue spin lock must be held while manipulating the requests on the
++ *    request queue; this lock will be taken also from interrupt context, so irq
++ *    disabling is needed for it.
++ *
++ *    Function returns a pointer to the initialized request queue, or NULL if
++ *    it didn't succeed.
++ *
++ * Note:
++ *    blk_init_queue() must be paired with a blk_cleanup_queue() call
++ *    when the block device is deactivated (such as at module unload).
++ **/
++
++struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
++{
++	return blk_init_queue_node(rfn, lock, -1);
++}
++EXPORT_SYMBOL(blk_init_queue);
++
++struct request_queue *
++blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
++{
++	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
++
++	if (!q)
++		return NULL;
++
++	q->node = node_id;
++	if (blk_init_free_list(q)) {
++		kmem_cache_free(blk_requestq_cachep, q);
++		return NULL;
 +	}
- 
--  if ( exponent(dest) <= EXP_UNDER )
--    EXCEPTION(EX_Underflow);
-+	if (exponent(dest) <= EXP_UNDER)
-+		EXCEPTION(EX_Underflow);
- 
--  return 0;
-+	return 0;
- 
- }
- 
--
--
--
- #undef HIPOWER
- #define	HIPOWER	10
--static const unsigned long long logterms[HIPOWER] =
--{
--  0x2a8eca5705fc2ef0LL,
--  0xf6384ee1d01febceLL,
--  0x093bb62877cdf642LL,
--  0x006985d8a9ec439bLL,
--  0x0005212c4f55a9c8LL,
--  0x00004326a16927f0LL,
--  0x0000038d1d80a0e7LL,
--  0x0000003141cc80c6LL,
--  0x00000002b1668c9fLL,
--  0x000000002c7a46aaLL
-+static const unsigned long long logterms[HIPOWER] = {
-+	0x2a8eca5705fc2ef0LL,
-+	0xf6384ee1d01febceLL,
-+	0x093bb62877cdf642LL,
-+	0x006985d8a9ec439bLL,
-+	0x0005212c4f55a9c8LL,
-+	0x00004326a16927f0LL,
-+	0x0000038d1d80a0e7LL,
-+	0x0000003141cc80c6LL,
-+	0x00000002b1668c9fLL,
-+	0x000000002c7a46aaLL
- };
- 
- static const unsigned long leadterm = 0xb8000000;
- 
--
- /*--- log2_kernel() ---------------------------------------------------------+
-  |   Base 2 logarithm by a polynomial approximation.                         |
-  |   log2(x+1)                                                               |
-@@ -203,70 +181,64 @@ static const unsigned long leadterm = 0xb8000000;
- static void log2_kernel(FPU_REG const *arg, u_char argsign, Xsig *accum_result,
- 			long int *expon)
- {
--  long int             exponent, adj;
--  unsigned long long   Xsq;
--  Xsig                 accumulator, Numer, Denom, argSignif, arg_signif;
--
--  exponent = exponent16(arg);
--  Numer.lsw = Denom.lsw = 0;
--  XSIG_LL(Numer) = XSIG_LL(Denom) = significand(arg);
--  if ( argsign == SIGN_POS )
--    {
--      shr_Xsig(&Denom, 2 - (1 + exponent));
--      Denom.msw |= 0x80000000;
--      div_Xsig(&Numer, &Denom, &argSignif);
--    }
--  else
--    {
--      shr_Xsig(&Denom, 1 - (1 + exponent));
--      negate_Xsig(&Denom);
--      if ( Denom.msw & 0x80000000 )
--	{
--	  div_Xsig(&Numer, &Denom, &argSignif);
--	  exponent ++;
--	}
--      else
--	{
--	  /* Denom must be 1.0 */
--	  argSignif.lsw = Numer.lsw; argSignif.midw = Numer.midw;
--	  argSignif.msw = Numer.msw;
-+	long int exponent, adj;
-+	unsigned long long Xsq;
-+	Xsig accumulator, Numer, Denom, argSignif, arg_signif;
 +
-+	exponent = exponent16(arg);
-+	Numer.lsw = Denom.lsw = 0;
-+	XSIG_LL(Numer) = XSIG_LL(Denom) = significand(arg);
-+	if (argsign == SIGN_POS) {
-+		shr_Xsig(&Denom, 2 - (1 + exponent));
-+		Denom.msw |= 0x80000000;
-+		div_Xsig(&Numer, &Denom, &argSignif);
-+	} else {
-+		shr_Xsig(&Denom, 1 - (1 + exponent));
-+		negate_Xsig(&Denom);
-+		if (Denom.msw & 0x80000000) {
-+			div_Xsig(&Numer, &Denom, &argSignif);
-+			exponent++;
-+		} else {
-+			/* Denom must be 1.0 */
-+			argSignif.lsw = Numer.lsw;
-+			argSignif.midw = Numer.midw;
-+			argSignif.msw = Numer.msw;
-+		}
- 	}
--    }
- 
- #ifndef PECULIAR_486
--  /* Should check here that  |local_arg|  is within the valid range */
--  if ( exponent >= -2 )
--    {
--      if ( (exponent > -2) ||
--	  (argSignif.msw > (unsigned)0xafb0ccc0) )
--	{
--	  /* The argument is too large */
-+	/* Should check here that  |local_arg|  is within the valid range */
-+	if (exponent >= -2) {
-+		if ((exponent > -2) || (argSignif.msw > (unsigned)0xafb0ccc0)) {
-+			/* The argument is too large */
-+		}
- 	}
--    }
- #endif /* PECULIAR_486 */
- 
--  arg_signif.lsw = argSignif.lsw; XSIG_LL(arg_signif) = XSIG_LL(argSignif);
--  adj = norm_Xsig(&argSignif);
--  accumulator.lsw = argSignif.lsw; XSIG_LL(accumulator) = XSIG_LL(argSignif);
--  mul_Xsig_Xsig(&accumulator, &accumulator);
--  shr_Xsig(&accumulator, 2*(-1 - (1 + exponent + adj)));
--  Xsq = XSIG_LL(accumulator);
--  if ( accumulator.lsw & 0x80000000 )
--    Xsq++;
--
--  accumulator.msw = accumulator.midw = accumulator.lsw = 0;
--  /* Do the basic fixed point polynomial evaluation */
--  polynomial_Xsig(&accumulator, &Xsq, logterms, HIPOWER-1);
--
--  mul_Xsig_Xsig(&accumulator, &argSignif);
--  shr_Xsig(&accumulator, 6 - adj);
--
--  mul32_Xsig(&arg_signif, leadterm);
--  add_two_Xsig(&accumulator, &arg_signif, &exponent);
--
--  *expon = exponent + 1;
--  accum_result->lsw = accumulator.lsw;
--  accum_result->midw = accumulator.midw;
--  accum_result->msw = accumulator.msw;
-+	arg_signif.lsw = argSignif.lsw;
-+	XSIG_LL(arg_signif) = XSIG_LL(argSignif);
-+	adj = norm_Xsig(&argSignif);
-+	accumulator.lsw = argSignif.lsw;
-+	XSIG_LL(accumulator) = XSIG_LL(argSignif);
-+	mul_Xsig_Xsig(&accumulator, &accumulator);
-+	shr_Xsig(&accumulator, 2 * (-1 - (1 + exponent + adj)));
-+	Xsq = XSIG_LL(accumulator);
-+	if (accumulator.lsw & 0x80000000)
-+		Xsq++;
++	/*
++	 * if caller didn't supply a lock, they get per-queue locking with
++	 * our embedded lock
++	 */
++	if (!lock) {
++		spin_lock_init(&q->__queue_lock);
++		lock = &q->__queue_lock;
++	}
 +
-+	accumulator.msw = accumulator.midw = accumulator.lsw = 0;
-+	/* Do the basic fixed point polynomial evaluation */
-+	polynomial_Xsig(&accumulator, &Xsq, logterms, HIPOWER - 1);
++	q->request_fn		= rfn;
++	q->prep_rq_fn		= NULL;
++	q->unplug_fn		= generic_unplug_device;
++	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
++	q->queue_lock		= lock;
 +
-+	mul_Xsig_Xsig(&accumulator, &argSignif);
-+	shr_Xsig(&accumulator, 6 - adj);
++	blk_queue_segment_boundary(q, 0xffffffff);
 +
-+	mul32_Xsig(&arg_signif, leadterm);
-+	add_two_Xsig(&accumulator, &arg_signif, &exponent);
++	blk_queue_make_request(q, __make_request);
++	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 +
-+	*expon = exponent + 1;
-+	accum_result->lsw = accumulator.lsw;
-+	accum_result->midw = accumulator.midw;
-+	accum_result->msw = accumulator.msw;
- 
- }
-diff --git a/arch/x86/math-emu/poly_sin.c b/arch/x86/math-emu/poly_sin.c
-index a36313f..b862039 100644
---- a/arch/x86/math-emu/poly_sin.c
-+++ b/arch/x86/math-emu/poly_sin.c
-@@ -11,7 +11,6 @@
-  |                                                                           |
-  +---------------------------------------------------------------------------*/
- 
--
- #include "exception.h"
- #include "reg_constant.h"
- #include "fpu_emu.h"
-@@ -19,379 +18,361 @@
- #include "control_w.h"
- #include "poly.h"
- 
--
- #define	N_COEFF_P	4
- #define	N_COEFF_N	4
- 
--static const unsigned long long pos_terms_l[N_COEFF_P] =
--{
--  0xaaaaaaaaaaaaaaabLL,
--  0x00d00d00d00cf906LL,
--  0x000006b99159a8bbLL,
--  0x000000000d7392e6LL
-+static const unsigned long long pos_terms_l[N_COEFF_P] = {
-+	0xaaaaaaaaaaaaaaabLL,
-+	0x00d00d00d00cf906LL,
-+	0x000006b99159a8bbLL,
-+	0x000000000d7392e6LL
- };
- 
--static const unsigned long long neg_terms_l[N_COEFF_N] =
--{
--  0x2222222222222167LL,
--  0x0002e3bc74aab624LL,
--  0x0000000b09229062LL,
--  0x00000000000c7973LL
-+static const unsigned long long neg_terms_l[N_COEFF_N] = {
-+	0x2222222222222167LL,
-+	0x0002e3bc74aab624LL,
-+	0x0000000b09229062LL,
-+	0x00000000000c7973LL
- };
- 
--
--
- #define	N_COEFF_PH	4
- #define	N_COEFF_NH	4
--static const unsigned long long pos_terms_h[N_COEFF_PH] =
--{
--  0x0000000000000000LL,
--  0x05b05b05b05b0406LL,
--  0x000049f93edd91a9LL,
--  0x00000000c9c9ed62LL
-+static const unsigned long long pos_terms_h[N_COEFF_PH] = {
-+	0x0000000000000000LL,
-+	0x05b05b05b05b0406LL,
-+	0x000049f93edd91a9LL,
-+	0x00000000c9c9ed62LL
- };
- 
--static const unsigned long long neg_terms_h[N_COEFF_NH] =
--{
--  0xaaaaaaaaaaaaaa98LL,
--  0x001a01a01a019064LL,
--  0x0000008f76c68a77LL,
--  0x0000000000d58f5eLL
-+static const unsigned long long neg_terms_h[N_COEFF_NH] = {
-+	0xaaaaaaaaaaaaaa98LL,
-+	0x001a01a01a019064LL,
-+	0x0000008f76c68a77LL,
-+	0x0000000000d58f5eLL
- };
- 
--
- /*--- poly_sine() -----------------------------------------------------------+
-  |                                                                           |
-  +---------------------------------------------------------------------------*/
--void	poly_sine(FPU_REG *st0_ptr)
-+void poly_sine(FPU_REG *st0_ptr)
- {
--  int                 exponent, echange;
--  Xsig                accumulator, argSqrd, argTo4;
--  unsigned long       fix_up, adj;
--  unsigned long long  fixed_arg;
--  FPU_REG	      result;
-+	int exponent, echange;
-+	Xsig accumulator, argSqrd, argTo4;
-+	unsigned long fix_up, adj;
-+	unsigned long long fixed_arg;
-+	FPU_REG result;
- 
--  exponent = exponent(st0_ptr);
-+	exponent = exponent(st0_ptr);
- 
--  accumulator.lsw = accumulator.midw = accumulator.msw = 0;
-+	accumulator.lsw = accumulator.midw = accumulator.msw = 0;
- 
--  /* Split into two ranges, for arguments below and above 1.0 */
--  /* The boundary between upper and lower is approx 0.88309101259 */
--  if ( (exponent < -1) || ((exponent == -1) && (st0_ptr->sigh <= 0xe21240aa)) )
--    {
--      /* The argument is <= 0.88309101259 */
-+	/* Split into two ranges, for arguments below and above 1.0 */
-+	/* The boundary between upper and lower is approx 0.88309101259 */
-+	if ((exponent < -1)
-+	    || ((exponent == -1) && (st0_ptr->sigh <= 0xe21240aa))) {
-+		/* The argument is <= 0.88309101259 */
- 
--      argSqrd.msw = st0_ptr->sigh; argSqrd.midw = st0_ptr->sigl; argSqrd.lsw = 0;
--      mul64_Xsig(&argSqrd, &significand(st0_ptr));
--      shr_Xsig(&argSqrd, 2*(-1-exponent));
--      argTo4.msw = argSqrd.msw; argTo4.midw = argSqrd.midw;
--      argTo4.lsw = argSqrd.lsw;
--      mul_Xsig_Xsig(&argTo4, &argTo4);
-+		argSqrd.msw = st0_ptr->sigh;
-+		argSqrd.midw = st0_ptr->sigl;
-+		argSqrd.lsw = 0;
-+		mul64_Xsig(&argSqrd, &significand(st0_ptr));
-+		shr_Xsig(&argSqrd, 2 * (-1 - exponent));
-+		argTo4.msw = argSqrd.msw;
-+		argTo4.midw = argSqrd.midw;
-+		argTo4.lsw = argSqrd.lsw;
-+		mul_Xsig_Xsig(&argTo4, &argTo4);
- 
--      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
--		      N_COEFF_N-1);
--      mul_Xsig_Xsig(&accumulator, &argSqrd);
--      negate_Xsig(&accumulator);
-+		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
-+				N_COEFF_N - 1);
-+		mul_Xsig_Xsig(&accumulator, &argSqrd);
-+		negate_Xsig(&accumulator);
- 
--      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
--		      N_COEFF_P-1);
-+		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
-+				N_COEFF_P - 1);
- 
--      shr_Xsig(&accumulator, 2);    /* Divide by four */
--      accumulator.msw |= 0x80000000;  /* Add 1.0 */
-+		shr_Xsig(&accumulator, 2);	/* Divide by four */
-+		accumulator.msw |= 0x80000000;	/* Add 1.0 */
- 
--      mul64_Xsig(&accumulator, &significand(st0_ptr));
--      mul64_Xsig(&accumulator, &significand(st0_ptr));
--      mul64_Xsig(&accumulator, &significand(st0_ptr));
-+		mul64_Xsig(&accumulator, &significand(st0_ptr));
-+		mul64_Xsig(&accumulator, &significand(st0_ptr));
-+		mul64_Xsig(&accumulator, &significand(st0_ptr));
- 
--      /* Divide by four, FPU_REG compatible, etc */
--      exponent = 3*exponent;
-+		/* Divide by four, FPU_REG compatible, etc */
-+		exponent = 3 * exponent;
- 
--      /* The minimum exponent difference is 3 */
--      shr_Xsig(&accumulator, exponent(st0_ptr) - exponent);
-+		/* The minimum exponent difference is 3 */
-+		shr_Xsig(&accumulator, exponent(st0_ptr) - exponent);
- 
--      negate_Xsig(&accumulator);
--      XSIG_LL(accumulator) += significand(st0_ptr);
-+		negate_Xsig(&accumulator);
-+		XSIG_LL(accumulator) += significand(st0_ptr);
- 
--      echange = round_Xsig(&accumulator);
-+		echange = round_Xsig(&accumulator);
- 
--      setexponentpos(&result, exponent(st0_ptr) + echange);
--    }
--  else
--    {
--      /* The argument is > 0.88309101259 */
--      /* We use sin(st(0)) = cos(pi/2-st(0)) */
-+		setexponentpos(&result, exponent(st0_ptr) + echange);
-+	} else {
-+		/* The argument is > 0.88309101259 */
-+		/* We use sin(st(0)) = cos(pi/2-st(0)) */
- 
--      fixed_arg = significand(st0_ptr);
-+		fixed_arg = significand(st0_ptr);
- 
--      if ( exponent == 0 )
--	{
--	  /* The argument is >= 1.0 */
-+		if (exponent == 0) {
-+			/* The argument is >= 1.0 */
- 
--	  /* Put the binary point at the left. */
--	  fixed_arg <<= 1;
--	}
--      /* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
--      fixed_arg = 0x921fb54442d18469LL - fixed_arg;
--      /* There is a special case which arises due to rounding, to fix here. */
--      if ( fixed_arg == 0xffffffffffffffffLL )
--	fixed_arg = 0;
-+			/* Put the binary point at the left. */
-+			fixed_arg <<= 1;
-+		}
-+		/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
-+		fixed_arg = 0x921fb54442d18469LL - fixed_arg;
-+		/* There is a special case which arises due to rounding, to fix here. */
-+		if (fixed_arg == 0xffffffffffffffffLL)
-+			fixed_arg = 0;
- 
--      XSIG_LL(argSqrd) = fixed_arg; argSqrd.lsw = 0;
--      mul64_Xsig(&argSqrd, &fixed_arg);
-+		XSIG_LL(argSqrd) = fixed_arg;
-+		argSqrd.lsw = 0;
-+		mul64_Xsig(&argSqrd, &fixed_arg);
- 
--      XSIG_LL(argTo4) = XSIG_LL(argSqrd); argTo4.lsw = argSqrd.lsw;
--      mul_Xsig_Xsig(&argTo4, &argTo4);
-+		XSIG_LL(argTo4) = XSIG_LL(argSqrd);
-+		argTo4.lsw = argSqrd.lsw;
-+		mul_Xsig_Xsig(&argTo4, &argTo4);
- 
--      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
--		      N_COEFF_NH-1);
--      mul_Xsig_Xsig(&accumulator, &argSqrd);
--      negate_Xsig(&accumulator);
-+		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
-+				N_COEFF_NH - 1);
-+		mul_Xsig_Xsig(&accumulator, &argSqrd);
-+		negate_Xsig(&accumulator);
- 
--      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
--		      N_COEFF_PH-1);
--      negate_Xsig(&accumulator);
-+		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
-+				N_COEFF_PH - 1);
-+		negate_Xsig(&accumulator);
- 
--      mul64_Xsig(&accumulator, &fixed_arg);
--      mul64_Xsig(&accumulator, &fixed_arg);
-+		mul64_Xsig(&accumulator, &fixed_arg);
-+		mul64_Xsig(&accumulator, &fixed_arg);
- 
--      shr_Xsig(&accumulator, 3);
--      negate_Xsig(&accumulator);
-+		shr_Xsig(&accumulator, 3);
-+		negate_Xsig(&accumulator);
- 
--      add_Xsig_Xsig(&accumulator, &argSqrd);
-+		add_Xsig_Xsig(&accumulator, &argSqrd);
- 
--      shr_Xsig(&accumulator, 1);
-+		shr_Xsig(&accumulator, 1);
- 
--      accumulator.lsw |= 1;  /* A zero accumulator here would cause problems */
--      negate_Xsig(&accumulator);
-+		accumulator.lsw |= 1;	/* A zero accumulator here would cause problems */
-+		negate_Xsig(&accumulator);
- 
--      /* The basic computation is complete. Now fix the answer to
--	 compensate for the error due to the approximation used for
--	 pi/2
--	 */
-+		/* The basic computation is complete. Now fix the answer to
-+		   compensate for the error due to the approximation used for
-+		   pi/2
-+		 */
- 
--      /* This has an exponent of -65 */
--      fix_up = 0x898cc517;
--      /* The fix-up needs to be improved for larger args */
--      if ( argSqrd.msw & 0xffc00000 )
--	{
--	  /* Get about 32 bit precision in these: */
--	  fix_up -= mul_32_32(0x898cc517, argSqrd.msw) / 6;
--	}
--      fix_up = mul_32_32(fix_up, LL_MSW(fixed_arg));
-+		/* This has an exponent of -65 */
-+		fix_up = 0x898cc517;
-+		/* The fix-up needs to be improved for larger args */
-+		if (argSqrd.msw & 0xffc00000) {
-+			/* Get about 32 bit precision in these: */
-+			fix_up -= mul_32_32(0x898cc517, argSqrd.msw) / 6;
-+		}
-+		fix_up = mul_32_32(fix_up, LL_MSW(fixed_arg));
- 
--      adj = accumulator.lsw;    /* temp save */
--      accumulator.lsw -= fix_up;
--      if ( accumulator.lsw > adj )
--	XSIG_LL(accumulator) --;
-+		adj = accumulator.lsw;	/* temp save */
-+		accumulator.lsw -= fix_up;
-+		if (accumulator.lsw > adj)
-+			XSIG_LL(accumulator)--;
- 
--      echange = round_Xsig(&accumulator);
-+		echange = round_Xsig(&accumulator);
- 
--      setexponentpos(&result, echange - 1);
--    }
-+		setexponentpos(&result, echange - 1);
++	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
++	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
++
++	q->sg_reserved_size = INT_MAX;
++
++	/*
++	 * all done
++	 */
++	if (!elevator_init(q, NULL)) {
++		blk_queue_congestion_threshold(q);
++		return q;
 +	}
- 
--  significand(&result) = XSIG_LL(accumulator);
--  setsign(&result, getsign(st0_ptr));
--  FPU_copy_to_reg0(&result, TAG_Valid);
-+	significand(&result) = XSIG_LL(accumulator);
-+	setsign(&result, getsign(st0_ptr));
-+	FPU_copy_to_reg0(&result, TAG_Valid);
- 
- #ifdef PARANOID
--  if ( (exponent(&result) >= 0)
--      && (significand(&result) > 0x8000000000000000LL) )
--    {
--      EXCEPTION(EX_INTERNAL|0x150);
--    }
-+	if ((exponent(&result) >= 0)
-+	    && (significand(&result) > 0x8000000000000000LL)) {
-+		EXCEPTION(EX_INTERNAL | 0x150);
++
++	blk_put_queue(q);
++	return NULL;
++}
++EXPORT_SYMBOL(blk_init_queue_node);
++
++int blk_get_queue(struct request_queue *q)
++{
++	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
++		kobject_get(&q->kobj);
++		return 0;
 +	}
- #endif /* PARANOID */
- 
- }
- 
--
--
- /*--- poly_cos() ------------------------------------------------------------+
-  |                                                                           |
-  +---------------------------------------------------------------------------*/
--void	poly_cos(FPU_REG *st0_ptr)
-+void poly_cos(FPU_REG *st0_ptr)
- {
--  FPU_REG	      result;
--  long int            exponent, exp2, echange;
--  Xsig                accumulator, argSqrd, fix_up, argTo4;
--  unsigned long long  fixed_arg;
-+	FPU_REG result;
-+	long int exponent, exp2, echange;
-+	Xsig accumulator, argSqrd, fix_up, argTo4;
-+	unsigned long long fixed_arg;
- 
- #ifdef PARANOID
--  if ( (exponent(st0_ptr) > 0)
--      || ((exponent(st0_ptr) == 0)
--	  && (significand(st0_ptr) > 0xc90fdaa22168c234LL)) )
--    {
--      EXCEPTION(EX_Invalid);
--      FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
--      return;
--    }
--#endif /* PARANOID */
--
--  exponent = exponent(st0_ptr);
--
--  accumulator.lsw = accumulator.midw = accumulator.msw = 0;
--
--  if ( (exponent < -1) || ((exponent == -1) && (st0_ptr->sigh <= 0xb00d6f54)) )
--    {
--      /* arg is < 0.687705 */
--
--      argSqrd.msw = st0_ptr->sigh; argSqrd.midw = st0_ptr->sigl;
--      argSqrd.lsw = 0;
--      mul64_Xsig(&argSqrd, &significand(st0_ptr));
--
--      if ( exponent < -1 )
--	{
--	  /* shift the argument right by the required places */
--	  shr_Xsig(&argSqrd, 2*(-1-exponent));
-+	if ((exponent(st0_ptr) > 0)
-+	    || ((exponent(st0_ptr) == 0)
-+		&& (significand(st0_ptr) > 0xc90fdaa22168c234LL))) {
-+		EXCEPTION(EX_Invalid);
-+		FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
-+		return;
- 	}
-+#endif /* PARANOID */
- 
--      argTo4.msw = argSqrd.msw; argTo4.midw = argSqrd.midw;
--      argTo4.lsw = argSqrd.lsw;
--      mul_Xsig_Xsig(&argTo4, &argTo4);
--
--      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
--		      N_COEFF_NH-1);
--      mul_Xsig_Xsig(&accumulator, &argSqrd);
--      negate_Xsig(&accumulator);
--
--      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
--		      N_COEFF_PH-1);
--      negate_Xsig(&accumulator);
--
--      mul64_Xsig(&accumulator, &significand(st0_ptr));
--      mul64_Xsig(&accumulator, &significand(st0_ptr));
--      shr_Xsig(&accumulator, -2*(1+exponent));
--
--      shr_Xsig(&accumulator, 3);
--      negate_Xsig(&accumulator);
--
--      add_Xsig_Xsig(&accumulator, &argSqrd);
--
--      shr_Xsig(&accumulator, 1);
--
--      /* It doesn't matter if accumulator is all zero here, the
--	 following code will work ok */
--      negate_Xsig(&accumulator);
--
--      if ( accumulator.lsw & 0x80000000 )
--	XSIG_LL(accumulator) ++;
--      if ( accumulator.msw == 0 )
--	{
--	  /* The result is 1.0 */
--	  FPU_copy_to_reg0(&CONST_1, TAG_Valid);
--	  return;
--	}
--      else
--	{
--	  significand(&result) = XSIG_LL(accumulator);
--      
--	  /* will be a valid positive nr with expon = -1 */
--	  setexponentpos(&result, -1);
--	}
--    }
--  else
--    {
--      fixed_arg = significand(st0_ptr);
--
--      if ( exponent == 0 )
--	{
--	  /* The argument is >= 1.0 */
--
--	  /* Put the binary point at the left. */
--	  fixed_arg <<= 1;
--	}
--      /* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
--      fixed_arg = 0x921fb54442d18469LL - fixed_arg;
--      /* There is a special case which arises due to rounding, to fix here. */
--      if ( fixed_arg == 0xffffffffffffffffLL )
--	fixed_arg = 0;
--
--      exponent = -1;
--      exp2 = -1;
--
--      /* A shift is needed here only for a narrow range of arguments,
--	 i.e. for fixed_arg approx 2^-32, but we pick up more... */
--      if ( !(LL_MSW(fixed_arg) & 0xffff0000) )
--	{
--	  fixed_arg <<= 16;
--	  exponent -= 16;
--	  exp2 -= 16;
--	}
--
--      XSIG_LL(argSqrd) = fixed_arg; argSqrd.lsw = 0;
--      mul64_Xsig(&argSqrd, &fixed_arg);
--
--      if ( exponent < -1 )
--	{
--	  /* shift the argument right by the required places */
--	  shr_Xsig(&argSqrd, 2*(-1-exponent));
--	}
--
--      argTo4.msw = argSqrd.msw; argTo4.midw = argSqrd.midw;
--      argTo4.lsw = argSqrd.lsw;
--      mul_Xsig_Xsig(&argTo4, &argTo4);
--
--      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
--		      N_COEFF_N-1);
--      mul_Xsig_Xsig(&accumulator, &argSqrd);
--      negate_Xsig(&accumulator);
--
--      polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
--		      N_COEFF_P-1);
--
--      shr_Xsig(&accumulator, 2);    /* Divide by four */
--      accumulator.msw |= 0x80000000;  /* Add 1.0 */
--
--      mul64_Xsig(&accumulator, &fixed_arg);
--      mul64_Xsig(&accumulator, &fixed_arg);
--      mul64_Xsig(&accumulator, &fixed_arg);
--
--      /* Divide by four, FPU_REG compatible, etc */
--      exponent = 3*exponent;
--
--      /* The minimum exponent difference is 3 */
--      shr_Xsig(&accumulator, exp2 - exponent);
--
--      negate_Xsig(&accumulator);
--      XSIG_LL(accumulator) += fixed_arg;
--
--      /* The basic computation is complete. Now fix the answer to
--	 compensate for the error due to the approximation used for
--	 pi/2
--	 */
--
--      /* This has an exponent of -65 */
--      XSIG_LL(fix_up) = 0x898cc51701b839a2ll;
--      fix_up.lsw = 0;
--
--      /* The fix-up needs to be improved for larger args */
--      if ( argSqrd.msw & 0xffc00000 )
--	{
--	  /* Get about 32 bit precision in these: */
--	  fix_up.msw -= mul_32_32(0x898cc517, argSqrd.msw) / 2;
--	  fix_up.msw += mul_32_32(0x898cc517, argTo4.msw) / 24;
-+	exponent = exponent(st0_ptr);
 +
-+	accumulator.lsw = accumulator.midw = accumulator.msw = 0;
++	return 1;
++}
 +
-+	if ((exponent < -1)
-+	    || ((exponent == -1) && (st0_ptr->sigh <= 0xb00d6f54))) {
-+		/* arg is < 0.687705 */
++EXPORT_SYMBOL(blk_get_queue);
 +
-+		argSqrd.msw = st0_ptr->sigh;
-+		argSqrd.midw = st0_ptr->sigl;
-+		argSqrd.lsw = 0;
-+		mul64_Xsig(&argSqrd, &significand(st0_ptr));
++static inline void blk_free_request(struct request_queue *q, struct request *rq)
++{
++	if (rq->cmd_flags & REQ_ELVPRIV)
++		elv_put_request(q, rq);
++	mempool_free(rq, q->rq.rq_pool);
++}
 +
-+		if (exponent < -1) {
-+			/* shift the argument right by the required places */
-+			shr_Xsig(&argSqrd, 2 * (-1 - exponent));
++static struct request *
++blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
++{
++	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
++
++	if (!rq)
++		return NULL;
++
++	/*
++	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
++	 * see bio.h and blkdev.h
++	 */
++	rq->cmd_flags = rw | REQ_ALLOCED;
++
++	if (priv) {
++		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
++			mempool_free(rq, q->rq.rq_pool);
++			return NULL;
 +		}
++		rq->cmd_flags |= REQ_ELVPRIV;
++	}
 +
-+		argTo4.msw = argSqrd.msw;
-+		argTo4.midw = argSqrd.midw;
-+		argTo4.lsw = argSqrd.lsw;
-+		mul_Xsig_Xsig(&argTo4, &argTo4);
++	return rq;
++}
 +
-+		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
-+				N_COEFF_NH - 1);
-+		mul_Xsig_Xsig(&accumulator, &argSqrd);
-+		negate_Xsig(&accumulator);
++/*
++ * ioc_batching returns true if the ioc is a valid batching request and
++ * should be given priority access to a request.
++ */
++static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
++{
++	if (!ioc)
++		return 0;
 +
-+		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
-+				N_COEFF_PH - 1);
-+		negate_Xsig(&accumulator);
++	/*
++	 * Make sure the process is able to allocate at least 1 request
++	 * even if the batch times out, otherwise we could theoretically
++	 * lose wakeups.
++	 */
++	return ioc->nr_batch_requests == q->nr_batching ||
++		(ioc->nr_batch_requests > 0
++		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
++}
 +
-+		mul64_Xsig(&accumulator, &significand(st0_ptr));
-+		mul64_Xsig(&accumulator, &significand(st0_ptr));
-+		shr_Xsig(&accumulator, -2 * (1 + exponent));
++/*
++ * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
++ * will cause the process to be a "batcher" on all queues in the system. This
++ * is the behaviour we want though - once it gets a wakeup it should be given
++ * a nice run.
++ */
++static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
++{
++	if (!ioc || ioc_batching(q, ioc))
++		return;
 +
-+		shr_Xsig(&accumulator, 3);
-+		negate_Xsig(&accumulator);
++	ioc->nr_batch_requests = q->nr_batching;
++	ioc->last_waited = jiffies;
++}
 +
-+		add_Xsig_Xsig(&accumulator, &argSqrd);
++static void __freed_request(struct request_queue *q, int rw)
++{
++	struct request_list *rl = &q->rq;
 +
-+		shr_Xsig(&accumulator, 1);
++	if (rl->count[rw] < queue_congestion_off_threshold(q))
++		blk_clear_queue_congested(q, rw);
 +
-+		/* It doesn't matter if accumulator is all zero here, the
-+		   following code will work ok */
-+		negate_Xsig(&accumulator);
++	if (rl->count[rw] + 1 <= q->nr_requests) {
++		if (waitqueue_active(&rl->wait[rw]))
++			wake_up(&rl->wait[rw]);
 +
-+		if (accumulator.lsw & 0x80000000)
-+			XSIG_LL(accumulator)++;
-+		if (accumulator.msw == 0) {
-+			/* The result is 1.0 */
-+			FPU_copy_to_reg0(&CONST_1, TAG_Valid);
-+			return;
-+		} else {
-+			significand(&result) = XSIG_LL(accumulator);
++		blk_clear_queue_full(q, rw);
++	}
++}
 +
-+			/* will be a valid positive nr with expon = -1 */
-+			setexponentpos(&result, -1);
-+		}
-+	} else {
-+		fixed_arg = significand(st0_ptr);
++/*
++ * A request has just been released.  Account for it, update the full and
++ * congestion status, wake up any waiters.   Called under q->queue_lock.
++ */
++static void freed_request(struct request_queue *q, int rw, int priv)
++{
++	struct request_list *rl = &q->rq;
 +
-+		if (exponent == 0) {
-+			/* The argument is >= 1.0 */
++	rl->count[rw]--;
++	if (priv)
++		rl->elvpriv--;
 +
-+			/* Put the binary point at the left. */
-+			fixed_arg <<= 1;
-+		}
-+		/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
-+		fixed_arg = 0x921fb54442d18469LL - fixed_arg;
-+		/* There is a special case which arises due to rounding, to fix here. */
-+		if (fixed_arg == 0xffffffffffffffffLL)
-+			fixed_arg = 0;
++	__freed_request(q, rw);
 +
-+		exponent = -1;
-+		exp2 = -1;
++	if (unlikely(rl->starved[rw ^ 1]))
++		__freed_request(q, rw ^ 1);
++}
 +
-+		/* A shift is needed here only for a narrow range of arguments,
-+		   i.e. for fixed_arg approx 2^-32, but we pick up more... */
-+		if (!(LL_MSW(fixed_arg) & 0xffff0000)) {
-+			fixed_arg <<= 16;
-+			exponent -= 16;
-+			exp2 -= 16;
-+		}
++#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
++/*
++ * Get a free request, queue_lock must be held.
++ * Returns NULL on failure, with queue_lock held.
++ * Returns !NULL on success, with queue_lock *not held*.
++ */
++static struct request *get_request(struct request_queue *q, int rw_flags,
++				   struct bio *bio, gfp_t gfp_mask)
++{
++	struct request *rq = NULL;
++	struct request_list *rl = &q->rq;
++	struct io_context *ioc = NULL;
++	const int rw = rw_flags & 0x01;
++	int may_queue, priv;
 +
-+		XSIG_LL(argSqrd) = fixed_arg;
-+		argSqrd.lsw = 0;
-+		mul64_Xsig(&argSqrd, &fixed_arg);
++	may_queue = elv_may_queue(q, rw_flags);
++	if (may_queue == ELV_MQUEUE_NO)
++		goto rq_starved;
 +
-+		if (exponent < -1) {
-+			/* shift the argument right by the required places */
-+			shr_Xsig(&argSqrd, 2 * (-1 - exponent));
++	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
++		if (rl->count[rw]+1 >= q->nr_requests) {
++			ioc = current_io_context(GFP_ATOMIC, q->node);
++			/*
++			 * The queue will fill after this allocation, so set
++			 * it as full, and mark this process as "batching".
++			 * This process will be allowed to complete a batch of
++			 * requests, others will be blocked.
++			 */
++			if (!blk_queue_full(q, rw)) {
++				ioc_set_batching(q, ioc);
++				blk_set_queue_full(q, rw);
++			} else {
++				if (may_queue != ELV_MQUEUE_MUST
++						&& !ioc_batching(q, ioc)) {
++					/*
++					 * The queue is full and the allocating
++					 * process is not a "batcher", and not
++					 * exempted by the IO scheduler
++					 */
++					goto out;
++				}
++			}
 +		}
++		blk_set_queue_congested(q, rw);
++	}
 +
-+		argTo4.msw = argSqrd.msw;
-+		argTo4.midw = argSqrd.midw;
-+		argTo4.lsw = argSqrd.lsw;
-+		mul_Xsig_Xsig(&argTo4, &argTo4);
++	/*
++	 * Only allow batching queuers to allocate up to 50% over the defined
++	 * limit of requests, otherwise we could have thousands of requests
++	 * allocated with any setting of ->nr_requests
++	 */
++	if (rl->count[rw] >= (3 * q->nr_requests / 2))
++		goto out;
 +
-+		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
-+				N_COEFF_N - 1);
-+		mul_Xsig_Xsig(&accumulator, &argSqrd);
-+		negate_Xsig(&accumulator);
++	rl->count[rw]++;
++	rl->starved[rw] = 0;
 +
-+		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
-+				N_COEFF_P - 1);
++	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
++	if (priv)
++		rl->elvpriv++;
 +
-+		shr_Xsig(&accumulator, 2);	/* Divide by four */
-+		accumulator.msw |= 0x80000000;	/* Add 1.0 */
++	spin_unlock_irq(q->queue_lock);
 +
-+		mul64_Xsig(&accumulator, &fixed_arg);
-+		mul64_Xsig(&accumulator, &fixed_arg);
-+		mul64_Xsig(&accumulator, &fixed_arg);
++	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
++	if (unlikely(!rq)) {
++		/*
++		 * Allocation failed presumably due to memory. Undo anything
++		 * we might have messed up.
++		 *
++		 * Allocating task should really be put onto the front of the
++		 * wait queue, but this is pretty rare.
++		 */
++		spin_lock_irq(q->queue_lock);
++		freed_request(q, rw, priv);
 +
-+		/* Divide by four, FPU_REG compatible, etc */
-+		exponent = 3 * exponent;
++		/*
++		 * in the very unlikely event that allocation failed and no
++		 * requests for this direction was pending, mark us starved
++		 * so that freeing of a request in the other direction will
++		 * requests for this direction were pending, mark us starved
++		 * rq mempool into READ and WRITE
++		 */
++rq_starved:
++		if (unlikely(rl->count[rw] == 0))
++			rl->starved[rw] = 1;
 +
-+		/* The minimum exponent difference is 3 */
-+		shr_Xsig(&accumulator, exp2 - exponent);
++		goto out;
++	}
 +
-+		negate_Xsig(&accumulator);
-+		XSIG_LL(accumulator) += fixed_arg;
++	/*
++	 * ioc may be NULL here, and ioc_batching will be false. That's
++	 * OK, if the queue is under the request limit then requests need
++	 * not count toward the nr_batch_requests limit. There will always
++	 * be some limit enforced by BLK_BATCH_TIME.
++	 */
++	if (ioc_batching(q, ioc))
++		ioc->nr_batch_requests--;
++	
++	rq_init(q, rq);
 +
-+		/* The basic computation is complete. Now fix the answer to
-+		   compensate for the error due to the approximation used for
-+		   pi/2
-+		 */
++	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
++out:
++	return rq;
++}
 +
-+		/* This has an exponent of -65 */
-+		XSIG_LL(fix_up) = 0x898cc51701b839a2ll;
-+		fix_up.lsw = 0;
++/*
++ * No available requests for this queue, unplug the device and wait for some
++ * requests to become available.
++ *
++ * Called with q->queue_lock held, and returns with it unlocked.
++ */
++static struct request *get_request_wait(struct request_queue *q, int rw_flags,
++					struct bio *bio)
++{
++	const int rw = rw_flags & 0x01;
++	struct request *rq;
 +
-+		/* The fix-up needs to be improved for larger args */
-+		if (argSqrd.msw & 0xffc00000) {
-+			/* Get about 32 bit precision in these: */
-+			fix_up.msw -= mul_32_32(0x898cc517, argSqrd.msw) / 2;
-+			fix_up.msw += mul_32_32(0x898cc517, argTo4.msw) / 24;
-+		}
++	rq = get_request(q, rw_flags, bio, GFP_NOIO);
++	while (!rq) {
++		DEFINE_WAIT(wait);
++		struct request_list *rl = &q->rq;
 +
-+		exp2 += norm_Xsig(&accumulator);
-+		shr_Xsig(&accumulator, 1);	/* Prevent overflow */
-+		exp2++;
-+		shr_Xsig(&fix_up, 65 + exp2);
++		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
++				TASK_UNINTERRUPTIBLE);
 +
-+		add_Xsig_Xsig(&accumulator, &fix_up);
++		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 +
-+		echange = round_Xsig(&accumulator);
++		if (!rq) {
++			struct io_context *ioc;
 +
-+		setexponentpos(&result, exp2 + echange);
-+		significand(&result) = XSIG_LL(accumulator);
- 	}
- 
--      exp2 += norm_Xsig(&accumulator);
--      shr_Xsig(&accumulator, 1); /* Prevent overflow */
--      exp2++;
--      shr_Xsig(&fix_up, 65 + exp2);
--
--      add_Xsig_Xsig(&accumulator, &fix_up);
--
--      echange = round_Xsig(&accumulator);
--
--      setexponentpos(&result, exp2 + echange);
--      significand(&result) = XSIG_LL(accumulator);
--    }
--
--  FPU_copy_to_reg0(&result, TAG_Valid);
-+	FPU_copy_to_reg0(&result, TAG_Valid);
- 
- #ifdef PARANOID
--  if ( (exponent(&result) >= 0)
--      && (significand(&result) > 0x8000000000000000LL) )
--    {
--      EXCEPTION(EX_INTERNAL|0x151);
--    }
-+	if ((exponent(&result) >= 0)
-+	    && (significand(&result) > 0x8000000000000000LL)) {
-+		EXCEPTION(EX_INTERNAL | 0x151);
-+	}
- #endif /* PARANOID */
- 
- }
-diff --git a/arch/x86/math-emu/poly_tan.c b/arch/x86/math-emu/poly_tan.c
-index 8df3e03..1875763 100644
---- a/arch/x86/math-emu/poly_tan.c
-+++ b/arch/x86/math-emu/poly_tan.c
-@@ -17,206 +17,196 @@
- #include "control_w.h"
- #include "poly.h"
- 
--
- #define	HiPOWERop	3	/* odd poly, positive terms */
--static const unsigned long long oddplterm[HiPOWERop] =
--{
--  0x0000000000000000LL,
--  0x0051a1cf08fca228LL,
--  0x0000000071284ff7LL
-+static const unsigned long long oddplterm[HiPOWERop] = {
-+	0x0000000000000000LL,
-+	0x0051a1cf08fca228LL,
-+	0x0000000071284ff7LL
- };
- 
- #define	HiPOWERon	2	/* odd poly, negative terms */
--static const unsigned long long oddnegterm[HiPOWERon] =
--{
--   0x1291a9a184244e80LL,
--   0x0000583245819c21LL
-+static const unsigned long long oddnegterm[HiPOWERon] = {
-+	0x1291a9a184244e80LL,
-+	0x0000583245819c21LL
- };
- 
- #define	HiPOWERep	2	/* even poly, positive terms */
--static const unsigned long long evenplterm[HiPOWERep] =
--{
--  0x0e848884b539e888LL,
--  0x00003c7f18b887daLL
-+static const unsigned long long evenplterm[HiPOWERep] = {
-+	0x0e848884b539e888LL,
-+	0x00003c7f18b887daLL
- };
- 
- #define	HiPOWERen	2	/* even poly, negative terms */
--static const unsigned long long evennegterm[HiPOWERen] =
--{
--  0xf1f0200fd51569ccLL,
--  0x003afb46105c4432LL
-+static const unsigned long long evennegterm[HiPOWERen] = {
-+	0xf1f0200fd51569ccLL,
-+	0x003afb46105c4432LL
- };
- 
- static const unsigned long long twothirds = 0xaaaaaaaaaaaaaaabLL;
- 
--
- /*--- poly_tan() ------------------------------------------------------------+
-  |                                                                           |
-  +---------------------------------------------------------------------------*/
--void	poly_tan(FPU_REG *st0_ptr)
-+void poly_tan(FPU_REG *st0_ptr)
- {
--  long int    		exponent;
--  int                   invert;
--  Xsig                  argSq, argSqSq, accumulatoro, accumulatore, accum,
--                        argSignif, fix_up;
--  unsigned long         adj;
-+	long int exponent;
-+	int invert;
-+	Xsig argSq, argSqSq, accumulatoro, accumulatore, accum,
-+	    argSignif, fix_up;
-+	unsigned long adj;
- 
--  exponent = exponent(st0_ptr);
-+	exponent = exponent(st0_ptr);
- 
- #ifdef PARANOID
--  if ( signnegative(st0_ptr) )	/* Can't hack a number < 0.0 */
--    { arith_invalid(0); return; }  /* Need a positive number */
-+	if (signnegative(st0_ptr)) {	/* Can't hack a number < 0.0 */
-+		arith_invalid(0);
-+		return;
-+	}			/* Need a positive number */
- #endif /* PARANOID */
- 
--  /* Split the problem into two domains, smaller and larger than pi/4 */
--  if ( (exponent == 0) || ((exponent == -1) && (st0_ptr->sigh > 0xc90fdaa2)) )
--    {
--      /* The argument is greater than (approx) pi/4 */
--      invert = 1;
--      accum.lsw = 0;
--      XSIG_LL(accum) = significand(st0_ptr);
-- 
--      if ( exponent == 0 )
--	{
--	  /* The argument is >= 1.0 */
--	  /* Put the binary point at the left. */
--	  XSIG_LL(accum) <<= 1;
--	}
--      /* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
--      XSIG_LL(accum) = 0x921fb54442d18469LL - XSIG_LL(accum);
--      /* This is a special case which arises due to rounding. */
--      if ( XSIG_LL(accum) == 0xffffffffffffffffLL )
--	{
--	  FPU_settag0(TAG_Valid);
--	  significand(st0_ptr) = 0x8a51e04daabda360LL;
--	  setexponent16(st0_ptr, (0x41 + EXTENDED_Ebias) | SIGN_Negative);
--	  return;
-+	/* Split the problem into two domains, smaller and larger than pi/4 */
-+	if ((exponent == 0)
-+	    || ((exponent == -1) && (st0_ptr->sigh > 0xc90fdaa2))) {
-+		/* The argument is greater than (approx) pi/4 */
-+		invert = 1;
-+		accum.lsw = 0;
-+		XSIG_LL(accum) = significand(st0_ptr);
++			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 +
-+		if (exponent == 0) {
-+			/* The argument is >= 1.0 */
-+			/* Put the binary point at the left. */
-+			XSIG_LL(accum) <<= 1;
-+		}
-+		/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
-+		XSIG_LL(accum) = 0x921fb54442d18469LL - XSIG_LL(accum);
-+		/* This is a special case which arises due to rounding. */
-+		if (XSIG_LL(accum) == 0xffffffffffffffffLL) {
-+			FPU_settag0(TAG_Valid);
-+			significand(st0_ptr) = 0x8a51e04daabda360LL;
-+			setexponent16(st0_ptr,
-+				      (0x41 + EXTENDED_Ebias) | SIGN_Negative);
-+			return;
-+		}
++			__generic_unplug_device(q);
++			spin_unlock_irq(q->queue_lock);
++			io_schedule();
 +
-+		argSignif.lsw = accum.lsw;
-+		XSIG_LL(argSignif) = XSIG_LL(accum);
-+		exponent = -1 + norm_Xsig(&argSignif);
-+	} else {
-+		invert = 0;
-+		argSignif.lsw = 0;
-+		XSIG_LL(accum) = XSIG_LL(argSignif) = significand(st0_ptr);
++			/*
++			 * After sleeping, we become a "batching" process and
++			 * will be able to allocate at least one request, and
++			 * up to a big batch of them for a small period of time.
++			 * See ioc_batching, ioc_set_batching
++			 */
++			ioc = current_io_context(GFP_NOIO, q->node);
++			ioc_set_batching(q, ioc);
 +
-+		if (exponent < -1) {
-+			/* shift the argument right by the required places */
-+			if (FPU_shrx(&XSIG_LL(accum), -1 - exponent) >=
-+			    0x80000000U)
-+				XSIG_LL(accum)++;	/* round up */
++			spin_lock_irq(q->queue_lock);
 +		}
- 	}
- 
--      argSignif.lsw = accum.lsw;
--      XSIG_LL(argSignif) = XSIG_LL(accum);
--      exponent = -1 + norm_Xsig(&argSignif);
--    }
--  else
--    {
--      invert = 0;
--      argSignif.lsw = 0;
--      XSIG_LL(accum) = XSIG_LL(argSignif) = significand(st0_ptr);
-- 
--      if ( exponent < -1 )
--	{
--	  /* shift the argument right by the required places */
--	  if ( FPU_shrx(&XSIG_LL(accum), -1-exponent) >= 0x80000000U )
--	    XSIG_LL(accum) ++;	/* round up */
--	}
--    }
--
--  XSIG_LL(argSq) = XSIG_LL(accum); argSq.lsw = accum.lsw;
--  mul_Xsig_Xsig(&argSq, &argSq);
--  XSIG_LL(argSqSq) = XSIG_LL(argSq); argSqSq.lsw = argSq.lsw;
--  mul_Xsig_Xsig(&argSqSq, &argSqSq);
--
--  /* Compute the negative terms for the numerator polynomial */
--  accumulatoro.msw = accumulatoro.midw = accumulatoro.lsw = 0;
--  polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddnegterm, HiPOWERon-1);
--  mul_Xsig_Xsig(&accumulatoro, &argSq);
--  negate_Xsig(&accumulatoro);
--  /* Add the positive terms */
--  polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddplterm, HiPOWERop-1);
--
--  
--  /* Compute the positive terms for the denominator polynomial */
--  accumulatore.msw = accumulatore.midw = accumulatore.lsw = 0;
--  polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evenplterm, HiPOWERep-1);
--  mul_Xsig_Xsig(&accumulatore, &argSq);
--  negate_Xsig(&accumulatore);
--  /* Add the negative terms */
--  polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evennegterm, HiPOWERen-1);
--  /* Multiply by arg^2 */
--  mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
--  mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
--  /* de-normalize and divide by 2 */
--  shr_Xsig(&accumulatore, -2*(1+exponent) + 1);
--  negate_Xsig(&accumulatore);      /* This does 1 - accumulator */
--
--  /* Now find the ratio. */
--  if ( accumulatore.msw == 0 )
--    {
--      /* accumulatoro must contain 1.0 here, (actually, 0) but it
--	 really doesn't matter what value we use because it will
--	 have negligible effect in later calculations
--	 */
--      XSIG_LL(accum) = 0x8000000000000000LL;
--      accum.lsw = 0;
--    }
--  else
--    {
--      div_Xsig(&accumulatoro, &accumulatore, &accum);
--    }
--
--  /* Multiply by 1/3 * arg^3 */
--  mul64_Xsig(&accum, &XSIG_LL(argSignif));
--  mul64_Xsig(&accum, &XSIG_LL(argSignif));
--  mul64_Xsig(&accum, &XSIG_LL(argSignif));
--  mul64_Xsig(&accum, &twothirds);
--  shr_Xsig(&accum, -2*(exponent+1));
--
--  /* tan(arg) = arg + accum */
--  add_two_Xsig(&accum, &argSignif, &exponent);
--
--  if ( invert )
--    {
--      /* We now have the value of tan(pi_2 - arg) where pi_2 is an
--	 approximation for pi/2
--	 */
--      /* The next step is to fix the answer to compensate for the
--	 error due to the approximation used for pi/2
--	 */
--
--      /* This is (approx) delta, the error in our approx for pi/2
--	 (see above). It has an exponent of -65
--	 */
--      XSIG_LL(fix_up) = 0x898cc51701b839a2LL;
--      fix_up.lsw = 0;
--
--      if ( exponent == 0 )
--	adj = 0xffffffff;   /* We want approx 1.0 here, but
--			       this is close enough. */
--      else if ( exponent > -30 )
--	{
--	  adj = accum.msw >> -(exponent+1);      /* tan */
--	  adj = mul_32_32(adj, adj);             /* tan^2 */
-+	XSIG_LL(argSq) = XSIG_LL(accum);
-+	argSq.lsw = accum.lsw;
-+	mul_Xsig_Xsig(&argSq, &argSq);
-+	XSIG_LL(argSqSq) = XSIG_LL(argSq);
-+	argSqSq.lsw = argSq.lsw;
-+	mul_Xsig_Xsig(&argSqSq, &argSqSq);
++		finish_wait(&rl->wait[rw], &wait);
++	}
 +
-+	/* Compute the negative terms for the numerator polynomial */
-+	accumulatoro.msw = accumulatoro.midw = accumulatoro.lsw = 0;
-+	polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddnegterm,
-+			HiPOWERon - 1);
-+	mul_Xsig_Xsig(&accumulatoro, &argSq);
-+	negate_Xsig(&accumulatoro);
-+	/* Add the positive terms */
-+	polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddplterm,
-+			HiPOWERop - 1);
++	return rq;
++}
 +
-+	/* Compute the positive terms for the denominator polynomial */
-+	accumulatore.msw = accumulatore.midw = accumulatore.lsw = 0;
-+	polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evenplterm,
-+			HiPOWERep - 1);
-+	mul_Xsig_Xsig(&accumulatore, &argSq);
-+	negate_Xsig(&accumulatore);
-+	/* Add the negative terms */
-+	polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evennegterm,
-+			HiPOWERen - 1);
-+	/* Multiply by arg^2 */
-+	mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
-+	mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
-+	/* de-normalize and divide by 2 */
-+	shr_Xsig(&accumulatore, -2 * (1 + exponent) + 1);
-+	negate_Xsig(&accumulatore);	/* This does 1 - accumulator */
++struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
++{
++	struct request *rq;
 +
-+	/* Now find the ratio. */
-+	if (accumulatore.msw == 0) {
-+		/* accumulatoro must contain 1.0 here, (actually, 0) but it
-+		   really doesn't matter what value we use because it will
-+		   have negligible effect in later calculations
-+		 */
-+		XSIG_LL(accum) = 0x8000000000000000LL;
-+		accum.lsw = 0;
-+	} else {
-+		div_Xsig(&accumulatoro, &accumulatore, &accum);
- 	}
--      else
--	adj = 0;
--      adj = mul_32_32(0x898cc517, adj);          /* delta * tan^2 */
--
--      fix_up.msw += adj;
--      if ( !(fix_up.msw & 0x80000000) )   /* did fix_up overflow ? */
--	{
--	  /* Yes, we need to add an msb */
--	  shr_Xsig(&fix_up, 1);
--	  fix_up.msw |= 0x80000000;
--	  shr_Xsig(&fix_up, 64 + exponent);
++	BUG_ON(rw != READ && rw != WRITE);
 +
-+	/* Multiply by 1/3 * arg^3 */
-+	mul64_Xsig(&accum, &XSIG_LL(argSignif));
-+	mul64_Xsig(&accum, &XSIG_LL(argSignif));
-+	mul64_Xsig(&accum, &XSIG_LL(argSignif));
-+	mul64_Xsig(&accum, &twothirds);
-+	shr_Xsig(&accum, -2 * (exponent + 1));
++	spin_lock_irq(q->queue_lock);
++	if (gfp_mask & __GFP_WAIT) {
++		rq = get_request_wait(q, rw, NULL);
++	} else {
++		rq = get_request(q, rw, NULL, gfp_mask);
++		if (!rq)
++			spin_unlock_irq(q->queue_lock);
++	}
++	/* q->queue_lock is unlocked at this point */
 +
-+	/* tan(arg) = arg + accum */
-+	add_two_Xsig(&accum, &argSignif, &exponent);
++	return rq;
++}
++EXPORT_SYMBOL(blk_get_request);
 +
-+	if (invert) {
-+		/* We now have the value of tan(pi_2 - arg) where pi_2 is an
-+		   approximation for pi/2
-+		 */
-+		/* The next step is to fix the answer to compensate for the
-+		   error due to the approximation used for pi/2
-+		 */
++/**
++ * blk_start_queueing - initiate dispatch of requests to device
++ * @q:		request queue to kick into gear
++ *
++ * This is basically a helper to remove the need to know whether a queue
++ * is plugged or not if someone just wants to initiate dispatch of requests
++ * for this queue.
++ *
++ * The queue lock must be held with interrupts disabled.
++ */
++void blk_start_queueing(struct request_queue *q)
++{
++	if (!blk_queue_plugged(q))
++		q->request_fn(q);
++	else
++		__generic_unplug_device(q);
++}
++EXPORT_SYMBOL(blk_start_queueing);
 +
-+		/* This is (approx) delta, the error in our approx for pi/2
-+		   (see above). It has an exponent of -65
-+		 */
-+		XSIG_LL(fix_up) = 0x898cc51701b839a2LL;
-+		fix_up.lsw = 0;
++/**
++ * blk_requeue_request - put a request back on queue
++ * @q:		request queue where request should be inserted
++ * @rq:		request to be inserted
++ *
++ * Description:
++ *    Drivers often keep queueing requests until the hardware cannot accept
++ *    more, when that condition happens we need to put the request back
++ *    on the queue. Must be called with queue lock held.
++ */
++void blk_requeue_request(struct request_queue *q, struct request *rq)
++{
++	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 +
-+		if (exponent == 0)
-+			adj = 0xffffffff;	/* We want approx 1.0 here, but
-+						   this is close enough. */
-+		else if (exponent > -30) {
-+			adj = accum.msw >> -(exponent + 1);	/* tan */
-+			adj = mul_32_32(adj, adj);	/* tan^2 */
-+		} else
-+			adj = 0;
-+		adj = mul_32_32(0x898cc517, adj);	/* delta * tan^2 */
++	if (blk_rq_tagged(rq))
++		blk_queue_end_tag(q, rq);
 +
-+		fix_up.msw += adj;
-+		if (!(fix_up.msw & 0x80000000)) {	/* did fix_up overflow ? */
-+			/* Yes, we need to add an msb */
-+			shr_Xsig(&fix_up, 1);
-+			fix_up.msw |= 0x80000000;
-+			shr_Xsig(&fix_up, 64 + exponent);
-+		} else
-+			shr_Xsig(&fix_up, 65 + exponent);
++	elv_requeue_request(q, rq);
++}
 +
-+		add_two_Xsig(&accum, &fix_up, &exponent);
++EXPORT_SYMBOL(blk_requeue_request);
 +
-+		/* accum now contains tan(pi/2 - arg).
-+		   Use tan(arg) = 1.0 / tan(pi/2 - arg)
-+		 */
-+		accumulatoro.lsw = accumulatoro.midw = 0;
-+		accumulatoro.msw = 0x80000000;
-+		div_Xsig(&accumulatoro, &accum, &accum);
-+		exponent = -exponent - 1;
- 	}
--      else
--	shr_Xsig(&fix_up, 65 + exponent);
--
--      add_two_Xsig(&accum, &fix_up, &exponent);
--
--      /* accum now contains tan(pi/2 - arg).
--	 Use tan(arg) = 1.0 / tan(pi/2 - arg)
--	 */
--      accumulatoro.lsw = accumulatoro.midw = 0;
--      accumulatoro.msw = 0x80000000;
--      div_Xsig(&accumulatoro, &accum, &accum);
--      exponent = - exponent - 1;
--    }
--
--  /* Transfer the result */
--  round_Xsig(&accum);
--  FPU_settag0(TAG_Valid);
--  significand(st0_ptr) = XSIG_LL(accum);
--  setexponent16(st0_ptr, exponent + EXTENDED_Ebias);  /* Result is positive. */
++/**
++ * blk_insert_request - insert a special request in to a request queue
++ * @q:		request queue where request should be inserted
++ * @rq:		request to be inserted
++ * @at_head:	insert request at head or tail of queue
++ * @data:	private data
++ *
++ * Description:
++ *    Many block devices need to execute commands asynchronously, so they don't
++ *    block the whole kernel from preemption during request execution.  This is
++ *    accomplished normally by inserting artificial requests tagged as
++ *    REQ_SPECIAL into the corresponding request queue, and letting them be
++ *    scheduled for actual execution by the request queue.
++ *
++ *    We have the option of inserting the head or the tail of the queue.
++ *    Typically we use the tail for new ioctls and so forth.  We use the head
++ *    of the queue for things like a QUEUE_FULL message from a device, or a
++ *    host that is unable to accept a particular command.
++ */
++void blk_insert_request(struct request_queue *q, struct request *rq,
++			int at_head, void *data)
++{
++	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
++	unsigned long flags;
 +
-+	/* Transfer the result */
-+	round_Xsig(&accum);
-+	FPU_settag0(TAG_Valid);
-+	significand(st0_ptr) = XSIG_LL(accum);
-+	setexponent16(st0_ptr, exponent + EXTENDED_Ebias);	/* Result is positive. */
- 
- }
-diff --git a/arch/x86/math-emu/reg_add_sub.c b/arch/x86/math-emu/reg_add_sub.c
-index 7cd3b37..deea48b 100644
---- a/arch/x86/math-emu/reg_add_sub.c
-+++ b/arch/x86/math-emu/reg_add_sub.c
-@@ -27,7 +27,7 @@
- static
- int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa,
- 		     FPU_REG const *b, u_char tagb, u_char signb,
--		     FPU_REG *dest, int deststnr, int control_w);
-+		     FPU_REG * dest, int deststnr, int control_w);
- 
- /*
-   Operates on st(0) and st(n), or on st(0) and temporary data.
-@@ -35,340 +35,299 @@ int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa,
-   */
- int FPU_add(FPU_REG const *b, u_char tagb, int deststnr, int control_w)
- {
--  FPU_REG *a = &st(0);
--  FPU_REG *dest = &st(deststnr);
--  u_char signb = getsign(b);
--  u_char taga = FPU_gettag0();
--  u_char signa = getsign(a);
--  u_char saved_sign = getsign(dest);
--  int diff, tag, expa, expb;
--  
--  if ( !(taga | tagb) )
--    {
--      expa = exponent(a);
--      expb = exponent(b);
--
--    valid_add:
--      /* Both registers are valid */
--      if (!(signa ^ signb))
--	{
--	  /* signs are the same */
--	  tag = FPU_u_add(a, b, dest, control_w, signa, expa, expb);
--	}
--      else
--	{
--	  /* The signs are different, so do a subtraction */
--	  diff = expa - expb;
--	  if (!diff)
--	    {
--	      diff = a->sigh - b->sigh;  /* This works only if the ms bits
--					    are identical. */
--	      if (!diff)
--		{
--		  diff = a->sigl > b->sigl;
--		  if (!diff)
--		    diff = -(a->sigl < b->sigl);
-+	FPU_REG *a = &st(0);
-+	FPU_REG *dest = &st(deststnr);
-+	u_char signb = getsign(b);
-+	u_char taga = FPU_gettag0();
-+	u_char signa = getsign(a);
-+	u_char saved_sign = getsign(dest);
-+	int diff, tag, expa, expb;
++	/*
++	 * tell I/O scheduler that this isn't a regular read/write (ie it
++	 * must not attempt merges on this) and that it acts as a soft
++	 * barrier
++	 */
++	rq->cmd_type = REQ_TYPE_SPECIAL;
++	rq->cmd_flags |= REQ_SOFTBARRIER;
 +
-+	if (!(taga | tagb)) {
-+		expa = exponent(a);
-+		expb = exponent(b);
++	rq->special = data;
 +
-+	      valid_add:
-+		/* Both registers are valid */
-+		if (!(signa ^ signb)) {
-+			/* signs are the same */
-+			tag =
-+			    FPU_u_add(a, b, dest, control_w, signa, expa, expb);
-+		} else {
-+			/* The signs are different, so do a subtraction */
-+			diff = expa - expb;
-+			if (!diff) {
-+				diff = a->sigh - b->sigh;	/* This works only if the ms bits
-+								   are identical. */
-+				if (!diff) {
-+					diff = a->sigl > b->sigl;
-+					if (!diff)
-+						diff = -(a->sigl < b->sigl);
-+				}
-+			}
++	spin_lock_irqsave(q->queue_lock, flags);
 +
-+			if (diff > 0) {
-+				tag =
-+				    FPU_u_sub(a, b, dest, control_w, signa,
-+					      expa, expb);
-+			} else if (diff < 0) {
-+				tag =
-+				    FPU_u_sub(b, a, dest, control_w, signb,
-+					      expb, expa);
-+			} else {
-+				FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
-+				/* sign depends upon rounding mode */
-+				setsign(dest, ((control_w & CW_RC) != RC_DOWN)
-+					? SIGN_POS : SIGN_NEG);
-+				return TAG_Zero;
-+			}
- 		}
--	    }
--      
--	  if (diff > 0)
--	    {
--	      tag = FPU_u_sub(a, b, dest, control_w, signa, expa, expb);
--	    }
--	  else if ( diff < 0 )
--	    {
--	      tag = FPU_u_sub(b, a, dest, control_w, signb, expb, expa);
--	    }
--	  else
--	    {
--	      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
--	      /* sign depends upon rounding mode */
--	      setsign(dest, ((control_w & CW_RC) != RC_DOWN)
--		      ? SIGN_POS : SIGN_NEG);
--	      return TAG_Zero;
--	    }
--	}
- 
--      if ( tag < 0 )
--	{
--	  setsign(dest, saved_sign);
--	  return tag;
-+		if (tag < 0) {
-+			setsign(dest, saved_sign);
-+			return tag;
-+		}
-+		FPU_settagi(deststnr, tag);
-+		return tag;
- 	}
--      FPU_settagi(deststnr, tag);
--      return tag;
--    }
- 
--  if ( taga == TAG_Special )
--    taga = FPU_Special(a);
--  if ( tagb == TAG_Special )
--    tagb = FPU_Special(b);
-+	if (taga == TAG_Special)
-+		taga = FPU_Special(a);
-+	if (tagb == TAG_Special)
-+		tagb = FPU_Special(b);
- 
--  if ( ((taga == TAG_Valid) && (tagb == TW_Denormal))
-+	if (((taga == TAG_Valid) && (tagb == TW_Denormal))
- 	    || ((taga == TW_Denormal) && (tagb == TAG_Valid))
--	    || ((taga == TW_Denormal) && (tagb == TW_Denormal)) )
--    {
--      FPU_REG x, y;
-+	    || ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
-+		FPU_REG x, y;
++	/*
++	 * If command is tagged, release the tag
++	 */
++	if (blk_rq_tagged(rq))
++		blk_queue_end_tag(q, rq);
 +
-+		if (denormal_operand() < 0)
-+			return FPU_Exception;
++	drive_stat_acct(rq, 1);
++	__elv_add_request(q, rq, where, 0);
++	blk_start_queueing(q);
++	spin_unlock_irqrestore(q->queue_lock, flags);
++}
 +
-+		FPU_to_exp16(a, &x);
-+		FPU_to_exp16(b, &y);
-+		a = &x;
-+		b = &y;
-+		expa = exponent16(a);
-+		expb = exponent16(b);
-+		goto valid_add;
-+	}
- 
--      if ( denormal_operand() < 0 )
--	return FPU_Exception;
-+	if ((taga == TW_NaN) || (tagb == TW_NaN)) {
-+		if (deststnr == 0)
-+			return real_2op_NaN(b, tagb, deststnr, a);
-+		else
-+			return real_2op_NaN(a, taga, deststnr, a);
-+	}
- 
--      FPU_to_exp16(a, &x);
--      FPU_to_exp16(b, &y);
--      a = &x;
--      b = &y;
--      expa = exponent16(a);
--      expb = exponent16(b);
--      goto valid_add;
--    }
--
--  if ( (taga == TW_NaN) || (tagb == TW_NaN) )
--    {
--      if ( deststnr == 0 )
--	return real_2op_NaN(b, tagb, deststnr, a);
--      else
--	return real_2op_NaN(a, taga, deststnr, a);
--    }
--
--  return add_sub_specials(a, taga, signa, b, tagb, signb,
--			  dest, deststnr, control_w);
-+	return add_sub_specials(a, taga, signa, b, tagb, signb,
-+				dest, deststnr, control_w);
- }
- 
--
- /* Subtract b from a.  (a-b) -> dest */
- int FPU_sub(int flags, int rm, int control_w)
- {
--  FPU_REG const *a, *b;
--  FPU_REG *dest;
--  u_char taga, tagb, signa, signb, saved_sign, sign;
--  int diff, tag = 0, expa, expb, deststnr;
--
--  a = &st(0);
--  taga = FPU_gettag0();
--
--  deststnr = 0;
--  if ( flags & LOADED )
--    {
--      b = (FPU_REG *)rm;
--      tagb = flags & 0x0f;
--    }
--  else
--    {
--      b = &st(rm);
--      tagb = FPU_gettagi(rm);
--
--      if ( flags & DEST_RM )
--	deststnr = rm;
--    }
--
--  signa = getsign(a);
--  signb = getsign(b);
--
--  if ( flags & REV )
--    {
--      signa ^= SIGN_NEG;
--      signb ^= SIGN_NEG;
--    }
--
--  dest = &st(deststnr);
--  saved_sign = getsign(dest);
--
--  if ( !(taga | tagb) )
--    {
--      expa = exponent(a);
--      expb = exponent(b);
--
--    valid_subtract:
--      /* Both registers are valid */
--
--      diff = expa - expb;
--
--      if (!diff)
--	{
--	  diff = a->sigh - b->sigh;  /* Works only if ms bits are identical */
--	  if (!diff)
--	    {
--	      diff = a->sigl > b->sigl;
--	      if (!diff)
--		diff = -(a->sigl < b->sigl);
--	    }
-+	FPU_REG const *a, *b;
-+	FPU_REG *dest;
-+	u_char taga, tagb, signa, signb, saved_sign, sign;
-+	int diff, tag = 0, expa, expb, deststnr;
++EXPORT_SYMBOL(blk_insert_request);
 +
-+	a = &st(0);
-+	taga = FPU_gettag0();
++/*
++ * add-request adds a request to the linked list.
++ * queue lock is held and interrupts disabled, as we muck with the
++ * request queue list.
++ */
++static inline void add_request(struct request_queue * q, struct request * req)
++{
++	drive_stat_acct(req, 1);
 +
-+	deststnr = 0;
-+	if (flags & LOADED) {
-+		b = (FPU_REG *) rm;
-+		tagb = flags & 0x0f;
-+	} else {
-+		b = &st(rm);
-+		tagb = FPU_gettagi(rm);
++	/*
++	 * elevator indicated where it wants this request to be
++	 * inserted at elevator_merge time
++	 */
++	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
++}
++ 
++/*
++ * disk_round_stats()	- Round off the performance stats on a struct
++ * disk_stats.
++ *
++ * The average IO queue length and utilisation statistics are maintained
++ * by observing the current state of the queue length and the amount of
++ * time it has been in this state for.
++ *
++ * Normally, that accounting is done on IO completion, but that can result
++ * in more than a second's worth of IO being accounted for within any one
++ * second, leading to >100% utilisation.  To deal with that, we call this
++ * function to do a round-off before returning the results when reading
++ * /proc/diskstats.  This accounts immediately for all queue usage up to
++ * the current jiffies and restarts the counters again.
++ */
++void disk_round_stats(struct gendisk *disk)
++{
++	unsigned long now = jiffies;
 +
-+		if (flags & DEST_RM)
-+			deststnr = rm;
- 	}
- 
--      switch ( (((int)signa)*2 + signb) / SIGN_NEG )
--	{
--	case 0: /* P - P */
--	case 3: /* N - N */
--	  if (diff > 0)
--	    {
--	      /* |a| > |b| */
--	      tag = FPU_u_sub(a, b, dest, control_w, signa, expa, expb);
--	    }
--	  else if ( diff == 0 )
--	    {
--	      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
--
--	      /* sign depends upon rounding mode */
--	      setsign(dest, ((control_w & CW_RC) != RC_DOWN)
--		? SIGN_POS : SIGN_NEG);
--	      return TAG_Zero;
--	    }
--	  else
--	    {
--	      sign = signa ^ SIGN_NEG;
--	      tag = FPU_u_sub(b, a, dest, control_w, sign, expb, expa);
--	    }
--	  break;
--	case 1: /* P - N */
--	  tag = FPU_u_add(a, b, dest, control_w, SIGN_POS, expa, expb);
--	  break;
--	case 2: /* N - P */
--	  tag = FPU_u_add(a, b, dest, control_w, SIGN_NEG, expa, expb);
--	  break;
-+	signa = getsign(a);
-+	signb = getsign(b);
++	if (now == disk->stamp)
++		return;
 +
-+	if (flags & REV) {
-+		signa ^= SIGN_NEG;
-+		signb ^= SIGN_NEG;
++	if (disk->in_flight) {
++		__disk_stat_add(disk, time_in_queue,
++				disk->in_flight * (now - disk->stamp));
++		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
 +	}
++	disk->stamp = now;
++}
 +
-+	dest = &st(deststnr);
-+	saved_sign = getsign(dest);
++EXPORT_SYMBOL_GPL(disk_round_stats);
 +
-+	if (!(taga | tagb)) {
-+		expa = exponent(a);
-+		expb = exponent(b);
++/*
++ * queue lock must be held
++ */
++void __blk_put_request(struct request_queue *q, struct request *req)
++{
++	if (unlikely(!q))
++		return;
++	if (unlikely(--req->ref_count))
++		return;
 +
-+	      valid_subtract:
-+		/* Both registers are valid */
++	elv_completed_request(q, req);
 +
-+		diff = expa - expb;
++	/*
++	 * Request may not have originated from ll_rw_blk. if not,
++	 * it didn't come out of our reserved rq pools
++	 */
++	if (req->cmd_flags & REQ_ALLOCED) {
++		int rw = rq_data_dir(req);
++		int priv = req->cmd_flags & REQ_ELVPRIV;
 +
-+		if (!diff) {
-+			diff = a->sigh - b->sigh;	/* Works only if ms bits are identical */
-+			if (!diff) {
-+				diff = a->sigl > b->sigl;
-+				if (!diff)
-+					diff = -(a->sigl < b->sigl);
-+			}
-+		}
++		BUG_ON(!list_empty(&req->queuelist));
++		BUG_ON(!hlist_unhashed(&req->hash));
 +
-+		switch ((((int)signa) * 2 + signb) / SIGN_NEG) {
-+		case 0:	/* P - P */
-+		case 3:	/* N - N */
-+			if (diff > 0) {
-+				/* |a| > |b| */
-+				tag =
-+				    FPU_u_sub(a, b, dest, control_w, signa,
-+					      expa, expb);
-+			} else if (diff == 0) {
-+				FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
++		blk_free_request(q, req);
++		freed_request(q, rw, priv);
++	}
++}
 +
-+				/* sign depends upon rounding mode */
-+				setsign(dest, ((control_w & CW_RC) != RC_DOWN)
-+					? SIGN_POS : SIGN_NEG);
-+				return TAG_Zero;
-+			} else {
-+				sign = signa ^ SIGN_NEG;
-+				tag =
-+				    FPU_u_sub(b, a, dest, control_w, sign, expb,
-+					      expa);
-+			}
-+			break;
-+		case 1:	/* P - N */
-+			tag =
-+			    FPU_u_add(a, b, dest, control_w, SIGN_POS, expa,
-+				      expb);
-+			break;
-+		case 2:	/* N - P */
-+			tag =
-+			    FPU_u_add(a, b, dest, control_w, SIGN_NEG, expa,
-+				      expb);
-+			break;
- #ifdef PARANOID
--	default:
--	  EXCEPTION(EX_INTERNAL|0x111);
--	  return -1;
-+		default:
-+			EXCEPTION(EX_INTERNAL | 0x111);
-+			return -1;
- #endif
-+		}
-+		if (tag < 0) {
-+			setsign(dest, saved_sign);
-+			return tag;
-+		}
-+		FPU_settagi(deststnr, tag);
-+		return tag;
- 	}
--      if ( tag < 0 )
--	{
--	  setsign(dest, saved_sign);
--	  return tag;
--	}
--      FPU_settagi(deststnr, tag);
--      return tag;
--    }
- 
--  if ( taga == TAG_Special )
--    taga = FPU_Special(a);
--  if ( tagb == TAG_Special )
--    tagb = FPU_Special(b);
-+	if (taga == TAG_Special)
-+		taga = FPU_Special(a);
-+	if (tagb == TAG_Special)
-+		tagb = FPU_Special(b);
- 
--  if ( ((taga == TAG_Valid) && (tagb == TW_Denormal))
-+	if (((taga == TAG_Valid) && (tagb == TW_Denormal))
- 	    || ((taga == TW_Denormal) && (tagb == TAG_Valid))
--	    || ((taga == TW_Denormal) && (tagb == TW_Denormal)) )
--    {
--      FPU_REG x, y;
-+	    || ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
-+		FPU_REG x, y;
- 
--      if ( denormal_operand() < 0 )
--	return FPU_Exception;
-+		if (denormal_operand() < 0)
-+			return FPU_Exception;
++EXPORT_SYMBOL_GPL(__blk_put_request);
 +
-+		FPU_to_exp16(a, &x);
-+		FPU_to_exp16(b, &y);
-+		a = &x;
-+		b = &y;
-+		expa = exponent16(a);
-+		expb = exponent16(b);
- 
--      FPU_to_exp16(a, &x);
--      FPU_to_exp16(b, &y);
--      a = &x;
--      b = &y;
--      expa = exponent16(a);
--      expb = exponent16(b);
--
--      goto valid_subtract;
--    }
--
--  if ( (taga == TW_NaN) || (tagb == TW_NaN) )
--    {
--      FPU_REG const *d1, *d2;
--      if ( flags & REV )
--	{
--	  d1 = b;
--	  d2 = a;
-+		goto valid_subtract;
- 	}
--      else
--	{
--	  d1 = a;
--	  d2 = b;
++void blk_put_request(struct request *req)
++{
++	unsigned long flags;
++	struct request_queue *q = req->q;
 +
-+	if ((taga == TW_NaN) || (tagb == TW_NaN)) {
-+		FPU_REG const *d1, *d2;
-+		if (flags & REV) {
-+			d1 = b;
-+			d2 = a;
-+		} else {
-+			d1 = a;
-+			d2 = b;
-+		}
-+		if (flags & LOADED)
-+			return real_2op_NaN(b, tagb, deststnr, d1);
-+		if (flags & DEST_RM)
-+			return real_2op_NaN(a, taga, deststnr, d2);
-+		else
-+			return real_2op_NaN(b, tagb, deststnr, d2);
- 	}
--      if ( flags & LOADED )
--	return real_2op_NaN(b, tagb, deststnr, d1);
--      if ( flags & DEST_RM )
--	return real_2op_NaN(a, taga, deststnr, d2);
--      else
--	return real_2op_NaN(b, tagb, deststnr, d2);
--    }
--
--    return add_sub_specials(a, taga, signa, b, tagb, signb ^ SIGN_NEG,
--			    dest, deststnr, control_w);
--}
- 
-+	return add_sub_specials(a, taga, signa, b, tagb, signb ^ SIGN_NEG,
-+				dest, deststnr, control_w);
++	/*
++	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
++	 * following if (q) test.
++	 */
++	if (q) {
++		spin_lock_irqsave(q->queue_lock, flags);
++		__blk_put_request(q, req);
++		spin_unlock_irqrestore(q->queue_lock, flags);
++	}
 +}
- 
- static
- int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa,
- 		     FPU_REG const *b, u_char tagb, u_char signb,
--		     FPU_REG *dest, int deststnr, int control_w)
-+		     FPU_REG * dest, int deststnr, int control_w)
- {
--  if ( ((taga == TW_Denormal) || (tagb == TW_Denormal))
--       && (denormal_operand() < 0) )
--    return FPU_Exception;
--
--  if (taga == TAG_Zero)
--    {
--      if (tagb == TAG_Zero)
--	{
--	  /* Both are zero, result will be zero. */
--	  u_char different_signs = signa ^ signb;
--
--	  FPU_copy_to_regi(a, TAG_Zero, deststnr);
--	  if ( different_signs )
--	    {
--	      /* Signs are different. */
--	      /* Sign of answer depends upon rounding mode. */
--	      setsign(dest, ((control_w & CW_RC) != RC_DOWN)
--		      ? SIGN_POS : SIGN_NEG);
--	    }
--	  else
--	    setsign(dest, signa);  /* signa may differ from the sign of a. */
--	  return TAG_Zero;
--	}
--      else
--	{
--	  reg_copy(b, dest);
--	  if ( (tagb == TW_Denormal) && (b->sigh & 0x80000000) )
--	    {
--	      /* A pseudoDenormal, convert it. */
--	      addexponent(dest, 1);
--	      tagb = TAG_Valid;
--	    }
--	  else if ( tagb > TAG_Empty )
--	    tagb = TAG_Special;
--	  setsign(dest, signb);  /* signb may differ from the sign of b. */
--	  FPU_settagi(deststnr, tagb);
--	  return tagb;
--	}
--    }
--  else if (tagb == TAG_Zero)
--    {
--      reg_copy(a, dest);
--      if ( (taga == TW_Denormal) && (a->sigh & 0x80000000) )
--	{
--	  /* A pseudoDenormal */
--	  addexponent(dest, 1);
--	  taga = TAG_Valid;
--	}
--      else if ( taga > TAG_Empty )
--	taga = TAG_Special;
--      setsign(dest, signa);  /* signa may differ from the sign of a. */
--      FPU_settagi(deststnr, taga);
--      return taga;
--    }
--  else if (taga == TW_Infinity)
--    {
--      if ( (tagb != TW_Infinity) || (signa == signb) )
--	{
--	  FPU_copy_to_regi(a, TAG_Special, deststnr);
--	  setsign(dest, signa);  /* signa may differ from the sign of a. */
--	  return taga;
-+	if (((taga == TW_Denormal) || (tagb == TW_Denormal))
-+	    && (denormal_operand() < 0))
-+		return FPU_Exception;
 +
-+	if (taga == TAG_Zero) {
-+		if (tagb == TAG_Zero) {
-+			/* Both are zero, result will be zero. */
-+			u_char different_signs = signa ^ signb;
++EXPORT_SYMBOL(blk_put_request);
 +
-+			FPU_copy_to_regi(a, TAG_Zero, deststnr);
-+			if (different_signs) {
-+				/* Signs are different. */
-+				/* Sign of answer depends upon rounding mode. */
-+				setsign(dest, ((control_w & CW_RC) != RC_DOWN)
-+					? SIGN_POS : SIGN_NEG);
-+			} else
-+				setsign(dest, signa);	/* signa may differ from the sign of a. */
-+			return TAG_Zero;
-+		} else {
-+			reg_copy(b, dest);
-+			if ((tagb == TW_Denormal) && (b->sigh & 0x80000000)) {
-+				/* A pseudoDenormal, convert it. */
-+				addexponent(dest, 1);
-+				tagb = TAG_Valid;
-+			} else if (tagb > TAG_Empty)
-+				tagb = TAG_Special;
-+			setsign(dest, signb);	/* signb may differ from the sign of b. */
-+			FPU_settagi(deststnr, tagb);
-+			return tagb;
-+		}
-+	} else if (tagb == TAG_Zero) {
-+		reg_copy(a, dest);
-+		if ((taga == TW_Denormal) && (a->sigh & 0x80000000)) {
-+			/* A pseudoDenormal */
-+			addexponent(dest, 1);
-+			taga = TAG_Valid;
-+		} else if (taga > TAG_Empty)
-+			taga = TAG_Special;
-+		setsign(dest, signa);	/* signa may differ from the sign of a. */
-+		FPU_settagi(deststnr, taga);
-+		return taga;
-+	} else if (taga == TW_Infinity) {
-+		if ((tagb != TW_Infinity) || (signa == signb)) {
-+			FPU_copy_to_regi(a, TAG_Special, deststnr);
-+			setsign(dest, signa);	/* signa may differ from the sign of a. */
-+			return taga;
-+		}
-+		/* Infinity-Infinity is undefined. */
-+		return arith_invalid(deststnr);
-+	} else if (tagb == TW_Infinity) {
-+		FPU_copy_to_regi(b, TAG_Special, deststnr);
-+		setsign(dest, signb);	/* signb may differ from the sign of b. */
-+		return tagb;
- 	}
--      /* Infinity-Infinity is undefined. */
--      return arith_invalid(deststnr);
--    }
--  else if (tagb == TW_Infinity)
--    {
--      FPU_copy_to_regi(b, TAG_Special, deststnr);
--      setsign(dest, signb);  /* signb may differ from the sign of b. */
--      return tagb;
--    }
--
- #ifdef PARANOID
--  EXCEPTION(EX_INTERNAL|0x101);
-+	EXCEPTION(EX_INTERNAL | 0x101);
- #endif
- 
--  return FPU_Exception;
-+	return FPU_Exception;
- }
--
-diff --git a/arch/x86/math-emu/reg_compare.c b/arch/x86/math-emu/reg_compare.c
-index f37c5b5..ecce55f 100644
---- a/arch/x86/math-emu/reg_compare.c
-+++ b/arch/x86/math-emu/reg_compare.c
-@@ -20,362 +20,331 @@
- #include "control_w.h"
- #include "status_w.h"
- 
--
- static int compare(FPU_REG const *b, int tagb)
- {
--  int diff, exp0, expb;
--  u_char	  	st0_tag;
--  FPU_REG  	*st0_ptr;
--  FPU_REG	x, y;
--  u_char		st0_sign, signb = getsign(b);
--
--  st0_ptr = &st(0);
--  st0_tag = FPU_gettag0();
--  st0_sign = getsign(st0_ptr);
--
--  if ( tagb == TAG_Special )
--    tagb = FPU_Special(b);
--  if ( st0_tag == TAG_Special )
--    st0_tag = FPU_Special(st0_ptr);
--
--  if ( ((st0_tag != TAG_Valid) && (st0_tag != TW_Denormal))
--       || ((tagb != TAG_Valid) && (tagb != TW_Denormal)) )
--    {
--      if ( st0_tag == TAG_Zero )
--	{
--	  if ( tagb == TAG_Zero ) return COMP_A_eq_B;
--	  if ( tagb == TAG_Valid )
--	    return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
--	  if ( tagb == TW_Denormal )
--	    return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
--	    | COMP_Denormal;
--	}
--      else if ( tagb == TAG_Zero )
--	{
--	  if ( st0_tag == TAG_Valid )
--	    return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
--	  if ( st0_tag == TW_Denormal )
--	    return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
--	    | COMP_Denormal;
-+	int diff, exp0, expb;
-+	u_char st0_tag;
-+	FPU_REG *st0_ptr;
-+	FPU_REG x, y;
-+	u_char st0_sign, signb = getsign(b);
++void init_request_from_bio(struct request *req, struct bio *bio)
++{
++	req->cmd_type = REQ_TYPE_FS;
 +
-+	st0_ptr = &st(0);
-+	st0_tag = FPU_gettag0();
-+	st0_sign = getsign(st0_ptr);
++	/*
++	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
++	 */
++	if (bio_rw_ahead(bio) || bio_failfast(bio))
++		req->cmd_flags |= REQ_FAILFAST;
 +
-+	if (tagb == TAG_Special)
-+		tagb = FPU_Special(b);
-+	if (st0_tag == TAG_Special)
-+		st0_tag = FPU_Special(st0_ptr);
++	/*
++	 * REQ_BARRIER implies no merging, but lets make it explicit
++	 */
++	if (unlikely(bio_barrier(bio)))
++		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 +
-+	if (((st0_tag != TAG_Valid) && (st0_tag != TW_Denormal))
-+	    || ((tagb != TAG_Valid) && (tagb != TW_Denormal))) {
-+		if (st0_tag == TAG_Zero) {
-+			if (tagb == TAG_Zero)
-+				return COMP_A_eq_B;
-+			if (tagb == TAG_Valid)
-+				return ((signb ==
-+					 SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
-+			if (tagb == TW_Denormal)
-+				return ((signb ==
-+					 SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
-+				    | COMP_Denormal;
-+		} else if (tagb == TAG_Zero) {
-+			if (st0_tag == TAG_Valid)
-+				return ((st0_sign ==
-+					 SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
-+			if (st0_tag == TW_Denormal)
-+				return ((st0_sign ==
-+					 SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
-+				    | COMP_Denormal;
-+		}
++	if (bio_sync(bio))
++		req->cmd_flags |= REQ_RW_SYNC;
++	if (bio_rw_meta(bio))
++		req->cmd_flags |= REQ_RW_META;
 +
-+		if (st0_tag == TW_Infinity) {
-+			if ((tagb == TAG_Valid) || (tagb == TAG_Zero))
-+				return ((st0_sign ==
-+					 SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
-+			else if (tagb == TW_Denormal)
-+				return ((st0_sign ==
-+					 SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
-+				    | COMP_Denormal;
-+			else if (tagb == TW_Infinity) {
-+				/* The 80486 book says that infinities can be equal! */
-+				return (st0_sign == signb) ? COMP_A_eq_B :
-+				    ((st0_sign ==
-+				      SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
-+			}
-+			/* Fall through to the NaN code */
-+		} else if (tagb == TW_Infinity) {
-+			if ((st0_tag == TAG_Valid) || (st0_tag == TAG_Zero))
-+				return ((signb ==
-+					 SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
-+			if (st0_tag == TW_Denormal)
-+				return ((signb ==
-+					 SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
-+				    | COMP_Denormal;
-+			/* Fall through to the NaN code */
-+		}
++	req->errors = 0;
++	req->hard_sector = req->sector = bio->bi_sector;
++	req->ioprio = bio_prio(bio);
++	req->start_time = jiffies;
++	blk_rq_bio_prep(req->q, req, bio);
++}
 +
-+		/* The only possibility now should be that one of the arguments
-+		   is a NaN */
-+		if ((st0_tag == TW_NaN) || (tagb == TW_NaN)) {
-+			int signalling = 0, unsupported = 0;
-+			if (st0_tag == TW_NaN) {
-+				signalling =
-+				    (st0_ptr->sigh & 0xc0000000) == 0x80000000;
-+				unsupported = !((exponent(st0_ptr) == EXP_OVER)
-+						&& (st0_ptr->
-+						    sigh & 0x80000000));
-+			}
-+			if (tagb == TW_NaN) {
-+				signalling |=
-+				    (b->sigh & 0xc0000000) == 0x80000000;
-+				unsupported |= !((exponent(b) == EXP_OVER)
-+						 && (b->sigh & 0x80000000));
-+			}
-+			if (signalling || unsupported)
-+				return COMP_No_Comp | COMP_SNaN | COMP_NaN;
-+			else
-+				/* Neither is a signaling NaN */
-+				return COMP_No_Comp | COMP_NaN;
-+		}
++static int __make_request(struct request_queue *q, struct bio *bio)
++{
++	struct request *req;
++	int el_ret, nr_sectors, barrier, err;
++	const unsigned short prio = bio_prio(bio);
++	const int sync = bio_sync(bio);
++	int rw_flags;
 +
-+		EXCEPTION(EX_Invalid);
- 	}
- 
--      if ( st0_tag == TW_Infinity )
--	{
--	  if ( (tagb == TAG_Valid) || (tagb == TAG_Zero) )
--	    return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
--	  else if ( tagb == TW_Denormal )
--	    return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
--	      | COMP_Denormal;
--	  else if ( tagb == TW_Infinity )
--	    {
--	      /* The 80486 book says that infinities can be equal! */
--	      return (st0_sign == signb) ? COMP_A_eq_B :
--		((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
--	    }
--	  /* Fall through to the NaN code */
--	}
--      else if ( tagb == TW_Infinity )
--	{
--	  if ( (st0_tag == TAG_Valid) || (st0_tag == TAG_Zero) )
--	    return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
--	  if ( st0_tag == TW_Denormal )
--	    return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
--		| COMP_Denormal;
--	  /* Fall through to the NaN code */
-+	if (st0_sign != signb) {
-+		return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
-+		    | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
-+		       COMP_Denormal : 0);
- 	}
- 
--      /* The only possibility now should be that one of the arguments
--	 is a NaN */
--      if ( (st0_tag == TW_NaN) || (tagb == TW_NaN) )
--	{
--	  int signalling = 0, unsupported = 0;
--	  if ( st0_tag == TW_NaN )
--	    {
--	      signalling = (st0_ptr->sigh & 0xc0000000) == 0x80000000;
--	      unsupported = !((exponent(st0_ptr) == EXP_OVER)
--			      && (st0_ptr->sigh & 0x80000000));
--	    }
--	  if ( tagb == TW_NaN )
--	    {
--	      signalling |= (b->sigh & 0xc0000000) == 0x80000000;
--	      unsupported |= !((exponent(b) == EXP_OVER)
--			       && (b->sigh & 0x80000000));
--	    }
--	  if ( signalling || unsupported )
--	    return COMP_No_Comp | COMP_SNaN | COMP_NaN;
--	  else
--	    /* Neither is a signaling NaN */
--	    return COMP_No_Comp | COMP_NaN;
-+	if ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) {
-+		FPU_to_exp16(st0_ptr, &x);
-+		FPU_to_exp16(b, &y);
-+		st0_ptr = &x;
-+		b = &y;
-+		exp0 = exponent16(st0_ptr);
-+		expb = exponent16(b);
-+	} else {
-+		exp0 = exponent(st0_ptr);
-+		expb = exponent(b);
- 	}
--      
--      EXCEPTION(EX_Invalid);
--    }
--  
--  if (st0_sign != signb)
--    {
--      return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
--	| ( ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
--	    COMP_Denormal : 0);
--    }
--
--  if ( (st0_tag == TW_Denormal) || (tagb == TW_Denormal) )
--    {
--      FPU_to_exp16(st0_ptr, &x);
--      FPU_to_exp16(b, &y);
--      st0_ptr = &x;
--      b = &y;
--      exp0 = exponent16(st0_ptr);
--      expb = exponent16(b);
--    }
--  else
--    {
--      exp0 = exponent(st0_ptr);
--      expb = exponent(b);
--    }
- 
- #ifdef PARANOID
--  if (!(st0_ptr->sigh & 0x80000000)) EXCEPTION(EX_Invalid);
--  if (!(b->sigh & 0x80000000)) EXCEPTION(EX_Invalid);
-+	if (!(st0_ptr->sigh & 0x80000000))
-+		EXCEPTION(EX_Invalid);
-+	if (!(b->sigh & 0x80000000))
-+		EXCEPTION(EX_Invalid);
- #endif /* PARANOID */
- 
--  diff = exp0 - expb;
--  if ( diff == 0 )
--    {
--      diff = st0_ptr->sigh - b->sigh;  /* Works only if ms bits are
--					      identical */
--      if ( diff == 0 )
--	{
--	diff = st0_ptr->sigl > b->sigl;
--	if ( diff == 0 )
--	  diff = -(st0_ptr->sigl < b->sigl);
-+	diff = exp0 - expb;
-+	if (diff == 0) {
-+		diff = st0_ptr->sigh - b->sigh;	/* Works only if ms bits are
-+						   identical */
-+		if (diff == 0) {
-+			diff = st0_ptr->sigl > b->sigl;
-+			if (diff == 0)
-+				diff = -(st0_ptr->sigl < b->sigl);
-+		}
- 	}
--    }
--
--  if ( diff > 0 )
--    {
--      return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
--	| ( ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
--	    COMP_Denormal : 0);
--    }
--  if ( diff < 0 )
--    {
--      return ((st0_sign == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
--	| ( ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
--	    COMP_Denormal : 0);
--    }
--
--  return COMP_A_eq_B
--    | ( ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
--	COMP_Denormal : 0);
- 
--}
-+	if (diff > 0) {
-+		return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
-+		    | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
-+		       COMP_Denormal : 0);
-+	}
-+	if (diff < 0) {
-+		return ((st0_sign == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
-+		    | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
-+		       COMP_Denormal : 0);
++	nr_sectors = bio_sectors(bio);
++
++	/*
++	 * low level driver can indicate that it wants pages above a
++	 * certain limit bounced to low memory (ie for highmem, or even
++	 * ISA dma in theory)
++	 */
++	blk_queue_bounce(q, &bio);
++
++	barrier = bio_barrier(bio);
++	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
++		err = -EOPNOTSUPP;
++		goto end_io;
 +	}
- 
-+	return COMP_A_eq_B
-+	    | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
-+	       COMP_Denormal : 0);
 +
-+}
- 
- /* This function requires that st(0) is not empty */
- int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
- {
--  int f = 0, c;
--
--  c = compare(loaded_data, loaded_tag);
--
--  if (c & COMP_NaN)
--    {
--      EXCEPTION(EX_Invalid);
--      f = SW_C3 | SW_C2 | SW_C0;
--    }
--  else
--    switch (c & 7)
--      {
--      case COMP_A_lt_B:
--	f = SW_C0;
--	break;
--      case COMP_A_eq_B:
--	f = SW_C3;
--	break;
--      case COMP_A_gt_B:
--	f = 0;
--	break;
--      case COMP_No_Comp:
--	f = SW_C3 | SW_C2 | SW_C0;
--	break;
-+	int f = 0, c;
++	spin_lock_irq(q->queue_lock);
 +
-+	c = compare(loaded_data, loaded_tag);
++	if (unlikely(barrier) || elv_queue_empty(q))
++		goto get_rq;
 +
-+	if (c & COMP_NaN) {
-+		EXCEPTION(EX_Invalid);
-+		f = SW_C3 | SW_C2 | SW_C0;
-+	} else
-+		switch (c & 7) {
-+		case COMP_A_lt_B:
-+			f = SW_C0;
-+			break;
-+		case COMP_A_eq_B:
-+			f = SW_C3;
-+			break;
-+		case COMP_A_gt_B:
-+			f = 0;
-+			break;
-+		case COMP_No_Comp:
-+			f = SW_C3 | SW_C2 | SW_C0;
-+			break;
- #ifdef PARANOID
--      default:
--	EXCEPTION(EX_INTERNAL|0x121);
--	f = SW_C3 | SW_C2 | SW_C0;
--	break;
-+		default:
-+			EXCEPTION(EX_INTERNAL | 0x121);
-+			f = SW_C3 | SW_C2 | SW_C0;
-+			break;
- #endif /* PARANOID */
--      }
--  setcc(f);
--  if (c & COMP_Denormal)
--    {
--      return denormal_operand() < 0;
--    }
--  return 0;
-+		}
-+	setcc(f);
-+	if (c & COMP_Denormal) {
-+		return denormal_operand() < 0;
-+	}
-+	return 0;
- }
- 
--
- static int compare_st_st(int nr)
- {
--  int f = 0, c;
--  FPU_REG *st_ptr;
--
--  if ( !NOT_EMPTY(0) || !NOT_EMPTY(nr) )
--    {
--      setcc(SW_C3 | SW_C2 | SW_C0);
--      /* Stack fault */
--      EXCEPTION(EX_StackUnder);
--      return !(control_word & CW_Invalid);
--    }
--
--  st_ptr = &st(nr);
--  c = compare(st_ptr, FPU_gettagi(nr));
--  if (c & COMP_NaN)
--    {
--      setcc(SW_C3 | SW_C2 | SW_C0);
--      EXCEPTION(EX_Invalid);
--      return !(control_word & CW_Invalid);
--    }
--  else
--    switch (c & 7)
--      {
--      case COMP_A_lt_B:
--	f = SW_C0;
--	break;
--      case COMP_A_eq_B:
--	f = SW_C3;
--	break;
--      case COMP_A_gt_B:
--	f = 0;
--	break;
--      case COMP_No_Comp:
--	f = SW_C3 | SW_C2 | SW_C0;
--	break;
-+	int f = 0, c;
-+	FPU_REG *st_ptr;
++	el_ret = elv_merge(q, &req, bio);
++	switch (el_ret) {
++		case ELEVATOR_BACK_MERGE:
++			BUG_ON(!rq_mergeable(req));
 +
-+	if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
-+		setcc(SW_C3 | SW_C2 | SW_C0);
-+		/* Stack fault */
-+		EXCEPTION(EX_StackUnder);
-+		return !(control_word & CW_Invalid);
-+	}
++			if (!ll_back_merge_fn(q, req, bio))
++				break;
 +
-+	st_ptr = &st(nr);
-+	c = compare(st_ptr, FPU_gettagi(nr));
-+	if (c & COMP_NaN) {
-+		setcc(SW_C3 | SW_C2 | SW_C0);
-+		EXCEPTION(EX_Invalid);
-+		return !(control_word & CW_Invalid);
-+	} else
-+		switch (c & 7) {
-+		case COMP_A_lt_B:
-+			f = SW_C0;
-+			break;
-+		case COMP_A_eq_B:
-+			f = SW_C3;
-+			break;
-+		case COMP_A_gt_B:
-+			f = 0;
-+			break;
-+		case COMP_No_Comp:
-+			f = SW_C3 | SW_C2 | SW_C0;
-+			break;
- #ifdef PARANOID
--      default:
--	EXCEPTION(EX_INTERNAL|0x122);
--	f = SW_C3 | SW_C2 | SW_C0;
--	break;
-+		default:
-+			EXCEPTION(EX_INTERNAL | 0x122);
-+			f = SW_C3 | SW_C2 | SW_C0;
-+			break;
- #endif /* PARANOID */
--      }
--  setcc(f);
--  if (c & COMP_Denormal)
--    {
--      return denormal_operand() < 0;
--    }
--  return 0;
-+		}
-+	setcc(f);
-+	if (c & COMP_Denormal) {
-+		return denormal_operand() < 0;
-+	}
-+	return 0;
- }
- 
--
- static int compare_u_st_st(int nr)
- {
--  int f = 0, c;
--  FPU_REG *st_ptr;
--
--  if ( !NOT_EMPTY(0) || !NOT_EMPTY(nr) )
--    {
--      setcc(SW_C3 | SW_C2 | SW_C0);
--      /* Stack fault */
--      EXCEPTION(EX_StackUnder);
--      return !(control_word & CW_Invalid);
--    }
--
--  st_ptr = &st(nr);
--  c = compare(st_ptr, FPU_gettagi(nr));
--  if (c & COMP_NaN)
--    {
--      setcc(SW_C3 | SW_C2 | SW_C0);
--      if (c & COMP_SNaN)       /* This is the only difference between
--				  un-ordered and ordinary comparisons */
--	{
--	  EXCEPTION(EX_Invalid);
--	  return !(control_word & CW_Invalid);
-+	int f = 0, c;
-+	FPU_REG *st_ptr;
++			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 +
-+	if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
-+		setcc(SW_C3 | SW_C2 | SW_C0);
-+		/* Stack fault */
-+		EXCEPTION(EX_StackUnder);
-+		return !(control_word & CW_Invalid);
- 	}
--      return 0;
--    }
--  else
--    switch (c & 7)
--      {
--      case COMP_A_lt_B:
--	f = SW_C0;
--	break;
--      case COMP_A_eq_B:
--	f = SW_C3;
--	break;
--      case COMP_A_gt_B:
--	f = 0;
--	break;
--      case COMP_No_Comp:
--	f = SW_C3 | SW_C2 | SW_C0;
--	break;
++			req->biotail->bi_next = bio;
++			req->biotail = bio;
++			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
++			req->ioprio = ioprio_best(req->ioprio, prio);
++			drive_stat_acct(req, 0);
++			if (!attempt_back_merge(q, req))
++				elv_merged_request(q, req, el_ret);
++			goto out;
 +
-+	st_ptr = &st(nr);
-+	c = compare(st_ptr, FPU_gettagi(nr));
-+	if (c & COMP_NaN) {
-+		setcc(SW_C3 | SW_C2 | SW_C0);
-+		if (c & COMP_SNaN) {	/* This is the only difference between
-+					   un-ordered and ordinary comparisons */
-+			EXCEPTION(EX_Invalid);
-+			return !(control_word & CW_Invalid);
-+		}
-+		return 0;
-+	} else
-+		switch (c & 7) {
-+		case COMP_A_lt_B:
-+			f = SW_C0;
-+			break;
-+		case COMP_A_eq_B:
-+			f = SW_C3;
-+			break;
-+		case COMP_A_gt_B:
-+			f = 0;
-+			break;
-+		case COMP_No_Comp:
-+			f = SW_C3 | SW_C2 | SW_C0;
-+			break;
- #ifdef PARANOID
--      default:
--	EXCEPTION(EX_INTERNAL|0x123);
--	f = SW_C3 | SW_C2 | SW_C0;
--	break;
--#endif /* PARANOID */ 
--      }
--  setcc(f);
--  if (c & COMP_Denormal)
--    {
--      return denormal_operand() < 0;
--    }
--  return 0;
++		case ELEVATOR_FRONT_MERGE:
++			BUG_ON(!rq_mergeable(req));
++
++			if (!ll_front_merge_fn(q, req, bio))
++				break;
++
++			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
++
++			bio->bi_next = req->bio;
++			req->bio = bio;
++
++			/*
++			 * may not be valid. if the low level driver said
++			 * it didn't need a bounce buffer then it better
++			 * not touch req->buffer either...
++			 */
++			req->buffer = bio_data(bio);
++			req->current_nr_sectors = bio_cur_sectors(bio);
++			req->hard_cur_sectors = req->current_nr_sectors;
++			req->sector = req->hard_sector = bio->bi_sector;
++			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
++			req->ioprio = ioprio_best(req->ioprio, prio);
++			drive_stat_acct(req, 0);
++			if (!attempt_front_merge(q, req))
++				elv_merged_request(q, req, el_ret);
++			goto out;
++
++		/* ELV_NO_MERGE: elevator says don't/can't merge. */
 +		default:
-+			EXCEPTION(EX_INTERNAL | 0x123);
-+			f = SW_C3 | SW_C2 | SW_C0;
-+			break;
-+#endif /* PARANOID */
-+		}
-+	setcc(f);
-+	if (c & COMP_Denormal) {
-+		return denormal_operand() < 0;
++			;
 +	}
++
++get_rq:
++	/*
++	 * This sync check and mask will be re-done in init_request_from_bio(),
++	 * but we need to set it earlier to expose the sync flag to the
++	 * rq allocator and io schedulers.
++	 */
++	rw_flags = bio_data_dir(bio);
++	if (sync)
++		rw_flags |= REQ_RW_SYNC;
++
++	/*
++	 * Grab a free request. This might sleep but cannot fail.
++	 * Returns with the queue unlocked.
++	 */
++	req = get_request_wait(q, rw_flags, bio);
++
++	/*
++	 * After dropping the lock and possibly sleeping here, our request
++	 * may now be mergeable after it had proven unmergeable (above).
++	 * We don't worry about that case for efficiency. It won't happen
++	 * often, and the elevators are able to handle it.
++	 */
++	init_request_from_bio(req, bio);
++
++	spin_lock_irq(q->queue_lock);
++	if (elv_queue_empty(q))
++		blk_plug_device(q);
++	add_request(q, req);
++out:
++	if (sync)
++		__generic_unplug_device(q);
++
++	spin_unlock_irq(q->queue_lock);
 +	return 0;
- }
- 
- /*---------------------------------------------------------------------------*/
- 
- void fcom_st(void)
- {
--  /* fcom st(i) */
--  compare_st_st(FPU_rm);
-+	/* fcom st(i) */
-+	compare_st_st(FPU_rm);
- }
- 
--
- void fcompst(void)
- {
--  /* fcomp st(i) */
--  if ( !compare_st_st(FPU_rm) )
--    FPU_pop();
-+	/* fcomp st(i) */
-+	if (!compare_st_st(FPU_rm))
-+		FPU_pop();
- }
- 
--
- void fcompp(void)
- {
--  /* fcompp */
--  if (FPU_rm != 1)
--    {
--      FPU_illegal();
--      return;
--    }
--  if ( !compare_st_st(1) )
--      poppop();
-+	/* fcompp */
-+	if (FPU_rm != 1) {
-+		FPU_illegal();
-+		return;
-+	}
-+	if (!compare_st_st(1))
-+		poppop();
- }
- 
--
- void fucom_(void)
- {
--  /* fucom st(i) */
--  compare_u_st_st(FPU_rm);
-+	/* fucom st(i) */
-+	compare_u_st_st(FPU_rm);
- 
- }
- 
--
- void fucomp(void)
- {
--  /* fucomp st(i) */
--  if ( !compare_u_st_st(FPU_rm) )
--    FPU_pop();
-+	/* fucomp st(i) */
-+	if (!compare_u_st_st(FPU_rm))
-+		FPU_pop();
- }
- 
--
- void fucompp(void)
- {
--  /* fucompp */
--  if (FPU_rm == 1)
--    {
--      if ( !compare_u_st_st(1) )
--	poppop();
--    }
--  else
--    FPU_illegal();
-+	/* fucompp */
-+	if (FPU_rm == 1) {
-+		if (!compare_u_st_st(1))
-+			poppop();
-+	} else
-+		FPU_illegal();
- }
-diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c
-index a850158..04869e6 100644
---- a/arch/x86/math-emu/reg_constant.c
-+++ b/arch/x86/math-emu/reg_constant.c
-@@ -16,29 +16,28 @@
- #include "reg_constant.h"
- #include "control_w.h"
- 
--
- #define MAKE_REG(s,e,l,h) { l, h, \
-                             ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
- 
--FPU_REG const CONST_1    = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
-+FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
- #if 0
--FPU_REG const CONST_2    = MAKE_REG(POS, 1, 0x00000000, 0x80000000);
-+FPU_REG const CONST_2 = MAKE_REG(POS, 1, 0x00000000, 0x80000000);
- FPU_REG const CONST_HALF = MAKE_REG(POS, -1, 0x00000000, 0x80000000);
--#endif  /*  0  */
--static FPU_REG const CONST_L2T  = MAKE_REG(POS, 1, 0xcd1b8afe, 0xd49a784b);
--static FPU_REG const CONST_L2E  = MAKE_REG(POS, 0, 0x5c17f0bc, 0xb8aa3b29);
--FPU_REG const CONST_PI   = MAKE_REG(POS, 1, 0x2168c235, 0xc90fdaa2);
--FPU_REG const CONST_PI2  = MAKE_REG(POS, 0, 0x2168c235, 0xc90fdaa2);
--FPU_REG const CONST_PI4  = MAKE_REG(POS, -1, 0x2168c235, 0xc90fdaa2);
--static FPU_REG const CONST_LG2  = MAKE_REG(POS, -2, 0xfbcff799, 0x9a209a84);
--static FPU_REG const CONST_LN2  = MAKE_REG(POS, -1, 0xd1cf79ac, 0xb17217f7);
-+#endif /*  0  */
-+static FPU_REG const CONST_L2T = MAKE_REG(POS, 1, 0xcd1b8afe, 0xd49a784b);
-+static FPU_REG const CONST_L2E = MAKE_REG(POS, 0, 0x5c17f0bc, 0xb8aa3b29);
-+FPU_REG const CONST_PI = MAKE_REG(POS, 1, 0x2168c235, 0xc90fdaa2);
-+FPU_REG const CONST_PI2 = MAKE_REG(POS, 0, 0x2168c235, 0xc90fdaa2);
-+FPU_REG const CONST_PI4 = MAKE_REG(POS, -1, 0x2168c235, 0xc90fdaa2);
-+static FPU_REG const CONST_LG2 = MAKE_REG(POS, -2, 0xfbcff799, 0x9a209a84);
-+static FPU_REG const CONST_LN2 = MAKE_REG(POS, -1, 0xd1cf79ac, 0xb17217f7);
- 
- /* Extra bits to take pi/2 to more than 128 bits precision. */
- FPU_REG const CONST_PI2extra = MAKE_REG(NEG, -66,
--					 0xfc8f8cbb, 0xece675d1);
-+					0xfc8f8cbb, 0xece675d1);
- 
- /* Only the sign (and tag) is used in internal zeroes */
--FPU_REG const CONST_Z    = MAKE_REG(POS, EXP_UNDER, 0x0, 0x0);
-+FPU_REG const CONST_Z = MAKE_REG(POS, EXP_UNDER, 0x0, 0x0);
- 
- /* Only the sign and significand (and tag) are used in internal NaNs */
- /* The 80486 never generates one of these 
-@@ -48,24 +47,22 @@ FPU_REG const CONST_SNAN = MAKE_REG(POS, EXP_OVER, 0x00000001, 0x80000000);
- FPU_REG const CONST_QNaN = MAKE_REG(NEG, EXP_OVER, 0x00000000, 0xC0000000);
- 
- /* Only the sign (and tag) is used in internal infinities */
--FPU_REG const CONST_INF  = MAKE_REG(POS, EXP_OVER, 0x00000000, 0x80000000);
--
-+FPU_REG const CONST_INF = MAKE_REG(POS, EXP_OVER, 0x00000000, 0x80000000);
- 
- static void fld_const(FPU_REG const *c, int adj, u_char tag)
- {
--  FPU_REG *st_new_ptr;
--
--  if ( STACK_OVERFLOW )
--    {
--      FPU_stack_overflow();
--      return;
--    }
--  push();
--  reg_copy(c, st_new_ptr);
--  st_new_ptr->sigl += adj;  /* For all our fldxxx constants, we don't need to
--			       borrow or carry. */
--  FPU_settag0(tag);
--  clear_C1();
-+	FPU_REG *st_new_ptr;
 +
-+	if (STACK_OVERFLOW) {
-+		FPU_stack_overflow();
-+		return;
-+	}
-+	push();
-+	reg_copy(c, st_new_ptr);
-+	st_new_ptr->sigl += adj;	/* For all our fldxxx constants, we don't need to
-+					   borrow or carry. */
-+	FPU_settag0(tag);
-+	clear_C1();
- }
- 
- /* A fast way to find out whether x is one of RC_DOWN or RC_CHOP
-@@ -75,46 +72,46 @@ static void fld_const(FPU_REG const *c, int adj, u_char tag)
- 
- static void fld1(int rc)
- {
--  fld_const(&CONST_1, 0, TAG_Valid);
-+	fld_const(&CONST_1, 0, TAG_Valid);
- }
- 
- static void fldl2t(int rc)
- {
--  fld_const(&CONST_L2T, (rc == RC_UP) ? 1 : 0, TAG_Valid);
-+	fld_const(&CONST_L2T, (rc == RC_UP) ? 1 : 0, TAG_Valid);
- }
- 
- static void fldl2e(int rc)
- {
--  fld_const(&CONST_L2E, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
-+	fld_const(&CONST_L2E, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
- }
- 
- static void fldpi(int rc)
- {
--  fld_const(&CONST_PI, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
-+	fld_const(&CONST_PI, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
- }
- 
- static void fldlg2(int rc)
- {
--  fld_const(&CONST_LG2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
-+	fld_const(&CONST_LG2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
- }
- 
- static void fldln2(int rc)
- {
--  fld_const(&CONST_LN2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
-+	fld_const(&CONST_LN2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
- }
- 
- static void fldz(int rc)
- {
--  fld_const(&CONST_Z, 0, TAG_Zero);
-+	fld_const(&CONST_Z, 0, TAG_Zero);
- }
- 
--typedef void (*FUNC_RC)(int);
-+typedef void (*FUNC_RC) (int);
- 
- static FUNC_RC constants_table[] = {
--  fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC)FPU_illegal
-+	fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC) FPU_illegal
- };
- 
- void fconst(void)
- {
--  (constants_table[FPU_rm])(control_word & CW_RC);
-+	(constants_table[FPU_rm]) (control_word & CW_RC);
- }
-diff --git a/arch/x86/math-emu/reg_convert.c b/arch/x86/math-emu/reg_convert.c
-index 45a2587..1080607 100644
---- a/arch/x86/math-emu/reg_convert.c
-+++ b/arch/x86/math-emu/reg_convert.c
-@@ -13,41 +13,34 @@
- #include "exception.h"
- #include "fpu_emu.h"
- 
--
- int FPU_to_exp16(FPU_REG const *a, FPU_REG *x)
- {
--  int sign = getsign(a);
--
--  *(long long *)&(x->sigl) = *(const long long *)&(a->sigl);
--
--  /* Set up the exponent as a 16 bit quantity. */
--  setexponent16(x, exponent(a));
--
--  if ( exponent16(x) == EXP_UNDER )
--    {
--      /* The number is a de-normal or pseudodenormal. */
--      /* We only deal with the significand and exponent. */
--
--      if (x->sigh & 0x80000000)
--	{
--	  /* Is a pseudodenormal. */
--	  /* This is non-80486 behaviour because the number
--	     loses its 'denormal' identity. */
--	  addexponent(x, 1);
--	}
--      else
--	{
--	  /* Is a denormal. */
--	  addexponent(x, 1);
--	  FPU_normalize_nuo(x);
-+	int sign = getsign(a);
++end_io:
++	bio_endio(bio, err);
++	return 0;
++}
 +
-+	*(long long *)&(x->sigl) = *(const long long *)&(a->sigl);
++/*
++ * If bio->bi_dev is a partition, remap the location
++ */
++static inline void blk_partition_remap(struct bio *bio)
++{
++	struct block_device *bdev = bio->bi_bdev;
 +
-+	/* Set up the exponent as a 16 bit quantity. */
-+	setexponent16(x, exponent(a));
++	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
++		struct hd_struct *p = bdev->bd_part;
++		const int rw = bio_data_dir(bio);
 +
-+	if (exponent16(x) == EXP_UNDER) {
-+		/* The number is a de-normal or pseudodenormal. */
-+		/* We only deal with the significand and exponent. */
++		p->sectors[rw] += bio_sectors(bio);
++		p->ios[rw]++;
 +
-+		if (x->sigh & 0x80000000) {
-+			/* Is a pseudodenormal. */
-+			/* This is non-80486 behaviour because the number
-+			   loses its 'denormal' identity. */
-+			addexponent(x, 1);
-+		} else {
-+			/* Is a denormal. */
-+			addexponent(x, 1);
-+			FPU_normalize_nuo(x);
-+		}
- 	}
--    }
- 
--  if ( !(x->sigh & 0x80000000) )
--    {
--      EXCEPTION(EX_INTERNAL | 0x180);
--    }
-+	if (!(x->sigh & 0x80000000)) {
-+		EXCEPTION(EX_INTERNAL | 0x180);
++		bio->bi_sector += p->start_sect;
++		bio->bi_bdev = bdev->bd_contains;
++
++		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
++				    bdev->bd_dev, bio->bi_sector,
++				    bio->bi_sector - p->start_sect);
 +	}
- 
--  return sign;
-+	return sign;
- }
--
-diff --git a/arch/x86/math-emu/reg_divide.c b/arch/x86/math-emu/reg_divide.c
-index 5cee7ff..6827012 100644
---- a/arch/x86/math-emu/reg_divide.c
-+++ b/arch/x86/math-emu/reg_divide.c
-@@ -26,182 +26,157 @@
-   */
- int FPU_div(int flags, int rm, int control_w)
- {
--  FPU_REG x, y;
--  FPU_REG const *a, *b, *st0_ptr, *st_ptr;
--  FPU_REG *dest;
--  u_char taga, tagb, signa, signb, sign, saved_sign;
--  int tag, deststnr;
--
--  if ( flags & DEST_RM )
--    deststnr = rm;
--  else
--    deststnr = 0;
--
--  if ( flags & REV )
--    {
--      b = &st(0);
--      st0_ptr = b;
--      tagb = FPU_gettag0();
--      if ( flags & LOADED )
--	{
--	  a = (FPU_REG *)rm;
--	  taga = flags & 0x0f;
-+	FPU_REG x, y;
-+	FPU_REG const *a, *b, *st0_ptr, *st_ptr;
-+	FPU_REG *dest;
-+	u_char taga, tagb, signa, signb, sign, saved_sign;
-+	int tag, deststnr;
++}
 +
-+	if (flags & DEST_RM)
-+		deststnr = rm;
-+	else
-+		deststnr = 0;
++static void handle_bad_sector(struct bio *bio)
++{
++	char b[BDEVNAME_SIZE];
 +
-+	if (flags & REV) {
-+		b = &st(0);
-+		st0_ptr = b;
-+		tagb = FPU_gettag0();
-+		if (flags & LOADED) {
-+			a = (FPU_REG *) rm;
-+			taga = flags & 0x0f;
-+		} else {
-+			a = &st(rm);
-+			st_ptr = a;
-+			taga = FPU_gettagi(rm);
-+		}
-+	} else {
-+		a = &st(0);
-+		st0_ptr = a;
-+		taga = FPU_gettag0();
-+		if (flags & LOADED) {
-+			b = (FPU_REG *) rm;
-+			tagb = flags & 0x0f;
-+		} else {
-+			b = &st(rm);
-+			st_ptr = b;
-+			tagb = FPU_gettagi(rm);
-+		}
- 	}
--      else
--	{
--	  a = &st(rm);
--	  st_ptr = a;
--	  taga = FPU_gettagi(rm);
--	}
--    }
--  else
--    {
--      a = &st(0);
--      st0_ptr = a;
--      taga = FPU_gettag0();
--      if ( flags & LOADED )
--	{
--	  b = (FPU_REG *)rm;
--	  tagb = flags & 0x0f;
--	}
--      else
--	{
--	  b = &st(rm);
--	  st_ptr = b;
--	  tagb = FPU_gettagi(rm);
--	}
--    }
- 
--  signa = getsign(a);
--  signb = getsign(b);
-+	signa = getsign(a);
-+	signb = getsign(b);
- 
--  sign = signa ^ signb;
-+	sign = signa ^ signb;
- 
--  dest = &st(deststnr);
--  saved_sign = getsign(dest);
-+	dest = &st(deststnr);
-+	saved_sign = getsign(dest);
- 
--  if ( !(taga | tagb) )
--    {
--      /* Both regs Valid, this should be the most common case. */
--      reg_copy(a, &x);
--      reg_copy(b, &y);
--      setpositive(&x);
--      setpositive(&y);
--      tag = FPU_u_div(&x, &y, dest, control_w, sign);
-+	if (!(taga | tagb)) {
-+		/* Both regs Valid, this should be the most common case. */
-+		reg_copy(a, &x);
-+		reg_copy(b, &y);
-+		setpositive(&x);
-+		setpositive(&y);
-+		tag = FPU_u_div(&x, &y, dest, control_w, sign);
- 
--      if ( tag < 0 )
--	return tag;
-+		if (tag < 0)
-+			return tag;
- 
--      FPU_settagi(deststnr, tag);
--      return tag;
--    }
-+		FPU_settagi(deststnr, tag);
-+		return tag;
-+	}
- 
--  if ( taga == TAG_Special )
--    taga = FPU_Special(a);
--  if ( tagb == TAG_Special )
--    tagb = FPU_Special(b);
-+	if (taga == TAG_Special)
-+		taga = FPU_Special(a);
-+	if (tagb == TAG_Special)
-+		tagb = FPU_Special(b);
- 
--  if ( ((taga == TAG_Valid) && (tagb == TW_Denormal))
-+	if (((taga == TAG_Valid) && (tagb == TW_Denormal))
- 	    || ((taga == TW_Denormal) && (tagb == TAG_Valid))
--	    || ((taga == TW_Denormal) && (tagb == TW_Denormal)) )
--    {
--      if ( denormal_operand() < 0 )
--	return FPU_Exception;
--
--      FPU_to_exp16(a, &x);
--      FPU_to_exp16(b, &y);
--      tag = FPU_u_div(&x, &y, dest, control_w, sign);
--      if ( tag < 0 )
--	return tag;
--
--      FPU_settagi(deststnr, tag);
--      return tag;
--    }
--  else if ( (taga <= TW_Denormal) && (tagb <= TW_Denormal) )
--    {
--      if ( tagb != TAG_Zero )
--	{
--	  /* Want to find Zero/Valid */
--	  if ( tagb == TW_Denormal )
--	    {
--	      if ( denormal_operand() < 0 )
--		return FPU_Exception;
--	    }
--
--	  /* The result is zero. */
--	  FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
--	  setsign(dest, sign);
--	  return TAG_Zero;
-+	    || ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
-+		if (denormal_operand() < 0)
-+			return FPU_Exception;
++	printk(KERN_INFO "attempt to access beyond end of device\n");
++	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
++			bdevname(bio->bi_bdev, b),
++			bio->bi_rw,
++			(unsigned long long)bio->bi_sector + bio_sectors(bio),
++			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
 +
-+		FPU_to_exp16(a, &x);
-+		FPU_to_exp16(b, &y);
-+		tag = FPU_u_div(&x, &y, dest, control_w, sign);
-+		if (tag < 0)
-+			return tag;
++	set_bit(BIO_EOF, &bio->bi_flags);
++}
 +
-+		FPU_settagi(deststnr, tag);
-+		return tag;
-+	} else if ((taga <= TW_Denormal) && (tagb <= TW_Denormal)) {
-+		if (tagb != TAG_Zero) {
-+			/* Want to find Zero/Valid */
-+			if (tagb == TW_Denormal) {
-+				if (denormal_operand() < 0)
-+					return FPU_Exception;
-+			}
++#ifdef CONFIG_FAIL_MAKE_REQUEST
 +
-+			/* The result is zero. */
-+			FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
-+			setsign(dest, sign);
-+			return TAG_Zero;
-+		}
-+		/* We have an exception condition, either 0/0 or Valid/Zero. */
-+		if (taga == TAG_Zero) {
-+			/* 0/0 */
-+			return arith_invalid(deststnr);
-+		}
-+		/* Valid/Zero */
-+		return FPU_divide_by_zero(deststnr, sign);
- 	}
--      /* We have an exception condition, either 0/0 or Valid/Zero. */
--      if ( taga == TAG_Zero )
--	{
--	  /* 0/0 */
--	  return arith_invalid(deststnr);
-+	/* Must have infinities, NaNs, etc */
-+	else if ((taga == TW_NaN) || (tagb == TW_NaN)) {
-+		if (flags & LOADED)
-+			return real_2op_NaN((FPU_REG *) rm, flags & 0x0f, 0,
-+					    st0_ptr);
++static DECLARE_FAULT_ATTR(fail_make_request);
 +
-+		if (flags & DEST_RM) {
-+			int tag;
-+			tag = FPU_gettag0();
-+			if (tag == TAG_Special)
-+				tag = FPU_Special(st0_ptr);
-+			return real_2op_NaN(st0_ptr, tag, rm,
-+					    (flags & REV) ? st0_ptr : &st(rm));
-+		} else {
-+			int tag;
-+			tag = FPU_gettagi(rm);
-+			if (tag == TAG_Special)
-+				tag = FPU_Special(&st(rm));
-+			return real_2op_NaN(&st(rm), tag, 0,
-+					    (flags & REV) ? st0_ptr : &st(rm));
-+		}
-+	} else if (taga == TW_Infinity) {
-+		if (tagb == TW_Infinity) {
-+			/* infinity/infinity */
-+			return arith_invalid(deststnr);
-+		} else {
-+			/* tagb must be Valid or Zero */
-+			if ((tagb == TW_Denormal) && (denormal_operand() < 0))
-+				return FPU_Exception;
++static int __init setup_fail_make_request(char *str)
++{
++	return setup_fault_attr(&fail_make_request, str);
++}
++__setup("fail_make_request=", setup_fail_make_request);
 +
-+			/* Infinity divided by Zero or Valid does
-+			   not raise and exception, but returns Infinity */
-+			FPU_copy_to_regi(a, TAG_Special, deststnr);
-+			setsign(dest, sign);
-+			return taga;
-+		}
-+	} else if (tagb == TW_Infinity) {
-+		if ((taga == TW_Denormal) && (denormal_operand() < 0))
-+			return FPU_Exception;
++static int should_fail_request(struct bio *bio)
++{
++	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
++	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
++		return should_fail(&fail_make_request, bio->bi_size);
 +
-+		/* The result is zero. */
-+		FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
-+		setsign(dest, sign);
-+		return TAG_Zero;
- 	}
--      /* Valid/Zero */
--      return FPU_divide_by_zero(deststnr, sign);
--    }
--  /* Must have infinities, NaNs, etc */
--  else if ( (taga == TW_NaN) || (tagb == TW_NaN) )
--    {
--      if ( flags & LOADED )
--	return real_2op_NaN((FPU_REG *)rm, flags & 0x0f, 0, st0_ptr);
--
--      if ( flags & DEST_RM )
--	{
--	  int tag;
--	  tag = FPU_gettag0();
--	  if ( tag == TAG_Special )
--	    tag = FPU_Special(st0_ptr);
--	  return real_2op_NaN(st0_ptr, tag, rm, (flags & REV) ? st0_ptr : &st(rm));
--	}
--      else
--	{
--	  int tag;
--	  tag = FPU_gettagi(rm);
--	  if ( tag == TAG_Special )
--	    tag = FPU_Special(&st(rm));
--	  return real_2op_NaN(&st(rm), tag, 0, (flags & REV) ? st0_ptr : &st(rm));
--	}
--    }
--  else if (taga == TW_Infinity)
--    {
--      if (tagb == TW_Infinity)
--	{
--	  /* infinity/infinity */
--	  return arith_invalid(deststnr);
--	}
--      else
--	{
--	  /* tagb must be Valid or Zero */
--	  if ( (tagb == TW_Denormal) && (denormal_operand() < 0) )
--	    return FPU_Exception;
--	  
--	  /* Infinity divided by Zero or Valid does
--	     not raise and exception, but returns Infinity */
--	  FPU_copy_to_regi(a, TAG_Special, deststnr);
--	  setsign(dest, sign);
--	  return taga;
--	}
--    }
--  else if (tagb == TW_Infinity)
--    {
--      if ( (taga == TW_Denormal) && (denormal_operand() < 0) )
--	return FPU_Exception;
--
--      /* The result is zero. */
--      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
--      setsign(dest, sign);
--      return TAG_Zero;
--    }
- #ifdef PARANOID
--  else
--    {
--      EXCEPTION(EX_INTERNAL|0x102);
--      return FPU_Exception;
--    }
--#endif /* PARANOID */ 
-+	else {
-+		EXCEPTION(EX_INTERNAL | 0x102);
-+		return FPU_Exception;
-+	}
-+#endif /* PARANOID */
- 
- 	return 0;
- }
-diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
-index e976cae..799d4af 100644
---- a/arch/x86/math-emu/reg_ld_str.c
-+++ b/arch/x86/math-emu/reg_ld_str.c
-@@ -27,1084 +27,938 @@
- #include "control_w.h"
- #include "status_w.h"
- 
--
--#define DOUBLE_Emax 1023         /* largest valid exponent */
-+#define DOUBLE_Emax 1023	/* largest valid exponent */
- #define DOUBLE_Ebias 1023
--#define DOUBLE_Emin (-1022)      /* smallest valid exponent */
-+#define DOUBLE_Emin (-1022)	/* smallest valid exponent */
- 
--#define SINGLE_Emax 127          /* largest valid exponent */
-+#define SINGLE_Emax 127		/* largest valid exponent */
- #define SINGLE_Ebias 127
--#define SINGLE_Emin (-126)       /* smallest valid exponent */
--
-+#define SINGLE_Emin (-126)	/* smallest valid exponent */
- 
- static u_char normalize_no_excep(FPU_REG *r, int exp, int sign)
- {
--  u_char tag;
-+	u_char tag;
- 
--  setexponent16(r, exp);
-+	setexponent16(r, exp);
- 
--  tag = FPU_normalize_nuo(r);
--  stdexp(r);
--  if ( sign )
--    setnegative(r);
-+	tag = FPU_normalize_nuo(r);
-+	stdexp(r);
-+	if (sign)
-+		setnegative(r);
- 
--  return tag;
-+	return tag;
- }
- 
--
- int FPU_tagof(FPU_REG *ptr)
- {
--  int exp;
--
--  exp = exponent16(ptr) & 0x7fff;
--  if ( exp == 0 )
--    {
--      if ( !(ptr->sigh | ptr->sigl) )
--	{
--	  return TAG_Zero;
-+	int exp;
++	return 0;
++}
 +
-+	exp = exponent16(ptr) & 0x7fff;
-+	if (exp == 0) {
-+		if (!(ptr->sigh | ptr->sigl)) {
-+			return TAG_Zero;
-+		}
-+		/* The number is a de-normal or pseudodenormal. */
-+		return TAG_Special;
-+	}
++static int __init fail_make_request_debugfs(void)
++{
++	return init_fault_attr_dentries(&fail_make_request,
++					"fail_make_request");
++}
 +
-+	if (exp == 0x7fff) {
-+		/* Is an Infinity, a NaN, or an unsupported data type. */
-+		return TAG_Special;
- 	}
--      /* The number is a de-normal or pseudodenormal. */
--      return TAG_Special;
--    }
--
--  if ( exp == 0x7fff )
--    {
--      /* Is an Infinity, a NaN, or an unsupported data type. */
--      return TAG_Special;
--    }
--
--  if ( !(ptr->sigh & 0x80000000) )
--    {
--      /* Unsupported data type. */
--      /* Valid numbers have the ms bit set to 1. */
--      /* Unnormal. */
--      return TAG_Special;
--    }
--
--  return TAG_Valid;
--}
- 
-+	if (!(ptr->sigh & 0x80000000)) {
-+		/* Unsupported data type. */
-+		/* Valid numbers have the ms bit set to 1. */
-+		/* Unnormal. */
-+		return TAG_Special;
++late_initcall(fail_make_request_debugfs);
++
++#else /* CONFIG_FAIL_MAKE_REQUEST */
++
++static inline int should_fail_request(struct bio *bio)
++{
++	return 0;
++}
++
++#endif /* CONFIG_FAIL_MAKE_REQUEST */
++
++/*
++ * Check whether this bio extends beyond the end of the device.
++ */
++static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
++{
++	sector_t maxsector;
++
++	if (!nr_sectors)
++		return 0;
++
++	/* Test device or partition size, when known. */
++	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
++	if (maxsector) {
++		sector_t sector = bio->bi_sector;
++
++		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
++			/*
++			 * This may well happen - the kernel calls bread()
++			 * without checking the size of the device, e.g., when
++			 * mounting a device.
++			 */
++			handle_bad_sector(bio);
++			return 1;
++		}
 +	}
 +
-+	return TAG_Valid;
++	return 0;
 +}
- 
- /* Get a long double from user memory */
- int FPU_load_extended(long double __user *s, int stnr)
- {
--  FPU_REG *sti_ptr = &st(stnr);
-+	FPU_REG *sti_ptr = &st(stnr);
- 
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ, s, 10);
--  __copy_from_user(sti_ptr, s, 10);
--  RE_ENTRANT_CHECK_ON;
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_READ, s, 10);
-+	__copy_from_user(sti_ptr, s, 10);
-+	RE_ENTRANT_CHECK_ON;
- 
--  return FPU_tagof(sti_ptr);
-+	return FPU_tagof(sti_ptr);
- }
- 
--
- /* Get a double from user memory */
- int FPU_load_double(double __user *dfloat, FPU_REG *loaded_data)
- {
--  int exp, tag, negative;
--  unsigned m64, l64;
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ, dfloat, 8);
--  FPU_get_user(m64, 1 + (unsigned long __user *) dfloat);
--  FPU_get_user(l64, (unsigned long __user *) dfloat);
--  RE_ENTRANT_CHECK_ON;
--
--  negative = (m64 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
--  exp = ((m64 & 0x7ff00000) >> 20) - DOUBLE_Ebias + EXTENDED_Ebias;
--  m64 &= 0xfffff;
--  if ( exp > DOUBLE_Emax + EXTENDED_Ebias )
--    {
--      /* Infinity or NaN */
--      if ((m64 == 0) && (l64 == 0))
--	{
--	  /* +- infinity */
--	  loaded_data->sigh = 0x80000000;
--	  loaded_data->sigl = 0x00000000;
--	  exp = EXP_Infinity + EXTENDED_Ebias;
--	  tag = TAG_Special;
--	}
--      else
--	{
--	  /* Must be a signaling or quiet NaN */
--	  exp = EXP_NaN + EXTENDED_Ebias;
--	  loaded_data->sigh = (m64 << 11) | 0x80000000;
--	  loaded_data->sigh |= l64 >> 21;
--	  loaded_data->sigl = l64 << 11;
--	  tag = TAG_Special;    /* The calling function must look for NaNs */
--	}
--    }
--  else if ( exp < DOUBLE_Emin + EXTENDED_Ebias )
--    {
--      /* Zero or de-normal */
--      if ((m64 == 0) && (l64 == 0))
--	{
--	  /* Zero */
--	  reg_copy(&CONST_Z, loaded_data);
--	  exp = 0;
--	  tag = TAG_Zero;
--	}
--      else
--	{
--	  /* De-normal */
--	  loaded_data->sigh = m64 << 11;
--	  loaded_data->sigh |= l64 >> 21;
--	  loaded_data->sigl = l64 << 11;
--
--	  return normalize_no_excep(loaded_data, DOUBLE_Emin, negative)
--	    | (denormal_operand() < 0 ? FPU_Exception : 0);
--	}
--    }
--  else
--    {
--      loaded_data->sigh = (m64 << 11) | 0x80000000;
--      loaded_data->sigh |= l64 >> 21;
--      loaded_data->sigl = l64 << 11;
-+	int exp, tag, negative;
-+	unsigned m64, l64;
 +
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_READ, dfloat, 8);
-+	FPU_get_user(m64, 1 + (unsigned long __user *)dfloat);
-+	FPU_get_user(l64, (unsigned long __user *)dfloat);
-+	RE_ENTRANT_CHECK_ON;
++/**
++ * generic_make_request: hand a buffer to its device driver for I/O
++ * @bio:  The bio describing the location in memory and on the device.
++ *
++ * generic_make_request() is used to make I/O requests of block
++ * devices. It is passed a &struct bio, which describes the I/O that needs
++ * to be done.
++ *
++ * generic_make_request() does not return any status.  The
++ * success/failure status of the request, along with notification of
++ * completion, is delivered asynchronously through the bio->bi_end_io
++ * function described (one day) elsewhere.
++ *
++ * The caller of generic_make_request must make sure that bi_io_vec
++ * are set to describe the memory buffer, and that bi_dev and bi_sector are
++ * set to describe the device address, and the
++ * bi_end_io and optionally bi_private are set to describe how
++ * completion notification should be signaled.
++ *
++ * generic_make_request and the drivers it calls may use bi_next if this
++ * bio happens to be merged with someone else, and may change bi_dev and
++ * bi_sector for remaps as it sees fit.  So the values of these fields
++ * should NOT be depended on after the call to generic_make_request.
++ */
++static inline void __generic_make_request(struct bio *bio)
++{
++	struct request_queue *q;
++	sector_t old_sector;
++	int ret, nr_sectors = bio_sectors(bio);
++	dev_t old_dev;
++	int err = -EIO;
 +
-+	negative = (m64 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
-+	exp = ((m64 & 0x7ff00000) >> 20) - DOUBLE_Ebias + EXTENDED_Ebias;
-+	m64 &= 0xfffff;
-+	if (exp > DOUBLE_Emax + EXTENDED_Ebias) {
-+		/* Infinity or NaN */
-+		if ((m64 == 0) && (l64 == 0)) {
-+			/* +- infinity */
-+			loaded_data->sigh = 0x80000000;
-+			loaded_data->sigl = 0x00000000;
-+			exp = EXP_Infinity + EXTENDED_Ebias;
-+			tag = TAG_Special;
-+		} else {
-+			/* Must be a signaling or quiet NaN */
-+			exp = EXP_NaN + EXTENDED_Ebias;
-+			loaded_data->sigh = (m64 << 11) | 0x80000000;
-+			loaded_data->sigh |= l64 >> 21;
-+			loaded_data->sigl = l64 << 11;
-+			tag = TAG_Special;	/* The calling function must look for NaNs */
++	might_sleep();
++
++	if (bio_check_eod(bio, nr_sectors))
++		goto end_io;
++
++	/*
++	 * Resolve the mapping until finished. (drivers are
++	 * still free to implement/resolve their own stacking
++	 * by explicitly returning 0)
++	 *
++	 * NOTE: we don't repeat the blk_size check for each new device.
++	 * Stacking drivers are expected to know what they are doing.
++	 */
++	old_sector = -1;
++	old_dev = 0;
++	do {
++		char b[BDEVNAME_SIZE];
++
++		q = bdev_get_queue(bio->bi_bdev);
++		if (!q) {
++			printk(KERN_ERR
++			       "generic_make_request: Trying to access "
++				"nonexistent block-device %s (%Lu)\n",
++				bdevname(bio->bi_bdev, b),
++				(long long) bio->bi_sector);
++end_io:
++			bio_endio(bio, err);
++			break;
 +		}
-+	} else if (exp < DOUBLE_Emin + EXTENDED_Ebias) {
-+		/* Zero or de-normal */
-+		if ((m64 == 0) && (l64 == 0)) {
-+			/* Zero */
-+			reg_copy(&CONST_Z, loaded_data);
-+			exp = 0;
-+			tag = TAG_Zero;
-+		} else {
-+			/* De-normal */
-+			loaded_data->sigh = m64 << 11;
-+			loaded_data->sigh |= l64 >> 21;
-+			loaded_data->sigl = l64 << 11;
 +
-+			return normalize_no_excep(loaded_data, DOUBLE_Emin,
-+						  negative)
-+			    | (denormal_operand() < 0 ? FPU_Exception : 0);
++		if (unlikely(nr_sectors > q->max_hw_sectors)) {
++			printk("bio too big device %s (%u > %u)\n", 
++				bdevname(bio->bi_bdev, b),
++				bio_sectors(bio),
++				q->max_hw_sectors);
++			goto end_io;
 +		}
-+	} else {
-+		loaded_data->sigh = (m64 << 11) | 0x80000000;
-+		loaded_data->sigh |= l64 >> 21;
-+		loaded_data->sigl = l64 << 11;
- 
--      tag = TAG_Valid;
--    }
-+		tag = TAG_Valid;
-+	}
- 
--  setexponent16(loaded_data, exp | negative);
-+	setexponent16(loaded_data, exp | negative);
- 
--  return tag;
-+	return tag;
- }
- 
--
- /* Get a float from user memory */
- int FPU_load_single(float __user *single, FPU_REG *loaded_data)
- {
--  unsigned m32;
--  int exp, tag, negative;
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ, single, 4);
--  FPU_get_user(m32, (unsigned long __user *) single);
--  RE_ENTRANT_CHECK_ON;
--
--  negative = (m32 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
--
--  if (!(m32 & 0x7fffffff))
--    {
--      /* Zero */
--      reg_copy(&CONST_Z, loaded_data);
--      addexponent(loaded_data, negative);
--      return TAG_Zero;
--    }
--  exp = ((m32 & 0x7f800000) >> 23) - SINGLE_Ebias + EXTENDED_Ebias;
--  m32 = (m32 & 0x7fffff) << 8;
--  if ( exp < SINGLE_Emin + EXTENDED_Ebias )
--    {
--      /* De-normals */
--      loaded_data->sigh = m32;
--      loaded_data->sigl = 0;
--
--      return normalize_no_excep(loaded_data, SINGLE_Emin, negative)
--	| (denormal_operand() < 0 ? FPU_Exception : 0);
--    }
--  else if ( exp > SINGLE_Emax + EXTENDED_Ebias )
--    {
--    /* Infinity or NaN */
--      if ( m32 == 0 )
--	{
--	  /* +- infinity */
--	  loaded_data->sigh = 0x80000000;
--	  loaded_data->sigl = 0x00000000;
--	  exp = EXP_Infinity + EXTENDED_Ebias;
--	  tag = TAG_Special;
-+	unsigned m32;
-+	int exp, tag, negative;
 +
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_READ, single, 4);
-+	FPU_get_user(m32, (unsigned long __user *)single);
-+	RE_ENTRANT_CHECK_ON;
++		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
++			goto end_io;
++
++		if (should_fail_request(bio))
++			goto end_io;
++
++		/*
++		 * If this device has partitions, remap block n
++		 * of partition p to block n+start(p) of the disk.
++		 */
++		blk_partition_remap(bio);
++
++		if (old_sector != -1)
++			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
++					    old_sector);
 +
-+	negative = (m32 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
++		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
 +
-+	if (!(m32 & 0x7fffffff)) {
-+		/* Zero */
-+		reg_copy(&CONST_Z, loaded_data);
-+		addexponent(loaded_data, negative);
-+		return TAG_Zero;
- 	}
--      else
--	{
--	  /* Must be a signaling or quiet NaN */
--	  exp = EXP_NaN + EXTENDED_Ebias;
--	  loaded_data->sigh = m32 | 0x80000000;
--	  loaded_data->sigl = 0;
--	  tag = TAG_Special;  /* The calling function must look for NaNs */
-+	exp = ((m32 & 0x7f800000) >> 23) - SINGLE_Ebias + EXTENDED_Ebias;
-+	m32 = (m32 & 0x7fffff) << 8;
-+	if (exp < SINGLE_Emin + EXTENDED_Ebias) {
-+		/* De-normals */
-+		loaded_data->sigh = m32;
-+		loaded_data->sigl = 0;
++		old_sector = bio->bi_sector;
++		old_dev = bio->bi_bdev->bd_dev;
 +
-+		return normalize_no_excep(loaded_data, SINGLE_Emin, negative)
-+		    | (denormal_operand() < 0 ? FPU_Exception : 0);
-+	} else if (exp > SINGLE_Emax + EXTENDED_Ebias) {
-+		/* Infinity or NaN */
-+		if (m32 == 0) {
-+			/* +- infinity */
-+			loaded_data->sigh = 0x80000000;
-+			loaded_data->sigl = 0x00000000;
-+			exp = EXP_Infinity + EXTENDED_Ebias;
-+			tag = TAG_Special;
-+		} else {
-+			/* Must be a signaling or quiet NaN */
-+			exp = EXP_NaN + EXTENDED_Ebias;
-+			loaded_data->sigh = m32 | 0x80000000;
-+			loaded_data->sigl = 0;
-+			tag = TAG_Special;	/* The calling function must look for NaNs */
++		if (bio_check_eod(bio, nr_sectors))
++			goto end_io;
++		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
++			err = -EOPNOTSUPP;
++			goto end_io;
 +		}
-+	} else {
-+		loaded_data->sigh = m32 | 0x80000000;
-+		loaded_data->sigl = 0;
-+		tag = TAG_Valid;
- 	}
--    }
--  else
--    {
--      loaded_data->sigh = m32 | 0x80000000;
--      loaded_data->sigl = 0;
--      tag = TAG_Valid;
--    }
- 
--  setexponent16(loaded_data, exp | negative);  /* Set the sign. */
-+	setexponent16(loaded_data, exp | negative);	/* Set the sign. */
- 
--  return tag;
-+	return tag;
- }
- 
--
- /* Get a long long from user memory */
- int FPU_load_int64(long long __user *_s)
- {
--  long long s;
--  int sign;
--  FPU_REG *st0_ptr = &st(0);
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ, _s, 8);
--  if (copy_from_user(&s,_s,8))
--    FPU_abort;
--  RE_ENTRANT_CHECK_ON;
--
--  if (s == 0)
--    {
--      reg_copy(&CONST_Z, st0_ptr);
--      return TAG_Zero;
--    }
--
--  if (s > 0)
--    sign = SIGN_Positive;
--  else
--  {
--    s = -s;
--    sign = SIGN_Negative;
--  }
--
--  significand(st0_ptr) = s;
--
--  return normalize_no_excep(st0_ptr, 63, sign);
--}
-+	long long s;
-+	int sign;
-+	FPU_REG *st0_ptr = &st(0);
-+
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_READ, _s, 8);
-+	if (copy_from_user(&s, _s, 8))
-+		FPU_abort;
-+	RE_ENTRANT_CHECK_ON;
 +
-+	if (s == 0) {
-+		reg_copy(&CONST_Z, st0_ptr);
-+		return TAG_Zero;
-+	}
++		ret = q->make_request_fn(q, bio);
++	} while (ret);
++}
 +
-+	if (s > 0)
-+		sign = SIGN_Positive;
-+	else {
-+		s = -s;
-+		sign = SIGN_Negative;
++/*
++ * We only want one ->make_request_fn to be active at a time,
++ * else stack usage with stacked devices could be a problem.
++ * So use current->bio_{list,tail} to keep a list of requests
++ * submitted by a make_request_fn function.
++ * current->bio_tail is also used as a flag to say if
++ * generic_make_request is currently active in this task or not.
++ * If it is NULL, then no make_request is active.  If it is non-NULL,
++ * then a make_request is active, and new requests should be added
++ * at the tail
++ */
++void generic_make_request(struct bio *bio)
++{
++	if (current->bio_tail) {
++		/* make_request is active */
++		*(current->bio_tail) = bio;
++		bio->bi_next = NULL;
++		current->bio_tail = &bio->bi_next;
++		return;
 +	}
- 
-+	significand(st0_ptr) = s;
-+
-+	return normalize_no_excep(st0_ptr, 63, sign);
++	/* following loop may be a bit non-obvious, and so deserves some
++	 * explanation.
++	 * Before entering the loop, bio->bi_next is NULL (as all callers
++	 * ensure that) so we have a list with a single bio.
++	 * We pretend that we have just taken it off a longer list, so
++	 * we assign bio_list to the next (which is NULL) and bio_tail
++	 * to &bio_list, thus initialising the bio_list of new bios to be
++	 * added.  __generic_make_request may indeed add some more bios
++	 * through a recursive call to generic_make_request.  If it
++	 * did, we find a non-NULL value in bio_list and re-enter the loop
++	 * from the top.  In this case we really did just take the bio
++	 * off the top of the list (no pretending) and so fixup bio_list and
++	 * bio_tail or bi_next, and call into __generic_make_request again.
++	 *
++	 * The loop was structured like this to make only one call to
++	 * __generic_make_request (which is important as it is large and
++	 * inlined) and to keep the structure simple.
++	 */
++	BUG_ON(bio->bi_next);
++	do {
++		current->bio_list = bio->bi_next;
++		if (bio->bi_next == NULL)
++			current->bio_tail = &current->bio_list;
++		else
++			bio->bi_next = NULL;
++		__generic_make_request(bio);
++		bio = current->bio_list;
++	} while (bio);
++	current->bio_tail = NULL; /* deactivate */
 +}
- 
- /* Get a long from user memory */
- int FPU_load_int32(long __user *_s, FPU_REG *loaded_data)
- {
--  long s;
--  int negative;
-+	long s;
-+	int negative;
- 
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ, _s, 4);
--  FPU_get_user(s, _s);
--  RE_ENTRANT_CHECK_ON;
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_READ, _s, 4);
-+	FPU_get_user(s, _s);
-+	RE_ENTRANT_CHECK_ON;
- 
--  if (s == 0)
--    { reg_copy(&CONST_Z, loaded_data); return TAG_Zero; }
-+	if (s == 0) {
-+		reg_copy(&CONST_Z, loaded_data);
-+		return TAG_Zero;
-+	}
- 
--  if (s > 0)
--    negative = SIGN_Positive;
--  else
--    {
--      s = -s;
--      negative = SIGN_Negative;
--    }
-+	if (s > 0)
-+		negative = SIGN_Positive;
-+	else {
-+		s = -s;
-+		negative = SIGN_Negative;
-+	}
- 
--  loaded_data->sigh = s;
--  loaded_data->sigl = 0;
-+	loaded_data->sigh = s;
-+	loaded_data->sigl = 0;
- 
--  return normalize_no_excep(loaded_data, 31, negative);
-+	return normalize_no_excep(loaded_data, 31, negative);
- }
- 
--
- /* Get a short from user memory */
- int FPU_load_int16(short __user *_s, FPU_REG *loaded_data)
- {
--  int s, negative;
-+	int s, negative;
- 
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ, _s, 2);
--  /* Cast as short to get the sign extended. */
--  FPU_get_user(s, _s);
--  RE_ENTRANT_CHECK_ON;
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_READ, _s, 2);
-+	/* Cast as short to get the sign extended. */
-+	FPU_get_user(s, _s);
-+	RE_ENTRANT_CHECK_ON;
- 
--  if (s == 0)
--    { reg_copy(&CONST_Z, loaded_data); return TAG_Zero; }
-+	if (s == 0) {
-+		reg_copy(&CONST_Z, loaded_data);
-+		return TAG_Zero;
-+	}
- 
--  if (s > 0)
--    negative = SIGN_Positive;
--  else
--    {
--      s = -s;
--      negative = SIGN_Negative;
--    }
-+	if (s > 0)
-+		negative = SIGN_Positive;
-+	else {
-+		s = -s;
-+		negative = SIGN_Negative;
-+	}
- 
--  loaded_data->sigh = s << 16;
--  loaded_data->sigl = 0;
-+	loaded_data->sigh = s << 16;
-+	loaded_data->sigl = 0;
- 
--  return normalize_no_excep(loaded_data, 15, negative);
-+	return normalize_no_excep(loaded_data, 15, negative);
- }
- 
--
- /* Get a packed bcd array from user memory */
- int FPU_load_bcd(u_char __user *s)
- {
--  FPU_REG *st0_ptr = &st(0);
--  int pos;
--  u_char bcd;
--  long long l=0;
--  int sign;
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ, s, 10);
--  RE_ENTRANT_CHECK_ON;
--  for ( pos = 8; pos >= 0; pos--)
--    {
--      l *= 10;
--      RE_ENTRANT_CHECK_OFF;
--      FPU_get_user(bcd, s+pos);
--      RE_ENTRANT_CHECK_ON;
--      l += bcd >> 4;
--      l *= 10;
--      l += bcd & 0x0f;
--    }
-- 
--  RE_ENTRANT_CHECK_OFF;
--  FPU_get_user(sign, s+9);
--  sign = sign & 0x80 ? SIGN_Negative : SIGN_Positive;
--  RE_ENTRANT_CHECK_ON;
--
--  if ( l == 0 )
--    {
--      reg_copy(&CONST_Z, st0_ptr);
--      addexponent(st0_ptr, sign);   /* Set the sign. */
--      return TAG_Zero;
--    }
--  else
--    {
--      significand(st0_ptr) = l;
--      return normalize_no_excep(st0_ptr, 63, sign);
--    }
-+	FPU_REG *st0_ptr = &st(0);
-+	int pos;
-+	u_char bcd;
-+	long long l = 0;
-+	int sign;
 +
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_READ, s, 10);
-+	RE_ENTRANT_CHECK_ON;
-+	for (pos = 8; pos >= 0; pos--) {
-+		l *= 10;
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_get_user(bcd, s + pos);
-+		RE_ENTRANT_CHECK_ON;
-+		l += bcd >> 4;
-+		l *= 10;
-+		l += bcd & 0x0f;
-+	}
++EXPORT_SYMBOL(generic_make_request);
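As a rough illustration of the stacking convention the comments above describe (not taken from the patch itself), a remapping make_request_fn could look something like the following minimal sketch. The my_* names and the fixed linear offset are invented for the example, and registration via blk_queue_make_request() is assumed to happen elsewhere:

#include <linux/blkdev.h>

/* Backing device and start offset for a hypothetical linear volume;
 * how these get set up is outside the scope of this sketch. */
static struct block_device *my_lower_bdev;
static sector_t my_lower_start;

static int my_linear_make_request(struct request_queue *q, struct bio *bio)
{
	/* Remap the bio onto the lower device in place... */
	bio->bi_bdev = my_lower_bdev;
	bio->bi_sector += my_lower_start;

	/*
	 * ...and return non-zero so the loop in __generic_make_request()
	 * re-resolves bi_bdev and submits the bio to the lower queue.
	 * Returning 0 after calling generic_make_request(bio) ourselves
	 * would also work, as the "resolve their own stacking" comment
	 * above notes.
	 */
	return 1;
}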
 +
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_get_user(sign, s + 9);
-+	sign = sign & 0x80 ? SIGN_Negative : SIGN_Positive;
-+	RE_ENTRANT_CHECK_ON;
++/**
++ * submit_bio: submit a bio to the block device layer for I/O
++ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
++ * @bio: The &struct bio which describes the I/O
++ *
++ * submit_bio() is very similar in purpose to generic_make_request(), and
++ * uses that function to do most of the work. Both are fairly rough
++ * interfaces, @bio must be presetup and ready for I/O.
++ *
++ */
++void submit_bio(int rw, struct bio *bio)
++{
++	int count = bio_sectors(bio);
++
++	bio->bi_rw |= rw;
 +
-+	if (l == 0) {
-+		reg_copy(&CONST_Z, st0_ptr);
-+		addexponent(st0_ptr, sign);	/* Set the sign. */
-+		return TAG_Zero;
-+	} else {
-+		significand(st0_ptr) = l;
-+		return normalize_no_excep(st0_ptr, 63, sign);
-+	}
- }
- 
- /*===========================================================================*/
- 
- /* Put a long double into user memory */
--int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag, long double __user *d)
-+int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag,
-+		       long double __user * d)
- {
--  /*
--    The only exception raised by an attempt to store to an
--    extended format is the Invalid Stack exception, i.e.
--    attempting to store from an empty register.
--   */
--
--  if ( st0_tag != TAG_Empty )
--    {
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_WRITE, d, 10);
--
--      FPU_put_user(st0_ptr->sigl, (unsigned long __user *) d);
--      FPU_put_user(st0_ptr->sigh, (unsigned long __user *) ((u_char __user *)d + 4));
--      FPU_put_user(exponent16(st0_ptr), (unsigned short __user *) ((u_char __user *)d + 8));
--      RE_ENTRANT_CHECK_ON;
--
--      return 1;
--    }
--
--  /* Empty register (stack underflow) */
--  EXCEPTION(EX_StackUnder);
--  if ( control_word & CW_Invalid )
--    {
--      /* The masked response */
--      /* Put out the QNaN indefinite */
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_WRITE,d,10);
--      FPU_put_user(0, (unsigned long __user *) d);
--      FPU_put_user(0xc0000000, 1 + (unsigned long __user *) d);
--      FPU_put_user(0xffff, 4 + (short __user *) d);
--      RE_ENTRANT_CHECK_ON;
--      return 1;
--    }
--  else
--    return 0;
 +	/*
-+	   The only exception raised by an attempt to store to an
-+	   extended format is the Invalid Stack exception, i.e.
-+	   attempting to store from an empty register.
++	 * If it's a regular read/write or a barrier with data attached,
++	 * go through the normal accounting stuff before submission.
 +	 */
++	if (!bio_empty_barrier(bio)) {
 +
-+	if (st0_tag != TAG_Empty) {
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_WRITE, d, 10);
++		BIO_BUG_ON(!bio->bi_size);
++		BIO_BUG_ON(!bio->bi_io_vec);
 +
-+		FPU_put_user(st0_ptr->sigl, (unsigned long __user *)d);
-+		FPU_put_user(st0_ptr->sigh,
-+			     (unsigned long __user *)((u_char __user *) d + 4));
-+		FPU_put_user(exponent16(st0_ptr),
-+			     (unsigned short __user *)((u_char __user *) d +
-+						       8));
-+		RE_ENTRANT_CHECK_ON;
++		if (rw & WRITE) {
++			count_vm_events(PGPGOUT, count);
++		} else {
++			task_io_account_read(bio->bi_size);
++			count_vm_events(PGPGIN, count);
++		}
 +
-+		return 1;
++		if (unlikely(block_dump)) {
++			char b[BDEVNAME_SIZE];
++			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
++			current->comm, task_pid_nr(current),
++				(rw & WRITE) ? "WRITE" : "READ",
++				(unsigned long long)bio->bi_sector,
++				bdevname(bio->bi_bdev,b));
++		}
 +	}
- 
--}
-+	/* Empty register (stack underflow) */
-+	EXCEPTION(EX_StackUnder);
-+	if (control_word & CW_Invalid) {
-+		/* The masked response */
-+		/* Put out the QNaN indefinite */
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_WRITE, d, 10);
-+		FPU_put_user(0, (unsigned long __user *)d);
-+		FPU_put_user(0xc0000000, 1 + (unsigned long __user *)d);
-+		FPU_put_user(0xffff, 4 + (short __user *)d);
-+		RE_ENTRANT_CHECK_ON;
-+		return 1;
-+	} else
-+		return 0;
- 
-+}
- 
- /* Put a double into user memory */
- int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat)
- {
--  unsigned long l[2];
--  unsigned long increment = 0;	/* avoid gcc warnings */
--  int precision_loss;
--  int exp;
--  FPU_REG tmp;
-+	unsigned long l[2];
-+	unsigned long increment = 0;	/* avoid gcc warnings */
-+	int precision_loss;
-+	int exp;
-+	FPU_REG tmp;
- 
--  if ( st0_tag == TAG_Valid )
--    {
--      reg_copy(st0_ptr, &tmp);
--      exp = exponent(&tmp);
-+	if (st0_tag == TAG_Valid) {
-+		reg_copy(st0_ptr, &tmp);
-+		exp = exponent(&tmp);
- 
--      if ( exp < DOUBLE_Emin )     /* It may be a denormal */
--	{
--	  addexponent(&tmp, -DOUBLE_Emin + 52);  /* largest exp to be 51 */
-+		if (exp < DOUBLE_Emin) {	/* It may be a denormal */
-+			addexponent(&tmp, -DOUBLE_Emin + 52);	/* largest exp to be 51 */
- 
--	denormal_arg:
-+		      denormal_arg:
- 
--	  if ( (precision_loss = FPU_round_to_int(&tmp, st0_tag)) )
--	    {
-+			if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
- #ifdef PECULIAR_486
--	      /* Did it round to a non-denormal ? */
--	      /* This behaviour might be regarded as peculiar, it appears
--		 that the 80486 rounds to the dest precision, then
--		 converts to decide underflow. */
--	      if ( !((tmp.sigh == 0x00100000) && (tmp.sigl == 0) &&
--		  (st0_ptr->sigl & 0x000007ff)) )
-+				/* Did it round to a non-denormal ? */
-+				/* This behaviour might be regarded as peculiar, it appears
-+				   that the 80486 rounds to the dest precision, then
-+				   converts to decide underflow. */
-+				if (!
-+				    ((tmp.sigh == 0x00100000) && (tmp.sigl == 0)
-+				     && (st0_ptr->sigl & 0x000007ff)))
- #endif /* PECULIAR_486 */
--		{
--		  EXCEPTION(EX_Underflow);
--		  /* This is a special case: see sec 16.2.5.1 of
--		     the 80486 book */
--		  if ( !(control_word & CW_Underflow) )
--		    return 0;
--		}
--	      EXCEPTION(precision_loss);
--	      if ( !(control_word & CW_Precision) )
--		return 0;
--	    }
--	  l[0] = tmp.sigl;
--	  l[1] = tmp.sigh;
--	}
--      else
--	{
--	  if ( tmp.sigl & 0x000007ff )
--	    {
--	      precision_loss = 1;
--	      switch (control_word & CW_RC)
--		{
--		case RC_RND:
--		  /* Rounding can get a little messy.. */
--		  increment = ((tmp.sigl & 0x7ff) > 0x400) |  /* nearest */
--		    ((tmp.sigl & 0xc00) == 0xc00);            /* odd -> even */
--		  break;
--		case RC_DOWN:   /* towards -infinity */
--		  increment = signpositive(&tmp) ? 0 : tmp.sigl & 0x7ff;
--		  break;
--		case RC_UP:     /* towards +infinity */
--		  increment = signpositive(&tmp) ? tmp.sigl & 0x7ff : 0;
--		  break;
--		case RC_CHOP:
--		  increment = 0;
--		  break;
--		}
--	  
--	      /* Truncate the mantissa */
--	      tmp.sigl &= 0xfffff800;
--	  
--	      if ( increment )
--		{
--		  if ( tmp.sigl >= 0xfffff800 )
--		    {
--		      /* the sigl part overflows */
--		      if ( tmp.sigh == 0xffffffff )
--			{
--			  /* The sigh part overflows */
--			  tmp.sigh = 0x80000000;
--			  exp++;
--			  if (exp >= EXP_OVER)
--			    goto overflow;
-+				{
-+					EXCEPTION(EX_Underflow);
-+					/* This is a special case: see sec 16.2.5.1 of
-+					   the 80486 book */
-+					if (!(control_word & CW_Underflow))
-+						return 0;
-+				}
-+				EXCEPTION(precision_loss);
-+				if (!(control_word & CW_Precision))
-+					return 0;
- 			}
--		      else
--			{
--			  tmp.sigh ++;
-+			l[0] = tmp.sigl;
-+			l[1] = tmp.sigh;
-+		} else {
-+			if (tmp.sigl & 0x000007ff) {
-+				precision_loss = 1;
-+				switch (control_word & CW_RC) {
-+				case RC_RND:
-+					/* Rounding can get a little messy.. */
-+					increment = ((tmp.sigl & 0x7ff) > 0x400) |	/* nearest */
-+					    ((tmp.sigl & 0xc00) == 0xc00);	/* odd -> even */
-+					break;
-+				case RC_DOWN:	/* towards -infinity */
-+					increment =
-+					    signpositive(&tmp) ? 0 : tmp.
-+					    sigl & 0x7ff;
-+					break;
-+				case RC_UP:	/* towards +infinity */
-+					increment =
-+					    signpositive(&tmp) ? tmp.
-+					    sigl & 0x7ff : 0;
-+					break;
-+				case RC_CHOP:
-+					increment = 0;
-+					break;
-+				}
 +
-+				/* Truncate the mantissa */
-+				tmp.sigl &= 0xfffff800;
++	generic_make_request(bio);
++}
 +
-+				if (increment) {
-+					if (tmp.sigl >= 0xfffff800) {
-+						/* the sigl part overflows */
-+						if (tmp.sigh == 0xffffffff) {
-+							/* The sigh part overflows */
-+							tmp.sigh = 0x80000000;
-+							exp++;
-+							if (exp >= EXP_OVER)
-+								goto overflow;
-+						} else {
-+							tmp.sigh++;
-+						}
-+						tmp.sigl = 0x00000000;
-+					} else {
-+						/* We only need to increment sigl */
-+						tmp.sigl += 0x00000800;
-+					}
-+				}
-+			} else
-+				precision_loss = 0;
++EXPORT_SYMBOL(submit_bio);
 +
-+			l[0] = (tmp.sigl >> 11) | (tmp.sigh << 21);
-+			l[1] = ((tmp.sigh >> 11) & 0xfffff);
++/**
++ * __end_that_request_first - end I/O on a request
++ * @req:      the request being processed
++ * @error:    0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete
++ *
++ * Description:
++ *     Ends I/O on a number of bytes attached to @req, and sets it up
++ *     for the next range of segments (if any) in the cluster.
++ *
++ * Return:
++ *     0 - we are done with this request, call end_that_request_last()
++ *     1 - still buffers pending for this request
++ **/
++static int __end_that_request_first(struct request *req, int error,
++				    int nr_bytes)
++{
++	int total_bytes, bio_nbytes, next_idx = 0;
++	struct bio *bio;
 +
-+			if (exp > DOUBLE_Emax) {
-+			      overflow:
-+				EXCEPTION(EX_Overflow);
-+				if (!(control_word & CW_Overflow))
-+					return 0;
-+				set_precision_flag_up();
-+				if (!(control_word & CW_Precision))
-+					return 0;
++	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
 +
-+				/* This is a special case: see sec 16.2.5.1 of the 80486 book */
-+				/* Overflow to infinity */
-+				l[0] = 0x00000000;	/* Set to */
-+				l[1] = 0x7ff00000;	/* + INF */
-+			} else {
-+				if (precision_loss) {
-+					if (increment)
-+						set_precision_flag_up();
-+					else
-+						set_precision_flag_down();
-+				}
-+				/* Add the exponent */
-+				l[1] |= (((exp + DOUBLE_Ebias) & 0x7ff) << 20);
- 			}
--		      tmp.sigl = 0x00000000;
--		    }
--		  else
--		    {
--		      /* We only need to increment sigl */
--		      tmp.sigl += 0x00000800;
--		    }
--		}
--	    }
--	  else
--	    precision_loss = 0;
--	  
--	  l[0] = (tmp.sigl >> 11) | (tmp.sigh << 21);
--	  l[1] = ((tmp.sigh >> 11) & 0xfffff);
--
--	  if ( exp > DOUBLE_Emax )
--	    {
--	    overflow:
--	      EXCEPTION(EX_Overflow);
--	      if ( !(control_word & CW_Overflow) )
--		return 0;
--	      set_precision_flag_up();
--	      if ( !(control_word & CW_Precision) )
--		return 0;
--
--	      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
--	      /* Overflow to infinity */
--	      l[0] = 0x00000000;	/* Set to */
--	      l[1] = 0x7ff00000;	/* + INF */
--	    }
--	  else
--	    {
--	      if ( precision_loss )
--		{
--		  if ( increment )
--		    set_precision_flag_up();
--		  else
--		    set_precision_flag_down();
- 		}
--	      /* Add the exponent */
--	      l[1] |= (((exp+DOUBLE_Ebias) & 0x7ff) << 20);
--	    }
--	}
--    }
--  else if (st0_tag == TAG_Zero)
--    {
--      /* Number is zero */
--      l[0] = 0;
--      l[1] = 0;
--    }
--  else if ( st0_tag == TAG_Special )
--    {
--      st0_tag = FPU_Special(st0_ptr);
--      if ( st0_tag == TW_Denormal )
--	{
--	  /* A denormal will always underflow. */
-+	} else if (st0_tag == TAG_Zero) {
-+		/* Number is zero */
-+		l[0] = 0;
-+		l[1] = 0;
-+	} else if (st0_tag == TAG_Special) {
-+		st0_tag = FPU_Special(st0_ptr);
-+		if (st0_tag == TW_Denormal) {
-+			/* A denormal will always underflow. */
- #ifndef PECULIAR_486
--	  /* An 80486 is supposed to be able to generate
--	     a denormal exception here, but... */
--	  /* Underflow has priority. */
--	  if ( control_word & CW_Underflow )
--	    denormal_operand();
-+			/* An 80486 is supposed to be able to generate
-+			   a denormal exception here, but... */
-+			/* Underflow has priority. */
-+			if (control_word & CW_Underflow)
-+				denormal_operand();
- #endif /* PECULIAR_486 */
--	  reg_copy(st0_ptr, &tmp);
--	  goto denormal_arg;
--	}
--      else if (st0_tag == TW_Infinity)
--	{
--	  l[0] = 0;
--	  l[1] = 0x7ff00000;
--	}
--      else if (st0_tag == TW_NaN)
--	{
--	  /* Is it really a NaN ? */
--	  if ( (exponent(st0_ptr) == EXP_OVER)
--	       && (st0_ptr->sigh & 0x80000000) )
--	    {
--	      /* See if we can get a valid NaN from the FPU_REG */
--	      l[0] = (st0_ptr->sigl >> 11) | (st0_ptr->sigh << 21);
--	      l[1] = ((st0_ptr->sigh >> 11) & 0xfffff);
--	      if ( !(st0_ptr->sigh & 0x40000000) )
--		{
--		  /* It is a signalling NaN */
--		  EXCEPTION(EX_Invalid);
--		  if ( !(control_word & CW_Invalid) )
--		    return 0;
--		  l[1] |= (0x40000000 >> 11);
-+			reg_copy(st0_ptr, &tmp);
-+			goto denormal_arg;
-+		} else if (st0_tag == TW_Infinity) {
-+			l[0] = 0;
-+			l[1] = 0x7ff00000;
-+		} else if (st0_tag == TW_NaN) {
-+			/* Is it really a NaN ? */
-+			if ((exponent(st0_ptr) == EXP_OVER)
-+			    && (st0_ptr->sigh & 0x80000000)) {
-+				/* See if we can get a valid NaN from the FPU_REG */
-+				l[0] =
-+				    (st0_ptr->sigl >> 11) | (st0_ptr->
-+							     sigh << 21);
-+				l[1] = ((st0_ptr->sigh >> 11) & 0xfffff);
-+				if (!(st0_ptr->sigh & 0x40000000)) {
-+					/* It is a signalling NaN */
-+					EXCEPTION(EX_Invalid);
-+					if (!(control_word & CW_Invalid))
-+						return 0;
-+					l[1] |= (0x40000000 >> 11);
-+				}
-+				l[1] |= 0x7ff00000;
-+			} else {
-+				/* It is an unsupported data type */
-+				EXCEPTION(EX_Invalid);
-+				if (!(control_word & CW_Invalid))
-+					return 0;
-+				l[0] = 0;
-+				l[1] = 0xfff80000;
-+			}
- 		}
--	      l[1] |= 0x7ff00000;
--	    }
--	  else
--	    {
--	      /* It is an unsupported data type */
--	      EXCEPTION(EX_Invalid);
--	      if ( !(control_word & CW_Invalid) )
--		return 0;
--	      l[0] = 0;
--	      l[1] = 0xfff80000;
--	    }
-+	} else if (st0_tag == TAG_Empty) {
-+		/* Empty register (stack underflow) */
-+		EXCEPTION(EX_StackUnder);
-+		if (control_word & CW_Invalid) {
-+			/* The masked response */
-+			/* Put out the QNaN indefinite */
-+			RE_ENTRANT_CHECK_OFF;
-+			FPU_access_ok(VERIFY_WRITE, dfloat, 8);
-+			FPU_put_user(0, (unsigned long __user *)dfloat);
-+			FPU_put_user(0xfff80000,
-+				     1 + (unsigned long __user *)dfloat);
-+			RE_ENTRANT_CHECK_ON;
-+			return 1;
-+		} else
-+			return 0;
- 	}
--    }
--  else if ( st0_tag == TAG_Empty )
--    {
--      /* Empty register (stack underflow) */
--      EXCEPTION(EX_StackUnder);
--      if ( control_word & CW_Invalid )
--	{
--	  /* The masked response */
--	  /* Put out the QNaN indefinite */
--	  RE_ENTRANT_CHECK_OFF;
--	  FPU_access_ok(VERIFY_WRITE,dfloat,8);
--	  FPU_put_user(0, (unsigned long __user *) dfloat);
--	  FPU_put_user(0xfff80000, 1 + (unsigned long __user *) dfloat);
--	  RE_ENTRANT_CHECK_ON;
--	  return 1;
--	}
--      else
--	return 0;
--    }
--  if ( getsign(st0_ptr) )
--    l[1] |= 0x80000000;
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_WRITE,dfloat,8);
--  FPU_put_user(l[0], (unsigned long __user *)dfloat);
--  FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat);
--  RE_ENTRANT_CHECK_ON;
--
--  return 1;
--}
-+	if (getsign(st0_ptr))
-+		l[1] |= 0x80000000;
- 
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_WRITE, dfloat, 8);
-+	FPU_put_user(l[0], (unsigned long __user *)dfloat);
-+	FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat);
-+	RE_ENTRANT_CHECK_ON;
++	/*
++	 * for a REQ_BLOCK_PC request, we want to carry any eventual
++	 * sense key with us all the way through
++	 */
++	if (!blk_pc_request(req))
++		req->errors = 0;
 +
-+	return 1;
-+}
- 
- /* Put a float into user memory */
- int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single)
- {
--  long templ = 0;
--  unsigned long increment = 0;     	/* avoid gcc warnings */
--  int precision_loss;
--  int exp;
--  FPU_REG tmp;
-+	long templ = 0;
-+	unsigned long increment = 0;	/* avoid gcc warnings */
-+	int precision_loss;
-+	int exp;
-+	FPU_REG tmp;
- 
--  if ( st0_tag == TAG_Valid )
--    {
-+	if (st0_tag == TAG_Valid) {
- 
--      reg_copy(st0_ptr, &tmp);
--      exp = exponent(&tmp);
-+		reg_copy(st0_ptr, &tmp);
-+		exp = exponent(&tmp);
- 
--      if ( exp < SINGLE_Emin )
--	{
--	  addexponent(&tmp, -SINGLE_Emin + 23);  /* largest exp to be 22 */
-+		if (exp < SINGLE_Emin) {
-+			addexponent(&tmp, -SINGLE_Emin + 23);	/* largest exp to be 22 */
- 
--	denormal_arg:
-+		      denormal_arg:
- 
--	  if ( (precision_loss = FPU_round_to_int(&tmp, st0_tag)) )
--	    {
-+			if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
- #ifdef PECULIAR_486
--	      /* Did it round to a non-denormal ? */
--	      /* This behaviour might be regarded as peculiar, it appears
--		 that the 80486 rounds to the dest precision, then
--		 converts to decide underflow. */
--	      if ( !((tmp.sigl == 0x00800000) &&
--		  ((st0_ptr->sigh & 0x000000ff) || st0_ptr->sigl)) )
-+				/* Did it round to a non-denormal ? */
-+				/* This behaviour might be regarded as peculiar, it appears
-+				   that the 80486 rounds to the dest precision, then
-+				   converts to decide underflow. */
-+				if (!((tmp.sigl == 0x00800000) &&
-+				      ((st0_ptr->sigh & 0x000000ff)
-+				       || st0_ptr->sigl)))
- #endif /* PECULIAR_486 */
--		{
--		  EXCEPTION(EX_Underflow);
--		  /* This is a special case: see sec 16.2.5.1 of
--		     the 80486 book */
--		  if ( !(control_word & CW_Underflow) )
--		    return 0;
--		}
--	      EXCEPTION(precision_loss);
--	      if ( !(control_word & CW_Precision) )
--		return 0;
--	    }
--	  templ = tmp.sigl;
--      }
--      else
--	{
--	  if ( tmp.sigl | (tmp.sigh & 0x000000ff) )
--	    {
--	      unsigned long sigh = tmp.sigh;
--	      unsigned long sigl = tmp.sigl;
--	      
--	      precision_loss = 1;
--	      switch (control_word & CW_RC)
--		{
--		case RC_RND:
--		  increment = ((sigh & 0xff) > 0x80)       /* more than half */
--		    || (((sigh & 0xff) == 0x80) && sigl)   /* more than half */
--		    || ((sigh & 0x180) == 0x180);        /* round to even */
--		  break;
--		case RC_DOWN:   /* towards -infinity */
--		  increment = signpositive(&tmp)
--		    ? 0 : (sigl | (sigh & 0xff));
--		  break;
--		case RC_UP:     /* towards +infinity */
--		  increment = signpositive(&tmp)
--		    ? (sigl | (sigh & 0xff)) : 0;
--		  break;
--		case RC_CHOP:
--		  increment = 0;
--		  break;
--		}
--	  
--	      /* Truncate part of the mantissa */
--	      tmp.sigl = 0;
--	  
--	      if (increment)
--		{
--		  if ( sigh >= 0xffffff00 )
--		    {
--		      /* The sigh part overflows */
--		      tmp.sigh = 0x80000000;
--		      exp++;
--		      if ( exp >= EXP_OVER )
--			goto overflow;
--		    }
--		  else
--		    {
--		      tmp.sigh &= 0xffffff00;
--		      tmp.sigh += 0x100;
--		    }
--		}
--	      else
--		{
--		  tmp.sigh &= 0xffffff00;  /* Finish the truncation */
--		}
--	    }
--	  else
--	    precision_loss = 0;
--      
--	  templ = (tmp.sigh >> 8) & 0x007fffff;
--
--	  if ( exp > SINGLE_Emax )
--	    {
--	    overflow:
--	      EXCEPTION(EX_Overflow);
--	      if ( !(control_word & CW_Overflow) )
--		return 0;
--	      set_precision_flag_up();
--	      if ( !(control_word & CW_Precision) )
--		return 0;
--
--	      /* This is a special case: see sec 16.2.5.1 of the 80486 book. */
--	      /* Masked response is overflow to infinity. */
--	      templ = 0x7f800000;
--	    }
--	  else
--	    {
--	      if ( precision_loss )
--		{
--		  if ( increment )
--		    set_precision_flag_up();
--		  else
--		    set_precision_flag_down();
-+				{
-+					EXCEPTION(EX_Underflow);
-+					/* This is a special case: see sec 16.2.5.1 of
-+					   the 80486 book */
-+					if (!(control_word & CW_Underflow))
-+						return 0;
-+				}
-+				EXCEPTION(precision_loss);
-+				if (!(control_word & CW_Precision))
-+					return 0;
-+			}
-+			templ = tmp.sigl;
-+		} else {
-+			if (tmp.sigl | (tmp.sigh & 0x000000ff)) {
-+				unsigned long sigh = tmp.sigh;
-+				unsigned long sigl = tmp.sigl;
++	if (error) {
++		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
++			printk("end_request: I/O error, dev %s, sector %llu\n",
++				req->rq_disk ? req->rq_disk->disk_name : "?",
++				(unsigned long long)req->sector);
++	}
 +
-+				precision_loss = 1;
-+				switch (control_word & CW_RC) {
-+				case RC_RND:
-+					increment = ((sigh & 0xff) > 0x80)	/* more than half */
-+					    ||(((sigh & 0xff) == 0x80) && sigl)	/* more than half */
-+					    ||((sigh & 0x180) == 0x180);	/* round to even */
-+					break;
-+				case RC_DOWN:	/* towards -infinity */
-+					increment = signpositive(&tmp)
-+					    ? 0 : (sigl | (sigh & 0xff));
-+					break;
-+				case RC_UP:	/* towards +infinity */
-+					increment = signpositive(&tmp)
-+					    ? (sigl | (sigh & 0xff)) : 0;
-+					break;
-+				case RC_CHOP:
-+					increment = 0;
-+					break;
-+				}
++	if (blk_fs_request(req) && req->rq_disk) {
++		const int rw = rq_data_dir(req);
 +
-+				/* Truncate part of the mantissa */
-+				tmp.sigl = 0;
++		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
++	}
 +
-+				if (increment) {
-+					if (sigh >= 0xffffff00) {
-+						/* The sigh part overflows */
-+						tmp.sigh = 0x80000000;
-+						exp++;
-+						if (exp >= EXP_OVER)
-+							goto overflow;
-+					} else {
-+						tmp.sigh &= 0xffffff00;
-+						tmp.sigh += 0x100;
-+					}
-+				} else {
-+					tmp.sigh &= 0xffffff00;	/* Finish the truncation */
-+				}
-+			} else
-+				precision_loss = 0;
++	total_bytes = bio_nbytes = 0;
++	while ((bio = req->bio) != NULL) {
++		int nbytes;
 +
-+			templ = (tmp.sigh >> 8) & 0x007fffff;
++		/*
++		 * For an empty barrier request, the low level driver must
++		 * store a potential error location in ->sector. We pass
++		 * that back up in ->bi_sector.
++		 */
++		if (blk_empty_barrier(req))
++			bio->bi_sector = req->sector;
 +
-+			if (exp > SINGLE_Emax) {
-+			      overflow:
-+				EXCEPTION(EX_Overflow);
-+				if (!(control_word & CW_Overflow))
-+					return 0;
-+				set_precision_flag_up();
-+				if (!(control_word & CW_Precision))
-+					return 0;
++		if (nr_bytes >= bio->bi_size) {
++			req->bio = bio->bi_next;
++			nbytes = bio->bi_size;
++			req_bio_endio(req, bio, nbytes, error);
++			next_idx = 0;
++			bio_nbytes = 0;
++		} else {
++			int idx = bio->bi_idx + next_idx;
 +
-+				/* This is a special case: see sec 16.2.5.1 of the 80486 book. */
-+				/* Masked response is overflow to infinity. */
-+				templ = 0x7f800000;
-+			} else {
-+				if (precision_loss) {
-+					if (increment)
-+						set_precision_flag_up();
-+					else
-+						set_precision_flag_down();
-+				}
-+				/* Add the exponent */
-+				templ |= ((exp + SINGLE_Ebias) & 0xff) << 23;
++			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
++				blk_dump_rq_flags(req, "__end_that");
++				printk("%s: bio idx %d >= vcnt %d\n",
++						__FUNCTION__,
++						bio->bi_idx, bio->bi_vcnt);
++				break;
 +			}
- 		}
--	      /* Add the exponent */
--	      templ |= ((exp+SINGLE_Ebias) & 0xff) << 23;
--	    }
--	}
--    }
--  else if (st0_tag == TAG_Zero)
--    {
--      templ = 0;
--    }
--  else if ( st0_tag == TAG_Special )
--    {
--      st0_tag = FPU_Special(st0_ptr);
--      if (st0_tag == TW_Denormal)
--	{
--	  reg_copy(st0_ptr, &tmp);
--
--	  /* A denormal will always underflow. */
-+	} else if (st0_tag == TAG_Zero) {
-+		templ = 0;
-+	} else if (st0_tag == TAG_Special) {
-+		st0_tag = FPU_Special(st0_ptr);
-+		if (st0_tag == TW_Denormal) {
-+			reg_copy(st0_ptr, &tmp);
 +
-+			/* A denormal will always underflow. */
- #ifndef PECULIAR_486
--	  /* An 80486 is supposed to be able to generate
--	     a denormal exception here, but... */
--	  /* Underflow has priority. */
--	  if ( control_word & CW_Underflow )
--	    denormal_operand();
--#endif /* PECULIAR_486 */ 
--	  goto denormal_arg;
--	}
--      else if (st0_tag == TW_Infinity)
--	{
--	  templ = 0x7f800000;
--	}
--      else if (st0_tag == TW_NaN)
--	{
--	  /* Is it really a NaN ? */
--	  if ( (exponent(st0_ptr) == EXP_OVER) && (st0_ptr->sigh & 0x80000000) )
--	    {
--	      /* See if we can get a valid NaN from the FPU_REG */
--	      templ = st0_ptr->sigh >> 8;
--	      if ( !(st0_ptr->sigh & 0x40000000) )
--		{
--		  /* It is a signalling NaN */
--		  EXCEPTION(EX_Invalid);
--		  if ( !(control_word & CW_Invalid) )
--		    return 0;
--		  templ |= (0x40000000 >> 8);
-+			/* An 80486 is supposed to be able to generate
-+			   a denormal exception here, but... */
-+			/* Underflow has priority. */
-+			if (control_word & CW_Underflow)
-+				denormal_operand();
-+#endif /* PECULIAR_486 */
-+			goto denormal_arg;
-+		} else if (st0_tag == TW_Infinity) {
-+			templ = 0x7f800000;
-+		} else if (st0_tag == TW_NaN) {
-+			/* Is it really a NaN ? */
-+			if ((exponent(st0_ptr) == EXP_OVER)
-+			    && (st0_ptr->sigh & 0x80000000)) {
-+				/* See if we can get a valid NaN from the FPU_REG */
-+				templ = st0_ptr->sigh >> 8;
-+				if (!(st0_ptr->sigh & 0x40000000)) {
-+					/* It is a signalling NaN */
-+					EXCEPTION(EX_Invalid);
-+					if (!(control_word & CW_Invalid))
-+						return 0;
-+					templ |= (0x40000000 >> 8);
-+				}
-+				templ |= 0x7f800000;
-+			} else {
-+				/* It is an unsupported data type */
-+				EXCEPTION(EX_Invalid);
-+				if (!(control_word & CW_Invalid))
-+					return 0;
-+				templ = 0xffc00000;
++			nbytes = bio_iovec_idx(bio, idx)->bv_len;
++			BIO_BUG_ON(nbytes > bio->bi_size);
++
++			/*
++			 * not a complete bvec done
++			 */
++			if (unlikely(nbytes > nr_bytes)) {
++				bio_nbytes += nr_bytes;
++				total_bytes += nr_bytes;
++				break;
 +			}
- 		}
--	      templ |= 0x7f800000;
--	    }
--	  else
--	    {
--	      /* It is an unsupported data type */
--	      EXCEPTION(EX_Invalid);
--	      if ( !(control_word & CW_Invalid) )
--		return 0;
--	      templ = 0xffc00000;
--	    }
--	}
- #ifdef PARANOID
--      else
--	{
--	  EXCEPTION(EX_INTERNAL|0x164);
--	  return 0;
--	}
-+		else {
-+			EXCEPTION(EX_INTERNAL | 0x164);
-+			return 0;
-+		}
- #endif
--    }
--  else if ( st0_tag == TAG_Empty )
--    {
--      /* Empty register (stack underflow) */
--      EXCEPTION(EX_StackUnder);
--      if ( control_word & EX_Invalid )
--	{
--	  /* The masked response */
--	  /* Put out the QNaN indefinite */
--	  RE_ENTRANT_CHECK_OFF;
--	  FPU_access_ok(VERIFY_WRITE,single,4);
--	  FPU_put_user(0xffc00000, (unsigned long __user *) single);
--	  RE_ENTRANT_CHECK_ON;
--	  return 1;
-+	} else if (st0_tag == TAG_Empty) {
-+		/* Empty register (stack underflow) */
-+		EXCEPTION(EX_StackUnder);
-+		if (control_word & EX_Invalid) {
-+			/* The masked response */
-+			/* Put out the QNaN indefinite */
-+			RE_ENTRANT_CHECK_OFF;
-+			FPU_access_ok(VERIFY_WRITE, single, 4);
-+			FPU_put_user(0xffc00000,
-+				     (unsigned long __user *)single);
-+			RE_ENTRANT_CHECK_ON;
-+			return 1;
-+		} else
-+			return 0;
- 	}
--      else
--	return 0;
--    }
- #ifdef PARANOID
--  else
--    {
--      EXCEPTION(EX_INTERNAL|0x163);
--      return 0;
--    }
-+	else {
-+		EXCEPTION(EX_INTERNAL | 0x163);
-+		return 0;
-+	}
- #endif
--  if ( getsign(st0_ptr) )
--    templ |= 0x80000000;
-+	if (getsign(st0_ptr))
-+		templ |= 0x80000000;
- 
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_WRITE,single,4);
--  FPU_put_user(templ,(unsigned long __user *) single);
--  RE_ENTRANT_CHECK_ON;
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_WRITE, single, 4);
-+	FPU_put_user(templ, (unsigned long __user *)single);
-+	RE_ENTRANT_CHECK_ON;
- 
--  return 1;
-+	return 1;
- }
- 
--
- /* Put a long long into user memory */
- int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d)
- {
--  FPU_REG t;
--  long long tll;
--  int precision_loss;
--
--  if ( st0_tag == TAG_Empty )
--    {
--      /* Empty register (stack underflow) */
--      EXCEPTION(EX_StackUnder);
--      goto invalid_operand;
--    }
--  else if ( st0_tag == TAG_Special )
--    {
--      st0_tag = FPU_Special(st0_ptr);
--      if ( (st0_tag == TW_Infinity) ||
--	   (st0_tag == TW_NaN) )
--	{
--	  EXCEPTION(EX_Invalid);
--	  goto invalid_operand;
-+	FPU_REG t;
-+	long long tll;
-+	int precision_loss;
 +
-+	if (st0_tag == TAG_Empty) {
-+		/* Empty register (stack underflow) */
-+		EXCEPTION(EX_StackUnder);
-+		goto invalid_operand;
-+	} else if (st0_tag == TAG_Special) {
-+		st0_tag = FPU_Special(st0_ptr);
-+		if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
-+			EXCEPTION(EX_Invalid);
-+			goto invalid_operand;
++			/*
++			 * advance to the next vector
++			 */
++			next_idx++;
++			bio_nbytes += nbytes;
 +		}
- 	}
--    }
--
--  reg_copy(st0_ptr, &t);
--  precision_loss = FPU_round_to_int(&t, st0_tag);
--  ((long *)&tll)[0] = t.sigl;
--  ((long *)&tll)[1] = t.sigh;
--  if ( (precision_loss == 1) ||
--      ((t.sigh & 0x80000000) &&
--       !((t.sigh == 0x80000000) && (t.sigl == 0) &&
--	 signnegative(&t))) )
--    {
--      EXCEPTION(EX_Invalid);
--      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
--    invalid_operand:
--      if ( control_word & EX_Invalid )
--	{
--	  /* Produce something like QNaN "indefinite" */
--	  tll = 0x8000000000000000LL;
-+
-+	reg_copy(st0_ptr, &t);
-+	precision_loss = FPU_round_to_int(&t, st0_tag);
-+	((long *)&tll)[0] = t.sigl;
-+	((long *)&tll)[1] = t.sigh;
-+	if ((precision_loss == 1) ||
-+	    ((t.sigh & 0x80000000) &&
-+	     !((t.sigh == 0x80000000) && (t.sigl == 0) && signnegative(&t)))) {
-+		EXCEPTION(EX_Invalid);
-+		/* This is a special case: see sec 16.2.5.1 of the 80486 book */
-+	      invalid_operand:
-+		if (control_word & EX_Invalid) {
-+			/* Produce something like QNaN "indefinite" */
-+			tll = 0x8000000000000000LL;
-+		} else
-+			return 0;
-+	} else {
-+		if (precision_loss)
-+			set_precision_flag(precision_loss);
-+		if (signnegative(&t))
-+			tll = -tll;
- 	}
--      else
--	return 0;
--    }
--  else
--    {
--      if ( precision_loss )
--	set_precision_flag(precision_loss);
--      if ( signnegative(&t) )
--	tll = - tll;
--    }
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_WRITE,d,8);
--  if (copy_to_user(d, &tll, 8))
--    FPU_abort;
--  RE_ENTRANT_CHECK_ON;
--
--  return 1;
--}
- 
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_WRITE, d, 8);
-+	if (copy_to_user(d, &tll, 8))
-+		FPU_abort;
-+	RE_ENTRANT_CHECK_ON;
 +
-+	return 1;
-+}
- 
- /* Put a long into user memory */
- int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d)
- {
--  FPU_REG t;
--  int precision_loss;
--
--  if ( st0_tag == TAG_Empty )
--    {
--      /* Empty register (stack underflow) */
--      EXCEPTION(EX_StackUnder);
--      goto invalid_operand;
--    }
--  else if ( st0_tag == TAG_Special )
--    {
--      st0_tag = FPU_Special(st0_ptr);
--      if ( (st0_tag == TW_Infinity) ||
--	   (st0_tag == TW_NaN) )
--	{
--	  EXCEPTION(EX_Invalid);
--	  goto invalid_operand;
-+	FPU_REG t;
-+	int precision_loss;
++		total_bytes += nbytes;
++		nr_bytes -= nbytes;
 +
-+	if (st0_tag == TAG_Empty) {
-+		/* Empty register (stack underflow) */
-+		EXCEPTION(EX_StackUnder);
-+		goto invalid_operand;
-+	} else if (st0_tag == TAG_Special) {
-+		st0_tag = FPU_Special(st0_ptr);
-+		if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
-+			EXCEPTION(EX_Invalid);
-+			goto invalid_operand;
++		if ((bio = req->bio)) {
++			/*
++			 * end more in this run, or just return 'not-done'
++			 */
++			if (unlikely(nr_bytes <= 0))
++				break;
 +		}
- 	}
--    }
--
--  reg_copy(st0_ptr, &t);
--  precision_loss = FPU_round_to_int(&t, st0_tag);
--  if (t.sigh ||
--      ((t.sigl & 0x80000000) &&
--       !((t.sigl == 0x80000000) && signnegative(&t))) )
--    {
--      EXCEPTION(EX_Invalid);
--      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
--    invalid_operand:
--      if ( control_word & EX_Invalid )
--	{
--	  /* Produce something like QNaN "indefinite" */
--	  t.sigl = 0x80000000;
++	}
 +
-+	reg_copy(st0_ptr, &t);
-+	precision_loss = FPU_round_to_int(&t, st0_tag);
-+	if (t.sigh ||
-+	    ((t.sigl & 0x80000000) &&
-+	     !((t.sigl == 0x80000000) && signnegative(&t)))) {
-+		EXCEPTION(EX_Invalid);
-+		/* This is a special case: see sec 16.2.5.1 of the 80486 book */
-+	      invalid_operand:
-+		if (control_word & EX_Invalid) {
-+			/* Produce something like QNaN "indefinite" */
-+			t.sigl = 0x80000000;
-+		} else
-+			return 0;
-+	} else {
-+		if (precision_loss)
-+			set_precision_flag(precision_loss);
-+		if (signnegative(&t))
-+			t.sigl = -(long)t.sigl;
- 	}
--      else
--	return 0;
--    }
--  else
--    {
--      if ( precision_loss )
--	set_precision_flag(precision_loss);
--      if ( signnegative(&t) )
--	t.sigl = -(long)t.sigl;
--    }
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_WRITE,d,4);
--  FPU_put_user(t.sigl, (unsigned long __user *) d);
--  RE_ENTRANT_CHECK_ON;
--
--  return 1;
--}
- 
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_WRITE, d, 4);
-+	FPU_put_user(t.sigl, (unsigned long __user *)d);
-+	RE_ENTRANT_CHECK_ON;
++	/*
++	 * completely done
++	 */
++	if (!req->bio)
++		return 0;
++
++	/*
++	 * if the request wasn't completed, update state
++	 */
++	if (bio_nbytes) {
++		req_bio_endio(req, bio, bio_nbytes, error);
++		bio->bi_idx += next_idx;
++		bio_iovec(bio)->bv_offset += nr_bytes;
++		bio_iovec(bio)->bv_len -= nr_bytes;
++	}
 +
++	blk_recalc_rq_sectors(req, total_bytes >> 9);
++	blk_recalc_rq_segments(req);
 +	return 1;
 +}
- 
- /* Put a short into user memory */
- int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d)
- {
--  FPU_REG t;
--  int precision_loss;
--
--  if ( st0_tag == TAG_Empty )
--    {
--      /* Empty register (stack underflow) */
--      EXCEPTION(EX_StackUnder);
--      goto invalid_operand;
--    }
--  else if ( st0_tag == TAG_Special )
--    {
--      st0_tag = FPU_Special(st0_ptr);
--      if ( (st0_tag == TW_Infinity) ||
--	   (st0_tag == TW_NaN) )
--	{
--	  EXCEPTION(EX_Invalid);
--	  goto invalid_operand;
-+	FPU_REG t;
-+	int precision_loss;
 +
-+	if (st0_tag == TAG_Empty) {
-+		/* Empty register (stack underflow) */
-+		EXCEPTION(EX_StackUnder);
-+		goto invalid_operand;
-+	} else if (st0_tag == TAG_Special) {
-+		st0_tag = FPU_Special(st0_ptr);
-+		if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
-+			EXCEPTION(EX_Invalid);
-+			goto invalid_operand;
-+		}
- 	}
--    }
--
--  reg_copy(st0_ptr, &t);
--  precision_loss = FPU_round_to_int(&t, st0_tag);
--  if (t.sigh ||
--      ((t.sigl & 0xffff8000) &&
--       !((t.sigl == 0x8000) && signnegative(&t))) )
--    {
--      EXCEPTION(EX_Invalid);
--      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
--    invalid_operand:
--      if ( control_word & EX_Invalid )
--	{
--	  /* Produce something like QNaN "indefinite" */
--	  t.sigl = 0x8000;
++/*
++ * splice the completion data to a local structure and hand off to
++ * process_completion_queue() to complete the requests
++ */
++static void blk_done_softirq(struct softirq_action *h)
++{
++	struct list_head *cpu_list, local_list;
 +
-+	reg_copy(st0_ptr, &t);
-+	precision_loss = FPU_round_to_int(&t, st0_tag);
-+	if (t.sigh ||
-+	    ((t.sigl & 0xffff8000) &&
-+	     !((t.sigl == 0x8000) && signnegative(&t)))) {
-+		EXCEPTION(EX_Invalid);
-+		/* This is a special case: see sec 16.2.5.1 of the 80486 book */
-+	      invalid_operand:
-+		if (control_word & EX_Invalid) {
-+			/* Produce something like QNaN "indefinite" */
-+			t.sigl = 0x8000;
-+		} else
-+			return 0;
-+	} else {
-+		if (precision_loss)
-+			set_precision_flag(precision_loss);
-+		if (signnegative(&t))
-+			t.sigl = -t.sigl;
- 	}
--      else
--	return 0;
--    }
--  else
--    {
--      if ( precision_loss )
--	set_precision_flag(precision_loss);
--      if ( signnegative(&t) )
--	t.sigl = -t.sigl;
--    }
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_WRITE,d,2);
--  FPU_put_user((short)t.sigl, d);
--  RE_ENTRANT_CHECK_ON;
--
--  return 1;
--}
- 
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_WRITE, d, 2);
-+	FPU_put_user((short)t.sigl, d);
-+	RE_ENTRANT_CHECK_ON;
++	local_irq_disable();
++	cpu_list = &__get_cpu_var(blk_cpu_done);
++	list_replace_init(cpu_list, &local_list);
++	local_irq_enable();
 +
-+	return 1;
++	while (!list_empty(&local_list)) {
++		struct request *rq = list_entry(local_list.next, struct request, donelist);
++
++		list_del_init(&rq->donelist);
++		rq->q->softirq_done_fn(rq);
++	}
 +}
- 
- /* Put a packed bcd array into user memory */
- int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
- {
--  FPU_REG t;
--  unsigned long long ll;
--  u_char b;
--  int i, precision_loss;
--  u_char sign = (getsign(st0_ptr) == SIGN_NEG) ? 0x80 : 0;
--
--  if ( st0_tag == TAG_Empty )
--    {
--      /* Empty register (stack underflow) */
--      EXCEPTION(EX_StackUnder);
--      goto invalid_operand;
--    }
--  else if ( st0_tag == TAG_Special )
--    {
--      st0_tag = FPU_Special(st0_ptr);
--      if ( (st0_tag == TW_Infinity) ||
--	   (st0_tag == TW_NaN) )
--	{
--	  EXCEPTION(EX_Invalid);
--	  goto invalid_operand;
-+	FPU_REG t;
-+	unsigned long long ll;
-+	u_char b;
-+	int i, precision_loss;
-+	u_char sign = (getsign(st0_ptr) == SIGN_NEG) ? 0x80 : 0;
 +
-+	if (st0_tag == TAG_Empty) {
-+		/* Empty register (stack underflow) */
-+		EXCEPTION(EX_StackUnder);
-+		goto invalid_operand;
-+	} else if (st0_tag == TAG_Special) {
-+		st0_tag = FPU_Special(st0_ptr);
-+		if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
-+			EXCEPTION(EX_Invalid);
-+			goto invalid_operand;
-+		}
++static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
++			  void *hcpu)
++{
++	/*
++	 * If a CPU goes away, splice its entries to the current CPU
++	 * and trigger a run of the softirq
++	 */
++	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
++		int cpu = (unsigned long) hcpu;
++
++		local_irq_disable();
++		list_splice_init(&per_cpu(blk_cpu_done, cpu),
++				 &__get_cpu_var(blk_cpu_done));
++		raise_softirq_irqoff(BLOCK_SOFTIRQ);
++		local_irq_enable();
 +	}
 +
-+	reg_copy(st0_ptr, &t);
-+	precision_loss = FPU_round_to_int(&t, st0_tag);
-+	ll = significand(&t);
++	return NOTIFY_OK;
++}
 +
-+	/* Check for overflow, by comparing with 999999999999999999 decimal. */
-+	if ((t.sigh > 0x0de0b6b3) ||
-+	    ((t.sigh == 0x0de0b6b3) && (t.sigl > 0xa763ffff))) {
-+		EXCEPTION(EX_Invalid);
-+		/* This is a special case: see sec 16.2.5.1 of the 80486 book */
-+	      invalid_operand:
-+		if (control_word & CW_Invalid) {
-+			/* Produce the QNaN "indefinite" */
-+			RE_ENTRANT_CHECK_OFF;
-+			FPU_access_ok(VERIFY_WRITE, d, 10);
-+			for (i = 0; i < 7; i++)
-+				FPU_put_user(0, d + i);	/* These bytes "undefined" */
-+			FPU_put_user(0xc0, d + 7);	/* This byte "undefined" */
-+			FPU_put_user(0xff, d + 8);
-+			FPU_put_user(0xff, d + 9);
-+			RE_ENTRANT_CHECK_ON;
-+			return 1;
-+		} else
-+			return 0;
-+	} else if (precision_loss) {
-+		/* Precision loss doesn't stop the data transfer */
-+		set_precision_flag(precision_loss);
- 	}
--    }
--
--  reg_copy(st0_ptr, &t);
--  precision_loss = FPU_round_to_int(&t, st0_tag);
--  ll = significand(&t);
--
--  /* Check for overflow, by comparing with 999999999999999999 decimal. */
--  if ( (t.sigh > 0x0de0b6b3) ||
--      ((t.sigh == 0x0de0b6b3) && (t.sigl > 0xa763ffff)) )
--    {
--      EXCEPTION(EX_Invalid);
--      /* This is a special case: see sec 16.2.5.1 of the 80486 book */
--    invalid_operand:
--      if ( control_word & CW_Invalid )
--	{
--	  /* Produce the QNaN "indefinite" */
--	  RE_ENTRANT_CHECK_OFF;
--	  FPU_access_ok(VERIFY_WRITE,d,10);
--	  for ( i = 0; i < 7; i++)
--	    FPU_put_user(0, d+i); /* These bytes "undefined" */
--	  FPU_put_user(0xc0, d+7); /* This byte "undefined" */
--	  FPU_put_user(0xff, d+8);
--	  FPU_put_user(0xff, d+9);
--	  RE_ENTRANT_CHECK_ON;
--	  return 1;
 +
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_WRITE, d, 10);
-+	RE_ENTRANT_CHECK_ON;
-+	for (i = 0; i < 9; i++) {
-+		b = FPU_div_small(&ll, 10);
-+		b |= (FPU_div_small(&ll, 10)) << 4;
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_put_user(b, d + i);
-+		RE_ENTRANT_CHECK_ON;
- 	}
--      else
--	return 0;
--    }
--  else if ( precision_loss )
--    {
--      /* Precision loss doesn't stop the data transfer */
--      set_precision_flag(precision_loss);
--    }
--
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_WRITE,d,10);
--  RE_ENTRANT_CHECK_ON;
--  for ( i = 0; i < 9; i++)
--    {
--      b = FPU_div_small(&ll, 10);
--      b |= (FPU_div_small(&ll, 10)) << 4;
--      RE_ENTRANT_CHECK_OFF;
--      FPU_put_user(b, d+i);
--      RE_ENTRANT_CHECK_ON;
--    }
--  RE_ENTRANT_CHECK_OFF;
--  FPU_put_user(sign, d+9);
--  RE_ENTRANT_CHECK_ON;
--
--  return 1;
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_put_user(sign, d + 9);
-+	RE_ENTRANT_CHECK_ON;
++static struct notifier_block blk_cpu_notifier __cpuinitdata = {
++	.notifier_call	= blk_cpu_notify,
++};
 +
-+	return 1;
- }
- 
- /*===========================================================================*/
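[Aside, not part of the patch] The FPU_store_bcd() loop above emits nine packed-BCD bytes by calling FPU_div_small(&ll, 10) twice per byte. A stand-alone user-space sketch of the same packing — the helper name is hypothetical, only the digit-per-nibble layout is taken from the code above:

	#include <stdint.h>

	/* Pack the low 18 decimal digits of ll into out[0..8], two digits per
	 * byte, least-significant digit in the low nibble of out[0], mirroring
	 * the FPU_div_small() loop in FPU_store_bcd(). */
	static void pack_bcd18(uint64_t ll, uint8_t out[9])
	{
		int i;

		for (i = 0; i < 9; i++) {
			uint8_t b = ll % 10;		/* low digit */
			ll /= 10;
			b |= (uint8_t)(ll % 10) << 4;	/* next digit, high nibble */
			ll /= 10;
			out[i] = b;
		}
	}

The sign byte (0x80 for negative, 0 otherwise) then goes into the tenth byte, as the FPU_put_user(sign, d + 9) call above shows.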
-@@ -1119,59 +973,56 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
-    largest possible value */
- int FPU_round_to_int(FPU_REG *r, u_char tag)
- {
--  u_char     very_big;
--  unsigned eax;
--
--  if (tag == TAG_Zero)
--    {
--      /* Make sure that zero is returned */
--      significand(r) = 0;
--      return 0;        /* o.k. */
--    }
--
--  if (exponent(r) > 63)
--    {
--      r->sigl = r->sigh = ~0;      /* The largest representable number */
--      return 1;        /* overflow */
--    }
--
--  eax = FPU_shrxs(&r->sigl, 63 - exponent(r));
--  very_big = !(~(r->sigh) | ~(r->sigl));  /* test for 0xfff...fff */
-+	u_char very_big;
-+	unsigned eax;
++/**
++ * blk_complete_request - end I/O on a request
++ * @req:      the request being processed
++ *
++ * Description:
++ *     Ends all I/O on a request. It does not handle partial completions,
++ *     unless the driver actually implements this in its completion callback
++ *     through requeueing. The actual completion happens out-of-order,
++ *     through a softirq handler. The user must have registered a completion
++ *     callback through blk_queue_softirq_done().
++ **/
 +
-+	if (tag == TAG_Zero) {
-+		/* Make sure that zero is returned */
-+		significand(r) = 0;
-+		return 0;	/* o.k. */
-+	}
++void blk_complete_request(struct request *req)
++{
++	struct list_head *cpu_list;
++	unsigned long flags;
 +
-+	if (exponent(r) > 63) {
-+		r->sigl = r->sigh = ~0;	/* The largest representable number */
-+		return 1;	/* overflow */
-+	}
++	BUG_ON(!req->q->softirq_done_fn);
++		
++	local_irq_save(flags);
 +
-+	eax = FPU_shrxs(&r->sigl, 63 - exponent(r));
-+	very_big = !(~(r->sigh) | ~(r->sigl));	/* test for 0xfff...fff */
- #define	half_or_more	(eax & 0x80000000)
- #define	frac_part	(eax)
- #define more_than_half  ((eax & 0x80000001) == 0x80000001)
--  switch (control_word & CW_RC)
--    {
--    case RC_RND:
--      if ( more_than_half               	/* nearest */
--	  || (half_or_more && (r->sigl & 1)) )	/* odd -> even */
--	{
--	  if ( very_big ) return 1;        /* overflow */
--	  significand(r) ++;
--	  return PRECISION_LOST_UP;
--	}
--      break;
--    case RC_DOWN:
--      if (frac_part && getsign(r))
--	{
--	  if ( very_big ) return 1;        /* overflow */
--	  significand(r) ++;
--	  return PRECISION_LOST_UP;
--	}
--      break;
--    case RC_UP:
--      if (frac_part && !getsign(r))
--	{
--	  if ( very_big ) return 1;        /* overflow */
--	  significand(r) ++;
--	  return PRECISION_LOST_UP;
-+	switch (control_word & CW_RC) {
-+	case RC_RND:
-+		if (more_than_half	/* nearest */
-+		    || (half_or_more && (r->sigl & 1))) {	/* odd -> even */
-+			if (very_big)
-+				return 1;	/* overflow */
-+			significand(r)++;
-+			return PRECISION_LOST_UP;
-+		}
-+		break;
-+	case RC_DOWN:
-+		if (frac_part && getsign(r)) {
-+			if (very_big)
-+				return 1;	/* overflow */
-+			significand(r)++;
-+			return PRECISION_LOST_UP;
-+		}
-+		break;
-+	case RC_UP:
-+		if (frac_part && !getsign(r)) {
-+			if (very_big)
-+				return 1;	/* overflow */
-+			significand(r)++;
-+			return PRECISION_LOST_UP;
-+		}
-+		break;
-+	case RC_CHOP:
-+		break;
- 	}
--      break;
--    case RC_CHOP:
--      break;
--    }
- 
--  return eax ? PRECISION_LOST_DOWN : 0;
-+	return eax ? PRECISION_LOST_DOWN : 0;
- 
- }
- 
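[Aside, not part of the patch] FPU_round_to_int() above shifts the fraction bits out into eax (with the low bit doubling as a sticky bit) and then dispatches on the rounding-control field. A minimal stand-alone sketch of that dispatch, with rc passed as 0..3 for RC_RND/RC_DOWN/RC_UP/RC_CHOP instead of the control-word bit patterns; names are hypothetical, and the PRECISION_LOST_UP/DOWN reporting and very_big overflow guard of the real function are omitted:

	#include <stdint.h>

	/* frac holds the shifted-out fraction: bit 31 is the half bit, bit 0
	 * collects the sticky bits, like the eax value above.  Returns nonzero
	 * when the integer part must be incremented. */
	static int round_increments(uint32_t frac, int negative, int rc,
				    uint64_t intpart)
	{
		int half_or_more   = (frac & 0x80000000u) != 0;
		int more_than_half = (frac & 0x80000001u) == 0x80000001u;

		switch (rc) {
		case 0:		/* RC_RND: nearest, ties to even */
			return more_than_half || (half_or_more && (intpart & 1));
		case 1:		/* RC_DOWN: toward -infinity */
			return frac && negative;
		case 2:		/* RC_UP: toward +infinity */
			return frac && !negative;
		default:	/* RC_CHOP: truncate */
			return 0;
		}
	}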
-@@ -1179,197 +1030,195 @@ int FPU_round_to_int(FPU_REG *r, u_char tag)
- 
- u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s)
- {
--  unsigned short tag_word = 0;
--  u_char tag;
--  int i;
--
--  if ( (addr_modes.default_mode == VM86) ||
--      ((addr_modes.default_mode == PM16)
--      ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX)) )
--    {
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_READ, s, 0x0e);
--      FPU_get_user(control_word, (unsigned short __user *) s);
--      FPU_get_user(partial_status, (unsigned short __user *) (s+2));
--      FPU_get_user(tag_word, (unsigned short __user *) (s+4));
--      FPU_get_user(instruction_address.offset, (unsigned short __user *) (s+6));
--      FPU_get_user(instruction_address.selector, (unsigned short __user *) (s+8));
--      FPU_get_user(operand_address.offset, (unsigned short __user *) (s+0x0a));
--      FPU_get_user(operand_address.selector, (unsigned short __user *) (s+0x0c));
--      RE_ENTRANT_CHECK_ON;
--      s += 0x0e;
--      if ( addr_modes.default_mode == VM86 )
--	{
--	  instruction_address.offset
--	    += (instruction_address.selector & 0xf000) << 4;
--	  operand_address.offset += (operand_address.selector & 0xf000) << 4;
-+	unsigned short tag_word = 0;
-+	u_char tag;
-+	int i;
++	cpu_list = &__get_cpu_var(blk_cpu_done);
++	list_add_tail(&req->donelist, cpu_list);
++	raise_softirq_irqoff(BLOCK_SOFTIRQ);
 +
-+	if ((addr_modes.default_mode == VM86) ||
-+	    ((addr_modes.default_mode == PM16)
-+	     ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) {
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_READ, s, 0x0e);
-+		FPU_get_user(control_word, (unsigned short __user *)s);
-+		FPU_get_user(partial_status, (unsigned short __user *)(s + 2));
-+		FPU_get_user(tag_word, (unsigned short __user *)(s + 4));
-+		FPU_get_user(instruction_address.offset,
-+			     (unsigned short __user *)(s + 6));
-+		FPU_get_user(instruction_address.selector,
-+			     (unsigned short __user *)(s + 8));
-+		FPU_get_user(operand_address.offset,
-+			     (unsigned short __user *)(s + 0x0a));
-+		FPU_get_user(operand_address.selector,
-+			     (unsigned short __user *)(s + 0x0c));
-+		RE_ENTRANT_CHECK_ON;
-+		s += 0x0e;
-+		if (addr_modes.default_mode == VM86) {
-+			instruction_address.offset
-+			    += (instruction_address.selector & 0xf000) << 4;
-+			operand_address.offset +=
-+			    (operand_address.selector & 0xf000) << 4;
-+		}
-+	} else {
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_READ, s, 0x1c);
-+		FPU_get_user(control_word, (unsigned short __user *)s);
-+		FPU_get_user(partial_status, (unsigned short __user *)(s + 4));
-+		FPU_get_user(tag_word, (unsigned short __user *)(s + 8));
-+		FPU_get_user(instruction_address.offset,
-+			     (unsigned long __user *)(s + 0x0c));
-+		FPU_get_user(instruction_address.selector,
-+			     (unsigned short __user *)(s + 0x10));
-+		FPU_get_user(instruction_address.opcode,
-+			     (unsigned short __user *)(s + 0x12));
-+		FPU_get_user(operand_address.offset,
-+			     (unsigned long __user *)(s + 0x14));
-+		FPU_get_user(operand_address.selector,
-+			     (unsigned long __user *)(s + 0x18));
-+		RE_ENTRANT_CHECK_ON;
-+		s += 0x1c;
- 	}
--    }
--  else
--    {
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_READ, s, 0x1c);
--      FPU_get_user(control_word, (unsigned short __user *) s);
--      FPU_get_user(partial_status, (unsigned short __user *) (s+4));
--      FPU_get_user(tag_word, (unsigned short __user *) (s+8));
--      FPU_get_user(instruction_address.offset, (unsigned long __user *) (s+0x0c));
--      FPU_get_user(instruction_address.selector, (unsigned short __user *) (s+0x10));
--      FPU_get_user(instruction_address.opcode, (unsigned short __user *) (s+0x12));
--      FPU_get_user(operand_address.offset, (unsigned long __user *) (s+0x14));
--      FPU_get_user(operand_address.selector, (unsigned long __user *) (s+0x18));
--      RE_ENTRANT_CHECK_ON;
--      s += 0x1c;
--    }
- 
- #ifdef PECULIAR_486
--  control_word &= ~0xe080;
--#endif /* PECULIAR_486 */ 
--
--  top = (partial_status >> SW_Top_Shift) & 7;
--
--  if ( partial_status & ~control_word & CW_Exceptions )
--    partial_status |= (SW_Summary | SW_Backward);
--  else
--    partial_status &= ~(SW_Summary | SW_Backward);
--
--  for ( i = 0; i < 8; i++ )
--    {
--      tag = tag_word & 3;
--      tag_word >>= 2;
--
--      if ( tag == TAG_Empty )
--	/* New tag is empty.  Accept it */
--	FPU_settag(i, TAG_Empty);
--      else if ( FPU_gettag(i) == TAG_Empty )
--	{
--	  /* Old tag is empty and new tag is not empty.  New tag is determined
--	     by old reg contents */
--	  if ( exponent(&fpu_register(i)) == - EXTENDED_Ebias )
--	    {
--	      if ( !(fpu_register(i).sigl | fpu_register(i).sigh) )
--		FPU_settag(i, TAG_Zero);
--	      else
--		FPU_settag(i, TAG_Special);
--	    }
--	  else if ( exponent(&fpu_register(i)) == 0x7fff - EXTENDED_Ebias )
--	    {
--	      FPU_settag(i, TAG_Special);
--	    }
--	  else if ( fpu_register(i).sigh & 0x80000000 )
--	    FPU_settag(i, TAG_Valid);
--	  else
--	    FPU_settag(i, TAG_Special);   /* An Un-normal */
--  	}
--      /* Else old tag is not empty and new tag is not empty.  Old tag
--	 remains correct */
--    }
--
--  return s;
--}
-+	control_word &= ~0xe080;
-+#endif /* PECULIAR_486 */
++	local_irq_restore(flags);
++}
++
++EXPORT_SYMBOL(blk_complete_request);
++	
++/*
++ * queue lock must be held
++ */
++static void end_that_request_last(struct request *req, int error)
++{
++	struct gendisk *disk = req->rq_disk;
 +
-+	top = (partial_status >> SW_Top_Shift) & 7;
++	if (blk_rq_tagged(req))
++		blk_queue_end_tag(req->q, req);
 +
-+	if (partial_status & ~control_word & CW_Exceptions)
-+		partial_status |= (SW_Summary | SW_Backward);
-+	else
-+		partial_status &= ~(SW_Summary | SW_Backward);
++	if (blk_queued_rq(req))
++		blkdev_dequeue_request(req);
 +
-+	for (i = 0; i < 8; i++) {
-+		tag = tag_word & 3;
-+		tag_word >>= 2;
++	if (unlikely(laptop_mode) && blk_fs_request(req))
++		laptop_io_completion();
 +
-+		if (tag == TAG_Empty)
-+			/* New tag is empty.  Accept it */
-+			FPU_settag(i, TAG_Empty);
-+		else if (FPU_gettag(i) == TAG_Empty) {
-+			/* Old tag is empty and new tag is not empty.  New tag is determined
-+			   by old reg contents */
-+			if (exponent(&fpu_register(i)) == -EXTENDED_Ebias) {
-+				if (!
-+				    (fpu_register(i).sigl | fpu_register(i).
-+				     sigh))
-+					FPU_settag(i, TAG_Zero);
-+				else
-+					FPU_settag(i, TAG_Special);
-+			} else if (exponent(&fpu_register(i)) ==
-+				   0x7fff - EXTENDED_Ebias) {
-+				FPU_settag(i, TAG_Special);
-+			} else if (fpu_register(i).sigh & 0x80000000)
-+				FPU_settag(i, TAG_Valid);
-+			else
-+				FPU_settag(i, TAG_Special);	/* An Un-normal */
-+		}
-+		/* Else old tag is not empty and new tag is not empty.  Old tag
-+		   remains correct */
++	/*
++	 * Account IO completion.  bar_rq isn't accounted as a normal
++	 * IO on queueing nor completion.  Accounting the containing
++	 * request is enough.
++	 */
++	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
++		unsigned long duration = jiffies - req->start_time;
++		const int rw = rq_data_dir(req);
++
++		__disk_stat_inc(disk, ios[rw]);
++		__disk_stat_add(disk, ticks[rw], duration);
++		disk_round_stats(disk);
++		disk->in_flight--;
 +	}
- 
-+	return s;
-+}
- 
- void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
- {
--  int i, regnr;
--  u_char __user *s = fldenv(addr_modes, data_address);
--  int offset = (top & 7) * 10, other = 80 - offset;
--
--  /* Copy all registers in stack order. */
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_READ,s,80);
--  __copy_from_user(register_base+offset, s, other);
--  if ( offset )
--    __copy_from_user(register_base, s+other, offset);
--  RE_ENTRANT_CHECK_ON;
--
--  for ( i = 0; i < 8; i++ )
--    {
--      regnr = (i+top) & 7;
--      if ( FPU_gettag(regnr) != TAG_Empty )
--	/* The loaded data over-rides all other cases. */
--	FPU_settag(regnr, FPU_tagof(&st(i)));
--    }
-+	int i, regnr;
-+	u_char __user *s = fldenv(addr_modes, data_address);
-+	int offset = (top & 7) * 10, other = 80 - offset;
 +
-+	/* Copy all registers in stack order. */
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_READ, s, 80);
-+	__copy_from_user(register_base + offset, s, other);
-+	if (offset)
-+		__copy_from_user(register_base, s + other, offset);
-+	RE_ENTRANT_CHECK_ON;
++	if (req->end_io)
++		req->end_io(req, error);
++	else {
++		if (blk_bidi_rq(req))
++			__blk_put_request(req->next_rq->q, req->next_rq);
 +
-+	for (i = 0; i < 8; i++) {
-+		regnr = (i + top) & 7;
-+		if (FPU_gettag(regnr) != TAG_Empty)
-+			/* The loaded data over-rides all other cases. */
-+			FPU_settag(regnr, FPU_tagof(&st(i)));
-+	}
- 
- }
- 
--
- u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
- {
--  if ( (addr_modes.default_mode == VM86) ||
--      ((addr_modes.default_mode == PM16)
--      ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX)) )
--    {
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_WRITE,d,14);
-+	if ((addr_modes.default_mode == VM86) ||
-+	    ((addr_modes.default_mode == PM16)
-+	     ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) {
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_WRITE, d, 14);
- #ifdef PECULIAR_486
--      FPU_put_user(control_word & ~0xe080, (unsigned long __user *) d);
-+		FPU_put_user(control_word & ~0xe080, (unsigned long __user *)d);
- #else
--      FPU_put_user(control_word, (unsigned short __user *) d);
-+		FPU_put_user(control_word, (unsigned short __user *)d);
- #endif /* PECULIAR_486 */
--      FPU_put_user(status_word(), (unsigned short __user *) (d+2));
--      FPU_put_user(fpu_tag_word, (unsigned short __user *) (d+4));
--      FPU_put_user(instruction_address.offset, (unsigned short __user *) (d+6));
--      FPU_put_user(operand_address.offset, (unsigned short __user *) (d+0x0a));
--      if ( addr_modes.default_mode == VM86 )
--	{
--	  FPU_put_user((instruction_address.offset & 0xf0000) >> 4,
--		      (unsigned short __user *) (d+8));
--	  FPU_put_user((operand_address.offset & 0xf0000) >> 4,
--		      (unsigned short __user *) (d+0x0c));
--	}
--      else
--	{
--	  FPU_put_user(instruction_address.selector, (unsigned short __user *) (d+8));
--	  FPU_put_user(operand_address.selector, (unsigned short __user *) (d+0x0c));
--	}
--      RE_ENTRANT_CHECK_ON;
--      d += 0x0e;
--    }
--  else
--    {
--      RE_ENTRANT_CHECK_OFF;
--      FPU_access_ok(VERIFY_WRITE, d, 7*4);
-+		FPU_put_user(status_word(), (unsigned short __user *)(d + 2));
-+		FPU_put_user(fpu_tag_word, (unsigned short __user *)(d + 4));
-+		FPU_put_user(instruction_address.offset,
-+			     (unsigned short __user *)(d + 6));
-+		FPU_put_user(operand_address.offset,
-+			     (unsigned short __user *)(d + 0x0a));
-+		if (addr_modes.default_mode == VM86) {
-+			FPU_put_user((instruction_address.
-+				      offset & 0xf0000) >> 4,
-+				     (unsigned short __user *)(d + 8));
-+			FPU_put_user((operand_address.offset & 0xf0000) >> 4,
-+				     (unsigned short __user *)(d + 0x0c));
-+		} else {
-+			FPU_put_user(instruction_address.selector,
-+				     (unsigned short __user *)(d + 8));
-+			FPU_put_user(operand_address.selector,
-+				     (unsigned short __user *)(d + 0x0c));
-+		}
-+		RE_ENTRANT_CHECK_ON;
-+		d += 0x0e;
-+	} else {
-+		RE_ENTRANT_CHECK_OFF;
-+		FPU_access_ok(VERIFY_WRITE, d, 7 * 4);
- #ifdef PECULIAR_486
--      control_word &= ~0xe080;
--      /* An 80486 sets nearly all of the reserved bits to 1. */
--      control_word |= 0xffff0040;
--      partial_status = status_word() | 0xffff0000;
--      fpu_tag_word |= 0xffff0000;
--      I387.soft.fcs &= ~0xf8000000;
--      I387.soft.fos |= 0xffff0000;
-+		control_word &= ~0xe080;
-+		/* An 80486 sets nearly all of the reserved bits to 1. */
-+		control_word |= 0xffff0040;
-+		partial_status = status_word() | 0xffff0000;
-+		fpu_tag_word |= 0xffff0000;
-+		I387.soft.fcs &= ~0xf8000000;
-+		I387.soft.fos |= 0xffff0000;
- #endif /* PECULIAR_486 */
--      if (__copy_to_user(d, &control_word, 7*4))
--	FPU_abort;
--      RE_ENTRANT_CHECK_ON;
--      d += 0x1c;
--    }
--  
--  control_word |= CW_Exceptions;
--  partial_status &= ~(SW_Summary | SW_Backward);
--
--  return d;
--}
-+		if (__copy_to_user(d, &control_word, 7 * 4))
-+			FPU_abort;
-+		RE_ENTRANT_CHECK_ON;
-+		d += 0x1c;
++		__blk_put_request(req->q, req);
 +	}
- 
-+	control_word |= CW_Exceptions;
-+	partial_status &= ~(SW_Summary | SW_Backward);
++}
 +
-+	return d;
++static inline void __end_request(struct request *rq, int uptodate,
++				 unsigned int nr_bytes)
++{
++	int error = 0;
++
++	if (uptodate <= 0)
++		error = uptodate ? uptodate : -EIO;
++
++	__blk_end_request(rq, error, nr_bytes);
 +}
- 
- void fsave(fpu_addr_modes addr_modes, u_char __user *data_address)
- {
--  u_char __user *d;
--  int offset = (top & 7) * 10, other = 80 - offset;
-+	u_char __user *d;
-+	int offset = (top & 7) * 10, other = 80 - offset;
- 
--  d = fstenv(addr_modes, data_address);
-+	d = fstenv(addr_modes, data_address);
- 
--  RE_ENTRANT_CHECK_OFF;
--  FPU_access_ok(VERIFY_WRITE,d,80);
-+	RE_ENTRANT_CHECK_OFF;
-+	FPU_access_ok(VERIFY_WRITE, d, 80);
- 
--  /* Copy all registers in stack order. */
--  if (__copy_to_user(d, register_base+offset, other))
--    FPU_abort;
--  if ( offset )
--    if (__copy_to_user(d+other, register_base, offset))
--      FPU_abort;
--  RE_ENTRANT_CHECK_ON;
-+	/* Copy all registers in stack order. */
-+	if (__copy_to_user(d, register_base + offset, other))
-+		FPU_abort;
-+	if (offset)
-+		if (__copy_to_user(d + other, register_base, offset))
-+			FPU_abort;
-+	RE_ENTRANT_CHECK_ON;
- 
--  finit();
-+	finit();
- }
- 
- /*===========================================================================*/
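[Aside, not part of the patch] Interleaved with the math-emu removals above, the new blk-core hunks add the softirq completion path (__end_that_request_first, blk_done_softirq, blk_complete_request, end_that_request_last). A rough sketch of how a driver is expected to use it, going by the kernel-doc quoted above; the mydev_* names and the single-request bookkeeping are hypothetical, only blk_queue_softirq_done(), blk_complete_request() and __blk_end_request() come from the patched tree:

	#include <linux/blkdev.h>
	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static struct request *mydev_current_rq;	/* hypothetical bookkeeping */

	/* Runs later from BLOCK_SOFTIRQ context via blk_done_softirq(),
	 * not from the hardware interrupt itself. */
	static void mydev_softirq_done(struct request *rq)
	{
		unsigned long flags;

		spin_lock_irqsave(rq->q->queue_lock, flags);
		__blk_end_request(rq, 0, rq->hard_nr_sectors << 9);
		spin_unlock_irqrestore(rq->q->queue_lock, flags);
	}

	static irqreturn_t mydev_interrupt(int irq, void *dev_id)
	{
		/* hardware finished the request: defer the completion work */
		blk_complete_request(mydev_current_rq);
		return IRQ_HANDLED;
	}

	/* during queue setup:
	 *	blk_queue_softirq_done(queue, mydev_softirq_done);
	 */

blk_complete_request() only queues the request on the per-CPU blk_cpu_done list and raises BLOCK_SOFTIRQ; the registered softirq_done_fn then does the real completion, which keeps the hardware IRQ handler short.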
-diff --git a/arch/x86/math-emu/reg_mul.c b/arch/x86/math-emu/reg_mul.c
-index 40f50b6..36c37f7 100644
---- a/arch/x86/math-emu/reg_mul.c
-+++ b/arch/x86/math-emu/reg_mul.c
-@@ -20,7 +20,6 @@
- #include "reg_constant.h"
- #include "fpu_system.h"
- 
--
- /*
-   Multiply two registers to give a register result.
-   The sources are st(deststnr) and (b,tagb,signb).
-@@ -29,104 +28,88 @@
- /* This routine must be called with non-empty source registers */
- int FPU_mul(FPU_REG const *b, u_char tagb, int deststnr, int control_w)
- {
--  FPU_REG *a = &st(deststnr);
--  FPU_REG *dest = a;
--  u_char taga = FPU_gettagi(deststnr);
--  u_char saved_sign = getsign(dest);
--  u_char sign = (getsign(a) ^ getsign(b));
--  int tag;
--
-+	FPU_REG *a = &st(deststnr);
-+	FPU_REG *dest = a;
-+	u_char taga = FPU_gettagi(deststnr);
-+	u_char saved_sign = getsign(dest);
-+	u_char sign = (getsign(a) ^ getsign(b));
-+	int tag;
- 
--  if ( !(taga | tagb) )
--    {
--      /* Both regs Valid, this should be the most common case. */
-+	if (!(taga | tagb)) {
-+		/* Both regs Valid, this should be the most common case. */
- 
--      tag = FPU_u_mul(a, b, dest, control_w, sign, exponent(a) + exponent(b));
--      if ( tag < 0 )
--	{
--	  setsign(dest, saved_sign);
--	  return tag;
-+		tag =
-+		    FPU_u_mul(a, b, dest, control_w, sign,
-+			      exponent(a) + exponent(b));
-+		if (tag < 0) {
-+			setsign(dest, saved_sign);
-+			return tag;
-+		}
-+		FPU_settagi(deststnr, tag);
-+		return tag;
- 	}
--      FPU_settagi(deststnr, tag);
--      return tag;
--    }
- 
--  if ( taga == TAG_Special )
--    taga = FPU_Special(a);
--  if ( tagb == TAG_Special )
--    tagb = FPU_Special(b);
-+	if (taga == TAG_Special)
-+		taga = FPU_Special(a);
-+	if (tagb == TAG_Special)
-+		tagb = FPU_Special(b);
- 
--  if ( ((taga == TAG_Valid) && (tagb == TW_Denormal))
-+	if (((taga == TAG_Valid) && (tagb == TW_Denormal))
- 	    || ((taga == TW_Denormal) && (tagb == TAG_Valid))
--	    || ((taga == TW_Denormal) && (tagb == TW_Denormal)) )
--    {
--      FPU_REG x, y;
--      if ( denormal_operand() < 0 )
--	return FPU_Exception;
--
--      FPU_to_exp16(a, &x);
--      FPU_to_exp16(b, &y);
--      tag = FPU_u_mul(&x, &y, dest, control_w, sign,
--		      exponent16(&x) + exponent16(&y));
--      if ( tag < 0 )
--	{
--	  setsign(dest, saved_sign);
--	  return tag;
--	}
--      FPU_settagi(deststnr, tag);
--      return tag;
--    }
--  else if ( (taga <= TW_Denormal) && (tagb <= TW_Denormal) )
--    {
--      if ( ((tagb == TW_Denormal) || (taga == TW_Denormal))
--	   && (denormal_operand() < 0) )
--	return FPU_Exception;
-+	    || ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
-+		FPU_REG x, y;
-+		if (denormal_operand() < 0)
-+			return FPU_Exception;
- 
--      /* Must have either both arguments == zero, or
--	 one valid and the other zero.
--	 The result is therefore zero. */
--      FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
--      /* The 80486 book says that the answer is +0, but a real
--	 80486 behaves this way.
--	 IEEE-754 apparently says it should be this way. */
--      setsign(dest, sign);
--      return TAG_Zero;
--    }
--      /* Must have infinities, NaNs, etc */
--  else if ( (taga == TW_NaN) || (tagb == TW_NaN) )
--    {
--      return real_2op_NaN(b, tagb, deststnr, &st(0));
--    }
--  else if ( ((taga == TW_Infinity) && (tagb == TAG_Zero))
--	    || ((tagb == TW_Infinity) && (taga == TAG_Zero)) )
--    {
--      return arith_invalid(deststnr);  /* Zero*Infinity is invalid */
--    }
--  else if ( ((taga == TW_Denormal) || (tagb == TW_Denormal))
--	    && (denormal_operand() < 0) )
--    {
--      return FPU_Exception;
--    }
--  else if (taga == TW_Infinity)
--    {
--      FPU_copy_to_regi(a, TAG_Special, deststnr);
--      setsign(dest, sign);
--      return TAG_Special;
--    }
--  else if (tagb == TW_Infinity)
--    {
--      FPU_copy_to_regi(b, TAG_Special, deststnr);
--      setsign(dest, sign);
--      return TAG_Special;
--    }
-+		FPU_to_exp16(a, &x);
-+		FPU_to_exp16(b, &y);
-+		tag = FPU_u_mul(&x, &y, dest, control_w, sign,
-+				exponent16(&x) + exponent16(&y));
-+		if (tag < 0) {
-+			setsign(dest, saved_sign);
-+			return tag;
-+		}
-+		FPU_settagi(deststnr, tag);
-+		return tag;
-+	} else if ((taga <= TW_Denormal) && (tagb <= TW_Denormal)) {
-+		if (((tagb == TW_Denormal) || (taga == TW_Denormal))
-+		    && (denormal_operand() < 0))
-+			return FPU_Exception;
- 
-+		/* Must have either both arguments == zero, or
-+		   one valid and the other zero.
-+		   The result is therefore zero. */
-+		FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
-+		/* The 80486 book says that the answer is +0, but a real
-+		   80486 behaves this way.
-+		   IEEE-754 apparently says it should be this way. */
-+		setsign(dest, sign);
-+		return TAG_Zero;
-+	}
-+	/* Must have infinities, NaNs, etc */
-+	else if ((taga == TW_NaN) || (tagb == TW_NaN)) {
-+		return real_2op_NaN(b, tagb, deststnr, &st(0));
-+	} else if (((taga == TW_Infinity) && (tagb == TAG_Zero))
-+		   || ((tagb == TW_Infinity) && (taga == TAG_Zero))) {
-+		return arith_invalid(deststnr);	/* Zero*Infinity is invalid */
-+	} else if (((taga == TW_Denormal) || (tagb == TW_Denormal))
-+		   && (denormal_operand() < 0)) {
-+		return FPU_Exception;
-+	} else if (taga == TW_Infinity) {
-+		FPU_copy_to_regi(a, TAG_Special, deststnr);
-+		setsign(dest, sign);
-+		return TAG_Special;
-+	} else if (tagb == TW_Infinity) {
-+		FPU_copy_to_regi(b, TAG_Special, deststnr);
-+		setsign(dest, sign);
-+		return TAG_Special;
-+	}
- #ifdef PARANOID
--  else
--    {
--      EXCEPTION(EX_INTERNAL|0x102);
--      return FPU_Exception;
--    }
--#endif /* PARANOID */ 
-+	else {
-+		EXCEPTION(EX_INTERNAL | 0x102);
-+		return FPU_Exception;
-+	}
-+#endif /* PARANOID */
- 
- 	return 0;
- }
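[Aside, not part of the patch] For the exceptional operand classes in FPU_mul() above, the dispatch order matters: NaNs are handled first, then the invalid Zero*Infinity combination, then denormal operands, then a lone Infinity simply propagates with sign = getsign(a) ^ getsign(b). A condensed stand-alone sketch of that ordering — the enums are hypothetical, and the Valid and Zero fast paths of the real function are omitted:

	enum opclass { C_VALID, C_ZERO, C_DENORMAL, C_INFINITY, C_NAN };
	enum mulres  { R_NAN, R_INVALID, R_DENORMAL, R_INFINITY, R_NORMAL };

	static enum mulres classify_mul(enum opclass a, enum opclass b)
	{
		if (a == C_NAN || b == C_NAN)
			return R_NAN;			/* -> real_2op_NaN() */
		if ((a == C_INFINITY && b == C_ZERO) ||
		    (b == C_INFINITY && a == C_ZERO))
			return R_INVALID;		/* -> arith_invalid() */
		if (a == C_DENORMAL || b == C_DENORMAL)
			return R_DENORMAL;		/* -> denormal_operand() */
		if (a == C_INFINITY || b == C_INFINITY)
			return R_INFINITY;		/* copy the infinity */
		return R_NORMAL;
	}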
-diff --git a/arch/x86/math-emu/status_w.h b/arch/x86/math-emu/status_w.h
-index 59e7330..54a3f22 100644
---- a/arch/x86/math-emu/status_w.h
-+++ b/arch/x86/math-emu/status_w.h
-@@ -10,7 +10,7 @@
- #ifndef _STATUS_H_
- #define _STATUS_H_
- 
--#include "fpu_emu.h"    /* for definition of PECULIAR_486 */
-+#include "fpu_emu.h"		/* for definition of PECULIAR_486 */
- 
- #ifdef __ASSEMBLY__
- #define	Const__(x)	$##x
-@@ -34,7 +34,7 @@
- #define SW_Denorm_Op   	Const__(0x0002)	/* denormalized operand */
- #define SW_Invalid     	Const__(0x0001)	/* invalid operation */
- 
--#define SW_Exc_Mask     Const__(0x27f)  /* Status word exception bit mask */
-+#define SW_Exc_Mask     Const__(0x27f)	/* Status word exception bit mask */
- 
- #ifndef __ASSEMBLY__
- 
-@@ -50,8 +50,8 @@
-   ((partial_status & ~SW_Top & 0xffff) | ((top << SW_Top_Shift) & SW_Top))
- static inline void setcc(int cc)
- {
--	partial_status &= ~(SW_C0|SW_C1|SW_C2|SW_C3);
--	partial_status |= (cc) & (SW_C0|SW_C1|SW_C2|SW_C3);
-+	partial_status &= ~(SW_C0 | SW_C1 | SW_C2 | SW_C3);
-+	partial_status |= (cc) & (SW_C0 | SW_C1 | SW_C2 | SW_C3);
- }
- 
- #ifdef PECULIAR_486
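[Aside, not part of the patch] setcc() in status_w.h above updates the four x87 condition-code bits with a clear-then-set on the partial status word. A user-space rendering of the same pattern; the constant values are the standard x87 status-word bit positions (C0=bit 8, C1=bit 9, C2=bit 10, C3=bit 14) rather than values quoted from this hunk:

	#define SW_C0	0x0100
	#define SW_C1	0x0200
	#define SW_C2	0x0400
	#define SW_C3	0x4000

	static unsigned short set_condition_codes(unsigned short status, int cc)
	{
		status &= ~(SW_C0 | SW_C1 | SW_C2 | SW_C3);	/* clear C0..C3 */
		status |= cc & (SW_C0 | SW_C1 | SW_C2 | SW_C3);	/* set requested bits */
		return status;
	}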
-diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32
-index 362b4ad..c36ae88 100644
---- a/arch/x86/mm/Makefile_32
-+++ b/arch/x86/mm/Makefile_32
-@@ -2,9 +2,8 @@
- # Makefile for the linux i386-specific parts of the memory manager.
- #
- 
--obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap_32.o extable_32.o pageattr_32.o mmap_32.o
-+obj-y	:= init_32.o pgtable_32.o fault.o ioremap.o extable.o pageattr.o mmap.o
- 
- obj-$(CONFIG_NUMA) += discontig_32.o
- obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
- obj-$(CONFIG_HIGHMEM) += highmem_32.o
--obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap_32.o
-diff --git a/arch/x86/mm/Makefile_64 b/arch/x86/mm/Makefile_64
-index 6bcb479..688c8c2 100644
---- a/arch/x86/mm/Makefile_64
-+++ b/arch/x86/mm/Makefile_64
-@@ -2,9 +2,8 @@
- # Makefile for the linux x86_64-specific parts of the memory manager.
- #
- 
--obj-y	 := init_64.o fault_64.o ioremap_64.o extable_64.o pageattr_64.o mmap_64.o
-+obj-y	 := init_64.o fault.o ioremap.o extable.o pageattr.o mmap.o
- obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
- obj-$(CONFIG_NUMA) += numa_64.o
- obj-$(CONFIG_K8_NUMA) += k8topology_64.o
- obj-$(CONFIG_ACPI_NUMA) += srat_64.o
--
-diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
-deleted file mode 100644
-index f14da2a..0000000
---- a/arch/x86/mm/boot_ioremap_32.c
-+++ /dev/null
-@@ -1,100 +0,0 @@
--/*
-- * arch/i386/mm/boot_ioremap.c
-- * 
-- * Re-map functions for early boot-time before paging_init() when the 
-- * boot-time pagetables are still in use
-- *
-- * Written by Dave Hansen <haveblue at us.ibm.com>
-- */
--
--
--/*
-- * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
-- * keeps that from happening.  If anyone has a better way, I'm listening.
-- *
-- * boot_pte_t is defined only if this all works correctly
-- */
--
--#undef CONFIG_X86_PAE
--#undef CONFIG_PARAVIRT
--#include <asm/page.h>
--#include <asm/pgtable.h>
--#include <asm/tlbflush.h>
--#include <linux/init.h>
--#include <linux/stddef.h>
--
--/* 
-- * I'm cheating here.  It is known that the two boot PTE pages are 
-- * allocated next to each other.  I'm pretending that they're just
-- * one big array. 
-- */
--
--#define BOOT_PTE_PTRS (PTRS_PER_PTE*2)
--
--static unsigned long boot_pte_index(unsigned long vaddr) 
--{
--	return __pa(vaddr) >> PAGE_SHIFT;
--}
--
--static inline boot_pte_t* boot_vaddr_to_pte(void *address)
--{
--	boot_pte_t* boot_pg = (boot_pte_t*)pg0;
--	return &boot_pg[boot_pte_index((unsigned long)address)];
--}
--
--/*
-- * This is only for a caller who is clever enough to page-align
-- * phys_addr and virtual_source, and who also has a preference
-- * about which virtual address from which to steal ptes
-- */
--static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages, 
--		    void* virtual_source)
--{
--	boot_pte_t* pte;
--	int i;
--	char *vaddr = virtual_source;
--
--	pte = boot_vaddr_to_pte(virtual_source);
--	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
--		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
--		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
--	}
--}
--
--/* the virtual space we're going to remap comes from this array */
--#define BOOT_IOREMAP_PAGES 4
--#define BOOT_IOREMAP_SIZE (BOOT_IOREMAP_PAGES*PAGE_SIZE)
--static __initdata char boot_ioremap_space[BOOT_IOREMAP_SIZE]
--		       __attribute__ ((aligned (PAGE_SIZE)));
--
--/*
-- * This only applies to things which need to ioremap before paging_init()
-- * bt_ioremap() and plain ioremap() are both useless at this point.
-- * 
-- * When used, we're still using the boot-time pagetables, which only
-- * have 2 PTE pages mapping the first 8MB
-- *
-- * There is no unmap.  The boot-time PTE pages aren't used after boot.
-- * If you really want the space back, just remap it yourself.
-- * boot_ioremap(&ioremap_space-PAGE_OFFSET, BOOT_IOREMAP_SIZE)
-- */
--__init void* boot_ioremap(unsigned long phys_addr, unsigned long size)
--{
--	unsigned long last_addr, offset;
--	unsigned int nrpages;
--	
--	last_addr = phys_addr + size - 1;
--
--	/* page align the requested address */
--	offset = phys_addr & ~PAGE_MASK;
--	phys_addr &= PAGE_MASK;
--	size = PAGE_ALIGN(last_addr) - phys_addr;
--	
--	nrpages = size >> PAGE_SHIFT;
--	if (nrpages > BOOT_IOREMAP_PAGES)
--		return NULL;
--	
--	__boot_ioremap(phys_addr, nrpages, boot_ioremap_space);
--
--	return &boot_ioremap_space[offset];
--}
-diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
-index 13a474d..04b1d20 100644
---- a/arch/x86/mm/discontig_32.c
-+++ b/arch/x86/mm/discontig_32.c
-@@ -32,6 +32,7 @@
- #include <linux/kexec.h>
- #include <linux/pfn.h>
- #include <linux/swap.h>
-+#include <linux/acpi.h>
- 
- #include <asm/e820.h>
- #include <asm/setup.h>
-@@ -103,14 +104,10 @@ extern unsigned long highend_pfn, highstart_pfn;
- 
- #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
- 
--static unsigned long node_remap_start_pfn[MAX_NUMNODES];
- unsigned long node_remap_size[MAX_NUMNODES];
--static unsigned long node_remap_offset[MAX_NUMNODES];
- static void *node_remap_start_vaddr[MAX_NUMNODES];
- void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
- 
--static void *node_remap_end_vaddr[MAX_NUMNODES];
--static void *node_remap_alloc_vaddr[MAX_NUMNODES];
- static unsigned long kva_start_pfn;
- static unsigned long kva_pages;
- /*
-@@ -167,6 +164,22 @@ static void __init allocate_pgdat(int nid)
- 	}
- }
- 
-+#ifdef CONFIG_DISCONTIGMEM
-+/*
-+ * In the discontig memory model, a portion of the kernel virtual area (KVA)
-+ * is reserved and portions of nodes are mapped using it. This is to allow
-+ * node-local memory to be allocated for structures that would normally require
-+ * ZONE_NORMAL. The memory is allocated with alloc_remap() and callers
-+ * should be prepared to allocate from the bootmem allocator instead. This KVA
-+ * mechanism is incompatible with SPARSEMEM as it makes assumptions about the
-+ * layout of memory that are broken if alloc_remap() succeeds for some of the
-+ * map and fails for others
-+ */
-+static unsigned long node_remap_start_pfn[MAX_NUMNODES];
-+static void *node_remap_end_vaddr[MAX_NUMNODES];
-+static void *node_remap_alloc_vaddr[MAX_NUMNODES];
-+static unsigned long node_remap_offset[MAX_NUMNODES];
 +
- void *alloc_remap(int nid, unsigned long size)
- {
- 	void *allocation = node_remap_alloc_vaddr[nid];
-@@ -263,11 +276,46 @@ static unsigned long calculate_numa_remap_pages(void)
- 	return reserve_pages;
- }
- 
-+static void init_remap_allocator(int nid)
++/**
++ * blk_rq_bytes - Returns bytes left to complete in the entire request
++ **/
++unsigned int blk_rq_bytes(struct request *rq)
 +{
-+	node_remap_start_vaddr[nid] = pfn_to_kaddr(
-+			kva_start_pfn + node_remap_offset[nid]);
-+	node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
-+		(node_remap_size[nid] * PAGE_SIZE);
-+	node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
-+		ALIGN(sizeof(pg_data_t), PAGE_SIZE);
++	if (blk_fs_request(rq))
++		return rq->hard_nr_sectors << 9;
 +
-+	printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
-+		(ulong) node_remap_start_vaddr[nid],
-+		(ulong) pfn_to_kaddr(highstart_pfn
-+		   + node_remap_offset[nid] + node_remap_size[nid]));
++	return rq->data_len;
 +}
-+#else
-+void *alloc_remap(int nid, unsigned long size)
++EXPORT_SYMBOL_GPL(blk_rq_bytes);
++
++/**
++ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
++ **/
++unsigned int blk_rq_cur_bytes(struct request *rq)
 +{
-+	return NULL;
++	if (blk_fs_request(rq))
++		return rq->current_nr_sectors << 9;
++
++	if (rq->bio)
++		return rq->bio->bi_size;
++
++	return rq->data_len;
 +}
++EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
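
The two byte-count accessors above are typically fed straight into the completion helpers added further down in this patch. A minimal sketch, assuming a hypothetical driver helper my_complete_rq() and an error code already computed by the driver (not part of the patch):

    #include <linux/blkdev.h>

    /* Hypothetical helper: finish whatever is left of the request in one go. */
    static void my_complete_rq(struct request *rq, int error)
    {
    	/* blk_rq_bytes() covers both fs and pc requests */
    	if (blk_end_request(rq, error, blk_rq_bytes(rq)))
    		printk(KERN_WARNING "request still has pending buffers\n");
    }
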
 +
-+static unsigned long calculate_numa_remap_pages(void)
++/**
++ * end_queued_request - end all I/O on a queued request
++ * @rq:		the request being processed
++ * @uptodate:	error value or 0/1 uptodate flag
++ *
++ * Description:
++ *     Ends all I/O on a request, and removes it from the block layer queues.
++ *     Not suitable for normal IO completion, unless the driver still has
++ *     the request attached to the block layer.
++ *
++ **/
++void end_queued_request(struct request *rq, int uptodate)
 +{
-+	return 0;
++	__end_request(rq, uptodate, blk_rq_bytes(rq));
 +}
++EXPORT_SYMBOL(end_queued_request);
 +
-+static void init_remap_allocator(int nid)
++/**
++ * end_dequeued_request - end all I/O on a dequeued request
++ * @rq:		the request being processed
++ * @uptodate:	error value or 0/1 uptodate flag
++ *
++ * Description:
++ *     Ends all I/O on a request. The request must already have been
++ *     dequeued using blkdev_dequeue_request(), as is normally the case
++ *     for most drivers.
++ *
++ **/
++void end_dequeued_request(struct request *rq, int uptodate)
 +{
++	__end_request(rq, uptodate, blk_rq_bytes(rq));
 +}
++EXPORT_SYMBOL(end_dequeued_request);
 +
-+void __init remap_numa_kva(void)
++
++/**
++ * end_request - end I/O on the current segment of the request
++ * @req:	the request being processed
++ * @uptodate:	error value or 0/1 uptodate flag
++ *
++ * Description:
++ *     Ends I/O on the current segment of a request. If that is the only
++ *     remaining segment, the request is also completed and freed.
++ *
++ *     This is a remnant of how older block drivers handled IO completions.
++ *     Modern drivers typically end IO on the full request in one go, unless
++ *     they have a residual value to account for. For that case this function
++ *     isn't really useful, unless the residual just happens to be the
++ *     full current segment. In other words, don't use this function in new
++ *     code. Either use end_request_completely(), or the
++ *     end_that_request_chunk() (along with end_that_request_last()) for
++ *     partial completions.
++ *
++ **/
++void end_request(struct request *req, int uptodate)
 +{
++	__end_request(req, uptodate, req->hard_cur_sectors << 9);
 +}
-+#endif /* CONFIG_DISCONTIGMEM */
++EXPORT_SYMBOL(end_request);
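
The old per-segment convention described above would be used roughly like this from a driver's completion path; my_segment_done() and the ok flag are illustrative, not taken from this patch:

    #include <linux/blkdev.h>

    /* Called when the hardware has finished the current segment. */
    static void my_segment_done(struct request *rq, int ok)
    {
    	/* uptodate: 1 = success, 0 = I/O error on this segment */
    	end_request(rq, ok);
    }
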
 +
- extern void setup_bootmem_allocator(void);
- unsigned long __init setup_memory(void)
- {
- 	int nid;
- 	unsigned long system_start_pfn, system_max_low_pfn;
-+	unsigned long wasted_pages;
- 
- 	/*
- 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
-@@ -288,11 +336,18 @@ unsigned long __init setup_memory(void)
- 
- #ifdef CONFIG_BLK_DEV_INITRD
- 	/* Numa kva area is below the initrd */
--	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image)
--		kva_start_pfn = PFN_DOWN(boot_params.hdr.ramdisk_image)
-+	if (initrd_start)
-+		kva_start_pfn = PFN_DOWN(initrd_start - PAGE_OFFSET)
- 			- kva_pages;
- #endif
--	kva_start_pfn -= kva_start_pfn & (PTRS_PER_PTE-1);
++/**
++ * blk_end_io - Generic end_io function to complete a request.
++ * @rq:           the request being processed
++ * @error:        0 for success, < 0 for error
++ * @nr_bytes:     number of bytes to complete @rq
++ * @bidi_bytes:   number of bytes to complete @rq->next_rq
++ * @drv_callback: function called between completion of bios in the request
++ *                and completion of the request.
++ *                If the callback returns non 0, this helper returns without
++ *                completion of the request.
++ *
++ * Description:
++ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
++ *     If @rq has leftover, sets it up for the next range of segments.
++ *
++ * Return:
++ *     0 - we are done with this request
++ *     1 - this request is not freed yet, it still has pending buffers.
++ **/
++static int blk_end_io(struct request *rq, int error, int nr_bytes,
++		      int bidi_bytes, int (drv_callback)(struct request *))
++{
++	struct request_queue *q = rq->q;
++	unsigned long flags = 0UL;
 +
-+	/*
-+	 * We waste pages past at the end of the KVA for no good reason other
-+	 * than how it is located. This is bad.
-+	 */
-+	wasted_pages = kva_start_pfn & (PTRS_PER_PTE-1);
-+	kva_start_pfn -= wasted_pages;
-+	kva_pages += wasted_pages;
- 
- 	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
- 	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
-@@ -318,19 +373,9 @@ unsigned long __init setup_memory(void)
- 	printk("Low memory ends at vaddr %08lx\n",
- 			(ulong) pfn_to_kaddr(max_low_pfn));
- 	for_each_online_node(nid) {
--		node_remap_start_vaddr[nid] = pfn_to_kaddr(
--				kva_start_pfn + node_remap_offset[nid]);
--		/* Init the node remap allocator */
--		node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
--			(node_remap_size[nid] * PAGE_SIZE);
--		node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
--			ALIGN(sizeof(pg_data_t), PAGE_SIZE);
-+		init_remap_allocator(nid);
- 
- 		allocate_pgdat(nid);
--		printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
--			(ulong) node_remap_start_vaddr[nid],
--			(ulong) pfn_to_kaddr(highstart_pfn
--			   + node_remap_offset[nid] + node_remap_size[nid]));
- 	}
- 	printk("High memory starts at vaddr %08lx\n",
- 			(ulong) pfn_to_kaddr(highstart_pfn));
-@@ -345,7 +390,8 @@ unsigned long __init setup_memory(void)
- 
- void __init numa_kva_reserve(void)
- {
--	reserve_bootmem(PFN_PHYS(kva_start_pfn),PFN_PHYS(kva_pages));
-+	if (kva_pages)
-+		reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages));
- }
- 
- void __init zone_sizes_init(void)
-@@ -430,3 +476,29 @@ int memory_add_physaddr_to_nid(u64 addr)
- 
- EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
- #endif
++	if (blk_fs_request(rq) || blk_pc_request(rq)) {
++		if (__end_that_request_first(rq, error, nr_bytes))
++			return 1;
 +
-+#ifndef CONFIG_HAVE_ARCH_PARSE_SRAT
-+/*
-+ * XXX FIXME: Make SLIT table parsing available to 32-bit NUMA
++		/* Bidi request must be completed as a whole */
++		if (blk_bidi_rq(rq) &&
++		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
++			return 1;
++	}
++
++	/* Special feature for tricky drivers */
++	if (drv_callback && drv_callback(rq))
++		return 1;
++
++	add_disk_randomness(rq->rq_disk);
++
++	spin_lock_irqsave(q->queue_lock, flags);
++	end_that_request_last(rq, error);
++	spin_unlock_irqrestore(q->queue_lock, flags);
++
++	return 0;
++}
++
++/**
++ * blk_end_request - Helper function for drivers to complete the request.
++ * @rq:       the request being processed
++ * @error:    0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete
 + *
-+ * These stub functions are needed to compile 32-bit NUMA when SRAT is
-+ * not set. There are functions in srat_64.c for parsing this table
-+ * and it may be possible to make them common functions.
-+ */
-+void acpi_numa_slit_init (struct acpi_table_slit *slit)
++ * Description:
++ *     Ends I/O on a number of bytes attached to @rq.
++ *     If @rq has leftover, sets it up for the next range of segments.
++ *
++ * Return:
++ *     0 - we are done with this request
++ *     1 - still buffers pending for this request
++ **/
++int blk_end_request(struct request *rq, int error, int nr_bytes)
 +{
-+	printk(KERN_INFO "ACPI: No support for parsing SLIT table\n");
++	return blk_end_io(rq, error, nr_bytes, 0, NULL);
 +}
++EXPORT_SYMBOL_GPL(blk_end_request);
 +
-+void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa)
++/**
++ * __blk_end_request - Helper function for drivers to complete the request.
++ * @rq:       the request being processed
++ * @error:    0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete
++ *
++ * Description:
++ *     Must be called with queue lock held unlike blk_end_request().
++ *
++ * Return:
++ *     0 - we are done with this request
++ *     1 - still buffers pending for this request
++ **/
++int __blk_end_request(struct request *rq, int error, int nr_bytes)
 +{
++	if (blk_fs_request(rq) || blk_pc_request(rq)) {
++		if (__end_that_request_first(rq, error, nr_bytes))
++			return 1;
++	}
++
++	add_disk_randomness(rq->rq_disk);
++
++	end_that_request_last(rq, error);
++
++	return 0;
 +}
++EXPORT_SYMBOL_GPL(__blk_end_request);
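
As a rough illustration of when each variant applies (hypothetical my_* helpers, not part of the patch): blk_end_request() is for contexts that do not hold the queue lock, __blk_end_request() for paths that already hold it, e.g. inside the request_fn.

    #include <linux/blkdev.h>

    /* Completion from interrupt/softirq context, queue lock not held. */
    static void my_complete(struct request *rq, int error, unsigned int nbytes)
    {
    	if (blk_end_request(rq, error, nbytes))
    		return;	/* partial completion, more segments remain */
    }

    /* Completion from the request_fn path, queue lock already held. */
    static void my_complete_locked(struct request *rq, int error, unsigned int nbytes)
    {
    	__blk_end_request(rq, error, nbytes);
    }
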
 +
-+void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma)
++/**
++ * blk_end_bidi_request - Helper function for drivers to complete bidi request.
++ * @rq:         the bidi request being processed
++ * @error:      0 for success, < 0 for error
++ * @nr_bytes:   number of bytes to complete @rq
++ * @bidi_bytes: number of bytes to complete @rq->next_rq
++ *
++ * Description:
++ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
++ *
++ * Return:
++ *     0 - we are done with this request
++ *     1 - still buffers pending for this request
++ **/
++int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
++			 int bidi_bytes)
 +{
++	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
 +}
++EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 +
-+void acpi_numa_arch_fixup(void)
++/**
++ * blk_end_request_callback - Special helper function for tricky drivers
++ * @rq:           the request being processed
++ * @error:        0 for success, < 0 for error
++ * @nr_bytes:     number of bytes to complete
++ * @drv_callback: function called between completion of bios in the request
++ *                and completion of the request.
++ *                If the callback returns non 0, this helper returns without
++ *                completion of the request.
++ *
++ * Description:
++ *     Ends I/O on a number of bytes attached to @rq.
++ *     If @rq has leftover, sets it up for the next range of segments.
++ *
++ *     This special helper function is used only for existing tricky drivers.
++ *     (e.g. cdrom_newpc_intr() of ide-cd)
++ *     This interface will be removed when such drivers are rewritten.
++ *     Don't use this interface in other places anymore.
++ *
++ * Return:
++ *     0 - we are done with this request
++ *     1 - this request is not freed yet.
++ *         this request still has pending buffers or
++ *         the driver doesn't want to finish this request yet.
++ **/
++int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
++			     int (drv_callback)(struct request *))
 +{
++	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
 +}
-+#endif /* CONFIG_HAVE_ARCH_PARSE_SRAT */
-diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
++EXPORT_SYMBOL_GPL(blk_end_request_callback);
++
++void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
++		     struct bio *bio)
++{
++	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
++	rq->cmd_flags |= (bio->bi_rw & 3);
++
++	rq->nr_phys_segments = bio_phys_segments(q, bio);
++	rq->nr_hw_segments = bio_hw_segments(q, bio);
++	rq->current_nr_sectors = bio_cur_sectors(bio);
++	rq->hard_cur_sectors = rq->current_nr_sectors;
++	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
++	rq->buffer = bio_data(bio);
++	rq->data_len = bio->bi_size;
++
++	rq->bio = rq->biotail = bio;
++
++	if (bio->bi_bdev)
++		rq->rq_disk = bio->bi_bdev->bd_disk;
++}
++
++int kblockd_schedule_work(struct work_struct *work)
++{
++	return queue_work(kblockd_workqueue, work);
++}
++
++EXPORT_SYMBOL(kblockd_schedule_work);
++
++void kblockd_flush_work(struct work_struct *work)
++{
++	cancel_work_sync(work);
++}
++EXPORT_SYMBOL(kblockd_flush_work);
++
++int __init blk_dev_init(void)
++{
++	int i;
++
++	kblockd_workqueue = create_workqueue("kblockd");
++	if (!kblockd_workqueue)
++		panic("Failed to create kblockd\n");
++
++	request_cachep = kmem_cache_create("blkdev_requests",
++			sizeof(struct request), 0, SLAB_PANIC, NULL);
++
++	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
++			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
++
++	for_each_possible_cpu(i)
++		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
++
++	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
++	register_hotcpu_notifier(&blk_cpu_notifier);
++
++	return 0;
++}
++
+diff --git a/block/blk-exec.c b/block/blk-exec.c
 new file mode 100644
-index 0000000..7e8db53
+index 0000000..ebfb44e
 --- /dev/null
-+++ b/arch/x86/mm/extable.c
-@@ -0,0 +1,62 @@
++++ b/block/blk-exec.c
+@@ -0,0 +1,105 @@
++/*
++ * Functions related to setting various queue properties from drivers
++ */
++#include <linux/kernel.h>
 +#include <linux/module.h>
-+#include <linux/spinlock.h>
-+#include <asm/uaccess.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++
++#include "blk.h"
 +
++/*
++ * for max sense size
++ */
++#include <scsi/scsi_cmnd.h>
 +
-+int fixup_exception(struct pt_regs *regs)
++/**
++ * blk_end_sync_rq - executes a completion event on a request
++ * @rq: request to complete
++ * @error: end io status of the request
++ */
++void blk_end_sync_rq(struct request *rq, int error)
 +{
-+	const struct exception_table_entry *fixup;
++	struct completion *waiting = rq->end_io_data;
 +
-+#ifdef CONFIG_PNPBIOS
-+	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
-+		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
-+		extern u32 pnp_bios_is_utter_crap;
-+		pnp_bios_is_utter_crap = 1;
-+		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
-+		__asm__ volatile(
-+			"movl %0, %%esp\n\t"
-+			"jmp *%1\n\t"
-+			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
-+		panic("do_trap: can't hit this");
-+	}
-+#endif
++	rq->end_io_data = NULL;
++	__blk_put_request(rq->q, rq);
 +
-+	fixup = search_exception_tables(regs->ip);
-+	if (fixup) {
-+		regs->ip = fixup->fixup;
-+		return 1;
-+	}
++	/*
++	 * complete last, if this is a stack request the process (and thus
++	 * the rq pointer) could be invalid right after this complete()
++	 */
++	complete(waiting);
++}
++EXPORT_SYMBOL(blk_end_sync_rq);
 +
-+	return 0;
++/**
++ * blk_execute_rq_nowait - insert a request into queue for execution
++ * @q:		queue to insert the request in
++ * @bd_disk:	matching gendisk
++ * @rq:		request to insert
++ * @at_head:    insert request at head or tail of queue
++ * @done:	I/O completion handler
++ *
++ * Description:
++ *    Insert a fully prepared request at the back of the io scheduler queue
++ *    for execution.  Don't wait for completion.
++ */
++void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
++			   struct request *rq, int at_head,
++			   rq_end_io_fn *done)
++{
++	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
++
++	rq->rq_disk = bd_disk;
++	rq->cmd_flags |= REQ_NOMERGE;
++	rq->end_io = done;
++	WARN_ON(irqs_disabled());
++	spin_lock_irq(q->queue_lock);
++	__elv_add_request(q, rq, where, 1);
++	__generic_unplug_device(q);
++	spin_unlock_irq(q->queue_lock);
 +}
++EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
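
An asynchronous caller would pair this with its own end_io callback, along these lines; my_rq_done()/my_submit_async() are assumptions, and rq is expected to be fully prepared already:

    #include <linux/blkdev.h>

    /* Runs from end_that_request_last() with the queue lock held. */
    static void my_rq_done(struct request *rq, int error)
    {
    	__blk_put_request(rq->q, rq);	/* same pattern as blk_end_sync_rq() */
    }

    static void my_submit_async(struct request_queue *q, struct gendisk *disk,
    			    struct request *rq)
    {
    	blk_execute_rq_nowait(q, disk, rq, 0 /* at tail */, my_rq_done);
    }
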
 +
-+#ifdef CONFIG_X86_64
-+/*
-+ * Need to defined our own search_extable on X86_64 to work around
-+ * a B stepping K8 bug.
++/**
++ * blk_execute_rq - insert a request into queue for execution
++ * @q:		queue to insert the request in
++ * @bd_disk:	matching gendisk
++ * @rq:		request to insert
++ * @at_head:    insert request at head or tail of queue
++ *
++ * Description:
++ *    Insert a fully prepared request at the back of the io scheduler queue
++ *    for execution and wait for completion.
 + */
-+const struct exception_table_entry *
-+search_extable(const struct exception_table_entry *first,
-+	       const struct exception_table_entry *last,
-+	       unsigned long value)
++int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
++		   struct request *rq, int at_head)
 +{
-+	/* B stepping K8 bug */
-+	if ((value >> 32) == 0)
-+		value |= 0xffffffffUL << 32;
++	DECLARE_COMPLETION_ONSTACK(wait);
++	char sense[SCSI_SENSE_BUFFERSIZE];
++	int err = 0;
 +
-+	while (first <= last) {
-+		const struct exception_table_entry *mid;
-+		long diff;
++	/*
++	 * we need an extra reference to the request, so we can look at
++	 * it after io completion
++	 */
++	rq->ref_count++;
 +
-+		mid = (last - first) / 2 + first;
-+		diff = mid->insn - value;
-+		if (diff == 0)
-+			return mid;
-+		else if (diff < 0)
-+			first = mid+1;
-+		else
-+			last = mid-1;
++	if (!rq->sense) {
++		memset(sense, 0, sizeof(sense));
++		rq->sense = sense;
++		rq->sense_len = 0;
 +	}
-+	return NULL;
++
++	rq->end_io_data = &wait;
++	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
++	wait_for_completion(&wait);
++
++	if (rq->errors)
++		err = -EIO;
++
++	return err;
 +}
-+#endif
-diff --git a/arch/x86/mm/extable_32.c b/arch/x86/mm/extable_32.c
-deleted file mode 100644
-index 0ce4f22..0000000
---- a/arch/x86/mm/extable_32.c
-+++ /dev/null
-@@ -1,35 +0,0 @@
--/*
-- * linux/arch/i386/mm/extable.c
-- */
--
--#include <linux/module.h>
--#include <linux/spinlock.h>
--#include <asm/uaccess.h>
--
--int fixup_exception(struct pt_regs *regs)
--{
--	const struct exception_table_entry *fixup;
--
--#ifdef CONFIG_PNPBIOS
--	if (unlikely(SEGMENT_IS_PNP_CODE(regs->xcs)))
--	{
--		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
--		extern u32 pnp_bios_is_utter_crap;
--		pnp_bios_is_utter_crap = 1;
--		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
--		__asm__ volatile(
--			"movl %0, %%esp\n\t"
--			"jmp *%1\n\t"
--			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
--		panic("do_trap: can't hit this");
--	}
--#endif
--
--	fixup = search_exception_tables(regs->eip);
--	if (fixup) {
--		regs->eip = fixup->fixup;
--		return 1;
--	}
--
--	return 0;
--}
-diff --git a/arch/x86/mm/extable_64.c b/arch/x86/mm/extable_64.c
-deleted file mode 100644
-index 79ac6e7..0000000
---- a/arch/x86/mm/extable_64.c
-+++ /dev/null
-@@ -1,34 +0,0 @@
--/*
-- * linux/arch/x86_64/mm/extable.c
-- */
--
--#include <linux/module.h>
--#include <linux/spinlock.h>
--#include <linux/init.h>
--#include <asm/uaccess.h>
--
--/* Simple binary search */
--const struct exception_table_entry *
--search_extable(const struct exception_table_entry *first,
--	       const struct exception_table_entry *last,
--	       unsigned long value)
--{
--	/* Work around a B stepping K8 bug */
--	if ((value >> 32) == 0)
--		value |= 0xffffffffUL << 32; 
--
--        while (first <= last) {
--		const struct exception_table_entry *mid;
--		long diff;
--
--		mid = (last - first) / 2 + first;
--		diff = mid->insn - value;
--                if (diff == 0)
--                        return mid;
--                else if (diff < 0)
--                        first = mid+1;
--                else
--                        last = mid-1;
--        }
--        return NULL;
--}
-diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
++
++EXPORT_SYMBOL(blk_execute_rq);
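
For context, the classic synchronous user is an SG_IO-style path that allocates a REQ_TYPE_BLOCK_PC request, fills in the CDB and lets blk_execute_rq() wait. A sketch with hypothetical cdb/disk parameters, not taken from this patch:

    #include <linux/blkdev.h>
    #include <linux/string.h>

    static int my_send_cdb(struct request_queue *q, struct gendisk *disk,
    		       unsigned char *cdb, unsigned int cdb_len)
    {
    	struct request *rq;
    	int err;

    	if (cdb_len > BLK_MAX_CDB)
    		return -EINVAL;

    	rq = blk_get_request(q, READ, __GFP_WAIT);
    	if (!rq)
    		return -ENOMEM;
    	rq->cmd_type = REQ_TYPE_BLOCK_PC;
    	memcpy(rq->cmd, cdb, cdb_len);
    	rq->cmd_len = cdb_len;
    	rq->timeout = 60 * HZ;

    	err = blk_execute_rq(q, disk, rq, 0);	/* sleeps until completion */
    	blk_put_request(rq);
    	return err;
    }
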
+diff --git a/block/blk-ioc.c b/block/blk-ioc.c
 new file mode 100644
-index 0000000..e28cc52
+index 0000000..6d16755
 --- /dev/null
-+++ b/arch/x86/mm/fault.c
-@@ -0,0 +1,986 @@
++++ b/block/blk-ioc.c
+@@ -0,0 +1,194 @@
 +/*
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ * Functions related to io context handling
 + */
-+
-+#include <linux/signal.h>
-+#include <linux/sched.h>
 +#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h>		/* For unblank_screen() */
-+#include <linux/compiler.h>
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>		/* for max_low_pfn */
-+#include <linux/vmalloc.h>
 +#include <linux/module.h>
-+#include <linux/kprobes.h>
-+#include <linux/uaccess.h>
-+#include <linux/kdebug.h>
++#include <linux/init.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 +
-+#include <asm/system.h>
-+#include <asm/desc.h>
-+#include <asm/segment.h>
-+#include <asm/pgalloc.h>
-+#include <asm/smp.h>
-+#include <asm/tlbflush.h>
-+#include <asm/proto.h>
-+#include <asm-generic/sections.h>
++#include "blk.h"
 +
 +/*
-+ * Page fault error code bits
-+ *	bit 0 == 0 means no page found, 1 means protection fault
-+ *	bit 1 == 0 means read, 1 means write
-+ *	bit 2 == 0 means kernel, 1 means user-mode
-+ *	bit 3 == 1 means use of reserved bit detected
-+ *	bit 4 == 1 means fault was an instruction fetch
++ * For io context allocations
 + */
-+#define PF_PROT		(1<<0)
-+#define PF_WRITE	(1<<1)
-+#define PF_USER		(1<<2)
-+#define PF_RSVD		(1<<3)
-+#define PF_INSTR	(1<<4)
++static struct kmem_cache *iocontext_cachep;
 +
-+static inline int notify_page_fault(struct pt_regs *regs)
++static void cfq_dtor(struct io_context *ioc)
 +{
-+#ifdef CONFIG_KPROBES
-+	int ret = 0;
++	struct cfq_io_context *cic[1];
++	int r;
 +
-+	/* kprobe_running() needs smp_processor_id() */
-+#ifdef CONFIG_X86_32
-+	if (!user_mode_vm(regs)) {
-+#else
-+	if (!user_mode(regs)) {
-+#endif
-+		preempt_disable();
-+		if (kprobe_running() && kprobe_fault_handler(regs, 14))
-+			ret = 1;
-+		preempt_enable();
++	/*
++	 * We don't have a specific key to lookup with, so use the gang
++	 * lookup to just retrieve the first item stored. The cfq exit
++	 * function will iterate the full tree, so any member will do.
++	 */
++	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
++	if (r > 0)
++		cic[0]->dtor(ioc);
++}
++
++/*
++ * IO Context helper functions. put_io_context() returns 1 if there are no
++ * more users of this io context, 0 otherwise.
++ */
++int put_io_context(struct io_context *ioc)
++{
++	if (ioc == NULL)
++		return 1;
++
++	BUG_ON(atomic_read(&ioc->refcount) == 0);
++
++	if (atomic_dec_and_test(&ioc->refcount)) {
++		rcu_read_lock();
++		if (ioc->aic && ioc->aic->dtor)
++			ioc->aic->dtor(ioc->aic);
++		rcu_read_unlock();
++		cfq_dtor(ioc);
++
++		kmem_cache_free(iocontext_cachep, ioc);
++		return 1;
++	}
++	return 0;
++}
++EXPORT_SYMBOL(put_io_context);
++
++static void cfq_exit(struct io_context *ioc)
++{
++	struct cfq_io_context *cic[1];
++	int r;
++
++	rcu_read_lock();
++	/*
++	 * See comment for cfq_dtor()
++	 */
++	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
++	rcu_read_unlock();
++
++	if (r > 0)
++		cic[0]->exit(ioc);
++}
++
++/* Called by the exiting task */

++void exit_io_context(void)
++{
++	struct io_context *ioc;
++
++	task_lock(current);
++	ioc = current->io_context;
++	current->io_context = NULL;
++	task_unlock(current);
++
++	if (atomic_dec_and_test(&ioc->nr_tasks)) {
++		if (ioc->aic && ioc->aic->exit)
++			ioc->aic->exit(ioc->aic);
++		cfq_exit(ioc);
++
++		put_io_context(ioc);
++	}
++}
++
++struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
++{
++	struct io_context *ret;
++
++	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
++	if (ret) {
++		atomic_set(&ret->refcount, 1);
++		atomic_set(&ret->nr_tasks, 1);
++		spin_lock_init(&ret->lock);
++		ret->ioprio_changed = 0;
++		ret->ioprio = 0;
++		ret->last_waited = jiffies; /* doesn't matter... */
++		ret->nr_batch_requests = 0; /* because this is 0 */
++		ret->aic = NULL;
++		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
++		ret->ioc_data = NULL;
 +	}
 +
 +	return ret;
-+#else
-+	return 0;
-+#endif
 +}
 +
 +/*
-+ * X86_32
-+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
-+ * Check that here and ignore it.
-+ *
-+ * X86_64
-+ * Sometimes the CPU reports invalid exceptions on prefetch.
-+ * Check that here and ignore it.
++ * If the current task has no IO context then create one and initialise it.
++ * Otherwise, return its existing IO context.
 + *
-+ * Opcode checker based on code by Richard Brunner
++ * This returned IO context doesn't have a specifically elevated refcount,
++ * but since the current task itself holds a reference, the context can be
++ * used in general code, so long as it stays within `current` context.
 + */
-+static int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+		       unsigned long error_code)
++struct io_context *current_io_context(gfp_t gfp_flags, int node)
 +{
-+	unsigned char *instr;
-+	int scan_more = 1;
-+	int prefetch = 0;
-+	unsigned char *max_instr;
-+
-+#ifdef CONFIG_X86_32
-+	if (!(__supported_pte_mask & _PAGE_NX))
-+		return 0;
-+#endif
++	struct task_struct *tsk = current;
++	struct io_context *ret;
 +
-+	/* If it was a exec fault on NX page, ignore */
-+	if (error_code & PF_INSTR)
-+		return 0;
++	ret = tsk->io_context;
++	if (likely(ret))
++		return ret;
 +
-+	instr = (unsigned char *)convert_ip_to_linear(current, regs);
-+	max_instr = instr + 15;
++	ret = alloc_io_context(gfp_flags, node);
++	if (ret) {
++		/* make sure set_task_ioprio() sees the settings above */
++		smp_wmb();
++		tsk->io_context = ret;
++	}
 +
-+	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
-+		return 0;
++	return ret;
++}
 +
-+	while (scan_more && instr < max_instr) {
-+		unsigned char opcode;
-+		unsigned char instr_hi;
-+		unsigned char instr_lo;
++/*
++ * If the current task has no IO context then create one and initialise it.
++ * If it does have a context, take a ref on it.
++ *
++ * This is always called in the context of the task which submitted the I/O.
++ */
++struct io_context *get_io_context(gfp_t gfp_flags, int node)
++{
++	struct io_context *ret = NULL;
 +
-+		if (probe_kernel_address(instr, opcode))
++	/*
++	 * Check for unlikely race with exiting task. ioc ref count is
++	 * zero when ioc is being detached.
++	 */
++	do {
++		ret = current_io_context(gfp_flags, node);
++		if (unlikely(!ret))
 +			break;
++	} while (!atomic_inc_not_zero(&ret->refcount));
 +
-+		instr_hi = opcode & 0xf0;
-+		instr_lo = opcode & 0x0f;
-+		instr++;
++	return ret;
++}
++EXPORT_SYMBOL(get_io_context);
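
A subsystem that wants to hold on to the submitting task's I/O context beyond the current call could do something like the following; the my_* names are hypothetical, and the node argument would normally be the queue's NUMA node:

    #include <linux/blkdev.h>

    static struct io_context *my_saved_ioc;

    static void my_remember_submitter(struct request_queue *q)
    {
    	/* takes a reference; may allocate one if the task has none yet */
    	my_saved_ioc = get_io_context(GFP_NOIO, q->node);
    }

    static void my_forget_submitter(void)
    {
    	put_io_context(my_saved_ioc);	/* drop the reference again */
    	my_saved_ioc = NULL;
    }
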
 +
-+		switch (instr_hi) {
-+		case 0x20:
-+		case 0x30:
-+			/*
-+			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
-+			 * In X86_64 long mode, the CPU will signal invalid
-+			 * opcode if some of these prefixes are present so
-+			 * X86_64 will never get here anyway
-+			 */
-+			scan_more = ((instr_lo & 7) == 0x6);
-+			break;
-+#ifdef CONFIG_X86_64
-+		case 0x40:
-+			/*
-+			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
-+			 * Need to figure out under what instruction mode the
-+			 * instruction was issued. Could check the LDT for lm,
-+			 * but for now it's good enough to assume that long
-+			 * mode only uses well known segments or kernel.
-+			 */
-+			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
-+			break;
-+#endif
-+		case 0x60:
-+			/* 0x64 thru 0x67 are valid prefixes in all modes. */
-+			scan_more = (instr_lo & 0xC) == 0x4;
-+			break;
-+		case 0xF0:
-+			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
-+			scan_more = !instr_lo || (instr_lo>>1) == 1;
-+			break;
-+		case 0x00:
-+			/* Prefetch instruction is 0x0F0D or 0x0F18 */
-+			scan_more = 0;
++void copy_io_context(struct io_context **pdst, struct io_context **psrc)
++{
++	struct io_context *src = *psrc;
++	struct io_context *dst = *pdst;
 +
-+			if (probe_kernel_address(instr, opcode))
-+				break;
-+			prefetch = (instr_lo == 0xF) &&
-+				(opcode == 0x0D || opcode == 0x18);
-+			break;
-+		default:
-+			scan_more = 0;
-+			break;
-+		}
++	if (src) {
++		BUG_ON(atomic_read(&src->refcount) == 0);
++		atomic_inc(&src->refcount);
++		put_io_context(dst);
++		*pdst = src;
 +	}
-+	return prefetch;
 +}
++EXPORT_SYMBOL(copy_io_context);
 +
-+static void force_sig_info_fault(int si_signo, int si_code,
-+	unsigned long address, struct task_struct *tsk)
++void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
 +{
-+	siginfo_t info;
++	struct io_context *temp;
++	temp = *ioc1;
++	*ioc1 = *ioc2;
++	*ioc2 = temp;
++}
++EXPORT_SYMBOL(swap_io_context);
 +
-+	info.si_signo = si_signo;
-+	info.si_errno = 0;
-+	info.si_code = si_code;
-+	info.si_addr = (void __user *)address;
-+	force_sig_info(si_signo, &info, tsk);
++int __init blk_ioc_init(void)
++{
++	iocontext_cachep = kmem_cache_create("blkdev_ioc",
++			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
++	return 0;
 +}
++subsys_initcall(blk_ioc_init);
+diff --git a/block/blk-map.c b/block/blk-map.c
+new file mode 100644
+index 0000000..916cfc9
+--- /dev/null
++++ b/block/blk-map.c
+@@ -0,0 +1,264 @@
++/*
++ * Functions related to mapping data to requests
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
 +
-+#ifdef CONFIG_X86_64
-+static int bad_address(void *p)
++#include "blk.h"
++
++int blk_rq_append_bio(struct request_queue *q, struct request *rq,
++		      struct bio *bio)
 +{
-+	unsigned long dummy;
-+	return probe_kernel_address((unsigned long *)p, dummy);
++	if (!rq->bio)
++		blk_rq_bio_prep(q, rq, bio);
++	else if (!ll_back_merge_fn(q, rq, bio))
++		return -EINVAL;
++	else {
++		rq->biotail->bi_next = bio;
++		rq->biotail = bio;
++
++		rq->data_len += bio->bi_size;
++	}
++	return 0;
 +}
-+#endif
++EXPORT_SYMBOL(blk_rq_append_bio);
 +
-+void dump_pagetable(unsigned long address)
++static int __blk_rq_unmap_user(struct bio *bio)
 +{
-+#ifdef CONFIG_X86_32
-+	__typeof__(pte_val(__pte(0))) page;
++	int ret = 0;
 +
-+	page = read_cr3();
-+	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
-+#ifdef CONFIG_X86_PAE
-+	printk("*pdpt = %016Lx ", page);
-+	if ((page >> PAGE_SHIFT) < max_low_pfn
-+	    && page & _PAGE_PRESENT) {
-+		page &= PAGE_MASK;
-+		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
-+		                                         & (PTRS_PER_PMD - 1)];
-+		printk(KERN_CONT "*pde = %016Lx ", page);
-+		page &= ~_PAGE_NX;
++	if (bio) {
++		if (bio_flagged(bio, BIO_USER_MAPPED))
++			bio_unmap_user(bio);
++		else
++			ret = bio_uncopy_user(bio);
 +	}
-+#else
-+	printk("*pde = %08lx ", page);
-+#endif
++
++	return ret;
++}
++
++static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
++			     void __user *ubuf, unsigned int len)
++{
++	unsigned long uaddr;
++	struct bio *bio, *orig_bio;
++	int reading, ret;
++
++	reading = rq_data_dir(rq) == READ;
 +
 +	/*
-+	 * We must not directly access the pte in the highpte
-+	 * case if the page table is located in highmem.
-+	 * And let's rather not kmap-atomic the pte, just in case
-+	 * it's allocated already.
++	 * if alignment requirement is satisfied, map in user pages for
++	 * direct dma. else, set up kernel bounce buffers
 +	 */
-+	if ((page >> PAGE_SHIFT) < max_low_pfn
-+	    && (page & _PAGE_PRESENT)
-+	    && !(page & _PAGE_PSE)) {
-+		page &= PAGE_MASK;
-+		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
-+		                                         & (PTRS_PER_PTE - 1)];
-+		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
-+	}
-+
-+	printk("\n");
-+#else /* CONFIG_X86_64 */
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
++	uaddr = (unsigned long) ubuf;
++	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
++		bio = bio_map_user(q, NULL, uaddr, len, reading);
++	else
++		bio = bio_copy_user(q, uaddr, len, reading);
 +
-+	pgd = (pgd_t *)read_cr3();
++	if (IS_ERR(bio))
++		return PTR_ERR(bio);
 +
-+	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
-+	pgd += pgd_index(address);
-+	if (bad_address(pgd)) goto bad;
-+	printk("PGD %lx ", pgd_val(*pgd));
-+	if (!pgd_present(*pgd)) goto ret;
++	orig_bio = bio;
++	blk_queue_bounce(q, &bio);
 +
-+	pud = pud_offset(pgd, address);
-+	if (bad_address(pud)) goto bad;
-+	printk("PUD %lx ", pud_val(*pud));
-+	if (!pud_present(*pud))	goto ret;
++	/*
++	 * We link the bounce buffer in and could have to traverse it
++	 * later so we have to get a ref to prevent it from being freed
++	 */
++	bio_get(bio);
 +
-+	pmd = pmd_offset(pud, address);
-+	if (bad_address(pmd)) goto bad;
-+	printk("PMD %lx ", pmd_val(*pmd));
-+	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
++	ret = blk_rq_append_bio(q, rq, bio);
++	if (!ret)
++		return bio->bi_size;
 +
-+	pte = pte_offset_kernel(pmd, address);
-+	if (bad_address(pte)) goto bad;
-+	printk("PTE %lx", pte_val(*pte));
-+ret:
-+	printk("\n");
-+	return;
-+bad:
-+	printk("BAD\n");
-+#endif
++	/* if it was bounced we must call the end io function */
++	bio_endio(bio, 0);
++	__blk_rq_unmap_user(orig_bio);
++	bio_put(bio);
++	return ret;
 +}
 +
-+#ifdef CONFIG_X86_32
-+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++/**
++ * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
++ * @q:		request queue where request should be inserted
++ * @rq:		request structure to fill
++ * @ubuf:	the user buffer
++ * @len:	length of user data
++ *
++ * Description:
++ *    Data will be mapped directly for zero copy io, if possible. Otherwise
++ *    a kernel bounce buffer is used.
++ *
++ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
++ *    still in process context.
++ *
++ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
++ *    before being submitted to the device, as pages mapped may be out of
++ *    reach. It's the caller's responsibility to make sure this happens. The
++ *    original bio must be passed back in to blk_rq_unmap_user() for proper
++ *    unmapping.
++ */
++int blk_rq_map_user(struct request_queue *q, struct request *rq,
++		    void __user *ubuf, unsigned long len)
 +{
-+	unsigned index = pgd_index(address);
-+	pgd_t *pgd_k;
-+	pud_t *pud, *pud_k;
-+	pmd_t *pmd, *pmd_k;
++	unsigned long bytes_read = 0;
++	struct bio *bio = NULL;
++	int ret;
 +
-+	pgd += index;
-+	pgd_k = init_mm.pgd + index;
++	if (len > (q->max_hw_sectors << 9))
++		return -EINVAL;
++	if (!len || !ubuf)
++		return -EINVAL;
 +
-+	if (!pgd_present(*pgd_k))
-+		return NULL;
++	while (bytes_read != len) {
++		unsigned long map_len, end, start;
 +
-+	/*
-+	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
-+	 * and redundant with the set_pmd() on non-PAE. As would
-+	 * set_pud.
-+	 */
++		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
++		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
++								>> PAGE_SHIFT;
++		start = (unsigned long)ubuf >> PAGE_SHIFT;
 +
-+	pud = pud_offset(pgd, address);
-+	pud_k = pud_offset(pgd_k, address);
-+	if (!pud_present(*pud_k))
-+		return NULL;
++		/*
++		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
++		 * pages. If this happens we just lower the requested
++		 * mapping len by a page so that we can fit
++		 */
++		if (end - start > BIO_MAX_PAGES)
++			map_len -= PAGE_SIZE;
 +
-+	pmd = pmd_offset(pud, address);
-+	pmd_k = pmd_offset(pud_k, address);
-+	if (!pmd_present(*pmd_k))
-+		return NULL;
-+	if (!pmd_present(*pmd)) {
-+		set_pmd(pmd, *pmd_k);
-+		arch_flush_lazy_mmu_mode();
-+	} else
-+		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
-+	return pmd_k;
++		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
++		if (ret < 0)
++			goto unmap_rq;
++		if (!bio)
++			bio = rq->bio;
++		bytes_read += ret;
++		ubuf += ret;
++	}
++
++	rq->buffer = rq->data = NULL;
++	return 0;
++unmap_rq:
++	blk_rq_unmap_user(bio);
++	return ret;
 +}
-+#endif
 +
-+#ifdef CONFIG_X86_64
-+static const char errata93_warning[] =
-+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
-+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
-+KERN_ERR "******* Please consider a BIOS update.\n"
-+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
-+#endif
++EXPORT_SYMBOL(blk_rq_map_user);
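
The usual pairing with blk_rq_unmap_user() looks roughly like this; ubuf/len and my_pc_io() are illustrative, and the submission step in the middle is elided:

    #include <linux/blkdev.h>

    static int my_pc_io(struct request_queue *q, struct request *rq,
    		    void __user *ubuf, unsigned long len)
    {
    	struct bio *bio;
    	int ret;

    	ret = blk_rq_map_user(q, rq, ubuf, len);
    	if (ret)
    		return ret;

    	bio = rq->bio;	/* keep the original bio for the unmap */
    	/* ... submit rq (e.g. blk_execute_rq()) and check rq->errors ... */
    	return blk_rq_unmap_user(bio);
    }
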
 +
-+/* Workaround for K8 erratum #93 & buggy BIOS.
-+   BIOS SMM functions are required to use a specific workaround
-+   to avoid corruption of the 64bit RIP register on C stepping K8.
-+   A lot of BIOS that didn't get tested properly miss this.
-+   The OS sees this as a page fault with the upper 32bits of RIP cleared.
-+   Try to work around it here.
-+   Note we only handle faults in kernel here.
-+   Does nothing for X86_32
++/**
++ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
++ * @q:		request queue where request should be inserted
++ * @rq:		request to map data to
++ * @iov:	pointer to the iovec
++ * @iov_count:	number of elements in the iovec
++ * @len:	I/O byte count
++ *
++ * Description:
++ *    Data will be mapped directly for zero copy io, if possible. Otherwise
++ *    a kernel bounce buffer is used.
++ *
++ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
++ *    still in process context.
++ *
++ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
++ *    before being submitted to the device, as pages mapped may be out of
++ *    reach. It's the caller's responsibility to make sure this happens. The
++ *    original bio must be passed back in to blk_rq_unmap_user() for proper
++ *    unmapping.
 + */
-+static int is_errata93(struct pt_regs *regs, unsigned long address)
++int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
++			struct sg_iovec *iov, int iov_count, unsigned int len)
 +{
-+#ifdef CONFIG_X86_64
-+	static int warned;
-+	if (address != regs->ip)
-+		return 0;
-+	if ((address >> 32) != 0)
-+		return 0;
-+	address |= 0xffffffffUL << 32;
-+	if ((address >= (u64)_stext && address <= (u64)_etext) ||
-+	    (address >= MODULES_VADDR && address <= MODULES_END)) {
-+		if (!warned) {
-+			printk(errata93_warning);
-+			warned = 1;
-+		}
-+		regs->ip = address;
-+		return 1;
++	struct bio *bio;
++
++	if (!iov || iov_count <= 0)
++		return -EINVAL;
++
++	/* we don't allow misaligned data like bio_map_user() does.  If the
++	 * user is using sg, they're expected to know the alignment constraints
++	 * and respect them accordingly */
++	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
++	if (IS_ERR(bio))
++		return PTR_ERR(bio);
++
++	if (bio->bi_size != len) {
++		bio_endio(bio, 0);
++		bio_unmap_user(bio);
++		return -EINVAL;
 +	}
-+#endif
++
++	bio_get(bio);
++	blk_rq_bio_prep(q, rq, bio);
++	rq->buffer = rq->data = NULL;
 +	return 0;
 +}
 +
-+/*
-+ * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal
-+ * addresses >4GB.  We catch this in the page fault handler because these
-+ * addresses are not reachable. Just detect this case and return.  Any code
-+ * segment in LDT is compatibility mode.
++EXPORT_SYMBOL(blk_rq_map_user_iov);
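
The same pattern applies to the iovec variant; iov/count/total_len here are assumed to come from an already validated SG_IO header:

    #include <linux/blkdev.h>
    #include <scsi/sg.h>	/* struct sg_iovec */

    static int my_map_iov(struct request_queue *q, struct request *rq,
    		      struct sg_iovec *iov, int count, unsigned int total_len)
    {
    	int ret = blk_rq_map_user_iov(q, rq, iov, count, total_len);

    	if (ret)
    		return ret;
    	/* unmap later with blk_rq_unmap_user(rq->bio) */
    	return 0;
    }
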
++
++/**
++ * blk_rq_unmap_user - unmap a request with user data
++ * @bio:	       start of bio list
++ *
++ * Description:
++ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
++ *    supply the original rq->bio from the blk_rq_map_user() return, since
++ *    the io completion may have changed rq->bio.
 + */
-+static int is_errata100(struct pt_regs *regs, unsigned long address)
++int blk_rq_unmap_user(struct bio *bio)
 +{
-+#ifdef CONFIG_X86_64
-+	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
-+	    (address >> 32))
-+		return 1;
-+#endif
++	struct bio *mapped_bio;
++	int ret = 0, ret2;
++
++	while (bio) {
++		mapped_bio = bio;
++		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
++			mapped_bio = bio->bi_private;
++
++		ret2 = __blk_rq_unmap_user(mapped_bio);
++		if (ret2 && !ret)
++			ret = ret2;
++
++		mapped_bio = bio;
++		bio = bio->bi_next;
++		bio_put(mapped_bio);
++	}
++
++	return ret;
++}
++
++EXPORT_SYMBOL(blk_rq_unmap_user);
++
++/**
++ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
++ * @q:		request queue where request should be inserted
++ * @rq:		request to fill
++ * @kbuf:	the kernel buffer
++ * @len:	length of user data
++ * @gfp_mask:	memory allocation flags
++ */
++int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
++		    unsigned int len, gfp_t gfp_mask)
++{
++	struct bio *bio;
++
++	if (len > (q->max_hw_sectors << 9))
++		return -EINVAL;
++	if (!len || !kbuf)
++		return -EINVAL;
++
++	bio = bio_map_kern(q, kbuf, len, gfp_mask);
++	if (IS_ERR(bio))
++		return PTR_ERR(bio);
++
++	if (rq_data_dir(rq) == WRITE)
++		bio->bi_rw |= (1 << BIO_RW);
++
++	blk_rq_bio_prep(q, rq, bio);
++	blk_queue_bounce(q, &rq->bio);
++	rq->buffer = rq->data = NULL;
 +	return 0;
 +}
 +
-+void do_invalid_op(struct pt_regs *, unsigned long);
++EXPORT_SYMBOL(blk_rq_map_kern);
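
And for kernel buffers the call is essentially a one-liner; buf/len are hypothetical and GFP_NOIO is just one reasonable choice of allocation mask:

    #include <linux/blkdev.h>

    static int my_map_kernel_buffer(struct request_queue *q, struct request *rq,
    				void *buf, unsigned int len)
    {
    	/* data is bounced if the buffer is not addressable by the device */
    	return blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
    }
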
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+new file mode 100644
+index 0000000..5023f0b
+--- /dev/null
++++ b/block/blk-merge.c
+@@ -0,0 +1,485 @@
++/*
++ * Functions related to segment and merge handling
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/scatterlist.h>
 +
-+static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
++#include "blk.h"
++
++void blk_recalc_rq_sectors(struct request *rq, int nsect)
 +{
-+#ifdef CONFIG_X86_F00F_BUG
-+	unsigned long nr;
-+	/*
-+	 * Pentium F0 0F C7 C8 bug workaround.
-+	 */
-+	if (boot_cpu_data.f00f_bug) {
-+		nr = (address - idt_descr.address) >> 3;
++	if (blk_fs_request(rq)) {
++		rq->hard_sector += nsect;
++		rq->hard_nr_sectors -= nsect;
 +
-+		if (nr == 6) {
-+			do_invalid_op(regs, 0);
-+			return 1;
++		/*
++		 * Move the I/O submission pointers ahead if required.
++		 */
++		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
++		    (rq->sector <= rq->hard_sector)) {
++			rq->sector = rq->hard_sector;
++			rq->nr_sectors = rq->hard_nr_sectors;
++			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
++			rq->current_nr_sectors = rq->hard_cur_sectors;
++			rq->buffer = bio_data(rq->bio);
++		}
++
++		/*
++		 * if total number of sectors is less than the first segment
++		 * size, something has gone terribly wrong
++		 */
++		if (rq->nr_sectors < rq->current_nr_sectors) {
++			printk("blk: request botched\n");
++			rq->nr_sectors = rq->current_nr_sectors;
 +		}
 +	}
-+#endif
-+	return 0;
 +}
 +
-+static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
-+			    unsigned long address)
++void blk_recalc_rq_segments(struct request *rq)
 +{
-+#ifdef CONFIG_X86_32
-+	if (!oops_may_print())
++	int nr_phys_segs;
++	int nr_hw_segs;
++	unsigned int phys_size;
++	unsigned int hw_size;
++	struct bio_vec *bv, *bvprv = NULL;
++	int seg_size;
++	int hw_seg_size;
++	int cluster;
++	struct req_iterator iter;
++	int high, highprv = 1;
++	struct request_queue *q = rq->q;
++
++	if (!rq->bio)
 +		return;
-+#endif
 +
-+#ifdef CONFIG_X86_PAE
-+	if (error_code & PF_INSTR) {
-+		int level;
-+		pte_t *pte = lookup_address(address, &level);
++	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
++	hw_seg_size = seg_size = 0;
++	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
++	rq_for_each_segment(bv, rq, iter) {
++		/*
++		 * the trick here is making sure that a high page is never
++		 * considered part of another segment, since that might
++		 * change with the bounce page.
++		 */
++		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
++		if (high || highprv)
++			goto new_hw_segment;
++		if (cluster) {
++			if (seg_size + bv->bv_len > q->max_segment_size)
++				goto new_segment;
++			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
++				goto new_segment;
++			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
++				goto new_segment;
++			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
++				goto new_hw_segment;
 +
-+		if (pte && pte_present(*pte) && !pte_exec(*pte))
-+			printk(KERN_CRIT "kernel tried to execute "
-+				"NX-protected page - exploit attempt? "
-+				"(uid: %d)\n", current->uid);
++			seg_size += bv->bv_len;
++			hw_seg_size += bv->bv_len;
++			bvprv = bv;
++			continue;
++		}
++new_segment:
++		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
++		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
++			hw_seg_size += bv->bv_len;
++		else {
++new_hw_segment:
++			if (nr_hw_segs == 1 &&
++			    hw_seg_size > rq->bio->bi_hw_front_size)
++				rq->bio->bi_hw_front_size = hw_seg_size;
++			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
++			nr_hw_segs++;
++		}
++
++		nr_phys_segs++;
++		bvprv = bv;
++		seg_size = bv->bv_len;
++		highprv = high;
 +	}
-+#endif
 +
-+	printk(KERN_ALERT "BUG: unable to handle kernel ");
-+	if (address < PAGE_SIZE)
-+		printk(KERN_CONT "NULL pointer dereference");
-+	else
-+		printk(KERN_CONT "paging request");
-+#ifdef CONFIG_X86_32
-+	printk(KERN_CONT " at %08lx\n", address);
-+#else
-+	printk(KERN_CONT " at %016lx\n", address);
-+#endif
-+	printk(KERN_ALERT "IP:");
-+	printk_address(regs->ip, 1);
-+	dump_pagetable(address);
++	if (nr_hw_segs == 1 &&
++	    hw_seg_size > rq->bio->bi_hw_front_size)
++		rq->bio->bi_hw_front_size = hw_seg_size;
++	if (hw_seg_size > rq->biotail->bi_hw_back_size)
++		rq->biotail->bi_hw_back_size = hw_seg_size;
++	rq->nr_phys_segments = nr_phys_segs;
++	rq->nr_hw_segments = nr_hw_segs;
 +}
 +
-+#ifdef CONFIG_X86_64
-+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
-+				 unsigned long error_code)
++void blk_recount_segments(struct request_queue *q, struct bio *bio)
 +{
-+	unsigned long flags = oops_begin();
-+	struct task_struct *tsk;
-+
-+	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
-+	       current->comm, address);
-+	dump_pagetable(address);
-+	tsk = current;
-+	tsk->thread.cr2 = address;
-+	tsk->thread.trap_no = 14;
-+	tsk->thread.error_code = error_code;
-+	if (__die("Bad pagetable", regs, error_code))
-+		regs = NULL;
-+	oops_end(flags, regs, SIGKILL);
++	struct request rq;
++	struct bio *nxt = bio->bi_next;
++	rq.q = q;
++	rq.bio = rq.biotail = bio;
++	bio->bi_next = NULL;
++	blk_recalc_rq_segments(&rq);
++	bio->bi_next = nxt;
++	bio->bi_phys_segments = rq.nr_phys_segments;
++	bio->bi_hw_segments = rq.nr_hw_segments;
++	bio->bi_flags |= (1 << BIO_SEG_VALID);
 +}
-+#endif
++EXPORT_SYMBOL(blk_recount_segments);
 +
-+/*
-+ * Handle a spurious fault caused by a stale TLB entry.  This allows
-+ * us to lazily refresh the TLB when increasing the permissions of a
-+ * kernel page (RO -> RW or NX -> X).  Doing it eagerly is very
-+ * expensive since that implies doing a full cross-processor TLB
-+ * flush, even if no stale TLB entries exist on other processors.
-+ * There are no security implications to leaving a stale TLB when
-+ * increasing the permissions on a page.
-+ */
-+static int spurious_fault(unsigned long address,
-+			  unsigned long error_code)
++static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
++				   struct bio *nxt)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+
-+	/* Reserved-bit violation or user access to kernel space? */
-+	if (error_code & (PF_USER | PF_RSVD))
++	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
 +		return 0;
 +
-+	pgd = init_mm.pgd + pgd_index(address);
-+	if (!pgd_present(*pgd))
++	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
 +		return 0;
-+
-+	pud = pud_offset(pgd, address);
-+	if (!pud_present(*pud))
++	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
 +		return 0;
 +
-+	pmd = pmd_offset(pud, address);
-+	if (!pmd_present(*pmd))
-+		return 0;
++	/*
++	 * bio and nxt are contiguous in memory, check if the queue allows
++	 * these two to be merged into one
++	 */
++	if (BIO_SEG_BOUNDARY(q, bio, nxt))
++		return 1;
 +
-+	pte = pte_offset_kernel(pmd, address);
-+	if (!pte_present(*pte))
-+		return 0;
++	return 0;
++}
 +
-+	if ((error_code & PF_WRITE) && !pte_write(*pte))
++static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
++				 struct bio *nxt)
++{
++	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
++		blk_recount_segments(q, bio);
++	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
++		blk_recount_segments(q, nxt);
++	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
++	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
 +		return 0;
-+	if ((error_code & PF_INSTR) && !pte_exec(*pte))
++	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
 +		return 0;
 +
 +	return 1;
 +}
 +
 +/*
-+ * X86_32
-+ * Handle a fault on the vmalloc or module mapping area
-+ *
-+ * X86_64
-+ * Handle a fault on the vmalloc area
-+ *
-+ * This assumes no large pages in there.
++ * map a request to scatterlist, return number of sg entries setup. Caller
++ * must make sure sg can hold rq->nr_phys_segments entries
 + */
-+static int vmalloc_fault(unsigned long address)
++int blk_rq_map_sg(struct request_queue *q, struct request *rq,
++		  struct scatterlist *sglist)
 +{
-+#ifdef CONFIG_X86_32
-+	unsigned long pgd_paddr;
-+	pmd_t *pmd_k;
-+	pte_t *pte_k;
-+	/*
-+	 * Synchronize this task's top level page-table
-+	 * with the 'reference' page table.
-+	 *
-+	 * Do _not_ use "current" here. We might be inside
-+	 * an interrupt in the middle of a task switch..
-+	 */
-+	pgd_paddr = read_cr3();
-+	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
-+	if (!pmd_k)
-+		return -1;
-+	pte_k = pte_offset_kernel(pmd_k, address);
-+	if (!pte_present(*pte_k))
-+		return -1;
-+	return 0;
-+#else
-+	pgd_t *pgd, *pgd_ref;
-+	pud_t *pud, *pud_ref;
-+	pmd_t *pmd, *pmd_ref;
-+	pte_t *pte, *pte_ref;
-+
-+	/* Copy kernel mappings over when needed. This can also
-+	   happen within a race in page table update. In the later
-+	   case just flush. */
-+
-+	pgd = pgd_offset(current->mm ?: &init_mm, address);
-+	pgd_ref = pgd_offset_k(address);
-+	if (pgd_none(*pgd_ref))
-+		return -1;
-+	if (pgd_none(*pgd))
-+		set_pgd(pgd, *pgd_ref);
-+	else
-+		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-+
-+	/* Below here mismatches are bugs because these lower tables
-+	   are shared */
-+
-+	pud = pud_offset(pgd, address);
-+	pud_ref = pud_offset(pgd_ref, address);
-+	if (pud_none(*pud_ref))
-+		return -1;
-+	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
-+		BUG();
-+	pmd = pmd_offset(pud, address);
-+	pmd_ref = pmd_offset(pud_ref, address);
-+	if (pmd_none(*pmd_ref))
-+		return -1;
-+	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
-+		BUG();
-+	pte_ref = pte_offset_kernel(pmd_ref, address);
-+	if (!pte_present(*pte_ref))
-+		return -1;
-+	pte = pte_offset_kernel(pmd, address);
-+	/* Don't use pte_page here, because the mappings can point
-+	   outside mem_map, and the NUMA hash lookup cannot handle
-+	   that. */
-+	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
-+		BUG();
-+	return 0;
-+#endif
-+}
-+
-+int show_unhandled_signals = 1;
++	struct bio_vec *bvec, *bvprv;
++	struct req_iterator iter;
++	struct scatterlist *sg;
++	int nsegs, cluster;
 +
-+/*
-+ * This routine handles page faults.  It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ */
-+#ifdef CONFIG_X86_64
-+asmlinkage
-+#endif
-+void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
-+{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	struct vm_area_struct *vma;
-+	unsigned long address;
-+	int write, si_code;
-+	int fault;
-+#ifdef CONFIG_X86_64
-+	unsigned long flags;
-+#endif
++	nsegs = 0;
++	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
 +
 +	/*
-+	 * We can fault from pretty much anywhere, with unknown IRQ state.
++	 * for each bio in rq
 +	 */
-+	trace_hardirqs_fixup();
-+
-+	tsk = current;
-+	mm = tsk->mm;
-+	prefetchw(&mm->mmap_sem);
-+
-+	/* get the address */
-+	address = read_cr2();
++	bvprv = NULL;
++	sg = NULL;
++	rq_for_each_segment(bvec, rq, iter) {
++		int nbytes = bvec->bv_len;
 +
-+	si_code = SEGV_MAPERR;
++		if (bvprv && cluster) {
++			if (sg->length + nbytes > q->max_segment_size)
++				goto new_segment;
 +
-+	if (notify_page_fault(regs))
-+		return;
++			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
++				goto new_segment;
++			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
++				goto new_segment;
 +
-+	/*
-+	 * We fault-in kernel-space virtual memory on-demand. The
-+	 * 'reference' page table is init_mm.pgd.
-+	 *
-+	 * NOTE! We MUST NOT take any locks for this case. We may
-+	 * be in an interrupt or a critical region, and should
-+	 * only copy the information from the master page table,
-+	 * nothing more.
-+	 *
-+	 * This verifies that the fault happens in kernel space
-+	 * (error_code & 4) == 0, and that the fault was not a
-+	 * protection error (error_code & 9) == 0.
-+	 */
-+#ifdef CONFIG_X86_32
-+	if (unlikely(address >= TASK_SIZE)) {
-+		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-+		    vmalloc_fault(address) >= 0)
-+			return;
++			sg->length += nbytes;
++		} else {
++new_segment:
++			if (!sg)
++				sg = sglist;
++			else {
++				/*
++				 * If the driver previously mapped a shorter
++				 * list, we could see a termination bit
++				 * prematurely unless it fully inits the sg
++				 * table on each mapping. We KNOW that there
++				 * must be more entries here or the driver
++				 * would be buggy, so force clear the
++				 * termination bit to avoid doing a full
++				 * sg_init_table() in drivers for each command.
++				 */
++				sg->page_link &= ~0x02;
++				sg = sg_next(sg);
++			}
 +
-+		/* Can handle a stale RO->RW TLB */
-+		if (spurious_fault(address, error_code))
-+			return;
++			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
++			nsegs++;
++		}
++		bvprv = bvec;
++	} /* segments in rq */
 +
-+		/*
-+		 * Don't take the mm semaphore here. If we fixup a prefetch
-+		 * fault we could otherwise deadlock.
-+		 */
-+		goto bad_area_nosemaphore;
++	if (q->dma_drain_size) {
++		sg->page_link &= ~0x02;
++		sg = sg_next(sg);
++		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
++			    q->dma_drain_size,
++			    ((unsigned long)q->dma_drain_buffer) &
++			    (PAGE_SIZE - 1));
++		nsegs++;
 +	}
 +
-+	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
-+	   fault has been handled. */
-+	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
-+		local_irq_enable();
-+
-+	/*
-+	 * If we're in an interrupt, have no user context or are running in an
-+	 * atomic region then we must not take the fault.
-+	 */
-+	if (in_atomic() || !mm)
-+		goto bad_area_nosemaphore;
-+#else /* CONFIG_X86_64 */
-+	if (unlikely(address >= TASK_SIZE64)) {
-+		/*
-+		 * Don't check for the module range here: its PML4
-+		 * is always initialized because it's shared with the main
-+		 * kernel text. Only vmalloc may need PML4 syncups.
-+		 */
-+		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-+		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
-+			if (vmalloc_fault(address) >= 0)
-+				return;
-+		}
++	if (sg)
++		sg_mark_end(sg);
 +
-+		/* Can handle a stale RO->RW TLB */
-+		if (spurious_fault(address, error_code))
-+			return;
++	return nsegs;
++}
 +
-+		/*
-+		 * Don't take the mm semaphore here. If we fixup a prefetch
-+		 * fault we could otherwise deadlock.
-+		 */
-+		goto bad_area_nosemaphore;
-+	}
-+	if (likely(regs->flags & X86_EFLAGS_IF))
-+		local_irq_enable();
++EXPORT_SYMBOL(blk_rq_map_sg);
 +
-+	if (unlikely(error_code & PF_RSVD))
-+		pgtable_bad(address, regs, error_code);
++static inline int ll_new_mergeable(struct request_queue *q,
++				   struct request *req,
++				   struct bio *bio)
++{
++	int nr_phys_segs = bio_phys_segments(q, bio);
 +
-+	/*
-+	 * If we're in an interrupt, have no user context or are running in an
-+	 * atomic region then we must not take the fault.
-+	 */
-+	if (unlikely(in_atomic() || !mm))
-+		goto bad_area_nosemaphore;
++	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
++		req->cmd_flags |= REQ_NOMERGE;
++		if (req == q->last_merge)
++			q->last_merge = NULL;
++		return 0;
++	}
 +
 +	/*
-+	 * User-mode registers count as a user access even for any
-+	 * potential system fault or CPU buglet.
-+	 */
-+	if (user_mode_vm(regs))
-+		error_code |= PF_USER;
-+again:
-+#endif
-+	/* When running in the kernel we expect faults to occur only to
-+	 * addresses in user space.  All other faults represent errors in the
-+	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
-+	 * erroneous fault occurring in a code path which already holds mmap_sem
-+	 * we will deadlock attempting to validate the fault against the
-+	 * address space.  Luckily the kernel only validly references user
-+	 * space from well defined areas of code, which are listed in the
-+	 * exceptions table.
-+	 *
-+	 * As the vast majority of faults will be valid we will only perform
-+	 * the source reference check when there is a possibility of a deadlock.
-+	 * Attempt to lock the address space, if we cannot we then validate the
-+	 * source.  If this is invalid we can skip the address space check,
-+	 * thus avoiding the deadlock.
++	 * A hw segment is just getting larger, bump just the phys
++	 * counter.
 +	 */
-+	if (!down_read_trylock(&mm->mmap_sem)) {
-+		if ((error_code & PF_USER) == 0 &&
-+		    !search_exception_tables(regs->ip))
-+			goto bad_area_nosemaphore;
-+		down_read(&mm->mmap_sem);
-+	}
++	req->nr_phys_segments += nr_phys_segs;
++	return 1;
++}
 +
-+	vma = find_vma(mm, address);
-+	if (!vma)
-+		goto bad_area;
-+	if (vma->vm_start <= address)
-+		goto good_area;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		goto bad_area;
-+	if (error_code & PF_USER) {
-+		/*
-+		 * Accessing the stack below %sp is always a bug.
-+		 * The large cushion allows instructions like enter
-+		 * and pusha to work.  ("enter $65535,$31" pushes
-+		 * 32 pointers and then decrements %sp by 65535.)
-+		 */
-+		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
-+			goto bad_area;
-+	}
-+	if (expand_stack(vma, address))
-+		goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+	si_code = SEGV_ACCERR;
-+	write = 0;
-+	switch (error_code & (PF_PROT|PF_WRITE)) {
-+	default:	/* 3: write, present */
-+		/* fall through */
-+	case PF_WRITE:		/* write, not present */
-+		if (!(vma->vm_flags & VM_WRITE))
-+			goto bad_area;
-+		write++;
-+		break;
-+	case PF_PROT:		/* read, present */
-+		goto bad_area;
-+	case 0:			/* read, not present */
-+		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-+			goto bad_area;
-+	}
++static inline int ll_new_hw_segment(struct request_queue *q,
++				    struct request *req,
++				    struct bio *bio)
++{
++	int nr_hw_segs = bio_hw_segments(q, bio);
++	int nr_phys_segs = bio_phys_segments(q, bio);
 +
-+#ifdef CONFIG_X86_32
-+survive:
-+#endif
-+	/*
-+	 * If for any reason at all we couldn't handle the fault,
-+	 * make sure we exit gracefully rather than endlessly redo
-+	 * the fault.
-+	 */
-+	fault = handle_mm_fault(mm, vma, address, write);
-+	if (unlikely(fault & VM_FAULT_ERROR)) {
-+		if (fault & VM_FAULT_OOM)
-+			goto out_of_memory;
-+		else if (fault & VM_FAULT_SIGBUS)
-+			goto do_sigbus;
-+		BUG();
++	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
++	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
++		req->cmd_flags |= REQ_NOMERGE;
++		if (req == q->last_merge)
++			q->last_merge = NULL;
++		return 0;
 +	}
-+	if (fault & VM_FAULT_MAJOR)
-+		tsk->maj_flt++;
-+	else
-+		tsk->min_flt++;
 +
-+#ifdef CONFIG_X86_32
 +	/*
-+	 * Did it hit the DOS screen memory VA from vm86 mode?
++	 * This will form the start of a new hw segment.  Bump both
++	 * counters.
 +	 */
-+	if (v8086_mode(regs)) {
-+		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
-+		if (bit < 32)
-+			tsk->thread.screen_bitmap |= 1 << bit;
-+	}
-+#endif
-+	up_read(&mm->mmap_sem);
-+	return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+	up_read(&mm->mmap_sem);
++	req->nr_hw_segments += nr_hw_segs;
++	req->nr_phys_segments += nr_phys_segs;
++	return 1;
++}
 +
-+bad_area_nosemaphore:
-+	/* User mode accesses just cause a SIGSEGV */
-+	if (error_code & PF_USER) {
-+		/*
-+		 * It's possible to have interrupts off here.
-+		 */
-+		local_irq_enable();
++int ll_back_merge_fn(struct request_queue *q, struct request *req,
++		     struct bio *bio)
++{
++	unsigned short max_sectors;
++	int len;
 +
-+		/*
-+		 * Valid to do another page fault here because this one came
-+		 * from user space.
-+		 */
-+		if (is_prefetch(regs, address, error_code))
-+			return;
++	if (unlikely(blk_pc_request(req)))
++		max_sectors = q->max_hw_sectors;
++	else
++		max_sectors = q->max_sectors;
 +
-+		if (is_errata100(regs, address))
-+			return;
++	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
++		req->cmd_flags |= REQ_NOMERGE;
++		if (req == q->last_merge)
++			q->last_merge = NULL;
++		return 0;
++	}
++	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
++		blk_recount_segments(q, req->biotail);
++	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
++		blk_recount_segments(q, bio);
++	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
++	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
++	    !BIOVEC_VIRT_OVERSIZE(len)) {
++		int mergeable = ll_new_mergeable(q, req, bio);
 +
-+		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
-+		    printk_ratelimit()) {
-+			printk(
-+#ifdef CONFIG_X86_32
-+			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
-+#else
-+			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
-+#endif
-+			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-+			tsk->comm, task_pid_nr(tsk), address, regs->ip,
-+			regs->sp, error_code);
-+			print_vma_addr(" in ", regs->ip);
-+			printk("\n");
++		if (mergeable) {
++			if (req->nr_hw_segments == 1)
++				req->bio->bi_hw_front_size = len;
++			if (bio->bi_hw_segments == 1)
++				bio->bi_hw_back_size = len;
 +		}
-+
-+		tsk->thread.cr2 = address;
-+		/* Kernel addresses are always protection faults */
-+		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+		tsk->thread.trap_no = 14;
-+		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
-+		return;
++		return mergeable;
 +	}
 +
-+	if (is_f00f_bug(regs, address))
-+		return;
-+
-+no_context:
-+	/* Are we prepared to handle this kernel fault?  */
-+	if (fixup_exception(regs))
-+		return;
-+
-+	/*
-+	 * X86_32
-+	 * Valid to do another page fault here, because if this fault
-+	 * had been triggered by is_prefetch fixup_exception would have
-+	 * handled it.
-+	 *
-+	 * X86_64
-+	 * Hall of shame of CPU/BIOS bugs.
-+	 */
-+	if (is_prefetch(regs, address, error_code))
-+		return;
-+
-+	if (is_errata93(regs, address))
-+		return;
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
-+#ifdef CONFIG_X86_32
-+	bust_spinlocks(1);
-+#else
-+	flags = oops_begin();
-+#endif
++	return ll_new_hw_segment(q, req, bio);
++}
 +
-+	show_fault_oops(regs, error_code, address);
++int ll_front_merge_fn(struct request_queue *q, struct request *req,
++		      struct bio *bio)
++{
++	unsigned short max_sectors;
++	int len;
 +
-+	tsk->thread.cr2 = address;
-+	tsk->thread.trap_no = 14;
-+	tsk->thread.error_code = error_code;
++	if (unlikely(blk_pc_request(req)))
++		max_sectors = q->max_hw_sectors;
++	else
++		max_sectors = q->max_sectors;
 +
-+#ifdef CONFIG_X86_32
-+	die("Oops", regs, error_code);
-+	bust_spinlocks(0);
-+	do_exit(SIGKILL);
-+#else
-+	if (__die("Oops", regs, error_code))
-+		regs = NULL;
-+	/* Executive summary in case the body of the oops scrolled away */
-+	printk(KERN_EMERG "CR2: %016lx\n", address);
-+	oops_end(flags, regs, SIGKILL);
-+#endif
 +
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+	up_read(&mm->mmap_sem);
-+	if (is_global_init(tsk)) {
-+		yield();
-+#ifdef CONFIG_X86_32
-+		down_read(&mm->mmap_sem);
-+		goto survive;
-+#else
-+		goto again;
-+#endif
++	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
++		req->cmd_flags |= REQ_NOMERGE;
++		if (req == q->last_merge)
++			q->last_merge = NULL;
++		return 0;
 +	}
++	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
++	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
++		blk_recount_segments(q, bio);
++	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
++		blk_recount_segments(q, req->bio);
++	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
++	    !BIOVEC_VIRT_OVERSIZE(len)) {
++		int mergeable = ll_new_mergeable(q, req, bio);
 +
-+	printk("VM: killing process %s\n", tsk->comm);
-+	if (error_code & PF_USER)
-+		do_group_exit(SIGKILL);
-+	goto no_context;
-+
-+do_sigbus:
-+	up_read(&mm->mmap_sem);
++		if (mergeable) {
++			if (bio->bi_hw_segments == 1)
++				bio->bi_hw_front_size = len;
++			if (req->nr_hw_segments == 1)
++				req->biotail->bi_hw_back_size = len;
++		}
++		return mergeable;
++	}
 +
-+	/* Kernel mode? Handle exceptions or die */
-+	if (!(error_code & PF_USER))
-+		goto no_context;
-+#ifdef CONFIG_X86_32
-+	/* User space => ok to do another page fault */
-+	if (is_prefetch(regs, address, error_code))
-+		return;
-+#endif
-+	tsk->thread.cr2 = address;
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 14;
-+	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++	return ll_new_hw_segment(q, req, bio);
 +}
 +
-+DEFINE_SPINLOCK(pgd_lock);
-+LIST_HEAD(pgd_list);
-+
-+void vmalloc_sync_all(void)
++static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
++				struct request *next)
 +{
-+#ifdef CONFIG_X86_32
++	int total_phys_segments;
++	int total_hw_segments;
++
 +	/*
-+	 * Note that races in the updates of insync and start aren't
-+	 * problematic: insync can only get set bits added, and updates to
-+	 * start are only improving performance (without affecting correctness
-+	 * if undone).
++	 * First check whether either of the requests is a re-queued
++	 * request.  Can't merge them if they are.
 +	 */
-+	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-+	static unsigned long start = TASK_SIZE;
-+	unsigned long address;
-+
-+	if (SHARED_KERNEL_PMD)
-+		return;
-+
-+	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-+	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-+		if (!test_bit(pgd_index(address), insync)) {
-+			unsigned long flags;
-+			struct page *page;
++	if (req->special || next->special)
++		return 0;
 +
-+			spin_lock_irqsave(&pgd_lock, flags);
-+			list_for_each_entry(page, &pgd_list, lru) {
-+				if (!vmalloc_sync_one(page_address(page),
-+						      address))
-+					break;
-+			}
-+			spin_unlock_irqrestore(&pgd_lock, flags);
-+			if (!page)
-+				set_bit(pgd_index(address), insync);
-+		}
-+		if (address == start && test_bit(pgd_index(address), insync))
-+			start = address + PGDIR_SIZE;
-+	}
-+#else /* CONFIG_X86_64 */
 +	/*
-+	 * Note that races in the updates of insync and start aren't
-+	 * problematic: insync can only get set bits added, and updates to
-+	 * start are only improving performance (without affecting correctness
-+	 * if undone).
++	 * Will it become too large?
 +	 */
-+	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-+	static unsigned long start = VMALLOC_START & PGDIR_MASK;
-+	unsigned long address;
++	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
++		return 0;
 +
-+	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-+		if (!test_bit(pgd_index(address), insync)) {
-+			const pgd_t *pgd_ref = pgd_offset_k(address);
-+			struct page *page;
++	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
++	if (blk_phys_contig_segment(q, req->biotail, next->bio))
++		total_phys_segments--;
 +
-+			if (pgd_none(*pgd_ref))
-+				continue;
-+			spin_lock(&pgd_lock);
-+			list_for_each_entry(page, &pgd_list, lru) {
-+				pgd_t *pgd;
-+				pgd = (pgd_t *)page_address(page) + pgd_index(address);
-+				if (pgd_none(*pgd))
-+					set_pgd(pgd, *pgd_ref);
-+				else
-+					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-+			}
-+			spin_unlock(&pgd_lock);
-+			set_bit(pgd_index(address), insync);
-+		}
-+		if (address == start)
-+			start = address + PGDIR_SIZE;
-+	}
-+	/* Check that there is no need to do the same for the modules area. */
-+	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
-+	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
-+				(__START_KERNEL & PGDIR_MASK)));
-+#endif
-+}
-diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
-deleted file mode 100644
-index a2273d4..0000000
---- a/arch/x86/mm/fault_32.c
-+++ /dev/null
-@@ -1,659 +0,0 @@
--/*
-- *  linux/arch/i386/mm/fault.c
-- *
-- *  Copyright (C) 1995  Linus Torvalds
-- */
--
--#include <linux/signal.h>
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/errno.h>
--#include <linux/string.h>
--#include <linux/types.h>
--#include <linux/ptrace.h>
--#include <linux/mman.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/interrupt.h>
--#include <linux/init.h>
--#include <linux/tty.h>
--#include <linux/vt_kern.h>		/* For unblank_screen() */
--#include <linux/highmem.h>
--#include <linux/bootmem.h>		/* for max_low_pfn */
--#include <linux/vmalloc.h>
--#include <linux/module.h>
--#include <linux/kprobes.h>
--#include <linux/uaccess.h>
--#include <linux/kdebug.h>
--#include <linux/kprobes.h>
--
--#include <asm/system.h>
--#include <asm/desc.h>
--#include <asm/segment.h>
--
--extern void die(const char *,struct pt_regs *,long);
--
--#ifdef CONFIG_KPROBES
--static inline int notify_page_fault(struct pt_regs *regs)
--{
--	int ret = 0;
--
--	/* kprobe_running() needs smp_processor_id() */
--	if (!user_mode_vm(regs)) {
--		preempt_disable();
--		if (kprobe_running() && kprobe_fault_handler(regs, 14))
--			ret = 1;
--		preempt_enable();
--	}
--
--	return ret;
--}
--#else
--static inline int notify_page_fault(struct pt_regs *regs)
--{
--	return 0;
--}
--#endif
--
--/*
-- * Return EIP plus the CS segment base.  The segment limit is also
-- * adjusted, clamped to the kernel/user address space (whichever is
-- * appropriate), and returned in *eip_limit.
-- *
-- * The segment is checked, because it might have been changed by another
-- * task between the original faulting instruction and here.
-- *
-- * If CS is no longer a valid code segment, or if EIP is beyond the
-- * limit, or if it is a kernel address when CS is not a kernel segment,
-- * then the returned value will be greater than *eip_limit.
-- * 
-- * This is slow, but is very rarely executed.
-- */
--static inline unsigned long get_segment_eip(struct pt_regs *regs,
--					    unsigned long *eip_limit)
--{
--	unsigned long eip = regs->eip;
--	unsigned seg = regs->xcs & 0xffff;
--	u32 seg_ar, seg_limit, base, *desc;
--
--	/* Unlikely, but must come before segment checks. */
--	if (unlikely(regs->eflags & VM_MASK)) {
--		base = seg << 4;
--		*eip_limit = base + 0xffff;
--		return base + (eip & 0xffff);
--	}
--
--	/* The standard kernel/user address space limit. */
--	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
--	
--	/* By far the most common cases. */
--	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
--		return eip;
--
--	/* Check the segment exists, is within the current LDT/GDT size,
--	   that kernel/user (ring 0..3) has the appropriate privilege,
--	   that it's a code segment, and get the limit. */
--	__asm__ ("larl %3,%0; lsll %3,%1"
--		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
--	if ((~seg_ar & 0x9800) || eip > seg_limit) {
--		*eip_limit = 0;
--		return 1;	 /* So that returned eip > *eip_limit. */
--	}
--
--	/* Get the GDT/LDT descriptor base. 
--	   When you look for races in this code remember that
--	   LDT and other horrors are only used in user space. */
--	if (seg & (1<<2)) {
--		/* Must lock the LDT while reading it. */
--		mutex_lock(&current->mm->context.lock);
--		desc = current->mm->context.ldt;
--		desc = (void *)desc + (seg & ~7);
--	} else {
--		/* Must disable preemption while reading the GDT. */
-- 		desc = (u32 *)get_cpu_gdt_table(get_cpu());
--		desc = (void *)desc + (seg & ~7);
--	}
--
--	/* Decode the code segment base from the descriptor */
--	base = get_desc_base((unsigned long *)desc);
--
--	if (seg & (1<<2)) { 
--		mutex_unlock(&current->mm->context.lock);
--	} else
--		put_cpu();
--
--	/* Adjust EIP and segment limit, and clamp at the kernel limit.
--	   It's legitimate for segments to wrap at 0xffffffff. */
--	seg_limit += base;
--	if (seg_limit < *eip_limit && seg_limit >= base)
--		*eip_limit = seg_limit;
--	return eip + base;
--}
--
--/* 
-- * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
-- * Check that here and ignore it.
-- */
--static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
--{ 
--	unsigned long limit;
--	unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
--	int scan_more = 1;
--	int prefetch = 0; 
--	int i;
--
--	for (i = 0; scan_more && i < 15; i++) { 
--		unsigned char opcode;
--		unsigned char instr_hi;
--		unsigned char instr_lo;
--
--		if (instr > (unsigned char *)limit)
--			break;
--		if (probe_kernel_address(instr, opcode))
--			break; 
--
--		instr_hi = opcode & 0xf0; 
--		instr_lo = opcode & 0x0f; 
--		instr++;
--
--		switch (instr_hi) { 
--		case 0x20:
--		case 0x30:
--			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
--			scan_more = ((instr_lo & 7) == 0x6);
--			break;
--			
--		case 0x60:
--			/* 0x64 thru 0x67 are valid prefixes in all modes. */
--			scan_more = (instr_lo & 0xC) == 0x4;
--			break;		
--		case 0xF0:
--			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
--			scan_more = !instr_lo || (instr_lo>>1) == 1;
--			break;			
--		case 0x00:
--			/* Prefetch instruction is 0x0F0D or 0x0F18 */
--			scan_more = 0;
--			if (instr > (unsigned char *)limit)
--				break;
--			if (probe_kernel_address(instr, opcode))
--				break;
--			prefetch = (instr_lo == 0xF) &&
--				(opcode == 0x0D || opcode == 0x18);
--			break;			
--		default:
--			scan_more = 0;
--			break;
--		} 
--	}
--	return prefetch;
--}
--
--static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
--			      unsigned long error_code)
--{
--	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
--		     boot_cpu_data.x86 >= 6)) {
--		/* Catch an obscure case of prefetch inside an NX page. */
--		if (nx_enabled && (error_code & 16))
--			return 0;
--		return __is_prefetch(regs, addr);
--	}
--	return 0;
--} 
--
--static noinline void force_sig_info_fault(int si_signo, int si_code,
--	unsigned long address, struct task_struct *tsk)
--{
--	siginfo_t info;
--
--	info.si_signo = si_signo;
--	info.si_errno = 0;
--	info.si_code = si_code;
--	info.si_addr = (void __user *)address;
--	force_sig_info(si_signo, &info, tsk);
--}
--
--fastcall void do_invalid_op(struct pt_regs *, unsigned long);
--
--static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
--{
--	unsigned index = pgd_index(address);
--	pgd_t *pgd_k;
--	pud_t *pud, *pud_k;
--	pmd_t *pmd, *pmd_k;
--
--	pgd += index;
--	pgd_k = init_mm.pgd + index;
--
--	if (!pgd_present(*pgd_k))
--		return NULL;
--
--	/*
--	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
--	 * and redundant with the set_pmd() on non-PAE. As would
--	 * set_pud.
--	 */
--
--	pud = pud_offset(pgd, address);
--	pud_k = pud_offset(pgd_k, address);
--	if (!pud_present(*pud_k))
--		return NULL;
--
--	pmd = pmd_offset(pud, address);
--	pmd_k = pmd_offset(pud_k, address);
--	if (!pmd_present(*pmd_k))
--		return NULL;
--	if (!pmd_present(*pmd)) {
--		set_pmd(pmd, *pmd_k);
--		arch_flush_lazy_mmu_mode();
--	} else
--		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
--	return pmd_k;
--}
--
--/*
-- * Handle a fault on the vmalloc or module mapping area
-- *
-- * This assumes no large pages in there.
-- */
--static inline int vmalloc_fault(unsigned long address)
--{
--	unsigned long pgd_paddr;
--	pmd_t *pmd_k;
--	pte_t *pte_k;
--	/*
--	 * Synchronize this task's top level page-table
--	 * with the 'reference' page table.
--	 *
--	 * Do _not_ use "current" here. We might be inside
--	 * an interrupt in the middle of a task switch..
--	 */
--	pgd_paddr = read_cr3();
--	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
--	if (!pmd_k)
--		return -1;
--	pte_k = pte_offset_kernel(pmd_k, address);
--	if (!pte_present(*pte_k))
--		return -1;
--	return 0;
--}
--
--int show_unhandled_signals = 1;
--
--/*
-- * This routine handles page faults.  It determines the address,
-- * and the problem, and then passes it off to one of the appropriate
-- * routines.
-- *
-- * error_code:
-- *	bit 0 == 0 means no page found, 1 means protection fault
-- *	bit 1 == 0 means read, 1 means write
-- *	bit 2 == 0 means kernel, 1 means user-mode
-- *	bit 3 == 1 means use of reserved bit detected
-- *	bit 4 == 1 means fault was an instruction fetch
-- */
--fastcall void __kprobes do_page_fault(struct pt_regs *regs,
--				      unsigned long error_code)
--{
--	struct task_struct *tsk;
--	struct mm_struct *mm;
--	struct vm_area_struct * vma;
--	unsigned long address;
--	int write, si_code;
--	int fault;
--
--	/*
--	 * We can fault from pretty much anywhere, with unknown IRQ state.
--	 */
--	trace_hardirqs_fixup();
--
--	/* get the address */
--        address = read_cr2();
--
--	tsk = current;
--
--	si_code = SEGV_MAPERR;
--
--	/*
--	 * We fault-in kernel-space virtual memory on-demand. The
--	 * 'reference' page table is init_mm.pgd.
--	 *
--	 * NOTE! We MUST NOT take any locks for this case. We may
--	 * be in an interrupt or a critical region, and should
--	 * only copy the information from the master page table,
--	 * nothing more.
--	 *
--	 * This verifies that the fault happens in kernel space
--	 * (error_code & 4) == 0, and that the fault was not a
--	 * protection error (error_code & 9) == 0.
--	 */
--	if (unlikely(address >= TASK_SIZE)) {
--		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
--			return;
--		if (notify_page_fault(regs))
--			return;
--		/*
--		 * Don't take the mm semaphore here. If we fixup a prefetch
--		 * fault we could otherwise deadlock.
--		 */
--		goto bad_area_nosemaphore;
--	}
--
--	if (notify_page_fault(regs))
--		return;
--
--	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
--	   fault has been handled. */
--	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
--		local_irq_enable();
--
--	mm = tsk->mm;
--
--	/*
--	 * If we're in an interrupt, have no user context or are running in an
--	 * atomic region then we must not take the fault..
--	 */
--	if (in_atomic() || !mm)
--		goto bad_area_nosemaphore;
--
--	/* When running in the kernel we expect faults to occur only to
--	 * addresses in user space.  All other faults represent errors in the
--	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
--	 * erroneous fault occurring in a code path which already holds mmap_sem
--	 * we will deadlock attempting to validate the fault against the
--	 * address space.  Luckily the kernel only validly references user
--	 * space from well defined areas of code, which are listed in the
--	 * exceptions table.
--	 *
--	 * As the vast majority of faults will be valid we will only perform
--	 * the source reference check when there is a possibility of a deadlock.
--	 * Attempt to lock the address space, if we cannot we then validate the
--	 * source.  If this is invalid we can skip the address space check,
--	 * thus avoiding the deadlock.
--	 */
--	if (!down_read_trylock(&mm->mmap_sem)) {
--		if ((error_code & 4) == 0 &&
--		    !search_exception_tables(regs->eip))
--			goto bad_area_nosemaphore;
--		down_read(&mm->mmap_sem);
--	}
--
--	vma = find_vma(mm, address);
--	if (!vma)
--		goto bad_area;
--	if (vma->vm_start <= address)
--		goto good_area;
--	if (!(vma->vm_flags & VM_GROWSDOWN))
--		goto bad_area;
--	if (error_code & 4) {
--		/*
--		 * Accessing the stack below %esp is always a bug.
--		 * The large cushion allows instructions like enter
--		 * and pusha to work.  ("enter $65535,$31" pushes
--		 * 32 pointers and then decrements %esp by 65535.)
--		 */
--		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
--			goto bad_area;
--	}
--	if (expand_stack(vma, address))
--		goto bad_area;
--/*
-- * Ok, we have a good vm_area for this memory access, so
-- * we can handle it..
-- */
--good_area:
--	si_code = SEGV_ACCERR;
--	write = 0;
--	switch (error_code & 3) {
--		default:	/* 3: write, present */
--				/* fall through */
--		case 2:		/* write, not present */
--			if (!(vma->vm_flags & VM_WRITE))
--				goto bad_area;
--			write++;
--			break;
--		case 1:		/* read, present */
--			goto bad_area;
--		case 0:		/* read, not present */
--			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
--				goto bad_area;
--	}
--
-- survive:
--	/*
--	 * If for any reason at all we couldn't handle the fault,
--	 * make sure we exit gracefully rather than endlessly redo
--	 * the fault.
--	 */
--	fault = handle_mm_fault(mm, vma, address, write);
--	if (unlikely(fault & VM_FAULT_ERROR)) {
--		if (fault & VM_FAULT_OOM)
--			goto out_of_memory;
--		else if (fault & VM_FAULT_SIGBUS)
--			goto do_sigbus;
--		BUG();
--	}
--	if (fault & VM_FAULT_MAJOR)
--		tsk->maj_flt++;
--	else
--		tsk->min_flt++;
--
--	/*
--	 * Did it hit the DOS screen memory VA from vm86 mode?
--	 */
--	if (regs->eflags & VM_MASK) {
--		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
--		if (bit < 32)
--			tsk->thread.screen_bitmap |= 1 << bit;
--	}
--	up_read(&mm->mmap_sem);
--	return;
--
--/*
-- * Something tried to access memory that isn't in our memory map..
-- * Fix it, but check if it's kernel or user first..
-- */
--bad_area:
--	up_read(&mm->mmap_sem);
--
--bad_area_nosemaphore:
--	/* User mode accesses just cause a SIGSEGV */
--	if (error_code & 4) {
--		/*
--		 * It's possible to have interrupts off here.
--		 */
--		local_irq_enable();
--
--		/* 
--		 * Valid to do another page fault here because this one came 
--		 * from user space.
--		 */
--		if (is_prefetch(regs, address, error_code))
--			return;
--
--		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
--		    printk_ratelimit()) {
--			printk("%s%s[%d]: segfault at %08lx eip %08lx "
--			    "esp %08lx error %lx\n",
--			    task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
--			    tsk->comm, task_pid_nr(tsk), address, regs->eip,
--			    regs->esp, error_code);
--		}
--		tsk->thread.cr2 = address;
--		/* Kernel addresses are always protection faults */
--		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
--		tsk->thread.trap_no = 14;
--		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
--		return;
--	}
--
--#ifdef CONFIG_X86_F00F_BUG
--	/*
--	 * Pentium F0 0F C7 C8 bug workaround.
--	 */
--	if (boot_cpu_data.f00f_bug) {
--		unsigned long nr;
--		
--		nr = (address - idt_descr.address) >> 3;
--
--		if (nr == 6) {
--			do_invalid_op(regs, 0);
--			return;
--		}
--	}
--#endif
--
--no_context:
--	/* Are we prepared to handle this kernel fault?  */
--	if (fixup_exception(regs))
--		return;
--
--	/* 
--	 * Valid to do another page fault here, because if this fault
--	 * had been triggered by is_prefetch fixup_exception would have 
--	 * handled it.
--	 */
-- 	if (is_prefetch(regs, address, error_code))
-- 		return;
--
--/*
-- * Oops. The kernel tried to access some bad page. We'll have to
-- * terminate things with extreme prejudice.
-- */
--
--	bust_spinlocks(1);
--
--	if (oops_may_print()) {
--		__typeof__(pte_val(__pte(0))) page;
--
--#ifdef CONFIG_X86_PAE
--		if (error_code & 16) {
--			pte_t *pte = lookup_address(address);
--
--			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
--				printk(KERN_CRIT "kernel tried to execute "
--					"NX-protected page - exploit attempt? "
--					"(uid: %d)\n", current->uid);
--		}
--#endif
--		if (address < PAGE_SIZE)
--			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
--					"pointer dereference");
--		else
--			printk(KERN_ALERT "BUG: unable to handle kernel paging"
--					" request");
--		printk(" at virtual address %08lx\n",address);
--		printk(KERN_ALERT "printing eip: %08lx ", regs->eip);
--
--		page = read_cr3();
--		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
--#ifdef CONFIG_X86_PAE
--		printk("*pdpt = %016Lx ", page);
--		if ((page >> PAGE_SHIFT) < max_low_pfn
--		    && page & _PAGE_PRESENT) {
--			page &= PAGE_MASK;
--			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
--			                                         & (PTRS_PER_PMD - 1)];
--			printk(KERN_CONT "*pde = %016Lx ", page);
--			page &= ~_PAGE_NX;
--		}
--#else
--		printk("*pde = %08lx ", page);
--#endif
--
--		/*
--		 * We must not directly access the pte in the highpte
--		 * case if the page table is located in highmem.
--		 * And let's rather not kmap-atomic the pte, just in case
--		 * it's allocated already.
--		 */
--		if ((page >> PAGE_SHIFT) < max_low_pfn
--		    && (page & _PAGE_PRESENT)
--		    && !(page & _PAGE_PSE)) {
--			page &= PAGE_MASK;
--			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
--			                                         & (PTRS_PER_PTE - 1)];
--			printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
--		}
--
--		printk("\n");
--	}
--
--	tsk->thread.cr2 = address;
--	tsk->thread.trap_no = 14;
--	tsk->thread.error_code = error_code;
--	die("Oops", regs, error_code);
--	bust_spinlocks(0);
--	do_exit(SIGKILL);
--
--/*
-- * We ran out of memory, or some other thing happened to us that made
-- * us unable to handle the page fault gracefully.
-- */
--out_of_memory:
--	up_read(&mm->mmap_sem);
--	if (is_global_init(tsk)) {
--		yield();
--		down_read(&mm->mmap_sem);
--		goto survive;
--	}
--	printk("VM: killing process %s\n", tsk->comm);
--	if (error_code & 4)
--		do_group_exit(SIGKILL);
--	goto no_context;
--
--do_sigbus:
--	up_read(&mm->mmap_sem);
--
--	/* Kernel mode? Handle exceptions or die */
--	if (!(error_code & 4))
--		goto no_context;
--
--	/* User space => ok to do another page fault */
--	if (is_prefetch(regs, address, error_code))
--		return;
--
--	tsk->thread.cr2 = address;
--	tsk->thread.error_code = error_code;
--	tsk->thread.trap_no = 14;
--	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
--}
--
--void vmalloc_sync_all(void)
--{
--	/*
--	 * Note that races in the updates of insync and start aren't
--	 * problematic: insync can only get set bits added, and updates to
--	 * start are only improving performance (without affecting correctness
--	 * if undone).
--	 */
--	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
--	static unsigned long start = TASK_SIZE;
--	unsigned long address;
--
--	if (SHARED_KERNEL_PMD)
--		return;
--
--	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
--	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
--		if (!test_bit(pgd_index(address), insync)) {
--			unsigned long flags;
--			struct page *page;
--
--			spin_lock_irqsave(&pgd_lock, flags);
--			for (page = pgd_list; page; page =
--					(struct page *)page->index)
--				if (!vmalloc_sync_one(page_address(page),
--								address)) {
--					BUG_ON(page != pgd_list);
--					break;
--				}
--			spin_unlock_irqrestore(&pgd_lock, flags);
--			if (!page)
--				set_bit(pgd_index(address), insync);
--		}
--		if (address == start && test_bit(pgd_index(address), insync))
--			start = address + PGDIR_SIZE;
--	}
--}
-diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
-deleted file mode 100644
-index 0e26230..0000000
---- a/arch/x86/mm/fault_64.c
-+++ /dev/null
-@@ -1,623 +0,0 @@
--/*
-- *  linux/arch/x86-64/mm/fault.c
-- *
-- *  Copyright (C) 1995  Linus Torvalds
-- *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
-- */
--
--#include <linux/signal.h>
--#include <linux/sched.h>
--#include <linux/kernel.h>
--#include <linux/errno.h>
--#include <linux/string.h>
--#include <linux/types.h>
--#include <linux/ptrace.h>
--#include <linux/mman.h>
--#include <linux/mm.h>
--#include <linux/smp.h>
--#include <linux/interrupt.h>
--#include <linux/init.h>
--#include <linux/tty.h>
--#include <linux/vt_kern.h>		/* For unblank_screen() */
--#include <linux/compiler.h>
--#include <linux/vmalloc.h>
--#include <linux/module.h>
--#include <linux/kprobes.h>
--#include <linux/uaccess.h>
--#include <linux/kdebug.h>
--#include <linux/kprobes.h>
--
--#include <asm/system.h>
--#include <asm/pgalloc.h>
--#include <asm/smp.h>
--#include <asm/tlbflush.h>
--#include <asm/proto.h>
--#include <asm-generic/sections.h>
--
--/* Page fault error code bits */
--#define PF_PROT	(1<<0)		/* or no page found */
--#define PF_WRITE	(1<<1)
--#define PF_USER	(1<<2)
--#define PF_RSVD	(1<<3)
--#define PF_INSTR	(1<<4)
--
--#ifdef CONFIG_KPROBES
--static inline int notify_page_fault(struct pt_regs *regs)
--{
--	int ret = 0;
--
--	/* kprobe_running() needs smp_processor_id() */
--	if (!user_mode(regs)) {
--		preempt_disable();
--		if (kprobe_running() && kprobe_fault_handler(regs, 14))
--			ret = 1;
--		preempt_enable();
--	}
--
--	return ret;
--}
--#else
--static inline int notify_page_fault(struct pt_regs *regs)
--{
--	return 0;
--}
--#endif
--
--/* Sometimes the CPU reports invalid exceptions on prefetch.
--   Check that here and ignore.
--   Opcode checker based on code by Richard Brunner */
--static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
--				unsigned long error_code)
--{ 
--	unsigned char *instr;
--	int scan_more = 1;
--	int prefetch = 0; 
--	unsigned char *max_instr;
--
--	/* If it was a exec fault ignore */
--	if (error_code & PF_INSTR)
--		return 0;
--	
--	instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
--	max_instr = instr + 15;
--
--	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
--		return 0;
--
--	while (scan_more && instr < max_instr) { 
--		unsigned char opcode;
--		unsigned char instr_hi;
--		unsigned char instr_lo;
--
--		if (probe_kernel_address(instr, opcode))
--			break; 
--
--		instr_hi = opcode & 0xf0; 
--		instr_lo = opcode & 0x0f; 
--		instr++;
--
--		switch (instr_hi) { 
--		case 0x20:
--		case 0x30:
--			/* Values 0x26,0x2E,0x36,0x3E are valid x86
--			   prefixes.  In long mode, the CPU will signal
--			   invalid opcode if some of these prefixes are
--			   present so we will never get here anyway */
--			scan_more = ((instr_lo & 7) == 0x6);
--			break;
--			
--		case 0x40:
--			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
--			   Need to figure out under what instruction mode the
--			   instruction was issued ... */
--			/* Could check the LDT for lm, but for now it's good
--			   enough to assume that long mode only uses well known
--			   segments or kernel. */
--			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
--			break;
--			
--		case 0x60:
--			/* 0x64 thru 0x67 are valid prefixes in all modes. */
--			scan_more = (instr_lo & 0xC) == 0x4;
--			break;		
--		case 0xF0:
--			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
--			scan_more = !instr_lo || (instr_lo>>1) == 1;
--			break;			
--		case 0x00:
--			/* Prefetch instruction is 0x0F0D or 0x0F18 */
--			scan_more = 0;
--			if (probe_kernel_address(instr, opcode))
--				break;
--			prefetch = (instr_lo == 0xF) &&
--				(opcode == 0x0D || opcode == 0x18);
--			break;			
--		default:
--			scan_more = 0;
--			break;
--		} 
--	}
--	return prefetch;
--}
--
--static int bad_address(void *p) 
--{ 
--	unsigned long dummy;
--	return probe_kernel_address((unsigned long *)p, dummy);
--} 
--
--void dump_pagetable(unsigned long address)
--{
--	pgd_t *pgd;
--	pud_t *pud;
--	pmd_t *pmd;
--	pte_t *pte;
--
--	pgd = (pgd_t *)read_cr3();
--
--	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); 
--	pgd += pgd_index(address);
--	if (bad_address(pgd)) goto bad;
--	printk("PGD %lx ", pgd_val(*pgd));
--	if (!pgd_present(*pgd)) goto ret; 
--
--	pud = pud_offset(pgd, address);
--	if (bad_address(pud)) goto bad;
--	printk("PUD %lx ", pud_val(*pud));
--	if (!pud_present(*pud))	goto ret;
--
--	pmd = pmd_offset(pud, address);
--	if (bad_address(pmd)) goto bad;
--	printk("PMD %lx ", pmd_val(*pmd));
--	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
--
--	pte = pte_offset_kernel(pmd, address);
--	if (bad_address(pte)) goto bad;
--	printk("PTE %lx", pte_val(*pte)); 
--ret:
--	printk("\n");
--	return;
--bad:
--	printk("BAD\n");
--}
--
--static const char errata93_warning[] = 
--KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
--KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
--KERN_ERR "******* Please consider a BIOS update.\n"
--KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
--
--/* Workaround for K8 erratum #93 & buggy BIOS.
--   BIOS SMM functions are required to use a specific workaround
--   to avoid corruption of the 64bit RIP register on C stepping K8. 
--   A lot of BIOS that didn't get tested properly miss this. 
--   The OS sees this as a page fault with the upper 32bits of RIP cleared.
--   Try to work around it here.
--   Note we only handle faults in kernel here. */
--
--static int is_errata93(struct pt_regs *regs, unsigned long address) 
--{
--	static int warned;
--	if (address != regs->rip)
--		return 0;
--	if ((address >> 32) != 0) 
--		return 0;
--	address |= 0xffffffffUL << 32;
--	if ((address >= (u64)_stext && address <= (u64)_etext) || 
--	    (address >= MODULES_VADDR && address <= MODULES_END)) { 
--		if (!warned) {
--			printk(errata93_warning); 		
--			warned = 1;
--		}
--		regs->rip = address;
--		return 1;
--	}
--	return 0;
--} 
--
--static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
--				 unsigned long error_code)
--{
--	unsigned long flags = oops_begin();
--	struct task_struct *tsk;
--
--	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
--	       current->comm, address);
--	dump_pagetable(address);
--	tsk = current;
--	tsk->thread.cr2 = address;
--	tsk->thread.trap_no = 14;
--	tsk->thread.error_code = error_code;
--	__die("Bad pagetable", regs, error_code);
--	oops_end(flags);
--	do_exit(SIGKILL);
--}
--
--/*
-- * Handle a fault on the vmalloc area
-- *
-- * This assumes no large pages in there.
-- */
--static int vmalloc_fault(unsigned long address)
--{
--	pgd_t *pgd, *pgd_ref;
--	pud_t *pud, *pud_ref;
--	pmd_t *pmd, *pmd_ref;
--	pte_t *pte, *pte_ref;
--
--	/* Copy kernel mappings over when needed. This can also
--	   happen within a race in page table update. In the later
--	   case just flush. */
--
--	pgd = pgd_offset(current->mm ?: &init_mm, address);
--	pgd_ref = pgd_offset_k(address);
--	if (pgd_none(*pgd_ref))
--		return -1;
--	if (pgd_none(*pgd))
--		set_pgd(pgd, *pgd_ref);
--	else
--		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
--
--	/* Below here mismatches are bugs because these lower tables
--	   are shared */
--
--	pud = pud_offset(pgd, address);
--	pud_ref = pud_offset(pgd_ref, address);
--	if (pud_none(*pud_ref))
--		return -1;
--	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
--		BUG();
--	pmd = pmd_offset(pud, address);
--	pmd_ref = pmd_offset(pud_ref, address);
--	if (pmd_none(*pmd_ref))
--		return -1;
--	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
--		BUG();
--	pte_ref = pte_offset_kernel(pmd_ref, address);
--	if (!pte_present(*pte_ref))
--		return -1;
--	pte = pte_offset_kernel(pmd, address);
--	/* Don't use pte_page here, because the mappings can point
--	   outside mem_map, and the NUMA hash lookup cannot handle
--	   that. */
--	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
--		BUG();
--	return 0;
--}
--
--int show_unhandled_signals = 1;
--
--/*
-- * This routine handles page faults.  It determines the address,
-- * and the problem, and then passes it off to one of the appropriate
-- * routines.
-- */
--asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
--					unsigned long error_code)
--{
--	struct task_struct *tsk;
--	struct mm_struct *mm;
--	struct vm_area_struct * vma;
--	unsigned long address;
--	const struct exception_table_entry *fixup;
--	int write, fault;
--	unsigned long flags;
--	siginfo_t info;
--
--	/*
--	 * We can fault from pretty much anywhere, with unknown IRQ state.
--	 */
--	trace_hardirqs_fixup();
--
--	tsk = current;
--	mm = tsk->mm;
--	prefetchw(&mm->mmap_sem);
--
--	/* get the address */
--	address = read_cr2();
--
--	info.si_code = SEGV_MAPERR;
--
--
--	/*
--	 * We fault-in kernel-space virtual memory on-demand. The
--	 * 'reference' page table is init_mm.pgd.
--	 *
--	 * NOTE! We MUST NOT take any locks for this case. We may
--	 * be in an interrupt or a critical region, and should
--	 * only copy the information from the master page table,
--	 * nothing more.
--	 *
--	 * This verifies that the fault happens in kernel space
--	 * (error_code & 4) == 0, and that the fault was not a
--	 * protection error (error_code & 9) == 0.
--	 */
--	if (unlikely(address >= TASK_SIZE64)) {
--		/*
--		 * Don't check for the module range here: its PML4
--		 * is always initialized because it's shared with the main
--		 * kernel text. Only vmalloc may need PML4 syncups.
--		 */
--		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
--		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
--			if (vmalloc_fault(address) >= 0)
--				return;
--		}
--		if (notify_page_fault(regs))
--			return;
--		/*
--		 * Don't take the mm semaphore here. If we fixup a prefetch
--		 * fault we could otherwise deadlock.
--		 */
--		goto bad_area_nosemaphore;
--	}
--
--	if (notify_page_fault(regs))
--		return;
--
--	if (likely(regs->eflags & X86_EFLAGS_IF))
--		local_irq_enable();
--
--	if (unlikely(error_code & PF_RSVD))
--		pgtable_bad(address, regs, error_code);
--
--	/*
--	 * If we're in an interrupt or have no user
--	 * context, we must not take the fault..
--	 */
--	if (unlikely(in_atomic() || !mm))
--		goto bad_area_nosemaphore;
--
--	/*
--	 * User-mode registers count as a user access even for any
--	 * potential system fault or CPU buglet.
--	 */
--	if (user_mode_vm(regs))
--		error_code |= PF_USER;
--
-- again:
--	/* When running in the kernel we expect faults to occur only to
--	 * addresses in user space.  All other faults represent errors in the
--	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
--	 * erroneous fault occurring in a code path which already holds mmap_sem
--	 * we will deadlock attempting to validate the fault against the
--	 * address space.  Luckily the kernel only validly references user
--	 * space from well defined areas of code, which are listed in the
--	 * exceptions table.
--	 *
--	 * As the vast majority of faults will be valid we will only perform
--	 * the source reference check when there is a possibility of a deadlock.
--	 * Attempt to lock the address space, if we cannot we then validate the
--	 * source.  If this is invalid we can skip the address space check,
--	 * thus avoiding the deadlock.
--	 */
--	if (!down_read_trylock(&mm->mmap_sem)) {
--		if ((error_code & PF_USER) == 0 &&
--		    !search_exception_tables(regs->rip))
--			goto bad_area_nosemaphore;
--		down_read(&mm->mmap_sem);
--	}
--
--	vma = find_vma(mm, address);
--	if (!vma)
--		goto bad_area;
--	if (likely(vma->vm_start <= address))
--		goto good_area;
--	if (!(vma->vm_flags & VM_GROWSDOWN))
--		goto bad_area;
--	if (error_code & 4) {
--		/* Allow userspace just enough access below the stack pointer
--		 * to let the 'enter' instruction work.
--		 */
--		if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
--			goto bad_area;
--	}
--	if (expand_stack(vma, address))
--		goto bad_area;
--/*
-- * Ok, we have a good vm_area for this memory access, so
-- * we can handle it..
-- */
--good_area:
--	info.si_code = SEGV_ACCERR;
--	write = 0;
--	switch (error_code & (PF_PROT|PF_WRITE)) {
--		default:	/* 3: write, present */
--			/* fall through */
--		case PF_WRITE:		/* write, not present */
--			if (!(vma->vm_flags & VM_WRITE))
--				goto bad_area;
--			write++;
--			break;
--		case PF_PROT:		/* read, present */
--			goto bad_area;
--		case 0:			/* read, not present */
--			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
--				goto bad_area;
--	}
--
--	/*
--	 * If for any reason at all we couldn't handle the fault,
--	 * make sure we exit gracefully rather than endlessly redo
--	 * the fault.
--	 */
--	fault = handle_mm_fault(mm, vma, address, write);
--	if (unlikely(fault & VM_FAULT_ERROR)) {
--		if (fault & VM_FAULT_OOM)
--			goto out_of_memory;
--		else if (fault & VM_FAULT_SIGBUS)
--			goto do_sigbus;
--		BUG();
--	}
--	if (fault & VM_FAULT_MAJOR)
--		tsk->maj_flt++;
--	else
--		tsk->min_flt++;
--	up_read(&mm->mmap_sem);
--	return;
--
--/*
-- * Something tried to access memory that isn't in our memory map..
-- * Fix it, but check if it's kernel or user first..
-- */
--bad_area:
--	up_read(&mm->mmap_sem);
--
--bad_area_nosemaphore:
--	/* User mode accesses just cause a SIGSEGV */
--	if (error_code & PF_USER) {
--
--		/*
--		 * It's possible to have interrupts off here.
--		 */
--		local_irq_enable();
--
--		if (is_prefetch(regs, address, error_code))
--			return;
--
--		/* Work around K8 erratum #100 K8 in compat mode
--		   occasionally jumps to illegal addresses >4GB.  We
--		   catch this here in the page fault handler because
--		   these addresses are not reachable. Just detect this
--		   case and return.  Any code segment in LDT is
--		   compatibility mode. */
--		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
--		    (address >> 32))
--			return;
--
--		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
--		    printk_ratelimit()) {
--			printk(
--		       "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
--					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
--					tsk->comm, tsk->pid, address, regs->rip,
--					regs->rsp, error_code);
--		}
--       
--		tsk->thread.cr2 = address;
--		/* Kernel addresses are always protection faults */
--		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
--		tsk->thread.trap_no = 14;
--		info.si_signo = SIGSEGV;
--		info.si_errno = 0;
--		/* info.si_code has been set above */
--		info.si_addr = (void __user *)address;
--		force_sig_info(SIGSEGV, &info, tsk);
--		return;
--	}
--
--no_context:
--	
--	/* Are we prepared to handle this kernel fault?  */
--	fixup = search_exception_tables(regs->rip);
--	if (fixup) {
--		regs->rip = fixup->fixup;
--		return;
--	}
--
--	/* 
--	 * Hall of shame of CPU/BIOS bugs.
--	 */
--
-- 	if (is_prefetch(regs, address, error_code))
-- 		return;
--
--	if (is_errata93(regs, address))
--		return; 
--
--/*
-- * Oops. The kernel tried to access some bad page. We'll have to
-- * terminate things with extreme prejudice.
-- */
--
--	flags = oops_begin();
--
--	if (address < PAGE_SIZE)
--		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
--	else
--		printk(KERN_ALERT "Unable to handle kernel paging request");
--	printk(" at %016lx RIP: \n" KERN_ALERT,address);
--	printk_address(regs->rip);
--	dump_pagetable(address);
--	tsk->thread.cr2 = address;
--	tsk->thread.trap_no = 14;
--	tsk->thread.error_code = error_code;
--	__die("Oops", regs, error_code);
--	/* Executive summary in case the body of the oops scrolled away */
--	printk(KERN_EMERG "CR2: %016lx\n", address);
--	oops_end(flags);
--	do_exit(SIGKILL);
--
--/*
-- * We ran out of memory, or some other thing happened to us that made
-- * us unable to handle the page fault gracefully.
-- */
--out_of_memory:
--	up_read(&mm->mmap_sem);
--	if (is_global_init(current)) {
--		yield();
--		goto again;
--	}
--	printk("VM: killing process %s\n", tsk->comm);
--	if (error_code & 4)
--		do_group_exit(SIGKILL);
--	goto no_context;
--
--do_sigbus:
--	up_read(&mm->mmap_sem);
--
--	/* Kernel mode? Handle exceptions or die */
--	if (!(error_code & PF_USER))
--		goto no_context;
--
--	tsk->thread.cr2 = address;
--	tsk->thread.error_code = error_code;
--	tsk->thread.trap_no = 14;
--	info.si_signo = SIGBUS;
--	info.si_errno = 0;
--	info.si_code = BUS_ADRERR;
--	info.si_addr = (void __user *)address;
--	force_sig_info(SIGBUS, &info, tsk);
--	return;
--}
--
--DEFINE_SPINLOCK(pgd_lock);
--LIST_HEAD(pgd_list);
--
--void vmalloc_sync_all(void)
--{
--	/* Note that races in the updates of insync and start aren't 
--	   problematic:
--	   insync can only get set bits added, and updates to start are only
--	   improving performance (without affecting correctness if undone). */
--	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
--	static unsigned long start = VMALLOC_START & PGDIR_MASK;
--	unsigned long address;
--
--	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
--		if (!test_bit(pgd_index(address), insync)) {
--			const pgd_t *pgd_ref = pgd_offset_k(address);
--			struct page *page;
--
--			if (pgd_none(*pgd_ref))
--				continue;
--			spin_lock(&pgd_lock);
--			list_for_each_entry(page, &pgd_list, lru) {
--				pgd_t *pgd;
--				pgd = (pgd_t *)page_address(page) + pgd_index(address);
--				if (pgd_none(*pgd))
--					set_pgd(pgd, *pgd_ref);
--				else
--					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
--			}
--			spin_unlock(&pgd_lock);
--			set_bit(pgd_index(address), insync);
--		}
--		if (address == start)
--			start = address + PGDIR_SIZE;
--	}
--	/* Check that there is no need to do the same for the modules area. */
--	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
--	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == 
--				(__START_KERNEL & PGDIR_MASK)));
--}
-diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
-index 1c3bf95..3d936f2 100644
---- a/arch/x86/mm/highmem_32.c
-+++ b/arch/x86/mm/highmem_32.c
-@@ -18,6 +18,49 @@ void kunmap(struct page *page)
- 	kunmap_high(page);
- }
- 
-+static void debug_kmap_atomic_prot(enum km_type type)
++	if (total_phys_segments > q->max_phys_segments)
++		return 0;
++
++	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
++	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
++		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
++		/*
++		 * propagate the combined length to the end of the requests
++		 */
++		if (req->nr_hw_segments == 1)
++			req->bio->bi_hw_front_size = len;
++		if (next->nr_hw_segments == 1)
++			next->biotail->bi_hw_back_size = len;
++		total_hw_segments--;
++	}
++
++	if (total_hw_segments > q->max_hw_segments)
++		return 0;
++
++	/* Merge is OK... */
++	req->nr_phys_segments = total_phys_segments;
++	req->nr_hw_segments = total_hw_segments;
++	return 1;
++}
++
++/*
++ * Has to be called with the request spinlock acquired
++ */
++static int attempt_merge(struct request_queue *q, struct request *req,
++			  struct request *next)
 +{
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+	static unsigned warn_count = 10;
++	if (!rq_mergeable(req) || !rq_mergeable(next))
++		return 0;
 +
-+	if (unlikely(warn_count == 0))
-+		return;
++	/*
++	 * not contiguous
++	 */
++	if (req->sector + req->nr_sectors != next->sector)
++		return 0;
 +
-+	if (unlikely(in_interrupt())) {
-+		if (in_irq()) {
-+			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-+			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-+			    type != KM_BOUNCE_READ) {
-+				WARN_ON(1);
-+				warn_count--;
-+			}
-+		} else if (!irqs_disabled()) {	/* softirq */
-+			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-+			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-+			    type != KM_SKB_SUNRPC_DATA &&
-+			    type != KM_SKB_DATA_SOFTIRQ &&
-+			    type != KM_BOUNCE_READ) {
-+				WARN_ON(1);
-+				warn_count--;
-+			}
-+		}
-+	}
++	if (rq_data_dir(req) != rq_data_dir(next)
++	    || req->rq_disk != next->rq_disk
++	    || next->special)
++		return 0;
 +
-+	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-+			type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
-+		if (!irqs_disabled()) {
-+			WARN_ON(1);
-+			warn_count--;
-+		}
-+	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-+		if (irq_count() == 0 && !irqs_disabled()) {
-+			WARN_ON(1);
-+			warn_count--;
-+		}
++	/*
++	 * If we are allowed to merge, then append bio list
++	 * from next to rq and release next. merge_requests_fn
++	 * will have updated segment counts, update sector
++	 * counts here.
++	 */
++	if (!ll_merge_requests_fn(q, req, next))
++		return 0;
++
++	/*
++	 * At this point we have either done a back merge
++	 * or front merge. We need the smaller start_time of
++	 * the merged requests to be the current request
++	 * for accounting purposes.
++	 */
++	if (time_after(req->start_time, next->start_time))
++		req->start_time = next->start_time;
++
++	req->biotail->bi_next = next->bio;
++	req->biotail = next->biotail;
++
++	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
++
++	elv_merge_requests(q, req, next);
++
++	if (req->rq_disk) {
++		disk_round_stats(req->rq_disk);
++		req->rq_disk->in_flight--;
 +	}
++
++	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
++
++	__blk_put_request(q, next);
++	return 1;
++}
++
++int attempt_back_merge(struct request_queue *q, struct request *rq)
++{
++	struct request *next = elv_latter_request(q, rq);
++
++	if (next)
++		return attempt_merge(q, rq, next);
++
++	return 0;
++}
++
++int attempt_front_merge(struct request_queue *q, struct request *rq)
++{
++	struct request *prev = elv_former_request(q, rq);
++
++	if (prev)
++		return attempt_merge(q, prev, rq);
++
++	return 0;
++}
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+new file mode 100644
+index 0000000..4df09a1
+--- /dev/null
++++ b/block/blk-settings.c
+@@ -0,0 +1,402 @@
++/*
++ * Functions related to setting various queue properties from drivers
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
++
++#include "blk.h"
++
++unsigned long blk_max_low_pfn, blk_max_pfn;
++EXPORT_SYMBOL(blk_max_low_pfn);
++EXPORT_SYMBOL(blk_max_pfn);
++
++/**
++ * blk_queue_prep_rq - set a prepare_request function for queue
++ * @q:		queue
++ * @pfn:	prepare_request function
++ *
++ * It's possible for a queue to register a prepare_request callback which
++ * is invoked before the request is handed to the request_fn. The goal of
++ * the function is to prepare a request for I/O, it can be used to build a
++ * cdb from the request data for instance.
++ *
++ */
++void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
++{
++	q->prep_rq_fn = pfn;
++}
++
++EXPORT_SYMBOL(blk_queue_prep_rq);
++
++/**
++ * blk_queue_merge_bvec - set a merge_bvec function for queue
++ * @q:		queue
++ * @mbfn:	merge_bvec_fn
++ *
++ * Usually queues have static limitations on the max sectors or segments that
++ * we can put in a request. Stacking drivers may have some settings that
++ * are dynamic, and thus we have to query the queue whether it is ok to
++ * add a new bio_vec to a bio at a given offset or not. If the block device
++ * has such limitations, it needs to register a merge_bvec_fn to control
++ * the size of bio's sent to it. Note that a block device *must* allow a
++ * single page to be added to an empty bio. The block device driver may want
++ * to use the bio_split() function to deal with these bio's. By default
++ * no merge_bvec_fn is defined for a queue, and only the fixed limits are
++ * honored.
++ */
++void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
++{
++	q->merge_bvec_fn = mbfn;
++}
++
++EXPORT_SYMBOL(blk_queue_merge_bvec);
++
++void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
++{
++	q->softirq_done_fn = fn;
++}
++
++EXPORT_SYMBOL(blk_queue_softirq_done);
++
++/**
++ * blk_queue_make_request - define an alternate make_request function for a device
++ * @q:  the request queue for the device to be affected
++ * @mfn: the alternate make_request function
++ *
++ * Description:
++ *    The normal way for &struct bios to be passed to a device
++ *    driver is for them to be collected into requests on a request
++ *    queue, and then to allow the device driver to select requests
++ *    off that queue when it is ready.  This works well for many block
++ *    devices. However some block devices (typically virtual devices
++ *    such as md or lvm) do not benefit from the processing on the
++ *    request queue, and are served best by having the requests passed
++ *    directly to them.  This can be achieved by providing a function
++ *    to blk_queue_make_request().
++ *
++ * Caveat:
++ *    The driver that does this *must* be able to deal appropriately
++ *    with buffers in "highmemory". This can be accomplished by either calling
++ *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
++ *    blk_queue_bounce() to create a buffer in normal memory.
++ **/
++void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
++{
++	/*
++	 * set defaults
++	 */
++	q->nr_requests = BLKDEV_MAX_RQ;
++	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
++	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
++	q->make_request_fn = mfn;
++	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
++	q->backing_dev_info.state = 0;
++	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
++	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
++	blk_queue_hardsect_size(q, 512);
++	blk_queue_dma_alignment(q, 511);
++	blk_queue_congestion_threshold(q);
++	q->nr_batching = BLK_BATCH_REQ;
++
++	q->unplug_thresh = 4;		/* hmm */
++	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
++	if (q->unplug_delay == 0)
++		q->unplug_delay = 1;
++
++	INIT_WORK(&q->unplug_work, blk_unplug_work);
++
++	q->unplug_timer.function = blk_unplug_timeout;
++	q->unplug_timer.data = (unsigned long)q;
++
++	/*
++	 * by default assume old behaviour and bounce for any highmem page
++	 */
++	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
++}
++
++EXPORT_SYMBOL(blk_queue_make_request);
++
++/**
++ * blk_queue_bounce_limit - set bounce buffer limit for queue
++ * @q:  the request queue for the device
++ * @dma_addr:   bus address limit
++ *
++ * Description:
++ *    Different hardware can have different requirements as to what pages
++ *    it can do I/O directly to. A low level driver can call
++ *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
++ *    buffers for doing I/O to pages residing above @page.
++ **/
++void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
++{
++	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
++	int dma = 0;
++
++	q->bounce_gfp = GFP_NOIO;
++#if BITS_PER_LONG == 64
++	/* Assume anything <= 4GB can be handled by IOMMU.
++	   Actually some IOMMUs can handle everything, but I don't
++	   know of a way to test this here. */
++	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
++		dma = 1;
++	q->bounce_pfn = max_low_pfn;
++#else
++	if (bounce_pfn < blk_max_low_pfn)
++		dma = 1;
++	q->bounce_pfn = bounce_pfn;
 +#endif
++	if (dma) {
++		init_emergency_isa_pool();
++		q->bounce_gfp = GFP_NOIO | GFP_DMA;
++		q->bounce_pfn = bounce_pfn;
++	}
 +}
 +
- /*
-  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
-  * no global lock is needed and because the kmap code must perform a global TLB
-@@ -30,8 +73,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
- {
- 	enum fixed_addresses idx;
- 	unsigned long vaddr;
--
- 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++EXPORT_SYMBOL(blk_queue_bounce_limit);
 +
-+	debug_kmap_atomic_prot(type);
++/**
++ * blk_queue_max_sectors - set max sectors for a request for this queue
++ * @q:  the request queue for the device
++ * @max_sectors:  max sectors in the usual 512b unit
++ *
++ * Description:
++ *    Enables a low level driver to set an upper limit on the size of
++ *    received requests.
++ **/
++void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
++{
++	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
++		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
++		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
++	}
 +
- 	pagefault_disable();
- 
- 	if (!PageHighMem(page))
-diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index 6c06d9c..4fbafb4 100644
---- a/arch/x86/mm/hugetlbpage.c
-+++ b/arch/x86/mm/hugetlbpage.c
-@@ -15,6 +15,7 @@
- #include <asm/mman.h>
- #include <asm/tlb.h>
- #include <asm/tlbflush.h>
-+#include <asm/pgalloc.h>
- 
- static unsigned long page_table_shareable(struct vm_area_struct *svma,
- 				struct vm_area_struct *vma,
-@@ -88,7 +89,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
- 
- 	spin_lock(&mm->page_table_lock);
- 	if (pud_none(*pud))
--		pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK);
-+		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
- 	else
- 		put_page(virt_to_page(spte));
- 	spin_unlock(&mm->page_table_lock);
-diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 3c76d19..da524fb 100644
---- a/arch/x86/mm/init_32.c
-+++ b/arch/x86/mm/init_32.c
-@@ -27,7 +27,6 @@
- #include <linux/bootmem.h>
- #include <linux/slab.h>
- #include <linux/proc_fs.h>
--#include <linux/efi.h>
- #include <linux/memory_hotplug.h>
- #include <linux/initrd.h>
- #include <linux/cpumask.h>
-@@ -40,8 +39,10 @@
- #include <asm/fixmap.h>
- #include <asm/e820.h>
- #include <asm/apic.h>
-+#include <asm/bugs.h>
- #include <asm/tlb.h>
- #include <asm/tlbflush.h>
-+#include <asm/pgalloc.h>
- #include <asm/sections.h>
- #include <asm/paravirt.h>
- 
-@@ -50,7 +51,7 @@ unsigned int __VMALLOC_RESERVE = 128 << 20;
- DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
- unsigned long highstart_pfn, highend_pfn;
- 
--static int noinline do_test_wp_bit(void);
-+static noinline int do_test_wp_bit(void);
- 
- /*
-  * Creates a middle page table and puts a pointer to it in the
-@@ -61,26 +62,26 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
- {
- 	pud_t *pud;
- 	pmd_t *pmd_table;
--		
++	if (BLK_DEF_MAX_SECTORS > max_sectors)
++		q->max_hw_sectors = q->max_sectors = max_sectors;
++	else {
++		q->max_sectors = BLK_DEF_MAX_SECTORS;
++		q->max_hw_sectors = max_sectors;
++	}
++}
 +
- #ifdef CONFIG_X86_PAE
- 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
- 		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- 
--		paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
-+		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
- 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
- 		pud = pud_offset(pgd, 0);
--		if (pmd_table != pmd_offset(pud, 0))
--			BUG();
-+		BUG_ON(pmd_table != pmd_offset(pud, 0));
- 	}
- #endif
- 	pud = pud_offset(pgd, 0);
- 	pmd_table = pmd_offset(pud, 0);
++EXPORT_SYMBOL(blk_queue_max_sectors);
 +
- 	return pmd_table;
- }
- 
- /*
-  * Create a page table and place a pointer to it in a middle page
-- * directory entry.
-+ * directory entry:
-  */
- static pte_t * __init one_page_table_init(pmd_t *pmd)
- {
-@@ -90,9 +91,10 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
- #ifdef CONFIG_DEBUG_PAGEALLOC
- 		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
- #endif
--		if (!page_table)
-+		if (!page_table) {
- 			page_table =
- 				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-+		}
- 
- 		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
- 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-@@ -103,22 +105,21 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
- }
- 
- /*
-- * This function initializes a certain range of kernel virtual memory 
-+ * This function initializes a certain range of kernel virtual memory
-  * with new bootmem page tables, everywhere page tables are missing in
-  * the given range.
-- */
--
--/*
-- * NOTE: The pagetables are allocated contiguous on the physical space 
-- * so we can cache the place of the first one and move around without 
++/**
++ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
++ * @q:  the request queue for the device
++ * @max_segments:  max number of segments
 + *
-+ * NOTE: The pagetables are allocated contiguous on the physical space
-+ * so we can cache the place of the first one and move around without
-  * checking the pgd every time.
-  */
--static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
-+static void __init
-+page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
- {
--	pgd_t *pgd;
--	pmd_t *pmd;
- 	int pgd_idx, pmd_idx;
- 	unsigned long vaddr;
-+	pgd_t *pgd;
-+	pmd_t *pmd;
- 
- 	vaddr = start;
- 	pgd_idx = pgd_index(vaddr);
-@@ -128,7 +129,8 @@ static void __init page_table_range_init (unsigned long start, unsigned long end
- 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
- 		pmd = one_md_table_init(pgd);
- 		pmd = pmd + pmd_index(vaddr);
--		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
-+		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
-+							pmd++, pmd_idx++) {
- 			one_page_table_init(pmd);
- 
- 			vaddr += PMD_SIZE;
-@@ -145,17 +147,17 @@ static inline int is_kernel_text(unsigned long addr)
- }
- 
- /*
-- * This maps the physical memory to kernel virtual address space, a total 
-- * of max_low_pfn pages, by creating page tables starting from address 
-- * PAGE_OFFSET.
-+ * This maps the physical memory to kernel virtual address space, a total
-+ * of max_low_pfn pages, by creating page tables starting from address
-+ * PAGE_OFFSET:
-  */
- static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
- {
-+	int pgd_idx, pmd_idx, pte_ofs;
- 	unsigned long pfn;
- 	pgd_t *pgd;
- 	pmd_t *pmd;
- 	pte_t *pte;
--	int pgd_idx, pmd_idx, pte_ofs;
- 
- 	pgd_idx = pgd_index(PAGE_OFFSET);
- 	pgd = pgd_base + pgd_idx;
-@@ -165,29 +167,43 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
- 		pmd = one_md_table_init(pgd);
- 		if (pfn >= max_low_pfn)
- 			continue;
--		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
--			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
- 
--			/* Map with big pages if possible, otherwise create normal page tables. */
-+		for (pmd_idx = 0;
-+		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
-+		     pmd++, pmd_idx++) {
-+			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
++ * Description:
++ *    Enables a low level driver to set an upper limit on the number of
++ *    physical data segments in a request.  This would be the largest sized
++ *    scatter list the driver could handle.
++ **/
++void blk_queue_max_phys_segments(struct request_queue *q,
++				 unsigned short max_segments)
++{
++	if (!max_segments) {
++		max_segments = 1;
++		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
++	}
 +
-+			/*
-+			 * Map with big pages if possible, otherwise
-+			 * create normal page tables:
-+			 */
- 			if (cpu_has_pse) {
--				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
--				if (is_kernel_text(address) || is_kernel_text(address2))
--					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
--				else
--					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-+				unsigned int addr2;
-+				pgprot_t prot = PAGE_KERNEL_LARGE;
++	q->max_phys_segments = max_segments;
++}
 +
-+				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
-+					PAGE_OFFSET + PAGE_SIZE-1;
++EXPORT_SYMBOL(blk_queue_max_phys_segments);
 +
-+				if (is_kernel_text(addr) ||
-+				    is_kernel_text(addr2))
-+					prot = PAGE_KERNEL_LARGE_EXEC;
++/**
++ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
++ * @q:  the request queue for the device
++ * @max_segments:  max number of segments
++ *
++ * Description:
++ *    Enables a low level driver to set an upper limit on the number of
++ *    hw data segments in a request.  This would be the largest number of
++ *    address/length pairs the host adapter can actually give as once
++ *    to the device.
++ **/
++void blk_queue_max_hw_segments(struct request_queue *q,
++			       unsigned short max_segments)
++{
++	if (!max_segments) {
++		max_segments = 1;
++		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
++	}
 +
-+				set_pmd(pmd, pfn_pmd(pfn, prot));
- 
- 				pfn += PTRS_PER_PTE;
--			} else {
--				pte = one_page_table_init(pmd);
--
--				for (pte_ofs = 0;
--				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
--				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
--					if (is_kernel_text(address))
--						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
--					else
--						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
--				}
-+				continue;
-+			}
-+			pte = one_page_table_init(pmd);
++	q->max_hw_segments = max_segments;
++}
 +
-+			for (pte_ofs = 0;
-+			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
-+			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
-+				pgprot_t prot = PAGE_KERNEL;
++EXPORT_SYMBOL(blk_queue_max_hw_segments);
 +
-+				if (is_kernel_text(addr))
-+					prot = PAGE_KERNEL_EXEC;
++/**
++ * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
++ * @q:  the request queue for the device
++ * @max_size:  max size of segment in bytes
++ *
++ * Description:
++ *    Enables a low level driver to set an upper limit on the size of a
++ *    coalesced segment
++ **/
++void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
++{
++	if (max_size < PAGE_CACHE_SIZE) {
++		max_size = PAGE_CACHE_SIZE;
++		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
++	}
 +
-+				set_pte(pte, pfn_pte(pfn, prot));
- 			}
- 		}
- 	}
-@@ -200,57 +216,23 @@ static inline int page_kills_ppro(unsigned long pagenr)
- 	return 0;
- }
- 
--int page_is_ram(unsigned long pagenr)
--{
--	int i;
--	unsigned long addr, end;
--
--	if (efi_enabled) {
--		efi_memory_desc_t *md;
--		void *p;
--
--		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
--			md = p;
--			if (!is_available_memory(md))
--				continue;
--			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
--			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
--
--			if ((pagenr >= addr) && (pagenr < end))
--				return 1;
--		}
--		return 0;
--	}
--
--	for (i = 0; i < e820.nr_map; i++) {
--
--		if (e820.map[i].type != E820_RAM)	/* not usable memory */
--			continue;
--		/*
--		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
--		 *	are not. Notably the 640->1Mb area. We need a sanity
--		 *	check here.
--		 */
--		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
--		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
--		if  ((pagenr >= addr) && (pagenr < end))
--			return 1;
--	}
--	return 0;
--}
--
- #ifdef CONFIG_HIGHMEM
- pte_t *kmap_pte;
- pgprot_t kmap_prot;
- 
--#define kmap_get_fixmap_pte(vaddr)					\
--	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
-+static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
++	q->max_segment_size = max_size;
++}
++
++EXPORT_SYMBOL(blk_queue_max_segment_size);
++
++/**
++ * blk_queue_hardsect_size - set hardware sector size for the queue
++ * @q:  the request queue for the device
++ * @size:  the hardware sector size, in bytes
++ *
++ * Description:
++ *   This should typically be set to the lowest possible sector size
++ *   that the hardware can operate on (possible without reverting to
++ *   even internal read-modify-write operations). Usually the default
++ *   of 512 covers most hardware.
++ **/
++void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 +{
-+	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
-+			vaddr), vaddr), vaddr);
++	q->hardsect_size = size;
 +}
- 
- static void __init kmap_init(void)
- {
- 	unsigned long kmap_vstart;
- 
--	/* cache the first kmap pte */
++
++EXPORT_SYMBOL(blk_queue_hardsect_size);
++
++/*
++ * Returns the minimum that is _not_ zero, unless both are zero.
++ */
++#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
++
++/**
++ * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
++ * @t:	the stacking driver (top)
++ * @b:  the underlying device (bottom)
++ **/
++void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
++{
++	/* zero is "infinity" */
++	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
++	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
++
++	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
++	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
++	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
++	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
++	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
++		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
++}
++
++EXPORT_SYMBOL(blk_queue_stack_limits);
++
++/**
++ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
++ *
++ * @q:  the request queue for the device
++ * @buf:	physically contiguous buffer
++ * @size:	size of the buffer in bytes
++ *
++ * Some devices have excess DMA problems and can't simply discard (or
++ * zero fill) the unwanted piece of the transfer.  They have to have a
++ * real area of memory to transfer it into.  The use case for this is
++ * ATAPI devices in DMA mode.  If the packet command causes a transfer
++ * bigger than the transfer size some HBAs will lock up if there
++ * aren't DMA elements to contain the excess transfer.  What this API
++ * does is adjust the queue so that the buf is always appended
++ * silently to the scatterlist.
++ *
++ * Note: This routine adjusts max_hw_segments to make room for
++ * appending the drain buffer.  If you call
++ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
++ * calling this routine, you must set the limit to one fewer than your
++ * device can support otherwise there won't be room for the drain
++ * buffer.
++ */
++int blk_queue_dma_drain(struct request_queue *q, void *buf,
++				unsigned int size)
++{
++	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
++		return -EINVAL;
++	/* make room for appending the drain */
++	--q->max_hw_segments;
++	--q->max_phys_segments;
++	q->dma_drain_buffer = buf;
++	q->dma_drain_size = size;
++
++	return 0;
++}
++
++EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
++
++/**
++ * blk_queue_segment_boundary - set boundary rules for segment merging
++ * @q:  the request queue for the device
++ * @mask:  the memory boundary mask
++ **/
++void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
++{
++	if (mask < PAGE_CACHE_SIZE - 1) {
++		mask = PAGE_CACHE_SIZE - 1;
++		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
++	}
++
++	q->seg_boundary_mask = mask;
++}
++
++EXPORT_SYMBOL(blk_queue_segment_boundary);
++
++/**
++ * blk_queue_dma_alignment - set dma length and memory alignment
++ * @q:     the request queue for the device
++ * @mask:  alignment mask
++ *
++ * description:
++ *    set required memory and length aligment for direct dma transactions.
++ *    this is used when buiding direct io requests for the queue.
++ *
++ **/
++void blk_queue_dma_alignment(struct request_queue *q, int mask)
++{
++	q->dma_alignment = mask;
++}
++
++EXPORT_SYMBOL(blk_queue_dma_alignment);
++
++/**
++ * blk_queue_update_dma_alignment - update dma length and memory alignment
++ * @q:     the request queue for the device
++ * @mask:  alignment mask
++ *
++ * description:
++ *    update required memory and length aligment for direct dma transactions.
++ *    If the requested alignment is larger than the current alignment, then
++ *    the current queue alignment is updated to the new value, otherwise it
++ *    is left alone.  The design of this is to allow multiple objects
++ *    (driver, device, transport etc) to set their respective
++ *    alignments without having them interfere.
++ *
++ **/
++void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
++{
++	BUG_ON(mask > PAGE_SIZE);
++
++	if (mask > q->dma_alignment)
++		q->dma_alignment = mask;
++}
++
++EXPORT_SYMBOL(blk_queue_update_dma_alignment);
++
++int __init blk_settings_init(void)
++{
++	blk_max_low_pfn = max_low_pfn - 1;
++	blk_max_pfn = max_pfn - 1;
++	return 0;
++}
++subsys_initcall(blk_settings_init);
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+new file mode 100644
+index 0000000..bc28776
+--- /dev/null
++++ b/block/blk-sysfs.c
+@@ -0,0 +1,309 @@
++/*
++ * Functions related to sysfs handling
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/blktrace_api.h>
++
++#include "blk.h"
++
++struct queue_sysfs_entry {
++	struct attribute attr;
++	ssize_t (*show)(struct request_queue *, char *);
++	ssize_t (*store)(struct request_queue *, const char *, size_t);
++};
++
++static ssize_t
++queue_var_show(unsigned int var, char *page)
++{
++	return sprintf(page, "%d\n", var);
++}
++
++static ssize_t
++queue_var_store(unsigned long *var, const char *page, size_t count)
++{
++	char *p = (char *) page;
++
++	*var = simple_strtoul(p, &p, 10);
++	return count;
++}
++
++static ssize_t queue_requests_show(struct request_queue *q, char *page)
++{
++	return queue_var_show(q->nr_requests, (page));
++}
++
++static ssize_t
++queue_requests_store(struct request_queue *q, const char *page, size_t count)
++{
++	struct request_list *rl = &q->rq;
++	unsigned long nr;
++	int ret = queue_var_store(&nr, page, count);
++	if (nr < BLKDEV_MIN_RQ)
++		nr = BLKDEV_MIN_RQ;
++
++	spin_lock_irq(q->queue_lock);
++	q->nr_requests = nr;
++	blk_queue_congestion_threshold(q);
++
++	if (rl->count[READ] >= queue_congestion_on_threshold(q))
++		blk_set_queue_congested(q, READ);
++	else if (rl->count[READ] < queue_congestion_off_threshold(q))
++		blk_clear_queue_congested(q, READ);
++
++	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
++		blk_set_queue_congested(q, WRITE);
++	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
++		blk_clear_queue_congested(q, WRITE);
++
++	if (rl->count[READ] >= q->nr_requests) {
++		blk_set_queue_full(q, READ);
++	} else if (rl->count[READ]+1 <= q->nr_requests) {
++		blk_clear_queue_full(q, READ);
++		wake_up(&rl->wait[READ]);
++	}
++
++	if (rl->count[WRITE] >= q->nr_requests) {
++		blk_set_queue_full(q, WRITE);
++	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
++		blk_clear_queue_full(q, WRITE);
++		wake_up(&rl->wait[WRITE]);
++	}
++	spin_unlock_irq(q->queue_lock);
++	return ret;
++}
++
++static ssize_t queue_ra_show(struct request_queue *q, char *page)
++{
++	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
++
++	return queue_var_show(ra_kb, (page));
++}
++
++static ssize_t
++queue_ra_store(struct request_queue *q, const char *page, size_t count)
++{
++	unsigned long ra_kb;
++	ssize_t ret = queue_var_store(&ra_kb, page, count);
++
++	spin_lock_irq(q->queue_lock);
++	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
++	spin_unlock_irq(q->queue_lock);
++
++	return ret;
++}
++
++static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
++{
++	int max_sectors_kb = q->max_sectors >> 1;
++
++	return queue_var_show(max_sectors_kb, (page));
++}
++
++static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
++{
++	return queue_var_show(q->hardsect_size, page);
++}
++
++static ssize_t
++queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
++{
++	unsigned long max_sectors_kb,
++			max_hw_sectors_kb = q->max_hw_sectors >> 1,
++			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
++	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
++
++	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
++		return -EINVAL;
 +	/*
-+	 * Cache the first kmap pte:
++	 * Take the queue lock to update the readahead and max_sectors
++	 * values synchronously:
 +	 */
- 	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- 	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
- 
-@@ -259,11 +241,11 @@ static void __init kmap_init(void)
++	spin_lock_irq(q->queue_lock);
++	q->max_sectors = max_sectors_kb << 1;
++	spin_unlock_irq(q->queue_lock);
++
++	return ret;
++}
++
++static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
++{
++	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
++
++	return queue_var_show(max_hw_sectors_kb, (page));
++}
++
++
++static struct queue_sysfs_entry queue_requests_entry = {
++	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
++	.show = queue_requests_show,
++	.store = queue_requests_store,
++};
++
++static struct queue_sysfs_entry queue_ra_entry = {
++	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
++	.show = queue_ra_show,
++	.store = queue_ra_store,
++};
++
++static struct queue_sysfs_entry queue_max_sectors_entry = {
++	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
++	.show = queue_max_sectors_show,
++	.store = queue_max_sectors_store,
++};
++
++static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
++	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
++	.show = queue_max_hw_sectors_show,
++};
++
++static struct queue_sysfs_entry queue_iosched_entry = {
++	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
++	.show = elv_iosched_show,
++	.store = elv_iosched_store,
++};
++
++static struct queue_sysfs_entry queue_hw_sector_size_entry = {
++	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
++	.show = queue_hw_sector_size_show,
++};
++
++static struct attribute *default_attrs[] = {
++	&queue_requests_entry.attr,
++	&queue_ra_entry.attr,
++	&queue_max_hw_sectors_entry.attr,
++	&queue_max_sectors_entry.attr,
++	&queue_iosched_entry.attr,
++	&queue_hw_sector_size_entry.attr,
++	NULL,
++};
++
++#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
++
++static ssize_t
++queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
++{
++	struct queue_sysfs_entry *entry = to_queue(attr);
++	struct request_queue *q =
++		container_of(kobj, struct request_queue, kobj);
++	ssize_t res;
++
++	if (!entry->show)
++		return -EIO;
++	mutex_lock(&q->sysfs_lock);
++	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
++		mutex_unlock(&q->sysfs_lock);
++		return -ENOENT;
++	}
++	res = entry->show(q, page);
++	mutex_unlock(&q->sysfs_lock);
++	return res;
++}
++
++static ssize_t
++queue_attr_store(struct kobject *kobj, struct attribute *attr,
++		    const char *page, size_t length)
++{
++	struct queue_sysfs_entry *entry = to_queue(attr);
++	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
++
++	ssize_t res;
++
++	if (!entry->store)
++		return -EIO;
++	mutex_lock(&q->sysfs_lock);
++	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
++		mutex_unlock(&q->sysfs_lock);
++		return -ENOENT;
++	}
++	res = entry->store(q, page, length);
++	mutex_unlock(&q->sysfs_lock);
++	return res;
++}
++
++/**
++ * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
++ * @kobj:    the kobj belonging of the request queue to be released
++ *
++ * Description:
++ *     blk_cleanup_queue is the pair to blk_init_queue() or
++ *     blk_queue_make_request().  It should be called when a request queue is
++ *     being released; typically when a block device is being de-registered.
++ *     Currently, its primary task it to free all the &struct request
++ *     structures that were allocated to the queue and the queue itself.
++ *
++ * Caveat:
++ *     Hopefully the low level driver will have finished any
++ *     outstanding requests first...
++ **/
++static void blk_release_queue(struct kobject *kobj)
++{
++	struct request_queue *q =
++		container_of(kobj, struct request_queue, kobj);
++	struct request_list *rl = &q->rq;
++
++	blk_sync_queue(q);
++
++	if (rl->rq_pool)
++		mempool_destroy(rl->rq_pool);
++
++	if (q->queue_tags)
++		__blk_queue_free_tags(q);
++
++	blk_trace_shutdown(q);
++
++	bdi_destroy(&q->backing_dev_info);
++	kmem_cache_free(blk_requestq_cachep, q);
++}
++
++static struct sysfs_ops queue_sysfs_ops = {
++	.show	= queue_attr_show,
++	.store	= queue_attr_store,
++};
++
++struct kobj_type blk_queue_ktype = {
++	.sysfs_ops	= &queue_sysfs_ops,
++	.default_attrs	= default_attrs,
++	.release	= blk_release_queue,
++};
++
++int blk_register_queue(struct gendisk *disk)
++{
++	int ret;
++
++	struct request_queue *q = disk->queue;
++
++	if (!q || !q->request_fn)
++		return -ENXIO;
++
++	ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
++			  "%s", "queue");
++	if (ret < 0)
++		return ret;
++
++	kobject_uevent(&q->kobj, KOBJ_ADD);
++
++	ret = elv_register_queue(q);
++	if (ret) {
++		kobject_uevent(&q->kobj, KOBJ_REMOVE);
++		kobject_del(&q->kobj);
++		return ret;
++	}
++
++	return 0;
++}
++
++void blk_unregister_queue(struct gendisk *disk)
++{
++	struct request_queue *q = disk->queue;
++
++	if (q && q->request_fn) {
++		elv_unregister_queue(q);
++
++		kobject_uevent(&q->kobj, KOBJ_REMOVE);
++		kobject_del(&q->kobj);
++		kobject_put(&disk->dev.kobj);
++	}
++}
+diff --git a/block/blk-tag.c b/block/blk-tag.c
+new file mode 100644
+index 0000000..d1fd300
+--- /dev/null
++++ b/block/blk-tag.c
+@@ -0,0 +1,396 @@
++/*
++ * Functions related to tagged command queuing
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++
++/**
++ * blk_queue_find_tag - find a request by its tag and queue
++ * @q:	 The request queue for the device
++ * @tag: The tag of the request
++ *
++ * Notes:
++ *    Should be used when a device returns a tag and you want to match
++ *    it with a request.
++ *
++ *    no locks need be held.
++ **/
++struct request *blk_queue_find_tag(struct request_queue *q, int tag)
++{
++	return blk_map_queue_find_tag(q->queue_tags, tag);
++}
++
++EXPORT_SYMBOL(blk_queue_find_tag);
++
++/**
++ * __blk_free_tags - release a given set of tag maintenance info
++ * @bqt:	the tag map to free
++ *
++ * Tries to free the specified @bqt at .  Returns true if it was
++ * actually freed and false if there are still references using it
++ */
++static int __blk_free_tags(struct blk_queue_tag *bqt)
++{
++	int retval;
++
++	retval = atomic_dec_and_test(&bqt->refcnt);
++	if (retval) {
++		BUG_ON(bqt->busy);
++
++		kfree(bqt->tag_index);
++		bqt->tag_index = NULL;
++
++		kfree(bqt->tag_map);
++		bqt->tag_map = NULL;
++
++		kfree(bqt);
++	}
++
++	return retval;
++}
++
++/**
++ * __blk_queue_free_tags - release tag maintenance info
++ * @q:  the request queue for the device
++ *
++ *  Notes:
++ *    blk_cleanup_queue() will take care of calling this function, if tagging
++ *    has been used. So there's no need to call this directly.
++ **/
++void __blk_queue_free_tags(struct request_queue *q)
++{
++	struct blk_queue_tag *bqt = q->queue_tags;
++
++	if (!bqt)
++		return;
++
++	__blk_free_tags(bqt);
++
++	q->queue_tags = NULL;
++	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
++}
++
++/**
++ * blk_free_tags - release a given set of tag maintenance info
++ * @bqt:	the tag map to free
++ *
++ * For externally managed @bqt@ frees the map.  Callers of this
++ * function must guarantee to have released all the queues that
++ * might have been using this tag map.
++ */
++void blk_free_tags(struct blk_queue_tag *bqt)
++{
++	if (unlikely(!__blk_free_tags(bqt)))
++		BUG();
++}
++EXPORT_SYMBOL(blk_free_tags);
++
++/**
++ * blk_queue_free_tags - release tag maintenance info
++ * @q:  the request queue for the device
++ *
++ *  Notes:
++ *	This is used to disabled tagged queuing to a device, yet leave
++ *	queue in function.
++ **/
++void blk_queue_free_tags(struct request_queue *q)
++{
++	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
++}
++
++EXPORT_SYMBOL(blk_queue_free_tags);
++
++static int
++init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
++{
++	struct request **tag_index;
++	unsigned long *tag_map;
++	int nr_ulongs;
++
++	if (q && depth > q->nr_requests * 2) {
++		depth = q->nr_requests * 2;
++		printk(KERN_ERR "%s: adjusted depth to %d\n",
++				__FUNCTION__, depth);
++	}
++
++	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
++	if (!tag_index)
++		goto fail;
++
++	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
++	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
++	if (!tag_map)
++		goto fail;
++
++	tags->real_max_depth = depth;
++	tags->max_depth = depth;
++	tags->tag_index = tag_index;
++	tags->tag_map = tag_map;
++
++	return 0;
++fail:
++	kfree(tag_index);
++	return -ENOMEM;
++}
++
++static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
++						   int depth)
++{
++	struct blk_queue_tag *tags;
++
++	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
++	if (!tags)
++		goto fail;
++
++	if (init_tag_map(q, tags, depth))
++		goto fail;
++
++	tags->busy = 0;
++	atomic_set(&tags->refcnt, 1);
++	return tags;
++fail:
++	kfree(tags);
++	return NULL;
++}
++
++/**
++ * blk_init_tags - initialize the tag info for an external tag map
++ * @depth:	the maximum queue depth supported
++ * @tags: the tag to use
++ **/
++struct blk_queue_tag *blk_init_tags(int depth)
++{
++	return __blk_queue_init_tags(NULL, depth);
++}
++EXPORT_SYMBOL(blk_init_tags);
++
++/**
++ * blk_queue_init_tags - initialize the queue tag info
++ * @q:  the request queue for the device
++ * @depth:  the maximum queue depth supported
++ * @tags: the tag to use
++ **/
++int blk_queue_init_tags(struct request_queue *q, int depth,
++			struct blk_queue_tag *tags)
++{
++	int rc;
++
++	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
++
++	if (!tags && !q->queue_tags) {
++		tags = __blk_queue_init_tags(q, depth);
++
++		if (!tags)
++			goto fail;
++	} else if (q->queue_tags) {
++		if ((rc = blk_queue_resize_tags(q, depth)))
++			return rc;
++		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
++		return 0;
++	} else
++		atomic_inc(&tags->refcnt);
++
++	/*
++	 * assign it, all done
++	 */
++	q->queue_tags = tags;
++	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
++	INIT_LIST_HEAD(&q->tag_busy_list);
++	return 0;
++fail:
++	kfree(tags);
++	return -ENOMEM;
++}
++
++EXPORT_SYMBOL(blk_queue_init_tags);
++
++/**
++ * blk_queue_resize_tags - change the queueing depth
++ * @q:  the request queue for the device
++ * @new_depth: the new max command queueing depth
++ *
++ *  Notes:
++ *    Must be called with the queue lock held.
++ **/
++int blk_queue_resize_tags(struct request_queue *q, int new_depth)
++{
++	struct blk_queue_tag *bqt = q->queue_tags;
++	struct request **tag_index;
++	unsigned long *tag_map;
++	int max_depth, nr_ulongs;
++
++	if (!bqt)
++		return -ENXIO;
++
++	/*
++	 * if we already have large enough real_max_depth.  just
++	 * adjust max_depth.  *NOTE* as requests with tag value
++	 * between new_depth and real_max_depth can be in-flight, tag
++	 * map can not be shrunk blindly here.
++	 */
++	if (new_depth <= bqt->real_max_depth) {
++		bqt->max_depth = new_depth;
++		return 0;
++	}
++
++	/*
++	 * Currently cannot replace a shared tag map with a new
++	 * one, so error out if this is the case
++	 */
++	if (atomic_read(&bqt->refcnt) != 1)
++		return -EBUSY;
++
++	/*
++	 * save the old state info, so we can copy it back
++	 */
++	tag_index = bqt->tag_index;
++	tag_map = bqt->tag_map;
++	max_depth = bqt->real_max_depth;
++
++	if (init_tag_map(q, bqt, new_depth))
++		return -ENOMEM;
++
++	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
++	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
++	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
++
++	kfree(tag_index);
++	kfree(tag_map);
++	return 0;
++}
++
++EXPORT_SYMBOL(blk_queue_resize_tags);
++
++/**
++ * blk_queue_end_tag - end tag operations for a request
++ * @q:  the request queue for the device
++ * @rq: the request that has completed
++ *
++ *  Description:
++ *    Typically called when end_that_request_first() returns 0, meaning
++ *    all transfers have been done for a request. It's important to call
++ *    this function before end_that_request_last(), as that will put the
++ *    request back on the free list thus corrupting the internal tag list.
++ *
++ *  Notes:
++ *   queue lock must be held.
++ **/
++void blk_queue_end_tag(struct request_queue *q, struct request *rq)
++{
++	struct blk_queue_tag *bqt = q->queue_tags;
++	int tag = rq->tag;
++
++	BUG_ON(tag == -1);
++
++	if (unlikely(tag >= bqt->real_max_depth))
++		/*
++		 * This can happen after tag depth has been reduced.
++		 * FIXME: how about a warning or info message here?
++		 */
++		return;
++
++	list_del_init(&rq->queuelist);
++	rq->cmd_flags &= ~REQ_QUEUED;
++	rq->tag = -1;
++
++	if (unlikely(bqt->tag_index[tag] == NULL))
++		printk(KERN_ERR "%s: tag %d is missing\n",
++		       __FUNCTION__, tag);
++
++	bqt->tag_index[tag] = NULL;
++
++	if (unlikely(!test_bit(tag, bqt->tag_map))) {
++		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
++		       __FUNCTION__, tag);
++		return;
++	}
++	/*
++	 * The tag_map bit acts as a lock for tag_index[bit], so we need
++	 * unlock memory barrier semantics.
++	 */
++	clear_bit_unlock(tag, bqt->tag_map);
++	bqt->busy--;
++}
++
++EXPORT_SYMBOL(blk_queue_end_tag);
++
++/**
++ * blk_queue_start_tag - find a free tag and assign it
++ * @q:  the request queue for the device
++ * @rq:  the block request that needs tagging
++ *
++ *  Description:
++ *    This can either be used as a stand-alone helper, or possibly be
++ *    assigned as the queue &prep_rq_fn (in which case &struct request
++ *    automagically gets a tag assigned). Note that this function
++ *    assumes that any type of request can be queued! if this is not
++ *    true for your device, you must check the request type before
++ *    calling this function.  The request will also be removed from
++ *    the request queue, so it's the drivers responsibility to readd
++ *    it if it should need to be restarted for some reason.
++ *
++ *  Notes:
++ *   queue lock must be held.
++ **/
++int blk_queue_start_tag(struct request_queue *q, struct request *rq)
++{
++	struct blk_queue_tag *bqt = q->queue_tags;
++	int tag;
++
++	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
++		printk(KERN_ERR 
++		       "%s: request %p for device [%s] already tagged %d",
++		       __FUNCTION__, rq,
++		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
++		BUG();
++	}
++
++	/*
++	 * Protect against shared tag maps, as we may not have exclusive
++	 * access to the tag map.
++	 */
++	do {
++		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
++		if (tag >= bqt->max_depth)
++			return 1;
++
++	} while (test_and_set_bit_lock(tag, bqt->tag_map));
++	/*
++	 * We need lock ordering semantics given by test_and_set_bit_lock.
++	 * See blk_queue_end_tag for details.
++	 */
++
++	rq->cmd_flags |= REQ_QUEUED;
++	rq->tag = tag;
++	bqt->tag_index[tag] = rq;
++	blkdev_dequeue_request(rq);
++	list_add(&rq->queuelist, &q->tag_busy_list);
++	bqt->busy++;
++	return 0;
++}
++
++EXPORT_SYMBOL(blk_queue_start_tag);
++
++/**
++ * blk_queue_invalidate_tags - invalidate all pending tags
++ * @q:  the request queue for the device
++ *
++ *  Description:
++ *   Hardware conditions may dictate a need to stop all pending requests.
++ *   In this case, we will safely clear the block side of the tag queue and
++ *   readd all requests to the request queue in the right order.
++ *
++ *  Notes:
++ *   queue lock must be held.
++ **/
++void blk_queue_invalidate_tags(struct request_queue *q)
++{
++	struct list_head *tmp, *n;
++
++	list_for_each_safe(tmp, n, &q->tag_busy_list)
++		blk_requeue_request(q, list_entry_rq(tmp));
++}
++
++EXPORT_SYMBOL(blk_queue_invalidate_tags);
+diff --git a/block/blk.h b/block/blk.h
+new file mode 100644
+index 0000000..ec898dd
+--- /dev/null
++++ b/block/blk.h
+@@ -0,0 +1,53 @@
++#ifndef BLK_INTERNAL_H
++#define BLK_INTERNAL_H
++
++/* Amount of time in which a process may batch requests */
++#define BLK_BATCH_TIME	(HZ/50UL)
++
++/* Number of requests a "batching" process may submit */
++#define BLK_BATCH_REQ	32
++
++extern struct kmem_cache *blk_requestq_cachep;
++extern struct kobj_type blk_queue_ktype;
++
++void rq_init(struct request_queue *q, struct request *rq);
++void init_request_from_bio(struct request *req, struct bio *bio);
++void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
++			struct bio *bio);
++void __blk_queue_free_tags(struct request_queue *q);
++
++void blk_unplug_work(struct work_struct *work);
++void blk_unplug_timeout(unsigned long data);
++
++struct io_context *current_io_context(gfp_t gfp_flags, int node);
++
++int ll_back_merge_fn(struct request_queue *q, struct request *req,
++		     struct bio *bio);
++int ll_front_merge_fn(struct request_queue *q, struct request *req, 
++		      struct bio *bio);
++int attempt_back_merge(struct request_queue *q, struct request *rq);
++int attempt_front_merge(struct request_queue *q, struct request *rq);
++void blk_recalc_rq_segments(struct request *rq);
++void blk_recalc_rq_sectors(struct request *rq, int nsect);
++
++void blk_queue_congestion_threshold(struct request_queue *q);
++
++/*
++ * Return the threshold (number of used requests) at which the queue is
++ * considered to be congested.  It include a little hysteresis to keep the
++ * context switch rate down.
++ */
++static inline int queue_congestion_on_threshold(struct request_queue *q)
++{
++	return q->nr_congestion_on;
++}
++
++/*
++ * The threshold at which a queue is considered to be uncongested
++ */
++static inline int queue_congestion_off_threshold(struct request_queue *q)
++{
++	return q->nr_congestion_off;
++}
++
++#endif
+diff --git a/block/blktrace.c b/block/blktrace.c
+index 9b4da4a..568588c 100644
+--- a/block/blktrace.c
++++ b/block/blktrace.c
+@@ -235,7 +235,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
+ 	kfree(bt);
+ }
  
- static void __init permanent_kmaps_init(pgd_t *pgd_base)
+-static int blk_trace_remove(struct request_queue *q)
++int blk_trace_remove(struct request_queue *q)
  {
-+	unsigned long vaddr;
- 	pgd_t *pgd;
- 	pud_t *pud;
- 	pmd_t *pmd;
- 	pte_t *pte;
--	unsigned long vaddr;
- 
- 	vaddr = PKMAP_BASE;
- 	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-@@ -272,7 +254,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
- 	pud = pud_offset(pgd, vaddr);
- 	pmd = pmd_offset(pud, vaddr);
- 	pte = pte_offset_kernel(pmd, vaddr);
--	pkmap_page_table = pte;	
-+	pkmap_page_table = pte;
- }
+ 	struct blk_trace *bt;
  
- static void __meminit free_new_highpage(struct page *page)
-@@ -291,7 +273,8 @@ void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
- 		SetPageReserved(page);
- }
+@@ -249,6 +249,7 @@ static int blk_trace_remove(struct request_queue *q)
  
--static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
-+static int __meminit
-+add_one_highpage_hotplug(struct page *page, unsigned long pfn)
- {
- 	free_new_highpage(page);
- 	totalram_pages++;
-@@ -299,6 +282,7 @@ static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long p
- 	max_mapnr = max(pfn, max_mapnr);
- #endif
- 	num_physpages++;
-+
  	return 0;
  }
++EXPORT_SYMBOL_GPL(blk_trace_remove);
  
-@@ -306,7 +290,7 @@ static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long p
-  * Not currently handling the NUMA case.
-  * Assuming single node and all memory that
-  * has been added dynamically that would be
-- * onlined here is in HIGHMEM
-+ * onlined here is in HIGHMEM.
+ static int blk_dropped_open(struct inode *inode, struct file *filp)
+ {
+@@ -316,18 +317,17 @@ static struct rchan_callbacks blk_relay_callbacks = {
+ /*
+  * Setup everything required to start tracing
   */
- void __meminit online_page(struct page *page)
+-int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
++int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ 			struct blk_user_trace_setup *buts)
  {
-@@ -314,13 +298,11 @@ void __meminit online_page(struct page *page)
- 	add_one_highpage_hotplug(page, page_to_pfn(page));
- }
+ 	struct blk_trace *old_bt, *bt = NULL;
+ 	struct dentry *dir = NULL;
+-	char b[BDEVNAME_SIZE];
+ 	int ret, i;
  
--
--#ifdef CONFIG_NUMA
--extern void set_highmem_pages_init(int);
--#else
-+#ifndef CONFIG_NUMA
- static void __init set_highmem_pages_init(int bad_ppro)
- {
- 	int pfn;
-+
- 	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
- 		/*
- 		 * Holes under sparsemem might not have no mem_map[]:
-@@ -330,23 +312,18 @@ static void __init set_highmem_pages_init(int bad_ppro)
- 	}
- 	totalram_pages += totalhigh_pages;
- }
--#endif /* CONFIG_FLATMEM */
-+#endif /* !CONFIG_NUMA */
+ 	if (!buts->buf_size || !buts->buf_nr)
+ 		return -EINVAL;
  
- #else
--#define kmap_init() do { } while (0)
--#define permanent_kmaps_init(pgd_base) do { } while (0)
--#define set_highmem_pages_init(bad_ppro) do { } while (0)
-+# define kmap_init()				do { } while (0)
-+# define permanent_kmaps_init(pgd_base)		do { } while (0)
-+# define set_highmem_pages_init(bad_ppro)	do { } while (0)
- #endif /* CONFIG_HIGHMEM */
+-	strcpy(buts->name, bdevname(bdev, b));
++	strcpy(buts->name, name);
  
--unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-+pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
- EXPORT_SYMBOL(__PAGE_KERNEL);
--unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
+ 	/*
+ 	 * some device names have larger paths - convert the slashes
+@@ -352,7 +352,7 @@ int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+ 		goto err;
  
--#ifdef CONFIG_NUMA
--extern void __init remap_numa_kva(void);
--#else
--#define remap_numa_kva() do {} while (0)
--#endif
-+pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
+ 	bt->dir = dir;
+-	bt->dev = bdev->bd_dev;
++	bt->dev = dev;
+ 	atomic_set(&bt->dropped, 0);
  
- void __init native_pagetable_setup_start(pgd_t *base)
- {
-@@ -372,7 +349,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
- 	memset(&base[USER_PTRS_PER_PGD], 0,
- 	       KERNEL_PGD_PTRS * sizeof(pgd_t));
- #else
--	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
-+	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
- #endif
+ 	ret = -EIO;
+@@ -399,8 +399,8 @@ err:
+ 	return ret;
  }
  
-@@ -410,10 +387,10 @@ void __init native_pagetable_setup_done(pgd_t *base)
-  * be partially populated, and so it avoids stomping on any existing
-  * mappings.
-  */
--static void __init pagetable_init (void)
-+static void __init pagetable_init(void)
+-static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+-			   char __user *arg)
++int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
++		    char __user *arg)
  {
--	unsigned long vaddr, end;
- 	pgd_t *pgd_base = swapper_pg_dir;
-+	unsigned long vaddr, end;
- 
- 	paravirt_pagetable_setup_start(pgd_base);
- 
-@@ -435,9 +412,11 @@ static void __init pagetable_init (void)
- 	 * Fixed mappings, only the page table structure has to be
- 	 * created - mappings will be set by set_fixmap():
- 	 */
-+	early_ioremap_clear();
- 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
- 	page_table_range_init(vaddr, end, pgd_base);
-+	early_ioremap_reset();
+ 	struct blk_user_trace_setup buts;
+ 	int ret;
+@@ -409,7 +409,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+ 	if (ret)
+ 		return -EFAULT;
  
- 	permanent_kmaps_init(pgd_base);
+-	ret = do_blk_trace_setup(q, bdev, &buts);
++	ret = do_blk_trace_setup(q, name, dev, &buts);
+ 	if (ret)
+ 		return ret;
  
-@@ -450,7 +429,7 @@ static void __init pagetable_init (void)
-  * driver might have split up a kernel 4MB mapping.
-  */
- char __nosavedata swsusp_pg_dir[PAGE_SIZE]
--	__attribute__ ((aligned (PAGE_SIZE)));
-+	__attribute__ ((aligned(PAGE_SIZE)));
+@@ -418,8 +418,9 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
  
- static inline void save_pg_dir(void)
- {
-@@ -462,7 +441,7 @@ static inline void save_pg_dir(void)
+ 	return 0;
  }
- #endif
++EXPORT_SYMBOL_GPL(blk_trace_setup);
  
--void zap_low_mappings (void)
-+void zap_low_mappings(void)
+-static int blk_trace_startstop(struct request_queue *q, int start)
++int blk_trace_startstop(struct request_queue *q, int start)
  {
- 	int i;
+ 	struct blk_trace *bt;
+ 	int ret;
+@@ -452,6 +453,7 @@ static int blk_trace_startstop(struct request_queue *q, int start)
  
-@@ -474,22 +453,24 @@ void zap_low_mappings (void)
- 	 * Note that "pgd_clear()" doesn't do it for
- 	 * us, because pgd_clear() is a no-op on i386.
- 	 */
--	for (i = 0; i < USER_PTRS_PER_PGD; i++)
-+	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
- #ifdef CONFIG_X86_PAE
- 		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
- #else
- 		set_pgd(swapper_pg_dir+i, __pgd(0));
- #endif
-+	}
- 	flush_tlb_all();
+ 	return ret;
  }
++EXPORT_SYMBOL_GPL(blk_trace_startstop);
  
--int nx_enabled = 0;
-+int nx_enabled;
-+
-+pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
-+EXPORT_SYMBOL_GPL(__supported_pte_mask);
- 
- #ifdef CONFIG_X86_PAE
+ /**
+  * blk_trace_ioctl: - handle the ioctls associated with tracing
+@@ -464,6 +466,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
+ {
+ 	struct request_queue *q;
+ 	int ret, start = 0;
++	char b[BDEVNAME_SIZE];
  
--static int disable_nx __initdata = 0;
--u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
--EXPORT_SYMBOL_GPL(__supported_pte_mask);
-+static int disable_nx __initdata;
+ 	q = bdev_get_queue(bdev);
+ 	if (!q)
+@@ -473,7 +476,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  
- /*
-  * noexec = on|off
-@@ -506,11 +487,14 @@ static int __init noexec_setup(char *str)
- 			__supported_pte_mask |= _PAGE_NX;
- 			disable_nx = 0;
+ 	switch (cmd) {
+ 	case BLKTRACESETUP:
+-		ret = blk_trace_setup(q, bdev, arg);
++		strcpy(b, bdevname(bdev, b));
++		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
+ 		break;
+ 	case BLKTRACESTART:
+ 		start = 1;
+diff --git a/block/bsg.c b/block/bsg.c
+index 8e181ab..8917c51 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -279,6 +279,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
+ 			goto out;
  		}
--	} else if (!strcmp(str,"off")) {
--		disable_nx = 1;
--		__supported_pte_mask &= ~_PAGE_NX;
--	} else
--		return -EINVAL;
-+	} else {
-+		if (!strcmp(str, "off")) {
-+			disable_nx = 1;
-+			__supported_pte_mask &= ~_PAGE_NX;
-+		} else {
-+			return -EINVAL;
-+		}
-+	}
+ 		rq->next_rq = next_rq;
++		next_rq->cmd_type = rq->cmd_type;
  
- 	return 0;
- }
-@@ -522,6 +506,7 @@ static void __init set_nx(void)
+ 		dxferp = (void*)(unsigned long)hdr->din_xferp;
+ 		ret =  blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+@@ -445,6 +446,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
+ 	else
+ 		hdr->dout_resid = rq->data_len;
  
- 	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
- 		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++	/*
++	 * If the request generated a negative error number, return it
++	 * (providing we aren't already returning an error); if it's
++	 * just a protocol response (i.e. non negative), that gets
++	 * processed above.
++	 */
++	if (!ret && rq->errors < 0)
++		ret = rq->errors;
 +
- 		if ((v[3] & (1 << 20)) && !disable_nx) {
- 			rdmsr(MSR_EFER, l, h);
- 			l |= EFER_NX;
-@@ -531,35 +516,6 @@ static void __init set_nx(void)
- 		}
- 	}
- }
--
--/*
-- * Enables/disables executability of a given kernel page and
-- * returns the previous setting.
-- */
--int __init set_kernel_exec(unsigned long vaddr, int enable)
--{
--	pte_t *pte;
--	int ret = 1;
--
--	if (!nx_enabled)
--		goto out;
--
--	pte = lookup_address(vaddr);
--	BUG_ON(!pte);
--
--	if (!pte_exec_kernel(*pte))
--		ret = 0;
--
--	if (enable)
--		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
--	else
--		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
--	pte_update_defer(&init_mm, vaddr, pte);
--	__flush_tlb_all();
--out:
--	return ret;
--}
--
- #endif
- 
- /*
-@@ -574,9 +530,8 @@ void __init paging_init(void)
- #ifdef CONFIG_X86_PAE
- 	set_nx();
- 	if (nx_enabled)
--		printk("NX (Execute Disable) protection: active\n");
-+		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
- #endif
--
- 	pagetable_init();
+ 	blk_rq_unmap_user(bio);
+ 	blk_put_request(rq);
  
- 	load_cr3(swapper_pg_dir);
-@@ -600,10 +555,10 @@ void __init paging_init(void)
-  * used to involve black magic jumps to work around some nasty CPU bugs,
-  * but fortunately the switch to using exceptions got rid of all that.
-  */
--
- static void __init test_wp_bit(void)
+@@ -837,6 +847,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  {
--	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
-+	printk(KERN_INFO
-+  "Checking if this processor honours the WP bit even in supervisor mode...");
- 
- 	/* Any page-aligned address will do, the test is non-destructive */
- 	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
-@@ -611,47 +566,46 @@ static void __init test_wp_bit(void)
- 	clear_fixmap(FIX_WP_TEST);
+ 	struct bsg_device *bd = file->private_data;
+ 	int __user *uarg = (int __user *) arg;
++	int ret;
  
- 	if (!boot_cpu_data.wp_works_ok) {
--		printk("No.\n");
-+		printk(KERN_CONT "No.\n");
- #ifdef CONFIG_X86_WP_WORKS_OK
--		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-+		panic(
-+  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
- #endif
- 	} else {
--		printk("Ok.\n");
-+		printk(KERN_CONT "Ok.\n");
- 	}
- }
+ 	switch (cmd) {
+ 		/*
+@@ -889,12 +900,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		if (rq->next_rq)
+ 			bidi_bio = rq->next_rq->bio;
+ 		blk_execute_rq(bd->queue, NULL, rq, 0);
+-		blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
++		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
  
--static struct kcore_list kcore_mem, kcore_vmalloc; 
-+static struct kcore_list kcore_mem, kcore_vmalloc;
+ 		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
+ 			return -EFAULT;
  
- void __init mem_init(void)
- {
--	extern int ppro_with_ram_bug(void);
- 	int codesize, reservedpages, datasize, initsize;
--	int tmp;
--	int bad_ppro;
-+	int tmp, bad_ppro;
+-		return 0;
++		return ret;
+ 	}
+ 	/*
+ 	 * block device ioctls
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 13553e0..f28d1fb 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -26,9 +26,9 @@ static const int cfq_slice_async_rq = 2;
+ static int cfq_slice_idle = HZ / 125;
  
- #ifdef CONFIG_FLATMEM
- 	BUG_ON(!mem_map);
- #endif
--	
- 	bad_ppro = ppro_with_ram_bug();
+ /*
+- * grace period before allowing idle class to get disk access
++ * offset from end of service tree
+  */
+-#define CFQ_IDLE_GRACE		(HZ / 10)
++#define CFQ_IDLE_DELAY		(HZ / 5)
  
- #ifdef CONFIG_HIGHMEM
- 	/* check that fixmap and pkmap do not overlap */
--	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
--		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
-+	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
-+		printk(KERN_ERR
-+			"fixmap and kmap areas overlap - this will crash\n");
- 		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
--				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
-+				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
-+				FIXADDR_START);
- 		BUG();
- 	}
- #endif
-- 
- 	/* this will put all low memory onto the freelists */
- 	totalram_pages += free_all_bootmem();
+ /*
+  * below this threshold, we consider thinktime immediate
+@@ -98,8 +98,6 @@ struct cfq_data {
+ 	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+ 	struct cfq_queue *async_idle_cfqq;
  
- 	reservedpages = 0;
- 	for (tmp = 0; tmp < max_low_pfn; tmp++)
- 		/*
--		 * Only count reserved RAM pages
-+		 * Only count reserved RAM pages:
- 		 */
- 		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
- 			reservedpages++;
-@@ -662,11 +616,12 @@ void __init mem_init(void)
- 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
- 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+-	struct timer_list idle_class_timer;
+-
+ 	sector_t last_position;
+ 	unsigned long last_end_request;
  
--	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
--	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
-+	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-+	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- 		   VMALLOC_END-VMALLOC_START);
+@@ -199,8 +197,8 @@ CFQ_CFQQ_FNS(sync);
  
--	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-+	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
-+			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
- 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- 		num_physpages << (PAGE_SHIFT-10),
- 		codesize >> 10,
-@@ -677,45 +632,46 @@ void __init mem_init(void)
- 	       );
+ static void cfq_dispatch_insert(struct request_queue *, struct request *);
+ static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+-				       struct task_struct *, gfp_t);
+-static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
++				       struct io_context *, gfp_t);
++static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
+ 						struct io_context *);
  
- #if 1 /* double-sanity-check paranoia */
--	printk("virtual kernel memory layout:\n"
--	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-+	printk(KERN_INFO "virtual kernel memory layout:\n"
-+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
- #ifdef CONFIG_HIGHMEM
--	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
- #endif
--	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
--	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
--	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
--	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
--	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
--	       FIXADDR_START, FIXADDR_TOP,
--	       (FIXADDR_TOP - FIXADDR_START) >> 10,
-+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
-+		FIXADDR_START, FIXADDR_TOP,
-+		(FIXADDR_TOP - FIXADDR_START) >> 10,
+ static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
+@@ -384,12 +382,15 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
+ /*
+  * The below is leftmost cache rbtree addon
+  */
+-static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
++static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
+ {
+ 	if (!root->left)
+ 		root->left = rb_first(&root->rb);
  
- #ifdef CONFIG_HIGHMEM
--	       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
--	       (LAST_PKMAP*PAGE_SIZE) >> 10,
-+		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
-+		(LAST_PKMAP*PAGE_SIZE) >> 10,
- #endif
+-	return root->left;
++	if (root->left)
++		return rb_entry(root->left, struct cfq_queue, rb_node);
++
++	return NULL;
+ }
  
--	       VMALLOC_START, VMALLOC_END,
--	       (VMALLOC_END - VMALLOC_START) >> 20,
-+		VMALLOC_START, VMALLOC_END,
-+		(VMALLOC_END - VMALLOC_START) >> 20,
+ static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
+@@ -446,12 +447,20 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
+ static void cfq_service_tree_add(struct cfq_data *cfqd,
+ 				    struct cfq_queue *cfqq, int add_front)
+ {
+-	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
+-	struct rb_node *parent = NULL;
++	struct rb_node **p, *parent;
++	struct cfq_queue *__cfqq;
+ 	unsigned long rb_key;
+ 	int left;
  
--	       (unsigned long)__va(0), (unsigned long)high_memory,
--	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
-+		(unsigned long)__va(0), (unsigned long)high_memory,
-+		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
+-	if (!add_front) {
++	if (cfq_class_idle(cfqq)) {
++		rb_key = CFQ_IDLE_DELAY;
++		parent = rb_last(&cfqd->service_tree.rb);
++		if (parent && parent != &cfqq->rb_node) {
++			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
++			rb_key += __cfqq->rb_key;
++		} else
++			rb_key += jiffies;
++	} else if (!add_front) {
+ 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
+ 		rb_key += cfqq->slice_resid;
+ 		cfqq->slice_resid = 0;
+@@ -469,8 +478,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
+ 	}
  
--	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
--	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
-+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
-+		((unsigned long)&__init_end -
-+		 (unsigned long)&__init_begin) >> 10,
+ 	left = 1;
++	parent = NULL;
++	p = &cfqd->service_tree.rb.rb_node;
+ 	while (*p) {
+-		struct cfq_queue *__cfqq;
+ 		struct rb_node **n;
  
--	       (unsigned long)&_etext, (unsigned long)&_edata,
--	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
-+		(unsigned long)&_etext, (unsigned long)&_edata,
-+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+ 		parent = *p;
+@@ -524,8 +534,7 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+  * add to busy list of queues for service, trying to be fair in ordering
+  * the pending list according to last request service
+  */
+-static inline void
+-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
++static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+ {
+ 	BUG_ON(cfq_cfqq_on_rr(cfqq));
+ 	cfq_mark_cfqq_on_rr(cfqq);
+@@ -538,8 +547,7 @@ cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+  * Called when the cfqq no longer has requests pending, remove it from
+  * the service tree.
+  */
+-static inline void
+-cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
++static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+ {
+ 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
+ 	cfq_clear_cfqq_on_rr(cfqq);
+@@ -554,7 +562,7 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+ /*
+  * rb tree support functions
+  */
+-static inline void cfq_del_rq_rb(struct request *rq)
++static void cfq_del_rq_rb(struct request *rq)
+ {
+ 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ 	struct cfq_data *cfqd = cfqq->cfqd;
+@@ -594,8 +602,7 @@ static void cfq_add_rq_rb(struct request *rq)
+ 	BUG_ON(!cfqq->next_rq);
+ }
  
--	       (unsigned long)&_text, (unsigned long)&_etext,
--	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
-+		(unsigned long)&_text, (unsigned long)&_etext,
-+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+-static inline void
+-cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
++static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
+ {
+ 	elv_rb_del(&cfqq->sort_list, rq);
+ 	cfqq->queued[rq_is_sync(rq)]--;
+@@ -609,7 +616,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
+ 	struct cfq_io_context *cic;
+ 	struct cfq_queue *cfqq;
  
- #ifdef CONFIG_HIGHMEM
--	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
--	BUG_ON(VMALLOC_END                     > PKMAP_BASE);
-+	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
-+	BUG_ON(VMALLOC_END				> PKMAP_BASE);
- #endif
--	BUG_ON(VMALLOC_START                   > VMALLOC_END);
--	BUG_ON((unsigned long)high_memory      > VMALLOC_START);
-+	BUG_ON(VMALLOC_START				> VMALLOC_END);
-+	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
- #endif /* double-sanity-check paranoia */
+-	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
++	cic = cfq_cic_lookup(cfqd, tsk->io_context);
+ 	if (!cic)
+ 		return NULL;
  
- #ifdef CONFIG_X86_PAE
-@@ -746,49 +702,38 @@ int arch_add_memory(int nid, u64 start, u64 size)
+@@ -721,7 +728,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
+ 	 * Lookup the cfqq that this bio will be queued with. Allow
+ 	 * merge only if rq is queued there.
+ 	 */
+-	cic = cfq_cic_rb_lookup(cfqd, current->io_context);
++	cic = cfq_cic_lookup(cfqd, current->io_context);
+ 	if (!cic)
+ 		return 0;
  
- 	return __add_pages(zone, start_pfn, nr_pages);
+@@ -732,15 +739,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
+ 	return 0;
  }
--
- #endif
  
--struct kmem_cache *pmd_cache;
+-static inline void
+-__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
++static void __cfq_set_active_queue(struct cfq_data *cfqd,
++				   struct cfq_queue *cfqq)
+ {
+ 	if (cfqq) {
+-		/*
+-		 * stop potential idle class queues waiting service
+-		 */
+-		del_timer(&cfqd->idle_class_timer);
 -
--void __init pgtable_cache_init(void)
+ 		cfqq->slice_end = 0;
+ 		cfq_clear_cfqq_must_alloc_slice(cfqq);
+ 		cfq_clear_cfqq_fifo_expire(cfqq);
+@@ -789,47 +791,16 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+ 		__cfq_slice_expired(cfqd, cfqq, timed_out);
+ }
+ 
+-static int start_idle_class_timer(struct cfq_data *cfqd)
 -{
--	if (PTRS_PER_PMD > 1)
--		pmd_cache = kmem_cache_create("pmd",
--					      PTRS_PER_PMD*sizeof(pmd_t),
--					      PTRS_PER_PMD*sizeof(pmd_t),
--					      SLAB_PANIC,
--					      pmd_ctor);
+-	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+-	unsigned long now = jiffies;
+-
+-	if (time_before(now, end) &&
+-	    time_after_eq(now, cfqd->last_end_request)) {
+-		mod_timer(&cfqd->idle_class_timer, end);
+-		return 1;
+-	}
+-
+-	return 0;
 -}
 -
  /*
-  * This function cannot be __init, since exceptions don't work in that
-  * section.  Put this after the callers, so that it cannot be inlined.
+  * Get next queue for service. Unless we have a queue preemption,
+  * we'll simply select the first cfqq in the service tree.
   */
--static int noinline do_test_wp_bit(void)
-+static noinline int do_test_wp_bit(void)
+ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
  {
- 	char tmp_reg;
- 	int flag;
+-	struct cfq_queue *cfqq;
+-	struct rb_node *n;
+-
+ 	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
+ 		return NULL;
  
- 	__asm__ __volatile__(
--		"	movb %0,%1	\n"
--		"1:	movb %1,%0	\n"
--		"	xorl %2,%2	\n"
-+		"	movb %0, %1	\n"
-+		"1:	movb %1, %0	\n"
-+		"	xorl %2, %2	\n"
- 		"2:			\n"
--		".section __ex_table,\"a\"\n"
-+		".section __ex_table, \"a\"\n"
- 		"	.align 4	\n"
--		"	.long 1b,2b	\n"
-+		"	.long 1b, 2b	\n"
- 		".previous		\n"
- 		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
- 		 "=q" (tmp_reg),
- 		 "=r" (flag)
- 		:"2" (1)
- 		:"memory");
--	
-+
- 	return flag;
+-	n = cfq_rb_first(&cfqd->service_tree);
+-	cfqq = rb_entry(n, struct cfq_queue, rb_node);
+-
+-	if (cfq_class_idle(cfqq)) {
+-		/*
+-		 * if we have idle queues and no rt or be queues had
+-		 * pending requests, either allow immediate service if
+-		 * the grace period has passed or arm the idle grace
+-		 * timer
+-		 */
+-		if (start_idle_class_timer(cfqd))
+-			cfqq = NULL;
+-	}
+-
+-	return cfqq;
++	return cfq_rb_first(&cfqd->service_tree);
  }
  
- #ifdef CONFIG_DEBUG_RODATA
-+const int rodata_test_data = 0xC3;
-+EXPORT_SYMBOL_GPL(rodata_test_data);
+ /*
+@@ -895,7 +866,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
+ 	 * task has exited, don't wait
+ 	 */
+ 	cic = cfqd->active_cic;
+-	if (!cic || !cic->ioc->task)
++	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
+ 		return;
  
- void mark_rodata_ro(void)
+ 	/*
+@@ -939,7 +910,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
+ /*
+  * return expired entry, or NULL to just start from scratch in rbtree
+  */
+-static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
++static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
  {
-@@ -801,32 +746,58 @@ void mark_rodata_ro(void)
- 	if (num_possible_cpus() <= 1)
- #endif
- 	{
--		change_page_attr(virt_to_page(start),
--		                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
--		printk("Write protecting the kernel text: %luk\n", size >> 10);
-+		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-+		printk(KERN_INFO "Write protecting the kernel text: %luk\n",
-+			size >> 10);
-+
-+#ifdef CONFIG_CPA_DEBUG
-+		printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
-+			start, start+size);
-+		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
-+
-+		printk(KERN_INFO "Testing CPA: write protecting again\n");
-+		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
-+#endif
- 	}
- #endif
- 	start += size;
- 	size = (unsigned long)__end_rodata - start;
--	change_page_attr(virt_to_page(start),
--	                 size >> PAGE_SHIFT, PAGE_KERNEL_RO);
--	printk("Write protecting the kernel read-only data: %luk\n",
--	       size >> 10);
-+	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-+	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-+		size >> 10);
-+	rodata_test();
- 
--	/*
--	 * change_page_attr() requires a global_flush_tlb() call after it.
--	 * We do this after the printk so that if something went wrong in the
--	 * change, the printk gets out at least to give a better debug hint
--	 * of who is the culprit.
--	 */
--	global_flush_tlb();
-+#ifdef CONFIG_CPA_DEBUG
-+	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
-+	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
-+
-+	printk(KERN_INFO "Testing CPA: write protecting again\n");
-+	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-+#endif
+ 	struct cfq_data *cfqd = cfqq->cfqd;
+ 	struct request *rq;
+@@ -1068,7 +1039,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ 	return dispatched;
  }
- #endif
  
- void free_init_pages(char *what, unsigned long begin, unsigned long end)
+-static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
++static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
  {
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+	/*
-+	 * If debugging page accesses then do not free this memory but
-+	 * mark them not present - any buggy init-section access will
-+	 * create a kernel page fault:
-+	 */
-+	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-+		begin, PAGE_ALIGN(end));
-+	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-+#else
- 	unsigned long addr;
- 
-+	/*
-+	 * We just marked the kernel text read only above, now that
-+	 * we are going to free part of that, we need to make that
-+	 * writeable first.
-+	 */
-+	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
-+
- 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
- 		ClearPageReserved(virt_to_page(addr));
- 		init_page_count(virt_to_page(addr));
-@@ -835,6 +806,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
- 		totalram_pages++;
- 	}
- 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-+#endif
- }
+ 	int dispatched = 0;
  
- void free_initmem(void)
-@@ -850,4 +822,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
- 	free_init_pages("initrd memory", start, end);
- }
- #endif
+@@ -1087,14 +1058,11 @@ static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
+  */
+ static int cfq_forced_dispatch(struct cfq_data *cfqd)
+ {
++	struct cfq_queue *cfqq;
+ 	int dispatched = 0;
+-	struct rb_node *n;
 -
-diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index 0f9c8c8..cc50a13 100644
---- a/arch/x86/mm/init_64.c
-+++ b/arch/x86/mm/init_64.c
-@@ -43,12 +43,10 @@
- #include <asm/proto.h>
- #include <asm/smp.h>
- #include <asm/sections.h>
-+#include <asm/kdebug.h>
-+#include <asm/numa.h>
+-	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
+-		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
  
--#ifndef Dprintk
--#define Dprintk(x...)
--#endif
--
--const struct dma_mapping_ops* dma_ops;
-+const struct dma_mapping_ops *dma_ops;
- EXPORT_SYMBOL(dma_ops);
++	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
+ 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+-	}
  
- static unsigned long dma_reserve __initdata;
-@@ -65,22 +63,26 @@ void show_mem(void)
+ 	cfq_slice_expired(cfqd, 0);
+ 
+@@ -1170,20 +1138,69 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
+ 	kmem_cache_free(cfq_pool, cfqq);
+ }
+ 
+-static void cfq_free_io_context(struct io_context *ioc)
++/*
++ * Call func for each cic attached to this ioc. Returns number of cic's seen.
++ */
++#define CIC_GANG_NR	16
++static unsigned int
++call_for_each_cic(struct io_context *ioc,
++		  void (*func)(struct io_context *, struct cfq_io_context *))
  {
- 	long i, total = 0, reserved = 0;
- 	long shared = 0, cached = 0;
--	pg_data_t *pgdat;
- 	struct page *page;
-+	pg_data_t *pgdat;
+-	struct cfq_io_context *__cic;
+-	struct rb_node *n;
+-	int freed = 0;
++	struct cfq_io_context *cics[CIC_GANG_NR];
++	unsigned long index = 0;
++	unsigned int called = 0;
++	int nr;
  
- 	printk(KERN_INFO "Mem-info:\n");
- 	show_free_areas();
--	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-+	printk(KERN_INFO "Free swap:       %6ldkB\n",
-+		nr_swap_pages << (PAGE_SHIFT-10));
+-	ioc->ioc_data = NULL;
++	rcu_read_lock();
  
- 	for_each_online_pgdat(pgdat) {
--               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
--			/* this loop can take a while with 256 GB and 4k pages
--			   so update the NMI watchdog */
--			if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) {
-+		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+			/*
-+			 * This loop can take a while with 256 GB and
-+			 * 4k pages so defer the NMI watchdog:
-+			 */
-+			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
- 				touch_nmi_watchdog();
--			}
+-	while ((n = rb_first(&ioc->cic_root)) != NULL) {
+-		__cic = rb_entry(n, struct cfq_io_context, rb_node);
+-		rb_erase(&__cic->rb_node, &ioc->cic_root);
+-		kmem_cache_free(cfq_ioc_pool, __cic);
+-		freed++;
+-	}
++	do {
++		int i;
 +
- 			if (!pfn_valid(pgdat->node_start_pfn + i))
- 				continue;
++		/*
++		 * Perhaps there's a better way - this just gang lookups from
++		 * 0 to the end, restarting after each CIC_GANG_NR from the
++		 * last key + 1.
++		 */
++		nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
++						index, CIC_GANG_NR);
++		if (!nr)
++			break;
 +
- 			page = pfn_to_page(pgdat->node_start_pfn + i);
- 			total++;
- 			if (PageReserved(page))
-@@ -89,51 +91,58 @@ void show_mem(void)
- 				cached++;
- 			else if (page_count(page))
- 				shared += page_count(page) - 1;
--               }
-+		}
- 	}
--	printk(KERN_INFO "%lu pages of RAM\n", total);
--	printk(KERN_INFO "%lu reserved pages\n",reserved);
--	printk(KERN_INFO "%lu pages shared\n",shared);
--	printk(KERN_INFO "%lu pages swap cached\n",cached);
-+	printk(KERN_INFO "%lu pages of RAM\n",		total);
-+	printk(KERN_INFO "%lu reserved pages\n",	reserved);
-+	printk(KERN_INFO "%lu pages shared\n",		shared);
-+	printk(KERN_INFO "%lu pages swap cached\n",	cached);
- }
- 
- int after_bootmem;
- 
- static __init void *spp_getpage(void)
--{ 
++		called += nr;
++		index = 1 + (unsigned long) cics[nr - 1]->key;
++
++		for (i = 0; i < nr; i++)
++			func(ioc, cics[i]);
++	} while (nr == CIC_GANG_NR);
++
++	rcu_read_unlock();
++
++	return called;
++}
++
++static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 +{
- 	void *ptr;
++	unsigned long flags;
 +
- 	if (after_bootmem)
--		ptr = (void *) get_zeroed_page(GFP_ATOMIC); 
-+		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
- 	else
- 		ptr = alloc_bootmem_pages(PAGE_SIZE);
--	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
--		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
- 
--	Dprintk("spp_getpage %p\n", ptr);
-+	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
-+		panic("set_pte_phys: cannot allocate page data %s\n",
-+			after_bootmem ? "after bootmem" : "");
-+	}
++	BUG_ON(!cic->dead_key);
 +
-+	pr_debug("spp_getpage %p\n", ptr);
++	spin_lock_irqsave(&ioc->lock, flags);
++	radix_tree_delete(&ioc->radix_root, cic->dead_key);
++	spin_unlock_irqrestore(&ioc->lock, flags);
 +
- 	return ptr;
--} 
++	kmem_cache_free(cfq_ioc_pool, cic);
 +}
++
++static void cfq_free_io_context(struct io_context *ioc)
++{
++	int freed;
++
++	/*
++	 * ioc->refcount is zero here, so no more cic's are allowed to be
++	 * linked into this ioc. So it should be ok to iterate over the known
++	 * list, we will see all cic's since no new ones are added.
++	 */
++	freed = call_for_each_cic(ioc, cic_free_func);
  
--static __init void set_pte_phys(unsigned long vaddr,
--			 unsigned long phys, pgprot_t prot)
-+static __init void
-+set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
- {
- 	pgd_t *pgd;
- 	pud_t *pud;
- 	pmd_t *pmd;
- 	pte_t *pte, new_pte;
- 
--	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
-+	pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);
- 
- 	pgd = pgd_offset_k(vaddr);
- 	if (pgd_none(*pgd)) {
--		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+		printk(KERN_ERR
-+			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
- 		return;
- 	}
- 	pud = pud_offset(pgd, vaddr);
- 	if (pud_none(*pud)) {
--		pmd = (pmd_t *) spp_getpage(); 
-+		pmd = (pmd_t *) spp_getpage();
- 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
- 		if (pmd != pmd_offset(pud, 0)) {
--			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
-+				pmd, pmd_offset(pud, 0));
- 			return;
- 		}
- 	}
-@@ -142,7 +151,7 @@ static __init void set_pte_phys(unsigned long vaddr,
- 		pte = (pte_t *) spp_getpage();
- 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
- 		if (pte != pte_offset_kernel(pmd, 0)) {
--			printk("PAGETABLE BUG #02!\n");
-+			printk(KERN_ERR "PAGETABLE BUG #02!\n");
- 			return;
- 		}
- 	}
-@@ -162,33 +171,35 @@ static __init void set_pte_phys(unsigned long vaddr,
- }
+ 	elv_ioc_count_mod(ioc_count, -freed);
  
- /* NOTE: this is meant to be run only at boot */
--void __init 
--__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-+void __init
-+__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+@@ -1205,7 +1222,12 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
+ 					 struct cfq_io_context *cic)
  {
- 	unsigned long address = __fix_to_virt(idx);
+ 	list_del_init(&cic->queue_list);
++
++	/*
++	 * Make sure key == NULL is seen for dead queues
++	 */
+ 	smp_wmb();
++	cic->dead_key = (unsigned long) cic->key;
+ 	cic->key = NULL;
  
- 	if (idx >= __end_of_fixed_addresses) {
--		printk("Invalid __set_fixmap\n");
-+		printk(KERN_ERR "Invalid __set_fixmap\n");
- 		return;
+ 	if (cic->cfqq[ASYNC]) {
+@@ -1219,16 +1241,18 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
  	}
- 	set_pte_phys(address, phys, prot);
  }
  
--unsigned long __meminitdata table_start, table_end;
-+static unsigned long __initdata table_start;
-+static unsigned long __meminitdata table_end;
+-static void cfq_exit_single_io_context(struct cfq_io_context *cic)
++static void cfq_exit_single_io_context(struct io_context *ioc,
++				       struct cfq_io_context *cic)
+ {
+ 	struct cfq_data *cfqd = cic->key;
  
- static __meminit void *alloc_low_page(unsigned long *phys)
--{ 
-+{
- 	unsigned long pfn = table_end++;
- 	void *adr;
+ 	if (cfqd) {
+ 		struct request_queue *q = cfqd->queue;
++		unsigned long flags;
  
- 	if (after_bootmem) {
- 		adr = (void *)get_zeroed_page(GFP_ATOMIC);
- 		*phys = __pa(adr);
-+
- 		return adr;
+-		spin_lock_irq(q->queue_lock);
++		spin_lock_irqsave(q->queue_lock, flags);
+ 		__cfq_exit_single_io_context(cfqd, cic);
+-		spin_unlock_irq(q->queue_lock);
++		spin_unlock_irqrestore(q->queue_lock, flags);
  	}
- 
--	if (pfn >= end_pfn) 
--		panic("alloc_low_page: ran out of memory"); 
-+	if (pfn >= end_pfn)
-+		panic("alloc_low_page: ran out of memory");
- 
- 	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
- 	memset(adr, 0, PAGE_SIZE);
-@@ -197,44 +208,49 @@ static __meminit void *alloc_low_page(unsigned long *phys)
  }
  
- static __meminit void unmap_low_page(void *adr)
--{ 
--
-+{
- 	if (after_bootmem)
- 		return;
- 
- 	early_iounmap(adr, PAGE_SIZE);
--} 
-+}
- 
- /* Must run before zap_low_mappings */
- __meminit void *early_ioremap(unsigned long addr, unsigned long size)
+@@ -1238,21 +1262,8 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
+  */
+ static void cfq_exit_io_context(struct io_context *ioc)
  {
--	unsigned long vaddr;
- 	pmd_t *pmd, *last_pmd;
-+	unsigned long vaddr;
- 	int i, pmds;
- 
- 	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
- 	vaddr = __START_KERNEL_map;
- 	pmd = level2_kernel_pgt;
- 	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
-+
- 	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
- 		for (i = 0; i < pmds; i++) {
- 			if (pmd_present(pmd[i]))
--				goto next;
-+				goto continue_outer_loop;
- 		}
- 		vaddr += addr & ~PMD_MASK;
- 		addr &= PMD_MASK;
-+
- 		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
--			set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
--		__flush_tlb();
-+			set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
-+		__flush_tlb_all();
-+
- 		return (void *)vaddr;
--	next:
-+continue_outer_loop:
- 		;
- 	}
--	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
-+	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
-+
- 	return NULL;
+-	struct cfq_io_context *__cic;
+-	struct rb_node *n;
+-
+-	ioc->ioc_data = NULL;
+-
+-	/*
+-	 * put the reference this task is holding to the various queues
+-	 */
+-	n = rb_first(&ioc->cic_root);
+-	while (n != NULL) {
+-		__cic = rb_entry(n, struct cfq_io_context, rb_node);
+-
+-		cfq_exit_single_io_context(__cic);
+-		n = rb_next(n);
+-	}
++	rcu_assign_pointer(ioc->ioc_data, NULL);
++	call_for_each_cic(ioc, cfq_exit_single_io_context);
  }
  
--/* To avoid virtual aliases later */
-+/*
-+ * To avoid virtual aliases later:
-+ */
- __meminit void early_iounmap(void *addr, unsigned long size)
- {
- 	unsigned long vaddr;
-@@ -244,9 +260,11 @@ __meminit void early_iounmap(void *addr, unsigned long size)
- 	vaddr = (unsigned long)addr;
- 	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
- 	pmd = level2_kernel_pgt + pmd_index(vaddr);
-+
- 	for (i = 0; i < pmds; i++)
- 		pmd_clear(pmd + i);
--	__flush_tlb();
-+
-+	__flush_tlb_all();
+ static struct cfq_io_context *
+@@ -1273,7 +1284,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ 	return cic;
  }
  
- static void __meminit
-@@ -259,16 +277,17 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
- 		pmd_t *pmd = pmd_page + pmd_index(address);
+-static void cfq_init_prio_data(struct cfq_queue *cfqq)
++static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+ {
+ 	struct task_struct *tsk = current;
+ 	int ioprio_class;
+@@ -1281,7 +1292,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
+ 	if (!cfq_cfqq_prio_changed(cfqq))
+ 		return;
  
- 		if (address >= end) {
--			if (!after_bootmem)
-+			if (!after_bootmem) {
- 				for (; i < PTRS_PER_PMD; i++, pmd++)
- 					set_pmd(pmd, __pmd(0));
-+			}
+-	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
++	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+ 	switch (ioprio_class) {
+ 		default:
+ 			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+@@ -1293,11 +1304,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
+ 			cfqq->ioprio_class = IOPRIO_CLASS_BE;
  			break;
- 		}
+ 		case IOPRIO_CLASS_RT:
+-			cfqq->ioprio = task_ioprio(tsk);
++			cfqq->ioprio = task_ioprio(ioc);
+ 			cfqq->ioprio_class = IOPRIO_CLASS_RT;
+ 			break;
+ 		case IOPRIO_CLASS_BE:
+-			cfqq->ioprio = task_ioprio(tsk);
++			cfqq->ioprio = task_ioprio(ioc);
+ 			cfqq->ioprio_class = IOPRIO_CLASS_BE;
+ 			break;
+ 		case IOPRIO_CLASS_IDLE:
+@@ -1316,7 +1327,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
+ 	cfq_clear_cfqq_prio_changed(cfqq);
+ }
  
- 		if (pmd_val(*pmd))
- 			continue;
+-static inline void changed_ioprio(struct cfq_io_context *cic)
++static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
+ {
+ 	struct cfq_data *cfqd = cic->key;
+ 	struct cfq_queue *cfqq;
+@@ -1330,8 +1341,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
+ 	cfqq = cic->cfqq[ASYNC];
+ 	if (cfqq) {
+ 		struct cfq_queue *new_cfqq;
+-		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
+-					 GFP_ATOMIC);
++		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
+ 		if (new_cfqq) {
+ 			cic->cfqq[ASYNC] = new_cfqq;
+ 			cfq_put_queue(cfqq);
+@@ -1347,29 +1357,19 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
  
--		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
-+		entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
- 		entry &= __supported_pte_mask;
- 		set_pmd(pmd, __pmd(entry));
- 	}
-@@ -277,19 +296,19 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
- static void __meminit
- phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+ static void cfq_ioc_set_ioprio(struct io_context *ioc)
  {
--	pmd_t *pmd = pmd_offset(pud,0);
-+	pmd_t *pmd = pmd_offset(pud, 0);
- 	spin_lock(&init_mm.page_table_lock);
- 	phys_pmd_init(pmd, address, end);
- 	spin_unlock(&init_mm.page_table_lock);
- 	__flush_tlb_all();
+-	struct cfq_io_context *cic;
+-	struct rb_node *n;
+-
++	call_for_each_cic(ioc, changed_ioprio);
+ 	ioc->ioprio_changed = 0;
+-
+-	n = rb_first(&ioc->cic_root);
+-	while (n != NULL) {
+-		cic = rb_entry(n, struct cfq_io_context, rb_node);
+-
+-		changed_ioprio(cic);
+-		n = rb_next(n);
+-	}
  }
  
--static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
--{ 
-+static void __meminit
-+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
-+{
- 	int i = pud_index(addr);
+ static struct cfq_queue *
+ cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+-		     struct task_struct *tsk, gfp_t gfp_mask)
++		     struct io_context *ioc, gfp_t gfp_mask)
+ {
+ 	struct cfq_queue *cfqq, *new_cfqq = NULL;
+ 	struct cfq_io_context *cic;
  
--
--	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
-+	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
- 		unsigned long pmd_phys;
- 		pud_t *pud = pud_page + pud_index(addr);
- 		pmd_t *pmd;
-@@ -297,10 +316,11 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
- 		if (addr >= end)
- 			break;
+ retry:
+-	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
++	cic = cfq_cic_lookup(cfqd, ioc);
+ 	/* cic always exists here */
+ 	cfqq = cic_to_cfqq(cic, is_sync);
  
--		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
--			set_pud(pud, __pud(0)); 
-+		if (!after_bootmem &&
-+				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
-+			set_pud(pud, __pud(0));
- 			continue;
--		} 
-+		}
+@@ -1404,15 +1404,16 @@ retry:
+ 		atomic_set(&cfqq->ref, 0);
+ 		cfqq->cfqd = cfqd;
  
- 		if (pud_val(*pud)) {
- 			phys_pmd_update(pud, addr, end);
-@@ -308,14 +328,16 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
- 		}
+-		if (is_sync) {
+-			cfq_mark_cfqq_idle_window(cfqq);
+-			cfq_mark_cfqq_sync(cfqq);
+-		}
+-
+ 		cfq_mark_cfqq_prio_changed(cfqq);
+ 		cfq_mark_cfqq_queue_new(cfqq);
  
- 		pmd = alloc_low_page(&pmd_phys);
-+
- 		spin_lock(&init_mm.page_table_lock);
- 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
- 		phys_pmd_init(pmd, addr, end);
- 		spin_unlock(&init_mm.page_table_lock);
+-		cfq_init_prio_data(cfqq);
++		cfq_init_prio_data(cfqq, ioc);
 +
- 		unmap_low_page(pmd);
++		if (is_sync) {
++			if (!cfq_class_idle(cfqq))
++				cfq_mark_cfqq_idle_window(cfqq);
++			cfq_mark_cfqq_sync(cfqq);
++		}
  	}
--	__flush_tlb();
--} 
-+	__flush_tlb_all();
-+}
  
- static void __init find_early_table_space(unsigned long end)
- {
-@@ -326,14 +348,23 @@ static void __init find_early_table_space(unsigned long end)
- 	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
- 		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+ 	if (new_cfqq)
+@@ -1439,11 +1440,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+ }
  
-- 	/* RED-PEN putting page tables only on node 0 could
-- 	   cause a hotspot and fill up ZONE_DMA. The page tables
-- 	   need roughly 0.5KB per GB. */
-- 	start = 0x8000;
-- 	table_start = find_e820_area(start, end, tables);
-+	/*
-+	 * RED-PEN putting page tables only on node 0 could
-+	 * cause a hotspot and fill up ZONE_DMA. The page tables
-+	 * need roughly 0.5KB per GB.
-+	 */
-+	start = 0x8000;
-+	table_start = find_e820_area(start, end, tables);
- 	if (table_start == -1UL)
- 		panic("Cannot find space for the kernel page tables");
+ static struct cfq_queue *
+-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
++cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+ 	      gfp_t gfp_mask)
+ {
+-	const int ioprio = task_ioprio(tsk);
+-	const int ioprio_class = task_ioprio_class(tsk);
++	const int ioprio = task_ioprio(ioc);
++	const int ioprio_class = task_ioprio_class(ioc);
+ 	struct cfq_queue **async_cfqq = NULL;
+ 	struct cfq_queue *cfqq = NULL;
  
-+	/*
-+	 * When you have a lot of RAM like 256GB, early_table will not fit
-+	 * into 0x8000 range, find_e820_area() will find area after kernel
-+	 * bss but the table_start is not page aligned, so need to round it
-+	 * up to avoid overlap with bss:
-+	 */
-+	table_start = round_up(table_start, PAGE_SIZE);
- 	table_start >>= PAGE_SHIFT;
- 	table_end = table_start;
+@@ -1453,7 +1454,7 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+ 	}
  
-@@ -342,20 +373,23 @@ static void __init find_early_table_space(unsigned long end)
- 		(table_start << PAGE_SHIFT) + tables);
+ 	if (!cfqq) {
+-		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
++		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+ 		if (!cfqq)
+ 			return NULL;
+ 	}
+@@ -1470,28 +1471,42 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+ 	return cfqq;
  }
  
--/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
--   This runs before bootmem is initialized and gets pages directly from the 
--   physical memory. To access them they are temporarily mapped. */
-+/*
-+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
-+ * This runs before bootmem is initialized and gets pages directly from
-+ * the physical memory. To access them they are temporarily mapped.
-+ */
- void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
--{ 
--	unsigned long next; 
++static void cfq_cic_free(struct cfq_io_context *cic)
 +{
-+	unsigned long next;
++	kmem_cache_free(cfq_ioc_pool, cic);
++	elv_ioc_count_dec(ioc_count);
++
++	if (ioc_gone && !elv_ioc_count_read(ioc_count))
++		complete(ioc_gone);
++}
++
+ /*
+  * We drop cfq io contexts lazily, so we may find a dead one.
+  */
+ static void
+-cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
++cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
++		  struct cfq_io_context *cic)
+ {
++	unsigned long flags;
++
+ 	WARN_ON(!list_empty(&cic->queue_list));
  
--	Dprintk("init_memory_mapping\n");
-+	pr_debug("init_memory_mapping\n");
++	spin_lock_irqsave(&ioc->lock, flags);
++
+ 	if (ioc->ioc_data == cic)
+-		ioc->ioc_data = NULL;
++		rcu_assign_pointer(ioc->ioc_data, NULL);
  
--	/* 
-+	/*
- 	 * Find space for the kernel direct mapping tables.
--	 * Later we should allocate these tables in the local node of the memory
--	 * mapped.  Unfortunately this is done currently before the nodes are 
--	 * discovered.
-+	 *
-+	 * Later we should allocate these tables in the local node of the
-+	 * memory mapped. Unfortunately this is done currently before the
-+	 * nodes are discovered.
- 	 */
- 	if (!after_bootmem)
- 		find_early_table_space(end);
-@@ -364,8 +398,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
- 	end = (unsigned long)__va(end);
+-	rb_erase(&cic->rb_node, &ioc->cic_root);
+-	kmem_cache_free(cfq_ioc_pool, cic);
+-	elv_ioc_count_dec(ioc_count);
++	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
++	spin_unlock_irqrestore(&ioc->lock, flags);
++
++	cfq_cic_free(cic);
+ }
  
- 	for (; start < end; start = next) {
--		unsigned long pud_phys; 
- 		pgd_t *pgd = pgd_offset_k(start);
-+		unsigned long pud_phys;
- 		pud_t *pud;
+ static struct cfq_io_context *
+-cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
++cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
+ {
+-	struct rb_node *n;
+ 	struct cfq_io_context *cic;
+-	void *k, *key = cfqd;
++	void *k;
  
- 		if (after_bootmem)
-@@ -374,23 +408,26 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
- 			pud = alloc_low_page(&pud_phys);
+ 	if (unlikely(!ioc))
+ 		return NULL;
+@@ -1499,74 +1514,64 @@ cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
+ 	/*
+ 	 * we maintain a last-hit cache, to avoid browsing over the tree
+ 	 */
+-	cic = ioc->ioc_data;
++	cic = rcu_dereference(ioc->ioc_data);
+ 	if (cic && cic->key == cfqd)
+ 		return cic;
  
- 		next = start + PGDIR_SIZE;
--		if (next > end) 
--			next = end; 
-+		if (next > end)
-+			next = end;
- 		phys_pud_init(pud, __pa(start), __pa(next));
- 		if (!after_bootmem)
- 			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
- 		unmap_low_page(pud);
--	} 
-+	}
+-restart:
+-	n = ioc->cic_root.rb_node;
+-	while (n) {
+-		cic = rb_entry(n, struct cfq_io_context, rb_node);
++	do {
++		rcu_read_lock();
++		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
++		rcu_read_unlock();
++		if (!cic)
++			break;
+ 		/* ->key must be copied to avoid race with cfq_exit_queue() */
+ 		k = cic->key;
+ 		if (unlikely(!k)) {
+-			cfq_drop_dead_cic(ioc, cic);
+-			goto restart;
++			cfq_drop_dead_cic(cfqd, ioc, cic);
++			continue;
+ 		}
  
- 	if (!after_bootmem)
- 		mmu_cr4_features = read_cr4();
- 	__flush_tlb_all();
-+
-+	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
- }
+-		if (key < k)
+-			n = n->rb_left;
+-		else if (key > k)
+-			n = n->rb_right;
+-		else {
+-			ioc->ioc_data = cic;
+-			return cic;
+-		}
+-	}
++		rcu_assign_pointer(ioc->ioc_data, cic);
++		break;
++	} while (1);
  
- #ifndef CONFIG_NUMA
- void __init paging_init(void)
- {
- 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-+
- 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
- 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
-@@ -402,39 +439,48 @@ void __init paging_init(void)
+-	return NULL;
++	return cic;
  }
- #endif
  
--/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
--   from the CPU leading to inconsistent cache lines. address and size
--   must be aligned to 2MB boundaries. 
--   Does nothing when the mapping doesn't exist. */
--void __init clear_kernel_mapping(unsigned long address, unsigned long size) 
+-static inline void
+-cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
+-	     struct cfq_io_context *cic)
 +/*
-+ * Unmap a kernel mapping if it exists. This is useful to avoid
-+ * prefetches from the CPU leading to inconsistent cache lines.
-+ * address and size must be aligned to 2MB boundaries.
-+ * Does nothing when the mapping doesn't exist.
++ * Add cic into ioc, using cfqd as the search key. This enables us to lookup
++ * the process specific cfq io context when entered from the block layer.
++ * Also adds the cic to a per-cfqd list, used when this queue is removed.
 + */
-+void __init clear_kernel_mapping(unsigned long address, unsigned long size)
++static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
++			struct cfq_io_context *cic, gfp_t gfp_mask)
  {
- 	unsigned long end = address + size;
+-	struct rb_node **p;
+-	struct rb_node *parent;
+-	struct cfq_io_context *__cic;
+ 	unsigned long flags;
+-	void *k;
++	int ret;
  
- 	BUG_ON(address & ~LARGE_PAGE_MASK);
--	BUG_ON(size & ~LARGE_PAGE_MASK); 
--	
--	for (; address < end; address += LARGE_PAGE_SIZE) { 
-+	BUG_ON(size & ~LARGE_PAGE_MASK);
-+
-+	for (; address < end; address += LARGE_PAGE_SIZE) {
- 		pgd_t *pgd = pgd_offset_k(address);
- 		pud_t *pud;
- 		pmd_t *pmd;
-+
- 		if (pgd_none(*pgd))
- 			continue;
-+
- 		pud = pud_offset(pgd, address);
- 		if (pud_none(*pud))
--			continue; 
-+			continue;
-+
- 		pmd = pmd_offset(pud, address);
- 		if (!pmd || pmd_none(*pmd))
--			continue; 
--		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) { 
--			/* Could handle this, but it should not happen currently. */
--			printk(KERN_ERR 
--	       "clear_kernel_mapping: mapping has been split. will leak memory\n"); 
--			pmd_ERROR(*pmd); 
-+			continue;
-+
-+		if (!(pmd_val(*pmd) & _PAGE_PSE)) {
-+			/*
-+			 * Could handle this, but it should not happen
-+			 * currently:
-+			 */
-+			printk(KERN_ERR "clear_kernel_mapping: "
-+				"mapping has been split. will leak memory\n");
-+			pmd_ERROR(*pmd);
- 		}
--		set_pmd(pmd, __pmd(0)); 		
-+		set_pmd(pmd, __pmd(0));
- 	}
- 	__flush_tlb_all();
--} 
-+}
+-	cic->ioc = ioc;
+-	cic->key = cfqd;
++	ret = radix_tree_preload(gfp_mask);
++	if (!ret) {
++		cic->ioc = ioc;
++		cic->key = cfqd;
  
- /*
-  * Memory hotplug specific functions
-@@ -461,16 +507,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
- 	unsigned long nr_pages = size >> PAGE_SHIFT;
- 	int ret;
+-restart:
+-	parent = NULL;
+-	p = &ioc->cic_root.rb_node;
+-	while (*p) {
+-		parent = *p;
+-		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
+-		/* ->key must be copied to avoid race with cfq_exit_queue() */
+-		k = __cic->key;
+-		if (unlikely(!k)) {
+-			cfq_drop_dead_cic(ioc, __cic);
+-			goto restart;
+-		}
++		spin_lock_irqsave(&ioc->lock, flags);
++		ret = radix_tree_insert(&ioc->radix_root,
++						(unsigned long) cfqd, cic);
++		spin_unlock_irqrestore(&ioc->lock, flags);
  
--	init_memory_mapping(start, (start + size -1));
-+	init_memory_mapping(start, start + size-1);
+-		if (cic->key < k)
+-			p = &(*p)->rb_left;
+-		else if (cic->key > k)
+-			p = &(*p)->rb_right;
+-		else
+-			BUG();
++		radix_tree_preload_end();
++
++		if (!ret) {
++			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
++			list_add(&cic->queue_list, &cfqd->cic_list);
++			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
++		}
+ 	}
  
- 	ret = __add_pages(zone, start_pfn, nr_pages);
--	if (ret)
--		goto error;
-+	WARN_ON(1);
+-	rb_link_node(&cic->rb_node, parent, p);
+-	rb_insert_color(&cic->rb_node, &ioc->cic_root);
++	if (ret)
++		printk(KERN_ERR "cfq: cic link failed!\n");
  
- 	return ret;
--error:
--	printk("%s: Problem encountered in __add_pages!\n", __func__);
--	return ret;
+-	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+-	list_add(&cic->queue_list, &cfqd->cic_list);
+-	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
++	return ret;
  }
- EXPORT_SYMBOL_GPL(arch_add_memory);
- 
-@@ -484,36 +526,8 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
  
- #endif /* CONFIG_MEMORY_HOTPLUG */
- 
--#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
--/*
-- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
-- * just online the pages.
-- */
--int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
--{
--	int err = -EIO;
--	unsigned long pfn;
--	unsigned long total = 0, mem = 0;
--	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
--		if (pfn_valid(pfn)) {
--			online_page(pfn_to_page(pfn));
--			err = 0;
--			mem++;
--		}
--		total++;
--	}
--	if (!err) {
--		z->spanned_pages += total;
--		z->present_pages += mem;
--		z->zone_pgdat->node_spanned_pages += total;
--		z->zone_pgdat->node_present_pages += mem;
--	}
--	return err;
--}
--#endif
--
--static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
--			 kcore_vsyscall;
-+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
-+			 kcore_modules, kcore_vsyscall;
+ /*
+@@ -1586,7 +1591,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ 	if (!ioc)
+ 		return NULL;
  
- void __init mem_init(void)
- {
-@@ -521,8 +535,15 @@ void __init mem_init(void)
+-	cic = cfq_cic_rb_lookup(cfqd, ioc);
++	cic = cfq_cic_lookup(cfqd, ioc);
+ 	if (cic)
+ 		goto out;
  
- 	pci_iommu_alloc();
+@@ -1594,13 +1599,17 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ 	if (cic == NULL)
+ 		goto err;
  
--	/* clear the zero-page */
--	memset(empty_zero_page, 0, PAGE_SIZE);
-+	/* clear_bss() already clear the empty_zero_page */
-+
-+	/* temporary debugging - double check it's true: */
-+	{
-+		int i;
+-	cfq_cic_link(cfqd, ioc, cic);
++	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
++		goto err_free;
 +
-+		for (i = 0; i < 1024; i++)
-+			WARN_ON_ONCE(empty_zero_page[i]);
-+	}
+ out:
+ 	smp_read_barrier_depends();
+ 	if (unlikely(ioc->ioprio_changed))
+ 		cfq_ioc_set_ioprio(ioc);
  
- 	reservedpages = 0;
+ 	return cic;
++err_free:
++	cfq_cic_free(cic);
+ err:
+ 	put_io_context(ioc);
+ 	return NULL;
+@@ -1655,12 +1664,15 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ {
+ 	int enable_idle;
  
-@@ -534,7 +555,6 @@ void __init mem_init(void)
- #endif
- 	reservedpages = end_pfn - totalram_pages -
- 					absent_pages_in_range(0, end_pfn);
--
- 	after_bootmem = 1;
+-	if (!cfq_cfqq_sync(cfqq))
++	/*
++	 * Don't idle for async or idle io prio class
++	 */
++	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
+ 		return;
  
- 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-@@ -542,15 +562,16 @@ void __init mem_init(void)
- 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+ 	enable_idle = cfq_cfqq_idle_window(cfqq);
  
- 	/* Register memory areas for /proc/kcore */
--	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
--	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
-+	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-+	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- 		   VMALLOC_END-VMALLOC_START);
- 	kclist_add(&kcore_kernel, &_stext, _end - _stext);
- 	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
--	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, 
-+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
- 				 VSYSCALL_END - VSYSCALL_START);
+-	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
++	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
+ 	    (cfqd->hw_tag && CIC_SEEKY(cic)))
+ 		enable_idle = 0;
+ 	else if (sample_valid(cic->ttime_samples)) {
+@@ -1793,7 +1805,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
+ 	struct cfq_data *cfqd = q->elevator->elevator_data;
+ 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
  
--	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
-+	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-+				"%ldk reserved, %ldk data, %ldk init)\n",
- 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- 		end_pfn << (PAGE_SHIFT-10),
- 		codesize >> 10,
-@@ -566,19 +587,27 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
- 	if (begin >= end)
- 		return;
+-	cfq_init_prio_data(cfqq);
++	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
  
-+	/*
-+	 * If debugging page accesses then do not free this memory but
-+	 * mark them not present - any buggy init-section access will
-+	 * create a kernel page fault:
-+	 */
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-+		begin, PAGE_ALIGN(end));
-+	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-+#else
- 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-+
- 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
- 		ClearPageReserved(virt_to_page(addr));
- 		init_page_count(virt_to_page(addr));
- 		memset((void *)(addr & ~(PAGE_SIZE-1)),
- 			POISON_FREE_INITMEM, PAGE_SIZE);
--		if (addr >= __START_KERNEL_map)
--			change_page_attr_addr(addr, 1, __pgprot(0));
- 		free_page(addr);
- 		totalram_pages++;
- 	}
--	if (addr > __START_KERNEL_map)
--		global_flush_tlb();
-+#endif
- }
+ 	cfq_add_rq_rb(rq);
  
- void free_initmem(void)
-@@ -589,6 +618,8 @@ void free_initmem(void)
- }
+@@ -1834,7 +1846,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
+ 			cfq_set_prio_slice(cfqd, cfqq);
+ 			cfq_clear_cfqq_slice_new(cfqq);
+ 		}
+-		if (cfq_slice_used(cfqq))
++		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
+ 			cfq_slice_expired(cfqd, 1);
+ 		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+ 			cfq_arm_slice_timer(cfqd);
+@@ -1894,13 +1906,13 @@ static int cfq_may_queue(struct request_queue *q, int rw)
+ 	 * so just lookup a possibly existing queue, or return 'may queue'
+ 	 * if that fails
+ 	 */
+-	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
++	cic = cfq_cic_lookup(cfqd, tsk->io_context);
+ 	if (!cic)
+ 		return ELV_MQUEUE_MAY;
  
- #ifdef CONFIG_DEBUG_RODATA
-+const int rodata_test_data = 0xC3;
-+EXPORT_SYMBOL_GPL(rodata_test_data);
+ 	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
+ 	if (cfqq) {
+-		cfq_init_prio_data(cfqq);
++		cfq_init_prio_data(cfqq, cic->ioc);
+ 		cfq_prio_boost(cfqq);
  
- void mark_rodata_ro(void)
+ 		return __cfq_may_queue(cfqq);
+@@ -1938,7 +1950,6 @@ static int
+ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
  {
-@@ -603,25 +634,27 @@ void mark_rodata_ro(void)
- #ifdef CONFIG_KPROBES
- 	start = (unsigned long)__start_rodata;
- #endif
--	
-+
- 	end = (unsigned long)__end_rodata;
- 	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
- 	end &= PAGE_MASK;
- 	if (end <= start)
- 		return;
+ 	struct cfq_data *cfqd = q->elevator->elevator_data;
+-	struct task_struct *tsk = current;
+ 	struct cfq_io_context *cic;
+ 	const int rw = rq_data_dir(rq);
+ 	const int is_sync = rq_is_sync(rq);
+@@ -1956,7 +1967,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
  
--	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
-+	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+ 	cfqq = cic_to_cfqq(cic, is_sync);
+ 	if (!cfqq) {
+-		cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);
++		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
  
- 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
- 	       (end - start) >> 10);
+ 		if (!cfqq)
+ 			goto queue_fail;
+@@ -2039,29 +2050,9 @@ out_cont:
+ 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+ }
  
+-/*
+- * Timer running if an idle class queue is waiting for service
+- */
+-static void cfq_idle_class_timer(unsigned long data)
+-{
+-	struct cfq_data *cfqd = (struct cfq_data *) data;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+-
 -	/*
--	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
--	 * We do this after the printk so that if something went wrong in the
--	 * change, the printk gets out at least to give a better debug hint
--	 * of who is the culprit.
+-	 * race with a non-idle queue, reset timer
 -	 */
--	global_flush_tlb();
-+	rodata_test();
-+
-+#ifdef CONFIG_CPA_DEBUG
-+	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
-+	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
-+
-+	printk(KERN_INFO "Testing CPA: again\n");
-+	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
-+#endif
+-	if (!start_idle_class_timer(cfqd))
+-		cfq_schedule_dispatch(cfqd);
+-
+-	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+-}
+-
+ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
+ {
+ 	del_timer_sync(&cfqd->idle_slice_timer);
+-	del_timer_sync(&cfqd->idle_class_timer);
+ 	kblockd_flush_work(&cfqd->unplug_work);
  }
- #endif
  
-@@ -632,17 +665,21 @@ void free_initrd_mem(unsigned long start, unsigned long end)
- }
- #endif
+@@ -2126,10 +2117,6 @@ static void *cfq_init_queue(struct request_queue *q)
+ 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
+ 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
  
--void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
--{ 
-+void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
-+{
- #ifdef CONFIG_NUMA
- 	int nid = phys_to_nid(phys);
- #endif
- 	unsigned long pfn = phys >> PAGE_SHIFT;
-+
- 	if (pfn >= end_pfn) {
--		/* This can happen with kdump kernels when accessing firmware
--		   tables. */
-+		/*
-+		 * This can happen with kdump kernels when accessing
-+		 * firmware tables:
-+		 */
- 		if (pfn < end_pfn_map)
- 			return;
-+
- 		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
- 				phys, len);
- 		return;
-@@ -650,9 +687,9 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
+-	init_timer(&cfqd->idle_class_timer);
+-	cfqd->idle_class_timer.function = cfq_idle_class_timer;
+-	cfqd->idle_class_timer.data = (unsigned long) cfqd;
+-
+ 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
  
- 	/* Should check here against the e820 map to avoid double free */
- #ifdef CONFIG_NUMA
--  	reserve_bootmem_node(NODE_DATA(nid), phys, len);
--#else       		
--	reserve_bootmem(phys, len);    
-+	reserve_bootmem_node(NODE_DATA(nid), phys, len);
-+#else
-+	reserve_bootmem(phys, len);
- #endif
- 	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
- 		dma_reserve += len / PAGE_SIZE;
-@@ -660,46 +697,49 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
- 	}
- }
+ 	cfqd->last_end_request = jiffies;
+@@ -2160,7 +2147,7 @@ static int __init cfq_slab_setup(void)
+ 	if (!cfq_pool)
+ 		goto fail;
  
--int kern_addr_valid(unsigned long addr) 
--{ 
-+int kern_addr_valid(unsigned long addr)
-+{
- 	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
--       pgd_t *pgd;
--       pud_t *pud;
--       pmd_t *pmd;
--       pte_t *pte;
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
+-	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
++	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU);
+ 	if (!cfq_ioc_pool)
+ 		goto fail;
  
- 	if (above != 0 && above != -1UL)
--		return 0; 
--	
-+		return 0;
-+
- 	pgd = pgd_offset_k(addr);
- 	if (pgd_none(*pgd))
- 		return 0;
+diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
+index cae0a85..b733732 100644
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -545,6 +545,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
+ 	struct blk_user_trace_setup buts;
+ 	struct compat_blk_user_trace_setup cbuts;
+ 	struct request_queue *q;
++	char b[BDEVNAME_SIZE];
+ 	int ret;
  
- 	pud = pud_offset(pgd, addr);
- 	if (pud_none(*pud))
--		return 0; 
-+		return 0;
+ 	q = bdev_get_queue(bdev);
+@@ -554,6 +555,8 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
+ 	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
+ 		return -EFAULT;
  
- 	pmd = pmd_offset(pud, addr);
- 	if (pmd_none(*pmd))
- 		return 0;
++	strcpy(b, bdevname(bdev, b));
 +
- 	if (pmd_large(*pmd))
- 		return pfn_valid(pmd_pfn(*pmd));
+ 	buts = (struct blk_user_trace_setup) {
+ 		.act_mask = cbuts.act_mask,
+ 		.buf_size = cbuts.buf_size,
+@@ -565,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
+ 	memcpy(&buts.name, &cbuts.name, 32);
  
- 	pte = pte_offset_kernel(pmd, addr);
- 	if (pte_none(*pte))
- 		return 0;
-+
- 	return pfn_valid(pte_pfn(*pte));
- }
+ 	mutex_lock(&bdev->bd_mutex);
+-	ret = do_blk_trace_setup(q, bdev, &buts);
++	ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts);
+ 	mutex_unlock(&bdev->bd_mutex);
+ 	if (ret)
+ 		return ret;
+diff --git a/block/elevator.c b/block/elevator.c
+index e452deb..8cd5775 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -185,9 +185,7 @@ static elevator_t *elevator_alloc(struct request_queue *q,
  
--/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
--   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
--   not need special handling anymore. */
--
-+/*
-+ * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
-+ * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
-+ * not need special handling anymore:
-+ */
- static struct vm_area_struct gate_vma = {
--	.vm_start = VSYSCALL_START,
--	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
--	.vm_page_prot = PAGE_READONLY_EXEC,
--	.vm_flags = VM_READ | VM_EXEC
-+	.vm_start	= VSYSCALL_START,
-+	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
-+	.vm_page_prot	= PAGE_READONLY_EXEC,
-+	.vm_flags	= VM_READ | VM_EXEC
- };
+ 	eq->ops = &e->ops;
+ 	eq->elevator_type = e;
+-	kobject_init(&eq->kobj);
+-	kobject_set_name(&eq->kobj, "%s", "iosched");
+-	eq->kobj.ktype = &elv_ktype;
++	kobject_init(&eq->kobj, &elv_ktype);
+ 	mutex_init(&eq->sysfs_lock);
  
- struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-@@ -714,14 +754,17 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
- int in_gate_area(struct task_struct *task, unsigned long addr)
- {
- 	struct vm_area_struct *vma = get_gate_vma(task);
-+
- 	if (!vma)
- 		return 0;
-+
- 	return (addr >= vma->vm_start) && (addr < vma->vm_end);
- }
+ 	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+@@ -743,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
+ 			q->boundary_rq = NULL;
+ 		}
  
--/* Use this when you have no reliable task/vma, typically from interrupt
-- * context.  It is less reliable than using the task's vma and may give
-- * false positives.
-+/*
-+ * Use this when you have no reliable task/vma, typically from interrupt
-+ * context. It is less reliable than using the task's vma and may give
-+ * false positives:
-  */
- int in_gate_area_no_task(unsigned long addr)
- {
-@@ -741,8 +784,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
- /*
-  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
-  */
--int __meminit vmemmap_populate(struct page *start_page,
--						unsigned long size, int node)
-+int __meminit
-+vmemmap_populate(struct page *start_page, unsigned long size, int node)
- {
- 	unsigned long addr = (unsigned long)start_page;
- 	unsigned long end = (unsigned long)(start_page + size);
-@@ -757,6 +800,7 @@ int __meminit vmemmap_populate(struct page *start_page,
- 		pgd = vmemmap_pgd_populate(addr, node);
- 		if (!pgd)
- 			return -ENOMEM;
+-		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
++		if (rq->cmd_flags & REQ_DONTPREP)
++			break;
 +
- 		pud = vmemmap_pud_populate(pgd, addr, node);
- 		if (!pud)
- 			return -ENOMEM;
-@@ -764,20 +808,22 @@ int __meminit vmemmap_populate(struct page *start_page,
- 		pmd = pmd_offset(pud, addr);
- 		if (pmd_none(*pmd)) {
- 			pte_t entry;
--			void *p = vmemmap_alloc_block(PMD_SIZE, node);
-+			void *p;
++		if (q->dma_drain_size && rq->data_len) {
++			/*
++			 * make sure space for the drain appears we
++			 * know we can do this because max_hw_segments
++			 * has been adjusted to be one fewer than the
++			 * device can handle
++			 */
++			rq->nr_phys_segments++;
++			rq->nr_hw_segments++;
++		}
 +
-+			p = vmemmap_alloc_block(PMD_SIZE, node);
- 			if (!p)
- 				return -ENOMEM;
++		if (!q->prep_rq_fn)
+ 			break;
  
--			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
--			mk_pte_huge(entry);
-+			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-+							PAGE_KERNEL_LARGE);
- 			set_pmd(pmd, __pmd(pte_val(entry)));
+ 		ret = q->prep_rq_fn(q, rq);
+@@ -756,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
+ 			 * avoid resource deadlock.  REQ_STARTED will
+ 			 * prevent other fs requests from passing this one.
+ 			 */
++			if (q->dma_drain_size && rq->data_len &&
++			    !(rq->cmd_flags & REQ_DONTPREP)) {
++				/*
++				 * remove the space for the drain we added
++				 * so that we don't add it again
++				 */
++				--rq->nr_phys_segments;
++				--rq->nr_hw_segments;
++			}
++
+ 			rq = NULL;
+ 			break;
+ 		} else if (ret == BLKPREP_KILL) {
+@@ -931,9 +953,7 @@ int elv_register_queue(struct request_queue *q)
+ 	elevator_t *e = q->elevator;
+ 	int error;
  
- 			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
- 				addr, addr + PMD_SIZE - 1, p, node);
--		} else
-+		} else {
- 			vmemmap_verify((pte_t *)pmd, node, addr, next);
-+		}
- 	}
+-	e->kobj.parent = &q->kobj;
 -
- 	return 0;
- }
- #endif
-diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
-new file mode 100644
-index 0000000..ed79572
---- /dev/null
-+++ b/arch/x86/mm/ioremap.c
-@@ -0,0 +1,501 @@
-+/*
-+ * Re-map IO memory to kernel address space so that we can access it.
-+ * This is needed for high PCI addresses that aren't mapped in the
-+ * 640k-1MB IO memory area on PC's
-+ *
-+ * (C) Copyright 1995 1996 Linus Torvalds
-+ */
-+
-+#include <linux/bootmem.h>
-+#include <linux/init.h>
-+#include <linux/io.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+
-+#include <asm/cacheflush.h>
-+#include <asm/e820.h>
-+#include <asm/fixmap.h>
-+#include <asm/pgtable.h>
-+#include <asm/tlbflush.h>
-+#include <asm/pgalloc.h>
-+
-+enum ioremap_mode {
-+	IOR_MODE_UNCACHED,
-+	IOR_MODE_CACHED,
-+};
-+
-+#ifdef CONFIG_X86_64
-+
-+unsigned long __phys_addr(unsigned long x)
-+{
-+	if (x >= __START_KERNEL_map)
-+		return x - __START_KERNEL_map + phys_base;
-+	return x - PAGE_OFFSET;
-+}
-+EXPORT_SYMBOL(__phys_addr);
-+
+-	error = kobject_add(&e->kobj);
++	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
+ 	if (!error) {
+ 		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
+ 		if (attr) {
+diff --git a/block/genhd.c b/block/genhd.c
+index f2ac914..de2ebb2 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -17,8 +17,10 @@
+ #include <linux/buffer_head.h>
+ #include <linux/mutex.h>
+ 
+-struct kset block_subsys;
+-static DEFINE_MUTEX(block_subsys_lock);
++static DEFINE_MUTEX(block_class_lock);
++#ifndef CONFIG_SYSFS_DEPRECATED
++struct kobject *block_depr;
 +#endif
+ 
+ /*
+  * Can be deleted altogether. Later.
+@@ -37,19 +39,17 @@ static inline int major_to_index(int major)
+ }
+ 
+ #ifdef CONFIG_PROC_FS
+-
+ void blkdev_show(struct seq_file *f, off_t offset)
+ {
+ 	struct blk_major_name *dp;
+ 
+ 	if (offset < BLKDEV_MAJOR_HASH_SIZE) {
+-		mutex_lock(&block_subsys_lock);
++		mutex_lock(&block_class_lock);
+ 		for (dp = major_names[offset]; dp; dp = dp->next)
+ 			seq_printf(f, "%3d %s\n", dp->major, dp->name);
+-		mutex_unlock(&block_subsys_lock);
++		mutex_unlock(&block_class_lock);
+ 	}
+ }
+-
+ #endif /* CONFIG_PROC_FS */
+ 
+ int register_blkdev(unsigned int major, const char *name)
+@@ -57,7 +57,7 @@ int register_blkdev(unsigned int major, const char *name)
+ 	struct blk_major_name **n, *p;
+ 	int index, ret = 0;
+ 
+-	mutex_lock(&block_subsys_lock);
++	mutex_lock(&block_class_lock);
+ 
+ 	/* temporary */
+ 	if (major == 0) {
+@@ -102,7 +102,7 @@ int register_blkdev(unsigned int major, const char *name)
+ 		kfree(p);
+ 	}
+ out:
+-	mutex_unlock(&block_subsys_lock);
++	mutex_unlock(&block_class_lock);
+ 	return ret;
+ }
+ 
+@@ -114,7 +114,7 @@ void unregister_blkdev(unsigned int major, const char *name)
+ 	struct blk_major_name *p = NULL;
+ 	int index = major_to_index(major);
+ 
+-	mutex_lock(&block_subsys_lock);
++	mutex_lock(&block_class_lock);
+ 	for (n = &major_names[index]; *n; n = &(*n)->next)
+ 		if ((*n)->major == major)
+ 			break;
+@@ -124,7 +124,7 @@ void unregister_blkdev(unsigned int major, const char *name)
+ 		p = *n;
+ 		*n = p->next;
+ 	}
+-	mutex_unlock(&block_subsys_lock);
++	mutex_unlock(&block_class_lock);
+ 	kfree(p);
+ }
+ 
+@@ -137,29 +137,30 @@ static struct kobj_map *bdev_map;
+  * range must be nonzero
+  * The hash chain is sorted on range, so that subranges can override.
+  */
+-void blk_register_region(dev_t dev, unsigned long range, struct module *module,
++void blk_register_region(dev_t devt, unsigned long range, struct module *module,
+ 			 struct kobject *(*probe)(dev_t, int *, void *),
+ 			 int (*lock)(dev_t, void *), void *data)
+ {
+-	kobj_map(bdev_map, dev, range, module, probe, lock, data);
++	kobj_map(bdev_map, devt, range, module, probe, lock, data);
+ }
+ 
+ EXPORT_SYMBOL(blk_register_region);
+ 
+-void blk_unregister_region(dev_t dev, unsigned long range)
++void blk_unregister_region(dev_t devt, unsigned long range)
+ {
+-	kobj_unmap(bdev_map, dev, range);
++	kobj_unmap(bdev_map, devt, range);
+ }
+ 
+ EXPORT_SYMBOL(blk_unregister_region);
+ 
+-static struct kobject *exact_match(dev_t dev, int *part, void *data)
++static struct kobject *exact_match(dev_t devt, int *part, void *data)
+ {
+ 	struct gendisk *p = data;
+-	return &p->kobj;
 +
-+int page_is_ram(unsigned long pagenr)
-+{
-+	unsigned long addr, end;
-+	int i;
-+
-+	for (i = 0; i < e820.nr_map; i++) {
-+		/*
-+		 * Not usable memory:
-+		 */
-+		if (e820.map[i].type != E820_RAM)
-+			continue;
-+		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
-+		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
-+
-+		/*
-+		 * Sanity check: Some BIOSen report areas as RAM that
-+		 * are not. Notably the 640->1Mb area, which is the
-+		 * PCI BIOS area.
-+		 */
-+		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
-+		    end < (BIOS_END >> PAGE_SHIFT))
-+			continue;
-+
-+		if ((pagenr >= addr) && (pagenr < end))
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+/*
-+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
-+ * conflicts.
-+ */
-+static int ioremap_change_attr(unsigned long paddr, unsigned long size,
-+			       enum ioremap_mode mode)
-+{
-+	unsigned long vaddr = (unsigned long)__va(paddr);
-+	unsigned long nrpages = size >> PAGE_SHIFT;
-+	int err, level;
-+
-+	/* No change for pages after the last mapping */
-+	if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
-+		return 0;
-+
-+	/*
-+	 * If there is no identity map for this address,
-+	 * change_page_attr_addr is unnecessary
-+	 */
-+	if (!lookup_address(vaddr, &level))
-+		return 0;
-+
-+	switch (mode) {
-+	case IOR_MODE_UNCACHED:
-+	default:
-+		err = set_memory_uc(vaddr, nrpages);
-+		break;
-+	case IOR_MODE_CACHED:
-+		err = set_memory_wb(vaddr, nrpages);
-+		break;
-+	}
-+
-+	return err;
-+}
-+
-+/*
-+ * Remap an arbitrary physical address space into the kernel virtual
-+ * address space. Needed when the kernel wants to access high addresses
-+ * directly.
-+ *
-+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-+ * have to convert them into an offset in a page-aligned mapping, but the
-+ * caller shouldn't need to know that small detail.
-+ */
-+static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-+			       enum ioremap_mode mode)
-+{
-+	void __iomem *addr;
-+	struct vm_struct *area;
-+	unsigned long offset, last_addr;
-+	pgprot_t prot;
-+
-+	/* Don't allow wraparound or zero size */
-+	last_addr = phys_addr + size - 1;
-+	if (!size || last_addr < phys_addr)
-+		return NULL;
-+
-+	/*
-+	 * Don't remap the low PCI/ISA area, it's always mapped..
-+	 */
-+	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+		return (__force void __iomem *)phys_to_virt(phys_addr);
-+
-+	/*
-+	 * Don't allow anybody to remap normal RAM that we're using..
-+	 */
-+	for (offset = phys_addr >> PAGE_SHIFT; offset < max_pfn_mapped &&
-+	     (offset << PAGE_SHIFT) < last_addr; offset++) {
-+		if (page_is_ram(offset))
-+			return NULL;
-+	}
-+
-+	switch (mode) {
-+	case IOR_MODE_UNCACHED:
-+	default:
-+		prot = PAGE_KERNEL_NOCACHE;
-+		break;
-+	case IOR_MODE_CACHED:
-+		prot = PAGE_KERNEL;
-+		break;
-+	}
-+
-+	/*
-+	 * Mappings have to be page-aligned
-+	 */
-+	offset = phys_addr & ~PAGE_MASK;
-+	phys_addr &= PAGE_MASK;
-+	size = PAGE_ALIGN(last_addr+1) - phys_addr;
-+
-+	/*
-+	 * Ok, go for it..
-+	 */
-+	area = get_vm_area(size, VM_IOREMAP);
-+	if (!area)
-+		return NULL;
-+	area->phys_addr = phys_addr;
-+	addr = (void __iomem *) area->addr;
-+	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-+			       phys_addr, prot)) {
-+		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
-+		return NULL;
-+	}
-+
-+	if (ioremap_change_attr(phys_addr, size, mode) < 0) {
-+		vunmap(addr);
-+		return NULL;
-+	}
-+
-+	return (void __iomem *) (offset + (char __iomem *)addr);
-+}
-+
-+/**
-+ * ioremap_nocache     -   map bus memory into CPU space
-+ * @offset:    bus address of the memory
-+ * @size:      size of the resource to map
-+ *
-+ * ioremap_nocache performs a platform specific sequence of operations to
-+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
-+ * writew/writel functions and the other mmio helpers. The returned
-+ * address is not guaranteed to be usable directly as a virtual
-+ * address.
-+ *
-+ * This version of ioremap ensures that the memory is marked uncachable
-+ * on the CPU as well as honouring existing caching rules from things like
-+ * the PCI bus. Note that there are other caches and buffers on many
-+ * busses. In particular driver authors should read up on PCI writes
-+ *
-+ * It's useful if some control registers are in such an area and
-+ * write combining or read caching is not desirable:
-+ *
-+ * Must be freed with iounmap.
-+ */
-+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
-+{
-+	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
-+}
-+EXPORT_SYMBOL(ioremap_nocache);
-+
-+void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
-+{
-+	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
-+}
-+EXPORT_SYMBOL(ioremap_cache);
-+
-+/**
-+ * iounmap - Free a IO remapping
-+ * @addr: virtual address from ioremap_*
-+ *
-+ * Caller must ensure there is only one unmapping for the same pointer.
-+ */
-+void iounmap(volatile void __iomem *addr)
-+{
-+	struct vm_struct *p, *o;
-+
-+	if ((void __force *)addr <= high_memory)
-+		return;
-+
-+	/*
-+	 * __ioremap special-cases the PCI/ISA range by not instantiating a
-+	 * vm_area and by simply returning an address into the kernel mapping
-+	 * of ISA space.   So handle that here.
-+	 */
-+	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-+	    addr < phys_to_virt(ISA_END_ADDRESS))
-+		return;
-+
-+	addr = (volatile void __iomem *)
-+		(PAGE_MASK & (unsigned long __force)addr);
-+
-+	/* Use the vm area unlocked, assuming the caller
-+	   ensures there isn't another iounmap for the same address
-+	   in parallel. Reuse of the virtual address is prevented by
-+	   leaving it in the global lists until we're done with it.
-+	   cpa takes care of the direct mappings. */
-+	read_lock(&vmlist_lock);
-+	for (p = vmlist; p; p = p->next) {
-+		if (p->addr == addr)
-+			break;
-+	}
-+	read_unlock(&vmlist_lock);
-+
-+	if (!p) {
-+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
-+		dump_stack();
-+		return;
-+	}
-+
-+	/* Reset the direct mapping. Can block */
-+	ioremap_change_attr(p->phys_addr, p->size, IOR_MODE_CACHED);
-+
-+	/* Finally remove it */
-+	o = remove_vm_area((void *)addr);
-+	BUG_ON(p != o || o == NULL);
-+	kfree(p);
-+}
-+EXPORT_SYMBOL(iounmap);
-+
-+#ifdef CONFIG_X86_32
-+
-+int __initdata early_ioremap_debug;
-+
-+static int __init early_ioremap_debug_setup(char *str)
-+{
-+	early_ioremap_debug = 1;
-+
-+	return 0;
-+}
-+early_param("early_ioremap_debug", early_ioremap_debug_setup);
-+
-+static __initdata int after_paging_init;
-+static __initdata unsigned long bm_pte[1024]
-+				__attribute__((aligned(PAGE_SIZE)));
-+
-+static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
-+{
-+	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
-+}
-+
-+static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
-+{
-+	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
-+}
-+
-+void __init early_ioremap_init(void)
-+{
-+	unsigned long *pgd;
-+
-+	if (early_ioremap_debug)
-+		printk(KERN_INFO "early_ioremap_init()\n");
-+
-+	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
-+	*pgd = __pa(bm_pte) | _PAGE_TABLE;
-+	memset(bm_pte, 0, sizeof(bm_pte));
-+	/*
-+	 * The boot-ioremap range spans multiple pgds, for which
-+	 * we are not prepared:
-+	 */
-+	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
-+		WARN_ON(1);
-+		printk(KERN_WARNING "pgd %p != %p\n",
-+		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
-+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
-+		       fix_to_virt(FIX_BTMAP_BEGIN));
-+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
-+		       fix_to_virt(FIX_BTMAP_END));
-+
-+		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
-+		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
-+		       FIX_BTMAP_BEGIN);
-+	}
-+}
-+
-+void __init early_ioremap_clear(void)
-+{
-+	unsigned long *pgd;
-+
-+	if (early_ioremap_debug)
-+		printk(KERN_INFO "early_ioremap_clear()\n");
-+
-+	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
-+	*pgd = 0;
-+	paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT);
-+	__flush_tlb_all();
-+}
-+
-+void __init early_ioremap_reset(void)
-+{
-+	enum fixed_addresses idx;
-+	unsigned long *pte, phys, addr;
++	return &p->dev.kobj;
+ }
+ 
+-static int exact_lock(dev_t dev, void *data)
++static int exact_lock(dev_t devt, void *data)
+ {
+ 	struct gendisk *p = data;
+ 
+@@ -194,8 +195,6 @@ void unlink_gendisk(struct gendisk *disk)
+ 			      disk->minors);
+ }
+ 
+-#define to_disk(obj) container_of(obj,struct gendisk,kobj)
+-
+ /**
+  * get_gendisk - get partitioning information for a given device
+  * @dev: device to get partitioning information for
+@@ -203,10 +202,12 @@ void unlink_gendisk(struct gendisk *disk)
+  * This function gets the structure containing partitioning
+  * information for the given device @dev.
+  */
+-struct gendisk *get_gendisk(dev_t dev, int *part)
++struct gendisk *get_gendisk(dev_t devt, int *part)
+ {
+-	struct kobject *kobj = kobj_lookup(bdev_map, dev, part);
+-	return  kobj ? to_disk(kobj) : NULL;
++	struct kobject *kobj = kobj_lookup(bdev_map, devt, part);
++	struct device *dev = kobj_to_dev(kobj);
 +
-+	after_paging_init = 1;
-+	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-+		addr = fix_to_virt(idx);
-+		pte = early_ioremap_pte(addr);
-+		if (!*pte & _PAGE_PRESENT) {
-+			phys = *pte & PAGE_MASK;
-+			set_fixmap(idx, phys);
++	return  kobj ? dev_to_disk(dev) : NULL;
+ }
+ 
+ /*
+@@ -216,13 +217,17 @@ struct gendisk *get_gendisk(dev_t dev, int *part)
+  */
+ void __init printk_all_partitions(void)
+ {
+-	int n;
++	struct device *dev;
+ 	struct gendisk *sgp;
++	char buf[BDEVNAME_SIZE];
++	int n;
+ 
+-	mutex_lock(&block_subsys_lock);
++	mutex_lock(&block_class_lock);
+ 	/* For each block device... */
+-	list_for_each_entry(sgp, &block_subsys.list, kobj.entry) {
+-		char buf[BDEVNAME_SIZE];
++	list_for_each_entry(dev, &block_class.devices, node) {
++		if (dev->type != &disk_type)
++			continue;
++		sgp = dev_to_disk(dev);
+ 		/*
+ 		 * Don't show empty devices or things that have been surpressed
+ 		 */
+@@ -255,38 +260,46 @@ void __init printk_all_partitions(void)
+ 				sgp->major, n + 1 + sgp->first_minor,
+ 				(unsigned long long)sgp->part[n]->nr_sects >> 1,
+ 				disk_name(sgp, n + 1, buf));
+-		} /* partition subloop */
+-	} /* Block device loop */
 +		}
 +	}
-+}
-+
-+static void __init __early_set_fixmap(enum fixed_addresses idx,
-+				   unsigned long phys, pgprot_t flags)
-+{
-+	unsigned long *pte, addr = __fix_to_virt(idx);
-+
-+	if (idx >= __end_of_fixed_addresses) {
-+		BUG();
-+		return;
+ 
+-	mutex_unlock(&block_subsys_lock);
+-	return;
++	mutex_unlock(&block_class_lock);
+ }
+ 
+ #ifdef CONFIG_PROC_FS
+ /* iterator */
+ static void *part_start(struct seq_file *part, loff_t *pos)
+ {
+-	struct list_head *p;
+-	loff_t l = *pos;
++	loff_t k = *pos;
++	struct device *dev;
+ 
+-	mutex_lock(&block_subsys_lock);
+-	list_for_each(p, &block_subsys.list)
+-		if (!l--)
+-			return list_entry(p, struct gendisk, kobj.entry);
++	mutex_lock(&block_class_lock);
++	list_for_each_entry(dev, &block_class.devices, node) {
++		if (dev->type != &disk_type)
++			continue;
++		if (!k--)
++			return dev_to_disk(dev);
 +	}
-+	pte = early_ioremap_pte(addr);
-+	if (pgprot_val(flags))
-+		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
-+	else
-+		*pte = 0;
-+	__flush_tlb_one(addr);
-+}
+ 	return NULL;
+ }
+ 
+ static void *part_next(struct seq_file *part, void *v, loff_t *pos)
+ {
+-	struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
++	struct gendisk *gp = v;
++	struct device *dev;
+ 	++*pos;
+-	return p==&block_subsys.list ? NULL :
+-		list_entry(p, struct gendisk, kobj.entry);
++	list_for_each_entry(dev, &gp->dev.node, node) {
++		if (&dev->node == &block_class.devices)
++			return NULL;
++		if (dev->type == &disk_type)
++			return dev_to_disk(dev);
++	}
++	return NULL;
+ }
+ 
+ static void part_stop(struct seq_file *part, void *v)
+ {
+-	mutex_unlock(&block_subsys_lock);
++	mutex_unlock(&block_class_lock);
+ }
+ 
+ static int show_partition(struct seq_file *part, void *v)
+@@ -295,7 +308,7 @@ static int show_partition(struct seq_file *part, void *v)
+ 	int n;
+ 	char buf[BDEVNAME_SIZE];
+ 
+-	if (&sgp->kobj.entry == block_subsys.list.next)
++	if (&sgp->dev.node == block_class.devices.next)
+ 		seq_puts(part, "major minor  #blocks  name\n\n");
+ 
+ 	/* Don't show non-partitionable removeable devices or empty devices */
+@@ -324,111 +337,82 @@ static int show_partition(struct seq_file *part, void *v)
+ 	return 0;
+ }
+ 
+-struct seq_operations partitions_op = {
+-	.start =part_start,
+-	.next =	part_next,
+-	.stop =	part_stop,
+-	.show =	show_partition
++const struct seq_operations partitions_op = {
++	.start	= part_start,
++	.next	= part_next,
++	.stop	= part_stop,
++	.show	= show_partition
+ };
+ #endif
+ 
+ 
+ extern int blk_dev_init(void);
+ 
+-static struct kobject *base_probe(dev_t dev, int *part, void *data)
++static struct kobject *base_probe(dev_t devt, int *part, void *data)
+ {
+-	if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
++	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
+ 		/* Make old-style 2.4 aliases work */
+-		request_module("block-major-%d", MAJOR(dev));
++		request_module("block-major-%d", MAJOR(devt));
+ 	return NULL;
+ }
+ 
+ static int __init genhd_device_init(void)
+ {
+-	int err;
+-
+-	bdev_map = kobj_map_init(base_probe, &block_subsys_lock);
++	class_register(&block_class);
++	bdev_map = kobj_map_init(base_probe, &block_class_lock);
+ 	blk_dev_init();
+-	err = subsystem_register(&block_subsys);
+-	if (err < 0)
+-		printk(KERN_WARNING "%s: subsystem_register error: %d\n",
+-			__FUNCTION__, err);
+-	return err;
 +
-+static inline void __init early_set_fixmap(enum fixed_addresses idx,
-+					unsigned long phys)
-+{
-+	if (after_paging_init)
-+		set_fixmap(idx, phys);
-+	else
-+		__early_set_fixmap(idx, phys, PAGE_KERNEL);
-+}
++#ifndef CONFIG_SYSFS_DEPRECATED
++	/* create top-level block dir */
++	block_depr = kobject_create_and_add("block", NULL);
++#endif
++	return 0;
+ }
+ 
+ subsys_initcall(genhd_device_init);
+ 
+-
+-
+-/*
+- * kobject & sysfs bindings for block devices
+- */
+-static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
+-			      char *page)
++static ssize_t disk_range_show(struct device *dev,
++			       struct device_attribute *attr, char *buf)
+ {
+-	struct gendisk *disk = to_disk(kobj);
+-	struct disk_attribute *disk_attr =
+-		container_of(attr,struct disk_attribute,attr);
+-	ssize_t ret = -EIO;
++	struct gendisk *disk = dev_to_disk(dev);
+ 
+-	if (disk_attr->show)
+-		ret = disk_attr->show(disk,page);
+-	return ret;
++	return sprintf(buf, "%d\n", disk->minors);
+ }
+ 
+-static ssize_t disk_attr_store(struct kobject * kobj, struct attribute * attr,
+-			       const char *page, size_t count)
++static ssize_t disk_removable_show(struct device *dev,
++				   struct device_attribute *attr, char *buf)
+ {
+-	struct gendisk *disk = to_disk(kobj);
+-	struct disk_attribute *disk_attr =
+-		container_of(attr,struct disk_attribute,attr);
+-	ssize_t ret = 0;
++	struct gendisk *disk = dev_to_disk(dev);
+ 
+-	if (disk_attr->store)
+-		ret = disk_attr->store(disk, page, count);
+-	return ret;
++	return sprintf(buf, "%d\n",
++		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
+ }
+ 
+-static struct sysfs_ops disk_sysfs_ops = {
+-	.show	= &disk_attr_show,
+-	.store	= &disk_attr_store,
+-};
+-
+-static ssize_t disk_uevent_store(struct gendisk * disk,
+-				 const char *buf, size_t count)
+-{
+-	kobject_uevent(&disk->kobj, KOBJ_ADD);
+-	return count;
+-}
+-static ssize_t disk_dev_read(struct gendisk * disk, char *page)
+-{
+-	dev_t base = MKDEV(disk->major, disk->first_minor); 
+-	return print_dev_t(page, base);
+-}
+-static ssize_t disk_range_read(struct gendisk * disk, char *page)
++static ssize_t disk_size_show(struct device *dev,
++			      struct device_attribute *attr, char *buf)
+ {
+-	return sprintf(page, "%d\n", disk->minors);
+-}
+-static ssize_t disk_removable_read(struct gendisk * disk, char *page)
+-{
+-	return sprintf(page, "%d\n",
+-		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
++	struct gendisk *disk = dev_to_disk(dev);
+ 
++	return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk));
+ }
+-static ssize_t disk_size_read(struct gendisk * disk, char *page)
+-{
+-	return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
+-}
+-static ssize_t disk_capability_read(struct gendisk *disk, char *page)
 +
-+static inline void __init early_clear_fixmap(enum fixed_addresses idx)
-+{
-+	if (after_paging_init)
-+		clear_fixmap(idx);
-+	else
-+		__early_set_fixmap(idx, 0, __pgprot(0));
-+}
++static ssize_t disk_capability_show(struct device *dev,
++				    struct device_attribute *attr, char *buf)
+ {
+-	return sprintf(page, "%x\n", disk->flags);
++	struct gendisk *disk = dev_to_disk(dev);
 +
++	return sprintf(buf, "%x\n", disk->flags);
+ }
+-static ssize_t disk_stats_read(struct gendisk * disk, char *page)
 +
-+int __initdata early_ioremap_nested;
++static ssize_t disk_stat_show(struct device *dev,
++			      struct device_attribute *attr, char *buf)
+ {
++	struct gendisk *disk = dev_to_disk(dev);
 +
-+static int __init check_early_ioremap_leak(void)
+ 	preempt_disable();
+ 	disk_round_stats(disk);
+ 	preempt_enable();
+-	return sprintf(page,
++	return sprintf(buf,
+ 		"%8lu %8lu %8llu %8u "
+ 		"%8lu %8lu %8llu %8u "
+ 		"%8u %8u %8u"
+@@ -445,40 +429,21 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page)
+ 		jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
+ 		jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
+ }
+-static struct disk_attribute disk_attr_uevent = {
+-	.attr = {.name = "uevent", .mode = S_IWUSR },
+-	.store	= disk_uevent_store
+-};
+-static struct disk_attribute disk_attr_dev = {
+-	.attr = {.name = "dev", .mode = S_IRUGO },
+-	.show	= disk_dev_read
+-};
+-static struct disk_attribute disk_attr_range = {
+-	.attr = {.name = "range", .mode = S_IRUGO },
+-	.show	= disk_range_read
+-};
+-static struct disk_attribute disk_attr_removable = {
+-	.attr = {.name = "removable", .mode = S_IRUGO },
+-	.show	= disk_removable_read
+-};
+-static struct disk_attribute disk_attr_size = {
+-	.attr = {.name = "size", .mode = S_IRUGO },
+-	.show	= disk_size_read
+-};
+-static struct disk_attribute disk_attr_capability = {
+-	.attr = {.name = "capability", .mode = S_IRUGO },
+-	.show	= disk_capability_read
+-};
+-static struct disk_attribute disk_attr_stat = {
+-	.attr = {.name = "stat", .mode = S_IRUGO },
+-	.show	= disk_stats_read
+-};
+ 
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
++static ssize_t disk_fail_show(struct device *dev,
++			      struct device_attribute *attr, char *buf)
 +{
-+	if (!early_ioremap_nested)
-+		return 0;
-+
-+	printk(KERN_WARNING
-+	       "Debug warning: early ioremap leak of %d areas detected.\n",
-+	       early_ioremap_nested);
-+	printk(KERN_WARNING
-+	       "please boot with early_ioremap_debug and report the dmesg.\n");
-+	WARN_ON(1);
++	struct gendisk *disk = dev_to_disk(dev);
 +
-+	return 1;
++	return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
 +}
-+late_initcall(check_early_ioremap_leak);
-+
-+void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
-+{
-+	unsigned long offset, last_addr;
-+	unsigned int nrpages, nesting;
-+	enum fixed_addresses idx0, idx;
-+
-+	WARN_ON(system_state != SYSTEM_BOOTING);
+ 
+-static ssize_t disk_fail_store(struct gendisk * disk,
++static ssize_t disk_fail_store(struct device *dev,
++			       struct device_attribute *attr,
+ 			       const char *buf, size_t count)
+ {
++	struct gendisk *disk = dev_to_disk(dev);
+ 	int i;
+ 
+ 	if (count > 0 && sscanf(buf, "%d", &i) > 0) {
+@@ -490,136 +455,100 @@ static ssize_t disk_fail_store(struct gendisk * disk,
+ 
+ 	return count;
+ }
+-static ssize_t disk_fail_read(struct gendisk * disk, char *page)
+-{
+-	return sprintf(page, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
+-}
+-static struct disk_attribute disk_attr_fail = {
+-	.attr = {.name = "make-it-fail", .mode = S_IRUGO | S_IWUSR },
+-	.store	= disk_fail_store,
+-	.show	= disk_fail_read
+-};
+ 
+ #endif
+ 
+-static struct attribute * default_attrs[] = {
+-	&disk_attr_uevent.attr,
+-	&disk_attr_dev.attr,
+-	&disk_attr_range.attr,
+-	&disk_attr_removable.attr,
+-	&disk_attr_size.attr,
+-	&disk_attr_stat.attr,
+-	&disk_attr_capability.attr,
++static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
++static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
++static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL);
++static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
++static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL);
++#ifdef CONFIG_FAIL_MAKE_REQUEST
++static struct device_attribute dev_attr_fail =
++	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store);
++#endif
 +
-+	nesting = early_ioremap_nested;
-+	if (early_ioremap_debug) {
-+		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
-+		       phys_addr, size, nesting);
-+		dump_stack();
-+	}
++static struct attribute *disk_attrs[] = {
++	&dev_attr_range.attr,
++	&dev_attr_removable.attr,
++	&dev_attr_size.attr,
++	&dev_attr_capability.attr,
++	&dev_attr_stat.attr,
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+-	&disk_attr_fail.attr,
++	&dev_attr_fail.attr,
+ #endif
+-	NULL,
++	NULL
++};
 +
-+	/* Don't allow wraparound or zero size */
-+	last_addr = phys_addr + size - 1;
-+	if (!size || last_addr < phys_addr) {
-+		WARN_ON(1);
-+		return NULL;
-+	}
++static struct attribute_group disk_attr_group = {
++	.attrs = disk_attrs,
+ };
+ 
+-static void disk_release(struct kobject * kobj)
++static struct attribute_group *disk_attr_groups[] = {
++	&disk_attr_group,
++	NULL
++};
 +
-+	if (nesting >= FIX_BTMAPS_NESTING) {
-+		WARN_ON(1);
-+		return NULL;
-+	}
-+	early_ioremap_nested++;
-+	/*
-+	 * Mappings have to be page-aligned
-+	 */
-+	offset = phys_addr & ~PAGE_MASK;
-+	phys_addr &= PAGE_MASK;
-+	size = PAGE_ALIGN(last_addr) - phys_addr;
++static void disk_release(struct device *dev)
+ {
+-	struct gendisk *disk = to_disk(kobj);
++	struct gendisk *disk = dev_to_disk(dev);
 +
-+	/*
-+	 * Mappings have to fit in the FIX_BTMAP area.
-+	 */
-+	nrpages = size >> PAGE_SHIFT;
-+	if (nrpages > NR_FIX_BTMAPS) {
-+		WARN_ON(1);
-+		return NULL;
+ 	kfree(disk->random);
+ 	kfree(disk->part);
+ 	free_disk_stats(disk);
+ 	kfree(disk);
+ }
+-
+-static struct kobj_type ktype_block = {
+-	.release	= disk_release,
+-	.sysfs_ops	= &disk_sysfs_ops,
+-	.default_attrs	= default_attrs,
++struct class block_class = {
++	.name		= "block",
+ };
+ 
+-extern struct kobj_type ktype_part;
+-
+-static int block_uevent_filter(struct kset *kset, struct kobject *kobj)
+-{
+-	struct kobj_type *ktype = get_ktype(kobj);
+-
+-	return ((ktype == &ktype_block) || (ktype == &ktype_part));
+-}
+-
+-static int block_uevent(struct kset *kset, struct kobject *kobj,
+-			struct kobj_uevent_env *env)
+-{
+-	struct kobj_type *ktype = get_ktype(kobj);
+-	struct device *physdev;
+-	struct gendisk *disk;
+-	struct hd_struct *part;
+-
+-	if (ktype == &ktype_block) {
+-		disk = container_of(kobj, struct gendisk, kobj);
+-		add_uevent_var(env, "MINOR=%u", disk->first_minor);
+-	} else if (ktype == &ktype_part) {
+-		disk = container_of(kobj->parent, struct gendisk, kobj);
+-		part = container_of(kobj, struct hd_struct, kobj);
+-		add_uevent_var(env, "MINOR=%u",
+-			       disk->first_minor + part->partno);
+-	} else
+-		return 0;
+-
+-	add_uevent_var(env, "MAJOR=%u", disk->major);
+-
+-	/* add physical device, backing this device  */
+-	physdev = disk->driverfs_dev;
+-	if (physdev) {
+-		char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
+-
+-		add_uevent_var(env, "PHYSDEVPATH=%s", path);
+-		kfree(path);
+-
+-		if (physdev->bus)
+-			add_uevent_var(env, "PHYSDEVBUS=%s", physdev->bus->name);
+-
+-		if (physdev->driver)
+-			add_uevent_var(env, physdev->driver->name);
+-	}
+-
+-	return 0;
+-}
+-
+-static struct kset_uevent_ops block_uevent_ops = {
+-	.filter		= block_uevent_filter,
+-	.uevent		= block_uevent,
++struct device_type disk_type = {
++	.name		= "disk",
++	.groups		= disk_attr_groups,
++	.release	= disk_release,
+ };
+ 
+-decl_subsys(block, &ktype_block, &block_uevent_ops);
+-
+ /*
+  * aggregate disk stat collector.  Uses the same stats that the sysfs
+  * entries do, above, but makes them available through one seq_file.
+- * Watching a few disks may be efficient through sysfs, but watching
+- * all of them will be more efficient through this interface.
+  *
+  * The output looks suspiciously like /proc/partitions with a bunch of
+  * extra fields.
+  */
+ 
+-/* iterator */
+ static void *diskstats_start(struct seq_file *part, loff_t *pos)
+ {
+ 	loff_t k = *pos;
+-	struct list_head *p;
++	struct device *dev;
+ 
+-	mutex_lock(&block_subsys_lock);
+-	list_for_each(p, &block_subsys.list)
++	mutex_lock(&block_class_lock);
++	list_for_each_entry(dev, &block_class.devices, node) {
++		if (dev->type != &disk_type)
++			continue;
+ 		if (!k--)
+-			return list_entry(p, struct gendisk, kobj.entry);
++			return dev_to_disk(dev);
 +	}
+ 	return NULL;
+ }
+ 
+ static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
+ {
+-	struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
++	struct gendisk *gp = v;
++	struct device *dev;
 +
-+	/*
-+	 * Ok, go for it..
-+	 */
-+	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
-+	idx = idx0;
-+	while (nrpages > 0) {
-+		early_set_fixmap(idx, phys_addr);
-+		phys_addr += PAGE_SIZE;
-+		--idx;
-+		--nrpages;
+ 	++*pos;
+-	return p==&block_subsys.list ? NULL :
+-		list_entry(p, struct gendisk, kobj.entry);
++	list_for_each_entry(dev, &gp->dev.node, node) {
++		if (&dev->node == &block_class.devices)
++			return NULL;
++		if (dev->type == &disk_type)
++			return dev_to_disk(dev);
 +	}
-+	if (early_ioremap_debug)
-+		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
-+
-+	return (void *) (offset + fix_to_virt(idx0));
-+}
-+
-+void __init early_iounmap(void *addr, unsigned long size)
++	return NULL;
+ }
+ 
+ static void diskstats_stop(struct seq_file *part, void *v)
+ {
+-	mutex_unlock(&block_subsys_lock);
++	mutex_unlock(&block_class_lock);
+ }
+ 
+ static int diskstats_show(struct seq_file *s, void *v)
+@@ -629,7 +558,7 @@ static int diskstats_show(struct seq_file *s, void *v)
+ 	int n = 0;
+ 
+ 	/*
+-	if (&sgp->kobj.entry == block_subsys.kset.list.next)
++	if (&gp->dev.kobj.entry == block_class.devices.next)
+ 		seq_puts(s,	"major minor name"
+ 				"     rio rmerge rsect ruse wio wmerge "
+ 				"wsect wuse running use aveq"
+@@ -666,7 +595,7 @@ static int diskstats_show(struct seq_file *s, void *v)
+ 	return 0;
+ }
+ 
+-struct seq_operations diskstats_op = {
++const struct seq_operations diskstats_op = {
+ 	.start	= diskstats_start,
+ 	.next	= diskstats_next,
+ 	.stop	= diskstats_stop,
+@@ -683,7 +612,7 @@ static void media_change_notify_thread(struct work_struct *work)
+ 	 * set enviroment vars to indicate which event this is for
+ 	 * so that user space will know to go check the media status.
+ 	 */
+-	kobject_uevent_env(&gd->kobj, KOBJ_CHANGE, envp);
++	kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp);
+ 	put_device(gd->driverfs_dev);
+ }
+ 
+@@ -694,6 +623,25 @@ void genhd_media_change_notify(struct gendisk *disk)
+ }
+ EXPORT_SYMBOL_GPL(genhd_media_change_notify);
+ 
++dev_t blk_lookup_devt(const char *name)
 +{
-+	unsigned long virt_addr;
-+	unsigned long offset;
-+	unsigned int nrpages;
-+	enum fixed_addresses idx;
-+	unsigned int nesting;
-+
-+	nesting = --early_ioremap_nested;
-+	WARN_ON(nesting < 0);
-+
-+	if (early_ioremap_debug) {
-+		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
-+		       size, nesting);
-+		dump_stack();
-+	}
++	struct device *dev;
++	dev_t devt = MKDEV(0, 0);
 +
-+	virt_addr = (unsigned long)addr;
-+	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
-+		WARN_ON(1);
-+		return;
++	mutex_lock(&block_class_lock);
++	list_for_each_entry(dev, &block_class.devices, node) {
++		if (strcmp(dev->bus_id, name) == 0) {
++			devt = dev->devt;
++			break;
++		}
 +	}
-+	offset = virt_addr & ~PAGE_MASK;
-+	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
++	mutex_unlock(&block_class_lock);
 +
-+	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
-+	while (nrpages > 0) {
-+		early_clear_fixmap(idx);
-+		--idx;
-+		--nrpages;
-+	}
++	return devt;
 +}
 +
-+void __this_fixmap_does_not_exist(void)
-+{
-+	WARN_ON(1);
-+}
++EXPORT_SYMBOL(blk_lookup_devt);
 +
-+#endif /* CONFIG_X86_32 */
-diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
+ struct gendisk *alloc_disk(int minors)
+ {
+ 	return alloc_disk_node(minors, -1);
+@@ -721,9 +669,10 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
+ 			}
+ 		}
+ 		disk->minors = minors;
+-		kobj_set_kset_s(disk,block_subsys);
+-		kobject_init(&disk->kobj);
+ 		rand_initialize_disk(disk);
++		disk->dev.class = &block_class;
++		disk->dev.type = &disk_type;
++		device_initialize(&disk->dev);
+ 		INIT_WORK(&disk->async_notify,
+ 			media_change_notify_thread);
+ 	}
+@@ -743,7 +692,7 @@ struct kobject *get_disk(struct gendisk *disk)
+ 	owner = disk->fops->owner;
+ 	if (owner && !try_module_get(owner))
+ 		return NULL;
+-	kobj = kobject_get(&disk->kobj);
++	kobj = kobject_get(&disk->dev.kobj);
+ 	if (kobj == NULL) {
+ 		module_put(owner);
+ 		return NULL;
+@@ -757,7 +706,7 @@ EXPORT_SYMBOL(get_disk);
+ void put_disk(struct gendisk *disk)
+ {
+ 	if (disk)
+-		kobject_put(&disk->kobj);
++		kobject_put(&disk->dev.kobj);
+ }
+ 
+ EXPORT_SYMBOL(put_disk);
+diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
 deleted file mode 100644
-index 0b27831..0000000
---- a/arch/x86/mm/ioremap_32.c
+index 8b91994..0000000
+--- a/block/ll_rw_blk.c
 +++ /dev/null
-@@ -1,274 +0,0 @@
+@@ -1,4214 +0,0 @@
 -/*
-- * arch/i386/mm/ioremap.c
+- * Copyright (C) 1991, 1992 Linus Torvalds
+- * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
+- * Elevator latency, (C) 2000  Andrea Arcangeli <andrea at suse.de> SuSE
+- * Queue request tables / lock, selectable elevator, Jens Axboe <axboe at suse.de>
+- * kernel-doc documentation started by NeilBrown <neilb at cse.unsw.edu.au> -  July2000
+- * bio rewrite, highmem i/o, etc, Jens Axboe <axboe at suse.de> - may 2001
+- */
+-
+-/*
+- * This handles all read/write requests to block devices
+- */
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/backing-dev.h>
+-#include <linux/bio.h>
+-#include <linux/blkdev.h>
+-#include <linux/highmem.h>
+-#include <linux/mm.h>
+-#include <linux/kernel_stat.h>
+-#include <linux/string.h>
+-#include <linux/init.h>
+-#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
+-#include <linux/completion.h>
+-#include <linux/slab.h>
+-#include <linux/swap.h>
+-#include <linux/writeback.h>
+-#include <linux/task_io_accounting_ops.h>
+-#include <linux/interrupt.h>
+-#include <linux/cpu.h>
+-#include <linux/blktrace_api.h>
+-#include <linux/fault-inject.h>
+-#include <linux/scatterlist.h>
+-
+-/*
+- * for max sense size
+- */
+-#include <scsi/scsi_cmnd.h>
+-
+-static void blk_unplug_work(struct work_struct *work);
+-static void blk_unplug_timeout(unsigned long data);
+-static void drive_stat_acct(struct request *rq, int new_io);
+-static void init_request_from_bio(struct request *req, struct bio *bio);
+-static int __make_request(struct request_queue *q, struct bio *bio);
+-static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+-static void blk_recalc_rq_segments(struct request *rq);
+-static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+-			    struct bio *bio);
+-
+-/*
+- * For the allocated request tables
+- */
+-static struct kmem_cache *request_cachep;
+-
+-/*
+- * For queue allocation
+- */
+-static struct kmem_cache *requestq_cachep;
+-
+-/*
+- * For io context allocations
+- */
+-static struct kmem_cache *iocontext_cachep;
+-
+-/*
+- * Controlling structure to kblockd
+- */
+-static struct workqueue_struct *kblockd_workqueue;
+-
+-unsigned long blk_max_low_pfn, blk_max_pfn;
+-
+-EXPORT_SYMBOL(blk_max_low_pfn);
+-EXPORT_SYMBOL(blk_max_pfn);
+-
+-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+-
+-/* Amount of time in which a process may batch requests */
+-#define BLK_BATCH_TIME	(HZ/50UL)
+-
+-/* Number of requests a "batching" process may submit */
+-#define BLK_BATCH_REQ	32
+-
+-/*
+- * Return the threshold (number of used requests) at which the queue is
+- * considered to be congested.  It include a little hysteresis to keep the
+- * context switch rate down.
+- */
+-static inline int queue_congestion_on_threshold(struct request_queue *q)
+-{
+-	return q->nr_congestion_on;
+-}
+-
+-/*
+- * The threshold at which a queue is considered to be uncongested
+- */
+-static inline int queue_congestion_off_threshold(struct request_queue *q)
+-{
+-	return q->nr_congestion_off;
+-}
+-
+-static void blk_queue_congestion_threshold(struct request_queue *q)
+-{
+-	int nr;
+-
+-	nr = q->nr_requests - (q->nr_requests / 8) + 1;
+-	if (nr > q->nr_requests)
+-		nr = q->nr_requests;
+-	q->nr_congestion_on = nr;
+-
+-	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
+-	if (nr < 1)
+-		nr = 1;
+-	q->nr_congestion_off = nr;
+-}
+-
+-/**
+- * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
+- * @bdev:	device
 - *
-- * Re-map IO memory to kernel address space so that we can access it.
-- * This is needed for high PCI addresses that aren't mapped in the
-- * 640k-1MB IO memory area on PC's
+- * Locates the passed device's request queue and returns the address of its
+- * backing_dev_info
 - *
-- * (C) Copyright 1995 1996 Linus Torvalds
+- * Will return NULL if the request queue cannot be located.
+- */
+-struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
+-{
+-	struct backing_dev_info *ret = NULL;
+-	struct request_queue *q = bdev_get_queue(bdev);
+-
+-	if (q)
+-		ret = &q->backing_dev_info;
+-	return ret;
+-}
+-EXPORT_SYMBOL(blk_get_backing_dev_info);
+-
+-/**
+- * blk_queue_prep_rq - set a prepare_request function for queue
+- * @q:		queue
+- * @pfn:	prepare_request function
+- *
+- * It's possible for a queue to register a prepare_request callback which
+- * is invoked before the request is handed to the request_fn. The goal of
+- * the function is to prepare a request for I/O, it can be used to build a
+- * cdb from the request data for instance.
+- *
+- */
+-void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
+-{
+-	q->prep_rq_fn = pfn;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_prep_rq);
+-
+-/**
+- * blk_queue_merge_bvec - set a merge_bvec function for queue
+- * @q:		queue
+- * @mbfn:	merge_bvec_fn
+- *
+- * Usually queues have static limitations on the max sectors or segments that
+- * we can put in a request. Stacking drivers may have some settings that
+- * are dynamic, and thus we have to query the queue whether it is ok to
+- * add a new bio_vec to a bio at a given offset or not. If the block device
+- * has such limitations, it needs to register a merge_bvec_fn to control
+- * the size of bio's sent to it. Note that a block device *must* allow a
+- * single page to be added to an empty bio. The block device driver may want
+- * to use the bio_split() function to deal with these bio's. By default
+- * no merge_bvec_fn is defined for a queue, and only the fixed limits are
+- * honored.
+- */
+-void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
+-{
+-	q->merge_bvec_fn = mbfn;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_merge_bvec);
+-
+-void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
+-{
+-	q->softirq_done_fn = fn;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_softirq_done);
+-
+-/**
+- * blk_queue_make_request - define an alternate make_request function for a device
+- * @q:  the request queue for the device to be affected
+- * @mfn: the alternate make_request function
+- *
+- * Description:
+- *    The normal way for &struct bios to be passed to a device
+- *    driver is for them to be collected into requests on a request
+- *    queue, and then to allow the device driver to select requests
+- *    off that queue when it is ready.  This works well for many block
+- *    devices. However some block devices (typically virtual devices
+- *    such as md or lvm) do not benefit from the processing on the
+- *    request queue, and are served best by having the requests passed
+- *    directly to them.  This can be achieved by providing a function
+- *    to blk_queue_make_request().
+- *
+- * Caveat:
+- *    The driver that does this *must* be able to deal appropriately
+- *    with buffers in "highmemory". This can be accomplished by either calling
+- *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
+- *    blk_queue_bounce() to create a buffer in normal memory.
+- **/
+-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+-{
+-	/*
+-	 * set defaults
+-	 */
+-	q->nr_requests = BLKDEV_MAX_RQ;
+-	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+-	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+-	q->make_request_fn = mfn;
+-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+-	q->backing_dev_info.state = 0;
+-	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+-	blk_queue_hardsect_size(q, 512);
+-	blk_queue_dma_alignment(q, 511);
+-	blk_queue_congestion_threshold(q);
+-	q->nr_batching = BLK_BATCH_REQ;
+-
+-	q->unplug_thresh = 4;		/* hmm */
+-	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
+-	if (q->unplug_delay == 0)
+-		q->unplug_delay = 1;
+-
+-	INIT_WORK(&q->unplug_work, blk_unplug_work);
+-
+-	q->unplug_timer.function = blk_unplug_timeout;
+-	q->unplug_timer.data = (unsigned long)q;
+-
+-	/*
+-	 * by default assume old behaviour and bounce for any highmem page
+-	 */
+-	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+-}
+-
+-EXPORT_SYMBOL(blk_queue_make_request);
+-
+-static void rq_init(struct request_queue *q, struct request *rq)
+-{
+-	INIT_LIST_HEAD(&rq->queuelist);
+-	INIT_LIST_HEAD(&rq->donelist);
+-
+-	rq->errors = 0;
+-	rq->bio = rq->biotail = NULL;
+-	INIT_HLIST_NODE(&rq->hash);
+-	RB_CLEAR_NODE(&rq->rb_node);
+-	rq->ioprio = 0;
+-	rq->buffer = NULL;
+-	rq->ref_count = 1;
+-	rq->q = q;
+-	rq->special = NULL;
+-	rq->data_len = 0;
+-	rq->data = NULL;
+-	rq->nr_phys_segments = 0;
+-	rq->sense = NULL;
+-	rq->end_io = NULL;
+-	rq->end_io_data = NULL;
+-	rq->completion_data = NULL;
+-	rq->next_rq = NULL;
+-}
+-
+-/**
+- * blk_queue_ordered - does this queue support ordered writes
+- * @q:        the request queue
+- * @ordered:  one of QUEUE_ORDERED_*
+- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
+- *
+- * Description:
+- *   For journalled file systems, doing ordered writes on a commit
+- *   block instead of explicitly doing wait_on_buffer (which is bad
+- *   for performance) can be a big win. Block drivers supporting this
+- *   feature should call this function and indicate so.
+- *
+- **/
+-int blk_queue_ordered(struct request_queue *q, unsigned ordered,
+-		      prepare_flush_fn *prepare_flush_fn)
+-{
+-	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
+-	    prepare_flush_fn == NULL) {
+-		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+-		return -EINVAL;
+-	}
+-
+-	if (ordered != QUEUE_ORDERED_NONE &&
+-	    ordered != QUEUE_ORDERED_DRAIN &&
+-	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
+-	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
+-	    ordered != QUEUE_ORDERED_TAG &&
+-	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
+-	    ordered != QUEUE_ORDERED_TAG_FUA) {
+-		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
+-		return -EINVAL;
+-	}
+-
+-	q->ordered = ordered;
+-	q->next_ordered = ordered;
+-	q->prepare_flush_fn = prepare_flush_fn;
+-
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_ordered);
+-
+-/*
+- * Cache flushing for ordered writes handling
+- */
+-inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+-{
+-	if (!q->ordseq)
+-		return 0;
+-	return 1 << ffz(q->ordseq);
+-}
+-
+-unsigned blk_ordered_req_seq(struct request *rq)
+-{
+-	struct request_queue *q = rq->q;
+-
+-	BUG_ON(q->ordseq == 0);
+-
+-	if (rq == &q->pre_flush_rq)
+-		return QUEUE_ORDSEQ_PREFLUSH;
+-	if (rq == &q->bar_rq)
+-		return QUEUE_ORDSEQ_BAR;
+-	if (rq == &q->post_flush_rq)
+-		return QUEUE_ORDSEQ_POSTFLUSH;
+-
+-	/*
+-	 * !fs requests don't need to follow barrier ordering.  Always
+-	 * put them at the front.  This fixes the following deadlock.
+-	 *
+-	 * http://thread.gmane.org/gmane.linux.kernel/537473
+-	 */
+-	if (!blk_fs_request(rq))
+-		return QUEUE_ORDSEQ_DRAIN;
+-
+-	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+-	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
+-		return QUEUE_ORDSEQ_DRAIN;
+-	else
+-		return QUEUE_ORDSEQ_DONE;
+-}
+-
+-void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+-{
+-	struct request *rq;
+-	int uptodate;
+-
+-	if (error && !q->orderr)
+-		q->orderr = error;
+-
+-	BUG_ON(q->ordseq & seq);
+-	q->ordseq |= seq;
+-
+-	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
+-		return;
+-
+-	/*
+-	 * Okay, sequence complete.
+-	 */
+-	uptodate = 1;
+-	if (q->orderr)
+-		uptodate = q->orderr;
+-
+-	q->ordseq = 0;
+-	rq = q->orig_bar_rq;
+-
+-	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
+-	end_that_request_last(rq, uptodate);
+-}
+-
+-static void pre_flush_end_io(struct request *rq, int error)
+-{
+-	elv_completed_request(rq->q, rq);
+-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+-}
+-
+-static void bar_end_io(struct request *rq, int error)
+-{
+-	elv_completed_request(rq->q, rq);
+-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+-}
+-
+-static void post_flush_end_io(struct request *rq, int error)
+-{
+-	elv_completed_request(rq->q, rq);
+-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+-}
+-
+-static void queue_flush(struct request_queue *q, unsigned which)
+-{
+-	struct request *rq;
+-	rq_end_io_fn *end_io;
+-
+-	if (which == QUEUE_ORDERED_PREFLUSH) {
+-		rq = &q->pre_flush_rq;
+-		end_io = pre_flush_end_io;
+-	} else {
+-		rq = &q->post_flush_rq;
+-		end_io = post_flush_end_io;
+-	}
+-
+-	rq->cmd_flags = REQ_HARDBARRIER;
+-	rq_init(q, rq);
+-	rq->elevator_private = NULL;
+-	rq->elevator_private2 = NULL;
+-	rq->rq_disk = q->bar_rq.rq_disk;
+-	rq->end_io = end_io;
+-	q->prepare_flush_fn(q, rq);
+-
+-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+-}
+-
+-static inline struct request *start_ordered(struct request_queue *q,
+-					    struct request *rq)
+-{
+-	q->orderr = 0;
+-	q->ordered = q->next_ordered;
+-	q->ordseq |= QUEUE_ORDSEQ_STARTED;
+-
+-	/*
+-	 * Prep proxy barrier request.
+-	 */
+-	blkdev_dequeue_request(rq);
+-	q->orig_bar_rq = rq;
+-	rq = &q->bar_rq;
+-	rq->cmd_flags = 0;
+-	rq_init(q, rq);
+-	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+-		rq->cmd_flags |= REQ_RW;
+-	if (q->ordered & QUEUE_ORDERED_FUA)
+-		rq->cmd_flags |= REQ_FUA;
+-	rq->elevator_private = NULL;
+-	rq->elevator_private2 = NULL;
+-	init_request_from_bio(rq, q->orig_bar_rq->bio);
+-	rq->end_io = bar_end_io;
+-
+-	/*
+-	 * Queue ordered sequence.  As we stack them at the head, we
+-	 * need to queue in reverse order.  Note that we rely on that
+-	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
+-	 * request gets inbetween ordered sequence. If this request is
+-	 * an empty barrier, we don't need to do a postflush ever since
+-	 * there will be no data written between the pre and post flush.
+-	 * Hence a single flush will suffice.
+-	 */
+-	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
+-		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+-	else
+-		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+-
+-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+-
+-	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
+-		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+-		rq = &q->pre_flush_rq;
+-	} else
+-		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+-
+-	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
+-		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
+-	else
+-		rq = NULL;
+-
+-	return rq;
+-}
+-
+-int blk_do_ordered(struct request_queue *q, struct request **rqp)
+-{
+-	struct request *rq = *rqp;
+-	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+-
+-	if (!q->ordseq) {
+-		if (!is_barrier)
+-			return 1;
+-
+-		if (q->next_ordered != QUEUE_ORDERED_NONE) {
+-			*rqp = start_ordered(q, rq);
+-			return 1;
+-		} else {
+-			/*
+-			 * This can happen when the queue switches to
+-			 * ORDERED_NONE while this request is on it.
+-			 */
+-			blkdev_dequeue_request(rq);
+-			end_that_request_first(rq, -EOPNOTSUPP,
+-					       rq->hard_nr_sectors);
+-			end_that_request_last(rq, -EOPNOTSUPP);
+-			*rqp = NULL;
+-			return 0;
+-		}
+-	}
+-
+-	/*
+-	 * Ordered sequence in progress
+-	 */
+-
+-	/* Special requests are not subject to ordering rules. */
+-	if (!blk_fs_request(rq) &&
+-	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+-		return 1;
+-
+-	if (q->ordered & QUEUE_ORDERED_TAG) {
+-		/* Ordered by tag.  Blocking the next barrier is enough. */
+-		if (is_barrier && rq != &q->bar_rq)
+-			*rqp = NULL;
+-	} else {
+-		/* Ordered by draining.  Wait for turn. */
+-		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+-		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+-			*rqp = NULL;
+-	}
+-
+-	return 1;
+-}
+-
+-static void req_bio_endio(struct request *rq, struct bio *bio,
+-			  unsigned int nbytes, int error)
+-{
+-	struct request_queue *q = rq->q;
+-
+-	if (&q->bar_rq != rq) {
+-		if (error)
+-			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+-		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+-			error = -EIO;
+-
+-		if (unlikely(nbytes > bio->bi_size)) {
+-			printk("%s: want %u bytes done, only %u left\n",
+-			       __FUNCTION__, nbytes, bio->bi_size);
+-			nbytes = bio->bi_size;
+-		}
+-
+-		bio->bi_size -= nbytes;
+-		bio->bi_sector += (nbytes >> 9);
+-		if (bio->bi_size == 0)
+-			bio_endio(bio, error);
+-	} else {
+-
+-		/*
+-		 * Okay, this is the barrier request in progress, just
+-		 * record the error;
+-		 */
+-		if (error && !q->orderr)
+-			q->orderr = error;
+-	}
+-}
+-
+-/**
+- * blk_queue_bounce_limit - set bounce buffer limit for queue
+- * @q:  the request queue for the device
+- * @dma_addr:   bus address limit
+- *
+- * Description:
+- *    Different hardware can have different requirements as to what pages
+- *    it can do I/O directly to. A low level driver can call
+- *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
+- *    buffers for doing I/O to pages residing above @dma_addr.
+- **/
+-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
+-{
+-	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+-	int dma = 0;
+-
+-	q->bounce_gfp = GFP_NOIO;
+-#if BITS_PER_LONG == 64
+-	/* Assume anything <= 4GB can be handled by IOMMU.
+-	   Actually some IOMMUs can handle everything, but I don't
+-	   know of a way to test this here. */
+-	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+-		dma = 1;
+-	q->bounce_pfn = max_low_pfn;
+-#else
+-	if (bounce_pfn < blk_max_low_pfn)
+-		dma = 1;
+-	q->bounce_pfn = bounce_pfn;
+-#endif
+-	if (dma) {
+-		init_emergency_isa_pool();
+-		q->bounce_gfp = GFP_NOIO | GFP_DMA;
+-		q->bounce_pfn = bounce_pfn;
+-	}
+-}
+-
+-EXPORT_SYMBOL(blk_queue_bounce_limit);
+-
+-/**
+- * blk_queue_max_sectors - set max sectors for a request for this queue
+- * @q:  the request queue for the device
+- * @max_sectors:  max sectors in the usual 512b unit
+- *
+- * Description:
+- *    Enables a low level driver to set an upper limit on the size of
+- *    received requests.
+- **/
+-void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
+-{
+-	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
+-		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+-		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+-	}
+-
+-	if (BLK_DEF_MAX_SECTORS > max_sectors)
+-		q->max_hw_sectors = q->max_sectors = max_sectors;
+- 	else {
+-		q->max_sectors = BLK_DEF_MAX_SECTORS;
+-		q->max_hw_sectors = max_sectors;
+-	}
+-}
+-
+-EXPORT_SYMBOL(blk_queue_max_sectors);
+-
+-/**
+- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+- * @q:  the request queue for the device
+- * @max_segments:  max number of segments
+- *
+- * Description:
+- *    Enables a low level driver to set an upper limit on the number of
+- *    physical data segments in a request.  This would be the largest sized
+- *    scatter list the driver could handle.
+- **/
+-void blk_queue_max_phys_segments(struct request_queue *q,
+-				 unsigned short max_segments)
+-{
+-	if (!max_segments) {
+-		max_segments = 1;
+-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+-	}
+-
+-	q->max_phys_segments = max_segments;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_max_phys_segments);
+-
+-/**
+- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+- * @q:  the request queue for the device
+- * @max_segments:  max number of segments
+- *
+- * Description:
+- *    Enables a low level driver to set an upper limit on the number of
+- *    hw data segments in a request.  This would be the largest number of
+- *    address/length pairs the host adapter can actually give at once
+- *    to the device.
+- **/
+-void blk_queue_max_hw_segments(struct request_queue *q,
+-			       unsigned short max_segments)
+-{
+-	if (!max_segments) {
+-		max_segments = 1;
+-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+-	}
+-
+-	q->max_hw_segments = max_segments;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+-
+-/**
+- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
+- * @q:  the request queue for the device
+- * @max_size:  max size of segment in bytes
+- *
+- * Description:
+- *    Enables a low level driver to set an upper limit on the size of a
+- *    coalesced segment
+- **/
+-void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
+-{
+-	if (max_size < PAGE_CACHE_SIZE) {
+-		max_size = PAGE_CACHE_SIZE;
+-		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+-	}
+-
+-	q->max_segment_size = max_size;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_max_segment_size);
+-
+-/**
+- * blk_queue_hardsect_size - set hardware sector size for the queue
+- * @q:  the request queue for the device
+- * @size:  the hardware sector size, in bytes
+- *
+- * Description:
+- *   This should typically be set to the lowest possible sector size
+- *   that the hardware can operate on (possible without reverting to
+- *   even internal read-modify-write operations). Usually the default
+- *   of 512 covers most hardware.
+- **/
+-void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+-{
+-	q->hardsect_size = size;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_hardsect_size);
+-
+-/*
+- * Returns the minimum that is _not_ zero, unless both are zero.
+- */
+-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
+-
+-/**
+- * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
+- * @t:	the stacking driver (top)
+- * @b:  the underlying device (bottom)
+- **/
+-void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
+-{
+-	/* zero is "infinity" */
+-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+-	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+-
+-	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
+-	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
+-	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
+-	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+-	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+-}
+-
+-EXPORT_SYMBOL(blk_queue_stack_limits);
+-
+-/**
+- * blk_queue_segment_boundary - set boundary rules for segment merging
+- * @q:  the request queue for the device
+- * @mask:  the memory boundary mask
+- **/
+-void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
+-{
+-	if (mask < PAGE_CACHE_SIZE - 1) {
+-		mask = PAGE_CACHE_SIZE - 1;
+-		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+-	}
+-
+-	q->seg_boundary_mask = mask;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_segment_boundary);
+-
+-/**
+- * blk_queue_dma_alignment - set dma length and memory alignment
+- * @q:     the request queue for the device
+- * @mask:  alignment mask
+- *
+- * description:
+- *    set required memory and length alignment for direct dma transactions.
+- *    this is used when building direct io requests for the queue.
+- *
+- **/
+-void blk_queue_dma_alignment(struct request_queue *q, int mask)
+-{
+-	q->dma_alignment = mask;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_dma_alignment);
+-
+-/**
+- * blk_queue_find_tag - find a request by its tag and queue
+- * @q:	 The request queue for the device
+- * @tag: The tag of the request
+- *
+- * Notes:
+- *    Should be used when a device returns a tag and you want to match
+- *    it with a request.
+- *
+- *    no locks need be held.
+- **/
+-struct request *blk_queue_find_tag(struct request_queue *q, int tag)
+-{
+-	return blk_map_queue_find_tag(q->queue_tags, tag);
+-}
+-
+-EXPORT_SYMBOL(blk_queue_find_tag);
+-
+-/**
+- * __blk_free_tags - release a given set of tag maintenance info
+- * @bqt:	the tag map to free
+- *
+- * Tries to free the specified @bqt@.  Returns true if it was
+- * actually freed and false if there are still references using it
+- */
+-static int __blk_free_tags(struct blk_queue_tag *bqt)
+-{
+-	int retval;
+-
+-	retval = atomic_dec_and_test(&bqt->refcnt);
+-	if (retval) {
+-		BUG_ON(bqt->busy);
+-
+-		kfree(bqt->tag_index);
+-		bqt->tag_index = NULL;
+-
+-		kfree(bqt->tag_map);
+-		bqt->tag_map = NULL;
+-
+-		kfree(bqt);
+-
+-	}
+-
+-	return retval;
+-}
+-
+-/**
+- * __blk_queue_free_tags - release tag maintenance info
+- * @q:  the request queue for the device
+- *
+- *  Notes:
+- *    blk_cleanup_queue() will take care of calling this function, if tagging
+- *    has been used. So there's no need to call this directly.
+- **/
+-static void __blk_queue_free_tags(struct request_queue *q)
+-{
+-	struct blk_queue_tag *bqt = q->queue_tags;
+-
+-	if (!bqt)
+-		return;
+-
+-	__blk_free_tags(bqt);
+-
+-	q->queue_tags = NULL;
+-	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+-}
+-
+-
+-/**
+- * blk_free_tags - release a given set of tag maintenance info
+- * @bqt:	the tag map to free
+- *
+- * For externally managed @bqt@ frees the map.  Callers of this
+- * function must guarantee to have released all the queues that
+- * might have been using this tag map.
 - */
+-void blk_free_tags(struct blk_queue_tag *bqt)
+-{
+-	if (unlikely(!__blk_free_tags(bqt)))
+-		BUG();
+-}
+-EXPORT_SYMBOL(blk_free_tags);
+-
+-/**
+- * blk_queue_free_tags - release tag maintenance info
+- * @q:  the request queue for the device
+- *
+- *  Notes:
+- *	This is used to disable tagged queuing on a device, yet leave the
+- *	queue functional.
+- **/
+-void blk_queue_free_tags(struct request_queue *q)
+-{
+-	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+-}
+-
+-EXPORT_SYMBOL(blk_queue_free_tags);
+-
+-static int
+-init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
+-{
+-	struct request **tag_index;
+-	unsigned long *tag_map;
+-	int nr_ulongs;
+-
+-	if (q && depth > q->nr_requests * 2) {
+-		depth = q->nr_requests * 2;
+-		printk(KERN_ERR "%s: adjusted depth to %d\n",
+-				__FUNCTION__, depth);
+-	}
+-
+-	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+-	if (!tag_index)
+-		goto fail;
+-
+-	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+-	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+-	if (!tag_map)
+-		goto fail;
+-
+-	tags->real_max_depth = depth;
+-	tags->max_depth = depth;
+-	tags->tag_index = tag_index;
+-	tags->tag_map = tag_map;
+-
+-	return 0;
+-fail:
+-	kfree(tag_index);
+-	return -ENOMEM;
+-}
+-
+-static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+-						   int depth)
+-{
+-	struct blk_queue_tag *tags;
+-
+-	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+-	if (!tags)
+-		goto fail;
+-
+-	if (init_tag_map(q, tags, depth))
+-		goto fail;
+-
+-	tags->busy = 0;
+-	atomic_set(&tags->refcnt, 1);
+-	return tags;
+-fail:
+-	kfree(tags);
+-	return NULL;
+-}
+-
+-/**
+- * blk_init_tags - initialize the tag info for an external tag map
+- * @depth:	the maximum queue depth supported
+- * @tags: the tag to use
+- **/
+-struct blk_queue_tag *blk_init_tags(int depth)
+-{
+-	return __blk_queue_init_tags(NULL, depth);
+-}
+-EXPORT_SYMBOL(blk_init_tags);
+-
+-/**
+- * blk_queue_init_tags - initialize the queue tag info
+- * @q:  the request queue for the device
+- * @depth:  the maximum queue depth supported
+- * @tags: the tag to use
+- **/
+-int blk_queue_init_tags(struct request_queue *q, int depth,
+-			struct blk_queue_tag *tags)
+-{
+-	int rc;
+-
+-	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
+-
+-	if (!tags && !q->queue_tags) {
+-		tags = __blk_queue_init_tags(q, depth);
+-
+-		if (!tags)
+-			goto fail;
+-	} else if (q->queue_tags) {
+-		if ((rc = blk_queue_resize_tags(q, depth)))
+-			return rc;
+-		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+-		return 0;
+-	} else
+-		atomic_inc(&tags->refcnt);
+-
+-	/*
+-	 * assign it, all done
+-	 */
+-	q->queue_tags = tags;
+-	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+-	INIT_LIST_HEAD(&q->tag_busy_list);
+-	return 0;
+-fail:
+-	kfree(tags);
+-	return -ENOMEM;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_init_tags);
+-
+-/**
+- * blk_queue_resize_tags - change the queueing depth
+- * @q:  the request queue for the device
+- * @new_depth: the new max command queueing depth
+- *
+- *  Notes:
+- *    Must be called with the queue lock held.
+- **/
+-int blk_queue_resize_tags(struct request_queue *q, int new_depth)
+-{
+-	struct blk_queue_tag *bqt = q->queue_tags;
+-	struct request **tag_index;
+-	unsigned long *tag_map;
+-	int max_depth, nr_ulongs;
+-
+-	if (!bqt)
+-		return -ENXIO;
+-
+-	/*
+-	 * if we already have large enough real_max_depth.  just
+-	 * adjust max_depth.  *NOTE* as requests with tag value
+-	 * between new_depth and real_max_depth can be in-flight, tag
+-	 * map can not be shrunk blindly here.
+-	 */
+-	if (new_depth <= bqt->real_max_depth) {
+-		bqt->max_depth = new_depth;
+-		return 0;
+-	}
+-
+-	/*
+-	 * Currently cannot replace a shared tag map with a new
+-	 * one, so error out if this is the case
+-	 */
+-	if (atomic_read(&bqt->refcnt) != 1)
+-		return -EBUSY;
+-
+-	/*
+-	 * save the old state info, so we can copy it back
+-	 */
+-	tag_index = bqt->tag_index;
+-	tag_map = bqt->tag_map;
+-	max_depth = bqt->real_max_depth;
+-
+-	if (init_tag_map(q, bqt, new_depth))
+-		return -ENOMEM;
+-
+-	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
+-	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
+-	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
+-
+-	kfree(tag_index);
+-	kfree(tag_map);
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_resize_tags);
+-
+-/**
+- * blk_queue_end_tag - end tag operations for a request
+- * @q:  the request queue for the device
+- * @rq: the request that has completed
+- *
+- *  Description:
+- *    Typically called when end_that_request_first() returns 0, meaning
+- *    all transfers have been done for a request. It's important to call
+- *    this function before end_that_request_last(), as that will put the
+- *    request back on the free list thus corrupting the internal tag list.
+- *
+- *  Notes:
+- *   queue lock must be held.
+- **/
+-void blk_queue_end_tag(struct request_queue *q, struct request *rq)
+-{
+-	struct blk_queue_tag *bqt = q->queue_tags;
+-	int tag = rq->tag;
+-
+-	BUG_ON(tag == -1);
+-
+-	if (unlikely(tag >= bqt->real_max_depth))
+-		/*
+-		 * This can happen after tag depth has been reduced.
+-		 * FIXME: how about a warning or info message here?
+-		 */
+-		return;
+-
+-	list_del_init(&rq->queuelist);
+-	rq->cmd_flags &= ~REQ_QUEUED;
+-	rq->tag = -1;
+-
+-	if (unlikely(bqt->tag_index[tag] == NULL))
+-		printk(KERN_ERR "%s: tag %d is missing\n",
+-		       __FUNCTION__, tag);
+-
+-	bqt->tag_index[tag] = NULL;
+-
+-	if (unlikely(!test_bit(tag, bqt->tag_map))) {
+-		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+-		       __FUNCTION__, tag);
+-		return;
+-	}
+-	/*
+-	 * The tag_map bit acts as a lock for tag_index[bit], so we need
+-	 * unlock memory barrier semantics.
+-	 */
+-	clear_bit_unlock(tag, bqt->tag_map);
+-	bqt->busy--;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_end_tag);
+-
+-/**
+- * blk_queue_start_tag - find a free tag and assign it
+- * @q:  the request queue for the device
+- * @rq:  the block request that needs tagging
+- *
+- *  Description:
+- *    This can either be used as a stand-alone helper, or possibly be
+- *    assigned as the queue &prep_rq_fn (in which case &struct request
+- *    automagically gets a tag assigned). Note that this function
+- *    assumes that any type of request can be queued! if this is not
+- *    true for your device, you must check the request type before
+- *    calling this function.  The request will also be removed from
+- *    the request queue, so it's the driver's responsibility to re-add
+- *    it if it should need to be restarted for some reason.
+- *
+- *  Notes:
+- *   queue lock must be held.
+- **/
+-int blk_queue_start_tag(struct request_queue *q, struct request *rq)
+-{
+-	struct blk_queue_tag *bqt = q->queue_tags;
+-	int tag;
+-
+-	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
+-		printk(KERN_ERR 
+-		       "%s: request %p for device [%s] already tagged %d",
+-		       __FUNCTION__, rq,
+-		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
+-		BUG();
+-	}
+-
+-	/*
+-	 * Protect against shared tag maps, as we may not have exclusive
+-	 * access to the tag map.
+-	 */
+-	do {
+-		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
+-		if (tag >= bqt->max_depth)
+-			return 1;
+-
+-	} while (test_and_set_bit_lock(tag, bqt->tag_map));
+-	/*
+-	 * We need lock ordering semantics given by test_and_set_bit_lock.
+-	 * See blk_queue_end_tag for details.
+-	 */
+-
+-	rq->cmd_flags |= REQ_QUEUED;
+-	rq->tag = tag;
+-	bqt->tag_index[tag] = rq;
+-	blkdev_dequeue_request(rq);
+-	list_add(&rq->queuelist, &q->tag_busy_list);
+-	bqt->busy++;
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_start_tag);
+-
+-/**
+- * blk_queue_invalidate_tags - invalidate all pending tags
+- * @q:  the request queue for the device
+- *
+- *  Description:
+- *   Hardware conditions may dictate a need to stop all pending requests.
+- *   In this case, we will safely clear the block side of the tag queue and
+- *   re-add all requests to the request queue in the right order.
+- *
+- *  Notes:
+- *   queue lock must be held.
+- **/
+-void blk_queue_invalidate_tags(struct request_queue *q)
+-{
+-	struct list_head *tmp, *n;
+-
+-	list_for_each_safe(tmp, n, &q->tag_busy_list)
+-		blk_requeue_request(q, list_entry_rq(tmp));
+-}
+-
+-EXPORT_SYMBOL(blk_queue_invalidate_tags);
+-
+-void blk_dump_rq_flags(struct request *rq, char *msg)
+-{
+-	int bit;
+-
+-	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+-		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+-		rq->cmd_flags);
+-
+-	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
+-						       rq->nr_sectors,
+-						       rq->current_nr_sectors);
+-	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+-
+-	if (blk_pc_request(rq)) {
+-		printk("cdb: ");
+-		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+-			printk("%02x ", rq->cmd[bit]);
+-		printk("\n");
+-	}
+-}
+-
+-EXPORT_SYMBOL(blk_dump_rq_flags);
+-
+-void blk_recount_segments(struct request_queue *q, struct bio *bio)
+-{
+-	struct request rq;
+-	struct bio *nxt = bio->bi_next;
+-	rq.q = q;
+-	rq.bio = rq.biotail = bio;
+-	bio->bi_next = NULL;
+-	blk_recalc_rq_segments(&rq);
+-	bio->bi_next = nxt;
+-	bio->bi_phys_segments = rq.nr_phys_segments;
+-	bio->bi_hw_segments = rq.nr_hw_segments;
+-	bio->bi_flags |= (1 << BIO_SEG_VALID);
+-}
+-EXPORT_SYMBOL(blk_recount_segments);
+-
+-static void blk_recalc_rq_segments(struct request *rq)
+-{
+-	int nr_phys_segs;
+-	int nr_hw_segs;
+-	unsigned int phys_size;
+-	unsigned int hw_size;
+-	struct bio_vec *bv, *bvprv = NULL;
+-	int seg_size;
+-	int hw_seg_size;
+-	int cluster;
+-	struct req_iterator iter;
+-	int high, highprv = 1;
+-	struct request_queue *q = rq->q;
+-
+-	if (!rq->bio)
+-		return;
+-
+-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+-	hw_seg_size = seg_size = 0;
+-	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+-	rq_for_each_segment(bv, rq, iter) {
+-		/*
+-		 * the trick here is making sure that a high page is never
+-		 * considered part of another segment, since that might
+-		 * change with the bounce page.
+-		 */
+-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+-		if (high || highprv)
+-			goto new_hw_segment;
+-		if (cluster) {
+-			if (seg_size + bv->bv_len > q->max_segment_size)
+-				goto new_segment;
+-			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+-				goto new_segment;
+-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+-				goto new_segment;
+-			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
+-				goto new_hw_segment;
+-
+-			seg_size += bv->bv_len;
+-			hw_seg_size += bv->bv_len;
+-			bvprv = bv;
+-			continue;
+-		}
+-new_segment:
+-		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
+-		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
+-			hw_seg_size += bv->bv_len;
+-		else {
+-new_hw_segment:
+-			if (nr_hw_segs == 1 &&
+-			    hw_seg_size > rq->bio->bi_hw_front_size)
+-				rq->bio->bi_hw_front_size = hw_seg_size;
+-			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
+-			nr_hw_segs++;
+-		}
 -
--#include <linux/vmalloc.h>
--#include <linux/init.h>
--#include <linux/slab.h>
--#include <linux/module.h>
--#include <linux/io.h>
--#include <asm/fixmap.h>
--#include <asm/cacheflush.h>
--#include <asm/tlbflush.h>
--#include <asm/pgtable.h>
+-		nr_phys_segs++;
+-		bvprv = bv;
+-		seg_size = bv->bv_len;
+-		highprv = high;
+-	}
 -
--#define ISA_START_ADDRESS	0xa0000
--#define ISA_END_ADDRESS		0x100000
+-	if (nr_hw_segs == 1 &&
+-	    hw_seg_size > rq->bio->bi_hw_front_size)
+-		rq->bio->bi_hw_front_size = hw_seg_size;
+-	if (hw_seg_size > rq->biotail->bi_hw_back_size)
+-		rq->biotail->bi_hw_back_size = hw_seg_size;
+-	rq->nr_phys_segments = nr_phys_segs;
+-	rq->nr_hw_segments = nr_hw_segs;
+-}
+-
+-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
+-				   struct bio *nxt)
+-{
+-	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+-		return 0;
+-
+-	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+-		return 0;
+-	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+-		return 0;
+-
+-	/*
+-	 * bio and nxt are contiguous in memory, check if the queue allows
+-	 * these two to be merged into one
+-	 */
+-	if (BIO_SEG_BOUNDARY(q, bio, nxt))
+-		return 1;
+-
+-	return 0;
+-}
+-
+-static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
+-				 struct bio *nxt)
+-{
+-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+-		blk_recount_segments(q, bio);
+-	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+-		blk_recount_segments(q, nxt);
+-	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
+-	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
+-		return 0;
+-	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
+-		return 0;
+-
+-	return 1;
+-}
 -
 -/*
-- * Generic mapping function (not visible outside):
+- * map a request to scatterlist, return number of sg entries setup. Caller
+- * must make sure sg can hold rq->nr_phys_segments entries
 - */
+-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+-		  struct scatterlist *sglist)
+-{
+-	struct bio_vec *bvec, *bvprv;
+-	struct req_iterator iter;
+-	struct scatterlist *sg;
+-	int nsegs, cluster;
+-
+-	nsegs = 0;
+-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+-
+-	/*
+-	 * for each bio in rq
+-	 */
+-	bvprv = NULL;
+-	sg = NULL;
+-	rq_for_each_segment(bvec, rq, iter) {
+-		int nbytes = bvec->bv_len;
+-
+-		if (bvprv && cluster) {
+-			if (sg->length + nbytes > q->max_segment_size)
+-				goto new_segment;
+-
+-			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+-				goto new_segment;
+-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+-				goto new_segment;
+-
+-			sg->length += nbytes;
+-		} else {
+-new_segment:
+-			if (!sg)
+-				sg = sglist;
+-			else {
+-				/*
+-				 * If the driver previously mapped a shorter
+-				 * list, we could see a termination bit
+-				 * prematurely unless it fully inits the sg
+-				 * table on each mapping. We KNOW that there
+-				 * must be more entries here or the driver
+-				 * would be buggy, so force clear the
+-				 * termination bit to avoid doing a full
+-				 * sg_init_table() in drivers for each command.
+-				 */
+-				sg->page_link &= ~0x02;
+-				sg = sg_next(sg);
+-			}
+-
+-			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
+-			nsegs++;
+-		}
+-		bvprv = bvec;
+-	} /* segments in rq */
+-
+-	if (sg)
+-		sg_mark_end(sg);
+-
+-	return nsegs;
+-}
+-
+-EXPORT_SYMBOL(blk_rq_map_sg);
 -
 -/*
-- * Remap an arbitrary physical address space into the kernel virtual
-- * address space. Needed when the kernel wants to access high addresses
-- * directly.
-- *
-- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-- * have to convert them into an offset in a page-aligned mapping, but the
-- * caller shouldn't need to know that small detail.
+- * the standard queue merge functions, can be overridden with device
+- * specific ones if so desired
 - */
--void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+-
+-static inline int ll_new_mergeable(struct request_queue *q,
+-				   struct request *req,
+-				   struct bio *bio)
 -{
--	void __iomem * addr;
--	struct vm_struct * area;
--	unsigned long offset, last_addr;
--	pgprot_t prot;
+-	int nr_phys_segs = bio_phys_segments(q, bio);
 -
--	/* Don't allow wraparound or zero size */
--	last_addr = phys_addr + size - 1;
--	if (!size || last_addr < phys_addr)
--		return NULL;
+-	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+-		req->cmd_flags |= REQ_NOMERGE;
+-		if (req == q->last_merge)
+-			q->last_merge = NULL;
+-		return 0;
+-	}
 -
 -	/*
--	 * Don't remap the low PCI/ISA area, it's always mapped..
+-	 * A hw segment is just getting larger, bump just the phys
+-	 * counter.
 -	 */
--	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
--		return (void __iomem *) phys_to_virt(phys_addr);
+-	req->nr_phys_segments += nr_phys_segs;
+-	return 1;
+-}
+-
+-static inline int ll_new_hw_segment(struct request_queue *q,
+-				    struct request *req,
+-				    struct bio *bio)
+-{
+-	int nr_hw_segs = bio_hw_segments(q, bio);
+-	int nr_phys_segs = bio_phys_segments(q, bio);
+-
+-	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+-	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+-		req->cmd_flags |= REQ_NOMERGE;
+-		if (req == q->last_merge)
+-			q->last_merge = NULL;
+-		return 0;
+-	}
 -
 -	/*
--	 * Don't allow anybody to remap normal RAM that we're using..
+-	 * This will form the start of a new hw segment.  Bump both
+-	 * counters.
 -	 */
--	if (phys_addr <= virt_to_phys(high_memory - 1)) {
--		char *t_addr, *t_end;
--		struct page *page;
+-	req->nr_hw_segments += nr_hw_segs;
+-	req->nr_phys_segments += nr_phys_segs;
+-	return 1;
+-}
 -
--		t_addr = __va(phys_addr);
--		t_end = t_addr + (size - 1);
--	   
--		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
--			if(!PageReserved(page))
--				return NULL;
+-static int ll_back_merge_fn(struct request_queue *q, struct request *req,
+-			    struct bio *bio)
+-{
+-	unsigned short max_sectors;
+-	int len;
+-
+-	if (unlikely(blk_pc_request(req)))
+-		max_sectors = q->max_hw_sectors;
+-	else
+-		max_sectors = q->max_sectors;
+-
+-	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+-		req->cmd_flags |= REQ_NOMERGE;
+-		if (req == q->last_merge)
+-			q->last_merge = NULL;
+-		return 0;
 -	}
+-	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+-		blk_recount_segments(q, req->biotail);
+-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+-		blk_recount_segments(q, bio);
+-	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
+-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
+-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+-		int mergeable =  ll_new_mergeable(q, req, bio);
 -
--	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
--			| _PAGE_ACCESSED | flags);
+-		if (mergeable) {
+-			if (req->nr_hw_segments == 1)
+-				req->bio->bi_hw_front_size = len;
+-			if (bio->bi_hw_segments == 1)
+-				bio->bi_hw_back_size = len;
+-		}
+-		return mergeable;
+-	}
+-
+-	return ll_new_hw_segment(q, req, bio);
+-}
+-
+-static int ll_front_merge_fn(struct request_queue *q, struct request *req, 
+-			     struct bio *bio)
+-{
+-	unsigned short max_sectors;
+-	int len;
+-
+-	if (unlikely(blk_pc_request(req)))
+-		max_sectors = q->max_hw_sectors;
+-	else
+-		max_sectors = q->max_sectors;
+-
+-
+-	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+-		req->cmd_flags |= REQ_NOMERGE;
+-		if (req == q->last_merge)
+-			q->last_merge = NULL;
+-		return 0;
+-	}
+-	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
+-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+-		blk_recount_segments(q, bio);
+-	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+-		blk_recount_segments(q, req->bio);
+-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
+-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+-		int mergeable =  ll_new_mergeable(q, req, bio);
+-
+-		if (mergeable) {
+-			if (bio->bi_hw_segments == 1)
+-				bio->bi_hw_front_size = len;
+-			if (req->nr_hw_segments == 1)
+-				req->biotail->bi_hw_back_size = len;
+-		}
+-		return mergeable;
+-	}
+-
+-	return ll_new_hw_segment(q, req, bio);
+-}
+-
+-static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
+-				struct request *next)
+-{
+-	int total_phys_segments;
+-	int total_hw_segments;
 -
 -	/*
--	 * Mappings have to be page-aligned
+-	 * First check if either of the requests is a re-queued
+-	 * request.  Can't merge them if they are.
 -	 */
--	offset = phys_addr & ~PAGE_MASK;
--	phys_addr &= PAGE_MASK;
--	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+-	if (req->special || next->special)
+-		return 0;
 -
 -	/*
--	 * Ok, go for it..
+-	 * Will it become too large?
 -	 */
--	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
--	if (!area)
--		return NULL;
--	area->phys_addr = phys_addr;
--	addr = (void __iomem *) area->addr;
--	if (ioremap_page_range((unsigned long) addr,
--			(unsigned long) addr + size, phys_addr, prot)) {
--		vunmap((void __force *) addr);
--		return NULL;
+-	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+-		return 0;
+-
+-	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+-	if (blk_phys_contig_segment(q, req->biotail, next->bio))
+-		total_phys_segments--;
+-
+-	if (total_phys_segments > q->max_phys_segments)
+-		return 0;
+-
+-	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+-	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
+-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+-		/*
+-		 * propagate the combined length to the end of the requests
+-		 */
+-		if (req->nr_hw_segments == 1)
+-			req->bio->bi_hw_front_size = len;
+-		if (next->nr_hw_segments == 1)
+-			next->biotail->bi_hw_back_size = len;
+-		total_hw_segments--;
 -	}
--	return (void __iomem *) (offset + (char __iomem *)addr);
+-
+-	if (total_hw_segments > q->max_hw_segments)
+-		return 0;
+-
+-	/* Merge is OK... */
+-	req->nr_phys_segments = total_phys_segments;
+-	req->nr_hw_segments = total_hw_segments;
+-	return 1;
 -}
--EXPORT_SYMBOL(__ioremap);
 -
--/**
-- * ioremap_nocache     -   map bus memory into CPU space
-- * @offset:    bus address of the memory
-- * @size:      size of the resource to map
-- *
-- * ioremap_nocache performs a platform specific sequence of operations to
-- * make bus memory CPU accessible via the readb/readw/readl/writeb/
-- * writew/writel functions and the other mmio helpers. The returned
-- * address is not guaranteed to be usable directly as a virtual
-- * address. 
-- *
-- * This version of ioremap ensures that the memory is marked uncachable
-- * on the CPU as well as honouring existing caching rules from things like
-- * the PCI bus. Note that there are other caches and buffers on many 
-- * busses. In particular driver authors should read up on PCI writes
+-/*
+- * "plug" the device if there are no outstanding requests: this will
+- * force the transfer to start only after we have put all the requests
+- * on the list.
 - *
-- * It's useful if some control registers are in such an area and
-- * write combining or read caching is not desirable:
-- * 
-- * Must be freed with iounmap.
+- * This is called with interrupts off and no requests on the queue and
+- * with the queue lock held.
 - */
+-void blk_plug_device(struct request_queue *q)
+-{
+-	WARN_ON(!irqs_disabled());
 -
--void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+-	/*
+-	 * don't plug a stopped queue, it must be paired with blk_start_queue()
+-	 * which will restart the queueing
+-	 */
+-	if (blk_queue_stopped(q))
+-		return;
+-
+-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+-		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+-		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+-	}
+-}
+-
+-EXPORT_SYMBOL(blk_plug_device);
+-
+-/*
+- * remove the queue from the plugged list, if present. called with
+- * queue lock held and interrupts disabled.
+- */
+-int blk_remove_plug(struct request_queue *q)
 -{
--	unsigned long last_addr;
--	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
--	if (!p) 
--		return p; 
+-	WARN_ON(!irqs_disabled());
 -
--	/* Guaranteed to be > phys_addr, as per __ioremap() */
--	last_addr = phys_addr + size - 1;
+-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+-		return 0;
 -
--	if (last_addr < virt_to_phys(high_memory) - 1) {
--		struct page *ppage = virt_to_page(__va(phys_addr));		
--		unsigned long npages;
+-	del_timer(&q->unplug_timer);
+-	return 1;
+-}
 -
--		phys_addr &= PAGE_MASK;
+-EXPORT_SYMBOL(blk_remove_plug);
 -
--		/* This might overflow and become zero.. */
--		last_addr = PAGE_ALIGN(last_addr);
+-/*
+- * remove the plug and let it rip..
+- */
+-void __generic_unplug_device(struct request_queue *q)
+-{
+-	if (unlikely(blk_queue_stopped(q)))
+-		return;
 -
--		/* .. but that's ok, because modulo-2**n arithmetic will make
--	 	* the page-aligned "last - first" come out right.
--	 	*/
--		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
+-	if (!blk_remove_plug(q))
+-		return;
 -
--		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
--			iounmap(p); 
--			p = NULL;
--		}
--		global_flush_tlb();
+-	q->request_fn(q);
+-}
+-EXPORT_SYMBOL(__generic_unplug_device);
+-
+-/**
+- * generic_unplug_device - fire a request queue
+- * @q:    The &struct request_queue in question
+- *
+- * Description:
+- *   Linux uses plugging to build bigger requests queues before letting
+- *   the device have at them. If a queue is plugged, the I/O scheduler
+- *   is still adding and merging requests on the queue. Once the queue
+- *   gets unplugged, the request_fn defined for the queue is invoked and
+- *   transfers started.
+- **/
+-void generic_unplug_device(struct request_queue *q)
+-{
+-	spin_lock_irq(q->queue_lock);
+-	__generic_unplug_device(q);
+-	spin_unlock_irq(q->queue_lock);
+-}
+-EXPORT_SYMBOL(generic_unplug_device);
+-
+-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
+-				   struct page *page)
+-{
+-	struct request_queue *q = bdi->unplug_io_data;
+-
+-	blk_unplug(q);
+-}
+-
+-static void blk_unplug_work(struct work_struct *work)
+-{
+-	struct request_queue *q =
+-		container_of(work, struct request_queue, unplug_work);
+-
+-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+-				q->rq.count[READ] + q->rq.count[WRITE]);
+-
+-	q->unplug_fn(q);
+-}
+-
+-static void blk_unplug_timeout(unsigned long data)
+-{
+-	struct request_queue *q = (struct request_queue *)data;
+-
+-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+-				q->rq.count[READ] + q->rq.count[WRITE]);
+-
+-	kblockd_schedule_work(&q->unplug_work);
+-}
+-
+-void blk_unplug(struct request_queue *q)
+-{
+-	/*
+-	 * devices don't necessarily have an ->unplug_fn defined
+-	 */
+-	if (q->unplug_fn) {
+-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+-					q->rq.count[READ] + q->rq.count[WRITE]);
+-
+-		q->unplug_fn(q);
 -	}
+-}
+-EXPORT_SYMBOL(blk_unplug);
 -
--	return p;					
+-/**
+- * blk_start_queue - restart a previously stopped queue
+- * @q:    The &struct request_queue in question
+- *
+- * Description:
+- *   blk_start_queue() will clear the stop flag on the queue, and call
+- *   the request_fn for the queue if it was in a stopped state when
+- *   entered. Also see blk_stop_queue(). Queue lock must be held.
+- **/
+-void blk_start_queue(struct request_queue *q)
+-{
+-	WARN_ON(!irqs_disabled());
+-
+-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+-
+-	/*
+-	 * one level of recursion is ok and is much faster than kicking
+-	 * the unplug handling
+-	 */
+-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+-		q->request_fn(q);
+-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+-	} else {
+-		blk_plug_device(q);
+-		kblockd_schedule_work(&q->unplug_work);
+-	}
 -}
--EXPORT_SYMBOL(ioremap_nocache);
+-
+-EXPORT_SYMBOL(blk_start_queue);
 -
 -/**
-- * iounmap - Free a IO remapping
-- * @addr: virtual address from ioremap_*
+- * blk_stop_queue - stop a queue
+- * @q:    The &struct request_queue in question
+- *
+- * Description:
+- *   The Linux block layer assumes that a block driver will consume all
+- *   entries on the request queue when the request_fn strategy is called.
+- *   Often this will not happen, because of hardware limitations (queue
+- *   depth settings). If a device driver gets a 'queue full' response,
+- *   or if it simply chooses not to queue more I/O at one point, it can
+- *   call this function to prevent the request_fn from being called until
+- *   the driver has signalled it's ready to go again. This happens by calling
+- *   blk_start_queue() to restart queue operations. Queue lock must be held.
+- **/
+-void blk_stop_queue(struct request_queue *q)
+-{
+-	blk_remove_plug(q);
+-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+-}
+-EXPORT_SYMBOL(blk_stop_queue);
+-
+-/**
+- * blk_sync_queue - cancel any pending callbacks on a queue
+- * @q: the queue
+- *
+- * Description:
+- *     The block layer may perform asynchronous callback activity
+- *     on a queue, such as calling the unplug function after a timeout.
+- *     A block device may call blk_sync_queue to ensure that any
+- *     such activity is cancelled, thus allowing it to release resources
+- *     that the callbacks might use. The caller must already have made sure
+- *     that its ->make_request_fn will not re-add plugging prior to calling
+- *     this function.
 - *
-- * Caller must ensure there is only one unmapping for the same pointer.
 - */
--void iounmap(volatile void __iomem *addr)
+-void blk_sync_queue(struct request_queue *q)
 -{
--	struct vm_struct *p, *o;
+-	del_timer_sync(&q->unplug_timer);
+-	kblockd_flush_work(&q->unplug_work);
+-}
+-EXPORT_SYMBOL(blk_sync_queue);
 -
--	if ((void __force *)addr <= high_memory)
--		return;
+-/**
+- * blk_run_queue - run a single device queue
+- * @q:	The queue to run
+- */
+-void blk_run_queue(struct request_queue *q)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(q->queue_lock, flags);
+-	blk_remove_plug(q);
 -
 -	/*
--	 * __ioremap special-cases the PCI/ISA range by not instantiating a
--	 * vm_area and by simply returning an address into the kernel mapping
--	 * of ISA space.   So handle that here.
+-	 * Only recurse once to avoid overrunning the stack, let the unplug
+-	 * handling reinvoke the handler shortly if we already got there.
 -	 */
--	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
--			addr < phys_to_virt(ISA_END_ADDRESS))
--		return;
+-	if (!elv_queue_empty(q)) {
+-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+-			q->request_fn(q);
+-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+-		} else {
+-			blk_plug_device(q);
+-			kblockd_schedule_work(&q->unplug_work);
+-		}
+-	}
 -
--	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+-	spin_unlock_irqrestore(q->queue_lock, flags);
+-}
+-EXPORT_SYMBOL(blk_run_queue);
 -
--	/* Use the vm area unlocked, assuming the caller
--	   ensures there isn't another iounmap for the same address
--	   in parallel. Reuse of the virtual address is prevented by
--	   leaving it in the global lists until we're done with it.
--	   cpa takes care of the direct mappings. */
--	read_lock(&vmlist_lock);
--	for (p = vmlist; p; p = p->next) {
--		if (p->addr == addr)
--			break;
--	}
--	read_unlock(&vmlist_lock);
+-/**
+- * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
+- * @kobj:    the kobj belonging to the request queue to be released
+- *
+- * Description:
+- *     blk_cleanup_queue is the pair to blk_init_queue() or
+- *     blk_queue_make_request().  It should be called when a request queue is
+- *     being released; typically when a block device is being de-registered.
+- *     Currently, its primary task is to free all the &struct request
+- *     structures that were allocated to the queue and the queue itself.
+- *
+- * Caveat:
+- *     Hopefully the low level driver will have finished any
+- *     outstanding requests first...
+- **/
+-static void blk_release_queue(struct kobject *kobj)
+-{
+-	struct request_queue *q =
+-		container_of(kobj, struct request_queue, kobj);
+-	struct request_list *rl = &q->rq;
 -
--	if (!p) {
--		printk("iounmap: bad address %p\n", addr);
--		dump_stack();
--		return;
+-	blk_sync_queue(q);
+-
+-	if (rl->rq_pool)
+-		mempool_destroy(rl->rq_pool);
+-
+-	if (q->queue_tags)
+-		__blk_queue_free_tags(q);
+-
+-	blk_trace_shutdown(q);
+-
+-	bdi_destroy(&q->backing_dev_info);
+-	kmem_cache_free(requestq_cachep, q);
+-}
+-
+-void blk_put_queue(struct request_queue *q)
+-{
+-	kobject_put(&q->kobj);
+-}
+-EXPORT_SYMBOL(blk_put_queue);
+-
+-void blk_cleanup_queue(struct request_queue * q)
+-{
+-	mutex_lock(&q->sysfs_lock);
+-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+-	mutex_unlock(&q->sysfs_lock);
+-
+-	if (q->elevator)
+-		elevator_exit(q->elevator);
+-
+-	blk_put_queue(q);
+-}
+-
+-EXPORT_SYMBOL(blk_cleanup_queue);
+-
+-static int blk_init_free_list(struct request_queue *q)
+-{
+-	struct request_list *rl = &q->rq;
+-
+-	rl->count[READ] = rl->count[WRITE] = 0;
+-	rl->starved[READ] = rl->starved[WRITE] = 0;
+-	rl->elvpriv = 0;
+-	init_waitqueue_head(&rl->wait[READ]);
+-	init_waitqueue_head(&rl->wait[WRITE]);
+-
+-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+-				mempool_free_slab, request_cachep, q->node);
+-
+-	if (!rl->rq_pool)
+-		return -ENOMEM;
+-
+-	return 0;
+-}
+-
+-struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
+-{
+-	return blk_alloc_queue_node(gfp_mask, -1);
+-}
+-EXPORT_SYMBOL(blk_alloc_queue);
+-
+-static struct kobj_type queue_ktype;
+-
+-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+-{
+-	struct request_queue *q;
+-	int err;
+-
+-	q = kmem_cache_alloc_node(requestq_cachep,
+-				gfp_mask | __GFP_ZERO, node_id);
+-	if (!q)
+-		return NULL;
+-
+-	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+-	q->backing_dev_info.unplug_io_data = q;
+-	err = bdi_init(&q->backing_dev_info);
+-	if (err) {
+-		kmem_cache_free(requestq_cachep, q);
+-		return NULL;
 -	}
 -
--	/* Reset the direct mapping. Can block */
--	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
--		change_page_attr(virt_to_page(__va(p->phys_addr)),
--				 get_vm_area_size(p) >> PAGE_SHIFT,
--				 PAGE_KERNEL);
--		global_flush_tlb();
--	} 
+-	init_timer(&q->unplug_timer);
 -
--	/* Finally remove it */
--	o = remove_vm_area((void *)addr);
--	BUG_ON(p != o || o == NULL);
--	kfree(p); 
+-	kobject_set_name(&q->kobj, "%s", "queue");
+-	q->kobj.ktype = &queue_ktype;
+-	kobject_init(&q->kobj);
+-
+-	mutex_init(&q->sysfs_lock);
+-
+-	return q;
 -}
--EXPORT_SYMBOL(iounmap);
+-EXPORT_SYMBOL(blk_alloc_queue_node);
 -
--void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+-/**
+- * blk_init_queue  - prepare a request queue for use with a block device
+- * @rfn:  The function to be called to process requests that have been
+- *        placed on the queue.
+- * @lock: Request queue spin lock
+- *
+- * Description:
+- *    If a block device wishes to use the standard request handling procedures,
+- *    which sorts requests and coalesces adjacent requests, then it must
+- *    call blk_init_queue().  The function @rfn will be called when there
+- *    are requests on the queue that need to be processed.  If the device
+- *    supports plugging, then @rfn may not be called immediately when requests
+- *    are available on the queue, but may be called at some time later instead.
+- *    Plugged queues are generally unplugged when a buffer belonging to one
+- *    of the requests on the queue is needed, or due to memory pressure.
+- *
+- *    @rfn is not required, or even expected, to remove all requests off the
+- *    queue, but only as many as it can handle at a time.  If it does leave
+- *    requests on the queue, it is responsible for arranging that the requests
+- *    get dealt with eventually.
+- *
+- *    The queue spin lock must be held while manipulating the requests on the
+- *    request queue; this lock will be taken also from interrupt context, so irq
+- *    disabling is needed for it.
+- *
+- *    Function returns a pointer to the initialized request queue, or NULL if
+- *    it didn't succeed.
+- *
+- * Note:
+- *    blk_init_queue() must be paired with a blk_cleanup_queue() call
+- *    when the block device is deactivated (such as at module unload).
+- **/
+-
+-struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 -{
--	unsigned long offset, last_addr;
--	unsigned int nrpages;
--	enum fixed_addresses idx;
+-	return blk_init_queue_node(rfn, lock, -1);
+-}
+-EXPORT_SYMBOL(blk_init_queue);
 -
--	/* Don't allow wraparound or zero size */
--	last_addr = phys_addr + size - 1;
--	if (!size || last_addr < phys_addr)
+-struct request_queue *
+-blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+-{
+-	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+-
+-	if (!q)
 -		return NULL;
 -
--	/*
--	 * Don't remap the low PCI/ISA area, it's always mapped..
--	 */
--	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
--		return phys_to_virt(phys_addr);
+-	q->node = node_id;
+-	if (blk_init_free_list(q)) {
+-		kmem_cache_free(requestq_cachep, q);
+-		return NULL;
+-	}
 -
 -	/*
--	 * Mappings have to be page-aligned
+-	 * if caller didn't supply a lock, they get per-queue locking with
+-	 * our embedded lock
 -	 */
--	offset = phys_addr & ~PAGE_MASK;
--	phys_addr &= PAGE_MASK;
--	size = PAGE_ALIGN(last_addr) - phys_addr;
+-	if (!lock) {
+-		spin_lock_init(&q->__queue_lock);
+-		lock = &q->__queue_lock;
+-	}
+-
+-	q->request_fn		= rfn;
+-	q->prep_rq_fn		= NULL;
+-	q->unplug_fn		= generic_unplug_device;
+-	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
+-	q->queue_lock		= lock;
+-
+-	blk_queue_segment_boundary(q, 0xffffffff);
+-
+-	blk_queue_make_request(q, __make_request);
+-	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+-
+-	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+-	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+-
+-	q->sg_reserved_size = INT_MAX;
 -
 -	/*
--	 * Mappings have to fit in the FIX_BTMAP area.
+-	 * all done
 -	 */
--	nrpages = size >> PAGE_SHIFT;
--	if (nrpages > NR_FIX_BTMAPS)
+-	if (!elevator_init(q, NULL)) {
+-		blk_queue_congestion_threshold(q);
+-		return q;
+-	}
+-
+-	blk_put_queue(q);
+-	return NULL;
+-}
+-EXPORT_SYMBOL(blk_init_queue_node);
+-
+-int blk_get_queue(struct request_queue *q)
+-{
+-	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+-		kobject_get(&q->kobj);
+-		return 0;
+-	}
+-
+-	return 1;
+-}
+-
+-EXPORT_SYMBOL(blk_get_queue);
+-
+-static inline void blk_free_request(struct request_queue *q, struct request *rq)
+-{
+-	if (rq->cmd_flags & REQ_ELVPRIV)
+-		elv_put_request(q, rq);
+-	mempool_free(rq, q->rq.rq_pool);
+-}
+-
+-static struct request *
+-blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
+-{
+-	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+-
+-	if (!rq)
 -		return NULL;
 -
 -	/*
--	 * Ok, go for it..
+-	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
+-	 * see bio.h and blkdev.h
 -	 */
--	idx = FIX_BTMAP_BEGIN;
--	while (nrpages > 0) {
--		set_fixmap(idx, phys_addr);
--		phys_addr += PAGE_SIZE;
--		--idx;
--		--nrpages;
+-	rq->cmd_flags = rw | REQ_ALLOCED;
+-
+-	if (priv) {
+-		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
+-			mempool_free(rq, q->rq.rq_pool);
+-			return NULL;
+-		}
+-		rq->cmd_flags |= REQ_ELVPRIV;
 -	}
--	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+-
+-	return rq;
 -}
 -
--void __init bt_iounmap(void *addr, unsigned long size)
+-/*
+- * ioc_batching returns true if the ioc is a valid batching request and
+- * should be given priority access to a request.
+- */
+-static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
 -{
--	unsigned long virt_addr;
--	unsigned long offset;
--	unsigned int nrpages;
--	enum fixed_addresses idx;
+-	if (!ioc)
+-		return 0;
 -
--	virt_addr = (unsigned long)addr;
--	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+-	/*
+-	 * Make sure the process is able to allocate at least 1 request
+-	 * even if the batch times out, otherwise we could theoretically
+-	 * lose wakeups.
+-	 */
+-	return ioc->nr_batch_requests == q->nr_batching ||
+-		(ioc->nr_batch_requests > 0
+-		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
+-}
+-
+-/*
+- * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
+- * will cause the process to be a "batcher" on all queues in the system. This
+- * is the behaviour we want though - once it gets a wakeup it should be given
+- * a nice run.
+- */
+-static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
+-{
+-	if (!ioc || ioc_batching(q, ioc))
 -		return;
--	offset = virt_addr & ~PAGE_MASK;
--	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 -
--	idx = FIX_BTMAP_BEGIN;
--	while (nrpages > 0) {
--		clear_fixmap(idx);
--		--idx;
--		--nrpages;
+-	ioc->nr_batch_requests = q->nr_batching;
+-	ioc->last_waited = jiffies;
+-}
+-
+-static void __freed_request(struct request_queue *q, int rw)
+-{
+-	struct request_list *rl = &q->rq;
+-
+-	if (rl->count[rw] < queue_congestion_off_threshold(q))
+-		blk_clear_queue_congested(q, rw);
+-
+-	if (rl->count[rw] + 1 <= q->nr_requests) {
+-		if (waitqueue_active(&rl->wait[rw]))
+-			wake_up(&rl->wait[rw]);
+-
+-		blk_clear_queue_full(q, rw);
 -	}
 -}
-diff --git a/arch/x86/mm/ioremap_64.c b/arch/x86/mm/ioremap_64.c
-deleted file mode 100644
-index 6cac90a..0000000
---- a/arch/x86/mm/ioremap_64.c
-+++ /dev/null
-@@ -1,210 +0,0 @@
+-
 -/*
-- * arch/x86_64/mm/ioremap.c
-- *
-- * Re-map IO memory to kernel address space so that we can access it.
-- * This is needed for high PCI addresses that aren't mapped in the
-- * 640k-1MB IO memory area on PC's
-- *
-- * (C) Copyright 1995 1996 Linus Torvalds
+- * A request has just been released.  Account for it, update the full and
+- * congestion status, wake up any waiters.   Called under q->queue_lock.
 - */
+-static void freed_request(struct request_queue *q, int rw, int priv)
+-{
+-	struct request_list *rl = &q->rq;
 -
--#include <linux/vmalloc.h>
--#include <linux/init.h>
--#include <linux/slab.h>
--#include <linux/module.h>
--#include <linux/io.h>
+-	rl->count[rw]--;
+-	if (priv)
+-		rl->elvpriv--;
 -
--#include <asm/pgalloc.h>
--#include <asm/fixmap.h>
--#include <asm/tlbflush.h>
--#include <asm/cacheflush.h>
--#include <asm/proto.h>
+-	__freed_request(q, rw);
 -
--unsigned long __phys_addr(unsigned long x)
--{
--	if (x >= __START_KERNEL_map)
--		return x - __START_KERNEL_map + phys_base;
--	return x - PAGE_OFFSET;
+-	if (unlikely(rl->starved[rw ^ 1]))
+-		__freed_request(q, rw ^ 1);
 -}
--EXPORT_SYMBOL(__phys_addr);
--
--#define ISA_START_ADDRESS      0xa0000
--#define ISA_END_ADDRESS                0x100000
 -
+-#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 -/*
-- * Fix up the linear direct mapping of the kernel to avoid cache attribute
-- * conflicts.
+- * Get a free request, queue_lock must be held.
+- * Returns NULL on failure, with queue_lock held.
+- * Returns !NULL on success, with queue_lock *not held*.
 - */
--static int
--ioremap_change_attr(unsigned long phys_addr, unsigned long size,
--					unsigned long flags)
+-static struct request *get_request(struct request_queue *q, int rw_flags,
+-				   struct bio *bio, gfp_t gfp_mask)
 -{
--	int err = 0;
--	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
--		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
--		unsigned long vaddr = (unsigned long) __va(phys_addr);
+-	struct request *rq = NULL;
+-	struct request_list *rl = &q->rq;
+-	struct io_context *ioc = NULL;
+-	const int rw = rw_flags & 0x01;
+-	int may_queue, priv;
+-
+-	may_queue = elv_may_queue(q, rw_flags);
+-	if (may_queue == ELV_MQUEUE_NO)
+-		goto rq_starved;
+-
+-	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+-		if (rl->count[rw]+1 >= q->nr_requests) {
+-			ioc = current_io_context(GFP_ATOMIC, q->node);
+-			/*
+-			 * The queue will fill after this allocation, so set
+-			 * it as full, and mark this process as "batching".
+-			 * This process will be allowed to complete a batch of
+-			 * requests, others will be blocked.
+-			 */
+-			if (!blk_queue_full(q, rw)) {
+-				ioc_set_batching(q, ioc);
+-				blk_set_queue_full(q, rw);
+-			} else {
+-				if (may_queue != ELV_MQUEUE_MUST
+-						&& !ioc_batching(q, ioc)) {
+-					/*
+-					 * The queue is full and the allocating
+-					 * process is not a "batcher", and not
+-					 * exempted by the IO scheduler
+-					 */
+-					goto out;
+-				}
+-			}
+-		}
+-		blk_set_queue_congested(q, rw);
+-	}
+-
+-	/*
+-	 * Only allow batching queuers to allocate up to 50% over the defined
+-	 * limit of requests, otherwise we could have thousands of requests
+-	 * allocated with any setting of ->nr_requests
+-	 */
+-	if (rl->count[rw] >= (3 * q->nr_requests / 2))
+-		goto out;
+-
+-	rl->count[rw]++;
+-	rl->starved[rw] = 0;
+-
+-	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+-	if (priv)
+-		rl->elvpriv++;
+-
+-	spin_unlock_irq(q->queue_lock);
 -
+-	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+-	if (unlikely(!rq)) {
 -		/*
-- 		 * Must use a address here and not struct page because the phys addr
--		 * can be a in hole between nodes and not have an memmap entry.
+-		 * Allocation failed presumably due to memory. Undo anything
+-		 * we might have messed up.
+-		 *
+-		 * Allocating task should really be put onto the front of the
+-		 * wait queue, but this is pretty rare.
 -		 */
--		err = change_page_attr_addr(vaddr,npages,__pgprot(__PAGE_KERNEL|flags));
--		if (!err)
--			global_flush_tlb();
+-		spin_lock_irq(q->queue_lock);
+-		freed_request(q, rw, priv);
+-
+-		/*
+-		 * in the very unlikely event that allocation failed and no
+-		 * requests for this direction was pending, mark us starved
+-		 * so that freeing of a request in the other direction will
+-		 * notice us. another possible fix would be to split the
+-		 * rq mempool into READ and WRITE
+-		 */
+-rq_starved:
+-		if (unlikely(rl->count[rw] == 0))
+-			rl->starved[rw] = 1;
+-
+-		goto out;
 -	}
--	return err;
+-
+-	/*
+-	 * ioc may be NULL here, and ioc_batching will be false. That's
+-	 * OK, if the queue is under the request limit then requests need
+-	 * not count toward the nr_batch_requests limit. There will always
+-	 * be some limit enforced by BLK_BATCH_TIME.
+-	 */
+-	if (ioc_batching(q, ioc))
+-		ioc->nr_batch_requests--;
+-	
+-	rq_init(q, rq);
+-
+-	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+-out:
+-	return rq;
 -}
 -
 -/*
-- * Generic mapping function
+- * No available requests for this queue, unplug the device and wait for some
+- * requests to become available.
+- *
+- * Called with q->queue_lock held, and returns with it unlocked.
 - */
+-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
+-					struct bio *bio)
+-{
+-	const int rw = rw_flags & 0x01;
+-	struct request *rq;
 -
--/*
-- * Remap an arbitrary physical address space into the kernel virtual
-- * address space. Needed when the kernel wants to access high addresses
-- * directly.
+-	rq = get_request(q, rw_flags, bio, GFP_NOIO);
+-	while (!rq) {
+-		DEFINE_WAIT(wait);
+-		struct request_list *rl = &q->rq;
+-
+-		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+-				TASK_UNINTERRUPTIBLE);
+-
+-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
+-
+-		if (!rq) {
+-			struct io_context *ioc;
+-
+-			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+-
+-			__generic_unplug_device(q);
+-			spin_unlock_irq(q->queue_lock);
+-			io_schedule();
+-
+-			/*
+-			 * After sleeping, we become a "batching" process and
+-			 * will be able to allocate at least one request, and
+-			 * up to a big batch of them for a small period of time.
+-			 * See ioc_batching, ioc_set_batching
+-			 */
+-			ioc = current_io_context(GFP_NOIO, q->node);
+-			ioc_set_batching(q, ioc);
+-
+-			spin_lock_irq(q->queue_lock);
+-		}
+-		finish_wait(&rl->wait[rw], &wait);
+-	}
+-
+-	return rq;
+-}
+-
+-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+-{
+-	struct request *rq;
+-
+-	BUG_ON(rw != READ && rw != WRITE);
+-
+-	spin_lock_irq(q->queue_lock);
+-	if (gfp_mask & __GFP_WAIT) {
+-		rq = get_request_wait(q, rw, NULL);
+-	} else {
+-		rq = get_request(q, rw, NULL, gfp_mask);
+-		if (!rq)
+-			spin_unlock_irq(q->queue_lock);
+-	}
+-	/* q->queue_lock is unlocked at this point */
+-
+-	return rq;
+-}
+-EXPORT_SYMBOL(blk_get_request);
+-
+-/**
+- * blk_start_queueing - initiate dispatch of requests to device
+- * @q:		request queue to kick into gear
 - *
-- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-- * have to convert them into an offset in a page-aligned mapping, but the
-- * caller shouldn't need to know that small detail.
+- * This is basically a helper to remove the need to know whether a queue
+- * is plugged or not if someone just wants to initiate dispatch of requests
+- * for this queue.
+- *
+- * The queue lock must be held with interrupts disabled.
 - */
--void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+-void blk_start_queueing(struct request_queue *q)
 -{
--	void * addr;
--	struct vm_struct * area;
--	unsigned long offset, last_addr;
--	pgprot_t pgprot;
+-	if (!blk_queue_plugged(q))
+-		q->request_fn(q);
+-	else
+-		__generic_unplug_device(q);
+-}
+-EXPORT_SYMBOL(blk_start_queueing);
 -
--	/* Don't allow wraparound or zero size */
--	last_addr = phys_addr + size - 1;
--	if (!size || last_addr < phys_addr)
--		return NULL;
+-/**
+- * blk_requeue_request - put a request back on queue
+- * @q:		request queue where request should be inserted
+- * @rq:		request to be inserted
+- *
+- * Description:
+- *    Drivers often keep queueing requests until the hardware cannot accept
+- *    more, when that condition happens we need to put the request back
+- *    on the queue. Must be called with queue lock held.
+- */
+-void blk_requeue_request(struct request_queue *q, struct request *rq)
+-{
+-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+-
+-	if (blk_rq_tagged(rq))
+-		blk_queue_end_tag(q, rq);
+-
+-	elv_requeue_request(q, rq);
+-}
+-
+-EXPORT_SYMBOL(blk_requeue_request);
+-
+-/**
+- * blk_insert_request - insert a special request in to a request queue
+- * @q:		request queue where request should be inserted
+- * @rq:		request to be inserted
+- * @at_head:	insert request at head or tail of queue
+- * @data:	private data
+- *
+- * Description:
+- *    Many block devices need to execute commands asynchronously, so they don't
+- *    block the whole kernel from preemption during request execution.  This is
+- *    accomplished normally by inserting artificial requests tagged as
+- *    REQ_SPECIAL in to the corresponding request queue, and letting them be
+- *    scheduled for actual execution by the request queue.
+- *
+- *    We have the option of inserting the head or the tail of the queue.
+- *    Typically we use the tail for new ioctls and so forth.  We use the head
+- *    of the queue for things like a QUEUE_FULL message from a device, or a
+- *    host that is unable to accept a particular command.
+- */
+-void blk_insert_request(struct request_queue *q, struct request *rq,
+-			int at_head, void *data)
+-{
+-	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+-	unsigned long flags;
 -
 -	/*
--	 * Don't remap the low PCI/ISA area, it's always mapped..
+-	 * tell I/O scheduler that this isn't a regular read/write (ie it
+-	 * must not attempt merges on this) and that it acts as a soft
+-	 * barrier
 -	 */
--	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
--		return (__force void __iomem *)phys_to_virt(phys_addr);
+-	rq->cmd_type = REQ_TYPE_SPECIAL;
+-	rq->cmd_flags |= REQ_SOFTBARRIER;
+-
+-	rq->special = data;
+-
+-	spin_lock_irqsave(q->queue_lock, flags);
 -
--#ifdef CONFIG_FLATMEM
 -	/*
--	 * Don't allow anybody to remap normal RAM that we're using..
+-	 * If command is tagged, release the tag
 -	 */
--	if (last_addr < virt_to_phys(high_memory)) {
--		char *t_addr, *t_end;
-- 		struct page *page;
+-	if (blk_rq_tagged(rq))
+-		blk_queue_end_tag(q, rq);
 -
--		t_addr = __va(phys_addr);
--		t_end = t_addr + (size - 1);
--	   
--		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
--			if(!PageReserved(page))
--				return NULL;
+-	drive_stat_acct(rq, 1);
+-	__elv_add_request(q, rq, where, 0);
+-	blk_start_queueing(q);
+-	spin_unlock_irqrestore(q->queue_lock, flags);
+-}
+-
+-EXPORT_SYMBOL(blk_insert_request);
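
The kernel-doc above spells out when a driver queues a REQ_TYPE_SPECIAL request at the head or tail of the queue. A minimal usage sketch against the 2.6.24-era API shown here — the function name my_issue_special and the opaque my_cmd payload are hypothetical, and a real driver would complete the request from its request_fn:

#include <linux/blkdev.h>

static int my_issue_special(struct request_queue *q, void *my_cmd)
{
	struct request *rq;

	/* With GFP_KERNEL this may sleep until a free request is available. */
	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/*
	 * blk_insert_request() marks the request REQ_TYPE_SPECIAL, stores
	 * my_cmd in rq->special, takes the queue lock itself and kicks the
	 * queue; at_head=1 places it ahead of queued fs requests.
	 */
	blk_insert_request(q, rq, 1, my_cmd);
	return 0;
}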
+-
+-static int __blk_rq_unmap_user(struct bio *bio)
+-{
+-	int ret = 0;
+-
+-	if (bio) {
+-		if (bio_flagged(bio, BIO_USER_MAPPED))
+-			bio_unmap_user(bio);
+-		else
+-			ret = bio_uncopy_user(bio);
 -	}
--#endif
 -
--	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
--			  | _PAGE_DIRTY | _PAGE_ACCESSED | flags);
+-	return ret;
+-}
+-
+-int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+-		      struct bio *bio)
+-{
+-	if (!rq->bio)
+-		blk_rq_bio_prep(q, rq, bio);
+-	else if (!ll_back_merge_fn(q, rq, bio))
+-		return -EINVAL;
+-	else {
+-		rq->biotail->bi_next = bio;
+-		rq->biotail = bio;
+-
+-		rq->data_len += bio->bi_size;
+-	}
+-	return 0;
+-}
+-EXPORT_SYMBOL(blk_rq_append_bio);
+-
+-static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
+-			     void __user *ubuf, unsigned int len)
+-{
+-	unsigned long uaddr;
+-	struct bio *bio, *orig_bio;
+-	int reading, ret;
+-
+-	reading = rq_data_dir(rq) == READ;
+-
 -	/*
--	 * Mappings have to be page-aligned
+-	 * if alignment requirement is satisfied, map in user pages for
+-	 * direct dma. else, set up kernel bounce buffers
 -	 */
--	offset = phys_addr & ~PAGE_MASK;
--	phys_addr &= PAGE_MASK;
--	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+-	uaddr = (unsigned long) ubuf;
+-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+-		bio = bio_map_user(q, NULL, uaddr, len, reading);
+-	else
+-		bio = bio_copy_user(q, uaddr, len, reading);
+-
+-	if (IS_ERR(bio))
+-		return PTR_ERR(bio);
+-
+-	orig_bio = bio;
+-	blk_queue_bounce(q, &bio);
 -
 -	/*
--	 * Ok, go for it..
+-	 * We link the bounce buffer in and could have to traverse it
+-	 * later so we have to get a ref to prevent it from being freed
 -	 */
--	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
--	if (!area)
--		return NULL;
--	area->phys_addr = phys_addr;
--	addr = area->addr;
--	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
--			       phys_addr, pgprot)) {
--		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
--		return NULL;
--	}
--	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
--		area->flags &= 0xffffff;
--		vunmap(addr);
--		return NULL;
--	}
--	return (__force void __iomem *) (offset + (char *)addr);
+-	bio_get(bio);
+-
+-	ret = blk_rq_append_bio(q, rq, bio);
+-	if (!ret)
+-		return bio->bi_size;
+-
+-	/* if it was bounced we must call the end io function */
+-	bio_endio(bio, 0);
+-	__blk_rq_unmap_user(orig_bio);
+-	bio_put(bio);
+-	return ret;
 -}
--EXPORT_SYMBOL(__ioremap);
 -
 -/**
-- * ioremap_nocache     -   map bus memory into CPU space
-- * @offset:    bus address of the memory
-- * @size:      size of the resource to map
+- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+- * @q:		request queue where request should be inserted
+- * @rq:		request structure to fill
+- * @ubuf:	the user buffer
+- * @len:	length of user data
 - *
-- * ioremap_nocache performs a platform specific sequence of operations to
-- * make bus memory CPU accessible via the readb/readw/readl/writeb/
-- * writew/writel functions and the other mmio helpers. The returned
-- * address is not guaranteed to be usable directly as a virtual
-- * address. 
+- * Description:
+- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+- *    a kernel bounce buffer is used.
 - *
-- * This version of ioremap ensures that the memory is marked uncachable
-- * on the CPU as well as honouring existing caching rules from things like
-- * the PCI bus. Note that there are other caches and buffers on many 
-- * busses. In particular driver authors should read up on PCI writes
+- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+- *    still in process context.
 - *
-- * It's useful if some control registers are in such an area and
-- * write combining or read caching is not desirable:
-- * 
-- * Must be freed with iounmap.
+- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+- *    before being submitted to the device, as pages mapped may be out of
+- *    reach. It's the caller's responsibility to make sure this happens. The
+- *    original bio must be passed back in to blk_rq_unmap_user() for proper
+- *    unmapping.
 - */
+-int blk_rq_map_user(struct request_queue *q, struct request *rq,
+-		    void __user *ubuf, unsigned long len)
+-{
+-	unsigned long bytes_read = 0;
+-	struct bio *bio = NULL;
+-	int ret;
 -
--void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+-	if (len > (q->max_hw_sectors << 9))
+-		return -EINVAL;
+-	if (!len || !ubuf)
+-		return -EINVAL;
+-
+-	while (bytes_read != len) {
+-		unsigned long map_len, end, start;
+-
+-		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+-		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+-								>> PAGE_SHIFT;
+-		start = (unsigned long)ubuf >> PAGE_SHIFT;
+-
+-		/*
+-		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
+-		 * pages. If this happens we just lower the requested
+-		 * mapping len by a page so that we can fit
+-		 */
+-		if (end - start > BIO_MAX_PAGES)
+-			map_len -= PAGE_SIZE;
+-
+-		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+-		if (ret < 0)
+-			goto unmap_rq;
+-		if (!bio)
+-			bio = rq->bio;
+-		bytes_read += ret;
+-		ubuf += ret;
+-	}
+-
+-	rq->buffer = rq->data = NULL;
+-	return 0;
+-unmap_rq:
+-	blk_rq_unmap_user(bio);
+-	return ret;
+-}
+-
+-EXPORT_SYMBOL(blk_rq_map_user);
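
The description above for blk_rq_map_user() insists that the original rq->bio be handed back to blk_rq_unmap_user() after completion. A sketch of the usual REQ_BLOCK_PC round trip with the helpers in this file, under the 2.6.24 signatures shown here — my_sg_io, disk and the omitted command setup are assumptions; a real caller also fills rq->cmd, rq->cmd_len and rq->timeout:

static int my_sg_io(struct request_queue *q, struct gendisk *disk,
		    void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		goto out_put;

	/* Remember the original bio: completion may change rq->bio. */
	bio = rq->bio;
	err = blk_execute_rq(q, disk, rq, 0);

	if (blk_rq_unmap_user(bio) && !err)
		err = -EFAULT;
out_put:
	blk_put_request(rq);
	return err;
}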
+-
+-/**
+- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+- * @q:		request queue where request should be inserted
+- * @rq:		request to map data to
+- * @iov:	pointer to the iovec
+- * @iov_count:	number of elements in the iovec
+- * @len:	I/O byte count
+- *
+- * Description:
+- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+- *    a kernel bounce buffer is used.
+- *
+- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+- *    still in process context.
+- *
+- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+- *    before being submitted to the device, as pages mapped may be out of
+- *    reach. It's the caller's responsibility to make sure this happens. The
+- *    original bio must be passed back in to blk_rq_unmap_user() for proper
+- *    unmapping.
+- */
+-int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
+-			struct sg_iovec *iov, int iov_count, unsigned int len)
 -{
--	return __ioremap(phys_addr, size, _PAGE_PCD);
+-	struct bio *bio;
+-
+-	if (!iov || iov_count <= 0)
+-		return -EINVAL;
+-
+-	/* we don't allow misaligned data like bio_map_user() does.  If the
+-	 * user is using sg, they're expected to know the alignment constraints
+-	 * and respect them accordingly */
+-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+-	if (IS_ERR(bio))
+-		return PTR_ERR(bio);
+-
+-	if (bio->bi_size != len) {
+-		bio_endio(bio, 0);
+-		bio_unmap_user(bio);
+-		return -EINVAL;
+-	}
+-
+-	bio_get(bio);
+-	blk_rq_bio_prep(q, rq, bio);
+-	rq->buffer = rq->data = NULL;
+-	return 0;
 -}
--EXPORT_SYMBOL(ioremap_nocache);
+-
+-EXPORT_SYMBOL(blk_rq_map_user_iov);
 -
 -/**
-- * iounmap - Free a IO remapping
-- * @addr: virtual address from ioremap_*
+- * blk_rq_unmap_user - unmap a request with user data
+- * @bio:	       start of bio list
 - *
-- * Caller must ensure there is only one unmapping for the same pointer.
+- * Description:
+- *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+- *    supply the original rq->bio from the blk_rq_map_user() return, since
+- *    the io completion may have changed rq->bio.
 - */
--void iounmap(volatile void __iomem *addr)
+-int blk_rq_unmap_user(struct bio *bio)
 -{
--	struct vm_struct *p, *o;
+-	struct bio *mapped_bio;
+-	int ret = 0, ret2;
 -
--	if (addr <= high_memory) 
--		return; 
--	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
--		addr < phys_to_virt(ISA_END_ADDRESS))
--		return;
+-	while (bio) {
+-		mapped_bio = bio;
+-		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
+-			mapped_bio = bio->bi_private;
 -
--	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
--	/* Use the vm area unlocked, assuming the caller
--	   ensures there isn't another iounmap for the same address
--	   in parallel. Reuse of the virtual address is prevented by
--	   leaving it in the global lists until we're done with it.
--	   cpa takes care of the direct mappings. */
--	read_lock(&vmlist_lock);
--	for (p = vmlist; p; p = p->next) {
--		if (p->addr == addr)
--			break;
+-		ret2 = __blk_rq_unmap_user(mapped_bio);
+-		if (ret2 && !ret)
+-			ret = ret2;
+-
+-		mapped_bio = bio;
+-		bio = bio->bi_next;
+-		bio_put(mapped_bio);
 -	}
--	read_unlock(&vmlist_lock);
 -
--	if (!p) {
--		printk("iounmap: bad address %p\n", addr);
--		dump_stack();
--		return;
+-	return ret;
+-}
+-
+-EXPORT_SYMBOL(blk_rq_unmap_user);
+-
+-/**
+- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+- * @q:		request queue where request should be inserted
+- * @rq:		request to fill
+- * @kbuf:	the kernel buffer
+- * @len:	length of user data
+- * @gfp_mask:	memory allocation flags
+- */
+-int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+-		    unsigned int len, gfp_t gfp_mask)
+-{
+-	struct bio *bio;
+-
+-	if (len > (q->max_hw_sectors << 9))
+-		return -EINVAL;
+-	if (!len || !kbuf)
+-		return -EINVAL;
+-
+-	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+-	if (IS_ERR(bio))
+-		return PTR_ERR(bio);
+-
+-	if (rq_data_dir(rq) == WRITE)
+-		bio->bi_rw |= (1 << BIO_RW);
+-
+-	blk_rq_bio_prep(q, rq, bio);
+-	blk_queue_bounce(q, &rq->bio);
+-	rq->buffer = rq->data = NULL;
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL(blk_rq_map_kern);
+-
+-/**
+- * blk_execute_rq_nowait - insert a request into queue for execution
+- * @q:		queue to insert the request in
+- * @bd_disk:	matching gendisk
+- * @rq:		request to insert
+- * @at_head:    insert request at head or tail of queue
+- * @done:	I/O completion handler
+- *
+- * Description:
+- *    Insert a fully prepared request at the back of the io scheduler queue
+- *    for execution.  Don't wait for completion.
+- */
+-void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+-			   struct request *rq, int at_head,
+-			   rq_end_io_fn *done)
+-{
+-	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+-
+-	rq->rq_disk = bd_disk;
+-	rq->cmd_flags |= REQ_NOMERGE;
+-	rq->end_io = done;
+-	WARN_ON(irqs_disabled());
+-	spin_lock_irq(q->queue_lock);
+-	__elv_add_request(q, rq, where, 1);
+-	__generic_unplug_device(q);
+-	spin_unlock_irq(q->queue_lock);
+-}
+-EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+-
+-/**
+- * blk_execute_rq - insert a request into queue for execution
+- * @q:		queue to insert the request in
+- * @bd_disk:	matching gendisk
+- * @rq:		request to insert
+- * @at_head:    insert request at head or tail of queue
+- *
+- * Description:
+- *    Insert a fully prepared request at the back of the io scheduler queue
+- *    for execution and wait for completion.
+- */
+-int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
+-		   struct request *rq, int at_head)
+-{
+-	DECLARE_COMPLETION_ONSTACK(wait);
+-	char sense[SCSI_SENSE_BUFFERSIZE];
+-	int err = 0;
+-
+-	/*
+-	 * we need an extra reference to the request, so we can look at
+-	 * it after io completion
+-	 */
+-	rq->ref_count++;
+-
+-	if (!rq->sense) {
+-		memset(sense, 0, sizeof(sense));
+-		rq->sense = sense;
+-		rq->sense_len = 0;
 -	}
 -
--	/* Reset the direct mapping. Can block */
--	if (p->flags >> 20)
--		ioremap_change_attr(p->phys_addr, p->size, 0);
+-	rq->end_io_data = &wait;
+-	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
+-	wait_for_completion(&wait);
 -
--	/* Finally remove it */
--	o = remove_vm_area((void *)addr);
--	BUG_ON(p != o || o == NULL);
--	kfree(p); 
+-	if (rq->errors)
+-		err = -EIO;
+-
+-	return err;
 -}
--EXPORT_SYMBOL(iounmap);
 -
-diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
-index a96006f..7a2ebce 100644
---- a/arch/x86/mm/k8topology_64.c
-+++ b/arch/x86/mm/k8topology_64.c
-@@ -1,9 +1,9 @@
--/* 
-+/*
-  * AMD K8 NUMA support.
-  * Discover the memory map and associated nodes.
-- * 
-+ *
-  * This version reads it directly from the K8 northbridge.
-- * 
-+ *
-  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
-  */
- #include <linux/kernel.h>
-@@ -22,132 +22,135 @@
- 
- static __init int find_northbridge(void)
- {
--	int num; 
-+	int num;
- 
--	for (num = 0; num < 32; num++) { 
-+	for (num = 0; num < 32; num++) {
- 		u32 header;
--		
--		header = read_pci_config(0, num, 0, 0x00);  
--		if (header != (PCI_VENDOR_ID_AMD | (0x1100<<16)))
--			continue; 	
+-EXPORT_SYMBOL(blk_execute_rq);
 -
--		header = read_pci_config(0, num, 1, 0x00); 
--		if (header != (PCI_VENDOR_ID_AMD | (0x1101<<16)))
--			continue;	
--		return num; 
--	} 
+-static void bio_end_empty_barrier(struct bio *bio, int err)
+-{
+-	if (err)
+-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 -
--	return -1; 	
-+
-+		header = read_pci_config(0, num, 0, 0x00);
-+		if (header != (PCI_VENDOR_ID_AMD | (0x1100<<16)) &&
-+			header != (PCI_VENDOR_ID_AMD | (0x1200<<16)) &&
-+			header != (PCI_VENDOR_ID_AMD | (0x1300<<16)))
-+			continue;
-+
-+		header = read_pci_config(0, num, 1, 0x00);
-+		if (header != (PCI_VENDOR_ID_AMD | (0x1101<<16)) &&
-+			header != (PCI_VENDOR_ID_AMD | (0x1201<<16)) &&
-+			header != (PCI_VENDOR_ID_AMD | (0x1301<<16)))
-+			continue;
-+		return num;
-+	}
-+
-+	return -1;
- }
- 
- int __init k8_scan_nodes(unsigned long start, unsigned long end)
--{ 
-+{
- 	unsigned long prevbase;
- 	struct bootnode nodes[8];
--	int nodeid, i, j, nb;
-+	int nodeid, i, nb;
- 	unsigned char nodeids[8];
- 	int found = 0;
- 	u32 reg;
- 	unsigned numnodes;
--	unsigned num_cores;
-+	unsigned cores;
-+	unsigned bits;
-+	int j;
- 
- 	if (!early_pci_allowed())
- 		return -1;
- 
--	nb = find_northbridge(); 
--	if (nb < 0) 
-+	nb = find_northbridge();
-+	if (nb < 0)
- 		return nb;
- 
--	printk(KERN_INFO "Scanning NUMA topology in Northbridge %d\n", nb); 
+-	complete(bio->bi_private);
+-}
 -
--	num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
--	printk(KERN_INFO "CPU has %d num_cores\n", num_cores);
-+	printk(KERN_INFO "Scanning NUMA topology in Northbridge %d\n", nb);
- 
--	reg = read_pci_config(0, nb, 0, 0x60); 
-+	reg = read_pci_config(0, nb, 0, 0x60);
- 	numnodes = ((reg >> 4) & 0xF) + 1;
- 	if (numnodes <= 1)
- 		return -1;
- 
- 	printk(KERN_INFO "Number of nodes %d\n", numnodes);
- 
--	memset(&nodes,0,sizeof(nodes)); 
-+	memset(&nodes, 0, sizeof(nodes));
- 	prevbase = 0;
--	for (i = 0; i < 8; i++) { 
--		unsigned long base,limit; 
-+	for (i = 0; i < 8; i++) {
-+		unsigned long base, limit;
- 		u32 nodeid;
--		
-+
- 		base = read_pci_config(0, nb, 1, 0x40 + i*8);
- 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
- 
--		nodeid = limit & 7; 
-+		nodeid = limit & 7;
- 		nodeids[i] = nodeid;
--		if ((base & 3) == 0) { 
-+		if ((base & 3) == 0) {
- 			if (i < numnodes)
--				printk("Skipping disabled node %d\n", i); 
-+				printk("Skipping disabled node %d\n", i);
- 			continue;
--		} 
-+		}
- 		if (nodeid >= numnodes) {
- 			printk("Ignoring excess node %d (%lx:%lx)\n", nodeid,
--			       base, limit); 
-+			       base, limit);
- 			continue;
--		} 
-+		}
- 
--		if (!limit) { 
--			printk(KERN_INFO "Skipping node entry %d (base %lx)\n", i,
--			       base);
-+		if (!limit) {
-+			printk(KERN_INFO "Skipping node entry %d (base %lx)\n",
-+			       i, base);
- 			continue;
- 		}
- 		if ((base >> 8) & 3 || (limit >> 8) & 3) {
--			printk(KERN_ERR "Node %d using interleaving mode %lx/%lx\n", 
--			       nodeid, (base>>8)&3, (limit>>8) & 3); 
--			return -1; 
--		}	
-+			printk(KERN_ERR "Node %d using interleaving mode %lx/%lx\n",
-+			       nodeid, (base>>8)&3, (limit>>8) & 3);
-+			return -1;
-+		}
- 		if (node_isset(nodeid, node_possible_map)) {
--			printk(KERN_INFO "Node %d already present. Skipping\n", 
-+			printk(KERN_INFO "Node %d already present. Skipping\n",
- 			       nodeid);
- 			continue;
- 		}
- 
--		limit >>= 16; 
--		limit <<= 24; 
-+		limit >>= 16;
-+		limit <<= 24;
- 		limit |= (1<<24)-1;
- 		limit++;
- 
- 		if (limit > end_pfn << PAGE_SHIFT)
- 			limit = end_pfn << PAGE_SHIFT;
- 		if (limit <= base)
--			continue; 
--			
-+			continue;
-+
- 		base >>= 16;
--		base <<= 24; 
+-/**
+- * blkdev_issue_flush - queue a flush
+- * @bdev:	blockdev to issue flush for
+- * @error_sector:	error sector
+- *
+- * Description:
+- *    Issue a flush for the block device in question. Caller can supply
+- *    room for storing the error offset in case of a flush error, if they
+- *    wish to.  Caller must run wait_for_completion() on its own.
+- */
+-int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+-{
+-	DECLARE_COMPLETION_ONSTACK(wait);
+-	struct request_queue *q;
+-	struct bio *bio;
+-	int ret;
+-
+-	if (bdev->bd_disk == NULL)
+-		return -ENXIO;
+-
+-	q = bdev_get_queue(bdev);
+-	if (!q)
+-		return -ENXIO;
+-
+-	bio = bio_alloc(GFP_KERNEL, 0);
+-	if (!bio)
+-		return -ENOMEM;
+-
+-	bio->bi_end_io = bio_end_empty_barrier;
+-	bio->bi_private = &wait;
+-	bio->bi_bdev = bdev;
+-	submit_bio(1 << BIO_RW_BARRIER, bio);
+-
+-	wait_for_completion(&wait);
+-
+-	/*
+-	 * The driver must store the error location in ->bi_sector, if
+-	 * it supports it. For non-stacked drivers, this should be copied
+-	 * from rq->sector.
+-	 */
+-	if (error_sector)
+-		*error_sector = bio->bi_sector;
+-
+-	ret = 0;
+-	if (!bio_flagged(bio, BIO_UPTODATE))
+-		ret = -EIO;
+-
+-	bio_put(bio);
+-	return ret;
+-}
+-
+-EXPORT_SYMBOL(blkdev_issue_flush);
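
A short sketch of calling blkdev_issue_flush() as documented above — the bdev argument is assumed to be a block device the caller already holds a reference on, and the reported sector is only as precise as the driver makes it:

static int my_flush_cache(struct block_device *bdev)
{
	sector_t error_sector = 0;
	int err;

	err = blkdev_issue_flush(bdev, &error_sector);
	if (err)
		printk(KERN_WARNING "cache flush failed near sector %llu\n",
		       (unsigned long long)error_sector);
	return err;
}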
+-
+-static void drive_stat_acct(struct request *rq, int new_io)
+-{
+-	int rw = rq_data_dir(rq);
+-
+-	if (!blk_fs_request(rq) || !rq->rq_disk)
+-		return;
+-
+-	if (!new_io) {
+-		__disk_stat_inc(rq->rq_disk, merges[rw]);
+-	} else {
+-		disk_round_stats(rq->rq_disk);
+-		rq->rq_disk->in_flight++;
+-	}
+-}
 -
--		if (base < start) 
--			base = start; 
--		if (limit > end) 
--			limit = end; 
--		if (limit == base) { 
--			printk(KERN_ERR "Empty node %d\n", nodeid); 
--			continue; 
-+		base <<= 24;
-+
-+		if (base < start)
-+			base = start;
-+		if (limit > end)
-+			limit = end;
-+		if (limit == base) {
-+			printk(KERN_ERR "Empty node %d\n", nodeid);
-+			continue;
- 		}
--		if (limit < base) { 
-+		if (limit < base) {
- 			printk(KERN_ERR "Node %d bogus settings %lx-%lx.\n",
--			       nodeid, base, limit); 			       
-+			       nodeid, base, limit);
- 			continue;
--		} 
--		
-+		}
-+
- 		/* Could sort here, but pun for now. Should not happen anyroads. */
--		if (prevbase > base) { 
-+		if (prevbase > base) {
- 			printk(KERN_ERR "Node map not sorted %lx,%lx\n",
--			       prevbase,base);
-+			       prevbase, base);
- 			return -1;
- 		}
--			
--		printk(KERN_INFO "Node %d MemBase %016lx Limit %016lx\n", 
--		       nodeid, base, limit); 
--		
-+
-+		printk(KERN_INFO "Node %d MemBase %016lx Limit %016lx\n",
-+		       nodeid, base, limit);
-+
- 		found++;
--		
--		nodes[nodeid].start = base; 
-+
-+		nodes[nodeid].start = base;
- 		nodes[nodeid].end = limit;
- 		e820_register_active_regions(nodeid,
- 				nodes[nodeid].start >> PAGE_SHIFT,
-@@ -156,27 +159,31 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
- 		prevbase = base;
- 
- 		node_set(nodeid, node_possible_map);
--	} 
-+	}
- 
- 	if (!found)
--		return -1; 
-+		return -1;
- 
- 	memnode_shift = compute_hash_shift(nodes, 8);
--	if (memnode_shift < 0) { 
--		printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n"); 
--		return -1; 
--	} 
--	printk(KERN_INFO "Using node hash shift of %d\n", memnode_shift); 
-+	if (memnode_shift < 0) {
-+		printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n");
-+		return -1;
-+	}
-+	printk(KERN_INFO "Using node hash shift of %d\n", memnode_shift);
-+
-+	/* use the coreid bits from early_identify_cpu */
-+	bits = boot_cpu_data.x86_coreid_bits;
-+	cores = (1<<bits);
- 
- 	for (i = 0; i < 8; i++) {
--		if (nodes[i].start != nodes[i].end) { 
-+		if (nodes[i].start != nodes[i].end) {
- 			nodeid = nodeids[i];
--			for (j = 0; j < num_cores; j++)
--				apicid_to_node[(nodeid * num_cores) + j] = i;
--			setup_node_bootmem(i, nodes[i].start, nodes[i].end); 
--		} 
-+			for (j = 0; j < cores; j++)
-+				apicid_to_node[(nodeid << bits) + j] = i;
-+			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-+		}
- 	}
- 
- 	numa_init_array();
- 	return 0;
--} 
-+}
-diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-new file mode 100644
-index 0000000..56fe712
---- /dev/null
-+++ b/arch/x86/mm/mmap.c
-@@ -0,0 +1,123 @@
-+/*
-+ * Flexible mmap layout support
-+ *
-+ * Based on code by Ingo Molnar and Andi Kleen, copyrighted
-+ * as follows:
-+ *
-+ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
-+ * All Rights Reserved.
-+ * Copyright 2005 Andi Kleen, SUSE Labs.
-+ * Copyright 2007 Jiri Kosina, SUSE Labs.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+ */
-+
-+#include <linux/personality.h>
-+#include <linux/mm.h>
-+#include <linux/random.h>
-+#include <linux/limits.h>
-+#include <linux/sched.h>
-+
-+/*
-+ * Top of mmap area (just below the process stack).
-+ *
-+ * Leave an at least ~128 MB hole.
-+ */
-+#define MIN_GAP (128*1024*1024)
-+#define MAX_GAP (TASK_SIZE/6*5)
-+
-+/*
-+ * True on X86_32 or when emulating IA32 on X86_64
-+ */
-+static int mmap_is_ia32(void)
-+{
-+#ifdef CONFIG_X86_32
-+	return 1;
-+#endif
-+#ifdef CONFIG_IA32_EMULATION
-+	if (test_thread_flag(TIF_IA32))
-+		return 1;
-+#endif
-+	return 0;
-+}
-+
-+static int mmap_is_legacy(void)
-+{
-+	if (current->personality & ADDR_COMPAT_LAYOUT)
-+		return 1;
-+
-+	if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
-+		return 1;
-+
-+	return sysctl_legacy_va_layout;
-+}
-+
-+static unsigned long mmap_rnd(void)
-+{
-+	unsigned long rnd = 0;
-+
-+	/*
-+	*  8 bits of randomness in 32bit mmaps, 20 address space bits
-+	* 28 bits of randomness in 64bit mmaps, 40 address space bits
-+	*/
-+	if (current->flags & PF_RANDOMIZE) {
-+		if (mmap_is_ia32())
-+			rnd = (long)get_random_int() % (1<<8);
-+		else
-+			rnd = (long)(get_random_int() % (1<<28));
-+	}
-+	return rnd << PAGE_SHIFT;
-+}
-+
-+static unsigned long mmap_base(void)
-+{
-+	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
-+
-+	if (gap < MIN_GAP)
-+		gap = MIN_GAP;
-+	else if (gap > MAX_GAP)
-+		gap = MAX_GAP;
-+
-+	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
-+}
-+
-+/*
-+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
-+ * does, but not when emulating X86_32
-+ */
-+static unsigned long mmap_legacy_base(void)
-+{
-+	if (mmap_is_ia32())
-+		return TASK_UNMAPPED_BASE;
-+	else
-+		return TASK_UNMAPPED_BASE + mmap_rnd();
-+}
-+
-+/*
-+ * This function, called very early during the creation of a new
-+ * process VM image, sets up which VM layout function to use:
-+ */
-+void arch_pick_mmap_layout(struct mm_struct *mm)
-+{
-+	if (mmap_is_legacy()) {
-+		mm->mmap_base = mmap_legacy_base();
-+		mm->get_unmapped_area = arch_get_unmapped_area;
-+		mm->unmap_area = arch_unmap_area;
-+	} else {
-+		mm->mmap_base = mmap_base();
-+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-+		mm->unmap_area = arch_unmap_area_topdown;
-+	}
-+}
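
The mmap_rnd() comment above quantifies the randomisation as 8 bits of page numbers for 32-bit tasks and 28 bits for 64-bit tasks. A quick userspace check of the resulting address-space spans, assuming 4 KiB pages (PAGE_SHIFT of 12) and a 64-bit host, which reproduces the "20 address space bits" and "40 address space bits" figures:

#include <stdio.h>

int main(void)
{
	const unsigned long page_shift = 12;	/* assumed 4 KiB pages */

	/* 2^8 pages span 1 MiB; 2^28 pages span 1 TiB */
	printf("32-bit mmap randomisation span: %lu bytes\n",
	       (1UL << 8) << page_shift);
	printf("64-bit mmap randomisation span: %lu bytes\n",
	       (1UL << 28) << page_shift);
	return 0;
}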
-diff --git a/arch/x86/mm/mmap_32.c b/arch/x86/mm/mmap_32.c
-deleted file mode 100644
-index 552e084..0000000
---- a/arch/x86/mm/mmap_32.c
-+++ /dev/null
-@@ -1,77 +0,0 @@
 -/*
-- *  linux/arch/i386/mm/mmap.c
-- *
-- *  flexible mmap layout support
-- *
-- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
-- * All Rights Reserved.
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, write to the Free Software
-- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+- * add-request adds a request to the linked list.
+- * queue lock is held and interrupts disabled, as we muck with the
+- * request queue list.
+- */
+-static inline void add_request(struct request_queue * q, struct request * req)
+-{
+-	drive_stat_acct(req, 1);
+-
+-	/*
+-	 * elevator indicated where it wants this request to be
+-	 * inserted at elevator_merge time
+-	 */
+-	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
+-}
+- 
+-/*
+- * disk_round_stats()	- Round off the performance stats on a struct
+- * disk_stats.
 - *
+- * The average IO queue length and utilisation statistics are maintained
+- * by observing the current state of the queue length and the amount of
+- * time it has been in this state for.
 - *
-- * Started by Ingo Molnar <mingo at elte.hu>
+- * Normally, that accounting is done on IO completion, but that can result
+- * in more than a second's worth of IO being accounted for within any one
+- * second, leading to >100% utilisation.  To deal with that, we call this
+- * function to do a round-off before returning the results when reading
+- * /proc/diskstats.  This accounts immediately for all queue usage up to
+- * the current jiffies and restarts the counters again.
 - */
+-void disk_round_stats(struct gendisk *disk)
+-{
+-	unsigned long now = jiffies;
 -
--#include <linux/personality.h>
--#include <linux/mm.h>
--#include <linux/random.h>
--#include <linux/sched.h>
+-	if (now == disk->stamp)
+-		return;
+-
+-	if (disk->in_flight) {
+-		__disk_stat_add(disk, time_in_queue,
+-				disk->in_flight * (now - disk->stamp));
+-		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
+-	}
+-	disk->stamp = now;
+-}
+-
+-EXPORT_SYMBOL_GPL(disk_round_stats);
 -
 -/*
-- * Top of mmap area (just below the process stack).
-- *
-- * Leave an at least ~128 MB hole.
+- * queue lock must be held
 - */
--#define MIN_GAP (128*1024*1024)
--#define MAX_GAP (TASK_SIZE/6*5)
+-void __blk_put_request(struct request_queue *q, struct request *req)
+-{
+-	if (unlikely(!q))
+-		return;
+-	if (unlikely(--req->ref_count))
+-		return;
 -
--static inline unsigned long mmap_base(struct mm_struct *mm)
+-	elv_completed_request(q, req);
+-
+-	/*
+-	 * Request may not have originated from ll_rw_blk. if not,
+-	 * it didn't come out of our reserved rq pools
+-	 */
+-	if (req->cmd_flags & REQ_ALLOCED) {
+-		int rw = rq_data_dir(req);
+-		int priv = req->cmd_flags & REQ_ELVPRIV;
+-
+-		BUG_ON(!list_empty(&req->queuelist));
+-		BUG_ON(!hlist_unhashed(&req->hash));
+-
+-		blk_free_request(q, req);
+-		freed_request(q, rw, priv);
+-	}
+-}
+-
+-EXPORT_SYMBOL_GPL(__blk_put_request);
+-
+-void blk_put_request(struct request *req)
 -{
--	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
--	unsigned long random_factor = 0;
+-	unsigned long flags;
+-	struct request_queue *q = req->q;
 -
--	if (current->flags & PF_RANDOMIZE)
--		random_factor = get_random_int() % (1024*1024);
+-	/*
+-	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
+-	 * following if (q) test.
+-	 */
+-	if (q) {
+-		spin_lock_irqsave(q->queue_lock, flags);
+-		__blk_put_request(q, req);
+-		spin_unlock_irqrestore(q->queue_lock, flags);
+-	}
+-}
 -
--	if (gap < MIN_GAP)
--		gap = MIN_GAP;
--	else if (gap > MAX_GAP)
--		gap = MAX_GAP;
+-EXPORT_SYMBOL(blk_put_request);
 -
--	return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
+-/**
+- * blk_end_sync_rq - executes a completion event on a request
+- * @rq: request to complete
+- * @error: end io status of the request
+- */
+-void blk_end_sync_rq(struct request *rq, int error)
+-{
+-	struct completion *waiting = rq->end_io_data;
+-
+-	rq->end_io_data = NULL;
+-	__blk_put_request(rq->q, rq);
+-
+-	/*
+-	 * complete last, if this is a stack request the process (and thus
+-	 * the rq pointer) could be invalid right after this complete()
+-	 */
+-	complete(waiting);
 -}
+-EXPORT_SYMBOL(blk_end_sync_rq);
 -
 -/*
-- * This function, called very early during the creation of a new
-- * process VM image, sets up which VM layout function to use:
+- * Has to be called with the request spinlock acquired
 - */
--void arch_pick_mmap_layout(struct mm_struct *mm)
+-static int attempt_merge(struct request_queue *q, struct request *req,
+-			  struct request *next)
 -{
+-	if (!rq_mergeable(req) || !rq_mergeable(next))
+-		return 0;
+-
 -	/*
--	 * Fall back to the standard layout if the personality
--	 * bit is set, or if the expected stack growth is unlimited:
+-	 * not contiguous
 -	 */
--	if (sysctl_legacy_va_layout ||
--			(current->personality & ADDR_COMPAT_LAYOUT) ||
--			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
--		mm->mmap_base = TASK_UNMAPPED_BASE;
--		mm->get_unmapped_area = arch_get_unmapped_area;
--		mm->unmap_area = arch_unmap_area;
--	} else {
--		mm->mmap_base = mmap_base(mm);
--		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
--		mm->unmap_area = arch_unmap_area_topdown;
+-	if (req->sector + req->nr_sectors != next->sector)
+-		return 0;
+-
+-	if (rq_data_dir(req) != rq_data_dir(next)
+-	    || req->rq_disk != next->rq_disk
+-	    || next->special)
+-		return 0;
+-
+-	/*
+-	 * If we are allowed to merge, then append bio list
+-	 * from next to rq and release next. merge_requests_fn
+-	 * will have updated segment counts, update sector
+-	 * counts here.
+-	 */
+-	if (!ll_merge_requests_fn(q, req, next))
+-		return 0;
+-
+-	/*
+-	 * At this point we have either done a back merge
+-	 * or front merge. We need the smaller start_time of
+-	 * the merged requests to be the current request
+-	 * for accounting purposes.
+-	 */
+-	if (time_after(req->start_time, next->start_time))
+-		req->start_time = next->start_time;
+-
+-	req->biotail->bi_next = next->bio;
+-	req->biotail = next->biotail;
+-
+-	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+-
+-	elv_merge_requests(q, req, next);
+-
+-	if (req->rq_disk) {
+-		disk_round_stats(req->rq_disk);
+-		req->rq_disk->in_flight--;
 -	}
+-
+-	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+-
+-	__blk_put_request(q, next);
+-	return 1;
 -}
-diff --git a/arch/x86/mm/mmap_64.c b/arch/x86/mm/mmap_64.c
-deleted file mode 100644
-index 80bba0d..0000000
---- a/arch/x86/mm/mmap_64.c
-+++ /dev/null
-@@ -1,29 +0,0 @@
--/* Copyright 2005 Andi Kleen, SuSE Labs.
-- * Licensed under GPL, v.2
-- */
--#include <linux/mm.h>
--#include <linux/sched.h>
--#include <linux/random.h>
--#include <asm/ia32.h>
 -
--/* Notebook: move the mmap code from sys_x86_64.c over here. */
+-static inline int attempt_back_merge(struct request_queue *q,
+-				     struct request *rq)
+-{
+-	struct request *next = elv_latter_request(q, rq);
 -
--void arch_pick_mmap_layout(struct mm_struct *mm)
+-	if (next)
+-		return attempt_merge(q, rq, next);
+-
+-	return 0;
+-}
+-
+-static inline int attempt_front_merge(struct request_queue *q,
+-				      struct request *rq)
 -{
--#ifdef CONFIG_IA32_EMULATION
--	if (current_thread_info()->flags & _TIF_IA32)
--		return ia32_pick_mmap_layout(mm);
--#endif
--	mm->mmap_base = TASK_UNMAPPED_BASE;
--	if (current->flags & PF_RANDOMIZE) {
--		/* Add 28bit randomness which is about 40bits of address space
--		   because mmap base has to be page aligned.
-- 		   or ~1/128 of the total user VM
--	   	   (total user address space is 47bits) */
--		unsigned rnd = get_random_int() & 0xfffffff;
--		mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
--	}
--	mm->get_unmapped_area = arch_get_unmapped_area;
--	mm->unmap_area = arch_unmap_area;
+-	struct request *prev = elv_former_request(q, rq);
+-
+-	if (prev)
+-		return attempt_merge(q, prev, rq);
+-
+-	return 0;
 -}
 -
-diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
-index 3d6926b..dc3b1f7 100644
---- a/arch/x86/mm/numa_64.c
-+++ b/arch/x86/mm/numa_64.c
-@@ -1,7 +1,7 @@
--/* 
-+/*
-  * Generic VM initialization for x86-64 NUMA setups.
-  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
-- */ 
-+ */
- #include <linux/kernel.h>
- #include <linux/mm.h>
- #include <linux/string.h>
-@@ -11,35 +11,45 @@
- #include <linux/ctype.h>
- #include <linux/module.h>
- #include <linux/nodemask.h>
-+#include <linux/sched.h>
- 
- #include <asm/e820.h>
- #include <asm/proto.h>
- #include <asm/dma.h>
- #include <asm/numa.h>
- #include <asm/acpi.h>
-+#include <asm/k8.h>
- 
- #ifndef Dprintk
- #define Dprintk(x...)
- #endif
- 
- struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-+EXPORT_SYMBOL(node_data);
-+
- bootmem_data_t plat_node_bdata[MAX_NUMNODES];
- 
- struct memnode memnode;
- 
--unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
-+int x86_cpu_to_node_map_init[NR_CPUS] = {
- 	[0 ... NR_CPUS-1] = NUMA_NO_NODE
- };
--unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
-- 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-+void *x86_cpu_to_node_map_early_ptr;
-+DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
-+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-+EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
-+
-+s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
-+	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
- };
--cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
-+
-+cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
-+EXPORT_SYMBOL(node_to_cpumask_map);
- 
- int numa_off __initdata;
- unsigned long __initdata nodemap_addr;
- unsigned long __initdata nodemap_size;
- 
+-static void init_request_from_bio(struct request *req, struct bio *bio)
+-{
+-	req->cmd_type = REQ_TYPE_FS;
 -
- /*
-  * Given a shift value, try to populate memnodemap[]
-  * Returns :
-@@ -47,14 +57,13 @@ unsigned long __initdata nodemap_size;
-  * 0 if memnodmap[] too small (of shift too small)
-  * -1 if node overlap or lost ram (shift too big)
-  */
--static int __init
--populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
-+static int __init populate_memnodemap(const struct bootnode *nodes,
-+				      int numnodes, int shift)
- {
--	int i; 
--	int res = -1;
- 	unsigned long addr, end;
-+	int i, res = -1;
- 
--	memset(memnodemap, 0xff, memnodemapsize);
-+	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
- 	for (i = 0; i < numnodes; i++) {
- 		addr = nodes[i].start;
- 		end = nodes[i].end;
-@@ -63,13 +72,13 @@ populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
- 		if ((end >> shift) >= memnodemapsize)
- 			return 0;
- 		do {
--			if (memnodemap[addr >> shift] != 0xff)
-+			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
- 				return -1;
- 			memnodemap[addr >> shift] = i;
- 			addr += (1UL << shift);
- 		} while (addr < end);
- 		res = 1;
--	} 
-+	}
- 	return res;
- }
- 
-@@ -78,12 +87,12 @@ static int __init allocate_cachealigned_memnodemap(void)
- 	unsigned long pad, pad_addr;
- 
- 	memnodemap = memnode.embedded_map;
--	if (memnodemapsize <= 48)
-+	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
- 		return 0;
- 
- 	pad = L1_CACHE_BYTES - 1;
- 	pad_addr = 0x8000;
--	nodemap_size = pad + memnodemapsize;
-+	nodemap_size = pad + sizeof(s16) * memnodemapsize;
- 	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
- 				      nodemap_size);
- 	if (nodemap_addr == -1UL) {
-@@ -94,6 +103,7 @@ static int __init allocate_cachealigned_memnodemap(void)
- 	}
- 	pad_addr = (nodemap_addr + pad) & ~pad;
- 	memnodemap = phys_to_virt(pad_addr);
-+	reserve_early(nodemap_addr, nodemap_addr + nodemap_size);
- 
- 	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
- 	       nodemap_addr, nodemap_addr + nodemap_size);
-@@ -104,8 +114,8 @@ static int __init allocate_cachealigned_memnodemap(void)
-  * The LSB of all start and end addresses in the node map is the value of the
-  * maximum possible shift.
-  */
--static int __init
--extract_lsb_from_nodes (const struct bootnode *nodes, int numnodes)
-+static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
-+					 int numnodes)
- {
- 	int i, nodes_used = 0;
- 	unsigned long start, end;
-@@ -140,51 +150,50 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
- 		shift);
- 
- 	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
--		printk(KERN_INFO
--	"Your memory is not aligned you need to rebuild your kernel "
--	"with a bigger NODEMAPSIZE shift=%d\n",
--			shift);
-+		printk(KERN_INFO "Your memory is not aligned you need to "
-+		       "rebuild your kernel with a bigger NODEMAPSIZE "
-+		       "shift=%d\n", shift);
- 		return -1;
- 	}
- 	return shift;
- }
- 
--#ifdef CONFIG_SPARSEMEM
- int early_pfn_to_nid(unsigned long pfn)
- {
- 	return phys_to_nid(pfn << PAGE_SHIFT);
- }
--#endif
- 
--static void * __init
--early_node_mem(int nodeid, unsigned long start, unsigned long end,
--	      unsigned long size)
-+static void * __init early_node_mem(int nodeid, unsigned long start,
-+				    unsigned long end, unsigned long size)
- {
- 	unsigned long mem = find_e820_area(start, end, size);
- 	void *ptr;
-+
- 	if (mem != -1L)
- 		return __va(mem);
- 	ptr = __alloc_bootmem_nopanic(size,
- 				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
- 	if (ptr == NULL) {
- 		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
--			size, nodeid);
-+		       size, nodeid);
- 		return NULL;
- 	}
- 	return ptr;
- }
- 
- /* Initialize bootmem allocator for a node */
--void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
--{ 
--	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start; 
--	unsigned long nodedata_phys;
-+void __init setup_node_bootmem(int nodeid, unsigned long start,
-+			       unsigned long end)
-+{
-+	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size;
-+	unsigned long bootmap_start, nodedata_phys;
- 	void *bootmap;
- 	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
- 
--	start = round_up(start, ZONE_ALIGN); 
-+	start = round_up(start, ZONE_ALIGN);
- 
--	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
-+	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
-+	       start, end);
- 
- 	start_pfn = start >> PAGE_SHIFT;
- 	end_pfn = end >> PAGE_SHIFT;
-@@ -200,75 +209,55 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
- 	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
- 
- 	/* Find a place for the bootmem map */
--	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 
-+	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
- 	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
- 	bootmap = early_node_mem(nodeid, bootmap_start, end,
- 					bootmap_pages<<PAGE_SHIFT);
- 	if (bootmap == NULL)  {
- 		if (nodedata_phys < start || nodedata_phys >= end)
--			free_bootmem((unsigned long)node_data[nodeid],pgdat_size);
-+			free_bootmem((unsigned long)node_data[nodeid],
-+				     pgdat_size);
- 		node_data[nodeid] = NULL;
- 		return;
- 	}
- 	bootmap_start = __pa(bootmap);
--	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages); 
--	
-+	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);
-+
- 	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
--					 bootmap_start >> PAGE_SHIFT, 
--					 start_pfn, end_pfn); 
-+					 bootmap_start >> PAGE_SHIFT,
-+					 start_pfn, end_pfn);
- 
- 	free_bootmem_with_active_regions(nodeid, end);
- 
--	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size); 
--	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
-+	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
-+	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
-+			     bootmap_pages<<PAGE_SHIFT);
- #ifdef CONFIG_ACPI_NUMA
- 	srat_reserve_add_area(nodeid);
- #endif
- 	node_set_online(nodeid);
--} 
+-	/*
+-	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+-	 */
+-	if (bio_rw_ahead(bio) || bio_failfast(bio))
+-		req->cmd_flags |= REQ_FAILFAST;
 -
--/* Initialize final allocator for a zone */
--void __init setup_node_zones(int nodeid)
--{ 
--	unsigned long start_pfn, end_pfn, memmapsize, limit;
+-	/*
+-	 * REQ_BARRIER implies no merging, but lets make it explicit
+-	 */
+-	if (unlikely(bio_barrier(bio)))
+-		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 -
-- 	start_pfn = node_start_pfn(nodeid);
-- 	end_pfn = node_end_pfn(nodeid);
+-	if (bio_sync(bio))
+-		req->cmd_flags |= REQ_RW_SYNC;
+-	if (bio_rw_meta(bio))
+-		req->cmd_flags |= REQ_RW_META;
 -
--	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
--		nodeid, start_pfn, end_pfn);
+-	req->errors = 0;
+-	req->hard_sector = req->sector = bio->bi_sector;
+-	req->ioprio = bio_prio(bio);
+-	req->start_time = jiffies;
+-	blk_rq_bio_prep(req->q, req, bio);
+-}
 -
--	/* Try to allocate mem_map at end to not fill up precious <4GB
--	   memory. */
--	memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
--	limit = end_pfn << PAGE_SHIFT;
--#ifdef CONFIG_FLAT_NODE_MEM_MAP
--	NODE_DATA(nodeid)->node_mem_map = 
--		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata, 
--				memmapsize, SMP_CACHE_BYTES, 
--				round_down(limit - memmapsize, PAGE_SIZE), 
--				limit);
--#endif
--} 
-+}
- 
-+/*
-+ * There are unfortunately some poorly designed mainboards around that
-+ * only connect memory to a single CPU. This breaks the 1:1 cpu->node
-+ * mapping. To avoid this fill in the mapping for all possible CPUs,
-+ * as the number of CPUs is not known yet. We round robin the existing
-+ * nodes.
-+ */
- void __init numa_init_array(void)
- {
- 	int rr, i;
--	/* There are unfortunately some poorly designed mainboards around
--	   that only connect memory to a single CPU. This breaks the 1:1 cpu->node
--	   mapping. To avoid this fill in the mapping for all possible
--	   CPUs, as the number of CPUs is not known yet. 
--	   We round robin the existing nodes. */
-+
- 	rr = first_node(node_online_map);
- 	for (i = 0; i < NR_CPUS; i++) {
--		if (cpu_to_node(i) != NUMA_NO_NODE)
-+		if (early_cpu_to_node(i) != NUMA_NO_NODE)
- 			continue;
-- 		numa_set_node(i, rr);
-+		numa_set_node(i, rr);
- 		rr = next_node(rr, node_online_map);
- 		if (rr == MAX_NUMNODES)
- 			rr = first_node(node_online_map);
- 	}
+-static int __make_request(struct request_queue *q, struct bio *bio)
+-{
+-	struct request *req;
+-	int el_ret, nr_sectors, barrier, err;
+-	const unsigned short prio = bio_prio(bio);
+-	const int sync = bio_sync(bio);
+-	int rw_flags;
 -
- }
- 
- #ifdef CONFIG_NUMA_EMU
-@@ -276,15 +265,17 @@ void __init numa_init_array(void)
- char *cmdline __initdata;
- 
- /*
-- * Setups up nid to range from addr to addr + size.  If the end boundary is
-- * greater than max_addr, then max_addr is used instead.  The return value is 0
-- * if there is additional memory left for allocation past addr and -1 otherwise.
-- * addr is adjusted to be at the end of the node.
-+ * Setups up nid to range from addr to addr + size.  If the end
-+ * boundary is greater than max_addr, then max_addr is used instead.
-+ * The return value is 0 if there is additional memory left for
-+ * allocation past addr and -1 otherwise.  addr is adjusted to be at
-+ * the end of the node.
-  */
- static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
- 				   u64 size, u64 max_addr)
- {
- 	int ret = 0;
-+
- 	nodes[nid].start = *addr;
- 	*addr += size;
- 	if (*addr >= max_addr) {
-@@ -335,6 +326,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
- 
- 	for (i = node_start; i < num_nodes + node_start; i++) {
- 		u64 end = *addr + size;
-+
- 		if (i < big)
- 			end += FAKE_NODE_MIN_SIZE;
- 		/*
-@@ -380,14 +372,9 @@ static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
- static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
- {
- 	struct bootnode nodes[MAX_NUMNODES];
--	u64 addr = start_pfn << PAGE_SHIFT;
-+	u64 size, addr = start_pfn << PAGE_SHIFT;
- 	u64 max_addr = end_pfn << PAGE_SHIFT;
--	int num_nodes = 0;
--	int coeff_flag;
--	int coeff = -1;
--	int num = 0;
--	u64 size;
--	int i;
-+	int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;
- 
- 	memset(&nodes, 0, sizeof(nodes));
- 	/*
-@@ -395,8 +382,9 @@ static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
- 	 * system RAM into N fake nodes.
- 	 */
- 	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
--		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
--						simple_strtol(cmdline, NULL, 0));
-+		long n = simple_strtol(cmdline, NULL, 0);
-+
-+		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
- 		if (num_nodes < 0)
- 			return num_nodes;
- 		goto out;
-@@ -483,46 +471,47 @@ out:
- 	for_each_node_mask(i, node_possible_map) {
- 		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
- 						nodes[i].end >> PAGE_SHIFT);
-- 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-+		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
- 	}
- 	acpi_fake_nodes(nodes, num_nodes);
-- 	numa_init_array();
-- 	return 0;
-+	numa_init_array();
-+	return 0;
- }
- #endif /* CONFIG_NUMA_EMU */
- 
- void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
--{ 
-+{
- 	int i;
- 
- 	nodes_clear(node_possible_map);
- 
- #ifdef CONFIG_NUMA_EMU
- 	if (cmdline && !numa_emulation(start_pfn, end_pfn))
-- 		return;
-+		return;
- 	nodes_clear(node_possible_map);
- #endif
- 
- #ifdef CONFIG_ACPI_NUMA
- 	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
- 					  end_pfn << PAGE_SHIFT))
-- 		return;
-+		return;
- 	nodes_clear(node_possible_map);
- #endif
- 
- #ifdef CONFIG_K8_NUMA
--	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
-+	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
-+					end_pfn<<PAGE_SHIFT))
- 		return;
- 	nodes_clear(node_possible_map);
- #endif
- 	printk(KERN_INFO "%s\n",
- 	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
- 
--	printk(KERN_INFO "Faking a node at %016lx-%016lx\n", 
-+	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
- 	       start_pfn << PAGE_SHIFT,
--	       end_pfn << PAGE_SHIFT); 
--		/* setup dummy node covering all memory */ 
--	memnode_shift = 63; 
-+	       end_pfn << PAGE_SHIFT);
-+	/* setup dummy node covering all memory */
-+	memnode_shift = 63;
- 	memnodemap = memnode.embedded_map;
- 	memnodemap[0] = 0;
- 	nodes_clear(node_online_map);
-@@ -530,36 +519,48 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
- 	node_set(0, node_possible_map);
- 	for (i = 0; i < NR_CPUS; i++)
- 		numa_set_node(i, 0);
--	node_to_cpumask[0] = cpumask_of_cpu(0);
-+	/* cpumask_of_cpu() may not be available during early startup */
-+	memset(&node_to_cpumask_map[0], 0, sizeof(node_to_cpumask_map[0]));
-+	cpu_set(0, node_to_cpumask_map[0]);
- 	e820_register_active_regions(0, start_pfn, end_pfn);
- 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
- }
- 
- __cpuinit void numa_add_cpu(int cpu)
- {
--	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
--} 
-+	set_bit(cpu,
-+		(unsigned long *)&node_to_cpumask_map[early_cpu_to_node(cpu)]);
-+}
- 
- void __cpuinit numa_set_node(int cpu, int node)
- {
-+	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
-+
- 	cpu_pda(cpu)->nodenumber = node;
--	cpu_to_node(cpu) = node;
-+
-+	if(cpu_to_node_map)
-+		cpu_to_node_map[cpu] = node;
-+	else if(per_cpu_offset(cpu))
-+		per_cpu(x86_cpu_to_node_map, cpu) = node;
-+	else
-+		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
- }
- 
--unsigned long __init numa_free_all_bootmem(void) 
--{ 
--	int i;
-+unsigned long __init numa_free_all_bootmem(void)
-+{
- 	unsigned long pages = 0;
--	for_each_online_node(i) {
-+	int i;
-+
-+	for_each_online_node(i)
- 		pages += free_all_bootmem_node(NODE_DATA(i));
+-	nr_sectors = bio_sectors(bio);
+-
+-	/*
+-	 * low level driver can indicate that it wants pages above a
+-	 * certain limit bounced to low memory (ie for highmem, or even
+-	 * ISA dma in theory)
+-	 */
+-	blk_queue_bounce(q, &bio);
+-
+-	barrier = bio_barrier(bio);
+-	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
+-		err = -EOPNOTSUPP;
+-		goto end_io;
 -	}
-+
- 	return pages;
--} 
-+}
- 
- void __init paging_init(void)
--{ 
--	int i;
-+{
- 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-+
- 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
- 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
-@@ -568,32 +569,27 @@ void __init paging_init(void)
- 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
- 	sparse_init();
- 
--	for_each_online_node(i) {
--		setup_node_zones(i); 
+-
+-	spin_lock_irq(q->queue_lock);
+-
+-	if (unlikely(barrier) || elv_queue_empty(q))
+-		goto get_rq;
+-
+-	el_ret = elv_merge(q, &req, bio);
+-	switch (el_ret) {
+-		case ELEVATOR_BACK_MERGE:
+-			BUG_ON(!rq_mergeable(req));
+-
+-			if (!ll_back_merge_fn(q, req, bio))
+-				break;
+-
+-			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+-
+-			req->biotail->bi_next = bio;
+-			req->biotail = bio;
+-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+-			req->ioprio = ioprio_best(req->ioprio, prio);
+-			drive_stat_acct(req, 0);
+-			if (!attempt_back_merge(q, req))
+-				elv_merged_request(q, req, el_ret);
+-			goto out;
+-
+-		case ELEVATOR_FRONT_MERGE:
+-			BUG_ON(!rq_mergeable(req));
+-
+-			if (!ll_front_merge_fn(q, req, bio))
+-				break;
+-
+-			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+-
+-			bio->bi_next = req->bio;
+-			req->bio = bio;
+-
+-			/*
+-			 * may not be valid. if the low level driver said
+-			 * it didn't need a bounce buffer then it better
+-			 * not touch req->buffer either...
+-			 */
+-			req->buffer = bio_data(bio);
+-			req->current_nr_sectors = bio_cur_sectors(bio);
+-			req->hard_cur_sectors = req->current_nr_sectors;
+-			req->sector = req->hard_sector = bio->bi_sector;
+-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+-			req->ioprio = ioprio_best(req->ioprio, prio);
+-			drive_stat_acct(req, 0);
+-			if (!attempt_front_merge(q, req))
+-				elv_merged_request(q, req, el_ret);
+-			goto out;
+-
+-		/* ELV_NO_MERGE: elevator says don't/can't merge. */
+-		default:
+-			;
 -	}
 -
- 	free_area_init_nodes(max_zone_pfns);
--} 
-+}
- 
- static __init int numa_setup(char *opt)
--{ 
-+{
- 	if (!opt)
- 		return -EINVAL;
--	if (!strncmp(opt,"off",3))
-+	if (!strncmp(opt, "off", 3))
- 		numa_off = 1;
- #ifdef CONFIG_NUMA_EMU
- 	if (!strncmp(opt, "fake=", 5))
- 		cmdline = opt + 5;
- #endif
- #ifdef CONFIG_ACPI_NUMA
-- 	if (!strncmp(opt,"noacpi",6))
-- 		acpi_numa = -1;
--	if (!strncmp(opt,"hotadd=", 7))
-+	if (!strncmp(opt, "noacpi", 6))
-+		acpi_numa = -1;
-+	if (!strncmp(opt, "hotadd=", 7))
- 		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
- #endif
- 	return 0;
--} 
+-get_rq:
+-	/*
+-	 * This sync check and mask will be re-done in init_request_from_bio(),
+-	 * but we need to set it earlier to expose the sync flag to the
+-	 * rq allocator and io schedulers.
+-	 */
+-	rw_flags = bio_data_dir(bio);
+-	if (sync)
+-		rw_flags |= REQ_RW_SYNC;
 -
-+}
- early_param("numa", numa_setup);
- 
- /*
-@@ -611,38 +607,16 @@ early_param("numa", numa_setup);
- void __init init_cpu_to_node(void)
- {
- 	int i;
-- 	for (i = 0; i < NR_CPUS; i++) {
--		u8 apicid = x86_cpu_to_apicid_init[i];
-+
-+	for (i = 0; i < NR_CPUS; i++) {
-+		u16 apicid = x86_cpu_to_apicid_init[i];
-+
- 		if (apicid == BAD_APICID)
- 			continue;
- 		if (apicid_to_node[apicid] == NUMA_NO_NODE)
- 			continue;
--		numa_set_node(i,apicid_to_node[apicid]);
-+		numa_set_node(i, apicid_to_node[apicid]);
- 	}
- }
- 
--EXPORT_SYMBOL(cpu_to_node);
--EXPORT_SYMBOL(node_to_cpumask);
--EXPORT_SYMBOL(memnode);
--EXPORT_SYMBOL(node_data);
+-	/*
+-	 * Grab a free request. This is might sleep but can not fail.
+-	 * Returns with the queue unlocked.
+-	 */
+-	req = get_request_wait(q, rw_flags, bio);
+-
+-	/*
+-	 * After dropping the lock and possibly sleeping here, our request
+-	 * may now be mergeable after it had proven unmergeable (above).
+-	 * We don't worry about that case for efficiency. It won't happen
+-	 * often, and the elevators are able to handle it.
+-	 */
+-	init_request_from_bio(req, bio);
+-
+-	spin_lock_irq(q->queue_lock);
+-	if (elv_queue_empty(q))
+-		blk_plug_device(q);
+-	add_request(q, req);
+-out:
+-	if (sync)
+-		__generic_unplug_device(q);
+-
+-	spin_unlock_irq(q->queue_lock);
+-	return 0;
+-
+-end_io:
+-	bio_endio(bio, err);
+-	return 0;
+-}
 -
--#ifdef CONFIG_DISCONTIGMEM
 -/*
-- * Functions to convert PFNs from/to per node page addresses.
-- * These are out of line because they are quite big.
-- * They could be all tuned by pre caching more state.
-- * Should do that.
+- * If bio->bi_dev is a partition, remap the location
 - */
- 
--int pfn_valid(unsigned long pfn)
+-static inline void blk_partition_remap(struct bio *bio)
 -{
--	unsigned nid;
--	if (pfn >= num_physpages)
--		return 0;
--	nid = pfn_to_nid(pfn);
--	if (nid == 0xff)
+-	struct block_device *bdev = bio->bi_bdev;
+-
+-	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+-		struct hd_struct *p = bdev->bd_part;
+-		const int rw = bio_data_dir(bio);
+-
+-		p->sectors[rw] += bio_sectors(bio);
+-		p->ios[rw]++;
+-
+-		bio->bi_sector += p->start_sect;
+-		bio->bi_bdev = bdev->bd_contains;
+-
+-		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+-				    bdev->bd_dev, bio->bi_sector,
+-				    bio->bi_sector - p->start_sect);
+-	}
+-}
+-
+-static void handle_bad_sector(struct bio *bio)
+-{
+-	char b[BDEVNAME_SIZE];
+-
+-	printk(KERN_INFO "attempt to access beyond end of device\n");
+-	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+-			bdevname(bio->bi_bdev, b),
+-			bio->bi_rw,
+-			(unsigned long long)bio->bi_sector + bio_sectors(bio),
+-			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+-
+-	set_bit(BIO_EOF, &bio->bi_flags);
+-}
+-
+-#ifdef CONFIG_FAIL_MAKE_REQUEST
+-
+-static DECLARE_FAULT_ATTR(fail_make_request);
+-
+-static int __init setup_fail_make_request(char *str)
+-{
+-	return setup_fault_attr(&fail_make_request, str);
+-}
+-__setup("fail_make_request=", setup_fail_make_request);
+-
+-static int should_fail_request(struct bio *bio)
+-{
+-	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
+-	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
+-		return should_fail(&fail_make_request, bio->bi_size);
+-
+-	return 0;
+-}
+-
+-static int __init fail_make_request_debugfs(void)
+-{
+-	return init_fault_attr_dentries(&fail_make_request,
+-					"fail_make_request");
+-}
+-
+-late_initcall(fail_make_request_debugfs);
+-
+-#else /* CONFIG_FAIL_MAKE_REQUEST */
+-
+-static inline int should_fail_request(struct bio *bio)
+-{
+-	return 0;
+-}
+-
+-#endif /* CONFIG_FAIL_MAKE_REQUEST */
+-
+-/*
+- * Check whether this bio extends beyond the end of the device.
+- */
+-static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+-{
+-	sector_t maxsector;
+-
+-	if (!nr_sectors)
 -		return 0;
--	return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid);
+-
+-	/* Test device or partition size, when known. */
+-	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+-	if (maxsector) {
+-		sector_t sector = bio->bi_sector;
+-
+-		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+-			/*
+-			 * This may well happen - the kernel calls bread()
+-			 * without checking the size of the device, e.g., when
+-			 * mounting a device.
+-			 */
+-			handle_bad_sector(bio);
+-			return 1;
+-		}
+-	}
+-
+-	return 0;
 -}
--EXPORT_SYMBOL(pfn_valid);
--#endif
-diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
-new file mode 100644
-index 0000000..06353d4
---- /dev/null
-+++ b/arch/x86/mm/pageattr-test.c
-@@ -0,0 +1,224 @@
-+/*
-+ * self test for change_page_attr.
-+ *
-+ * Clears the global bit on random pages in the direct mapping, then reverts
-+ * and compares page tables forwards and afterwards.
-+ */
-+#include <linux/bootmem.h>
-+#include <linux/random.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/mm.h>
-+
-+#include <asm/cacheflush.h>
-+#include <asm/pgtable.h>
-+#include <asm/kdebug.h>
-+
-+enum {
-+	NTEST			= 4000,
-+#ifdef CONFIG_X86_64
-+	LPS			= (1 << PMD_SHIFT),
-+#elif defined(CONFIG_X86_PAE)
-+	LPS			= (1 << PMD_SHIFT),
-+#else
-+	LPS			= (1 << 22),
-+#endif
-+	GPS			= (1<<30)
-+};
-+
-+struct split_state {
-+	long lpg, gpg, spg, exec;
-+	long min_exec, max_exec;
-+};
-+
-+static __init int print_split(struct split_state *s)
-+{
-+	long i, expected, missed = 0;
-+	int printed = 0;
-+	int err = 0;
-+
-+	s->lpg = s->gpg = s->spg = s->exec = 0;
-+	s->min_exec = ~0UL;
-+	s->max_exec = 0;
-+	for (i = 0; i < max_pfn_mapped; ) {
-+		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
-+		int level;
-+		pte_t *pte;
-+
-+		pte = lookup_address(addr, &level);
-+		if (!pte) {
-+			if (!printed) {
-+				dump_pagetable(addr);
-+				printk(KERN_INFO "CPA %lx no pte level %d\n",
-+					addr, level);
-+				printed = 1;
-+			}
-+			missed++;
-+			i++;
-+			continue;
-+		}
-+
-+		if (level == PG_LEVEL_1G && sizeof(long) == 8) {
-+			s->gpg++;
-+			i += GPS/PAGE_SIZE;
-+		} else if (level == PG_LEVEL_2M) {
-+			if (!(pte_val(*pte) & _PAGE_PSE)) {
-+				printk(KERN_ERR
-+					"%lx level %d but not PSE %Lx\n",
-+					addr, level, (u64)pte_val(*pte));
-+				err = 1;
-+			}
-+			s->lpg++;
-+			i += LPS/PAGE_SIZE;
-+		} else {
-+			s->spg++;
-+			i++;
-+		}
-+		if (!(pte_val(*pte) & _PAGE_NX)) {
-+			s->exec++;
-+			if (addr < s->min_exec)
-+				s->min_exec = addr;
-+			if (addr > s->max_exec)
-+				s->max_exec = addr;
-+		}
-+	}
-+	printk(KERN_INFO
-+		"CPA mapping 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
-+		s->spg, s->lpg, s->gpg, s->exec,
-+		s->min_exec != ~0UL ? s->min_exec : 0, s->max_exec, missed);
-+
-+	expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
-+	if (expected != i) {
-+		printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n",
-+			max_pfn_mapped, expected);
-+		return 1;
-+	}
-+	return err;
-+}
-+
-+static unsigned long __initdata addr[NTEST];
-+static unsigned int __initdata len[NTEST];
-+
-+/* Change the global bit on random pages in the direct mapping */
-+static __init int exercise_pageattr(void)
-+{
-+	struct split_state sa, sb, sc;
-+	unsigned long *bm;
-+	pte_t *pte, pte0;
-+	int failed = 0;
-+	int level;
-+	int i, k;
-+	int err;
-+
-+	printk(KERN_INFO "CPA exercising pageattr\n");
-+
-+	bm = vmalloc((max_pfn_mapped + 7) / 8);
-+	if (!bm) {
-+		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
-+		return -ENOMEM;
-+	}
-+	memset(bm, 0, (max_pfn_mapped + 7) / 8);
-+
-+	failed += print_split(&sa);
-+	srandom32(100);
-+
-+	for (i = 0; i < NTEST; i++) {
-+		unsigned long pfn = random32() % max_pfn_mapped;
-+
-+		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
-+		len[i] = random32() % 100;
-+		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
-+
-+		if (len[i] == 0)
-+			len[i] = 1;
-+
-+		pte = NULL;
-+		pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */
-+
-+		for (k = 0; k < len[i]; k++) {
-+			pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
-+			if (!pte || pgprot_val(pte_pgprot(*pte)) == 0) {
-+				addr[i] = 0;
-+				break;
-+			}
-+			if (k == 0) {
-+				pte0 = *pte;
-+			} else {
-+				if (pgprot_val(pte_pgprot(*pte)) !=
-+					pgprot_val(pte_pgprot(pte0))) {
-+					len[i] = k;
-+					break;
-+				}
-+			}
-+			if (test_bit(pfn + k, bm)) {
-+				len[i] = k;
-+				break;
-+			}
-+			__set_bit(pfn + k, bm);
-+		}
-+		if (!addr[i] || !pte || !k) {
-+			addr[i] = 0;
-+			continue;
-+		}
-+
-+		err = change_page_attr_clear(addr[i], len[i],
-+					       __pgprot(_PAGE_GLOBAL));
-+		if (err < 0) {
-+			printk(KERN_ERR "CPA %d failed %d\n", i, err);
-+			failed++;
-+		}
-+
-+		pte = lookup_address(addr[i], &level);
-+		if (!pte || pte_global(*pte) || pte_huge(*pte)) {
-+			printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i],
-+				pte ? (u64)pte_val(*pte) : 0ULL);
-+			failed++;
-+		}
-+		if (level != PG_LEVEL_4K) {
-+			printk(KERN_ERR "CPA %lx: unexpected level %d\n",
-+				addr[i], level);
-+			failed++;
-+		}
-+
-+	}
-+	vfree(bm);
-+
-+	failed += print_split(&sb);
-+
-+	printk(KERN_INFO "CPA reverting everything\n");
-+	for (i = 0; i < NTEST; i++) {
-+		if (!addr[i])
-+			continue;
-+		pte = lookup_address(addr[i], &level);
-+		if (!pte) {
-+			printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]);
-+			failed++;
-+			continue;
-+		}
-+		err = change_page_attr_set(addr[i], len[i],
-+					     __pgprot(_PAGE_GLOBAL));
-+		if (err < 0) {
-+			printk(KERN_ERR "CPA reverting failed: %d\n", err);
-+			failed++;
-+		}
-+		pte = lookup_address(addr[i], &level);
-+		if (!pte || !pte_global(*pte)) {
-+			printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n",
-+				addr[i], pte ? (u64)pte_val(*pte) : 0ULL);
-+			failed++;
-+		}
-+
-+	}
-+
-+	failed += print_split(&sc);
-+
-+	if (failed) {
-+		printk(KERN_ERR "CPA selftests NOT PASSED. Please report.\n");
-+		WARN_ON(1);
-+	} else {
-+		printk(KERN_INFO "CPA selftests PASSED\n");
-+	}
-+
-+	return 0;
-+}
-+module_init(exercise_pageattr);
-diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-new file mode 100644
-index 0000000..1cc6607
---- /dev/null
-+++ b/arch/x86/mm/pageattr.c
-@@ -0,0 +1,564 @@
-+/*
-+ * Copyright 2002 Andi Kleen, SuSE Labs.
-+ * Thanks to Ben LaHaise for precious feedback.
-+ */
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/mm.h>
-+
-+#include <asm/e820.h>
-+#include <asm/processor.h>
-+#include <asm/tlbflush.h>
-+#include <asm/sections.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgalloc.h>
-+
-+static inline int
-+within(unsigned long addr, unsigned long start, unsigned long end)
-+{
-+	return addr >= start && addr < end;
-+}
-+
-+/*
-+ * Flushing functions
-+ */
-+
-+/**
-+ * clflush_cache_range - flush a cache range with clflush
-+ * @addr:	virtual start address
-+ * @size:	number of bytes to flush
-+ *
-+ * clflush is an unordered instruction which needs fencing with mfence
-+ * to avoid ordering issues.
-+ */
-+void clflush_cache_range(void *vaddr, unsigned int size)
-+{
-+	void *vend = vaddr + size - 1;
-+
-+	mb();
-+
-+	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
-+		clflush(vaddr);
-+	/*
-+	 * Flush any possible final partial cacheline:
-+	 */
-+	clflush(vend);
-+
-+	mb();
-+}
-+
-+static void __cpa_flush_all(void *arg)
-+{
-+	/*
-+	 * Flush all to work around Errata in early athlons regarding
-+	 * large page flushing.
-+	 */
-+	__flush_tlb_all();
-+
-+	if (boot_cpu_data.x86_model >= 4)
-+		wbinvd();
-+}
-+
-+static void cpa_flush_all(void)
-+{
-+	BUG_ON(irqs_disabled());
-+
-+	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
-+}
-+
-+static void __cpa_flush_range(void *arg)
-+{
-+	/*
-+	 * We could optimize that further and do individual per page
-+	 * tlb invalidates for a low number of pages. Caveat: we must
-+	 * flush the high aliases on 64bit as well.
-+	 */
-+	__flush_tlb_all();
-+}
-+
-+static void cpa_flush_range(unsigned long start, int numpages)
-+{
-+	unsigned int i, level;
-+	unsigned long addr;
-+
-+	BUG_ON(irqs_disabled());
-+	WARN_ON(PAGE_ALIGN(start) != start);
-+
-+	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
-+
-+	/*
-+	 * We only need to flush on one CPU,
-+	 * clflush is a MESI-coherent instruction that
-+	 * will cause all other CPUs to flush the same
-+	 * cachelines:
-+	 */
-+	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
-+		pte_t *pte = lookup_address(addr, &level);
-+
-+		/*
-+		 * Only flush present addresses:
-+		 */
-+		if (pte && pte_present(*pte))
-+			clflush_cache_range((void *) addr, PAGE_SIZE);
-+	}
-+}
-+
-+/*
-+ * Certain areas of memory on x86 require very specific protection flags,
-+ * for example the BIOS area or kernel text. Callers don't always get this
-+ * right (again, ioremap() on BIOS memory is not uncommon) so this function
-+ * checks and fixes these known static required protection bits.
-+ */
-+static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
-+{
-+	pgprot_t forbidden = __pgprot(0);
-+
-+	/*
-+	 * The BIOS area between 640k and 1Mb needs to be executable for
-+	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
-+	 */
-+	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
-+		pgprot_val(forbidden) |= _PAGE_NX;
-+
-+	/*
-+	 * The kernel text needs to be executable for obvious reasons
-+	 * Does not cover __inittext since that is gone later on
-+	 */
-+	if (within(address, (unsigned long)_text, (unsigned long)_etext))
-+		pgprot_val(forbidden) |= _PAGE_NX;
-+
-+#ifdef CONFIG_DEBUG_RODATA
-+	/* The .rodata section needs to be read-only */
-+	if (within(address, (unsigned long)__start_rodata,
-+				(unsigned long)__end_rodata))
-+		pgprot_val(forbidden) |= _PAGE_RW;
-+#endif
-+
-+	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-+
-+	return prot;
-+}
-+
-+pte_t *lookup_address(unsigned long address, int *level)
-+{
-+	pgd_t *pgd = pgd_offset_k(address);
-+	pud_t *pud;
-+	pmd_t *pmd;
-+
-+	*level = PG_LEVEL_NONE;
-+
-+	if (pgd_none(*pgd))
-+		return NULL;
-+	pud = pud_offset(pgd, address);
-+	if (pud_none(*pud))
-+		return NULL;
-+	pmd = pmd_offset(pud, address);
-+	if (pmd_none(*pmd))
-+		return NULL;
-+
-+	*level = PG_LEVEL_2M;
-+	if (pmd_large(*pmd))
-+		return (pte_t *)pmd;
-+
-+	*level = PG_LEVEL_4K;
-+	return pte_offset_kernel(pmd, address);
-+}
-+
-+static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
-+{
-+	/* change init_mm */
-+	set_pte_atomic(kpte, pte);
-+#ifdef CONFIG_X86_32
-+	if (!SHARED_KERNEL_PMD) {
-+		struct page *page;
-+
-+		list_for_each_entry(page, &pgd_list, lru) {
-+			pgd_t *pgd;
-+			pud_t *pud;
-+			pmd_t *pmd;
-+
-+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-+			pud = pud_offset(pgd, address);
-+			pmd = pmd_offset(pud, address);
-+			set_pte_atomic((pte_t *)pmd, pte);
-+		}
-+	}
-+#endif
-+}
-+
-+static int split_large_page(pte_t *kpte, unsigned long address)
-+{
-+	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
-+	gfp_t gfp_flags = GFP_KERNEL;
-+	unsigned long flags;
-+	unsigned long addr;
-+	pte_t *pbase, *tmp;
-+	struct page *base;
-+	unsigned int i, level;
-+
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+	gfp_flags = __GFP_HIGH | __GFP_NOFAIL | __GFP_NOWARN;
-+	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
-+#endif
-+	base = alloc_pages(gfp_flags, 0);
-+	if (!base)
-+		return -ENOMEM;
-+
-+	spin_lock_irqsave(&pgd_lock, flags);
-+	/*
-+	 * Check for races, another CPU might have split this page
-+	 * up for us already:
-+	 */
-+	tmp = lookup_address(address, &level);
-+	if (tmp != kpte) {
-+		WARN_ON_ONCE(1);
-+		goto out_unlock;
-+	}
-+
-+	address = __pa(address);
-+	addr = address & LARGE_PAGE_MASK;
-+	pbase = (pte_t *)page_address(base);
-+#ifdef CONFIG_X86_32
-+	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
-+#endif
-+
-+	pgprot_val(ref_prot) &= ~_PAGE_NX;
-+	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
-+		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
-+
-+	/*
-+	 * Install the new, split up pagetable. Important detail here:
-+	 *
-+	 * On Intel the NX bit of all levels must be cleared to make a
-+	 * page executable. See section 4.13.2 of Intel 64 and IA-32
-+	 * Architectures Software Developer's Manual).
-+	 */
-+	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
-+	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
-+	base = NULL;
-+
-+out_unlock:
-+	spin_unlock_irqrestore(&pgd_lock, flags);
-+
-+	if (base)
-+		__free_pages(base, 0);
-+
-+	return 0;
-+}
-+
-+static int
-+__change_page_attr(unsigned long address, unsigned long pfn,
-+		   pgprot_t mask_set, pgprot_t mask_clr)
-+{
-+	struct page *kpte_page;
-+	int level, err = 0;
-+	pte_t *kpte;
-+
-+#ifdef CONFIG_X86_32
-+	BUG_ON(pfn > max_low_pfn);
-+#endif
-+
-+repeat:
-+	kpte = lookup_address(address, &level);
-+	if (!kpte)
-+		return -EINVAL;
-+
-+	kpte_page = virt_to_page(kpte);
-+	BUG_ON(PageLRU(kpte_page));
-+	BUG_ON(PageCompound(kpte_page));
-+
-+	if (level == PG_LEVEL_4K) {
-+		pgprot_t new_prot = pte_pgprot(*kpte);
-+		pte_t new_pte, old_pte = *kpte;
-+
-+		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
-+		pgprot_val(new_prot) |= pgprot_val(mask_set);
-+
-+		new_prot = static_protections(new_prot, address);
-+
-+		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
-+		BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));
-+
-+		set_pte_atomic(kpte, new_pte);
-+	} else {
-+		err = split_large_page(kpte, address);
-+		if (!err)
-+			goto repeat;
-+	}
-+	return err;
-+}
-+
-+/**
-+ * change_page_attr_addr - Change page table attributes in linear mapping
-+ * @address: Virtual address in linear mapping.
-+ * @prot:    New page table attribute (PAGE_*)
-+ *
-+ * Change page attributes of a page in the direct mapping. This is a variant
-+ * of change_page_attr() that also works on memory holes that do not have
-+ * mem_map entry (pfn_valid() is false).
-+ *
-+ * See change_page_attr() documentation for more details.
-+ *
-+ * Modules and drivers should use the set_memory_* APIs instead.
-+ */
-+
-+#define HIGH_MAP_START	__START_KERNEL_map
-+#define HIGH_MAP_END	(__START_KERNEL_map + KERNEL_TEXT_SIZE)
-+
-+static int
-+change_page_attr_addr(unsigned long address, pgprot_t mask_set,
-+		      pgprot_t mask_clr)
-+{
-+	unsigned long phys_addr = __pa(address);
-+	unsigned long pfn = phys_addr >> PAGE_SHIFT;
-+	int err;
-+
-+#ifdef CONFIG_X86_64
-+	/*
-+	 * If we are inside the high mapped kernel range, then we
-+	 * fixup the low mapping first. __va() returns the virtual
-+	 * address in the linear mapping:
-+	 */
-+	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
-+		address = (unsigned long) __va(phys_addr);
-+#endif
-+
-+	err = __change_page_attr(address, pfn, mask_set, mask_clr);
-+	if (err)
-+		return err;
-+
-+#ifdef CONFIG_X86_64
-+	/*
-+	 * If the physical address is inside the kernel map, we need
-+	 * to touch the high mapped kernel as well:
-+	 */
-+	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
-+		/*
-+		 * Calc the high mapping address. See __phys_addr()
-+		 * for the non obvious details.
-+		 */
-+		address = phys_addr + HIGH_MAP_START - phys_base;
-+		/* Make sure the kernel mappings stay executable */
-+		pgprot_val(mask_clr) |= _PAGE_NX;
-+
-+		/*
-+		 * Our high aliases are imprecise, because we check
-+		 * everything between 0 and KERNEL_TEXT_SIZE, so do
-+		 * not propagate lookup failures back to users:
-+		 */
-+		__change_page_attr(address, pfn, mask_set, mask_clr);
-+	}
-+#endif
-+	return err;
-+}
-+
-+static int __change_page_attr_set_clr(unsigned long addr, int numpages,
-+				      pgprot_t mask_set, pgprot_t mask_clr)
-+{
-+	unsigned int i;
-+	int ret;
-+
-+	for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) {
-+		ret = change_page_attr_addr(addr, mask_set, mask_clr);
-+		if (ret)
-+			return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static int change_page_attr_set_clr(unsigned long addr, int numpages,
-+				    pgprot_t mask_set, pgprot_t mask_clr)
-+{
-+	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
-+					     mask_clr);
-+
-+	/*
-+	 * On success we use clflush, when the CPU supports it to
-+	 * avoid the wbindv. If the CPU does not support it and in the
-+	 * error case we fall back to cpa_flush_all (which uses
-+	 * wbindv):
-+	 */
-+	if (!ret && cpu_has_clflush)
-+		cpa_flush_range(addr, numpages);
-+	else
-+		cpa_flush_all();
-+
-+	return ret;
-+}
-+
-+static inline int change_page_attr_set(unsigned long addr, int numpages,
-+				       pgprot_t mask)
-+{
-+	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
-+}
-+
-+static inline int change_page_attr_clear(unsigned long addr, int numpages,
-+					 pgprot_t mask)
-+{
-+	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
-+
-+}
-+
-+int set_memory_uc(unsigned long addr, int numpages)
-+{
-+	return change_page_attr_set(addr, numpages,
-+				    __pgprot(_PAGE_PCD | _PAGE_PWT));
-+}
-+EXPORT_SYMBOL(set_memory_uc);
-+
-+int set_memory_wb(unsigned long addr, int numpages)
-+{
-+	return change_page_attr_clear(addr, numpages,
-+				      __pgprot(_PAGE_PCD | _PAGE_PWT));
-+}
-+EXPORT_SYMBOL(set_memory_wb);
-+
-+int set_memory_x(unsigned long addr, int numpages)
-+{
-+	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
-+}
-+EXPORT_SYMBOL(set_memory_x);
-+
-+int set_memory_nx(unsigned long addr, int numpages)
-+{
-+	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
-+}
-+EXPORT_SYMBOL(set_memory_nx);
-+
-+int set_memory_ro(unsigned long addr, int numpages)
-+{
-+	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
-+}
-+
-+int set_memory_rw(unsigned long addr, int numpages)
-+{
-+	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
-+}
-+
-+int set_memory_np(unsigned long addr, int numpages)
-+{
-+	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
-+}
-+
-+int set_pages_uc(struct page *page, int numpages)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+
-+	return set_memory_uc(addr, numpages);
-+}
-+EXPORT_SYMBOL(set_pages_uc);
-+
-+int set_pages_wb(struct page *page, int numpages)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+
-+	return set_memory_wb(addr, numpages);
-+}
-+EXPORT_SYMBOL(set_pages_wb);
-+
-+int set_pages_x(struct page *page, int numpages)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+
-+	return set_memory_x(addr, numpages);
-+}
-+EXPORT_SYMBOL(set_pages_x);
-+
-+int set_pages_nx(struct page *page, int numpages)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+
-+	return set_memory_nx(addr, numpages);
-+}
-+EXPORT_SYMBOL(set_pages_nx);
-+
-+int set_pages_ro(struct page *page, int numpages)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+
-+	return set_memory_ro(addr, numpages);
-+}
-+
-+int set_pages_rw(struct page *page, int numpages)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+
-+	return set_memory_rw(addr, numpages);
-+}
-+
-+
-+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
-+static inline int __change_page_attr_set(unsigned long addr, int numpages,
-+					 pgprot_t mask)
-+{
-+	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
-+}
-+
-+static inline int __change_page_attr_clear(unsigned long addr, int numpages,
-+					   pgprot_t mask)
-+{
-+	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
-+}
-+#endif
-+
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+
-+static int __set_pages_p(struct page *page, int numpages)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+
-+	return __change_page_attr_set(addr, numpages,
-+				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
-+}
-+
-+static int __set_pages_np(struct page *page, int numpages)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+
-+	return __change_page_attr_clear(addr, numpages,
-+					__pgprot(_PAGE_PRESENT));
-+}
-+
-+void kernel_map_pages(struct page *page, int numpages, int enable)
-+{
-+	if (PageHighMem(page))
-+		return;
-+	if (!enable) {
-+		debug_check_no_locks_freed(page_address(page),
-+					   numpages * PAGE_SIZE);
-+	}
-+
-+	/*
-+	 * If page allocator is not up yet then do not call c_p_a():
-+	 */
-+	if (!debug_pagealloc_enabled)
-+		return;
-+
-+	/*
-+	 * The return value is ignored - the calls cannot fail,
-+	 * large pages are disabled at boot time:
-+	 */
-+	if (enable)
-+		__set_pages_p(page, numpages);
-+	else
-+		__set_pages_np(page, numpages);
-+
-+	/*
-+	 * We should perform an IPI and flush all tlbs,
-+	 * but that can deadlock->flush only current cpu:
-+	 */
-+	__flush_tlb_all();
-+}
-+#endif
-+
-+/*
-+ * The testcases use internal knowledge of the implementation that shouldn't
-+ * be exposed to the rest of the kernel. Include these directly here.
-+ */
-+#ifdef CONFIG_CPA_DEBUG
-+#include "pageattr-test.c"
-+#endif
-diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
-deleted file mode 100644
-index 260073c..0000000
---- a/arch/x86/mm/pageattr_32.c
-+++ /dev/null
-@@ -1,278 +0,0 @@
--/* 
-- * Copyright 2002 Andi Kleen, SuSE Labs. 
-- * Thanks to Ben LaHaise for precious feedback.
-- */ 
 -
--#include <linux/mm.h>
--#include <linux/sched.h>
--#include <linux/highmem.h>
--#include <linux/module.h>
--#include <linux/slab.h>
--#include <asm/uaccess.h>
--#include <asm/processor.h>
--#include <asm/tlbflush.h>
--#include <asm/pgalloc.h>
--#include <asm/sections.h>
+-/**
+- * generic_make_request: hand a buffer to its device driver for I/O
+- * @bio:  The bio describing the location in memory and on the device.
+- *
+- * generic_make_request() is used to make I/O requests of block
+- * devices. It is passed a &struct bio, which describes the I/O that needs
+- * to be done.
+- *
+- * generic_make_request() does not return any status.  The
+- * success/failure status of the request, along with notification of
+- * completion, is delivered asynchronously through the bio->bi_end_io
+- * function described (one day) else where.
+- *
+- * The caller of generic_make_request must make sure that bi_io_vec
+- * are set to describe the memory buffer, and that bi_dev and bi_sector are
+- * set to describe the device address, and the
+- * bi_end_io and optionally bi_private are set to describe how
+- * completion notification should be signaled.
+- *
+- * generic_make_request and the drivers it calls may use bi_next if this
+- * bio happens to be merged with someone else, and may change bi_dev and
+- * bi_sector for remaps as it sees fit.  So the values of these fields
+- * should NOT be depended on after the call to generic_make_request.
+- */
+-static inline void __generic_make_request(struct bio *bio)
+-{
+-	struct request_queue *q;
+-	sector_t old_sector;
+-	int ret, nr_sectors = bio_sectors(bio);
+-	dev_t old_dev;
+-	int err = -EIO;
+-
+-	might_sleep();
+-
+-	if (bio_check_eod(bio, nr_sectors))
+-		goto end_io;
+-
+-	/*
+-	 * Resolve the mapping until finished. (drivers are
+-	 * still free to implement/resolve their own stacking
+-	 * by explicitly returning 0)
+-	 *
+-	 * NOTE: we don't repeat the blk_size check for each new device.
+-	 * Stacking drivers are expected to know what they are doing.
+-	 */
+-	old_sector = -1;
+-	old_dev = 0;
+-	do {
+-		char b[BDEVNAME_SIZE];
+-
+-		q = bdev_get_queue(bio->bi_bdev);
+-		if (!q) {
+-			printk(KERN_ERR
+-			       "generic_make_request: Trying to access "
+-				"nonexistent block-device %s (%Lu)\n",
+-				bdevname(bio->bi_bdev, b),
+-				(long long) bio->bi_sector);
+-end_io:
+-			bio_endio(bio, err);
+-			break;
+-		}
+-
+-		if (unlikely(nr_sectors > q->max_hw_sectors)) {
+-			printk("bio too big device %s (%u > %u)\n", 
+-				bdevname(bio->bi_bdev, b),
+-				bio_sectors(bio),
+-				q->max_hw_sectors);
+-			goto end_io;
+-		}
+-
+-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+-			goto end_io;
+-
+-		if (should_fail_request(bio))
+-			goto end_io;
+-
+-		/*
+-		 * If this device has partitions, remap block n
+-		 * of partition p to block n+start(p) of the disk.
+-		 */
+-		blk_partition_remap(bio);
+-
+-		if (old_sector != -1)
+-			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+-					    old_sector);
+-
+-		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+-
+-		old_sector = bio->bi_sector;
+-		old_dev = bio->bi_bdev->bd_dev;
+-
+-		if (bio_check_eod(bio, nr_sectors))
+-			goto end_io;
+-		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+-			err = -EOPNOTSUPP;
+-			goto end_io;
+-		}
+-
+-		ret = q->make_request_fn(q, bio);
+-	} while (ret);
+-}
+-
+-/*
+- * We only want one ->make_request_fn to be active at a time,
+- * else stack usage with stacked devices could be a problem.
+- * So use current->bio_{list,tail} to keep a list of requests
+- * submited by a make_request_fn function.
+- * current->bio_tail is also used as a flag to say if
+- * generic_make_request is currently active in this task or not.
+- * If it is NULL, then no make_request is active.  If it is non-NULL,
+- * then a make_request is active, and new requests should be added
+- * at the tail
+- */
+-void generic_make_request(struct bio *bio)
+-{
+-	if (current->bio_tail) {
+-		/* make_request is active */
+-		*(current->bio_tail) = bio;
+-		bio->bi_next = NULL;
+-		current->bio_tail = &bio->bi_next;
+-		return;
+-	}
+-	/* following loop may be a bit non-obvious, and so deserves some
+-	 * explanation.
+-	 * Before entering the loop, bio->bi_next is NULL (as all callers
+-	 * ensure that) so we have a list with a single bio.
+-	 * We pretend that we have just taken it off a longer list, so
+-	 * we assign bio_list to the next (which is NULL) and bio_tail
+-	 * to &bio_list, thus initialising the bio_list of new bios to be
+-	 * added.  __generic_make_request may indeed add some more bios
+-	 * through a recursive call to generic_make_request.  If it
+-	 * did, we find a non-NULL value in bio_list and re-enter the loop
+-	 * from the top.  In this case we really did just take the bio
+-	 * of the top of the list (no pretending) and so fixup bio_list and
+-	 * bio_tail or bi_next, and call into __generic_make_request again.
+-	 *
+-	 * The loop was structured like this to make only one call to
+-	 * __generic_make_request (which is important as it is large and
+-	 * inlined) and to keep the structure simple.
+-	 */
+-	BUG_ON(bio->bi_next);
+-	do {
+-		current->bio_list = bio->bi_next;
+-		if (bio->bi_next == NULL)
+-			current->bio_tail = &current->bio_list;
+-		else
+-			bio->bi_next = NULL;
+-		__generic_make_request(bio);
+-		bio = current->bio_list;
+-	} while (bio);
+-	current->bio_tail = NULL; /* deactivate */
+-}
+-
+-EXPORT_SYMBOL(generic_make_request);
+-
+-/**
+- * submit_bio: submit a bio to the block device layer for I/O
+- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+- * @bio: The &struct bio which describes the I/O
+- *
+- * submit_bio() is very similar in purpose to generic_make_request(), and
+- * uses that function to do most of the work. Both are fairly rough
+- * interfaces, @bio must be presetup and ready for I/O.
+- *
+- */
+-void submit_bio(int rw, struct bio *bio)
+-{
+-	int count = bio_sectors(bio);
+-
+-	bio->bi_rw |= rw;
+-
+-	/*
+-	 * If it's a regular read/write or a barrier with data attached,
+-	 * go through the normal accounting stuff before submission.
+-	 */
+-	if (!bio_empty_barrier(bio)) {
 -
--static DEFINE_SPINLOCK(cpa_lock);
--static struct list_head df_list = LIST_HEAD_INIT(df_list);
+-		BIO_BUG_ON(!bio->bi_size);
+-		BIO_BUG_ON(!bio->bi_io_vec);
 -
+-		if (rw & WRITE) {
+-			count_vm_events(PGPGOUT, count);
+-		} else {
+-			task_io_account_read(bio->bi_size);
+-			count_vm_events(PGPGIN, count);
+-		}
 -
--pte_t *lookup_address(unsigned long address) 
--{ 
--	pgd_t *pgd = pgd_offset_k(address);
--	pud_t *pud;
--	pmd_t *pmd;
--	if (pgd_none(*pgd))
--		return NULL;
--	pud = pud_offset(pgd, address);
--	if (pud_none(*pud))
--		return NULL;
--	pmd = pmd_offset(pud, address);
--	if (pmd_none(*pmd))
--		return NULL;
--	if (pmd_large(*pmd))
--		return (pte_t *)pmd;
--        return pte_offset_kernel(pmd, address);
--} 
+-		if (unlikely(block_dump)) {
+-			char b[BDEVNAME_SIZE];
+-			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+-			current->comm, task_pid_nr(current),
+-				(rw & WRITE) ? "WRITE" : "READ",
+-				(unsigned long long)bio->bi_sector,
+-				bdevname(bio->bi_bdev,b));
+-		}
+-	}
 -
--static struct page *split_large_page(unsigned long address, pgprot_t prot,
--					pgprot_t ref_prot)
--{ 
--	int i; 
--	unsigned long addr;
--	struct page *base;
--	pte_t *pbase;
+-	generic_make_request(bio);
+-}
 -
--	spin_unlock_irq(&cpa_lock);
--	base = alloc_pages(GFP_KERNEL, 0);
--	spin_lock_irq(&cpa_lock);
--	if (!base) 
--		return NULL;
+-EXPORT_SYMBOL(submit_bio);
 -
--	/*
--	 * page_private is used to track the number of entries in
--	 * the page table page that have non standard attributes.
--	 */
--	SetPagePrivate(base);
--	page_private(base) = 0;
+-static void blk_recalc_rq_sectors(struct request *rq, int nsect)
+-{
+-	if (blk_fs_request(rq)) {
+-		rq->hard_sector += nsect;
+-		rq->hard_nr_sectors -= nsect;
 -
--	address = __pa(address);
--	addr = address & LARGE_PAGE_MASK; 
--	pbase = (pte_t *)page_address(base);
--	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
--	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
--               set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
--                                          addr == address ? prot : ref_prot));
--	}
--	return base;
--} 
+-		/*
+-		 * Move the I/O submission pointers ahead if required.
+-		 */
+-		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
+-		    (rq->sector <= rq->hard_sector)) {
+-			rq->sector = rq->hard_sector;
+-			rq->nr_sectors = rq->hard_nr_sectors;
+-			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
+-			rq->current_nr_sectors = rq->hard_cur_sectors;
+-			rq->buffer = bio_data(rq->bio);
+-		}
 -
--static void cache_flush_page(struct page *p)
--{ 
--	void *adr = page_address(p);
--	int i;
--	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
--		clflush(adr+i);
+-		/*
+-		 * if total number of sectors is less than the first segment
+-		 * size, something has gone terribly wrong
+-		 */
+-		if (rq->nr_sectors < rq->current_nr_sectors) {
+-			printk("blk: request botched\n");
+-			rq->nr_sectors = rq->current_nr_sectors;
+-		}
+-	}
 -}
 -
--static void flush_kernel_map(void *arg)
+-static int __end_that_request_first(struct request *req, int uptodate,
+-				    int nr_bytes)
 -{
--	struct list_head *lh = (struct list_head *)arg;
--	struct page *p;
+-	int total_bytes, bio_nbytes, error, next_idx = 0;
+-	struct bio *bio;
 -
--	/* High level code is not ready for clflush yet */
--	if (0 && cpu_has_clflush) {
--		list_for_each_entry (p, lh, lru)
--			cache_flush_page(p);
--	} else if (boot_cpu_data.x86_model >= 4)
--		wbinvd();
+-	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
 -
--	/* Flush all to work around Errata in early athlons regarding 
--	 * large page flushing. 
+-	/*
+-	 * extend uptodate bool to allow < 0 value to be direct io error
 -	 */
--	__flush_tlb_all(); 	
--}
+-	error = 0;
+-	if (end_io_error(uptodate))
+-		error = !uptodate ? -EIO : uptodate;
 -
--static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) 
--{ 
--	struct page *page;
--	unsigned long flags;
+-	/*
+-	 * for a REQ_BLOCK_PC request, we want to carry any eventual
+-	 * sense key with us all the way through
+-	 */
+-	if (!blk_pc_request(req))
+-		req->errors = 0;
 -
--	set_pte_atomic(kpte, pte); 	/* change init_mm */
--	if (SHARED_KERNEL_PMD)
--		return;
+-	if (!uptodate) {
+-		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
+-			printk("end_request: I/O error, dev %s, sector %llu\n",
+-				req->rq_disk ? req->rq_disk->disk_name : "?",
+-				(unsigned long long)req->sector);
+-	}
 -
--	spin_lock_irqsave(&pgd_lock, flags);
--	for (page = pgd_list; page; page = (struct page *)page->index) {
--		pgd_t *pgd;
--		pud_t *pud;
--		pmd_t *pmd;
--		pgd = (pgd_t *)page_address(page) + pgd_index(address);
--		pud = pud_offset(pgd, address);
--		pmd = pmd_offset(pud, address);
--		set_pte_atomic((pte_t *)pmd, pte);
+-	if (blk_fs_request(req) && req->rq_disk) {
+-		const int rw = rq_data_dir(req);
+-
+-		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
 -	}
--	spin_unlock_irqrestore(&pgd_lock, flags);
--}
 -
--/* 
-- * No more special protections in this 2/4MB area - revert to a
-- * large page again. 
-- */
--static inline void revert_page(struct page *kpte_page, unsigned long address)
--{
--	pgprot_t ref_prot;
--	pte_t *linear;
+-	total_bytes = bio_nbytes = 0;
+-	while ((bio = req->bio) != NULL) {
+-		int nbytes;
 -
--	ref_prot =
--	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
--		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
+-		/*
+-		 * For an empty barrier request, the low level driver must
+-		 * store a potential error location in ->sector. We pass
+-		 * that back up in ->bi_sector.
+-		 */
+-		if (blk_empty_barrier(req))
+-			bio->bi_sector = req->sector;
 -
--	linear = (pte_t *)
--		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
--	set_pmd_pte(linear,  address,
--		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
--			    ref_prot));
--}
+-		if (nr_bytes >= bio->bi_size) {
+-			req->bio = bio->bi_next;
+-			nbytes = bio->bi_size;
+-			req_bio_endio(req, bio, nbytes, error);
+-			next_idx = 0;
+-			bio_nbytes = 0;
+-		} else {
+-			int idx = bio->bi_idx + next_idx;
 -
--static inline void save_page(struct page *kpte_page)
--{
--	if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
--		list_add(&kpte_page->lru, &df_list);
--}
+-			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+-				blk_dump_rq_flags(req, "__end_that");
+-				printk("%s: bio idx %d >= vcnt %d\n",
+-						__FUNCTION__,
+-						bio->bi_idx, bio->bi_vcnt);
+-				break;
+-			}
 -
--static int
--__change_page_attr(struct page *page, pgprot_t prot)
--{ 
--	pte_t *kpte; 
--	unsigned long address;
--	struct page *kpte_page;
+-			nbytes = bio_iovec_idx(bio, idx)->bv_len;
+-			BIO_BUG_ON(nbytes > bio->bi_size);
 -
--	BUG_ON(PageHighMem(page));
--	address = (unsigned long)page_address(page);
+-			/*
+-			 * not a complete bvec done
+-			 */
+-			if (unlikely(nbytes > nr_bytes)) {
+-				bio_nbytes += nr_bytes;
+-				total_bytes += nr_bytes;
+-				break;
+-			}
 -
--	kpte = lookup_address(address);
--	if (!kpte)
--		return -EINVAL;
--	kpte_page = virt_to_page(kpte);
--	BUG_ON(PageLRU(kpte_page));
--	BUG_ON(PageCompound(kpte_page));
+-			/*
+-			 * advance to the next vector
+-			 */
+-			next_idx++;
+-			bio_nbytes += nbytes;
+-		}
 -
--	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
--		if (!pte_huge(*kpte)) {
--			set_pte_atomic(kpte, mk_pte(page, prot)); 
--		} else {
--			pgprot_t ref_prot;
--			struct page *split;
+-		total_bytes += nbytes;
+-		nr_bytes -= nbytes;
 -
--			ref_prot =
--			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
--				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
--			split = split_large_page(address, prot, ref_prot);
--			if (!split)
--				return -ENOMEM;
--			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
--			kpte_page = split;
+-		if ((bio = req->bio)) {
+-			/*
+-			 * end more in this run, or just return 'not-done'
+-			 */
+-			if (unlikely(nr_bytes <= 0))
+-				break;
 -		}
--		page_private(kpte_page)++;
--	} else if (!pte_huge(*kpte)) {
--		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
--		BUG_ON(page_private(kpte_page) == 0);
--		page_private(kpte_page)--;
--	} else
--		BUG();
+-	}
 -
 -	/*
--	 * If the pte was reserved, it means it was created at boot
--	 * time (not via split_large_page) and in turn we must not
--	 * replace it with a largepage.
+-	 * completely done
 -	 */
+-	if (!req->bio)
+-		return 0;
 -
--	save_page(kpte_page);
--	if (!PageReserved(kpte_page)) {
--		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
--			paravirt_release_pt(page_to_pfn(kpte_page));
--			revert_page(kpte_page, address);
--		}
+-	/*
+-	 * if the request wasn't completed, update state
+-	 */
+-	if (bio_nbytes) {
+-		req_bio_endio(req, bio, bio_nbytes, error);
+-		bio->bi_idx += next_idx;
+-		bio_iovec(bio)->bv_offset += nr_bytes;
+-		bio_iovec(bio)->bv_len -= nr_bytes;
 -	}
--	return 0;
--} 
 -
--static inline void flush_map(struct list_head *l)
+-	blk_recalc_rq_sectors(req, total_bytes >> 9);
+-	blk_recalc_rq_segments(req);
+-	return 1;
+-}
+-
+-/**
+- * end_that_request_first - end I/O on a request
+- * @req:      the request being processed
+- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+- * @nr_sectors: number of sectors to end I/O on
+- *
+- * Description:
+- *     Ends I/O on a number of sectors attached to @req, and sets it up
+- *     for the next range of segments (if any) in the cluster.
+- *
+- * Return:
+- *     0 - we are done with this request, call end_that_request_last()
+- *     1 - still buffers pending for this request
+- **/
+-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
 -{
--	on_each_cpu(flush_kernel_map, l, 1, 1);
+-	return __end_that_request_first(req, uptodate, nr_sectors << 9);
 -}
 -
--/*
-- * Change the page attributes of an page in the linear mapping.
+-EXPORT_SYMBOL(end_that_request_first);
+-
+-/**
+- * end_that_request_chunk - end I/O on a request
+- * @req:      the request being processed
+- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+- * @nr_bytes: number of bytes to complete
 - *
-- * This should be used when a page is mapped with a different caching policy
-- * than write-back somewhere - some CPUs do not like it when mappings with
-- * different caching policies exist. This changes the page attributes of the
-- * in kernel linear mapping too.
-- * 
-- * The caller needs to ensure that there are no conflicting mappings elsewhere.
-- * This function only deals with the kernel linear map.
-- * 
-- * Caller must call global_flush_tlb() after this.
+- * Description:
+- *     Ends I/O on a number of bytes attached to @req, and sets it up
+- *     for the next range of segments (if any). Like end_that_request_first(),
+- *     but deals with bytes instead of sectors.
+- *
+- * Return:
+- *     0 - we are done with this request, call end_that_request_last()
+- *     1 - still buffers pending for this request
+- **/
+-int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
+-{
+-	return __end_that_request_first(req, uptodate, nr_bytes);
+-}
+-
+-EXPORT_SYMBOL(end_that_request_chunk);
+-
+-/*
+- * splice the completion data to a local structure and hand off to
+- * process_completion_queue() to complete the requests
 - */
--int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+-static void blk_done_softirq(struct softirq_action *h)
 -{
--	int err = 0; 
--	int i; 
--	unsigned long flags;
+-	struct list_head *cpu_list, local_list;
 -
--	spin_lock_irqsave(&cpa_lock, flags);
--	for (i = 0; i < numpages; i++, page++) { 
--		err = __change_page_attr(page, prot);
--		if (err) 
--			break; 
--	} 	
--	spin_unlock_irqrestore(&cpa_lock, flags);
--	return err;
+-	local_irq_disable();
+-	cpu_list = &__get_cpu_var(blk_cpu_done);
+-	list_replace_init(cpu_list, &local_list);
+-	local_irq_enable();
+-
+-	while (!list_empty(&local_list)) {
+-		struct request *rq = list_entry(local_list.next, struct request, donelist);
+-
+-		list_del_init(&rq->donelist);
+-		rq->q->softirq_done_fn(rq);
+-	}
 -}
 -
--void global_flush_tlb(void)
+-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
+-			  void *hcpu)
 -{
--	struct list_head l;
--	struct page *pg, *next;
--
--	BUG_ON(irqs_disabled());
+-	/*
+-	 * If a CPU goes away, splice its entries to the current CPU
+-	 * and trigger a run of the softirq
+-	 */
+-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+-		int cpu = (unsigned long) hcpu;
 -
--	spin_lock_irq(&cpa_lock);
--	list_replace_init(&df_list, &l);
--	spin_unlock_irq(&cpa_lock);
--	flush_map(&l);
--	list_for_each_entry_safe(pg, next, &l, lru) {
--		list_del(&pg->lru);
--		clear_bit(PG_arch_1, &pg->flags);
--		if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
--			continue;
--		ClearPagePrivate(pg);
--		__free_page(pg);
+-		local_irq_disable();
+-		list_splice_init(&per_cpu(blk_cpu_done, cpu),
+-				 &__get_cpu_var(blk_cpu_done));
+-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+-		local_irq_enable();
 -	}
+-
+-	return NOTIFY_OK;
 -}
 -
--#ifdef CONFIG_DEBUG_PAGEALLOC
--void kernel_map_pages(struct page *page, int numpages, int enable)
+-
+-static struct notifier_block blk_cpu_notifier __cpuinitdata = {
+-	.notifier_call	= blk_cpu_notify,
+-};
+-
+-/**
+- * blk_complete_request - end I/O on a request
+- * @req:      the request being processed
+- *
+- * Description:
+- *     Ends all I/O on a request. It does not handle partial completions,
+- *     unless the driver actually implements this in its completion callback
+- *     through requeueing. The actual completion happens out-of-order,
+- *     through a softirq handler. The user must have registered a completion
+- *     callback through blk_queue_softirq_done().
+- **/
+-
+-void blk_complete_request(struct request *req)
 -{
--	if (PageHighMem(page))
--		return;
--	if (!enable)
--		debug_check_no_locks_freed(page_address(page),
--					   numpages * PAGE_SIZE);
+-	struct list_head *cpu_list;
+-	unsigned long flags;
 -
--	/* the return value is ignored - the calls cannot fail,
--	 * large pages are disabled at boot time.
--	 */
--	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
--	/* we should perform an IPI and flush all tlbs,
--	 * but that can deadlock->flush only current cpu.
--	 */
--	__flush_tlb_all();
+-	BUG_ON(!req->q->softirq_done_fn);
+-		
+-	local_irq_save(flags);
+-
+-	cpu_list = &__get_cpu_var(blk_cpu_done);
+-	list_add_tail(&req->donelist, cpu_list);
+-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+-
+-	local_irq_restore(flags);
 -}
--#endif
 -
--EXPORT_SYMBOL(change_page_attr);
--EXPORT_SYMBOL(global_flush_tlb);
-diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
-deleted file mode 100644
-index c40afba..0000000
---- a/arch/x86/mm/pageattr_64.c
-+++ /dev/null
-@@ -1,255 +0,0 @@
--/* 
-- * Copyright 2002 Andi Kleen, SuSE Labs. 
-- * Thanks to Ben LaHaise for precious feedback.
-- */ 
+-EXPORT_SYMBOL(blk_complete_request);
+-	
+-/*
+- * queue lock must be held
+- */
+-void end_that_request_last(struct request *req, int uptodate)
+-{
+-	struct gendisk *disk = req->rq_disk;
+-	int error;
 -
--#include <linux/mm.h>
--#include <linux/sched.h>
--#include <linux/highmem.h>
--#include <linux/module.h>
--#include <linux/slab.h>
--#include <asm/uaccess.h>
--#include <asm/processor.h>
--#include <asm/tlbflush.h>
--#include <asm/io.h>
+-	/*
+-	 * extend uptodate bool to allow < 0 value to be direct io error
+-	 */
+-	error = 0;
+-	if (end_io_error(uptodate))
+-		error = !uptodate ? -EIO : uptodate;
 -
--pte_t *lookup_address(unsigned long address)
--{ 
--	pgd_t *pgd = pgd_offset_k(address);
--	pud_t *pud;
--	pmd_t *pmd;
--	pte_t *pte;
--	if (pgd_none(*pgd))
--		return NULL;
--	pud = pud_offset(pgd, address);
--	if (!pud_present(*pud))
--		return NULL; 
--	pmd = pmd_offset(pud, address);
--	if (!pmd_present(*pmd))
--		return NULL; 
--	if (pmd_large(*pmd))
--		return (pte_t *)pmd;
--	pte = pte_offset_kernel(pmd, address);
--	if (pte && !pte_present(*pte))
--		pte = NULL; 
--	return pte;
--} 
+-	if (unlikely(laptop_mode) && blk_fs_request(req))
+-		laptop_io_completion();
 -
--static struct page *split_large_page(unsigned long address, pgprot_t prot,
--				     pgprot_t ref_prot)
--{ 
--	int i; 
--	unsigned long addr;
--	struct page *base = alloc_pages(GFP_KERNEL, 0);
--	pte_t *pbase;
--	if (!base) 
--		return NULL;
 -	/*
--	 * page_private is used to track the number of entries in
--	 * the page table page have non standard attributes.
+-	 * Account IO completion.  bar_rq isn't accounted as a normal
+-	 * IO on queueing nor completion.  Accounting the containing
+-	 * request is enough.
 -	 */
--	SetPagePrivate(base);
--	page_private(base) = 0;
+-	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
+-		unsigned long duration = jiffies - req->start_time;
+-		const int rw = rq_data_dir(req);
 -
--	address = __pa(address);
--	addr = address & LARGE_PAGE_MASK; 
--	pbase = (pte_t *)page_address(base);
--	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
--		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
--				   addr == address ? prot : ref_prot);
+-		__disk_stat_inc(disk, ios[rw]);
+-		__disk_stat_add(disk, ticks[rw], duration);
+-		disk_round_stats(disk);
+-		disk->in_flight--;
 -	}
--	return base;
--} 
+-	if (req->end_io)
+-		req->end_io(req, error);
+-	else
+-		__blk_put_request(req->q, req);
+-}
 -
--void clflush_cache_range(void *adr, int size)
+-EXPORT_SYMBOL(end_that_request_last);
+-
+-static inline void __end_request(struct request *rq, int uptodate,
+-				 unsigned int nr_bytes, int dequeue)
 -{
--	int i;
--	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
--		clflush(adr+i);
+-	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+-		if (dequeue)
+-			blkdev_dequeue_request(rq);
+-		add_disk_randomness(rq->rq_disk);
+-		end_that_request_last(rq, uptodate);
+-	}
 -}
 -
--static void flush_kernel_map(void *arg)
+-static unsigned int rq_byte_size(struct request *rq)
 -{
--	struct list_head *l = (struct list_head *)arg;
--	struct page *pg;
+-	if (blk_fs_request(rq))
+-		return rq->hard_nr_sectors << 9;
 -
--	/* When clflush is available always use it because it is
--	   much cheaper than WBINVD. */
--	/* clflush is still broken. Disable for now. */
--	if (1 || !cpu_has_clflush)
--		asm volatile("wbinvd" ::: "memory");
--	else list_for_each_entry(pg, l, lru) {
--		void *adr = page_address(pg);
--		clflush_cache_range(adr, PAGE_SIZE);
--	}
--	__flush_tlb_all();
+-	return rq->data_len;
 -}
 -
--static inline void flush_map(struct list_head *l)
--{	
--	on_each_cpu(flush_kernel_map, l, 1, 1);
+-/**
+- * end_queued_request - end all I/O on a queued request
+- * @rq:		the request being processed
+- * @uptodate:	error value or 0/1 uptodate flag
+- *
+- * Description:
+- *     Ends all I/O on a request, and removes it from the block layer queues.
+- *     Not suitable for normal IO completion, unless the driver still has
+- *     the request attached to the block layer.
+- *
+- **/
+-void end_queued_request(struct request *rq, int uptodate)
+-{
+-	__end_request(rq, uptodate, rq_byte_size(rq), 1);
 -}
+-EXPORT_SYMBOL(end_queued_request);
 -
--static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
+-/**
+- * end_dequeued_request - end all I/O on a dequeued request
+- * @rq:		the request being processed
+- * @uptodate:	error value or 0/1 uptodate flag
+- *
+- * Description:
+- *     Ends all I/O on a request. The request must already have been
+- *     dequeued using blkdev_dequeue_request(), as is normally the case
+- *     for most drivers.
+- *
+- **/
+-void end_dequeued_request(struct request *rq, int uptodate)
+-{
+-	__end_request(rq, uptodate, rq_byte_size(rq), 0);
+-}
+-EXPORT_SYMBOL(end_dequeued_request);
 -
--static inline void save_page(struct page *fpage)
+-
+-/**
+- * end_request - end I/O on the current segment of the request
+- * @req:	the request being processed
+- * @uptodate:	error value or 0/1 uptodate flag
+- *
+- * Description:
+- *     Ends I/O on the current segment of a request. If that is the only
+- *     remaining segment, the request is also completed and freed.
+- *
+- *     This is a remnant of how older block drivers handled IO completions.
+- *     Modern drivers typically end IO on the full request in one go, unless
+- *     they have a residual value to account for. For that case this function
+- *     isn't really useful, unless the residual just happens to be the
+- *     full current segment. In other words, don't use this function in new
+- *     code. Either use end_request_completely(), or the
+- *     end_that_request_chunk() (along with end_that_request_last()) for
+- *     partial completions.
+- *
+- **/
+-void end_request(struct request *req, int uptodate)
 -{
--	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
--		list_add(&fpage->lru, &deferred_pages);
+-	__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
 -}
+-EXPORT_SYMBOL(end_request);
 -
--/* 
-- * No more special protections in this 2/4MB area - revert to a
-- * large page again. 
+-static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+-			    struct bio *bio)
+-{
+-	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+-	rq->cmd_flags |= (bio->bi_rw & 3);
+-
+-	rq->nr_phys_segments = bio_phys_segments(q, bio);
+-	rq->nr_hw_segments = bio_hw_segments(q, bio);
+-	rq->current_nr_sectors = bio_cur_sectors(bio);
+-	rq->hard_cur_sectors = rq->current_nr_sectors;
+-	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
+-	rq->buffer = bio_data(bio);
+-	rq->data_len = bio->bi_size;
+-
+-	rq->bio = rq->biotail = bio;
+-
+-	if (bio->bi_bdev)
+-		rq->rq_disk = bio->bi_bdev->bd_disk;
+-}
+-
+-int kblockd_schedule_work(struct work_struct *work)
+-{
+-	return queue_work(kblockd_workqueue, work);
+-}
+-
+-EXPORT_SYMBOL(kblockd_schedule_work);
+-
+-void kblockd_flush_work(struct work_struct *work)
+-{
+-	cancel_work_sync(work);
+-}
+-EXPORT_SYMBOL(kblockd_flush_work);
+-
+-int __init blk_dev_init(void)
+-{
+-	int i;
+-
+-	kblockd_workqueue = create_workqueue("kblockd");
+-	if (!kblockd_workqueue)
+-		panic("Failed to create kblockd\n");
+-
+-	request_cachep = kmem_cache_create("blkdev_requests",
+-			sizeof(struct request), 0, SLAB_PANIC, NULL);
+-
+-	requestq_cachep = kmem_cache_create("blkdev_queue",
+-			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+-
+-	iocontext_cachep = kmem_cache_create("blkdev_ioc",
+-			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
+-
+-	for_each_possible_cpu(i)
+-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+-
+-	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+-	register_hotcpu_notifier(&blk_cpu_notifier);
+-
+-	blk_max_low_pfn = max_low_pfn - 1;
+-	blk_max_pfn = max_pfn - 1;
+-
+-	return 0;
+-}
+-
+-/*
+- * IO Context helper functions
 - */
--static void revert_page(unsigned long address, pgprot_t ref_prot)
+-void put_io_context(struct io_context *ioc)
 -{
--	pgd_t *pgd;
--	pud_t *pud;
--	pmd_t *pmd;
--	pte_t large_pte;
--	unsigned long pfn;
+-	if (ioc == NULL)
+-		return;
 -
--	pgd = pgd_offset_k(address);
--	BUG_ON(pgd_none(*pgd));
--	pud = pud_offset(pgd,address);
--	BUG_ON(pud_none(*pud));
--	pmd = pmd_offset(pud, address);
--	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
--	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
--	large_pte = pfn_pte(pfn, ref_prot);
--	large_pte = pte_mkhuge(large_pte);
--	set_pte((pte_t *)pmd, large_pte);
--}      
+-	BUG_ON(atomic_read(&ioc->refcount) == 0);
 -
--static int
--__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
--				   pgprot_t ref_prot)
--{ 
--	pte_t *kpte; 
--	struct page *kpte_page;
--	pgprot_t ref_prot2;
+-	if (atomic_dec_and_test(&ioc->refcount)) {
+-		struct cfq_io_context *cic;
 -
--	kpte = lookup_address(address);
--	if (!kpte) return 0;
--	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
--	BUG_ON(PageLRU(kpte_page));
--	BUG_ON(PageCompound(kpte_page));
--	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
--		if (!pte_huge(*kpte)) {
--			set_pte(kpte, pfn_pte(pfn, prot));
--		} else {
-- 			/*
--			 * split_large_page will take the reference for this
--			 * change_page_attr on the split page.
-- 			 */
--			struct page *split;
--			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
--			split = split_large_page(address, prot, ref_prot2);
--			if (!split)
--				return -ENOMEM;
--			pgprot_val(ref_prot2) &= ~_PAGE_NX;
--			set_pte(kpte, mk_pte(split, ref_prot2));
--			kpte_page = split;
+-		rcu_read_lock();
+-		if (ioc->aic && ioc->aic->dtor)
+-			ioc->aic->dtor(ioc->aic);
+-		if (ioc->cic_root.rb_node != NULL) {
+-			struct rb_node *n = rb_first(&ioc->cic_root);
+-
+-			cic = rb_entry(n, struct cfq_io_context, rb_node);
+-			cic->dtor(ioc);
 -		}
--		page_private(kpte_page)++;
--	} else if (!pte_huge(*kpte)) {
--		set_pte(kpte, pfn_pte(pfn, ref_prot));
--		BUG_ON(page_private(kpte_page) == 0);
--		page_private(kpte_page)--;
--	} else
--		BUG();
+-		rcu_read_unlock();
 -
--	/* on x86-64 the direct mapping set at boot is not using 4k pages */
-- 	BUG_ON(PageReserved(kpte_page));
+-		kmem_cache_free(iocontext_cachep, ioc);
+-	}
+-}
+-EXPORT_SYMBOL(put_io_context);
 -
--	save_page(kpte_page);
--	if (page_private(kpte_page) == 0)
--		revert_page(address, ref_prot);
--	return 0;
--} 
+-/* Called by the exitting task */
+-void exit_io_context(void)
+-{
+-	struct io_context *ioc;
+-	struct cfq_io_context *cic;
+-
+-	task_lock(current);
+-	ioc = current->io_context;
+-	current->io_context = NULL;
+-	task_unlock(current);
+-
+-	ioc->task = NULL;
+-	if (ioc->aic && ioc->aic->exit)
+-		ioc->aic->exit(ioc->aic);
+-	if (ioc->cic_root.rb_node != NULL) {
+-		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+-		cic->exit(ioc);
+-	}
+-
+-	put_io_context(ioc);
+-}
 -
 -/*
-- * Change the page attributes of an page in the linear mapping.
+- * If the current task has no IO context then create one and initialise it.
+- * Otherwise, return its existing IO context.
 - *
-- * This should be used when a page is mapped with a different caching policy
-- * than write-back somewhere - some CPUs do not like it when mappings with
-- * different caching policies exist. This changes the page attributes of the
-- * in kernel linear mapping too.
-- * 
-- * The caller needs to ensure that there are no conflicting mappings elsewhere.
-- * This function only deals with the kernel linear map.
-- * 
-- * Caller must call global_flush_tlb() after this.
+- * This returned IO context doesn't have a specifically elevated refcount,
+- * but since the current task itself holds a reference, the context can be
+- * used in general code, so long as it stays within `current` context.
 - */
--int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
+-static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 -{
--	int err = 0, kernel_map = 0;
--	int i; 
+-	struct task_struct *tsk = current;
+-	struct io_context *ret;
 -
--	if (address >= __START_KERNEL_map
--	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
--		address = (unsigned long)__va(__pa(address));
--		kernel_map = 1;
+-	ret = tsk->io_context;
+-	if (likely(ret))
+-		return ret;
+-
+-	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
+-	if (ret) {
+-		atomic_set(&ret->refcount, 1);
+-		ret->task = current;
+-		ret->ioprio_changed = 0;
+-		ret->last_waited = jiffies; /* doesn't matter... */
+-		ret->nr_batch_requests = 0; /* because this is 0 */
+-		ret->aic = NULL;
+-		ret->cic_root.rb_node = NULL;
+-		ret->ioc_data = NULL;
+-		/* make sure set_task_ioprio() sees the settings above */
+-		smp_wmb();
+-		tsk->io_context = ret;
 -	}
 -
--	down_write(&init_mm.mmap_sem);
--	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
--		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+-	return ret;
+-}
 -
--		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
--			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
--			if (err)
--				break;
--		}
--		/* Handle kernel mapping too which aliases part of the
--		 * lowmem */
--		if (__pa(address) < KERNEL_TEXT_SIZE) {
--			unsigned long addr2;
--			pgprot_t prot2;
--			addr2 = __START_KERNEL_map + __pa(address);
--			/* Make sure the kernel mappings stay executable */
--			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
--			err = __change_page_attr(addr2, pfn, prot2,
--						 PAGE_KERNEL_EXEC);
--		} 
--	} 	
--	up_write(&init_mm.mmap_sem); 
--	return err;
+-/*
+- * If the current task has no IO context then create one and initialise it.
+- * If it does have a context, take a ref on it.
+- *
+- * This is always called in the context of the task which submitted the I/O.
+- */
+-struct io_context *get_io_context(gfp_t gfp_flags, int node)
+-{
+-	struct io_context *ret;
+-	ret = current_io_context(gfp_flags, node);
+-	if (likely(ret))
+-		atomic_inc(&ret->refcount);
+-	return ret;
 -}
+-EXPORT_SYMBOL(get_io_context);
 -
--/* Don't call this for MMIO areas that may not have a mem_map entry */
--int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 -{
--	unsigned long addr = (unsigned long)page_address(page);
--	return change_page_attr_addr(addr, numpages, prot);
+-	struct io_context *src = *psrc;
+-	struct io_context *dst = *pdst;
+-
+-	if (src) {
+-		BUG_ON(atomic_read(&src->refcount) == 0);
+-		atomic_inc(&src->refcount);
+-		put_io_context(dst);
+-		*pdst = src;
+-	}
 -}
+-EXPORT_SYMBOL(copy_io_context);
 -
--void global_flush_tlb(void)
--{ 
--	struct page *pg, *next;
--	struct list_head l;
+-void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
+-{
+-	struct io_context *temp;
+-	temp = *ioc1;
+-	*ioc1 = *ioc2;
+-	*ioc2 = temp;
+-}
+-EXPORT_SYMBOL(swap_io_context);
 -
--	/*
--	 * Write-protect the semaphore, to exclude two contexts
--	 * doing a list_replace_init() call in parallel and to
--	 * exclude new additions to the deferred_pages list:
--	 */
--	down_write(&init_mm.mmap_sem);
--	list_replace_init(&deferred_pages, &l);
--	up_write(&init_mm.mmap_sem);
+-/*
+- * sysfs parts below
+- */
+-struct queue_sysfs_entry {
+-	struct attribute attr;
+-	ssize_t (*show)(struct request_queue *, char *);
+-	ssize_t (*store)(struct request_queue *, const char *, size_t);
+-};
 -
--	flush_map(&l);
+-static ssize_t
+-queue_var_show(unsigned int var, char *page)
+-{
+-	return sprintf(page, "%d\n", var);
+-}
 -
--	list_for_each_entry_safe(pg, next, &l, lru) {
--		list_del(&pg->lru);
--		clear_bit(PG_arch_1, &pg->flags);
--		if (page_private(pg) != 0)
--			continue;
--		ClearPagePrivate(pg);
--		__free_page(pg);
--	} 
--} 
+-static ssize_t
+-queue_var_store(unsigned long *var, const char *page, size_t count)
+-{
+-	char *p = (char *) page;
 -
--EXPORT_SYMBOL(change_page_attr);
--EXPORT_SYMBOL(global_flush_tlb);
-diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
-index be61a1d..2ae5999 100644
---- a/arch/x86/mm/pgtable_32.c
-+++ b/arch/x86/mm/pgtable_32.c
-@@ -195,11 +195,6 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
- 	return pte;
- }
- 
--void pmd_ctor(struct kmem_cache *cache, void *pmd)
+-	*var = simple_strtoul(p, &p, 10);
+-	return count;
+-}
+-
+-static ssize_t queue_requests_show(struct request_queue *q, char *page)
 -{
--	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+-	return queue_var_show(q->nr_requests, (page));
 -}
 -
- /*
-  * List of all pgd's needed for non-PAE so it can invalidate entries
-  * in both cached and uncached pgd's; not needed for PAE since the
-@@ -210,27 +205,18 @@ void pmd_ctor(struct kmem_cache *cache, void *pmd)
-  * vmalloc faults work because attached pagetables are never freed.
-  * -- wli
-  */
--DEFINE_SPINLOCK(pgd_lock);
--struct page *pgd_list;
+-static ssize_t
+-queue_requests_store(struct request_queue *q, const char *page, size_t count)
+-{
+-	struct request_list *rl = &q->rq;
+-	unsigned long nr;
+-	int ret = queue_var_store(&nr, page, count);
+-	if (nr < BLKDEV_MIN_RQ)
+-		nr = BLKDEV_MIN_RQ;
 -
- static inline void pgd_list_add(pgd_t *pgd)
- {
- 	struct page *page = virt_to_page(pgd);
--	page->index = (unsigned long)pgd_list;
--	if (pgd_list)
--		set_page_private(pgd_list, (unsigned long)&page->index);
--	pgd_list = page;
--	set_page_private(page, (unsigned long)&pgd_list);
-+
-+	list_add(&page->lru, &pgd_list);
- }
- 
- static inline void pgd_list_del(pgd_t *pgd)
- {
--	struct page *next, **pprev, *page = virt_to_page(pgd);
--	next = (struct page *)page->index;
--	pprev = (struct page **)page_private(page);
--	*pprev = next;
--	if (next)
--		set_page_private(next, (unsigned long)pprev);
-+	struct page *page = virt_to_page(pgd);
-+
-+	list_del(&page->lru);
- }
- 
- 
-@@ -285,7 +271,6 @@ static void pgd_dtor(void *pgd)
- 	if (SHARED_KERNEL_PMD)
- 		return;
- 
--	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
- 	spin_lock_irqsave(&pgd_lock, flags);
- 	pgd_list_del(pgd);
- 	spin_unlock_irqrestore(&pgd_lock, flags);
-@@ -294,77 +279,96 @@ static void pgd_dtor(void *pgd)
- #define UNSHARED_PTRS_PER_PGD				\
- 	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
- 
--/* If we allocate a pmd for part of the kernel address space, then
--   make sure its initialized with the appropriate kernel mappings.
--   Otherwise use a cached zeroed pmd.  */
--static pmd_t *pmd_cache_alloc(int idx)
-+#ifdef CONFIG_X86_PAE
-+/*
-+ * Mop up any pmd pages which may still be attached to the pgd.
-+ * Normally they will be freed by munmap/exit_mmap, but any pmd we
-+ * preallocate which never got a corresponding vma will need to be
-+ * freed manually.
-+ */
-+static void pgd_mop_up_pmds(pgd_t *pgdp)
- {
--	pmd_t *pmd;
-+	int i;
- 
--	if (idx >= USER_PTRS_PER_PGD) {
--		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
-+	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
-+		pgd_t pgd = pgdp[i];
- 
--		if (pmd)
--			memcpy(pmd,
--			       (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
-+		if (pgd_val(pgd) != 0) {
-+			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
-+
-+			pgdp[i] = native_make_pgd(0);
-+
-+			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
-+			pmd_free(pmd);
-+		}
-+	}
-+}
-+
-+/*
-+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
-+ * updating the top-level pagetable entries to guarantee the
-+ * processor notices the update.  Since this is expensive, and
-+ * all 4 top-level entries are used almost immediately in a
-+ * new process's life, we just pre-populate them here.
-+ *
-+ * Also, if we're in a paravirt environment where the kernel pmd is
-+ * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
-+ * and initialize the kernel pmds here.
-+ */
-+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-+{
-+	pud_t *pud;
-+	unsigned long addr;
-+	int i;
-+
-+	pud = pud_offset(pgd, 0);
-+ 	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
-+	     i++, pud++, addr += PUD_SIZE) {
-+		pmd_t *pmd = pmd_alloc_one(mm, addr);
-+
-+		if (!pmd) {
-+			pgd_mop_up_pmds(pgd);
-+			return 0;
-+		}
-+
-+		if (i >= USER_PTRS_PER_PGD)
-+			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
- 			       sizeof(pmd_t) * PTRS_PER_PMD);
--	} else
--		pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
- 
--	return pmd;
-+		pud_populate(mm, pud, pmd);
-+	}
-+
-+	return 1;
-+}
-+#else  /* !CONFIG_X86_PAE */
-+/* No need to prepopulate any pagetable entries in non-PAE modes. */
-+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-+{
-+	return 1;
- }
- 
--static void pmd_cache_free(pmd_t *pmd, int idx)
-+static void pgd_mop_up_pmds(pgd_t *pgd)
- {
--	if (idx >= USER_PTRS_PER_PGD)
--		free_page((unsigned long)pmd);
--	else
--		kmem_cache_free(pmd_cache, pmd);
- }
-+#endif	/* CONFIG_X86_PAE */
- 
- pgd_t *pgd_alloc(struct mm_struct *mm)
- {
--	int i;
- 	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
- 
--	if (PTRS_PER_PMD == 1 || !pgd)
--		return pgd;
-+	mm->pgd = pgd;		/* so that alloc_pd can use it */
- 
-- 	for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
--		pmd_t *pmd = pmd_cache_alloc(i);
+-	spin_lock_irq(q->queue_lock);
+-	q->nr_requests = nr;
+-	blk_queue_congestion_threshold(q);
 -
--		if (!pmd)
--			goto out_oom;
+-	if (rl->count[READ] >= queue_congestion_on_threshold(q))
+-		blk_set_queue_congested(q, READ);
+-	else if (rl->count[READ] < queue_congestion_off_threshold(q))
+-		blk_clear_queue_congested(q, READ);
 -
--		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
--		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
-+	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
-+		quicklist_free(0, pgd_dtor, pgd);
-+		pgd = NULL;
- 	}
--	return pgd;
- 
--out_oom:
--	for (i--; i >= 0; i--) {
--		pgd_t pgdent = pgd[i];
--		void* pmd = (void *)__va(pgd_val(pgdent)-1);
--		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
--		pmd_cache_free(pmd, i);
+-	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
+-		blk_set_queue_congested(q, WRITE);
+-	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
+-		blk_clear_queue_congested(q, WRITE);
+-
+-	if (rl->count[READ] >= q->nr_requests) {
+-		blk_set_queue_full(q, READ);
+-	} else if (rl->count[READ]+1 <= q->nr_requests) {
+-		blk_clear_queue_full(q, READ);
+-		wake_up(&rl->wait[READ]);
 -	}
--	quicklist_free(0, pgd_dtor, pgd);
--	return NULL;
-+	return pgd;
- }
- 
- void pgd_free(pgd_t *pgd)
- {
--	int i;
 -
--	/* in the PAE case user pgd entries are overwritten before usage */
--	if (PTRS_PER_PMD > 1)
--		for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
--			pgd_t pgdent = pgd[i];
--			void* pmd = (void *)__va(pgd_val(pgdent)-1);
--			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
--			pmd_cache_free(pmd, i);
--		}
--	/* in the non-PAE case, free_pgtables() clears user pgd entries */
-+	pgd_mop_up_pmds(pgd);
- 	quicklist_free(0, pgd_dtor, pgd);
- }
- 
-@@ -372,4 +376,3 @@ void check_pgt_cache(void)
- {
- 	quicklist_trim(0, pgd_dtor, 25, 16);
- }
+-	if (rl->count[WRITE] >= q->nr_requests) {
+-		blk_set_queue_full(q, WRITE);
+-	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
+-		blk_clear_queue_full(q, WRITE);
+-		wake_up(&rl->wait[WRITE]);
+-	}
+-	spin_unlock_irq(q->queue_lock);
+-	return ret;
+-}
 -
-diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
-index ea85172..65416f8 100644
---- a/arch/x86/mm/srat_64.c
-+++ b/arch/x86/mm/srat_64.c
-@@ -130,6 +130,9 @@ void __init
- acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
- {
- 	int pxm, node;
-+	int apic_id;
-+
-+	apic_id = pa->apic_id;
- 	if (srat_disabled())
- 		return;
- 	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
-@@ -145,68 +148,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
- 		bad_srat();
- 		return;
- 	}
--	apicid_to_node[pa->apic_id] = node;
-+	apicid_to_node[apic_id] = node;
- 	acpi_numa = 1;
- 	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
--	       pxm, pa->apic_id, node);
+-static ssize_t queue_ra_show(struct request_queue *q, char *page)
+-{
+-	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+-
+-	return queue_var_show(ra_kb, (page));
 -}
 -
--#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
--/*
-- * Protect against too large hotadd areas that would fill up memory.
-- */
--static int hotadd_enough_memory(struct bootnode *nd)
+-static ssize_t
+-queue_ra_store(struct request_queue *q, const char *page, size_t count)
 -{
--	static unsigned long allocated;
--	static unsigned long last_area_end;
--	unsigned long pages = (nd->end - nd->start) >> PAGE_SHIFT;
--	long mem = pages * sizeof(struct page);
--	unsigned long addr;
--	unsigned long allowed;
--	unsigned long oldpages = pages;
+-	unsigned long ra_kb;
+-	ssize_t ret = queue_var_store(&ra_kb, page, count);
 -
--	if (mem < 0)
--		return 0;
--	allowed = (end_pfn - absent_pages_in_range(0, end_pfn)) * PAGE_SIZE;
--	allowed = (allowed / 100) * hotadd_percent;
--	if (allocated + mem > allowed) {
--		unsigned long range;
--		/* Give them at least part of their hotadd memory upto hotadd_percent
--		   It would be better to spread the limit out
--		   over multiple hotplug areas, but that is too complicated
--		   right now */
--		if (allocated >= allowed)
--			return 0;
--		range = allowed - allocated;
--		pages = (range / PAGE_SIZE);
--		mem = pages * sizeof(struct page);
--		nd->end = nd->start + range;
--	}
--	/* Not completely fool proof, but a good sanity check */
--	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
--	if (addr == -1UL)
--		return 0;
--	if (pages != oldpages)
--		printk(KERN_NOTICE "SRAT: Hotadd area limited to %lu bytes\n",
--			pages << PAGE_SHIFT);
--	last_area_end = addr + mem;
--	allocated += mem;
--	return 1;
+-	spin_lock_irq(q->queue_lock);
+-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+-	spin_unlock_irq(q->queue_lock);
+-
+-	return ret;
 -}
 -
--static int update_end_of_memory(unsigned long end)
+-static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 -{
--	found_add_area = 1;
--	if ((end >> PAGE_SHIFT) > end_pfn)
--		end_pfn = end >> PAGE_SHIFT;
--	return 1;
-+	       pxm, apic_id, node);
- }
- 
--static inline int save_add_info(void)
+-	int max_sectors_kb = q->max_sectors >> 1;
+-
+-	return queue_var_show(max_sectors_kb, (page));
+-}
+-
+-static ssize_t
+-queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 -{
--	return hotadd_percent > 0;
+-	unsigned long max_sectors_kb,
+-			max_hw_sectors_kb = q->max_hw_sectors >> 1,
+-			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+-	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+-
+-	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+-		return -EINVAL;
+-	/*
+-	 * Take the queue lock to update the readahead and max_sectors
+-	 * values synchronously:
+-	 */
+-	spin_lock_irq(q->queue_lock);
+-	q->max_sectors = max_sectors_kb << 1;
+-	spin_unlock_irq(q->queue_lock);
+-
+-	return ret;
 -}
--#else
- int update_end_of_memory(unsigned long end) {return -1;}
- static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
- #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
-@@ -214,10 +161,9 @@ static inline int save_add_info(void) {return 1;}
- #else
- static inline int save_add_info(void) {return 0;}
- #endif
--#endif
- /*
-  * Update nodes_add and decide if to include add are in the zone.
-- * Both SPARSE and RESERVE need nodes_add infomation.
-+ * Both SPARSE and RESERVE need nodes_add information.
-  * This code supports one contiguous hot add area per node.
-  */
- static int reserve_hotadd(int node, unsigned long start, unsigned long end)
-@@ -377,7 +323,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
- 	return 1;
- }
- 
--static void unparse_node(int node)
-+static void __init unparse_node(int node)
- {
- 	int i;
- 	node_clear(node, nodes_parsed);
-@@ -400,7 +346,12 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
- 	/* First clean up the node list */
- 	for (i = 0; i < MAX_NUMNODES; i++) {
- 		cutoff_node(i, start, end);
--		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
-+		/*
-+		 * don't confuse VM with a node that doesn't have the
-+		 * minimum memory.
-+		 */
-+		if (nodes[i].end &&
-+			(nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
- 			unparse_node(i);
- 			node_set_offline(i);
- 		}
-@@ -431,9 +382,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
- 			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
- 
- 	for (i = 0; i < NR_CPUS; i++) {
--		if (cpu_to_node(i) == NUMA_NO_NODE)
-+		int node = early_cpu_to_node(i);
-+
-+		if (node == NUMA_NO_NODE)
- 			continue;
--		if (!node_isset(cpu_to_node(i), node_possible_map))
-+		if (!node_isset(node, node_possible_map))
- 			numa_set_node(i, NUMA_NO_NODE);
- 	}
- 	numa_init_array();
-@@ -441,6 +394,12 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
- }
- 
- #ifdef CONFIG_NUMA_EMU
-+static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
-+	[0 ... MAX_NUMNODES-1] = PXM_INVAL
-+};
-+static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
-+	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-+};
- static int __init find_node_by_addr(unsigned long addr)
- {
- 	int ret = NUMA_NO_NODE;
-@@ -457,7 +416,7 @@ static int __init find_node_by_addr(unsigned long addr)
- 			break;
- 		}
- 	}
--	return i;
-+	return ret;
- }
- 
- /*
-@@ -471,12 +430,6 @@ static int __init find_node_by_addr(unsigned long addr)
- void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
- {
- 	int i, j;
--	int fake_node_to_pxm_map[MAX_NUMNODES] = {
--		[0 ... MAX_NUMNODES-1] = PXM_INVAL
--	};
--	unsigned char fake_apicid_to_node[MAX_LOCAL_APIC] = {
--		[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
--	};
- 
- 	printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
- 			 "topology.\n");
-diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
-index 0ed046a..e2095cb 100644
---- a/arch/x86/oprofile/backtrace.c
-+++ b/arch/x86/oprofile/backtrace.c
-@@ -32,7 +32,7 @@ static int backtrace_stack(void *data, char *name)
- 	return 0;
- }
- 
--static void backtrace_address(void *data, unsigned long addr)
-+static void backtrace_address(void *data, unsigned long addr, int reliable)
- {
- 	unsigned int *depth = data;
- 
-@@ -48,7 +48,7 @@ static struct stacktrace_ops backtrace_ops = {
- };
- 
- struct frame_head {
--	struct frame_head *ebp;
-+	struct frame_head *bp;
- 	unsigned long ret;
- } __attribute__((packed));
- 
-@@ -67,21 +67,21 @@ dump_user_backtrace(struct frame_head * head)
- 
- 	/* frame pointers should strictly progress back up the stack
- 	 * (towards higher addresses) */
--	if (head >= bufhead[0].ebp)
-+	if (head >= bufhead[0].bp)
- 		return NULL;
- 
--	return bufhead[0].ebp;
-+	return bufhead[0].bp;
- }
- 
- void
- x86_backtrace(struct pt_regs * const regs, unsigned int depth)
- {
- 	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
--	unsigned long stack = stack_pointer(regs);
-+	unsigned long stack = kernel_trap_sp(regs);
- 
- 	if (!user_mode_vm(regs)) {
- 		if (depth)
--			dump_trace(NULL, regs, (unsigned long *)stack,
-+			dump_trace(NULL, regs, (unsigned long *)stack, 0,
- 				   &backtrace_ops, &depth);
- 		return;
- 	}
-diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
-index 944bbcd..1f11cf0 100644
---- a/arch/x86/oprofile/nmi_int.c
-+++ b/arch/x86/oprofile/nmi_int.c
-@@ -18,11 +18,11 @@
- #include <asm/nmi.h>
- #include <asm/msr.h>
- #include <asm/apic.h>
-- 
-+
- #include "op_counter.h"
- #include "op_x86_model.h"
- 
--static struct op_x86_model_spec const * model;
-+static struct op_x86_model_spec const *model;
- static struct op_msrs cpu_msrs[NR_CPUS];
- static unsigned long saved_lvtpc[NR_CPUS];
- 
-@@ -41,7 +41,6 @@ static int nmi_suspend(struct sys_device *dev, pm_message_t state)
- 	return 0;
- }
- 
 -
- static int nmi_resume(struct sys_device *dev)
- {
- 	if (nmi_enabled == 1)
-@@ -49,29 +48,27 @@ static int nmi_resume(struct sys_device *dev)
- 	return 0;
- }
- 
+-static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+-{
+-	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
 -
- static struct sysdev_class oprofile_sysclass = {
--	set_kset_name("oprofile"),
-+	.name		= "oprofile",
- 	.resume		= nmi_resume,
- 	.suspend	= nmi_suspend,
- };
- 
+-	return queue_var_show(max_hw_sectors_kb, (page));
+-}
 -
- static struct sys_device device_oprofile = {
- 	.id	= 0,
- 	.cls	= &oprofile_sysclass,
- };
- 
 -
- static int __init init_sysfs(void)
- {
- 	int error;
--	if (!(error = sysdev_class_register(&oprofile_sysclass)))
-+
-+	error = sysdev_class_register(&oprofile_sysclass);
-+	if (!error)
- 		error = sysdev_register(&device_oprofile);
- 	return error;
- }
- 
+-static struct queue_sysfs_entry queue_requests_entry = {
+-	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
+-	.show = queue_requests_show,
+-	.store = queue_requests_store,
+-};
 -
- static void exit_sysfs(void)
- {
- 	sysdev_unregister(&device_oprofile);
-@@ -90,7 +87,7 @@ static int profile_exceptions_notify(struct notifier_block *self,
- 	int ret = NOTIFY_DONE;
- 	int cpu = smp_processor_id();
- 
--	switch(val) {
-+	switch (val) {
- 	case DIE_NMI:
- 		if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
- 			ret = NOTIFY_STOP;
-@@ -101,24 +98,24 @@ static int profile_exceptions_notify(struct notifier_block *self,
- 	return ret;
- }
- 
--static void nmi_cpu_save_registers(struct op_msrs * msrs)
-+static void nmi_cpu_save_registers(struct op_msrs *msrs)
- {
- 	unsigned int const nr_ctrs = model->num_counters;
--	unsigned int const nr_ctrls = model->num_controls; 
--	struct op_msr * counters = msrs->counters;
--	struct op_msr * controls = msrs->controls;
-+	unsigned int const nr_ctrls = model->num_controls;
-+	struct op_msr *counters = msrs->counters;
-+	struct op_msr *controls = msrs->controls;
- 	unsigned int i;
- 
- 	for (i = 0; i < nr_ctrs; ++i) {
--		if (counters[i].addr){
-+		if (counters[i].addr) {
- 			rdmsr(counters[i].addr,
- 				counters[i].saved.low,
- 				counters[i].saved.high);
- 		}
- 	}
-- 
-+
- 	for (i = 0; i < nr_ctrls; ++i) {
--		if (controls[i].addr){
-+		if (controls[i].addr) {
- 			rdmsr(controls[i].addr,
- 				controls[i].saved.low,
- 				controls[i].saved.high);
-@@ -126,15 +123,13 @@ static void nmi_cpu_save_registers(struct op_msrs * msrs)
- 	}
- }
- 
+-static struct queue_sysfs_entry queue_ra_entry = {
+-	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
+-	.show = queue_ra_show,
+-	.store = queue_ra_store,
+-};
 -
--static void nmi_save_registers(void * dummy)
-+static void nmi_save_registers(void *dummy)
- {
- 	int cpu = smp_processor_id();
--	struct op_msrs * msrs = &cpu_msrs[cpu];
-+	struct op_msrs *msrs = &cpu_msrs[cpu];
- 	nmi_cpu_save_registers(msrs);
- }
- 
+-static struct queue_sysfs_entry queue_max_sectors_entry = {
+-	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+-	.show = queue_max_sectors_show,
+-	.store = queue_max_sectors_store,
+-};
 -
- static void free_msrs(void)
- {
- 	int i;
-@@ -146,7 +141,6 @@ static void free_msrs(void)
- 	}
- }
- 
+-static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+-	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+-	.show = queue_max_hw_sectors_show,
+-};
 -
- static int allocate_msrs(void)
- {
- 	int success = 1;
-@@ -173,11 +167,10 @@ static int allocate_msrs(void)
- 	return success;
- }
- 
+-static struct queue_sysfs_entry queue_iosched_entry = {
+-	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
+-	.show = elv_iosched_show,
+-	.store = elv_iosched_store,
+-};
 -
--static void nmi_cpu_setup(void * dummy)
-+static void nmi_cpu_setup(void *dummy)
- {
- 	int cpu = smp_processor_id();
--	struct op_msrs * msrs = &cpu_msrs[cpu];
-+	struct op_msrs *msrs = &cpu_msrs[cpu];
- 	spin_lock(&oprofilefs_lock);
- 	model->setup_ctrs(msrs);
- 	spin_unlock(&oprofilefs_lock);
-@@ -193,13 +186,14 @@ static struct notifier_block profile_exceptions_nb = {
- 
- static int nmi_setup(void)
- {
--	int err=0;
-+	int err = 0;
- 	int cpu;
- 
- 	if (!allocate_msrs())
- 		return -ENOMEM;
- 
--	if ((err = register_die_notifier(&profile_exceptions_nb))){
-+	err = register_die_notifier(&profile_exceptions_nb);
-+	if (err) {
- 		free_msrs();
- 		return err;
- 	}
-@@ -210,7 +204,7 @@ static int nmi_setup(void)
- 
- 	/* Assume saved/restored counters are the same on all CPUs */
- 	model->fill_in_addresses(&cpu_msrs[0]);
--	for_each_possible_cpu (cpu) {
-+	for_each_possible_cpu(cpu) {
- 		if (cpu != 0) {
- 			memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
- 				sizeof(struct op_msr) * model->num_counters);
-@@ -226,39 +220,37 @@ static int nmi_setup(void)
- 	return 0;
- }
- 
+-static struct attribute *default_attrs[] = {
+-	&queue_requests_entry.attr,
+-	&queue_ra_entry.attr,
+-	&queue_max_hw_sectors_entry.attr,
+-	&queue_max_sectors_entry.attr,
+-	&queue_iosched_entry.attr,
+-	NULL,
+-};
 -
--static void nmi_restore_registers(struct op_msrs * msrs)
-+static void nmi_restore_registers(struct op_msrs *msrs)
- {
- 	unsigned int const nr_ctrs = model->num_counters;
--	unsigned int const nr_ctrls = model->num_controls; 
--	struct op_msr * counters = msrs->counters;
--	struct op_msr * controls = msrs->controls;
-+	unsigned int const nr_ctrls = model->num_controls;
-+	struct op_msr *counters = msrs->counters;
-+	struct op_msr *controls = msrs->controls;
- 	unsigned int i;
+-#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
+-
+-static ssize_t
+-queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+-{
+-	struct queue_sysfs_entry *entry = to_queue(attr);
+-	struct request_queue *q =
+-		container_of(kobj, struct request_queue, kobj);
+-	ssize_t res;
+-
+-	if (!entry->show)
+-		return -EIO;
+-	mutex_lock(&q->sysfs_lock);
+-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+-		mutex_unlock(&q->sysfs_lock);
+-		return -ENOENT;
+-	}
+-	res = entry->show(q, page);
+-	mutex_unlock(&q->sysfs_lock);
+-	return res;
+-}
+-
+-static ssize_t
+-queue_attr_store(struct kobject *kobj, struct attribute *attr,
+-		    const char *page, size_t length)
+-{
+-	struct queue_sysfs_entry *entry = to_queue(attr);
+-	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
+-
+-	ssize_t res;
+-
+-	if (!entry->store)
+-		return -EIO;
+-	mutex_lock(&q->sysfs_lock);
+-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+-		mutex_unlock(&q->sysfs_lock);
+-		return -ENOENT;
+-	}
+-	res = entry->store(q, page, length);
+-	mutex_unlock(&q->sysfs_lock);
+-	return res;
+-}
+-
+-static struct sysfs_ops queue_sysfs_ops = {
+-	.show	= queue_attr_show,
+-	.store	= queue_attr_store,
+-};
+-
+-static struct kobj_type queue_ktype = {
+-	.sysfs_ops	= &queue_sysfs_ops,
+-	.default_attrs	= default_attrs,
+-	.release	= blk_release_queue,
+-};
+-
+-int blk_register_queue(struct gendisk *disk)
+-{
+-	int ret;
+-
+-	struct request_queue *q = disk->queue;
+-
+-	if (!q || !q->request_fn)
+-		return -ENXIO;
+-
+-	q->kobj.parent = kobject_get(&disk->kobj);
+-
+-	ret = kobject_add(&q->kobj);
+-	if (ret < 0)
+-		return ret;
+-
+-	kobject_uevent(&q->kobj, KOBJ_ADD);
+-
+-	ret = elv_register_queue(q);
+-	if (ret) {
+-		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+-		kobject_del(&q->kobj);
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-void blk_unregister_queue(struct gendisk *disk)
+-{
+-	struct request_queue *q = disk->queue;
+-
+-	if (q && q->request_fn) {
+-		elv_unregister_queue(q);
+-
+-		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+-		kobject_del(&q->kobj);
+-		kobject_put(&disk->kobj);
+-	}
+-}
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 083d2e1..c3166a1 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -24,10 +24,6 @@ config CRYPTO_ALGAPI
+ 	help
+ 	  This option provides the API for cryptographic algorithms.
  
- 	for (i = 0; i < nr_ctrls; ++i) {
--		if (controls[i].addr){
-+		if (controls[i].addr) {
- 			wrmsr(controls[i].addr,
- 				controls[i].saved.low,
- 				controls[i].saved.high);
- 		}
- 	}
-- 
-+
- 	for (i = 0; i < nr_ctrs; ++i) {
--		if (counters[i].addr){
-+		if (counters[i].addr) {
- 			wrmsr(counters[i].addr,
- 				counters[i].saved.low,
- 				counters[i].saved.high);
- 		}
- 	}
- }
-- 
+-config CRYPTO_ABLKCIPHER
+-	tristate
+-	select CRYPTO_BLKCIPHER
+-
+ config CRYPTO_AEAD
+ 	tristate
+ 	select CRYPTO_ALGAPI
+@@ -36,6 +32,15 @@ config CRYPTO_BLKCIPHER
+ 	tristate
+ 	select CRYPTO_ALGAPI
  
--static void nmi_cpu_shutdown(void * dummy)
-+static void nmi_cpu_shutdown(void *dummy)
- {
- 	unsigned int v;
- 	int cpu = smp_processor_id();
--	struct op_msrs * msrs = &cpu_msrs[cpu];
-- 
-+	struct op_msrs *msrs = &cpu_msrs[cpu];
++config CRYPTO_SEQIV
++	tristate "Sequence Number IV Generator"
++	select CRYPTO_AEAD
++	select CRYPTO_BLKCIPHER
++	help
++	  This IV generator generates an IV based on a sequence number by
++	  xoring it with a salt.  This algorithm is mainly useful for CTR
++	  and similar modes.
 +
- 	/* restoring APIC_LVTPC can trigger an apic error because the delivery
- 	 * mode and vector nr combination can be illegal. That's by design: on
- 	 * power on apic lvt contain a zero vector nr which are legal only for
-@@ -271,7 +263,6 @@ static void nmi_cpu_shutdown(void * dummy)
- 	nmi_restore_registers(msrs);
- }
+ config CRYPTO_HASH
+ 	tristate
+ 	select CRYPTO_ALGAPI
+@@ -91,7 +96,7 @@ config CRYPTO_SHA1
+ 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
  
-- 
- static void nmi_shutdown(void)
- {
- 	nmi_enabled = 0;
-@@ -281,45 +272,40 @@ static void nmi_shutdown(void)
- 	free_msrs();
- }
+ config CRYPTO_SHA256
+-	tristate "SHA256 digest algorithm"
++	tristate "SHA224 and SHA256 digest algorithm"
+ 	select CRYPTO_ALGAPI
+ 	help
+ 	  SHA256 secure hash standard (DFIPS 180-2).
+@@ -99,6 +104,9 @@ config CRYPTO_SHA256
+ 	  This version of SHA implements a 256 bit hash with 128 bits of
+ 	  security against collision attacks.
  
-- 
--static void nmi_cpu_start(void * dummy)
-+static void nmi_cpu_start(void *dummy)
- {
--	struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
-+	struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
- 	model->start(msrs);
- }
-- 
++          This code also includes SHA-224, a 224 bit hash with 112 bits
++          of security against collision attacks.
++
+ config CRYPTO_SHA512
+ 	tristate "SHA384 and SHA512 digest algorithms"
+ 	select CRYPTO_ALGAPI
+@@ -195,9 +203,34 @@ config CRYPTO_XTS
+ 	  key size 256, 384 or 512 bits. This implementation currently
+ 	  can't handle a sectorsize which is not a multiple of 16 bytes.
  
- static int nmi_start(void)
- {
- 	on_each_cpu(nmi_cpu_start, NULL, 0, 1);
- 	return 0;
- }
-- 
-- 
--static void nmi_cpu_stop(void * dummy)
++config CRYPTO_CTR
++	tristate "CTR support"
++	select CRYPTO_BLKCIPHER
++	select CRYPTO_SEQIV
++	select CRYPTO_MANAGER
++	help
++	  CTR: Counter mode
++	  This block cipher algorithm is required for IPSec.
 +
-+static void nmi_cpu_stop(void *dummy)
- {
--	struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
-+	struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
- 	model->stop(msrs);
- }
-- 
-- 
++config CRYPTO_GCM
++	tristate "GCM/GMAC support"
++	select CRYPTO_CTR
++	select CRYPTO_AEAD
++	select CRYPTO_GF128MUL
++	help
++	  Support for Galois/Counter Mode (GCM) and Galois Message
++	  Authentication Code (GMAC). Required for IPSec.
 +
- static void nmi_stop(void)
- {
- 	on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
- }
- 
--
- struct op_counter_config counter_config[OP_MAX_COUNTER];
- 
--static int nmi_create_files(struct super_block * sb, struct dentry * root)
-+static int nmi_create_files(struct super_block *sb, struct dentry *root)
- {
- 	unsigned int i;
- 
- 	for (i = 0; i < model->num_counters; ++i) {
--		struct dentry * dir;
-+		struct dentry *dir;
- 		char buf[4];
-- 
-- 		/* quick little hack to _not_ expose a counter if it is not
++config CRYPTO_CCM
++	tristate "CCM support"
++	select CRYPTO_CTR
++	select CRYPTO_AEAD
++	help
++	  Support for Counter with CBC MAC. Required for IPsec.
 +
-+		/* quick little hack to _not_ expose a counter if it is not
- 		 * available for use.  This should protect userspace app.
- 		 * NOTE:  assumes 1:1 mapping here (that counters are organized
- 		 *        sequentially in their struct assignment).
-@@ -329,21 +315,21 @@ static int nmi_create_files(struct super_block * sb, struct dentry * root)
- 
- 		snprintf(buf,  sizeof(buf), "%d", i);
- 		dir = oprofilefs_mkdir(sb, root, buf);
--		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); 
--		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event); 
--		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count); 
--		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); 
--		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); 
--		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); 
-+		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
-+		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
-+		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
-+		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
-+		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
-+		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
- 	}
+ config CRYPTO_CRYPTD
+ 	tristate "Software async crypto daemon"
+-	select CRYPTO_ABLKCIPHER
++	select CRYPTO_BLKCIPHER
+ 	select CRYPTO_MANAGER
+ 	help
+ 	  This is a generic software asynchronous crypto daemon that
+@@ -320,6 +353,7 @@ config CRYPTO_AES_586
+ 	tristate "AES cipher algorithms (i586)"
+ 	depends on (X86 || UML_X86) && !64BIT
+ 	select CRYPTO_ALGAPI
++	select CRYPTO_AES
+ 	help
+ 	  AES cipher algorithms (FIPS-197). AES uses the Rijndael 
+ 	  algorithm.
+@@ -341,6 +375,7 @@ config CRYPTO_AES_X86_64
+ 	tristate "AES cipher algorithms (x86_64)"
+ 	depends on (X86 || UML_X86) && 64BIT
+ 	select CRYPTO_ALGAPI
++	select CRYPTO_AES
+ 	help
+ 	  AES cipher algorithms (FIPS-197). AES uses the Rijndael 
+ 	  algorithm.
+@@ -441,6 +476,46 @@ config CRYPTO_SEED
+ 	  See also:
+ 	  <http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp>
  
- 	return 0;
- }
-- 
++config CRYPTO_SALSA20
++	tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"
++	depends on EXPERIMENTAL
++	select CRYPTO_BLKCIPHER
++	help
++	  Salsa20 stream cipher algorithm.
 +
- static int p4force;
- module_param(p4force, int, 0);
-- 
--static int __init p4_init(char ** cpu_type)
++	  Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
++	  Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
 +
-+static int __init p4_init(char **cpu_type)
- {
- 	__u8 cpu_model = boot_cpu_data.x86_model;
- 
-@@ -356,15 +342,15 @@ static int __init p4_init(char ** cpu_type)
- 	return 1;
- #else
- 	switch (smp_num_siblings) {
--		case 1:
--			*cpu_type = "i386/p4";
--			model = &op_p4_spec;
--			return 1;
--
--		case 2:
--			*cpu_type = "i386/p4-ht";
--			model = &op_p4_ht2_spec;
--			return 1;
-+	case 1:
-+		*cpu_type = "i386/p4";
-+		model = &op_p4_spec;
-+		return 1;
++	  The Salsa20 stream cipher algorithm is designed by Daniel J.
++	  Bernstein <djb at cr.yp.to>. See <http://cr.yp.to/snuffle.html>
 +
-+	case 2:
-+		*cpu_type = "i386/p4-ht";
-+		model = &op_p4_ht2_spec;
-+		return 1;
- 	}
- #endif
++config CRYPTO_SALSA20_586
++	tristate "Salsa20 stream cipher algorithm (i586) (EXPERIMENTAL)"
++	depends on (X86 || UML_X86) && !64BIT
++	depends on EXPERIMENTAL
++	select CRYPTO_BLKCIPHER
++	help
++	  Salsa20 stream cipher algorithm.
++
++	  Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
++	  Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
++
++	  The Salsa20 stream cipher algorithm is designed by Daniel J.
++	  Bernstein <djb at cr.yp.to>. See <http://cr.yp.to/snuffle.html>
++
++config CRYPTO_SALSA20_X86_64
++	tristate "Salsa20 stream cipher algorithm (x86_64) (EXPERIMENTAL)"
++	depends on (X86 || UML_X86) && 64BIT
++	depends on EXPERIMENTAL
++	select CRYPTO_BLKCIPHER
++	help
++	  Salsa20 stream cipher algorithm.
++
++	  Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
++	  Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
++
++	  The Salsa20 stream cipher algorithm is designed by Daniel J.
++	  Bernstein <djb at cr.yp.to>. See <http://cr.yp.to/snuffle.html>
  
-@@ -373,8 +359,7 @@ static int __init p4_init(char ** cpu_type)
- 	return 0;
- }
+ config CRYPTO_DEFLATE
+ 	tristate "Deflate compression algorithm"
+@@ -491,6 +566,7 @@ config CRYPTO_TEST
+ 	tristate "Testing module"
+ 	depends on m
+ 	select CRYPTO_ALGAPI
++	select CRYPTO_AEAD
+ 	help
+ 	  Quick & dirty crypto test module.
  
--
--static int __init ppro_init(char ** cpu_type)
-+static int __init ppro_init(char **cpu_type)
- {
- 	__u8 cpu_model = boot_cpu_data.x86_model;
+@@ -498,10 +574,19 @@ config CRYPTO_AUTHENC
+ 	tristate "Authenc support"
+ 	select CRYPTO_AEAD
+ 	select CRYPTO_MANAGER
++	select CRYPTO_HASH
+ 	help
+ 	  Authenc: Combined mode wrapper for IPsec.
+ 	  This is required for IPSec.
+ 
++config CRYPTO_LZO
++	tristate "LZO compression algorithm"
++	select CRYPTO_ALGAPI
++	select LZO_COMPRESS
++	select LZO_DECOMPRESS
++	help
++	  This is the LZO algorithm.
++
+ source "drivers/crypto/Kconfig"
  
-@@ -409,52 +394,52 @@ int __init op_nmi_init(struct oprofile_operations *ops)
+ endif	# if CRYPTO
+diff --git a/crypto/Makefile b/crypto/Makefile
+index 43c2a0d..48c7583 100644
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -8,9 +8,14 @@ crypto_algapi-$(CONFIG_PROC_FS) += proc.o
+ crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
+ obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
  
- 	if (!cpu_has_apic)
- 		return -ENODEV;
-- 
+-obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o
+ obj-$(CONFIG_CRYPTO_AEAD) += aead.o
+-obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o
 +
- 	switch (vendor) {
--		case X86_VENDOR_AMD:
--			/* Needs to be at least an Athlon (or hammer in 32bit mode) */
-+	case X86_VENDOR_AMD:
-+		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
++crypto_blkcipher-objs := ablkcipher.o
++crypto_blkcipher-objs += blkcipher.o
++obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
++obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o
++obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o
++obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
  
--			switch (family) {
--			default:
-+		switch (family) {
-+		default:
-+			return -ENODEV;
-+		case 6:
-+			model = &op_athlon_spec;
-+			cpu_type = "i386/athlon";
-+			break;
-+		case 0xf:
-+			model = &op_athlon_spec;
-+			/* Actually it could be i386/hammer too, but give
-+			 user space an consistent name. */
-+			cpu_type = "x86-64/hammer";
-+			break;
-+		case 0x10:
-+			model = &op_athlon_spec;
-+			cpu_type = "x86-64/family10";
-+			break;
-+		}
-+		break;
-+
-+	case X86_VENDOR_INTEL:
-+		switch (family) {
-+			/* Pentium IV */
-+		case 0xf:
-+			if (!p4_init(&cpu_type))
- 				return -ENODEV;
--			case 6:
--				model = &op_athlon_spec;
--				cpu_type = "i386/athlon";
--				break;
--			case 0xf:
--				model = &op_athlon_spec;
--				/* Actually it could be i386/hammer too, but give
--				   user space an consistent name. */
--				cpu_type = "x86-64/hammer";
--				break;
--			case 0x10:
--				model = &op_athlon_spec;
--				cpu_type = "x86-64/family10";
--				break;
--			}
- 			break;
-- 
--		case X86_VENDOR_INTEL:
--			switch (family) {
--				/* Pentium IV */
--				case 0xf:
--					if (!p4_init(&cpu_type))
--						return -ENODEV;
--					break;
--
--				/* A P6-class processor */
--				case 6:
--					if (!ppro_init(&cpu_type))
--						return -ENODEV;
--					break;
--
--				default:
--					return -ENODEV;
--			}
-+
-+			/* A P6-class processor */
-+		case 6:
-+			if (!ppro_init(&cpu_type))
-+				return -ENODEV;
- 			break;
+ crypto_hash-objs := hash.o
+ obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
+@@ -32,6 +37,9 @@ obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+ obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
+ obj-$(CONFIG_CRYPTO_LRW) += lrw.o
+ obj-$(CONFIG_CRYPTO_XTS) += xts.o
++obj-$(CONFIG_CRYPTO_CTR) += ctr.o
++obj-$(CONFIG_CRYPTO_GCM) += gcm.o
++obj-$(CONFIG_CRYPTO_CCM) += ccm.o
+ obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
+ obj-$(CONFIG_CRYPTO_DES) += des_generic.o
+ obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
+@@ -48,10 +56,12 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
+ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
+ obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
+ obj-$(CONFIG_CRYPTO_SEED) += seed.o
++obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
+ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
+ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
+ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
+ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
++obj-$(CONFIG_CRYPTO_LZO) += lzo.o
  
- 		default:
- 			return -ENODEV;
-+		}
-+		break;
-+
-+	default:
-+		return -ENODEV;
- 	}
+ obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
  
- 	init_sysfs();
-@@ -469,7 +454,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
- 	return 0;
- }
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index 2731acb..3bcb099 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -13,14 +13,18 @@
+  *
+  */
  
--
- void op_nmi_exit(void)
+-#include <crypto/algapi.h>
+-#include <linux/errno.h>
++#include <crypto/internal/skcipher.h>
++#include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/rtnetlink.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/seq_file.h>
+ 
++#include "internal.h"
++
+ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
+ 			    unsigned int keylen)
  {
- 	if (using_nmi)
-diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
-index 8627463..52deabc 100644
---- a/arch/x86/pci/common.c
-+++ b/arch/x86/pci/common.c
-@@ -109,6 +109,19 @@ static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
- 	}
+@@ -66,6 +70,16 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
+ 	return alg->cra_ctxsize;
  }
  
-+static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
++int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
 +{
-+	struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];
++	return crypto_ablkcipher_encrypt(&req->creq);
++}
 +
-+	if (rom_r->parent)
-+		return;
-+	if (rom_r->start)
-+		/* we deal with BIOS assigned ROM later */
-+		return;
-+	if (!(pci_probe & PCI_ASSIGN_ROMS))
-+		rom_r->start = rom_r->end = rom_r->flags = 0;
++int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
++{
++	return crypto_ablkcipher_decrypt(&req->creq);
 +}
 +
- /*
-  *  Called after each bus is probed, but before its children
-  *  are examined.
-@@ -116,8 +129,12 @@ static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
- 
- void __devinit  pcibios_fixup_bus(struct pci_bus *b)
+ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
+ 				      u32 mask)
  {
-+	struct pci_dev *dev;
-+
- 	pcibios_fixup_ghosts(b);
- 	pci_read_bridge_bases(b);
-+	list_for_each_entry(dev, &b->devices, bus_list)
-+		pcibios_fixup_device_resources(dev);
- }
- 
- /*
-diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
-index 6cff66d..cb63007 100644
---- a/arch/x86/pci/fixup.c
-+++ b/arch/x86/pci/fixup.c
-@@ -19,7 +19,7 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d)
+@@ -78,6 +92,11 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
+ 	crt->setkey = setkey;
+ 	crt->encrypt = alg->encrypt;
+ 	crt->decrypt = alg->decrypt;
++	if (!alg->ivsize) {
++		crt->givencrypt = skcipher_null_givencrypt;
++		crt->givdecrypt = skcipher_null_givdecrypt;
++	}
++	crt->base = __crypto_ablkcipher_cast(tfm);
+ 	crt->ivsize = alg->ivsize;
  
- 	printk(KERN_WARNING "PCI: Searching for i450NX host bridges on %s\n", pci_name(d));
- 	reg = 0xd0;
--	for(pxb=0; pxb<2; pxb++) {
-+	for(pxb = 0; pxb < 2; pxb++) {
- 		pci_read_config_byte(d, reg++, &busno);
- 		pci_read_config_byte(d, reg++, &suba);
- 		pci_read_config_byte(d, reg++, &subb);
-@@ -56,7 +56,7 @@ static void __devinit  pci_fixup_umc_ide(struct pci_dev *d)
- 	int i;
+ 	return 0;
+@@ -90,10 +109,13 @@ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
+ 	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
  
- 	printk(KERN_WARNING "PCI: Fixing base address flags for device %s\n", pci_name(d));
--	for(i=0; i<4; i++)
-+	for(i = 0; i < 4; i++)
- 		d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
+ 	seq_printf(m, "type         : ablkcipher\n");
++	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
++					     "yes" : "no");
+ 	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
+ 	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
+ 	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
+ 	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
++	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
  }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
-@@ -127,7 +127,7 @@ static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
- 		   NB latency to zero */
- 		pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);
  
--		where = 0x95; /* the memory write queue timer register is 
-+		where = 0x95; /* the memory write queue timer register is
- 				different for the KT266x's: 0x95 not 0x55 */
- 	} else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
- 			(d->revision == VIA_8363_KL133_REVISION_ID ||
-@@ -230,7 +230,7 @@ static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int wh
+ const struct crypto_type crypto_ablkcipher_type = {
+@@ -105,5 +127,220 @@ const struct crypto_type crypto_ablkcipher_type = {
+ };
+ EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
  
- 	if ((offset) && (where == offset))
- 		value = value & 0xfffffffc;
--	
++static int no_givdecrypt(struct skcipher_givcrypt_request *req)
++{
++	return -ENOSYS;
++}
 +
- 	return raw_pci_ops->write(0, bus->number, devfn, where, size, value);
- }
- 
-@@ -271,8 +271,8 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
- 		 * after hot-remove, the pbus->devices is empty and this code
- 		 * will set the offsets to zero and the bus ops to parent's bus
- 		 * ops, which is unmodified.
--	 	 */
--		for (i= GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
-+		 */
-+		for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
- 			quirk_aspm_offset[i] = 0;
++static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
++				      u32 mask)
++{
++	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
++	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
++
++	if (alg->ivsize > PAGE_SIZE / 8)
++		return -EINVAL;
++
++	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
++		      alg->setkey : setkey;
++	crt->encrypt = alg->encrypt;
++	crt->decrypt = alg->decrypt;
++	crt->givencrypt = alg->givencrypt;
++	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
++	crt->base = __crypto_ablkcipher_cast(tfm);
++	crt->ivsize = alg->ivsize;
++
++	return 0;
++}
++
++static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
++	__attribute__ ((unused));
++static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
++{
++	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
++
++	seq_printf(m, "type         : givcipher\n");
++	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
++					     "yes" : "no");
++	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
++	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
++	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
++	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
++	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
++}
++
++const struct crypto_type crypto_givcipher_type = {
++	.ctxsize = crypto_ablkcipher_ctxsize,
++	.init = crypto_init_givcipher_ops,
++#ifdef CONFIG_PROC_FS
++	.show = crypto_givcipher_show,
++#endif
++};
++EXPORT_SYMBOL_GPL(crypto_givcipher_type);
++
++const char *crypto_default_geniv(const struct crypto_alg *alg)
++{
++	return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
++}
++
++static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
++{
++	struct rtattr *tb[3];
++	struct {
++		struct rtattr attr;
++		struct crypto_attr_type data;
++	} ptype;
++	struct {
++		struct rtattr attr;
++		struct crypto_attr_alg data;
++	} palg;
++	struct crypto_template *tmpl;
++	struct crypto_instance *inst;
++	struct crypto_alg *larval;
++	const char *geniv;
++	int err;
++
++	larval = crypto_larval_lookup(alg->cra_driver_name,
++				      CRYPTO_ALG_TYPE_GIVCIPHER,
++				      CRYPTO_ALG_TYPE_MASK);
++	err = PTR_ERR(larval);
++	if (IS_ERR(larval))
++		goto out;
++
++	err = -EAGAIN;
++	if (!crypto_is_larval(larval))
++		goto drop_larval;
++
++	ptype.attr.rta_len = sizeof(ptype);
++	ptype.attr.rta_type = CRYPTOA_TYPE;
++	ptype.data.type = type | CRYPTO_ALG_GENIV;
++	/* GENIV tells the template that we're making a default geniv. */
++	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
++	tb[0] = &ptype.attr;
++
++	palg.attr.rta_len = sizeof(palg);
++	palg.attr.rta_type = CRYPTOA_ALG;
++	/* Must use the exact name to locate ourselves. */
++	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
++	tb[1] = &palg.attr;
++
++	tb[2] = NULL;
++
++	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
++	    CRYPTO_ALG_TYPE_BLKCIPHER)
++		geniv = alg->cra_blkcipher.geniv;
++	else
++		geniv = alg->cra_ablkcipher.geniv;
++
++	if (!geniv)
++		geniv = crypto_default_geniv(alg);
++
++	tmpl = crypto_lookup_template(geniv);
++	err = -ENOENT;
++	if (!tmpl)
++		goto kill_larval;
++
++	inst = tmpl->alloc(tb);
++	err = PTR_ERR(inst);
++	if (IS_ERR(inst))
++		goto put_tmpl;
++
++	if ((err = crypto_register_instance(tmpl, inst))) {
++		tmpl->free(inst);
++		goto put_tmpl;
++	}
++
++	/* Redo the lookup to use the instance we just registered. */
++	err = -EAGAIN;
++
++put_tmpl:
++	crypto_tmpl_put(tmpl);
++kill_larval:
++	crypto_larval_kill(larval);
++drop_larval:
++	crypto_mod_put(larval);
++out:
++	crypto_mod_put(alg);
++	return err;
++}
++
++static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
++						 u32 mask)
++{
++	struct crypto_alg *alg;
++
++	alg = crypto_alg_mod_lookup(name, type, mask);
++	if (IS_ERR(alg))
++		return alg;
++
++	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
++	    CRYPTO_ALG_TYPE_GIVCIPHER)
++		return alg;
++
++	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
++	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
++					  alg->cra_ablkcipher.ivsize))
++		return alg;
++
++	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
++}
++
++int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
++			 u32 type, u32 mask)
++{
++	struct crypto_alg *alg;
++	int err;
++
++	type = crypto_skcipher_type(type);
++	mask = crypto_skcipher_mask(mask);
++
++	alg = crypto_lookup_skcipher(name, type, mask);
++	if (IS_ERR(alg))
++		return PTR_ERR(alg);
++
++	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
++	crypto_mod_put(alg);
++	return err;
++}
++EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
++
++struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
++						  u32 type, u32 mask)
++{
++	struct crypto_tfm *tfm;
++	int err;
++
++	type = crypto_skcipher_type(type);
++	mask = crypto_skcipher_mask(mask);
++
++	for (;;) {
++		struct crypto_alg *alg;
++
++		alg = crypto_lookup_skcipher(alg_name, type, mask);
++		if (IS_ERR(alg)) {
++			err = PTR_ERR(alg);
++			goto err;
++		}
++
++		tfm = __crypto_alloc_tfm(alg, type, mask);
++		if (!IS_ERR(tfm))
++			return __crypto_ablkcipher_cast(tfm);
++
++		crypto_mod_put(alg);
++		err = PTR_ERR(tfm);
++
++err:
++		if (err != -EAGAIN)
++			break;
++		if (signal_pending(current)) {
++			err = -EINTR;
++			break;
++		}
++	}
++
++	return ERR_PTR(err);
++}
++EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
++
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Asynchronous block chaining cipher type");
+diff --git a/crypto/aead.c b/crypto/aead.c
+index 84a3501..3a6f3f5 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -12,14 +12,17 @@
+  *
+  */
  
- 		pbus->ops = pbus->parent->ops;
-@@ -286,17 +286,17 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
- 		list_for_each_entry(dev, &pbus->devices, bus_list) {
- 			/* There are 0 to 8 devices attached to this bus */
- 			cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP);
--			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)]= cap_base + 0x10;
-+			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10;
- 		}
- 		pbus->ops = &quirk_pcie_aspm_ops;
- 	}
- }
--DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA,	pcie_rootport_aspm_quirk );
--DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA1,	pcie_rootport_aspm_quirk );
--DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PB,	pcie_rootport_aspm_quirk );
--DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PB1,	pcie_rootport_aspm_quirk );
--DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PC,	pcie_rootport_aspm_quirk );
--DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PC1,	pcie_rootport_aspm_quirk );
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA,	pcie_rootport_aspm_quirk);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PA1,	pcie_rootport_aspm_quirk);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PB,	pcie_rootport_aspm_quirk);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PB1,	pcie_rootport_aspm_quirk);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PC,	pcie_rootport_aspm_quirk);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_MCH_PC1,	pcie_rootport_aspm_quirk);
+-#include <crypto/algapi.h>
+-#include <linux/errno.h>
++#include <crypto/internal/aead.h>
++#include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/rtnetlink.h>
+ #include <linux/slab.h>
+ #include <linux/seq_file.h>
  
- /*
-  * Fixup to mark boot BIOS video selected by BIOS before it changes
-@@ -336,8 +336,8 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
- 		 * PCI header type NORMAL.
- 		 */
- 		if (bridge
--		    &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE)
--		       ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) {
-+		    && ((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE)
-+		       || (bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) {
- 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
- 						&config);
- 			if (!(config & PCI_BRIDGE_CTL_VGA))
-diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
-index 88d8f5c..ed07ce6 100644
---- a/arch/x86/pci/irq.c
-+++ b/arch/x86/pci/irq.c
-@@ -200,6 +200,7 @@ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++#include "internal.h"
++
+ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
+ 			    unsigned int keylen)
  {
- 	static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
- 
-+	WARN_ON_ONCE(pirq >= 16);
- 	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
+@@ -53,25 +56,54 @@ static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+ 	return aead->setkey(tfm, key, keylen);
  }
  
-@@ -207,7 +208,8 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
- {
- 	static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
- 	unsigned int val = irqmap[irq];
--		
++int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
++{
++	struct aead_tfm *crt = crypto_aead_crt(tfm);
++	int err;
 +
-+	WARN_ON_ONCE(pirq >= 16);
- 	if (val) {
- 		write_config_nybble(router, 0x48, pirq-1, val);
- 		return 1;
-@@ -257,12 +259,16 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
- static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
- {
- 	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
++	if (authsize > crypto_aead_alg(tfm)->maxauthsize)
++		return -EINVAL;
 +
-+	WARN_ON_ONCE(pirq >= 5);
- 	return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
- }
- 
- static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
- {
- 	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
++	if (crypto_aead_alg(tfm)->setauthsize) {
++		err = crypto_aead_alg(tfm)->setauthsize(crt->base, authsize);
++		if (err)
++			return err;
++	}
 +
-+	WARN_ON_ONCE(pirq >= 5);
- 	write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
- 	return 1;
- }
-@@ -275,12 +281,16 @@ static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq
- static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
- {
- 	static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++	crypto_aead_crt(crt->base)->authsize = authsize;
++	crt->authsize = authsize;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 +
-+	WARN_ON_ONCE(pirq >= 4);
- 	return read_config_nybble(router,0x43, pirqmap[pirq-1]);
- }
- 
- static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+ static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type,
+ 					u32 mask)
  {
- 	static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+
-+	WARN_ON_ONCE(pirq >= 4);
- 	write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
- 	return 1;
+ 	return alg->cra_ctxsize;
  }
-@@ -419,6 +429,7 @@ static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
- 
- static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
- {
-+	WARN_ON_ONCE(pirq >= 9);
- 	if (pirq > 8) {
- 		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
- 		return 0;
-@@ -428,6 +439,7 @@ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  
- static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
- {
-+	WARN_ON_ONCE(pirq >= 9);
- 	if (pirq > 8) {
- 		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
- 		return 0;
-@@ -449,14 +461,14 @@ static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
-  */
- static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++static int no_givcrypt(struct aead_givcrypt_request *req)
++{
++	return -ENOSYS;
++}
++
+ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
  {
--	outb_p(pirq, 0xc00);
-+	outb(pirq, 0xc00);
- 	return inb(0xc01) & 0xf;
- }
+ 	struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
+ 	struct aead_tfm *crt = &tfm->crt_aead;
  
- static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
- {
--	outb_p(pirq, 0xc00);
--	outb_p(irq, 0xc01);
-+	outb(pirq, 0xc00);
-+	outb(irq, 0xc01);
- 	return 1;
- }
+-	if (max(alg->authsize, alg->ivsize) > PAGE_SIZE / 8)
++	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
+ 		return -EINVAL;
  
-diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
-index 998fd3e..efcf620 100644
---- a/arch/x86/power/cpu.c
-+++ b/arch/x86/power/cpu.c
-@@ -19,7 +19,7 @@ unsigned long saved_context_esp, saved_context_ebp;
- unsigned long saved_context_esi, saved_context_edi;
- unsigned long saved_context_eflags;
+-	crt->setkey = setkey;
++	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
++		      alg->setkey : setkey;
+ 	crt->encrypt = alg->encrypt;
+ 	crt->decrypt = alg->decrypt;
++	crt->givencrypt = alg->givencrypt ?: no_givcrypt;
++	crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
++	crt->base = __crypto_aead_cast(tfm);
+ 	crt->ivsize = alg->ivsize;
+-	crt->authsize = alg->authsize;
++	crt->authsize = alg->maxauthsize;
  
--void __save_processor_state(struct saved_context *ctxt)
-+static void __save_processor_state(struct saved_context *ctxt)
- {
- 	mtrr_save_fixed_ranges(NULL);
- 	kernel_fpu_begin();
-@@ -74,19 +74,19 @@ static void fix_processor_context(void)
- 	/*
- 	 * Now maybe reload the debug registers
- 	 */
--	if (current->thread.debugreg[7]){
--		set_debugreg(current->thread.debugreg[0], 0);
--		set_debugreg(current->thread.debugreg[1], 1);
--		set_debugreg(current->thread.debugreg[2], 2);
--		set_debugreg(current->thread.debugreg[3], 3);
-+	if (current->thread.debugreg7) {
-+		set_debugreg(current->thread.debugreg0, 0);
-+		set_debugreg(current->thread.debugreg1, 1);
-+		set_debugreg(current->thread.debugreg2, 2);
-+		set_debugreg(current->thread.debugreg3, 3);
- 		/* no 4 and 5 */
--		set_debugreg(current->thread.debugreg[6], 6);
--		set_debugreg(current->thread.debugreg[7], 7);
-+		set_debugreg(current->thread.debugreg6, 6);
-+		set_debugreg(current->thread.debugreg7, 7);
- 	}
+ 	return 0;
+ }
+@@ -83,9 +115,12 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
+ 	struct aead_alg *aead = &alg->cra_aead;
  
+ 	seq_printf(m, "type         : aead\n");
++	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
++					     "yes" : "no");
+ 	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
+ 	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
+-	seq_printf(m, "authsize     : %u\n", aead->authsize);
++	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
++	seq_printf(m, "geniv        : %s\n", aead->geniv ?: "<built-in>");
  }
  
--void __restore_processor_state(struct saved_context *ctxt)
-+static void __restore_processor_state(struct saved_context *ctxt)
- {
- 	/*
- 	 * control registers
-diff --git a/arch/x86/vdso/.gitignore b/arch/x86/vdso/.gitignore
-index f8b69d8..60274d5 100644
---- a/arch/x86/vdso/.gitignore
-+++ b/arch/x86/vdso/.gitignore
-@@ -1 +1,6 @@
- vdso.lds
-+vdso-syms.lds
-+vdso32-syms.lds
-+vdso32-syscall-syms.lds
-+vdso32-sysenter-syms.lds
-+vdso32-int80-syms.lds
-diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
-index e7bff0f..d28dda5 100644
---- a/arch/x86/vdso/Makefile
-+++ b/arch/x86/vdso/Makefile
-@@ -1,39 +1,37 @@
- #
--# x86-64 vDSO.
-+# Building vDSO images for x86.
- #
+ const struct crypto_type crypto_aead_type = {
+@@ -97,5 +132,358 @@ const struct crypto_type crypto_aead_type = {
+ };
+ EXPORT_SYMBOL_GPL(crypto_aead_type);
  
-+VDSO64-$(CONFIG_X86_64)		:= y
-+VDSO32-$(CONFIG_X86_32)		:= y
-+VDSO32-$(CONFIG_COMPAT)		:= y
++static int aead_null_givencrypt(struct aead_givcrypt_request *req)
++{
++	return crypto_aead_encrypt(&req->areq);
++}
 +
-+vdso-install-$(VDSO64-y)	+= vdso.so
-+vdso-install-$(VDSO32-y)	+= $(vdso32-y:=.so)
++static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
++{
++	return crypto_aead_decrypt(&req->areq);
++}
 +
++static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
++{
++	struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
++	struct aead_tfm *crt = &tfm->crt_aead;
 +
- # files to link into the vdso
--# vdso-start.o has to be first
--vobjs-y := vdso-start.o vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
-+vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
- 
- # files to link into kernel
--obj-y := vma.o vdso.o vdso-syms.o
-+obj-$(VDSO64-y)			+= vma.o vdso.o
-+obj-$(VDSO32-y)			+= vdso32.o vdso32-setup.o
- 
- vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
- 
- $(obj)/vdso.o: $(obj)/vdso.so
- 
--targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) vdso-syms.o
--
--# The DSO images are built using a special linker script.
--quiet_cmd_syscall = SYSCALL $@
--      cmd_syscall = $(CC) -m elf_x86_64 -nostdlib $(SYSCFLAGS_$(@F)) \
--		          -Wl,-T,$(filter-out FORCE,$^) -o $@
-+targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
- 
- export CPPFLAGS_vdso.lds += -P -C
- 
--vdso-flags = -fPIC -shared -Wl,-soname=linux-vdso.so.1 \
--		 $(call ld-option, -Wl$(comma)--hash-style=sysv) \
--		-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
--SYSCFLAGS_vdso.so = $(vdso-flags)
--SYSCFLAGS_vdso.so.dbg = $(vdso-flags)
-+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
-+		      	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
- 
- $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
- 
--$(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE
--
- $(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
--	$(call if_changed,syscall)
-+	$(call if_changed,vdso)
- 
- $(obj)/%.so: OBJCOPYFLAGS := -S
- $(obj)/%.so: $(obj)/%.so.dbg FORCE
-@@ -41,24 +39,96 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
- 
- CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
- 
--$(obj)/vclock_gettime.o: KBUILD_CFLAGS = $(CFL)
--$(obj)/vgetcpu.o: KBUILD_CFLAGS = $(CFL)
-+$(vobjs): KBUILD_CFLAGS = $(CFL)
++	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
++		return -EINVAL;
 +
-+targets += vdso-syms.lds
-+obj-$(VDSO64-y)			+= vdso-syms.lds
++	crt->setkey = setkey;
++	crt->encrypt = alg->encrypt;
++	crt->decrypt = alg->decrypt;
++	if (!alg->ivsize) {
++		crt->givencrypt = aead_null_givencrypt;
++		crt->givdecrypt = aead_null_givdecrypt;
++	}
++	crt->base = __crypto_aead_cast(tfm);
++	crt->ivsize = alg->ivsize;
++	crt->authsize = alg->maxauthsize;
 +
-+#
-+# Match symbols in the DSO that look like VDSO*; produce a file of constants.
-+#
-+sed-vdsosym := -e 's/^00*/0/' \
-+	-e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
-+quiet_cmd_vdsosym = VDSOSYM $@
-+      cmd_vdsosym = $(NM) $< | sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
++	return 0;
++}
 +
-+$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
-+	$(call if_changed,vdsosym)
++static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
++	__attribute__ ((unused));
++static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
++{
++	struct aead_alg *aead = &alg->cra_aead;
 +
-+#
-+# Build multiple 32-bit vDSO images to choose from at boot time.
-+#
-+obj-$(VDSO32-y)			+= vdso32-syms.lds
-+vdso32.so-$(CONFIG_X86_32)	+= int80
-+vdso32.so-$(CONFIG_COMPAT)	+= syscall
-+vdso32.so-$(VDSO32-y)		+= sysenter
++	seq_printf(m, "type         : nivaead\n");
++	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
++					     "yes" : "no");
++	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
++	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
++	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
++	seq_printf(m, "geniv        : %s\n", aead->geniv);
++}
 +
-+CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
++const struct crypto_type crypto_nivaead_type = {
++	.ctxsize = crypto_aead_ctxsize,
++	.init = crypto_init_nivaead_ops,
++#ifdef CONFIG_PROC_FS
++	.show = crypto_nivaead_show,
++#endif
++};
++EXPORT_SYMBOL_GPL(crypto_nivaead_type);
 +
-+# This makes sure the $(obj) subdirectory exists even though vdso32/
-+# is not a kbuild sub-make subdirectory.
-+override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
- 
--# We also create a special relocatable object that should mirror the symbol
--# table and layout of the linked DSO.  With ld -R we can then refer to
--# these symbols in the kernel code rather than hand-coded addresses.
--extra-y += vdso-syms.o
--$(obj)/built-in.o: $(obj)/vdso-syms.o
--$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
-+targets += vdso32/vdso32.lds
-+targets += $(vdso32.so-y:%=vdso32-%.so.dbg) $(vdso32.so-y:%=vdso32-%.so)
-+targets += vdso32/note.o $(vdso32.so-y:%=vdso32/%.o)
- 
--SYSCFLAGS_vdso-syms.o = -r -d
--$(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE
--	$(call if_changed,syscall)
-+extra-y	+= $(vdso32.so-y:%=vdso32-%.so)
- 
-+$(obj)/vdso32.o: $(vdso32.so-y:%=$(obj)/vdso32-%.so)
++static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
++			       const char *name, u32 type, u32 mask)
++{
++	struct crypto_alg *alg;
++	int err;
 +
-+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
-+$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
-+$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): asflags-$(CONFIG_X86_64) += -m32
++	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
++	type |= CRYPTO_ALG_TYPE_AEAD;
++	mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV;
 +
-+$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
-+					 $(obj)/vdso32/vdso32.lds \
-+					 $(obj)/vdso32/note.o \
-+					 $(obj)/vdso32/%.o
-+	$(call if_changed,vdso)
++	alg = crypto_alg_mod_lookup(name, type, mask);
++	if (IS_ERR(alg))
++		return PTR_ERR(alg);
 +
-+# Make vdso32-*-syms.lds from each image, and then make sure they match.
-+# The only difference should be that some do not define VDSO32_SYSENTER_RETURN.
++	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
++	crypto_mod_put(alg);
++	return err;
++}
 +
-+targets += vdso32-syms.lds $(vdso32.so-y:%=vdso32-%-syms.lds)
++struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
++					 struct rtattr **tb, u32 type,
++					 u32 mask)
++{
++	const char *name;
++	struct crypto_aead_spawn *spawn;
++	struct crypto_attr_type *algt;
++	struct crypto_instance *inst;
++	struct crypto_alg *alg;
++	int err;
 +
-+quiet_cmd_vdso32sym = VDSOSYM $@
-+define cmd_vdso32sym
-+	if LC_ALL=C sort -u $(filter-out FORCE,$^) > $(@D)/.tmp_$(@F) && \
-+	   $(foreach H,$(filter-out FORCE,$^),\
-+		     if grep -q VDSO32_SYSENTER_RETURN $H; \
-+		     then diff -u $(@D)/.tmp_$(@F) $H; \
-+		     else sed /VDSO32_SYSENTER_RETURN/d $(@D)/.tmp_$(@F) | \
-+			  diff -u - $H; fi &&) : ;\
-+	then mv -f $(@D)/.tmp_$(@F) $@; \
-+	else rm -f $(@D)/.tmp_$(@F); exit 1; \
-+	fi
-+endef
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
++		return ERR_PTR(err);
 +
-+$(obj)/vdso32-syms.lds: $(vdso32.so-y:%=$(obj)/vdso32-%-syms.lds) FORCE
-+	$(call if_changed,vdso32sym)
++	if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
++	    algt->mask)
++		return ERR_PTR(-EINVAL);
 +
-+#
-+# The DSO images are built using a special linker script.
-+#
-+quiet_cmd_vdso = VDSO    $@
-+      cmd_vdso = $(CC) -nostdlib -o $@ \
-+		       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-+		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
++	name = crypto_attr_alg_name(tb[1]);
++	err = PTR_ERR(name);
++	if (IS_ERR(name))
++		return ERR_PTR(err);
 +
-+VDSO_LDFLAGS = -fPIC -shared $(call ld-option, -Wl$(comma)--hash-style=sysv)
++	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
++	if (!inst)
++		return ERR_PTR(-ENOMEM);
 +
-+#
-+# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
-+#
- quiet_cmd_vdso_install = INSTALL $@
-       cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
--vdso.so:
-+$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
- 	@mkdir -p $(MODLIB)/vdso
- 	$(call cmd,vdso_install)
- 
--vdso_install: vdso.so
-+PHONY += vdso_install $(vdso-install-y)
-+vdso_install: $(vdso-install-y)
++	spawn = crypto_instance_ctx(inst);
 +
-+clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80*
-diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
-index 5b54cdf..23476c2 100644
---- a/arch/x86/vdso/vclock_gettime.c
-+++ b/arch/x86/vdso/vclock_gettime.c
-@@ -19,7 +19,6 @@
- #include <asm/hpet.h>
- #include <asm/unistd.h>
- #include <asm/io.h>
--#include <asm/vgtod.h>
- #include "vextern.h"
- 
- #define gtod vdso_vsyscall_gtod_data
-diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
-new file mode 100644
-index 0000000..634a2cf
---- /dev/null
-+++ b/arch/x86/vdso/vdso-layout.lds.S
-@@ -0,0 +1,64 @@
-+/*
-+ * Linker script for vDSO.  This is an ELF shared object prelinked to
-+ * its virtual address, and with only one read-only segment.
-+ * This script controls its layout.
-+ */
++	/* Ignore async algorithms if necessary. */
++	mask |= crypto_requires_sync(algt->type, algt->mask);
 +
-+SECTIONS
++	crypto_set_aead_spawn(spawn, inst);
++	err = crypto_grab_nivaead(spawn, name, type, mask);
++	if (err)
++		goto err_free_inst;
++
++	alg = crypto_aead_spawn_alg(spawn);
++
++	err = -EINVAL;
++	if (!alg->cra_aead.ivsize)
++		goto err_drop_alg;
++
++	/*
++	 * This is only true if we're constructing an algorithm with its
++	 * default IV generator.  For the default generator we elide the
++	 * template name and double-check the IV generator.
++	 */
++	if (algt->mask & CRYPTO_ALG_GENIV) {
++		if (strcmp(tmpl->name, alg->cra_aead.geniv))
++			goto err_drop_alg;
++
++		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
++		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
++		       CRYPTO_MAX_ALG_NAME);
++	} else {
++		err = -ENAMETOOLONG;
++		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
++			     "%s(%s)", tmpl->name, alg->cra_name) >=
++		    CRYPTO_MAX_ALG_NAME)
++			goto err_drop_alg;
++		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
++			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
++		    CRYPTO_MAX_ALG_NAME)
++			goto err_drop_alg;
++	}
++
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV;
++	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
++	inst->alg.cra_priority = alg->cra_priority;
++	inst->alg.cra_blocksize = alg->cra_blocksize;
++	inst->alg.cra_alignmask = alg->cra_alignmask;
++	inst->alg.cra_type = &crypto_aead_type;
++
++	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
++	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
++	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
++
++	inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
++	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
++	inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt;
++	inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt;
++
++out:
++	return inst;
++
++err_drop_alg:
++	crypto_drop_aead(spawn);
++err_free_inst:
++	kfree(inst);
++	inst = ERR_PTR(err);
++	goto out;
++}
++EXPORT_SYMBOL_GPL(aead_geniv_alloc);
++
++void aead_geniv_free(struct crypto_instance *inst)
 +{
-+	. = VDSO_PRELINK + SIZEOF_HEADERS;
++	crypto_drop_aead(crypto_instance_ctx(inst));
++	kfree(inst);
++}
++EXPORT_SYMBOL_GPL(aead_geniv_free);
 +
-+	.hash		: { *(.hash) }			:text
-+	.gnu.hash	: { *(.gnu.hash) }
-+	.dynsym		: { *(.dynsym) }
-+	.dynstr		: { *(.dynstr) }
-+	.gnu.version	: { *(.gnu.version) }
-+	.gnu.version_d	: { *(.gnu.version_d) }
-+	.gnu.version_r	: { *(.gnu.version_r) }
++int aead_geniv_init(struct crypto_tfm *tfm)
++{
++	struct crypto_instance *inst = (void *)tfm->__crt_alg;
++	struct crypto_aead *aead;
 +
-+	.note		: { *(.note.*) }		:text	:note
++	aead = crypto_spawn_aead(crypto_instance_ctx(inst));
++	if (IS_ERR(aead))
++		return PTR_ERR(aead);
 +
-+	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
-+	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
++	tfm->crt_aead.base = aead;
++	tfm->crt_aead.reqsize += crypto_aead_reqsize(aead);
 +
-+	.dynamic	: { *(.dynamic) }		:text	:dynamic
++	return 0;
++}
++EXPORT_SYMBOL_GPL(aead_geniv_init);
 +
-+	.rodata		: { *(.rodata*) }		:text
-+	.data		: {
-+	      *(.data*)
-+	      *(.sdata*)
-+	      *(.got.plt) *(.got)
-+	      *(.gnu.linkonce.d.*)
-+	      *(.bss*)
-+	      *(.dynbss*)
-+	      *(.gnu.linkonce.b.*)
++void aead_geniv_exit(struct crypto_tfm *tfm)
++{
++	crypto_free_aead(tfm->crt_aead.base);
++}
++EXPORT_SYMBOL_GPL(aead_geniv_exit);
++
++static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
++{
++	struct rtattr *tb[3];
++	struct {
++		struct rtattr attr;
++		struct crypto_attr_type data;
++	} ptype;
++	struct {
++		struct rtattr attr;
++		struct crypto_attr_alg data;
++	} palg;
++	struct crypto_template *tmpl;
++	struct crypto_instance *inst;
++	struct crypto_alg *larval;
++	const char *geniv;
++	int err;
++
++	larval = crypto_larval_lookup(alg->cra_driver_name,
++				      CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
++				      CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
++	err = PTR_ERR(larval);
++	if (IS_ERR(larval))
++		goto out;
++
++	err = -EAGAIN;
++	if (!crypto_is_larval(larval))
++		goto drop_larval;
++
++	ptype.attr.rta_len = sizeof(ptype);
++	ptype.attr.rta_type = CRYPTOA_TYPE;
++	ptype.data.type = type | CRYPTO_ALG_GENIV;
++	/* GENIV tells the template that we're making a default geniv. */
++	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
++	tb[0] = &ptype.attr;
++
++	palg.attr.rta_len = sizeof(palg);
++	palg.attr.rta_type = CRYPTOA_ALG;
++	/* Must use the exact name to locate ourselves. */
++	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
++	tb[1] = &palg.attr;
++
++	tb[2] = NULL;
++
++	geniv = alg->cra_aead.geniv;
++
++	tmpl = crypto_lookup_template(geniv);
++	err = -ENOENT;
++	if (!tmpl)
++		goto kill_larval;
++
++	inst = tmpl->alloc(tb);
++	err = PTR_ERR(inst);
++	if (IS_ERR(inst))
++		goto put_tmpl;
++
++	if ((err = crypto_register_instance(tmpl, inst))) {
++		tmpl->free(inst);
++		goto put_tmpl;
 +	}
 +
-+	.altinstructions	: { *(.altinstructions) }
-+	.altinstr_replacement	: { *(.altinstr_replacement) }
++	/* Redo the lookup to use the instance we just registered. */
++	err = -EAGAIN;
 +
-+	/*
-+	 * Align the actual code well away from the non-instruction data.
-+	 * This is the best thing for the I-cache.
-+	 */
-+	. = ALIGN(0x100);
++put_tmpl:
++	crypto_tmpl_put(tmpl);
++kill_larval:
++	crypto_larval_kill(larval);
++drop_larval:
++	crypto_mod_put(larval);
++out:
++	crypto_mod_put(alg);
++	return err;
++}
 +
-+	.text		: { *(.text*) }			:text	=0x90909090
++static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
++					     u32 mask)
++{
++	struct crypto_alg *alg;
++
++	alg = crypto_alg_mod_lookup(name, type, mask);
++	if (IS_ERR(alg))
++		return alg;
++
++	if (alg->cra_type == &crypto_aead_type)
++		return alg;
++
++	if (!alg->cra_aead.ivsize)
++		return alg;
++
++	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
 +}
 +
-+/*
-+ * Very old versions of ld do not recognize this name token; use the constant.
-+ */
-+#define PT_GNU_EH_FRAME	0x6474e550
++int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
++		     u32 type, u32 mask)
++{
++	struct crypto_alg *alg;
++	int err;
 +
-+/*
-+ * We must supply the ELF program headers explicitly to get just one
-+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
-+ */
-+PHDRS
++	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
++	type |= CRYPTO_ALG_TYPE_AEAD;
++	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
++	mask |= CRYPTO_ALG_TYPE_MASK;
++
++	alg = crypto_lookup_aead(name, type, mask);
++	if (IS_ERR(alg))
++		return PTR_ERR(alg);
++
++	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
++	crypto_mod_put(alg);
++	return err;
++}
++EXPORT_SYMBOL_GPL(crypto_grab_aead);
++
++struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
 +{
-+	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
-+	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
-+	note		PT_NOTE		FLAGS(4);		/* PF_R */
-+	eh_frame_hdr	PT_GNU_EH_FRAME;
++	struct crypto_tfm *tfm;
++	int err;
++
++	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
++	type |= CRYPTO_ALG_TYPE_AEAD;
++	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
++	mask |= CRYPTO_ALG_TYPE_MASK;
++
++	for (;;) {
++		struct crypto_alg *alg;
++
++		alg = crypto_lookup_aead(alg_name, type, mask);
++		if (IS_ERR(alg)) {
++			err = PTR_ERR(alg);
++			goto err;
++		}
++
++		tfm = __crypto_alloc_tfm(alg, type, mask);
++		if (!IS_ERR(tfm))
++			return __crypto_aead_cast(tfm);
++
++		crypto_mod_put(alg);
++		err = PTR_ERR(tfm);
++
++err:
++		if (err != -EAGAIN)
++			break;
++		if (signal_pending(current)) {
++			err = -EINTR;
++			break;
++		}
++	}
++
++	return ERR_PTR(err);
 +}
-diff --git a/arch/x86/vdso/vdso-start.S b/arch/x86/vdso/vdso-start.S
-deleted file mode 100644
-index 2dc2cdb..0000000
---- a/arch/x86/vdso/vdso-start.S
-+++ /dev/null
-@@ -1,2 +0,0 @@
--	.globl vdso_kernel_start
--vdso_kernel_start:
-diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
-index 667d324..4e5dd3b 100644
---- a/arch/x86/vdso/vdso.lds.S
-+++ b/arch/x86/vdso/vdso.lds.S
-@@ -1,79 +1,37 @@
- /*
-- * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
-- * object prelinked to its virtual address, and with only one read-only
-- * segment (that fits in one page).  This script controls its layout.
-+ * Linker script for 64-bit vDSO.
-+ * We #include the file to define the layout details.
-+ * Here we only choose the prelinked virtual address.
-+ *
-+ * This file defines the version script giving the user-exported symbols in
-+ * the DSO.  We can define local symbols here called VDSO* to make their
-+ * values visible using the asm-x86/vdso.h macros from the kernel proper.
++EXPORT_SYMBOL_GPL(crypto_alloc_aead);
++
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)");
+diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
+index 9401dca..cf30af7 100644
+--- a/crypto/aes_generic.c
++++ b/crypto/aes_generic.c
+@@ -47,11 +47,7 @@
+  * ---------------------------------------------------------------------------
   */
--#include <asm/asm-offsets.h>
--#include "voffset.h"
  
- #define VDSO_PRELINK 0xffffffffff700000
--
--SECTIONS
--{
--  . = VDSO_PRELINK + SIZEOF_HEADERS;
+-/* Some changes from the Gladman version:
+-    s/RIJNDAEL(e_key)/E_KEY/g
+-    s/RIJNDAEL(d_key)/D_KEY/g
+-*/
 -
--  .hash           : { *(.hash) }		:text
--  .gnu.hash       : { *(.gnu.hash) }
--  .dynsym         : { *(.dynsym) }
--  .dynstr         : { *(.dynstr) }
--  .gnu.version    : { *(.gnu.version) }
--  .gnu.version_d  : { *(.gnu.version_d) }
--  .gnu.version_r  : { *(.gnu.version_r) }
++#include <crypto/aes.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
+@@ -59,88 +55,46 @@
+ #include <linux/crypto.h>
+ #include <asm/byteorder.h>
+ 
+-#define AES_MIN_KEY_SIZE	16
+-#define AES_MAX_KEY_SIZE	32
 -
--  /* This linker script is used both with -r and with -shared.
--     For the layouts to match, we need to skip more than enough
--     space for the dynamic symbol table et al.  If this amount
--     is insufficient, ld -shared will barf.  Just increase it here.  */
--  . = VDSO_PRELINK + VDSO_TEXT_OFFSET;
+-#define AES_BLOCK_SIZE		16
 -
--  .text           : { *(.text*) }		:text
--  .rodata         : { *(.rodata*) }		:text
--  .data		  : {
--	*(.data*)
--	*(.sdata*)
--	*(.bss*)
--	*(.dynbss*)
--  }						:text
+-/*
+- * #define byte(x, nr) ((unsigned char)((x) >> (nr*8))) 
+- */
+-static inline u8
+-byte(const u32 x, const unsigned n)
++static inline u8 byte(const u32 x, const unsigned n)
+ {
+ 	return x >> (n << 3);
+ }
+ 
+-struct aes_ctx {
+-	int key_length;
+-	u32 buf[120];
+-};
 -
--  .altinstructions : { *(.altinstructions) }		:text
--  .altinstr_replacement  : { *(.altinstr_replacement) }	:text
+-#define E_KEY (&ctx->buf[0])
+-#define D_KEY (&ctx->buf[60])
 -
--  .note		  : { *(.note.*) }		:text :note
--  .eh_frame_hdr   : { *(.eh_frame_hdr) }	:text :eh_frame_hdr
--  .eh_frame       : { KEEP (*(.eh_frame)) }	:text
--  .dynamic        : { *(.dynamic) }		:text :dynamic
--  .useless        : {
--  	*(.got.plt) *(.got)
--	*(.gnu.linkonce.d.*)
--	*(.gnu.linkonce.b.*)
--  }						:text
--}
-+#include "vdso-layout.lds.S"
+ static u8 pow_tab[256] __initdata;
+ static u8 log_tab[256] __initdata;
+ static u8 sbx_tab[256] __initdata;
+ static u8 isb_tab[256] __initdata;
+ static u32 rco_tab[10];
+-static u32 ft_tab[4][256];
+-static u32 it_tab[4][256];
  
- /*
-- * We must supply the ELF program headers explicitly to get just one
-- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
-+ * This controls what userland symbols we export from the vDSO.
-  */
--PHDRS
--{
--  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
--  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
--  note PT_NOTE FLAGS(4); /* PF_R */
--  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
-+VERSION {
-+	LINUX_2.6 {
-+	global:
-+		clock_gettime;
-+		__vdso_clock_gettime;
-+		gettimeofday;
-+		__vdso_gettimeofday;
-+		getcpu;
-+		__vdso_getcpu;
-+	local: *;
-+	};
- }
+-static u32 fl_tab[4][256];
+-static u32 il_tab[4][256];
++u32 crypto_ft_tab[4][256];
++u32 crypto_fl_tab[4][256];
++u32 crypto_it_tab[4][256];
++u32 crypto_il_tab[4][256];
  
-+VDSO64_PRELINK = VDSO_PRELINK;
-+
- /*
-- * This controls what symbols we export from the DSO.
-+ * Define VDSO64_x for each VEXTERN(x), for use via VDSO64_SYMBOL.
-  */
--VERSION
--{
--  LINUX_2.6 {
--    global:
--	clock_gettime;
--	__vdso_clock_gettime;
--	gettimeofday;
--	__vdso_gettimeofday;
--	getcpu;
--	__vdso_getcpu;
--    local: *;
--  };
--}
-+#define VEXTERN(x)	VDSO64_ ## x = vdso_ ## x;
-+#include "vextern.h"
-+#undef	VEXTERN
-diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
-new file mode 100644
-index 0000000..348f134
---- /dev/null
-+++ b/arch/x86/vdso/vdso32-setup.c
-@@ -0,0 +1,444 @@
-+/*
-+ * (C) Copyright 2002 Linus Torvalds
-+ * Portions based on the vdso-randomization code from exec-shield:
-+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
-+ *
-+ * This file contains the needed initializations to support sysenter.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/smp.h>
-+#include <linux/thread_info.h>
-+#include <linux/sched.h>
-+#include <linux/gfp.h>
-+#include <linux/string.h>
-+#include <linux/elf.h>
-+#include <linux/mm.h>
-+#include <linux/err.h>
-+#include <linux/module.h>
+-static inline u8 __init
+-f_mult (u8 a, u8 b)
++EXPORT_SYMBOL_GPL(crypto_ft_tab);
++EXPORT_SYMBOL_GPL(crypto_fl_tab);
++EXPORT_SYMBOL_GPL(crypto_it_tab);
++EXPORT_SYMBOL_GPL(crypto_il_tab);
 +
-+#include <asm/cpufeature.h>
-+#include <asm/msr.h>
-+#include <asm/pgtable.h>
-+#include <asm/unistd.h>
-+#include <asm/elf.h>
-+#include <asm/tlbflush.h>
-+#include <asm/vdso.h>
-+#include <asm/proto.h>
++static inline u8 __init f_mult(u8 a, u8 b)
+ {
+ 	u8 aa = log_tab[a], cc = aa + log_tab[b];
+ 
+ 	return pow_tab[cc + (cc < aa ? 1 : 0)];
+ }
+ 
+-#define ff_mult(a,b)    (a && b ? f_mult(a, b) : 0)
+-
+-#define f_rn(bo, bi, n, k)					\
+-    bo[n] =  ft_tab[0][byte(bi[n],0)] ^				\
+-             ft_tab[1][byte(bi[(n + 1) & 3],1)] ^		\
+-             ft_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
+-             ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
+-
+-#define i_rn(bo, bi, n, k)					\
+-    bo[n] =  it_tab[0][byte(bi[n],0)] ^				\
+-             it_tab[1][byte(bi[(n + 3) & 3],1)] ^		\
+-             it_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
+-             it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
+-
+-#define ls_box(x)				\
+-    ( fl_tab[0][byte(x, 0)] ^			\
+-      fl_tab[1][byte(x, 1)] ^			\
+-      fl_tab[2][byte(x, 2)] ^			\
+-      fl_tab[3][byte(x, 3)] )
+-
+-#define f_rl(bo, bi, n, k)					\
+-    bo[n] =  fl_tab[0][byte(bi[n],0)] ^				\
+-             fl_tab[1][byte(bi[(n + 1) & 3],1)] ^		\
+-             fl_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
+-             fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
+-
+-#define i_rl(bo, bi, n, k)					\
+-    bo[n] =  il_tab[0][byte(bi[n],0)] ^				\
+-             il_tab[1][byte(bi[(n + 3) & 3],1)] ^		\
+-             il_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
+-             il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
+-
+-static void __init
+-gen_tabs (void)
++#define ff_mult(a, b)	(a && b ? f_mult(a, b) : 0)
 +
-+enum {
-+	VDSO_DISABLED = 0,
-+	VDSO_ENABLED = 1,
-+	VDSO_COMPAT = 2,
-+};
++static void __init gen_tabs(void)
+ {
+ 	u32 i, t;
+ 	u8 p, q;
+ 
+-	/* log and power tables for GF(2**8) finite field with
+-	   0x011b as modular polynomial - the simplest primitive
+-	   root is 0x03, used here to generate the tables */
++	/*
++	 * log and power tables for GF(2**8) finite field with
++	 * 0x011b as modular polynomial - the simplest primitive
++	 * root is 0x03, used here to generate the tables
++	 */
+ 
+ 	for (i = 0, p = 1; i < 256; ++i) {
+ 		pow_tab[i] = (u8) p;
+@@ -169,92 +123,119 @@ gen_tabs (void)
+ 		p = sbx_tab[i];
+ 
+ 		t = p;
+-		fl_tab[0][i] = t;
+-		fl_tab[1][i] = rol32(t, 8);
+-		fl_tab[2][i] = rol32(t, 16);
+-		fl_tab[3][i] = rol32(t, 24);
++		crypto_fl_tab[0][i] = t;
++		crypto_fl_tab[1][i] = rol32(t, 8);
++		crypto_fl_tab[2][i] = rol32(t, 16);
++		crypto_fl_tab[3][i] = rol32(t, 24);
+ 
+-		t = ((u32) ff_mult (2, p)) |
++		t = ((u32) ff_mult(2, p)) |
+ 		    ((u32) p << 8) |
+-		    ((u32) p << 16) | ((u32) ff_mult (3, p) << 24);
++		    ((u32) p << 16) | ((u32) ff_mult(3, p) << 24);
+ 
+-		ft_tab[0][i] = t;
+-		ft_tab[1][i] = rol32(t, 8);
+-		ft_tab[2][i] = rol32(t, 16);
+-		ft_tab[3][i] = rol32(t, 24);
++		crypto_ft_tab[0][i] = t;
++		crypto_ft_tab[1][i] = rol32(t, 8);
++		crypto_ft_tab[2][i] = rol32(t, 16);
++		crypto_ft_tab[3][i] = rol32(t, 24);
+ 
+ 		p = isb_tab[i];
+ 
+ 		t = p;
+-		il_tab[0][i] = t;
+-		il_tab[1][i] = rol32(t, 8);
+-		il_tab[2][i] = rol32(t, 16);
+-		il_tab[3][i] = rol32(t, 24);
+-
+-		t = ((u32) ff_mult (14, p)) |
+-		    ((u32) ff_mult (9, p) << 8) |
+-		    ((u32) ff_mult (13, p) << 16) |
+-		    ((u32) ff_mult (11, p) << 24);
+-
+-		it_tab[0][i] = t;
+-		it_tab[1][i] = rol32(t, 8);
+-		it_tab[2][i] = rol32(t, 16);
+-		it_tab[3][i] = rol32(t, 24);
++		crypto_il_tab[0][i] = t;
++		crypto_il_tab[1][i] = rol32(t, 8);
++		crypto_il_tab[2][i] = rol32(t, 16);
++		crypto_il_tab[3][i] = rol32(t, 24);
 +
-+#ifdef CONFIG_COMPAT_VDSO
-+#define VDSO_DEFAULT	VDSO_COMPAT
-+#else
-+#define VDSO_DEFAULT	VDSO_ENABLED
-+#endif
++		t = ((u32) ff_mult(14, p)) |
++		    ((u32) ff_mult(9, p) << 8) |
++		    ((u32) ff_mult(13, p) << 16) |
++		    ((u32) ff_mult(11, p) << 24);
 +
-+#ifdef CONFIG_X86_64
-+#define vdso_enabled			sysctl_vsyscall32
-+#define arch_setup_additional_pages	syscall32_setup_pages
-+#endif
++		crypto_it_tab[0][i] = t;
++		crypto_it_tab[1][i] = rol32(t, 8);
++		crypto_it_tab[2][i] = rol32(t, 16);
++		crypto_it_tab[3][i] = rol32(t, 24);
+ 	}
+ }
+ 
+-#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
+-
+-#define imix_col(y,x)       \
+-    u   = star_x(x);        \
+-    v   = star_x(u);        \
+-    w   = star_x(v);        \
+-    t   = w ^ (x);          \
+-   (y)  = u ^ v ^ w;        \
+-   (y) ^= ror32(u ^ t,  8) ^ \
+-          ror32(v ^ t, 16) ^ \
+-          ror32(t,24)
+-
+ /* initialise the key schedule from the user supplied key */
+ 
+-#define loop4(i)                                    \
+-{   t = ror32(t,  8); t = ls_box(t) ^ rco_tab[i];    \
+-    t ^= E_KEY[4 * i];     E_KEY[4 * i + 4] = t;    \
+-    t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t;    \
+-    t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t;    \
+-    t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t;    \
+-}
+-
+-#define loop6(i)                                    \
+-{   t = ror32(t,  8); t = ls_box(t) ^ rco_tab[i];    \
+-    t ^= E_KEY[6 * i];     E_KEY[6 * i + 6] = t;    \
+-    t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t;    \
+-    t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t;    \
+-    t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t;    \
+-    t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t;   \
+-    t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t;   \
+-}
+-
+-#define loop8(i)                                    \
+-{   t = ror32(t,  8); ; t = ls_box(t) ^ rco_tab[i];  \
+-    t ^= E_KEY[8 * i];     E_KEY[8 * i + 8] = t;    \
+-    t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t;    \
+-    t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t;   \
+-    t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t;   \
+-    t  = E_KEY[8 * i + 4] ^ ls_box(t);    \
+-    E_KEY[8 * i + 12] = t;                \
+-    t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t;   \
+-    t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t;   \
+-    t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t;   \
+-}
++#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
+ 
+-static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+-		       unsigned int key_len)
++#define imix_col(y,x)	do {		\
++	u	= star_x(x);		\
++	v	= star_x(u);		\
++	w	= star_x(v);		\
++	t	= w ^ (x);		\
++	(y)	= u ^ v ^ w;		\
++	(y)	^= ror32(u ^ t, 8) ^	\
++		ror32(v ^ t, 16) ^	\
++		ror32(t, 24);		\
++} while (0)
 +
-+/*
-+ * This is the difference between the prelinked addresses in the vDSO images
-+ * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
-+ * in the user address space.
-+ */
-+#define VDSO_ADDR_ADJUST	(VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
++#define ls_box(x)		\
++	crypto_fl_tab[0][byte(x, 0)] ^	\
++	crypto_fl_tab[1][byte(x, 1)] ^	\
++	crypto_fl_tab[2][byte(x, 2)] ^	\
++	crypto_fl_tab[3][byte(x, 3)]
 +
-+/*
-+ * Should the kernel map a VDSO page into processes and pass its
-+ * address down to glibc upon exec()?
-+ */
-+unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
++#define loop4(i)	do {		\
++	t = ror32(t, 8);		\
++	t = ls_box(t) ^ rco_tab[i];	\
++	t ^= ctx->key_enc[4 * i];		\
++	ctx->key_enc[4 * i + 4] = t;		\
++	t ^= ctx->key_enc[4 * i + 1];		\
++	ctx->key_enc[4 * i + 5] = t;		\
++	t ^= ctx->key_enc[4 * i + 2];		\
++	ctx->key_enc[4 * i + 6] = t;		\
++	t ^= ctx->key_enc[4 * i + 3];		\
++	ctx->key_enc[4 * i + 7] = t;		\
++} while (0)
 +
-+static int __init vdso_setup(char *s)
-+{
-+	vdso_enabled = simple_strtoul(s, NULL, 0);
++#define loop6(i)	do {		\
++	t = ror32(t, 8);		\
++	t = ls_box(t) ^ rco_tab[i];	\
++	t ^= ctx->key_enc[6 * i];		\
++	ctx->key_enc[6 * i + 6] = t;		\
++	t ^= ctx->key_enc[6 * i + 1];		\
++	ctx->key_enc[6 * i + 7] = t;		\
++	t ^= ctx->key_enc[6 * i + 2];		\
++	ctx->key_enc[6 * i + 8] = t;		\
++	t ^= ctx->key_enc[6 * i + 3];		\
++	ctx->key_enc[6 * i + 9] = t;		\
++	t ^= ctx->key_enc[6 * i + 4];		\
++	ctx->key_enc[6 * i + 10] = t;		\
++	t ^= ctx->key_enc[6 * i + 5];		\
++	ctx->key_enc[6 * i + 11] = t;		\
++} while (0)
 +
-+	return 1;
-+}
++#define loop8(i)	do {			\
++	t = ror32(t, 8);			\
++	t = ls_box(t) ^ rco_tab[i];		\
++	t ^= ctx->key_enc[8 * i];			\
++	ctx->key_enc[8 * i + 8] = t;			\
++	t ^= ctx->key_enc[8 * i + 1];			\
++	ctx->key_enc[8 * i + 9] = t;			\
++	t ^= ctx->key_enc[8 * i + 2];			\
++	ctx->key_enc[8 * i + 10] = t;			\
++	t ^= ctx->key_enc[8 * i + 3];			\
++	ctx->key_enc[8 * i + 11] = t;			\
++	t  = ctx->key_enc[8 * i + 4] ^ ls_box(t);	\
++	ctx->key_enc[8 * i + 12] = t;			\
++	t ^= ctx->key_enc[8 * i + 5];			\
++	ctx->key_enc[8 * i + 13] = t;			\
++	t ^= ctx->key_enc[8 * i + 6];			\
++	ctx->key_enc[8 * i + 14] = t;			\
++	t ^= ctx->key_enc[8 * i + 7];			\
++	ctx->key_enc[8 * i + 15] = t;			\
++} while (0)
 +
-+/*
-+ * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
-+ * behavior on both 64-bit and 32-bit kernels.
-+ * On 32-bit kernels, vdso=[012] means the same thing.
-+ */
-+__setup("vdso32=", vdso_setup);
++int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
++		unsigned int key_len)
+ {
+-	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	const __le32 *key = (const __le32 *)in_key;
+ 	u32 *flags = &tfm->crt_flags;
+-	u32 i, t, u, v, w;
++	u32 i, t, u, v, w, j;
+ 
+ 	if (key_len % 8) {
+ 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+@@ -263,95 +244,113 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ 
+ 	ctx->key_length = key_len;
+ 
+-	E_KEY[0] = le32_to_cpu(key[0]);
+-	E_KEY[1] = le32_to_cpu(key[1]);
+-	E_KEY[2] = le32_to_cpu(key[2]);
+-	E_KEY[3] = le32_to_cpu(key[3]);
++	ctx->key_dec[key_len + 24] = ctx->key_enc[0] = le32_to_cpu(key[0]);
++	ctx->key_dec[key_len + 25] = ctx->key_enc[1] = le32_to_cpu(key[1]);
++	ctx->key_dec[key_len + 26] = ctx->key_enc[2] = le32_to_cpu(key[2]);
++	ctx->key_dec[key_len + 27] = ctx->key_enc[3] = le32_to_cpu(key[3]);
+ 
+ 	switch (key_len) {
+ 	case 16:
+-		t = E_KEY[3];
++		t = ctx->key_enc[3];
+ 		for (i = 0; i < 10; ++i)
+-			loop4 (i);
++			loop4(i);
+ 		break;
+ 
+ 	case 24:
+-		E_KEY[4] = le32_to_cpu(key[4]);
+-		t = E_KEY[5] = le32_to_cpu(key[5]);
++		ctx->key_enc[4] = le32_to_cpu(key[4]);
++		t = ctx->key_enc[5] = le32_to_cpu(key[5]);
+ 		for (i = 0; i < 8; ++i)
+-			loop6 (i);
++			loop6(i);
+ 		break;
+ 
+ 	case 32:
+-		E_KEY[4] = le32_to_cpu(key[4]);
+-		E_KEY[5] = le32_to_cpu(key[5]);
+-		E_KEY[6] = le32_to_cpu(key[6]);
+-		t = E_KEY[7] = le32_to_cpu(key[7]);
++		ctx->key_enc[4] = le32_to_cpu(key[4]);
++		ctx->key_enc[5] = le32_to_cpu(key[5]);
++		ctx->key_enc[6] = le32_to_cpu(key[6]);
++		t = ctx->key_enc[7] = le32_to_cpu(key[7]);
+ 		for (i = 0; i < 7; ++i)
+-			loop8 (i);
++			loop8(i);
+ 		break;
+ 	}
+ 
+-	D_KEY[0] = E_KEY[0];
+-	D_KEY[1] = E_KEY[1];
+-	D_KEY[2] = E_KEY[2];
+-	D_KEY[3] = E_KEY[3];
++	ctx->key_dec[0] = ctx->key_enc[key_len + 24];
++	ctx->key_dec[1] = ctx->key_enc[key_len + 25];
++	ctx->key_dec[2] = ctx->key_enc[key_len + 26];
++	ctx->key_dec[3] = ctx->key_enc[key_len + 27];
+ 
+ 	for (i = 4; i < key_len + 24; ++i) {
+-		imix_col (D_KEY[i], E_KEY[i]);
++		j = key_len + 24 - (i & ~3) + (i & 3);
++		imix_col(ctx->key_dec[j], ctx->key_enc[i]);
+ 	}
+-
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(crypto_aes_set_key);
+ 
+ /* encrypt a block of text */
+ 
+-#define f_nround(bo, bi, k) \
+-    f_rn(bo, bi, 0, k);     \
+-    f_rn(bo, bi, 1, k);     \
+-    f_rn(bo, bi, 2, k);     \
+-    f_rn(bo, bi, 3, k);     \
+-    k += 4
+-
+-#define f_lround(bo, bi, k) \
+-    f_rl(bo, bi, 0, k);     \
+-    f_rl(bo, bi, 1, k);     \
+-    f_rl(bo, bi, 2, k);     \
+-    f_rl(bo, bi, 3, k)
++#define f_rn(bo, bi, n, k)	do {				\
++	bo[n] = crypto_ft_tab[0][byte(bi[n], 0)] ^			\
++		crypto_ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^		\
++		crypto_ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
++		crypto_ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n);	\
++} while (0)
 +
-+#ifdef CONFIG_X86_32
-+__setup_param("vdso=", vdso32_setup, vdso_setup, 0);
++#define f_nround(bo, bi, k)	do {\
++	f_rn(bo, bi, 0, k);	\
++	f_rn(bo, bi, 1, k);	\
++	f_rn(bo, bi, 2, k);	\
++	f_rn(bo, bi, 3, k);	\
++	k += 4;			\
++} while (0)
 +
-+EXPORT_SYMBOL_GPL(vdso_enabled);
-+#endif
++#define f_rl(bo, bi, n, k)	do {				\
++	bo[n] = crypto_fl_tab[0][byte(bi[n], 0)] ^			\
++		crypto_fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^		\
++		crypto_fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
++		crypto_fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n);	\
++} while (0)
 +
-+static __init void reloc_symtab(Elf32_Ehdr *ehdr,
-+				unsigned offset, unsigned size)
-+{
-+	Elf32_Sym *sym = (void *)ehdr + offset;
-+	unsigned nsym = size / sizeof(*sym);
-+	unsigned i;
++#define f_lround(bo, bi, k)	do {\
++	f_rl(bo, bi, 0, k);	\
++	f_rl(bo, bi, 1, k);	\
++	f_rl(bo, bi, 2, k);	\
++	f_rl(bo, bi, 3, k);	\
++} while (0)
+ 
+ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+-	const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
++	const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	const __le32 *src = (const __le32 *)in;
+ 	__le32 *dst = (__le32 *)out;
+ 	u32 b0[4], b1[4];
+-	const u32 *kp = E_KEY + 4;
++	const u32 *kp = ctx->key_enc + 4;
++	const int key_len = ctx->key_length;
+ 
+-	b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0];
+-	b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1];
+-	b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2];
+-	b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3];
++	b0[0] = le32_to_cpu(src[0]) ^ ctx->key_enc[0];
++	b0[1] = le32_to_cpu(src[1]) ^ ctx->key_enc[1];
++	b0[2] = le32_to_cpu(src[2]) ^ ctx->key_enc[2];
++	b0[3] = le32_to_cpu(src[3]) ^ ctx->key_enc[3];
+ 
+-	if (ctx->key_length > 24) {
+-		f_nround (b1, b0, kp);
+-		f_nround (b0, b1, kp);
++	if (key_len > 24) {
++		f_nround(b1, b0, kp);
++		f_nround(b0, b1, kp);
+ 	}
+ 
+-	if (ctx->key_length > 16) {
+-		f_nround (b1, b0, kp);
+-		f_nround (b0, b1, kp);
++	if (key_len > 16) {
++		f_nround(b1, b0, kp);
++		f_nround(b0, b1, kp);
+ 	}
+ 
+-	f_nround (b1, b0, kp);
+-	f_nround (b0, b1, kp);
+-	f_nround (b1, b0, kp);
+-	f_nround (b0, b1, kp);
+-	f_nround (b1, b0, kp);
+-	f_nround (b0, b1, kp);
+-	f_nround (b1, b0, kp);
+-	f_nround (b0, b1, kp);
+-	f_nround (b1, b0, kp);
+-	f_lround (b0, b1, kp);
++	f_nround(b1, b0, kp);
++	f_nround(b0, b1, kp);
++	f_nround(b1, b0, kp);
++	f_nround(b0, b1, kp);
++	f_nround(b1, b0, kp);
++	f_nround(b0, b1, kp);
++	f_nround(b1, b0, kp);
++	f_nround(b0, b1, kp);
++	f_nround(b1, b0, kp);
++	f_lround(b0, b1, kp);
+ 
+ 	dst[0] = cpu_to_le32(b0[0]);
+ 	dst[1] = cpu_to_le32(b0[1]);
+@@ -361,53 +360,69 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ 
+ /* decrypt a block of text */
+ 
+-#define i_nround(bo, bi, k) \
+-    i_rn(bo, bi, 0, k);     \
+-    i_rn(bo, bi, 1, k);     \
+-    i_rn(bo, bi, 2, k);     \
+-    i_rn(bo, bi, 3, k);     \
+-    k -= 4
+-
+-#define i_lround(bo, bi, k) \
+-    i_rl(bo, bi, 0, k);     \
+-    i_rl(bo, bi, 1, k);     \
+-    i_rl(bo, bi, 2, k);     \
+-    i_rl(bo, bi, 3, k)
++#define i_rn(bo, bi, n, k)	do {				\
++	bo[n] = crypto_it_tab[0][byte(bi[n], 0)] ^			\
++		crypto_it_tab[1][byte(bi[(n + 3) & 3], 1)] ^		\
++		crypto_it_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
++		crypto_it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n);	\
++} while (0)
 +
-+	for(i = 0; i < nsym; i++, sym++) {
-+		if (sym->st_shndx == SHN_UNDEF ||
-+		    sym->st_shndx == SHN_ABS)
-+			continue;  /* skip */
++#define i_nround(bo, bi, k)	do {\
++	i_rn(bo, bi, 0, k);	\
++	i_rn(bo, bi, 1, k);	\
++	i_rn(bo, bi, 2, k);	\
++	i_rn(bo, bi, 3, k);	\
++	k += 4;			\
++} while (0)
 +
-+		if (sym->st_shndx > SHN_LORESERVE) {
-+			printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
-+			       sym->st_shndx);
-+			continue;
-+		}
++#define i_rl(bo, bi, n, k)	do {			\
++	bo[n] = crypto_il_tab[0][byte(bi[n], 0)] ^		\
++	crypto_il_tab[1][byte(bi[(n + 3) & 3], 1)] ^		\
++	crypto_il_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
++	crypto_il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n);	\
++} while (0)
 +
-+		switch(ELF_ST_TYPE(sym->st_info)) {
-+		case STT_OBJECT:
-+		case STT_FUNC:
-+		case STT_SECTION:
-+		case STT_FILE:
-+			sym->st_value += VDSO_ADDR_ADJUST;
-+		}
-+	}
++#define i_lround(bo, bi, k)	do {\
++	i_rl(bo, bi, 0, k);	\
++	i_rl(bo, bi, 1, k);	\
++	i_rl(bo, bi, 2, k);	\
++	i_rl(bo, bi, 3, k);	\
++} while (0)
+ 
+ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+-	const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
++	const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	const __le32 *src = (const __le32 *)in;
+ 	__le32 *dst = (__le32 *)out;
+ 	u32 b0[4], b1[4];
+ 	const int key_len = ctx->key_length;
+-	const u32 *kp = D_KEY + key_len + 20;
++	const u32 *kp = ctx->key_dec + 4;
+ 
+-	b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24];
+-	b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25];
+-	b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26];
+-	b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27];
++	b0[0] = le32_to_cpu(src[0]) ^  ctx->key_dec[0];
++	b0[1] = le32_to_cpu(src[1]) ^  ctx->key_dec[1];
++	b0[2] = le32_to_cpu(src[2]) ^  ctx->key_dec[2];
++	b0[3] = le32_to_cpu(src[3]) ^  ctx->key_dec[3];
+ 
+ 	if (key_len > 24) {
+-		i_nround (b1, b0, kp);
+-		i_nround (b0, b1, kp);
++		i_nround(b1, b0, kp);
++		i_nround(b0, b1, kp);
+ 	}
+ 
+ 	if (key_len > 16) {
+-		i_nround (b1, b0, kp);
+-		i_nround (b0, b1, kp);
++		i_nround(b1, b0, kp);
++		i_nround(b0, b1, kp);
+ 	}
+ 
+-	i_nround (b1, b0, kp);
+-	i_nround (b0, b1, kp);
+-	i_nround (b1, b0, kp);
+-	i_nround (b0, b1, kp);
+-	i_nround (b1, b0, kp);
+-	i_nround (b0, b1, kp);
+-	i_nround (b1, b0, kp);
+-	i_nround (b0, b1, kp);
+-	i_nround (b1, b0, kp);
+-	i_lround (b0, b1, kp);
++	i_nround(b1, b0, kp);
++	i_nround(b0, b1, kp);
++	i_nround(b1, b0, kp);
++	i_nround(b0, b1, kp);
++	i_nround(b1, b0, kp);
++	i_nround(b0, b1, kp);
++	i_nround(b1, b0, kp);
++	i_nround(b0, b1, kp);
++	i_nround(b1, b0, kp);
++	i_lround(b0, b1, kp);
+ 
+ 	dst[0] = cpu_to_le32(b0[0]);
+ 	dst[1] = cpu_to_le32(b0[1]);
+@@ -415,14 +430,13 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ 	dst[3] = cpu_to_le32(b0[3]);
+ }
+ 
+-
+ static struct crypto_alg aes_alg = {
+ 	.cra_name		=	"aes",
+ 	.cra_driver_name	=	"aes-generic",
+ 	.cra_priority		=	100,
+ 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+ 	.cra_blocksize		=	AES_BLOCK_SIZE,
+-	.cra_ctxsize		=	sizeof(struct aes_ctx),
++	.cra_ctxsize		=	sizeof(struct crypto_aes_ctx),
+ 	.cra_alignmask		=	3,
+ 	.cra_module		=	THIS_MODULE,
+ 	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+@@ -430,9 +444,9 @@ static struct crypto_alg aes_alg = {
+ 		.cipher = {
+ 			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+ 			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+-			.cia_setkey	   	= 	aes_set_key,
+-			.cia_encrypt	 	=	aes_encrypt,
+-			.cia_decrypt	  	=	aes_decrypt
++			.cia_setkey		=	crypto_aes_set_key,
++			.cia_encrypt		=	aes_encrypt,
++			.cia_decrypt		=	aes_decrypt
+ 		}
+ 	}
+ };
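
With the key schedule moved into struct crypto_aes_ctx (key_enc plus a
forward-laid-out key_dec) and the tables exported as crypto_ft_tab,
crypto_fl_tab, crypto_it_tab and crypto_il_tab, optimized AES drivers
can reuse the generic expansion instead of carrying a private copy.  A
hypothetical arch driver -- my_aes_set_key() is a made-up name used only
for illustration -- could simply delegate:

	#include <crypto/aes.h>

	static int my_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
				  unsigned int key_len)
	{
		/* fills ctx->key_enc and ctx->key_dec in the shared layout */
		return crypto_aes_set_key(tfm, in_key, key_len);
	}
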
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 8383282..e65cb50 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -472,7 +472,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type)
+ }
+ EXPORT_SYMBOL_GPL(crypto_check_attr_type);
+ 
+-struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
++const char *crypto_attr_alg_name(struct rtattr *rta)
+ {
+ 	struct crypto_attr_alg *alga;
+ 
+@@ -486,7 +486,21 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+ 	alga = RTA_DATA(rta);
+ 	alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0;
+ 
+-	return crypto_alg_mod_lookup(alga->name, type, mask);
++	return alga->name;
 +}
++EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
 +
-+static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
++struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
 +{
-+	Elf32_Dyn *dyn = (void *)ehdr + offset;
-+
-+	for(; dyn->d_tag != DT_NULL; dyn++)
-+		switch(dyn->d_tag) {
-+		case DT_PLTGOT:
-+		case DT_HASH:
-+		case DT_STRTAB:
-+		case DT_SYMTAB:
-+		case DT_RELA:
-+		case DT_INIT:
-+		case DT_FINI:
-+		case DT_REL:
-+		case DT_DEBUG:
-+		case DT_JMPREL:
-+		case DT_VERSYM:
-+		case DT_VERDEF:
-+		case DT_VERNEED:
-+		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
-+			/* definitely pointers needing relocation */
-+			dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
-+			break;
++	const char *name;
++	int err;
 +
-+		case DT_ENCODING ... OLD_DT_LOOS-1:
-+		case DT_LOOS ... DT_HIOS-1:
-+			/* Tags above DT_ENCODING are pointers if
-+			   they're even */
-+			if (dyn->d_tag >= DT_ENCODING &&
-+			    (dyn->d_tag & 1) == 0)
-+				dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
-+			break;
++	name = crypto_attr_alg_name(rta);
++	err = PTR_ERR(name);
++	if (IS_ERR(name))
++		return ERR_PTR(err);
 +
-+		case DT_VERDEFNUM:
-+		case DT_VERNEEDNUM:
-+		case DT_FLAGS_1:
-+		case DT_RELACOUNT:
-+		case DT_RELCOUNT:
-+		case DT_VALRNGLO ... DT_VALRNGHI:
-+			/* definitely not pointers */
-+			break;
++	return crypto_alg_mod_lookup(name, type, mask);
+ }
+ EXPORT_SYMBOL_GPL(crypto_attr_alg);
+ 
+@@ -605,6 +619,53 @@ int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm)
+ }
+ EXPORT_SYMBOL_GPL(crypto_tfm_in_queue);
+ 
++static inline void crypto_inc_byte(u8 *a, unsigned int size)
++{
++	u8 *b = (a + size);
++	u8 c;
 +
-+		case OLD_DT_LOOS ... DT_LOOS-1:
-+		case DT_HIOS ... DT_VALRNGLO-1:
-+		default:
-+			if (dyn->d_tag > DT_ENCODING)
-+				printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
-+				       dyn->d_tag);
++	for (; size; size--) {
++		c = *--b + 1;
++		*b = c;
++		if (c)
 +			break;
-+		}
++	}
 +}
 +
-+static __init void relocate_vdso(Elf32_Ehdr *ehdr)
++void crypto_inc(u8 *a, unsigned int size)
 +{
-+	Elf32_Phdr *phdr;
-+	Elf32_Shdr *shdr;
-+	int i;
-+
-+	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
-+	       !elf_check_arch_ia32(ehdr) ||
-+	       ehdr->e_type != ET_DYN);
-+
-+	ehdr->e_entry += VDSO_ADDR_ADJUST;
-+
-+	/* rebase phdrs */
-+	phdr = (void *)ehdr + ehdr->e_phoff;
-+	for (i = 0; i < ehdr->e_phnum; i++) {
-+		phdr[i].p_vaddr += VDSO_ADDR_ADJUST;
++	__be32 *b = (__be32 *)(a + size);
++	u32 c;
 +
-+		/* relocate dynamic stuff */
-+		if (phdr[i].p_type == PT_DYNAMIC)
-+			reloc_dyn(ehdr, phdr[i].p_offset);
++	for (; size >= 4; size -= 4) {
++		c = be32_to_cpu(*--b) + 1;
++		*b = cpu_to_be32(c);
++		if (c)
++			return;
 +	}
 +
-+	/* rebase sections */
-+	shdr = (void *)ehdr + ehdr->e_shoff;
-+	for(i = 0; i < ehdr->e_shnum; i++) {
-+		if (!(shdr[i].sh_flags & SHF_ALLOC))
-+			continue;
-+
-+		shdr[i].sh_addr += VDSO_ADDR_ADJUST;
++	crypto_inc_byte(a, size);
++}
++EXPORT_SYMBOL_GPL(crypto_inc);
 +
-+		if (shdr[i].sh_type == SHT_SYMTAB ||
-+		    shdr[i].sh_type == SHT_DYNSYM)
-+			reloc_symtab(ehdr, shdr[i].sh_offset,
-+				     shdr[i].sh_size);
-+	}
++static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size)
++{
++	for (; size; size--)
++		*a++ ^= *b++;
 +}
 +
-+/*
-+ * These symbols are defined by vdso32.S to mark the bounds
-+ * of the ELF DSO images included therein.
-+ */
-+extern const char vdso32_default_start, vdso32_default_end;
-+extern const char vdso32_sysenter_start, vdso32_sysenter_end;
-+static struct page *vdso32_pages[1];
++void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
++{
++	u32 *a = (u32 *)dst;
++	u32 *b = (u32 *)src;
 +
-+#ifdef CONFIG_X86_64
++	for (; size >= 4; size -= 4)
++		*a++ ^= *b++;
 +
-+static int use_sysenter __read_mostly = -1;
++	crypto_xor_byte((u8 *)a, (u8 *)b, size);
++}
++EXPORT_SYMBOL_GPL(crypto_xor);
 +
-+#define	vdso32_sysenter()	(use_sysenter > 0)
+ static int __init crypto_algapi_init(void)
+ {
+ 	crypto_init_proc();
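
crypto_inc() above increments the buffer as a big-endian counter,
working 32-bit words from the end and falling back to byte arithmetic
for any leading remainder; crypto_xor() xors size bytes of src into dst
a word at a time with a byte-wise tail.  A small sketch of the intended
counter-mode style use (illustrative; ctr_step() is not part of the
patch):

	/* dst ^= keystream, then bump the 16-byte big-endian counter */
	static void ctr_step(u8 *dst, const u8 *keystream, u8 *ctrblk)
	{
		crypto_xor(dst, keystream, 16);
		crypto_inc(ctrblk, 16);
	}
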
+diff --git a/crypto/api.c b/crypto/api.c
+index 1f5c724..a2496d1 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type,
+ 	return alg;
+ }
+ 
+-static void crypto_larval_kill(struct crypto_alg *alg)
++void crypto_larval_kill(struct crypto_alg *alg)
+ {
+ 	struct crypto_larval *larval = (void *)alg;
+ 
+@@ -147,6 +147,7 @@ static void crypto_larval_kill(struct crypto_alg *alg)
+ 	complete_all(&larval->completion);
+ 	crypto_alg_put(alg);
+ }
++EXPORT_SYMBOL_GPL(crypto_larval_kill);
+ 
+ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
+ {
+@@ -176,11 +177,9 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
+ 	return alg;
+ }
+ 
+-struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
++struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
+ {
+ 	struct crypto_alg *alg;
+-	struct crypto_alg *larval;
+-	int ok;
+ 
+ 	if (!name)
+ 		return ERR_PTR(-ENOENT);
+@@ -193,7 +192,17 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
+ 	if (alg)
+ 		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
+ 
+-	larval = crypto_larval_alloc(name, type, mask);
++	return crypto_larval_alloc(name, type, mask);
++}
++EXPORT_SYMBOL_GPL(crypto_larval_lookup);
 +
-+/* May not be __init: called during resume */
-+void syscall32_cpu_init(void)
++struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
 +{
-+	if (use_sysenter < 0)
-+		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
-+
-+	/* Load these always in case some future AMD CPU supports
-+	   SYSENTER from compat mode too. */
-+	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-+	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-+	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
++	struct crypto_alg *alg;
++	struct crypto_alg *larval;
++	int ok;
 +
-+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
-+}
++	larval = crypto_larval_lookup(name, type, mask);
+ 	if (IS_ERR(larval) || !crypto_is_larval(larval))
+ 		return larval;
+ 
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index 126a529..ed8ac5a 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -10,22 +10,21 @@
+  *
+  */
+ 
+-#include <crypto/algapi.h>
++#include <crypto/aead.h>
++#include <crypto/internal/skcipher.h>
++#include <crypto/authenc.h>
++#include <crypto/scatterwalk.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/rtnetlink.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ 
+-#include "scatterwalk.h"
+-
+ struct authenc_instance_ctx {
+ 	struct crypto_spawn auth;
+-	struct crypto_spawn enc;
+-
+-	unsigned int authsize;
+-	unsigned int enckeylen;
++	struct crypto_skcipher_spawn enc;
+ };
+ 
+ struct crypto_authenc_ctx {
+@@ -37,19 +36,31 @@ struct crypto_authenc_ctx {
+ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ 				 unsigned int keylen)
+ {
+-	struct authenc_instance_ctx *ictx =
+-		crypto_instance_ctx(crypto_aead_alg_instance(authenc));
+-	unsigned int enckeylen = ictx->enckeylen;
+ 	unsigned int authkeylen;
++	unsigned int enckeylen;
+ 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ 	struct crypto_hash *auth = ctx->auth;
+ 	struct crypto_ablkcipher *enc = ctx->enc;
++	struct rtattr *rta = (void *)key;
++	struct crypto_authenc_key_param *param;
+ 	int err = -EINVAL;
+ 
+-	if (keylen < enckeylen) {
+-		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+-		goto out;
+-	}
++	if (!RTA_OK(rta, keylen))
++		goto badkey;
++	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
++		goto badkey;
++	if (RTA_PAYLOAD(rta) < sizeof(*param))
++		goto badkey;
 +
-+#define compat_uses_vma		1
++	param = RTA_DATA(rta);
++	enckeylen = be32_to_cpu(param->enckeylen);
 +
-+static inline void map_compat_vdso(int map)
-+{
-+}
++	key += RTA_ALIGN(rta->rta_len);
++	keylen -= RTA_ALIGN(rta->rta_len);
 +
-+#else  /* CONFIG_X86_32 */
++	if (keylen < enckeylen)
++		goto badkey;
 +
-+#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SEP))
+ 	authkeylen = keylen - enckeylen;
+ 
+ 	crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+@@ -71,21 +82,38 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ 
+ out:
+ 	return err;
 +
-+void enable_sep_cpu(void)
++badkey:
++	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
++	goto out;
+ }
+ 
+-static int crypto_authenc_hash(struct aead_request *req)
++static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
++			  int chain)
 +{
-+	int cpu = get_cpu();
-+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+
-+	if (!boot_cpu_has(X86_FEATURE_SEP)) {
-+		put_cpu();
-+		return;
++	if (chain) {
++		head->length += sg->length;
++		sg = scatterwalk_sg_next(sg);
 +	}
 +
-+	tss->x86_tss.ss1 = __KERNEL_CS;
-+	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
-+	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-+	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
-+	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
-+	put_cpu();	
++	if (sg)
++		scatterwalk_sg_chain(head, 2, sg);
++	else
++		sg_mark_end(head);
 +}
 +
-+static struct vm_area_struct gate_vma;
++static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags,
++			       struct scatterlist *cipher,
++			       unsigned int cryptlen)
+ {
+ 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+-	struct authenc_instance_ctx *ictx =
+-		crypto_instance_ctx(crypto_aead_alg_instance(authenc));
+ 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ 	struct crypto_hash *auth = ctx->auth;
+ 	struct hash_desc desc = {
+ 		.tfm = auth,
++		.flags = aead_request_flags(req) & flags,
+ 	};
+ 	u8 *hash = aead_request_ctx(req);
+-	struct scatterlist *dst = req->dst;
+-	unsigned int cryptlen = req->cryptlen;
+ 	int err;
+ 
+ 	hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), 
+@@ -100,7 +128,7 @@ static int crypto_authenc_hash(struct aead_request *req)
+ 	if (err)
+ 		goto auth_unlock;
+ 
+-	err = crypto_hash_update(&desc, dst, cryptlen);
++	err = crypto_hash_update(&desc, cipher, cryptlen);
+ 	if (err)
+ 		goto auth_unlock;
+ 
+@@ -109,17 +137,53 @@ auth_unlock:
+ 	spin_unlock_bh(&ctx->auth_lock);
+ 
+ 	if (err)
+-		return err;
++		return ERR_PTR(err);
 +
-+static int __init gate_vma_init(void)
-+{
-+	gate_vma.vm_mm = NULL;
-+	gate_vma.vm_start = FIXADDR_USER_START;
-+	gate_vma.vm_end = FIXADDR_USER_END;
-+	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-+	gate_vma.vm_page_prot = __P101;
-+	/*
-+	 * Make sure the vDSO gets into every core dump.
-+	 * Dumping its contents makes post-mortem fully interpretable later
-+	 * without matching up the same kernel and hardware config to see
-+	 * what PC values meant.
-+	 */
-+	gate_vma.vm_flags |= VM_ALWAYSDUMP;
-+	return 0;
++	return hash;
 +}
-+
-+#define compat_uses_vma		0
-+
-+static void map_compat_vdso(int map)
+ 
+-	scatterwalk_map_and_copy(hash, dst, cryptlen, ictx->authsize, 1);
++static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
++				 unsigned int flags)
 +{
-+	static int vdso_mapped;
-+
-+	if (map == vdso_mapped)
-+		return;
-+
-+	vdso_mapped = map;
-+
-+	__set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
-+		     map ? PAGE_READONLY_EXEC : PAGE_NONE);
++	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
++	struct scatterlist *dst = req->dst;
++	struct scatterlist cipher[2];
++	struct page *dstp;
++	unsigned int ivsize = crypto_aead_ivsize(authenc);
++	unsigned int cryptlen;
++	u8 *vdst;
++	u8 *hash;
 +
-+	/* flush stray tlbs */
-+	flush_tlb_all();
-+}
++	dstp = sg_page(dst);
++	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
 +
-+#endif	/* CONFIG_X86_64 */
++	sg_init_table(cipher, 2);
++	sg_set_buf(cipher, iv, ivsize);
++	authenc_chain(cipher, dst, vdst == iv + ivsize);
 +
-+int __init sysenter_setup(void)
-+{
-+	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
-+	const void *vsyscall;
-+	size_t vsyscall_len;
++	cryptlen = req->cryptlen + ivsize;
++	hash = crypto_authenc_hash(req, flags, cipher, cryptlen);
++	if (IS_ERR(hash))
++		return PTR_ERR(hash);
 +
-+	vdso32_pages[0] = virt_to_page(syscall_page);
++	scatterwalk_map_and_copy(hash, cipher, cryptlen,
++				 crypto_aead_authsize(authenc), 1);
+ 	return 0;
+ }
+ 
+ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
+ 					int err)
+ {
+-	if (!err)
+-		err = crypto_authenc_hash(req->data);
++	if (!err) {
++		struct aead_request *areq = req->data;
++		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
++		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
++		struct ablkcipher_request *abreq = aead_request_ctx(areq);
++		u8 *iv = (u8 *)(abreq + 1) +
++			 crypto_ablkcipher_reqsize(ctx->enc);
 +
-+#ifdef CONFIG_X86_32
-+	gate_vma_init();
++		err = crypto_authenc_genicv(areq, iv, 0);
++	}
+ 
+ 	aead_request_complete(req->data, err);
+ }
+@@ -129,72 +193,99 @@ static int crypto_authenc_encrypt(struct aead_request *req)
+ 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ 	struct ablkcipher_request *abreq = aead_request_ctx(req);
++	struct crypto_ablkcipher *enc = ctx->enc;
++	struct scatterlist *dst = req->dst;
++	unsigned int cryptlen = req->cryptlen;
++	u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc);
+ 	int err;
+ 
+-	ablkcipher_request_set_tfm(abreq, ctx->enc);
++	ablkcipher_request_set_tfm(abreq, enc);
+ 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ 					crypto_authenc_encrypt_done, req);
+-	ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen,
+-				     req->iv);
++	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
 +
-+	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
-+#endif
++	memcpy(iv, req->iv, crypto_aead_ivsize(authenc));
+ 
+ 	err = crypto_ablkcipher_encrypt(abreq);
+ 	if (err)
+ 		return err;
+ 
+-	return crypto_authenc_hash(req);
++	return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+ }
+ 
+-static int crypto_authenc_verify(struct aead_request *req)
++static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
++					   int err)
+ {
+-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+-	struct authenc_instance_ctx *ictx =
+-		crypto_instance_ctx(crypto_aead_alg_instance(authenc));
++	if (!err) {
++		struct aead_givcrypt_request *greq = req->data;
 +
-+	if (!vdso32_sysenter()) {
-+		vsyscall = &vdso32_default_start;
-+		vsyscall_len = &vdso32_default_end - &vdso32_default_start;
-+	} else {
-+		vsyscall = &vdso32_sysenter_start;
-+		vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
++		err = crypto_authenc_genicv(&greq->areq, greq->giv, 0);
 +	}
 +
-+	memcpy(syscall_page, vsyscall, vsyscall_len);
-+	relocate_vdso(syscall_page);
-+
-+	return 0;
++	aead_request_complete(req->data, err);
 +}
 +
-+/* Setup a VMA at program startup for the vsyscall page */
-+int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
++static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
 +{
-+	struct mm_struct *mm = current->mm;
-+	unsigned long addr;
-+	int ret = 0;
-+	bool compat;
-+
-+	down_write(&mm->mmap_sem);
++	struct crypto_aead *authenc = aead_givcrypt_reqtfm(req);
+ 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+-	struct crypto_hash *auth = ctx->auth;
+-	struct hash_desc desc = {
+-		.tfm = auth,
+-		.flags = aead_request_flags(req),
+-	};
+-	u8 *ohash = aead_request_ctx(req);
+-	u8 *ihash;
+-	struct scatterlist *src = req->src;
+-	unsigned int cryptlen = req->cryptlen;
+-	unsigned int authsize;
++	struct aead_request *areq = &req->areq;
++	struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
++	u8 *iv = req->giv;
+ 	int err;
+ 
+-	ohash = (u8 *)ALIGN((unsigned long)ohash + crypto_hash_alignmask(auth), 
+-			    crypto_hash_alignmask(auth) + 1);
+-	ihash = ohash + crypto_hash_digestsize(auth);
+-
+-	spin_lock_bh(&ctx->auth_lock);
+-	err = crypto_hash_init(&desc);
+-	if (err)
+-		goto auth_unlock;
++	skcipher_givcrypt_set_tfm(greq, ctx->enc);
++	skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
++				       crypto_authenc_givencrypt_done, areq);
++	skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
++				    areq->iv);
++	skcipher_givcrypt_set_giv(greq, iv, req->seq);
+ 
+-	err = crypto_hash_update(&desc, req->assoc, req->assoclen);
++	err = crypto_skcipher_givencrypt(greq);
+ 	if (err)
+-		goto auth_unlock;
++		return err;
+ 
+-	err = crypto_hash_update(&desc, src, cryptlen);
+-	if (err)
+-		goto auth_unlock;
++	return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
++}
+ 
+-	err = crypto_hash_final(&desc, ohash);
+-auth_unlock:
+-	spin_unlock_bh(&ctx->auth_lock);
++static int crypto_authenc_verify(struct aead_request *req,
++				 struct scatterlist *cipher,
++				 unsigned int cryptlen)
++{
++	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
++	u8 *ohash;
++	u8 *ihash;
++	unsigned int authsize;
+ 
+-	if (err)
+-		return err;
++	ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher,
++				    cryptlen);
++	if (IS_ERR(ohash))
++		return PTR_ERR(ohash);
+ 
+-	authsize = ictx->authsize;
+-	scatterwalk_map_and_copy(ihash, src, cryptlen, authsize, 0);
+-	return memcmp(ihash, ohash, authsize) ? -EINVAL : 0;
++	authsize = crypto_aead_authsize(authenc);
++	ihash = ohash + authsize;
++	scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0);
++	return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
+ }
+ 
+-static void crypto_authenc_decrypt_done(struct crypto_async_request *req,
+-					int err)
++static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
++				  unsigned int cryptlen)
+ {
+-	aead_request_complete(req->data, err);
++	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
++	struct scatterlist *src = req->src;
++	struct scatterlist cipher[2];
++	struct page *srcp;
++	unsigned int ivsize = crypto_aead_ivsize(authenc);
++	u8 *vsrc;
 +
-+	/* Test compat mode once here, in case someone
-+	   changes it via sysctl */
-+	compat = (vdso_enabled == VDSO_COMPAT);
++	srcp = sg_page(src);
++	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
 +
-+	map_compat_vdso(compat);
++	sg_init_table(cipher, 2);
++	sg_set_buf(cipher, iv, ivsize);
++	authenc_chain(cipher, src, vsrc == iv + ivsize);
 +
-+	if (compat)
-+		addr = VDSO_HIGH_BASE;
-+	else {
-+		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
-+		if (IS_ERR_VALUE(addr)) {
-+			ret = addr;
-+			goto up_fail;
-+		}
-+	}
++	return crypto_authenc_verify(req, cipher, cryptlen + ivsize);
+ }
+ 
+ static int crypto_authenc_decrypt(struct aead_request *req)
+@@ -202,17 +293,23 @@ static int crypto_authenc_decrypt(struct aead_request *req)
+ 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ 	struct ablkcipher_request *abreq = aead_request_ctx(req);
++	unsigned int cryptlen = req->cryptlen;
++	unsigned int authsize = crypto_aead_authsize(authenc);
++	u8 *iv = req->iv;
+ 	int err;
+ 
+-	err = crypto_authenc_verify(req);
++	if (cryptlen < authsize)
++		return -EINVAL;
++	cryptlen -= authsize;
 +
-+	if (compat_uses_vma || !compat) {
-+		/*
-+		 * MAYWRITE to allow gdb to COW and set breakpoints
-+		 *
-+		 * Make sure the vDSO gets into every core dump.
-+		 * Dumping its contents makes post-mortem fully
-+		 * interpretable later without matching up the same
-+		 * kernel and hardware config to see what PC values
-+		 * meant.
-+		 */
-+		ret = install_special_mapping(mm, addr, PAGE_SIZE,
-+					      VM_READ|VM_EXEC|
-+					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
-+					      VM_ALWAYSDUMP,
-+					      vdso32_pages);
++	err = crypto_authenc_iverify(req, iv, cryptlen);
+ 	if (err)
+ 		return err;
+ 
+ 	ablkcipher_request_set_tfm(abreq, ctx->enc);
+ 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+-					crypto_authenc_decrypt_done, req);
+-	ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen,
+-				     req->iv);
++					req->base.complete, req->base.data);
++	ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
+ 
+ 	return crypto_ablkcipher_decrypt(abreq);
+ }
+@@ -224,19 +321,13 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
+ 	struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	struct crypto_hash *auth;
+ 	struct crypto_ablkcipher *enc;
+-	unsigned int digestsize;
+ 	int err;
+ 
+ 	auth = crypto_spawn_hash(&ictx->auth);
+ 	if (IS_ERR(auth))
+ 		return PTR_ERR(auth);
+ 
+-	err = -EINVAL;
+-	digestsize = crypto_hash_digestsize(auth);
+-	if (ictx->authsize > digestsize)
+-		goto err_free_hash;
+-
+-	enc = crypto_spawn_ablkcipher(&ictx->enc);
++	enc = crypto_spawn_skcipher(&ictx->enc);
+ 	err = PTR_ERR(enc);
+ 	if (IS_ERR(enc))
+ 		goto err_free_hash;
+@@ -246,9 +337,10 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
+ 	tfm->crt_aead.reqsize = max_t(unsigned int,
+ 				      (crypto_hash_alignmask(auth) &
+ 				       ~(crypto_tfm_ctx_alignment() - 1)) +
+-				      digestsize * 2,
+-				      sizeof(struct ablkcipher_request) +
+-				      crypto_ablkcipher_reqsize(enc));
++				      crypto_hash_digestsize(auth) * 2,
++				      sizeof(struct skcipher_givcrypt_request) +
++				      crypto_ablkcipher_reqsize(enc) +
++				      crypto_ablkcipher_ivsize(enc));
+ 
+ 	spin_lock_init(&ctx->auth_lock);
+ 
+@@ -269,75 +361,74 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
+ 
+ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
+ {
++	struct crypto_attr_type *algt;
+ 	struct crypto_instance *inst;
+ 	struct crypto_alg *auth;
+ 	struct crypto_alg *enc;
+ 	struct authenc_instance_ctx *ctx;
+-	unsigned int authsize;
+-	unsigned int enckeylen;
++	const char *enc_name;
+ 	int err;
+ 
+-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD);
+-	if (err)
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
+ 		return ERR_PTR(err);
+ 
++	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
++		return ERR_PTR(-EINVAL);
 +
-+		if (ret)
-+			goto up_fail;
-+	}
+ 	auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+ 			       CRYPTO_ALG_TYPE_HASH_MASK);
+ 	if (IS_ERR(auth))
+ 		return ERR_PTR(PTR_ERR(auth));
+ 
+-	err = crypto_attr_u32(tb[2], &authsize);
+-	inst = ERR_PTR(err);
+-	if (err)
+-		goto out_put_auth;
+-
+-	enc = crypto_attr_alg(tb[3], CRYPTO_ALG_TYPE_BLKCIPHER,
+-			      CRYPTO_ALG_TYPE_MASK);
+-	inst = ERR_PTR(PTR_ERR(enc));
+-	if (IS_ERR(enc))
++	enc_name = crypto_attr_alg_name(tb[2]);
++	err = PTR_ERR(enc_name);
++	if (IS_ERR(enc_name))
+ 		goto out_put_auth;
+ 
+-	err = crypto_attr_u32(tb[4], &enckeylen);
+-	if (err)
+-		goto out_put_enc;
+-
+ 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ 	err = -ENOMEM;
+ 	if (!inst)
+-		goto out_put_enc;
+-
+-	err = -ENAMETOOLONG;
+-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+-		     "authenc(%s,%u,%s,%u)", auth->cra_name, authsize,
+-		     enc->cra_name, enckeylen) >= CRYPTO_MAX_ALG_NAME)
+-		goto err_free_inst;
+-
+-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+-		     "authenc(%s,%u,%s,%u)", auth->cra_driver_name,
+-		     authsize, enc->cra_driver_name, enckeylen) >=
+-	    CRYPTO_MAX_ALG_NAME)
+-		goto err_free_inst;
++		goto out_put_auth;
+ 
+ 	ctx = crypto_instance_ctx(inst);
+-	ctx->authsize = authsize;
+-	ctx->enckeylen = enckeylen;
+ 
+ 	err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK);
+ 	if (err)
+ 		goto err_free_inst;
+ 
+-	err = crypto_init_spawn(&ctx->enc, enc, inst, CRYPTO_ALG_TYPE_MASK);
++	crypto_set_skcipher_spawn(&ctx->enc, inst);
++	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
++				   crypto_requires_sync(algt->type,
++							algt->mask));
+ 	if (err)
+ 		goto err_drop_auth;
+ 
+-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
++	enc = crypto_skcipher_spawn_alg(&ctx->enc);
 +
-+	current->mm->context.vdso = (void *)addr;
-+	current_thread_info()->sysenter_return =
-+		VDSO32_SYMBOL(addr, SYSENTER_RETURN);
++	err = -ENAMETOOLONG;
++	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
++		     "authenc(%s,%s)", auth->cra_name, enc->cra_name) >=
++	    CRYPTO_MAX_ALG_NAME)
++		goto err_drop_enc;
 +
-+  up_fail:
-+	up_write(&mm->mmap_sem);
++	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
++		     "authenc(%s,%s)", auth->cra_driver_name,
++		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
++		goto err_drop_enc;
 +
-+	return ret;
-+}
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
++	inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
+ 	inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority;
+ 	inst->alg.cra_blocksize = enc->cra_blocksize;
+-	inst->alg.cra_alignmask = max(auth->cra_alignmask, enc->cra_alignmask);
++	inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask;
+ 	inst->alg.cra_type = &crypto_aead_type;
+ 
+-	inst->alg.cra_aead.ivsize = enc->cra_blkcipher.ivsize;
+-	inst->alg.cra_aead.authsize = authsize;
++	inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
++	inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ?
++					 auth->cra_hash.digestsize :
++					 auth->cra_digest.dia_digestsize;
+ 
+ 	inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
+ 
+@@ -347,18 +438,19 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
+ 	inst->alg.cra_aead.setkey = crypto_authenc_setkey;
+ 	inst->alg.cra_aead.encrypt = crypto_authenc_encrypt;
+ 	inst->alg.cra_aead.decrypt = crypto_authenc_decrypt;
++	inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
+ 
+ out:
+-	crypto_mod_put(enc);
+-out_put_auth:
+ 	crypto_mod_put(auth);
+ 	return inst;
+ 
++err_drop_enc:
++	crypto_drop_skcipher(&ctx->enc);
+ err_drop_auth:
+ 	crypto_drop_spawn(&ctx->auth);
+ err_free_inst:
+ 	kfree(inst);
+-out_put_enc:
++out_put_auth:
+ 	inst = ERR_PTR(err);
+ 	goto out;
+ }
+@@ -367,7 +459,7 @@ static void crypto_authenc_free(struct crypto_instance *inst)
+ {
+ 	struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
+ 
+-	crypto_drop_spawn(&ctx->enc);
++	crypto_drop_skcipher(&ctx->enc);
+ 	crypto_drop_spawn(&ctx->auth);
+ 	kfree(inst);
+ }
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index f6c67f9..4a7e65c 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -14,7 +14,8 @@
+  *
+  */
+ 
+-#include <linux/crypto.h>
++#include <crypto/internal/skcipher.h>
++#include <crypto/scatterwalk.h>
+ #include <linux/errno.h>
+ #include <linux/hardirq.h>
+ #include <linux/kernel.h>
+@@ -25,7 +26,6 @@
+ #include <linux/string.h>
+ 
+ #include "internal.h"
+-#include "scatterwalk.h"
+ 
+ enum {
+ 	BLKCIPHER_WALK_PHYS = 1 << 0,
+@@ -433,9 +433,8 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
+ 	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
+ 	unsigned int len = alg->cra_ctxsize;
+ 
+-	type ^= CRYPTO_ALG_ASYNC;
+-	mask &= CRYPTO_ALG_ASYNC;
+-	if ((type & mask) && cipher->ivsize) {
++	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
++	    cipher->ivsize) {
+ 		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
+ 		len += cipher->ivsize;
+ 	}
+@@ -451,6 +450,11 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
+ 	crt->setkey = async_setkey;
+ 	crt->encrypt = async_encrypt;
+ 	crt->decrypt = async_decrypt;
++	if (!alg->ivsize) {
++		crt->givencrypt = skcipher_null_givencrypt;
++		crt->givdecrypt = skcipher_null_givdecrypt;
++	}
++	crt->base = __crypto_ablkcipher_cast(tfm);
+ 	crt->ivsize = alg->ivsize;
+ 
+ 	return 0;
+@@ -482,9 +486,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+ 	if (alg->ivsize > PAGE_SIZE / 8)
+ 		return -EINVAL;
+ 
+-	type ^= CRYPTO_ALG_ASYNC;
+-	mask &= CRYPTO_ALG_ASYNC;
+-	if (type & mask)
++	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
+ 		return crypto_init_blkcipher_ops_sync(tfm);
+ 	else
+ 		return crypto_init_blkcipher_ops_async(tfm);
+@@ -499,6 +501,8 @@ static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
+ 	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
+ 	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
+ 	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
++	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
++					     "<default>");
+ }
+ 
+ const struct crypto_type crypto_blkcipher_type = {
+@@ -510,5 +514,187 @@ const struct crypto_type crypto_blkcipher_type = {
+ };
+ EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
+ 
++static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
++				const char *name, u32 type, u32 mask)
++{
++	struct crypto_alg *alg;
++	int err;
 +
-+#ifdef CONFIG_X86_64
++	type = crypto_skcipher_type(type);
++	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;
 +
-+__initcall(sysenter_setup);
++	alg = crypto_alg_mod_lookup(name, type, mask);
++	if (IS_ERR(alg))
++		return PTR_ERR(alg);
 +
-+#ifdef CONFIG_SYSCTL
-+/* Register vsyscall32 into the ABI table */
-+#include <linux/sysctl.h>
++	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
++	crypto_mod_put(alg);
++	return err;
++}
 +
-+static ctl_table abi_table2[] = {
-+	{
-+		.procname	= "vsyscall32",
-+		.data		= &sysctl_vsyscall32,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0644,
-+		.proc_handler	= proc_dointvec
-+	},
-+	{}
-+};
++struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
++					     struct rtattr **tb, u32 type,
++					     u32 mask)
++{
++	struct {
++		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
++			      unsigned int keylen);
++		int (*encrypt)(struct ablkcipher_request *req);
++		int (*decrypt)(struct ablkcipher_request *req);
 +
-+static ctl_table abi_root_table2[] = {
-+	{
-+		.ctl_name = CTL_ABI,
-+		.procname = "abi",
-+		.mode = 0555,
-+		.child = abi_table2
-+	},
-+	{}
-+};
++		unsigned int min_keysize;
++		unsigned int max_keysize;
++		unsigned int ivsize;
 +
-+static __init int ia32_binfmt_init(void)
-+{
-+	register_sysctl_table(abi_root_table2);
-+	return 0;
-+}
-+__initcall(ia32_binfmt_init);
-+#endif
++		const char *geniv;
++	} balg;
++	const char *name;
++	struct crypto_skcipher_spawn *spawn;
++	struct crypto_attr_type *algt;
++	struct crypto_instance *inst;
++	struct crypto_alg *alg;
++	int err;
 +
-+#else  /* CONFIG_X86_32 */
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
++		return ERR_PTR(err);
 +
-+const char *arch_vma_name(struct vm_area_struct *vma)
-+{
-+	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
-+		return "[vdso]";
-+	return NULL;
-+}
++	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
++	    algt->mask)
++		return ERR_PTR(-EINVAL);
 +
-+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-+{
-+	struct mm_struct *mm = tsk->mm;
++	name = crypto_attr_alg_name(tb[1]);
++	err = PTR_ERR(name);
++	if (IS_ERR(name))
++		return ERR_PTR(err);
 +
-+	/* Check to see if this task was created in compat vdso mode */
-+	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
-+		return &gate_vma;
-+	return NULL;
-+}
++	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
++	if (!inst)
++		return ERR_PTR(-ENOMEM);
 +
-+int in_gate_area(struct task_struct *task, unsigned long addr)
-+{
-+	const struct vm_area_struct *vma = get_gate_vma(task);
++	spawn = crypto_instance_ctx(inst);
 +
-+	return vma && addr >= vma->vm_start && addr < vma->vm_end;
-+}
++	/* Ignore async algorithms if necessary. */
++	mask |= crypto_requires_sync(algt->type, algt->mask);
 +
-+int in_gate_area_no_task(unsigned long addr)
-+{
-+	return 0;
-+}
++	crypto_set_skcipher_spawn(spawn, inst);
++	err = crypto_grab_nivcipher(spawn, name, type, mask);
++	if (err)
++		goto err_free_inst;
 +
-+#endif	/* CONFIG_X86_64 */
-diff --git a/arch/x86/vdso/vdso32.S b/arch/x86/vdso/vdso32.S
-new file mode 100644
-index 0000000..1e36f72
---- /dev/null
-+++ b/arch/x86/vdso/vdso32.S
-@@ -0,0 +1,19 @@
-+#include <linux/init.h>
++	alg = crypto_skcipher_spawn_alg(spawn);
 +
-+__INITDATA
++	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
++	    CRYPTO_ALG_TYPE_BLKCIPHER) {
++		balg.ivsize = alg->cra_blkcipher.ivsize;
++		balg.min_keysize = alg->cra_blkcipher.min_keysize;
++		balg.max_keysize = alg->cra_blkcipher.max_keysize;
 +
-+	.globl vdso32_default_start, vdso32_default_end
-+vdso32_default_start:
-+#ifdef CONFIG_X86_32
-+	.incbin "arch/x86/vdso/vdso32-int80.so"
-+#else
-+	.incbin "arch/x86/vdso/vdso32-syscall.so"
-+#endif
-+vdso32_default_end:
++		balg.setkey = async_setkey;
++		balg.encrypt = async_encrypt;
++		balg.decrypt = async_decrypt;
 +
-+	.globl vdso32_sysenter_start, vdso32_sysenter_end
-+vdso32_sysenter_start:
-+	.incbin "arch/x86/vdso/vdso32-sysenter.so"
-+vdso32_sysenter_end:
++		balg.geniv = alg->cra_blkcipher.geniv;
++	} else {
++		balg.ivsize = alg->cra_ablkcipher.ivsize;
++		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
++		balg.max_keysize = alg->cra_ablkcipher.max_keysize;
 +
-+__FINIT
-diff --git a/arch/x86/vdso/vdso32/.gitignore b/arch/x86/vdso/vdso32/.gitignore
-new file mode 100644
-index 0000000..e45fba9
---- /dev/null
-+++ b/arch/x86/vdso/vdso32/.gitignore
-@@ -0,0 +1 @@
-+vdso32.lds
-diff --git a/arch/x86/vdso/vdso32/int80.S b/arch/x86/vdso/vdso32/int80.S
-new file mode 100644
-index 0000000..b15b7c0
---- /dev/null
-+++ b/arch/x86/vdso/vdso32/int80.S
-@@ -0,0 +1,56 @@
-+/*
-+ * Code for the vDSO.  This version uses the old int $0x80 method.
-+ *
-+ * First get the common code for the sigreturn entry points.
-+ * This must come first.
-+ */
-+#include "sigreturn.S"
++		balg.setkey = alg->cra_ablkcipher.setkey;
++		balg.encrypt = alg->cra_ablkcipher.encrypt;
++		balg.decrypt = alg->cra_ablkcipher.decrypt;
 +
-+	.text
-+	.globl __kernel_vsyscall
-+	.type __kernel_vsyscall,@function
-+	ALIGN
-+__kernel_vsyscall:
-+.LSTART_vsyscall:
-+	int $0x80
-+	ret
-+.LEND_vsyscall:
-+	.size __kernel_vsyscall,.-.LSTART_vsyscall
-+	.previous
++		balg.geniv = alg->cra_ablkcipher.geniv;
++	}
 +
-+	.section .eh_frame,"a",@progbits
-+.LSTARTFRAMEDLSI:
-+	.long .LENDCIEDLSI-.LSTARTCIEDLSI
-+.LSTARTCIEDLSI:
-+	.long 0			/* CIE ID */
-+	.byte 1			/* Version number */
-+	.string "zR"		/* NUL-terminated augmentation string */
-+	.uleb128 1		/* Code alignment factor */
-+	.sleb128 -4		/* Data alignment factor */
-+	.byte 8			/* Return address register column */
-+	.uleb128 1		/* Augmentation value length */
-+	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-+	.byte 0x0c		/* DW_CFA_def_cfa */
-+	.uleb128 4
-+	.uleb128 4
-+	.byte 0x88		/* DW_CFA_offset, column 0x8 */
-+	.uleb128 1
-+	.align 4
-+.LENDCIEDLSI:
-+	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
-+.LSTARTFDEDLSI:
-+	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
-+	.long .LSTART_vsyscall-.	/* PC-relative start address */
-+	.long .LEND_vsyscall-.LSTART_vsyscall
-+	.uleb128 0
-+	.align 4
-+.LENDFDEDLSI:
-+	.previous
++	err = -EINVAL;
++	if (!balg.ivsize)
++		goto err_drop_alg;
 +
 +	/*
-+	 * Pad out the segment to match the size of the sysenter.S version.
++	 * This is only true if we're constructing an algorithm with its
++	 * default IV generator.  For the default generator we elide the
++	 * template name and double-check the IV generator.
 +	 */
-+VDSO32_vsyscall_eh_frame_size = 0x40
-+	.section .data,"aw",@progbits
-+	.space VDSO32_vsyscall_eh_frame_size-(.LENDFDEDLSI-.LSTARTFRAMEDLSI), 0
-+	.previous
-diff --git a/arch/x86/vdso/vdso32/note.S b/arch/x86/vdso/vdso32/note.S
-new file mode 100644
-index 0000000..c83f257
---- /dev/null
-+++ b/arch/x86/vdso/vdso32/note.S
-@@ -0,0 +1,44 @@
-+/*
-+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
-+ * Here we can supply some information useful to userland.
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/elfnote.h>
++	if (algt->mask & CRYPTO_ALG_GENIV) {
++		if (!balg.geniv)
++			balg.geniv = crypto_default_geniv(alg);
++		err = -EAGAIN;
++		if (strcmp(tmpl->name, balg.geniv))
++			goto err_drop_alg;
 +
-+/* Ideally this would use UTS_NAME, but using a quoted string here
-+   doesn't work. Remember to change this when changing the
-+   kernel's name. */
-+ELFNOTE_START(Linux, 0, "a")
-+	.long LINUX_VERSION_CODE
-+ELFNOTE_END
++		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
++		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
++		       CRYPTO_MAX_ALG_NAME);
++	} else {
++		err = -ENAMETOOLONG;
++		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
++			     "%s(%s)", tmpl->name, alg->cra_name) >=
++		    CRYPTO_MAX_ALG_NAME)
++			goto err_drop_alg;
++		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
++			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
++		    CRYPTO_MAX_ALG_NAME)
++			goto err_drop_alg;
++	}
 +
-+#ifdef CONFIG_XEN
-+/*
-+ * Add a special note telling glibc's dynamic linker a fake hardware
-+ * flavor that it will use to choose the search path for libraries in the
-+ * same way it uses real hardware capabilities like "mmx".
-+ * We supply "nosegneg" as the fake capability, to indicate that we
-+ * do not like negative offsets in instructions using segment overrides,
-+ * since we implement those inefficiently.  This makes it possible to
-+ * install libraries optimized to avoid those access patterns in someplace
-+ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
-+ * corresponding to the bits here is needed to make ldconfig work right.
-+ * It should contain:
-+ *	hwcap 1 nosegneg
-+ * to match the mapping of bit to name that we give here.
-+ *
-+ * At runtime, the fake hardware feature will be considered to be present
-+ * if its bit is set in the mask word.  So, we start with the mask 0, and
-+ * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
-+ */
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
++	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
++	inst->alg.cra_priority = alg->cra_priority;
++	inst->alg.cra_blocksize = alg->cra_blocksize;
++	inst->alg.cra_alignmask = alg->cra_alignmask;
++	inst->alg.cra_type = &crypto_givcipher_type;
 +
-+#include "../../xen/vdso.h"	/* Defines VDSO_NOTE_NONEGSEG_BIT.  */
++	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
++	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
++	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
++	inst->alg.cra_ablkcipher.geniv = balg.geniv;
 +
-+ELFNOTE_START(GNU, 2, "a")
-+	.long 1			/* ncaps */
-+VDSO32_NOTE_MASK:		/* Symbol used by arch/x86/xen/setup.c */
-+	.long 0			/* mask */
-+	.byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg"	/* bit, name */
-+ELFNOTE_END
-+#endif
-diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
-new file mode 100644
-index 0000000..31776d0
---- /dev/null
-+++ b/arch/x86/vdso/vdso32/sigreturn.S
-@@ -0,0 +1,144 @@
-+/*
-+ * Common code for the sigreturn entry points in vDSO images.
-+ * So far this code is the same for both int80 and sysenter versions.
-+ * This file is #include'd by int80.S et al to define them first thing.
-+ * The kernel assumes that the addresses of these routines are constant
-+ * for all vDSO implementations.
-+ */
++	inst->alg.cra_ablkcipher.setkey = balg.setkey;
++	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
++	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
 +
-+#include <linux/linkage.h>
-+#include <asm/unistd_32.h>
-+#include <asm/asm-offsets.h>
++out:
++	return inst;
 +
-+#ifndef SYSCALL_ENTER_KERNEL
-+#define	SYSCALL_ENTER_KERNEL	int $0x80
-+#endif
++err_drop_alg:
++	crypto_drop_skcipher(spawn);
++err_free_inst:
++	kfree(inst);
++	inst = ERR_PTR(err);
++	goto out;
++}
++EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
 +
-+	.text
-+	.globl __kernel_sigreturn
-+	.type __kernel_sigreturn,@function
-+	ALIGN
-+__kernel_sigreturn:
-+.LSTART_sigreturn:
-+	popl %eax		/* XXX does this mean it needs unwind info? */
-+	movl $__NR_sigreturn, %eax
-+	SYSCALL_ENTER_KERNEL
-+.LEND_sigreturn:
-+	nop
-+	.size __kernel_sigreturn,.-.LSTART_sigreturn
++void skcipher_geniv_free(struct crypto_instance *inst)
++{
++	crypto_drop_skcipher(crypto_instance_ctx(inst));
++	kfree(inst);
++}
++EXPORT_SYMBOL_GPL(skcipher_geniv_free);
 +
-+	.globl __kernel_rt_sigreturn
-+	.type __kernel_rt_sigreturn,@function
-+	ALIGN
-+__kernel_rt_sigreturn:
-+.LSTART_rt_sigreturn:
-+	movl $__NR_rt_sigreturn, %eax
-+	SYSCALL_ENTER_KERNEL
-+.LEND_rt_sigreturn:
-+	nop
-+	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
-+	.previous
++int skcipher_geniv_init(struct crypto_tfm *tfm)
++{
++	struct crypto_instance *inst = (void *)tfm->__crt_alg;
++	struct crypto_ablkcipher *cipher;
 +
-+	.section .eh_frame,"a",@progbits
-+.LSTARTFRAMEDLSI1:
-+	.long .LENDCIEDLSI1-.LSTARTCIEDLSI1
-+.LSTARTCIEDLSI1:
-+	.long 0			/* CIE ID */
-+	.byte 1			/* Version number */
-+	.string "zRS"		/* NUL-terminated augmentation string */
-+	.uleb128 1		/* Code alignment factor */
-+	.sleb128 -4		/* Data alignment factor */
-+	.byte 8			/* Return address register column */
-+	.uleb128 1		/* Augmentation value length */
-+	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-+	.byte 0			/* DW_CFA_nop */
-+	.align 4
-+.LENDCIEDLSI1:
-+	.long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */
-+.LSTARTFDEDLSI1:
-+	.long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */
-+	/* HACK: The dwarf2 unwind routines will subtract 1 from the
-+	   return address to get an address in the middle of the
-+	   presumed call instruction.  Since we didn't get here via
-+	   a call, we need to include the nop before the real start
-+	   to make up for it.  */
-+	.long .LSTART_sigreturn-1-.	/* PC-relative start address */
-+	.long .LEND_sigreturn-.LSTART_sigreturn+1
-+	.uleb128 0			/* Augmentation */
-+	/* What follows are the instructions for the table generation.
-+	   We record the locations of each register saved.  This is
-+	   complicated by the fact that the "CFA" is always assumed to
-+	   be the value of the stack pointer in the caller.  This means
-+	   that we must define the CFA of this body of code to be the
-+	   saved value of the stack pointer in the sigcontext.  Which
-+	   also means that there is no fixed relation to the other
-+	   saved registers, which means that we must use DW_CFA_expression
-+	   to compute their addresses.  It also means that when we
-+	   adjust the stack with the popl, we have to do it all over again.  */
++	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
++	if (IS_ERR(cipher))
++		return PTR_ERR(cipher);
 +
-+#define do_cfa_expr(offset)						\
-+	.byte 0x0f;			/* DW_CFA_def_cfa_expression */	\
-+	.uleb128 1f-0f;			/*   length */			\
-+0:	.byte 0x74;			/*     DW_OP_breg4 */		\
-+	.sleb128 offset;		/*      offset */		\
-+	.byte 0x06;			/*     DW_OP_deref */		\
-+1:
++	tfm->crt_ablkcipher.base = cipher;
++	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
 +
-+#define do_expr(regno, offset)						\
-+	.byte 0x10;			/* DW_CFA_expression */		\
-+	.uleb128 regno;			/*   regno */			\
-+	.uleb128 1f-0f;			/*   length */			\
-+0:	.byte 0x74;			/*     DW_OP_breg4 */		\
-+	.sleb128 offset;		/*       offset */		\
-+1:
++	return 0;
++}
++EXPORT_SYMBOL_GPL(skcipher_geniv_init);
 +
-+	do_cfa_expr(IA32_SIGCONTEXT_sp+4)
-+	do_expr(0, IA32_SIGCONTEXT_ax+4)
-+	do_expr(1, IA32_SIGCONTEXT_cx+4)
-+	do_expr(2, IA32_SIGCONTEXT_dx+4)
-+	do_expr(3, IA32_SIGCONTEXT_bx+4)
-+	do_expr(5, IA32_SIGCONTEXT_bp+4)
-+	do_expr(6, IA32_SIGCONTEXT_si+4)
-+	do_expr(7, IA32_SIGCONTEXT_di+4)
-+	do_expr(8, IA32_SIGCONTEXT_ip+4)
++void skcipher_geniv_exit(struct crypto_tfm *tfm)
++{
++	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
++}
++EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
 +
-+	.byte 0x42	/* DW_CFA_advance_loc 2 -- nop; popl eax. */
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Generic block chaining cipher type");
+diff --git a/crypto/camellia.c b/crypto/camellia.c
+index 6877ecf..493fee7 100644
+--- a/crypto/camellia.c
++++ b/crypto/camellia.c
+@@ -36,176 +36,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ 
+-
+-#define CAMELLIA_MIN_KEY_SIZE        16
+-#define CAMELLIA_MAX_KEY_SIZE        32
+-#define CAMELLIA_BLOCK_SIZE 16
+-#define CAMELLIA_TABLE_BYTE_LEN 272
+-#define CAMELLIA_TABLE_WORD_LEN (CAMELLIA_TABLE_BYTE_LEN / 4)
+-
+-typedef u32 KEY_TABLE_TYPE[CAMELLIA_TABLE_WORD_LEN];
+-
+-
+-/* key constants */
+-
+-#define CAMELLIA_SIGMA1L (0xA09E667FL)
+-#define CAMELLIA_SIGMA1R (0x3BCC908BL)
+-#define CAMELLIA_SIGMA2L (0xB67AE858L)
+-#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
+-#define CAMELLIA_SIGMA3L (0xC6EF372FL)
+-#define CAMELLIA_SIGMA3R (0xE94F82BEL)
+-#define CAMELLIA_SIGMA4L (0x54FF53A5L)
+-#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
+-#define CAMELLIA_SIGMA5L (0x10E527FAL)
+-#define CAMELLIA_SIGMA5R (0xDE682D1DL)
+-#define CAMELLIA_SIGMA6L (0xB05688C2L)
+-#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
+-
+-struct camellia_ctx {
+-	int key_length;
+-	KEY_TABLE_TYPE key_table;
+-};
+-
+-
+-/*
+- *  macros
+- */
+-
+-
+-# define GETU32(pt) (((u32)(pt)[0] << 24)	\
+-		     ^ ((u32)(pt)[1] << 16)	\
+-		     ^ ((u32)(pt)[2] <<  8)	\
+-		     ^ ((u32)(pt)[3]))
+-
+-#define COPY4WORD(dst, src)			\
+-    do {					\
+-	(dst)[0]=(src)[0];			\
+-	(dst)[1]=(src)[1];			\
+-	(dst)[2]=(src)[2];			\
+-	(dst)[3]=(src)[3];			\
+-    }while(0)
+-
+-#define SWAP4WORD(word)				\
+-    do {					\
+-	CAMELLIA_SWAP4((word)[0]);		\
+-	CAMELLIA_SWAP4((word)[1]);		\
+-	CAMELLIA_SWAP4((word)[2]);		\
+-	CAMELLIA_SWAP4((word)[3]);		\
+-    }while(0)
+-
+-#define XOR4WORD(a, b)/* a = a ^ b */		\
+-    do {					\
+-	(a)[0]^=(b)[0];				\
+-	(a)[1]^=(b)[1];				\
+-	(a)[2]^=(b)[2];				\
+-	(a)[3]^=(b)[3];				\
+-    }while(0)
+-
+-#define XOR4WORD2(a, b, c)/* a = b ^ c */	\
+-    do {					\
+-	(a)[0]=(b)[0]^(c)[0];			\
+-	(a)[1]=(b)[1]^(c)[1];			\
+-	(a)[2]=(b)[2]^(c)[2];			\
+-	(a)[3]=(b)[3]^(c)[3];			\
+-    }while(0)
+-
+-#define CAMELLIA_SUBKEY_L(INDEX) (subkey[(INDEX)*2])
+-#define CAMELLIA_SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
+-
+-/* rotation right shift 1byte */
+-#define CAMELLIA_RR8(x) (((x) >> 8) + ((x) << 24))
+-/* rotation left shift 1bit */
+-#define CAMELLIA_RL1(x) (((x) << 1) + ((x) >> 31))
+-/* rotation left shift 1byte */
+-#define CAMELLIA_RL8(x) (((x) << 8) + ((x) >> 24))
+-
+-#define CAMELLIA_ROLDQ(ll, lr, rl, rr, w0, w1, bits)	\
+-    do {						\
+-	w0 = ll;					\
+-	ll = (ll << bits) + (lr >> (32 - bits));	\
+-	lr = (lr << bits) + (rl >> (32 - bits));	\
+-	rl = (rl << bits) + (rr >> (32 - bits));	\
+-	rr = (rr << bits) + (w0 >> (32 - bits));	\
+-    } while(0)
+-
+-#define CAMELLIA_ROLDQo32(ll, lr, rl, rr, w0, w1, bits)	\
+-    do {						\
+-	w0 = ll;					\
+-	w1 = lr;					\
+-	ll = (lr << (bits - 32)) + (rl >> (64 - bits));	\
+-	lr = (rl << (bits - 32)) + (rr >> (64 - bits));	\
+-	rl = (rr << (bits - 32)) + (w0 >> (64 - bits));	\
+-	rr = (w0 << (bits - 32)) + (w1 >> (64 - bits));	\
+-    } while(0)
+-
+-#define CAMELLIA_SP1110(INDEX) (camellia_sp1110[(INDEX)])
+-#define CAMELLIA_SP0222(INDEX) (camellia_sp0222[(INDEX)])
+-#define CAMELLIA_SP3033(INDEX) (camellia_sp3033[(INDEX)])
+-#define CAMELLIA_SP4404(INDEX) (camellia_sp4404[(INDEX)])
+-
+-#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1)	\
+-    do {							\
+-	il = xl ^ kl;						\
+-	ir = xr ^ kr;						\
+-	t0 = il >> 16;						\
+-	t1 = ir >> 16;						\
+-	yl = CAMELLIA_SP1110(ir & 0xff)				\
+-	    ^ CAMELLIA_SP0222((t1 >> 8) & 0xff)			\
+-	    ^ CAMELLIA_SP3033(t1 & 0xff)			\
+-	    ^ CAMELLIA_SP4404((ir >> 8) & 0xff);		\
+-	yr = CAMELLIA_SP1110((t0 >> 8) & 0xff)			\
+-	    ^ CAMELLIA_SP0222(t0 & 0xff)			\
+-	    ^ CAMELLIA_SP3033((il >> 8) & 0xff)			\
+-	    ^ CAMELLIA_SP4404(il & 0xff);			\
+-	yl ^= yr;						\
+-	yr = CAMELLIA_RR8(yr);					\
+-	yr ^= yl;						\
+-    } while(0)
+-
+-
+-/*
+- * for speed up
+- *
+- */
+-#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \
+-    do {								\
+-	t0 = kll;							\
+-	t2 = krr;							\
+-	t0 &= ll;							\
+-	t2 |= rr;							\
+-	rl ^= t2;							\
+-	lr ^= CAMELLIA_RL1(t0);						\
+-	t3 = krl;							\
+-	t1 = klr;							\
+-	t3 &= rl;							\
+-	t1 |= lr;							\
+-	ll ^= t1;							\
+-	rr ^= CAMELLIA_RL1(t3);						\
+-    } while(0)
+-
+-#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1)	\
+-    do {								\
+-	ir =  CAMELLIA_SP1110(xr & 0xff);				\
+-	il =  CAMELLIA_SP1110((xl>>24) & 0xff);				\
+-	ir ^= CAMELLIA_SP0222((xr>>24) & 0xff);				\
+-	il ^= CAMELLIA_SP0222((xl>>16) & 0xff);				\
+-	ir ^= CAMELLIA_SP3033((xr>>16) & 0xff);				\
+-	il ^= CAMELLIA_SP3033((xl>>8) & 0xff);				\
+-	ir ^= CAMELLIA_SP4404((xr>>8) & 0xff);				\
+-	il ^= CAMELLIA_SP4404(xl & 0xff);				\
+-	il ^= kl;							\
+-	ir ^= il ^ kr;							\
+-	yl ^= ir;							\
+-	yr ^= CAMELLIA_RR8(il) ^ ir;					\
+-    } while(0)
+-
+-/**
+- * Stuff related to the Camellia key schedule
+- */
+-#define SUBL(x) subL[(x)]
+-#define SUBR(x) subR[(x)]
+-
+-
+ static const u32 camellia_sp1110[256] = {
+ 	0x70707000,0x82828200,0x2c2c2c00,0xececec00,
+ 	0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500,
+@@ -475,67 +305,348 @@ static const u32 camellia_sp4404[256] = {
+ };
+ 
+ 
++#define CAMELLIA_MIN_KEY_SIZE        16
++#define CAMELLIA_MAX_KEY_SIZE        32
++#define CAMELLIA_BLOCK_SIZE          16
++#define CAMELLIA_TABLE_BYTE_LEN     272
 +
-+	do_cfa_expr(IA32_SIGCONTEXT_sp)
-+	do_expr(0, IA32_SIGCONTEXT_ax)
-+	do_expr(1, IA32_SIGCONTEXT_cx)
-+	do_expr(2, IA32_SIGCONTEXT_dx)
-+	do_expr(3, IA32_SIGCONTEXT_bx)
-+	do_expr(5, IA32_SIGCONTEXT_bp)
-+	do_expr(6, IA32_SIGCONTEXT_si)
-+	do_expr(7, IA32_SIGCONTEXT_di)
-+	do_expr(8, IA32_SIGCONTEXT_ip)
++/*
++ * NB: L and R below stand for 'left' and 'right' as in written numbers.
++ * That is, in (xxxL,xxxR) pair xxxL holds most significant digits,
++ * _not_ least significant ones!
++ */
 +
-+	.align 4
-+.LENDFDEDLSI1:
 +
-+	.long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */
-+.LSTARTFDEDLSI2:
-+	.long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */
-+	/* HACK: See above wrt unwind library assumptions.  */
-+	.long .LSTART_rt_sigreturn-1-.	/* PC-relative start address */
-+	.long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
-+	.uleb128 0			/* Augmentation */
-+	/* What follows are the instructions for the table generation.
-+	   We record the locations of each register saved.  This is
-+	   slightly less complicated than the above, since we don't
-+	   modify the stack pointer in the process.  */
++/* key constants */
 +
-+	do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp)
-+	do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax)
-+	do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx)
-+	do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx)
-+	do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx)
-+	do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp)
-+	do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si)
-+	do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di)
-+	do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip)
++#define CAMELLIA_SIGMA1L (0xA09E667FL)
++#define CAMELLIA_SIGMA1R (0x3BCC908BL)
++#define CAMELLIA_SIGMA2L (0xB67AE858L)
++#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
++#define CAMELLIA_SIGMA3L (0xC6EF372FL)
++#define CAMELLIA_SIGMA3R (0xE94F82BEL)
++#define CAMELLIA_SIGMA4L (0x54FF53A5L)
++#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
++#define CAMELLIA_SIGMA5L (0x10E527FAL)
++#define CAMELLIA_SIGMA5R (0xDE682D1DL)
++#define CAMELLIA_SIGMA6L (0xB05688C2L)
++#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
 +
-+	.align 4
-+.LENDFDEDLSI2:
-+	.previous
-diff --git a/arch/x86/vdso/vdso32/syscall.S b/arch/x86/vdso/vdso32/syscall.S
-new file mode 100644
-index 0000000..5415b56
---- /dev/null
-+++ b/arch/x86/vdso/vdso32/syscall.S
-@@ -0,0 +1,77 @@
 +/*
-+ * Code for the vDSO.  This version uses the syscall instruction.
-+ *
-+ * First get the common code for the sigreturn entry points.
-+ * This must come first.
++ *  macros
 + */
-+#define SYSCALL_ENTER_KERNEL	syscall
-+#include "sigreturn.S"
-+
-+#include <asm/segment.h>
-+
-+	.text
-+	.globl __kernel_vsyscall
-+	.type __kernel_vsyscall,@function
-+	ALIGN
-+__kernel_vsyscall:
-+.LSTART_vsyscall:
-+	push	%ebp
-+.Lpush_ebp:
-+	movl	%ecx, %ebp
-+	syscall
-+	movl	$__USER32_DS, %ecx
-+	movl	%ecx, %ss
-+	movl	%ebp, %ecx
-+	popl	%ebp
-+.Lpop_ebp:
-+	ret
-+.LEND_vsyscall:
-+	.size __kernel_vsyscall,.-.LSTART_vsyscall
++#define GETU32(v, pt) \
++    do { \
++	/* latest breed of gcc is clever enough to use move */ \
++	memcpy(&(v), (pt), 4); \
++	(v) = be32_to_cpu(v); \
++    } while(0)
 +
-+	.section .eh_frame,"a",@progbits
-+.LSTARTFRAME:
-+	.long .LENDCIE-.LSTARTCIE
-+.LSTARTCIE:
-+	.long 0			/* CIE ID */
-+	.byte 1			/* Version number */
-+	.string "zR"		/* NUL-terminated augmentation string */
-+	.uleb128 1		/* Code alignment factor */
-+	.sleb128 -4		/* Data alignment factor */
-+	.byte 8			/* Return address register column */
-+	.uleb128 1		/* Augmentation value length */
-+	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-+	.byte 0x0c		/* DW_CFA_def_cfa */
-+	.uleb128 4
-+	.uleb128 4
-+	.byte 0x88		/* DW_CFA_offset, column 0x8 */
-+	.uleb128 1
-+	.align 4
-+.LENDCIE:
++/* rotation right shift 1byte */
++#define ROR8(x) (((x) >> 8) + ((x) << 24))
++/* rotation left shift 1bit */
++#define ROL1(x) (((x) << 1) + ((x) >> 31))
++/* rotation left shift 1byte */
++#define ROL8(x) (((x) << 8) + ((x) >> 24))
 +
-+	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
-+.LSTARTFDE1:
-+	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
-+	.long .LSTART_vsyscall-.	/* PC-relative start address */
-+	.long .LEND_vsyscall-.LSTART_vsyscall
-+	.uleb128 0			/* Augmentation length */
-+	/* What follows are the instructions for the table generation.
-+	   We have to record all changes of the stack pointer.  */
-+	.byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
-+	.byte 0x0e		/* DW_CFA_def_cfa_offset */
-+	.uleb128 8
-+	.byte 0x85, 0x02	/* DW_CFA_offset %ebp -8 */
-+	.byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
-+	.byte 0xc5		/* DW_CFA_restore %ebp */
-+	.byte 0x0e		/* DW_CFA_def_cfa_offset */
-+	.uleb128 4
-+	.align 4
-+.LENDFDE1:
-+	.previous
++#define ROLDQ(ll, lr, rl, rr, w0, w1, bits)		\
++    do {						\
++	w0 = ll;					\
++	ll = (ll << bits) + (lr >> (32 - bits));	\
++	lr = (lr << bits) + (rl >> (32 - bits));	\
++	rl = (rl << bits) + (rr >> (32 - bits));	\
++	rr = (rr << bits) + (w0 >> (32 - bits));	\
++    } while(0)
 +
-+	/*
-+	 * Pad out the segment to match the size of the sysenter.S version.
-+	 */
-+VDSO32_vsyscall_eh_frame_size = 0x40
-+	.section .data,"aw",@progbits
-+	.space VDSO32_vsyscall_eh_frame_size-(.LENDFDE1-.LSTARTFRAME), 0
-+	.previous
-diff --git a/arch/x86/vdso/vdso32/sysenter.S b/arch/x86/vdso/vdso32/sysenter.S
-new file mode 100644
-index 0000000..e2800af
---- /dev/null
-+++ b/arch/x86/vdso/vdso32/sysenter.S
-@@ -0,0 +1,116 @@
-+/*
-+ * Code for the vDSO.  This version uses the sysenter instruction.
-+ *
-+ * First get the common code for the sigreturn entry points.
-+ * This must come first.
-+ */
-+#include "sigreturn.S"
++#define ROLDQo32(ll, lr, rl, rr, w0, w1, bits)		\
++    do {						\
++	w0 = ll;					\
++	w1 = lr;					\
++	ll = (lr << (bits - 32)) + (rl >> (64 - bits));	\
++	lr = (rl << (bits - 32)) + (rr >> (64 - bits));	\
++	rl = (rr << (bits - 32)) + (w0 >> (64 - bits));	\
++	rr = (w0 << (bits - 32)) + (w1 >> (64 - bits));	\
++    } while(0)
 +
-+/*
-+ * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
-+ * %ecx itself for arg2. The pushing is because the sysexit instruction
-+ * (found in entry.S) requires that we clobber %ecx with the desired %esp.
-+ * User code might expect that %ecx is unclobbered though, as it would be
-+ * for returning via the iret instruction, so we must push and pop.
-+ *
-+ * The caller puts arg3 in %edx, which the sysexit instruction requires
-+ * for %eip. Thus, exactly as for arg2, we must push and pop.
-+ *
-+ * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
-+ * instruction clobbers %esp, the user's %esp won't even survive entry
-+ * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
-+ * arg6 from the stack.
-+ *
-+ * You can not use this vsyscall for the clone() syscall because the
-+ * three words on the parent stack do not get copied to the child.
-+ */
-+	.text
-+	.globl __kernel_vsyscall
-+	.type __kernel_vsyscall,@function
-+	ALIGN
-+__kernel_vsyscall:
-+.LSTART_vsyscall:
-+	push %ecx
-+.Lpush_ecx:
-+	push %edx
-+.Lpush_edx:
-+	push %ebp
-+.Lenter_kernel:
-+	movl %esp,%ebp
-+	sysenter
++#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1)	\
++    do {							\
++	il = xl ^ kl;						\
++	ir = xr ^ kr;						\
++	t0 = il >> 16;						\
++	t1 = ir >> 16;						\
++	yl = camellia_sp1110[(u8)(ir     )]			\
++	   ^ camellia_sp0222[    (t1 >> 8)]			\
++	   ^ camellia_sp3033[(u8)(t1     )]			\
++	   ^ camellia_sp4404[(u8)(ir >> 8)];			\
++	yr = camellia_sp1110[    (t0 >> 8)]			\
++	   ^ camellia_sp0222[(u8)(t0     )]			\
++	   ^ camellia_sp3033[(u8)(il >> 8)]			\
++	   ^ camellia_sp4404[(u8)(il     )];			\
++	yl ^= yr;						\
++	yr = ROR8(yr);						\
++	yr ^= yl;						\
++    } while(0)
 +
-+	/* 7: align return point with nop's to make disassembly easier */
-+	.space 7,0x90
++#define SUBKEY_L(INDEX) (subkey[(INDEX)*2])
++#define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
 +
-+	/* 14: System call restart point is here! (SYSENTER_RETURN-2) */
-+	jmp .Lenter_kernel
-+	/* 16: System call normal return point is here! */
-+VDSO32_SYSENTER_RETURN:	/* Symbol used by sysenter.c via vdso32-syms.h */
-+	pop %ebp
-+.Lpop_ebp:
-+	pop %edx
-+.Lpop_edx:
-+	pop %ecx
-+.Lpop_ecx:
-+	ret
-+.LEND_vsyscall:
-+	.size __kernel_vsyscall,.-.LSTART_vsyscall
-+	.previous
++static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
++{
++	u32 dw, tl, tr;
++	u32 kw4l, kw4r;
++	int i;
 +
-+	.section .eh_frame,"a",@progbits
-+.LSTARTFRAMEDLSI:
-+	.long .LENDCIEDLSI-.LSTARTCIEDLSI
-+.LSTARTCIEDLSI:
-+	.long 0			/* CIE ID */
-+	.byte 1			/* Version number */
-+	.string "zR"		/* NUL-terminated augmentation string */
-+	.uleb128 1		/* Code alignment factor */
-+	.sleb128 -4		/* Data alignment factor */
-+	.byte 8			/* Return address register column */
-+	.uleb128 1		/* Augmentation value length */
-+	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-+	.byte 0x0c		/* DW_CFA_def_cfa */
-+	.uleb128 4
-+	.uleb128 4
-+	.byte 0x88		/* DW_CFA_offset, column 0x8 */
-+	.uleb128 1
-+	.align 4
-+.LENDCIEDLSI:
-+	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
-+.LSTARTFDEDLSI:
-+	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
-+	.long .LSTART_vsyscall-.	/* PC-relative start address */
-+	.long .LEND_vsyscall-.LSTART_vsyscall
-+	.uleb128 0
-+	/* What follows are the instructions for the table generation.
-+	   We have to record all changes of the stack pointer.  */
-+	.byte 0x40 + (.Lpush_ecx-.LSTART_vsyscall) /* DW_CFA_advance_loc */
-+	.byte 0x0e		/* DW_CFA_def_cfa_offset */
-+	.byte 0x08		/* RA at offset 8 now */
-+	.byte 0x40 + (.Lpush_edx-.Lpush_ecx) /* DW_CFA_advance_loc */
-+	.byte 0x0e		/* DW_CFA_def_cfa_offset */
-+	.byte 0x0c		/* RA at offset 12 now */
-+	.byte 0x40 + (.Lenter_kernel-.Lpush_edx) /* DW_CFA_advance_loc */
-+	.byte 0x0e		/* DW_CFA_def_cfa_offset */
-+	.byte 0x10		/* RA at offset 16 now */
-+	.byte 0x85, 0x04	/* DW_CFA_offset %ebp -16 */
-+	/* Finally the epilogue.  */
-+	.byte 0x40 + (.Lpop_ebp-.Lenter_kernel)	/* DW_CFA_advance_loc */
-+	.byte 0x0e		/* DW_CFA_def_cfa_offset */
-+	.byte 0x0c		/* RA at offset 12 now */
-+	.byte 0xc5		/* DW_CFA_restore %ebp */
-+	.byte 0x40 + (.Lpop_edx-.Lpop_ebp) /* DW_CFA_advance_loc */
-+	.byte 0x0e		/* DW_CFA_def_cfa_offset */
-+	.byte 0x08		/* RA at offset 8 now */
-+	.byte 0x40 + (.Lpop_ecx-.Lpop_edx) /* DW_CFA_advance_loc */
-+	.byte 0x0e		/* DW_CFA_def_cfa_offset */
-+	.byte 0x04		/* RA at offset 4 now */
-+	.align 4
-+.LENDFDEDLSI:
-+	.previous
++	/* absorb kw2 to other subkeys */
++	/* round 2 */
++	subL[3] ^= subL[1]; subR[3] ^= subR[1];
++	/* round 4 */
++	subL[5] ^= subL[1]; subR[5] ^= subR[1];
++	/* round 6 */
++	subL[7] ^= subL[1]; subR[7] ^= subR[1];
++	subL[1] ^= subR[1] & ~subR[9];
++	dw = subL[1] & subL[9],
++		subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
++	/* round 8 */
++	subL[11] ^= subL[1]; subR[11] ^= subR[1];
++	/* round 10 */
++	subL[13] ^= subL[1]; subR[13] ^= subR[1];
++	/* round 12 */
++	subL[15] ^= subL[1]; subR[15] ^= subR[1];
++	subL[1] ^= subR[1] & ~subR[17];
++	dw = subL[1] & subL[17],
++		subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
++	/* round 14 */
++	subL[19] ^= subL[1]; subR[19] ^= subR[1];
++	/* round 16 */
++	subL[21] ^= subL[1]; subR[21] ^= subR[1];
++	/* round 18 */
++	subL[23] ^= subL[1]; subR[23] ^= subR[1];
++	if (max == 24) {
++		/* kw3 */
++		subL[24] ^= subL[1]; subR[24] ^= subR[1];
 +
-+	/*
-+	 * Emit a symbol with the size of this .eh_frame data,
-+	 * to verify it matches the other versions.
-+	 */
-+VDSO32_vsyscall_eh_frame_size = (.LENDFDEDLSI-.LSTARTFRAMEDLSI)
-diff --git a/arch/x86/vdso/vdso32/vdso32.lds.S b/arch/x86/vdso/vdso32/vdso32.lds.S
-new file mode 100644
-index 0000000..976124b
---- /dev/null
-+++ b/arch/x86/vdso/vdso32/vdso32.lds.S
-@@ -0,0 +1,37 @@
-+/*
-+ * Linker script for 32-bit vDSO.
-+ * We #include the file to define the layout details.
-+ * Here we only choose the prelinked virtual address.
-+ *
-+ * This file defines the version script giving the user-exported symbols in
-+ * the DSO.  We can define local symbols here called VDSO* to make their
-+ * values visible using the asm-x86/vdso.h macros from the kernel proper.
-+ */
++	/* absorb kw4 to other subkeys */
++		kw4l = subL[25]; kw4r = subR[25];
++	} else {
++		subL[1] ^= subR[1] & ~subR[25];
++		dw = subL[1] & subL[25],
++			subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
++		/* round 20 */
++		subL[27] ^= subL[1]; subR[27] ^= subR[1];
++		/* round 22 */
++		subL[29] ^= subL[1]; subR[29] ^= subR[1];
++		/* round 24 */
++		subL[31] ^= subL[1]; subR[31] ^= subR[1];
++		/* kw3 */
++		subL[32] ^= subL[1]; subR[32] ^= subR[1];
 +
-+#define VDSO_PRELINK 0
-+#include "../vdso-layout.lds.S"
++	/* absorb kw4 to other subkeys */
++		kw4l = subL[33]; kw4r = subR[33];
++		/* round 23 */
++		subL[30] ^= kw4l; subR[30] ^= kw4r;
++		/* round 21 */
++		subL[28] ^= kw4l; subR[28] ^= kw4r;
++		/* round 19 */
++		subL[26] ^= kw4l; subR[26] ^= kw4r;
++		kw4l ^= kw4r & ~subR[24];
++		dw = kw4l & subL[24],
++			kw4r ^= ROL1(dw); /* modified for FL(kl5) */
++	}
++	/* round 17 */
++	subL[22] ^= kw4l; subR[22] ^= kw4r;
++	/* round 15 */
++	subL[20] ^= kw4l; subR[20] ^= kw4r;
++	/* round 13 */
++	subL[18] ^= kw4l; subR[18] ^= kw4r;
++	kw4l ^= kw4r & ~subR[16];
++	dw = kw4l & subL[16],
++		kw4r ^= ROL1(dw); /* modified for FL(kl3) */
++	/* round 11 */
++	subL[14] ^= kw4l; subR[14] ^= kw4r;
++	/* round 9 */
++	subL[12] ^= kw4l; subR[12] ^= kw4r;
++	/* round 7 */
++	subL[10] ^= kw4l; subR[10] ^= kw4r;
++	kw4l ^= kw4r & ~subR[8];
++	dw = kw4l & subL[8],
++		kw4r ^= ROL1(dw); /* modified for FL(kl1) */
++	/* round 5 */
++	subL[6] ^= kw4l; subR[6] ^= kw4r;
++	/* round 3 */
++	subL[4] ^= kw4l; subR[4] ^= kw4r;
++	/* round 1 */
++	subL[2] ^= kw4l; subR[2] ^= kw4r;
++	/* kw1 */
++	subL[0] ^= kw4l; subR[0] ^= kw4r;
 +
-+/* The ELF entry point can be used to set the AT_SYSINFO value.  */
-+ENTRY(__kernel_vsyscall);
++	/* key XOR is end of F-function */
++	SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */
++	SUBKEY_R(0) = subR[0] ^ subR[2];
++	SUBKEY_L(2) = subL[3];       /* round 1 */
++	SUBKEY_R(2) = subR[3];
++	SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */
++	SUBKEY_R(3) = subR[2] ^ subR[4];
++	SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */
++	SUBKEY_R(4) = subR[3] ^ subR[5];
++	SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */
++	SUBKEY_R(5) = subR[4] ^ subR[6];
++	SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */
++	SUBKEY_R(6) = subR[5] ^ subR[7];
++	tl = subL[10] ^ (subR[10] & ~subR[8]);
++	dw = tl & subL[8],  /* FL(kl1) */
++		tr = subR[10] ^ ROL1(dw);
++	SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
++	SUBKEY_R(7) = subR[6] ^ tr;
++	SUBKEY_L(8) = subL[8];       /* FL(kl1) */
++	SUBKEY_R(8) = subR[8];
++	SUBKEY_L(9) = subL[9];       /* FLinv(kl2) */
++	SUBKEY_R(9) = subR[9];
++	tl = subL[7] ^ (subR[7] & ~subR[9]);
++	dw = tl & subL[9],  /* FLinv(kl2) */
++		tr = subR[7] ^ ROL1(dw);
++	SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
++	SUBKEY_R(10) = tr ^ subR[11];
++	SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
++	SUBKEY_R(11) = subR[10] ^ subR[12];
++	SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */
++	SUBKEY_R(12) = subR[11] ^ subR[13];
++	SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */
++	SUBKEY_R(13) = subR[12] ^ subR[14];
++	SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */
++	SUBKEY_R(14) = subR[13] ^ subR[15];
++	tl = subL[18] ^ (subR[18] & ~subR[16]);
++	dw = tl & subL[16], /* FL(kl3) */
++		tr = subR[18] ^ ROL1(dw);
++	SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
++	SUBKEY_R(15) = subR[14] ^ tr;
++	SUBKEY_L(16) = subL[16];     /* FL(kl3) */
++	SUBKEY_R(16) = subR[16];
++	SUBKEY_L(17) = subL[17];     /* FLinv(kl4) */
++	SUBKEY_R(17) = subR[17];
++	tl = subL[15] ^ (subR[15] & ~subR[17]);
++	dw = tl & subL[17], /* FLinv(kl4) */
++		tr = subR[15] ^ ROL1(dw);
++	SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
++	SUBKEY_R(18) = tr ^ subR[19];
++	SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
++	SUBKEY_R(19) = subR[18] ^ subR[20];
++	SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */
++	SUBKEY_R(20) = subR[19] ^ subR[21];
++	SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */
++	SUBKEY_R(21) = subR[20] ^ subR[22];
++	SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */
++	SUBKEY_R(22) = subR[21] ^ subR[23];
++	if (max == 24) {
++		SUBKEY_L(23) = subL[22];     /* round 18 */
++		SUBKEY_R(23) = subR[22];
++		SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */
++		SUBKEY_R(24) = subR[24] ^ subR[23];
++	} else {
++		tl = subL[26] ^ (subR[26] & ~subR[24]);
++		dw = tl & subL[24], /* FL(kl5) */
++			tr = subR[26] ^ ROL1(dw);
++		SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
++		SUBKEY_R(23) = subR[22] ^ tr;
++		SUBKEY_L(24) = subL[24];     /* FL(kl5) */
++		SUBKEY_R(24) = subR[24];
++		SUBKEY_L(25) = subL[25];     /* FLinv(kl6) */
++		SUBKEY_R(25) = subR[25];
++		tl = subL[23] ^ (subR[23] & ~subR[25]);
++		dw = tl & subL[25], /* FLinv(kl6) */
++			tr = subR[23] ^ ROL1(dw);
++		SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
++		SUBKEY_R(26) = tr ^ subR[27];
++		SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
++		SUBKEY_R(27) = subR[26] ^ subR[28];
++		SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */
++		SUBKEY_R(28) = subR[27] ^ subR[29];
++		SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */
++		SUBKEY_R(29) = subR[28] ^ subR[30];
++		SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */
++		SUBKEY_R(30) = subR[29] ^ subR[31];
++		SUBKEY_L(31) = subL[30];     /* round 24 */
++		SUBKEY_R(31) = subR[30];
++		SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */
++		SUBKEY_R(32) = subR[32] ^ subR[31];
++	}
 +
-+/*
-+ * This controls what userland symbols we export from the vDSO.
-+ */
-+VERSION
-+{
-+	LINUX_2.5 {
-+	global:
-+		__kernel_vsyscall;
-+		__kernel_sigreturn;
-+		__kernel_rt_sigreturn;
-+	local: *;
-+	};
++	/* apply the inverse of the last half of P-function */
++	i = 2;
++	do {
++		dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
++		SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
++		dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
++		SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
++		dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
++		SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
++		dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
++		SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
++		dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
++		SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
++		dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
++		SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
++		i += 8;
++	} while (i < max);
 +}
-+
-+/*
-+ * Symbols we define here called VDSO* get their values into vdso32-syms.h.
-+ */
-+VDSO32_PRELINK		= VDSO_PRELINK;
-+VDSO32_vsyscall		= __kernel_vsyscall;
-+VDSO32_sigreturn	= __kernel_sigreturn;
-+VDSO32_rt_sigreturn	= __kernel_rt_sigreturn;
-diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
-index 3b1ae1a..c8097f1 100644
---- a/arch/x86/vdso/vgetcpu.c
-+++ b/arch/x86/vdso/vgetcpu.c
-@@ -15,11 +15,11 @@
  
- long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+ static void camellia_setup128(const unsigned char *key, u32 *subkey)
  {
--	unsigned int dummy, p;
-+	unsigned int p;
- 
- 	if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
- 		/* Load per CPU data from RDTSCP */
--		rdtscp(dummy, dummy, p);
-+		native_read_tscp(&p);
- 	} else {
- 		/* Load per CPU data from GDT */
- 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
-diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
-index ff9333e..3fdd514 100644
---- a/arch/x86/vdso/vma.c
-+++ b/arch/x86/vdso/vma.c
-@@ -11,23 +11,20 @@
- #include <asm/vsyscall.h>
- #include <asm/vgtod.h>
- #include <asm/proto.h>
--#include "voffset.h"
-+#include <asm/vdso.h>
- 
--int vdso_enabled = 1;
--
--#define VEXTERN(x) extern typeof(__ ## x) *vdso_ ## x;
--#include "vextern.h"
-+#include "vextern.h"		/* Just for VMAGIC.  */
- #undef VEXTERN
+ 	u32 kll, klr, krl, krr;
+ 	u32 il, ir, t0, t1, w0, w1;
+-	u32 kw4l, kw4r, dw, tl, tr;
+ 	u32 subL[26];
+ 	u32 subR[26];
  
--extern char vdso_kernel_start[], vdso_start[], vdso_end[];
-+int vdso_enabled = 1;
+ 	/**
+-	 *  k == kll || klr || krl || krr (|| is concatination)
+-	 */
+-	kll = GETU32(key     );
+-	klr = GETU32(key +  4);
+-	krl = GETU32(key +  8);
+-	krr = GETU32(key + 12);
+-	/**
+-	 * generate KL dependent subkeys
++	 *  k == kll || klr || krl || krr (|| is concatenation)
+ 	 */
++	GETU32(kll, key     );
++	GETU32(klr, key +  4);
++	GETU32(krl, key +  8);
++	GETU32(krr, key + 12);
 +
-+extern char vdso_start[], vdso_end[];
- extern unsigned short vdso_sync_cpuid;
++	/* generate KL dependent subkeys */
+ 	/* kw1 */
+-	SUBL(0) = kll; SUBR(0) = klr;
++	subL[0] = kll; subR[0] = klr;
+ 	/* kw2 */
+-	SUBL(1) = krl; SUBR(1) = krr;
++	subL[1] = krl; subR[1] = krr;
+ 	/* rotation left shift 15bit */
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
++	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ 	/* k3 */
+-	SUBL(4) = kll; SUBR(4) = klr;
++	subL[4] = kll; subR[4] = klr;
+ 	/* k4 */
+-	SUBL(5) = krl; SUBR(5) = krr;
++	subL[5] = krl; subR[5] = krr;
+ 	/* rotation left shift 15+30bit */
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
++	ROLDQ(kll, klr, krl, krr, w0, w1, 30);
+ 	/* k7 */
+-	SUBL(10) = kll; SUBR(10) = klr;
++	subL[10] = kll; subR[10] = klr;
+ 	/* k8 */
+-	SUBL(11) = krl; SUBR(11) = krr;
++	subL[11] = krl; subR[11] = krr;
+ 	/* rotation left shift 15+30+15bit */
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
++	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ 	/* k10 */
+-	SUBL(13) = krl; SUBR(13) = krr;
++	subL[13] = krl; subR[13] = krr;
+ 	/* rotation left shift 15+30+15+17 bit */
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
++	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ 	/* kl3 */
+-	SUBL(16) = kll; SUBR(16) = klr;
++	subL[16] = kll; subR[16] = klr;
+ 	/* kl4 */
+-	SUBL(17) = krl; SUBR(17) = krr;
++	subL[17] = krl; subR[17] = krr;
+ 	/* rotation left shift 15+30+15+17+17 bit */
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
++	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ 	/* k13 */
+-	SUBL(18) = kll; SUBR(18) = klr;
++	subL[18] = kll; subR[18] = klr;
+ 	/* k14 */
+-	SUBL(19) = krl; SUBR(19) = krr;
++	subL[19] = krl; subR[19] = krr;
+ 	/* rotation left shift 15+30+15+17+17+17 bit */
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
++	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ 	/* k17 */
+-	SUBL(22) = kll; SUBR(22) = klr;
++	subL[22] = kll; subR[22] = klr;
+ 	/* k18 */
+-	SUBL(23) = krl; SUBR(23) = krr;
++	subL[23] = krl; subR[23] = krr;
  
- struct page **vdso_pages;
+ 	/* generate KA */
+-	kll = SUBL(0); klr = SUBR(0);
+-	krl = SUBL(1); krr = SUBR(1);
++	kll = subL[0]; klr = subR[0];
++	krl = subL[1]; krr = subR[1];
+ 	CAMELLIA_F(kll, klr,
+ 		   CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
+ 		   w0, w1, il, ir, t0, t1);
+@@ -555,306 +666,108 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
  
--static inline void *var_ref(void *vbase, char *var, char *name)
-+static inline void *var_ref(void *p, char *name)
- {
--	unsigned offset = var - &vdso_kernel_start[0] + VDSO_TEXT_OFFSET;
--	void *p = vbase + offset;
- 	if (*(void **)p != (void *)VMAGIC) {
- 		printk("VDSO: variable %s broken\n", name);
- 		vdso_enabled = 0;
-@@ -62,9 +59,8 @@ static int __init init_vdso_vars(void)
- 		vdso_enabled = 0;
- 	}
+ 	/* generate KA dependent subkeys */
+ 	/* k1, k2 */
+-	SUBL(2) = kll; SUBR(2) = klr;
+-	SUBL(3) = krl; SUBR(3) = krr;
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
++	subL[2] = kll; subR[2] = klr;
++	subL[3] = krl; subR[3] = krr;
++	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ 	/* k5,k6 */
+-	SUBL(6) = kll; SUBR(6) = klr;
+-	SUBL(7) = krl; SUBR(7) = krr;
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
++	subL[6] = kll; subR[6] = klr;
++	subL[7] = krl; subR[7] = krr;
++	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ 	/* kl1, kl2 */
+-	SUBL(8) = kll; SUBR(8) = klr;
+-	SUBL(9) = krl; SUBR(9) = krr;
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
++	subL[8] = kll; subR[8] = klr;
++	subL[9] = krl; subR[9] = krr;
++	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ 	/* k9 */
+-	SUBL(12) = kll; SUBR(12) = klr;
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
++	subL[12] = kll; subR[12] = klr;
++	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ 	/* k11, k12 */
+-	SUBL(14) = kll; SUBR(14) = klr;
+-	SUBL(15) = krl; SUBR(15) = krr;
+-	CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
++	subL[14] = kll; subR[14] = klr;
++	subL[15] = krl; subR[15] = krr;
++	ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
+ 	/* k15, k16 */
+-	SUBL(20) = kll; SUBR(20) = klr;
+-	SUBL(21) = krl; SUBR(21) = krr;
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
++	subL[20] = kll; subR[20] = klr;
++	subL[21] = krl; subR[21] = krr;
++	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ 	/* kw3, kw4 */
+-	SUBL(24) = kll; SUBR(24) = klr;
+-	SUBL(25) = krl; SUBR(25) = krr;
++	subL[24] = kll; subR[24] = klr;
++	subL[25] = krl; subR[25] = krr;
  
--#define V(x) *(typeof(x) *) var_ref(vbase, (char *)RELOC_HIDE(&x, 0), #x)
- #define VEXTERN(x) \
--	V(vdso_ ## x) = &__ ## x;
-+	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
- #include "vextern.h"
- #undef VEXTERN
- 	return 0;
-diff --git a/arch/x86/vdso/voffset.h b/arch/x86/vdso/voffset.h
-deleted file mode 100644
-index 4af67c7..0000000
---- a/arch/x86/vdso/voffset.h
-+++ /dev/null
-@@ -1 +0,0 @@
--#define VDSO_TEXT_OFFSET 0x600
-diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
-index fbfa55c..4d5f264 100644
---- a/arch/x86/xen/Kconfig
-+++ b/arch/x86/xen/Kconfig
-@@ -5,6 +5,7 @@
- config XEN
- 	bool "Xen guest support"
- 	select PARAVIRT
-+	depends on X86_32
- 	depends on X86_CMPXCHG && X86_TSC && !NEED_MULTIPLE_NODES && !(X86_VISWS || X86_VOYAGER)
- 	help
- 	  This is the Linux Xen port.  Enabling this will allow the
-diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 79ad152..de647bc 100644
---- a/arch/x86/xen/enlighten.c
-+++ b/arch/x86/xen/enlighten.c
-@@ -141,8 +141,8 @@ static void __init xen_banner(void)
- 	printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
+-
+-	/* absorb kw2 to other subkeys */
+-	/* round 2 */
+-	SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1);
+-	/* round 4 */
+-	SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1);
+-	/* round 6 */
+-	SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1);
+-	SUBL(1) ^= SUBR(1) & ~SUBR(9);
+-	dw = SUBL(1) & SUBL(9),
+-		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */
+-	/* round 8 */
+-	SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1);
+-	/* round 10 */
+-	SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1);
+-	/* round 12 */
+-	SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1);
+-	SUBL(1) ^= SUBR(1) & ~SUBR(17);
+-	dw = SUBL(1) & SUBL(17),
+-		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */
+-	/* round 14 */
+-	SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1);
+-	/* round 16 */
+-	SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1);
+-	/* round 18 */
+-	SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1);
+-	/* kw3 */
+-	SUBL(24) ^= SUBL(1); SUBR(24) ^= SUBR(1);
+-
+-	/* absorb kw4 to other subkeys */
+-	kw4l = SUBL(25); kw4r = SUBR(25);
+-	/* round 17 */
+-	SUBL(22) ^= kw4l; SUBR(22) ^= kw4r;
+-	/* round 15 */
+-	SUBL(20) ^= kw4l; SUBR(20) ^= kw4r;
+-	/* round 13 */
+-	SUBL(18) ^= kw4l; SUBR(18) ^= kw4r;
+-	kw4l ^= kw4r & ~SUBR(16);
+-	dw = kw4l & SUBL(16),
+-		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */
+-	/* round 11 */
+-	SUBL(14) ^= kw4l; SUBR(14) ^= kw4r;
+-	/* round 9 */
+-	SUBL(12) ^= kw4l; SUBR(12) ^= kw4r;
+-	/* round 7 */
+-	SUBL(10) ^= kw4l; SUBR(10) ^= kw4r;
+-	kw4l ^= kw4r & ~SUBR(8);
+-	dw = kw4l & SUBL(8),
+-		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */
+-	/* round 5 */
+-	SUBL(6) ^= kw4l; SUBR(6) ^= kw4r;
+-	/* round 3 */
+-	SUBL(4) ^= kw4l; SUBR(4) ^= kw4r;
+-	/* round 1 */
+-	SUBL(2) ^= kw4l; SUBR(2) ^= kw4r;
+-	/* kw1 */
+-	SUBL(0) ^= kw4l; SUBR(0) ^= kw4r;
+-
+-
+-	/* key XOR is end of F-function */
+-	CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */
+-	CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2);
+-	CAMELLIA_SUBKEY_L(2) = SUBL(3);       /* round 1 */
+-	CAMELLIA_SUBKEY_R(2) = SUBR(3);
+-	CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */
+-	CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4);
+-	CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */
+-	CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5);
+-	CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */
+-	CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6);
+-	CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */
+-	CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7);
+-	tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8));
+-	dw = tl & SUBL(8),  /* FL(kl1) */
+-		tr = SUBR(10) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */
+-	CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr;
+-	CAMELLIA_SUBKEY_L(8) = SUBL(8);       /* FL(kl1) */
+-	CAMELLIA_SUBKEY_R(8) = SUBR(8);
+-	CAMELLIA_SUBKEY_L(9) = SUBL(9);       /* FLinv(kl2) */
+-	CAMELLIA_SUBKEY_R(9) = SUBR(9);
+-	tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9));
+-	dw = tl & SUBL(9),  /* FLinv(kl2) */
+-		tr = SUBR(7) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */
+-	CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11);
+-	CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */
+-	CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12);
+-	CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */
+-	CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13);
+-	CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */
+-	CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14);
+-	CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */
+-	CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15);
+-	tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16));
+-	dw = tl & SUBL(16), /* FL(kl3) */
+-		tr = SUBR(18) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */
+-	CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr;
+-	CAMELLIA_SUBKEY_L(16) = SUBL(16);     /* FL(kl3) */
+-	CAMELLIA_SUBKEY_R(16) = SUBR(16);
+-	CAMELLIA_SUBKEY_L(17) = SUBL(17);     /* FLinv(kl4) */
+-	CAMELLIA_SUBKEY_R(17) = SUBR(17);
+-	tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17));
+-	dw = tl & SUBL(17), /* FLinv(kl4) */
+-		tr = SUBR(15) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */
+-	CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19);
+-	CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */
+-	CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20);
+-	CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */
+-	CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21);
+-	CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */
+-	CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22);
+-	CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */
+-	CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23);
+-	CAMELLIA_SUBKEY_L(23) = SUBL(22);     /* round 18 */
+-	CAMELLIA_SUBKEY_R(23) = SUBR(22);
+-	CAMELLIA_SUBKEY_L(24) = SUBL(24) ^ SUBL(23); /* kw3 */
+-	CAMELLIA_SUBKEY_R(24) = SUBR(24) ^ SUBR(23);
+-
+-	/* apply the inverse of the last half of P-function */
+-	dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2),
+-		dw = CAMELLIA_RL8(dw);/* round 1 */
+-	CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw,
+-		CAMELLIA_SUBKEY_L(2) = dw;
+-	dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3),
+-		dw = CAMELLIA_RL8(dw);/* round 2 */
+-	CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw,
+-		CAMELLIA_SUBKEY_L(3) = dw;
+-	dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4),
+-		dw = CAMELLIA_RL8(dw);/* round 3 */
+-	CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw,
+-		CAMELLIA_SUBKEY_L(4) = dw;
+-	dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5),
+-		dw = CAMELLIA_RL8(dw);/* round 4 */
+-	CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw,
+-		CAMELLIA_SUBKEY_L(5) = dw;
+-	dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6),
+-		dw = CAMELLIA_RL8(dw);/* round 5 */
+-	CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw,
+-		CAMELLIA_SUBKEY_L(6) = dw;
+-	dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7),
+-		dw = CAMELLIA_RL8(dw);/* round 6 */
+-	CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw,
+-		CAMELLIA_SUBKEY_L(7) = dw;
+-	dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10),
+-		dw = CAMELLIA_RL8(dw);/* round 7 */
+-	CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw,
+-		CAMELLIA_SUBKEY_L(10) = dw;
+-	dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11),
+-		dw = CAMELLIA_RL8(dw);/* round 8 */
+-	CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw,
+-		CAMELLIA_SUBKEY_L(11) = dw;
+-	dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12),
+-		dw = CAMELLIA_RL8(dw);/* round 9 */
+-	CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw,
+-		CAMELLIA_SUBKEY_L(12) = dw;
+-	dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13),
+-		dw = CAMELLIA_RL8(dw);/* round 10 */
+-	CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw,
+-		CAMELLIA_SUBKEY_L(13) = dw;
+-	dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14),
+-		dw = CAMELLIA_RL8(dw);/* round 11 */
+-	CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw,
+-		CAMELLIA_SUBKEY_L(14) = dw;
+-	dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15),
+-		dw = CAMELLIA_RL8(dw);/* round 12 */
+-	CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw,
+-		CAMELLIA_SUBKEY_L(15) = dw;
+-	dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18),
+-		dw = CAMELLIA_RL8(dw);/* round 13 */
+-	CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw,
+-		CAMELLIA_SUBKEY_L(18) = dw;
+-	dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19),
+-		dw = CAMELLIA_RL8(dw);/* round 14 */
+-	CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw,
+-		CAMELLIA_SUBKEY_L(19) = dw;
+-	dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20),
+-		dw = CAMELLIA_RL8(dw);/* round 15 */
+-	CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw,
+-		CAMELLIA_SUBKEY_L(20) = dw;
+-	dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21),
+-		dw = CAMELLIA_RL8(dw);/* round 16 */
+-	CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw,
+-		CAMELLIA_SUBKEY_L(21) = dw;
+-	dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22),
+-		dw = CAMELLIA_RL8(dw);/* round 17 */
+-	CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw,
+-		CAMELLIA_SUBKEY_L(22) = dw;
+-	dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23),
+-		dw = CAMELLIA_RL8(dw);/* round 18 */
+-	CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw,
+-		CAMELLIA_SUBKEY_L(23) = dw;
+-
+-	return;
++	camellia_setup_tail(subkey, subL, subR, 24);
  }
  
--static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
--		      unsigned int *ecx, unsigned int *edx)
-+static void xen_cpuid(unsigned int *ax, unsigned int *bx,
-+		      unsigned int *cx, unsigned int *dx)
+-
+ static void camellia_setup256(const unsigned char *key, u32 *subkey)
  {
- 	unsigned maskedx = ~0;
+-	u32 kll,klr,krl,krr;           /* left half of key */
+-	u32 krll,krlr,krrl,krrr;       /* right half of key */
++	u32 kll, klr, krl, krr;        /* left half of key */
++	u32 krll, krlr, krrl, krrr;    /* right half of key */
+ 	u32 il, ir, t0, t1, w0, w1;    /* temporary variables */
+-	u32 kw4l, kw4r, dw, tl, tr;
+ 	u32 subL[34];
+ 	u32 subR[34];
  
-@@ -150,18 +150,18 @@ static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
- 	 * Mask out inconvenient features, to try and disable as many
- 	 * unsupported kernel subsystems as possible.
+ 	/**
+ 	 *  key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
+-	 *  (|| is concatination)
++	 *  (|| is concatenation)
  	 */
--	if (*eax == 1)
-+	if (*ax == 1)
- 		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
- 			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
- 			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */
+-
+-	kll  = GETU32(key     );
+-	klr  = GETU32(key +  4);
+-	krl  = GETU32(key +  8);
+-	krr  = GETU32(key + 12);
+-	krll = GETU32(key + 16);
+-	krlr = GETU32(key + 20);
+-	krrl = GETU32(key + 24);
+-	krrr = GETU32(key + 28);
++	GETU32(kll,  key     );
++	GETU32(klr,  key +  4);
++	GETU32(krl,  key +  8);
++	GETU32(krr,  key + 12);
++	GETU32(krll, key + 16);
++	GETU32(krlr, key + 20);
++	GETU32(krrl, key + 24);
++	GETU32(krrr, key + 28);
  
- 	asm(XEN_EMULATE_PREFIX "cpuid"
--		: "=a" (*eax),
--		  "=b" (*ebx),
--		  "=c" (*ecx),
--		  "=d" (*edx)
--		: "0" (*eax), "2" (*ecx));
--	*edx &= maskedx;
-+		: "=a" (*ax),
-+		  "=b" (*bx),
-+		  "=c" (*cx),
-+		  "=d" (*dx)
-+		: "0" (*ax), "2" (*cx));
-+	*dx &= maskedx;
- }
+ 	/* generate KL dependent subkeys */
+ 	/* kw1 */
+-	SUBL(0) = kll; SUBR(0) = klr;
++	subL[0] = kll; subR[0] = klr;
+ 	/* kw2 */
+-	SUBL(1) = krl; SUBR(1) = krr;
+-	CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 45);
++	subL[1] = krl; subR[1] = krr;
++	ROLDQo32(kll, klr, krl, krr, w0, w1, 45);
+ 	/* k9 */
+-	SUBL(12) = kll; SUBR(12) = klr;
++	subL[12] = kll; subR[12] = klr;
+ 	/* k10 */
+-	SUBL(13) = krl; SUBR(13) = krr;
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
++	subL[13] = krl; subR[13] = krr;
++	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ 	/* kl3 */
+-	SUBL(16) = kll; SUBR(16) = klr;
++	subL[16] = kll; subR[16] = klr;
+ 	/* kl4 */
+-	SUBL(17) = krl; SUBR(17) = krr;
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
++	subL[17] = krl; subR[17] = krr;
++	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ 	/* k17 */
+-	SUBL(22) = kll; SUBR(22) = klr;
++	subL[22] = kll; subR[22] = klr;
+ 	/* k18 */
+-	SUBL(23) = krl; SUBR(23) = krr;
+-	CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
++	subL[23] = krl; subR[23] = krr;
++	ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
+ 	/* k23 */
+-	SUBL(30) = kll; SUBR(30) = klr;
++	subL[30] = kll; subR[30] = klr;
+ 	/* k24 */
+-	SUBL(31) = krl; SUBR(31) = krr;
++	subL[31] = krl; subR[31] = krr;
  
- static void xen_set_debugreg(int reg, unsigned long val)
-@@ -275,19 +275,12 @@ static unsigned long xen_store_tr(void)
+ 	/* generate KR dependent subkeys */
+-	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
++	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
+ 	/* k3 */
+-	SUBL(4) = krll; SUBR(4) = krlr;
++	subL[4] = krll; subR[4] = krlr;
+ 	/* k4 */
+-	SUBL(5) = krrl; SUBR(5) = krrr;
+-	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
++	subL[5] = krrl; subR[5] = krrr;
++	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
+ 	/* kl1 */
+-	SUBL(8) = krll; SUBR(8) = krlr;
++	subL[8] = krll; subR[8] = krlr;
+ 	/* kl2 */
+-	SUBL(9) = krrl; SUBR(9) = krrr;
+-	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
++	subL[9] = krrl; subR[9] = krrr;
++	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ 	/* k13 */
+-	SUBL(18) = krll; SUBR(18) = krlr;
++	subL[18] = krll; subR[18] = krlr;
+ 	/* k14 */
+-	SUBL(19) = krrl; SUBR(19) = krrr;
+-	CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
++	subL[19] = krrl; subR[19] = krrr;
++	ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
+ 	/* k19 */
+-	SUBL(26) = krll; SUBR(26) = krlr;
++	subL[26] = krll; subR[26] = krlr;
+ 	/* k20 */
+-	SUBL(27) = krrl; SUBR(27) = krrr;
+-	CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
++	subL[27] = krrl; subR[27] = krrr;
++	ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
  
- static void xen_set_ldt(const void *addr, unsigned entries)
- {
--	unsigned long linear_addr = (unsigned long)addr;
- 	struct mmuext_op *op;
- 	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+ 	/* generate KA */
+-	kll = SUBL(0) ^ krll; klr = SUBR(0) ^ krlr;
+-	krl = SUBL(1) ^ krrl; krr = SUBR(1) ^ krrr;
++	kll = subL[0] ^ krll; klr = subR[0] ^ krlr;
++	krl = subL[1] ^ krrl; krr = subR[1] ^ krrr;
+ 	CAMELLIA_F(kll, klr,
+ 		   CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
+ 		   w0, w1, il, ir, t0, t1);
+@@ -885,310 +798,50 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
+ 	krll ^= w0; krlr ^= w1;
  
- 	op = mcs.args;
- 	op->cmd = MMUEXT_SET_LDT;
--	if (linear_addr) {
--		/* ldt my be vmalloced, use arbitrary_virt_to_machine */
--		xmaddr_t maddr;
--		maddr = arbitrary_virt_to_machine((unsigned long)addr);
--		linear_addr = (unsigned long)maddr.maddr;
--	}
--	op->arg1.linear_addr = linear_addr;
-+	op->arg1.linear_addr = (unsigned long)addr;
- 	op->arg2.nr_ents = entries;
+ 	/* generate KA dependent subkeys */
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
++	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ 	/* k5 */
+-	SUBL(6) = kll; SUBR(6) = klr;
++	subL[6] = kll; subR[6] = klr;
+ 	/* k6 */
+-	SUBL(7) = krl; SUBR(7) = krr;
+-	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
++	subL[7] = krl; subR[7] = krr;
++	ROLDQ(kll, klr, krl, krr, w0, w1, 30);
+ 	/* k11 */
+-	SUBL(14) = kll; SUBR(14) = klr;
++	subL[14] = kll; subR[14] = klr;
+ 	/* k12 */
+-	SUBL(15) = krl; SUBR(15) = krr;
++	subL[15] = krl; subR[15] = krr;
+ 	/* rotation left shift 32bit */
+ 	/* kl5 */
+-	SUBL(24) = klr; SUBR(24) = krl;
++	subL[24] = klr; subR[24] = krl;
+ 	/* kl6 */
+-	SUBL(25) = krr; SUBR(25) = kll;
++	subL[25] = krr; subR[25] = kll;
+ 	/* rotation left shift 49 from k11,k12 -> k21,k22 */
+-	CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 49);
++	ROLDQo32(kll, klr, krl, krr, w0, w1, 49);
+ 	/* k21 */
+-	SUBL(28) = kll; SUBR(28) = klr;
++	subL[28] = kll; subR[28] = klr;
+ 	/* k22 */
+-	SUBL(29) = krl; SUBR(29) = krr;
++	subL[29] = krl; subR[29] = krr;
  
- 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-@@ -295,7 +288,7 @@ static void xen_set_ldt(const void *addr, unsigned entries)
- 	xen_mc_issue(PARAVIRT_LAZY_CPU);
- }
+ 	/* generate KB dependent subkeys */
+ 	/* k1 */
+-	SUBL(2) = krll; SUBR(2) = krlr;
++	subL[2] = krll; subR[2] = krlr;
+ 	/* k2 */
+-	SUBL(3) = krrl; SUBR(3) = krrr;
+-	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
++	subL[3] = krrl; subR[3] = krrr;
++	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ 	/* k7 */
+-	SUBL(10) = krll; SUBR(10) = krlr;
++	subL[10] = krll; subR[10] = krlr;
+ 	/* k8 */
+-	SUBL(11) = krrl; SUBR(11) = krrr;
+-	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
++	subL[11] = krrl; subR[11] = krrr;
++	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ 	/* k15 */
+-	SUBL(20) = krll; SUBR(20) = krlr;
++	subL[20] = krll; subR[20] = krlr;
+ 	/* k16 */
+-	SUBL(21) = krrl; SUBR(21) = krrr;
+-	CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51);
++	subL[21] = krrl; subR[21] = krrr;
++	ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51);
+ 	/* kw3 */
+-	SUBL(32) = krll; SUBR(32) = krlr;
++	subL[32] = krll; subR[32] = krlr;
+ 	/* kw4 */
+-	SUBL(33) = krrl; SUBR(33) = krrr;
+-
+-	/* absorb kw2 to other subkeys */
+-	/* round 2 */
+-	SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1);
+-	/* round 4 */
+-	SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1);
+-	/* round 6 */
+-	SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1);
+-	SUBL(1) ^= SUBR(1) & ~SUBR(9);
+-	dw = SUBL(1) & SUBL(9),
+-		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */
+-	/* round 8 */
+-	SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1);
+-	/* round 10 */
+-	SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1);
+-	/* round 12 */
+-	SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1);
+-	SUBL(1) ^= SUBR(1) & ~SUBR(17);
+-	dw = SUBL(1) & SUBL(17),
+-		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */
+-	/* round 14 */
+-	SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1);
+-	/* round 16 */
+-	SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1);
+-	/* round 18 */
+-	SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1);
+-	SUBL(1) ^= SUBR(1) & ~SUBR(25);
+-	dw = SUBL(1) & SUBL(25),
+-		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl6) */
+-	/* round 20 */
+-	SUBL(27) ^= SUBL(1); SUBR(27) ^= SUBR(1);
+-	/* round 22 */
+-	SUBL(29) ^= SUBL(1); SUBR(29) ^= SUBR(1);
+-	/* round 24 */
+-	SUBL(31) ^= SUBL(1); SUBR(31) ^= SUBR(1);
+-	/* kw3 */
+-	SUBL(32) ^= SUBL(1); SUBR(32) ^= SUBR(1);
+-
+-
+-	/* absorb kw4 to other subkeys */
+-	kw4l = SUBL(33); kw4r = SUBR(33);
+-	/* round 23 */
+-	SUBL(30) ^= kw4l; SUBR(30) ^= kw4r;
+-	/* round 21 */
+-	SUBL(28) ^= kw4l; SUBR(28) ^= kw4r;
+-	/* round 19 */
+-	SUBL(26) ^= kw4l; SUBR(26) ^= kw4r;
+-	kw4l ^= kw4r & ~SUBR(24);
+-	dw = kw4l & SUBL(24),
+-		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl5) */
+-	/* round 17 */
+-	SUBL(22) ^= kw4l; SUBR(22) ^= kw4r;
+-	/* round 15 */
+-	SUBL(20) ^= kw4l; SUBR(20) ^= kw4r;
+-	/* round 13 */
+-	SUBL(18) ^= kw4l; SUBR(18) ^= kw4r;
+-	kw4l ^= kw4r & ~SUBR(16);
+-	dw = kw4l & SUBL(16),
+-		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */
+-	/* round 11 */
+-	SUBL(14) ^= kw4l; SUBR(14) ^= kw4r;
+-	/* round 9 */
+-	SUBL(12) ^= kw4l; SUBR(12) ^= kw4r;
+-	/* round 7 */
+-	SUBL(10) ^= kw4l; SUBR(10) ^= kw4r;
+-	kw4l ^= kw4r & ~SUBR(8);
+-	dw = kw4l & SUBL(8),
+-		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */
+-	/* round 5 */
+-	SUBL(6) ^= kw4l; SUBR(6) ^= kw4r;
+-	/* round 3 */
+-	SUBL(4) ^= kw4l; SUBR(4) ^= kw4r;
+-	/* round 1 */
+-	SUBL(2) ^= kw4l; SUBR(2) ^= kw4r;
+-	/* kw1 */
+-	SUBL(0) ^= kw4l; SUBR(0) ^= kw4r;
++	subL[33] = krrl; subR[33] = krrr;
  
--static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
-+static void xen_load_gdt(const struct desc_ptr *dtr)
- {
- 	unsigned long *frames;
- 	unsigned long va = dtr->address;
-@@ -357,11 +350,11 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
+-	/* key XOR is end of F-function */
+-	CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */
+-	CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2);
+-	CAMELLIA_SUBKEY_L(2) = SUBL(3);       /* round 1 */
+-	CAMELLIA_SUBKEY_R(2) = SUBR(3);
+-	CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */
+-	CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4);
+-	CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */
+-	CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5);
+-	CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */
+-	CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6);
+-	CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */
+-	CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7);
+-	tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8));
+-	dw = tl & SUBL(8),  /* FL(kl1) */
+-		tr = SUBR(10) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */
+-	CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr;
+-	CAMELLIA_SUBKEY_L(8) = SUBL(8);       /* FL(kl1) */
+-	CAMELLIA_SUBKEY_R(8) = SUBR(8);
+-	CAMELLIA_SUBKEY_L(9) = SUBL(9);       /* FLinv(kl2) */
+-	CAMELLIA_SUBKEY_R(9) = SUBR(9);
+-	tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9));
+-	dw = tl & SUBL(9),  /* FLinv(kl2) */
+-		tr = SUBR(7) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */
+-	CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11);
+-	CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */
+-	CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12);
+-	CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */
+-	CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13);
+-	CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */
+-	CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14);
+-	CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */
+-	CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15);
+-	tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16));
+-	dw = tl & SUBL(16), /* FL(kl3) */
+-		tr = SUBR(18) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */
+-	CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr;
+-	CAMELLIA_SUBKEY_L(16) = SUBL(16);     /* FL(kl3) */
+-	CAMELLIA_SUBKEY_R(16) = SUBR(16);
+-	CAMELLIA_SUBKEY_L(17) = SUBL(17);     /* FLinv(kl4) */
+-	CAMELLIA_SUBKEY_R(17) = SUBR(17);
+-	tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17));
+-	dw = tl & SUBL(17), /* FLinv(kl4) */
+-		tr = SUBR(15) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */
+-	CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19);
+-	CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */
+-	CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20);
+-	CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */
+-	CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21);
+-	CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */
+-	CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22);
+-	CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */
+-	CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23);
+-	tl = SUBL(26) ^ (SUBR(26)
+-			 & ~SUBR(24));
+-	dw = tl & SUBL(24), /* FL(kl5) */
+-		tr = SUBR(26) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(23) = SUBL(22) ^ tl; /* round 18 */
+-	CAMELLIA_SUBKEY_R(23) = SUBR(22) ^ tr;
+-	CAMELLIA_SUBKEY_L(24) = SUBL(24);     /* FL(kl5) */
+-	CAMELLIA_SUBKEY_R(24) = SUBR(24);
+-	CAMELLIA_SUBKEY_L(25) = SUBL(25);     /* FLinv(kl6) */
+-	CAMELLIA_SUBKEY_R(25) = SUBR(25);
+-	tl = SUBL(23) ^ (SUBR(23) &
+-			 ~SUBR(25));
+-	dw = tl & SUBL(25), /* FLinv(kl6) */
+-		tr = SUBR(23) ^ CAMELLIA_RL1(dw);
+-	CAMELLIA_SUBKEY_L(26) = tl ^ SUBL(27); /* round 19 */
+-	CAMELLIA_SUBKEY_R(26) = tr ^ SUBR(27);
+-	CAMELLIA_SUBKEY_L(27) = SUBL(26) ^ SUBL(28); /* round 20 */
+-	CAMELLIA_SUBKEY_R(27) = SUBR(26) ^ SUBR(28);
+-	CAMELLIA_SUBKEY_L(28) = SUBL(27) ^ SUBL(29); /* round 21 */
+-	CAMELLIA_SUBKEY_R(28) = SUBR(27) ^ SUBR(29);
+-	CAMELLIA_SUBKEY_L(29) = SUBL(28) ^ SUBL(30); /* round 22 */
+-	CAMELLIA_SUBKEY_R(29) = SUBR(28) ^ SUBR(30);
+-	CAMELLIA_SUBKEY_L(30) = SUBL(29) ^ SUBL(31); /* round 23 */
+-	CAMELLIA_SUBKEY_R(30) = SUBR(29) ^ SUBR(31);
+-	CAMELLIA_SUBKEY_L(31) = SUBL(30);     /* round 24 */
+-	CAMELLIA_SUBKEY_R(31) = SUBR(30);
+-	CAMELLIA_SUBKEY_L(32) = SUBL(32) ^ SUBL(31); /* kw3 */
+-	CAMELLIA_SUBKEY_R(32) = SUBR(32) ^ SUBR(31);
+-
+-	/* apply the inverse of the last half of P-function */
+-	dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2),
+-		dw = CAMELLIA_RL8(dw);/* round 1 */
+-	CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw,
+-		CAMELLIA_SUBKEY_L(2) = dw;
+-	dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3),
+-		dw = CAMELLIA_RL8(dw);/* round 2 */
+-	CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw,
+-		CAMELLIA_SUBKEY_L(3) = dw;
+-	dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4),
+-		dw = CAMELLIA_RL8(dw);/* round 3 */
+-	CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw,
+-		CAMELLIA_SUBKEY_L(4) = dw;
+-	dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5),
+-		dw = CAMELLIA_RL8(dw);/* round 4 */
+-	CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw,
+-	CAMELLIA_SUBKEY_L(5) = dw;
+-	dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6),
+-		dw = CAMELLIA_RL8(dw);/* round 5 */
+-	CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw,
+-		CAMELLIA_SUBKEY_L(6) = dw;
+-	dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7),
+-		dw = CAMELLIA_RL8(dw);/* round 6 */
+-	CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw,
+-		CAMELLIA_SUBKEY_L(7) = dw;
+-	dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10),
+-		dw = CAMELLIA_RL8(dw);/* round 7 */
+-	CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw,
+-		CAMELLIA_SUBKEY_L(10) = dw;
+-	dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11),
+-	    dw = CAMELLIA_RL8(dw);/* round 8 */
+-	CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw,
+-		CAMELLIA_SUBKEY_L(11) = dw;
+-	dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12),
+-		dw = CAMELLIA_RL8(dw);/* round 9 */
+-	CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw,
+-		CAMELLIA_SUBKEY_L(12) = dw;
+-	dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13),
+-		dw = CAMELLIA_RL8(dw);/* round 10 */
+-	CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw,
+-		CAMELLIA_SUBKEY_L(13) = dw;
+-	dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14),
+-		dw = CAMELLIA_RL8(dw);/* round 11 */
+-	CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw,
+-		CAMELLIA_SUBKEY_L(14) = dw;
+-	dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15),
+-		dw = CAMELLIA_RL8(dw);/* round 12 */
+-	CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw,
+-		CAMELLIA_SUBKEY_L(15) = dw;
+-	dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18),
+-		dw = CAMELLIA_RL8(dw);/* round 13 */
+-	CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw,
+-		CAMELLIA_SUBKEY_L(18) = dw;
+-	dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19),
+-		dw = CAMELLIA_RL8(dw);/* round 14 */
+-	CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw,
+-		CAMELLIA_SUBKEY_L(19) = dw;
+-	dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20),
+-		dw = CAMELLIA_RL8(dw);/* round 15 */
+-	CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw,
+-		CAMELLIA_SUBKEY_L(20) = dw;
+-	dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21),
+-		dw = CAMELLIA_RL8(dw);/* round 16 */
+-	CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw,
+-		CAMELLIA_SUBKEY_L(21) = dw;
+-	dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22),
+-		dw = CAMELLIA_RL8(dw);/* round 17 */
+-	CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw,
+-		CAMELLIA_SUBKEY_L(22) = dw;
+-	dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23),
+-		dw = CAMELLIA_RL8(dw);/* round 18 */
+-	CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw,
+-		CAMELLIA_SUBKEY_L(23) = dw;
+-	dw = CAMELLIA_SUBKEY_L(26) ^ CAMELLIA_SUBKEY_R(26),
+-		dw = CAMELLIA_RL8(dw);/* round 19 */
+-	CAMELLIA_SUBKEY_R(26) = CAMELLIA_SUBKEY_L(26) ^ dw,
+-		CAMELLIA_SUBKEY_L(26) = dw;
+-	dw = CAMELLIA_SUBKEY_L(27) ^ CAMELLIA_SUBKEY_R(27),
+-		dw = CAMELLIA_RL8(dw);/* round 20 */
+-	CAMELLIA_SUBKEY_R(27) = CAMELLIA_SUBKEY_L(27) ^ dw,
+-		CAMELLIA_SUBKEY_L(27) = dw;
+-	dw = CAMELLIA_SUBKEY_L(28) ^ CAMELLIA_SUBKEY_R(28),
+-		dw = CAMELLIA_RL8(dw);/* round 21 */
+-	CAMELLIA_SUBKEY_R(28) = CAMELLIA_SUBKEY_L(28) ^ dw,
+-		CAMELLIA_SUBKEY_L(28) = dw;
+-	dw = CAMELLIA_SUBKEY_L(29) ^ CAMELLIA_SUBKEY_R(29),
+-		dw = CAMELLIA_RL8(dw);/* round 22 */
+-	CAMELLIA_SUBKEY_R(29) = CAMELLIA_SUBKEY_L(29) ^ dw,
+-		CAMELLIA_SUBKEY_L(29) = dw;
+-	dw = CAMELLIA_SUBKEY_L(30) ^ CAMELLIA_SUBKEY_R(30),
+-		dw = CAMELLIA_RL8(dw);/* round 23 */
+-	CAMELLIA_SUBKEY_R(30) = CAMELLIA_SUBKEY_L(30) ^ dw,
+-		CAMELLIA_SUBKEY_L(30) = dw;
+-	dw = CAMELLIA_SUBKEY_L(31) ^ CAMELLIA_SUBKEY_R(31),
+-		dw = CAMELLIA_RL8(dw);/* round 24 */
+-	CAMELLIA_SUBKEY_R(31) = CAMELLIA_SUBKEY_L(31) ^ dw,
+-		CAMELLIA_SUBKEY_L(31) = dw;
+-
+-	return;
++	camellia_setup_tail(subkey, subL, subR, 32);
  }
  
- static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
--				u32 low, u32 high)
-+				const void *ptr)
- {
- 	unsigned long lp = (unsigned long)&dt[entrynum];
- 	xmaddr_t mach_lp = virt_to_machine(lp);
--	u64 entry = (u64)high << 32 | low;
-+	u64 entry = *(u64 *)ptr;
- 
- 	preempt_disable();
+ static void camellia_setup192(const unsigned char *key, u32 *subkey)
+@@ -1197,482 +850,168 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
+ 	u32 krll, krlr, krrl,krrr;
  
-@@ -395,12 +388,11 @@ static int cvt_gate_to_trap(int vector, u32 low, u32 high,
+ 	memcpy(kk, key, 24);
+-	memcpy((unsigned char *)&krll, key+16,4);
+-	memcpy((unsigned char *)&krlr, key+20,4);
++	memcpy((unsigned char *)&krll, key+16, 4);
++	memcpy((unsigned char *)&krlr, key+20, 4);
+ 	krrl = ~krll;
+ 	krrr = ~krlr;
+ 	memcpy(kk+24, (unsigned char *)&krrl, 4);
+ 	memcpy(kk+28, (unsigned char *)&krrr, 4);
+ 	camellia_setup256(kk, subkey);
+-	return;
  }
  
- /* Locations of each CPU's IDT */
--static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);
-+static DEFINE_PER_CPU(struct desc_ptr, idt_desc);
- 
- /* Set an IDT entry.  If the entry is part of the current IDT, then
-    also update Xen. */
--static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
--				u32 low, u32 high)
-+static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
- {
- 	unsigned long p = (unsigned long)&dt[entrynum];
- 	unsigned long start, end;
-@@ -412,14 +404,15 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
- 
- 	xen_mc_flush();
- 
--	write_dt_entry(dt, entrynum, low, high);
-+	native_write_idt_entry(dt, entrynum, g);
- 
- 	if (p >= start && (p + 8) <= end) {
- 		struct trap_info info[2];
-+		u32 *desc = (u32 *)g;
- 
- 		info[1].address = 0;
- 
--		if (cvt_gate_to_trap(entrynum, low, high, &info[0]))
-+		if (cvt_gate_to_trap(entrynum, desc[0], desc[1], &info[0]))
- 			if (HYPERVISOR_set_trap_table(info))
- 				BUG();
- 	}
-@@ -427,7 +420,7 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
- 	preempt_enable();
- }
  
--static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
-+static void xen_convert_trap_info(const struct desc_ptr *desc,
- 				  struct trap_info *traps)
- {
- 	unsigned in, out, count;
-@@ -446,7 +439,7 @@ static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
+-/**
+- * Stuff related to camellia encryption/decryption
++/*
++ * Encrypt/decrypt
+  */
+-static void camellia_encrypt128(const u32 *subkey, __be32 *io_text)
+-{
+-	u32 il,ir,t0,t1;               /* temporary valiables */
+-
+-	u32 io[4];
+-
+-	io[0] = be32_to_cpu(io_text[0]);
+-	io[1] = be32_to_cpu(io_text[1]);
+-	io[2] = be32_to_cpu(io_text[2]);
+-	io[3] = be32_to_cpu(io_text[3]);
+-
+-	/* pre whitening but absorb kw2*/
+-	io[0] ^= CAMELLIA_SUBKEY_L(0);
+-	io[1] ^= CAMELLIA_SUBKEY_R(0);
+-	/* main iteration */
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
+-		     CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
+-		     CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
+-			 io[0],io[1],il,ir,t0,t1);
++#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \
++    do {								\
++	t0 = kll;							\
++	t2 = krr;							\
++	t0 &= ll;							\
++	t2 |= rr;							\
++	rl ^= t2;							\
++	lr ^= ROL1(t0);							\
++	t3 = krl;							\
++	t1 = klr;							\
++	t3 &= rl;							\
++	t1 |= lr;							\
++	ll ^= t1;							\
++	rr ^= ROL1(t3);							\
++    } while(0)
  
- void xen_copy_trap_info(struct trap_info *traps)
- {
--	const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc);
-+	const struct desc_ptr *desc = &__get_cpu_var(idt_desc);
+-	/* post whitening but kw4 */
+-	io[2] ^= CAMELLIA_SUBKEY_L(24);
+-	io[3] ^= CAMELLIA_SUBKEY_R(24);
+-
+-	t0 = io[0];
+-	t1 = io[1];
+-	io[0] = io[2];
+-	io[1] = io[3];
+-	io[2] = t0;
+-	io[3] = t1;
+-
+-	io_text[0] = cpu_to_be32(io[0]);
+-	io_text[1] = cpu_to_be32(io[1]);
+-	io_text[2] = cpu_to_be32(io[2]);
+-	io_text[3] = cpu_to_be32(io[3]);
+-
+-	return;
+-}
++#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir)		\
++    do {								\
++	ir =  camellia_sp1110[(u8)xr];					\
++	il =  camellia_sp1110[    (xl >> 24)];				\
++	ir ^= camellia_sp0222[    (xr >> 24)];				\
++	il ^= camellia_sp0222[(u8)(xl >> 16)];				\
++	ir ^= camellia_sp3033[(u8)(xr >> 16)];				\
++	il ^= camellia_sp3033[(u8)(xl >> 8)];				\
++	ir ^= camellia_sp4404[(u8)(xr >> 8)];				\
++	il ^= camellia_sp4404[(u8)xl];					\
++	il ^= kl;							\
++	ir ^= il ^ kr;							\
++	yl ^= ir;							\
++	yr ^= ROR8(il) ^ ir;						\
++    } while(0)
  
- 	xen_convert_trap_info(desc, traps);
- }
-@@ -454,7 +447,7 @@ void xen_copy_trap_info(struct trap_info *traps)
- /* Load a new IDT into Xen.  In principle this can be per-CPU, so we
-    hold a spinlock to protect the static traps[] array (static because
-    it avoids allocation, and saves stack space). */
--static void xen_load_idt(const struct Xgt_desc_struct *desc)
-+static void xen_load_idt(const struct desc_ptr *desc)
- {
- 	static DEFINE_SPINLOCK(lock);
- 	static struct trap_info traps[257];
-@@ -475,22 +468,21 @@ static void xen_load_idt(const struct Xgt_desc_struct *desc)
- /* Write a GDT descriptor entry.  Ignore LDT descriptors, since
-    they're handled differently. */
- static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
--				u32 low, u32 high)
-+				const void *desc, int type)
+-static void camellia_decrypt128(const u32 *subkey, __be32 *io_text)
++/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
++static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
  {
- 	preempt_disable();
- 
--	switch ((high >> 8) & 0xff) {
--	case DESCTYPE_LDT:
--	case DESCTYPE_TSS:
-+	switch (type) {
-+	case DESC_LDT:
-+	case DESC_TSS:
- 		/* ignore */
- 		break;
- 
- 	default: {
- 		xmaddr_t maddr = virt_to_machine(&dt[entry]);
--		u64 desc = (u64)high << 32 | low;
+-	u32 il,ir,t0,t1;               /* temporary valiables */
++	u32 il,ir,t0,t1;               /* temporary variables */
  
- 		xen_mc_flush();
--		if (HYPERVISOR_update_descriptor(maddr.maddr, desc))
-+		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
- 			BUG();
- 	}
+-	u32 io[4];
+-
+-	io[0] = be32_to_cpu(io_text[0]);
+-	io[1] = be32_to_cpu(io_text[1]);
+-	io[2] = be32_to_cpu(io_text[2]);
+-	io[3] = be32_to_cpu(io_text[3]);
+-
+-	/* pre whitening but absorb kw2*/
+-	io[0] ^= CAMELLIA_SUBKEY_L(24);
+-	io[1] ^= CAMELLIA_SUBKEY_R(24);
++	/* pre whitening but absorb kw2 */
++	io[0] ^= SUBKEY_L(0);
++	io[1] ^= SUBKEY_R(0);
  
-@@ -499,11 +491,11 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
- 	preempt_enable();
- }
+ 	/* main iteration */
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
+-		     CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
+-		     CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	/* post whitening but kw4 */
+-	io[2] ^= CAMELLIA_SUBKEY_L(0);
+-	io[3] ^= CAMELLIA_SUBKEY_R(0);
+-
+-	t0 = io[0];
+-	t1 = io[1];
+-	io[0] = io[2];
+-	io[1] = io[3];
+-	io[2] = t0;
+-	io[3] = t1;
+-
+-	io_text[0] = cpu_to_be32(io[0]);
+-	io_text[1] = cpu_to_be32(io[1]);
+-	io_text[2] = cpu_to_be32(io[2]);
+-	io_text[3] = cpu_to_be32(io[3]);
+-
+-	return;
+-}
+-
+-
+-/**
+- * stuff for 192 and 256bit encryption/decryption
+- */
+-static void camellia_encrypt256(const u32 *subkey, __be32 *io_text)
+-{
+-	u32 il,ir,t0,t1;           /* temporary valiables */
+-
+-	u32 io[4];
+-
+-	io[0] = be32_to_cpu(io_text[0]);
+-	io[1] = be32_to_cpu(io_text[1]);
+-	io[2] = be32_to_cpu(io_text[2]);
+-	io[3] = be32_to_cpu(io_text[3]);
++#define ROUNDS(i) do { \
++	CAMELLIA_ROUNDSM(io[0],io[1], \
++			 SUBKEY_L(i + 2),SUBKEY_R(i + 2), \
++			 io[2],io[3],il,ir); \
++	CAMELLIA_ROUNDSM(io[2],io[3], \
++			 SUBKEY_L(i + 3),SUBKEY_R(i + 3), \
++			 io[0],io[1],il,ir); \
++	CAMELLIA_ROUNDSM(io[0],io[1], \
++			 SUBKEY_L(i + 4),SUBKEY_R(i + 4), \
++			 io[2],io[3],il,ir); \
++	CAMELLIA_ROUNDSM(io[2],io[3], \
++			 SUBKEY_L(i + 5),SUBKEY_R(i + 5), \
++			 io[0],io[1],il,ir); \
++	CAMELLIA_ROUNDSM(io[0],io[1], \
++			 SUBKEY_L(i + 6),SUBKEY_R(i + 6), \
++			 io[2],io[3],il,ir); \
++	CAMELLIA_ROUNDSM(io[2],io[3], \
++			 SUBKEY_L(i + 7),SUBKEY_R(i + 7), \
++			 io[0],io[1],il,ir); \
++} while (0)
++#define FLS(i) do { \
++	CAMELLIA_FLS(io[0],io[1],io[2],io[3], \
++		     SUBKEY_L(i + 0),SUBKEY_R(i + 0), \
++		     SUBKEY_L(i + 1),SUBKEY_R(i + 1), \
++		     t0,t1,il,ir); \
++} while (0)
++
++	ROUNDS(0);
++	FLS(8);
++	ROUNDS(8);
++	FLS(16);
++	ROUNDS(16);
++	if (max == 32) {
++		FLS(24);
++		ROUNDS(24);
++	}
  
--static void xen_load_esp0(struct tss_struct *tss,
-+static void xen_load_sp0(struct tss_struct *tss,
- 			  struct thread_struct *thread)
- {
- 	struct multicall_space mcs = xen_mc_entry(0);
--	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
-+	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
- 	xen_mc_issue(PARAVIRT_LAZY_CPU);
- }
+-	/* pre whitening but absorb kw2*/
+-	io[0] ^= CAMELLIA_SUBKEY_L(0);
+-	io[1] ^= CAMELLIA_SUBKEY_R(0);
+-
+-	/* main iteration */
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
+-		     CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
+-		     CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24),
+-		     CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31),
+-			 io[0],io[1],il,ir,t0,t1);
++#undef ROUNDS
++#undef FLS
  
-@@ -521,12 +513,12 @@ static void xen_io_delay(void)
+ 	/* post whitening but kw4 */
+-	io[2] ^= CAMELLIA_SUBKEY_L(32);
+-	io[3] ^= CAMELLIA_SUBKEY_R(32);
+-
+-	t0 = io[0];
+-	t1 = io[1];
+-	io[0] = io[2];
+-	io[1] = io[3];
+-	io[2] = t0;
+-	io[3] = t1;
+-
+-	io_text[0] = cpu_to_be32(io[0]);
+-	io_text[1] = cpu_to_be32(io[1]);
+-	io_text[2] = cpu_to_be32(io[2]);
+-	io_text[3] = cpu_to_be32(io[3]);
+-
+-	return;
++	io[2] ^= SUBKEY_L(max);
++	io[3] ^= SUBKEY_R(max);
++	/* NB: io[0],[1] should be swapped with [2],[3] by caller! */
  }
  
- #ifdef CONFIG_X86_LOCAL_APIC
--static unsigned long xen_apic_read(unsigned long reg)
-+static u32 xen_apic_read(unsigned long reg)
+-
+-static void camellia_decrypt256(const u32 *subkey, __be32 *io_text)
++static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
  {
- 	return 0;
- }
+-	u32 il,ir,t0,t1;           /* temporary valiables */
++	u32 il,ir,t0,t1;               /* temporary variables */
  
--static void xen_apic_write(unsigned long reg, unsigned long val)
-+static void xen_apic_write(unsigned long reg, u32 val)
- {
- 	/* Warn to see if there's any stray references */
- 	WARN_ON(1);
-@@ -666,6 +658,13 @@ static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
- 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
- }
+-	u32 io[4];
+-
+-	io[0] = be32_to_cpu(io_text[0]);
+-	io[1] = be32_to_cpu(io_text[1]);
+-	io[2] = be32_to_cpu(io_text[2]);
+-	io[3] = be32_to_cpu(io_text[3]);
+-
+-	/* pre whitening but absorb kw2*/
+-	io[0] ^= CAMELLIA_SUBKEY_L(32);
+-	io[1] ^= CAMELLIA_SUBKEY_R(32);
++	/* pre whitening but absorb kw2 */
++	io[0] ^= SUBKEY_L(i);
++	io[1] ^= SUBKEY_R(i);
  
-+/* Early release_pt assumes that all pts are pinned, since there's
-+   only init_mm and anything attached to that is pinned. */
-+static void xen_release_pt_init(u32 pfn)
-+{
-+	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-+}
+ 	/* main iteration */
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25),
+-		     CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
+-		     CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
+-			 io[0],io[1],il,ir,t0,t1);
+-
+-	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+-		     CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
+-		     CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
+-		     t0,t1,il,ir);
+-
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
+-			 io[0],io[1],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[0],io[1],
+-			 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
+-			 io[2],io[3],il,ir,t0,t1);
+-	CAMELLIA_ROUNDSM(io[2],io[3],
+-			 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
+-			 io[0],io[1],il,ir,t0,t1);
++#define ROUNDS(i) do { \
++	CAMELLIA_ROUNDSM(io[0],io[1], \
++			 SUBKEY_L(i + 7),SUBKEY_R(i + 7), \
++			 io[2],io[3],il,ir); \
++	CAMELLIA_ROUNDSM(io[2],io[3], \
++			 SUBKEY_L(i + 6),SUBKEY_R(i + 6), \
++			 io[0],io[1],il,ir); \
++	CAMELLIA_ROUNDSM(io[0],io[1], \
++			 SUBKEY_L(i + 5),SUBKEY_R(i + 5), \
++			 io[2],io[3],il,ir); \
++	CAMELLIA_ROUNDSM(io[2],io[3], \
++			 SUBKEY_L(i + 4),SUBKEY_R(i + 4), \
++			 io[0],io[1],il,ir); \
++	CAMELLIA_ROUNDSM(io[0],io[1], \
++			 SUBKEY_L(i + 3),SUBKEY_R(i + 3), \
++			 io[2],io[3],il,ir); \
++	CAMELLIA_ROUNDSM(io[2],io[3], \
++			 SUBKEY_L(i + 2),SUBKEY_R(i + 2), \
++			 io[0],io[1],il,ir); \
++} while (0)
++#define FLS(i) do { \
++	CAMELLIA_FLS(io[0],io[1],io[2],io[3], \
++		     SUBKEY_L(i + 1),SUBKEY_R(i + 1), \
++		     SUBKEY_L(i + 0),SUBKEY_R(i + 0), \
++		     t0,t1,il,ir); \
++} while (0)
 +
- static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
- {
- 	struct mmuext_op op;
-@@ -677,7 +676,7 @@ static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
- 
- /* This needs to make sure the new pte page is pinned iff its being
-    attached to a pinned pagetable. */
--static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
-+static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
- {
- 	struct page *page = pfn_to_page(pfn);
- 
-@@ -686,7 +685,7 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
++	if (i == 32) {
++		ROUNDS(24);
++		FLS(24);
++	}
++	ROUNDS(16);
++	FLS(16);
++	ROUNDS(8);
++	FLS(8);
++	ROUNDS(0);
++
++#undef ROUNDS
++#undef FLS
  
- 		if (!PageHighMem(page)) {
- 			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
--			pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-+			pin_pagetable_pfn(level, pfn);
- 		} else
- 			/* make sure there are no stray mappings of
- 			   this page */
-@@ -694,6 +693,16 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
- 	}
+ 	/* post whitening but kw4 */
+-	io[2] ^= CAMELLIA_SUBKEY_L(0);
+-	io[3] ^= CAMELLIA_SUBKEY_R(0);
+-
+-	t0 = io[0];
+-	t1 = io[1];
+-	io[0] = io[2];
+-	io[1] = io[3];
+-	io[2] = t0;
+-	io[3] = t1;
+-
+-	io_text[0] = cpu_to_be32(io[0]);
+-	io_text[1] = cpu_to_be32(io[1]);
+-	io_text[2] = cpu_to_be32(io[2]);
+-	io_text[3] = cpu_to_be32(io[3]);
+-
+-	return;
++	io[2] ^= SUBKEY_L(0);
++	io[3] ^= SUBKEY_R(0);
++	/* NB: 0,1 should be swapped with 2,3 by caller! */
  }
  
-+static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
-+{
-+	xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L1_TABLE);
-+}
-+
-+static void xen_alloc_pd(struct mm_struct *mm, u32 pfn)
-+{
-+	xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L2_TABLE);
-+}
-+
- /* This should never happen until we're OK to use struct page */
- static void xen_release_pt(u32 pfn)
- {
-@@ -796,6 +805,9 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
- 	/* This will work as long as patching hasn't happened yet
- 	   (which it hasn't) */
- 	pv_mmu_ops.alloc_pt = xen_alloc_pt;
-+	pv_mmu_ops.alloc_pd = xen_alloc_pd;
-+	pv_mmu_ops.release_pt = xen_release_pt;
-+	pv_mmu_ops.release_pd = xen_release_pt;
- 	pv_mmu_ops.set_pte = xen_set_pte;
- 
- 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-@@ -953,7 +965,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
- 	.read_pmc = native_read_pmc,
  
- 	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
--	.irq_enable_sysexit = NULL,  /* never called */
-+	.irq_enable_syscall_ret = NULL,  /* never called */
++struct camellia_ctx {
++	int key_length;
++	u32 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u32)];
++};
++
+ static int
+ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ 		 unsigned int key_len)
+@@ -1688,7 +1027,7 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
  
- 	.load_tr_desc = paravirt_nop,
- 	.set_ldt = xen_set_ldt,
-@@ -968,7 +980,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
- 	.write_ldt_entry = xen_write_ldt_entry,
- 	.write_gdt_entry = xen_write_gdt_entry,
- 	.write_idt_entry = xen_write_idt_entry,
--	.load_esp0 = xen_load_esp0,
-+	.load_sp0 = xen_load_sp0,
+ 	cctx->key_length = key_len;
  
- 	.set_iopl_mask = xen_set_iopl_mask,
- 	.io_delay = xen_io_delay,
-@@ -1019,10 +1031,10 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
- 	.pte_update_defer = paravirt_nop,
+-	switch(key_len) {
++	switch (key_len) {
+ 	case 16:
+ 		camellia_setup128(key, cctx->key_table);
+ 		break;
+@@ -1698,68 +1037,59 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ 	case 32:
+ 		camellia_setup256(key, cctx->key_table);
+ 		break;
+-	default:
+-		break;
+ 	}
  
- 	.alloc_pt = xen_alloc_pt_init,
--	.release_pt = xen_release_pt,
--	.alloc_pd = paravirt_nop,
-+	.release_pt = xen_release_pt_init,
-+	.alloc_pd = xen_alloc_pt_init,
- 	.alloc_pd_clone = paravirt_nop,
--	.release_pd = paravirt_nop,
-+	.release_pd = xen_release_pt_init,
+ 	return 0;
+ }
  
- #ifdef CONFIG_HIGHPTE
- 	.kmap_atomic_pte = xen_kmap_atomic_pte,
-diff --git a/arch/x86/xen/events.c b/arch/x86/xen/events.c
-index 6d1da58..dcf613e 100644
---- a/arch/x86/xen/events.c
-+++ b/arch/x86/xen/events.c
-@@ -465,7 +465,7 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
-  * a bitset of words which contain pending event bits.  The second
-  * level is a bitset of pending events themselves.
-  */
--fastcall void xen_evtchn_do_upcall(struct pt_regs *regs)
-+void xen_evtchn_do_upcall(struct pt_regs *regs)
+-
+ static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
  {
- 	int cpu = get_cpu();
- 	struct shared_info *s = HYPERVISOR_shared_info;
-@@ -487,7 +487,7 @@ fastcall void xen_evtchn_do_upcall(struct pt_regs *regs)
- 			int irq = evtchn_to_irq[port];
+ 	const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
+ 	const __be32 *src = (const __be32 *)in;
+ 	__be32 *dst = (__be32 *)out;
  
- 			if (irq != -1) {
--				regs->orig_eax = ~irq;
-+				regs->orig_ax = ~irq;
- 				do_IRQ(regs);
- 			}
- 		}
-diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index 0ac6c5d..45aa771 100644
---- a/arch/x86/xen/mmu.c
-+++ b/arch/x86/xen/mmu.c
-@@ -58,7 +58,8 @@
+-	__be32 tmp[4];
++	u32 tmp[4];
  
- xmaddr_t arbitrary_virt_to_machine(unsigned long address)
- {
--	pte_t *pte = lookup_address(address);
-+	int level;
-+	pte_t *pte = lookup_address(address, &level);
- 	unsigned offset = address & PAGE_MASK;
+-	memcpy(tmp, src, CAMELLIA_BLOCK_SIZE);
++	tmp[0] = be32_to_cpu(src[0]);
++	tmp[1] = be32_to_cpu(src[1]);
++	tmp[2] = be32_to_cpu(src[2]);
++	tmp[3] = be32_to_cpu(src[3]);
  
- 	BUG_ON(pte == NULL);
-@@ -70,8 +71,9 @@ void make_lowmem_page_readonly(void *vaddr)
- {
- 	pte_t *pte, ptev;
- 	unsigned long address = (unsigned long)vaddr;
-+	int level;
+-	switch (cctx->key_length) {
+-	case 16:
+-		camellia_encrypt128(cctx->key_table, tmp);
+-		break;
+-	case 24:
+-		/* fall through */
+-	case 32:
+-		camellia_encrypt256(cctx->key_table, tmp);
+-		break;
+-	default:
+-		break;
+-	}
++	camellia_do_encrypt(cctx->key_table, tmp,
++		cctx->key_length == 16 ? 24 : 32 /* for key lengths of 24 and 32 */
++	);
  
--	pte = lookup_address(address);
-+	pte = lookup_address(address, &level);
- 	BUG_ON(pte == NULL);
+-	memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE);
++	/* do_encrypt returns 0,1 swapped with 2,3 */
++	dst[0] = cpu_to_be32(tmp[2]);
++	dst[1] = cpu_to_be32(tmp[3]);
++	dst[2] = cpu_to_be32(tmp[0]);
++	dst[3] = cpu_to_be32(tmp[1]);
+ }
  
- 	ptev = pte_wrprotect(*pte);
-@@ -84,8 +86,9 @@ void make_lowmem_page_readwrite(void *vaddr)
+-
+ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
  {
- 	pte_t *pte, ptev;
- 	unsigned long address = (unsigned long)vaddr;
-+	int level;
+ 	const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
+ 	const __be32 *src = (const __be32 *)in;
+ 	__be32 *dst = (__be32 *)out;
  
--	pte = lookup_address(address);
-+	pte = lookup_address(address, &level);
- 	BUG_ON(pte == NULL);
+-	__be32 tmp[4];
++	u32 tmp[4];
  
- 	ptev = pte_mkwrite(*pte);
-@@ -241,12 +244,12 @@ unsigned long long xen_pgd_val(pgd_t pgd)
+-	memcpy(tmp, src, CAMELLIA_BLOCK_SIZE);
++	tmp[0] = be32_to_cpu(src[0]);
++	tmp[1] = be32_to_cpu(src[1]);
++	tmp[2] = be32_to_cpu(src[2]);
++	tmp[3] = be32_to_cpu(src[3]);
  
- pte_t xen_make_pte(unsigned long long pte)
- {
--	if (pte & 1)
-+	if (pte & _PAGE_PRESENT) {
- 		pte = phys_to_machine(XPADDR(pte)).maddr;
-+		pte &= ~(_PAGE_PCD | _PAGE_PWT);
-+	}
+-	switch (cctx->key_length) {
+-	case 16:
+-		camellia_decrypt128(cctx->key_table, tmp);
+-		break;
+-	case 24:
+-		/* fall through */
+-	case 32:
+-		camellia_decrypt256(cctx->key_table, tmp);
+-		break;
+-	default:
+-		break;
+-	}
++	camellia_do_decrypt(cctx->key_table, tmp,
++		cctx->key_length == 16 ? 24 : 32 /* for key lengths of 24 and 32 */
++	);
  
--	pte &= ~_PAGE_PCD;
--
--	return (pte_t){ pte, pte >> 32 };
-+	return (pte_t){ .pte = pte };
+-	memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE);
++	/* do_decrypt returns 0,1 swapped with 2,3 */
++	dst[0] = cpu_to_be32(tmp[2]);
++	dst[1] = cpu_to_be32(tmp[3]);
++	dst[2] = cpu_to_be32(tmp[0]);
++	dst[3] = cpu_to_be32(tmp[1]);
  }
  
- pmd_t xen_make_pmd(unsigned long long pmd)
-@@ -290,10 +293,10 @@ unsigned long xen_pgd_val(pgd_t pgd)
- 
- pte_t xen_make_pte(unsigned long pte)
- {
--	if (pte & _PAGE_PRESENT)
-+	if (pte & _PAGE_PRESENT) {
- 		pte = phys_to_machine(XPADDR(pte)).maddr;
 -
--	pte &= ~_PAGE_PCD;
-+		pte &= ~(_PAGE_PCD | _PAGE_PWT);
-+	}
- 
- 	return (pte_t){ pte };
+ static struct crypto_alg camellia_alg = {
+ 	.cra_name		=	"camellia",
+ 	.cra_driver_name	=	"camellia-generic",
+@@ -1786,16 +1116,13 @@ static int __init camellia_init(void)
+ 	return crypto_register_alg(&camellia_alg);
  }
-diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
-index f84e772..3bad477 100644
---- a/arch/x86/xen/setup.c
-+++ b/arch/x86/xen/setup.c
-@@ -10,6 +10,7 @@
- #include <linux/pm.h>
  
- #include <asm/elf.h>
-+#include <asm/vdso.h>
- #include <asm/e820.h>
- #include <asm/setup.h>
- #include <asm/xen/hypervisor.h>
-@@ -59,12 +60,10 @@ static void xen_idle(void)
- /*
-  * Set the bit indicating "nosegneg" library variants should be used.
-  */
--static void fiddle_vdso(void)
-+static void __init fiddle_vdso(void)
+-
+ static void __exit camellia_fini(void)
  {
--	extern u32 VDSO_NOTE_MASK; /* See ../kernel/vsyscall-note.S.  */
--	extern char vsyscall_int80_start;
--	u32 *mask = (u32 *) ((unsigned long) &VDSO_NOTE_MASK - VDSO_PRELINK +
--			     &vsyscall_int80_start);
-+	extern const char vdso32_default_start;
-+	u32 *mask = VDSO32_SYMBOL(&vdso32_default_start, NOTE_MASK);
- 	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
+ 	crypto_unregister_alg(&camellia_alg);
  }
  
-diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
-index c1b131b..aafc544 100644
---- a/arch/x86/xen/smp.c
-+++ b/arch/x86/xen/smp.c
-@@ -146,7 +146,7 @@ void __init xen_smp_prepare_boot_cpu(void)
- 	   old memory can be recycled */
- 	make_lowmem_page_readwrite(&per_cpu__gdt_page);
- 
--	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+	for_each_possible_cpu(cpu) {
- 		cpus_clear(per_cpu(cpu_sibling_map, cpu));
- 		/*
- 		 * cpu_core_map lives in a per cpu area that is cleared
-@@ -163,7 +163,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
- {
- 	unsigned cpu;
- 
--	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+	for_each_possible_cpu(cpu) {
- 		cpus_clear(per_cpu(cpu_sibling_map, cpu));
- 		/*
- 		 * cpu_core_ map will be zeroed when the per
-@@ -239,10 +239,10 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
- 	ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);
- 
- 	ctxt->user_regs.cs = __KERNEL_CS;
--	ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
-+	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
- 
- 	ctxt->kernel_ss = __KERNEL_DS;
--	ctxt->kernel_sp = idle->thread.esp0;
-+	ctxt->kernel_sp = idle->thread.sp0;
- 
- 	ctxt->event_callback_cs     = __KERNEL_CS;
- 	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
-diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
-index d083ff5..b3721fd 100644
---- a/arch/x86/xen/time.c
-+++ b/arch/x86/xen/time.c
-@@ -592,7 +592,7 @@ __init void xen_time_init(void)
- 	set_normalized_timespec(&wall_to_monotonic,
- 				-xtime.tv_sec, -xtime.tv_nsec);
- 
--	tsc_disable = 0;
-+	setup_force_cpu_cap(X86_FEATURE_TSC);
- 
- 	xen_setup_timer(cpu);
- 	xen_setup_cpu_clockevents();
-diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
-index f8d6937..288d587 100644
---- a/arch/x86/xen/xen-head.S
-+++ b/arch/x86/xen/xen-head.S
-@@ -4,16 +4,18 @@
- #ifdef CONFIG_XEN
- 
- #include <linux/elfnote.h>
-+#include <linux/init.h>
- #include <asm/boot.h>
- #include <xen/interface/elfnote.h>
- 
--.pushsection .init.text
-+	__INIT
- ENTRY(startup_xen)
- 	movl %esi,xen_start_info
- 	cld
- 	movl $(init_thread_union+THREAD_SIZE),%esp
- 	jmp xen_start_kernel
--.popsection
-+
-+	__FINIT
+-
+ module_init(camellia_init);
+ module_exit(camellia_fini);
  
- .pushsection .bss.page_aligned
- 	.align PAGE_SIZE_asm
-diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
-index ac4ed52..7d0f55a 100644
---- a/arch/xtensa/kernel/vmlinux.lds.S
-+++ b/arch/xtensa/kernel/vmlinux.lds.S
-@@ -136,13 +136,13 @@ SECTIONS
-   __init_begin = .;
-   .init.text : {
-   	_sinittext = .;
--	*(.init.literal) *(.init.text)
-+	*(.init.literal) INIT_TEXT
- 	_einittext = .;
-   }
+-
+ MODULE_DESCRIPTION("Camellia Cipher Algorithm");
+ MODULE_LICENSE("GPL");
+diff --git a/crypto/cast6.c b/crypto/cast6.c
+index 136ab6d..5fd9420 100644
+--- a/crypto/cast6.c
++++ b/crypto/cast6.c
+@@ -369,7 +369,7 @@ static const u8 Tr[4][8] = {
+ };
  
-   .init.data :
-   {
--    *(.init.data)
-+    INIT_DATA
-     . = ALIGN(0x4);
-     __tagtable_begin = .;
-     *(.taglist)
-@@ -278,8 +278,9 @@ SECTIONS
-   /* Sections to be discarded */
-   /DISCARD/ :
-   {
--  	*(.exit.literal .exit.text)
--  	*(.exit.data)
-+	*(.exit.literal)
-+	EXIT_TEXT
-+	EXIT_DATA
-         *(.exitcall.exit)
-   }
+ /* forward octave */
+-static inline void W(u32 *key, unsigned int i) {
++static void W(u32 *key, unsigned int i) {
+ 	u32 I;
+ 	key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
+ 	key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
+@@ -428,7 +428,7 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ }
  
-diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
-index 10aec22..64e304a 100644
---- a/arch/xtensa/mm/Makefile
-+++ b/arch/xtensa/mm/Makefile
-@@ -1,9 +1,5 @@
- #
- # Makefile for the Linux/Xtensa-specific parts of the memory manager.
- #
--# Note! Dependencies are done automagically by 'make dep', which also
--# removes any old dependencies. DON'T put your own dependencies here
--# unless it's something special (ie not a .c file).
--#
+ /*forward quad round*/
+-static inline void Q (u32 * block, u8 * Kr, u32 * Km) {
++static void Q (u32 * block, u8 * Kr, u32 * Km) {
+ 	u32 I;
+ 	block[2] ^= F1(block[3], Kr[0], Km[0]);
+ 	block[1] ^= F2(block[2], Kr[1], Km[1]);
+@@ -437,7 +437,7 @@ static inline void Q (u32 * block, u8 * Kr, u32 * Km) {
+ }
  
- obj-y	 := init.o fault.o tlb.o misc.o cache.o
-diff --git a/arch/xtensa/platform-iss/Makefile b/arch/xtensa/platform-iss/Makefile
-index 5b394e9..af96e31 100644
---- a/arch/xtensa/platform-iss/Makefile
-+++ b/arch/xtensa/platform-iss/Makefile
-@@ -3,11 +3,6 @@
- # Makefile for the Xtensa Instruction Set Simulator (ISS)
- # "prom monitor" library routines under Linux.
- #
--# Note! Dependencies are done automagically by 'make dep', which also
--# removes any old dependencies. DON'T put your own dependencies here
--# unless it's something special (ie not a .c file).
--#
--# Note 2! The CFLAGS definitions are in the main makefile...
+ /*reverse quad round*/
+-static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) {
++static void QBAR (u32 * block, u8 * Kr, u32 * Km) {
+ 	u32 I;
+         block[3] ^= F1(block[0], Kr[3], Km[3]);
+         block[0] ^= F3(block[1], Kr[2], Km[2]);
+diff --git a/crypto/cbc.c b/crypto/cbc.c
+index 1f2649e..6affff8 100644
+--- a/crypto/cbc.c
++++ b/crypto/cbc.c
+@@ -14,13 +14,13 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
++#include <linux/log2.h>
+ #include <linux/module.h>
+ #include <linux/scatterlist.h>
+ #include <linux/slab.h>
  
- obj-y			= io.o console.o setup.o network.o
+ struct crypto_cbc_ctx {
+ 	struct crypto_cipher *child;
+-	void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
+ };
  
-diff --git a/block/Makefile b/block/Makefile
-index 8261081..5a43c7d 100644
---- a/block/Makefile
-+++ b/block/Makefile
-@@ -2,7 +2,9 @@
- # Makefile for the kernel block layer
- #
+ static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
+@@ -41,9 +41,7 @@ static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
  
--obj-$(CONFIG_BLOCK) := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
-+obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
-+			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
-+			blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o
+ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
+ 				      struct blkcipher_walk *walk,
+-				      struct crypto_cipher *tfm,
+-				      void (*xor)(u8 *, const u8 *,
+-						  unsigned int))
++				      struct crypto_cipher *tfm)
+ {
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ 		crypto_cipher_alg(tfm)->cia_encrypt;
+@@ -54,7 +52,7 @@ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
+ 	u8 *iv = walk->iv;
  
- obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
- obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
-diff --git a/block/as-iosched.c b/block/as-iosched.c
-index cb5e53b..9603684 100644
---- a/block/as-iosched.c
-+++ b/block/as-iosched.c
-@@ -170,9 +170,11 @@ static void free_as_io_context(struct as_io_context *aic)
+ 	do {
+-		xor(iv, src, bsize);
++		crypto_xor(iv, src, bsize);
+ 		fn(crypto_cipher_tfm(tfm), dst, iv);
+ 		memcpy(iv, dst, bsize);
  
- static void as_trim(struct io_context *ioc)
+@@ -67,9 +65,7 @@ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
+ 
+ static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
+ 				      struct blkcipher_walk *walk,
+-				      struct crypto_cipher *tfm,
+-				      void (*xor)(u8 *, const u8 *,
+-						  unsigned int))
++				      struct crypto_cipher *tfm)
  {
-+	spin_lock(&ioc->lock);
- 	if (ioc->aic)
- 		free_as_io_context(ioc->aic);
- 	ioc->aic = NULL;
-+	spin_unlock(&ioc->lock);
- }
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ 		crypto_cipher_alg(tfm)->cia_encrypt;
+@@ -79,7 +75,7 @@ static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
+ 	u8 *iv = walk->iv;
  
- /* Called when the task exits */
-@@ -462,7 +464,9 @@ static void as_antic_timeout(unsigned long data)
- 	spin_lock_irqsave(q->queue_lock, flags);
- 	if (ad->antic_status == ANTIC_WAIT_REQ
- 			|| ad->antic_status == ANTIC_WAIT_NEXT) {
--		struct as_io_context *aic = ad->io_context->aic;
-+		struct as_io_context *aic;
-+		spin_lock(&ad->io_context->lock);
-+		aic = ad->io_context->aic;
+ 	do {
+-		xor(src, iv, bsize);
++		crypto_xor(src, iv, bsize);
+ 		fn(crypto_cipher_tfm(tfm), src, src);
+ 		iv = src;
  
- 		ad->antic_status = ANTIC_FINISHED;
- 		kblockd_schedule_work(&ad->antic_work);
-@@ -475,6 +479,7 @@ static void as_antic_timeout(unsigned long data)
- 			/* process not "saved" by a cooperating request */
- 			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
- 		}
-+		spin_unlock(&ad->io_context->lock);
- 	}
- 	spin_unlock_irqrestore(q->queue_lock, flags);
- }
-@@ -635,9 +640,11 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
+@@ -99,7 +95,6 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
+ 	struct crypto_blkcipher *tfm = desc->tfm;
+ 	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ 	struct crypto_cipher *child = ctx->child;
+-	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+ 	int err;
  
- 	ioc = ad->io_context;
- 	BUG_ON(!ioc);
-+	spin_lock(&ioc->lock);
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+@@ -107,11 +102,9 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
  
- 	if (rq && ioc == RQ_IOC(rq)) {
- 		/* request from same process */
-+		spin_unlock(&ioc->lock);
- 		return 1;
+ 	while ((nbytes = walk.nbytes)) {
+ 		if (walk.src.virt.addr == walk.dst.virt.addr)
+-			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child,
+-							    xor);
++			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
+ 		else
+-			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child,
+-							    xor);
++			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
  	}
  
-@@ -646,20 +653,25 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
- 		 * In this situation status should really be FINISHED,
- 		 * however the timer hasn't had the chance to run yet.
- 		 */
-+		spin_unlock(&ioc->lock);
- 		return 1;
- 	}
+@@ -120,9 +113,7 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
  
- 	aic = ioc->aic;
--	if (!aic)
-+	if (!aic) {
-+		spin_unlock(&ioc->lock);
- 		return 0;
-+	}
+ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
+ 				      struct blkcipher_walk *walk,
+-				      struct crypto_cipher *tfm,
+-				      void (*xor)(u8 *, const u8 *,
+-						  unsigned int))
++				      struct crypto_cipher *tfm)
+ {
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ 		crypto_cipher_alg(tfm)->cia_decrypt;
+@@ -134,7 +125,7 @@ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
  
- 	if (atomic_read(&aic->nr_queued) > 0) {
- 		/* process has more requests queued */
-+		spin_unlock(&ioc->lock);
- 		return 1;
- 	}
+ 	do {
+ 		fn(crypto_cipher_tfm(tfm), dst, src);
+-		xor(dst, iv, bsize);
++		crypto_xor(dst, iv, bsize);
+ 		iv = src;
  
- 	if (atomic_read(&aic->nr_dispatched) > 0) {
- 		/* process has more requests dispatched */
-+		spin_unlock(&ioc->lock);
- 		return 1;
- 	}
+ 		src += bsize;
+@@ -148,34 +139,29 @@ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
  
-@@ -680,6 +692,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
- 		}
+ static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
+ 				      struct blkcipher_walk *walk,
+-				      struct crypto_cipher *tfm,
+-				      void (*xor)(u8 *, const u8 *,
+-						  unsigned int))
++				      struct crypto_cipher *tfm)
+ {
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ 		crypto_cipher_alg(tfm)->cia_decrypt;
+ 	int bsize = crypto_cipher_blocksize(tfm);
+-	unsigned long alignmask = crypto_cipher_alignmask(tfm);
+ 	unsigned int nbytes = walk->nbytes;
+ 	u8 *src = walk->src.virt.addr;
+-	u8 stack[bsize + alignmask];
+-	u8 *first_iv = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
+-
+-	memcpy(first_iv, walk->iv, bsize);
++	u8 last_iv[bsize];
  
- 		as_update_iohist(ad, aic, rq);
-+		spin_unlock(&ioc->lock);
- 		return 1;
+ 	/* Start of the last block. */
+-	src += nbytes - nbytes % bsize - bsize;
+-	memcpy(walk->iv, src, bsize);
++	src += nbytes - (nbytes & (bsize - 1)) - bsize;
++	memcpy(last_iv, src, bsize);
+ 
+ 	for (;;) {
+ 		fn(crypto_cipher_tfm(tfm), src, src);
+ 		if ((nbytes -= bsize) < bsize)
+ 			break;
+-		xor(src, src - bsize, bsize);
++		crypto_xor(src, src - bsize, bsize);
+ 		src -= bsize;
  	}
  
-@@ -688,20 +701,27 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
- 		if (aic->ttime_samples == 0)
- 			ad->exit_prob = (7*ad->exit_prob + 256)/8;
+-	xor(src, first_iv, bsize);
++	crypto_xor(src, walk->iv, bsize);
++	memcpy(walk->iv, last_iv, bsize);
  
--		if (ad->exit_no_coop > 128)
-+		if (ad->exit_no_coop > 128) {
-+			spin_unlock(&ioc->lock);
- 			return 1;
-+		}
- 	}
+ 	return nbytes;
+ }
+@@ -188,7 +174,6 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
+ 	struct crypto_blkcipher *tfm = desc->tfm;
+ 	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ 	struct crypto_cipher *child = ctx->child;
+-	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+ 	int err;
  
- 	if (aic->ttime_samples == 0) {
--		if (ad->new_ttime_mean > ad->antic_expire)
-+		if (ad->new_ttime_mean > ad->antic_expire) {
-+			spin_unlock(&ioc->lock);
- 			return 1;
--		if (ad->exit_prob * ad->exit_no_coop > 128*256)
-+		}
-+		if (ad->exit_prob * ad->exit_no_coop > 128*256) {
-+			spin_unlock(&ioc->lock);
- 			return 1;
-+		}
- 	} else if (aic->ttime_mean > ad->antic_expire) {
- 		/* the process thinks too much between requests */
-+		spin_unlock(&ioc->lock);
- 		return 1;
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+@@ -196,48 +181,15 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
+ 
+ 	while ((nbytes = walk.nbytes)) {
+ 		if (walk.src.virt.addr == walk.dst.virt.addr)
+-			nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child,
+-							    xor);
++			nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
+ 		else
+-			nbytes = crypto_cbc_decrypt_segment(desc, &walk, child,
+-							    xor);
++			nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
  	}
--
-+	spin_unlock(&ioc->lock);
- 	return 0;
+ 
+ 	return err;
  }
  
-@@ -1255,7 +1275,13 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
- 			 * Don't copy here but swap, because when anext is
- 			 * removed below, it must contain the unused context
- 			 */
--			swap_io_context(&rioc, &nioc);
-+			if (rioc != nioc) {
-+				double_spin_lock(&rioc->lock, &nioc->lock,
-+								rioc < nioc);
-+				swap_io_context(&rioc, &nioc);
-+				double_spin_unlock(&rioc->lock, &nioc->lock,
-+								rioc < nioc);
-+			}
- 		}
- 	}
+-static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
+-{
+-	do {
+-		*a++ ^= *b++;
+-	} while (--bs);
+-}
+-
+-static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
+-{
+-	u32 *a = (u32 *)dst;
+-	u32 *b = (u32 *)src;
+-
+-	do {
+-		*a++ ^= *b++;
+-	} while ((bs -= 4));
+-}
+-
+-static void xor_64(u8 *a, const u8 *b, unsigned int bs)
+-{
+-	((u32 *)a)[0] ^= ((u32 *)b)[0];
+-	((u32 *)a)[1] ^= ((u32 *)b)[1];
+-}
+-
+-static void xor_128(u8 *a, const u8 *b, unsigned int bs)
+-{
+-	((u32 *)a)[0] ^= ((u32 *)b)[0];
+-	((u32 *)a)[1] ^= ((u32 *)b)[1];
+-	((u32 *)a)[2] ^= ((u32 *)b)[2];
+-	((u32 *)a)[3] ^= ((u32 *)b)[3];
+-}
+-
+ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
+ {
+ 	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+@@ -245,22 +197,6 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
+ 	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	struct crypto_cipher *cipher;
  
-diff --git a/block/blk-barrier.c b/block/blk-barrier.c
+-	switch (crypto_tfm_alg_blocksize(tfm)) {
+-	case 8:
+-		ctx->xor = xor_64;
+-		break;
+-
+-	case 16:
+-		ctx->xor = xor_128;
+-		break;
+-
+-	default:
+-		if (crypto_tfm_alg_blocksize(tfm) % 4)
+-			ctx->xor = xor_byte;
+-		else
+-			ctx->xor = xor_quad;
+-	}
+-
+ 	cipher = crypto_spawn_cipher(spawn);
+ 	if (IS_ERR(cipher))
+ 		return PTR_ERR(cipher);
+@@ -290,6 +226,10 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
+ 	if (IS_ERR(alg))
+ 		return ERR_PTR(PTR_ERR(alg));
+ 
++	inst = ERR_PTR(-EINVAL);
++	if (!is_power_of_2(alg->cra_blocksize))
++		goto out_put_alg;
++
+ 	inst = crypto_alloc_instance("cbc", alg);
+ 	if (IS_ERR(inst))
+ 		goto out_put_alg;
+@@ -300,8 +240,9 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
+ 	inst->alg.cra_alignmask = alg->cra_alignmask;
+ 	inst->alg.cra_type = &crypto_blkcipher_type;
+ 
+-	if (!(alg->cra_blocksize % 4))
+-		inst->alg.cra_alignmask |= 3;
++	/* We access the data as u32s when xoring. */
++	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
++
+ 	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
+ 	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
+ 	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+diff --git a/crypto/ccm.c b/crypto/ccm.c
 new file mode 100644
-index 0000000..5f74fec
+index 0000000..7cf7e5a
 --- /dev/null
-+++ b/block/blk-barrier.c
-@@ -0,0 +1,319 @@
++++ b/crypto/ccm.c
+@@ -0,0 +1,889 @@
 +/*
-+ * Functions related to barrier IO handling
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
-+
-+#include "blk.h"
-+
-+/**
-+ * blk_queue_ordered - does this queue support ordered writes
-+ * @q:        the request queue
-+ * @ordered:  one of QUEUE_ORDERED_*
-+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
++ * CCM: Counter with CBC-MAC
 + *
-+ * Description:
-+ *   For journalled file systems, doing ordered writes on a commit
-+ *   block instead of explicitly doing wait_on_buffer (which is bad
-+ *   for performance) can be a big win. Block drivers supporting this
-+ *   feature should call this function and indicate so.
++ * (C) Copyright IBM Corp. 2007 - Joy Latten <latten at us.ibm.com>
 + *
-+ **/
-+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
-+		      prepare_flush_fn *prepare_flush_fn)
-+{
-+	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
-+	    prepare_flush_fn == NULL) {
-+		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
-+		return -EINVAL;
-+	}
-+
-+	if (ordered != QUEUE_ORDERED_NONE &&
-+	    ordered != QUEUE_ORDERED_DRAIN &&
-+	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
-+	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
-+	    ordered != QUEUE_ORDERED_TAG &&
-+	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
-+	    ordered != QUEUE_ORDERED_TAG_FUA) {
-+		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
-+		return -EINVAL;
-+	}
-+
-+	q->ordered = ordered;
-+	q->next_ordered = ordered;
-+	q->prepare_flush_fn = prepare_flush_fn;
-+
-+	return 0;
-+}
-+
-+EXPORT_SYMBOL(blk_queue_ordered);
-+
-+/*
-+ * Cache flushing for ordered writes handling
-+ */
-+inline unsigned blk_ordered_cur_seq(struct request_queue *q)
-+{
-+	if (!q->ordseq)
-+		return 0;
-+	return 1 << ffz(q->ordseq);
-+}
-+
-+unsigned blk_ordered_req_seq(struct request *rq)
-+{
-+	struct request_queue *q = rq->q;
-+
-+	BUG_ON(q->ordseq == 0);
-+
-+	if (rq == &q->pre_flush_rq)
-+		return QUEUE_ORDSEQ_PREFLUSH;
-+	if (rq == &q->bar_rq)
-+		return QUEUE_ORDSEQ_BAR;
-+	if (rq == &q->post_flush_rq)
-+		return QUEUE_ORDSEQ_POSTFLUSH;
-+
-+	/*
-+	 * !fs requests don't need to follow barrier ordering.  Always
-+	 * put them at the front.  This fixes the following deadlock.
-+	 *
-+	 * http://thread.gmane.org/gmane.linux.kernel/537473
-+	 */
-+	if (!blk_fs_request(rq))
-+		return QUEUE_ORDSEQ_DRAIN;
-+
-+	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
-+	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
-+		return QUEUE_ORDSEQ_DRAIN;
-+	else
-+		return QUEUE_ORDSEQ_DONE;
-+}
-+
-+void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
-+{
-+	struct request *rq;
-+
-+	if (error && !q->orderr)
-+		q->orderr = error;
-+
-+	BUG_ON(q->ordseq & seq);
-+	q->ordseq |= seq;
-+
-+	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
-+		return;
-+
-+	/*
-+	 * Okay, sequence complete.
-+	 */
-+	q->ordseq = 0;
-+	rq = q->orig_bar_rq;
-+
-+	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
-+		BUG();
-+}
-+
-+static void pre_flush_end_io(struct request *rq, int error)
-+{
-+	elv_completed_request(rq->q, rq);
-+	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
-+}
-+
-+static void bar_end_io(struct request *rq, int error)
-+{
-+	elv_completed_request(rq->q, rq);
-+	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
-+}
-+
-+static void post_flush_end_io(struct request *rq, int error)
-+{
-+	elv_completed_request(rq->q, rq);
-+	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
-+}
-+
-+static void queue_flush(struct request_queue *q, unsigned which)
-+{
-+	struct request *rq;
-+	rq_end_io_fn *end_io;
-+
-+	if (which == QUEUE_ORDERED_PREFLUSH) {
-+		rq = &q->pre_flush_rq;
-+		end_io = pre_flush_end_io;
-+	} else {
-+		rq = &q->post_flush_rq;
-+		end_io = post_flush_end_io;
-+	}
-+
-+	rq->cmd_flags = REQ_HARDBARRIER;
-+	rq_init(q, rq);
-+	rq->elevator_private = NULL;
-+	rq->elevator_private2 = NULL;
-+	rq->rq_disk = q->bar_rq.rq_disk;
-+	rq->end_io = end_io;
-+	q->prepare_flush_fn(q, rq);
-+
-+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
-+}
-+
-+static inline struct request *start_ordered(struct request_queue *q,
-+					    struct request *rq)
-+{
-+	q->orderr = 0;
-+	q->ordered = q->next_ordered;
-+	q->ordseq |= QUEUE_ORDSEQ_STARTED;
-+
-+	/*
-+	 * Prep proxy barrier request.
-+	 */
-+	blkdev_dequeue_request(rq);
-+	q->orig_bar_rq = rq;
-+	rq = &q->bar_rq;
-+	rq->cmd_flags = 0;
-+	rq_init(q, rq);
-+	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-+		rq->cmd_flags |= REQ_RW;
-+	if (q->ordered & QUEUE_ORDERED_FUA)
-+		rq->cmd_flags |= REQ_FUA;
-+	rq->elevator_private = NULL;
-+	rq->elevator_private2 = NULL;
-+	init_request_from_bio(rq, q->orig_bar_rq->bio);
-+	rq->end_io = bar_end_io;
-+
-+	/*
-+	 * Queue ordered sequence.  As we stack them at the head, we
-+	 * need to queue in reverse order.  Note that we rely on that
-+	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-+	 * request gets inbetween ordered sequence. If this request is
-+	 * an empty barrier, we don't need to do a postflush ever since
-+	 * there will be no data written between the pre and post flush.
-+	 * Hence a single flush will suffice.
-+	 */
-+	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
-+		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
-+	else
-+		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
-+
-+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
-+
-+	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
-+		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
-+		rq = &q->pre_flush_rq;
-+	} else
-+		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
-+
-+	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
-+		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
-+	else
-+		rq = NULL;
-+
-+	return rq;
-+}
-+
-+int blk_do_ordered(struct request_queue *q, struct request **rqp)
-+{
-+	struct request *rq = *rqp;
-+	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
-+
-+	if (!q->ordseq) {
-+		if (!is_barrier)
-+			return 1;
-+
-+		if (q->next_ordered != QUEUE_ORDERED_NONE) {
-+			*rqp = start_ordered(q, rq);
-+			return 1;
-+		} else {
-+			/*
-+			 * This can happen when the queue switches to
-+			 * ORDERED_NONE while this request is on it.
-+			 */
-+			blkdev_dequeue_request(rq);
-+			if (__blk_end_request(rq, -EOPNOTSUPP,
-+					      blk_rq_bytes(rq)))
-+				BUG();
-+			*rqp = NULL;
-+			return 0;
-+		}
-+	}
-+
-+	/*
-+	 * Ordered sequence in progress
-+	 */
-+
-+	/* Special requests are not subject to ordering rules. */
-+	if (!blk_fs_request(rq) &&
-+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-+		return 1;
-+
-+	if (q->ordered & QUEUE_ORDERED_TAG) {
-+		/* Ordered by tag.  Blocking the next barrier is enough. */
-+		if (is_barrier && rq != &q->bar_rq)
-+			*rqp = NULL;
-+	} else {
-+		/* Ordered by draining.  Wait for turn. */
-+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
-+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
-+			*rqp = NULL;
-+	}
-+
-+	return 1;
-+}
-+
-+static void bio_end_empty_barrier(struct bio *bio, int err)
-+{
-+	if (err)
-+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-+
-+	complete(bio->bi_private);
-+}
-+
-+/**
-+ * blkdev_issue_flush - queue a flush
-+ * @bdev:	blockdev to issue flush for
-+ * @error_sector:	error sector
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
 + *
-+ * Description:
-+ *    Issue a flush for the block device in question. Caller can supply
-+ *    room for storing the error offset in case of a flush error, if they
-+ *    wish to.  Caller must run wait_for_completion() on its own.
-+ */
-+int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
-+{
-+	DECLARE_COMPLETION_ONSTACK(wait);
-+	struct request_queue *q;
-+	struct bio *bio;
-+	int ret;
-+
-+	if (bdev->bd_disk == NULL)
-+		return -ENXIO;
-+
-+	q = bdev_get_queue(bdev);
-+	if (!q)
-+		return -ENXIO;
-+
-+	bio = bio_alloc(GFP_KERNEL, 0);
-+	if (!bio)
-+		return -ENOMEM;
-+
-+	bio->bi_end_io = bio_end_empty_barrier;
-+	bio->bi_private = &wait;
-+	bio->bi_bdev = bdev;
-+	submit_bio(1 << BIO_RW_BARRIER, bio);
-+
-+	wait_for_completion(&wait);
-+
-+	/*
-+	 * The driver must store the error location in ->bi_sector, if
-+	 * it supports it. For non-stacked drivers, this should be copied
-+	 * from rq->sector.
-+	 */
-+	if (error_sector)
-+		*error_sector = bio->bi_sector;
-+
-+	ret = 0;
-+	if (!bio_flagged(bio, BIO_UPTODATE))
-+		ret = -EIO;
-+
-+	bio_put(bio);
-+	return ret;
-+}
-+
-+EXPORT_SYMBOL(blkdev_issue_flush);
-diff --git a/block/blk-core.c b/block/blk-core.c
-new file mode 100644
-index 0000000..8ff9944
---- /dev/null
-+++ b/block/blk-core.c
-@@ -0,0 +1,2034 @@
-+/*
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
-+ * Elevator latency, (C) 2000  Andrea Arcangeli <andrea at suse.de> SuSE
-+ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe at suse.de>
-+ * kernel-doc documentation started by NeilBrown <neilb at cse.unsw.edu.au> -  July2000
-+ * bio rewrite, highmem i/o, etc, Jens Axboe <axboe at suse.de> - may 2001
 + */
 +
-+/*
-+ * This handles all read/write requests to block devices
-+ */
++#include <crypto/internal/aead.h>
++#include <crypto/internal/skcipher.h>
++#include <crypto/scatterwalk.h>
++#include <linux/err.h>
++#include <linux/init.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
-+#include <linux/backing-dev.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
-+#include <linux/highmem.h>
-+#include <linux/mm.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/string.h>
-+#include <linux/init.h>
-+#include <linux/completion.h>
 +#include <linux/slab.h>
-+#include <linux/swap.h>
-+#include <linux/writeback.h>
-+#include <linux/task_io_accounting_ops.h>
-+#include <linux/interrupt.h>
-+#include <linux/cpu.h>
-+#include <linux/blktrace_api.h>
-+#include <linux/fault-inject.h>
-+
-+#include "blk.h"
-+
-+static int __make_request(struct request_queue *q, struct bio *bio);
-+
-+/*
-+ * For the allocated request tables
-+ */
-+struct kmem_cache *request_cachep;
-+
-+/*
-+ * For queue allocation
-+ */
-+struct kmem_cache *blk_requestq_cachep = NULL;
-+
-+/*
-+ * Controlling structure to kblockd
-+ */
-+static struct workqueue_struct *kblockd_workqueue;
-+
-+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
-+
-+static void drive_stat_acct(struct request *rq, int new_io)
-+{
-+	int rw = rq_data_dir(rq);
-+
-+	if (!blk_fs_request(rq) || !rq->rq_disk)
-+		return;
-+
-+	if (!new_io) {
-+		__disk_stat_inc(rq->rq_disk, merges[rw]);
-+	} else {
-+		disk_round_stats(rq->rq_disk);
-+		rq->rq_disk->in_flight++;
-+	}
-+}
 +
-+void blk_queue_congestion_threshold(struct request_queue *q)
-+{
-+	int nr;
-+
-+	nr = q->nr_requests - (q->nr_requests / 8) + 1;
-+	if (nr > q->nr_requests)
-+		nr = q->nr_requests;
-+	q->nr_congestion_on = nr;
-+
-+	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
-+	if (nr < 1)
-+		nr = 1;
-+	q->nr_congestion_off = nr;
-+}
-+
-+/**
-+ * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
-+ * @bdev:	device
-+ *
-+ * Locates the passed device's request queue and returns the address of its
-+ * backing_dev_info
-+ *
-+ * Will return NULL if the request queue cannot be located.
-+ */
-+struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
-+{
-+	struct backing_dev_info *ret = NULL;
-+	struct request_queue *q = bdev_get_queue(bdev);
-+
-+	if (q)
-+		ret = &q->backing_dev_info;
-+	return ret;
-+}
-+EXPORT_SYMBOL(blk_get_backing_dev_info);
-+
-+void rq_init(struct request_queue *q, struct request *rq)
-+{
-+	INIT_LIST_HEAD(&rq->queuelist);
-+	INIT_LIST_HEAD(&rq->donelist);
-+
-+	rq->errors = 0;
-+	rq->bio = rq->biotail = NULL;
-+	INIT_HLIST_NODE(&rq->hash);
-+	RB_CLEAR_NODE(&rq->rb_node);
-+	rq->ioprio = 0;
-+	rq->buffer = NULL;
-+	rq->ref_count = 1;
-+	rq->q = q;
-+	rq->special = NULL;
-+	rq->data_len = 0;
-+	rq->data = NULL;
-+	rq->nr_phys_segments = 0;
-+	rq->sense = NULL;
-+	rq->end_io = NULL;
-+	rq->end_io_data = NULL;
-+	rq->completion_data = NULL;
-+	rq->next_rq = NULL;
-+}
-+
-+static void req_bio_endio(struct request *rq, struct bio *bio,
-+			  unsigned int nbytes, int error)
-+{
-+	struct request_queue *q = rq->q;
-+
-+	if (&q->bar_rq != rq) {
-+		if (error)
-+			clear_bit(BIO_UPTODATE, &bio->bi_flags);
-+		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-+			error = -EIO;
-+
-+		if (unlikely(nbytes > bio->bi_size)) {
-+			printk("%s: want %u bytes done, only %u left\n",
-+			       __FUNCTION__, nbytes, bio->bi_size);
-+			nbytes = bio->bi_size;
-+		}
-+
-+		bio->bi_size -= nbytes;
-+		bio->bi_sector += (nbytes >> 9);
-+		if (bio->bi_size == 0)
-+			bio_endio(bio, error);
-+	} else {
-+
-+		/*
-+		 * Okay, this is the barrier request in progress, just
-+		 * record the error;
-+		 */
-+		if (error && !q->orderr)
-+			q->orderr = error;
-+	}
-+}
-+
-+void blk_dump_rq_flags(struct request *rq, char *msg)
-+{
-+	int bit;
-+
-+	printk("%s: dev %s: type=%x, flags=%x\n", msg,
-+		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
-+		rq->cmd_flags);
-+
-+	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-+						       rq->nr_sectors,
-+						       rq->current_nr_sectors);
-+	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
-+
-+	if (blk_pc_request(rq)) {
-+		printk("cdb: ");
-+		for (bit = 0; bit < sizeof(rq->cmd); bit++)
-+			printk("%02x ", rq->cmd[bit]);
-+		printk("\n");
-+	}
-+}
-+
-+EXPORT_SYMBOL(blk_dump_rq_flags);
-+
-+/*
-+ * "plug" the device if there are no outstanding requests: this will
-+ * force the transfer to start only after we have put all the requests
-+ * on the list.
-+ *
-+ * This is called with interrupts off and no requests on the queue and
-+ * with the queue lock held.
-+ */
-+void blk_plug_device(struct request_queue *q)
-+{
-+	WARN_ON(!irqs_disabled());
-+
-+	/*
-+	 * don't plug a stopped queue, it must be paired with blk_start_queue()
-+	 * which will restart the queueing
-+	 */
-+	if (blk_queue_stopped(q))
-+		return;
-+
-+	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-+		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-+		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
-+	}
-+}
-+
-+EXPORT_SYMBOL(blk_plug_device);
-+
-+/*
-+ * remove the queue from the plugged list, if present. called with
-+ * queue lock held and interrupts disabled.
-+ */
-+int blk_remove_plug(struct request_queue *q)
-+{
-+	WARN_ON(!irqs_disabled());
-+
-+	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
-+		return 0;
-+
-+	del_timer(&q->unplug_timer);
-+	return 1;
-+}
-+
-+EXPORT_SYMBOL(blk_remove_plug);
-+
-+/*
-+ * remove the plug and let it rip..
-+ */
-+void __generic_unplug_device(struct request_queue *q)
-+{
-+	if (unlikely(blk_queue_stopped(q)))
-+		return;
-+
-+	if (!blk_remove_plug(q))
-+		return;
++#include "internal.h"
 +
-+	q->request_fn(q);
-+}
-+EXPORT_SYMBOL(__generic_unplug_device);
++struct ccm_instance_ctx {
++	struct crypto_skcipher_spawn ctr;
++	struct crypto_spawn cipher;
++};
 +
-+/**
-+ * generic_unplug_device - fire a request queue
-+ * @q:    The &struct request_queue in question
-+ *
-+ * Description:
-+ *   Linux uses plugging to build bigger requests queues before letting
-+ *   the device have at them. If a queue is plugged, the I/O scheduler
-+ *   is still adding and merging requests on the queue. Once the queue
-+ *   gets unplugged, the request_fn defined for the queue is invoked and
-+ *   transfers started.
-+ **/
-+void generic_unplug_device(struct request_queue *q)
-+{
-+	spin_lock_irq(q->queue_lock);
-+	__generic_unplug_device(q);
-+	spin_unlock_irq(q->queue_lock);
-+}
-+EXPORT_SYMBOL(generic_unplug_device);
++struct crypto_ccm_ctx {
++	struct crypto_cipher *cipher;
++	struct crypto_ablkcipher *ctr;
++};
 +
-+static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-+				   struct page *page)
-+{
-+	struct request_queue *q = bdi->unplug_io_data;
++struct crypto_rfc4309_ctx {
++	struct crypto_aead *child;
++	u8 nonce[3];
++};
 +
-+	blk_unplug(q);
-+}
++struct crypto_ccm_req_priv_ctx {
++	u8 odata[16];
++	u8 idata[16];
++	u8 auth_tag[16];
++	u32 ilen;
++	u32 flags;
++	struct scatterlist src[2];
++	struct scatterlist dst[2];
++	struct ablkcipher_request abreq;
++};
 +
-+void blk_unplug_work(struct work_struct *work)
++static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
++	struct aead_request *req)
 +{
-+	struct request_queue *q =
-+		container_of(work, struct request_queue, unplug_work);
-+
-+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-+				q->rq.count[READ] + q->rq.count[WRITE]);
++	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
 +
-+	q->unplug_fn(q);
++	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
 +}
 +
-+void blk_unplug_timeout(unsigned long data)
++static int set_msg_len(u8 *block, unsigned int msglen, int csize)
 +{
-+	struct request_queue *q = (struct request_queue *)data;
++	__be32 data;
 +
-+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-+				q->rq.count[READ] + q->rq.count[WRITE]);
++	memset(block, 0, csize);
++	block += csize;
 +
-+	kblockd_schedule_work(&q->unplug_work);
-+}
++	if (csize >= 4)
++		csize = 4;
++	else if (msglen > (1 << (8 * csize)))
++		return -EOVERFLOW;
 +
-+void blk_unplug(struct request_queue *q)
-+{
-+	/*
-+	 * devices don't necessarily have an ->unplug_fn defined
-+	 */
-+	if (q->unplug_fn) {
-+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-+					q->rq.count[READ] + q->rq.count[WRITE]);
++	data = cpu_to_be32(msglen);
++	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
 +
-+		q->unplug_fn(q);
-+	}
++	return 0;
 +}
-+EXPORT_SYMBOL(blk_unplug);
 +
-+/**
-+ * blk_start_queue - restart a previously stopped queue
-+ * @q:    The &struct request_queue in question
-+ *
-+ * Description:
-+ *   blk_start_queue() will clear the stop flag on the queue, and call
-+ *   the request_fn for the queue if it was in a stopped state when
-+ *   entered. Also see blk_stop_queue(). Queue lock must be held.
-+ **/
-+void blk_start_queue(struct request_queue *q)
++static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
++			     unsigned int keylen)
 +{
-+	WARN_ON(!irqs_disabled());
-+
-+	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
-+
-+	/*
-+	 * one level of recursion is ok and is much faster than kicking
-+	 * the unplug handling
-+	 */
-+	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-+		q->request_fn(q);
-+		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
-+	} else {
-+		blk_plug_device(q);
-+		kblockd_schedule_work(&q->unplug_work);
-+	}
-+}
++	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
++	struct crypto_ablkcipher *ctr = ctx->ctr;
++	struct crypto_cipher *tfm = ctx->cipher;
++	int err = 0;
 +
-+EXPORT_SYMBOL(blk_start_queue);
++	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
++	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
++				    CRYPTO_TFM_REQ_MASK);
++	err = crypto_ablkcipher_setkey(ctr, key, keylen);
++	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
++			      CRYPTO_TFM_RES_MASK);
++	if (err)
++		goto out;
 +
-+/**
-+ * blk_stop_queue - stop a queue
-+ * @q:    The &struct request_queue in question
-+ *
-+ * Description:
-+ *   The Linux block layer assumes that a block driver will consume all
-+ *   entries on the request queue when the request_fn strategy is called.
-+ *   Often this will not happen, because of hardware limitations (queue
-+ *   depth settings). If a device driver gets a 'queue full' response,
-+ *   or if it simply chooses not to queue more I/O at one point, it can
-+ *   call this function to prevent the request_fn from being called until
-+ *   the driver has signalled it's ready to go again. This happens by calling
-+ *   blk_start_queue() to restart queue operations. Queue lock must be held.
-+ **/
-+void blk_stop_queue(struct request_queue *q)
-+{
-+	blk_remove_plug(q);
-+	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
-+}
-+EXPORT_SYMBOL(blk_stop_queue);
++	crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
++	crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) &
++				    CRYPTO_TFM_REQ_MASK);
++	err = crypto_cipher_setkey(tfm, key, keylen);
++	crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) &
++			      CRYPTO_TFM_RES_MASK);
 +
-+/**
-+ * blk_sync_queue - cancel any pending callbacks on a queue
-+ * @q: the queue
-+ *
-+ * Description:
-+ *     The block layer may perform asynchronous callback activity
-+ *     on a queue, such as calling the unplug function after a timeout.
-+ *     A block device may call blk_sync_queue to ensure that any
-+ *     such activity is cancelled, thus allowing it to release resources
-+ *     that the callbacks might use. The caller must already have made sure
-+ *     that its ->make_request_fn will not re-add plugging prior to calling
-+ *     this function.
-+ *
-+ */
-+void blk_sync_queue(struct request_queue *q)
-+{
-+	del_timer_sync(&q->unplug_timer);
-+	kblockd_flush_work(&q->unplug_work);
++out:
++	return err;
 +}
-+EXPORT_SYMBOL(blk_sync_queue);
 +
-+/**
-+ * blk_run_queue - run a single device queue
-+ * @q:	The queue to run
-+ */
-+void blk_run_queue(struct request_queue *q)
++static int crypto_ccm_setauthsize(struct crypto_aead *tfm,
++				  unsigned int authsize)
 +{
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(q->queue_lock, flags);
-+	blk_remove_plug(q);
-+
-+	/*
-+	 * Only recurse once to avoid overrunning the stack, let the unplug
-+	 * handling reinvoke the handler shortly if we already got there.
-+	 */
-+	if (!elv_queue_empty(q)) {
-+		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-+			q->request_fn(q);
-+			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
-+		} else {
-+			blk_plug_device(q);
-+			kblockd_schedule_work(&q->unplug_work);
-+		}
++	switch (authsize) {
++	case 4:
++	case 6:
++	case 8:
++	case 10:
++	case 12:
++	case 14:
++	case 16:
++		break;
++	default:
++		return -EINVAL;
 +	}
 +
-+	spin_unlock_irqrestore(q->queue_lock, flags);
-+}
-+EXPORT_SYMBOL(blk_run_queue);
-+
-+void blk_put_queue(struct request_queue *q)
-+{
-+	kobject_put(&q->kobj);
-+}
-+EXPORT_SYMBOL(blk_put_queue);
-+
-+void blk_cleanup_queue(struct request_queue * q)
-+{
-+	mutex_lock(&q->sysfs_lock);
-+	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
-+	mutex_unlock(&q->sysfs_lock);
-+
-+	if (q->elevator)
-+		elevator_exit(q->elevator);
-+
-+	blk_put_queue(q);
-+}
-+
-+EXPORT_SYMBOL(blk_cleanup_queue);
-+
-+static int blk_init_free_list(struct request_queue *q)
-+{
-+	struct request_list *rl = &q->rq;
-+
-+	rl->count[READ] = rl->count[WRITE] = 0;
-+	rl->starved[READ] = rl->starved[WRITE] = 0;
-+	rl->elvpriv = 0;
-+	init_waitqueue_head(&rl->wait[READ]);
-+	init_waitqueue_head(&rl->wait[WRITE]);
-+
-+	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-+				mempool_free_slab, request_cachep, q->node);
-+
-+	if (!rl->rq_pool)
-+		return -ENOMEM;
-+
 +	return 0;
 +}
 +
-+struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
-+{
-+	return blk_alloc_queue_node(gfp_mask, -1);
-+}
-+EXPORT_SYMBOL(blk_alloc_queue);
-+
-+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
-+{
-+	struct request_queue *q;
-+	int err;
-+
-+	q = kmem_cache_alloc_node(blk_requestq_cachep,
-+				gfp_mask | __GFP_ZERO, node_id);
-+	if (!q)
-+		return NULL;
-+
-+	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-+	q->backing_dev_info.unplug_io_data = q;
-+	err = bdi_init(&q->backing_dev_info);
-+	if (err) {
-+		kmem_cache_free(blk_requestq_cachep, q);
-+		return NULL;
-+	}
-+
-+	init_timer(&q->unplug_timer);
-+
-+	kobject_init(&q->kobj, &blk_queue_ktype);
-+
-+	mutex_init(&q->sysfs_lock);
-+
-+	return q;
-+}
-+EXPORT_SYMBOL(blk_alloc_queue_node);
-+
-+/**
-+ * blk_init_queue  - prepare a request queue for use with a block device
-+ * @rfn:  The function to be called to process requests that have been
-+ *        placed on the queue.
-+ * @lock: Request queue spin lock
-+ *
-+ * Description:
-+ *    If a block device wishes to use the standard request handling procedures,
-+ *    which sorts requests and coalesces adjacent requests, then it must
-+ *    call blk_init_queue().  The function @rfn will be called when there
-+ *    are requests on the queue that need to be processed.  If the device
-+ *    supports plugging, then @rfn may not be called immediately when requests
-+ *    are available on the queue, but may be called at some time later instead.
-+ *    Plugged queues are generally unplugged when a buffer belonging to one
-+ *    of the requests on the queue is needed, or due to memory pressure.
-+ *
-+ *    @rfn is not required, or even expected, to remove all requests off the
-+ *    queue, but only as many as it can handle at a time.  If it does leave
-+ *    requests on the queue, it is responsible for arranging that the requests
-+ *    get dealt with eventually.
-+ *
-+ *    The queue spin lock must be held while manipulating the requests on the
-+ *    request queue; this lock will be taken also from interrupt context, so irq
-+ *    disabling is needed for it.
-+ *
-+ *    Function returns a pointer to the initialized request queue, or NULL if
-+ *    it didn't succeed.
-+ *
-+ * Note:
-+ *    blk_init_queue() must be paired with a blk_cleanup_queue() call
-+ *    when the block device is deactivated (such as at module unload).
-+ **/
-+
-+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
-+{
-+	return blk_init_queue_node(rfn, lock, -1);
-+}
-+EXPORT_SYMBOL(blk_init_queue);
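(Illustrative only, not part of the patch: a minimal sketch of the init/cleanup pairing the comment above requires, assuming a hypothetical driver with my_request_fn and a module-level lock.)

static struct request_queue *my_queue;
static DEFINE_SPINLOCK(my_lock);

static int __init my_init(void)
{
	my_queue = blk_init_queue(my_request_fn, &my_lock);
	if (!my_queue)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	blk_cleanup_queue(my_queue);	/* pairs with blk_init_queue() */
}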
-+
-+struct request_queue *
-+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
++static int format_input(u8 *info, struct aead_request *req,
++			unsigned int cryptlen)
 +{
-+	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
-+
-+	if (!q)
-+		return NULL;
-+
-+	q->node = node_id;
-+	if (blk_init_free_list(q)) {
-+		kmem_cache_free(blk_requestq_cachep, q);
-+		return NULL;
-+	}
-+
-+	/*
-+	 * if caller didn't supply a lock, they get per-queue locking with
-+	 * our embedded lock
-+	 */
-+	if (!lock) {
-+		spin_lock_init(&q->__queue_lock);
-+		lock = &q->__queue_lock;
-+	}
-+
-+	q->request_fn		= rfn;
-+	q->prep_rq_fn		= NULL;
-+	q->unplug_fn		= generic_unplug_device;
-+	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
-+	q->queue_lock		= lock;
-+
-+	blk_queue_segment_boundary(q, 0xffffffff);
-+
-+	blk_queue_make_request(q, __make_request);
-+	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	unsigned int lp = req->iv[0];
++	unsigned int l = lp + 1;
++	unsigned int m;
 +
-+	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-+	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
++	m = crypto_aead_authsize(aead);
 +
-+	q->sg_reserved_size = INT_MAX;
++	memcpy(info, req->iv, 16);
 +
-+	/*
-+	 * all done
++	/* format control info per RFC 3610 and
++	 * NIST Special Publication 800-38C
 +	 */
-+	if (!elevator_init(q, NULL)) {
-+		blk_queue_congestion_threshold(q);
-+		return q;
-+	}
-+
-+	blk_put_queue(q);
-+	return NULL;
-+}
-+EXPORT_SYMBOL(blk_init_queue_node);
-+
-+int blk_get_queue(struct request_queue *q)
-+{
-+	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-+		kobject_get(&q->kobj);
-+		return 0;
-+	}
-+
-+	return 1;
-+}
-+
-+EXPORT_SYMBOL(blk_get_queue);
++	*info |= (8 * ((m - 2) / 2));
++	if (req->assoclen)
++		*info |= 64;
 +
-+static inline void blk_free_request(struct request_queue *q, struct request *rq)
-+{
-+	if (rq->cmd_flags & REQ_ELVPRIV)
-+		elv_put_request(q, rq);
-+	mempool_free(rq, q->rq.rq_pool);
++	return set_msg_len(info + 16 - l, cryptlen, l);
 +}
 +
-+static struct request *
-+blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
++static int format_adata(u8 *adata, unsigned int a)
 +{
-+	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-+
-+	if (!rq)
-+		return NULL;
++	int len = 0;
 +
-+	/*
-+	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
-+	 * see bio.h and blkdev.h
++	/* add control info for associated data
++	 * RFC 3610 and NIST Special Publication 800-38C
 +	 */
-+	rq->cmd_flags = rw | REQ_ALLOCED;
-+
-+	if (priv) {
-+		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-+			mempool_free(rq, q->rq.rq_pool);
-+			return NULL;
-+		}
-+		rq->cmd_flags |= REQ_ELVPRIV;
++	if (a < 65280) {
++		*(__be16 *)adata = cpu_to_be16(a);
++		len = 2;
++	} else  {
++		*(__be16 *)adata = cpu_to_be16(0xfffe);
++		*(__be32 *)&adata[2] = cpu_to_be32(a);
++		len = 6;
 +	}
 +
-+	return rq;
-+}
-+
-+/*
-+ * ioc_batching returns true if the ioc is a valid batching request and
-+ * should be given priority access to a request.
-+ */
-+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
-+{
-+	if (!ioc)
-+		return 0;
-+
-+	/*
-+	 * Make sure the process is able to allocate at least 1 request
-+	 * even if the batch times out, otherwise we could theoretically
-+	 * lose wakeups.
-+	 */
-+	return ioc->nr_batch_requests == q->nr_batching ||
-+		(ioc->nr_batch_requests > 0
-+		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
-+}
-+
-+/*
-+ * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
-+ * will cause the process to be a "batcher" on all queues in the system. This
-+ * is the behaviour we want though - once it gets a wakeup it should be given
-+ * a nice run.
-+ */
-+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
-+{
-+	if (!ioc || ioc_batching(q, ioc))
-+		return;
-+
-+	ioc->nr_batch_requests = q->nr_batching;
-+	ioc->last_waited = jiffies;
++	return len;
 +}
 +
-+static void __freed_request(struct request_queue *q, int rw)
++static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n,
++		       struct crypto_ccm_req_priv_ctx *pctx)
 +{
-+	struct request_list *rl = &q->rq;
-+
-+	if (rl->count[rw] < queue_congestion_off_threshold(q))
-+		blk_clear_queue_congested(q, rw);
++	unsigned int bs = 16;
++	u8 *odata = pctx->odata;
++	u8 *idata = pctx->idata;
++	int datalen, getlen;
 +
-+	if (rl->count[rw] + 1 <= q->nr_requests) {
-+		if (waitqueue_active(&rl->wait[rw]))
-+			wake_up(&rl->wait[rw]);
++	datalen = n;
 +
-+		blk_clear_queue_full(q, rw);
++	/* first time in here, block may be partially filled. */
++	getlen = bs - pctx->ilen;
++	if (datalen >= getlen) {
++		memcpy(idata + pctx->ilen, data, getlen);
++		crypto_xor(odata, idata, bs);
++		crypto_cipher_encrypt_one(tfm, odata, odata);
++		datalen -= getlen;
++		data += getlen;
++		pctx->ilen = 0;
 +	}
-+}
-+
-+/*
-+ * A request has just been released.  Account for it, update the full and
-+ * congestion status, wake up any waiters.   Called under q->queue_lock.
-+ */
-+static void freed_request(struct request_queue *q, int rw, int priv)
-+{
-+	struct request_list *rl = &q->rq;
-+
-+	rl->count[rw]--;
-+	if (priv)
-+		rl->elvpriv--;
 +
-+	__freed_request(q, rw);
-+
-+	if (unlikely(rl->starved[rw ^ 1]))
-+		__freed_request(q, rw ^ 1);
-+}
-+
-+#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
-+/*
-+ * Get a free request, queue_lock must be held.
-+ * Returns NULL on failure, with queue_lock held.
-+ * Returns !NULL on success, with queue_lock *not held*.
-+ */
-+static struct request *get_request(struct request_queue *q, int rw_flags,
-+				   struct bio *bio, gfp_t gfp_mask)
-+{
-+	struct request *rq = NULL;
-+	struct request_list *rl = &q->rq;
-+	struct io_context *ioc = NULL;
-+	const int rw = rw_flags & 0x01;
-+	int may_queue, priv;
-+
-+	may_queue = elv_may_queue(q, rw_flags);
-+	if (may_queue == ELV_MQUEUE_NO)
-+		goto rq_starved;
++	/* now encrypt rest of data */
++	while (datalen >= bs) {
++		crypto_xor(odata, data, bs);
++		crypto_cipher_encrypt_one(tfm, odata, odata);
 +
-+	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
-+		if (rl->count[rw]+1 >= q->nr_requests) {
-+			ioc = current_io_context(GFP_ATOMIC, q->node);
-+			/*
-+			 * The queue will fill after this allocation, so set
-+			 * it as full, and mark this process as "batching".
-+			 * This process will be allowed to complete a batch of
-+			 * requests, others will be blocked.
-+			 */
-+			if (!blk_queue_full(q, rw)) {
-+				ioc_set_batching(q, ioc);
-+				blk_set_queue_full(q, rw);
-+			} else {
-+				if (may_queue != ELV_MQUEUE_MUST
-+						&& !ioc_batching(q, ioc)) {
-+					/*
-+					 * The queue is full and the allocating
-+					 * process is not a "batcher", and not
-+					 * exempted by the IO scheduler
-+					 */
-+					goto out;
-+				}
-+			}
-+		}
-+		blk_set_queue_congested(q, rw);
++		datalen -= bs;
++		data += bs;
 +	}
 +
-+	/*
-+	 * Only allow batching queuers to allocate up to 50% over the defined
-+	 * limit of requests, otherwise we could have thousands of requests
-+	 * allocated with any setting of ->nr_requests
++	/* check and see if there's leftover data that wasn't
++	 * enough to fill a block.
 +	 */
-+	if (rl->count[rw] >= (3 * q->nr_requests / 2))
-+		goto out;
-+
-+	rl->count[rw]++;
-+	rl->starved[rw] = 0;
-+
-+	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-+	if (priv)
-+		rl->elvpriv++;
-+
-+	spin_unlock_irq(q->queue_lock);
-+
-+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
-+	if (unlikely(!rq)) {
-+		/*
-+		 * Allocation failed presumably due to memory. Undo anything
-+		 * we might have messed up.
-+		 *
-+		 * Allocating task should really be put onto the front of the
-+		 * wait queue, but this is pretty rare.
-+		 */
-+		spin_lock_irq(q->queue_lock);
-+		freed_request(q, rw, priv);
-+
-+		/*
-+		 * in the very unlikely event that allocation failed and no
-+		 * requests for this direction were pending, mark us starved
-+		 * so that freeing of a request in the other direction will
-+		 * notice us. another possible fix would be to split the
-+		 * rq mempool into READ and WRITE
-+		 */
-+rq_starved:
-+		if (unlikely(rl->count[rw] == 0))
-+			rl->starved[rw] = 1;
-+
-+		goto out;
++	if (datalen) {
++		memcpy(idata + pctx->ilen, data, datalen);
++		pctx->ilen += datalen;
 +	}
-+
-+	/*
-+	 * ioc may be NULL here, and ioc_batching will be false. That's
-+	 * OK, if the queue is under the request limit then requests need
-+	 * not count toward the nr_batch_requests limit. There will always
-+	 * be some limit enforced by BLK_BATCH_TIME.
-+	 */
-+	if (ioc_batching(q, ioc))
-+		ioc->nr_batch_requests--;
-+	
-+	rq_init(q, rq);
-+
-+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
-+out:
-+	return rq;
 +}
 +
-+/*
-+ * No available requests for this queue, unplug the device and wait for some
-+ * requests to become available.
-+ *
-+ * Called with q->queue_lock held, and returns with it unlocked.
-+ */
-+static struct request *get_request_wait(struct request_queue *q, int rw_flags,
-+					struct bio *bio)
++static void get_data_to_compute(struct crypto_cipher *tfm,
++			       struct crypto_ccm_req_priv_ctx *pctx,
++			       struct scatterlist *sg, unsigned int len)
 +{
-+	const int rw = rw_flags & 0x01;
-+	struct request *rq;
-+
-+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
-+	while (!rq) {
-+		DEFINE_WAIT(wait);
-+		struct request_list *rl = &q->rq;
-+
-+		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
-+				TASK_UNINTERRUPTIBLE);
-+
-+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-+
-+		if (!rq) {
-+			struct io_context *ioc;
-+
-+			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
-+
-+			__generic_unplug_device(q);
-+			spin_unlock_irq(q->queue_lock);
-+			io_schedule();
++	struct scatter_walk walk;
++	u8 *data_src;
++	int n;
 +
-+			/*
-+			 * After sleeping, we become a "batching" process and
-+			 * will be able to allocate at least one request, and
-+			 * up to a big batch of them for a small period of time.
-+			 * See ioc_batching, ioc_set_batching
-+			 */
-+			ioc = current_io_context(GFP_NOIO, q->node);
-+			ioc_set_batching(q, ioc);
++	scatterwalk_start(&walk, sg);
 +
-+			spin_lock_irq(q->queue_lock);
++	while (len) {
++		n = scatterwalk_clamp(&walk, len);
++		if (!n) {
++			scatterwalk_start(&walk, sg_next(walk.sg));
++			n = scatterwalk_clamp(&walk, len);
 +		}
-+		finish_wait(&rl->wait[rw], &wait);
-+	}
-+
-+	return rq;
-+}
-+
-+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
-+{
-+	struct request *rq;
-+
-+	BUG_ON(rw != READ && rw != WRITE);
-+
-+	spin_lock_irq(q->queue_lock);
-+	if (gfp_mask & __GFP_WAIT) {
-+		rq = get_request_wait(q, rw, NULL);
-+	} else {
-+		rq = get_request(q, rw, NULL, gfp_mask);
-+		if (!rq)
-+			spin_unlock_irq(q->queue_lock);
-+	}
-+	/* q->queue_lock is unlocked at this point */
-+
-+	return rq;
-+}
-+EXPORT_SYMBOL(blk_get_request);
-+
-+/**
-+ * blk_start_queueing - initiate dispatch of requests to device
-+ * @q:		request queue to kick into gear
-+ *
-+ * This is basically a helper to remove the need to know whether a queue
-+ * is plugged or not if someone just wants to initiate dispatch of requests
-+ * for this queue.
-+ *
-+ * The queue lock must be held with interrupts disabled.
-+ */
-+void blk_start_queueing(struct request_queue *q)
-+{
-+	if (!blk_queue_plugged(q))
-+		q->request_fn(q);
-+	else
-+		__generic_unplug_device(q);
-+}
-+EXPORT_SYMBOL(blk_start_queueing);
-+
-+/**
-+ * blk_requeue_request - put a request back on queue
-+ * @q:		request queue where request should be inserted
-+ * @rq:		request to be inserted
-+ *
-+ * Description:
-+ *    Drivers often keep queueing requests until the hardware cannot accept
-+ *    more, when that condition happens we need to put the request back
-+ *    on the queue. Must be called with queue lock held.
-+ */
-+void blk_requeue_request(struct request_queue *q, struct request *rq)
-+{
-+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
-+
-+	if (blk_rq_tagged(rq))
-+		blk_queue_end_tag(q, rq);
-+
-+	elv_requeue_request(q, rq);
-+}
-+
-+EXPORT_SYMBOL(blk_requeue_request);
-+
-+/**
-+ * blk_insert_request - insert a special request in to a request queue
-+ * @q:		request queue where request should be inserted
-+ * @rq:		request to be inserted
-+ * @at_head:	insert request at head or tail of queue
-+ * @data:	private data
-+ *
-+ * Description:
-+ *    Many block devices need to execute commands asynchronously, so they don't
-+ *    block the whole kernel from preemption during request execution.  This is
-+ *    accomplished normally by inserting artificial requests tagged as
-+ *    REQ_SPECIAL into the corresponding request queue, and letting them be
-+ *    scheduled for actual execution by the request queue.
-+ *
-+ *    We have the option of inserting the head or the tail of the queue.
-+ *    Typically we use the tail for new ioctls and so forth.  We use the head
-+ *    of the queue for things like a QUEUE_FULL message from a device, or a
-+ *    host that is unable to accept a particular command.
-+ */
-+void blk_insert_request(struct request_queue *q, struct request *rq,
-+			int at_head, void *data)
-+{
-+	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-+	unsigned long flags;
-+
-+	/*
-+	 * tell I/O scheduler that this isn't a regular read/write (ie it
-+	 * must not attempt merges on this) and that it acts as a soft
-+	 * barrier
-+	 */
-+	rq->cmd_type = REQ_TYPE_SPECIAL;
-+	rq->cmd_flags |= REQ_SOFTBARRIER;
-+
-+	rq->special = data;
-+
-+	spin_lock_irqsave(q->queue_lock, flags);
-+
-+	/*
-+	 * If command is tagged, release the tag
-+	 */
-+	if (blk_rq_tagged(rq))
-+		blk_queue_end_tag(q, rq);
-+
-+	drive_stat_acct(rq, 1);
-+	__elv_add_request(q, rq, where, 0);
-+	blk_start_queueing(q);
-+	spin_unlock_irqrestore(q->queue_lock, flags);
-+}
-+
-+EXPORT_SYMBOL(blk_insert_request);
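(Illustrative only, not part of the patch: a hedged sketch of queueing a driver-private command behind normal I/O, showing the tail insertion mentioned above. my_done and the cookie are hypothetical.)

static int my_queue_special(struct request_queue *q, void *cookie)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->end_io = my_done;			/* hypothetical completion hook */
	blk_insert_request(q, rq, 0, cookie);	/* at_head = 0: run after queued I/O */
	return 0;
}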
-+
-+/*
-+ * add-request adds a request to the linked list.
-+ * queue lock is held and interrupts disabled, as we muck with the
-+ * request queue list.
-+ */
-+static inline void add_request(struct request_queue * q, struct request * req)
-+{
-+	drive_stat_acct(req, 1);
-+
-+	/*
-+	 * elevator indicated where it wants this request to be
-+	 * inserted at elevator_merge time
-+	 */
-+	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
-+}
-+ 
-+/*
-+ * disk_round_stats()	- Round off the performance stats on a struct
-+ * disk_stats.
-+ *
-+ * The average IO queue length and utilisation statistics are maintained
-+ * by observing the current state of the queue length and the amount of
-+ * time it has been in this state for.
-+ *
-+ * Normally, that accounting is done on IO completion, but that can result
-+ * in more than a second's worth of IO being accounted for within any one
-+ * second, leading to >100% utilisation.  To deal with that, we call this
-+ * function to do a round-off before returning the results when reading
-+ * /proc/diskstats.  This accounts immediately for all queue usage up to
-+ * the current jiffies and restarts the counters again.
-+ */
-+void disk_round_stats(struct gendisk *disk)
-+{
-+	unsigned long now = jiffies;
-+
-+	if (now == disk->stamp)
-+		return;
-+
-+	if (disk->in_flight) {
-+		__disk_stat_add(disk, time_in_queue,
-+				disk->in_flight * (now - disk->stamp));
-+		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
-+	}
-+	disk->stamp = now;
-+}
-+
-+EXPORT_SYMBOL_GPL(disk_round_stats);
-+
-+/*
-+ * queue lock must be held
-+ */
-+void __blk_put_request(struct request_queue *q, struct request *req)
-+{
-+	if (unlikely(!q))
-+		return;
-+	if (unlikely(--req->ref_count))
-+		return;
-+
-+	elv_completed_request(q, req);
-+
-+	/*
-+	 * Request may not have originated from ll_rw_blk. If not,
-+	 * it didn't come out of our reserved rq pools
-+	 */
-+	if (req->cmd_flags & REQ_ALLOCED) {
-+		int rw = rq_data_dir(req);
-+		int priv = req->cmd_flags & REQ_ELVPRIV;
++		data_src = scatterwalk_map(&walk, 0);
 +
-+		BUG_ON(!list_empty(&req->queuelist));
-+		BUG_ON(!hlist_unhashed(&req->hash));
++		compute_mac(tfm, data_src, n, pctx);
++		len -= n;
 +
-+		blk_free_request(q, req);
-+		freed_request(q, rw, priv);
++		scatterwalk_unmap(data_src, 0);
++		scatterwalk_advance(&walk, n);
++		scatterwalk_done(&walk, 0, len);
++		if (len)
++			crypto_yield(pctx->flags);
 +	}
-+}
-+
-+EXPORT_SYMBOL_GPL(__blk_put_request);
 +
-+void blk_put_request(struct request *req)
-+{
-+	unsigned long flags;
-+	struct request_queue *q = req->q;
++	/* any leftover needs padding and is then encrypted */
++	if (pctx->ilen) {
++		int padlen;
++		u8 *odata = pctx->odata;
++		u8 *idata = pctx->idata;
 +
-+	/*
-+	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
-+	 * following if (q) test.
-+	 */
-+	if (q) {
-+		spin_lock_irqsave(q->queue_lock, flags);
-+		__blk_put_request(q, req);
-+		spin_unlock_irqrestore(q->queue_lock, flags);
++		padlen = 16 - pctx->ilen;
++		memset(idata + pctx->ilen, 0, padlen);
++		crypto_xor(odata, idata, 16);
++		crypto_cipher_encrypt_one(tfm, odata, odata);
++		pctx->ilen = 0;
 +	}
 +}
 +
-+EXPORT_SYMBOL(blk_put_request);
-+
-+void init_request_from_bio(struct request *req, struct bio *bio)
-+{
-+	req->cmd_type = REQ_TYPE_FS;
-+
-+	/*
-+	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-+	 */
-+	if (bio_rw_ahead(bio) || bio_failfast(bio))
-+		req->cmd_flags |= REQ_FAILFAST;
-+
-+	/*
-+	 * REQ_BARRIER implies no merging, but lets make it explicit
-+	 */
-+	if (unlikely(bio_barrier(bio)))
-+		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-+
-+	if (bio_sync(bio))
-+		req->cmd_flags |= REQ_RW_SYNC;
-+	if (bio_rw_meta(bio))
-+		req->cmd_flags |= REQ_RW_META;
-+
-+	req->errors = 0;
-+	req->hard_sector = req->sector = bio->bi_sector;
-+	req->ioprio = bio_prio(bio);
-+	req->start_time = jiffies;
-+	blk_rq_bio_prep(req->q, req, bio);
-+}
-+
-+static int __make_request(struct request_queue *q, struct bio *bio)
++static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
++			   unsigned int cryptlen)
 +{
-+	struct request *req;
-+	int el_ret, nr_sectors, barrier, err;
-+	const unsigned short prio = bio_prio(bio);
-+	const int sync = bio_sync(bio);
-+	int rw_flags;
-+
-+	nr_sectors = bio_sectors(bio);
-+
-+	/*
-+	 * low level driver can indicate that it wants pages above a
-+	 * certain limit bounced to low memory (ie for highmem, or even
-+	 * ISA dma in theory)
-+	 */
-+	blk_queue_bounce(q, &bio);
-+
-+	barrier = bio_barrier(bio);
-+	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
-+		err = -EOPNOTSUPP;
-+		goto end_io;
-+	}
-+
-+	spin_lock_irq(q->queue_lock);
-+
-+	if (unlikely(barrier) || elv_queue_empty(q))
-+		goto get_rq;
-+
-+	el_ret = elv_merge(q, &req, bio);
-+	switch (el_ret) {
-+		case ELEVATOR_BACK_MERGE:
-+			BUG_ON(!rq_mergeable(req));
-+
-+			if (!ll_back_merge_fn(q, req, bio))
-+				break;
-+
-+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
-+
-+			req->biotail->bi_next = bio;
-+			req->biotail = bio;
-+			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-+			req->ioprio = ioprio_best(req->ioprio, prio);
-+			drive_stat_acct(req, 0);
-+			if (!attempt_back_merge(q, req))
-+				elv_merged_request(q, req, el_ret);
-+			goto out;
-+
-+		case ELEVATOR_FRONT_MERGE:
-+			BUG_ON(!rq_mergeable(req));
-+
-+			if (!ll_front_merge_fn(q, req, bio))
-+				break;
-+
-+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
++	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
++	struct crypto_cipher *cipher = ctx->cipher;
++	unsigned int assoclen = req->assoclen;
++	u8 *odata = pctx->odata;
++	u8 *idata = pctx->idata;
++	int err;
 +
-+			bio->bi_next = req->bio;
-+			req->bio = bio;
++	/* format control data for input */
++	err = format_input(odata, req, cryptlen);
++	if (err)
++		goto out;
 +
-+			/*
-+			 * may not be valid. If the low level driver said
-+			 * it didn't need a bounce buffer then it better
-+			 * not touch req->buffer either...
-+			 */
-+			req->buffer = bio_data(bio);
-+			req->current_nr_sectors = bio_cur_sectors(bio);
-+			req->hard_cur_sectors = req->current_nr_sectors;
-+			req->sector = req->hard_sector = bio->bi_sector;
-+			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-+			req->ioprio = ioprio_best(req->ioprio, prio);
-+			drive_stat_acct(req, 0);
-+			if (!attempt_front_merge(q, req))
-+				elv_merged_request(q, req, el_ret);
-+			goto out;
++	/* encrypt first block to use as start in computing mac  */
++	crypto_cipher_encrypt_one(cipher, odata, odata);
 +
-+		/* ELV_NO_MERGE: elevator says don't/can't merge. */
-+		default:
-+			;
++	/* format associated data and compute into mac */
++	if (assoclen) {
++		pctx->ilen = format_adata(idata, assoclen);
++		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
 +	}
 +
-+get_rq:
-+	/*
-+	 * This sync check and mask will be re-done in init_request_from_bio(),
-+	 * but we need to set it earlier to expose the sync flag to the
-+	 * rq allocator and io schedulers.
-+	 */
-+	rw_flags = bio_data_dir(bio);
-+	if (sync)
-+		rw_flags |= REQ_RW_SYNC;
-+
-+	/*
-+	 * Grab a free request. This might sleep but cannot fail.
-+	 * Returns with the queue unlocked.
-+	 */
-+	req = get_request_wait(q, rw_flags, bio);
-+
-+	/*
-+	 * After dropping the lock and possibly sleeping here, our request
-+	 * may now be mergeable after it had proven unmergeable (above).
-+	 * We don't worry about that case for efficiency. It won't happen
-+	 * often, and the elevators are able to handle it.
-+	 */
-+	init_request_from_bio(req, bio);
++	/* compute plaintext into mac */
++	get_data_to_compute(cipher, pctx, plain, cryptlen);
 +
-+	spin_lock_irq(q->queue_lock);
-+	if (elv_queue_empty(q))
-+		blk_plug_device(q);
-+	add_request(q, req);
 +out:
-+	if (sync)
-+		__generic_unplug_device(q);
-+
-+	spin_unlock_irq(q->queue_lock);
-+	return 0;
-+
-+end_io:
-+	bio_endio(bio, err);
-+	return 0;
-+}
-+
-+/*
-+ * If bio->bi_dev is a partition, remap the location
-+ */
-+static inline void blk_partition_remap(struct bio *bio)
-+{
-+	struct block_device *bdev = bio->bi_bdev;
-+
-+	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
-+		struct hd_struct *p = bdev->bd_part;
-+		const int rw = bio_data_dir(bio);
-+
-+		p->sectors[rw] += bio_sectors(bio);
-+		p->ios[rw]++;
-+
-+		bio->bi_sector += p->start_sect;
-+		bio->bi_bdev = bdev->bd_contains;
-+
-+		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
-+				    bdev->bd_dev, bio->bi_sector,
-+				    bio->bi_sector - p->start_sect);
-+	}
++	return err;
 +}
 +
-+static void handle_bad_sector(struct bio *bio)
++static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
 +{
-+	char b[BDEVNAME_SIZE];
-+
-+	printk(KERN_INFO "attempt to access beyond end of device\n");
-+	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
-+			bdevname(bio->bi_bdev, b),
-+			bio->bi_rw,
-+			(unsigned long long)bio->bi_sector + bio_sectors(bio),
-+			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
-+
-+	set_bit(BIO_EOF, &bio->bi_flags);
-+}
-+
-+#ifdef CONFIG_FAIL_MAKE_REQUEST
-+
-+static DECLARE_FAULT_ATTR(fail_make_request);
++	struct aead_request *req = areq->data;
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
++	u8 *odata = pctx->odata;
 +
-+static int __init setup_fail_make_request(char *str)
-+{
-+	return setup_fault_attr(&fail_make_request, str);
++	if (!err)
++		scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
++					 crypto_aead_authsize(aead), 1);
++	aead_request_complete(req, err);
 +}
-+__setup("fail_make_request=", setup_fail_make_request);
 +
-+static int should_fail_request(struct bio *bio)
++static inline int crypto_ccm_check_iv(const u8 *iv)
 +{
-+	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
-+	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
-+		return should_fail(&fail_make_request, bio->bi_size);
++	/* 2 <= L <= 8, so 1 <= L' <= 7. */
++	if (1 > iv[0] || iv[0] > 7)
++		return -EINVAL;
 +
 +	return 0;
 +}
 +
-+static int __init fail_make_request_debugfs(void)
++static int crypto_ccm_encrypt(struct aead_request *req)
 +{
-+	return init_fault_attr_dentries(&fail_make_request,
-+					"fail_make_request");
-+}
-+
-+late_initcall(fail_make_request_debugfs);
-+
-+#else /* CONFIG_FAIL_MAKE_REQUEST */
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
++	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
++	struct ablkcipher_request *abreq = &pctx->abreq;
++	struct scatterlist *dst;
++	unsigned int cryptlen = req->cryptlen;
++	u8 *odata = pctx->odata;
++	u8 *iv = req->iv;
++	int err;
 +
-+static inline int should_fail_request(struct bio *bio)
-+{
-+	return 0;
-+}
++	err = crypto_ccm_check_iv(iv);
++	if (err)
++		return err;
 +
-+#endif /* CONFIG_FAIL_MAKE_REQUEST */
++	pctx->flags = aead_request_flags(req);
 +
-+/*
-+ * Check whether this bio extends beyond the end of the device.
-+ */
-+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
-+{
-+	sector_t maxsector;
++	err = crypto_ccm_auth(req, req->src, cryptlen);
++	if (err)
++		return err;
 +
-+	if (!nr_sectors)
-+		return 0;
++	 /* Note: rfc 3610 and NIST 800-38C require counter of
++	 * zero to encrypt auth tag.
++	 */
++	memset(iv + 15 - iv[0], 0, iv[0] + 1);
 +
-+	/* Test device or partition size, when known. */
-+	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-+	if (maxsector) {
-+		sector_t sector = bio->bi_sector;
++	sg_init_table(pctx->src, 2);
++	sg_set_buf(pctx->src, odata, 16);
++	scatterwalk_sg_chain(pctx->src, 2, req->src);
 +
-+		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-+			/*
-+			 * This may well happen - the kernel calls bread()
-+			 * without checking the size of the device, e.g., when
-+			 * mounting a device.
-+			 */
-+			handle_bad_sector(bio);
-+			return 1;
-+		}
++	dst = pctx->src;
++	if (req->src != req->dst) {
++		sg_init_table(pctx->dst, 2);
++		sg_set_buf(pctx->dst, odata, 16);
++		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
++		dst = pctx->dst;
 +	}
 +
-+	return 0;
++	ablkcipher_request_set_tfm(abreq, ctx->ctr);
++	ablkcipher_request_set_callback(abreq, pctx->flags,
++					crypto_ccm_encrypt_done, req);
++	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
++	err = crypto_ablkcipher_encrypt(abreq);
++	if (err)
++		return err;
++
++	/* copy authtag to end of dst */
++	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
++				 crypto_aead_authsize(aead), 1);
++	return err;
 +}
 +
-+/**
-+ * generic_make_request: hand a buffer to its device driver for I/O
-+ * @bio:  The bio describing the location in memory and on the device.
-+ *
-+ * generic_make_request() is used to make I/O requests of block
-+ * devices. It is passed a &struct bio, which describes the I/O that needs
-+ * to be done.
-+ *
-+ * generic_make_request() does not return any status.  The
-+ * success/failure status of the request, along with notification of
-+ * completion, is delivered asynchronously through the bio->bi_end_io
-+ * function described (one day) elsewhere.
-+ *
-+ * The caller of generic_make_request must make sure that bi_io_vec
-+ * are set to describe the memory buffer, and that bi_dev and bi_sector are
-+ * set to describe the device address, and the
-+ * bi_end_io and optionally bi_private are set to describe how
-+ * completion notification should be signaled.
-+ *
-+ * generic_make_request and the drivers it calls may use bi_next if this
-+ * bio happens to be merged with someone else, and may change bi_dev and
-+ * bi_sector for remaps as it sees fit.  So the values of these fields
-+ * should NOT be depended on after the call to generic_make_request.
-+ */
-+static inline void __generic_make_request(struct bio *bio)
++static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
++				   int err)
 +{
-+	struct request_queue *q;
-+	sector_t old_sector;
-+	int ret, nr_sectors = bio_sectors(bio);
-+	dev_t old_dev;
-+	int err = -EIO;
++	struct aead_request *req = areq->data;
++	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	unsigned int authsize = crypto_aead_authsize(aead);
++	unsigned int cryptlen = req->cryptlen - authsize;
 +
-+	might_sleep();
++	if (!err) {
++		err = crypto_ccm_auth(req, req->dst, cryptlen);
++		if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
++			err = -EBADMSG;
++	}
++	aead_request_complete(req, err);
++}
 +
-+	if (bio_check_eod(bio, nr_sectors))
-+		goto end_io;
++static int crypto_ccm_decrypt(struct aead_request *req)
++{
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
++	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
++	struct ablkcipher_request *abreq = &pctx->abreq;
++	struct scatterlist *dst;
++	unsigned int authsize = crypto_aead_authsize(aead);
++	unsigned int cryptlen = req->cryptlen;
++	u8 *authtag = pctx->auth_tag;
++	u8 *odata = pctx->odata;
++	u8 *iv = req->iv;
++	int err;
 +
-+	/*
-+	 * Resolve the mapping until finished. (drivers are
-+	 * still free to implement/resolve their own stacking
-+	 * by explicitly returning 0)
-+	 *
-+	 * NOTE: we don't repeat the blk_size check for each new device.
-+	 * Stacking drivers are expected to know what they are doing.
-+	 */
-+	old_sector = -1;
-+	old_dev = 0;
-+	do {
-+		char b[BDEVNAME_SIZE];
++	if (cryptlen < authsize)
++		return -EINVAL;
++	cryptlen -= authsize;
 +
-+		q = bdev_get_queue(bio->bi_bdev);
-+		if (!q) {
-+			printk(KERN_ERR
-+			       "generic_make_request: Trying to access "
-+				"nonexistent block-device %s (%Lu)\n",
-+				bdevname(bio->bi_bdev, b),
-+				(long long) bio->bi_sector);
-+end_io:
-+			bio_endio(bio, err);
-+			break;
-+		}
++	err = crypto_ccm_check_iv(iv);
++	if (err)
++		return err;
 +
-+		if (unlikely(nr_sectors > q->max_hw_sectors)) {
-+			printk("bio too big device %s (%u > %u)\n", 
-+				bdevname(bio->bi_bdev, b),
-+				bio_sectors(bio),
-+				q->max_hw_sectors);
-+			goto end_io;
-+		}
++	pctx->flags = aead_request_flags(req);
 +
-+		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-+			goto end_io;
++	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
 +
-+		if (should_fail_request(bio))
-+			goto end_io;
++	memset(iv + 15 - iv[0], 0, iv[0] + 1);
 +
-+		/*
-+		 * If this device has partitions, remap block n
-+		 * of partition p to block n+start(p) of the disk.
-+		 */
-+		blk_partition_remap(bio);
++	sg_init_table(pctx->src, 2);
++	sg_set_buf(pctx->src, authtag, 16);
++	scatterwalk_sg_chain(pctx->src, 2, req->src);
 +
-+		if (old_sector != -1)
-+			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
-+					    old_sector);
++	dst = pctx->src;
++	if (req->src != req->dst) {
++		sg_init_table(pctx->dst, 2);
++		sg_set_buf(pctx->dst, authtag, 16);
++		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
++		dst = pctx->dst;
++	}
 +
-+		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
++	ablkcipher_request_set_tfm(abreq, ctx->ctr);
++	ablkcipher_request_set_callback(abreq, pctx->flags,
++					crypto_ccm_decrypt_done, req);
++	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
++	err = crypto_ablkcipher_decrypt(abreq);
++	if (err)
++		return err;
 +
-+		old_sector = bio->bi_sector;
-+		old_dev = bio->bi_bdev->bd_dev;
++	err = crypto_ccm_auth(req, req->dst, cryptlen);
++	if (err)
++		return err;
 +
-+		if (bio_check_eod(bio, nr_sectors))
-+			goto end_io;
-+		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
-+			err = -EOPNOTSUPP;
-+			goto end_io;
-+		}
++	/* verify */
++	if (memcmp(authtag, odata, authsize))
++		return -EBADMSG;
 +
-+		ret = q->make_request_fn(q, bio);
-+	} while (ret);
++	return err;
 +}
 +
-+/*
-+ * We only want one ->make_request_fn to be active at a time,
-+ * else stack usage with stacked devices could be a problem.
-+ * So use current->bio_{list,tail} to keep a list of requests
-+ * submitted by a make_request_fn function.
-+ * current->bio_tail is also used as a flag to say if
-+ * generic_make_request is currently active in this task or not.
-+ * If it is NULL, then no make_request is active.  If it is non-NULL,
-+ * then a make_request is active, and new requests should be added
-+ * at the tail
-+ */
-+void generic_make_request(struct bio *bio)
++static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
 +{
-+	if (current->bio_tail) {
-+		/* make_request is active */
-+		*(current->bio_tail) = bio;
-+		bio->bi_next = NULL;
-+		current->bio_tail = &bio->bi_next;
-+		return;
-+	}
-+	/* following loop may be a bit non-obvious, and so deserves some
-+	 * explanation.
-+	 * Before entering the loop, bio->bi_next is NULL (as all callers
-+	 * ensure that) so we have a list with a single bio.
-+	 * We pretend that we have just taken it off a longer list, so
-+	 * we assign bio_list to the next (which is NULL) and bio_tail
-+	 * to &bio_list, thus initialising the bio_list of new bios to be
-+	 * added.  __generic_make_request may indeed add some more bios
-+	 * through a recursive call to generic_make_request.  If it
-+	 * did, we find a non-NULL value in bio_list and re-enter the loop
-+	 * from the top.  In this case we really did just take the bio
-+	 * off the top of the list (no pretending) and so fix up bio_list and
-+	 * bio_tail or bi_next, and call into __generic_make_request again.
-+	 *
-+	 * The loop was structured like this to make only one call to
-+	 * __generic_make_request (which is important as it is large and
-+	 * inlined) and to keep the structure simple.
-+	 */
-+	BUG_ON(bio->bi_next);
-+	do {
-+		current->bio_list = bio->bi_next;
-+		if (bio->bi_next == NULL)
-+			current->bio_tail = &current->bio_list;
-+		else
-+			bio->bi_next = NULL;
-+		__generic_make_request(bio);
-+		bio = current->bio_list;
-+	} while (bio);
-+	current->bio_tail = NULL; /* deactivate */
-+}
++	struct crypto_instance *inst = (void *)tfm->__crt_alg;
++	struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
++	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct crypto_cipher *cipher;
++	struct crypto_ablkcipher *ctr;
++	unsigned long align;
++	int err;
 +
-+EXPORT_SYMBOL(generic_make_request);
++	cipher = crypto_spawn_cipher(&ictx->cipher);
++	if (IS_ERR(cipher))
++		return PTR_ERR(cipher);
 +
-+/**
-+ * submit_bio: submit a bio to the block device layer for I/O
-+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
-+ * @bio: The &struct bio which describes the I/O
-+ *
-+ * submit_bio() is very similar in purpose to generic_make_request(), and
-+ * uses that function to do most of the work. Both are fairly rough
-+ * interfaces, @bio must be presetup and ready for I/O.
-+ *
-+ */
-+void submit_bio(int rw, struct bio *bio)
-+{
-+	int count = bio_sectors(bio);
++	ctr = crypto_spawn_skcipher(&ictx->ctr);
++	err = PTR_ERR(ctr);
++	if (IS_ERR(ctr))
++		goto err_free_cipher;
 +
-+	bio->bi_rw |= rw;
++	ctx->cipher = cipher;
++	ctx->ctr = ctr;
 +
-+	/*
-+	 * If it's a regular read/write or a barrier with data attached,
-+	 * go through the normal accounting stuff before submission.
-+	 */
-+	if (!bio_empty_barrier(bio)) {
++	align = crypto_tfm_alg_alignmask(tfm);
++	align &= ~(crypto_tfm_ctx_alignment() - 1);
++	tfm->crt_aead.reqsize = align +
++				sizeof(struct crypto_ccm_req_priv_ctx) +
++				crypto_ablkcipher_reqsize(ctr);
 +
-+		BIO_BUG_ON(!bio->bi_size);
-+		BIO_BUG_ON(!bio->bi_io_vec);
++	return 0;
 +
-+		if (rw & WRITE) {
-+			count_vm_events(PGPGOUT, count);
-+		} else {
-+			task_io_account_read(bio->bi_size);
-+			count_vm_events(PGPGIN, count);
-+		}
++err_free_cipher:
++	crypto_free_cipher(cipher);
++	return err;
++}
 +
-+		if (unlikely(block_dump)) {
-+			char b[BDEVNAME_SIZE];
-+			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-+			current->comm, task_pid_nr(current),
-+				(rw & WRITE) ? "WRITE" : "READ",
-+				(unsigned long long)bio->bi_sector,
-+				bdevname(bio->bi_bdev,b));
-+		}
-+	}
++static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
++{
++	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+	generic_make_request(bio);
++	crypto_free_cipher(ctx->cipher);
++	crypto_free_ablkcipher(ctx->ctr);
 +}
 +
-+EXPORT_SYMBOL(submit_bio);
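(Illustrative only, not part of the patch: a sketch of the caller contract spelled out for generic_make_request()/submit_bio() above, i.e. set up bi_bdev, bi_sector, the vector and the completion hook before submitting. Helper names are hypothetical and error handling is abbreviated.)

static void my_end_io(struct bio *bio, int error)
{
	/* may run in interrupt context once the I/O completes */
	complete(bio->bi_private);
	bio_put(bio);
}

static int my_read_sector(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = my_end_io;
	bio->bi_private = &done;
	bio_add_page(bio, page, 512, 0);

	submit_bio(READ, bio);
	wait_for_completion(&done);
	return 0;
}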
-+
-+/**
-+ * __end_that_request_first - end I/O on a request
-+ * @req:      the request being processed
-+ * @error:    0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete
-+ *
-+ * Description:
-+ *     Ends I/O on a number of bytes attached to @req, and sets it up
-+ *     for the next range of segments (if any) in the cluster.
-+ *
-+ * Return:
-+ *     0 - we are done with this request, call end_that_request_last()
-+ *     1 - still buffers pending for this request
-+ **/
-+static int __end_that_request_first(struct request *req, int error,
-+				    int nr_bytes)
++static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
++						       const char *full_name,
++						       const char *ctr_name,
++						       const char *cipher_name)
 +{
-+	int total_bytes, bio_nbytes, next_idx = 0;
-+	struct bio *bio;
-+
-+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
++	struct crypto_attr_type *algt;
++	struct crypto_instance *inst;
++	struct crypto_alg *ctr;
++	struct crypto_alg *cipher;
++	struct ccm_instance_ctx *ictx;
++	int err;
 +
-+	/*
-+	 * for a REQ_BLOCK_PC request, we want to carry any eventual
-+	 * sense key with us all the way through
-+	 */
-+	if (!blk_pc_request(req))
-+		req->errors = 0;
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
++		return ERR_PTR(err);
 +
-+	if (error) {
-+		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-+			printk("end_request: I/O error, dev %s, sector %llu\n",
-+				req->rq_disk ? req->rq_disk->disk_name : "?",
-+				(unsigned long long)req->sector);
-+	}
++	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
++		return ERR_PTR(-EINVAL);
 +
-+	if (blk_fs_request(req) && req->rq_disk) {
-+		const int rw = rq_data_dir(req);
++	cipher = crypto_alg_mod_lookup(cipher_name,  CRYPTO_ALG_TYPE_CIPHER,
++				       CRYPTO_ALG_TYPE_MASK);
++	err = PTR_ERR(cipher);
++	if (IS_ERR(cipher))
++		return ERR_PTR(err);
 +
-+		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
-+	}
++	err = -EINVAL;
++	if (cipher->cra_blocksize != 16)
++		goto out_put_cipher;
 +
-+	total_bytes = bio_nbytes = 0;
-+	while ((bio = req->bio) != NULL) {
-+		int nbytes;
++	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
++	err = -ENOMEM;
++	if (!inst)
++		goto out_put_cipher;
 +
-+		/*
-+		 * For an empty barrier request, the low level driver must
-+		 * store a potential error location in ->sector. We pass
-+		 * that back up in ->bi_sector.
-+		 */
-+		if (blk_empty_barrier(req))
-+			bio->bi_sector = req->sector;
++	ictx = crypto_instance_ctx(inst);
 +
-+		if (nr_bytes >= bio->bi_size) {
-+			req->bio = bio->bi_next;
-+			nbytes = bio->bi_size;
-+			req_bio_endio(req, bio, nbytes, error);
-+			next_idx = 0;
-+			bio_nbytes = 0;
-+		} else {
-+			int idx = bio->bi_idx + next_idx;
++	err = crypto_init_spawn(&ictx->cipher, cipher, inst,
++				CRYPTO_ALG_TYPE_MASK);
++	if (err)
++		goto err_free_inst;
 +
-+			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-+				blk_dump_rq_flags(req, "__end_that");
-+				printk("%s: bio idx %d >= vcnt %d\n",
-+						__FUNCTION__,
-+						bio->bi_idx, bio->bi_vcnt);
-+				break;
-+			}
++	crypto_set_skcipher_spawn(&ictx->ctr, inst);
++	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
++				   crypto_requires_sync(algt->type,
++							algt->mask));
++	if (err)
++		goto err_drop_cipher;
 +
-+			nbytes = bio_iovec_idx(bio, idx)->bv_len;
-+			BIO_BUG_ON(nbytes > bio->bi_size);
++	ctr = crypto_skcipher_spawn_alg(&ictx->ctr);
 +
-+			/*
-+			 * not a complete bvec done
-+			 */
-+			if (unlikely(nbytes > nr_bytes)) {
-+				bio_nbytes += nr_bytes;
-+				total_bytes += nr_bytes;
-+				break;
-+			}
++	/* Not a stream cipher? */
++	err = -EINVAL;
++	if (ctr->cra_blocksize != 1)
++		goto err_drop_ctr;
 +
-+			/*
-+			 * advance to the next vector
-+			 */
-+			next_idx++;
-+			bio_nbytes += nbytes;
-+		}
++	/* We want the real thing! */
++	if (ctr->cra_ablkcipher.ivsize != 16)
++		goto err_drop_ctr;
 +
-+		total_bytes += nbytes;
-+		nr_bytes -= nbytes;
++	err = -ENAMETOOLONG;
++	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
++		     "ccm_base(%s,%s)", ctr->cra_driver_name,
++		     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
++		goto err_drop_ctr;
 +
-+		if ((bio = req->bio)) {
-+			/*
-+			 * end more in this run, or just return 'not-done'
-+			 */
-+			if (unlikely(nr_bytes <= 0))
-+				break;
-+		}
-+	}
++	memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
 +
-+	/*
-+	 * completely done
-+	 */
-+	if (!req->bio)
-+		return 0;
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
++	inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
++	inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
++	inst->alg.cra_blocksize = 1;
++	inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
++				  (__alignof__(u32) - 1);
++	inst->alg.cra_type = &crypto_aead_type;
++	inst->alg.cra_aead.ivsize = 16;
++	inst->alg.cra_aead.maxauthsize = 16;
++	inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
++	inst->alg.cra_init = crypto_ccm_init_tfm;
++	inst->alg.cra_exit = crypto_ccm_exit_tfm;
++	inst->alg.cra_aead.setkey = crypto_ccm_setkey;
++	inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
++	inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
++	inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;
 +
-+	/*
-+	 * if the request wasn't completed, update state
-+	 */
-+	if (bio_nbytes) {
-+		req_bio_endio(req, bio, bio_nbytes, error);
-+		bio->bi_idx += next_idx;
-+		bio_iovec(bio)->bv_offset += nr_bytes;
-+		bio_iovec(bio)->bv_len -= nr_bytes;
-+	}
++out:
++	crypto_mod_put(cipher);
++	return inst;
 +
-+	blk_recalc_rq_sectors(req, total_bytes >> 9);
-+	blk_recalc_rq_segments(req);
-+	return 1;
++err_drop_ctr:
++	crypto_drop_skcipher(&ictx->ctr);
++err_drop_cipher:
++	crypto_drop_spawn(&ictx->cipher);
++err_free_inst:
++	kfree(inst);
++out_put_cipher:
++	inst = ERR_PTR(err);
++	goto out;
 +}
 +
-+/*
-+ * splice the completion data to a local structure and hand off to
-+ * process_completion_queue() to complete the requests
-+ */
-+static void blk_done_softirq(struct softirq_action *h)
++static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
 +{
-+	struct list_head *cpu_list, local_list;
++	int err;
++	const char *cipher_name;
++	char ctr_name[CRYPTO_MAX_ALG_NAME];
++	char full_name[CRYPTO_MAX_ALG_NAME];
 +
-+	local_irq_disable();
-+	cpu_list = &__get_cpu_var(blk_cpu_done);
-+	list_replace_init(cpu_list, &local_list);
-+	local_irq_enable();
++	cipher_name = crypto_attr_alg_name(tb[1]);
++	err = PTR_ERR(cipher_name);
++	if (IS_ERR(cipher_name))
++		return ERR_PTR(err);
 +
-+	while (!list_empty(&local_list)) {
-+		struct request *rq = list_entry(local_list.next, struct request, donelist);
++	if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
++		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
++		return ERR_PTR(-ENAMETOOLONG);
 +
-+		list_del_init(&rq->donelist);
-+		rq->q->softirq_done_fn(rq);
-+	}
++	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
++	    CRYPTO_MAX_ALG_NAME)
++		return ERR_PTR(-ENAMETOOLONG);
++
++	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
 +}
 +
-+static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-+			  void *hcpu)
++static void crypto_ccm_free(struct crypto_instance *inst)
 +{
-+	/*
-+	 * If a CPU goes away, splice its entries to the current CPU
-+	 * and trigger a run of the softirq
-+	 */
-+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-+		int cpu = (unsigned long) hcpu;
-+
-+		local_irq_disable();
-+		list_splice_init(&per_cpu(blk_cpu_done, cpu),
-+				 &__get_cpu_var(blk_cpu_done));
-+		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-+		local_irq_enable();
-+	}
++	struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
 +
-+	return NOTIFY_OK;
++	crypto_drop_spawn(&ctx->cipher);
++	crypto_drop_skcipher(&ctx->ctr);
++	kfree(inst);
 +}
 +
-+
-+static struct notifier_block blk_cpu_notifier __cpuinitdata = {
-+	.notifier_call	= blk_cpu_notify,
++static struct crypto_template crypto_ccm_tmpl = {
++	.name = "ccm",
++	.alloc = crypto_ccm_alloc,
++	.free = crypto_ccm_free,
++	.module = THIS_MODULE,
 +};
 +
-+/**
-+ * blk_complete_request - end I/O on a request
-+ * @req:      the request being processed
-+ *
-+ * Description:
-+ *     Ends all I/O on a request. It does not handle partial completions,
-+ *     unless the driver actually implements this in its completion callback
-+ *     through requeueing. The actual completion happens out-of-order,
-+ *     through a softirq handler. The user must have registered a completion
-+ *     callback through blk_queue_softirq_done().
-+ **/
-+
-+void blk_complete_request(struct request *req)
++static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
 +{
-+	struct list_head *cpu_list;
-+	unsigned long flags;
++	int err;
++	const char *ctr_name;
++	const char *cipher_name;
++	char full_name[CRYPTO_MAX_ALG_NAME];
 +
-+	BUG_ON(!req->q->softirq_done_fn);
-+		
-+	local_irq_save(flags);
++	ctr_name = crypto_attr_alg_name(tb[1]);
++	err = PTR_ERR(ctr_name);
++	if (IS_ERR(ctr_name))
++		return ERR_PTR(err);
 +
-+	cpu_list = &__get_cpu_var(blk_cpu_done);
-+	list_add_tail(&req->donelist, cpu_list);
-+	raise_softirq_irqoff(BLOCK_SOFTIRQ);
++	cipher_name = crypto_attr_alg_name(tb[2]);
++	err = PTR_ERR(cipher_name);
++	if (IS_ERR(cipher_name))
++		return ERR_PTR(err);
 +
-+	local_irq_restore(flags);
++	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
++		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
++		return ERR_PTR(-ENAMETOOLONG);
++
++	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
 +}
 +
-+EXPORT_SYMBOL(blk_complete_request);
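(Illustrative only, not part of the patch: a hedged sketch of the split described above. The hard-IRQ handler only hands the request to the block softirq; the softirq_done callback, registered earlier with blk_queue_softirq_done(), finishes it. my_fetch_completed() is hypothetical.)

static void my_softirq_done(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request(rq, rq->errors ? -EIO : 0, blk_rq_bytes(rq));
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static irqreturn_t my_isr(int irq, void *data)
{
	struct request *rq = my_fetch_completed(data);	/* hypothetical */

	blk_complete_request(rq);
	return IRQ_HANDLED;
}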
-+	
-+/*
-+ * queue lock must be held
-+ */
-+static void end_that_request_last(struct request *req, int error)
-+{
-+	struct gendisk *disk = req->rq_disk;
++static struct crypto_template crypto_ccm_base_tmpl = {
++	.name = "ccm_base",
++	.alloc = crypto_ccm_base_alloc,
++	.free = crypto_ccm_free,
++	.module = THIS_MODULE,
++};
 +
-+	if (blk_rq_tagged(req))
-+		blk_queue_end_tag(req->q, req);
++static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
++				 unsigned int keylen)
++{
++	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
++	struct crypto_aead *child = ctx->child;
++	int err;
 +
-+	if (blk_queued_rq(req))
-+		blkdev_dequeue_request(req);
++	if (keylen < 3)
++		return -EINVAL;
 +
-+	if (unlikely(laptop_mode) && blk_fs_request(req))
-+		laptop_io_completion();
++	keylen -= 3;
++	memcpy(ctx->nonce, key + keylen, 3);
 +
-+	/*
-+	 * Account IO completion.  bar_rq isn't accounted as a normal
-+	 * IO on queueing nor completion.  Accounting the containing
-+	 * request is enough.
-+	 */
-+	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
-+		unsigned long duration = jiffies - req->start_time;
-+		const int rw = rq_data_dir(req);
++	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
++	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
++				     CRYPTO_TFM_REQ_MASK);
++	err = crypto_aead_setkey(child, key, keylen);
++	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
++				      CRYPTO_TFM_RES_MASK);
 +
-+		__disk_stat_inc(disk, ios[rw]);
-+		__disk_stat_add(disk, ticks[rw], duration);
-+		disk_round_stats(disk);
-+		disk->in_flight--;
-+	}
++	return err;
++}
 +
-+	if (req->end_io)
-+		req->end_io(req, error);
-+	else {
-+		if (blk_bidi_rq(req))
-+			__blk_put_request(req->next_rq->q, req->next_rq);
++static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
++				      unsigned int authsize)
++{
++	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
 +
-+		__blk_put_request(req->q, req);
++	switch (authsize) {
++	case 8:
++	case 12:
++	case 16:
++		break;
++	default:
++		return -EINVAL;
 +	}
++
++	return crypto_aead_setauthsize(ctx->child, authsize);
 +}
 +
-+static inline void __end_request(struct request *rq, int uptodate,
-+				 unsigned int nr_bytes)
++static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
 +{
-+	int error = 0;
++	struct aead_request *subreq = aead_request_ctx(req);
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
++	struct crypto_aead *child = ctx->child;
++	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
++			   crypto_aead_alignmask(child) + 1);
 +
-+	if (uptodate <= 0)
-+		error = uptodate ? uptodate : -EIO;
++	/* L' */
++	iv[0] = 3;
 +
-+	__blk_end_request(rq, error, nr_bytes);
-+}
++	memcpy(iv + 1, ctx->nonce, 3);
++	memcpy(iv + 4, req->iv, 8);
 +
-+/**
-+ * blk_rq_bytes - Returns bytes left to complete in the entire request
-+ **/
-+unsigned int blk_rq_bytes(struct request *rq)
-+{
-+	if (blk_fs_request(rq))
-+		return rq->hard_nr_sectors << 9;
++	aead_request_set_tfm(subreq, child);
++	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
++				  req->base.data);
++	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
++	aead_request_set_assoc(subreq, req->assoc, req->assoclen);
 +
-+	return rq->data_len;
++	return subreq;
 +}
-+EXPORT_SYMBOL_GPL(blk_rq_bytes);
 +
-+/**
-+ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
-+ **/
-+unsigned int blk_rq_cur_bytes(struct request *rq)
++static int crypto_rfc4309_encrypt(struct aead_request *req)
 +{
-+	if (blk_fs_request(rq))
-+		return rq->current_nr_sectors << 9;
-+
-+	if (rq->bio)
-+		return rq->bio->bi_size;
++	req = crypto_rfc4309_crypt(req);
 +
-+	return rq->data_len;
++	return crypto_aead_encrypt(req);
 +}
-+EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 +
-+/**
-+ * end_queued_request - end all I/O on a queued request
-+ * @rq:		the request being processed
-+ * @uptodate:	error value or 0/1 uptodate flag
-+ *
-+ * Description:
-+ *     Ends all I/O on a request, and removes it from the block layer queues.
-+ *     Not suitable for normal IO completion, unless the driver still has
-+ *     the request attached to the block layer.
-+ *
-+ **/
-+void end_queued_request(struct request *rq, int uptodate)
++static int crypto_rfc4309_decrypt(struct aead_request *req)
 +{
-+	__end_request(rq, uptodate, blk_rq_bytes(rq));
++	req = crypto_rfc4309_crypt(req);
++
++	return crypto_aead_decrypt(req);
 +}
-+EXPORT_SYMBOL(end_queued_request);
 +
-+/**
-+ * end_dequeued_request - end all I/O on a dequeued request
-+ * @rq:		the request being processed
-+ * @uptodate:	error value or 0/1 uptodate flag
-+ *
-+ * Description:
-+ *     Ends all I/O on a request. The request must already have been
-+ *     dequeued using blkdev_dequeue_request(), as is normally the case
-+ *     for most drivers.
-+ *
-+ **/
-+void end_dequeued_request(struct request *rq, int uptodate)
++static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
 +{
-+	__end_request(rq, uptodate, blk_rq_bytes(rq));
-+}
-+EXPORT_SYMBOL(end_dequeued_request);
++	struct crypto_instance *inst = (void *)tfm->__crt_alg;
++	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
++	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct crypto_aead *aead;
++	unsigned long align;
 +
++	aead = crypto_spawn_aead(spawn);
++	if (IS_ERR(aead))
++		return PTR_ERR(aead);
 +
-+/**
-+ * end_request - end I/O on the current segment of the request
-+ * @req:	the request being processed
-+ * @uptodate:	error value or 0/1 uptodate flag
-+ *
-+ * Description:
-+ *     Ends I/O on the current segment of a request. If that is the only
-+ *     remaining segment, the request is also completed and freed.
-+ *
-+ *     This is a remnant of how older block drivers handled IO completions.
-+ *     Modern drivers typically end IO on the full request in one go, unless
-+ *     they have a residual value to account for. For that case this function
-+ *     isn't really useful, unless the residual just happens to be the
-+ *     full current segment. In other words, don't use this function in new
-+ *     code. Either use end_request_completely(), or the
-+ *     end_that_request_chunk() (along with end_that_request_last()) for
-+ *     partial completions.
-+ *
-+ **/
-+void end_request(struct request *req, int uptodate)
++	ctx->child = aead;
++
++	align = crypto_aead_alignmask(aead);
++	align &= ~(crypto_tfm_ctx_alignment() - 1);
++	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
++				ALIGN(crypto_aead_reqsize(aead),
++				      crypto_tfm_ctx_alignment()) +
++				align + 16;
++
++	return 0;
++}
++
++static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
 +{
-+	__end_request(req, uptodate, req->hard_cur_sectors << 9);
++	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	crypto_free_aead(ctx->child);
 +}
-+EXPORT_SYMBOL(end_request);
 +
-+/**
-+ * blk_end_io - Generic end_io function to complete a request.
-+ * @rq:           the request being processed
-+ * @error:        0 for success, < 0 for error
-+ * @nr_bytes:     number of bytes to complete @rq
-+ * @bidi_bytes:   number of bytes to complete @rq->next_rq
-+ * @drv_callback: function called between completion of bios in the request
-+ *                and completion of the request.
-+ *                If the callback returns non 0, this helper returns without
-+ *                completion of the request.
-+ *
-+ * Description:
-+ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
-+ *     If @rq has leftover, sets it up for the next range of segments.
-+ *
-+ * Return:
-+ *     0 - we are done with this request
-+ *     1 - this request is not freed yet, it still has pending buffers.
-+ **/
-+static int blk_end_io(struct request *rq, int error, int nr_bytes,
-+		      int bidi_bytes, int (drv_callback)(struct request *))
++static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
 +{
-+	struct request_queue *q = rq->q;
-+	unsigned long flags = 0UL;
++	struct crypto_attr_type *algt;
++	struct crypto_instance *inst;
++	struct crypto_aead_spawn *spawn;
++	struct crypto_alg *alg;
++	const char *ccm_name;
++	int err;
 +
-+	if (blk_fs_request(rq) || blk_pc_request(rq)) {
-+		if (__end_that_request_first(rq, error, nr_bytes))
-+			return 1;
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
++		return ERR_PTR(err);
 +
-+		/* Bidi request must be completed as a whole */
-+		if (blk_bidi_rq(rq) &&
-+		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
-+			return 1;
-+	}
++	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
++		return ERR_PTR(-EINVAL);
 +
-+	/* Special feature for tricky drivers */
-+	if (drv_callback && drv_callback(rq))
-+		return 1;
++	ccm_name = crypto_attr_alg_name(tb[1]);
++	err = PTR_ERR(ccm_name);
++	if (IS_ERR(ccm_name))
++		return ERR_PTR(err);
 +
-+	add_disk_randomness(rq->rq_disk);
++	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
++	if (!inst)
++		return ERR_PTR(-ENOMEM);
 +
-+	spin_lock_irqsave(q->queue_lock, flags);
-+	end_that_request_last(rq, error);
-+	spin_unlock_irqrestore(q->queue_lock, flags);
++	spawn = crypto_instance_ctx(inst);
++	crypto_set_aead_spawn(spawn, inst);
++	err = crypto_grab_aead(spawn, ccm_name, 0,
++			       crypto_requires_sync(algt->type, algt->mask));
++	if (err)
++		goto out_free_inst;
 +
-+	return 0;
-+}
++	alg = crypto_aead_spawn_alg(spawn);
 +
-+/**
-+ * blk_end_request - Helper function for drivers to complete the request.
-+ * @rq:       the request being processed
-+ * @error:    0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete
-+ *
-+ * Description:
-+ *     Ends I/O on a number of bytes attached to @rq.
-+ *     If @rq has leftover, sets it up for the next range of segments.
-+ *
-+ * Return:
-+ *     0 - we are done with this request
-+ *     1 - still buffers pending for this request
-+ **/
-+int blk_end_request(struct request *rq, int error, int nr_bytes)
-+{
-+	return blk_end_io(rq, error, nr_bytes, 0, NULL);
-+}
-+EXPORT_SYMBOL_GPL(blk_end_request);
++	err = -EINVAL;
 +
-+/**
-+ * __blk_end_request - Helper function for drivers to complete the request.
-+ * @rq:       the request being processed
-+ * @error:    0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete
-+ *
-+ * Description:
-+ *     Must be called with queue lock held unlike blk_end_request().
-+ *
-+ * Return:
-+ *     0 - we are done with this request
-+ *     1 - still buffers pending for this request
-+ **/
-+int __blk_end_request(struct request *rq, int error, int nr_bytes)
-+{
-+	if (blk_fs_request(rq) || blk_pc_request(rq)) {
-+		if (__end_that_request_first(rq, error, nr_bytes))
-+			return 1;
-+	}
++	/* We only support 16-byte blocks. */
++	if (alg->cra_aead.ivsize != 16)
++		goto out_drop_alg;
 +
-+	add_disk_randomness(rq->rq_disk);
++	/* Not a stream cipher? */
++	if (alg->cra_blocksize != 1)
++		goto out_drop_alg;
 +
-+	end_that_request_last(rq, error);
++	err = -ENAMETOOLONG;
++	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
++		     "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
++	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
++		     "rfc4309(%s)", alg->cra_driver_name) >=
++	    CRYPTO_MAX_ALG_NAME)
++		goto out_drop_alg;
 +
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(__blk_end_request);
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
++	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
++	inst->alg.cra_priority = alg->cra_priority;
++	inst->alg.cra_blocksize = 1;
++	inst->alg.cra_alignmask = alg->cra_alignmask;
++	inst->alg.cra_type = &crypto_nivaead_type;
 +
-+/**
-+ * blk_end_bidi_request - Helper function for drivers to complete bidi request.
-+ * @rq:         the bidi request being processed
-+ * @error:      0 for success, < 0 for error
-+ * @nr_bytes:   number of bytes to complete @rq
-+ * @bidi_bytes: number of bytes to complete @rq->next_rq
-+ *
-+ * Description:
-+ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
-+ *
-+ * Return:
-+ *     0 - we are done with this request
-+ *     1 - still buffers pending for this request
-+ **/
-+int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
-+			 int bidi_bytes)
-+{
-+	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
-+}
-+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
++	inst->alg.cra_aead.ivsize = 8;
++	inst->alg.cra_aead.maxauthsize = 16;
 +
-+/**
-+ * blk_end_request_callback - Special helper function for tricky drivers
-+ * @rq:           the request being processed
-+ * @error:        0 for success, < 0 for error
-+ * @nr_bytes:     number of bytes to complete
-+ * @drv_callback: function called between completion of bios in the request
-+ *                and completion of the request.
-+ *                If the callback returns non 0, this helper returns without
-+ *                completion of the request.
-+ *
-+ * Description:
-+ *     Ends I/O on a number of bytes attached to @rq.
-+ *     If @rq has leftover, sets it up for the next range of segments.
-+ *
-+ *     This special helper function is used only for existing tricky drivers.
-+ *     (e.g. cdrom_newpc_intr() of ide-cd)
-+ *     This interface will be removed when such drivers are rewritten.
-+ *     Don't use this interface in other places anymore.
-+ *
-+ * Return:
-+ *     0 - we are done with this request
-+ *     1 - this request is not freed yet.
-+ *         this request still has pending buffers or
-+ *         the driver doesn't want to finish this request yet.
-+ **/
-+int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
-+			     int (drv_callback)(struct request *))
-+{
-+	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
-+}
-+EXPORT_SYMBOL_GPL(blk_end_request_callback);
++	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
 +
-+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
-+		     struct bio *bio)
-+{
-+	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
-+	rq->cmd_flags |= (bio->bi_rw & 3);
++	inst->alg.cra_init = crypto_rfc4309_init_tfm;
++	inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
 +
-+	rq->nr_phys_segments = bio_phys_segments(q, bio);
-+	rq->nr_hw_segments = bio_hw_segments(q, bio);
-+	rq->current_nr_sectors = bio_cur_sectors(bio);
-+	rq->hard_cur_sectors = rq->current_nr_sectors;
-+	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
-+	rq->buffer = bio_data(bio);
-+	rq->data_len = bio->bi_size;
++	inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
++	inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
++	inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
++	inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
 +
-+	rq->bio = rq->biotail = bio;
++	inst->alg.cra_aead.geniv = "seqiv";
 +
-+	if (bio->bi_bdev)
-+		rq->rq_disk = bio->bi_bdev->bd_disk;
-+}
++out:
++	return inst;
 +
-+int kblockd_schedule_work(struct work_struct *work)
-+{
-+	return queue_work(kblockd_workqueue, work);
++out_drop_alg:
++	crypto_drop_aead(spawn);
++out_free_inst:
++	kfree(inst);
++	inst = ERR_PTR(err);
++	goto out;
 +}
 +
-+EXPORT_SYMBOL(kblockd_schedule_work);
-+
-+void kblockd_flush_work(struct work_struct *work)
++static void crypto_rfc4309_free(struct crypto_instance *inst)
 +{
-+	cancel_work_sync(work);
++	crypto_drop_spawn(crypto_instance_ctx(inst));
++	kfree(inst);
 +}
-+EXPORT_SYMBOL(kblockd_flush_work);
 +
-+int __init blk_dev_init(void)
++static struct crypto_template crypto_rfc4309_tmpl = {
++	.name = "rfc4309",
++	.alloc = crypto_rfc4309_alloc,
++	.free = crypto_rfc4309_free,
++	.module = THIS_MODULE,
++};
++
++static int __init crypto_ccm_module_init(void)
 +{
-+	int i;
++	int err;
 +
-+	kblockd_workqueue = create_workqueue("kblockd");
-+	if (!kblockd_workqueue)
-+		panic("Failed to create kblockd\n");
++	err = crypto_register_template(&crypto_ccm_base_tmpl);
++	if (err)
++		goto out;
 +
-+	request_cachep = kmem_cache_create("blkdev_requests",
-+			sizeof(struct request), 0, SLAB_PANIC, NULL);
++	err = crypto_register_template(&crypto_ccm_tmpl);
++	if (err)
++		goto out_undo_base;
 +
-+	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
-+			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
++	err = crypto_register_template(&crypto_rfc4309_tmpl);
++	if (err)
++		goto out_undo_ccm;
 +
-+	for_each_possible_cpu(i)
-+		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
++out:
++	return err;
 +
-+	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-+	register_hotcpu_notifier(&blk_cpu_notifier);
++out_undo_ccm:
++	crypto_unregister_template(&crypto_ccm_tmpl);
++out_undo_base:
++	crypto_unregister_template(&crypto_ccm_base_tmpl);
++	goto out;
++}
 +
-+	return 0;
++static void __exit crypto_ccm_module_exit(void)
++{
++	crypto_unregister_template(&crypto_rfc4309_tmpl);
++	crypto_unregister_template(&crypto_ccm_tmpl);
++	crypto_unregister_template(&crypto_ccm_base_tmpl);
 +}
 +
-diff --git a/block/blk-exec.c b/block/blk-exec.c
++module_init(crypto_ccm_module_init);
++module_exit(crypto_ccm_module_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Counter with CBC MAC");
++MODULE_ALIAS("ccm_base");
++MODULE_ALIAS("rfc4309");
+diff --git a/crypto/chainiv.c b/crypto/chainiv.c
 new file mode 100644
-index 0000000..ebfb44e
+index 0000000..d17fa04
 --- /dev/null
-+++ b/block/blk-exec.c
-@@ -0,0 +1,105 @@
++++ b/crypto/chainiv.c
+@@ -0,0 +1,331 @@
 +/*
-+ * Functions related to setting various queue properties from drivers
++ * chainiv: Chain IV Generator
++ *
++ * Generate IVs simply by using the last block of the previous encryption.
++ * This is mainly useful for CBC with a synchronous algorithm.
++ *
++ * Copyright (c) 2007 Herbert Xu <herbert at gondor.apana.org.au>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
 + */
++
++#include <crypto/internal/skcipher.h>
++#include <linux/err.h>
++#include <linux/init.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
++#include <linux/random.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/workqueue.h>
 +
-+#include "blk.h"
++enum {
++	CHAINIV_STATE_INUSE = 0,
++};
 +
-+/*
-+ * for max sense size
-+ */
-+#include <scsi/scsi_cmnd.h>
++struct chainiv_ctx {
++	spinlock_t lock;
++	char iv[];
++};
 +
-+/**
-+ * blk_end_sync_rq - executes a completion event on a request
-+ * @rq: request to complete
-+ * @error: end io status of the request
-+ */
-+void blk_end_sync_rq(struct request *rq, int error)
-+{
-+	struct completion *waiting = rq->end_io_data;
++struct async_chainiv_ctx {
++	unsigned long state;
 +
-+	rq->end_io_data = NULL;
-+	__blk_put_request(rq->q, rq);
++	spinlock_t lock;
++	int err;
 +
-+	/*
-+	 * complete last, if this is a stack request the process (and thus
-+	 * the rq pointer) could be invalid right after this complete()
-+	 */
-+	complete(waiting);
-+}
-+EXPORT_SYMBOL(blk_end_sync_rq);
++	struct crypto_queue queue;
++	struct work_struct postponed;
 +
-+/**
-+ * blk_execute_rq_nowait - insert a request into queue for execution
-+ * @q:		queue to insert the request in
-+ * @bd_disk:	matching gendisk
-+ * @rq:		request to insert
-+ * @at_head:    insert request at head or tail of queue
-+ * @done:	I/O completion handler
-+ *
-+ * Description:
-+ *    Insert a fully prepared request at the back of the io scheduler queue
-+ *    for execution.  Don't wait for completion.
-+ */
-+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
-+			   struct request *rq, int at_head,
-+			   rq_end_io_fn *done)
++	char iv[];
++};
++
++static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
 +{
-+	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
++	unsigned int ivsize;
++	int err;
 +
-+	rq->rq_disk = bd_disk;
-+	rq->cmd_flags |= REQ_NOMERGE;
-+	rq->end_io = done;
-+	WARN_ON(irqs_disabled());
-+	spin_lock_irq(q->queue_lock);
-+	__elv_add_request(q, rq, where, 1);
-+	__generic_unplug_device(q);
-+	spin_unlock_irq(q->queue_lock);
-+}
-+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
++	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
++	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
++						~CRYPTO_TFM_REQ_MAY_SLEEP,
++					req->creq.base.complete,
++					req->creq.base.data);
++	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
++				     req->creq.nbytes, req->creq.info);
 +
-+/**
-+ * blk_execute_rq - insert a request into queue for execution
-+ * @q:		queue to insert the request in
-+ * @bd_disk:	matching gendisk
-+ * @rq:		request to insert
-+ * @at_head:    insert request at head or tail of queue
-+ *
-+ * Description:
-+ *    Insert a fully prepared request at the back of the io scheduler queue
-+ *    for execution and wait for completion.
-+ */
-+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
-+		   struct request *rq, int at_head)
-+{
-+	DECLARE_COMPLETION_ONSTACK(wait);
-+	char sense[SCSI_SENSE_BUFFERSIZE];
-+	int err = 0;
++	spin_lock_bh(&ctx->lock);
 +
-+	/*
-+	 * we need an extra reference to the request, so we can look at
-+	 * it after io completion
-+	 */
-+	rq->ref_count++;
++	ivsize = crypto_ablkcipher_ivsize(geniv);
 +
-+	if (!rq->sense) {
-+		memset(sense, 0, sizeof(sense));
-+		rq->sense = sense;
-+		rq->sense_len = 0;
-+	}
++	memcpy(req->giv, ctx->iv, ivsize);
++	memcpy(subreq->info, ctx->iv, ivsize);
 +
-+	rq->end_io_data = &wait;
-+	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
-+	wait_for_completion(&wait);
++	err = crypto_ablkcipher_encrypt(subreq);
++	if (err)
++		goto unlock;
 +
-+	if (rq->errors)
-+		err = -EIO;
++	memcpy(ctx->iv, subreq->info, ivsize);
++
++unlock:
++	spin_unlock_bh(&ctx->lock);
 +
 +	return err;
 +}
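
The chaining rule implemented above is simply that the IV handed to request n+1 is whatever the underlying cipher left in its IV buffer after request n, which for CBC is the last ciphertext block. A minimal standalone sketch of that rule (cbc_encrypt() is a stand-in for the real synchronous cipher, and len is assumed to be a whole number of 16-byte blocks):

	#include <string.h>

	#define CHAINIV_BLK 16	/* block size assumed for illustration */

	static void chain_iv_encrypt(unsigned char iv[CHAINIV_BLK],
				     const unsigned char *pt, unsigned char *ct,
				     size_t len,
				     void (*cbc_encrypt)(const unsigned char *iv,
							 const unsigned char *pt,
							 unsigned char *ct,
							 size_t len))
	{
		cbc_encrypt(iv, pt, ct, len);
		/* Chain: the final ciphertext block becomes the IV for the next message. */
		memcpy(iv, ct + len - CHAINIV_BLK, CHAINIV_BLK);
	}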
 +
-+EXPORT_SYMBOL(blk_execute_rq);
-diff --git a/block/blk-ioc.c b/block/blk-ioc.c
-new file mode 100644
-index 0000000..6d16755
---- /dev/null
-+++ b/block/blk-ioc.c
-@@ -0,0 +1,194 @@
-+/*
-+ * Functions related to io context handling
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
-+#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
++static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
++{
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
 +
-+#include "blk.h"
++	spin_lock_bh(&ctx->lock);
++	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
++	    chainiv_givencrypt_first)
++		goto unlock;
 +
-+/*
-+ * For io context allocations
-+ */
-+static struct kmem_cache *iocontext_cachep;
++	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
++	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
 +
-+static void cfq_dtor(struct io_context *ioc)
-+{
-+	struct cfq_io_context *cic[1];
-+	int r;
++unlock:
++	spin_unlock_bh(&ctx->lock);
 +
-+	/*
-+	 * We don't have a specific key to lookup with, so use the gang
-+	 * lookup to just retrieve the first item stored. The cfq exit
-+	 * function will iterate the full tree, so any member will do.
-+	 */
-+	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-+	if (r > 0)
-+		cic[0]->dtor(ioc);
++	return chainiv_givencrypt(req);
 +}
 +
-+/*
-+ * IO Context helper functions. put_io_context() returns 1 if there are no
-+ * more users of this io context, 0 otherwise.
-+ */
-+int put_io_context(struct io_context *ioc)
++static int chainiv_init_common(struct crypto_tfm *tfm)
 +{
-+	if (ioc == NULL)
-+		return 1;
-+
-+	BUG_ON(atomic_read(&ioc->refcount) == 0);
-+
-+	if (atomic_dec_and_test(&ioc->refcount)) {
-+		rcu_read_lock();
-+		if (ioc->aic && ioc->aic->dtor)
-+			ioc->aic->dtor(ioc->aic);
-+		rcu_read_unlock();
-+		cfq_dtor(ioc);
++	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
 +
-+		kmem_cache_free(iocontext_cachep, ioc);
-+		return 1;
-+	}
-+	return 0;
++	return skcipher_geniv_init(tfm);
 +}
-+EXPORT_SYMBOL(put_io_context);
 +
-+static void cfq_exit(struct io_context *ioc)
++static int chainiv_init(struct crypto_tfm *tfm)
 +{
-+	struct cfq_io_context *cic[1];
-+	int r;
++	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+	rcu_read_lock();
-+	/*
-+	 * See comment for cfq_dtor()
-+	 */
-+	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-+	rcu_read_unlock();
++	spin_lock_init(&ctx->lock);
 +
-+	if (r > 0)
-+		cic[0]->exit(ioc);
++	return chainiv_init_common(tfm);
 +}
 +
-+/* Called by the exiting task */
-+void exit_io_context(void)
++static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
 +{
-+	struct io_context *ioc;
-+
-+	task_lock(current);
-+	ioc = current->io_context;
-+	current->io_context = NULL;
-+	task_unlock(current);
++	int queued;
 +
-+	if (atomic_dec_and_test(&ioc->nr_tasks)) {
-+		if (ioc->aic && ioc->aic->exit)
-+			ioc->aic->exit(ioc->aic);
-+		cfq_exit(ioc);
++	if (!ctx->queue.qlen) {
++		smp_mb__before_clear_bit();
++		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
 +
-+		put_io_context(ioc);
++		if (!ctx->queue.qlen ||
++		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
++			goto out;
 +	}
-+}
-+
-+struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
-+{
-+	struct io_context *ret;
 +
-+	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
-+	if (ret) {
-+		atomic_set(&ret->refcount, 1);
-+		atomic_set(&ret->nr_tasks, 1);
-+		spin_lock_init(&ret->lock);
-+		ret->ioprio_changed = 0;
-+		ret->ioprio = 0;
-+		ret->last_waited = jiffies; /* doesn't matter... */
-+		ret->nr_batch_requests = 0; /* because this is 0 */
-+		ret->aic = NULL;
-+		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
-+		ret->ioc_data = NULL;
-+	}
++	queued = schedule_work(&ctx->postponed);
++	BUG_ON(!queued);
 +
-+	return ret;
++out:
++	return ctx->err;
 +}
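
The state bit above acts as a single-dispatcher gate: whoever wins test_and_set_bit(CHAINIV_STATE_INUSE) processes requests, and the bit is dropped and then re-checked so that work queued in the meantime is not lost. A rough userspace analogue of that gate, using a C11 atomic flag purely for illustration (the kernel code uses test_and_set_bit()/clear_bit() on ctx->state and schedule_work() for the kick):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Userspace analogue of the CHAINIV_STATE_INUSE gate; illustrative only. */
	static atomic_flag chainiv_inuse = ATOMIC_FLAG_INIT;

	/* test_and_set_bit() equivalent: true means we won the gate and may process. */
	static bool chainiv_try_enter(void)
	{
		return !atomic_flag_test_and_set(&chainiv_inuse);
	}

	/*
	 * Drop the gate, then re-check: if requests were queued while we held
	 * it, try to retake the gate and kick processing again, mirroring
	 * async_chainiv_schedule_work().
	 */
	static void chainiv_leave(bool queue_nonempty, void (*kick)(void))
	{
		atomic_flag_clear(&chainiv_inuse);
		if (queue_nonempty && chainiv_try_enter())
			kick();
	}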
 +
-+/*
-+ * If the current task has no IO context then create one and initialise it.
-+ * Otherwise, return its existing IO context.
-+ *
-+ * This returned IO context doesn't have a specifically elevated refcount,
-+ * but since the current task itself holds a reference, the context can be
-+ * used in general code, so long as it stays within `current` context.
-+ */
-+struct io_context *current_io_context(gfp_t gfp_flags, int node)
++static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
 +{
-+	struct task_struct *tsk = current;
-+	struct io_context *ret;
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++	int err;
 +
-+	ret = tsk->io_context;
-+	if (likely(ret))
-+		return ret;
++	spin_lock_bh(&ctx->lock);
++	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
++	spin_unlock_bh(&ctx->lock);
 +
-+	ret = alloc_io_context(gfp_flags, node);
-+	if (ret) {
-+		/* make sure set_task_ioprio() sees the settings above */
-+		smp_wmb();
-+		tsk->io_context = ret;
-+	}
++	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
++		return err;
 +
-+	return ret;
++	ctx->err = err;
++	return async_chainiv_schedule_work(ctx);
 +}
 +
-+/*
-+ * If the current task has no IO context then create one and initialise it.
-+ * If it does have a context, take a ref on it.
-+ *
-+ * This is always called in the context of the task which submitted the I/O.
-+ */
-+struct io_context *get_io_context(gfp_t gfp_flags, int node)
++static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
 +{
-+	struct io_context *ret = NULL;
-+
-+	/*
-+	 * Check for unlikely race with exiting task. ioc ref count is
-+	 * zero when ioc is being detached.
-+	 */
-+	do {
-+		ret = current_io_context(gfp_flags, node);
-+		if (unlikely(!ret))
-+			break;
-+	} while (!atomic_inc_not_zero(&ret->refcount));
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
++	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
 +
-+	return ret;
-+}
-+EXPORT_SYMBOL(get_io_context);
++	memcpy(req->giv, ctx->iv, ivsize);
++	memcpy(subreq->info, ctx->iv, ivsize);
 +
-+void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-+{
-+	struct io_context *src = *psrc;
-+	struct io_context *dst = *pdst;
++	ctx->err = crypto_ablkcipher_encrypt(subreq);
++	if (ctx->err)
++		goto out;
 +
-+	if (src) {
-+		BUG_ON(atomic_read(&src->refcount) == 0);
-+		atomic_inc(&src->refcount);
-+		put_io_context(dst);
-+		*pdst = src;
-+	}
-+}
-+EXPORT_SYMBOL(copy_io_context);
++	memcpy(ctx->iv, subreq->info, ivsize);
 +
-+void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
-+{
-+	struct io_context *temp;
-+	temp = *ioc1;
-+	*ioc1 = *ioc2;
-+	*ioc2 = temp;
++out:
++	return async_chainiv_schedule_work(ctx);
 +}
-+EXPORT_SYMBOL(swap_io_context);
 +
-+int __init blk_ioc_init(void)
++static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
 +{
-+	iocontext_cachep = kmem_cache_create("blkdev_ioc",
-+			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
-+	return 0;
-+}
-+subsys_initcall(blk_ioc_init);
-diff --git a/block/blk-map.c b/block/blk-map.c
-new file mode 100644
-index 0000000..916cfc9
---- /dev/null
-+++ b/block/blk-map.c
-@@ -0,0 +1,264 @@
-+/*
-+ * Functions related to mapping data to requests
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
 +
-+#include "blk.h"
++	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
++	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
++					req->creq.base.complete,
++					req->creq.base.data);
++	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
++				     req->creq.nbytes, req->creq.info);
 +
-+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
-+		      struct bio *bio)
-+{
-+	if (!rq->bio)
-+		blk_rq_bio_prep(q, rq, bio);
-+	else if (!ll_back_merge_fn(q, rq, bio))
-+		return -EINVAL;
-+	else {
-+		rq->biotail->bi_next = bio;
-+		rq->biotail = bio;
++	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
++		goto postpone;
 +
-+		rq->data_len += bio->bi_size;
++	if (ctx->queue.qlen) {
++		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
++		goto postpone;
 +	}
-+	return 0;
++
++	return async_chainiv_givencrypt_tail(req);
++
++postpone:
++	return async_chainiv_postpone_request(req);
 +}
-+EXPORT_SYMBOL(blk_rq_append_bio);
 +
-+static int __blk_rq_unmap_user(struct bio *bio)
++static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
 +{
-+	int ret = 0;
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
 +
-+	if (bio) {
-+		if (bio_flagged(bio, BIO_USER_MAPPED))
-+			bio_unmap_user(bio);
-+		else
-+			ret = bio_uncopy_user(bio);
-+	}
++	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
++		goto out;
 +
-+	return ret;
-+}
++	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
++	    async_chainiv_givencrypt_first)
++		goto unlock;
 +
-+static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-+			     void __user *ubuf, unsigned int len)
-+{
-+	unsigned long uaddr;
-+	struct bio *bio, *orig_bio;
-+	int reading, ret;
++	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
++	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
 +
-+	reading = rq_data_dir(rq) == READ;
++unlock:
++	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
 +
-+	/*
-+	 * if alignment requirement is satisfied, map in user pages for
-+	 * direct dma. else, set up kernel bounce buffers
-+	 */
-+	uaddr = (unsigned long) ubuf;
-+	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-+		bio = bio_map_user(q, NULL, uaddr, len, reading);
-+	else
-+		bio = bio_copy_user(q, uaddr, len, reading);
++out:
++	return async_chainiv_givencrypt(req);
++}
 +
-+	if (IS_ERR(bio))
-+		return PTR_ERR(bio);
++static void async_chainiv_do_postponed(struct work_struct *work)
++{
++	struct async_chainiv_ctx *ctx = container_of(work,
++						     struct async_chainiv_ctx,
++						     postponed);
++	struct skcipher_givcrypt_request *req;
++	struct ablkcipher_request *subreq;
 +
-+	orig_bio = bio;
-+	blk_queue_bounce(q, &bio);
++	/* Only handle one request at a time to avoid hogging keventd. */
++	spin_lock_bh(&ctx->lock);
++	req = skcipher_dequeue_givcrypt(&ctx->queue);
++	spin_unlock_bh(&ctx->lock);
 +
-+	/*
-+	 * We link the bounce buffer in and could have to traverse it
-+	 * later so we have to get a ref to prevent it from being freed
-+	 */
-+	bio_get(bio);
++	if (!req) {
++		async_chainiv_schedule_work(ctx);
++		return;
++	}
 +
-+	ret = blk_rq_append_bio(q, rq, bio);
-+	if (!ret)
-+		return bio->bi_size;
++	subreq = skcipher_givcrypt_reqctx(req);
++	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
 +
-+	/* if it was bounced we must call the end io function */
-+	bio_endio(bio, 0);
-+	__blk_rq_unmap_user(orig_bio);
-+	bio_put(bio);
-+	return ret;
++	async_chainiv_givencrypt_tail(req);
 +}
 +
-+/**
-+ * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
-+ * @q:		request queue where request should be inserted
-+ * @rq:		request structure to fill
-+ * @ubuf:	the user buffer
-+ * @len:	length of user data
-+ *
-+ * Description:
-+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
-+ *    a kernel bounce buffer is used.
-+ *
-+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
-+ *    still in process context.
-+ *
-+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
-+ *    before being submitted to the device, as pages mapped may be out of
-+ *    reach. It's the caller's responsibility to make sure this happens. The
-+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
-+ *    unmapping.
-+ */
-+int blk_rq_map_user(struct request_queue *q, struct request *rq,
-+		    void __user *ubuf, unsigned long len)
++static int async_chainiv_init(struct crypto_tfm *tfm)
 +{
-+	unsigned long bytes_read = 0;
-+	struct bio *bio = NULL;
-+	int ret;
++	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+	if (len > (q->max_hw_sectors << 9))
-+		return -EINVAL;
-+	if (!len || !ubuf)
-+		return -EINVAL;
++	spin_lock_init(&ctx->lock);
 +
-+	while (bytes_read != len) {
-+		unsigned long map_len, end, start;
++	crypto_init_queue(&ctx->queue, 100);
++	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
 +
-+		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
-+		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
-+								>> PAGE_SHIFT;
-+		start = (unsigned long)ubuf >> PAGE_SHIFT;
++	return chainiv_init_common(tfm);
++}
 +
-+		/*
-+		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
-+		 * pages. If this happens we just lower the requested
-+		 * mapping len by a page so that we can fit
-+		 */
-+		if (end - start > BIO_MAX_PAGES)
-+			map_len -= PAGE_SIZE;
++static void async_chainiv_exit(struct crypto_tfm *tfm)
++{
++	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
-+		if (ret < 0)
-+			goto unmap_rq;
-+		if (!bio)
-+			bio = rq->bio;
-+		bytes_read += ret;
-+		ubuf += ret;
-+	}
++	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
 +
-+	rq->buffer = rq->data = NULL;
-+	return 0;
-+unmap_rq:
-+	blk_rq_unmap_user(bio);
-+	return ret;
++	skcipher_geniv_exit(tfm);
 +}
 +
-+EXPORT_SYMBOL(blk_rq_map_user);
++static struct crypto_template chainiv_tmpl;
 +
-+/**
-+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
-+ * @q:		request queue where request should be inserted
-+ * @rq:		request to map data to
-+ * @iov:	pointer to the iovec
-+ * @iov_count:	number of elements in the iovec
-+ * @len:	I/O byte count
-+ *
-+ * Description:
-+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
-+ *    a kernel bounce buffer is used.
-+ *
-+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
-+ *    still in process context.
-+ *
-+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
-+ *    before being submitted to the device, as pages mapped may be out of
-+ *    reach. It's the caller's responsibility to make sure this happens. The
-+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
-+ *    unmapping.
-+ */
-+int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-+			struct sg_iovec *iov, int iov_count, unsigned int len)
++static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
 +{
-+	struct bio *bio;
++	struct crypto_attr_type *algt;
++	struct crypto_instance *inst;
++	int err;
 +
-+	if (!iov || iov_count <= 0)
-+		return -EINVAL;
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
++		return ERR_PTR(err);
 +
-+	/* we don't allow misaligned data like bio_map_user() does.  If the
-+	 * user is using sg, they're expected to know the alignment constraints
-+	 * and respect them accordingly */
-+	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
-+	if (IS_ERR(bio))
-+		return PTR_ERR(bio);
++	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
++	if (IS_ERR(inst))
++		goto out;
 +
-+	if (bio->bi_size != len) {
-+		bio_endio(bio, 0);
-+		bio_unmap_user(bio);
-+		return -EINVAL;
-+	}
++	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;
 +
-+	bio_get(bio);
-+	blk_rq_bio_prep(q, rq, bio);
-+	rq->buffer = rq->data = NULL;
-+	return 0;
-+}
++	inst->alg.cra_init = chainiv_init;
++	inst->alg.cra_exit = skcipher_geniv_exit;
 +
-+EXPORT_SYMBOL(blk_rq_map_user_iov);
++	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
 +
-+/**
-+ * blk_rq_unmap_user - unmap a request with user data
-+ * @bio:	       start of bio list
-+ *
-+ * Description:
-+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
-+ *    supply the original rq->bio from the blk_rq_map_user() return, since
-+ *    the io completion may have changed rq->bio.
-+ */
-+int blk_rq_unmap_user(struct bio *bio)
-+{
-+	struct bio *mapped_bio;
-+	int ret = 0, ret2;
++	if (!crypto_requires_sync(algt->type, algt->mask)) {
++		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
 +
-+	while (bio) {
-+		mapped_bio = bio;
-+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
-+			mapped_bio = bio->bi_private;
++		inst->alg.cra_ablkcipher.givencrypt =
++			async_chainiv_givencrypt_first;
 +
-+		ret2 = __blk_rq_unmap_user(mapped_bio);
-+		if (ret2 && !ret)
-+			ret = ret2;
++		inst->alg.cra_init = async_chainiv_init;
++		inst->alg.cra_exit = async_chainiv_exit;
 +
-+		mapped_bio = bio;
-+		bio = bio->bi_next;
-+		bio_put(mapped_bio);
++		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
 +	}
 +
-+	return ret;
++	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
++
++out:
++	return inst;
 +}
 +
-+EXPORT_SYMBOL(blk_rq_unmap_user);
++static struct crypto_template chainiv_tmpl = {
++	.name = "chainiv",
++	.alloc = chainiv_alloc,
++	.free = skcipher_geniv_free,
++	.module = THIS_MODULE,
++};
 +
-+/**
-+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
-+ * @q:		request queue where request should be inserted
-+ * @rq:		request to fill
-+ * @kbuf:	the kernel buffer
-+ * @len:	length of user data
-+ * @gfp_mask:	memory allocation flags
-+ */
-+int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
-+		    unsigned int len, gfp_t gfp_mask)
++static int __init chainiv_module_init(void)
 +{
-+	struct bio *bio;
-+
-+	if (len > (q->max_hw_sectors << 9))
-+		return -EINVAL;
-+	if (!len || !kbuf)
-+		return -EINVAL;
-+
-+	bio = bio_map_kern(q, kbuf, len, gfp_mask);
-+	if (IS_ERR(bio))
-+		return PTR_ERR(bio);
-+
-+	if (rq_data_dir(rq) == WRITE)
-+		bio->bi_rw |= (1 << BIO_RW);
++	return crypto_register_template(&chainiv_tmpl);
++}
 +
-+	blk_rq_bio_prep(q, rq, bio);
-+	blk_queue_bounce(q, &rq->bio);
-+	rq->buffer = rq->data = NULL;
-+	return 0;
++static void __exit chainiv_module_exit(void)
++{
++	crypto_unregister_template(&chainiv_tmpl);
 +}
 +
-+EXPORT_SYMBOL(blk_rq_map_kern);
-diff --git a/block/blk-merge.c b/block/blk-merge.c
-new file mode 100644
-index 0000000..5023f0b
---- /dev/null
-+++ b/block/blk-merge.c
-@@ -0,0 +1,485 @@
-+/*
-+ * Functions related to segment and merge handling
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
++module_init(chainiv_module_init);
++module_exit(chainiv_module_exit);
 +
-+#include "blk.h"
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Chain IV Generator");
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 8bf2da8..074298f 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -228,7 +228,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
+ 	struct crypto_alg *alg;
+ 
+ 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
+-				  CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
++				  CRYPTO_ALG_TYPE_MASK);
+ 	if (IS_ERR(alg))
+ 		return ERR_PTR(PTR_ERR(alg));
+ 
+@@ -236,13 +236,15 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
+ 	if (IS_ERR(inst))
+ 		goto out_put_alg;
+ 
+-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC;
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ 	inst->alg.cra_type = &crypto_ablkcipher_type;
+ 
+ 	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
+ 	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
+ 	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
+ 
++	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
 +
-+void blk_recalc_rq_sectors(struct request *rq, int nsect)
+ 	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
+ 
+ 	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
+diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
+index 29f7747..ff7b3de 100644
+--- a/crypto/crypto_null.c
++++ b/crypto/crypto_null.c
+@@ -16,15 +16,17 @@
+  * (at your option) any later version.
+  *
+  */
++
++#include <crypto/internal/skcipher.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+-#include <linux/crypto.h>
+ #include <linux/string.h>
+ 
+ #define NULL_KEY_SIZE		0
+ #define NULL_BLOCK_SIZE		1
+ #define NULL_DIGEST_SIZE	0
++#define NULL_IV_SIZE		0
+ 
+ static int null_compress(struct crypto_tfm *tfm, const u8 *src,
+ 			 unsigned int slen, u8 *dst, unsigned int *dlen)
+@@ -55,6 +57,26 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+ 	memcpy(dst, src, NULL_BLOCK_SIZE);
+ }
+ 
++static int skcipher_null_crypt(struct blkcipher_desc *desc,
++			       struct scatterlist *dst,
++			       struct scatterlist *src, unsigned int nbytes)
 +{
-+	if (blk_fs_request(rq)) {
-+		rq->hard_sector += nsect;
-+		rq->hard_nr_sectors -= nsect;
++	struct blkcipher_walk walk;
++	int err;
 +
-+		/*
-+		 * Move the I/O submission pointers ahead if required.
-+		 */
-+		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
-+		    (rq->sector <= rq->hard_sector)) {
-+			rq->sector = rq->hard_sector;
-+			rq->nr_sectors = rq->hard_nr_sectors;
-+			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
-+			rq->current_nr_sectors = rq->hard_cur_sectors;
-+			rq->buffer = bio_data(rq->bio);
-+		}
++	blkcipher_walk_init(&walk, dst, src, nbytes);
++	err = blkcipher_walk_virt(desc, &walk);
 +
-+		/*
-+		 * if total number of sectors is less than the first segment
-+		 * size, something has gone terribly wrong
-+		 */
-+		if (rq->nr_sectors < rq->current_nr_sectors) {
-+			printk("blk: request botched\n");
-+			rq->nr_sectors = rq->current_nr_sectors;
-+		}
++	while (walk.nbytes) {
++		if (walk.src.virt.addr != walk.dst.virt.addr)
++			memcpy(walk.dst.virt.addr, walk.src.virt.addr,
++			       walk.nbytes);
++		err = blkcipher_walk_done(desc, &walk, 0);
 +	}
++
++	return err;
 +}
 +
-+void blk_recalc_rq_segments(struct request *rq)
-+{
-+	int nr_phys_segs;
-+	int nr_hw_segs;
-+	unsigned int phys_size;
-+	unsigned int hw_size;
-+	struct bio_vec *bv, *bvprv = NULL;
-+	int seg_size;
-+	int hw_seg_size;
-+	int cluster;
-+	struct req_iterator iter;
-+	int high, highprv = 1;
-+	struct request_queue *q = rq->q;
+ static struct crypto_alg compress_null = {
+ 	.cra_name		=	"compress_null",
+ 	.cra_flags		=	CRYPTO_ALG_TYPE_COMPRESS,
+@@ -76,6 +98,7 @@ static struct crypto_alg digest_null = {
+ 	.cra_list		=       LIST_HEAD_INIT(digest_null.cra_list),	
+ 	.cra_u			=	{ .digest = {
+ 	.dia_digestsize		=	NULL_DIGEST_SIZE,
++	.dia_setkey   		=	null_setkey,
+ 	.dia_init   		=	null_init,
+ 	.dia_update 		=	null_update,
+ 	.dia_final  		=	null_final } }
+@@ -96,6 +119,25 @@ static struct crypto_alg cipher_null = {
+ 	.cia_decrypt		=	null_crypt } }
+ };
+ 
++static struct crypto_alg skcipher_null = {
++	.cra_name		=	"ecb(cipher_null)",
++	.cra_driver_name	=	"ecb-cipher_null",
++	.cra_priority		=	100,
++	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
++	.cra_blocksize		=	NULL_BLOCK_SIZE,
++	.cra_type		=	&crypto_blkcipher_type,
++	.cra_ctxsize		=	0,
++	.cra_module		=	THIS_MODULE,
++	.cra_list		=	LIST_HEAD_INIT(skcipher_null.cra_list),
++	.cra_u			=	{ .blkcipher = {
++	.min_keysize		=	NULL_KEY_SIZE,
++	.max_keysize		=	NULL_KEY_SIZE,
++	.ivsize			=	NULL_IV_SIZE,
++	.setkey			= 	null_setkey,
++	.encrypt		=	skcipher_null_crypt,
++	.decrypt		=	skcipher_null_crypt } }
++};
 +
-+	if (!rq->bio)
-+		return;
+ MODULE_ALIAS("compress_null");
+ MODULE_ALIAS("digest_null");
+ MODULE_ALIAS("cipher_null");
+@@ -108,27 +150,35 @@ static int __init init(void)
+ 	if (ret < 0)
+ 		goto out;
+ 
++	ret = crypto_register_alg(&skcipher_null);
++	if (ret < 0)
++		goto out_unregister_cipher;
 +
-+	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-+	hw_seg_size = seg_size = 0;
-+	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-+	rq_for_each_segment(bv, rq, iter) {
-+		/*
-+		 * the trick here is making sure that a high page is never
-+		 * considered part of another segment, since that might
-+		 * change with the bounce page.
-+		 */
-+		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
-+		if (high || highprv)
-+			goto new_hw_segment;
-+		if (cluster) {
-+			if (seg_size + bv->bv_len > q->max_segment_size)
-+				goto new_segment;
-+			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-+				goto new_segment;
-+			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
-+				goto new_segment;
-+			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-+				goto new_hw_segment;
+ 	ret = crypto_register_alg(&digest_null);
+-	if (ret < 0) {
+-		crypto_unregister_alg(&cipher_null);
+-		goto out;
+-	}
++	if (ret < 0)
++		goto out_unregister_skcipher;
+ 
+ 	ret = crypto_register_alg(&compress_null);
+-	if (ret < 0) {
+-		crypto_unregister_alg(&digest_null);
+-		crypto_unregister_alg(&cipher_null);
+-		goto out;
+-	}
++	if (ret < 0)
++		goto out_unregister_digest;
+ 
+ out:	
+ 	return ret;
++
++out_unregister_digest:
++	crypto_unregister_alg(&digest_null);
++out_unregister_skcipher:
++	crypto_unregister_alg(&skcipher_null);
++out_unregister_cipher:
++	crypto_unregister_alg(&cipher_null);
++	goto out;
+ }
+ 
+ static void __exit fini(void)
+ {
+ 	crypto_unregister_alg(&compress_null);
+ 	crypto_unregister_alg(&digest_null);
++	crypto_unregister_alg(&skcipher_null);
+ 	crypto_unregister_alg(&cipher_null);
+ }
+ 
+diff --git a/crypto/ctr.c b/crypto/ctr.c
+new file mode 100644
+index 0000000..2d7425f
+--- /dev/null
++++ b/crypto/ctr.c
+@@ -0,0 +1,422 @@
++/*
++ * CTR: Counter mode
++ *
++ * (C) Copyright IBM Corp. 2007 - Joy Latten <latten at us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
 +
-+			seg_size += bv->bv_len;
-+			hw_seg_size += bv->bv_len;
-+			bvprv = bv;
-+			continue;
-+		}
-+new_segment:
-+		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-+		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-+			hw_seg_size += bv->bv_len;
-+		else {
-+new_hw_segment:
-+			if (nr_hw_segs == 1 &&
-+			    hw_seg_size > rq->bio->bi_hw_front_size)
-+				rq->bio->bi_hw_front_size = hw_seg_size;
-+			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
-+			nr_hw_segs++;
-+		}
++#include <crypto/algapi.h>
++#include <crypto/ctr.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/random.h>
++#include <linux/scatterlist.h>
++#include <linux/slab.h>
 +
-+		nr_phys_segs++;
-+		bvprv = bv;
-+		seg_size = bv->bv_len;
-+		highprv = high;
-+	}
++struct crypto_ctr_ctx {
++	struct crypto_cipher *child;
++};
 +
-+	if (nr_hw_segs == 1 &&
-+	    hw_seg_size > rq->bio->bi_hw_front_size)
-+		rq->bio->bi_hw_front_size = hw_seg_size;
-+	if (hw_seg_size > rq->biotail->bi_hw_back_size)
-+		rq->biotail->bi_hw_back_size = hw_seg_size;
-+	rq->nr_phys_segments = nr_phys_segs;
-+	rq->nr_hw_segments = nr_hw_segs;
-+}
++struct crypto_rfc3686_ctx {
++	struct crypto_blkcipher *child;
++	u8 nonce[CTR_RFC3686_NONCE_SIZE];
++};
 +
-+void blk_recount_segments(struct request_queue *q, struct bio *bio)
++static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
++			     unsigned int keylen)
 +{
-+	struct request rq;
-+	struct bio *nxt = bio->bi_next;
-+	rq.q = q;
-+	rq.bio = rq.biotail = bio;
-+	bio->bi_next = NULL;
-+	blk_recalc_rq_segments(&rq);
-+	bio->bi_next = nxt;
-+	bio->bi_phys_segments = rq.nr_phys_segments;
-+	bio->bi_hw_segments = rq.nr_hw_segments;
-+	bio->bi_flags |= (1 << BIO_SEG_VALID);
++	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
++	struct crypto_cipher *child = ctx->child;
++	int err;
++
++	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
++	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
++				CRYPTO_TFM_REQ_MASK);
++	err = crypto_cipher_setkey(child, key, keylen);
++	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
++			     CRYPTO_TFM_RES_MASK);
++
++	return err;
 +}
-+EXPORT_SYMBOL(blk_recount_segments);
 +
-+static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
-+				   struct bio *nxt)
++static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
++				   struct crypto_cipher *tfm)
 +{
-+	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
-+		return 0;
-+
-+	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
-+		return 0;
-+	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
-+		return 0;
++	unsigned int bsize = crypto_cipher_blocksize(tfm);
++	unsigned long alignmask = crypto_cipher_alignmask(tfm);
++	u8 *ctrblk = walk->iv;
++	u8 tmp[bsize + alignmask];
++	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
++	u8 *src = walk->src.virt.addr;
++	u8 *dst = walk->dst.virt.addr;
++	unsigned int nbytes = walk->nbytes;
 +
-+	/*
-+	 * bio and nxt are contiguous in memory, check if the queue allows
-+	 * these two to be merged into one
-+	 */
-+	if (BIO_SEG_BOUNDARY(q, bio, nxt))
-+		return 1;
++	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
++	crypto_xor(keystream, src, nbytes);
++	memcpy(dst, keystream, nbytes);
 +
-+	return 0;
++	crypto_inc(ctrblk, bsize);
 +}
 +
-+static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
-+				 struct bio *nxt)
++static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
++				    struct crypto_cipher *tfm)
 +{
-+	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-+		blk_recount_segments(q, bio);
-+	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
-+		blk_recount_segments(q, nxt);
-+	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-+	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
-+		return 0;
-+	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
-+		return 0;
++	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
++		   crypto_cipher_alg(tfm)->cia_encrypt;
++	unsigned int bsize = crypto_cipher_blocksize(tfm);
++	u8 *ctrblk = walk->iv;
++	u8 *src = walk->src.virt.addr;
++	u8 *dst = walk->dst.virt.addr;
++	unsigned int nbytes = walk->nbytes;
 +
-+	return 1;
++	do {
++		/* create keystream */
++		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
++		crypto_xor(dst, src, bsize);
++
++		/* increment counter in counterblock */
++		crypto_inc(ctrblk, bsize);
++
++		src += bsize;
++		dst += bsize;
++	} while ((nbytes -= bsize) >= bsize);
++
++	return nbytes;
 +}
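
crypto_ctr_crypt_segment() above follows the textbook CTR recipe: encrypt the counter block to get a keystream block, XOR it into the data, then increment the counter as a big-endian integer (crypto_inc()). A self-contained sketch of the same loop, with encrypt_block() standing in for the underlying block cipher, len assumed to be a whole number of bsize-byte blocks, and bsize assumed to be at most 64:

	#include <stddef.h>

	/* Big-endian increment of the counter block, matching what crypto_inc() does. */
	static void ctr_inc(unsigned char *ctr, size_t size)
	{
		size_t i;

		for (i = size; i > 0; i--)
			if (++ctr[i - 1] != 0)
				break;
	}

	static void ctr_crypt(unsigned char *ctr, unsigned char *buf, size_t len,
			      size_t bsize,
			      void (*encrypt_block)(unsigned char *dst,
						    const unsigned char *src))
	{
		unsigned char keystream[64];
		size_t i;

		while (len >= bsize) {
			encrypt_block(keystream, ctr);	/* E_K(counter block) */
			for (i = 0; i < bsize; i++)	/* XOR keystream into data */
				buf[i] ^= keystream[i];
			ctr_inc(ctr, bsize);		/* bump counter, big-endian */
			buf += bsize;
			len -= bsize;
		}
	}

Since only the keystream is ever XORed in, the same routine serves for encryption and decryption, which is why the template wires crypto_ctr_crypt() into both .encrypt and .decrypt.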
 +
-+/*
-+ * map a request to scatterlist, return number of sg entries setup. Caller
-+ * must make sure sg can hold rq->nr_phys_segments entries
-+ */
-+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-+		  struct scatterlist *sglist)
++static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
++				    struct crypto_cipher *tfm)
 +{
-+	struct bio_vec *bvec, *bvprv;
-+	struct req_iterator iter;
-+	struct scatterlist *sg;
-+	int nsegs, cluster;
++	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
++		   crypto_cipher_alg(tfm)->cia_encrypt;
++	unsigned int bsize = crypto_cipher_blocksize(tfm);
++	unsigned long alignmask = crypto_cipher_alignmask(tfm);
++	unsigned int nbytes = walk->nbytes;
++	u8 *ctrblk = walk->iv;
++	u8 *src = walk->src.virt.addr;
++	u8 tmp[bsize + alignmask];
++	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
 +
-+	nsegs = 0;
-+	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
++	do {
++		/* create keystream */
++		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
++		crypto_xor(src, keystream, bsize);
 +
-+	/*
-+	 * for each bio in rq
-+	 */
-+	bvprv = NULL;
-+	sg = NULL;
-+	rq_for_each_segment(bvec, rq, iter) {
-+		int nbytes = bvec->bv_len;
++		/* increment counter in counterblock */
++		crypto_inc(ctrblk, bsize);
 +
-+		if (bvprv && cluster) {
-+			if (sg->length + nbytes > q->max_segment_size)
-+				goto new_segment;
++		src += bsize;
++	} while ((nbytes -= bsize) >= bsize);
 +
-+			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-+				goto new_segment;
-+			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-+				goto new_segment;
++	return nbytes;
++}
 +
-+			sg->length += nbytes;
-+		} else {
-+new_segment:
-+			if (!sg)
-+				sg = sglist;
-+			else {
-+				/*
-+				 * If the driver previously mapped a shorter
-+				 * list, we could see a termination bit
-+				 * prematurely unless it fully inits the sg
-+				 * table on each mapping. We KNOW that there
-+				 * must be more entries here or the driver
-+				 * would be buggy, so force clear the
-+				 * termination bit to avoid doing a full
-+				 * sg_init_table() in drivers for each command.
-+				 */
-+				sg->page_link &= ~0x02;
-+				sg = sg_next(sg);
-+			}
++static int crypto_ctr_crypt(struct blkcipher_desc *desc,
++			      struct scatterlist *dst, struct scatterlist *src,
++			      unsigned int nbytes)
++{
++	struct blkcipher_walk walk;
++	struct crypto_blkcipher *tfm = desc->tfm;
++	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
++	struct crypto_cipher *child = ctx->child;
++	unsigned int bsize = crypto_cipher_blocksize(child);
++	int err;
 +
-+			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
-+			nsegs++;
-+		}
-+		bvprv = bvec;
-+	} /* segments in rq */
++	blkcipher_walk_init(&walk, dst, src, nbytes);
++	err = blkcipher_walk_virt_block(desc, &walk, bsize);
 +
-+	if (q->dma_drain_size) {
-+		sg->page_link &= ~0x02;
-+		sg = sg_next(sg);
-+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
-+			    q->dma_drain_size,
-+			    ((unsigned long)q->dma_drain_buffer) &
-+			    (PAGE_SIZE - 1));
-+		nsegs++;
++	while (walk.nbytes >= bsize) {
++		if (walk.src.virt.addr == walk.dst.virt.addr)
++			nbytes = crypto_ctr_crypt_inplace(&walk, child);
++		else
++			nbytes = crypto_ctr_crypt_segment(&walk, child);
++
++		err = blkcipher_walk_done(desc, &walk, nbytes);
 +	}
 +
-+	if (sg)
-+		sg_mark_end(sg);
++	if (walk.nbytes) {
++		crypto_ctr_crypt_final(&walk, child);
++		err = blkcipher_walk_done(desc, &walk, 0);
++	}
 +
-+	return nsegs;
++	return err;
 +}
 +
-+EXPORT_SYMBOL(blk_rq_map_sg);
-+
-+static inline int ll_new_mergeable(struct request_queue *q,
-+				   struct request *req,
-+				   struct bio *bio)
++static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
 +{
-+	int nr_phys_segs = bio_phys_segments(q, bio);
++	struct crypto_instance *inst = (void *)tfm->__crt_alg;
++	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
++	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct crypto_cipher *cipher;
 +
-+	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-+		req->cmd_flags |= REQ_NOMERGE;
-+		if (req == q->last_merge)
-+			q->last_merge = NULL;
-+		return 0;
-+	}
++	cipher = crypto_spawn_cipher(spawn);
++	if (IS_ERR(cipher))
++		return PTR_ERR(cipher);
 +
-+	/*
-+	 * A hw segment is just getting larger, bump just the phys
-+	 * counter.
-+	 */
-+	req->nr_phys_segments += nr_phys_segs;
-+	return 1;
++	ctx->child = cipher;
++
++	return 0;
 +}
 +
-+static inline int ll_new_hw_segment(struct request_queue *q,
-+				    struct request *req,
-+				    struct bio *bio)
++static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
 +{
-+	int nr_hw_segs = bio_hw_segments(q, bio);
-+	int nr_phys_segs = bio_phys_segments(q, bio);
-+
-+	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
-+	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-+		req->cmd_flags |= REQ_NOMERGE;
-+		if (req == q->last_merge)
-+			q->last_merge = NULL;
-+		return 0;
-+	}
++	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+	/*
-+	 * This will form the start of a new hw segment.  Bump both
-+	 * counters.
-+	 */
-+	req->nr_hw_segments += nr_hw_segs;
-+	req->nr_phys_segments += nr_phys_segs;
-+	return 1;
++	crypto_free_cipher(ctx->child);
 +}
 +
-+int ll_back_merge_fn(struct request_queue *q, struct request *req,
-+		     struct bio *bio)
++static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
 +{
-+	unsigned short max_sectors;
-+	int len;
++	struct crypto_instance *inst;
++	struct crypto_alg *alg;
++	int err;
 +
-+	if (unlikely(blk_pc_request(req)))
-+		max_sectors = q->max_hw_sectors;
-+	else
-+		max_sectors = q->max_sectors;
++	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
++	if (err)
++		return ERR_PTR(err);
 +
-+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-+		req->cmd_flags |= REQ_NOMERGE;
-+		if (req == q->last_merge)
-+			q->last_merge = NULL;
-+		return 0;
-+	}
-+	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
-+		blk_recount_segments(q, req->biotail);
-+	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-+		blk_recount_segments(q, bio);
-+	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-+	    !BIOVEC_VIRT_OVERSIZE(len)) {
-+		int mergeable =  ll_new_mergeable(q, req, bio);
++	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
++				  CRYPTO_ALG_TYPE_MASK);
++	if (IS_ERR(alg))
++		return ERR_PTR(PTR_ERR(alg));
 +
-+		if (mergeable) {
-+			if (req->nr_hw_segments == 1)
-+				req->bio->bi_hw_front_size = len;
-+			if (bio->bi_hw_segments == 1)
-+				bio->bi_hw_back_size = len;
-+		}
-+		return mergeable;
-+	}
++	/* Block size must be >= 4 bytes. */
++	err = -EINVAL;
++	if (alg->cra_blocksize < 4)
++		goto out_put_alg;
 +
-+	return ll_new_hw_segment(q, req, bio);
-+}
++	/* If this is false we'd fail the alignment of crypto_inc. */
++	if (alg->cra_blocksize % 4)
++		goto out_put_alg;
 +
-+int ll_front_merge_fn(struct request_queue *q, struct request *req, 
-+		      struct bio *bio)
-+{
-+	unsigned short max_sectors;
-+	int len;
++	inst = crypto_alloc_instance("ctr", alg);
++	if (IS_ERR(inst))
++		goto out;
 +
-+	if (unlikely(blk_pc_request(req)))
-+		max_sectors = q->max_hw_sectors;
-+	else
-+		max_sectors = q->max_sectors;
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
++	inst->alg.cra_priority = alg->cra_priority;
++	inst->alg.cra_blocksize = 1;
++	inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
++	inst->alg.cra_type = &crypto_blkcipher_type;
 +
++	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
++	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
++	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
 +
-+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-+		req->cmd_flags |= REQ_NOMERGE;
-+		if (req == q->last_merge)
-+			q->last_merge = NULL;
-+		return 0;
-+	}
-+	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-+	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-+		blk_recount_segments(q, bio);
-+	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
-+		blk_recount_segments(q, req->bio);
-+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-+	    !BIOVEC_VIRT_OVERSIZE(len)) {
-+		int mergeable =  ll_new_mergeable(q, req, bio);
++	inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);
 +
-+		if (mergeable) {
-+			if (bio->bi_hw_segments == 1)
-+				bio->bi_hw_front_size = len;
-+			if (req->nr_hw_segments == 1)
-+				req->biotail->bi_hw_back_size = len;
-+		}
-+		return mergeable;
-+	}
++	inst->alg.cra_init = crypto_ctr_init_tfm;
++	inst->alg.cra_exit = crypto_ctr_exit_tfm;
 +
-+	return ll_new_hw_segment(q, req, bio);
++	inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
++	inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
++	inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
++
++out:
++	crypto_mod_put(alg);
++	return inst;
++
++out_put_alg:
++	inst = ERR_PTR(err);
++	goto out;
 +}
 +
-+static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
-+				struct request *next)
++static void crypto_ctr_free(struct crypto_instance *inst)
 +{
-+	int total_phys_segments;
-+	int total_hw_segments;
++	crypto_drop_spawn(crypto_instance_ctx(inst));
++	kfree(inst);
++}
 +
-+	/*
-+	 * First check if either of the requests is a re-queued
-+	 * request.  Can't merge them if they are.
-+	 */
-+	if (req->special || next->special)
-+		return 0;
++static struct crypto_template crypto_ctr_tmpl = {
++	.name = "ctr",
++	.alloc = crypto_ctr_alloc,
++	.free = crypto_ctr_free,
++	.module = THIS_MODULE,
++};
 +
-+	/*
-+	 * Will it become too large?
-+	 */
-+	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
-+		return 0;
++static int crypto_rfc3686_setkey(struct crypto_tfm *parent, const u8 *key,
++				 unsigned int keylen)
++{
++	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(parent);
++	struct crypto_blkcipher *child = ctx->child;
++	int err;
 +
-+	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-+	if (blk_phys_contig_segment(q, req->biotail, next->bio))
-+		total_phys_segments--;
++	/* the nonce is stored at the end of the key */
++	if (keylen < CTR_RFC3686_NONCE_SIZE)
++		return -EINVAL;
 +
-+	if (total_phys_segments > q->max_phys_segments)
-+		return 0;
++	memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
++	       CTR_RFC3686_NONCE_SIZE);
 +
-+	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-+	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-+		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
-+		/*
-+		 * propagate the combined length to the end of the requests
-+		 */
-+		if (req->nr_hw_segments == 1)
-+			req->bio->bi_hw_front_size = len;
-+		if (next->nr_hw_segments == 1)
-+			next->biotail->bi_hw_back_size = len;
-+		total_hw_segments--;
-+	}
++	keylen -= CTR_RFC3686_NONCE_SIZE;
 +
-+	if (total_hw_segments > q->max_hw_segments)
-+		return 0;
++	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
++	crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
++					  CRYPTO_TFM_REQ_MASK);
++	err = crypto_blkcipher_setkey(child, key, keylen);
++	crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
++				     CRYPTO_TFM_RES_MASK);
 +
-+	/* Merge is OK... */
-+	req->nr_phys_segments = total_phys_segments;
-+	req->nr_hw_segments = total_hw_segments;
-+	return 1;
++	return err;
 +}
 +
-+/*
-+ * Has to be called with the request spinlock acquired
-+ */
-+static int attempt_merge(struct request_queue *q, struct request *req,
-+			  struct request *next)
++static int crypto_rfc3686_crypt(struct blkcipher_desc *desc,
++				struct scatterlist *dst,
++				struct scatterlist *src, unsigned int nbytes)
 +{
-+	if (!rq_mergeable(req) || !rq_mergeable(next))
-+		return 0;
++	struct crypto_blkcipher *tfm = desc->tfm;
++	struct crypto_rfc3686_ctx *ctx = crypto_blkcipher_ctx(tfm);
++	struct crypto_blkcipher *child = ctx->child;
++	unsigned long alignmask = crypto_blkcipher_alignmask(tfm);
++	u8 ivblk[CTR_RFC3686_BLOCK_SIZE + alignmask];
++	u8 *iv = PTR_ALIGN(ivblk + 0, alignmask + 1);
++	u8 *info = desc->info;
++	int err;
 +
-+	/*
-+	 * not contiguous
-+	 */
-+	if (req->sector + req->nr_sectors != next->sector)
-+		return 0;
++	/* set up counter block */
++	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
++	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
 +
-+	if (rq_data_dir(req) != rq_data_dir(next)
-+	    || req->rq_disk != next->rq_disk
-+	    || next->special)
-+		return 0;
++	/* initialize counter portion of counter block */
++	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
++		cpu_to_be32(1);
 +
-+	/*
-+	 * If we are allowed to merge, then append bio list
-+	 * from next to rq and release next. merge_requests_fn
-+	 * will have updated segment counts, update sector
-+	 * counts here.
-+	 */
-+	if (!ll_merge_requests_fn(q, req, next))
-+		return 0;
++	desc->tfm = child;
++	desc->info = iv;
++	err = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
++	desc->tfm = tfm;
++	desc->info = info;
 +
-+	/*
-+	 * At this point we have either done a back merge
-+	 * or front merge. We need the smaller start_time of
-+	 * the merged requests to be the current request
-+	 * for accounting purposes.
-+	 */
-+	if (time_after(req->start_time, next->start_time))
-+		req->start_time = next->start_time;
++	return err;
++}
 +
-+	req->biotail->bi_next = next->bio;
-+	req->biotail = next->biotail;
++static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm)
++{
++	struct crypto_instance *inst = (void *)tfm->__crt_alg;
++	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
++	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct crypto_blkcipher *cipher;
 +
-+	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
++	cipher = crypto_spawn_blkcipher(spawn);
++	if (IS_ERR(cipher))
++		return PTR_ERR(cipher);
 +
-+	elv_merge_requests(q, req, next);
++	ctx->child = cipher;
 +
-+	if (req->rq_disk) {
-+		disk_round_stats(req->rq_disk);
-+		req->rq_disk->in_flight--;
-+	}
++	return 0;
++}
 +
-+	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
++static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm)
++{
++	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+	__blk_put_request(q, next);
-+	return 1;
++	crypto_free_blkcipher(ctx->child);
 +}
 +
-+int attempt_back_merge(struct request_queue *q, struct request *rq)
++static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb)
 +{
-+	struct request *next = elv_latter_request(q, rq);
++	struct crypto_instance *inst;
++	struct crypto_alg *alg;
++	int err;
 +
-+	if (next)
-+		return attempt_merge(q, rq, next);
++	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
++	if (err)
++		return ERR_PTR(err);
 +
-+	return 0;
-+}
++	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER,
++				  CRYPTO_ALG_TYPE_MASK);
++	err = PTR_ERR(alg);
++	if (IS_ERR(alg))
++		return ERR_PTR(err);
 +
-+int attempt_front_merge(struct request_queue *q, struct request *rq)
-+{
-+	struct request *prev = elv_former_request(q, rq);
++	/* We only support 16-byte blocks. */
++	err = -EINVAL;
++	if (alg->cra_blkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE)
++		goto out_put_alg;
 +
-+	if (prev)
-+		return attempt_merge(q, prev, rq);
++	/* Not a stream cipher? */
++	if (alg->cra_blocksize != 1)
++		goto out_put_alg;
 +
-+	return 0;
-+}
-diff --git a/block/blk-settings.c b/block/blk-settings.c
-new file mode 100644
-index 0000000..4df09a1
---- /dev/null
-+++ b/block/blk-settings.c
-@@ -0,0 +1,402 @@
-+/*
-+ * Functions related to setting various queue properties from drivers
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
-+#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
++	inst = crypto_alloc_instance("rfc3686", alg);
++	if (IS_ERR(inst))
++		goto out;
 +
-+#include "blk.h"
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
++	inst->alg.cra_priority = alg->cra_priority;
++	inst->alg.cra_blocksize = 1;
++	inst->alg.cra_alignmask = alg->cra_alignmask;
++	inst->alg.cra_type = &crypto_blkcipher_type;
 +
-+unsigned long blk_max_low_pfn, blk_max_pfn;
-+EXPORT_SYMBOL(blk_max_low_pfn);
-+EXPORT_SYMBOL(blk_max_pfn);
++	inst->alg.cra_blkcipher.ivsize = CTR_RFC3686_IV_SIZE;
++	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize
++					      + CTR_RFC3686_NONCE_SIZE;
++	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize
++					      + CTR_RFC3686_NONCE_SIZE;
 +
-+/**
-+ * blk_queue_prep_rq - set a prepare_request function for queue
-+ * @q:		queue
-+ * @pfn:	prepare_request function
-+ *
-+ * It's possible for a queue to register a prepare_request callback which
-+ * is invoked before the request is handed to the request_fn. The goal of
-+ * the function is to prepare a request for I/O, it can be used to build a
-+ * cdb from the request data for instance.
-+ *
-+ */
-+void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
-+{
-+	q->prep_rq_fn = pfn;
-+}
++	inst->alg.cra_blkcipher.geniv = "seqiv";
 +
-+EXPORT_SYMBOL(blk_queue_prep_rq);
++	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
 +
-+/**
-+ * blk_queue_merge_bvec - set a merge_bvec function for queue
-+ * @q:		queue
-+ * @mbfn:	merge_bvec_fn
-+ *
-+ * Usually queues have static limitations on the max sectors or segments that
-+ * we can put in a request. Stacking drivers may have some settings that
-+ * are dynamic, and thus we have to query the queue whether it is ok to
-+ * add a new bio_vec to a bio at a given offset or not. If the block device
-+ * has such limitations, it needs to register a merge_bvec_fn to control
-+ * the size of bio's sent to it. Note that a block device *must* allow a
-+ * single page to be added to an empty bio. The block device driver may want
-+ * to use the bio_split() function to deal with these bio's. By default
-+ * no merge_bvec_fn is defined for a queue, and only the fixed limits are
-+ * honored.
-+ */
-+void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
-+{
-+	q->merge_bvec_fn = mbfn;
-+}
++	inst->alg.cra_init = crypto_rfc3686_init_tfm;
++	inst->alg.cra_exit = crypto_rfc3686_exit_tfm;
 +
-+EXPORT_SYMBOL(blk_queue_merge_bvec);
++	inst->alg.cra_blkcipher.setkey = crypto_rfc3686_setkey;
++	inst->alg.cra_blkcipher.encrypt = crypto_rfc3686_crypt;
++	inst->alg.cra_blkcipher.decrypt = crypto_rfc3686_crypt;
 +
-+void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
-+{
-+	q->softirq_done_fn = fn;
++out:
++	crypto_mod_put(alg);
++	return inst;
++
++out_put_alg:
++	inst = ERR_PTR(err);
++	goto out;
 +}
 +
-+EXPORT_SYMBOL(blk_queue_softirq_done);
++static struct crypto_template crypto_rfc3686_tmpl = {
++	.name = "rfc3686",
++	.alloc = crypto_rfc3686_alloc,
++	.free = crypto_ctr_free,
++	.module = THIS_MODULE,
++};
 +
-+/**
-+ * blk_queue_make_request - define an alternate make_request function for a device
-+ * @q:  the request queue for the device to be affected
-+ * @mfn: the alternate make_request function
-+ *
-+ * Description:
-+ *    The normal way for &struct bios to be passed to a device
-+ *    driver is for them to be collected into requests on a request
-+ *    queue, and then to allow the device driver to select requests
-+ *    off that queue when it is ready.  This works well for many block
-+ *    devices. However some block devices (typically virtual devices
-+ *    such as md or lvm) do not benefit from the processing on the
-+ *    request queue, and are served best by having the requests passed
-+ *    directly to them.  This can be achieved by providing a function
-+ *    to blk_queue_make_request().
-+ *
-+ * Caveat:
-+ *    The driver that does this *must* be able to deal appropriately
-+ *    with buffers in "highmemory". This can be accomplished by either calling
-+ *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
-+ *    blk_queue_bounce() to create a buffer in normal memory.
-+ **/
-+void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
++static int __init crypto_ctr_module_init(void)
 +{
-+	/*
-+	 * set defaults
-+	 */
-+	q->nr_requests = BLKDEV_MAX_RQ;
-+	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-+	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-+	q->make_request_fn = mfn;
-+	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-+	q->backing_dev_info.state = 0;
-+	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-+	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-+	blk_queue_hardsect_size(q, 512);
-+	blk_queue_dma_alignment(q, 511);
-+	blk_queue_congestion_threshold(q);
-+	q->nr_batching = BLK_BATCH_REQ;
++	int err;
 +
-+	q->unplug_thresh = 4;		/* hmm */
-+	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
-+	if (q->unplug_delay == 0)
-+		q->unplug_delay = 1;
++	err = crypto_register_template(&crypto_ctr_tmpl);
++	if (err)
++		goto out;
 +
-+	INIT_WORK(&q->unplug_work, blk_unplug_work);
++	err = crypto_register_template(&crypto_rfc3686_tmpl);
++	if (err)
++		goto out_drop_ctr;
 +
-+	q->unplug_timer.function = blk_unplug_timeout;
-+	q->unplug_timer.data = (unsigned long)q;
++out:
++	return err;
 +
-+	/*
-+	 * by default assume old behaviour and bounce for any highmem page
-+	 */
-+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
++out_drop_ctr:
++	crypto_unregister_template(&crypto_ctr_tmpl);
++	goto out;
 +}
 +
-+EXPORT_SYMBOL(blk_queue_make_request);
-+
-+/**
-+ * blk_queue_bounce_limit - set bounce buffer limit for queue
-+ * @q:  the request queue for the device
-+ * @dma_addr:   bus address limit
-+ *
-+ * Description:
-+ *    Different hardware can have different requirements as to what pages
-+ *    it can do I/O directly to. A low level driver can call
-+ *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
-+ *    buffers for doing I/O to pages residing above @page.
-+ **/
-+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
++static void __exit crypto_ctr_module_exit(void)
 +{
-+	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
-+	int dma = 0;
-+
-+	q->bounce_gfp = GFP_NOIO;
-+#if BITS_PER_LONG == 64
-+	/* Assume anything <= 4GB can be handled by IOMMU.
-+	   Actually some IOMMUs can handle everything, but I don't
-+	   know of a way to test this here. */
-+	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
-+		dma = 1;
-+	q->bounce_pfn = max_low_pfn;
-+#else
-+	if (bounce_pfn < blk_max_low_pfn)
-+		dma = 1;
-+	q->bounce_pfn = bounce_pfn;
-+#endif
-+	if (dma) {
-+		init_emergency_isa_pool();
-+		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-+		q->bounce_pfn = bounce_pfn;
-+	}
++	crypto_unregister_template(&crypto_rfc3686_tmpl);
++	crypto_unregister_template(&crypto_ctr_tmpl);
 +}
 +
-+EXPORT_SYMBOL(blk_queue_bounce_limit);
++module_init(crypto_ctr_module_init);
++module_exit(crypto_ctr_module_exit);
 +
-+/**
-+ * blk_queue_max_sectors - set max sectors for a request for this queue
-+ * @q:  the request queue for the device
-+ * @max_sectors:  max sectors in the usual 512b unit
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("CTR Counter block mode");
++MODULE_ALIAS("rfc3686");
+diff --git a/crypto/des_generic.c b/crypto/des_generic.c
+index 59966d1..355ecb7 100644
+--- a/crypto/des_generic.c
++++ b/crypto/des_generic.c
+@@ -20,13 +20,7 @@
+ #include <linux/crypto.h>
+ #include <linux/types.h>
+ 
+-#define DES_KEY_SIZE		8
+-#define DES_EXPKEY_WORDS	32
+-#define DES_BLOCK_SIZE		8
+-
+-#define DES3_EDE_KEY_SIZE	(3 * DES_KEY_SIZE)
+-#define DES3_EDE_EXPKEY_WORDS	(3 * DES_EXPKEY_WORDS)
+-#define DES3_EDE_BLOCK_SIZE	DES_BLOCK_SIZE
++#include <crypto/des.h>
+ 
+ #define ROL(x, r) ((x) = rol32((x), (r)))
+ #define ROR(x, r) ((x) = ror32((x), (r)))
+@@ -634,7 +628,7 @@ static const u32 S8[64] = {
+  *   Choice 1 has operated on the key.
+  *
+  */
+-static unsigned long ekey(u32 *pe, const u8 *k)
++unsigned long des_ekey(u32 *pe, const u8 *k)
+ {
+ 	/* K&R: long is at least 32 bits */
+ 	unsigned long a, b, c, d, w;
+@@ -709,6 +703,7 @@ static unsigned long ekey(u32 *pe, const u8 *k)
+ 	/* Zero if weak key */
+ 	return w;
+ }
++EXPORT_SYMBOL_GPL(des_ekey);
+ 
+ /*
+  * Decryption key expansion
+@@ -792,7 +787,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
+ 	int ret;
+ 
+ 	/* Expand to tmp */
+-	ret = ekey(tmp, key);
++	ret = des_ekey(tmp, key);
+ 
+ 	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+@@ -879,9 +874,9 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
+ 		return -EINVAL;
+ 	}
+ 
+-	ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
++	des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
+ 	dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
+-	ekey(expkey, key);
++	des_ekey(expkey, key);
+ 
+ 	return 0;
+ }
+diff --git a/crypto/digest.c b/crypto/digest.c
+index 8871dec..6fd43bd 100644
+--- a/crypto/digest.c
++++ b/crypto/digest.c
+@@ -12,6 +12,7 @@
+  *
+  */
+ 
++#include <crypto/scatterwalk.h>
+ #include <linux/mm.h>
+ #include <linux/errno.h>
+ #include <linux/hardirq.h>
+@@ -20,9 +21,6 @@
+ #include <linux/module.h>
+ #include <linux/scatterlist.h>
+ 
+-#include "internal.h"
+-#include "scatterwalk.h"
+-
+ static int init(struct hash_desc *desc)
+ {
+ 	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
+diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
+new file mode 100644
+index 0000000..eb90d27
+--- /dev/null
++++ b/crypto/eseqiv.c
+@@ -0,0 +1,264 @@
++/*
++ * eseqiv: Encrypted Sequence Number IV Generator
 + *
-+ * Description:
-+ *    Enables a low level driver to set an upper limit on the size of
-+ *    received requests.
-+ **/
-+void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
-+{
-+	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
-+		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-+		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
-+	}
++ * This generator generates an IV based on a sequence number by xoring it
++ * with a salt and then encrypting it with the same key as used to encrypt
++ * the plain text.  This algorithm requires that the block size be equal
++ * to the IV size.  It is mainly useful for CBC.
++ *
++ * Copyright (c) 2007 Herbert Xu <herbert at gondor.apana.org.au>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
 +
-+	if (BLK_DEF_MAX_SECTORS > max_sectors)
-+		q->max_hw_sectors = q->max_sectors = max_sectors;
-+	else {
-+		q->max_sectors = BLK_DEF_MAX_SECTORS;
-+		q->max_hw_sectors = max_sectors;
-+	}
-+}
++#include <crypto/internal/skcipher.h>
++#include <crypto/scatterwalk.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/random.h>
++#include <linux/scatterlist.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
 +
-+EXPORT_SYMBOL(blk_queue_max_sectors);
++struct eseqiv_request_ctx {
++	struct scatterlist src[2];
++	struct scatterlist dst[2];
++	char tail[];
++};
 +
-+/**
-+ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
-+ * @q:  the request queue for the device
-+ * @max_segments:  max number of segments
-+ *
-+ * Description:
-+ *    Enables a low level driver to set an upper limit on the number of
-+ *    physical data segments in a request.  This would be the largest sized
-+ *    scatter list the driver could handle.
-+ **/
-+void blk_queue_max_phys_segments(struct request_queue *q,
-+				 unsigned short max_segments)
++struct eseqiv_ctx {
++	spinlock_t lock;
++	unsigned int reqoff;
++	char salt[];
++};
++
++static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
 +{
-+	if (!max_segments) {
-+		max_segments = 1;
-+		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
-+	}
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
 +
-+	q->max_phys_segments = max_segments;
++	memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
++			 crypto_ablkcipher_alignmask(geniv) + 1),
++	       crypto_ablkcipher_ivsize(geniv));
 +}
 +
-+EXPORT_SYMBOL(blk_queue_max_phys_segments);
-+
-+/**
-+ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
-+ * @q:  the request queue for the device
-+ * @max_segments:  max number of segments
-+ *
-+ * Description:
-+ *    Enables a low level driver to set an upper limit on the number of
-+ *    hw data segments in a request.  This would be the largest number of
-+ *    address/length pairs the host adapter can actually give as once
-+ *    to the device.
-+ **/
-+void blk_queue_max_hw_segments(struct request_queue *q,
-+			       unsigned short max_segments)
++static void eseqiv_complete(struct crypto_async_request *base, int err)
 +{
-+	if (!max_segments) {
-+		max_segments = 1;
-+		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
-+	}
++	struct skcipher_givcrypt_request *req = base->data;
 +
-+	q->max_hw_segments = max_segments;
-+}
++	if (err)
++		goto out;
 +
-+EXPORT_SYMBOL(blk_queue_max_hw_segments);
++	eseqiv_complete2(req);
 +
-+/**
-+ * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
-+ * @q:  the request queue for the device
-+ * @max_size:  max size of segment in bytes
-+ *
-+ * Description:
-+ *    Enables a low level driver to set an upper limit on the size of a
-+ *    coalesced segment
-+ **/
-+void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
++out:
++	skcipher_givcrypt_complete(req, err);
++}
++
++static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
++			 int chain)
 +{
-+	if (max_size < PAGE_CACHE_SIZE) {
-+		max_size = PAGE_CACHE_SIZE;
-+		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
++	if (chain) {
++		head->length += sg->length;
++		sg = scatterwalk_sg_next(sg);
 +	}
 +
-+	q->max_segment_size = max_size;
++	if (sg)
++		scatterwalk_sg_chain(head, 2, sg);
++	else
++		sg_mark_end(head);
 +}
 +
-+EXPORT_SYMBOL(blk_queue_max_segment_size);
-+
-+/**
-+ * blk_queue_hardsect_size - set hardware sector size for the queue
-+ * @q:  the request queue for the device
-+ * @size:  the hardware sector size, in bytes
-+ *
-+ * Description:
-+ *   This should typically be set to the lowest possible sector size
-+ *   that the hardware can operate on (possible without reverting to
-+ *   even internal read-modify-write operations). Usually the default
-+ *   of 512 covers most hardware.
-+ **/
-+void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
++static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
 +{
-+	q->hardsect_size = size;
-+}
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
++	struct ablkcipher_request *subreq;
++	crypto_completion_t complete;
++	void *data;
++	struct scatterlist *osrc, *odst;
++	struct scatterlist *dst;
++	struct page *srcp;
++	struct page *dstp;
++	u8 *giv;
++	u8 *vsrc;
++	u8 *vdst;
++	__be64 seq;
++	unsigned int ivsize;
++	unsigned int len;
++	int err;
 +
-+EXPORT_SYMBOL(blk_queue_hardsect_size);
++	subreq = (void *)(reqctx->tail + ctx->reqoff);
++	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
 +
-+/*
-+ * Returns the minimum that is _not_ zero, unless both are zero.
-+ */
-+#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
++	giv = req->giv;
++	complete = req->creq.base.complete;
++	data = req->creq.base.data;
 +
-+/**
-+ * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
-+ * @t:	the stacking driver (top)
-+ * @b:  the underlying device (bottom)
-+ **/
-+void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
-+{
-+	/* zero is "infinity" */
-+	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
++	osrc = req->creq.src;
++	odst = req->creq.dst;
++	srcp = sg_page(osrc);
++	dstp = sg_page(odst);
++	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
++	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;
 +
-+	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-+	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-+	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-+	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
-+	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-+		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
-+}
++	ivsize = crypto_ablkcipher_ivsize(geniv);
 +
-+EXPORT_SYMBOL(blk_queue_stack_limits);
++	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
++		giv = PTR_ALIGN((u8 *)reqctx->tail,
++				crypto_ablkcipher_alignmask(geniv) + 1);
++		complete = eseqiv_complete;
++		data = req;
++	}
 +
-+/**
-+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
-+ *
-+ * @q:  the request queue for the device
-+ * @buf:	physically contiguous buffer
-+ * @size:	size of the buffer in bytes
-+ *
-+ * Some devices have excess DMA problems and can't simply discard (or
-+ * zero fill) the unwanted piece of the transfer.  They have to have a
-+ * real area of memory to transfer it into.  The use case for this is
-+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
-+ * bigger than the transfer size some HBAs will lock up if there
-+ * aren't DMA elements to contain the excess transfer.  What this API
-+ * does is adjust the queue so that the buf is always appended
-+ * silently to the scatterlist.
-+ *
-+ * Note: This routine adjusts max_hw_segments to make room for
-+ * appending the drain buffer.  If you call
-+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
-+ * calling this routine, you must set the limit to one fewer than your
-+ * device can support otherwise there won't be room for the drain
-+ * buffer.
-+ */
-+int blk_queue_dma_drain(struct request_queue *q, void *buf,
-+				unsigned int size)
-+{
-+	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
-+		return -EINVAL;
-+	/* make room for appending the drain */
-+	--q->max_hw_segments;
-+	--q->max_phys_segments;
-+	q->dma_drain_buffer = buf;
-+	q->dma_drain_size = size;
++	ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
++					data);
 +
-+	return 0;
-+}
++	sg_init_table(reqctx->src, 2);
++	sg_set_buf(reqctx->src, giv, ivsize);
++	eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
 +
-+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
++	dst = reqctx->src;
++	if (osrc != odst) {
++		sg_init_table(reqctx->dst, 2);
++		sg_set_buf(reqctx->dst, giv, ivsize);
++		eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
 +
-+/**
-+ * blk_queue_segment_boundary - set boundary rules for segment merging
-+ * @q:  the request queue for the device
-+ * @mask:  the memory boundary mask
-+ **/
-+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-+{
-+	if (mask < PAGE_CACHE_SIZE - 1) {
-+		mask = PAGE_CACHE_SIZE - 1;
-+		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
++		dst = reqctx->dst;
 +	}
 +
-+	q->seg_boundary_mask = mask;
-+}
++	ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
++				     req->creq.nbytes, req->creq.info);
 +
-+EXPORT_SYMBOL(blk_queue_segment_boundary);
++	memcpy(req->creq.info, ctx->salt, ivsize);
 +
-+/**
-+ * blk_queue_dma_alignment - set dma length and memory alignment
-+ * @q:     the request queue for the device
-+ * @mask:  alignment mask
-+ *
-+ * description:
-+ *    set required memory and length aligment for direct dma transactions.
-+ *    this is used when buiding direct io requests for the queue.
-+ *
-+ **/
-+void blk_queue_dma_alignment(struct request_queue *q, int mask)
-+{
-+	q->dma_alignment = mask;
-+}
++	len = ivsize;
++	if (ivsize > sizeof(u64)) {
++		memset(req->giv, 0, ivsize - sizeof(u64));
++		len = sizeof(u64);
++	}
++	seq = cpu_to_be64(req->seq);
++	memcpy(req->giv + ivsize - len, &seq, len);
 +
-+EXPORT_SYMBOL(blk_queue_dma_alignment);
++	err = crypto_ablkcipher_encrypt(subreq);
++	if (err)
++		goto out;
 +
-+/**
-+ * blk_queue_update_dma_alignment - update dma length and memory alignment
-+ * @q:     the request queue for the device
-+ * @mask:  alignment mask
-+ *
-+ * description:
-+ *    update required memory and length aligment for direct dma transactions.
-+ *    If the requested alignment is larger than the current alignment, then
-+ *    the current queue alignment is updated to the new value, otherwise it
-+ *    is left alone.  The design of this is to allow multiple objects
-+ *    (driver, device, transport etc) to set their respective
-+ *    alignments without having them interfere.
-+ *
-+ **/
-+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-+{
-+	BUG_ON(mask > PAGE_SIZE);
++	eseqiv_complete2(req);
 +
-+	if (mask > q->dma_alignment)
-+		q->dma_alignment = mask;
++out:
++	return err;
 +}
 +
-+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-+
-+int __init blk_settings_init(void)
++static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
 +{
-+	blk_max_low_pfn = max_low_pfn - 1;
-+	blk_max_pfn = max_pfn - 1;
-+	return 0;
-+}
-+subsys_initcall(blk_settings_init);
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-new file mode 100644
-index 0000000..bc28776
---- /dev/null
-+++ b/block/blk-sysfs.c
-@@ -0,0 +1,309 @@
-+/*
-+ * Functions related to sysfs handling
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
-+#include <linux/blktrace_api.h>
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
 +
-+#include "blk.h"
++	spin_lock_bh(&ctx->lock);
++	if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first)
++		goto unlock;
 +
-+struct queue_sysfs_entry {
-+	struct attribute attr;
-+	ssize_t (*show)(struct request_queue *, char *);
-+	ssize_t (*store)(struct request_queue *, const char *, size_t);
-+};
++	crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
++	get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv));
 +
-+static ssize_t
-+queue_var_show(unsigned int var, char *page)
-+{
-+	return sprintf(page, "%d\n", var);
++unlock:
++	spin_unlock_bh(&ctx->lock);
++
++	return eseqiv_givencrypt(req);
 +}
 +
-+static ssize_t
-+queue_var_store(unsigned long *var, const char *page, size_t count)
++static int eseqiv_init(struct crypto_tfm *tfm)
 +{
-+	char *p = (char *) page;
++	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
++	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++	unsigned long alignmask;
++	unsigned int reqsize;
 +
-+	*var = simple_strtoul(p, &p, 10);
-+	return count;
-+}
++	spin_lock_init(&ctx->lock);
 +
-+static ssize_t queue_requests_show(struct request_queue *q, char *page)
-+{
-+	return queue_var_show(q->nr_requests, (page));
-+}
++	alignmask = crypto_tfm_ctx_alignment() - 1;
++	reqsize = sizeof(struct eseqiv_request_ctx);
 +
-+static ssize_t
-+queue_requests_store(struct request_queue *q, const char *page, size_t count)
-+{
-+	struct request_list *rl = &q->rq;
-+	unsigned long nr;
-+	int ret = queue_var_store(&nr, page, count);
-+	if (nr < BLKDEV_MIN_RQ)
-+		nr = BLKDEV_MIN_RQ;
++	if (alignmask & reqsize) {
++		alignmask &= reqsize;
++		alignmask--;
++	}
 +
-+	spin_lock_irq(q->queue_lock);
-+	q->nr_requests = nr;
-+	blk_queue_congestion_threshold(q);
++	alignmask = ~alignmask;
++	alignmask &= crypto_ablkcipher_alignmask(geniv);
 +
-+	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-+		blk_set_queue_congested(q, READ);
-+	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-+		blk_clear_queue_congested(q, READ);
++	reqsize += alignmask;
++	reqsize += crypto_ablkcipher_ivsize(geniv);
++	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
 +
-+	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-+		blk_set_queue_congested(q, WRITE);
-+	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-+		blk_clear_queue_congested(q, WRITE);
++	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);
 +
-+	if (rl->count[READ] >= q->nr_requests) {
-+		blk_set_queue_full(q, READ);
-+	} else if (rl->count[READ]+1 <= q->nr_requests) {
-+		blk_clear_queue_full(q, READ);
-+		wake_up(&rl->wait[READ]);
-+	}
++	tfm->crt_ablkcipher.reqsize = reqsize +
++				      sizeof(struct ablkcipher_request);
 +
-+	if (rl->count[WRITE] >= q->nr_requests) {
-+		blk_set_queue_full(q, WRITE);
-+	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
-+		blk_clear_queue_full(q, WRITE);
-+		wake_up(&rl->wait[WRITE]);
-+	}
-+	spin_unlock_irq(q->queue_lock);
-+	return ret;
++	return skcipher_geniv_init(tfm);
 +}
 +
-+static ssize_t queue_ra_show(struct request_queue *q, char *page)
++static struct crypto_template eseqiv_tmpl;
++
++static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
 +{
-+	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
++	struct crypto_instance *inst;
++	int err;
 +
-+	return queue_var_show(ra_kb, (page));
-+}
++	inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
++	if (IS_ERR(inst))
++		goto out;
 +
-+static ssize_t
-+queue_ra_store(struct request_queue *q, const char *page, size_t count)
-+{
-+	unsigned long ra_kb;
-+	ssize_t ret = queue_var_store(&ra_kb, page, count);
++	err = -EINVAL;
++	if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
++		goto free_inst;
 +
-+	spin_lock_irq(q->queue_lock);
-+	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
-+	spin_unlock_irq(q->queue_lock);
++	inst->alg.cra_ablkcipher.givencrypt = eseqiv_givencrypt_first;
 +
-+	return ret;
-+}
++	inst->alg.cra_init = eseqiv_init;
++	inst->alg.cra_exit = skcipher_geniv_exit;
 +
-+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-+{
-+	int max_sectors_kb = q->max_sectors >> 1;
++	inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
++	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
 +
-+	return queue_var_show(max_sectors_kb, (page));
++out:
++	return inst;
++
++free_inst:
++	skcipher_geniv_free(inst);
++	inst = ERR_PTR(err);
++	goto out;
 +}
 +
-+static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
++static struct crypto_template eseqiv_tmpl = {
++	.name = "eseqiv",
++	.alloc = eseqiv_alloc,
++	.free = skcipher_geniv_free,
++	.module = THIS_MODULE,
++};
++
++static int __init eseqiv_module_init(void)
 +{
-+	return queue_var_show(q->hardsect_size, page);
++	return crypto_register_template(&eseqiv_tmpl);
 +}
 +
-+static ssize_t
-+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
++static void __exit eseqiv_module_exit(void)
 +{
-+	unsigned long max_sectors_kb,
-+			max_hw_sectors_kb = q->max_hw_sectors >> 1,
-+			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
-+	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
-+
-+	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
-+		return -EINVAL;
-+	/*
-+	 * Take the queue lock to update the readahead and max_sectors
-+	 * values synchronously:
-+	 */
-+	spin_lock_irq(q->queue_lock);
-+	q->max_sectors = max_sectors_kb << 1;
-+	spin_unlock_irq(q->queue_lock);
-+
-+	return ret;
++	crypto_unregister_template(&eseqiv_tmpl);
 +}
 +
-+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
-+{
-+	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
++module_init(eseqiv_module_init);
++module_exit(eseqiv_module_exit);
 +
-+	return queue_var_show(max_hw_sectors_kb, (page));
-+}
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+new file mode 100644
+index 0000000..e70afd0
+--- /dev/null
++++ b/crypto/gcm.c
+@@ -0,0 +1,823 @@
++/*
++ * GCM: Galois/Counter Mode.
++ *
++ * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1 at iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
 +
++#include <crypto/gf128mul.h>
++#include <crypto/internal/aead.h>
++#include <crypto/internal/skcipher.h>
++#include <crypto/scatterwalk.h>
++#include <linux/completion.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
 +
-+static struct queue_sysfs_entry queue_requests_entry = {
-+	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
-+	.show = queue_requests_show,
-+	.store = queue_requests_store,
++struct gcm_instance_ctx {
++	struct crypto_skcipher_spawn ctr;
 +};
 +
-+static struct queue_sysfs_entry queue_ra_entry = {
-+	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
-+	.show = queue_ra_show,
-+	.store = queue_ra_store,
++struct crypto_gcm_ctx {
++	struct crypto_ablkcipher *ctr;
++	struct gf128mul_4k *gf128;
 +};
 +
-+static struct queue_sysfs_entry queue_max_sectors_entry = {
-+	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
-+	.show = queue_max_sectors_show,
-+	.store = queue_max_sectors_store,
++struct crypto_rfc4106_ctx {
++	struct crypto_aead *child;
++	u8 nonce[4];
 +};
 +
-+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
-+	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
-+	.show = queue_max_hw_sectors_show,
++struct crypto_gcm_ghash_ctx {
++	u32 bytes;
++	u32 flags;
++	struct gf128mul_4k *gf128;
++	u8 buffer[16];
 +};
 +
-+static struct queue_sysfs_entry queue_iosched_entry = {
-+	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
-+	.show = elv_iosched_show,
-+	.store = elv_iosched_store,
++struct crypto_gcm_req_priv_ctx {
++	u8 auth_tag[16];
++	u8 iauth_tag[16];
++	struct scatterlist src[2];
++	struct scatterlist dst[2];
++	struct crypto_gcm_ghash_ctx ghash;
++	struct ablkcipher_request abreq;
 +};
 +
-+static struct queue_sysfs_entry queue_hw_sector_size_entry = {
-+	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
-+	.show = queue_hw_sector_size_show,
++struct crypto_gcm_setkey_result {
++	int err;
++	struct completion completion;
 +};
 +
-+static struct attribute *default_attrs[] = {
-+	&queue_requests_entry.attr,
-+	&queue_ra_entry.attr,
-+	&queue_max_hw_sectors_entry.attr,
-+	&queue_max_sectors_entry.attr,
-+	&queue_iosched_entry.attr,
-+	&queue_hw_sector_size_entry.attr,
-+	NULL,
-+};
++static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
++	struct aead_request *req)
++{
++	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
 +
-+#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
++	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
++}
 +
-+static ssize_t
-+queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
++static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags,
++				  struct gf128mul_4k *gf128)
 +{
-+	struct queue_sysfs_entry *entry = to_queue(attr);
-+	struct request_queue *q =
-+		container_of(kobj, struct request_queue, kobj);
-+	ssize_t res;
-+
-+	if (!entry->show)
-+		return -EIO;
-+	mutex_lock(&q->sysfs_lock);
-+	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
-+		mutex_unlock(&q->sysfs_lock);
-+		return -ENOENT;
-+	}
-+	res = entry->show(q, page);
-+	mutex_unlock(&q->sysfs_lock);
-+	return res;
++	ctx->bytes = 0;
++	ctx->flags = flags;
++	ctx->gf128 = gf128;
++	memset(ctx->buffer, 0, 16);
 +}
 +
-+static ssize_t
-+queue_attr_store(struct kobject *kobj, struct attribute *attr,
-+		    const char *page, size_t length)
++static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
++				    const u8 *src, unsigned int srclen)
 +{
-+	struct queue_sysfs_entry *entry = to_queue(attr);
-+	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
++	u8 *dst = ctx->buffer;
 +
-+	ssize_t res;
++	if (ctx->bytes) {
++		int n = min(srclen, ctx->bytes);
++		u8 *pos = dst + (16 - ctx->bytes);
 +
-+	if (!entry->store)
-+		return -EIO;
-+	mutex_lock(&q->sysfs_lock);
-+	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
-+		mutex_unlock(&q->sysfs_lock);
-+		return -ENOENT;
++		ctx->bytes -= n;
++		srclen -= n;
++
++		while (n--)
++			*pos++ ^= *src++;
++
++		if (!ctx->bytes)
++			gf128mul_4k_lle((be128 *)dst, ctx->gf128);
++	}
++
++	while (srclen >= 16) {
++		crypto_xor(dst, src, 16);
++		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
++		src += 16;
++		srclen -= 16;
++	}
++
++	if (srclen) {
++		ctx->bytes = 16 - srclen;
++		while (srclen--)
++			*dst++ ^= *src++;
 +	}
-+	res = entry->store(q, page, length);
-+	mutex_unlock(&q->sysfs_lock);
-+	return res;
 +}
 +
-+/**
-+ * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
-+ * @kobj:    the kobj belonging of the request queue to be released
-+ *
-+ * Description:
-+ *     blk_cleanup_queue is the pair to blk_init_queue() or
-+ *     blk_queue_make_request().  It should be called when a request queue is
-+ *     being released; typically when a block device is being de-registered.
-+ *     Currently, its primary task it to free all the &struct request
-+ *     structures that were allocated to the queue and the queue itself.
-+ *
-+ * Caveat:
-+ *     Hopefully the low level driver will have finished any
-+ *     outstanding requests first...
-+ **/
-+static void blk_release_queue(struct kobject *kobj)
++static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
++				       struct scatterlist *sg, int len)
 +{
-+	struct request_queue *q =
-+		container_of(kobj, struct request_queue, kobj);
-+	struct request_list *rl = &q->rq;
++	struct scatter_walk walk;
++	u8 *src;
++	int n;
 +
-+	blk_sync_queue(q);
++	if (!len)
++		return;
 +
-+	if (rl->rq_pool)
-+		mempool_destroy(rl->rq_pool);
++	scatterwalk_start(&walk, sg);
 +
-+	if (q->queue_tags)
-+		__blk_queue_free_tags(q);
++	while (len) {
++		n = scatterwalk_clamp(&walk, len);
 +
-+	blk_trace_shutdown(q);
++		if (!n) {
++			scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
++			n = scatterwalk_clamp(&walk, len);
++		}
 +
-+	bdi_destroy(&q->backing_dev_info);
-+	kmem_cache_free(blk_requestq_cachep, q);
-+}
++		src = scatterwalk_map(&walk, 0);
 +
-+static struct sysfs_ops queue_sysfs_ops = {
-+	.show	= queue_attr_show,
-+	.store	= queue_attr_store,
-+};
++		crypto_gcm_ghash_update(ctx, src, n);
++		len -= n;
 +
-+struct kobj_type blk_queue_ktype = {
-+	.sysfs_ops	= &queue_sysfs_ops,
-+	.default_attrs	= default_attrs,
-+	.release	= blk_release_queue,
-+};
++		scatterwalk_unmap(src, 0);
++		scatterwalk_advance(&walk, n);
++		scatterwalk_done(&walk, 0, len);
++		if (len)
++			crypto_yield(ctx->flags);
++	}
++}
 +
-+int blk_register_queue(struct gendisk *disk)
++static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx)
 +{
-+	int ret;
-+
-+	struct request_queue *q = disk->queue;
-+
-+	if (!q || !q->request_fn)
-+		return -ENXIO;
++	u8 *dst = ctx->buffer;
 +
-+	ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
-+			  "%s", "queue");
-+	if (ret < 0)
-+		return ret;
++	if (ctx->bytes) {
++		u8 *tmp = dst + (16 - ctx->bytes);
 +
-+	kobject_uevent(&q->kobj, KOBJ_ADD);
++		while (ctx->bytes--)
++			*tmp++ ^= 0;
 +
-+	ret = elv_register_queue(q);
-+	if (ret) {
-+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
-+		kobject_del(&q->kobj);
-+		return ret;
++		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
 +	}
 +
-+	return 0;
++	ctx->bytes = 0;
 +}
 +
-+void blk_unregister_queue(struct gendisk *disk)
++static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
++				       unsigned int authlen,
++				       unsigned int cryptlen, u8 *dst)
 +{
-+	struct request_queue *q = disk->queue;
++	u8 *buf = ctx->buffer;
++	u128 lengths;
 +
-+	if (q && q->request_fn) {
-+		elv_unregister_queue(q);
++	lengths.a = cpu_to_be64(authlen * 8);
++	lengths.b = cpu_to_be64(cryptlen * 8);
 +
-+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
-+		kobject_del(&q->kobj);
-+		kobject_put(&disk->dev.kobj);
-+	}
++	crypto_gcm_ghash_flush(ctx);
++	crypto_xor(buf, (u8 *)&lengths, 16);
++	gf128mul_4k_lle((be128 *)buf, ctx->gf128);
++	crypto_xor(dst, buf, 16);
 +}
-diff --git a/block/blk-tag.c b/block/blk-tag.c
-new file mode 100644
-index 0000000..d1fd300
---- /dev/null
-+++ b/block/blk-tag.c
-@@ -0,0 +1,396 @@
-+/*
-+ * Functions related to tagged command queuing
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
 +
-+/**
-+ * blk_queue_find_tag - find a request by its tag and queue
-+ * @q:	 The request queue for the device
-+ * @tag: The tag of the request
-+ *
-+ * Notes:
-+ *    Should be used when a device returns a tag and you want to match
-+ *    it with a request.
-+ *
-+ *    no locks need be held.
-+ **/
-+struct request *blk_queue_find_tag(struct request_queue *q, int tag)
++static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
 +{
-+	return blk_map_queue_find_tag(q->queue_tags, tag);
-+}
++	struct crypto_gcm_setkey_result *result = req->data;
 +
-+EXPORT_SYMBOL(blk_queue_find_tag);
++	if (err == -EINPROGRESS)
++		return;
 +
-+/**
-+ * __blk_free_tags - release a given set of tag maintenance info
-+ * @bqt:	the tag map to free
-+ *
-+ * Tries to free the specified @bqt at .  Returns true if it was
-+ * actually freed and false if there are still references using it
-+ */
-+static int __blk_free_tags(struct blk_queue_tag *bqt)
++	result->err = err;
++	complete(&result->completion);
++}
++
++static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
++			     unsigned int keylen)
 +{
-+	int retval;
++	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
++	struct crypto_ablkcipher *ctr = ctx->ctr;
++	struct {
++		be128 hash;
++		u8 iv[8];
 +
-+	retval = atomic_dec_and_test(&bqt->refcnt);
-+	if (retval) {
-+		BUG_ON(bqt->busy);
++		struct crypto_gcm_setkey_result result;
 +
-+		kfree(bqt->tag_index);
-+		bqt->tag_index = NULL;
++		struct scatterlist sg[1];
++		struct ablkcipher_request req;
++	} *data;
++	int err;
 +
-+		kfree(bqt->tag_map);
-+		bqt->tag_map = NULL;
++	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
++	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
++				   CRYPTO_TFM_REQ_MASK);
 +
-+		kfree(bqt);
++	err = crypto_ablkcipher_setkey(ctr, key, keylen);
++	if (err)
++		return err;
++
++	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
++				       CRYPTO_TFM_RES_MASK);
++
++	data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
++		       GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	init_completion(&data->result.completion);
++	sg_init_one(data->sg, &data->hash, sizeof(data->hash));
++	ablkcipher_request_set_tfm(&data->req, ctr);
++	ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
++						    CRYPTO_TFM_REQ_MAY_BACKLOG,
++					crypto_gcm_setkey_done,
++					&data->result);
++	ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
++				     sizeof(data->hash), data->iv);
++
++	err = crypto_ablkcipher_encrypt(&data->req);
++	if (err == -EINPROGRESS || err == -EBUSY) {
++		err = wait_for_completion_interruptible(
++			&data->result.completion);
++		if (!err)
++			err = data->result.err;
 +	}
 +
-+	return retval;
-+}
++	if (err)
++		goto out;
 +
-+/**
-+ * __blk_queue_free_tags - release tag maintenance info
-+ * @q:  the request queue for the device
-+ *
-+ *  Notes:
-+ *    blk_cleanup_queue() will take care of calling this function, if tagging
-+ *    has been used. So there's no need to call this directly.
-+ **/
-+void __blk_queue_free_tags(struct request_queue *q)
-+{
-+	struct blk_queue_tag *bqt = q->queue_tags;
++	if (ctx->gf128 != NULL)
++		gf128mul_free_4k(ctx->gf128);
 +
-+	if (!bqt)
-+		return;
++	ctx->gf128 = gf128mul_init_4k_lle(&data->hash);
 +
-+	__blk_free_tags(bqt);
++	if (ctx->gf128 == NULL)
++		err = -ENOMEM;
 +
-+	q->queue_tags = NULL;
-+	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
++out:
++	kfree(data);
++	return err;
 +}
 +
-+/**
-+ * blk_free_tags - release a given set of tag maintenance info
-+ * @bqt:	the tag map to free
-+ *
-+ * For externally managed @bqt@ frees the map.  Callers of this
-+ * function must guarantee to have released all the queues that
-+ * might have been using this tag map.
-+ */
-+void blk_free_tags(struct blk_queue_tag *bqt)
++static int crypto_gcm_setauthsize(struct crypto_aead *tfm,
++				  unsigned int authsize)
 +{
-+	if (unlikely(!__blk_free_tags(bqt)))
-+		BUG();
++	switch (authsize) {
++	case 4:
++	case 8:
++	case 12:
++	case 13:
++	case 14:
++	case 15:
++	case 16:
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return 0;
 +}
-+EXPORT_SYMBOL(blk_free_tags);
 +
-+/**
-+ * blk_queue_free_tags - release tag maintenance info
-+ * @q:  the request queue for the device
-+ *
-+ *  Notes:
-+ *	This is used to disabled tagged queuing to a device, yet leave
-+ *	queue in function.
-+ **/
-+void blk_queue_free_tags(struct request_queue *q)
++static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
++				  struct aead_request *req,
++				  unsigned int cryptlen)
 +{
-+	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
-+}
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
++	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
++	u32 flags = req->base.tfm->crt_flags;
++	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
++	struct scatterlist *dst;
++	__be32 counter = cpu_to_be32(1);
 +
-+EXPORT_SYMBOL(blk_queue_free_tags);
++	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
++	memcpy(req->iv + 12, &counter, 4);
 +
-+static int
-+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
-+{
-+	struct request **tag_index;
-+	unsigned long *tag_map;
-+	int nr_ulongs;
++	sg_init_table(pctx->src, 2);
++	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
++	scatterwalk_sg_chain(pctx->src, 2, req->src);
 +
-+	if (q && depth > q->nr_requests * 2) {
-+		depth = q->nr_requests * 2;
-+		printk(KERN_ERR "%s: adjusted depth to %d\n",
-+				__FUNCTION__, depth);
++	dst = pctx->src;
++	if (req->src != req->dst) {
++		sg_init_table(pctx->dst, 2);
++		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
++		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
++		dst = pctx->dst;
 +	}
 +
-+	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
-+	if (!tag_index)
-+		goto fail;
-+
-+	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-+	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
-+	if (!tag_map)
-+		goto fail;
++	ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
++	ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
++				     cryptlen + sizeof(pctx->auth_tag),
++				     req->iv);
 +
-+	tags->real_max_depth = depth;
-+	tags->max_depth = depth;
-+	tags->tag_index = tag_index;
-+	tags->tag_map = tag_map;
++	crypto_gcm_ghash_init(ghash, flags, ctx->gf128);
 +
-+	return 0;
-+fail:
-+	kfree(tag_index);
-+	return -ENOMEM;
++	crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen);
++	crypto_gcm_ghash_flush(ghash);
 +}
 +
-+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
-+						   int depth)
++static int crypto_gcm_hash(struct aead_request *req)
 +{
-+	struct blk_queue_tag *tags;
-+
-+	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
-+	if (!tags)
-+		goto fail;
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
++	u8 *auth_tag = pctx->auth_tag;
++	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
 +
-+	if (init_tag_map(q, tags, depth))
-+		goto fail;
++	crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen);
++	crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen,
++				   auth_tag);
 +
-+	tags->busy = 0;
-+	atomic_set(&tags->refcnt, 1);
-+	return tags;
-+fail:
-+	kfree(tags);
-+	return NULL;
++	scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
++				 crypto_aead_authsize(aead), 1);
++	return 0;
 +}
 +
-+/**
-+ * blk_init_tags - initialize the tag info for an external tag map
-+ * @depth:	the maximum queue depth supported
-+ * @tags: the tag to use
-+ **/
-+struct blk_queue_tag *blk_init_tags(int depth)
++static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err)
 +{
-+	return __blk_queue_init_tags(NULL, depth);
++	struct aead_request *req = areq->data;
++
++	if (!err)
++		err = crypto_gcm_hash(req);
++
++	aead_request_complete(req, err);
 +}
-+EXPORT_SYMBOL(blk_init_tags);
 +
-+/**
-+ * blk_queue_init_tags - initialize the queue tag info
-+ * @q:  the request queue for the device
-+ * @depth:  the maximum queue depth supported
-+ * @tags: the tag to use
-+ **/
-+int blk_queue_init_tags(struct request_queue *q, int depth,
-+			struct blk_queue_tag *tags)
++static int crypto_gcm_encrypt(struct aead_request *req)
 +{
-+	int rc;
-+
-+	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
++	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
++	struct ablkcipher_request *abreq = &pctx->abreq;
++	int err;
 +
-+	if (!tags && !q->queue_tags) {
-+		tags = __blk_queue_init_tags(q, depth);
++	crypto_gcm_init_crypt(abreq, req, req->cryptlen);
++	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
++					crypto_gcm_encrypt_done, req);
 +
-+		if (!tags)
-+			goto fail;
-+	} else if (q->queue_tags) {
-+		if ((rc = blk_queue_resize_tags(q, depth)))
-+			return rc;
-+		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
-+		return 0;
-+	} else
-+		atomic_inc(&tags->refcnt);
++	err = crypto_ablkcipher_encrypt(abreq);
++	if (err)
++		return err;
 +
-+	/*
-+	 * assign it, all done
-+	 */
-+	q->queue_tags = tags;
-+	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
-+	INIT_LIST_HEAD(&q->tag_busy_list);
-+	return 0;
-+fail:
-+	kfree(tags);
-+	return -ENOMEM;
++	return crypto_gcm_hash(req);
 +}
 +
-+EXPORT_SYMBOL(blk_queue_init_tags);
-+
-+/**
-+ * blk_queue_resize_tags - change the queueing depth
-+ * @q:  the request queue for the device
-+ * @new_depth: the new max command queueing depth
-+ *
-+ *  Notes:
-+ *    Must be called with the queue lock held.
-+ **/
-+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
++static int crypto_gcm_verify(struct aead_request *req)
 +{
-+	struct blk_queue_tag *bqt = q->queue_tags;
-+	struct request **tag_index;
-+	unsigned long *tag_map;
-+	int max_depth, nr_ulongs;
-+
-+	if (!bqt)
-+		return -ENXIO;
-+
-+	/*
-+	 * if we already have large enough real_max_depth.  just
-+	 * adjust max_depth.  *NOTE* as requests with tag value
-+	 * between new_depth and real_max_depth can be in-flight, tag
-+	 * map can not be shrunk blindly here.
-+	 */
-+	if (new_depth <= bqt->real_max_depth) {
-+		bqt->max_depth = new_depth;
-+		return 0;
-+	}
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
++	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
++	u8 *auth_tag = pctx->auth_tag;
++	u8 *iauth_tag = pctx->iauth_tag;
++	unsigned int authsize = crypto_aead_authsize(aead);
++	unsigned int cryptlen = req->cryptlen - authsize;
 +
-+	/*
-+	 * Currently cannot replace a shared tag map with a new
-+	 * one, so error out if this is the case
-+	 */
-+	if (atomic_read(&bqt->refcnt) != 1)
-+		return -EBUSY;
++	crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag);
 +
-+	/*
-+	 * save the old state info, so we can copy it back
-+	 */
-+	tag_index = bqt->tag_index;
-+	tag_map = bqt->tag_map;
-+	max_depth = bqt->real_max_depth;
++	authsize = crypto_aead_authsize(aead);
++	scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
++	return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
++}
 +
-+	if (init_tag_map(q, bqt, new_depth))
-+		return -ENOMEM;
++static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err)
++{
++	struct aead_request *req = areq->data;
 +
-+	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
-+	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
-+	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
++	if (!err)
++		err = crypto_gcm_verify(req);
 +
-+	kfree(tag_index);
-+	kfree(tag_map);
-+	return 0;
++	aead_request_complete(req, err);
 +}
 +
-+EXPORT_SYMBOL(blk_queue_resize_tags);
-+
-+/**
-+ * blk_queue_end_tag - end tag operations for a request
-+ * @q:  the request queue for the device
-+ * @rq: the request that has completed
-+ *
-+ *  Description:
-+ *    Typically called when end_that_request_first() returns 0, meaning
-+ *    all transfers have been done for a request. It's important to call
-+ *    this function before end_that_request_last(), as that will put the
-+ *    request back on the free list thus corrupting the internal tag list.
-+ *
-+ *  Notes:
-+ *   queue lock must be held.
-+ **/
-+void blk_queue_end_tag(struct request_queue *q, struct request *rq)
++static int crypto_gcm_decrypt(struct aead_request *req)
 +{
-+	struct blk_queue_tag *bqt = q->queue_tags;
-+	int tag = rq->tag;
-+
-+	BUG_ON(tag == -1);
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
++	struct ablkcipher_request *abreq = &pctx->abreq;
++	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
++	unsigned int cryptlen = req->cryptlen;
++	unsigned int authsize = crypto_aead_authsize(aead);
++	int err;
 +
-+	if (unlikely(tag >= bqt->real_max_depth))
-+		/*
-+		 * This can happen after tag depth has been reduced.
-+		 * FIXME: how about a warning or info message here?
-+		 */
-+		return;
++	if (cryptlen < authsize)
++		return -EINVAL;
++	cryptlen -= authsize;
 +
-+	list_del_init(&rq->queuelist);
-+	rq->cmd_flags &= ~REQ_QUEUED;
-+	rq->tag = -1;
++	crypto_gcm_init_crypt(abreq, req, cryptlen);
++	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
++					crypto_gcm_decrypt_done, req);
 +
-+	if (unlikely(bqt->tag_index[tag] == NULL))
-+		printk(KERN_ERR "%s: tag %d is missing\n",
-+		       __FUNCTION__, tag);
++	crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen);
 +
-+	bqt->tag_index[tag] = NULL;
++	err = crypto_ablkcipher_decrypt(abreq);
++	if (err)
++		return err;
 +
-+	if (unlikely(!test_bit(tag, bqt->tag_map))) {
-+		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-+		       __FUNCTION__, tag);
-+		return;
-+	}
-+	/*
-+	 * The tag_map bit acts as a lock for tag_index[bit], so we need
-+	 * unlock memory barrier semantics.
-+	 */
-+	clear_bit_unlock(tag, bqt->tag_map);
-+	bqt->busy--;
++	return crypto_gcm_verify(req);
 +}
 +
-+EXPORT_SYMBOL(blk_queue_end_tag);
-+
-+/**
-+ * blk_queue_start_tag - find a free tag and assign it
-+ * @q:  the request queue for the device
-+ * @rq:  the block request that needs tagging
-+ *
-+ *  Description:
-+ *    This can either be used as a stand-alone helper, or possibly be
-+ *    assigned as the queue &prep_rq_fn (in which case &struct request
-+ *    automagically gets a tag assigned). Note that this function
-+ *    assumes that any type of request can be queued! if this is not
-+ *    true for your device, you must check the request type before
-+ *    calling this function.  The request will also be removed from
-+ *    the request queue, so it's the drivers responsibility to readd
-+ *    it if it should need to be restarted for some reason.
-+ *
-+ *  Notes:
-+ *   queue lock must be held.
-+ **/
-+int blk_queue_start_tag(struct request_queue *q, struct request *rq)
++static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
 +{
-+	struct blk_queue_tag *bqt = q->queue_tags;
-+	int tag;
++	struct crypto_instance *inst = (void *)tfm->__crt_alg;
++	struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
++	struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct crypto_ablkcipher *ctr;
++	unsigned long align;
++	int err;
 +
-+	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
-+		printk(KERN_ERR 
-+		       "%s: request %p for device [%s] already tagged %d",
-+		       __FUNCTION__, rq,
-+		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
-+		BUG();
-+	}
++	ctr = crypto_spawn_skcipher(&ictx->ctr);
++	err = PTR_ERR(ctr);
++	if (IS_ERR(ctr))
++		return err;
 +
-+	/*
-+	 * Protect against shared tag maps, as we may not have exclusive
-+	 * access to the tag map.
-+	 */
-+	do {
-+		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-+		if (tag >= bqt->max_depth)
-+			return 1;
++	ctx->ctr = ctr;
++	ctx->gf128 = NULL;
 +
-+	} while (test_and_set_bit_lock(tag, bqt->tag_map));
-+	/*
-+	 * We need lock ordering semantics given by test_and_set_bit_lock.
-+	 * See blk_queue_end_tag for details.
-+	 */
++	align = crypto_tfm_alg_alignmask(tfm);
++	align &= ~(crypto_tfm_ctx_alignment() - 1);
++	tfm->crt_aead.reqsize = align +
++				sizeof(struct crypto_gcm_req_priv_ctx) +
++				crypto_ablkcipher_reqsize(ctr);
 +
-+	rq->cmd_flags |= REQ_QUEUED;
-+	rq->tag = tag;
-+	bqt->tag_index[tag] = rq;
-+	blkdev_dequeue_request(rq);
-+	list_add(&rq->queuelist, &q->tag_busy_list);
-+	bqt->busy++;
 +	return 0;
 +}
 +
-+EXPORT_SYMBOL(blk_queue_start_tag);
-+
-+/**
-+ * blk_queue_invalidate_tags - invalidate all pending tags
-+ * @q:  the request queue for the device
-+ *
-+ *  Description:
-+ *   Hardware conditions may dictate a need to stop all pending requests.
-+ *   In this case, we will safely clear the block side of the tag queue and
-+ *   readd all requests to the request queue in the right order.
-+ *
-+ *  Notes:
-+ *   queue lock must be held.
-+ **/
-+void blk_queue_invalidate_tags(struct request_queue *q)
++static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
 +{
-+	struct list_head *tmp, *n;
++	struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+	list_for_each_safe(tmp, n, &q->tag_busy_list)
-+		blk_requeue_request(q, list_entry_rq(tmp));
++	if (ctx->gf128 != NULL)
++		gf128mul_free_4k(ctx->gf128);
++
++	crypto_free_ablkcipher(ctx->ctr);
 +}
 +
-+EXPORT_SYMBOL(blk_queue_invalidate_tags);
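
The blk_queue_start_tag()/blk_queue_end_tag() helpers documented in the hunks above are the stock block-layer tagged-command API. A minimal sketch of how a driver typically drives them — assuming the queue's tag map was already set up with blk_queue_init_tags(), and with my_hw_issue() standing in for a hypothetical hardware submit hook — might look like:

/* Illustrative only -- not part of the patch. */
#include <linux/blkdev.h>

static void my_hw_issue(int tag, struct request *rq);	/* hypothetical hardware hook */

static void my_issue_rq(struct request_queue *q, struct request *rq)
{
	/* Caller holds q->queue_lock, as both tag helpers require. */
	if (blk_queue_start_tag(q, rq))
		return;			/* no free tag: try again later */

	my_hw_issue(rq->tag, rq);	/* submit to hardware by tag */
}

static void my_complete_rq(struct request_queue *q, struct request *rq)
{
	/* Also under q->queue_lock; release the tag before final completion. */
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, 1);
}

Note that blk_queue_start_tag() also dequeues the request, so the driver must re-add it itself if the hardware later rejects it, as the kernel-doc in the hunk above points out.
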
-diff --git a/block/blk.h b/block/blk.h
-new file mode 100644
-index 0000000..ec898dd
---- /dev/null
-+++ b/block/blk.h
-@@ -0,0 +1,53 @@
-+#ifndef BLK_INTERNAL_H
-+#define BLK_INTERNAL_H
++static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
++						       const char *full_name,
++						       const char *ctr_name)
++{
++	struct crypto_attr_type *algt;
++	struct crypto_instance *inst;
++	struct crypto_alg *ctr;
++	struct gcm_instance_ctx *ctx;
++	int err;
 +
-+/* Amount of time in which a process may batch requests */
-+#define BLK_BATCH_TIME	(HZ/50UL)
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
++		return ERR_PTR(err);
 +
-+/* Number of requests a "batching" process may submit */
-+#define BLK_BATCH_REQ	32
++	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
++		return ERR_PTR(-EINVAL);
 +
-+extern struct kmem_cache *blk_requestq_cachep;
-+extern struct kobj_type blk_queue_ktype;
++	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
++	if (!inst)
++		return ERR_PTR(-ENOMEM);
 +
-+void rq_init(struct request_queue *q, struct request *rq);
-+void init_request_from_bio(struct request *req, struct bio *bio);
-+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
-+			struct bio *bio);
-+void __blk_queue_free_tags(struct request_queue *q);
++	ctx = crypto_instance_ctx(inst);
++	crypto_set_skcipher_spawn(&ctx->ctr, inst);
++	err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
++				   crypto_requires_sync(algt->type,
++							algt->mask));
++	if (err)
++		goto err_free_inst;
 +
-+void blk_unplug_work(struct work_struct *work);
-+void blk_unplug_timeout(unsigned long data);
++	ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
 +
-+struct io_context *current_io_context(gfp_t gfp_flags, int node);
++	/* We only support 16-byte blocks. */
++	if (ctr->cra_ablkcipher.ivsize != 16)
++		goto out_put_ctr;
 +
-+int ll_back_merge_fn(struct request_queue *q, struct request *req,
-+		     struct bio *bio);
-+int ll_front_merge_fn(struct request_queue *q, struct request *req, 
-+		      struct bio *bio);
-+int attempt_back_merge(struct request_queue *q, struct request *rq);
-+int attempt_front_merge(struct request_queue *q, struct request *rq);
-+void blk_recalc_rq_segments(struct request *rq);
-+void blk_recalc_rq_sectors(struct request *rq, int nsect);
++	/* Not a stream cipher? */
++	err = -EINVAL;
++	if (ctr->cra_blocksize != 1)
++		goto out_put_ctr;
 +
-+void blk_queue_congestion_threshold(struct request_queue *q);
++	err = -ENAMETOOLONG;
++	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
++		     "gcm_base(%s)", ctr->cra_driver_name) >=
++	    CRYPTO_MAX_ALG_NAME)
++		goto out_put_ctr;
 +
-+/*
-+ * Return the threshold (number of used requests) at which the queue is
-+ * considered to be congested.  It include a little hysteresis to keep the
-+ * context switch rate down.
-+ */
-+static inline int queue_congestion_on_threshold(struct request_queue *q)
++	memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
++
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
++	inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
++	inst->alg.cra_priority = ctr->cra_priority;
++	inst->alg.cra_blocksize = 1;
++	inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1);
++	inst->alg.cra_type = &crypto_aead_type;
++	inst->alg.cra_aead.ivsize = 16;
++	inst->alg.cra_aead.maxauthsize = 16;
++	inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
++	inst->alg.cra_init = crypto_gcm_init_tfm;
++	inst->alg.cra_exit = crypto_gcm_exit_tfm;
++	inst->alg.cra_aead.setkey = crypto_gcm_setkey;
++	inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize;
++	inst->alg.cra_aead.encrypt = crypto_gcm_encrypt;
++	inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
++
++out:
++	return inst;
++
++out_put_ctr:
++	crypto_drop_skcipher(&ctx->ctr);
++err_free_inst:
++	kfree(inst);
++	inst = ERR_PTR(err);
++	goto out;
++}
++
++static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
 +{
-+	return q->nr_congestion_on;
++	int err;
++	const char *cipher_name;
++	char ctr_name[CRYPTO_MAX_ALG_NAME];
++	char full_name[CRYPTO_MAX_ALG_NAME];
++
++	cipher_name = crypto_attr_alg_name(tb[1]);
++	err = PTR_ERR(cipher_name);
++	if (IS_ERR(cipher_name))
++		return ERR_PTR(err);
++
++	if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >=
++	    CRYPTO_MAX_ALG_NAME)
++		return ERR_PTR(-ENAMETOOLONG);
++
++	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
++	    CRYPTO_MAX_ALG_NAME)
++		return ERR_PTR(-ENAMETOOLONG);
++
++	return crypto_gcm_alloc_common(tb, full_name, ctr_name);
 +}
 +
-+/*
-+ * The threshold at which a queue is considered to be uncongested
-+ */
-+static inline int queue_congestion_off_threshold(struct request_queue *q)
++static void crypto_gcm_free(struct crypto_instance *inst)
 +{
-+	return q->nr_congestion_off;
++	struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
++
++	crypto_drop_skcipher(&ctx->ctr);
++	kfree(inst);
 +}
 +
-+#endif
-diff --git a/block/blktrace.c b/block/blktrace.c
-index 9b4da4a..568588c 100644
---- a/block/blktrace.c
-+++ b/block/blktrace.c
-@@ -235,7 +235,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
- 	kfree(bt);
- }
- 
--static int blk_trace_remove(struct request_queue *q)
-+int blk_trace_remove(struct request_queue *q)
- {
- 	struct blk_trace *bt;
- 
-@@ -249,6 +249,7 @@ static int blk_trace_remove(struct request_queue *q)
- 
- 	return 0;
- }
-+EXPORT_SYMBOL_GPL(blk_trace_remove);
- 
- static int blk_dropped_open(struct inode *inode, struct file *filp)
- {
-@@ -316,18 +317,17 @@ static struct rchan_callbacks blk_relay_callbacks = {
- /*
-  * Setup everything required to start tracing
-  */
--int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
-+int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
- 			struct blk_user_trace_setup *buts)
- {
- 	struct blk_trace *old_bt, *bt = NULL;
- 	struct dentry *dir = NULL;
--	char b[BDEVNAME_SIZE];
- 	int ret, i;
- 
- 	if (!buts->buf_size || !buts->buf_nr)
- 		return -EINVAL;
- 
--	strcpy(buts->name, bdevname(bdev, b));
-+	strcpy(buts->name, name);
- 
- 	/*
- 	 * some device names have larger paths - convert the slashes
-@@ -352,7 +352,7 @@ int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
- 		goto err;
- 
- 	bt->dir = dir;
--	bt->dev = bdev->bd_dev;
-+	bt->dev = dev;
- 	atomic_set(&bt->dropped, 0);
- 
- 	ret = -EIO;
-@@ -399,8 +399,8 @@ err:
- 	return ret;
- }
- 
--static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
--			   char __user *arg)
-+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-+		    char __user *arg)
- {
- 	struct blk_user_trace_setup buts;
- 	int ret;
-@@ -409,7 +409,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
- 	if (ret)
- 		return -EFAULT;
- 
--	ret = do_blk_trace_setup(q, bdev, &buts);
-+	ret = do_blk_trace_setup(q, name, dev, &buts);
- 	if (ret)
- 		return ret;
- 
-@@ -418,8 +418,9 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
- 
- 	return 0;
- }
-+EXPORT_SYMBOL_GPL(blk_trace_setup);
- 
--static int blk_trace_startstop(struct request_queue *q, int start)
-+int blk_trace_startstop(struct request_queue *q, int start)
- {
- 	struct blk_trace *bt;
- 	int ret;
-@@ -452,6 +453,7 @@ static int blk_trace_startstop(struct request_queue *q, int start)
- 
- 	return ret;
- }
-+EXPORT_SYMBOL_GPL(blk_trace_startstop);
- 
- /**
-  * blk_trace_ioctl: - handle the ioctls associated with tracing
-@@ -464,6 +466,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
- {
- 	struct request_queue *q;
- 	int ret, start = 0;
-+	char b[BDEVNAME_SIZE];
- 
- 	q = bdev_get_queue(bdev);
- 	if (!q)
-@@ -473,7 +476,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
- 
- 	switch (cmd) {
- 	case BLKTRACESETUP:
--		ret = blk_trace_setup(q, bdev, arg);
-+		strcpy(b, bdevname(bdev, b));
-+		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
- 		break;
- 	case BLKTRACESTART:
- 		start = 1;
-diff --git a/block/bsg.c b/block/bsg.c
-index 8e181ab..69b0a9d 100644
---- a/block/bsg.c
-+++ b/block/bsg.c
-@@ -445,6 +445,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
- 	else
- 		hdr->dout_resid = rq->data_len;
- 
-+	/*
-+	 * If the request generated a negative error number, return it
-+	 * (providing we aren't already returning an error); if it's
-+	 * just a protocol response (i.e. non negative), that gets
-+	 * processed above.
-+	 */
-+	if (!ret && rq->errors < 0)
-+		ret = rq->errors;
++static struct crypto_template crypto_gcm_tmpl = {
++	.name = "gcm",
++	.alloc = crypto_gcm_alloc,
++	.free = crypto_gcm_free,
++	.module = THIS_MODULE,
++};
 +
- 	blk_rq_unmap_user(bio);
- 	blk_put_request(rq);
- 
-@@ -837,6 +846,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- {
- 	struct bsg_device *bd = file->private_data;
- 	int __user *uarg = (int __user *) arg;
-+	int ret;
- 
- 	switch (cmd) {
- 		/*
-@@ -889,12 +899,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- 		if (rq->next_rq)
- 			bidi_bio = rq->next_rq->bio;
- 		blk_execute_rq(bd->queue, NULL, rq, 0);
--		blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
-+		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
- 
- 		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
- 			return -EFAULT;
- 
--		return 0;
-+		return ret;
- 	}
- 	/*
- 	 * block device ioctls
-diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
-index 13553e0..f28d1fb 100644
---- a/block/cfq-iosched.c
-+++ b/block/cfq-iosched.c
-@@ -26,9 +26,9 @@ static const int cfq_slice_async_rq = 2;
- static int cfq_slice_idle = HZ / 125;
- 
- /*
-- * grace period before allowing idle class to get disk access
-+ * offset from end of service tree
-  */
--#define CFQ_IDLE_GRACE		(HZ / 10)
-+#define CFQ_IDLE_DELAY		(HZ / 5)
- 
- /*
-  * below this threshold, we consider thinktime immediate
-@@ -98,8 +98,6 @@ struct cfq_data {
- 	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
- 	struct cfq_queue *async_idle_cfqq;
- 
--	struct timer_list idle_class_timer;
--
- 	sector_t last_position;
- 	unsigned long last_end_request;
- 
-@@ -199,8 +197,8 @@ CFQ_CFQQ_FNS(sync);
- 
- static void cfq_dispatch_insert(struct request_queue *, struct request *);
- static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
--				       struct task_struct *, gfp_t);
--static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
-+				       struct io_context *, gfp_t);
-+static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
- 						struct io_context *);
- 
- static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-@@ -384,12 +382,15 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
- /*
-  * The below is leftmost cache rbtree addon
-  */
--static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
-+static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
- {
- 	if (!root->left)
- 		root->left = rb_first(&root->rb);
- 
--	return root->left;
-+	if (root->left)
-+		return rb_entry(root->left, struct cfq_queue, rb_node);
++static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
++{
++	int err;
++	const char *ctr_name;
++	char full_name[CRYPTO_MAX_ALG_NAME];
 +
-+	return NULL;
- }
- 
- static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
-@@ -446,12 +447,20 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
- static void cfq_service_tree_add(struct cfq_data *cfqd,
- 				    struct cfq_queue *cfqq, int add_front)
- {
--	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
--	struct rb_node *parent = NULL;
-+	struct rb_node **p, *parent;
-+	struct cfq_queue *__cfqq;
- 	unsigned long rb_key;
- 	int left;
- 
--	if (!add_front) {
-+	if (cfq_class_idle(cfqq)) {
-+		rb_key = CFQ_IDLE_DELAY;
-+		parent = rb_last(&cfqd->service_tree.rb);
-+		if (parent && parent != &cfqq->rb_node) {
-+			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
-+			rb_key += __cfqq->rb_key;
-+		} else
-+			rb_key += jiffies;
-+	} else if (!add_front) {
- 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
- 		rb_key += cfqq->slice_resid;
- 		cfqq->slice_resid = 0;
-@@ -469,8 +478,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
- 	}
- 
- 	left = 1;
-+	parent = NULL;
-+	p = &cfqd->service_tree.rb.rb_node;
- 	while (*p) {
--		struct cfq_queue *__cfqq;
- 		struct rb_node **n;
- 
- 		parent = *p;
-@@ -524,8 +534,7 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-  * add to busy list of queues for service, trying to be fair in ordering
-  * the pending list according to last request service
-  */
--static inline void
--cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-+static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
- {
- 	BUG_ON(cfq_cfqq_on_rr(cfqq));
- 	cfq_mark_cfqq_on_rr(cfqq);
-@@ -538,8 +547,7 @@ cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-  * Called when the cfqq no longer has requests pending, remove it from
-  * the service tree.
-  */
--static inline void
--cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-+static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
- {
- 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
- 	cfq_clear_cfqq_on_rr(cfqq);
-@@ -554,7 +562,7 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
- /*
-  * rb tree support functions
-  */
--static inline void cfq_del_rq_rb(struct request *rq)
-+static void cfq_del_rq_rb(struct request *rq)
- {
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
- 	struct cfq_data *cfqd = cfqq->cfqd;
-@@ -594,8 +602,7 @@ static void cfq_add_rq_rb(struct request *rq)
- 	BUG_ON(!cfqq->next_rq);
- }
- 
--static inline void
--cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
-+static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
- {
- 	elv_rb_del(&cfqq->sort_list, rq);
- 	cfqq->queued[rq_is_sync(rq)]--;
-@@ -609,7 +616,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
- 	struct cfq_io_context *cic;
- 	struct cfq_queue *cfqq;
- 
--	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
-+	cic = cfq_cic_lookup(cfqd, tsk->io_context);
- 	if (!cic)
- 		return NULL;
- 
-@@ -721,7 +728,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
- 	 * Lookup the cfqq that this bio will be queued with. Allow
- 	 * merge only if rq is queued there.
- 	 */
--	cic = cfq_cic_rb_lookup(cfqd, current->io_context);
-+	cic = cfq_cic_lookup(cfqd, current->io_context);
- 	if (!cic)
- 		return 0;
- 
-@@ -732,15 +739,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
- 	return 0;
- }
- 
--static inline void
--__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-+static void __cfq_set_active_queue(struct cfq_data *cfqd,
-+				   struct cfq_queue *cfqq)
- {
- 	if (cfqq) {
--		/*
--		 * stop potential idle class queues waiting service
--		 */
--		del_timer(&cfqd->idle_class_timer);
--
- 		cfqq->slice_end = 0;
- 		cfq_clear_cfqq_must_alloc_slice(cfqq);
- 		cfq_clear_cfqq_fifo_expire(cfqq);
-@@ -789,47 +791,16 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
- 		__cfq_slice_expired(cfqd, cfqq, timed_out);
- }
- 
--static int start_idle_class_timer(struct cfq_data *cfqd)
--{
--	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
--	unsigned long now = jiffies;
--
--	if (time_before(now, end) &&
--	    time_after_eq(now, cfqd->last_end_request)) {
--		mod_timer(&cfqd->idle_class_timer, end);
--		return 1;
--	}
--
--	return 0;
--}
--
- /*
-  * Get next queue for service. Unless we have a queue preemption,
-  * we'll simply select the first cfqq in the service tree.
-  */
- static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
- {
--	struct cfq_queue *cfqq;
--	struct rb_node *n;
--
- 	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
- 		return NULL;
- 
--	n = cfq_rb_first(&cfqd->service_tree);
--	cfqq = rb_entry(n, struct cfq_queue, rb_node);
--
--	if (cfq_class_idle(cfqq)) {
--		/*
--		 * if we have idle queues and no rt or be queues had
--		 * pending requests, either allow immediate service if
--		 * the grace period has passed or arm the idle grace
--		 * timer
--		 */
--		if (start_idle_class_timer(cfqd))
--			cfqq = NULL;
--	}
--
--	return cfqq;
-+	return cfq_rb_first(&cfqd->service_tree);
- }
- 
- /*
-@@ -895,7 +866,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
- 	 * task has exited, don't wait
- 	 */
- 	cic = cfqd->active_cic;
--	if (!cic || !cic->ioc->task)
-+	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
- 		return;
- 
- 	/*
-@@ -939,7 +910,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
- /*
-  * return expired entry, or NULL to just start from scratch in rbtree
-  */
--static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
-+static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
- {
- 	struct cfq_data *cfqd = cfqq->cfqd;
- 	struct request *rq;
-@@ -1068,7 +1039,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- 	return dispatched;
- }
- 
--static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
-+static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
- {
- 	int dispatched = 0;
- 
-@@ -1087,14 +1058,11 @@ static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
-  */
- static int cfq_forced_dispatch(struct cfq_data *cfqd)
- {
-+	struct cfq_queue *cfqq;
- 	int dispatched = 0;
--	struct rb_node *n;
--
--	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
--		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
- 
-+	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
- 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
--	}
- 
- 	cfq_slice_expired(cfqd, 0);
- 
-@@ -1170,20 +1138,69 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
- 	kmem_cache_free(cfq_pool, cfqq);
- }
- 
--static void cfq_free_io_context(struct io_context *ioc)
-+/*
-+ * Call func for each cic attached to this ioc. Returns number of cic's seen.
-+ */
-+#define CIC_GANG_NR	16
-+static unsigned int
-+call_for_each_cic(struct io_context *ioc,
-+		  void (*func)(struct io_context *, struct cfq_io_context *))
- {
--	struct cfq_io_context *__cic;
--	struct rb_node *n;
--	int freed = 0;
-+	struct cfq_io_context *cics[CIC_GANG_NR];
-+	unsigned long index = 0;
-+	unsigned int called = 0;
-+	int nr;
- 
--	ioc->ioc_data = NULL;
-+	rcu_read_lock();
- 
--	while ((n = rb_first(&ioc->cic_root)) != NULL) {
--		__cic = rb_entry(n, struct cfq_io_context, rb_node);
--		rb_erase(&__cic->rb_node, &ioc->cic_root);
--		kmem_cache_free(cfq_ioc_pool, __cic);
--		freed++;
--	}
-+	do {
-+		int i;
++	ctr_name = crypto_attr_alg_name(tb[1]);
++	err = PTR_ERR(ctr_name);
++	if (IS_ERR(ctr_name))
++		return ERR_PTR(err);
 +
-+		/*
-+		 * Perhaps there's a better way - this just gang lookups from
-+		 * 0 to the end, restarting after each CIC_GANG_NR from the
-+		 * last key + 1.
-+		 */
-+		nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
-+						index, CIC_GANG_NR);
-+		if (!nr)
-+			break;
++	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)",
++		     ctr_name) >= CRYPTO_MAX_ALG_NAME)
++		return ERR_PTR(-ENAMETOOLONG);
 +
-+		called += nr;
-+		index = 1 + (unsigned long) cics[nr - 1]->key;
++	return crypto_gcm_alloc_common(tb, full_name, ctr_name);
++}
 +
-+		for (i = 0; i < nr; i++)
-+			func(ioc, cics[i]);
-+	} while (nr == CIC_GANG_NR);
++static struct crypto_template crypto_gcm_base_tmpl = {
++	.name = "gcm_base",
++	.alloc = crypto_gcm_base_alloc,
++	.free = crypto_gcm_free,
++	.module = THIS_MODULE,
++};
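
Both templates register through the regular AEAD interface, so "gcm(aes)" and "gcm_base(ctr(aes))" become allocatable by name. A rough usage sketch — assuming a 16-byte AES key, a 16-byte IV, and caller-provided scatterlists with room for the 16-byte tag; none of these names come from the patch itself — could be:

/* Illustrative only; not part of the patch. */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int gcm_encrypt_example(const u8 *key, u8 *iv,
			       struct scatterlist *assoc, unsigned int assoclen,
			       struct scatterlist *buf, unsigned int cryptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	/* Ask for a synchronous implementation so no completion callback is needed. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/*
	 * Encrypt in place: buf must hold cryptlen bytes of data plus the
	 * 16-byte authentication tag appended on encryption.
	 */
	aead_request_set_crypt(req, buf, buf, cryptlen, iv);
	aead_request_set_assoc(req, assoc, assoclen);
	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

Passing CRYPTO_ALG_ASYNC in the mask requests a non-async transform, which keeps the example free of completion handling.
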
 +
-+	rcu_read_unlock();
++static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
++				 unsigned int keylen)
++{
++	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
++	struct crypto_aead *child = ctx->child;
++	int err;
 +
-+	return called;
++	if (keylen < 4)
++		return -EINVAL;
++
++	keylen -= 4;
++	memcpy(ctx->nonce, key + keylen, 4);
++
++	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
++	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
++				     CRYPTO_TFM_REQ_MASK);
++	err = crypto_aead_setkey(child, key, keylen);
++	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
++				      CRYPTO_TFM_RES_MASK);
++
++	return err;
 +}
 +
-+static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
++static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
++				      unsigned int authsize)
 +{
-+	unsigned long flags;
-+
-+	BUG_ON(!cic->dead_key);
++	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
 +
-+	spin_lock_irqsave(&ioc->lock, flags);
-+	radix_tree_delete(&ioc->radix_root, cic->dead_key);
-+	spin_unlock_irqrestore(&ioc->lock, flags);
++	switch (authsize) {
++	case 8:
++	case 12:
++	case 16:
++		break;
++	default:
++		return -EINVAL;
++	}
 +
-+	kmem_cache_free(cfq_ioc_pool, cic);
++	return crypto_aead_setauthsize(ctx->child, authsize);
 +}
 +
-+static void cfq_free_io_context(struct io_context *ioc)
++static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
 +{
-+	int freed;
++	struct aead_request *subreq = aead_request_ctx(req);
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
++	struct crypto_aead *child = ctx->child;
++	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
++			   crypto_aead_alignmask(child) + 1);
 +
-+	/*
-+	 * ioc->refcount is zero here, so no more cic's are allowed to be
-+	 * linked into this ioc. So it should be ok to iterate over the known
-+	 * list, we will see all cic's since no new ones are added.
-+	 */
-+	freed = call_for_each_cic(ioc, cic_free_func);
- 
- 	elv_ioc_count_mod(ioc_count, -freed);
- 
-@@ -1205,7 +1222,12 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
- 					 struct cfq_io_context *cic)
- {
- 	list_del_init(&cic->queue_list);
++	memcpy(iv, ctx->nonce, 4);
++	memcpy(iv + 4, req->iv, 8);
 +
-+	/*
-+	 * Make sure key == NULL is seen for dead queues
-+	 */
- 	smp_wmb();
-+	cic->dead_key = (unsigned long) cic->key;
- 	cic->key = NULL;
- 
- 	if (cic->cfqq[ASYNC]) {
-@@ -1219,16 +1241,18 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
- 	}
- }
- 
--static void cfq_exit_single_io_context(struct cfq_io_context *cic)
-+static void cfq_exit_single_io_context(struct io_context *ioc,
-+				       struct cfq_io_context *cic)
- {
- 	struct cfq_data *cfqd = cic->key;
- 
- 	if (cfqd) {
- 		struct request_queue *q = cfqd->queue;
-+		unsigned long flags;
- 
--		spin_lock_irq(q->queue_lock);
-+		spin_lock_irqsave(q->queue_lock, flags);
- 		__cfq_exit_single_io_context(cfqd, cic);
--		spin_unlock_irq(q->queue_lock);
-+		spin_unlock_irqrestore(q->queue_lock, flags);
- 	}
- }
- 
-@@ -1238,21 +1262,8 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
-  */
- static void cfq_exit_io_context(struct io_context *ioc)
- {
--	struct cfq_io_context *__cic;
--	struct rb_node *n;
--
--	ioc->ioc_data = NULL;
--
--	/*
--	 * put the reference this task is holding to the various queues
--	 */
--	n = rb_first(&ioc->cic_root);
--	while (n != NULL) {
--		__cic = rb_entry(n, struct cfq_io_context, rb_node);
--
--		cfq_exit_single_io_context(__cic);
--		n = rb_next(n);
--	}
-+	rcu_assign_pointer(ioc->ioc_data, NULL);
-+	call_for_each_cic(ioc, cfq_exit_single_io_context);
- }
- 
- static struct cfq_io_context *
-@@ -1273,7 +1284,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
- 	return cic;
- }
- 
--static void cfq_init_prio_data(struct cfq_queue *cfqq)
-+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
- {
- 	struct task_struct *tsk = current;
- 	int ioprio_class;
-@@ -1281,7 +1292,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
- 	if (!cfq_cfqq_prio_changed(cfqq))
- 		return;
- 
--	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
-+	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
- 	switch (ioprio_class) {
- 		default:
- 			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
-@@ -1293,11 +1304,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
- 			cfqq->ioprio_class = IOPRIO_CLASS_BE;
- 			break;
- 		case IOPRIO_CLASS_RT:
--			cfqq->ioprio = task_ioprio(tsk);
-+			cfqq->ioprio = task_ioprio(ioc);
- 			cfqq->ioprio_class = IOPRIO_CLASS_RT;
- 			break;
- 		case IOPRIO_CLASS_BE:
--			cfqq->ioprio = task_ioprio(tsk);
-+			cfqq->ioprio = task_ioprio(ioc);
- 			cfqq->ioprio_class = IOPRIO_CLASS_BE;
- 			break;
- 		case IOPRIO_CLASS_IDLE:
-@@ -1316,7 +1327,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
- 	cfq_clear_cfqq_prio_changed(cfqq);
- }
- 
--static inline void changed_ioprio(struct cfq_io_context *cic)
-+static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
- {
- 	struct cfq_data *cfqd = cic->key;
- 	struct cfq_queue *cfqq;
-@@ -1330,8 +1341,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
- 	cfqq = cic->cfqq[ASYNC];
- 	if (cfqq) {
- 		struct cfq_queue *new_cfqq;
--		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
--					 GFP_ATOMIC);
-+		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
- 		if (new_cfqq) {
- 			cic->cfqq[ASYNC] = new_cfqq;
- 			cfq_put_queue(cfqq);
-@@ -1347,29 +1357,19 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
- 
- static void cfq_ioc_set_ioprio(struct io_context *ioc)
- {
--	struct cfq_io_context *cic;
--	struct rb_node *n;
--
-+	call_for_each_cic(ioc, changed_ioprio);
- 	ioc->ioprio_changed = 0;
--
--	n = rb_first(&ioc->cic_root);
--	while (n != NULL) {
--		cic = rb_entry(n, struct cfq_io_context, rb_node);
--
--		changed_ioprio(cic);
--		n = rb_next(n);
--	}
- }
- 
- static struct cfq_queue *
- cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
--		     struct task_struct *tsk, gfp_t gfp_mask)
-+		     struct io_context *ioc, gfp_t gfp_mask)
- {
- 	struct cfq_queue *cfqq, *new_cfqq = NULL;
- 	struct cfq_io_context *cic;
- 
- retry:
--	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
-+	cic = cfq_cic_lookup(cfqd, ioc);
- 	/* cic always exists here */
- 	cfqq = cic_to_cfqq(cic, is_sync);
- 
-@@ -1404,15 +1404,16 @@ retry:
- 		atomic_set(&cfqq->ref, 0);
- 		cfqq->cfqd = cfqd;
- 
--		if (is_sync) {
--			cfq_mark_cfqq_idle_window(cfqq);
--			cfq_mark_cfqq_sync(cfqq);
--		}
--
- 		cfq_mark_cfqq_prio_changed(cfqq);
- 		cfq_mark_cfqq_queue_new(cfqq);
- 
--		cfq_init_prio_data(cfqq);
-+		cfq_init_prio_data(cfqq, ioc);
++	aead_request_set_tfm(subreq, child);
++	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
++				  req->base.data);
++	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
++	aead_request_set_assoc(subreq, req->assoc, req->assoclen);
 +
-+		if (is_sync) {
-+			if (!cfq_class_idle(cfqq))
-+				cfq_mark_cfqq_idle_window(cfqq);
-+			cfq_mark_cfqq_sync(cfqq);
-+		}
- 	}
- 
- 	if (new_cfqq)
-@@ -1439,11 +1440,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
- }
- 
- static struct cfq_queue *
--cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
-+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
- 	      gfp_t gfp_mask)
- {
--	const int ioprio = task_ioprio(tsk);
--	const int ioprio_class = task_ioprio_class(tsk);
-+	const int ioprio = task_ioprio(ioc);
-+	const int ioprio_class = task_ioprio_class(ioc);
- 	struct cfq_queue **async_cfqq = NULL;
- 	struct cfq_queue *cfqq = NULL;
- 
-@@ -1453,7 +1454,7 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
- 	}
- 
- 	if (!cfqq) {
--		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
-+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
- 		if (!cfqq)
- 			return NULL;
- 	}
-@@ -1470,28 +1471,42 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
- 	return cfqq;
- }
- 
-+static void cfq_cic_free(struct cfq_io_context *cic)
++	return subreq;
++}
++
++static int crypto_rfc4106_encrypt(struct aead_request *req)
 +{
-+	kmem_cache_free(cfq_ioc_pool, cic);
-+	elv_ioc_count_dec(ioc_count);
++	req = crypto_rfc4106_crypt(req);
 +
-+	if (ioc_gone && !elv_ioc_count_read(ioc_count))
-+		complete(ioc_gone);
++	return crypto_aead_encrypt(req);
 +}
 +
- /*
-  * We drop cfq io contexts lazily, so we may find a dead one.
-  */
- static void
--cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
-+cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
-+		  struct cfq_io_context *cic)
- {
-+	unsigned long flags;
++static int crypto_rfc4106_decrypt(struct aead_request *req)
++{
++	req = crypto_rfc4106_crypt(req);
 +
- 	WARN_ON(!list_empty(&cic->queue_list));
- 
-+	spin_lock_irqsave(&ioc->lock, flags);
++	return crypto_aead_decrypt(req);
++}
 +
- 	if (ioc->ioc_data == cic)
--		ioc->ioc_data = NULL;
-+		rcu_assign_pointer(ioc->ioc_data, NULL);
- 
--	rb_erase(&cic->rb_node, &ioc->cic_root);
--	kmem_cache_free(cfq_ioc_pool, cic);
--	elv_ioc_count_dec(ioc_count);
-+	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
-+	spin_unlock_irqrestore(&ioc->lock, flags);
++static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm)
++{
++	struct crypto_instance *inst = (void *)tfm->__crt_alg;
++	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
++	struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct crypto_aead *aead;
++	unsigned long align;
 +
-+	cfq_cic_free(cic);
- }
- 
- static struct cfq_io_context *
--cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
-+cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
- {
--	struct rb_node *n;
- 	struct cfq_io_context *cic;
--	void *k, *key = cfqd;
-+	void *k;
- 
- 	if (unlikely(!ioc))
- 		return NULL;
-@@ -1499,74 +1514,64 @@ cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
- 	/*
- 	 * we maintain a last-hit cache, to avoid browsing over the tree
- 	 */
--	cic = ioc->ioc_data;
-+	cic = rcu_dereference(ioc->ioc_data);
- 	if (cic && cic->key == cfqd)
- 		return cic;
- 
--restart:
--	n = ioc->cic_root.rb_node;
--	while (n) {
--		cic = rb_entry(n, struct cfq_io_context, rb_node);
-+	do {
-+		rcu_read_lock();
-+		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
-+		rcu_read_unlock();
-+		if (!cic)
-+			break;
- 		/* ->key must be copied to avoid race with cfq_exit_queue() */
- 		k = cic->key;
- 		if (unlikely(!k)) {
--			cfq_drop_dead_cic(ioc, cic);
--			goto restart;
-+			cfq_drop_dead_cic(cfqd, ioc, cic);
-+			continue;
- 		}
- 
--		if (key < k)
--			n = n->rb_left;
--		else if (key > k)
--			n = n->rb_right;
--		else {
--			ioc->ioc_data = cic;
--			return cic;
--		}
--	}
-+		rcu_assign_pointer(ioc->ioc_data, cic);
-+		break;
-+	} while (1);
- 
--	return NULL;
-+	return cic;
- }
- 
--static inline void
--cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
--	     struct cfq_io_context *cic)
-+/*
-+ * Add cic into ioc, using cfqd as the search key. This enables us to lookup
-+ * the process specific cfq io context when entered from the block layer.
-+ * Also adds the cic to a per-cfqd list, used when this queue is removed.
-+ */
-+static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
-+			struct cfq_io_context *cic, gfp_t gfp_mask)
- {
--	struct rb_node **p;
--	struct rb_node *parent;
--	struct cfq_io_context *__cic;
- 	unsigned long flags;
--	void *k;
-+	int ret;
- 
--	cic->ioc = ioc;
--	cic->key = cfqd;
-+	ret = radix_tree_preload(gfp_mask);
-+	if (!ret) {
-+		cic->ioc = ioc;
-+		cic->key = cfqd;
- 
--restart:
--	parent = NULL;
--	p = &ioc->cic_root.rb_node;
--	while (*p) {
--		parent = *p;
--		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
--		/* ->key must be copied to avoid race with cfq_exit_queue() */
--		k = __cic->key;
--		if (unlikely(!k)) {
--			cfq_drop_dead_cic(ioc, __cic);
--			goto restart;
--		}
-+		spin_lock_irqsave(&ioc->lock, flags);
-+		ret = radix_tree_insert(&ioc->radix_root,
-+						(unsigned long) cfqd, cic);
-+		spin_unlock_irqrestore(&ioc->lock, flags);
- 
--		if (cic->key < k)
--			p = &(*p)->rb_left;
--		else if (cic->key > k)
--			p = &(*p)->rb_right;
--		else
--			BUG();
-+		radix_tree_preload_end();
++	aead = crypto_spawn_aead(spawn);
++	if (IS_ERR(aead))
++		return PTR_ERR(aead);
 +
-+		if (!ret) {
-+			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-+			list_add(&cic->queue_list, &cfqd->cic_list);
-+			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
-+		}
- 	}
- 
--	rb_link_node(&cic->rb_node, parent, p);
--	rb_insert_color(&cic->rb_node, &ioc->cic_root);
-+	if (ret)
-+		printk(KERN_ERR "cfq: cic link failed!\n");
- 
--	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
--	list_add(&cic->queue_list, &cfqd->cic_list);
--	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
-+	return ret;
- }
- 
- /*
-@@ -1586,7 +1591,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
- 	if (!ioc)
- 		return NULL;
- 
--	cic = cfq_cic_rb_lookup(cfqd, ioc);
-+	cic = cfq_cic_lookup(cfqd, ioc);
- 	if (cic)
- 		goto out;
- 
-@@ -1594,13 +1599,17 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
- 	if (cic == NULL)
- 		goto err;
- 
--	cfq_cic_link(cfqd, ioc, cic);
-+	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
-+		goto err_free;
++	ctx->child = aead;
 +
- out:
- 	smp_read_barrier_depends();
- 	if (unlikely(ioc->ioprio_changed))
- 		cfq_ioc_set_ioprio(ioc);
- 
- 	return cic;
-+err_free:
-+	cfq_cic_free(cic);
- err:
- 	put_io_context(ioc);
- 	return NULL;
-@@ -1655,12 +1664,15 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- {
- 	int enable_idle;
- 
--	if (!cfq_cfqq_sync(cfqq))
-+	/*
-+	 * Don't idle for async or idle io prio class
-+	 */
-+	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
- 		return;
- 
- 	enable_idle = cfq_cfqq_idle_window(cfqq);
- 
--	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
-+	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
- 	    (cfqd->hw_tag && CIC_SEEKY(cic)))
- 		enable_idle = 0;
- 	else if (sample_valid(cic->ttime_samples)) {
-@@ -1793,7 +1805,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
- 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
- 
--	cfq_init_prio_data(cfqq);
-+	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
- 
- 	cfq_add_rq_rb(rq);
- 
-@@ -1834,7 +1846,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
- 			cfq_set_prio_slice(cfqd, cfqq);
- 			cfq_clear_cfqq_slice_new(cfqq);
- 		}
--		if (cfq_slice_used(cfqq))
-+		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
- 			cfq_slice_expired(cfqd, 1);
- 		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
- 			cfq_arm_slice_timer(cfqd);
-@@ -1894,13 +1906,13 @@ static int cfq_may_queue(struct request_queue *q, int rw)
- 	 * so just lookup a possibly existing queue, or return 'may queue'
- 	 * if that fails
- 	 */
--	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
-+	cic = cfq_cic_lookup(cfqd, tsk->io_context);
- 	if (!cic)
- 		return ELV_MQUEUE_MAY;
- 
- 	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
- 	if (cfqq) {
--		cfq_init_prio_data(cfqq);
-+		cfq_init_prio_data(cfqq, cic->ioc);
- 		cfq_prio_boost(cfqq);
- 
- 		return __cfq_may_queue(cfqq);
-@@ -1938,7 +1950,6 @@ static int
- cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
- {
- 	struct cfq_data *cfqd = q->elevator->elevator_data;
--	struct task_struct *tsk = current;
- 	struct cfq_io_context *cic;
- 	const int rw = rq_data_dir(rq);
- 	const int is_sync = rq_is_sync(rq);
-@@ -1956,7 +1967,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
- 
- 	cfqq = cic_to_cfqq(cic, is_sync);
- 	if (!cfqq) {
--		cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);
-+		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
- 
- 		if (!cfqq)
- 			goto queue_fail;
-@@ -2039,29 +2050,9 @@ out_cont:
- 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
- }
- 
--/*
-- * Timer running if an idle class queue is waiting for service
-- */
--static void cfq_idle_class_timer(unsigned long data)
--{
--	struct cfq_data *cfqd = (struct cfq_data *) data;
--	unsigned long flags;
--
--	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
--
--	/*
--	 * race with a non-idle queue, reset timer
--	 */
--	if (!start_idle_class_timer(cfqd))
--		cfq_schedule_dispatch(cfqd);
--
--	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
--}
--
- static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
- {
- 	del_timer_sync(&cfqd->idle_slice_timer);
--	del_timer_sync(&cfqd->idle_class_timer);
- 	kblockd_flush_work(&cfqd->unplug_work);
- }
- 
-@@ -2126,10 +2117,6 @@ static void *cfq_init_queue(struct request_queue *q)
- 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
- 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
- 
--	init_timer(&cfqd->idle_class_timer);
--	cfqd->idle_class_timer.function = cfq_idle_class_timer;
--	cfqd->idle_class_timer.data = (unsigned long) cfqd;
--
- 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
- 
- 	cfqd->last_end_request = jiffies;
-@@ -2160,7 +2147,7 @@ static int __init cfq_slab_setup(void)
- 	if (!cfq_pool)
- 		goto fail;
- 
--	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
-+	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU);
- 	if (!cfq_ioc_pool)
- 		goto fail;
- 
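
The cfq hunks above drop the per-ioc rbtree of cfq_io_contexts in favour of a radix tree keyed by the cfqd pointer, inserted under a spinlock after radix_tree_preload() and looked up under RCU. A stripped-down sketch of that pattern, with my_root/my_lock/my_item as placeholder names, is:

/* Illustrative only; not part of the patch. */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_root, GFP_ATOMIC);	/* nodes normally come from the preload */
static DEFINE_SPINLOCK(my_lock);

struct my_item {
	unsigned long key;
	/* ... payload ... */
};

static int my_insert(struct my_item *item, gfp_t gfp_mask)
{
	int ret;

	ret = radix_tree_preload(gfp_mask);	/* may sleep; call before taking the lock */
	if (ret)
		return ret;

	spin_lock(&my_lock);
	ret = radix_tree_insert(&my_root, item->key, item);
	spin_unlock(&my_lock);

	radix_tree_preload_end();
	return ret;
}

static struct my_item *my_lookup(unsigned long key)
{
	struct my_item *item;

	rcu_read_lock();			/* lookups are lockless, RCU-protected */
	item = radix_tree_lookup(&my_root, key);
	rcu_read_unlock();

	return item;
}

Preloading outside the lock lets node allocation sleep while the insert itself stays atomic, which is the same ordering the hunks above use in cfq_cic_link().
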
-diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
-index cae0a85..b733732 100644
---- a/block/compat_ioctl.c
-+++ b/block/compat_ioctl.c
-@@ -545,6 +545,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
- 	struct blk_user_trace_setup buts;
- 	struct compat_blk_user_trace_setup cbuts;
- 	struct request_queue *q;
-+	char b[BDEVNAME_SIZE];
- 	int ret;
- 
- 	q = bdev_get_queue(bdev);
-@@ -554,6 +555,8 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
- 	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
- 		return -EFAULT;
- 
-+	strcpy(b, bdevname(bdev, b));
++	align = crypto_aead_alignmask(aead);
++	align &= ~(crypto_tfm_ctx_alignment() - 1);
++	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
++				ALIGN(crypto_aead_reqsize(aead),
++				      crypto_tfm_ctx_alignment()) +
++				align + 16;
 +
- 	buts = (struct blk_user_trace_setup) {
- 		.act_mask = cbuts.act_mask,
- 		.buf_size = cbuts.buf_size,
-@@ -565,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
- 	memcpy(&buts.name, &cbuts.name, 32);
- 
- 	mutex_lock(&bdev->bd_mutex);
--	ret = do_blk_trace_setup(q, bdev, &buts);
-+	ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts);
- 	mutex_unlock(&bdev->bd_mutex);
- 	if (ret)
- 		return ret;
-diff --git a/block/elevator.c b/block/elevator.c
-index e452deb..8cd5775 100644
---- a/block/elevator.c
-+++ b/block/elevator.c
-@@ -185,9 +185,7 @@ static elevator_t *elevator_alloc(struct request_queue *q,
- 
- 	eq->ops = &e->ops;
- 	eq->elevator_type = e;
--	kobject_init(&eq->kobj);
--	kobject_set_name(&eq->kobj, "%s", "iosched");
--	eq->kobj.ktype = &elv_ktype;
-+	kobject_init(&eq->kobj, &elv_ktype);
- 	mutex_init(&eq->sysfs_lock);
- 
- 	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
-@@ -743,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
- 			q->boundary_rq = NULL;
- 		}
- 
--		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
-+		if (rq->cmd_flags & REQ_DONTPREP)
-+			break;
++	return 0;
++}
 +
-+		if (q->dma_drain_size && rq->data_len) {
-+			/*
-+			 * make sure space for the drain appears we
-+			 * know we can do this because max_hw_segments
-+			 * has been adjusted to be one fewer than the
-+			 * device can handle
-+			 */
-+			rq->nr_phys_segments++;
-+			rq->nr_hw_segments++;
-+		}
++static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm)
++{
++	struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+		if (!q->prep_rq_fn)
- 			break;
- 
- 		ret = q->prep_rq_fn(q, rq);
-@@ -756,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
- 			 * avoid resource deadlock.  REQ_STARTED will
- 			 * prevent other fs requests from passing this one.
- 			 */
-+			if (q->dma_drain_size && rq->data_len &&
-+			    !(rq->cmd_flags & REQ_DONTPREP)) {
-+				/*
-+				 * remove the space for the drain we added
-+				 * so that we don't add it again
-+				 */
-+				--rq->nr_phys_segments;
-+				--rq->nr_hw_segments;
-+			}
++	crypto_free_aead(ctx->child);
++}
 +
- 			rq = NULL;
- 			break;
- 		} else if (ret == BLKPREP_KILL) {
-@@ -931,9 +953,7 @@ int elv_register_queue(struct request_queue *q)
- 	elevator_t *e = q->elevator;
- 	int error;
- 
--	e->kobj.parent = &q->kobj;
--
--	error = kobject_add(&e->kobj);
-+	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
- 	if (!error) {
- 		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
- 		if (attr) {
-diff --git a/block/genhd.c b/block/genhd.c
-index f2ac914..de2ebb2 100644
---- a/block/genhd.c
-+++ b/block/genhd.c
-@@ -17,8 +17,10 @@
- #include <linux/buffer_head.h>
- #include <linux/mutex.h>
- 
--struct kset block_subsys;
--static DEFINE_MUTEX(block_subsys_lock);
-+static DEFINE_MUTEX(block_class_lock);
-+#ifndef CONFIG_SYSFS_DEPRECATED
-+struct kobject *block_depr;
-+#endif
- 
- /*
-  * Can be deleted altogether. Later.
-@@ -37,19 +39,17 @@ static inline int major_to_index(int major)
- }
- 
- #ifdef CONFIG_PROC_FS
--
- void blkdev_show(struct seq_file *f, off_t offset)
- {
- 	struct blk_major_name *dp;
- 
- 	if (offset < BLKDEV_MAJOR_HASH_SIZE) {
--		mutex_lock(&block_subsys_lock);
-+		mutex_lock(&block_class_lock);
- 		for (dp = major_names[offset]; dp; dp = dp->next)
- 			seq_printf(f, "%3d %s\n", dp->major, dp->name);
--		mutex_unlock(&block_subsys_lock);
-+		mutex_unlock(&block_class_lock);
- 	}
- }
--
- #endif /* CONFIG_PROC_FS */
- 
- int register_blkdev(unsigned int major, const char *name)
-@@ -57,7 +57,7 @@ int register_blkdev(unsigned int major, const char *name)
- 	struct blk_major_name **n, *p;
- 	int index, ret = 0;
- 
--	mutex_lock(&block_subsys_lock);
-+	mutex_lock(&block_class_lock);
- 
- 	/* temporary */
- 	if (major == 0) {
-@@ -102,7 +102,7 @@ int register_blkdev(unsigned int major, const char *name)
- 		kfree(p);
- 	}
- out:
--	mutex_unlock(&block_subsys_lock);
-+	mutex_unlock(&block_class_lock);
- 	return ret;
- }
- 
-@@ -114,7 +114,7 @@ void unregister_blkdev(unsigned int major, const char *name)
- 	struct blk_major_name *p = NULL;
- 	int index = major_to_index(major);
- 
--	mutex_lock(&block_subsys_lock);
-+	mutex_lock(&block_class_lock);
- 	for (n = &major_names[index]; *n; n = &(*n)->next)
- 		if ((*n)->major == major)
- 			break;
-@@ -124,7 +124,7 @@ void unregister_blkdev(unsigned int major, const char *name)
- 		p = *n;
- 		*n = p->next;
- 	}
--	mutex_unlock(&block_subsys_lock);
-+	mutex_unlock(&block_class_lock);
- 	kfree(p);
- }
- 
-@@ -137,29 +137,30 @@ static struct kobj_map *bdev_map;
-  * range must be nonzero
-  * The hash chain is sorted on range, so that subranges can override.
-  */
--void blk_register_region(dev_t dev, unsigned long range, struct module *module,
-+void blk_register_region(dev_t devt, unsigned long range, struct module *module,
- 			 struct kobject *(*probe)(dev_t, int *, void *),
- 			 int (*lock)(dev_t, void *), void *data)
- {
--	kobj_map(bdev_map, dev, range, module, probe, lock, data);
-+	kobj_map(bdev_map, devt, range, module, probe, lock, data);
- }
- 
- EXPORT_SYMBOL(blk_register_region);
- 
--void blk_unregister_region(dev_t dev, unsigned long range)
-+void blk_unregister_region(dev_t devt, unsigned long range)
- {
--	kobj_unmap(bdev_map, dev, range);
-+	kobj_unmap(bdev_map, devt, range);
- }
- 
- EXPORT_SYMBOL(blk_unregister_region);
- 
--static struct kobject *exact_match(dev_t dev, int *part, void *data)
-+static struct kobject *exact_match(dev_t devt, int *part, void *data)
- {
- 	struct gendisk *p = data;
--	return &p->kobj;
++static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb)
++{
++	struct crypto_attr_type *algt;
++	struct crypto_instance *inst;
++	struct crypto_aead_spawn *spawn;
++	struct crypto_alg *alg;
++	const char *ccm_name;
++	int err;
 +
-+	return &p->dev.kobj;
- }
- 
--static int exact_lock(dev_t dev, void *data)
-+static int exact_lock(dev_t devt, void *data)
- {
- 	struct gendisk *p = data;
- 
-@@ -194,8 +195,6 @@ void unlink_gendisk(struct gendisk *disk)
- 			      disk->minors);
- }
- 
--#define to_disk(obj) container_of(obj,struct gendisk,kobj)
--
- /**
-  * get_gendisk - get partitioning information for a given device
-  * @dev: device to get partitioning information for
-@@ -203,10 +202,12 @@ void unlink_gendisk(struct gendisk *disk)
-  * This function gets the structure containing partitioning
-  * information for the given device @dev.
-  */
--struct gendisk *get_gendisk(dev_t dev, int *part)
-+struct gendisk *get_gendisk(dev_t devt, int *part)
- {
--	struct kobject *kobj = kobj_lookup(bdev_map, dev, part);
--	return  kobj ? to_disk(kobj) : NULL;
-+	struct kobject *kobj = kobj_lookup(bdev_map, devt, part);
-+	struct device *dev = kobj_to_dev(kobj);
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
++		return ERR_PTR(err);
 +
-+	return  kobj ? dev_to_disk(dev) : NULL;
- }
- 
- /*
-@@ -216,13 +217,17 @@ struct gendisk *get_gendisk(dev_t dev, int *part)
-  */
- void __init printk_all_partitions(void)
- {
--	int n;
-+	struct device *dev;
- 	struct gendisk *sgp;
-+	char buf[BDEVNAME_SIZE];
-+	int n;
- 
--	mutex_lock(&block_subsys_lock);
-+	mutex_lock(&block_class_lock);
- 	/* For each block device... */
--	list_for_each_entry(sgp, &block_subsys.list, kobj.entry) {
--		char buf[BDEVNAME_SIZE];
-+	list_for_each_entry(dev, &block_class.devices, node) {
-+		if (dev->type != &disk_type)
-+			continue;
-+		sgp = dev_to_disk(dev);
- 		/*
- 		 * Don't show empty devices or things that have been surpressed
- 		 */
-@@ -255,38 +260,46 @@ void __init printk_all_partitions(void)
- 				sgp->major, n + 1 + sgp->first_minor,
- 				(unsigned long long)sgp->part[n]->nr_sects >> 1,
- 				disk_name(sgp, n + 1, buf));
--		} /* partition subloop */
--	} /* Block device loop */
-+		}
-+	}
- 
--	mutex_unlock(&block_subsys_lock);
--	return;
-+	mutex_unlock(&block_class_lock);
- }
- 
- #ifdef CONFIG_PROC_FS
- /* iterator */
- static void *part_start(struct seq_file *part, loff_t *pos)
- {
--	struct list_head *p;
--	loff_t l = *pos;
-+	loff_t k = *pos;
-+	struct device *dev;
- 
--	mutex_lock(&block_subsys_lock);
--	list_for_each(p, &block_subsys.list)
--		if (!l--)
--			return list_entry(p, struct gendisk, kobj.entry);
-+	mutex_lock(&block_class_lock);
-+	list_for_each_entry(dev, &block_class.devices, node) {
-+		if (dev->type != &disk_type)
-+			continue;
-+		if (!k--)
-+			return dev_to_disk(dev);
-+	}
- 	return NULL;
- }
- 
- static void *part_next(struct seq_file *part, void *v, loff_t *pos)
- {
--	struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
-+	struct gendisk *gp = v;
-+	struct device *dev;
- 	++*pos;
--	return p==&block_subsys.list ? NULL :
--		list_entry(p, struct gendisk, kobj.entry);
-+	list_for_each_entry(dev, &gp->dev.node, node) {
-+		if (&dev->node == &block_class.devices)
-+			return NULL;
-+		if (dev->type == &disk_type)
-+			return dev_to_disk(dev);
-+	}
-+	return NULL;
- }
- 
- static void part_stop(struct seq_file *part, void *v)
- {
--	mutex_unlock(&block_subsys_lock);
-+	mutex_unlock(&block_class_lock);
- }
- 
- static int show_partition(struct seq_file *part, void *v)
-@@ -295,7 +308,7 @@ static int show_partition(struct seq_file *part, void *v)
- 	int n;
- 	char buf[BDEVNAME_SIZE];
- 
--	if (&sgp->kobj.entry == block_subsys.list.next)
-+	if (&sgp->dev.node == block_class.devices.next)
- 		seq_puts(part, "major minor  #blocks  name\n\n");
- 
- 	/* Don't show non-partitionable removeable devices or empty devices */
-@@ -324,111 +337,82 @@ static int show_partition(struct seq_file *part, void *v)
- 	return 0;
- }
- 
--struct seq_operations partitions_op = {
--	.start =part_start,
--	.next =	part_next,
--	.stop =	part_stop,
--	.show =	show_partition
-+const struct seq_operations partitions_op = {
-+	.start	= part_start,
-+	.next	= part_next,
-+	.stop	= part_stop,
-+	.show	= show_partition
- };
- #endif
- 
- 
- extern int blk_dev_init(void);
- 
--static struct kobject *base_probe(dev_t dev, int *part, void *data)
-+static struct kobject *base_probe(dev_t devt, int *part, void *data)
- {
--	if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
-+	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
- 		/* Make old-style 2.4 aliases work */
--		request_module("block-major-%d", MAJOR(dev));
-+		request_module("block-major-%d", MAJOR(devt));
- 	return NULL;
- }
- 
- static int __init genhd_device_init(void)
- {
--	int err;
--
--	bdev_map = kobj_map_init(base_probe, &block_subsys_lock);
-+	class_register(&block_class);
-+	bdev_map = kobj_map_init(base_probe, &block_class_lock);
- 	blk_dev_init();
--	err = subsystem_register(&block_subsys);
--	if (err < 0)
--		printk(KERN_WARNING "%s: subsystem_register error: %d\n",
--			__FUNCTION__, err);
--	return err;
++	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
++		return ERR_PTR(-EINVAL);
 +
-+#ifndef CONFIG_SYSFS_DEPRECATED
-+	/* create top-level block dir */
-+	block_depr = kobject_create_and_add("block", NULL);
-+#endif
-+	return 0;
- }
- 
- subsys_initcall(genhd_device_init);
- 
--
--
--/*
-- * kobject & sysfs bindings for block devices
-- */
--static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
--			      char *page)
-+static ssize_t disk_range_show(struct device *dev,
-+			       struct device_attribute *attr, char *buf)
- {
--	struct gendisk *disk = to_disk(kobj);
--	struct disk_attribute *disk_attr =
--		container_of(attr,struct disk_attribute,attr);
--	ssize_t ret = -EIO;
-+	struct gendisk *disk = dev_to_disk(dev);
- 
--	if (disk_attr->show)
--		ret = disk_attr->show(disk,page);
--	return ret;
-+	return sprintf(buf, "%d\n", disk->minors);
- }
- 
--static ssize_t disk_attr_store(struct kobject * kobj, struct attribute * attr,
--			       const char *page, size_t count)
-+static ssize_t disk_removable_show(struct device *dev,
-+				   struct device_attribute *attr, char *buf)
- {
--	struct gendisk *disk = to_disk(kobj);
--	struct disk_attribute *disk_attr =
--		container_of(attr,struct disk_attribute,attr);
--	ssize_t ret = 0;
-+	struct gendisk *disk = dev_to_disk(dev);
- 
--	if (disk_attr->store)
--		ret = disk_attr->store(disk, page, count);
--	return ret;
-+	return sprintf(buf, "%d\n",
-+		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
- }
- 
--static struct sysfs_ops disk_sysfs_ops = {
--	.show	= &disk_attr_show,
--	.store	= &disk_attr_store,
--};
--
--static ssize_t disk_uevent_store(struct gendisk * disk,
--				 const char *buf, size_t count)
--{
--	kobject_uevent(&disk->kobj, KOBJ_ADD);
--	return count;
--}
--static ssize_t disk_dev_read(struct gendisk * disk, char *page)
--{
--	dev_t base = MKDEV(disk->major, disk->first_minor); 
--	return print_dev_t(page, base);
--}
--static ssize_t disk_range_read(struct gendisk * disk, char *page)
-+static ssize_t disk_size_show(struct device *dev,
-+			      struct device_attribute *attr, char *buf)
- {
--	return sprintf(page, "%d\n", disk->minors);
--}
--static ssize_t disk_removable_read(struct gendisk * disk, char *page)
--{
--	return sprintf(page, "%d\n",
--		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
-+	struct gendisk *disk = dev_to_disk(dev);
- 
-+	return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk));
- }
--static ssize_t disk_size_read(struct gendisk * disk, char *page)
--{
--	return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
--}
--static ssize_t disk_capability_read(struct gendisk *disk, char *page)
++	ccm_name = crypto_attr_alg_name(tb[1]);
++	err = PTR_ERR(ccm_name);
++	if (IS_ERR(ccm_name))
++		return ERR_PTR(err);
 +
-+static ssize_t disk_capability_show(struct device *dev,
-+				    struct device_attribute *attr, char *buf)
- {
--	return sprintf(page, "%x\n", disk->flags);
-+	struct gendisk *disk = dev_to_disk(dev);
++	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
++	if (!inst)
++		return ERR_PTR(-ENOMEM);
 +
-+	return sprintf(buf, "%x\n", disk->flags);
- }
--static ssize_t disk_stats_read(struct gendisk * disk, char *page)
++	spawn = crypto_instance_ctx(inst);
++	crypto_set_aead_spawn(spawn, inst);
++	err = crypto_grab_aead(spawn, ccm_name, 0,
++			       crypto_requires_sync(algt->type, algt->mask));
++	if (err)
++		goto out_free_inst;
 +
-+static ssize_t disk_stat_show(struct device *dev,
-+			      struct device_attribute *attr, char *buf)
- {
-+	struct gendisk *disk = dev_to_disk(dev);
++	alg = crypto_aead_spawn_alg(spawn);
 +
- 	preempt_disable();
- 	disk_round_stats(disk);
- 	preempt_enable();
--	return sprintf(page,
-+	return sprintf(buf,
- 		"%8lu %8lu %8llu %8u "
- 		"%8lu %8lu %8llu %8u "
- 		"%8u %8u %8u"
-@@ -445,40 +429,21 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page)
- 		jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
- 		jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
- }
--static struct disk_attribute disk_attr_uevent = {
--	.attr = {.name = "uevent", .mode = S_IWUSR },
--	.store	= disk_uevent_store
--};
--static struct disk_attribute disk_attr_dev = {
--	.attr = {.name = "dev", .mode = S_IRUGO },
--	.show	= disk_dev_read
--};
--static struct disk_attribute disk_attr_range = {
--	.attr = {.name = "range", .mode = S_IRUGO },
--	.show	= disk_range_read
--};
--static struct disk_attribute disk_attr_removable = {
--	.attr = {.name = "removable", .mode = S_IRUGO },
--	.show	= disk_removable_read
--};
--static struct disk_attribute disk_attr_size = {
--	.attr = {.name = "size", .mode = S_IRUGO },
--	.show	= disk_size_read
--};
--static struct disk_attribute disk_attr_capability = {
--	.attr = {.name = "capability", .mode = S_IRUGO },
--	.show	= disk_capability_read
--};
--static struct disk_attribute disk_attr_stat = {
--	.attr = {.name = "stat", .mode = S_IRUGO },
--	.show	= disk_stats_read
--};
- 
- #ifdef CONFIG_FAIL_MAKE_REQUEST
-+static ssize_t disk_fail_show(struct device *dev,
-+			      struct device_attribute *attr, char *buf)
-+{
-+	struct gendisk *disk = dev_to_disk(dev);
++	err = -EINVAL;
 +
-+	return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
-+}
- 
--static ssize_t disk_fail_store(struct gendisk * disk,
-+static ssize_t disk_fail_store(struct device *dev,
-+			       struct device_attribute *attr,
- 			       const char *buf, size_t count)
- {
-+	struct gendisk *disk = dev_to_disk(dev);
- 	int i;
- 
- 	if (count > 0 && sscanf(buf, "%d", &i) > 0) {
-@@ -490,136 +455,100 @@ static ssize_t disk_fail_store(struct gendisk * disk,
- 
- 	return count;
- }
--static ssize_t disk_fail_read(struct gendisk * disk, char *page)
--{
--	return sprintf(page, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
--}
--static struct disk_attribute disk_attr_fail = {
--	.attr = {.name = "make-it-fail", .mode = S_IRUGO | S_IWUSR },
--	.store	= disk_fail_store,
--	.show	= disk_fail_read
--};
- 
- #endif
- 
--static struct attribute * default_attrs[] = {
--	&disk_attr_uevent.attr,
--	&disk_attr_dev.attr,
--	&disk_attr_range.attr,
--	&disk_attr_removable.attr,
--	&disk_attr_size.attr,
--	&disk_attr_stat.attr,
--	&disk_attr_capability.attr,
-+static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
-+static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
-+static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL);
-+static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
-+static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL);
-+#ifdef CONFIG_FAIL_MAKE_REQUEST
-+static struct device_attribute dev_attr_fail =
-+	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store);
-+#endif
++	/* We only support 16-byte blocks. */
++	if (alg->cra_aead.ivsize != 16)
++		goto out_drop_alg;
 +
-+static struct attribute *disk_attrs[] = {
-+	&dev_attr_range.attr,
-+	&dev_attr_removable.attr,
-+	&dev_attr_size.attr,
-+	&dev_attr_capability.attr,
-+	&dev_attr_stat.attr,
- #ifdef CONFIG_FAIL_MAKE_REQUEST
--	&disk_attr_fail.attr,
-+	&dev_attr_fail.attr,
- #endif
--	NULL,
-+	NULL
-+};
++	/* Not a stream cipher? */
++	if (alg->cra_blocksize != 1)
++		goto out_drop_alg;
 +
-+static struct attribute_group disk_attr_group = {
-+	.attrs = disk_attrs,
- };
- 
--static void disk_release(struct kobject * kobj)
-+static struct attribute_group *disk_attr_groups[] = {
-+	&disk_attr_group,
-+	NULL
-+};
++	err = -ENAMETOOLONG;
++	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
++		     "rfc4106(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
++	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
++		     "rfc4106(%s)", alg->cra_driver_name) >=
++	    CRYPTO_MAX_ALG_NAME)
++		goto out_drop_alg;
 +
-+static void disk_release(struct device *dev)
- {
--	struct gendisk *disk = to_disk(kobj);
-+	struct gendisk *disk = dev_to_disk(dev);
++	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
++	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
++	inst->alg.cra_priority = alg->cra_priority;
++	inst->alg.cra_blocksize = 1;
++	inst->alg.cra_alignmask = alg->cra_alignmask;
++	inst->alg.cra_type = &crypto_nivaead_type;
 +
- 	kfree(disk->random);
- 	kfree(disk->part);
- 	free_disk_stats(disk);
- 	kfree(disk);
- }
--
--static struct kobj_type ktype_block = {
--	.release	= disk_release,
--	.sysfs_ops	= &disk_sysfs_ops,
--	.default_attrs	= default_attrs,
-+struct class block_class = {
-+	.name		= "block",
- };
- 
--extern struct kobj_type ktype_part;
--
--static int block_uevent_filter(struct kset *kset, struct kobject *kobj)
--{
--	struct kobj_type *ktype = get_ktype(kobj);
--
--	return ((ktype == &ktype_block) || (ktype == &ktype_part));
--}
--
--static int block_uevent(struct kset *kset, struct kobject *kobj,
--			struct kobj_uevent_env *env)
--{
--	struct kobj_type *ktype = get_ktype(kobj);
--	struct device *physdev;
--	struct gendisk *disk;
--	struct hd_struct *part;
--
--	if (ktype == &ktype_block) {
--		disk = container_of(kobj, struct gendisk, kobj);
--		add_uevent_var(env, "MINOR=%u", disk->first_minor);
--	} else if (ktype == &ktype_part) {
--		disk = container_of(kobj->parent, struct gendisk, kobj);
--		part = container_of(kobj, struct hd_struct, kobj);
--		add_uevent_var(env, "MINOR=%u",
--			       disk->first_minor + part->partno);
--	} else
--		return 0;
--
--	add_uevent_var(env, "MAJOR=%u", disk->major);
--
--	/* add physical device, backing this device  */
--	physdev = disk->driverfs_dev;
--	if (physdev) {
--		char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
--
--		add_uevent_var(env, "PHYSDEVPATH=%s", path);
--		kfree(path);
--
--		if (physdev->bus)
--			add_uevent_var(env, "PHYSDEVBUS=%s", physdev->bus->name);
--
--		if (physdev->driver)
--			add_uevent_var(env, physdev->driver->name);
--	}
--
--	return 0;
--}
--
--static struct kset_uevent_ops block_uevent_ops = {
--	.filter		= block_uevent_filter,
--	.uevent		= block_uevent,
-+struct device_type disk_type = {
-+	.name		= "disk",
-+	.groups		= disk_attr_groups,
-+	.release	= disk_release,
- };
- 
--decl_subsys(block, &ktype_block, &block_uevent_ops);
--
- /*
-  * aggregate disk stat collector.  Uses the same stats that the sysfs
-  * entries do, above, but makes them available through one seq_file.
-- * Watching a few disks may be efficient through sysfs, but watching
-- * all of them will be more efficient through this interface.
-  *
-  * The output looks suspiciously like /proc/partitions with a bunch of
-  * extra fields.
-  */
- 
--/* iterator */
- static void *diskstats_start(struct seq_file *part, loff_t *pos)
- {
- 	loff_t k = *pos;
--	struct list_head *p;
-+	struct device *dev;
- 
--	mutex_lock(&block_subsys_lock);
--	list_for_each(p, &block_subsys.list)
-+	mutex_lock(&block_class_lock);
-+	list_for_each_entry(dev, &block_class.devices, node) {
-+		if (dev->type != &disk_type)
-+			continue;
- 		if (!k--)
--			return list_entry(p, struct gendisk, kobj.entry);
-+			return dev_to_disk(dev);
-+	}
- 	return NULL;
- }
- 
- static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
- {
--	struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
-+	struct gendisk *gp = v;
-+	struct device *dev;
++	inst->alg.cra_aead.ivsize = 8;
++	inst->alg.cra_aead.maxauthsize = 16;
 +
- 	++*pos;
--	return p==&block_subsys.list ? NULL :
--		list_entry(p, struct gendisk, kobj.entry);
-+	list_for_each_entry(dev, &gp->dev.node, node) {
-+		if (&dev->node == &block_class.devices)
-+			return NULL;
-+		if (dev->type == &disk_type)
-+			return dev_to_disk(dev);
-+	}
-+	return NULL;
- }
- 
- static void diskstats_stop(struct seq_file *part, void *v)
- {
--	mutex_unlock(&block_subsys_lock);
-+	mutex_unlock(&block_class_lock);
- }
- 
- static int diskstats_show(struct seq_file *s, void *v)
-@@ -629,7 +558,7 @@ static int diskstats_show(struct seq_file *s, void *v)
- 	int n = 0;
- 
- 	/*
--	if (&sgp->kobj.entry == block_subsys.kset.list.next)
-+	if (&gp->dev.kobj.entry == block_class.devices.next)
- 		seq_puts(s,	"major minor name"
- 				"     rio rmerge rsect ruse wio wmerge "
- 				"wsect wuse running use aveq"
-@@ -666,7 +595,7 @@ static int diskstats_show(struct seq_file *s, void *v)
- 	return 0;
- }
- 
--struct seq_operations diskstats_op = {
-+const struct seq_operations diskstats_op = {
- 	.start	= diskstats_start,
- 	.next	= diskstats_next,
- 	.stop	= diskstats_stop,
-@@ -683,7 +612,7 @@ static void media_change_notify_thread(struct work_struct *work)
- 	 * set enviroment vars to indicate which event this is for
- 	 * so that user space will know to go check the media status.
- 	 */
--	kobject_uevent_env(&gd->kobj, KOBJ_CHANGE, envp);
-+	kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp);
- 	put_device(gd->driverfs_dev);
- }
- 
-@@ -694,6 +623,25 @@ void genhd_media_change_notify(struct gendisk *disk)
- }
- EXPORT_SYMBOL_GPL(genhd_media_change_notify);
- 
-+dev_t blk_lookup_devt(const char *name)
-+{
-+	struct device *dev;
-+	dev_t devt = MKDEV(0, 0);
++	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
 +
-+	mutex_lock(&block_class_lock);
-+	list_for_each_entry(dev, &block_class.devices, node) {
-+		if (strcmp(dev->bus_id, name) == 0) {
-+			devt = dev->devt;
-+			break;
-+		}
-+	}
-+	mutex_unlock(&block_class_lock);
++	inst->alg.cra_init = crypto_rfc4106_init_tfm;
++	inst->alg.cra_exit = crypto_rfc4106_exit_tfm;
 +
-+	return devt;
++	inst->alg.cra_aead.setkey = crypto_rfc4106_setkey;
++	inst->alg.cra_aead.setauthsize = crypto_rfc4106_setauthsize;
++	inst->alg.cra_aead.encrypt = crypto_rfc4106_encrypt;
++	inst->alg.cra_aead.decrypt = crypto_rfc4106_decrypt;
++
++	inst->alg.cra_aead.geniv = "seqiv";
++
++out:
++	return inst;
++
++out_drop_alg:
++	crypto_drop_aead(spawn);
++out_free_inst:
++	kfree(inst);
++	inst = ERR_PTR(err);
++	goto out;
 +}
 +
-+EXPORT_SYMBOL(blk_lookup_devt);
++static void crypto_rfc4106_free(struct crypto_instance *inst)
++{
++	crypto_drop_spawn(crypto_instance_ctx(inst));
++	kfree(inst);
++}
 +
- struct gendisk *alloc_disk(int minors)
- {
- 	return alloc_disk_node(minors, -1);
-@@ -721,9 +669,10 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
- 			}
- 		}
- 		disk->minors = minors;
--		kobj_set_kset_s(disk,block_subsys);
--		kobject_init(&disk->kobj);
- 		rand_initialize_disk(disk);
-+		disk->dev.class = &block_class;
-+		disk->dev.type = &disk_type;
-+		device_initialize(&disk->dev);
- 		INIT_WORK(&disk->async_notify,
- 			media_change_notify_thread);
- 	}
-@@ -743,7 +692,7 @@ struct kobject *get_disk(struct gendisk *disk)
- 	owner = disk->fops->owner;
- 	if (owner && !try_module_get(owner))
- 		return NULL;
--	kobj = kobject_get(&disk->kobj);
-+	kobj = kobject_get(&disk->dev.kobj);
- 	if (kobj == NULL) {
- 		module_put(owner);
- 		return NULL;
-@@ -757,7 +706,7 @@ EXPORT_SYMBOL(get_disk);
- void put_disk(struct gendisk *disk)
- {
- 	if (disk)
--		kobject_put(&disk->kobj);
-+		kobject_put(&disk->dev.kobj);
- }
- 
- EXPORT_SYMBOL(put_disk);
-diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
-deleted file mode 100644
-index 8b91994..0000000
---- a/block/ll_rw_blk.c
-+++ /dev/null
-@@ -1,4214 +0,0 @@
--/*
-- * Copyright (C) 1991, 1992 Linus Torvalds
-- * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
-- * Elevator latency, (C) 2000  Andrea Arcangeli <andrea at suse.de> SuSE
-- * Queue request tables / lock, selectable elevator, Jens Axboe <axboe at suse.de>
-- * kernel-doc documentation started by NeilBrown <neilb at cse.unsw.edu.au> -  July2000
-- * bio rewrite, highmem i/o, etc, Jens Axboe <axboe at suse.de> - may 2001
-- */
--
--/*
-- * This handles all read/write requests to block devices
-- */
--#include <linux/kernel.h>
--#include <linux/module.h>
--#include <linux/backing-dev.h>
--#include <linux/bio.h>
--#include <linux/blkdev.h>
--#include <linux/highmem.h>
--#include <linux/mm.h>
--#include <linux/kernel_stat.h>
--#include <linux/string.h>
--#include <linux/init.h>
--#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
--#include <linux/completion.h>
--#include <linux/slab.h>
--#include <linux/swap.h>
--#include <linux/writeback.h>
--#include <linux/task_io_accounting_ops.h>
--#include <linux/interrupt.h>
--#include <linux/cpu.h>
--#include <linux/blktrace_api.h>
--#include <linux/fault-inject.h>
--#include <linux/scatterlist.h>
--
--/*
-- * for max sense size
-- */
--#include <scsi/scsi_cmnd.h>
--
--static void blk_unplug_work(struct work_struct *work);
--static void blk_unplug_timeout(unsigned long data);
--static void drive_stat_acct(struct request *rq, int new_io);
--static void init_request_from_bio(struct request *req, struct bio *bio);
--static int __make_request(struct request_queue *q, struct bio *bio);
--static struct io_context *current_io_context(gfp_t gfp_flags, int node);
--static void blk_recalc_rq_segments(struct request *rq);
--static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
--			    struct bio *bio);
--
--/*
-- * For the allocated request tables
-- */
--static struct kmem_cache *request_cachep;
--
--/*
-- * For queue allocation
-- */
--static struct kmem_cache *requestq_cachep;
--
--/*
-- * For io context allocations
-- */
--static struct kmem_cache *iocontext_cachep;
--
--/*
-- * Controlling structure to kblockd
-- */
--static struct workqueue_struct *kblockd_workqueue;
--
--unsigned long blk_max_low_pfn, blk_max_pfn;
--
--EXPORT_SYMBOL(blk_max_low_pfn);
--EXPORT_SYMBOL(blk_max_pfn);
--
--static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
--
--/* Amount of time in which a process may batch requests */
--#define BLK_BATCH_TIME	(HZ/50UL)
--
--/* Number of requests a "batching" process may submit */
--#define BLK_BATCH_REQ	32
--
--/*
-- * Return the threshold (number of used requests) at which the queue is
-- * considered to be congested.  It include a little hysteresis to keep the
-- * context switch rate down.
-- */
--static inline int queue_congestion_on_threshold(struct request_queue *q)
--{
--	return q->nr_congestion_on;
--}
--
--/*
-- * The threshold at which a queue is considered to be uncongested
-- */
--static inline int queue_congestion_off_threshold(struct request_queue *q)
--{
--	return q->nr_congestion_off;
--}
--
--static void blk_queue_congestion_threshold(struct request_queue *q)
--{
--	int nr;
--
--	nr = q->nr_requests - (q->nr_requests / 8) + 1;
--	if (nr > q->nr_requests)
--		nr = q->nr_requests;
--	q->nr_congestion_on = nr;
--
--	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
--	if (nr < 1)
--		nr = 1;
--	q->nr_congestion_off = nr;
--}
--
--/**
-- * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
-- * @bdev:	device
-- *
-- * Locates the passed device's request queue and returns the address of its
-- * backing_dev_info
-- *
-- * Will return NULL if the request queue cannot be located.
-- */
--struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
--{
--	struct backing_dev_info *ret = NULL;
--	struct request_queue *q = bdev_get_queue(bdev);
--
--	if (q)
--		ret = &q->backing_dev_info;
--	return ret;
--}
--EXPORT_SYMBOL(blk_get_backing_dev_info);
--
--/**
-- * blk_queue_prep_rq - set a prepare_request function for queue
-- * @q:		queue
-- * @pfn:	prepare_request function
-- *
-- * It's possible for a queue to register a prepare_request callback which
-- * is invoked before the request is handed to the request_fn. The goal of
-- * the function is to prepare a request for I/O, it can be used to build a
-- * cdb from the request data for instance.
-- *
-- */
--void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
--{
--	q->prep_rq_fn = pfn;
--}
--
--EXPORT_SYMBOL(blk_queue_prep_rq);
--
--/**
-- * blk_queue_merge_bvec - set a merge_bvec function for queue
-- * @q:		queue
-- * @mbfn:	merge_bvec_fn
-- *
-- * Usually queues have static limitations on the max sectors or segments that
-- * we can put in a request. Stacking drivers may have some settings that
-- * are dynamic, and thus we have to query the queue whether it is ok to
-- * add a new bio_vec to a bio at a given offset or not. If the block device
-- * has such limitations, it needs to register a merge_bvec_fn to control
-- * the size of bio's sent to it. Note that a block device *must* allow a
-- * single page to be added to an empty bio. The block device driver may want
-- * to use the bio_split() function to deal with these bio's. By default
-- * no merge_bvec_fn is defined for a queue, and only the fixed limits are
-- * honored.
-- */
--void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
--{
--	q->merge_bvec_fn = mbfn;
--}
--
--EXPORT_SYMBOL(blk_queue_merge_bvec);
--
--void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
--{
--	q->softirq_done_fn = fn;
--}
--
--EXPORT_SYMBOL(blk_queue_softirq_done);
--
--/**
-- * blk_queue_make_request - define an alternate make_request function for a device
-- * @q:  the request queue for the device to be affected
-- * @mfn: the alternate make_request function
-- *
-- * Description:
-- *    The normal way for &struct bios to be passed to a device
-- *    driver is for them to be collected into requests on a request
-- *    queue, and then to allow the device driver to select requests
-- *    off that queue when it is ready.  This works well for many block
-- *    devices. However some block devices (typically virtual devices
-- *    such as md or lvm) do not benefit from the processing on the
-- *    request queue, and are served best by having the requests passed
-- *    directly to them.  This can be achieved by providing a function
-- *    to blk_queue_make_request().
-- *
-- * Caveat:
-- *    The driver that does this *must* be able to deal appropriately
-- *    with buffers in "highmemory". This can be accomplished by either calling
-- *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
-- *    blk_queue_bounce() to create a buffer in normal memory.
-- **/
--void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
--{
--	/*
--	 * set defaults
--	 */
--	q->nr_requests = BLKDEV_MAX_RQ;
--	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
--	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
--	q->make_request_fn = mfn;
--	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
--	q->backing_dev_info.state = 0;
--	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
--	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
--	blk_queue_hardsect_size(q, 512);
--	blk_queue_dma_alignment(q, 511);
--	blk_queue_congestion_threshold(q);
--	q->nr_batching = BLK_BATCH_REQ;
--
--	q->unplug_thresh = 4;		/* hmm */
--	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
--	if (q->unplug_delay == 0)
--		q->unplug_delay = 1;
--
--	INIT_WORK(&q->unplug_work, blk_unplug_work);
--
--	q->unplug_timer.function = blk_unplug_timeout;
--	q->unplug_timer.data = (unsigned long)q;
--
--	/*
--	 * by default assume old behaviour and bounce for any highmem page
--	 */
--	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
--}
--
--EXPORT_SYMBOL(blk_queue_make_request);
--
--static void rq_init(struct request_queue *q, struct request *rq)
--{
--	INIT_LIST_HEAD(&rq->queuelist);
--	INIT_LIST_HEAD(&rq->donelist);
--
--	rq->errors = 0;
--	rq->bio = rq->biotail = NULL;
--	INIT_HLIST_NODE(&rq->hash);
--	RB_CLEAR_NODE(&rq->rb_node);
--	rq->ioprio = 0;
--	rq->buffer = NULL;
--	rq->ref_count = 1;
--	rq->q = q;
--	rq->special = NULL;
--	rq->data_len = 0;
--	rq->data = NULL;
--	rq->nr_phys_segments = 0;
--	rq->sense = NULL;
--	rq->end_io = NULL;
--	rq->end_io_data = NULL;
--	rq->completion_data = NULL;
--	rq->next_rq = NULL;
--}
--
--/**
-- * blk_queue_ordered - does this queue support ordered writes
-- * @q:        the request queue
-- * @ordered:  one of QUEUE_ORDERED_*
-- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
-- *
-- * Description:
-- *   For journalled file systems, doing ordered writes on a commit
-- *   block instead of explicitly doing wait_on_buffer (which is bad
-- *   for performance) can be a big win. Block drivers supporting this
-- *   feature should call this function and indicate so.
-- *
-- **/
--int blk_queue_ordered(struct request_queue *q, unsigned ordered,
--		      prepare_flush_fn *prepare_flush_fn)
--{
--	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
--	    prepare_flush_fn == NULL) {
--		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
--		return -EINVAL;
--	}
--
--	if (ordered != QUEUE_ORDERED_NONE &&
--	    ordered != QUEUE_ORDERED_DRAIN &&
--	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
--	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
--	    ordered != QUEUE_ORDERED_TAG &&
--	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
--	    ordered != QUEUE_ORDERED_TAG_FUA) {
--		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
--		return -EINVAL;
--	}
--
--	q->ordered = ordered;
--	q->next_ordered = ordered;
--	q->prepare_flush_fn = prepare_flush_fn;
--
--	return 0;
--}
--
--EXPORT_SYMBOL(blk_queue_ordered);
--
--/*
-- * Cache flushing for ordered writes handling
-- */
--inline unsigned blk_ordered_cur_seq(struct request_queue *q)
--{
--	if (!q->ordseq)
--		return 0;
--	return 1 << ffz(q->ordseq);
--}
--
--unsigned blk_ordered_req_seq(struct request *rq)
--{
--	struct request_queue *q = rq->q;
--
--	BUG_ON(q->ordseq == 0);
--
--	if (rq == &q->pre_flush_rq)
--		return QUEUE_ORDSEQ_PREFLUSH;
--	if (rq == &q->bar_rq)
--		return QUEUE_ORDSEQ_BAR;
--	if (rq == &q->post_flush_rq)
--		return QUEUE_ORDSEQ_POSTFLUSH;
--
--	/*
--	 * !fs requests don't need to follow barrier ordering.  Always
--	 * put them at the front.  This fixes the following deadlock.
--	 *
--	 * http://thread.gmane.org/gmane.linux.kernel/537473
--	 */
--	if (!blk_fs_request(rq))
--		return QUEUE_ORDSEQ_DRAIN;
--
--	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
--	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
--		return QUEUE_ORDSEQ_DRAIN;
--	else
--		return QUEUE_ORDSEQ_DONE;
--}
--
--void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
--{
--	struct request *rq;
--	int uptodate;
--
--	if (error && !q->orderr)
--		q->orderr = error;
--
--	BUG_ON(q->ordseq & seq);
--	q->ordseq |= seq;
--
--	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
--		return;
--
--	/*
--	 * Okay, sequence complete.
--	 */
--	uptodate = 1;
--	if (q->orderr)
--		uptodate = q->orderr;
--
--	q->ordseq = 0;
--	rq = q->orig_bar_rq;
--
--	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
--	end_that_request_last(rq, uptodate);
--}
--
--static void pre_flush_end_io(struct request *rq, int error)
--{
--	elv_completed_request(rq->q, rq);
--	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
--}
--
--static void bar_end_io(struct request *rq, int error)
--{
--	elv_completed_request(rq->q, rq);
--	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
--}
--
--static void post_flush_end_io(struct request *rq, int error)
--{
--	elv_completed_request(rq->q, rq);
--	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
--}
--
--static void queue_flush(struct request_queue *q, unsigned which)
--{
--	struct request *rq;
--	rq_end_io_fn *end_io;
--
--	if (which == QUEUE_ORDERED_PREFLUSH) {
--		rq = &q->pre_flush_rq;
--		end_io = pre_flush_end_io;
--	} else {
--		rq = &q->post_flush_rq;
--		end_io = post_flush_end_io;
--	}
--
--	rq->cmd_flags = REQ_HARDBARRIER;
--	rq_init(q, rq);
--	rq->elevator_private = NULL;
--	rq->elevator_private2 = NULL;
--	rq->rq_disk = q->bar_rq.rq_disk;
--	rq->end_io = end_io;
--	q->prepare_flush_fn(q, rq);
--
--	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
--}
--
--static inline struct request *start_ordered(struct request_queue *q,
--					    struct request *rq)
--{
--	q->orderr = 0;
--	q->ordered = q->next_ordered;
--	q->ordseq |= QUEUE_ORDSEQ_STARTED;
--
--	/*
--	 * Prep proxy barrier request.
--	 */
--	blkdev_dequeue_request(rq);
--	q->orig_bar_rq = rq;
--	rq = &q->bar_rq;
--	rq->cmd_flags = 0;
--	rq_init(q, rq);
--	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
--		rq->cmd_flags |= REQ_RW;
--	if (q->ordered & QUEUE_ORDERED_FUA)
--		rq->cmd_flags |= REQ_FUA;
--	rq->elevator_private = NULL;
--	rq->elevator_private2 = NULL;
--	init_request_from_bio(rq, q->orig_bar_rq->bio);
--	rq->end_io = bar_end_io;
--
--	/*
--	 * Queue ordered sequence.  As we stack them at the head, we
--	 * need to queue in reverse order.  Note that we rely on that
--	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
--	 * request gets inbetween ordered sequence. If this request is
--	 * an empty barrier, we don't need to do a postflush ever since
--	 * there will be no data written between the pre and post flush.
--	 * Hence a single flush will suffice.
--	 */
--	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
--		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
--	else
--		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
--
--	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
--
--	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
--		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
--		rq = &q->pre_flush_rq;
--	} else
--		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
--
--	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
--		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
--	else
--		rq = NULL;
--
--	return rq;
--}
--
--int blk_do_ordered(struct request_queue *q, struct request **rqp)
--{
--	struct request *rq = *rqp;
--	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
--
--	if (!q->ordseq) {
--		if (!is_barrier)
--			return 1;
--
--		if (q->next_ordered != QUEUE_ORDERED_NONE) {
--			*rqp = start_ordered(q, rq);
--			return 1;
--		} else {
--			/*
--			 * This can happen when the queue switches to
--			 * ORDERED_NONE while this request is on it.
--			 */
--			blkdev_dequeue_request(rq);
--			end_that_request_first(rq, -EOPNOTSUPP,
--					       rq->hard_nr_sectors);
--			end_that_request_last(rq, -EOPNOTSUPP);
--			*rqp = NULL;
--			return 0;
--		}
--	}
--
--	/*
--	 * Ordered sequence in progress
--	 */
--
--	/* Special requests are not subject to ordering rules. */
--	if (!blk_fs_request(rq) &&
--	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
--		return 1;
--
--	if (q->ordered & QUEUE_ORDERED_TAG) {
--		/* Ordered by tag.  Blocking the next barrier is enough. */
--		if (is_barrier && rq != &q->bar_rq)
--			*rqp = NULL;
--	} else {
--		/* Ordered by draining.  Wait for turn. */
--		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
--		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
--			*rqp = NULL;
--	}
--
--	return 1;
--}
--
--static void req_bio_endio(struct request *rq, struct bio *bio,
--			  unsigned int nbytes, int error)
--{
--	struct request_queue *q = rq->q;
--
--	if (&q->bar_rq != rq) {
--		if (error)
--			clear_bit(BIO_UPTODATE, &bio->bi_flags);
--		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
--			error = -EIO;
--
--		if (unlikely(nbytes > bio->bi_size)) {
--			printk("%s: want %u bytes done, only %u left\n",
--			       __FUNCTION__, nbytes, bio->bi_size);
--			nbytes = bio->bi_size;
--		}
--
--		bio->bi_size -= nbytes;
--		bio->bi_sector += (nbytes >> 9);
--		if (bio->bi_size == 0)
--			bio_endio(bio, error);
--	} else {
--
--		/*
--		 * Okay, this is the barrier request in progress, just
--		 * record the error;
--		 */
--		if (error && !q->orderr)
--			q->orderr = error;
--	}
--}
--
--/**
-- * blk_queue_bounce_limit - set bounce buffer limit for queue
-- * @q:  the request queue for the device
-- * @dma_addr:   bus address limit
-- *
-- * Description:
-- *    Different hardware can have different requirements as to what pages
-- *    it can do I/O directly to. A low level driver can call
-- *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
-- *    buffers for doing I/O to pages residing above @page.
-- **/
--void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
--{
--	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
--	int dma = 0;
--
--	q->bounce_gfp = GFP_NOIO;
--#if BITS_PER_LONG == 64
--	/* Assume anything <= 4GB can be handled by IOMMU.
--	   Actually some IOMMUs can handle everything, but I don't
--	   know of a way to test this here. */
--	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
--		dma = 1;
--	q->bounce_pfn = max_low_pfn;
--#else
--	if (bounce_pfn < blk_max_low_pfn)
--		dma = 1;
--	q->bounce_pfn = bounce_pfn;
--#endif
--	if (dma) {
--		init_emergency_isa_pool();
--		q->bounce_gfp = GFP_NOIO | GFP_DMA;
--		q->bounce_pfn = bounce_pfn;
--	}
--}
--
--EXPORT_SYMBOL(blk_queue_bounce_limit);
--
--/**
-- * blk_queue_max_sectors - set max sectors for a request for this queue
-- * @q:  the request queue for the device
-- * @max_sectors:  max sectors in the usual 512b unit
-- *
-- * Description:
-- *    Enables a low level driver to set an upper limit on the size of
-- *    received requests.
-- **/
--void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
--{
--	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
--		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
--		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
--	}
--
--	if (BLK_DEF_MAX_SECTORS > max_sectors)
--		q->max_hw_sectors = q->max_sectors = max_sectors;
-- 	else {
--		q->max_sectors = BLK_DEF_MAX_SECTORS;
--		q->max_hw_sectors = max_sectors;
--	}
--}
--
--EXPORT_SYMBOL(blk_queue_max_sectors);
--
--/**
-- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
-- * @q:  the request queue for the device
-- * @max_segments:  max number of segments
-- *
-- * Description:
-- *    Enables a low level driver to set an upper limit on the number of
-- *    physical data segments in a request.  This would be the largest sized
-- *    scatter list the driver could handle.
-- **/
--void blk_queue_max_phys_segments(struct request_queue *q,
--				 unsigned short max_segments)
--{
--	if (!max_segments) {
--		max_segments = 1;
--		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
--	}
--
--	q->max_phys_segments = max_segments;
--}
--
--EXPORT_SYMBOL(blk_queue_max_phys_segments);
--
--/**
-- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
-- * @q:  the request queue for the device
-- * @max_segments:  max number of segments
-- *
-- * Description:
-- *    Enables a low level driver to set an upper limit on the number of
-- *    hw data segments in a request.  This would be the largest number of
-- *    address/length pairs the host adapter can actually give as once
-- *    to the device.
-- **/
--void blk_queue_max_hw_segments(struct request_queue *q,
--			       unsigned short max_segments)
--{
--	if (!max_segments) {
--		max_segments = 1;
--		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
--	}
--
--	q->max_hw_segments = max_segments;
--}
--
--EXPORT_SYMBOL(blk_queue_max_hw_segments);
--
--/**
-- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
-- * @q:  the request queue for the device
-- * @max_size:  max size of segment in bytes
-- *
-- * Description:
-- *    Enables a low level driver to set an upper limit on the size of a
-- *    coalesced segment
-- **/
--void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
--{
--	if (max_size < PAGE_CACHE_SIZE) {
--		max_size = PAGE_CACHE_SIZE;
--		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
--	}
--
--	q->max_segment_size = max_size;
--}
--
--EXPORT_SYMBOL(blk_queue_max_segment_size);
--
--/**
-- * blk_queue_hardsect_size - set hardware sector size for the queue
-- * @q:  the request queue for the device
-- * @size:  the hardware sector size, in bytes
-- *
-- * Description:
-- *   This should typically be set to the lowest possible sector size
-- *   that the hardware can operate on (possible without reverting to
-- *   even internal read-modify-write operations). Usually the default
-- *   of 512 covers most hardware.
-- **/
--void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
--{
--	q->hardsect_size = size;
--}
--
--EXPORT_SYMBOL(blk_queue_hardsect_size);
--
--/*
-- * Returns the minimum that is _not_ zero, unless both are zero.
-- */
--#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
--
--/**
-- * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
-- * @t:	the stacking driver (top)
-- * @b:  the underlying device (bottom)
-- **/
--void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
--{
--	/* zero is "infinity" */
--	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
--	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
--
--	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
--	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
--	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
--	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
--	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
--		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
--}
--
--EXPORT_SYMBOL(blk_queue_stack_limits);
--
--/**
-- * blk_queue_segment_boundary - set boundary rules for segment merging
-- * @q:  the request queue for the device
-- * @mask:  the memory boundary mask
-- **/
--void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
--{
--	if (mask < PAGE_CACHE_SIZE - 1) {
--		mask = PAGE_CACHE_SIZE - 1;
--		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
--	}
--
--	q->seg_boundary_mask = mask;
--}
--
--EXPORT_SYMBOL(blk_queue_segment_boundary);
--
--/**
-- * blk_queue_dma_alignment - set dma length and memory alignment
-- * @q:     the request queue for the device
-- * @mask:  alignment mask
-- *
-- * description:
-- *    set required memory and length aligment for direct dma transactions.
-- *    this is used when buiding direct io requests for the queue.
-- *
-- **/
--void blk_queue_dma_alignment(struct request_queue *q, int mask)
--{
--	q->dma_alignment = mask;
--}
--
--EXPORT_SYMBOL(blk_queue_dma_alignment);
--
--/**
-- * blk_queue_find_tag - find a request by its tag and queue
-- * @q:	 The request queue for the device
-- * @tag: The tag of the request
-- *
-- * Notes:
-- *    Should be used when a device returns a tag and you want to match
-- *    it with a request.
-- *
-- *    no locks need be held.
-- **/
--struct request *blk_queue_find_tag(struct request_queue *q, int tag)
--{
--	return blk_map_queue_find_tag(q->queue_tags, tag);
--}
--
--EXPORT_SYMBOL(blk_queue_find_tag);
--
--/**
-- * __blk_free_tags - release a given set of tag maintenance info
-- * @bqt:	the tag map to free
-- *
-- * Tries to free the specified @bqt@.  Returns true if it was
-- * actually freed and false if there are still references using it
-- */
--static int __blk_free_tags(struct blk_queue_tag *bqt)
--{
--	int retval;
--
--	retval = atomic_dec_and_test(&bqt->refcnt);
--	if (retval) {
--		BUG_ON(bqt->busy);
--
--		kfree(bqt->tag_index);
--		bqt->tag_index = NULL;
--
--		kfree(bqt->tag_map);
--		bqt->tag_map = NULL;
--
--		kfree(bqt);
--
--	}
--
--	return retval;
--}
--
--/**
-- * __blk_queue_free_tags - release tag maintenance info
-- * @q:  the request queue for the device
-- *
-- *  Notes:
-- *    blk_cleanup_queue() will take care of calling this function, if tagging
-- *    has been used. So there's no need to call this directly.
-- **/
--static void __blk_queue_free_tags(struct request_queue *q)
--{
--	struct blk_queue_tag *bqt = q->queue_tags;
--
--	if (!bqt)
--		return;
--
--	__blk_free_tags(bqt);
--
--	q->queue_tags = NULL;
--	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
--}
--
--
--/**
-- * blk_free_tags - release a given set of tag maintenance info
-- * @bqt:	the tag map to free
-- *
-- * For externally managed @bqt@ frees the map.  Callers of this
-- * function must guarantee to have released all the queues that
-- * might have been using this tag map.
-- */
--void blk_free_tags(struct blk_queue_tag *bqt)
--{
--	if (unlikely(!__blk_free_tags(bqt)))
--		BUG();
--}
--EXPORT_SYMBOL(blk_free_tags);
--
--/**
-- * blk_queue_free_tags - release tag maintenance info
-- * @q:  the request queue for the device
-- *
-- *  Notes:
-- *	This is used to disabled tagged queuing to a device, yet leave
-- *	queue in function.
-- **/
--void blk_queue_free_tags(struct request_queue *q)
--{
--	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
--}
--
--EXPORT_SYMBOL(blk_queue_free_tags);
--
--static int
--init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
--{
--	struct request **tag_index;
--	unsigned long *tag_map;
--	int nr_ulongs;
--
--	if (q && depth > q->nr_requests * 2) {
--		depth = q->nr_requests * 2;
--		printk(KERN_ERR "%s: adjusted depth to %d\n",
--				__FUNCTION__, depth);
--	}
--
--	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
--	if (!tag_index)
--		goto fail;
--
--	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
--	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
--	if (!tag_map)
--		goto fail;
--
--	tags->real_max_depth = depth;
--	tags->max_depth = depth;
--	tags->tag_index = tag_index;
--	tags->tag_map = tag_map;
--
--	return 0;
--fail:
--	kfree(tag_index);
--	return -ENOMEM;
--}
--
--static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
--						   int depth)
--{
--	struct blk_queue_tag *tags;
--
--	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
--	if (!tags)
--		goto fail;
--
--	if (init_tag_map(q, tags, depth))
--		goto fail;
--
--	tags->busy = 0;
--	atomic_set(&tags->refcnt, 1);
--	return tags;
--fail:
--	kfree(tags);
--	return NULL;
--}
--
--/**
-- * blk_init_tags - initialize the tag info for an external tag map
-- * @depth:	the maximum queue depth supported
-- * @tags: the tag to use
-- **/
--struct blk_queue_tag *blk_init_tags(int depth)
--{
--	return __blk_queue_init_tags(NULL, depth);
--}
--EXPORT_SYMBOL(blk_init_tags);
--
--/**
-- * blk_queue_init_tags - initialize the queue tag info
-- * @q:  the request queue for the device
-- * @depth:  the maximum queue depth supported
-- * @tags: the tag to use
-- **/
--int blk_queue_init_tags(struct request_queue *q, int depth,
--			struct blk_queue_tag *tags)
--{
--	int rc;
--
--	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
--
--	if (!tags && !q->queue_tags) {
--		tags = __blk_queue_init_tags(q, depth);
--
--		if (!tags)
--			goto fail;
--	} else if (q->queue_tags) {
--		if ((rc = blk_queue_resize_tags(q, depth)))
--			return rc;
--		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
--		return 0;
--	} else
--		atomic_inc(&tags->refcnt);
--
--	/*
--	 * assign it, all done
--	 */
--	q->queue_tags = tags;
--	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
--	INIT_LIST_HEAD(&q->tag_busy_list);
--	return 0;
--fail:
--	kfree(tags);
--	return -ENOMEM;
--}
--
--EXPORT_SYMBOL(blk_queue_init_tags);
--
--/**
-- * blk_queue_resize_tags - change the queueing depth
-- * @q:  the request queue for the device
-- * @new_depth: the new max command queueing depth
-- *
-- *  Notes:
-- *    Must be called with the queue lock held.
-- **/
--int blk_queue_resize_tags(struct request_queue *q, int new_depth)
--{
--	struct blk_queue_tag *bqt = q->queue_tags;
--	struct request **tag_index;
--	unsigned long *tag_map;
--	int max_depth, nr_ulongs;
--
--	if (!bqt)
--		return -ENXIO;
--
--	/*
--	 * if we already have large enough real_max_depth.  just
--	 * adjust max_depth.  *NOTE* as requests with tag value
--	 * between new_depth and real_max_depth can be in-flight, tag
--	 * map can not be shrunk blindly here.
--	 */
--	if (new_depth <= bqt->real_max_depth) {
--		bqt->max_depth = new_depth;
--		return 0;
--	}
--
--	/*
--	 * Currently cannot replace a shared tag map with a new
--	 * one, so error out if this is the case
--	 */
--	if (atomic_read(&bqt->refcnt) != 1)
--		return -EBUSY;
--
--	/*
--	 * save the old state info, so we can copy it back
--	 */
--	tag_index = bqt->tag_index;
--	tag_map = bqt->tag_map;
--	max_depth = bqt->real_max_depth;
--
--	if (init_tag_map(q, bqt, new_depth))
--		return -ENOMEM;
--
--	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
--	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
--	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
--
--	kfree(tag_index);
--	kfree(tag_map);
--	return 0;
--}
--
--EXPORT_SYMBOL(blk_queue_resize_tags);
--
--/**
-- * blk_queue_end_tag - end tag operations for a request
-- * @q:  the request queue for the device
-- * @rq: the request that has completed
-- *
-- *  Description:
-- *    Typically called when end_that_request_first() returns 0, meaning
-- *    all transfers have been done for a request. It's important to call
-- *    this function before end_that_request_last(), as that will put the
-- *    request back on the free list thus corrupting the internal tag list.
-- *
-- *  Notes:
-- *   queue lock must be held.
-- **/
--void blk_queue_end_tag(struct request_queue *q, struct request *rq)
--{
--	struct blk_queue_tag *bqt = q->queue_tags;
--	int tag = rq->tag;
--
--	BUG_ON(tag == -1);
--
--	if (unlikely(tag >= bqt->real_max_depth))
--		/*
--		 * This can happen after tag depth has been reduced.
--		 * FIXME: how about a warning or info message here?
--		 */
--		return;
--
--	list_del_init(&rq->queuelist);
--	rq->cmd_flags &= ~REQ_QUEUED;
--	rq->tag = -1;
--
--	if (unlikely(bqt->tag_index[tag] == NULL))
--		printk(KERN_ERR "%s: tag %d is missing\n",
--		       __FUNCTION__, tag);
--
--	bqt->tag_index[tag] = NULL;
--
--	if (unlikely(!test_bit(tag, bqt->tag_map))) {
--		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
--		       __FUNCTION__, tag);
--		return;
--	}
--	/*
--	 * The tag_map bit acts as a lock for tag_index[bit], so we need
--	 * unlock memory barrier semantics.
--	 */
--	clear_bit_unlock(tag, bqt->tag_map);
--	bqt->busy--;
--}
--
--EXPORT_SYMBOL(blk_queue_end_tag);
--
--/**
-- * blk_queue_start_tag - find a free tag and assign it
-- * @q:  the request queue for the device
-- * @rq:  the block request that needs tagging
-- *
-- *  Description:
-- *    This can either be used as a stand-alone helper, or possibly be
-- *    assigned as the queue &prep_rq_fn (in which case &struct request
-- *    automagically gets a tag assigned). Note that this function
-- *    assumes that any type of request can be queued! if this is not
-- *    true for your device, you must check the request type before
-- *    calling this function.  The request will also be removed from
-- *    the request queue, so it's the drivers responsibility to readd
-- *    it if it should need to be restarted for some reason.
-- *
-- *  Notes:
-- *   queue lock must be held.
-- **/
--int blk_queue_start_tag(struct request_queue *q, struct request *rq)
--{
--	struct blk_queue_tag *bqt = q->queue_tags;
--	int tag;
--
--	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
--		printk(KERN_ERR 
--		       "%s: request %p for device [%s] already tagged %d",
--		       __FUNCTION__, rq,
--		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
--		BUG();
--	}
--
--	/*
--	 * Protect against shared tag maps, as we may not have exclusive
--	 * access to the tag map.
--	 */
--	do {
--		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
--		if (tag >= bqt->max_depth)
--			return 1;
--
--	} while (test_and_set_bit_lock(tag, bqt->tag_map));
--	/*
--	 * We need lock ordering semantics given by test_and_set_bit_lock.
--	 * See blk_queue_end_tag for details.
--	 */
--
--	rq->cmd_flags |= REQ_QUEUED;
--	rq->tag = tag;
--	bqt->tag_index[tag] = rq;
--	blkdev_dequeue_request(rq);
--	list_add(&rq->queuelist, &q->tag_busy_list);
--	bqt->busy++;
--	return 0;
--}
--
--EXPORT_SYMBOL(blk_queue_start_tag);
--
--/**
-- * blk_queue_invalidate_tags - invalidate all pending tags
-- * @q:  the request queue for the device
-- *
-- *  Description:
-- *   Hardware conditions may dictate a need to stop all pending requests.
-- *   In this case, we will safely clear the block side of the tag queue and
-- *   readd all requests to the request queue in the right order.
-- *
-- *  Notes:
-- *   queue lock must be held.
-- **/
--void blk_queue_invalidate_tags(struct request_queue *q)
--{
--	struct list_head *tmp, *n;
--
--	list_for_each_safe(tmp, n, &q->tag_busy_list)
--		blk_requeue_request(q, list_entry_rq(tmp));
--}
--
--EXPORT_SYMBOL(blk_queue_invalidate_tags);
--
--void blk_dump_rq_flags(struct request *rq, char *msg)
--{
--	int bit;
--
--	printk("%s: dev %s: type=%x, flags=%x\n", msg,
--		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
--		rq->cmd_flags);
--
--	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
--						       rq->nr_sectors,
--						       rq->current_nr_sectors);
--	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
--
--	if (blk_pc_request(rq)) {
--		printk("cdb: ");
--		for (bit = 0; bit < sizeof(rq->cmd); bit++)
--			printk("%02x ", rq->cmd[bit]);
--		printk("\n");
--	}
--}
--
--EXPORT_SYMBOL(blk_dump_rq_flags);
--
--void blk_recount_segments(struct request_queue *q, struct bio *bio)
--{
--	struct request rq;
--	struct bio *nxt = bio->bi_next;
--	rq.q = q;
--	rq.bio = rq.biotail = bio;
--	bio->bi_next = NULL;
--	blk_recalc_rq_segments(&rq);
--	bio->bi_next = nxt;
--	bio->bi_phys_segments = rq.nr_phys_segments;
--	bio->bi_hw_segments = rq.nr_hw_segments;
--	bio->bi_flags |= (1 << BIO_SEG_VALID);
--}
--EXPORT_SYMBOL(blk_recount_segments);
--
--static void blk_recalc_rq_segments(struct request *rq)
--{
--	int nr_phys_segs;
--	int nr_hw_segs;
--	unsigned int phys_size;
--	unsigned int hw_size;
--	struct bio_vec *bv, *bvprv = NULL;
--	int seg_size;
--	int hw_seg_size;
--	int cluster;
--	struct req_iterator iter;
--	int high, highprv = 1;
--	struct request_queue *q = rq->q;
--
--	if (!rq->bio)
--		return;
--
--	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
--	hw_seg_size = seg_size = 0;
--	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
--	rq_for_each_segment(bv, rq, iter) {
--		/*
--		 * the trick here is making sure that a high page is never
--		 * considered part of another segment, since that might
--		 * change with the bounce page.
--		 */
--		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
--		if (high || highprv)
--			goto new_hw_segment;
--		if (cluster) {
--			if (seg_size + bv->bv_len > q->max_segment_size)
--				goto new_segment;
--			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
--				goto new_segment;
--			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
--				goto new_segment;
--			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
--				goto new_hw_segment;
--
--			seg_size += bv->bv_len;
--			hw_seg_size += bv->bv_len;
--			bvprv = bv;
--			continue;
--		}
--new_segment:
--		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
--		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
--			hw_seg_size += bv->bv_len;
--		else {
--new_hw_segment:
--			if (nr_hw_segs == 1 &&
--			    hw_seg_size > rq->bio->bi_hw_front_size)
--				rq->bio->bi_hw_front_size = hw_seg_size;
--			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
--			nr_hw_segs++;
--		}
--
--		nr_phys_segs++;
--		bvprv = bv;
--		seg_size = bv->bv_len;
--		highprv = high;
--	}
--
--	if (nr_hw_segs == 1 &&
--	    hw_seg_size > rq->bio->bi_hw_front_size)
--		rq->bio->bi_hw_front_size = hw_seg_size;
--	if (hw_seg_size > rq->biotail->bi_hw_back_size)
--		rq->biotail->bi_hw_back_size = hw_seg_size;
--	rq->nr_phys_segments = nr_phys_segs;
--	rq->nr_hw_segments = nr_hw_segs;
--}
--
--static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
--				   struct bio *nxt)
--{
--	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
--		return 0;
--
--	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
--		return 0;
--	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
--		return 0;
--
--	/*
--	 * bio and nxt are contigous in memory, check if the queue allows
--	 * these two to be merged into one
--	 */
--	if (BIO_SEG_BOUNDARY(q, bio, nxt))
--		return 1;
--
--	return 0;
--}
--
--static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
--				 struct bio *nxt)
--{
--	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
--		blk_recount_segments(q, bio);
--	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
--		blk_recount_segments(q, nxt);
--	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
--	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
--		return 0;
--	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
--		return 0;
--
--	return 1;
--}
--
--/*
-- * map a request to scatterlist, return number of sg entries setup. Caller
-- * must make sure sg can hold rq->nr_phys_segments entries
-- */
--int blk_rq_map_sg(struct request_queue *q, struct request *rq,
--		  struct scatterlist *sglist)
--{
--	struct bio_vec *bvec, *bvprv;
--	struct req_iterator iter;
--	struct scatterlist *sg;
--	int nsegs, cluster;
--
--	nsegs = 0;
--	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
--
--	/*
--	 * for each bio in rq
--	 */
--	bvprv = NULL;
--	sg = NULL;
--	rq_for_each_segment(bvec, rq, iter) {
--		int nbytes = bvec->bv_len;
--
--		if (bvprv && cluster) {
--			if (sg->length + nbytes > q->max_segment_size)
--				goto new_segment;
--
--			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
--				goto new_segment;
--			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
--				goto new_segment;
--
--			sg->length += nbytes;
--		} else {
--new_segment:
--			if (!sg)
--				sg = sglist;
--			else {
--				/*
--				 * If the driver previously mapped a shorter
--				 * list, we could see a termination bit
--				 * prematurely unless it fully inits the sg
--				 * table on each mapping. We KNOW that there
--				 * must be more entries here or the driver
--				 * would be buggy, so force clear the
--				 * termination bit to avoid doing a full
--				 * sg_init_table() in drivers for each command.
--				 */
--				sg->page_link &= ~0x02;
--				sg = sg_next(sg);
--			}
--
--			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
--			nsegs++;
--		}
--		bvprv = bvec;
--	} /* segments in rq */
--
--	if (sg)
--		sg_mark_end(sg);
--
--	return nsegs;
--}
--
--EXPORT_SYMBOL(blk_rq_map_sg);
--
--/*
-- * the standard queue merge functions, can be overridden with device
-- * specific ones if so desired
-- */
--
--static inline int ll_new_mergeable(struct request_queue *q,
--				   struct request *req,
--				   struct bio *bio)
--{
--	int nr_phys_segs = bio_phys_segments(q, bio);
--
--	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
--		req->cmd_flags |= REQ_NOMERGE;
--		if (req == q->last_merge)
--			q->last_merge = NULL;
--		return 0;
--	}
--
--	/*
--	 * A hw segment is just getting larger, bump just the phys
--	 * counter.
--	 */
--	req->nr_phys_segments += nr_phys_segs;
--	return 1;
--}
--
--static inline int ll_new_hw_segment(struct request_queue *q,
--				    struct request *req,
--				    struct bio *bio)
--{
--	int nr_hw_segs = bio_hw_segments(q, bio);
--	int nr_phys_segs = bio_phys_segments(q, bio);
--
--	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
--	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
--		req->cmd_flags |= REQ_NOMERGE;
--		if (req == q->last_merge)
--			q->last_merge = NULL;
--		return 0;
--	}
--
--	/*
--	 * This will form the start of a new hw segment.  Bump both
--	 * counters.
--	 */
--	req->nr_hw_segments += nr_hw_segs;
--	req->nr_phys_segments += nr_phys_segs;
--	return 1;
--}
--
--static int ll_back_merge_fn(struct request_queue *q, struct request *req,
--			    struct bio *bio)
--{
--	unsigned short max_sectors;
--	int len;
--
--	if (unlikely(blk_pc_request(req)))
--		max_sectors = q->max_hw_sectors;
--	else
--		max_sectors = q->max_sectors;
--
--	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
--		req->cmd_flags |= REQ_NOMERGE;
--		if (req == q->last_merge)
--			q->last_merge = NULL;
--		return 0;
--	}
--	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
--		blk_recount_segments(q, req->biotail);
--	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
--		blk_recount_segments(q, bio);
--	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
--	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
--	    !BIOVEC_VIRT_OVERSIZE(len)) {
--		int mergeable =  ll_new_mergeable(q, req, bio);
--
--		if (mergeable) {
--			if (req->nr_hw_segments == 1)
--				req->bio->bi_hw_front_size = len;
--			if (bio->bi_hw_segments == 1)
--				bio->bi_hw_back_size = len;
--		}
--		return mergeable;
--	}
--
--	return ll_new_hw_segment(q, req, bio);
--}
--
--static int ll_front_merge_fn(struct request_queue *q, struct request *req, 
--			     struct bio *bio)
--{
--	unsigned short max_sectors;
--	int len;
--
--	if (unlikely(blk_pc_request(req)))
--		max_sectors = q->max_hw_sectors;
--	else
--		max_sectors = q->max_sectors;
--
--
--	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
--		req->cmd_flags |= REQ_NOMERGE;
--		if (req == q->last_merge)
--			q->last_merge = NULL;
--		return 0;
--	}
--	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
--	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
--		blk_recount_segments(q, bio);
--	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
--		blk_recount_segments(q, req->bio);
--	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
--	    !BIOVEC_VIRT_OVERSIZE(len)) {
--		int mergeable =  ll_new_mergeable(q, req, bio);
--
--		if (mergeable) {
--			if (bio->bi_hw_segments == 1)
--				bio->bi_hw_front_size = len;
--			if (req->nr_hw_segments == 1)
--				req->biotail->bi_hw_back_size = len;
--		}
--		return mergeable;
--	}
--
--	return ll_new_hw_segment(q, req, bio);
--}
--
--static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
--				struct request *next)
--{
--	int total_phys_segments;
--	int total_hw_segments;
--
--	/*
--	 * First check if the either of the requests are re-queued
--	 * requests.  Can't merge them if they are.
--	 */
--	if (req->special || next->special)
--		return 0;
--
--	/*
--	 * Will it become too large?
--	 */
--	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
--		return 0;
--
--	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
--	if (blk_phys_contig_segment(q, req->biotail, next->bio))
--		total_phys_segments--;
--
--	if (total_phys_segments > q->max_phys_segments)
--		return 0;
--
--	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
--	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
--		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
--		/*
--		 * propagate the combined length to the end of the requests
--		 */
--		if (req->nr_hw_segments == 1)
--			req->bio->bi_hw_front_size = len;
--		if (next->nr_hw_segments == 1)
--			next->biotail->bi_hw_back_size = len;
--		total_hw_segments--;
--	}
--
--	if (total_hw_segments > q->max_hw_segments)
--		return 0;
--
--	/* Merge is OK... */
--	req->nr_phys_segments = total_phys_segments;
--	req->nr_hw_segments = total_hw_segments;
--	return 1;
--}
--
--/*
-- * "plug" the device if there are no outstanding requests: this will
-- * force the transfer to start only after we have put all the requests
-- * on the list.
-- *
-- * This is called with interrupts off and no requests on the queue and
-- * with the queue lock held.
-- */
--void blk_plug_device(struct request_queue *q)
--{
--	WARN_ON(!irqs_disabled());
--
--	/*
--	 * don't plug a stopped queue, it must be paired with blk_start_queue()
--	 * which will restart the queueing
--	 */
--	if (blk_queue_stopped(q))
--		return;
--
--	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
--		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
--		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
--	}
--}
--
--EXPORT_SYMBOL(blk_plug_device);
--
--/*
-- * remove the queue from the plugged list, if present. called with
-- * queue lock held and interrupts disabled.
-- */
--int blk_remove_plug(struct request_queue *q)
--{
--	WARN_ON(!irqs_disabled());
--
--	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
--		return 0;
--
--	del_timer(&q->unplug_timer);
--	return 1;
--}
--
--EXPORT_SYMBOL(blk_remove_plug);
--
--/*
-- * remove the plug and let it rip..
-- */
--void __generic_unplug_device(struct request_queue *q)
--{
--	if (unlikely(blk_queue_stopped(q)))
--		return;
--
--	if (!blk_remove_plug(q))
--		return;
--
--	q->request_fn(q);
--}
--EXPORT_SYMBOL(__generic_unplug_device);
--
--/**
-- * generic_unplug_device - fire a request queue
-- * @q:    The &struct request_queue in question
-- *
-- * Description:
-- *   Linux uses plugging to build bigger requests queues before letting
-- *   the device have at them. If a queue is plugged, the I/O scheduler
-- *   is still adding and merging requests on the queue. Once the queue
-- *   gets unplugged, the request_fn defined for the queue is invoked and
-- *   transfers started.
-- **/
--void generic_unplug_device(struct request_queue *q)
--{
--	spin_lock_irq(q->queue_lock);
--	__generic_unplug_device(q);
--	spin_unlock_irq(q->queue_lock);
--}
--EXPORT_SYMBOL(generic_unplug_device);
--
--static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
--				   struct page *page)
--{
--	struct request_queue *q = bdi->unplug_io_data;
--
--	blk_unplug(q);
--}
--
--static void blk_unplug_work(struct work_struct *work)
--{
--	struct request_queue *q =
--		container_of(work, struct request_queue, unplug_work);
--
--	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
--				q->rq.count[READ] + q->rq.count[WRITE]);
--
--	q->unplug_fn(q);
--}
--
--static void blk_unplug_timeout(unsigned long data)
--{
--	struct request_queue *q = (struct request_queue *)data;
--
--	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
--				q->rq.count[READ] + q->rq.count[WRITE]);
--
--	kblockd_schedule_work(&q->unplug_work);
--}
--
--void blk_unplug(struct request_queue *q)
--{
--	/*
--	 * devices don't necessarily have an ->unplug_fn defined
--	 */
--	if (q->unplug_fn) {
--		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
--					q->rq.count[READ] + q->rq.count[WRITE]);
--
--		q->unplug_fn(q);
--	}
--}
--EXPORT_SYMBOL(blk_unplug);
--
--/**
-- * blk_start_queue - restart a previously stopped queue
-- * @q:    The &struct request_queue in question
-- *
-- * Description:
-- *   blk_start_queue() will clear the stop flag on the queue, and call
-- *   the request_fn for the queue if it was in a stopped state when
-- *   entered. Also see blk_stop_queue(). Queue lock must be held.
-- **/
--void blk_start_queue(struct request_queue *q)
--{
--	WARN_ON(!irqs_disabled());
--
--	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
--
--	/*
--	 * one level of recursion is ok and is much faster than kicking
--	 * the unplug handling
--	 */
--	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
--		q->request_fn(q);
--		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
--	} else {
--		blk_plug_device(q);
--		kblockd_schedule_work(&q->unplug_work);
--	}
--}
--
--EXPORT_SYMBOL(blk_start_queue);
--
--/**
-- * blk_stop_queue - stop a queue
-- * @q:    The &struct request_queue in question
-- *
-- * Description:
-- *   The Linux block layer assumes that a block driver will consume all
-- *   entries on the request queue when the request_fn strategy is called.
-- *   Often this will not happen, because of hardware limitations (queue
-- *   depth settings). If a device driver gets a 'queue full' response,
-- *   or if it simply chooses not to queue more I/O at one point, it can
-- *   call this function to prevent the request_fn from being called until
-- *   the driver has signalled it's ready to go again. This happens by calling
-- *   blk_start_queue() to restart queue operations. Queue lock must be held.
-- **/
--void blk_stop_queue(struct request_queue *q)
--{
--	blk_remove_plug(q);
--	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
--}
--EXPORT_SYMBOL(blk_stop_queue);
--
--/**
-- * blk_sync_queue - cancel any pending callbacks on a queue
-- * @q: the queue
-- *
-- * Description:
-- *     The block layer may perform asynchronous callback activity
-- *     on a queue, such as calling the unplug function after a timeout.
-- *     A block device may call blk_sync_queue to ensure that any
-- *     such activity is cancelled, thus allowing it to release resources
-- *     that the callbacks might use. The caller must already have made sure
-- *     that its ->make_request_fn will not re-add plugging prior to calling
-- *     this function.
-- *
-- */
--void blk_sync_queue(struct request_queue *q)
--{
--	del_timer_sync(&q->unplug_timer);
--	kblockd_flush_work(&q->unplug_work);
--}
--EXPORT_SYMBOL(blk_sync_queue);
--
--/**
-- * blk_run_queue - run a single device queue
-- * @q:	The queue to run
-- */
--void blk_run_queue(struct request_queue *q)
--{
--	unsigned long flags;
--
--	spin_lock_irqsave(q->queue_lock, flags);
--	blk_remove_plug(q);
--
--	/*
--	 * Only recurse once to avoid overrunning the stack, let the unplug
--	 * handling reinvoke the handler shortly if we already got there.
--	 */
--	if (!elv_queue_empty(q)) {
--		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
--			q->request_fn(q);
--			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
--		} else {
--			blk_plug_device(q);
--			kblockd_schedule_work(&q->unplug_work);
--		}
--	}
--
--	spin_unlock_irqrestore(q->queue_lock, flags);
--}
--EXPORT_SYMBOL(blk_run_queue);
--
--/**
-- * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
-- * @kobj:    the kobj belonging to the request queue to be released
-- *
-- * Description:
-- *     blk_cleanup_queue is the pair to blk_init_queue() or
-- *     blk_queue_make_request().  It should be called when a request queue is
-- *     being released; typically when a block device is being de-registered.
-- *     Currently, its primary task is to free all the &struct request
-- *     structures that were allocated to the queue and the queue itself.
-- *
-- * Caveat:
-- *     Hopefully the low level driver will have finished any
-- *     outstanding requests first...
-- **/
--static void blk_release_queue(struct kobject *kobj)
--{
--	struct request_queue *q =
--		container_of(kobj, struct request_queue, kobj);
--	struct request_list *rl = &q->rq;
--
--	blk_sync_queue(q);
--
--	if (rl->rq_pool)
--		mempool_destroy(rl->rq_pool);
--
--	if (q->queue_tags)
--		__blk_queue_free_tags(q);
--
--	blk_trace_shutdown(q);
--
--	bdi_destroy(&q->backing_dev_info);
--	kmem_cache_free(requestq_cachep, q);
--}
--
--void blk_put_queue(struct request_queue *q)
--{
--	kobject_put(&q->kobj);
--}
--EXPORT_SYMBOL(blk_put_queue);
--
--void blk_cleanup_queue(struct request_queue * q)
--{
--	mutex_lock(&q->sysfs_lock);
--	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
--	mutex_unlock(&q->sysfs_lock);
--
--	if (q->elevator)
--		elevator_exit(q->elevator);
--
--	blk_put_queue(q);
--}
--
--EXPORT_SYMBOL(blk_cleanup_queue);
--
--static int blk_init_free_list(struct request_queue *q)
--{
--	struct request_list *rl = &q->rq;
--
--	rl->count[READ] = rl->count[WRITE] = 0;
--	rl->starved[READ] = rl->starved[WRITE] = 0;
--	rl->elvpriv = 0;
--	init_waitqueue_head(&rl->wait[READ]);
--	init_waitqueue_head(&rl->wait[WRITE]);
--
--	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
--				mempool_free_slab, request_cachep, q->node);
--
--	if (!rl->rq_pool)
--		return -ENOMEM;
--
--	return 0;
--}
--
--struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
--{
--	return blk_alloc_queue_node(gfp_mask, -1);
--}
--EXPORT_SYMBOL(blk_alloc_queue);
--
--static struct kobj_type queue_ktype;
--
--struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
--{
--	struct request_queue *q;
--	int err;
--
--	q = kmem_cache_alloc_node(requestq_cachep,
--				gfp_mask | __GFP_ZERO, node_id);
--	if (!q)
--		return NULL;
--
--	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
--	q->backing_dev_info.unplug_io_data = q;
--	err = bdi_init(&q->backing_dev_info);
--	if (err) {
--		kmem_cache_free(requestq_cachep, q);
--		return NULL;
--	}
--
--	init_timer(&q->unplug_timer);
--
--	kobject_set_name(&q->kobj, "%s", "queue");
--	q->kobj.ktype = &queue_ktype;
--	kobject_init(&q->kobj);
--
--	mutex_init(&q->sysfs_lock);
--
--	return q;
--}
--EXPORT_SYMBOL(blk_alloc_queue_node);
--
--/**
-- * blk_init_queue  - prepare a request queue for use with a block device
-- * @rfn:  The function to be called to process requests that have been
-- *        placed on the queue.
-- * @lock: Request queue spin lock
-- *
-- * Description:
-- *    If a block device wishes to use the standard request handling procedures,
-- *    which sorts requests and coalesces adjacent requests, then it must
-- *    call blk_init_queue().  The function @rfn will be called when there
-- *    are requests on the queue that need to be processed.  If the device
-- *    supports plugging, then @rfn may not be called immediately when requests
-- *    are available on the queue, but may be called at some time later instead.
-- *    Plugged queues are generally unplugged when a buffer belonging to one
-- *    of the requests on the queue is needed, or due to memory pressure.
-- *
-- *    @rfn is not required, or even expected, to remove all requests off the
-- *    queue, but only as many as it can handle at a time.  If it does leave
-- *    requests on the queue, it is responsible for arranging that the requests
-- *    get dealt with eventually.
-- *
-- *    The queue spin lock must be held while manipulating the requests on the
-- *    request queue; this lock will be taken also from interrupt context, so irq
-- *    disabling is needed for it.
-- *
-- *    Function returns a pointer to the initialized request queue, or NULL if
-- *    it didn't succeed.
-- *
-- * Note:
-- *    blk_init_queue() must be paired with a blk_cleanup_queue() call
-- *    when the block device is deactivated (such as at module unload).
-- **/
--
--struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
--{
--	return blk_init_queue_node(rfn, lock, -1);
--}
--EXPORT_SYMBOL(blk_init_queue);
--
--struct request_queue *
--blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
++static struct crypto_template crypto_rfc4106_tmpl = {
++	.name = "rfc4106",
++	.alloc = crypto_rfc4106_alloc,
++	.free = crypto_rfc4106_free,
++	.module = THIS_MODULE,
++};
++
++static int __init crypto_gcm_module_init(void)
++{
++	int err;
++
++	err = crypto_register_template(&crypto_gcm_base_tmpl);
++	if (err)
++		goto out;
++
++	err = crypto_register_template(&crypto_gcm_tmpl);
++	if (err)
++		goto out_undo_base;
++
++	err = crypto_register_template(&crypto_rfc4106_tmpl);
++	if (err)
++		goto out_undo_gcm;
++
++out:
++	return err;
++
++out_undo_gcm:
++	crypto_unregister_template(&crypto_gcm_tmpl);
++out_undo_base:
++	crypto_unregister_template(&crypto_gcm_base_tmpl);
++	goto out;
++}
++
++static void __exit crypto_gcm_module_exit(void)
++{
++	crypto_unregister_template(&crypto_rfc4106_tmpl);
++	crypto_unregister_template(&crypto_gcm_tmpl);
++	crypto_unregister_template(&crypto_gcm_base_tmpl);
++}
++
++module_init(crypto_gcm_module_init);
++module_exit(crypto_gcm_module_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Galois/Counter Mode");
++MODULE_AUTHOR("Mikko Herranen <mh1 at iki.fi>");
++MODULE_ALIAS("gcm_base");
++MODULE_ALIAS("rfc4106");
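
crypto_gcm_module_init() above registers three templates and unwinds with gotos when a later registration fails. The same error-unwinding pattern as a standalone C sketch, with hypothetical register_*/unregister_* stand-ins:

#include <stdio.h>

static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; }   /* pretend this one fails */
static void unregister_a(void) { puts("undo a"); }
static void unregister_b(void) { puts("undo b"); }

static int init_all(void)
{
	int err;

	err = register_a();
	if (err)
		goto out;

	err = register_b();
	if (err)
		goto out_undo_a;

	err = register_c();
	if (err)
		goto out_undo_b;

out:
	return err;

out_undo_b:
	unregister_b();                    /* undo only what already succeeded */
out_undo_a:
	unregister_a();
	goto out;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
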
+diff --git a/crypto/hmac.c b/crypto/hmac.c
+index 0f05be7..a1d016a 100644
+--- a/crypto/hmac.c
++++ b/crypto/hmac.c
+@@ -17,6 +17,7 @@
+  */
+ 
+ #include <crypto/algapi.h>
++#include <crypto/scatterwalk.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+@@ -160,7 +161,7 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
+ 
+ 	sg_init_table(sg1, 2);
+ 	sg_set_buf(sg1, ipad, bs);
+-	sg_set_page(&sg1[1], (void *) sg, 0, 0);
++	scatterwalk_sg_chain(sg1, 2, sg);
+ 
+ 	sg_init_table(sg2, 1);
+ 	sg_set_buf(sg2, opad, bs + ds);
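
The hmac.c hunk above chains the ipad buffer and the caller's data into one scatterlist so they can be hashed in a single pass. For reference, a userspace sketch of the standard HMAC ipad/opad preparation that feeds that first hash (hash function omitted; 0x36/0x5c are the RFC 2104 pad constants):

#include <stdio.h>
#include <string.h>

#define BLOCK 64                           /* e.g. SHA-1/SHA-256 block size */

static void hmac_pads(const unsigned char *key, unsigned int klen,
		      unsigned char *ipad, unsigned char *opad)
{
	unsigned int i;

	/* keys longer than one block are first hashed down in real HMAC */
	memset(ipad, 0, BLOCK);
	memcpy(ipad, key, klen < BLOCK ? klen : BLOCK);
	memcpy(opad, ipad, BLOCK);

	for (i = 0; i < BLOCK; i++) {
		ipad[i] ^= 0x36;           /* inner pad, hashed with the message */
		opad[i] ^= 0x5c;           /* outer pad, hashed with the inner digest */
	}
}

int main(void)
{
	unsigned char ipad[BLOCK], opad[BLOCK];

	hmac_pads((const unsigned char *)"secret", 6, ipad, opad);
	printf("ipad[0]=%02x opad[0]=%02x\n", ipad[0], opad[0]);
	return 0;
}
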
+diff --git a/crypto/internal.h b/crypto/internal.h
+index abb01f7..32f4c21 100644
+--- a/crypto/internal.h
++++ b/crypto/internal.h
+@@ -25,7 +25,6 @@
+ #include <linux/notifier.h>
+ #include <linux/rwsem.h>
+ #include <linux/slab.h>
+-#include <asm/kmap_types.h>
+ 
+ /* Crypto notification events. */
+ enum {
+@@ -50,34 +49,6 @@ extern struct list_head crypto_alg_list;
+ extern struct rw_semaphore crypto_alg_sem;
+ extern struct blocking_notifier_head crypto_chain;
+ 
+-static inline enum km_type crypto_kmap_type(int out)
 -{
--	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
--
--	if (!q)
--		return NULL;
--
--	q->node = node_id;
--	if (blk_init_free_list(q)) {
--		kmem_cache_free(requestq_cachep, q);
--		return NULL;
--	}
--
--	/*
--	 * if caller didn't supply a lock, they get per-queue locking with
--	 * our embedded lock
--	 */
--	if (!lock) {
--		spin_lock_init(&q->__queue_lock);
--		lock = &q->__queue_lock;
--	}
--
--	q->request_fn		= rfn;
--	q->prep_rq_fn		= NULL;
--	q->unplug_fn		= generic_unplug_device;
--	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
--	q->queue_lock		= lock;
--
--	blk_queue_segment_boundary(q, 0xffffffff);
--
--	blk_queue_make_request(q, __make_request);
--	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
--
--	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
--	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
--
--	q->sg_reserved_size = INT_MAX;
+-	enum km_type type;
 -
--	/*
--	 * all done
--	 */
--	if (!elevator_init(q, NULL)) {
--		blk_queue_congestion_threshold(q);
--		return q;
--	}
+-	if (in_softirq())
+-		type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
+-	else
+-		type = out * (KM_USER1 - KM_USER0) + KM_USER0;
 -
--	blk_put_queue(q);
--	return NULL;
+-	return type;
 -}
--EXPORT_SYMBOL(blk_init_queue_node);
 -
--int blk_get_queue(struct request_queue *q)
+-static inline void *crypto_kmap(struct page *page, int out)
 -{
--	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
--		kobject_get(&q->kobj);
--		return 0;
--	}
--
--	return 1;
+-	return kmap_atomic(page, crypto_kmap_type(out));
 -}
 -
--EXPORT_SYMBOL(blk_get_queue);
--
--static inline void blk_free_request(struct request_queue *q, struct request *rq)
+-static inline void crypto_kunmap(void *vaddr, int out)
 -{
--	if (rq->cmd_flags & REQ_ELVPRIV)
--		elv_put_request(q, rq);
--	mempool_free(rq, q->rq.rq_pool);
+-	kunmap_atomic(vaddr, crypto_kmap_type(out));
 -}
 -
--static struct request *
--blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
+-static inline void crypto_yield(u32 flags)
 -{
--	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
--
--	if (!rq)
--		return NULL;
--
--	/*
--	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
--	 * see bio.h and blkdev.h
--	 */
--	rq->cmd_flags = rw | REQ_ALLOCED;
--
--	if (priv) {
--		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
--			mempool_free(rq, q->rq.rq_pool);
--			return NULL;
--		}
--		rq->cmd_flags |= REQ_ELVPRIV;
--	}
--
--	return rq;
+-	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+-		cond_resched();
 -}
 -
--/*
-- * ioc_batching returns true if the ioc is a valid batching request and
-- * should be given priority access to a request.
-- */
--static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
+ #ifdef CONFIG_PROC_FS
+ void __init crypto_init_proc(void);
+ void __exit crypto_exit_proc(void);
+@@ -122,6 +93,8 @@ void crypto_exit_digest_ops(struct crypto_tfm *tfm);
+ void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
+ void crypto_exit_compress_ops(struct crypto_tfm *tfm);
+ 
++void crypto_larval_kill(struct crypto_alg *alg);
++struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
+ void crypto_larval_error(const char *name, u32 type, u32 mask);
+ 
+ void crypto_shoot_alg(struct crypto_alg *alg);
+diff --git a/crypto/lzo.c b/crypto/lzo.c
+new file mode 100644
+index 0000000..48c3288
+--- /dev/null
++++ b/crypto/lzo.c
+@@ -0,0 +1,106 @@
++/*
++ * Cryptographic API.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc., 51
++ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/crypto.h>
++#include <linux/vmalloc.h>
++#include <linux/lzo.h>
++
++struct lzo_ctx {
++	void *lzo_comp_mem;
++};
++
++static int lzo_init(struct crypto_tfm *tfm)
++{
++	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
++	if (!ctx->lzo_comp_mem)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static void lzo_exit(struct crypto_tfm *tfm)
++{
++	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	vfree(ctx->lzo_comp_mem);
++}
++
++static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
++			    unsigned int slen, u8 *dst, unsigned int *dlen)
++{
++	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
++	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
++	int err;
++
++	err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem);
++
++	if (err != LZO_E_OK)
++		return -EINVAL;
++
++	*dlen = tmp_len;
++	return 0;
++}
++
++static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
++			      unsigned int slen, u8 *dst, unsigned int *dlen)
++{
++	int err;
++	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
++
++	err = lzo1x_decompress_safe(src, slen, dst, &tmp_len);
++
++	if (err != LZO_E_OK)
++		return -EINVAL;
++
++	*dlen = tmp_len;
++	return 0;
++
++}
++
++static struct crypto_alg alg = {
++	.cra_name		= "lzo",
++	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
++	.cra_ctxsize		= sizeof(struct lzo_ctx),
++	.cra_module		= THIS_MODULE,
++	.cra_list		= LIST_HEAD_INIT(alg.cra_list),
++	.cra_init		= lzo_init,
++	.cra_exit		= lzo_exit,
++	.cra_u			= { .compress = {
++	.coa_compress 		= lzo_compress,
++	.coa_decompress  	= lzo_decompress } }
++};
++
++static int __init init(void)
++{
++	return crypto_register_alg(&alg);
++}
++
++static void __exit fini(void)
++{
++	crypto_unregister_alg(&alg);
++}
++
++module_init(init);
++module_exit(fini);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("LZO Compression Algorithm");
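
The new crypto/lzo.c is a thin wrapper around lzo1x_1_compress()/lzo1x_decompress_safe(). A userspace sketch of the same calls against liblzo2 (assumes the liblzo2 headers are installed; link with -llzo2):

#include <stdio.h>
#include <string.h>
#include <lzo/lzo1x.h>

int main(void)
{
	unsigned char in[] = "hello hello hello hello hello hello hello";
	unsigned char out[sizeof(in) + sizeof(in) / 16 + 64 + 3]; /* worst case */
	unsigned char back[sizeof(in)];
	unsigned char wrkmem[LZO1X_1_MEM_COMPRESS];
	lzo_uint out_len = sizeof(out), back_len = sizeof(back);

	if (lzo_init() != LZO_E_OK)
		return 1;

	if (lzo1x_1_compress(in, sizeof(in), out, &out_len, wrkmem) != LZO_E_OK)
		return 1;
	printf("compressed %zu -> %lu bytes\n", sizeof(in), (unsigned long)out_len);

	if (lzo1x_decompress_safe(out, out_len, back, &back_len, NULL) != LZO_E_OK)
		return 1;
	printf("round trip %s\n", memcmp(in, back, sizeof(in)) ? "failed" : "ok");
	return 0;
}
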
+diff --git a/crypto/pcbc.c b/crypto/pcbc.c
+index c3ed8a1..fe70477 100644
+--- a/crypto/pcbc.c
++++ b/crypto/pcbc.c
+@@ -24,7 +24,6 @@
+ 
+ struct crypto_pcbc_ctx {
+ 	struct crypto_cipher *child;
+-	void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
+ };
+ 
+ static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
+@@ -45,9 +44,7 @@ static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
+ 
+ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
+ 				       struct blkcipher_walk *walk,
+-				       struct crypto_cipher *tfm,
+-				       void (*xor)(u8 *, const u8 *,
+-						   unsigned int))
++				       struct crypto_cipher *tfm)
+ {
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ 		crypto_cipher_alg(tfm)->cia_encrypt;
+@@ -58,10 +55,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
+ 	u8 *iv = walk->iv;
+ 
+ 	do {
+-		xor(iv, src, bsize);
++		crypto_xor(iv, src, bsize);
+ 		fn(crypto_cipher_tfm(tfm), dst, iv);
+ 		memcpy(iv, dst, bsize);
+-		xor(iv, src, bsize);
++		crypto_xor(iv, src, bsize);
+ 
+ 		src += bsize;
+ 		dst += bsize;
+@@ -72,9 +69,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
+ 
+ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
+ 				       struct blkcipher_walk *walk,
+-				       struct crypto_cipher *tfm,
+-				       void (*xor)(u8 *, const u8 *,
+-						   unsigned int))
++				       struct crypto_cipher *tfm)
+ {
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ 		crypto_cipher_alg(tfm)->cia_encrypt;
+@@ -86,10 +81,10 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
+ 
+ 	do {
+ 		memcpy(tmpbuf, src, bsize);
+-		xor(iv, tmpbuf, bsize);
++		crypto_xor(iv, src, bsize);
+ 		fn(crypto_cipher_tfm(tfm), src, iv);
+-		memcpy(iv, src, bsize);
+-		xor(iv, tmpbuf, bsize);
++		memcpy(iv, tmpbuf, bsize);
++		crypto_xor(iv, src, bsize);
+ 
+ 		src += bsize;
+ 	} while ((nbytes -= bsize) >= bsize);
+@@ -107,7 +102,6 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
+ 	struct crypto_blkcipher *tfm = desc->tfm;
+ 	struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ 	struct crypto_cipher *child = ctx->child;
+-	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+ 	int err;
+ 
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+@@ -115,11 +109,11 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
+ 
+ 	while ((nbytes = walk.nbytes)) {
+ 		if (walk.src.virt.addr == walk.dst.virt.addr)
+-			nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child,
+-							     xor);
++			nbytes = crypto_pcbc_encrypt_inplace(desc, &walk,
++							     child);
+ 		else
+-			nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child,
+-							     xor);
++			nbytes = crypto_pcbc_encrypt_segment(desc, &walk,
++							     child);
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	}
+ 
+@@ -128,9 +122,7 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
+ 
+ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
+ 				       struct blkcipher_walk *walk,
+-				       struct crypto_cipher *tfm,
+-				       void (*xor)(u8 *, const u8 *,
+-						   unsigned int))
++				       struct crypto_cipher *tfm)
+ {
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ 		crypto_cipher_alg(tfm)->cia_decrypt;
+@@ -142,9 +134,9 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
+ 
+ 	do {
+ 		fn(crypto_cipher_tfm(tfm), dst, src);
+-		xor(dst, iv, bsize);
++		crypto_xor(dst, iv, bsize);
+ 		memcpy(iv, src, bsize);
+-		xor(iv, dst, bsize);
++		crypto_xor(iv, dst, bsize);
+ 
+ 		src += bsize;
+ 		dst += bsize;
+@@ -157,9 +149,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
+ 
+ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
+ 				       struct blkcipher_walk *walk,
+-				       struct crypto_cipher *tfm,
+-				       void (*xor)(u8 *, const u8 *,
+-						   unsigned int))
++				       struct crypto_cipher *tfm)
+ {
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ 		crypto_cipher_alg(tfm)->cia_decrypt;
+@@ -172,9 +162,9 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
+ 	do {
+ 		memcpy(tmpbuf, src, bsize);
+ 		fn(crypto_cipher_tfm(tfm), src, src);
+-		xor(src, iv, bsize);
++		crypto_xor(src, iv, bsize);
+ 		memcpy(iv, tmpbuf, bsize);
+-		xor(iv, src, bsize);
++		crypto_xor(iv, src, bsize);
+ 
+ 		src += bsize;
+ 	} while ((nbytes -= bsize) >= bsize);
+@@ -192,7 +182,6 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
+ 	struct crypto_blkcipher *tfm = desc->tfm;
+ 	struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ 	struct crypto_cipher *child = ctx->child;
+-	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+ 	int err;
+ 
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+@@ -200,48 +189,17 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
+ 
+ 	while ((nbytes = walk.nbytes)) {
+ 		if (walk.src.virt.addr == walk.dst.virt.addr)
+-			nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child,
+-							     xor);
++			nbytes = crypto_pcbc_decrypt_inplace(desc, &walk,
++							     child);
+ 		else
+-			nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child,
+-							     xor);
++			nbytes = crypto_pcbc_decrypt_segment(desc, &walk,
++							     child);
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	}
+ 
+ 	return err;
+ }
+ 
+-static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
 -{
--	if (!ioc)
--		return 0;
--
--	/*
--	 * Make sure the process is able to allocate at least 1 request
--	 * even if the batch times out, otherwise we could theoretically
--	 * lose wakeups.
--	 */
--	return ioc->nr_batch_requests == q->nr_batching ||
--		(ioc->nr_batch_requests > 0
--		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
+-	do {
+-		*a++ ^= *b++;
+-	} while (--bs);
 -}
 -
--/*
-- * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
-- * will cause the process to be a "batcher" on all queues in the system. This
-- * is the behaviour we want though - once it gets a wakeup it should be given
-- * a nice run.
-- */
--static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
+-static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
 -{
--	if (!ioc || ioc_batching(q, ioc))
--		return;
+-	u32 *a = (u32 *)dst;
+-	u32 *b = (u32 *)src;
 -
--	ioc->nr_batch_requests = q->nr_batching;
--	ioc->last_waited = jiffies;
+-	do {
+-		*a++ ^= *b++;
+-	} while ((bs -= 4));
 -}
 -
--static void __freed_request(struct request_queue *q, int rw)
+-static void xor_64(u8 *a, const u8 *b, unsigned int bs)
 -{
--	struct request_list *rl = &q->rq;
--
--	if (rl->count[rw] < queue_congestion_off_threshold(q))
--		blk_clear_queue_congested(q, rw);
--
--	if (rl->count[rw] + 1 <= q->nr_requests) {
--		if (waitqueue_active(&rl->wait[rw]))
--			wake_up(&rl->wait[rw]);
--
--		blk_clear_queue_full(q, rw);
--	}
+-	((u32 *)a)[0] ^= ((u32 *)b)[0];
+-	((u32 *)a)[1] ^= ((u32 *)b)[1];
 -}
 -
--/*
-- * A request has just been released.  Account for it, update the full and
-- * congestion status, wake up any waiters.   Called under q->queue_lock.
-- */
--static void freed_request(struct request_queue *q, int rw, int priv)
+-static void xor_128(u8 *a, const u8 *b, unsigned int bs)
 -{
--	struct request_list *rl = &q->rq;
--
--	rl->count[rw]--;
--	if (priv)
--		rl->elvpriv--;
--
--	__freed_request(q, rw);
--
--	if (unlikely(rl->starved[rw ^ 1]))
--		__freed_request(q, rw ^ 1);
+-	((u32 *)a)[0] ^= ((u32 *)b)[0];
+-	((u32 *)a)[1] ^= ((u32 *)b)[1];
+-	((u32 *)a)[2] ^= ((u32 *)b)[2];
+-	((u32 *)a)[3] ^= ((u32 *)b)[3];
 -}
 -
--#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
--/*
-- * Get a free request, queue_lock must be held.
-- * Returns NULL on failure, with queue_lock held.
-- * Returns !NULL on success, with queue_lock *not held*.
-- */
--static struct request *get_request(struct request_queue *q, int rw_flags,
--				   struct bio *bio, gfp_t gfp_mask)
--{
--	struct request *rq = NULL;
--	struct request_list *rl = &q->rq;
--	struct io_context *ioc = NULL;
--	const int rw = rw_flags & 0x01;
--	int may_queue, priv;
--
--	may_queue = elv_may_queue(q, rw_flags);
--	if (may_queue == ELV_MQUEUE_NO)
--		goto rq_starved;
--
--	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
--		if (rl->count[rw]+1 >= q->nr_requests) {
--			ioc = current_io_context(GFP_ATOMIC, q->node);
--			/*
--			 * The queue will fill after this allocation, so set
--			 * it as full, and mark this process as "batching".
--			 * This process will be allowed to complete a batch of
--			 * requests, others will be blocked.
--			 */
--			if (!blk_queue_full(q, rw)) {
--				ioc_set_batching(q, ioc);
--				blk_set_queue_full(q, rw);
--			} else {
--				if (may_queue != ELV_MQUEUE_MUST
--						&& !ioc_batching(q, ioc)) {
--					/*
--					 * The queue is full and the allocating
--					 * process is not a "batcher", and not
--					 * exempted by the IO scheduler
--					 */
--					goto out;
--				}
--			}
--		}
--		blk_set_queue_congested(q, rw);
--	}
--
--	/*
--	 * Only allow batching queuers to allocate up to 50% over the defined
--	 * limit of requests, otherwise we could have thousands of requests
--	 * allocated with any setting of ->nr_requests
--	 */
--	if (rl->count[rw] >= (3 * q->nr_requests / 2))
--		goto out;
--
--	rl->count[rw]++;
--	rl->starved[rw] = 0;
--
--	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
--	if (priv)
--		rl->elvpriv++;
--
--	spin_unlock_irq(q->queue_lock);
--
--	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
--	if (unlikely(!rq)) {
--		/*
--		 * Allocation failed presumably due to memory. Undo anything
--		 * we might have messed up.
--		 *
--		 * Allocating task should really be put onto the front of the
--		 * wait queue, but this is pretty rare.
--		 */
--		spin_lock_irq(q->queue_lock);
--		freed_request(q, rw, priv);
+ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
+ {
+ 	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+@@ -249,22 +207,6 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
+ 	struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	struct crypto_cipher *cipher;
+ 
+-	switch (crypto_tfm_alg_blocksize(tfm)) {
+-	case 8:
+-		ctx->xor = xor_64;
+-		break;
 -
--		/*
--		 * in the very unlikely event that allocation failed and no
--		 * requests for this direction was pending, mark us starved
--		 * so that freeing of a request in the other direction will
--		 * notice us. another possible fix would be to split the
--		 * rq mempool into READ and WRITE
--		 */
--rq_starved:
--		if (unlikely(rl->count[rw] == 0))
--			rl->starved[rw] = 1;
+-	case 16:
+-		ctx->xor = xor_128;
+-		break;
 -
--		goto out;
+-	default:
+-		if (crypto_tfm_alg_blocksize(tfm) % 4)
+-			ctx->xor = xor_byte;
+-		else
+-			ctx->xor = xor_quad;
 -	}
 -
--	/*
--	 * ioc may be NULL here, and ioc_batching will be false. That's
--	 * OK, if the queue is under the request limit then requests need
--	 * not count toward the nr_batch_requests limit. There will always
--	 * be some limit enforced by BLK_BATCH_TIME.
--	 */
--	if (ioc_batching(q, ioc))
--		ioc->nr_batch_requests--;
--	
--	rq_init(q, rq);
--
--	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
--out:
--	return rq;
--}
+ 	cipher = crypto_spawn_cipher(spawn);
+ 	if (IS_ERR(cipher))
+ 		return PTR_ERR(cipher);
+@@ -304,8 +246,9 @@ static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
+ 	inst->alg.cra_alignmask = alg->cra_alignmask;
+ 	inst->alg.cra_type = &crypto_blkcipher_type;
+ 
+-	if (!(alg->cra_blocksize % 4))
+-		inst->alg.cra_alignmask |= 3;
++	/* We access the data as u32s when xoring. */
++	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
++
+ 	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
+ 	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
+ 	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
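
The pcbc.c change drops the per-blocksize xor helpers in favour of the generic crypto_xor(). A standalone sketch of such a byte-wise helper and the PCBC chaining it is used for, with a trivial stand-in cipher (not a real block cipher):

#include <stdio.h>
#include <string.h>

#define BS 8

static void xor_bytes(unsigned char *a, const unsigned char *b, unsigned int n)
{
	while (n--)
		*a++ ^= *b++;
}

/* toy stand-in for a block cipher: just inverts the bits */
static void toy_encrypt(unsigned char *dst, const unsigned char *src)
{
	int i;

	for (i = 0; i < BS; i++)
		dst[i] = src[i] ^ 0xff;
}

int main(void)
{
	unsigned char iv[BS] = { 0 }, buf[BS], ct[BS];
	unsigned char pt[2][BS] = { "block-1", "block-2" };
	int i, j;

	for (i = 0; i < 2; i++) {
		memcpy(buf, iv, BS);
		xor_bytes(buf, pt[i], BS);     /* IV ^ P[i]              */
		toy_encrypt(ct, buf);          /* C[i] = E(IV ^ P[i])    */
		memcpy(iv, ct, BS);
		xor_bytes(iv, pt[i], BS);      /* next IV = C[i] ^ P[i]  */
		printf("C[%d] =", i);
		for (j = 0; j < BS; j++)
			printf(" %02x", ct[j]);
		printf("\n");
	}
	return 0;
}
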
+diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
+new file mode 100644
+index 0000000..1fa4e4d
+--- /dev/null
++++ b/crypto/salsa20_generic.c
+@@ -0,0 +1,255 @@
++/*
++ * Salsa20: Salsa20 stream cipher algorithm
++ *
++ * Copyright (c) 2007 Tan Swee Heng <thesweeheng at gmail.com>
++ *
++ * Derived from:
++ * - salsa20.c: Public domain C code by Daniel J. Bernstein <djb at cr.yp.to>
++ *
++ * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream
++ * Cipher Project. It is designed by Daniel J. Bernstein <djb at cr.yp.to>.
++ * More information about eSTREAM and Salsa20 can be found here:
++ *   http://www.ecrypt.eu.org/stream/
++ *   http://cr.yp.to/snuffle.html
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/crypto.h>
++#include <linux/types.h>
++#include <crypto/algapi.h>
++#include <asm/byteorder.h>
++
++#define SALSA20_IV_SIZE        8U
++#define SALSA20_MIN_KEY_SIZE  16U
++#define SALSA20_MAX_KEY_SIZE  32U
++
++/*
++ * Start of code taken from D. J. Bernstein's reference implementation.
++ * With some modifications and optimizations made to suit our needs.
++ */
++
++/*
++salsa20-ref.c version 20051118
++D. J. Bernstein
++Public domain.
++*/
++
++#define ROTATE(v,n) (((v) << (n)) | ((v) >> (32 - (n))))
++#define XOR(v,w) ((v) ^ (w))
++#define PLUS(v,w) (((v) + (w)))
++#define PLUSONE(v) (PLUS((v),1))
++#define U32TO8_LITTLE(p, v) \
++	{ (p)[0] = (v >>  0) & 0xff; (p)[1] = (v >>  8) & 0xff; \
++	  (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; }
++#define U8TO32_LITTLE(p)   \
++	(((u32)((p)[0])      ) | ((u32)((p)[1]) <<  8) | \
++	 ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24)   )
++
++struct salsa20_ctx
++{
++	u32 input[16];
++};
++
++static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
++{
++	u32 x[16];
++	int i;
++
++	memcpy(x, input, sizeof(x));
++	for (i = 20; i > 0; i -= 2) {
++		x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 0],x[12]), 7));
++		x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[ 4],x[ 0]), 9));
++		x[12] = XOR(x[12],ROTATE(PLUS(x[ 8],x[ 4]),13));
++		x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[12],x[ 8]),18));
++		x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 5],x[ 1]), 7));
++		x[13] = XOR(x[13],ROTATE(PLUS(x[ 9],x[ 5]), 9));
++		x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[13],x[ 9]),13));
++		x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 1],x[13]),18));
++		x[14] = XOR(x[14],ROTATE(PLUS(x[10],x[ 6]), 7));
++		x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[14],x[10]), 9));
++		x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 2],x[14]),13));
++		x[10] = XOR(x[10],ROTATE(PLUS(x[ 6],x[ 2]),18));
++		x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[15],x[11]), 7));
++		x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 3],x[15]), 9));
++		x[11] = XOR(x[11],ROTATE(PLUS(x[ 7],x[ 3]),13));
++		x[15] = XOR(x[15],ROTATE(PLUS(x[11],x[ 7]),18));
++		x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[ 0],x[ 3]), 7));
++		x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[ 1],x[ 0]), 9));
++		x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[ 2],x[ 1]),13));
++		x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[ 3],x[ 2]),18));
++		x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 5],x[ 4]), 7));
++		x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 6],x[ 5]), 9));
++		x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 7],x[ 6]),13));
++		x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 4],x[ 7]),18));
++		x[11] = XOR(x[11],ROTATE(PLUS(x[10],x[ 9]), 7));
++		x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[11],x[10]), 9));
++		x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 8],x[11]),13));
++		x[10] = XOR(x[10],ROTATE(PLUS(x[ 9],x[ 8]),18));
++		x[12] = XOR(x[12],ROTATE(PLUS(x[15],x[14]), 7));
++		x[13] = XOR(x[13],ROTATE(PLUS(x[12],x[15]), 9));
++		x[14] = XOR(x[14],ROTATE(PLUS(x[13],x[12]),13));
++		x[15] = XOR(x[15],ROTATE(PLUS(x[14],x[13]),18));
++	}
++	for (i = 0; i < 16; ++i)
++		x[i] = PLUS(x[i],input[i]);
++	for (i = 0; i < 16; ++i)
++		U32TO8_LITTLE(output + 4 * i,x[i]);
++}
++
++static const char sigma[16] = "expand 32-byte k";
++static const char tau[16] = "expand 16-byte k";
++
++static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
++{
++	const char *constants;
++
++	ctx->input[1] = U8TO32_LITTLE(k + 0);
++	ctx->input[2] = U8TO32_LITTLE(k + 4);
++	ctx->input[3] = U8TO32_LITTLE(k + 8);
++	ctx->input[4] = U8TO32_LITTLE(k + 12);
++	if (kbytes == 32) { /* recommended */
++		k += 16;
++		constants = sigma;
++	} else { /* kbytes == 16 */
++		constants = tau;
++	}
++	ctx->input[11] = U8TO32_LITTLE(k + 0);
++	ctx->input[12] = U8TO32_LITTLE(k + 4);
++	ctx->input[13] = U8TO32_LITTLE(k + 8);
++	ctx->input[14] = U8TO32_LITTLE(k + 12);
++	ctx->input[0] = U8TO32_LITTLE(constants + 0);
++	ctx->input[5] = U8TO32_LITTLE(constants + 4);
++	ctx->input[10] = U8TO32_LITTLE(constants + 8);
++	ctx->input[15] = U8TO32_LITTLE(constants + 12);
++}
++
++static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
++{
++	ctx->input[6] = U8TO32_LITTLE(iv + 0);
++	ctx->input[7] = U8TO32_LITTLE(iv + 4);
++	ctx->input[8] = 0;
++	ctx->input[9] = 0;
++}
++
++static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
++				  const u8 *src, unsigned int bytes)
++{
++	u8 buf[64];
++
++	if (dst != src)
++		memcpy(dst, src, bytes);
++
++	while (bytes) {
++		salsa20_wordtobyte(buf, ctx->input);
++
++		ctx->input[8] = PLUSONE(ctx->input[8]);
++		if (!ctx->input[8])
++			ctx->input[9] = PLUSONE(ctx->input[9]);
++
++		if (bytes <= 64) {
++			crypto_xor(dst, buf, bytes);
++			return;
++		}
++
++		crypto_xor(dst, buf, 64);
++		bytes -= 64;
++		dst += 64;
++	}
++}
++
++/*
++ * End of code taken from D. J. Bernstein's reference implementation.
++ */
++
++static int setkey(struct crypto_tfm *tfm, const u8 *key,
++		  unsigned int keysize)
++{
++	struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
++	salsa20_keysetup(ctx, key, keysize);
++	return 0;
++}
++
++static int encrypt(struct blkcipher_desc *desc,
++		   struct scatterlist *dst, struct scatterlist *src,
++		   unsigned int nbytes)
++{
++	struct blkcipher_walk walk;
++	struct crypto_blkcipher *tfm = desc->tfm;
++	struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
++	int err;
++
++	blkcipher_walk_init(&walk, dst, src, nbytes);
++	err = blkcipher_walk_virt_block(desc, &walk, 64);
++
++	salsa20_ivsetup(ctx, walk.iv);
++
++	if (likely(walk.nbytes == nbytes))
++	{
++		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
++				      walk.src.virt.addr, nbytes);
++		return blkcipher_walk_done(desc, &walk, 0);
++	}
++
++	while (walk.nbytes >= 64) {
++		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
++				      walk.src.virt.addr,
++				      walk.nbytes - (walk.nbytes % 64));
++		err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
++	}
++
++	if (walk.nbytes) {
++		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
++				      walk.src.virt.addr, walk.nbytes);
++		err = blkcipher_walk_done(desc, &walk, 0);
++	}
++
++	return err;
++}
++
++static struct crypto_alg alg = {
++	.cra_name           =   "salsa20",
++	.cra_driver_name    =   "salsa20-generic",
++	.cra_priority       =   100,
++	.cra_flags          =   CRYPTO_ALG_TYPE_BLKCIPHER,
++	.cra_type           =   &crypto_blkcipher_type,
++	.cra_blocksize      =   1,
++	.cra_ctxsize        =   sizeof(struct salsa20_ctx),
++	.cra_alignmask      =	3,
++	.cra_module         =   THIS_MODULE,
++	.cra_list           =   LIST_HEAD_INIT(alg.cra_list),
++	.cra_u              =   {
++		.blkcipher = {
++			.setkey         =   setkey,
++			.encrypt        =   encrypt,
++			.decrypt        =   encrypt,
++			.min_keysize    =   SALSA20_MIN_KEY_SIZE,
++			.max_keysize    =   SALSA20_MAX_KEY_SIZE,
++			.ivsize         =   SALSA20_IV_SIZE,
++		}
++	}
++};
++
++static int __init init(void)
++{
++	return crypto_register_alg(&alg);
++}
++
++static void __exit fini(void)
++{
++	crypto_unregister_alg(&alg);
++}
++
++module_init(init);
++module_exit(fini);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
++MODULE_ALIAS("salsa20");
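
salsa20_encrypt_bytes() above works one 64-byte keystream block at a time, carrying the block counter in input[8]/input[9] and XORing only the remaining bytes on the final partial block. A userspace sketch of that loop, with a dummy keystream function standing in for the 20 Salsa20 rounds:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void dummy_keystream(uint8_t out[64], const uint32_t ctr[2])
{
	unsigned int i;

	for (i = 0; i < 64; i++)
		out[i] = (uint8_t)(ctr[0] + i);    /* stand-in for the real rounds */
}

static void stream_xor(uint8_t *buf, unsigned int bytes, uint32_t ctr[2])
{
	uint8_t ks[64];
	unsigned int i, n;

	while (bytes) {
		dummy_keystream(ks, ctr);
		if (++ctr[0] == 0)                 /* 64-bit counter in two words */
			ctr[1]++;

		n = bytes < 64 ? bytes : 64;       /* partial final block */
		for (i = 0; i < n; i++)
			buf[i] ^= ks[i];
		buf += n;
		bytes -= n;
	}
}

int main(void)
{
	uint8_t msg[100];
	uint32_t ctr[2] = { 0, 0 };

	memset(msg, 'A', sizeof(msg));
	stream_xor(msg, sizeof(msg), ctr);         /* encrypt */
	ctr[0] = ctr[1] = 0;
	stream_xor(msg, sizeof(msg), ctr);         /* decrypt is the same operation */
	printf("round trip %s\n", msg[0] == 'A' && msg[99] == 'A' ? "ok" : "failed");
	return 0;
}
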
+diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
+index b9bbda0..9aeeb52 100644
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -13,6 +13,8 @@
+  * any later version.
+  *
+  */
++
++#include <crypto/scatterwalk.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
+@@ -20,9 +22,6 @@
+ #include <linux/highmem.h>
+ #include <linux/scatterlist.h>
+ 
+-#include "internal.h"
+-#include "scatterwalk.h"
 -
+ static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
+ {
+ 	void *src = out ? buf : sgdata;
+@@ -106,6 +105,9 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+ 	struct scatter_walk walk;
+ 	unsigned int offset = 0;
+ 
++	if (!nbytes)
++		return;
++
+ 	for (;;) {
+ 		scatterwalk_start(&walk, sg);
+ 
+@@ -113,7 +115,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+ 			break;
+ 
+ 		offset += sg->length;
+-		sg = sg_next(sg);
++		sg = scatterwalk_sg_next(sg);
+ 	}
+ 
+ 	scatterwalk_advance(&walk, start - offset);
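
scatterwalk_map_and_copy() above first skips whole scatterlist entries until it reaches the requested start offset, and now returns early for a zero-length copy. The same walk over plain buffers, as a userspace sketch:

#include <stdio.h>
#include <string.h>

struct seg {
	const char *buf;
	unsigned int len;
};

/* copy nbytes starting at absolute offset `start` across the segments */
static void copy_from_segs(char *out, const struct seg *sg, unsigned int nseg,
			   unsigned int start, unsigned int nbytes)
{
	unsigned int i = 0, offset = 0, skip, n;

	if (!nbytes)                               /* mirrors the new early return */
		return;

	while (i < nseg && offset + sg[i].len <= start)
		offset += sg[i++].len;             /* skip whole segments */

	skip = start - offset;
	while (nbytes && i < nseg) {
		n = sg[i].len - skip;
		if (n > nbytes)
			n = nbytes;
		memcpy(out, sg[i].buf + skip, n);
		out += n;
		nbytes -= n;
		skip = 0;
		i++;
	}
}

int main(void)
{
	struct seg sg[] = { { "hello ", 6 }, { "scatter", 7 }, { "walk", 4 } };
	char out[8] = { 0 };

	copy_from_segs(out, sg, 3, 6, 7);          /* bytes 6..12: "scatter" */
	printf("%s\n", out);
	return 0;
}
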
+diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h
+deleted file mode 100644
+index 87ed681..0000000
+--- a/crypto/scatterwalk.h
++++ /dev/null
+@@ -1,80 +0,0 @@
 -/*
-- * No available requests for this queue, unplug the device and wait for some
-- * requests to become available.
+- * Cryptographic API.
+- *
+- * Copyright (c) 2002 James Morris <jmorris at intercode.com.au>
+- * Copyright (c) 2002 Adam J. Richter <adam at yggdrasil.com>
+- * Copyright (c) 2004 Jean-Luc Cooke <jlcooke at certainkey.com>
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the Free
+- * Software Foundation; either version 2 of the License, or (at your option)
+- * any later version.
 - *
-- * Called with q->queue_lock held, and returns with it unlocked.
 - */
--static struct request *get_request_wait(struct request_queue *q, int rw_flags,
--					struct bio *bio)
--{
--	const int rw = rw_flags & 0x01;
--	struct request *rq;
--
--	rq = get_request(q, rw_flags, bio, GFP_NOIO);
--	while (!rq) {
--		DEFINE_WAIT(wait);
--		struct request_list *rl = &q->rq;
--
--		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
--				TASK_UNINTERRUPTIBLE);
--
--		rq = get_request(q, rw_flags, bio, GFP_NOIO);
--
--		if (!rq) {
--			struct io_context *ioc;
--
--			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
--
--			__generic_unplug_device(q);
--			spin_unlock_irq(q->queue_lock);
--			io_schedule();
 -
--			/*
--			 * After sleeping, we become a "batching" process and
--			 * will be able to allocate at least one request, and
--			 * up to a big batch of them for a small period time.
--			 * See ioc_batching, ioc_set_batching
--			 */
--			ioc = current_io_context(GFP_NOIO, q->node);
--			ioc_set_batching(q, ioc);
+-#ifndef _CRYPTO_SCATTERWALK_H
+-#define _CRYPTO_SCATTERWALK_H
 -
--			spin_lock_irq(q->queue_lock);
--		}
--		finish_wait(&rl->wait[rw], &wait);
--	}
+-#include <linux/mm.h>
+-#include <linux/scatterlist.h>
 -
--	return rq;
--}
+-#include "internal.h"
 -
--struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+-static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
 -{
--	struct request *rq;
--
--	BUG_ON(rw != READ && rw != WRITE);
--
--	spin_lock_irq(q->queue_lock);
--	if (gfp_mask & __GFP_WAIT) {
--		rq = get_request_wait(q, rw, NULL);
--	} else {
--		rq = get_request(q, rw, NULL, gfp_mask);
--		if (!rq)
--			spin_unlock_irq(q->queue_lock);
--	}
--	/* q->queue_lock is unlocked at this point */
--
--	return rq;
+-	return (++sg)->length ? sg : (void *) sg_page(sg);
 -}
--EXPORT_SYMBOL(blk_get_request);
 -
--/**
-- * blk_start_queueing - initiate dispatch of requests to device
-- * @q:		request queue to kick into gear
-- *
-- * This is basically a helper to remove the need to know whether a queue
-- * is plugged or not if someone just wants to initiate dispatch of requests
-- * for this queue.
-- *
-- * The queue lock must be held with interrupts disabled.
-- */
--void blk_start_queueing(struct request_queue *q)
+-static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
+-						struct scatter_walk *walk_out)
 -{
--	if (!blk_queue_plugged(q))
--		q->request_fn(q);
--	else
--		__generic_unplug_device(q);
+-	return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
+-		 (int)(walk_in->offset - walk_out->offset));
 -}
--EXPORT_SYMBOL(blk_start_queueing);
 -
--/**
-- * blk_requeue_request - put a request back on queue
-- * @q:		request queue where request should be inserted
-- * @rq:		request to be inserted
-- *
-- * Description:
-- *    Drivers often keep queueing requests until the hardware cannot accept
-- *    more, when that condition happens we need to put the request back
-- *    on the queue. Must be called with queue lock held.
-- */
--void blk_requeue_request(struct request_queue *q, struct request *rq)
+-static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
 -{
--	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
--
--	if (blk_rq_tagged(rq))
--		blk_queue_end_tag(q, rq);
--
--	elv_requeue_request(q, rq);
+-	unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
+-	unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
+-	return len_this_page > len ? len : len_this_page;
 -}
 -
--EXPORT_SYMBOL(blk_requeue_request);
--
--/**
-- * blk_insert_request - insert a special request in to a request queue
-- * @q:		request queue where request should be inserted
-- * @rq:		request to be inserted
-- * @at_head:	insert request at head or tail of queue
-- * @data:	private data
-- *
-- * Description:
-- *    Many block devices need to execute commands asynchronously, so they don't
-- *    block the whole kernel from preemption during request execution.  This is
-- *    accomplished normally by inserting artificial requests tagged as
-- *    REQ_SPECIAL in to the corresponding request queue, and letting them be
-- *    scheduled for actual execution by the request queue.
-- *
-- *    We have the option of inserting the head or the tail of the queue.
-- *    Typically we use the tail for new ioctls and so forth.  We use the head
-- *    of the queue for things like a QUEUE_FULL message from a device, or a
-- *    host that is unable to accept a particular command.
-- */
--void blk_insert_request(struct request_queue *q, struct request *rq,
--			int at_head, void *data)
+-static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
+-					     unsigned int nbytes)
 -{
--	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
--	unsigned long flags;
--
--	/*
--	 * tell I/O scheduler that this isn't a regular read/write (ie it
--	 * must not attempt merges on this) and that it acts as a soft
--	 * barrier
--	 */
--	rq->cmd_type = REQ_TYPE_SPECIAL;
--	rq->cmd_flags |= REQ_SOFTBARRIER;
--
--	rq->special = data;
--
--	spin_lock_irqsave(q->queue_lock, flags);
--
--	/*
--	 * If command is tagged, release the tag
--	 */
--	if (blk_rq_tagged(rq))
--		blk_queue_end_tag(q, rq);
--
--	drive_stat_acct(rq, 1);
--	__elv_add_request(q, rq, where, 0);
--	blk_start_queueing(q);
--	spin_unlock_irqrestore(q->queue_lock, flags);
+-	unsigned int len_this_page = scatterwalk_pagelen(walk);
+-	return nbytes > len_this_page ? len_this_page : nbytes;
 -}
 -
--EXPORT_SYMBOL(blk_insert_request);
--
--static int __blk_rq_unmap_user(struct bio *bio)
+-static inline void scatterwalk_advance(struct scatter_walk *walk,
+-				       unsigned int nbytes)
 -{
--	int ret = 0;
--
--	if (bio) {
--		if (bio_flagged(bio, BIO_USER_MAPPED))
--			bio_unmap_user(bio);
--		else
--			ret = bio_uncopy_user(bio);
--	}
--
--	return ret;
+-	walk->offset += nbytes;
 -}
 -
--int blk_rq_append_bio(struct request_queue *q, struct request *rq,
--		      struct bio *bio)
+-static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
+-					       unsigned int alignmask)
 -{
--	if (!rq->bio)
--		blk_rq_bio_prep(q, rq, bio);
--	else if (!ll_back_merge_fn(q, rq, bio))
--		return -EINVAL;
--	else {
--		rq->biotail->bi_next = bio;
--		rq->biotail = bio;
--
--		rq->data_len += bio->bi_size;
--	}
--	return 0;
+-	return !(walk->offset & alignmask);
 -}
--EXPORT_SYMBOL(blk_rq_append_bio);
 -
--static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
--			     void __user *ubuf, unsigned int len)
+-static inline struct page *scatterwalk_page(struct scatter_walk *walk)
 -{
--	unsigned long uaddr;
--	struct bio *bio, *orig_bio;
--	int reading, ret;
--
--	reading = rq_data_dir(rq) == READ;
--
--	/*
--	 * if alignment requirement is satisfied, map in user pages for
--	 * direct dma. else, set up kernel bounce buffers
--	 */
--	uaddr = (unsigned long) ubuf;
--	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
--		bio = bio_map_user(q, NULL, uaddr, len, reading);
--	else
--		bio = bio_copy_user(q, uaddr, len, reading);
--
--	if (IS_ERR(bio))
--		return PTR_ERR(bio);
--
--	orig_bio = bio;
--	blk_queue_bounce(q, &bio);
--
--	/*
--	 * We link the bounce buffer in and could have to traverse it
--	 * later so we have to get a ref to prevent it from being freed
--	 */
--	bio_get(bio);
--
--	ret = blk_rq_append_bio(q, rq, bio);
--	if (!ret)
--		return bio->bi_size;
--
--	/* if it was bounced we must call the end io function */
--	bio_endio(bio, 0);
--	__blk_rq_unmap_user(orig_bio);
--	bio_put(bio);
--	return ret;
+-	return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
 -}
 -
--/**
-- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
-- * @q:		request queue where request should be inserted
-- * @rq:		request structure to fill
-- * @ubuf:	the user buffer
-- * @len:	length of user data
-- *
-- * Description:
-- *    Data will be mapped directly for zero copy io, if possible. Otherwise
-- *    a kernel bounce buffer is used.
-- *
-- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
-- *    still in process context.
-- *
-- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
-- *    before being submitted to the device, as pages mapped may be out of
-- *    reach. It's the caller's responsibility to make sure this happens. The
-- *    original bio must be passed back in to blk_rq_unmap_user() for proper
-- *    unmapping.
-- */
--int blk_rq_map_user(struct request_queue *q, struct request *rq,
--		    void __user *ubuf, unsigned long len)
+-static inline void scatterwalk_unmap(void *vaddr, int out)
 -{
--	unsigned long bytes_read = 0;
--	struct bio *bio = NULL;
--	int ret;
--
--	if (len > (q->max_hw_sectors << 9))
--		return -EINVAL;
--	if (!len || !ubuf)
--		return -EINVAL;
--
--	while (bytes_read != len) {
--		unsigned long map_len, end, start;
--
--		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
--		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
--								>> PAGE_SHIFT;
--		start = (unsigned long)ubuf >> PAGE_SHIFT;
--
--		/*
--		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
--		 * pages. If this happens we just lower the requested
--		 * mapping len by a page so that we can fit
--		 */
--		if (end - start > BIO_MAX_PAGES)
--			map_len -= PAGE_SIZE;
--
--		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
--		if (ret < 0)
--			goto unmap_rq;
--		if (!bio)
--			bio = rq->bio;
--		bytes_read += ret;
--		ubuf += ret;
--	}
--
--	rq->buffer = rq->data = NULL;
--	return 0;
--unmap_rq:
--	blk_rq_unmap_user(bio);
--	return ret;
+-	crypto_kunmap(vaddr, out);
 -}
 -
--EXPORT_SYMBOL(blk_rq_map_user);
+-void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
+-void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
+-			    size_t nbytes, int out);
+-void *scatterwalk_map(struct scatter_walk *walk, int out);
+-void scatterwalk_done(struct scatter_walk *walk, int out, int more);
 -
--/**
-- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
-- * @q:		request queue where request should be inserted
-- * @rq:		request to map data to
-- * @iov:	pointer to the iovec
-- * @iov_count:	number of elements in the iovec
-- * @len:	I/O byte count
-- *
-- * Description:
-- *    Data will be mapped directly for zero copy io, if possible. Otherwise
-- *    a kernel bounce buffer is used.
-- *
-- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
-- *    still in process context.
-- *
-- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
-- *    before being submitted to the device, as pages mapped may be out of
-- *    reach. It's the caller's responsibility to make sure this happens. The
-- *    original bio must be passed back in to blk_rq_unmap_user() for proper
-- *    unmapping.
-- */
--int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
--			struct sg_iovec *iov, int iov_count, unsigned int len)
--{
--	struct bio *bio;
+-void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+-			      unsigned int start, unsigned int nbytes, int out);
 -
--	if (!iov || iov_count <= 0)
--		return -EINVAL;
+-#endif  /* _CRYPTO_SCATTERWALK_H */
+diff --git a/crypto/seqiv.c b/crypto/seqiv.c
+new file mode 100644
+index 0000000..b903aab
+--- /dev/null
++++ b/crypto/seqiv.c
+@@ -0,0 +1,345 @@
++/*
++ * seqiv: Sequence Number IV Generator
++ *
++ * This generator generates an IV based on a sequence number by xoring it
++ * with a salt.  This algorithm is mainly useful for CTR and similar modes.
++ *
++ * Copyright (c) 2007 Herbert Xu <herbert at gondor.apana.org.au>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ */
++
++#include <crypto/internal/aead.h>
++#include <crypto/internal/skcipher.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/random.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++
++struct seqiv_ctx {
++	spinlock_t lock;
++	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
++};
++
++static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
++{
++	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
++	struct crypto_ablkcipher *geniv;
++
++	if (err == -EINPROGRESS)
++		return;
++
++	if (err)
++		goto out;
++
++	geniv = skcipher_givcrypt_reqtfm(req);
++	memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));
++
++out:
++	kfree(subreq->info);
++}
++
++static void seqiv_complete(struct crypto_async_request *base, int err)
++{
++	struct skcipher_givcrypt_request *req = base->data;
++
++	seqiv_complete2(req, err);
++	skcipher_givcrypt_complete(req, err);
++}
++
++static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
++{
++	struct aead_request *subreq = aead_givcrypt_reqctx(req);
++	struct crypto_aead *geniv;
++
++	if (err == -EINPROGRESS)
++		return;
++
++	if (err)
++		goto out;
++
++	geniv = aead_givcrypt_reqtfm(req);
++	memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));
++
++out:
++	kfree(subreq->iv);
++}
++
++static void seqiv_aead_complete(struct crypto_async_request *base, int err)
++{
++	struct aead_givcrypt_request *req = base->data;
++
++	seqiv_aead_complete2(req, err);
++	aead_givcrypt_complete(req, err);
++}
++
++static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
++			unsigned int ivsize)
++{
++	unsigned int len = ivsize;
++
++	if (ivsize > sizeof(u64)) {
++		memset(info, 0, ivsize - sizeof(u64));
++		len = sizeof(u64);
++	}
++	seq = cpu_to_be64(seq);
++	memcpy(info + ivsize - len, &seq, len);
++	crypto_xor(info, ctx->salt, ivsize);
++}
++
++static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
++{
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
++	crypto_completion_t complete;
++	void *data;
++	u8 *info;
++	unsigned int ivsize;
++	int err;
++
++	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
++
++	complete = req->creq.base.complete;
++	data = req->creq.base.data;
++	info = req->creq.info;
++
++	ivsize = crypto_ablkcipher_ivsize(geniv);
++
++	if (unlikely(!IS_ALIGNED((unsigned long)info,
++				 crypto_ablkcipher_alignmask(geniv) + 1))) {
++		info = kmalloc(ivsize, req->creq.base.flags &
++				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
++								  GFP_ATOMIC);
++		if (!info)
++			return -ENOMEM;
++
++		complete = seqiv_complete;
++		data = req;
++	}
++
++	ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
++					data);
++	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
++				     req->creq.nbytes, info);
++
++	seqiv_geniv(ctx, info, req->seq, ivsize);
++	memcpy(req->giv, info, ivsize);
++
++	err = crypto_ablkcipher_encrypt(subreq);
++	if (unlikely(info != req->creq.info))
++		seqiv_complete2(req, err);
++	return err;
++}
++
++static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
++{
++	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
++	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
++	struct aead_request *areq = &req->areq;
++	struct aead_request *subreq = aead_givcrypt_reqctx(req);
++	crypto_completion_t complete;
++	void *data;
++	u8 *info;
++	unsigned int ivsize;
++	int err;
++
++	aead_request_set_tfm(subreq, aead_geniv_base(geniv));
++
++	complete = areq->base.complete;
++	data = areq->base.data;
++	info = areq->iv;
++
++	ivsize = crypto_aead_ivsize(geniv);
++
++	if (unlikely(!IS_ALIGNED((unsigned long)info,
++				 crypto_aead_alignmask(geniv) + 1))) {
++		info = kmalloc(ivsize, areq->base.flags &
++				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
++								  GFP_ATOMIC);
++		if (!info)
++			return -ENOMEM;
++
++		complete = seqiv_aead_complete;
++		data = req;
++	}
++
++	aead_request_set_callback(subreq, areq->base.flags, complete, data);
++	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
++			       info);
++	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
++
++	seqiv_geniv(ctx, info, req->seq, ivsize);
++	memcpy(req->giv, info, ivsize);
++
++	err = crypto_aead_encrypt(subreq);
++	if (unlikely(info != areq->iv))
++		seqiv_aead_complete2(req, err);
++	return err;
++}
++
++static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
++{
++	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
++	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++
++	spin_lock_bh(&ctx->lock);
++	if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
++		goto unlock;
++
++	crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
++	get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv));
++
++unlock:
++	spin_unlock_bh(&ctx->lock);
++
++	return seqiv_givencrypt(req);
++}
++
++static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
++{
++	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
++	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
++
++	spin_lock_bh(&ctx->lock);
++	if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
++		goto unlock;
++
++	crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
++	get_random_bytes(ctx->salt, crypto_aead_ivsize(geniv));
++
++unlock:
++	spin_unlock_bh(&ctx->lock);
++
++	return seqiv_aead_givencrypt(req);
++}
++
++static int seqiv_init(struct crypto_tfm *tfm)
++{
++	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
++	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++
++	spin_lock_init(&ctx->lock);
++
++	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
++
++	return skcipher_geniv_init(tfm);
++}
++
++static int seqiv_aead_init(struct crypto_tfm *tfm)
++{
++	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
++	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
++
++	spin_lock_init(&ctx->lock);
++
++	tfm->crt_aead.reqsize = sizeof(struct aead_request);
++
++	return aead_geniv_init(tfm);
++}
++
++static struct crypto_template seqiv_tmpl;
++
++static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
++{
++	struct crypto_instance *inst;
++
++	inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
++
++	if (IS_ERR(inst))
++		goto out;
++
++	inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
++
++	inst->alg.cra_init = seqiv_init;
++	inst->alg.cra_exit = skcipher_geniv_exit;
++
++	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
++
++out:
++	return inst;
++}
++
++static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
++{
++	struct crypto_instance *inst;
++
++	inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
++
++	if (IS_ERR(inst))
++		goto out;
++
++	inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
++
++	inst->alg.cra_init = seqiv_aead_init;
++	inst->alg.cra_exit = aead_geniv_exit;
++
++	inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
++
++out:
++	return inst;
++}
++
++static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
++{
++	struct crypto_attr_type *algt;
++	struct crypto_instance *inst;
++	int err;
++
++	algt = crypto_get_attr_type(tb);
++	err = PTR_ERR(algt);
++	if (IS_ERR(algt))
++		return ERR_PTR(err);
++
++	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
++		inst = seqiv_ablkcipher_alloc(tb);
++	else
++		inst = seqiv_aead_alloc(tb);
++
++	if (IS_ERR(inst))
++		goto out;
++
++	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
++	inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
++
++out:
++	return inst;
++}
++
++static void seqiv_free(struct crypto_instance *inst)
++{
++	if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
++		skcipher_geniv_free(inst);
++	else
++		aead_geniv_free(inst);
++}
++
++static struct crypto_template seqiv_tmpl = {
++	.name = "seqiv",
++	.alloc = seqiv_alloc,
++	.free = seqiv_free,
++	.module = THIS_MODULE,
++};
++
++static int __init seqiv_module_init(void)
++{
++	return crypto_register_template(&seqiv_tmpl);
++}
++
++static void __exit seqiv_module_exit(void)
++{
++	crypto_unregister_template(&seqiv_tmpl);
++}
++
++module_init(seqiv_module_init);
++module_exit(seqiv_module_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Sequence Number IV Generator");
+diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
+index fd3918b..3cc93fd 100644
+--- a/crypto/sha256_generic.c
++++ b/crypto/sha256_generic.c
+@@ -9,6 +9,7 @@
+  * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+  * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
++ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License as published by the Free
+@@ -218,6 +219,22 @@ static void sha256_transform(u32 *state, const u8 *input)
+ 	memset(W, 0, 64 * sizeof(u32));
+ }
+ 
++
++static void sha224_init(struct crypto_tfm *tfm)
++{
++	struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
++	sctx->state[0] = SHA224_H0;
++	sctx->state[1] = SHA224_H1;
++	sctx->state[2] = SHA224_H2;
++	sctx->state[3] = SHA224_H3;
++	sctx->state[4] = SHA224_H4;
++	sctx->state[5] = SHA224_H5;
++	sctx->state[6] = SHA224_H6;
++	sctx->state[7] = SHA224_H7;
++	sctx->count[0] = 0;
++	sctx->count[1] = 0;
++}
++
+ static void sha256_init(struct crypto_tfm *tfm)
+ {
+ 	struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
+@@ -294,8 +311,17 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
+ 	memset(sctx, 0, sizeof(*sctx));
+ }
+ 
++static void sha224_final(struct crypto_tfm *tfm, u8 *hash)
++{
++	u8 D[SHA256_DIGEST_SIZE];
++
++	sha256_final(tfm, D);
++
++	memcpy(hash, D, SHA224_DIGEST_SIZE);
++	memset(D, 0, SHA256_DIGEST_SIZE);
++}
+ 
+-static struct crypto_alg alg = {
++static struct crypto_alg sha256 = {
+ 	.cra_name	=	"sha256",
+ 	.cra_driver_name=	"sha256-generic",
+ 	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
+@@ -303,28 +329,58 @@ static struct crypto_alg alg = {
+ 	.cra_ctxsize	=	sizeof(struct sha256_ctx),
+ 	.cra_module	=	THIS_MODULE,
+ 	.cra_alignmask	=	3,
+-	.cra_list       =       LIST_HEAD_INIT(alg.cra_list),
++	.cra_list	=	LIST_HEAD_INIT(sha256.cra_list),
+ 	.cra_u		=	{ .digest = {
+ 	.dia_digestsize	=	SHA256_DIGEST_SIZE,
+-	.dia_init   	= 	sha256_init,
+-	.dia_update 	=	sha256_update,
+-	.dia_final  	=	sha256_final } }
++	.dia_init	=	sha256_init,
++	.dia_update	=	sha256_update,
++	.dia_final	=	sha256_final } }
++};
++
++static struct crypto_alg sha224 = {
++	.cra_name	= "sha224",
++	.cra_driver_name = "sha224-generic",
++	.cra_flags	= CRYPTO_ALG_TYPE_DIGEST,
++	.cra_blocksize	= SHA224_BLOCK_SIZE,
++	.cra_ctxsize	= sizeof(struct sha256_ctx),
++	.cra_module	= THIS_MODULE,
++	.cra_alignmask	= 3,
++	.cra_list	= LIST_HEAD_INIT(sha224.cra_list),
++	.cra_u		= { .digest = {
++	.dia_digestsize = SHA224_DIGEST_SIZE,
++	.dia_init	= sha224_init,
++	.dia_update	= sha256_update,
++	.dia_final	= sha224_final } }
+ };
+ 
+ static int __init init(void)
+ {
+-	return crypto_register_alg(&alg);
++	int ret = 0;
++
++	ret = crypto_register_alg(&sha224);
++
++	if (ret < 0)
++		return ret;
++
++	ret = crypto_register_alg(&sha256);
++
++	if (ret < 0)
++		crypto_unregister_alg(&sha224);
++
++	return ret;
+ }
+ 
+ static void __exit fini(void)
+ {
+-	crypto_unregister_alg(&alg);
++	crypto_unregister_alg(&sha224);
++	crypto_unregister_alg(&sha256);
+ }
+ 
+ module_init(init);
+ module_exit(fini);
+ 
+ MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
++MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
+ 
++MODULE_ALIAS("sha224");
+ MODULE_ALIAS("sha256");
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index 24141fb..1ab8c01 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -6,12 +6,16 @@
+  *
+  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+  * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
++ * Copyright (c) 2007 Nokia Siemens Networks
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License as published by the Free
+  * Software Foundation; either version 2 of the License, or (at your option)
+  * any later version.
+  *
++ * 2007-11-13 Added GCM tests
++ * 2007-11-13 Added AEAD support
++ * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests
+  * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
+  * 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
+  * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
+@@ -71,22 +75,23 @@ static unsigned int sec;
+ 
+ static int mode;
+ static char *xbuf;
++static char *axbuf;
+ static char *tvmem;
+ 
+ static char *check[] = {
+-	"des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish",
+-	"twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6",
++	"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
++	"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
++	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
+ 	"arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
+ 	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
+-	"camellia", "seed", NULL
++	"camellia", "seed", "salsa20", "lzo", NULL
+ };
+ 
+ static void hexdump(unsigned char *buf, unsigned int len)
+ {
+-	while (len--)
+-		printk("%02x", *buf++);
+-
+-	printk("\n");
++	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
++			16, 1,
++			buf, len, false);
+ }
+ 
+ static void tcrypt_complete(struct crypto_async_request *req, int err)
+@@ -215,6 +220,238 @@ out:
+ 	crypto_free_hash(tfm);
+ }
+ 
++static void test_aead(char *algo, int enc, struct aead_testvec *template,
++		      unsigned int tcount)
++{
++	unsigned int ret, i, j, k, temp;
++	unsigned int tsize;
++	char *q;
++	struct crypto_aead *tfm;
++	char *key;
++	struct aead_testvec *aead_tv;
++	struct aead_request *req;
++	struct scatterlist sg[8];
++	struct scatterlist asg[8];
++	const char *e;
++	struct tcrypt_result result;
++	unsigned int authsize;
++
++	if (enc == ENCRYPT)
++		e = "encryption";
++	else
++		e = "decryption";
++
++	printk(KERN_INFO "\ntesting %s %s\n", algo, e);
++
++	tsize = sizeof(struct aead_testvec);
++	tsize *= tcount;
++
++	if (tsize > TVMEMSIZE) {
++		printk(KERN_INFO "template (%u) too big for tvmem (%u)\n",
++		       tsize, TVMEMSIZE);
++		return;
++	}
++
++	memcpy(tvmem, template, tsize);
++	aead_tv = (void *)tvmem;
++
++	init_completion(&result.completion);
++
++	tfm = crypto_alloc_aead(algo, 0, 0);
++
++	if (IS_ERR(tfm)) {
++		printk(KERN_INFO "failed to load transform for %s: %ld\n",
++		       algo, PTR_ERR(tfm));
++		return;
++	}
++
++	req = aead_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		printk(KERN_INFO "failed to allocate request for %s\n", algo);
++		goto out;
++	}
++
++	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++				  tcrypt_complete, &result);
++
++	for (i = 0, j = 0; i < tcount; i++) {
++		if (!aead_tv[i].np) {
++			printk(KERN_INFO "test %u (%d bit key):\n",
++			       ++j, aead_tv[i].klen * 8);
++
++			crypto_aead_clear_flags(tfm, ~0);
++			if (aead_tv[i].wk)
++				crypto_aead_set_flags(
++					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
++			key = aead_tv[i].key;
++
++			ret = crypto_aead_setkey(tfm, key,
++						 aead_tv[i].klen);
++			if (ret) {
++				printk(KERN_INFO "setkey() failed flags=%x\n",
++				       crypto_aead_get_flags(tfm));
++
++				if (!aead_tv[i].fail)
++					goto out;
++			}
++
++			authsize = abs(aead_tv[i].rlen - aead_tv[i].ilen);
++			ret = crypto_aead_setauthsize(tfm, authsize);
++			if (ret) {
++				printk(KERN_INFO
++				       "failed to set authsize = %u\n",
++				       authsize);
++				goto out;
++			}
++
++			sg_init_one(&sg[0], aead_tv[i].input,
++				    aead_tv[i].ilen + (enc ? authsize : 0));
++
++			sg_init_one(&asg[0], aead_tv[i].assoc,
++				    aead_tv[i].alen);
++
++			aead_request_set_crypt(req, sg, sg,
++					       aead_tv[i].ilen,
++					       aead_tv[i].iv);
++
++			aead_request_set_assoc(req, asg, aead_tv[i].alen);
++
++			ret = enc ?
++				crypto_aead_encrypt(req) :
++				crypto_aead_decrypt(req);
++
++			switch (ret) {
++			case 0:
++				break;
++			case -EINPROGRESS:
++			case -EBUSY:
++				ret = wait_for_completion_interruptible(
++					&result.completion);
++				if (!ret && !(ret = result.err)) {
++					INIT_COMPLETION(result.completion);
++					break;
++				}
++				/* fall through */
++			default:
++				printk(KERN_INFO "%s () failed err=%d\n",
++				       e, -ret);
++				goto out;
++			}
++
++			q = kmap(sg_page(&sg[0])) + sg[0].offset;
++			hexdump(q, aead_tv[i].rlen);
++
++			printk(KERN_INFO "enc/dec: %s\n",
++			       memcmp(q, aead_tv[i].result,
++				      aead_tv[i].rlen) ? "fail" : "pass");
++		}
++	}
++
++	printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e);
++	memset(xbuf, 0, XBUFSIZE);
++	memset(axbuf, 0, XBUFSIZE);
++
++	for (i = 0, j = 0; i < tcount; i++) {
++		if (aead_tv[i].np) {
++			printk(KERN_INFO "test %u (%d bit key):\n",
++			       ++j, aead_tv[i].klen * 8);
++
++			crypto_aead_clear_flags(tfm, ~0);
++			if (aead_tv[i].wk)
++				crypto_aead_set_flags(
++					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
++			key = aead_tv[i].key;
++
++			ret = crypto_aead_setkey(tfm, key, aead_tv[i].klen);
++			if (ret) {
++				printk(KERN_INFO "setkey() failed flags=%x\n",
++				       crypto_aead_get_flags(tfm));
++
++				if (!aead_tv[i].fail)
++					goto out;
++			}
++
++			sg_init_table(sg, aead_tv[i].np);
++			for (k = 0, temp = 0; k < aead_tv[i].np; k++) {
++				memcpy(&xbuf[IDX[k]],
++				       aead_tv[i].input + temp,
++				       aead_tv[i].tap[k]);
++				temp += aead_tv[i].tap[k];
++				sg_set_buf(&sg[k], &xbuf[IDX[k]],
++					   aead_tv[i].tap[k]);
++			}
++
++			authsize = abs(aead_tv[i].rlen - aead_tv[i].ilen);
++			ret = crypto_aead_setauthsize(tfm, authsize);
++			if (ret) {
++				printk(KERN_INFO
++				       "failed to set authsize = %u\n",
++				       authsize);
++				goto out;
++			}
++
++			if (enc)
++				sg[k - 1].length += authsize;
++
++			sg_init_table(asg, aead_tv[i].anp);
++			for (k = 0, temp = 0; k < aead_tv[i].anp; k++) {
++				memcpy(&axbuf[IDX[k]],
++				       aead_tv[i].assoc + temp,
++				       aead_tv[i].atap[k]);
++				temp += aead_tv[i].atap[k];
++				sg_set_buf(&asg[k], &axbuf[IDX[k]],
++					   aead_tv[i].atap[k]);
++			}
++
++			aead_request_set_crypt(req, sg, sg,
++					       aead_tv[i].ilen,
++					       aead_tv[i].iv);
++
++			aead_request_set_assoc(req, asg, aead_tv[i].alen);
++
++			ret = enc ?
++				crypto_aead_encrypt(req) :
++				crypto_aead_decrypt(req);
++
++			switch (ret) {
++			case 0:
++				break;
++			case -EINPROGRESS:
++			case -EBUSY:
++				ret = wait_for_completion_interruptible(
++					&result.completion);
++				if (!ret && !(ret = result.err)) {
++					INIT_COMPLETION(result.completion);
++					break;
++				}
++				/* fall through */
++			default:
++				printk(KERN_INFO "%s () failed err=%d\n",
++				       e, -ret);
++				goto out;
++			}
++
++			for (k = 0, temp = 0; k < aead_tv[i].np; k++) {
++				printk(KERN_INFO "page %u\n", k);
++				q = kmap(sg_page(&sg[k])) + sg[k].offset;
++				hexdump(q, aead_tv[i].tap[k]);
++				printk(KERN_INFO "%s\n",
++				       memcmp(q, aead_tv[i].result + temp,
++					      aead_tv[i].tap[k] -
++					      (k < aead_tv[i].np - 1 || enc ?
++					       0 : authsize)) ?
++				       "fail" : "pass");
++
++				temp += aead_tv[i].tap[k];
++			}
++		}
++	}
++
++out:
++	crypto_free_aead(tfm);
++	aead_request_free(req);
++}
++
+ static void test_cipher(char *algo, int enc,
+ 			struct cipher_testvec *template, unsigned int tcount)
+ {
+@@ -237,15 +474,11 @@ static void test_cipher(char *algo, int enc,
+ 	printk("\ntesting %s %s\n", algo, e);
+ 
+ 	tsize = sizeof (struct cipher_testvec);
+-	tsize *= tcount;
 -
--	/* we don't allow misaligned data like bio_map_user() does.  If the
--	 * user is using sg, they're expected to know the alignment constraints
--	 * and respect them accordingly */
--	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
--	if (IS_ERR(bio))
--		return PTR_ERR(bio);
+ 	if (tsize > TVMEMSIZE) {
+ 		printk("template (%u) too big for tvmem (%u)\n", tsize,
+ 		       TVMEMSIZE);
+ 		return;
+ 	}
 -
--	if (bio->bi_size != len) {
--		bio_endio(bio, 0);
--		bio_unmap_user(bio);
--		return -EINVAL;
+-	memcpy(tvmem, template, tsize);
+ 	cipher_tv = (void *)tvmem;
+ 
+ 	init_completion(&result.completion);
+@@ -269,33 +502,34 @@ static void test_cipher(char *algo, int enc,
+ 
+ 	j = 0;
+ 	for (i = 0; i < tcount; i++) {
+-		if (!(cipher_tv[i].np)) {
++		memcpy(cipher_tv, &template[i], tsize);
++		if (!(cipher_tv->np)) {
+ 			j++;
+ 			printk("test %u (%d bit key):\n",
+-			j, cipher_tv[i].klen * 8);
++			j, cipher_tv->klen * 8);
+ 
+ 			crypto_ablkcipher_clear_flags(tfm, ~0);
+-			if (cipher_tv[i].wk)
++			if (cipher_tv->wk)
+ 				crypto_ablkcipher_set_flags(
+ 					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+-			key = cipher_tv[i].key;
++			key = cipher_tv->key;
+ 
+ 			ret = crypto_ablkcipher_setkey(tfm, key,
+-						       cipher_tv[i].klen);
++						       cipher_tv->klen);
+ 			if (ret) {
+ 				printk("setkey() failed flags=%x\n",
+ 				       crypto_ablkcipher_get_flags(tfm));
+ 
+-				if (!cipher_tv[i].fail)
++				if (!cipher_tv->fail)
+ 					goto out;
+ 			}
+ 
+-			sg_init_one(&sg[0], cipher_tv[i].input,
+-				    cipher_tv[i].ilen);
++			sg_init_one(&sg[0], cipher_tv->input,
++				    cipher_tv->ilen);
+ 
+ 			ablkcipher_request_set_crypt(req, sg, sg,
+-						     cipher_tv[i].ilen,
+-						     cipher_tv[i].iv);
++						     cipher_tv->ilen,
++						     cipher_tv->iv);
+ 
+ 			ret = enc ?
+ 				crypto_ablkcipher_encrypt(req) :
+@@ -319,11 +553,11 @@ static void test_cipher(char *algo, int enc,
+ 			}
+ 
+ 			q = kmap(sg_page(&sg[0])) + sg[0].offset;
+-			hexdump(q, cipher_tv[i].rlen);
++			hexdump(q, cipher_tv->rlen);
+ 
+ 			printk("%s\n",
+-			       memcmp(q, cipher_tv[i].result,
+-				      cipher_tv[i].rlen) ? "fail" : "pass");
++			       memcmp(q, cipher_tv->result,
++				      cipher_tv->rlen) ? "fail" : "pass");
+ 		}
+ 	}
+ 
+@@ -332,41 +566,42 @@ static void test_cipher(char *algo, int enc,
+ 
+ 	j = 0;
+ 	for (i = 0; i < tcount; i++) {
+-		if (cipher_tv[i].np) {
++		memcpy(cipher_tv, &template[i], tsize);
++		if (cipher_tv->np) {
+ 			j++;
+ 			printk("test %u (%d bit key):\n",
+-			j, cipher_tv[i].klen * 8);
++			j, cipher_tv->klen * 8);
+ 
+ 			crypto_ablkcipher_clear_flags(tfm, ~0);
+-			if (cipher_tv[i].wk)
++			if (cipher_tv->wk)
+ 				crypto_ablkcipher_set_flags(
+ 					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+-			key = cipher_tv[i].key;
++			key = cipher_tv->key;
+ 
+ 			ret = crypto_ablkcipher_setkey(tfm, key,
+-						       cipher_tv[i].klen);
++						       cipher_tv->klen);
+ 			if (ret) {
+ 				printk("setkey() failed flags=%x\n",
+ 				       crypto_ablkcipher_get_flags(tfm));
+ 
+-				if (!cipher_tv[i].fail)
++				if (!cipher_tv->fail)
+ 					goto out;
+ 			}
+ 
+ 			temp = 0;
+-			sg_init_table(sg, cipher_tv[i].np);
+-			for (k = 0; k < cipher_tv[i].np; k++) {
++			sg_init_table(sg, cipher_tv->np);
++			for (k = 0; k < cipher_tv->np; k++) {
+ 				memcpy(&xbuf[IDX[k]],
+-				       cipher_tv[i].input + temp,
+-				       cipher_tv[i].tap[k]);
+-				temp += cipher_tv[i].tap[k];
++				       cipher_tv->input + temp,
++				       cipher_tv->tap[k]);
++				temp += cipher_tv->tap[k];
+ 				sg_set_buf(&sg[k], &xbuf[IDX[k]],
+-					   cipher_tv[i].tap[k]);
++					   cipher_tv->tap[k]);
+ 			}
+ 
+ 			ablkcipher_request_set_crypt(req, sg, sg,
+-						     cipher_tv[i].ilen,
+-						     cipher_tv[i].iv);
++						     cipher_tv->ilen,
++						     cipher_tv->iv);
+ 
+ 			ret = enc ?
+ 				crypto_ablkcipher_encrypt(req) :
+@@ -390,15 +625,15 @@ static void test_cipher(char *algo, int enc,
+ 			}
+ 
+ 			temp = 0;
+-			for (k = 0; k < cipher_tv[i].np; k++) {
++			for (k = 0; k < cipher_tv->np; k++) {
+ 				printk("page %u\n", k);
+ 				q = kmap(sg_page(&sg[k])) + sg[k].offset;
+-				hexdump(q, cipher_tv[i].tap[k]);
++				hexdump(q, cipher_tv->tap[k]);
+ 				printk("%s\n",
+-					memcmp(q, cipher_tv[i].result + temp,
+-						cipher_tv[i].tap[k]) ? "fail" :
++					memcmp(q, cipher_tv->result + temp,
++						cipher_tv->tap[k]) ? "fail" :
+ 					"pass");
+-				temp += cipher_tv[i].tap[k];
++				temp += cipher_tv->tap[k];
+ 			}
+ 		}
+ 	}
+@@ -800,7 +1035,8 @@ out:
+ 	crypto_free_hash(tfm);
+ }
+ 
+-static void test_deflate(void)
++static void test_comp(char *algo, struct comp_testvec *ctemplate,
++		       struct comp_testvec *dtemplate, int ctcount, int dtcount)
+ {
+ 	unsigned int i;
+ 	char result[COMP_BUF_SIZE];
+@@ -808,25 +1044,26 @@ static void test_deflate(void)
+ 	struct comp_testvec *tv;
+ 	unsigned int tsize;
+ 
+-	printk("\ntesting deflate compression\n");
++	printk("\ntesting %s compression\n", algo);
+ 
+-	tsize = sizeof (deflate_comp_tv_template);
++	tsize = sizeof(struct comp_testvec);
++	tsize *= ctcount;
+ 	if (tsize > TVMEMSIZE) {
+ 		printk("template (%u) too big for tvmem (%u)\n", tsize,
+ 		       TVMEMSIZE);
+ 		return;
+ 	}
+ 
+-	memcpy(tvmem, deflate_comp_tv_template, tsize);
++	memcpy(tvmem, ctemplate, tsize);
+ 	tv = (void *)tvmem;
+ 
+-	tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
++	tfm = crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC);
+ 	if (IS_ERR(tfm)) {
+-		printk("failed to load transform for deflate\n");
++		printk("failed to load transform for %s\n", algo);
+ 		return;
+ 	}
+ 
+-	for (i = 0; i < DEFLATE_COMP_TEST_VECTORS; i++) {
++	for (i = 0; i < ctcount; i++) {
+ 		int ilen, ret, dlen = COMP_BUF_SIZE;
+ 
+ 		printk("test %u:\n", i + 1);
+@@ -845,19 +1082,20 @@ static void test_deflate(void)
+ 		       ilen, dlen);
+ 	}
+ 
+-	printk("\ntesting deflate decompression\n");
++	printk("\ntesting %s decompression\n", algo);
+ 
+-	tsize = sizeof (deflate_decomp_tv_template);
++	tsize = sizeof(struct comp_testvec);
++	tsize *= dtcount;
+ 	if (tsize > TVMEMSIZE) {
+ 		printk("template (%u) too big for tvmem (%u)\n", tsize,
+ 		       TVMEMSIZE);
+ 		goto out;
+ 	}
+ 
+-	memcpy(tvmem, deflate_decomp_tv_template, tsize);
++	memcpy(tvmem, dtemplate, tsize);
+ 	tv = (void *)tvmem;
+ 
+-	for (i = 0; i < DEFLATE_DECOMP_TEST_VECTORS; i++) {
++	for (i = 0; i < dtcount; i++) {
+ 		int ilen, ret, dlen = COMP_BUF_SIZE;
+ 
+ 		printk("test %u:\n", i + 1);
+@@ -918,6 +1156,8 @@ static void do_test(void)
+ 
+ 		test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
+ 
++		test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
++
+ 		test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS);
+ 
+ 		//BLOWFISH
+@@ -969,6 +1209,18 @@ static void do_test(void)
+ 			    AES_XTS_ENC_TEST_VECTORS);
+ 		test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template,
+ 			    AES_XTS_DEC_TEST_VECTORS);
++		test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template,
++			    AES_CTR_ENC_TEST_VECTORS);
++		test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template,
++			    AES_CTR_DEC_TEST_VECTORS);
++		test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template,
++			  AES_GCM_ENC_TEST_VECTORS);
++		test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template,
++			  AES_GCM_DEC_TEST_VECTORS);
++		test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template,
++			  AES_CCM_ENC_TEST_VECTORS);
++		test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template,
++			  AES_CCM_DEC_TEST_VECTORS);
+ 
+ 		//CAST5
+ 		test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
+@@ -1057,12 +1309,18 @@ static void do_test(void)
+ 		test_hash("tgr192", tgr192_tv_template, TGR192_TEST_VECTORS);
+ 		test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS);
+ 		test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
+-		test_deflate();
++		test_comp("deflate", deflate_comp_tv_template,
++			  deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS,
++			  DEFLATE_DECOMP_TEST_VECTORS);
++		test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template,
++			  LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS);
+ 		test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS);
+ 		test_hash("hmac(md5)", hmac_md5_tv_template,
+ 			  HMAC_MD5_TEST_VECTORS);
+ 		test_hash("hmac(sha1)", hmac_sha1_tv_template,
+ 			  HMAC_SHA1_TEST_VECTORS);
++		test_hash("hmac(sha224)", hmac_sha224_tv_template,
++			  HMAC_SHA224_TEST_VECTORS);
+ 		test_hash("hmac(sha256)", hmac_sha256_tv_template,
+ 			  HMAC_SHA256_TEST_VECTORS);
+ 		test_hash("hmac(sha384)", hmac_sha384_tv_template,
+@@ -1156,6 +1414,10 @@ static void do_test(void)
+ 			    AES_XTS_ENC_TEST_VECTORS);
+ 		test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template,
+ 			    AES_XTS_DEC_TEST_VECTORS);
++		test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template,
++			    AES_CTR_ENC_TEST_VECTORS);
++		test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template,
++			    AES_CTR_DEC_TEST_VECTORS);
+ 		break;
+ 
+ 	case 11:
+@@ -1167,7 +1429,9 @@ static void do_test(void)
+ 		break;
+ 
+ 	case 13:
+-		test_deflate();
++		test_comp("deflate", deflate_comp_tv_template,
++			  deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS,
++			  DEFLATE_DECOMP_TEST_VECTORS);
+ 		break;
+ 
+ 	case 14:
+@@ -1291,6 +1555,34 @@ static void do_test(void)
+ 			    camellia_cbc_dec_tv_template,
+ 			    CAMELLIA_CBC_DEC_TEST_VECTORS);
+ 		break;
++	case 33:
++		test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
++		break;
++
++	case 34:
++		test_cipher("salsa20", ENCRYPT,
++			    salsa20_stream_enc_tv_template,
++			    SALSA20_STREAM_ENC_TEST_VECTORS);
++		break;
++
++	case 35:
++		test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template,
++			  AES_GCM_ENC_TEST_VECTORS);
++		test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template,
++			  AES_GCM_DEC_TEST_VECTORS);
++		break;
++
++	case 36:
++		test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template,
++			  LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS);
++		break;
++
++	case 37:
++		test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template,
++			  AES_CCM_ENC_TEST_VECTORS);
++		test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template,
++			  AES_CCM_DEC_TEST_VECTORS);
++		break;
+ 
+ 	case 100:
+ 		test_hash("hmac(md5)", hmac_md5_tv_template,
+@@ -1317,6 +1609,15 @@ static void do_test(void)
+ 			  HMAC_SHA512_TEST_VECTORS);
+ 		break;
+ 
++	case 105:
++		test_hash("hmac(sha224)", hmac_sha224_tv_template,
++			  HMAC_SHA224_TEST_VECTORS);
++		break;
++
++	case 106:
++		test_hash("xcbc(aes)", aes_xcbc128_tv_template,
++			  XCBC_AES_TEST_VECTORS);
++		break;
+ 
+ 	case 200:
+ 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+@@ -1400,6 +1701,11 @@ static void do_test(void)
+ 				camellia_speed_template);
+ 		break;
+ 
++	case 206:
++		test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0,
++				  salsa20_speed_template);
++		break;
++
+ 	case 300:
+ 		/* fall through */
+ 
+@@ -1451,6 +1757,10 @@ static void do_test(void)
+ 		test_hash_speed("tgr192", sec, generic_hash_speed_template);
+ 		if (mode > 300 && mode < 400) break;
+ 
++	case 313:
++		test_hash_speed("sha224", sec, generic_hash_speed_template);
++		if (mode > 300 && mode < 400) break;
++
+ 	case 399:
+ 		break;
+ 
+@@ -1467,20 +1777,21 @@ static void do_test(void)
+ 
+ static int __init init(void)
+ {
++	int err = -ENOMEM;
++
+ 	tvmem = kmalloc(TVMEMSIZE, GFP_KERNEL);
+ 	if (tvmem == NULL)
+-		return -ENOMEM;
++		return err;
+ 
+ 	xbuf = kmalloc(XBUFSIZE, GFP_KERNEL);
+-	if (xbuf == NULL) {
+-		kfree(tvmem);
+-		return -ENOMEM;
 -	}
++	if (xbuf == NULL)
++		goto err_free_tv;
+ 
+-	do_test();
++	axbuf = kmalloc(XBUFSIZE, GFP_KERNEL);
++	if (axbuf == NULL)
++		goto err_free_xbuf;
+ 
+-	kfree(xbuf);
+-	kfree(tvmem);
++	do_test();
+ 
+ 	/* We intentionaly return -EAGAIN to prevent keeping
+ 	 * the module. It does all its work from init()
+@@ -1488,7 +1799,15 @@ static int __init init(void)
+ 	 * => we don't need it in the memory, do we?
+ 	 *                                        -- mludvig
+ 	 */
+-	return -EAGAIN;
++	err = -EAGAIN;
++
++	kfree(axbuf);
++ err_free_xbuf:
++	kfree(xbuf);
++ err_free_tv:
++	kfree(tvmem);
++
++	return err;
+ }
+ 
+ /*
+diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
+index ec86138..f785e56 100644
+--- a/crypto/tcrypt.h
++++ b/crypto/tcrypt.h
+@@ -6,12 +6,15 @@
+  *
+  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+  * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
++ * Copyright (c) 2007 Nokia Siemens Networks
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License as published by the Free
+  * Software Foundation; either version 2 of the License, or (at your option)
+  * any later version.
+  *
++ * 2007-11-13 Added GCM tests
++ * 2007-11-13 Added AEAD support
+  * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
+  * 2004-08-09 Cipher speed tests by Reyk Floeter <reyk@vantronix.net>
+  * 2003-09-14 Changes by Kartikey Mahendra Bhatt
+@@ -40,14 +43,32 @@ struct hash_testvec {
+ struct cipher_testvec {
+ 	char key[MAX_KEYLEN] __attribute__ ((__aligned__(4)));
+ 	char iv[MAX_IVLEN];
++	char input[4100];
++	char result[4100];
++	unsigned char tap[MAX_TAP];
++	int np;
++	unsigned char fail;
++	unsigned char wk; /* weak key flag */
++	unsigned char klen;
++	unsigned short ilen;
++	unsigned short rlen;
++};
++
++struct aead_testvec {
++	char key[MAX_KEYLEN] __attribute__ ((__aligned__(4)));
++	char iv[MAX_IVLEN];
+ 	char input[512];
++	char assoc[512];
+ 	char result[512];
+ 	unsigned char tap[MAX_TAP];
++	unsigned char atap[MAX_TAP];
+ 	int np;
++	int anp;
+ 	unsigned char fail;
+ 	unsigned char wk; /* weak key flag */
+ 	unsigned char klen;
+ 	unsigned short ilen;
++	unsigned short alen;
+ 	unsigned short rlen;
+ };
+ 
+@@ -173,6 +194,33 @@ static struct hash_testvec sha1_tv_template[] = {
+ 	}
+ };
+ 
++
++/*
++ * SHA224 test vectors from FIPS PUB 180-2
++ */
++#define SHA224_TEST_VECTORS     2
++
++static struct hash_testvec sha224_tv_template[] = {
++	{
++		.plaintext = "abc",
++		.psize  = 3,
++		.digest = { 0x23, 0x09, 0x7D, 0x22, 0x34, 0x05, 0xD8, 0x22,
++			0x86, 0x42, 0xA4, 0x77, 0xBD, 0xA2, 0x55, 0xB3,
++			0x2A, 0xAD, 0xBC, 0xE4, 0xBD, 0xA0, 0xB3, 0xF7,
++			0xE3, 0x6C, 0x9D, 0xA7},
++	}, {
++		.plaintext =
++		"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
++		.psize  = 56,
++		.digest = { 0x75, 0x38, 0x8B, 0x16, 0x51, 0x27, 0x76, 0xCC,
++			0x5D, 0xBA, 0x5D, 0xA1, 0xFD, 0x89, 0x01, 0x50,
++			0xB0, 0xC6, 0x45, 0x5C, 0xB4, 0xF5, 0x8B, 0x19,
++			0x52, 0x52, 0x25, 0x25 },
++		.np     = 2,
++		.tap    = { 28, 28 }
++	}
++};
++
+ /*
+  * SHA256 test vectors from from NIST
+  */
+@@ -817,6 +865,121 @@ static struct hash_testvec hmac_sha1_tv_template[] = {
+ 	},
+ };
+ 
++
++/*
++ * SHA224 HMAC test vectors from RFC4231
++ */
++#define HMAC_SHA224_TEST_VECTORS    4
++
++static struct hash_testvec hmac_sha224_tv_template[] = {
++	{
++		.key    = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
++			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
++			0x0b, 0x0b, 0x0b, 0x0b },
++		.ksize  = 20,
++		/*  ("Hi There") */
++		.plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 },
++		.psize  = 8,
++		.digest = { 0x89, 0x6f, 0xb1, 0x12, 0x8a, 0xbb, 0xdf, 0x19,
++			0x68, 0x32, 0x10, 0x7c, 0xd4, 0x9d, 0xf3, 0x3f,
++			0x47, 0xb4, 0xb1, 0x16, 0x99, 0x12, 0xba, 0x4f,
++			0x53, 0x68, 0x4b, 0x22},
++	}, {
++		.key    = { 0x4a, 0x65, 0x66, 0x65 }, /* ("Jefe") */
++		.ksize  = 4,
++		/* ("what do ya want for nothing?") */
++		.plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
++			0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
++			0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
++			0x69, 0x6e, 0x67, 0x3f },
++		.psize  = 28,
++		.digest = { 0xa3, 0x0e, 0x01, 0x09, 0x8b, 0xc6, 0xdb, 0xbf,
++			0x45, 0x69, 0x0f, 0x3a, 0x7e, 0x9e, 0x6d, 0x0f,
++			0x8b, 0xbe, 0xa2, 0xa3, 0x9e, 0x61, 0x48, 0x00,
++			0x8f, 0xd0, 0x5e, 0x44 },
++		.np = 4,
++		.tap    = { 7, 7, 7, 7 }
++	}, {
++		.key    = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa },
++		.ksize  = 131,
++		/* ("Test Using Larger Than Block-Size Key - Hash Key First") */
++		.plaintext = { 0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69,
++			0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72, 0x67, 0x65,
++			0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42,
++			0x6c, 0x6f, 0x63, 0x6b, 0x2d, 0x53, 0x69, 0x7a,
++			0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20,
++			0x48, 0x61, 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79,
++			0x20, 0x46, 0x69, 0x72, 0x73, 0x74 },
++		.psize  = 54,
++		.digest = { 0x95, 0xe9, 0xa0, 0xdb, 0x96, 0x20, 0x95, 0xad,
++			0xae, 0xbe, 0x9b, 0x2d, 0x6f, 0x0d, 0xbc, 0xe2,
++			0xd4, 0x99, 0xf1, 0x12, 0xf2, 0xd2, 0xb7, 0x27,
++			0x3f, 0xa6, 0x87, 0x0e },
++	}, {
++		.key    = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
++			0xaa, 0xaa, 0xaa },
++		.ksize  = 131,
++		/* ("This is a test using a larger than block-size key and a")
++		(" larger than block-size data. The key needs to be")
++			(" hashed before being used by the HMAC algorithm.") */
++		.plaintext = { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20,
++			0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x20, 0x75,
++			0x73, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c,
++			0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68,
++			0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
++			0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6b, 0x65,
++			0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x20,
++			0x6c, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74,
++			0x68, 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63,
++			0x6b, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x64,
++			0x61, 0x74, 0x61, 0x2e, 0x20, 0x54, 0x68, 0x65,
++			0x20, 0x6b, 0x65, 0x79, 0x20, 0x6e, 0x65, 0x65,
++			0x64, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65,
++			0x20, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x20,
++			0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x62,
++			0x65, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x65,
++			0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65,
++			0x20, 0x48, 0x4d, 0x41, 0x43, 0x20, 0x61, 0x6c,
++			0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x2e },
++		.psize  = 152,
++		.digest = { 0x3a, 0x85, 0x41, 0x66, 0xac, 0x5d, 0x9f, 0x02,
++			0x3f, 0x54, 0xd5, 0x17, 0xd0, 0xb3, 0x9d, 0xbd,
++			0x94, 0x67, 0x70, 0xdb, 0x9c, 0x2b, 0x95, 0xc9,
++			0xf6, 0xf5, 0x65, 0xd1 },
++	},
++};
++
+ /*
+  * HMAC-SHA256 test vectors from
+  * draft-ietf-ipsec-ciph-sha-256-01.txt
+@@ -2140,12 +2303,18 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
+  */
+ #define AES_ENC_TEST_VECTORS 3
+ #define AES_DEC_TEST_VECTORS 3
+-#define AES_CBC_ENC_TEST_VECTORS 2
+-#define AES_CBC_DEC_TEST_VECTORS 2
++#define AES_CBC_ENC_TEST_VECTORS 4
++#define AES_CBC_DEC_TEST_VECTORS 4
+ #define AES_LRW_ENC_TEST_VECTORS 8
+ #define AES_LRW_DEC_TEST_VECTORS 8
+ #define AES_XTS_ENC_TEST_VECTORS 4
+ #define AES_XTS_DEC_TEST_VECTORS 4
++#define AES_CTR_ENC_TEST_VECTORS 7
++#define AES_CTR_DEC_TEST_VECTORS 6
++#define AES_GCM_ENC_TEST_VECTORS 9
++#define AES_GCM_DEC_TEST_VECTORS 8
++#define AES_CCM_ENC_TEST_VECTORS 7
++#define AES_CCM_DEC_TEST_VECTORS 7
+ 
+ static struct cipher_testvec aes_enc_tv_template[] = {
+ 	{ /* From FIPS-197 */
+@@ -2249,6 +2418,57 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
+ 			    0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
+ 			    0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 },
+ 		.rlen   = 32,
++	}, { /* From NIST SP800-38A */
++		.key	= { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
++			    0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
++			    0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b },
++		.klen	= 24,
++		.iv	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
++		.input	= { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
++			    0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
++			    0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
++			    0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
++			    0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
++			    0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
++			    0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
++			    0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
++		.ilen	= 64,
++		.result	= { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d,
++			    0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8,
++			    0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4,
++			    0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a,
++			    0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0,
++			    0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0,
++			    0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81,
++			    0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd },
++		.rlen	= 64,
++	}, {
++		.key	= { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
++			    0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
++			    0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
++			    0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 },
++		.klen	= 32,
++		.iv	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
++		.input	= { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
++			    0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
++			    0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
++			    0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
++			    0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
++			    0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
++			    0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
++			    0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
++		.ilen	= 64,
++		.result	= { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba,
++			    0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6,
++			    0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d,
++			    0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d,
++			    0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf,
++			    0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61,
++			    0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc,
++			    0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b },
++		.rlen	= 64,
+ 	},
+ };
+ 
+@@ -2280,6 +2500,57 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
+ 			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+ 		.rlen   = 32,
++	}, { /* From NIST SP800-38A */
++		.key	= { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
++			    0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
++			    0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b },
++		.klen	= 24,
++		.iv	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
++		.input	= { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d,
++			    0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8,
++			    0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4,
++			    0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a,
++			    0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0,
++			    0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0,
++			    0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81,
++			    0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd },
++		.ilen	= 64,
++		.result	= { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
++			    0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
++			    0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
++			    0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
++			    0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
++			    0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
++			    0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
++			    0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
++		.rlen	= 64,
++	}, {
++		.key	= { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
++			    0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
++			    0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
++			    0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 },
++		.klen	= 32,
++		.iv	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
++		.input	= { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba,
++			    0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6,
++			    0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d,
++			    0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d,
++			    0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf,
++			    0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61,
++			    0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc,
++			    0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b },
++		.ilen	= 64,
++		.result	= { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
++			    0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
++			    0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
++			    0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
++			    0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
++			    0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
++			    0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
++			    0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
++		.rlen	= 64,
+ 	},
+ };
+ 
+@@ -3180,6 +3451,1843 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
+ 	}
+ };
+ 
++
++static struct cipher_testvec aes_ctr_enc_tv_template[] = {
++	{ /* From RFC 3686 */
++		.key	= { 0xae, 0x68, 0x52, 0xf8, 0x12, 0x10, 0x67, 0xcc,
++			    0x4b, 0xf7, 0xa5, 0x76, 0x55, 0x77, 0xf3, 0x9e,
++			    0x00, 0x00, 0x00, 0x30 },
++		.klen	= 20,
++		.iv 	= { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
++		.input 	= { "Single block msg" },
++		.ilen	= 16,
++		.result = { 0xe4, 0x09, 0x5d, 0x4f, 0xb7, 0xa7, 0xb3, 0x79,
++			    0x2d, 0x61, 0x75, 0xa3, 0x26, 0x13, 0x11, 0xb8 },
++		.rlen	= 16,
++	}, {
++		.key	= { 0x7e, 0x24, 0x06, 0x78, 0x17, 0xfa, 0xe0, 0xd7,
++			    0x43, 0xd6, 0xce, 0x1f, 0x32, 0x53, 0x91, 0x63,
++			    0x00, 0x6c, 0xb6, 0xdb },
++		.klen	= 20,
++		.iv 	= { 0xc0, 0x54, 0x3b, 0x59, 0xda, 0x48, 0xd9, 0x0b },
++		.input	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
++		.ilen 	= 32,
++		.result = { 0x51, 0x04, 0xa1, 0x06, 0x16, 0x8a, 0x72, 0xd9,
++			    0x79, 0x0d, 0x41, 0xee, 0x8e, 0xda, 0xd3, 0x88,
++			    0xeb, 0x2e, 0x1e, 0xfc, 0x46, 0xda, 0x57, 0xc8,
++			    0xfc, 0xe6, 0x30, 0xdf, 0x91, 0x41, 0xbe, 0x28 },
++		.rlen	= 32,
++	}, {
++		.key 	= { 0x16, 0xaf, 0x5b, 0x14, 0x5f, 0xc9, 0xf5, 0x79,
++			    0xc1, 0x75, 0xf9, 0x3e, 0x3b, 0xfb, 0x0e, 0xed,
++			    0x86, 0x3d, 0x06, 0xcc, 0xfd, 0xb7, 0x85, 0x15,
++			    0x00, 0x00, 0x00, 0x48 },
++		.klen 	= 28,
++		.iv	= { 0x36, 0x73, 0x3c, 0x14, 0x7d, 0x6d, 0x93, 0xcb },
++		.input	= { "Single block msg" },
++		.ilen 	= 16,
++		.result	= { 0x4b, 0x55, 0x38, 0x4f, 0xe2, 0x59, 0xc9, 0xc8,
++			    0x4e, 0x79, 0x35, 0xa0, 0x03, 0xcb, 0xe9, 0x28 },
++		.rlen	= 16,
++	}, {
++		.key	= { 0x7c, 0x5c, 0xb2, 0x40, 0x1b, 0x3d, 0xc3, 0x3c,
++			    0x19, 0xe7, 0x34, 0x08, 0x19, 0xe0, 0xf6, 0x9c,
++			    0x67, 0x8c, 0x3d, 0xb8, 0xe6, 0xf6, 0xa9, 0x1a,
++			    0x00, 0x96, 0xb0, 0x3b },
++		.klen	= 28,
++		.iv 	= { 0x02, 0x0c, 0x6e, 0xad, 0xc2, 0xcb, 0x50, 0x0d },
++		.input	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
++		.ilen	= 32,
++		.result	= { 0x45, 0x32, 0x43, 0xfc, 0x60, 0x9b, 0x23, 0x32,
++			    0x7e, 0xdf, 0xaa, 0xfa, 0x71, 0x31, 0xcd, 0x9f,
++			    0x84, 0x90, 0x70, 0x1c, 0x5a, 0xd4, 0xa7, 0x9c,
++			    0xfc, 0x1f, 0xe0, 0xff, 0x42, 0xf4, 0xfb, 0x00 },
++		.rlen 	= 32,
++	}, {
++		.key 	= { 0x77, 0x6b, 0xef, 0xf2, 0x85, 0x1d, 0xb0, 0x6f,
++			    0x4c, 0x8a, 0x05, 0x42, 0xc8, 0x69, 0x6f, 0x6c,
++			    0x6a, 0x81, 0xaf, 0x1e, 0xec, 0x96, 0xb4, 0xd3,
++			    0x7f, 0xc1, 0xd6, 0x89, 0xe6, 0xc1, 0xc1, 0x04,
++			    0x00, 0x00, 0x00, 0x60 },
++		.klen	= 36,
++		.iv 	= { 0xdb, 0x56, 0x72, 0xc9, 0x7a, 0xa8, 0xf0, 0xb2 },
++		.input	= { "Single block msg" },
++		.ilen	= 16,
++		.result = { 0x14, 0x5a, 0xd0, 0x1d, 0xbf, 0x82, 0x4e, 0xc7,
++			    0x56, 0x08, 0x63, 0xdc, 0x71, 0xe3, 0xe0, 0xc0 },
++		.rlen 	= 16,
++	}, {
++		.key	= { 0xf6, 0xd6, 0x6d, 0x6b, 0xd5, 0x2d, 0x59, 0xbb,
++			    0x07, 0x96, 0x36, 0x58, 0x79, 0xef, 0xf8, 0x86,
++			    0xc6, 0x6d, 0xd5, 0x1a, 0x5b, 0x6a, 0x99, 0x74,
++			    0x4b, 0x50, 0x59, 0x0c, 0x87, 0xa2, 0x38, 0x84,
++			    0x00, 0xfa, 0xac, 0x24 },
++		.klen 	= 36,
++		.iv	= { 0xc1, 0x58, 0x5e, 0xf1, 0x5a, 0x43, 0xd8, 0x75 },
++		.input	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
++		.ilen	= 32,
++		.result = { 0xf0, 0x5e, 0x23, 0x1b, 0x38, 0x94, 0x61, 0x2c,
++			    0x49, 0xee, 0x00, 0x0b, 0x80, 0x4e, 0xb2, 0xa9,
++			    0xb8, 0x30, 0x6b, 0x50, 0x8f, 0x83, 0x9d, 0x6a,
++			    0x55, 0x30, 0x83, 0x1d, 0x93, 0x44, 0xaf, 0x1c },
++		.rlen	= 32,
++	}, {
++	// generated using Crypto++
++		.key = {
++			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++			0x00, 0x00, 0x00, 0x00,
++		},
++		.klen = 32 + 4,
++		.iv = {
++			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++		},
++		.input = {
++			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++			0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++			0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++			0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
++			0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
++			0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
++			0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
++			0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
++			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
++			0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
++			0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
++			0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
++			0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
++			0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
++			0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
++			0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
++			0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
++			0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
++			0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
++			0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
++			0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
++			0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
++			0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
++			0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
++			0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
++			0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
++			0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
++			0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15,
++			0x18, 0x1b, 0x1e, 0x21, 0x24, 0x27, 0x2a, 0x2d,
++			0x30, 0x33, 0x36, 0x39, 0x3c, 0x3f, 0x42, 0x45,
++			0x48, 0x4b, 0x4e, 0x51, 0x54, 0x57, 0x5a, 0x5d,
++			0x60, 0x63, 0x66, 0x69, 0x6c, 0x6f, 0x72, 0x75,
++			0x78, 0x7b, 0x7e, 0x81, 0x84, 0x87, 0x8a, 0x8d,
++			0x90, 0x93, 0x96, 0x99, 0x9c, 0x9f, 0xa2, 0xa5,
++			0xa8, 0xab, 0xae, 0xb1, 0xb4, 0xb7, 0xba, 0xbd,
++			0xc0, 0xc3, 0xc6, 0xc9, 0xcc, 0xcf, 0xd2, 0xd5,
++			0xd8, 0xdb, 0xde, 0xe1, 0xe4, 0xe7, 0xea, 0xed,
++			0xf0, 0xf3, 0xf6, 0xf9, 0xfc, 0xff, 0x02, 0x05,
++			0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x1a, 0x1d,
++			0x20, 0x23, 0x26, 0x29, 0x2c, 0x2f, 0x32, 0x35,
++			0x38, 0x3b, 0x3e, 0x41, 0x44, 0x47, 0x4a, 0x4d,
++			0x50, 0x53, 0x56, 0x59, 0x5c, 0x5f, 0x62, 0x65,
++			0x68, 0x6b, 0x6e, 0x71, 0x74, 0x77, 0x7a, 0x7d,
++			0x80, 0x83, 0x86, 0x89, 0x8c, 0x8f, 0x92, 0x95,
++			0x98, 0x9b, 0x9e, 0xa1, 0xa4, 0xa7, 0xaa, 0xad,
++			0xb0, 0xb3, 0xb6, 0xb9, 0xbc, 0xbf, 0xc2, 0xc5,
++			0xc8, 0xcb, 0xce, 0xd1, 0xd4, 0xd7, 0xda, 0xdd,
++			0xe0, 0xe3, 0xe6, 0xe9, 0xec, 0xef, 0xf2, 0xf5,
++			0xf8, 0xfb, 0xfe, 0x01, 0x04, 0x07, 0x0a, 0x0d,
++			0x10, 0x13, 0x16, 0x19, 0x1c, 0x1f, 0x22, 0x25,
++			0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, 0x3d,
++			0x40, 0x43, 0x46, 0x49, 0x4c, 0x4f, 0x52, 0x55,
++			0x58, 0x5b, 0x5e, 0x61, 0x64, 0x67, 0x6a, 0x6d,
++			0x70, 0x73, 0x76, 0x79, 0x7c, 0x7f, 0x82, 0x85,
++			0x88, 0x8b, 0x8e, 0x91, 0x94, 0x97, 0x9a, 0x9d,
++			0xa0, 0xa3, 0xa6, 0xa9, 0xac, 0xaf, 0xb2, 0xb5,
++			0xb8, 0xbb, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd,
++			0xd0, 0xd3, 0xd6, 0xd9, 0xdc, 0xdf, 0xe2, 0xe5,
++			0xe8, 0xeb, 0xee, 0xf1, 0xf4, 0xf7, 0xfa, 0xfd,
++			0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1e, 0x23,
++			0x28, 0x2d, 0x32, 0x37, 0x3c, 0x41, 0x46, 0x4b,
++			0x50, 0x55, 0x5a, 0x5f, 0x64, 0x69, 0x6e, 0x73,
++			0x78, 0x7d, 0x82, 0x87, 0x8c, 0x91, 0x96, 0x9b,
++			0xa0, 0xa5, 0xaa, 0xaf, 0xb4, 0xb9, 0xbe, 0xc3,
++			0xc8, 0xcd, 0xd2, 0xd7, 0xdc, 0xe1, 0xe6, 0xeb,
++			0xf0, 0xf5, 0xfa, 0xff, 0x04, 0x09, 0x0e, 0x13,
++			0x18, 0x1d, 0x22, 0x27, 0x2c, 0x31, 0x36, 0x3b,
++			0x40, 0x45, 0x4a, 0x4f, 0x54, 0x59, 0x5e, 0x63,
++			0x68, 0x6d, 0x72, 0x77, 0x7c, 0x81, 0x86, 0x8b,
++			0x90, 0x95, 0x9a, 0x9f, 0xa4, 0xa9, 0xae, 0xb3,
++			0xb8, 0xbd, 0xc2, 0xc7, 0xcc, 0xd1, 0xd6, 0xdb,
++			0xe0, 0xe5, 0xea, 0xef, 0xf4, 0xf9, 0xfe, 0x03,
++			0x08, 0x0d, 0x12, 0x17, 0x1c, 0x21, 0x26, 0x2b,
++			0x30, 0x35, 0x3a, 0x3f, 0x44, 0x49, 0x4e, 0x53,
++			0x58, 0x5d, 0x62, 0x67, 0x6c, 0x71, 0x76, 0x7b,
++			0x80, 0x85, 0x8a, 0x8f, 0x94, 0x99, 0x9e, 0xa3,
++			0xa8, 0xad, 0xb2, 0xb7, 0xbc, 0xc1, 0xc6, 0xcb,
++			0xd0, 0xd5, 0xda, 0xdf, 0xe4, 0xe9, 0xee, 0xf3,
++			0xf8, 0xfd, 0x02, 0x07, 0x0c, 0x11, 0x16, 0x1b,
++			0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3e, 0x43,
++			0x48, 0x4d, 0x52, 0x57, 0x5c, 0x61, 0x66, 0x6b,
++			0x70, 0x75, 0x7a, 0x7f, 0x84, 0x89, 0x8e, 0x93,
++			0x98, 0x9d, 0xa2, 0xa7, 0xac, 0xb1, 0xb6, 0xbb,
++			0xc0, 0xc5, 0xca, 0xcf, 0xd4, 0xd9, 0xde, 0xe3,
++			0xe8, 0xed, 0xf2, 0xf7, 0xfc, 0x01, 0x06, 0x0b,
++			0x10, 0x15, 0x1a, 0x1f, 0x24, 0x29, 0x2e, 0x33,
++			0x38, 0x3d, 0x42, 0x47, 0x4c, 0x51, 0x56, 0x5b,
++			0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79, 0x7e, 0x83,
++			0x88, 0x8d, 0x92, 0x97, 0x9c, 0xa1, 0xa6, 0xab,
++			0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, 0xce, 0xd3,
++			0xd8, 0xdd, 0xe2, 0xe7, 0xec, 0xf1, 0xf6, 0xfb,
++			0x00, 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31,
++			0x38, 0x3f, 0x46, 0x4d, 0x54, 0x5b, 0x62, 0x69,
++			0x70, 0x77, 0x7e, 0x85, 0x8c, 0x93, 0x9a, 0xa1,
++			0xa8, 0xaf, 0xb6, 0xbd, 0xc4, 0xcb, 0xd2, 0xd9,
++			0xe0, 0xe7, 0xee, 0xf5, 0xfc, 0x03, 0x0a, 0x11,
++			0x18, 0x1f, 0x26, 0x2d, 0x34, 0x3b, 0x42, 0x49,
++			0x50, 0x57, 0x5e, 0x65, 0x6c, 0x73, 0x7a, 0x81,
++			0x88, 0x8f, 0x96, 0x9d, 0xa4, 0xab, 0xb2, 0xb9,
++			0xc0, 0xc7, 0xce, 0xd5, 0xdc, 0xe3, 0xea, 0xf1,
++			0xf8, 0xff, 0x06, 0x0d, 0x14, 0x1b, 0x22, 0x29,
++			0x30, 0x37, 0x3e, 0x45, 0x4c, 0x53, 0x5a, 0x61,
++			0x68, 0x6f, 0x76, 0x7d, 0x84, 0x8b, 0x92, 0x99,
++			0xa0, 0xa7, 0xae, 0xb5, 0xbc, 0xc3, 0xca, 0xd1,
++			0xd8, 0xdf, 0xe6, 0xed, 0xf4, 0xfb, 0x02, 0x09,
++			0x10, 0x17, 0x1e, 0x25, 0x2c, 0x33, 0x3a, 0x41,
++			0x48, 0x4f, 0x56, 0x5d, 0x64, 0x6b, 0x72, 0x79,
++			0x80, 0x87, 0x8e, 0x95, 0x9c, 0xa3, 0xaa, 0xb1,
++			0xb8, 0xbf, 0xc6, 0xcd, 0xd4, 0xdb, 0xe2, 0xe9,
++			0xf0, 0xf7, 0xfe, 0x05, 0x0c, 0x13, 0x1a, 0x21,
++			0x28, 0x2f, 0x36, 0x3d, 0x44, 0x4b, 0x52, 0x59,
++			0x60, 0x67, 0x6e, 0x75, 0x7c, 0x83, 0x8a, 0x91,
++			0x98, 0x9f, 0xa6, 0xad, 0xb4, 0xbb, 0xc2, 0xc9,
++			0xd0, 0xd7, 0xde, 0xe5, 0xec, 0xf3, 0xfa, 0x01,
++			0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2b, 0x32, 0x39,
++			0x40, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71,
++			0x78, 0x7f, 0x86, 0x8d, 0x94, 0x9b, 0xa2, 0xa9,
++			0xb0, 0xb7, 0xbe, 0xc5, 0xcc, 0xd3, 0xda, 0xe1,
++			0xe8, 0xef, 0xf6, 0xfd, 0x04, 0x0b, 0x12, 0x19,
++			0x20, 0x27, 0x2e, 0x35, 0x3c, 0x43, 0x4a, 0x51,
++			0x58, 0x5f, 0x66, 0x6d, 0x74, 0x7b, 0x82, 0x89,
++			0x90, 0x97, 0x9e, 0xa5, 0xac, 0xb3, 0xba, 0xc1,
++			0xc8, 0xcf, 0xd6, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9,
++			0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
++			0x48, 0x51, 0x5a, 0x63, 0x6c, 0x75, 0x7e, 0x87,
++			0x90, 0x99, 0xa2, 0xab, 0xb4, 0xbd, 0xc6, 0xcf,
++			0xd8, 0xe1, 0xea, 0xf3, 0xfc, 0x05, 0x0e, 0x17,
++			0x20, 0x29, 0x32, 0x3b, 0x44, 0x4d, 0x56, 0x5f,
++			0x68, 0x71, 0x7a, 0x83, 0x8c, 0x95, 0x9e, 0xa7,
++			0xb0, 0xb9, 0xc2, 0xcb, 0xd4, 0xdd, 0xe6, 0xef,
++			0xf8, 0x01, 0x0a, 0x13, 0x1c, 0x25, 0x2e, 0x37,
++			0x40, 0x49, 0x52, 0x5b, 0x64, 0x6d, 0x76, 0x7f,
++			0x88, 0x91, 0x9a, 0xa3, 0xac, 0xb5, 0xbe, 0xc7,
++			0xd0, 0xd9, 0xe2, 0xeb, 0xf4, 0xfd, 0x06, 0x0f,
++			0x18, 0x21, 0x2a, 0x33, 0x3c, 0x45, 0x4e, 0x57,
++			0x60, 0x69, 0x72, 0x7b, 0x84, 0x8d, 0x96, 0x9f,
++			0xa8, 0xb1, 0xba, 0xc3, 0xcc, 0xd5, 0xde, 0xe7,
++			0xf0, 0xf9, 0x02, 0x0b, 0x14, 0x1d, 0x26, 0x2f,
++			0x38, 0x41, 0x4a, 0x53, 0x5c, 0x65, 0x6e, 0x77,
++			0x80, 0x89, 0x92, 0x9b, 0xa4, 0xad, 0xb6, 0xbf,
++			0xc8, 0xd1, 0xda, 0xe3, 0xec, 0xf5, 0xfe, 0x07,
++			0x10, 0x19, 0x22, 0x2b, 0x34, 0x3d, 0x46, 0x4f,
++			0x58, 0x61, 0x6a, 0x73, 0x7c, 0x85, 0x8e, 0x97,
++			0xa0, 0xa9, 0xb2, 0xbb, 0xc4, 0xcd, 0xd6, 0xdf,
++			0xe8, 0xf1, 0xfa, 0x03, 0x0c, 0x15, 0x1e, 0x27,
++			0x30, 0x39, 0x42, 0x4b, 0x54, 0x5d, 0x66, 0x6f,
++			0x78, 0x81, 0x8a, 0x93, 0x9c, 0xa5, 0xae, 0xb7,
++			0xc0, 0xc9, 0xd2, 0xdb, 0xe4, 0xed, 0xf6, 0xff,
++			0x08, 0x11, 0x1a, 0x23, 0x2c, 0x35, 0x3e, 0x47,
++			0x50, 0x59, 0x62, 0x6b, 0x74, 0x7d, 0x86, 0x8f,
++			0x98, 0xa1, 0xaa, 0xb3, 0xbc, 0xc5, 0xce, 0xd7,
++			0xe0, 0xe9, 0xf2, 0xfb, 0x04, 0x0d, 0x16, 0x1f,
++			0x28, 0x31, 0x3a, 0x43, 0x4c, 0x55, 0x5e, 0x67,
++			0x70, 0x79, 0x82, 0x8b, 0x94, 0x9d, 0xa6, 0xaf,
++			0xb8, 0xc1, 0xca, 0xd3, 0xdc, 0xe5, 0xee, 0xf7,
++			0x00, 0x0b, 0x16, 0x21, 0x2c, 0x37, 0x42, 0x4d,
++			0x58, 0x63, 0x6e, 0x79, 0x84, 0x8f, 0x9a, 0xa5,
++			0xb0, 0xbb, 0xc6, 0xd1, 0xdc, 0xe7, 0xf2, 0xfd,
++			0x08, 0x13, 0x1e, 0x29, 0x34, 0x3f, 0x4a, 0x55,
++			0x60, 0x6b, 0x76, 0x81, 0x8c, 0x97, 0xa2, 0xad,
++			0xb8, 0xc3, 0xce, 0xd9, 0xe4, 0xef, 0xfa, 0x05,
++			0x10, 0x1b, 0x26, 0x31, 0x3c, 0x47, 0x52, 0x5d,
++			0x68, 0x73, 0x7e, 0x89, 0x94, 0x9f, 0xaa, 0xb5,
++			0xc0, 0xcb, 0xd6, 0xe1, 0xec, 0xf7, 0x02, 0x0d,
++			0x18, 0x23, 0x2e, 0x39, 0x44, 0x4f, 0x5a, 0x65,
++			0x70, 0x7b, 0x86, 0x91, 0x9c, 0xa7, 0xb2, 0xbd,
++			0xc8, 0xd3, 0xde, 0xe9, 0xf4, 0xff, 0x0a, 0x15,
++			0x20, 0x2b, 0x36, 0x41, 0x4c, 0x57, 0x62, 0x6d,
++			0x78, 0x83, 0x8e, 0x99, 0xa4, 0xaf, 0xba, 0xc5,
++			0xd0, 0xdb, 0xe6, 0xf1, 0xfc, 0x07, 0x12, 0x1d,
++			0x28, 0x33, 0x3e, 0x49, 0x54, 0x5f, 0x6a, 0x75,
++			0x80, 0x8b, 0x96, 0xa1, 0xac, 0xb7, 0xc2, 0xcd,
++			0xd8, 0xe3, 0xee, 0xf9, 0x04, 0x0f, 0x1a, 0x25,
++			0x30, 0x3b, 0x46, 0x51, 0x5c, 0x67, 0x72, 0x7d,
++			0x88, 0x93, 0x9e, 0xa9, 0xb4, 0xbf, 0xca, 0xd5,
++			0xe0, 0xeb, 0xf6, 0x01, 0x0c, 0x17, 0x22, 0x2d,
++			0x38, 0x43, 0x4e, 0x59, 0x64, 0x6f, 0x7a, 0x85,
++			0x90, 0x9b, 0xa6, 0xb1, 0xbc, 0xc7, 0xd2, 0xdd,
++			0xe8, 0xf3, 0xfe, 0x09, 0x14, 0x1f, 0x2a, 0x35,
++			0x40, 0x4b, 0x56, 0x61, 0x6c, 0x77, 0x82, 0x8d,
++			0x98, 0xa3, 0xae, 0xb9, 0xc4, 0xcf, 0xda, 0xe5,
++			0xf0, 0xfb, 0x06, 0x11, 0x1c, 0x27, 0x32, 0x3d,
++			0x48, 0x53, 0x5e, 0x69, 0x74, 0x7f, 0x8a, 0x95,
++			0xa0, 0xab, 0xb6, 0xc1, 0xcc, 0xd7, 0xe2, 0xed,
++			0xf8, 0x03, 0x0e, 0x19, 0x24, 0x2f, 0x3a, 0x45,
++			0x50, 0x5b, 0x66, 0x71, 0x7c, 0x87, 0x92, 0x9d,
++			0xa8, 0xb3, 0xbe, 0xc9, 0xd4, 0xdf, 0xea, 0xf5,
++			0x00, 0x0d, 0x1a, 0x27, 0x34, 0x41, 0x4e, 0x5b,
++			0x68, 0x75, 0x82, 0x8f, 0x9c, 0xa9, 0xb6, 0xc3,
++			0xd0, 0xdd, 0xea, 0xf7, 0x04, 0x11, 0x1e, 0x2b,
++			0x38, 0x45, 0x52, 0x5f, 0x6c, 0x79, 0x86, 0x93,
++			0xa0, 0xad, 0xba, 0xc7, 0xd4, 0xe1, 0xee, 0xfb,
++			0x08, 0x15, 0x22, 0x2f, 0x3c, 0x49, 0x56, 0x63,
++			0x70, 0x7d, 0x8a, 0x97, 0xa4, 0xb1, 0xbe, 0xcb,
++			0xd8, 0xe5, 0xf2, 0xff, 0x0c, 0x19, 0x26, 0x33,
++			0x40, 0x4d, 0x5a, 0x67, 0x74, 0x81, 0x8e, 0x9b,
++			0xa8, 0xb5, 0xc2, 0xcf, 0xdc, 0xe9, 0xf6, 0x03,
++			0x10, 0x1d, 0x2a, 0x37, 0x44, 0x51, 0x5e, 0x6b,
++			0x78, 0x85, 0x92, 0x9f, 0xac, 0xb9, 0xc6, 0xd3,
++			0xe0, 0xed, 0xfa, 0x07, 0x14, 0x21, 0x2e, 0x3b,
++			0x48, 0x55, 0x62, 0x6f, 0x7c, 0x89, 0x96, 0xa3,
++			0xb0, 0xbd, 0xca, 0xd7, 0xe4, 0xf1, 0xfe, 0x0b,
++			0x18, 0x25, 0x32, 0x3f, 0x4c, 0x59, 0x66, 0x73,
++			0x80, 0x8d, 0x9a, 0xa7, 0xb4, 0xc1, 0xce, 0xdb,
++			0xe8, 0xf5, 0x02, 0x0f, 0x1c, 0x29, 0x36, 0x43,
++			0x50, 0x5d, 0x6a, 0x77, 0x84, 0x91, 0x9e, 0xab,
++			0xb8, 0xc5, 0xd2, 0xdf, 0xec, 0xf9, 0x06, 0x13,
++			0x20, 0x2d, 0x3a, 0x47, 0x54, 0x61, 0x6e, 0x7b,
++			0x88, 0x95, 0xa2, 0xaf, 0xbc, 0xc9, 0xd6, 0xe3,
++			0xf0, 0xfd, 0x0a, 0x17, 0x24, 0x31, 0x3e, 0x4b,
++			0x58, 0x65, 0x72, 0x7f, 0x8c, 0x99, 0xa6, 0xb3,
++			0xc0, 0xcd, 0xda, 0xe7, 0xf4, 0x01, 0x0e, 0x1b,
++			0x28, 0x35, 0x42, 0x4f, 0x5c, 0x69, 0x76, 0x83,
++			0x90, 0x9d, 0xaa, 0xb7, 0xc4, 0xd1, 0xde, 0xeb,
++			0xf8, 0x05, 0x12, 0x1f, 0x2c, 0x39, 0x46, 0x53,
++			0x60, 0x6d, 0x7a, 0x87, 0x94, 0xa1, 0xae, 0xbb,
++			0xc8, 0xd5, 0xe2, 0xef, 0xfc, 0x09, 0x16, 0x23,
++			0x30, 0x3d, 0x4a, 0x57, 0x64, 0x71, 0x7e, 0x8b,
++			0x98, 0xa5, 0xb2, 0xbf, 0xcc, 0xd9, 0xe6, 0xf3,
++			0x00, 0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69,
++			0x78, 0x87, 0x96, 0xa5, 0xb4, 0xc3, 0xd2, 0xe1,
++			0xf0, 0xff, 0x0e, 0x1d, 0x2c, 0x3b, 0x4a, 0x59,
++			0x68, 0x77, 0x86, 0x95, 0xa4, 0xb3, 0xc2, 0xd1,
++			0xe0, 0xef, 0xfe, 0x0d, 0x1c, 0x2b, 0x3a, 0x49,
++			0x58, 0x67, 0x76, 0x85, 0x94, 0xa3, 0xb2, 0xc1,
++			0xd0, 0xdf, 0xee, 0xfd, 0x0c, 0x1b, 0x2a, 0x39,
++			0x48, 0x57, 0x66, 0x75, 0x84, 0x93, 0xa2, 0xb1,
++			0xc0, 0xcf, 0xde, 0xed, 0xfc, 0x0b, 0x1a, 0x29,
++			0x38, 0x47, 0x56, 0x65, 0x74, 0x83, 0x92, 0xa1,
++			0xb0, 0xbf, 0xce, 0xdd, 0xec, 0xfb, 0x0a, 0x19,
++			0x28, 0x37, 0x46, 0x55, 0x64, 0x73, 0x82, 0x91,
++			0xa0, 0xaf, 0xbe, 0xcd, 0xdc, 0xeb, 0xfa, 0x09,
++			0x18, 0x27, 0x36, 0x45, 0x54, 0x63, 0x72, 0x81,
++			0x90, 0x9f, 0xae, 0xbd, 0xcc, 0xdb, 0xea, 0xf9,
++			0x08, 0x17, 0x26, 0x35, 0x44, 0x53, 0x62, 0x71,
++			0x80, 0x8f, 0x9e, 0xad, 0xbc, 0xcb, 0xda, 0xe9,
++			0xf8, 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61,
++			0x70, 0x7f, 0x8e, 0x9d, 0xac, 0xbb, 0xca, 0xd9,
++			0xe8, 0xf7, 0x06, 0x15, 0x24, 0x33, 0x42, 0x51,
++			0x60, 0x6f, 0x7e, 0x8d, 0x9c, 0xab, 0xba, 0xc9,
++			0xd8, 0xe7, 0xf6, 0x05, 0x14, 0x23, 0x32, 0x41,
++			0x50, 0x5f, 0x6e, 0x7d, 0x8c, 0x9b, 0xaa, 0xb9,
++			0xc8, 0xd7, 0xe6, 0xf5, 0x04, 0x13, 0x22, 0x31,
++			0x40, 0x4f, 0x5e, 0x6d, 0x7c, 0x8b, 0x9a, 0xa9,
++			0xb8, 0xc7, 0xd6, 0xe5, 0xf4, 0x03, 0x12, 0x21,
++			0x30, 0x3f, 0x4e, 0x5d, 0x6c, 0x7b, 0x8a, 0x99,
++			0xa8, 0xb7, 0xc6, 0xd5, 0xe4, 0xf3, 0x02, 0x11,
++			0x20, 0x2f, 0x3e, 0x4d, 0x5c, 0x6b, 0x7a, 0x89,
++			0x98, 0xa7, 0xb6, 0xc5, 0xd4, 0xe3, 0xf2, 0x01,
++			0x10, 0x1f, 0x2e, 0x3d, 0x4c, 0x5b, 0x6a, 0x79,
++			0x88, 0x97, 0xa6, 0xb5, 0xc4, 0xd3, 0xe2, 0xf1,
++			0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
++			0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
++			0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87,
++			0x98, 0xa9, 0xba, 0xcb, 0xdc, 0xed, 0xfe, 0x0f,
++			0x20, 0x31, 0x42, 0x53, 0x64, 0x75, 0x86, 0x97,
++			0xa8, 0xb9, 0xca, 0xdb, 0xec, 0xfd, 0x0e, 0x1f,
++			0x30, 0x41, 0x52, 0x63, 0x74, 0x85, 0x96, 0xa7,
++			0xb8, 0xc9, 0xda, 0xeb, 0xfc, 0x0d, 0x1e, 0x2f,
++			0x40, 0x51, 0x62, 0x73, 0x84, 0x95, 0xa6, 0xb7,
++			0xc8, 0xd9, 0xea, 0xfb, 0x0c, 0x1d, 0x2e, 0x3f,
++			0x50, 0x61, 0x72, 0x83, 0x94, 0xa5, 0xb6, 0xc7,
++			0xd8, 0xe9, 0xfa, 0x0b, 0x1c, 0x2d, 0x3e, 0x4f,
++			0x60, 0x71, 0x82, 0x93, 0xa4, 0xb5, 0xc6, 0xd7,
++			0xe8, 0xf9, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f,
++			0x70, 0x81, 0x92, 0xa3, 0xb4, 0xc5, 0xd6, 0xe7,
++			0xf8, 0x09, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f,
++			0x80, 0x91, 0xa2, 0xb3, 0xc4, 0xd5, 0xe6, 0xf7,
++			0x08, 0x19, 0x2a, 0x3b, 0x4c, 0x5d, 0x6e, 0x7f,
++			0x90, 0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6, 0x07,
++			0x18, 0x29, 0x3a, 0x4b, 0x5c, 0x6d, 0x7e, 0x8f,
++			0xa0, 0xb1, 0xc2, 0xd3, 0xe4, 0xf5, 0x06, 0x17,
++			0x28, 0x39, 0x4a, 0x5b, 0x6c, 0x7d, 0x8e, 0x9f,
++			0xb0, 0xc1, 0xd2, 0xe3, 0xf4, 0x05, 0x16, 0x27,
++			0x38, 0x49, 0x5a, 0x6b, 0x7c, 0x8d, 0x9e, 0xaf,
++			0xc0, 0xd1, 0xe2, 0xf3, 0x04, 0x15, 0x26, 0x37,
++			0x48, 0x59, 0x6a, 0x7b, 0x8c, 0x9d, 0xae, 0xbf,
++			0xd0, 0xe1, 0xf2, 0x03, 0x14, 0x25, 0x36, 0x47,
++			0x58, 0x69, 0x7a, 0x8b, 0x9c, 0xad, 0xbe, 0xcf,
++			0xe0, 0xf1, 0x02, 0x13, 0x24, 0x35, 0x46, 0x57,
++			0x68, 0x79, 0x8a, 0x9b, 0xac, 0xbd, 0xce, 0xdf,
++			0xf0, 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67,
++			0x78, 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef,
++			0x00, 0x13, 0x26, 0x39, 0x4c, 0x5f, 0x72, 0x85,
++			0x98, 0xab, 0xbe, 0xd1, 0xe4, 0xf7, 0x0a, 0x1d,
++			0x30, 0x43, 0x56, 0x69, 0x7c, 0x8f, 0xa2, 0xb5,
++			0xc8, 0xdb, 0xee, 0x01, 0x14, 0x27, 0x3a, 0x4d,
++			0x60, 0x73, 0x86, 0x99, 0xac, 0xbf, 0xd2, 0xe5,
++			0xf8, 0x0b, 0x1e, 0x31, 0x44, 0x57, 0x6a, 0x7d,
++			0x90, 0xa3, 0xb6, 0xc9, 0xdc, 0xef, 0x02, 0x15,
++			0x28, 0x3b, 0x4e, 0x61, 0x74, 0x87, 0x9a, 0xad,
++			0xc0, 0xd3, 0xe6, 0xf9, 0x0c, 0x1f, 0x32, 0x45,
++			0x58, 0x6b, 0x7e, 0x91, 0xa4, 0xb7, 0xca, 0xdd,
++			0xf0, 0x03, 0x16, 0x29, 0x3c, 0x4f, 0x62, 0x75,
++			0x88, 0x9b, 0xae, 0xc1, 0xd4, 0xe7, 0xfa, 0x0d,
++			0x20, 0x33, 0x46, 0x59, 0x6c, 0x7f, 0x92, 0xa5,
++			0xb8, 0xcb, 0xde, 0xf1, 0x04, 0x17, 0x2a, 0x3d,
++			0x50, 0x63, 0x76, 0x89, 0x9c, 0xaf, 0xc2, 0xd5,
++			0xe8, 0xfb, 0x0e, 0x21, 0x34, 0x47, 0x5a, 0x6d,
++			0x80, 0x93, 0xa6, 0xb9, 0xcc, 0xdf, 0xf2, 0x05,
++			0x18, 0x2b, 0x3e, 0x51, 0x64, 0x77, 0x8a, 0x9d,
++			0xb0, 0xc3, 0xd6, 0xe9, 0xfc, 0x0f, 0x22, 0x35,
++			0x48, 0x5b, 0x6e, 0x81, 0x94, 0xa7, 0xba, 0xcd,
++			0xe0, 0xf3, 0x06, 0x19, 0x2c, 0x3f, 0x52, 0x65,
++			0x78, 0x8b, 0x9e, 0xb1, 0xc4, 0xd7, 0xea, 0xfd,
++			0x10, 0x23, 0x36, 0x49, 0x5c, 0x6f, 0x82, 0x95,
++			0xa8, 0xbb, 0xce, 0xe1, 0xf4, 0x07, 0x1a, 0x2d,
++			0x40, 0x53, 0x66, 0x79, 0x8c, 0x9f, 0xb2, 0xc5,
++			0xd8, 0xeb, 0xfe, 0x11, 0x24, 0x37, 0x4a, 0x5d,
++			0x70, 0x83, 0x96, 0xa9, 0xbc, 0xcf, 0xe2, 0xf5,
++			0x08, 0x1b, 0x2e, 0x41, 0x54, 0x67, 0x7a, 0x8d,
++			0xa0, 0xb3, 0xc6, 0xd9, 0xec, 0xff, 0x12, 0x25,
++			0x38, 0x4b, 0x5e, 0x71, 0x84, 0x97, 0xaa, 0xbd,
++			0xd0, 0xe3, 0xf6, 0x09, 0x1c, 0x2f, 0x42, 0x55,
++			0x68, 0x7b, 0x8e, 0xa1, 0xb4, 0xc7, 0xda, 0xed,
++			0x00, 0x15, 0x2a, 0x3f, 0x54, 0x69, 0x7e, 0x93,
++			0xa8, 0xbd, 0xd2, 0xe7, 0xfc, 0x11, 0x26, 0x3b,
++			0x50, 0x65, 0x7a, 0x8f, 0xa4, 0xb9, 0xce, 0xe3,
++			0xf8, 0x0d, 0x22, 0x37, 0x4c, 0x61, 0x76, 0x8b,
++			0xa0, 0xb5, 0xca, 0xdf, 0xf4, 0x09, 0x1e, 0x33,
++			0x48, 0x5d, 0x72, 0x87, 0x9c, 0xb1, 0xc6, 0xdb,
++			0xf0, 0x05, 0x1a, 0x2f, 0x44, 0x59, 0x6e, 0x83,
++			0x98, 0xad, 0xc2, 0xd7, 0xec, 0x01, 0x16, 0x2b,
++			0x40, 0x55, 0x6a, 0x7f, 0x94, 0xa9, 0xbe, 0xd3,
++			0xe8, 0xfd, 0x12, 0x27, 0x3c, 0x51, 0x66, 0x7b,
++			0x90, 0xa5, 0xba, 0xcf, 0xe4, 0xf9, 0x0e, 0x23,
++			0x38, 0x4d, 0x62, 0x77, 0x8c, 0xa1, 0xb6, 0xcb,
++			0xe0, 0xf5, 0x0a, 0x1f, 0x34, 0x49, 0x5e, 0x73,
++			0x88, 0x9d, 0xb2, 0xc7, 0xdc, 0xf1, 0x06, 0x1b,
++			0x30, 0x45, 0x5a, 0x6f, 0x84, 0x99, 0xae, 0xc3,
++			0xd8, 0xed, 0x02, 0x17, 0x2c, 0x41, 0x56, 0x6b,
++			0x80, 0x95, 0xaa, 0xbf, 0xd4, 0xe9, 0xfe, 0x13,
++			0x28, 0x3d, 0x52, 0x67, 0x7c, 0x91, 0xa6, 0xbb,
++			0xd0, 0xe5, 0xfa, 0x0f, 0x24, 0x39, 0x4e, 0x63,
++			0x78, 0x8d, 0xa2, 0xb7, 0xcc, 0xe1, 0xf6, 0x0b,
++			0x20, 0x35, 0x4a, 0x5f, 0x74, 0x89, 0x9e, 0xb3,
++			0xc8, 0xdd, 0xf2, 0x07, 0x1c, 0x31, 0x46, 0x5b,
++			0x70, 0x85, 0x9a, 0xaf, 0xc4, 0xd9, 0xee, 0x03,
++			0x18, 0x2d, 0x42, 0x57, 0x6c, 0x81, 0x96, 0xab,
++			0xc0, 0xd5, 0xea, 0xff, 0x14, 0x29, 0x3e, 0x53,
++			0x68, 0x7d, 0x92, 0xa7, 0xbc, 0xd1, 0xe6, 0xfb,
++			0x10, 0x25, 0x3a, 0x4f, 0x64, 0x79, 0x8e, 0xa3,
++			0xb8, 0xcd, 0xe2, 0xf7, 0x0c, 0x21, 0x36, 0x4b,
++			0x60, 0x75, 0x8a, 0x9f, 0xb4, 0xc9, 0xde, 0xf3,
++			0x08, 0x1d, 0x32, 0x47, 0x5c, 0x71, 0x86, 0x9b,
++			0xb0, 0xc5, 0xda, 0xef, 0x04, 0x19, 0x2e, 0x43,
++			0x58, 0x6d, 0x82, 0x97, 0xac, 0xc1, 0xd6, 0xeb,
++			0x00, 0x17, 0x2e, 0x45, 0x5c, 0x73, 0x8a, 0xa1,
++			0xb8, 0xcf, 0xe6, 0xfd, 0x14, 0x2b, 0x42, 0x59,
++			0x70, 0x87, 0x9e, 0xb5, 0xcc, 0xe3, 0xfa, 0x11,
++			0x28, 0x3f, 0x56, 0x6d, 0x84, 0x9b, 0xb2, 0xc9,
++			0xe0, 0xf7, 0x0e, 0x25, 0x3c, 0x53, 0x6a, 0x81,
++			0x98, 0xaf, 0xc6, 0xdd, 0xf4, 0x0b, 0x22, 0x39,
++			0x50, 0x67, 0x7e, 0x95, 0xac, 0xc3, 0xda, 0xf1,
++			0x08, 0x1f, 0x36, 0x4d, 0x64, 0x7b, 0x92, 0xa9,
++			0xc0, 0xd7, 0xee, 0x05, 0x1c, 0x33, 0x4a, 0x61,
++			0x78, 0x8f, 0xa6, 0xbd, 0xd4, 0xeb, 0x02, 0x19,
++			0x30, 0x47, 0x5e, 0x75, 0x8c, 0xa3, 0xba, 0xd1,
++			0xe8, 0xff, 0x16, 0x2d, 0x44, 0x5b, 0x72, 0x89,
++			0xa0, 0xb7, 0xce, 0xe5, 0xfc, 0x13, 0x2a, 0x41,
++			0x58, 0x6f, 0x86, 0x9d, 0xb4, 0xcb, 0xe2, 0xf9,
++			0x10, 0x27, 0x3e, 0x55, 0x6c, 0x83, 0x9a, 0xb1,
++			0xc8, 0xdf, 0xf6, 0x0d, 0x24, 0x3b, 0x52, 0x69,
++			0x80, 0x97, 0xae, 0xc5, 0xdc, 0xf3, 0x0a, 0x21,
++			0x38, 0x4f, 0x66, 0x7d, 0x94, 0xab, 0xc2, 0xd9,
++			0xf0, 0x07, 0x1e, 0x35, 0x4c, 0x63, 0x7a, 0x91,
++			0xa8, 0xbf, 0xd6, 0xed, 0x04, 0x1b, 0x32, 0x49,
++			0x60, 0x77, 0x8e, 0xa5, 0xbc, 0xd3, 0xea, 0x01,
++			0x18, 0x2f, 0x46, 0x5d, 0x74, 0x8b, 0xa2, 0xb9,
++			0xd0, 0xe7, 0xfe, 0x15, 0x2c, 0x43, 0x5a, 0x71,
++			0x88, 0x9f, 0xb6, 0xcd, 0xe4, 0xfb, 0x12, 0x29,
++			0x40, 0x57, 0x6e, 0x85, 0x9c, 0xb3, 0xca, 0xe1,
++			0xf8, 0x0f, 0x26, 0x3d, 0x54, 0x6b, 0x82, 0x99,
++			0xb0, 0xc7, 0xde, 0xf5, 0x0c, 0x23, 0x3a, 0x51,
++			0x68, 0x7f, 0x96, 0xad, 0xc4, 0xdb, 0xf2, 0x09,
++			0x20, 0x37, 0x4e, 0x65, 0x7c, 0x93, 0xaa, 0xc1,
++			0xd8, 0xef, 0x06, 0x1d, 0x34, 0x4b, 0x62, 0x79,
++			0x90, 0xa7, 0xbe, 0xd5, 0xec, 0x03, 0x1a, 0x31,
++			0x48, 0x5f, 0x76, 0x8d, 0xa4, 0xbb, 0xd2, 0xe9,
++			0x00, 0x19, 0x32, 0x4b, 0x64, 0x7d, 0x96, 0xaf,
++			0xc8, 0xe1, 0xfa, 0x13, 0x2c, 0x45, 0x5e, 0x77,
++			0x90, 0xa9, 0xc2, 0xdb, 0xf4, 0x0d, 0x26, 0x3f,
++			0x58, 0x71, 0x8a, 0xa3, 0xbc, 0xd5, 0xee, 0x07,
++			0x20, 0x39, 0x52, 0x6b, 0x84, 0x9d, 0xb6, 0xcf,
++			0xe8, 0x01, 0x1a, 0x33, 0x4c, 0x65, 0x7e, 0x97,
++			0xb0, 0xc9, 0xe2, 0xfb, 0x14, 0x2d, 0x46, 0x5f,
++			0x78, 0x91, 0xaa, 0xc3, 0xdc, 0xf5, 0x0e, 0x27,
++			0x40, 0x59, 0x72, 0x8b, 0xa4, 0xbd, 0xd6, 0xef,
++			0x08, 0x21, 0x3a, 0x53, 0x6c, 0x85, 0x9e, 0xb7,
++			0xd0, 0xe9, 0x02, 0x1b, 0x34, 0x4d, 0x66, 0x7f,
++			0x98, 0xb1, 0xca, 0xe3, 0xfc, 0x15, 0x2e, 0x47,
++			0x60, 0x79, 0x92, 0xab, 0xc4, 0xdd, 0xf6, 0x0f,
++			0x28, 0x41, 0x5a, 0x73, 0x8c, 0xa5, 0xbe, 0xd7,
++			0xf0, 0x09, 0x22, 0x3b, 0x54, 0x6d, 0x86, 0x9f,
++			0xb8, 0xd1, 0xea, 0x03, 0x1c, 0x35, 0x4e, 0x67,
++			0x80, 0x99, 0xb2, 0xcb, 0xe4, 0xfd, 0x16, 0x2f,
++			0x48, 0x61, 0x7a, 0x93, 0xac, 0xc5, 0xde, 0xf7,
++			0x10, 0x29, 0x42, 0x5b, 0x74, 0x8d, 0xa6, 0xbf,
++			0xd8, 0xf1, 0x0a, 0x23, 0x3c, 0x55, 0x6e, 0x87,
++			0xa0, 0xb9, 0xd2, 0xeb, 0x04, 0x1d, 0x36, 0x4f,
++			0x68, 0x81, 0x9a, 0xb3, 0xcc, 0xe5, 0xfe, 0x17,
++			0x30, 0x49, 0x62, 0x7b, 0x94, 0xad, 0xc6, 0xdf,
++			0xf8, 0x11, 0x2a, 0x43, 0x5c, 0x75, 0x8e, 0xa7,
++			0xc0, 0xd9, 0xf2, 0x0b, 0x24, 0x3d, 0x56, 0x6f,
++			0x88, 0xa1, 0xba, 0xd3, 0xec, 0x05, 0x1e, 0x37,
++			0x50, 0x69, 0x82, 0x9b, 0xb4, 0xcd, 0xe6, 0xff,
++			0x18, 0x31, 0x4a, 0x63, 0x7c, 0x95, 0xae, 0xc7,
++			0xe0, 0xf9, 0x12, 0x2b, 0x44, 0x5d, 0x76, 0x8f,
++			0xa8, 0xc1, 0xda, 0xf3, 0x0c, 0x25, 0x3e, 0x57,
++			0x70, 0x89, 0xa2, 0xbb, 0xd4, 0xed, 0x06, 0x1f,
++			0x38, 0x51, 0x6a, 0x83, 0x9c, 0xb5, 0xce, 0xe7,
++			0x00, 0x1b, 0x36, 0x51, 0x6c, 0x87, 0xa2, 0xbd,
++			0xd8, 0xf3, 0x0e, 0x29, 0x44, 0x5f, 0x7a, 0x95,
++			0xb0, 0xcb, 0xe6, 0x01, 0x1c, 0x37, 0x52, 0x6d,
++			0x88, 0xa3, 0xbe, 0xd9, 0xf4, 0x0f, 0x2a, 0x45,
++			0x60, 0x7b, 0x96, 0xb1, 0xcc, 0xe7, 0x02, 0x1d,
++			0x38, 0x53, 0x6e, 0x89, 0xa4, 0xbf, 0xda, 0xf5,
++			0x10, 0x2b, 0x46, 0x61, 0x7c, 0x97, 0xb2, 0xcd,
++			0xe8, 0x03, 0x1e, 0x39, 0x54, 0x6f, 0x8a, 0xa5,
++			0xc0, 0xdb, 0xf6, 0x11, 0x2c, 0x47, 0x62, 0x7d,
++			0x98, 0xb3, 0xce, 0xe9, 0x04, 0x1f, 0x3a, 0x55,
++			0x70, 0x8b, 0xa6, 0xc1, 0xdc, 0xf7, 0x12, 0x2d,
++			0x48, 0x63, 0x7e, 0x99, 0xb4, 0xcf, 0xea, 0x05,
++			0x20, 0x3b, 0x56, 0x71, 0x8c, 0xa7, 0xc2, 0xdd,
++			0xf8, 0x13, 0x2e, 0x49, 0x64, 0x7f, 0x9a, 0xb5,
++			0xd0, 0xeb, 0x06, 0x21, 0x3c, 0x57, 0x72, 0x8d,
++			0xa8, 0xc3, 0xde, 0xf9, 0x14, 0x2f, 0x4a, 0x65,
++			0x80, 0x9b, 0xb6, 0xd1, 0xec, 0x07, 0x22, 0x3d,
++			0x58, 0x73, 0x8e, 0xa9, 0xc4, 0xdf, 0xfa, 0x15,
++			0x30, 0x4b, 0x66, 0x81, 0x9c, 0xb7, 0xd2, 0xed,
++			0x08, 0x23, 0x3e, 0x59, 0x74, 0x8f, 0xaa, 0xc5,
++			0xe0, 0xfb, 0x16, 0x31, 0x4c, 0x67, 0x82, 0x9d,
++			0xb8, 0xd3, 0xee, 0x09, 0x24, 0x3f, 0x5a, 0x75,
++			0x90, 0xab, 0xc6, 0xe1, 0xfc, 0x17, 0x32, 0x4d,
++			0x68, 0x83, 0x9e, 0xb9, 0xd4, 0xef, 0x0a, 0x25,
++			0x40, 0x5b, 0x76, 0x91, 0xac, 0xc7, 0xe2, 0xfd,
++			0x18, 0x33, 0x4e, 0x69, 0x84, 0x9f, 0xba, 0xd5,
++			0xf0, 0x0b, 0x26, 0x41, 0x5c, 0x77, 0x92, 0xad,
++			0xc8, 0xe3, 0xfe, 0x19, 0x34, 0x4f, 0x6a, 0x85,
++			0xa0, 0xbb, 0xd6, 0xf1, 0x0c, 0x27, 0x42, 0x5d,
++			0x78, 0x93, 0xae, 0xc9, 0xe4, 0xff, 0x1a, 0x35,
++			0x50, 0x6b, 0x86, 0xa1, 0xbc, 0xd7, 0xf2, 0x0d,
++			0x28, 0x43, 0x5e, 0x79, 0x94, 0xaf, 0xca, 0xe5,
++			0x00, 0x1d, 0x3a, 0x57, 0x74, 0x91, 0xae, 0xcb,
++			0xe8, 0x05, 0x22, 0x3f, 0x5c, 0x79, 0x96, 0xb3,
++			0xd0, 0xed, 0x0a, 0x27, 0x44, 0x61, 0x7e, 0x9b,
++			0xb8, 0xd5, 0xf2, 0x0f, 0x2c, 0x49, 0x66, 0x83,
++			0xa0, 0xbd, 0xda, 0xf7, 0x14, 0x31, 0x4e, 0x6b,
++			0x88, 0xa5, 0xc2, 0xdf, 0xfc, 0x19, 0x36, 0x53,
++			0x70, 0x8d, 0xaa, 0xc7, 0xe4, 0x01, 0x1e, 0x3b,
++			0x58, 0x75, 0x92, 0xaf, 0xcc, 0xe9, 0x06, 0x23,
++			0x40, 0x5d, 0x7a, 0x97, 0xb4, 0xd1, 0xee, 0x0b,
++			0x28, 0x45, 0x62, 0x7f, 0x9c, 0xb9, 0xd6, 0xf3,
++			0x10, 0x2d, 0x4a, 0x67, 0x84, 0xa1, 0xbe, 0xdb,
++			0xf8, 0x15, 0x32, 0x4f, 0x6c, 0x89, 0xa6, 0xc3,
++			0xe0, 0xfd, 0x1a, 0x37, 0x54, 0x71, 0x8e, 0xab,
++			0xc8, 0xe5, 0x02, 0x1f, 0x3c, 0x59, 0x76, 0x93,
++			0xb0, 0xcd, 0xea, 0x07, 0x24, 0x41, 0x5e, 0x7b,
++			0x98, 0xb5, 0xd2, 0xef, 0x0c, 0x29, 0x46, 0x63,
++			0x80, 0x9d, 0xba, 0xd7, 0xf4, 0x11, 0x2e, 0x4b,
++			0x68, 0x85, 0xa2, 0xbf, 0xdc, 0xf9, 0x16, 0x33,
++			0x50, 0x6d, 0x8a, 0xa7, 0xc4, 0xe1, 0xfe, 0x1b,
++			0x38, 0x55, 0x72, 0x8f, 0xac, 0xc9, 0xe6, 0x03,
++			0x20, 0x3d, 0x5a, 0x77, 0x94, 0xb1, 0xce, 0xeb,
++			0x08, 0x25, 0x42, 0x5f, 0x7c, 0x99, 0xb6, 0xd3,
++			0xf0, 0x0d, 0x2a, 0x47, 0x64, 0x81, 0x9e, 0xbb,
++			0xd8, 0xf5, 0x12, 0x2f, 0x4c, 0x69, 0x86, 0xa3,
++			0xc0, 0xdd, 0xfa, 0x17, 0x34, 0x51, 0x6e, 0x8b,
++			0xa8, 0xc5, 0xe2, 0xff, 0x1c, 0x39, 0x56, 0x73,
++			0x90, 0xad, 0xca, 0xe7, 0x04, 0x21, 0x3e, 0x5b,
++			0x78, 0x95, 0xb2, 0xcf, 0xec, 0x09, 0x26, 0x43,
++			0x60, 0x7d, 0x9a, 0xb7, 0xd4, 0xf1, 0x0e, 0x2b,
++			0x48, 0x65, 0x82, 0x9f, 0xbc, 0xd9, 0xf6, 0x13,
++			0x30, 0x4d, 0x6a, 0x87, 0xa4, 0xc1, 0xde, 0xfb,
++			0x18, 0x35, 0x52, 0x6f, 0x8c, 0xa9, 0xc6, 0xe3,
++			0x00, 0x1f, 0x3e, 0x5d, 0x7c, 0x9b, 0xba, 0xd9,
++			0xf8, 0x17, 0x36, 0x55, 0x74, 0x93, 0xb2, 0xd1,
++			0xf0, 0x0f, 0x2e, 0x4d, 0x6c, 0x8b, 0xaa, 0xc9,
++			0xe8, 0x07, 0x26, 0x45, 0x64, 0x83, 0xa2, 0xc1,
++			0xe0, 0xff, 0x1e, 0x3d, 0x5c, 0x7b, 0x9a, 0xb9,
++			0xd8, 0xf7, 0x16, 0x35, 0x54, 0x73, 0x92, 0xb1,
++			0xd0, 0xef, 0x0e, 0x2d, 0x4c, 0x6b, 0x8a, 0xa9,
++			0xc8, 0xe7, 0x06, 0x25, 0x44, 0x63, 0x82, 0xa1,
++			0xc0, 0xdf, 0xfe, 0x1d, 0x3c, 0x5b, 0x7a, 0x99,
++			0xb8, 0xd7, 0xf6, 0x15, 0x34, 0x53, 0x72, 0x91,
++			0xb0, 0xcf, 0xee, 0x0d, 0x2c, 0x4b, 0x6a, 0x89,
++			0xa8, 0xc7, 0xe6, 0x05, 0x24, 0x43, 0x62, 0x81,
++			0xa0, 0xbf, 0xde, 0xfd, 0x1c, 0x3b, 0x5a, 0x79,
++			0x98, 0xb7, 0xd6, 0xf5, 0x14, 0x33, 0x52, 0x71,
++			0x90, 0xaf, 0xce, 0xed, 0x0c, 0x2b, 0x4a, 0x69,
++			0x88, 0xa7, 0xc6, 0xe5, 0x04, 0x23, 0x42, 0x61,
++			0x80, 0x9f, 0xbe, 0xdd, 0xfc, 0x1b, 0x3a, 0x59,
++			0x78, 0x97, 0xb6, 0xd5, 0xf4, 0x13, 0x32, 0x51,
++			0x70, 0x8f, 0xae, 0xcd, 0xec, 0x0b, 0x2a, 0x49,
++			0x68, 0x87, 0xa6, 0xc5, 0xe4, 0x03, 0x22, 0x41,
++			0x60, 0x7f, 0x9e, 0xbd, 0xdc, 0xfb, 0x1a, 0x39,
++			0x58, 0x77, 0x96, 0xb5, 0xd4, 0xf3, 0x12, 0x31,
++			0x50, 0x6f, 0x8e, 0xad, 0xcc, 0xeb, 0x0a, 0x29,
++			0x48, 0x67, 0x86, 0xa5, 0xc4, 0xe3, 0x02, 0x21,
++			0x40, 0x5f, 0x7e, 0x9d, 0xbc, 0xdb, 0xfa, 0x19,
++			0x38, 0x57, 0x76, 0x95, 0xb4, 0xd3, 0xf2, 0x11,
++			0x30, 0x4f, 0x6e, 0x8d, 0xac, 0xcb, 0xea, 0x09,
++			0x28, 0x47, 0x66, 0x85, 0xa4, 0xc3, 0xe2, 0x01,
++			0x20, 0x3f, 0x5e, 0x7d, 0x9c, 0xbb, 0xda, 0xf9,
++			0x18, 0x37, 0x56, 0x75, 0x94, 0xb3, 0xd2, 0xf1,
++			0x10, 0x2f, 0x4e, 0x6d, 0x8c, 0xab, 0xca, 0xe9,
++			0x08, 0x27, 0x46, 0x65, 0x84, 0xa3, 0xc2, 0xe1,
++			0x00, 0x21, 0x42, 0x63,
++		},
++		.ilen = 4100,
++		.result = {
++			0xf0, 0x5c, 0x74, 0xad, 0x4e, 0xbc, 0x99, 0xe2,
++			0xae, 0xff, 0x91, 0x3a, 0x44, 0xcf, 0x38, 0x32,
++			0x1e, 0xad, 0xa7, 0xcd, 0xa1, 0x39, 0x95, 0xaa,
++			0x10, 0xb1, 0xb3, 0x2e, 0x04, 0x31, 0x8f, 0x86,
++			0xf2, 0x62, 0x74, 0x70, 0x0c, 0xa4, 0x46, 0x08,
++			0xa8, 0xb7, 0x99, 0xa8, 0xe9, 0xd2, 0x73, 0x79,
++			0x7e, 0x6e, 0xd4, 0x8f, 0x1e, 0xc7, 0x8e, 0x31,
++			0x0b, 0xfa, 0x4b, 0xce, 0xfd, 0xf3, 0x57, 0x71,
++			0xe9, 0x46, 0x03, 0xa5, 0x3d, 0x34, 0x00, 0xe2,
++			0x18, 0xff, 0x75, 0x6d, 0x06, 0x2d, 0x00, 0xab,
++			0xb9, 0x3e, 0x6c, 0x59, 0xc5, 0x84, 0x06, 0xb5,
++			0x8b, 0xd0, 0x89, 0x9c, 0x4a, 0x79, 0x16, 0xc6,
++			0x3d, 0x74, 0x54, 0xfa, 0x44, 0xcd, 0x23, 0x26,
++			0x5c, 0xcf, 0x7e, 0x28, 0x92, 0x32, 0xbf, 0xdf,
++			0xa7, 0x20, 0x3c, 0x74, 0x58, 0x2a, 0x9a, 0xde,
++			0x61, 0x00, 0x1c, 0x4f, 0xff, 0x59, 0xc4, 0x22,
++			0xac, 0x3c, 0xd0, 0xe8, 0x6c, 0xf9, 0x97, 0x1b,
++			0x58, 0x9b, 0xad, 0x71, 0xe8, 0xa9, 0xb5, 0x0d,
++			0xee, 0x2f, 0x04, 0x1f, 0x7f, 0xbc, 0x99, 0xee,
++			0x84, 0xff, 0x42, 0x60, 0xdc, 0x3a, 0x18, 0xa5,
++			0x81, 0xf9, 0xef, 0xdc, 0x7a, 0x0f, 0x65, 0x41,
++			0x2f, 0xa3, 0xd3, 0xf9, 0xc2, 0xcb, 0xc0, 0x4d,
++			0x8f, 0xd3, 0x76, 0x96, 0xad, 0x49, 0x6d, 0x38,
++			0x3d, 0x39, 0x0b, 0x6c, 0x80, 0xb7, 0x54, 0x69,
++			0xf0, 0x2c, 0x90, 0x02, 0x29, 0x0d, 0x1c, 0x12,
++			0xad, 0x55, 0xc3, 0x8b, 0x68, 0xd9, 0xcc, 0xb3,
++			0xb2, 0x64, 0x33, 0x90, 0x5e, 0xca, 0x4b, 0xe2,
++			0xfb, 0x75, 0xdc, 0x63, 0xf7, 0x9f, 0x82, 0x74,
++			0xf0, 0xc9, 0xaa, 0x7f, 0xe9, 0x2a, 0x9b, 0x33,
++			0xbc, 0x88, 0x00, 0x7f, 0xca, 0xb2, 0x1f, 0x14,
++			0xdb, 0xc5, 0x8e, 0x7b, 0x11, 0x3c, 0x3e, 0x08,
++			0xf3, 0x83, 0xe8, 0xe0, 0x94, 0x86, 0x2e, 0x92,
++			0x78, 0x6b, 0x01, 0xc9, 0xc7, 0x83, 0xba, 0x21,
++			0x6a, 0x25, 0x15, 0x33, 0x4e, 0x45, 0x08, 0xec,
++			0x35, 0xdb, 0xe0, 0x6e, 0x31, 0x51, 0x79, 0xa9,
++			0x42, 0x44, 0x65, 0xc1, 0xa0, 0xf1, 0xf9, 0x2a,
++			0x70, 0xd5, 0xb6, 0xc6, 0xc1, 0x8c, 0x39, 0xfc,
++			0x25, 0xa6, 0x55, 0xd9, 0xdd, 0x2d, 0x4c, 0xec,
++			0x49, 0xc6, 0xeb, 0x0e, 0xa8, 0x25, 0x2a, 0x16,
++			0x1b, 0x66, 0x84, 0xda, 0xe2, 0x92, 0xe5, 0xc0,
++			0xc8, 0x53, 0x07, 0xaf, 0x80, 0x84, 0xec, 0xfd,
++			0xcd, 0xd1, 0x6e, 0xcd, 0x6f, 0x6a, 0xf5, 0x36,
++			0xc5, 0x15, 0xe5, 0x25, 0x7d, 0x77, 0xd1, 0x1a,
++			0x93, 0x36, 0xa9, 0xcf, 0x7c, 0xa4, 0x54, 0x4a,
++			0x06, 0x51, 0x48, 0x4e, 0xf6, 0x59, 0x87, 0xd2,
++			0x04, 0x02, 0xef, 0xd3, 0x44, 0xde, 0x76, 0x31,
++			0xb3, 0x34, 0x17, 0x1b, 0x9d, 0x66, 0x11, 0x9f,
++			0x1e, 0xcc, 0x17, 0xe9, 0xc7, 0x3c, 0x1b, 0xe7,
++			0xcb, 0x50, 0x08, 0xfc, 0xdc, 0x2b, 0x24, 0xdb,
++			0x65, 0x83, 0xd0, 0x3b, 0xe3, 0x30, 0xea, 0x94,
++			0x6c, 0xe7, 0xe8, 0x35, 0x32, 0xc7, 0xdb, 0x64,
++			0xb4, 0x01, 0xab, 0x36, 0x2c, 0x77, 0x13, 0xaf,
++			0xf8, 0x2b, 0x88, 0x3f, 0x54, 0x39, 0xc4, 0x44,
++			0xfe, 0xef, 0x6f, 0x68, 0x34, 0xbe, 0x0f, 0x05,
++			0x16, 0x6d, 0xf6, 0x0a, 0x30, 0xe7, 0xe3, 0xed,
++			0xc4, 0xde, 0x3c, 0x1b, 0x13, 0xd8, 0xdb, 0xfe,
++			0x41, 0x62, 0xe5, 0x28, 0xd4, 0x8d, 0xa3, 0xc7,
++			0x93, 0x97, 0xc6, 0x48, 0x45, 0x1d, 0x9f, 0x83,
++			0xdf, 0x4b, 0x40, 0x3e, 0x42, 0x25, 0x87, 0x80,
++			0x4c, 0x7d, 0xa8, 0xd4, 0x98, 0x23, 0x95, 0x75,
++			0x41, 0x8c, 0xda, 0x41, 0x9b, 0xd4, 0xa7, 0x06,
++			0xb5, 0xf1, 0x71, 0x09, 0x53, 0xbe, 0xca, 0xbf,
++			0x32, 0x03, 0xed, 0xf0, 0x50, 0x1c, 0x56, 0x39,
++			0x5b, 0xa4, 0x75, 0x18, 0xf7, 0x9b, 0x58, 0xef,
++			0x53, 0xfc, 0x2a, 0x38, 0x23, 0x15, 0x75, 0xcd,
++			0x45, 0xe5, 0x5a, 0x82, 0x55, 0xba, 0x21, 0xfa,
++			0xd4, 0xbd, 0xc6, 0x94, 0x7c, 0xc5, 0x80, 0x12,
++			0xf7, 0x4b, 0x32, 0xc4, 0x9a, 0x82, 0xd8, 0x28,
++			0x8f, 0xd9, 0xc2, 0x0f, 0x60, 0x03, 0xbe, 0x5e,
++			0x21, 0xd6, 0x5f, 0x58, 0xbf, 0x5c, 0xb1, 0x32,
++			0x82, 0x8d, 0xa9, 0xe5, 0xf2, 0x66, 0x1a, 0xc0,
++			0xa0, 0xbc, 0x58, 0x2f, 0x71, 0xf5, 0x2f, 0xed,
++			0xd1, 0x26, 0xb9, 0xd8, 0x49, 0x5a, 0x07, 0x19,
++			0x01, 0x7c, 0x59, 0xb0, 0xf8, 0xa4, 0xb7, 0xd3,
++			0x7b, 0x1a, 0x8c, 0x38, 0xf4, 0x50, 0xa4, 0x59,
++			0xb0, 0xcc, 0x41, 0x0b, 0x88, 0x7f, 0xe5, 0x31,
++			0xb3, 0x42, 0xba, 0xa2, 0x7e, 0xd4, 0x32, 0x71,
++			0x45, 0x87, 0x48, 0xa9, 0xc2, 0xf2, 0x89, 0xb3,
++			0xe4, 0xa7, 0x7e, 0x52, 0x15, 0x61, 0xfa, 0xfe,
++			0xc9, 0xdd, 0x81, 0xeb, 0x13, 0xab, 0xab, 0xc3,
++			0x98, 0x59, 0xd8, 0x16, 0x3d, 0x14, 0x7a, 0x1c,
++			0x3c, 0x41, 0x9a, 0x16, 0x16, 0x9b, 0xd2, 0xd2,
++			0x69, 0x3a, 0x29, 0x23, 0xac, 0x86, 0x32, 0xa5,
++			0x48, 0x9c, 0x9e, 0xf3, 0x47, 0x77, 0x81, 0x70,
++			0x24, 0xe8, 0x85, 0xd2, 0xf5, 0xb5, 0xfa, 0xff,
++			0x59, 0x6a, 0xd3, 0x50, 0x59, 0x43, 0x59, 0xde,
++			0xd9, 0xf1, 0x55, 0xa5, 0x0c, 0xc3, 0x1a, 0x1a,
++			0x18, 0x34, 0x0d, 0x1a, 0x63, 0x33, 0xed, 0x10,
++			0xe0, 0x1d, 0x2a, 0x18, 0xd2, 0xc0, 0x54, 0xa8,
++			0xca, 0xb5, 0x9a, 0xd3, 0xdd, 0xca, 0x45, 0x84,
++			0x50, 0xe7, 0x0f, 0xfe, 0xa4, 0x99, 0x5a, 0xbe,
++			0x43, 0x2d, 0x9a, 0xcb, 0x92, 0x3f, 0x5a, 0x1d,
++			0x85, 0xd8, 0xc9, 0xdf, 0x68, 0xc9, 0x12, 0x80,
++			0x56, 0x0c, 0xdc, 0x00, 0xdc, 0x3a, 0x7d, 0x9d,
++			0xa3, 0xa2, 0xe8, 0x4d, 0xbf, 0xf9, 0x70, 0xa0,
++			0xa4, 0x13, 0x4f, 0x6b, 0xaf, 0x0a, 0x89, 0x7f,
++			0xda, 0xf0, 0xbf, 0x9b, 0xc8, 0x1d, 0xe5, 0xf8,
++			0x2e, 0x8b, 0x07, 0xb5, 0x73, 0x1b, 0xcc, 0xa2,
++			0xa6, 0xad, 0x30, 0xbc, 0x78, 0x3c, 0x5b, 0x10,
++			0xfa, 0x5e, 0x62, 0x2d, 0x9e, 0x64, 0xb3, 0x33,
++			0xce, 0xf9, 0x1f, 0x86, 0xe7, 0x8b, 0xa2, 0xb8,
++			0xe8, 0x99, 0x57, 0x8c, 0x11, 0xed, 0x66, 0xd9,
++			0x3c, 0x72, 0xb9, 0xc3, 0xe6, 0x4e, 0x17, 0x3a,
++			0x6a, 0xcb, 0x42, 0x24, 0x06, 0xed, 0x3e, 0x4e,
++			0xa3, 0xe8, 0x6a, 0x94, 0xda, 0x0d, 0x4e, 0xd5,
++			0x14, 0x19, 0xcf, 0xb6, 0x26, 0xd8, 0x2e, 0xcc,
++			0x64, 0x76, 0x38, 0x49, 0x4d, 0xfe, 0x30, 0x6d,
++			0xe4, 0xc8, 0x8c, 0x7b, 0xc4, 0xe0, 0x35, 0xba,
++			0x22, 0x6e, 0x76, 0xe1, 0x1a, 0xf2, 0x53, 0xc3,
++			0x28, 0xa2, 0x82, 0x1f, 0x61, 0x69, 0xad, 0xc1,
++			0x7b, 0x28, 0x4b, 0x1e, 0x6c, 0x85, 0x95, 0x9b,
++			0x51, 0xb5, 0x17, 0x7f, 0x12, 0x69, 0x8c, 0x24,
++			0xd5, 0xc7, 0x5a, 0x5a, 0x11, 0x54, 0xff, 0x5a,
++			0xf7, 0x16, 0xc3, 0x91, 0xa6, 0xf0, 0xdc, 0x0a,
++			0xb6, 0xa7, 0x4a, 0x0d, 0x7a, 0x58, 0xfe, 0xa5,
++			0xf5, 0xcb, 0x8f, 0x7b, 0x0e, 0xea, 0x57, 0xe7,
++			0xbd, 0x79, 0xd6, 0x1c, 0x88, 0x23, 0x6c, 0xf2,
++			0x4d, 0x29, 0x77, 0x53, 0x35, 0x6a, 0x00, 0x8d,
++			0xcd, 0xa3, 0x58, 0xbe, 0x77, 0x99, 0x18, 0xf8,
++			0xe6, 0xe1, 0x8f, 0xe9, 0x37, 0x8f, 0xe3, 0xe2,
++			0x5a, 0x8a, 0x93, 0x25, 0xaf, 0xf3, 0x78, 0x80,
++			0xbe, 0xa6, 0x1b, 0xc6, 0xac, 0x8b, 0x1c, 0x91,
++			0x58, 0xe1, 0x9f, 0x89, 0x35, 0x9d, 0x1d, 0x21,
++			0x29, 0x9f, 0xf4, 0x99, 0x02, 0x27, 0x0f, 0xa8,
++			0x4f, 0x79, 0x94, 0x2b, 0x33, 0x2c, 0xda, 0xa2,
++			0x26, 0x39, 0x83, 0x94, 0xef, 0x27, 0xd8, 0x53,
++			0x8f, 0x66, 0x0d, 0xe4, 0x41, 0x7d, 0x34, 0xcd,
++			0x43, 0x7c, 0x95, 0x0a, 0x53, 0xef, 0x66, 0xda,
++			0x7e, 0x9b, 0xf3, 0x93, 0xaf, 0xd0, 0x73, 0x71,
++			0xba, 0x40, 0x9b, 0x74, 0xf8, 0xd7, 0xd7, 0x41,
++			0x6d, 0xaf, 0x72, 0x9c, 0x8d, 0x21, 0x87, 0x3c,
++			0xfd, 0x0a, 0x90, 0xa9, 0x47, 0x96, 0x9e, 0xd3,
++			0x88, 0xee, 0x73, 0xcf, 0x66, 0x2f, 0x52, 0x56,
++			0x6d, 0xa9, 0x80, 0x4c, 0xe2, 0x6f, 0x62, 0x88,
++			0x3f, 0x0e, 0x54, 0x17, 0x48, 0x80, 0x5d, 0xd3,
++			0xc3, 0xda, 0x25, 0x3d, 0xa1, 0xc8, 0xcb, 0x9f,
++			0x9b, 0x70, 0xb3, 0xa1, 0xeb, 0x04, 0x52, 0xa1,
++			0xf2, 0x22, 0x0f, 0xfc, 0xc8, 0x18, 0xfa, 0xf9,
++			0x85, 0x9c, 0xf1, 0xac, 0xeb, 0x0c, 0x02, 0x46,
++			0x75, 0xd2, 0xf5, 0x2c, 0xe3, 0xd2, 0x59, 0x94,
++			0x12, 0xf3, 0x3c, 0xfc, 0xd7, 0x92, 0xfa, 0x36,
++			0xba, 0x61, 0x34, 0x38, 0x7c, 0xda, 0x48, 0x3e,
++			0x08, 0xc9, 0x39, 0x23, 0x5e, 0x02, 0x2c, 0x1a,
++			0x18, 0x7e, 0xb4, 0xd9, 0xfd, 0x9e, 0x40, 0x02,
++			0xb1, 0x33, 0x37, 0x32, 0xe7, 0xde, 0xd6, 0xd0,
++			0x7c, 0x58, 0x65, 0x4b, 0xf8, 0x34, 0x27, 0x9c,
++			0x44, 0xb4, 0xbd, 0xe9, 0xe9, 0x4c, 0x78, 0x7d,
++			0x4b, 0x9f, 0xce, 0xb1, 0xcd, 0x47, 0xa5, 0x37,
++			0xe5, 0x6d, 0xbd, 0xb9, 0x43, 0x94, 0x0a, 0xd4,
++			0xd6, 0xf9, 0x04, 0x5f, 0xb5, 0x66, 0x6c, 0x1a,
++			0x35, 0x12, 0xe3, 0x36, 0x28, 0x27, 0x36, 0x58,
++			0x01, 0x2b, 0x79, 0xe4, 0xba, 0x6d, 0x10, 0x7d,
++			0x65, 0xdf, 0x84, 0x95, 0xf4, 0xd5, 0xb6, 0x8f,
++			0x2b, 0x9f, 0x96, 0x00, 0x86, 0x60, 0xf0, 0x21,
++			0x76, 0xa8, 0x6a, 0x8c, 0x28, 0x1c, 0xb3, 0x6b,
++			0x97, 0xd7, 0xb6, 0x53, 0x2a, 0xcc, 0xab, 0x40,
++			0x9d, 0x62, 0x79, 0x58, 0x52, 0xe6, 0x65, 0xb7,
++			0xab, 0x55, 0x67, 0x9c, 0x89, 0x7c, 0x03, 0xb0,
++			0x73, 0x59, 0xc5, 0x81, 0xf5, 0x18, 0x17, 0x5c,
++			0x89, 0xf3, 0x78, 0x35, 0x44, 0x62, 0x78, 0x72,
++			0xd0, 0x96, 0xeb, 0x31, 0xe7, 0x87, 0x77, 0x14,
++			0x99, 0x51, 0xf2, 0x59, 0x26, 0x9e, 0xb5, 0xa6,
++			0x45, 0xfe, 0x6e, 0xbd, 0x07, 0x4c, 0x94, 0x5a,
++			0xa5, 0x7d, 0xfc, 0xf1, 0x2b, 0x77, 0xe2, 0xfe,
++			0x17, 0xd4, 0x84, 0xa0, 0xac, 0xb5, 0xc7, 0xda,
++			0xa9, 0x1a, 0xb6, 0xf3, 0x74, 0x11, 0xb4, 0x9d,
++			0xfb, 0x79, 0x2e, 0x04, 0x2d, 0x50, 0x28, 0x83,
++			0xbf, 0xc6, 0x52, 0xd3, 0x34, 0xd6, 0xe8, 0x7a,
++			0xb6, 0xea, 0xe7, 0xa8, 0x6c, 0x15, 0x1e, 0x2c,
++			0x57, 0xbc, 0x48, 0x4e, 0x5f, 0x5c, 0xb6, 0x92,
++			0xd2, 0x49, 0x77, 0x81, 0x6d, 0x90, 0x70, 0xae,
++			0x98, 0xa1, 0x03, 0x0d, 0x6b, 0xb9, 0x77, 0x14,
++			0xf1, 0x4e, 0x23, 0xd3, 0xf8, 0x68, 0xbd, 0xc2,
++			0xfe, 0x04, 0xb7, 0x5c, 0xc5, 0x17, 0x60, 0x8f,
++			0x65, 0x54, 0xa4, 0x7a, 0x42, 0xdc, 0x18, 0x0d,
++			0xb5, 0xcf, 0x0f, 0xd3, 0xc7, 0x91, 0x66, 0x1b,
++			0x45, 0x42, 0x27, 0x75, 0x50, 0xe5, 0xee, 0xb8,
++			0x7f, 0x33, 0x2c, 0xba, 0x4a, 0x92, 0x4d, 0x2c,
++			0x3c, 0xe3, 0x0d, 0x80, 0x01, 0xba, 0x0d, 0x29,
++			0xd8, 0x3c, 0xe9, 0x13, 0x16, 0x57, 0xe6, 0xea,
++			0x94, 0x52, 0xe7, 0x00, 0x4d, 0x30, 0xb0, 0x0f,
++			0x35, 0xb8, 0xb8, 0xa7, 0xb1, 0xb5, 0x3b, 0x44,
++			0xe1, 0x2f, 0xfd, 0x88, 0xed, 0x43, 0xe7, 0x52,
++			0x10, 0x93, 0xb3, 0x8a, 0x30, 0x6b, 0x0a, 0xf7,
++			0x23, 0xc6, 0x50, 0x9d, 0x4a, 0xb0, 0xde, 0xc3,
++			0xdc, 0x9b, 0x2f, 0x01, 0x56, 0x36, 0x09, 0xc5,
++			0x2f, 0x6b, 0xfe, 0xf1, 0xd8, 0x27, 0x45, 0x03,
++			0x30, 0x5e, 0x5c, 0x5b, 0xb4, 0x62, 0x0e, 0x1a,
++			0xa9, 0x21, 0x2b, 0x92, 0x94, 0x87, 0x62, 0x57,
++			0x4c, 0x10, 0x74, 0x1a, 0xf1, 0x0a, 0xc5, 0x84,
++			0x3b, 0x9e, 0x72, 0x02, 0xd7, 0xcc, 0x09, 0x56,
++			0xbd, 0x54, 0xc1, 0xf0, 0xc3, 0xe3, 0xb3, 0xf8,
++			0xd2, 0x0d, 0x61, 0xcb, 0xef, 0xce, 0x0d, 0x05,
++			0xb0, 0x98, 0xd9, 0x8e, 0x4f, 0xf9, 0xbc, 0x93,
++			0xa6, 0xea, 0xc8, 0xcf, 0x10, 0x53, 0x4b, 0xf1,
++			0xec, 0xfc, 0x89, 0xf9, 0x64, 0xb0, 0x22, 0xbf,
++			0x9e, 0x55, 0x46, 0x9f, 0x7c, 0x50, 0x8e, 0x84,
++			0x54, 0x20, 0x98, 0xd7, 0x6c, 0x40, 0x1e, 0xdb,
++			0x69, 0x34, 0x78, 0x61, 0x24, 0x21, 0x9c, 0x8a,
++			0xb3, 0x62, 0x31, 0x8b, 0x6e, 0xf5, 0x2a, 0x35,
++			0x86, 0x13, 0xb1, 0x6c, 0x64, 0x2e, 0x41, 0xa5,
++			0x05, 0xf2, 0x42, 0xba, 0xd2, 0x3a, 0x0d, 0x8e,
++			0x8a, 0x59, 0x94, 0x3c, 0xcf, 0x36, 0x27, 0x82,
++			0xc2, 0x45, 0xee, 0x58, 0xcd, 0x88, 0xb4, 0xec,
++			0xde, 0xb2, 0x96, 0x0a, 0xaf, 0x38, 0x6f, 0x88,
++			0xd7, 0xd8, 0xe1, 0xdf, 0xb9, 0x96, 0xa9, 0x0a,
++			0xb1, 0x95, 0x28, 0x86, 0x20, 0xe9, 0x17, 0x49,
++			0xa2, 0x29, 0x38, 0xaa, 0xa5, 0xe9, 0x6e, 0xf1,
++			0x19, 0x27, 0xc0, 0xd5, 0x2a, 0x22, 0xc3, 0x0b,
++			0xdb, 0x7c, 0x73, 0x10, 0xb9, 0xba, 0x89, 0x76,
++			0x54, 0xae, 0x7d, 0x71, 0xb3, 0x93, 0xf6, 0x32,
++			0xe6, 0x47, 0x43, 0x55, 0xac, 0xa0, 0x0d, 0xc2,
++			0x93, 0x27, 0x4a, 0x8e, 0x0e, 0x74, 0x15, 0xc7,
++			0x0b, 0x85, 0xd9, 0x0c, 0xa9, 0x30, 0x7a, 0x3e,
++			0xea, 0x8f, 0x85, 0x6d, 0x3a, 0x12, 0x4f, 0x72,
++			0x69, 0x58, 0x7a, 0x80, 0xbb, 0xb5, 0x97, 0xf3,
++			0xcf, 0x70, 0xd2, 0x5d, 0xdd, 0x4d, 0x21, 0x79,
++			0x54, 0x4d, 0xe4, 0x05, 0xe8, 0xbd, 0xc2, 0x62,
++			0xb1, 0x3b, 0x77, 0x1c, 0xd6, 0x5c, 0xf3, 0xa0,
++			0x79, 0x00, 0xa8, 0x6c, 0x29, 0xd9, 0x18, 0x24,
++			0x36, 0xa2, 0x46, 0xc0, 0x96, 0x65, 0x7f, 0xbd,
++			0x2a, 0xed, 0x36, 0x16, 0x0c, 0xaa, 0x9f, 0xf4,
++			0xc5, 0xb4, 0xe2, 0x12, 0xed, 0x69, 0xed, 0x4f,
++			0x26, 0x2c, 0x39, 0x52, 0x89, 0x98, 0xe7, 0x2c,
++			0x99, 0xa4, 0x9e, 0xa3, 0x9b, 0x99, 0x46, 0x7a,
++			0x3a, 0xdc, 0xa8, 0x59, 0xa3, 0xdb, 0xc3, 0x3b,
++			0x95, 0x0d, 0x3b, 0x09, 0x6e, 0xee, 0x83, 0x5d,
++			0x32, 0x4d, 0xed, 0xab, 0xfa, 0x98, 0x14, 0x4e,
++			0xc3, 0x15, 0x45, 0x53, 0x61, 0xc4, 0x93, 0xbd,
++			0x90, 0xf4, 0x99, 0x95, 0x4c, 0xe6, 0x76, 0x92,
++			0x29, 0x90, 0x46, 0x30, 0x92, 0x69, 0x7d, 0x13,
++			0xf2, 0xa5, 0xcd, 0x69, 0x49, 0x44, 0xb2, 0x0f,
++			0x63, 0x40, 0x36, 0x5f, 0x09, 0xe2, 0x78, 0xf8,
++			0x91, 0xe3, 0xe2, 0xfa, 0x10, 0xf7, 0xc8, 0x24,
++			0xa8, 0x89, 0x32, 0x5c, 0x37, 0x25, 0x1d, 0xb2,
++			0xea, 0x17, 0x8a, 0x0a, 0xa9, 0x64, 0xc3, 0x7c,
++			0x3c, 0x7c, 0xbd, 0xc6, 0x79, 0x34, 0xe7, 0xe2,
++			0x85, 0x8e, 0xbf, 0xf8, 0xde, 0x92, 0xa0, 0xae,
++			0x20, 0xc4, 0xf6, 0xbb, 0x1f, 0x38, 0x19, 0x0e,
++			0xe8, 0x79, 0x9c, 0xa1, 0x23, 0xe9, 0x54, 0x7e,
++			0x37, 0x2f, 0xe2, 0x94, 0x32, 0xaf, 0xa0, 0x23,
++			0x49, 0xe4, 0xc0, 0xb3, 0xac, 0x00, 0x8f, 0x36,
++			0x05, 0xc4, 0xa6, 0x96, 0xec, 0x05, 0x98, 0x4f,
++			0x96, 0x67, 0x57, 0x1f, 0x20, 0x86, 0x1b, 0x2d,
++			0x69, 0xe4, 0x29, 0x93, 0x66, 0x5f, 0xaf, 0x6b,
++			0x88, 0x26, 0x2c, 0x67, 0x02, 0x4b, 0x52, 0xd0,
++			0x83, 0x7a, 0x43, 0x1f, 0xc0, 0x71, 0x15, 0x25,
++			0x77, 0x65, 0x08, 0x60, 0x11, 0x76, 0x4c, 0x8d,
++			0xed, 0xa9, 0x27, 0xc6, 0xb1, 0x2a, 0x2c, 0x6a,
++			0x4a, 0x97, 0xf5, 0xc6, 0xb7, 0x70, 0x42, 0xd3,
++			0x03, 0xd1, 0x24, 0x95, 0xec, 0x6d, 0xab, 0x38,
++			0x72, 0xce, 0xe2, 0x8b, 0x33, 0xd7, 0x51, 0x09,
++			0xdc, 0x45, 0xe0, 0x09, 0x96, 0x32, 0xf3, 0xc4,
++			0x84, 0xdc, 0x73, 0x73, 0x2d, 0x1b, 0x11, 0x98,
++			0xc5, 0x0e, 0x69, 0x28, 0x94, 0xc7, 0xb5, 0x4d,
++			0xc8, 0x8a, 0xd0, 0xaa, 0x13, 0x2e, 0x18, 0x74,
++			0xdd, 0xd1, 0x1e, 0xf3, 0x90, 0xe8, 0xfc, 0x9a,
++			0x72, 0x4a, 0x0e, 0xd1, 0xe4, 0xfb, 0x0d, 0x96,
++			0xd1, 0x0c, 0x79, 0x85, 0x1b, 0x1c, 0xfe, 0xe1,
++			0x62, 0x8f, 0x7a, 0x73, 0x32, 0xab, 0xc8, 0x18,
++			0x69, 0xe3, 0x34, 0x30, 0xdf, 0x13, 0xa6, 0xe5,
++			0xe8, 0x0e, 0x67, 0x7f, 0x81, 0x11, 0xb4, 0x60,
++			0xc7, 0xbd, 0x79, 0x65, 0x50, 0xdc, 0xc4, 0x5b,
++			0xde, 0x39, 0xa4, 0x01, 0x72, 0x63, 0xf3, 0xd1,
++			0x64, 0x4e, 0xdf, 0xfc, 0x27, 0x92, 0x37, 0x0d,
++			0x57, 0xcd, 0x11, 0x4f, 0x11, 0x04, 0x8e, 0x1d,
++			0x16, 0xf7, 0xcd, 0x92, 0x9a, 0x99, 0x30, 0x14,
++			0xf1, 0x7c, 0x67, 0x1b, 0x1f, 0x41, 0x0b, 0xe8,
++			0x32, 0xe8, 0xb8, 0xc1, 0x4f, 0x54, 0x86, 0x4f,
++			0xe5, 0x79, 0x81, 0x73, 0xcd, 0x43, 0x59, 0x68,
++			0x73, 0x02, 0x3b, 0x78, 0x21, 0x72, 0x43, 0x00,
++			0x49, 0x17, 0xf7, 0x00, 0xaf, 0x68, 0x24, 0x53,
++			0x05, 0x0a, 0xc3, 0x33, 0xe0, 0x33, 0x3f, 0x69,
++			0xd2, 0x84, 0x2f, 0x0b, 0xed, 0xde, 0x04, 0xf4,
++			0x11, 0x94, 0x13, 0x69, 0x51, 0x09, 0x28, 0xde,
++			0x57, 0x5c, 0xef, 0xdc, 0x9a, 0x49, 0x1c, 0x17,
++			0x97, 0xf3, 0x96, 0xc1, 0x7f, 0x5d, 0x2e, 0x7d,
++			0x55, 0xb8, 0xb3, 0x02, 0x09, 0xb3, 0x1f, 0xe7,
++			0xc9, 0x8d, 0xa3, 0x36, 0x34, 0x8a, 0x77, 0x13,
++			0x30, 0x63, 0x4c, 0xa5, 0xcd, 0xc3, 0xe0, 0x7e,
++			0x05, 0xa1, 0x7b, 0x0c, 0xcb, 0x74, 0x47, 0x31,
++			0x62, 0x03, 0x43, 0xf1, 0x87, 0xb4, 0xb0, 0x85,
++			0x87, 0x8e, 0x4b, 0x25, 0xc7, 0xcf, 0xae, 0x4b,
++			0x36, 0x46, 0x3e, 0x62, 0xbc, 0x6f, 0xeb, 0x5f,
++			0x73, 0xac, 0xe6, 0x07, 0xee, 0xc1, 0xa1, 0xd6,
++			0xc4, 0xab, 0xc9, 0xd6, 0x89, 0x45, 0xe1, 0xf1,
++			0x04, 0x4e, 0x1a, 0x6f, 0xbb, 0x4f, 0x3a, 0xa3,
++			0xa0, 0xcb, 0xa3, 0x0a, 0xd8, 0x71, 0x35, 0x55,
++			0xe4, 0xbc, 0x2e, 0x04, 0x06, 0xe6, 0xff, 0x5b,
++			0x1c, 0xc0, 0x11, 0x7c, 0xc5, 0x17, 0xf3, 0x38,
++			0xcf, 0xe9, 0xba, 0x0f, 0x0e, 0xef, 0x02, 0xc2,
++			0x8d, 0xc6, 0xbc, 0x4b, 0x67, 0x20, 0x95, 0xd7,
++			0x2c, 0x45, 0x5b, 0x86, 0x44, 0x8c, 0x6f, 0x2e,
++			0x7e, 0x9f, 0x1c, 0x77, 0xba, 0x6b, 0x0e, 0xa3,
++			0x69, 0xdc, 0xab, 0x24, 0x57, 0x60, 0x47, 0xc1,
++			0xd1, 0xa5, 0x9d, 0x23, 0xe6, 0xb1, 0x37, 0xfe,
++			0x93, 0xd2, 0x4c, 0x46, 0xf9, 0x0c, 0xc6, 0xfb,
++			0xd6, 0x9d, 0x99, 0x69, 0xab, 0x7a, 0x07, 0x0c,
++			0x65, 0xe7, 0xc4, 0x08, 0x96, 0xe2, 0xa5, 0x01,
++			0x3f, 0x46, 0x07, 0x05, 0x7e, 0xe8, 0x9a, 0x90,
++			0x50, 0xdc, 0xe9, 0x7a, 0xea, 0xa1, 0x39, 0x6e,
++			0x66, 0xe4, 0x6f, 0xa5, 0x5f, 0xb2, 0xd9, 0x5b,
++			0xf5, 0xdb, 0x2a, 0x32, 0xf0, 0x11, 0x6f, 0x7c,
++			0x26, 0x10, 0x8f, 0x3d, 0x80, 0xe9, 0x58, 0xf7,
++			0xe0, 0xa8, 0x57, 0xf8, 0xdb, 0x0e, 0xce, 0x99,
++			0x63, 0x19, 0x3d, 0xd5, 0xec, 0x1b, 0x77, 0x69,
++			0x98, 0xf6, 0xe4, 0x5f, 0x67, 0x17, 0x4b, 0x09,
++			0x85, 0x62, 0x82, 0x70, 0x18, 0xe2, 0x9a, 0x78,
++			0xe2, 0x62, 0xbd, 0xb4, 0xf1, 0x42, 0xc6, 0xfb,
++			0x08, 0xd0, 0xbd, 0xeb, 0x4e, 0x09, 0xf2, 0xc8,
++			0x1e, 0xdc, 0x3d, 0x32, 0x21, 0x56, 0x9c, 0x4f,
++			0x35, 0xf3, 0x61, 0x06, 0x72, 0x84, 0xc4, 0x32,
++			0xf2, 0xf1, 0xfa, 0x0b, 0x2f, 0xc3, 0xdb, 0x02,
++			0x04, 0xc2, 0xde, 0x57, 0x64, 0x60, 0x8d, 0xcf,
++			0xcb, 0x86, 0x5d, 0x97, 0x3e, 0xb1, 0x9c, 0x01,
++			0xd6, 0x28, 0x8f, 0x99, 0xbc, 0x46, 0xeb, 0x05,
++			0xaf, 0x7e, 0xb8, 0x21, 0x2a, 0x56, 0x85, 0x1c,
++			0xb3, 0x71, 0xa0, 0xde, 0xca, 0x96, 0xf1, 0x78,
++			0x49, 0xa2, 0x99, 0x81, 0x80, 0x5c, 0x01, 0xf5,
++			0xa0, 0xa2, 0x56, 0x63, 0xe2, 0x70, 0x07, 0xa5,
++			0x95, 0xd6, 0x85, 0xeb, 0x36, 0x9e, 0xa9, 0x51,
++			0x66, 0x56, 0x5f, 0x1d, 0x02, 0x19, 0xe2, 0xf6,
++			0x4f, 0x73, 0x38, 0x09, 0x75, 0x64, 0x48, 0xe0,
++			0xf1, 0x7e, 0x0e, 0xe8, 0x9d, 0xf9, 0xed, 0x94,
++			0xfe, 0x16, 0x26, 0x62, 0x49, 0x74, 0xf4, 0xb0,
++			0xd4, 0xa9, 0x6c, 0xb0, 0xfd, 0x53, 0xe9, 0x81,
++			0xe0, 0x7a, 0xbf, 0xcf, 0xb5, 0xc4, 0x01, 0x81,
++			0x79, 0x99, 0x77, 0x01, 0x3b, 0xe9, 0xa2, 0xb6,
++			0xe6, 0x6a, 0x8a, 0x9e, 0x56, 0x1c, 0x8d, 0x1e,
++			0x8f, 0x06, 0x55, 0x2c, 0x6c, 0xdc, 0x92, 0x87,
++			0x64, 0x3b, 0x4b, 0x19, 0xa1, 0x13, 0x64, 0x1d,
++			0x4a, 0xe9, 0xc0, 0x00, 0xb8, 0x95, 0xef, 0x6b,
++			0x1a, 0x86, 0x6d, 0x37, 0x52, 0x02, 0xc2, 0xe0,
++			0xc8, 0xbb, 0x42, 0x0c, 0x02, 0x21, 0x4a, 0xc9,
++			0xef, 0xa0, 0x54, 0xe4, 0x5e, 0x16, 0x53, 0x81,
++			0x70, 0x62, 0x10, 0xaf, 0xde, 0xb8, 0xb5, 0xd3,
++			0xe8, 0x5e, 0x6c, 0xc3, 0x8a, 0x3e, 0x18, 0x07,
++			0xf2, 0x2f, 0x7d, 0xa7, 0xe1, 0x3d, 0x4e, 0xb4,
++			0x26, 0xa7, 0xa3, 0x93, 0x86, 0xb2, 0x04, 0x1e,
++			0x53, 0x5d, 0x86, 0xd6, 0xde, 0x65, 0xca, 0xe3,
++			0x4e, 0xc1, 0xcf, 0xef, 0xc8, 0x70, 0x1b, 0x83,
++			0x13, 0xdd, 0x18, 0x8b, 0x0d, 0x76, 0xd2, 0xf6,
++			0x37, 0x7a, 0x93, 0x7a, 0x50, 0x11, 0x9f, 0x96,
++			0x86, 0x25, 0xfd, 0xac, 0xdc, 0xbe, 0x18, 0x93,
++			0x19, 0x6b, 0xec, 0x58, 0x4f, 0xb9, 0x75, 0xa7,
++			0xdd, 0x3f, 0x2f, 0xec, 0xc8, 0x5a, 0x84, 0xab,
++			0xd5, 0xe4, 0x8a, 0x07, 0xf6, 0x4d, 0x23, 0xd6,
++			0x03, 0xfb, 0x03, 0x6a, 0xea, 0x66, 0xbf, 0xd4,
++			0xb1, 0x34, 0xfb, 0x78, 0xe9, 0x55, 0xdc, 0x7c,
++			0x3d, 0x9c, 0xe5, 0x9a, 0xac, 0xc3, 0x7a, 0x80,
++			0x24, 0x6d, 0xa0, 0xef, 0x25, 0x7c, 0xb7, 0xea,
++			0xce, 0x4d, 0x5f, 0x18, 0x60, 0xce, 0x87, 0x22,
++			0x66, 0x2f, 0xd5, 0xdd, 0xdd, 0x02, 0x21, 0x75,
++			0x82, 0xa0, 0x1f, 0x58, 0xc6, 0xd3, 0x62, 0xf7,
++			0x32, 0xd8, 0xaf, 0x1e, 0x07, 0x77, 0x51, 0x96,
++			0xd5, 0x6b, 0x1e, 0x7e, 0x80, 0x02, 0xe8, 0x67,
++			0xea, 0x17, 0x0b, 0x10, 0xd2, 0x3f, 0x28, 0x25,
++			0x4f, 0x05, 0x77, 0x02, 0x14, 0x69, 0xf0, 0x2c,
++			0xbe, 0x0c, 0xf1, 0x74, 0x30, 0xd1, 0xb9, 0x9b,
++			0xfc, 0x8c, 0xbb, 0x04, 0x16, 0xd9, 0xba, 0xc3,
++			0xbc, 0x91, 0x8a, 0xc4, 0x30, 0xa4, 0xb0, 0x12,
++			0x4c, 0x21, 0x87, 0xcb, 0xc9, 0x1d, 0x16, 0x96,
++			0x07, 0x6f, 0x23, 0x54, 0xb9, 0x6f, 0x79, 0xe5,
++			0x64, 0xc0, 0x64, 0xda, 0xb1, 0xae, 0xdd, 0x60,
++			0x6c, 0x1a, 0x9d, 0xd3, 0x04, 0x8e, 0x45, 0xb0,
++			0x92, 0x61, 0xd0, 0x48, 0x81, 0xed, 0x5e, 0x1d,
++			0xa0, 0xc9, 0xa4, 0x33, 0xc7, 0x13, 0x51, 0x5d,
++			0x7f, 0x83, 0x73, 0xb6, 0x70, 0x18, 0x65, 0x3e,
++			0x2f, 0x0e, 0x7a, 0x12, 0x39, 0x98, 0xab, 0xd8,
++			0x7e, 0x6f, 0xa3, 0xd1, 0xba, 0x56, 0xad, 0xbd,
++			0xf0, 0x03, 0x01, 0x1c, 0x85, 0x35, 0x9f, 0xeb,
++			0x19, 0x63, 0xa1, 0xaf, 0xfe, 0x2d, 0x35, 0x50,
++			0x39, 0xa0, 0x65, 0x7c, 0x95, 0x7e, 0x6b, 0xfe,
++			0xc1, 0xac, 0x07, 0x7c, 0x98, 0x4f, 0xbe, 0x57,
++			0xa7, 0x22, 0xec, 0xe2, 0x7e, 0x29, 0x09, 0x53,
++			0xe8, 0xbf, 0xb4, 0x7e, 0x3f, 0x8f, 0xfc, 0x14,
++			0xce, 0x54, 0xf9, 0x18, 0x58, 0xb5, 0xff, 0x44,
++			0x05, 0x9d, 0xce, 0x1b, 0xb6, 0x82, 0x23, 0xc8,
++			0x2e, 0xbc, 0x69, 0xbb, 0x4a, 0x29, 0x0f, 0x65,
++			0x94, 0xf0, 0x63, 0x06, 0x0e, 0xef, 0x8c, 0xbd,
++			0xff, 0xfd, 0xb0, 0x21, 0x6e, 0x57, 0x05, 0x75,
++			0xda, 0xd5, 0xc4, 0xeb, 0x8d, 0x32, 0xf7, 0x50,
++			0xd3, 0x6f, 0x22, 0xed, 0x5f, 0x8e, 0xa2, 0x5b,
++			0x80, 0x8c, 0xc8, 0x78, 0x40, 0x24, 0x4b, 0x89,
++			0x30, 0xce, 0x7a, 0x97, 0x0e, 0xc4, 0xaf, 0xef,
++			0x9b, 0xb4, 0xcd, 0x66, 0x74, 0x14, 0x04, 0x2b,
++			0xf7, 0xce, 0x0b, 0x1c, 0x6e, 0xc2, 0x78, 0x8c,
++			0xca, 0xc5, 0xd0, 0x1c, 0x95, 0x4a, 0x91, 0x2d,
++			0xa7, 0x20, 0xeb, 0x86, 0x52, 0xb7, 0x67, 0xd8,
++			0x0c, 0xd6, 0x04, 0x14, 0xde, 0x51, 0x74, 0x75,
++			0xe7, 0x11, 0xb4, 0x87, 0xa3, 0x3d, 0x2d, 0xad,
++			0x4f, 0xef, 0xa0, 0x0f, 0x70, 0x00, 0x6d, 0x13,
++			0x19, 0x1d, 0x41, 0x50, 0xe9, 0xd8, 0xf0, 0x32,
++			0x71, 0xbc, 0xd3, 0x11, 0xf2, 0xac, 0xbe, 0xaf,
++			0x75, 0x46, 0x65, 0x4e, 0x07, 0x34, 0x37, 0xa3,
++			0x89, 0xfe, 0x75, 0xd4, 0x70, 0x4c, 0xc6, 0x3f,
++			0x69, 0x24, 0x0e, 0x38, 0x67, 0x43, 0x8c, 0xde,
++			0x06, 0xb5, 0xb8, 0xe7, 0xc4, 0xf0, 0x41, 0x8f,
++			0xf0, 0xbd, 0x2f, 0x0b, 0xb9, 0x18, 0xf8, 0xde,
++			0x64, 0xb1, 0xdb, 0xee, 0x00, 0x50, 0x77, 0xe1,
++			0xc7, 0xff, 0xa6, 0xfa, 0xdd, 0x70, 0xf4, 0xe3,
++			0x93, 0xe9, 0x77, 0x35, 0x3d, 0x4b, 0x2f, 0x2b,
++			0x6d, 0x55, 0xf0, 0xfc, 0x88, 0x54, 0x4e, 0x89,
++			0xc1, 0x8a, 0x23, 0x31, 0x2d, 0x14, 0x2a, 0xb8,
++			0x1b, 0x15, 0xdd, 0x9e, 0x6e, 0x7b, 0xda, 0x05,
++			0x91, 0x7d, 0x62, 0x64, 0x96, 0x72, 0xde, 0xfc,
++			0xc1, 0xec, 0xf0, 0x23, 0x51, 0x6f, 0xdb, 0x5b,
++			0x1d, 0x08, 0x57, 0xce, 0x09, 0xb8, 0xf6, 0xcd,
++			0x8d, 0x95, 0xf2, 0x20, 0xbf, 0x0f, 0x20, 0x57,
++			0x98, 0x81, 0x84, 0x4f, 0x15, 0x5c, 0x76, 0xe7,
++			0x3e, 0x0a, 0x3a, 0x6c, 0xc4, 0x8a, 0xbe, 0x78,
++			0x74, 0x77, 0xc3, 0x09, 0x4b, 0x5d, 0x48, 0xe4,
++			0xc8, 0xcb, 0x0b, 0xea, 0x17, 0x28, 0xcf, 0xcf,
++			0x31, 0x32, 0x44, 0xa4, 0xe5, 0x0e, 0x1a, 0x98,
++			0x94, 0xc4, 0xf0, 0xff, 0xae, 0x3e, 0x44, 0xe8,
++			0xa5, 0xb3, 0xb5, 0x37, 0x2f, 0xe8, 0xaf, 0x6f,
++			0x28, 0xc1, 0x37, 0x5f, 0x31, 0xd2, 0xb9, 0x33,
++			0xb1, 0xb2, 0x52, 0x94, 0x75, 0x2c, 0x29, 0x59,
++			0x06, 0xc2, 0x25, 0xe8, 0x71, 0x65, 0x4e, 0xed,
++			0xc0, 0x9c, 0xb1, 0xbb, 0x25, 0xdc, 0x6c, 0xe7,
++			0x4b, 0xa5, 0x7a, 0x54, 0x7a, 0x60, 0xff, 0x7a,
++			0xe0, 0x50, 0x40, 0x96, 0x35, 0x63, 0xe4, 0x0b,
++			0x76, 0xbd, 0xa4, 0x65, 0x00, 0x1b, 0x57, 0x88,
++			0xae, 0xed, 0x39, 0x88, 0x42, 0x11, 0x3c, 0xed,
++			0x85, 0x67, 0x7d, 0xb9, 0x68, 0x82, 0xe9, 0x43,
++			0x3c, 0x47, 0x53, 0xfa, 0xe8, 0xf8, 0x9f, 0x1f,
++			0x9f, 0xef, 0x0f, 0xf7, 0x30, 0xd9, 0x30, 0x0e,
++			0xb9, 0x9f, 0x69, 0x18, 0x2f, 0x7e, 0xf8, 0xf8,
++			0xf8, 0x8c, 0x0f, 0xd4, 0x02, 0x4d, 0xea, 0xcd,
++			0x0a, 0x9c, 0x6f, 0x71, 0x6d, 0x5a, 0x4c, 0x60,
++			0xce, 0x20, 0x56, 0x32, 0xc6, 0xc5, 0x99, 0x1f,
++			0x09, 0xe6, 0x4e, 0x18, 0x1a, 0x15, 0x13, 0xa8,
++			0x7d, 0xb1, 0x6b, 0xc0, 0xb2, 0x6d, 0xf8, 0x26,
++			0x66, 0xf8, 0x3d, 0x18, 0x74, 0x70, 0x66, 0x7a,
++			0x34, 0x17, 0xde, 0xba, 0x47, 0xf1, 0x06, 0x18,
++			0xcb, 0xaf, 0xeb, 0x4a, 0x1e, 0x8f, 0xa7, 0x77,
++			0xe0, 0x3b, 0x78, 0x62, 0x66, 0xc9, 0x10, 0xea,
++			0x1f, 0xb7, 0x29, 0x0a, 0x45, 0xa1, 0x1d, 0x1e,
++			0x1d, 0xe2, 0x65, 0x61, 0x50, 0x9c, 0xd7, 0x05,
++			0xf2, 0x0b, 0x5b, 0x12, 0x61, 0x02, 0xc8, 0xe5,
++			0x63, 0x4f, 0x20, 0x0c, 0x07, 0x17, 0x33, 0x5e,
++			0x03, 0x9a, 0x53, 0x0f, 0x2e, 0x55, 0xfe, 0x50,
++			0x43, 0x7d, 0xd0, 0xb6, 0x7e, 0x5a, 0xda, 0xae,
++			0x58, 0xef, 0x15, 0xa9, 0x83, 0xd9, 0x46, 0xb1,
++			0x42, 0xaa, 0xf5, 0x02, 0x6c, 0xce, 0x92, 0x06,
++			0x1b, 0xdb, 0x66, 0x45, 0x91, 0x79, 0xc2, 0x2d,
++			0xe6, 0x53, 0xd3, 0x14, 0xfd, 0xbb, 0x44, 0x63,
++			0xc6, 0xd7, 0x3d, 0x7a, 0x0c, 0x75, 0x78, 0x9d,
++			0x5c, 0xa6, 0x39, 0xb3, 0xe5, 0x63, 0xca, 0x8b,
++			0xfe, 0xd3, 0xef, 0x60, 0x83, 0xf6, 0x8e, 0x70,
++			0xb6, 0x67, 0xc7, 0x77, 0xed, 0x23, 0xef, 0x4c,
++			0xf0, 0xed, 0x2d, 0x07, 0x59, 0x6f, 0xc1, 0x01,
++			0x34, 0x37, 0x08, 0xab, 0xd9, 0x1f, 0x09, 0xb1,
++			0xce, 0x5b, 0x17, 0xff, 0x74, 0xf8, 0x9c, 0xd5,
++			0x2c, 0x56, 0x39, 0x79, 0x0f, 0x69, 0x44, 0x75,
++			0x58, 0x27, 0x01, 0xc4, 0xbf, 0xa7, 0xa1, 0x1d,
++			0x90, 0x17, 0x77, 0x86, 0x5a, 0x3f, 0xd9, 0xd1,
++			0x0e, 0xa0, 0x10, 0xf8, 0xec, 0x1e, 0xa5, 0x7f,
++			0x5e, 0x36, 0xd1, 0xe3, 0x04, 0x2c, 0x70, 0xf7,
++			0x8e, 0xc0, 0x98, 0x2f, 0x6c, 0x94, 0x2b, 0x41,
++			0xb7, 0x60, 0x00, 0xb7, 0x2e, 0xb8, 0x02, 0x8d,
++			0xb8, 0xb0, 0xd3, 0x86, 0xba, 0x1d, 0xd7, 0x90,
++			0xd6, 0xb6, 0xe1, 0xfc, 0xd7, 0xd8, 0x28, 0x06,
++			0x63, 0x9b, 0xce, 0x61, 0x24, 0x79, 0xc0, 0x70,
++			0x52, 0xd0, 0xb6, 0xd4, 0x28, 0x95, 0x24, 0x87,
++			0x03, 0x1f, 0xb7, 0x9a, 0xda, 0xa3, 0xfb, 0x52,
++			0x5b, 0x68, 0xe7, 0x4c, 0x8c, 0x24, 0xe1, 0x42,
++			0xf7, 0xd5, 0xfd, 0xad, 0x06, 0x32, 0x9f, 0xba,
++			0xc1, 0xfc, 0xdd, 0xc6, 0xfc, 0xfc, 0xb3, 0x38,
++			0x74, 0x56, 0x58, 0x40, 0x02, 0x37, 0x52, 0x2c,
++			0x55, 0xcc, 0xb3, 0x9e, 0x7a, 0xe9, 0xd4, 0x38,
++			0x41, 0x5e, 0x0c, 0x35, 0xe2, 0x11, 0xd1, 0x13,
++			0xf8, 0xb7, 0x8d, 0x72, 0x6b, 0x22, 0x2a, 0xb0,
++			0xdb, 0x08, 0xba, 0x35, 0xb9, 0x3f, 0xc8, 0xd3,
++			0x24, 0x90, 0xec, 0x58, 0xd2, 0x09, 0xc7, 0x2d,
++			0xed, 0x38, 0x80, 0x36, 0x72, 0x43, 0x27, 0x49,
++			0x4a, 0x80, 0x8a, 0xa2, 0xe8, 0xd3, 0xda, 0x30,
++			0x7d, 0xb6, 0x82, 0x37, 0x86, 0x92, 0x86, 0x3e,
++			0x08, 0xb2, 0x28, 0x5a, 0x55, 0x44, 0x24, 0x7d,
++			0x40, 0x48, 0x8a, 0xb6, 0x89, 0x58, 0x08, 0xa0,
++			0xd6, 0x6d, 0x3a, 0x17, 0xbf, 0xf6, 0x54, 0xa2,
++			0xf5, 0xd3, 0x8c, 0x0f, 0x78, 0x12, 0x57, 0x8b,
++			0xd5, 0xc2, 0xfd, 0x58, 0x5b, 0x7f, 0x38, 0xe3,
++			0xcc, 0xb7, 0x7c, 0x48, 0xb3, 0x20, 0xe8, 0x81,
++			0x14, 0x32, 0x45, 0x05, 0xe0, 0xdb, 0x9f, 0x75,
++			0x85, 0xb4, 0x6a, 0xfc, 0x95, 0xe3, 0x54, 0x22,
++			0x12, 0xee, 0x30, 0xfe, 0xd8, 0x30, 0xef, 0x34,
++			0x50, 0xab, 0x46, 0x30, 0x98, 0x2f, 0xb7, 0xc0,
++			0x15, 0xa2, 0x83, 0xb6, 0xf2, 0x06, 0x21, 0xa2,
++			0xc3, 0x26, 0x37, 0x14, 0xd1, 0x4d, 0xb5, 0x10,
++			0x52, 0x76, 0x4d, 0x6a, 0xee, 0xb5, 0x2b, 0x15,
++			0xb7, 0xf9, 0x51, 0xe8, 0x2a, 0xaf, 0xc7, 0xfa,
++			0x77, 0xaf, 0xb0, 0x05, 0x4d, 0xd1, 0x68, 0x8e,
++			0x74, 0x05, 0x9f, 0x9d, 0x93, 0xa5, 0x3e, 0x7f,
++			0x4e, 0x5f, 0x9d, 0xcb, 0x09, 0xc7, 0x83, 0xe3,
++			0x02, 0x9d, 0x27, 0x1f, 0xef, 0x85, 0x05, 0x8d,
++			0xec, 0x55, 0x88, 0x0f, 0x0d, 0x7c, 0x4c, 0xe8,
++			0xa1, 0x75, 0xa0, 0xd8, 0x06, 0x47, 0x14, 0xef,
++			0xaa, 0x61, 0xcf, 0x26, 0x15, 0xad, 0xd8, 0xa3,
++			0xaa, 0x75, 0xf2, 0x78, 0x4a, 0x5a, 0x61, 0xdf,
++			0x8b, 0xc7, 0x04, 0xbc, 0xb2, 0x32, 0xd2, 0x7e,
++			0x42, 0xee, 0xb4, 0x2f, 0x51, 0xff, 0x7b, 0x2e,
++			0xd3, 0x02, 0xe8, 0xdc, 0x5d, 0x0d, 0x50, 0xdc,
++			0xae, 0xb7, 0x46, 0xf9, 0xa8, 0xe6, 0xd0, 0x16,
++			0xcc, 0xe6, 0x2c, 0x81, 0xc7, 0xad, 0xe9, 0xf0,
++			0x05, 0x72, 0x6d, 0x3d, 0x0a, 0x7a, 0xa9, 0x02,
++			0xac, 0x82, 0x93, 0x6e, 0xb6, 0x1c, 0x28, 0xfc,
++			0x44, 0x12, 0xfb, 0x73, 0x77, 0xd4, 0x13, 0x39,
++			0x29, 0x88, 0x8a, 0xf3, 0x5c, 0xa6, 0x36, 0xa0,
++			0x2a, 0xed, 0x7e, 0xb1, 0x1d, 0xd6, 0x4c, 0x6b,
++			0x41, 0x01, 0x18, 0x5d, 0x5d, 0x07, 0x97, 0xa6,
++			0x4b, 0xef, 0x31, 0x18, 0xea, 0xac, 0xb1, 0x84,
++			0x21, 0xed, 0xda, 0x86,
++		},
++		.rlen = 4100,
++	},
++};
++
++static struct cipher_testvec aes_ctr_dec_tv_template[] = {
++	{ /* From RFC 3686 */
++		.key	= { 0xae, 0x68, 0x52, 0xf8, 0x12, 0x10, 0x67, 0xcc,
++			    0x4b, 0xf7, 0xa5, 0x76, 0x55, 0x77, 0xf3, 0x9e,
++			    0x00, 0x00, 0x00, 0x30 },
++		.klen	= 20,
++		.iv 	= { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
++		.input	= { 0xe4, 0x09, 0x5d, 0x4f, 0xb7, 0xa7, 0xb3, 0x79,
++			    0x2d, 0x61, 0x75, 0xa3, 0x26, 0x13, 0x11, 0xb8 },
++		.ilen	= 16,
++		.result	= { "Single block msg" },
++		.rlen	= 16,
++	}, {
++		.key	= { 0x7e, 0x24, 0x06, 0x78, 0x17, 0xfa, 0xe0, 0xd7,
++			    0x43, 0xd6, 0xce, 0x1f, 0x32, 0x53, 0x91, 0x63,
++			    0x00, 0x6c, 0xb6, 0xdb },
++		.klen	= 20,
++		.iv 	= { 0xc0, 0x54, 0x3b, 0x59, 0xda, 0x48, 0xd9, 0x0b },
++		.input	= { 0x51, 0x04, 0xa1, 0x06, 0x16, 0x8a, 0x72, 0xd9,
++			    0x79, 0x0d, 0x41, 0xee, 0x8e, 0xda, 0xd3, 0x88,
++			    0xeb, 0x2e, 0x1e, 0xfc, 0x46, 0xda, 0x57, 0xc8,
++			    0xfc, 0xe6, 0x30, 0xdf, 0x91, 0x41, 0xbe, 0x28 },
++		.ilen 	= 32,
++		.result	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
++		.rlen	= 32,
++	}, {
++		.key 	= { 0x16, 0xaf, 0x5b, 0x14, 0x5f, 0xc9, 0xf5, 0x79,
++			    0xc1, 0x75, 0xf9, 0x3e, 0x3b, 0xfb, 0x0e, 0xed,
++			    0x86, 0x3d, 0x06, 0xcc, 0xfd, 0xb7, 0x85, 0x15,
++			    0x00, 0x00, 0x00, 0x48 },
++		.klen 	= 28,
++		.iv	= { 0x36, 0x73, 0x3c, 0x14, 0x7d, 0x6d, 0x93, 0xcb },
++		.input	= { 0x4b, 0x55, 0x38, 0x4f, 0xe2, 0x59, 0xc9, 0xc8,
++			    0x4e, 0x79, 0x35, 0xa0, 0x03, 0xcb, 0xe9, 0x28 },
++		.ilen 	= 16,
++		.result	= { "Single block msg" },
++		.rlen	= 16,
++	}, {
++		.key	= { 0x7c, 0x5c, 0xb2, 0x40, 0x1b, 0x3d, 0xc3, 0x3c,
++			    0x19, 0xe7, 0x34, 0x08, 0x19, 0xe0, 0xf6, 0x9c,
++			    0x67, 0x8c, 0x3d, 0xb8, 0xe6, 0xf6, 0xa9, 0x1a,
++			    0x00, 0x96, 0xb0, 0x3b },
++		.klen	= 28,
++		.iv 	= { 0x02, 0x0c, 0x6e, 0xad, 0xc2, 0xcb, 0x50, 0x0d },
++		.input	= { 0x45, 0x32, 0x43, 0xfc, 0x60, 0x9b, 0x23, 0x32,
++			    0x7e, 0xdf, 0xaa, 0xfa, 0x71, 0x31, 0xcd, 0x9f,
++			    0x84, 0x90, 0x70, 0x1c, 0x5a, 0xd4, 0xa7, 0x9c,
++			    0xfc, 0x1f, 0xe0, 0xff, 0x42, 0xf4, 0xfb, 0x00 },
++		.ilen	= 32,
++		.result	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
++		.rlen 	= 32,
++	}, { 
++		.key 	= { 0x77, 0x6b, 0xef, 0xf2, 0x85, 0x1d, 0xb0, 0x6f,
++			    0x4c, 0x8a, 0x05, 0x42, 0xc8, 0x69, 0x6f, 0x6c,
++			    0x6a, 0x81, 0xaf, 0x1e, 0xec, 0x96, 0xb4, 0xd3,
++			    0x7f, 0xc1, 0xd6, 0x89, 0xe6, 0xc1, 0xc1, 0x04,
++			    0x00, 0x00, 0x00, 0x60 },
++		.klen	= 36,
++		.iv 	= { 0xdb, 0x56, 0x72, 0xc9, 0x7a, 0xa8, 0xf0, 0xb2 },
++		.input	= { 0x14, 0x5a, 0xd0, 0x1d, 0xbf, 0x82, 0x4e, 0xc7,
++			    0x56, 0x08, 0x63, 0xdc, 0x71, 0xe3, 0xe0, 0xc0 },
++		.ilen	= 16,
++		.result	= { "Single block msg" },
++		.rlen 	= 16,
++	}, {
++		.key	= { 0xf6, 0xd6, 0x6d, 0x6b, 0xd5, 0x2d, 0x59, 0xbb,
++			    0x07, 0x96, 0x36, 0x58, 0x79, 0xef, 0xf8, 0x86,
++			    0xc6, 0x6d, 0xd5, 0x1a, 0x5b, 0x6a, 0x99, 0x74,
++			    0x4b, 0x50, 0x59, 0x0c, 0x87, 0xa2, 0x38, 0x84,
++			    0x00, 0xfa, 0xac, 0x24 },
++		.klen 	= 36,
++		.iv	= { 0xc1, 0x58, 0x5e, 0xf1, 0x5a, 0x43, 0xd8, 0x75 },
++		.input	= { 0xf0, 0x5e, 0x23, 0x1b, 0x38, 0x94, 0x61, 0x2c,
++			    0x49, 0xee, 0x00, 0x0b, 0x80, 0x4e, 0xb2, 0xa9,
++			    0xb8, 0x30, 0x6b, 0x50, 0x8f, 0x83, 0x9d, 0x6a,
++			    0x55, 0x30, 0x83, 0x1d, 0x93, 0x44, 0xaf, 0x1c },
++		.ilen	= 32,
++		.result	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
++		.rlen	= 32,
++	},
++};
++
++static struct aead_testvec aes_gcm_enc_tv_template[] = {
++	{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
++		.klen	= 16,
++		.result	= { 0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61,
++			    0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a },
++		.rlen	= 16,
++	}, {
++		.klen	= 16,
++		.ilen	= 16,
++		.result = { 0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
++			    0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78,
++			    0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
++			    0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf },
++		.rlen	= 32,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
++		.klen	= 16,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
++		.ilen	= 64,
++		.result = { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
++			    0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
++			    0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
++			    0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
++			    0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
++			    0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
++			    0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
++			    0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85,
++			    0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
++			    0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
++		.rlen	= 80,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
++		.klen	= 16,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39 },
++		.ilen	= 60,
++		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xab, 0xad, 0xda, 0xd2 },
++		.alen	= 20,
++		.result = { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
++			    0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
++			    0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
++			    0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
++			    0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
++			    0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
++			    0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
++			    0x3d, 0x58, 0xe0, 0x91,
++			    0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb,
++			    0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 },
++		.rlen	= 76,
++	}, {
++		.klen	= 24,
++		.result	= { 0xcd, 0x33, 0xb2, 0x8a, 0xc7, 0x73, 0xf7, 0x4b,
++			    0xa0, 0x0e, 0xd1, 0xf3, 0x12, 0x57, 0x24, 0x35 },
++		.rlen	= 16,
++	}, {
++		.klen	= 24,
++		.ilen	= 16,
++		.result = { 0x98, 0xe7, 0x24, 0x7c, 0x07, 0xf0, 0xfe, 0x41,
++			    0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00,
++			    0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab,
++			    0x8e, 0xf4, 0xd4, 0x58, 0x75, 0x14, 0xf0, 0xfb },
++		.rlen	= 32,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
++			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c },
++		.klen	= 24,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
++		.ilen	= 64,
++		.result = { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41,
++			    0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57,
++			    0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84,
++			    0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c,
++			    0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
++			    0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47,
++			    0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9,
++			    0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56,
++			    0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf,
++			    0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 },
++		.rlen	= 80,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
++			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c },
++		.klen	= 24,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39 },
++		.ilen	= 60,
++		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xab, 0xad, 0xda, 0xd2 },
++		.alen	= 20,
++		.result = { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41,
++			    0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57,
++			    0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84,
++			    0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c,
++			    0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
++			    0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47,
++			    0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9,
++			    0xcc, 0xda, 0x27, 0x10,
++			    0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f,
++			    0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c },
++		.rlen	= 76,
++		.np	= 2,
++		.tap	= { 32, 28 },
++		.anp	= 2,
++		.atap	= { 8, 12 }
++	}, {
++		.klen	= 32,
++		.result	= { 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9,
++			    0xa9, 0x63, 0xb4, 0xf1, 0xc4, 0xcb, 0x73, 0x8b },
++		.rlen	= 16,
++	}
++};
++
++static struct aead_testvec aes_gcm_dec_tv_template[] = {
++	{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
++		.klen	= 32,
++		.input	= { 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e,
++			    0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18,
++			    0xd0, 0xd1, 0xc8, 0xa7, 0x99, 0x99, 0x6b, 0xf0,
++			    0x26, 0x5b, 0x98, 0xb5, 0xd4, 0x8a, 0xb9, 0x19 },
++		.ilen	= 32,
++		.rlen	= 16,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
++			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
++		.klen	= 32,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0x52, 0x2d, 0xc1, 0xf0, 0x99, 0x56, 0x7d, 0x07,
++			    0xf4, 0x7f, 0x37, 0xa3, 0x2a, 0x84, 0x42, 0x7d,
++			    0x64, 0x3a, 0x8c, 0xdc, 0xbf, 0xe5, 0xc0, 0xc9,
++			    0x75, 0x98, 0xa2, 0xbd, 0x25, 0x55, 0xd1, 0xaa,
++			    0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d,
++			    0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38,
++			    0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a,
++			    0xbc, 0xc9, 0xf6, 0x62, 0x89, 0x80, 0x15, 0xad,
++			    0xb0, 0x94, 0xda, 0xc5, 0xd9, 0x34, 0x71, 0xbd,
++			    0xec, 0x1a, 0x50, 0x22, 0x70, 0xe3, 0xcc, 0x6c },
++		.ilen	= 80,
++		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
++		.rlen	= 64,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
++			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
++		.klen	= 32,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0x52, 0x2d, 0xc1, 0xf0, 0x99, 0x56, 0x7d, 0x07,
++			    0xf4, 0x7f, 0x37, 0xa3, 0x2a, 0x84, 0x42, 0x7d,
++			    0x64, 0x3a, 0x8c, 0xdc, 0xbf, 0xe5, 0xc0, 0xc9,
++			    0x75, 0x98, 0xa2, 0xbd, 0x25, 0x55, 0xd1, 0xaa,
++			    0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d,
++			    0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38,
++			    0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a,
++			    0xbc, 0xc9, 0xf6, 0x62,
++			    0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68,
++			    0xcd, 0xdf, 0x88, 0x53, 0xbb, 0x2d, 0x55, 0x1b },
++		.ilen	= 76,
++		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xab, 0xad, 0xda, 0xd2 },
++		.alen	= 20,
++		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39 },
++		.rlen	= 60,
++		.np     = 2,
++		.tap    = { 48, 28 },
++		.anp	= 3,
++		.atap	= { 8, 8, 4 }
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
++		.klen	= 16,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
++			    0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
++			    0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
++			    0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
++			    0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
++			    0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
++			    0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
++			    0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85,
++			    0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
++			    0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
++		.ilen	= 80,
++		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
++		.rlen	= 64,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
++		.klen	= 16,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
++			    0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
++			    0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
++			    0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
++			    0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
++			    0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
++			    0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
++			    0x3d, 0x58, 0xe0, 0x91,
++			    0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb,
++			    0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 },
++		.ilen	= 76,
++		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xab, 0xad, 0xda, 0xd2 },
++		.alen	= 20,
++		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39 },
++		.rlen	= 60,
++	}, {
++		.klen	= 24,
++		.input	= { 0x98, 0xe7, 0x24, 0x7c, 0x07, 0xf0, 0xfe, 0x41,
++			    0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00,
++			    0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab,
++			    0x8e, 0xf4, 0xd4, 0x58, 0x75, 0x14, 0xf0, 0xfb },
++		.ilen	= 32,
++		.rlen	= 16,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
++			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c },
++		.klen	= 24,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41,
++			    0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57,
++			    0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84,
++			    0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c,
++			    0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
++			    0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47,
++			    0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9,
++			    0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56,
++			    0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf,
++			    0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 },
++		.ilen	= 80,
++		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
++		.rlen	= 64,
++	}, {
++		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
++			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
++			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c },
++		.klen	= 24,
++		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
++			    0xde, 0xca, 0xf8, 0x88 },
++		.input	= { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41,
++			    0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57,
++			    0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84,
++			    0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c,
++			    0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
++			    0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47,
++			    0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9,
++			    0xcc, 0xda, 0x27, 0x10,
++			    0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f,
++			    0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c },
++		.ilen	= 76,
++		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
++			    0xab, 0xad, 0xda, 0xd2 },
++		.alen	= 20,
++		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
++			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
++			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
++			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
++			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
++			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
++			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
++			    0xba, 0x63, 0x7b, 0x39 },
++		.rlen	= 60,
++	}
++};
++
++static struct aead_testvec aes_ccm_enc_tv_template[] = {
++	{ /* From RFC 3610 */
++		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00,
++			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
++		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
++		.alen	= 8,
++		.input	= { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
++		.ilen	= 23,
++		.result	= { 0x58, 0x8c, 0x97, 0x9a, 0x61, 0xc6, 0x63, 0xd2,
++			    0xf0, 0x66, 0xd0, 0xc2, 0xc0, 0xf9, 0x89, 0x80,
++			    0x6d, 0x5f, 0x6b, 0x61, 0xda, 0xc3, 0x84, 0x17,
++			    0xe8, 0xd1, 0x2c, 0xfd, 0xf9, 0x26, 0xe0 },
++		.rlen	= 31,
++	}, {
++		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x07, 0x06, 0x05, 0x04,
++			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
++		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b },
++		.alen	= 12,
++		.input	= { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
++			    0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
++			    0x1c, 0x1d, 0x1e, 0x1f },
++		.ilen	= 20,
++		.result	= { 0xdc, 0xf1, 0xfb, 0x7b, 0x5d, 0x9e, 0x23, 0xfb,
++			    0x9d, 0x4e, 0x13, 0x12, 0x53, 0x65, 0x8a, 0xd8,
++			    0x6e, 0xbd, 0xca, 0x3e, 0x51, 0xe8, 0x3f, 0x07,
++			    0x7d, 0x9c, 0x2d, 0x93 },
++		.rlen	= 28,
++	}, {
++		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x0b, 0x0a, 0x09, 0x08,
++			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
++		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
++		.alen	= 8,
++		.input	= { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++			    0x20 },
++		.ilen	= 25,
++		.result	= { 0x82, 0x53, 0x1a, 0x60, 0xcc, 0x24, 0x94, 0x5a,
++			    0x4b, 0x82, 0x79, 0x18, 0x1a, 0xb5, 0xc8, 0x4d,
++			    0xf2, 0x1c, 0xe7, 0xf9, 0xb7, 0x3f, 0x42, 0xe1,
++			    0x97, 0xea, 0x9c, 0x07, 0xe5, 0x6b, 0x5e, 0xb1,
++			    0x7e, 0x5f, 0x4e },
++		.rlen	= 35,
++	}, {
++		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x0c, 0x0b, 0x0a, 0x09,
++			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
++		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b },
++		.alen	= 12,
++		.input	= { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
++			    0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
++			    0x1c, 0x1d, 0x1e },
++		.ilen	= 19,
++		.result	= { 0x07, 0x34, 0x25, 0x94, 0x15, 0x77, 0x85, 0x15,
++			    0x2b, 0x07, 0x40, 0x98, 0x33, 0x0a, 0xbb, 0x14,
++			    0x1b, 0x94, 0x7b, 0x56, 0x6a, 0xa9, 0x40, 0x6b,
++			    0x4d, 0x99, 0x99, 0x88, 0xdd },
++		.rlen	= 29,
++	}, {
++		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
++			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x33, 0x56, 0x8e, 0xf7, 0xb2, 0x63,
++			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
++		.assoc	= { 0x63, 0x01, 0x8f, 0x76, 0xdc, 0x8a, 0x1b, 0xcb },
++		.alen	= 8,
++		.input	= { 0x90, 0x20, 0xea, 0x6f, 0x91, 0xbd, 0xd8, 0x5a,
++			    0xfa, 0x00, 0x39, 0xba, 0x4b, 0xaf, 0xf9, 0xbf,
++			    0xb7, 0x9c, 0x70, 0x28, 0x94, 0x9c, 0xd0, 0xec },
++		.ilen	= 24,
++		.result	= { 0x4c, 0xcb, 0x1e, 0x7c, 0xa9, 0x81, 0xbe, 0xfa,
++			    0xa0, 0x72, 0x6c, 0x55, 0xd3, 0x78, 0x06, 0x12,
++			    0x98, 0xc8, 0x5c, 0x92, 0x81, 0x4a, 0xbc, 0x33,
++			    0xc5, 0x2e, 0xe8, 0x1d, 0x7d, 0x77, 0xc0, 0x8a },
++		.rlen	= 32,
++	}, {
++		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
++			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0xd5, 0x60, 0x91, 0x2d, 0x3f, 0x70,
++			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
++		.assoc	= { 0xcd, 0x90, 0x44, 0xd2, 0xb7, 0x1f, 0xdb, 0x81,
++			    0x20, 0xea, 0x60, 0xc0 },
++		.alen	= 12,
++		.input	= { 0x64, 0x35, 0xac, 0xba, 0xfb, 0x11, 0xa8, 0x2e,
++			    0x2f, 0x07, 0x1d, 0x7c, 0xa4, 0xa5, 0xeb, 0xd9,
++			    0x3a, 0x80, 0x3b, 0xa8, 0x7f },
++		.ilen	= 21,
++		.result	= { 0x00, 0x97, 0x69, 0xec, 0xab, 0xdf, 0x48, 0x62,
++			    0x55, 0x94, 0xc5, 0x92, 0x51, 0xe6, 0x03, 0x57,
++			    0x22, 0x67, 0x5e, 0x04, 0xc8, 0x47, 0x09, 0x9e,
++			    0x5a, 0xe0, 0x70, 0x45, 0x51 },
++		.rlen	= 29,
++	}, {
++		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
++			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x42, 0xff, 0xf8, 0xf1, 0x95, 0x1c,
++			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
++		.assoc	= { 0xd8, 0x5b, 0xc7, 0xe6, 0x9f, 0x94, 0x4f, 0xb8 },
++		.alen	= 8,
++		.input	= { 0x8a, 0x19, 0xb9, 0x50, 0xbc, 0xf7, 0x1a, 0x01,
++			    0x8e, 0x5e, 0x67, 0x01, 0xc9, 0x17, 0x87, 0x65,
++			    0x98, 0x09, 0xd6, 0x7d, 0xbe, 0xdd, 0x18 },
++		.ilen	= 23,
++		.result	= { 0xbc, 0x21, 0x8d, 0xaa, 0x94, 0x74, 0x27, 0xb6,
++			    0xdb, 0x38, 0x6a, 0x99, 0xac, 0x1a, 0xef, 0x23,
++			    0xad, 0xe0, 0xb5, 0x29, 0x39, 0xcb, 0x6a, 0x63,
++			    0x7c, 0xf9, 0xbe, 0xc2, 0x40, 0x88, 0x97, 0xc6,
++			    0xba },
++		.rlen	= 33,
++	},
++};
++
++static struct aead_testvec aes_ccm_dec_tv_template[] = {
++	{ /* From RFC 3610 */
++		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00,
++			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
++		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
++		.alen	= 8,
++		.input	= { 0x58, 0x8c, 0x97, 0x9a, 0x61, 0xc6, 0x63, 0xd2,
++			    0xf0, 0x66, 0xd0, 0xc2, 0xc0, 0xf9, 0x89, 0x80,
++			    0x6d, 0x5f, 0x6b, 0x61, 0xda, 0xc3, 0x84, 0x17,
++			    0xe8, 0xd1, 0x2c, 0xfd, 0xf9, 0x26, 0xe0 },
++		.ilen	= 31,
++		.result	= { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
++		.rlen	= 23,
++	}, {
++		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x07, 0x06, 0x05, 0x04,
++			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
++		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b },
++		.alen	= 12,
++		.input	= { 0xdc, 0xf1, 0xfb, 0x7b, 0x5d, 0x9e, 0x23, 0xfb,
++			    0x9d, 0x4e, 0x13, 0x12, 0x53, 0x65, 0x8a, 0xd8,
++			    0x6e, 0xbd, 0xca, 0x3e, 0x51, 0xe8, 0x3f, 0x07,
++			    0x7d, 0x9c, 0x2d, 0x93 },
++		.ilen	= 28,
++		.result	= { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
++			    0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
++			    0x1c, 0x1d, 0x1e, 0x1f },
++		.rlen	= 20,
++	}, {
++		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x0b, 0x0a, 0x09, 0x08,
++			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
++		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
++		.alen	= 8,
++		.input	= { 0x82, 0x53, 0x1a, 0x60, 0xcc, 0x24, 0x94, 0x5a,
++			    0x4b, 0x82, 0x79, 0x18, 0x1a, 0xb5, 0xc8, 0x4d,
++			    0xf2, 0x1c, 0xe7, 0xf9, 0xb7, 0x3f, 0x42, 0xe1,
++			    0x97, 0xea, 0x9c, 0x07, 0xe5, 0x6b, 0x5e, 0xb1,
++			    0x7e, 0x5f, 0x4e },
++		.ilen	= 35,
++		.result	= { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++			    0x20 },
++		.rlen	= 25,
++	}, {
++		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x0c, 0x0b, 0x0a, 0x09,
++			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
++		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0a, 0x0b },
++		.alen	= 12,
++		.input	= { 0x07, 0x34, 0x25, 0x94, 0x15, 0x77, 0x85, 0x15,
++			    0x2b, 0x07, 0x40, 0x98, 0x33, 0x0a, 0xbb, 0x14,
++			    0x1b, 0x94, 0x7b, 0x56, 0x6a, 0xa9, 0x40, 0x6b,
++			    0x4d, 0x99, 0x99, 0x88, 0xdd },
++		.ilen	= 29,
++		.result	= { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
++			    0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
++			    0x1c, 0x1d, 0x1e },
++		.rlen	= 19,
++	}, {
++		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
++			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x33, 0x56, 0x8e, 0xf7, 0xb2, 0x63,
++			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
++		.assoc	= { 0x63, 0x01, 0x8f, 0x76, 0xdc, 0x8a, 0x1b, 0xcb },
++		.alen	= 8,
++		.input	= { 0x4c, 0xcb, 0x1e, 0x7c, 0xa9, 0x81, 0xbe, 0xfa,
++			    0xa0, 0x72, 0x6c, 0x55, 0xd3, 0x78, 0x06, 0x12,
++			    0x98, 0xc8, 0x5c, 0x92, 0x81, 0x4a, 0xbc, 0x33,
++			    0xc5, 0x2e, 0xe8, 0x1d, 0x7d, 0x77, 0xc0, 0x8a },
++		.ilen	= 32,
++		.result	= { 0x90, 0x20, 0xea, 0x6f, 0x91, 0xbd, 0xd8, 0x5a,
++			    0xfa, 0x00, 0x39, 0xba, 0x4b, 0xaf, 0xf9, 0xbf,
++			    0xb7, 0x9c, 0x70, 0x28, 0x94, 0x9c, 0xd0, 0xec },
++		.rlen	= 24,
++	}, {
++		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
++			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0xd5, 0x60, 0x91, 0x2d, 0x3f, 0x70,
++			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
++		.assoc	= { 0xcd, 0x90, 0x44, 0xd2, 0xb7, 0x1f, 0xdb, 0x81,
++			    0x20, 0xea, 0x60, 0xc0 },
++		.alen	= 12,
++		.input	= { 0x00, 0x97, 0x69, 0xec, 0xab, 0xdf, 0x48, 0x62,
++			    0x55, 0x94, 0xc5, 0x92, 0x51, 0xe6, 0x03, 0x57,
++			    0x22, 0x67, 0x5e, 0x04, 0xc8, 0x47, 0x09, 0x9e,
++			    0x5a, 0xe0, 0x70, 0x45, 0x51 },
++		.ilen	= 29,
++		.result	= { 0x64, 0x35, 0xac, 0xba, 0xfb, 0x11, 0xa8, 0x2e,
++			    0x2f, 0x07, 0x1d, 0x7c, 0xa4, 0xa5, 0xeb, 0xd9,
++			    0x3a, 0x80, 0x3b, 0xa8, 0x7f },
++		.rlen	= 21,
++	}, {
++		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
++			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
++		.klen	= 16,
++		.iv	= { 0x01, 0x00, 0x42, 0xff, 0xf8, 0xf1, 0x95, 0x1c,
++			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
++		.assoc	= { 0xd8, 0x5b, 0xc7, 0xe6, 0x9f, 0x94, 0x4f, 0xb8 },
++		.alen	= 8,
++		.input	= { 0xbc, 0x21, 0x8d, 0xaa, 0x94, 0x74, 0x27, 0xb6,
++			    0xdb, 0x38, 0x6a, 0x99, 0xac, 0x1a, 0xef, 0x23,
++			    0xad, 0xe0, 0xb5, 0x29, 0x39, 0xcb, 0x6a, 0x63,
++			    0x7c, 0xf9, 0xbe, 0xc2, 0x40, 0x88, 0x97, 0xc6,
++			    0xba },
++		.ilen	= 33,
++		.result	= { 0x8a, 0x19, 0xb9, 0x50, 0xbc, 0xf7, 0x1a, 0x01,
++			    0x8e, 0x5e, 0x67, 0x01, 0xc9, 0x17, 0x87, 0x65,
++			    0x98, 0x09, 0xd6, 0x7d, 0xbe, 0xdd, 0x18 },
++		.rlen	= 23,
++	},
++};
++
+ /* Cast5 test vectors from RFC 2144 */
+ #define CAST5_ENC_TEST_VECTORS	3
+ #define CAST5_DEC_TEST_VECTORS	3
+@@ -4317,6 +6425,1211 @@ static struct cipher_testvec seed_dec_tv_template[] = {
+ 	}
+ };
+ 
++#define SALSA20_STREAM_ENC_TEST_VECTORS 5
++static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
++	/*
++	* Test vectors from verified.test-vectors submitted to ECRYPT.
++	* They are truncated to sizes 39, 64, 111 and 129 to test a variety
++	* of input lengths.
++	*/
++	{ /* Set 3, vector 0 */
++		.key	= {
++			    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			    0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
++			  },
++		.klen	= 16,
++		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
++		.input	= {
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			  },
++		.ilen	= 39,
++		.result	= {
++			    0x2D, 0xD5, 0xC3, 0xF7, 0xBA, 0x2B, 0x20, 0xF7,
++                            0x68, 0x02, 0x41, 0x0C, 0x68, 0x86, 0x88, 0x89,
++                            0x5A, 0xD8, 0xC1, 0xBD, 0x4E, 0xA6, 0xC9, 0xB1,
++                            0x40, 0xFB, 0x9B, 0x90, 0xE2, 0x10, 0x49, 0xBF,
++                            0x58, 0x3F, 0x52, 0x79, 0x70, 0xEB, 0xC1,
++			},
++		.rlen	= 39,
++	}, { /* Set 5, vector 0 */
++		.key	= {
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++			  },
++		.klen	= 16,
++		.iv     = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
++		.input	= {
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			  },
++		.ilen	= 64,
++		.result	= {
++			    0xB6, 0x6C, 0x1E, 0x44, 0x46, 0xDD, 0x95, 0x57,
++                            0xE5, 0x78, 0xE2, 0x23, 0xB0, 0xB7, 0x68, 0x01,
++                            0x7B, 0x23, 0xB2, 0x67, 0xBB, 0x02, 0x34, 0xAE,
++                            0x46, 0x26, 0xBF, 0x44, 0x3F, 0x21, 0x97, 0x76,
++                            0x43, 0x6F, 0xB1, 0x9F, 0xD0, 0xE8, 0x86, 0x6F,
++                            0xCD, 0x0D, 0xE9, 0xA9, 0x53, 0x8F, 0x4A, 0x09,
++                            0xCA, 0x9A, 0xC0, 0x73, 0x2E, 0x30, 0xBC, 0xF9,
++                            0x8E, 0x4F, 0x13, 0xE4, 0xB9, 0xE2, 0x01, 0xD9,
++			  },
++		.rlen	= 64,
++	}, { /* Set 3, vector 27 */
++		.key	= {
++			    0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22,
++			    0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A,
++                            0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32,
++			    0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A
++			  },
++		.klen	= 32,
++		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
++		.input	= {
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			  },
++		.ilen	= 111,
++		.result	= {
++			    0xAE, 0x39, 0x50, 0x8E, 0xAC, 0x9A, 0xEC, 0xE7,
++                            0xBF, 0x97, 0xBB, 0x20, 0xB9, 0xDE, 0xE4, 0x1F,
++                            0x87, 0xD9, 0x47, 0xF8, 0x28, 0x91, 0x35, 0x98,
++                            0xDB, 0x72, 0xCC, 0x23, 0x29, 0x48, 0x56, 0x5E,
++                            0x83, 0x7E, 0x0B, 0xF3, 0x7D, 0x5D, 0x38, 0x7B,
++                            0x2D, 0x71, 0x02, 0xB4, 0x3B, 0xB5, 0xD8, 0x23,
++                            0xB0, 0x4A, 0xDF, 0x3C, 0xEC, 0xB6, 0xD9, 0x3B,
++                            0x9B, 0xA7, 0x52, 0xBE, 0xC5, 0xD4, 0x50, 0x59,
++
++                            0x15, 0x14, 0xB4, 0x0E, 0x40, 0xE6, 0x53, 0xD1,
++                            0x83, 0x9C, 0x5B, 0xA0, 0x92, 0x29, 0x6B, 0x5E,
++                            0x96, 0x5B, 0x1E, 0x2F, 0xD3, 0xAC, 0xC1, 0x92,
++                            0xB1, 0x41, 0x3F, 0x19, 0x2F, 0xC4, 0x3B, 0xC6,
++                            0x95, 0x46, 0x45, 0x54, 0xE9, 0x75, 0x03, 0x08,
++                            0x44, 0xAF, 0xE5, 0x8A, 0x81, 0x12, 0x09,
++			  },
++		.rlen	= 111,
++
++	}, { /* Set 5, vector 27 */
++		.key	= {
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++			  },
++		.klen	= 32,
++		.iv     = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00 },
++		.input	= {
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++
++			    0x00,
++			  },
++		.ilen	= 129,
++		.result	= {
++			    0xD2, 0xDB, 0x1A, 0x5C, 0xF1, 0xC1, 0xAC, 0xDB,
++                            0xE8, 0x1A, 0x7A, 0x43, 0x40, 0xEF, 0x53, 0x43,
++                            0x5E, 0x7F, 0x4B, 0x1A, 0x50, 0x52, 0x3F, 0x8D,
++                            0x28, 0x3D, 0xCF, 0x85, 0x1D, 0x69, 0x6E, 0x60,
++                            0xF2, 0xDE, 0x74, 0x56, 0x18, 0x1B, 0x84, 0x10,
++                            0xD4, 0x62, 0xBA, 0x60, 0x50, 0xF0, 0x61, 0xF2,
++                            0x1C, 0x78, 0x7F, 0xC1, 0x24, 0x34, 0xAF, 0x58,
++                            0xBF, 0x2C, 0x59, 0xCA, 0x90, 0x77, 0xF3, 0xB0,
++
++                            0x5B, 0x4A, 0xDF, 0x89, 0xCE, 0x2C, 0x2F, 0xFC,
++                            0x67, 0xF0, 0xE3, 0x45, 0xE8, 0xB3, 0xB3, 0x75,
++                            0xA0, 0x95, 0x71, 0xA1, 0x29, 0x39, 0x94, 0xCA,
++                            0x45, 0x2F, 0xBD, 0xCB, 0x10, 0xB6, 0xBE, 0x9F,
++                            0x8E, 0xF9, 0xB2, 0x01, 0x0A, 0x5A, 0x0A, 0xB7,
++                            0x6B, 0x9D, 0x70, 0x8E, 0x4B, 0xD6, 0x2F, 0xCD,
++                            0x2E, 0x40, 0x48, 0x75, 0xE9, 0xE2, 0x21, 0x45,
++                            0x0B, 0xC9, 0xB6, 0xB5, 0x66, 0xBC, 0x9A, 0x59,
++
++                            0x5A,
++			  },
++		.rlen	= 129,
++	}, { /* large test vector generated using Crypto++ */
++		.key = {
++			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++		},
++		.klen = 32,
++		.iv = {
++			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++		},
++		.input = {
++			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++			0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++			0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++			0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
++			0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
++			0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
++			0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
++			0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
++			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
++			0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
++			0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
++			0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
++			0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
++			0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
++			0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
++			0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
++			0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
++			0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
++			0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
++			0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
++			0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++			0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
++			0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
++			0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
++			0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
++			0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
++			0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
++			0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
++			0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15,
++			0x18, 0x1b, 0x1e, 0x21, 0x24, 0x27, 0x2a, 0x2d,
++			0x30, 0x33, 0x36, 0x39, 0x3c, 0x3f, 0x42, 0x45,
++			0x48, 0x4b, 0x4e, 0x51, 0x54, 0x57, 0x5a, 0x5d,
++			0x60, 0x63, 0x66, 0x69, 0x6c, 0x6f, 0x72, 0x75,
++			0x78, 0x7b, 0x7e, 0x81, 0x84, 0x87, 0x8a, 0x8d,
++			0x90, 0x93, 0x96, 0x99, 0x9c, 0x9f, 0xa2, 0xa5,
++			0xa8, 0xab, 0xae, 0xb1, 0xb4, 0xb7, 0xba, 0xbd,
++			0xc0, 0xc3, 0xc6, 0xc9, 0xcc, 0xcf, 0xd2, 0xd5,
++			0xd8, 0xdb, 0xde, 0xe1, 0xe4, 0xe7, 0xea, 0xed,
++			0xf0, 0xf3, 0xf6, 0xf9, 0xfc, 0xff, 0x02, 0x05,
++			0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x1a, 0x1d,
++			0x20, 0x23, 0x26, 0x29, 0x2c, 0x2f, 0x32, 0x35,
++			0x38, 0x3b, 0x3e, 0x41, 0x44, 0x47, 0x4a, 0x4d,
++			0x50, 0x53, 0x56, 0x59, 0x5c, 0x5f, 0x62, 0x65,
++			0x68, 0x6b, 0x6e, 0x71, 0x74, 0x77, 0x7a, 0x7d,
++			0x80, 0x83, 0x86, 0x89, 0x8c, 0x8f, 0x92, 0x95,
++			0x98, 0x9b, 0x9e, 0xa1, 0xa4, 0xa7, 0xaa, 0xad,
++			0xb0, 0xb3, 0xb6, 0xb9, 0xbc, 0xbf, 0xc2, 0xc5,
++			0xc8, 0xcb, 0xce, 0xd1, 0xd4, 0xd7, 0xda, 0xdd,
++			0xe0, 0xe3, 0xe6, 0xe9, 0xec, 0xef, 0xf2, 0xf5,
++			0xf8, 0xfb, 0xfe, 0x01, 0x04, 0x07, 0x0a, 0x0d,
++			0x10, 0x13, 0x16, 0x19, 0x1c, 0x1f, 0x22, 0x25,
++			0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, 0x3d,
++			0x40, 0x43, 0x46, 0x49, 0x4c, 0x4f, 0x52, 0x55,
++			0x58, 0x5b, 0x5e, 0x61, 0x64, 0x67, 0x6a, 0x6d,
++			0x70, 0x73, 0x76, 0x79, 0x7c, 0x7f, 0x82, 0x85,
++			0x88, 0x8b, 0x8e, 0x91, 0x94, 0x97, 0x9a, 0x9d,
++			0xa0, 0xa3, 0xa6, 0xa9, 0xac, 0xaf, 0xb2, 0xb5,
++			0xb8, 0xbb, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd,
++			0xd0, 0xd3, 0xd6, 0xd9, 0xdc, 0xdf, 0xe2, 0xe5,
++			0xe8, 0xeb, 0xee, 0xf1, 0xf4, 0xf7, 0xfa, 0xfd,
++			0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1e, 0x23,
++			0x28, 0x2d, 0x32, 0x37, 0x3c, 0x41, 0x46, 0x4b,
++			0x50, 0x55, 0x5a, 0x5f, 0x64, 0x69, 0x6e, 0x73,
++			0x78, 0x7d, 0x82, 0x87, 0x8c, 0x91, 0x96, 0x9b,
++			0xa0, 0xa5, 0xaa, 0xaf, 0xb4, 0xb9, 0xbe, 0xc3,
++			0xc8, 0xcd, 0xd2, 0xd7, 0xdc, 0xe1, 0xe6, 0xeb,
++			0xf0, 0xf5, 0xfa, 0xff, 0x04, 0x09, 0x0e, 0x13,
++			0x18, 0x1d, 0x22, 0x27, 0x2c, 0x31, 0x36, 0x3b,
++			0x40, 0x45, 0x4a, 0x4f, 0x54, 0x59, 0x5e, 0x63,
++			0x68, 0x6d, 0x72, 0x77, 0x7c, 0x81, 0x86, 0x8b,
++			0x90, 0x95, 0x9a, 0x9f, 0xa4, 0xa9, 0xae, 0xb3,
++			0xb8, 0xbd, 0xc2, 0xc7, 0xcc, 0xd1, 0xd6, 0xdb,
++			0xe0, 0xe5, 0xea, 0xef, 0xf4, 0xf9, 0xfe, 0x03,
++			0x08, 0x0d, 0x12, 0x17, 0x1c, 0x21, 0x26, 0x2b,
++			0x30, 0x35, 0x3a, 0x3f, 0x44, 0x49, 0x4e, 0x53,
++			0x58, 0x5d, 0x62, 0x67, 0x6c, 0x71, 0x76, 0x7b,
++			0x80, 0x85, 0x8a, 0x8f, 0x94, 0x99, 0x9e, 0xa3,
++			0xa8, 0xad, 0xb2, 0xb7, 0xbc, 0xc1, 0xc6, 0xcb,
++			0xd0, 0xd5, 0xda, 0xdf, 0xe4, 0xe9, 0xee, 0xf3,
++			0xf8, 0xfd, 0x02, 0x07, 0x0c, 0x11, 0x16, 0x1b,
++			0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3e, 0x43,
++			0x48, 0x4d, 0x52, 0x57, 0x5c, 0x61, 0x66, 0x6b,
++			0x70, 0x75, 0x7a, 0x7f, 0x84, 0x89, 0x8e, 0x93,
++			0x98, 0x9d, 0xa2, 0xa7, 0xac, 0xb1, 0xb6, 0xbb,
++			0xc0, 0xc5, 0xca, 0xcf, 0xd4, 0xd9, 0xde, 0xe3,
++			0xe8, 0xed, 0xf2, 0xf7, 0xfc, 0x01, 0x06, 0x0b,
++			0x10, 0x15, 0x1a, 0x1f, 0x24, 0x29, 0x2e, 0x33,
++			0x38, 0x3d, 0x42, 0x47, 0x4c, 0x51, 0x56, 0x5b,
++			0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79, 0x7e, 0x83,
++			0x88, 0x8d, 0x92, 0x97, 0x9c, 0xa1, 0xa6, 0xab,
++			0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, 0xce, 0xd3,
++			0xd8, 0xdd, 0xe2, 0xe7, 0xec, 0xf1, 0xf6, 0xfb,
++			0x00, 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31,
++			0x38, 0x3f, 0x46, 0x4d, 0x54, 0x5b, 0x62, 0x69,
++			0x70, 0x77, 0x7e, 0x85, 0x8c, 0x93, 0x9a, 0xa1,
++			0xa8, 0xaf, 0xb6, 0xbd, 0xc4, 0xcb, 0xd2, 0xd9,
++			0xe0, 0xe7, 0xee, 0xf5, 0xfc, 0x03, 0x0a, 0x11,
++			0x18, 0x1f, 0x26, 0x2d, 0x34, 0x3b, 0x42, 0x49,
++			0x50, 0x57, 0x5e, 0x65, 0x6c, 0x73, 0x7a, 0x81,
++			0x88, 0x8f, 0x96, 0x9d, 0xa4, 0xab, 0xb2, 0xb9,
++			0xc0, 0xc7, 0xce, 0xd5, 0xdc, 0xe3, 0xea, 0xf1,
++			0xf8, 0xff, 0x06, 0x0d, 0x14, 0x1b, 0x22, 0x29,
++			0x30, 0x37, 0x3e, 0x45, 0x4c, 0x53, 0x5a, 0x61,
++			0x68, 0x6f, 0x76, 0x7d, 0x84, 0x8b, 0x92, 0x99,
++			0xa0, 0xa7, 0xae, 0xb5, 0xbc, 0xc3, 0xca, 0xd1,
++			0xd8, 0xdf, 0xe6, 0xed, 0xf4, 0xfb, 0x02, 0x09,
++			0x10, 0x17, 0x1e, 0x25, 0x2c, 0x33, 0x3a, 0x41,
++			0x48, 0x4f, 0x56, 0x5d, 0x64, 0x6b, 0x72, 0x79,
++			0x80, 0x87, 0x8e, 0x95, 0x9c, 0xa3, 0xaa, 0xb1,
++			0xb8, 0xbf, 0xc6, 0xcd, 0xd4, 0xdb, 0xe2, 0xe9,
++			0xf0, 0xf7, 0xfe, 0x05, 0x0c, 0x13, 0x1a, 0x21,
++			0x28, 0x2f, 0x36, 0x3d, 0x44, 0x4b, 0x52, 0x59,
++			0x60, 0x67, 0x6e, 0x75, 0x7c, 0x83, 0x8a, 0x91,
++			0x98, 0x9f, 0xa6, 0xad, 0xb4, 0xbb, 0xc2, 0xc9,
++			0xd0, 0xd7, 0xde, 0xe5, 0xec, 0xf3, 0xfa, 0x01,
++			0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2b, 0x32, 0x39,
++			0x40, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71,
++			0x78, 0x7f, 0x86, 0x8d, 0x94, 0x9b, 0xa2, 0xa9,
++			0xb0, 0xb7, 0xbe, 0xc5, 0xcc, 0xd3, 0xda, 0xe1,
++			0xe8, 0xef, 0xf6, 0xfd, 0x04, 0x0b, 0x12, 0x19,
++			0x20, 0x27, 0x2e, 0x35, 0x3c, 0x43, 0x4a, 0x51,
++			0x58, 0x5f, 0x66, 0x6d, 0x74, 0x7b, 0x82, 0x89,
++			0x90, 0x97, 0x9e, 0xa5, 0xac, 0xb3, 0xba, 0xc1,
++			0xc8, 0xcf, 0xd6, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9,
++			0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
++			0x48, 0x51, 0x5a, 0x63, 0x6c, 0x75, 0x7e, 0x87,
++			0x90, 0x99, 0xa2, 0xab, 0xb4, 0xbd, 0xc6, 0xcf,
++			0xd8, 0xe1, 0xea, 0xf3, 0xfc, 0x05, 0x0e, 0x17,
++			0x20, 0x29, 0x32, 0x3b, 0x44, 0x4d, 0x56, 0x5f,
++			0x68, 0x71, 0x7a, 0x83, 0x8c, 0x95, 0x9e, 0xa7,
++			0xb0, 0xb9, 0xc2, 0xcb, 0xd4, 0xdd, 0xe6, 0xef,
++			0xf8, 0x01, 0x0a, 0x13, 0x1c, 0x25, 0x2e, 0x37,
++			0x40, 0x49, 0x52, 0x5b, 0x64, 0x6d, 0x76, 0x7f,
++			0x88, 0x91, 0x9a, 0xa3, 0xac, 0xb5, 0xbe, 0xc7,
++			0xd0, 0xd9, 0xe2, 0xeb, 0xf4, 0xfd, 0x06, 0x0f,
++			0x18, 0x21, 0x2a, 0x33, 0x3c, 0x45, 0x4e, 0x57,
++			0x60, 0x69, 0x72, 0x7b, 0x84, 0x8d, 0x96, 0x9f,
++			0xa8, 0xb1, 0xba, 0xc3, 0xcc, 0xd5, 0xde, 0xe7,
++			0xf0, 0xf9, 0x02, 0x0b, 0x14, 0x1d, 0x26, 0x2f,
++			0x38, 0x41, 0x4a, 0x53, 0x5c, 0x65, 0x6e, 0x77,
++			0x80, 0x89, 0x92, 0x9b, 0xa4, 0xad, 0xb6, 0xbf,
++			0xc8, 0xd1, 0xda, 0xe3, 0xec, 0xf5, 0xfe, 0x07,
++			0x10, 0x19, 0x22, 0x2b, 0x34, 0x3d, 0x46, 0x4f,
++			0x58, 0x61, 0x6a, 0x73, 0x7c, 0x85, 0x8e, 0x97,
++			0xa0, 0xa9, 0xb2, 0xbb, 0xc4, 0xcd, 0xd6, 0xdf,
++			0xe8, 0xf1, 0xfa, 0x03, 0x0c, 0x15, 0x1e, 0x27,
++			0x30, 0x39, 0x42, 0x4b, 0x54, 0x5d, 0x66, 0x6f,
++			0x78, 0x81, 0x8a, 0x93, 0x9c, 0xa5, 0xae, 0xb7,
++			0xc0, 0xc9, 0xd2, 0xdb, 0xe4, 0xed, 0xf6, 0xff,
++			0x08, 0x11, 0x1a, 0x23, 0x2c, 0x35, 0x3e, 0x47,
++			0x50, 0x59, 0x62, 0x6b, 0x74, 0x7d, 0x86, 0x8f,
++			0x98, 0xa1, 0xaa, 0xb3, 0xbc, 0xc5, 0xce, 0xd7,
++			0xe0, 0xe9, 0xf2, 0xfb, 0x04, 0x0d, 0x16, 0x1f,
++			0x28, 0x31, 0x3a, 0x43, 0x4c, 0x55, 0x5e, 0x67,
++			0x70, 0x79, 0x82, 0x8b, 0x94, 0x9d, 0xa6, 0xaf,
++			0xb8, 0xc1, 0xca, 0xd3, 0xdc, 0xe5, 0xee, 0xf7,
++			0x00, 0x0b, 0x16, 0x21, 0x2c, 0x37, 0x42, 0x4d,
++			0x58, 0x63, 0x6e, 0x79, 0x84, 0x8f, 0x9a, 0xa5,
++			0xb0, 0xbb, 0xc6, 0xd1, 0xdc, 0xe7, 0xf2, 0xfd,
++			0x08, 0x13, 0x1e, 0x29, 0x34, 0x3f, 0x4a, 0x55,
++			0x60, 0x6b, 0x76, 0x81, 0x8c, 0x97, 0xa2, 0xad,
++			0xb8, 0xc3, 0xce, 0xd9, 0xe4, 0xef, 0xfa, 0x05,
++			0x10, 0x1b, 0x26, 0x31, 0x3c, 0x47, 0x52, 0x5d,
++			0x68, 0x73, 0x7e, 0x89, 0x94, 0x9f, 0xaa, 0xb5,
++			0xc0, 0xcb, 0xd6, 0xe1, 0xec, 0xf7, 0x02, 0x0d,
++			0x18, 0x23, 0x2e, 0x39, 0x44, 0x4f, 0x5a, 0x65,
++			0x70, 0x7b, 0x86, 0x91, 0x9c, 0xa7, 0xb2, 0xbd,
++			0xc8, 0xd3, 0xde, 0xe9, 0xf4, 0xff, 0x0a, 0x15,
++			0x20, 0x2b, 0x36, 0x41, 0x4c, 0x57, 0x62, 0x6d,
++			0x78, 0x83, 0x8e, 0x99, 0xa4, 0xaf, 0xba, 0xc5,
++			0xd0, 0xdb, 0xe6, 0xf1, 0xfc, 0x07, 0x12, 0x1d,
++			0x28, 0x33, 0x3e, 0x49, 0x54, 0x5f, 0x6a, 0x75,
++			0x80, 0x8b, 0x96, 0xa1, 0xac, 0xb7, 0xc2, 0xcd,
++			0xd8, 0xe3, 0xee, 0xf9, 0x04, 0x0f, 0x1a, 0x25,
++			0x30, 0x3b, 0x46, 0x51, 0x5c, 0x67, 0x72, 0x7d,
++			0x88, 0x93, 0x9e, 0xa9, 0xb4, 0xbf, 0xca, 0xd5,
++			0xe0, 0xeb, 0xf6, 0x01, 0x0c, 0x17, 0x22, 0x2d,
++			0x38, 0x43, 0x4e, 0x59, 0x64, 0x6f, 0x7a, 0x85,
++			0x90, 0x9b, 0xa6, 0xb1, 0xbc, 0xc7, 0xd2, 0xdd,
++			0xe8, 0xf3, 0xfe, 0x09, 0x14, 0x1f, 0x2a, 0x35,
++			0x40, 0x4b, 0x56, 0x61, 0x6c, 0x77, 0x82, 0x8d,
++			0x98, 0xa3, 0xae, 0xb9, 0xc4, 0xcf, 0xda, 0xe5,
++			0xf0, 0xfb, 0x06, 0x11, 0x1c, 0x27, 0x32, 0x3d,
++			0x48, 0x53, 0x5e, 0x69, 0x74, 0x7f, 0x8a, 0x95,
++			0xa0, 0xab, 0xb6, 0xc1, 0xcc, 0xd7, 0xe2, 0xed,
++			0xf8, 0x03, 0x0e, 0x19, 0x24, 0x2f, 0x3a, 0x45,
++			0x50, 0x5b, 0x66, 0x71, 0x7c, 0x87, 0x92, 0x9d,
++			0xa8, 0xb3, 0xbe, 0xc9, 0xd4, 0xdf, 0xea, 0xf5,
++			0x00, 0x0d, 0x1a, 0x27, 0x34, 0x41, 0x4e, 0x5b,
++			0x68, 0x75, 0x82, 0x8f, 0x9c, 0xa9, 0xb6, 0xc3,
++			0xd0, 0xdd, 0xea, 0xf7, 0x04, 0x11, 0x1e, 0x2b,
++			0x38, 0x45, 0x52, 0x5f, 0x6c, 0x79, 0x86, 0x93,
++			0xa0, 0xad, 0xba, 0xc7, 0xd4, 0xe1, 0xee, 0xfb,
++			0x08, 0x15, 0x22, 0x2f, 0x3c, 0x49, 0x56, 0x63,
++			0x70, 0x7d, 0x8a, 0x97, 0xa4, 0xb1, 0xbe, 0xcb,
++			0xd8, 0xe5, 0xf2, 0xff, 0x0c, 0x19, 0x26, 0x33,
++			0x40, 0x4d, 0x5a, 0x67, 0x74, 0x81, 0x8e, 0x9b,
++			0xa8, 0xb5, 0xc2, 0xcf, 0xdc, 0xe9, 0xf6, 0x03,
++			0x10, 0x1d, 0x2a, 0x37, 0x44, 0x51, 0x5e, 0x6b,
++			0x78, 0x85, 0x92, 0x9f, 0xac, 0xb9, 0xc6, 0xd3,
++			0xe0, 0xed, 0xfa, 0x07, 0x14, 0x21, 0x2e, 0x3b,
++			0x48, 0x55, 0x62, 0x6f, 0x7c, 0x89, 0x96, 0xa3,
++			0xb0, 0xbd, 0xca, 0xd7, 0xe4, 0xf1, 0xfe, 0x0b,
++			0x18, 0x25, 0x32, 0x3f, 0x4c, 0x59, 0x66, 0x73,
++			0x80, 0x8d, 0x9a, 0xa7, 0xb4, 0xc1, 0xce, 0xdb,
++			0xe8, 0xf5, 0x02, 0x0f, 0x1c, 0x29, 0x36, 0x43,
++			0x50, 0x5d, 0x6a, 0x77, 0x84, 0x91, 0x9e, 0xab,
++			0xb8, 0xc5, 0xd2, 0xdf, 0xec, 0xf9, 0x06, 0x13,
++			0x20, 0x2d, 0x3a, 0x47, 0x54, 0x61, 0x6e, 0x7b,
++			0x88, 0x95, 0xa2, 0xaf, 0xbc, 0xc9, 0xd6, 0xe3,
++			0xf0, 0xfd, 0x0a, 0x17, 0x24, 0x31, 0x3e, 0x4b,
++			0x58, 0x65, 0x72, 0x7f, 0x8c, 0x99, 0xa6, 0xb3,
++			0xc0, 0xcd, 0xda, 0xe7, 0xf4, 0x01, 0x0e, 0x1b,
++			0x28, 0x35, 0x42, 0x4f, 0x5c, 0x69, 0x76, 0x83,
++			0x90, 0x9d, 0xaa, 0xb7, 0xc4, 0xd1, 0xde, 0xeb,
++			0xf8, 0x05, 0x12, 0x1f, 0x2c, 0x39, 0x46, 0x53,
++			0x60, 0x6d, 0x7a, 0x87, 0x94, 0xa1, 0xae, 0xbb,
++			0xc8, 0xd5, 0xe2, 0xef, 0xfc, 0x09, 0x16, 0x23,
++			0x30, 0x3d, 0x4a, 0x57, 0x64, 0x71, 0x7e, 0x8b,
++			0x98, 0xa5, 0xb2, 0xbf, 0xcc, 0xd9, 0xe6, 0xf3,
++			0x00, 0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69,
++			0x78, 0x87, 0x96, 0xa5, 0xb4, 0xc3, 0xd2, 0xe1,
++			0xf0, 0xff, 0x0e, 0x1d, 0x2c, 0x3b, 0x4a, 0x59,
++			0x68, 0x77, 0x86, 0x95, 0xa4, 0xb3, 0xc2, 0xd1,
++			0xe0, 0xef, 0xfe, 0x0d, 0x1c, 0x2b, 0x3a, 0x49,
++			0x58, 0x67, 0x76, 0x85, 0x94, 0xa3, 0xb2, 0xc1,
++			0xd0, 0xdf, 0xee, 0xfd, 0x0c, 0x1b, 0x2a, 0x39,
++			0x48, 0x57, 0x66, 0x75, 0x84, 0x93, 0xa2, 0xb1,
++			0xc0, 0xcf, 0xde, 0xed, 0xfc, 0x0b, 0x1a, 0x29,
++			0x38, 0x47, 0x56, 0x65, 0x74, 0x83, 0x92, 0xa1,
++			0xb0, 0xbf, 0xce, 0xdd, 0xec, 0xfb, 0x0a, 0x19,
++			0x28, 0x37, 0x46, 0x55, 0x64, 0x73, 0x82, 0x91,
++			0xa0, 0xaf, 0xbe, 0xcd, 0xdc, 0xeb, 0xfa, 0x09,
++			0x18, 0x27, 0x36, 0x45, 0x54, 0x63, 0x72, 0x81,
++			0x90, 0x9f, 0xae, 0xbd, 0xcc, 0xdb, 0xea, 0xf9,
++			0x08, 0x17, 0x26, 0x35, 0x44, 0x53, 0x62, 0x71,
++			0x80, 0x8f, 0x9e, 0xad, 0xbc, 0xcb, 0xda, 0xe9,
++			0xf8, 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61,
++			0x70, 0x7f, 0x8e, 0x9d, 0xac, 0xbb, 0xca, 0xd9,
++			0xe8, 0xf7, 0x06, 0x15, 0x24, 0x33, 0x42, 0x51,
++			0x60, 0x6f, 0x7e, 0x8d, 0x9c, 0xab, 0xba, 0xc9,
++			0xd8, 0xe7, 0xf6, 0x05, 0x14, 0x23, 0x32, 0x41,
++			0x50, 0x5f, 0x6e, 0x7d, 0x8c, 0x9b, 0xaa, 0xb9,
++			0xc8, 0xd7, 0xe6, 0xf5, 0x04, 0x13, 0x22, 0x31,
++			0x40, 0x4f, 0x5e, 0x6d, 0x7c, 0x8b, 0x9a, 0xa9,
++			0xb8, 0xc7, 0xd6, 0xe5, 0xf4, 0x03, 0x12, 0x21,
++			0x30, 0x3f, 0x4e, 0x5d, 0x6c, 0x7b, 0x8a, 0x99,
++			0xa8, 0xb7, 0xc6, 0xd5, 0xe4, 0xf3, 0x02, 0x11,
++			0x20, 0x2f, 0x3e, 0x4d, 0x5c, 0x6b, 0x7a, 0x89,
++			0x98, 0xa7, 0xb6, 0xc5, 0xd4, 0xe3, 0xf2, 0x01,
++			0x10, 0x1f, 0x2e, 0x3d, 0x4c, 0x5b, 0x6a, 0x79,
++			0x88, 0x97, 0xa6, 0xb5, 0xc4, 0xd3, 0xe2, 0xf1,
++			0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
++			0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
++			0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87,
++			0x98, 0xa9, 0xba, 0xcb, 0xdc, 0xed, 0xfe, 0x0f,
++			0x20, 0x31, 0x42, 0x53, 0x64, 0x75, 0x86, 0x97,
++			0xa8, 0xb9, 0xca, 0xdb, 0xec, 0xfd, 0x0e, 0x1f,
++			0x30, 0x41, 0x52, 0x63, 0x74, 0x85, 0x96, 0xa7,
++			0xb8, 0xc9, 0xda, 0xeb, 0xfc, 0x0d, 0x1e, 0x2f,
++			0x40, 0x51, 0x62, 0x73, 0x84, 0x95, 0xa6, 0xb7,
++			0xc8, 0xd9, 0xea, 0xfb, 0x0c, 0x1d, 0x2e, 0x3f,
++			0x50, 0x61, 0x72, 0x83, 0x94, 0xa5, 0xb6, 0xc7,
++			0xd8, 0xe9, 0xfa, 0x0b, 0x1c, 0x2d, 0x3e, 0x4f,
++			0x60, 0x71, 0x82, 0x93, 0xa4, 0xb5, 0xc6, 0xd7,
++			0xe8, 0xf9, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f,
++			0x70, 0x81, 0x92, 0xa3, 0xb4, 0xc5, 0xd6, 0xe7,
++			0xf8, 0x09, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f,
++			0x80, 0x91, 0xa2, 0xb3, 0xc4, 0xd5, 0xe6, 0xf7,
++			0x08, 0x19, 0x2a, 0x3b, 0x4c, 0x5d, 0x6e, 0x7f,
++			0x90, 0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6, 0x07,
++			0x18, 0x29, 0x3a, 0x4b, 0x5c, 0x6d, 0x7e, 0x8f,
++			0xa0, 0xb1, 0xc2, 0xd3, 0xe4, 0xf5, 0x06, 0x17,
++			0x28, 0x39, 0x4a, 0x5b, 0x6c, 0x7d, 0x8e, 0x9f,
++			0xb0, 0xc1, 0xd2, 0xe3, 0xf4, 0x05, 0x16, 0x27,
++			0x38, 0x49, 0x5a, 0x6b, 0x7c, 0x8d, 0x9e, 0xaf,
++			0xc0, 0xd1, 0xe2, 0xf3, 0x04, 0x15, 0x26, 0x37,
++			0x48, 0x59, 0x6a, 0x7b, 0x8c, 0x9d, 0xae, 0xbf,
++			0xd0, 0xe1, 0xf2, 0x03, 0x14, 0x25, 0x36, 0x47,
++			0x58, 0x69, 0x7a, 0x8b, 0x9c, 0xad, 0xbe, 0xcf,
++			0xe0, 0xf1, 0x02, 0x13, 0x24, 0x35, 0x46, 0x57,
++			0x68, 0x79, 0x8a, 0x9b, 0xac, 0xbd, 0xce, 0xdf,
++			0xf0, 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67,
++			0x78, 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef,
++			0x00, 0x13, 0x26, 0x39, 0x4c, 0x5f, 0x72, 0x85,
++			0x98, 0xab, 0xbe, 0xd1, 0xe4, 0xf7, 0x0a, 0x1d,
++			0x30, 0x43, 0x56, 0x69, 0x7c, 0x8f, 0xa2, 0xb5,
++			0xc8, 0xdb, 0xee, 0x01, 0x14, 0x27, 0x3a, 0x4d,
++			0x60, 0x73, 0x86, 0x99, 0xac, 0xbf, 0xd2, 0xe5,
++			0xf8, 0x0b, 0x1e, 0x31, 0x44, 0x57, 0x6a, 0x7d,
++			0x90, 0xa3, 0xb6, 0xc9, 0xdc, 0xef, 0x02, 0x15,
++			0x28, 0x3b, 0x4e, 0x61, 0x74, 0x87, 0x9a, 0xad,
++			0xc0, 0xd3, 0xe6, 0xf9, 0x0c, 0x1f, 0x32, 0x45,
++			0x58, 0x6b, 0x7e, 0x91, 0xa4, 0xb7, 0xca, 0xdd,
++			0xf0, 0x03, 0x16, 0x29, 0x3c, 0x4f, 0x62, 0x75,
++			0x88, 0x9b, 0xae, 0xc1, 0xd4, 0xe7, 0xfa, 0x0d,
++			0x20, 0x33, 0x46, 0x59, 0x6c, 0x7f, 0x92, 0xa5,
++			0xb8, 0xcb, 0xde, 0xf1, 0x04, 0x17, 0x2a, 0x3d,
++			0x50, 0x63, 0x76, 0x89, 0x9c, 0xaf, 0xc2, 0xd5,
++			0xe8, 0xfb, 0x0e, 0x21, 0x34, 0x47, 0x5a, 0x6d,
++			0x80, 0x93, 0xa6, 0xb9, 0xcc, 0xdf, 0xf2, 0x05,
++			0x18, 0x2b, 0x3e, 0x51, 0x64, 0x77, 0x8a, 0x9d,
++			0xb0, 0xc3, 0xd6, 0xe9, 0xfc, 0x0f, 0x22, 0x35,
++			0x48, 0x5b, 0x6e, 0x81, 0x94, 0xa7, 0xba, 0xcd,
++			0xe0, 0xf3, 0x06, 0x19, 0x2c, 0x3f, 0x52, 0x65,
++			0x78, 0x8b, 0x9e, 0xb1, 0xc4, 0xd7, 0xea, 0xfd,
++			0x10, 0x23, 0x36, 0x49, 0x5c, 0x6f, 0x82, 0x95,
++			0xa8, 0xbb, 0xce, 0xe1, 0xf4, 0x07, 0x1a, 0x2d,
++			0x40, 0x53, 0x66, 0x79, 0x8c, 0x9f, 0xb2, 0xc5,
++			0xd8, 0xeb, 0xfe, 0x11, 0x24, 0x37, 0x4a, 0x5d,
++			0x70, 0x83, 0x96, 0xa9, 0xbc, 0xcf, 0xe2, 0xf5,
++			0x08, 0x1b, 0x2e, 0x41, 0x54, 0x67, 0x7a, 0x8d,
++			0xa0, 0xb3, 0xc6, 0xd9, 0xec, 0xff, 0x12, 0x25,
++			0x38, 0x4b, 0x5e, 0x71, 0x84, 0x97, 0xaa, 0xbd,
++			0xd0, 0xe3, 0xf6, 0x09, 0x1c, 0x2f, 0x42, 0x55,
++			0x68, 0x7b, 0x8e, 0xa1, 0xb4, 0xc7, 0xda, 0xed,
++			0x00, 0x15, 0x2a, 0x3f, 0x54, 0x69, 0x7e, 0x93,
++			0xa8, 0xbd, 0xd2, 0xe7, 0xfc, 0x11, 0x26, 0x3b,
++			0x50, 0x65, 0x7a, 0x8f, 0xa4, 0xb9, 0xce, 0xe3,
++			0xf8, 0x0d, 0x22, 0x37, 0x4c, 0x61, 0x76, 0x8b,
++			0xa0, 0xb5, 0xca, 0xdf, 0xf4, 0x09, 0x1e, 0x33,
++			0x48, 0x5d, 0x72, 0x87, 0x9c, 0xb1, 0xc6, 0xdb,
++			0xf0, 0x05, 0x1a, 0x2f, 0x44, 0x59, 0x6e, 0x83,
++			0x98, 0xad, 0xc2, 0xd7, 0xec, 0x01, 0x16, 0x2b,
++			0x40, 0x55, 0x6a, 0x7f, 0x94, 0xa9, 0xbe, 0xd3,
++			0xe8, 0xfd, 0x12, 0x27, 0x3c, 0x51, 0x66, 0x7b,
++			0x90, 0xa5, 0xba, 0xcf, 0xe4, 0xf9, 0x0e, 0x23,
++			0x38, 0x4d, 0x62, 0x77, 0x8c, 0xa1, 0xb6, 0xcb,
++			0xe0, 0xf5, 0x0a, 0x1f, 0x34, 0x49, 0x5e, 0x73,
++			0x88, 0x9d, 0xb2, 0xc7, 0xdc, 0xf1, 0x06, 0x1b,
++			0x30, 0x45, 0x5a, 0x6f, 0x84, 0x99, 0xae, 0xc3,
++			0xd8, 0xed, 0x02, 0x17, 0x2c, 0x41, 0x56, 0x6b,
++			0x80, 0x95, 0xaa, 0xbf, 0xd4, 0xe9, 0xfe, 0x13,
++			0x28, 0x3d, 0x52, 0x67, 0x7c, 0x91, 0xa6, 0xbb,
++			0xd0, 0xe5, 0xfa, 0x0f, 0x24, 0x39, 0x4e, 0x63,
++			0x78, 0x8d, 0xa2, 0xb7, 0xcc, 0xe1, 0xf6, 0x0b,
++			0x20, 0x35, 0x4a, 0x5f, 0x74, 0x89, 0x9e, 0xb3,
++			0xc8, 0xdd, 0xf2, 0x07, 0x1c, 0x31, 0x46, 0x5b,
++			0x70, 0x85, 0x9a, 0xaf, 0xc4, 0xd9, 0xee, 0x03,
++			0x18, 0x2d, 0x42, 0x57, 0x6c, 0x81, 0x96, 0xab,
++			0xc0, 0xd5, 0xea, 0xff, 0x14, 0x29, 0x3e, 0x53,
++			0x68, 0x7d, 0x92, 0xa7, 0xbc, 0xd1, 0xe6, 0xfb,
++			0x10, 0x25, 0x3a, 0x4f, 0x64, 0x79, 0x8e, 0xa3,
++			0xb8, 0xcd, 0xe2, 0xf7, 0x0c, 0x21, 0x36, 0x4b,
++			0x60, 0x75, 0x8a, 0x9f, 0xb4, 0xc9, 0xde, 0xf3,
++			0x08, 0x1d, 0x32, 0x47, 0x5c, 0x71, 0x86, 0x9b,
++			0xb0, 0xc5, 0xda, 0xef, 0x04, 0x19, 0x2e, 0x43,
++			0x58, 0x6d, 0x82, 0x97, 0xac, 0xc1, 0xd6, 0xeb,
++			0x00, 0x17, 0x2e, 0x45, 0x5c, 0x73, 0x8a, 0xa1,
++			0xb8, 0xcf, 0xe6, 0xfd, 0x14, 0x2b, 0x42, 0x59,
++			0x70, 0x87, 0x9e, 0xb5, 0xcc, 0xe3, 0xfa, 0x11,
++			0x28, 0x3f, 0x56, 0x6d, 0x84, 0x9b, 0xb2, 0xc9,
++			0xe0, 0xf7, 0x0e, 0x25, 0x3c, 0x53, 0x6a, 0x81,
++			0x98, 0xaf, 0xc6, 0xdd, 0xf4, 0x0b, 0x22, 0x39,
++			0x50, 0x67, 0x7e, 0x95, 0xac, 0xc3, 0xda, 0xf1,
++			0x08, 0x1f, 0x36, 0x4d, 0x64, 0x7b, 0x92, 0xa9,
++			0xc0, 0xd7, 0xee, 0x05, 0x1c, 0x33, 0x4a, 0x61,
++			0x78, 0x8f, 0xa6, 0xbd, 0xd4, 0xeb, 0x02, 0x19,
++			0x30, 0x47, 0x5e, 0x75, 0x8c, 0xa3, 0xba, 0xd1,
++			0xe8, 0xff, 0x16, 0x2d, 0x44, 0x5b, 0x72, 0x89,
++			0xa0, 0xb7, 0xce, 0xe5, 0xfc, 0x13, 0x2a, 0x41,
++			0x58, 0x6f, 0x86, 0x9d, 0xb4, 0xcb, 0xe2, 0xf9,
++			0x10, 0x27, 0x3e, 0x55, 0x6c, 0x83, 0x9a, 0xb1,
++			0xc8, 0xdf, 0xf6, 0x0d, 0x24, 0x3b, 0x52, 0x69,
++			0x80, 0x97, 0xae, 0xc5, 0xdc, 0xf3, 0x0a, 0x21,
++			0x38, 0x4f, 0x66, 0x7d, 0x94, 0xab, 0xc2, 0xd9,
++			0xf0, 0x07, 0x1e, 0x35, 0x4c, 0x63, 0x7a, 0x91,
++			0xa8, 0xbf, 0xd6, 0xed, 0x04, 0x1b, 0x32, 0x49,
++			0x60, 0x77, 0x8e, 0xa5, 0xbc, 0xd3, 0xea, 0x01,
++			0x18, 0x2f, 0x46, 0x5d, 0x74, 0x8b, 0xa2, 0xb9,
++			0xd0, 0xe7, 0xfe, 0x15, 0x2c, 0x43, 0x5a, 0x71,
++			0x88, 0x9f, 0xb6, 0xcd, 0xe4, 0xfb, 0x12, 0x29,
++			0x40, 0x57, 0x6e, 0x85, 0x9c, 0xb3, 0xca, 0xe1,
++			0xf8, 0x0f, 0x26, 0x3d, 0x54, 0x6b, 0x82, 0x99,
++			0xb0, 0xc7, 0xde, 0xf5, 0x0c, 0x23, 0x3a, 0x51,
++			0x68, 0x7f, 0x96, 0xad, 0xc4, 0xdb, 0xf2, 0x09,
++			0x20, 0x37, 0x4e, 0x65, 0x7c, 0x93, 0xaa, 0xc1,
++			0xd8, 0xef, 0x06, 0x1d, 0x34, 0x4b, 0x62, 0x79,
++			0x90, 0xa7, 0xbe, 0xd5, 0xec, 0x03, 0x1a, 0x31,
++			0x48, 0x5f, 0x76, 0x8d, 0xa4, 0xbb, 0xd2, 0xe9,
++			0x00, 0x19, 0x32, 0x4b, 0x64, 0x7d, 0x96, 0xaf,
++			0xc8, 0xe1, 0xfa, 0x13, 0x2c, 0x45, 0x5e, 0x77,
++			0x90, 0xa9, 0xc2, 0xdb, 0xf4, 0x0d, 0x26, 0x3f,
++			0x58, 0x71, 0x8a, 0xa3, 0xbc, 0xd5, 0xee, 0x07,
++			0x20, 0x39, 0x52, 0x6b, 0x84, 0x9d, 0xb6, 0xcf,
++			0xe8, 0x01, 0x1a, 0x33, 0x4c, 0x65, 0x7e, 0x97,
++			0xb0, 0xc9, 0xe2, 0xfb, 0x14, 0x2d, 0x46, 0x5f,
++			0x78, 0x91, 0xaa, 0xc3, 0xdc, 0xf5, 0x0e, 0x27,
++			0x40, 0x59, 0x72, 0x8b, 0xa4, 0xbd, 0xd6, 0xef,
++			0x08, 0x21, 0x3a, 0x53, 0x6c, 0x85, 0x9e, 0xb7,
++			0xd0, 0xe9, 0x02, 0x1b, 0x34, 0x4d, 0x66, 0x7f,
++			0x98, 0xb1, 0xca, 0xe3, 0xfc, 0x15, 0x2e, 0x47,
++			0x60, 0x79, 0x92, 0xab, 0xc4, 0xdd, 0xf6, 0x0f,
++			0x28, 0x41, 0x5a, 0x73, 0x8c, 0xa5, 0xbe, 0xd7,
++			0xf0, 0x09, 0x22, 0x3b, 0x54, 0x6d, 0x86, 0x9f,
++			0xb8, 0xd1, 0xea, 0x03, 0x1c, 0x35, 0x4e, 0x67,
++			0x80, 0x99, 0xb2, 0xcb, 0xe4, 0xfd, 0x16, 0x2f,
++			0x48, 0x61, 0x7a, 0x93, 0xac, 0xc5, 0xde, 0xf7,
++			0x10, 0x29, 0x42, 0x5b, 0x74, 0x8d, 0xa6, 0xbf,
++			0xd8, 0xf1, 0x0a, 0x23, 0x3c, 0x55, 0x6e, 0x87,
++			0xa0, 0xb9, 0xd2, 0xeb, 0x04, 0x1d, 0x36, 0x4f,
++			0x68, 0x81, 0x9a, 0xb3, 0xcc, 0xe5, 0xfe, 0x17,
++			0x30, 0x49, 0x62, 0x7b, 0x94, 0xad, 0xc6, 0xdf,
++			0xf8, 0x11, 0x2a, 0x43, 0x5c, 0x75, 0x8e, 0xa7,
++			0xc0, 0xd9, 0xf2, 0x0b, 0x24, 0x3d, 0x56, 0x6f,
++			0x88, 0xa1, 0xba, 0xd3, 0xec, 0x05, 0x1e, 0x37,
++			0x50, 0x69, 0x82, 0x9b, 0xb4, 0xcd, 0xe6, 0xff,
++			0x18, 0x31, 0x4a, 0x63, 0x7c, 0x95, 0xae, 0xc7,
++			0xe0, 0xf9, 0x12, 0x2b, 0x44, 0x5d, 0x76, 0x8f,
++			0xa8, 0xc1, 0xda, 0xf3, 0x0c, 0x25, 0x3e, 0x57,
++			0x70, 0x89, 0xa2, 0xbb, 0xd4, 0xed, 0x06, 0x1f,
++			0x38, 0x51, 0x6a, 0x83, 0x9c, 0xb5, 0xce, 0xe7,
++			0x00, 0x1b, 0x36, 0x51, 0x6c, 0x87, 0xa2, 0xbd,
++			0xd8, 0xf3, 0x0e, 0x29, 0x44, 0x5f, 0x7a, 0x95,
++			0xb0, 0xcb, 0xe6, 0x01, 0x1c, 0x37, 0x52, 0x6d,
++			0x88, 0xa3, 0xbe, 0xd9, 0xf4, 0x0f, 0x2a, 0x45,
++			0x60, 0x7b, 0x96, 0xb1, 0xcc, 0xe7, 0x02, 0x1d,
++			0x38, 0x53, 0x6e, 0x89, 0xa4, 0xbf, 0xda, 0xf5,
++			0x10, 0x2b, 0x46, 0x61, 0x7c, 0x97, 0xb2, 0xcd,
++			0xe8, 0x03, 0x1e, 0x39, 0x54, 0x6f, 0x8a, 0xa5,
++			0xc0, 0xdb, 0xf6, 0x11, 0x2c, 0x47, 0x62, 0x7d,
++			0x98, 0xb3, 0xce, 0xe9, 0x04, 0x1f, 0x3a, 0x55,
++			0x70, 0x8b, 0xa6, 0xc1, 0xdc, 0xf7, 0x12, 0x2d,
++			0x48, 0x63, 0x7e, 0x99, 0xb4, 0xcf, 0xea, 0x05,
++			0x20, 0x3b, 0x56, 0x71, 0x8c, 0xa7, 0xc2, 0xdd,
++			0xf8, 0x13, 0x2e, 0x49, 0x64, 0x7f, 0x9a, 0xb5,
++			0xd0, 0xeb, 0x06, 0x21, 0x3c, 0x57, 0x72, 0x8d,
++			0xa8, 0xc3, 0xde, 0xf9, 0x14, 0x2f, 0x4a, 0x65,
++			0x80, 0x9b, 0xb6, 0xd1, 0xec, 0x07, 0x22, 0x3d,
++			0x58, 0x73, 0x8e, 0xa9, 0xc4, 0xdf, 0xfa, 0x15,
++			0x30, 0x4b, 0x66, 0x81, 0x9c, 0xb7, 0xd2, 0xed,
++			0x08, 0x23, 0x3e, 0x59, 0x74, 0x8f, 0xaa, 0xc5,
++			0xe0, 0xfb, 0x16, 0x31, 0x4c, 0x67, 0x82, 0x9d,
++			0xb8, 0xd3, 0xee, 0x09, 0x24, 0x3f, 0x5a, 0x75,
++			0x90, 0xab, 0xc6, 0xe1, 0xfc, 0x17, 0x32, 0x4d,
++			0x68, 0x83, 0x9e, 0xb9, 0xd4, 0xef, 0x0a, 0x25,
++			0x40, 0x5b, 0x76, 0x91, 0xac, 0xc7, 0xe2, 0xfd,
++			0x18, 0x33, 0x4e, 0x69, 0x84, 0x9f, 0xba, 0xd5,
++			0xf0, 0x0b, 0x26, 0x41, 0x5c, 0x77, 0x92, 0xad,
++			0xc8, 0xe3, 0xfe, 0x19, 0x34, 0x4f, 0x6a, 0x85,
++			0xa0, 0xbb, 0xd6, 0xf1, 0x0c, 0x27, 0x42, 0x5d,
++			0x78, 0x93, 0xae, 0xc9, 0xe4, 0xff, 0x1a, 0x35,
++			0x50, 0x6b, 0x86, 0xa1, 0xbc, 0xd7, 0xf2, 0x0d,
++			0x28, 0x43, 0x5e, 0x79, 0x94, 0xaf, 0xca, 0xe5,
++			0x00, 0x1d, 0x3a, 0x57, 0x74, 0x91, 0xae, 0xcb,
++			0xe8, 0x05, 0x22, 0x3f, 0x5c, 0x79, 0x96, 0xb3,
++			0xd0, 0xed, 0x0a, 0x27, 0x44, 0x61, 0x7e, 0x9b,
++			0xb8, 0xd5, 0xf2, 0x0f, 0x2c, 0x49, 0x66, 0x83,
++			0xa0, 0xbd, 0xda, 0xf7, 0x14, 0x31, 0x4e, 0x6b,
++			0x88, 0xa5, 0xc2, 0xdf, 0xfc, 0x19, 0x36, 0x53,
++			0x70, 0x8d, 0xaa, 0xc7, 0xe4, 0x01, 0x1e, 0x3b,
++			0x58, 0x75, 0x92, 0xaf, 0xcc, 0xe9, 0x06, 0x23,
++			0x40, 0x5d, 0x7a, 0x97, 0xb4, 0xd1, 0xee, 0x0b,
++			0x28, 0x45, 0x62, 0x7f, 0x9c, 0xb9, 0xd6, 0xf3,
++			0x10, 0x2d, 0x4a, 0x67, 0x84, 0xa1, 0xbe, 0xdb,
++			0xf8, 0x15, 0x32, 0x4f, 0x6c, 0x89, 0xa6, 0xc3,
++			0xe0, 0xfd, 0x1a, 0x37, 0x54, 0x71, 0x8e, 0xab,
++			0xc8, 0xe5, 0x02, 0x1f, 0x3c, 0x59, 0x76, 0x93,
++			0xb0, 0xcd, 0xea, 0x07, 0x24, 0x41, 0x5e, 0x7b,
++			0x98, 0xb5, 0xd2, 0xef, 0x0c, 0x29, 0x46, 0x63,
++			0x80, 0x9d, 0xba, 0xd7, 0xf4, 0x11, 0x2e, 0x4b,
++			0x68, 0x85, 0xa2, 0xbf, 0xdc, 0xf9, 0x16, 0x33,
++			0x50, 0x6d, 0x8a, 0xa7, 0xc4, 0xe1, 0xfe, 0x1b,
++			0x38, 0x55, 0x72, 0x8f, 0xac, 0xc9, 0xe6, 0x03,
++			0x20, 0x3d, 0x5a, 0x77, 0x94, 0xb1, 0xce, 0xeb,
++			0x08, 0x25, 0x42, 0x5f, 0x7c, 0x99, 0xb6, 0xd3,
++			0xf0, 0x0d, 0x2a, 0x47, 0x64, 0x81, 0x9e, 0xbb,
++			0xd8, 0xf5, 0x12, 0x2f, 0x4c, 0x69, 0x86, 0xa3,
++			0xc0, 0xdd, 0xfa, 0x17, 0x34, 0x51, 0x6e, 0x8b,
++			0xa8, 0xc5, 0xe2, 0xff, 0x1c, 0x39, 0x56, 0x73,
++			0x90, 0xad, 0xca, 0xe7, 0x04, 0x21, 0x3e, 0x5b,
++			0x78, 0x95, 0xb2, 0xcf, 0xec, 0x09, 0x26, 0x43,
++			0x60, 0x7d, 0x9a, 0xb7, 0xd4, 0xf1, 0x0e, 0x2b,
++			0x48, 0x65, 0x82, 0x9f, 0xbc, 0xd9, 0xf6, 0x13,
++			0x30, 0x4d, 0x6a, 0x87, 0xa4, 0xc1, 0xde, 0xfb,
++			0x18, 0x35, 0x52, 0x6f, 0x8c, 0xa9, 0xc6, 0xe3,
++			0x00, 0x1f, 0x3e, 0x5d, 0x7c, 0x9b, 0xba, 0xd9,
++			0xf8, 0x17, 0x36, 0x55, 0x74, 0x93, 0xb2, 0xd1,
++			0xf0, 0x0f, 0x2e, 0x4d, 0x6c, 0x8b, 0xaa, 0xc9,
++			0xe8, 0x07, 0x26, 0x45, 0x64, 0x83, 0xa2, 0xc1,
++			0xe0, 0xff, 0x1e, 0x3d, 0x5c, 0x7b, 0x9a, 0xb9,
++			0xd8, 0xf7, 0x16, 0x35, 0x54, 0x73, 0x92, 0xb1,
++			0xd0, 0xef, 0x0e, 0x2d, 0x4c, 0x6b, 0x8a, 0xa9,
++			0xc8, 0xe7, 0x06, 0x25, 0x44, 0x63, 0x82, 0xa1,
++			0xc0, 0xdf, 0xfe, 0x1d, 0x3c, 0x5b, 0x7a, 0x99,
++			0xb8, 0xd7, 0xf6, 0x15, 0x34, 0x53, 0x72, 0x91,
++			0xb0, 0xcf, 0xee, 0x0d, 0x2c, 0x4b, 0x6a, 0x89,
++			0xa8, 0xc7, 0xe6, 0x05, 0x24, 0x43, 0x62, 0x81,
++			0xa0, 0xbf, 0xde, 0xfd, 0x1c, 0x3b, 0x5a, 0x79,
++			0x98, 0xb7, 0xd6, 0xf5, 0x14, 0x33, 0x52, 0x71,
++			0x90, 0xaf, 0xce, 0xed, 0x0c, 0x2b, 0x4a, 0x69,
++			0x88, 0xa7, 0xc6, 0xe5, 0x04, 0x23, 0x42, 0x61,
++			0x80, 0x9f, 0xbe, 0xdd, 0xfc, 0x1b, 0x3a, 0x59,
++			0x78, 0x97, 0xb6, 0xd5, 0xf4, 0x13, 0x32, 0x51,
++			0x70, 0x8f, 0xae, 0xcd, 0xec, 0x0b, 0x2a, 0x49,
++			0x68, 0x87, 0xa6, 0xc5, 0xe4, 0x03, 0x22, 0x41,
++			0x60, 0x7f, 0x9e, 0xbd, 0xdc, 0xfb, 0x1a, 0x39,
++			0x58, 0x77, 0x96, 0xb5, 0xd4, 0xf3, 0x12, 0x31,
++			0x50, 0x6f, 0x8e, 0xad, 0xcc, 0xeb, 0x0a, 0x29,
++			0x48, 0x67, 0x86, 0xa5, 0xc4, 0xe3, 0x02, 0x21,
++			0x40, 0x5f, 0x7e, 0x9d, 0xbc, 0xdb, 0xfa, 0x19,
++			0x38, 0x57, 0x76, 0x95, 0xb4, 0xd3, 0xf2, 0x11,
++			0x30, 0x4f, 0x6e, 0x8d, 0xac, 0xcb, 0xea, 0x09,
++			0x28, 0x47, 0x66, 0x85, 0xa4, 0xc3, 0xe2, 0x01,
++			0x20, 0x3f, 0x5e, 0x7d, 0x9c, 0xbb, 0xda, 0xf9,
++			0x18, 0x37, 0x56, 0x75, 0x94, 0xb3, 0xd2, 0xf1,
++			0x10, 0x2f, 0x4e, 0x6d, 0x8c, 0xab, 0xca, 0xe9,
++			0x08, 0x27, 0x46, 0x65, 0x84, 0xa3, 0xc2, 0xe1,
++			0x00, 0x21, 0x42, 0x63,
++		},
++		.ilen = 4100,
++		.result = {
++			0xb5, 0x81, 0xf5, 0x64, 0x18, 0x73, 0xe3, 0xf0,
++			0x4c, 0x13, 0xf2, 0x77, 0x18, 0x60, 0x65, 0x5e,
++			0x29, 0x01, 0xce, 0x98, 0x55, 0x53, 0xf9, 0x0c,
++			0x2a, 0x08, 0xd5, 0x09, 0xb3, 0x57, 0x55, 0x56,
++			0xc5, 0xe9, 0x56, 0x90, 0xcb, 0x6a, 0xa3, 0xc0,
++			0xff, 0xc4, 0x79, 0xb4, 0xd2, 0x97, 0x5d, 0xc4,
++			0x43, 0xd1, 0xfe, 0x94, 0x7b, 0x88, 0x06, 0x5a,
++			0xb2, 0x9e, 0x2c, 0xfc, 0x44, 0x03, 0xb7, 0x90,
++			0xa0, 0xc1, 0xba, 0x6a, 0x33, 0xb8, 0xc7, 0xb2,
++			0x9d, 0xe1, 0x12, 0x4f, 0xc0, 0x64, 0xd4, 0x01,
++			0xfe, 0x8c, 0x7a, 0x66, 0xf7, 0xe6, 0x5a, 0x91,
++			0xbb, 0xde, 0x56, 0x86, 0xab, 0x65, 0x21, 0x30,
++			0x00, 0x84, 0x65, 0x24, 0xa5, 0x7d, 0x85, 0xb4,
++			0xe3, 0x17, 0xed, 0x3a, 0xb7, 0x6f, 0xb4, 0x0b,
++			0x0b, 0xaf, 0x15, 0xae, 0x5a, 0x8f, 0xf2, 0x0c,
++			0x2f, 0x27, 0xf4, 0x09, 0xd8, 0xd2, 0x96, 0xb7,
++			0x71, 0xf2, 0xc5, 0x99, 0x4d, 0x7e, 0x7f, 0x75,
++			0x77, 0x89, 0x30, 0x8b, 0x59, 0xdb, 0xa2, 0xb2,
++			0xa0, 0xf3, 0x19, 0x39, 0x2b, 0xc5, 0x7e, 0x3f,
++			0x4f, 0xd9, 0xd3, 0x56, 0x28, 0x97, 0x44, 0xdc,
++			0xc0, 0x8b, 0x77, 0x24, 0xd9, 0x52, 0xe7, 0xc5,
++			0xaf, 0xf6, 0x7d, 0x59, 0xb2, 0x44, 0x05, 0x1d,
++			0xb1, 0xb0, 0x11, 0xa5, 0x0f, 0xec, 0x33, 0xe1,
++			0x6d, 0x1b, 0x4e, 0x1f, 0xff, 0x57, 0x91, 0xb4,
++			0x5b, 0x9a, 0x96, 0xc5, 0x53, 0xbc, 0xae, 0x20,
++			0x3c, 0xbb, 0x14, 0xe2, 0xe8, 0x22, 0x33, 0xc1,
++			0x5e, 0x76, 0x9e, 0x46, 0x99, 0xf6, 0x2a, 0x15,
++			0xc6, 0x97, 0x02, 0xa0, 0x66, 0x43, 0xd1, 0xa6,
++			0x31, 0xa6, 0x9f, 0xfb, 0xf4, 0xd3, 0x69, 0xe5,
++			0xcd, 0x76, 0x95, 0xb8, 0x7a, 0x82, 0x7f, 0x21,
++			0x45, 0xff, 0x3f, 0xce, 0x55, 0xf6, 0x95, 0x10,
++			0x08, 0x77, 0x10, 0x43, 0xc6, 0xf3, 0x09, 0xe5,
++			0x68, 0xe7, 0x3c, 0xad, 0x00, 0x52, 0x45, 0x0d,
++			0xfe, 0x2d, 0xc6, 0xc2, 0x94, 0x8c, 0x12, 0x1d,
++			0xe6, 0x25, 0xae, 0x98, 0x12, 0x8e, 0x19, 0x9c,
++			0x81, 0x68, 0xb1, 0x11, 0xf6, 0x69, 0xda, 0xe3,
++			0x62, 0x08, 0x18, 0x7a, 0x25, 0x49, 0x28, 0xac,
++			0xba, 0x71, 0x12, 0x0b, 0xe4, 0xa2, 0xe5, 0xc7,
++			0x5d, 0x8e, 0xec, 0x49, 0x40, 0x21, 0xbf, 0x5a,
++			0x98, 0xf3, 0x02, 0x68, 0x55, 0x03, 0x7f, 0x8a,
++			0xe5, 0x94, 0x0c, 0x32, 0x5c, 0x07, 0x82, 0x63,
++			0xaf, 0x6f, 0x91, 0x40, 0x84, 0x8e, 0x52, 0x25,
++			0xd0, 0xb0, 0x29, 0x53, 0x05, 0xe2, 0x50, 0x7a,
++			0x34, 0xeb, 0xc9, 0x46, 0x20, 0xa8, 0x3d, 0xde,
++			0x7f, 0x16, 0x5f, 0x36, 0xc5, 0x2e, 0xdc, 0xd1,
++			0x15, 0x47, 0xc7, 0x50, 0x40, 0x6d, 0x91, 0xc5,
++			0xe7, 0x93, 0x95, 0x1a, 0xd3, 0x57, 0xbc, 0x52,
++			0x33, 0xee, 0x14, 0x19, 0x22, 0x52, 0x89, 0xa7,
++			0x4a, 0x25, 0x56, 0x77, 0x4b, 0xca, 0xcf, 0x0a,
++			0xe1, 0xf5, 0x35, 0x85, 0x30, 0x7e, 0x59, 0x4a,
++			0xbd, 0x14, 0x5b, 0xdf, 0xe3, 0x46, 0xcb, 0xac,
++			0x1f, 0x6c, 0x96, 0x0e, 0xf4, 0x81, 0xd1, 0x99,
++			0xca, 0x88, 0x63, 0x3d, 0x02, 0x58, 0x6b, 0xa9,
++			0xe5, 0x9f, 0xb3, 0x00, 0xb2, 0x54, 0xc6, 0x74,
++			0x1c, 0xbf, 0x46, 0xab, 0x97, 0xcc, 0xf8, 0x54,
++			0x04, 0x07, 0x08, 0x52, 0xe6, 0xc0, 0xda, 0x93,
++			0x74, 0x7d, 0x93, 0x99, 0x5d, 0x78, 0x68, 0xa6,
++			0x2e, 0x6b, 0xd3, 0x6a, 0x69, 0xcc, 0x12, 0x6b,
++			0xd4, 0xc7, 0xa5, 0xc6, 0xe7, 0xf6, 0x03, 0x04,
++			0x5d, 0xcd, 0x61, 0x5e, 0x17, 0x40, 0xdc, 0xd1,
++			0x5c, 0xf5, 0x08, 0xdf, 0x5c, 0x90, 0x85, 0xa4,
++			0xaf, 0xf6, 0x78, 0xbb, 0x0d, 0xf1, 0xf4, 0xa4,
++			0x54, 0x26, 0x72, 0x9e, 0x61, 0xfa, 0x86, 0xcf,
++			0xe8, 0x9e, 0xa1, 0xe0, 0xc7, 0x48, 0x23, 0xae,
++			0x5a, 0x90, 0xae, 0x75, 0x0a, 0x74, 0x18, 0x89,
++			0x05, 0xb1, 0x92, 0xb2, 0x7f, 0xd0, 0x1b, 0xa6,
++			0x62, 0x07, 0x25, 0x01, 0xc7, 0xc2, 0x4f, 0xf9,
++			0xe8, 0xfe, 0x63, 0x95, 0x80, 0x07, 0xb4, 0x26,
++			0xcc, 0xd1, 0x26, 0xb6, 0xc4, 0x3f, 0x9e, 0xcb,
++			0x8e, 0x3b, 0x2e, 0x44, 0x16, 0xd3, 0x10, 0x9a,
++			0x95, 0x08, 0xeb, 0xc8, 0xcb, 0xeb, 0xbf, 0x6f,
++			0x0b, 0xcd, 0x1f, 0xc8, 0xca, 0x86, 0xaa, 0xec,
++			0x33, 0xe6, 0x69, 0xf4, 0x45, 0x25, 0x86, 0x3a,
++			0x22, 0x94, 0x4f, 0x00, 0x23, 0x6a, 0x44, 0xc2,
++			0x49, 0x97, 0x33, 0xab, 0x36, 0x14, 0x0a, 0x70,
++			0x24, 0xc3, 0xbe, 0x04, 0x3b, 0x79, 0xa0, 0xf9,
++			0xb8, 0xe7, 0x76, 0x29, 0x22, 0x83, 0xd7, 0xf2,
++			0x94, 0xf4, 0x41, 0x49, 0xba, 0x5f, 0x7b, 0x07,
++			0xb5, 0xfb, 0xdb, 0x03, 0x1a, 0x9f, 0xb6, 0x4c,
++			0xc2, 0x2e, 0x37, 0x40, 0x49, 0xc3, 0x38, 0x16,
++			0xe2, 0x4f, 0x77, 0x82, 0xb0, 0x68, 0x4c, 0x71,
++			0x1d, 0x57, 0x61, 0x9c, 0xd9, 0x4e, 0x54, 0x99,
++			0x47, 0x13, 0x28, 0x73, 0x3c, 0xbb, 0x00, 0x90,
++			0xf3, 0x4d, 0xc9, 0x0e, 0xfd, 0xe7, 0xb1, 0x71,
++			0xd3, 0x15, 0x79, 0xbf, 0xcc, 0x26, 0x2f, 0xbd,
++			0xad, 0x6c, 0x50, 0x69, 0x6c, 0x3e, 0x6d, 0x80,
++			0x9a, 0xea, 0x78, 0xaf, 0x19, 0xb2, 0x0d, 0x4d,
++			0xad, 0x04, 0x07, 0xae, 0x22, 0x90, 0x4a, 0x93,
++			0x32, 0x0e, 0x36, 0x9b, 0x1b, 0x46, 0xba, 0x3b,
++			0xb4, 0xac, 0xc6, 0xd1, 0xa2, 0x31, 0x53, 0x3b,
++			0x2a, 0x3d, 0x45, 0xfe, 0x03, 0x61, 0x10, 0x85,
++			0x17, 0x69, 0xa6, 0x78, 0xcc, 0x6c, 0x87, 0x49,
++			0x53, 0xf9, 0x80, 0x10, 0xde, 0x80, 0xa2, 0x41,
++			0x6a, 0xc3, 0x32, 0x02, 0xad, 0x6d, 0x3c, 0x56,
++			0x00, 0x71, 0x51, 0x06, 0xa7, 0xbd, 0xfb, 0xef,
++			0x3c, 0xb5, 0x9f, 0xfc, 0x48, 0x7d, 0x53, 0x7c,
++			0x66, 0xb0, 0x49, 0x23, 0xc4, 0x47, 0x10, 0x0e,
++			0xe5, 0x6c, 0x74, 0x13, 0xe6, 0xc5, 0x3f, 0xaa,
++			0xde, 0xff, 0x07, 0x44, 0xdd, 0x56, 0x1b, 0xad,
++			0x09, 0x77, 0xfb, 0x5b, 0x12, 0xb8, 0x0d, 0x38,
++			0x17, 0x37, 0x35, 0x7b, 0x9b, 0xbc, 0xfe, 0xd4,
++			0x7e, 0x8b, 0xda, 0x7e, 0x5b, 0x04, 0xa7, 0x22,
++			0xa7, 0x31, 0xa1, 0x20, 0x86, 0xc7, 0x1b, 0x99,
++			0xdb, 0xd1, 0x89, 0xf4, 0x94, 0xa3, 0x53, 0x69,
++			0x8d, 0xe7, 0xe8, 0x74, 0x11, 0x8d, 0x74, 0xd6,
++			0x07, 0x37, 0x91, 0x9f, 0xfd, 0x67, 0x50, 0x3a,
++			0xc9, 0xe1, 0xf4, 0x36, 0xd5, 0xa0, 0x47, 0xd1,
++			0xf9, 0xe5, 0x39, 0xa3, 0x31, 0xac, 0x07, 0x36,
++			0x23, 0xf8, 0x66, 0x18, 0x14, 0x28, 0x34, 0x0f,
++			0xb8, 0xd0, 0xe7, 0x29, 0xb3, 0x04, 0x4b, 0x55,
++			0x01, 0x41, 0xb2, 0x75, 0x8d, 0xcb, 0x96, 0x85,
++			0x3a, 0xfb, 0xab, 0x2b, 0x9e, 0xfa, 0x58, 0x20,
++			0x44, 0x1f, 0xc0, 0x14, 0x22, 0x75, 0x61, 0xe8,
++			0xaa, 0x19, 0xcf, 0xf1, 0x82, 0x56, 0xf4, 0xd7,
++			0x78, 0x7b, 0x3d, 0x5f, 0xb3, 0x9e, 0x0b, 0x8a,
++			0x57, 0x50, 0xdb, 0x17, 0x41, 0x65, 0x4d, 0xa3,
++			0x02, 0xc9, 0x9c, 0x9c, 0x53, 0xfb, 0x39, 0x39,
++			0x9b, 0x1d, 0x72, 0x24, 0xda, 0xb7, 0x39, 0xbe,
++			0x13, 0x3b, 0xfa, 0x29, 0xda, 0x9e, 0x54, 0x64,
++			0x6e, 0xba, 0xd8, 0xa1, 0xcb, 0xb3, 0x36, 0xfa,
++			0xcb, 0x47, 0x85, 0xe9, 0x61, 0x38, 0xbc, 0xbe,
++			0xc5, 0x00, 0x38, 0x2a, 0x54, 0xf7, 0xc4, 0xb9,
++			0xb3, 0xd3, 0x7b, 0xa0, 0xa0, 0xf8, 0x72, 0x7f,
++			0x8c, 0x8e, 0x82, 0x0e, 0xc6, 0x1c, 0x75, 0x9d,
++			0xca, 0x8e, 0x61, 0x87, 0xde, 0xad, 0x80, 0xd2,
++			0xf5, 0xf9, 0x80, 0xef, 0x15, 0x75, 0xaf, 0xf5,
++			0x80, 0xfb, 0xff, 0x6d, 0x1e, 0x25, 0xb7, 0x40,
++			0x61, 0x6a, 0x39, 0x5a, 0x6a, 0xb5, 0x31, 0xab,
++			0x97, 0x8a, 0x19, 0x89, 0x44, 0x40, 0xc0, 0xa6,
++			0xb4, 0x4e, 0x30, 0x32, 0x7b, 0x13, 0xe7, 0x67,
++			0xa9, 0x8b, 0x57, 0x04, 0xc2, 0x01, 0xa6, 0xf4,
++			0x28, 0x99, 0xad, 0x2c, 0x76, 0xa3, 0x78, 0xc2,
++			0x4a, 0xe6, 0xca, 0x5c, 0x50, 0x6a, 0xc1, 0xb0,
++			0x62, 0x4b, 0x10, 0x8e, 0x7c, 0x17, 0x43, 0xb3,
++			0x17, 0x66, 0x1c, 0x3e, 0x8d, 0x69, 0xf0, 0x5a,
++			0x71, 0xf5, 0x97, 0xdc, 0xd1, 0x45, 0xdd, 0x28,
++			0xf3, 0x5d, 0xdf, 0x53, 0x7b, 0x11, 0xe5, 0xbc,
++			0x4c, 0xdb, 0x1b, 0x51, 0x6b, 0xe9, 0xfb, 0x3d,
++			0xc1, 0xc3, 0x2c, 0xb9, 0x71, 0xf5, 0xb6, 0xb2,
++			0x13, 0x36, 0x79, 0x80, 0x53, 0xe8, 0xd3, 0xa6,
++			0x0a, 0xaf, 0xfd, 0x56, 0x97, 0xf7, 0x40, 0x8e,
++			0x45, 0xce, 0xf8, 0xb0, 0x9e, 0x5c, 0x33, 0x82,
++			0xb0, 0x44, 0x56, 0xfc, 0x05, 0x09, 0xe9, 0x2a,
++			0xac, 0x26, 0x80, 0x14, 0x1d, 0xc8, 0x3a, 0x35,
++			0x4c, 0x82, 0x97, 0xfd, 0x76, 0xb7, 0xa9, 0x0a,
++			0x35, 0x58, 0x79, 0x8e, 0x0f, 0x66, 0xea, 0xaf,
++			0x51, 0x6c, 0x09, 0xa9, 0x6e, 0x9b, 0xcb, 0x9a,
++			0x31, 0x47, 0xa0, 0x2f, 0x7c, 0x71, 0xb4, 0x4a,
++			0x11, 0xaa, 0x8c, 0x66, 0xc5, 0x64, 0xe6, 0x3a,
++			0x54, 0xda, 0x24, 0x6a, 0xc4, 0x41, 0x65, 0x46,
++			0x82, 0xa0, 0x0a, 0x0f, 0x5f, 0xfb, 0x25, 0xd0,
++			0x2c, 0x91, 0xa7, 0xee, 0xc4, 0x81, 0x07, 0x86,
++			0x75, 0x5e, 0x33, 0x69, 0x97, 0xe4, 0x2c, 0xa8,
++			0x9d, 0x9f, 0x0b, 0x6a, 0xbe, 0xad, 0x98, 0xda,
++			0x6d, 0x94, 0x41, 0xda, 0x2c, 0x1e, 0x89, 0xc4,
++			0xc2, 0xaf, 0x1e, 0x00, 0x05, 0x0b, 0x83, 0x60,
++			0xbd, 0x43, 0xea, 0x15, 0x23, 0x7f, 0xb9, 0xac,
++			0xee, 0x4f, 0x2c, 0xaf, 0x2a, 0xf3, 0xdf, 0xd0,
++			0xf3, 0x19, 0x31, 0xbb, 0x4a, 0x74, 0x84, 0x17,
++			0x52, 0x32, 0x2c, 0x7d, 0x61, 0xe4, 0xcb, 0xeb,
++			0x80, 0x38, 0x15, 0x52, 0xcb, 0x6f, 0xea, 0xe5,
++			0x73, 0x9c, 0xd9, 0x24, 0x69, 0xc6, 0x95, 0x32,
++			0x21, 0xc8, 0x11, 0xe4, 0xdc, 0x36, 0xd7, 0x93,
++			0x38, 0x66, 0xfb, 0xb2, 0x7f, 0x3a, 0xb9, 0xaf,
++			0x31, 0xdd, 0x93, 0x75, 0x78, 0x8a, 0x2c, 0x94,
++			0x87, 0x1a, 0x58, 0xec, 0x9e, 0x7d, 0x4d, 0xba,
++			0xe1, 0xe5, 0x4d, 0xfc, 0xbc, 0xa4, 0x2a, 0x14,
++			0xef, 0xcc, 0xa7, 0xec, 0xab, 0x43, 0x09, 0x18,
++			0xd3, 0xab, 0x68, 0xd1, 0x07, 0x99, 0x44, 0x47,
++			0xd6, 0x83, 0x85, 0x3b, 0x30, 0xea, 0xa9, 0x6b,
++			0x63, 0xea, 0xc4, 0x07, 0xfb, 0x43, 0x2f, 0xa4,
++			0xaa, 0xb0, 0xab, 0x03, 0x89, 0xce, 0x3f, 0x8c,
++			0x02, 0x7c, 0x86, 0x54, 0xbc, 0x88, 0xaf, 0x75,
++			0xd2, 0xdc, 0x63, 0x17, 0xd3, 0x26, 0xf6, 0x96,
++			0xa9, 0x3c, 0xf1, 0x61, 0x8c, 0x11, 0x18, 0xcc,
++			0xd6, 0xea, 0x5b, 0xe2, 0xcd, 0xf0, 0xf1, 0xb2,
++			0xe5, 0x35, 0x90, 0x1f, 0x85, 0x4c, 0x76, 0x5b,
++			0x66, 0xce, 0x44, 0xa4, 0x32, 0x9f, 0xe6, 0x7b,
++			0x71, 0x6e, 0x9f, 0x58, 0x15, 0x67, 0x72, 0x87,
++			0x64, 0x8e, 0x3a, 0x44, 0x45, 0xd4, 0x76, 0xfa,
++			0xc2, 0xf6, 0xef, 0x85, 0x05, 0x18, 0x7a, 0x9b,
++			0xba, 0x41, 0x54, 0xac, 0xf0, 0xfc, 0x59, 0x12,
++			0x3f, 0xdf, 0xa0, 0xe5, 0x8a, 0x65, 0xfd, 0x3a,
++			0x62, 0x8d, 0x83, 0x2c, 0x03, 0xbe, 0x05, 0x76,
++			0x2e, 0x53, 0x49, 0x97, 0x94, 0x33, 0xae, 0x40,
++			0x81, 0x15, 0xdb, 0x6e, 0xad, 0xaa, 0xf5, 0x4b,
++			0xe3, 0x98, 0x70, 0xdf, 0xe0, 0x7c, 0xcd, 0xdb,
++			0x02, 0xd4, 0x7d, 0x2f, 0xc1, 0xe6, 0xb4, 0xf3,
++			0xd7, 0x0d, 0x7a, 0xd9, 0x23, 0x9e, 0x87, 0x2d,
++			0xce, 0x87, 0xad, 0xcc, 0x72, 0x05, 0x00, 0x29,
++			0xdc, 0x73, 0x7f, 0x64, 0xc1, 0x15, 0x0e, 0xc2,
++			0xdf, 0xa7, 0x5f, 0xeb, 0x41, 0xa1, 0xcd, 0xef,
++			0x5c, 0x50, 0x79, 0x2a, 0x56, 0x56, 0x71, 0x8c,
++			0xac, 0xc0, 0x79, 0x50, 0x69, 0xca, 0x59, 0x32,
++			0x65, 0xf2, 0x54, 0xe4, 0x52, 0x38, 0x76, 0xd1,
++			0x5e, 0xde, 0x26, 0x9e, 0xfb, 0x75, 0x2e, 0x11,
++			0xb5, 0x10, 0xf4, 0x17, 0x73, 0xf5, 0x89, 0xc7,
++			0x4f, 0x43, 0x5c, 0x8e, 0x7c, 0xb9, 0x05, 0x52,
++			0x24, 0x40, 0x99, 0xfe, 0x9b, 0x85, 0x0b, 0x6c,
++			0x22, 0x3e, 0x8b, 0xae, 0x86, 0xa1, 0xd2, 0x79,
++			0x05, 0x68, 0x6b, 0xab, 0xe3, 0x41, 0x49, 0xed,
++			0x15, 0xa1, 0x8d, 0x40, 0x2d, 0x61, 0xdf, 0x1a,
++			0x59, 0xc9, 0x26, 0x8b, 0xef, 0x30, 0x4c, 0x88,
++			0x4b, 0x10, 0xf8, 0x8d, 0xa6, 0x92, 0x9f, 0x4b,
++			0xf3, 0xc4, 0x53, 0x0b, 0x89, 0x5d, 0x28, 0x92,
++			0xcf, 0x78, 0xb2, 0xc0, 0x5d, 0xed, 0x7e, 0xfc,
++			0xc0, 0x12, 0x23, 0x5f, 0x5a, 0x78, 0x86, 0x43,
++			0x6e, 0x27, 0xf7, 0x5a, 0xa7, 0x6a, 0xed, 0x19,
++			0x04, 0xf0, 0xb3, 0x12, 0xd1, 0xbd, 0x0e, 0x89,
++			0x6e, 0xbc, 0x96, 0xa8, 0xd8, 0x49, 0x39, 0x9f,
++			0x7e, 0x67, 0xf0, 0x2e, 0x3e, 0x01, 0xa9, 0xba,
++			0xec, 0x8b, 0x62, 0x8e, 0xcb, 0x4a, 0x70, 0x43,
++			0xc7, 0xc2, 0xc4, 0xca, 0x82, 0x03, 0x73, 0xe9,
++			0x11, 0xdf, 0xcf, 0x54, 0xea, 0xc9, 0xb0, 0x95,
++			0x51, 0xc0, 0x13, 0x3d, 0x92, 0x05, 0xfa, 0xf4,
++			0xa9, 0x34, 0xc8, 0xce, 0x6c, 0x3d, 0x54, 0xcc,
++			0xc4, 0xaf, 0xf1, 0xdc, 0x11, 0x44, 0x26, 0xa2,
++			0xaf, 0xf1, 0x85, 0x75, 0x7d, 0x03, 0x61, 0x68,
++			0x4e, 0x78, 0xc6, 0x92, 0x7d, 0x86, 0x7d, 0x77,
++			0xdc, 0x71, 0x72, 0xdb, 0xc6, 0xae, 0xa1, 0xcb,
++			0x70, 0x9a, 0x0b, 0x19, 0xbe, 0x4a, 0x6c, 0x2a,
++			0xe2, 0xba, 0x6c, 0x64, 0x9a, 0x13, 0x28, 0xdf,
++			0x85, 0x75, 0xe6, 0x43, 0xf6, 0x87, 0x08, 0x68,
++			0x6e, 0xba, 0x6e, 0x79, 0x9f, 0x04, 0xbc, 0x23,
++			0x50, 0xf6, 0x33, 0x5c, 0x1f, 0x24, 0x25, 0xbe,
++			0x33, 0x47, 0x80, 0x45, 0x56, 0xa3, 0xa7, 0xd7,
++			0x7a, 0xb1, 0x34, 0x0b, 0x90, 0x3c, 0x9c, 0xad,
++			0x44, 0x5f, 0x9e, 0x0e, 0x9d, 0xd4, 0xbd, 0x93,
++			0x5e, 0xfa, 0x3c, 0xe0, 0xb0, 0xd9, 0xed, 0xf3,
++			0xd6, 0x2e, 0xff, 0x24, 0xd8, 0x71, 0x6c, 0xed,
++			0xaf, 0x55, 0xeb, 0x22, 0xac, 0x93, 0x68, 0x32,
++			0x05, 0x5b, 0x47, 0xdd, 0xc6, 0x4a, 0xcb, 0xc7,
++			0x10, 0xe1, 0x3c, 0x92, 0x1a, 0xf3, 0x23, 0x78,
++			0x2b, 0xa1, 0xd2, 0x80, 0xf4, 0x12, 0xb1, 0x20,
++			0x8f, 0xff, 0x26, 0x35, 0xdd, 0xfb, 0xc7, 0x4e,
++			0x78, 0xf1, 0x2d, 0x50, 0x12, 0x77, 0xa8, 0x60,
++			0x7c, 0x0f, 0xf5, 0x16, 0x2f, 0x63, 0x70, 0x2a,
++			0xc0, 0x96, 0x80, 0x4e, 0x0a, 0xb4, 0x93, 0x35,
++			0x5d, 0x1d, 0x3f, 0x56, 0xf7, 0x2f, 0xbb, 0x90,
++			0x11, 0x16, 0x8f, 0xa2, 0xec, 0x47, 0xbe, 0xac,
++			0x56, 0x01, 0x26, 0x56, 0xb1, 0x8c, 0xb2, 0x10,
++			0xf9, 0x1a, 0xca, 0xf5, 0xd1, 0xb7, 0x39, 0x20,
++			0x63, 0xf1, 0x69, 0x20, 0x4f, 0x13, 0x12, 0x1f,
++			0x5b, 0x65, 0xfc, 0x98, 0xf7, 0xc4, 0x7a, 0xbe,
++			0xf7, 0x26, 0x4d, 0x2b, 0x84, 0x7b, 0x42, 0xad,
++			0xd8, 0x7a, 0x0a, 0xb4, 0xd8, 0x74, 0xbf, 0xc1,
++			0xf0, 0x6e, 0xb4, 0x29, 0xa3, 0xbb, 0xca, 0x46,
++			0x67, 0x70, 0x6a, 0x2d, 0xce, 0x0e, 0xa2, 0x8a,
++			0xa9, 0x87, 0xbf, 0x05, 0xc4, 0xc1, 0x04, 0xa3,
++			0xab, 0xd4, 0x45, 0x43, 0x8c, 0xb6, 0x02, 0xb0,
++			0x41, 0xc8, 0xfc, 0x44, 0x3d, 0x59, 0xaa, 0x2e,
++			0x44, 0x21, 0x2a, 0x8d, 0x88, 0x9d, 0x57, 0xf4,
++			0xa0, 0x02, 0x77, 0xb8, 0xa6, 0xa0, 0xe6, 0x75,
++			0x5c, 0x82, 0x65, 0x3e, 0x03, 0x5c, 0x29, 0x8f,
++			0x38, 0x55, 0xab, 0x33, 0x26, 0xef, 0x9f, 0x43,
++			0x52, 0xfd, 0x68, 0xaf, 0x36, 0xb4, 0xbb, 0x9a,
++			0x58, 0x09, 0x09, 0x1b, 0xc3, 0x65, 0x46, 0x46,
++			0x1d, 0xa7, 0x94, 0x18, 0x23, 0x50, 0x2c, 0xca,
++			0x2c, 0x55, 0x19, 0x97, 0x01, 0x9d, 0x93, 0x3b,
++			0x63, 0x86, 0xf2, 0x03, 0x67, 0x45, 0xd2, 0x72,
++			0x28, 0x52, 0x6c, 0xf4, 0xe3, 0x1c, 0xb5, 0x11,
++			0x13, 0xf1, 0xeb, 0x21, 0xc7, 0xd9, 0x56, 0x82,
++			0x2b, 0x82, 0x39, 0xbd, 0x69, 0x54, 0xed, 0x62,
++			0xc3, 0xe2, 0xde, 0x73, 0xd4, 0x6a, 0x12, 0xae,
++			0x13, 0x21, 0x7f, 0x4b, 0x5b, 0xfc, 0xbf, 0xe8,
++			0x2b, 0xbe, 0x56, 0xba, 0x68, 0x8b, 0x9a, 0xb1,
++			0x6e, 0xfa, 0xbf, 0x7e, 0x5a, 0x4b, 0xf1, 0xac,
++			0x98, 0x65, 0x85, 0xd1, 0x93, 0x53, 0xd3, 0x7b,
++			0x09, 0xdd, 0x4b, 0x10, 0x6d, 0x84, 0xb0, 0x13,
++			0x65, 0xbd, 0xcf, 0x52, 0x09, 0xc4, 0x85, 0xe2,
++			0x84, 0x74, 0x15, 0x65, 0xb7, 0xf7, 0x51, 0xaf,
++			0x55, 0xad, 0xa4, 0xd1, 0x22, 0x54, 0x70, 0x94,
++			0xa0, 0x1c, 0x90, 0x41, 0xfd, 0x99, 0xd7, 0x5a,
++			0x31, 0xef, 0xaa, 0x25, 0xd0, 0x7f, 0x4f, 0xea,
++			0x1d, 0x55, 0x42, 0xe5, 0x49, 0xb0, 0xd0, 0x46,
++			0x62, 0x36, 0x43, 0xb2, 0x82, 0x15, 0x75, 0x50,
++			0xa4, 0x72, 0xeb, 0x54, 0x27, 0x1f, 0x8a, 0xe4,
++			0x7d, 0xe9, 0x66, 0xc5, 0xf1, 0x53, 0xa4, 0xd1,
++			0x0c, 0xeb, 0xb8, 0xf8, 0xbc, 0xd4, 0xe2, 0xe7,
++			0xe1, 0xf8, 0x4b, 0xcb, 0xa9, 0xa1, 0xaf, 0x15,
++			0x83, 0xcb, 0x72, 0xd0, 0x33, 0x79, 0x00, 0x2d,
++			0x9f, 0xd7, 0xf1, 0x2e, 0x1e, 0x10, 0xe4, 0x45,
++			0xc0, 0x75, 0x3a, 0x39, 0xea, 0x68, 0xf7, 0x5d,
++			0x1b, 0x73, 0x8f, 0xe9, 0x8e, 0x0f, 0x72, 0x47,
++			0xae, 0x35, 0x0a, 0x31, 0x7a, 0x14, 0x4d, 0x4a,
++			0x6f, 0x47, 0xf7, 0x7e, 0x91, 0x6e, 0x74, 0x8b,
++			0x26, 0x47, 0xf9, 0xc3, 0xf9, 0xde, 0x70, 0xf5,
++			0x61, 0xab, 0xa9, 0x27, 0x9f, 0x82, 0xe4, 0x9c,
++			0x89, 0x91, 0x3f, 0x2e, 0x6a, 0xfd, 0xb5, 0x49,
++			0xe9, 0xfd, 0x59, 0x14, 0x36, 0x49, 0x40, 0x6d,
++			0x32, 0xd8, 0x85, 0x42, 0xf3, 0xa5, 0xdf, 0x0c,
++			0xa8, 0x27, 0xd7, 0x54, 0xe2, 0x63, 0x2f, 0xf2,
++			0x7e, 0x8b, 0x8b, 0xe7, 0xf1, 0x9a, 0x95, 0x35,
++			0x43, 0xdc, 0x3a, 0xe4, 0xb6, 0xf4, 0xd0, 0xdf,
++			0x9c, 0xcb, 0x94, 0xf3, 0x21, 0xa0, 0x77, 0x50,
++			0xe2, 0xc6, 0xc4, 0xc6, 0x5f, 0x09, 0x64, 0x5b,
++			0x92, 0x90, 0xd8, 0xe1, 0xd1, 0xed, 0x4b, 0x42,
++			0xd7, 0x37, 0xaf, 0x65, 0x3d, 0x11, 0x39, 0xb6,
++			0x24, 0x8a, 0x60, 0xae, 0xd6, 0x1e, 0xbf, 0x0e,
++			0x0d, 0xd7, 0xdc, 0x96, 0x0e, 0x65, 0x75, 0x4e,
++			0x29, 0x06, 0x9d, 0xa4, 0x51, 0x3a, 0x10, 0x63,
++			0x8f, 0x17, 0x07, 0xd5, 0x8e, 0x3c, 0xf4, 0x28,
++			0x00, 0x5a, 0x5b, 0x05, 0x19, 0xd8, 0xc0, 0x6c,
++			0xe5, 0x15, 0xe4, 0x9c, 0x9d, 0x71, 0x9d, 0x5e,
++			0x94, 0x29, 0x1a, 0xa7, 0x80, 0xfa, 0x0e, 0x33,
++			0x03, 0xdd, 0xb7, 0x3e, 0x9a, 0xa9, 0x26, 0x18,
++			0x37, 0xa9, 0x64, 0x08, 0x4d, 0x94, 0x5a, 0x88,
++			0xca, 0x35, 0xce, 0x81, 0x02, 0xe3, 0x1f, 0x1b,
++			0x89, 0x1a, 0x77, 0x85, 0xe3, 0x41, 0x6d, 0x32,
++			0x42, 0x19, 0x23, 0x7d, 0xc8, 0x73, 0xee, 0x25,
++			0x85, 0x0d, 0xf8, 0x31, 0x25, 0x79, 0x1b, 0x6f,
++			0x79, 0x25, 0xd2, 0xd8, 0xd4, 0x23, 0xfd, 0xf7,
++			0x82, 0x36, 0x6a, 0x0c, 0x46, 0x22, 0x15, 0xe9,
++			0xff, 0x72, 0x41, 0x91, 0x91, 0x7d, 0x3a, 0xb7,
++			0xdd, 0x65, 0x99, 0x70, 0xf6, 0x8d, 0x84, 0xf8,
++			0x67, 0x15, 0x20, 0x11, 0xd6, 0xb2, 0x55, 0x7b,
++			0xdb, 0x87, 0xee, 0xef, 0x55, 0x89, 0x2a, 0x59,
++			0x2b, 0x07, 0x8f, 0x43, 0x8a, 0x59, 0x3c, 0x01,
++			0x8b, 0x65, 0x54, 0xa1, 0x66, 0xd5, 0x38, 0xbd,
++			0xc6, 0x30, 0xa9, 0xcc, 0x49, 0xb6, 0xa8, 0x1b,
++			0xb8, 0xc0, 0x0e, 0xe3, 0x45, 0x28, 0xe2, 0xff,
++			0x41, 0x9f, 0x7e, 0x7c, 0xd1, 0xae, 0x9e, 0x25,
++			0x3f, 0x4c, 0x7c, 0x7c, 0xf4, 0xa8, 0x26, 0x4d,
++			0x5c, 0xfd, 0x4b, 0x27, 0x18, 0xf9, 0x61, 0x76,
++			0x48, 0xba, 0x0c, 0x6b, 0xa9, 0x4d, 0xfc, 0xf5,
++			0x3b, 0x35, 0x7e, 0x2f, 0x4a, 0xa9, 0xc2, 0x9a,
++			0xae, 0xab, 0x86, 0x09, 0x89, 0xc9, 0xc2, 0x40,
++			0x39, 0x2c, 0x81, 0xb3, 0xb8, 0x17, 0x67, 0xc2,
++			0x0d, 0x32, 0x4a, 0x3a, 0x67, 0x81, 0xd7, 0x1a,
++			0x34, 0x52, 0xc5, 0xdb, 0x0a, 0xf5, 0x63, 0x39,
++			0xea, 0x1f, 0xe1, 0x7c, 0xa1, 0x9e, 0xc1, 0x35,
++			0xe3, 0xb1, 0x18, 0x45, 0x67, 0xf9, 0x22, 0x38,
++			0x95, 0xd9, 0x34, 0x34, 0x86, 0xc6, 0x41, 0x94,
++			0x15, 0xf9, 0x5b, 0x41, 0xa6, 0x87, 0x8b, 0xf8,
++			0xd5, 0xe1, 0x1b, 0xe2, 0x5b, 0xf3, 0x86, 0x10,
++			0xff, 0xe6, 0xae, 0x69, 0x76, 0xbc, 0x0d, 0xb4,
++			0x09, 0x90, 0x0c, 0xa2, 0x65, 0x0c, 0xad, 0x74,
++			0xf5, 0xd7, 0xff, 0xda, 0xc1, 0xce, 0x85, 0xbe,
++			0x00, 0xa7, 0xff, 0x4d, 0x2f, 0x65, 0xd3, 0x8c,
++			0x86, 0x2d, 0x05, 0xe8, 0xed, 0x3e, 0x6b, 0x8b,
++			0x0f, 0x3d, 0x83, 0x8c, 0xf1, 0x1d, 0x5b, 0x96,
++			0x2e, 0xb1, 0x9c, 0xc2, 0x98, 0xe1, 0x70, 0xb9,
++			0xba, 0x5c, 0x8a, 0x43, 0xd6, 0x34, 0xa7, 0x2d,
++			0xc9, 0x92, 0xae, 0xf2, 0xa5, 0x7b, 0x05, 0x49,
++			0xa7, 0x33, 0x34, 0x86, 0xca, 0xe4, 0x96, 0x23,
++			0x76, 0x5b, 0xf2, 0xc6, 0xf1, 0x51, 0x28, 0x42,
++			0x7b, 0xcc, 0x76, 0x8f, 0xfa, 0xa2, 0xad, 0x31,
++			0xd4, 0xd6, 0x7a, 0x6d, 0x25, 0x25, 0x54, 0xe4,
++			0x3f, 0x50, 0x59, 0xe1, 0x5c, 0x05, 0xb7, 0x27,
++			0x48, 0xbf, 0x07, 0xec, 0x1b, 0x13, 0xbe, 0x2b,
++			0xa1, 0x57, 0x2b, 0xd5, 0xab, 0xd7, 0xd0, 0x4c,
++			0x1e, 0xcb, 0x71, 0x9b, 0xc5, 0x90, 0x85, 0xd3,
++			0xde, 0x59, 0xec, 0x71, 0xeb, 0x89, 0xbb, 0xd0,
++			0x09, 0x50, 0xe1, 0x16, 0x3f, 0xfd, 0x1c, 0x34,
++			0xc3, 0x1c, 0xa1, 0x10, 0x77, 0x53, 0x98, 0xef,
++			0xf2, 0xfd, 0xa5, 0x01, 0x59, 0xc2, 0x9b, 0x26,
++			0xc7, 0x42, 0xd9, 0x49, 0xda, 0x58, 0x2b, 0x6e,
++			0x9f, 0x53, 0x19, 0x76, 0x7e, 0xd9, 0xc9, 0x0e,
++			0x68, 0xc8, 0x7f, 0x51, 0x22, 0x42, 0xef, 0x49,
++			0xa4, 0x55, 0xb6, 0x36, 0xac, 0x09, 0xc7, 0x31,
++			0x88, 0x15, 0x4b, 0x2e, 0x8f, 0x3a, 0x08, 0xf7,
++			0xd8, 0xf7, 0xa8, 0xc5, 0xa9, 0x33, 0xa6, 0x45,
++			0xe4, 0xc4, 0x94, 0x76, 0xf3, 0x0d, 0x8f, 0x7e,
++			0xc8, 0xf6, 0xbc, 0x23, 0x0a, 0xb6, 0x4c, 0xd3,
++			0x6a, 0xcd, 0x36, 0xc2, 0x90, 0x5c, 0x5c, 0x3c,
++			0x65, 0x7b, 0xc2, 0xd6, 0xcc, 0xe6, 0x0d, 0x87,
++			0x73, 0x2e, 0x71, 0x79, 0x16, 0x06, 0x63, 0x28,
++			0x09, 0x15, 0xd8, 0x89, 0x38, 0x38, 0x3d, 0xb5,
++			0x42, 0x1c, 0x08, 0x24, 0xf7, 0x2a, 0xd2, 0x9d,
++			0xc8, 0xca, 0xef, 0xf9, 0x27, 0xd8, 0x07, 0x86,
++			0xf7, 0x43, 0x0b, 0x55, 0x15, 0x3f, 0x9f, 0x83,
++			0xef, 0xdc, 0x49, 0x9d, 0x2a, 0xc1, 0x54, 0x62,
++			0xbd, 0x9b, 0x66, 0x55, 0x9f, 0xb7, 0x12, 0xf3,
++			0x1b, 0x4d, 0x9d, 0x2a, 0x5c, 0xed, 0x87, 0x75,
++			0x87, 0x26, 0xec, 0x61, 0x2c, 0xb4, 0x0f, 0x89,
++			0xb0, 0xfb, 0x2e, 0x68, 0x5d, 0x15, 0xc7, 0x8d,
++			0x2e, 0xc0, 0xd9, 0xec, 0xaf, 0x4f, 0xd2, 0x25,
++			0x29, 0xe8, 0xd2, 0x26, 0x2b, 0x67, 0xe9, 0xfc,
++			0x2b, 0xa8, 0x67, 0x96, 0x12, 0x1f, 0x5b, 0x96,
++			0xc6, 0x14, 0x53, 0xaf, 0x44, 0xea, 0xd6, 0xe2,
++			0x94, 0x98, 0xe4, 0x12, 0x93, 0x4c, 0x92, 0xe0,
++			0x18, 0xa5, 0x8d, 0x2d, 0xe4, 0x71, 0x3c, 0x47,
++			0x4c, 0xf7, 0xe6, 0x47, 0x9e, 0xc0, 0x68, 0xdf,
++			0xd4, 0xf5, 0x5a, 0x74, 0xb1, 0x2b, 0x29, 0x03,
++			0x19, 0x07, 0xaf, 0x90, 0x62, 0x5c, 0x68, 0x98,
++			0x48, 0x16, 0x11, 0x02, 0x9d, 0xee, 0xb4, 0x9b,
++			0xe5, 0x42, 0x7f, 0x08, 0xfd, 0x16, 0x32, 0x0b,
++			0xd0, 0xb3, 0xfa, 0x2b, 0xb7, 0x99, 0xf9, 0x29,
++			0xcd, 0x20, 0x45, 0x9f, 0xb3, 0x1a, 0x5d, 0xa2,
++			0xaf, 0x4d, 0xe0, 0xbd, 0x42, 0x0d, 0xbc, 0x74,
++			0x99, 0x9c, 0x8e, 0x53, 0x1a, 0xb4, 0x3e, 0xbd,
++			0xa2, 0x9a, 0x2d, 0xf7, 0xf8, 0x39, 0x0f, 0x67,
++			0x63, 0xfc, 0x6b, 0xc0, 0xaf, 0xb3, 0x4b, 0x4f,
++			0x55, 0xc4, 0xcf, 0xa7, 0xc8, 0x04, 0x11, 0x3e,
++			0x14, 0x32, 0xbb, 0x1b, 0x38, 0x77, 0xd6, 0x7f,
++			0x54, 0x4c, 0xdf, 0x75, 0xf3, 0x07, 0x2d, 0x33,
++			0x9b, 0xa8, 0x20, 0xe1, 0x7b, 0x12, 0xb5, 0xf3,
++			0xef, 0x2f, 0xce, 0x72, 0xe5, 0x24, 0x60, 0xc1,
++			0x30, 0xe2, 0xab, 0xa1, 0x8e, 0x11, 0x09, 0xa8,
++			0x21, 0x33, 0x44, 0xfe, 0x7f, 0x35, 0x32, 0x93,
++			0x39, 0xa7, 0xad, 0x8b, 0x79, 0x06, 0xb2, 0xcb,
++			0x4e, 0xa9, 0x5f, 0xc7, 0xba, 0x74, 0x29, 0xec,
++			0x93, 0xa0, 0x4e, 0x54, 0x93, 0xc0, 0xbc, 0x55,
++			0x64, 0xf0, 0x48, 0xe5, 0x57, 0x99, 0xee, 0x75,
++			0xd6, 0x79, 0x0f, 0x66, 0xb7, 0xc6, 0x57, 0x76,
++			0xf7, 0xb7, 0xf3, 0x9c, 0xc5, 0x60, 0xe8, 0x7f,
++			0x83, 0x76, 0xd6, 0x0e, 0xaa, 0xe6, 0x90, 0x39,
++			0x1d, 0xa6, 0x32, 0x6a, 0x34, 0xe3, 0x55, 0xf8,
++			0x58, 0xa0, 0x58, 0x7d, 0x33, 0xe0, 0x22, 0x39,
++			0x44, 0x64, 0x87, 0x86, 0x5a, 0x2f, 0xa7, 0x7e,
++			0x0f, 0x38, 0xea, 0xb0, 0x30, 0xcc, 0x61, 0xa5,
++			0x6a, 0x32, 0xae, 0x1e, 0xf7, 0xe9, 0xd0, 0xa9,
++			0x0c, 0x32, 0x4b, 0xb5, 0x49, 0x28, 0xab, 0x85,
++			0x2f, 0x8e, 0x01, 0x36, 0x38, 0x52, 0xd0, 0xba,
++			0xd6, 0x02, 0x78, 0xf8, 0x0e, 0x3e, 0x9c, 0x8b,
++			0x6b, 0x45, 0x99, 0x3f, 0x5c, 0xfe, 0x58, 0xf1,
++			0x5c, 0x94, 0x04, 0xe1, 0xf5, 0x18, 0x6d, 0x51,
++			0xb2, 0x5d, 0x18, 0x20, 0xb6, 0xc2, 0x9a, 0x42,
++			0x1d, 0xb3, 0xab, 0x3c, 0xb6, 0x3a, 0x13, 0x03,
++			0xb2, 0x46, 0x82, 0x4f, 0xfc, 0x64, 0xbc, 0x4f,
++			0xca, 0xfa, 0x9c, 0xc0, 0xd5, 0xa7, 0xbd, 0x11,
++			0xb7, 0xe4, 0x5a, 0xf6, 0x6f, 0x4d, 0x4d, 0x54,
++			0xea, 0xa4, 0x98, 0x66, 0xd4, 0x22, 0x3b, 0xd3,
++			0x8f, 0x34, 0x47, 0xd9, 0x7c, 0xf4, 0x72, 0x3b,
++			0x4d, 0x02, 0x77, 0xf6, 0xd6, 0xdd, 0x08, 0x0a,
++			0x81, 0xe1, 0x86, 0x89, 0x3e, 0x56, 0x10, 0x3c,
++			0xba, 0xd7, 0x81, 0x8c, 0x08, 0xbc, 0x8b, 0xe2,
++			0x53, 0xec, 0xa7, 0x89, 0xee, 0xc8, 0x56, 0xb5,
++			0x36, 0x2c, 0xb2, 0x03, 0xba, 0x99, 0xdd, 0x7c,
++			0x48, 0xa0, 0xb0, 0xbc, 0x91, 0x33, 0xe9, 0xa8,
++			0xcb, 0xcd, 0xcf, 0x59, 0x5f, 0x1f, 0x15, 0xe2,
++			0x56, 0xf5, 0x4e, 0x01, 0x35, 0x27, 0x45, 0x77,
++			0x47, 0xc8, 0xbc, 0xcb, 0x7e, 0x39, 0xc1, 0x97,
++			0x28, 0xd3, 0x84, 0xfc, 0x2c, 0x3e, 0xc8, 0xad,
++			0x9c, 0xf8, 0x8a, 0x61, 0x9c, 0x28, 0xaa, 0xc5,
++			0x99, 0x20, 0x43, 0x85, 0x9d, 0xa5, 0xe2, 0x8b,
++			0xb8, 0xae, 0xeb, 0xd0, 0x32, 0x0d, 0x52, 0x78,
++			0x09, 0x56, 0x3f, 0xc7, 0xd8, 0x7e, 0x26, 0xfc,
++			0x37, 0xfb, 0x6f, 0x04, 0xfc, 0xfa, 0x92, 0x10,
++			0xac, 0xf8, 0x3e, 0x21, 0xdc, 0x8c, 0x21, 0x16,
++			0x7d, 0x67, 0x6e, 0xf6, 0xcd, 0xda, 0xb6, 0x98,
++			0x23, 0xab, 0x23, 0x3c, 0xb2, 0x10, 0xa0, 0x53,
++			0x5a, 0x56, 0x9f, 0xc5, 0xd0, 0xff, 0xbb, 0xe4,
++			0x98, 0x3c, 0x69, 0x1e, 0xdb, 0x38, 0x8f, 0x7e,
++			0x0f, 0xd2, 0x98, 0x88, 0x81, 0x8b, 0x45, 0x67,
++			0xea, 0x33, 0xf1, 0xeb, 0xe9, 0x97, 0x55, 0x2e,
++			0xd9, 0xaa, 0xeb, 0x5a, 0xec, 0xda, 0xe1, 0x68,
++			0xa8, 0x9d, 0x3c, 0x84, 0x7c, 0x05, 0x3d, 0x62,
++			0x87, 0x8f, 0x03, 0x21, 0x28, 0x95, 0x0c, 0x89,
++			0x25, 0x22, 0x4a, 0xb0, 0x93, 0xa9, 0x50, 0xa2,
++			0x2f, 0x57, 0x6e, 0x18, 0x42, 0x19, 0x54, 0x0c,
++			0x55, 0x67, 0xc6, 0x11, 0x49, 0xf4, 0x5c, 0xd2,
++			0xe9, 0x3d, 0xdd, 0x8b, 0x48, 0x71, 0x21, 0x00,
++			0xc3, 0x9a, 0x6c, 0x85, 0x74, 0x28, 0x83, 0x4a,
++			0x1b, 0x31, 0x05, 0xe1, 0x06, 0x92, 0xe7, 0xda,
++			0x85, 0x73, 0x78, 0x45, 0x20, 0x7f, 0xae, 0x13,
++			0x7c, 0x33, 0x06, 0x22, 0xf4, 0x83, 0xf9, 0x35,
++			0x3f, 0x6c, 0x71, 0xa8, 0x4e, 0x48, 0xbe, 0x9b,
++			0xce, 0x8a, 0xba, 0xda, 0xbe, 0x28, 0x08, 0xf7,
++			0xe2, 0x14, 0x8c, 0x71, 0xea, 0x72, 0xf9, 0x33,
++			0xf2, 0x88, 0x3f, 0xd7, 0xbb, 0x69, 0x6c, 0x29,
++			0x19, 0xdc, 0x84, 0xce, 0x1f, 0x12, 0x4f, 0xc8,
++			0xaf, 0xa5, 0x04, 0xba, 0x5a, 0xab, 0xb0, 0xd9,
++			0x14, 0x1f, 0x6c, 0x68, 0x98, 0x39, 0x89, 0x7a,
++			0xd9, 0xd8, 0x2f, 0xdf, 0xa8, 0x47, 0x4a, 0x25,
++			0xe2, 0xfb, 0x33, 0xf4, 0x59, 0x78, 0xe1, 0x68,
++			0x85, 0xcf, 0xfe, 0x59, 0x20, 0xd4, 0x05, 0x1d,
++			0x80, 0x99, 0xae, 0xbc, 0xca, 0xae, 0x0f, 0x2f,
++			0x65, 0x43, 0x34, 0x8e, 0x7e, 0xac, 0xd3, 0x93,
++			0x2f, 0xac, 0x6d, 0x14, 0x3d, 0x02, 0x07, 0x70,
++			0x9d, 0xa4, 0xf3, 0x1b, 0x5c, 0x36, 0xfc, 0x01,
++			0x73, 0x34, 0x85, 0x0c, 0x6c, 0xd6, 0xf1, 0xbd,
++			0x3f, 0xdf, 0xee, 0xf5, 0xd9, 0xba, 0x56, 0xef,
++			0xf4, 0x9b, 0x6b, 0xee, 0x9f, 0x5a, 0x78, 0x6d,
++			0x32, 0x19, 0xf4, 0xf7, 0xf8, 0x4c, 0x69, 0x0b,
++			0x4b, 0xbc, 0xbb, 0xb7, 0xf2, 0x85, 0xaf, 0x70,
++			0x75, 0x24, 0x6c, 0x54, 0xa7, 0x0e, 0x4d, 0x1d,
++			0x01, 0xbf, 0x08, 0xac, 0xcf, 0x7f, 0x2c, 0xe3,
++			0x14, 0x89, 0x5e, 0x70, 0x5a, 0x99, 0x92, 0xcd,
++			0x01, 0x84, 0xc8, 0xd2, 0xab, 0xe5, 0x4f, 0x58,
++			0xe7, 0x0f, 0x2f, 0x0e, 0xff, 0x68, 0xea, 0xfd,
++			0x15, 0xb3, 0x17, 0xe6, 0xb0, 0xe7, 0x85, 0xd8,
++			0x23, 0x2e, 0x05, 0xc7, 0xc9, 0xc4, 0x46, 0x1f,
++			0xe1, 0x9e, 0x49, 0x20, 0x23, 0x24, 0x4d, 0x7e,
++			0x29, 0x65, 0xff, 0xf4, 0xb6, 0xfd, 0x1a, 0x85,
++			0xc4, 0x16, 0xec, 0xfc, 0xea, 0x7b, 0xd6, 0x2c,
++			0x43, 0xf8, 0xb7, 0xbf, 0x79, 0xc0, 0x85, 0xcd,
++			0xef, 0xe1, 0x98, 0xd3, 0xa5, 0xf7, 0x90, 0x8c,
++			0xe9, 0x7f, 0x80, 0x6b, 0xd2, 0xac, 0x4c, 0x30,
++			0xa7, 0xc6, 0x61, 0x6c, 0xd2, 0xf9, 0x2c, 0xff,
++			0x30, 0xbc, 0x22, 0x81, 0x7d, 0x93, 0x12, 0xe4,
++			0x0a, 0xcd, 0xaf, 0xdd, 0xe8, 0xab, 0x0a, 0x1e,
++			0x13, 0xa4, 0x27, 0xc3, 0x5f, 0xf7, 0x4b, 0xbb,
++			0x37, 0x09, 0x4b, 0x91, 0x6f, 0x92, 0x4f, 0xaf,
++			0x52, 0xee, 0xdf, 0xef, 0x09, 0x6f, 0xf7, 0x5c,
++			0x6e, 0x12, 0x17, 0x72, 0x63, 0x57, 0xc7, 0xba,
++			0x3b, 0x6b, 0x38, 0x32, 0x73, 0x1b, 0x9c, 0x80,
++			0xc1, 0x7a, 0xc6, 0xcf, 0xcd, 0x35, 0xc0, 0x6b,
++			0x31, 0x1a, 0x6b, 0xe9, 0xd8, 0x2c, 0x29, 0x3f,
++			0x96, 0xfb, 0xb6, 0xcd, 0x13, 0x91, 0x3b, 0xc2,
++			0xd2, 0xa3, 0x31, 0x8d, 0xa4, 0xcd, 0x57, 0xcd,
++			0x13, 0x3d, 0x64, 0xfd, 0x06, 0xce, 0xe6, 0xdc,
++			0x0c, 0x24, 0x43, 0x31, 0x40, 0x57, 0xf1, 0x72,
++			0x17, 0xe3, 0x3a, 0x63, 0x6d, 0x35, 0xcf, 0x5d,
++			0x97, 0x40, 0x59, 0xdd, 0xf7, 0x3c, 0x02, 0xf7,
++			0x1c, 0x7e, 0x05, 0xbb, 0xa9, 0x0d, 0x01, 0xb1,
++			0x8e, 0xc0, 0x30, 0xa9, 0x53, 0x24, 0xc9, 0x89,
++			0x84, 0x6d, 0xaa, 0xd0, 0xcd, 0x91, 0xc2, 0x4d,
++			0x91, 0xb0, 0x89, 0xe2, 0xbf, 0x83, 0x44, 0xaa,
++			0x28, 0x72, 0x23, 0xa0, 0xc2, 0xad, 0xad, 0x1c,
++			0xfc, 0x3f, 0x09, 0x7a, 0x0b, 0xdc, 0xc5, 0x1b,
++			0x87, 0x13, 0xc6, 0x5b, 0x59, 0x8d, 0xf2, 0xc8,
++			0xaf, 0xdf, 0x11, 0x95,
++		},
++		.rlen = 4100,
++	},
++};
++
+ /*
+  * Compression stuff.
+  */
+@@ -4408,6 +7721,88 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
+ };
+ 
+ /*
++ * LZO test vectors (null-terminated strings).
++ */
++#define LZO_COMP_TEST_VECTORS 2
++#define LZO_DECOMP_TEST_VECTORS 2
++
++static struct comp_testvec lzo_comp_tv_template[] = {
++	{
++		.inlen	= 70,
++		.outlen	= 46,
++		.input	= "Join us now and share the software "
++			  "Join us now and share the software ",
++		.output	= {  0x00, 0x0d, 0x4a, 0x6f, 0x69, 0x6e, 0x20, 0x75,
++			     0x73, 0x20, 0x6e, 0x6f, 0x77, 0x20, 0x61, 0x6e,
++			     0x64, 0x20, 0x73, 0x68, 0x61, 0x72, 0x65, 0x20,
++			     0x74, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x66, 0x74,
++			     0x77, 0x70, 0x01, 0x01, 0x4a, 0x6f, 0x69, 0x6e,
++			     0x3d, 0x88, 0x00, 0x11, 0x00, 0x00 },
++	}, {
++		.inlen	= 159,
++		.outlen	= 133,
++		.input	= "This document describes a compression method based on the LZO "
++			  "compression algorithm.  This document defines the application of "
++			  "the LZO algorithm used in UBIFS.",
++		.output	= { 0x00, 0x2b, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64,
++			    0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x20,
++			    0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
++			    0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x70,
++			    0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20,
++			    0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x62,
++			    0x61, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20,
++			    0x74, 0x68, 0x65, 0x20, 0x4c, 0x5a, 0x4f, 0x2b,
++			    0x8c, 0x00, 0x0d, 0x61, 0x6c, 0x67, 0x6f, 0x72,
++			    0x69, 0x74, 0x68, 0x6d, 0x2e, 0x20, 0x20, 0x54,
++			    0x68, 0x69, 0x73, 0x2a, 0x54, 0x01, 0x02, 0x66,
++			    0x69, 0x6e, 0x65, 0x73, 0x94, 0x06, 0x05, 0x61,
++			    0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x76,
++			    0x0a, 0x6f, 0x66, 0x88, 0x02, 0x60, 0x09, 0x27,
++			    0xf0, 0x00, 0x0c, 0x20, 0x75, 0x73, 0x65, 0x64,
++			    0x20, 0x69, 0x6e, 0x20, 0x55, 0x42, 0x49, 0x46,
++			    0x53, 0x2e, 0x11, 0x00, 0x00 },
++	},
++};
++
++static struct comp_testvec lzo_decomp_tv_template[] = {
++	{
++		.inlen	= 133,
++		.outlen	= 159,
++		.input	= { 0x00, 0x2b, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64,
++			    0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x20,
++			    0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
++			    0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x70,
++			    0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20,
++			    0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x62,
++			    0x61, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20,
++			    0x74, 0x68, 0x65, 0x20, 0x4c, 0x5a, 0x4f, 0x2b,
++			    0x8c, 0x00, 0x0d, 0x61, 0x6c, 0x67, 0x6f, 0x72,
++			    0x69, 0x74, 0x68, 0x6d, 0x2e, 0x20, 0x20, 0x54,
++			    0x68, 0x69, 0x73, 0x2a, 0x54, 0x01, 0x02, 0x66,
++			    0x69, 0x6e, 0x65, 0x73, 0x94, 0x06, 0x05, 0x61,
++			    0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x76,
++			    0x0a, 0x6f, 0x66, 0x88, 0x02, 0x60, 0x09, 0x27,
++			    0xf0, 0x00, 0x0c, 0x20, 0x75, 0x73, 0x65, 0x64,
++			    0x20, 0x69, 0x6e, 0x20, 0x55, 0x42, 0x49, 0x46,
++			    0x53, 0x2e, 0x11, 0x00, 0x00 },
++		.output	= "This document describes a compression method based on the LZO "
++			  "compression algorithm.  This document defines the application of "
++			  "the LZO algorithm used in UBIFS.",
++	}, {
++		.inlen	= 46,
++		.outlen	= 70,
++		.input	= { 0x00, 0x0d, 0x4a, 0x6f, 0x69, 0x6e, 0x20, 0x75,
++			    0x73, 0x20, 0x6e, 0x6f, 0x77, 0x20, 0x61, 0x6e,
++			    0x64, 0x20, 0x73, 0x68, 0x61, 0x72, 0x65, 0x20,
++			    0x74, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x66, 0x74,
++			    0x77, 0x70, 0x01, 0x01, 0x4a, 0x6f, 0x69, 0x6e,
++			    0x3d, 0x88, 0x00, 0x11, 0x00, 0x00 },
++		.output	= "Join us now and share the software "
++			  "Join us now and share the software ",
++	},
++};
++
++/*
+  * Michael MIC test vectors from IEEE 802.11i
+  */
+ #define MICHAEL_MIC_TEST_VECTORS 6
+@@ -4812,4 +8207,20 @@ static struct cipher_speed camellia_speed_template[] = {
+       {  .klen = 0, .blen = 0, }
+ };
+ 
++static struct cipher_speed salsa20_speed_template[] = {
++      { .klen = 16, .blen = 16, },
++      { .klen = 16, .blen = 64, },
++      { .klen = 16, .blen = 256, },
++      { .klen = 16, .blen = 1024, },
++      { .klen = 16, .blen = 8192, },
++      { .klen = 32, .blen = 16, },
++      { .klen = 32, .blen = 64, },
++      { .klen = 32, .blen = 256, },
++      { .klen = 32, .blen = 1024, },
++      { .klen = 32, .blen = 8192, },
++
++      /* End marker */
++      {  .klen = 0, .blen = 0, }
++};
++
+ #endif	/* _CRYPTO_TCRYPT_H */
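The LZO vectors added above reuse the comp_testvec layout already used for the deflate templates, so they can be exercised through the generic compression interface. Below is a minimal sketch of how a tcrypt-style check might drive one vector end to end; the "lzo" algorithm name corresponds to the new crypto/lzo.c module, while the scratch-buffer size and the helper itself are illustrative only.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>

/* Compress one test vector and compare against its expected output.
 * Sketch only: assumes a registered "lzo" crypto_comp algorithm and a
 * scratch buffer large enough for the compressed result. */
static int lzo_tv_check(const struct comp_testvec *tv)
{
	struct crypto_comp *tfm = crypto_alloc_comp("lzo", 0, 0);
	u8 result[512];
	unsigned int dlen = sizeof(result);
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, (const u8 *)tv->input, tv->inlen,
				   result, &dlen);
	if (!ret && (dlen != tv->outlen || memcmp(result, tv->output, dlen)))
		ret = -EINVAL;

	crypto_free_comp(tfm);
	return ret;
}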
+diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
+index b4b9c0c..0af216c 100644
+--- a/crypto/twofish_common.c
++++ b/crypto/twofish_common.c
+@@ -655,84 +655,48 @@ int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
+ 			CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
+ 		}
+ 
+-		/* Calculate whitening and round subkeys.  The constants are
+-		 * indices of subkeys, preprocessed through q0 and q1. */
+-		CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
+-		CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
+-		CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
+-		CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
+-		CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
+-		CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
+-		CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
+-		CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
+-		CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
+-		CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
+-		CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71);
+-		CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
+-		CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F);
+-		CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
+-		CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
+-		CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F);
+-		CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
+-		CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
+-		CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00);
+-		CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
++		/* CALC_K256/CALC_K192/CALC_K loops were unrolled.
++		 * Unrolling produced x2.5 more code (+18k on i386),
++		 * and speeded up key setup by 7%:
++		 * unrolled: twofish_setkey/sec: 41128
++		 *     loop: twofish_setkey/sec: 38148
++		 * CALC_K256: ~100 insns each
++		 * CALC_K192: ~90 insns
++		 *    CALC_K: ~70 insns
++		 */
++		/* Calculate whitening and round subkeys */
++		for ( i = 0; i < 8; i += 2 ) {
++			CALC_K256 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
++		}
++		for ( i = 0; i < 32; i += 2 ) {
++			CALC_K256 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
++		}
+ 	} else if (key_len == 24) { /* 192-bit key */
+ 		/* Compute the S-boxes. */
+ 		for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
+ 		        CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
+ 		}
+ 
+-		/* Calculate whitening and round subkeys.  The constants are
+-		 * indices of subkeys, preprocessed through q0 and q1. */
+-		CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
+-		CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
+-		CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
+-		CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
+-		CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
+-		CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
+-		CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
+-		CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
+-		CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
+-		CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
+-		CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71);
+-		CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
+-		CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F);
+-		CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
+-		CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
+-		CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F);
+-		CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
+-		CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
+-		CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00);
+-		CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
++		/* Calculate whitening and round subkeys */
++		for ( i = 0; i < 8; i += 2 ) {
++			CALC_K192 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
++		}
++		for ( i = 0; i < 32; i += 2 ) {
++			CALC_K192 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
++		}
+ 	} else { /* 128-bit key */
+ 		/* Compute the S-boxes. */
+ 		for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
+ 			CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
+ 		}
+ 
+-		/* Calculate whitening and round subkeys.  The constants are
+-		 * indices of subkeys, preprocessed through q0 and q1. */
+-		CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3);
+-		CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
+-		CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
+-		CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
+-		CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
+-		CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B);
+-		CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
+-		CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
+-		CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
+-		CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD);
+-		CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71);
+-		CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
+-		CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F);
+-		CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B);
+-		CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA);
+-		CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F);
+-		CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
+-		CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
+-		CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00);
+-		CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
++		/* Calculate whitening and round subkeys */
++		for ( i = 0; i < 8; i += 2 ) {
++			CALC_K (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
++		}
++		for ( i = 0; i < 32; i += 2 ) {
++			CALC_K (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
++		}
+ 	}
+ 
+ 	return 0;
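The removed comment noted that the unrolled constants are "indices of subkeys, preprocessed through q0 and q1", and the replacement loops simply read those values out of the q0/q1 arrays defined earlier in twofish_common.c. As a sanity check, the first loop iteration has to reproduce the first removed call exactly; a sketch of that equivalence, assuming the array contents match the old literals:

	/* Loop body for i = 0 ...                  ... old unrolled form */
	CALC_K256 (w, 0, q0[0], q1[0], q0[1], q1[1]);
	/* requires q0[0] == 0xA9, q1[0] == 0x75, q0[1] == 0x67, q1[1] == 0xF3,
	 * i.e. it expands to CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3) above. */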
+diff --git a/crypto/xcbc.c b/crypto/xcbc.c
+index ac68f3b..a82959d 100644
+--- a/crypto/xcbc.c
++++ b/crypto/xcbc.c
+@@ -19,6 +19,7 @@
+  * 	Kazunori Miyazawa <miyazawa at linux-ipv6.org>
+  */
+ 
++#include <crypto/scatterwalk.h>
+ #include <linux/crypto.h>
+ #include <linux/err.h>
+ #include <linux/hardirq.h>
+@@ -27,7 +28,6 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/slab.h>
+ #include <linux/scatterlist.h>
+-#include "internal.h"
+ 
+ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
+ 			   0x02020202, 0x02020202, 0x02020202, 0x02020202,
+@@ -307,7 +307,8 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
+ 	case 16:
+ 		break;
+ 	default:
+-		return ERR_PTR(PTR_ERR(alg));
++		inst = ERR_PTR(-EINVAL);
++		goto out_put_alg;
+ 	}
+ 
+ 	inst = crypto_alloc_instance("xcbc", alg);
+@@ -320,10 +321,7 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
+ 	inst->alg.cra_alignmask = alg->cra_alignmask;
+ 	inst->alg.cra_type = &crypto_hash_type;
+ 
+-	inst->alg.cra_hash.digestsize =
+-		(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+-		CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
+-				       alg->cra_blocksize;
++	inst->alg.cra_hash.digestsize = alg->cra_blocksize;
+ 	inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
+ 				ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
+ 	inst->alg.cra_init = xcbc_init_tfm;
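In the xcbc hunk above, the old default: branch applied PTR_ERR() to alg even though alg is a valid pointer at that point (so the returned "error" was garbage), and it bailed out without dropping the reference taken when the algorithm was looked up. A minimal sketch of the corrected shape; the out_put_alg label and the crypto_mod_put() call are assumed from the parts of xcbc_alloc() not shown in this hunk:

	switch (alg->cra_blocksize) {
	case 16:
		break;
	default:
		inst = ERR_PTR(-EINVAL);	/* real errno, not PTR_ERR(alg) */
		goto out_put_alg;		/* release the alg reference */
	}
	/* ... instance setup ... */
out_put_alg:
	crypto_mod_put(alg);
	return inst;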
+diff --git a/drivers/Kconfig b/drivers/Kconfig
+index f4076d9..08d4ae2 100644
+--- a/drivers/Kconfig
++++ b/drivers/Kconfig
+@@ -90,8 +90,6 @@ source "drivers/dca/Kconfig"
+ 
+ source "drivers/auxdisplay/Kconfig"
+ 
+-source "drivers/kvm/Kconfig"
 -
--	bio_get(bio);
--	blk_rq_bio_prep(q, rq, bio);
--	rq->buffer = rq->data = NULL;
--	return 0;
--}
--
--EXPORT_SYMBOL(blk_rq_map_user_iov);
--
--/**
-- * blk_rq_unmap_user - unmap a request with user data
-- * @bio:	       start of bio list
-- *
-- * Description:
-- *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
-- *    supply the original rq->bio from the blk_rq_map_user() return, since
-- *    the io completion may have changed rq->bio.
-- */
--int blk_rq_unmap_user(struct bio *bio)
--{
--	struct bio *mapped_bio;
--	int ret = 0, ret2;
--
--	while (bio) {
--		mapped_bio = bio;
--		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
--			mapped_bio = bio->bi_private;
--
--		ret2 = __blk_rq_unmap_user(mapped_bio);
--		if (ret2 && !ret)
--			ret = ret2;
--
--		mapped_bio = bio;
--		bio = bio->bi_next;
--		bio_put(mapped_bio);
+ source "drivers/uio/Kconfig"
+ 
+ source "drivers/virtio/Kconfig"
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 8cb37e3..0ee9a8a 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -38,7 +38,7 @@ obj-$(CONFIG_SCSI)		+= scsi/
+ obj-$(CONFIG_ATA)		+= ata/
+ obj-$(CONFIG_FUSION)		+= message/
+ obj-$(CONFIG_FIREWIRE)		+= firewire/
+-obj-$(CONFIG_IEEE1394)		+= ieee1394/
++obj-y				+= ieee1394/
+ obj-$(CONFIG_UIO)		+= uio/
+ obj-y				+= cdrom/
+ obj-y				+= auxdisplay/
+@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI)		+= spi/
+ obj-$(CONFIG_PCCARD)		+= pcmcia/
+ obj-$(CONFIG_DIO)		+= dio/
+ obj-$(CONFIG_SBUS)		+= sbus/
+-obj-$(CONFIG_KVM)		+= kvm/
+ obj-$(CONFIG_ZORRO)		+= zorro/
+ obj-$(CONFIG_MAC)		+= macintosh/
+ obj-$(CONFIG_ATA_OVER_ETH)	+= block/aoe/
+@@ -73,7 +72,7 @@ obj-$(CONFIG_ISDN)		+= isdn/
+ obj-$(CONFIG_EDAC)		+= edac/
+ obj-$(CONFIG_MCA)		+= mca/
+ obj-$(CONFIG_EISA)		+= eisa/
+-obj-$(CONFIG_LGUEST_GUEST)	+= lguest/
++obj-y				+= lguest/
+ obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
+ obj-$(CONFIG_CPU_IDLE)		+= cpuidle/
+ obj-$(CONFIG_MMC)		+= mmc/
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index f4487c3..1b4cf98 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -743,7 +743,7 @@ static int __init acpi_bus_init(void)
+ 	return -ENODEV;
+ }
+ 
+-decl_subsys(acpi, NULL, NULL);
++struct kobject *acpi_kobj;
+ 
+ static int __init acpi_init(void)
+ {
+@@ -755,10 +755,11 @@ static int __init acpi_init(void)
+ 		return -ENODEV;
+ 	}
+ 
+-	result = firmware_register(&acpi_subsys);
+-	if (result < 0)
+-		printk(KERN_WARNING "%s: firmware_register error: %d\n",
+-			__FUNCTION__, result);
++	acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
++	if (!acpi_kobj) {
++		printk(KERN_WARNING "%s: kset create error\n", __FUNCTION__);
++		acpi_kobj = NULL;
++	}
+ 
+ 	result = acpi_bus_init();
+ 
+diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
+index c9f526e..5400ea1 100644
+--- a/drivers/acpi/pci_link.c
++++ b/drivers/acpi/pci_link.c
+@@ -911,7 +911,7 @@ __setup("acpi_irq_balance", acpi_irq_balance_set);
+ 
+ /* FIXME: we will remove this interface after all drivers call pci_disable_device */
+ static struct sysdev_class irqrouter_sysdev_class = {
+-	set_kset_name("irqrouter"),
++	.name = "irqrouter",
+ 	.resume = irqrouter_resume,
+ };
+ 
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 2235f4e..eb1f82f 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -357,6 +357,26 @@ int acpi_processor_resume(struct acpi_device * device)
+ 	return 0;
+ }
+ 
++#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
++static int tsc_halts_in_c(int state)
++{
++	switch (boot_cpu_data.x86_vendor) {
++	case X86_VENDOR_AMD:
++		/*
++		 * AMD Fam10h TSC will tick in all
++		 * C/P/S0/S1 states when this bit is set.
++		 */
++		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
++			return 0;
++		/*FALL THROUGH*/
++	case X86_VENDOR_INTEL:
++		/* Several cases known where TSC halts in C2 too */
++	default:
++		return state > ACPI_STATE_C1;
++	}
++}
++#endif
++
+ #ifndef CONFIG_CPU_IDLE
+ static void acpi_processor_idle(void)
+ {
+@@ -516,7 +536,8 @@ static void acpi_processor_idle(void)
+ 
+ #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+ 		/* TSC halts in C2, so notify users */
+-		mark_tsc_unstable("possible TSC halt in C2");
++		if (tsc_halts_in_c(ACPI_STATE_C2))
++			mark_tsc_unstable("possible TSC halt in C2");
+ #endif
+ 		/* Compute time (ticks) that we were actually asleep */
+ 		sleep_ticks = ticks_elapsed(t1, t2);
+@@ -534,6 +555,7 @@ static void acpi_processor_idle(void)
+ 		break;
+ 
+ 	case ACPI_STATE_C3:
++		acpi_unlazy_tlb(smp_processor_id());
+ 		/*
+ 		 * Must be done before busmaster disable as we might
+ 		 * need to access HPET !
+@@ -579,7 +601,8 @@ static void acpi_processor_idle(void)
+ 
+ #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+ 		/* TSC halts in C3, so notify users */
+-		mark_tsc_unstable("TSC halts in C3");
++		if (tsc_halts_in_c(ACPI_STATE_C3))
++			mark_tsc_unstable("TSC halts in C3");
+ #endif
+ 		/* Compute time (ticks) that we were actually asleep */
+ 		sleep_ticks = ticks_elapsed(t1, t2);
+@@ -1423,6 +1446,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+ 		return 0;
+ 	}
+ 
++	acpi_unlazy_tlb(smp_processor_id());
+ 	/*
+ 	 * Must be done before busmaster disable as we might need to
+ 	 * access HPET !
+@@ -1443,7 +1467,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+ 
+ #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+ 	/* TSC could halt in idle, so notify users */
+-	mark_tsc_unstable("TSC halts in idle");;
++	if (tsc_halts_in_c(cx->type))
++		mark_tsc_unstable("TSC halts in idle");;
+ #endif
+ 	sleep_ticks = ticks_elapsed(t1, t2);
+ 
+@@ -1554,7 +1579,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+ 
+ #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+ 	/* TSC could halt in idle, so notify users */
+-	mark_tsc_unstable("TSC halts in idle");
++	if (tsc_halts_in_c(ACPI_STATE_C3))
++		mark_tsc_unstable("TSC halts in idle");
+ #endif
+ 	sleep_ticks = ticks_elapsed(t1, t2);
+ 	/* Tell the scheduler how much we idled: */
+diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
+index edee280..5ffe0ea 100644
+--- a/drivers/acpi/system.c
++++ b/drivers/acpi/system.c
+@@ -58,7 +58,7 @@ module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
+                               FS Interface (/sys)
+    -------------------------------------------------------------------------- */
+ static LIST_HEAD(acpi_table_attr_list);
+-static struct kobject tables_kobj;
++static struct kobject *tables_kobj;
+ 
+ struct acpi_table_attr {
+ 	struct bin_attribute attr;
+@@ -135,11 +135,9 @@ static int acpi_system_sysfs_init(void)
+ 	int table_index = 0;
+ 	int result;
+ 
+-	tables_kobj.parent = &acpi_subsys.kobj;
+-	kobject_set_name(&tables_kobj, "tables");
+-	result = kobject_register(&tables_kobj);
+-	if (result)
+-		return result;
++	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
++	if (!tables_kobj)
++		return -ENOMEM;
+ 
+ 	do {
+ 		result = acpi_get_table_by_index(table_index, &table_header);
+@@ -153,7 +151,7 @@ static int acpi_system_sysfs_init(void)
+ 
+ 			acpi_table_attr_init(table_attr, table_header);
+ 			result =
+-			    sysfs_create_bin_file(&tables_kobj,
++			    sysfs_create_bin_file(tables_kobj,
+ 						  &table_attr->attr);
+ 			if (result) {
+ 				kfree(table_attr);
+@@ -163,6 +161,7 @@ static int acpi_system_sysfs_init(void)
+ 					      &acpi_table_attr_list);
+ 		}
+ 	} while (!result);
++	kobject_uevent(tables_kobj, KOBJ_ADD);
+ 
+ 	return 0;
+ }
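Both ACPI hunks above move from the old static-kobject interfaces (decl_subsys(), kobject_set_name(), kobject_register()) to dynamically created kobjects. A minimal sketch of the resulting pattern, assuming acpi_kobj has been created under firmware_kobj as in the bus.c hunk and attr is an already initialized bin_attribute:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct kobject *tables_kobj;

static int tables_sysfs_init(struct bin_attribute *attr)
{
	int result;

	/* allocates the kobject, names it, parents it and adds it to sysfs */
	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
	if (!tables_kobj)
		return -ENOMEM;

	result = sysfs_create_bin_file(tables_kobj, attr);
	if (result) {
		kobject_put(tables_kobj);	/* drops the last reference, frees it */
		return result;
	}

	/* announce the new directory to userspace once it is populated */
	kobject_uevent(tables_kobj, KOBJ_ADD);
	return 0;
}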
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index ba63619..ae19c9b 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -184,7 +184,7 @@ config PATA_ACPI
+ 
+ config SATA_FSL
+ 	tristate "Freescale 3.0Gbps SATA support"
+-	depends on PPC_MPC837x
++	depends on FSL_SOC
+ 	help
+ 	  This option enables support for Freescale 3.0Gbps SATA controller.
+ 	  It can be found on MPC837x and MPC8315.
+@@ -459,6 +459,15 @@ config PATA_NETCELL
+ 
+ 	  If unsure, say N.
+ 
++config PATA_NINJA32
++	tristate "Ninja32/Delkin Cardbus ATA support (Experimental)"
++	depends on PCI && EXPERIMENTAL
++	help
++	  This option enables support for the Ninja32, Delkin and
++	  possibly other brands of Cardbus ATA adapter
++
++	  If unsure, say N.
++
+ config PATA_NS87410
+ 	tristate "Nat Semi NS87410 PATA support (Experimental)"
+ 	depends on PCI && EXPERIMENTAL
+@@ -607,13 +616,23 @@ config PATA_WINBOND_VLB
+ 
+ config PATA_PLATFORM
+ 	tristate "Generic platform device PATA support"
+-	depends on EMBEDDED || ARCH_RPC
++	depends on EMBEDDED || ARCH_RPC || PPC
+ 	help
+ 	  This option enables support for generic directly connected ATA
+ 	  devices commonly found on embedded systems.
+ 
+ 	  If unsure, say N.
+ 
++config PATA_OF_PLATFORM
++	tristate "OpenFirmware platform device PATA support"
++	depends on PATA_PLATFORM && PPC_OF
++	help
++	  This option enables support for generic directly connected ATA
++	  devices commonly found on embedded systems with OpenFirmware
++	  bindings.
++
++	  If unsure, say N.
++
+ config PATA_ICSIDE
+ 	tristate "Acorn ICS PATA support"
+ 	depends on ARM && ARCH_ACORN
+diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
+index b13feb2..701651e 100644
+--- a/drivers/ata/Makefile
++++ b/drivers/ata/Makefile
+@@ -41,6 +41,7 @@ obj-$(CONFIG_PATA_IT821X)	+= pata_it821x.o
+ obj-$(CONFIG_PATA_IT8213)	+= pata_it8213.o
+ obj-$(CONFIG_PATA_JMICRON)	+= pata_jmicron.o
+ obj-$(CONFIG_PATA_NETCELL)	+= pata_netcell.o
++obj-$(CONFIG_PATA_NINJA32)	+= pata_ninja32.o
+ obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
+ obj-$(CONFIG_PATA_NS87415)	+= pata_ns87415.o
+ obj-$(CONFIG_PATA_OPTI)		+= pata_opti.o
+@@ -67,6 +68,7 @@ obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
+ obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
+ obj-$(CONFIG_PATA_BF54X)	+= pata_bf54x.o
+ obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
++obj-$(CONFIG_PATA_OF_PLATFORM)	+= pata_of_platform.o
+ obj-$(CONFIG_PATA_ICSIDE)	+= pata_icside.o
+ # Should be last but two libata driver
+ obj-$(CONFIG_PATA_ACPI)		+= pata_acpi.o
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 54f38c2..6f089b8 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -198,18 +198,18 @@ enum {
+ };
+ 
+ struct ahci_cmd_hdr {
+-	u32			opts;
+-	u32			status;
+-	u32			tbl_addr;
+-	u32			tbl_addr_hi;
+-	u32			reserved[4];
++	__le32			opts;
++	__le32			status;
++	__le32			tbl_addr;
++	__le32			tbl_addr_hi;
++	__le32			reserved[4];
+ };
+ 
+ struct ahci_sg {
+-	u32			addr;
+-	u32			addr_hi;
+-	u32			reserved;
+-	u32			flags_size;
++	__le32			addr;
++	__le32			addr_hi;
++	__le32			reserved;
++	__le32			flags_size;
+ };
+ 
+ struct ahci_host_priv {
+@@ -597,6 +597,20 @@ static inline void __iomem *ahci_port_base(struct ata_port *ap)
+ 	return __ahci_port_base(ap->host, ap->port_no);
+ }
+ 
++static void ahci_enable_ahci(void __iomem *mmio)
++{
++	u32 tmp;
++
++	/* turn on AHCI_EN */
++	tmp = readl(mmio + HOST_CTL);
++	if (!(tmp & HOST_AHCI_EN)) {
++		tmp |= HOST_AHCI_EN;
++		writel(tmp, mmio + HOST_CTL);
++		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
++		WARN_ON(!(tmp & HOST_AHCI_EN));
++	}
++}
++
+ /**
+  *	ahci_save_initial_config - Save and fixup initial config values
+  *	@pdev: target PCI device
+@@ -619,6 +633,9 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
+ 	u32 cap, port_map;
+ 	int i;
+ 
++	/* make sure AHCI mode is enabled before accessing CAP */
++	ahci_enable_ahci(mmio);
++
+ 	/* Values prefixed with saved_ are written back to host after
+ 	 * reset.  Values without are used for driver operation.
+ 	 */
+@@ -1036,19 +1053,17 @@ static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+ static int ahci_reset_controller(struct ata_host *host)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(host->dev);
++	struct ahci_host_priv *hpriv = host->private_data;
+ 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ 	u32 tmp;
+ 
+ 	/* we must be in AHCI mode, before using anything
+ 	 * AHCI-specific, such as HOST_RESET.
+ 	 */
+-	tmp = readl(mmio + HOST_CTL);
+-	if (!(tmp & HOST_AHCI_EN)) {
+-		tmp |= HOST_AHCI_EN;
+-		writel(tmp, mmio + HOST_CTL);
 -	}
++	ahci_enable_ahci(mmio);
+ 
+ 	/* global controller reset */
++	tmp = readl(mmio + HOST_CTL);
+ 	if ((tmp & HOST_RESET) == 0) {
+ 		writel(tmp | HOST_RESET, mmio + HOST_CTL);
+ 		readl(mmio + HOST_CTL); /* flush */
+@@ -1067,8 +1082,7 @@ static int ahci_reset_controller(struct ata_host *host)
+ 	}
+ 
+ 	/* turn on AHCI mode */
+-	writel(HOST_AHCI_EN, mmio + HOST_CTL);
+-	(void) readl(mmio + HOST_CTL);	/* flush */
++	ahci_enable_ahci(mmio);
+ 
+ 	/* some registers might be cleared on reset.  restore initial values */
+ 	ahci_restore_initial_config(host);
+@@ -1078,8 +1092,10 @@ static int ahci_reset_controller(struct ata_host *host)
+ 
+ 		/* configure PCS */
+ 		pci_read_config_word(pdev, 0x92, &tmp16);
+-		tmp16 |= 0xf;
+-		pci_write_config_word(pdev, 0x92, tmp16);
++		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
++			tmp16 |= hpriv->port_map;
++			pci_write_config_word(pdev, 0x92, tmp16);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -1480,35 +1496,31 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+ {
+ 	struct scatterlist *sg;
+-	struct ahci_sg *ahci_sg;
+-	unsigned int n_sg = 0;
++	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
++	unsigned int si;
+ 
+ 	VPRINTK("ENTER\n");
+ 
+ 	/*
+ 	 * Next, the S/G list.
+ 	 */
+-	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		dma_addr_t addr = sg_dma_address(sg);
+ 		u32 sg_len = sg_dma_len(sg);
+ 
+-		ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
+-		ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+-		ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
 -
--	return ret;
--}
--
--EXPORT_SYMBOL(blk_rq_unmap_user);
+-		ahci_sg++;
+-		n_sg++;
++		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
++		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
++		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
+ 	}
+ 
+-	return n_sg;
++	return si;
+ }
+ 
+ static void ahci_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct ahci_port_priv *pp = ap->private_data;
+-	int is_atapi = is_atapi_taskfile(&qc->tf);
++	int is_atapi = ata_is_atapi(qc->tf.protocol);
+ 	void *cmd_tbl;
+ 	u32 opts;
+ 	const u32 cmd_fis_len = 5; /* five dwords */
+diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
+index 9032998..2053420 100644
+--- a/drivers/ata/ata_generic.c
++++ b/drivers/ata/ata_generic.c
+@@ -26,7 +26,7 @@
+ #include <linux/libata.h>
+ 
+ #define DRV_NAME "ata_generic"
+-#define DRV_VERSION "0.2.13"
++#define DRV_VERSION "0.2.15"
+ 
+ /*
+  *	A generic parallel ATA driver using libata
+@@ -48,27 +48,47 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
+ 	struct ata_port *ap = link->ap;
+ 	int dma_enabled = 0;
+ 	struct ata_device *dev;
++	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ 
+ 	/* Bits 5 and 6 indicate if DMA is active on master/slave */
+ 	if (ap->ioaddr.bmdma_addr)
+ 		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
++		dma_enabled = 0xFF;
++
+ 	ata_link_for_each_dev(dev, link) {
+-		if (ata_dev_enabled(dev)) {
+-			/* We don't really care */
+-			dev->pio_mode = XFER_PIO_0;
+-			dev->dma_mode = XFER_MW_DMA_0;
+-			/* We do need the right mode information for DMA or PIO
+-			   and this comes from the current configuration flags */
+-			if (dma_enabled & (1 << (5 + dev->devno))) {
+-				ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
+-				dev->flags &= ~ATA_DFLAG_PIO;
+-			} else {
+-				ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+-				dev->xfer_mode = XFER_PIO_0;
+-				dev->xfer_shift = ATA_SHIFT_PIO;
+-				dev->flags |= ATA_DFLAG_PIO;
++		if (!ata_dev_enabled(dev))
++			continue;
++
++		/* We don't really care */
++		dev->pio_mode = XFER_PIO_0;
++		dev->dma_mode = XFER_MW_DMA_0;
++		/* We do need the right mode information for DMA or PIO
++		   and this comes from the current configuration flags */
++		if (dma_enabled & (1 << (5 + dev->devno))) {
++			unsigned int xfer_mask = ata_id_xfermask(dev->id);
++			const char *name;
++
++			if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
++				name = ata_mode_string(xfer_mask);
++			else {
++				/* SWDMA perhaps? */
++				name = "DMA";
++				xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0);
+ 			}
++
++			ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
++				       name);
++
++			dev->xfer_mode = ata_xfer_mask2mode(xfer_mask);
++			dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode);
++			dev->flags &= ~ATA_DFLAG_PIO;
++		} else {
++			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
++			dev->xfer_mode = XFER_PIO_0;
++			dev->xfer_shift = ATA_SHIFT_PIO;
++			dev->flags |= ATA_DFLAG_PIO;
+ 		}
+ 	}
+ 	return 0;
+@@ -185,6 +205,7 @@ static struct pci_device_id ata_generic[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_HINT,   PCI_DEVICE_ID_HINT_VXPROII_IDE), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA,    PCI_DEVICE_ID_VIA_82C561), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_OPTI,   PCI_DEVICE_ID_OPTI_82C558), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2),  },
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index b406b39..a65c8ae 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -101,39 +101,21 @@ enum {
+ 	ICH5_PMR		= 0x90, /* port mapping register */
+ 	ICH5_PCS		= 0x92,	/* port control and status */
+ 	PIIX_SCC		= 0x0A, /* sub-class code register */
++	PIIX_SIDPR_BAR		= 5,
++	PIIX_SIDPR_LEN		= 16,
++	PIIX_SIDPR_IDX		= 0,
++	PIIX_SIDPR_DATA		= 4,
+ 
+-	PIIX_FLAG_SCR		= (1 << 26), /* SCR available */
+ 	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
+ 	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
++	PIIX_FLAG_SIDPR		= (1 << 29), /* SATA idx/data pair regs */
+ 
+ 	PIIX_PATA_FLAGS		= ATA_FLAG_SLAVE_POSS,
+ 	PIIX_SATA_FLAGS		= ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
+ 
+-	/* combined mode.  if set, PATA is channel 0.
+-	 * if clear, PATA is channel 1.
+-	 */
+-	PIIX_PORT_ENABLED	= (1 << 0),
+-	PIIX_PORT_PRESENT	= (1 << 4),
 -
--/**
-- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
-- * @q:		request queue where request should be inserted
-- * @rq:		request to fill
-- * @kbuf:	the kernel buffer
-- * @len:	length of user data
-- * @gfp_mask:	memory allocation flags
-- */
--int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
--		    unsigned int len, gfp_t gfp_mask)
--{
--	struct bio *bio;
+ 	PIIX_80C_PRI		= (1 << 5) | (1 << 4),
+ 	PIIX_80C_SEC		= (1 << 7) | (1 << 6),
+ 
+-	/* controller IDs */
+-	piix_pata_mwdma		= 0,	/* PIIX3 MWDMA only */
+-	piix_pata_33,			/* PIIX4 at 33Mhz */
+-	ich_pata_33,			/* ICH up to UDMA 33 only */
+-	ich_pata_66,			/* ICH up to 66 Mhz */
+-	ich_pata_100,			/* ICH up to UDMA 100 */
+-	ich5_sata,
+-	ich6_sata,
+-	ich6_sata_ahci,
+-	ich6m_sata_ahci,
+-	ich8_sata_ahci,
+-	ich8_2port_sata,
+-	ich8m_apple_sata_ahci,		/* locks up on second port enable */
+-	tolapai_sata_ahci,
+-	piix_pata_vmw,			/* PIIX4 for VMware, spurious DMA_ERR */
 -
--	if (len > (q->max_hw_sectors << 9))
--		return -EINVAL;
--	if (!len || !kbuf)
--		return -EINVAL;
+ 	/* constants for mapping table */
+ 	P0			= 0,  /* port 0 */
+ 	P1			= 1,  /* port 1 */
+@@ -149,6 +131,24 @@ enum {
+ 	PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
+ };
+ 
++enum piix_controller_ids {
++	/* controller IDs */
++	piix_pata_mwdma,	/* PIIX3 MWDMA only */
++	piix_pata_33,		/* PIIX4 at 33Mhz */
++	ich_pata_33,		/* ICH up to UDMA 33 only */
++	ich_pata_66,		/* ICH up to 66 Mhz */
++	ich_pata_100,		/* ICH up to UDMA 100 */
++	ich5_sata,
++	ich6_sata,
++	ich6_sata_ahci,
++	ich6m_sata_ahci,
++	ich8_sata_ahci,
++	ich8_2port_sata,
++	ich8m_apple_sata_ahci,	/* locks up on second port enable */
++	tolapai_sata_ahci,
++	piix_pata_vmw,			/* PIIX4 for VMware, spurious DMA_ERR */
++};
++
+ struct piix_map_db {
+ 	const u32 mask;
+ 	const u16 port_enable;
+@@ -157,6 +157,7 @@ struct piix_map_db {
+ 
+ struct piix_host_priv {
+ 	const int *map;
++	void __iomem *sidpr;
+ };
+ 
+ static int piix_init_one(struct pci_dev *pdev,
+@@ -167,6 +168,9 @@ static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+ static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+ static int ich_pata_cable_detect(struct ata_port *ap);
+ static u8 piix_vmw_bmdma_status(struct ata_port *ap);
++static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val);
++static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val);
++static void piix_sidpr_error_handler(struct ata_port *ap);
+ #ifdef CONFIG_PM
+ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+ static int piix_pci_device_resume(struct pci_dev *pdev);
+@@ -321,7 +325,6 @@ static const struct ata_port_operations piix_pata_ops = {
+ 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+ 	.cable_detect		= ata_cable_40wire,
+ 
+-	.irq_handler		= ata_interrupt,
+ 	.irq_clear		= ata_bmdma_irq_clear,
+ 	.irq_on			= ata_irq_on,
+ 
+@@ -353,7 +356,6 @@ static const struct ata_port_operations ich_pata_ops = {
+ 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+ 	.cable_detect		= ich_pata_cable_detect,
+ 
+-	.irq_handler		= ata_interrupt,
+ 	.irq_clear		= ata_bmdma_irq_clear,
+ 	.irq_on			= ata_irq_on,
+ 
+@@ -380,7 +382,6 @@ static const struct ata_port_operations piix_sata_ops = {
+ 	.error_handler		= ata_bmdma_error_handler,
+ 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+ 
+-	.irq_handler		= ata_interrupt,
+ 	.irq_clear		= ata_bmdma_irq_clear,
+ 	.irq_on			= ata_irq_on,
+ 
+@@ -419,6 +420,35 @@ static const struct ata_port_operations piix_vmw_ops = {
+ 	.port_start		= ata_port_start,
+ };
+ 
++static const struct ata_port_operations piix_sidpr_sata_ops = {
++	.tf_load		= ata_tf_load,
++	.tf_read		= ata_tf_read,
++	.check_status		= ata_check_status,
++	.exec_command		= ata_exec_command,
++	.dev_select		= ata_std_dev_select,
++
++	.bmdma_setup		= ata_bmdma_setup,
++	.bmdma_start		= ata_bmdma_start,
++	.bmdma_stop		= ata_bmdma_stop,
++	.bmdma_status		= ata_bmdma_status,
++	.qc_prep		= ata_qc_prep,
++	.qc_issue		= ata_qc_issue_prot,
++	.data_xfer		= ata_data_xfer,
++
++	.scr_read		= piix_sidpr_scr_read,
++	.scr_write		= piix_sidpr_scr_write,
++
++	.freeze			= ata_bmdma_freeze,
++	.thaw			= ata_bmdma_thaw,
++	.error_handler		= piix_sidpr_error_handler,
++	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
++
++	.irq_clear		= ata_bmdma_irq_clear,
++	.irq_on			= ata_irq_on,
++
++	.port_start		= ata_port_start,
++};
++
+ static const struct piix_map_db ich5_map_db = {
+ 	.mask = 0x7,
+ 	.port_enable = 0x3,
+@@ -526,7 +556,6 @@ static const struct piix_map_db *piix_map_db_table[] = {
+ static struct ata_port_info piix_port_info[] = {
+ 	[piix_pata_mwdma] = 	/* PIIX3 MWDMA only */
+ 	{
+-		.sht		= &piix_sht,
+ 		.flags		= PIIX_PATA_FLAGS,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+@@ -535,7 +564,6 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[piix_pata_33] =	/* PIIX4 at 33MHz */
+ 	{
+-		.sht		= &piix_sht,
+ 		.flags		= PIIX_PATA_FLAGS,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+@@ -545,7 +573,6 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich_pata_33] = 	/* ICH0 - ICH at 33Mhz*/
+ 	{
+-		.sht		= &piix_sht,
+ 		.flags		= PIIX_PATA_FLAGS,
+ 		.pio_mask 	= 0x1f,	/* pio 0-4 */
+ 		.mwdma_mask	= 0x06, /* Check: maybe 0x07  */
+@@ -555,7 +582,6 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich_pata_66] = 	/* ICH controllers up to 66MHz */
+ 	{
+-		.sht		= &piix_sht,
+ 		.flags		= PIIX_PATA_FLAGS,
+ 		.pio_mask 	= 0x1f,	/* pio 0-4 */
+ 		.mwdma_mask	= 0x06, /* MWDMA0 is broken on chip */
+@@ -565,7 +591,6 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich_pata_100] =
+ 	{
+-		.sht		= &piix_sht,
+ 		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x06, /* mwdma1-2 */
+@@ -575,7 +600,6 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich5_sata] =
+ 	{
+-		.sht		= &piix_sht,
+ 		.flags		= PIIX_SATA_FLAGS,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x07, /* mwdma0-2 */
+@@ -585,8 +609,7 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich6_sata] =
+ 	{
+-		.sht		= &piix_sht,
+-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
++		.flags		= PIIX_SATA_FLAGS,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x07, /* mwdma0-2 */
+ 		.udma_mask	= ATA_UDMA6,
+@@ -595,9 +618,7 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich6_sata_ahci] =
+ 	{
+-		.sht		= &piix_sht,
+-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+-				  PIIX_FLAG_AHCI,
++		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x07, /* mwdma0-2 */
+ 		.udma_mask	= ATA_UDMA6,
+@@ -606,9 +627,7 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich6m_sata_ahci] =
+ 	{
+-		.sht		= &piix_sht,
+-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+-				  PIIX_FLAG_AHCI,
++		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x07, /* mwdma0-2 */
+ 		.udma_mask	= ATA_UDMA6,
+@@ -617,9 +636,8 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich8_sata_ahci] =
+ 	{
+-		.sht		= &piix_sht,
+-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+-				  PIIX_FLAG_AHCI,
++		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
++				  PIIX_FLAG_SIDPR,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x07, /* mwdma0-2 */
+ 		.udma_mask	= ATA_UDMA6,
+@@ -628,9 +646,8 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich8_2port_sata] =
+ 	{
+-		.sht		= &piix_sht,
+-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+-				  PIIX_FLAG_AHCI,
++		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
++				  PIIX_FLAG_SIDPR,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x07, /* mwdma0-2 */
+ 		.udma_mask	= ATA_UDMA6,
+@@ -639,9 +656,7 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[tolapai_sata_ahci] =
+ 	{
+-		.sht		= &piix_sht,
+-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+-				  PIIX_FLAG_AHCI,
++		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x07, /* mwdma0-2 */
+ 		.udma_mask	= ATA_UDMA6,
+@@ -650,9 +665,8 @@ static struct ata_port_info piix_port_info[] = {
+ 
+ 	[ich8m_apple_sata_ahci] =
+ 	{
+-		.sht		= &piix_sht,
+-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+-				  PIIX_FLAG_AHCI,
++		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
++				  PIIX_FLAG_SIDPR,
+ 		.pio_mask	= 0x1f,	/* pio0-4 */
+ 		.mwdma_mask	= 0x07, /* mwdma0-2 */
+ 		.udma_mask	= ATA_UDMA6,
+@@ -1001,6 +1015,180 @@ static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+ 	do_pata_set_dmamode(ap, adev, 1);
+ }
+ 
++/*
++ * Serial ATA Index/Data Pair Superset Registers access
++ *
++ * Beginning with ICH8, there's a sane way to access SCRs using an
++ * index and data register pair located at BAR5.  This creates an
++ * interesting problem of mapping two SCRs to one port.
++ *
++ * Although they have separate SCRs, the master and slave aren't
++ * independent enough to be treated as separate links - e.g. softreset
++ * resets both.  Also, there's no protocol defined for hard resetting
++ * a single device sharing the virtual port (no defined way to acquire
++ * the device signature).  This is worked around by merging the SCR
++ * values into one sensible value and requesting a follow-up SRST
++ * after hardreset.
++ *
++ * SCR merging is performed in nibbles, which is the unit in which the
++ * contents of the SCRs are organized.  If the two values are equal,
++ * that value is used.  When they differ, the merge table listing the
++ * precedence of possible values is consulted and the first match, or
++ * the last entry when nothing matches, is used.  When there's no merge
++ * table for a specific nibble, the value from the first port is used.
++ */
++static const int piix_sidx_map[] = {
++	[SCR_STATUS]	= 0,
++	[SCR_ERROR]	= 2,
++	[SCR_CONTROL]	= 1,
++};
++
++static void piix_sidpr_sel(struct ata_device *dev, unsigned int reg)
++{
++	struct ata_port *ap = dev->link->ap;
++	struct piix_host_priv *hpriv = ap->host->private_data;
++
++	iowrite32(((ap->port_no * 2 + dev->devno) << 8) | piix_sidx_map[reg],
++		  hpriv->sidpr + PIIX_SIDPR_IDX);
++}
++
++static int piix_sidpr_read(struct ata_device *dev, unsigned int reg)
++{
++	struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
++
++	piix_sidpr_sel(dev, reg);
++	return ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
++}
++
++static void piix_sidpr_write(struct ata_device *dev, unsigned int reg, u32 val)
++{
++	struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
++
++	piix_sidpr_sel(dev, reg);
++	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
++}
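
The sel/read/write helpers above are a textbook index/data register pair: a selector encoding (port, device, SCR) is written to the index register, and the payload then moves through the data register. Below is a stand-alone user-space sketch of just that addressing scheme against a fake register file; the array layout and register sizes are illustrative assumptions, not the hardware map.

#include <stdio.h>
#include <stdint.h>

/* Fake SIDPR block: an index register plus a small backing store that the
 * data register is redirected to.  Sizes and indices are illustrative.
 */
static uint32_t sidpr_idx;
static uint32_t sidpr_regs[4 * 3];		/* 2 ports x 2 devices x 3 SCRs */

static void sidpr_sel(unsigned int port, unsigned int devno, unsigned int scr)
{
	/* same encoding as piix_sidpr_sel(): ((port * 2 + devno) << 8) | scr */
	sidpr_idx = ((port * 2 + devno) << 8) | scr;
}

static uint32_t sidpr_read(void)
{
	return sidpr_regs[(sidpr_idx >> 8) * 3 + (sidpr_idx & 0xff)];
}

static void sidpr_write(uint32_t val)
{
	sidpr_regs[(sidpr_idx >> 8) * 3 + (sidpr_idx & 0xff)] = val;
}

int main(void)
{
	/* select SControl (index 1) of port 1, device 0, write, then read back */
	sidpr_sel(1, 0, 1);
	sidpr_write(0x300);
	sidpr_sel(1, 0, 1);
	printf("scontrol = 0x%x\n", (unsigned int)sidpr_read());
	return 0;
}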
++
++u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
++{
++	u32 val = 0;
++	int i, mi;
++
++	for (i = 0, mi = 0; i < 32 / 4; i++) {
++		u8 c0 = (val0 >> (i * 4)) & 0xf;
++		u8 c1 = (val1 >> (i * 4)) & 0xf;
++		u8 merged = c0;
++		const int *cur;
++
++		/* if no merge preference, assume the first value */
++		cur = merge_tbl[mi];
++		if (!cur)
++			goto done;
++		mi++;
++
++		/* if two values equal, use it */
++		if (c0 == c1)
++			goto done;
++
++		/* choose the first match or the last from the merge table */
++		while (*cur != -1) {
++			if (c0 == *cur || c1 == *cur)
++				break;
++			cur++;
++		}
++		if (*cur == -1)
++			cur--;
++		merged = *cur;
++	done:
++		val |= merged << (i * 4);
++	}
++
++	return val;
++}
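
As a quick illustration of the nibble-wise merge above, here is a stand-alone user-space sketch that runs two made-up SStatus values through the same algorithm, using the SStatus precedence tables from this patch; the input values and the printout are purely illustrative.

#include <stdio.h>
#include <stdint.h>

/* Stand-alone re-implementation of the nibble-wise merge, for illustration. */
static uint32_t merge_scr(uint32_t val0, uint32_t val1, const int *const *merge_tbl)
{
	uint32_t val = 0;
	int i, mi;

	for (i = 0, mi = 0; i < 32 / 4; i++) {
		uint8_t c0 = (val0 >> (i * 4)) & 0xf;
		uint8_t c1 = (val1 >> (i * 4)) & 0xf;
		uint8_t merged = c0;
		const int *cur;

		/* no merge table for this nibble: keep the first port's value */
		cur = merge_tbl[mi];
		if (!cur)
			goto done;
		mi++;

		/* identical nibbles need no merging */
		if (c0 == c1)
			goto done;

		/* first match in the precedence table, or its last entry */
		while (*cur != -1) {
			if (c0 == *cur || c1 == *cur)
				break;
			cur++;
		}
		if (*cur == -1)
			cur--;
		merged = *cur;
	done:
		val |= (uint32_t)merged << (i * 4);
	}
	return val;
}

int main(void)
{
	/* same precedence tables as sstatus_merge_tbl in this patch */
	const int det[] = { 1, 3, 0, 4, 3, -1 };
	const int spd[] = { 2, 1, 0, -1 };
	const int ipm[] = { 6, 2, 1, 0, -1 };
	const int *const tbl[] = { det, spd, ipm, NULL };

	/* hypothetical SStatus values: device attached on one virtual slot only */
	uint32_t v0 = 0x123;	/* DET=3, SPD=2, IPM=1 */
	uint32_t v1 = 0x000;	/* nothing attached */

	printf("merged SStatus = 0x%03x\n", (unsigned int)merge_scr(v0, v1, tbl));
	return 0;
}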
++
++static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val)
++{
++	const int * const sstatus_merge_tbl[] = {
++		/* DET */ (const int []){ 1, 3, 0, 4, 3, -1 },
++		/* SPD */ (const int []){ 2, 1, 0, -1 },
++		/* IPM */ (const int []){ 6, 2, 1, 0, -1 },
++		NULL,
++	};
++	const int * const scontrol_merge_tbl[] = {
++		/* DET */ (const int []){ 1, 0, 4, 0, -1 },
++		/* SPD */ (const int []){ 0, 2, 1, 0, -1 },
++		/* IPM */ (const int []){ 0, 1, 2, 3, 0, -1 },
++		NULL,
++	};
++	u32 v0, v1;
++
++	if (reg >= ARRAY_SIZE(piix_sidx_map))
++		return -EINVAL;
++
++	if (!(ap->flags & ATA_FLAG_SLAVE_POSS)) {
++		*val = piix_sidpr_read(&ap->link.device[0], reg);
++		return 0;
++	}
++
++	v0 = piix_sidpr_read(&ap->link.device[0], reg);
++	v1 = piix_sidpr_read(&ap->link.device[1], reg);
++
++	switch (reg) {
++	case SCR_STATUS:
++		*val = piix_merge_scr(v0, v1, sstatus_merge_tbl);
++		break;
++	case SCR_ERROR:
++		*val = v0 | v1;
++		break;
++	case SCR_CONTROL:
++		*val = piix_merge_scr(v0, v1, scontrol_merge_tbl);
++		break;
++	}
++
++	return 0;
++}
++
++static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val)
++{
++	if (reg >= ARRAY_SIZE(piix_sidx_map))
++		return -EINVAL;
++
++	piix_sidpr_write(&ap->link.device[0], reg, val);
++
++	if (ap->flags & ATA_FLAG_SLAVE_POSS)
++		piix_sidpr_write(&ap->link.device[1], reg, val);
++
++	return 0;
++}
++
++static int piix_sidpr_hardreset(struct ata_link *link, unsigned int *class,
++				unsigned long deadline)
++{
++	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
++	int rc;
++
++	/* do hardreset */
++	rc = sata_link_hardreset(link, timing, deadline);
++	if (rc) {
++		ata_link_printk(link, KERN_ERR,
++				"COMRESET failed (errno=%d)\n", rc);
++		return rc;
++	}
++
++	/* TODO: phy layer with polling, timeouts, etc. */
++	if (ata_link_offline(link)) {
++		*class = ATA_DEV_NONE;
++		return 0;
++	}
++
++	return -EAGAIN;
++}
++
++static void piix_sidpr_error_handler(struct ata_port *ap)
++{
++	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
++			   piix_sidpr_hardreset, ata_std_postreset);
++}
++
+ #ifdef CONFIG_PM
+ static int piix_broken_suspend(void)
+ {
+@@ -1034,6 +1222,13 @@ static int piix_broken_suspend(void)
+ 			},
+ 		},
+ 		{
++			.ident = "TECRA M6",
++			.matches = {
++				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
++			},
++		},
++		{
+ 			.ident = "TECRA M7",
+ 			.matches = {
+ 				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+@@ -1048,6 +1243,13 @@ static int piix_broken_suspend(void)
+ 			},
+ 		},
+ 		{
++			.ident = "Satellite R20",
++			.matches = {
++				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
++			},
++		},
++		{
+ 			.ident = "Satellite R25",
+ 			.matches = {
+ 				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+@@ -1253,10 +1455,10 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
+ 	return no_piix_dma;
+ }
+ 
+-static void __devinit piix_init_pcs(struct pci_dev *pdev,
+-				    struct ata_port_info *pinfo,
++static void __devinit piix_init_pcs(struct ata_host *host,
+ 				    const struct piix_map_db *map_db)
+ {
++	struct pci_dev *pdev = to_pci_dev(host->dev);
+ 	u16 pcs, new_pcs;
+ 
+ 	pci_read_config_word(pdev, ICH5_PCS, &pcs);
+@@ -1270,11 +1472,10 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
+ 	}
+ }
+ 
+-static void __devinit piix_init_sata_map(struct pci_dev *pdev,
+-					 struct ata_port_info *pinfo,
+-					 const struct piix_map_db *map_db)
++static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
++					       struct ata_port_info *pinfo,
++					       const struct piix_map_db *map_db)
+ {
+-	struct piix_host_priv *hpriv = pinfo[0].private_data;
+ 	const int *map;
+ 	int i, invalid_map = 0;
+ 	u8 map_value;
+@@ -1298,7 +1499,6 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
+ 		case IDE:
+ 			WARN_ON((i & 1) || map[i + 1] != IDE);
+ 			pinfo[i / 2] = piix_port_info[ich_pata_100];
+-			pinfo[i / 2].private_data = hpriv;
+ 			i++;
+ 			printk(" IDE IDE");
+ 			break;
+@@ -1316,7 +1516,33 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
+ 		dev_printk(KERN_ERR, &pdev->dev,
+ 			   "invalid MAP value %u\n", map_value);
+ 
+-	hpriv->map = map;
++	return map;
++}
++
++static void __devinit piix_init_sidpr(struct ata_host *host)
++{
++	struct pci_dev *pdev = to_pci_dev(host->dev);
++	struct piix_host_priv *hpriv = host->private_data;
++	int i;
++
++	/* check for availability */
++	for (i = 0; i < 4; i++)
++		if (hpriv->map[i] == IDE)
++			return;
++
++	if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
++		return;
++
++	if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
++	    pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
++		return;
++
++	if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
++		return;
++
++	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
++	host->ports[0]->ops = &piix_sidpr_sata_ops;
++	host->ports[1]->ops = &piix_sidpr_sata_ops;
+ }
+ 
+ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
+@@ -1375,8 +1601,10 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	struct device *dev = &pdev->dev;
+ 	struct ata_port_info port_info[2];
+ 	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
+-	struct piix_host_priv *hpriv;
+ 	unsigned long port_flags;
++	struct ata_host *host;
++	struct piix_host_priv *hpriv;
++	int rc;
+ 
+ 	if (!printed_version++)
+ 		dev_printk(KERN_DEBUG, &pdev->dev,
+@@ -1386,17 +1614,31 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (!in_module_init)
+ 		return -ENODEV;
+ 
++	port_info[0] = piix_port_info[ent->driver_data];
++	port_info[1] = piix_port_info[ent->driver_data];
++
++	port_flags = port_info[0].flags;
++
++	/* enable device and prepare host */
++	rc = pcim_enable_device(pdev);
++	if (rc)
++		return rc;
++
++	/* SATA map init can change port_info, do it before prepping host */
+ 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ 	if (!hpriv)
+ 		return -ENOMEM;
+ 
+-	port_info[0] = piix_port_info[ent->driver_data];
+-	port_info[1] = piix_port_info[ent->driver_data];
+-	port_info[0].private_data = hpriv;
+-	port_info[1].private_data = hpriv;
++	if (port_flags & ATA_FLAG_SATA)
++		hpriv->map = piix_init_sata_map(pdev, port_info,
++					piix_map_db_table[ent->driver_data]);
+ 
+-	port_flags = port_info[0].flags;
++	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
++	if (rc)
++		return rc;
++	host->private_data = hpriv;
+ 
++	/* initialize controller */
+ 	if (port_flags & PIIX_FLAG_AHCI) {
+ 		u8 tmp;
+ 		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
+@@ -1407,12 +1649,9 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		}
+ 	}
+ 
+-	/* Initialize SATA map */
+ 	if (port_flags & ATA_FLAG_SATA) {
+-		piix_init_sata_map(pdev, port_info,
+-				   piix_map_db_table[ent->driver_data]);
+-		piix_init_pcs(pdev, port_info,
+-			      piix_map_db_table[ent->driver_data]);
++		piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
++		piix_init_sidpr(host);
+ 	}
+ 
+ 	/* apply IOCFG bit18 quirk */
+@@ -1431,12 +1670,14 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		/* This writes into the master table but it does not
+ 		   really matter for this errata as we will apply it to
+ 		   all the PIIX devices on the board */
+-		port_info[0].mwdma_mask = 0;
+-		port_info[0].udma_mask = 0;
+-		port_info[1].mwdma_mask = 0;
+-		port_info[1].udma_mask = 0;
++		host->ports[0]->mwdma_mask = 0;
++		host->ports[0]->udma_mask = 0;
++		host->ports[1]->mwdma_mask = 0;
++		host->ports[1]->udma_mask = 0;
+ 	}
+-	return ata_pci_init_one(pdev, ppi);
++
++	pci_set_master(pdev);
++	return ata_pci_activate_sff_host(host, ata_interrupt, &piix_sht);
+ }
+ 
+ static int __init piix_init(void)
+diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
+index 7bf4bef..9e8ec19 100644
+--- a/drivers/ata/libata-acpi.c
++++ b/drivers/ata/libata-acpi.c
+@@ -442,40 +442,77 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
+ }
+ 
+ /**
++ * ata_acpi_gtm_xfermask - determine xfermask from GTM parameter
++ * @dev: target device
++ * @gtm: GTM parameter to use
++ *
++ * Determine xfermask for @dev from @gtm.
++ *
++ * LOCKING:
++ * None.
++ *
++ * RETURNS:
++ * Determined xfermask.
++ */
++unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
++				    const struct ata_acpi_gtm *gtm)
++{
++	unsigned long xfer_mask = 0;
++	unsigned int type;
++	int unit;
++	u8 mode;
++
++	/* we always use the 0 slot for crap hardware */
++	unit = dev->devno;
++	if (!(gtm->flags & 0x10))
++		unit = 0;
++
++	/* PIO */
++	mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, gtm->drive[unit].pio);
++	xfer_mask |= ata_xfer_mode2mask(mode);
++
++	/* See if we have MWDMA or UDMA data.  We don't bother with
++	 * MWDMA if UDMA is available, as that means the BIOS set UDMA
++	 * and our error changedown, if it works, is from UDMA to PIO
++	 * anyway.
++	 */
++	if (!(gtm->flags & (1 << (2 * unit))))
++		type = ATA_SHIFT_MWDMA;
++	else
++		type = ATA_SHIFT_UDMA;
++
++	mode = ata_timing_cycle2mode(type, gtm->drive[unit].dma);
++	xfer_mask |= ata_xfer_mode2mask(mode);
++
++	return xfer_mask;
++}
++EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
++
++/**
+  * ata_acpi_cbl_80wire		-	Check for 80 wire cable
+  * @ap: Port to check
++ * @gtm: GTM data to use
+  *
+- * Return 1 if the ACPI mode data for this port indicates the BIOS selected
+- * an 80wire mode.
++ * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
+  */
 -
--	bio = bio_map_kern(q, kbuf, len, gfp_mask);
--	if (IS_ERR(bio))
--		return PTR_ERR(bio);
+-int ata_acpi_cbl_80wire(struct ata_port *ap)
++int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+ {
+-	const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+-	int valid = 0;
++	struct ata_device *dev;
+ 
+-	if (!gtm)
+-		return 0;
++	ata_link_for_each_dev(dev, &ap->link) {
++		unsigned long xfer_mask, udma_mask;
++
++		if (!ata_dev_enabled(dev))
++			continue;
++
++		xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
++		ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
++
++		if (udma_mask & ~ATA_UDMA_MASK_40C)
++			return 1;
++	}
+ 
+-	/* Split timing, DMA enabled */
+-	if ((gtm->flags & 0x11) == 0x11 && gtm->drive[0].dma < 55)
+-		valid |= 1;
+-	if ((gtm->flags & 0x14) == 0x14 && gtm->drive[1].dma < 55)
+-		valid |= 2;
+-	/* Shared timing, DMA enabled */
+-	if ((gtm->flags & 0x11) == 0x01 && gtm->drive[0].dma < 55)
+-		valid |= 1;
+-	if ((gtm->flags & 0x14) == 0x04 && gtm->drive[0].dma < 55)
+-		valid |= 2;
 -
--	if (rq_data_dir(rq) == WRITE)
--		bio->bi_rw |= (1 << BIO_RW);
+-	/* Drive check */
+-	if ((valid & 1) && ata_dev_enabled(&ap->link.device[0]))
+-		return 1;
+-	if ((valid & 2) && ata_dev_enabled(&ap->link.device[1]))
+-		return 1;
+ 	return 0;
+ }
 -
--	blk_rq_bio_prep(q, rq, bio);
--	blk_queue_bounce(q, &rq->bio);
--	rq->buffer = rq->data = NULL;
+ EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
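
The rewritten cable test reduces to a mask check: if any enabled device ends up with a UDMA mode above the ones usable on a 40-wire cable, the BIOS must have set up an 80-wire configuration. A minimal sketch of that check, with a locally defined constant standing in for ATA_UDMA_MASK_40C (assumed here to cover UDMA0-2):

#include <stdio.h>

#define UDMA_MASK_40C	0x07	/* UDMA0-2: modes usable on a 40-wire cable */

/* Return 1 if the given UDMA mask implies an 80-wire cable. */
static int udma_needs_80wire(unsigned long udma_mask)
{
	return (udma_mask & ~UDMA_MASK_40C) != 0;
}

int main(void)
{
	/* hypothetical masks: UDMA0-2 only vs. UDMA0-5 */
	printf("udma0-2: %d\n", udma_needs_80wire(0x07));	/* prints 0 */
	printf("udma0-5: %d\n", udma_needs_80wire(0x3f));	/* prints 1 */
	return 0;
}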
+ 
+ static void ata_acpi_gtf_to_tf(struct ata_device *dev,
+@@ -776,6 +813,36 @@ void ata_acpi_on_resume(struct ata_port *ap)
+ }
+ 
+ /**
++ * ata_acpi_set_state - set the port power state
++ * @ap: target ATA port
++ * @state: state, on/off
++ *
++ * This function executes the _PS0/_PS3 ACPI methods to set the port
++ * power state.  The ACPI spec requires _PS0 when powering on an IDE
++ * channel and _PS3 when powering it off.
++ */
++void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
++{
++	struct ata_device *dev;
++
++	if (!ap->acpi_handle || (ap->flags & ATA_FLAG_ACPI_SATA))
++		return;
++
++	/* channel first and then drives for power on, and vice versa
++	   for power off */
++	if (state.event == PM_EVENT_ON)
++		acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D0);
++
++	ata_link_for_each_dev(dev, &ap->link) {
++		if (dev->acpi_handle && ata_dev_enabled(dev))
++			acpi_bus_set_power(dev->acpi_handle,
++				state.event == PM_EVENT_ON ?
++					ACPI_STATE_D0 : ACPI_STATE_D3);
++	}
++	if (state.event != PM_EVENT_ON)
++		acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D3);
++}
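
The ordering in ata_acpi_set_state() matters: the channel must be in D0 before its drives are powered up, and it must only drop to D3 after the drives have been powered down. A schematic sketch of that sequencing, with a stub standing in for acpi_bus_set_power():

#include <stdio.h>

enum { D0, D3 };

/* Stub for the real power call; just records the order of transitions. */
static void set_power(const char *obj, int state)
{
	printf("%s -> D%d\n", obj, state == D0 ? 0 : 3);
}

/* Power the port on/off, keeping the channel-before-drives ordering. */
static void port_set_state(int power_on)
{
	if (power_on)
		set_power("port", D0);

	set_power("dev0", power_on ? D0 : D3);
	set_power("dev1", power_on ? D0 : D3);

	if (!power_on)
		set_power("port", D3);
}

int main(void)
{
	port_set_state(1);	/* resume: port first, then drives */
	port_set_state(0);	/* suspend: drives first, then port */
	return 0;
}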
++
++/**
+  *	ata_acpi_on_devcfg - ATA ACPI hook called on device configuration
+  * @dev: target ATA device
+  *
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 6380726..bdbd55a 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -119,6 +119,10 @@ int libata_noacpi = 0;
+ module_param_named(noacpi, libata_noacpi, int, 0444);
+ MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
+ 
++int libata_allow_tpm = 0;
++module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
++MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
++
+ MODULE_AUTHOR("Jeff Garzik");
+ MODULE_DESCRIPTION("Library module for ATA devices");
+ MODULE_LICENSE("GPL");
+@@ -450,9 +454,9 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+  *	RETURNS:
+  *	Packed xfer_mask.
+  */
+-static unsigned int ata_pack_xfermask(unsigned int pio_mask,
+-				      unsigned int mwdma_mask,
+-				      unsigned int udma_mask)
++unsigned long ata_pack_xfermask(unsigned long pio_mask,
++				unsigned long mwdma_mask,
++				unsigned long udma_mask)
+ {
+ 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
+ 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
+@@ -469,10 +473,8 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask,
+  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
+  *	Any NULL destination masks will be ignored.
+  */
+-static void ata_unpack_xfermask(unsigned int xfer_mask,
+-				unsigned int *pio_mask,
+-				unsigned int *mwdma_mask,
+-				unsigned int *udma_mask)
++void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
++			 unsigned long *mwdma_mask, unsigned long *udma_mask)
+ {
+ 	if (pio_mask)
+ 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
+@@ -486,9 +488,9 @@ static const struct ata_xfer_ent {
+ 	int shift, bits;
+ 	u8 base;
+ } ata_xfer_tbl[] = {
+-	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
+-	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
+-	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
++	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
++	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
++	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
+ 	{ -1, },
+ };
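
ata_pack_xfermask()/ata_unpack_xfermask(), now exported above, simply place the three per-type mode masks into disjoint bit ranges of one xfer_mask. A stand-alone sketch of the idea; the shift values and widths below are illustrative and not necessarily the kernel's ATA_SHIFT_*/ATA_MASK_* constants:

#include <stdio.h>

/* illustrative bit layout: PIO in bits 0-6, MWDMA in 7-11, UDMA in 12-19 */
#define SHIFT_PIO	0
#define SHIFT_MWDMA	7
#define SHIFT_UDMA	12
#define MASK_PIO	(0x7fUL << SHIFT_PIO)
#define MASK_MWDMA	(0x1fUL << SHIFT_MWDMA)
#define MASK_UDMA	(0xffUL << SHIFT_UDMA)

static unsigned long pack_xfermask(unsigned long pio, unsigned long mwdma,
				   unsigned long udma)
{
	return ((pio << SHIFT_PIO) & MASK_PIO) |
	       ((mwdma << SHIFT_MWDMA) & MASK_MWDMA) |
	       ((udma << SHIFT_UDMA) & MASK_UDMA);
}

static void unpack_xfermask(unsigned long xfer_mask, unsigned long *pio,
			    unsigned long *mwdma, unsigned long *udma)
{
	if (pio)
		*pio = (xfer_mask & MASK_PIO) >> SHIFT_PIO;
	if (mwdma)
		*mwdma = (xfer_mask & MASK_MWDMA) >> SHIFT_MWDMA;
	if (udma)
		*udma = (xfer_mask & MASK_UDMA) >> SHIFT_UDMA;
}

int main(void)
{
	unsigned long pio, mwdma, udma;
	unsigned long m = pack_xfermask(0x1f, 0x07, 0x3f); /* pio0-4, mwdma0-2, udma0-5 */

	unpack_xfermask(m, &pio, &mwdma, &udma);
	printf("mask=0x%lx pio=0x%lx mwdma=0x%lx udma=0x%lx\n", m, pio, mwdma, udma);
	return 0;
}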
+ 
+@@ -503,9 +505,9 @@ static const struct ata_xfer_ent {
+  *	None.
+  *
+  *	RETURNS:
+- *	Matching XFER_* value, 0 if no match found.
++ *	Matching XFER_* value, 0xff if no match found.
+  */
+-static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
++u8 ata_xfer_mask2mode(unsigned long xfer_mask)
+ {
+ 	int highbit = fls(xfer_mask) - 1;
+ 	const struct ata_xfer_ent *ent;
+@@ -513,7 +515,7 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
+ 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+ 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
+ 			return ent->base + highbit - ent->shift;
 -	return 0;
--}
--
--EXPORT_SYMBOL(blk_rq_map_kern);
--
--/**
-- * blk_execute_rq_nowait - insert a request into queue for execution
-- * @q:		queue to insert the request in
-- * @bd_disk:	matching gendisk
-- * @rq:		request to insert
-- * @at_head:    insert request at head or tail of queue
-- * @done:	I/O completion handler
++	return 0xff;
+ }
+ 
+ /**
+@@ -528,13 +530,14 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
+  *	RETURNS:
+  *	Matching xfer_mask, 0 if no match found.
+  */
+-static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
++unsigned long ata_xfer_mode2mask(u8 xfer_mode)
+ {
+ 	const struct ata_xfer_ent *ent;
+ 
+ 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+ 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
+-			return 1 << (ent->shift + xfer_mode - ent->base);
++			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
++				& ~((1 << ent->shift) - 1);
+ 	return 0;
+ }
+ 
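
Note the behavioural change in ata_xfer_mode2mask(): it now returns the mask of the given mode and every slower mode of the same transfer type, not just the single bit for that mode. A quick worked check of the bit arithmetic; the shift and base values are illustrative stand-ins, not the kernel's constants:

#include <stdio.h>

/* Return the mask of `mode` and all slower modes of its type.  `shift` is
 * where the type's mode bits start, `base` the type's lowest mode number;
 * both are illustrative stand-ins here.
 */
static unsigned long mode2mask(unsigned int mode, unsigned int shift,
			       unsigned int base)
{
	return ((2UL << (shift + mode - base)) - 1) & ~((1UL << shift) - 1);
}

int main(void)
{
	/* pretend UDMA modes live at bits 12..19 and UDMA0 is mode number 64 */
	unsigned int shift = 12, base = 64;

	/* UDMA3 (mode 67) -> bits 12..15 set -> 0xf000 */
	printf("udma3 mask = 0x%lx\n", mode2mask(67, shift, base));
	return 0;
}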
+@@ -550,7 +553,7 @@ static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
+  *	RETURNS:
+  *	Matching xfer_shift, -1 if no match found.
+  */
+-static int ata_xfer_mode2shift(unsigned int xfer_mode)
++int ata_xfer_mode2shift(unsigned long xfer_mode)
+ {
+ 	const struct ata_xfer_ent *ent;
+ 
+@@ -574,7 +577,7 @@ static int ata_xfer_mode2shift(unsigned int xfer_mode)
+  *	Constant C string representing highest speed listed in
+  *	@mode_mask, or the constant C string "<n/a>".
+  */
+-static const char *ata_mode_string(unsigned int xfer_mask)
++const char *ata_mode_string(unsigned long xfer_mask)
+ {
+ 	static const char * const xfer_mode_str[] = {
+ 		"PIO0",
+@@ -947,8 +950,8 @@ unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
+ 	if (r_err)
+ 		*r_err = err;
+ 
+-	/* see if device passed diags: if master then continue and warn later */
+-	if (err == 0 && dev->devno == 0)
++	/* see if device passed diags: continue and warn later */
++	if (err == 0)
+ 		/* diagnostic fail : do nothing _YET_ */
+ 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
+ 	else if (err == 1)
+@@ -1286,48 +1289,6 @@ static int ata_hpa_resize(struct ata_device *dev)
+ }
+ 
+ /**
+- *	ata_id_to_dma_mode	-	Identify DMA mode from id block
+- *	@dev: device to identify
+- *	@unknown: mode to assume if we cannot tell
 - *
-- * Description:
-- *    Insert a fully prepared request at the back of the io scheduler queue
-- *    for execution.  Don't wait for completion.
-- */
--void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
--			   struct request *rq, int at_head,
--			   rq_end_io_fn *done)
--{
--	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
--
--	rq->rq_disk = bd_disk;
--	rq->cmd_flags |= REQ_NOMERGE;
--	rq->end_io = done;
--	WARN_ON(irqs_disabled());
--	spin_lock_irq(q->queue_lock);
--	__elv_add_request(q, rq, where, 1);
--	__generic_unplug_device(q);
--	spin_unlock_irq(q->queue_lock);
--}
--EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
--
--/**
-- * blk_execute_rq - insert a request into queue for execution
-- * @q:		queue to insert the request in
-- * @bd_disk:	matching gendisk
-- * @rq:		request to insert
-- * @at_head:    insert request at head or tail of queue
+- *	Set up the timing values for the device based upon the identify
+- *	reported values for the DMA mode. This function is used by drivers
+- *	which rely upon firmware configured modes, but wish to report the
+- *	mode correctly when possible.
 - *
-- * Description:
-- *    Insert a fully prepared request at the back of the io scheduler queue
-- *    for execution and wait for completion.
+- *	In addition we emit similarly formatted messages to the default
+- *	ata_dev_set_mode handler, in order to provide consistency of
+- *	presentation.
 - */
--int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
--		   struct request *rq, int at_head)
--{
--	DECLARE_COMPLETION_ONSTACK(wait);
--	char sense[SCSI_SENSE_BUFFERSIZE];
--	int err = 0;
--
--	/*
--	 * we need an extra reference to the request, so we can look at
--	 * it after io completion
--	 */
--	rq->ref_count++;
--
--	if (!rq->sense) {
--		memset(sense, 0, sizeof(sense));
--		rq->sense = sense;
--		rq->sense_len = 0;
--	}
--
--	rq->end_io_data = &wait;
--	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
--	wait_for_completion(&wait);
--
--	if (rq->errors)
--		err = -EIO;
--
--	return err;
--}
--
--EXPORT_SYMBOL(blk_execute_rq);
--
--static void bio_end_empty_barrier(struct bio *bio, int err)
--{
--	if (err)
--		clear_bit(BIO_UPTODATE, &bio->bi_flags);
--
--	complete(bio->bi_private);
--}
 -
--/**
-- * blkdev_issue_flush - queue a flush
-- * @bdev:	blockdev to issue flush for
-- * @error_sector:	error sector
-- *
-- * Description:
-- *    Issue a flush for the block device in question. Caller can supply
-- *    room for storing the error offset in case of a flush error, if they
-- *    wish to.  Caller must run wait_for_completion() on its own.
-- */
--int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+-void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
 -{
--	DECLARE_COMPLETION_ONSTACK(wait);
--	struct request_queue *q;
--	struct bio *bio;
--	int ret;
--
--	if (bdev->bd_disk == NULL)
--		return -ENXIO;
--
--	q = bdev_get_queue(bdev);
--	if (!q)
--		return -ENXIO;
--
--	bio = bio_alloc(GFP_KERNEL, 0);
--	if (!bio)
--		return -ENOMEM;
--
--	bio->bi_end_io = bio_end_empty_barrier;
--	bio->bi_private = &wait;
--	bio->bi_bdev = bdev;
--	submit_bio(1 << BIO_RW_BARRIER, bio);
--
--	wait_for_completion(&wait);
--
--	/*
--	 * The driver must store the error location in ->bi_sector, if
--	 * it supports it. For non-stacked drivers, this should be copied
--	 * from rq->sector.
--	 */
--	if (error_sector)
--		*error_sector = bio->bi_sector;
--
--	ret = 0;
--	if (!bio_flagged(bio, BIO_UPTODATE))
--		ret = -EIO;
--
--	bio_put(bio);
--	return ret;
--}
--
--EXPORT_SYMBOL(blkdev_issue_flush);
+-	unsigned int mask;
+-	u8 mode;
 -
--static void drive_stat_acct(struct request *rq, int new_io)
--{
--	int rw = rq_data_dir(rq);
+-	/* Pack the DMA modes */
+-	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
+-	if (dev->id[53] & 0x04)
+-		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
 -
--	if (!blk_fs_request(rq) || !rq->rq_disk)
--		return;
+-	/* Select the mode in use */
+-	mode = ata_xfer_mask2mode(mask);
 -
--	if (!new_io) {
--		__disk_stat_inc(rq->rq_disk, merges[rw]);
+-	if (mode != 0) {
+-		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
+-		       ata_mode_string(mask));
 -	} else {
--		disk_round_stats(rq->rq_disk);
--		rq->rq_disk->in_flight++;
+-		/* SWDMA perhaps ? */
+-		mode = unknown;
+-		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
 -	}
--}
--
--/*
-- * add-request adds a request to the linked list.
-- * queue lock is held and interrupts disabled, as we muck with the
-- * request queue list.
-- */
--static inline void add_request(struct request_queue * q, struct request * req)
--{
--	drive_stat_acct(req, 1);
--
--	/*
--	 * elevator indicated where it wants this request to be
--	 * inserted at elevator_merge time
--	 */
--	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
--}
-- 
--/*
-- * disk_round_stats()	- Round off the performance stats on a struct
-- * disk_stats.
-- *
-- * The average IO queue length and utilisation statistics are maintained
-- * by observing the current state of the queue length and the amount of
-- * time it has been in this state for.
-- *
-- * Normally, that accounting is done on IO completion, but that can result
-- * in more than a second's worth of IO being accounted for within any one
-- * second, leading to >100% utilisation.  To deal with that, we call this
-- * function to do a round-off before returning the results when reading
-- * /proc/diskstats.  This accounts immediately for all queue usage up to
-- * the current jiffies and restarts the counters again.
-- */
--void disk_round_stats(struct gendisk *disk)
--{
--	unsigned long now = jiffies;
 -
--	if (now == disk->stamp)
--		return;
--
--	if (disk->in_flight) {
--		__disk_stat_add(disk, time_in_queue,
--				disk->in_flight * (now - disk->stamp));
--		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
--	}
--	disk->stamp = now;
+-	/* Configure the device reporting */
+-	dev->xfer_mode = mode;
+-	dev->xfer_shift = ata_xfer_mode2shift(mode);
 -}
 -
--EXPORT_SYMBOL_GPL(disk_round_stats);
--
--/*
-- * queue lock must be held
-- */
--void __blk_put_request(struct request_queue *q, struct request *req)
--{
--	if (unlikely(!q))
--		return;
--	if (unlikely(--req->ref_count))
--		return;
--
--	elv_completed_request(q, req);
--
--	/*
--	 * Request may not have originated from ll_rw_blk. if not,
--	 * it didn't come out of our reserved rq pools
--	 */
--	if (req->cmd_flags & REQ_ALLOCED) {
--		int rw = rq_data_dir(req);
--		int priv = req->cmd_flags & REQ_ELVPRIV;
--
--		BUG_ON(!list_empty(&req->queuelist));
--		BUG_ON(!hlist_unhashed(&req->hash));
--
--		blk_free_request(q, req);
--		freed_request(q, rw, priv);
+-/**
+  *	ata_noop_dev_select - Select device 0/1 on ATA bus
+  *	@ap: ATA channel to manipulate
+  *	@device: ATA device (numbered from zero) to select
+@@ -1464,9 +1425,9 @@ static inline void ata_dump_id(const u16 *id)
+  *	RETURNS:
+  *	Computed xfermask
+  */
+-static unsigned int ata_id_xfermask(const u16 *id)
++unsigned long ata_id_xfermask(const u16 *id)
+ {
+-	unsigned int pio_mask, mwdma_mask, udma_mask;
++	unsigned long pio_mask, mwdma_mask, udma_mask;
+ 
+ 	/* Usual case. Word 53 indicates word 64 is valid */
+ 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
+@@ -1519,7 +1480,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
+ }
+ 
+ /**
+- *	ata_port_queue_task - Queue port_task
++ *	ata_pio_queue_task - Queue port_task
+  *	@ap: The ata_port to queue port_task for
+  *	@fn: workqueue function to be scheduled
+  *	@data: data for @fn to use
+@@ -1531,16 +1492,15 @@ static unsigned int ata_id_xfermask(const u16 *id)
+  *	one task is active at any given time.
+  *
+  *	libata core layer takes care of synchronization between
+- *	port_task and EH.  ata_port_queue_task() may be ignored for EH
++ *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
+  *	synchronization.
+  *
+  *	LOCKING:
+  *	Inherited from caller.
+  */
+-void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
+-			 unsigned long delay)
++static void ata_pio_queue_task(struct ata_port *ap, void *data,
++			       unsigned long delay)
+ {
+-	PREPARE_DELAYED_WORK(&ap->port_task, fn);
+ 	ap->port_task_data = data;
+ 
+ 	/* may fail if ata_port_flush_task() in progress */
+@@ -2090,7 +2050,7 @@ int ata_dev_configure(struct ata_device *dev)
+ 	struct ata_eh_context *ehc = &dev->link->eh_context;
+ 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+ 	const u16 *id = dev->id;
+-	unsigned int xfer_mask;
++	unsigned long xfer_mask;
+ 	char revbuf[7];		/* XYZ-99\0 */
+ 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
+ 	char modelbuf[ATA_ID_PROD_LEN+1];
+@@ -2161,8 +2121,14 @@ int ata_dev_configure(struct ata_device *dev)
+ 					       "supports DRM functions and may "
+ 					       "not be fully accessable.\n");
+ 			snprintf(revbuf, 7, "CFA");
+-		} else
++		} else {
+ 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
++			/* Warn the user if the device has TPM extensions */
++			if (ata_id_has_tpm(id))
++				ata_dev_printk(dev, KERN_WARNING,
++					       "supports DRM functions and may "
++					       "not be fully accessible.\n");
++		}
+ 
+ 		dev->n_sectors = ata_id_n_sectors(id);
+ 
+@@ -2295,19 +2261,8 @@ int ata_dev_configure(struct ata_device *dev)
+ 			dev->flags |= ATA_DFLAG_DIPM;
+ 	}
+ 
+-	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+-		/* Let the user know. We don't want to disallow opens for
+-		   rescue purposes, or in case the vendor is just a blithering
+-		   idiot */
+-		if (print_info) {
+-			ata_dev_printk(dev, KERN_WARNING,
+-"Drive reports diagnostics failure. This may indicate a drive\n");
+-			ata_dev_printk(dev, KERN_WARNING,
+-"fault or invalid emulation. Contact drive vendor for information.\n");
+-		}
 -	}
--}
--
--EXPORT_SYMBOL_GPL(__blk_put_request);
--
--void blk_put_request(struct request *req)
--{
--	unsigned long flags;
--	struct request_queue *q = req->q;
 -
--	/*
--	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
--	 * following if (q) test.
--	 */
--	if (q) {
--		spin_lock_irqsave(q->queue_lock, flags);
--		__blk_put_request(q, req);
--		spin_unlock_irqrestore(q->queue_lock, flags);
--	}
--}
+-	/* limit bridge transfers to udma5, 200 sectors */
++	/* Limit PATA drive on SATA cable bridge transfers to udma5,
++	   200 sectors */
+ 	if (ata_dev_knobble(dev)) {
+ 		if (ata_msg_drv(ap) && print_info)
+ 			ata_dev_printk(dev, KERN_INFO,
+@@ -2336,6 +2291,21 @@ int ata_dev_configure(struct ata_device *dev)
+ 	if (ap->ops->dev_config)
+ 		ap->ops->dev_config(dev);
+ 
++	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
++		/* Let the user know. We don't want to disallow opens for
++		   rescue purposes, or in case the vendor is just a blithering
++		   idiot. Do this after the dev_config call as some controllers
++		   with buggy firmware may want to avoid reporting false device
++		   bugs */
++
++		if (print_info) {
++			ata_dev_printk(dev, KERN_WARNING,
++"Drive reports diagnostics failure. This may indicate a drive\n");
++			ata_dev_printk(dev, KERN_WARNING,
++"fault or invalid emulation. Contact drive vendor for information.\n");
++		}
++	}
++
+ 	if (ata_msg_probe(ap))
+ 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
+ 			__FUNCTION__, ata_chk_status(ap));
+@@ -2387,6 +2357,18 @@ int ata_cable_unknown(struct ata_port *ap)
+ }
+ 
+ /**
++ *	ata_cable_ignore	-	return ignored PATA cable.
++ *	@ap: port
++ *
++ *	Helper method for drivers which don't use cable type to limit
++ *	transfer mode.
++ */
++int ata_cable_ignore(struct ata_port *ap)
++{
++	return ATA_CBL_PATA_IGN;
++}
++
++/**
+  *	ata_cable_sata	-	return SATA cable type
+  *	@ap: port
+  *
+@@ -2781,38 +2763,33 @@ int sata_set_spd(struct ata_link *link)
+  */
+ 
+ static const struct ata_timing ata_timing[] = {
++/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
++	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
++	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
++	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
++	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
++	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
++	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
++	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
+ 
+-	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
+-	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
+-	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
+-	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
++	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
++	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
++	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
+ 
+-	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
++	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
++	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
++	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
+ 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
+-	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
+-	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
+-	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
++	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
+ 
+ /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
 -
--EXPORT_SYMBOL(blk_put_request);
+-	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
+-	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
+-	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
 -
--/**
-- * blk_end_sync_rq - executes a completion event on a request
-- * @rq: request to complete
-- * @error: end io status of the request
-- */
--void blk_end_sync_rq(struct request *rq, int error)
--{
--	struct completion *waiting = rq->end_io_data;
+-	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
+-	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
+-	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
 -
--	rq->end_io_data = NULL;
--	__blk_put_request(rq->q, rq);
+-	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
+-	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
+-	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
+-	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
 -
--	/*
--	 * complete last, if this is a stack request the process (and thus
--	 * the rq pointer) could be invalid right after this complete()
--	 */
--	complete(waiting);
--}
--EXPORT_SYMBOL(blk_end_sync_rq);
+-	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
+-	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
+-	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
 -
--/*
-- * Has to be called with the request spinlock acquired
+-/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
++	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
++	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
++	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
++	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
++	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
++	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
++	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
+ 
+ 	{ 0xFF }
+ };
+@@ -2845,14 +2822,16 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
+ 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
+ }
+ 
+-static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
++const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
+ {
+-	const struct ata_timing *t;
++	const struct ata_timing *t = ata_timing;
++
++	while (xfer_mode > t->mode)
++		t++;
+ 
+-	for (t = ata_timing; t->mode != speed; t++)
+-		if (t->mode == 0xFF)
+-			return NULL;
+-	return t;
++	if (xfer_mode == t->mode)
++		return t;
++	return NULL;
+ }
+ 
+ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+@@ -2927,6 +2906,57 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+ }
+ 
+ /**
++ *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
++ *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
++ *	@cycle: cycle duration in ns
++ *
++ *	Return matching xfer mode for @cycle.  The returned mode is of
++ *	the transfer type specified by @xfer_shift.  If @cycle is too
++ *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
++ *	than the fastest known mode, the fastest mode is returned.
++ *
++ *	LOCKING:
++ *	None.
++ *
++ *	RETURNS:
++ *	Matching xfer_mode, 0xff if no match found.
++ */
++u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
++{
++	u8 base_mode = 0xff, last_mode = 0xff;
++	const struct ata_xfer_ent *ent;
++	const struct ata_timing *t;
++
++	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
++		if (ent->shift == xfer_shift)
++			base_mode = ent->base;
++
++	for (t = ata_timing_find_mode(base_mode);
++	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
++		unsigned short this_cycle;
++
++		switch (xfer_shift) {
++		case ATA_SHIFT_PIO:
++		case ATA_SHIFT_MWDMA:
++			this_cycle = t->cycle;
++			break;
++		case ATA_SHIFT_UDMA:
++			this_cycle = t->udma;
++			break;
++		default:
++			return 0xff;
++		}
++
++		if (cycle > this_cycle)
++			break;
++
++		last_mode = t->mode;
++	}
++
++	return last_mode;
++}
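
ata_timing_cycle2mode() walks the timing table, now ordered slowest to fastest, and returns the fastest mode whose cycle time is not shorter than the requested one. A small stand-alone sketch of the same search over the UDMA cycle times from the table above:

#include <stdio.h>

struct timing { int mode; int cycle_ns; };

/* UDMA cycle times as in the patch's ata_timing table, slowest first. */
static const struct timing udma[] = {
	{ 0, 120 }, { 1, 80 }, { 2, 60 }, { 3, 45 },
	{ 4, 30 }, { 5, 20 }, { 6, 15 }, { -1, 0 },
};

/* Return the fastest UDMA mode whose cycle is >= the requested cycle,
 * or -1 if even UDMA0 is too fast for the requested cycle.
 */
static int cycle2mode(int cycle)
{
	int last = -1;
	const struct timing *t;

	for (t = udma; t->mode >= 0; t++) {
		if (cycle > t->cycle_ns)
			break;
		last = t->mode;
	}
	return last;
}

int main(void)
{
	printf("50 ns  -> UDMA%d\n", cycle2mode(50));	/* UDMA2 */
	printf("150 ns -> %d\n", cycle2mode(150));	/* -1: slower than UDMA0 */
	return 0;
}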
++
++/**
+  *	ata_down_xfermask_limit - adjust dev xfer masks downward
+  *	@dev: Device to adjust xfer masks
+  *	@sel: ATA_DNXFER_* selector
+@@ -2944,8 +2974,8 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
+ {
+ 	char buf[32];
+-	unsigned int orig_mask, xfer_mask;
+-	unsigned int pio_mask, mwdma_mask, udma_mask;
++	unsigned long orig_mask, xfer_mask;
++	unsigned long pio_mask, mwdma_mask, udma_mask;
+ 	int quiet, highbit;
+ 
+ 	quiet = !!(sel & ATA_DNXFER_QUIET);
+@@ -3039,7 +3069,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
+ 
+ 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
+ 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
+-	if (dev->xfer_shift == ATA_SHIFT_MWDMA && 
++	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
+ 	    dev->dma_mode == XFER_MW_DMA_0 &&
+ 	    (dev->id[63] >> 8) & 1)
+ 		err_mask &= ~AC_ERR_DEV;
+@@ -3089,7 +3119,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+ 
+ 	/* step 1: calculate xfer_mask */
+ 	ata_link_for_each_dev(dev, link) {
+-		unsigned int pio_mask, dma_mask;
++		unsigned long pio_mask, dma_mask;
+ 		unsigned int mode_mask;
+ 
+ 		if (!ata_dev_enabled(dev))
+@@ -3115,7 +3145,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+ 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
+ 
+ 		found = 1;
+-		if (dev->dma_mode)
++		if (dev->dma_mode != 0xff)
+ 			used_dma = 1;
+ 	}
+ 	if (!found)
+@@ -3126,7 +3156,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+ 		if (!ata_dev_enabled(dev))
+ 			continue;
+ 
+-		if (!dev->pio_mode) {
++		if (dev->pio_mode == 0xff) {
+ 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
+ 			rc = -EINVAL;
+ 			goto out;
+@@ -3140,7 +3170,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+ 
+ 	/* step 3: set host DMA timings */
+ 	ata_link_for_each_dev(dev, link) {
+-		if (!ata_dev_enabled(dev) || !dev->dma_mode)
++		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
+ 			continue;
+ 
+ 		dev->xfer_mode = dev->dma_mode;
+@@ -3173,31 +3203,6 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+ }
+ 
+ /**
+- *	ata_set_mode - Program timings and issue SET FEATURES - XFER
+- *	@link: link on which timings will be programmed
+- *	@r_failed_dev: out paramter for failed device
+- *
+- *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
+- *	ata_set_mode() fails, pointer to the failing device is
+- *	returned in @r_failed_dev.
+- *
+- *	LOCKING:
+- *	PCI/etc. bus probe sem.
+- *
+- *	RETURNS:
+- *	0 on success, negative errno otherwise
 - */
--static int attempt_merge(struct request_queue *q, struct request *req,
--			  struct request *next)
+-int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 -{
--	if (!rq_mergeable(req) || !rq_mergeable(next))
--		return 0;
--
--	/*
--	 * not contiguous
--	 */
--	if (req->sector + req->nr_sectors != next->sector)
--		return 0;
--
--	if (rq_data_dir(req) != rq_data_dir(next)
--	    || req->rq_disk != next->rq_disk
--	    || next->special)
--		return 0;
--
--	/*
--	 * If we are allowed to merge, then append bio list
--	 * from next to rq and release next. merge_requests_fn
--	 * will have updated segment counts, update sector
--	 * counts here.
--	 */
--	if (!ll_merge_requests_fn(q, req, next))
--		return 0;
--
--	/*
--	 * At this point we have either done a back merge
--	 * or front merge. We need the smaller start_time of
--	 * the merged requests to be the current request
--	 * for accounting purposes.
--	 */
--	if (time_after(req->start_time, next->start_time))
--		req->start_time = next->start_time;
--
--	req->biotail->bi_next = next->bio;
--	req->biotail = next->biotail;
--
--	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
--
--	elv_merge_requests(q, req, next);
--
--	if (req->rq_disk) {
--		disk_round_stats(req->rq_disk);
--		req->rq_disk->in_flight--;
--	}
--
--	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+-	struct ata_port *ap = link->ap;
 -
--	__blk_put_request(q, next);
--	return 1;
+-	/* has private set_mode? */
+-	if (ap->ops->set_mode)
+-		return ap->ops->set_mode(link, r_failed_dev);
+-	return ata_do_set_mode(link, r_failed_dev);
 -}
 -
--static inline int attempt_back_merge(struct request_queue *q,
--				     struct request *rq)
--{
--	struct request *next = elv_latter_request(q, rq);
--
--	if (next)
--		return attempt_merge(q, rq, next);
--
--	return 0;
--}
+-/**
+  *	ata_tf_to_host - issue ATA taskfile to host controller
+  *	@ap: port to which command is being issued
+  *	@tf: ATA taskfile register set
+@@ -4363,7 +4368,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
+ 	tf.feature = SETFEATURES_XFER;
+ 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
+ 	tf.protocol = ATA_PROT_NODATA;
+-	tf.nsect = dev->xfer_mode;
++	/* If we are using IORDY we must send the mode setting command */
++	if (ata_pio_need_iordy(dev))
++		tf.nsect = dev->xfer_mode;
++	/* If the device has IORDY and the controller does not - turn it off */
++ 	else if (ata_id_has_iordy(dev->id))
++		tf.nsect = 0x01;
++	else /* In the ancient relic department - skip all of this */
++		return 0;
+ 
+ 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ 
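
The new SET FEATURES - XFER MODE logic above is a three-way decision on IORDY. A compact sketch of just that decision; the struct and helper below are local stand-ins, not the libata API, and the "needs IORDY" flag is a simplification of ata_pio_need_iordy():

#include <stdio.h>

/* Local stand-ins for ata_pio_need_iordy()/ata_id_has_iordy(); the real
 * helpers look at the selected PIO mode and the IDENTIFY data.
 */
struct fake_dev {
	int needs_iordy;	/* chosen mode uses IORDY and we support it */
	int id_has_iordy;	/* device advertises IORDY in IDENTIFY */
	int xfer_mode;		/* example mode byte to program */
};

/* Return the nsect value for SET FEATURES - XFER MODE,
 * or -1 when the command should be skipped entirely.
 */
static int xfermode_nsect(const struct fake_dev *dev)
{
	if (dev->needs_iordy)
		return dev->xfer_mode;	/* program the chosen mode */
	if (dev->id_has_iordy)
		return 0x01;		/* device has IORDY, we don't use it: turn it off */
	return -1;			/* ancient relic: skip SET FEATURES */
}

int main(void)
{
	struct fake_dev a = { 1, 1, 0x0c };	/* IORDY in use */
	struct fake_dev b = { 0, 1, 0x0c };	/* device-only IORDY */
	struct fake_dev c = { 0, 0, 0x0c };	/* neither side */

	printf("%d %d %d\n", xfermode_nsect(&a), xfermode_nsect(&b), xfermode_nsect(&c));
	return 0;
}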
+@@ -4462,17 +4474,13 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
+ void ata_sg_clean(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+-	struct scatterlist *sg = qc->__sg;
++	struct scatterlist *sg = qc->sg;
+ 	int dir = qc->dma_dir;
+ 	void *pad_buf = NULL;
+ 
+-	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
+ 	WARN_ON(sg == NULL);
+ 
+-	if (qc->flags & ATA_QCFLAG_SINGLE)
+-		WARN_ON(qc->n_elem > 1);
 -
--static inline int attempt_front_merge(struct request_queue *q,
--				      struct request *rq)
--{
--	struct request *prev = elv_former_request(q, rq);
+-	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
++	VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
+ 
+ 	/* if we padded the buffer out to 32-bit bound, and data
+ 	 * xfer direction is from-device, we must copy from the
+@@ -4481,31 +4489,20 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
+ 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
+ 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+ 
+-	if (qc->flags & ATA_QCFLAG_SG) {
+-		if (qc->n_elem)
+-			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
+-		/* restore last sg */
+-		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
+-		if (pad_buf) {
+-			struct scatterlist *psg = &qc->pad_sgent;
+-			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
+-			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+-			kunmap_atomic(addr, KM_IRQ0);
+-		}
+-	} else {
+-		if (qc->n_elem)
+-			dma_unmap_single(ap->dev,
+-				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
+-				dir);
+-		/* restore sg */
+-		sg->length += qc->pad_len;
+-		if (pad_buf)
+-			memcpy(qc->buf_virt + sg->length - qc->pad_len,
+-			       pad_buf, qc->pad_len);
++	if (qc->mapped_n_elem)
++		dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
++	/* restore last sg */
++	if (qc->last_sg)
++		*qc->last_sg = qc->saved_last_sg;
++	if (pad_buf) {
++		struct scatterlist *psg = &qc->extra_sg[1];
++		void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
++		memcpy(addr + psg->offset, pad_buf, qc->pad_len);
++		kunmap_atomic(addr, KM_IRQ0);
+ 	}
+ 
+ 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
+-	qc->__sg = NULL;
++	qc->sg = NULL;
+ }
+ 
+ /**
+@@ -4523,13 +4520,10 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct scatterlist *sg;
+-	unsigned int idx;
++	unsigned int si, pi;
+ 
+-	WARN_ON(qc->__sg == NULL);
+-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
 -
--	if (prev)
--		return attempt_merge(q, prev, rq);
+-	idx = 0;
+-	ata_for_each_sg(sg, qc) {
++	pi = 0;
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		u32 addr, offset;
+ 		u32 sg_len, len;
+ 
+@@ -4546,18 +4540,17 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
+ 			if ((offset + sg_len) > 0x10000)
+ 				len = 0x10000 - offset;
+ 
+-			ap->prd[idx].addr = cpu_to_le32(addr);
+-			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
++			ap->prd[pi].addr = cpu_to_le32(addr);
++			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
++			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+ 
+-			idx++;
++			pi++;
+ 			sg_len -= len;
+ 			addr += len;
+ 		}
+ 	}
+ 
+-	if (idx)
+-		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
++	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ }
+ 
+ /**
+@@ -4577,13 +4570,10 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct scatterlist *sg;
+-	unsigned int idx;
 -
--	return 0;
--}
+-	WARN_ON(qc->__sg == NULL);
+-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
++	unsigned int si, pi;
+ 
+-	idx = 0;
+-	ata_for_each_sg(sg, qc) {
++	pi = 0;
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		u32 addr, offset;
+ 		u32 sg_len, len, blen;
+ 
+@@ -4601,25 +4591,24 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
+ 				len = 0x10000 - offset;
+ 
+ 			blen = len & 0xffff;
+-			ap->prd[idx].addr = cpu_to_le32(addr);
++			ap->prd[pi].addr = cpu_to_le32(addr);
+ 			if (blen == 0) {
+ 			   /* Some PATA chipsets like the CS5530 can't
+ 			      cope with 0x0000 meaning 64K as the spec says */
+-				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
++				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
+ 				blen = 0x8000;
+-				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
++				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+ 			}
+-			ap->prd[idx].flags_len = cpu_to_le32(blen);
+-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
++			ap->prd[pi].flags_len = cpu_to_le32(blen);
++			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+ 
+-			idx++;
++			pi++;
+ 			sg_len -= len;
+ 			addr += len;
+ 		}
+ 	}
+ 
+-	if (idx)
+-		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
++	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ }
+ 
+ /**
+@@ -4669,8 +4658,8 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
+  */
+ static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
+ {
+-	if (qc->tf.protocol != ATA_PROT_ATAPI &&
+-	    qc->tf.protocol != ATA_PROT_ATAPI_DMA)
++	if (qc->tf.protocol != ATAPI_PROT_PIO &&
++	    qc->tf.protocol != ATAPI_PROT_DMA)
+ 		return 0;
+ 
+ 	if (qc->tf.flags & ATA_TFLAG_WRITE)
+@@ -4756,33 +4745,6 @@ void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
+ void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+ 
+ /**
+- *	ata_sg_init_one - Associate command with memory buffer
+- *	@qc: Command to be associated
+- *	@buf: Memory buffer
+- *	@buflen: Length of memory buffer, in bytes.
+- *
+- *	Initialize the data-related elements of queued_cmd @qc
+- *	to point to a single memory buffer, @buf of byte length @buflen.
+- *
+- *	LOCKING:
+- *	spin_lock_irqsave(host lock)
+- */
 -
--static void init_request_from_bio(struct request *req, struct bio *bio)
+-void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 -{
--	req->cmd_type = REQ_TYPE_FS;
--
--	/*
--	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
--	 */
--	if (bio_rw_ahead(bio) || bio_failfast(bio))
--		req->cmd_flags |= REQ_FAILFAST;
--
--	/*
--	 * REQ_BARRIER implies no merging, but lets make it explicit
--	 */
--	if (unlikely(bio_barrier(bio)))
--		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+-	qc->flags |= ATA_QCFLAG_SINGLE;
 -
--	if (bio_sync(bio))
--		req->cmd_flags |= REQ_RW_SYNC;
--	if (bio_rw_meta(bio))
--		req->cmd_flags |= REQ_RW_META;
+-	qc->__sg = &qc->sgent;
+-	qc->n_elem = 1;
+-	qc->orig_n_elem = 1;
+-	qc->buf_virt = buf;
+-	qc->nbytes = buflen;
+-	qc->cursg = qc->__sg;
 -
--	req->errors = 0;
--	req->hard_sector = req->sector = bio->bi_sector;
--	req->ioprio = bio_prio(bio);
--	req->start_time = jiffies;
--	blk_rq_bio_prep(req->q, req, bio);
+-	sg_init_one(&qc->sgent, buf, buflen);
 -}
 -
--static int __make_request(struct request_queue *q, struct bio *bio)
--{
--	struct request *req;
--	int el_ret, nr_sectors, barrier, err;
--	const unsigned short prio = bio_prio(bio);
--	const int sync = bio_sync(bio);
--	int rw_flags;
--
--	nr_sectors = bio_sectors(bio);
+-/**
+  *	ata_sg_init - Associate command with scatter-gather table.
+  *	@qc: Command to be associated
+  *	@sg: Scatter-gather table.
+@@ -4795,84 +4757,103 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
+  *	LOCKING:
+  *	spin_lock_irqsave(host lock)
+  */
 -
--	/*
--	 * low level driver can indicate that it wants pages above a
--	 * certain limit bounced to low memory (ie for highmem, or even
--	 * ISA dma in theory)
--	 */
--	blk_queue_bounce(q, &bio);
+ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
+ 		 unsigned int n_elem)
+ {
+-	qc->flags |= ATA_QCFLAG_SG;
+-	qc->__sg = sg;
++	qc->sg = sg;
+ 	qc->n_elem = n_elem;
+-	qc->orig_n_elem = n_elem;
+-	qc->cursg = qc->__sg;
++	qc->cursg = qc->sg;
+ }
+ 
+-/**
+- *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
+- *	@qc: Command with memory buffer to be mapped.
+- *
+- *	DMA-map the memory buffer associated with queued_cmd @qc.
+- *
+- *	LOCKING:
+- *	spin_lock_irqsave(host lock)
+- *
+- *	RETURNS:
+- *	Zero on success, negative on error.
+- */
 -
--	barrier = bio_barrier(bio);
--	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
--		err = -EOPNOTSUPP;
--		goto end_io;
+-static int ata_sg_setup_one(struct ata_queued_cmd *qc)
++static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
++				       unsigned int *n_elem_extra,
++				       unsigned int *nbytes_extra)
+ {
+ 	struct ata_port *ap = qc->ap;
+-	int dir = qc->dma_dir;
+-	struct scatterlist *sg = qc->__sg;
+-	dma_addr_t dma_address;
+-	int trim_sg = 0;
++	unsigned int n_elem = qc->n_elem;
++	struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
++
++	*n_elem_extra = 0;
++	*nbytes_extra = 0;
++
++	/* needs padding? */
++	qc->pad_len = qc->nbytes & 3;
++
++	if (likely(!qc->pad_len))
++		return n_elem;
++
++	/* locate last sg and save it */
++	lsg = sg_last(qc->sg, n_elem);
++	qc->last_sg = lsg;
++	qc->saved_last_sg = *lsg;
++
++	sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
+ 
+-	/* we must lengthen transfers to end on a 32-bit boundary */
+-	qc->pad_len = sg->length & 3;
+ 	if (qc->pad_len) {
++		struct scatterlist *psg = &qc->extra_sg[1];
+ 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+-		struct scatterlist *psg = &qc->pad_sgent;
++		unsigned int offset;
+ 
+ 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
+ 
+ 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+ 
+-		if (qc->tf.flags & ATA_TFLAG_WRITE)
+-			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
+-			       qc->pad_len);
++		/* psg->page/offset are used to copy to-be-written
++		 * data in this function or read data in ata_sg_clean.
++		 */
++		offset = lsg->offset + lsg->length - qc->pad_len;
++		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
++			    qc->pad_len, offset_in_page(offset));
++
++		if (qc->tf.flags & ATA_TFLAG_WRITE) {
++			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
++			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
++			kunmap_atomic(addr, KM_IRQ0);
++		}
+ 
+ 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+ 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+-		/* trim sg */
+-		sg->length -= qc->pad_len;
+-		if (sg->length == 0)
+-			trim_sg = 1;
+ 
+-		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
+-			sg->length, qc->pad_len);
 -	}
--
--	spin_lock_irq(q->queue_lock);
--
--	if (unlikely(barrier) || elv_queue_empty(q))
--		goto get_rq;
--
--	el_ret = elv_merge(q, &req, bio);
--	switch (el_ret) {
--		case ELEVATOR_BACK_MERGE:
--			BUG_ON(!rq_mergeable(req));
--
--			if (!ll_back_merge_fn(q, req, bio))
--				break;
--
--			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
--
--			req->biotail->bi_next = bio;
--			req->biotail = bio;
--			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
--			req->ioprio = ioprio_best(req->ioprio, prio);
--			drive_stat_acct(req, 0);
--			if (!attempt_back_merge(q, req))
--				elv_merged_request(q, req, el_ret);
--			goto out;
--
--		case ELEVATOR_FRONT_MERGE:
--			BUG_ON(!rq_mergeable(req));
--
--			if (!ll_front_merge_fn(q, req, bio))
--				break;
--
--			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
--
--			bio->bi_next = req->bio;
--			req->bio = bio;
--
--			/*
--			 * may not be valid. if the low level driver said
--			 * it didn't need a bounce buffer then it better
--			 * not touch req->buffer either...
--			 */
--			req->buffer = bio_data(bio);
--			req->current_nr_sectors = bio_cur_sectors(bio);
--			req->hard_cur_sectors = req->current_nr_sectors;
--			req->sector = req->hard_sector = bio->bi_sector;
--			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
--			req->ioprio = ioprio_best(req->ioprio, prio);
--			drive_stat_acct(req, 0);
--			if (!attempt_front_merge(q, req))
--				elv_merged_request(q, req, el_ret);
--			goto out;
--
--		/* ELV_NO_MERGE: elevator says don't/can't merge. */
--		default:
--			;
++		/* Trim the last sg entry and chain the original and
++		 * padding sg lists.
++		 *
++		 * Because chaining consumes one sg entry, one extra
++		 * sg entry is allocated and the last sg entry is
++		 * copied to it if the length isn't zero after the padded
++		 * amount is removed.
++		 *
++		 * If the last sg entry is completely replaced by
++		 * padding sg entry, the first sg entry is skipped
++		 * while chaining.
++		 */
++		lsg->length -= qc->pad_len;
++		if (lsg->length) {
++			copy_lsg = &qc->extra_sg[0];
++			tsg = &qc->extra_sg[0];
++		} else {
++			n_elem--;
++			tsg = &qc->extra_sg[1];
++		}
+ 
+-	if (trim_sg) {
+-		qc->n_elem--;
+-		goto skip_map;
 -	}
--
--get_rq:
--	/*
--	 * This sync check and mask will be re-done in init_request_from_bio(),
--	 * but we need to set it earlier to expose the sync flag to the
--	 * rq allocator and io schedulers.
--	 */
--	rw_flags = bio_data_dir(bio);
--	if (sync)
--		rw_flags |= REQ_RW_SYNC;
--
--	/*
--	 * Grab a free request. This is might sleep but can not fail.
--	 * Returns with the queue unlocked.
--	 */
--	req = get_request_wait(q, rw_flags, bio);
--
--	/*
--	 * After dropping the lock and possibly sleeping here, our request
--	 * may now be mergeable after it had proven unmergeable (above).
--	 * We don't worry about that case for efficiency. It won't happen
--	 * often, and the elevators are able to handle it.
--	 */
--	init_request_from_bio(req, bio);
--
--	spin_lock_irq(q->queue_lock);
--	if (elv_queue_empty(q))
--		blk_plug_device(q);
--	add_request(q, req);
--out:
--	if (sync)
--		__generic_unplug_device(q);
--
--	spin_unlock_irq(q->queue_lock);
++		esg = &qc->extra_sg[1];
+ 
+-	dma_address = dma_map_single(ap->dev, qc->buf_virt,
+-				     sg->length, dir);
+-	if (dma_mapping_error(dma_address)) {
+-		/* restore sg */
+-		sg->length += qc->pad_len;
+-		return -1;
++		(*n_elem_extra)++;
++		(*nbytes_extra) += 4 - qc->pad_len;
+ 	}
+ 
+-	sg_dma_address(sg) = dma_address;
+-	sg_dma_len(sg) = sg->length;
++	if (copy_lsg)
++		sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
+ 
+-skip_map:
+-	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
+-		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
++	sg_chain(lsg, 1, tsg);
++	sg_mark_end(esg);
+ 
 -	return 0;
++	/* sglist can't start with chaining sg entry, fast forward */
++	if (qc->sg == lsg) {
++		qc->sg = tsg;
++		qc->cursg = tsg;
++	}
++
++	return n_elem;
+ }
+ 
+ /**
+@@ -4888,75 +4869,30 @@ skip_map:
+  *	Zero on success, negative on error.
+  *
+  */
 -
--end_io:
--	bio_endio(bio, err);
--	return 0;
--}
+ static int ata_sg_setup(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+-	struct scatterlist *sg = qc->__sg;
+-	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
+-	int n_elem, pre_n_elem, dir, trim_sg = 0;
++	unsigned int n_elem, n_elem_extra, nbytes_extra;
+ 
+ 	VPRINTK("ENTER, ata%u\n", ap->print_id);
+-	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
+ 
+-	/* we must lengthen transfers to end on a 32-bit boundary */
+-	qc->pad_len = lsg->length & 3;
+-	if (qc->pad_len) {
+-		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+-		struct scatterlist *psg = &qc->pad_sgent;
+-		unsigned int offset;
 -
--/*
-- * If bio->bi_dev is a partition, remap the location
-- */
--static inline void blk_partition_remap(struct bio *bio)
--{
--	struct block_device *bdev = bio->bi_bdev;
+-		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
++	n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);
+ 
+-		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
 -
--	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
--		struct hd_struct *p = bdev->bd_part;
--		const int rw = bio_data_dir(bio);
+-		/*
+-		 * psg->page/offset are used to copy to-be-written
+-		 * data in this function or read data in ata_sg_clean.
+-		 */
+-		offset = lsg->offset + lsg->length - qc->pad_len;
+-		sg_init_table(psg, 1);
+-		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
+-				qc->pad_len, offset_in_page(offset));
 -
--		p->sectors[rw] += bio_sectors(bio);
--		p->ios[rw]++;
+-		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+-			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
+-			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+-			kunmap_atomic(addr, KM_IRQ0);
++	if (n_elem) {
++		n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
++		if (n_elem < 1) {
++			/* restore last sg */
++			if (qc->last_sg)
++				*qc->last_sg = qc->saved_last_sg;
++			return -1;
+ 		}
 -
--		bio->bi_sector += p->start_sect;
--		bio->bi_bdev = bdev->bd_contains;
+-		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+-		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+-		/* trim last sg */
+-		lsg->length -= qc->pad_len;
+-		if (lsg->length == 0)
+-			trim_sg = 1;
 -
--		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
--				    bdev->bd_dev, bio->bi_sector,
--				    bio->bi_sector - p->start_sect);
+-		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
+-			qc->n_elem - 1, lsg->length, qc->pad_len);
 -	}
--}
--
--static void handle_bad_sector(struct bio *bio)
--{
--	char b[BDEVNAME_SIZE];
--
--	printk(KERN_INFO "attempt to access beyond end of device\n");
--	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
--			bdevname(bio->bi_bdev, b),
--			bio->bi_rw,
--			(unsigned long long)bio->bi_sector + bio_sectors(bio),
--			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
--
--	set_bit(BIO_EOF, &bio->bi_flags);
--}
--
--#ifdef CONFIG_FAIL_MAKE_REQUEST
--
--static DECLARE_FAULT_ATTR(fail_make_request);
 -
--static int __init setup_fail_make_request(char *str)
--{
--	return setup_fault_attr(&fail_make_request, str);
--}
--__setup("fail_make_request=", setup_fail_make_request);
+-	pre_n_elem = qc->n_elem;
+-	if (trim_sg && pre_n_elem)
+-		pre_n_elem--;
 -
--static int should_fail_request(struct bio *bio)
--{
--	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
--	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
--		return should_fail(&fail_make_request, bio->bi_size);
+-	if (!pre_n_elem) {
+-		n_elem = 0;
+-		goto skip_map;
+-	}
 -
--	return 0;
--}
+-	dir = qc->dma_dir;
+-	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
+-	if (n_elem < 1) {
+-		/* restore last sg */
+-		lsg->length += qc->pad_len;
+-		return -1;
++		DPRINTK("%d sg elements mapped\n", n_elem);
+ 	}
+ 
+-	DPRINTK("%d sg elements mapped\n", n_elem);
 -
--static int __init fail_make_request_debugfs(void)
+-skip_map:
+-	qc->n_elem = n_elem;
++	qc->n_elem = qc->mapped_n_elem = n_elem;
++	qc->n_elem += n_elem_extra;
++	qc->nbytes += nbytes_extra;
++	qc->flags |= ATA_QCFLAG_DMAMAP;
+ 
+ 	return 0;
+ }
+@@ -4985,63 +4921,77 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+ 
+ /**
+  *	ata_data_xfer - Transfer data by PIO
+- *	@adev: device to target
++ *	@dev: device to target
+  *	@buf: data buffer
+  *	@buflen: buffer length
+- *	@write_data: read/write
++ *	@rw: read/write
+  *
+  *	Transfer data from/to the device data register by PIO.
+  *
+  *	LOCKING:
+  *	Inherited from caller.
++ *
++ *	RETURNS:
++ *	Bytes consumed.
+  */
+-void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
+-		   unsigned int buflen, int write_data)
++unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
++			   unsigned int buflen, int rw)
+ {
+-	struct ata_port *ap = adev->link->ap;
++	struct ata_port *ap = dev->link->ap;
++	void __iomem *data_addr = ap->ioaddr.data_addr;
+ 	unsigned int words = buflen >> 1;
+ 
+ 	/* Transfer multiple of 2 bytes */
+-	if (write_data)
+-		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
++	if (rw == READ)
++		ioread16_rep(data_addr, buf, words);
+ 	else
+-		ioread16_rep(ap->ioaddr.data_addr, buf, words);
++		iowrite16_rep(data_addr, buf, words);
+ 
+ 	/* Transfer trailing 1 byte, if any. */
+ 	if (unlikely(buflen & 0x01)) {
+-		u16 align_buf[1] = { 0 };
++		__le16 align_buf[1] = { 0 };
+ 		unsigned char *trailing_buf = buf + buflen - 1;
+ 
+-		if (write_data) {
+-			memcpy(align_buf, trailing_buf, 1);
+-			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
+-		} else {
+-			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
++		if (rw == READ) {
++			align_buf[0] = cpu_to_le16(ioread16(data_addr));
+ 			memcpy(trailing_buf, align_buf, 1);
++		} else {
++			memcpy(align_buf, trailing_buf, 1);
++			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
+ 		}
++		words++;
+ 	}
++
++	return words << 1;
+ }
+ 
+ /**
+  *	ata_data_xfer_noirq - Transfer data by PIO
+- *	@adev: device to target
++ *	@dev: device to target
+  *	@buf: data buffer
+  *	@buflen: buffer length
+- *	@write_data: read/write
++ *	@rw: read/write
+  *
+  *	Transfer data from/to the device data register by PIO. Do the
+  *	transfer with interrupts disabled.
+  *
+  *	LOCKING:
+  *	Inherited from caller.
++ *
++ *	RETURNS:
++ *	Bytes consumed.
+  */
+-void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
+-			 unsigned int buflen, int write_data)
++unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
++				 unsigned int buflen, int rw)
+ {
+ 	unsigned long flags;
++	unsigned int consumed;
++
+ 	local_irq_save(flags);
+-	ata_data_xfer(adev, buf, buflen, write_data);
++	consumed = ata_data_xfer(dev, buf, buflen, rw);
+ 	local_irq_restore(flags);
++
++	return consumed;
+ }
+ 
+ 
+@@ -5152,13 +5102,13 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+ 	ata_altstatus(ap); /* flush */
+ 
+ 	switch (qc->tf.protocol) {
+-	case ATA_PROT_ATAPI:
++	case ATAPI_PROT_PIO:
+ 		ap->hsm_task_state = HSM_ST;
+ 		break;
+-	case ATA_PROT_ATAPI_NODATA:
++	case ATAPI_PROT_NODATA:
+ 		ap->hsm_task_state = HSM_ST_LAST;
+ 		break;
+-	case ATA_PROT_ATAPI_DMA:
++	case ATAPI_PROT_DMA:
+ 		ap->hsm_task_state = HSM_ST_LAST;
+ 		/* initiate bmdma */
+ 		ap->ops->bmdma_start(qc);
+@@ -5300,12 +5250,15 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
+ 	bytes = (bc_hi << 8) | bc_lo;
+ 
+ 	/* shall be cleared to zero, indicating xfer of data */
+-	if (ireason & (1 << 0))
++	if (unlikely(ireason & (1 << 0)))
+ 		goto err_out;
+ 
+ 	/* make sure transfer direction matches expected */
+ 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+-	if (do_write != i_write)
++	if (unlikely(do_write != i_write))
++		goto err_out;
++
++	if (unlikely(!bytes))
+ 		goto err_out;
+ 
+ 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
+@@ -5341,7 +5294,7 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *q
+ 		    (qc->tf.flags & ATA_TFLAG_WRITE))
+ 		    return 1;
+ 
+-		if (is_atapi_taskfile(&qc->tf) &&
++		if (ata_is_atapi(qc->tf.protocol) &&
+ 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ 			return 1;
+ 	}
+@@ -5506,7 +5459,7 @@ fsm_start:
+ 
+ 	case HSM_ST:
+ 		/* complete command or read/write the data register */
+-		if (qc->tf.protocol == ATA_PROT_ATAPI) {
++		if (qc->tf.protocol == ATAPI_PROT_PIO) {
+ 			/* ATAPI PIO protocol */
+ 			if ((status & ATA_DRQ) == 0) {
+ 				/* No more data to transfer or device error.
+@@ -5664,7 +5617,7 @@ fsm_start:
+ 		msleep(2);
+ 		status = ata_busy_wait(ap, ATA_BUSY, 10);
+ 		if (status & ATA_BUSY) {
+-			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
++			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
+ 			return;
+ 		}
+ 	}
+@@ -5805,6 +5758,22 @@ static void fill_result_tf(struct ata_queued_cmd *qc)
+ 	ap->ops->tf_read(ap, &qc->result_tf);
+ }
+ 
++static void ata_verify_xfer(struct ata_queued_cmd *qc)
++{
++	struct ata_device *dev = qc->dev;
++
++	if (ata_tag_internal(qc->tag))
++		return;
++
++	if (ata_is_nodata(qc->tf.protocol))
++		return;
++
++	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
++		return;
++
++	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
++}
++
+ /**
+  *	ata_qc_complete - Complete an active ATA command
+  *	@qc: Command to complete
+@@ -5876,6 +5845,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
+ 			break;
+ 		}
+ 
++		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
++			ata_verify_xfer(qc);
++
+ 		__ata_qc_complete(qc);
+ 	} else {
+ 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
+@@ -5938,30 +5910,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
+ 	return nr_done;
+ }
+ 
+-static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
 -{
--	return init_fault_attr_dentries(&fail_make_request,
--					"fail_make_request");
--}
--
--late_initcall(fail_make_request_debugfs);
--
--#else /* CONFIG_FAIL_MAKE_REQUEST */
+-	struct ata_port *ap = qc->ap;
 -
--static inline int should_fail_request(struct bio *bio)
--{
--	return 0;
--}
+-	switch (qc->tf.protocol) {
+-	case ATA_PROT_NCQ:
+-	case ATA_PROT_DMA:
+-	case ATA_PROT_ATAPI_DMA:
+-		return 1;
 -
--#endif /* CONFIG_FAIL_MAKE_REQUEST */
+-	case ATA_PROT_ATAPI:
+-	case ATA_PROT_PIO:
+-		if (ap->flags & ATA_FLAG_PIO_DMA)
+-			return 1;
 -
--/*
-- * Check whether this bio extends beyond the end of the device.
-- */
--static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
--{
--	sector_t maxsector;
+-		/* fall through */
 -
--	if (!nr_sectors)
+-	default:
 -		return 0;
--
--	/* Test device or partition size, when known. */
--	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
--	if (maxsector) {
--		sector_t sector = bio->bi_sector;
--
--		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
--			/*
--			 * This may well happen - the kernel calls bread()
--			 * without checking the size of the device, e.g., when
--			 * mounting a device.
--			 */
--			handle_bad_sector(bio);
--			return 1;
--		}
 -	}
 -
--	return 0;
+-	/* never reached */
 -}
 -
--/**
-- * generic_make_request: hand a buffer to its device driver for I/O
-- * @bio:  The bio describing the location in memory and on the device.
-- *
-- * generic_make_request() is used to make I/O requests of block
-- * devices. It is passed a &struct bio, which describes the I/O that needs
-- * to be done.
-- *
-- * generic_make_request() does not return any status.  The
-- * success/failure status of the request, along with notification of
-- * completion, is delivered asynchronously through the bio->bi_end_io
-- * function described (one day) else where.
-- *
-- * The caller of generic_make_request must make sure that bi_io_vec
-- * are set to describe the memory buffer, and that bi_dev and bi_sector are
-- * set to describe the device address, and the
-- * bi_end_io and optionally bi_private are set to describe how
-- * completion notification should be signaled.
-- *
-- * generic_make_request and the drivers it calls may use bi_next if this
-- * bio happens to be merged with someone else, and may change bi_dev and
-- * bi_sector for remaps as it sees fit.  So the values of these fields
-- * should NOT be depended on after the call to generic_make_request.
-- */
--static inline void __generic_make_request(struct bio *bio)
--{
--	struct request_queue *q;
--	sector_t old_sector;
--	int ret, nr_sectors = bio_sectors(bio);
--	dev_t old_dev;
--	int err = -EIO;
--
--	might_sleep();
--
--	if (bio_check_eod(bio, nr_sectors))
--		goto end_io;
--
--	/*
--	 * Resolve the mapping until finished. (drivers are
--	 * still free to implement/resolve their own stacking
--	 * by explicitly returning 0)
--	 *
--	 * NOTE: we don't repeat the blk_size check for each new device.
--	 * Stacking drivers are expected to know what they are doing.
--	 */
--	old_sector = -1;
--	old_dev = 0;
--	do {
--		char b[BDEVNAME_SIZE];
--
--		q = bdev_get_queue(bio->bi_bdev);
--		if (!q) {
--			printk(KERN_ERR
--			       "generic_make_request: Trying to access "
--				"nonexistent block-device %s (%Lu)\n",
--				bdevname(bio->bi_bdev, b),
--				(long long) bio->bi_sector);
--end_io:
--			bio_endio(bio, err);
--			break;
+ /**
+  *	ata_qc_issue - issue taskfile to device
+  *	@qc: command to issue to device
+@@ -5978,6 +5926,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct ata_link *link = qc->dev->link;
++	u8 prot = qc->tf.protocol;
+ 
+ 	/* Make sure only one non-NCQ command is outstanding.  The
+ 	 * check is skipped for old EH because it reuses active qc to
+@@ -5985,7 +5934,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ 	 */
+ 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+ 
+-	if (qc->tf.protocol == ATA_PROT_NCQ) {
++	if (ata_is_ncq(prot)) {
+ 		WARN_ON(link->sactive & (1 << qc->tag));
+ 
+ 		if (!link->sactive)
+@@ -6001,17 +5950,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ 	qc->flags |= ATA_QCFLAG_ACTIVE;
+ 	ap->qc_active |= 1 << qc->tag;
+ 
+-	if (ata_should_dma_map(qc)) {
+-		if (qc->flags & ATA_QCFLAG_SG) {
+-			if (ata_sg_setup(qc))
+-				goto sg_err;
+-		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
+-			if (ata_sg_setup_one(qc))
+-				goto sg_err;
 -		}
+-	} else {
+-		qc->flags &= ~ATA_QCFLAG_DMAMAP;
+-	}
++	/* We guarantee to LLDs that they will have at least one
++	 * non-zero sg if the command is a data command.
++	 */
++	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
++
++	/* ata_sg_setup() may update nbytes */
++	qc->raw_nbytes = qc->nbytes;
++
++	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
++				 (ap->flags & ATA_FLAG_PIO_DMA)))
++		if (ata_sg_setup(qc))
++			goto sg_err;
+ 
+ 	/* if device is sleeping, schedule softreset and abort the link */
+ 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
+@@ -6029,7 +5979,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ 	return;
+ 
+ sg_err:
+-	qc->flags &= ~ATA_QCFLAG_DMAMAP;
+ 	qc->err_mask |= AC_ERR_SYSTEM;
+ err:
+ 	ata_qc_complete(qc);
+@@ -6064,11 +6013,11 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+ 		switch (qc->tf.protocol) {
+ 		case ATA_PROT_PIO:
+ 		case ATA_PROT_NODATA:
+-		case ATA_PROT_ATAPI:
+-		case ATA_PROT_ATAPI_NODATA:
++		case ATAPI_PROT_PIO:
++		case ATAPI_PROT_NODATA:
+ 			qc->tf.flags |= ATA_TFLAG_POLLING;
+ 			break;
+-		case ATA_PROT_ATAPI_DMA:
++		case ATAPI_PROT_DMA:
+ 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+ 				/* see ata_dma_blacklisted() */
+ 				BUG();
+@@ -6091,7 +6040,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+ 		ap->hsm_task_state = HSM_ST_LAST;
+ 
+ 		if (qc->tf.flags & ATA_TFLAG_POLLING)
+-			ata_port_queue_task(ap, ata_pio_task, qc, 0);
++			ata_pio_queue_task(ap, qc, 0);
+ 
+ 		break;
+ 
+@@ -6113,7 +6062,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+ 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ 			/* PIO data out protocol */
+ 			ap->hsm_task_state = HSM_ST_FIRST;
+-			ata_port_queue_task(ap, ata_pio_task, qc, 0);
++			ata_pio_queue_task(ap, qc, 0);
+ 
+ 			/* always send first data block using
+ 			 * the ata_pio_task() codepath.
+@@ -6123,7 +6072,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+ 			ap->hsm_task_state = HSM_ST;
+ 
+ 			if (qc->tf.flags & ATA_TFLAG_POLLING)
+-				ata_port_queue_task(ap, ata_pio_task, qc, 0);
++				ata_pio_queue_task(ap, qc, 0);
+ 
+ 			/* if polling, ata_pio_task() handles the rest.
+ 			 * otherwise, interrupt handler takes over from here.
+@@ -6132,8 +6081,8 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+ 
+ 		break;
+ 
+-	case ATA_PROT_ATAPI:
+-	case ATA_PROT_ATAPI_NODATA:
++	case ATAPI_PROT_PIO:
++	case ATAPI_PROT_NODATA:
+ 		if (qc->tf.flags & ATA_TFLAG_POLLING)
+ 			ata_qc_set_polling(qc);
+ 
+@@ -6144,10 +6093,10 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+ 		/* send cdb by polling if no cdb interrupt */
+ 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+ 		    (qc->tf.flags & ATA_TFLAG_POLLING))
+-			ata_port_queue_task(ap, ata_pio_task, qc, 0);
++			ata_pio_queue_task(ap, qc, 0);
+ 		break;
+ 
+-	case ATA_PROT_ATAPI_DMA:
++	case ATAPI_PROT_DMA:
+ 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+ 
+ 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
+@@ -6156,7 +6105,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+ 
+ 		/* send cdb by polling if no cdb interrupt */
+ 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+-			ata_port_queue_task(ap, ata_pio_task, qc, 0);
++			ata_pio_queue_task(ap, qc, 0);
+ 		break;
+ 
+ 	default:
+@@ -6200,15 +6149,15 @@ inline unsigned int ata_host_intr(struct ata_port *ap,
+ 		 */
+ 
+ 		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+-		 * The flag was turned on only for atapi devices.
+-		 * No need to check is_atapi_taskfile(&qc->tf) again.
++		 * The flag was turned on only for atapi devices.  No
++		 * need to check ata_is_atapi(qc->tf.protocol) again.
+ 		 */
+ 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ 			goto idle_irq;
+ 		break;
+ 	case HSM_ST_LAST:
+ 		if (qc->tf.protocol == ATA_PROT_DMA ||
+-		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
++		    qc->tf.protocol == ATAPI_PROT_DMA) {
+ 			/* check status of DMA engine */
+ 			host_stat = ap->ops->bmdma_status(ap);
+ 			VPRINTK("ata%u: host_stat 0x%X\n",
+@@ -6250,7 +6199,7 @@ inline unsigned int ata_host_intr(struct ata_port *ap,
+ 	ata_hsm_move(ap, qc, status, 0);
+ 
+ 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+-				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
++				       qc->tf.protocol == ATAPI_PROT_DMA))
+ 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+ 
+ 	return 1;	/* irq handled */
+@@ -6772,7 +6721,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
+ 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
+ #endif
+ 
+-	INIT_DELAYED_WORK(&ap->port_task, NULL);
++	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
+ 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+ 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
+ 	INIT_LIST_HEAD(&ap->eh_done_q);
+@@ -7589,7 +7538,6 @@ EXPORT_SYMBOL_GPL(ata_host_register);
+ EXPORT_SYMBOL_GPL(ata_host_activate);
+ EXPORT_SYMBOL_GPL(ata_host_detach);
+ EXPORT_SYMBOL_GPL(ata_sg_init);
+-EXPORT_SYMBOL_GPL(ata_sg_init_one);
+ EXPORT_SYMBOL_GPL(ata_hsm_move);
+ EXPORT_SYMBOL_GPL(ata_qc_complete);
+ EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
+@@ -7601,6 +7549,13 @@ EXPORT_SYMBOL_GPL(ata_std_dev_select);
+ EXPORT_SYMBOL_GPL(sata_print_link_status);
+ EXPORT_SYMBOL_GPL(ata_tf_to_fis);
+ EXPORT_SYMBOL_GPL(ata_tf_from_fis);
++EXPORT_SYMBOL_GPL(ata_pack_xfermask);
++EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
++EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
++EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
++EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
++EXPORT_SYMBOL_GPL(ata_mode_string);
++EXPORT_SYMBOL_GPL(ata_id_xfermask);
+ EXPORT_SYMBOL_GPL(ata_check_status);
+ EXPORT_SYMBOL_GPL(ata_altstatus);
+ EXPORT_SYMBOL_GPL(ata_exec_command);
+@@ -7643,7 +7598,6 @@ EXPORT_SYMBOL_GPL(ata_wait_register);
+ EXPORT_SYMBOL_GPL(ata_busy_sleep);
+ EXPORT_SYMBOL_GPL(ata_wait_after_reset);
+ EXPORT_SYMBOL_GPL(ata_wait_ready);
+-EXPORT_SYMBOL_GPL(ata_port_queue_task);
+ EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
+ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
+ EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+@@ -7662,18 +7616,20 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
+ #endif /* CONFIG_PM */
+ EXPORT_SYMBOL_GPL(ata_id_string);
+ EXPORT_SYMBOL_GPL(ata_id_c_string);
+-EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
+ EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+ 
+ EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
++EXPORT_SYMBOL_GPL(ata_timing_find_mode);
+ EXPORT_SYMBOL_GPL(ata_timing_compute);
+ EXPORT_SYMBOL_GPL(ata_timing_merge);
++EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
+ 
+ #ifdef CONFIG_PCI
+ EXPORT_SYMBOL_GPL(pci_test_config_bits);
+ EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
+ EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
+ EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
++EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
+ EXPORT_SYMBOL_GPL(ata_pci_init_one);
+ EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+ #ifdef CONFIG_PM
+@@ -7715,4 +7671,5 @@ EXPORT_SYMBOL_GPL(ata_dev_try_classify);
+ EXPORT_SYMBOL_GPL(ata_cable_40wire);
+ EXPORT_SYMBOL_GPL(ata_cable_80wire);
+ EXPORT_SYMBOL_GPL(ata_cable_unknown);
++EXPORT_SYMBOL_GPL(ata_cable_ignore);
+ EXPORT_SYMBOL_GPL(ata_cable_sata);
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 21a81cd..4e31071 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -46,9 +46,26 @@
+ #include "libata.h"
+ 
+ enum {
++	/* speed down verdicts */
+ 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
+ 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
+ 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
++	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
++
++	/* error flags */
++	ATA_EFLAG_IS_IO			= (1 << 0),
++	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
++
++	/* error categories */
++	ATA_ECAT_NONE			= 0,
++	ATA_ECAT_ATA_BUS		= 1,
++	ATA_ECAT_TOUT_HSM		= 2,
++	ATA_ECAT_UNK_DEV		= 3,
++	ATA_ECAT_DUBIOUS_NONE		= 4,
++	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
++	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
++	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
++	ATA_ECAT_NR			= 8,
+ };
+ 
+ /* Waiting in ->prereset can never be reliable.  It's sometimes nice
+@@ -213,12 +230,13 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
+ 	if (offset < 0)
+ 		ata_port_desc(ap, "%s %s%llu at 0x%llx", name, type, len, start);
+ 	else
+-		ata_port_desc(ap, "%s 0x%llx", name, start + offset);
++		ata_port_desc(ap, "%s 0x%llx", name,
++				start + (unsigned long long)offset);
+ }
+ 
+ #endif /* CONFIG_PCI */
+ 
+-static void ata_ering_record(struct ata_ering *ering, int is_io,
++static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
+ 			     unsigned int err_mask)
+ {
+ 	struct ata_ering_entry *ent;
+@@ -229,11 +247,20 @@ static void ata_ering_record(struct ata_ering *ering, int is_io,
+ 	ering->cursor %= ATA_ERING_SIZE;
+ 
+ 	ent = &ering->ring[ering->cursor];
+-	ent->is_io = is_io;
++	ent->eflags = eflags;
+ 	ent->err_mask = err_mask;
+ 	ent->timestamp = get_jiffies_64();
+ }
+ 
++static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
++{
++	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
++
++	if (ent->err_mask)
++		return ent;
++	return NULL;
++}
++
+ static void ata_ering_clear(struct ata_ering *ering)
+ {
+ 	memset(ering, 0, sizeof(*ering));
+@@ -445,9 +472,20 @@ void ata_scsi_error(struct Scsi_Host *host)
+ 		spin_lock_irqsave(ap->lock, flags);
+ 
+ 		__ata_port_for_each_link(link, ap) {
++			struct ata_eh_context *ehc = &link->eh_context;
++			struct ata_device *dev;
++
+ 			memset(&link->eh_context, 0, sizeof(link->eh_context));
+ 			link->eh_context.i = link->eh_info;
+ 			memset(&link->eh_info, 0, sizeof(link->eh_info));
++
++			ata_link_for_each_dev(dev, link) {
++				int devno = dev->devno;
++
++				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
++				if (ata_ncq_enabled(dev))
++					ehc->saved_ncq_enabled |= 1 << devno;
++			}
+ 		}
+ 
+ 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
+@@ -1260,10 +1298,10 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
+ 
+ 	/* is it pointless to prefer PIO for "safety reasons"? */
+ 	if (ap->flags & ATA_FLAG_PIO_DMA) {
+-		tf.protocol = ATA_PROT_ATAPI_DMA;
++		tf.protocol = ATAPI_PROT_DMA;
+ 		tf.feature |= ATAPI_PKT_DMA;
+ 	} else {
+-		tf.protocol = ATA_PROT_ATAPI;
++		tf.protocol = ATAPI_PROT_PIO;
+ 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
+ 		tf.lbah = 0;
+ 	}
+@@ -1451,20 +1489,29 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
+ 	return action;
+ }
+ 
+-static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
++static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
++				   int *xfer_ok)
+ {
++	int base = 0;
++
++	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
++		*xfer_ok = 1;
++
++	if (!*xfer_ok)
++		base = ATA_ECAT_DUBIOUS_NONE;
++
+ 	if (err_mask & AC_ERR_ATA_BUS)
+-		return 1;
++		return base + ATA_ECAT_ATA_BUS;
+ 
+ 	if (err_mask & AC_ERR_TIMEOUT)
+-		return 2;
++		return base + ATA_ECAT_TOUT_HSM;
+ 
+-	if (is_io) {
++	if (eflags & ATA_EFLAG_IS_IO) {
+ 		if (err_mask & AC_ERR_HSM)
+-			return 2;
++			return base + ATA_ECAT_TOUT_HSM;
+ 		if ((err_mask &
+ 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
+-			return 3;
++			return base + ATA_ECAT_UNK_DEV;
+ 	}
+ 
+ 	return 0;
+@@ -1472,18 +1519,22 @@ static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
+ 
+ struct speed_down_verdict_arg {
+ 	u64 since;
+-	int nr_errors[4];
++	int xfer_ok;
++	int nr_errors[ATA_ECAT_NR];
+ };
+ 
+ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
+ {
+ 	struct speed_down_verdict_arg *arg = void_arg;
+-	int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask);
++	int cat;
+ 
+ 	if (ent->timestamp < arg->since)
+ 		return -1;
+ 
++	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
++				      &arg->xfer_ok);
+ 	arg->nr_errors[cat]++;
++
+ 	return 0;
+ }
+ 
+@@ -1495,22 +1546,48 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
+  *	whether NCQ needs to be turned off, transfer speed should be
+  *	stepped down, or falling back to PIO is necessary.
+  *
+- *	Cat-1 is ATA_BUS error for any command.
++ *	ECAT_ATA_BUS	: ATA_BUS error for any command
++ *
++ *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
++ *			  IO commands
++ *
++ *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
++ *
++ *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
++ *			  data transfer hasn't been verified.
++ *
++ *	Verdicts are
++ *
++ *	NCQ_OFF		: Turn off NCQ.
++ *
++ *	SPEED_DOWN	: Speed down transfer speed but don't fall back
++ *			  to PIO.
++ *
++ *	FALLBACK_TO_PIO	: Fall back to PIO.
++ *
++ *	Even if multiple verdicts are returned, only one action is
++ *	taken per error.  An action triggered by non-DUBIOUS errors
++ *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
++ *	This is to expedite speed down decisions right after device is
++ *	initially configured.
+  *
+- *	Cat-2 is TIMEOUT for any command or HSM violation for known
+- *	supported commands.
++ *	The following are speed down rules.  #1 and #2 deal with
++ *	DUBIOUS errors.
+  *
+- *	Cat-3 is is unclassified DEV error for known supported
+- *	command.
++ *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
++ *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
+  *
+- *	NCQ needs to be turned off if there have been more than 3
+- *	Cat-2 + Cat-3 errors during last 10 minutes.
++ *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
++ *	   occurred during last 5 mins, NCQ_OFF.
+  *
+- *	Speed down is necessary if there have been more than 3 Cat-1 +
+- *	Cat-2 errors or 10 Cat-3 errors during last 10 minutes.
++ *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
++ *	   occurred during last 5 mins, FALLBACK_TO_PIO.
+  *
+- *	Falling back to PIO mode is necessary if there have been more
+- *	than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes.
++ *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
++ *	   during last 10 mins, NCQ_OFF.
++ *
++ *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
++ *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
+  *
+  *	LOCKING:
+  *	Inherited from caller.
+@@ -1525,23 +1602,38 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
+ 	struct speed_down_verdict_arg arg;
+ 	unsigned int verdict = 0;
+ 
+-	/* scan past 10 mins of error history */
++	/* scan past 5 mins of error history */
+ 	memset(&arg, 0, sizeof(arg));
+-	arg.since = j64 - min(j64, j10mins);
++	arg.since = j64 - min(j64, j5mins);
+ 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
+ 
+-	if (arg.nr_errors[2] + arg.nr_errors[3] > 3)
+-		verdict |= ATA_EH_SPDN_NCQ_OFF;
+-	if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10)
+-		verdict |= ATA_EH_SPDN_SPEED_DOWN;
++	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
++	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
++		verdict |= ATA_EH_SPDN_SPEED_DOWN |
++			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
+ 
+-	/* scan past 3 mins of error history */
++	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
++	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
++		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
++
++	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
++	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
++	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
++		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
++
++	/* scan past 10 mins of error history */
+ 	memset(&arg, 0, sizeof(arg));
+-	arg.since = j64 - min(j64, j5mins);
++	arg.since = j64 - min(j64, j10mins);
+ 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
+ 
+-	if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10)
+-		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
++	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
++	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
++		verdict |= ATA_EH_SPDN_NCQ_OFF;
++
++	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
++	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
++	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
++		verdict |= ATA_EH_SPDN_SPEED_DOWN;
+ 
+ 	return verdict;
+ }
+@@ -1549,7 +1641,7 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
+ /**
+  *	ata_eh_speed_down - record error and speed down if necessary
+  *	@dev: Failed device
+- *	@is_io: Did the device fail during normal IO?
++ *	@eflags: mask of ATA_EFLAG_* flags
+  *	@err_mask: err_mask of the error
+  *
+  *	Record error and examine error history to determine whether
+@@ -1563,18 +1655,20 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
+  *	RETURNS:
+  *	Determined recovery action.
+  */
+-static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
+-				      unsigned int err_mask)
++static unsigned int ata_eh_speed_down(struct ata_device *dev,
++				unsigned int eflags, unsigned int err_mask)
+ {
++	struct ata_link *link = dev->link;
++	int xfer_ok = 0;
+ 	unsigned int verdict;
+ 	unsigned int action = 0;
+ 
+ 	/* don't bother if Cat-0 error */
+-	if (ata_eh_categorize_error(is_io, err_mask) == 0)
++	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
+ 		return 0;
+ 
+ 	/* record error and determine whether speed down is necessary */
+-	ata_ering_record(&dev->ering, is_io, err_mask);
++	ata_ering_record(&dev->ering, eflags, err_mask);
+ 	verdict = ata_eh_speed_down_verdict(dev);
+ 
+ 	/* turn off NCQ? */
+@@ -1590,7 +1684,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
+ 	/* speed down? */
+ 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
+ 		/* speed down SATA link speed if possible */
+-		if (sata_down_spd_limit(dev->link) == 0) {
++		if (sata_down_spd_limit(link) == 0) {
+ 			action |= ATA_EH_HARDRESET;
+ 			goto done;
+ 		}
+@@ -1618,10 +1712,10 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
+ 	}
+ 
+ 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
+-	 * SATA.  Consider it only for PATA.
++	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
+ 	 */
+ 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
+-	    (dev->link->ap->cbl != ATA_CBL_SATA) &&
++	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
+ 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
+ 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
+ 			dev->spdn_cnt = 0;
+@@ -1633,7 +1727,8 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
+ 	return 0;
+  done:
+ 	/* device has been slowed down, blow error history */
+-	ata_ering_clear(&dev->ering);
++	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
++		ata_ering_clear(&dev->ering);
+ 	return action;
+ }
+ 
+@@ -1653,8 +1748,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
+ 	struct ata_port *ap = link->ap;
+ 	struct ata_eh_context *ehc = &link->eh_context;
+ 	struct ata_device *dev;
+-	unsigned int all_err_mask = 0;
+-	int tag, is_io = 0;
++	unsigned int all_err_mask = 0, eflags = 0;
++	int tag;
+ 	u32 serror;
+ 	int rc;
+ 
+@@ -1713,15 +1808,15 @@ static void ata_eh_link_autopsy(struct ata_link *link)
+ 		ehc->i.dev = qc->dev;
+ 		all_err_mask |= qc->err_mask;
+ 		if (qc->flags & ATA_QCFLAG_IO)
+-			is_io = 1;
++			eflags |= ATA_EFLAG_IS_IO;
+ 	}
+ 
+ 	/* enforce default EH actions */
+ 	if (ap->pflags & ATA_PFLAG_FROZEN ||
+ 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
+ 		ehc->i.action |= ATA_EH_SOFTRESET;
+-	else if ((is_io && all_err_mask) ||
+-		 (!is_io && (all_err_mask & ~AC_ERR_DEV)))
++	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
++		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
+ 		ehc->i.action |= ATA_EH_REVALIDATE;
+ 
+ 	/* If we have offending qcs and the associated failed device,
+@@ -1743,8 +1838,11 @@ static void ata_eh_link_autopsy(struct ata_link *link)
+ 		      ata_dev_enabled(link->device))))
+ 	    dev = link->device;
+ 
+-	if (dev)
+-		ehc->i.action |= ata_eh_speed_down(dev, is_io, all_err_mask);
++	if (dev) {
++		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
++			eflags |= ATA_EFLAG_DUBIOUS_XFER;
++		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
++	}
+ 
+ 	DPRINTK("EXIT\n");
+ }
+@@ -1880,8 +1978,8 @@ static void ata_eh_link_report(struct ata_link *link)
+ 				[ATA_PROT_PIO]		= "pio",
+ 				[ATA_PROT_DMA]		= "dma",
+ 				[ATA_PROT_NCQ]		= "ncq",
+-				[ATA_PROT_ATAPI]	= "pio",
+-				[ATA_PROT_ATAPI_DMA]	= "dma",
++				[ATAPI_PROT_PIO]	= "pio",
++				[ATAPI_PROT_DMA]	= "dma",
+ 			};
+ 
+ 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
+@@ -1889,7 +1987,7 @@ static void ata_eh_link_report(struct ata_link *link)
+ 				 dma_str[qc->dma_dir]);
+ 		}
+ 
+-		if (is_atapi_taskfile(&qc->tf))
++		if (ata_is_atapi(qc->tf.protocol))
+ 			snprintf(cdb_buf, sizeof(cdb_buf),
+ 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
+ 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
+@@ -2329,6 +2427,58 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
+ 	return rc;
+ }
+ 
++/**
++ *	ata_set_mode - Program timings and issue SET FEATURES - XFER
++ *	@link: link on which timings will be programmed
++ *	@r_failed_dev: out parameter for failed device
++ *
++ *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
++ *	ata_set_mode() fails, pointer to the failing device is
++ *	returned in @r_failed_dev.
++ *
++ *	LOCKING:
++ *	PCI/etc. bus probe sem.
++ *
++ *	RETURNS:
++ *	0 on success, negative errno otherwise
++ */
++int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
++{
++	struct ata_port *ap = link->ap;
++	struct ata_device *dev;
++	int rc;
++
++	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
++	ata_link_for_each_dev(dev, link) {
++		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
++			struct ata_ering_entry *ent;
++
++			ent = ata_ering_top(&dev->ering);
++			if (ent)
++				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
++		}
++	}
++
++	/* has private set_mode? */
++	if (ap->ops->set_mode)
++		rc = ap->ops->set_mode(link, r_failed_dev);
++	else
++		rc = ata_do_set_mode(link, r_failed_dev);
++
++	/* if transfer mode has changed, set DUBIOUS_XFER on device */
++	ata_link_for_each_dev(dev, link) {
++		struct ata_eh_context *ehc = &link->eh_context;
++		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
++		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
++
++		if (dev->xfer_mode != saved_xfer_mode ||
++		    ata_ncq_enabled(dev) != saved_ncq)
++			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
++	}
++
++	return rc;
++}
++
+ static int ata_link_nr_enabled(struct ata_link *link)
+ {
+ 	struct ata_device *dev;
+@@ -2375,6 +2525,24 @@ static int ata_eh_skip_recovery(struct ata_link *link)
+ 	return 1;
+ }
+ 
++static int ata_eh_schedule_probe(struct ata_device *dev)
++{
++	struct ata_eh_context *ehc = &dev->link->eh_context;
++
++	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
++	    (ehc->did_probe_mask & (1 << dev->devno)))
++		return 0;
++
++	ata_eh_detach_dev(dev);
++	ata_dev_init(dev);
++	ehc->did_probe_mask |= (1 << dev->devno);
++	ehc->i.action |= ATA_EH_SOFTRESET;
++	ehc->saved_xfer_mode[dev->devno] = 0;
++	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
++
++	return 1;
++}
++
+ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
+ {
+ 	struct ata_eh_context *ehc = &dev->link->eh_context;
+@@ -2406,16 +2574,9 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
+ 		if (ata_link_offline(dev->link))
+ 			ata_eh_detach_dev(dev);
+ 
+-		/* probe if requested */
+-		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
+-		    !(ehc->did_probe_mask & (1 << dev->devno))) {
+-			ata_eh_detach_dev(dev);
+-			ata_dev_init(dev);
 -
--		if (unlikely(nr_sectors > q->max_hw_sectors)) {
--			printk("bio too big device %s (%u > %u)\n", 
--				bdevname(bio->bi_bdev, b),
--				bio_sectors(bio),
--				q->max_hw_sectors);
--			goto end_io;
++		/* schedule probe if necessary */
++		if (ata_eh_schedule_probe(dev))
+ 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+-			ehc->did_probe_mask |= (1 << dev->devno);
+-			ehc->i.action |= ATA_EH_SOFTRESET;
 -		}
+ 
+ 		return 1;
+ 	} else {
+@@ -2492,14 +2653,9 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+ 			if (dev->flags & ATA_DFLAG_DETACH)
+ 				ata_eh_detach_dev(dev);
+ 
+-			if (!ata_dev_enabled(dev) &&
+-			    ((ehc->i.probe_mask & (1 << dev->devno)) &&
+-			     !(ehc->did_probe_mask & (1 << dev->devno)))) {
+-				ata_eh_detach_dev(dev);
+-				ata_dev_init(dev);
+-				ehc->did_probe_mask |= (1 << dev->devno);
+-				ehc->i.action |= ATA_EH_SOFTRESET;
+-			}
++			/* schedule probe if necessary */
++			if (!ata_dev_enabled(dev))
++				ata_eh_schedule_probe(dev);
+ 		}
+ 	}
+ 
+@@ -2747,6 +2903,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
+ 	if (ap->ops->port_suspend)
+ 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
+ 
++	ata_acpi_set_state(ap, PMSG_SUSPEND);
+  out:
+ 	/* report result */
+ 	spin_lock_irqsave(ap->lock, flags);
+@@ -2792,6 +2949,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
+ 
+ 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
+ 
++	ata_acpi_set_state(ap, PMSG_ON);
++
+ 	if (ap->ops->port_resume)
+ 		rc = ap->ops->port_resume(ap);
+ 
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 14daf48..c02c490 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -517,7 +517,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
+ 		qc->scsicmd = cmd;
+ 		qc->scsidone = done;
+ 
+-		qc->__sg = scsi_sglist(cmd);
++		qc->sg = scsi_sglist(cmd);
+ 		qc->n_elem = scsi_sg_count(cmd);
+ 	} else {
+ 		cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
+@@ -839,7 +839,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
+ 	if (dev->class == ATA_DEV_ATAPI) {
+ 		struct request_queue *q = sdev->request_queue;
+ 		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
+-	}
++
++		/* set the min alignment */
++		blk_queue_update_dma_alignment(sdev->request_queue,
++					       ATA_DMA_PAD_SZ - 1);
++	} else
++		/* ATA devices must be sector aligned */
++		blk_queue_update_dma_alignment(sdev->request_queue,
++					       ATA_SECT_SIZE - 1);
+ 
+ 	if (dev->class == ATA_DEV_ATA)
+ 		sdev->manage_start_stop = 1;
+@@ -878,7 +885,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
+ 	if (dev)
+ 		ata_scsi_dev_config(sdev, dev);
+ 
+-	return 0;	/* scsi layer doesn't check return value, sigh */
++	return 0;
+ }
+ 
+ /**
+@@ -2210,7 +2217,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
+ 
+ 		/* sector size */
+ 		ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8);
+-		ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE);
++		ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE & 0xff);
+ 	} else {
+ 		/* sector count, 64-bit */
+ 		ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7));
+@@ -2224,7 +2231,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
+ 
+ 		/* sector size */
+ 		ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8);
+-		ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE);
++		ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE & 0xff);
+ 	}
+ 
+ 	return 0;
+@@ -2331,7 +2338,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
+ 	DPRINTK("ATAPI request sense\n");
+ 
+ 	/* FIXME: is this needed? */
+-	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
++	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ 
+ 	ap->ops->tf_read(ap, &qc->tf);
+ 
+@@ -2341,7 +2348,9 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
+ 
+ 	ata_qc_reinit(qc);
+ 
+-	ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
++	/* setup sg table and init transfer direction */
++	sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
++	ata_sg_init(qc, &qc->sgent, 1);
+ 	qc->dma_dir = DMA_FROM_DEVICE;
+ 
+ 	memset(&qc->cdb, 0, qc->dev->cdb_len);
+@@ -2352,10 +2361,10 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
+ 	qc->tf.command = ATA_CMD_PACKET;
+ 
+ 	if (ata_pio_use_silly(ap)) {
+-		qc->tf.protocol = ATA_PROT_ATAPI_DMA;
++		qc->tf.protocol = ATAPI_PROT_DMA;
+ 		qc->tf.feature |= ATAPI_PKT_DMA;
+ 	} else {
+-		qc->tf.protocol = ATA_PROT_ATAPI;
++		qc->tf.protocol = ATAPI_PROT_PIO;
+ 		qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
+ 		qc->tf.lbah = 0;
+ 	}
+@@ -2526,12 +2535,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ 	if (using_pio || nodata) {
+ 		/* no data, or PIO data xfer */
+ 		if (nodata)
+-			qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
++			qc->tf.protocol = ATAPI_PROT_NODATA;
+ 		else
+-			qc->tf.protocol = ATA_PROT_ATAPI;
++			qc->tf.protocol = ATAPI_PROT_PIO;
+ 	} else {
+ 		/* DMA data xfer */
+-		qc->tf.protocol = ATA_PROT_ATAPI_DMA;
++		qc->tf.protocol = ATAPI_PROT_DMA;
+ 		qc->tf.feature |= ATAPI_PKT_DMA;
+ 
+ 		if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE))
+@@ -2690,6 +2699,24 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
+ 	if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
+ 		goto invalid_fld;
+ 
++	/*
++	 * Filter TPM commands by default. These provide an
++	 * essentially uncontrolled encrypted "back door" between
++	 * applications and the disk. Set libata.allow_tpm=1 if you
++	 * have a real reason for wanting to use them. This ensures
++	 * that installed software cannot easily mess stuff up without
++	 * user intent. DVR type users will probably ship with this enabled
++	 * for movie content management.
++	 *
++	 * Note that for ATA8 we can issue a DCS change and DCS freeze lock
++	 * for this and should do so in future, but that is not sufficient as
++	 * DCS is an optional feature set. Thus we also do the software filter
++	 * so that we comply with the TC consortium stated goal that the user
++	 * can turn off TC features of their system.
++	 */
++	if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
++		goto invalid_fld;
++
+ 	/* We may not issue DMA commands if no DMA mode is set */
+ 	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
+ 		goto invalid_fld;
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index b7ac80b..60cd4b1 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -147,7 +147,9 @@ void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+  *	@tf: ATA taskfile register set for storing input
+  *
+  *	Reads ATA taskfile registers for currently-selected device
+- *	into @tf.
++ *	into @tf. Assumes the device has a fully SFF compliant task file
++ *	layout and behaviour. If your device does not (e.g. has a different
++ *	status method) then you will need to provide a replacement tf_read.
+  *
+  *	LOCKING:
+  *	Inherited from caller.
+@@ -156,7 +158,7 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+ {
+ 	struct ata_ioports *ioaddr = &ap->ioaddr;
+ 
+-	tf->command = ata_chk_status(ap);
++	tf->command = ata_check_status(ap);
+ 	tf->feature = ioread8(ioaddr->error_addr);
+ 	tf->nsect = ioread8(ioaddr->nsect_addr);
+ 	tf->lbal = ioread8(ioaddr->lbal_addr);
+@@ -415,7 +417,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
+ 	ap->hsm_task_state = HSM_ST_IDLE;
+ 
+ 	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
+-		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
++		   qc->tf.protocol == ATAPI_PROT_DMA)) {
+ 		u8 host_stat;
+ 
+ 		host_stat = ap->ops->bmdma_status(ap);
+@@ -549,7 +551,7 @@ int ata_pci_init_bmdma(struct ata_host *host)
+ 		return rc;
+ 
+ 	/* request and iomap DMA region */
+-	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
++	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
+ 	if (rc) {
+ 		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
+ 		return -ENOMEM;
+@@ -619,7 +621,8 @@ int ata_pci_init_sff_host(struct ata_host *host)
+ 			continue;
+ 		}
+ 
+-		rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME);
++		rc = pcim_iomap_regions(pdev, 0x3 << base,
++					dev_driver_string(gdev));
+ 		if (rc) {
+ 			dev_printk(KERN_WARNING, gdev,
+ 				   "failed to request/iomap BARs for port %d "
+@@ -711,6 +714,99 @@ int ata_pci_prepare_sff_host(struct pci_dev *pdev,
+ }
+ 
+ /**
++ *	ata_pci_activate_sff_host - start SFF host, request IRQ and register it
++ *	@host: target SFF ATA host
++ *	@irq_handler: irq_handler used when requesting IRQ(s)
++ *	@sht: scsi_host_template to use when registering the host
++ *
++ *	This is the counterpart of ata_host_activate() for SFF ATA
++ *	hosts.  This separate helper is necessary because SFF hosts
++ *	use two separate interrupts in legacy mode.
++ *
++ *	LOCKING:
++ *	Inherited from calling layer (may sleep).
++ *
++ *	RETURNS:
++ *	0 on success, -errno otherwise.
++ */
++int ata_pci_activate_sff_host(struct ata_host *host,
++			      irq_handler_t irq_handler,
++			      struct scsi_host_template *sht)
++{
++	struct device *dev = host->dev;
++	struct pci_dev *pdev = to_pci_dev(dev);
++	const char *drv_name = dev_driver_string(host->dev);
++	int legacy_mode = 0, rc;
++
++	rc = ata_host_start(host);
++	if (rc)
++		return rc;
++
++	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
++		u8 tmp8, mask;
++
++		/* TODO: What if one channel is in native mode ... */
++		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
++		mask = (1 << 2) | (1 << 0);
++		if ((tmp8 & mask) != mask)
++			legacy_mode = 1;
++#if defined(CONFIG_NO_ATA_LEGACY)
++		/* Some platforms with PCI limits cannot address compat
++		   port space. In that case we punt if their firmware has
++		   left a device in compatibility mode */
++		if (legacy_mode) {
++			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
++			return -EOPNOTSUPP;
++		}
++#endif
++	}
++
++	if (!devres_open_group(dev, NULL, GFP_KERNEL))
++		return -ENOMEM;
++
++	if (!legacy_mode && pdev->irq) {
++		rc = devm_request_irq(dev, pdev->irq, irq_handler,
++				      IRQF_SHARED, drv_name, host);
++		if (rc)
++			goto out;
++
++		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
++		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
++	} else if (legacy_mode) {
++		if (!ata_port_is_dummy(host->ports[0])) {
++			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
++					      irq_handler, IRQF_SHARED,
++					      drv_name, host);
++			if (rc)
++				goto out;
++
++			ata_port_desc(host->ports[0], "irq %d",
++				      ATA_PRIMARY_IRQ(pdev));
++		}
++
++		if (!ata_port_is_dummy(host->ports[1])) {
++			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
++					      irq_handler, IRQF_SHARED,
++					      drv_name, host);
++			if (rc)
++				goto out;
++
++			ata_port_desc(host->ports[1], "irq %d",
++				      ATA_SECONDARY_IRQ(pdev));
++		}
++	}
++
++	rc = ata_host_register(host, sht);
++ out:
++	if (rc == 0)
++		devres_remove_group(dev, NULL);
++	else
++		devres_release_group(dev, NULL);
++
++	return rc;
++}
++
++/**
+  *	ata_pci_init_one - Initialize/register PCI IDE host controller
+  *	@pdev: Controller to be initialized
+  *	@ppi: array of port_info, must be enough for two ports
+@@ -739,8 +835,6 @@ int ata_pci_init_one(struct pci_dev *pdev,
+ 	struct device *dev = &pdev->dev;
+ 	const struct ata_port_info *pi = NULL;
+ 	struct ata_host *host = NULL;
+-	u8 mask;
+-	int legacy_mode = 0;
+ 	int i, rc;
+ 
+ 	DPRINTK("ENTER\n");
+@@ -762,95 +856,24 @@ int ata_pci_init_one(struct pci_dev *pdev,
+ 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+-	/* FIXME: Really for ATA it isn't safe because the device may be
+-	   multi-purpose and we want to leave it alone if it was already
+-	   enabled. Secondly for shared use as Arjan says we want refcounting
 -
--		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
--			goto end_io;
--
--		if (should_fail_request(bio))
--			goto end_io;
--
--		/*
--		 * If this device has partitions, remap block n
--		 * of partition p to block n+start(p) of the disk.
--		 */
--		blk_partition_remap(bio);
--
--		if (old_sector != -1)
--			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
--					    old_sector);
--
--		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+-	   Checking dev->is_enabled is insufficient as this is not set at
+-	   boot for the primary video which is BIOS enabled
+-	  */
 -
--		old_sector = bio->bi_sector;
--		old_dev = bio->bi_bdev->bd_dev;
+ 	rc = pcim_enable_device(pdev);
+ 	if (rc)
+-		goto err_out;
++		goto out;
+ 
+-	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+-		u8 tmp8;
 -
--		if (bio_check_eod(bio, nr_sectors))
--			goto end_io;
--		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
--			err = -EOPNOTSUPP;
--			goto end_io;
+-		/* TODO: What if one channel is in native mode ... */
+-		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+-		mask = (1 << 2) | (1 << 0);
+-		if ((tmp8 & mask) != mask)
+-			legacy_mode = 1;
+-#if defined(CONFIG_NO_ATA_LEGACY)
+-		/* Some platforms with PCI limits cannot address compat
+-		   port space. In that case we punt if their firmware has
+-		   left a device in compatibility mode */
+-		if (legacy_mode) {
+-			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
+-			rc = -EOPNOTSUPP;
+-			goto err_out;
 -		}
--
--		ret = q->make_request_fn(q, bio);
--	} while (ret);
--}
--
--/*
-- * We only want one ->make_request_fn to be active at a time,
-- * else stack usage with stacked devices could be a problem.
-- * So use current->bio_{list,tail} to keep a list of requests
-- * submited by a make_request_fn function.
-- * current->bio_tail is also used as a flag to say if
-- * generic_make_request is currently active in this task or not.
-- * If it is NULL, then no make_request is active.  If it is non-NULL,
-- * then a make_request is active, and new requests should be added
-- * at the tail
-- */
--void generic_make_request(struct bio *bio)
--{
--	if (current->bio_tail) {
--		/* make_request is active */
--		*(current->bio_tail) = bio;
--		bio->bi_next = NULL;
--		current->bio_tail = &bio->bi_next;
--		return;
+-#endif
 -	}
--	/* following loop may be a bit non-obvious, and so deserves some
--	 * explanation.
--	 * Before entering the loop, bio->bi_next is NULL (as all callers
--	 * ensure that) so we have a list with a single bio.
--	 * We pretend that we have just taken it off a longer list, so
--	 * we assign bio_list to the next (which is NULL) and bio_tail
--	 * to &bio_list, thus initialising the bio_list of new bios to be
--	 * added.  __generic_make_request may indeed add some more bios
--	 * through a recursive call to generic_make_request.  If it
--	 * did, we find a non-NULL value in bio_list and re-enter the loop
--	 * from the top.  In this case we really did just take the bio
--	 * of the top of the list (no pretending) and so fixup bio_list and
--	 * bio_tail or bi_next, and call into __generic_make_request again.
--	 *
--	 * The loop was structured like this to make only one call to
--	 * __generic_make_request (which is important as it is large and
--	 * inlined) and to keep the structure simple.
--	 */
--	BUG_ON(bio->bi_next);
--	do {
--		current->bio_list = bio->bi_next;
--		if (bio->bi_next == NULL)
--			current->bio_tail = &current->bio_list;
--		else
--			bio->bi_next = NULL;
--		__generic_make_request(bio);
--		bio = current->bio_list;
--	} while (bio);
--	current->bio_tail = NULL; /* deactivate */
--}
--
--EXPORT_SYMBOL(generic_make_request);
--
--/**
-- * submit_bio: submit a bio to the block device layer for I/O
-- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
-- * @bio: The &struct bio which describes the I/O
-- *
-- * submit_bio() is very similar in purpose to generic_make_request(), and
-- * uses that function to do most of the work. Both are fairly rough
-- * interfaces, @bio must be presetup and ready for I/O.
-- *
-- */
--void submit_bio(int rw, struct bio *bio)
--{
--	int count = bio_sectors(bio);
--
--	bio->bi_rw |= rw;
 -
--	/*
--	 * If it's a regular read/write or a barrier with data attached,
--	 * go through the normal accounting stuff before submission.
--	 */
--	if (!bio_empty_barrier(bio)) {
+-	/* prepare host */
++	/* prepare and activate SFF host */
+ 	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+ 	if (rc)
+-		goto err_out;
++		goto out;
+ 
+ 	pci_set_master(pdev);
++	rc = ata_pci_activate_sff_host(host, pi->port_ops->irq_handler,
++				       pi->sht);
++ out:
++	if (rc == 0)
++		devres_remove_group(&pdev->dev, NULL);
++	else
++		devres_release_group(&pdev->dev, NULL);
+ 
+-	/* start host and request IRQ */
+-	rc = ata_host_start(host);
+-	if (rc)
+-		goto err_out;
 -
--		BIO_BUG_ON(!bio->bi_size);
--		BIO_BUG_ON(!bio->bi_io_vec);
+-	if (!legacy_mode && pdev->irq) {
+-		/* We may have no IRQ assigned in which case we can poll. This
+-		   shouldn't happen on a sane system but robustness is cheap
+-		   in this case */
+-		rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
+-				      IRQF_SHARED, DRV_NAME, host);
+-		if (rc)
+-			goto err_out;
 -
--		if (rw & WRITE) {
--			count_vm_events(PGPGOUT, count);
--		} else {
--			task_io_account_read(bio->bi_size);
--			count_vm_events(PGPGIN, count);
--		}
+-		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
+-		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
+-	} else if (legacy_mode) {
+-		if (!ata_port_is_dummy(host->ports[0])) {
+-			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
+-					      pi->port_ops->irq_handler,
+-					      IRQF_SHARED, DRV_NAME, host);
+-			if (rc)
+-				goto err_out;
 -
--		if (unlikely(block_dump)) {
--			char b[BDEVNAME_SIZE];
--			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
--			current->comm, task_pid_nr(current),
--				(rw & WRITE) ? "WRITE" : "READ",
--				(unsigned long long)bio->bi_sector,
--				bdevname(bio->bi_bdev,b));
+-			ata_port_desc(host->ports[0], "irq %d",
+-				      ATA_PRIMARY_IRQ(pdev));
 -		}
--	}
--
--	generic_make_request(bio);
--}
--
--EXPORT_SYMBOL(submit_bio);
--
--static void blk_recalc_rq_sectors(struct request *rq, int nsect)
--{
--	if (blk_fs_request(rq)) {
--		rq->hard_sector += nsect;
--		rq->hard_nr_sectors -= nsect;
 -
--		/*
--		 * Move the I/O submission pointers ahead if required.
--		 */
--		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
--		    (rq->sector <= rq->hard_sector)) {
--			rq->sector = rq->hard_sector;
--			rq->nr_sectors = rq->hard_nr_sectors;
--			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
--			rq->current_nr_sectors = rq->hard_cur_sectors;
--			rq->buffer = bio_data(rq->bio);
--		}
+-		if (!ata_port_is_dummy(host->ports[1])) {
+-			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
+-					      pi->port_ops->irq_handler,
+-					      IRQF_SHARED, DRV_NAME, host);
+-			if (rc)
+-				goto err_out;
 -
--		/*
--		 * if total number of sectors is less than the first segment
--		 * size, something has gone terribly wrong
--		 */
--		if (rq->nr_sectors < rq->current_nr_sectors) {
--			printk("blk: request botched\n");
--			rq->nr_sectors = rq->current_nr_sectors;
+-			ata_port_desc(host->ports[1], "irq %d",
+-				      ATA_SECONDARY_IRQ(pdev));
 -		}
 -	}
--}
--
--static int __end_that_request_first(struct request *req, int uptodate,
--				    int nr_bytes)
--{
--	int total_bytes, bio_nbytes, error, next_idx = 0;
--	struct bio *bio;
 -
--	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+-	/* register */
+-	rc = ata_host_register(host, pi->sht);
+-	if (rc)
+-		goto err_out;
 -
--	/*
--	 * extend uptodate bool to allow < 0 value to be direct io error
--	 */
--	error = 0;
--	if (end_io_error(uptodate))
--		error = !uptodate ? -EIO : uptodate;
+-	devres_remove_group(dev, NULL);
+-	return 0;
 -
--	/*
--	 * for a REQ_BLOCK_PC request, we want to carry any eventual
--	 * sense key with us all the way through
--	 */
--	if (!blk_pc_request(req))
--		req->errors = 0;
+-err_out:
+-	devres_release_group(dev, NULL);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index bbe59c2..409ffb9 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -60,6 +60,7 @@ extern int atapi_dmadir;
+ extern int atapi_passthru16;
+ extern int libata_fua;
+ extern int libata_noacpi;
++extern int libata_allow_tpm;
+ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
+ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+ 			   u64 block, u32 n_block, unsigned int tf_flags,
+@@ -85,7 +86,6 @@ extern int ata_dev_configure(struct ata_device *dev);
+ extern int sata_down_spd_limit(struct ata_link *link);
+ extern int sata_set_spd_needed(struct ata_link *link);
+ extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
+-extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+ extern void ata_sg_clean(struct ata_queued_cmd *qc);
+ extern void ata_qc_free(struct ata_queued_cmd *qc);
+ extern void ata_qc_issue(struct ata_queued_cmd *qc);
+@@ -113,6 +113,7 @@ extern int ata_acpi_on_suspend(struct ata_port *ap);
+ extern void ata_acpi_on_resume(struct ata_port *ap);
+ extern int ata_acpi_on_devcfg(struct ata_device *dev);
+ extern void ata_acpi_on_disable(struct ata_device *dev);
++extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
+ #else
+ static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { }
+ static inline void ata_acpi_associate(struct ata_host *host) { }
+@@ -121,6 +122,8 @@ static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
+ static inline void ata_acpi_on_resume(struct ata_port *ap) { }
+ static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
+ static inline void ata_acpi_on_disable(struct ata_device *dev) { }
++static inline void ata_acpi_set_state(struct ata_port *ap,
++				      pm_message_t state) { }
+ #endif
+ 
+ /* libata-scsi.c */
+@@ -183,6 +186,7 @@ extern void ata_eh_report(struct ata_port *ap);
+ extern int ata_eh_reset(struct ata_link *link, int classify,
+ 			ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+ 			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
++extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+ extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+ 			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+ 			  ata_postreset_fn_t postreset,
+diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
+index e4542ab..244098a 100644
+--- a/drivers/ata/pata_acpi.c
++++ b/drivers/ata/pata_acpi.c
+@@ -81,17 +81,6 @@ static void pacpi_error_handler(struct ata_port *ap)
+ 				  NULL, ata_std_postreset);
+ }
+ 
+-/* Welcome to ACPI, bring a bucket */
+-static const unsigned int pio_cycle[7] = {
+-	600, 383, 240, 180, 120, 100, 80
+-};
+-static const unsigned int mwdma_cycle[5] = {
+-	480, 150, 120, 100, 80
+-};
+-static const unsigned int udma_cycle[7] = {
+-	120, 80, 60, 45, 30, 20, 15
+-};
 -
--	if (!uptodate) {
--		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
--			printk("end_request: I/O error, dev %s, sector %llu\n",
--				req->rq_disk ? req->rq_disk->disk_name : "?",
--				(unsigned long long)req->sector);
--	}
+ /**
+  *	pacpi_discover_modes	-	filter non ACPI modes
+  *	@adev: ATA device
+@@ -103,56 +92,20 @@ static const unsigned int udma_cycle[7] = {
+ 
+ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev)
+ {
+-	int unit = adev->devno;
+ 	struct pata_acpi *acpi = ap->private_data;
+-	int i;
+-	u32 t;
+-	unsigned long mask = (0x7f << ATA_SHIFT_UDMA) | (0x7 << ATA_SHIFT_MWDMA) | (0x1F << ATA_SHIFT_PIO);
 -
--	if (blk_fs_request(req) && req->rq_disk) {
--		const int rw = rq_data_dir(req);
+ 	struct ata_acpi_gtm probe;
++	unsigned int xfer_mask;
+ 
+ 	probe = acpi->gtm;
+ 
+-	/* We always use the 0 slot for crap hardware */
+-	if (!(probe.flags & 0x10))
+-		unit = 0;
 -
--		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+ 	ata_acpi_gtm(ap, &probe);
+ 
+-	/* Start by scanning for PIO modes */
+-	for (i = 0; i < 7; i++) {
+-		t = probe.drive[unit].pio;
+-		if (t <= pio_cycle[i]) {
+-			mask |= (2 << (ATA_SHIFT_PIO + i)) - 1;
+-			break;
+-		}
 -	}
--
--	total_bytes = bio_nbytes = 0;
--	while ((bio = req->bio) != NULL) {
--		int nbytes;
--
--		/*
--		 * For an empty barrier request, the low level driver must
--		 * store a potential error location in ->sector. We pass
--		 * that back up in ->bi_sector.
--		 */
--		if (blk_empty_barrier(req))
--			bio->bi_sector = req->sector;
--
--		if (nr_bytes >= bio->bi_size) {
--			req->bio = bio->bi_next;
--			nbytes = bio->bi_size;
--			req_bio_endio(req, bio, nbytes, error);
--			next_idx = 0;
--			bio_nbytes = 0;
--		} else {
--			int idx = bio->bi_idx + next_idx;
--
--			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
--				blk_dump_rq_flags(req, "__end_that");
--				printk("%s: bio idx %d >= vcnt %d\n",
--						__FUNCTION__,
--						bio->bi_idx, bio->bi_vcnt);
--				break;
--			}
--
--			nbytes = bio_iovec_idx(bio, idx)->bv_len;
--			BIO_BUG_ON(nbytes > bio->bi_size);
--
--			/*
--			 * not a complete bvec done
--			 */
--			if (unlikely(nbytes > nr_bytes)) {
--				bio_nbytes += nr_bytes;
--				total_bytes += nr_bytes;
++	xfer_mask = ata_acpi_gtm_xfermask(adev, &probe);
+ 
+-	/* See if we have MWDMA or UDMA data. We don't bother with MWDMA
+-	   if UDMA is availabe as this means the BIOS set UDMA and our
+-	   error changedown if it works is UDMA to PIO anyway */
+-	if (probe.flags & (1 << (2 * unit))) {
+-		/* MWDMA */
+-		for (i = 0; i < 5; i++) {
+-			t = probe.drive[unit].dma;
+-			if (t <= mwdma_cycle[i]) {
+-				mask |= (2 << (ATA_SHIFT_MWDMA + i)) - 1;
 -				break;
 -			}
--
--			/*
--			 * advance to the next vector
--			 */
--			next_idx++;
--			bio_nbytes += nbytes;
 -		}
--
--		total_bytes += nbytes;
--		nr_bytes -= nbytes;
--
--		if ((bio = req->bio)) {
--			/*
--			 * end more in this run, or just return 'not-done'
--			 */
--			if (unlikely(nr_bytes <= 0))
+-	} else {
+-		/* UDMA */
+-		for (i = 0; i < 7; i++) {
+-			t = probe.drive[unit].dma;
+-			if (t <= udma_cycle[i]) {
+-				mask |= (2 << (ATA_SHIFT_UDMA + i)) - 1;
 -				break;
+-			}
 -		}
 -	}
--
--	/*
--	 * completely done
--	 */
--	if (!req->bio)
--		return 0;
--
--	/*
--	 * if the request wasn't completed, update state
--	 */
--	if (bio_nbytes) {
--		req_bio_endio(req, bio, bio_nbytes, error);
--		bio->bi_idx += next_idx;
--		bio_iovec(bio)->bv_offset += nr_bytes;
--		bio_iovec(bio)->bv_len -= nr_bytes;
--	}
--
--	blk_recalc_rq_sectors(req, total_bytes >> 9);
--	blk_recalc_rq_segments(req);
--	return 1;
--}
--
--/**
-- * end_that_request_first - end I/O on a request
-- * @req:      the request being processed
-- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
-- * @nr_sectors: number of sectors to end I/O on
-- *
-- * Description:
-- *     Ends I/O on a number of sectors attached to @req, and sets it up
-- *     for the next range of segments (if any) in the cluster.
-- *
-- * Return:
-- *     0 - we are done with this request, call end_that_request_last()
-- *     1 - still buffers pending for this request
-- **/
--int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
--{
--	return __end_that_request_first(req, uptodate, nr_sectors << 9);
--}
--
--EXPORT_SYMBOL(end_that_request_first);
--
--/**
-- * end_that_request_chunk - end I/O on a request
-- * @req:      the request being processed
-- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
-- * @nr_bytes: number of bytes to complete
-- *
-- * Description:
-- *     Ends I/O on a number of bytes attached to @req, and sets it up
-- *     for the next range of segments (if any). Like end_that_request_first(),
-- *     but deals with bytes instead of sectors.
-- *
-- * Return:
-- *     0 - we are done with this request, call end_that_request_last()
-- *     1 - still buffers pending for this request
-- **/
--int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
--{
--	return __end_that_request_first(req, uptodate, nr_bytes);
--}
--
--EXPORT_SYMBOL(end_that_request_chunk);
--
--/*
-- * splice the completion data to a local structure and hand off to
-- * process_completion_queue() to complete the requests
-- */
--static void blk_done_softirq(struct softirq_action *h)
--{
--	struct list_head *cpu_list, local_list;
--
--	local_irq_disable();
--	cpu_list = &__get_cpu_var(blk_cpu_done);
--	list_replace_init(cpu_list, &local_list);
--	local_irq_enable();
--
--	while (!list_empty(&local_list)) {
--		struct request *rq = list_entry(local_list.next, struct request, donelist);
--
--		list_del_init(&rq->donelist);
--		rq->q->softirq_done_fn(rq);
--	}
--}
--
--static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
--			  void *hcpu)
--{
--	/*
--	 * If a CPU goes away, splice its entries to the current CPU
--	 * and trigger a run of the softirq
--	 */
--	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
--		int cpu = (unsigned long) hcpu;
--
--		local_irq_disable();
--		list_splice_init(&per_cpu(blk_cpu_done, cpu),
--				 &__get_cpu_var(blk_cpu_done));
--		raise_softirq_irqoff(BLOCK_SOFTIRQ);
--		local_irq_enable();
--	}
--
--	return NOTIFY_OK;
--}
--
--
--static struct notifier_block blk_cpu_notifier __cpuinitdata = {
--	.notifier_call	= blk_cpu_notify,
--};
--
--/**
-- * blk_complete_request - end I/O on a request
-- * @req:      the request being processed
-- *
-- * Description:
-- *     Ends all I/O on a request. It does not handle partial completions,
-- *     unless the driver actually implements this in its completion callback
-- *     through requeueing. The actual completion happens out-of-order,
-- *     through a softirq handler. The user must have registered a completion
-- *     callback through blk_queue_softirq_done().
-- **/
--
--void blk_complete_request(struct request *req)
--{
--	struct list_head *cpu_list;
--	unsigned long flags;
--
--	BUG_ON(!req->q->softirq_done_fn);
--		
--	local_irq_save(flags);
--
--	cpu_list = &__get_cpu_var(blk_cpu_done);
--	list_add_tail(&req->donelist, cpu_list);
--	raise_softirq_irqoff(BLOCK_SOFTIRQ);
--
--	local_irq_restore(flags);
--}
--
--EXPORT_SYMBOL(blk_complete_request);
--	
--/*
-- * queue lock must be held
-- */
--void end_that_request_last(struct request *req, int uptodate)
+-	if (mask & (0xF8 << ATA_SHIFT_UDMA))
++	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
+ 		ap->cbl = ATA_CBL_PATA80;
+-	return mask;
++
++	return xfer_mask;
+ }
+ 
+ /**
+@@ -180,12 +133,14 @@ static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ {
+ 	int unit = adev->devno;
+ 	struct pata_acpi *acpi = ap->private_data;
++	const struct ata_timing *t;
+ 
+ 	if (!(acpi->gtm.flags & 0x10))
+ 		unit = 0;
+ 
+ 	/* Now stuff the nS values into the structure */
+-	acpi->gtm.drive[unit].pio = pio_cycle[adev->pio_mode - XFER_PIO_0];
++	t = ata_timing_find_mode(adev->pio_mode);
++	acpi->gtm.drive[unit].pio = t->cycle;
+ 	ata_acpi_stm(ap, &acpi->gtm);
+ 	/* See what mode we actually got */
+ 	ata_acpi_gtm(ap, &acpi->gtm);
+@@ -201,16 +156,18 @@ static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+ {
+ 	int unit = adev->devno;
+ 	struct pata_acpi *acpi = ap->private_data;
++	const struct ata_timing *t;
+ 
+ 	if (!(acpi->gtm.flags & 0x10))
+ 		unit = 0;
+ 
+ 	/* Now stuff the nS values into the structure */
++	t = ata_timing_find_mode(adev->dma_mode);
+ 	if (adev->dma_mode >= XFER_UDMA_0) {
+-		acpi->gtm.drive[unit].dma = udma_cycle[adev->dma_mode - XFER_UDMA_0];
++		acpi->gtm.drive[unit].dma = t->udma;
+ 		acpi->gtm.flags |= (1 << (2 * unit));
+ 	} else {
+-		acpi->gtm.drive[unit].dma = mwdma_cycle[adev->dma_mode - XFER_MW_DMA_0];
++		acpi->gtm.drive[unit].dma = t->cycle;
+ 		acpi->gtm.flags &= ~(1 << (2 * unit));
+ 	}
+ 	ata_acpi_stm(ap, &acpi->gtm);
+diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
+index 8caf9af..7e68edf 100644
+--- a/drivers/ata/pata_ali.c
++++ b/drivers/ata/pata_ali.c
+@@ -64,7 +64,7 @@ static int ali_cable_override(struct pci_dev *pdev)
+ 	if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
+ 	   	return 1;
+ 	/* Mitac 8317 (Winbook-A) and relatives */
+-	if (pdev->subsystem_vendor == 0x1071  && pdev->subsystem_device == 0x8317)
++	if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317)
+ 		return 1;
+ 	/* Systems by DMI */
+ 	if (dmi_check_system(cable_dmi_table))
+diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
+index 3cc27b5..761a666 100644
+--- a/drivers/ata/pata_amd.c
++++ b/drivers/ata/pata_amd.c
+@@ -220,6 +220,62 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+ 	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
+ }
+ 
++/* Both host-side and drive-side detection results are worthless on NV
++ * PATAs.  Ignore them and just follow what BIOS configured.  Both the
++ * current configuration in PCI config reg and ACPI GTM result are
++ * cached during driver attach and are consulted to select transfer
++ * mode.
++ */
++static unsigned long nv_mode_filter(struct ata_device *dev,
++				    unsigned long xfer_mask)
++{
++	static const unsigned int udma_mask_map[] =
++		{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
++		  ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
++	struct ata_port *ap = dev->link->ap;
++	char acpi_str[32] = "";
++	u32 saved_udma, udma;
++	const struct ata_acpi_gtm *gtm;
++	unsigned long bios_limit = 0, acpi_limit = 0, limit;
++
++	/* find out what BIOS configured */
++	udma = saved_udma = (unsigned long)ap->host->private_data;
++
++	if (ap->port_no == 0)
++		udma >>= 16;
++	if (dev->devno == 0)
++		udma >>= 8;
++
++	if ((udma & 0xc0) == 0xc0)
++		bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);
++
++	/* consult ACPI GTM too */
++	gtm = ata_acpi_init_gtm(ap);
++	if (gtm) {
++		acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);
++
++		snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
++			 gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
++	}
++
++	/* be optimistic, EH can take care of things if something goes wrong */
++	limit = bios_limit | acpi_limit;
++
++	/* If PIO or DMA isn't configured at all, don't limit.  Let EH
++	 * handle it.
++	 */
++	if (!(limit & ATA_MASK_PIO))
++		limit |= ATA_MASK_PIO;
++	if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
++		limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;
++
++	ata_port_printk(ap, KERN_DEBUG, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
++			"BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
++			xfer_mask, limit, xfer_mask & limit, bios_limit,
++			saved_udma, acpi_limit, acpi_str);
++
++	return xfer_mask & limit;
++}
+ 
+ /**
+  *	nv_probe_init	-	cable detection
+@@ -252,31 +308,6 @@ static void nv_error_handler(struct ata_port *ap)
+ 			       ata_std_postreset);
+ }
+ 
+-static int nv_cable_detect(struct ata_port *ap)
 -{
--	struct gendisk *disk = req->rq_disk;
--	int error;
--
--	/*
--	 * extend uptodate bool to allow < 0 value to be direct io error
--	 */
--	error = 0;
--	if (end_io_error(uptodate))
--		error = !uptodate ? -EIO : uptodate;
--
--	if (unlikely(laptop_mode) && blk_fs_request(req))
--		laptop_io_completion();
--
--	/*
--	 * Account IO completion.  bar_rq isn't accounted as a normal
--	 * IO on queueing nor completion.  Accounting the containing
--	 * request is enough.
--	 */
--	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
--		unsigned long duration = jiffies - req->start_time;
--		const int rw = rq_data_dir(req);
+-	static const u8 bitmask[2] = {0x03, 0x0C};
+-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+-	u8 ata66;
+-	u16 udma;
+-	int cbl;
 -
--		__disk_stat_inc(disk, ios[rw]);
--		__disk_stat_add(disk, ticks[rw], duration);
--		disk_round_stats(disk);
--		disk->in_flight--;
--	}
--	if (req->end_io)
--		req->end_io(req, error);
+-	pci_read_config_byte(pdev, 0x52, &ata66);
+-	if (ata66 & bitmask[ap->port_no])
+-		cbl = ATA_CBL_PATA80;
 -	else
--		__blk_put_request(req->q, req);
--}
--
--EXPORT_SYMBOL(end_that_request_last);
--
--static inline void __end_request(struct request *rq, int uptodate,
--				 unsigned int nr_bytes, int dequeue)
--{
--	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
--		if (dequeue)
--			blkdev_dequeue_request(rq);
--		add_disk_randomness(rq->rq_disk);
--		end_that_request_last(rq, uptodate);
--	}
--}
--
--static unsigned int rq_byte_size(struct request *rq)
--{
--	if (blk_fs_request(rq))
--		return rq->hard_nr_sectors << 9;
--
--	return rq->data_len;
--}
--
--/**
-- * end_queued_request - end all I/O on a queued request
-- * @rq:		the request being processed
-- * @uptodate:	error value or 0/1 uptodate flag
-- *
-- * Description:
-- *     Ends all I/O on a request, and removes it from the block layer queues.
-- *     Not suitable for normal IO completion, unless the driver still has
-- *     the request attached to the block layer.
-- *
-- **/
--void end_queued_request(struct request *rq, int uptodate)
--{
--	__end_request(rq, uptodate, rq_byte_size(rq), 1);
--}
--EXPORT_SYMBOL(end_queued_request);
--
--/**
-- * end_dequeued_request - end all I/O on a dequeued request
-- * @rq:		the request being processed
-- * @uptodate:	error value or 0/1 uptodate flag
-- *
-- * Description:
-- *     Ends all I/O on a request. The request must already have been
-- *     dequeued using blkdev_dequeue_request(), as is normally the case
-- *     for most drivers.
-- *
-- **/
--void end_dequeued_request(struct request *rq, int uptodate)
--{
--	__end_request(rq, uptodate, rq_byte_size(rq), 0);
--}
--EXPORT_SYMBOL(end_dequeued_request);
--
--
--/**
-- * end_request - end I/O on the current segment of the request
-- * @req:	the request being processed
-- * @uptodate:	error value or 0/1 uptodate flag
-- *
-- * Description:
-- *     Ends I/O on the current segment of a request. If that is the only
-- *     remaining segment, the request is also completed and freed.
-- *
-- *     This is a remnant of how older block drivers handled IO completions.
-- *     Modern drivers typically end IO on the full request in one go, unless
-- *     they have a residual value to account for. For that case this function
-- *     isn't really useful, unless the residual just happens to be the
-- *     full current segment. In other words, don't use this function in new
-- *     code. Either use end_request_completely(), or the
-- *     end_that_request_chunk() (along with end_that_request_last()) for
-- *     partial completions.
-- *
-- **/
--void end_request(struct request *req, int uptodate)
--{
--	__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
--}
--EXPORT_SYMBOL(end_request);
--
--static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
--			    struct bio *bio)
--{
--	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
--	rq->cmd_flags |= (bio->bi_rw & 3);
--
--	rq->nr_phys_segments = bio_phys_segments(q, bio);
--	rq->nr_hw_segments = bio_hw_segments(q, bio);
--	rq->current_nr_sectors = bio_cur_sectors(bio);
--	rq->hard_cur_sectors = rq->current_nr_sectors;
--	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
--	rq->buffer = bio_data(bio);
--	rq->data_len = bio->bi_size;
--
--	rq->bio = rq->biotail = bio;
--
--	if (bio->bi_bdev)
--		rq->rq_disk = bio->bi_bdev->bd_disk;
--}
--
--int kblockd_schedule_work(struct work_struct *work)
--{
--	return queue_work(kblockd_workqueue, work);
--}
--
--EXPORT_SYMBOL(kblockd_schedule_work);
--
--void kblockd_flush_work(struct work_struct *work)
--{
--	cancel_work_sync(work);
--}
--EXPORT_SYMBOL(kblockd_flush_work);
--
--int __init blk_dev_init(void)
--{
--	int i;
--
--	kblockd_workqueue = create_workqueue("kblockd");
--	if (!kblockd_workqueue)
--		panic("Failed to create kblockd\n");
--
--	request_cachep = kmem_cache_create("blkdev_requests",
--			sizeof(struct request), 0, SLAB_PANIC, NULL);
--
--	requestq_cachep = kmem_cache_create("blkdev_queue",
--			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
--
--	iocontext_cachep = kmem_cache_create("blkdev_ioc",
--			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
--
--	for_each_possible_cpu(i)
--		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
--
--	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
--	register_hotcpu_notifier(&blk_cpu_notifier);
--
--	blk_max_low_pfn = max_low_pfn - 1;
--	blk_max_pfn = max_pfn - 1;
--
--	return 0;
--}
--
--/*
-- * IO Context helper functions
-- */
--void put_io_context(struct io_context *ioc)
--{
--	if (ioc == NULL)
--		return;
--
--	BUG_ON(atomic_read(&ioc->refcount) == 0);
--
--	if (atomic_dec_and_test(&ioc->refcount)) {
--		struct cfq_io_context *cic;
--
--		rcu_read_lock();
--		if (ioc->aic && ioc->aic->dtor)
--			ioc->aic->dtor(ioc->aic);
--		if (ioc->cic_root.rb_node != NULL) {
--			struct rb_node *n = rb_first(&ioc->cic_root);
--
--			cic = rb_entry(n, struct cfq_io_context, rb_node);
--			cic->dtor(ioc);
--		}
--		rcu_read_unlock();
--
--		kmem_cache_free(iocontext_cachep, ioc);
--	}
--}
--EXPORT_SYMBOL(put_io_context);
--
--/* Called by the exitting task */
--void exit_io_context(void)
--{
--	struct io_context *ioc;
--	struct cfq_io_context *cic;
--
--	task_lock(current);
--	ioc = current->io_context;
--	current->io_context = NULL;
--	task_unlock(current);
--
--	ioc->task = NULL;
--	if (ioc->aic && ioc->aic->exit)
--		ioc->aic->exit(ioc->aic);
--	if (ioc->cic_root.rb_node != NULL) {
--		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
--		cic->exit(ioc);
--	}
--
--	put_io_context(ioc);
--}
--
--/*
-- * If the current task has no IO context then create one and initialise it.
-- * Otherwise, return its existing IO context.
-- *
-- * This returned IO context doesn't have a specifically elevated refcount,
-- * but since the current task itself holds a reference, the context can be
-- * used in general code, so long as it stays within `current` context.
-- */
--static struct io_context *current_io_context(gfp_t gfp_flags, int node)
--{
--	struct task_struct *tsk = current;
--	struct io_context *ret;
--
--	ret = tsk->io_context;
--	if (likely(ret))
--		return ret;
--
--	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
--	if (ret) {
--		atomic_set(&ret->refcount, 1);
--		ret->task = current;
--		ret->ioprio_changed = 0;
--		ret->last_waited = jiffies; /* doesn't matter... */
--		ret->nr_batch_requests = 0; /* because this is 0 */
--		ret->aic = NULL;
--		ret->cic_root.rb_node = NULL;
--		ret->ioc_data = NULL;
--		/* make sure set_task_ioprio() sees the settings above */
--		smp_wmb();
--		tsk->io_context = ret;
--	}
--
--	return ret;
--}
--
--/*
-- * If the current task has no IO context then create one and initialise it.
-- * If it does have a context, take a ref on it.
-- *
-- * This is always called in the context of the task which submitted the I/O.
-- */
--struct io_context *get_io_context(gfp_t gfp_flags, int node)
--{
--	struct io_context *ret;
--	ret = current_io_context(gfp_flags, node);
--	if (likely(ret))
--		atomic_inc(&ret->refcount);
--	return ret;
--}
--EXPORT_SYMBOL(get_io_context);
--
--void copy_io_context(struct io_context **pdst, struct io_context **psrc)
--{
--	struct io_context *src = *psrc;
--	struct io_context *dst = *pdst;
--
--	if (src) {
--		BUG_ON(atomic_read(&src->refcount) == 0);
--		atomic_inc(&src->refcount);
--		put_io_context(dst);
--		*pdst = src;
--	}
--}
--EXPORT_SYMBOL(copy_io_context);
--
--void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
--{
--	struct io_context *temp;
--	temp = *ioc1;
--	*ioc1 = *ioc2;
--	*ioc2 = temp;
--}
--EXPORT_SYMBOL(swap_io_context);
--
--/*
-- * sysfs parts below
-- */
--struct queue_sysfs_entry {
--	struct attribute attr;
--	ssize_t (*show)(struct request_queue *, char *);
--	ssize_t (*store)(struct request_queue *, const char *, size_t);
--};
--
--static ssize_t
--queue_var_show(unsigned int var, char *page)
--{
--	return sprintf(page, "%d\n", var);
--}
--
--static ssize_t
--queue_var_store(unsigned long *var, const char *page, size_t count)
--{
--	char *p = (char *) page;
--
--	*var = simple_strtoul(p, &p, 10);
--	return count;
--}
--
--static ssize_t queue_requests_show(struct request_queue *q, char *page)
--{
--	return queue_var_show(q->nr_requests, (page));
--}
--
--static ssize_t
--queue_requests_store(struct request_queue *q, const char *page, size_t count)
--{
--	struct request_list *rl = &q->rq;
--	unsigned long nr;
--	int ret = queue_var_store(&nr, page, count);
--	if (nr < BLKDEV_MIN_RQ)
--		nr = BLKDEV_MIN_RQ;
--
--	spin_lock_irq(q->queue_lock);
--	q->nr_requests = nr;
--	blk_queue_congestion_threshold(q);
--
--	if (rl->count[READ] >= queue_congestion_on_threshold(q))
--		blk_set_queue_congested(q, READ);
--	else if (rl->count[READ] < queue_congestion_off_threshold(q))
--		blk_clear_queue_congested(q, READ);
--
--	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
--		blk_set_queue_congested(q, WRITE);
--	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
--		blk_clear_queue_congested(q, WRITE);
--
--	if (rl->count[READ] >= q->nr_requests) {
--		blk_set_queue_full(q, READ);
--	} else if (rl->count[READ]+1 <= q->nr_requests) {
--		blk_clear_queue_full(q, READ);
--		wake_up(&rl->wait[READ]);
--	}
--
--	if (rl->count[WRITE] >= q->nr_requests) {
--		blk_set_queue_full(q, WRITE);
--	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
--		blk_clear_queue_full(q, WRITE);
--		wake_up(&rl->wait[WRITE]);
--	}
--	spin_unlock_irq(q->queue_lock);
--	return ret;
--}
--
--static ssize_t queue_ra_show(struct request_queue *q, char *page)
--{
--	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
--
--	return queue_var_show(ra_kb, (page));
--}
--
--static ssize_t
--queue_ra_store(struct request_queue *q, const char *page, size_t count)
--{
--	unsigned long ra_kb;
--	ssize_t ret = queue_var_store(&ra_kb, page, count);
--
--	spin_lock_irq(q->queue_lock);
--	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
--	spin_unlock_irq(q->queue_lock);
--
--	return ret;
--}
--
--static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
--{
--	int max_sectors_kb = q->max_sectors >> 1;
--
--	return queue_var_show(max_sectors_kb, (page));
--}
--
--static ssize_t
--queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
--{
--	unsigned long max_sectors_kb,
--			max_hw_sectors_kb = q->max_hw_sectors >> 1,
--			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
--	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
--
--	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
--		return -EINVAL;
--	/*
--	 * Take the queue lock to update the readahead and max_sectors
--	 * values synchronously:
--	 */
--	spin_lock_irq(q->queue_lock);
--	q->max_sectors = max_sectors_kb << 1;
--	spin_unlock_irq(q->queue_lock);
--
--	return ret;
--}
--
--static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
--{
--	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
--
--	return queue_var_show(max_hw_sectors_kb, (page));
--}
--
--
--static struct queue_sysfs_entry queue_requests_entry = {
--	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
--	.show = queue_requests_show,
--	.store = queue_requests_store,
--};
--
--static struct queue_sysfs_entry queue_ra_entry = {
--	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
--	.show = queue_ra_show,
--	.store = queue_ra_store,
--};
--
--static struct queue_sysfs_entry queue_max_sectors_entry = {
--	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
--	.show = queue_max_sectors_show,
--	.store = queue_max_sectors_store,
--};
--
--static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
--	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
--	.show = queue_max_hw_sectors_show,
--};
--
--static struct queue_sysfs_entry queue_iosched_entry = {
--	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
--	.show = elv_iosched_show,
--	.store = elv_iosched_store,
--};
--
--static struct attribute *default_attrs[] = {
--	&queue_requests_entry.attr,
--	&queue_ra_entry.attr,
--	&queue_max_hw_sectors_entry.attr,
--	&queue_max_sectors_entry.attr,
--	&queue_iosched_entry.attr,
--	NULL,
--};
--
--#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
--
--static ssize_t
--queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
--{
--	struct queue_sysfs_entry *entry = to_queue(attr);
--	struct request_queue *q =
--		container_of(kobj, struct request_queue, kobj);
--	ssize_t res;
--
--	if (!entry->show)
--		return -EIO;
--	mutex_lock(&q->sysfs_lock);
--	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
--		mutex_unlock(&q->sysfs_lock);
--		return -ENOENT;
--	}
--	res = entry->show(q, page);
--	mutex_unlock(&q->sysfs_lock);
--	return res;
--}
--
--static ssize_t
--queue_attr_store(struct kobject *kobj, struct attribute *attr,
--		    const char *page, size_t length)
--{
--	struct queue_sysfs_entry *entry = to_queue(attr);
--	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
--
--	ssize_t res;
--
--	if (!entry->store)
--		return -EIO;
--	mutex_lock(&q->sysfs_lock);
--	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
--		mutex_unlock(&q->sysfs_lock);
--		return -ENOENT;
--	}
--	res = entry->store(q, page, length);
--	mutex_unlock(&q->sysfs_lock);
--	return res;
--}
--
--static struct sysfs_ops queue_sysfs_ops = {
--	.show	= queue_attr_show,
--	.store	= queue_attr_store,
--};
--
--static struct kobj_type queue_ktype = {
--	.sysfs_ops	= &queue_sysfs_ops,
--	.default_attrs	= default_attrs,
--	.release	= blk_release_queue,
--};
--
--int blk_register_queue(struct gendisk *disk)
--{
--	int ret;
--
--	struct request_queue *q = disk->queue;
--
--	if (!q || !q->request_fn)
--		return -ENXIO;
--
--	q->kobj.parent = kobject_get(&disk->kobj);
--
--	ret = kobject_add(&q->kobj);
--	if (ret < 0)
--		return ret;
--
--	kobject_uevent(&q->kobj, KOBJ_ADD);
--
--	ret = elv_register_queue(q);
--	if (ret) {
--		kobject_uevent(&q->kobj, KOBJ_REMOVE);
--		kobject_del(&q->kobj);
--		return ret;
--	}
+-		cbl = ATA_CBL_PATA40;
 -
--	return 0;
+- 	/* We now have to double check because the Nvidia boxes BIOS
+- 	   doesn't always set the cable bits but does set mode bits */
+- 	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
+- 	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
+-		cbl = ATA_CBL_PATA80;
+-	/* And a triple check across suspend/resume with ACPI around */
+-	if (ata_acpi_cbl_80wire(ap))
+-		cbl = ATA_CBL_PATA80;
+-	return cbl;
 -}
 -
--void blk_unregister_queue(struct gendisk *disk)
--{
--	struct request_queue *q = disk->queue;
--
--	if (q && q->request_fn) {
--		elv_unregister_queue(q);
--
--		kobject_uevent(&q->kobj, KOBJ_REMOVE);
--		kobject_del(&q->kobj);
--		kobject_put(&disk->kobj);
--	}
--}
-diff --git a/crypto/Kconfig b/crypto/Kconfig
-index 083d2e1..c3166a1 100644
---- a/crypto/Kconfig
-+++ b/crypto/Kconfig
-@@ -24,10 +24,6 @@ config CRYPTO_ALGAPI
- 	help
- 	  This option provides the API for cryptographic algorithms.
+ /**
+  *	nv100_set_piomode	-	set initial PIO mode data
+  *	@ap: ATA interface
+@@ -314,6 +345,14 @@ static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+ 	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
+ }
  
--config CRYPTO_ABLKCIPHER
--	tristate
--	select CRYPTO_BLKCIPHER
--
- config CRYPTO_AEAD
- 	tristate
- 	select CRYPTO_ALGAPI
-@@ -36,6 +32,15 @@ config CRYPTO_BLKCIPHER
- 	tristate
- 	select CRYPTO_ALGAPI
++static void nv_host_stop(struct ata_host *host)
++{
++	u32 udma = (unsigned long)host->private_data;
++
++	/* restore PCI config register 0x60 */
++	pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
++}
++
+ static struct scsi_host_template amd_sht = {
+ 	.module			= THIS_MODULE,
+ 	.name			= DRV_NAME,
+@@ -478,7 +517,8 @@ static struct ata_port_operations nv100_port_ops = {
+ 	.thaw		= ata_bmdma_thaw,
+ 	.error_handler	= nv_error_handler,
+ 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+-	.cable_detect	= nv_cable_detect,
++	.cable_detect	= ata_cable_ignore,
++	.mode_filter	= nv_mode_filter,
  
-+config CRYPTO_SEQIV
-+	tristate "Sequence Number IV Generator"
-+	select CRYPTO_AEAD
-+	select CRYPTO_BLKCIPHER
-+	help
-+	  This IV generator generates an IV based on a sequence number by
-+	  xoring it with a salt.  This algorithm is mainly useful for CTR
-+	  and similar modes.
+ 	.bmdma_setup 	= ata_bmdma_setup,
+ 	.bmdma_start 	= ata_bmdma_start,
+@@ -495,6 +535,7 @@ static struct ata_port_operations nv100_port_ops = {
+ 	.irq_on		= ata_irq_on,
+ 
+ 	.port_start	= ata_sff_port_start,
++	.host_stop	= nv_host_stop,
+ };
+ 
+ static struct ata_port_operations nv133_port_ops = {
+@@ -511,7 +552,8 @@ static struct ata_port_operations nv133_port_ops = {
+ 	.thaw		= ata_bmdma_thaw,
+ 	.error_handler	= nv_error_handler,
+ 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+-	.cable_detect	= nv_cable_detect,
++	.cable_detect	= ata_cable_ignore,
++	.mode_filter	= nv_mode_filter,
+ 
+ 	.bmdma_setup 	= ata_bmdma_setup,
+ 	.bmdma_start 	= ata_bmdma_start,
+@@ -528,6 +570,7 @@ static struct ata_port_operations nv133_port_ops = {
+ 	.irq_on		= ata_irq_on,
+ 
+ 	.port_start	= ata_sff_port_start,
++	.host_stop	= nv_host_stop,
+ };
+ 
+ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+@@ -614,7 +657,8 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 			.port_ops = &amd100_port_ops
+ 		}
+ 	};
+-	const struct ata_port_info *ppi[] = { NULL, NULL };
++	struct ata_port_info pi;
++	const struct ata_port_info *ppi[] = { &pi, NULL };
+ 	static int printed_version;
+ 	int type = id->driver_data;
+ 	u8 fifo;
+@@ -628,6 +672,19 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (type == 1 && pdev->revision > 0x7)
+ 		type = 2;
+ 
++	/* Serenade ? */
++	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
++			 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
++		type = 6;	/* UDMA 100 only */
 +
- config CRYPTO_HASH
- 	tristate
- 	select CRYPTO_ALGAPI
-@@ -91,7 +96,7 @@ config CRYPTO_SHA1
- 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
++	/*
++	 * Okay, type is determined now.  Apply type-specific workarounds.
++	 */
++	pi = info[type];
++
++	if (type < 3)
++		ata_pci_clear_simplex(pdev);
++
+ 	/* Check for AMD7411 */
+ 	if (type == 3)
+ 		/* FIFO is broken */
+@@ -635,16 +692,17 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	else
+ 		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
  
- config CRYPTO_SHA256
--	tristate "SHA256 digest algorithm"
-+	tristate "SHA224 and SHA256 digest algorithm"
- 	select CRYPTO_ALGAPI
- 	help
- 	  SHA256 secure hash standard (DFIPS 180-2).
-@@ -99,6 +104,9 @@ config CRYPTO_SHA256
- 	  This version of SHA implements a 256 bit hash with 128 bits of
- 	  security against collision attacks.
+-	/* Serenade ? */
+-	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
+-			 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
+-		type = 6;	/* UDMA 100 only */
++	/* Cable detection on Nvidia chips doesn't work too well,
++	 * cache BIOS programmed UDMA mode.
++	 */
++	if (type == 7 || type == 8) {
++		u32 udma;
  
-+          This code also includes SHA-224, a 224 bit hash with 112 bits
-+          of security against collision attacks.
+-	if (type < 3)
+-		ata_pci_clear_simplex(pdev);
++		pci_read_config_dword(pdev, 0x60, &udma);
++		pi.private_data = (void *)(unsigned long)udma;
++	}
+ 
+ 	/* And fire it up */
+-	ppi[0] = &info[type];
+ 	return ata_pci_init_one(pdev, ppi);
+ }
+ 
+diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
+index 7842cc4..a32e3c4 100644
+--- a/drivers/ata/pata_bf54x.c
++++ b/drivers/ata/pata_bf54x.c
+@@ -832,6 +832,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
+ {
+ 	unsigned short config = WDSIZE_16;
+ 	struct scatterlist *sg;
++	unsigned int si;
+ 
+ 	pr_debug("in atapi dma setup\n");
+ 	/* Program the ATA_CTRL register with dir */
+@@ -839,7 +840,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
+ 		/* fill the ATAPI DMA controller */
+ 		set_dma_config(CH_ATAPI_TX, config);
+ 		set_dma_x_modify(CH_ATAPI_TX, 2);
+-		ata_for_each_sg(sg, qc) {
++		for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 			set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
+ 			set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
+ 		}
+@@ -848,7 +849,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
+ 		/* fill the ATAPI DMA controller */
+ 		set_dma_config(CH_ATAPI_RX, config);
+ 		set_dma_x_modify(CH_ATAPI_RX, 2);
+-		ata_for_each_sg(sg, qc) {
++		for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 			set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
+ 			set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
+ 		}
+@@ -867,6 +868,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+ 	struct ata_port *ap = qc->ap;
+ 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ 	struct scatterlist *sg;
++	unsigned int si;
+ 
+ 	pr_debug("in atapi dma start\n");
+ 	if (!(ap->udma_mask || ap->mwdma_mask))
+@@ -881,7 +883,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+ 		 * data cache is enabled. Otherwise, this loop
+ 		 * is an empty loop and optimized out.
+ 		 */
+-		ata_for_each_sg(sg, qc) {
++		for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 			flush_dcache_range(sg_dma_address(sg),
+ 				sg_dma_address(sg) + sg_dma_len(sg));
+ 		}
+@@ -910,7 +912,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+ 	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
+ 
+ 		/* Set transfer length to buffer len */
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
+ 	}
+ 
+@@ -932,6 +934,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct scatterlist *sg;
++	unsigned int si;
+ 
+ 	pr_debug("in atapi dma stop\n");
+ 	if (!(ap->udma_mask || ap->mwdma_mask))
+@@ -950,7 +953,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
+ 			 * data cache is enabled. Otherwise, this loop
+ 			 * is an empty loop and optimized out.
+ 			 */
+-			ata_for_each_sg(sg, qc) {
++			for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 				invalidate_dcache_range(
+ 					sg_dma_address(sg),
+ 					sg_dma_address(sg)
+@@ -1167,34 +1170,36 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
+  *	Note: Original code is ata_data_xfer().
+  */
+ 
+-static void bfin_data_xfer(struct ata_device *adev, unsigned char *buf,
+-			   unsigned int buflen, int write_data)
++static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
++				   unsigned int buflen, int rw)
+ {
+-	struct ata_port *ap = adev->link->ap;
+-	unsigned int words = buflen >> 1;
+-	unsigned short *buf16 = (u16 *) buf;
++	struct ata_port *ap = dev->link->ap;
+ 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
++	unsigned int words = buflen >> 1;
++	unsigned short *buf16 = (u16 *)buf;
+ 
+ 	/* Transfer multiple of 2 bytes */
+-	if (write_data) {
+-		write_atapi_data(base, words, buf16);
+-	} else {
++	if (rw == READ)
+ 		read_atapi_data(base, words, buf16);
+-	}
++	else
++		write_atapi_data(base, words, buf16);
+ 
+ 	/* Transfer trailing 1 byte, if any. */
+ 	if (unlikely(buflen & 0x01)) {
+ 		unsigned short align_buf[1] = { 0 };
+ 		unsigned char *trailing_buf = buf + buflen - 1;
+ 
+-		if (write_data) {
+-			memcpy(align_buf, trailing_buf, 1);
+-			write_atapi_data(base, 1, align_buf);
+-		} else {
++		if (rw == READ) {
+ 			read_atapi_data(base, 1, align_buf);
+ 			memcpy(trailing_buf, align_buf, 1);
++		} else {
++			memcpy(align_buf, trailing_buf, 1);
++			write_atapi_data(base, 1, align_buf);
+ 		}
++		words++;
+ 	}
 +
- config CRYPTO_SHA512
- 	tristate "SHA384 and SHA512 digest algorithms"
- 	select CRYPTO_ALGAPI
-@@ -195,9 +203,34 @@ config CRYPTO_XTS
- 	  key size 256, 384 or 512 bits. This implementation currently
- 	  can't handle a sectorsize which is not a multiple of 16 bytes.
++	return words << 1;
+ }
  
-+config CRYPTO_CTR
-+	tristate "CTR support"
-+	select CRYPTO_BLKCIPHER
-+	select CRYPTO_SEQIV
-+	select CRYPTO_MANAGER
-+	help
-+	  CTR: Counter mode
-+	  This block cipher algorithm is required for IPSec.
+ /**
+diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
+index 33f7f08..d4590f5 100644
+--- a/drivers/ata/pata_cs5520.c
++++ b/drivers/ata/pata_cs5520.c
+@@ -198,7 +198,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
+ 	};
+ 	const struct ata_port_info *ppi[2];
+ 	u8 pcicfg;
+-	void *iomap[5];
++	void __iomem *iomap[5];
+ 	struct ata_host *host;
+ 	struct ata_ioports *ioaddr;
+ 	int i, rc;
+diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
+index c79f066..68eb349 100644
+--- a/drivers/ata/pata_hpt37x.c
++++ b/drivers/ata/pata_hpt37x.c
+@@ -847,15 +847,16 @@ static u32 hpt374_read_freq(struct pci_dev *pdev)
+ 	u32 freq;
+ 	unsigned long io_base = pci_resource_start(pdev, 4);
+ 	if (PCI_FUNC(pdev->devfn) & 1) {
+-		struct pci_dev *pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
++		struct pci_dev *pdev_0;
 +
-+config CRYPTO_GCM
-+	tristate "GCM/GMAC support"
-+	select CRYPTO_CTR
-+	select CRYPTO_AEAD
-+	select CRYPTO_GF128MUL
-+	help
-+	  Support for Galois/Counter Mode (GCM) and Galois Message
-+	  Authentication Code (GMAC). Required for IPSec.
++		pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
+ 		/* Someone hot plugged the controller on us ? */
+ 		if (pdev_0 == NULL)
+ 			return 0;
+ 		io_base = pci_resource_start(pdev_0, 4);
+ 		freq = inl(io_base + 0x90);
+ 		pci_dev_put(pdev_0);
+-	}
+-	else
++	} else
+ 		freq = inl(io_base + 0x90);
+ 	return freq;
+ }
+diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
+index 842fe08..5b8586d 100644
+--- a/drivers/ata/pata_icside.c
++++ b/drivers/ata/pata_icside.c
+@@ -224,6 +224,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
+ 	struct pata_icside_state *state = ap->host->private_data;
+ 	struct scatterlist *sg, *rsg = state->sg;
+ 	unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
++	unsigned int si;
+ 
+ 	/*
+ 	 * We are simplex; BUG if we try to fiddle with DMA
+@@ -234,7 +235,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
+ 	/*
+ 	 * Copy ATAs scattered sg list into a contiguous array of sg
+ 	 */
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		memcpy(rsg, sg, sizeof(*sg));
+ 		rsg++;
+ 	}
+diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
+index ca9aae0..109ddd4 100644
+--- a/drivers/ata/pata_it821x.c
++++ b/drivers/ata/pata_it821x.c
+@@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc)
+ 			return ata_qc_issue_prot(qc);
+ 	}
+ 	printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
+-	return AC_ERR_INVALID;
++	return AC_ERR_DEV;
+ }
+ 
+ /**
+@@ -516,6 +516,37 @@ static void it821x_dev_config(struct ata_device *adev)
+ 			printk("(%dK stripe)", adev->id[146]);
+ 		printk(".\n");
+ 	}
++	/* This is a controller firmware triggered funny, don't
++	   report the drive faulty! */
++	adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
++}
 +
-+config CRYPTO_CCM
-+	tristate "CCM support"
-+	select CRYPTO_CTR
-+	select CRYPTO_AEAD
-+	help
-+	  Support for Counter with CBC MAC. Required for IPsec.
++/**
++ *	it821x_ident_hack	-	Hack identify data up
++ *	@ap: Port
++ *
++ *	Walk the devices on this firmware driven port and slightly
++ *	Walk the devices on this firmware-driven port and slightly
++ *	mash the identify data to stop us and common tools from trying
++ *	to use features the firmware does not support. The firmware
++ *	itself does some masking (eg SMART) but not enough.
++ *	This is a bit of an abuse of the cable method, but it is the
++ *	only method called at the right time. We could modify the libata
++ *	core specifically for ident hacking but while we have one offender
++ *	it seems better to keep the fallout localised.
++ */
 +
- config CRYPTO_CRYPTD
- 	tristate "Software async crypto daemon"
--	select CRYPTO_ABLKCIPHER
-+	select CRYPTO_BLKCIPHER
- 	select CRYPTO_MANAGER
- 	help
- 	  This is a generic software asynchronous crypto daemon that
-@@ -320,6 +353,7 @@ config CRYPTO_AES_586
- 	tristate "AES cipher algorithms (i586)"
- 	depends on (X86 || UML_X86) && !64BIT
- 	select CRYPTO_ALGAPI
-+	select CRYPTO_AES
- 	help
- 	  AES cipher algorithms (FIPS-197). AES uses the Rijndael 
- 	  algorithm.
-@@ -341,6 +375,7 @@ config CRYPTO_AES_X86_64
- 	tristate "AES cipher algorithms (x86_64)"
- 	depends on (X86 || UML_X86) && 64BIT
- 	select CRYPTO_ALGAPI
-+	select CRYPTO_AES
- 	help
- 	  AES cipher algorithms (FIPS-197). AES uses the Rijndael 
- 	  algorithm.
-@@ -441,6 +476,46 @@ config CRYPTO_SEED
- 	  See also:
- 	  <http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp>
++static int it821x_ident_hack(struct ata_port *ap)
++{
++	struct ata_device *adev;
++	ata_link_for_each_dev(adev, &ap->link) {
++		if (ata_dev_enabled(adev)) {
++			adev->id[84] &= ~(1 << 6);	/* No FUA */
++			adev->id[85] &= ~(1 << 10);	/* No HPA */
++			adev->id[76] = 0;		/* No NCQ/AN etc */
++		}
++	}
++	return ata_cable_unknown(ap);
+ }
  
-+config CRYPTO_SALSA20
-+	tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"
-+	depends on EXPERIMENTAL
-+	select CRYPTO_BLKCIPHER
-+	help
-+	  Salsa20 stream cipher algorithm.
+ 
+@@ -634,7 +665,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
+ 	.thaw		= ata_bmdma_thaw,
+ 	.error_handler	= ata_bmdma_error_handler,
+ 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+-	.cable_detect	= ata_cable_unknown,
++	.cable_detect	= it821x_ident_hack,
+ 
+ 	.bmdma_setup 	= ata_bmdma_setup,
+ 	.bmdma_start 	= ata_bmdma_start,
+diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
+index 120b5bf..030878f 100644
+--- a/drivers/ata/pata_ixp4xx_cf.c
++++ b/drivers/ata/pata_ixp4xx_cf.c
+@@ -42,13 +42,13 @@ static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
+ 	return 0;
+ }
+ 
+-static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+-				unsigned int buflen, int write_data)
++static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev,
++				unsigned char *buf, unsigned int buflen, int rw)
+ {
+ 	unsigned int i;
+ 	unsigned int words = buflen >> 1;
+ 	u16 *buf16 = (u16 *) buf;
+-	struct ata_port *ap = adev->link->ap;
++	struct ata_port *ap = dev->link->ap;
+ 	void __iomem *mmio = ap->ioaddr.data_addr;
+ 	struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
+ 
+@@ -59,30 +59,32 @@ static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+ 	udelay(100);
+ 
+ 	/* Transfer multiple of 2 bytes */
+-	if (write_data) {
+-		for (i = 0; i < words; i++)
+-			writew(buf16[i], mmio);
+-	} else {
++	if (rw == READ)
+ 		for (i = 0; i < words; i++)
+ 			buf16[i] = readw(mmio);
+-	}
++	else
++		for (i = 0; i < words; i++)
++			writew(buf16[i], mmio);
+ 
+ 	/* Transfer trailing 1 byte, if any. */
+ 	if (unlikely(buflen & 0x01)) {
+ 		u16 align_buf[1] = { 0 };
+ 		unsigned char *trailing_buf = buf + buflen - 1;
+ 
+-		if (write_data) {
+-			memcpy(align_buf, trailing_buf, 1);
+-			writew(align_buf[0], mmio);
+-		} else {
++		if (rw == READ) {
+ 			align_buf[0] = readw(mmio);
+ 			memcpy(trailing_buf, align_buf, 1);
++		} else {
++			memcpy(align_buf, trailing_buf, 1);
++			writew(align_buf[0], mmio);
+ 		}
++		words++;
+ 	}
+ 
+ 	udelay(100);
+ 	*data->cs0_cfg |= 0x01;
 +
-+	  Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
-+	  Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
++	return words << 1;
+ }
+ 
+ static struct scsi_host_template ixp4xx_sht = {
+diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
+index 17159b5..333dc15 100644
+--- a/drivers/ata/pata_legacy.c
++++ b/drivers/ata/pata_legacy.c
+@@ -28,7 +28,6 @@
+  *
+  *  Unsupported but docs exist:
+  *	Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
+- *	Winbond W83759A
+  *
+  *  This driver handles legacy (that is "ISA/VLB side") IDE ports found
+  *  on PC class systems. There are three hybrid devices that are exceptions
+@@ -36,7 +35,7 @@
+  *  the MPIIX where the tuning is PCI side but the IDE is "ISA side".
+  *
+  *  Specific support is included for the ht6560a/ht6560b/opti82c611a/
+- *  opti82c465mv/promise 20230c/20630
++ *  opti82c465mv/promise 20230c/20630/winbond83759A
+  *
+  *  Use the autospeed and pio_mask options with:
+  *	Appian ADI/2 aka CLPD7220 or AIC25VL01.
+@@ -47,9 +46,6 @@
+  *  For now use autospeed and pio_mask as above with the W83759A. This may
+  *  change.
+  *
+- *  TODO
+- *	Merge existing pata_qdi driver
+- *
+  */
+ 
+ #include <linux/kernel.h>
+@@ -64,12 +60,13 @@
+ #include <linux/platform_device.h>
+ 
+ #define DRV_NAME "pata_legacy"
+-#define DRV_VERSION "0.5.5"
++#define DRV_VERSION "0.6.5"
+ 
+ #define NR_HOST 6
+ 
+-static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+-static int legacy_irq[NR_HOST] = { 14, 15, 11, 10, 8, 12 };
++static int all;
++module_param(all, int, 0444);
++MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI(0=off, 1=on)");
+ 
+ struct legacy_data {
+ 	unsigned long timing;
+@@ -80,21 +77,107 @@ struct legacy_data {
+ 
+ };
+ 
++enum controller {
++	BIOS = 0,
++	SNOOP = 1,
++	PDC20230 = 2,
++	HT6560A = 3,
++	HT6560B = 4,
++	OPTI611A = 5,
++	OPTI46X = 6,
++	QDI6500 = 7,
++	QDI6580 = 8,
++	QDI6580DP = 9,		/* Dual channel mode is different */
++	W83759A = 10,
 +
-+	  The Salsa20 stream cipher algorithm is designed by Daniel J.
-+	  Bernstein <djb at cr.yp.to>. See <http://cr.yp.to/snuffle.html>
++	UNKNOWN = -1
++};
 +
-+config CRYPTO_SALSA20_586
-+	tristate "Salsa20 stream cipher algorithm (i586) (EXPERIMENTAL)"
-+	depends on (X86 || UML_X86) && !64BIT
-+	depends on EXPERIMENTAL
-+	select CRYPTO_BLKCIPHER
-+	help
-+	  Salsa20 stream cipher algorithm.
 +
-+	  Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
-+	  Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
++struct legacy_probe {
++	unsigned char *name;
++	unsigned long port;
++	unsigned int irq;
++	unsigned int slot;
++	enum controller type;
++	unsigned long private;
++};
 +
-+	  The Salsa20 stream cipher algorithm is designed by Daniel J.
-+	  Bernstein <djb at cr.yp.to>. See <http://cr.yp.to/snuffle.html>
++struct legacy_controller {
++	const char *name;
++	struct ata_port_operations *ops;
++	unsigned int pio_mask;
++	unsigned int flags;
++	int (*setup)(struct platform_device *, struct legacy_probe *probe,
++		struct legacy_data *data);
++};
 +
-+config CRYPTO_SALSA20_X86_64
-+	tristate "Salsa20 stream cipher algorithm (x86_64) (EXPERIMENTAL)"
-+	depends on (X86 || UML_X86) && 64BIT
-+	depends on EXPERIMENTAL
-+	select CRYPTO_BLKCIPHER
-+	help
-+	  Salsa20 stream cipher algorithm.
++static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
 +
-+	  Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
-+	  Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
++static struct legacy_probe probe_list[NR_HOST];
+ static struct legacy_data legacy_data[NR_HOST];
+ static struct ata_host *legacy_host[NR_HOST];
+ static int nr_legacy_host;
+ 
+ 
+-static int probe_all;			/* Set to check all ISA port ranges */
+-static int ht6560a;			/* HT 6560A on primary 1, secondary 2, both 3 */
+-static int ht6560b;			/* HT 6560A on primary 1, secondary 2, both 3 */
+-static int opti82c611a;			/* Opti82c611A on primary 1, secondary 2, both 3 */
+-static int opti82c46x;			/* Opti 82c465MV present (pri/sec autodetect) */
+-static int autospeed;			/* Chip present which snoops speed changes */
+-static int pio_mask = 0x1F;		/* PIO range for autospeed devices */
++static int probe_all;		/* Set to check all ISA port ranges */
++static int ht6560a;		/* HT 6560A on primary 1, second 2, both 3 */
++static int ht6560b;		/* HT 6560A on primary 1, second 2, both 3 */
++static int opti82c611a;		/* Opti82c611A on primary 1, sec 2, both 3 */
++static int opti82c46x;		/* Opti 82c465MV present(pri/sec autodetect) */
++static int qdi;			/* Set to probe QDI controllers */
++static int winbond;		/* Set to probe Winbond controllers,
++					give I/O port if non standard */
++static int autospeed;		/* Chip present which snoops speed changes */
++static int pio_mask = 0x1F;	/* PIO range for autospeed devices */
+ static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
+ 
+ /**
++ *	legacy_probe_add	-	Add interface to probe list
++ *	@port: Controller port
++ *	@irq: IRQ number
++ *	@type: Controller type
++ *	@private: Controller specific info
++ *
++ *	Add an entry into the probe list for ATA controllers. This is used
++ *	to add the default ISA slots and then to build up the table
++ *	further according to other ISA/VLB/Weird device scans
++ *
++ *	An I/O port list is used to keep ordering stable and sane, as we
++ *	don't have any good way to talk about ordering otherwise
++ */
 +
-+	  The Salsa20 stream cipher algorithm is designed by Daniel J.
-+	  Bernstein <djb at cr.yp.to>. See <http://cr.yp.to/snuffle.html>
++static int legacy_probe_add(unsigned long port, unsigned int irq,
++				enum controller type, unsigned long private)
++{
++	struct legacy_probe *lp = &probe_list[0];
++	int i;
++	struct legacy_probe *free = NULL;
++
++	for (i = 0; i < NR_HOST; i++) {
++		if (lp->port == 0 && free == NULL)
++			free = lp;
++		/* Matching port, or the correct slot for ordering */
++		if (lp->port == port || legacy_port[i] == port) {
++			free = lp;
++			break;
++		}
++		lp++;
++	}
++	if (free == NULL) {
++		printk(KERN_ERR "pata_legacy: Too many interfaces.\n");
++		return -1;
++	}
++	/* Fill in the entry for later probing */
++	free->port = port;
++	free->irq = irq;
++	free->type = type;
++	free->private = private;
++	return 0;
++}
++
++
++/**
+  *	legacy_set_mode		-	mode setting
+  *	@link: IDE link
+  *	@unused: Device that failed when error is returned
+@@ -113,7 +196,8 @@ static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
+ 
+ 	ata_link_for_each_dev(dev, link) {
+ 		if (ata_dev_enabled(dev)) {
+-			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
++			ata_dev_printk(dev, KERN_INFO,
++						"configured for PIO\n");
+ 			dev->pio_mode = XFER_PIO_0;
+ 			dev->xfer_mode = XFER_PIO_0;
+ 			dev->xfer_shift = ATA_SHIFT_PIO;
+@@ -171,7 +255,7 @@ static struct ata_port_operations simple_port_ops = {
+ 	.irq_clear	= ata_bmdma_irq_clear,
+ 	.irq_on		= ata_irq_on,
+ 
+-	.port_start	= ata_port_start,
++	.port_start	= ata_sff_port_start,
+ };
+ 
+ static struct ata_port_operations legacy_port_ops = {
+@@ -198,15 +282,16 @@ static struct ata_port_operations legacy_port_ops = {
+ 	.irq_clear	= ata_bmdma_irq_clear,
+ 	.irq_on		= ata_irq_on,
+ 
+-	.port_start	= ata_port_start,
++	.port_start	= ata_sff_port_start,
+ };
+ 
+ /*
+  *	Promise 20230C and 20620 support
+  *
+- *	This controller supports PIO0 to PIO2. We set PIO timings conservatively to
+- *	allow for 50MHz Vesa Local Bus. The 20620 DMA support is weird being DMA to
+- *	controller and PIO'd to the host and not supported.
++ *	This controller supports PIO0 to PIO2. We set PIO timings
++ *	conservatively to allow for 50MHz Vesa Local Bus. The 20620 DMA
++ *	support is weird being DMA to controller and PIO'd to the host
++ *	and not supported.
+  */
+ 
+ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
+@@ -221,8 +306,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ 	local_irq_save(flags);
+ 
+ 	/* Unlock the control interface */
+-	do
+-	{
++	do {
+ 		inb(0x1F5);
+ 		outb(inb(0x1F2) | 0x80, 0x1F2);
+ 		inb(0x1F2);
+@@ -231,7 +315,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ 		inb(0x1F2);
+ 		inb(0x1F2);
+ 	}
+-	while((inb(0x1F2) & 0x80) && --tries);
++	while ((inb(0x1F2) & 0x80) && --tries);
  
- config CRYPTO_DEFLATE
- 	tristate "Deflate compression algorithm"
-@@ -491,6 +566,7 @@ config CRYPTO_TEST
- 	tristate "Testing module"
- 	depends on m
- 	select CRYPTO_ALGAPI
-+	select CRYPTO_AEAD
- 	help
- 	  Quick & dirty crypto test module.
+ 	local_irq_restore(flags);
  
-@@ -498,10 +574,19 @@ config CRYPTO_AUTHENC
- 	tristate "Authenc support"
- 	select CRYPTO_AEAD
- 	select CRYPTO_MANAGER
-+	select CRYPTO_HASH
- 	help
- 	  Authenc: Combined mode wrapper for IPsec.
- 	  This is required for IPSec.
+@@ -249,13 +333,14 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
  
-+config CRYPTO_LZO
-+	tristate "LZO compression algorithm"
-+	select CRYPTO_ALGAPI
-+	select LZO_COMPRESS
-+	select LZO_DECOMPRESS
-+	help
-+	  This is the LZO algorithm.
-+
- source "drivers/crypto/Kconfig"
+ }
  
- endif	# if CRYPTO
-diff --git a/crypto/Makefile b/crypto/Makefile
-index 43c2a0d..48c7583 100644
---- a/crypto/Makefile
-+++ b/crypto/Makefile
-@@ -8,9 +8,14 @@ crypto_algapi-$(CONFIG_PROC_FS) += proc.o
- crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
- obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
+-static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
++static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
++			unsigned char *buf, unsigned int buflen, int rw)
+ {
+-	struct ata_port *ap = adev->link->ap;
+-	int slop = buflen & 3;
+-	unsigned long flags;
++	if (ata_id_has_dword_io(dev->id)) {
++		struct ata_port *ap = dev->link->ap;
++		int slop = buflen & 3;
++		unsigned long flags;
  
--obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o
- obj-$(CONFIG_CRYPTO_AEAD) += aead.o
--obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o
+-	if (ata_id_has_dword_io(adev->id)) {
+ 		local_irq_save(flags);
+ 
+ 		/* Perform the 32bit I/O synchronization sequence */
+@@ -264,26 +349,27 @@ static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsig
+ 		ioread8(ap->ioaddr.nsect_addr);
+ 
+ 		/* Now the data */
+-
+-		if (write_data)
+-			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+-		else
++		if (rw == READ)
+ 			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
++		else
++			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ 
+ 		if (unlikely(slop)) {
+-			__le32 pad = 0;
+-			if (write_data) {
+-				memcpy(&pad, buf + buflen - slop, slop);
+-				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+-			} else {
++			u32 pad;
++			if (rw == READ) {
+ 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+ 				memcpy(buf + buflen - slop, &pad, slop);
++			} else {
++				memcpy(&pad, buf + buflen - slop, slop);
++				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+ 			}
++			buflen += 4 - slop;
+ 		}
+ 		local_irq_restore(flags);
+-	}
+-	else
+-		ata_data_xfer_noirq(adev, buf, buflen, write_data);
++	} else
++		buflen = ata_data_xfer_noirq(dev, buf, buflen, rw);
 +
-+crypto_blkcipher-objs := ablkcipher.o
-+crypto_blkcipher-objs += blkcipher.o
-+obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
-+obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o
-+obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o
-+obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
++	return buflen;
+ }
  
- crypto_hash-objs := hash.o
- obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
-@@ -32,6 +37,9 @@ obj-$(CONFIG_CRYPTO_CBC) += cbc.o
- obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
- obj-$(CONFIG_CRYPTO_LRW) += lrw.o
- obj-$(CONFIG_CRYPTO_XTS) += xts.o
-+obj-$(CONFIG_CRYPTO_CTR) += ctr.o
-+obj-$(CONFIG_CRYPTO_GCM) += gcm.o
-+obj-$(CONFIG_CRYPTO_CCM) += ccm.o
- obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
- obj-$(CONFIG_CRYPTO_DES) += des_generic.o
- obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
-@@ -48,10 +56,12 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
- obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
- obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
- obj-$(CONFIG_CRYPTO_SEED) += seed.o
-+obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
- obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
- obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
- obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
- obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
-+obj-$(CONFIG_CRYPTO_LZO) += lzo.o
+ static struct ata_port_operations pdc20230_port_ops = {
+@@ -310,14 +396,14 @@ static struct ata_port_operations pdc20230_port_ops = {
+ 	.irq_clear	= ata_bmdma_irq_clear,
+ 	.irq_on		= ata_irq_on,
  
- obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+-	.port_start	= ata_port_start,
++	.port_start	= ata_sff_port_start,
+ };
  
-diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
-index 2731acb..3bcb099 100644
---- a/crypto/ablkcipher.c
-+++ b/crypto/ablkcipher.c
-@@ -13,14 +13,18 @@
+ /*
+  *	Holtek 6560A support
   *
+- *	This controller supports PIO0 to PIO2 (no IORDY even though higher timings
+- *	can be loaded).
++ *	This controller supports PIO0 to PIO2 (no IORDY even though higher
++ *	timings can be loaded).
   */
  
--#include <crypto/algapi.h>
--#include <linux/errno.h>
-+#include <crypto/internal/skcipher.h>
-+#include <linux/err.h>
- #include <linux/init.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
-+#include <linux/rtnetlink.h>
-+#include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/seq_file.h>
+ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
+@@ -364,14 +450,14 @@ static struct ata_port_operations ht6560a_port_ops = {
+ 	.irq_clear	= ata_bmdma_irq_clear,
+ 	.irq_on		= ata_irq_on,
  
-+#include "internal.h"
-+
- static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
- 			    unsigned int keylen)
- {
-@@ -66,6 +70,16 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
- 	return alg->cra_ctxsize;
- }
+-	.port_start	= ata_port_start,
++	.port_start	= ata_sff_port_start,
+ };
  
-+int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
-+{
-+	return crypto_ablkcipher_encrypt(&req->creq);
-+}
-+
-+int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
-+{
-+	return crypto_ablkcipher_decrypt(&req->creq);
-+}
-+
- static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
- 				      u32 mask)
+ /*
+  *	Holtek 6560B support
+  *
+- *	This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO setting
+- *	unless we see an ATAPI device in which case we force it off.
++ *	This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO
++ *	setting unless we see an ATAPI device in which case we force it off.
+  *
+  *	FIXME: need to implement 2nd channel support.
+  */
+@@ -398,7 +484,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ 	if (adev->class != ATA_DEV_ATA) {
+ 		u8 rconf = inb(0x3E6);
+ 		if (rconf & 0x24) {
+-			rconf &= ~ 0x24;
++			rconf &= ~0x24;
+ 			outb(rconf, 0x3E6);
+ 		}
+ 	}
+@@ -423,13 +509,13 @@ static struct ata_port_operations ht6560b_port_ops = {
+ 	.qc_prep 	= ata_qc_prep,
+ 	.qc_issue	= ata_qc_issue_prot,
+ 
+-	.data_xfer	= ata_data_xfer,	/* FIXME: Check 32bit and noirq */
++	.data_xfer	= ata_data_xfer,    /* FIXME: Check 32bit and noirq */
+ 
+ 	.irq_handler	= ata_interrupt,
+ 	.irq_clear	= ata_bmdma_irq_clear,
+ 	.irq_on		= ata_irq_on,
+ 
+-	.port_start	= ata_port_start,
++	.port_start	= ata_sff_port_start,
+ };
+ 
+ /*
+@@ -462,7 +548,8 @@ static u8 opti_syscfg(u8 reg)
+  *	This controller supports PIO0 to PIO3.
+  */
+ 
+-static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev)
++static void opti82c611a_set_piomode(struct ata_port *ap,
++						struct ata_device *adev)
  {
-@@ -78,6 +92,11 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
- 	crt->setkey = setkey;
- 	crt->encrypt = alg->encrypt;
- 	crt->decrypt = alg->decrypt;
-+	if (!alg->ivsize) {
-+		crt->givencrypt = skcipher_null_givencrypt;
-+		crt->givdecrypt = skcipher_null_givdecrypt;
-+	}
-+	crt->base = __crypto_ablkcipher_cast(tfm);
- 	crt->ivsize = alg->ivsize;
+ 	u8 active, recover, setup;
+ 	struct ata_timing t;
+@@ -549,7 +636,7 @@ static struct ata_port_operations opti82c611a_port_ops = {
+ 	.irq_clear	= ata_bmdma_irq_clear,
+ 	.irq_on		= ata_irq_on,
  
- 	return 0;
-@@ -90,10 +109,13 @@ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
- 	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
+-	.port_start	= ata_port_start,
++	.port_start	= ata_sff_port_start,
+ };
  
- 	seq_printf(m, "type         : ablkcipher\n");
-+	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-+					     "yes" : "no");
- 	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
- 	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
- 	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
- 	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
-+	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
- }
+ /*
+@@ -681,77 +768,398 @@ static struct ata_port_operations opti82c46x_port_ops = {
+ 	.irq_clear	= ata_bmdma_irq_clear,
+ 	.irq_on		= ata_irq_on,
  
- const struct crypto_type crypto_ablkcipher_type = {
-@@ -105,5 +127,220 @@ const struct crypto_type crypto_ablkcipher_type = {
+-	.port_start	= ata_port_start,
++	.port_start	= ata_sff_port_start,
  };
- EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
  
-+static int no_givdecrypt(struct skcipher_givcrypt_request *req)
-+{
-+	return -ENOSYS;
-+}
-+
-+static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
-+				      u32 mask)
++static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
 +{
-+	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
-+	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
-+
-+	if (alg->ivsize > PAGE_SIZE / 8)
-+		return -EINVAL;
-+
-+	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
-+		      alg->setkey : setkey;
-+	crt->encrypt = alg->encrypt;
-+	crt->decrypt = alg->decrypt;
-+	crt->givencrypt = alg->givencrypt;
-+	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
-+	crt->base = __crypto_ablkcipher_cast(tfm);
-+	crt->ivsize = alg->ivsize;
-+
-+	return 0;
-+}
++	struct ata_timing t;
++	struct legacy_data *qdi = ap->host->private_data;
++	int active, recovery;
++	u8 timing;
 +
-+static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
-+	__attribute__ ((unused));
-+static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
-+{
-+	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
++	/* Get the timing data in cycles */
++	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 +
-+	seq_printf(m, "type         : givcipher\n");
-+	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-+					     "yes" : "no");
-+	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
-+	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
-+	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
-+	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
-+	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
-+}
++	if (qdi->fast) {
++		active = 8 - FIT(t.active, 1, 8);
++		recovery = 18 - FIT(t.recover, 3, 18);
++	} else {
++		active = 9 - FIT(t.active, 2, 9);
++		recovery = 15 - FIT(t.recover, 0, 15);
++	}
++	timing = (recovery << 4) | active | 0x08;
 +
-+const struct crypto_type crypto_givcipher_type = {
-+	.ctxsize = crypto_ablkcipher_ctxsize,
-+	.init = crypto_init_givcipher_ops,
-+#ifdef CONFIG_PROC_FS
-+	.show = crypto_givcipher_show,
-+#endif
-+};
-+EXPORT_SYMBOL_GPL(crypto_givcipher_type);
++	qdi->clock[adev->devno] = timing;
 +
-+const char *crypto_default_geniv(const struct crypto_alg *alg)
-+{
-+	return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
++	outb(timing, qdi->timing);
 +}
+ 
+ /**
+- *	legacy_init_one		-	attach a legacy interface
+- *	@port: port number
+- *	@io: I/O port start
+- *	@ctrl: control port
++ *	qdi6580dp_set_piomode		-	PIO setup for dual channel
++ *	@ap: Port
++ *	@adev: Device
+  *	@irq: interrupt line
+  *
+- *	Register an ISA bus IDE interface. Such interfaces are PIO and we
+- *	assume do not support IRQ sharing.
++ *	In dual channel mode the 6580 has one clock per channel and we have
++ *	to software clockswitch in qc_issue_prot.
+  */
+ 
+-static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl, int irq)
++static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ {
+-	struct legacy_data *ld = &legacy_data[nr_legacy_host];
+-	struct ata_host *host;
+-	struct ata_port *ap;
+-	struct platform_device *pdev;
+-	struct ata_port_operations *ops = &legacy_port_ops;
+-	void __iomem *io_addr, *ctrl_addr;
+-	int pio_modes = pio_mask;
+-	u32 mask = (1 << port);
+-	u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
+-	int ret;
++	struct ata_timing t;
++	struct legacy_data *qdi = ap->host->private_data;
++	int active, recovery;
++	u8 timing;
+ 
+-	pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0);
+-	if (IS_ERR(pdev))
+-		return PTR_ERR(pdev);
++	/* Get the timing data in cycles */
++	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 +
-+static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
-+{
-+	struct rtattr *tb[3];
-+	struct {
-+		struct rtattr attr;
-+		struct crypto_attr_type data;
-+	} ptype;
-+	struct {
-+		struct rtattr attr;
-+		struct crypto_attr_alg data;
-+	} palg;
-+	struct crypto_template *tmpl;
-+	struct crypto_instance *inst;
-+	struct crypto_alg *larval;
-+	const char *geniv;
-+	int err;
-+
-+	larval = crypto_larval_lookup(alg->cra_driver_name,
-+				      CRYPTO_ALG_TYPE_GIVCIPHER,
-+				      CRYPTO_ALG_TYPE_MASK);
-+	err = PTR_ERR(larval);
-+	if (IS_ERR(larval))
-+		goto out;
-+
-+	err = -EAGAIN;
-+	if (!crypto_is_larval(larval))
-+		goto drop_larval;
-+
-+	ptype.attr.rta_len = sizeof(ptype);
-+	ptype.attr.rta_type = CRYPTOA_TYPE;
-+	ptype.data.type = type | CRYPTO_ALG_GENIV;
-+	/* GENIV tells the template that we're making a default geniv. */
-+	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
-+	tb[0] = &ptype.attr;
-+
-+	palg.attr.rta_len = sizeof(palg);
-+	palg.attr.rta_type = CRYPTOA_ALG;
-+	/* Must use the exact name to locate ourselves. */
-+	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
-+	tb[1] = &palg.attr;
-+
-+	tb[2] = NULL;
-+
-+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-+	    CRYPTO_ALG_TYPE_BLKCIPHER)
-+		geniv = alg->cra_blkcipher.geniv;
-+	else
-+		geniv = alg->cra_ablkcipher.geniv;
-+
-+	if (!geniv)
-+		geniv = crypto_default_geniv(alg);
-+
-+	tmpl = crypto_lookup_template(geniv);
-+	err = -ENOENT;
-+	if (!tmpl)
-+		goto kill_larval;
-+
-+	inst = tmpl->alloc(tb);
-+	err = PTR_ERR(inst);
-+	if (IS_ERR(inst))
-+		goto put_tmpl;
-+
-+	if ((err = crypto_register_instance(tmpl, inst))) {
-+		tmpl->free(inst);
-+		goto put_tmpl;
++	if (qdi->fast) {
++		active = 8 - FIT(t.active, 1, 8);
++		recovery = 18 - FIT(t.recover, 3, 18);
++	} else {
++		active = 9 - FIT(t.active, 2, 9);
++		recovery = 15 - FIT(t.recover, 0, 15);
 +	}
-+
-+	/* Redo the lookup to use the instance we just registered. */
-+	err = -EAGAIN;
-+
-+put_tmpl:
-+	crypto_tmpl_put(tmpl);
-+kill_larval:
-+	crypto_larval_kill(larval);
-+drop_larval:
-+	crypto_mod_put(larval);
-+out:
-+	crypto_mod_put(alg);
-+	return err;
++	timing = (recovery << 4) | active | 0x08;
+ 
+-	ret = -EBUSY;
+-	if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
+-	    devm_request_region(&pdev->dev, ctrl, 1, "pata_legacy") == NULL)
+-		goto fail;
++	qdi->clock[adev->devno] = timing;
+ 
+-	ret = -ENOMEM;
+-	io_addr = devm_ioport_map(&pdev->dev, io, 8);
+-	ctrl_addr = devm_ioport_map(&pdev->dev, ctrl, 1);
+-	if (!io_addr || !ctrl_addr)
+-		goto fail;
++	outb(timing, qdi->timing + 2 * ap->port_no);
++	/* Clear the FIFO */
++	if (adev->class != ATA_DEV_ATA)
++		outb(0x5F, qdi->timing + 3);
 +}
+ 
+-	if (ht6560a & mask) {
+-		ops = &ht6560a_port_ops;
+-		pio_modes = 0x07;
+-		iordy = ATA_FLAG_NO_IORDY;
+-	}
+-	if (ht6560b & mask) {
+-		ops = &ht6560b_port_ops;
+-		pio_modes = 0x1F;
+-	}
+-	if (opti82c611a & mask) {
+-		ops = &opti82c611a_port_ops;
+-		pio_modes = 0x0F;
++/**
++ *	qdi6580_set_piomode		-	PIO setup for single channel
++ *	@ap: Port
++ *	@adev: Device
++ *
++ *	In single channel mode the 6580 has one clock per device and we can
++ *	avoid the requirement to clock switch. We also have to load the timing
++ *	into the right clock according to whether we are master or slave.
++ */
 +
-+static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
-+						 u32 mask)
++static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
 +{
-+	struct crypto_alg *alg;
-+
-+	alg = crypto_alg_mod_lookup(name, type, mask);
-+	if (IS_ERR(alg))
-+		return alg;
-+
-+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-+	    CRYPTO_ALG_TYPE_GIVCIPHER)
-+		return alg;
++	struct ata_timing t;
++	struct legacy_data *qdi = ap->host->private_data;
++	int active, recovery;
++	u8 timing;
 +
-+	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-+	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-+					  alg->cra_ablkcipher.ivsize))
-+		return alg;
++	/* Get the timing data in cycles */
++	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 +
-+	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
++	if (qdi->fast) {
++		active = 8 - FIT(t.active, 1, 8);
++		recovery = 18 - FIT(t.recover, 3, 18);
++	} else {
++		active = 9 - FIT(t.active, 2, 9);
++		recovery = 15 - FIT(t.recover, 0, 15);
+ 	}
+-	if (opti82c46x & mask) {
+-		ops = &opti82c46x_port_ops;
+-		pio_modes = 0x0F;
++	timing = (recovery << 4) | active | 0x08;
++	qdi->clock[adev->devno] = timing;
++	outb(timing, qdi->timing + 2 * adev->devno);
++	/* Clear the FIFO */
++	if (adev->class != ATA_DEV_ATA)
++		outb(0x5F, qdi->timing + 3);
 +}
 +
-+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
-+			 u32 type, u32 mask)
-+{
-+	struct crypto_alg *alg;
-+	int err;
-+
-+	type = crypto_skcipher_type(type);
-+	mask = crypto_skcipher_mask(mask);
-+
-+	alg = crypto_lookup_skcipher(name, type, mask);
-+	if (IS_ERR(alg))
-+		return PTR_ERR(alg);
-+
-+	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
-+	crypto_mod_put(alg);
-+	return err;
-+}
-+EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
++/**
++ *	qdi_qc_issue_prot	-	command issue
++ *	@qc: command pending
++ *
++ *	Called when the libata layer is about to issue a command. We wrap
++ *	this interface so that we can load the correct ATA timings.
++ */
 +
-+struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
-+						  u32 type, u32 mask)
++static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
 +{
-+	struct crypto_tfm *tfm;
-+	int err;
-+
-+	type = crypto_skcipher_type(type);
-+	mask = crypto_skcipher_mask(mask);
-+
-+	for (;;) {
-+		struct crypto_alg *alg;
-+
-+		alg = crypto_lookup_skcipher(alg_name, type, mask);
-+		if (IS_ERR(alg)) {
-+			err = PTR_ERR(alg);
-+			goto err;
-+		}
-+
-+		tfm = __crypto_alloc_tfm(alg, type, mask);
-+		if (!IS_ERR(tfm))
-+			return __crypto_ablkcipher_cast(tfm);
-+
-+		crypto_mod_put(alg);
-+		err = PTR_ERR(tfm);
++	struct ata_port *ap = qc->ap;
++	struct ata_device *adev = qc->dev;
++	struct legacy_data *qdi = ap->host->private_data;
 +
-+err:
-+		if (err != -EAGAIN)
-+			break;
-+		if (signal_pending(current)) {
-+			err = -EINTR;
-+			break;
++	if (qdi->clock[adev->devno] != qdi->last) {
++		if (adev->pio_mode) {
++			qdi->last = qdi->clock[adev->devno];
++			outb(qdi->clock[adev->devno], qdi->timing +
++							2 * ap->port_no);
 +		}
-+	}
-+
-+	return ERR_PTR(err);
-+}
-+EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
-+
- MODULE_LICENSE("GPL");
- MODULE_DESCRIPTION("Asynchronous block chaining cipher type");
-diff --git a/crypto/aead.c b/crypto/aead.c
-index 84a3501..3a6f3f5 100644
---- a/crypto/aead.c
-+++ b/crypto/aead.c
-@@ -12,14 +12,17 @@
-  *
-  */
- 
--#include <crypto/algapi.h>
--#include <linux/errno.h>
-+#include <crypto/internal/aead.h>
-+#include <linux/err.h>
- #include <linux/init.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
-+#include <linux/rtnetlink.h>
- #include <linux/slab.h>
- #include <linux/seq_file.h>
- 
-+#include "internal.h"
-+
- static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
- 			    unsigned int keylen)
- {
-@@ -53,25 +56,54 @@ static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
- 	return aead->setkey(tfm, key, keylen);
- }
- 
-+int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
-+{
-+	struct aead_tfm *crt = crypto_aead_crt(tfm);
-+	int err;
-+
-+	if (authsize > crypto_aead_alg(tfm)->maxauthsize)
-+		return -EINVAL;
-+
-+	if (crypto_aead_alg(tfm)->setauthsize) {
-+		err = crypto_aead_alg(tfm)->setauthsize(crt->base, authsize);
-+		if (err)
-+			return err;
-+	}
-+
-+	crypto_aead_crt(crt->base)->authsize = authsize;
-+	crt->authsize = authsize;
-+	return 0;
+ 	}
++	return ata_qc_issue_prot(qc);
 +}
-+EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
-+
- static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type,
- 					u32 mask)
- {
- 	return alg->cra_ctxsize;
- }
  
-+static int no_givcrypt(struct aead_givcrypt_request *req)
+-	/* Probe for automatically detectable controllers */
++static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
++					unsigned int buflen, int rw)
 +{
-+	return -ENOSYS;
-+}
-+
- static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
- {
- 	struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
- 	struct aead_tfm *crt = &tfm->crt_aead;
- 
--	if (max(alg->authsize, alg->ivsize) > PAGE_SIZE / 8)
-+	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
- 		return -EINVAL;
- 
--	crt->setkey = setkey;
-+	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
-+		      alg->setkey : setkey;
- 	crt->encrypt = alg->encrypt;
- 	crt->decrypt = alg->decrypt;
-+	crt->givencrypt = alg->givencrypt ?: no_givcrypt;
-+	crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
-+	crt->base = __crypto_aead_cast(tfm);
- 	crt->ivsize = alg->ivsize;
--	crt->authsize = alg->authsize;
-+	crt->authsize = alg->maxauthsize;
- 
- 	return 0;
- }
-@@ -83,9 +115,12 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
- 	struct aead_alg *aead = &alg->cra_aead;
- 
- 	seq_printf(m, "type         : aead\n");
-+	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-+					     "yes" : "no");
- 	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
- 	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
--	seq_printf(m, "authsize     : %u\n", aead->authsize);
-+	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
-+	seq_printf(m, "geniv        : %s\n", aead->geniv ?: "<built-in>");
- }
++	struct ata_port *ap = adev->link->ap;
++	int slop = buflen & 3;
  
- const struct crypto_type crypto_aead_type = {
-@@ -97,5 +132,358 @@ const struct crypto_type crypto_aead_type = {
- };
- EXPORT_SYMBOL_GPL(crypto_aead_type);
+-	if (io == 0x1F0 && ops == &legacy_port_ops) {
+-		unsigned long flags;
++	if (ata_id_has_dword_io(adev->id)) {
++		if (rw == WRITE)
++			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
++		else
++			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
  
-+static int aead_null_givencrypt(struct aead_givcrypt_request *req)
-+{
-+	return crypto_aead_encrypt(&req->areq);
-+}
-+
-+static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
-+{
-+	return crypto_aead_decrypt(&req->areq);
+-		local_irq_save(flags);
++		if (unlikely(slop)) {
++			u32 pad;
++			if (rw == WRITE) {
++				memcpy(&pad, buf + buflen - slop, slop);
++				pad = le32_to_cpu(pad);
++				iowrite32(pad, ap->ioaddr.data_addr);
++			} else {
++				pad = ioread32(ap->ioaddr.data_addr);
++				pad = cpu_to_le32(pad);
++				memcpy(buf + buflen - slop, &pad, slop);
++			}
++		}
++		return (buflen + 3) & ~3;
++	} else
++		return ata_data_xfer(adev, buf, buflen, rw);
 +}
 +
-+static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
++static int qdi_port(struct platform_device *dev,
++			struct legacy_probe *lp, struct legacy_data *ld)
 +{
-+	struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
-+	struct aead_tfm *crt = &tfm->crt_aead;
-+
-+	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
-+		return -EINVAL;
-+
-+	crt->setkey = setkey;
-+	crt->encrypt = alg->encrypt;
-+	crt->decrypt = alg->decrypt;
-+	if (!alg->ivsize) {
-+		crt->givencrypt = aead_null_givencrypt;
-+		crt->givdecrypt = aead_null_givdecrypt;
-+	}
-+	crt->base = __crypto_aead_cast(tfm);
-+	crt->ivsize = alg->ivsize;
-+	crt->authsize = alg->maxauthsize;
-+
++	if (devm_request_region(&dev->dev, lp->private, 4, "qdi") == NULL)
++		return -EBUSY;
++	ld->timing = lp->private;
 +	return 0;
 +}
 +
-+static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
-+	__attribute__ ((unused));
-+static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
-+{
-+	struct aead_alg *aead = &alg->cra_aead;
-+
-+	seq_printf(m, "type         : nivaead\n");
-+	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-+					     "yes" : "no");
-+	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
-+	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
-+	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
-+	seq_printf(m, "geniv        : %s\n", aead->geniv);
-+}
-+
-+const struct crypto_type crypto_nivaead_type = {
-+	.ctxsize = crypto_aead_ctxsize,
-+	.init = crypto_init_nivaead_ops,
-+#ifdef CONFIG_PROC_FS
-+	.show = crypto_nivaead_show,
-+#endif
-+};
-+EXPORT_SYMBOL_GPL(crypto_nivaead_type);
++static struct ata_port_operations qdi6500_port_ops = {
++	.set_piomode	= qdi6500_set_piomode,
 +
-+static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
-+			       const char *name, u32 type, u32 mask)
-+{
-+	struct crypto_alg *alg;
-+	int err;
++	.tf_load	= ata_tf_load,
++	.tf_read	= ata_tf_read,
++	.check_status 	= ata_check_status,
++	.exec_command	= ata_exec_command,
++	.dev_select 	= ata_std_dev_select,
 +
-+	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-+	type |= CRYPTO_ALG_TYPE_AEAD;
-+	mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV;
++	.freeze		= ata_bmdma_freeze,
++	.thaw		= ata_bmdma_thaw,
++	.error_handler	= ata_bmdma_error_handler,
++	.post_internal_cmd = ata_bmdma_post_internal_cmd,
++	.cable_detect	= ata_cable_40wire,
 +
-+	alg = crypto_alg_mod_lookup(name, type, mask);
-+	if (IS_ERR(alg))
-+		return PTR_ERR(alg);
++	.qc_prep 	= ata_qc_prep,
++	.qc_issue	= qdi_qc_issue_prot,
 +
-+	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
-+	crypto_mod_put(alg);
-+	return err;
-+}
++	.data_xfer	= vlb32_data_xfer,
 +
-+struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
-+					 struct rtattr **tb, u32 type,
-+					 u32 mask)
-+{
-+	const char *name;
-+	struct crypto_aead_spawn *spawn;
-+	struct crypto_attr_type *algt;
-+	struct crypto_instance *inst;
-+	struct crypto_alg *alg;
-+	int err;
++	.irq_handler	= ata_interrupt,
++	.irq_clear	= ata_bmdma_irq_clear,
++	.irq_on		= ata_irq_on,
 +
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
-+		return ERR_PTR(err);
++	.port_start	= ata_sff_port_start,
++};
 +
-+	if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
-+	    algt->mask)
-+		return ERR_PTR(-EINVAL);
++static struct ata_port_operations qdi6580_port_ops = {
++	.set_piomode	= qdi6580_set_piomode,
 +
-+	name = crypto_attr_alg_name(tb[1]);
-+	err = PTR_ERR(name);
-+	if (IS_ERR(name))
-+		return ERR_PTR(err);
++	.tf_load	= ata_tf_load,
++	.tf_read	= ata_tf_read,
++	.check_status 	= ata_check_status,
++	.exec_command	= ata_exec_command,
++	.dev_select 	= ata_std_dev_select,
 +
-+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-+	if (!inst)
-+		return ERR_PTR(-ENOMEM);
++	.freeze		= ata_bmdma_freeze,
++	.thaw		= ata_bmdma_thaw,
++	.error_handler	= ata_bmdma_error_handler,
++	.post_internal_cmd = ata_bmdma_post_internal_cmd,
++	.cable_detect	= ata_cable_40wire,
 +
-+	spawn = crypto_instance_ctx(inst);
++	.qc_prep 	= ata_qc_prep,
++	.qc_issue	= ata_qc_issue_prot,
 +
-+	/* Ignore async algorithms if necessary. */
-+	mask |= crypto_requires_sync(algt->type, algt->mask);
++	.data_xfer	= vlb32_data_xfer,
 +
-+	crypto_set_aead_spawn(spawn, inst);
-+	err = crypto_grab_nivaead(spawn, name, type, mask);
-+	if (err)
-+		goto err_free_inst;
++	.irq_handler	= ata_interrupt,
++	.irq_clear	= ata_bmdma_irq_clear,
++	.irq_on		= ata_irq_on,
 +
-+	alg = crypto_aead_spawn_alg(spawn);
++	.port_start	= ata_sff_port_start,
++};
 +
-+	err = -EINVAL;
-+	if (!alg->cra_aead.ivsize)
-+		goto err_drop_alg;
++static struct ata_port_operations qdi6580dp_port_ops = {
++	.set_piomode	= qdi6580dp_set_piomode,
 +
-+	/*
-+	 * This is only true if we're constructing an algorithm with its
-+	 * default IV generator.  For the default generator we elide the
-+	 * template name and double-check the IV generator.
-+	 */
-+	if (algt->mask & CRYPTO_ALG_GENIV) {
-+		if (strcmp(tmpl->name, alg->cra_aead.geniv))
-+			goto err_drop_alg;
++	.tf_load	= ata_tf_load,
++	.tf_read	= ata_tf_read,
++	.check_status 	= ata_check_status,
++	.exec_command	= ata_exec_command,
++	.dev_select 	= ata_std_dev_select,
 +
-+		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-+		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
-+		       CRYPTO_MAX_ALG_NAME);
-+	} else {
-+		err = -ENAMETOOLONG;
-+		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-+			     "%s(%s)", tmpl->name, alg->cra_name) >=
-+		    CRYPTO_MAX_ALG_NAME)
-+			goto err_drop_alg;
-+		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-+			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
-+		    CRYPTO_MAX_ALG_NAME)
-+			goto err_drop_alg;
-+	}
++	.freeze		= ata_bmdma_freeze,
++	.thaw		= ata_bmdma_thaw,
++	.error_handler	= ata_bmdma_error_handler,
++	.post_internal_cmd = ata_bmdma_post_internal_cmd,
++	.cable_detect	= ata_cable_40wire,
 +
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV;
-+	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-+	inst->alg.cra_priority = alg->cra_priority;
-+	inst->alg.cra_blocksize = alg->cra_blocksize;
-+	inst->alg.cra_alignmask = alg->cra_alignmask;
-+	inst->alg.cra_type = &crypto_aead_type;
++	.qc_prep 	= ata_qc_prep,
++	.qc_issue	= qdi_qc_issue_prot,
 +
-+	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
-+	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
-+	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
++	.data_xfer	= vlb32_data_xfer,
 +
-+	inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
-+	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
-+	inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt;
-+	inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt;
++	.irq_handler	= ata_interrupt,
++	.irq_clear	= ata_bmdma_irq_clear,
++	.irq_on		= ata_irq_on,
 +
-+out:
-+	return inst;
++	.port_start	= ata_sff_port_start,
++};
 +
-+err_drop_alg:
-+	crypto_drop_aead(spawn);
-+err_free_inst:
-+	kfree(inst);
-+	inst = ERR_PTR(err);
-+	goto out;
-+}
-+EXPORT_SYMBOL_GPL(aead_geniv_alloc);
++static DEFINE_SPINLOCK(winbond_lock);
 +
-+void aead_geniv_free(struct crypto_instance *inst)
++static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
 +{
-+	crypto_drop_aead(crypto_instance_ctx(inst));
-+	kfree(inst);
++	unsigned long flags;
++	spin_lock_irqsave(&winbond_lock, flags);
++	outb(reg, port + 0x01);
++	outb(val, port + 0x02);
++	spin_unlock_irqrestore(&winbond_lock, flags);
 +}
-+EXPORT_SYMBOL_GPL(aead_geniv_free);
 +
-+int aead_geniv_init(struct crypto_tfm *tfm)
++static u8 winbond_readcfg(unsigned long port, u8 reg)
 +{
-+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-+	struct crypto_aead *aead;
-+
-+	aead = crypto_spawn_aead(crypto_instance_ctx(inst));
-+	if (IS_ERR(aead))
-+		return PTR_ERR(aead);
-+
-+	tfm->crt_aead.base = aead;
-+	tfm->crt_aead.reqsize += crypto_aead_reqsize(aead);
++	u8 val;
 +
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(aead_geniv_init);
++	unsigned long flags;
++	spin_lock_irqsave(&winbond_lock, flags);
++	outb(reg, port + 0x01);
++	val = inb(port + 0x02);
++	spin_unlock_irqrestore(&winbond_lock, flags);
 +
-+void aead_geniv_exit(struct crypto_tfm *tfm)
-+{
-+	crypto_free_aead(tfm->crt_aead.base);
++	return val;
 +}
-+EXPORT_SYMBOL_GPL(aead_geniv_exit);
 +
-+static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
++static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
 +{
-+	struct rtattr *tb[3];
-+	struct {
-+		struct rtattr attr;
-+		struct crypto_attr_type data;
-+	} ptype;
-+	struct {
-+		struct rtattr attr;
-+		struct crypto_attr_alg data;
-+	} palg;
-+	struct crypto_template *tmpl;
-+	struct crypto_instance *inst;
-+	struct crypto_alg *larval;
-+	const char *geniv;
-+	int err;
-+
-+	larval = crypto_larval_lookup(alg->cra_driver_name,
-+				      CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
-+				      CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-+	err = PTR_ERR(larval);
-+	if (IS_ERR(larval))
-+		goto out;
-+
-+	err = -EAGAIN;
-+	if (!crypto_is_larval(larval))
-+		goto drop_larval;
-+
-+	ptype.attr.rta_len = sizeof(ptype);
-+	ptype.attr.rta_type = CRYPTOA_TYPE;
-+	ptype.data.type = type | CRYPTO_ALG_GENIV;
-+	/* GENIV tells the template that we're making a default geniv. */
-+	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
-+	tb[0] = &ptype.attr;
-+
-+	palg.attr.rta_len = sizeof(palg);
-+	palg.attr.rta_type = CRYPTOA_ALG;
-+	/* Must use the exact name to locate ourselves. */
-+	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
-+	tb[1] = &palg.attr;
-+
-+	tb[2] = NULL;
-+
-+	geniv = alg->cra_aead.geniv;
++	struct ata_timing t;
++	struct legacy_data *winbond = ap->host->private_data;
++	int active, recovery;
++	u8 reg;
++	int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
 +
-+	tmpl = crypto_lookup_template(geniv);
-+	err = -ENOENT;
-+	if (!tmpl)
-+		goto kill_larval;
++	reg = winbond_readcfg(winbond->timing, 0x81);
 +
-+	inst = tmpl->alloc(tb);
-+	err = PTR_ERR(inst);
-+	if (IS_ERR(inst))
-+		goto put_tmpl;
++	/* Get the timing data in cycles */
++	if (reg & 0x40)		/* Fast VLB bus, assume 50MHz */
++		ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
++	else
++		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 +
-+	if ((err = crypto_register_instance(tmpl, inst))) {
-+		tmpl->free(inst);
-+		goto put_tmpl;
-+	}
++	active = (FIT(t.active, 3, 17) - 1) & 0x0F;
++	recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
++	timing = (active << 4) | recovery;
++	winbond_writecfg(winbond->timing, timing, reg);
 +
-+	/* Redo the lookup to use the instance we just registered. */
-+	err = -EAGAIN;
++	/* Load the setup timing */
 +
-+put_tmpl:
-+	crypto_tmpl_put(tmpl);
-+kill_larval:
-+	crypto_larval_kill(larval);
-+drop_larval:
-+	crypto_mod_put(larval);
-+out:
-+	crypto_mod_put(alg);
-+	return err;
++	reg = 0x35;
++	if (adev->class != ATA_DEV_ATA)
++		reg |= 0x08;	/* FIFO off */
++	if (!ata_pio_need_iordy(adev))
++		reg |= 0x02;	/* IORDY off */
++	reg |= (FIT(t.setup, 0, 3) << 6);
++	winbond_writecfg(winbond->timing, timing + 1, reg);
 +}
 +
-+static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
-+					     u32 mask)
++static int winbond_port(struct platform_device *dev,
++			struct legacy_probe *lp, struct legacy_data *ld)
 +{
-+	struct crypto_alg *alg;
++	if (devm_request_region(&dev->dev, lp->private, 4, "winbond") == NULL)
++		return -EBUSY;
++	ld->timing = lp->private;
++	return 0;
++}
 +
-+	alg = crypto_alg_mod_lookup(name, type, mask);
-+	if (IS_ERR(alg))
-+		return alg;
++static struct ata_port_operations winbond_port_ops = {
++	.set_piomode	= winbond_set_piomode,
 +
-+	if (alg->cra_type == &crypto_aead_type)
-+		return alg;
++	.tf_load	= ata_tf_load,
++	.tf_read	= ata_tf_read,
++	.check_status 	= ata_check_status,
++	.exec_command	= ata_exec_command,
++	.dev_select 	= ata_std_dev_select,
 +
-+	if (!alg->cra_aead.ivsize)
-+		return alg;
++	.freeze		= ata_bmdma_freeze,
++	.thaw		= ata_bmdma_thaw,
++	.error_handler	= ata_bmdma_error_handler,
++	.post_internal_cmd = ata_bmdma_post_internal_cmd,
++	.cable_detect	= ata_cable_40wire,
 +
-+	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
-+}
++	.qc_prep 	= ata_qc_prep,
++	.qc_issue	= ata_qc_issue_prot,
 +
-+int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
-+		     u32 type, u32 mask)
-+{
-+	struct crypto_alg *alg;
-+	int err;
++	.data_xfer	= vlb32_data_xfer,
 +
-+	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-+	type |= CRYPTO_ALG_TYPE_AEAD;
-+	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-+	mask |= CRYPTO_ALG_TYPE_MASK;
++	.irq_clear	= ata_bmdma_irq_clear,
++	.irq_on		= ata_irq_on,
+ 
++	.port_start	= ata_sff_port_start,
++};
 +
-+	alg = crypto_lookup_aead(name, type, mask);
-+	if (IS_ERR(alg))
-+		return PTR_ERR(alg);
++static struct legacy_controller controllers[] = {
++	{"BIOS",	&legacy_port_ops, 	0x1F,
++						ATA_FLAG_NO_IORDY,	NULL },
++	{"Snooping", 	&simple_port_ops, 	0x1F,
++						0	       ,	NULL },
++	{"PDC20230",	&pdc20230_port_ops,	0x7,
++						ATA_FLAG_NO_IORDY,	NULL },
++	{"HT6560A",	&ht6560a_port_ops,	0x07,
++						ATA_FLAG_NO_IORDY,	NULL },
++	{"HT6560B",	&ht6560b_port_ops,	0x1F,
++						ATA_FLAG_NO_IORDY,	NULL },
++	{"OPTI82C611A",	&opti82c611a_port_ops,	0x0F,
++						0	       ,	NULL },
++	{"OPTI82C46X",	&opti82c46x_port_ops,	0x0F,
++						0	       ,	NULL },
++	{"QDI6500",	&qdi6500_port_ops,	0x07,
++					ATA_FLAG_NO_IORDY,	qdi_port },
++	{"QDI6580",	&qdi6580_port_ops,	0x1F,
++					0	       ,	qdi_port },
++	{"QDI6580DP",	&qdi6580dp_port_ops,	0x1F,
++					0	       ,	qdi_port },
++	{"W83759A",	&winbond_port_ops,	0x1F,
++					0	       ,	winbond_port }
++};
 +
-+	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
-+	crypto_mod_put(alg);
-+	return err;
-+}
-+EXPORT_SYMBOL_GPL(crypto_grab_aead);
++/**
++ *	probe_chip_type		-	Discover controller
++ *	@probe: Probe entry to check
++ *
++ *	Probe an ATA port and identify the type of controller. We don't
++ *	check if the controller appears to be driveless at this point.
++ */
 +
-+struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
++static __init int probe_chip_type(struct legacy_probe *probe)
 +{
-+	struct crypto_tfm *tfm;
-+	int err;
++	int mask = 1 << probe->slot;
 +
-+	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-+	type |= CRYPTO_ALG_TYPE_AEAD;
-+	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-+	mask |= CRYPTO_ALG_TYPE_MASK;
++	if (winbond && (probe->port == 0x1F0 || probe->port == 0x170)) {
++		u8 reg = winbond_readcfg(winbond, 0x81);
++		reg |= 0x80;	/* jumpered mode off */
++		winbond_writecfg(winbond, 0x81, reg);
++		reg = winbond_readcfg(winbond, 0x83);
++		reg |= 0xF0;	/* local control */
++		winbond_writecfg(winbond, 0x83, reg);
++		reg = winbond_readcfg(winbond, 0x85);
++		reg |= 0xF0;	/* programmable timing */
++		winbond_writecfg(winbond, 0x85, reg);
 +
-+	for (;;) {
-+		struct crypto_alg *alg;
++		reg = winbond_readcfg(winbond, 0x81);
 +
-+		alg = crypto_lookup_aead(alg_name, type, mask);
-+		if (IS_ERR(alg)) {
-+			err = PTR_ERR(alg);
-+			goto err;
-+		}
++		if (reg & mask)
++			return W83759A;
++	}
++	if (probe->port == 0x1F0) {
++		unsigned long flags;
++		local_irq_save(flags);
+ 		/* Probes */
+-		inb(0x1F5);
+ 		outb(inb(0x1F2) | 0x80, 0x1F2);
++		inb(0x1F5);
+ 		inb(0x1F2);
+ 		inb(0x3F6);
+ 		inb(0x3F6);
+@@ -760,29 +1168,83 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
+ 
+ 		if ((inb(0x1F2) & 0x80) == 0) {
+ 			/* PDC20230c or 20630 ? */
+-			printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n");
+-				pio_modes = 0x07;
+-			ops = &pdc20230_port_ops;
+-			iordy = ATA_FLAG_NO_IORDY;
++			printk(KERN_INFO  "PDC20230-C/20630 VLB ATA controller"
++							" detected.\n");
+ 			udelay(100);
+ 			inb(0x1F5);
++			local_irq_restore(flags);
++			return PDC20230;
+ 		} else {
+ 			outb(0x55, 0x1F2);
+ 			inb(0x1F2);
+ 			inb(0x1F2);
+-			if (inb(0x1F2) == 0x00) {
+-				printk(KERN_INFO "PDC20230-B VLB ATA controller detected.\n");
+-			}
++			if (inb(0x1F2) == 0x00)
++				printk(KERN_INFO "PDC20230-B VLB ATA "
++						     "controller detected.\n");
++			local_irq_restore(flags);
++			return BIOS;
+ 		}
+ 		local_irq_restore(flags);
+ 	}
+ 
++	if (ht6560a & mask)
++		return HT6560A;
++	if (ht6560b & mask)
++		return HT6560B;
++	if (opti82c611a & mask)
++		return OPTI611A;
++	if (opti82c46x & mask)
++		return OPTI46X;
++	if (autospeed & mask)
++		return SNOOP;
++	return BIOS;
++}
 +
-+		tfm = __crypto_alloc_tfm(alg, type, mask);
-+		if (!IS_ERR(tfm))
-+			return __crypto_aead_cast(tfm);
 +
-+		crypto_mod_put(alg);
-+		err = PTR_ERR(tfm);
++/**
++ *	legacy_init_one		-	attach a legacy interface
++ *	@pl: probe record
++ *
++ *	Register an ISA bus IDE interface. Such interfaces are PIO and we
++ *	assume they do not support IRQ sharing.
++ */
 +
-+err:
-+		if (err != -EAGAIN)
-+			break;
-+		if (signal_pending(current)) {
-+			err = -EINTR;
-+			break;
-+		}
-+	}
++static __init int legacy_init_one(struct legacy_probe *probe)
++{
++	struct legacy_controller *controller = &controllers[probe->type];
++	int pio_modes = controller->pio_mask;
++	unsigned long io = probe->port;
++	u32 mask = (1 << probe->slot);
++	struct ata_port_operations *ops = controller->ops;
++	struct legacy_data *ld = &legacy_data[probe->slot];
++	struct ata_host *host = NULL;
++	struct ata_port *ap;
++	struct platform_device *pdev;
++	struct ata_device *dev;
++	void __iomem *io_addr, *ctrl_addr;
++	u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
++	int ret;
+ 
+-	/* Chip does mode setting by command snooping */
+-	if (ops == &legacy_port_ops && (autospeed & mask))
+-		ops = &simple_port_ops;
++	iordy |= controller->flags;
 +
-+	return ERR_PTR(err);
-+}
-+EXPORT_SYMBOL_GPL(crypto_alloc_aead);
++	pdev = platform_device_register_simple(DRV_NAME, probe->slot, NULL, 0);
++	if (IS_ERR(pdev))
++		return PTR_ERR(pdev);
 +
- MODULE_LICENSE("GPL");
- MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)");
-diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
-index 9401dca..cf30af7 100644
---- a/crypto/aes_generic.c
-+++ b/crypto/aes_generic.c
-@@ -47,11 +47,7 @@
-  * ---------------------------------------------------------------------------
-  */
++	ret = -EBUSY;
++	if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
++	    devm_request_region(&pdev->dev, io + 0x0206, 1,
++							"pata_legacy") == NULL)
++		goto fail;
  
--/* Some changes from the Gladman version:
--    s/RIJNDAEL(e_key)/E_KEY/g
--    s/RIJNDAEL(d_key)/D_KEY/g
--*/
--
-+#include <crypto/aes.h>
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/types.h>
-@@ -59,88 +55,46 @@
- #include <linux/crypto.h>
- #include <asm/byteorder.h>
+ 	ret = -ENOMEM;
++	io_addr = devm_ioport_map(&pdev->dev, io, 8);
++	ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
++	if (!io_addr || !ctrl_addr)
++		goto fail;
++	if (controller->setup)
++		if (controller->setup(pdev, probe, ld) < 0)
++			goto fail;
+ 	host = ata_host_alloc(&pdev->dev, 1);
+ 	if (!host)
+ 		goto fail;
+@@ -795,19 +1257,29 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
+ 	ap->ioaddr.altstatus_addr = ctrl_addr;
+ 	ap->ioaddr.ctl_addr = ctrl_addr;
+ 	ata_std_ports(&ap->ioaddr);
+-	ap->private_data = ld;
++	ap->host->private_data = ld;
  
--#define AES_MIN_KEY_SIZE	16
--#define AES_MAX_KEY_SIZE	32
--
--#define AES_BLOCK_SIZE		16
--
--/*
-- * #define byte(x, nr) ((unsigned char)((x) >> (nr*8))) 
-- */
--static inline u8
--byte(const u32 x, const unsigned n)
-+static inline u8 byte(const u32 x, const unsigned n)
- {
- 	return x >> (n << 3);
- }
+-	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, ctrl);
++	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);
  
--struct aes_ctx {
--	int key_length;
--	u32 buf[120];
--};
--
--#define E_KEY (&ctx->buf[0])
--#define D_KEY (&ctx->buf[60])
+-	ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht);
++	ret = ata_host_activate(host, probe->irq, ata_interrupt, 0,
++								&legacy_sht);
+ 	if (ret)
+ 		goto fail;
 -
- static u8 pow_tab[256] __initdata;
- static u8 log_tab[256] __initdata;
- static u8 sbx_tab[256] __initdata;
- static u8 isb_tab[256] __initdata;
- static u32 rco_tab[10];
--static u32 ft_tab[4][256];
--static u32 it_tab[4][256];
- 
--static u32 fl_tab[4][256];
--static u32 il_tab[4][256];
-+u32 crypto_ft_tab[4][256];
-+u32 crypto_fl_tab[4][256];
-+u32 crypto_it_tab[4][256];
-+u32 crypto_il_tab[4][256];
+-	legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
+ 	ld->platform_dev = pdev;
+-	return 0;
  
--static inline u8 __init
--f_mult (u8 a, u8 b)
-+EXPORT_SYMBOL_GPL(crypto_ft_tab);
-+EXPORT_SYMBOL_GPL(crypto_fl_tab);
-+EXPORT_SYMBOL_GPL(crypto_it_tab);
-+EXPORT_SYMBOL_GPL(crypto_il_tab);
++	/* Nothing found means we drop the port as it's probably not there */
 +
-+static inline u8 __init f_mult(u8 a, u8 b)
- {
- 	u8 aa = log_tab[a], cc = aa + log_tab[b];
- 
- 	return pow_tab[cc + (cc < aa ? 1 : 0)];
++	ret = -ENODEV;
++	ata_link_for_each_dev(dev, &ap->link) {
++		if (!ata_dev_absent(dev)) {
++			legacy_host[probe->slot] = host;
++			ld->platform_dev = pdev;
++			return 0;
++		}
++	}
+ fail:
++	if (host)
++		ata_host_detach(host);
+ 	platform_device_unregister(pdev);
+ 	return ret;
  }
+@@ -818,13 +1290,15 @@ fail:
+  *	@master: set this if we find an ATA master
+  *	@master: set this if we find an ATA secondary
+  *
+- *	A small number of vendors implemented early PCI ATA interfaces on bridge logic
+- *	without the ATA interface being PCI visible. Where we have a matching PCI driver
+- *	we must skip the relevant device here. If we don't know about it then the legacy
+- *	driver is the right driver anyway.
++ *	A small number of vendors implemented early PCI ATA interfaces
++ *	on bridge logic without the ATA interface being PCI visible.
++ *	Where we have a matching PCI driver we must skip the relevant
++ *	device here. If we don't know about it then the legacy driver
++ *	is the right driver anyway.
+  */
  
--#define ff_mult(a,b)    (a && b ? f_mult(a, b) : 0)
--
--#define f_rn(bo, bi, n, k)					\
--    bo[n] =  ft_tab[0][byte(bi[n],0)] ^				\
--             ft_tab[1][byte(bi[(n + 1) & 3],1)] ^		\
--             ft_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
--             ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
--
--#define i_rn(bo, bi, n, k)					\
--    bo[n] =  it_tab[0][byte(bi[n],0)] ^				\
--             it_tab[1][byte(bi[(n + 3) & 3],1)] ^		\
--             it_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
--             it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
--
--#define ls_box(x)				\
--    ( fl_tab[0][byte(x, 0)] ^			\
--      fl_tab[1][byte(x, 1)] ^			\
--      fl_tab[2][byte(x, 2)] ^			\
--      fl_tab[3][byte(x, 3)] )
--
--#define f_rl(bo, bi, n, k)					\
--    bo[n] =  fl_tab[0][byte(bi[n],0)] ^				\
--             fl_tab[1][byte(bi[(n + 1) & 3],1)] ^		\
--             fl_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
--             fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
--
--#define i_rl(bo, bi, n, k)					\
--    bo[n] =  il_tab[0][byte(bi[n],0)] ^				\
--             il_tab[1][byte(bi[(n + 3) & 3],1)] ^		\
--             il_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
--             il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
--
--static void __init
--gen_tabs (void)
-+#define ff_mult(a, b)	(a && b ? f_mult(a, b) : 0)
-+
-+static void __init gen_tabs(void)
+-static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *secondary)
++static void __init legacy_check_special_cases(struct pci_dev *p, int *primary,
++								int *secondary)
  {
- 	u32 i, t;
- 	u8 p, q;
- 
--	/* log and power tables for GF(2**8) finite field with
--	   0x011b as modular polynomial - the simplest primitive
--	   root is 0x03, used here to generate the tables */
-+	/*
-+	 * log and power tables for GF(2**8) finite field with
-+	 * 0x011b as modular polynomial - the simplest primitive
-+	 * root is 0x03, used here to generate the tables
-+	 */
- 
- 	for (i = 0, p = 1; i < 256; ++i) {
- 		pow_tab[i] = (u8) p;
-@@ -169,92 +123,119 @@ gen_tabs (void)
- 		p = sbx_tab[i];
- 
- 		t = p;
--		fl_tab[0][i] = t;
--		fl_tab[1][i] = rol32(t, 8);
--		fl_tab[2][i] = rol32(t, 16);
--		fl_tab[3][i] = rol32(t, 24);
-+		crypto_fl_tab[0][i] = t;
-+		crypto_fl_tab[1][i] = rol32(t, 8);
-+		crypto_fl_tab[2][i] = rol32(t, 16);
-+		crypto_fl_tab[3][i] = rol32(t, 24);
- 
--		t = ((u32) ff_mult (2, p)) |
-+		t = ((u32) ff_mult(2, p)) |
- 		    ((u32) p << 8) |
--		    ((u32) p << 16) | ((u32) ff_mult (3, p) << 24);
-+		    ((u32) p << 16) | ((u32) ff_mult(3, p) << 24);
- 
--		ft_tab[0][i] = t;
--		ft_tab[1][i] = rol32(t, 8);
--		ft_tab[2][i] = rol32(t, 16);
--		ft_tab[3][i] = rol32(t, 24);
-+		crypto_ft_tab[0][i] = t;
-+		crypto_ft_tab[1][i] = rol32(t, 8);
-+		crypto_ft_tab[2][i] = rol32(t, 16);
-+		crypto_ft_tab[3][i] = rol32(t, 24);
- 
- 		p = isb_tab[i];
- 
- 		t = p;
--		il_tab[0][i] = t;
--		il_tab[1][i] = rol32(t, 8);
--		il_tab[2][i] = rol32(t, 16);
--		il_tab[3][i] = rol32(t, 24);
--
--		t = ((u32) ff_mult (14, p)) |
--		    ((u32) ff_mult (9, p) << 8) |
--		    ((u32) ff_mult (13, p) << 16) |
--		    ((u32) ff_mult (11, p) << 24);
--
--		it_tab[0][i] = t;
--		it_tab[1][i] = rol32(t, 8);
--		it_tab[2][i] = rol32(t, 16);
--		it_tab[3][i] = rol32(t, 24);
-+		crypto_il_tab[0][i] = t;
-+		crypto_il_tab[1][i] = rol32(t, 8);
-+		crypto_il_tab[2][i] = rol32(t, 16);
-+		crypto_il_tab[3][i] = rol32(t, 24);
-+
-+		t = ((u32) ff_mult(14, p)) |
-+		    ((u32) ff_mult(9, p) << 8) |
-+		    ((u32) ff_mult(13, p) << 16) |
-+		    ((u32) ff_mult(11, p) << 24);
-+
-+		crypto_it_tab[0][i] = t;
-+		crypto_it_tab[1][i] = rol32(t, 8);
-+		crypto_it_tab[2][i] = rol32(t, 16);
-+		crypto_it_tab[3][i] = rol32(t, 24);
+ 	/* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
+ 	if (p->vendor == 0x1078 && p->device == 0x0000) {
+@@ -840,7 +1314,8 @@ static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *sec
+ 	if (p->vendor == 0x8086 && p->device == 0x1234) {
+ 		u16 r;
+ 		pci_read_config_word(p, 0x6C, &r);
+-		if (r & 0x8000) {	/* ATA port enabled */
++		if (r & 0x8000) {
++			/* ATA port enabled */
+ 			if (r & 0x4000)
+ 				*secondary = 1;
+ 			else
+@@ -850,6 +1325,114 @@ static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *sec
  	}
  }
  
--#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
--
--#define imix_col(y,x)       \
--    u   = star_x(x);        \
--    v   = star_x(u);        \
--    w   = star_x(v);        \
--    t   = w ^ (x);          \
--   (y)  = u ^ v ^ w;        \
--   (y) ^= ror32(u ^ t,  8) ^ \
--          ror32(v ^ t, 16) ^ \
--          ror32(t,24)
--
- /* initialise the key schedule from the user supplied key */
- 
--#define loop4(i)                                    \
--{   t = ror32(t,  8); t = ls_box(t) ^ rco_tab[i];    \
--    t ^= E_KEY[4 * i];     E_KEY[4 * i + 4] = t;    \
--    t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t;    \
--    t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t;    \
--    t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t;    \
--}
--
--#define loop6(i)                                    \
--{   t = ror32(t,  8); t = ls_box(t) ^ rco_tab[i];    \
--    t ^= E_KEY[6 * i];     E_KEY[6 * i + 6] = t;    \
--    t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t;    \
--    t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t;    \
--    t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t;    \
--    t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t;   \
--    t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t;   \
--}
--
--#define loop8(i)                                    \
--{   t = ror32(t,  8); ; t = ls_box(t) ^ rco_tab[i];  \
--    t ^= E_KEY[8 * i];     E_KEY[8 * i + 8] = t;    \
--    t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t;    \
--    t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t;   \
--    t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t;   \
--    t  = E_KEY[8 * i + 4] ^ ls_box(t);    \
--    E_KEY[8 * i + 12] = t;                \
--    t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t;   \
--    t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t;   \
--    t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t;   \
--}
-+#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
- 
--static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
--		       unsigned int key_len)
-+#define imix_col(y,x)	do {		\
-+	u	= star_x(x);		\
-+	v	= star_x(u);		\
-+	w	= star_x(v);		\
-+	t	= w ^ (x);		\
-+	(y)	= u ^ v ^ w;		\
-+	(y)	^= ror32(u ^ t, 8) ^	\
-+		ror32(v ^ t, 16) ^	\
-+		ror32(t, 24);		\
-+} while (0)
++static __init void probe_opti_vlb(void)
++{
++	/* If an OPTI 82C46X is present find out where the channels are */
++	static const char *optis[4] = {
++		"3/463MV", "5MV",
++		"5MVA", "5MVB"
++	};
++	u8 chans = 1;
++	u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
 +
-+#define ls_box(x)		\
-+	crypto_fl_tab[0][byte(x, 0)] ^	\
-+	crypto_fl_tab[1][byte(x, 1)] ^	\
-+	crypto_fl_tab[2][byte(x, 2)] ^	\
-+	crypto_fl_tab[3][byte(x, 3)]
++	opti82c46x = 3;	/* Assume master and slave first */
++	printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n",
++								optis[ctrl]);
++	if (ctrl == 3)
++		chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
++	ctrl = opti_syscfg(0xAC);
++	/* Check enabled and this port is the 465MV port. On the
++	   MVB we may have two channels */
++	if (ctrl & 8) {
++		if (chans == 2) {
++			legacy_probe_add(0x1F0, 14, OPTI46X, 0);
++			legacy_probe_add(0x170, 15, OPTI46X, 0);
++		}
++		if (ctrl & 4)
++			legacy_probe_add(0x170, 15, OPTI46X, 0);
++		else
++			legacy_probe_add(0x1F0, 14, OPTI46X, 0);
++	} else
++		legacy_probe_add(0x1F0, 14, OPTI46X, 0);
++}
 +
-+#define loop4(i)	do {		\
-+	t = ror32(t, 8);		\
-+	t = ls_box(t) ^ rco_tab[i];	\
-+	t ^= ctx->key_enc[4 * i];		\
-+	ctx->key_enc[4 * i + 4] = t;		\
-+	t ^= ctx->key_enc[4 * i + 1];		\
-+	ctx->key_enc[4 * i + 5] = t;		\
-+	t ^= ctx->key_enc[4 * i + 2];		\
-+	ctx->key_enc[4 * i + 6] = t;		\
-+	t ^= ctx->key_enc[4 * i + 3];		\
-+	ctx->key_enc[4 * i + 7] = t;		\
-+} while (0)
++static __init void qdi65_identify_port(u8 r, u8 res, unsigned long port)
++{
++	static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
++	/* Check card type */
++	if ((r & 0xF0) == 0xC0) {
++		/* QD6500: single channel */
++		if (r & 8)
++			/* Disabled ? */
++			return;
++		legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
++								QDI6500, port);
++	}
++	if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
++		/* QD6580: dual channel */
++		if (!request_region(port + 2 , 2, "pata_qdi")) {
++			release_region(port, 2);
++			return;
++		}
++		res = inb(port + 3);
++		/* Single channel mode ? */
++		if (res & 1)
++			legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
++								QDI6580, port);
++		else { /* Dual channel mode */
++			legacy_probe_add(0x1F0, 14, QDI6580DP, port);
++			/* port + 0x02, r & 0x04 */
++			legacy_probe_add(0x170, 15, QDI6580DP, port + 2);
++		}
++		release_region(port + 2, 2);
++	}
++}
 +
-+#define loop6(i)	do {		\
-+	t = ror32(t, 8);		\
-+	t = ls_box(t) ^ rco_tab[i];	\
-+	t ^= ctx->key_enc[6 * i];		\
-+	ctx->key_enc[6 * i + 6] = t;		\
-+	t ^= ctx->key_enc[6 * i + 1];		\
-+	ctx->key_enc[6 * i + 7] = t;		\
-+	t ^= ctx->key_enc[6 * i + 2];		\
-+	ctx->key_enc[6 * i + 8] = t;		\
-+	t ^= ctx->key_enc[6 * i + 3];		\
-+	ctx->key_enc[6 * i + 9] = t;		\
-+	t ^= ctx->key_enc[6 * i + 4];		\
-+	ctx->key_enc[6 * i + 10] = t;		\
-+	t ^= ctx->key_enc[6 * i + 5];		\
-+	ctx->key_enc[6 * i + 11] = t;		\
-+} while (0)
++static __init void probe_qdi_vlb(void)
++{
++	unsigned long flags;
++	static const unsigned long qd_port[2] = { 0x30, 0xB0 };
++	int i;
 +
-+#define loop8(i)	do {			\
-+	t = ror32(t, 8);			\
-+	t = ls_box(t) ^ rco_tab[i];		\
-+	t ^= ctx->key_enc[8 * i];			\
-+	ctx->key_enc[8 * i + 8] = t;			\
-+	t ^= ctx->key_enc[8 * i + 1];			\
-+	ctx->key_enc[8 * i + 9] = t;			\
-+	t ^= ctx->key_enc[8 * i + 2];			\
-+	ctx->key_enc[8 * i + 10] = t;			\
-+	t ^= ctx->key_enc[8 * i + 3];			\
-+	ctx->key_enc[8 * i + 11] = t;			\
-+	t  = ctx->key_enc[8 * i + 4] ^ ls_box(t);	\
-+	ctx->key_enc[8 * i + 12] = t;			\
-+	t ^= ctx->key_enc[8 * i + 5];			\
-+	ctx->key_enc[8 * i + 13] = t;			\
-+	t ^= ctx->key_enc[8 * i + 6];			\
-+	ctx->key_enc[8 * i + 14] = t;			\
-+	t ^= ctx->key_enc[8 * i + 7];			\
-+	ctx->key_enc[8 * i + 15] = t;			\
-+} while (0)
++	/*
++	 *	Check each possible QD65xx base address
++	 */
 +
-+int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-+		unsigned int key_len)
- {
--	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- 	const __le32 *key = (const __le32 *)in_key;
- 	u32 *flags = &tfm->crt_flags;
--	u32 i, t, u, v, w;
-+	u32 i, t, u, v, w, j;
- 
- 	if (key_len % 8) {
- 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-@@ -263,95 +244,113 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- 
- 	ctx->key_length = key_len;
- 
--	E_KEY[0] = le32_to_cpu(key[0]);
--	E_KEY[1] = le32_to_cpu(key[1]);
--	E_KEY[2] = le32_to_cpu(key[2]);
--	E_KEY[3] = le32_to_cpu(key[3]);
-+	ctx->key_dec[key_len + 24] = ctx->key_enc[0] = le32_to_cpu(key[0]);
-+	ctx->key_dec[key_len + 25] = ctx->key_enc[1] = le32_to_cpu(key[1]);
-+	ctx->key_dec[key_len + 26] = ctx->key_enc[2] = le32_to_cpu(key[2]);
-+	ctx->key_dec[key_len + 27] = ctx->key_enc[3] = le32_to_cpu(key[3]);
- 
- 	switch (key_len) {
- 	case 16:
--		t = E_KEY[3];
-+		t = ctx->key_enc[3];
- 		for (i = 0; i < 10; ++i)
--			loop4 (i);
-+			loop4(i);
- 		break;
- 
- 	case 24:
--		E_KEY[4] = le32_to_cpu(key[4]);
--		t = E_KEY[5] = le32_to_cpu(key[5]);
-+		ctx->key_enc[4] = le32_to_cpu(key[4]);
-+		t = ctx->key_enc[5] = le32_to_cpu(key[5]);
- 		for (i = 0; i < 8; ++i)
--			loop6 (i);
-+			loop6(i);
- 		break;
- 
- 	case 32:
--		E_KEY[4] = le32_to_cpu(key[4]);
--		E_KEY[5] = le32_to_cpu(key[5]);
--		E_KEY[6] = le32_to_cpu(key[6]);
--		t = E_KEY[7] = le32_to_cpu(key[7]);
-+		ctx->key_enc[4] = le32_to_cpu(key[4]);
-+		ctx->key_enc[5] = le32_to_cpu(key[5]);
-+		ctx->key_enc[6] = le32_to_cpu(key[6]);
-+		t = ctx->key_enc[7] = le32_to_cpu(key[7]);
- 		for (i = 0; i < 7; ++i)
--			loop8 (i);
-+			loop8(i);
- 		break;
- 	}
- 
--	D_KEY[0] = E_KEY[0];
--	D_KEY[1] = E_KEY[1];
--	D_KEY[2] = E_KEY[2];
--	D_KEY[3] = E_KEY[3];
-+	ctx->key_dec[0] = ctx->key_enc[key_len + 24];
-+	ctx->key_dec[1] = ctx->key_enc[key_len + 25];
-+	ctx->key_dec[2] = ctx->key_enc[key_len + 26];
-+	ctx->key_dec[3] = ctx->key_enc[key_len + 27];
- 
- 	for (i = 4; i < key_len + 24; ++i) {
--		imix_col (D_KEY[i], E_KEY[i]);
-+		j = key_len + 24 - (i & ~3) + (i & 3);
-+		imix_col(ctx->key_dec[j], ctx->key_enc[i]);
- 	}
--
- 	return 0;
- }
-+EXPORT_SYMBOL_GPL(crypto_aes_set_key);
- 
- /* encrypt a block of text */
- 
--#define f_nround(bo, bi, k) \
--    f_rn(bo, bi, 0, k);     \
--    f_rn(bo, bi, 1, k);     \
--    f_rn(bo, bi, 2, k);     \
--    f_rn(bo, bi, 3, k);     \
--    k += 4
--
--#define f_lround(bo, bi, k) \
--    f_rl(bo, bi, 0, k);     \
--    f_rl(bo, bi, 1, k);     \
--    f_rl(bo, bi, 2, k);     \
--    f_rl(bo, bi, 3, k)
-+#define f_rn(bo, bi, n, k)	do {				\
-+	bo[n] = crypto_ft_tab[0][byte(bi[n], 0)] ^			\
-+		crypto_ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^		\
-+		crypto_ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
-+		crypto_ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n);	\
-+} while (0)
++	for (i = 0; i < 2; i++) {
++		unsigned long port = qd_port[i];
++		u8 r, res;
 +
-+#define f_nround(bo, bi, k)	do {\
-+	f_rn(bo, bi, 0, k);	\
-+	f_rn(bo, bi, 1, k);	\
-+	f_rn(bo, bi, 2, k);	\
-+	f_rn(bo, bi, 3, k);	\
-+	k += 4;			\
-+} while (0)
 +
-+#define f_rl(bo, bi, n, k)	do {				\
-+	bo[n] = crypto_fl_tab[0][byte(bi[n], 0)] ^			\
-+		crypto_fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^		\
-+		crypto_fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
-+		crypto_fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n);	\
-+} while (0)
++		if (request_region(port, 2, "pata_qdi")) {
++			/* Check for a card */
++			local_irq_save(flags);
++			/* I have no h/w that needs this delay but it
++			   is present in the historic code */
++			r = inb(port);
++			udelay(1);
++			outb(0x19, port);
++			udelay(1);
++			res = inb(port);
++			udelay(1);
++			outb(r, port);
++			udelay(1);
++			local_irq_restore(flags);
 +
-+#define f_lround(bo, bi, k)	do {\
-+	f_rl(bo, bi, 0, k);	\
-+	f_rl(bo, bi, 1, k);	\
-+	f_rl(bo, bi, 2, k);	\
-+	f_rl(bo, bi, 3, k);	\
-+} while (0)
++			/* Fail */
++			if (res == 0x19) {
++				release_region(port, 2);
++				continue;
++			}
++			/* Passes the presence test */
++			r = inb(port + 1);
++			udelay(1);
++			/* Check port agrees with port set */
++			if ((r & 2) >> 1 == i)
++				qdi65_identify_port(r, res, port);
++			release_region(port, 2);
++		}
++	}
++}
  
- static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- {
--	const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
-+	const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- 	const __le32 *src = (const __le32 *)in;
- 	__le32 *dst = (__le32 *)out;
- 	u32 b0[4], b1[4];
--	const u32 *kp = E_KEY + 4;
-+	const u32 *kp = ctx->key_enc + 4;
-+	const int key_len = ctx->key_length;
+ /**
+  *	legacy_init		-	attach legacy interfaces
+@@ -867,15 +1450,17 @@ static __init int legacy_init(void)
+ 	int ct = 0;
+ 	int primary = 0;
+ 	int secondary = 0;
+-	int last_port = NR_HOST;
++	int pci_present = 0;
++	struct legacy_probe *pl = &probe_list[0];
++	int slot = 0;
  
--	b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0];
--	b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1];
--	b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2];
--	b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3];
-+	b0[0] = le32_to_cpu(src[0]) ^ ctx->key_enc[0];
-+	b0[1] = le32_to_cpu(src[1]) ^ ctx->key_enc[1];
-+	b0[2] = le32_to_cpu(src[2]) ^ ctx->key_enc[2];
-+	b0[3] = le32_to_cpu(src[3]) ^ ctx->key_enc[3];
+ 	struct pci_dev *p = NULL;
  
--	if (ctx->key_length > 24) {
--		f_nround (b1, b0, kp);
--		f_nround (b0, b1, kp);
-+	if (key_len > 24) {
-+		f_nround(b1, b0, kp);
-+		f_nround(b0, b1, kp);
- 	}
+ 	for_each_pci_dev(p) {
+ 		int r;
+-		/* Check for any overlap of the system ATA mappings. Native mode controllers
+-		   stuck on these addresses or some devices in 'raid' mode won't be found by
+-		   the storage class test */
++		/* Check for any overlap of the system ATA mappings. Native
++		   mode controllers stuck on these addresses or some devices
++		   in 'raid' mode won't be found by the storage class test */
+ 		for (r = 0; r < 6; r++) {
+ 			if (pci_resource_start(p, r) == 0x1f0)
+ 				primary = 1;
+@@ -885,49 +1470,39 @@ static __init int legacy_init(void)
+ 		/* Check for special cases */
+ 		legacy_check_special_cases(p, &primary, &secondary);
  
--	if (ctx->key_length > 16) {
--		f_nround (b1, b0, kp);
--		f_nround (b0, b1, kp);
-+	if (key_len > 16) {
-+		f_nround(b1, b0, kp);
-+		f_nround(b0, b1, kp);
+-		/* If PCI bus is present then don't probe for tertiary legacy ports */
+-		if (probe_all == 0)
+-			last_port = 2;
++		/* If PCI bus is present then don't probe for tertiary
++		   legacy ports */
++		pci_present = 1;
  	}
  
--	f_nround (b1, b0, kp);
--	f_nround (b0, b1, kp);
--	f_nround (b1, b0, kp);
--	f_nround (b0, b1, kp);
--	f_nround (b1, b0, kp);
--	f_nround (b0, b1, kp);
--	f_nround (b1, b0, kp);
--	f_nround (b0, b1, kp);
--	f_nround (b1, b0, kp);
--	f_lround (b0, b1, kp);
-+	f_nround(b1, b0, kp);
-+	f_nround(b0, b1, kp);
-+	f_nround(b1, b0, kp);
-+	f_nround(b0, b1, kp);
-+	f_nround(b1, b0, kp);
-+	f_nround(b0, b1, kp);
-+	f_nround(b1, b0, kp);
-+	f_nround(b0, b1, kp);
-+	f_nround(b1, b0, kp);
-+	f_lround(b0, b1, kp);
- 
- 	dst[0] = cpu_to_le32(b0[0]);
- 	dst[1] = cpu_to_le32(b0[1]);
-@@ -361,53 +360,69 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- 
- /* decrypt a block of text */
- 
--#define i_nround(bo, bi, k) \
--    i_rn(bo, bi, 0, k);     \
--    i_rn(bo, bi, 1, k);     \
--    i_rn(bo, bi, 2, k);     \
--    i_rn(bo, bi, 3, k);     \
--    k -= 4
+-	/* If an OPTI 82C46X is present find out where the channels are */
+-	if (opti82c46x) {
+-		static const char *optis[4] = {
+-			"3/463MV", "5MV",
+-			"5MVA", "5MVB"
+-		};
+-		u8 chans = 1;
+-		u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
 -
--#define i_lround(bo, bi, k) \
--    i_rl(bo, bi, 0, k);     \
--    i_rl(bo, bi, 1, k);     \
--    i_rl(bo, bi, 2, k);     \
--    i_rl(bo, bi, 3, k)
-+#define i_rn(bo, bi, n, k)	do {				\
-+	bo[n] = crypto_it_tab[0][byte(bi[n], 0)] ^			\
-+		crypto_it_tab[1][byte(bi[(n + 3) & 3], 1)] ^		\
-+		crypto_it_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
-+		crypto_it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n);	\
-+} while (0)
-+
-+#define i_nround(bo, bi, k)	do {\
-+	i_rn(bo, bi, 0, k);	\
-+	i_rn(bo, bi, 1, k);	\
-+	i_rn(bo, bi, 2, k);	\
-+	i_rn(bo, bi, 3, k);	\
-+	k += 4;			\
-+} while (0)
+-		opti82c46x = 3;	/* Assume master and slave first */
+-		printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", optis[ctrl]);
+-		if (ctrl == 3)
+-			chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
+-		ctrl = opti_syscfg(0xAC);
+-		/* Check enabled and this port is the 465MV port. On the
+-		   MVB we may have two channels */
+-		if (ctrl & 8) {
+-			if (ctrl & 4)
+-				opti82c46x = 2;	/* Slave */
+-			else
+-				opti82c46x = 1;	/* Master */
+-			if (chans == 2)
+-				opti82c46x = 3; /* Master and Slave */
+-		}	/* Slave only */
+-		else if (chans == 1)
+-			opti82c46x = 1;
++	if (winbond == 1)
++		winbond = 0x130;	/* Default port, alt is 1B0 */
 +
-+#define i_rl(bo, bi, n, k)	do {			\
-+	bo[n] = crypto_il_tab[0][byte(bi[n], 0)] ^		\
-+	crypto_il_tab[1][byte(bi[(n + 3) & 3], 1)] ^		\
-+	crypto_il_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
-+	crypto_il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n);	\
-+} while (0)
++	if (primary == 0 || all)
++		legacy_probe_add(0x1F0, 14, UNKNOWN, 0);
++	if (secondary == 0 || all)
++		legacy_probe_add(0x170, 15, UNKNOWN, 0);
 +
-+#define i_lround(bo, bi, k)	do {\
-+	i_rl(bo, bi, 0, k);	\
-+	i_rl(bo, bi, 1, k);	\
-+	i_rl(bo, bi, 2, k);	\
-+	i_rl(bo, bi, 3, k);	\
-+} while (0)
- 
- static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- {
--	const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
-+	const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- 	const __le32 *src = (const __le32 *)in;
- 	__le32 *dst = (__le32 *)out;
- 	u32 b0[4], b1[4];
- 	const int key_len = ctx->key_length;
--	const u32 *kp = D_KEY + key_len + 20;
-+	const u32 *kp = ctx->key_dec + 4;
- 
--	b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24];
--	b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25];
--	b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26];
--	b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27];
-+	b0[0] = le32_to_cpu(src[0]) ^  ctx->key_dec[0];
-+	b0[1] = le32_to_cpu(src[1]) ^  ctx->key_dec[1];
-+	b0[2] = le32_to_cpu(src[2]) ^  ctx->key_dec[2];
-+	b0[3] = le32_to_cpu(src[3]) ^  ctx->key_dec[3];
- 
- 	if (key_len > 24) {
--		i_nround (b1, b0, kp);
--		i_nround (b0, b1, kp);
-+		i_nround(b1, b0, kp);
-+		i_nround(b0, b1, kp);
++	if (probe_all || !pci_present) {
++		/* ISA/VLB extra ports */
++		legacy_probe_add(0x1E8, 11, UNKNOWN, 0);
++		legacy_probe_add(0x168, 10, UNKNOWN, 0);
++		legacy_probe_add(0x1E0, 8, UNKNOWN, 0);
++		legacy_probe_add(0x160, 12, UNKNOWN, 0);
  	}
  
- 	if (key_len > 16) {
--		i_nround (b1, b0, kp);
--		i_nround (b0, b1, kp);
-+		i_nround(b1, b0, kp);
-+		i_nround(b0, b1, kp);
+-	for (i = 0; i < last_port; i++) {
+-		/* Skip primary if we have seen a PCI one */
+-		if (i == 0 && primary == 1)
+-			continue;
+-		/* Skip secondary if we have seen a PCI one */
+-		if (i == 1 && secondary == 1)
++	if (opti82c46x)
++		probe_opti_vlb();
++	if (qdi)
++		probe_qdi_vlb();
++
++	for (i = 0; i < NR_HOST; i++, pl++) {
++		if (pl->port == 0)
+ 			continue;
+-		if (legacy_init_one(i, legacy_port[i],
+-				   legacy_port[i] + 0x0206,
+-				   legacy_irq[i]) == 0)
++		if (pl->type == UNKNOWN)
++			pl->type = probe_chip_type(pl);
++		pl->slot = slot++;
++		if (legacy_init_one(pl) == 0)
+ 			ct++;
  	}
+ 	if (ct != 0)
+@@ -941,11 +1516,8 @@ static __exit void legacy_exit(void)
  
--	i_nround (b1, b0, kp);
--	i_nround (b0, b1, kp);
--	i_nround (b1, b0, kp);
--	i_nround (b0, b1, kp);
--	i_nround (b1, b0, kp);
--	i_nround (b0, b1, kp);
--	i_nround (b1, b0, kp);
--	i_nround (b0, b1, kp);
--	i_nround (b1, b0, kp);
--	i_lround (b0, b1, kp);
-+	i_nround(b1, b0, kp);
-+	i_nround(b0, b1, kp);
-+	i_nround(b1, b0, kp);
-+	i_nround(b0, b1, kp);
-+	i_nround(b1, b0, kp);
-+	i_nround(b0, b1, kp);
-+	i_nround(b1, b0, kp);
-+	i_nround(b0, b1, kp);
-+	i_nround(b1, b0, kp);
-+	i_lround(b0, b1, kp);
- 
- 	dst[0] = cpu_to_le32(b0[0]);
- 	dst[1] = cpu_to_le32(b0[1]);
-@@ -415,14 +430,13 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- 	dst[3] = cpu_to_le32(b0[3]);
- }
- 
+ 	for (i = 0; i < nr_legacy_host; i++) {
+ 		struct legacy_data *ld = &legacy_data[i];
 -
- static struct crypto_alg aes_alg = {
- 	.cra_name		=	"aes",
- 	.cra_driver_name	=	"aes-generic",
- 	.cra_priority		=	100,
- 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
- 	.cra_blocksize		=	AES_BLOCK_SIZE,
--	.cra_ctxsize		=	sizeof(struct aes_ctx),
-+	.cra_ctxsize		=	sizeof(struct crypto_aes_ctx),
- 	.cra_alignmask		=	3,
- 	.cra_module		=	THIS_MODULE,
- 	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
-@@ -430,9 +444,9 @@ static struct crypto_alg aes_alg = {
- 		.cipher = {
- 			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
- 			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
--			.cia_setkey	   	= 	aes_set_key,
--			.cia_encrypt	 	=	aes_encrypt,
--			.cia_decrypt	  	=	aes_decrypt
-+			.cia_setkey		=	crypto_aes_set_key,
-+			.cia_encrypt		=	aes_encrypt,
-+			.cia_decrypt		=	aes_decrypt
- 		}
+ 		ata_host_detach(legacy_host[i]);
+ 		platform_device_unregister(ld->platform_dev);
+-		if (ld->timing)
+-			release_region(ld->timing, 2);
  	}
- };
-diff --git a/crypto/algapi.c b/crypto/algapi.c
-index 8383282..e65cb50 100644
---- a/crypto/algapi.c
-+++ b/crypto/algapi.c
-@@ -472,7 +472,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type)
  }
- EXPORT_SYMBOL_GPL(crypto_check_attr_type);
  
--struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
-+const char *crypto_attr_alg_name(struct rtattr *rta)
+@@ -960,9 +1532,9 @@ module_param(ht6560a, int, 0);
+ module_param(ht6560b, int, 0);
+ module_param(opti82c611a, int, 0);
+ module_param(opti82c46x, int, 0);
++module_param(qdi, int, 0);
+ module_param(pio_mask, int, 0);
+ module_param(iordy_mask, int, 0);
+ 
+ module_init(legacy_init);
+ module_exit(legacy_exit);
+-
+diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
+index 50c56e2..5413ebf 100644
+--- a/drivers/ata/pata_mpc52xx.c
++++ b/drivers/ata/pata_mpc52xx.c
+@@ -364,7 +364,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
  {
- 	struct crypto_attr_alg *alga;
+ 	unsigned int ipb_freq;
+ 	struct resource res_mem;
+-	int ata_irq = NO_IRQ;
++	int ata_irq;
+ 	struct mpc52xx_ata __iomem *ata_regs;
+ 	struct mpc52xx_ata_priv *priv;
+ 	int rv;
+@@ -494,10 +494,8 @@ mpc52xx_ata_resume(struct of_device *op)
  
-@@ -486,7 +486,21 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
- 	alga = RTA_DATA(rta);
- 	alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0;
  
--	return crypto_alg_mod_lookup(alga->name, type, mask);
-+	return alga->name;
-+}
-+EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
+ static struct of_device_id mpc52xx_ata_of_match[] = {
+-	{
+-		.type		= "ata",
+-		.compatible	= "mpc5200-ata",
+-	},
++	{ .compatible = "fsl,mpc5200-ata", },
++	{ .compatible = "mpc5200-ata", },
+ 	{},
+ };
+ 
+diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
+new file mode 100644
+index 0000000..1c1b835
+--- /dev/null
++++ b/drivers/ata/pata_ninja32.c
+@@ -0,0 +1,214 @@
++/*
++ * pata_ninja32.c 	- Ninja32 PATA for new ATA layer
++ *			  (C) 2007 Red Hat Inc
-+ *			  Alan Cox <alan@redhat.com>
++ *
++ * Note: The controller like many controllers has shared timings for
++ * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
++ * in the dma_stop function. Thus we actually don't need a set_dmamode
++ * method as the PIO method is always called and will set the right PIO
++ * timing parameters.
++ *
++ * The Ninja32 Cardbus is not a generic SFF controller. Instead it is
++ * laid out as follows off BAR 0. This is based upon Mark Lord's delkin
++ * driver and the extensive analysis done by the BSD developers, notably
++ * ITOH Yasufumi.
++ *
++ *	Base + 0x00 IRQ Status
++ *	Base + 0x01 IRQ control
++ *	Base + 0x02 Chipset control
++ *	Base + 0x04 VDMA and reset control + wait bits
++ *	Base + 0x08 BMIMBA
++ *	Base + 0x0C DMA Length
++ *	Base + 0x10 Taskfile
++ *	Base + 0x18 BMDMA Status ?
++ *	Base + 0x1C
++ *	Base + 0x1D Bus master control
++ *		bit 0 = enable
++ *		bit 1 = 0 write/1 read
++ *		bit 2 = 1 sgtable
++ *		bit 3 = go
++ *		bit 4-6 wait bits
++ *		bit 7 = done
++ *	Base + 0x1E AltStatus
++ *	Base + 0x1F timing register
++ */
 +
-+struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
-+{
-+	const char *name;
-+	int err;
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/blkdev.h>
++#include <linux/delay.h>
++#include <scsi/scsi_host.h>
++#include <linux/libata.h>
 +
-+	name = crypto_attr_alg_name(rta);
-+	err = PTR_ERR(name);
-+	if (IS_ERR(name))
-+		return ERR_PTR(err);
++#define DRV_NAME "pata_ninja32"
++#define DRV_VERSION "0.0.1"
 +
-+	return crypto_alg_mod_lookup(name, type, mask);
- }
- EXPORT_SYMBOL_GPL(crypto_attr_alg);
- 
-@@ -605,6 +619,53 @@ int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm)
- }
- EXPORT_SYMBOL_GPL(crypto_tfm_in_queue);
- 
-+static inline void crypto_inc_byte(u8 *a, unsigned int size)
++
++/**
++ *	ninja32_set_piomode	-	set initial PIO mode data
++ *	@ap: ATA interface
++ *	@adev: ATA device
++ *
++ *	Called to do the PIO mode setup. Our timing registers are shared
++ *	but we want to set the PIO timing by default.
++ */
++
++static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev)
 +{
-+	u8 *b = (a + size);
-+	u8 c;
++	static u16 pio_timing[5] = {
++		0xd6, 0x85, 0x44, 0x33, 0x13
++	};
++	iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0],
++		 ap->ioaddr.bmdma_addr + 0x1f);
++	ap->private_data = adev;
++}
 +
-+	for (; size; size--) {
-+		c = *--b + 1;
-+		*b = c;
-+		if (c)
-+			break;
++
++static void ninja32_dev_select(struct ata_port *ap, unsigned int device)
++{
++	struct ata_device *adev = &ap->link.device[device];
++	if (ap->private_data != adev) {
++		iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f);
++		ata_std_dev_select(ap, device);
++		ninja32_set_piomode(ap, adev);
 +	}
 +}
 +
-+void crypto_inc(u8 *a, unsigned int size)
-+{
-+	__be32 *b = (__be32 *)(a + size);
-+	u32 c;
++static struct scsi_host_template ninja32_sht = {
++	.module			= THIS_MODULE,
++	.name			= DRV_NAME,
++	.ioctl			= ata_scsi_ioctl,
++	.queuecommand		= ata_scsi_queuecmd,
++	.can_queue		= ATA_DEF_QUEUE,
++	.this_id		= ATA_SHT_THIS_ID,
++	.sg_tablesize		= LIBATA_MAX_PRD,
++	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
++	.emulated		= ATA_SHT_EMULATED,
++	.use_clustering		= ATA_SHT_USE_CLUSTERING,
++	.proc_name		= DRV_NAME,
++	.dma_boundary		= ATA_DMA_BOUNDARY,
++	.slave_configure	= ata_scsi_slave_config,
++	.slave_destroy		= ata_scsi_slave_destroy,
++	.bios_param		= ata_std_bios_param,
++};
 +
-+	for (; size >= 4; size -= 4) {
-+		c = be32_to_cpu(*--b) + 1;
-+		*b = cpu_to_be32(c);
-+		if (c)
-+			return;
-+	}
++static struct ata_port_operations ninja32_port_ops = {
++	.set_piomode	= ninja32_set_piomode,
++	.mode_filter	= ata_pci_default_filter,
 +
-+	crypto_inc_byte(a, size);
-+}
-+EXPORT_SYMBOL_GPL(crypto_inc);
++	.tf_load	= ata_tf_load,
++	.tf_read	= ata_tf_read,
++	.check_status 	= ata_check_status,
++	.exec_command	= ata_exec_command,
++	.dev_select 	= ninja32_dev_select,
 +
-+static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size)
-+{
-+	for (; size; size--)
-+		*a++ ^= *b++;
-+}
++	.freeze		= ata_bmdma_freeze,
++	.thaw		= ata_bmdma_thaw,
++	.error_handler	= ata_bmdma_error_handler,
++	.post_internal_cmd = ata_bmdma_post_internal_cmd,
++	.cable_detect	= ata_cable_40wire,
 +
-+void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
-+{
-+	u32 *a = (u32 *)dst;
-+	u32 *b = (u32 *)src;
++	.bmdma_setup 	= ata_bmdma_setup,
++	.bmdma_start 	= ata_bmdma_start,
++	.bmdma_stop	= ata_bmdma_stop,
++	.bmdma_status 	= ata_bmdma_status,
 +
-+	for (; size >= 4; size -= 4)
-+		*a++ ^= *b++;
++	.qc_prep 	= ata_qc_prep,
++	.qc_issue	= ata_qc_issue_prot,
 +
-+	crypto_xor_byte((u8 *)a, (u8 *)b, size);
-+}
-+EXPORT_SYMBOL_GPL(crypto_xor);
++	.data_xfer	= ata_data_xfer,
 +
- static int __init crypto_algapi_init(void)
- {
- 	crypto_init_proc();
-diff --git a/crypto/api.c b/crypto/api.c
-index 1f5c724..a2496d1 100644
---- a/crypto/api.c
-+++ b/crypto/api.c
-@@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type,
- 	return alg;
- }
- 
--static void crypto_larval_kill(struct crypto_alg *alg)
-+void crypto_larval_kill(struct crypto_alg *alg)
- {
- 	struct crypto_larval *larval = (void *)alg;
- 
-@@ -147,6 +147,7 @@ static void crypto_larval_kill(struct crypto_alg *alg)
- 	complete_all(&larval->completion);
- 	crypto_alg_put(alg);
- }
-+EXPORT_SYMBOL_GPL(crypto_larval_kill);
- 
- static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
- {
-@@ -176,11 +177,9 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
- 	return alg;
- }
- 
--struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
-+struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
- {
- 	struct crypto_alg *alg;
--	struct crypto_alg *larval;
--	int ok;
- 
- 	if (!name)
- 		return ERR_PTR(-ENOENT);
-@@ -193,7 +192,17 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
- 	if (alg)
- 		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
- 
--	larval = crypto_larval_alloc(name, type, mask);
-+	return crypto_larval_alloc(name, type, mask);
-+}
-+EXPORT_SYMBOL_GPL(crypto_larval_lookup);
++	.irq_handler	= ata_interrupt,
++	.irq_clear	= ata_bmdma_irq_clear,
++	.irq_on		= ata_irq_on,
 +
-+struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
-+{
-+	struct crypto_alg *alg;
-+	struct crypto_alg *larval;
-+	int ok;
++	.port_start	= ata_sff_port_start,
++};
 +
-+	larval = crypto_larval_lookup(name, type, mask);
- 	if (IS_ERR(larval) || !crypto_is_larval(larval))
- 		return larval;
- 
-diff --git a/crypto/authenc.c b/crypto/authenc.c
-index 126a529..ed8ac5a 100644
---- a/crypto/authenc.c
-+++ b/crypto/authenc.c
-@@ -10,22 +10,21 @@
-  *
-  */
- 
--#include <crypto/algapi.h>
-+#include <crypto/aead.h>
-+#include <crypto/internal/skcipher.h>
-+#include <crypto/authenc.h>
-+#include <crypto/scatterwalk.h>
- #include <linux/err.h>
- #include <linux/init.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
-+#include <linux/rtnetlink.h>
- #include <linux/slab.h>
- #include <linux/spinlock.h>
- 
--#include "scatterwalk.h"
--
- struct authenc_instance_ctx {
- 	struct crypto_spawn auth;
--	struct crypto_spawn enc;
--
--	unsigned int authsize;
--	unsigned int enckeylen;
-+	struct crypto_skcipher_spawn enc;
- };
- 
- struct crypto_authenc_ctx {
-@@ -37,19 +36,31 @@ struct crypto_authenc_ctx {
- static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
- 				 unsigned int keylen)
- {
--	struct authenc_instance_ctx *ictx =
--		crypto_instance_ctx(crypto_aead_alg_instance(authenc));
--	unsigned int enckeylen = ictx->enckeylen;
- 	unsigned int authkeylen;
-+	unsigned int enckeylen;
- 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- 	struct crypto_hash *auth = ctx->auth;
- 	struct crypto_ablkcipher *enc = ctx->enc;
-+	struct rtattr *rta = (void *)key;
-+	struct crypto_authenc_key_param *param;
- 	int err = -EINVAL;
- 
--	if (keylen < enckeylen) {
--		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
--		goto out;
--	}
-+	if (!RTA_OK(rta, keylen))
-+		goto badkey;
-+	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-+		goto badkey;
-+	if (RTA_PAYLOAD(rta) < sizeof(*param))
-+		goto badkey;
++static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
++{
++	struct ata_host *host;
++	struct ata_port *ap;
++	void __iomem *base;
++	int rc;
 +
-+	param = RTA_DATA(rta);
-+	enckeylen = be32_to_cpu(param->enckeylen);
++	host = ata_host_alloc(&dev->dev, 1);
++	if (!host)
++		return -ENOMEM;
++	ap = host->ports[0];
 +
-+	key += RTA_ALIGN(rta->rta_len);
-+	keylen -= RTA_ALIGN(rta->rta_len);
++	/* Set up the PCI device */
++	rc = pcim_enable_device(dev);
++	if (rc)
++		return rc;
++	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
++	if (rc == -EBUSY)
++		pcim_pin_device(dev);
++	if (rc)
++		return rc;
 +
-+	if (keylen < enckeylen)
-+		goto badkey;
++	host->iomap = pcim_iomap_table(dev);
++	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
++	if (rc)
++		return rc;
++	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
++	if (rc)
++		return rc;
++	pci_set_master(dev);
 +
- 	authkeylen = keylen - enckeylen;
- 
- 	crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
-@@ -71,21 +82,38 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
- 
- out:
- 	return err;
++	/* Set up the register mappings */
++	base = host->iomap[0];
++	if (!base)
++		return -ENOMEM;
++	ap->ops = &ninja32_port_ops;
++	ap->pio_mask = 0x1F;
++	ap->flags |= ATA_FLAG_SLAVE_POSS;
 +
-+badkey:
-+	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+	goto out;
- }
- 
--static int crypto_authenc_hash(struct aead_request *req)
-+static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
-+			  int chain)
-+{
-+	if (chain) {
-+		head->length += sg->length;
-+		sg = scatterwalk_sg_next(sg);
-+	}
++	ap->ioaddr.cmd_addr = base + 0x10;
++	ap->ioaddr.ctl_addr = base + 0x1E;
++	ap->ioaddr.altstatus_addr = base + 0x1E;
++	ap->ioaddr.bmdma_addr = base;
++	ata_std_ports(&ap->ioaddr);
 +
-+	if (sg)
-+		scatterwalk_sg_chain(head, 2, sg);
-+	else
-+		sg_mark_end(head);
++	iowrite8(0x05, base + 0x01);	/* Enable interrupt lines */
++	iowrite8(0xB3, base + 0x02);	/* Burst, ?? setup */
++	iowrite8(0x00, base + 0x04);	/* WAIT0 ? */
++	/* FIXME: Should we disable them at remove ? */
++	return ata_host_activate(host, dev->irq, ata_interrupt,
++				 IRQF_SHARED, &ninja32_sht);
 +}
 +
-+static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags,
-+			       struct scatterlist *cipher,
-+			       unsigned int cryptlen)
- {
- 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
--	struct authenc_instance_ctx *ictx =
--		crypto_instance_ctx(crypto_aead_alg_instance(authenc));
- 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- 	struct crypto_hash *auth = ctx->auth;
- 	struct hash_desc desc = {
- 		.tfm = auth,
-+		.flags = aead_request_flags(req) & flags,
- 	};
- 	u8 *hash = aead_request_ctx(req);
--	struct scatterlist *dst = req->dst;
--	unsigned int cryptlen = req->cryptlen;
- 	int err;
- 
- 	hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), 
-@@ -100,7 +128,7 @@ static int crypto_authenc_hash(struct aead_request *req)
- 	if (err)
- 		goto auth_unlock;
- 
--	err = crypto_hash_update(&desc, dst, cryptlen);
-+	err = crypto_hash_update(&desc, cipher, cryptlen);
- 	if (err)
- 		goto auth_unlock;
- 
-@@ -109,17 +137,53 @@ auth_unlock:
- 	spin_unlock_bh(&ctx->auth_lock);
- 
- 	if (err)
--		return err;
-+		return ERR_PTR(err);
++static const struct pci_device_id ninja32[] = {
++	{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
++	{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
++	{ },
++};
 +
-+	return hash;
++static struct pci_driver ninja32_pci_driver = {
++	.name 		= DRV_NAME,
++	.id_table	= ninja32,
++	.probe 		= ninja32_init_one,
++	.remove		= ata_pci_remove_one
++};
++
++static int __init ninja32_init(void)
++{
++	return pci_register_driver(&ninja32_pci_driver);
 +}
- 
--	scatterwalk_map_and_copy(hash, dst, cryptlen, ictx->authsize, 1);
-+static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
-+				 unsigned int flags)
++
++static void __exit ninja32_exit(void)
 +{
-+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-+	struct scatterlist *dst = req->dst;
-+	struct scatterlist cipher[2];
-+	struct page *dstp;
-+	unsigned int ivsize = crypto_aead_ivsize(authenc);
-+	unsigned int cryptlen;
-+	u8 *vdst;
-+	u8 *hash;
++	pci_unregister_driver(&ninja32_pci_driver);
++}
 +
-+	dstp = sg_page(dst);
-+	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
++MODULE_AUTHOR("Alan Cox");
++MODULE_DESCRIPTION("low-level driver for Ninja32 ATA");
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(pci, ninja32);
++MODULE_VERSION(DRV_VERSION);
 +
-+	sg_init_table(cipher, 2);
-+	sg_set_buf(cipher, iv, ivsize);
-+	authenc_chain(cipher, dst, vdst == iv + ivsize);
++module_init(ninja32_init);
++module_exit(ninja32_exit);
+diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
+new file mode 100644
+index 0000000..938f48a
+--- /dev/null
++++ b/drivers/ata/pata_of_platform.c
+@@ -0,0 +1,114 @@
++/*
++ * OF-platform PATA driver
++ *
++ * Copyright (c) 2007  MontaVista Software, Inc.
-+ *                     Anton Vorontsov <avorontsov@ru.mvista.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ */
 +
-+	cryptlen = req->cryptlen + ivsize;
-+	hash = crypto_authenc_hash(req, flags, cipher, cryptlen);
-+	if (IS_ERR(hash))
-+		return PTR_ERR(hash);
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/pata_platform.h>
 +
-+	scatterwalk_map_and_copy(hash, cipher, cryptlen,
-+				 crypto_aead_authsize(authenc), 1);
- 	return 0;
- }
- 
- static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
- 					int err)
- {
--	if (!err)
--		err = crypto_authenc_hash(req->data);
-+	if (!err) {
-+		struct aead_request *areq = req->data;
-+		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-+		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-+		struct ablkcipher_request *abreq = aead_request_ctx(areq);
-+		u8 *iv = (u8 *)(abreq + 1) +
-+			 crypto_ablkcipher_reqsize(ctx->enc);
++static int __devinit pata_of_platform_probe(struct of_device *ofdev,
++					    const struct of_device_id *match)
++{
++	int ret;
++	struct device_node *dn = ofdev->node;
++	struct resource io_res;
++	struct resource ctl_res;
++	struct resource irq_res;
++	unsigned int reg_shift = 0;
++	int pio_mode = 0;
++	int pio_mask;
++	const u32 *prop;
 +
-+		err = crypto_authenc_genicv(areq, iv, 0);
++	ret = of_address_to_resource(dn, 0, &io_res);
++	if (ret) {
++		dev_err(&ofdev->dev, "can't get IO address from "
++			"device tree\n");
++		return -EINVAL;
 +	}
- 
- 	aead_request_complete(req->data, err);
- }
-@@ -129,72 +193,99 @@ static int crypto_authenc_encrypt(struct aead_request *req)
- 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
- 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- 	struct ablkcipher_request *abreq = aead_request_ctx(req);
-+	struct crypto_ablkcipher *enc = ctx->enc;
-+	struct scatterlist *dst = req->dst;
-+	unsigned int cryptlen = req->cryptlen;
-+	u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc);
- 	int err;
- 
--	ablkcipher_request_set_tfm(abreq, ctx->enc);
-+	ablkcipher_request_set_tfm(abreq, enc);
- 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- 					crypto_authenc_encrypt_done, req);
--	ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen,
--				     req->iv);
-+	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
 +
-+	memcpy(iv, req->iv, crypto_aead_ivsize(authenc));
- 
- 	err = crypto_ablkcipher_encrypt(abreq);
- 	if (err)
- 		return err;
- 
--	return crypto_authenc_hash(req);
-+	return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
- }
- 
--static int crypto_authenc_verify(struct aead_request *req)
-+static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
-+					   int err)
- {
--	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
--	struct authenc_instance_ctx *ictx =
--		crypto_instance_ctx(crypto_aead_alg_instance(authenc));
-+	if (!err) {
-+		struct aead_givcrypt_request *greq = req->data;
++	if (of_device_is_compatible(dn, "electra-ide")) {
++		/* Altstatus is really at offset 0x3f6 from the primary window
++		 * on electra-ide. Adjust ctl_res and io_res accordingly.
++		 */
++		ctl_res = io_res;
++		ctl_res.start = ctl_res.start+0x3f6;
++		io_res.end = ctl_res.start-1;
++	} else {
++		ret = of_address_to_resource(dn, 1, &ctl_res);
++		if (ret) {
++			dev_err(&ofdev->dev, "can't get CTL address from "
++				"device tree\n");
++			return -EINVAL;
++		}
++	}
 +
-+		err = crypto_authenc_genicv(&greq->areq, greq->giv, 0);
++	ret = of_irq_to_resource(dn, 0, &irq_res);
++	if (ret == NO_IRQ)
++		irq_res.start = irq_res.end = -1;
++	else
++		irq_res.flags = 0;
++
++	prop = of_get_property(dn, "reg-shift", NULL);
++	if (prop)
++		reg_shift = *prop;
++
++	prop = of_get_property(dn, "pio-mode", NULL);
++	if (prop) {
++		pio_mode = *prop;
++		if (pio_mode > 6) {
++			dev_err(&ofdev->dev, "invalid pio-mode\n");
++			return -EINVAL;
++		}
++	} else {
++		dev_info(&ofdev->dev, "pio-mode unspecified, assuming PIO0\n");
 +	}
 +
-+	aead_request_complete(req->data, err);
++	pio_mask = 1 << pio_mode;
++	pio_mask |= (1 << pio_mode) - 1;
++
++	return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, &irq_res,
++				     reg_shift, pio_mask);
 +}
 +
-+static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
++static int __devexit pata_of_platform_remove(struct of_device *ofdev)
 +{
-+	struct crypto_aead *authenc = aead_givcrypt_reqtfm(req);
- 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
--	struct crypto_hash *auth = ctx->auth;
--	struct hash_desc desc = {
--		.tfm = auth,
--		.flags = aead_request_flags(req),
--	};
--	u8 *ohash = aead_request_ctx(req);
--	u8 *ihash;
--	struct scatterlist *src = req->src;
--	unsigned int cryptlen = req->cryptlen;
--	unsigned int authsize;
-+	struct aead_request *areq = &req->areq;
-+	struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-+	u8 *iv = req->giv;
- 	int err;
- 
--	ohash = (u8 *)ALIGN((unsigned long)ohash + crypto_hash_alignmask(auth), 
--			    crypto_hash_alignmask(auth) + 1);
--	ihash = ohash + crypto_hash_digestsize(auth);
--
--	spin_lock_bh(&ctx->auth_lock);
--	err = crypto_hash_init(&desc);
--	if (err)
--		goto auth_unlock;
-+	skcipher_givcrypt_set_tfm(greq, ctx->enc);
-+	skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
-+				       crypto_authenc_givencrypt_done, areq);
-+	skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
-+				    areq->iv);
-+	skcipher_givcrypt_set_giv(greq, iv, req->seq);
- 
--	err = crypto_hash_update(&desc, req->assoc, req->assoclen);
-+	err = crypto_skcipher_givencrypt(greq);
- 	if (err)
--		goto auth_unlock;
-+		return err;
- 
--	err = crypto_hash_update(&desc, src, cryptlen);
--	if (err)
--		goto auth_unlock;
-+	return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
++	return __pata_platform_remove(&ofdev->dev);
 +}
- 
--	err = crypto_hash_final(&desc, ohash);
--auth_unlock:
--	spin_unlock_bh(&ctx->auth_lock);
-+static int crypto_authenc_verify(struct aead_request *req,
-+				 struct scatterlist *cipher,
-+				 unsigned int cryptlen)
++
++static struct of_device_id pata_of_platform_match[] = {
++	{ .compatible = "ata-generic", },
++	{ .compatible = "electra-ide", },
++	{},
++};
++MODULE_DEVICE_TABLE(of, pata_of_platform_match);
++
++static struct of_platform_driver pata_of_platform_driver = {
++	.name		= "pata_of_platform",
++	.match_table	= pata_of_platform_match,
++	.probe		= pata_of_platform_probe,
++	.remove		= __devexit_p(pata_of_platform_remove),
++};
++
++static int __init pata_of_platform_init(void)
 +{
-+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-+	u8 *ohash;
-+	u8 *ihash;
-+	unsigned int authsize;
++	return of_register_platform_driver(&pata_of_platform_driver);
++}
++module_init(pata_of_platform_init);
++
++static void __exit pata_of_platform_exit(void)
++{
++	of_unregister_platform_driver(&pata_of_platform_driver);
++}
++module_exit(pata_of_platform_exit);
++
++MODULE_DESCRIPTION("OF-platform PATA driver");
++MODULE_AUTHOR("Anton Vorontsov <avorontsov at ru.mvista.com>");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
+index fd36099..3e7f6a9 100644
+--- a/drivers/ata/pata_pcmcia.c
++++ b/drivers/ata/pata_pcmcia.c
+@@ -42,7 +42,7 @@
  
--	if (err)
--		return err;
-+	ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher,
-+				    cryptlen);
-+	if (IS_ERR(ohash))
-+		return PTR_ERR(ohash);
  
--	authsize = ictx->authsize;
--	scatterwalk_map_and_copy(ihash, src, cryptlen, authsize, 0);
--	return memcmp(ihash, ohash, authsize) ? -EINVAL : 0;
-+	authsize = crypto_aead_authsize(authenc);
-+	ihash = ohash + authsize;
-+	scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0);
-+	return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
+ #define DRV_NAME "pata_pcmcia"
+-#define DRV_VERSION "0.3.2"
++#define DRV_VERSION "0.3.3"
+ 
+ /*
+  *	Private data structure to glue stuff together
+@@ -86,6 +86,47 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
+ 	return ata_do_set_mode(link, r_failed_dev);
  }
  
--static void crypto_authenc_decrypt_done(struct crypto_async_request *req,
--					int err)
-+static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
-+				  unsigned int cryptlen)
- {
--	aead_request_complete(req->data, err);
-+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-+	struct scatterlist *src = req->src;
-+	struct scatterlist cipher[2];
-+	struct page *srcp;
-+	unsigned int ivsize = crypto_aead_ivsize(authenc);
-+	u8 *vsrc;
++/**
++ *	pcmcia_set_mode_8bit	-	PCMCIA specific mode setup
++ *	@link: link
++ *	@r_failed_dev: Return pointer for failed device
++ *
++ *	For the simple emulated 8bit stuff the less we do the better.
++ */
 +
-+	srcp = sg_page(src);
-+	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
++static int pcmcia_set_mode_8bit(struct ata_link *link,
++				struct ata_device **r_failed_dev)
++{
++	return 0;
++}
 +
-+	sg_init_table(cipher, 2);
-+	sg_set_buf(cipher, iv, ivsize);
-+	authenc_chain(cipher, src, vsrc == iv + ivsize);
++/**
++ *	ata_data_xfer_8bit	 -	Transfer data by 8bit PIO
++ *	@dev: device to target
++ *	@buf: data buffer
++ *	@buflen: buffer length
++ *	@rw: read/write
++ *
++ *	Transfer data from/to the device data register by 8 bit PIO.
++ *
++ *	LOCKING:
++ *	Inherited from caller.
++ */
 +
-+	return crypto_authenc_verify(req, cipher, cryptlen + ivsize);
- }
- 
- static int crypto_authenc_decrypt(struct aead_request *req)
-@@ -202,17 +293,23 @@ static int crypto_authenc_decrypt(struct aead_request *req)
- 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
- 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- 	struct ablkcipher_request *abreq = aead_request_ctx(req);
-+	unsigned int cryptlen = req->cryptlen;
-+	unsigned int authsize = crypto_aead_authsize(authenc);
-+	u8 *iv = req->iv;
- 	int err;
++static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
++				unsigned char *buf, unsigned int buflen, int rw)
++{
++	struct ata_port *ap = dev->link->ap;
++
++	if (rw == READ)
++		ioread8_rep(ap->ioaddr.data_addr, buf, buflen);
++	else
++		iowrite8_rep(ap->ioaddr.data_addr, buf, buflen);
++
++	return buflen;
++}
++
++
+ static struct scsi_host_template pcmcia_sht = {
+ 	.module			= THIS_MODULE,
+ 	.name			= DRV_NAME,
+@@ -129,6 +170,31 @@ static struct ata_port_operations pcmcia_port_ops = {
+ 	.port_start	= ata_sff_port_start,
+ };
  
--	err = crypto_authenc_verify(req);
-+	if (cryptlen < authsize)
-+		return -EINVAL;
-+	cryptlen -= authsize;
++static struct ata_port_operations pcmcia_8bit_port_ops = {
++	.set_mode	= pcmcia_set_mode_8bit,
++	.tf_load	= ata_tf_load,
++	.tf_read	= ata_tf_read,
++	.check_status 	= ata_check_status,
++	.exec_command	= ata_exec_command,
++	.dev_select 	= ata_std_dev_select,
 +
-+	err = crypto_authenc_iverify(req, iv, cryptlen);
- 	if (err)
- 		return err;
++	.freeze		= ata_bmdma_freeze,
++	.thaw		= ata_bmdma_thaw,
++	.error_handler	= ata_bmdma_error_handler,
++	.post_internal_cmd = ata_bmdma_post_internal_cmd,
++	.cable_detect	= ata_cable_40wire,
++
++	.qc_prep 	= ata_qc_prep,
++	.qc_issue	= ata_qc_issue_prot,
++
++	.data_xfer	= ata_data_xfer_8bit,
++
++	.irq_clear	= ata_bmdma_irq_clear,
++	.irq_on		= ata_irq_on,
++
++	.port_start	= ata_sff_port_start,
++};
++
+ #define CS_CHECK(fn, ret) \
+ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
  
- 	ablkcipher_request_set_tfm(abreq, ctx->enc);
- 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
--					crypto_authenc_decrypt_done, req);
--	ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen,
--				     req->iv);
-+					req->base.complete, req->base.data);
-+	ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
+@@ -153,9 +219,12 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
+ 		cistpl_cftable_entry_t dflt;
+ 	} *stk = NULL;
+ 	cistpl_cftable_entry_t *cfg;
+-	int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM;
++	int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM, p;
+ 	unsigned long io_base, ctl_base;
+ 	void __iomem *io_addr, *ctl_addr;
++	int n_ports = 1;
++
++	struct ata_port_operations *ops = &pcmcia_port_ops;
  
- 	return crypto_ablkcipher_decrypt(abreq);
- }
-@@ -224,19 +321,13 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
- 	struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
- 	struct crypto_hash *auth;
- 	struct crypto_ablkcipher *enc;
--	unsigned int digestsize;
- 	int err;
+ 	info = kzalloc(sizeof(*info), GFP_KERNEL);
+ 	if (info == NULL)
+@@ -282,27 +351,32 @@ next_entry:
+ 	/* FIXME: Could be more ports at base + 0x10 but we only deal with
+ 	   one right now */
+ 	if (pdev->io.NumPorts1 >= 0x20)
+-		printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
++		n_ports = 2;
  
- 	auth = crypto_spawn_hash(&ictx->auth);
- 	if (IS_ERR(auth))
- 		return PTR_ERR(auth);
++	if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620)
++		ops = &pcmcia_8bit_port_ops;
+ 	/*
+ 	 *	Having done the PCMCIA plumbing the ATA side is relatively
+ 	 *	sane.
+ 	 */
+ 	ret = -ENOMEM;
+-	host = ata_host_alloc(&pdev->dev, 1);
++	host = ata_host_alloc(&pdev->dev, n_ports);
+ 	if (!host)
+ 		goto failed;
+-	ap = host->ports[0];
  
--	err = -EINVAL;
--	digestsize = crypto_hash_digestsize(auth);
--	if (ictx->authsize > digestsize)
--		goto err_free_hash;
--
--	enc = crypto_spawn_ablkcipher(&ictx->enc);
-+	enc = crypto_spawn_skcipher(&ictx->enc);
- 	err = PTR_ERR(enc);
- 	if (IS_ERR(enc))
- 		goto err_free_hash;
-@@ -246,9 +337,10 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
- 	tfm->crt_aead.reqsize = max_t(unsigned int,
- 				      (crypto_hash_alignmask(auth) &
- 				       ~(crypto_tfm_ctx_alignment() - 1)) +
--				      digestsize * 2,
--				      sizeof(struct ablkcipher_request) +
--				      crypto_ablkcipher_reqsize(enc));
-+				      crypto_hash_digestsize(auth) * 2,
-+				      sizeof(struct skcipher_givcrypt_request) +
-+				      crypto_ablkcipher_reqsize(enc) +
-+				      crypto_ablkcipher_ivsize(enc));
+-	ap->ops = &pcmcia_port_ops;
+-	ap->pio_mask = 1;		/* ISA so PIO 0 cycles */
+-	ap->flags |= ATA_FLAG_SLAVE_POSS;
+-	ap->ioaddr.cmd_addr = io_addr;
+-	ap->ioaddr.altstatus_addr = ctl_addr;
+-	ap->ioaddr.ctl_addr = ctl_addr;
+-	ata_std_ports(&ap->ioaddr);
++	for (p = 0; p < n_ports; p++) {
++		ap = host->ports[p];
  
- 	spin_lock_init(&ctx->auth_lock);
+-	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
++		ap->ops = ops;
++		ap->pio_mask = 1;		/* ISA so PIO 0 cycles */
++		ap->flags |= ATA_FLAG_SLAVE_POSS;
++		ap->ioaddr.cmd_addr = io_addr + 0x10 * p;
++		ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p;
++		ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p;
++		ata_std_ports(&ap->ioaddr);
++
++		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
++	}
  
-@@ -269,75 +361,74 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
+ 	/* activate */
+ 	ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt,
+@@ -360,6 +434,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
+ 	PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
+ 	PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
+ 	PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),	/* SanDisk CFA */
++	PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), 	/* TI emulated */
+ 	PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000),	/* Toshiba */
+ 	PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
+ 	PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000),	/* Samsung */
+diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
+index 2622577..028af5d 100644
+--- a/drivers/ata/pata_pdc2027x.c
++++ b/drivers/ata/pata_pdc2027x.c
+@@ -348,7 +348,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
+ 	ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
+ 			  ATA_ID_PROD_LEN + 1);
+ 	/* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
+-	if (strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
++	if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
+ 		mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
  
- static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
- {
-+	struct crypto_attr_type *algt;
- 	struct crypto_instance *inst;
- 	struct crypto_alg *auth;
- 	struct crypto_alg *enc;
- 	struct authenc_instance_ctx *ctx;
--	unsigned int authsize;
--	unsigned int enckeylen;
-+	const char *enc_name;
- 	int err;
+ 	return ata_pci_default_filter(adev, mask);
+diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
+index 6c9689b..3ed8667 100644
+--- a/drivers/ata/pata_pdc202xx_old.c
++++ b/drivers/ata/pata_pdc202xx_old.c
+@@ -168,8 +168,7 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
+ 	pdc202xx_set_dmamode(ap, qc->dev);
  
--	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD);
--	if (err)
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
- 		return ERR_PTR(err);
+ 	/* Cases the state machine will not complete correctly without help */
+-	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATA_PROT_ATAPI_DMA)
+-	{
++	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATAPI_PROT_DMA) {
+ 		len = qc->nbytes / 2;
  
-+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-+		return ERR_PTR(-EINVAL);
-+
- 	auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- 			       CRYPTO_ALG_TYPE_HASH_MASK);
- 	if (IS_ERR(auth))
- 		return ERR_PTR(PTR_ERR(auth));
+ 		if (tf->flags & ATA_TFLAG_WRITE)
+@@ -208,7 +207,7 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
+ 	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
  
--	err = crypto_attr_u32(tb[2], &authsize);
--	inst = ERR_PTR(err);
--	if (err)
--		goto out_put_auth;
--
--	enc = crypto_attr_alg(tb[3], CRYPTO_ALG_TYPE_BLKCIPHER,
--			      CRYPTO_ALG_TYPE_MASK);
--	inst = ERR_PTR(PTR_ERR(enc));
--	if (IS_ERR(enc))
-+	enc_name = crypto_attr_alg_name(tb[2]);
-+	err = PTR_ERR(enc_name);
-+	if (IS_ERR(enc_name))
- 		goto out_put_auth;
+ 	/* Cases the state machine will not complete correctly */
+-	if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
++	if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
+ 		iowrite32(0, atapi_reg);
+ 		iowrite8(ioread8(clock) & ~sel66, clock);
+ 	}
+diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
+index ac03a90..224bb6c 100644
+--- a/drivers/ata/pata_platform.c
++++ b/drivers/ata/pata_platform.c
+@@ -93,14 +93,9 @@ static struct ata_port_operations pata_platform_port_ops = {
+ };
+ 
+ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
+-				     struct pata_platform_info *info)
++				     unsigned int shift)
+ {
+-	unsigned int shift = 0;
+-
+ 	/* Fixup the port shift for platforms that need it */
+-	if (info && info->ioport_shift)
+-		shift = info->ioport_shift;
+-
+ 	ioaddr->data_addr	= ioaddr->cmd_addr + (ATA_REG_DATA    << shift);
+ 	ioaddr->error_addr	= ioaddr->cmd_addr + (ATA_REG_ERR     << shift);
+ 	ioaddr->feature_addr	= ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
+@@ -114,8 +109,13 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
+ }
  
--	err = crypto_attr_u32(tb[4], &enckeylen);
--	if (err)
--		goto out_put_enc;
+ /**
+- *	pata_platform_probe		-	attach a platform interface
+- *	@pdev: platform device
++ *	__pata_platform_probe		-	attach a platform interface
++ *	@dev: device
++ *	@io_res: Resource representing I/O base
++ *	@ctl_res: Resource representing CTL base
++ *	@irq_res: Resource representing IRQ and its flags
++ *	@ioport_shift: I/O port shift
++ *	@__pio_mask: PIO mask
+  *
+  *	Register a platform bus IDE interface. Such interfaces are PIO and we
+  *	assume do not support IRQ sharing.
+@@ -135,42 +135,18 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
+  *
+  *	If no IRQ resource is present, PIO polling mode is used instead.
+  */
+-static int __devinit pata_platform_probe(struct platform_device *pdev)
++int __devinit __pata_platform_probe(struct device *dev,
++				    struct resource *io_res,
++				    struct resource *ctl_res,
++				    struct resource *irq_res,
++				    unsigned int ioport_shift,
++				    int __pio_mask)
+ {
+-	struct resource *io_res, *ctl_res;
+ 	struct ata_host *host;
+ 	struct ata_port *ap;
+-	struct pata_platform_info *pp_info;
+ 	unsigned int mmio;
+-	int irq;
 -
- 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- 	err = -ENOMEM;
- 	if (!inst)
--		goto out_put_enc;
+-	/*
+-	 * Simple resource validation ..
+-	 */
+-	if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) {
+-		dev_err(&pdev->dev, "invalid number of resources\n");
+-		return -EINVAL;
+-	}
 -
--	err = -ENAMETOOLONG;
--	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
--		     "authenc(%s,%u,%s,%u)", auth->cra_name, authsize,
--		     enc->cra_name, enckeylen) >= CRYPTO_MAX_ALG_NAME)
--		goto err_free_inst;
+-	/*
+-	 * Get the I/O base first
+-	 */
+-	io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+-	if (io_res == NULL) {
+-		io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-		if (unlikely(io_res == NULL))
+-			return -EINVAL;
+-	}
 -
--	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
--		     "authenc(%s,%u,%s,%u)", auth->cra_driver_name,
--		     authsize, enc->cra_driver_name, enckeylen) >=
--	    CRYPTO_MAX_ALG_NAME)
--		goto err_free_inst;
-+		goto out_put_auth;
- 
- 	ctx = crypto_instance_ctx(inst);
--	ctx->authsize = authsize;
--	ctx->enckeylen = enckeylen;
- 
- 	err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK);
- 	if (err)
- 		goto err_free_inst;
- 
--	err = crypto_init_spawn(&ctx->enc, enc, inst, CRYPTO_ALG_TYPE_MASK);
-+	crypto_set_skcipher_spawn(&ctx->enc, inst);
-+	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
-+				   crypto_requires_sync(algt->type,
-+							algt->mask));
- 	if (err)
- 		goto err_drop_auth;
+-	/*
+-	 * Then the CTL base
+-	 */
+-	ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+-	if (ctl_res == NULL) {
+-		ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+-		if (unlikely(ctl_res == NULL))
+-			return -EINVAL;
+-	}
++	int irq = 0;
++	int irq_flags = 0;
  
--	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
-+	enc = crypto_skcipher_spawn_alg(&ctx->enc);
-+
-+	err = -ENAMETOOLONG;
-+	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-+		     "authenc(%s,%s)", auth->cra_name, enc->cra_name) >=
-+	    CRYPTO_MAX_ALG_NAME)
-+		goto err_drop_enc;
-+
-+	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-+		     "authenc(%s,%s)", auth->cra_driver_name,
-+		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
-+		goto err_drop_enc;
-+
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-+	inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
- 	inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority;
- 	inst->alg.cra_blocksize = enc->cra_blocksize;
--	inst->alg.cra_alignmask = max(auth->cra_alignmask, enc->cra_alignmask);
-+	inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask;
- 	inst->alg.cra_type = &crypto_aead_type;
+ 	/*
+ 	 * Check for MMIO
+@@ -181,20 +157,21 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
+ 	/*
+ 	 * And the IRQ
+ 	 */
+-	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0)
+-		irq = 0;	/* no irq */
++	if (irq_res && irq_res->start > 0) {
++		irq = irq_res->start;
++		irq_flags = irq_res->flags;
++	}
  
--	inst->alg.cra_aead.ivsize = enc->cra_blkcipher.ivsize;
--	inst->alg.cra_aead.authsize = authsize;
-+	inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
-+	inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ?
-+					 auth->cra_hash.digestsize :
-+					 auth->cra_digest.dia_digestsize;
+ 	/*
+ 	 * Now that that's out of the way, wire up the port..
+ 	 */
+-	host = ata_host_alloc(&pdev->dev, 1);
++	host = ata_host_alloc(dev, 1);
+ 	if (!host)
+ 		return -ENOMEM;
+ 	ap = host->ports[0];
  
- 	inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
+ 	ap->ops = &pata_platform_port_ops;
+-	ap->pio_mask = pio_mask;
++	ap->pio_mask = __pio_mask;
+ 	ap->flags |= ATA_FLAG_SLAVE_POSS;
+ 
+ 	/*
+@@ -209,25 +186,24 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
+ 	 * Handle the MMIO case
+ 	 */
+ 	if (mmio) {
+-		ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, io_res->start,
++		ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
+ 				io_res->end - io_res->start + 1);
+-		ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
++		ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
+ 				ctl_res->end - ctl_res->start + 1);
+ 	} else {
+-		ap->ioaddr.cmd_addr = devm_ioport_map(&pdev->dev, io_res->start,
++		ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
+ 				io_res->end - io_res->start + 1);
+-		ap->ioaddr.ctl_addr = devm_ioport_map(&pdev->dev, ctl_res->start,
++		ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
+ 				ctl_res->end - ctl_res->start + 1);
+ 	}
+ 	if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
+-		dev_err(&pdev->dev, "failed to map IO/CTL base\n");
++		dev_err(dev, "failed to map IO/CTL base\n");
+ 		return -ENOMEM;
+ 	}
  
-@@ -347,18 +438,19 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
- 	inst->alg.cra_aead.setkey = crypto_authenc_setkey;
- 	inst->alg.cra_aead.encrypt = crypto_authenc_encrypt;
- 	inst->alg.cra_aead.decrypt = crypto_authenc_decrypt;
-+	inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
+ 	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
  
- out:
--	crypto_mod_put(enc);
--out_put_auth:
- 	crypto_mod_put(auth);
- 	return inst;
+-	pp_info = pdev->dev.platform_data;
+-	pata_platform_setup_port(&ap->ioaddr, pp_info);
++	pata_platform_setup_port(&ap->ioaddr, ioport_shift);
+ 
+ 	ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
+ 		      (unsigned long long)io_res->start,
+@@ -235,26 +211,78 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
  
-+err_drop_enc:
-+	crypto_drop_skcipher(&ctx->enc);
- err_drop_auth:
- 	crypto_drop_spawn(&ctx->auth);
- err_free_inst:
- 	kfree(inst);
--out_put_enc:
-+out_put_auth:
- 	inst = ERR_PTR(err);
- 	goto out;
+ 	/* activate */
+ 	return ata_host_activate(host, irq, irq ? ata_interrupt : NULL,
+-				 pp_info ? pp_info->irq_flags : 0,
+-				 &pata_platform_sht);
++				 irq_flags, &pata_platform_sht);
  }
-@@ -367,7 +459,7 @@ static void crypto_authenc_free(struct crypto_instance *inst)
- {
- 	struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
++EXPORT_SYMBOL_GPL(__pata_platform_probe);
  
--	crypto_drop_spawn(&ctx->enc);
-+	crypto_drop_skcipher(&ctx->enc);
- 	crypto_drop_spawn(&ctx->auth);
- 	kfree(inst);
- }
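
The authenc rework in this hunk drops the authsize/enckeylen numbers from the template name ("authenc(%s,%u,%s,%u)" becomes "authenc(%s,%s)") and grabs the cipher through the new skcipher spawn interface. Below is a minimal, purely illustrative sketch of requesting such an instance from the AEAD side; it assumes the usual crypto_alloc_aead() helper from <linux/crypto.h>, and the key/tag-size setup a real user would do afterwards is only hinted at in the comment:

#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_aead *authenc_example(void)
{
	struct crypto_aead *tfm;

	/* auth and enc names only; keys and the auth tag size are set
	 * later via crypto_aead_setkey()/crypto_aead_setauthsize(). */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return NULL;

	return tfm;
}
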
-diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
-index f6c67f9..4a7e65c 100644
---- a/crypto/blkcipher.c
-+++ b/crypto/blkcipher.c
-@@ -14,7 +14,8 @@
+ /**
+- *	pata_platform_remove	-	unplug a platform interface
+- *	@pdev: platform device
++ *	__pata_platform_remove		-	unplug a platform interface
++ *	@dev: device
   *
+  *	A platform bus ATA device has been unplugged. Perform the needed
+  *	cleanup. Also called on module unload for any active devices.
   */
+-static int __devexit pata_platform_remove(struct platform_device *pdev)
++int __devexit __pata_platform_remove(struct device *dev)
+ {
+-	struct device *dev = &pdev->dev;
+ 	struct ata_host *host = dev_get_drvdata(dev);
  
--#include <linux/crypto.h>
-+#include <crypto/internal/skcipher.h>
-+#include <crypto/scatterwalk.h>
- #include <linux/errno.h>
- #include <linux/hardirq.h>
- #include <linux/kernel.h>
-@@ -25,7 +26,6 @@
- #include <linux/string.h>
- 
- #include "internal.h"
--#include "scatterwalk.h"
- 
- enum {
- 	BLKCIPHER_WALK_PHYS = 1 << 0,
-@@ -433,9 +433,8 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
- 	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
- 	unsigned int len = alg->cra_ctxsize;
- 
--	type ^= CRYPTO_ALG_ASYNC;
--	mask &= CRYPTO_ALG_ASYNC;
--	if ((type & mask) && cipher->ivsize) {
-+	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
-+	    cipher->ivsize) {
- 		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
- 		len += cipher->ivsize;
- 	}
-@@ -451,6 +450,11 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
- 	crt->setkey = async_setkey;
- 	crt->encrypt = async_encrypt;
- 	crt->decrypt = async_decrypt;
-+	if (!alg->ivsize) {
-+		crt->givencrypt = skcipher_null_givencrypt;
-+		crt->givdecrypt = skcipher_null_givdecrypt;
-+	}
-+	crt->base = __crypto_ablkcipher_cast(tfm);
- 	crt->ivsize = alg->ivsize;
+ 	ata_host_detach(host);
  
  	return 0;
-@@ -482,9 +486,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
- 	if (alg->ivsize > PAGE_SIZE / 8)
- 		return -EINVAL;
- 
--	type ^= CRYPTO_ALG_ASYNC;
--	mask &= CRYPTO_ALG_ASYNC;
--	if (type & mask)
-+	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
- 		return crypto_init_blkcipher_ops_sync(tfm);
- 	else
- 		return crypto_init_blkcipher_ops_async(tfm);
-@@ -499,6 +501,8 @@ static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
- 	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
- 	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
- 	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
-+	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
-+					     "<default>");
  }
- 
- const struct crypto_type crypto_blkcipher_type = {
-@@ -510,5 +514,187 @@ const struct crypto_type crypto_blkcipher_type = {
- };
- EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
- 
-+static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
-+				const char *name, u32 type, u32 mask)
-+{
-+	struct crypto_alg *alg;
-+	int err;
-+
-+	type = crypto_skcipher_type(type);
-+	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;
-+
-+	alg = crypto_alg_mod_lookup(name, type, mask);
-+	if (IS_ERR(alg))
-+		return PTR_ERR(alg);
-+
-+	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
-+	crypto_mod_put(alg);
-+	return err;
-+}
++EXPORT_SYMBOL_GPL(__pata_platform_remove);
 +
-+struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
-+					     struct rtattr **tb, u32 type,
-+					     u32 mask)
++static int __devinit pata_platform_probe(struct platform_device *pdev)
 +{
-+	struct {
-+		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
-+			      unsigned int keylen);
-+		int (*encrypt)(struct ablkcipher_request *req);
-+		int (*decrypt)(struct ablkcipher_request *req);
-+
-+		unsigned int min_keysize;
-+		unsigned int max_keysize;
-+		unsigned int ivsize;
-+
-+		const char *geniv;
-+	} balg;
-+	const char *name;
-+	struct crypto_skcipher_spawn *spawn;
-+	struct crypto_attr_type *algt;
-+	struct crypto_instance *inst;
-+	struct crypto_alg *alg;
-+	int err;
-+
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
-+		return ERR_PTR(err);
-+
-+	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
-+	    algt->mask)
-+		return ERR_PTR(-EINVAL);
-+
-+	name = crypto_attr_alg_name(tb[1]);
-+	err = PTR_ERR(name);
-+	if (IS_ERR(name))
-+		return ERR_PTR(err);
-+
-+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-+	if (!inst)
-+		return ERR_PTR(-ENOMEM);
-+
-+	spawn = crypto_instance_ctx(inst);
-+
-+	/* Ignore async algorithms if necessary. */
-+	mask |= crypto_requires_sync(algt->type, algt->mask);
-+
-+	crypto_set_skcipher_spawn(spawn, inst);
-+	err = crypto_grab_nivcipher(spawn, name, type, mask);
-+	if (err)
-+		goto err_free_inst;
-+
-+	alg = crypto_skcipher_spawn_alg(spawn);
-+
-+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-+	    CRYPTO_ALG_TYPE_BLKCIPHER) {
-+		balg.ivsize = alg->cra_blkcipher.ivsize;
-+		balg.min_keysize = alg->cra_blkcipher.min_keysize;
-+		balg.max_keysize = alg->cra_blkcipher.max_keysize;
-+
-+		balg.setkey = async_setkey;
-+		balg.encrypt = async_encrypt;
-+		balg.decrypt = async_decrypt;
-+
-+		balg.geniv = alg->cra_blkcipher.geniv;
-+	} else {
-+		balg.ivsize = alg->cra_ablkcipher.ivsize;
-+		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
-+		balg.max_keysize = alg->cra_ablkcipher.max_keysize;
-+
-+		balg.setkey = alg->cra_ablkcipher.setkey;
-+		balg.encrypt = alg->cra_ablkcipher.encrypt;
-+		balg.decrypt = alg->cra_ablkcipher.decrypt;
++	struct resource *io_res;
++	struct resource *ctl_res;
++	struct resource *irq_res;
++	struct pata_platform_info *pp_info = pdev->dev.platform_data;
 +
-+		balg.geniv = alg->cra_ablkcipher.geniv;
++	/*
++	 * Simple resource validation ..
++	 */
++	if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) {
++		dev_err(&pdev->dev, "invalid number of resources\n");
++		return -EINVAL;
 +	}
 +
-+	err = -EINVAL;
-+	if (!balg.ivsize)
-+		goto err_drop_alg;
-+
 +	/*
-+	 * This is only true if we're constructing an algorithm with its
-+	 * default IV generator.  For the default generator we elide the
-+	 * template name and double-check the IV generator.
++	 * Get the I/O base first
 +	 */
-+	if (algt->mask & CRYPTO_ALG_GENIV) {
-+		if (!balg.geniv)
-+			balg.geniv = crypto_default_geniv(alg);
-+		err = -EAGAIN;
-+		if (strcmp(tmpl->name, balg.geniv))
-+			goto err_drop_alg;
-+
-+		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-+		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
-+		       CRYPTO_MAX_ALG_NAME);
-+	} else {
-+		err = -ENAMETOOLONG;
-+		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-+			     "%s(%s)", tmpl->name, alg->cra_name) >=
-+		    CRYPTO_MAX_ALG_NAME)
-+			goto err_drop_alg;
-+		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-+			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
-+		    CRYPTO_MAX_ALG_NAME)
-+			goto err_drop_alg;
++	io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
++	if (io_res == NULL) {
++		io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++		if (unlikely(io_res == NULL))
++			return -EINVAL;
 +	}
 +
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
-+	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-+	inst->alg.cra_priority = alg->cra_priority;
-+	inst->alg.cra_blocksize = alg->cra_blocksize;
-+	inst->alg.cra_alignmask = alg->cra_alignmask;
-+	inst->alg.cra_type = &crypto_givcipher_type;
-+
-+	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
-+	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
-+	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
-+	inst->alg.cra_ablkcipher.geniv = balg.geniv;
-+
-+	inst->alg.cra_ablkcipher.setkey = balg.setkey;
-+	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
-+	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
-+
-+out:
-+	return inst;
++	/*
++	 * Then the CTL base
++	 */
++	ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
++	if (ctl_res == NULL) {
++		ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++		if (unlikely(ctl_res == NULL))
++			return -EINVAL;
++	}
 +
-+err_drop_alg:
-+	crypto_drop_skcipher(spawn);
-+err_free_inst:
-+	kfree(inst);
-+	inst = ERR_PTR(err);
-+	goto out;
-+}
-+EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
++	/*
++	 * And the IRQ
++	 */
++	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++	if (irq_res)
++		irq_res->flags = pp_info ? pp_info->irq_flags : 0;
 +
-+void skcipher_geniv_free(struct crypto_instance *inst)
-+{
-+	crypto_drop_skcipher(crypto_instance_ctx(inst));
-+	kfree(inst);
++	return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
++				     pp_info ? pp_info->ioport_shift : 0,
++				     pio_mask);
 +}
-+EXPORT_SYMBOL_GPL(skcipher_geniv_free);
 +
-+int skcipher_geniv_init(struct crypto_tfm *tfm)
++static int __devexit pata_platform_remove(struct platform_device *pdev)
 +{
-+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-+	struct crypto_ablkcipher *cipher;
-+
-+	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
-+	if (IS_ERR(cipher))
-+		return PTR_ERR(cipher);
-+
-+	tfm->crt_ablkcipher.base = cipher;
-+	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
-+
-+	return 0;
++	return __pata_platform_remove(&pdev->dev);
 +}
-+EXPORT_SYMBOL_GPL(skcipher_geniv_init);
+ 
+ static struct platform_driver pata_platform_driver = {
+ 	.probe		= pata_platform_probe,
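
The probe split above exports __pata_platform_probe()/__pata_platform_remove() so that bus-specific glue can hand in its own resources instead of relying on platform_data. A bare-bones sketch of such a wrapper follows; the my_pata_glue_* names, the resource layout, the ioport shift of 0 and the PIO0-4 mask are all illustrative assumptions, as is the guess that the new prototypes live in <linux/pata_platform.h>:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/pata_platform.h>	/* assumed home of the new prototypes */

static int __devinit my_pata_glue_probe(struct platform_device *pdev)
{
	struct resource *io_res, *ctl_res, *irq_res;

	io_res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!io_res || !ctl_res)
		return -EINVAL;

	/* shift 0 and 0x1f (PIO0-4) are example values, not requirements */
	return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
				     0, 0x1f);
}

static int __devexit my_pata_glue_remove(struct platform_device *pdev)
{
	return __pata_platform_remove(&pdev->dev);
}
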
+diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
+index a4c0e50..9f308ed 100644
+--- a/drivers/ata/pata_qdi.c
++++ b/drivers/ata/pata_qdi.c
+@@ -124,29 +124,33 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
+ 	return ata_qc_issue_prot(qc);
+ }
+ 
+-static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
++static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
++				  unsigned int buflen, int rw)
+ {
+-	struct ata_port *ap = adev->link->ap;
+-	int slop = buflen & 3;
++	if (ata_id_has_dword_io(dev->id)) {
++		struct ata_port *ap = dev->link->ap;
++		int slop = buflen & 3;
+ 
+-	if (ata_id_has_dword_io(adev->id)) {
+-		if (write_data)
+-			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+-		else
++		if (rw == READ)
+ 			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
++		else
++			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ 
+ 		if (unlikely(slop)) {
+-			__le32 pad = 0;
+-			if (write_data) {
+-				memcpy(&pad, buf + buflen - slop, slop);
+-				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+-			} else {
++			u32 pad;
++			if (rw == READ) {
+ 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+ 				memcpy(buf + buflen - slop, &pad, slop);
++			} else {
++				memcpy(&pad, buf + buflen - slop, slop);
++				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+ 			}
++			buflen += 4 - slop;
+ 		}
+ 	} else
+-		ata_data_xfer(adev, buf, buflen, write_data);
++		buflen = ata_data_xfer(dev, buf, buflen, rw);
 +
-+void skcipher_geniv_exit(struct crypto_tfm *tfm)
-+{
-+	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
-+}
-+EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
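
The skcipher_geniv_{alloc,free,init,exit} helpers above are the scaffolding an IV-generator template is built from: alloc/free create and tear down the wrapping instance, init/exit bind the inner ablkcipher. The sketch below shows only the intended shape; every "myiv" identifier is invented for illustration and the stub's error value is arbitrary:

#include <linux/err.h>
#include <linux/module.h>
#include <crypto/internal/skcipher.h>

static struct crypto_template myiv_tmpl;

static int myiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;		/* a real generator would produce req->giv */
}

static struct crypto_instance *myiv_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;

	inst = skcipher_geniv_alloc(&myiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		return inst;

	inst->alg.cra_init = skcipher_geniv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;
	inst->alg.cra_ablkcipher.givencrypt = myiv_givencrypt;

	return inst;
}

static struct crypto_template myiv_tmpl = {
	.name	= "myiv",
	.alloc	= myiv_alloc,
	.free	= skcipher_geniv_free,
	.module	= THIS_MODULE,
};
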
++	return buflen;
+ }
+ 
+ static struct scsi_host_template qdi_sht = {
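
The qdi conversion above (and the identical winbond change further down) turns the data_xfer hook from a void function into one that reports how many bytes were actually clocked over the data register: in dword-I/O mode a trailing 1-3 byte "slop" is padded out to a full dword and that padding is included in the return value. The arithmetic, pulled out into a throwaway helper purely for illustration:

static unsigned int dword_pio_reported_len(unsigned int buflen)
{
	unsigned int slop = buflen & 3;	/* bytes past the last full dword */

	/* matches the "buflen += 4 - slop" in the hunk above */
	return slop ? buflen + 4 - slop : buflen;
}
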
+diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
+index ea2ef9f..55055b2 100644
+--- a/drivers/ata/pata_scc.c
++++ b/drivers/ata/pata_scc.c
+@@ -768,45 +768,47 @@ static u8 scc_bmdma_status (struct ata_port *ap)
+ 
+ /**
+  *	scc_data_xfer - Transfer data by PIO
+- *	@adev: device for this I/O
++ *	@dev: device for this I/O
+  *	@buf: data buffer
+  *	@buflen: buffer length
+- *	@write_data: read/write
++ *	@rw: read/write
+  *
+  *	Note: Original code is ata_data_xfer().
+  */
+ 
+-static void scc_data_xfer (struct ata_device *adev, unsigned char *buf,
+-			   unsigned int buflen, int write_data)
++static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
++				   unsigned int buflen, int rw)
+ {
+-	struct ata_port *ap = adev->link->ap;
++	struct ata_port *ap = dev->link->ap;
+ 	unsigned int words = buflen >> 1;
+ 	unsigned int i;
+ 	u16 *buf16 = (u16 *) buf;
+ 	void __iomem *mmio = ap->ioaddr.data_addr;
+ 
+ 	/* Transfer multiple of 2 bytes */
+-	if (write_data) {
+-		for (i = 0; i < words; i++)
+-			out_be32(mmio, cpu_to_le16(buf16[i]));
+-	} else {
++	if (rw == READ)
+ 		for (i = 0; i < words; i++)
+ 			buf16[i] = le16_to_cpu(in_be32(mmio));
+-	}
++	else
++		for (i = 0; i < words; i++)
++			out_be32(mmio, cpu_to_le16(buf16[i]));
+ 
+ 	/* Transfer trailing 1 byte, if any. */
+ 	if (unlikely(buflen & 0x01)) {
+ 		u16 align_buf[1] = { 0 };
+ 		unsigned char *trailing_buf = buf + buflen - 1;
+ 
+-		if (write_data) {
+-			memcpy(align_buf, trailing_buf, 1);
+-			out_be32(mmio, cpu_to_le16(align_buf[0]));
+-		} else {
++		if (rw == READ) {
+ 			align_buf[0] = le16_to_cpu(in_be32(mmio));
+ 			memcpy(trailing_buf, align_buf, 1);
++		} else {
++			memcpy(align_buf, trailing_buf, 1);
++			out_be32(mmio, cpu_to_le16(align_buf[0]));
+ 		}
++		words++;
+ 	}
 +
- MODULE_LICENSE("GPL");
- MODULE_DESCRIPTION("Generic block chaining cipher type");
-diff --git a/crypto/camellia.c b/crypto/camellia.c
-index 6877ecf..493fee7 100644
---- a/crypto/camellia.c
-+++ b/crypto/camellia.c
-@@ -36,176 +36,6 @@
- #include <linux/kernel.h>
- #include <linux/module.h>
++	return words << 1;
+ }
+ 
+ /**
+diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
+index 8bed888..9c523fb 100644
+--- a/drivers/ata/pata_serverworks.c
++++ b/drivers/ata/pata_serverworks.c
+@@ -41,7 +41,7 @@
+ #include <linux/libata.h>
+ 
+ #define DRV_NAME "pata_serverworks"
+-#define DRV_VERSION "0.4.2"
++#define DRV_VERSION "0.4.3"
+ 
+ #define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
+ #define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
+@@ -102,7 +102,7 @@ static int osb4_cable(struct ata_port *ap) {
+ }
+ 
+ /**
+- *	csb4_cable	-	CSB5/6 cable detect
++ *	csb_cable	-	CSB5/6 cable detect
+  *	@ap: ATA port to check
+  *
+  *	Serverworks default arrangement is to use the drive side detection
+@@ -110,7 +110,7 @@ static int osb4_cable(struct ata_port *ap) {
+  */
+ 
+ static int csb_cable(struct ata_port *ap) {
+-	return ATA_CBL_PATA80;
++	return ATA_CBL_PATA_UNK;
+ }
+ 
+ struct sv_cable_table {
+@@ -231,7 +231,6 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
+ 	return ata_pci_default_filter(adev, mask);
+ }
  
 -
--#define CAMELLIA_MIN_KEY_SIZE        16
--#define CAMELLIA_MAX_KEY_SIZE        32
--#define CAMELLIA_BLOCK_SIZE 16
--#define CAMELLIA_TABLE_BYTE_LEN 272
--#define CAMELLIA_TABLE_WORD_LEN (CAMELLIA_TABLE_BYTE_LEN / 4)
--
--typedef u32 KEY_TABLE_TYPE[CAMELLIA_TABLE_WORD_LEN];
--
--
--/* key constants */
--
--#define CAMELLIA_SIGMA1L (0xA09E667FL)
--#define CAMELLIA_SIGMA1R (0x3BCC908BL)
--#define CAMELLIA_SIGMA2L (0xB67AE858L)
--#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
--#define CAMELLIA_SIGMA3L (0xC6EF372FL)
--#define CAMELLIA_SIGMA3R (0xE94F82BEL)
--#define CAMELLIA_SIGMA4L (0x54FF53A5L)
--#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
--#define CAMELLIA_SIGMA5L (0x10E527FAL)
--#define CAMELLIA_SIGMA5R (0xDE682D1DL)
--#define CAMELLIA_SIGMA6L (0xB05688C2L)
--#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
--
--struct camellia_ctx {
--	int key_length;
--	KEY_TABLE_TYPE key_table;
--};
--
--
--/*
-- *  macros
-- */
--
--
--# define GETU32(pt) (((u32)(pt)[0] << 24)	\
--		     ^ ((u32)(pt)[1] << 16)	\
--		     ^ ((u32)(pt)[2] <<  8)	\
--		     ^ ((u32)(pt)[3]))
--
--#define COPY4WORD(dst, src)			\
--    do {					\
--	(dst)[0]=(src)[0];			\
--	(dst)[1]=(src)[1];			\
--	(dst)[2]=(src)[2];			\
--	(dst)[3]=(src)[3];			\
--    }while(0)
--
--#define SWAP4WORD(word)				\
--    do {					\
--	CAMELLIA_SWAP4((word)[0]);		\
--	CAMELLIA_SWAP4((word)[1]);		\
--	CAMELLIA_SWAP4((word)[2]);		\
--	CAMELLIA_SWAP4((word)[3]);		\
--    }while(0)
--
--#define XOR4WORD(a, b)/* a = a ^ b */		\
--    do {					\
--	(a)[0]^=(b)[0];				\
--	(a)[1]^=(b)[1];				\
--	(a)[2]^=(b)[2];				\
--	(a)[3]^=(b)[3];				\
--    }while(0)
--
--#define XOR4WORD2(a, b, c)/* a = b ^ c */	\
--    do {					\
--	(a)[0]=(b)[0]^(c)[0];			\
--	(a)[1]=(b)[1]^(c)[1];			\
--	(a)[2]=(b)[2]^(c)[2];			\
--	(a)[3]=(b)[3]^(c)[3];			\
--    }while(0)
--
--#define CAMELLIA_SUBKEY_L(INDEX) (subkey[(INDEX)*2])
--#define CAMELLIA_SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
--
--/* rotation right shift 1byte */
--#define CAMELLIA_RR8(x) (((x) >> 8) + ((x) << 24))
--/* rotation left shift 1bit */
--#define CAMELLIA_RL1(x) (((x) << 1) + ((x) >> 31))
--/* rotation left shift 1byte */
--#define CAMELLIA_RL8(x) (((x) << 8) + ((x) >> 24))
--
--#define CAMELLIA_ROLDQ(ll, lr, rl, rr, w0, w1, bits)	\
--    do {						\
--	w0 = ll;					\
--	ll = (ll << bits) + (lr >> (32 - bits));	\
--	lr = (lr << bits) + (rl >> (32 - bits));	\
--	rl = (rl << bits) + (rr >> (32 - bits));	\
--	rr = (rr << bits) + (w0 >> (32 - bits));	\
--    } while(0)
--
--#define CAMELLIA_ROLDQo32(ll, lr, rl, rr, w0, w1, bits)	\
--    do {						\
--	w0 = ll;					\
--	w1 = lr;					\
--	ll = (lr << (bits - 32)) + (rl >> (64 - bits));	\
--	lr = (rl << (bits - 32)) + (rr >> (64 - bits));	\
--	rl = (rr << (bits - 32)) + (w0 >> (64 - bits));	\
--	rr = (w0 << (bits - 32)) + (w1 >> (64 - bits));	\
--    } while(0)
--
--#define CAMELLIA_SP1110(INDEX) (camellia_sp1110[(INDEX)])
--#define CAMELLIA_SP0222(INDEX) (camellia_sp0222[(INDEX)])
--#define CAMELLIA_SP3033(INDEX) (camellia_sp3033[(INDEX)])
--#define CAMELLIA_SP4404(INDEX) (camellia_sp4404[(INDEX)])
--
--#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1)	\
--    do {							\
--	il = xl ^ kl;						\
--	ir = xr ^ kr;						\
--	t0 = il >> 16;						\
--	t1 = ir >> 16;						\
--	yl = CAMELLIA_SP1110(ir & 0xff)				\
--	    ^ CAMELLIA_SP0222((t1 >> 8) & 0xff)			\
--	    ^ CAMELLIA_SP3033(t1 & 0xff)			\
--	    ^ CAMELLIA_SP4404((ir >> 8) & 0xff);		\
--	yr = CAMELLIA_SP1110((t0 >> 8) & 0xff)			\
--	    ^ CAMELLIA_SP0222(t0 & 0xff)			\
--	    ^ CAMELLIA_SP3033((il >> 8) & 0xff)			\
--	    ^ CAMELLIA_SP4404(il & 0xff);			\
--	yl ^= yr;						\
--	yr = CAMELLIA_RR8(yr);					\
--	yr ^= yl;						\
--    } while(0)
--
--
--/*
-- * for speed up
-- *
-- */
--#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \
--    do {								\
--	t0 = kll;							\
--	t2 = krr;							\
--	t0 &= ll;							\
--	t2 |= rr;							\
--	rl ^= t2;							\
--	lr ^= CAMELLIA_RL1(t0);						\
--	t3 = krl;							\
--	t1 = klr;							\
--	t3 &= rl;							\
--	t1 |= lr;							\
--	ll ^= t1;							\
--	rr ^= CAMELLIA_RL1(t3);						\
--    } while(0)
--
--#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1)	\
--    do {								\
--	ir =  CAMELLIA_SP1110(xr & 0xff);				\
--	il =  CAMELLIA_SP1110((xl>>24) & 0xff);				\
--	ir ^= CAMELLIA_SP0222((xr>>24) & 0xff);				\
--	il ^= CAMELLIA_SP0222((xl>>16) & 0xff);				\
--	ir ^= CAMELLIA_SP3033((xr>>16) & 0xff);				\
--	il ^= CAMELLIA_SP3033((xl>>8) & 0xff);				\
--	ir ^= CAMELLIA_SP4404((xr>>8) & 0xff);				\
--	il ^= CAMELLIA_SP4404(xl & 0xff);				\
--	il ^= kl;							\
--	ir ^= il ^ kr;							\
--	yl ^= ir;							\
--	yr ^= CAMELLIA_RR8(il) ^ ir;					\
--    } while(0)
--
--/**
-- * Stuff related to the Camellia key schedule
-- */
--#define SUBL(x) subL[(x)]
--#define SUBR(x) subR[(x)]
--
--
- static const u32 camellia_sp1110[256] = {
- 	0x70707000,0x82828200,0x2c2c2c00,0xececec00,
- 	0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500,
-@@ -475,67 +305,348 @@ static const u32 camellia_sp4404[256] = {
- };
+ /**
+  *	serverworks_set_piomode	-	set initial PIO mode data
+  *	@ap: ATA interface
+@@ -243,7 +242,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
+ static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ {
+ 	static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
+-	int offset = 1 + (2 * ap->port_no) - adev->devno;
++	int offset = 1 + 2 * ap->port_no - adev->devno;
+ 	int devbits = (2 * ap->port_no + adev->devno) * 4;
+ 	u16 csb5_pio;
+ 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
+index 453d72b..39627ab 100644
+--- a/drivers/ata/pata_via.c
++++ b/drivers/ata/pata_via.c
+@@ -185,7 +185,8 @@ static int via_cable_detect(struct ata_port *ap) {
+ 	if (ata66 & (0x10100000 >> (16 * ap->port_no)))
+ 		return ATA_CBL_PATA80;
+ 	/* Check with ACPI so we can spot BIOS reported SATA bridges */
+-	if (ata_acpi_cbl_80wire(ap))
++	if (ata_acpi_init_gtm(ap) &&
++	    ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
+ 		return ATA_CBL_PATA80;
+ 	return ATA_CBL_PATA40;
+ }
+diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
+index 7116a9e..99c92ed 100644
+--- a/drivers/ata/pata_winbond.c
++++ b/drivers/ata/pata_winbond.c
+@@ -92,29 +92,33 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ }
  
  
-+#define CAMELLIA_MIN_KEY_SIZE        16
-+#define CAMELLIA_MAX_KEY_SIZE        32
-+#define CAMELLIA_BLOCK_SIZE          16
-+#define CAMELLIA_TABLE_BYTE_LEN     272
-+
-+/*
-+ * NB: L and R below stand for 'left' and 'right' as in written numbers.
-+ * That is, in (xxxL,xxxR) pair xxxL holds most significant digits,
-+ * _not_ least significant ones!
-+ */
-+
-+
-+/* key constants */
-+
-+#define CAMELLIA_SIGMA1L (0xA09E667FL)
-+#define CAMELLIA_SIGMA1R (0x3BCC908BL)
-+#define CAMELLIA_SIGMA2L (0xB67AE858L)
-+#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
-+#define CAMELLIA_SIGMA3L (0xC6EF372FL)
-+#define CAMELLIA_SIGMA3R (0xE94F82BEL)
-+#define CAMELLIA_SIGMA4L (0x54FF53A5L)
-+#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
-+#define CAMELLIA_SIGMA5L (0x10E527FAL)
-+#define CAMELLIA_SIGMA5R (0xDE682D1DL)
-+#define CAMELLIA_SIGMA6L (0xB05688C2L)
-+#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
-+
-+/*
-+ *  macros
-+ */
-+#define GETU32(v, pt) \
-+    do { \
-+	/* latest breed of gcc is clever enough to use move */ \
-+	memcpy(&(v), (pt), 4); \
-+	(v) = be32_to_cpu(v); \
-+    } while(0)
-+
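
The GETU32 rewrite just above swaps the byte-by-byte shift/XOR composition for a 4-byte memcpy() followed by be32_to_cpu(); both read the same big-endian 32-bit word, and the memcpy form lets the compiler emit a single load where alignment allows. A purely illustrative side-by-side, not part of the patch:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static u32 getu32_shift(const u8 *pt)
{
	return ((u32)pt[0] << 24) ^ ((u32)pt[1] << 16) ^
	       ((u32)pt[2] <<  8) ^  (u32)pt[3];
}

static u32 getu32_memcpy(const u8 *pt)
{
	u32 v;

	memcpy(&v, pt, 4);		/* usually compiled to a plain move */
	return be32_to_cpu(v);		/* same value as getu32_shift(pt) */
}
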
-+/* rotation right shift 1byte */
-+#define ROR8(x) (((x) >> 8) + ((x) << 24))
-+/* rotation left shift 1bit */
-+#define ROL1(x) (((x) << 1) + ((x) >> 31))
-+/* rotation left shift 1byte */
-+#define ROL8(x) (((x) << 8) + ((x) >> 24))
-+
-+#define ROLDQ(ll, lr, rl, rr, w0, w1, bits)		\
-+    do {						\
-+	w0 = ll;					\
-+	ll = (ll << bits) + (lr >> (32 - bits));	\
-+	lr = (lr << bits) + (rl >> (32 - bits));	\
-+	rl = (rl << bits) + (rr >> (32 - bits));	\
-+	rr = (rr << bits) + (w0 >> (32 - bits));	\
-+    } while(0)
-+
-+#define ROLDQo32(ll, lr, rl, rr, w0, w1, bits)		\
-+    do {						\
-+	w0 = ll;					\
-+	w1 = lr;					\
-+	ll = (lr << (bits - 32)) + (rl >> (64 - bits));	\
-+	lr = (rl << (bits - 32)) + (rr >> (64 - bits));	\
-+	rl = (rr << (bits - 32)) + (w0 >> (64 - bits));	\
-+	rr = (w0 << (bits - 32)) + (w1 >> (64 - bits));	\
-+    } while(0)
-+
-+#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1)	\
-+    do {							\
-+	il = xl ^ kl;						\
-+	ir = xr ^ kr;						\
-+	t0 = il >> 16;						\
-+	t1 = ir >> 16;						\
-+	yl = camellia_sp1110[(u8)(ir     )]			\
-+	   ^ camellia_sp0222[    (t1 >> 8)]			\
-+	   ^ camellia_sp3033[(u8)(t1     )]			\
-+	   ^ camellia_sp4404[(u8)(ir >> 8)];			\
-+	yr = camellia_sp1110[    (t0 >> 8)]			\
-+	   ^ camellia_sp0222[(u8)(t0     )]			\
-+	   ^ camellia_sp3033[(u8)(il >> 8)]			\
-+	   ^ camellia_sp4404[(u8)(il     )];			\
-+	yl ^= yr;						\
-+	yr = ROR8(yr);						\
-+	yr ^= yl;						\
-+    } while(0)
-+
-+#define SUBKEY_L(INDEX) (subkey[(INDEX)*2])
-+#define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
-+
-+static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
-+{
-+	u32 dw, tl, tr;
-+	u32 kw4l, kw4r;
-+	int i;
-+
-+	/* absorb kw2 to other subkeys */
-+	/* round 2 */
-+	subL[3] ^= subL[1]; subR[3] ^= subR[1];
-+	/* round 4 */
-+	subL[5] ^= subL[1]; subR[5] ^= subR[1];
-+	/* round 6 */
-+	subL[7] ^= subL[1]; subR[7] ^= subR[1];
-+	subL[1] ^= subR[1] & ~subR[9];
-+	dw = subL[1] & subL[9],
-+		subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
-+	/* round 8 */
-+	subL[11] ^= subL[1]; subR[11] ^= subR[1];
-+	/* round 10 */
-+	subL[13] ^= subL[1]; subR[13] ^= subR[1];
-+	/* round 12 */
-+	subL[15] ^= subL[1]; subR[15] ^= subR[1];
-+	subL[1] ^= subR[1] & ~subR[17];
-+	dw = subL[1] & subL[17],
-+		subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
-+	/* round 14 */
-+	subL[19] ^= subL[1]; subR[19] ^= subR[1];
-+	/* round 16 */
-+	subL[21] ^= subL[1]; subR[21] ^= subR[1];
-+	/* round 18 */
-+	subL[23] ^= subL[1]; subR[23] ^= subR[1];
-+	if (max == 24) {
-+		/* kw3 */
-+		subL[24] ^= subL[1]; subR[24] ^= subR[1];
-+
-+	/* absorb kw4 to other subkeys */
-+		kw4l = subL[25]; kw4r = subR[25];
-+	} else {
-+		subL[1] ^= subR[1] & ~subR[25];
-+		dw = subL[1] & subL[25],
-+			subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
-+		/* round 20 */
-+		subL[27] ^= subL[1]; subR[27] ^= subR[1];
-+		/* round 22 */
-+		subL[29] ^= subL[1]; subR[29] ^= subR[1];
-+		/* round 24 */
-+		subL[31] ^= subL[1]; subR[31] ^= subR[1];
-+		/* kw3 */
-+		subL[32] ^= subL[1]; subR[32] ^= subR[1];
-+
-+	/* absorb kw4 to other subkeys */
-+		kw4l = subL[33]; kw4r = subR[33];
-+		/* round 23 */
-+		subL[30] ^= kw4l; subR[30] ^= kw4r;
-+		/* round 21 */
-+		subL[28] ^= kw4l; subR[28] ^= kw4r;
-+		/* round 19 */
-+		subL[26] ^= kw4l; subR[26] ^= kw4r;
-+		kw4l ^= kw4r & ~subR[24];
-+		dw = kw4l & subL[24],
-+			kw4r ^= ROL1(dw); /* modified for FL(kl5) */
-+	}
-+	/* round 17 */
-+	subL[22] ^= kw4l; subR[22] ^= kw4r;
-+	/* round 15 */
-+	subL[20] ^= kw4l; subR[20] ^= kw4r;
-+	/* round 13 */
-+	subL[18] ^= kw4l; subR[18] ^= kw4r;
-+	kw4l ^= kw4r & ~subR[16];
-+	dw = kw4l & subL[16],
-+		kw4r ^= ROL1(dw); /* modified for FL(kl3) */
-+	/* round 11 */
-+	subL[14] ^= kw4l; subR[14] ^= kw4r;
-+	/* round 9 */
-+	subL[12] ^= kw4l; subR[12] ^= kw4r;
-+	/* round 7 */
-+	subL[10] ^= kw4l; subR[10] ^= kw4r;
-+	kw4l ^= kw4r & ~subR[8];
-+	dw = kw4l & subL[8],
-+		kw4r ^= ROL1(dw); /* modified for FL(kl1) */
-+	/* round 5 */
-+	subL[6] ^= kw4l; subR[6] ^= kw4r;
-+	/* round 3 */
-+	subL[4] ^= kw4l; subR[4] ^= kw4r;
-+	/* round 1 */
-+	subL[2] ^= kw4l; subR[2] ^= kw4r;
-+	/* kw1 */
-+	subL[0] ^= kw4l; subR[0] ^= kw4r;
-+
-+	/* key XOR is end of F-function */
-+	SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */
-+	SUBKEY_R(0) = subR[0] ^ subR[2];
-+	SUBKEY_L(2) = subL[3];       /* round 1 */
-+	SUBKEY_R(2) = subR[3];
-+	SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */
-+	SUBKEY_R(3) = subR[2] ^ subR[4];
-+	SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */
-+	SUBKEY_R(4) = subR[3] ^ subR[5];
-+	SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */
-+	SUBKEY_R(5) = subR[4] ^ subR[6];
-+	SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */
-+	SUBKEY_R(6) = subR[5] ^ subR[7];
-+	tl = subL[10] ^ (subR[10] & ~subR[8]);
-+	dw = tl & subL[8],  /* FL(kl1) */
-+		tr = subR[10] ^ ROL1(dw);
-+	SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
-+	SUBKEY_R(7) = subR[6] ^ tr;
-+	SUBKEY_L(8) = subL[8];       /* FL(kl1) */
-+	SUBKEY_R(8) = subR[8];
-+	SUBKEY_L(9) = subL[9];       /* FLinv(kl2) */
-+	SUBKEY_R(9) = subR[9];
-+	tl = subL[7] ^ (subR[7] & ~subR[9]);
-+	dw = tl & subL[9],  /* FLinv(kl2) */
-+		tr = subR[7] ^ ROL1(dw);
-+	SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
-+	SUBKEY_R(10) = tr ^ subR[11];
-+	SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
-+	SUBKEY_R(11) = subR[10] ^ subR[12];
-+	SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */
-+	SUBKEY_R(12) = subR[11] ^ subR[13];
-+	SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */
-+	SUBKEY_R(13) = subR[12] ^ subR[14];
-+	SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */
-+	SUBKEY_R(14) = subR[13] ^ subR[15];
-+	tl = subL[18] ^ (subR[18] & ~subR[16]);
-+	dw = tl & subL[16], /* FL(kl3) */
-+		tr = subR[18] ^ ROL1(dw);
-+	SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
-+	SUBKEY_R(15) = subR[14] ^ tr;
-+	SUBKEY_L(16) = subL[16];     /* FL(kl3) */
-+	SUBKEY_R(16) = subR[16];
-+	SUBKEY_L(17) = subL[17];     /* FLinv(kl4) */
-+	SUBKEY_R(17) = subR[17];
-+	tl = subL[15] ^ (subR[15] & ~subR[17]);
-+	dw = tl & subL[17], /* FLinv(kl4) */
-+		tr = subR[15] ^ ROL1(dw);
-+	SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
-+	SUBKEY_R(18) = tr ^ subR[19];
-+	SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
-+	SUBKEY_R(19) = subR[18] ^ subR[20];
-+	SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */
-+	SUBKEY_R(20) = subR[19] ^ subR[21];
-+	SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */
-+	SUBKEY_R(21) = subR[20] ^ subR[22];
-+	SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */
-+	SUBKEY_R(22) = subR[21] ^ subR[23];
-+	if (max == 24) {
-+		SUBKEY_L(23) = subL[22];     /* round 18 */
-+		SUBKEY_R(23) = subR[22];
-+		SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */
-+		SUBKEY_R(24) = subR[24] ^ subR[23];
-+	} else {
-+		tl = subL[26] ^ (subR[26] & ~subR[24]);
-+		dw = tl & subL[24], /* FL(kl5) */
-+			tr = subR[26] ^ ROL1(dw);
-+		SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
-+		SUBKEY_R(23) = subR[22] ^ tr;
-+		SUBKEY_L(24) = subL[24];     /* FL(kl5) */
-+		SUBKEY_R(24) = subR[24];
-+		SUBKEY_L(25) = subL[25];     /* FLinv(kl6) */
-+		SUBKEY_R(25) = subR[25];
-+		tl = subL[23] ^ (subR[23] & ~subR[25]);
-+		dw = tl & subL[25], /* FLinv(kl6) */
-+			tr = subR[23] ^ ROL1(dw);
-+		SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
-+		SUBKEY_R(26) = tr ^ subR[27];
-+		SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
-+		SUBKEY_R(27) = subR[26] ^ subR[28];
-+		SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */
-+		SUBKEY_R(28) = subR[27] ^ subR[29];
-+		SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */
-+		SUBKEY_R(29) = subR[28] ^ subR[30];
-+		SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */
-+		SUBKEY_R(30) = subR[29] ^ subR[31];
-+		SUBKEY_L(31) = subL[30];     /* round 24 */
-+		SUBKEY_R(31) = subR[30];
-+		SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */
-+		SUBKEY_R(32) = subR[32] ^ subR[31];
-+	}
+-static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
++static unsigned int winbond_data_xfer(struct ata_device *dev,
++			unsigned char *buf, unsigned int buflen, int rw)
+ {
+-	struct ata_port *ap = adev->link->ap;
++	struct ata_port *ap = dev->link->ap;
+ 	int slop = buflen & 3;
+ 
+-	if (ata_id_has_dword_io(adev->id)) {
+-		if (write_data)
+-			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+-		else
++	if (ata_id_has_dword_io(dev->id)) {
++		if (rw == READ)
+ 			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
++		else
++			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ 
+ 		if (unlikely(slop)) {
+-			__le32 pad = 0;
+-			if (write_data) {
+-				memcpy(&pad, buf + buflen - slop, slop);
+-				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+-			} else {
++			u32 pad;
++			if (rw == READ) {
+ 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+ 				memcpy(buf + buflen - slop, &pad, slop);
++			} else {
++				memcpy(&pad, buf + buflen - slop, slop);
++				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+ 			}
++			buflen += 4 - slop;
+ 		}
+ 	} else
+-		ata_data_xfer(adev, buf, buflen, write_data);
++		buflen = ata_data_xfer(dev, buf, buflen, rw);
 +
-+	/* apply the inverse of the last half of P-function */
-+	i = 2;
-+	do {
-+		dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
-+		SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
-+		dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
-+		SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
-+		dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
-+		SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
-+		dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
-+		SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
-+		dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
-+		SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
-+		dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
-+		SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
-+		i += 8;
-+	} while (i < max);
-+}
++	return buflen;
+ }
  
- static void camellia_setup128(const unsigned char *key, u32 *subkey)
- {
- 	u32 kll, klr, krl, krr;
- 	u32 il, ir, t0, t1, w0, w1;
--	u32 kw4l, kw4r, dw, tl, tr;
- 	u32 subL[26];
- 	u32 subR[26];
+ static struct scsi_host_template winbond_sht = {
+@@ -191,7 +195,7 @@ static __init int winbond_init_one(unsigned long port)
+ 	reg = winbond_readcfg(port, 0x81);
  
- 	/**
--	 *  k == kll || klr || krl || krr (|| is concatination)
--	 */
--	kll = GETU32(key     );
--	klr = GETU32(key +  4);
--	krl = GETU32(key +  8);
--	krr = GETU32(key + 12);
--	/**
--	 * generate KL dependent subkeys
-+	 *  k == kll || klr || krl || krr (|| is concatenation)
- 	 */
-+	GETU32(kll, key     );
-+	GETU32(klr, key +  4);
-+	GETU32(krl, key +  8);
-+	GETU32(krr, key + 12);
-+
-+	/* generate KL dependent subkeys */
- 	/* kw1 */
--	SUBL(0) = kll; SUBR(0) = klr;
-+	subL[0] = kll; subR[0] = klr;
- 	/* kw2 */
--	SUBL(1) = krl; SUBR(1) = krr;
-+	subL[1] = krl; subR[1] = krr;
- 	/* rotation left shift 15bit */
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
- 	/* k3 */
--	SUBL(4) = kll; SUBR(4) = klr;
-+	subL[4] = kll; subR[4] = klr;
- 	/* k4 */
--	SUBL(5) = krl; SUBR(5) = krr;
-+	subL[5] = krl; subR[5] = krr;
- 	/* rotation left shift 15+30bit */
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 30);
- 	/* k7 */
--	SUBL(10) = kll; SUBR(10) = klr;
-+	subL[10] = kll; subR[10] = klr;
- 	/* k8 */
--	SUBL(11) = krl; SUBR(11) = krr;
-+	subL[11] = krl; subR[11] = krr;
- 	/* rotation left shift 15+30+15bit */
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
- 	/* k10 */
--	SUBL(13) = krl; SUBR(13) = krr;
-+	subL[13] = krl; subR[13] = krr;
- 	/* rotation left shift 15+30+15+17 bit */
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
- 	/* kl3 */
--	SUBL(16) = kll; SUBR(16) = klr;
-+	subL[16] = kll; subR[16] = klr;
- 	/* kl4 */
--	SUBL(17) = krl; SUBR(17) = krr;
-+	subL[17] = krl; subR[17] = krr;
- 	/* rotation left shift 15+30+15+17+17 bit */
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
- 	/* k13 */
--	SUBL(18) = kll; SUBR(18) = klr;
-+	subL[18] = kll; subR[18] = klr;
- 	/* k14 */
--	SUBL(19) = krl; SUBR(19) = krr;
-+	subL[19] = krl; subR[19] = krr;
- 	/* rotation left shift 15+30+15+17+17+17 bit */
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
- 	/* k17 */
--	SUBL(22) = kll; SUBR(22) = klr;
-+	subL[22] = kll; subR[22] = klr;
- 	/* k18 */
--	SUBL(23) = krl; SUBR(23) = krr;
-+	subL[23] = krl; subR[23] = krr;
+ 	if (!(reg & 0x03))		/* Disabled */
+-		return 0;
++		return -ENODEV;
  
- 	/* generate KA */
--	kll = SUBL(0); klr = SUBR(0);
--	krl = SUBL(1); krr = SUBR(1);
-+	kll = subL[0]; klr = subR[0];
-+	krl = subL[1]; krr = subR[1];
- 	CAMELLIA_F(kll, klr,
- 		   CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
- 		   w0, w1, il, ir, t0, t1);
-@@ -555,306 +666,108 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
+ 	for (i = 0; i < 2 ; i ++) {
+ 		unsigned long cmd_port = 0x1F0 - (0x80 * i);
+diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
+index bd4c2a3..8e1b7e9 100644
+--- a/drivers/ata/pdc_adma.c
++++ b/drivers/ata/pdc_adma.c
+@@ -321,8 +321,9 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
+ 	u8  *buf = pp->pkt, *last_buf = NULL;
+ 	int i = (2 + buf[3]) * 8;
+ 	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
++	unsigned int si;
  
- 	/* generate KA dependent subkeys */
- 	/* k1, k2 */
--	SUBL(2) = kll; SUBR(2) = klr;
--	SUBL(3) = krl; SUBR(3) = krr;
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
-+	subL[2] = kll; subR[2] = klr;
-+	subL[3] = krl; subR[3] = krr;
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
- 	/* k5,k6 */
--	SUBL(6) = kll; SUBR(6) = klr;
--	SUBL(7) = krl; SUBR(7) = krr;
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
-+	subL[6] = kll; subR[6] = klr;
-+	subL[7] = krl; subR[7] = krr;
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
- 	/* kl1, kl2 */
--	SUBL(8) = kll; SUBR(8) = klr;
--	SUBL(9) = krl; SUBR(9) = krr;
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
-+	subL[8] = kll; subR[8] = klr;
-+	subL[9] = krl; subR[9] = krr;
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
- 	/* k9 */
--	SUBL(12) = kll; SUBR(12) = klr;
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
-+	subL[12] = kll; subR[12] = klr;
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
- 	/* k11, k12 */
--	SUBL(14) = kll; SUBR(14) = klr;
--	SUBL(15) = krl; SUBR(15) = krr;
--	CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
-+	subL[14] = kll; subR[14] = klr;
-+	subL[15] = krl; subR[15] = krr;
-+	ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
- 	/* k15, k16 */
--	SUBL(20) = kll; SUBR(20) = klr;
--	SUBL(21) = krl; SUBR(21) = krr;
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
-+	subL[20] = kll; subR[20] = klr;
-+	subL[21] = krl; subR[21] = krr;
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
- 	/* kw3, kw4 */
--	SUBL(24) = kll; SUBR(24) = klr;
--	SUBL(25) = krl; SUBR(25) = krr;
-+	subL[24] = kll; subR[24] = klr;
-+	subL[25] = krl; subR[25] = krr;
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		u32 addr;
+ 		u32 len;
  
+@@ -455,7 +456,7 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
+ 		adma_packet_start(qc);
+ 		return 0;
+ 
+-	case ATA_PROT_ATAPI_DMA:
++	case ATAPI_PROT_DMA:
+ 		BUG();
+ 		break;
+ 
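
pdc_adma above, and sata_fsl, sata_mv and sata_nv below, all make the same conversion: the libata-private ata_for_each_sg() walker is replaced by the generic for_each_sg() iterator driven by qc->sg and qc->n_elem, with an explicit index variable. Reduced to a minimal, invented example (fill_prd_example and its two-word table layout are not from the patch):

#include <linux/scatterlist.h>
#include <linux/libata.h>

static void fill_prd_example(struct ata_queued_cmd *qc, __le32 *tbl)
{
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr = sg_dma_address(sg);
		u32 len  = sg_dma_len(sg);

		tbl[2 * si]     = cpu_to_le32(addr);
		tbl[2 * si + 1] = cpu_to_le32(len);
	}
}
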
+diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
+index d015b4a..922d7b2 100644
+--- a/drivers/ata/sata_fsl.c
++++ b/drivers/ata/sata_fsl.c
+@@ -333,13 +333,14 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
+ 	struct prde *prd_ptr_to_indirect_ext = NULL;
+ 	unsigned indirect_ext_segment_sz = 0;
+ 	dma_addr_t indirect_ext_segment_paddr;
++	unsigned int si;
+ 
+ 	VPRINTK("SATA FSL : cd = 0x%x, prd = 0x%x\n", cmd_desc, prd);
+ 
+ 	indirect_ext_segment_paddr = cmd_desc_paddr +
+ 	    SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
+ 
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		dma_addr_t sg_addr = sg_dma_address(sg);
+ 		u32 sg_len = sg_dma_len(sg);
+ 
+@@ -417,7 +418,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+ 	}
+ 
+ 	/* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
+-	if (is_atapi_taskfile(&qc->tf)) {
++	if (ata_is_atapi(qc->tf.protocol)) {
+ 		desc_info |= ATAPI_CMD;
+ 		memset((void *)&cd->acmd, 0, 32);
+ 		memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len);
+diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
+index 323c087..96e614a 100644
+--- a/drivers/ata/sata_inic162x.c
++++ b/drivers/ata/sata_inic162x.c
+@@ -585,7 +585,7 @@ static struct ata_port_operations inic_port_ops = {
+ };
+ 
+ static struct ata_port_info inic_port_info = {
+-	/* For some reason, ATA_PROT_ATAPI is broken on this
++	/* For some reason, ATAPI_PROT_PIO is broken on this
+ 	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
+ 	 * manages to report the wrong ireason and ignoring ireason
+ 	 * results in machine lock up.  Tell libata to always prefer
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 37b850a..7e72463 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -1136,9 +1136,10 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
+ 	struct mv_port_priv *pp = qc->ap->private_data;
+ 	struct scatterlist *sg;
+ 	struct mv_sg *mv_sg, *last_sg = NULL;
++	unsigned int si;
+ 
+ 	mv_sg = pp->sg_tbl;
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		dma_addr_t addr = sg_dma_address(sg);
+ 		u32 sg_len = sg_dma_len(sg);
+ 
+diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
+index ed5dc7c..a0f98fd 100644
+--- a/drivers/ata/sata_nv.c
++++ b/drivers/ata/sata_nv.c
+@@ -1336,21 +1336,18 @@ static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
+ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
+ {
+ 	struct nv_adma_port_priv *pp = qc->ap->private_data;
+-	unsigned int idx;
+ 	struct nv_adma_prd *aprd;
+ 	struct scatterlist *sg;
++	unsigned int si;
+ 
+ 	VPRINTK("ENTER\n");
+ 
+-	idx = 0;
 -
--	/* absorb kw2 to other subkeys */
--	/* round 2 */
--	SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1);
--	/* round 4 */
--	SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1);
--	/* round 6 */
--	SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1);
--	SUBL(1) ^= SUBR(1) & ~SUBR(9);
--	dw = SUBL(1) & SUBL(9),
--		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */
--	/* round 8 */
--	SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1);
--	/* round 10 */
--	SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1);
--	/* round 12 */
--	SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1);
--	SUBL(1) ^= SUBR(1) & ~SUBR(17);
--	dw = SUBL(1) & SUBL(17),
--		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */
--	/* round 14 */
--	SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1);
--	/* round 16 */
--	SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1);
--	/* round 18 */
--	SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1);
--	/* kw3 */
--	SUBL(24) ^= SUBL(1); SUBR(24) ^= SUBR(1);
--
--	/* absorb kw4 to other subkeys */
--	kw4l = SUBL(25); kw4r = SUBR(25);
--	/* round 17 */
--	SUBL(22) ^= kw4l; SUBR(22) ^= kw4r;
--	/* round 15 */
--	SUBL(20) ^= kw4l; SUBR(20) ^= kw4r;
--	/* round 13 */
--	SUBL(18) ^= kw4l; SUBR(18) ^= kw4r;
--	kw4l ^= kw4r & ~SUBR(16);
--	dw = kw4l & SUBL(16),
--		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */
--	/* round 11 */
--	SUBL(14) ^= kw4l; SUBR(14) ^= kw4r;
--	/* round 9 */
--	SUBL(12) ^= kw4l; SUBR(12) ^= kw4r;
--	/* round 7 */
--	SUBL(10) ^= kw4l; SUBR(10) ^= kw4r;
--	kw4l ^= kw4r & ~SUBR(8);
--	dw = kw4l & SUBL(8),
--		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */
--	/* round 5 */
--	SUBL(6) ^= kw4l; SUBR(6) ^= kw4r;
--	/* round 3 */
--	SUBL(4) ^= kw4l; SUBR(4) ^= kw4r;
--	/* round 1 */
--	SUBL(2) ^= kw4l; SUBR(2) ^= kw4r;
--	/* kw1 */
--	SUBL(0) ^= kw4l; SUBR(0) ^= kw4r;
--
--
--	/* key XOR is end of F-function */
--	CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */
--	CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2);
--	CAMELLIA_SUBKEY_L(2) = SUBL(3);       /* round 1 */
--	CAMELLIA_SUBKEY_R(2) = SUBR(3);
--	CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */
--	CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4);
--	CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */
--	CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5);
--	CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */
--	CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6);
--	CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */
--	CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7);
--	tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8));
--	dw = tl & SUBL(8),  /* FL(kl1) */
--		tr = SUBR(10) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */
--	CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr;
--	CAMELLIA_SUBKEY_L(8) = SUBL(8);       /* FL(kl1) */
--	CAMELLIA_SUBKEY_R(8) = SUBR(8);
--	CAMELLIA_SUBKEY_L(9) = SUBL(9);       /* FLinv(kl2) */
--	CAMELLIA_SUBKEY_R(9) = SUBR(9);
--	tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9));
--	dw = tl & SUBL(9),  /* FLinv(kl2) */
--		tr = SUBR(7) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */
--	CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11);
--	CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */
--	CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12);
--	CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */
--	CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13);
--	CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */
--	CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14);
--	CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */
--	CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15);
--	tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16));
--	dw = tl & SUBL(16), /* FL(kl3) */
--		tr = SUBR(18) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */
--	CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr;
--	CAMELLIA_SUBKEY_L(16) = SUBL(16);     /* FL(kl3) */
--	CAMELLIA_SUBKEY_R(16) = SUBR(16);
--	CAMELLIA_SUBKEY_L(17) = SUBL(17);     /* FLinv(kl4) */
--	CAMELLIA_SUBKEY_R(17) = SUBR(17);
--	tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17));
--	dw = tl & SUBL(17), /* FLinv(kl4) */
--		tr = SUBR(15) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */
--	CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19);
--	CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */
--	CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20);
--	CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */
--	CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21);
--	CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */
--	CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22);
--	CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */
--	CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23);
--	CAMELLIA_SUBKEY_L(23) = SUBL(22);     /* round 18 */
--	CAMELLIA_SUBKEY_R(23) = SUBR(22);
--	CAMELLIA_SUBKEY_L(24) = SUBL(24) ^ SUBL(23); /* kw3 */
--	CAMELLIA_SUBKEY_R(24) = SUBR(24) ^ SUBR(23);
--
--	/* apply the inverse of the last half of P-function */
--	dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2),
--		dw = CAMELLIA_RL8(dw);/* round 1 */
--	CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw,
--		CAMELLIA_SUBKEY_L(2) = dw;
--	dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3),
--		dw = CAMELLIA_RL8(dw);/* round 2 */
--	CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw,
--		CAMELLIA_SUBKEY_L(3) = dw;
--	dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4),
--		dw = CAMELLIA_RL8(dw);/* round 3 */
--	CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw,
--		CAMELLIA_SUBKEY_L(4) = dw;
--	dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5),
--		dw = CAMELLIA_RL8(dw);/* round 4 */
--	CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw,
--		CAMELLIA_SUBKEY_L(5) = dw;
--	dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6),
--		dw = CAMELLIA_RL8(dw);/* round 5 */
--	CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw,
--		CAMELLIA_SUBKEY_L(6) = dw;
--	dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7),
--		dw = CAMELLIA_RL8(dw);/* round 6 */
--	CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw,
--		CAMELLIA_SUBKEY_L(7) = dw;
--	dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10),
--		dw = CAMELLIA_RL8(dw);/* round 7 */
--	CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw,
--		CAMELLIA_SUBKEY_L(10) = dw;
--	dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11),
--		dw = CAMELLIA_RL8(dw);/* round 8 */
--	CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw,
--		CAMELLIA_SUBKEY_L(11) = dw;
--	dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12),
--		dw = CAMELLIA_RL8(dw);/* round 9 */
--	CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw,
--		CAMELLIA_SUBKEY_L(12) = dw;
--	dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13),
--		dw = CAMELLIA_RL8(dw);/* round 10 */
--	CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw,
--		CAMELLIA_SUBKEY_L(13) = dw;
--	dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14),
--		dw = CAMELLIA_RL8(dw);/* round 11 */
--	CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw,
--		CAMELLIA_SUBKEY_L(14) = dw;
--	dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15),
--		dw = CAMELLIA_RL8(dw);/* round 12 */
--	CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw,
--		CAMELLIA_SUBKEY_L(15) = dw;
--	dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18),
--		dw = CAMELLIA_RL8(dw);/* round 13 */
--	CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw,
--		CAMELLIA_SUBKEY_L(18) = dw;
--	dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19),
--		dw = CAMELLIA_RL8(dw);/* round 14 */
--	CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw,
--		CAMELLIA_SUBKEY_L(19) = dw;
--	dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20),
--		dw = CAMELLIA_RL8(dw);/* round 15 */
--	CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw,
--		CAMELLIA_SUBKEY_L(20) = dw;
--	dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21),
--		dw = CAMELLIA_RL8(dw);/* round 16 */
--	CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw,
--		CAMELLIA_SUBKEY_L(21) = dw;
--	dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22),
--		dw = CAMELLIA_RL8(dw);/* round 17 */
--	CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw,
--		CAMELLIA_SUBKEY_L(22) = dw;
--	dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23),
--		dw = CAMELLIA_RL8(dw);/* round 18 */
--	CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw,
--		CAMELLIA_SUBKEY_L(23) = dw;
+-	ata_for_each_sg(sg, qc) {
+-		aprd = (idx < 5) ? &cpb->aprd[idx] :
+-			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
+-		nv_adma_fill_aprd(qc, sg, idx, aprd);
+-		idx++;
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
++		aprd = (si < 5) ? &cpb->aprd[si] :
++			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
++		nv_adma_fill_aprd(qc, sg, si, aprd);
+ 	}
+-	if (idx > 5)
++	if (si > 5)
+ 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
+ 	else
+ 		cpb->next_aprd = cpu_to_le64(0);
+@@ -1995,17 +1992,14 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct scatterlist *sg;
+-	unsigned int idx;
+ 	struct nv_swncq_port_priv *pp = ap->private_data;
+ 	struct ata_prd *prd;
 -
--	return;
-+	camellia_setup_tail(subkey, subL, subR, 24);
+-	WARN_ON(qc->__sg == NULL);
+-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
++	unsigned int si, idx;
+ 
+ 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
+ 
+ 	idx = 0;
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		u32 addr, offset;
+ 		u32 sg_len, len;
+ 
+@@ -2027,8 +2021,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+ 		}
+ 	}
+ 
+-	if (idx)
+-		prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
++	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
  }
  
--
- static void camellia_setup256(const unsigned char *key, u32 *subkey)
- {
--	u32 kll,klr,krl,krr;           /* left half of key */
--	u32 krll,krlr,krrl,krrr;       /* right half of key */
-+	u32 kll, klr, krl, krr;        /* left half of key */
-+	u32 krll, krlr, krrl, krrr;    /* right half of key */
- 	u32 il, ir, t0, t1, w0, w1;    /* temporary variables */
--	u32 kw4l, kw4r, dw, tl, tr;
- 	u32 subL[34];
- 	u32 subR[34];
+ static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
+diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
+index 7914def..a07d319 100644
+--- a/drivers/ata/sata_promise.c
++++ b/drivers/ata/sata_promise.c
+@@ -450,19 +450,19 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
+ 	struct pdc_port_priv *pp = ap->private_data;
+ 	u8 *buf = pp->pkt;
+ 	u32 *buf32 = (u32 *) buf;
+-	unsigned int dev_sel, feature, nbytes;
++	unsigned int dev_sel, feature;
  
- 	/**
- 	 *  key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
--	 *  (|| is concatination)
-+	 *  (|| is concatenation)
+ 	/* set control bits (byte 0), zero delay seq id (byte 3),
+ 	 * and seq id (byte 2)
  	 */
--
--	kll  = GETU32(key     );
--	klr  = GETU32(key +  4);
--	krl  = GETU32(key +  8);
--	krr  = GETU32(key + 12);
--	krll = GETU32(key + 16);
--	krlr = GETU32(key + 20);
--	krrl = GETU32(key + 24);
--	krrr = GETU32(key + 28);
-+	GETU32(kll,  key     );
-+	GETU32(klr,  key +  4);
-+	GETU32(krl,  key +  8);
-+	GETU32(krr,  key + 12);
-+	GETU32(krll, key + 16);
-+	GETU32(krlr, key + 20);
-+	GETU32(krrl, key + 24);
-+	GETU32(krrr, key + 28);
+ 	switch (qc->tf.protocol) {
+-	case ATA_PROT_ATAPI_DMA:
++	case ATAPI_PROT_DMA:
+ 		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+ 			buf32[0] = cpu_to_le32(PDC_PKT_READ);
+ 		else
+ 			buf32[0] = 0;
+ 		break;
+-	case ATA_PROT_ATAPI_NODATA:
++	case ATAPI_PROT_NODATA:
+ 		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
+ 		break;
+ 	default:
+@@ -473,45 +473,37 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
+ 	buf32[2] = 0;				/* no next-packet */
  
- 	/* generate KL dependent subkeys */
- 	/* kw1 */
--	SUBL(0) = kll; SUBR(0) = klr;
-+	subL[0] = kll; subR[0] = klr;
- 	/* kw2 */
--	SUBL(1) = krl; SUBR(1) = krr;
--	CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 45);
-+	subL[1] = krl; subR[1] = krr;
-+	ROLDQo32(kll, klr, krl, krr, w0, w1, 45);
- 	/* k9 */
--	SUBL(12) = kll; SUBR(12) = klr;
-+	subL[12] = kll; subR[12] = klr;
- 	/* k10 */
--	SUBL(13) = krl; SUBR(13) = krr;
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
-+	subL[13] = krl; subR[13] = krr;
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
- 	/* kl3 */
--	SUBL(16) = kll; SUBR(16) = klr;
-+	subL[16] = kll; subR[16] = klr;
- 	/* kl4 */
--	SUBL(17) = krl; SUBR(17) = krr;
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
-+	subL[17] = krl; subR[17] = krr;
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 17);
- 	/* k17 */
--	SUBL(22) = kll; SUBR(22) = klr;
-+	subL[22] = kll; subR[22] = klr;
- 	/* k18 */
--	SUBL(23) = krl; SUBR(23) = krr;
--	CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
-+	subL[23] = krl; subR[23] = krr;
-+	ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
- 	/* k23 */
--	SUBL(30) = kll; SUBR(30) = klr;
-+	subL[30] = kll; subR[30] = klr;
- 	/* k24 */
--	SUBL(31) = krl; SUBR(31) = krr;
-+	subL[31] = krl; subR[31] = krr;
+ 	/* select drive */
+-	if (sata_scr_valid(&ap->link)) {
++	if (sata_scr_valid(&ap->link))
+ 		dev_sel = PDC_DEVICE_SATA;
+-	} else {
+-		dev_sel = ATA_DEVICE_OBS;
+-		if (qc->dev->devno != 0)
+-			dev_sel |= ATA_DEV1;
+-	}
++	else
++		dev_sel = qc->tf.device;
++
+ 	buf[12] = (1 << 5) | ATA_REG_DEVICE;
+ 	buf[13] = dev_sel;
+ 	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
+ 	buf[15] = dev_sel; /* once more, waiting for BSY to clear */
  
- 	/* generate KR dependent subkeys */
--	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
-+	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
- 	/* k3 */
--	SUBL(4) = krll; SUBR(4) = krlr;
-+	subL[4] = krll; subR[4] = krlr;
- 	/* k4 */
--	SUBL(5) = krrl; SUBR(5) = krrr;
--	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
-+	subL[5] = krrl; subR[5] = krrr;
-+	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
- 	/* kl1 */
--	SUBL(8) = krll; SUBR(8) = krlr;
-+	subL[8] = krll; subR[8] = krlr;
- 	/* kl2 */
--	SUBL(9) = krrl; SUBR(9) = krrr;
--	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
-+	subL[9] = krrl; subR[9] = krrr;
-+	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
- 	/* k13 */
--	SUBL(18) = krll; SUBR(18) = krlr;
-+	subL[18] = krll; subR[18] = krlr;
- 	/* k14 */
--	SUBL(19) = krrl; SUBR(19) = krrr;
--	CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
-+	subL[19] = krrl; subR[19] = krrr;
-+	ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
- 	/* k19 */
--	SUBL(26) = krll; SUBR(26) = krlr;
-+	subL[26] = krll; subR[26] = krlr;
- 	/* k20 */
--	SUBL(27) = krrl; SUBR(27) = krrr;
--	CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
-+	subL[27] = krrl; subR[27] = krrr;
-+	ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
+ 	buf[16] = (1 << 5) | ATA_REG_NSECT;
+-	buf[17] = 0x00;
++	buf[17] = qc->tf.nsect;
+ 	buf[18] = (1 << 5) | ATA_REG_LBAL;
+-	buf[19] = 0x00;
++	buf[19] = qc->tf.lbal;
  
- 	/* generate KA */
--	kll = SUBL(0) ^ krll; klr = SUBR(0) ^ krlr;
--	krl = SUBL(1) ^ krrl; krr = SUBR(1) ^ krrr;
-+	kll = subL[0] ^ krll; klr = subR[0] ^ krlr;
-+	krl = subL[1] ^ krrl; krr = subR[1] ^ krrr;
- 	CAMELLIA_F(kll, klr,
- 		   CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
- 		   w0, w1, il, ir, t0, t1);
-@@ -885,310 +798,50 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
- 	krll ^= w0; krlr ^= w1;
+ 	/* set feature and byte counter registers */
+-	if (qc->tf.protocol != ATA_PROT_ATAPI_DMA) {
++	if (qc->tf.protocol != ATAPI_PROT_DMA)
+ 		feature = PDC_FEATURE_ATAPI_PIO;
+-		/* set byte counter register to real transfer byte count */
+-		nbytes = qc->nbytes;
+-		if (nbytes > 0xffff)
+-			nbytes = 0xffff;
+-	} else {
++	else
+ 		feature = PDC_FEATURE_ATAPI_DMA;
+-		/* set byte counter register to 0 */
+-		nbytes = 0;
+-	}
++
+ 	buf[20] = (1 << 5) | ATA_REG_FEATURE;
+ 	buf[21] = feature;
+ 	buf[22] = (1 << 5) | ATA_REG_BYTEL;
+-	buf[23] = nbytes & 0xFF;
++	buf[23] = qc->tf.lbam;
+ 	buf[24] = (1 << 5) | ATA_REG_BYTEH;
+-	buf[25] = (nbytes >> 8) & 0xFF;
++	buf[25] = qc->tf.lbah;
  
- 	/* generate KA dependent subkeys */
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 15);
- 	/* k5 */
--	SUBL(6) = kll; SUBR(6) = klr;
-+	subL[6] = kll; subR[6] = klr;
- 	/* k6 */
--	SUBL(7) = krl; SUBR(7) = krr;
--	CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
-+	subL[7] = krl; subR[7] = krr;
-+	ROLDQ(kll, klr, krl, krr, w0, w1, 30);
- 	/* k11 */
--	SUBL(14) = kll; SUBR(14) = klr;
-+	subL[14] = kll; subR[14] = klr;
- 	/* k12 */
--	SUBL(15) = krl; SUBR(15) = krr;
-+	subL[15] = krl; subR[15] = krr;
- 	/* rotation left shift 32bit */
- 	/* kl5 */
--	SUBL(24) = klr; SUBR(24) = krl;
-+	subL[24] = klr; subR[24] = krl;
- 	/* kl6 */
--	SUBL(25) = krr; SUBR(25) = kll;
-+	subL[25] = krr; subR[25] = kll;
- 	/* rotation left shift 49 from k11,k12 -> k21,k22 */
--	CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 49);
-+	ROLDQo32(kll, klr, krl, krr, w0, w1, 49);
- 	/* k21 */
--	SUBL(28) = kll; SUBR(28) = klr;
-+	subL[28] = kll; subR[28] = klr;
- 	/* k22 */
--	SUBL(29) = krl; SUBR(29) = krr;
-+	subL[29] = krl; subR[29] = krr;
+ 	/* send ATAPI packet command 0xA0 */
+ 	buf[26] = (1 << 5) | ATA_REG_CMD;
+-	buf[27] = ATA_CMD_PACKET;
++	buf[27] = qc->tf.command;
  
- 	/* generate KB dependent subkeys */
- 	/* k1 */
--	SUBL(2) = krll; SUBR(2) = krlr;
-+	subL[2] = krll; subR[2] = krlr;
- 	/* k2 */
--	SUBL(3) = krrl; SUBR(3) = krrr;
--	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
-+	subL[3] = krrl; subR[3] = krrr;
-+	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
- 	/* k7 */
--	SUBL(10) = krll; SUBR(10) = krlr;
-+	subL[10] = krll; subR[10] = krlr;
- 	/* k8 */
--	SUBL(11) = krrl; SUBR(11) = krrr;
--	CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
-+	subL[11] = krrl; subR[11] = krrr;
-+	ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
- 	/* k15 */
--	SUBL(20) = krll; SUBR(20) = krlr;
-+	subL[20] = krll; subR[20] = krlr;
- 	/* k16 */
--	SUBL(21) = krrl; SUBR(21) = krrr;
--	CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51);
-+	subL[21] = krrl; subR[21] = krrr;
-+	ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51);
- 	/* kw3 */
--	SUBL(32) = krll; SUBR(32) = krlr;
-+	subL[32] = krll; subR[32] = krlr;
- 	/* kw4 */
--	SUBL(33) = krrl; SUBR(33) = krrr;
--
--	/* absorb kw2 to other subkeys */
--	/* round 2 */
--	SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1);
--	/* round 4 */
--	SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1);
--	/* round 6 */
--	SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1);
--	SUBL(1) ^= SUBR(1) & ~SUBR(9);
--	dw = SUBL(1) & SUBL(9),
--		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */
--	/* round 8 */
--	SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1);
--	/* round 10 */
--	SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1);
--	/* round 12 */
--	SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1);
--	SUBL(1) ^= SUBR(1) & ~SUBR(17);
--	dw = SUBL(1) & SUBL(17),
--		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */
--	/* round 14 */
--	SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1);
--	/* round 16 */
--	SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1);
--	/* round 18 */
--	SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1);
--	SUBL(1) ^= SUBR(1) & ~SUBR(25);
--	dw = SUBL(1) & SUBL(25),
--		SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl6) */
--	/* round 20 */
--	SUBL(27) ^= SUBL(1); SUBR(27) ^= SUBR(1);
--	/* round 22 */
--	SUBL(29) ^= SUBL(1); SUBR(29) ^= SUBR(1);
--	/* round 24 */
--	SUBL(31) ^= SUBL(1); SUBR(31) ^= SUBR(1);
--	/* kw3 */
--	SUBL(32) ^= SUBL(1); SUBR(32) ^= SUBR(1);
--
--
--	/* absorb kw4 to other subkeys */
--	kw4l = SUBL(33); kw4r = SUBR(33);
--	/* round 23 */
--	SUBL(30) ^= kw4l; SUBR(30) ^= kw4r;
--	/* round 21 */
--	SUBL(28) ^= kw4l; SUBR(28) ^= kw4r;
--	/* round 19 */
--	SUBL(26) ^= kw4l; SUBR(26) ^= kw4r;
--	kw4l ^= kw4r & ~SUBR(24);
--	dw = kw4l & SUBL(24),
--		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl5) */
--	/* round 17 */
--	SUBL(22) ^= kw4l; SUBR(22) ^= kw4r;
--	/* round 15 */
--	SUBL(20) ^= kw4l; SUBR(20) ^= kw4r;
--	/* round 13 */
--	SUBL(18) ^= kw4l; SUBR(18) ^= kw4r;
--	kw4l ^= kw4r & ~SUBR(16);
--	dw = kw4l & SUBL(16),
--		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */
--	/* round 11 */
--	SUBL(14) ^= kw4l; SUBR(14) ^= kw4r;
--	/* round 9 */
--	SUBL(12) ^= kw4l; SUBR(12) ^= kw4r;
--	/* round 7 */
--	SUBL(10) ^= kw4l; SUBR(10) ^= kw4r;
--	kw4l ^= kw4r & ~SUBR(8);
--	dw = kw4l & SUBL(8),
--		kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */
--	/* round 5 */
--	SUBL(6) ^= kw4l; SUBR(6) ^= kw4r;
--	/* round 3 */
--	SUBL(4) ^= kw4l; SUBR(4) ^= kw4r;
--	/* round 1 */
--	SUBL(2) ^= kw4l; SUBR(2) ^= kw4r;
--	/* kw1 */
--	SUBL(0) ^= kw4l; SUBR(0) ^= kw4r;
-+	subL[33] = krrl; subR[33] = krrr;
+ 	/* select drive and check DRQ */
+ 	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
+@@ -541,17 +533,15 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct scatterlist *sg;
+-	unsigned int idx;
+ 	const u32 SG_COUNT_ASIC_BUG = 41*4;
++	unsigned int si, idx;
++	u32 len;
  
--	/* key XOR is end of F-function */
--	CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */
--	CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2);
--	CAMELLIA_SUBKEY_L(2) = SUBL(3);       /* round 1 */
--	CAMELLIA_SUBKEY_R(2) = SUBR(3);
--	CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */
--	CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4);
--	CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */
--	CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5);
--	CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */
--	CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6);
--	CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */
--	CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7);
--	tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8));
--	dw = tl & SUBL(8),  /* FL(kl1) */
--		tr = SUBR(10) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */
--	CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr;
--	CAMELLIA_SUBKEY_L(8) = SUBL(8);       /* FL(kl1) */
--	CAMELLIA_SUBKEY_R(8) = SUBR(8);
--	CAMELLIA_SUBKEY_L(9) = SUBL(9);       /* FLinv(kl2) */
--	CAMELLIA_SUBKEY_R(9) = SUBR(9);
--	tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9));
--	dw = tl & SUBL(9),  /* FLinv(kl2) */
--		tr = SUBR(7) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */
--	CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11);
--	CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */
--	CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12);
--	CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */
--	CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13);
--	CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */
--	CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14);
--	CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */
--	CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15);
--	tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16));
--	dw = tl & SUBL(16), /* FL(kl3) */
--		tr = SUBR(18) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */
--	CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr;
--	CAMELLIA_SUBKEY_L(16) = SUBL(16);     /* FL(kl3) */
--	CAMELLIA_SUBKEY_R(16) = SUBR(16);
--	CAMELLIA_SUBKEY_L(17) = SUBL(17);     /* FLinv(kl4) */
--	CAMELLIA_SUBKEY_R(17) = SUBR(17);
--	tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17));
--	dw = tl & SUBL(17), /* FLinv(kl4) */
--		tr = SUBR(15) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */
--	CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19);
--	CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */
--	CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20);
--	CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */
--	CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21);
--	CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */
--	CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22);
--	CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */
--	CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23);
--	tl = SUBL(26) ^ (SUBR(26)
--			 & ~SUBR(24));
--	dw = tl & SUBL(24), /* FL(kl5) */
--		tr = SUBR(26) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(23) = SUBL(22) ^ tl; /* round 18 */
--	CAMELLIA_SUBKEY_R(23) = SUBR(22) ^ tr;
--	CAMELLIA_SUBKEY_L(24) = SUBL(24);     /* FL(kl5) */
--	CAMELLIA_SUBKEY_R(24) = SUBR(24);
--	CAMELLIA_SUBKEY_L(25) = SUBL(25);     /* FLinv(kl6) */
--	CAMELLIA_SUBKEY_R(25) = SUBR(25);
--	tl = SUBL(23) ^ (SUBR(23) &
--			 ~SUBR(25));
--	dw = tl & SUBL(25), /* FLinv(kl6) */
--		tr = SUBR(23) ^ CAMELLIA_RL1(dw);
--	CAMELLIA_SUBKEY_L(26) = tl ^ SUBL(27); /* round 19 */
--	CAMELLIA_SUBKEY_R(26) = tr ^ SUBR(27);
--	CAMELLIA_SUBKEY_L(27) = SUBL(26) ^ SUBL(28); /* round 20 */
--	CAMELLIA_SUBKEY_R(27) = SUBR(26) ^ SUBR(28);
--	CAMELLIA_SUBKEY_L(28) = SUBL(27) ^ SUBL(29); /* round 21 */
--	CAMELLIA_SUBKEY_R(28) = SUBR(27) ^ SUBR(29);
--	CAMELLIA_SUBKEY_L(29) = SUBL(28) ^ SUBL(30); /* round 22 */
--	CAMELLIA_SUBKEY_R(29) = SUBR(28) ^ SUBR(30);
--	CAMELLIA_SUBKEY_L(30) = SUBL(29) ^ SUBL(31); /* round 23 */
--	CAMELLIA_SUBKEY_R(30) = SUBR(29) ^ SUBR(31);
--	CAMELLIA_SUBKEY_L(31) = SUBL(30);     /* round 24 */
--	CAMELLIA_SUBKEY_R(31) = SUBR(30);
--	CAMELLIA_SUBKEY_L(32) = SUBL(32) ^ SUBL(31); /* kw3 */
--	CAMELLIA_SUBKEY_R(32) = SUBR(32) ^ SUBR(31);
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ 		return;
+ 
+-	WARN_ON(qc->__sg == NULL);
+-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
 -
--	/* apply the inverse of the last half of P-function */
--	dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2),
--		dw = CAMELLIA_RL8(dw);/* round 1 */
--	CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw,
--		CAMELLIA_SUBKEY_L(2) = dw;
--	dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3),
--		dw = CAMELLIA_RL8(dw);/* round 2 */
--	CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw,
--		CAMELLIA_SUBKEY_L(3) = dw;
--	dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4),
--		dw = CAMELLIA_RL8(dw);/* round 3 */
--	CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw,
--		CAMELLIA_SUBKEY_L(4) = dw;
--	dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5),
--		dw = CAMELLIA_RL8(dw);/* round 4 */
--	CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw,
--	CAMELLIA_SUBKEY_L(5) = dw;
--	dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6),
--		dw = CAMELLIA_RL8(dw);/* round 5 */
--	CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw,
--		CAMELLIA_SUBKEY_L(6) = dw;
--	dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7),
--		dw = CAMELLIA_RL8(dw);/* round 6 */
--	CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw,
--		CAMELLIA_SUBKEY_L(7) = dw;
--	dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10),
--		dw = CAMELLIA_RL8(dw);/* round 7 */
--	CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw,
--		CAMELLIA_SUBKEY_L(10) = dw;
--	dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11),
--	    dw = CAMELLIA_RL8(dw);/* round 8 */
--	CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw,
--		CAMELLIA_SUBKEY_L(11) = dw;
--	dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12),
--		dw = CAMELLIA_RL8(dw);/* round 9 */
--	CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw,
--		CAMELLIA_SUBKEY_L(12) = dw;
--	dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13),
--		dw = CAMELLIA_RL8(dw);/* round 10 */
--	CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw,
--		CAMELLIA_SUBKEY_L(13) = dw;
--	dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14),
--		dw = CAMELLIA_RL8(dw);/* round 11 */
--	CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw,
--		CAMELLIA_SUBKEY_L(14) = dw;
--	dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15),
--		dw = CAMELLIA_RL8(dw);/* round 12 */
--	CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw,
--		CAMELLIA_SUBKEY_L(15) = dw;
--	dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18),
--		dw = CAMELLIA_RL8(dw);/* round 13 */
--	CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw,
--		CAMELLIA_SUBKEY_L(18) = dw;
--	dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19),
--		dw = CAMELLIA_RL8(dw);/* round 14 */
--	CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw,
--		CAMELLIA_SUBKEY_L(19) = dw;
--	dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20),
--		dw = CAMELLIA_RL8(dw);/* round 15 */
--	CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw,
--		CAMELLIA_SUBKEY_L(20) = dw;
--	dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21),
--		dw = CAMELLIA_RL8(dw);/* round 16 */
--	CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw,
--		CAMELLIA_SUBKEY_L(21) = dw;
--	dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22),
--		dw = CAMELLIA_RL8(dw);/* round 17 */
--	CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw,
--		CAMELLIA_SUBKEY_L(22) = dw;
--	dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23),
--		dw = CAMELLIA_RL8(dw);/* round 18 */
--	CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw,
--		CAMELLIA_SUBKEY_L(23) = dw;
--	dw = CAMELLIA_SUBKEY_L(26) ^ CAMELLIA_SUBKEY_R(26),
--		dw = CAMELLIA_RL8(dw);/* round 19 */
--	CAMELLIA_SUBKEY_R(26) = CAMELLIA_SUBKEY_L(26) ^ dw,
--		CAMELLIA_SUBKEY_L(26) = dw;
--	dw = CAMELLIA_SUBKEY_L(27) ^ CAMELLIA_SUBKEY_R(27),
--		dw = CAMELLIA_RL8(dw);/* round 20 */
--	CAMELLIA_SUBKEY_R(27) = CAMELLIA_SUBKEY_L(27) ^ dw,
--		CAMELLIA_SUBKEY_L(27) = dw;
--	dw = CAMELLIA_SUBKEY_L(28) ^ CAMELLIA_SUBKEY_R(28),
--		dw = CAMELLIA_RL8(dw);/* round 21 */
--	CAMELLIA_SUBKEY_R(28) = CAMELLIA_SUBKEY_L(28) ^ dw,
--		CAMELLIA_SUBKEY_L(28) = dw;
--	dw = CAMELLIA_SUBKEY_L(29) ^ CAMELLIA_SUBKEY_R(29),
--		dw = CAMELLIA_RL8(dw);/* round 22 */
--	CAMELLIA_SUBKEY_R(29) = CAMELLIA_SUBKEY_L(29) ^ dw,
--		CAMELLIA_SUBKEY_L(29) = dw;
--	dw = CAMELLIA_SUBKEY_L(30) ^ CAMELLIA_SUBKEY_R(30),
--		dw = CAMELLIA_RL8(dw);/* round 23 */
--	CAMELLIA_SUBKEY_R(30) = CAMELLIA_SUBKEY_L(30) ^ dw,
--		CAMELLIA_SUBKEY_L(30) = dw;
--	dw = CAMELLIA_SUBKEY_L(31) ^ CAMELLIA_SUBKEY_R(31),
--		dw = CAMELLIA_RL8(dw);/* round 24 */
--	CAMELLIA_SUBKEY_R(31) = CAMELLIA_SUBKEY_L(31) ^ dw,
--		CAMELLIA_SUBKEY_L(31) = dw;
+ 	idx = 0;
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		u32 addr, offset;
+ 		u32 sg_len, len;
+ 
+@@ -578,29 +568,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
+ 		}
+ 	}
+ 
+-	if (idx) {
+-		u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
++	len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+ 
+-		if (len > SG_COUNT_ASIC_BUG) {
+-			u32 addr;
++	if (len > SG_COUNT_ASIC_BUG) {
++		u32 addr;
+ 
+-			VPRINTK("Splitting last PRD.\n");
++		VPRINTK("Splitting last PRD.\n");
+ 
+-			addr = le32_to_cpu(ap->prd[idx - 1].addr);
+-			ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
+-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
++		addr = le32_to_cpu(ap->prd[idx - 1].addr);
++		ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
++		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+ 
+-			addr = addr + len - SG_COUNT_ASIC_BUG;
+-			len = SG_COUNT_ASIC_BUG;
+-			ap->prd[idx].addr = cpu_to_le32(addr);
+-			ap->prd[idx].flags_len = cpu_to_le32(len);
+-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
++		addr = addr + len - SG_COUNT_ASIC_BUG;
++		len = SG_COUNT_ASIC_BUG;
++		ap->prd[idx].addr = cpu_to_le32(addr);
++		ap->prd[idx].flags_len = cpu_to_le32(len);
++		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+ 
+-			idx++;
+-		}
 -
--	return;
-+	camellia_setup_tail(subkey, subL, subR, 32);
+-		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
++		idx++;
+ 	}
++
++	ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
  }
  
- static void camellia_setup192(const unsigned char *key, u32 *subkey)
-@@ -1197,482 +850,168 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
- 	u32 krll, krlr, krrl,krrr;
+ static void pdc_qc_prep(struct ata_queued_cmd *qc)
+@@ -627,14 +615,14 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
+ 		pdc_pkt_footer(&qc->tf, pp->pkt, i);
+ 		break;
  
- 	memcpy(kk, key, 24);
--	memcpy((unsigned char *)&krll, key+16,4);
--	memcpy((unsigned char *)&krlr, key+20,4);
-+	memcpy((unsigned char *)&krll, key+16, 4);
-+	memcpy((unsigned char *)&krlr, key+20, 4);
- 	krrl = ~krll;
- 	krrr = ~krlr;
- 	memcpy(kk+24, (unsigned char *)&krrl, 4);
- 	memcpy(kk+28, (unsigned char *)&krrr, 4);
- 	camellia_setup256(kk, subkey);
--	return;
+-	case ATA_PROT_ATAPI:
++	case ATAPI_PROT_PIO:
+ 		pdc_fill_sg(qc);
+ 		break;
+ 
+-	case ATA_PROT_ATAPI_DMA:
++	case ATAPI_PROT_DMA:
+ 		pdc_fill_sg(qc);
+ 		/*FALLTHROUGH*/
+-	case ATA_PROT_ATAPI_NODATA:
++	case ATAPI_PROT_NODATA:
+ 		pdc_atapi_pkt(qc);
+ 		break;
+ 
+@@ -754,8 +742,8 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
+ 	switch (qc->tf.protocol) {
+ 	case ATA_PROT_DMA:
+ 	case ATA_PROT_NODATA:
+-	case ATA_PROT_ATAPI_DMA:
+-	case ATA_PROT_ATAPI_NODATA:
++	case ATAPI_PROT_DMA:
++	case ATAPI_PROT_NODATA:
+ 		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
+ 		ata_qc_complete(qc);
+ 		handled = 1;
+@@ -900,7 +888,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
+ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
+ {
+ 	switch (qc->tf.protocol) {
+-	case ATA_PROT_ATAPI_NODATA:
++	case ATAPI_PROT_NODATA:
+ 		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+ 			break;
+ 		/*FALLTHROUGH*/
+@@ -908,7 +896,7 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
+ 		if (qc->tf.flags & ATA_TFLAG_POLLING)
+ 			break;
+ 		/*FALLTHROUGH*/
+-	case ATA_PROT_ATAPI_DMA:
++	case ATAPI_PROT_DMA:
+ 	case ATA_PROT_DMA:
+ 		pdc_packet_start(qc);
+ 		return 0;
+@@ -922,16 +910,14 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
+ 
+ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+ {
+-	WARN_ON(tf->protocol == ATA_PROT_DMA ||
+-		tf->protocol == ATA_PROT_ATAPI_DMA);
++	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
+ 	ata_tf_load(ap, tf);
  }
  
+ static void pdc_exec_command_mmio(struct ata_port *ap,
+ 				  const struct ata_taskfile *tf)
+ {
+-	WARN_ON(tf->protocol == ATA_PROT_DMA ||
+-		tf->protocol == ATA_PROT_ATAPI_DMA);
++	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
+ 	ata_exec_command(ap, tf);
+ }
  
--/**
-- * Stuff related to camellia encryption/decryption
-+/*
-+ * Encrypt/decrypt
-  */
--static void camellia_encrypt128(const u32 *subkey, __be32 *io_text)
--{
--	u32 il,ir,t0,t1;               /* temporary valiables */
--
--	u32 io[4];
--
--	io[0] = be32_to_cpu(io_text[0]);
--	io[1] = be32_to_cpu(io_text[1]);
--	io[2] = be32_to_cpu(io_text[2]);
--	io[3] = be32_to_cpu(io_text[3]);
--
--	/* pre whitening but absorb kw2*/
--	io[0] ^= CAMELLIA_SUBKEY_L(0);
--	io[1] ^= CAMELLIA_SUBKEY_R(0);
--	/* main iteration */
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
--		     CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
--		     CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
--			 io[0],io[1],il,ir,t0,t1);
-+#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \
-+    do {								\
-+	t0 = kll;							\
-+	t2 = krr;							\
-+	t0 &= ll;							\
-+	t2 |= rr;							\
-+	rl ^= t2;							\
-+	lr ^= ROL1(t0);							\
-+	t3 = krl;							\
-+	t1 = klr;							\
-+	t3 &= rl;							\
-+	t1 |= lr;							\
-+	ll ^= t1;							\
-+	rr ^= ROL1(t3);							\
-+    } while(0)
+diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
+index 6ee5e19..00d6000 100644
+--- a/drivers/ata/sata_promise.h
++++ b/drivers/ata/sata_promise.h
+@@ -46,7 +46,7 @@ static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
+ 					  unsigned int devno, u8 *buf)
+ {
+ 	u8 dev_reg;
+-	u32 *buf32 = (u32 *) buf;
++	__le32 *buf32 = (__le32 *) buf;
  
--	/* post whitening but kw4 */
--	io[2] ^= CAMELLIA_SUBKEY_L(24);
--	io[3] ^= CAMELLIA_SUBKEY_R(24);
--
--	t0 = io[0];
--	t1 = io[1];
--	io[0] = io[2];
--	io[1] = io[3];
--	io[2] = t0;
--	io[3] = t1;
--
--	io_text[0] = cpu_to_be32(io[0]);
--	io_text[1] = cpu_to_be32(io[1]);
--	io_text[2] = cpu_to_be32(io[2]);
--	io_text[3] = cpu_to_be32(io[3]);
+ 	/* set control bits (byte 0), zero delay seq id (byte 3),
+ 	 * and seq id (byte 2)
+diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
+index c68b241..91cc12c 100644
+--- a/drivers/ata/sata_qstor.c
++++ b/drivers/ata/sata_qstor.c
+@@ -287,14 +287,10 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
+ 	struct scatterlist *sg;
+ 	struct ata_port *ap = qc->ap;
+ 	struct qs_port_priv *pp = ap->private_data;
+-	unsigned int nelem;
+ 	u8 *prd = pp->pkt + QS_CPB_BYTES;
++	unsigned int si;
+ 
+-	WARN_ON(qc->__sg == NULL);
+-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
 -
--	return;
--}
-+#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir)		\
-+    do {								\
-+	ir =  camellia_sp1110[(u8)xr];					\
-+	il =  camellia_sp1110[    (xl >> 24)];				\
-+	ir ^= camellia_sp0222[    (xr >> 24)];				\
-+	il ^= camellia_sp0222[(u8)(xl >> 16)];				\
-+	ir ^= camellia_sp3033[(u8)(xr >> 16)];				\
-+	il ^= camellia_sp3033[(u8)(xl >> 8)];				\
-+	ir ^= camellia_sp4404[(u8)(xr >> 8)];				\
-+	il ^= camellia_sp4404[(u8)xl];					\
-+	il ^= kl;							\
-+	ir ^= il ^ kr;							\
-+	yl ^= ir;							\
-+	yr ^= ROR8(il) ^ ir;						\
-+    } while(0)
+-	nelem = 0;
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		u64 addr;
+ 		u32 len;
  
--static void camellia_decrypt128(const u32 *subkey, __be32 *io_text)
-+/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
-+static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
+@@ -306,12 +302,11 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
+ 		*(__le32 *)prd = cpu_to_le32(len);
+ 		prd += sizeof(u64);
+ 
+-		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
++		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
+ 					(unsigned long long)addr, len);
+-		nelem++;
+ 	}
+ 
+-	return nelem;
++	return si;
+ }
+ 
+ static void qs_qc_prep(struct ata_queued_cmd *qc)
+@@ -376,7 +371,7 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
+ 		qs_packet_start(qc);
+ 		return 0;
+ 
+-	case ATA_PROT_ATAPI_DMA:
++	case ATAPI_PROT_DMA:
+ 		BUG();
+ 		break;
+ 
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index f5119bf..0b8191b 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -416,15 +416,14 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
+ 		 */
+ 
+ 		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+-		 * The flag was turned on only for atapi devices.
+-		 * No need to check is_atapi_taskfile(&qc->tf) again.
++		 * The flag was turned on only for atapi devices.  No
++		 * need to check ata_is_atapi(qc->tf.protocol) again.
+ 		 */
+ 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ 			goto err_hsm;
+ 		break;
+ 	case HSM_ST_LAST:
+-		if (qc->tf.protocol == ATA_PROT_DMA ||
+-		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
++		if (ata_is_dma(qc->tf.protocol)) {
+ 			/* clear DMA-Start bit */
+ 			ap->ops->bmdma_stop(qc);
+ 
+@@ -451,8 +450,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
+ 	/* kick HSM in the ass */
+ 	ata_hsm_move(ap, qc, status, 0);
+ 
+-	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+-				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
++	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+ 		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
+ 
+ 	return;
+diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
+index 864c1c1..b4b1f91 100644
+--- a/drivers/ata/sata_sil24.c
++++ b/drivers/ata/sata_sil24.c
+@@ -813,8 +813,9 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
  {
--	u32 il,ir,t0,t1;               /* temporary valiables */
-+	u32 il,ir,t0,t1;               /* temporary variables */
+ 	struct scatterlist *sg;
+ 	struct sil24_sge *last_sge = NULL;
++	unsigned int si;
  
--	u32 io[4];
--
--	io[0] = be32_to_cpu(io_text[0]);
--	io[1] = be32_to_cpu(io_text[1]);
--	io[2] = be32_to_cpu(io_text[2]);
--	io[3] = be32_to_cpu(io_text[3]);
--
--	/* pre whitening but absorb kw2*/
--	io[0] ^= CAMELLIA_SUBKEY_L(24);
--	io[1] ^= CAMELLIA_SUBKEY_R(24);
-+	/* pre whitening but absorb kw2 */
-+	io[0] ^= SUBKEY_L(0);
-+	io[1] ^= SUBKEY_R(0);
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		sge->addr = cpu_to_le64(sg_dma_address(sg));
+ 		sge->cnt = cpu_to_le32(sg_dma_len(sg));
+ 		sge->flags = 0;
+@@ -823,8 +824,7 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
+ 		sge++;
+ 	}
  
- 	/* main iteration */
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
--		     CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
--		     CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
--			 io[0],io[1],il,ir,t0,t1);
+-	if (likely(last_sge))
+-		last_sge->flags = cpu_to_le32(SGE_TRM);
++	last_sge->flags = cpu_to_le32(SGE_TRM);
+ }
+ 
+ static int sil24_qc_defer(struct ata_queued_cmd *qc)
+@@ -852,9 +852,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
+ 	 *   set.
+ 	 *
+  	 */
+-	int is_excl = (prot == ATA_PROT_ATAPI ||
+-		       prot == ATA_PROT_ATAPI_NODATA ||
+-		       prot == ATA_PROT_ATAPI_DMA ||
++	int is_excl = (ata_is_atapi(prot) ||
+ 		       (qc->flags & ATA_QCFLAG_RESULT_TF));
+ 
+ 	if (unlikely(ap->excl_link)) {
+@@ -885,35 +883,21 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
+ 
+ 	cb = &pp->cmd_block[sil24_tag(qc->tag)];
+ 
+-	switch (qc->tf.protocol) {
+-	case ATA_PROT_PIO:
+-	case ATA_PROT_DMA:
+-	case ATA_PROT_NCQ:
+-	case ATA_PROT_NODATA:
++	if (!ata_is_atapi(qc->tf.protocol)) {
+ 		prb = &cb->ata.prb;
+ 		sge = cb->ata.sge;
+-		break;
 -
--	/* post whitening but kw4 */
--	io[2] ^= CAMELLIA_SUBKEY_L(0);
--	io[3] ^= CAMELLIA_SUBKEY_R(0);
+-	case ATA_PROT_ATAPI:
+-	case ATA_PROT_ATAPI_DMA:
+-	case ATA_PROT_ATAPI_NODATA:
++	} else {
+ 		prb = &cb->atapi.prb;
+ 		sge = cb->atapi.sge;
+ 		memset(cb->atapi.cdb, 0, 32);
+ 		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
+ 
+-		if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
++		if (ata_is_data(qc->tf.protocol)) {
+ 			if (qc->tf.flags & ATA_TFLAG_WRITE)
+ 				ctrl = PRB_CTRL_PACKET_WRITE;
+ 			else
+ 				ctrl = PRB_CTRL_PACKET_READ;
+ 		}
+-		break;
 -
--	t0 = io[0];
--	t1 = io[1];
--	io[0] = io[2];
--	io[1] = io[3];
--	io[2] = t0;
--	io[3] = t1;
+-	default:
+-		prb = NULL;	/* shut up, gcc */
+-		sge = NULL;
+-		BUG();
+ 	}
+ 
+ 	prb->ctrl = cpu_to_le16(ctrl);
+diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
+index 4d85718..e3d56bc 100644
+--- a/drivers/ata/sata_sx4.c
++++ b/drivers/ata/sata_sx4.c
+@@ -334,7 +334,7 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
+ {
+ 	u32 addr;
+ 	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
+-	u32 *buf32 = (u32 *) buf;
++	__le32 *buf32 = (__le32 *) buf;
+ 
+ 	/* output ATA packet S/G table */
+ 	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
+@@ -356,7 +356,7 @@ static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
+ {
+ 	u32 addr;
+ 	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
+-	u32 *buf32 = (u32 *) buf;
++	__le32 *buf32 = (__le32 *) buf;
+ 
+ 	/* output Host DMA packet S/G table */
+ 	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
+@@ -377,7 +377,7 @@ static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
+ 					    unsigned int portno)
+ {
+ 	unsigned int i, dw;
+-	u32 *buf32 = (u32 *) buf;
++	__le32 *buf32 = (__le32 *) buf;
+ 	u8 dev_reg;
+ 
+ 	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
+@@ -429,7 +429,8 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
+ 				     unsigned int portno)
+ {
+ 	unsigned int dw;
+-	u32 tmp, *buf32 = (u32 *) buf;
++	u32 tmp;
++	__le32 *buf32 = (__le32 *) buf;
+ 
+ 	unsigned int host_sg = PDC_20621_DIMM_BASE +
+ 			       (PDC_DIMM_WINDOW_STEP * portno) +
+@@ -473,7 +474,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
+ 	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
+ 	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
+ 	unsigned int portno = ap->port_no;
+-	unsigned int i, idx, total_len = 0, sgt_len;
++	unsigned int i, si, idx, total_len = 0, sgt_len;
+ 	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
+ 
+ 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
+@@ -487,7 +488,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
+ 	 * Build S/G table
+ 	 */
+ 	idx = 0;
+-	ata_for_each_sg(sg, qc) {
++	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ 		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
+ 		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
+ 		total_len += sg_dma_len(sg);
+@@ -700,7 +701,7 @@ static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
+ 		pdc20621_packet_start(qc);
+ 		return 0;
+ 
+-	case ATA_PROT_ATAPI_DMA:
++	case ATAPI_PROT_DMA:
+ 		BUG();
+ 		break;
+ 
+diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
+index b34b382..7b44a59 100644
+--- a/drivers/atm/ambassador.c
++++ b/drivers/atm/ambassador.c
+@@ -2163,7 +2163,6 @@ static int __devinit amb_init (amb_dev * dev)
+ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev) 
+ {
+       unsigned char pool;
+-      memset (dev, 0, sizeof(amb_dev));
+       
+       // set up known dev items straight away
+       dev->pci_dev = pci_dev; 
+@@ -2253,7 +2252,7 @@ static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_
+ 		goto out_disable;
+ 	}
+ 
+-	dev = kmalloc (sizeof(amb_dev), GFP_KERNEL);
++	dev = kzalloc(sizeof(amb_dev), GFP_KERNEL);
+ 	if (!dev) {
+ 		PRINTK (KERN_ERR, "out of memory!");
+ 		err = -ENOMEM;
+diff --git a/drivers/atm/he.c b/drivers/atm/he.c
+index 3b64a99..2e3395b 100644
+--- a/drivers/atm/he.c
++++ b/drivers/atm/he.c
+@@ -1,5 +1,3 @@
+-/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
 -
--	io_text[0] = cpu_to_be32(io[0]);
--	io_text[1] = cpu_to_be32(io[1]);
--	io_text[2] = cpu_to_be32(io[2]);
--	io_text[3] = cpu_to_be32(io[3]);
+ /*
+ 
+   he.c
+@@ -99,10 +97,6 @@
+ #define HPRINTK(fmt,args...)	do { } while (0)
+ #endif /* HE_DEBUG */
+ 
+-/* version definition */
 -
--	return;
--}
+-static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
 -
+ /* declarations */
+ 
+ static int he_open(struct atm_vcc *vcc);
+@@ -366,7 +360,7 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+ 	struct he_dev *he_dev = NULL;
+ 	int err = 0;
+ 
+-	printk(KERN_INFO "he: %s\n", version);
++	printk(KERN_INFO "ATM he driver\n");
+ 
+ 	if (pci_enable_device(pci_dev))
+ 		return -EIO;
+@@ -1643,6 +1637,8 @@ he_stop(struct he_dev *he_dev)
+ 
+ 	if (he_dev->rbpl_base) {
+ #ifdef USE_RBPL_POOL
++		int i;
++
+ 		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
+ 			void *cpuaddr = he_dev->rbpl_virt[i].virt;
+ 			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
+@@ -1665,6 +1661,8 @@ he_stop(struct he_dev *he_dev)
+ #ifdef USE_RBPS
+ 	if (he_dev->rbps_base) {
+ #ifdef USE_RBPS_POOL
++		int i;
++
+ 		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
+ 			void *cpuaddr = he_dev->rbps_virt[i].virt;
+ 			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
+@@ -2933,7 +2931,7 @@ he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
+ 
+ 	left = *pos;
+ 	if (!left--)
+-		return sprintf(page, "%s\n", version);
++		return sprintf(page, "ATM he driver\n");
+ 
+ 	if (!left--)
+ 		return sprintf(page, "%s%s\n\n",
+diff --git a/drivers/base/Makefile b/drivers/base/Makefile
+index b39ea3f..63e09c0 100644
+--- a/drivers/base/Makefile
++++ b/drivers/base/Makefile
+@@ -11,6 +11,9 @@ obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
+ obj-$(CONFIG_NUMA)	+= node.o
+ obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
+ obj-$(CONFIG_SMP)	+= topology.o
++ifeq ($(CONFIG_SYSFS),y)
++obj-$(CONFIG_MODULES)	+= module.o
++endif
+ obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+ 
+ ifeq ($(CONFIG_DEBUG_DRIVER),y)
+diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
+index 7370d7c..3b43e8a 100644
+--- a/drivers/base/attribute_container.c
++++ b/drivers/base/attribute_container.c
+@@ -61,7 +61,7 @@ attribute_container_classdev_to_container(struct class_device *classdev)
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
+ 
+-static struct list_head attribute_container_list;
++static LIST_HEAD(attribute_container_list);
+ 
+ static DEFINE_MUTEX(attribute_container_mutex);
+ 
+@@ -320,9 +320,14 @@ attribute_container_add_attrs(struct class_device *classdev)
+ 	struct class_device_attribute **attrs =	cont->attrs;
+ 	int i, error;
+ 
+-	if (!attrs)
++	BUG_ON(attrs && cont->grp);
++
++	if (!attrs && !cont->grp)
+ 		return 0;
+ 
++	if (cont->grp)
++		return sysfs_create_group(&classdev->kobj, cont->grp);
++
+ 	for (i = 0; attrs[i]; i++) {
+ 		error = class_device_create_file(classdev, attrs[i]);
+ 		if (error)
+@@ -378,9 +383,14 @@ attribute_container_remove_attrs(struct class_device *classdev)
+ 	struct class_device_attribute **attrs =	cont->attrs;
+ 	int i;
+ 
+-	if (!attrs)
++	if (!attrs && !cont->grp)
+ 		return;
+ 
++	if (cont->grp) {
++		sysfs_remove_group(&classdev->kobj, cont->grp);
++		return ;
++	}
++
+ 	for (i = 0; attrs[i]; i++)
+ 		class_device_remove_file(classdev, attrs[i]);
+ }
+@@ -429,10 +439,3 @@ attribute_container_find_class_device(struct attribute_container *cont,
+ 	return cdev;
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
 -
--/**
-- * stuff for 192 and 256bit encryption/decryption
-- */
--static void camellia_encrypt256(const u32 *subkey, __be32 *io_text)
+-int __init
+-attribute_container_init(void)
 -{
--	u32 il,ir,t0,t1;           /* temporary valiables */
--
--	u32 io[4];
--
--	io[0] = be32_to_cpu(io_text[0]);
--	io[1] = be32_to_cpu(io_text[1]);
--	io[2] = be32_to_cpu(io_text[2]);
--	io[3] = be32_to_cpu(io_text[3]);
-+#define ROUNDS(i) do { \
-+	CAMELLIA_ROUNDSM(io[0],io[1], \
-+			 SUBKEY_L(i + 2),SUBKEY_R(i + 2), \
-+			 io[2],io[3],il,ir); \
-+	CAMELLIA_ROUNDSM(io[2],io[3], \
-+			 SUBKEY_L(i + 3),SUBKEY_R(i + 3), \
-+			 io[0],io[1],il,ir); \
-+	CAMELLIA_ROUNDSM(io[0],io[1], \
-+			 SUBKEY_L(i + 4),SUBKEY_R(i + 4), \
-+			 io[2],io[3],il,ir); \
-+	CAMELLIA_ROUNDSM(io[2],io[3], \
-+			 SUBKEY_L(i + 5),SUBKEY_R(i + 5), \
-+			 io[0],io[1],il,ir); \
-+	CAMELLIA_ROUNDSM(io[0],io[1], \
-+			 SUBKEY_L(i + 6),SUBKEY_R(i + 6), \
-+			 io[2],io[3],il,ir); \
-+	CAMELLIA_ROUNDSM(io[2],io[3], \
-+			 SUBKEY_L(i + 7),SUBKEY_R(i + 7), \
-+			 io[0],io[1],il,ir); \
-+} while (0)
-+#define FLS(i) do { \
-+	CAMELLIA_FLS(io[0],io[1],io[2],io[3], \
-+		     SUBKEY_L(i + 0),SUBKEY_R(i + 0), \
-+		     SUBKEY_L(i + 1),SUBKEY_R(i + 1), \
-+		     t0,t1,il,ir); \
-+} while (0)
+-	INIT_LIST_HEAD(&attribute_container_list);
+-	return 0;
+-}
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index 10b2fb6..c044414 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -1,6 +1,42 @@
+ 
+-/* initialisation functions */
++/**
++ * struct bus_type_private - structure to hold the private to the driver core portions of the bus_type structure.
++ *
++ * @subsys - the struct kset that defines this bus.  This is the main kobject
++ * @drivers_kset - the list of drivers associated with this bus
++ * @devices_kset - the list of devices associated with this bus
++ * @klist_devices - the klist to iterate over the @devices_kset
++ * @klist_drivers - the klist to iterate over the @drivers_kset
++ * @bus_notifier - the bus notifier list for anything that cares about things
++ * on this bus.
++ * @bus - pointer back to the struct bus_type that this structure is associated
++ * with.
++ *
++ * This structure is the one that is the actual kobject allowing struct
++ * bus_type to be statically allocated safely.  Nothing outside of the driver
++ * core should ever touch these fields.
++ */
++struct bus_type_private {
++	struct kset subsys;
++	struct kset *drivers_kset;
++	struct kset *devices_kset;
++	struct klist klist_devices;
++	struct klist klist_drivers;
++	struct blocking_notifier_head bus_notifier;
++	unsigned int drivers_autoprobe:1;
++	struct bus_type *bus;
++};
 +
-+	ROUNDS(0);
-+	FLS(8);
-+	ROUNDS(8);
-+	FLS(16);
-+	ROUNDS(16);
-+	if (max == 32) {
-+		FLS(24);
-+		ROUNDS(24);
-+	}
++struct driver_private {
++	struct kobject kobj;
++	struct klist klist_devices;
++	struct klist_node knode_bus;
++	struct module_kobject *mkobj;
++	struct device_driver *driver;
++};
++#define to_driver(obj) container_of(obj, struct driver_private, kobj)
  
--	/* pre whitening but absorb kw2*/
--	io[0] ^= CAMELLIA_SUBKEY_L(0);
--	io[1] ^= CAMELLIA_SUBKEY_R(0);
--
--	/* main iteration */
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
--		     CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
--		     CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24),
--		     CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31),
--			 io[0],io[1],il,ir,t0,t1);
-+#undef ROUNDS
-+#undef FLS
++/* initialisation functions */
+ extern int devices_init(void);
+ extern int buses_init(void);
+ extern int classes_init(void);
+@@ -13,17 +49,16 @@ static inline int hypervisor_init(void) { return 0; }
+ extern int platform_bus_init(void);
+ extern int system_bus_init(void);
+ extern int cpu_dev_init(void);
+-extern int attribute_container_init(void);
  
- 	/* post whitening but kw4 */
--	io[2] ^= CAMELLIA_SUBKEY_L(32);
--	io[3] ^= CAMELLIA_SUBKEY_R(32);
--
--	t0 = io[0];
--	t1 = io[1];
--	io[0] = io[2];
--	io[1] = io[3];
--	io[2] = t0;
--	io[3] = t1;
--
--	io_text[0] = cpu_to_be32(io[0]);
--	io_text[1] = cpu_to_be32(io[1]);
--	io_text[2] = cpu_to_be32(io[2]);
--	io_text[3] = cpu_to_be32(io[3]);
--
--	return;
-+	io[2] ^= SUBKEY_L(max);
-+	io[3] ^= SUBKEY_R(max);
-+	/* NB: io[0],[1] should be swapped with [2],[3] by caller! */
+-extern int bus_add_device(struct device * dev);
+-extern void bus_attach_device(struct device * dev);
+-extern void bus_remove_device(struct device * dev);
++extern int bus_add_device(struct device *dev);
++extern void bus_attach_device(struct device *dev);
++extern void bus_remove_device(struct device *dev);
+ 
+-extern int bus_add_driver(struct device_driver *);
+-extern void bus_remove_driver(struct device_driver *);
++extern int bus_add_driver(struct device_driver *drv);
++extern void bus_remove_driver(struct device_driver *drv);
+ 
+-extern void driver_detach(struct device_driver * drv);
+-extern int driver_probe_device(struct device_driver *, struct device *);
++extern void driver_detach(struct device_driver *drv);
++extern int driver_probe_device(struct device_driver *drv, struct device *dev);
+ 
+ extern void sysdev_shutdown(void);
+ extern int sysdev_suspend(pm_message_t state);
+@@ -44,4 +79,13 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
+ 
+ extern int devres_release_all(struct device *dev);
+ 
+-extern struct kset devices_subsys;
++extern struct kset *devices_kset;
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
++extern void module_add_driver(struct module *mod, struct device_driver *drv);
++extern void module_remove_driver(struct device_driver *drv);
++#else
++static inline void module_add_driver(struct module *mod,
++				     struct device_driver *drv) { }
++static inline void module_remove_driver(struct device_driver *drv) { }
++#endif
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 9a19b07..055989e 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -3,6 +3,8 @@
+  *
+  * Copyright (c) 2002-3 Patrick Mochel
+  * Copyright (c) 2002-3 Open Source Development Labs
++ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh at suse.de>
++ * Copyright (c) 2007 Novell Inc.
+  *
+  * This file is released under the GPLv2
+  *
+@@ -17,14 +19,13 @@
+ #include "power/power.h"
+ 
+ #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
+-#define to_bus(obj) container_of(obj, struct bus_type, subsys.kobj)
++#define to_bus(obj) container_of(obj, struct bus_type_private, subsys.kobj)
+ 
+ /*
+  * sysfs bindings for drivers
+  */
+ 
+ #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
+-#define to_driver(obj) container_of(obj, struct device_driver, kobj)
+ 
+ 
+ static int __must_check bus_rescan_devices_helper(struct device *dev,
+@@ -32,37 +33,40 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
+ 
+ static struct bus_type *bus_get(struct bus_type *bus)
+ {
+-	return bus ? container_of(kset_get(&bus->subsys),
+-				struct bus_type, subsys) : NULL;
++	if (bus) {
++		kset_get(&bus->p->subsys);
++		return bus;
++	}
++	return NULL;
+ }
+ 
+ static void bus_put(struct bus_type *bus)
+ {
+-	kset_put(&bus->subsys);
++	if (bus)
++		kset_put(&bus->p->subsys);
  }
  
--
--static void camellia_decrypt256(const u32 *subkey, __be32 *io_text)
-+static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
+-static ssize_t
+-drv_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
++static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
++			     char *buf)
  {
--	u32 il,ir,t0,t1;           /* temporary valiables */
-+	u32 il,ir,t0,t1;               /* temporary variables */
+-	struct driver_attribute * drv_attr = to_drv_attr(attr);
+-	struct device_driver * drv = to_driver(kobj);
++	struct driver_attribute *drv_attr = to_drv_attr(attr);
++	struct driver_private *drv_priv = to_driver(kobj);
+ 	ssize_t ret = -EIO;
  
--	u32 io[4];
--
--	io[0] = be32_to_cpu(io_text[0]);
--	io[1] = be32_to_cpu(io_text[1]);
--	io[2] = be32_to_cpu(io_text[2]);
--	io[3] = be32_to_cpu(io_text[3]);
--
--	/* pre whitening but absorb kw2*/
--	io[0] ^= CAMELLIA_SUBKEY_L(32);
--	io[1] ^= CAMELLIA_SUBKEY_R(32);
-+	/* pre whitening but absorb kw2 */
-+	io[0] ^= SUBKEY_L(i);
-+	io[1] ^= SUBKEY_R(i);
+ 	if (drv_attr->show)
+-		ret = drv_attr->show(drv, buf);
++		ret = drv_attr->show(drv_priv->driver, buf);
+ 	return ret;
+ }
+ 
+-static ssize_t
+-drv_attr_store(struct kobject * kobj, struct attribute * attr,
+-	       const char * buf, size_t count)
++static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
++			      const char *buf, size_t count)
+ {
+-	struct driver_attribute * drv_attr = to_drv_attr(attr);
+-	struct device_driver * drv = to_driver(kobj);
++	struct driver_attribute *drv_attr = to_drv_attr(attr);
++	struct driver_private *drv_priv = to_driver(kobj);
+ 	ssize_t ret = -EIO;
+ 
+ 	if (drv_attr->store)
+-		ret = drv_attr->store(drv, buf, count);
++		ret = drv_attr->store(drv_priv->driver, buf, count);
+ 	return ret;
+ }
+ 
+@@ -71,22 +75,12 @@ static struct sysfs_ops driver_sysfs_ops = {
+ 	.store	= drv_attr_store,
+ };
  
- 	/* main iteration */
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25),
--		     CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
--		     CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
--		     t0,t1,il,ir);
--
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
--			 io[0],io[1],il,ir,t0,t1);
--
--	CAMELLIA_FLS(io[0],io[1],io[2],io[3],
--		     CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
--		     CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
--		     t0,t1,il,ir);
 -
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
--			 io[0],io[1],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[0],io[1],
--			 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
--			 io[2],io[3],il,ir,t0,t1);
--	CAMELLIA_ROUNDSM(io[2],io[3],
--			 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
--			 io[0],io[1],il,ir,t0,t1);
-+#define ROUNDS(i) do { \
-+	CAMELLIA_ROUNDSM(io[0],io[1], \
-+			 SUBKEY_L(i + 7),SUBKEY_R(i + 7), \
-+			 io[2],io[3],il,ir); \
-+	CAMELLIA_ROUNDSM(io[2],io[3], \
-+			 SUBKEY_L(i + 6),SUBKEY_R(i + 6), \
-+			 io[0],io[1],il,ir); \
-+	CAMELLIA_ROUNDSM(io[0],io[1], \
-+			 SUBKEY_L(i + 5),SUBKEY_R(i + 5), \
-+			 io[2],io[3],il,ir); \
-+	CAMELLIA_ROUNDSM(io[2],io[3], \
-+			 SUBKEY_L(i + 4),SUBKEY_R(i + 4), \
-+			 io[0],io[1],il,ir); \
-+	CAMELLIA_ROUNDSM(io[0],io[1], \
-+			 SUBKEY_L(i + 3),SUBKEY_R(i + 3), \
-+			 io[2],io[3],il,ir); \
-+	CAMELLIA_ROUNDSM(io[2],io[3], \
-+			 SUBKEY_L(i + 2),SUBKEY_R(i + 2), \
-+			 io[0],io[1],il,ir); \
-+} while (0)
-+#define FLS(i) do { \
-+	CAMELLIA_FLS(io[0],io[1],io[2],io[3], \
-+		     SUBKEY_L(i + 1),SUBKEY_R(i + 1), \
-+		     SUBKEY_L(i + 0),SUBKEY_R(i + 0), \
-+		     t0,t1,il,ir); \
-+} while (0)
-+
-+	if (i == 32) {
-+		ROUNDS(24);
-+		FLS(24);
-+	}
-+	ROUNDS(16);
-+	FLS(16);
-+	ROUNDS(8);
-+	FLS(8);
-+	ROUNDS(0);
+-static void driver_release(struct kobject * kobj)
++static void driver_release(struct kobject *kobj)
+ {
+-	/*
+-	 * Yes this is an empty release function, it is this way because struct
+-	 * device is always a static object, not a dynamic one.  Yes, this is
+-	 * not nice and bad, but remember, drivers are code, reference counted
+-	 * by the module count, not a device, which is really data.  And yes,
+-	 * in the future I do want to have all drivers be created dynamically,
+-	 * and am working toward that goal, but it will take a bit longer...
+-	 *
+-	 * But do not let this example give _anyone_ the idea that they can
+-	 * create a release function without any code in it at all, to do that
+-	 * is almost always wrong.  If you have any questions about this,
+-	 * please send an email to <greg at kroah.com>
+-	 */
++	struct driver_private *drv_priv = to_driver(kobj);
 +
-+#undef ROUNDS
-+#undef FLS
++	pr_debug("driver: '%s': %s\n", kobject_name(kobj), __FUNCTION__);
++	kfree(drv_priv);
+ }
+ 
+ static struct kobj_type driver_ktype = {
+@@ -94,34 +88,30 @@ static struct kobj_type driver_ktype = {
+ 	.release	= driver_release,
+ };
  
- 	/* post whitening but kw4 */
--	io[2] ^= CAMELLIA_SUBKEY_L(0);
--	io[3] ^= CAMELLIA_SUBKEY_R(0);
 -
--	t0 = io[0];
--	t1 = io[1];
--	io[0] = io[2];
--	io[1] = io[3];
--	io[2] = t0;
--	io[3] = t1;
+ /*
+  * sysfs bindings for buses
+  */
 -
--	io_text[0] = cpu_to_be32(io[0]);
--	io_text[1] = cpu_to_be32(io[1]);
--	io_text[2] = cpu_to_be32(io[2]);
--	io_text[3] = cpu_to_be32(io[3]);
 -
--	return;
-+	io[2] ^= SUBKEY_L(0);
-+	io[3] ^= SUBKEY_R(0);
-+	/* NB: 0,1 should be swapped with 2,3 by caller! */
- }
+-static ssize_t
+-bus_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
++static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
++			     char *buf)
+ {
+-	struct bus_attribute * bus_attr = to_bus_attr(attr);
+-	struct bus_type * bus = to_bus(kobj);
++	struct bus_attribute *bus_attr = to_bus_attr(attr);
++	struct bus_type_private *bus_priv = to_bus(kobj);
+ 	ssize_t ret = 0;
  
+ 	if (bus_attr->show)
+-		ret = bus_attr->show(bus, buf);
++		ret = bus_attr->show(bus_priv->bus, buf);
+ 	return ret;
+ }
  
-+struct camellia_ctx {
-+	int key_length;
-+	u32 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u32)];
-+};
-+
- static int
- camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- 		 unsigned int key_len)
-@@ -1688,7 +1027,7 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+-static ssize_t
+-bus_attr_store(struct kobject * kobj, struct attribute * attr,
+-	       const char * buf, size_t count)
++static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
++			      const char *buf, size_t count)
+ {
+-	struct bus_attribute * bus_attr = to_bus_attr(attr);
+-	struct bus_type * bus = to_bus(kobj);
++	struct bus_attribute *bus_attr = to_bus_attr(attr);
++	struct bus_type_private *bus_priv = to_bus(kobj);
+ 	ssize_t ret = 0;
  
- 	cctx->key_length = key_len;
+ 	if (bus_attr->store)
+-		ret = bus_attr->store(bus, buf, count);
++		ret = bus_attr->store(bus_priv->bus, buf, count);
+ 	return ret;
+ }
  
--	switch(key_len) {
-+	switch (key_len) {
- 	case 16:
- 		camellia_setup128(key, cctx->key_table);
- 		break;
-@@ -1698,68 +1037,59 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- 	case 32:
- 		camellia_setup256(key, cctx->key_table);
- 		break;
--	default:
--		break;
- 	}
+@@ -130,24 +120,26 @@ static struct sysfs_ops bus_sysfs_ops = {
+ 	.store	= bus_attr_store,
+ };
  
- 	return 0;
+-int bus_create_file(struct bus_type * bus, struct bus_attribute * attr)
++int bus_create_file(struct bus_type *bus, struct bus_attribute *attr)
+ {
+ 	int error;
+ 	if (bus_get(bus)) {
+-		error = sysfs_create_file(&bus->subsys.kobj, &attr->attr);
++		error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr);
+ 		bus_put(bus);
+ 	} else
+ 		error = -EINVAL;
+ 	return error;
  }
++EXPORT_SYMBOL_GPL(bus_create_file);
  
--
- static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+-void bus_remove_file(struct bus_type * bus, struct bus_attribute * attr)
++void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
  {
- 	const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
- 	const __be32 *src = (const __be32 *)in;
- 	__be32 *dst = (__be32 *)out;
- 
--	__be32 tmp[4];
-+	u32 tmp[4];
+ 	if (bus_get(bus)) {
+-		sysfs_remove_file(&bus->subsys.kobj, &attr->attr);
++		sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr);
+ 		bus_put(bus);
+ 	}
+ }
++EXPORT_SYMBOL_GPL(bus_remove_file);
  
--	memcpy(tmp, src, CAMELLIA_BLOCK_SIZE);
-+	tmp[0] = be32_to_cpu(src[0]);
-+	tmp[1] = be32_to_cpu(src[1]);
-+	tmp[2] = be32_to_cpu(src[2]);
-+	tmp[3] = be32_to_cpu(src[3]);
+ static struct kobj_type bus_ktype = {
+ 	.sysfs_ops	= &bus_sysfs_ops,
+@@ -166,20 +158,11 @@ static struct kset_uevent_ops bus_uevent_ops = {
+ 	.filter = bus_uevent_filter,
+ };
  
--	switch (cctx->key_length) {
--	case 16:
--		camellia_encrypt128(cctx->key_table, tmp);
--		break;
--	case 24:
--		/* fall through */
--	case 32:
--		camellia_encrypt256(cctx->key_table, tmp);
--		break;
--	default:
--		break;
--	}
-+	camellia_do_encrypt(cctx->key_table, tmp,
-+		cctx->key_length == 16 ? 24 : 32 /* for key lengths of 24 and 32 */
-+	);
+-static decl_subsys(bus, &bus_ktype, &bus_uevent_ops);
++static struct kset *bus_kset;
  
--	memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE);
-+	/* do_encrypt returns 0,1 swapped with 2,3 */
-+	dst[0] = cpu_to_be32(tmp[2]);
-+	dst[1] = cpu_to_be32(tmp[3]);
-+	dst[2] = cpu_to_be32(tmp[0]);
-+	dst[3] = cpu_to_be32(tmp[1]);
- }
  
+ #ifdef CONFIG_HOTPLUG
+ /* Manually detach a device from its associated driver. */
+-static int driver_helper(struct device *dev, void *data)
+-{
+-	const char *name = data;
 -
- static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+-	if (strcmp(name, dev->bus_id) == 0)
+-		return 1;
+-	return 0;
+-}
+-
+ static ssize_t driver_unbind(struct device_driver *drv,
+ 			     const char *buf, size_t count)
  {
- 	const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
- 	const __be32 *src = (const __be32 *)in;
- 	__be32 *dst = (__be32 *)out;
+@@ -187,7 +170,7 @@ static ssize_t driver_unbind(struct device_driver *drv,
+ 	struct device *dev;
+ 	int err = -ENODEV;
  
--	__be32 tmp[4];
-+	u32 tmp[4];
+-	dev = bus_find_device(bus, NULL, (void *)buf, driver_helper);
++	dev = bus_find_device_by_name(bus, NULL, buf);
+ 	if (dev && dev->driver == drv) {
+ 		if (dev->parent)	/* Needed for USB */
+ 			down(&dev->parent->sem);
+@@ -214,7 +197,7 @@ static ssize_t driver_bind(struct device_driver *drv,
+ 	struct device *dev;
+ 	int err = -ENODEV;
  
--	memcpy(tmp, src, CAMELLIA_BLOCK_SIZE);
-+	tmp[0] = be32_to_cpu(src[0]);
-+	tmp[1] = be32_to_cpu(src[1]);
-+	tmp[2] = be32_to_cpu(src[2]);
-+	tmp[3] = be32_to_cpu(src[3]);
+-	dev = bus_find_device(bus, NULL, (void *)buf, driver_helper);
++	dev = bus_find_device_by_name(bus, NULL, buf);
+ 	if (dev && dev->driver == NULL) {
+ 		if (dev->parent)	/* Needed for USB */
+ 			down(&dev->parent->sem);
+@@ -224,10 +207,13 @@ static ssize_t driver_bind(struct device_driver *drv,
+ 		if (dev->parent)
+ 			up(&dev->parent->sem);
  
--	switch (cctx->key_length) {
--	case 16:
--		camellia_decrypt128(cctx->key_table, tmp);
--		break;
--	case 24:
--		/* fall through */
--	case 32:
--		camellia_decrypt256(cctx->key_table, tmp);
--		break;
--	default:
--		break;
--	}
-+	camellia_do_decrypt(cctx->key_table, tmp,
-+		cctx->key_length == 16 ? 24 : 32 /* for key lengths of 24 and 32 */
-+	);
+-		if (err > 0) 		/* success */
++		if (err > 0) {
++			/* success */
+ 			err = count;
+-		else if (err == 0)	/* driver didn't accept device */
++		} else if (err == 0) {
++			/* driver didn't accept device */
+ 			err = -ENODEV;
++		}
+ 	}
+ 	put_device(dev);
+ 	bus_put(bus);
+@@ -237,16 +223,16 @@ static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
  
--	memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE);
-+	/* do_decrypt returns 0,1 swapped with 2,3 */
-+	dst[0] = cpu_to_be32(tmp[2]);
-+	dst[1] = cpu_to_be32(tmp[3]);
-+	dst[2] = cpu_to_be32(tmp[0]);
-+	dst[3] = cpu_to_be32(tmp[1]);
+ static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
+ {
+-	return sprintf(buf, "%d\n", bus->drivers_autoprobe);
++	return sprintf(buf, "%d\n", bus->p->drivers_autoprobe);
  }
  
--
- static struct crypto_alg camellia_alg = {
- 	.cra_name		=	"camellia",
- 	.cra_driver_name	=	"camellia-generic",
-@@ -1786,16 +1116,13 @@ static int __init camellia_init(void)
- 	return crypto_register_alg(&camellia_alg);
+ static ssize_t store_drivers_autoprobe(struct bus_type *bus,
+ 				       const char *buf, size_t count)
+ {
+ 	if (buf[0] == '0')
+-		bus->drivers_autoprobe = 0;
++		bus->p->drivers_autoprobe = 0;
+ 	else
+-		bus->drivers_autoprobe = 1;
++		bus->p->drivers_autoprobe = 1;
+ 	return count;
  }
  
--
- static void __exit camellia_fini(void)
+@@ -255,7 +241,7 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
  {
- 	crypto_unregister_alg(&camellia_alg);
+ 	struct device *dev;
+ 
+-	dev = bus_find_device(bus, NULL, (void *)buf, driver_helper);
++	dev = bus_find_device_by_name(bus, NULL, buf);
+ 	if (!dev)
+ 		return -ENODEV;
+ 	if (bus_rescan_devices_helper(dev, NULL) != 0)
+@@ -264,49 +250,49 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
  }
+ #endif
  
--
- module_init(camellia_init);
- module_exit(camellia_fini);
+-static struct device * next_device(struct klist_iter * i)
++static struct device *next_device(struct klist_iter *i)
+ {
+-	struct klist_node * n = klist_next(i);
++	struct klist_node *n = klist_next(i);
+ 	return n ? container_of(n, struct device, knode_bus) : NULL;
+ }
  
+ /**
+- *	bus_for_each_dev - device iterator.
+- *	@bus:	bus type.
+- *	@start:	device to start iterating from.
+- *	@data:	data for the callback.
+- *	@fn:	function to be called for each device.
++ * bus_for_each_dev - device iterator.
++ * @bus: bus type.
++ * @start: device to start iterating from.
++ * @data: data for the callback.
++ * @fn: function to be called for each device.
+  *
+- *	Iterate over @bus's list of devices, and call @fn for each,
+- *	passing it @data. If @start is not NULL, we use that device to
+- *	begin iterating from.
++ * Iterate over @bus's list of devices, and call @fn for each,
++ * passing it @data. If @start is not NULL, we use that device to
++ * begin iterating from.
+  *
+- *	We check the return of @fn each time. If it returns anything
+- *	other than 0, we break out and return that value.
++ * We check the return of @fn each time. If it returns anything
++ * other than 0, we break out and return that value.
+  *
+- *	NOTE: The device that returns a non-zero value is not retained
+- *	in any way, nor is its refcount incremented. If the caller needs
+- *	to retain this data, it should do, and increment the reference
+- *	count in the supplied callback.
++ * NOTE: The device that returns a non-zero value is not retained
++ * in any way, nor is its refcount incremented. If the caller needs
++ * to retain this data, it should do, and increment the reference
++ * count in the supplied callback.
+  */
 -
- MODULE_DESCRIPTION("Camellia Cipher Algorithm");
- MODULE_LICENSE("GPL");
-diff --git a/crypto/cast6.c b/crypto/cast6.c
-index 136ab6d..5fd9420 100644
---- a/crypto/cast6.c
-+++ b/crypto/cast6.c
-@@ -369,7 +369,7 @@ static const u8 Tr[4][8] = {
- };
+-int bus_for_each_dev(struct bus_type * bus, struct device * start,
+-		     void * data, int (*fn)(struct device *, void *))
++int bus_for_each_dev(struct bus_type *bus, struct device *start,
++		     void *data, int (*fn)(struct device *, void *))
+ {
+ 	struct klist_iter i;
+-	struct device * dev;
++	struct device *dev;
+ 	int error = 0;
  
- /* forward octave */
--static inline void W(u32 *key, unsigned int i) {
-+static void W(u32 *key, unsigned int i) {
- 	u32 I;
- 	key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
- 	key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
-@@ -428,7 +428,7 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ 	if (!bus)
+ 		return -EINVAL;
+ 
+-	klist_iter_init_node(&bus->klist_devices, &i,
++	klist_iter_init_node(&bus->p->klist_devices, &i,
+ 			     (start ? &start->knode_bus : NULL));
+ 	while ((dev = next_device(&i)) && !error)
+ 		error = fn(dev, data);
+ 	klist_iter_exit(&i);
+ 	return error;
  }
++EXPORT_SYMBOL_GPL(bus_for_each_dev);
  
- /*forward quad round*/
--static inline void Q (u32 * block, u8 * Kr, u32 * Km) {
-+static void Q (u32 * block, u8 * Kr, u32 * Km) {
- 	u32 I;
- 	block[2] ^= F1(block[3], Kr[0], Km[0]);
- 	block[1] ^= F2(block[2], Kr[1], Km[1]);
-@@ -437,7 +437,7 @@ static inline void Q (u32 * block, u8 * Kr, u32 * Km) {
+ /**
+  * bus_find_device - device iterator for locating a particular device.
+@@ -323,9 +309,9 @@ int bus_for_each_dev(struct bus_type * bus, struct device * start,
+  * if it does.  If the callback returns non-zero, this function will
+  * return to the caller and not iterate over any more devices.
+  */
+-struct device * bus_find_device(struct bus_type *bus,
+-				struct device *start, void *data,
+-				int (*match)(struct device *, void *))
++struct device *bus_find_device(struct bus_type *bus,
++			       struct device *start, void *data,
++			       int (*match)(struct device *dev, void *data))
+ {
+ 	struct klist_iter i;
+ 	struct device *dev;
+@@ -333,7 +319,7 @@ struct device * bus_find_device(struct bus_type *bus,
+ 	if (!bus)
+ 		return NULL;
+ 
+-	klist_iter_init_node(&bus->klist_devices, &i,
++	klist_iter_init_node(&bus->p->klist_devices, &i,
+ 			     (start ? &start->knode_bus : NULL));
+ 	while ((dev = next_device(&i)))
+ 		if (match(dev, data) && get_device(dev))
+@@ -341,51 +327,83 @@ struct device * bus_find_device(struct bus_type *bus,
+ 	klist_iter_exit(&i);
+ 	return dev;
  }
++EXPORT_SYMBOL_GPL(bus_find_device);
++
++static int match_name(struct device *dev, void *data)
++{
++	const char *name = data;
  
- /*reverse quad round*/
--static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) {
-+static void QBAR (u32 * block, u8 * Kr, u32 * Km) {
- 	u32 I;
-         block[3] ^= F1(block[0], Kr[3], Km[3]);
-         block[0] ^= F3(block[1], Kr[2], Km[2]);
-diff --git a/crypto/cbc.c b/crypto/cbc.c
-index 1f2649e..6affff8 100644
---- a/crypto/cbc.c
-+++ b/crypto/cbc.c
-@@ -14,13 +14,13 @@
- #include <linux/err.h>
- #include <linux/init.h>
- #include <linux/kernel.h>
-+#include <linux/log2.h>
- #include <linux/module.h>
- #include <linux/scatterlist.h>
- #include <linux/slab.h>
++	if (strcmp(name, dev->bus_id) == 0)
++		return 1;
++	return 0;
++}
  
- struct crypto_cbc_ctx {
- 	struct crypto_cipher *child;
--	void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
- };
+-static struct device_driver * next_driver(struct klist_iter * i)
++/**
++ * bus_find_device_by_name - device iterator for locating a particular device of a specific name
++ * @bus: bus type
++ * @start: Device to begin with
++ * @name: name of the device to match
++ *
++ * This is similar to the bus_find_device() function above, but it handles
++ * searching by a name automatically, no need to write another strcmp matching
++ * function.
++ */
++struct device *bus_find_device_by_name(struct bus_type *bus,
++				       struct device *start, const char *name)
+ {
+-	struct klist_node * n = klist_next(i);
+-	return n ? container_of(n, struct device_driver, knode_bus) : NULL;
++	return bus_find_device(bus, start, (void *)name, match_name);
++}
++EXPORT_SYMBOL_GPL(bus_find_device_by_name);
++
++static struct device_driver *next_driver(struct klist_iter *i)
++{
++	struct klist_node *n = klist_next(i);
++	struct driver_private *drv_priv;
++
++	if (n) {
++		drv_priv = container_of(n, struct driver_private, knode_bus);
++		return drv_priv->driver;
++	}
++	return NULL;
+ }
  
- static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
-@@ -41,9 +41,7 @@ static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
+ /**
+- *	bus_for_each_drv - driver iterator
+- *	@bus:	bus we're dealing with.
+- *	@start:	driver to start iterating on.
+- *	@data:	data to pass to the callback.
+- *	@fn:	function to call for each driver.
++ * bus_for_each_drv - driver iterator
++ * @bus: bus we're dealing with.
++ * @start: driver to start iterating on.
++ * @data: data to pass to the callback.
++ * @fn: function to call for each driver.
+  *
+- *	This is nearly identical to the device iterator above.
+- *	We iterate over each driver that belongs to @bus, and call
+- *	@fn for each. If @fn returns anything but 0, we break out
+- *	and return it. If @start is not NULL, we use it as the head
+- *	of the list.
++ * This is nearly identical to the device iterator above.
++ * We iterate over each driver that belongs to @bus, and call
++ * @fn for each. If @fn returns anything but 0, we break out
++ * and return it. If @start is not NULL, we use it as the head
++ * of the list.
+  *
+- *	NOTE: we don't return the driver that returns a non-zero
+- *	value, nor do we leave the reference count incremented for that
+- *	driver. If the caller needs to know that info, it must set it
+- *	in the callback. It must also be sure to increment the refcount
+- *	so it doesn't disappear before returning to the caller.
++ * NOTE: we don't return the driver that returns a non-zero
++ * value, nor do we leave the reference count incremented for that
++ * driver. If the caller needs to know that info, it must set it
++ * in the callback. It must also be sure to increment the refcount
++ * so it doesn't disappear before returning to the caller.
+  */
+-
+-int bus_for_each_drv(struct bus_type * bus, struct device_driver * start,
+-		     void * data, int (*fn)(struct device_driver *, void *))
++int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
++		     void *data, int (*fn)(struct device_driver *, void *))
+ {
+ 	struct klist_iter i;
+-	struct device_driver * drv;
++	struct device_driver *drv;
+ 	int error = 0;
  
- static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
- 				      struct blkcipher_walk *walk,
--				      struct crypto_cipher *tfm,
--				      void (*xor)(u8 *, const u8 *,
--						  unsigned int))
-+				      struct crypto_cipher *tfm)
+ 	if (!bus)
+ 		return -EINVAL;
+ 
+-	klist_iter_init_node(&bus->klist_drivers, &i,
+-			     start ? &start->knode_bus : NULL);
++	klist_iter_init_node(&bus->p->klist_drivers, &i,
++			     start ? &start->p->knode_bus : NULL);
+ 	while ((drv = next_driver(&i)) && !error)
+ 		error = fn(drv, data);
+ 	klist_iter_exit(&i);
+ 	return error;
+ }
++EXPORT_SYMBOL_GPL(bus_for_each_drv);
+ 
+ static int device_add_attrs(struct bus_type *bus, struct device *dev)
  {
- 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- 		crypto_cipher_alg(tfm)->cia_encrypt;
-@@ -54,7 +52,7 @@ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
- 	u8 *iv = walk->iv;
+@@ -396,7 +414,7 @@ static int device_add_attrs(struct bus_type *bus, struct device *dev)
+ 		return 0;
  
- 	do {
--		xor(iv, src, bsize);
-+		crypto_xor(iv, src, bsize);
- 		fn(crypto_cipher_tfm(tfm), dst, iv);
- 		memcpy(iv, dst, bsize);
+ 	for (i = 0; attr_name(bus->dev_attrs[i]); i++) {
+-		error = device_create_file(dev,&bus->dev_attrs[i]);
++		error = device_create_file(dev, &bus->dev_attrs[i]);
+ 		if (error) {
+ 			while (--i >= 0)
+ 				device_remove_file(dev, &bus->dev_attrs[i]);
+@@ -406,13 +424,13 @@ static int device_add_attrs(struct bus_type *bus, struct device *dev)
+ 	return error;
+ }
  
-@@ -67,9 +65,7 @@ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
+-static void device_remove_attrs(struct bus_type * bus, struct device * dev)
++static void device_remove_attrs(struct bus_type *bus, struct device *dev)
+ {
+ 	int i;
  
- static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
- 				      struct blkcipher_walk *walk,
--				      struct crypto_cipher *tfm,
--				      void (*xor)(u8 *, const u8 *,
--						  unsigned int))
-+				      struct crypto_cipher *tfm)
+ 	if (bus->dev_attrs) {
+ 		for (i = 0; attr_name(bus->dev_attrs[i]); i++)
+-			device_remove_file(dev,&bus->dev_attrs[i]);
++			device_remove_file(dev, &bus->dev_attrs[i]);
+ 	}
+ }
+ 
+@@ -420,7 +438,7 @@ static void device_remove_attrs(struct bus_type * bus, struct device * dev)
+ static int make_deprecated_bus_links(struct device *dev)
  {
- 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- 		crypto_cipher_alg(tfm)->cia_encrypt;
-@@ -79,7 +75,7 @@ static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
- 	u8 *iv = walk->iv;
+ 	return sysfs_create_link(&dev->kobj,
+-				 &dev->bus->subsys.kobj, "bus");
++				 &dev->bus->p->subsys.kobj, "bus");
+ }
  
- 	do {
--		xor(src, iv, bsize);
-+		crypto_xor(src, iv, bsize);
- 		fn(crypto_cipher_tfm(tfm), src, src);
- 		iv = src;
+ static void remove_deprecated_bus_links(struct device *dev)
+@@ -433,28 +451,28 @@ static inline void remove_deprecated_bus_links(struct device *dev) { }
+ #endif
  
-@@ -99,7 +95,6 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
- 	struct crypto_blkcipher *tfm = desc->tfm;
- 	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
- 	struct crypto_cipher *child = ctx->child;
--	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
- 	int err;
+ /**
+- *	bus_add_device - add device to bus
+- *	@dev:	device being added
++ * bus_add_device - add device to bus
++ * @dev: device being added
+  *
+- *	- Add the device to its bus's list of devices.
+- *	- Create link to device's bus.
++ * - Add the device to its bus's list of devices.
++ * - Create link to device's bus.
+  */
+-int bus_add_device(struct device * dev)
++int bus_add_device(struct device *dev)
+ {
+-	struct bus_type * bus = bus_get(dev->bus);
++	struct bus_type *bus = bus_get(dev->bus);
+ 	int error = 0;
  
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
-@@ -107,11 +102,9 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
+ 	if (bus) {
+-		pr_debug("bus %s: add device %s\n", bus->name, dev->bus_id);
++		pr_debug("bus: '%s': add device %s\n", bus->name, dev->bus_id);
+ 		error = device_add_attrs(bus, dev);
+ 		if (error)
+ 			goto out_put;
+-		error = sysfs_create_link(&bus->devices.kobj,
++		error = sysfs_create_link(&bus->p->devices_kset->kobj,
+ 						&dev->kobj, dev->bus_id);
+ 		if (error)
+ 			goto out_id;
+ 		error = sysfs_create_link(&dev->kobj,
+-				&dev->bus->subsys.kobj, "subsystem");
++				&dev->bus->p->subsys.kobj, "subsystem");
+ 		if (error)
+ 			goto out_subsys;
+ 		error = make_deprecated_bus_links(dev);
+@@ -466,7 +484,7 @@ int bus_add_device(struct device * dev)
+ out_deprecated:
+ 	sysfs_remove_link(&dev->kobj, "subsystem");
+ out_subsys:
+-	sysfs_remove_link(&bus->devices.kobj, dev->bus_id);
++	sysfs_remove_link(&bus->p->devices_kset->kobj, dev->bus_id);
+ out_id:
+ 	device_remove_attrs(bus, dev);
+ out_put:
+@@ -475,56 +493,58 @@ out_put:
+ }
  
- 	while ((nbytes = walk.nbytes)) {
- 		if (walk.src.virt.addr == walk.dst.virt.addr)
--			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child,
--							    xor);
-+			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
+ /**
+- *	bus_attach_device - add device to bus
+- *	@dev:	device tried to attach to a driver
++ * bus_attach_device - add device to bus
++ * @dev: device tried to attach to a driver
+  *
+- *	- Add device to bus's list of devices.
+- *	- Try to attach to driver.
++ * - Add device to bus's list of devices.
++ * - Try to attach to driver.
+  */
+-void bus_attach_device(struct device * dev)
++void bus_attach_device(struct device *dev)
+ {
+ 	struct bus_type *bus = dev->bus;
+ 	int ret = 0;
+ 
+ 	if (bus) {
+ 		dev->is_registered = 1;
+-		if (bus->drivers_autoprobe)
++		if (bus->p->drivers_autoprobe)
+ 			ret = device_attach(dev);
+ 		WARN_ON(ret < 0);
+ 		if (ret >= 0)
+-			klist_add_tail(&dev->knode_bus, &bus->klist_devices);
++			klist_add_tail(&dev->knode_bus, &bus->p->klist_devices);
  		else
--			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child,
--							    xor);
-+			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 			dev->is_registered = 0;
  	}
+ }
  
-@@ -120,9 +113,7 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
+ /**
+- *	bus_remove_device - remove device from bus
+- *	@dev:	device to be removed
++ * bus_remove_device - remove device from bus
++ * @dev: device to be removed
+  *
+- *	- Remove symlink from bus's directory.
+- *	- Delete device from bus's list.
+- *	- Detach from its driver.
+- *	- Drop reference taken in bus_add_device().
++ * - Remove symlink from bus's directory.
++ * - Delete device from bus's list.
++ * - Detach from its driver.
++ * - Drop reference taken in bus_add_device().
+  */
+-void bus_remove_device(struct device * dev)
++void bus_remove_device(struct device *dev)
+ {
+ 	if (dev->bus) {
+ 		sysfs_remove_link(&dev->kobj, "subsystem");
+ 		remove_deprecated_bus_links(dev);
+-		sysfs_remove_link(&dev->bus->devices.kobj, dev->bus_id);
++		sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
++				  dev->bus_id);
+ 		device_remove_attrs(dev->bus, dev);
+ 		if (dev->is_registered) {
+ 			dev->is_registered = 0;
+ 			klist_del(&dev->knode_bus);
+ 		}
+-		pr_debug("bus %s: remove device %s\n", dev->bus->name, dev->bus_id);
++		pr_debug("bus: '%s': remove device %s\n",
++			 dev->bus->name, dev->bus_id);
+ 		device_release_driver(dev);
+ 		bus_put(dev->bus);
+ 	}
+ }
  
- static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
- 				      struct blkcipher_walk *walk,
--				      struct crypto_cipher *tfm,
--				      void (*xor)(u8 *, const u8 *,
--						  unsigned int))
-+				      struct crypto_cipher *tfm)
+-static int driver_add_attrs(struct bus_type * bus, struct device_driver * drv)
++static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
  {
- 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- 		crypto_cipher_alg(tfm)->cia_decrypt;
-@@ -134,7 +125,7 @@ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
+ 	int error = 0;
+ 	int i;
+@@ -533,19 +553,19 @@ static int driver_add_attrs(struct bus_type * bus, struct device_driver * drv)
+ 		for (i = 0; attr_name(bus->drv_attrs[i]); i++) {
+ 			error = driver_create_file(drv, &bus->drv_attrs[i]);
+ 			if (error)
+-				goto Err;
++				goto err;
+ 		}
+ 	}
+- Done:
++done:
+ 	return error;
+- Err:
++err:
+ 	while (--i >= 0)
+ 		driver_remove_file(drv, &bus->drv_attrs[i]);
+-	goto Done;
++	goto done;
+ }
  
- 	do {
- 		fn(crypto_cipher_tfm(tfm), dst, src);
--		xor(dst, iv, bsize);
-+		crypto_xor(dst, iv, bsize);
- 		iv = src;
+-
+-static void driver_remove_attrs(struct bus_type * bus, struct device_driver * drv)
++static void driver_remove_attrs(struct bus_type *bus,
++				struct device_driver *drv)
+ {
+ 	int i;
  
- 		src += bsize;
-@@ -148,34 +139,29 @@ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
+@@ -616,39 +636,46 @@ static ssize_t driver_uevent_store(struct device_driver *drv,
+ 	enum kobject_action action;
  
- static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
- 				      struct blkcipher_walk *walk,
--				      struct crypto_cipher *tfm,
--				      void (*xor)(u8 *, const u8 *,
--						  unsigned int))
-+				      struct crypto_cipher *tfm)
+ 	if (kobject_action_type(buf, count, &action) == 0)
+-		kobject_uevent(&drv->kobj, action);
++		kobject_uevent(&drv->p->kobj, action);
+ 	return count;
+ }
+ static DRIVER_ATTR(uevent, S_IWUSR, NULL, driver_uevent_store);
+ 
+ /**
+- *	bus_add_driver - Add a driver to the bus.
+- *	@drv:	driver.
+- *
++ * bus_add_driver - Add a driver to the bus.
++ * @drv: driver.
+  */
+ int bus_add_driver(struct device_driver *drv)
  {
- 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- 		crypto_cipher_alg(tfm)->cia_decrypt;
- 	int bsize = crypto_cipher_blocksize(tfm);
--	unsigned long alignmask = crypto_cipher_alignmask(tfm);
- 	unsigned int nbytes = walk->nbytes;
- 	u8 *src = walk->src.virt.addr;
--	u8 stack[bsize + alignmask];
--	u8 *first_iv = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
--
--	memcpy(first_iv, walk->iv, bsize);
-+	u8 last_iv[bsize];
+-	struct bus_type * bus = bus_get(drv->bus);
++	struct bus_type *bus;
++	struct driver_private *priv;
+ 	int error = 0;
  
- 	/* Start of the last block. */
--	src += nbytes - nbytes % bsize - bsize;
--	memcpy(walk->iv, src, bsize);
-+	src += nbytes - (nbytes & (bsize - 1)) - bsize;
-+	memcpy(last_iv, src, bsize);
++	bus = bus_get(drv->bus);
+ 	if (!bus)
+ 		return -EINVAL;
  
- 	for (;;) {
- 		fn(crypto_cipher_tfm(tfm), src, src);
- 		if ((nbytes -= bsize) < bsize)
- 			break;
--		xor(src, src - bsize, bsize);
-+		crypto_xor(src, src - bsize, bsize);
- 		src -= bsize;
+-	pr_debug("bus %s: add driver %s\n", bus->name, drv->name);
+-	error = kobject_set_name(&drv->kobj, "%s", drv->name);
+-	if (error)
+-		goto out_put_bus;
+-	drv->kobj.kset = &bus->drivers;
+-	error = kobject_register(&drv->kobj);
++	pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name);
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	klist_init(&priv->klist_devices, NULL, NULL);
++	priv->driver = drv;
++	drv->p = priv;
++	priv->kobj.kset = bus->p->drivers_kset;
++	error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
++				     "%s", drv->name);
+ 	if (error)
+ 		goto out_put_bus;
+ 
+-	if (drv->bus->drivers_autoprobe) {
++	if (drv->bus->p->drivers_autoprobe) {
+ 		error = driver_attach(drv);
+ 		if (error)
+ 			goto out_unregister;
  	}
+-	klist_add_tail(&drv->knode_bus, &bus->klist_drivers);
++	klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
+ 	module_add_driver(drv->owner, drv);
  
--	xor(src, first_iv, bsize);
-+	crypto_xor(src, walk->iv, bsize);
-+	memcpy(walk->iv, last_iv, bsize);
+ 	error = driver_create_file(drv, &driver_attr_uevent);
+@@ -669,24 +696,24 @@ int bus_add_driver(struct device_driver *drv)
+ 			__FUNCTION__, drv->name);
+ 	}
  
- 	return nbytes;
++	kobject_uevent(&priv->kobj, KOBJ_ADD);
+ 	return error;
+ out_unregister:
+-	kobject_unregister(&drv->kobj);
++	kobject_put(&priv->kobj);
+ out_put_bus:
+ 	bus_put(bus);
+ 	return error;
  }
-@@ -188,7 +174,6 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
- 	struct crypto_blkcipher *tfm = desc->tfm;
- 	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
- 	struct crypto_cipher *child = ctx->child;
--	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
- 	int err;
  
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
-@@ -196,48 +181,15 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
+ /**
+- *	bus_remove_driver - delete driver from bus's knowledge.
+- *	@drv:	driver.
++ * bus_remove_driver - delete driver from bus's knowledge.
++ * @drv: driver.
+  *
+- *	Detach the driver from the devices it controls, and remove
+- *	it from its bus's list of drivers. Finally, we drop the reference
+- *	to the bus we took in bus_add_driver().
++ * Detach the driver from the devices it controls, and remove
++ * it from its bus's list of drivers. Finally, we drop the reference
++ * to the bus we took in bus_add_driver().
+  */
+-
+-void bus_remove_driver(struct device_driver * drv)
++void bus_remove_driver(struct device_driver *drv)
+ {
+ 	if (!drv->bus)
+ 		return;
+@@ -694,18 +721,17 @@ void bus_remove_driver(struct device_driver * drv)
+ 	remove_bind_files(drv);
+ 	driver_remove_attrs(drv->bus, drv);
+ 	driver_remove_file(drv, &driver_attr_uevent);
+-	klist_remove(&drv->knode_bus);
+-	pr_debug("bus %s: remove driver %s\n", drv->bus->name, drv->name);
++	klist_remove(&drv->p->knode_bus);
++	pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
+ 	driver_detach(drv);
+ 	module_remove_driver(drv);
+-	kobject_unregister(&drv->kobj);
++	kobject_put(&drv->p->kobj);
+ 	bus_put(drv->bus);
+ }
  
- 	while ((nbytes = walk.nbytes)) {
- 		if (walk.src.virt.addr == walk.dst.virt.addr)
--			nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child,
--							    xor);
-+			nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
- 		else
--			nbytes = crypto_cbc_decrypt_segment(desc, &walk, child,
--							    xor);
-+			nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
- 	}
+-
+ /* Helper for bus_rescan_devices's iter */
+ static int __must_check bus_rescan_devices_helper(struct device *dev,
+-						void *data)
++						  void *data)
+ {
+ 	int ret = 0;
  
- 	return err;
+@@ -727,10 +753,11 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
+  * attached and rescan it against existing drivers to see if it matches
+  * any by calling device_attach() for the unbound devices.
+  */
+-int bus_rescan_devices(struct bus_type * bus)
++int bus_rescan_devices(struct bus_type *bus)
+ {
+ 	return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
  }
++EXPORT_SYMBOL_GPL(bus_rescan_devices);
  
--static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
--{
--	do {
--		*a++ ^= *b++;
--	} while (--bs);
--}
--
--static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
--{
--	u32 *a = (u32 *)dst;
--	u32 *b = (u32 *)src;
--
--	do {
--		*a++ ^= *b++;
--	} while ((bs -= 4));
--}
--
--static void xor_64(u8 *a, const u8 *b, unsigned int bs)
--{
--	((u32 *)a)[0] ^= ((u32 *)b)[0];
--	((u32 *)a)[1] ^= ((u32 *)b)[1];
--}
--
--static void xor_128(u8 *a, const u8 *b, unsigned int bs)
--{
--	((u32 *)a)[0] ^= ((u32 *)b)[0];
--	((u32 *)a)[1] ^= ((u32 *)b)[1];
--	((u32 *)a)[2] ^= ((u32 *)b)[2];
--	((u32 *)a)[3] ^= ((u32 *)b)[3];
--}
--
- static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
+ /**
+  * device_reprobe - remove driver for a device and probe for a new driver
+@@ -755,55 +782,55 @@ int device_reprobe(struct device *dev)
+ EXPORT_SYMBOL_GPL(device_reprobe);
+ 
+ /**
+- *	find_bus - locate bus by name.
+- *	@name:	name of bus.
++ * find_bus - locate bus by name.
++ * @name: name of bus.
+  *
+- *	Call kset_find_obj() to iterate over list of buses to
+- *	find a bus by name. Return bus if found.
++ * Call kset_find_obj() to iterate over list of buses to
++ * find a bus by name. Return bus if found.
+  *
+- *	Note that kset_find_obj increments bus' reference count.
++ * Note that kset_find_obj increments bus' reference count.
+  */
+ #if 0
+-struct bus_type * find_bus(char * name)
++struct bus_type *find_bus(char *name)
  {
- 	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-@@ -245,22 +197,6 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
- 	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
- 	struct crypto_cipher *cipher;
+-	struct kobject * k = kset_find_obj(&bus_subsys.kset, name);
++	struct kobject *k = kset_find_obj(bus_kset, name);
+ 	return k ? to_bus(k) : NULL;
+ }
+ #endif  /*  0  */
  
--	switch (crypto_tfm_alg_blocksize(tfm)) {
--	case 8:
--		ctx->xor = xor_64;
--		break;
--
--	case 16:
--		ctx->xor = xor_128;
--		break;
--
--	default:
--		if (crypto_tfm_alg_blocksize(tfm) % 4)
--			ctx->xor = xor_byte;
--		else
--			ctx->xor = xor_quad;
--	}
--
- 	cipher = crypto_spawn_cipher(spawn);
- 	if (IS_ERR(cipher))
- 		return PTR_ERR(cipher);
-@@ -290,6 +226,10 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
- 	if (IS_ERR(alg))
- 		return ERR_PTR(PTR_ERR(alg));
  
-+	inst = ERR_PTR(-EINVAL);
-+	if (!is_power_of_2(alg->cra_blocksize))
-+		goto out_put_alg;
-+
- 	inst = crypto_alloc_instance("cbc", alg);
- 	if (IS_ERR(inst))
- 		goto out_put_alg;
-@@ -300,8 +240,9 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
- 	inst->alg.cra_alignmask = alg->cra_alignmask;
- 	inst->alg.cra_type = &crypto_blkcipher_type;
+ /**
+- *	bus_add_attrs - Add default attributes for this bus.
+- *	@bus:	Bus that has just been registered.
++ * bus_add_attrs - Add default attributes for this bus.
++ * @bus: Bus that has just been registered.
+  */
  
--	if (!(alg->cra_blocksize % 4))
--		inst->alg.cra_alignmask |= 3;
-+	/* We access the data as u32s when xoring. */
-+	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
-+
- 	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- 	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- 	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
-diff --git a/crypto/ccm.c b/crypto/ccm.c
-new file mode 100644
-index 0000000..7cf7e5a
---- /dev/null
-+++ b/crypto/ccm.c
-@@ -0,0 +1,889 @@
-+/*
-+ * CCM: Counter with CBC-MAC
-+ *
-+ * (C) Copyright IBM Corp. 2007 - Joy Latten <latten at us.ibm.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
-+ * any later version.
-+ *
-+ */
-+
-+#include <crypto/internal/aead.h>
-+#include <crypto/internal/skcipher.h>
-+#include <crypto/scatterwalk.h>
-+#include <linux/err.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+
-+#include "internal.h"
-+
-+struct ccm_instance_ctx {
-+	struct crypto_skcipher_spawn ctr;
-+	struct crypto_spawn cipher;
-+};
-+
-+struct crypto_ccm_ctx {
-+	struct crypto_cipher *cipher;
-+	struct crypto_ablkcipher *ctr;
-+};
-+
-+struct crypto_rfc4309_ctx {
-+	struct crypto_aead *child;
-+	u8 nonce[3];
-+};
-+
-+struct crypto_ccm_req_priv_ctx {
-+	u8 odata[16];
-+	u8 idata[16];
-+	u8 auth_tag[16];
-+	u32 ilen;
-+	u32 flags;
-+	struct scatterlist src[2];
-+	struct scatterlist dst[2];
-+	struct ablkcipher_request abreq;
-+};
-+
-+static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
-+	struct aead_request *req)
-+{
-+	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
-+
-+	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
-+}
-+
-+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
-+{
-+	__be32 data;
-+
-+	memset(block, 0, csize);
-+	block += csize;
-+
-+	if (csize >= 4)
-+		csize = 4;
-+	else if (msglen > (1 << (8 * csize)))
-+		return -EOVERFLOW;
-+
-+	data = cpu_to_be32(msglen);
-+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
-+
-+	return 0;
-+}
-+
-+static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
-+			     unsigned int keylen)
-+{
-+	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
-+	struct crypto_ablkcipher *ctr = ctx->ctr;
-+	struct crypto_cipher *tfm = ctx->cipher;
-+	int err = 0;
-+
-+	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
-+	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
-+				    CRYPTO_TFM_REQ_MASK);
-+	err = crypto_ablkcipher_setkey(ctr, key, keylen);
-+	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
-+			      CRYPTO_TFM_RES_MASK);
-+	if (err)
-+		goto out;
-+
-+	crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
-+	crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) &
-+				    CRYPTO_TFM_REQ_MASK);
-+	err = crypto_cipher_setkey(tfm, key, keylen);
-+	crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) &
-+			      CRYPTO_TFM_RES_MASK);
-+
-+out:
-+	return err;
-+}
-+
-+static int crypto_ccm_setauthsize(struct crypto_aead *tfm,
-+				  unsigned int authsize)
-+{
-+	switch (authsize) {
-+	case 4:
-+	case 6:
-+	case 8:
-+	case 10:
-+	case 12:
-+	case 14:
-+	case 16:
-+		break;
-+	default:
-+		return -EINVAL;
-+	}
-+
-+	return 0;
-+}
-+
-+static int format_input(u8 *info, struct aead_request *req,
-+			unsigned int cryptlen)
-+{
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	unsigned int lp = req->iv[0];
-+	unsigned int l = lp + 1;
-+	unsigned int m;
-+
-+	m = crypto_aead_authsize(aead);
-+
-+	memcpy(info, req->iv, 16);
-+
-+	/* format control info per RFC 3610 and
-+	 * NIST Special Publication 800-38C
-+	 */
-+	*info |= (8 * ((m - 2) / 2));
-+	if (req->assoclen)
-+		*info |= 64;
-+
-+	return set_msg_len(info + 16 - l, cryptlen, l);
-+}
-+
-+static int format_adata(u8 *adata, unsigned int a)
-+{
-+	int len = 0;
-+
-+	/* add control info for associated data
-+	 * RFC 3610 and NIST Special Publication 800-38C
-+	 */
-+	if (a < 65280) {
-+		*(__be16 *)adata = cpu_to_be16(a);
-+		len = 2;
-+	} else  {
-+		*(__be16 *)adata = cpu_to_be16(0xfffe);
-+		*(__be32 *)&adata[2] = cpu_to_be32(a);
-+		len = 6;
-+	}
-+
-+	return len;
-+}
-+
-+static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n,
-+		       struct crypto_ccm_req_priv_ctx *pctx)
-+{
-+	unsigned int bs = 16;
-+	u8 *odata = pctx->odata;
-+	u8 *idata = pctx->idata;
-+	int datalen, getlen;
-+
-+	datalen = n;
-+
-+	/* first time in here, block may be partially filled. */
-+	getlen = bs - pctx->ilen;
-+	if (datalen >= getlen) {
-+		memcpy(idata + pctx->ilen, data, getlen);
-+		crypto_xor(odata, idata, bs);
-+		crypto_cipher_encrypt_one(tfm, odata, odata);
-+		datalen -= getlen;
-+		data += getlen;
-+		pctx->ilen = 0;
-+	}
-+
-+	/* now encrypt rest of data */
-+	while (datalen >= bs) {
-+		crypto_xor(odata, data, bs);
-+		crypto_cipher_encrypt_one(tfm, odata, odata);
-+
-+		datalen -= bs;
-+		data += bs;
-+	}
-+
-+	/* check and see if there's leftover data that wasn't
-+	 * enough to fill a block.
-+	 */
-+	if (datalen) {
-+		memcpy(idata + pctx->ilen, data, datalen);
-+		pctx->ilen += datalen;
-+	}
-+}
-+
-+static void get_data_to_compute(struct crypto_cipher *tfm,
-+			       struct crypto_ccm_req_priv_ctx *pctx,
-+			       struct scatterlist *sg, unsigned int len)
-+{
-+	struct scatter_walk walk;
-+	u8 *data_src;
-+	int n;
-+
-+	scatterwalk_start(&walk, sg);
-+
-+	while (len) {
-+		n = scatterwalk_clamp(&walk, len);
-+		if (!n) {
-+			scatterwalk_start(&walk, sg_next(walk.sg));
-+			n = scatterwalk_clamp(&walk, len);
-+		}
-+		data_src = scatterwalk_map(&walk, 0);
-+
-+		compute_mac(tfm, data_src, n, pctx);
-+		len -= n;
-+
-+		scatterwalk_unmap(data_src, 0);
-+		scatterwalk_advance(&walk, n);
-+		scatterwalk_done(&walk, 0, len);
-+		if (len)
-+			crypto_yield(pctx->flags);
-+	}
-+
-+	/* any leftover needs padding and then encrypted */
-+	if (pctx->ilen) {
-+		int padlen;
-+		u8 *odata = pctx->odata;
-+		u8 *idata = pctx->idata;
-+
-+		padlen = 16 - pctx->ilen;
-+		memset(idata + pctx->ilen, 0, padlen);
-+		crypto_xor(odata, idata, 16);
-+		crypto_cipher_encrypt_one(tfm, odata, odata);
-+		pctx->ilen = 0;
-+	}
-+}
-+
-+static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
-+			   unsigned int cryptlen)
-+{
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
-+	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
-+	struct crypto_cipher *cipher = ctx->cipher;
-+	unsigned int assoclen = req->assoclen;
-+	u8 *odata = pctx->odata;
-+	u8 *idata = pctx->idata;
-+	int err;
-+
-+	/* format control data for input */
-+	err = format_input(odata, req, cryptlen);
-+	if (err)
-+		goto out;
-+
-+	/* encrypt first block to use as start in computing mac  */
-+	crypto_cipher_encrypt_one(cipher, odata, odata);
-+
-+	/* format associated data and compute into mac */
-+	if (assoclen) {
-+		pctx->ilen = format_adata(idata, assoclen);
-+		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
-+	}
-+
-+	/* compute plaintext into mac */
-+	get_data_to_compute(cipher, pctx, plain, cryptlen);
-+
-+out:
-+	return err;
-+}
-+
-+static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
-+{
-+	struct aead_request *req = areq->data;
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
-+	u8 *odata = pctx->odata;
-+
-+	if (!err)
-+		scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
-+					 crypto_aead_authsize(aead), 1);
-+	aead_request_complete(req, err);
-+}
-+
-+static inline int crypto_ccm_check_iv(const u8 *iv)
-+{
-+	/* 2 <= L <= 8, so 1 <= L' <= 7. */
-+	if (1 > iv[0] || iv[0] > 7)
-+		return -EINVAL;
-+
-+	return 0;
-+}
-+
-+static int crypto_ccm_encrypt(struct aead_request *req)
-+{
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
-+	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
-+	struct ablkcipher_request *abreq = &pctx->abreq;
-+	struct scatterlist *dst;
-+	unsigned int cryptlen = req->cryptlen;
-+	u8 *odata = pctx->odata;
-+	u8 *iv = req->iv;
-+	int err;
-+
-+	err = crypto_ccm_check_iv(iv);
-+	if (err)
-+		return err;
-+
-+	pctx->flags = aead_request_flags(req);
-+
-+	err = crypto_ccm_auth(req, req->src, cryptlen);
-+	if (err)
-+		return err;
-+
-+	 /* Note: rfc 3610 and NIST 800-38C require counter of
-+	 * zero to encrypt auth tag.
-+	 */
-+	memset(iv + 15 - iv[0], 0, iv[0] + 1);
-+
-+	sg_init_table(pctx->src, 2);
-+	sg_set_buf(pctx->src, odata, 16);
-+	scatterwalk_sg_chain(pctx->src, 2, req->src);
-+
-+	dst = pctx->src;
-+	if (req->src != req->dst) {
-+		sg_init_table(pctx->dst, 2);
-+		sg_set_buf(pctx->dst, odata, 16);
-+		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
-+		dst = pctx->dst;
-+	}
-+
-+	ablkcipher_request_set_tfm(abreq, ctx->ctr);
-+	ablkcipher_request_set_callback(abreq, pctx->flags,
-+					crypto_ccm_encrypt_done, req);
-+	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
-+	err = crypto_ablkcipher_encrypt(abreq);
-+	if (err)
-+		return err;
-+
-+	/* copy authtag to end of dst */
-+	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
-+				 crypto_aead_authsize(aead), 1);
-+	return err;
-+}
-+
-+static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
-+				   int err)
-+{
-+	struct aead_request *req = areq->data;
-+	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	unsigned int authsize = crypto_aead_authsize(aead);
-+	unsigned int cryptlen = req->cryptlen - authsize;
-+
-+	if (!err) {
-+		err = crypto_ccm_auth(req, req->dst, cryptlen);
-+		if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
-+			err = -EBADMSG;
-+	}
-+	aead_request_complete(req, err);
-+}
-+
-+static int crypto_ccm_decrypt(struct aead_request *req)
-+{
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
-+	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
-+	struct ablkcipher_request *abreq = &pctx->abreq;
-+	struct scatterlist *dst;
-+	unsigned int authsize = crypto_aead_authsize(aead);
-+	unsigned int cryptlen = req->cryptlen;
-+	u8 *authtag = pctx->auth_tag;
-+	u8 *odata = pctx->odata;
-+	u8 *iv = req->iv;
-+	int err;
-+
-+	if (cryptlen < authsize)
-+		return -EINVAL;
-+	cryptlen -= authsize;
-+
-+	err = crypto_ccm_check_iv(iv);
-+	if (err)
-+		return err;
-+
-+	pctx->flags = aead_request_flags(req);
-+
-+	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
-+
-+	memset(iv + 15 - iv[0], 0, iv[0] + 1);
-+
-+	sg_init_table(pctx->src, 2);
-+	sg_set_buf(pctx->src, authtag, 16);
-+	scatterwalk_sg_chain(pctx->src, 2, req->src);
-+
-+	dst = pctx->src;
-+	if (req->src != req->dst) {
-+		sg_init_table(pctx->dst, 2);
-+		sg_set_buf(pctx->dst, authtag, 16);
-+		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
-+		dst = pctx->dst;
-+	}
-+
-+	ablkcipher_request_set_tfm(abreq, ctx->ctr);
-+	ablkcipher_request_set_callback(abreq, pctx->flags,
-+					crypto_ccm_decrypt_done, req);
-+	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
-+	err = crypto_ablkcipher_decrypt(abreq);
-+	if (err)
-+		return err;
-+
-+	err = crypto_ccm_auth(req, req->dst, cryptlen);
-+	if (err)
-+		return err;
-+
-+	/* verify */
-+	if (memcmp(authtag, odata, authsize))
-+		return -EBADMSG;
-+
-+	return err;
-+}
-+
-+static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-+	struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
-+	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct crypto_cipher *cipher;
-+	struct crypto_ablkcipher *ctr;
-+	unsigned long align;
-+	int err;
-+
-+	cipher = crypto_spawn_cipher(&ictx->cipher);
-+	if (IS_ERR(cipher))
-+		return PTR_ERR(cipher);
-+
-+	ctr = crypto_spawn_skcipher(&ictx->ctr);
-+	err = PTR_ERR(ctr);
-+	if (IS_ERR(ctr))
-+		goto err_free_cipher;
-+
-+	ctx->cipher = cipher;
-+	ctx->ctr = ctr;
-+
-+	align = crypto_tfm_alg_alignmask(tfm);
-+	align &= ~(crypto_tfm_ctx_alignment() - 1);
-+	tfm->crt_aead.reqsize = align +
-+				sizeof(struct crypto_ccm_req_priv_ctx) +
-+				crypto_ablkcipher_reqsize(ctr);
-+
-+	return 0;
-+
-+err_free_cipher:
-+	crypto_free_cipher(cipher);
-+	return err;
-+}
-+
-+static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	crypto_free_cipher(ctx->cipher);
-+	crypto_free_ablkcipher(ctx->ctr);
-+}
-+
-+static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
-+						       const char *full_name,
-+						       const char *ctr_name,
-+						       const char *cipher_name)
-+{
-+	struct crypto_attr_type *algt;
-+	struct crypto_instance *inst;
-+	struct crypto_alg *ctr;
-+	struct crypto_alg *cipher;
-+	struct ccm_instance_ctx *ictx;
-+	int err;
-+
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
-+		return ERR_PTR(err);
-+
-+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-+		return ERR_PTR(-EINVAL);
-+
-+	cipher = crypto_alg_mod_lookup(cipher_name,  CRYPTO_ALG_TYPE_CIPHER,
-+				       CRYPTO_ALG_TYPE_MASK);
-+	err = PTR_ERR(cipher);
-+	if (IS_ERR(cipher))
-+		return ERR_PTR(err);
-+
-+	err = -EINVAL;
-+	if (cipher->cra_blocksize != 16)
-+		goto out_put_cipher;
-+
-+	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
-+	err = -ENOMEM;
-+	if (!inst)
-+		goto out_put_cipher;
-+
-+	ictx = crypto_instance_ctx(inst);
-+
-+	err = crypto_init_spawn(&ictx->cipher, cipher, inst,
-+				CRYPTO_ALG_TYPE_MASK);
-+	if (err)
-+		goto err_free_inst;
-+
-+	crypto_set_skcipher_spawn(&ictx->ctr, inst);
-+	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
-+				   crypto_requires_sync(algt->type,
-+							algt->mask));
-+	if (err)
-+		goto err_drop_cipher;
-+
-+	ctr = crypto_skcipher_spawn_alg(&ictx->ctr);
-+
-+	/* Not a stream cipher? */
-+	err = -EINVAL;
-+	if (ctr->cra_blocksize != 1)
-+		goto err_drop_ctr;
-+
-+	/* We want the real thing! */
-+	if (ctr->cra_ablkcipher.ivsize != 16)
-+		goto err_drop_ctr;
-+
-+	err = -ENAMETOOLONG;
-+	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-+		     "ccm_base(%s,%s)", ctr->cra_driver_name,
-+		     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
-+		goto err_drop_ctr;
-+
-+	memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-+
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-+	inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
-+	inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
-+	inst->alg.cra_blocksize = 1;
-+	inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
-+				  (__alignof__(u32) - 1);
-+	inst->alg.cra_type = &crypto_aead_type;
-+	inst->alg.cra_aead.ivsize = 16;
-+	inst->alg.cra_aead.maxauthsize = 16;
-+	inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
-+	inst->alg.cra_init = crypto_ccm_init_tfm;
-+	inst->alg.cra_exit = crypto_ccm_exit_tfm;
-+	inst->alg.cra_aead.setkey = crypto_ccm_setkey;
-+	inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
-+	inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
-+	inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;
-+
-+out:
-+	crypto_mod_put(cipher);
-+	return inst;
-+
-+err_drop_ctr:
-+	crypto_drop_skcipher(&ictx->ctr);
-+err_drop_cipher:
-+	crypto_drop_spawn(&ictx->cipher);
-+err_free_inst:
-+	kfree(inst);
-+out_put_cipher:
-+	inst = ERR_PTR(err);
-+	goto out;
-+}
-+
-+static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
-+{
-+	int err;
-+	const char *cipher_name;
-+	char ctr_name[CRYPTO_MAX_ALG_NAME];
-+	char full_name[CRYPTO_MAX_ALG_NAME];
-+
-+	cipher_name = crypto_attr_alg_name(tb[1]);
-+	err = PTR_ERR(cipher_name);
-+	if (IS_ERR(cipher_name))
-+		return ERR_PTR(err);
-+
-+	if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
-+		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
-+		return ERR_PTR(-ENAMETOOLONG);
-+
-+	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
-+	    CRYPTO_MAX_ALG_NAME)
-+		return ERR_PTR(-ENAMETOOLONG);
-+
-+	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
-+}
-+
-+static void crypto_ccm_free(struct crypto_instance *inst)
-+{
-+	struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
-+
-+	crypto_drop_spawn(&ctx->cipher);
-+	crypto_drop_skcipher(&ctx->ctr);
-+	kfree(inst);
-+}
-+
-+static struct crypto_template crypto_ccm_tmpl = {
-+	.name = "ccm",
-+	.alloc = crypto_ccm_alloc,
-+	.free = crypto_ccm_free,
-+	.module = THIS_MODULE,
-+};
-+
-+static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
-+{
-+	int err;
-+	const char *ctr_name;
-+	const char *cipher_name;
-+	char full_name[CRYPTO_MAX_ALG_NAME];
-+
-+	ctr_name = crypto_attr_alg_name(tb[1]);
-+	err = PTR_ERR(ctr_name);
-+	if (IS_ERR(ctr_name))
-+		return ERR_PTR(err);
-+
-+	cipher_name = crypto_attr_alg_name(tb[2]);
-+	err = PTR_ERR(cipher_name);
-+	if (IS_ERR(cipher_name))
-+		return ERR_PTR(err);
-+
-+	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
-+		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
-+		return ERR_PTR(-ENAMETOOLONG);
-+
-+	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
-+}
-+
-+static struct crypto_template crypto_ccm_base_tmpl = {
-+	.name = "ccm_base",
-+	.alloc = crypto_ccm_base_alloc,
-+	.free = crypto_ccm_free,
-+	.module = THIS_MODULE,
-+};
-+
-+static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
-+				 unsigned int keylen)
-+{
-+	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
-+	struct crypto_aead *child = ctx->child;
-+	int err;
-+
-+	if (keylen < 3)
-+		return -EINVAL;
-+
-+	keylen -= 3;
-+	memcpy(ctx->nonce, key + keylen, 3);
-+
-+	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-+	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
-+				     CRYPTO_TFM_REQ_MASK);
-+	err = crypto_aead_setkey(child, key, keylen);
-+	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
-+				      CRYPTO_TFM_RES_MASK);
-+
-+	return err;
-+}
-+
-+static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
-+				      unsigned int authsize)
-+{
-+	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
-+
-+	switch (authsize) {
-+	case 8:
-+	case 12:
-+	case 16:
-+		break;
-+	default:
-+		return -EINVAL;
-+	}
-+
-+	return crypto_aead_setauthsize(ctx->child, authsize);
-+}
-+
-+static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
-+{
-+	struct aead_request *subreq = aead_request_ctx(req);
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
-+	struct crypto_aead *child = ctx->child;
-+	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
-+			   crypto_aead_alignmask(child) + 1);
-+
-+	/* L' */
-+	iv[0] = 3;
-+
-+	memcpy(iv + 1, ctx->nonce, 3);
-+	memcpy(iv + 4, req->iv, 8);
-+
-+	aead_request_set_tfm(subreq, child);
-+	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
-+				  req->base.data);
-+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
-+	aead_request_set_assoc(subreq, req->assoc, req->assoclen);
-+
-+	return subreq;
-+}
-+
-+static int crypto_rfc4309_encrypt(struct aead_request *req)
-+{
-+	req = crypto_rfc4309_crypt(req);
-+
-+	return crypto_aead_encrypt(req);
-+}
-+
-+static int crypto_rfc4309_decrypt(struct aead_request *req)
-+{
-+	req = crypto_rfc4309_crypt(req);
-+
-+	return crypto_aead_decrypt(req);
-+}
-+
-+static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-+	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
-+	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct crypto_aead *aead;
-+	unsigned long align;
-+
-+	aead = crypto_spawn_aead(spawn);
-+	if (IS_ERR(aead))
-+		return PTR_ERR(aead);
-+
-+	ctx->child = aead;
-+
-+	align = crypto_aead_alignmask(aead);
-+	align &= ~(crypto_tfm_ctx_alignment() - 1);
-+	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
-+				ALIGN(crypto_aead_reqsize(aead),
-+				      crypto_tfm_ctx_alignment()) +
-+				align + 16;
-+
-+	return 0;
-+}
-+
-+static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	crypto_free_aead(ctx->child);
-+}
-+
-+static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
-+{
-+	struct crypto_attr_type *algt;
-+	struct crypto_instance *inst;
-+	struct crypto_aead_spawn *spawn;
-+	struct crypto_alg *alg;
-+	const char *ccm_name;
-+	int err;
-+
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
-+		return ERR_PTR(err);
-+
-+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-+		return ERR_PTR(-EINVAL);
-+
-+	ccm_name = crypto_attr_alg_name(tb[1]);
-+	err = PTR_ERR(ccm_name);
-+	if (IS_ERR(ccm_name))
-+		return ERR_PTR(err);
-+
-+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-+	if (!inst)
-+		return ERR_PTR(-ENOMEM);
-+
-+	spawn = crypto_instance_ctx(inst);
-+	crypto_set_aead_spawn(spawn, inst);
-+	err = crypto_grab_aead(spawn, ccm_name, 0,
-+			       crypto_requires_sync(algt->type, algt->mask));
-+	if (err)
-+		goto out_free_inst;
-+
-+	alg = crypto_aead_spawn_alg(spawn);
-+
-+	err = -EINVAL;
-+
-+	/* We only support 16-byte blocks. */
-+	if (alg->cra_aead.ivsize != 16)
-+		goto out_drop_alg;
-+
-+	/* Not a stream cipher? */
-+	if (alg->cra_blocksize != 1)
-+		goto out_drop_alg;
-+
-+	err = -ENAMETOOLONG;
-+	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-+		     "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
-+	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-+		     "rfc4309(%s)", alg->cra_driver_name) >=
-+	    CRYPTO_MAX_ALG_NAME)
-+		goto out_drop_alg;
-+
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-+	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-+	inst->alg.cra_priority = alg->cra_priority;
-+	inst->alg.cra_blocksize = 1;
-+	inst->alg.cra_alignmask = alg->cra_alignmask;
-+	inst->alg.cra_type = &crypto_nivaead_type;
-+
-+	inst->alg.cra_aead.ivsize = 8;
-+	inst->alg.cra_aead.maxauthsize = 16;
-+
-+	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
-+
-+	inst->alg.cra_init = crypto_rfc4309_init_tfm;
-+	inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
-+
-+	inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
-+	inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
-+	inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
-+	inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
-+
-+	inst->alg.cra_aead.geniv = "seqiv";
+-static int bus_add_attrs(struct bus_type * bus)
++static int bus_add_attrs(struct bus_type *bus)
+ {
+ 	int error = 0;
+ 	int i;
+ 
+ 	if (bus->bus_attrs) {
+ 		for (i = 0; attr_name(bus->bus_attrs[i]); i++) {
+-			error = bus_create_file(bus,&bus->bus_attrs[i]);
++			error = bus_create_file(bus, &bus->bus_attrs[i]);
+ 			if (error)
+-				goto Err;
++				goto err;
+ 		}
+ 	}
+- Done:
++done:
+ 	return error;
+- Err:
++err:
+ 	while (--i >= 0)
+-		bus_remove_file(bus,&bus->bus_attrs[i]);
+-	goto Done;
++		bus_remove_file(bus, &bus->bus_attrs[i]);
++	goto done;
+ }
+ 
+-static void bus_remove_attrs(struct bus_type * bus)
++static void bus_remove_attrs(struct bus_type *bus)
+ {
+ 	int i;
+ 
+ 	if (bus->bus_attrs) {
+ 		for (i = 0; attr_name(bus->bus_attrs[i]); i++)
+-			bus_remove_file(bus,&bus->bus_attrs[i]);
++			bus_remove_file(bus, &bus->bus_attrs[i]);
+ 	}
+ }
+ 
+@@ -827,32 +854,42 @@ static ssize_t bus_uevent_store(struct bus_type *bus,
+ 	enum kobject_action action;
+ 
+ 	if (kobject_action_type(buf, count, &action) == 0)
+-		kobject_uevent(&bus->subsys.kobj, action);
++		kobject_uevent(&bus->p->subsys.kobj, action);
+ 	return count;
+ }
+ static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
+ 
+ /**
+- *	bus_register - register a bus with the system.
+- *	@bus:	bus.
++ * bus_register - register a bus with the system.
++ * @bus: bus.
+  *
+- *	Once we have that, we registered the bus with the kobject
+- *	infrastructure, then register the children subsystems it has:
+- *	the devices and drivers that belong to the bus.
++ * Once we have that, we register the bus with the kobject
++ * infrastructure, then register the children subsystems it has:
++ * the devices and drivers that belong to the bus.
+  */
+-int bus_register(struct bus_type * bus)
++int bus_register(struct bus_type *bus)
+ {
+ 	int retval;
++	struct bus_type_private *priv;
 +
-+out:
-+	return inst;
++	priv = kzalloc(sizeof(struct bus_type_private), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
+ 
+-	BLOCKING_INIT_NOTIFIER_HEAD(&bus->bus_notifier);
++	priv->bus = bus;
++	bus->p = priv;
+ 
+-	retval = kobject_set_name(&bus->subsys.kobj, "%s", bus->name);
++	BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
 +
-+out_drop_alg:
-+	crypto_drop_aead(spawn);
-+out_free_inst:
-+	kfree(inst);
-+	inst = ERR_PTR(err);
-+	goto out;
++	retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name);
+ 	if (retval)
+ 		goto out;
+ 
+-	bus->subsys.kobj.kset = &bus_subsys;
++	priv->subsys.kobj.kset = bus_kset;
++	priv->subsys.kobj.ktype = &bus_ktype;
++	priv->drivers_autoprobe = 1;
+ 
+-	retval = subsystem_register(&bus->subsys);
++	retval = kset_register(&priv->subsys);
+ 	if (retval)
+ 		goto out;
+ 
+@@ -860,23 +897,23 @@ int bus_register(struct bus_type * bus)
+ 	if (retval)
+ 		goto bus_uevent_fail;
+ 
+-	kobject_set_name(&bus->devices.kobj, "devices");
+-	bus->devices.kobj.parent = &bus->subsys.kobj;
+-	retval = kset_register(&bus->devices);
+-	if (retval)
++	priv->devices_kset = kset_create_and_add("devices", NULL,
++						 &priv->subsys.kobj);
++	if (!priv->devices_kset) {
++		retval = -ENOMEM;
+ 		goto bus_devices_fail;
++	}
+ 
+-	kobject_set_name(&bus->drivers.kobj, "drivers");
+-	bus->drivers.kobj.parent = &bus->subsys.kobj;
+-	bus->drivers.ktype = &driver_ktype;
+-	retval = kset_register(&bus->drivers);
+-	if (retval)
++	priv->drivers_kset = kset_create_and_add("drivers", NULL,
++						 &priv->subsys.kobj);
++	if (!priv->drivers_kset) {
++		retval = -ENOMEM;
+ 		goto bus_drivers_fail;
++	}
+ 
+-	klist_init(&bus->klist_devices, klist_devices_get, klist_devices_put);
+-	klist_init(&bus->klist_drivers, NULL, NULL);
++	klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
++	klist_init(&priv->klist_drivers, NULL, NULL);
+ 
+-	bus->drivers_autoprobe = 1;
+ 	retval = add_probe_files(bus);
+ 	if (retval)
+ 		goto bus_probe_files_fail;
+@@ -885,66 +922,73 @@ int bus_register(struct bus_type * bus)
+ 	if (retval)
+ 		goto bus_attrs_fail;
+ 
+-	pr_debug("bus type '%s' registered\n", bus->name);
++	pr_debug("bus: '%s': registered\n", bus->name);
+ 	return 0;
+ 
+ bus_attrs_fail:
+ 	remove_probe_files(bus);
+ bus_probe_files_fail:
+-	kset_unregister(&bus->drivers);
++	kset_unregister(bus->p->drivers_kset);
+ bus_drivers_fail:
+-	kset_unregister(&bus->devices);
++	kset_unregister(bus->p->devices_kset);
+ bus_devices_fail:
+ 	bus_remove_file(bus, &bus_attr_uevent);
+ bus_uevent_fail:
+-	subsystem_unregister(&bus->subsys);
++	kset_unregister(&bus->p->subsys);
++	kfree(bus->p);
+ out:
+ 	return retval;
+ }
++EXPORT_SYMBOL_GPL(bus_register);
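With the reworked bus_register() above, the devices/drivers ksets and the notifier chain live in the internal bus_type_private, so a subsystem only fills in its struct bus_type and registers it. A minimal sketch, assuming hypothetical "foo" names:

#include <linux/device.h>
#include <linux/module.h>

static int foo_match(struct device *dev, struct device_driver *drv)
{
	return 1;	/* toy policy: every driver matches every device */
}

static struct bus_type foo_bus_type = {
	.name	= "foo",
	.match	= foo_match,
};

static int __init foo_bus_init(void)
{
	/* creates /sys/bus/foo/ with its devices/ and drivers/ directories */
	return bus_register(&foo_bus_type);
}

static void __exit foo_bus_exit(void)
{
	bus_unregister(&foo_bus_type);
}

module_init(foo_bus_init);
module_exit(foo_bus_exit);
MODULE_LICENSE("GPL");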
+ 
+ /**
+- *	bus_unregister - remove a bus from the system
+- *	@bus:	bus.
++ * bus_unregister - remove a bus from the system
++ * @bus: bus.
+  *
+- *	Unregister the child subsystems and the bus itself.
+- *	Finally, we call bus_put() to release the refcount
++ * Unregister the child subsystems and the bus itself.
++ * Finally, we call bus_put() to release the refcount
+  */
+-void bus_unregister(struct bus_type * bus)
++void bus_unregister(struct bus_type *bus)
+ {
+-	pr_debug("bus %s: unregistering\n", bus->name);
++	pr_debug("bus: '%s': unregistering\n", bus->name);
+ 	bus_remove_attrs(bus);
+ 	remove_probe_files(bus);
+-	kset_unregister(&bus->drivers);
+-	kset_unregister(&bus->devices);
++	kset_unregister(bus->p->drivers_kset);
++	kset_unregister(bus->p->devices_kset);
+ 	bus_remove_file(bus, &bus_attr_uevent);
+-	subsystem_unregister(&bus->subsys);
++	kset_unregister(&bus->p->subsys);
++	kfree(bus->p);
+ }
++EXPORT_SYMBOL_GPL(bus_unregister);
+ 
+ int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb)
+ {
+-	return blocking_notifier_chain_register(&bus->bus_notifier, nb);
++	return blocking_notifier_chain_register(&bus->p->bus_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(bus_register_notifier);
+ 
+ int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb)
+ {
+-	return blocking_notifier_chain_unregister(&bus->bus_notifier, nb);
++	return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(bus_unregister_notifier);
+ 
+-int __init buses_init(void)
++struct kset *bus_get_kset(struct bus_type *bus)
+ {
+-	return subsystem_register(&bus_subsys);
++	return &bus->p->subsys;
+ }
++EXPORT_SYMBOL_GPL(bus_get_kset);
+ 
++struct klist *bus_get_device_klist(struct bus_type *bus)
++{
++	return &bus->p->klist_devices;
 +}
-+
-+static void crypto_rfc4309_free(struct crypto_instance *inst)
++EXPORT_SYMBOL_GPL(bus_get_device_klist);
+ 
+-EXPORT_SYMBOL_GPL(bus_for_each_dev);
+-EXPORT_SYMBOL_GPL(bus_find_device);
+-EXPORT_SYMBOL_GPL(bus_for_each_drv);
+-
+-EXPORT_SYMBOL_GPL(bus_register);
+-EXPORT_SYMBOL_GPL(bus_unregister);
+-EXPORT_SYMBOL_GPL(bus_rescan_devices);
+-
+-EXPORT_SYMBOL_GPL(bus_create_file);
+-EXPORT_SYMBOL_GPL(bus_remove_file);
++int __init buses_init(void)
 +{
-+	crypto_drop_spawn(crypto_instance_ctx(inst));
-+	kfree(inst);
++	bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
++	if (!bus_kset)
++		return -ENOMEM;
++	return 0;
 +}
-+
-+static struct crypto_template crypto_rfc4309_tmpl = {
-+	.name = "rfc4309",
-+	.alloc = crypto_rfc4309_alloc,
-+	.free = crypto_rfc4309_free,
-+	.module = THIS_MODULE,
+diff --git a/drivers/base/class.c b/drivers/base/class.c
+index a863bb0..9d91537 100644
+--- a/drivers/base/class.c
++++ b/drivers/base/class.c
+@@ -17,16 +17,17 @@
+ #include <linux/kdev_t.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
++#include <linux/genhd.h>
+ #include "base.h"
+ 
+ #define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
+ #define to_class(obj) container_of(obj, struct class, subsys.kobj)
+ 
+-static ssize_t
+-class_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
++static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
++			       char *buf)
+ {
+-	struct class_attribute * class_attr = to_class_attr(attr);
+-	struct class * dc = to_class(kobj);
++	struct class_attribute *class_attr = to_class_attr(attr);
++	struct class *dc = to_class(kobj);
+ 	ssize_t ret = -EIO;
+ 
+ 	if (class_attr->show)
+@@ -34,12 +35,11 @@ class_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
+ 	return ret;
+ }
+ 
+-static ssize_t
+-class_attr_store(struct kobject * kobj, struct attribute * attr,
+-		 const char * buf, size_t count)
++static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
++				const char *buf, size_t count)
+ {
+-	struct class_attribute * class_attr = to_class_attr(attr);
+-	struct class * dc = to_class(kobj);
++	struct class_attribute *class_attr = to_class_attr(attr);
++	struct class *dc = to_class(kobj);
+ 	ssize_t ret = -EIO;
+ 
+ 	if (class_attr->store)
+@@ -47,7 +47,7 @@ class_attr_store(struct kobject * kobj, struct attribute * attr,
+ 	return ret;
+ }
+ 
+-static void class_release(struct kobject * kobj)
++static void class_release(struct kobject *kobj)
+ {
+ 	struct class *class = to_class(kobj);
+ 
+@@ -71,20 +71,20 @@ static struct kobj_type class_ktype = {
+ };
+ 
+ /* Hotplug events for classes go to the class_obj subsys */
+-static decl_subsys(class, &class_ktype, NULL);
++static struct kset *class_kset;
+ 
+ 
+-int class_create_file(struct class * cls, const struct class_attribute * attr)
++int class_create_file(struct class *cls, const struct class_attribute *attr)
+ {
+ 	int error;
+-	if (cls) {
++	if (cls)
+ 		error = sysfs_create_file(&cls->subsys.kobj, &attr->attr);
+-	} else
++	else
+ 		error = -EINVAL;
+ 	return error;
+ }
+ 
+-void class_remove_file(struct class * cls, const struct class_attribute * attr)
++void class_remove_file(struct class *cls, const struct class_attribute *attr)
+ {
+ 	if (cls)
+ 		sysfs_remove_file(&cls->subsys.kobj, &attr->attr);
+@@ -93,48 +93,48 @@ void class_remove_file(struct class * cls, const struct class_attribute * attr)
+ static struct class *class_get(struct class *cls)
+ {
+ 	if (cls)
+-		return container_of(kset_get(&cls->subsys), struct class, subsys);
++		return container_of(kset_get(&cls->subsys),
++				    struct class, subsys);
+ 	return NULL;
+ }
+ 
+-static void class_put(struct class * cls)
++static void class_put(struct class *cls)
+ {
+ 	if (cls)
+ 		kset_put(&cls->subsys);
+ }
+ 
+-
+-static int add_class_attrs(struct class * cls)
++static int add_class_attrs(struct class *cls)
+ {
+ 	int i;
+ 	int error = 0;
+ 
+ 	if (cls->class_attrs) {
+ 		for (i = 0; attr_name(cls->class_attrs[i]); i++) {
+-			error = class_create_file(cls,&cls->class_attrs[i]);
++			error = class_create_file(cls, &cls->class_attrs[i]);
+ 			if (error)
+-				goto Err;
++				goto error;
+ 		}
+ 	}
+- Done:
++done:
+ 	return error;
+- Err:
++error:
+ 	while (--i >= 0)
+-		class_remove_file(cls,&cls->class_attrs[i]);
+-	goto Done;
++		class_remove_file(cls, &cls->class_attrs[i]);
++	goto done;
+ }
+ 
+-static void remove_class_attrs(struct class * cls)
++static void remove_class_attrs(struct class *cls)
+ {
+ 	int i;
+ 
+ 	if (cls->class_attrs) {
+ 		for (i = 0; attr_name(cls->class_attrs[i]); i++)
+-			class_remove_file(cls,&cls->class_attrs[i]);
++			class_remove_file(cls, &cls->class_attrs[i]);
+ 	}
+ }
+ 
+-int class_register(struct class * cls)
++int class_register(struct class *cls)
+ {
+ 	int error;
+ 
+@@ -149,9 +149,16 @@ int class_register(struct class * cls)
+ 	if (error)
+ 		return error;
+ 
+-	cls->subsys.kobj.kset = &class_subsys;
++#if defined(CONFIG_SYSFS_DEPRECATED) && defined(CONFIG_BLOCK)
++	/* let the block class directory show up in the root of sysfs */
++	if (cls != &block_class)
++		cls->subsys.kobj.kset = class_kset;
++#else
++	cls->subsys.kobj.kset = class_kset;
++#endif
++	cls->subsys.kobj.ktype = &class_ktype;
+ 
+-	error = subsystem_register(&cls->subsys);
++	error = kset_register(&cls->subsys);
+ 	if (!error) {
+ 		error = add_class_attrs(class_get(cls));
+ 		class_put(cls);
+@@ -159,11 +166,11 @@ int class_register(struct class * cls)
+ 	return error;
+ }
+ 
+-void class_unregister(struct class * cls)
++void class_unregister(struct class *cls)
+ {
+ 	pr_debug("device class '%s': unregistering\n", cls->name);
+ 	remove_class_attrs(cls);
+-	subsystem_unregister(&cls->subsys);
++	kset_unregister(&cls->subsys);
+ }
+ 
+ static void class_create_release(struct class *cls)
+@@ -241,8 +248,8 @@ void class_destroy(struct class *cls)
+ 
+ /* Class Device Stuff */
+ 
+-int class_device_create_file(struct class_device * class_dev,
+-			     const struct class_device_attribute * attr)
++int class_device_create_file(struct class_device *class_dev,
++			     const struct class_device_attribute *attr)
+ {
+ 	int error = -EINVAL;
+ 	if (class_dev)
+@@ -250,8 +257,8 @@ int class_device_create_file(struct class_device * class_dev,
+ 	return error;
+ }
+ 
+-void class_device_remove_file(struct class_device * class_dev,
+-			      const struct class_device_attribute * attr)
++void class_device_remove_file(struct class_device *class_dev,
++			      const struct class_device_attribute *attr)
+ {
+ 	if (class_dev)
+ 		sysfs_remove_file(&class_dev->kobj, &attr->attr);
+@@ -273,12 +280,11 @@ void class_device_remove_bin_file(struct class_device *class_dev,
+ 		sysfs_remove_bin_file(&class_dev->kobj, attr);
+ }
+ 
+-static ssize_t
+-class_device_attr_show(struct kobject * kobj, struct attribute * attr,
+-		       char * buf)
++static ssize_t class_device_attr_show(struct kobject *kobj,
++				      struct attribute *attr, char *buf)
+ {
+-	struct class_device_attribute * class_dev_attr = to_class_dev_attr(attr);
+-	struct class_device * cd = to_class_dev(kobj);
++	struct class_device_attribute *class_dev_attr = to_class_dev_attr(attr);
++	struct class_device *cd = to_class_dev(kobj);
+ 	ssize_t ret = 0;
+ 
+ 	if (class_dev_attr->show)
+@@ -286,12 +292,12 @@ class_device_attr_show(struct kobject * kobj, struct attribute * attr,
+ 	return ret;
+ }
+ 
+-static ssize_t
+-class_device_attr_store(struct kobject * kobj, struct attribute * attr,
+-			const char * buf, size_t count)
++static ssize_t class_device_attr_store(struct kobject *kobj,
++				       struct attribute *attr,
++				       const char *buf, size_t count)
+ {
+-	struct class_device_attribute * class_dev_attr = to_class_dev_attr(attr);
+-	struct class_device * cd = to_class_dev(kobj);
++	struct class_device_attribute *class_dev_attr = to_class_dev_attr(attr);
++	struct class_device *cd = to_class_dev(kobj);
+ 	ssize_t ret = 0;
+ 
+ 	if (class_dev_attr->store)
+@@ -304,10 +310,10 @@ static struct sysfs_ops class_dev_sysfs_ops = {
+ 	.store	= class_device_attr_store,
+ };
+ 
+-static void class_dev_release(struct kobject * kobj)
++static void class_dev_release(struct kobject *kobj)
+ {
+ 	struct class_device *cd = to_class_dev(kobj);
+-	struct class * cls = cd->class;
++	struct class *cls = cd->class;
+ 
+ 	pr_debug("device class '%s': release.\n", cd->class_id);
+ 
+@@ -316,8 +322,8 @@ static void class_dev_release(struct kobject * kobj)
+ 	else if (cls->release)
+ 		cls->release(cd);
+ 	else {
+-		printk(KERN_ERR "Class Device '%s' does not have a release() function, "
+-			"it is broken and must be fixed.\n",
++		printk(KERN_ERR "Class Device '%s' does not have a release() "
++			"function, it is broken and must be fixed.\n",
+ 			cd->class_id);
+ 		WARN_ON(1);
+ 	}
+@@ -428,7 +434,8 @@ static int class_uevent(struct kset *kset, struct kobject *kobj,
+ 			add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
+ 
+ 		if (dev->driver)
+-			add_uevent_var(env, "PHYSDEVDRIVER=%s", dev->driver->name);
++			add_uevent_var(env, "PHYSDEVDRIVER=%s",
++				       dev->driver->name);
+ 	}
+ 
+ 	if (class_dev->uevent) {
+@@ -452,43 +459,49 @@ static struct kset_uevent_ops class_uevent_ops = {
+ 	.uevent =	class_uevent,
+ };
+ 
+-static decl_subsys(class_obj, &class_device_ktype, &class_uevent_ops);
+-
++/*
++ * DO NOT copy how this is created; kset_create_and_add() should be
++ * called, but this is a hold-over from the old way and will be deleted
++ * entirely soon.
++ */
++static struct kset class_obj_subsys = {
++	.uevent_ops = &class_uevent_ops,
 +};
-+
-+static int __init crypto_ccm_module_init(void)
+ 
+-static int class_device_add_attrs(struct class_device * cd)
++static int class_device_add_attrs(struct class_device *cd)
+ {
+ 	int i;
+ 	int error = 0;
+-	struct class * cls = cd->class;
++	struct class *cls = cd->class;
+ 
+ 	if (cls->class_dev_attrs) {
+ 		for (i = 0; attr_name(cls->class_dev_attrs[i]); i++) {
+ 			error = class_device_create_file(cd,
+-							 &cls->class_dev_attrs[i]);
++						&cls->class_dev_attrs[i]);
+ 			if (error)
+-				goto Err;
++				goto err;
+ 		}
+ 	}
+- Done:
++done:
+ 	return error;
+- Err:
++err:
+ 	while (--i >= 0)
+-		class_device_remove_file(cd,&cls->class_dev_attrs[i]);
+-	goto Done;
++		class_device_remove_file(cd, &cls->class_dev_attrs[i]);
++	goto done;
+ }
+ 
+-static void class_device_remove_attrs(struct class_device * cd)
++static void class_device_remove_attrs(struct class_device *cd)
+ {
+ 	int i;
+-	struct class * cls = cd->class;
++	struct class *cls = cd->class;
+ 
+ 	if (cls->class_dev_attrs) {
+ 		for (i = 0; attr_name(cls->class_dev_attrs[i]); i++)
+-			class_device_remove_file(cd,&cls->class_dev_attrs[i]);
++			class_device_remove_file(cd, &cls->class_dev_attrs[i]);
+ 	}
+ }
+ 
+-static int class_device_add_groups(struct class_device * cd)
++static int class_device_add_groups(struct class_device *cd)
+ {
+ 	int i;
+ 	int error = 0;
+@@ -498,7 +511,8 @@ static int class_device_add_groups(struct class_device * cd)
+ 			error = sysfs_create_group(&cd->kobj, cd->groups[i]);
+ 			if (error) {
+ 				while (--i >= 0)
+-					sysfs_remove_group(&cd->kobj, cd->groups[i]);
++					sysfs_remove_group(&cd->kobj,
++							   cd->groups[i]);
+ 				goto out;
+ 			}
+ 		}
+@@ -507,14 +521,12 @@ out:
+ 	return error;
+ }
+ 
+-static void class_device_remove_groups(struct class_device * cd)
++static void class_device_remove_groups(struct class_device *cd)
+ {
+ 	int i;
+-	if (cd->groups) {
+-		for (i = 0; cd->groups[i]; i++) {
++	if (cd->groups)
++		for (i = 0; cd->groups[i]; i++)
+ 			sysfs_remove_group(&cd->kobj, cd->groups[i]);
+-		}
+-	}
+ }
+ 
+ static ssize_t show_dev(struct class_device *class_dev, char *buf)
+@@ -537,8 +549,8 @@ static struct class_device_attribute class_uevent_attr =
+ 
+ void class_device_initialize(struct class_device *class_dev)
+ {
+-	kobj_set_kset_s(class_dev, class_obj_subsys);
+-	kobject_init(&class_dev->kobj);
++	class_dev->kobj.kset = &class_obj_subsys;
++	kobject_init(&class_dev->kobj, &class_device_ktype);
+ 	INIT_LIST_HEAD(&class_dev->node);
+ }
+ 
+@@ -566,16 +578,13 @@ int class_device_add(struct class_device *class_dev)
+ 		 class_dev->class_id);
+ 
+ 	/* first, register with generic layer. */
+-	error = kobject_set_name(&class_dev->kobj, "%s", class_dev->class_id);
+-	if (error)
+-		goto out2;
+-
+ 	if (parent_class_dev)
+ 		class_dev->kobj.parent = &parent_class_dev->kobj;
+ 	else
+ 		class_dev->kobj.parent = &parent_class->subsys.kobj;
+ 
+-	error = kobject_add(&class_dev->kobj);
++	error = kobject_add(&class_dev->kobj, class_dev->kobj.parent,
++			    "%s", class_dev->class_id);
+ 	if (error)
+ 		goto out2;
+ 
+@@ -642,7 +651,7 @@ int class_device_add(struct class_device *class_dev)
+  out3:
+ 	kobject_del(&class_dev->kobj);
+  out2:
+-	if(parent_class_dev)
++	if (parent_class_dev)
+ 		class_device_put(parent_class_dev);
+ 	class_put(parent_class);
+  out1:
+@@ -659,9 +668,11 @@ int class_device_register(struct class_device *class_dev)
+ /**
+  * class_device_create - creates a class device and registers it with sysfs
+  * @cls: pointer to the struct class that this device should be registered to.
+- * @parent: pointer to the parent struct class_device of this new device, if any.
++ * @parent: pointer to the parent struct class_device of this new device, if
++ * any.
+  * @devt: the dev_t for the char device to be added.
+- * @device: a pointer to a struct device that is assiociated with this class device.
++ * @device: a pointer to a struct device that is associated with this class
++ * device.
+  * @fmt: string for the class device's name
+  *
+  * This function can be used by char device classes.  A struct
+@@ -785,7 +796,7 @@ void class_device_destroy(struct class *cls, dev_t devt)
+ 		class_device_unregister(class_dev);
+ }
+ 
+-struct class_device * class_device_get(struct class_device *class_dev)
++struct class_device *class_device_get(struct class_device *class_dev)
+ {
+ 	if (class_dev)
+ 		return to_class_dev(kobject_get(&class_dev->kobj));
+@@ -798,6 +809,139 @@ void class_device_put(struct class_device *class_dev)
+ 		kobject_put(&class_dev->kobj);
+ }
+ 
++/**
++ * class_for_each_device - device iterator
++ * @class: the class we're iterating
++ * @data: data for the callback
++ * @fn: function to be called for each device
++ *
++ * Iterate over @class's list of devices, and call @fn for each,
++ * passing it @data.
++ *
++ * We check the return of @fn each time. If it returns anything
++ * other than 0, we break out and return that value.
++ *
++ * Note, we hold class->sem in this function, so it can not be
++ * re-acquired in @fn, otherwise it will self-deadlock. For
++ * example, calls to add or remove class members would be verboten.
++ */
++int class_for_each_device(struct class *class, void *data,
++			   int (*fn)(struct device *, void *))
 +{
-+	int err;
-+
-+	err = crypto_register_template(&crypto_ccm_base_tmpl);
-+	if (err)
-+		goto out;
-+
-+	err = crypto_register_template(&crypto_ccm_tmpl);
-+	if (err)
-+		goto out_undo_base;
-+
-+	err = crypto_register_template(&crypto_rfc4309_tmpl);
-+	if (err)
-+		goto out_undo_ccm;
-+
-+out:
-+	return err;
++	struct device *dev;
++	int error = 0;
 +
-+out_undo_ccm:
-+	crypto_unregister_template(&crypto_ccm_tmpl);
-+out_undo_base:
-+	crypto_unregister_template(&crypto_ccm_base_tmpl);
-+	goto out;
-+}
++	if (!class)
++		return -EINVAL;
++	down(&class->sem);
++	list_for_each_entry(dev, &class->devices, node) {
++		dev = get_device(dev);
++		if (dev) {
++			error = fn(dev, data);
++			put_device(dev);
++		} else
++			error = -ENODEV;
++		if (error)
++			break;
++	}
++	up(&class->sem);
 +
-+static void __exit crypto_ccm_module_exit(void)
-+{
-+	crypto_unregister_template(&crypto_rfc4309_tmpl);
-+	crypto_unregister_template(&crypto_ccm_tmpl);
-+	crypto_unregister_template(&crypto_ccm_base_tmpl);
++	return error;
 +}
++EXPORT_SYMBOL_GPL(class_for_each_device);
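A minimal sketch of the new iterator from a caller's point of view (hypothetical "foo" names); the callback runs with class->sem held, so it must not add or remove class members:

static int foo_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* returning non-zero stops the walk */
}

static int foo_count_devices(struct class *foo_class)
{
	int count = 0;

	class_for_each_device(foo_class, &count, foo_count_one);
	return count;
}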
 +
-+module_init(crypto_ccm_module_init);
-+module_exit(crypto_ccm_module_exit);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Counter with CBC MAC");
-+MODULE_ALIAS("ccm_base");
-+MODULE_ALIAS("rfc4309");
-diff --git a/crypto/chainiv.c b/crypto/chainiv.c
-new file mode 100644
-index 0000000..d17fa04
---- /dev/null
-+++ b/crypto/chainiv.c
-@@ -0,0 +1,331 @@
-+/*
-+ * chainiv: Chain IV Generator
++/**
++ * class_find_device - device iterator for locating a particular device
++ * @class: the class we're iterating
++ * @data: data for the match function
++ * @match: function to check device
 + *
-+ * Generate IVs simply be using the last block of the previous encryption.
-+ * This is mainly useful for CBC with a synchronous algorithm.
++ * This is similar to the class_for_each_device() function above, but it
++ * returns a reference to a device that is 'found' for later use, as
++ * determined by the @match callback.
 + *
-+ * Copyright (c) 2007 Herbert Xu <herbert at gondor.apana.org.au>
++ * The callback should return 0 if the device doesn't match and non-zero
++ * if it does.  If the callback returns non-zero, this function will
++ * return to the caller and not iterate over any more devices.
 + *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
-+ * any later version.
++ * Note, you will need to drop the reference with put_device() after use.
 + *
++ * We hold class->sem in this function, so it can not be
++ * re-acquired in @match, otherwise it will self-deadlock. For
++ * example, calls to add or remove class members would be verboten.
 + */
-+
-+#include <crypto/internal/skcipher.h>
-+#include <linux/err.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/random.h>
-+#include <linux/spinlock.h>
-+#include <linux/string.h>
-+#include <linux/workqueue.h>
-+
-+enum {
-+	CHAINIV_STATE_INUSE = 0,
-+};
-+
-+struct chainiv_ctx {
-+	spinlock_t lock;
-+	char iv[];
-+};
-+
-+struct async_chainiv_ctx {
-+	unsigned long state;
-+
-+	spinlock_t lock;
-+	int err;
-+
-+	struct crypto_queue queue;
-+	struct work_struct postponed;
-+
-+	char iv[];
-+};
-+
-+static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
++struct device *class_find_device(struct class *class, void *data,
++				   int (*match)(struct device *, void *))
 +{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-+	unsigned int ivsize;
-+	int err;
-+
-+	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-+	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
-+						~CRYPTO_TFM_REQ_MAY_SLEEP,
-+					req->creq.base.complete,
-+					req->creq.base.data);
-+	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-+				     req->creq.nbytes, req->creq.info);
-+
-+	spin_lock_bh(&ctx->lock);
-+
-+	ivsize = crypto_ablkcipher_ivsize(geniv);
-+
-+	memcpy(req->giv, ctx->iv, ivsize);
-+	memcpy(subreq->info, ctx->iv, ivsize);
-+
-+	err = crypto_ablkcipher_encrypt(subreq);
-+	if (err)
-+		goto unlock;
++	struct device *dev;
++	int found = 0;
 +
-+	memcpy(ctx->iv, subreq->info, ivsize);
++	if (!class)
++		return NULL;
 +
-+unlock:
-+	spin_unlock_bh(&ctx->lock);
++	down(&class->sem);
++	list_for_each_entry(dev, &class->devices, node) {
++		dev = get_device(dev);
++		if (dev) {
++			if (match(dev, data)) {
++				found = 1;
++				break;
++			} else
++				put_device(dev);
++		} else
++			break;
++	}
++	up(&class->sem);
 +
-+	return err;
++	return found ? dev : NULL;
 +}
++EXPORT_SYMBOL_GPL(class_find_device);
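A matching sketch for the lookup variant (again with made-up names); the caller gets back a referenced device and is responsible for dropping it with put_device():

static int foo_match_devt(struct device *dev, void *data)
{
	dev_t *devt = data;

	return dev->devt == *devt;	/* non-zero means "found" */
}

static struct device *foo_find_by_devt(struct class *foo_class, dev_t devt)
{
	/* the returned reference must later be dropped with put_device() */
	return class_find_device(foo_class, &devt, foo_match_devt);
}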
 +
-+static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
++/**
++ * class_find_child - device iterator for locating a particular class_device
++ * @class: the class we're iterating
++ * @data: data for the match function
++ * @match: function to check class_device
++ *
++ * This function returns a reference to a class_device that is 'found' for
++ * later use, as determined by the @match callback.
++ *
++ * The callback should return 0 if the class_device doesn't match and non-zero
++ * if it does.  If the callback returns non-zero, this function will
++ * return to the caller and not iterate over any more class_devices.
++ *
++ * Note, you will need to drop the reference with class_device_put() after use.
++ *
++ * We hold class->sem in this function, so it can not be
++ * re-acquired in @match, otherwise it will self-deadlock. For
++ * example, calls to add or remove class members would be verboten.
++ */
++struct class_device *class_find_child(struct class *class, void *data,
++				   int (*match)(struct class_device *, void *))
 +{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+
-+	spin_lock_bh(&ctx->lock);
-+	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
-+	    chainiv_givencrypt_first)
-+		goto unlock;
++	struct class_device *dev;
++	int found = 0;
 +
-+	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
-+	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
++	if (!class)
++		return NULL;
 +
-+unlock:
-+	spin_unlock_bh(&ctx->lock);
++	down(&class->sem);
++	list_for_each_entry(dev, &class->children, node) {
++		dev = class_device_get(dev);
++		if (dev) {
++			if (match(dev, data)) {
++				found = 1;
++				break;
++			} else
++				class_device_put(dev);
++		} else
++			break;
++	}
++	up(&class->sem);
 +
-+	return chainiv_givencrypt(req);
++	return found ? dev : NULL;
 +}
-+
-+static int chainiv_init_common(struct crypto_tfm *tfm)
++EXPORT_SYMBOL_GPL(class_find_child);
+ 
+ int class_interface_register(struct class_interface *class_intf)
+ {
+@@ -829,7 +973,7 @@ int class_interface_register(struct class_interface *class_intf)
+ 
+ void class_interface_unregister(struct class_interface *class_intf)
+ {
+-	struct class * parent = class_intf->class;
++	struct class *parent = class_intf->class;
+ 	struct class_device *class_dev;
+ 	struct device *dev;
+ 
+@@ -853,15 +997,14 @@ void class_interface_unregister(struct class_interface *class_intf)
+ 
+ int __init classes_init(void)
+ {
+-	int retval;
+-
+-	retval = subsystem_register(&class_subsys);
+-	if (retval)
+-		return retval;
++	class_kset = kset_create_and_add("class", NULL, NULL);
++	if (!class_kset)
++		return -ENOMEM;
+ 
+ 	/* ick, this is ugly, the things we go through to keep from showing up
+ 	 * in sysfs... */
+ 	kset_init(&class_obj_subsys);
++	kobject_set_name(&class_obj_subsys.kobj, "class_obj");
+ 	if (!class_obj_subsys.kobj.parent)
+ 		class_obj_subsys.kobj.parent = &class_obj_subsys.kobj;
+ 	return 0;
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 2683eac..b172787 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -18,18 +18,26 @@
+ #include <linux/string.h>
+ #include <linux/kdev_t.h>
+ #include <linux/notifier.h>
+-
++#include <linux/genhd.h>
+ #include <asm/semaphore.h>
+ 
+ #include "base.h"
+ #include "power/power.h"
+ 
+-int (*platform_notify)(struct device * dev) = NULL;
+-int (*platform_notify_remove)(struct device * dev) = NULL;
++int (*platform_notify)(struct device *dev) = NULL;
++int (*platform_notify_remove)(struct device *dev) = NULL;
+ 
+-/*
+- * sysfs bindings for devices.
+- */
++#ifdef CONFIG_BLOCK
++static inline int device_is_not_partition(struct device *dev)
 +{
-+	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
-+
-+	return skcipher_geniv_init(tfm);
++	return !(dev->type == &part_type);
 +}
-+
-+static int chainiv_init(struct crypto_tfm *tfm)
++#else
++static inline int device_is_not_partition(struct device *dev)
 +{
-+	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	spin_lock_init(&ctx->lock);
-+
-+	return chainiv_init_common(tfm);
++	return 1;
 +}
++#endif
+ 
+ /**
+  * dev_driver_string - Return a device's driver name, if at all possible
+@@ -51,11 +59,11 @@ EXPORT_SYMBOL(dev_driver_string);
+ #define to_dev(obj) container_of(obj, struct device, kobj)
+ #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+ 
+-static ssize_t
+-dev_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
++static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
++			     char *buf)
+ {
+-	struct device_attribute * dev_attr = to_dev_attr(attr);
+-	struct device * dev = to_dev(kobj);
++	struct device_attribute *dev_attr = to_dev_attr(attr);
++	struct device *dev = to_dev(kobj);
+ 	ssize_t ret = -EIO;
+ 
+ 	if (dev_attr->show)
+@@ -63,12 +71,11 @@ dev_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
+ 	return ret;
+ }
+ 
+-static ssize_t
+-dev_attr_store(struct kobject * kobj, struct attribute * attr,
+-	       const char * buf, size_t count)
++static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
++			      const char *buf, size_t count)
+ {
+-	struct device_attribute * dev_attr = to_dev_attr(attr);
+-	struct device * dev = to_dev(kobj);
++	struct device_attribute *dev_attr = to_dev_attr(attr);
++	struct device *dev = to_dev(kobj);
+ 	ssize_t ret = -EIO;
+ 
+ 	if (dev_attr->store)
+@@ -90,9 +97,9 @@ static struct sysfs_ops dev_sysfs_ops = {
+  *	reaches 0. We forward the call to the device's release
+  *	method, which should handle actually freeing the structure.
+  */
+-static void device_release(struct kobject * kobj)
++static void device_release(struct kobject *kobj)
+ {
+-	struct device * dev = to_dev(kobj);
++	struct device *dev = to_dev(kobj);
+ 
+ 	if (dev->release)
+ 		dev->release(dev);
+@@ -101,8 +108,8 @@ static void device_release(struct kobject * kobj)
+ 	else if (dev->class && dev->class->dev_release)
+ 		dev->class->dev_release(dev);
+ 	else {
+-		printk(KERN_ERR "Device '%s' does not have a release() function, "
+-			"it is broken and must be fixed.\n",
++		printk(KERN_ERR "Device '%s' does not have a release() "
++			"function, it is broken and must be fixed.\n",
+ 			dev->bus_id);
+ 		WARN_ON(1);
+ 	}
+@@ -185,7 +192,8 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
+ 		add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
+ 
+ 		if (dev->driver)
+-			add_uevent_var(env, "PHYSDEVDRIVER=%s", dev->driver->name);
++			add_uevent_var(env, "PHYSDEVDRIVER=%s",
++				       dev->driver->name);
+ 	}
+ #endif
+ 
+@@ -193,15 +201,16 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
+ 	if (dev->bus && dev->bus->uevent) {
+ 		retval = dev->bus->uevent(dev, env);
+ 		if (retval)
+-			pr_debug ("%s: bus uevent() returned %d\n",
+-				  __FUNCTION__, retval);
++			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
++				 dev->bus_id, __FUNCTION__, retval);
+ 	}
+ 
+ 	/* have the class specific function add its stuff */
+ 	if (dev->class && dev->class->dev_uevent) {
+ 		retval = dev->class->dev_uevent(dev, env);
+ 		if (retval)
+-			pr_debug("%s: class uevent() returned %d\n",
++			pr_debug("device: '%s': %s: class uevent() "
++				 "returned %d\n", dev->bus_id,
+ 				 __FUNCTION__, retval);
+ 	}
+ 
+@@ -209,7 +218,8 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
+ 	if (dev->type && dev->type->uevent) {
+ 		retval = dev->type->uevent(dev, env);
+ 		if (retval)
+-			pr_debug("%s: dev_type uevent() returned %d\n",
++			pr_debug("device: '%s': %s: dev_type uevent() "
++				 "returned %d\n", dev->bus_id,
+ 				 __FUNCTION__, retval);
+ 	}
+ 
+@@ -325,7 +335,8 @@ static int device_add_groups(struct device *dev,
+ 			error = sysfs_create_group(&dev->kobj, groups[i]);
+ 			if (error) {
+ 				while (--i >= 0)
+-					sysfs_remove_group(&dev->kobj, groups[i]);
++					sysfs_remove_group(&dev->kobj,
++							   groups[i]);
+ 				break;
+ 			}
+ 		}
+@@ -401,20 +412,15 @@ static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
+ static struct device_attribute devt_attr =
+ 	__ATTR(dev, S_IRUGO, show_dev, NULL);
+ 
+-/*
+- *	devices_subsys - structure to be registered with kobject core.
+- */
+-
+-decl_subsys(devices, &device_ktype, &device_uevent_ops);
+-
++/* kset to create /sys/devices/  */
++struct kset *devices_kset;
+ 
+ /**
+- *	device_create_file - create sysfs attribute file for device.
+- *	@dev:	device.
+- *	@attr:	device attribute descriptor.
++ * device_create_file - create sysfs attribute file for device.
++ * @dev: device.
++ * @attr: device attribute descriptor.
+  */
+-
+-int device_create_file(struct device * dev, struct device_attribute * attr)
++int device_create_file(struct device *dev, struct device_attribute *attr)
+ {
+ 	int error = 0;
+ 	if (get_device(dev)) {
+@@ -425,12 +431,11 @@ int device_create_file(struct device * dev, struct device_attribute * attr)
+ }
+ 
+ /**
+- *	device_remove_file - remove sysfs attribute file.
+- *	@dev:	device.
+- *	@attr:	device attribute descriptor.
++ * device_remove_file - remove sysfs attribute file.
++ * @dev: device.
++ * @attr: device attribute descriptor.
+  */
+-
+-void device_remove_file(struct device * dev, struct device_attribute * attr)
++void device_remove_file(struct device *dev, struct device_attribute *attr)
+ {
+ 	if (get_device(dev)) {
+ 		sysfs_remove_file(&dev->kobj, &attr->attr);
+@@ -511,22 +516,20 @@ static void klist_children_put(struct klist_node *n)
+ 	put_device(dev);
+ }
+ 
+-
+ /**
+- *	device_initialize - init device structure.
+- *	@dev:	device.
++ * device_initialize - init device structure.
++ * @dev: device.
+  *
+- *	This prepares the device for use by other layers,
+- *	including adding it to the device hierarchy.
+- *	It is the first half of device_register(), if called by
+- *	that, though it can also be called separately, so one
+- *	may use @dev's fields (e.g. the refcount).
++ * This prepares the device for use by other layers,
++ * including adding it to the device hierarchy.
++ * It is the first half of device_register(), if called by
++ * that, though it can also be called separately, so one
++ * may use @dev's fields (e.g. the refcount).
+  */
+-
+ void device_initialize(struct device *dev)
+ {
+-	kobj_set_kset_s(dev, devices_subsys);
+-	kobject_init(&dev->kobj);
++	dev->kobj.kset = devices_kset;
++	kobject_init(&dev->kobj, &device_ktype);
+ 	klist_init(&dev->klist_children, klist_children_get,
+ 		   klist_children_put);
+ 	INIT_LIST_HEAD(&dev->dma_pools);
+@@ -539,36 +542,39 @@ void device_initialize(struct device *dev)
+ }
+ 
+ #ifdef CONFIG_SYSFS_DEPRECATED
+-static struct kobject * get_device_parent(struct device *dev,
+-					  struct device *parent)
++static struct kobject *get_device_parent(struct device *dev,
++					 struct device *parent)
+ {
+-	/*
+-	 * Set the parent to the class, not the parent device
+-	 * for topmost devices in class hierarchy.
+-	 * This keeps sysfs from having a symlink to make old
+-	 * udevs happy
+-	 */
++	/* class devices without a parent live in /sys/class/<classname>/ */
+ 	if (dev->class && (!parent || parent->class != dev->class))
+ 		return &dev->class->subsys.kobj;
++	/* all other devices keep their parent */
+ 	else if (parent)
+ 		return &parent->kobj;
+ 
+ 	return NULL;
+ }
 +
-+static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
-+{
-+	int queued;
-+
-+	if (!ctx->queue.qlen) {
-+		smp_mb__before_clear_bit();
-+		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-+
-+		if (!ctx->queue.qlen ||
-+		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-+			goto out;
-+	}
-+
-+	queued = schedule_work(&ctx->postponed);
-+	BUG_ON(!queued);
++static inline void cleanup_device_parent(struct device *dev) {}
++static inline void cleanup_glue_dir(struct device *dev,
++				    struct kobject *glue_dir) {}
+ #else
+ static struct kobject *virtual_device_parent(struct device *dev)
+ {
+ 	static struct kobject *virtual_dir = NULL;
+ 
+ 	if (!virtual_dir)
+-		virtual_dir = kobject_add_dir(&devices_subsys.kobj, "virtual");
++		virtual_dir = kobject_create_and_add("virtual",
++						     &devices_kset->kobj);
+ 
+ 	return virtual_dir;
+ }
+ 
+-static struct kobject * get_device_parent(struct device *dev,
+-					  struct device *parent)
++static struct kobject *get_device_parent(struct device *dev,
++					 struct device *parent)
+ {
++	int retval;
 +
-+out:
-+	return ctx->err;
-+}
+ 	if (dev->class) {
+ 		struct kobject *kobj = NULL;
+ 		struct kobject *parent_kobj;
+@@ -576,8 +582,8 @@ static struct kobject * get_device_parent(struct device *dev,
+ 
+ 		/*
+ 		 * If we have no parent, we live in "virtual".
+-		 * Class-devices with a bus-device as parent, live
+-		 * in a class-directory to prevent namespace collisions.
++		 * Class-devices with a non class-device as parent, live
++		 * in a "glue" directory to prevent namespace collisions.
+ 		 */
+ 		if (parent == NULL)
+ 			parent_kobj = virtual_device_parent(dev);
+@@ -598,25 +604,45 @@ static struct kobject * get_device_parent(struct device *dev,
+ 			return kobj;
+ 
+ 		/* or create a new class-directory at the parent device */
+-		return kobject_kset_add_dir(&dev->class->class_dirs,
+-					    parent_kobj, dev->class->name);
++		k = kobject_create();
++		if (!k)
++			return NULL;
++		k->kset = &dev->class->class_dirs;
++		retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
++		if (retval < 0) {
++			kobject_put(k);
++			return NULL;
++		}
++		/* do not emit an uevent for this simple "glue" directory */
++		return k;
+ 	}
+ 
+ 	if (parent)
+ 		return &parent->kobj;
+ 	return NULL;
+ }
 +
-+static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
++static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
 +{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+	int err;
-+
-+	spin_lock_bh(&ctx->lock);
-+	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
-+	spin_unlock_bh(&ctx->lock);
-+
-+	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-+		return err;
++	/* see if we live in a "glue" directory */
++	if (!dev->class || glue_dir->kset != &dev->class->class_dirs)
++		return;
 +
-+	ctx->err = err;
-+	return async_chainiv_schedule_work(ctx);
++	kobject_put(glue_dir);
 +}
 +
-+static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
++static void cleanup_device_parent(struct device *dev)
 +{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-+	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
-+
-+	memcpy(req->giv, ctx->iv, ivsize);
-+	memcpy(subreq->info, ctx->iv, ivsize);
-+
-+	ctx->err = crypto_ablkcipher_encrypt(subreq);
-+	if (ctx->err)
-+		goto out;
-+
-+	memcpy(ctx->iv, subreq->info, ivsize);
-+
-+out:
-+	return async_chainiv_schedule_work(ctx);
++	cleanup_glue_dir(dev, dev->kobj.parent);
 +}
+ #endif
+ 
+-static int setup_parent(struct device *dev, struct device *parent)
++static void setup_parent(struct device *dev, struct device *parent)
+ {
+ 	struct kobject *kobj;
+ 	kobj = get_device_parent(dev, parent);
+-	if (IS_ERR(kobj))
+-		return PTR_ERR(kobj);
+ 	if (kobj)
+ 		dev->kobj.parent = kobj;
+-	return 0;
+ }
+ 
+ static int device_add_class_symlinks(struct device *dev)
+@@ -625,65 +651,76 @@ static int device_add_class_symlinks(struct device *dev)
+ 
+ 	if (!dev->class)
+ 		return 0;
 +
-+static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
-+{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-+
-+	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-+	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
-+					req->creq.base.complete,
-+					req->creq.base.data);
-+	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-+				     req->creq.nbytes, req->creq.info);
-+
-+	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-+		goto postpone;
+ 	error = sysfs_create_link(&dev->kobj, &dev->class->subsys.kobj,
+ 				  "subsystem");
+ 	if (error)
+ 		goto out;
+-	/*
+-	 * If this is not a "fake" compatible device, then create the
+-	 * symlink from the class to the device.
+-	 */
+-	if (dev->kobj.parent != &dev->class->subsys.kobj) {
 +
-+	if (ctx->queue.qlen) {
-+		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-+		goto postpone;
-+	}
++#ifdef CONFIG_SYSFS_DEPRECATED
++	/* stacked class devices need a symlink in the class directory */
++	if (dev->kobj.parent != &dev->class->subsys.kobj &&
++	    device_is_not_partition(dev)) {
+ 		error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
+ 					  dev->bus_id);
+ 		if (error)
+ 			goto out_subsys;
+ 	}
+-	if (dev->parent) {
+-#ifdef CONFIG_SYSFS_DEPRECATED
+-		{
+-			struct device *parent = dev->parent;
+-			char *class_name;
+-
+-			/*
+-			 * In old sysfs stacked class devices had 'device'
+-			 * link pointing to real device instead of parent
+-			 */
+-			while (parent->class && !parent->bus && parent->parent)
+-				parent = parent->parent;
+-
+-			error = sysfs_create_link(&dev->kobj,
+-						  &parent->kobj,
+-						  "device");
+-			if (error)
+-				goto out_busid;
+ 
+-			class_name = make_class_name(dev->class->name,
+-							&dev->kobj);
+-			if (class_name)
+-				error = sysfs_create_link(&dev->parent->kobj,
+-							&dev->kobj, class_name);
+-			kfree(class_name);
+-			if (error)
+-				goto out_device;
+-		}
+-#else
+-		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
++	if (dev->parent && device_is_not_partition(dev)) {
++		struct device *parent = dev->parent;
++		char *class_name;
 +
-+	return async_chainiv_givencrypt_tail(req);
++		/*
++		 * stacked class devices have the 'device' link
++		 * pointing to the bus device instead of the parent
++		 */
++		while (parent->class && !parent->bus && parent->parent)
++			parent = parent->parent;
 +
-+postpone:
-+	return async_chainiv_postpone_request(req);
-+}
++		error = sysfs_create_link(&dev->kobj,
++					  &parent->kobj,
+ 					  "device");
+ 		if (error)
+ 			goto out_busid;
+-#endif
 +
-+static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
-+{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
++		class_name = make_class_name(dev->class->name,
++						&dev->kobj);
++		if (class_name)
++			error = sysfs_create_link(&dev->parent->kobj,
++						&dev->kobj, class_name);
++		kfree(class_name);
++		if (error)
++			goto out_device;
+ 	}
+ 	return 0;
+ 
+-#ifdef CONFIG_SYSFS_DEPRECATED
+ out_device:
+-	if (dev->parent)
++	if (dev->parent && device_is_not_partition(dev))
+ 		sysfs_remove_link(&dev->kobj, "device");
+-#endif
+ out_busid:
+-	if (dev->kobj.parent != &dev->class->subsys.kobj)
++	if (dev->kobj.parent != &dev->class->subsys.kobj &&
++	    device_is_not_partition(dev))
+ 		sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
++#else
++	/* link in the class directory pointing to the device */
++	error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
++				  dev->bus_id);
++	if (error)
++		goto out_subsys;
 +
-+	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-+		goto out;
++	if (dev->parent && device_is_not_partition(dev)) {
++		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
++					  "device");
++		if (error)
++			goto out_busid;
++	}
++	return 0;
 +
-+	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
-+	    async_chainiv_givencrypt_first)
-+		goto unlock;
++out_busid:
++	sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
++#endif
 +
-+	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
-+	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
+ out_subsys:
+ 	sysfs_remove_link(&dev->kobj, "subsystem");
+ out:
+@@ -694,8 +731,9 @@ static void device_remove_class_symlinks(struct device *dev)
+ {
+ 	if (!dev->class)
+ 		return;
+-	if (dev->parent) {
 +
-+unlock:
-+	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
+ #ifdef CONFIG_SYSFS_DEPRECATED
++	if (dev->parent && device_is_not_partition(dev)) {
+ 		char *class_name;
+ 
+ 		class_name = make_class_name(dev->class->name, &dev->kobj);
+@@ -703,45 +741,59 @@ static void device_remove_class_symlinks(struct device *dev)
+ 			sysfs_remove_link(&dev->parent->kobj, class_name);
+ 			kfree(class_name);
+ 		}
+-#endif
+ 		sysfs_remove_link(&dev->kobj, "device");
+ 	}
+-	if (dev->kobj.parent != &dev->class->subsys.kobj)
 +
-+out:
-+	return async_chainiv_givencrypt(req);
-+}
++	if (dev->kobj.parent != &dev->class->subsys.kobj &&
++	    device_is_not_partition(dev))
+ 		sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
++#else
++	if (dev->parent && device_is_not_partition(dev))
++		sysfs_remove_link(&dev->kobj, "device");
 +
-+static void async_chainiv_do_postponed(struct work_struct *work)
-+{
-+	struct async_chainiv_ctx *ctx = container_of(work,
-+						     struct async_chainiv_ctx,
-+						     postponed);
-+	struct skcipher_givcrypt_request *req;
-+	struct ablkcipher_request *subreq;
++	sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
++#endif
 +
-+	/* Only handle one request at a time to avoid hogging keventd. */
-+	spin_lock_bh(&ctx->lock);
-+	req = skcipher_dequeue_givcrypt(&ctx->queue);
-+	spin_unlock_bh(&ctx->lock);
+ 	sysfs_remove_link(&dev->kobj, "subsystem");
+ }
+ 
+ /**
+- *	device_add - add device to device hierarchy.
+- *	@dev:	device.
++ * device_add - add device to device hierarchy.
++ * @dev: device.
+  *
+- *	This is part 2 of device_register(), though may be called
+- *	separately _iff_ device_initialize() has been called separately.
++ * This is part 2 of device_register(), though may be called
++ * separately _iff_ device_initialize() has been called separately.
+  *
+- *	This adds it to the kobject hierarchy via kobject_add(), adds it
+- *	to the global and sibling lists for the device, then
+- *	adds it to the other relevant subsystems of the driver model.
++ * This adds it to the kobject hierarchy via kobject_add(), adds it
++ * to the global and sibling lists for the device, then
++ * adds it to the other relevant subsystems of the driver model.
+  */
+ int device_add(struct device *dev)
+ {
+ 	struct device *parent = NULL;
+ 	struct class_interface *class_intf;
+-	int error = -EINVAL;
++	int error;
 +
-+	if (!req) {
-+		async_chainiv_schedule_work(ctx);
-+		return;
++	error = pm_sleep_lock();
++	if (error) {
++		dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__);
++		dump_stack();
++		return error;
 +	}
-+
-+	subreq = skcipher_givcrypt_reqctx(req);
-+	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
-+
-+	async_chainiv_givencrypt_tail(req);
+ 
+ 	dev = get_device(dev);
+-	if (!dev || !strlen(dev->bus_id))
++	if (!dev || !strlen(dev->bus_id)) {
++		error = -EINVAL;
+ 		goto Error;
++	}
+ 
+-	pr_debug("DEV: registering device: ID = '%s'\n", dev->bus_id);
++	pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__);
+ 
+ 	parent = get_device(dev->parent);
+-	error = setup_parent(dev, parent);
+-	if (error)
+-		goto Error;
++	setup_parent(dev, parent);
+ 
+ 	/* first, register with generic layer. */
+-	kobject_set_name(&dev->kobj, "%s", dev->bus_id);
+-	error = kobject_add(&dev->kobj);
++	error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev->bus_id);
+ 	if (error)
+ 		goto Error;
+ 
+@@ -751,7 +803,7 @@ int device_add(struct device *dev)
+ 
+ 	/* notify clients of device entry (new way) */
+ 	if (dev->bus)
+-		blocking_notifier_call_chain(&dev->bus->bus_notifier,
++		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ 					     BUS_NOTIFY_ADD_DEVICE, dev);
+ 
+ 	error = device_create_file(dev, &uevent_attr);
+@@ -795,13 +847,14 @@ int device_add(struct device *dev)
+ 	}
+  Done:
+ 	put_device(dev);
++	pm_sleep_unlock();
+ 	return error;
+  BusError:
+ 	device_pm_remove(dev);
+ 	dpm_sysfs_remove(dev);
+  PMError:
+ 	if (dev->bus)
+-		blocking_notifier_call_chain(&dev->bus->bus_notifier,
++		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ 					     BUS_NOTIFY_DEL_DEVICE, dev);
+ 	device_remove_attrs(dev);
+  AttrsError:
+@@ -809,124 +862,84 @@ int device_add(struct device *dev)
+  SymlinkError:
+ 	if (MAJOR(dev->devt))
+ 		device_remove_file(dev, &devt_attr);
+-
+-	if (dev->class) {
+-		sysfs_remove_link(&dev->kobj, "subsystem");
+-		/* If this is not a "fake" compatible device, remove the
+-		 * symlink from the class to the device. */
+-		if (dev->kobj.parent != &dev->class->subsys.kobj)
+-			sysfs_remove_link(&dev->class->subsys.kobj,
+-					  dev->bus_id);
+-		if (parent) {
+-#ifdef CONFIG_SYSFS_DEPRECATED
+-			char *class_name = make_class_name(dev->class->name,
+-							   &dev->kobj);
+-			if (class_name)
+-				sysfs_remove_link(&dev->parent->kobj,
+-						  class_name);
+-			kfree(class_name);
+-#endif
+-			sysfs_remove_link(&dev->kobj, "device");
+-		}
+-	}
+  ueventattrError:
+ 	device_remove_file(dev, &uevent_attr);
+  attrError:
+ 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+ 	kobject_del(&dev->kobj);
+  Error:
++	cleanup_device_parent(dev);
+ 	if (parent)
+ 		put_device(parent);
+ 	goto Done;
+ }
+ 
+-
+ /**
+- *	device_register - register a device with the system.
+- *	@dev:	pointer to the device structure
++ * device_register - register a device with the system.
++ * @dev: pointer to the device structure
+  *
+- *	This happens in two clean steps - initialize the device
+- *	and add it to the system. The two steps can be called
+- *	separately, but this is the easiest and most common.
+- *	I.e. you should only call the two helpers separately if
+- *	have a clearly defined need to use and refcount the device
+- *	before it is added to the hierarchy.
++ * This happens in two clean steps - initialize the device
++ * and add it to the system. The two steps can be called
++ * separately, but this is the easiest and most common.
++ * I.e. you should only call the two helpers separately if
++ * have a clearly defined need to use and refcount the device
++ * before it is added to the hierarchy.
+  */
+-
+ int device_register(struct device *dev)
+ {
+ 	device_initialize(dev);
+ 	return device_add(dev);
+ }
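A minimal sketch of the one-step registration path documented above, including the release() method that device_release() insists on (hypothetical "foo" names, assuming <linux/device.h> and <linux/slab.h>):

struct foo_device {
	struct device dev;
	int id;
};

static void foo_dev_release(struct device *dev)
{
	struct foo_device *foo = container_of(dev, struct foo_device, dev);

	kfree(foo);	/* runs only once the last reference is dropped */
}

static struct foo_device *foo_device_add(struct device *parent, int id)
{
	struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	foo->id = id;
	foo->dev.parent = parent;
	foo->dev.release = foo_dev_release;
	snprintf(foo->dev.bus_id, BUS_ID_SIZE, "foo%d", id);

	if (device_register(&foo->dev)) {
		put_device(&foo->dev);	/* foo_dev_release() frees foo */
		return NULL;
	}
	return foo;
}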
+ 
+-
+ /**
+- *	get_device - increment reference count for device.
+- *	@dev:	device.
++ * get_device - increment reference count for device.
++ * @dev: device.
+  *
+- *	This simply forwards the call to kobject_get(), though
+- *	we do take care to provide for the case that we get a NULL
+- *	pointer passed in.
++ * This simply forwards the call to kobject_get(), though
++ * we do take care to provide for the case that we get a NULL
++ * pointer passed in.
+  */
+-
+-struct device * get_device(struct device * dev)
++struct device *get_device(struct device *dev)
+ {
+ 	return dev ? to_dev(kobject_get(&dev->kobj)) : NULL;
+ }
+ 
+-
+ /**
+- *	put_device - decrement reference count.
+- *	@dev:	device in question.
++ * put_device - decrement reference count.
++ * @dev: device in question.
+  */
+-void put_device(struct device * dev)
++void put_device(struct device *dev)
+ {
++	/* might_sleep(); */
+ 	if (dev)
+ 		kobject_put(&dev->kobj);
+ }
+ 
+-
+ /**
+- *	device_del - delete device from system.
+- *	@dev:	device.
++ * device_del - delete device from system.
++ * @dev: device.
+  *
+- *	This is the first part of the device unregistration
+- *	sequence. This removes the device from the lists we control
+- *	from here, has it removed from the other driver model
+- *	subsystems it was added to in device_add(), and removes it
+- *	from the kobject hierarchy.
++ * This is the first part of the device unregistration
++ * sequence. This removes the device from the lists we control
++ * from here, has it removed from the other driver model
++ * subsystems it was added to in device_add(), and removes it
++ * from the kobject hierarchy.
+  *
+- *	NOTE: this should be called manually _iff_ device_add() was
+- *	also called manually.
++ * NOTE: this should be called manually _iff_ device_add() was
++ * also called manually.
+  */
+-
+-void device_del(struct device * dev)
++void device_del(struct device *dev)
+ {
+-	struct device * parent = dev->parent;
++	struct device *parent = dev->parent;
+ 	struct class_interface *class_intf;
+ 
++	device_pm_remove(dev);
+ 	if (parent)
+ 		klist_del(&dev->knode_parent);
+ 	if (MAJOR(dev->devt))
+ 		device_remove_file(dev, &devt_attr);
+ 	if (dev->class) {
+-		sysfs_remove_link(&dev->kobj, "subsystem");
+-		/* If this is not a "fake" compatible device, remove the
+-		 * symlink from the class to the device. */
+-		if (dev->kobj.parent != &dev->class->subsys.kobj)
+-			sysfs_remove_link(&dev->class->subsys.kobj,
+-					  dev->bus_id);
+-		if (parent) {
+-#ifdef CONFIG_SYSFS_DEPRECATED
+-			char *class_name = make_class_name(dev->class->name,
+-							   &dev->kobj);
+-			if (class_name)
+-				sysfs_remove_link(&dev->parent->kobj,
+-						  class_name);
+-			kfree(class_name);
+-#endif
+-			sysfs_remove_link(&dev->kobj, "device");
+-		}
++		device_remove_class_symlinks(dev);
+ 
+ 		down(&dev->class->sem);
+ 		/* notify any interfaces that the device is now gone */
+@@ -936,31 +949,6 @@ void device_del(struct device * dev)
+ 		/* remove the device from the class list */
+ 		list_del_init(&dev->node);
+ 		up(&dev->class->sem);
+-
+-		/* If we live in a parent class-directory, unreference it */
+-		if (dev->kobj.parent->kset == &dev->class->class_dirs) {
+-			struct device *d;
+-			int other = 0;
+-
+-			/*
+-			 * if we are the last child of our class, delete
+-			 * our class-directory at this parent
+-			 */
+-			down(&dev->class->sem);
+-			list_for_each_entry(d, &dev->class->devices, node) {
+-				if (d == dev)
+-					continue;
+-				if (d->kobj.parent == dev->kobj.parent) {
+-					other = 1;
+-					break;
+-				}
+-			}
+-			if (!other)
+-				kobject_del(dev->kobj.parent);
+-
+-			kobject_put(dev->kobj.parent);
+-			up(&dev->class->sem);
+-		}
+ 	}
+ 	device_remove_file(dev, &uevent_attr);
+ 	device_remove_attrs(dev);
+@@ -979,57 +967,55 @@ void device_del(struct device * dev)
+ 	if (platform_notify_remove)
+ 		platform_notify_remove(dev);
+ 	if (dev->bus)
+-		blocking_notifier_call_chain(&dev->bus->bus_notifier,
++		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ 					     BUS_NOTIFY_DEL_DEVICE, dev);
+-	device_pm_remove(dev);
+ 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
++	cleanup_device_parent(dev);
+ 	kobject_del(&dev->kobj);
+-	if (parent)
+-		put_device(parent);
++	put_device(parent);
+ }
+ 
+ /**
+- *	device_unregister - unregister device from system.
+- *	@dev:	device going away.
++ * device_unregister - unregister device from system.
++ * @dev: device going away.
+  *
+- *	We do this in two parts, like we do device_register(). First,
+- *	we remove it from all the subsystems with device_del(), then
+- *	we decrement the reference count via put_device(). If that
+- *	is the final reference count, the device will be cleaned up
+- *	via device_release() above. Otherwise, the structure will
+- *	stick around until the final reference to the device is dropped.
++ * We do this in two parts, like we do device_register(). First,
++ * we remove it from all the subsystems with device_del(), then
++ * we decrement the reference count via put_device(). If that
++ * is the final reference count, the device will be cleaned up
++ * via device_release() above. Otherwise, the structure will
++ * stick around until the final reference to the device is dropped.
+  */
+-void device_unregister(struct device * dev)
++void device_unregister(struct device *dev)
+ {
+-	pr_debug("DEV: Unregistering device. ID = '%s'\n", dev->bus_id);
++	pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__);
+ 	device_del(dev);
+ 	put_device(dev);
+ }
+ 
+-
+-static struct device * next_device(struct klist_iter * i)
++static struct device *next_device(struct klist_iter *i)
+ {
+-	struct klist_node * n = klist_next(i);
++	struct klist_node *n = klist_next(i);
+ 	return n ? container_of(n, struct device, knode_parent) : NULL;
+ }
+ 
+ /**
+- *	device_for_each_child - device child iterator.
+- *	@parent: parent struct device.
+- *	@data:	data for the callback.
+- *	@fn:	function to be called for each device.
++ * device_for_each_child - device child iterator.
++ * @parent: parent struct device.
++ * @data: data for the callback.
++ * @fn: function to be called for each device.
+  *
+- *	Iterate over @parent's child devices, and call @fn for each,
+- *	passing it @data.
++ * Iterate over @parent's child devices, and call @fn for each,
++ * passing it @data.
+  *
+- *	We check the return of @fn each time. If it returns anything
+- *	other than 0, we break out and return that value.
++ * We check the return of @fn each time. If it returns anything
++ * other than 0, we break out and return that value.
+  */
+-int device_for_each_child(struct device * parent, void * data,
+-		     int (*fn)(struct device *, void *))
++int device_for_each_child(struct device *parent, void *data,
++			  int (*fn)(struct device *dev, void *data))
+ {
+ 	struct klist_iter i;
+-	struct device * child;
++	struct device *child;
+ 	int error = 0;
+ 
+ 	klist_iter_init(&parent->klist_children, &i);
+@@ -1054,8 +1040,8 @@ int device_for_each_child(struct device * parent, void * data,
+  * current device can be obtained, this function will return to the caller
+  * and not iterate over any more devices.
+  */
+-struct device * device_find_child(struct device *parent, void *data,
+-				  int (*match)(struct device *, void *))
++struct device *device_find_child(struct device *parent, void *data,
++				 int (*match)(struct device *dev, void *data))
+ {
+ 	struct klist_iter i;
+ 	struct device *child;
+@@ -1073,7 +1059,10 @@ struct device * device_find_child(struct device *parent, void *data,
+ 
+ int __init devices_init(void)
+ {
+-	return subsystem_register(&devices_subsys);
++	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
++	if (!devices_kset)
++		return -ENOMEM;
++	return 0;
+ }
+ 
+ EXPORT_SYMBOL_GPL(device_for_each_child);
+@@ -1094,7 +1083,7 @@ EXPORT_SYMBOL_GPL(device_remove_file);
+ 
+ static void device_create_release(struct device *dev)
+ {
+-	pr_debug("%s called for %s\n", __FUNCTION__, dev->bus_id);
++	pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__);
+ 	kfree(dev);
+ }
+ 
+@@ -1156,14 +1145,11 @@ error:
+ EXPORT_SYMBOL_GPL(device_create);
+ 
+ /**
+- * device_destroy - removes a device that was created with device_create()
++ * find_device - finds a device that was created with device_create()
+  * @class: pointer to the struct class that this device was registered with
+  * @devt: the dev_t of the device that was previously registered
+- *
+- * This call unregisters and cleans up a device that was created with a
+- * call to device_create().
+  */
+-void device_destroy(struct class *class, dev_t devt)
++static struct device *find_device(struct class *class, dev_t devt)
+ {
+ 	struct device *dev = NULL;
+ 	struct device *dev_tmp;
+@@ -1176,12 +1162,54 @@ void device_destroy(struct class *class, dev_t devt)
+ 		}
+ 	}
+ 	up(&class->sem);
++	return dev;
 +}
-+
-+static int async_chainiv_init(struct crypto_tfm *tfm)
+ 
++/**
++ * device_destroy - removes a device that was created with device_create()
++ * @class: pointer to the struct class that this device was registered with
++ * @devt: the dev_t of the device that was previously registered
++ *
++ * This call unregisters and cleans up a device that was created with a
++ * call to device_create().
++ */
++void device_destroy(struct class *class, dev_t devt)
 +{
-+	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	spin_lock_init(&ctx->lock);
-+
-+	crypto_init_queue(&ctx->queue, 100);
-+	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
-+
-+	return chainiv_init_common(tfm);
-+}
++	struct device *dev;
 +
-+static void async_chainiv_exit(struct crypto_tfm *tfm)
++	dev = find_device(class, devt);
+ 	if (dev)
+ 		device_unregister(dev);
+ }
+ EXPORT_SYMBOL_GPL(device_destroy);
+ 
++#ifdef CONFIG_PM_SLEEP
++/**
++ * destroy_suspended_device - asks the PM core to remove a suspended device
++ * @class: pointer to the struct class that this device was registered with
++ * @devt: the dev_t of the device that was previously registered
++ *
++ * This call notifies the PM core of the necessity to unregister a suspended
++ * device created with a call to device_create() (devices cannot be
++ * unregistered directly while suspended, since the PM core holds their
++ * semaphores at that time).
++ *
++ * It can only be called within the scope of a system sleep transition.  In
++ * practice this means it has to be directly or indirectly invoked either by
++ * a suspend or resume method, or by the PM core (e.g. via
++ * disable_nonboot_cpus() or enable_nonboot_cpus()).
++ */
++void destroy_suspended_device(struct class *class, dev_t devt)
 +{
-+	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
++	struct device *dev;
 +
-+	skcipher_geniv_exit(tfm);
++	dev = find_device(class, devt);
++	if (dev)
++		device_pm_schedule_removal(dev);
 +}
++EXPORT_SYMBOL_GPL(destroy_suspended_device);
++#endif /* CONFIG_PM_SLEEP */
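
[editorial aside, not part of the patch] A minimal sketch of the device_create()/device_destroy() pairing that the refactored find_device() above serves, using the 2.6.24-era device_create() signature; the foo_class/foo_devt names and module boilerplate are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *foo_class;	/* hypothetical class */
static dev_t foo_devt;		/* assumed allocated elsewhere, e.g. alloc_chrdev_region() */

static int __init foo_init(void)
{
	struct device *dev;

	foo_class = class_create(THIS_MODULE, "foo");
	if (IS_ERR(foo_class))
		return PTR_ERR(foo_class);

	dev = device_create(foo_class, NULL, foo_devt, "foo0");
	if (IS_ERR(dev)) {
		class_destroy(foo_class);
		return PTR_ERR(dev);
	}
	return 0;
}

static void __exit foo_exit(void)
{
	device_destroy(foo_class, foo_devt);	/* looks the device up by devt */
	class_destroy(foo_class);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
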
 +
-+static struct crypto_template chainiv_tmpl;
-+
-+static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
-+{
-+	struct crypto_attr_type *algt;
-+	struct crypto_instance *inst;
-+	int err;
-+
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
-+		return ERR_PTR(err);
-+
-+	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
-+	if (IS_ERR(inst))
-+		goto out;
-+
-+	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;
-+
-+	inst->alg.cra_init = chainiv_init;
-+	inst->alg.cra_exit = skcipher_geniv_exit;
-+
-+	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
-+
-+	if (!crypto_requires_sync(algt->type, algt->mask)) {
-+		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
-+
-+		inst->alg.cra_ablkcipher.givencrypt =
-+			async_chainiv_givencrypt_first;
-+
-+		inst->alg.cra_init = async_chainiv_init;
-+		inst->alg.cra_exit = async_chainiv_exit;
-+
-+		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
-+	}
-+
-+	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-+
-+out:
-+	return inst;
-+}
+ /**
+  * device_rename - renames a device
+  * @dev: the pointer to the struct device to be renamed
+@@ -1198,7 +1226,8 @@ int device_rename(struct device *dev, char *new_name)
+ 	if (!dev)
+ 		return -EINVAL;
+ 
+-	pr_debug("DEVICE: renaming '%s' to '%s'\n", dev->bus_id, new_name);
++	pr_debug("device: '%s': %s: renaming to '%s'\n", dev->bus_id,
++		 __FUNCTION__, new_name);
+ 
+ #ifdef CONFIG_SYSFS_DEPRECATED
+ 	if ((dev->class) && (dev->parent))
+@@ -1279,8 +1308,7 @@ static int device_move_class_links(struct device *dev,
+ 					  class_name);
+ 		if (error)
+ 			sysfs_remove_link(&dev->kobj, "device");
+-	}
+-	else
++	} else
+ 		error = 0;
+ out:
+ 	kfree(class_name);
+@@ -1311,16 +1339,13 @@ int device_move(struct device *dev, struct device *new_parent)
+ 		return -EINVAL;
+ 
+ 	new_parent = get_device(new_parent);
+-	new_parent_kobj = get_device_parent (dev, new_parent);
+-	if (IS_ERR(new_parent_kobj)) {
+-		error = PTR_ERR(new_parent_kobj);
+-		put_device(new_parent);
+-		goto out;
+-	}
+-	pr_debug("DEVICE: moving '%s' to '%s'\n", dev->bus_id,
+-		 new_parent ? new_parent->bus_id : "<NULL>");
++	new_parent_kobj = get_device_parent(dev, new_parent);
 +
-+static struct crypto_template chainiv_tmpl = {
-+	.name = "chainiv",
-+	.alloc = chainiv_alloc,
-+	.free = skcipher_geniv_free,
-+	.module = THIS_MODULE,
-+};
++	pr_debug("device: '%s': %s: moving to '%s'\n", dev->bus_id,
++		 __FUNCTION__, new_parent ? new_parent->bus_id : "<NULL>");
+ 	error = kobject_move(&dev->kobj, new_parent_kobj);
+ 	if (error) {
++		cleanup_glue_dir(dev, new_parent_kobj);
+ 		put_device(new_parent);
+ 		goto out;
+ 	}
+@@ -1343,6 +1368,7 @@ int device_move(struct device *dev, struct device *new_parent)
+ 				klist_add_tail(&dev->knode_parent,
+ 					       &old_parent->klist_children);
+ 		}
++		cleanup_glue_dir(dev, new_parent_kobj);
+ 		put_device(new_parent);
+ 		goto out;
+ 	}
+@@ -1352,5 +1378,23 @@ out:
+ 	put_device(dev);
+ 	return error;
+ }
+-
+ EXPORT_SYMBOL_GPL(device_move);
 +
-+static int __init chainiv_module_init(void)
++/**
++ * device_shutdown - call ->shutdown() on each device to shutdown.
++ */
++void device_shutdown(void)
 +{
-+	return crypto_register_template(&chainiv_tmpl);
-+}
++	struct device *dev, *devn;
 +
-+static void __exit chainiv_module_exit(void)
-+{
-+	crypto_unregister_template(&chainiv_tmpl);
++	list_for_each_entry_safe_reverse(dev, devn, &devices_kset->list,
++				kobj.entry) {
++		if (dev->bus && dev->bus->shutdown) {
++			dev_dbg(dev, "shutdown\n");
++			dev->bus->shutdown(dev);
++		} else if (dev->driver && dev->driver->shutdown) {
++			dev_dbg(dev, "shutdown\n");
++			dev->driver->shutdown(dev);
++		}
++	}
 +}
-+
-+module_init(chainiv_module_init);
-+module_exit(chainiv_module_exit);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Chain IV Generator");
-diff --git a/crypto/cryptd.c b/crypto/cryptd.c
-index 8bf2da8..074298f 100644
---- a/crypto/cryptd.c
-+++ b/crypto/cryptd.c
-@@ -228,7 +228,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
- 	struct crypto_alg *alg;
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index 4054507..c5885f5 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -14,7 +14,7 @@
+ #include "base.h"
+ 
+ struct sysdev_class cpu_sysdev_class = {
+-	set_kset_name("cpu"),
++	.name = "cpu",
+ };
+ EXPORT_SYMBOL(cpu_sysdev_class);
+ 
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 7ac474d..a5cde94 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1,18 +1,20 @@
+ /*
+- *	drivers/base/dd.c - The core device/driver interactions.
++ * drivers/base/dd.c - The core device/driver interactions.
+  *
+- * 	This file contains the (sometimes tricky) code that controls the
+- *	interactions between devices and drivers, which primarily includes
+- *	driver binding and unbinding.
++ * This file contains the (sometimes tricky) code that controls the
++ * interactions between devices and drivers, which primarily includes
++ * driver binding and unbinding.
+  *
+- *	All of this code used to exist in drivers/base/bus.c, but was
+- *	relocated to here in the name of compartmentalization (since it wasn't
+- *	strictly code just for the 'struct bus_type'.
++ * All of this code used to exist in drivers/base/bus.c, but was
++ * relocated to here in the name of compartmentalization (since it wasn't
++ * strictly code just for the 'struct bus_type'.
+  *
+- *	Copyright (c) 2002-5 Patrick Mochel
+- *	Copyright (c) 2002-3 Open Source Development Labs
++ * Copyright (c) 2002-5 Patrick Mochel
++ * Copyright (c) 2002-3 Open Source Development Labs
++ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh at suse.de>
++ * Copyright (c) 2007 Novell Inc.
+  *
+- *	This file is released under the GPLv2
++ * This file is released under the GPLv2
+  */
+ 
+ #include <linux/device.h>
+@@ -23,8 +25,6 @@
+ #include "base.h"
+ #include "power/power.h"
+ 
+-#define to_drv(node) container_of(node, struct device_driver, kobj.entry)
+-
  
- 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
--				  CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
-+				  CRYPTO_ALG_TYPE_MASK);
- 	if (IS_ERR(alg))
- 		return ERR_PTR(PTR_ERR(alg));
+ static void driver_bound(struct device *dev)
+ {
+@@ -34,27 +34,27 @@ static void driver_bound(struct device *dev)
+ 		return;
+ 	}
  
-@@ -236,13 +236,15 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
- 	if (IS_ERR(inst))
- 		goto out_put_alg;
+-	pr_debug("bound device '%s' to driver '%s'\n",
+-		 dev->bus_id, dev->driver->name);
++	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->bus_id,
++		 __FUNCTION__, dev->driver->name);
  
--	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC;
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- 	inst->alg.cra_type = &crypto_ablkcipher_type;
+ 	if (dev->bus)
+-		blocking_notifier_call_chain(&dev->bus->bus_notifier,
++		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ 					     BUS_NOTIFY_BOUND_DRIVER, dev);
  
- 	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
- 	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
- 	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
+-	klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
++	klist_add_tail(&dev->knode_driver, &dev->driver->p->klist_devices);
+ }
  
-+	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
-+
- 	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
+ static int driver_sysfs_add(struct device *dev)
+ {
+ 	int ret;
  
- 	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
-diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
-index 29f7747..ff7b3de 100644
---- a/crypto/crypto_null.c
-+++ b/crypto/crypto_null.c
-@@ -16,15 +16,17 @@
-  * (at your option) any later version.
+-	ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj,
++	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
+ 			  kobject_name(&dev->kobj));
+ 	if (ret == 0) {
+-		ret = sysfs_create_link(&dev->kobj, &dev->driver->kobj,
++		ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
+ 					"driver");
+ 		if (ret)
+-			sysfs_remove_link(&dev->driver->kobj,
++			sysfs_remove_link(&dev->driver->p->kobj,
+ 					kobject_name(&dev->kobj));
+ 	}
+ 	return ret;
+@@ -65,24 +65,24 @@ static void driver_sysfs_remove(struct device *dev)
+ 	struct device_driver *drv = dev->driver;
+ 
+ 	if (drv) {
+-		sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
++		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
+ 		sysfs_remove_link(&dev->kobj, "driver");
+ 	}
+ }
+ 
+ /**
+- *	device_bind_driver - bind a driver to one device.
+- *	@dev:	device.
++ * device_bind_driver - bind a driver to one device.
++ * @dev: device.
+  *
+- *	Allow manual attachment of a driver to a device.
+- *	Caller must have already set @dev->driver.
++ * Allow manual attachment of a driver to a device.
++ * Caller must have already set @dev->driver.
+  *
+- *	Note that this does not modify the bus reference count
+- *	nor take the bus's rwsem. Please verify those are accounted
+- *	for before calling this. (It is ok to call with no other effort
+- *	from a driver's probe() method.)
++ * Note that this does not modify the bus reference count
++ * nor take the bus's rwsem. Please verify those are accounted
++ * for before calling this. (It is ok to call with no other effort
++ * from a driver's probe() method.)
   *
+- *	This function must be called with @dev->sem held.
++ * This function must be called with @dev->sem held.
   */
-+
-+#include <crypto/internal/skcipher.h>
- #include <linux/init.h>
- #include <linux/module.h>
- #include <linux/mm.h>
--#include <linux/crypto.h>
- #include <linux/string.h>
+ int device_bind_driver(struct device *dev)
+ {
+@@ -93,6 +93,7 @@ int device_bind_driver(struct device *dev)
+ 		driver_bound(dev);
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(device_bind_driver);
  
- #define NULL_KEY_SIZE		0
- #define NULL_BLOCK_SIZE		1
- #define NULL_DIGEST_SIZE	0
-+#define NULL_IV_SIZE		0
+ static atomic_t probe_count = ATOMIC_INIT(0);
+ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+@@ -102,8 +103,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
+ 	int ret = 0;
  
- static int null_compress(struct crypto_tfm *tfm, const u8 *src,
- 			 unsigned int slen, u8 *dst, unsigned int *dlen)
-@@ -55,6 +57,26 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
- 	memcpy(dst, src, NULL_BLOCK_SIZE);
- }
+ 	atomic_inc(&probe_count);
+-	pr_debug("%s: Probing driver %s with device %s\n",
+-		 drv->bus->name, drv->name, dev->bus_id);
++	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
++		 drv->bus->name, __FUNCTION__, drv->name, dev->bus_id);
+ 	WARN_ON(!list_empty(&dev->devres_head));
  
-+static int skcipher_null_crypt(struct blkcipher_desc *desc,
-+			       struct scatterlist *dst,
-+			       struct scatterlist *src, unsigned int nbytes)
-+{
-+	struct blkcipher_walk walk;
-+	int err;
-+
-+	blkcipher_walk_init(&walk, dst, src, nbytes);
-+	err = blkcipher_walk_virt(desc, &walk);
-+
-+	while (walk.nbytes) {
-+		if (walk.src.virt.addr != walk.dst.virt.addr)
-+			memcpy(walk.dst.virt.addr, walk.src.virt.addr,
-+			       walk.nbytes);
-+		err = blkcipher_walk_done(desc, &walk, 0);
-+	}
-+
-+	return err;
-+}
-+
- static struct crypto_alg compress_null = {
- 	.cra_name		=	"compress_null",
- 	.cra_flags		=	CRYPTO_ALG_TYPE_COMPRESS,
-@@ -76,6 +98,7 @@ static struct crypto_alg digest_null = {
- 	.cra_list		=       LIST_HEAD_INIT(digest_null.cra_list),	
- 	.cra_u			=	{ .digest = {
- 	.dia_digestsize		=	NULL_DIGEST_SIZE,
-+	.dia_setkey   		=	null_setkey,
- 	.dia_init   		=	null_init,
- 	.dia_update 		=	null_update,
- 	.dia_final  		=	null_final } }
-@@ -96,6 +119,25 @@ static struct crypto_alg cipher_null = {
- 	.cia_decrypt		=	null_crypt } }
- };
+ 	dev->driver = drv;
+@@ -125,8 +126,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
  
-+static struct crypto_alg skcipher_null = {
-+	.cra_name		=	"ecb(cipher_null)",
-+	.cra_driver_name	=	"ecb-cipher_null",
-+	.cra_priority		=	100,
-+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-+	.cra_blocksize		=	NULL_BLOCK_SIZE,
-+	.cra_type		=	&crypto_blkcipher_type,
-+	.cra_ctxsize		=	0,
-+	.cra_module		=	THIS_MODULE,
-+	.cra_list		=	LIST_HEAD_INIT(skcipher_null.cra_list),
-+	.cra_u			=	{ .blkcipher = {
-+	.min_keysize		=	NULL_KEY_SIZE,
-+	.max_keysize		=	NULL_KEY_SIZE,
-+	.ivsize			=	NULL_IV_SIZE,
-+	.setkey			= 	null_setkey,
-+	.encrypt		=	skcipher_null_crypt,
-+	.decrypt		=	skcipher_null_crypt } }
-+};
-+
- MODULE_ALIAS("compress_null");
- MODULE_ALIAS("digest_null");
- MODULE_ALIAS("cipher_null");
-@@ -108,27 +150,35 @@ static int __init init(void)
- 	if (ret < 0)
- 		goto out;
+ 	driver_bound(dev);
+ 	ret = 1;
+-	pr_debug("%s: Bound Device %s to Driver %s\n",
+-		 drv->bus->name, dev->bus_id, drv->name);
++	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
++		 drv->bus->name, __FUNCTION__, dev->bus_id, drv->name);
+ 	goto done;
  
-+	ret = crypto_register_alg(&skcipher_null);
-+	if (ret < 0)
-+		goto out_unregister_cipher;
-+
- 	ret = crypto_register_alg(&digest_null);
--	if (ret < 0) {
--		crypto_unregister_alg(&cipher_null);
--		goto out;
--	}
-+	if (ret < 0)
-+		goto out_unregister_skcipher;
+ probe_failed:
+@@ -183,7 +184,7 @@ int driver_probe_done(void)
+  * This function must be called with @dev->sem held.  When called for a
+  * USB interface, @dev->parent->sem must be held as well.
+  */
+-int driver_probe_device(struct device_driver * drv, struct device * dev)
++int driver_probe_device(struct device_driver *drv, struct device *dev)
+ {
+ 	int ret = 0;
  
- 	ret = crypto_register_alg(&compress_null);
--	if (ret < 0) {
--		crypto_unregister_alg(&digest_null);
--		crypto_unregister_alg(&cipher_null);
--		goto out;
--	}
-+	if (ret < 0)
-+		goto out_unregister_digest;
+@@ -192,8 +193,8 @@ int driver_probe_device(struct device_driver * drv, struct device * dev)
+ 	if (drv->bus->match && !drv->bus->match(dev, drv))
+ 		goto done;
  
- out:	
+-	pr_debug("%s: Matched Device %s with Driver %s\n",
+-		 drv->bus->name, dev->bus_id, drv->name);
++	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
++		 drv->bus->name, __FUNCTION__, dev->bus_id, drv->name);
+ 
+ 	ret = really_probe(dev, drv);
+ 
+@@ -201,27 +202,27 @@ done:
  	return ret;
-+
-+out_unregister_digest:
-+	crypto_unregister_alg(&digest_null);
-+out_unregister_skcipher:
-+	crypto_unregister_alg(&skcipher_null);
-+out_unregister_cipher:
-+	crypto_unregister_alg(&cipher_null);
-+	goto out;
  }
  
- static void __exit fini(void)
+-static int __device_attach(struct device_driver * drv, void * data)
++static int __device_attach(struct device_driver *drv, void *data)
  {
- 	crypto_unregister_alg(&compress_null);
- 	crypto_unregister_alg(&digest_null);
-+	crypto_unregister_alg(&skcipher_null);
- 	crypto_unregister_alg(&cipher_null);
+-	struct device * dev = data;
++	struct device *dev = data;
+ 	return driver_probe_device(drv, dev);
  }
  
-diff --git a/crypto/ctr.c b/crypto/ctr.c
-new file mode 100644
-index 0000000..2d7425f
---- /dev/null
-+++ b/crypto/ctr.c
-@@ -0,0 +1,422 @@
-+/*
-+ * CTR: Counter mode
-+ *
-+ * (C) Copyright IBM Corp. 2007 - Joy Latten <latten at us.ibm.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
-+ * any later version.
-+ *
-+ */
-+
-+#include <crypto/algapi.h>
-+#include <crypto/ctr.h>
-+#include <linux/err.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/random.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
-+
-+struct crypto_ctr_ctx {
-+	struct crypto_cipher *child;
-+};
-+
-+struct crypto_rfc3686_ctx {
-+	struct crypto_blkcipher *child;
-+	u8 nonce[CTR_RFC3686_NONCE_SIZE];
-+};
-+
-+static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
-+			     unsigned int keylen)
-+{
-+	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
-+	struct crypto_cipher *child = ctx->child;
-+	int err;
-+
-+	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-+	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-+				CRYPTO_TFM_REQ_MASK);
-+	err = crypto_cipher_setkey(child, key, keylen);
-+	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-+			     CRYPTO_TFM_RES_MASK);
-+
-+	return err;
-+}
-+
-+static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
-+				   struct crypto_cipher *tfm)
-+{
-+	unsigned int bsize = crypto_cipher_blocksize(tfm);
-+	unsigned long alignmask = crypto_cipher_alignmask(tfm);
-+	u8 *ctrblk = walk->iv;
-+	u8 tmp[bsize + alignmask];
-+	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
-+	u8 *src = walk->src.virt.addr;
-+	u8 *dst = walk->dst.virt.addr;
-+	unsigned int nbytes = walk->nbytes;
-+
-+	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
-+	crypto_xor(keystream, src, nbytes);
-+	memcpy(dst, keystream, nbytes);
-+
-+	crypto_inc(ctrblk, bsize);
-+}
-+
-+static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
-+				    struct crypto_cipher *tfm)
-+{
-+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-+		   crypto_cipher_alg(tfm)->cia_encrypt;
-+	unsigned int bsize = crypto_cipher_blocksize(tfm);
-+	u8 *ctrblk = walk->iv;
-+	u8 *src = walk->src.virt.addr;
-+	u8 *dst = walk->dst.virt.addr;
-+	unsigned int nbytes = walk->nbytes;
-+
-+	do {
-+		/* create keystream */
-+		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
-+		crypto_xor(dst, src, bsize);
-+
-+		/* increment counter in counterblock */
-+		crypto_inc(ctrblk, bsize);
-+
-+		src += bsize;
-+		dst += bsize;
-+	} while ((nbytes -= bsize) >= bsize);
-+
-+	return nbytes;
-+}
-+
-+static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
-+				    struct crypto_cipher *tfm)
-+{
-+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-+		   crypto_cipher_alg(tfm)->cia_encrypt;
-+	unsigned int bsize = crypto_cipher_blocksize(tfm);
-+	unsigned long alignmask = crypto_cipher_alignmask(tfm);
-+	unsigned int nbytes = walk->nbytes;
-+	u8 *ctrblk = walk->iv;
-+	u8 *src = walk->src.virt.addr;
-+	u8 tmp[bsize + alignmask];
-+	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
-+
-+	do {
-+		/* create keystream */
-+		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
-+		crypto_xor(src, keystream, bsize);
-+
-+		/* increment counter in counterblock */
-+		crypto_inc(ctrblk, bsize);
-+
-+		src += bsize;
-+	} while ((nbytes -= bsize) >= bsize);
-+
-+	return nbytes;
-+}
-+
-+static int crypto_ctr_crypt(struct blkcipher_desc *desc,
-+			      struct scatterlist *dst, struct scatterlist *src,
-+			      unsigned int nbytes)
-+{
-+	struct blkcipher_walk walk;
-+	struct crypto_blkcipher *tfm = desc->tfm;
-+	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
-+	struct crypto_cipher *child = ctx->child;
-+	unsigned int bsize = crypto_cipher_blocksize(child);
-+	int err;
-+
-+	blkcipher_walk_init(&walk, dst, src, nbytes);
-+	err = blkcipher_walk_virt_block(desc, &walk, bsize);
-+
-+	while (walk.nbytes >= bsize) {
-+		if (walk.src.virt.addr == walk.dst.virt.addr)
-+			nbytes = crypto_ctr_crypt_inplace(&walk, child);
-+		else
-+			nbytes = crypto_ctr_crypt_segment(&walk, child);
-+
-+		err = blkcipher_walk_done(desc, &walk, nbytes);
-+	}
-+
-+	if (walk.nbytes) {
-+		crypto_ctr_crypt_final(&walk, child);
-+		err = blkcipher_walk_done(desc, &walk, 0);
-+	}
-+
-+	return err;
-+}
-+
-+static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-+	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-+	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct crypto_cipher *cipher;
-+
-+	cipher = crypto_spawn_cipher(spawn);
-+	if (IS_ERR(cipher))
-+		return PTR_ERR(cipher);
-+
-+	ctx->child = cipher;
-+
-+	return 0;
-+}
-+
-+static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	crypto_free_cipher(ctx->child);
-+}
-+
-+static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
-+{
-+	struct crypto_instance *inst;
-+	struct crypto_alg *alg;
-+	int err;
-+
-+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
-+	if (err)
-+		return ERR_PTR(err);
-+
-+	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
-+				  CRYPTO_ALG_TYPE_MASK);
-+	if (IS_ERR(alg))
-+		return ERR_PTR(PTR_ERR(alg));
-+
-+	/* Block size must be >= 4 bytes. */
-+	err = -EINVAL;
-+	if (alg->cra_blocksize < 4)
-+		goto out_put_alg;
-+
-+	/* If this is false we'd fail the alignment of crypto_inc. */
-+	if (alg->cra_blocksize % 4)
-+		goto out_put_alg;
-+
-+	inst = crypto_alloc_instance("ctr", alg);
-+	if (IS_ERR(inst))
-+		goto out;
-+
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-+	inst->alg.cra_priority = alg->cra_priority;
-+	inst->alg.cra_blocksize = 1;
-+	inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
-+	inst->alg.cra_type = &crypto_blkcipher_type;
-+
-+	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
-+	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
-+	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
-+
-+	inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);
-+
-+	inst->alg.cra_init = crypto_ctr_init_tfm;
-+	inst->alg.cra_exit = crypto_ctr_exit_tfm;
-+
-+	inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
-+	inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
-+	inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
-+
-+out:
-+	crypto_mod_put(alg);
-+	return inst;
-+
-+out_put_alg:
-+	inst = ERR_PTR(err);
-+	goto out;
-+}
-+
-+static void crypto_ctr_free(struct crypto_instance *inst)
-+{
-+	crypto_drop_spawn(crypto_instance_ctx(inst));
-+	kfree(inst);
-+}
-+
-+static struct crypto_template crypto_ctr_tmpl = {
-+	.name = "ctr",
-+	.alloc = crypto_ctr_alloc,
-+	.free = crypto_ctr_free,
-+	.module = THIS_MODULE,
-+};
-+
-+static int crypto_rfc3686_setkey(struct crypto_tfm *parent, const u8 *key,
-+				 unsigned int keylen)
-+{
-+	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(parent);
-+	struct crypto_blkcipher *child = ctx->child;
-+	int err;
-+
-+	/* the nonce is stored in bytes at end of key */
-+	if (keylen < CTR_RFC3686_NONCE_SIZE)
-+		return -EINVAL;
-+
-+	memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
-+	       CTR_RFC3686_NONCE_SIZE);
-+
-+	keylen -= CTR_RFC3686_NONCE_SIZE;
-+
-+	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-+	crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
-+					  CRYPTO_TFM_REQ_MASK);
-+	err = crypto_blkcipher_setkey(child, key, keylen);
-+	crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
-+				     CRYPTO_TFM_RES_MASK);
-+
-+	return err;
-+}
-+
-+static int crypto_rfc3686_crypt(struct blkcipher_desc *desc,
-+				struct scatterlist *dst,
-+				struct scatterlist *src, unsigned int nbytes)
-+{
-+	struct crypto_blkcipher *tfm = desc->tfm;
-+	struct crypto_rfc3686_ctx *ctx = crypto_blkcipher_ctx(tfm);
-+	struct crypto_blkcipher *child = ctx->child;
-+	unsigned long alignmask = crypto_blkcipher_alignmask(tfm);
-+	u8 ivblk[CTR_RFC3686_BLOCK_SIZE + alignmask];
-+	u8 *iv = PTR_ALIGN(ivblk + 0, alignmask + 1);
-+	u8 *info = desc->info;
-+	int err;
-+
-+	/* set up counter block */
-+	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
-+	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
-+
-+	/* initialize counter portion of counter block */
-+	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
-+		cpu_to_be32(1);
-+
-+	desc->tfm = child;
-+	desc->info = iv;
-+	err = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
-+	desc->tfm = tfm;
-+	desc->info = info;
-+
-+	return err;
-+}
-+
-+static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-+	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-+	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct crypto_blkcipher *cipher;
-+
-+	cipher = crypto_spawn_blkcipher(spawn);
-+	if (IS_ERR(cipher))
-+		return PTR_ERR(cipher);
-+
-+	ctx->child = cipher;
-+
-+	return 0;
-+}
-+
-+static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	crypto_free_blkcipher(ctx->child);
-+}
-+
-+static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb)
-+{
-+	struct crypto_instance *inst;
-+	struct crypto_alg *alg;
-+	int err;
-+
-+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
-+	if (err)
-+		return ERR_PTR(err);
-+
-+	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER,
-+				  CRYPTO_ALG_TYPE_MASK);
-+	err = PTR_ERR(alg);
-+	if (IS_ERR(alg))
-+		return ERR_PTR(err);
-+
-+	/* We only support 16-byte blocks. */
-+	err = -EINVAL;
-+	if (alg->cra_blkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE)
-+		goto out_put_alg;
-+
-+	/* Not a stream cipher? */
-+	if (alg->cra_blocksize != 1)
-+		goto out_put_alg;
-+
-+	inst = crypto_alloc_instance("rfc3686", alg);
-+	if (IS_ERR(inst))
-+		goto out;
-+
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-+	inst->alg.cra_priority = alg->cra_priority;
-+	inst->alg.cra_blocksize = 1;
-+	inst->alg.cra_alignmask = alg->cra_alignmask;
-+	inst->alg.cra_type = &crypto_blkcipher_type;
-+
-+	inst->alg.cra_blkcipher.ivsize = CTR_RFC3686_IV_SIZE;
-+	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize
-+					      + CTR_RFC3686_NONCE_SIZE;
-+	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize
-+					      + CTR_RFC3686_NONCE_SIZE;
-+
-+	inst->alg.cra_blkcipher.geniv = "seqiv";
-+
-+	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
-+
-+	inst->alg.cra_init = crypto_rfc3686_init_tfm;
-+	inst->alg.cra_exit = crypto_rfc3686_exit_tfm;
-+
-+	inst->alg.cra_blkcipher.setkey = crypto_rfc3686_setkey;
-+	inst->alg.cra_blkcipher.encrypt = crypto_rfc3686_crypt;
-+	inst->alg.cra_blkcipher.decrypt = crypto_rfc3686_crypt;
-+
-+out:
-+	crypto_mod_put(alg);
-+	return inst;
-+
-+out_put_alg:
-+	inst = ERR_PTR(err);
-+	goto out;
-+}
-+
-+static struct crypto_template crypto_rfc3686_tmpl = {
-+	.name = "rfc3686",
-+	.alloc = crypto_rfc3686_alloc,
-+	.free = crypto_ctr_free,
-+	.module = THIS_MODULE,
-+};
-+
-+static int __init crypto_ctr_module_init(void)
-+{
-+	int err;
-+
-+	err = crypto_register_template(&crypto_ctr_tmpl);
-+	if (err)
-+		goto out;
-+
-+	err = crypto_register_template(&crypto_rfc3686_tmpl);
-+	if (err)
-+		goto out_drop_ctr;
-+
-+out:
-+	return err;
-+
-+out_drop_ctr:
-+	crypto_unregister_template(&crypto_ctr_tmpl);
-+	goto out;
-+}
-+
-+static void __exit crypto_ctr_module_exit(void)
-+{
-+	crypto_unregister_template(&crypto_rfc3686_tmpl);
-+	crypto_unregister_template(&crypto_ctr_tmpl);
-+}
-+
-+module_init(crypto_ctr_module_init);
-+module_exit(crypto_ctr_module_exit);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("CTR Counter block mode");
-+MODULE_ALIAS("rfc3686");
-diff --git a/crypto/des_generic.c b/crypto/des_generic.c
-index 59966d1..355ecb7 100644
---- a/crypto/des_generic.c
-+++ b/crypto/des_generic.c
-@@ -20,13 +20,7 @@
- #include <linux/crypto.h>
- #include <linux/types.h>
+ /**
+- *	device_attach - try to attach device to a driver.
+- *	@dev:	device.
++ * device_attach - try to attach device to a driver.
++ * @dev: device.
+  *
+- *	Walk the list of drivers that the bus has and call
+- *	driver_probe_device() for each pair. If a compatible
+- *	pair is found, break out and return.
++ * Walk the list of drivers that the bus has and call
++ * driver_probe_device() for each pair. If a compatible
++ * pair is found, break out and return.
+  *
+- *	Returns 1 if the device was bound to a driver;
+- *	0 if no matching device was found;
+- *	-ENODEV if the device is not registered.
++ * Returns 1 if the device was bound to a driver;
++ * 0 if no matching device was found;
++ * -ENODEV if the device is not registered.
+  *
+- *	When called for a USB interface, @dev->parent->sem must be held.
++ * When called for a USB interface, @dev->parent->sem must be held.
+  */
+-int device_attach(struct device * dev)
++int device_attach(struct device *dev)
+ {
+ 	int ret = 0;
  
--#define DES_KEY_SIZE		8
--#define DES_EXPKEY_WORDS	32
--#define DES_BLOCK_SIZE		8
--
--#define DES3_EDE_KEY_SIZE	(3 * DES_KEY_SIZE)
--#define DES3_EDE_EXPKEY_WORDS	(3 * DES_EXPKEY_WORDS)
--#define DES3_EDE_BLOCK_SIZE	DES_BLOCK_SIZE
-+#include <crypto/des.h>
+@@ -240,10 +241,11 @@ int device_attach(struct device * dev)
+ 	up(&dev->sem);
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(device_attach);
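
[editorial aside, not part of the patch] The return convention documented above (1 = bound, 0 = no matching driver, -ENODEV = not registered) in a short hypothetical caller:

static int try_bind(struct device *dev)
{
	int ret = device_attach(dev);

	if (ret < 0)		/* -ENODEV: device not registered */
		return ret;
	if (ret == 0)		/* registered, but no matching driver yet */
		dev_info(dev, "no driver bound\n");
	return 0;		/* ret == 1: bound to a driver */
}
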
  
- #define ROL(x, r) ((x) = rol32((x), (r)))
- #define ROR(x, r) ((x) = ror32((x), (r)))
-@@ -634,7 +628,7 @@ static const u32 S8[64] = {
-  *   Choice 1 has operated on the key.
+-static int __driver_attach(struct device * dev, void * data)
++static int __driver_attach(struct device *dev, void *data)
+ {
+-	struct device_driver * drv = data;
++	struct device_driver *drv = data;
+ 
+ 	/*
+ 	 * Lock device and try to bind to it. We drop the error
+@@ -268,35 +270,35 @@ static int __driver_attach(struct device * dev, void * data)
+ }
+ 
+ /**
+- *	driver_attach - try to bind driver to devices.
+- *	@drv:	driver.
++ * driver_attach - try to bind driver to devices.
++ * @drv: driver.
   *
+- *	Walk the list of devices that the bus has on it and try to
+- *	match the driver with each one.  If driver_probe_device()
+- *	returns 0 and the @dev->driver is set, we've found a
+- *	compatible pair.
++ * Walk the list of devices that the bus has on it and try to
++ * match the driver with each one.  If driver_probe_device()
++ * returns 0 and the @dev->driver is set, we've found a
++ * compatible pair.
   */
--static unsigned long ekey(u32 *pe, const u8 *k)
-+unsigned long des_ekey(u32 *pe, const u8 *k)
+-int driver_attach(struct device_driver * drv)
++int driver_attach(struct device_driver *drv)
  {
- 	/* K&R: long is at least 32 bits */
- 	unsigned long a, b, c, d, w;
-@@ -709,6 +703,7 @@ static unsigned long ekey(u32 *pe, const u8 *k)
- 	/* Zero if weak key */
- 	return w;
+ 	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
  }
-+EXPORT_SYMBOL_GPL(des_ekey);
++EXPORT_SYMBOL_GPL(driver_attach);
  
  /*
-  * Decryption key expansion
-@@ -792,7 +787,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
- 	int ret;
+- *	__device_release_driver() must be called with @dev->sem held.
+- *	When called for a USB interface, @dev->parent->sem must be held as well.
++ * __device_release_driver() must be called with @dev->sem held.
++ * When called for a USB interface, @dev->parent->sem must be held as well.
+  */
+-static void __device_release_driver(struct device * dev)
++static void __device_release_driver(struct device *dev)
+ {
+-	struct device_driver * drv;
++	struct device_driver *drv;
  
- 	/* Expand to tmp */
--	ret = ekey(tmp, key);
-+	ret = des_ekey(tmp, key);
+-	drv = get_driver(dev->driver);
++	drv = dev->driver;
+ 	if (drv) {
+ 		driver_sysfs_remove(dev);
+ 		sysfs_remove_link(&dev->kobj, "driver");
+-		klist_remove(&dev->knode_driver);
  
- 	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
- 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-@@ -879,9 +874,9 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
- 		return -EINVAL;
+ 		if (dev->bus)
+-			blocking_notifier_call_chain(&dev->bus->bus_notifier,
++			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ 						     BUS_NOTIFY_UNBIND_DRIVER,
+ 						     dev);
+ 
+@@ -306,18 +308,18 @@ static void __device_release_driver(struct device * dev)
+ 			drv->remove(dev);
+ 		devres_release_all(dev);
+ 		dev->driver = NULL;
+-		put_driver(drv);
++		klist_remove(&dev->knode_driver);
  	}
+ }
  
--	ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
-+	des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
- 	dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
--	ekey(expkey, key);
-+	des_ekey(expkey, key);
+ /**
+- *	device_release_driver - manually detach device from driver.
+- *	@dev:	device.
++ * device_release_driver - manually detach device from driver.
++ * @dev: device.
+  *
+- *	Manually detach device from driver.
+- *	When called for a USB interface, @dev->parent->sem must be held.
++ * Manually detach device from driver.
++ * When called for a USB interface, @dev->parent->sem must be held.
+  */
+-void device_release_driver(struct device * dev)
++void device_release_driver(struct device *dev)
+ {
+ 	/*
+ 	 * If anyone calls device_release_driver() recursively from
+@@ -328,26 +330,26 @@ void device_release_driver(struct device * dev)
+ 	__device_release_driver(dev);
+ 	up(&dev->sem);
+ }
+-
++EXPORT_SYMBOL_GPL(device_release_driver);
  
- 	return 0;
+ /**
+  * driver_detach - detach driver from all devices it controls.
+  * @drv: driver.
+  */
+-void driver_detach(struct device_driver * drv)
++void driver_detach(struct device_driver *drv)
+ {
+-	struct device * dev;
++	struct device *dev;
+ 
+ 	for (;;) {
+-		spin_lock(&drv->klist_devices.k_lock);
+-		if (list_empty(&drv->klist_devices.k_list)) {
+-			spin_unlock(&drv->klist_devices.k_lock);
++		spin_lock(&drv->p->klist_devices.k_lock);
++		if (list_empty(&drv->p->klist_devices.k_list)) {
++			spin_unlock(&drv->p->klist_devices.k_lock);
+ 			break;
+ 		}
+-		dev = list_entry(drv->klist_devices.k_list.prev,
++		dev = list_entry(drv->p->klist_devices.k_list.prev,
+ 				struct device, knode_driver.n_node);
+ 		get_device(dev);
+-		spin_unlock(&drv->klist_devices.k_lock);
++		spin_unlock(&drv->p->klist_devices.k_lock);
+ 
+ 		if (dev->parent)	/* Needed for USB */
+ 			down(&dev->parent->sem);
+@@ -360,9 +362,3 @@ void driver_detach(struct device_driver * drv)
+ 		put_device(dev);
+ 	}
  }
-diff --git a/crypto/digest.c b/crypto/digest.c
-index 8871dec..6fd43bd 100644
---- a/crypto/digest.c
-+++ b/crypto/digest.c
-@@ -12,6 +12,7 @@
+-
+-EXPORT_SYMBOL_GPL(device_bind_driver);
+-EXPORT_SYMBOL_GPL(device_release_driver);
+-EXPORT_SYMBOL_GPL(device_attach);
+-EXPORT_SYMBOL_GPL(driver_attach);
+-
+diff --git a/drivers/base/driver.c b/drivers/base/driver.c
+index eb11475..a35f041 100644
+--- a/drivers/base/driver.c
++++ b/drivers/base/driver.c
+@@ -3,6 +3,8 @@
+  *
+  * Copyright (c) 2002-3 Patrick Mochel
+  * Copyright (c) 2002-3 Open Source Development Labs
++ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh at suse.de>
++ * Copyright (c) 2007 Novell Inc.
+  *
+  * This file is released under the GPLv2
+  *
+@@ -15,46 +17,42 @@
+ #include "base.h"
+ 
+ #define to_dev(node) container_of(node, struct device, driver_list)
+-#define to_drv(obj) container_of(obj, struct device_driver, kobj)
+ 
+ 
+-static struct device * next_device(struct klist_iter * i)
++static struct device *next_device(struct klist_iter *i)
+ {
+-	struct klist_node * n = klist_next(i);
++	struct klist_node *n = klist_next(i);
+ 	return n ? container_of(n, struct device, knode_driver) : NULL;
+ }
+ 
+ /**
+- *	driver_for_each_device - Iterator for devices bound to a driver.
+- *	@drv:	Driver we're iterating.
+- *	@start: Device to begin with
+- *	@data:	Data to pass to the callback.
+- *	@fn:	Function to call for each device.
++ * driver_for_each_device - Iterator for devices bound to a driver.
++ * @drv: Driver we're iterating.
++ * @start: Device to begin with
++ * @data: Data to pass to the callback.
++ * @fn: Function to call for each device.
   *
+- *	Iterate over the @drv's list of devices calling @fn for each one.
++ * Iterate over the @drv's list of devices calling @fn for each one.
   */
+-
+-int driver_for_each_device(struct device_driver * drv, struct device * start, 
+-			   void * data, int (*fn)(struct device *, void *))
++int driver_for_each_device(struct device_driver *drv, struct device *start,
++			   void *data, int (*fn)(struct device *, void *))
+ {
+ 	struct klist_iter i;
+-	struct device * dev;
++	struct device *dev;
+ 	int error = 0;
  
-+#include <crypto/scatterwalk.h>
- #include <linux/mm.h>
- #include <linux/errno.h>
- #include <linux/hardirq.h>
-@@ -20,9 +21,6 @@
- #include <linux/module.h>
- #include <linux/scatterlist.h>
+ 	if (!drv)
+ 		return -EINVAL;
  
--#include "internal.h"
--#include "scatterwalk.h"
+-	klist_iter_init_node(&drv->klist_devices, &i,
++	klist_iter_init_node(&drv->p->klist_devices, &i,
+ 			     start ? &start->knode_driver : NULL);
+ 	while ((dev = next_device(&i)) && !error)
+ 		error = fn(dev, data);
+ 	klist_iter_exit(&i);
+ 	return error;
+ }
 -
- static int init(struct hash_desc *desc)
+ EXPORT_SYMBOL_GPL(driver_for_each_device);
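
[editorial aside, not part of the patch] Usage sketch for the iterator above; the count_dev callback and count_bound_devices wrapper are hypothetical:

static int count_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* a non-zero return stops the iteration */
}

static unsigned int count_bound_devices(struct device_driver *drv)
{
	unsigned int count = 0;

	driver_for_each_device(drv, NULL, &count, count_dev);
	return count;
}
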
+ 
+-
+ /**
+  * driver_find_device - device iterator for locating a particular device.
+  * @drv: The device's driver
+@@ -70,9 +68,9 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
+  * if it does.  If the callback returns non-zero, this function will
+  * return to the caller and not iterate over any more devices.
+  */
+-struct device * driver_find_device(struct device_driver *drv,
+-				   struct device * start, void * data,
+-				   int (*match)(struct device *, void *))
++struct device *driver_find_device(struct device_driver *drv,
++				  struct device *start, void *data,
++				  int (*match)(struct device *dev, void *data))
  {
- 	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
-diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
-new file mode 100644
-index 0000000..eb90d27
---- /dev/null
-+++ b/crypto/eseqiv.c
-@@ -0,0 +1,264 @@
-+/*
-+ * eseqiv: Encrypted Sequence Number IV Generator
-+ *
-+ * This generator generates an IV based on a sequence number by xoring it
-+ * with a salt and then encrypting it with the same key as used to encrypt
-+ * the plain text.  This algorithm requires that the block size be equal
-+ * to the IV size.  It is mainly useful for CBC.
-+ *
-+ * Copyright (c) 2007 Herbert Xu <herbert at gondor.apana.org.au>
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
-+ * any later version.
+ 	struct klist_iter i;
+ 	struct device *dev;
+@@ -80,7 +78,7 @@ struct device * driver_find_device(struct device_driver *drv,
+ 	if (!drv)
+ 		return NULL;
+ 
+-	klist_iter_init_node(&drv->klist_devices, &i,
++	klist_iter_init_node(&drv->p->klist_devices, &i,
+ 			     (start ? &start->knode_driver : NULL));
+ 	while ((dev = next_device(&i)))
+ 		if (match(dev, data) && get_device(dev))
+@@ -91,111 +89,179 @@ struct device * driver_find_device(struct device_driver *drv,
+ EXPORT_SYMBOL_GPL(driver_find_device);
+ 
+ /**
+- *	driver_create_file - create sysfs file for driver.
+- *	@drv:	driver.
+- *	@attr:	driver attribute descriptor.
++ * driver_create_file - create sysfs file for driver.
++ * @drv: driver.
++ * @attr: driver attribute descriptor.
+  */
+-
+-int driver_create_file(struct device_driver * drv, struct driver_attribute * attr)
++int driver_create_file(struct device_driver *drv,
++		       struct driver_attribute *attr)
+ {
+ 	int error;
+ 	if (get_driver(drv)) {
+-		error = sysfs_create_file(&drv->kobj, &attr->attr);
++		error = sysfs_create_file(&drv->p->kobj, &attr->attr);
+ 		put_driver(drv);
+ 	} else
+ 		error = -EINVAL;
+ 	return error;
+ }
+-
++EXPORT_SYMBOL_GPL(driver_create_file);
+ 
+ /**
+- *	driver_remove_file - remove sysfs file for driver.
+- *	@drv:	driver.
+- *	@attr:	driver attribute descriptor.
++ * driver_remove_file - remove sysfs file for driver.
++ * @drv: driver.
++ * @attr: driver attribute descriptor.
+  */
+-
+-void driver_remove_file(struct device_driver * drv, struct driver_attribute * attr)
++void driver_remove_file(struct device_driver *drv,
++			struct driver_attribute *attr)
+ {
+ 	if (get_driver(drv)) {
+-		sysfs_remove_file(&drv->kobj, &attr->attr);
++		sysfs_remove_file(&drv->p->kobj, &attr->attr);
+ 		put_driver(drv);
+ 	}
+ }
+-
++EXPORT_SYMBOL_GPL(driver_remove_file);
+ 
+ /**
+- *	get_driver - increment driver reference count.
+- *	@drv:	driver.
++ * driver_add_kobj - add a kobject below the specified driver
 + *
-+ */
-+
-+#include <crypto/internal/skcipher.h>
-+#include <crypto/scatterwalk.h>
-+#include <linux/err.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/random.h>
-+#include <linux/scatterlist.h>
-+#include <linux/spinlock.h>
-+#include <linux/string.h>
-+
-+struct eseqiv_request_ctx {
-+	struct scatterlist src[2];
-+	struct scatterlist dst[2];
-+	char tail[];
-+};
-+
-+struct eseqiv_ctx {
-+	spinlock_t lock;
-+	unsigned int reqoff;
-+	char salt[];
-+};
-+
-+static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
-+{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
-+
-+	memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
-+			 crypto_ablkcipher_alignmask(geniv) + 1),
-+	       crypto_ablkcipher_ivsize(geniv));
-+}
-+
-+static void eseqiv_complete(struct crypto_async_request *base, int err)
-+{
-+	struct skcipher_givcrypt_request *req = base->data;
-+
-+	if (err)
-+		goto out;
-+
-+	eseqiv_complete2(req);
-+
-+out:
-+	skcipher_givcrypt_complete(req, err);
-+}
-+
-+static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
-+			 int chain)
-+{
-+	if (chain) {
-+		head->length += sg->length;
-+		sg = scatterwalk_sg_next(sg);
-+	}
-+
-+	if (sg)
-+		scatterwalk_sg_chain(head, 2, sg);
-+	else
-+		sg_mark_end(head);
-+}
-+
-+static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
-+{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
-+	struct ablkcipher_request *subreq;
-+	crypto_completion_t complete;
-+	void *data;
-+	struct scatterlist *osrc, *odst;
-+	struct scatterlist *dst;
-+	struct page *srcp;
-+	struct page *dstp;
-+	u8 *giv;
-+	u8 *vsrc;
-+	u8 *vdst;
-+	__be64 seq;
-+	unsigned int ivsize;
-+	unsigned int len;
-+	int err;
-+
-+	subreq = (void *)(reqctx->tail + ctx->reqoff);
-+	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-+
-+	giv = req->giv;
-+	complete = req->creq.base.complete;
-+	data = req->creq.base.data;
-+
-+	osrc = req->creq.src;
-+	odst = req->creq.dst;
-+	srcp = sg_page(osrc);
-+	dstp = sg_page(odst);
-+	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
-+	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;
-+
-+	ivsize = crypto_ablkcipher_ivsize(geniv);
-+
-+	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
-+		giv = PTR_ALIGN((u8 *)reqctx->tail,
-+				crypto_ablkcipher_alignmask(geniv) + 1);
-+		complete = eseqiv_complete;
-+		data = req;
-+	}
-+
-+	ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
-+					data);
-+
-+	sg_init_table(reqctx->src, 2);
-+	sg_set_buf(reqctx->src, giv, ivsize);
-+	eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
-+
-+	dst = reqctx->src;
-+	if (osrc != odst) {
-+		sg_init_table(reqctx->dst, 2);
-+		sg_set_buf(reqctx->dst, giv, ivsize);
-+		eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
-+
-+		dst = reqctx->dst;
-+	}
-+
-+	ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
-+				     req->creq.nbytes, req->creq.info);
-+
-+	memcpy(req->creq.info, ctx->salt, ivsize);
-+
-+	len = ivsize;
-+	if (ivsize > sizeof(u64)) {
-+		memset(req->giv, 0, ivsize - sizeof(u64));
-+		len = sizeof(u64);
-+	}
-+	seq = cpu_to_be64(req->seq);
-+	memcpy(req->giv + ivsize - len, &seq, len);
-+
-+	err = crypto_ablkcipher_encrypt(subreq);
-+	if (err)
-+		goto out;
-+
-+	eseqiv_complete2(req);
-+
-+out:
-+	return err;
-+}
-+
-+static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
-+{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+
-+	spin_lock_bh(&ctx->lock);
-+	if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first)
-+		goto unlock;
-+
-+	crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
-+	get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv));
-+
-+unlock:
-+	spin_unlock_bh(&ctx->lock);
-+
-+	return eseqiv_givencrypt(req);
-+}
-+
-+static int eseqiv_init(struct crypto_tfm *tfm)
-+{
-+	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-+	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+	unsigned long alignmask;
-+	unsigned int reqsize;
-+
-+	spin_lock_init(&ctx->lock);
-+
-+	alignmask = crypto_tfm_ctx_alignment() - 1;
-+	reqsize = sizeof(struct eseqiv_request_ctx);
-+
-+	if (alignmask & reqsize) {
-+		alignmask &= reqsize;
-+		alignmask--;
-+	}
-+
-+	alignmask = ~alignmask;
-+	alignmask &= crypto_ablkcipher_alignmask(geniv);
-+
-+	reqsize += alignmask;
-+	reqsize += crypto_ablkcipher_ivsize(geniv);
-+	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
-+
-+	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);
-+
-+	tfm->crt_ablkcipher.reqsize = reqsize +
-+				      sizeof(struct ablkcipher_request);
-+
-+	return skcipher_geniv_init(tfm);
-+}
-+
-+static struct crypto_template eseqiv_tmpl;
-+
-+static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
-+{
-+	struct crypto_instance *inst;
-+	int err;
-+
-+	inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
-+	if (IS_ERR(inst))
-+		goto out;
-+
-+	err = -EINVAL;
-+	if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
-+		goto free_inst;
-+
-+	inst->alg.cra_ablkcipher.givencrypt = eseqiv_givencrypt_first;
-+
-+	inst->alg.cra_init = eseqiv_init;
-+	inst->alg.cra_exit = skcipher_geniv_exit;
-+
-+	inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
-+	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-+
-+out:
-+	return inst;
-+
-+free_inst:
-+	skcipher_geniv_free(inst);
-+	inst = ERR_PTR(err);
-+	goto out;
-+}
-+
-+static struct crypto_template eseqiv_tmpl = {
-+	.name = "eseqiv",
-+	.alloc = eseqiv_alloc,
-+	.free = skcipher_geniv_free,
-+	.module = THIS_MODULE,
-+};
++ * You really don't want to do this, this is only here due to one looney
++ * iseries driver, go poke those developers if you are annoyed about
++ * this...
+  */
+-struct device_driver * get_driver(struct device_driver * drv)
++int driver_add_kobj(struct device_driver *drv, struct kobject *kobj,
++		    const char *fmt, ...)
+ {
+-	return drv ? to_drv(kobject_get(&drv->kobj)) : NULL;
++	va_list args;
++	char *name;
 +
-+static int __init eseqiv_module_init(void)
-+{
-+	return crypto_register_template(&eseqiv_tmpl);
-+}
++	va_start(args, fmt);
++	name = kvasprintf(GFP_KERNEL, fmt, args);
++	va_end(args);
 +
-+static void __exit eseqiv_module_exit(void)
-+{
-+	crypto_unregister_template(&eseqiv_tmpl);
-+}
++	if (!name)
++		return -ENOMEM;
 +
-+module_init(eseqiv_module_init);
-+module_exit(eseqiv_module_exit);
++	return kobject_add(kobj, &drv->p->kobj, "%s", name);
+ }
++EXPORT_SYMBOL_GPL(driver_add_kobj);
 +
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
-diff --git a/crypto/gcm.c b/crypto/gcm.c
-new file mode 100644
-index 0000000..e70afd0
---- /dev/null
-+++ b/crypto/gcm.c
-@@ -0,0 +1,823 @@
-+/*
-+ * GCM: Galois/Counter Mode.
-+ *
-+ * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1 at iki.fi>
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License version 2 as published
-+ * by the Free Software Foundation.
++/**
++ * get_driver - increment driver reference count.
++ * @drv: driver.
 + */
-+
-+#include <crypto/gf128mul.h>
-+#include <crypto/internal/aead.h>
-+#include <crypto/internal/skcipher.h>
-+#include <crypto/scatterwalk.h>
-+#include <linux/completion.h>
-+#include <linux/err.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+
-+struct gcm_instance_ctx {
-+	struct crypto_skcipher_spawn ctr;
-+};
-+
-+struct crypto_gcm_ctx {
-+	struct crypto_ablkcipher *ctr;
-+	struct gf128mul_4k *gf128;
-+};
-+
-+struct crypto_rfc4106_ctx {
-+	struct crypto_aead *child;
-+	u8 nonce[4];
-+};
-+
-+struct crypto_gcm_ghash_ctx {
-+	u32 bytes;
-+	u32 flags;
-+	struct gf128mul_4k *gf128;
-+	u8 buffer[16];
-+};
-+
-+struct crypto_gcm_req_priv_ctx {
-+	u8 auth_tag[16];
-+	u8 iauth_tag[16];
-+	struct scatterlist src[2];
-+	struct scatterlist dst[2];
-+	struct crypto_gcm_ghash_ctx ghash;
-+	struct ablkcipher_request abreq;
-+};
-+
-+struct crypto_gcm_setkey_result {
-+	int err;
-+	struct completion completion;
-+};
-+
-+static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
-+	struct aead_request *req)
-+{
-+	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
-+
-+	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
-+}
-+
-+static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags,
-+				  struct gf128mul_4k *gf128)
-+{
-+	ctx->bytes = 0;
-+	ctx->flags = flags;
-+	ctx->gf128 = gf128;
-+	memset(ctx->buffer, 0, 16);
-+}
-+
-+static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
-+				    const u8 *src, unsigned int srclen)
++struct device_driver *get_driver(struct device_driver *drv)
 +{
-+	u8 *dst = ctx->buffer;
-+
-+	if (ctx->bytes) {
-+		int n = min(srclen, ctx->bytes);
-+		u8 *pos = dst + (16 - ctx->bytes);
-+
-+		ctx->bytes -= n;
-+		srclen -= n;
-+
-+		while (n--)
-+			*pos++ ^= *src++;
-+
-+		if (!ctx->bytes)
-+			gf128mul_4k_lle((be128 *)dst, ctx->gf128);
-+	}
-+
-+	while (srclen >= 16) {
-+		crypto_xor(dst, src, 16);
-+		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
-+		src += 16;
-+		srclen -= 16;
-+	}
-+
-+	if (srclen) {
-+		ctx->bytes = 16 - srclen;
-+		while (srclen--)
-+			*dst++ ^= *src++;
++	if (drv) {
++		struct driver_private *priv;
++		struct kobject *kobj;
+ 
++		kobj = kobject_get(&drv->p->kobj);
++		priv = to_driver(kobj);
++		return priv->driver;
 +	}
++	return NULL;
 +}
-+
-+static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
-+				       struct scatterlist *sg, int len)
++EXPORT_SYMBOL_GPL(get_driver);
+ 
+ /**
+- *	put_driver - decrement driver's refcount.
+- *	@drv:	driver.
++ * put_driver - decrement driver's refcount.
++ * @drv: driver.
+  */
+-void put_driver(struct device_driver * drv)
++void put_driver(struct device_driver *drv)
 +{
-+	struct scatter_walk walk;
-+	u8 *src;
-+	int n;
-+
-+	if (!len)
-+		return;
-+
-+	scatterwalk_start(&walk, sg);
++	kobject_put(&drv->p->kobj);
++}
++EXPORT_SYMBOL_GPL(put_driver);
 +
-+	while (len) {
-+		n = scatterwalk_clamp(&walk, len);
++static int driver_add_groups(struct device_driver *drv,
++			     struct attribute_group **groups)
+ {
+-	kobject_put(&drv->kobj);
++	int error = 0;
++	int i;
 +
-+		if (!n) {
-+			scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
-+			n = scatterwalk_clamp(&walk, len);
++	if (groups) {
++		for (i = 0; groups[i]; i++) {
++			error = sysfs_create_group(&drv->p->kobj, groups[i]);
++			if (error) {
++				while (--i >= 0)
++					sysfs_remove_group(&drv->p->kobj,
++							   groups[i]);
++				break;
++			}
 +		}
-+
-+		src = scatterwalk_map(&walk, 0);
-+
-+		crypto_gcm_ghash_update(ctx, src, n);
-+		len -= n;
-+
-+		scatterwalk_unmap(src, 0);
-+		scatterwalk_advance(&walk, n);
-+		scatterwalk_done(&walk, 0, len);
-+		if (len)
-+			crypto_yield(ctx->flags);
 +	}
++	return error;
 +}
 +
-+static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx)
++static void driver_remove_groups(struct device_driver *drv,
++				 struct attribute_group **groups)
 +{
-+	u8 *dst = ctx->buffer;
++	int i;
 +
-+	if (ctx->bytes) {
-+		u8 *tmp = dst + (16 - ctx->bytes);
++	if (groups)
++		for (i = 0; groups[i]; i++)
++			sysfs_remove_group(&drv->p->kobj, groups[i]);
+ }
+ 
+ /**
+- *	driver_register - register driver with bus
+- *	@drv:	driver to register
++ * driver_register - register driver with bus
++ * @drv: driver to register
+  *
+- *	We pass off most of the work to the bus_add_driver() call,
+- *	since most of the things we have to do deal with the bus
+- *	structures.
++ * We pass off most of the work to the bus_add_driver() call,
++ * since most of the things we have to do deal with the bus
++ * structures.
+  */
+-int driver_register(struct device_driver * drv)
++int driver_register(struct device_driver *drv)
+ {
++	int ret;
 +
-+		while (ctx->bytes--)
-+			*tmp++ ^= 0;
+ 	if ((drv->bus->probe && drv->probe) ||
+ 	    (drv->bus->remove && drv->remove) ||
+-	    (drv->bus->shutdown && drv->shutdown)) {
+-		printk(KERN_WARNING "Driver '%s' needs updating - please use bus_type methods\n", drv->name);
+-	}
+-	klist_init(&drv->klist_devices, NULL, NULL);
+-	return bus_add_driver(drv);
++	    (drv->bus->shutdown && drv->shutdown))
++		printk(KERN_WARNING "Driver '%s' needs updating - please use "
++			"bus_type methods\n", drv->name);
++	ret = bus_add_driver(drv);
++	if (ret)
++		return ret;
++	ret = driver_add_groups(drv, drv->groups);
++	if (ret)
++		bus_remove_driver(drv);
++	return ret;
+ }
++EXPORT_SYMBOL_GPL(driver_register);
+ 
+ /**
+- *	driver_unregister - remove driver from system.
+- *	@drv:	driver.
++ * driver_unregister - remove driver from system.
++ * @drv: driver.
+  *
+- *	Again, we pass off most of the work to the bus-level call.
++ * Again, we pass off most of the work to the bus-level call.
+  */
+-
+-void driver_unregister(struct device_driver * drv)
++void driver_unregister(struct device_driver *drv)
+ {
++	driver_remove_groups(drv, drv->groups);
+ 	bus_remove_driver(drv);
+ }
++EXPORT_SYMBOL_GPL(driver_unregister);
+ 
+ /**
+- *	driver_find - locate driver on a bus by its name.
+- *	@name:	name of the driver.
+- *	@bus:	bus to scan for the driver.
++ * driver_find - locate driver on a bus by its name.
++ * @name: name of the driver.
++ * @bus: bus to scan for the driver.
+  *
+- *	Call kset_find_obj() to iterate over list of drivers on
+- *	a bus to find driver by name. Return driver if found.
++ * Call kset_find_obj() to iterate over list of drivers on
++ * a bus to find driver by name. Return driver if found.
+  *
+- *	Note that kset_find_obj increments driver's reference count.
++ * Note that kset_find_obj increments driver's reference count.
+  */
+ struct device_driver *driver_find(const char *name, struct bus_type *bus)
+ {
+-	struct kobject *k = kset_find_obj(&bus->drivers, name);
+-	if (k)
+-		return to_drv(k);
++	struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
++	struct driver_private *priv;
 +
-+		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
++	if (k) {
++		priv = to_driver(k);
++		return priv->driver;
 +	}
-+
-+	ctx->bytes = 0;
-+}
-+
-+static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
-+				       unsigned int authlen,
-+				       unsigned int cryptlen, u8 *dst)
-+{
-+	u8 *buf = ctx->buffer;
-+	u128 lengths;
-+
-+	lengths.a = cpu_to_be64(authlen * 8);
-+	lengths.b = cpu_to_be64(cryptlen * 8);
-+
-+	crypto_gcm_ghash_flush(ctx);
-+	crypto_xor(buf, (u8 *)&lengths, 16);
-+	gf128mul_4k_lle((be128 *)buf, ctx->gf128);
-+	crypto_xor(dst, buf, 16);
-+}
-+
-+static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-+{
-+	struct crypto_gcm_setkey_result *result = req->data;
-+
-+	if (err == -EINPROGRESS)
-+		return;
-+
-+	result->err = err;
-+	complete(&result->completion);
-+}
-+
-+static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
-+			     unsigned int keylen)
-+{
-+	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
-+	struct crypto_ablkcipher *ctr = ctx->ctr;
-+	struct {
-+		be128 hash;
-+		u8 iv[8];
-+
-+		struct crypto_gcm_setkey_result result;
-+
-+		struct scatterlist sg[1];
-+		struct ablkcipher_request req;
-+	} *data;
-+	int err;
-+
-+	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
-+	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
-+				   CRYPTO_TFM_REQ_MASK);
-+
-+	err = crypto_ablkcipher_setkey(ctr, key, keylen);
-+	if (err)
-+		return err;
-+
-+	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
-+				       CRYPTO_TFM_RES_MASK);
-+
-+	data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
-+		       GFP_KERNEL);
-+	if (!data)
+ 	return NULL;
+ }
+-
+-EXPORT_SYMBOL_GPL(driver_register);
+-EXPORT_SYMBOL_GPL(driver_unregister);
+-EXPORT_SYMBOL_GPL(get_driver);
+-EXPORT_SYMBOL_GPL(put_driver);
+ EXPORT_SYMBOL_GPL(driver_find);
+-
+-EXPORT_SYMBOL_GPL(driver_create_file);
+-EXPORT_SYMBOL_GPL(driver_remove_file);
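
Note that driver_find() still returns with the reference that kset_find_obj() took, now held on the driver_private kobject, so callers keep the usual find/put pairing. A minimal sketch, where the driver name "demo" and the use of platform_bus_type are invented for illustration:

#include <linux/device.h>
#include <linux/platform_device.h>

/* Returns non-zero if a platform driver called "demo" is currently registered. */
static int demo_driver_is_registered(void)
{
	struct device_driver *drv;

	drv = driver_find("demo", &platform_bus_type);
	if (!drv)
		return 0;
	put_driver(drv);	/* drop the reference driver_find() took */
	return 1;
}
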
+diff --git a/drivers/base/firmware.c b/drivers/base/firmware.c
+index 90c8629..1138155 100644
+--- a/drivers/base/firmware.c
++++ b/drivers/base/firmware.c
+@@ -3,11 +3,11 @@
+  *
+  * Copyright (c) 2002-3 Patrick Mochel
+  * Copyright (c) 2002-3 Open Source Development Labs
++ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh at suse.de>
++ * Copyright (c) 2007 Novell Inc.
+  *
+  * This file is released under the GPLv2
+- *
+  */
+-
+ #include <linux/kobject.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -15,23 +15,13 @@
+ 
+ #include "base.h"
+ 
+-static decl_subsys(firmware, NULL, NULL);
+-
+-int firmware_register(struct kset *s)
+-{
+-	kobj_set_kset_s(s, firmware_subsys);
+-	return subsystem_register(s);
+-}
+-
+-void firmware_unregister(struct kset *s)
+-{
+-	subsystem_unregister(s);
+-}
++struct kobject *firmware_kobj;
++EXPORT_SYMBOL_GPL(firmware_kobj);
+ 
+ int __init firmware_init(void)
+ {
+-	return subsystem_register(&firmware_subsys);
++	firmware_kobj = kobject_create_and_add("firmware", NULL);
++	if (!firmware_kobj)
 +		return -ENOMEM;
-+
-+	init_completion(&data->result.completion);
-+	sg_init_one(data->sg, &data->hash, sizeof(data->hash));
-+	ablkcipher_request_set_tfm(&data->req, ctr);
-+	ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
-+						    CRYPTO_TFM_REQ_MAY_BACKLOG,
-+					crypto_gcm_setkey_done,
-+					&data->result);
-+	ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
-+				     sizeof(data->hash), data->iv);
-+
-+	err = crypto_ablkcipher_encrypt(&data->req);
-+	if (err == -EINPROGRESS || err == -EBUSY) {
-+		err = wait_for_completion_interruptible(
-+			&data->result.completion);
-+		if (!err)
-+			err = data->result.err;
-+	}
-+
-+	if (err)
-+		goto out;
-+
-+	if (ctx->gf128 != NULL)
-+		gf128mul_free_4k(ctx->gf128);
-+
-+	ctx->gf128 = gf128mul_init_4k_lle(&data->hash);
-+
-+	if (ctx->gf128 == NULL)
-+		err = -ENOMEM;
-+
-+out:
-+	kfree(data);
-+	return err;
-+}
-+
-+static int crypto_gcm_setauthsize(struct crypto_aead *tfm,
-+				  unsigned int authsize)
-+{
-+	switch (authsize) {
-+	case 4:
-+	case 8:
-+	case 12:
-+	case 13:
-+	case 14:
-+	case 15:
-+	case 16:
-+		break;
-+	default:
-+		return -EINVAL;
-+	}
-+
-+	return 0;
-+}
-+
-+static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
-+				  struct aead_request *req,
-+				  unsigned int cryptlen)
-+{
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
-+	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-+	u32 flags = req->base.tfm->crt_flags;
-+	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
-+	struct scatterlist *dst;
-+	__be32 counter = cpu_to_be32(1);
-+
-+	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
-+	memcpy(req->iv + 12, &counter, 4);
-+
-+	sg_init_table(pctx->src, 2);
-+	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
-+	scatterwalk_sg_chain(pctx->src, 2, req->src);
-+
-+	dst = pctx->src;
-+	if (req->src != req->dst) {
-+		sg_init_table(pctx->dst, 2);
-+		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
-+		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
-+		dst = pctx->dst;
-+	}
-+
-+	ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
-+	ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
-+				     cryptlen + sizeof(pctx->auth_tag),
-+				     req->iv);
-+
-+	crypto_gcm_ghash_init(ghash, flags, ctx->gf128);
-+
-+	crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen);
-+	crypto_gcm_ghash_flush(ghash);
-+}
-+
-+static int crypto_gcm_hash(struct aead_request *req)
-+{
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-+	u8 *auth_tag = pctx->auth_tag;
-+	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
-+
-+	crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen);
-+	crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen,
-+				   auth_tag);
-+
-+	scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
-+				 crypto_aead_authsize(aead), 1);
 +	return 0;
-+}
-+
-+static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err)
-+{
-+	struct aead_request *req = areq->data;
-+
-+	if (!err)
-+		err = crypto_gcm_hash(req);
-+
-+	aead_request_complete(req, err);
-+}
-+
-+static int crypto_gcm_encrypt(struct aead_request *req)
-+{
-+	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-+	struct ablkcipher_request *abreq = &pctx->abreq;
-+	int err;
-+
-+	crypto_gcm_init_crypt(abreq, req, req->cryptlen);
-+	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-+					crypto_gcm_encrypt_done, req);
-+
-+	err = crypto_ablkcipher_encrypt(abreq);
-+	if (err)
-+		return err;
-+
-+	return crypto_gcm_hash(req);
-+}
-+
-+static int crypto_gcm_verify(struct aead_request *req)
-+{
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-+	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
-+	u8 *auth_tag = pctx->auth_tag;
-+	u8 *iauth_tag = pctx->iauth_tag;
-+	unsigned int authsize = crypto_aead_authsize(aead);
-+	unsigned int cryptlen = req->cryptlen - authsize;
-+
-+	crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag);
-+
-+	authsize = crypto_aead_authsize(aead);
-+	scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
-+	return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
-+}
-+
-+static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err)
-+{
-+	struct aead_request *req = areq->data;
-+
-+	if (!err)
-+		err = crypto_gcm_verify(req);
-+
-+	aead_request_complete(req, err);
-+}
-+
-+static int crypto_gcm_decrypt(struct aead_request *req)
-+{
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-+	struct ablkcipher_request *abreq = &pctx->abreq;
-+	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
-+	unsigned int cryptlen = req->cryptlen;
-+	unsigned int authsize = crypto_aead_authsize(aead);
-+	int err;
-+
-+	if (cryptlen < authsize)
-+		return -EINVAL;
-+	cryptlen -= authsize;
-+
-+	crypto_gcm_init_crypt(abreq, req, cryptlen);
-+	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-+					crypto_gcm_decrypt_done, req);
-+
-+	crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen);
-+
-+	err = crypto_ablkcipher_decrypt(abreq);
-+	if (err)
-+		return err;
-+
-+	return crypto_gcm_verify(req);
-+}
-+
-+static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-+	struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
-+	struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct crypto_ablkcipher *ctr;
-+	unsigned long align;
-+	int err;
-+
-+	ctr = crypto_spawn_skcipher(&ictx->ctr);
-+	err = PTR_ERR(ctr);
-+	if (IS_ERR(ctr))
-+		return err;
-+
-+	ctx->ctr = ctr;
-+	ctx->gf128 = NULL;
-+
-+	align = crypto_tfm_alg_alignmask(tfm);
-+	align &= ~(crypto_tfm_ctx_alignment() - 1);
-+	tfm->crt_aead.reqsize = align +
-+				sizeof(struct crypto_gcm_req_priv_ctx) +
-+				crypto_ablkcipher_reqsize(ctr);
-+
+ }
+-
+-EXPORT_SYMBOL_GPL(firmware_register);
+-EXPORT_SYMBOL_GPL(firmware_unregister);
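
With decl_subsys()/firmware_register() gone, code that used to register a kset under /sys/firmware now just parents a kobject on the exported firmware_kobj. A rough sketch of that pattern, using an invented "example" directory name:

#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/module.h>

static struct kobject *example_kobj;

static int __init example_firmware_init(void)
{
	/* creates /sys/firmware/example */
	example_kobj = kobject_create_and_add("example", firmware_kobj);
	if (!example_kobj)
		return -ENOMEM;
	return 0;
}

static void __exit example_firmware_exit(void)
{
	kobject_put(example_kobj);
}

module_init(example_firmware_init);
module_exit(example_firmware_exit);
MODULE_LICENSE("GPL");
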
+diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
+index 7080b41..6428cba 100644
+--- a/drivers/base/hypervisor.c
++++ b/drivers/base/hypervisor.c
+@@ -2,19 +2,23 @@
+  * hypervisor.c - /sys/hypervisor subsystem.
+  *
+  * Copyright (C) IBM Corp. 2006
++ * Copyright (C) 2007 Greg Kroah-Hartman <gregkh at suse.de>
++ * Copyright (C) 2007 Novell Inc.
+  *
+  * This file is released under the GPLv2
+  */
+ 
+ #include <linux/kobject.h>
+ #include <linux/device.h>
+-
+ #include "base.h"
+ 
+-decl_subsys(hypervisor, NULL, NULL);
+-EXPORT_SYMBOL_GPL(hypervisor_subsys);
++struct kobject *hypervisor_kobj;
++EXPORT_SYMBOL_GPL(hypervisor_kobj);
+ 
+ int __init hypervisor_init(void)
+ {
+-	return subsystem_register(&hypervisor_subsys);
++	hypervisor_kobj = kobject_create_and_add("hypervisor", NULL);
++	if (!hypervisor_kobj)
++		return -ENOMEM;
 +	return 0;
-+}
-+
-+static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
-+{
-+	struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	if (ctx->gf128 != NULL)
-+		gf128mul_free_4k(ctx->gf128);
-+
-+	crypto_free_ablkcipher(ctx->ctr);
-+}
-+
-+static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
-+						       const char *full_name,
-+						       const char *ctr_name)
-+{
-+	struct crypto_attr_type *algt;
-+	struct crypto_instance *inst;
-+	struct crypto_alg *ctr;
-+	struct gcm_instance_ctx *ctx;
-+	int err;
-+
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
-+		return ERR_PTR(err);
-+
-+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-+		return ERR_PTR(-EINVAL);
-+
-+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
-+	if (!inst)
-+		return ERR_PTR(-ENOMEM);
-+
-+	ctx = crypto_instance_ctx(inst);
-+	crypto_set_skcipher_spawn(&ctx->ctr, inst);
-+	err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
-+				   crypto_requires_sync(algt->type,
-+							algt->mask));
-+	if (err)
-+		goto err_free_inst;
-+
-+	ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
-+
-+	/* We only support 16-byte blocks. */
-+	if (ctr->cra_ablkcipher.ivsize != 16)
-+		goto out_put_ctr;
-+
-+	/* Not a stream cipher? */
-+	err = -EINVAL;
-+	if (ctr->cra_blocksize != 1)
-+		goto out_put_ctr;
-+
-+	err = -ENAMETOOLONG;
-+	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-+		     "gcm_base(%s)", ctr->cra_driver_name) >=
-+	    CRYPTO_MAX_ALG_NAME)
-+		goto out_put_ctr;
-+
-+	memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
+ }
+diff --git a/drivers/base/init.c b/drivers/base/init.c
+index 3713815..7bd9b6a 100644
+--- a/drivers/base/init.c
++++ b/drivers/base/init.c
+@@ -1,10 +1,8 @@
+ /*
+- *
+  * Copyright (c) 2002-3 Patrick Mochel
+  * Copyright (c) 2002-3 Open Source Development Labs
+  *
+  * This file is released under the GPLv2
+- *
+  */
+ 
+ #include <linux/device.h>
+@@ -14,12 +12,11 @@
+ #include "base.h"
+ 
+ /**
+- *	driver_init - initialize driver model.
++ * driver_init - initialize driver model.
+  *
+- *	Call the driver model init functions to initialize their
+- *	subsystems. Called early from init/main.c.
++ * Call the driver model init functions to initialize their
++ * subsystems. Called early from init/main.c.
+  */
+-
+ void __init driver_init(void)
+ {
+ 	/* These are the core pieces */
+@@ -36,5 +33,4 @@ void __init driver_init(void)
+ 	system_bus_init();
+ 	cpu_dev_init();
+ 	memory_dev_init();
+-	attribute_container_init();
+ }
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index 7868707..7ae413f 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -26,7 +26,7 @@
+ #define MEMORY_CLASS_NAME	"memory"
+ 
+ static struct sysdev_class memory_sysdev_class = {
+-	set_kset_name(MEMORY_CLASS_NAME),
++	.name = MEMORY_CLASS_NAME,
+ };
+ 
+ static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+new file mode 100644
+index 0000000..103be9c
+--- /dev/null
++++ b/drivers/base/module.c
+@@ -0,0 +1,94 @@
++/*
++ * module.c - module sysfs fun for drivers
++ *
++ * This file is released under the GPLv2
++ *
++ */
++#include <linux/device.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include "base.h"
 +
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-+	inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
-+	inst->alg.cra_priority = ctr->cra_priority;
-+	inst->alg.cra_blocksize = 1;
-+	inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1);
-+	inst->alg.cra_type = &crypto_aead_type;
-+	inst->alg.cra_aead.ivsize = 16;
-+	inst->alg.cra_aead.maxauthsize = 16;
-+	inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
-+	inst->alg.cra_init = crypto_gcm_init_tfm;
-+	inst->alg.cra_exit = crypto_gcm_exit_tfm;
-+	inst->alg.cra_aead.setkey = crypto_gcm_setkey;
-+	inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize;
-+	inst->alg.cra_aead.encrypt = crypto_gcm_encrypt;
-+	inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
++static char *make_driver_name(struct device_driver *drv)
++{
++	char *driver_name;
 +
-+out:
-+	return inst;
++	driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
++			      GFP_KERNEL);
++	if (!driver_name)
++		return NULL;
 +
-+out_put_ctr:
-+	crypto_drop_skcipher(&ctx->ctr);
-+err_free_inst:
-+	kfree(inst);
-+	inst = ERR_PTR(err);
-+	goto out;
++	sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
++	return driver_name;
 +}
 +
-+static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
++static void module_create_drivers_dir(struct module_kobject *mk)
 +{
-+	int err;
-+	const char *cipher_name;
-+	char ctr_name[CRYPTO_MAX_ALG_NAME];
-+	char full_name[CRYPTO_MAX_ALG_NAME];
-+
-+	cipher_name = crypto_attr_alg_name(tb[1]);
-+	err = PTR_ERR(cipher_name);
-+	if (IS_ERR(cipher_name))
-+		return ERR_PTR(err);
-+
-+	if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >=
-+	    CRYPTO_MAX_ALG_NAME)
-+		return ERR_PTR(-ENAMETOOLONG);
-+
-+	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
-+	    CRYPTO_MAX_ALG_NAME)
-+		return ERR_PTR(-ENAMETOOLONG);
++	if (!mk || mk->drivers_dir)
++		return;
 +
-+	return crypto_gcm_alloc_common(tb, full_name, ctr_name);
++	mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
 +}
 +
-+static void crypto_gcm_free(struct crypto_instance *inst)
++void module_add_driver(struct module *mod, struct device_driver *drv)
 +{
-+	struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
-+
-+	crypto_drop_skcipher(&ctx->ctr);
-+	kfree(inst);
-+}
++	char *driver_name;
++	int no_warn;
++	struct module_kobject *mk = NULL;
 +
-+static struct crypto_template crypto_gcm_tmpl = {
-+	.name = "gcm",
-+	.alloc = crypto_gcm_alloc,
-+	.free = crypto_gcm_free,
-+	.module = THIS_MODULE,
-+};
++	if (!drv)
++		return;
 +
-+static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
-+{
-+	int err;
-+	const char *ctr_name;
-+	char full_name[CRYPTO_MAX_ALG_NAME];
++	if (mod)
++		mk = &mod->mkobj;
++	else if (drv->mod_name) {
++		struct kobject *mkobj;
 +
-+	ctr_name = crypto_attr_alg_name(tb[1]);
-+	err = PTR_ERR(ctr_name);
-+	if (IS_ERR(ctr_name))
-+		return ERR_PTR(err);
++		/* Lookup built-in module entry in /sys/modules */
++		mkobj = kset_find_obj(module_kset, drv->mod_name);
++		if (mkobj) {
++			mk = container_of(mkobj, struct module_kobject, kobj);
++			/* remember our module structure */
++			drv->p->mkobj = mk;
++			/* kset_find_obj took a reference */
++			kobject_put(mkobj);
++		}
++	}
 +
-+	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)",
-+		     ctr_name) >= CRYPTO_MAX_ALG_NAME)
-+		return ERR_PTR(-ENAMETOOLONG);
++	if (!mk)
++		return;
 +
-+	return crypto_gcm_alloc_common(tb, full_name, ctr_name);
++	/* Don't check return codes; these calls are idempotent */
++	no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
++	driver_name = make_driver_name(drv);
++	if (driver_name) {
++		module_create_drivers_dir(mk);
++		no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
++					    driver_name);
++		kfree(driver_name);
++	}
 +}
 +
-+static struct crypto_template crypto_gcm_base_tmpl = {
-+	.name = "gcm_base",
-+	.alloc = crypto_gcm_base_alloc,
-+	.free = crypto_gcm_free,
-+	.module = THIS_MODULE,
-+};
-+
-+static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
-+				 unsigned int keylen)
++void module_remove_driver(struct device_driver *drv)
 +{
-+	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
-+	struct crypto_aead *child = ctx->child;
-+	int err;
-+
-+	if (keylen < 4)
-+		return -EINVAL;
++	struct module_kobject *mk = NULL;
++	char *driver_name;
 +
-+	keylen -= 4;
-+	memcpy(ctx->nonce, key + keylen, 4);
++	if (!drv)
++		return;
 +
-+	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-+	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
-+				     CRYPTO_TFM_REQ_MASK);
-+	err = crypto_aead_setkey(child, key, keylen);
-+	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
-+				      CRYPTO_TFM_RES_MASK);
++	sysfs_remove_link(&drv->p->kobj, "module");
 +
-+	return err;
++	if (drv->owner)
++		mk = &drv->owner->mkobj;
++	else if (drv->p->mkobj)
++		mk = drv->p->mkobj;
++	if (mk && mk->drivers_dir) {
++		driver_name = make_driver_name(drv);
++		if (driver_name) {
++			sysfs_remove_link(mk->drivers_dir, driver_name);
++			kfree(driver_name);
++		}
++	}
 +}
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 88eeed7..e59861f 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -15,7 +15,7 @@
+ #include <linux/device.h>
+ 
+ static struct sysdev_class node_class = {
+-	set_kset_name("node"),
++	.name = "node",
+ };
+ 
+ 
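
The memory and node sysdev_class conversions above are the same mechanical change from set_kset_name() to the new .name member; written out on its own, the pattern is simply the following (class name invented):

#include <linux/init.h>
#include <linux/sysdev.h>

static struct sysdev_class example_sysdev_class = {
	.name = "example",
};

static int __init example_sysdev_init(void)
{
	return sysdev_class_register(&example_sysdev_class);
}
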
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index fb56092..efaf282 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -20,7 +20,8 @@
+ 
+ #include "base.h"
+ 
+-#define to_platform_driver(drv)	(container_of((drv), struct platform_driver, driver))
++#define to_platform_driver(drv)	(container_of((drv), struct platform_driver, \
++				 driver))
+ 
+ struct device platform_bus = {
+ 	.bus_id		= "platform",
+@@ -28,14 +29,13 @@ struct device platform_bus = {
+ EXPORT_SYMBOL_GPL(platform_bus);
+ 
+ /**
+- *	platform_get_resource - get a resource for a device
+- *	@dev: platform device
+- *	@type: resource type
+- *	@num: resource index
++ * platform_get_resource - get a resource for a device
++ * @dev: platform device
++ * @type: resource type
++ * @num: resource index
+  */
+-struct resource *
+-platform_get_resource(struct platform_device *dev, unsigned int type,
+-		      unsigned int num)
++struct resource *platform_get_resource(struct platform_device *dev,
++				       unsigned int type, unsigned int num)
+ {
+ 	int i;
+ 
+@@ -43,8 +43,7 @@ platform_get_resource(struct platform_device *dev, unsigned int type,
+ 		struct resource *r = &dev->resource[i];
+ 
+ 		if ((r->flags & (IORESOURCE_IO|IORESOURCE_MEM|
+-				 IORESOURCE_IRQ|IORESOURCE_DMA))
+-		    == type)
++				 IORESOURCE_IRQ|IORESOURCE_DMA)) == type)
+ 			if (num-- == 0)
+ 				return r;
+ 	}
+@@ -53,9 +52,9 @@ platform_get_resource(struct platform_device *dev, unsigned int type,
+ EXPORT_SYMBOL_GPL(platform_get_resource);
+ 
+ /**
+- *	platform_get_irq - get an IRQ for a device
+- *	@dev: platform device
+- *	@num: IRQ number index
++ * platform_get_irq - get an IRQ for a device
++ * @dev: platform device
++ * @num: IRQ number index
+  */
+ int platform_get_irq(struct platform_device *dev, unsigned int num)
+ {
+@@ -66,14 +65,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
+ EXPORT_SYMBOL_GPL(platform_get_irq);
+ 
+ /**
+- *	platform_get_resource_byname - get a resource for a device by name
+- *	@dev: platform device
+- *	@type: resource type
+- *	@name: resource name
++ * platform_get_resource_byname - get a resource for a device by name
++ * @dev: platform device
++ * @type: resource type
++ * @name: resource name
+  */
+-struct resource *
+-platform_get_resource_byname(struct platform_device *dev, unsigned int type,
+-		      char *name)
++struct resource *platform_get_resource_byname(struct platform_device *dev,
++					      unsigned int type, char *name)
+ {
+ 	int i;
+ 
+@@ -90,22 +88,23 @@ platform_get_resource_byname(struct platform_device *dev, unsigned int type,
+ EXPORT_SYMBOL_GPL(platform_get_resource_byname);
+ 
+ /**
+- *	platform_get_irq - get an IRQ for a device
+- *	@dev: platform device
+- *	@name: IRQ name
++ * platform_get_irq - get an IRQ for a device
++ * @dev: platform device
++ * @name: IRQ name
+  */
+ int platform_get_irq_byname(struct platform_device *dev, char *name)
+ {
+-	struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
++	struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
++							  name);
+ 
+ 	return r ? r->start : -ENXIO;
+ }
+ EXPORT_SYMBOL_GPL(platform_get_irq_byname);
+ 
+ /**
+- *	platform_add_devices - add a numbers of platform devices
+- *	@devs: array of platform devices to add
+- *	@num: number of platform devices in array
++ * platform_add_devices - add a numbers of platform devices
++ * @devs: array of platform devices to add
++ * @num: number of platform devices in array
+  */
+ int platform_add_devices(struct platform_device **devs, int num)
+ {
+@@ -130,12 +129,11 @@ struct platform_object {
+ };
+ 
+ /**
+- *	platform_device_put
+- *	@pdev:	platform device to free
++ * platform_device_put
++ * @pdev: platform device to free
+  *
+- *	Free all memory associated with a platform device.  This function
+- *	must _only_ be externally called in error cases.  All other usage
+- *	is a bug.
++ * Free all memory associated with a platform device.  This function must
++ * _only_ be externally called in error cases.  All other usage is a bug.
+  */
+ void platform_device_put(struct platform_device *pdev)
+ {
+@@ -146,7 +144,8 @@ EXPORT_SYMBOL_GPL(platform_device_put);
+ 
+ static void platform_device_release(struct device *dev)
+ {
+-	struct platform_object *pa = container_of(dev, struct platform_object, pdev.dev);
++	struct platform_object *pa = container_of(dev, struct platform_object,
++						  pdev.dev);
+ 
+ 	kfree(pa->pdev.dev.platform_data);
+ 	kfree(pa->pdev.resource);
+@@ -154,12 +153,12 @@ static void platform_device_release(struct device *dev)
+ }
+ 
+ /**
+- *	platform_device_alloc
+- *	@name:	base name of the device we're adding
+- *	@id:    instance id
++ * platform_device_alloc
++ * @name: base name of the device we're adding
++ * @id: instance id
+  *
+- *	Create a platform device object which can have other objects attached
+- *	to it, and which will have attached objects freed when it is released.
++ * Create a platform device object which can have other objects attached
++ * to it, and which will have attached objects freed when it is released.
+  */
+ struct platform_device *platform_device_alloc(const char *name, int id)
+ {
+@@ -179,16 +178,17 @@ struct platform_device *platform_device_alloc(const char *name, int id)
+ EXPORT_SYMBOL_GPL(platform_device_alloc);
+ 
+ /**
+- *	platform_device_add_resources
+- *	@pdev:	platform device allocated by platform_device_alloc to add resources to
+- *	@res:   set of resources that needs to be allocated for the device
+- *	@num:	number of resources
++ * platform_device_add_resources
++ * @pdev: platform device allocated by platform_device_alloc to add resources to
++ * @res: set of resources that needs to be allocated for the device
++ * @num: number of resources
+  *
+- *	Add a copy of the resources to the platform device.  The memory
+- *	associated with the resources will be freed when the platform
+- *	device is released.
++ * Add a copy of the resources to the platform device.  The memory
++ * associated with the resources will be freed when the platform device is
++ * released.
+  */
+-int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num)
++int platform_device_add_resources(struct platform_device *pdev,
++				  struct resource *res, unsigned int num)
+ {
+ 	struct resource *r;
+ 
+@@ -203,16 +203,17 @@ int platform_device_add_resources(struct platform_device *pdev, struct resource
+ EXPORT_SYMBOL_GPL(platform_device_add_resources);
+ 
+ /**
+- *	platform_device_add_data
+- *	@pdev:	platform device allocated by platform_device_alloc to add resources to
+- *	@data:	platform specific data for this platform device
+- *	@size:	size of platform specific data
++ * platform_device_add_data
++ * @pdev: platform device allocated by platform_device_alloc to add resources to
++ * @data: platform specific data for this platform device
++ * @size: size of platform specific data
+  *
+- *	Add a copy of platform specific data to the platform device's platform_data
+- *	pointer.  The memory associated with the platform data will be freed
+- *	when the platform device is released.
++ * Add a copy of platform specific data to the platform device's
++ * platform_data pointer.  The memory associated with the platform data
++ * will be freed when the platform device is released.
+  */
+-int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size)
++int platform_device_add_data(struct platform_device *pdev, const void *data,
++			     size_t size)
+ {
+ 	void *d;
+ 
+@@ -226,11 +227,11 @@ int platform_device_add_data(struct platform_device *pdev, const void *data, siz
+ EXPORT_SYMBOL_GPL(platform_device_add_data);
+ 
+ /**
+- *	platform_device_add - add a platform device to device hierarchy
+- *	@pdev:	platform device we're adding
++ * platform_device_add - add a platform device to device hierarchy
++ * @pdev: platform device we're adding
+  *
+- *	This is part 2 of platform_device_register(), though may be called
+- *	separately _iff_ pdev was allocated by platform_device_alloc().
++ * This is part 2 of platform_device_register(), though may be called
++ * separately _iff_ pdev was allocated by platform_device_alloc().
+  */
+ int platform_device_add(struct platform_device *pdev)
+ {
+@@ -289,13 +290,12 @@ int platform_device_add(struct platform_device *pdev)
+ EXPORT_SYMBOL_GPL(platform_device_add);
+ 
+ /**
+- *	platform_device_del - remove a platform-level device
+- *	@pdev:	platform device we're removing
++ * platform_device_del - remove a platform-level device
++ * @pdev: platform device we're removing
+  *
+- *	Note that this function will also release all memory- and port-based
+- *	resources owned by the device (@dev->resource).  This function
+- *	must _only_ be externally called in error cases.  All other usage
+- *	is a bug.
++ * Note that this function will also release all memory- and port-based
++ * resources owned by the device (@dev->resource).  This function must
++ * _only_ be externally called in error cases.  All other usage is a bug.
+  */
+ void platform_device_del(struct platform_device *pdev)
+ {
+@@ -314,11 +314,10 @@ void platform_device_del(struct platform_device *pdev)
+ EXPORT_SYMBOL_GPL(platform_device_del);
+ 
+ /**
+- *	platform_device_register - add a platform-level device
+- *	@pdev:	platform device we're adding
+- *
++ * platform_device_register - add a platform-level device
++ * @pdev: platform device we're adding
+  */
+-int platform_device_register(struct platform_device * pdev)
++int platform_device_register(struct platform_device *pdev)
+ {
+ 	device_initialize(&pdev->dev);
+ 	return platform_device_add(pdev);
+@@ -326,14 +325,14 @@ int platform_device_register(struct platform_device * pdev)
+ EXPORT_SYMBOL_GPL(platform_device_register);
+ 
+ /**
+- *	platform_device_unregister - unregister a platform-level device
+- *	@pdev:	platform device we're unregistering
++ * platform_device_unregister - unregister a platform-level device
++ * @pdev: platform device we're unregistering
+  *
+- *	Unregistration is done in 2 steps. First we release all resources
+- *	and remove it from the subsystem, then we drop reference count by
+- *	calling platform_device_put().
++ * Unregistration is done in 2 steps. First we release all resources
++ * and remove it from the subsystem, then we drop reference count by
++ * calling platform_device_put().
+  */
+-void platform_device_unregister(struct platform_device * pdev)
++void platform_device_unregister(struct platform_device *pdev)
+ {
+ 	platform_device_del(pdev);
+ 	platform_device_put(pdev);
+@@ -341,27 +340,29 @@ void platform_device_unregister(struct platform_device * pdev)
+ EXPORT_SYMBOL_GPL(platform_device_unregister);
+ 
+ /**
+- *	platform_device_register_simple
+- *	@name:  base name of the device we're adding
+- *	@id:    instance id
+- *	@res:   set of resources that needs to be allocated for the device
+- *	@num:	number of resources
++ * platform_device_register_simple
++ * @name: base name of the device we're adding
++ * @id: instance id
++ * @res: set of resources that needs to be allocated for the device
++ * @num: number of resources
+  *
+- *	This function creates a simple platform device that requires minimal
+- *	resource and memory management. Canned release function freeing
+- *	memory allocated for the device allows drivers using such devices
+- *	to be unloaded without waiting for the last reference to the device
+- *	to be dropped.
++ * This function creates a simple platform device that requires minimal
++ * resource and memory management. Canned release function freeing memory
++ * allocated for the device allows drivers using such devices to be
++ * unloaded without waiting for the last reference to the device to be
++ * dropped.
+  *
+- *	This interface is primarily intended for use with legacy drivers
+- *	which probe hardware directly.  Because such drivers create sysfs
+- *	device nodes themselves, rather than letting system infrastructure
+- *	handle such device enumeration tasks, they don't fully conform to
+- *	the Linux driver model.  In particular, when such drivers are built
+- *	as modules, they can't be "hotplugged".
++ * This interface is primarily intended for use with legacy drivers which
++ * probe hardware directly.  Because such drivers create sysfs device nodes
++ * themselves, rather than letting system infrastructure handle such device
++ * enumeration tasks, they don't fully conform to the Linux driver model.
++ * In particular, when such drivers are built as modules, they can't be
++ * "hotplugged".
+  */
+-struct platform_device *platform_device_register_simple(char *name, int id,
+-							struct resource *res, unsigned int num)
++struct platform_device *platform_device_register_simple(const char *name,
++							int id,
++							struct resource *res,
++							unsigned int num)
+ {
+ 	struct platform_device *pdev;
+ 	int retval;
+@@ -436,8 +437,8 @@ static int platform_drv_resume(struct device *_dev)
+ }
+ 
+ /**
+- *	platform_driver_register
+- *	@drv: platform driver structure
++ * platform_driver_register
++ * @drv: platform driver structure
+  */
+ int platform_driver_register(struct platform_driver *drv)
+ {
+@@ -457,8 +458,8 @@ int platform_driver_register(struct platform_driver *drv)
+ EXPORT_SYMBOL_GPL(platform_driver_register);
+ 
+ /**
+- *	platform_driver_unregister
+- *	@drv: platform driver structure
++ * platform_driver_unregister
++ * @drv: platform driver structure
+  */
+ void platform_driver_unregister(struct platform_driver *drv)
+ {
+@@ -497,12 +498,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
+ 	 * if the probe was successful, and make sure any forced probes of
+ 	 * new devices fail.
+ 	 */
+-	spin_lock(&platform_bus_type.klist_drivers.k_lock);
++	spin_lock(&platform_bus_type.p->klist_drivers.k_lock);
+ 	drv->probe = NULL;
+-	if (code == 0 && list_empty(&drv->driver.klist_devices.k_list))
++	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
+ 		retval = -ENODEV;
+ 	drv->driver.probe = platform_drv_probe_fail;
+-	spin_unlock(&platform_bus_type.klist_drivers.k_lock);
++	spin_unlock(&platform_bus_type.p->klist_drivers.k_lock);
+ 
+ 	if (code != retval)
+ 		platform_driver_unregister(drv);
+@@ -516,8 +517,8 @@ EXPORT_SYMBOL_GPL(platform_driver_probe);
+  * (b) sysfs attribute lets new-style coldplug recover from hotplug events
+  *     mishandled before system is fully running:  "modprobe $(cat modalias)"
+  */
+-static ssize_t
+-modalias_show(struct device *dev, struct device_attribute *a, char *buf)
++static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
++			     char *buf)
+ {
+ 	struct platform_device	*pdev = to_platform_device(dev);
+ 	int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
+@@ -538,26 +539,24 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 	return 0;
+ }
+ 
+-
+ /**
+- *	platform_match - bind platform device to platform driver.
+- *	@dev:	device.
+- *	@drv:	driver.
++ * platform_match - bind platform device to platform driver.
++ * @dev: device.
++ * @drv: driver.
+  *
+- *	Platform device IDs are assumed to be encoded like this:
+- *	"<name><instance>", where <name> is a short description of the
+- *	type of device, like "pci" or "floppy", and <instance> is the
+- *	enumerated instance of the device, like '0' or '42'.
+- *	Driver IDs are simply "<name>".
+- *	So, extract the <name> from the platform_device structure,
+- *	and compare it against the name of the driver. Return whether
+- *	they match or not.
++ * Platform device IDs are assumed to be encoded like this:
++ * "<name><instance>", where <name> is a short description of the type of
++ * device, like "pci" or "floppy", and <instance> is the enumerated
++ * instance of the device, like '0' or '42'.  Driver IDs are simply
++ * "<name>".  So, extract the <name> from the platform_device structure,
++ * and compare it against the name of the driver. Return whether they match
++ * or not.
+  */
+-
+-static int platform_match(struct device * dev, struct device_driver * drv)
++static int platform_match(struct device *dev, struct device_driver *drv)
+ {
+-	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
++	struct platform_device *pdev;
+ 
++	pdev = container_of(dev, struct platform_device, dev);
+ 	return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0);
+ }
+ 
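
platform_match() compares the device's name field against the driver name, so the constified platform_device_register_simple() above still covers the usual legacy-device pattern. A sketch with an invented device name and I/O range:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct resource demo_resources[] = {
	{
		.start = 0x3f8,
		.end   = 0x3ff,
		.flags = IORESOURCE_IO,
	},
};

static struct platform_device *demo_pdev;

static int __init demo_device_init(void)
{
	/* pdev->name "demo" is what platform_match() compares against drv->name */
	demo_pdev = platform_device_register_simple("demo", 0, demo_resources,
						    ARRAY_SIZE(demo_resources));
	if (IS_ERR(demo_pdev))
		return PTR_ERR(demo_pdev);
	return 0;
}

static void __exit demo_device_exit(void)
{
	platform_device_unregister(demo_pdev);
}

module_init(demo_device_init);
module_exit(demo_device_exit);
MODULE_LICENSE("GPL");
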
+@@ -574,9 +573,10 @@ static int platform_suspend(struct device *dev, pm_message_t mesg)
+ static int platform_suspend_late(struct device *dev, pm_message_t mesg)
+ {
+ 	struct platform_driver *drv = to_platform_driver(dev->driver);
+-	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
++	struct platform_device *pdev;
+ 	int ret = 0;
+ 
++	pdev = container_of(dev, struct platform_device, dev);
+ 	if (dev->driver && drv->suspend_late)
+ 		ret = drv->suspend_late(pdev, mesg);
+ 
+@@ -586,16 +586,17 @@ static int platform_suspend_late(struct device *dev, pm_message_t mesg)
+ static int platform_resume_early(struct device *dev)
+ {
+ 	struct platform_driver *drv = to_platform_driver(dev->driver);
+-	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
++	struct platform_device *pdev;
+ 	int ret = 0;
+ 
++	pdev = container_of(dev, struct platform_device, dev);
+ 	if (dev->driver && drv->resume_early)
+ 		ret = drv->resume_early(pdev);
+ 
+ 	return ret;
+ }
+ 
+-static int platform_resume(struct device * dev)
++static int platform_resume(struct device *dev)
+ {
+ 	int ret = 0;
+ 
+diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
+index 44504e6..de28dfd 100644
+--- a/drivers/base/power/Makefile
++++ b/drivers/base/power/Makefile
+@@ -1,11 +1,6 @@
+-obj-y			:= shutdown.o
+ obj-$(CONFIG_PM)	+= sysfs.o
+ obj-$(CONFIG_PM_SLEEP)	+= main.o
+ obj-$(CONFIG_PM_TRACE)	+= trace.o
+ 
+-ifeq ($(CONFIG_DEBUG_DRIVER),y)
+-EXTRA_CFLAGS += -DDEBUG
+-endif
+-ifeq ($(CONFIG_PM_VERBOSE),y)
+-EXTRA_CFLAGS += -DDEBUG
+-endif
++ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
++ccflags-$(CONFIG_PM_VERBOSE)   += -DDEBUG
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 691ffb6..200ed5f 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -24,20 +24,45 @@
+ #include <linux/mutex.h>
+ #include <linux/pm.h>
+ #include <linux/resume-trace.h>
++#include <linux/rwsem.h>
+ 
+ #include "../base.h"
+ #include "power.h"
+ 
++/*
++ * The entries in the dpm_active list are in a depth first order, simply
++ * because children are guaranteed to be discovered after parents, and
++ * are inserted at the back of the list on discovery.
++ *
++ * All the other lists are kept in the same order, for consistency.
++ * However the lists aren't always traversed in the same order.
++ * Semaphores must be acquired from the top (i.e., front) down
++ * and released in the opposite order.  Devices must be suspended
++ * from the bottom (i.e., end) up and resumed in the opposite order.
++ * That way no parent will be suspended while it still has an active
++ * child.
++ *
++ * Since device_pm_add() may be called with a device semaphore held,
++ * we must never try to acquire a device semaphore while holding
++ * dpm_list_mutex.
++ */
 +
-+static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
-+				      unsigned int authsize)
-+{
-+	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
-+
-+	switch (authsize) {
-+	case 8:
-+	case 12:
-+	case 16:
-+		break;
-+	default:
-+		return -EINVAL;
+ LIST_HEAD(dpm_active);
++static LIST_HEAD(dpm_locked);
+ static LIST_HEAD(dpm_off);
+ static LIST_HEAD(dpm_off_irq);
++static LIST_HEAD(dpm_destroy);
+ 
+-static DEFINE_MUTEX(dpm_mtx);
+ static DEFINE_MUTEX(dpm_list_mtx);
+ 
+-int (*platform_enable_wakeup)(struct device *dev, int is_on);
++static DECLARE_RWSEM(pm_sleep_rwsem);
+ 
++int (*platform_enable_wakeup)(struct device *dev, int is_on);
+ 
++/**
++ *	device_pm_add - add a device to the list of active devices
++ *	@dev:	Device to be added to the list
++ */
+ void device_pm_add(struct device *dev)
+ {
+ 	pr_debug("PM: Adding info for %s:%s\n",
+@@ -48,8 +73,36 @@ void device_pm_add(struct device *dev)
+ 	mutex_unlock(&dpm_list_mtx);
+ }
+ 
++/**
++ *	device_pm_remove - remove a device from the list of active devices
++ *	@dev:	Device to be removed from the list
++ *
++ *	This function also removes the device's PM-related sysfs attributes.
++ */
+ void device_pm_remove(struct device *dev)
+ {
++	/*
++	 * If this function is called during a suspend, it will be blocked,
++	 * because we're holding the device's semaphore at that time, which may
++	 * lead to a deadlock.  In that case we want to print a warning.
++	 * However, it may also be called by unregister_dropped_devices() with
++	 * the device's semaphore released, in which case the warning should
++	 * not be printed.
++	 */
++	if (down_trylock(&dev->sem)) {
++		if (down_read_trylock(&pm_sleep_rwsem)) {
++			/* No suspend in progress, wait on dev->sem */
++			down(&dev->sem);
++			up_read(&pm_sleep_rwsem);
++		} else {
++			/* Suspend in progress, we may deadlock */
++			dev_warn(dev, "Suspicious %s during suspend\n",
++				__FUNCTION__);
++			dump_stack();
++			/* The user has been warned ... */
++			down(&dev->sem);
++		}
 +	}
-+
-+	return crypto_aead_setauthsize(ctx->child, authsize);
+ 	pr_debug("PM: Removing info for %s:%s\n",
+ 		 dev->bus ? dev->bus->name : "No Bus",
+ 		 kobject_name(&dev->kobj));
+@@ -57,25 +110,124 @@ void device_pm_remove(struct device *dev)
+ 	dpm_sysfs_remove(dev);
+ 	list_del_init(&dev->power.entry);
+ 	mutex_unlock(&dpm_list_mtx);
++	up(&dev->sem);
 +}
 +
-+static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
++/**
++ *	device_pm_schedule_removal - schedule the removal of a suspended device
++ *	@dev:	Device to destroy
++ *
++ *	Moves the device to the dpm_destroy list for further processing by
++ *	unregister_dropped_devices().
++ */
++void device_pm_schedule_removal(struct device *dev)
 +{
-+	struct aead_request *subreq = aead_request_ctx(req);
-+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
-+	struct crypto_aead *child = ctx->child;
-+	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
-+			   crypto_aead_alignmask(child) + 1);
-+
-+	memcpy(iv, ctx->nonce, 4);
-+	memcpy(iv + 4, req->iv, 8);
-+
-+	aead_request_set_tfm(subreq, child);
-+	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
-+				  req->base.data);
-+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
-+	aead_request_set_assoc(subreq, req->assoc, req->assoclen);
-+
-+	return subreq;
++	pr_debug("PM: Preparing for removal: %s:%s\n",
++		dev->bus ? dev->bus->name : "No Bus",
++		kobject_name(&dev->kobj));
++	mutex_lock(&dpm_list_mtx);
++	list_move_tail(&dev->power.entry, &dpm_destroy);
++	mutex_unlock(&dpm_list_mtx);
 +}
 +
-+static int crypto_rfc4106_encrypt(struct aead_request *req)
++/**
++ *	pm_sleep_lock - mutual exclusion for registration and suspend
++ *
++ *	Returns 0 if no suspend is underway and device registration
++ *	may proceed, otherwise -EBUSY.
++ */
++int pm_sleep_lock(void)
 +{
-+	req = crypto_rfc4106_crypt(req);
++	if (down_read_trylock(&pm_sleep_rwsem))
++		return 0;
 +
-+	return crypto_aead_encrypt(req);
++	return -EBUSY;
 +}
 +
-+static int crypto_rfc4106_decrypt(struct aead_request *req)
++/**
++ *	pm_sleep_unlock - mutual exclusion for registration and suspend
++ *
++ *	This routine undoes the effect of device_pm_add_lock
++ *	when a device's registration is complete.
++ */
++void pm_sleep_unlock(void)
 +{
-+	req = crypto_rfc4106_crypt(req);
++	up_read(&pm_sleep_rwsem);
+ }
+ 
+ 
+ /*------------------------- Resume routines -------------------------*/
+ 
+ /**
+- *	resume_device - Restore state for one device.
++ *	resume_device_early - Power on one device (early resume).
+  *	@dev:	Device.
+  *
++ *	Must be called with interrupts disabled.
+  */
+-
+-static int resume_device(struct device * dev)
++static int resume_device_early(struct device *dev)
+ {
+ 	int error = 0;
+ 
+ 	TRACE_DEVICE(dev);
+ 	TRACE_RESUME(0);
+ 
+-	down(&dev->sem);
++	if (dev->bus && dev->bus->resume_early) {
++		dev_dbg(dev, "EARLY resume\n");
++		error = dev->bus->resume_early(dev);
++	}
 +
-+	return crypto_aead_decrypt(req);
++	TRACE_RESUME(error);
++	return error;
 +}
 +
-+static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm)
++/**
++ *	dpm_power_up - Power on all regular (non-sysdev) devices.
++ *
++ *	Walk the dpm_off_irq list and power each device up. This
++ *	is used for devices that required they be powered down with
++ *	interrupts disabled. As devices are powered on, they are moved
++ *	to the dpm_off list.
++ *
++ *	Must be called with interrupts disabled and only one CPU running.
++ */
++static void dpm_power_up(void)
 +{
-+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-+	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
-+	struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct crypto_aead *aead;
-+	unsigned long align;
-+
-+	aead = crypto_spawn_aead(spawn);
-+	if (IS_ERR(aead))
-+		return PTR_ERR(aead);
-+
-+	ctx->child = aead;
 +
-+	align = crypto_aead_alignmask(aead);
-+	align &= ~(crypto_tfm_ctx_alignment() - 1);
-+	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
-+				ALIGN(crypto_aead_reqsize(aead),
-+				      crypto_tfm_ctx_alignment()) +
-+				align + 16;
++	while (!list_empty(&dpm_off_irq)) {
++		struct list_head *entry = dpm_off_irq.next;
++		struct device *dev = to_device(entry);
 +
-+	return 0;
++		list_move_tail(entry, &dpm_off);
++		resume_device_early(dev);
++	}
 +}
 +
-+static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm)
++/**
++ *	device_power_up - Turn on all devices that need special attention.
++ *
++ *	Power on system devices, then devices that required we shut them down
++ *	with interrupts disabled.
++ *
++ *	Must be called with interrupts disabled.
++ */
++void device_power_up(void)
 +{
-+	struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	crypto_free_aead(ctx->child);
++	sysdev_resume();
++	dpm_power_up();
 +}
++EXPORT_SYMBOL_GPL(device_power_up);
 +
-+static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb)
++/**
++ *	resume_device - Restore state for one device.
++ *	@dev:	Device.
++ *
++ */
++static int resume_device(struct device *dev)
 +{
-+	struct crypto_attr_type *algt;
-+	struct crypto_instance *inst;
-+	struct crypto_aead_spawn *spawn;
-+	struct crypto_alg *alg;
-+	const char *ccm_name;
-+	int err;
-+
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
-+		return ERR_PTR(err);
-+
-+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-+		return ERR_PTR(-EINVAL);
-+
-+	ccm_name = crypto_attr_alg_name(tb[1]);
-+	err = PTR_ERR(ccm_name);
-+	if (IS_ERR(ccm_name))
-+		return ERR_PTR(err);
-+
-+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-+	if (!inst)
-+		return ERR_PTR(-ENOMEM);
-+
-+	spawn = crypto_instance_ctx(inst);
-+	crypto_set_aead_spawn(spawn, inst);
-+	err = crypto_grab_aead(spawn, ccm_name, 0,
-+			       crypto_requires_sync(algt->type, algt->mask));
-+	if (err)
-+		goto out_free_inst;
-+
-+	alg = crypto_aead_spawn_alg(spawn);
-+
-+	err = -EINVAL;
-+
-+	/* We only support 16-byte blocks. */
-+	if (alg->cra_aead.ivsize != 16)
-+		goto out_drop_alg;
-+
-+	/* Not a stream cipher? */
-+	if (alg->cra_blocksize != 1)
-+		goto out_drop_alg;
-+
-+	err = -ENAMETOOLONG;
-+	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-+		     "rfc4106(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
-+	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-+		     "rfc4106(%s)", alg->cra_driver_name) >=
-+	    CRYPTO_MAX_ALG_NAME)
-+		goto out_drop_alg;
-+
-+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-+	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-+	inst->alg.cra_priority = alg->cra_priority;
-+	inst->alg.cra_blocksize = 1;
-+	inst->alg.cra_alignmask = alg->cra_alignmask;
-+	inst->alg.cra_type = &crypto_nivaead_type;
-+
-+	inst->alg.cra_aead.ivsize = 8;
-+	inst->alg.cra_aead.maxauthsize = 16;
-+
-+	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
-+
-+	inst->alg.cra_init = crypto_rfc4106_init_tfm;
-+	inst->alg.cra_exit = crypto_rfc4106_exit_tfm;
-+
-+	inst->alg.cra_aead.setkey = crypto_rfc4106_setkey;
-+	inst->alg.cra_aead.setauthsize = crypto_rfc4106_setauthsize;
-+	inst->alg.cra_aead.encrypt = crypto_rfc4106_encrypt;
-+	inst->alg.cra_aead.decrypt = crypto_rfc4106_decrypt;
-+
-+	inst->alg.cra_aead.geniv = "seqiv";
-+
-+out:
-+	return inst;
++	int error = 0;
 +
-+out_drop_alg:
-+	crypto_drop_aead(spawn);
-+out_free_inst:
-+	kfree(inst);
-+	inst = ERR_PTR(err);
-+	goto out;
++	TRACE_DEVICE(dev);
++	TRACE_RESUME(0);
+ 
+ 	if (dev->bus && dev->bus->resume) {
+ 		dev_dbg(dev,"resuming\n");
+@@ -92,126 +244,94 @@ static int resume_device(struct device * dev)
+ 		error = dev->class->resume(dev);
+ 	}
+ 
+-	up(&dev->sem);
+-
+ 	TRACE_RESUME(error);
+ 	return error;
+ }
+ 
+-
+-static int resume_device_early(struct device * dev)
+-{
+-	int error = 0;
+-
+-	TRACE_DEVICE(dev);
+-	TRACE_RESUME(0);
+-	if (dev->bus && dev->bus->resume_early) {
+-		dev_dbg(dev,"EARLY resume\n");
+-		error = dev->bus->resume_early(dev);
+-	}
+-	TRACE_RESUME(error);
+-	return error;
+-}
+-
+-/*
+- * Resume the devices that have either not gone through
+- * the late suspend, or that did go through it but also
+- * went through the early resume
++/**
++ *	dpm_resume - Resume every device.
++ *
++ *	Resume the devices that have either not gone through
++ *	the late suspend, or that did go through it but also
++ *	went through the early resume.
++ *
++ *	Take devices from the dpm_off_list, resume them,
++ *	and put them on the dpm_locked list.
+  */
+ static void dpm_resume(void)
+ {
+ 	mutex_lock(&dpm_list_mtx);
+ 	while(!list_empty(&dpm_off)) {
+-		struct list_head * entry = dpm_off.next;
+-		struct device * dev = to_device(entry);
+-
+-		get_device(dev);
+-		list_move_tail(entry, &dpm_active);
++		struct list_head *entry = dpm_off.next;
++		struct device *dev = to_device(entry);
+ 
++		list_move_tail(entry, &dpm_locked);
+ 		mutex_unlock(&dpm_list_mtx);
+ 		resume_device(dev);
+ 		mutex_lock(&dpm_list_mtx);
+-		put_device(dev);
+ 	}
+ 	mutex_unlock(&dpm_list_mtx);
+ }
+ 
+-
+ /**
+- *	device_resume - Restore state of each device in system.
++ *	unlock_all_devices - Release each device's semaphore
+  *
+- *	Walk the dpm_off list, remove each entry, resume the device,
+- *	then add it to the dpm_active list.
++ *	Go through the dpm_off list.  Put each device on the dpm_active
++ *	list and unlock it.
+  */
+-
+-void device_resume(void)
++static void unlock_all_devices(void)
+ {
+-	might_sleep();
+-	mutex_lock(&dpm_mtx);
+-	dpm_resume();
+-	mutex_unlock(&dpm_mtx);
+-}
+-
+-EXPORT_SYMBOL_GPL(device_resume);
++	mutex_lock(&dpm_list_mtx);
++	while (!list_empty(&dpm_locked)) {
++		struct list_head *entry = dpm_locked.prev;
++		struct device *dev = to_device(entry);
+ 
++		list_move(entry, &dpm_active);
++		up(&dev->sem);
++	}
++	mutex_unlock(&dpm_list_mtx);
 +}
-+
-+static void crypto_rfc4106_free(struct crypto_instance *inst)
+ 
+ /**
+- *	dpm_power_up - Power on some devices.
+- *
+- *	Walk the dpm_off_irq list and power each device up. This
+- *	is used for devices that required they be powered down with
+- *	interrupts disabled. As devices are powered on, they are moved
+- *	to the dpm_active list.
++ *	unregister_dropped_devices - Unregister devices scheduled for removal
+  *
+- *	Interrupts must be disabled when calling this.
++ *	Unregister all devices on the dpm_destroy list.
+  */
+-
+-static void dpm_power_up(void)
++static void unregister_dropped_devices(void)
+ {
+-	while(!list_empty(&dpm_off_irq)) {
+-		struct list_head * entry = dpm_off_irq.next;
+-		struct device * dev = to_device(entry);
++	mutex_lock(&dpm_list_mtx);
++	while (!list_empty(&dpm_destroy)) {
++		struct list_head *entry = dpm_destroy.next;
++		struct device *dev = to_device(entry);
+ 
+-		list_move_tail(entry, &dpm_off);
+-		resume_device_early(dev);
++		up(&dev->sem);
++		mutex_unlock(&dpm_list_mtx);
++		/* This also removes the device from the list */
++		device_unregister(dev);
++		mutex_lock(&dpm_list_mtx);
+ 	}
++	mutex_unlock(&dpm_list_mtx);
+ }
+ 
+-
+ /**
+- *	device_power_up - Turn on all devices that need special attention.
++ *	device_resume - Restore state of each device in system.
+  *
+- *	Power on system devices then devices that required we shut them down
+- *	with interrupts disabled.
+- *	Called with interrupts disabled.
++ *	Resume all the devices, unlock them all, and allow new
++ *	devices to be registered once again.
+  */
+-
+-void device_power_up(void)
++void device_resume(void)
+ {
+-	sysdev_resume();
+-	dpm_power_up();
++	might_sleep();
++	dpm_resume();
++	unlock_all_devices();
++	unregister_dropped_devices();
++	up_write(&pm_sleep_rwsem);
+ }
+-
+-EXPORT_SYMBOL_GPL(device_power_up);
++EXPORT_SYMBOL_GPL(device_resume);
+ 
+ 
+ /*------------------------- Suspend routines -------------------------*/
+ 
+-/*
+- * The entries in the dpm_active list are in a depth first order, simply
+- * because children are guaranteed to be discovered after parents, and
+- * are inserted at the back of the list on discovery.
+- *
+- * All list on the suspend path are done in reverse order, so we operate
+- * on the leaves of the device tree (or forests, depending on how you want
+- * to look at it ;) first. As nodes are removed from the back of the list,
+- * they are inserted into the front of their destintation lists.
+- *
+- * Things are the reverse on the resume path - iterations are done in
+- * forward order, and nodes are inserted at the back of their destination
+- * lists. This way, the ancestors will be accessed before their descendents.
+- */
+-
+ static inline char *suspend_verb(u32 event)
+ {
+ 	switch (event) {
+@@ -222,7 +342,6 @@ static inline char *suspend_verb(u32 event)
+ 	}
+ }
+ 
+-
+ static void
+ suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
+ {
+@@ -232,16 +351,73 @@ suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
+ }
+ 
+ /**
+- *	suspend_device - Save state of one device.
++ *	suspend_device_late - Shut down one device (late suspend).
+  *	@dev:	Device.
+  *	@state:	Power state device is entering.
++ *
++ *	This is called with interrupts off and only a single CPU running.
+  */
++static int suspend_device_late(struct device *dev, pm_message_t state)
 +{
-+	crypto_drop_spawn(crypto_instance_ctx(inst));
-+	kfree(inst);
++	int error = 0;
+ 
+-static int suspend_device(struct device * dev, pm_message_t state)
++	if (dev->bus && dev->bus->suspend_late) {
++		suspend_device_dbg(dev, state, "LATE ");
++		error = dev->bus->suspend_late(dev, state);
++		suspend_report_result(dev->bus->suspend_late, error);
++	}
++	return error;
 +}
 +
-+static struct crypto_template crypto_rfc4106_tmpl = {
-+	.name = "rfc4106",
-+	.alloc = crypto_rfc4106_alloc,
-+	.free = crypto_rfc4106_free,
-+	.module = THIS_MODULE,
-+};
-+
-+static int __init crypto_gcm_module_init(void)
++/**
++ *	device_power_down - Shut down special devices.
++ *	@state:		Power state to enter.
++ *
++ *	Power down devices that require interrupts to be disabled
++ *	and move them from the dpm_off list to the dpm_off_irq list.
++ *	Then power down system devices.
++ *
++ *	Must be called with interrupts disabled and only one CPU running.
++ */
++int device_power_down(pm_message_t state)
 +{
-+	int err;
++	int error = 0;
 +
-+	err = crypto_register_template(&crypto_gcm_base_tmpl);
-+	if (err)
-+		goto out;
++	while (!list_empty(&dpm_off)) {
++		struct list_head *entry = dpm_off.prev;
++		struct device *dev = to_device(entry);
 +
-+	err = crypto_register_template(&crypto_gcm_tmpl);
-+	if (err)
-+		goto out_undo_base;
++		list_del_init(&dev->power.entry);
++		error = suspend_device_late(dev, state);
++		if (error) {
++			printk(KERN_ERR "Could not power down device %s: "
++					"error %d\n",
++					kobject_name(&dev->kobj), error);
++			if (list_empty(&dev->power.entry))
++				list_add(&dev->power.entry, &dpm_off);
++			break;
++		}
++		if (list_empty(&dev->power.entry))
++			list_add(&dev->power.entry, &dpm_off_irq);
++	}
 +
-+	err = crypto_register_template(&crypto_rfc4106_tmpl);
-+	if (err)
-+		goto out_undo_gcm;
++	if (!error)
++		error = sysdev_suspend(state);
++	if (error)
++		dpm_power_up();
++	return error;
++}
++EXPORT_SYMBOL_GPL(device_power_down);
 +
-+out:
-+	return err;
++/**
++ *	suspend_device - Save state of one device.
++ *	@dev:	Device.
++ *	@state:	Power state device is entering.
++ */
++int suspend_device(struct device *dev, pm_message_t state)
+ {
+ 	int error = 0;
+ 
+-	down(&dev->sem);
+ 	if (dev->power.power_state.event) {
+ 		dev_dbg(dev, "PM: suspend %d-->%d\n",
+ 			dev->power.power_state.event, state.event);
+@@ -264,123 +440,105 @@ static int suspend_device(struct device * dev, pm_message_t state)
+ 		error = dev->bus->suspend(dev, state);
+ 		suspend_report_result(dev->bus->suspend, error);
+ 	}
+-	up(&dev->sem);
+-	return error;
+-}
+-
+-
+-/*
+- * This is called with interrupts off, only a single CPU
+- * running. We can't acquire a mutex or semaphore (and we don't
+- * need the protection)
+- */
+-static int suspend_device_late(struct device *dev, pm_message_t state)
+-{
+-	int error = 0;
+-
+-	if (dev->bus && dev->bus->suspend_late) {
+-		suspend_device_dbg(dev, state, "LATE ");
+-		error = dev->bus->suspend_late(dev, state);
+-		suspend_report_result(dev->bus->suspend_late, error);
+-	}
+ 	return error;
+ }
+ 
+ /**
+- *	device_suspend - Save state and stop all devices in system.
+- *	@state:		Power state to put each device in.
++ *	dpm_suspend - Suspend every device.
++ *	@state:	Power state to put each device in.
+  *
+- *	Walk the dpm_active list, call ->suspend() for each device, and move
+- *	it to the dpm_off list.
++ *	Walk the dpm_locked list.  Suspend each device and move it
++ *	to the dpm_off list.
+  *
+  *	(For historical reasons, if it returns -EAGAIN, that used to mean
+  *	that the device would be called again with interrupts disabled.
+  *	These days, we use the "suspend_late()" callback for that, so we
+  *	print a warning and consider it an error).
+- *
+- *	If we get a different error, try and back out.
+- *
+- *	If we hit a failure with any of the devices, call device_resume()
+- *	above to bring the suspended devices back to life.
+- *
+  */
+-
+-int device_suspend(pm_message_t state)
++static int dpm_suspend(pm_message_t state)
+ {
+ 	int error = 0;
+ 
+-	might_sleep();
+-	mutex_lock(&dpm_mtx);
+ 	mutex_lock(&dpm_list_mtx);
+-	while (!list_empty(&dpm_active) && error == 0) {
+-		struct list_head * entry = dpm_active.prev;
+-		struct device * dev = to_device(entry);
++	while (!list_empty(&dpm_locked)) {
++		struct list_head *entry = dpm_locked.prev;
++		struct device *dev = to_device(entry);
+ 
+-		get_device(dev);
++		list_del_init(&dev->power.entry);
+ 		mutex_unlock(&dpm_list_mtx);
+-
+ 		error = suspend_device(dev, state);
+-
+-		mutex_lock(&dpm_list_mtx);
+-
+-		/* Check if the device got removed */
+-		if (!list_empty(&dev->power.entry)) {
+-			/* Move it to the dpm_off list */
+-			if (!error)
+-				list_move(&dev->power.entry, &dpm_off);
+-		}
+-		if (error)
++		if (error) {
+ 			printk(KERN_ERR "Could not suspend device %s: "
+-				"error %d%s\n",
+-				kobject_name(&dev->kobj), error,
+-				error == -EAGAIN ? " (please convert to suspend_late)" : "");
+-		put_device(dev);
++					"error %d%s\n",
++					kobject_name(&dev->kobj),
++					error,
++					(error == -EAGAIN ?
++					" (please convert to suspend_late)" :
++					""));
++			mutex_lock(&dpm_list_mtx);
++			if (list_empty(&dev->power.entry))
++				list_add(&dev->power.entry, &dpm_locked);
++			mutex_unlock(&dpm_list_mtx);
++			break;
++		}
++		mutex_lock(&dpm_list_mtx);
++		if (list_empty(&dev->power.entry))
++			list_add(&dev->power.entry, &dpm_off);
+ 	}
+ 	mutex_unlock(&dpm_list_mtx);
+-	if (error)
+-		dpm_resume();
+ 
+-	mutex_unlock(&dpm_mtx);
+ 	return error;
+ }
+ 
+-EXPORT_SYMBOL_GPL(device_suspend);
+-
+ /**
+- *	device_power_down - Shut down special devices.
+- *	@state:		Power state to enter.
++ *	lock_all_devices - Acquire every device's semaphore
+  *
+- *	Walk the dpm_off_irq list, calling ->power_down() for each device that
+- *	couldn't power down the device with interrupts enabled. When we're
+- *	done, power down system devices.
++ *	Go through the dpm_active list. Carefully lock each device's
++ *	semaphore and put it in on the dpm_locked list.
+  */
+-
+-int device_power_down(pm_message_t state)
++static void lock_all_devices(void)
+ {
+-	int error = 0;
+-	struct device * dev;
++	mutex_lock(&dpm_list_mtx);
++	while (!list_empty(&dpm_active)) {
++		struct list_head *entry = dpm_active.next;
++		struct device *dev = to_device(entry);
+ 
+-	while (!list_empty(&dpm_off)) {
+-		struct list_head * entry = dpm_off.prev;
++		/* Required locking order is dev->sem first,
++		 * then dpm_list_mutex.  Hence this awkward code.
++		 */
++		get_device(dev);
++		mutex_unlock(&dpm_list_mtx);
++		down(&dev->sem);
++		mutex_lock(&dpm_list_mtx);
+ 
+-		dev = to_device(entry);
+-		error = suspend_device_late(dev, state);
+-		if (error)
+-			goto Error;
+-		list_move(&dev->power.entry, &dpm_off_irq);
++		if (list_empty(entry))
++			up(&dev->sem);		/* Device was removed */
++		else
++			list_move_tail(entry, &dpm_locked);
++		put_device(dev);
+ 	}
++	mutex_unlock(&dpm_list_mtx);
++}
 +
-+out_undo_gcm:
-+	crypto_unregister_template(&crypto_gcm_tmpl);
-+out_undo_base:
-+	crypto_unregister_template(&crypto_gcm_base_tmpl);
-+	goto out;
++/**
++ *	device_suspend - Save state and stop all devices in system.
++ *
++ *	Prevent new devices from being registered, then lock all devices
++ *	and suspend them.
++ */
++int device_suspend(pm_message_t state)
++{
++	int error;
+ 
+-	error = sysdev_suspend(state);
+- Done:
++	might_sleep();
++	down_write(&pm_sleep_rwsem);
++	lock_all_devices();
++	error = dpm_suspend(state);
++	if (error)
++		device_resume();
+ 	return error;
+- Error:
+-	printk(KERN_ERR "Could not power down device %s: "
+-		"error %d\n", kobject_name(&dev->kobj), error);
+-	dpm_power_up();
+-	goto Done;
+ }
+-
+-EXPORT_SYMBOL_GPL(device_power_down);
++EXPORT_SYMBOL_GPL(device_suspend);
+ 
+ void __suspend_report_result(const char *function, void *fn, int ret)
+ {
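
For orientation, a minimal sketch of how a sleep path would drive the reworked entry points above (illustrative only; the real callers live in kernel/power/ and are not part of this hunk, and every name prefixed with example_ is invented):

extern void example_do_the_actual_sleep(void);	/* placeholder: irq-off phase, platform hook */

static int example_enter_sleep(void)
{
	int error;

	/* Blocks new registrations, locks every dev->sem, calls ->suspend(). */
	error = device_suspend(PMSG_SUSPEND);
	if (error)
		return error;	/* device_suspend() has already resumed and unlocked */

	example_do_the_actual_sleep();

	/* Calls ->resume(), releases each dev->sem, unregisters devices whose
	 * removal was deferred while they were locked, and re-allows registration. */
	device_resume();
	return 0;
}
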
+diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
+index 379da4e..6f0dfca 100644
+--- a/drivers/base/power/power.h
++++ b/drivers/base/power/power.h
+@@ -1,10 +1,3 @@
+-/*
+- * shutdown.c
+- */
+-
+-extern void device_shutdown(void);
+-
+-
+ #ifdef CONFIG_PM_SLEEP
+ 
+ /*
+@@ -20,6 +13,9 @@ static inline struct device *to_device(struct list_head *entry)
+ 
+ extern void device_pm_add(struct device *);
+ extern void device_pm_remove(struct device *);
++extern void device_pm_schedule_removal(struct device *);
++extern int pm_sleep_lock(void);
++extern void pm_sleep_unlock(void);
+ 
+ #else /* CONFIG_PM_SLEEP */
+ 
+@@ -32,6 +28,15 @@ static inline void device_pm_remove(struct device *dev)
+ {
+ }
+ 
++static inline int pm_sleep_lock(void)
++{
++	return 0;
 +}
 +
-+static void __exit crypto_gcm_module_exit(void)
++static inline void pm_sleep_unlock(void)
 +{
-+	crypto_unregister_template(&crypto_rfc4106_tmpl);
-+	crypto_unregister_template(&crypto_gcm_tmpl);
-+	crypto_unregister_template(&crypto_gcm_base_tmpl);
 +}
 +
-+module_init(crypto_gcm_module_init);
-+module_exit(crypto_gcm_module_exit);
+ #endif
+ 
+ #ifdef CONFIG_PM
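
A hypothetical caller of the new pm_sleep_lock()/pm_sleep_unlock() helpers (not part of this hunk): the point of the inline stubs is that registration paths can call them unconditionally and the calls compile away when CONFIG_PM_SLEEP is off. Presumably the real pm_sleep_lock() try-acquires the read side of pm_sleep_rwsem and fails while a suspend holds the write side; example_do_registration() is a placeholder.

extern int example_do_registration(struct device *dev);	/* placeholder */

static int example_register(struct device *dev)
{
	int error = pm_sleep_lock();

	if (error)
		return error;	/* a sleep transition is in progress */

	error = example_do_registration(dev);

	pm_sleep_unlock();
	return error;
}
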
+diff --git a/drivers/base/power/shutdown.c b/drivers/base/power/shutdown.c
+deleted file mode 100644
+index 56e8eaa..0000000
+--- a/drivers/base/power/shutdown.c
++++ /dev/null
+@@ -1,48 +0,0 @@
+-/*
+- * shutdown.c - power management functions for the device tree.
+- *
+- * Copyright (c) 2002-3 Patrick Mochel
+- *		 2002-3 Open Source Development Lab
+- *
+- * This file is released under the GPLv2
+- *
+- */
+-
+-#include <linux/device.h>
+-#include <asm/semaphore.h>
+-
+-#include "../base.h"
+-#include "power.h"
+-
+-#define to_dev(node) container_of(node, struct device, kobj.entry)
+-
+-
+-/**
+- * We handle system devices differently - we suspend and shut them
+- * down last and resume them first. That way, we don't do anything stupid like
+- * shutting down the interrupt controller before any devices..
+- *
+- * Note that there are not different stages for power management calls -
+- * they only get one called once when interrupts are disabled.
+- */
+-
+-
+-/**
+- * device_shutdown - call ->shutdown() on each device to shutdown.
+- */
+-void device_shutdown(void)
+-{
+-	struct device * dev, *devn;
+-
+-	list_for_each_entry_safe_reverse(dev, devn, &devices_subsys.list,
+-				kobj.entry) {
+-		if (dev->bus && dev->bus->shutdown) {
+-			dev_dbg(dev, "shutdown\n");
+-			dev->bus->shutdown(dev);
+-		} else if (dev->driver && dev->driver->shutdown) {
+-			dev_dbg(dev, "shutdown\n");
+-			dev->driver->shutdown(dev);
+-		}
+-	}
+-}
+-
+diff --git a/drivers/base/sys.c b/drivers/base/sys.c
+index ac7ff6d..2f79c55 100644
+--- a/drivers/base/sys.c
++++ b/drivers/base/sys.c
+@@ -25,8 +25,6 @@
+ 
+ #include "base.h"
+ 
+-extern struct kset devices_subsys;
+-
+ #define to_sysdev(k) container_of(k, struct sys_device, kobj)
+ #define to_sysdev_attr(a) container_of(a, struct sysdev_attribute, attr)
+ 
+@@ -128,18 +126,17 @@ void sysdev_class_remove_file(struct sysdev_class *c,
+ }
+ EXPORT_SYMBOL_GPL(sysdev_class_remove_file);
+ 
+-/*
+- * declare system_subsys
+- */
+-static decl_subsys(system, &ktype_sysdev_class, NULL);
++static struct kset *system_kset;
+ 
+ int sysdev_class_register(struct sysdev_class * cls)
+ {
+ 	pr_debug("Registering sysdev class '%s'\n",
+ 		 kobject_name(&cls->kset.kobj));
+ 	INIT_LIST_HEAD(&cls->drivers);
+-	cls->kset.kobj.parent = &system_subsys.kobj;
+-	cls->kset.kobj.kset = &system_subsys;
++	cls->kset.kobj.parent = &system_kset->kobj;
++	cls->kset.kobj.ktype = &ktype_sysdev_class;
++	cls->kset.kobj.kset = system_kset;
++	kobject_set_name(&cls->kset.kobj, cls->name);
+ 	return kset_register(&cls->kset);
+ }
+ 
+@@ -228,20 +225,15 @@ int sysdev_register(struct sys_device * sysdev)
+ 	if (!cls)
+ 		return -EINVAL;
+ 
++	pr_debug("Registering sys device '%s'\n", kobject_name(&sysdev->kobj));
++
+ 	/* Make sure the kset is set */
+ 	sysdev->kobj.kset = &cls->kset;
+ 
+-	/* But make sure we point to the right type for sysfs translation */
+-	sysdev->kobj.ktype = &ktype_sysdev;
+-	error = kobject_set_name(&sysdev->kobj, "%s%d",
+-			 kobject_name(&cls->kset.kobj), sysdev->id);
+-	if (error)
+-		return error;
+-
+-	pr_debug("Registering sys device '%s'\n", kobject_name(&sysdev->kobj));
+-
+ 	/* Register the object */
+-	error = kobject_register(&sysdev->kobj);
++	error = kobject_init_and_add(&sysdev->kobj, &ktype_sysdev, NULL,
++				     "%s%d", kobject_name(&cls->kset.kobj),
++				     sysdev->id);
+ 
+ 	if (!error) {
+ 		struct sysdev_driver * drv;
+@@ -258,6 +250,7 @@ int sysdev_register(struct sys_device * sysdev)
+ 		}
+ 		mutex_unlock(&sysdev_drivers_lock);
+ 	}
++	kobject_uevent(&sysdev->kobj, KOBJ_ADD);
+ 	return error;
+ }
+ 
+@@ -272,7 +265,7 @@ void sysdev_unregister(struct sys_device * sysdev)
+ 	}
+ 	mutex_unlock(&sysdev_drivers_lock);
+ 
+-	kobject_unregister(&sysdev->kobj);
++	kobject_put(&sysdev->kobj);
+ }
+ 
+ 
+@@ -298,8 +291,7 @@ void sysdev_shutdown(void)
+ 	pr_debug("Shutting Down System Devices\n");
+ 
+ 	mutex_lock(&sysdev_drivers_lock);
+-	list_for_each_entry_reverse(cls, &system_subsys.list,
+-				    kset.kobj.entry) {
++	list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
+ 		struct sys_device * sysdev;
+ 
+ 		pr_debug("Shutting down type '%s':\n",
+@@ -361,9 +353,7 @@ int sysdev_suspend(pm_message_t state)
+ 
+ 	pr_debug("Suspending System Devices\n");
+ 
+-	list_for_each_entry_reverse(cls, &system_subsys.list,
+-				    kset.kobj.entry) {
+-
++	list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
+ 		pr_debug("Suspending type '%s':\n",
+ 			 kobject_name(&cls->kset.kobj));
+ 
+@@ -414,8 +404,7 @@ aux_driver:
+ 	}
+ 
+ 	/* resume other classes */
+-	list_for_each_entry_continue(cls, &system_subsys.list,
+-					kset.kobj.entry) {
++	list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) {
+ 		list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
+ 			pr_debug(" %s\n", kobject_name(&err_dev->kobj));
+ 			__sysdev_resume(err_dev);
+@@ -440,7 +429,7 @@ int sysdev_resume(void)
+ 
+ 	pr_debug("Resuming System Devices\n");
+ 
+-	list_for_each_entry(cls, &system_subsys.list, kset.kobj.entry) {
++	list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {
+ 		struct sys_device * sysdev;
+ 
+ 		pr_debug("Resuming type '%s':\n",
+@@ -458,8 +447,10 @@ int sysdev_resume(void)
+ 
+ int __init system_bus_init(void)
+ {
+-	system_subsys.kobj.parent = &devices_subsys.kobj;
+-	return subsystem_register(&system_subsys);
++	system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
++	if (!system_kset)
++		return -ENOMEM;
++	return 0;
+ }
+ 
+ EXPORT_SYMBOL_GPL(sysdev_register);
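
The same kobject API conversion recurs below in pktcdvd, so here is a compact restatement with an invented object and ktype (only example_kobj/example_ktype are made up; the calls are the real interfaces used in the hunk): kobject_register()/kobject_unregister() give way to explicit init-and-add, an explicit KOBJ_ADD uevent, and a plain reference drop.

static struct kobject example_kobj;		/* invented object */
extern struct kobj_type example_ktype;		/* invented ktype */

static int example_add(struct kobject *parent)
{
	int error;

	/* was: kobject_set_name(); kobj.parent = ...; kobject_register() */
	error = kobject_init_and_add(&example_kobj, &example_ktype,
				     parent, "example%d", 0);
	if (error) {
		kobject_put(&example_kobj);	/* drop the reference taken by init */
		return error;
	}
	/* emitting the KOBJ_ADD uevent is now the caller's responsibility */
	kobject_uevent(&example_kobj, KOBJ_ADD);
	return 0;
}

static void example_remove(void)
{
	/* was: kobject_unregister(); now a plain reference drop */
	kobject_put(&example_kobj);
}
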
+diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
+index 9030c37..cd03473 100644
+--- a/drivers/block/DAC960.c
++++ b/drivers/block/DAC960.c
+@@ -3455,19 +3455,12 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
+ 						 bool SuccessfulIO)
+ {
+ 	struct request *Request = Command->Request;
+-	int UpToDate;
+-
+-	UpToDate = 0;
+-	if (SuccessfulIO)
+-		UpToDate = 1;
++	int Error = SuccessfulIO ? 0 : -EIO;
+ 
+ 	pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
+ 		Command->SegmentCount, Command->DmaDirection);
+ 
+-	 if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
+-		add_disk_randomness(Request->rq_disk);
+- 	 	end_that_request_last(Request, UpToDate);
+-
++	 if (!__blk_end_request(Request, Error, Command->BlockCount << 9)) {
+ 		if (Command->Completion) {
+ 			complete(Command->Completion);
+ 			Command->Completion = NULL;
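
The DAC960 hunk above is the first of many block drivers below (cciss, cpqarray, floppy, nbd, ps3disk, sunvdc, sx8, ub, viodasd, xen-blkfront) converted to the new request-completion API. An illustrative helper restating the pattern (the driver names and the helper itself are invented; __blk_end_request() is the real call): the old end_that_request_first()/add_disk_randomness()/end_that_request_last() sequence, driven by a sector count and an uptodate flag, collapses into one call that takes an errno and a byte count.

static void example_complete(struct request *req, bool success,
			     unsigned int nsectors)
{
	int error = success ? 0 : -EIO;

	/* Caller must hold the queue lock, as with the old pair;
	 * blk_end_request() is the variant that takes the lock itself.
	 * A nonzero return means the request still has bytes outstanding. */
	if (__blk_end_request(req, error, nsectors << 9))
		printk(KERN_DEBUG "request %p only partially completed\n", req);
}
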
+diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
+index 4d0119e..f212285 100644
+--- a/drivers/block/Kconfig
++++ b/drivers/block/Kconfig
+@@ -105,6 +105,17 @@ config PARIDE
+ 	  "MicroSolutions backpack protocol", "DataStor Commuter protocol"
+ 	  etc.).
+ 
++config GDROM
++	tristate "SEGA Dreamcast GD-ROM drive"
++	depends on SH_DREAMCAST
++	help
++	  A standard SEGA Dreamcast comes with a modified CD ROM drive called a
++	  "GD-ROM" by SEGA to signify it is capable of reading special disks
++	  with up to 1 GB of data. This drive will also read standard CD ROM
++	  disks. Select this option to access any disks in your GD ROM drive.
++	  Most users will want to say "Y" here.
++	  You can also build this as a module which will be called gdrom.ko
++
+ source "drivers/block/paride/Kconfig"
+ 
+ config BLK_CPQ_DA
+diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
+index ad00b3d..826d123 100644
+--- a/drivers/block/aoe/aoeblk.c
++++ b/drivers/block/aoe/aoeblk.c
+@@ -15,8 +15,10 @@
+ 
+ static struct kmem_cache *buf_pool_cache;
+ 
+-static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
++static ssize_t aoedisk_show_state(struct device *dev,
++				  struct device_attribute *attr, char *page)
+ {
++	struct gendisk *disk = dev_to_disk(dev);
+ 	struct aoedev *d = disk->private_data;
+ 
+ 	return snprintf(page, PAGE_SIZE,
+@@ -26,50 +28,47 @@ static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
+ 			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
+ 	/* I'd rather see nopen exported so we can ditch closewait */
+ }
+-static ssize_t aoedisk_show_mac(struct gendisk * disk, char *page)
++static ssize_t aoedisk_show_mac(struct device *dev,
++				struct device_attribute *attr, char *page)
+ {
++	struct gendisk *disk = dev_to_disk(dev);
+ 	struct aoedev *d = disk->private_data;
+ 
+ 	return snprintf(page, PAGE_SIZE, "%012llx\n",
+ 			(unsigned long long)mac_addr(d->addr));
+ }
+-static ssize_t aoedisk_show_netif(struct gendisk * disk, char *page)
++static ssize_t aoedisk_show_netif(struct device *dev,
++				  struct device_attribute *attr, char *page)
+ {
++	struct gendisk *disk = dev_to_disk(dev);
+ 	struct aoedev *d = disk->private_data;
+ 
+ 	return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name);
+ }
+ /* firmware version */
+-static ssize_t aoedisk_show_fwver(struct gendisk * disk, char *page)
++static ssize_t aoedisk_show_fwver(struct device *dev,
++				  struct device_attribute *attr, char *page)
+ {
++	struct gendisk *disk = dev_to_disk(dev);
+ 	struct aoedev *d = disk->private_data;
+ 
+ 	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
+ }
+ 
+-static struct disk_attribute disk_attr_state = {
+-	.attr = {.name = "state", .mode = S_IRUGO },
+-	.show = aoedisk_show_state
+-};
+-static struct disk_attribute disk_attr_mac = {
+-	.attr = {.name = "mac", .mode = S_IRUGO },
+-	.show = aoedisk_show_mac
+-};
+-static struct disk_attribute disk_attr_netif = {
+-	.attr = {.name = "netif", .mode = S_IRUGO },
+-	.show = aoedisk_show_netif
+-};
+-static struct disk_attribute disk_attr_fwver = {
+-	.attr = {.name = "firmware-version", .mode = S_IRUGO },
+-	.show = aoedisk_show_fwver
++static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
++static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
++static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
++static struct device_attribute dev_attr_firmware_version = {
++	.attr = { .name = "firmware-version", .mode = S_IRUGO, .owner = THIS_MODULE },
++	.show = aoedisk_show_fwver,
+ };
+ 
+ static struct attribute *aoe_attrs[] = {
+-	&disk_attr_state.attr,
+-	&disk_attr_mac.attr,
+-	&disk_attr_netif.attr,
+-	&disk_attr_fwver.attr,
+-	NULL
++	&dev_attr_state.attr,
++	&dev_attr_mac.attr,
++	&dev_attr_netif.attr,
++	&dev_attr_firmware_version.attr,
++	NULL,
+ };
+ 
+ static const struct attribute_group attr_group = {
+@@ -79,12 +78,12 @@ static const struct attribute_group attr_group = {
+ static int
+ aoedisk_add_sysfs(struct aoedev *d)
+ {
+-	return sysfs_create_group(&d->gd->kobj, &attr_group);
++	return sysfs_create_group(&d->gd->dev.kobj, &attr_group);
+ }
+ void
+ aoedisk_rm_sysfs(struct aoedev *d)
+ {
+-	sysfs_remove_group(&d->gd->kobj, &attr_group);
++	sysfs_remove_group(&d->gd->dev.kobj, &attr_group);
+ }
+ 
+ static int
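
Shape of the sysfs conversion in aoeblk.c above, restated with an invented attribute name: gendisk-specific struct disk_attribute show() methods become ordinary device attributes hanging off the disk's embedded struct device, and the callback recovers the gendisk with dev_to_disk().

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);

	return snprintf(page, PAGE_SIZE, "%s\n", disk->disk_name);
}
static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

/* registered against the disk's embedded device, e.g.
 *	sysfs_create_file(&disk->dev.kobj, &dev_attr_example.attr);
 */
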
+diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
+index 39e563e..d5480e3 100644
+--- a/drivers/block/aoe/aoechr.c
++++ b/drivers/block/aoe/aoechr.c
+@@ -259,9 +259,8 @@ aoechr_init(void)
+ 		return PTR_ERR(aoe_class);
+ 	}
+ 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
+-		class_device_create(aoe_class, NULL,
+-					MKDEV(AOE_MAJOR, chardevs[i].minor),
+-					NULL, chardevs[i].name);
++		device_create(aoe_class, NULL,
++			      MKDEV(AOE_MAJOR, chardevs[i].minor), chardevs[i].name);
+ 
+ 	return 0;
+ }
+@@ -272,7 +271,7 @@ aoechr_exit(void)
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
+-		class_device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
++		device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
+ 	class_destroy(aoe_class);
+ 	unregister_chrdev(AOE_MAJOR, "aoechr");
+ }
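
The aoechr.c hunk above, and the paride pg/pt hunks below, all apply the same char-device node conversion. A minimal sketch under invented names (example_class and EXAMPLE_MAJOR are placeholders; device_create()/device_destroy() are the real calls): class_device_create()/class_device_destroy() are replaced, and this generation of device_create() takes only the class, parent device, devt, and a name format.

#define EXAMPLE_MAJOR	240			/* placeholder major */
static struct class *example_class;		/* hypothetical class */

static void example_make_node(int minor)
{
	device_create(example_class, NULL, MKDEV(EXAMPLE_MAJOR, minor),
		      "example%d", minor);
}

static void example_remove_node(int minor)
{
	device_destroy(example_class, MKDEV(EXAMPLE_MAJOR, minor));
}
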
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index 509b649..855ce8e 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -1187,17 +1187,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
+ 	}
+ }
+ 
+-static inline void complete_buffers(struct bio *bio, int status)
+-{
+-	while (bio) {
+-		struct bio *xbh = bio->bi_next;
+-
+-		bio->bi_next = NULL;
+-		bio_endio(bio, status ? 0 : -EIO);
+-		bio = xbh;
+-	}
+-}
+-
+ static void cciss_check_queues(ctlr_info_t *h)
+ {
+ 	int start_queue = h->next_to_run;
+@@ -1263,21 +1252,14 @@ static void cciss_softirq_done(struct request *rq)
+ 		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
+ 	}
+ 
+-	complete_buffers(rq->bio, (rq->errors == 0));
+-
+-	if (blk_fs_request(rq)) {
+-		const int rw = rq_data_dir(rq);
+-
+-		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
+-	}
+-
+ #ifdef CCISS_DEBUG
+ 	printk("Done with %p\n", rq);
+ #endif				/* CCISS_DEBUG */
+ 
+-	add_disk_randomness(rq->rq_disk);
++	if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
++		BUG();
++
+ 	spin_lock_irqsave(&h->lock, flags);
+-	end_that_request_last(rq, (rq->errors == 0));
+ 	cmd_free(h, cmd, 1);
+ 	cciss_check_queues(h);
+ 	spin_unlock_irqrestore(&h->lock, flags);
+@@ -2542,9 +2524,7 @@ after_error_processing:
+ 		resend_cciss_cmd(h, cmd);
+ 		return;
+ 	}
+-	cmd->rq->data_len = 0;
+ 	cmd->rq->completion_data = cmd;
+-	blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
+ 	blk_complete_request(cmd->rq);
+ }
+ 
+diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
+index c8132d9..6919918 100644
+--- a/drivers/block/cpqarray.c
++++ b/drivers/block/cpqarray.c
+@@ -167,7 +167,6 @@ static void start_io(ctlr_info_t *h);
+ 
+ static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
+ static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
+-static inline void complete_buffers(struct bio *bio, int ok);
+ static inline void complete_command(cmdlist_t *cmd, int timeout);
+ 
+ static irqreturn_t do_ida_intr(int irq, void *dev_id);
+@@ -980,26 +979,13 @@ static void start_io(ctlr_info_t *h)
+ 	}
+ }
+ 
+-static inline void complete_buffers(struct bio *bio, int ok)
+-{
+-	struct bio *xbh;
+-
+-	while (bio) {
+-		xbh = bio->bi_next;
+-		bio->bi_next = NULL;
+-		
+-		bio_endio(bio, ok ? 0 : -EIO);
+-
+-		bio = xbh;
+-	}
+-}
+ /*
+  * Mark all buffers that cmd was responsible for
+  */
+ static inline void complete_command(cmdlist_t *cmd, int timeout)
+ {
+ 	struct request *rq = cmd->rq;
+-	int ok=1;
++	int error = 0;
+ 	int i, ddir;
+ 
+ 	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
+@@ -1011,16 +997,17 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
+ 	if (cmd->req.hdr.rcode & RCODE_FATAL) {
+ 		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
+ 				cmd->ctlr, cmd->hdr.unit);
+-		ok = 0;
++		error = -EIO;
+ 	}
+ 	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
+ 				printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
+ 				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
+ 				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
+ 				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
+-		ok = 0;	
++		error = -EIO;
+ 	}
+-	if (timeout) ok = 0;
++	if (timeout)
++		error = -EIO;
+ 	/* unmap the DMA mapping for all the scatter gather elements */
+ 	if (cmd->req.hdr.cmd == IDA_READ)
+ 		ddir = PCI_DMA_FROMDEVICE;
+@@ -1030,18 +1017,9 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
+                 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
+ 				cmd->req.sg[i].size, ddir);
+ 
+-	complete_buffers(rq->bio, ok);
+-
+-	if (blk_fs_request(rq)) {
+-		const int rw = rq_data_dir(rq);
+-
+-		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
+-	}
+-
+-	add_disk_randomness(rq->rq_disk);
+-
+ 	DBGPX(printk("Done with %p\n", rq););
+-	end_that_request_last(rq, ok ? 1 : -EIO);
++	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
++		BUG();
+ }
+ 
+ /*
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 639ed14..32c79a5 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -2287,21 +2287,19 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
+  * =============================
+  */
+ 
+-static void floppy_end_request(struct request *req, int uptodate)
++static void floppy_end_request(struct request *req, int error)
+ {
+ 	unsigned int nr_sectors = current_count_sectors;
++	unsigned int drive = (unsigned long)req->rq_disk->private_data;
+ 
+ 	/* current_count_sectors can be zero if transfer failed */
+-	if (!uptodate)
++	if (error)
+ 		nr_sectors = req->current_nr_sectors;
+-	if (end_that_request_first(req, uptodate, nr_sectors))
++	if (__blk_end_request(req, error, nr_sectors << 9))
+ 		return;
+-	add_disk_randomness(req->rq_disk);
+-	floppy_off((long)req->rq_disk->private_data);
+-	blkdev_dequeue_request(req);
+-	end_that_request_last(req, uptodate);
+ 
+ 	/* We're done with the request */
++	floppy_off(drive);
+ 	current_req = NULL;
+ }
+ 
+@@ -2332,7 +2330,7 @@ static void request_done(int uptodate)
+ 
+ 		/* unlock chained buffers */
+ 		spin_lock_irqsave(q->queue_lock, flags);
+-		floppy_end_request(req, 1);
++		floppy_end_request(req, 0);
+ 		spin_unlock_irqrestore(q->queue_lock, flags);
+ 	} else {
+ 		if (rq_data_dir(req) == WRITE) {
+@@ -2346,7 +2344,7 @@ static void request_done(int uptodate)
+ 			DRWE->last_error_generation = DRS->generation;
+ 		}
+ 		spin_lock_irqsave(q->queue_lock, flags);
+-		floppy_end_request(req, 0);
++		floppy_end_request(req, -EIO);
+ 		spin_unlock_irqrestore(q->queue_lock, flags);
+ 	}
+ }
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index b4c0888..ae31060 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -100,17 +100,15 @@ static const char *nbdcmd_to_ascii(int cmd)
+ 
+ static void nbd_end_request(struct request *req)
+ {
+-	int uptodate = (req->errors == 0) ? 1 : 0;
++	int error = req->errors ? -EIO : 0;
+ 	struct request_queue *q = req->q;
+ 	unsigned long flags;
+ 
+ 	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
+-			req, uptodate? "done": "failed");
++			req, error ? "failed" : "done");
+ 
+ 	spin_lock_irqsave(q->queue_lock, flags);
+-	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
+-		end_that_request_last(req, uptodate);
+-	}
++	__blk_end_request(req, error, req->nr_sectors << 9);
+ 	spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+ 
+@@ -375,14 +373,17 @@ harderror:
+ 	return NULL;
+ }
+ 
+-static ssize_t pid_show(struct gendisk *disk, char *page)
++static ssize_t pid_show(struct device *dev,
++			struct device_attribute *attr, char *buf)
+ {
+-	return sprintf(page, "%ld\n",
++	struct gendisk *disk = dev_to_disk(dev);
 +
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Galois/Counter Mode");
-+MODULE_AUTHOR("Mikko Herranen <mh1 at iki.fi>");
-+MODULE_ALIAS("gcm_base");
-+MODULE_ALIAS("rfc4106");
-diff --git a/crypto/hmac.c b/crypto/hmac.c
-index 0f05be7..a1d016a 100644
---- a/crypto/hmac.c
-+++ b/crypto/hmac.c
-@@ -17,6 +17,7 @@
-  */
++	return sprintf(buf, "%ld\n",
+ 		(long) ((struct nbd_device *)disk->private_data)->pid);
+ }
  
- #include <crypto/algapi.h>
-+#include <crypto/scatterwalk.h>
- #include <linux/err.h>
- #include <linux/init.h>
- #include <linux/kernel.h>
-@@ -160,7 +161,7 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
+-static struct disk_attribute pid_attr = {
+-	.attr = { .name = "pid", .mode = S_IRUGO },
++static struct device_attribute pid_attr = {
++	.attr = { .name = "pid", .mode = S_IRUGO, .owner = THIS_MODULE },
+ 	.show = pid_show,
+ };
  
- 	sg_init_table(sg1, 2);
- 	sg_set_buf(sg1, ipad, bs);
--	sg_set_page(&sg1[1], (void *) sg, 0, 0);
-+	scatterwalk_sg_chain(sg1, 2, sg);
+@@ -394,7 +395,7 @@ static int nbd_do_it(struct nbd_device *lo)
+ 	BUG_ON(lo->magic != LO_MAGIC);
  
- 	sg_init_table(sg2, 1);
- 	sg_set_buf(sg2, opad, bs + ds);
-diff --git a/crypto/internal.h b/crypto/internal.h
-index abb01f7..32f4c21 100644
---- a/crypto/internal.h
-+++ b/crypto/internal.h
-@@ -25,7 +25,6 @@
- #include <linux/notifier.h>
- #include <linux/rwsem.h>
- #include <linux/slab.h>
--#include <asm/kmap_types.h>
+ 	lo->pid = current->pid;
+-	ret = sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
++	ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr);
+ 	if (ret) {
+ 		printk(KERN_ERR "nbd: sysfs_create_file failed!");
+ 		return ret;
+@@ -403,7 +404,7 @@ static int nbd_do_it(struct nbd_device *lo)
+ 	while ((req = nbd_read_stat(lo)) != NULL)
+ 		nbd_end_request(req);
  
- /* Crypto notification events. */
- enum {
-@@ -50,34 +49,6 @@ extern struct list_head crypto_alg_list;
- extern struct rw_semaphore crypto_alg_sem;
- extern struct blocking_notifier_head crypto_chain;
+-	sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
++	sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr);
+ 	return 0;
+ }
  
--static inline enum km_type crypto_kmap_type(int out)
--{
--	enum km_type type;
--
--	if (in_softirq())
--		type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
--	else
--		type = out * (KM_USER1 - KM_USER0) + KM_USER0;
--
--	return type;
--}
--
--static inline void *crypto_kmap(struct page *page, int out)
--{
--	return kmap_atomic(page, crypto_kmap_type(out));
--}
--
--static inline void crypto_kunmap(void *vaddr, int out)
--{
--	kunmap_atomic(vaddr, crypto_kmap_type(out));
--}
--
--static inline void crypto_yield(u32 flags)
--{
--	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
--		cond_resched();
--}
--
- #ifdef CONFIG_PROC_FS
- void __init crypto_init_proc(void);
- void __exit crypto_exit_proc(void);
-@@ -122,6 +93,8 @@ void crypto_exit_digest_ops(struct crypto_tfm *tfm);
- void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
- void crypto_exit_compress_ops(struct crypto_tfm *tfm);
+diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
+index d89e7d3..ab86e23 100644
+--- a/drivers/block/paride/pg.c
++++ b/drivers/block/paride/pg.c
+@@ -676,8 +676,8 @@ static int __init pg_init(void)
+ 	for (unit = 0; unit < PG_UNITS; unit++) {
+ 		struct pg *dev = &devices[unit];
+ 		if (dev->present)
+-			class_device_create(pg_class, NULL, MKDEV(major, unit),
+-					NULL, "pg%u", unit);
++			device_create(pg_class, NULL, MKDEV(major, unit),
++				      "pg%u", unit);
+ 	}
+ 	err = 0;
+ 	goto out;
+@@ -695,7 +695,7 @@ static void __exit pg_exit(void)
+ 	for (unit = 0; unit < PG_UNITS; unit++) {
+ 		struct pg *dev = &devices[unit];
+ 		if (dev->present)
+-			class_device_destroy(pg_class, MKDEV(major, unit));
++			device_destroy(pg_class, MKDEV(major, unit));
+ 	}
+ 	class_destroy(pg_class);
+ 	unregister_chrdev(major, name);
+diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
+index b91accf..76096ca 100644
+--- a/drivers/block/paride/pt.c
++++ b/drivers/block/paride/pt.c
+@@ -972,10 +972,10 @@ static int __init pt_init(void)
  
-+void crypto_larval_kill(struct crypto_alg *alg);
-+struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
- void crypto_larval_error(const char *name, u32 type, u32 mask);
+ 	for (unit = 0; unit < PT_UNITS; unit++)
+ 		if (pt[unit].present) {
+-			class_device_create(pt_class, NULL, MKDEV(major, unit),
+-					NULL, "pt%d", unit);
+-			class_device_create(pt_class, NULL, MKDEV(major, unit + 128),
+-					NULL, "pt%dn", unit);
++			device_create(pt_class, NULL, MKDEV(major, unit),
++				      "pt%d", unit);
++			device_create(pt_class, NULL, MKDEV(major, unit + 128),
++				      "pt%dn", unit);
+ 		}
+ 	goto out;
  
- void crypto_shoot_alg(struct crypto_alg *alg);
-diff --git a/crypto/lzo.c b/crypto/lzo.c
-new file mode 100644
-index 0000000..48c3288
---- /dev/null
-+++ b/crypto/lzo.c
-@@ -0,0 +1,106 @@
-+/*
-+ * Cryptographic API.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License version 2 as published by
-+ * the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc., 51
-+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/crypto.h>
-+#include <linux/vmalloc.h>
-+#include <linux/lzo.h>
-+
-+struct lzo_ctx {
-+	void *lzo_comp_mem;
-+};
-+
-+static int lzo_init(struct crypto_tfm *tfm)
-+{
-+	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
-+	if (!ctx->lzo_comp_mem)
-+		return -ENOMEM;
-+
-+	return 0;
-+}
-+
-+static void lzo_exit(struct crypto_tfm *tfm)
-+{
-+	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	vfree(ctx->lzo_comp_mem);
-+}
-+
-+static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
-+			    unsigned int slen, u8 *dst, unsigned int *dlen)
-+{
-+	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-+	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
-+	int err;
-+
-+	err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem);
-+
-+	if (err != LZO_E_OK)
-+		return -EINVAL;
-+
-+	*dlen = tmp_len;
-+	return 0;
-+}
-+
-+static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
-+			      unsigned int slen, u8 *dst, unsigned int *dlen)
-+{
-+	int err;
-+	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
-+
-+	err = lzo1x_decompress_safe(src, slen, dst, &tmp_len);
-+
-+	if (err != LZO_E_OK)
-+		return -EINVAL;
-+
-+	*dlen = tmp_len;
-+	return 0;
-+
-+}
-+
-+static struct crypto_alg alg = {
-+	.cra_name		= "lzo",
-+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-+	.cra_ctxsize		= sizeof(struct lzo_ctx),
-+	.cra_module		= THIS_MODULE,
-+	.cra_list		= LIST_HEAD_INIT(alg.cra_list),
-+	.cra_init		= lzo_init,
-+	.cra_exit		= lzo_exit,
-+	.cra_u			= { .compress = {
-+	.coa_compress 		= lzo_compress,
-+	.coa_decompress  	= lzo_decompress } }
-+};
-+
-+static int __init init(void)
-+{
-+	return crypto_register_alg(&alg);
-+}
-+
-+static void __exit fini(void)
-+{
-+	crypto_unregister_alg(&alg);
-+}
-+
-+module_init(init);
-+module_exit(fini);
+@@ -990,8 +990,8 @@ static void __exit pt_exit(void)
+ 	int unit;
+ 	for (unit = 0; unit < PT_UNITS; unit++)
+ 		if (pt[unit].present) {
+-			class_device_destroy(pt_class, MKDEV(major, unit));
+-			class_device_destroy(pt_class, MKDEV(major, unit + 128));
++			device_destroy(pt_class, MKDEV(major, unit));
++			device_destroy(pt_class, MKDEV(major, unit + 128));
+ 		}
+ 	class_destroy(pt_class);
+ 	unregister_chrdev(major, name);
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index 3535ef8..e9de171 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -110,17 +110,18 @@ static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
+ 					struct kobj_type* ktype)
+ {
+ 	struct pktcdvd_kobj *p;
++	int error;
 +
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("LZO Compression Algorithm");
-diff --git a/crypto/pcbc.c b/crypto/pcbc.c
-index c3ed8a1..fe70477 100644
---- a/crypto/pcbc.c
-+++ b/crypto/pcbc.c
-@@ -24,7 +24,6 @@
+ 	p = kzalloc(sizeof(*p), GFP_KERNEL);
+ 	if (!p)
+ 		return NULL;
+-	kobject_set_name(&p->kobj, "%s", name);
+-	p->kobj.parent = parent;
+-	p->kobj.ktype = ktype;
+ 	p->pd = pd;
+-	if (kobject_register(&p->kobj) != 0) {
++	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
++	if (error) {
+ 		kobject_put(&p->kobj);
+ 		return NULL;
+ 	}
++	kobject_uevent(&p->kobj, KOBJ_ADD);
+ 	return p;
+ }
+ /*
+@@ -129,7 +130,7 @@ static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
+ static void pkt_kobj_remove(struct pktcdvd_kobj *p)
+ {
+ 	if (p)
+-		kobject_unregister(&p->kobj);
++		kobject_put(&p->kobj);
+ }
+ /*
+  * default release function for pktcdvd kernel objects.
+@@ -301,18 +302,16 @@ static struct kobj_type kobj_pkt_type_wqueue = {
+ static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
+ {
+ 	if (class_pktcdvd) {
+-		pd->clsdev = class_device_create(class_pktcdvd,
+-					NULL, pd->pkt_dev,
+-					NULL, "%s", pd->name);
+-		if (IS_ERR(pd->clsdev))
+-			pd->clsdev = NULL;
++		pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, "%s", pd->name);
++		if (IS_ERR(pd->dev))
++			pd->dev = NULL;
+ 	}
+-	if (pd->clsdev) {
++	if (pd->dev) {
+ 		pd->kobj_stat = pkt_kobj_create(pd, "stat",
+-					&pd->clsdev->kobj,
++					&pd->dev->kobj,
+ 					&kobj_pkt_type_stat);
+ 		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
+-					&pd->clsdev->kobj,
++					&pd->dev->kobj,
+ 					&kobj_pkt_type_wqueue);
+ 	}
+ }
+@@ -322,7 +321,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
+ 	pkt_kobj_remove(pd->kobj_stat);
+ 	pkt_kobj_remove(pd->kobj_wqueue);
+ 	if (class_pktcdvd)
+-		class_device_destroy(class_pktcdvd, pd->pkt_dev);
++		device_destroy(class_pktcdvd, pd->pkt_dev);
+ }
  
- struct crypto_pcbc_ctx {
- 	struct crypto_cipher *child;
--	void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
- };
  
- static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
-@@ -45,9 +44,7 @@ static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
+diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
+index e354bfc..7483f94 100644
+--- a/drivers/block/ps3disk.c
++++ b/drivers/block/ps3disk.c
+@@ -229,7 +229,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
+ 	struct ps3_storage_device *dev = data;
+ 	struct ps3disk_private *priv;
+ 	struct request *req;
+-	int res, read, uptodate;
++	int res, read, error;
+ 	u64 tag, status;
+ 	unsigned long num_sectors;
+ 	const char *op;
+@@ -270,21 +270,17 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
+ 	if (status) {
+ 		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+ 			__LINE__, op, status);
+-		uptodate = 0;
++		error = -EIO;
+ 	} else {
+ 		dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
+ 			__LINE__, op);
+-		uptodate = 1;
++		error = 0;
+ 		if (read)
+ 			ps3disk_scatter_gather(dev, req, 0);
+ 	}
  
- static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
- 				       struct blkcipher_walk *walk,
--				       struct crypto_cipher *tfm,
--				       void (*xor)(u8 *, const u8 *,
--						   unsigned int))
-+				       struct crypto_cipher *tfm)
+ 	spin_lock(&priv->lock);
+-	if (!end_that_request_first(req, uptodate, num_sectors)) {
+-		add_disk_randomness(req->rq_disk);
+-		blkdev_dequeue_request(req);
+-		end_that_request_last(req, uptodate);
+-	}
++	__blk_end_request(req, error, num_sectors << 9);
+ 	priv->req = NULL;
+ 	ps3disk_do_request(dev, priv->queue);
+ 	spin_unlock(&priv->lock);
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index fac4c6c..66e3015 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -212,12 +212,9 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
+ 	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
+ }
+ 
+-static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
++static void vdc_end_request(struct request *req, int error, int num_sectors)
  {
- 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- 		crypto_cipher_alg(tfm)->cia_encrypt;
-@@ -58,10 +55,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
- 	u8 *iv = walk->iv;
+-	if (end_that_request_first(req, uptodate, num_sectors))
+-		return;
+-	add_disk_randomness(req->rq_disk);
+-	end_that_request_last(req, uptodate);
++	__blk_end_request(req, error, num_sectors << 9);
+ }
  
- 	do {
--		xor(iv, src, bsize);
-+		crypto_xor(iv, src, bsize);
- 		fn(crypto_cipher_tfm(tfm), dst, iv);
- 		memcpy(iv, dst, bsize);
--		xor(iv, src, bsize);
-+		crypto_xor(iv, src, bsize);
+ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
+@@ -242,7 +239,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
  
- 		src += bsize;
- 		dst += bsize;
-@@ -72,9 +69,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
+ 	rqe->req = NULL;
  
- static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
- 				       struct blkcipher_walk *walk,
--				       struct crypto_cipher *tfm,
--				       void (*xor)(u8 *, const u8 *,
--						   unsigned int))
-+				       struct crypto_cipher *tfm)
+-	vdc_end_request(req, !desc->status, desc->size >> 9);
++	vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
+ 
+ 	if (blk_queue_stopped(port->disk->queue))
+ 		blk_start_queue(port->disk->queue);
+@@ -456,7 +453,7 @@ static void do_vdc_request(struct request_queue *q)
+ 
+ 		blkdev_dequeue_request(req);
+ 		if (__send_request(req) < 0)
+-			vdc_end_request(req, 0, req->hard_nr_sectors);
++			vdc_end_request(req, -EIO, req->hard_nr_sectors);
+ 	}
+ }
+ 
+diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
+index 52dc5e1..cd5674b 100644
+--- a/drivers/block/sx8.c
++++ b/drivers/block/sx8.c
+@@ -744,16 +744,14 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
+ 
+ static inline void carm_end_request_queued(struct carm_host *host,
+ 					   struct carm_request *crq,
+-					   int uptodate)
++					   int error)
  {
- 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- 		crypto_cipher_alg(tfm)->cia_encrypt;
-@@ -86,10 +81,10 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
+ 	struct request *req = crq->rq;
+ 	int rc;
  
- 	do {
- 		memcpy(tmpbuf, src, bsize);
--		xor(iv, tmpbuf, bsize);
-+		crypto_xor(iv, src, bsize);
- 		fn(crypto_cipher_tfm(tfm), src, iv);
--		memcpy(iv, src, bsize);
--		xor(iv, tmpbuf, bsize);
-+		memcpy(iv, tmpbuf, bsize);
-+		crypto_xor(iv, src, bsize);
+-	rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
++	rc = __blk_end_request(req, error, blk_rq_bytes(req));
+ 	assert(rc == 0);
  
- 		src += bsize;
- 	} while ((nbytes -= bsize) >= bsize);
-@@ -107,7 +102,6 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
- 	struct crypto_blkcipher *tfm = desc->tfm;
- 	struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
- 	struct crypto_cipher *child = ctx->child;
--	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
- 	int err;
+-	end_that_request_last(req, uptodate);
+-
+ 	rc = carm_put_request(host, crq);
+ 	assert(rc == 0);
+ }
+@@ -793,9 +791,9 @@ static inline void carm_round_robin(struct carm_host *host)
+ }
  
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
-@@ -115,11 +109,11 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
+ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
+-			int is_ok)
++			       int error)
+ {
+-	carm_end_request_queued(host, crq, is_ok);
++	carm_end_request_queued(host, crq, error);
+ 	if (max_queue == 1)
+ 		carm_round_robin(host);
+ 	else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
+@@ -873,14 +871,14 @@ queue_one_request:
+ 	sg = &crq->sg[0];
+ 	n_elem = blk_rq_map_sg(q, rq, sg);
+ 	if (n_elem <= 0) {
+-		carm_end_rq(host, crq, 0);
++		carm_end_rq(host, crq, -EIO);
+ 		return;		/* request with no s/g entries? */
+ 	}
  
- 	while ((nbytes = walk.nbytes)) {
- 		if (walk.src.virt.addr == walk.dst.virt.addr)
--			nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child,
--							     xor);
-+			nbytes = crypto_pcbc_encrypt_inplace(desc, &walk,
-+							     child);
- 		else
--			nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child,
--							     xor);
-+			nbytes = crypto_pcbc_encrypt_segment(desc, &walk,
-+							     child);
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	/* map scatterlist to PCI bus addresses */
+ 	n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
+ 	if (n_elem <= 0) {
+-		carm_end_rq(host, crq, 0);
++		carm_end_rq(host, crq, -EIO);
+ 		return;		/* request with no s/g entries? */
  	}
+ 	crq->n_elem = n_elem;
+@@ -941,7 +939,7 @@ queue_one_request:
  
-@@ -128,9 +122,7 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
+ static void carm_handle_array_info(struct carm_host *host,
+ 				   struct carm_request *crq, u8 *mem,
+-				   int is_ok)
++				   int error)
+ {
+ 	struct carm_port *port;
+ 	u8 *msg_data = mem + sizeof(struct carm_array_info);
+@@ -952,9 +950,9 @@ static void carm_handle_array_info(struct carm_host *host,
  
- static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
- 				       struct blkcipher_walk *walk,
--				       struct crypto_cipher *tfm,
--				       void (*xor)(u8 *, const u8 *,
--						   unsigned int))
-+				       struct crypto_cipher *tfm)
+ 	DPRINTK("ENTER\n");
+ 
+-	carm_end_rq(host, crq, is_ok);
++	carm_end_rq(host, crq, error);
+ 
+-	if (!is_ok)
++	if (error)
+ 		goto out;
+ 	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
+ 		goto out;
+@@ -1001,7 +999,7 @@ out:
+ 
+ static void carm_handle_scan_chan(struct carm_host *host,
+ 				  struct carm_request *crq, u8 *mem,
+-				  int is_ok)
++				  int error)
  {
- 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- 		crypto_cipher_alg(tfm)->cia_decrypt;
-@@ -142,9 +134,9 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
+ 	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
+ 	unsigned int i, dev_count = 0;
+@@ -1009,9 +1007,9 @@ static void carm_handle_scan_chan(struct carm_host *host,
  
- 	do {
- 		fn(crypto_cipher_tfm(tfm), dst, src);
--		xor(dst, iv, bsize);
-+		crypto_xor(dst, iv, bsize);
- 		memcpy(iv, src, bsize);
--		xor(iv, dst, bsize);
-+		crypto_xor(iv, dst, bsize);
+ 	DPRINTK("ENTER\n");
  
- 		src += bsize;
- 		dst += bsize;
-@@ -157,9 +149,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
+-	carm_end_rq(host, crq, is_ok);
++	carm_end_rq(host, crq, error);
  
- static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
- 				       struct blkcipher_walk *walk,
--				       struct crypto_cipher *tfm,
--				       void (*xor)(u8 *, const u8 *,
--						   unsigned int))
-+				       struct crypto_cipher *tfm)
+-	if (!is_ok) {
++	if (error) {
+ 		new_state = HST_ERROR;
+ 		goto out;
+ 	}
+@@ -1033,23 +1031,23 @@ out:
+ }
+ 
+ static void carm_handle_generic(struct carm_host *host,
+-				struct carm_request *crq, int is_ok,
++				struct carm_request *crq, int error,
+ 				int cur_state, int next_state)
  {
- 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- 		crypto_cipher_alg(tfm)->cia_decrypt;
-@@ -172,9 +162,9 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
- 	do {
- 		memcpy(tmpbuf, src, bsize);
- 		fn(crypto_cipher_tfm(tfm), src, src);
--		xor(src, iv, bsize);
-+		crypto_xor(src, iv, bsize);
- 		memcpy(iv, tmpbuf, bsize);
--		xor(iv, src, bsize);
-+		crypto_xor(iv, src, bsize);
+ 	DPRINTK("ENTER\n");
  
- 		src += bsize;
- 	} while ((nbytes -= bsize) >= bsize);
-@@ -192,7 +182,6 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
- 	struct crypto_blkcipher *tfm = desc->tfm;
- 	struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
- 	struct crypto_cipher *child = ctx->child;
--	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
- 	int err;
+-	carm_end_rq(host, crq, is_ok);
++	carm_end_rq(host, crq, error);
  
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
-@@ -200,48 +189,17 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
+ 	assert(host->state == cur_state);
+-	if (is_ok)
+-		host->state = next_state;
+-	else
++	if (error)
+ 		host->state = HST_ERROR;
++	else
++		host->state = next_state;
+ 	schedule_work(&host->fsm_task);
+ }
  
- 	while ((nbytes = walk.nbytes)) {
- 		if (walk.src.virt.addr == walk.dst.virt.addr)
--			nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child,
--							     xor);
-+			nbytes = crypto_pcbc_decrypt_inplace(desc, &walk,
-+							     child);
- 		else
--			nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child,
--							     xor);
-+			nbytes = crypto_pcbc_decrypt_segment(desc, &walk,
-+							     child);
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ static inline void carm_handle_rw(struct carm_host *host,
+-				  struct carm_request *crq, int is_ok)
++				  struct carm_request *crq, int error)
+ {
+ 	int pci_dir;
+ 
+@@ -1062,7 +1060,7 @@ static inline void carm_handle_rw(struct carm_host *host,
+ 
+ 	pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
+ 
+-	carm_end_rq(host, crq, is_ok);
++	carm_end_rq(host, crq, error);
+ }
+ 
+ static inline void carm_handle_resp(struct carm_host *host,
+@@ -1071,7 +1069,7 @@ static inline void carm_handle_resp(struct carm_host *host,
+ 	u32 handle = le32_to_cpu(ret_handle_le);
+ 	unsigned int msg_idx;
+ 	struct carm_request *crq;
+-	int is_ok = (status == RMSG_OK);
++	int error = (status == RMSG_OK) ? 0 : -EIO;
+ 	u8 *mem;
+ 
+ 	VPRINTK("ENTER, handle == 0x%x\n", handle);
+@@ -1090,7 +1088,7 @@ static inline void carm_handle_resp(struct carm_host *host,
+ 	/* fast path */
+ 	if (likely(crq->msg_type == CARM_MSG_READ ||
+ 		   crq->msg_type == CARM_MSG_WRITE)) {
+-		carm_handle_rw(host, crq, is_ok);
++		carm_handle_rw(host, crq, error);
+ 		return;
  	}
  
- 	return err;
+@@ -1100,7 +1098,7 @@ static inline void carm_handle_resp(struct carm_host *host,
+ 	case CARM_MSG_IOCTL: {
+ 		switch (crq->msg_subtype) {
+ 		case CARM_IOC_SCAN_CHAN:
+-			carm_handle_scan_chan(host, crq, mem, is_ok);
++			carm_handle_scan_chan(host, crq, mem, error);
+ 			break;
+ 		default:
+ 			/* unknown / invalid response */
+@@ -1112,21 +1110,21 @@ static inline void carm_handle_resp(struct carm_host *host,
+ 	case CARM_MSG_MISC: {
+ 		switch (crq->msg_subtype) {
+ 		case MISC_ALLOC_MEM:
+-			carm_handle_generic(host, crq, is_ok,
++			carm_handle_generic(host, crq, error,
+ 					    HST_ALLOC_BUF, HST_SYNC_TIME);
+ 			break;
+ 		case MISC_SET_TIME:
+-			carm_handle_generic(host, crq, is_ok,
++			carm_handle_generic(host, crq, error,
+ 					    HST_SYNC_TIME, HST_GET_FW_VER);
+ 			break;
+ 		case MISC_GET_FW_VER: {
+ 			struct carm_fw_ver *ver = (struct carm_fw_ver *)
+ 				mem + sizeof(struct carm_msg_get_fw_ver);
+-			if (is_ok) {
++			if (!error) {
+ 				host->fw_ver = le32_to_cpu(ver->version);
+ 				host->flags |= (ver->features & FL_FW_VER_MASK);
+ 			}
+-			carm_handle_generic(host, crq, is_ok,
++			carm_handle_generic(host, crq, error,
+ 					    HST_GET_FW_VER, HST_PORT_SCAN);
+ 			break;
+ 		}
+@@ -1140,7 +1138,7 @@ static inline void carm_handle_resp(struct carm_host *host,
+ 	case CARM_MSG_ARRAY: {
+ 		switch (crq->msg_subtype) {
+ 		case CARM_ARRAY_INFO:
+-			carm_handle_array_info(host, crq, mem, is_ok);
++			carm_handle_array_info(host, crq, mem, error);
+ 			break;
+ 		default:
+ 			/* unknown / invalid response */
+@@ -1159,7 +1157,7 @@ static inline void carm_handle_resp(struct carm_host *host,
+ err_out:
+ 	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
+ 	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
+-	carm_end_rq(host, crq, 0);
++	carm_end_rq(host, crq, -EIO);
  }
  
--static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
--{
--	do {
--		*a++ ^= *b++;
--	} while (--bs);
--}
--
--static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
--{
--	u32 *a = (u32 *)dst;
--	u32 *b = (u32 *)src;
--
--	do {
--		*a++ ^= *b++;
--	} while ((bs -= 4));
--}
--
--static void xor_64(u8 *a, const u8 *b, unsigned int bs)
--{
--	((u32 *)a)[0] ^= ((u32 *)b)[0];
--	((u32 *)a)[1] ^= ((u32 *)b)[1];
--}
--
--static void xor_128(u8 *a, const u8 *b, unsigned int bs)
--{
--	((u32 *)a)[0] ^= ((u32 *)b)[0];
--	((u32 *)a)[1] ^= ((u32 *)b)[1];
--	((u32 *)a)[2] ^= ((u32 *)b)[2];
--	((u32 *)a)[3] ^= ((u32 *)b)[3];
--}
--
- static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
+ static inline void carm_handle_responses(struct carm_host *host)
+diff --git a/drivers/block/ub.c b/drivers/block/ub.c
+index 08e909d..c6179d6 100644
+--- a/drivers/block/ub.c
++++ b/drivers/block/ub.c
+@@ -808,16 +808,16 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+ 
+ static void ub_end_rq(struct request *rq, unsigned int scsi_status)
  {
- 	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-@@ -249,22 +207,6 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
- 	struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
- 	struct crypto_cipher *cipher;
+-	int uptodate;
++	int error;
  
--	switch (crypto_tfm_alg_blocksize(tfm)) {
--	case 8:
--		ctx->xor = xor_64;
--		break;
--
--	case 16:
--		ctx->xor = xor_128;
--		break;
--
--	default:
--		if (crypto_tfm_alg_blocksize(tfm) % 4)
--			ctx->xor = xor_byte;
--		else
--			ctx->xor = xor_quad;
--	}
--
- 	cipher = crypto_spawn_cipher(spawn);
- 	if (IS_ERR(cipher))
- 		return PTR_ERR(cipher);
-@@ -304,8 +246,9 @@ static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
- 	inst->alg.cra_alignmask = alg->cra_alignmask;
- 	inst->alg.cra_type = &crypto_blkcipher_type;
+ 	if (scsi_status == 0) {
+-		uptodate = 1;
++		error = 0;
+ 	} else {
+-		uptodate = 0;
++		error = -EIO;
+ 		rq->errors = scsi_status;
+ 	}
+-	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
+-	end_that_request_last(rq, uptodate);
++	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
++		BUG();
+ }
  
--	if (!(alg->cra_blocksize % 4))
--		inst->alg.cra_alignmask |= 3;
-+	/* We access the data as u32s when xoring. */
-+	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
-+
- 	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- 	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- 	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
-diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
+ static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
+diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
+index ab5d404..9e61fca 100644
+--- a/drivers/block/viodasd.c
++++ b/drivers/block/viodasd.c
+@@ -229,13 +229,10 @@ static struct block_device_operations viodasd_fops = {
+ /*
+  * End a request
+  */
+-static void viodasd_end_request(struct request *req, int uptodate,
++static void viodasd_end_request(struct request *req, int error,
+ 		int num_sectors)
+ {
+-	if (end_that_request_first(req, uptodate, num_sectors))
+-		return;
+-	add_disk_randomness(req->rq_disk);
+-	end_that_request_last(req, uptodate);
++	__blk_end_request(req, error, num_sectors << 9);
+ }
+ 
+ /*
+@@ -374,12 +371,12 @@ static void do_viodasd_request(struct request_queue *q)
+ 		blkdev_dequeue_request(req);
+ 		/* check that request contains a valid command */
+ 		if (!blk_fs_request(req)) {
+-			viodasd_end_request(req, 0, req->hard_nr_sectors);
++			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ 			continue;
+ 		}
+ 		/* Try sending the request */
+ 		if (send_request(req) != 0)
+-			viodasd_end_request(req, 0, req->hard_nr_sectors);
++			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ 	}
+ }
+ 
+@@ -591,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
+ 	num_req_outstanding--;
+ 	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
+ 
+-	error = event->xRc != HvLpEvent_Rc_Good;
++	error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
+ 	if (error) {
+ 		const struct vio_error_entry *err;
+ 		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
+@@ -601,7 +598,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
+ 	}
+ 	qlock = req->q->queue_lock;
+ 	spin_lock_irqsave(qlock, irq_flags);
+-	viodasd_end_request(req, !error, num_sect);
++	viodasd_end_request(req, error, num_sect);
+ 	spin_unlock_irqrestore(qlock, irq_flags);
+ 
+ 	/* Finally, try to get more requests off of this device's queue */
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 2bdebcb..8afce67 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -452,7 +452,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+ 	RING_IDX i, rp;
+ 	unsigned long flags;
+ 	struct blkfront_info *info = (struct blkfront_info *)dev_id;
+-	int uptodate;
++	int error;
+ 
+ 	spin_lock_irqsave(&blkif_io_lock, flags);
+ 
+@@ -477,13 +477,13 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+ 
+ 		add_id_to_freelist(info, id);
+ 
+-		uptodate = (bret->status == BLKIF_RSP_OKAY);
++		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ 		switch (bret->operation) {
+ 		case BLKIF_OP_WRITE_BARRIER:
+ 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
+ 				       info->gd->disk_name);
+-				uptodate = -EOPNOTSUPP;
++				error = -EOPNOTSUPP;
+ 				info->feature_barrier = 0;
+ 				xlvbd_barrier(info);
+ 			}
+@@ -494,10 +494,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+ 				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
+ 					"request: %x\n", bret->status);
+ 
+-			ret = end_that_request_first(req, uptodate,
+-				req->hard_nr_sectors);
++			ret = __blk_end_request(req, error, blk_rq_bytes(req));
+ 			BUG_ON(ret);
+-			end_that_request_last(req, uptodate);
+ 			break;
+ 		default:
+ 			BUG();
+diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
+index 82effce..78ebfff 100644
+--- a/drivers/block/xsysace.c
++++ b/drivers/block/xsysace.c
+@@ -483,7 +483,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
+ 	u32 status;
+ 	u16 val;
+ 	int count;
+-	int i;
+ 
+ #if defined(DEBUG)
+ 	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
+@@ -688,7 +687,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
+ 		}
+ 
+ 		/* Transfer the next buffer */
+-		i = 16;
+ 		if (ace->fsm_task == ACE_TASK_WRITE)
+ 			ace->reg_ops->dataout(ace);
+ 		else
+@@ -702,8 +700,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
+ 		}
+ 
+ 		/* bio finished; is there another one? */
+-		i = ace->req->current_nr_sectors;
+-		if (end_that_request_first(ace->req, 1, i)) {
++		if (__blk_end_request(ace->req, 0,
++					blk_rq_cur_bytes(ace->req))) {
+ 			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
+ 			 *      ace->req->hard_nr_sectors,
+ 			 *      ace->req->current_nr_sectors);
+@@ -718,9 +716,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
+ 		break;
+ 
+ 	case ACE_FSM_STATE_REQ_COMPLETE:
+-		/* Complete the block request */
+-		blkdev_dequeue_request(ace->req);
+-		end_that_request_last(ace->req, 1);
+ 		ace->req = NULL;
+ 
+ 		/* Finished request; go to idle state */
+diff --git a/drivers/cdrom/Makefile b/drivers/cdrom/Makefile
+index 774c180..ecf85fd 100644
+--- a/drivers/cdrom/Makefile
++++ b/drivers/cdrom/Makefile
+@@ -11,3 +11,4 @@ obj-$(CONFIG_PARIDE_PCD)	+=		cdrom.o
+ obj-$(CONFIG_CDROM_PKTCDVD)	+=		cdrom.o
+ 
+ obj-$(CONFIG_VIOCD)		+= viocd.o      cdrom.o
++obj-$(CONFIG_GDROM)		+= gdrom.o      cdrom.o
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
 new file mode 100644
-index 0000000..1fa4e4d
+index 0000000..4e2bbcc
 --- /dev/null
-+++ b/crypto/salsa20_generic.c
-@@ -0,0 +1,255 @@
-+/*
-+ * Salsa20: Salsa20 stream cipher algorithm
-+ *
-+ * Copyright (c) 2007 Tan Swee Heng <thesweeheng at gmail.com>
++++ b/drivers/cdrom/gdrom.c
+@@ -0,0 +1,867 @@
++/* GD ROM driver for the SEGA Dreamcast
++ * copyright Adrian McMenamin, 2007
++ * With thanks to Marcus Comstedt and Nathan Keynes
++ * for work in reversing PIO and DMA
 + *
-+ * Derived from:
-+ * - salsa20.c: Public domain C code by Daniel J. Bernstein <djb at cr.yp.to>
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
 + *
-+ * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream
-+ * Cipher Project. It is designed by Daniel J. Bernstein <djb at cr.yp.to>.
-+ * More information about eSTREAM and Salsa20 can be found here:
-+ *   http://www.ecrypt.eu.org/stream/
-+ *   http://cr.yp.to/snuffle.html
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
 + *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
-+ * any later version.
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 + *
 + */
 +
 +#include <linux/init.h>
 +#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/crypto.h>
-+#include <linux/types.h>
-+#include <crypto/algapi.h>
-+#include <asm/byteorder.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++#include <linux/cdrom.h>
++#include <linux/genhd.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/interrupt.h>
++#include <linux/device.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++#include <linux/platform_device.h>
++#include <scsi/scsi.h>
++#include <asm/io.h>
++#include <asm/dma.h>
++#include <asm/delay.h>
++#include <asm/mach/dma.h>
++#include <asm/mach/sysasic.h>
 +
-+#define SALSA20_IV_SIZE        8U
-+#define SALSA20_MIN_KEY_SIZE  16U
-+#define SALSA20_MAX_KEY_SIZE  32U
++#define GDROM_DEV_NAME "gdrom"
++#define GD_SESSION_OFFSET 150
 +
-+/*
-+ * Start of code taken from D. J. Bernstein's reference implementation.
-+ * With some modifications and optimizations made to suit our needs.
-+ */
++/* GD Rom commands */
++#define GDROM_COM_SOFTRESET 0x08
++#define GDROM_COM_EXECDIAG 0x90
++#define GDROM_COM_PACKET 0xA0
++#define GDROM_COM_IDDEV 0xA1
 +
-+/*
-+salsa20-ref.c version 20051118
-+D. J. Bernstein
-+Public domain.
-+*/
++/* GD Rom registers */
++#define GDROM_BASE_REG			0xA05F7000
++#define GDROM_ALTSTATUS_REG		(GDROM_BASE_REG + 0x18)
++#define GDROM_DATA_REG			(GDROM_BASE_REG + 0x80)
++#define GDROM_ERROR_REG		(GDROM_BASE_REG + 0x84)
++#define GDROM_INTSEC_REG		(GDROM_BASE_REG + 0x88)
++#define GDROM_SECNUM_REG		(GDROM_BASE_REG + 0x8C)
++#define GDROM_BCL_REG			(GDROM_BASE_REG + 0x90)
++#define GDROM_BCH_REG			(GDROM_BASE_REG + 0x94)
++#define GDROM_DSEL_REG			(GDROM_BASE_REG + 0x98)
++#define GDROM_STATUSCOMMAND_REG	(GDROM_BASE_REG + 0x9C)
++#define GDROM_RESET_REG		(GDROM_BASE_REG + 0x4E4)
 +
-+#define ROTATE(v,n) (((v) << (n)) | ((v) >> (32 - (n))))
-+#define XOR(v,w) ((v) ^ (w))
-+#define PLUS(v,w) (((v) + (w)))
-+#define PLUSONE(v) (PLUS((v),1))
-+#define U32TO8_LITTLE(p, v) \
-+	{ (p)[0] = (v >>  0) & 0xff; (p)[1] = (v >>  8) & 0xff; \
-+	  (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; }
-+#define U8TO32_LITTLE(p)   \
-+	(((u32)((p)[0])      ) | ((u32)((p)[1]) <<  8) | \
-+	 ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24)   )
++#define GDROM_DMA_STARTADDR_REG	(GDROM_BASE_REG + 0x404)
++#define GDROM_DMA_LENGTH_REG		(GDROM_BASE_REG + 0x408)
++#define GDROM_DMA_DIRECTION_REG	(GDROM_BASE_REG + 0x40C)
++#define GDROM_DMA_ENABLE_REG		(GDROM_BASE_REG + 0x414)
++#define GDROM_DMA_STATUS_REG		(GDROM_BASE_REG + 0x418)
++#define GDROM_DMA_WAIT_REG		(GDROM_BASE_REG + 0x4A0)
++#define GDROM_DMA_ACCESS_CTRL_REG	(GDROM_BASE_REG + 0x4B8)
 +
-+struct salsa20_ctx
-+{
-+	u32 input[16];
++#define GDROM_HARD_SECTOR	2048
++#define BLOCK_LAYER_SECTOR	512
++#define GD_TO_BLK		4
++
++#define GDROM_DEFAULT_TIMEOUT	(HZ * 7)
++
++static const struct {
++	int sense_key;
++	const char * const text;
++} sense_texts[] = {
++	{NO_SENSE, "OK"},
++	{RECOVERED_ERROR, "Recovered from error"},
++	{NOT_READY, "Device not ready"},
++	{MEDIUM_ERROR, "Disk not ready"},
++	{HARDWARE_ERROR, "Hardware error"},
++	{ILLEGAL_REQUEST, "Command has failed"},
++	{UNIT_ATTENTION, "Device needs attention - disk may have been changed"},
++	{DATA_PROTECT, "Data protection error"},
++	{ABORTED_COMMAND, "Command aborted"},
 +};
 +
-+static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
-+{
-+	u32 x[16];
-+	int i;
++static struct platform_device *pd;
++static int gdrom_major;
++static DECLARE_WAIT_QUEUE_HEAD(command_queue);
++static DECLARE_WAIT_QUEUE_HEAD(request_queue);
 +
-+	memcpy(x, input, sizeof(x));
-+	for (i = 20; i > 0; i -= 2) {
-+		x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 0],x[12]), 7));
-+		x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[ 4],x[ 0]), 9));
-+		x[12] = XOR(x[12],ROTATE(PLUS(x[ 8],x[ 4]),13));
-+		x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[12],x[ 8]),18));
-+		x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 5],x[ 1]), 7));
-+		x[13] = XOR(x[13],ROTATE(PLUS(x[ 9],x[ 5]), 9));
-+		x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[13],x[ 9]),13));
-+		x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 1],x[13]),18));
-+		x[14] = XOR(x[14],ROTATE(PLUS(x[10],x[ 6]), 7));
-+		x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[14],x[10]), 9));
-+		x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 2],x[14]),13));
-+		x[10] = XOR(x[10],ROTATE(PLUS(x[ 6],x[ 2]),18));
-+		x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[15],x[11]), 7));
-+		x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 3],x[15]), 9));
-+		x[11] = XOR(x[11],ROTATE(PLUS(x[ 7],x[ 3]),13));
-+		x[15] = XOR(x[15],ROTATE(PLUS(x[11],x[ 7]),18));
-+		x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[ 0],x[ 3]), 7));
-+		x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[ 1],x[ 0]), 9));
-+		x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[ 2],x[ 1]),13));
-+		x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[ 3],x[ 2]),18));
-+		x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 5],x[ 4]), 7));
-+		x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 6],x[ 5]), 9));
-+		x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 7],x[ 6]),13));
-+		x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 4],x[ 7]),18));
-+		x[11] = XOR(x[11],ROTATE(PLUS(x[10],x[ 9]), 7));
-+		x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[11],x[10]), 9));
-+		x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 8],x[11]),13));
-+		x[10] = XOR(x[10],ROTATE(PLUS(x[ 9],x[ 8]),18));
-+		x[12] = XOR(x[12],ROTATE(PLUS(x[15],x[14]), 7));
-+		x[13] = XOR(x[13],ROTATE(PLUS(x[12],x[15]), 9));
-+		x[14] = XOR(x[14],ROTATE(PLUS(x[13],x[12]),13));
-+		x[15] = XOR(x[15],ROTATE(PLUS(x[14],x[13]),18));
-+	}
-+	for (i = 0; i < 16; ++i)
-+		x[i] = PLUS(x[i],input[i]);
-+	for (i = 0; i < 16; ++i)
-+		U32TO8_LITTLE(output + 4 * i,x[i]);
-+}
++static DEFINE_SPINLOCK(gdrom_lock);
++static void gdrom_readdisk_dma(struct work_struct *work);
++static DECLARE_WORK(work, gdrom_readdisk_dma);
++static LIST_HEAD(gdrom_deferred);
 +
-+static const char sigma[16] = "expand 32-byte k";
-+static const char tau[16] = "expand 16-byte k";
++struct gdromtoc {
++	unsigned int entry[99];
++	unsigned int first, last;
++	unsigned int leadout;
++};
 +
-+static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
++static struct gdrom_unit {
++	struct gendisk *disk;
++	struct cdrom_device_info *cd_info;
++	int status;
++	int pending;
++	int transfer;
++	char disk_type;
++	struct gdromtoc *toc;
++	struct request_queue *gdrom_rq;
++} gd;
++
++struct gdrom_id {
++	char mid;
++	char modid;
++	char verid;
++	char padA[13];
++	char mname[16];
++	char modname[16];
++	char firmver[16];
++	char padB[16];
++};
++
++static int gdrom_getsense(short *bufstring);
++static int gdrom_packetcommand(struct cdrom_device_info *cd_info,
++	struct packet_command *command);
++static int gdrom_hardreset(struct cdrom_device_info *cd_info);
++
++static bool gdrom_is_busy(void)
 +{
-+	const char *constants;
++	return (ctrl_inb(GDROM_ALTSTATUS_REG) & 0x80) != 0;
++}
 +
-+	ctx->input[1] = U8TO32_LITTLE(k + 0);
-+	ctx->input[2] = U8TO32_LITTLE(k + 4);
-+	ctx->input[3] = U8TO32_LITTLE(k + 8);
-+	ctx->input[4] = U8TO32_LITTLE(k + 12);
-+	if (kbytes == 32) { /* recommended */
-+		k += 16;
-+		constants = sigma;
-+	} else { /* kbytes == 16 */
-+		constants = tau;
-+	}
-+	ctx->input[11] = U8TO32_LITTLE(k + 0);
-+	ctx->input[12] = U8TO32_LITTLE(k + 4);
-+	ctx->input[13] = U8TO32_LITTLE(k + 8);
-+	ctx->input[14] = U8TO32_LITTLE(k + 12);
-+	ctx->input[0] = U8TO32_LITTLE(constants + 0);
-+	ctx->input[5] = U8TO32_LITTLE(constants + 4);
-+	ctx->input[10] = U8TO32_LITTLE(constants + 8);
-+	ctx->input[15] = U8TO32_LITTLE(constants + 12);
++static bool gdrom_data_request(void)
++{
++	return (ctrl_inb(GDROM_ALTSTATUS_REG) & 0x88) == 8;
 +}
 +
-+static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
++static bool gdrom_wait_clrbusy(void)
 +{
-+	ctx->input[6] = U8TO32_LITTLE(iv + 0);
-+	ctx->input[7] = U8TO32_LITTLE(iv + 4);
-+	ctx->input[8] = 0;
-+	ctx->input[9] = 0;
++	unsigned long timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
++	while ((ctrl_inb(GDROM_ALTSTATUS_REG) & 0x80) &&
++		(time_before(jiffies, timeout)))
++		cpu_relax();
++	return time_before(jiffies, timeout + 1);
 +}
 +
-+static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
-+				  const u8 *src, unsigned int bytes)
++static bool gdrom_wait_busy_sleeps(void)
 +{
-+	u8 buf[64];
++	unsigned long timeout;
++	/* Wait to get busy first */
++	timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
++	while (!gdrom_is_busy() && time_before(jiffies, timeout))
++		cpu_relax();
++	/* Now wait for busy to clear */
++	return gdrom_wait_clrbusy();
++}
 +
-+	if (dst != src)
-+		memcpy(dst, src, bytes);
++static void gdrom_identifydevice(void *buf)
++{
++	int c;
++	short *data = buf;
++	/* If the device won't clear busy, it has probably
++	* been hit by a serious failure - but we'll
++	* try to return a sense key even so */
++	if (!gdrom_wait_clrbusy()) {
++		gdrom_getsense(NULL);
++		return;
++	}
++	ctrl_outb(GDROM_COM_IDDEV, GDROM_STATUSCOMMAND_REG);
++	if (!gdrom_wait_busy_sleeps()) {
++		gdrom_getsense(NULL);
++		return;
++	}
++	/* now read in the data */
++	for (c = 0; c < 40; c++)
++		data[c] = ctrl_inw(GDROM_DATA_REG);
++}
 +
-+	while (bytes) {
-+		salsa20_wordtobyte(buf, ctx->input);
++static void gdrom_spicommand(void *spi_string, int buflen)
++{
++	short *cmd = spi_string;
++	unsigned long timeout;
 +
-+		ctx->input[8] = PLUSONE(ctx->input[8]);
-+		if (!ctx->input[8])
-+			ctx->input[9] = PLUSONE(ctx->input[9]);
++	/* ensure IRQ_WAIT is set */
++	ctrl_outb(0x08, GDROM_ALTSTATUS_REG);
++	/* specify how many bytes we expect back */
++	ctrl_outb(buflen & 0xFF, GDROM_BCL_REG);
++	ctrl_outb((buflen >> 8) & 0xFF, GDROM_BCH_REG);
++	/* other parameters */
++	ctrl_outb(0, GDROM_INTSEC_REG);
++	ctrl_outb(0, GDROM_SECNUM_REG);
++	ctrl_outb(0, GDROM_ERROR_REG);
++	/* Wait until we can go */
++	if (!gdrom_wait_clrbusy()) {
++		gdrom_getsense(NULL);
++		return;
++	}
++	timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
++	ctrl_outb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
++	while (!gdrom_data_request() && time_before(jiffies, timeout))
++		cpu_relax();
++	if (!time_before(jiffies, timeout + 1)) {
++		gdrom_getsense(NULL);
++		return;
++	}
++	outsw(PHYSADDR(GDROM_DATA_REG), cmd, 6);
++}
 +
-+		if (bytes <= 64) {
-+			crypto_xor(dst, buf, bytes);
-+			return;
-+		}
 +
-+		crypto_xor(dst, buf, 64);
-+		bytes -= 64;
-+		dst += 64;
-+	}
++/* gdrom_command_executediagnostic:
++ * Used to probe for presence of working GDROM
++ * Restarts GDROM device and then applies standard ATA 3
++ * Execute Diagnostic Command: a return of '1' indicates device 0
++ * present and device 1 absent
++ */
++static char gdrom_execute_diagnostic(void)
++{
++	gdrom_hardreset(gd.cd_info);
++	if (!gdrom_wait_clrbusy())
++		return 0;
++	ctrl_outb(GDROM_COM_EXECDIAG, GDROM_STATUSCOMMAND_REG);
++	if (!gdrom_wait_busy_sleeps())
++		return 0;
++	return ctrl_inb(GDROM_ERROR_REG);
 +}
 +
 +/*
-+ * End of code taken from D. J. Bernstein's reference implementation.
++ * Prepare disk command
++ * byte 0 = 0x70
++ * byte 2 = 0x1f
 + */
-+
-+static int setkey(struct crypto_tfm *tfm, const u8 *key,
-+		  unsigned int keysize)
++static int gdrom_preparedisk_cmd(void)
 +{
-+	struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
-+	salsa20_keysetup(ctx, key, keysize);
++	struct packet_command *spin_command;
++	spin_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
++	if (!spin_command)
++		return -ENOMEM;
++	spin_command->cmd[0] = 0x70;
++	spin_command->cmd[2] = 0x1f;
++	spin_command->buflen = 0;
++	gd.pending = 1;
++	gdrom_packetcommand(gd.cd_info, spin_command);
++	/* wait up to GDROM_DEFAULT_TIMEOUT (7 seconds) */
++	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
++		GDROM_DEFAULT_TIMEOUT);
++	gd.pending = 0;
++	kfree(spin_command);
++	if (gd.status & 0x01) {
++		/* log an error */
++		gdrom_getsense(NULL);
++		return -EIO;
++	}
 +	return 0;
 +}
 +
-+static int encrypt(struct blkcipher_desc *desc,
-+		   struct scatterlist *dst, struct scatterlist *src,
-+		   unsigned int nbytes)
++/*
++ * Read TOC command
++ * byte 0 = 0x14
++ * byte 1 = session
++ * byte 3 = sizeof TOC >> 8  ie upper byte
++ * byte 4 = sizeof TOC & 0xff ie lower byte
++ */
++static int gdrom_readtoc_cmd(struct gdromtoc *toc, int session)
 +{
-+	struct blkcipher_walk walk;
-+	struct crypto_blkcipher *tfm = desc->tfm;
-+	struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
-+	int err;
-+
-+	blkcipher_walk_init(&walk, dst, src, nbytes);
-+	err = blkcipher_walk_virt_block(desc, &walk, 64);
-+
-+	salsa20_ivsetup(ctx, walk.iv);
-+
-+	if (likely(walk.nbytes == nbytes))
-+	{
-+		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
-+				      walk.src.virt.addr, nbytes);
-+		return blkcipher_walk_done(desc, &walk, 0);
-+	}
++	int tocsize;
++	struct packet_command *toc_command;
++	int err = 0;
 +
-+	while (walk.nbytes >= 64) {
-+		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
-+				      walk.src.virt.addr,
-+				      walk.nbytes - (walk.nbytes % 64));
-+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
++	toc_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
++	if (!toc_command)
++		return -ENOMEM;
++	tocsize = sizeof(struct gdromtoc);
++	toc_command->cmd[0] = 0x14;
++	toc_command->cmd[1] = session;
++	toc_command->cmd[3] = tocsize >> 8;
++	toc_command->cmd[4] = tocsize & 0xff;
++	toc_command->buflen = tocsize;
++	if (gd.pending) {
++		err = -EBUSY;
++		goto cleanup_readtoc_final;
 +	}
-+
-+	if (walk.nbytes) {
-+		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
-+				      walk.src.virt.addr, walk.nbytes);
-+		err = blkcipher_walk_done(desc, &walk, 0);
++	gd.pending = 1;
++	gdrom_packetcommand(gd.cd_info, toc_command);
++	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
++		GDROM_DEFAULT_TIMEOUT);
++	if (gd.pending) {
++		err = -EINVAL;
++		goto cleanup_readtoc;
 +	}
++	insw(PHYSADDR(GDROM_DATA_REG), toc, tocsize/2);
++	if (gd.status & 0x01)
++		err = -EINVAL;
 +
++cleanup_readtoc:
++	gd.pending = 0;
++cleanup_readtoc_final:
++	kfree(toc_command);
 +	return err;
 +}
 +
-+static struct crypto_alg alg = {
-+	.cra_name           =   "salsa20",
-+	.cra_driver_name    =   "salsa20-generic",
-+	.cra_priority       =   100,
-+	.cra_flags          =   CRYPTO_ALG_TYPE_BLKCIPHER,
-+	.cra_type           =   &crypto_blkcipher_type,
-+	.cra_blocksize      =   1,
-+	.cra_ctxsize        =   sizeof(struct salsa20_ctx),
-+	.cra_alignmask      =	3,
-+	.cra_module         =   THIS_MODULE,
-+	.cra_list           =   LIST_HEAD_INIT(alg.cra_list),
-+	.cra_u              =   {
-+		.blkcipher = {
-+			.setkey         =   setkey,
-+			.encrypt        =   encrypt,
-+			.decrypt        =   encrypt,
-+			.min_keysize    =   SALSA20_MIN_KEY_SIZE,
-+			.max_keysize    =   SALSA20_MAX_KEY_SIZE,
-+			.ivsize         =   SALSA20_IV_SIZE,
-+		}
-+	}
-+};
-+
-+static int __init init(void)
++/* TOC helpers */
++static int get_entry_lba(int track)
 +{
-+	return crypto_register_alg(&alg);
++	return (cpu_to_be32(track & 0xffffff00) - GD_SESSION_OFFSET);
 +}
 +
-+static void __exit fini(void)
++static int get_entry_q_ctrl(int track)
 +{
-+	crypto_unregister_alg(&alg);
++	return (track & 0x000000f0) >> 4;
 +}
 +
-+module_init(init);
-+module_exit(fini);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
-+MODULE_ALIAS("salsa20");
-diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
-index b9bbda0..9aeeb52 100644
---- a/crypto/scatterwalk.c
-+++ b/crypto/scatterwalk.c
-@@ -13,6 +13,8 @@
-  * any later version.
-  *
-  */
-+
-+#include <crypto/scatterwalk.h>
- #include <linux/kernel.h>
- #include <linux/mm.h>
- #include <linux/module.h>
-@@ -20,9 +22,6 @@
- #include <linux/highmem.h>
- #include <linux/scatterlist.h>
- 
--#include "internal.h"
--#include "scatterwalk.h"
--
- static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
- {
- 	void *src = out ? buf : sgdata;
-@@ -106,6 +105,9 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
- 	struct scatter_walk walk;
- 	unsigned int offset = 0;
- 
-+	if (!nbytes)
-+		return;
-+
- 	for (;;) {
- 		scatterwalk_start(&walk, sg);
- 
-@@ -113,7 +115,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
- 			break;
- 
- 		offset += sg->length;
--		sg = sg_next(sg);
-+		sg = scatterwalk_sg_next(sg);
- 	}
- 
- 	scatterwalk_advance(&walk, start - offset);
-diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h
-deleted file mode 100644
-index 87ed681..0000000
---- a/crypto/scatterwalk.h
-+++ /dev/null
-@@ -1,80 +0,0 @@
--/*
-- * Cryptographic API.
-- *
-- * Copyright (c) 2002 James Morris <jmorris at intercode.com.au>
-- * Copyright (c) 2002 Adam J. Richter <adam at yggdrasil.com>
-- * Copyright (c) 2004 Jean-Luc Cooke <jlcooke at certainkey.com>
-- *
-- * This program is free software; you can redistribute it and/or modify it
-- * under the terms of the GNU General Public License as published by the Free
-- * Software Foundation; either version 2 of the License, or (at your option)
-- * any later version.
-- *
-- */
--
--#ifndef _CRYPTO_SCATTERWALK_H
--#define _CRYPTO_SCATTERWALK_H
--
--#include <linux/mm.h>
--#include <linux/scatterlist.h>
--
--#include "internal.h"
--
--static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
--{
--	return (++sg)->length ? sg : (void *) sg_page(sg);
--}
--
--static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
--						struct scatter_walk *walk_out)
--{
--	return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
--		 (int)(walk_in->offset - walk_out->offset));
--}
--
--static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
--{
--	unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
--	unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
--	return len_this_page > len ? len : len_this_page;
--}
--
--static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
--					     unsigned int nbytes)
--{
--	unsigned int len_this_page = scatterwalk_pagelen(walk);
--	return nbytes > len_this_page ? len_this_page : nbytes;
--}
--
--static inline void scatterwalk_advance(struct scatter_walk *walk,
--				       unsigned int nbytes)
--{
--	walk->offset += nbytes;
--}
--
--static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
--					       unsigned int alignmask)
--{
--	return !(walk->offset & alignmask);
--}
--
--static inline struct page *scatterwalk_page(struct scatter_walk *walk)
--{
--	return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
--}
--
--static inline void scatterwalk_unmap(void *vaddr, int out)
--{
--	crypto_kunmap(vaddr, out);
--}
--
--void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
--void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
--			    size_t nbytes, int out);
--void *scatterwalk_map(struct scatter_walk *walk, int out);
--void scatterwalk_done(struct scatter_walk *walk, int out, int more);
--
--void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
--			      unsigned int start, unsigned int nbytes, int out);
--
--#endif  /* _CRYPTO_SCATTERWALK_H */
-diff --git a/crypto/seqiv.c b/crypto/seqiv.c
-new file mode 100644
-index 0000000..b903aab
---- /dev/null
-+++ b/crypto/seqiv.c
-@@ -0,0 +1,345 @@
-+/*
-+ * seqiv: Sequence Number IV Generator
-+ *
-+ * This generator generates an IV based on a sequence number by xoring it
-+ * with a salt.  This algorithm is mainly useful for CTR and similar modes.
-+ *
-+ * Copyright (c) 2007 Herbert Xu <herbert at gondor.apana.org.au>
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
-+ * any later version.
-+ *
-+ */
-+
-+#include <crypto/internal/aead.h>
-+#include <crypto/internal/skcipher.h>
-+#include <linux/err.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/random.h>
-+#include <linux/spinlock.h>
-+#include <linux/string.h>
-+
-+struct seqiv_ctx {
-+	spinlock_t lock;
-+	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-+};
-+
-+static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
++static int get_entry_track(int track)
 +{
-+	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-+	struct crypto_ablkcipher *geniv;
++	return (track & 0x0000ff00) >> 8;
++}
 +
-+	if (err == -EINPROGRESS)
-+		return;
++static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
++	struct cdrom_multisession *ms_info)
++{
++	int fentry, lentry, track, data, tocuse, err;
++	if (!gd.toc)
++		return -ENOMEM;
++	tocuse = 1;
++	/* Check if GD-ROM */
++	err = gdrom_readtoc_cmd(gd.toc, 1);
++	/* Not a GD-ROM so check if standard CD-ROM */
++	if (err) {
++		tocuse = 0;
++		err = gdrom_readtoc_cmd(gd.toc, 0);
++		if (err) {
++			printk(KERN_INFO "GDROM: Could not get CD "
++				"table of contents\n");
++			return -ENXIO;
++		}
++	}
 +
-+	if (err)
-+		goto out;
++	fentry = get_entry_track(gd.toc->first);
++	lentry = get_entry_track(gd.toc->last);
++	/* Find the first data track */
++	track = get_entry_track(gd.toc->last);
++	do {
++		data = gd.toc->entry[track - 1];
++		if (get_entry_q_ctrl(data))
++			break;	/* ie a real data track */
++		track--;
++	} while (track >= fentry);
 +
-+	geniv = skcipher_givcrypt_reqtfm(req);
-+	memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));
++	if ((track > 100) || (track < get_entry_track(gd.toc->first))) {
++		printk(KERN_INFO "GDROM: No data on the last "
++			"session of the CD\n");
++		gdrom_getsense(NULL);
++		return -ENXIO;
++	}
 +
-+out:
-+	kfree(subreq->info);
++	ms_info->addr_format = CDROM_LBA;
++	ms_info->addr.lba = get_entry_lba(data);
++	ms_info->xa_flag = 1;
++	return 0;
 +}
 +
-+static void seqiv_complete(struct crypto_async_request *base, int err)
++static int gdrom_open(struct cdrom_device_info *cd_info, int purpose)
 +{
-+	struct skcipher_givcrypt_request *req = base->data;
-+
-+	seqiv_complete2(req, err);
-+	skcipher_givcrypt_complete(req, err);
++	/* spin up the disk */
++	return gdrom_preparedisk_cmd();
 +}
 +
-+static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
++/* this function is required even if empty */
++static void gdrom_release(struct cdrom_device_info *cd_info)
 +{
-+	struct aead_request *subreq = aead_givcrypt_reqctx(req);
-+	struct crypto_aead *geniv;
-+
-+	if (err == -EINPROGRESS)
-+		return;
-+
-+	if (err)
-+		goto out;
-+
-+	geniv = aead_givcrypt_reqtfm(req);
-+	memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));
-+
-+out:
-+	kfree(subreq->iv);
 +}
 +
-+static void seqiv_aead_complete(struct crypto_async_request *base, int err)
++static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
 +{
-+	struct aead_givcrypt_request *req = base->data;
-+
-+	seqiv_aead_complete2(req, err);
-+	aead_givcrypt_complete(req, err);
++	/* read the sense key */
++	char sense = ctrl_inb(GDROM_ERROR_REG);
++	sense &= 0xF0;
++	if (sense == 0)
++		return CDS_DISC_OK;
++	if (sense == 0x20)
++		return CDS_DRIVE_NOT_READY;
++	/* default */
++	return CDS_NO_INFO;
 +}
 +
-+static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
-+			unsigned int ivsize)
++static int gdrom_mediachanged(struct cdrom_device_info *cd_info, int ignore)
 +{
-+	unsigned int len = ivsize;
-+
-+	if (ivsize > sizeof(u64)) {
-+		memset(info, 0, ivsize - sizeof(u64));
-+		len = sizeof(u64);
-+	}
-+	seq = cpu_to_be64(seq);
-+	memcpy(info + ivsize - len, &seq, len);
-+	crypto_xor(info, ctx->salt, ivsize);
++	/* check the sense key */
++	return (ctrl_inb(GDROM_ERROR_REG) & 0xF0) == 0x60;
 +}
 +
-+static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
++/* reset the G1 bus */
++static int gdrom_hardreset(struct cdrom_device_info *cd_info)
 +{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-+	crypto_completion_t complete;
-+	void *data;
-+	u8 *info;
-+	unsigned int ivsize;
-+	int err;
-+
-+	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-+
-+	complete = req->creq.base.complete;
-+	data = req->creq.base.data;
-+	info = req->creq.info;
++	int count;
++	ctrl_outl(0x1fffff, GDROM_RESET_REG);
++	for (count = 0xa0000000; count < 0xa0200000; count += 4)
++		ctrl_inl(count);
++	return 0;
++}
 +
-+	ivsize = crypto_ablkcipher_ivsize(geniv);
++/* keep the function looking like the universal
++ * CD Rom specification  - returning int */
++static int gdrom_packetcommand(struct cdrom_device_info *cd_info,
++	struct packet_command *command)
++{
++	gdrom_spicommand(&command->cmd, command->buflen);
++	return 0;
++}
 +
-+	if (unlikely(!IS_ALIGNED((unsigned long)info,
-+				 crypto_ablkcipher_alignmask(geniv) + 1))) {
-+		info = kmalloc(ivsize, req->creq.base.flags &
-+				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-+								  GFP_ATOMIC);
-+		if (!info)
-+			return -ENOMEM;
++/* Get Sense SPI command
++ * From Marcus Comstedt
++ * cmd = 0x13
++ * cmd + 4 = length of returned buffer
++ * Returns 5 16 bit words
++ */
++static int gdrom_getsense(short *bufstring)
++{
++	struct packet_command *sense_command;
++	short sense[5];
++	int sense_key;
++	int err = -EIO;
 +
-+		complete = seqiv_complete;
-+		data = req;
++	sense_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
++	if (!sense_command)
++		return -ENOMEM;
++	sense_command->cmd[0] = 0x13;
++	sense_command->cmd[4] = 10;
++	sense_command->buflen = 10;
++	/* even if something is pending try to get
++	* the sense key if possible */
++	if (gd.pending && !gdrom_wait_clrbusy()) {
++		err = -EBUSY;
++		goto cleanup_sense_final;
 +	}
++	gd.pending = 1;
++	gdrom_packetcommand(gd.cd_info, sense_command);
++	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
++		GDROM_DEFAULT_TIMEOUT);
++	if (gd.pending)
++		goto cleanup_sense;
++	insw(PHYSADDR(GDROM_DATA_REG), &sense, sense_command->buflen/2);
++	if (sense[1] & 40) {
++		printk(KERN_INFO "GDROM: Drive not ready - command aborted\n");
++		goto cleanup_sense;
++	}
++	sense_key = sense[1] & 0x0F;
++	if (sense_key < ARRAY_SIZE(sense_texts))
++		printk(KERN_INFO "GDROM: %s\n", sense_texts[sense_key].text);
++	else
++		printk(KERN_ERR "GDROM: Unknown sense key: %d\n", sense_key);
++	if (bufstring) /* return additional sense data */
++		memcpy(bufstring, &sense[4], 2);
++	if (sense_key < 2)
++		err = 0;
 +
-+	ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
-+					data);
-+	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-+				     req->creq.nbytes, info);
-+
-+	seqiv_geniv(ctx, info, req->seq, ivsize);
-+	memcpy(req->giv, info, ivsize);
-+
-+	err = crypto_ablkcipher_encrypt(subreq);
-+	if (unlikely(info != req->creq.info))
-+		seqiv_complete2(req, err);
++cleanup_sense:
++	gd.pending = 0;
++cleanup_sense_final:
++	kfree(sense_command);
 +	return err;
 +}
 +
-+static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
++static struct cdrom_device_ops gdrom_ops = {
++	.open			= gdrom_open,
++	.release		= gdrom_release,
++	.drive_status		= gdrom_drivestatus,
++	.media_changed		= gdrom_mediachanged,
++	.get_last_session	= gdrom_get_last_session,
++	.reset			= gdrom_hardreset,
++	.capability		= CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
++				  CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
++	.n_minors		= 1,
++};
++
++static int gdrom_bdops_open(struct inode *inode, struct file *file)
 +{
-+	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
-+	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-+	struct aead_request *areq = &req->areq;
-+	struct aead_request *subreq = aead_givcrypt_reqctx(req);
-+	crypto_completion_t complete;
-+	void *data;
-+	u8 *info;
-+	unsigned int ivsize;
-+	int err;
++	return cdrom_open(gd.cd_info, inode, file);
++}
 +
-+	aead_request_set_tfm(subreq, aead_geniv_base(geniv));
++static int gdrom_bdops_release(struct inode *inode, struct file *file)
++{
++	return cdrom_release(gd.cd_info, file);
++}
 +
-+	complete = areq->base.complete;
-+	data = areq->base.data;
-+	info = areq->iv;
++static int gdrom_bdops_mediachanged(struct gendisk *disk)
++{
++	return cdrom_media_changed(gd.cd_info);
++}
 +
-+	ivsize = crypto_aead_ivsize(geniv);
++static int gdrom_bdops_ioctl(struct inode *inode, struct file *file,
++	unsigned cmd, unsigned long arg)
++{
++	return cdrom_ioctl(file, gd.cd_info, inode, cmd, arg);
++}
 +
-+	if (unlikely(!IS_ALIGNED((unsigned long)info,
-+				 crypto_aead_alignmask(geniv) + 1))) {
-+		info = kmalloc(ivsize, areq->base.flags &
-+				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-+								  GFP_ATOMIC);
-+		if (!info)
-+			return -ENOMEM;
++static struct block_device_operations gdrom_bdops = {
++	.owner			= THIS_MODULE,
++	.open			= gdrom_bdops_open,
++	.release		= gdrom_bdops_release,
++	.media_changed		= gdrom_bdops_mediachanged,
++	.ioctl			= gdrom_bdops_ioctl,
++};
 +
-+		complete = seqiv_aead_complete;
-+		data = req;
-+	}
++static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
++{
++	gd.status = ctrl_inb(GDROM_STATUSCOMMAND_REG);
++	if (gd.pending != 1)
++		return IRQ_HANDLED;
++	gd.pending = 0;
++	wake_up_interruptible(&command_queue);
++	return IRQ_HANDLED;
++}
 +
-+	aead_request_set_callback(subreq, areq->base.flags, complete, data);
-+	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
-+			       info);
-+	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
++static irqreturn_t gdrom_dma_interrupt(int irq, void *dev_id)
++{
++	gd.status = ctrl_inb(GDROM_STATUSCOMMAND_REG);
++	if (gd.transfer != 1)
++		return IRQ_HANDLED;
++	gd.transfer = 0;
++	wake_up_interruptible(&request_queue);
++	return IRQ_HANDLED;
++}
 +
-+	seqiv_geniv(ctx, info, req->seq, ivsize);
-+	memcpy(req->giv, info, ivsize);
++static int __devinit gdrom_set_interrupt_handlers(void)
++{
++	int err;
 +
-+	err = crypto_aead_encrypt(subreq);
-+	if (unlikely(info != areq->iv))
-+		seqiv_aead_complete2(req, err);
++	err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
++		IRQF_DISABLED, "gdrom_command", &gd);
++	if (err)
++		return err;
++	err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
++		IRQF_DISABLED, "gdrom_dma", &gd);
++	if (err)
++		free_irq(HW_EVENT_GDROM_CMD, &gd);
 +	return err;
 +}
 +
-+static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
++/* Implement DMA read using SPI command
++ * 0 -> 0x30
++ * 1 -> mode
++ * 2 -> block >> 16
++ * 3 -> block >> 8
++ * 4 -> block
++ * 8 -> sectors >> 16
++ * 9 -> sectors >> 8
++ * 10 -> sectors
++ */
++static void gdrom_readdisk_dma(struct work_struct *work)
 +{
-+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-+	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+
-+	spin_lock_bh(&ctx->lock);
-+	if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
-+		goto unlock;
-+
-+	crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
-+	get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv));
-+
-+unlock:
-+	spin_unlock_bh(&ctx->lock);
++	int err, block, block_cnt;
++	struct packet_command *read_command;
++	struct list_head *elem, *next;
++	struct request *req;
++	unsigned long timeout;
 +
-+	return seqiv_givencrypt(req);
++	if (list_empty(&gdrom_deferred))
++		return;
++	read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
++	if (!read_command)
++		return; /* get more memory later? */
++	read_command->cmd[0] = 0x30;
++	read_command->cmd[1] = 0x20;
++	spin_lock(&gdrom_lock);
++	list_for_each_safe(elem, next, &gdrom_deferred) {
++		req = list_entry(elem, struct request, queuelist);
++		spin_unlock(&gdrom_lock);
++		block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
++		block_cnt = req->nr_sectors/GD_TO_BLK;
++		ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
++		ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
++		ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
++		ctrl_outl(1, GDROM_DMA_ENABLE_REG);
++		read_command->cmd[2] = (block >> 16) & 0xFF;
++		read_command->cmd[3] = (block >> 8) & 0xFF;
++		read_command->cmd[4] = block & 0xFF;
++		read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
++		read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
++		read_command->cmd[10] = block_cnt & 0xFF;
++		/* set for DMA */
++		ctrl_outb(1, GDROM_ERROR_REG);
++		/* other registers */
++		ctrl_outb(0, GDROM_SECNUM_REG);
++		ctrl_outb(0, GDROM_BCL_REG);
++		ctrl_outb(0, GDROM_BCH_REG);
++		ctrl_outb(0, GDROM_DSEL_REG);
++		ctrl_outb(0, GDROM_INTSEC_REG);
++		/* Wait for registers to reset after any previous activity */
++		timeout = jiffies + HZ / 2;
++		while (gdrom_is_busy() && time_before(jiffies, timeout))
++			cpu_relax();
++		ctrl_outb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
++		timeout = jiffies + HZ / 2;
++		/* Wait for packet command to finish */
++		while (gdrom_is_busy() && time_before(jiffies, timeout))
++			cpu_relax();
++		gd.pending = 1;
++		gd.transfer = 1;
++		outsw(PHYSADDR(GDROM_DATA_REG), &read_command->cmd, 6);
++		timeout = jiffies + HZ / 2;
++		/* Wait for any pending DMA to finish */
++		while (ctrl_inb(GDROM_DMA_STATUS_REG) &&
++			time_before(jiffies, timeout))
++			cpu_relax();
++		/* start transfer */
++		ctrl_outb(1, GDROM_DMA_STATUS_REG);
++		wait_event_interruptible_timeout(request_queue,
++			gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
++		err = gd.transfer;
++		gd.transfer = 0;
++		gd.pending = 0;
++		/* now seek to take the request spinlock
++		* before ending the request */
++		spin_lock(&gdrom_lock);
++		list_del_init(&req->queuelist);
++		end_dequeued_request(req, 1 - err);
++	}
++	spin_unlock(&gdrom_lock);
++	kfree(read_command);
 +}
 +
-+static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
++static void gdrom_request_handler_dma(struct request *req)
 +{
-+	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
-+	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-+
-+	spin_lock_bh(&ctx->lock);
-+	if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
-+		goto unlock;
-+
-+	crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
-+	get_random_bytes(ctx->salt, crypto_aead_ivsize(geniv));
++	/* dequeue, add to list of deferred work
++	* and then schedule workqueue */
++	blkdev_dequeue_request(req);
++	list_add_tail(&req->queuelist, &gdrom_deferred);
++	schedule_work(&work);
++}
 +
-+unlock:
-+	spin_unlock_bh(&ctx->lock);
++static void gdrom_request(struct request_queue *rq)
++{
++	struct request *req;
 +
-+	return seqiv_aead_givencrypt(req);
++	while ((req = elv_next_request(rq)) != NULL) {
++		if (!blk_fs_request(req)) {
++			printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
++			end_request(req, 0);
++		}
++		if (rq_data_dir(req) != READ) {
++			printk(KERN_NOTICE "GDROM: Read only device -");
++			printk(" write request ignored\n");
++			end_request(req, 0);
++		}
++		if (req->nr_sectors)
++			gdrom_request_handler_dma(req);
++		else
++			end_request(req, 0);
++	}
 +}
 +
-+static int seqiv_init(struct crypto_tfm *tfm)
++/* Print string identifying GD ROM device */
++static int __devinit gdrom_outputversion(void)
 +{
-+	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-+	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-+
-+	spin_lock_init(&ctx->lock);
-+
-+	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
++	struct gdrom_id *id;
++	char *model_name, *manuf_name, *firmw_ver;
++	int err = -ENOMEM;
 +
-+	return skcipher_geniv_init(tfm);
++	/* query device ID */
++	id = kzalloc(sizeof(struct gdrom_id), GFP_KERNEL);
++	if (!id)
++		return err;
++	gdrom_identifydevice(id);
++	model_name = kstrndup(id->modname, 16, GFP_KERNEL);
++	if (!model_name)
++		goto free_id;
++	manuf_name = kstrndup(id->mname, 16, GFP_KERNEL);
++	if (!manuf_name)
++		goto free_model_name;
++	firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL);
++	if (!firmw_ver)
++		goto free_manuf_name;
++	printk(KERN_INFO "GDROM: %s from %s with firmware %s\n",
++		model_name, manuf_name, firmw_ver);
++	err = 0;
++	kfree(firmw_ver);
++free_manuf_name:
++	kfree(manuf_name);
++free_model_name:
++	kfree(model_name);
++free_id:
++	kfree(id);
++	return err;
 +}
 +
-+static int seqiv_aead_init(struct crypto_tfm *tfm)
++/* set the default mode for DMA transfer */
++static int __devinit gdrom_init_dma_mode(void)
 +{
-+	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-+	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-+
-+	spin_lock_init(&ctx->lock);
-+
-+	tfm->crt_aead.reqsize = sizeof(struct aead_request);
-+
-+	return aead_geniv_init(tfm);
++	ctrl_outb(0x13, GDROM_ERROR_REG);
++	ctrl_outb(0x22, GDROM_INTSEC_REG);
++	if (!gdrom_wait_clrbusy())
++		return -EBUSY;
++	ctrl_outb(0xEF, GDROM_STATUSCOMMAND_REG);
++	if (!gdrom_wait_busy_sleeps())
++		return -EBUSY;
++	/* Memory protection setting for GDROM DMA
++	* Bits 31 - 16 security: 0x8843
++	* Bits 15 and 7 reserved (0)
++	* Bits 14 - 8 start of transfer range in 1 MB blocks OR'ed with 0x80
++	* Bits 6 - 0 end of transfer range in 1 MB blocks OR'ed with 0x80
++	* (0x40 | 0x80) = start range at 0x0C000000
++	* (0x7F | 0x80) = end range at 0x0FFFFFFF */
++	ctrl_outl(0x8843407F, GDROM_DMA_ACCESS_CTRL_REG);
++	ctrl_outl(9, GDROM_DMA_WAIT_REG); /* DMA word setting */
++	return 0;
 +}
 +
-+static struct crypto_template seqiv_tmpl;
-+
-+static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
++static void __devinit probe_gdrom_setupcd(void)
 +{
-+	struct crypto_instance *inst;
-+
-+	inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
-+
-+	if (IS_ERR(inst))
-+		goto out;
-+
-+	inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
-+
-+	inst->alg.cra_init = seqiv_init;
-+	inst->alg.cra_exit = skcipher_geniv_exit;
-+
-+	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-+
-+out:
-+	return inst;
++	gd.cd_info->ops = &gdrom_ops;
++	gd.cd_info->capacity = 1;
++	strcpy(gd.cd_info->name, GDROM_DEV_NAME);
++	gd.cd_info->mask = CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|
++		CDC_SELECT_DISC;
 +}
 +
-+static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
++static void __devinit probe_gdrom_setupdisk(void)
 +{
-+	struct crypto_instance *inst;
-+
-+	inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
-+
-+	if (IS_ERR(inst))
-+		goto out;
-+
-+	inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
-+
-+	inst->alg.cra_init = seqiv_aead_init;
-+	inst->alg.cra_exit = aead_geniv_exit;
-+
-+	inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
++	gd.disk->major = gdrom_major;
++	gd.disk->first_minor = 1;
++	gd.disk->minors = 1;
++	strcpy(gd.disk->disk_name, GDROM_DEV_NAME);
++}
 +
-+out:
-+	return inst;
++static int __devinit probe_gdrom_setupqueue(void)
++{
++	blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
++	/* using DMA so memory will need to be contiguous */
++	blk_queue_max_hw_segments(gd.gdrom_rq, 1);
++	/* set a large max size to get most from DMA */
++	blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
++	gd.disk->queue = gd.gdrom_rq;
++	return gdrom_init_dma_mode();
 +}
 +
-+static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
++/*
++ * register this as a block device and as compliant with the
++ * universal CD Rom driver interface
++ */
++static int __devinit probe_gdrom(struct platform_device *devptr)
 +{
-+	struct crypto_attr_type *algt;
-+	struct crypto_instance *inst;
 +	int err;
++	/* Start the device */
++	if (gdrom_execute_diagnostic() != 1) {
++		printk(KERN_WARNING "GDROM: ATA Probe for GDROM failed.\n");
++		return -ENODEV;
++	}
++	/* Print out firmware ID */
++	if (gdrom_outputversion())
++		return -ENOMEM;
++	/* Register GDROM */
++	gdrom_major = register_blkdev(0, GDROM_DEV_NAME);
++	if (gdrom_major <= 0)
++		return gdrom_major;
++	printk(KERN_INFO "GDROM: Registered with major number %d\n",
++		gdrom_major);
++	/* Specify basic properties of drive */
++	gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL);
++	if (!gd.cd_info) {
++		err = -ENOMEM;
++		goto probe_fail_no_mem;
++	}
++	probe_gdrom_setupcd();
++	gd.disk = alloc_disk(1);
++	if (!gd.disk) {
++		err = -ENODEV;
++		goto probe_fail_no_disk;
++	}
++	probe_gdrom_setupdisk();
++	if (register_cdrom(gd.cd_info)) {
++		err = -ENODEV;
++		goto probe_fail_cdrom_register;
++	}
++	gd.disk->fops = &gdrom_bdops;
++	/* latch on to the interrupt */
++	err = gdrom_set_interrupt_handlers();
++	if (err)
++		goto probe_fail_cmdirq_register;
++	gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
++	if (!gd.gdrom_rq)
++		goto probe_fail_requestq;
 +
-+	algt = crypto_get_attr_type(tb);
-+	err = PTR_ERR(algt);
-+	if (IS_ERR(algt))
-+		return ERR_PTR(err);
-+
-+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
-+		inst = seqiv_ablkcipher_alloc(tb);
-+	else
-+		inst = seqiv_aead_alloc(tb);
-+
-+	if (IS_ERR(inst))
-+		goto out;
++	err = probe_gdrom_setupqueue();
++	if (err)
++		goto probe_fail_toc;
 +
-+	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
-+	inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
++	gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL);
++	if (!gd.toc)
++		goto probe_fail_toc;
++	add_disk(gd.disk);
++	return 0;
 +
-+out:
-+	return inst;
++probe_fail_toc:
++	blk_cleanup_queue(gd.gdrom_rq);
++probe_fail_requestq:
++	free_irq(HW_EVENT_GDROM_DMA, &gd);
++	free_irq(HW_EVENT_GDROM_CMD, &gd);
++probe_fail_cmdirq_register:
++probe_fail_cdrom_register:
++	del_gendisk(gd.disk);
++probe_fail_no_disk:
++	kfree(gd.cd_info);
++	unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
++	gdrom_major = 0;
++probe_fail_no_mem:
++	printk(KERN_WARNING "GDROM: Probe failed - error is 0x%X\n", err);
++	return err;
 +}
 +
-+static void seqiv_free(struct crypto_instance *inst)
++static int __devexit remove_gdrom(struct platform_device *devptr)
 +{
-+	if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
-+		skcipher_geniv_free(inst);
-+	else
-+		aead_geniv_free(inst);
++	flush_scheduled_work();
++	blk_cleanup_queue(gd.gdrom_rq);
++	free_irq(HW_EVENT_GDROM_CMD, &gd);
++	free_irq(HW_EVENT_GDROM_DMA, &gd);
++	del_gendisk(gd.disk);
++	if (gdrom_major)
++		unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
++	return unregister_cdrom(gd.cd_info);
 +}
 +
-+static struct crypto_template seqiv_tmpl = {
-+	.name = "seqiv",
-+	.alloc = seqiv_alloc,
-+	.free = seqiv_free,
-+	.module = THIS_MODULE,
++static struct platform_driver gdrom_driver = {
++	.probe = probe_gdrom,
++	.remove = __devexit_p(remove_gdrom),
++	.driver = {
++			.name = GDROM_DEV_NAME,
++	},
 +};
 +
-+static int __init seqiv_module_init(void)
++static int __init init_gdrom(void)
 +{
-+	return crypto_register_template(&seqiv_tmpl);
++	int rc;
++	gd.toc = NULL;
++	rc = platform_driver_register(&gdrom_driver);
++	if (rc)
++		return rc;
++	pd = platform_device_register_simple(GDROM_DEV_NAME, -1, NULL, 0);
++	if (IS_ERR(pd)) {
++		platform_driver_unregister(&gdrom_driver);
++		return PTR_ERR(pd);
++	}
++	return 0;
 +}
 +
-+static void __exit seqiv_module_exit(void)
++static void __exit exit_gdrom(void)
 +{
-+	crypto_unregister_template(&seqiv_tmpl);
++	platform_device_unregister(pd);
++	platform_driver_unregister(&gdrom_driver);
++	kfree(gd.toc);
 +}
 +
-+module_init(seqiv_module_init);
-+module_exit(seqiv_module_exit);
-+
++module_init(init_gdrom);
++module_exit(exit_gdrom);
++MODULE_AUTHOR("Adrian McMenamin <adrian at mcmen.demon.co.uk>");
++MODULE_DESCRIPTION("SEGA Dreamcast GD-ROM Driver");
 +MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Sequence Number IV Generator");
-diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
-index fd3918b..3cc93fd 100644
---- a/crypto/sha256_generic.c
-+++ b/crypto/sha256_generic.c
-@@ -9,6 +9,7 @@
-  * Copyright (c) Jean-Luc Cooke <jlcooke at certainkey.com>
-  * Copyright (c) Andrew McDonald <andrew at mcdonald.org.uk>
-  * Copyright (c) 2002 James Morris <jmorris at intercode.com.au>
-+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch at intel.com>
-  *
-  * This program is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU General Public License as published by the Free
-@@ -218,6 +219,22 @@ static void sha256_transform(u32 *state, const u8 *input)
- 	memset(W, 0, 64 * sizeof(u32));
+diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
+index d8bb44b..8473b9f 100644
+--- a/drivers/cdrom/viocd.c
++++ b/drivers/cdrom/viocd.c
+@@ -289,7 +289,7 @@ static int send_request(struct request *req)
+ 	return 0;
  }
  
-+
-+static void sha224_init(struct crypto_tfm *tfm)
-+{
-+	struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
-+	sctx->state[0] = SHA224_H0;
-+	sctx->state[1] = SHA224_H1;
-+	sctx->state[2] = SHA224_H2;
-+	sctx->state[3] = SHA224_H3;
-+	sctx->state[4] = SHA224_H4;
-+	sctx->state[5] = SHA224_H5;
-+	sctx->state[6] = SHA224_H6;
-+	sctx->state[7] = SHA224_H7;
-+	sctx->count[0] = 0;
-+	sctx->count[1] = 0;
-+}
-+
- static void sha256_init(struct crypto_tfm *tfm)
+-static void viocd_end_request(struct request *req, int uptodate)
++static void viocd_end_request(struct request *req, int error)
  {
- 	struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
-@@ -294,8 +311,17 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
- 	memset(sctx, 0, sizeof(*sctx));
+ 	int nsectors = req->hard_nr_sectors;
+ 
+@@ -302,11 +302,8 @@ static void viocd_end_request(struct request *req, int uptodate)
+ 	if (!nsectors)
+ 		nsectors = 1;
+ 
+-	if (end_that_request_first(req, uptodate, nsectors))
++	if (__blk_end_request(req, error, nsectors << 9))
+ 		BUG();
+-	add_disk_randomness(req->rq_disk);
+-	blkdev_dequeue_request(req);
+-	end_that_request_last(req, uptodate);
  }
  
-+static void sha224_final(struct crypto_tfm *tfm, u8 *hash)
-+{
-+	u8 D[SHA256_DIGEST_SIZE];
-+
-+	sha256_final(tfm, D);
-+
-+	memcpy(hash, D, SHA224_DIGEST_SIZE);
-+	memset(D, 0, SHA256_DIGEST_SIZE);
-+}
+ static int rwreq;
+@@ -317,11 +314,11 @@ static void do_viocd_request(struct request_queue *q)
  
--static struct crypto_alg alg = {
-+static struct crypto_alg sha256 = {
- 	.cra_name	=	"sha256",
- 	.cra_driver_name=	"sha256-generic",
- 	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
-@@ -303,28 +329,58 @@ static struct crypto_alg alg = {
- 	.cra_ctxsize	=	sizeof(struct sha256_ctx),
- 	.cra_module	=	THIS_MODULE,
- 	.cra_alignmask	=	3,
--	.cra_list       =       LIST_HEAD_INIT(alg.cra_list),
-+	.cra_list	=	LIST_HEAD_INIT(sha256.cra_list),
- 	.cra_u		=	{ .digest = {
- 	.dia_digestsize	=	SHA256_DIGEST_SIZE,
--	.dia_init   	= 	sha256_init,
--	.dia_update 	=	sha256_update,
--	.dia_final  	=	sha256_final } }
-+	.dia_init	=	sha256_init,
-+	.dia_update	=	sha256_update,
-+	.dia_final	=	sha256_final } }
-+};
-+
-+static struct crypto_alg sha224 = {
-+	.cra_name	= "sha224",
-+	.cra_driver_name = "sha224-generic",
-+	.cra_flags	= CRYPTO_ALG_TYPE_DIGEST,
-+	.cra_blocksize	= SHA224_BLOCK_SIZE,
-+	.cra_ctxsize	= sizeof(struct sha256_ctx),
-+	.cra_module	= THIS_MODULE,
-+	.cra_alignmask	= 3,
-+	.cra_list	= LIST_HEAD_INIT(sha224.cra_list),
-+	.cra_u		= { .digest = {
-+	.dia_digestsize = SHA224_DIGEST_SIZE,
-+	.dia_init	= sha224_init,
-+	.dia_update	= sha256_update,
-+	.dia_final	= sha224_final } }
- };
+ 	while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
+ 		if (!blk_fs_request(req))
+-			viocd_end_request(req, 0);
++			viocd_end_request(req, -EIO);
+ 		else if (send_request(req) < 0) {
+ 			printk(VIOCD_KERN_WARNING
+ 					"unable to send message to OS/400!");
+-			viocd_end_request(req, 0);
++			viocd_end_request(req, -EIO);
+ 		} else
+ 			rwreq++;
+ 	}
+@@ -532,9 +529,9 @@ return_complete:
+ 					"with rc %d:0x%04X: %s\n",
+ 					req, event->xRc,
+ 					bevent->sub_result, err->msg);
+-			viocd_end_request(req, 0);
++			viocd_end_request(req, -EIO);
+ 		} else
+-			viocd_end_request(req, 1);
++			viocd_end_request(req, 0);
  
- static int __init init(void)
- {
--	return crypto_register_alg(&alg);
-+	int ret = 0;
-+
-+	ret = crypto_register_alg(&sha224);
-+
-+	if (ret < 0)
-+		return ret;
-+
-+	ret = crypto_register_alg(&sha256);
+ 		/* restart handling of incoming requests */
+ 		spin_unlock_irqrestore(&viocd_reqlock, flags);
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 2e3a0d4..4666295 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -373,6 +373,16 @@ config ISTALLION
+ 	  To compile this driver as a module, choose M here: the
+ 	  module will be called istallion.
+ 
++config NOZOMI
++	tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
++	depends on PCI && EXPERIMENTAL
++	help
++	  If you have an HSDPA Broadband Wireless Data Card -
++	  Globe Trotter PCMCIA card, say Y here.
 +
-+	if (ret < 0)
-+		crypto_unregister_alg(&sha224);
++	  To compile this driver as a module, choose M here: the module
++	  will be called nozomi.
 +
-+	return ret;
+ config A2232
+ 	tristate "Commodore A2232 serial support (EXPERIMENTAL)"
+ 	depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
+diff --git a/drivers/char/Makefile b/drivers/char/Makefile
+index 07304d5..96fc01e 100644
+--- a/drivers/char/Makefile
++++ b/drivers/char/Makefile
+@@ -26,6 +26,7 @@ obj-$(CONFIG_SERIAL167)		+= serial167.o
+ obj-$(CONFIG_CYCLADES)		+= cyclades.o
+ obj-$(CONFIG_STALLION)		+= stallion.o
+ obj-$(CONFIG_ISTALLION)		+= istallion.o
++obj-$(CONFIG_NOZOMI)		+= nozomi.o
+ obj-$(CONFIG_DIGIEPCA)		+= epca.o
+ obj-$(CONFIG_SPECIALIX)		+= specialix.o
+ obj-$(CONFIG_MOXA_INTELLIO)	+= moxa.o
+diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
+index aa5ddb7..1ffb381 100644
+--- a/drivers/char/agp/ali-agp.c
++++ b/drivers/char/agp/ali-agp.c
+@@ -145,7 +145,6 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
+ 	void *addr = agp_generic_alloc_page(agp_bridge);
+ 	u32 temp;
+ 
+-	global_flush_tlb();
+ 	if (!addr)
+ 		return NULL;
+ 
+@@ -162,7 +161,6 @@ static void ali_destroy_page(void * addr, int flags)
+ 		if (flags & AGP_PAGE_DESTROY_UNMAP) {
+ 			global_cache_flush();	/* is this really needed?  --hch */
+ 			agp_generic_destroy_page(addr, flags);
+-			global_flush_tlb();
+ 		} else
+ 			agp_generic_destroy_page(addr, flags);
+ 	}
+diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
+index 832ded2..2720882 100644
+--- a/drivers/char/agp/backend.c
++++ b/drivers/char/agp/backend.c
+@@ -147,7 +147,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
+ 			printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
+ 			return -ENOMEM;
+ 		}
+-		flush_agp_mappings();
+ 
+ 		bridge->scratch_page_real = virt_to_gart(addr);
+ 		bridge->scratch_page =
+@@ -191,7 +190,6 @@ err_out:
+ 	if (bridge->driver->needs_scratch_page) {
+ 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
+ 						 AGP_PAGE_DESTROY_UNMAP);
+-		flush_agp_mappings();
+ 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
+ 						 AGP_PAGE_DESTROY_FREE);
+ 	}
+@@ -219,7 +217,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
+ 	    bridge->driver->needs_scratch_page) {
+ 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
+ 						 AGP_PAGE_DESTROY_UNMAP);
+-		flush_agp_mappings();
+ 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
+ 						 AGP_PAGE_DESTROY_FREE);
+ 	}
+diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
+index 64b2f6d..1a4674c 100644
+--- a/drivers/char/agp/generic.c
++++ b/drivers/char/agp/generic.c
+@@ -197,7 +197,6 @@ void agp_free_memory(struct agp_memory *curr)
+ 		for (i = 0; i < curr->page_count; i++) {
+ 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
+ 		}
+-		flush_agp_mappings();
+ 		for (i = 0; i < curr->page_count; i++) {
+ 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
+ 		}
+@@ -267,8 +266,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
+ 	}
+ 	new->bridge = bridge;
+ 
+-	flush_agp_mappings();
+-
+ 	return new;
  }
+ EXPORT_SYMBOL(agp_allocate_memory);
+diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
+index e72a83e..76f581c 100644
+--- a/drivers/char/agp/i460-agp.c
++++ b/drivers/char/agp/i460-agp.c
+@@ -527,7 +527,6 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
  
- static void __exit fini(void)
+ 	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
+ 		page = agp_generic_alloc_page(agp_bridge);
+-		global_flush_tlb();
+ 	} else
+ 		/* Returning NULL would cause problems */
+ 		/* AK: really dubious code. */
+@@ -539,7 +538,6 @@ static void i460_destroy_page (void *page, int flags)
  {
--	crypto_unregister_alg(&alg);
-+	crypto_unregister_alg(&sha224);
-+	crypto_unregister_alg(&sha256);
+ 	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
+ 		agp_generic_destroy_page(page, flags);
+-		global_flush_tlb();
+ 	}
  }
  
- module_init(init);
- module_exit(fini);
- 
- MODULE_LICENSE("GPL");
--MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
-+MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index 03eac1e..189efb6 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -210,13 +210,11 @@ static void *i8xx_alloc_pages(void)
+ 	if (page == NULL)
+ 		return NULL;
  
-+MODULE_ALIAS("sha224");
- MODULE_ALIAS("sha256");
-diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
-index 24141fb..1ab8c01 100644
---- a/crypto/tcrypt.c
-+++ b/crypto/tcrypt.c
-@@ -6,12 +6,16 @@
-  *
-  * Copyright (c) 2002 James Morris <jmorris at intercode.com.au>
-  * Copyright (c) 2002 Jean-Francois Dive <jef at linuxbe.org>
-+ * Copyright (c) 2007 Nokia Siemens Networks
-  *
-  * This program is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU General Public License as published by the Free
-  * Software Foundation; either version 2 of the License, or (at your option)
-  * any later version.
-  *
-+ * 2007-11-13 Added GCM tests
-+ * 2007-11-13 Added AEAD support
-+ * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests
-  * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
-  * 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk at vantronix.net>)
-  * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
-@@ -71,22 +75,23 @@ static unsigned int sec;
+-	if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
+-		change_page_attr(page, 4, PAGE_KERNEL);
+-		global_flush_tlb();
++	if (set_pages_uc(page, 4) < 0) {
++		set_pages_wb(page, 4);
+ 		__free_pages(page, 2);
+ 		return NULL;
+ 	}
+-	global_flush_tlb();
+ 	get_page(page);
+ 	atomic_inc(&agp_bridge->current_memory_agp);
+ 	return page_address(page);
+@@ -230,8 +228,7 @@ static void i8xx_destroy_pages(void *addr)
+ 		return;
  
- static int mode;
- static char *xbuf;
-+static char *axbuf;
- static char *tvmem;
+ 	page = virt_to_page(addr);
+-	change_page_attr(page, 4, PAGE_KERNEL);
+-	global_flush_tlb();
++	set_pages_wb(page, 4);
+ 	put_page(page);
+ 	__free_pages(page, 2);
+ 	atomic_dec(&agp_bridge->current_memory_agp);
+@@ -341,7 +338,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
  
- static char *check[] = {
--	"des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish",
--	"twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6",
-+	"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
-+	"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
-+	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
- 	"arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
- 	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
--	"camellia", "seed", NULL
-+	"camellia", "seed", "salsa20", "lzo", NULL
- };
+ 	switch (pg_count) {
+ 	case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
+-		global_flush_tlb();
+ 		break;
+ 	case 4:
+ 		/* kludge to get 4 physical pages for ARGB cursor */
+@@ -404,7 +400,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
+ 		else {
+ 			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
+ 							     AGP_PAGE_DESTROY_UNMAP);
+-			global_flush_tlb();
+ 			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
+ 							     AGP_PAGE_DESTROY_FREE);
+ 		}
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index 4c16778..465ad35 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -600,63 +600,6 @@ static int hpet_is_known(struct hpet_data *hdp)
+ 	return 0;
+ }
  
- static void hexdump(unsigned char *buf, unsigned int len)
- {
--	while (len--)
--		printk("%02x", *buf++);
+-EXPORT_SYMBOL(hpet_alloc);
+-EXPORT_SYMBOL(hpet_register);
+-EXPORT_SYMBOL(hpet_unregister);
+-EXPORT_SYMBOL(hpet_control);
 -
--	printk("\n");
-+	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
-+			16, 1,
-+			buf, len, false);
+-int hpet_register(struct hpet_task *tp, int periodic)
+-{
+-	unsigned int i;
+-	u64 mask;
+-	struct hpet_timer __iomem *timer;
+-	struct hpet_dev *devp;
+-	struct hpets *hpetp;
+-
+-	switch (periodic) {
+-	case 1:
+-		mask = Tn_PER_INT_CAP_MASK;
+-		break;
+-	case 0:
+-		mask = 0;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	tp->ht_opaque = NULL;
+-
+-	spin_lock_irq(&hpet_task_lock);
+-	spin_lock(&hpet_lock);
+-
+-	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
+-		for (timer = hpetp->hp_hpet->hpet_timers, i = 0;
+-		     i < hpetp->hp_ntimer; i++, timer++) {
+-			if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK)
+-			    != mask)
+-				continue;
+-
+-			devp = &hpetp->hp_dev[i];
+-
+-			if (devp->hd_flags & HPET_OPEN || devp->hd_task) {
+-				devp = NULL;
+-				continue;
+-			}
+-
+-			tp->ht_opaque = devp;
+-			devp->hd_task = tp;
+-			break;
+-		}
+-
+-	spin_unlock(&hpet_lock);
+-	spin_unlock_irq(&hpet_task_lock);
+-
+-	if (tp->ht_opaque)
+-		return 0;
+-	else
+-		return -EBUSY;
+-}
+-
+ static inline int hpet_tpcheck(struct hpet_task *tp)
+ {
+ 	struct hpet_dev *devp;
+@@ -706,24 +649,6 @@ int hpet_unregister(struct hpet_task *tp)
+ 	return 0;
  }
  
- static void tcrypt_complete(struct crypto_async_request *req, int err)
-@@ -215,6 +220,238 @@ out:
- 	crypto_free_hash(tfm);
- }
+-int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
+-{
+-	struct hpet_dev *devp;
+-	int err;
+-
+-	if ((err = hpet_tpcheck(tp)))
+-		return err;
+-
+-	spin_lock_irq(&hpet_lock);
+-	devp = tp->ht_opaque;
+-	if (devp->hd_task != tp) {
+-		spin_unlock_irq(&hpet_lock);
+-		return -ENXIO;
+-	}
+-	spin_unlock_irq(&hpet_lock);
+-	return hpet_ioctl_common(devp, cmd, arg, 1);
+-}
+-
+ static ctl_table hpet_table[] = {
+ 	{
+ 	 .ctl_name = CTL_UNNUMBERED,
+@@ -806,14 +731,14 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
  
-+static void test_aead(char *algo, int enc, struct aead_testvec *template,
-+		      unsigned int tcount)
-+{
-+	unsigned int ret, i, j, k, temp;
-+	unsigned int tsize;
-+	char *q;
-+	struct crypto_aead *tfm;
-+	char *key;
-+	struct aead_testvec *aead_tv;
-+	struct aead_request *req;
-+	struct scatterlist sg[8];
-+	struct scatterlist asg[8];
-+	const char *e;
-+	struct tcrypt_result result;
-+	unsigned int authsize;
-+
-+	if (enc == ENCRYPT)
-+		e = "encryption";
-+	else
-+		e = "decryption";
-+
-+	printk(KERN_INFO "\ntesting %s %s\n", algo, e);
-+
-+	tsize = sizeof(struct aead_testvec);
-+	tsize *= tcount;
-+
-+	if (tsize > TVMEMSIZE) {
-+		printk(KERN_INFO "template (%u) too big for tvmem (%u)\n",
-+		       tsize, TVMEMSIZE);
-+		return;
-+	}
-+
-+	memcpy(tvmem, template, tsize);
-+	aead_tv = (void *)tvmem;
-+
-+	init_completion(&result.completion);
-+
-+	tfm = crypto_alloc_aead(algo, 0, 0);
-+
-+	if (IS_ERR(tfm)) {
-+		printk(KERN_INFO "failed to load transform for %s: %ld\n",
-+		       algo, PTR_ERR(tfm));
-+		return;
-+	}
-+
-+	req = aead_request_alloc(tfm, GFP_KERNEL);
-+	if (!req) {
-+		printk(KERN_INFO "failed to allocate request for %s\n", algo);
-+		goto out;
-+	}
-+
-+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-+				  tcrypt_complete, &result);
-+
-+	for (i = 0, j = 0; i < tcount; i++) {
-+		if (!aead_tv[i].np) {
-+			printk(KERN_INFO "test %u (%d bit key):\n",
-+			       ++j, aead_tv[i].klen * 8);
-+
-+			crypto_aead_clear_flags(tfm, ~0);
-+			if (aead_tv[i].wk)
-+				crypto_aead_set_flags(
-+					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-+			key = aead_tv[i].key;
-+
-+			ret = crypto_aead_setkey(tfm, key,
-+						 aead_tv[i].klen);
-+			if (ret) {
-+				printk(KERN_INFO "setkey() failed flags=%x\n",
-+				       crypto_aead_get_flags(tfm));
-+
-+				if (!aead_tv[i].fail)
-+					goto out;
-+			}
-+
-+			authsize = abs(aead_tv[i].rlen - aead_tv[i].ilen);
-+			ret = crypto_aead_setauthsize(tfm, authsize);
-+			if (ret) {
-+				printk(KERN_INFO
-+				       "failed to set authsize = %u\n",
-+				       authsize);
-+				goto out;
-+			}
-+
-+			sg_init_one(&sg[0], aead_tv[i].input,
-+				    aead_tv[i].ilen + (enc ? authsize : 0));
-+
-+			sg_init_one(&asg[0], aead_tv[i].assoc,
-+				    aead_tv[i].alen);
-+
-+			aead_request_set_crypt(req, sg, sg,
-+					       aead_tv[i].ilen,
-+					       aead_tv[i].iv);
-+
-+			aead_request_set_assoc(req, asg, aead_tv[i].alen);
+ int hpet_alloc(struct hpet_data *hdp)
+ {
+-	u64 cap, mcfg;
++	u64 cap, mcfg, hpet_config;
+ 	struct hpet_dev *devp;
+-	u32 i, ntimer;
++	u32 i, ntimer, irq;
+ 	struct hpets *hpetp;
+ 	size_t siz;
+ 	struct hpet __iomem *hpet;
+ 	static struct hpets *last = NULL;
+-	unsigned long period;
++	unsigned long period, irq_bitmap;
+ 	unsigned long long temp;
+ 
+ 	/*
+@@ -840,11 +765,47 @@ int hpet_alloc(struct hpet_data *hdp)
+ 	hpetp->hp_hpet_phys = hdp->hd_phys_address;
+ 
+ 	hpetp->hp_ntimer = hdp->hd_nirqs;
++	hpet = hpetp->hp_hpet;
+ 
+-	for (i = 0; i < hdp->hd_nirqs; i++)
+-		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
++	/* Assign IRQs statically for legacy devices */
++	hpetp->hp_dev[0].hd_hdwirq = hdp->hd_irq[0];
++	hpetp->hp_dev[1].hd_hdwirq = hdp->hd_irq[1];
+ 
+-	hpet = hpetp->hp_hpet;
++	/* Assign IRQs dynamically for the others */
++	for (i = 2, devp = &hpetp->hp_dev[2]; i < hdp->hd_nirqs; i++, devp++) {
++		struct hpet_timer __iomem *timer;
 +
-+			ret = enc ?
-+				crypto_aead_encrypt(req) :
-+				crypto_aead_decrypt(req);
++		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
 +
-+			switch (ret) {
-+			case 0:
-+				break;
-+			case -EINPROGRESS:
-+			case -EBUSY:
-+				ret = wait_for_completion_interruptible(
-+					&result.completion);
-+				if (!ret && !(ret = result.err)) {
-+					INIT_COMPLETION(result.completion);
-+					break;
-+				}
-+				/* fall through */
-+			default:
-+				printk(KERN_INFO "%s () failed err=%d\n",
-+				       e, -ret);
-+				goto out;
-+			}
++		/* Check if there's already an IRQ assigned to the timer */
++		if (hdp->hd_irq[i]) {
++			hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
++			continue;
++		}
 +
-+			q = kmap(sg_page(&sg[0])) + sg[0].offset;
-+			hexdump(q, aead_tv[i].rlen);
++		hpet_config = readq(&timer->hpet_config);
++		irq_bitmap = (hpet_config & Tn_INT_ROUTE_CAP_MASK)
++			>> Tn_INT_ROUTE_CAP_SHIFT;
++		if (!irq_bitmap)
++			irq = 0;        /* No valid IRQ Assignable */
++		else {
++			irq = find_first_bit(&irq_bitmap, 32);
++			do {
++				hpet_config |= irq << Tn_INT_ROUTE_CNF_SHIFT;
++				writeq(hpet_config, &timer->hpet_config);
 +
-+			printk(KERN_INFO "enc/dec: %s\n",
-+			       memcmp(q, aead_tv[i].result,
-+				      aead_tv[i].rlen) ? "fail" : "pass");
++				/*
++				 * Verify whether we have written a valid
++				 * IRQ number by reading it back again
++				 */
++				hpet_config = readq(&timer->hpet_config);
++				if (irq == (hpet_config & Tn_INT_ROUTE_CNF_MASK)
++						>> Tn_INT_ROUTE_CNF_SHIFT)
++					break;  /* Success */
++			} while ((irq = (find_next_bit(&irq_bitmap, 32, irq))));
 +		}
++		hpetp->hp_dev[i].hd_hdwirq = irq;
 +	}
+ 
+ 	cap = readq(&hpet->hpet_cap);
+ 
+@@ -875,7 +836,8 @@ int hpet_alloc(struct hpet_data *hdp)
+ 		hpetp->hp_which, hdp->hd_phys_address,
+ 		hpetp->hp_ntimer > 1 ? "s" : "");
+ 	for (i = 0; i < hpetp->hp_ntimer; i++)
+-		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
++		printk("%s %d", i > 0 ? "," : "",
++				hpetp->hp_dev[i].hd_hdwirq);
+ 	printk("\n");
+ 
+ 	printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
+diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
+index 8252f86..480fae2 100644
+--- a/drivers/char/hvc_console.c
++++ b/drivers/char/hvc_console.c
+@@ -27,7 +27,7 @@
+ #include <linux/init.h>
+ #include <linux/kbd_kern.h>
+ #include <linux/kernel.h>
+-#include <linux/kobject.h>
++#include <linux/kref.h>
+ #include <linux/kthread.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
+@@ -89,7 +89,7 @@ struct hvc_struct {
+ 	int irq_requested;
+ 	int irq;
+ 	struct list_head next;
+-	struct kobject kobj; /* ref count & hvc_struct lifetime */
++	struct kref kref; /* ref count & hvc_struct lifetime */
+ };
+ 
+ /* dynamic list of hvc_struct instances */
+@@ -110,7 +110,7 @@ static int last_hvc = -1;
+ 
+ /*
+  * Do not call this function with either the hvc_structs_lock or the hvc_struct
+- * lock held.  If successful, this function increments the kobject reference
++ * lock held.  If successful, this function increments the kref reference
+  * count against the target hvc_struct so it should be released when finished.
+  */
+ static struct hvc_struct *hvc_get_by_index(int index)
+@@ -123,7 +123,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
+ 	list_for_each_entry(hp, &hvc_structs, next) {
+ 		spin_lock_irqsave(&hp->lock, flags);
+ 		if (hp->index == index) {
+-			kobject_get(&hp->kobj);
++			kref_get(&hp->kref);
+ 			spin_unlock_irqrestore(&hp->lock, flags);
+ 			spin_unlock(&hvc_structs_lock);
+ 			return hp;
+@@ -242,6 +242,23 @@ static int __init hvc_console_init(void)
+ }
+ console_initcall(hvc_console_init);
+ 
++/* callback when the kref ref count reaches zero. */
++static void destroy_hvc_struct(struct kref *kref)
++{
++	struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref);
++	unsigned long flags;
 +
-+	printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e);
-+	memset(xbuf, 0, XBUFSIZE);
-+	memset(axbuf, 0, XBUFSIZE);
-+
-+	for (i = 0, j = 0; i < tcount; i++) {
-+		if (aead_tv[i].np) {
-+			printk(KERN_INFO "test %u (%d bit key):\n",
-+			       ++j, aead_tv[i].klen * 8);
-+
-+			crypto_aead_clear_flags(tfm, ~0);
-+			if (aead_tv[i].wk)
-+				crypto_aead_set_flags(
-+					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-+			key = aead_tv[i].key;
-+
-+			ret = crypto_aead_setkey(tfm, key, aead_tv[i].klen);
-+			if (ret) {
-+				printk(KERN_INFO "setkey() failed flags=%x\n",
-+				       crypto_aead_get_flags(tfm));
-+
-+				if (!aead_tv[i].fail)
-+					goto out;
-+			}
-+
-+			sg_init_table(sg, aead_tv[i].np);
-+			for (k = 0, temp = 0; k < aead_tv[i].np; k++) {
-+				memcpy(&xbuf[IDX[k]],
-+				       aead_tv[i].input + temp,
-+				       aead_tv[i].tap[k]);
-+				temp += aead_tv[i].tap[k];
-+				sg_set_buf(&sg[k], &xbuf[IDX[k]],
-+					   aead_tv[i].tap[k]);
-+			}
-+
-+			authsize = abs(aead_tv[i].rlen - aead_tv[i].ilen);
-+			ret = crypto_aead_setauthsize(tfm, authsize);
-+			if (ret) {
-+				printk(KERN_INFO
-+				       "failed to set authsize = %u\n",
-+				       authsize);
-+				goto out;
-+			}
-+
-+			if (enc)
-+				sg[k - 1].length += authsize;
-+
-+			sg_init_table(asg, aead_tv[i].anp);
-+			for (k = 0, temp = 0; k < aead_tv[i].anp; k++) {
-+				memcpy(&axbuf[IDX[k]],
-+				       aead_tv[i].assoc + temp,
-+				       aead_tv[i].atap[k]);
-+				temp += aead_tv[i].atap[k];
-+				sg_set_buf(&asg[k], &axbuf[IDX[k]],
-+					   aead_tv[i].atap[k]);
-+			}
-+
-+			aead_request_set_crypt(req, sg, sg,
-+					       aead_tv[i].ilen,
-+					       aead_tv[i].iv);
-+
-+			aead_request_set_assoc(req, asg, aead_tv[i].alen);
-+
-+			ret = enc ?
-+				crypto_aead_encrypt(req) :
-+				crypto_aead_decrypt(req);
-+
-+			switch (ret) {
-+			case 0:
-+				break;
-+			case -EINPROGRESS:
-+			case -EBUSY:
-+				ret = wait_for_completion_interruptible(
-+					&result.completion);
-+				if (!ret && !(ret = result.err)) {
-+					INIT_COMPLETION(result.completion);
-+					break;
-+				}
-+				/* fall through */
-+			default:
-+				printk(KERN_INFO "%s () failed err=%d\n",
-+				       e, -ret);
-+				goto out;
-+			}
++	spin_lock(&hvc_structs_lock);
 +
-+			for (k = 0, temp = 0; k < aead_tv[i].np; k++) {
-+				printk(KERN_INFO "page %u\n", k);
-+				q = kmap(sg_page(&sg[k])) + sg[k].offset;
-+				hexdump(q, aead_tv[i].tap[k]);
-+				printk(KERN_INFO "%s\n",
-+				       memcmp(q, aead_tv[i].result + temp,
-+					      aead_tv[i].tap[k] -
-+					      (k < aead_tv[i].np - 1 || enc ?
-+					       0 : authsize)) ?
-+				       "fail" : "pass");
++	spin_lock_irqsave(&hp->lock, flags);
++	list_del(&(hp->next));
++	spin_unlock_irqrestore(&hp->lock, flags);
 +
-+				temp += aead_tv[i].tap[k];
-+			}
-+		}
-+	}
++	spin_unlock(&hvc_structs_lock);
 +
-+out:
-+	crypto_free_aead(tfm);
-+	aead_request_free(req);
++	kfree(hp);
 +}
 +
- static void test_cipher(char *algo, int enc,
- 			struct cipher_testvec *template, unsigned int tcount)
- {
-@@ -237,15 +474,11 @@ static void test_cipher(char *algo, int enc,
- 	printk("\ntesting %s %s\n", algo, e);
+ /*
+  * hvc_instantiate() is an early console discovery method which locates
+  * consoles * prior to the vio subsystem discovering them.  Hotplugged
+@@ -261,7 +278,7 @@ int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
+ 	/* make sure no no tty has been registered in this index */
+ 	hp = hvc_get_by_index(index);
+ 	if (hp) {
+-		kobject_put(&hp->kobj);
++		kref_put(&hp->kref, destroy_hvc_struct);
+ 		return -1;
+ 	}
  
- 	tsize = sizeof (struct cipher_testvec);
--	tsize *= tcount;
+@@ -318,9 +335,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
+ 	unsigned long flags;
+ 	int irq = 0;
+ 	int rc = 0;
+-	struct kobject *kobjp;
+ 
+-	/* Auto increments kobject reference if found. */
++	/* Auto increments kref reference if found. */
+ 	if (!(hp = hvc_get_by_index(tty->index)))
+ 		return -ENODEV;
+ 
+@@ -341,8 +357,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
+ 	if (irq)
+ 		hp->irq_requested = 1;
+ 
+-	kobjp = &hp->kobj;
 -
- 	if (tsize > TVMEMSIZE) {
- 		printk("template (%u) too big for tvmem (%u)\n", tsize,
- 		       TVMEMSIZE);
+ 	spin_unlock_irqrestore(&hp->lock, flags);
+ 	/* check error, fallback to non-irq */
+ 	if (irq)
+@@ -352,7 +366,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
+ 	 * If the request_irq() fails and we return an error.  The tty layer
+ 	 * will call hvc_close() after a failed open but we don't want to clean
+ 	 * up there so we'll clean up here and clear out the previously set
+-	 * tty fields and return the kobject reference.
++	 * tty fields and return the kref reference.
+ 	 */
+ 	if (rc) {
+ 		spin_lock_irqsave(&hp->lock, flags);
+@@ -360,7 +374,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
+ 		hp->irq_requested = 0;
+ 		spin_unlock_irqrestore(&hp->lock, flags);
+ 		tty->driver_data = NULL;
+-		kobject_put(kobjp);
++		kref_put(&hp->kref, destroy_hvc_struct);
+ 		printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
+ 	}
+ 	/* Force wakeup of the polling thread */
+@@ -372,7 +386,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
+ static void hvc_close(struct tty_struct *tty, struct file * filp)
+ {
+ 	struct hvc_struct *hp;
+-	struct kobject *kobjp;
+ 	int irq = 0;
+ 	unsigned long flags;
+ 
+@@ -382,7 +395,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+ 	/*
+ 	 * No driver_data means that this close was issued after a failed
+ 	 * hvc_open by the tty layer's release_dev() function and we can just
+-	 * exit cleanly because the kobject reference wasn't made.
++	 * exit cleanly because the kref reference wasn't made.
+ 	 */
+ 	if (!tty->driver_data)
+ 		return;
+@@ -390,7 +403,6 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+ 	hp = tty->driver_data;
+ 	spin_lock_irqsave(&hp->lock, flags);
+ 
+-	kobjp = &hp->kobj;
+ 	if (--hp->count == 0) {
+ 		if (hp->irq_requested)
+ 			irq = hp->irq;
+@@ -417,7 +429,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+ 		spin_unlock_irqrestore(&hp->lock, flags);
+ 	}
+ 
+-	kobject_put(kobjp);
++	kref_put(&hp->kref, destroy_hvc_struct);
+ }
+ 
+ static void hvc_hangup(struct tty_struct *tty)
+@@ -426,7 +438,6 @@ static void hvc_hangup(struct tty_struct *tty)
+ 	unsigned long flags;
+ 	int irq = 0;
+ 	int temp_open_count;
+-	struct kobject *kobjp;
+ 
+ 	if (!hp)
  		return;
+@@ -443,7 +454,6 @@ static void hvc_hangup(struct tty_struct *tty)
+ 		return;
+ 	}
+ 
+-	kobjp = &hp->kobj;
+ 	temp_open_count = hp->count;
+ 	hp->count = 0;
+ 	hp->n_outbuf = 0;
+@@ -457,7 +467,7 @@ static void hvc_hangup(struct tty_struct *tty)
+ 		free_irq(irq, hp);
+ 	while(temp_open_count) {
+ 		--temp_open_count;
+-		kobject_put(kobjp);
++		kref_put(&hp->kref, destroy_hvc_struct);
  	}
+ }
+ 
+@@ -729,27 +739,6 @@ static const struct tty_operations hvc_ops = {
+ 	.chars_in_buffer = hvc_chars_in_buffer,
+ };
+ 
+-/* callback when the kboject ref count reaches zero. */
+-static void destroy_hvc_struct(struct kobject *kobj)
+-{
+-	struct hvc_struct *hp = container_of(kobj, struct hvc_struct, kobj);
+-	unsigned long flags;
 -
--	memcpy(tvmem, template, tsize);
- 	cipher_tv = (void *)tvmem;
+-	spin_lock(&hvc_structs_lock);
+-
+-	spin_lock_irqsave(&hp->lock, flags);
+-	list_del(&(hp->next));
+-	spin_unlock_irqrestore(&hp->lock, flags);
+-
+-	spin_unlock(&hvc_structs_lock);
+-
+-	kfree(hp);
+-}
+-
+-static struct kobj_type hvc_kobj_type = {
+-	.release = destroy_hvc_struct,
+-};
+-
+ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
+ 					struct hv_ops *ops, int outbuf_size)
+ {
+@@ -776,8 +765,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
+ 	hp->outbuf_size = outbuf_size;
+ 	hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
  
- 	init_completion(&result.completion);
-@@ -269,33 +502,34 @@ static void test_cipher(char *algo, int enc,
+-	kobject_init(&hp->kobj);
+-	hp->kobj.ktype = &hvc_kobj_type;
++	kref_init(&hp->kref);
  
- 	j = 0;
- 	for (i = 0; i < tcount; i++) {
--		if (!(cipher_tv[i].np)) {
-+		memcpy(cipher_tv, &template[i], tsize);
-+		if (!(cipher_tv->np)) {
- 			j++;
- 			printk("test %u (%d bit key):\n",
--			j, cipher_tv[i].klen * 8);
-+			j, cipher_tv->klen * 8);
+ 	spin_lock_init(&hp->lock);
+ 	spin_lock(&hvc_structs_lock);
+@@ -806,12 +794,10 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
+ int __devexit hvc_remove(struct hvc_struct *hp)
+ {
+ 	unsigned long flags;
+-	struct kobject *kobjp;
+ 	struct tty_struct *tty;
  
- 			crypto_ablkcipher_clear_flags(tfm, ~0);
--			if (cipher_tv[i].wk)
-+			if (cipher_tv->wk)
- 				crypto_ablkcipher_set_flags(
- 					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
--			key = cipher_tv[i].key;
-+			key = cipher_tv->key;
+ 	spin_lock_irqsave(&hp->lock, flags);
+ 	tty = hp->tty;
+-	kobjp = &hp->kobj;
  
- 			ret = crypto_ablkcipher_setkey(tfm, key,
--						       cipher_tv[i].klen);
-+						       cipher_tv->klen);
- 			if (ret) {
- 				printk("setkey() failed flags=%x\n",
- 				       crypto_ablkcipher_get_flags(tfm));
+ 	if (hp->index < MAX_NR_HVC_CONSOLES)
+ 		vtermnos[hp->index] = -1;
+@@ -821,12 +807,12 @@ int __devexit hvc_remove(struct hvc_struct *hp)
+ 	spin_unlock_irqrestore(&hp->lock, flags);
  
--				if (!cipher_tv[i].fail)
-+				if (!cipher_tv->fail)
- 					goto out;
- 			}
+ 	/*
+-	 * We 'put' the instance that was grabbed when the kobject instance
+-	 * was initialized using kobject_init().  Let the last holder of this
+-	 * kobject cause it to be removed, which will probably be the tty_hangup
++	 * We 'put' the instance that was grabbed when the kref instance
++	 * was initialized using kref_init().  Let the last holder of this
++	 * kref cause it to be removed, which will probably be the tty_hangup
+ 	 * below.
+ 	 */
+-	kobject_put(kobjp);
++	kref_put(&hp->kref, destroy_hvc_struct);
  
--			sg_init_one(&sg[0], cipher_tv[i].input,
--				    cipher_tv[i].ilen);
-+			sg_init_one(&sg[0], cipher_tv->input,
-+				    cipher_tv->ilen);
+ 	/*
+ 	 * This function call will auto chain call hvc_hangup.  The tty should
+diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
+index 69d8866..fd75590 100644
+--- a/drivers/char/hvcs.c
++++ b/drivers/char/hvcs.c
+@@ -57,11 +57,7 @@
+  * rescanning partner information upon a user's request.
+  *
+  * Each vty-server, prior to being exposed to this driver is reference counted
+- * using the 2.6 Linux kernel kobject construct.  This kobject is also used by
+- * the vio bus to provide a vio device sysfs entry that this driver attaches
+- * device specific attributes to, including partner information.  The vio bus
+- * framework also provides a sysfs entry for each vio driver.  The hvcs driver
+- * provides driver attributes in this entry.
++ * using the 2.6 Linux kernel kref construct.
+  *
+  * For direction on installation and usage of this driver please reference
+  * Documentation/powerpc/hvcs.txt.
+@@ -71,7 +67,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+-#include <linux/kobject.h>
++#include <linux/kref.h>
+ #include <linux/kthread.h>
+ #include <linux/list.h>
+ #include <linux/major.h>
+@@ -293,12 +289,12 @@ struct hvcs_struct {
+ 	int chars_in_buffer;
  
- 			ablkcipher_request_set_crypt(req, sg, sg,
--						     cipher_tv[i].ilen,
--						     cipher_tv[i].iv);
-+						     cipher_tv->ilen,
-+						     cipher_tv->iv);
+ 	/*
+-	 * Any variable below the kobject is valid before a tty is connected and
++	 * Any variable below the kref is valid before a tty is connected and
+ 	 * stays valid after the tty is disconnected.  These shouldn't be
+ 	 * whacked until the koject refcount reaches zero though some entries
+ 	 * may be changed via sysfs initiatives.
+ 	 */
+-	struct kobject kobj; /* ref count & hvcs_struct lifetime */
++	struct kref kref; /* ref count & hvcs_struct lifetime */
+ 	int connected; /* is the vty-server currently connected to a vty? */
+ 	uint32_t p_unit_address; /* partner unit address */
+ 	uint32_t p_partition_ID; /* partner partition ID */
+@@ -307,8 +303,8 @@ struct hvcs_struct {
+ 	struct vio_dev *vdev;
+ };
  
- 			ret = enc ?
- 				crypto_ablkcipher_encrypt(req) :
-@@ -319,11 +553,11 @@ static void test_cipher(char *algo, int enc,
- 			}
+-/* Required to back map a kobject to its containing object */
+-#define from_kobj(kobj) container_of(kobj, struct hvcs_struct, kobj)
++/* Required to back map a kref to its containing object */
++#define from_kref(k) container_of(k, struct hvcs_struct, kref)
  
- 			q = kmap(sg_page(&sg[0])) + sg[0].offset;
--			hexdump(q, cipher_tv[i].rlen);
-+			hexdump(q, cipher_tv->rlen);
+ static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs);
+ static DEFINE_SPINLOCK(hvcs_structs_lock);
+@@ -334,7 +330,6 @@ static void hvcs_partner_free(struct hvcs_struct *hvcsd);
+ static int hvcs_enable_device(struct hvcs_struct *hvcsd,
+ 		uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
  
- 			printk("%s\n",
--			       memcmp(q, cipher_tv[i].result,
--				      cipher_tv[i].rlen) ? "fail" : "pass");
-+			       memcmp(q, cipher_tv->result,
-+				      cipher_tv->rlen) ? "fail" : "pass");
- 		}
- 	}
+-static void destroy_hvcs_struct(struct kobject *kobj);
+ static int hvcs_open(struct tty_struct *tty, struct file *filp);
+ static void hvcs_close(struct tty_struct *tty, struct file *filp);
+ static void hvcs_hangup(struct tty_struct * tty);
+@@ -703,10 +698,10 @@ static void hvcs_return_index(int index)
+ 		hvcs_index_list[index] = -1;
+ }
  
-@@ -332,41 +566,42 @@ static void test_cipher(char *algo, int enc,
+-/* callback when the kboject ref count reaches zero */
+-static void destroy_hvcs_struct(struct kobject *kobj)
++/* callback when the kref ref count reaches zero */
++static void destroy_hvcs_struct(struct kref *kref)
+ {
+-	struct hvcs_struct *hvcsd = from_kobj(kobj);
++	struct hvcs_struct *hvcsd = from_kref(kref);
+ 	struct vio_dev *vdev;
+ 	unsigned long flags;
  
- 	j = 0;
- 	for (i = 0; i < tcount; i++) {
--		if (cipher_tv[i].np) {
-+		memcpy(cipher_tv, &template[i], tsize);
-+		if (cipher_tv->np) {
- 			j++;
- 			printk("test %u (%d bit key):\n",
--			j, cipher_tv[i].klen * 8);
-+			j, cipher_tv->klen * 8);
+@@ -743,10 +738,6 @@ static void destroy_hvcs_struct(struct kobject *kobj)
+ 	kfree(hvcsd);
+ }
  
- 			crypto_ablkcipher_clear_flags(tfm, ~0);
--			if (cipher_tv[i].wk)
-+			if (cipher_tv->wk)
- 				crypto_ablkcipher_set_flags(
- 					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
--			key = cipher_tv[i].key;
-+			key = cipher_tv->key;
+-static struct kobj_type hvcs_kobj_type = {
+-	.release = destroy_hvcs_struct,
+-};
+-
+ static int hvcs_get_index(void)
+ {
+ 	int i;
+@@ -791,9 +782,7 @@ static int __devinit hvcs_probe(
  
- 			ret = crypto_ablkcipher_setkey(tfm, key,
--						       cipher_tv[i].klen);
-+						       cipher_tv->klen);
- 			if (ret) {
- 				printk("setkey() failed flags=%x\n",
- 				       crypto_ablkcipher_get_flags(tfm));
+ 	spin_lock_init(&hvcsd->lock);
+ 	/* Automatically incs the refcount the first time */
+-	kobject_init(&hvcsd->kobj);
+-	/* Set up the callback for terminating the hvcs_struct's life */
+-	hvcsd->kobj.ktype = &hvcs_kobj_type;
++	kref_init(&hvcsd->kref);
  
--				if (!cipher_tv[i].fail)
-+				if (!cipher_tv->fail)
- 					goto out;
- 			}
+ 	hvcsd->vdev = dev;
+ 	dev->dev.driver_data = hvcsd;
+@@ -844,7 +833,6 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
+ {
+ 	struct hvcs_struct *hvcsd = dev->dev.driver_data;
+ 	unsigned long flags;
+-	struct kobject *kobjp;
+ 	struct tty_struct *tty;
  
- 			temp = 0;
--			sg_init_table(sg, cipher_tv[i].np);
--			for (k = 0; k < cipher_tv[i].np; k++) {
-+			sg_init_table(sg, cipher_tv->np);
-+			for (k = 0; k < cipher_tv->np; k++) {
- 				memcpy(&xbuf[IDX[k]],
--				       cipher_tv[i].input + temp,
--				       cipher_tv[i].tap[k]);
--				temp += cipher_tv[i].tap[k];
-+				       cipher_tv->input + temp,
-+				       cipher_tv->tap[k]);
-+				temp += cipher_tv->tap[k];
- 				sg_set_buf(&sg[k], &xbuf[IDX[k]],
--					   cipher_tv[i].tap[k]);
-+					   cipher_tv->tap[k]);
- 			}
+ 	if (!hvcsd)
+@@ -856,15 +844,13 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
  
- 			ablkcipher_request_set_crypt(req, sg, sg,
--						     cipher_tv[i].ilen,
--						     cipher_tv[i].iv);
-+						     cipher_tv->ilen,
-+						     cipher_tv->iv);
+ 	tty = hvcsd->tty;
  
- 			ret = enc ?
- 				crypto_ablkcipher_encrypt(req) :
-@@ -390,15 +625,15 @@ static void test_cipher(char *algo, int enc,
- 			}
+-	kobjp = &hvcsd->kobj;
+-
+ 	spin_unlock_irqrestore(&hvcsd->lock, flags);
  
- 			temp = 0;
--			for (k = 0; k < cipher_tv[i].np; k++) {
-+			for (k = 0; k < cipher_tv->np; k++) {
- 				printk("page %u\n", k);
- 				q = kmap(sg_page(&sg[k])) + sg[k].offset;
--				hexdump(q, cipher_tv[i].tap[k]);
-+				hexdump(q, cipher_tv->tap[k]);
- 				printk("%s\n",
--					memcmp(q, cipher_tv[i].result + temp,
--						cipher_tv[i].tap[k]) ? "fail" :
-+					memcmp(q, cipher_tv->result + temp,
-+						cipher_tv->tap[k]) ? "fail" :
- 					"pass");
--				temp += cipher_tv[i].tap[k];
-+				temp += cipher_tv->tap[k];
- 			}
- 		}
- 	}
-@@ -800,7 +1035,8 @@ out:
- 	crypto_free_hash(tfm);
+ 	/*
+ 	 * Let the last holder of this object cause it to be removed, which
+ 	 * would probably be tty_hangup below.
+ 	 */
+-	kobject_put (kobjp);
++	kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ 
+ 	/*
+ 	 * The hangup is a scheduled function which will auto chain call
+@@ -1086,7 +1072,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
  }
  
--static void test_deflate(void)
-+static void test_comp(char *algo, struct comp_testvec *ctemplate,
-+		       struct comp_testvec *dtemplate, int ctcount, int dtcount)
- {
- 	unsigned int i;
- 	char result[COMP_BUF_SIZE];
-@@ -808,25 +1044,26 @@ static void test_deflate(void)
- 	struct comp_testvec *tv;
- 	unsigned int tsize;
+ /*
+- * This always increments the kobject ref count if the call is successful.
++ * This always increments the kref ref count if the call is successful.
+  * Please remember to dec when you are done with the instance.
+  *
+  * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
+@@ -1103,7 +1089,7 @@ static struct hvcs_struct *hvcs_get_by_index(int index)
+ 		list_for_each_entry(hvcsd, &hvcs_structs, next) {
+ 			spin_lock_irqsave(&hvcsd->lock, flags);
+ 			if (hvcsd->index == index) {
+-				kobject_get(&hvcsd->kobj);
++				kref_get(&hvcsd->kref);
+ 				spin_unlock_irqrestore(&hvcsd->lock, flags);
+ 				spin_unlock(&hvcs_structs_lock);
+ 				return hvcsd;
+@@ -1129,14 +1115,13 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
+ 	unsigned int irq;
+ 	struct vio_dev *vdev;
+ 	unsigned long unit_address;
+-	struct kobject *kobjp;
  
--	printk("\ntesting deflate compression\n");
-+	printk("\ntesting %s compression\n", algo);
+ 	if (tty->driver_data)
+ 		goto fast_open;
  
--	tsize = sizeof (deflate_comp_tv_template);
-+	tsize = sizeof(struct comp_testvec);
-+	tsize *= ctcount;
- 	if (tsize > TVMEMSIZE) {
- 		printk("template (%u) too big for tvmem (%u)\n", tsize,
- 		       TVMEMSIZE);
- 		return;
+ 	/*
+ 	 * Is there a vty-server that shares the same index?
+-	 * This function increments the kobject index.
++	 * This function increments the kref index.
+ 	 */
+ 	if (!(hvcsd = hvcs_get_by_index(tty->index))) {
+ 		printk(KERN_WARNING "HVCS: open failed, no device associated"
+@@ -1181,7 +1166,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
+ 	 * and will grab the spinlock and free the connection if it fails.
+ 	 */
+ 	if (((rc = hvcs_enable_device(hvcsd, unit_address, irq, vdev)))) {
+-		kobject_put(&hvcsd->kobj);
++		kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ 		printk(KERN_WARNING "HVCS: enable device failed.\n");
+ 		return rc;
  	}
+@@ -1192,17 +1177,11 @@ fast_open:
+ 	hvcsd = tty->driver_data;
  
--	memcpy(tvmem, deflate_comp_tv_template, tsize);
-+	memcpy(tvmem, ctemplate, tsize);
- 	tv = (void *)tvmem;
+ 	spin_lock_irqsave(&hvcsd->lock, flags);
+-	if (!kobject_get(&hvcsd->kobj)) {
+-		spin_unlock_irqrestore(&hvcsd->lock, flags);
+-		printk(KERN_ERR "HVCS: Kobject of open"
+-			" hvcs doesn't exist.\n");
+-		return -EFAULT; /* Is this the right return value? */
+-	}
+-
++	kref_get(&hvcsd->kref);
+ 	hvcsd->open_count++;
+-
+ 	hvcsd->todo_mask |= HVCS_SCHED_READ;
+ 	spin_unlock_irqrestore(&hvcsd->lock, flags);
++
+ open_success:
+ 	hvcs_kick();
  
--	tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
-+	tfm = crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC);
- 	if (IS_ERR(tfm)) {
--		printk("failed to load transform for deflate\n");
-+		printk("failed to load transform for %s\n", algo);
+@@ -1212,9 +1191,8 @@ open_success:
+ 	return 0;
+ 
+ error_release:
+-	kobjp = &hvcsd->kobj;
+ 	spin_unlock_irqrestore(&hvcsd->lock, flags);
+-	kobject_put(&hvcsd->kobj);
++	kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ 
+ 	printk(KERN_WARNING "HVCS: partner connect failed.\n");
+ 	return retval;
+@@ -1224,7 +1202,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+ {
+ 	struct hvcs_struct *hvcsd;
+ 	unsigned long flags;
+-	struct kobject *kobjp;
+ 	int irq = NO_IRQ;
+ 
+ 	/*
+@@ -1245,7 +1222,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+ 	hvcsd = tty->driver_data;
+ 
+ 	spin_lock_irqsave(&hvcsd->lock, flags);
+-	kobjp = &hvcsd->kobj;
+ 	if (--hvcsd->open_count == 0) {
+ 
+ 		vio_disable_interrupts(hvcsd->vdev);
+@@ -1270,7 +1246,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+ 		tty->driver_data = NULL;
+ 
+ 		free_irq(irq, hvcsd);
+-		kobject_put(kobjp);
++		kref_put(&hvcsd->kref, destroy_hvcs_struct);
  		return;
+ 	} else if (hvcsd->open_count < 0) {
+ 		printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
+@@ -1279,7 +1255,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
  	}
  
--	for (i = 0; i < DEFLATE_COMP_TEST_VECTORS; i++) {
-+	for (i = 0; i < ctcount; i++) {
- 		int ilen, ret, dlen = COMP_BUF_SIZE;
+ 	spin_unlock_irqrestore(&hvcsd->lock, flags);
+-	kobject_put(kobjp);
++	kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ }
  
- 		printk("test %u:\n", i + 1);
-@@ -845,19 +1082,20 @@ static void test_deflate(void)
- 		       ilen, dlen);
- 	}
+ static void hvcs_hangup(struct tty_struct * tty)
+@@ -1287,21 +1263,17 @@ static void hvcs_hangup(struct tty_struct * tty)
+ 	struct hvcs_struct *hvcsd = tty->driver_data;
+ 	unsigned long flags;
+ 	int temp_open_count;
+-	struct kobject *kobjp;
+ 	int irq = NO_IRQ;
  
--	printk("\ntesting deflate decompression\n");
-+	printk("\ntesting %s decompression\n", algo);
+ 	spin_lock_irqsave(&hvcsd->lock, flags);
+-	/* Preserve this so that we know how many kobject refs to put */
++	/* Preserve this so that we know how many kref refs to put */
+ 	temp_open_count = hvcsd->open_count;
  
--	tsize = sizeof (deflate_decomp_tv_template);
-+	tsize = sizeof(struct comp_testvec);
-+	tsize *= dtcount;
- 	if (tsize > TVMEMSIZE) {
- 		printk("template (%u) too big for tvmem (%u)\n", tsize,
- 		       TVMEMSIZE);
- 		goto out;
+ 	/*
+-	 * Don't kobject put inside the spinlock because the destruction
++	 * Don't kref put inside the spinlock because the destruction
+ 	 * callback may use the spinlock and it may get called before the
+-	 * spinlock has been released.  Get a pointer to the kobject and
+-	 * kobject_put on that after releasing the spinlock.
++	 * spinlock has been released.
+ 	 */
+-	kobjp = &hvcsd->kobj;
+-
+ 	vio_disable_interrupts(hvcsd->vdev);
+ 
+ 	hvcsd->todo_mask = 0;
+@@ -1324,7 +1296,7 @@ static void hvcs_hangup(struct tty_struct * tty)
+ 	free_irq(irq, hvcsd);
+ 
+ 	/*
+-	 * We need to kobject_put() for every open_count we have since the
++	 * We need to kref_put() for every open_count we have since the
+ 	 * tty_hangup() function doesn't invoke a close per open connection on a
+ 	 * non-console device.
+ 	 */
+@@ -1335,7 +1307,7 @@ static void hvcs_hangup(struct tty_struct * tty)
+ 		 * NOTE:  If this hangup was signaled from user space then the
+ 		 * final put will never happen.
+ 		 */
+-		kobject_put(kobjp);
++		kref_put(&hvcsd->kref, destroy_hvcs_struct);
  	}
+ }
  
--	memcpy(tvmem, deflate_decomp_tv_template, tsize);
-+	memcpy(tvmem, dtemplate, tsize);
- 	tv = (void *)tvmem;
+diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
+index 2d7cd48..6bbd4fa 100644
+--- a/drivers/char/hw_random/Kconfig
++++ b/drivers/char/hw_random/Kconfig
+@@ -98,7 +98,7 @@ config HW_RANDOM_PASEMI
+ 	default HW_RANDOM
+ 	---help---
+ 	  This driver provides kernel-side support for the Random Number
+-	  Generator hardware found on PA6T-1682M processor.
++	  Generator hardware found on PA Semi PWRficient SoCs.
  
--	for (i = 0; i < DEFLATE_DECOMP_TEST_VECTORS; i++) {
-+	for (i = 0; i < dtcount; i++) {
- 		int ilen, ret, dlen = COMP_BUF_SIZE;
+ 	  To compile this driver as a module, choose M here: the
+ 	  module will be called pasemi-rng.
+diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
+index 556fd81..c422e87 100644
+--- a/drivers/char/hw_random/amd-rng.c
++++ b/drivers/char/hw_random/amd-rng.c
+@@ -28,6 +28,7 @@
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ #include <linux/hw_random.h>
++#include <linux/delay.h>
+ #include <asm/io.h>
  
- 		printk("test %u:\n", i + 1);
-@@ -918,6 +1156,8 @@ static void do_test(void)
  
- 		test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
+@@ -52,11 +53,18 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
+ static struct pci_dev *amd_pdev;
  
-+		test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
-+
- 		test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS);
  
- 		//BLOWFISH
-@@ -969,6 +1209,18 @@ static void do_test(void)
- 			    AES_XTS_ENC_TEST_VECTORS);
- 		test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template,
- 			    AES_XTS_DEC_TEST_VECTORS);
-+		test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template,
-+			    AES_CTR_ENC_TEST_VECTORS);
-+		test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template,
-+			    AES_CTR_DEC_TEST_VECTORS);
-+		test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template,
-+			  AES_GCM_ENC_TEST_VECTORS);
-+		test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template,
-+			  AES_GCM_DEC_TEST_VECTORS);
-+		test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template,
-+			  AES_CCM_ENC_TEST_VECTORS);
-+		test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template,
-+			  AES_CCM_DEC_TEST_VECTORS);
+-static int amd_rng_data_present(struct hwrng *rng)
++static int amd_rng_data_present(struct hwrng *rng, int wait)
+ {
+ 	u32 pmbase = (u32)rng->priv;
++	int data, i;
  
- 		//CAST5
- 		test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
-@@ -1057,12 +1309,18 @@ static void do_test(void)
- 		test_hash("tgr192", tgr192_tv_template, TGR192_TEST_VECTORS);
- 		test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS);
- 		test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
--		test_deflate();
-+		test_comp("deflate", deflate_comp_tv_template,
-+			  deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS,
-+			  DEFLATE_DECOMP_TEST_VECTORS);
-+		test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template,
-+			  LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS);
- 		test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS);
- 		test_hash("hmac(md5)", hmac_md5_tv_template,
- 			  HMAC_MD5_TEST_VECTORS);
- 		test_hash("hmac(sha1)", hmac_sha1_tv_template,
- 			  HMAC_SHA1_TEST_VECTORS);
-+		test_hash("hmac(sha224)", hmac_sha224_tv_template,
-+			  HMAC_SHA224_TEST_VECTORS);
- 		test_hash("hmac(sha256)", hmac_sha256_tv_template,
- 			  HMAC_SHA256_TEST_VECTORS);
- 		test_hash("hmac(sha384)", hmac_sha384_tv_template,
-@@ -1156,6 +1414,10 @@ static void do_test(void)
- 			    AES_XTS_ENC_TEST_VECTORS);
- 		test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template,
- 			    AES_XTS_DEC_TEST_VECTORS);
-+		test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template,
-+			    AES_CTR_ENC_TEST_VECTORS);
-+		test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template,
-+			    AES_CTR_DEC_TEST_VECTORS);
- 		break;
+-      	return !!(inl(pmbase + 0xF4) & 1);
++	for (i = 0; i < 20; i++) {
++		data = !!(inl(pmbase + 0xF4) & 1);
++		if (data || !wait)
++			break;
++		udelay(10);
++	}
++	return data;
+ }
  
- 	case 11:
-@@ -1167,7 +1429,9 @@ static void do_test(void)
- 		break;
+ static int amd_rng_data_read(struct hwrng *rng, u32 *data)
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 26a860a..0118b98 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -66,11 +66,11 @@ static inline void hwrng_cleanup(struct hwrng *rng)
+ 		rng->cleanup(rng);
+ }
  
- 	case 13:
--		test_deflate();
-+		test_comp("deflate", deflate_comp_tv_template,
-+			  deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS,
-+			  DEFLATE_DECOMP_TEST_VECTORS);
- 		break;
+-static inline int hwrng_data_present(struct hwrng *rng)
++static inline int hwrng_data_present(struct hwrng *rng, int wait)
+ {
+ 	if (!rng->data_present)
+ 		return 1;
+-	return rng->data_present(rng);
++	return rng->data_present(rng, wait);
+ }
  
- 	case 14:
-@@ -1291,6 +1555,34 @@ static void do_test(void)
- 			    camellia_cbc_dec_tv_template,
- 			    CAMELLIA_CBC_DEC_TEST_VECTORS);
- 		break;
-+	case 33:
-+		test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
-+		break;
-+
-+	case 34:
-+		test_cipher("salsa20", ENCRYPT,
-+			    salsa20_stream_enc_tv_template,
-+			    SALSA20_STREAM_ENC_TEST_VECTORS);
-+		break;
-+
-+	case 35:
-+		test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template,
-+			  AES_GCM_ENC_TEST_VECTORS);
-+		test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template,
-+			  AES_GCM_DEC_TEST_VECTORS);
-+		break;
-+
-+	case 36:
-+		test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template,
-+			  LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS);
-+		break;
+ static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
+@@ -94,8 +94,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ {
+ 	u32 data;
+ 	ssize_t ret = 0;
+-	int i, err = 0;
+-	int data_present;
++	int err = 0;
+ 	int bytes_read;
+ 
+ 	while (size) {
+@@ -107,21 +106,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ 			err = -ENODEV;
+ 			goto out;
+ 		}
+-		if (filp->f_flags & O_NONBLOCK) {
+-			data_present = hwrng_data_present(current_rng);
+-		} else {
+-			/* Some RNG require some time between data_reads to gather
+-			 * new entropy. Poll it.
+-			 */
+-			for (i = 0; i < 20; i++) {
+-				data_present = hwrng_data_present(current_rng);
+-				if (data_present)
+-					break;
+-				udelay(10);
+-			}
+-		}
 +
-+	case 37:
-+		test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template,
-+			  AES_CCM_ENC_TEST_VECTORS);
-+		test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template,
-+			  AES_CCM_DEC_TEST_VECTORS);
-+		break;
+ 		bytes_read = 0;
+-		if (data_present)
++		if (hwrng_data_present(current_rng,
++				       !(filp->f_flags & O_NONBLOCK)))
+ 			bytes_read = hwrng_data_read(current_rng, &data);
+ 		mutex_unlock(&rng_mutex);
  
- 	case 100:
- 		test_hash("hmac(md5)", hmac_md5_tv_template,
-@@ -1317,6 +1609,15 @@ static void do_test(void)
- 			  HMAC_SHA512_TEST_VECTORS);
- 		break;
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index 8e8658d..fed4ef5 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -28,6 +28,7 @@
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ #include <linux/hw_random.h>
++#include <linux/delay.h>
+ #include <asm/io.h>
  
-+	case 105:
-+		test_hash("hmac(sha224)", hmac_sha224_tv_template,
-+			  HMAC_SHA224_TEST_VECTORS);
-+		break;
-+
-+	case 106:
-+		test_hash("xcbc(aes)", aes_xcbc128_tv_template,
-+			  XCBC_AES_TEST_VECTORS);
-+		break;
  
- 	case 200:
- 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
-@@ -1400,6 +1701,11 @@ static void do_test(void)
- 				camellia_speed_template);
- 		break;
+@@ -61,11 +62,18 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ 	return 4;
+ }
  
-+	case 206:
-+		test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0,
-+				  salsa20_speed_template);
-+		break;
-+
- 	case 300:
- 		/* fall through */
+-static int geode_rng_data_present(struct hwrng *rng)
++static int geode_rng_data_present(struct hwrng *rng, int wait)
+ {
+ 	void __iomem *mem = (void __iomem *)rng->priv;
++	int data, i;
  
-@@ -1451,6 +1757,10 @@ static void do_test(void)
- 		test_hash_speed("tgr192", sec, generic_hash_speed_template);
- 		if (mode > 300 && mode < 400) break;
+-	return !!(readl(mem + GEODE_RNG_STATUS_REG));
++	for (i = 0; i < 20; i++) {
++		data = !!(readl(mem + GEODE_RNG_STATUS_REG));
++		if (data || !wait)
++			break;
++		udelay(10);
++	}
++	return data;
+ }
  
-+	case 313:
-+		test_hash_speed("sha224", sec, generic_hash_speed_template);
-+		if (mode > 300 && mode < 400) break;
-+
- 	case 399:
- 		break;
  
-@@ -1467,20 +1777,21 @@ static void do_test(void)
+diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
+index 753f460..5cc651e 100644
+--- a/drivers/char/hw_random/intel-rng.c
++++ b/drivers/char/hw_random/intel-rng.c
+@@ -29,6 +29,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/stop_machine.h>
++#include <linux/delay.h>
+ #include <asm/io.h>
  
- static int __init init(void)
+ 
+@@ -162,11 +163,19 @@ static inline u8 hwstatus_set(void __iomem *mem,
+ 	return hwstatus_get(mem);
+ }
+ 
+-static int intel_rng_data_present(struct hwrng *rng)
++static int intel_rng_data_present(struct hwrng *rng, int wait)
  {
-+	int err = -ENOMEM;
+ 	void __iomem *mem = (void __iomem *)rng->priv;
+-
+-	return !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT);
++	int data, i;
 +
- 	tvmem = kmalloc(TVMEMSIZE, GFP_KERNEL);
- 	if (tvmem == NULL)
--		return -ENOMEM;
-+		return err;
++	for (i = 0; i < 20; i++) {
++		data = !!(readb(mem + INTEL_RNG_STATUS) &
++			  INTEL_RNG_DATA_PRESENT);
++		if (data || !wait)
++			break;
++		udelay(10);
++	}
++	return data;
+ }
  
- 	xbuf = kmalloc(XBUFSIZE, GFP_KERNEL);
--	if (xbuf == NULL) {
--		kfree(tvmem);
--		return -ENOMEM;
--	}
-+	if (xbuf == NULL)
-+		goto err_free_tv;
+ static int intel_rng_data_read(struct hwrng *rng, u32 *data)
+diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
+index 3f35a1c..7e31995 100644
+--- a/drivers/char/hw_random/omap-rng.c
++++ b/drivers/char/hw_random/omap-rng.c
+@@ -29,6 +29,7 @@
+ #include <linux/err.h>
+ #include <linux/platform_device.h>
+ #include <linux/hw_random.h>
++#include <linux/delay.h>
  
--	do_test();
-+	axbuf = kmalloc(XBUFSIZE, GFP_KERNEL);
-+	if (axbuf == NULL)
-+		goto err_free_xbuf;
+ #include <asm/io.h>
  
--	kfree(xbuf);
--	kfree(tvmem);
-+	do_test();
+@@ -65,9 +66,17 @@ static void omap_rng_write_reg(int reg, u32 val)
+ }
  
- 	/* We intentionaly return -EAGAIN to prevent keeping
- 	 * the module. It does all its work from init()
-@@ -1488,7 +1799,15 @@ static int __init init(void)
- 	 * => we don't need it in the memory, do we?
- 	 *                                        -- mludvig
- 	 */
--	return -EAGAIN;
-+	err = -EAGAIN;
-+
-+	kfree(axbuf);
-+ err_free_xbuf:
-+	kfree(xbuf);
-+ err_free_tv:
-+	kfree(tvmem);
+ /* REVISIT: Does the status bit really work on 16xx? */
+-static int omap_rng_data_present(struct hwrng *rng)
++static int omap_rng_data_present(struct hwrng *rng, int wait)
+ {
+-	return omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
++	int data, i;
 +
-+	return err;
++	for (i = 0; i < 20; i++) {
++		data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
++		if (data || !wait)
++			break;
++		udelay(10);
++	}
++	return data;
  }
  
- /*
-diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
-index ec86138..f785e56 100644
---- a/crypto/tcrypt.h
-+++ b/crypto/tcrypt.h
-@@ -6,12 +6,15 @@
-  *
-  * Copyright (c) 2002 James Morris <jmorris at intercode.com.au>
-  * Copyright (c) 2002 Jean-Francois Dive <jef at linuxbe.org>
-+ * Copyright (c) 2007 Nokia Siemens Networks
-  *
-  * This program is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU General Public License as published by the Free
-  * Software Foundation; either version 2 of the License, or (at your option)
-  * any later version.
-  *
-+ * 2007-11-13 Added GCM tests
-+ * 2007-11-13 Added AEAD support
-  * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
-  * 2004-08-09 Cipher speed tests by Reyk Floeter <reyk at vantronix.net>
-  * 2003-09-14 Changes by Kartikey Mahendra Bhatt
-@@ -40,14 +43,32 @@ struct hash_testvec {
- struct cipher_testvec {
- 	char key[MAX_KEYLEN] __attribute__ ((__aligned__(4)));
- 	char iv[MAX_IVLEN];
-+	char input[4100];
-+	char result[4100];
-+	unsigned char tap[MAX_TAP];
-+	int np;
-+	unsigned char fail;
-+	unsigned char wk; /* weak key flag */
-+	unsigned char klen;
-+	unsigned short ilen;
-+	unsigned short rlen;
-+};
-+
-+struct aead_testvec {
-+	char key[MAX_KEYLEN] __attribute__ ((__aligned__(4)));
-+	char iv[MAX_IVLEN];
- 	char input[512];
-+	char assoc[512];
- 	char result[512];
- 	unsigned char tap[MAX_TAP];
-+	unsigned char atap[MAX_TAP];
- 	int np;
-+	int anp;
- 	unsigned char fail;
- 	unsigned char wk; /* weak key flag */
- 	unsigned char klen;
- 	unsigned short ilen;
-+	unsigned short alen;
- 	unsigned short rlen;
- };
+ static int omap_rng_data_read(struct hwrng *rng, u32 *data)
+diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
+index fa6040b..6d50e9b 100644
+--- a/drivers/char/hw_random/pasemi-rng.c
++++ b/drivers/char/hw_random/pasemi-rng.c
+@@ -23,6 +23,7 @@
+ #include <linux/kernel.h>
+ #include <linux/platform_device.h>
+ #include <linux/hw_random.h>
++#include <linux/delay.h>
+ #include <asm/of_platform.h>
+ #include <asm/io.h>
  
-@@ -173,6 +194,33 @@ static struct hash_testvec sha1_tv_template[] = {
- 	}
- };
+@@ -41,12 +42,19 @@
  
+ #define MODULE_NAME "pasemi_rng"
+ 
+-static int pasemi_rng_data_present(struct hwrng *rng)
++static int pasemi_rng_data_present(struct hwrng *rng, int wait)
+ {
+ 	void __iomem *rng_regs = (void __iomem *)rng->priv;
+-
+-	return (in_le32(rng_regs + SDCRNG_CTL_REG)
+-		& SDCRNG_CTL_FVLD_M) ? 1 : 0;
++	int data, i;
 +
-+/*
-+ * SHA224 test vectors from from FIPS PUB 180-2
-+ */
-+#define SHA224_TEST_VECTORS     2
-+
-+static struct hash_testvec sha224_tv_template[] = {
-+	{
-+		.plaintext = "abc",
-+		.psize  = 3,
-+		.digest = { 0x23, 0x09, 0x7D, 0x22, 0x34, 0x05, 0xD8, 0x22,
-+			0x86, 0x42, 0xA4, 0x77, 0xBD, 0xA2, 0x55, 0xB3,
-+			0x2A, 0xAD, 0xBC, 0xE4, 0xBD, 0xA0, 0xB3, 0xF7,
-+			0xE3, 0x6C, 0x9D, 0xA7},
-+	}, {
-+		.plaintext =
-+		"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
-+		.psize  = 56,
-+		.digest = { 0x75, 0x38, 0x8B, 0x16, 0x51, 0x27, 0x76, 0xCC,
-+			0x5D, 0xBA, 0x5D, 0xA1, 0xFD, 0x89, 0x01, 0x50,
-+			0xB0, 0xC6, 0x45, 0x5C, 0xB4, 0xF5, 0x8B, 0x19,
-+			0x52, 0x52, 0x25, 0x25 },
-+		.np     = 2,
-+		.tap    = { 28, 28 }
++	for (i = 0; i < 20; i++) {
++		data = (in_le32(rng_regs + SDCRNG_CTL_REG)
++			& SDCRNG_CTL_FVLD_M) ? 1 : 0;
++		if (data || !wait)
++			break;
++		udelay(10);
 +	}
-+};
-+
- /*
-  * SHA256 test vectors from from NIST
-  */
-@@ -817,6 +865,121 @@ static struct hash_testvec hmac_sha1_tv_template[] = {
- 	},
++	return data;
+ }
+ 
+ static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
+@@ -126,10 +134,9 @@ static int __devexit rng_remove(struct of_device *dev)
+ }
+ 
+ static struct of_device_id rng_match[] = {
+-	{
+-		.compatible      = "1682m-rng",
+-	},
+-	{},
++	{ .compatible      = "1682m-rng", },
++	{ .compatible      = "pasemi,pwrficient-rng", },
++	{ },
  };
  
+ static struct of_platform_driver rng_driver = {
+diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
+index ec435cb..868e39f 100644
+--- a/drivers/char/hw_random/via-rng.c
++++ b/drivers/char/hw_random/via-rng.c
+@@ -27,6 +27,7 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/hw_random.h>
++#include <linux/delay.h>
+ #include <asm/io.h>
+ #include <asm/msr.h>
+ #include <asm/cpufeature.h>
+@@ -77,10 +78,11 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
+ 	return eax_out;
+ }
+ 
+-static int via_rng_data_present(struct hwrng *rng)
++static int via_rng_data_present(struct hwrng *rng, int wait)
+ {
+ 	u32 bytes_out;
+ 	u32 *via_rng_datum = (u32 *)(&rng->priv);
++	int i;
+ 
+ 	/* We choose the recommended 1-byte-per-instruction RNG rate,
+ 	 * for greater randomness at the expense of speed.  Larger
+@@ -95,12 +97,15 @@ static int via_rng_data_present(struct hwrng *rng)
+ 	 * completes.
+ 	 */
+ 
+-	*via_rng_datum = 0; /* paranoia, not really necessary */
+-	bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
+-	bytes_out &= VIA_XSTORE_CNT_MASK;
+-	if (bytes_out == 0)
+-		return 0;
+-	return 1;
++	for (i = 0; i < 20; i++) {
++		*via_rng_datum = 0; /* paranoia, not really necessary */
++		bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
++		bytes_out &= VIA_XSTORE_CNT_MASK;
++		if (bytes_out || !wait)
++			break;
++		udelay(10);
++	}
++	return bytes_out ? 1 : 0;
+ }
+ 
+ static int via_rng_data_read(struct hwrng *rng, u32 *data)
+diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
+new file mode 100644
+index 0000000..6076e66
+--- /dev/null
++++ b/drivers/char/nozomi.c
+@@ -0,0 +1,1993 @@
++/*
++ * nozomi.c  -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
++ *
++ * Written by: Ulf Jakobsson,
++ *             Jan Åkerfeldt,
++ *             Stefan Thomasson,
++ *
++ * Maintained by: Paul Hardwick (p.hardwick at option.com)
++ *
++ * Patches:
++ *          Locking code changes for Vodafone by Sphere Systems Ltd,
++ *                              Andrew Bird (ajb at spheresystems.co.uk )
++ *                              & Phil Sanderson
++ *
++ * Source has been ported from an implementation made by Filip Aben @ Option
++ *
++ * --------------------------------------------------------------------------
++ *
++ * Copyright (c) 2005,2006 Option Wireless Sweden AB
++ * Copyright (c) 2006 Sphere Systems Ltd
++ * Copyright (c) 2006 Option Wireless n/v
++ * All rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ *
++ * --------------------------------------------------------------------------
++ */
 +
 +/*
-+ * SHA224 HMAC test vectors from RFC4231
++ * CHANGELOG
++ * Version 2.1d
++ * 11-November-2007 Jiri Slaby, Frank Seidel
++ * - Big rework of multicard support by Jiri
++ * - Major cleanups (semaphore to mutex, endianness, no major reservation)
++ * - Optimizations
++ *
++ * Version 2.1c
++ * 30-October-2007 Frank Seidel
++ * - Completed multicard support
++ * - Minor cleanups
++ *
++ * Version 2.1b
++ * 07-August-2007 Frank Seidel
++ * - Minor cleanups
++ * - theoretical multicard support
++ *
++ * Version 2.1
++ * 03-July-2006 Paul Hardwick
++ *
++ * - Stability Improvements. Incorporated spinlock wraps patch.
++ * - Updated for newer 2.6.14+ kernels (tty_buffer_request_room)
++ * - using __devexit macro for tty
++ *
++ *
++ * Version 2.0
++ * 08-feb-2006 15:34:10:Ulf
++ *
++ * -Fixed issue when not waking up line discipline layer, could probably result
++ *  in better uplink performance for 2.4.
++ *
++ * -Fixed issue with big endian during initialization, now proper toggle flags
++ *  are handled between preloader and maincode.
++ *
++ * -Fixed flow control issue.
++ *
++ * -Added support for setting DTR.
++ *
++ * -For 2.4 kernels, removing temporary buffer that's not needed.
++ *
++ * -Reading CTS only for modem port (only port that supports it).
++ *
++ * -Return 0 in write_room instead of negative value, it's not handled in
++ *  upper layer.
++ *
++ * --------------------------------------------------------------------------
++ * Version 1.0
++ *
++ * First version of driver, only tested with card of type F32_2.
++ * Works fine with 2.4 and 2.6 kernels.
++ * Driver also supports big endian architecture.
 + */
-+#define HMAC_SHA224_TEST_VECTORS    4
 +
-+static struct hash_testvec hmac_sha224_tv_template[] = {
-+	{
-+		.key    = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
-+			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
-+			0x0b, 0x0b, 0x0b, 0x0b },
-+		.ksize  = 20,
-+		/*  ("Hi There") */
-+		.plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 },
-+		.psize  = 8,
-+		.digest = { 0x89, 0x6f, 0xb1, 0x12, 0x8a, 0xbb, 0xdf, 0x19,
-+			0x68, 0x32, 0x10, 0x7c, 0xd4, 0x9d, 0xf3, 0x3f,
-+			0x47, 0xb4, 0xb1, 0x16, 0x99, 0x12, 0xba, 0x4f,
-+			0x53, 0x68, 0x4b, 0x22},
-+	}, {
-+		.key    = { 0x4a, 0x65, 0x66, 0x65 }, /* ("Jefe") */
-+		.ksize  = 4,
-+		/* ("what do ya want for nothing?") */
-+		.plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
-+			0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
-+			0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
-+			0x69, 0x6e, 0x67, 0x3f },
-+		.psize  = 28,
-+		.digest = { 0xa3, 0x0e, 0x01, 0x09, 0x8b, 0xc6, 0xdb, 0xbf,
-+			0x45, 0x69, 0x0f, 0x3a, 0x7e, 0x9e, 0x6d, 0x0f,
-+			0x8b, 0xbe, 0xa2, 0xa3, 0x9e, 0x61, 0x48, 0x00,
-+			0x8f, 0xd0, 0x5e, 0x44 },
-+		.np = 4,
-+		.tap    = { 7, 7, 7, 7 }
-+	}, {
-+		.key    = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa },
-+		.ksize  = 131,
-+		/* ("Test Using Larger Than Block-Size Key - Hash Key First") */
-+		.plaintext = { 0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69,
-+			0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72, 0x67, 0x65,
-+			0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42,
-+			0x6c, 0x6f, 0x63, 0x6b, 0x2d, 0x53, 0x69, 0x7a,
-+			0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20,
-+			0x48, 0x61, 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79,
-+			0x20, 0x46, 0x69, 0x72, 0x73, 0x74 },
-+		.psize  = 54,
-+		.digest = { 0x95, 0xe9, 0xa0, 0xdb, 0x96, 0x20, 0x95, 0xad,
-+			0xae, 0xbe, 0x9b, 0x2d, 0x6f, 0x0d, 0xbc, 0xe2,
-+			0xd4, 0x99, 0xf1, 0x12, 0xf2, 0xd2, 0xb7, 0x27,
-+			0x3f, 0xa6, 0x87, 0x0e },
-+	}, {
-+		.key    = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
-+			0xaa, 0xaa, 0xaa },
-+		.ksize  = 131,
-+		/* ("This is a test using a larger than block-size key and a")
-+		(" larger than block-size data. The key needs to be")
-+			(" hashed before being used by the HMAC algorithm.") */
-+		.plaintext = { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20,
-+			0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x20, 0x75,
-+			0x73, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c,
-+			0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68,
-+			0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
-+			0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6b, 0x65,
-+			0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x20,
-+			0x6c, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74,
-+			0x68, 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63,
-+			0x6b, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x64,
-+			0x61, 0x74, 0x61, 0x2e, 0x20, 0x54, 0x68, 0x65,
-+			0x20, 0x6b, 0x65, 0x79, 0x20, 0x6e, 0x65, 0x65,
-+			0x64, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65,
-+			0x20, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x20,
-+			0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x62,
-+			0x65, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x65,
-+			0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65,
-+			0x20, 0x48, 0x4d, 0x41, 0x43, 0x20, 0x61, 0x6c,
-+			0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x2e },
-+		.psize  = 152,
-+		.digest = { 0x3a, 0x85, 0x41, 0x66, 0xac, 0x5d, 0x9f, 0x02,
-+			0x3f, 0x54, 0xd5, 0x17, 0xd0, 0xb3, 0x9d, 0xbd,
-+			0x94, 0x67, 0x70, 0xdb, 0x9c, 0x2b, 0x95, 0xc9,
-+			0xf6, 0xf5, 0x65, 0xd1 },
-+	},
-+};
++/* Enable this to have a lot of debug printouts */
++#define DEBUG
 +
- /*
-  * HMAC-SHA256 test vectors from
-  * draft-ietf-ipsec-ciph-sha-256-01.txt
-@@ -2140,12 +2303,18 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
-  */
- #define AES_ENC_TEST_VECTORS 3
- #define AES_DEC_TEST_VECTORS 3
--#define AES_CBC_ENC_TEST_VECTORS 2
--#define AES_CBC_DEC_TEST_VECTORS 2
-+#define AES_CBC_ENC_TEST_VECTORS 4
-+#define AES_CBC_DEC_TEST_VECTORS 4
- #define AES_LRW_ENC_TEST_VECTORS 8
- #define AES_LRW_DEC_TEST_VECTORS 8
- #define AES_XTS_ENC_TEST_VECTORS 4
- #define AES_XTS_DEC_TEST_VECTORS 4
-+#define AES_CTR_ENC_TEST_VECTORS 7
-+#define AES_CTR_DEC_TEST_VECTORS 6
-+#define AES_GCM_ENC_TEST_VECTORS 9
-+#define AES_GCM_DEC_TEST_VECTORS 8
-+#define AES_CCM_ENC_TEST_VECTORS 7
-+#define AES_CCM_DEC_TEST_VECTORS 7
- 
- static struct cipher_testvec aes_enc_tv_template[] = {
- 	{ /* From FIPS-197 */
-@@ -2249,6 +2418,57 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
- 			    0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
- 			    0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 },
- 		.rlen   = 32,
-+	}, { /* From NIST SP800-38A */
-+		.key	= { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
-+			    0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
-+			    0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b },
-+		.klen	= 24,
-+		.iv	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
-+		.input	= { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
-+			    0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
-+			    0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
-+			    0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
-+			    0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
-+			    0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
-+			    0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
-+			    0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
-+		.ilen	= 64,
-+		.result	= { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d,
-+			    0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8,
-+			    0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4,
-+			    0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a,
-+			    0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0,
-+			    0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0,
-+			    0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81,
-+			    0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd },
-+		.rlen	= 64,
-+	}, {
-+		.key	= { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
-+			    0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
-+			    0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
-+			    0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 },
-+		.klen	= 32,
-+		.iv	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
-+		.input	= { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
-+			    0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
-+			    0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
-+			    0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
-+			    0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
-+			    0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
-+			    0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
-+			    0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
-+		.ilen	= 64,
-+		.result	= { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba,
-+			    0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6,
-+			    0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d,
-+			    0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d,
-+			    0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf,
-+			    0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61,
-+			    0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc,
-+			    0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b },
-+		.rlen	= 64,
- 	},
- };
- 
-@@ -2280,6 +2500,57 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
- 			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
- 		.rlen   = 32,
-+	}, { /* From NIST SP800-38A */
-+		.key	= { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
-+			    0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
-+			    0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b },
-+		.klen	= 24,
-+		.iv	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
-+		.input	= { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d,
-+			    0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8,
-+			    0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4,
-+			    0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a,
-+			    0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0,
-+			    0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0,
-+			    0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81,
-+			    0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd },
-+		.ilen	= 64,
-+		.result	= { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
-+			    0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
-+			    0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
-+			    0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
-+			    0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
-+			    0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
-+			    0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
-+			    0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
-+		.rlen	= 64,
-+	}, {
-+		.key	= { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
-+			    0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
-+			    0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
-+			    0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 },
-+		.klen	= 32,
-+		.iv	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
-+		.input	= { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba,
-+			    0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6,
-+			    0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d,
-+			    0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d,
-+			    0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf,
-+			    0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61,
-+			    0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc,
-+			    0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b },
-+		.ilen	= 64,
-+		.result	= { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
-+			    0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
-+			    0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
-+			    0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
-+			    0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
-+			    0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
-+			    0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
-+			    0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
-+		.rlen	= 64,
- 	},
- };
- 
-@@ -3180,6 +3451,1843 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
- 	}
- };
- 
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/ioport.h>
++#include <linux/tty.h>
++#include <linux/tty_driver.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/interrupt.h>
++#include <linux/kmod.h>
++#include <linux/init.h>
++#include <linux/kfifo.h>
++#include <linux/uaccess.h>
++#include <asm/byteorder.h>
 +
-+static struct cipher_testvec aes_ctr_enc_tv_template[] = {
-+	{ /* From RFC 3686 */
-+		.key	= { 0xae, 0x68, 0x52, 0xf8, 0x12, 0x10, 0x67, 0xcc,
-+			    0x4b, 0xf7, 0xa5, 0x76, 0x55, 0x77, 0xf3, 0x9e,
-+			    0x00, 0x00, 0x00, 0x30 },
-+		.klen	= 20,
-+		.iv 	= { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
-+		.input 	= { "Single block msg" },
-+		.ilen	= 16,
-+		.result = { 0xe4, 0x09, 0x5d, 0x4f, 0xb7, 0xa7, 0xb3, 0x79,
-+			    0x2d, 0x61, 0x75, 0xa3, 0x26, 0x13, 0x11, 0xb8 },
-+		.rlen	= 16,
-+	}, {
-+		.key	= { 0x7e, 0x24, 0x06, 0x78, 0x17, 0xfa, 0xe0, 0xd7,
-+			    0x43, 0xd6, 0xce, 0x1f, 0x32, 0x53, 0x91, 0x63,
-+			    0x00, 0x6c, 0xb6, 0xdb },
-+		.klen	= 20,
-+		.iv 	= { 0xc0, 0x54, 0x3b, 0x59, 0xda, 0x48, 0xd9, 0x0b },
-+		.input	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
-+		.ilen 	= 32,
-+		.result = { 0x51, 0x04, 0xa1, 0x06, 0x16, 0x8a, 0x72, 0xd9,
-+			    0x79, 0x0d, 0x41, 0xee, 0x8e, 0xda, 0xd3, 0x88,
-+			    0xeb, 0x2e, 0x1e, 0xfc, 0x46, 0xda, 0x57, 0xc8,
-+			    0xfc, 0xe6, 0x30, 0xdf, 0x91, 0x41, 0xbe, 0x28 },
-+		.rlen	= 32,
-+	}, {
-+		.key 	= { 0x16, 0xaf, 0x5b, 0x14, 0x5f, 0xc9, 0xf5, 0x79,
-+			    0xc1, 0x75, 0xf9, 0x3e, 0x3b, 0xfb, 0x0e, 0xed,
-+			    0x86, 0x3d, 0x06, 0xcc, 0xfd, 0xb7, 0x85, 0x15,
-+			    0x00, 0x00, 0x00, 0x48 },
-+		.klen 	= 28,
-+		.iv	= { 0x36, 0x73, 0x3c, 0x14, 0x7d, 0x6d, 0x93, 0xcb },
-+		.input	= { "Single block msg" },
-+		.ilen 	= 16,
-+		.result	= { 0x4b, 0x55, 0x38, 0x4f, 0xe2, 0x59, 0xc9, 0xc8,
-+			    0x4e, 0x79, 0x35, 0xa0, 0x03, 0xcb, 0xe9, 0x28 },
-+		.rlen	= 16,
-+	}, {
-+		.key	= { 0x7c, 0x5c, 0xb2, 0x40, 0x1b, 0x3d, 0xc3, 0x3c,
-+			    0x19, 0xe7, 0x34, 0x08, 0x19, 0xe0, 0xf6, 0x9c,
-+			    0x67, 0x8c, 0x3d, 0xb8, 0xe6, 0xf6, 0xa9, 0x1a,
-+			    0x00, 0x96, 0xb0, 0x3b },
-+		.klen	= 28,
-+		.iv 	= { 0x02, 0x0c, 0x6e, 0xad, 0xc2, 0xcb, 0x50, 0x0d },
-+		.input	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
-+		.ilen	= 32,
-+		.result	= { 0x45, 0x32, 0x43, 0xfc, 0x60, 0x9b, 0x23, 0x32,
-+			    0x7e, 0xdf, 0xaa, 0xfa, 0x71, 0x31, 0xcd, 0x9f,
-+			    0x84, 0x90, 0x70, 0x1c, 0x5a, 0xd4, 0xa7, 0x9c,
-+			    0xfc, 0x1f, 0xe0, 0xff, 0x42, 0xf4, 0xfb, 0x00 },
-+		.rlen 	= 32,
-+	}, {
-+		.key 	= { 0x77, 0x6b, 0xef, 0xf2, 0x85, 0x1d, 0xb0, 0x6f,
-+			    0x4c, 0x8a, 0x05, 0x42, 0xc8, 0x69, 0x6f, 0x6c,
-+			    0x6a, 0x81, 0xaf, 0x1e, 0xec, 0x96, 0xb4, 0xd3,
-+			    0x7f, 0xc1, 0xd6, 0x89, 0xe6, 0xc1, 0xc1, 0x04,
-+			    0x00, 0x00, 0x00, 0x60 },
-+		.klen	= 36,
-+		.iv 	= { 0xdb, 0x56, 0x72, 0xc9, 0x7a, 0xa8, 0xf0, 0xb2 },
-+		.input	= { "Single block msg" },
-+		.ilen	= 16,
-+		.result = { 0x14, 0x5a, 0xd0, 0x1d, 0xbf, 0x82, 0x4e, 0xc7,
-+			    0x56, 0x08, 0x63, 0xdc, 0x71, 0xe3, 0xe0, 0xc0 },
-+		.rlen 	= 16,
-+	}, {
-+		.key	= { 0xf6, 0xd6, 0x6d, 0x6b, 0xd5, 0x2d, 0x59, 0xbb,
-+			    0x07, 0x96, 0x36, 0x58, 0x79, 0xef, 0xf8, 0x86,
-+			    0xc6, 0x6d, 0xd5, 0x1a, 0x5b, 0x6a, 0x99, 0x74,
-+			    0x4b, 0x50, 0x59, 0x0c, 0x87, 0xa2, 0x38, 0x84,
-+			    0x00, 0xfa, 0xac, 0x24 },
-+		.klen 	= 36,
-+		.iv	= { 0xc1, 0x58, 0x5e, 0xf1, 0x5a, 0x43, 0xd8, 0x75 },
-+		.input	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
-+		.ilen	= 32,
-+		.result = { 0xf0, 0x5e, 0x23, 0x1b, 0x38, 0x94, 0x61, 0x2c,
-+			    0x49, 0xee, 0x00, 0x0b, 0x80, 0x4e, 0xb2, 0xa9,
-+			    0xb8, 0x30, 0x6b, 0x50, 0x8f, 0x83, 0x9d, 0x6a,
-+			    0x55, 0x30, 0x83, 0x1d, 0x93, 0x44, 0xaf, 0x1c },
-+		.rlen	= 32,
-+	}, {
-+	// generated using Crypto++
-+		.key = {
-+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
-+			0x00, 0x00, 0x00, 0x00,
-+		},
-+		.klen = 32 + 4,
-+		.iv = {
-+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+		},
-+		.input = {
-+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
-+			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
-+			0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
-+			0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
-+			0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
-+			0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
-+			0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
-+			0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
-+			0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
-+			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
-+			0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
-+			0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
-+			0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
-+			0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
-+			0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
-+			0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
-+			0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
-+			0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
-+			0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
-+			0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
-+			0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
-+			0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
-+			0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
-+			0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
-+			0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
-+			0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
-+			0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
-+			0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
-+			0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15,
-+			0x18, 0x1b, 0x1e, 0x21, 0x24, 0x27, 0x2a, 0x2d,
-+			0x30, 0x33, 0x36, 0x39, 0x3c, 0x3f, 0x42, 0x45,
-+			0x48, 0x4b, 0x4e, 0x51, 0x54, 0x57, 0x5a, 0x5d,
-+			0x60, 0x63, 0x66, 0x69, 0x6c, 0x6f, 0x72, 0x75,
-+			0x78, 0x7b, 0x7e, 0x81, 0x84, 0x87, 0x8a, 0x8d,
-+			0x90, 0x93, 0x96, 0x99, 0x9c, 0x9f, 0xa2, 0xa5,
-+			0xa8, 0xab, 0xae, 0xb1, 0xb4, 0xb7, 0xba, 0xbd,
-+			0xc0, 0xc3, 0xc6, 0xc9, 0xcc, 0xcf, 0xd2, 0xd5,
-+			0xd8, 0xdb, 0xde, 0xe1, 0xe4, 0xe7, 0xea, 0xed,
-+			0xf0, 0xf3, 0xf6, 0xf9, 0xfc, 0xff, 0x02, 0x05,
-+			0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x1a, 0x1d,
-+			0x20, 0x23, 0x26, 0x29, 0x2c, 0x2f, 0x32, 0x35,
-+			0x38, 0x3b, 0x3e, 0x41, 0x44, 0x47, 0x4a, 0x4d,
-+			0x50, 0x53, 0x56, 0x59, 0x5c, 0x5f, 0x62, 0x65,
-+			0x68, 0x6b, 0x6e, 0x71, 0x74, 0x77, 0x7a, 0x7d,
-+			0x80, 0x83, 0x86, 0x89, 0x8c, 0x8f, 0x92, 0x95,
-+			0x98, 0x9b, 0x9e, 0xa1, 0xa4, 0xa7, 0xaa, 0xad,
-+			0xb0, 0xb3, 0xb6, 0xb9, 0xbc, 0xbf, 0xc2, 0xc5,
-+			0xc8, 0xcb, 0xce, 0xd1, 0xd4, 0xd7, 0xda, 0xdd,
-+			0xe0, 0xe3, 0xe6, 0xe9, 0xec, 0xef, 0xf2, 0xf5,
-+			0xf8, 0xfb, 0xfe, 0x01, 0x04, 0x07, 0x0a, 0x0d,
-+			0x10, 0x13, 0x16, 0x19, 0x1c, 0x1f, 0x22, 0x25,
-+			0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, 0x3d,
-+			0x40, 0x43, 0x46, 0x49, 0x4c, 0x4f, 0x52, 0x55,
-+			0x58, 0x5b, 0x5e, 0x61, 0x64, 0x67, 0x6a, 0x6d,
-+			0x70, 0x73, 0x76, 0x79, 0x7c, 0x7f, 0x82, 0x85,
-+			0x88, 0x8b, 0x8e, 0x91, 0x94, 0x97, 0x9a, 0x9d,
-+			0xa0, 0xa3, 0xa6, 0xa9, 0xac, 0xaf, 0xb2, 0xb5,
-+			0xb8, 0xbb, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd,
-+			0xd0, 0xd3, 0xd6, 0xd9, 0xdc, 0xdf, 0xe2, 0xe5,
-+			0xe8, 0xeb, 0xee, 0xf1, 0xf4, 0xf7, 0xfa, 0xfd,
-+			0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1e, 0x23,
-+			0x28, 0x2d, 0x32, 0x37, 0x3c, 0x41, 0x46, 0x4b,
-+			0x50, 0x55, 0x5a, 0x5f, 0x64, 0x69, 0x6e, 0x73,
-+			0x78, 0x7d, 0x82, 0x87, 0x8c, 0x91, 0x96, 0x9b,
-+			0xa0, 0xa5, 0xaa, 0xaf, 0xb4, 0xb9, 0xbe, 0xc3,
-+			0xc8, 0xcd, 0xd2, 0xd7, 0xdc, 0xe1, 0xe6, 0xeb,
-+			0xf0, 0xf5, 0xfa, 0xff, 0x04, 0x09, 0x0e, 0x13,
-+			0x18, 0x1d, 0x22, 0x27, 0x2c, 0x31, 0x36, 0x3b,
-+			0x40, 0x45, 0x4a, 0x4f, 0x54, 0x59, 0x5e, 0x63,
-+			0x68, 0x6d, 0x72, 0x77, 0x7c, 0x81, 0x86, 0x8b,
-+			0x90, 0x95, 0x9a, 0x9f, 0xa4, 0xa9, 0xae, 0xb3,
-+			0xb8, 0xbd, 0xc2, 0xc7, 0xcc, 0xd1, 0xd6, 0xdb,
-+			0xe0, 0xe5, 0xea, 0xef, 0xf4, 0xf9, 0xfe, 0x03,
-+			0x08, 0x0d, 0x12, 0x17, 0x1c, 0x21, 0x26, 0x2b,
-+			0x30, 0x35, 0x3a, 0x3f, 0x44, 0x49, 0x4e, 0x53,
-+			0x58, 0x5d, 0x62, 0x67, 0x6c, 0x71, 0x76, 0x7b,
-+			0x80, 0x85, 0x8a, 0x8f, 0x94, 0x99, 0x9e, 0xa3,
-+			0xa8, 0xad, 0xb2, 0xb7, 0xbc, 0xc1, 0xc6, 0xcb,
-+			0xd0, 0xd5, 0xda, 0xdf, 0xe4, 0xe9, 0xee, 0xf3,
-+			0xf8, 0xfd, 0x02, 0x07, 0x0c, 0x11, 0x16, 0x1b,
-+			0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3e, 0x43,
-+			0x48, 0x4d, 0x52, 0x57, 0x5c, 0x61, 0x66, 0x6b,
-+			0x70, 0x75, 0x7a, 0x7f, 0x84, 0x89, 0x8e, 0x93,
-+			0x98, 0x9d, 0xa2, 0xa7, 0xac, 0xb1, 0xb6, 0xbb,
-+			0xc0, 0xc5, 0xca, 0xcf, 0xd4, 0xd9, 0xde, 0xe3,
-+			0xe8, 0xed, 0xf2, 0xf7, 0xfc, 0x01, 0x06, 0x0b,
-+			0x10, 0x15, 0x1a, 0x1f, 0x24, 0x29, 0x2e, 0x33,
-+			0x38, 0x3d, 0x42, 0x47, 0x4c, 0x51, 0x56, 0x5b,
-+			0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79, 0x7e, 0x83,
-+			0x88, 0x8d, 0x92, 0x97, 0x9c, 0xa1, 0xa6, 0xab,
-+			0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, 0xce, 0xd3,
-+			0xd8, 0xdd, 0xe2, 0xe7, 0xec, 0xf1, 0xf6, 0xfb,
-+			0x00, 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31,
-+			0x38, 0x3f, 0x46, 0x4d, 0x54, 0x5b, 0x62, 0x69,
-+			0x70, 0x77, 0x7e, 0x85, 0x8c, 0x93, 0x9a, 0xa1,
-+			0xa8, 0xaf, 0xb6, 0xbd, 0xc4, 0xcb, 0xd2, 0xd9,
-+			0xe0, 0xe7, 0xee, 0xf5, 0xfc, 0x03, 0x0a, 0x11,
-+			0x18, 0x1f, 0x26, 0x2d, 0x34, 0x3b, 0x42, 0x49,
-+			0x50, 0x57, 0x5e, 0x65, 0x6c, 0x73, 0x7a, 0x81,
-+			0x88, 0x8f, 0x96, 0x9d, 0xa4, 0xab, 0xb2, 0xb9,
-+			0xc0, 0xc7, 0xce, 0xd5, 0xdc, 0xe3, 0xea, 0xf1,
-+			0xf8, 0xff, 0x06, 0x0d, 0x14, 0x1b, 0x22, 0x29,
-+			0x30, 0x37, 0x3e, 0x45, 0x4c, 0x53, 0x5a, 0x61,
-+			0x68, 0x6f, 0x76, 0x7d, 0x84, 0x8b, 0x92, 0x99,
-+			0xa0, 0xa7, 0xae, 0xb5, 0xbc, 0xc3, 0xca, 0xd1,
-+			0xd8, 0xdf, 0xe6, 0xed, 0xf4, 0xfb, 0x02, 0x09,
-+			0x10, 0x17, 0x1e, 0x25, 0x2c, 0x33, 0x3a, 0x41,
-+			0x48, 0x4f, 0x56, 0x5d, 0x64, 0x6b, 0x72, 0x79,
-+			0x80, 0x87, 0x8e, 0x95, 0x9c, 0xa3, 0xaa, 0xb1,
-+			0xb8, 0xbf, 0xc6, 0xcd, 0xd4, 0xdb, 0xe2, 0xe9,
-+			0xf0, 0xf7, 0xfe, 0x05, 0x0c, 0x13, 0x1a, 0x21,
-+			0x28, 0x2f, 0x36, 0x3d, 0x44, 0x4b, 0x52, 0x59,
-+			0x60, 0x67, 0x6e, 0x75, 0x7c, 0x83, 0x8a, 0x91,
-+			0x98, 0x9f, 0xa6, 0xad, 0xb4, 0xbb, 0xc2, 0xc9,
-+			0xd0, 0xd7, 0xde, 0xe5, 0xec, 0xf3, 0xfa, 0x01,
-+			0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2b, 0x32, 0x39,
-+			0x40, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71,
-+			0x78, 0x7f, 0x86, 0x8d, 0x94, 0x9b, 0xa2, 0xa9,
-+			0xb0, 0xb7, 0xbe, 0xc5, 0xcc, 0xd3, 0xda, 0xe1,
-+			0xe8, 0xef, 0xf6, 0xfd, 0x04, 0x0b, 0x12, 0x19,
-+			0x20, 0x27, 0x2e, 0x35, 0x3c, 0x43, 0x4a, 0x51,
-+			0x58, 0x5f, 0x66, 0x6d, 0x74, 0x7b, 0x82, 0x89,
-+			0x90, 0x97, 0x9e, 0xa5, 0xac, 0xb3, 0xba, 0xc1,
-+			0xc8, 0xcf, 0xd6, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9,
-+			0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
-+			0x48, 0x51, 0x5a, 0x63, 0x6c, 0x75, 0x7e, 0x87,
-+			0x90, 0x99, 0xa2, 0xab, 0xb4, 0xbd, 0xc6, 0xcf,
-+			0xd8, 0xe1, 0xea, 0xf3, 0xfc, 0x05, 0x0e, 0x17,
-+			0x20, 0x29, 0x32, 0x3b, 0x44, 0x4d, 0x56, 0x5f,
-+			0x68, 0x71, 0x7a, 0x83, 0x8c, 0x95, 0x9e, 0xa7,
-+			0xb0, 0xb9, 0xc2, 0xcb, 0xd4, 0xdd, 0xe6, 0xef,
-+			0xf8, 0x01, 0x0a, 0x13, 0x1c, 0x25, 0x2e, 0x37,
-+			0x40, 0x49, 0x52, 0x5b, 0x64, 0x6d, 0x76, 0x7f,
-+			0x88, 0x91, 0x9a, 0xa3, 0xac, 0xb5, 0xbe, 0xc7,
-+			0xd0, 0xd9, 0xe2, 0xeb, 0xf4, 0xfd, 0x06, 0x0f,
-+			0x18, 0x21, 0x2a, 0x33, 0x3c, 0x45, 0x4e, 0x57,
-+			0x60, 0x69, 0x72, 0x7b, 0x84, 0x8d, 0x96, 0x9f,
-+			0xa8, 0xb1, 0xba, 0xc3, 0xcc, 0xd5, 0xde, 0xe7,
-+			0xf0, 0xf9, 0x02, 0x0b, 0x14, 0x1d, 0x26, 0x2f,
-+			0x38, 0x41, 0x4a, 0x53, 0x5c, 0x65, 0x6e, 0x77,
-+			0x80, 0x89, 0x92, 0x9b, 0xa4, 0xad, 0xb6, 0xbf,
-+			0xc8, 0xd1, 0xda, 0xe3, 0xec, 0xf5, 0xfe, 0x07,
-+			0x10, 0x19, 0x22, 0x2b, 0x34, 0x3d, 0x46, 0x4f,
-+			0x58, 0x61, 0x6a, 0x73, 0x7c, 0x85, 0x8e, 0x97,
-+			0xa0, 0xa9, 0xb2, 0xbb, 0xc4, 0xcd, 0xd6, 0xdf,
-+			0xe8, 0xf1, 0xfa, 0x03, 0x0c, 0x15, 0x1e, 0x27,
-+			0x30, 0x39, 0x42, 0x4b, 0x54, 0x5d, 0x66, 0x6f,
-+			0x78, 0x81, 0x8a, 0x93, 0x9c, 0xa5, 0xae, 0xb7,
-+			0xc0, 0xc9, 0xd2, 0xdb, 0xe4, 0xed, 0xf6, 0xff,
-+			0x08, 0x11, 0x1a, 0x23, 0x2c, 0x35, 0x3e, 0x47,
-+			0x50, 0x59, 0x62, 0x6b, 0x74, 0x7d, 0x86, 0x8f,
-+			0x98, 0xa1, 0xaa, 0xb3, 0xbc, 0xc5, 0xce, 0xd7,
-+			0xe0, 0xe9, 0xf2, 0xfb, 0x04, 0x0d, 0x16, 0x1f,
-+			0x28, 0x31, 0x3a, 0x43, 0x4c, 0x55, 0x5e, 0x67,
-+			0x70, 0x79, 0x82, 0x8b, 0x94, 0x9d, 0xa6, 0xaf,
-+			0xb8, 0xc1, 0xca, 0xd3, 0xdc, 0xe5, 0xee, 0xf7,
-+			0x00, 0x0b, 0x16, 0x21, 0x2c, 0x37, 0x42, 0x4d,
-+			0x58, 0x63, 0x6e, 0x79, 0x84, 0x8f, 0x9a, 0xa5,
-+			0xb0, 0xbb, 0xc6, 0xd1, 0xdc, 0xe7, 0xf2, 0xfd,
-+			0x08, 0x13, 0x1e, 0x29, 0x34, 0x3f, 0x4a, 0x55,
-+			0x60, 0x6b, 0x76, 0x81, 0x8c, 0x97, 0xa2, 0xad,
-+			0xb8, 0xc3, 0xce, 0xd9, 0xe4, 0xef, 0xfa, 0x05,
-+			0x10, 0x1b, 0x26, 0x31, 0x3c, 0x47, 0x52, 0x5d,
-+			0x68, 0x73, 0x7e, 0x89, 0x94, 0x9f, 0xaa, 0xb5,
-+			0xc0, 0xcb, 0xd6, 0xe1, 0xec, 0xf7, 0x02, 0x0d,
-+			0x18, 0x23, 0x2e, 0x39, 0x44, 0x4f, 0x5a, 0x65,
-+			0x70, 0x7b, 0x86, 0x91, 0x9c, 0xa7, 0xb2, 0xbd,
-+			0xc8, 0xd3, 0xde, 0xe9, 0xf4, 0xff, 0x0a, 0x15,
-+			0x20, 0x2b, 0x36, 0x41, 0x4c, 0x57, 0x62, 0x6d,
-+			0x78, 0x83, 0x8e, 0x99, 0xa4, 0xaf, 0xba, 0xc5,
-+			0xd0, 0xdb, 0xe6, 0xf1, 0xfc, 0x07, 0x12, 0x1d,
-+			0x28, 0x33, 0x3e, 0x49, 0x54, 0x5f, 0x6a, 0x75,
-+			0x80, 0x8b, 0x96, 0xa1, 0xac, 0xb7, 0xc2, 0xcd,
-+			0xd8, 0xe3, 0xee, 0xf9, 0x04, 0x0f, 0x1a, 0x25,
-+			0x30, 0x3b, 0x46, 0x51, 0x5c, 0x67, 0x72, 0x7d,
-+			0x88, 0x93, 0x9e, 0xa9, 0xb4, 0xbf, 0xca, 0xd5,
-+			0xe0, 0xeb, 0xf6, 0x01, 0x0c, 0x17, 0x22, 0x2d,
-+			0x38, 0x43, 0x4e, 0x59, 0x64, 0x6f, 0x7a, 0x85,
-+			0x90, 0x9b, 0xa6, 0xb1, 0xbc, 0xc7, 0xd2, 0xdd,
-+			0xe8, 0xf3, 0xfe, 0x09, 0x14, 0x1f, 0x2a, 0x35,
-+			0x40, 0x4b, 0x56, 0x61, 0x6c, 0x77, 0x82, 0x8d,
-+			0x98, 0xa3, 0xae, 0xb9, 0xc4, 0xcf, 0xda, 0xe5,
-+			0xf0, 0xfb, 0x06, 0x11, 0x1c, 0x27, 0x32, 0x3d,
-+			0x48, 0x53, 0x5e, 0x69, 0x74, 0x7f, 0x8a, 0x95,
-+			0xa0, 0xab, 0xb6, 0xc1, 0xcc, 0xd7, 0xe2, 0xed,
-+			0xf8, 0x03, 0x0e, 0x19, 0x24, 0x2f, 0x3a, 0x45,
-+			0x50, 0x5b, 0x66, 0x71, 0x7c, 0x87, 0x92, 0x9d,
-+			0xa8, 0xb3, 0xbe, 0xc9, 0xd4, 0xdf, 0xea, 0xf5,
-+			0x00, 0x0d, 0x1a, 0x27, 0x34, 0x41, 0x4e, 0x5b,
-+			0x68, 0x75, 0x82, 0x8f, 0x9c, 0xa9, 0xb6, 0xc3,
-+			0xd0, 0xdd, 0xea, 0xf7, 0x04, 0x11, 0x1e, 0x2b,
-+			0x38, 0x45, 0x52, 0x5f, 0x6c, 0x79, 0x86, 0x93,
-+			0xa0, 0xad, 0xba, 0xc7, 0xd4, 0xe1, 0xee, 0xfb,
-+			0x08, 0x15, 0x22, 0x2f, 0x3c, 0x49, 0x56, 0x63,
-+			0x70, 0x7d, 0x8a, 0x97, 0xa4, 0xb1, 0xbe, 0xcb,
-+			0xd8, 0xe5, 0xf2, 0xff, 0x0c, 0x19, 0x26, 0x33,
-+			0x40, 0x4d, 0x5a, 0x67, 0x74, 0x81, 0x8e, 0x9b,
-+			0xa8, 0xb5, 0xc2, 0xcf, 0xdc, 0xe9, 0xf6, 0x03,
-+			0x10, 0x1d, 0x2a, 0x37, 0x44, 0x51, 0x5e, 0x6b,
-+			0x78, 0x85, 0x92, 0x9f, 0xac, 0xb9, 0xc6, 0xd3,
-+			0xe0, 0xed, 0xfa, 0x07, 0x14, 0x21, 0x2e, 0x3b,
-+			0x48, 0x55, 0x62, 0x6f, 0x7c, 0x89, 0x96, 0xa3,
-+			0xb0, 0xbd, 0xca, 0xd7, 0xe4, 0xf1, 0xfe, 0x0b,
-+			0x18, 0x25, 0x32, 0x3f, 0x4c, 0x59, 0x66, 0x73,
-+			0x80, 0x8d, 0x9a, 0xa7, 0xb4, 0xc1, 0xce, 0xdb,
-+			0xe8, 0xf5, 0x02, 0x0f, 0x1c, 0x29, 0x36, 0x43,
-+			0x50, 0x5d, 0x6a, 0x77, 0x84, 0x91, 0x9e, 0xab,
-+			0xb8, 0xc5, 0xd2, 0xdf, 0xec, 0xf9, 0x06, 0x13,
-+			0x20, 0x2d, 0x3a, 0x47, 0x54, 0x61, 0x6e, 0x7b,
-+			0x88, 0x95, 0xa2, 0xaf, 0xbc, 0xc9, 0xd6, 0xe3,
-+			0xf0, 0xfd, 0x0a, 0x17, 0x24, 0x31, 0x3e, 0x4b,
-+			0x58, 0x65, 0x72, 0x7f, 0x8c, 0x99, 0xa6, 0xb3,
-+			0xc0, 0xcd, 0xda, 0xe7, 0xf4, 0x01, 0x0e, 0x1b,
-+			0x28, 0x35, 0x42, 0x4f, 0x5c, 0x69, 0x76, 0x83,
-+			0x90, 0x9d, 0xaa, 0xb7, 0xc4, 0xd1, 0xde, 0xeb,
-+			0xf8, 0x05, 0x12, 0x1f, 0x2c, 0x39, 0x46, 0x53,
-+			0x60, 0x6d, 0x7a, 0x87, 0x94, 0xa1, 0xae, 0xbb,
-+			0xc8, 0xd5, 0xe2, 0xef, 0xfc, 0x09, 0x16, 0x23,
-+			0x30, 0x3d, 0x4a, 0x57, 0x64, 0x71, 0x7e, 0x8b,
-+			0x98, 0xa5, 0xb2, 0xbf, 0xcc, 0xd9, 0xe6, 0xf3,
-+			0x00, 0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69,
-+			0x78, 0x87, 0x96, 0xa5, 0xb4, 0xc3, 0xd2, 0xe1,
-+			0xf0, 0xff, 0x0e, 0x1d, 0x2c, 0x3b, 0x4a, 0x59,
-+			0x68, 0x77, 0x86, 0x95, 0xa4, 0xb3, 0xc2, 0xd1,
-+			0xe0, 0xef, 0xfe, 0x0d, 0x1c, 0x2b, 0x3a, 0x49,
-+			0x58, 0x67, 0x76, 0x85, 0x94, 0xa3, 0xb2, 0xc1,
-+			0xd0, 0xdf, 0xee, 0xfd, 0x0c, 0x1b, 0x2a, 0x39,
-+			0x48, 0x57, 0x66, 0x75, 0x84, 0x93, 0xa2, 0xb1,
-+			0xc0, 0xcf, 0xde, 0xed, 0xfc, 0x0b, 0x1a, 0x29,
-+			0x38, 0x47, 0x56, 0x65, 0x74, 0x83, 0x92, 0xa1,
-+			0xb0, 0xbf, 0xce, 0xdd, 0xec, 0xfb, 0x0a, 0x19,
-+			0x28, 0x37, 0x46, 0x55, 0x64, 0x73, 0x82, 0x91,
-+			0xa0, 0xaf, 0xbe, 0xcd, 0xdc, 0xeb, 0xfa, 0x09,
-+			0x18, 0x27, 0x36, 0x45, 0x54, 0x63, 0x72, 0x81,
-+			0x90, 0x9f, 0xae, 0xbd, 0xcc, 0xdb, 0xea, 0xf9,
-+			0x08, 0x17, 0x26, 0x35, 0x44, 0x53, 0x62, 0x71,
-+			0x80, 0x8f, 0x9e, 0xad, 0xbc, 0xcb, 0xda, 0xe9,
-+			0xf8, 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61,
-+			0x70, 0x7f, 0x8e, 0x9d, 0xac, 0xbb, 0xca, 0xd9,
-+			0xe8, 0xf7, 0x06, 0x15, 0x24, 0x33, 0x42, 0x51,
-+			0x60, 0x6f, 0x7e, 0x8d, 0x9c, 0xab, 0xba, 0xc9,
-+			0xd8, 0xe7, 0xf6, 0x05, 0x14, 0x23, 0x32, 0x41,
-+			0x50, 0x5f, 0x6e, 0x7d, 0x8c, 0x9b, 0xaa, 0xb9,
-+			0xc8, 0xd7, 0xe6, 0xf5, 0x04, 0x13, 0x22, 0x31,
-+			0x40, 0x4f, 0x5e, 0x6d, 0x7c, 0x8b, 0x9a, 0xa9,
-+			0xb8, 0xc7, 0xd6, 0xe5, 0xf4, 0x03, 0x12, 0x21,
-+			0x30, 0x3f, 0x4e, 0x5d, 0x6c, 0x7b, 0x8a, 0x99,
-+			0xa8, 0xb7, 0xc6, 0xd5, 0xe4, 0xf3, 0x02, 0x11,
-+			0x20, 0x2f, 0x3e, 0x4d, 0x5c, 0x6b, 0x7a, 0x89,
-+			0x98, 0xa7, 0xb6, 0xc5, 0xd4, 0xe3, 0xf2, 0x01,
-+			0x10, 0x1f, 0x2e, 0x3d, 0x4c, 0x5b, 0x6a, 0x79,
-+			0x88, 0x97, 0xa6, 0xb5, 0xc4, 0xd3, 0xe2, 0xf1,
-+			0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
-+			0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
-+			0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87,
-+			0x98, 0xa9, 0xba, 0xcb, 0xdc, 0xed, 0xfe, 0x0f,
-+			0x20, 0x31, 0x42, 0x53, 0x64, 0x75, 0x86, 0x97,
-+			0xa8, 0xb9, 0xca, 0xdb, 0xec, 0xfd, 0x0e, 0x1f,
-+			0x30, 0x41, 0x52, 0x63, 0x74, 0x85, 0x96, 0xa7,
-+			0xb8, 0xc9, 0xda, 0xeb, 0xfc, 0x0d, 0x1e, 0x2f,
-+			0x40, 0x51, 0x62, 0x73, 0x84, 0x95, 0xa6, 0xb7,
-+			0xc8, 0xd9, 0xea, 0xfb, 0x0c, 0x1d, 0x2e, 0x3f,
-+			0x50, 0x61, 0x72, 0x83, 0x94, 0xa5, 0xb6, 0xc7,
-+			0xd8, 0xe9, 0xfa, 0x0b, 0x1c, 0x2d, 0x3e, 0x4f,
-+			0x60, 0x71, 0x82, 0x93, 0xa4, 0xb5, 0xc6, 0xd7,
-+			0xe8, 0xf9, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f,
-+			0x70, 0x81, 0x92, 0xa3, 0xb4, 0xc5, 0xd6, 0xe7,
-+			0xf8, 0x09, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f,
-+			0x80, 0x91, 0xa2, 0xb3, 0xc4, 0xd5, 0xe6, 0xf7,
-+			0x08, 0x19, 0x2a, 0x3b, 0x4c, 0x5d, 0x6e, 0x7f,
-+			0x90, 0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6, 0x07,
-+			0x18, 0x29, 0x3a, 0x4b, 0x5c, 0x6d, 0x7e, 0x8f,
-+			0xa0, 0xb1, 0xc2, 0xd3, 0xe4, 0xf5, 0x06, 0x17,
-+			0x28, 0x39, 0x4a, 0x5b, 0x6c, 0x7d, 0x8e, 0x9f,
-+			0xb0, 0xc1, 0xd2, 0xe3, 0xf4, 0x05, 0x16, 0x27,
-+			0x38, 0x49, 0x5a, 0x6b, 0x7c, 0x8d, 0x9e, 0xaf,
-+			0xc0, 0xd1, 0xe2, 0xf3, 0x04, 0x15, 0x26, 0x37,
-+			0x48, 0x59, 0x6a, 0x7b, 0x8c, 0x9d, 0xae, 0xbf,
-+			0xd0, 0xe1, 0xf2, 0x03, 0x14, 0x25, 0x36, 0x47,
-+			0x58, 0x69, 0x7a, 0x8b, 0x9c, 0xad, 0xbe, 0xcf,
-+			0xe0, 0xf1, 0x02, 0x13, 0x24, 0x35, 0x46, 0x57,
-+			0x68, 0x79, 0x8a, 0x9b, 0xac, 0xbd, 0xce, 0xdf,
-+			0xf0, 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67,
-+			0x78, 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef,
-+			0x00, 0x13, 0x26, 0x39, 0x4c, 0x5f, 0x72, 0x85,
-+			0x98, 0xab, 0xbe, 0xd1, 0xe4, 0xf7, 0x0a, 0x1d,
-+			0x30, 0x43, 0x56, 0x69, 0x7c, 0x8f, 0xa2, 0xb5,
-+			0xc8, 0xdb, 0xee, 0x01, 0x14, 0x27, 0x3a, 0x4d,
-+			0x60, 0x73, 0x86, 0x99, 0xac, 0xbf, 0xd2, 0xe5,
-+			0xf8, 0x0b, 0x1e, 0x31, 0x44, 0x57, 0x6a, 0x7d,
-+			0x90, 0xa3, 0xb6, 0xc9, 0xdc, 0xef, 0x02, 0x15,
-+			0x28, 0x3b, 0x4e, 0x61, 0x74, 0x87, 0x9a, 0xad,
-+			0xc0, 0xd3, 0xe6, 0xf9, 0x0c, 0x1f, 0x32, 0x45,
-+			0x58, 0x6b, 0x7e, 0x91, 0xa4, 0xb7, 0xca, 0xdd,
-+			0xf0, 0x03, 0x16, 0x29, 0x3c, 0x4f, 0x62, 0x75,
-+			0x88, 0x9b, 0xae, 0xc1, 0xd4, 0xe7, 0xfa, 0x0d,
-+			0x20, 0x33, 0x46, 0x59, 0x6c, 0x7f, 0x92, 0xa5,
-+			0xb8, 0xcb, 0xde, 0xf1, 0x04, 0x17, 0x2a, 0x3d,
-+			0x50, 0x63, 0x76, 0x89, 0x9c, 0xaf, 0xc2, 0xd5,
-+			0xe8, 0xfb, 0x0e, 0x21, 0x34, 0x47, 0x5a, 0x6d,
-+			0x80, 0x93, 0xa6, 0xb9, 0xcc, 0xdf, 0xf2, 0x05,
-+			0x18, 0x2b, 0x3e, 0x51, 0x64, 0x77, 0x8a, 0x9d,
-+			0xb0, 0xc3, 0xd6, 0xe9, 0xfc, 0x0f, 0x22, 0x35,
-+			0x48, 0x5b, 0x6e, 0x81, 0x94, 0xa7, 0xba, 0xcd,
-+			0xe0, 0xf3, 0x06, 0x19, 0x2c, 0x3f, 0x52, 0x65,
-+			0x78, 0x8b, 0x9e, 0xb1, 0xc4, 0xd7, 0xea, 0xfd,
-+			0x10, 0x23, 0x36, 0x49, 0x5c, 0x6f, 0x82, 0x95,
-+			0xa8, 0xbb, 0xce, 0xe1, 0xf4, 0x07, 0x1a, 0x2d,
-+			0x40, 0x53, 0x66, 0x79, 0x8c, 0x9f, 0xb2, 0xc5,
-+			0xd8, 0xeb, 0xfe, 0x11, 0x24, 0x37, 0x4a, 0x5d,
-+			0x70, 0x83, 0x96, 0xa9, 0xbc, 0xcf, 0xe2, 0xf5,
-+			0x08, 0x1b, 0x2e, 0x41, 0x54, 0x67, 0x7a, 0x8d,
-+			0xa0, 0xb3, 0xc6, 0xd9, 0xec, 0xff, 0x12, 0x25,
-+			0x38, 0x4b, 0x5e, 0x71, 0x84, 0x97, 0xaa, 0xbd,
-+			0xd0, 0xe3, 0xf6, 0x09, 0x1c, 0x2f, 0x42, 0x55,
-+			0x68, 0x7b, 0x8e, 0xa1, 0xb4, 0xc7, 0xda, 0xed,
-+			0x00, 0x15, 0x2a, 0x3f, 0x54, 0x69, 0x7e, 0x93,
-+			0xa8, 0xbd, 0xd2, 0xe7, 0xfc, 0x11, 0x26, 0x3b,
-+			0x50, 0x65, 0x7a, 0x8f, 0xa4, 0xb9, 0xce, 0xe3,
-+			0xf8, 0x0d, 0x22, 0x37, 0x4c, 0x61, 0x76, 0x8b,
-+			0xa0, 0xb5, 0xca, 0xdf, 0xf4, 0x09, 0x1e, 0x33,
-+			0x48, 0x5d, 0x72, 0x87, 0x9c, 0xb1, 0xc6, 0xdb,
-+			0xf0, 0x05, 0x1a, 0x2f, 0x44, 0x59, 0x6e, 0x83,
-+			0x98, 0xad, 0xc2, 0xd7, 0xec, 0x01, 0x16, 0x2b,
-+			0x40, 0x55, 0x6a, 0x7f, 0x94, 0xa9, 0xbe, 0xd3,
-+			0xe8, 0xfd, 0x12, 0x27, 0x3c, 0x51, 0x66, 0x7b,
-+			0x90, 0xa5, 0xba, 0xcf, 0xe4, 0xf9, 0x0e, 0x23,
-+			0x38, 0x4d, 0x62, 0x77, 0x8c, 0xa1, 0xb6, 0xcb,
-+			0xe0, 0xf5, 0x0a, 0x1f, 0x34, 0x49, 0x5e, 0x73,
-+			0x88, 0x9d, 0xb2, 0xc7, 0xdc, 0xf1, 0x06, 0x1b,
-+			0x30, 0x45, 0x5a, 0x6f, 0x84, 0x99, 0xae, 0xc3,
-+			0xd8, 0xed, 0x02, 0x17, 0x2c, 0x41, 0x56, 0x6b,
-+			0x80, 0x95, 0xaa, 0xbf, 0xd4, 0xe9, 0xfe, 0x13,
-+			0x28, 0x3d, 0x52, 0x67, 0x7c, 0x91, 0xa6, 0xbb,
-+			0xd0, 0xe5, 0xfa, 0x0f, 0x24, 0x39, 0x4e, 0x63,
-+			0x78, 0x8d, 0xa2, 0xb7, 0xcc, 0xe1, 0xf6, 0x0b,
-+			0x20, 0x35, 0x4a, 0x5f, 0x74, 0x89, 0x9e, 0xb3,
-+			0xc8, 0xdd, 0xf2, 0x07, 0x1c, 0x31, 0x46, 0x5b,
-+			0x70, 0x85, 0x9a, 0xaf, 0xc4, 0xd9, 0xee, 0x03,
-+			0x18, 0x2d, 0x42, 0x57, 0x6c, 0x81, 0x96, 0xab,
-+			0xc0, 0xd5, 0xea, 0xff, 0x14, 0x29, 0x3e, 0x53,
-+			0x68, 0x7d, 0x92, 0xa7, 0xbc, 0xd1, 0xe6, 0xfb,
-+			0x10, 0x25, 0x3a, 0x4f, 0x64, 0x79, 0x8e, 0xa3,
-+			0xb8, 0xcd, 0xe2, 0xf7, 0x0c, 0x21, 0x36, 0x4b,
-+			0x60, 0x75, 0x8a, 0x9f, 0xb4, 0xc9, 0xde, 0xf3,
-+			0x08, 0x1d, 0x32, 0x47, 0x5c, 0x71, 0x86, 0x9b,
-+			0xb0, 0xc5, 0xda, 0xef, 0x04, 0x19, 0x2e, 0x43,
-+			0x58, 0x6d, 0x82, 0x97, 0xac, 0xc1, 0xd6, 0xeb,
-+			0x00, 0x17, 0x2e, 0x45, 0x5c, 0x73, 0x8a, 0xa1,
-+			0xb8, 0xcf, 0xe6, 0xfd, 0x14, 0x2b, 0x42, 0x59,
-+			0x70, 0x87, 0x9e, 0xb5, 0xcc, 0xe3, 0xfa, 0x11,
-+			0x28, 0x3f, 0x56, 0x6d, 0x84, 0x9b, 0xb2, 0xc9,
-+			0xe0, 0xf7, 0x0e, 0x25, 0x3c, 0x53, 0x6a, 0x81,
-+			0x98, 0xaf, 0xc6, 0xdd, 0xf4, 0x0b, 0x22, 0x39,
-+			0x50, 0x67, 0x7e, 0x95, 0xac, 0xc3, 0xda, 0xf1,
-+			0x08, 0x1f, 0x36, 0x4d, 0x64, 0x7b, 0x92, 0xa9,
-+			0xc0, 0xd7, 0xee, 0x05, 0x1c, 0x33, 0x4a, 0x61,
-+			0x78, 0x8f, 0xa6, 0xbd, 0xd4, 0xeb, 0x02, 0x19,
-+			0x30, 0x47, 0x5e, 0x75, 0x8c, 0xa3, 0xba, 0xd1,
-+			0xe8, 0xff, 0x16, 0x2d, 0x44, 0x5b, 0x72, 0x89,
-+			0xa0, 0xb7, 0xce, 0xe5, 0xfc, 0x13, 0x2a, 0x41,
-+			0x58, 0x6f, 0x86, 0x9d, 0xb4, 0xcb, 0xe2, 0xf9,
-+			0x10, 0x27, 0x3e, 0x55, 0x6c, 0x83, 0x9a, 0xb1,
-+			0xc8, 0xdf, 0xf6, 0x0d, 0x24, 0x3b, 0x52, 0x69,
-+			0x80, 0x97, 0xae, 0xc5, 0xdc, 0xf3, 0x0a, 0x21,
-+			0x38, 0x4f, 0x66, 0x7d, 0x94, 0xab, 0xc2, 0xd9,
-+			0xf0, 0x07, 0x1e, 0x35, 0x4c, 0x63, 0x7a, 0x91,
-+			0xa8, 0xbf, 0xd6, 0xed, 0x04, 0x1b, 0x32, 0x49,
-+			0x60, 0x77, 0x8e, 0xa5, 0xbc, 0xd3, 0xea, 0x01,
-+			0x18, 0x2f, 0x46, 0x5d, 0x74, 0x8b, 0xa2, 0xb9,
-+			0xd0, 0xe7, 0xfe, 0x15, 0x2c, 0x43, 0x5a, 0x71,
-+			0x88, 0x9f, 0xb6, 0xcd, 0xe4, 0xfb, 0x12, 0x29,
-+			0x40, 0x57, 0x6e, 0x85, 0x9c, 0xb3, 0xca, 0xe1,
-+			0xf8, 0x0f, 0x26, 0x3d, 0x54, 0x6b, 0x82, 0x99,
-+			0xb0, 0xc7, 0xde, 0xf5, 0x0c, 0x23, 0x3a, 0x51,
-+			0x68, 0x7f, 0x96, 0xad, 0xc4, 0xdb, 0xf2, 0x09,
-+			0x20, 0x37, 0x4e, 0x65, 0x7c, 0x93, 0xaa, 0xc1,
-+			0xd8, 0xef, 0x06, 0x1d, 0x34, 0x4b, 0x62, 0x79,
-+			0x90, 0xa7, 0xbe, 0xd5, 0xec, 0x03, 0x1a, 0x31,
-+			0x48, 0x5f, 0x76, 0x8d, 0xa4, 0xbb, 0xd2, 0xe9,
-+			0x00, 0x19, 0x32, 0x4b, 0x64, 0x7d, 0x96, 0xaf,
-+			0xc8, 0xe1, 0xfa, 0x13, 0x2c, 0x45, 0x5e, 0x77,
-+			0x90, 0xa9, 0xc2, 0xdb, 0xf4, 0x0d, 0x26, 0x3f,
-+			0x58, 0x71, 0x8a, 0xa3, 0xbc, 0xd5, 0xee, 0x07,
-+			0x20, 0x39, 0x52, 0x6b, 0x84, 0x9d, 0xb6, 0xcf,
-+			0xe8, 0x01, 0x1a, 0x33, 0x4c, 0x65, 0x7e, 0x97,
-+			0xb0, 0xc9, 0xe2, 0xfb, 0x14, 0x2d, 0x46, 0x5f,
-+			0x78, 0x91, 0xaa, 0xc3, 0xdc, 0xf5, 0x0e, 0x27,
-+			0x40, 0x59, 0x72, 0x8b, 0xa4, 0xbd, 0xd6, 0xef,
-+			0x08, 0x21, 0x3a, 0x53, 0x6c, 0x85, 0x9e, 0xb7,
-+			0xd0, 0xe9, 0x02, 0x1b, 0x34, 0x4d, 0x66, 0x7f,
-+			0x98, 0xb1, 0xca, 0xe3, 0xfc, 0x15, 0x2e, 0x47,
-+			0x60, 0x79, 0x92, 0xab, 0xc4, 0xdd, 0xf6, 0x0f,
-+			0x28, 0x41, 0x5a, 0x73, 0x8c, 0xa5, 0xbe, 0xd7,
-+			0xf0, 0x09, 0x22, 0x3b, 0x54, 0x6d, 0x86, 0x9f,
-+			0xb8, 0xd1, 0xea, 0x03, 0x1c, 0x35, 0x4e, 0x67,
-+			0x80, 0x99, 0xb2, 0xcb, 0xe4, 0xfd, 0x16, 0x2f,
-+			0x48, 0x61, 0x7a, 0x93, 0xac, 0xc5, 0xde, 0xf7,
-+			0x10, 0x29, 0x42, 0x5b, 0x74, 0x8d, 0xa6, 0xbf,
-+			0xd8, 0xf1, 0x0a, 0x23, 0x3c, 0x55, 0x6e, 0x87,
-+			0xa0, 0xb9, 0xd2, 0xeb, 0x04, 0x1d, 0x36, 0x4f,
-+			0x68, 0x81, 0x9a, 0xb3, 0xcc, 0xe5, 0xfe, 0x17,
-+			0x30, 0x49, 0x62, 0x7b, 0x94, 0xad, 0xc6, 0xdf,
-+			0xf8, 0x11, 0x2a, 0x43, 0x5c, 0x75, 0x8e, 0xa7,
-+			0xc0, 0xd9, 0xf2, 0x0b, 0x24, 0x3d, 0x56, 0x6f,
-+			0x88, 0xa1, 0xba, 0xd3, 0xec, 0x05, 0x1e, 0x37,
-+			0x50, 0x69, 0x82, 0x9b, 0xb4, 0xcd, 0xe6, 0xff,
-+			0x18, 0x31, 0x4a, 0x63, 0x7c, 0x95, 0xae, 0xc7,
-+			0xe0, 0xf9, 0x12, 0x2b, 0x44, 0x5d, 0x76, 0x8f,
-+			0xa8, 0xc1, 0xda, 0xf3, 0x0c, 0x25, 0x3e, 0x57,
-+			0x70, 0x89, 0xa2, 0xbb, 0xd4, 0xed, 0x06, 0x1f,
-+			0x38, 0x51, 0x6a, 0x83, 0x9c, 0xb5, 0xce, 0xe7,
-+			0x00, 0x1b, 0x36, 0x51, 0x6c, 0x87, 0xa2, 0xbd,
-+			0xd8, 0xf3, 0x0e, 0x29, 0x44, 0x5f, 0x7a, 0x95,
-+			0xb0, 0xcb, 0xe6, 0x01, 0x1c, 0x37, 0x52, 0x6d,
-+			0x88, 0xa3, 0xbe, 0xd9, 0xf4, 0x0f, 0x2a, 0x45,
-+			0x60, 0x7b, 0x96, 0xb1, 0xcc, 0xe7, 0x02, 0x1d,
-+			0x38, 0x53, 0x6e, 0x89, 0xa4, 0xbf, 0xda, 0xf5,
-+			0x10, 0x2b, 0x46, 0x61, 0x7c, 0x97, 0xb2, 0xcd,
-+			0xe8, 0x03, 0x1e, 0x39, 0x54, 0x6f, 0x8a, 0xa5,
-+			0xc0, 0xdb, 0xf6, 0x11, 0x2c, 0x47, 0x62, 0x7d,
-+			0x98, 0xb3, 0xce, 0xe9, 0x04, 0x1f, 0x3a, 0x55,
-+			0x70, 0x8b, 0xa6, 0xc1, 0xdc, 0xf7, 0x12, 0x2d,
-+			0x48, 0x63, 0x7e, 0x99, 0xb4, 0xcf, 0xea, 0x05,
-+			0x20, 0x3b, 0x56, 0x71, 0x8c, 0xa7, 0xc2, 0xdd,
-+			0xf8, 0x13, 0x2e, 0x49, 0x64, 0x7f, 0x9a, 0xb5,
-+			0xd0, 0xeb, 0x06, 0x21, 0x3c, 0x57, 0x72, 0x8d,
-+			0xa8, 0xc3, 0xde, 0xf9, 0x14, 0x2f, 0x4a, 0x65,
-+			0x80, 0x9b, 0xb6, 0xd1, 0xec, 0x07, 0x22, 0x3d,
-+			0x58, 0x73, 0x8e, 0xa9, 0xc4, 0xdf, 0xfa, 0x15,
-+			0x30, 0x4b, 0x66, 0x81, 0x9c, 0xb7, 0xd2, 0xed,
-+			0x08, 0x23, 0x3e, 0x59, 0x74, 0x8f, 0xaa, 0xc5,
-+			0xe0, 0xfb, 0x16, 0x31, 0x4c, 0x67, 0x82, 0x9d,
-+			0xb8, 0xd3, 0xee, 0x09, 0x24, 0x3f, 0x5a, 0x75,
-+			0x90, 0xab, 0xc6, 0xe1, 0xfc, 0x17, 0x32, 0x4d,
-+			0x68, 0x83, 0x9e, 0xb9, 0xd4, 0xef, 0x0a, 0x25,
-+			0x40, 0x5b, 0x76, 0x91, 0xac, 0xc7, 0xe2, 0xfd,
-+			0x18, 0x33, 0x4e, 0x69, 0x84, 0x9f, 0xba, 0xd5,
-+			0xf0, 0x0b, 0x26, 0x41, 0x5c, 0x77, 0x92, 0xad,
-+			0xc8, 0xe3, 0xfe, 0x19, 0x34, 0x4f, 0x6a, 0x85,
-+			0xa0, 0xbb, 0xd6, 0xf1, 0x0c, 0x27, 0x42, 0x5d,
-+			0x78, 0x93, 0xae, 0xc9, 0xe4, 0xff, 0x1a, 0x35,
-+			0x50, 0x6b, 0x86, 0xa1, 0xbc, 0xd7, 0xf2, 0x0d,
-+			0x28, 0x43, 0x5e, 0x79, 0x94, 0xaf, 0xca, 0xe5,
-+			0x00, 0x1d, 0x3a, 0x57, 0x74, 0x91, 0xae, 0xcb,
-+			0xe8, 0x05, 0x22, 0x3f, 0x5c, 0x79, 0x96, 0xb3,
-+			0xd0, 0xed, 0x0a, 0x27, 0x44, 0x61, 0x7e, 0x9b,
-+			0xb8, 0xd5, 0xf2, 0x0f, 0x2c, 0x49, 0x66, 0x83,
-+			0xa0, 0xbd, 0xda, 0xf7, 0x14, 0x31, 0x4e, 0x6b,
-+			0x88, 0xa5, 0xc2, 0xdf, 0xfc, 0x19, 0x36, 0x53,
-+			0x70, 0x8d, 0xaa, 0xc7, 0xe4, 0x01, 0x1e, 0x3b,
-+			0x58, 0x75, 0x92, 0xaf, 0xcc, 0xe9, 0x06, 0x23,
-+			0x40, 0x5d, 0x7a, 0x97, 0xb4, 0xd1, 0xee, 0x0b,
-+			0x28, 0x45, 0x62, 0x7f, 0x9c, 0xb9, 0xd6, 0xf3,
-+			0x10, 0x2d, 0x4a, 0x67, 0x84, 0xa1, 0xbe, 0xdb,
-+			0xf8, 0x15, 0x32, 0x4f, 0x6c, 0x89, 0xa6, 0xc3,
-+			0xe0, 0xfd, 0x1a, 0x37, 0x54, 0x71, 0x8e, 0xab,
-+			0xc8, 0xe5, 0x02, 0x1f, 0x3c, 0x59, 0x76, 0x93,
-+			0xb0, 0xcd, 0xea, 0x07, 0x24, 0x41, 0x5e, 0x7b,
-+			0x98, 0xb5, 0xd2, 0xef, 0x0c, 0x29, 0x46, 0x63,
-+			0x80, 0x9d, 0xba, 0xd7, 0xf4, 0x11, 0x2e, 0x4b,
-+			0x68, 0x85, 0xa2, 0xbf, 0xdc, 0xf9, 0x16, 0x33,
-+			0x50, 0x6d, 0x8a, 0xa7, 0xc4, 0xe1, 0xfe, 0x1b,
-+			0x38, 0x55, 0x72, 0x8f, 0xac, 0xc9, 0xe6, 0x03,
-+			0x20, 0x3d, 0x5a, 0x77, 0x94, 0xb1, 0xce, 0xeb,
-+			0x08, 0x25, 0x42, 0x5f, 0x7c, 0x99, 0xb6, 0xd3,
-+			0xf0, 0x0d, 0x2a, 0x47, 0x64, 0x81, 0x9e, 0xbb,
-+			0xd8, 0xf5, 0x12, 0x2f, 0x4c, 0x69, 0x86, 0xa3,
-+			0xc0, 0xdd, 0xfa, 0x17, 0x34, 0x51, 0x6e, 0x8b,
-+			0xa8, 0xc5, 0xe2, 0xff, 0x1c, 0x39, 0x56, 0x73,
-+			0x90, 0xad, 0xca, 0xe7, 0x04, 0x21, 0x3e, 0x5b,
-+			0x78, 0x95, 0xb2, 0xcf, 0xec, 0x09, 0x26, 0x43,
-+			0x60, 0x7d, 0x9a, 0xb7, 0xd4, 0xf1, 0x0e, 0x2b,
-+			0x48, 0x65, 0x82, 0x9f, 0xbc, 0xd9, 0xf6, 0x13,
-+			0x30, 0x4d, 0x6a, 0x87, 0xa4, 0xc1, 0xde, 0xfb,
-+			0x18, 0x35, 0x52, 0x6f, 0x8c, 0xa9, 0xc6, 0xe3,
-+			0x00, 0x1f, 0x3e, 0x5d, 0x7c, 0x9b, 0xba, 0xd9,
-+			0xf8, 0x17, 0x36, 0x55, 0x74, 0x93, 0xb2, 0xd1,
-+			0xf0, 0x0f, 0x2e, 0x4d, 0x6c, 0x8b, 0xaa, 0xc9,
-+			0xe8, 0x07, 0x26, 0x45, 0x64, 0x83, 0xa2, 0xc1,
-+			0xe0, 0xff, 0x1e, 0x3d, 0x5c, 0x7b, 0x9a, 0xb9,
-+			0xd8, 0xf7, 0x16, 0x35, 0x54, 0x73, 0x92, 0xb1,
-+			0xd0, 0xef, 0x0e, 0x2d, 0x4c, 0x6b, 0x8a, 0xa9,
-+			0xc8, 0xe7, 0x06, 0x25, 0x44, 0x63, 0x82, 0xa1,
-+			0xc0, 0xdf, 0xfe, 0x1d, 0x3c, 0x5b, 0x7a, 0x99,
-+			0xb8, 0xd7, 0xf6, 0x15, 0x34, 0x53, 0x72, 0x91,
-+			0xb0, 0xcf, 0xee, 0x0d, 0x2c, 0x4b, 0x6a, 0x89,
-+			0xa8, 0xc7, 0xe6, 0x05, 0x24, 0x43, 0x62, 0x81,
-+			0xa0, 0xbf, 0xde, 0xfd, 0x1c, 0x3b, 0x5a, 0x79,
-+			0x98, 0xb7, 0xd6, 0xf5, 0x14, 0x33, 0x52, 0x71,
-+			0x90, 0xaf, 0xce, 0xed, 0x0c, 0x2b, 0x4a, 0x69,
-+			0x88, 0xa7, 0xc6, 0xe5, 0x04, 0x23, 0x42, 0x61,
-+			0x80, 0x9f, 0xbe, 0xdd, 0xfc, 0x1b, 0x3a, 0x59,
-+			0x78, 0x97, 0xb6, 0xd5, 0xf4, 0x13, 0x32, 0x51,
-+			0x70, 0x8f, 0xae, 0xcd, 0xec, 0x0b, 0x2a, 0x49,
-+			0x68, 0x87, 0xa6, 0xc5, 0xe4, 0x03, 0x22, 0x41,
-+			0x60, 0x7f, 0x9e, 0xbd, 0xdc, 0xfb, 0x1a, 0x39,
-+			0x58, 0x77, 0x96, 0xb5, 0xd4, 0xf3, 0x12, 0x31,
-+			0x50, 0x6f, 0x8e, 0xad, 0xcc, 0xeb, 0x0a, 0x29,
-+			0x48, 0x67, 0x86, 0xa5, 0xc4, 0xe3, 0x02, 0x21,
-+			0x40, 0x5f, 0x7e, 0x9d, 0xbc, 0xdb, 0xfa, 0x19,
-+			0x38, 0x57, 0x76, 0x95, 0xb4, 0xd3, 0xf2, 0x11,
-+			0x30, 0x4f, 0x6e, 0x8d, 0xac, 0xcb, 0xea, 0x09,
-+			0x28, 0x47, 0x66, 0x85, 0xa4, 0xc3, 0xe2, 0x01,
-+			0x20, 0x3f, 0x5e, 0x7d, 0x9c, 0xbb, 0xda, 0xf9,
-+			0x18, 0x37, 0x56, 0x75, 0x94, 0xb3, 0xd2, 0xf1,
-+			0x10, 0x2f, 0x4e, 0x6d, 0x8c, 0xab, 0xca, 0xe9,
-+			0x08, 0x27, 0x46, 0x65, 0x84, 0xa3, 0xc2, 0xe1,
-+			0x00, 0x21, 0x42, 0x63,
-+		},
-+		.ilen = 4100,
-+		.result = {
-+			0xf0, 0x5c, 0x74, 0xad, 0x4e, 0xbc, 0x99, 0xe2,
-+			0xae, 0xff, 0x91, 0x3a, 0x44, 0xcf, 0x38, 0x32,
-+			0x1e, 0xad, 0xa7, 0xcd, 0xa1, 0x39, 0x95, 0xaa,
-+			0x10, 0xb1, 0xb3, 0x2e, 0x04, 0x31, 0x8f, 0x86,
-+			0xf2, 0x62, 0x74, 0x70, 0x0c, 0xa4, 0x46, 0x08,
-+			0xa8, 0xb7, 0x99, 0xa8, 0xe9, 0xd2, 0x73, 0x79,
-+			0x7e, 0x6e, 0xd4, 0x8f, 0x1e, 0xc7, 0x8e, 0x31,
-+			0x0b, 0xfa, 0x4b, 0xce, 0xfd, 0xf3, 0x57, 0x71,
-+			0xe9, 0x46, 0x03, 0xa5, 0x3d, 0x34, 0x00, 0xe2,
-+			0x18, 0xff, 0x75, 0x6d, 0x06, 0x2d, 0x00, 0xab,
-+			0xb9, 0x3e, 0x6c, 0x59, 0xc5, 0x84, 0x06, 0xb5,
-+			0x8b, 0xd0, 0x89, 0x9c, 0x4a, 0x79, 0x16, 0xc6,
-+			0x3d, 0x74, 0x54, 0xfa, 0x44, 0xcd, 0x23, 0x26,
-+			0x5c, 0xcf, 0x7e, 0x28, 0x92, 0x32, 0xbf, 0xdf,
-+			0xa7, 0x20, 0x3c, 0x74, 0x58, 0x2a, 0x9a, 0xde,
-+			0x61, 0x00, 0x1c, 0x4f, 0xff, 0x59, 0xc4, 0x22,
-+			0xac, 0x3c, 0xd0, 0xe8, 0x6c, 0xf9, 0x97, 0x1b,
-+			0x58, 0x9b, 0xad, 0x71, 0xe8, 0xa9, 0xb5, 0x0d,
-+			0xee, 0x2f, 0x04, 0x1f, 0x7f, 0xbc, 0x99, 0xee,
-+			0x84, 0xff, 0x42, 0x60, 0xdc, 0x3a, 0x18, 0xa5,
-+			0x81, 0xf9, 0xef, 0xdc, 0x7a, 0x0f, 0x65, 0x41,
-+			0x2f, 0xa3, 0xd3, 0xf9, 0xc2, 0xcb, 0xc0, 0x4d,
-+			0x8f, 0xd3, 0x76, 0x96, 0xad, 0x49, 0x6d, 0x38,
-+			0x3d, 0x39, 0x0b, 0x6c, 0x80, 0xb7, 0x54, 0x69,
-+			0xf0, 0x2c, 0x90, 0x02, 0x29, 0x0d, 0x1c, 0x12,
-+			0xad, 0x55, 0xc3, 0x8b, 0x68, 0xd9, 0xcc, 0xb3,
-+			0xb2, 0x64, 0x33, 0x90, 0x5e, 0xca, 0x4b, 0xe2,
-+			0xfb, 0x75, 0xdc, 0x63, 0xf7, 0x9f, 0x82, 0x74,
-+			0xf0, 0xc9, 0xaa, 0x7f, 0xe9, 0x2a, 0x9b, 0x33,
-+			0xbc, 0x88, 0x00, 0x7f, 0xca, 0xb2, 0x1f, 0x14,
-+			0xdb, 0xc5, 0x8e, 0x7b, 0x11, 0x3c, 0x3e, 0x08,
-+			0xf3, 0x83, 0xe8, 0xe0, 0x94, 0x86, 0x2e, 0x92,
-+			0x78, 0x6b, 0x01, 0xc9, 0xc7, 0x83, 0xba, 0x21,
-+			0x6a, 0x25, 0x15, 0x33, 0x4e, 0x45, 0x08, 0xec,
-+			0x35, 0xdb, 0xe0, 0x6e, 0x31, 0x51, 0x79, 0xa9,
-+			0x42, 0x44, 0x65, 0xc1, 0xa0, 0xf1, 0xf9, 0x2a,
-+			0x70, 0xd5, 0xb6, 0xc6, 0xc1, 0x8c, 0x39, 0xfc,
-+			0x25, 0xa6, 0x55, 0xd9, 0xdd, 0x2d, 0x4c, 0xec,
-+			0x49, 0xc6, 0xeb, 0x0e, 0xa8, 0x25, 0x2a, 0x16,
-+			0x1b, 0x66, 0x84, 0xda, 0xe2, 0x92, 0xe5, 0xc0,
-+			0xc8, 0x53, 0x07, 0xaf, 0x80, 0x84, 0xec, 0xfd,
-+			0xcd, 0xd1, 0x6e, 0xcd, 0x6f, 0x6a, 0xf5, 0x36,
-+			0xc5, 0x15, 0xe5, 0x25, 0x7d, 0x77, 0xd1, 0x1a,
-+			0x93, 0x36, 0xa9, 0xcf, 0x7c, 0xa4, 0x54, 0x4a,
-+			0x06, 0x51, 0x48, 0x4e, 0xf6, 0x59, 0x87, 0xd2,
-+			0x04, 0x02, 0xef, 0xd3, 0x44, 0xde, 0x76, 0x31,
-+			0xb3, 0x34, 0x17, 0x1b, 0x9d, 0x66, 0x11, 0x9f,
-+			0x1e, 0xcc, 0x17, 0xe9, 0xc7, 0x3c, 0x1b, 0xe7,
-+			0xcb, 0x50, 0x08, 0xfc, 0xdc, 0x2b, 0x24, 0xdb,
-+			0x65, 0x83, 0xd0, 0x3b, 0xe3, 0x30, 0xea, 0x94,
-+			0x6c, 0xe7, 0xe8, 0x35, 0x32, 0xc7, 0xdb, 0x64,
-+			0xb4, 0x01, 0xab, 0x36, 0x2c, 0x77, 0x13, 0xaf,
-+			0xf8, 0x2b, 0x88, 0x3f, 0x54, 0x39, 0xc4, 0x44,
-+			0xfe, 0xef, 0x6f, 0x68, 0x34, 0xbe, 0x0f, 0x05,
-+			0x16, 0x6d, 0xf6, 0x0a, 0x30, 0xe7, 0xe3, 0xed,
-+			0xc4, 0xde, 0x3c, 0x1b, 0x13, 0xd8, 0xdb, 0xfe,
-+			0x41, 0x62, 0xe5, 0x28, 0xd4, 0x8d, 0xa3, 0xc7,
-+			0x93, 0x97, 0xc6, 0x48, 0x45, 0x1d, 0x9f, 0x83,
-+			0xdf, 0x4b, 0x40, 0x3e, 0x42, 0x25, 0x87, 0x80,
-+			0x4c, 0x7d, 0xa8, 0xd4, 0x98, 0x23, 0x95, 0x75,
-+			0x41, 0x8c, 0xda, 0x41, 0x9b, 0xd4, 0xa7, 0x06,
-+			0xb5, 0xf1, 0x71, 0x09, 0x53, 0xbe, 0xca, 0xbf,
-+			0x32, 0x03, 0xed, 0xf0, 0x50, 0x1c, 0x56, 0x39,
-+			0x5b, 0xa4, 0x75, 0x18, 0xf7, 0x9b, 0x58, 0xef,
-+			0x53, 0xfc, 0x2a, 0x38, 0x23, 0x15, 0x75, 0xcd,
-+			0x45, 0xe5, 0x5a, 0x82, 0x55, 0xba, 0x21, 0xfa,
-+			0xd4, 0xbd, 0xc6, 0x94, 0x7c, 0xc5, 0x80, 0x12,
-+			0xf7, 0x4b, 0x32, 0xc4, 0x9a, 0x82, 0xd8, 0x28,
-+			0x8f, 0xd9, 0xc2, 0x0f, 0x60, 0x03, 0xbe, 0x5e,
-+			0x21, 0xd6, 0x5f, 0x58, 0xbf, 0x5c, 0xb1, 0x32,
-+			0x82, 0x8d, 0xa9, 0xe5, 0xf2, 0x66, 0x1a, 0xc0,
-+			0xa0, 0xbc, 0x58, 0x2f, 0x71, 0xf5, 0x2f, 0xed,
-+			0xd1, 0x26, 0xb9, 0xd8, 0x49, 0x5a, 0x07, 0x19,
-+			0x01, 0x7c, 0x59, 0xb0, 0xf8, 0xa4, 0xb7, 0xd3,
-+			0x7b, 0x1a, 0x8c, 0x38, 0xf4, 0x50, 0xa4, 0x59,
-+			0xb0, 0xcc, 0x41, 0x0b, 0x88, 0x7f, 0xe5, 0x31,
-+			0xb3, 0x42, 0xba, 0xa2, 0x7e, 0xd4, 0x32, 0x71,
-+			0x45, 0x87, 0x48, 0xa9, 0xc2, 0xf2, 0x89, 0xb3,
-+			0xe4, 0xa7, 0x7e, 0x52, 0x15, 0x61, 0xfa, 0xfe,
-+			0xc9, 0xdd, 0x81, 0xeb, 0x13, 0xab, 0xab, 0xc3,
-+			0x98, 0x59, 0xd8, 0x16, 0x3d, 0x14, 0x7a, 0x1c,
-+			0x3c, 0x41, 0x9a, 0x16, 0x16, 0x9b, 0xd2, 0xd2,
-+			0x69, 0x3a, 0x29, 0x23, 0xac, 0x86, 0x32, 0xa5,
-+			0x48, 0x9c, 0x9e, 0xf3, 0x47, 0x77, 0x81, 0x70,
-+			0x24, 0xe8, 0x85, 0xd2, 0xf5, 0xb5, 0xfa, 0xff,
-+			0x59, 0x6a, 0xd3, 0x50, 0x59, 0x43, 0x59, 0xde,
-+			0xd9, 0xf1, 0x55, 0xa5, 0x0c, 0xc3, 0x1a, 0x1a,
-+			0x18, 0x34, 0x0d, 0x1a, 0x63, 0x33, 0xed, 0x10,
-+			0xe0, 0x1d, 0x2a, 0x18, 0xd2, 0xc0, 0x54, 0xa8,
-+			0xca, 0xb5, 0x9a, 0xd3, 0xdd, 0xca, 0x45, 0x84,
-+			0x50, 0xe7, 0x0f, 0xfe, 0xa4, 0x99, 0x5a, 0xbe,
-+			0x43, 0x2d, 0x9a, 0xcb, 0x92, 0x3f, 0x5a, 0x1d,
-+			0x85, 0xd8, 0xc9, 0xdf, 0x68, 0xc9, 0x12, 0x80,
-+			0x56, 0x0c, 0xdc, 0x00, 0xdc, 0x3a, 0x7d, 0x9d,
-+			0xa3, 0xa2, 0xe8, 0x4d, 0xbf, 0xf9, 0x70, 0xa0,
-+			0xa4, 0x13, 0x4f, 0x6b, 0xaf, 0x0a, 0x89, 0x7f,
-+			0xda, 0xf0, 0xbf, 0x9b, 0xc8, 0x1d, 0xe5, 0xf8,
-+			0x2e, 0x8b, 0x07, 0xb5, 0x73, 0x1b, 0xcc, 0xa2,
-+			0xa6, 0xad, 0x30, 0xbc, 0x78, 0x3c, 0x5b, 0x10,
-+			0xfa, 0x5e, 0x62, 0x2d, 0x9e, 0x64, 0xb3, 0x33,
-+			0xce, 0xf9, 0x1f, 0x86, 0xe7, 0x8b, 0xa2, 0xb8,
-+			0xe8, 0x99, 0x57, 0x8c, 0x11, 0xed, 0x66, 0xd9,
-+			0x3c, 0x72, 0xb9, 0xc3, 0xe6, 0x4e, 0x17, 0x3a,
-+			0x6a, 0xcb, 0x42, 0x24, 0x06, 0xed, 0x3e, 0x4e,
-+			0xa3, 0xe8, 0x6a, 0x94, 0xda, 0x0d, 0x4e, 0xd5,
-+			0x14, 0x19, 0xcf, 0xb6, 0x26, 0xd8, 0x2e, 0xcc,
-+			0x64, 0x76, 0x38, 0x49, 0x4d, 0xfe, 0x30, 0x6d,
-+			0xe4, 0xc8, 0x8c, 0x7b, 0xc4, 0xe0, 0x35, 0xba,
-+			0x22, 0x6e, 0x76, 0xe1, 0x1a, 0xf2, 0x53, 0xc3,
-+			0x28, 0xa2, 0x82, 0x1f, 0x61, 0x69, 0xad, 0xc1,
-+			0x7b, 0x28, 0x4b, 0x1e, 0x6c, 0x85, 0x95, 0x9b,
-+			0x51, 0xb5, 0x17, 0x7f, 0x12, 0x69, 0x8c, 0x24,
-+			0xd5, 0xc7, 0x5a, 0x5a, 0x11, 0x54, 0xff, 0x5a,
-+			0xf7, 0x16, 0xc3, 0x91, 0xa6, 0xf0, 0xdc, 0x0a,
-+			0xb6, 0xa7, 0x4a, 0x0d, 0x7a, 0x58, 0xfe, 0xa5,
-+			0xf5, 0xcb, 0x8f, 0x7b, 0x0e, 0xea, 0x57, 0xe7,
-+			0xbd, 0x79, 0xd6, 0x1c, 0x88, 0x23, 0x6c, 0xf2,
-+			0x4d, 0x29, 0x77, 0x53, 0x35, 0x6a, 0x00, 0x8d,
-+			0xcd, 0xa3, 0x58, 0xbe, 0x77, 0x99, 0x18, 0xf8,
-+			0xe6, 0xe1, 0x8f, 0xe9, 0x37, 0x8f, 0xe3, 0xe2,
-+			0x5a, 0x8a, 0x93, 0x25, 0xaf, 0xf3, 0x78, 0x80,
-+			0xbe, 0xa6, 0x1b, 0xc6, 0xac, 0x8b, 0x1c, 0x91,
-+			0x58, 0xe1, 0x9f, 0x89, 0x35, 0x9d, 0x1d, 0x21,
-+			0x29, 0x9f, 0xf4, 0x99, 0x02, 0x27, 0x0f, 0xa8,
-+			0x4f, 0x79, 0x94, 0x2b, 0x33, 0x2c, 0xda, 0xa2,
-+			0x26, 0x39, 0x83, 0x94, 0xef, 0x27, 0xd8, 0x53,
-+			0x8f, 0x66, 0x0d, 0xe4, 0x41, 0x7d, 0x34, 0xcd,
-+			0x43, 0x7c, 0x95, 0x0a, 0x53, 0xef, 0x66, 0xda,
-+			0x7e, 0x9b, 0xf3, 0x93, 0xaf, 0xd0, 0x73, 0x71,
-+			0xba, 0x40, 0x9b, 0x74, 0xf8, 0xd7, 0xd7, 0x41,
-+			0x6d, 0xaf, 0x72, 0x9c, 0x8d, 0x21, 0x87, 0x3c,
-+			0xfd, 0x0a, 0x90, 0xa9, 0x47, 0x96, 0x9e, 0xd3,
-+			0x88, 0xee, 0x73, 0xcf, 0x66, 0x2f, 0x52, 0x56,
-+			0x6d, 0xa9, 0x80, 0x4c, 0xe2, 0x6f, 0x62, 0x88,
-+			0x3f, 0x0e, 0x54, 0x17, 0x48, 0x80, 0x5d, 0xd3,
-+			0xc3, 0xda, 0x25, 0x3d, 0xa1, 0xc8, 0xcb, 0x9f,
-+			0x9b, 0x70, 0xb3, 0xa1, 0xeb, 0x04, 0x52, 0xa1,
-+			0xf2, 0x22, 0x0f, 0xfc, 0xc8, 0x18, 0xfa, 0xf9,
-+			0x85, 0x9c, 0xf1, 0xac, 0xeb, 0x0c, 0x02, 0x46,
-+			0x75, 0xd2, 0xf5, 0x2c, 0xe3, 0xd2, 0x59, 0x94,
-+			0x12, 0xf3, 0x3c, 0xfc, 0xd7, 0x92, 0xfa, 0x36,
-+			0xba, 0x61, 0x34, 0x38, 0x7c, 0xda, 0x48, 0x3e,
-+			0x08, 0xc9, 0x39, 0x23, 0x5e, 0x02, 0x2c, 0x1a,
-+			0x18, 0x7e, 0xb4, 0xd9, 0xfd, 0x9e, 0x40, 0x02,
-+			0xb1, 0x33, 0x37, 0x32, 0xe7, 0xde, 0xd6, 0xd0,
-+			0x7c, 0x58, 0x65, 0x4b, 0xf8, 0x34, 0x27, 0x9c,
-+			0x44, 0xb4, 0xbd, 0xe9, 0xe9, 0x4c, 0x78, 0x7d,
-+			0x4b, 0x9f, 0xce, 0xb1, 0xcd, 0x47, 0xa5, 0x37,
-+			0xe5, 0x6d, 0xbd, 0xb9, 0x43, 0x94, 0x0a, 0xd4,
-+			0xd6, 0xf9, 0x04, 0x5f, 0xb5, 0x66, 0x6c, 0x1a,
-+			0x35, 0x12, 0xe3, 0x36, 0x28, 0x27, 0x36, 0x58,
-+			0x01, 0x2b, 0x79, 0xe4, 0xba, 0x6d, 0x10, 0x7d,
-+			0x65, 0xdf, 0x84, 0x95, 0xf4, 0xd5, 0xb6, 0x8f,
-+			0x2b, 0x9f, 0x96, 0x00, 0x86, 0x60, 0xf0, 0x21,
-+			0x76, 0xa8, 0x6a, 0x8c, 0x28, 0x1c, 0xb3, 0x6b,
-+			0x97, 0xd7, 0xb6, 0x53, 0x2a, 0xcc, 0xab, 0x40,
-+			0x9d, 0x62, 0x79, 0x58, 0x52, 0xe6, 0x65, 0xb7,
-+			0xab, 0x55, 0x67, 0x9c, 0x89, 0x7c, 0x03, 0xb0,
-+			0x73, 0x59, 0xc5, 0x81, 0xf5, 0x18, 0x17, 0x5c,
-+			0x89, 0xf3, 0x78, 0x35, 0x44, 0x62, 0x78, 0x72,
-+			0xd0, 0x96, 0xeb, 0x31, 0xe7, 0x87, 0x77, 0x14,
-+			0x99, 0x51, 0xf2, 0x59, 0x26, 0x9e, 0xb5, 0xa6,
-+			0x45, 0xfe, 0x6e, 0xbd, 0x07, 0x4c, 0x94, 0x5a,
-+			0xa5, 0x7d, 0xfc, 0xf1, 0x2b, 0x77, 0xe2, 0xfe,
-+			0x17, 0xd4, 0x84, 0xa0, 0xac, 0xb5, 0xc7, 0xda,
-+			0xa9, 0x1a, 0xb6, 0xf3, 0x74, 0x11, 0xb4, 0x9d,
-+			0xfb, 0x79, 0x2e, 0x04, 0x2d, 0x50, 0x28, 0x83,
-+			0xbf, 0xc6, 0x52, 0xd3, 0x34, 0xd6, 0xe8, 0x7a,
-+			0xb6, 0xea, 0xe7, 0xa8, 0x6c, 0x15, 0x1e, 0x2c,
-+			0x57, 0xbc, 0x48, 0x4e, 0x5f, 0x5c, 0xb6, 0x92,
-+			0xd2, 0x49, 0x77, 0x81, 0x6d, 0x90, 0x70, 0xae,
-+			0x98, 0xa1, 0x03, 0x0d, 0x6b, 0xb9, 0x77, 0x14,
-+			0xf1, 0x4e, 0x23, 0xd3, 0xf8, 0x68, 0xbd, 0xc2,
-+			0xfe, 0x04, 0xb7, 0x5c, 0xc5, 0x17, 0x60, 0x8f,
-+			0x65, 0x54, 0xa4, 0x7a, 0x42, 0xdc, 0x18, 0x0d,
-+			0xb5, 0xcf, 0x0f, 0xd3, 0xc7, 0x91, 0x66, 0x1b,
-+			0x45, 0x42, 0x27, 0x75, 0x50, 0xe5, 0xee, 0xb8,
-+			0x7f, 0x33, 0x2c, 0xba, 0x4a, 0x92, 0x4d, 0x2c,
-+			0x3c, 0xe3, 0x0d, 0x80, 0x01, 0xba, 0x0d, 0x29,
-+			0xd8, 0x3c, 0xe9, 0x13, 0x16, 0x57, 0xe6, 0xea,
-+			0x94, 0x52, 0xe7, 0x00, 0x4d, 0x30, 0xb0, 0x0f,
-+			0x35, 0xb8, 0xb8, 0xa7, 0xb1, 0xb5, 0x3b, 0x44,
-+			0xe1, 0x2f, 0xfd, 0x88, 0xed, 0x43, 0xe7, 0x52,
-+			0x10, 0x93, 0xb3, 0x8a, 0x30, 0x6b, 0x0a, 0xf7,
-+			0x23, 0xc6, 0x50, 0x9d, 0x4a, 0xb0, 0xde, 0xc3,
-+			0xdc, 0x9b, 0x2f, 0x01, 0x56, 0x36, 0x09, 0xc5,
-+			0x2f, 0x6b, 0xfe, 0xf1, 0xd8, 0x27, 0x45, 0x03,
-+			0x30, 0x5e, 0x5c, 0x5b, 0xb4, 0x62, 0x0e, 0x1a,
-+			0xa9, 0x21, 0x2b, 0x92, 0x94, 0x87, 0x62, 0x57,
-+			0x4c, 0x10, 0x74, 0x1a, 0xf1, 0x0a, 0xc5, 0x84,
-+			0x3b, 0x9e, 0x72, 0x02, 0xd7, 0xcc, 0x09, 0x56,
-+			0xbd, 0x54, 0xc1, 0xf0, 0xc3, 0xe3, 0xb3, 0xf8,
-+			0xd2, 0x0d, 0x61, 0xcb, 0xef, 0xce, 0x0d, 0x05,
-+			0xb0, 0x98, 0xd9, 0x8e, 0x4f, 0xf9, 0xbc, 0x93,
-+			0xa6, 0xea, 0xc8, 0xcf, 0x10, 0x53, 0x4b, 0xf1,
-+			0xec, 0xfc, 0x89, 0xf9, 0x64, 0xb0, 0x22, 0xbf,
-+			0x9e, 0x55, 0x46, 0x9f, 0x7c, 0x50, 0x8e, 0x84,
-+			0x54, 0x20, 0x98, 0xd7, 0x6c, 0x40, 0x1e, 0xdb,
-+			0x69, 0x34, 0x78, 0x61, 0x24, 0x21, 0x9c, 0x8a,
-+			0xb3, 0x62, 0x31, 0x8b, 0x6e, 0xf5, 0x2a, 0x35,
-+			0x86, 0x13, 0xb1, 0x6c, 0x64, 0x2e, 0x41, 0xa5,
-+			0x05, 0xf2, 0x42, 0xba, 0xd2, 0x3a, 0x0d, 0x8e,
-+			0x8a, 0x59, 0x94, 0x3c, 0xcf, 0x36, 0x27, 0x82,
-+			0xc2, 0x45, 0xee, 0x58, 0xcd, 0x88, 0xb4, 0xec,
-+			0xde, 0xb2, 0x96, 0x0a, 0xaf, 0x38, 0x6f, 0x88,
-+			0xd7, 0xd8, 0xe1, 0xdf, 0xb9, 0x96, 0xa9, 0x0a,
-+			0xb1, 0x95, 0x28, 0x86, 0x20, 0xe9, 0x17, 0x49,
-+			0xa2, 0x29, 0x38, 0xaa, 0xa5, 0xe9, 0x6e, 0xf1,
-+			0x19, 0x27, 0xc0, 0xd5, 0x2a, 0x22, 0xc3, 0x0b,
-+			0xdb, 0x7c, 0x73, 0x10, 0xb9, 0xba, 0x89, 0x76,
-+			0x54, 0xae, 0x7d, 0x71, 0xb3, 0x93, 0xf6, 0x32,
-+			0xe6, 0x47, 0x43, 0x55, 0xac, 0xa0, 0x0d, 0xc2,
-+			0x93, 0x27, 0x4a, 0x8e, 0x0e, 0x74, 0x15, 0xc7,
-+			0x0b, 0x85, 0xd9, 0x0c, 0xa9, 0x30, 0x7a, 0x3e,
-+			0xea, 0x8f, 0x85, 0x6d, 0x3a, 0x12, 0x4f, 0x72,
-+			0x69, 0x58, 0x7a, 0x80, 0xbb, 0xb5, 0x97, 0xf3,
-+			0xcf, 0x70, 0xd2, 0x5d, 0xdd, 0x4d, 0x21, 0x79,
-+			0x54, 0x4d, 0xe4, 0x05, 0xe8, 0xbd, 0xc2, 0x62,
-+			0xb1, 0x3b, 0x77, 0x1c, 0xd6, 0x5c, 0xf3, 0xa0,
-+			0x79, 0x00, 0xa8, 0x6c, 0x29, 0xd9, 0x18, 0x24,
-+			0x36, 0xa2, 0x46, 0xc0, 0x96, 0x65, 0x7f, 0xbd,
-+			0x2a, 0xed, 0x36, 0x16, 0x0c, 0xaa, 0x9f, 0xf4,
-+			0xc5, 0xb4, 0xe2, 0x12, 0xed, 0x69, 0xed, 0x4f,
-+			0x26, 0x2c, 0x39, 0x52, 0x89, 0x98, 0xe7, 0x2c,
-+			0x99, 0xa4, 0x9e, 0xa3, 0x9b, 0x99, 0x46, 0x7a,
-+			0x3a, 0xdc, 0xa8, 0x59, 0xa3, 0xdb, 0xc3, 0x3b,
-+			0x95, 0x0d, 0x3b, 0x09, 0x6e, 0xee, 0x83, 0x5d,
-+			0x32, 0x4d, 0xed, 0xab, 0xfa, 0x98, 0x14, 0x4e,
-+			0xc3, 0x15, 0x45, 0x53, 0x61, 0xc4, 0x93, 0xbd,
-+			0x90, 0xf4, 0x99, 0x95, 0x4c, 0xe6, 0x76, 0x92,
-+			0x29, 0x90, 0x46, 0x30, 0x92, 0x69, 0x7d, 0x13,
-+			0xf2, 0xa5, 0xcd, 0x69, 0x49, 0x44, 0xb2, 0x0f,
-+			0x63, 0x40, 0x36, 0x5f, 0x09, 0xe2, 0x78, 0xf8,
-+			0x91, 0xe3, 0xe2, 0xfa, 0x10, 0xf7, 0xc8, 0x24,
-+			0xa8, 0x89, 0x32, 0x5c, 0x37, 0x25, 0x1d, 0xb2,
-+			0xea, 0x17, 0x8a, 0x0a, 0xa9, 0x64, 0xc3, 0x7c,
-+			0x3c, 0x7c, 0xbd, 0xc6, 0x79, 0x34, 0xe7, 0xe2,
-+			0x85, 0x8e, 0xbf, 0xf8, 0xde, 0x92, 0xa0, 0xae,
-+			0x20, 0xc4, 0xf6, 0xbb, 0x1f, 0x38, 0x19, 0x0e,
-+			0xe8, 0x79, 0x9c, 0xa1, 0x23, 0xe9, 0x54, 0x7e,
-+			0x37, 0x2f, 0xe2, 0x94, 0x32, 0xaf, 0xa0, 0x23,
-+			0x49, 0xe4, 0xc0, 0xb3, 0xac, 0x00, 0x8f, 0x36,
-+			0x05, 0xc4, 0xa6, 0x96, 0xec, 0x05, 0x98, 0x4f,
-+			0x96, 0x67, 0x57, 0x1f, 0x20, 0x86, 0x1b, 0x2d,
-+			0x69, 0xe4, 0x29, 0x93, 0x66, 0x5f, 0xaf, 0x6b,
-+			0x88, 0x26, 0x2c, 0x67, 0x02, 0x4b, 0x52, 0xd0,
-+			0x83, 0x7a, 0x43, 0x1f, 0xc0, 0x71, 0x15, 0x25,
-+			0x77, 0x65, 0x08, 0x60, 0x11, 0x76, 0x4c, 0x8d,
-+			0xed, 0xa9, 0x27, 0xc6, 0xb1, 0x2a, 0x2c, 0x6a,
-+			0x4a, 0x97, 0xf5, 0xc6, 0xb7, 0x70, 0x42, 0xd3,
-+			0x03, 0xd1, 0x24, 0x95, 0xec, 0x6d, 0xab, 0x38,
-+			0x72, 0xce, 0xe2, 0x8b, 0x33, 0xd7, 0x51, 0x09,
-+			0xdc, 0x45, 0xe0, 0x09, 0x96, 0x32, 0xf3, 0xc4,
-+			0x84, 0xdc, 0x73, 0x73, 0x2d, 0x1b, 0x11, 0x98,
-+			0xc5, 0x0e, 0x69, 0x28, 0x94, 0xc7, 0xb5, 0x4d,
-+			0xc8, 0x8a, 0xd0, 0xaa, 0x13, 0x2e, 0x18, 0x74,
-+			0xdd, 0xd1, 0x1e, 0xf3, 0x90, 0xe8, 0xfc, 0x9a,
-+			0x72, 0x4a, 0x0e, 0xd1, 0xe4, 0xfb, 0x0d, 0x96,
-+			0xd1, 0x0c, 0x79, 0x85, 0x1b, 0x1c, 0xfe, 0xe1,
-+			0x62, 0x8f, 0x7a, 0x73, 0x32, 0xab, 0xc8, 0x18,
-+			0x69, 0xe3, 0x34, 0x30, 0xdf, 0x13, 0xa6, 0xe5,
-+			0xe8, 0x0e, 0x67, 0x7f, 0x81, 0x11, 0xb4, 0x60,
-+			0xc7, 0xbd, 0x79, 0x65, 0x50, 0xdc, 0xc4, 0x5b,
-+			0xde, 0x39, 0xa4, 0x01, 0x72, 0x63, 0xf3, 0xd1,
-+			0x64, 0x4e, 0xdf, 0xfc, 0x27, 0x92, 0x37, 0x0d,
-+			0x57, 0xcd, 0x11, 0x4f, 0x11, 0x04, 0x8e, 0x1d,
-+			0x16, 0xf7, 0xcd, 0x92, 0x9a, 0x99, 0x30, 0x14,
-+			0xf1, 0x7c, 0x67, 0x1b, 0x1f, 0x41, 0x0b, 0xe8,
-+			0x32, 0xe8, 0xb8, 0xc1, 0x4f, 0x54, 0x86, 0x4f,
-+			0xe5, 0x79, 0x81, 0x73, 0xcd, 0x43, 0x59, 0x68,
-+			0x73, 0x02, 0x3b, 0x78, 0x21, 0x72, 0x43, 0x00,
-+			0x49, 0x17, 0xf7, 0x00, 0xaf, 0x68, 0x24, 0x53,
-+			0x05, 0x0a, 0xc3, 0x33, 0xe0, 0x33, 0x3f, 0x69,
-+			0xd2, 0x84, 0x2f, 0x0b, 0xed, 0xde, 0x04, 0xf4,
-+			0x11, 0x94, 0x13, 0x69, 0x51, 0x09, 0x28, 0xde,
-+			0x57, 0x5c, 0xef, 0xdc, 0x9a, 0x49, 0x1c, 0x17,
-+			0x97, 0xf3, 0x96, 0xc1, 0x7f, 0x5d, 0x2e, 0x7d,
-+			0x55, 0xb8, 0xb3, 0x02, 0x09, 0xb3, 0x1f, 0xe7,
-+			0xc9, 0x8d, 0xa3, 0x36, 0x34, 0x8a, 0x77, 0x13,
-+			0x30, 0x63, 0x4c, 0xa5, 0xcd, 0xc3, 0xe0, 0x7e,
-+			0x05, 0xa1, 0x7b, 0x0c, 0xcb, 0x74, 0x47, 0x31,
-+			0x62, 0x03, 0x43, 0xf1, 0x87, 0xb4, 0xb0, 0x85,
-+			0x87, 0x8e, 0x4b, 0x25, 0xc7, 0xcf, 0xae, 0x4b,
-+			0x36, 0x46, 0x3e, 0x62, 0xbc, 0x6f, 0xeb, 0x5f,
-+			0x73, 0xac, 0xe6, 0x07, 0xee, 0xc1, 0xa1, 0xd6,
-+			0xc4, 0xab, 0xc9, 0xd6, 0x89, 0x45, 0xe1, 0xf1,
-+			0x04, 0x4e, 0x1a, 0x6f, 0xbb, 0x4f, 0x3a, 0xa3,
-+			0xa0, 0xcb, 0xa3, 0x0a, 0xd8, 0x71, 0x35, 0x55,
-+			0xe4, 0xbc, 0x2e, 0x04, 0x06, 0xe6, 0xff, 0x5b,
-+			0x1c, 0xc0, 0x11, 0x7c, 0xc5, 0x17, 0xf3, 0x38,
-+			0xcf, 0xe9, 0xba, 0x0f, 0x0e, 0xef, 0x02, 0xc2,
-+			0x8d, 0xc6, 0xbc, 0x4b, 0x67, 0x20, 0x95, 0xd7,
-+			0x2c, 0x45, 0x5b, 0x86, 0x44, 0x8c, 0x6f, 0x2e,
-+			0x7e, 0x9f, 0x1c, 0x77, 0xba, 0x6b, 0x0e, 0xa3,
-+			0x69, 0xdc, 0xab, 0x24, 0x57, 0x60, 0x47, 0xc1,
-+			0xd1, 0xa5, 0x9d, 0x23, 0xe6, 0xb1, 0x37, 0xfe,
-+			0x93, 0xd2, 0x4c, 0x46, 0xf9, 0x0c, 0xc6, 0xfb,
-+			0xd6, 0x9d, 0x99, 0x69, 0xab, 0x7a, 0x07, 0x0c,
-+			0x65, 0xe7, 0xc4, 0x08, 0x96, 0xe2, 0xa5, 0x01,
-+			0x3f, 0x46, 0x07, 0x05, 0x7e, 0xe8, 0x9a, 0x90,
-+			0x50, 0xdc, 0xe9, 0x7a, 0xea, 0xa1, 0x39, 0x6e,
-+			0x66, 0xe4, 0x6f, 0xa5, 0x5f, 0xb2, 0xd9, 0x5b,
-+			0xf5, 0xdb, 0x2a, 0x32, 0xf0, 0x11, 0x6f, 0x7c,
-+			0x26, 0x10, 0x8f, 0x3d, 0x80, 0xe9, 0x58, 0xf7,
-+			0xe0, 0xa8, 0x57, 0xf8, 0xdb, 0x0e, 0xce, 0x99,
-+			0x63, 0x19, 0x3d, 0xd5, 0xec, 0x1b, 0x77, 0x69,
-+			0x98, 0xf6, 0xe4, 0x5f, 0x67, 0x17, 0x4b, 0x09,
-+			0x85, 0x62, 0x82, 0x70, 0x18, 0xe2, 0x9a, 0x78,
-+			0xe2, 0x62, 0xbd, 0xb4, 0xf1, 0x42, 0xc6, 0xfb,
-+			0x08, 0xd0, 0xbd, 0xeb, 0x4e, 0x09, 0xf2, 0xc8,
-+			0x1e, 0xdc, 0x3d, 0x32, 0x21, 0x56, 0x9c, 0x4f,
-+			0x35, 0xf3, 0x61, 0x06, 0x72, 0x84, 0xc4, 0x32,
-+			0xf2, 0xf1, 0xfa, 0x0b, 0x2f, 0xc3, 0xdb, 0x02,
-+			0x04, 0xc2, 0xde, 0x57, 0x64, 0x60, 0x8d, 0xcf,
-+			0xcb, 0x86, 0x5d, 0x97, 0x3e, 0xb1, 0x9c, 0x01,
-+			0xd6, 0x28, 0x8f, 0x99, 0xbc, 0x46, 0xeb, 0x05,
-+			0xaf, 0x7e, 0xb8, 0x21, 0x2a, 0x56, 0x85, 0x1c,
-+			0xb3, 0x71, 0xa0, 0xde, 0xca, 0x96, 0xf1, 0x78,
-+			0x49, 0xa2, 0x99, 0x81, 0x80, 0x5c, 0x01, 0xf5,
-+			0xa0, 0xa2, 0x56, 0x63, 0xe2, 0x70, 0x07, 0xa5,
-+			0x95, 0xd6, 0x85, 0xeb, 0x36, 0x9e, 0xa9, 0x51,
-+			0x66, 0x56, 0x5f, 0x1d, 0x02, 0x19, 0xe2, 0xf6,
-+			0x4f, 0x73, 0x38, 0x09, 0x75, 0x64, 0x48, 0xe0,
-+			0xf1, 0x7e, 0x0e, 0xe8, 0x9d, 0xf9, 0xed, 0x94,
-+			0xfe, 0x16, 0x26, 0x62, 0x49, 0x74, 0xf4, 0xb0,
-+			0xd4, 0xa9, 0x6c, 0xb0, 0xfd, 0x53, 0xe9, 0x81,
-+			0xe0, 0x7a, 0xbf, 0xcf, 0xb5, 0xc4, 0x01, 0x81,
-+			0x79, 0x99, 0x77, 0x01, 0x3b, 0xe9, 0xa2, 0xb6,
-+			0xe6, 0x6a, 0x8a, 0x9e, 0x56, 0x1c, 0x8d, 0x1e,
-+			0x8f, 0x06, 0x55, 0x2c, 0x6c, 0xdc, 0x92, 0x87,
-+			0x64, 0x3b, 0x4b, 0x19, 0xa1, 0x13, 0x64, 0x1d,
-+			0x4a, 0xe9, 0xc0, 0x00, 0xb8, 0x95, 0xef, 0x6b,
-+			0x1a, 0x86, 0x6d, 0x37, 0x52, 0x02, 0xc2, 0xe0,
-+			0xc8, 0xbb, 0x42, 0x0c, 0x02, 0x21, 0x4a, 0xc9,
-+			0xef, 0xa0, 0x54, 0xe4, 0x5e, 0x16, 0x53, 0x81,
-+			0x70, 0x62, 0x10, 0xaf, 0xde, 0xb8, 0xb5, 0xd3,
-+			0xe8, 0x5e, 0x6c, 0xc3, 0x8a, 0x3e, 0x18, 0x07,
-+			0xf2, 0x2f, 0x7d, 0xa7, 0xe1, 0x3d, 0x4e, 0xb4,
-+			0x26, 0xa7, 0xa3, 0x93, 0x86, 0xb2, 0x04, 0x1e,
-+			0x53, 0x5d, 0x86, 0xd6, 0xde, 0x65, 0xca, 0xe3,
-+			0x4e, 0xc1, 0xcf, 0xef, 0xc8, 0x70, 0x1b, 0x83,
-+			0x13, 0xdd, 0x18, 0x8b, 0x0d, 0x76, 0xd2, 0xf6,
-+			0x37, 0x7a, 0x93, 0x7a, 0x50, 0x11, 0x9f, 0x96,
-+			0x86, 0x25, 0xfd, 0xac, 0xdc, 0xbe, 0x18, 0x93,
-+			0x19, 0x6b, 0xec, 0x58, 0x4f, 0xb9, 0x75, 0xa7,
-+			0xdd, 0x3f, 0x2f, 0xec, 0xc8, 0x5a, 0x84, 0xab,
-+			0xd5, 0xe4, 0x8a, 0x07, 0xf6, 0x4d, 0x23, 0xd6,
-+			0x03, 0xfb, 0x03, 0x6a, 0xea, 0x66, 0xbf, 0xd4,
-+			0xb1, 0x34, 0xfb, 0x78, 0xe9, 0x55, 0xdc, 0x7c,
-+			0x3d, 0x9c, 0xe5, 0x9a, 0xac, 0xc3, 0x7a, 0x80,
-+			0x24, 0x6d, 0xa0, 0xef, 0x25, 0x7c, 0xb7, 0xea,
-+			0xce, 0x4d, 0x5f, 0x18, 0x60, 0xce, 0x87, 0x22,
-+			0x66, 0x2f, 0xd5, 0xdd, 0xdd, 0x02, 0x21, 0x75,
-+			0x82, 0xa0, 0x1f, 0x58, 0xc6, 0xd3, 0x62, 0xf7,
-+			0x32, 0xd8, 0xaf, 0x1e, 0x07, 0x77, 0x51, 0x96,
-+			0xd5, 0x6b, 0x1e, 0x7e, 0x80, 0x02, 0xe8, 0x67,
-+			0xea, 0x17, 0x0b, 0x10, 0xd2, 0x3f, 0x28, 0x25,
-+			0x4f, 0x05, 0x77, 0x02, 0x14, 0x69, 0xf0, 0x2c,
-+			0xbe, 0x0c, 0xf1, 0x74, 0x30, 0xd1, 0xb9, 0x9b,
-+			0xfc, 0x8c, 0xbb, 0x04, 0x16, 0xd9, 0xba, 0xc3,
-+			0xbc, 0x91, 0x8a, 0xc4, 0x30, 0xa4, 0xb0, 0x12,
-+			0x4c, 0x21, 0x87, 0xcb, 0xc9, 0x1d, 0x16, 0x96,
-+			0x07, 0x6f, 0x23, 0x54, 0xb9, 0x6f, 0x79, 0xe5,
-+			0x64, 0xc0, 0x64, 0xda, 0xb1, 0xae, 0xdd, 0x60,
-+			0x6c, 0x1a, 0x9d, 0xd3, 0x04, 0x8e, 0x45, 0xb0,
-+			0x92, 0x61, 0xd0, 0x48, 0x81, 0xed, 0x5e, 0x1d,
-+			0xa0, 0xc9, 0xa4, 0x33, 0xc7, 0x13, 0x51, 0x5d,
-+			0x7f, 0x83, 0x73, 0xb6, 0x70, 0x18, 0x65, 0x3e,
-+			0x2f, 0x0e, 0x7a, 0x12, 0x39, 0x98, 0xab, 0xd8,
-+			0x7e, 0x6f, 0xa3, 0xd1, 0xba, 0x56, 0xad, 0xbd,
-+			0xf0, 0x03, 0x01, 0x1c, 0x85, 0x35, 0x9f, 0xeb,
-+			0x19, 0x63, 0xa1, 0xaf, 0xfe, 0x2d, 0x35, 0x50,
-+			0x39, 0xa0, 0x65, 0x7c, 0x95, 0x7e, 0x6b, 0xfe,
-+			0xc1, 0xac, 0x07, 0x7c, 0x98, 0x4f, 0xbe, 0x57,
-+			0xa7, 0x22, 0xec, 0xe2, 0x7e, 0x29, 0x09, 0x53,
-+			0xe8, 0xbf, 0xb4, 0x7e, 0x3f, 0x8f, 0xfc, 0x14,
-+			0xce, 0x54, 0xf9, 0x18, 0x58, 0xb5, 0xff, 0x44,
-+			0x05, 0x9d, 0xce, 0x1b, 0xb6, 0x82, 0x23, 0xc8,
-+			0x2e, 0xbc, 0x69, 0xbb, 0x4a, 0x29, 0x0f, 0x65,
-+			0x94, 0xf0, 0x63, 0x06, 0x0e, 0xef, 0x8c, 0xbd,
-+			0xff, 0xfd, 0xb0, 0x21, 0x6e, 0x57, 0x05, 0x75,
-+			0xda, 0xd5, 0xc4, 0xeb, 0x8d, 0x32, 0xf7, 0x50,
-+			0xd3, 0x6f, 0x22, 0xed, 0x5f, 0x8e, 0xa2, 0x5b,
-+			0x80, 0x8c, 0xc8, 0x78, 0x40, 0x24, 0x4b, 0x89,
-+			0x30, 0xce, 0x7a, 0x97, 0x0e, 0xc4, 0xaf, 0xef,
-+			0x9b, 0xb4, 0xcd, 0x66, 0x74, 0x14, 0x04, 0x2b,
-+			0xf7, 0xce, 0x0b, 0x1c, 0x6e, 0xc2, 0x78, 0x8c,
-+			0xca, 0xc5, 0xd0, 0x1c, 0x95, 0x4a, 0x91, 0x2d,
-+			0xa7, 0x20, 0xeb, 0x86, 0x52, 0xb7, 0x67, 0xd8,
-+			0x0c, 0xd6, 0x04, 0x14, 0xde, 0x51, 0x74, 0x75,
-+			0xe7, 0x11, 0xb4, 0x87, 0xa3, 0x3d, 0x2d, 0xad,
-+			0x4f, 0xef, 0xa0, 0x0f, 0x70, 0x00, 0x6d, 0x13,
-+			0x19, 0x1d, 0x41, 0x50, 0xe9, 0xd8, 0xf0, 0x32,
-+			0x71, 0xbc, 0xd3, 0x11, 0xf2, 0xac, 0xbe, 0xaf,
-+			0x75, 0x46, 0x65, 0x4e, 0x07, 0x34, 0x37, 0xa3,
-+			0x89, 0xfe, 0x75, 0xd4, 0x70, 0x4c, 0xc6, 0x3f,
-+			0x69, 0x24, 0x0e, 0x38, 0x67, 0x43, 0x8c, 0xde,
-+			0x06, 0xb5, 0xb8, 0xe7, 0xc4, 0xf0, 0x41, 0x8f,
-+			0xf0, 0xbd, 0x2f, 0x0b, 0xb9, 0x18, 0xf8, 0xde,
-+			0x64, 0xb1, 0xdb, 0xee, 0x00, 0x50, 0x77, 0xe1,
-+			0xc7, 0xff, 0xa6, 0xfa, 0xdd, 0x70, 0xf4, 0xe3,
-+			0x93, 0xe9, 0x77, 0x35, 0x3d, 0x4b, 0x2f, 0x2b,
-+			0x6d, 0x55, 0xf0, 0xfc, 0x88, 0x54, 0x4e, 0x89,
-+			0xc1, 0x8a, 0x23, 0x31, 0x2d, 0x14, 0x2a, 0xb8,
-+			0x1b, 0x15, 0xdd, 0x9e, 0x6e, 0x7b, 0xda, 0x05,
-+			0x91, 0x7d, 0x62, 0x64, 0x96, 0x72, 0xde, 0xfc,
-+			0xc1, 0xec, 0xf0, 0x23, 0x51, 0x6f, 0xdb, 0x5b,
-+			0x1d, 0x08, 0x57, 0xce, 0x09, 0xb8, 0xf6, 0xcd,
-+			0x8d, 0x95, 0xf2, 0x20, 0xbf, 0x0f, 0x20, 0x57,
-+			0x98, 0x81, 0x84, 0x4f, 0x15, 0x5c, 0x76, 0xe7,
-+			0x3e, 0x0a, 0x3a, 0x6c, 0xc4, 0x8a, 0xbe, 0x78,
-+			0x74, 0x77, 0xc3, 0x09, 0x4b, 0x5d, 0x48, 0xe4,
-+			0xc8, 0xcb, 0x0b, 0xea, 0x17, 0x28, 0xcf, 0xcf,
-+			0x31, 0x32, 0x44, 0xa4, 0xe5, 0x0e, 0x1a, 0x98,
-+			0x94, 0xc4, 0xf0, 0xff, 0xae, 0x3e, 0x44, 0xe8,
-+			0xa5, 0xb3, 0xb5, 0x37, 0x2f, 0xe8, 0xaf, 0x6f,
-+			0x28, 0xc1, 0x37, 0x5f, 0x31, 0xd2, 0xb9, 0x33,
-+			0xb1, 0xb2, 0x52, 0x94, 0x75, 0x2c, 0x29, 0x59,
-+			0x06, 0xc2, 0x25, 0xe8, 0x71, 0x65, 0x4e, 0xed,
-+			0xc0, 0x9c, 0xb1, 0xbb, 0x25, 0xdc, 0x6c, 0xe7,
-+			0x4b, 0xa5, 0x7a, 0x54, 0x7a, 0x60, 0xff, 0x7a,
-+			0xe0, 0x50, 0x40, 0x96, 0x35, 0x63, 0xe4, 0x0b,
-+			0x76, 0xbd, 0xa4, 0x65, 0x00, 0x1b, 0x57, 0x88,
-+			0xae, 0xed, 0x39, 0x88, 0x42, 0x11, 0x3c, 0xed,
-+			0x85, 0x67, 0x7d, 0xb9, 0x68, 0x82, 0xe9, 0x43,
-+			0x3c, 0x47, 0x53, 0xfa, 0xe8, 0xf8, 0x9f, 0x1f,
-+			0x9f, 0xef, 0x0f, 0xf7, 0x30, 0xd9, 0x30, 0x0e,
-+			0xb9, 0x9f, 0x69, 0x18, 0x2f, 0x7e, 0xf8, 0xf8,
-+			0xf8, 0x8c, 0x0f, 0xd4, 0x02, 0x4d, 0xea, 0xcd,
-+			0x0a, 0x9c, 0x6f, 0x71, 0x6d, 0x5a, 0x4c, 0x60,
-+			0xce, 0x20, 0x56, 0x32, 0xc6, 0xc5, 0x99, 0x1f,
-+			0x09, 0xe6, 0x4e, 0x18, 0x1a, 0x15, 0x13, 0xa8,
-+			0x7d, 0xb1, 0x6b, 0xc0, 0xb2, 0x6d, 0xf8, 0x26,
-+			0x66, 0xf8, 0x3d, 0x18, 0x74, 0x70, 0x66, 0x7a,
-+			0x34, 0x17, 0xde, 0xba, 0x47, 0xf1, 0x06, 0x18,
-+			0xcb, 0xaf, 0xeb, 0x4a, 0x1e, 0x8f, 0xa7, 0x77,
-+			0xe0, 0x3b, 0x78, 0x62, 0x66, 0xc9, 0x10, 0xea,
-+			0x1f, 0xb7, 0x29, 0x0a, 0x45, 0xa1, 0x1d, 0x1e,
-+			0x1d, 0xe2, 0x65, 0x61, 0x50, 0x9c, 0xd7, 0x05,
-+			0xf2, 0x0b, 0x5b, 0x12, 0x61, 0x02, 0xc8, 0xe5,
-+			0x63, 0x4f, 0x20, 0x0c, 0x07, 0x17, 0x33, 0x5e,
-+			0x03, 0x9a, 0x53, 0x0f, 0x2e, 0x55, 0xfe, 0x50,
-+			0x43, 0x7d, 0xd0, 0xb6, 0x7e, 0x5a, 0xda, 0xae,
-+			0x58, 0xef, 0x15, 0xa9, 0x83, 0xd9, 0x46, 0xb1,
-+			0x42, 0xaa, 0xf5, 0x02, 0x6c, 0xce, 0x92, 0x06,
-+			0x1b, 0xdb, 0x66, 0x45, 0x91, 0x79, 0xc2, 0x2d,
-+			0xe6, 0x53, 0xd3, 0x14, 0xfd, 0xbb, 0x44, 0x63,
-+			0xc6, 0xd7, 0x3d, 0x7a, 0x0c, 0x75, 0x78, 0x9d,
-+			0x5c, 0xa6, 0x39, 0xb3, 0xe5, 0x63, 0xca, 0x8b,
-+			0xfe, 0xd3, 0xef, 0x60, 0x83, 0xf6, 0x8e, 0x70,
-+			0xb6, 0x67, 0xc7, 0x77, 0xed, 0x23, 0xef, 0x4c,
-+			0xf0, 0xed, 0x2d, 0x07, 0x59, 0x6f, 0xc1, 0x01,
-+			0x34, 0x37, 0x08, 0xab, 0xd9, 0x1f, 0x09, 0xb1,
-+			0xce, 0x5b, 0x17, 0xff, 0x74, 0xf8, 0x9c, 0xd5,
-+			0x2c, 0x56, 0x39, 0x79, 0x0f, 0x69, 0x44, 0x75,
-+			0x58, 0x27, 0x01, 0xc4, 0xbf, 0xa7, 0xa1, 0x1d,
-+			0x90, 0x17, 0x77, 0x86, 0x5a, 0x3f, 0xd9, 0xd1,
-+			0x0e, 0xa0, 0x10, 0xf8, 0xec, 0x1e, 0xa5, 0x7f,
-+			0x5e, 0x36, 0xd1, 0xe3, 0x04, 0x2c, 0x70, 0xf7,
-+			0x8e, 0xc0, 0x98, 0x2f, 0x6c, 0x94, 0x2b, 0x41,
-+			0xb7, 0x60, 0x00, 0xb7, 0x2e, 0xb8, 0x02, 0x8d,
-+			0xb8, 0xb0, 0xd3, 0x86, 0xba, 0x1d, 0xd7, 0x90,
-+			0xd6, 0xb6, 0xe1, 0xfc, 0xd7, 0xd8, 0x28, 0x06,
-+			0x63, 0x9b, 0xce, 0x61, 0x24, 0x79, 0xc0, 0x70,
-+			0x52, 0xd0, 0xb6, 0xd4, 0x28, 0x95, 0x24, 0x87,
-+			0x03, 0x1f, 0xb7, 0x9a, 0xda, 0xa3, 0xfb, 0x52,
-+			0x5b, 0x68, 0xe7, 0x4c, 0x8c, 0x24, 0xe1, 0x42,
-+			0xf7, 0xd5, 0xfd, 0xad, 0x06, 0x32, 0x9f, 0xba,
-+			0xc1, 0xfc, 0xdd, 0xc6, 0xfc, 0xfc, 0xb3, 0x38,
-+			0x74, 0x56, 0x58, 0x40, 0x02, 0x37, 0x52, 0x2c,
-+			0x55, 0xcc, 0xb3, 0x9e, 0x7a, 0xe9, 0xd4, 0x38,
-+			0x41, 0x5e, 0x0c, 0x35, 0xe2, 0x11, 0xd1, 0x13,
-+			0xf8, 0xb7, 0x8d, 0x72, 0x6b, 0x22, 0x2a, 0xb0,
-+			0xdb, 0x08, 0xba, 0x35, 0xb9, 0x3f, 0xc8, 0xd3,
-+			0x24, 0x90, 0xec, 0x58, 0xd2, 0x09, 0xc7, 0x2d,
-+			0xed, 0x38, 0x80, 0x36, 0x72, 0x43, 0x27, 0x49,
-+			0x4a, 0x80, 0x8a, 0xa2, 0xe8, 0xd3, 0xda, 0x30,
-+			0x7d, 0xb6, 0x82, 0x37, 0x86, 0x92, 0x86, 0x3e,
-+			0x08, 0xb2, 0x28, 0x5a, 0x55, 0x44, 0x24, 0x7d,
-+			0x40, 0x48, 0x8a, 0xb6, 0x89, 0x58, 0x08, 0xa0,
-+			0xd6, 0x6d, 0x3a, 0x17, 0xbf, 0xf6, 0x54, 0xa2,
-+			0xf5, 0xd3, 0x8c, 0x0f, 0x78, 0x12, 0x57, 0x8b,
-+			0xd5, 0xc2, 0xfd, 0x58, 0x5b, 0x7f, 0x38, 0xe3,
-+			0xcc, 0xb7, 0x7c, 0x48, 0xb3, 0x20, 0xe8, 0x81,
-+			0x14, 0x32, 0x45, 0x05, 0xe0, 0xdb, 0x9f, 0x75,
-+			0x85, 0xb4, 0x6a, 0xfc, 0x95, 0xe3, 0x54, 0x22,
-+			0x12, 0xee, 0x30, 0xfe, 0xd8, 0x30, 0xef, 0x34,
-+			0x50, 0xab, 0x46, 0x30, 0x98, 0x2f, 0xb7, 0xc0,
-+			0x15, 0xa2, 0x83, 0xb6, 0xf2, 0x06, 0x21, 0xa2,
-+			0xc3, 0x26, 0x37, 0x14, 0xd1, 0x4d, 0xb5, 0x10,
-+			0x52, 0x76, 0x4d, 0x6a, 0xee, 0xb5, 0x2b, 0x15,
-+			0xb7, 0xf9, 0x51, 0xe8, 0x2a, 0xaf, 0xc7, 0xfa,
-+			0x77, 0xaf, 0xb0, 0x05, 0x4d, 0xd1, 0x68, 0x8e,
-+			0x74, 0x05, 0x9f, 0x9d, 0x93, 0xa5, 0x3e, 0x7f,
-+			0x4e, 0x5f, 0x9d, 0xcb, 0x09, 0xc7, 0x83, 0xe3,
-+			0x02, 0x9d, 0x27, 0x1f, 0xef, 0x85, 0x05, 0x8d,
-+			0xec, 0x55, 0x88, 0x0f, 0x0d, 0x7c, 0x4c, 0xe8,
-+			0xa1, 0x75, 0xa0, 0xd8, 0x06, 0x47, 0x14, 0xef,
-+			0xaa, 0x61, 0xcf, 0x26, 0x15, 0xad, 0xd8, 0xa3,
-+			0xaa, 0x75, 0xf2, 0x78, 0x4a, 0x5a, 0x61, 0xdf,
-+			0x8b, 0xc7, 0x04, 0xbc, 0xb2, 0x32, 0xd2, 0x7e,
-+			0x42, 0xee, 0xb4, 0x2f, 0x51, 0xff, 0x7b, 0x2e,
-+			0xd3, 0x02, 0xe8, 0xdc, 0x5d, 0x0d, 0x50, 0xdc,
-+			0xae, 0xb7, 0x46, 0xf9, 0xa8, 0xe6, 0xd0, 0x16,
-+			0xcc, 0xe6, 0x2c, 0x81, 0xc7, 0xad, 0xe9, 0xf0,
-+			0x05, 0x72, 0x6d, 0x3d, 0x0a, 0x7a, 0xa9, 0x02,
-+			0xac, 0x82, 0x93, 0x6e, 0xb6, 0x1c, 0x28, 0xfc,
-+			0x44, 0x12, 0xfb, 0x73, 0x77, 0xd4, 0x13, 0x39,
-+			0x29, 0x88, 0x8a, 0xf3, 0x5c, 0xa6, 0x36, 0xa0,
-+			0x2a, 0xed, 0x7e, 0xb1, 0x1d, 0xd6, 0x4c, 0x6b,
-+			0x41, 0x01, 0x18, 0x5d, 0x5d, 0x07, 0x97, 0xa6,
-+			0x4b, 0xef, 0x31, 0x18, 0xea, 0xac, 0xb1, 0x84,
-+			0x21, 0xed, 0xda, 0x86,
-+		},
-+		.rlen = 4100,
-+	},
-+};
++#include <linux/delay.h>
 +
-+static struct cipher_testvec aes_ctr_dec_tv_template[] = {
-+	{ /* From RFC 3686 */
-+		.key	= { 0xae, 0x68, 0x52, 0xf8, 0x12, 0x10, 0x67, 0xcc,
-+			    0x4b, 0xf7, 0xa5, 0x76, 0x55, 0x77, 0xf3, 0x9e,
-+			    0x00, 0x00, 0x00, 0x30 },
-+		.klen	= 20,
-+		.iv 	= { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
-+		.input	= { 0xe4, 0x09, 0x5d, 0x4f, 0xb7, 0xa7, 0xb3, 0x79,
-+			    0x2d, 0x61, 0x75, 0xa3, 0x26, 0x13, 0x11, 0xb8 },
-+		.ilen	= 16,
-+		.result	= { "Single block msg" },
-+		.rlen	= 16,
-+	}, {
-+		.key	= { 0x7e, 0x24, 0x06, 0x78, 0x17, 0xfa, 0xe0, 0xd7,
-+			    0x43, 0xd6, 0xce, 0x1f, 0x32, 0x53, 0x91, 0x63,
-+			    0x00, 0x6c, 0xb6, 0xdb },
-+		.klen	= 20,
-+		.iv 	= { 0xc0, 0x54, 0x3b, 0x59, 0xda, 0x48, 0xd9, 0x0b },
-+		.input	= { 0x51, 0x04, 0xa1, 0x06, 0x16, 0x8a, 0x72, 0xd9,
-+			    0x79, 0x0d, 0x41, 0xee, 0x8e, 0xda, 0xd3, 0x88,
-+			    0xeb, 0x2e, 0x1e, 0xfc, 0x46, 0xda, 0x57, 0xc8,
-+			    0xfc, 0xe6, 0x30, 0xdf, 0x91, 0x41, 0xbe, 0x28 },
-+		.ilen 	= 32,
-+		.result	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
-+		.rlen	= 32,
-+	}, {
-+		.key 	= { 0x16, 0xaf, 0x5b, 0x14, 0x5f, 0xc9, 0xf5, 0x79,
-+			    0xc1, 0x75, 0xf9, 0x3e, 0x3b, 0xfb, 0x0e, 0xed,
-+			    0x86, 0x3d, 0x06, 0xcc, 0xfd, 0xb7, 0x85, 0x15,
-+			    0x00, 0x00, 0x00, 0x48 },
-+		.klen 	= 28,
-+		.iv	= { 0x36, 0x73, 0x3c, 0x14, 0x7d, 0x6d, 0x93, 0xcb },
-+		.input	= { 0x4b, 0x55, 0x38, 0x4f, 0xe2, 0x59, 0xc9, 0xc8,
-+			    0x4e, 0x79, 0x35, 0xa0, 0x03, 0xcb, 0xe9, 0x28 },
-+		.ilen 	= 16,
-+		.result	= { "Single block msg" },
-+		.rlen	= 16,
-+	}, {
-+		.key	= { 0x7c, 0x5c, 0xb2, 0x40, 0x1b, 0x3d, 0xc3, 0x3c,
-+			    0x19, 0xe7, 0x34, 0x08, 0x19, 0xe0, 0xf6, 0x9c,
-+			    0x67, 0x8c, 0x3d, 0xb8, 0xe6, 0xf6, 0xa9, 0x1a,
-+			    0x00, 0x96, 0xb0, 0x3b },
-+		.klen	= 28,
-+		.iv 	= { 0x02, 0x0c, 0x6e, 0xad, 0xc2, 0xcb, 0x50, 0x0d },
-+		.input	= { 0x45, 0x32, 0x43, 0xfc, 0x60, 0x9b, 0x23, 0x32,
-+			    0x7e, 0xdf, 0xaa, 0xfa, 0x71, 0x31, 0xcd, 0x9f,
-+			    0x84, 0x90, 0x70, 0x1c, 0x5a, 0xd4, 0xa7, 0x9c,
-+			    0xfc, 0x1f, 0xe0, 0xff, 0x42, 0xf4, 0xfb, 0x00 },
-+		.ilen	= 32,
-+		.result	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
-+		.rlen 	= 32,
-+	}, { 
-+		.key 	= { 0x77, 0x6b, 0xef, 0xf2, 0x85, 0x1d, 0xb0, 0x6f,
-+			    0x4c, 0x8a, 0x05, 0x42, 0xc8, 0x69, 0x6f, 0x6c,
-+			    0x6a, 0x81, 0xaf, 0x1e, 0xec, 0x96, 0xb4, 0xd3,
-+			    0x7f, 0xc1, 0xd6, 0x89, 0xe6, 0xc1, 0xc1, 0x04,
-+			    0x00, 0x00, 0x00, 0x60 },
-+		.klen	= 36,
-+		.iv 	= { 0xdb, 0x56, 0x72, 0xc9, 0x7a, 0xa8, 0xf0, 0xb2 },
-+		.input	= { 0x14, 0x5a, 0xd0, 0x1d, 0xbf, 0x82, 0x4e, 0xc7,
-+			    0x56, 0x08, 0x63, 0xdc, 0x71, 0xe3, 0xe0, 0xc0 },
-+		.ilen	= 16,
-+		.result	= { "Single block msg" },
-+		.rlen 	= 16,
-+	}, {
-+		.key	= { 0xf6, 0xd6, 0x6d, 0x6b, 0xd5, 0x2d, 0x59, 0xbb,
-+			    0x07, 0x96, 0x36, 0x58, 0x79, 0xef, 0xf8, 0x86,
-+			    0xc6, 0x6d, 0xd5, 0x1a, 0x5b, 0x6a, 0x99, 0x74,
-+			    0x4b, 0x50, 0x59, 0x0c, 0x87, 0xa2, 0x38, 0x84,
-+			    0x00, 0xfa, 0xac, 0x24 },
-+		.klen 	= 36,
-+		.iv	= { 0xc1, 0x58, 0x5e, 0xf1, 0x5a, 0x43, 0xd8, 0x75 },
-+		.input	= { 0xf0, 0x5e, 0x23, 0x1b, 0x38, 0x94, 0x61, 0x2c,
-+			    0x49, 0xee, 0x00, 0x0b, 0x80, 0x4e, 0xb2, 0xa9,
-+			    0xb8, 0x30, 0x6b, 0x50, 0x8f, 0x83, 0x9d, 0x6a,
-+			    0x55, 0x30, 0x83, 0x1d, 0x93, 0x44, 0xaf, 0x1c },
-+		.ilen	= 32,
-+		.result	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
-+		.rlen	= 32,
-+	},
-+};
 +
-+static struct aead_testvec aes_gcm_enc_tv_template[] = {
-+	{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
-+		.klen	= 16,
-+		.result	= { 0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61,
-+			    0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a },
-+		.rlen	= 16,
-+	}, {
-+		.klen	= 16,
-+		.ilen	= 16,
-+		.result = { 0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
-+			    0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78,
-+			    0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
-+			    0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf },
-+		.rlen	= 32,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
-+		.klen	= 16,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
-+		.ilen	= 64,
-+		.result = { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
-+			    0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
-+			    0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
-+			    0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
-+			    0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
-+			    0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
-+			    0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
-+			    0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85,
-+			    0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
-+			    0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
-+		.rlen	= 80,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
-+		.klen	= 16,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39 },
-+		.ilen	= 60,
-+		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xab, 0xad, 0xda, 0xd2 },
-+		.alen	= 20,
-+		.result = { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
-+			    0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
-+			    0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
-+			    0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
-+			    0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
-+			    0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
-+			    0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
-+			    0x3d, 0x58, 0xe0, 0x91,
-+			    0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb,
-+			    0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 },
-+		.rlen	= 76,
-+	}, {
-+		.klen	= 24,
-+		.result	= { 0xcd, 0x33, 0xb2, 0x8a, 0xc7, 0x73, 0xf7, 0x4b,
-+			    0xa0, 0x0e, 0xd1, 0xf3, 0x12, 0x57, 0x24, 0x35 },
-+		.rlen	= 16,
-+	}, {
-+		.klen	= 24,
-+		.ilen	= 16,
-+		.result = { 0x98, 0xe7, 0x24, 0x7c, 0x07, 0xf0, 0xfe, 0x41,
-+			    0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00,
-+			    0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab,
-+			    0x8e, 0xf4, 0xd4, 0x58, 0x75, 0x14, 0xf0, 0xfb },
-+		.rlen	= 32,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
-+			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c },
-+		.klen	= 24,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
-+		.ilen	= 64,
-+		.result = { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41,
-+			    0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57,
-+			    0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84,
-+			    0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c,
-+			    0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
-+			    0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47,
-+			    0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9,
-+			    0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56,
-+			    0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf,
-+			    0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 },
-+		.rlen	= 80,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
-+			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c },
-+		.klen	= 24,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39 },
-+		.ilen	= 60,
-+		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xab, 0xad, 0xda, 0xd2 },
-+		.alen	= 20,
-+		.result = { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41,
-+			    0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57,
-+			    0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84,
-+			    0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c,
-+			    0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
-+			    0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47,
-+			    0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9,
-+			    0xcc, 0xda, 0x27, 0x10,
-+			    0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f,
-+			    0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c },
-+		.rlen	= 76,
-+		.np	= 2,
-+		.tap	= { 32, 28 },
-+		.anp	= 2,
-+		.atap	= { 8, 12 }
-+	}, {
-+		.klen	= 32,
-+		.result	= { 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9,
-+			    0xa9, 0x63, 0xb4, 0xf1, 0xc4, 0xcb, 0x73, 0x8b },
-+		.rlen	= 16,
-+	}
-+};
++#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \
++					__DATE__ " " __TIME__ ")"
 +
-+static struct aead_testvec aes_gcm_dec_tv_template[] = {
-+	{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
-+		.klen	= 32,
-+		.input	= { 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e,
-+			    0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18,
-+			    0xd0, 0xd1, 0xc8, 0xa7, 0x99, 0x99, 0x6b, 0xf0,
-+			    0x26, 0x5b, 0x98, 0xb5, 0xd4, 0x8a, 0xb9, 0x19 },
-+		.ilen	= 32,
-+		.rlen	= 16,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
-+			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
-+		.klen	= 32,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0x52, 0x2d, 0xc1, 0xf0, 0x99, 0x56, 0x7d, 0x07,
-+			    0xf4, 0x7f, 0x37, 0xa3, 0x2a, 0x84, 0x42, 0x7d,
-+			    0x64, 0x3a, 0x8c, 0xdc, 0xbf, 0xe5, 0xc0, 0xc9,
-+			    0x75, 0x98, 0xa2, 0xbd, 0x25, 0x55, 0xd1, 0xaa,
-+			    0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d,
-+			    0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38,
-+			    0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a,
-+			    0xbc, 0xc9, 0xf6, 0x62, 0x89, 0x80, 0x15, 0xad,
-+			    0xb0, 0x94, 0xda, 0xc5, 0xd9, 0x34, 0x71, 0xbd,
-+			    0xec, 0x1a, 0x50, 0x22, 0x70, 0xe3, 0xcc, 0x6c },
-+		.ilen	= 80,
-+		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
-+		.rlen	= 64,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
-+			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
-+		.klen	= 32,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0x52, 0x2d, 0xc1, 0xf0, 0x99, 0x56, 0x7d, 0x07,
-+			    0xf4, 0x7f, 0x37, 0xa3, 0x2a, 0x84, 0x42, 0x7d,
-+			    0x64, 0x3a, 0x8c, 0xdc, 0xbf, 0xe5, 0xc0, 0xc9,
-+			    0x75, 0x98, 0xa2, 0xbd, 0x25, 0x55, 0xd1, 0xaa,
-+			    0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d,
-+			    0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38,
-+			    0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a,
-+			    0xbc, 0xc9, 0xf6, 0x62,
-+			    0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68,
-+			    0xcd, 0xdf, 0x88, 0x53, 0xbb, 0x2d, 0x55, 0x1b },
-+		.ilen	= 76,
-+		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xab, 0xad, 0xda, 0xd2 },
-+		.alen	= 20,
-+		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39 },
-+		.rlen	= 60,
-+		.np     = 2,
-+		.tap    = { 48, 28 },
-+		.anp	= 3,
-+		.atap	= { 8, 8, 4 }
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
-+		.klen	= 16,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
-+			    0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
-+			    0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
-+			    0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
-+			    0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
-+			    0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
-+			    0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
-+			    0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85,
-+			    0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
-+			    0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
-+		.ilen	= 80,
-+		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
-+		.rlen	= 64,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
-+		.klen	= 16,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
-+			    0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
-+			    0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
-+			    0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
-+			    0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
-+			    0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
-+			    0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
-+			    0x3d, 0x58, 0xe0, 0x91,
-+			    0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb,
-+			    0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 },
-+		.ilen	= 76,
-+		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xab, 0xad, 0xda, 0xd2 },
-+		.alen	= 20,
-+		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39 },
-+		.rlen	= 60,
-+	}, {
-+		.klen	= 24,
-+		.input	= { 0x98, 0xe7, 0x24, 0x7c, 0x07, 0xf0, 0xfe, 0x41,
-+			    0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00,
-+			    0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab,
-+			    0x8e, 0xf4, 0xd4, 0x58, 0x75, 0x14, 0xf0, 0xfb },
-+		.ilen	= 32,
-+		.rlen	= 16,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
-+			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c },
-+		.klen	= 24,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41,
-+			    0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57,
-+			    0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84,
-+			    0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c,
-+			    0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
-+			    0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47,
-+			    0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9,
-+			    0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56,
-+			    0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf,
-+			    0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 },
-+		.ilen	= 80,
-+		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
-+		.rlen	= 64,
-+	}, {
-+		.key	= { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
-+			    0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
-+			    0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c },
-+		.klen	= 24,
-+		.iv	= { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
-+			    0xde, 0xca, 0xf8, 0x88 },
-+		.input	= { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41,
-+			    0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57,
-+			    0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84,
-+			    0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c,
-+			    0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
-+			    0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47,
-+			    0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9,
-+			    0xcc, 0xda, 0x27, 0x10,
-+			    0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f,
-+			    0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c },
-+		.ilen	= 76,
-+		.assoc	= { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
-+			    0xab, 0xad, 0xda, 0xd2 },
-+		.alen	= 20,
-+		.result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
-+			    0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
-+			    0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
-+			    0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
-+			    0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
-+			    0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
-+			    0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
-+			    0xba, 0x63, 0x7b, 0x39 },
-+		.rlen	= 60,
-+	}
++/*    Macro definitions */
++
++/* Default debug printout level */
++#define NOZOMI_DEBUG_LEVEL 0x00
++
++#define P_BUF_SIZE 128
++#define NFO(_err_flag_, args...)				\
++do {								\
++	char tmp[P_BUF_SIZE];					\
++	snprintf(tmp, sizeof(tmp), ##args);			\
++	printk(_err_flag_ "[%d] %s(): %s\n", __LINE__,		\
++		__FUNCTION__, tmp);				\
++} while (0)
++
++#define DBG1(args...) D_(0x01, ##args)
++#define DBG2(args...) D_(0x02, ##args)
++#define DBG3(args...) D_(0x04, ##args)
++#define DBG4(args...) D_(0x08, ##args)
++#define DBG5(args...) D_(0x10, ##args)
++#define DBG6(args...) D_(0x20, ##args)
++#define DBG7(args...) D_(0x40, ##args)
++#define DBG8(args...) D_(0x80, ##args)
++
++#ifdef DEBUG
++/* Do we need this settable at runtime? */
++static int debug = NOZOMI_DEBUG_LEVEL;
++
++#define D(lvl, args...)  do {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
++				while (0)
++#define D_(lvl, args...) D(lvl, ##args)
++
++/* These printouts are always printed */
++
++#else
++static int debug;
++#define D_(lvl, args...)
++#endif
++
++/* TODO: rewrite to optimize macros... */
++
++#define TMP_BUF_MAX 256
++
++#define DUMP(buf__,len__) \
++  do {  \
++    char tbuf[TMP_BUF_MAX] = {0};\
++    if (len__ > 1) {\
++	snprintf(tbuf, len__ > TMP_BUF_MAX ? TMP_BUF_MAX : len__, "%s", buf__);\
++	if (tbuf[len__-2] == '\r') {\
++		tbuf[len__-2] = 'r';\
++	} \
++	DBG1("SENDING: '%s' (%d+n)", tbuf, len__);\
++    } else {\
++	DBG1("SENDING: '%s' (%d)", tbuf, len__);\
++    } \
++} while (0)
++
++/*    Defines */
++#define NOZOMI_NAME		"nozomi"
++#define NOZOMI_NAME_TTY		"nozomi_tty"
++#define DRIVER_DESC		"Nozomi driver"
++
++#define NTTY_TTY_MAXMINORS	256
++#define NTTY_FIFO_BUFFER_SIZE	8192
++
++/* Must be a power of 2 */
++#define FIFO_BUFFER_SIZE_UL	8192
++
++/* Size of tmp send buffer to card */
++#define SEND_BUF_MAX		1024
++#define RECEIVE_BUF_MAX		4
++
++
++/* Define all types of vendors and devices to support */
++#define VENDOR1		0x1931	/* Vendor Option */
++#define DEVICE1		0x000c	/* HSDPA card */
++
++#define R_IIR		0x0000	/* Interrupt Identity Register */
++#define R_FCR		0x0000	/* Flow Control Register */
++#define R_IER		0x0004	/* Interrupt Enable Register */
++
++#define CONFIG_MAGIC	0xEFEFFEFE
++#define TOGGLE_VALID	0x0000
++
++/* Definition of interrupt tokens */
++#define MDM_DL1		0x0001
++#define MDM_UL1		0x0002
++#define MDM_DL2		0x0004
++#define MDM_UL2		0x0008
++#define DIAG_DL1	0x0010
++#define DIAG_DL2	0x0020
++#define DIAG_UL		0x0040
++#define APP1_DL		0x0080
++#define APP1_UL		0x0100
++#define APP2_DL		0x0200
++#define APP2_UL		0x0400
++#define CTRL_DL		0x0800
++#define CTRL_UL		0x1000
++#define RESET		0x8000
++
++#define MDM_DL		(MDM_DL1  | MDM_DL2)
++#define MDM_UL		(MDM_UL1  | MDM_UL2)
++#define DIAG_DL		(DIAG_DL1 | DIAG_DL2)
++
++/* Modem signal definitions */
++#define CTRL_DSR	0x0001
++#define CTRL_DCD	0x0002
++#define CTRL_RI		0x0004
++#define CTRL_CTS	0x0008
++
++#define CTRL_DTR	0x0001
++#define CTRL_RTS	0x0002
++
++#define MAX_PORT		4
++#define NOZOMI_MAX_PORTS	5
++#define NOZOMI_MAX_CARDS	(NTTY_TTY_MAXMINORS / MAX_PORT)
++
++/*    Type definitions */
++
++/*
++ * There are two types of nozomi cards,
++ * one with 2048 bytes of memory and one with 8192 bytes of memory.
++ */
++enum card_type {
++	F32_2 = 2048,	/* 512 bytes downlink + uplink * 2 -> 2048 */
++	F32_8 = 8192,	/* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */
 +};
 +
-+static struct aead_testvec aes_ccm_enc_tv_template[] = {
-+	{ /* From RFC 3610 */
-+		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00,
-+			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
-+		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
-+		.alen	= 8,
-+		.input	= { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
-+		.ilen	= 23,
-+		.result	= { 0x58, 0x8c, 0x97, 0x9a, 0x61, 0xc6, 0x63, 0xd2,
-+			    0xf0, 0x66, 0xd0, 0xc2, 0xc0, 0xf9, 0x89, 0x80,
-+			    0x6d, 0x5f, 0x6b, 0x61, 0xda, 0xc3, 0x84, 0x17,
-+			    0xe8, 0xd1, 0x2c, 0xfd, 0xf9, 0x26, 0xe0 },
-+		.rlen	= 31,
-+	}, {
-+		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x07, 0x06, 0x05, 0x04,
-+			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
-+		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b },
-+		.alen	= 12,
-+		.input	= { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-+			    0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
-+			    0x1c, 0x1d, 0x1e, 0x1f },
-+		.ilen	= 20,
-+		.result	= { 0xdc, 0xf1, 0xfb, 0x7b, 0x5d, 0x9e, 0x23, 0xfb,
-+			    0x9d, 0x4e, 0x13, 0x12, 0x53, 0x65, 0x8a, 0xd8,
-+			    0x6e, 0xbd, 0xca, 0x3e, 0x51, 0xe8, 0x3f, 0x07,
-+			    0x7d, 0x9c, 0x2d, 0x93 },
-+		.rlen	= 28,
-+	}, {
-+		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x0b, 0x0a, 0x09, 0x08,
-+			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
-+		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
-+		.alen	= 8,
-+		.input	= { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
-+			    0x20 },
-+		.ilen	= 25,
-+		.result	= { 0x82, 0x53, 0x1a, 0x60, 0xcc, 0x24, 0x94, 0x5a,
-+			    0x4b, 0x82, 0x79, 0x18, 0x1a, 0xb5, 0xc8, 0x4d,
-+			    0xf2, 0x1c, 0xe7, 0xf9, 0xb7, 0x3f, 0x42, 0xe1,
-+			    0x97, 0xea, 0x9c, 0x07, 0xe5, 0x6b, 0x5e, 0xb1,
-+			    0x7e, 0x5f, 0x4e },
-+		.rlen	= 35,
-+	}, {
-+		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x0c, 0x0b, 0x0a, 0x09,
-+			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
-+		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b },
-+		.alen	= 12,
-+		.input	= { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-+			    0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
-+			    0x1c, 0x1d, 0x1e },
-+		.ilen	= 19,
-+		.result	= { 0x07, 0x34, 0x25, 0x94, 0x15, 0x77, 0x85, 0x15,
-+			    0x2b, 0x07, 0x40, 0x98, 0x33, 0x0a, 0xbb, 0x14,
-+			    0x1b, 0x94, 0x7b, 0x56, 0x6a, 0xa9, 0x40, 0x6b,
-+			    0x4d, 0x99, 0x99, 0x88, 0xdd },
-+		.rlen	= 29,
-+	}, {
-+		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
-+			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x33, 0x56, 0x8e, 0xf7, 0xb2, 0x63,
-+			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
-+		.assoc	= { 0x63, 0x01, 0x8f, 0x76, 0xdc, 0x8a, 0x1b, 0xcb },
-+		.alen	= 8,
-+		.input	= { 0x90, 0x20, 0xea, 0x6f, 0x91, 0xbd, 0xd8, 0x5a,
-+			    0xfa, 0x00, 0x39, 0xba, 0x4b, 0xaf, 0xf9, 0xbf,
-+			    0xb7, 0x9c, 0x70, 0x28, 0x94, 0x9c, 0xd0, 0xec },
-+		.ilen	= 24,
-+		.result	= { 0x4c, 0xcb, 0x1e, 0x7c, 0xa9, 0x81, 0xbe, 0xfa,
-+			    0xa0, 0x72, 0x6c, 0x55, 0xd3, 0x78, 0x06, 0x12,
-+			    0x98, 0xc8, 0x5c, 0x92, 0x81, 0x4a, 0xbc, 0x33,
-+			    0xc5, 0x2e, 0xe8, 0x1d, 0x7d, 0x77, 0xc0, 0x8a },
-+		.rlen	= 32,
-+	}, {
-+		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
-+			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0xd5, 0x60, 0x91, 0x2d, 0x3f, 0x70,
-+			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
-+		.assoc	= { 0xcd, 0x90, 0x44, 0xd2, 0xb7, 0x1f, 0xdb, 0x81,
-+			    0x20, 0xea, 0x60, 0xc0 },
-+		.alen	= 12,
-+		.input	= { 0x64, 0x35, 0xac, 0xba, 0xfb, 0x11, 0xa8, 0x2e,
-+			    0x2f, 0x07, 0x1d, 0x7c, 0xa4, 0xa5, 0xeb, 0xd9,
-+			    0x3a, 0x80, 0x3b, 0xa8, 0x7f },
-+		.ilen	= 21,
-+		.result	= { 0x00, 0x97, 0x69, 0xec, 0xab, 0xdf, 0x48, 0x62,
-+			    0x55, 0x94, 0xc5, 0x92, 0x51, 0xe6, 0x03, 0x57,
-+			    0x22, 0x67, 0x5e, 0x04, 0xc8, 0x47, 0x09, 0x9e,
-+			    0x5a, 0xe0, 0x70, 0x45, 0x51 },
-+		.rlen	= 29,
-+	}, {
-+		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
-+			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x42, 0xff, 0xf8, 0xf1, 0x95, 0x1c,
-+			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
-+		.assoc	= { 0xd8, 0x5b, 0xc7, 0xe6, 0x9f, 0x94, 0x4f, 0xb8 },
-+		.alen	= 8,
-+		.input	= { 0x8a, 0x19, 0xb9, 0x50, 0xbc, 0xf7, 0x1a, 0x01,
-+			    0x8e, 0x5e, 0x67, 0x01, 0xc9, 0x17, 0x87, 0x65,
-+			    0x98, 0x09, 0xd6, 0x7d, 0xbe, 0xdd, 0x18 },
-+		.ilen	= 23,
-+		.result	= { 0xbc, 0x21, 0x8d, 0xaa, 0x94, 0x74, 0x27, 0xb6,
-+			    0xdb, 0x38, 0x6a, 0x99, 0xac, 0x1a, 0xef, 0x23,
-+			    0xad, 0xe0, 0xb5, 0x29, 0x39, 0xcb, 0x6a, 0x63,
-+			    0x7c, 0xf9, 0xbe, 0xc2, 0x40, 0x88, 0x97, 0xc6,
-+			    0xba },
-+		.rlen	= 33,
-+	},
++/* Two different toggle channels exist */
++enum channel_type {
++	CH_A = 0,
++	CH_B = 1,
 +};
 +
-+static struct aead_testvec aes_ccm_dec_tv_template[] = {
-+	{ /* From RFC 3610 */
-+		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00,
-+			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
-+		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
-+		.alen	= 8,
-+		.input	= { 0x58, 0x8c, 0x97, 0x9a, 0x61, 0xc6, 0x63, 0xd2,
-+			    0xf0, 0x66, 0xd0, 0xc2, 0xc0, 0xf9, 0x89, 0x80,
-+			    0x6d, 0x5f, 0x6b, 0x61, 0xda, 0xc3, 0x84, 0x17,
-+			    0xe8, 0xd1, 0x2c, 0xfd, 0xf9, 0x26, 0xe0 },
-+		.ilen	= 31,
-+		.result	= { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
-+		.rlen	= 23,
-+	}, {
-+		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x07, 0x06, 0x05, 0x04,
-+			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
-+		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b },
-+		.alen	= 12,
-+		.input	= { 0xdc, 0xf1, 0xfb, 0x7b, 0x5d, 0x9e, 0x23, 0xfb,
-+			    0x9d, 0x4e, 0x13, 0x12, 0x53, 0x65, 0x8a, 0xd8,
-+			    0x6e, 0xbd, 0xca, 0x3e, 0x51, 0xe8, 0x3f, 0x07,
-+			    0x7d, 0x9c, 0x2d, 0x93 },
-+		.ilen	= 28,
-+		.result	= { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-+			    0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
-+			    0x1c, 0x1d, 0x1e, 0x1f },
-+		.rlen	= 20,
-+	}, {
-+		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x0b, 0x0a, 0x09, 0x08,
-+			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
-+		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
-+		.alen	= 8,
-+		.input	= { 0x82, 0x53, 0x1a, 0x60, 0xcc, 0x24, 0x94, 0x5a,
-+			    0x4b, 0x82, 0x79, 0x18, 0x1a, 0xb5, 0xc8, 0x4d,
-+			    0xf2, 0x1c, 0xe7, 0xf9, 0xb7, 0x3f, 0x42, 0xe1,
-+			    0x97, 0xea, 0x9c, 0x07, 0xe5, 0x6b, 0x5e, 0xb1,
-+			    0x7e, 0x5f, 0x4e },
-+		.ilen	= 35,
-+		.result	= { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
-+			    0x20 },
-+		.rlen	= 25,
-+	}, {
-+		.key	= { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x00, 0x00, 0x0c, 0x0b, 0x0a, 0x09,
-+			    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 },
-+		.assoc	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0a, 0x0b },
-+		.alen	= 12,
-+		.input	= { 0x07, 0x34, 0x25, 0x94, 0x15, 0x77, 0x85, 0x15,
-+			    0x2b, 0x07, 0x40, 0x98, 0x33, 0x0a, 0xbb, 0x14,
-+			    0x1b, 0x94, 0x7b, 0x56, 0x6a, 0xa9, 0x40, 0x6b,
-+			    0x4d, 0x99, 0x99, 0x88, 0xdd },
-+		.ilen	= 29,
-+		.result	= { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-+			    0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
-+			    0x1c, 0x1d, 0x1e },
-+		.rlen	= 19,
-+	}, {
-+		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
-+			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x33, 0x56, 0x8e, 0xf7, 0xb2, 0x63,
-+			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
-+		.assoc	= { 0x63, 0x01, 0x8f, 0x76, 0xdc, 0x8a, 0x1b, 0xcb },
-+		.alen	= 8,
-+		.input	= { 0x4c, 0xcb, 0x1e, 0x7c, 0xa9, 0x81, 0xbe, 0xfa,
-+			    0xa0, 0x72, 0x6c, 0x55, 0xd3, 0x78, 0x06, 0x12,
-+			    0x98, 0xc8, 0x5c, 0x92, 0x81, 0x4a, 0xbc, 0x33,
-+			    0xc5, 0x2e, 0xe8, 0x1d, 0x7d, 0x77, 0xc0, 0x8a },
-+		.ilen	= 32,
-+		.result	= { 0x90, 0x20, 0xea, 0x6f, 0x91, 0xbd, 0xd8, 0x5a,
-+			    0xfa, 0x00, 0x39, 0xba, 0x4b, 0xaf, 0xf9, 0xbf,
-+			    0xb7, 0x9c, 0x70, 0x28, 0x94, 0x9c, 0xd0, 0xec },
-+		.rlen	= 24,
-+	}, {
-+		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
-+			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0xd5, 0x60, 0x91, 0x2d, 0x3f, 0x70,
-+			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
-+		.assoc	= { 0xcd, 0x90, 0x44, 0xd2, 0xb7, 0x1f, 0xdb, 0x81,
-+			    0x20, 0xea, 0x60, 0xc0 },
-+		.alen	= 12,
-+		.input	= { 0x00, 0x97, 0x69, 0xec, 0xab, 0xdf, 0x48, 0x62,
-+			    0x55, 0x94, 0xc5, 0x92, 0x51, 0xe6, 0x03, 0x57,
-+			    0x22, 0x67, 0x5e, 0x04, 0xc8, 0x47, 0x09, 0x9e,
-+			    0x5a, 0xe0, 0x70, 0x45, 0x51 },
-+		.ilen	= 29,
-+		.result	= { 0x64, 0x35, 0xac, 0xba, 0xfb, 0x11, 0xa8, 0x2e,
-+			    0x2f, 0x07, 0x1d, 0x7c, 0xa4, 0xa5, 0xeb, 0xd9,
-+			    0x3a, 0x80, 0x3b, 0xa8, 0x7f },
-+		.rlen	= 21,
-+	}, {
-+		.key	= { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3,
-+			    0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b },
-+		.klen	= 16,
-+		.iv	= { 0x01, 0x00, 0x42, 0xff, 0xf8, 0xf1, 0x95, 0x1c,
-+			    0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 },
-+		.assoc	= { 0xd8, 0x5b, 0xc7, 0xe6, 0x9f, 0x94, 0x4f, 0xb8 },
-+		.alen	= 8,
-+		.input	= { 0xbc, 0x21, 0x8d, 0xaa, 0x94, 0x74, 0x27, 0xb6,
-+			    0xdb, 0x38, 0x6a, 0x99, 0xac, 0x1a, 0xef, 0x23,
-+			    0xad, 0xe0, 0xb5, 0x29, 0x39, 0xcb, 0x6a, 0x63,
-+			    0x7c, 0xf9, 0xbe, 0xc2, 0x40, 0x88, 0x97, 0xc6,
-+			    0xba },
-+		.ilen	= 33,
-+		.result	= { 0x8a, 0x19, 0xb9, 0x50, 0xbc, 0xf7, 0x1a, 0x01,
-+			    0x8e, 0x5e, 0x67, 0x01, 0xc9, 0x17, 0x87, 0x65,
-+			    0x98, 0x09, 0xd6, 0x7d, 0xbe, 0xdd, 0x18 },
-+		.rlen	= 23,
-+	},
++/* Port definition for the card regarding flow control */
++enum ctrl_port_type {
++	CTRL_CMD	= 0,
++	CTRL_MDM	= 1,
++	CTRL_DIAG	= 2,
++	CTRL_APP1	= 3,
++	CTRL_APP2	= 4,
++	CTRL_ERROR	= -1,
 +};
 +
- /* Cast5 test vectors from RFC 2144 */
- #define CAST5_ENC_TEST_VECTORS	3
- #define CAST5_DEC_TEST_VECTORS	3
-@@ -4317,6 +6425,1211 @@ static struct cipher_testvec seed_dec_tv_template[] = {
- 	}
- };
- 
-+#define SALSA20_STREAM_ENC_TEST_VECTORS 5
-+static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
-+	/*
-+	* Testvectors from verified.test-vectors submitted to ECRYPT.
-+	* They are truncated to size 39, 64, 111, 129 to test a variety
-+	* of input length.
-+	*/
-+	{ /* Set 3, vector 0 */
-+		.key	= {
-+			    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			    0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
-+			  },
-+		.klen	= 16,
-+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
-+		.input	= {
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			  },
-+		.ilen	= 39,
-+		.result	= {
-+			    0x2D, 0xD5, 0xC3, 0xF7, 0xBA, 0x2B, 0x20, 0xF7,
-+                            0x68, 0x02, 0x41, 0x0C, 0x68, 0x86, 0x88, 0x89,
-+                            0x5A, 0xD8, 0xC1, 0xBD, 0x4E, 0xA6, 0xC9, 0xB1,
-+                            0x40, 0xFB, 0x9B, 0x90, 0xE2, 0x10, 0x49, 0xBF,
-+                            0x58, 0x3F, 0x52, 0x79, 0x70, 0xEB, 0xC1,
-+			},
-+		.rlen	= 39,
-+	}, { /* Set 5, vector 0 */
-+		.key	= {
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-+			  },
-+		.klen	= 16,
-+		.iv     = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
-+		.input	= {
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			  },
-+		.ilen	= 64,
-+		.result	= {
-+			    0xB6, 0x6C, 0x1E, 0x44, 0x46, 0xDD, 0x95, 0x57,
-+                            0xE5, 0x78, 0xE2, 0x23, 0xB0, 0xB7, 0x68, 0x01,
-+                            0x7B, 0x23, 0xB2, 0x67, 0xBB, 0x02, 0x34, 0xAE,
-+                            0x46, 0x26, 0xBF, 0x44, 0x3F, 0x21, 0x97, 0x76,
-+                            0x43, 0x6F, 0xB1, 0x9F, 0xD0, 0xE8, 0x86, 0x6F,
-+                            0xCD, 0x0D, 0xE9, 0xA9, 0x53, 0x8F, 0x4A, 0x09,
-+                            0xCA, 0x9A, 0xC0, 0x73, 0x2E, 0x30, 0xBC, 0xF9,
-+                            0x8E, 0x4F, 0x13, 0xE4, 0xB9, 0xE2, 0x01, 0xD9,
-+			  },
-+		.rlen	= 64,
-+	}, { /* Set 3, vector 27 */
-+		.key	= {
-+			    0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22,
-+			    0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A,
-+                            0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32,
-+			    0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A
-+			  },
-+		.klen	= 32,
-+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
-+		.input	= {
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++/* Ports that the nozomi has */
++enum port_type {
++	PORT_MDM	= 0,
++	PORT_DIAG	= 1,
++	PORT_APP1	= 2,
++	PORT_APP2	= 3,
++	PORT_CTRL	= 4,
++	PORT_ERROR	= -1,
++};
 +
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			  },
-+		.ilen	= 111,
-+		.result	= {
-+			    0xAE, 0x39, 0x50, 0x8E, 0xAC, 0x9A, 0xEC, 0xE7,
-+                            0xBF, 0x97, 0xBB, 0x20, 0xB9, 0xDE, 0xE4, 0x1F,
-+                            0x87, 0xD9, 0x47, 0xF8, 0x28, 0x91, 0x35, 0x98,
-+                            0xDB, 0x72, 0xCC, 0x23, 0x29, 0x48, 0x56, 0x5E,
-+                            0x83, 0x7E, 0x0B, 0xF3, 0x7D, 0x5D, 0x38, 0x7B,
-+                            0x2D, 0x71, 0x02, 0xB4, 0x3B, 0xB5, 0xD8, 0x23,
-+                            0xB0, 0x4A, 0xDF, 0x3C, 0xEC, 0xB6, 0xD9, 0x3B,
-+                            0x9B, 0xA7, 0x52, 0xBE, 0xC5, 0xD4, 0x50, 0x59,
++#ifdef __BIG_ENDIAN
++/* Big endian */
 +
-+                            0x15, 0x14, 0xB4, 0x0E, 0x40, 0xE6, 0x53, 0xD1,
-+                            0x83, 0x9C, 0x5B, 0xA0, 0x92, 0x29, 0x6B, 0x5E,
-+                            0x96, 0x5B, 0x1E, 0x2F, 0xD3, 0xAC, 0xC1, 0x92,
-+                            0xB1, 0x41, 0x3F, 0x19, 0x2F, 0xC4, 0x3B, 0xC6,
-+                            0x95, 0x46, 0x45, 0x54, 0xE9, 0x75, 0x03, 0x08,
-+                            0x44, 0xAF, 0xE5, 0x8A, 0x81, 0x12, 0x09,
-+			  },
-+		.rlen	= 111,
++struct toggles {
++	unsigned enabled:5;	/*
++				 * Toggle fields are valid if enabled is 0,
++				 * else A-channels must always be used.
++				 */
++	unsigned diag_dl:1;
++	unsigned mdm_dl:1;
++	unsigned mdm_ul:1;
++} __attribute__ ((packed));
 +
-+	}, { /* Set 5, vector 27 */
-+		.key	= {
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-+			  },
-+		.klen	= 32,
-+		.iv     = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00 },
-+		.input	= {
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++/* Configuration table to read at startup of card */
++/* Is for now only needed during initialization phase */
++struct config_table {
++	u32 signature;
++	u16 product_information;
++	u16 version;
++	u8 pad3[3];
++	struct toggles toggle;
++	u8 pad1[4];
++	u16 dl_mdm_len1;	/*
++				 * If this is 64, it can hold
++				 * 60 bytes + 4 that is length field
++				 */
++	u16 dl_start;
 +
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++	u16 dl_diag_len1;
++	u16 dl_mdm_len2;	/*
++				 * If this is 64, it can hold
++				 * 60 bytes + 4 that is length field
++				 */
++	u16 dl_app1_len;
 +
-+			    0x00,
-+			  },
-+		.ilen	= 129,
-+		.result	= {
-+			    0xD2, 0xDB, 0x1A, 0x5C, 0xF1, 0xC1, 0xAC, 0xDB,
-+                            0xE8, 0x1A, 0x7A, 0x43, 0x40, 0xEF, 0x53, 0x43,
-+                            0x5E, 0x7F, 0x4B, 0x1A, 0x50, 0x52, 0x3F, 0x8D,
-+                            0x28, 0x3D, 0xCF, 0x85, 0x1D, 0x69, 0x6E, 0x60,
-+                            0xF2, 0xDE, 0x74, 0x56, 0x18, 0x1B, 0x84, 0x10,
-+                            0xD4, 0x62, 0xBA, 0x60, 0x50, 0xF0, 0x61, 0xF2,
-+                            0x1C, 0x78, 0x7F, 0xC1, 0x24, 0x34, 0xAF, 0x58,
-+                            0xBF, 0x2C, 0x59, 0xCA, 0x90, 0x77, 0xF3, 0xB0,
++	u16 dl_diag_len2;
++	u16 dl_ctrl_len;
++	u16 dl_app2_len;
++	u8 pad2[16];
++	u16 ul_mdm_len1;
++	u16 ul_start;
++	u16 ul_diag_len;
++	u16 ul_mdm_len2;
++	u16 ul_app1_len;
++	u16 ul_app2_len;
++	u16 ul_ctrl_len;
++} __attribute__ ((packed));
 +
-+                            0x5B, 0x4A, 0xDF, 0x89, 0xCE, 0x2C, 0x2F, 0xFC,
-+                            0x67, 0xF0, 0xE3, 0x45, 0xE8, 0xB3, 0xB3, 0x75,
-+                            0xA0, 0x95, 0x71, 0xA1, 0x29, 0x39, 0x94, 0xCA,
-+                            0x45, 0x2F, 0xBD, 0xCB, 0x10, 0xB6, 0xBE, 0x9F,
-+                            0x8E, 0xF9, 0xB2, 0x01, 0x0A, 0x5A, 0x0A, 0xB7,
-+                            0x6B, 0x9D, 0x70, 0x8E, 0x4B, 0xD6, 0x2F, 0xCD,
-+                            0x2E, 0x40, 0x48, 0x75, 0xE9, 0xE2, 0x21, 0x45,
-+                            0x0B, 0xC9, 0xB6, 0xB5, 0x66, 0xBC, 0x9A, 0x59,
++/* This stores all control downlink flags */
++struct ctrl_dl {
++	u8 port;
++	unsigned reserved:4;
++	unsigned CTS:1;
++	unsigned RI:1;
++	unsigned DCD:1;
++	unsigned DSR:1;
++} __attribute__ ((packed));
 +
-+                            0x5A,
-+			  },
-+		.rlen	= 129,
-+	}, { /* large test vector generated using Crypto++ */
-+		.key = {
-+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
-+		},
-+		.klen = 32,
-+		.iv = {
-+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+		},
-+		.input = {
-+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-+			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-+			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
-+			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
-+			0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
-+			0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
-+			0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
-+			0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
-+			0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
-+			0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
-+			0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
-+			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
-+			0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
-+			0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
-+			0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
-+			0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
-+			0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
-+			0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
-+			0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
-+			0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
-+			0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
-+			0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
-+			0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
-+			0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
-+			0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
-+			0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
-+			0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
-+			0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
-+			0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
-+			0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
-+			0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
-+			0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15,
-+			0x18, 0x1b, 0x1e, 0x21, 0x24, 0x27, 0x2a, 0x2d,
-+			0x30, 0x33, 0x36, 0x39, 0x3c, 0x3f, 0x42, 0x45,
-+			0x48, 0x4b, 0x4e, 0x51, 0x54, 0x57, 0x5a, 0x5d,
-+			0x60, 0x63, 0x66, 0x69, 0x6c, 0x6f, 0x72, 0x75,
-+			0x78, 0x7b, 0x7e, 0x81, 0x84, 0x87, 0x8a, 0x8d,
-+			0x90, 0x93, 0x96, 0x99, 0x9c, 0x9f, 0xa2, 0xa5,
-+			0xa8, 0xab, 0xae, 0xb1, 0xb4, 0xb7, 0xba, 0xbd,
-+			0xc0, 0xc3, 0xc6, 0xc9, 0xcc, 0xcf, 0xd2, 0xd5,
-+			0xd8, 0xdb, 0xde, 0xe1, 0xe4, 0xe7, 0xea, 0xed,
-+			0xf0, 0xf3, 0xf6, 0xf9, 0xfc, 0xff, 0x02, 0x05,
-+			0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x1a, 0x1d,
-+			0x20, 0x23, 0x26, 0x29, 0x2c, 0x2f, 0x32, 0x35,
-+			0x38, 0x3b, 0x3e, 0x41, 0x44, 0x47, 0x4a, 0x4d,
-+			0x50, 0x53, 0x56, 0x59, 0x5c, 0x5f, 0x62, 0x65,
-+			0x68, 0x6b, 0x6e, 0x71, 0x74, 0x77, 0x7a, 0x7d,
-+			0x80, 0x83, 0x86, 0x89, 0x8c, 0x8f, 0x92, 0x95,
-+			0x98, 0x9b, 0x9e, 0xa1, 0xa4, 0xa7, 0xaa, 0xad,
-+			0xb0, 0xb3, 0xb6, 0xb9, 0xbc, 0xbf, 0xc2, 0xc5,
-+			0xc8, 0xcb, 0xce, 0xd1, 0xd4, 0xd7, 0xda, 0xdd,
-+			0xe0, 0xe3, 0xe6, 0xe9, 0xec, 0xef, 0xf2, 0xf5,
-+			0xf8, 0xfb, 0xfe, 0x01, 0x04, 0x07, 0x0a, 0x0d,
-+			0x10, 0x13, 0x16, 0x19, 0x1c, 0x1f, 0x22, 0x25,
-+			0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, 0x3d,
-+			0x40, 0x43, 0x46, 0x49, 0x4c, 0x4f, 0x52, 0x55,
-+			0x58, 0x5b, 0x5e, 0x61, 0x64, 0x67, 0x6a, 0x6d,
-+			0x70, 0x73, 0x76, 0x79, 0x7c, 0x7f, 0x82, 0x85,
-+			0x88, 0x8b, 0x8e, 0x91, 0x94, 0x97, 0x9a, 0x9d,
-+			0xa0, 0xa3, 0xa6, 0xa9, 0xac, 0xaf, 0xb2, 0xb5,
-+			0xb8, 0xbb, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd,
-+			0xd0, 0xd3, 0xd6, 0xd9, 0xdc, 0xdf, 0xe2, 0xe5,
-+			0xe8, 0xeb, 0xee, 0xf1, 0xf4, 0xf7, 0xfa, 0xfd,
-+			0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1e, 0x23,
-+			0x28, 0x2d, 0x32, 0x37, 0x3c, 0x41, 0x46, 0x4b,
-+			0x50, 0x55, 0x5a, 0x5f, 0x64, 0x69, 0x6e, 0x73,
-+			0x78, 0x7d, 0x82, 0x87, 0x8c, 0x91, 0x96, 0x9b,
-+			0xa0, 0xa5, 0xaa, 0xaf, 0xb4, 0xb9, 0xbe, 0xc3,
-+			0xc8, 0xcd, 0xd2, 0xd7, 0xdc, 0xe1, 0xe6, 0xeb,
-+			0xf0, 0xf5, 0xfa, 0xff, 0x04, 0x09, 0x0e, 0x13,
-+			0x18, 0x1d, 0x22, 0x27, 0x2c, 0x31, 0x36, 0x3b,
-+			0x40, 0x45, 0x4a, 0x4f, 0x54, 0x59, 0x5e, 0x63,
-+			0x68, 0x6d, 0x72, 0x77, 0x7c, 0x81, 0x86, 0x8b,
-+			0x90, 0x95, 0x9a, 0x9f, 0xa4, 0xa9, 0xae, 0xb3,
-+			0xb8, 0xbd, 0xc2, 0xc7, 0xcc, 0xd1, 0xd6, 0xdb,
-+			0xe0, 0xe5, 0xea, 0xef, 0xf4, 0xf9, 0xfe, 0x03,
-+			0x08, 0x0d, 0x12, 0x17, 0x1c, 0x21, 0x26, 0x2b,
-+			0x30, 0x35, 0x3a, 0x3f, 0x44, 0x49, 0x4e, 0x53,
-+			0x58, 0x5d, 0x62, 0x67, 0x6c, 0x71, 0x76, 0x7b,
-+			0x80, 0x85, 0x8a, 0x8f, 0x94, 0x99, 0x9e, 0xa3,
-+			0xa8, 0xad, 0xb2, 0xb7, 0xbc, 0xc1, 0xc6, 0xcb,
-+			0xd0, 0xd5, 0xda, 0xdf, 0xe4, 0xe9, 0xee, 0xf3,
-+			0xf8, 0xfd, 0x02, 0x07, 0x0c, 0x11, 0x16, 0x1b,
-+			0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3e, 0x43,
-+			0x48, 0x4d, 0x52, 0x57, 0x5c, 0x61, 0x66, 0x6b,
-+			0x70, 0x75, 0x7a, 0x7f, 0x84, 0x89, 0x8e, 0x93,
-+			0x98, 0x9d, 0xa2, 0xa7, 0xac, 0xb1, 0xb6, 0xbb,
-+			0xc0, 0xc5, 0xca, 0xcf, 0xd4, 0xd9, 0xde, 0xe3,
-+			0xe8, 0xed, 0xf2, 0xf7, 0xfc, 0x01, 0x06, 0x0b,
-+			0x10, 0x15, 0x1a, 0x1f, 0x24, 0x29, 0x2e, 0x33,
-+			0x38, 0x3d, 0x42, 0x47, 0x4c, 0x51, 0x56, 0x5b,
-+			0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79, 0x7e, 0x83,
-+			0x88, 0x8d, 0x92, 0x97, 0x9c, 0xa1, 0xa6, 0xab,
-+			0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, 0xce, 0xd3,
-+			0xd8, 0xdd, 0xe2, 0xe7, 0xec, 0xf1, 0xf6, 0xfb,
-+			0x00, 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31,
-+			0x38, 0x3f, 0x46, 0x4d, 0x54, 0x5b, 0x62, 0x69,
-+			0x70, 0x77, 0x7e, 0x85, 0x8c, 0x93, 0x9a, 0xa1,
-+			0xa8, 0xaf, 0xb6, 0xbd, 0xc4, 0xcb, 0xd2, 0xd9,
-+			0xe0, 0xe7, 0xee, 0xf5, 0xfc, 0x03, 0x0a, 0x11,
-+			0x18, 0x1f, 0x26, 0x2d, 0x34, 0x3b, 0x42, 0x49,
-+			0x50, 0x57, 0x5e, 0x65, 0x6c, 0x73, 0x7a, 0x81,
-+			0x88, 0x8f, 0x96, 0x9d, 0xa4, 0xab, 0xb2, 0xb9,
-+			0xc0, 0xc7, 0xce, 0xd5, 0xdc, 0xe3, 0xea, 0xf1,
-+			0xf8, 0xff, 0x06, 0x0d, 0x14, 0x1b, 0x22, 0x29,
-+			0x30, 0x37, 0x3e, 0x45, 0x4c, 0x53, 0x5a, 0x61,
-+			0x68, 0x6f, 0x76, 0x7d, 0x84, 0x8b, 0x92, 0x99,
-+			0xa0, 0xa7, 0xae, 0xb5, 0xbc, 0xc3, 0xca, 0xd1,
-+			0xd8, 0xdf, 0xe6, 0xed, 0xf4, 0xfb, 0x02, 0x09,
-+			0x10, 0x17, 0x1e, 0x25, 0x2c, 0x33, 0x3a, 0x41,
-+			0x48, 0x4f, 0x56, 0x5d, 0x64, 0x6b, 0x72, 0x79,
-+			0x80, 0x87, 0x8e, 0x95, 0x9c, 0xa3, 0xaa, 0xb1,
-+			0xb8, 0xbf, 0xc6, 0xcd, 0xd4, 0xdb, 0xe2, 0xe9,
-+			0xf0, 0xf7, 0xfe, 0x05, 0x0c, 0x13, 0x1a, 0x21,
-+			0x28, 0x2f, 0x36, 0x3d, 0x44, 0x4b, 0x52, 0x59,
-+			0x60, 0x67, 0x6e, 0x75, 0x7c, 0x83, 0x8a, 0x91,
-+			0x98, 0x9f, 0xa6, 0xad, 0xb4, 0xbb, 0xc2, 0xc9,
-+			0xd0, 0xd7, 0xde, 0xe5, 0xec, 0xf3, 0xfa, 0x01,
-+			0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2b, 0x32, 0x39,
-+			0x40, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71,
-+			0x78, 0x7f, 0x86, 0x8d, 0x94, 0x9b, 0xa2, 0xa9,
-+			0xb0, 0xb7, 0xbe, 0xc5, 0xcc, 0xd3, 0xda, 0xe1,
-+			0xe8, 0xef, 0xf6, 0xfd, 0x04, 0x0b, 0x12, 0x19,
-+			0x20, 0x27, 0x2e, 0x35, 0x3c, 0x43, 0x4a, 0x51,
-+			0x58, 0x5f, 0x66, 0x6d, 0x74, 0x7b, 0x82, 0x89,
-+			0x90, 0x97, 0x9e, 0xa5, 0xac, 0xb3, 0xba, 0xc1,
-+			0xc8, 0xcf, 0xd6, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9,
-+			0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
-+			0x48, 0x51, 0x5a, 0x63, 0x6c, 0x75, 0x7e, 0x87,
-+			0x90, 0x99, 0xa2, 0xab, 0xb4, 0xbd, 0xc6, 0xcf,
-+			0xd8, 0xe1, 0xea, 0xf3, 0xfc, 0x05, 0x0e, 0x17,
-+			0x20, 0x29, 0x32, 0x3b, 0x44, 0x4d, 0x56, 0x5f,
-+			0x68, 0x71, 0x7a, 0x83, 0x8c, 0x95, 0x9e, 0xa7,
-+			0xb0, 0xb9, 0xc2, 0xcb, 0xd4, 0xdd, 0xe6, 0xef,
-+			0xf8, 0x01, 0x0a, 0x13, 0x1c, 0x25, 0x2e, 0x37,
-+			0x40, 0x49, 0x52, 0x5b, 0x64, 0x6d, 0x76, 0x7f,
-+			0x88, 0x91, 0x9a, 0xa3, 0xac, 0xb5, 0xbe, 0xc7,
-+			0xd0, 0xd9, 0xe2, 0xeb, 0xf4, 0xfd, 0x06, 0x0f,
-+			0x18, 0x21, 0x2a, 0x33, 0x3c, 0x45, 0x4e, 0x57,
-+			0x60, 0x69, 0x72, 0x7b, 0x84, 0x8d, 0x96, 0x9f,
-+			0xa8, 0xb1, 0xba, 0xc3, 0xcc, 0xd5, 0xde, 0xe7,
-+			0xf0, 0xf9, 0x02, 0x0b, 0x14, 0x1d, 0x26, 0x2f,
-+			0x38, 0x41, 0x4a, 0x53, 0x5c, 0x65, 0x6e, 0x77,
-+			0x80, 0x89, 0x92, 0x9b, 0xa4, 0xad, 0xb6, 0xbf,
-+			0xc8, 0xd1, 0xda, 0xe3, 0xec, 0xf5, 0xfe, 0x07,
-+			0x10, 0x19, 0x22, 0x2b, 0x34, 0x3d, 0x46, 0x4f,
-+			0x58, 0x61, 0x6a, 0x73, 0x7c, 0x85, 0x8e, 0x97,
-+			0xa0, 0xa9, 0xb2, 0xbb, 0xc4, 0xcd, 0xd6, 0xdf,
-+			0xe8, 0xf1, 0xfa, 0x03, 0x0c, 0x15, 0x1e, 0x27,
-+			0x30, 0x39, 0x42, 0x4b, 0x54, 0x5d, 0x66, 0x6f,
-+			0x78, 0x81, 0x8a, 0x93, 0x9c, 0xa5, 0xae, 0xb7,
-+			0xc0, 0xc9, 0xd2, 0xdb, 0xe4, 0xed, 0xf6, 0xff,
-+			0x08, 0x11, 0x1a, 0x23, 0x2c, 0x35, 0x3e, 0x47,
-+			0x50, 0x59, 0x62, 0x6b, 0x74, 0x7d, 0x86, 0x8f,
-+			0x98, 0xa1, 0xaa, 0xb3, 0xbc, 0xc5, 0xce, 0xd7,
-+			0xe0, 0xe9, 0xf2, 0xfb, 0x04, 0x0d, 0x16, 0x1f,
-+			0x28, 0x31, 0x3a, 0x43, 0x4c, 0x55, 0x5e, 0x67,
-+			0x70, 0x79, 0x82, 0x8b, 0x94, 0x9d, 0xa6, 0xaf,
-+			0xb8, 0xc1, 0xca, 0xd3, 0xdc, 0xe5, 0xee, 0xf7,
-+			0x00, 0x0b, 0x16, 0x21, 0x2c, 0x37, 0x42, 0x4d,
-+			0x58, 0x63, 0x6e, 0x79, 0x84, 0x8f, 0x9a, 0xa5,
-+			0xb0, 0xbb, 0xc6, 0xd1, 0xdc, 0xe7, 0xf2, 0xfd,
-+			0x08, 0x13, 0x1e, 0x29, 0x34, 0x3f, 0x4a, 0x55,
-+			0x60, 0x6b, 0x76, 0x81, 0x8c, 0x97, 0xa2, 0xad,
-+			0xb8, 0xc3, 0xce, 0xd9, 0xe4, 0xef, 0xfa, 0x05,
-+			0x10, 0x1b, 0x26, 0x31, 0x3c, 0x47, 0x52, 0x5d,
-+			0x68, 0x73, 0x7e, 0x89, 0x94, 0x9f, 0xaa, 0xb5,
-+			0xc0, 0xcb, 0xd6, 0xe1, 0xec, 0xf7, 0x02, 0x0d,
-+			0x18, 0x23, 0x2e, 0x39, 0x44, 0x4f, 0x5a, 0x65,
-+			0x70, 0x7b, 0x86, 0x91, 0x9c, 0xa7, 0xb2, 0xbd,
-+			0xc8, 0xd3, 0xde, 0xe9, 0xf4, 0xff, 0x0a, 0x15,
-+			0x20, 0x2b, 0x36, 0x41, 0x4c, 0x57, 0x62, 0x6d,
-+			0x78, 0x83, 0x8e, 0x99, 0xa4, 0xaf, 0xba, 0xc5,
-+			0xd0, 0xdb, 0xe6, 0xf1, 0xfc, 0x07, 0x12, 0x1d,
-+			0x28, 0x33, 0x3e, 0x49, 0x54, 0x5f, 0x6a, 0x75,
-+			0x80, 0x8b, 0x96, 0xa1, 0xac, 0xb7, 0xc2, 0xcd,
-+			0xd8, 0xe3, 0xee, 0xf9, 0x04, 0x0f, 0x1a, 0x25,
-+			0x30, 0x3b, 0x46, 0x51, 0x5c, 0x67, 0x72, 0x7d,
-+			0x88, 0x93, 0x9e, 0xa9, 0xb4, 0xbf, 0xca, 0xd5,
-+			0xe0, 0xeb, 0xf6, 0x01, 0x0c, 0x17, 0x22, 0x2d,
-+			0x38, 0x43, 0x4e, 0x59, 0x64, 0x6f, 0x7a, 0x85,
-+			0x90, 0x9b, 0xa6, 0xb1, 0xbc, 0xc7, 0xd2, 0xdd,
-+			0xe8, 0xf3, 0xfe, 0x09, 0x14, 0x1f, 0x2a, 0x35,
-+			0x40, 0x4b, 0x56, 0x61, 0x6c, 0x77, 0x82, 0x8d,
-+			0x98, 0xa3, 0xae, 0xb9, 0xc4, 0xcf, 0xda, 0xe5,
-+			0xf0, 0xfb, 0x06, 0x11, 0x1c, 0x27, 0x32, 0x3d,
-+			0x48, 0x53, 0x5e, 0x69, 0x74, 0x7f, 0x8a, 0x95,
-+			0xa0, 0xab, 0xb6, 0xc1, 0xcc, 0xd7, 0xe2, 0xed,
-+			0xf8, 0x03, 0x0e, 0x19, 0x24, 0x2f, 0x3a, 0x45,
-+			0x50, 0x5b, 0x66, 0x71, 0x7c, 0x87, 0x92, 0x9d,
-+			0xa8, 0xb3, 0xbe, 0xc9, 0xd4, 0xdf, 0xea, 0xf5,
-+			0x00, 0x0d, 0x1a, 0x27, 0x34, 0x41, 0x4e, 0x5b,
-+			0x68, 0x75, 0x82, 0x8f, 0x9c, 0xa9, 0xb6, 0xc3,
-+			0xd0, 0xdd, 0xea, 0xf7, 0x04, 0x11, 0x1e, 0x2b,
-+			0x38, 0x45, 0x52, 0x5f, 0x6c, 0x79, 0x86, 0x93,
-+			0xa0, 0xad, 0xba, 0xc7, 0xd4, 0xe1, 0xee, 0xfb,
-+			0x08, 0x15, 0x22, 0x2f, 0x3c, 0x49, 0x56, 0x63,
-+			0x70, 0x7d, 0x8a, 0x97, 0xa4, 0xb1, 0xbe, 0xcb,
-+			0xd8, 0xe5, 0xf2, 0xff, 0x0c, 0x19, 0x26, 0x33,
-+			0x40, 0x4d, 0x5a, 0x67, 0x74, 0x81, 0x8e, 0x9b,
-+			0xa8, 0xb5, 0xc2, 0xcf, 0xdc, 0xe9, 0xf6, 0x03,
-+			0x10, 0x1d, 0x2a, 0x37, 0x44, 0x51, 0x5e, 0x6b,
-+			0x78, 0x85, 0x92, 0x9f, 0xac, 0xb9, 0xc6, 0xd3,
-+			0xe0, 0xed, 0xfa, 0x07, 0x14, 0x21, 0x2e, 0x3b,
-+			0x48, 0x55, 0x62, 0x6f, 0x7c, 0x89, 0x96, 0xa3,
-+			0xb0, 0xbd, 0xca, 0xd7, 0xe4, 0xf1, 0xfe, 0x0b,
-+			0x18, 0x25, 0x32, 0x3f, 0x4c, 0x59, 0x66, 0x73,
-+			0x80, 0x8d, 0x9a, 0xa7, 0xb4, 0xc1, 0xce, 0xdb,
-+			0xe8, 0xf5, 0x02, 0x0f, 0x1c, 0x29, 0x36, 0x43,
-+			0x50, 0x5d, 0x6a, 0x77, 0x84, 0x91, 0x9e, 0xab,
-+			0xb8, 0xc5, 0xd2, 0xdf, 0xec, 0xf9, 0x06, 0x13,
-+			0x20, 0x2d, 0x3a, 0x47, 0x54, 0x61, 0x6e, 0x7b,
-+			0x88, 0x95, 0xa2, 0xaf, 0xbc, 0xc9, 0xd6, 0xe3,
-+			0xf0, 0xfd, 0x0a, 0x17, 0x24, 0x31, 0x3e, 0x4b,
-+			0x58, 0x65, 0x72, 0x7f, 0x8c, 0x99, 0xa6, 0xb3,
-+			0xc0, 0xcd, 0xda, 0xe7, 0xf4, 0x01, 0x0e, 0x1b,
-+			0x28, 0x35, 0x42, 0x4f, 0x5c, 0x69, 0x76, 0x83,
-+			0x90, 0x9d, 0xaa, 0xb7, 0xc4, 0xd1, 0xde, 0xeb,
-+			0xf8, 0x05, 0x12, 0x1f, 0x2c, 0x39, 0x46, 0x53,
-+			0x60, 0x6d, 0x7a, 0x87, 0x94, 0xa1, 0xae, 0xbb,
-+			0xc8, 0xd5, 0xe2, 0xef, 0xfc, 0x09, 0x16, 0x23,
-+			0x30, 0x3d, 0x4a, 0x57, 0x64, 0x71, 0x7e, 0x8b,
-+			0x98, 0xa5, 0xb2, 0xbf, 0xcc, 0xd9, 0xe6, 0xf3,
-+			0x00, 0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69,
-+			0x78, 0x87, 0x96, 0xa5, 0xb4, 0xc3, 0xd2, 0xe1,
-+			0xf0, 0xff, 0x0e, 0x1d, 0x2c, 0x3b, 0x4a, 0x59,
-+			0x68, 0x77, 0x86, 0x95, 0xa4, 0xb3, 0xc2, 0xd1,
-+			0xe0, 0xef, 0xfe, 0x0d, 0x1c, 0x2b, 0x3a, 0x49,
-+			0x58, 0x67, 0x76, 0x85, 0x94, 0xa3, 0xb2, 0xc1,
-+			0xd0, 0xdf, 0xee, 0xfd, 0x0c, 0x1b, 0x2a, 0x39,
-+			0x48, 0x57, 0x66, 0x75, 0x84, 0x93, 0xa2, 0xb1,
-+			0xc0, 0xcf, 0xde, 0xed, 0xfc, 0x0b, 0x1a, 0x29,
-+			0x38, 0x47, 0x56, 0x65, 0x74, 0x83, 0x92, 0xa1,
-+			0xb0, 0xbf, 0xce, 0xdd, 0xec, 0xfb, 0x0a, 0x19,
-+			0x28, 0x37, 0x46, 0x55, 0x64, 0x73, 0x82, 0x91,
-+			0xa0, 0xaf, 0xbe, 0xcd, 0xdc, 0xeb, 0xfa, 0x09,
-+			0x18, 0x27, 0x36, 0x45, 0x54, 0x63, 0x72, 0x81,
-+			0x90, 0x9f, 0xae, 0xbd, 0xcc, 0xdb, 0xea, 0xf9,
-+			0x08, 0x17, 0x26, 0x35, 0x44, 0x53, 0x62, 0x71,
-+			0x80, 0x8f, 0x9e, 0xad, 0xbc, 0xcb, 0xda, 0xe9,
-+			0xf8, 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61,
-+			0x70, 0x7f, 0x8e, 0x9d, 0xac, 0xbb, 0xca, 0xd9,
-+			0xe8, 0xf7, 0x06, 0x15, 0x24, 0x33, 0x42, 0x51,
-+			0x60, 0x6f, 0x7e, 0x8d, 0x9c, 0xab, 0xba, 0xc9,
-+			0xd8, 0xe7, 0xf6, 0x05, 0x14, 0x23, 0x32, 0x41,
-+			0x50, 0x5f, 0x6e, 0x7d, 0x8c, 0x9b, 0xaa, 0xb9,
-+			0xc8, 0xd7, 0xe6, 0xf5, 0x04, 0x13, 0x22, 0x31,
-+			0x40, 0x4f, 0x5e, 0x6d, 0x7c, 0x8b, 0x9a, 0xa9,
-+			0xb8, 0xc7, 0xd6, 0xe5, 0xf4, 0x03, 0x12, 0x21,
-+			0x30, 0x3f, 0x4e, 0x5d, 0x6c, 0x7b, 0x8a, 0x99,
-+			0xa8, 0xb7, 0xc6, 0xd5, 0xe4, 0xf3, 0x02, 0x11,
-+			0x20, 0x2f, 0x3e, 0x4d, 0x5c, 0x6b, 0x7a, 0x89,
-+			0x98, 0xa7, 0xb6, 0xc5, 0xd4, 0xe3, 0xf2, 0x01,
-+			0x10, 0x1f, 0x2e, 0x3d, 0x4c, 0x5b, 0x6a, 0x79,
-+			0x88, 0x97, 0xa6, 0xb5, 0xc4, 0xd3, 0xe2, 0xf1,
-+			0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
-+			0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
-+			0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87,
-+			0x98, 0xa9, 0xba, 0xcb, 0xdc, 0xed, 0xfe, 0x0f,
-+			0x20, 0x31, 0x42, 0x53, 0x64, 0x75, 0x86, 0x97,
-+			0xa8, 0xb9, 0xca, 0xdb, 0xec, 0xfd, 0x0e, 0x1f,
-+			0x30, 0x41, 0x52, 0x63, 0x74, 0x85, 0x96, 0xa7,
-+			0xb8, 0xc9, 0xda, 0xeb, 0xfc, 0x0d, 0x1e, 0x2f,
-+			0x40, 0x51, 0x62, 0x73, 0x84, 0x95, 0xa6, 0xb7,
-+			0xc8, 0xd9, 0xea, 0xfb, 0x0c, 0x1d, 0x2e, 0x3f,
-+			0x50, 0x61, 0x72, 0x83, 0x94, 0xa5, 0xb6, 0xc7,
-+			0xd8, 0xe9, 0xfa, 0x0b, 0x1c, 0x2d, 0x3e, 0x4f,
-+			0x60, 0x71, 0x82, 0x93, 0xa4, 0xb5, 0xc6, 0xd7,
-+			0xe8, 0xf9, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f,
-+			0x70, 0x81, 0x92, 0xa3, 0xb4, 0xc5, 0xd6, 0xe7,
-+			0xf8, 0x09, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f,
-+			0x80, 0x91, 0xa2, 0xb3, 0xc4, 0xd5, 0xe6, 0xf7,
-+			0x08, 0x19, 0x2a, 0x3b, 0x4c, 0x5d, 0x6e, 0x7f,
-+			0x90, 0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6, 0x07,
-+			0x18, 0x29, 0x3a, 0x4b, 0x5c, 0x6d, 0x7e, 0x8f,
-+			0xa0, 0xb1, 0xc2, 0xd3, 0xe4, 0xf5, 0x06, 0x17,
-+			0x28, 0x39, 0x4a, 0x5b, 0x6c, 0x7d, 0x8e, 0x9f,
-+			0xb0, 0xc1, 0xd2, 0xe3, 0xf4, 0x05, 0x16, 0x27,
-+			0x38, 0x49, 0x5a, 0x6b, 0x7c, 0x8d, 0x9e, 0xaf,
-+			0xc0, 0xd1, 0xe2, 0xf3, 0x04, 0x15, 0x26, 0x37,
-+			0x48, 0x59, 0x6a, 0x7b, 0x8c, 0x9d, 0xae, 0xbf,
-+			0xd0, 0xe1, 0xf2, 0x03, 0x14, 0x25, 0x36, 0x47,
-+			0x58, 0x69, 0x7a, 0x8b, 0x9c, 0xad, 0xbe, 0xcf,
-+			0xe0, 0xf1, 0x02, 0x13, 0x24, 0x35, 0x46, 0x57,
-+			0x68, 0x79, 0x8a, 0x9b, 0xac, 0xbd, 0xce, 0xdf,
-+			0xf0, 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67,
-+			0x78, 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef,
-+			0x00, 0x13, 0x26, 0x39, 0x4c, 0x5f, 0x72, 0x85,
-+			0x98, 0xab, 0xbe, 0xd1, 0xe4, 0xf7, 0x0a, 0x1d,
-+			0x30, 0x43, 0x56, 0x69, 0x7c, 0x8f, 0xa2, 0xb5,
-+			0xc8, 0xdb, 0xee, 0x01, 0x14, 0x27, 0x3a, 0x4d,
-+			0x60, 0x73, 0x86, 0x99, 0xac, 0xbf, 0xd2, 0xe5,
-+			0xf8, 0x0b, 0x1e, 0x31, 0x44, 0x57, 0x6a, 0x7d,
-+			0x90, 0xa3, 0xb6, 0xc9, 0xdc, 0xef, 0x02, 0x15,
-+			0x28, 0x3b, 0x4e, 0x61, 0x74, 0x87, 0x9a, 0xad,
-+			0xc0, 0xd3, 0xe6, 0xf9, 0x0c, 0x1f, 0x32, 0x45,
-+			0x58, 0x6b, 0x7e, 0x91, 0xa4, 0xb7, 0xca, 0xdd,
-+			0xf0, 0x03, 0x16, 0x29, 0x3c, 0x4f, 0x62, 0x75,
-+			0x88, 0x9b, 0xae, 0xc1, 0xd4, 0xe7, 0xfa, 0x0d,
-+			0x20, 0x33, 0x46, 0x59, 0x6c, 0x7f, 0x92, 0xa5,
-+			0xb8, 0xcb, 0xde, 0xf1, 0x04, 0x17, 0x2a, 0x3d,
-+			0x50, 0x63, 0x76, 0x89, 0x9c, 0xaf, 0xc2, 0xd5,
-+			0xe8, 0xfb, 0x0e, 0x21, 0x34, 0x47, 0x5a, 0x6d,
-+			0x80, 0x93, 0xa6, 0xb9, 0xcc, 0xdf, 0xf2, 0x05,
-+			0x18, 0x2b, 0x3e, 0x51, 0x64, 0x77, 0x8a, 0x9d,
-+			0xb0, 0xc3, 0xd6, 0xe9, 0xfc, 0x0f, 0x22, 0x35,
-+			0x48, 0x5b, 0x6e, 0x81, 0x94, 0xa7, 0xba, 0xcd,
-+			0xe0, 0xf3, 0x06, 0x19, 0x2c, 0x3f, 0x52, 0x65,
-+			0x78, 0x8b, 0x9e, 0xb1, 0xc4, 0xd7, 0xea, 0xfd,
-+			0x10, 0x23, 0x36, 0x49, 0x5c, 0x6f, 0x82, 0x95,
-+			0xa8, 0xbb, 0xce, 0xe1, 0xf4, 0x07, 0x1a, 0x2d,
-+			0x40, 0x53, 0x66, 0x79, 0x8c, 0x9f, 0xb2, 0xc5,
-+			0xd8, 0xeb, 0xfe, 0x11, 0x24, 0x37, 0x4a, 0x5d,
-+			0x70, 0x83, 0x96, 0xa9, 0xbc, 0xcf, 0xe2, 0xf5,
-+			0x08, 0x1b, 0x2e, 0x41, 0x54, 0x67, 0x7a, 0x8d,
-+			0xa0, 0xb3, 0xc6, 0xd9, 0xec, 0xff, 0x12, 0x25,
-+			0x38, 0x4b, 0x5e, 0x71, 0x84, 0x97, 0xaa, 0xbd,
-+			0xd0, 0xe3, 0xf6, 0x09, 0x1c, 0x2f, 0x42, 0x55,
-+			0x68, 0x7b, 0x8e, 0xa1, 0xb4, 0xc7, 0xda, 0xed,
-+			0x00, 0x15, 0x2a, 0x3f, 0x54, 0x69, 0x7e, 0x93,
-+			0xa8, 0xbd, 0xd2, 0xe7, 0xfc, 0x11, 0x26, 0x3b,
-+			0x50, 0x65, 0x7a, 0x8f, 0xa4, 0xb9, 0xce, 0xe3,
-+			0xf8, 0x0d, 0x22, 0x37, 0x4c, 0x61, 0x76, 0x8b,
-+			0xa0, 0xb5, 0xca, 0xdf, 0xf4, 0x09, 0x1e, 0x33,
-+			0x48, 0x5d, 0x72, 0x87, 0x9c, 0xb1, 0xc6, 0xdb,
-+			0xf0, 0x05, 0x1a, 0x2f, 0x44, 0x59, 0x6e, 0x83,
-+			0x98, 0xad, 0xc2, 0xd7, 0xec, 0x01, 0x16, 0x2b,
-+			0x40, 0x55, 0x6a, 0x7f, 0x94, 0xa9, 0xbe, 0xd3,
-+			0xe8, 0xfd, 0x12, 0x27, 0x3c, 0x51, 0x66, 0x7b,
-+			0x90, 0xa5, 0xba, 0xcf, 0xe4, 0xf9, 0x0e, 0x23,
-+			0x38, 0x4d, 0x62, 0x77, 0x8c, 0xa1, 0xb6, 0xcb,
-+			0xe0, 0xf5, 0x0a, 0x1f, 0x34, 0x49, 0x5e, 0x73,
-+			0x88, 0x9d, 0xb2, 0xc7, 0xdc, 0xf1, 0x06, 0x1b,
-+			0x30, 0x45, 0x5a, 0x6f, 0x84, 0x99, 0xae, 0xc3,
-+			0xd8, 0xed, 0x02, 0x17, 0x2c, 0x41, 0x56, 0x6b,
-+			0x80, 0x95, 0xaa, 0xbf, 0xd4, 0xe9, 0xfe, 0x13,
-+			0x28, 0x3d, 0x52, 0x67, 0x7c, 0x91, 0xa6, 0xbb,
-+			0xd0, 0xe5, 0xfa, 0x0f, 0x24, 0x39, 0x4e, 0x63,
-+			0x78, 0x8d, 0xa2, 0xb7, 0xcc, 0xe1, 0xf6, 0x0b,
-+			0x20, 0x35, 0x4a, 0x5f, 0x74, 0x89, 0x9e, 0xb3,
-+			0xc8, 0xdd, 0xf2, 0x07, 0x1c, 0x31, 0x46, 0x5b,
-+			0x70, 0x85, 0x9a, 0xaf, 0xc4, 0xd9, 0xee, 0x03,
-+			0x18, 0x2d, 0x42, 0x57, 0x6c, 0x81, 0x96, 0xab,
-+			0xc0, 0xd5, 0xea, 0xff, 0x14, 0x29, 0x3e, 0x53,
-+			0x68, 0x7d, 0x92, 0xa7, 0xbc, 0xd1, 0xe6, 0xfb,
-+			0x10, 0x25, 0x3a, 0x4f, 0x64, 0x79, 0x8e, 0xa3,
-+			0xb8, 0xcd, 0xe2, 0xf7, 0x0c, 0x21, 0x36, 0x4b,
-+			0x60, 0x75, 0x8a, 0x9f, 0xb4, 0xc9, 0xde, 0xf3,
-+			0x08, 0x1d, 0x32, 0x47, 0x5c, 0x71, 0x86, 0x9b,
-+			0xb0, 0xc5, 0xda, 0xef, 0x04, 0x19, 0x2e, 0x43,
-+			0x58, 0x6d, 0x82, 0x97, 0xac, 0xc1, 0xd6, 0xeb,
-+			0x00, 0x17, 0x2e, 0x45, 0x5c, 0x73, 0x8a, 0xa1,
-+			0xb8, 0xcf, 0xe6, 0xfd, 0x14, 0x2b, 0x42, 0x59,
-+			0x70, 0x87, 0x9e, 0xb5, 0xcc, 0xe3, 0xfa, 0x11,
-+			0x28, 0x3f, 0x56, 0x6d, 0x84, 0x9b, 0xb2, 0xc9,
-+			0xe0, 0xf7, 0x0e, 0x25, 0x3c, 0x53, 0x6a, 0x81,
-+			0x98, 0xaf, 0xc6, 0xdd, 0xf4, 0x0b, 0x22, 0x39,
-+			0x50, 0x67, 0x7e, 0x95, 0xac, 0xc3, 0xda, 0xf1,
-+			0x08, 0x1f, 0x36, 0x4d, 0x64, 0x7b, 0x92, 0xa9,
-+			0xc0, 0xd7, 0xee, 0x05, 0x1c, 0x33, 0x4a, 0x61,
-+			0x78, 0x8f, 0xa6, 0xbd, 0xd4, 0xeb, 0x02, 0x19,
-+			0x30, 0x47, 0x5e, 0x75, 0x8c, 0xa3, 0xba, 0xd1,
-+			0xe8, 0xff, 0x16, 0x2d, 0x44, 0x5b, 0x72, 0x89,
-+			0xa0, 0xb7, 0xce, 0xe5, 0xfc, 0x13, 0x2a, 0x41,
-+			0x58, 0x6f, 0x86, 0x9d, 0xb4, 0xcb, 0xe2, 0xf9,
-+			0x10, 0x27, 0x3e, 0x55, 0x6c, 0x83, 0x9a, 0xb1,
-+			0xc8, 0xdf, 0xf6, 0x0d, 0x24, 0x3b, 0x52, 0x69,
-+			0x80, 0x97, 0xae, 0xc5, 0xdc, 0xf3, 0x0a, 0x21,
-+			0x38, 0x4f, 0x66, 0x7d, 0x94, 0xab, 0xc2, 0xd9,
-+			0xf0, 0x07, 0x1e, 0x35, 0x4c, 0x63, 0x7a, 0x91,
-+			0xa8, 0xbf, 0xd6, 0xed, 0x04, 0x1b, 0x32, 0x49,
-+			0x60, 0x77, 0x8e, 0xa5, 0xbc, 0xd3, 0xea, 0x01,
-+			0x18, 0x2f, 0x46, 0x5d, 0x74, 0x8b, 0xa2, 0xb9,
-+			0xd0, 0xe7, 0xfe, 0x15, 0x2c, 0x43, 0x5a, 0x71,
-+			0x88, 0x9f, 0xb6, 0xcd, 0xe4, 0xfb, 0x12, 0x29,
-+			0x40, 0x57, 0x6e, 0x85, 0x9c, 0xb3, 0xca, 0xe1,
-+			0xf8, 0x0f, 0x26, 0x3d, 0x54, 0x6b, 0x82, 0x99,
-+			0xb0, 0xc7, 0xde, 0xf5, 0x0c, 0x23, 0x3a, 0x51,
-+			0x68, 0x7f, 0x96, 0xad, 0xc4, 0xdb, 0xf2, 0x09,
-+			0x20, 0x37, 0x4e, 0x65, 0x7c, 0x93, 0xaa, 0xc1,
-+			0xd8, 0xef, 0x06, 0x1d, 0x34, 0x4b, 0x62, 0x79,
-+			0x90, 0xa7, 0xbe, 0xd5, 0xec, 0x03, 0x1a, 0x31,
-+			0x48, 0x5f, 0x76, 0x8d, 0xa4, 0xbb, 0xd2, 0xe9,
-+			0x00, 0x19, 0x32, 0x4b, 0x64, 0x7d, 0x96, 0xaf,
-+			0xc8, 0xe1, 0xfa, 0x13, 0x2c, 0x45, 0x5e, 0x77,
-+			0x90, 0xa9, 0xc2, 0xdb, 0xf4, 0x0d, 0x26, 0x3f,
-+			0x58, 0x71, 0x8a, 0xa3, 0xbc, 0xd5, 0xee, 0x07,
-+			0x20, 0x39, 0x52, 0x6b, 0x84, 0x9d, 0xb6, 0xcf,
-+			0xe8, 0x01, 0x1a, 0x33, 0x4c, 0x65, 0x7e, 0x97,
-+			0xb0, 0xc9, 0xe2, 0xfb, 0x14, 0x2d, 0x46, 0x5f,
-+			0x78, 0x91, 0xaa, 0xc3, 0xdc, 0xf5, 0x0e, 0x27,
-+			0x40, 0x59, 0x72, 0x8b, 0xa4, 0xbd, 0xd6, 0xef,
-+			0x08, 0x21, 0x3a, 0x53, 0x6c, 0x85, 0x9e, 0xb7,
-+			0xd0, 0xe9, 0x02, 0x1b, 0x34, 0x4d, 0x66, 0x7f,
-+			0x98, 0xb1, 0xca, 0xe3, 0xfc, 0x15, 0x2e, 0x47,
-+			0x60, 0x79, 0x92, 0xab, 0xc4, 0xdd, 0xf6, 0x0f,
-+			0x28, 0x41, 0x5a, 0x73, 0x8c, 0xa5, 0xbe, 0xd7,
-+			0xf0, 0x09, 0x22, 0x3b, 0x54, 0x6d, 0x86, 0x9f,
-+			0xb8, 0xd1, 0xea, 0x03, 0x1c, 0x35, 0x4e, 0x67,
-+			0x80, 0x99, 0xb2, 0xcb, 0xe4, 0xfd, 0x16, 0x2f,
-+			0x48, 0x61, 0x7a, 0x93, 0xac, 0xc5, 0xde, 0xf7,
-+			0x10, 0x29, 0x42, 0x5b, 0x74, 0x8d, 0xa6, 0xbf,
-+			0xd8, 0xf1, 0x0a, 0x23, 0x3c, 0x55, 0x6e, 0x87,
-+			0xa0, 0xb9, 0xd2, 0xeb, 0x04, 0x1d, 0x36, 0x4f,
-+			0x68, 0x81, 0x9a, 0xb3, 0xcc, 0xe5, 0xfe, 0x17,
-+			0x30, 0x49, 0x62, 0x7b, 0x94, 0xad, 0xc6, 0xdf,
-+			0xf8, 0x11, 0x2a, 0x43, 0x5c, 0x75, 0x8e, 0xa7,
-+			0xc0, 0xd9, 0xf2, 0x0b, 0x24, 0x3d, 0x56, 0x6f,
-+			0x88, 0xa1, 0xba, 0xd3, 0xec, 0x05, 0x1e, 0x37,
-+			0x50, 0x69, 0x82, 0x9b, 0xb4, 0xcd, 0xe6, 0xff,
-+			0x18, 0x31, 0x4a, 0x63, 0x7c, 0x95, 0xae, 0xc7,
-+			0xe0, 0xf9, 0x12, 0x2b, 0x44, 0x5d, 0x76, 0x8f,
-+			0xa8, 0xc1, 0xda, 0xf3, 0x0c, 0x25, 0x3e, 0x57,
-+			0x70, 0x89, 0xa2, 0xbb, 0xd4, 0xed, 0x06, 0x1f,
-+			0x38, 0x51, 0x6a, 0x83, 0x9c, 0xb5, 0xce, 0xe7,
-+			0x00, 0x1b, 0x36, 0x51, 0x6c, 0x87, 0xa2, 0xbd,
-+			0xd8, 0xf3, 0x0e, 0x29, 0x44, 0x5f, 0x7a, 0x95,
-+			0xb0, 0xcb, 0xe6, 0x01, 0x1c, 0x37, 0x52, 0x6d,
-+			0x88, 0xa3, 0xbe, 0xd9, 0xf4, 0x0f, 0x2a, 0x45,
-+			0x60, 0x7b, 0x96, 0xb1, 0xcc, 0xe7, 0x02, 0x1d,
-+			0x38, 0x53, 0x6e, 0x89, 0xa4, 0xbf, 0xda, 0xf5,
-+			0x10, 0x2b, 0x46, 0x61, 0x7c, 0x97, 0xb2, 0xcd,
-+			0xe8, 0x03, 0x1e, 0x39, 0x54, 0x6f, 0x8a, 0xa5,
-+			0xc0, 0xdb, 0xf6, 0x11, 0x2c, 0x47, 0x62, 0x7d,
-+			0x98, 0xb3, 0xce, 0xe9, 0x04, 0x1f, 0x3a, 0x55,
-+			0x70, 0x8b, 0xa6, 0xc1, 0xdc, 0xf7, 0x12, 0x2d,
-+			0x48, 0x63, 0x7e, 0x99, 0xb4, 0xcf, 0xea, 0x05,
-+			0x20, 0x3b, 0x56, 0x71, 0x8c, 0xa7, 0xc2, 0xdd,
-+			0xf8, 0x13, 0x2e, 0x49, 0x64, 0x7f, 0x9a, 0xb5,
-+			0xd0, 0xeb, 0x06, 0x21, 0x3c, 0x57, 0x72, 0x8d,
-+			0xa8, 0xc3, 0xde, 0xf9, 0x14, 0x2f, 0x4a, 0x65,
-+			0x80, 0x9b, 0xb6, 0xd1, 0xec, 0x07, 0x22, 0x3d,
-+			0x58, 0x73, 0x8e, 0xa9, 0xc4, 0xdf, 0xfa, 0x15,
-+			0x30, 0x4b, 0x66, 0x81, 0x9c, 0xb7, 0xd2, 0xed,
-+			0x08, 0x23, 0x3e, 0x59, 0x74, 0x8f, 0xaa, 0xc5,
-+			0xe0, 0xfb, 0x16, 0x31, 0x4c, 0x67, 0x82, 0x9d,
-+			0xb8, 0xd3, 0xee, 0x09, 0x24, 0x3f, 0x5a, 0x75,
-+			0x90, 0xab, 0xc6, 0xe1, 0xfc, 0x17, 0x32, 0x4d,
-+			0x68, 0x83, 0x9e, 0xb9, 0xd4, 0xef, 0x0a, 0x25,
-+			0x40, 0x5b, 0x76, 0x91, 0xac, 0xc7, 0xe2, 0xfd,
-+			0x18, 0x33, 0x4e, 0x69, 0x84, 0x9f, 0xba, 0xd5,
-+			0xf0, 0x0b, 0x26, 0x41, 0x5c, 0x77, 0x92, 0xad,
-+			0xc8, 0xe3, 0xfe, 0x19, 0x34, 0x4f, 0x6a, 0x85,
-+			0xa0, 0xbb, 0xd6, 0xf1, 0x0c, 0x27, 0x42, 0x5d,
-+			0x78, 0x93, 0xae, 0xc9, 0xe4, 0xff, 0x1a, 0x35,
-+			0x50, 0x6b, 0x86, 0xa1, 0xbc, 0xd7, 0xf2, 0x0d,
-+			0x28, 0x43, 0x5e, 0x79, 0x94, 0xaf, 0xca, 0xe5,
-+			0x00, 0x1d, 0x3a, 0x57, 0x74, 0x91, 0xae, 0xcb,
-+			0xe8, 0x05, 0x22, 0x3f, 0x5c, 0x79, 0x96, 0xb3,
-+			0xd0, 0xed, 0x0a, 0x27, 0x44, 0x61, 0x7e, 0x9b,
-+			0xb8, 0xd5, 0xf2, 0x0f, 0x2c, 0x49, 0x66, 0x83,
-+			0xa0, 0xbd, 0xda, 0xf7, 0x14, 0x31, 0x4e, 0x6b,
-+			0x88, 0xa5, 0xc2, 0xdf, 0xfc, 0x19, 0x36, 0x53,
-+			0x70, 0x8d, 0xaa, 0xc7, 0xe4, 0x01, 0x1e, 0x3b,
-+			0x58, 0x75, 0x92, 0xaf, 0xcc, 0xe9, 0x06, 0x23,
-+			0x40, 0x5d, 0x7a, 0x97, 0xb4, 0xd1, 0xee, 0x0b,
-+			0x28, 0x45, 0x62, 0x7f, 0x9c, 0xb9, 0xd6, 0xf3,
-+			0x10, 0x2d, 0x4a, 0x67, 0x84, 0xa1, 0xbe, 0xdb,
-+			0xf8, 0x15, 0x32, 0x4f, 0x6c, 0x89, 0xa6, 0xc3,
-+			0xe0, 0xfd, 0x1a, 0x37, 0x54, 0x71, 0x8e, 0xab,
-+			0xc8, 0xe5, 0x02, 0x1f, 0x3c, 0x59, 0x76, 0x93,
-+			0xb0, 0xcd, 0xea, 0x07, 0x24, 0x41, 0x5e, 0x7b,
-+			0x98, 0xb5, 0xd2, 0xef, 0x0c, 0x29, 0x46, 0x63,
-+			0x80, 0x9d, 0xba, 0xd7, 0xf4, 0x11, 0x2e, 0x4b,
-+			0x68, 0x85, 0xa2, 0xbf, 0xdc, 0xf9, 0x16, 0x33,
-+			0x50, 0x6d, 0x8a, 0xa7, 0xc4, 0xe1, 0xfe, 0x1b,
-+			0x38, 0x55, 0x72, 0x8f, 0xac, 0xc9, 0xe6, 0x03,
-+			0x20, 0x3d, 0x5a, 0x77, 0x94, 0xb1, 0xce, 0xeb,
-+			0x08, 0x25, 0x42, 0x5f, 0x7c, 0x99, 0xb6, 0xd3,
-+			0xf0, 0x0d, 0x2a, 0x47, 0x64, 0x81, 0x9e, 0xbb,
-+			0xd8, 0xf5, 0x12, 0x2f, 0x4c, 0x69, 0x86, 0xa3,
-+			0xc0, 0xdd, 0xfa, 0x17, 0x34, 0x51, 0x6e, 0x8b,
-+			0xa8, 0xc5, 0xe2, 0xff, 0x1c, 0x39, 0x56, 0x73,
-+			0x90, 0xad, 0xca, 0xe7, 0x04, 0x21, 0x3e, 0x5b,
-+			0x78, 0x95, 0xb2, 0xcf, 0xec, 0x09, 0x26, 0x43,
-+			0x60, 0x7d, 0x9a, 0xb7, 0xd4, 0xf1, 0x0e, 0x2b,
-+			0x48, 0x65, 0x82, 0x9f, 0xbc, 0xd9, 0xf6, 0x13,
-+			0x30, 0x4d, 0x6a, 0x87, 0xa4, 0xc1, 0xde, 0xfb,
-+			0x18, 0x35, 0x52, 0x6f, 0x8c, 0xa9, 0xc6, 0xe3,
-+			0x00, 0x1f, 0x3e, 0x5d, 0x7c, 0x9b, 0xba, 0xd9,
-+			0xf8, 0x17, 0x36, 0x55, 0x74, 0x93, 0xb2, 0xd1,
-+			0xf0, 0x0f, 0x2e, 0x4d, 0x6c, 0x8b, 0xaa, 0xc9,
-+			0xe8, 0x07, 0x26, 0x45, 0x64, 0x83, 0xa2, 0xc1,
-+			0xe0, 0xff, 0x1e, 0x3d, 0x5c, 0x7b, 0x9a, 0xb9,
-+			0xd8, 0xf7, 0x16, 0x35, 0x54, 0x73, 0x92, 0xb1,
-+			0xd0, 0xef, 0x0e, 0x2d, 0x4c, 0x6b, 0x8a, 0xa9,
-+			0xc8, 0xe7, 0x06, 0x25, 0x44, 0x63, 0x82, 0xa1,
-+			0xc0, 0xdf, 0xfe, 0x1d, 0x3c, 0x5b, 0x7a, 0x99,
-+			0xb8, 0xd7, 0xf6, 0x15, 0x34, 0x53, 0x72, 0x91,
-+			0xb0, 0xcf, 0xee, 0x0d, 0x2c, 0x4b, 0x6a, 0x89,
-+			0xa8, 0xc7, 0xe6, 0x05, 0x24, 0x43, 0x62, 0x81,
-+			0xa0, 0xbf, 0xde, 0xfd, 0x1c, 0x3b, 0x5a, 0x79,
-+			0x98, 0xb7, 0xd6, 0xf5, 0x14, 0x33, 0x52, 0x71,
-+			0x90, 0xaf, 0xce, 0xed, 0x0c, 0x2b, 0x4a, 0x69,
-+			0x88, 0xa7, 0xc6, 0xe5, 0x04, 0x23, 0x42, 0x61,
-+			0x80, 0x9f, 0xbe, 0xdd, 0xfc, 0x1b, 0x3a, 0x59,
-+			0x78, 0x97, 0xb6, 0xd5, 0xf4, 0x13, 0x32, 0x51,
-+			0x70, 0x8f, 0xae, 0xcd, 0xec, 0x0b, 0x2a, 0x49,
-+			0x68, 0x87, 0xa6, 0xc5, 0xe4, 0x03, 0x22, 0x41,
-+			0x60, 0x7f, 0x9e, 0xbd, 0xdc, 0xfb, 0x1a, 0x39,
-+			0x58, 0x77, 0x96, 0xb5, 0xd4, 0xf3, 0x12, 0x31,
-+			0x50, 0x6f, 0x8e, 0xad, 0xcc, 0xeb, 0x0a, 0x29,
-+			0x48, 0x67, 0x86, 0xa5, 0xc4, 0xe3, 0x02, 0x21,
-+			0x40, 0x5f, 0x7e, 0x9d, 0xbc, 0xdb, 0xfa, 0x19,
-+			0x38, 0x57, 0x76, 0x95, 0xb4, 0xd3, 0xf2, 0x11,
-+			0x30, 0x4f, 0x6e, 0x8d, 0xac, 0xcb, 0xea, 0x09,
-+			0x28, 0x47, 0x66, 0x85, 0xa4, 0xc3, 0xe2, 0x01,
-+			0x20, 0x3f, 0x5e, 0x7d, 0x9c, 0xbb, 0xda, 0xf9,
-+			0x18, 0x37, 0x56, 0x75, 0x94, 0xb3, 0xd2, 0xf1,
-+			0x10, 0x2f, 0x4e, 0x6d, 0x8c, 0xab, 0xca, 0xe9,
-+			0x08, 0x27, 0x46, 0x65, 0x84, 0xa3, 0xc2, 0xe1,
-+			0x00, 0x21, 0x42, 0x63,
-+		},
-+		.ilen = 4100,
-+		.result = {
-+			0xb5, 0x81, 0xf5, 0x64, 0x18, 0x73, 0xe3, 0xf0,
-+			0x4c, 0x13, 0xf2, 0x77, 0x18, 0x60, 0x65, 0x5e,
-+			0x29, 0x01, 0xce, 0x98, 0x55, 0x53, 0xf9, 0x0c,
-+			0x2a, 0x08, 0xd5, 0x09, 0xb3, 0x57, 0x55, 0x56,
-+			0xc5, 0xe9, 0x56, 0x90, 0xcb, 0x6a, 0xa3, 0xc0,
-+			0xff, 0xc4, 0x79, 0xb4, 0xd2, 0x97, 0x5d, 0xc4,
-+			0x43, 0xd1, 0xfe, 0x94, 0x7b, 0x88, 0x06, 0x5a,
-+			0xb2, 0x9e, 0x2c, 0xfc, 0x44, 0x03, 0xb7, 0x90,
-+			0xa0, 0xc1, 0xba, 0x6a, 0x33, 0xb8, 0xc7, 0xb2,
-+			0x9d, 0xe1, 0x12, 0x4f, 0xc0, 0x64, 0xd4, 0x01,
-+			0xfe, 0x8c, 0x7a, 0x66, 0xf7, 0xe6, 0x5a, 0x91,
-+			0xbb, 0xde, 0x56, 0x86, 0xab, 0x65, 0x21, 0x30,
-+			0x00, 0x84, 0x65, 0x24, 0xa5, 0x7d, 0x85, 0xb4,
-+			0xe3, 0x17, 0xed, 0x3a, 0xb7, 0x6f, 0xb4, 0x0b,
-+			0x0b, 0xaf, 0x15, 0xae, 0x5a, 0x8f, 0xf2, 0x0c,
-+			0x2f, 0x27, 0xf4, 0x09, 0xd8, 0xd2, 0x96, 0xb7,
-+			0x71, 0xf2, 0xc5, 0x99, 0x4d, 0x7e, 0x7f, 0x75,
-+			0x77, 0x89, 0x30, 0x8b, 0x59, 0xdb, 0xa2, 0xb2,
-+			0xa0, 0xf3, 0x19, 0x39, 0x2b, 0xc5, 0x7e, 0x3f,
-+			0x4f, 0xd9, 0xd3, 0x56, 0x28, 0x97, 0x44, 0xdc,
-+			0xc0, 0x8b, 0x77, 0x24, 0xd9, 0x52, 0xe7, 0xc5,
-+			0xaf, 0xf6, 0x7d, 0x59, 0xb2, 0x44, 0x05, 0x1d,
-+			0xb1, 0xb0, 0x11, 0xa5, 0x0f, 0xec, 0x33, 0xe1,
-+			0x6d, 0x1b, 0x4e, 0x1f, 0xff, 0x57, 0x91, 0xb4,
-+			0x5b, 0x9a, 0x96, 0xc5, 0x53, 0xbc, 0xae, 0x20,
-+			0x3c, 0xbb, 0x14, 0xe2, 0xe8, 0x22, 0x33, 0xc1,
-+			0x5e, 0x76, 0x9e, 0x46, 0x99, 0xf6, 0x2a, 0x15,
-+			0xc6, 0x97, 0x02, 0xa0, 0x66, 0x43, 0xd1, 0xa6,
-+			0x31, 0xa6, 0x9f, 0xfb, 0xf4, 0xd3, 0x69, 0xe5,
-+			0xcd, 0x76, 0x95, 0xb8, 0x7a, 0x82, 0x7f, 0x21,
-+			0x45, 0xff, 0x3f, 0xce, 0x55, 0xf6, 0x95, 0x10,
-+			0x08, 0x77, 0x10, 0x43, 0xc6, 0xf3, 0x09, 0xe5,
-+			0x68, 0xe7, 0x3c, 0xad, 0x00, 0x52, 0x45, 0x0d,
-+			0xfe, 0x2d, 0xc6, 0xc2, 0x94, 0x8c, 0x12, 0x1d,
-+			0xe6, 0x25, 0xae, 0x98, 0x12, 0x8e, 0x19, 0x9c,
-+			0x81, 0x68, 0xb1, 0x11, 0xf6, 0x69, 0xda, 0xe3,
-+			0x62, 0x08, 0x18, 0x7a, 0x25, 0x49, 0x28, 0xac,
-+			0xba, 0x71, 0x12, 0x0b, 0xe4, 0xa2, 0xe5, 0xc7,
-+			0x5d, 0x8e, 0xec, 0x49, 0x40, 0x21, 0xbf, 0x5a,
-+			0x98, 0xf3, 0x02, 0x68, 0x55, 0x03, 0x7f, 0x8a,
-+			0xe5, 0x94, 0x0c, 0x32, 0x5c, 0x07, 0x82, 0x63,
-+			0xaf, 0x6f, 0x91, 0x40, 0x84, 0x8e, 0x52, 0x25,
-+			0xd0, 0xb0, 0x29, 0x53, 0x05, 0xe2, 0x50, 0x7a,
-+			0x34, 0xeb, 0xc9, 0x46, 0x20, 0xa8, 0x3d, 0xde,
-+			0x7f, 0x16, 0x5f, 0x36, 0xc5, 0x2e, 0xdc, 0xd1,
-+			0x15, 0x47, 0xc7, 0x50, 0x40, 0x6d, 0x91, 0xc5,
-+			0xe7, 0x93, 0x95, 0x1a, 0xd3, 0x57, 0xbc, 0x52,
-+			0x33, 0xee, 0x14, 0x19, 0x22, 0x52, 0x89, 0xa7,
-+			0x4a, 0x25, 0x56, 0x77, 0x4b, 0xca, 0xcf, 0x0a,
-+			0xe1, 0xf5, 0x35, 0x85, 0x30, 0x7e, 0x59, 0x4a,
-+			0xbd, 0x14, 0x5b, 0xdf, 0xe3, 0x46, 0xcb, 0xac,
-+			0x1f, 0x6c, 0x96, 0x0e, 0xf4, 0x81, 0xd1, 0x99,
-+			0xca, 0x88, 0x63, 0x3d, 0x02, 0x58, 0x6b, 0xa9,
-+			0xe5, 0x9f, 0xb3, 0x00, 0xb2, 0x54, 0xc6, 0x74,
-+			0x1c, 0xbf, 0x46, 0xab, 0x97, 0xcc, 0xf8, 0x54,
-+			0x04, 0x07, 0x08, 0x52, 0xe6, 0xc0, 0xda, 0x93,
-+			0x74, 0x7d, 0x93, 0x99, 0x5d, 0x78, 0x68, 0xa6,
-+			0x2e, 0x6b, 0xd3, 0x6a, 0x69, 0xcc, 0x12, 0x6b,
-+			0xd4, 0xc7, 0xa5, 0xc6, 0xe7, 0xf6, 0x03, 0x04,
-+			0x5d, 0xcd, 0x61, 0x5e, 0x17, 0x40, 0xdc, 0xd1,
-+			0x5c, 0xf5, 0x08, 0xdf, 0x5c, 0x90, 0x85, 0xa4,
-+			0xaf, 0xf6, 0x78, 0xbb, 0x0d, 0xf1, 0xf4, 0xa4,
-+			0x54, 0x26, 0x72, 0x9e, 0x61, 0xfa, 0x86, 0xcf,
-+			0xe8, 0x9e, 0xa1, 0xe0, 0xc7, 0x48, 0x23, 0xae,
-+			0x5a, 0x90, 0xae, 0x75, 0x0a, 0x74, 0x18, 0x89,
-+			0x05, 0xb1, 0x92, 0xb2, 0x7f, 0xd0, 0x1b, 0xa6,
-+			0x62, 0x07, 0x25, 0x01, 0xc7, 0xc2, 0x4f, 0xf9,
-+			0xe8, 0xfe, 0x63, 0x95, 0x80, 0x07, 0xb4, 0x26,
-+			0xcc, 0xd1, 0x26, 0xb6, 0xc4, 0x3f, 0x9e, 0xcb,
-+			0x8e, 0x3b, 0x2e, 0x44, 0x16, 0xd3, 0x10, 0x9a,
-+			0x95, 0x08, 0xeb, 0xc8, 0xcb, 0xeb, 0xbf, 0x6f,
-+			0x0b, 0xcd, 0x1f, 0xc8, 0xca, 0x86, 0xaa, 0xec,
-+			0x33, 0xe6, 0x69, 0xf4, 0x45, 0x25, 0x86, 0x3a,
-+			0x22, 0x94, 0x4f, 0x00, 0x23, 0x6a, 0x44, 0xc2,
-+			0x49, 0x97, 0x33, 0xab, 0x36, 0x14, 0x0a, 0x70,
-+			0x24, 0xc3, 0xbe, 0x04, 0x3b, 0x79, 0xa0, 0xf9,
-+			0xb8, 0xe7, 0x76, 0x29, 0x22, 0x83, 0xd7, 0xf2,
-+			0x94, 0xf4, 0x41, 0x49, 0xba, 0x5f, 0x7b, 0x07,
-+			0xb5, 0xfb, 0xdb, 0x03, 0x1a, 0x9f, 0xb6, 0x4c,
-+			0xc2, 0x2e, 0x37, 0x40, 0x49, 0xc3, 0x38, 0x16,
-+			0xe2, 0x4f, 0x77, 0x82, 0xb0, 0x68, 0x4c, 0x71,
-+			0x1d, 0x57, 0x61, 0x9c, 0xd9, 0x4e, 0x54, 0x99,
-+			0x47, 0x13, 0x28, 0x73, 0x3c, 0xbb, 0x00, 0x90,
-+			0xf3, 0x4d, 0xc9, 0x0e, 0xfd, 0xe7, 0xb1, 0x71,
-+			0xd3, 0x15, 0x79, 0xbf, 0xcc, 0x26, 0x2f, 0xbd,
-+			0xad, 0x6c, 0x50, 0x69, 0x6c, 0x3e, 0x6d, 0x80,
-+			0x9a, 0xea, 0x78, 0xaf, 0x19, 0xb2, 0x0d, 0x4d,
-+			0xad, 0x04, 0x07, 0xae, 0x22, 0x90, 0x4a, 0x93,
-+			0x32, 0x0e, 0x36, 0x9b, 0x1b, 0x46, 0xba, 0x3b,
-+			0xb4, 0xac, 0xc6, 0xd1, 0xa2, 0x31, 0x53, 0x3b,
-+			0x2a, 0x3d, 0x45, 0xfe, 0x03, 0x61, 0x10, 0x85,
-+			0x17, 0x69, 0xa6, 0x78, 0xcc, 0x6c, 0x87, 0x49,
-+			0x53, 0xf9, 0x80, 0x10, 0xde, 0x80, 0xa2, 0x41,
-+			0x6a, 0xc3, 0x32, 0x02, 0xad, 0x6d, 0x3c, 0x56,
-+			0x00, 0x71, 0x51, 0x06, 0xa7, 0xbd, 0xfb, 0xef,
-+			0x3c, 0xb5, 0x9f, 0xfc, 0x48, 0x7d, 0x53, 0x7c,
-+			0x66, 0xb0, 0x49, 0x23, 0xc4, 0x47, 0x10, 0x0e,
-+			0xe5, 0x6c, 0x74, 0x13, 0xe6, 0xc5, 0x3f, 0xaa,
-+			0xde, 0xff, 0x07, 0x44, 0xdd, 0x56, 0x1b, 0xad,
-+			0x09, 0x77, 0xfb, 0x5b, 0x12, 0xb8, 0x0d, 0x38,
-+			0x17, 0x37, 0x35, 0x7b, 0x9b, 0xbc, 0xfe, 0xd4,
-+			0x7e, 0x8b, 0xda, 0x7e, 0x5b, 0x04, 0xa7, 0x22,
-+			0xa7, 0x31, 0xa1, 0x20, 0x86, 0xc7, 0x1b, 0x99,
-+			0xdb, 0xd1, 0x89, 0xf4, 0x94, 0xa3, 0x53, 0x69,
-+			0x8d, 0xe7, 0xe8, 0x74, 0x11, 0x8d, 0x74, 0xd6,
-+			0x07, 0x37, 0x91, 0x9f, 0xfd, 0x67, 0x50, 0x3a,
-+			0xc9, 0xe1, 0xf4, 0x36, 0xd5, 0xa0, 0x47, 0xd1,
-+			0xf9, 0xe5, 0x39, 0xa3, 0x31, 0xac, 0x07, 0x36,
-+			0x23, 0xf8, 0x66, 0x18, 0x14, 0x28, 0x34, 0x0f,
-+			0xb8, 0xd0, 0xe7, 0x29, 0xb3, 0x04, 0x4b, 0x55,
-+			0x01, 0x41, 0xb2, 0x75, 0x8d, 0xcb, 0x96, 0x85,
-+			0x3a, 0xfb, 0xab, 0x2b, 0x9e, 0xfa, 0x58, 0x20,
-+			0x44, 0x1f, 0xc0, 0x14, 0x22, 0x75, 0x61, 0xe8,
-+			0xaa, 0x19, 0xcf, 0xf1, 0x82, 0x56, 0xf4, 0xd7,
-+			0x78, 0x7b, 0x3d, 0x5f, 0xb3, 0x9e, 0x0b, 0x8a,
-+			0x57, 0x50, 0xdb, 0x17, 0x41, 0x65, 0x4d, 0xa3,
-+			0x02, 0xc9, 0x9c, 0x9c, 0x53, 0xfb, 0x39, 0x39,
-+			0x9b, 0x1d, 0x72, 0x24, 0xda, 0xb7, 0x39, 0xbe,
-+			0x13, 0x3b, 0xfa, 0x29, 0xda, 0x9e, 0x54, 0x64,
-+			0x6e, 0xba, 0xd8, 0xa1, 0xcb, 0xb3, 0x36, 0xfa,
-+			0xcb, 0x47, 0x85, 0xe9, 0x61, 0x38, 0xbc, 0xbe,
-+			0xc5, 0x00, 0x38, 0x2a, 0x54, 0xf7, 0xc4, 0xb9,
-+			0xb3, 0xd3, 0x7b, 0xa0, 0xa0, 0xf8, 0x72, 0x7f,
-+			0x8c, 0x8e, 0x82, 0x0e, 0xc6, 0x1c, 0x75, 0x9d,
-+			0xca, 0x8e, 0x61, 0x87, 0xde, 0xad, 0x80, 0xd2,
-+			0xf5, 0xf9, 0x80, 0xef, 0x15, 0x75, 0xaf, 0xf5,
-+			0x80, 0xfb, 0xff, 0x6d, 0x1e, 0x25, 0xb7, 0x40,
-+			0x61, 0x6a, 0x39, 0x5a, 0x6a, 0xb5, 0x31, 0xab,
-+			0x97, 0x8a, 0x19, 0x89, 0x44, 0x40, 0xc0, 0xa6,
-+			0xb4, 0x4e, 0x30, 0x32, 0x7b, 0x13, 0xe7, 0x67,
-+			0xa9, 0x8b, 0x57, 0x04, 0xc2, 0x01, 0xa6, 0xf4,
-+			0x28, 0x99, 0xad, 0x2c, 0x76, 0xa3, 0x78, 0xc2,
-+			0x4a, 0xe6, 0xca, 0x5c, 0x50, 0x6a, 0xc1, 0xb0,
-+			0x62, 0x4b, 0x10, 0x8e, 0x7c, 0x17, 0x43, 0xb3,
-+			0x17, 0x66, 0x1c, 0x3e, 0x8d, 0x69, 0xf0, 0x5a,
-+			0x71, 0xf5, 0x97, 0xdc, 0xd1, 0x45, 0xdd, 0x28,
-+			0xf3, 0x5d, 0xdf, 0x53, 0x7b, 0x11, 0xe5, 0xbc,
-+			0x4c, 0xdb, 0x1b, 0x51, 0x6b, 0xe9, 0xfb, 0x3d,
-+			0xc1, 0xc3, 0x2c, 0xb9, 0x71, 0xf5, 0xb6, 0xb2,
-+			0x13, 0x36, 0x79, 0x80, 0x53, 0xe8, 0xd3, 0xa6,
-+			0x0a, 0xaf, 0xfd, 0x56, 0x97, 0xf7, 0x40, 0x8e,
-+			0x45, 0xce, 0xf8, 0xb0, 0x9e, 0x5c, 0x33, 0x82,
-+			0xb0, 0x44, 0x56, 0xfc, 0x05, 0x09, 0xe9, 0x2a,
-+			0xac, 0x26, 0x80, 0x14, 0x1d, 0xc8, 0x3a, 0x35,
-+			0x4c, 0x82, 0x97, 0xfd, 0x76, 0xb7, 0xa9, 0x0a,
-+			0x35, 0x58, 0x79, 0x8e, 0x0f, 0x66, 0xea, 0xaf,
-+			0x51, 0x6c, 0x09, 0xa9, 0x6e, 0x9b, 0xcb, 0x9a,
-+			0x31, 0x47, 0xa0, 0x2f, 0x7c, 0x71, 0xb4, 0x4a,
-+			0x11, 0xaa, 0x8c, 0x66, 0xc5, 0x64, 0xe6, 0x3a,
-+			0x54, 0xda, 0x24, 0x6a, 0xc4, 0x41, 0x65, 0x46,
-+			0x82, 0xa0, 0x0a, 0x0f, 0x5f, 0xfb, 0x25, 0xd0,
-+			0x2c, 0x91, 0xa7, 0xee, 0xc4, 0x81, 0x07, 0x86,
-+			0x75, 0x5e, 0x33, 0x69, 0x97, 0xe4, 0x2c, 0xa8,
-+			0x9d, 0x9f, 0x0b, 0x6a, 0xbe, 0xad, 0x98, 0xda,
-+			0x6d, 0x94, 0x41, 0xda, 0x2c, 0x1e, 0x89, 0xc4,
-+			0xc2, 0xaf, 0x1e, 0x00, 0x05, 0x0b, 0x83, 0x60,
-+			0xbd, 0x43, 0xea, 0x15, 0x23, 0x7f, 0xb9, 0xac,
-+			0xee, 0x4f, 0x2c, 0xaf, 0x2a, 0xf3, 0xdf, 0xd0,
-+			0xf3, 0x19, 0x31, 0xbb, 0x4a, 0x74, 0x84, 0x17,
-+			0x52, 0x32, 0x2c, 0x7d, 0x61, 0xe4, 0xcb, 0xeb,
-+			0x80, 0x38, 0x15, 0x52, 0xcb, 0x6f, 0xea, 0xe5,
-+			0x73, 0x9c, 0xd9, 0x24, 0x69, 0xc6, 0x95, 0x32,
-+			0x21, 0xc8, 0x11, 0xe4, 0xdc, 0x36, 0xd7, 0x93,
-+			0x38, 0x66, 0xfb, 0xb2, 0x7f, 0x3a, 0xb9, 0xaf,
-+			0x31, 0xdd, 0x93, 0x75, 0x78, 0x8a, 0x2c, 0x94,
-+			0x87, 0x1a, 0x58, 0xec, 0x9e, 0x7d, 0x4d, 0xba,
-+			0xe1, 0xe5, 0x4d, 0xfc, 0xbc, 0xa4, 0x2a, 0x14,
-+			0xef, 0xcc, 0xa7, 0xec, 0xab, 0x43, 0x09, 0x18,
-+			0xd3, 0xab, 0x68, 0xd1, 0x07, 0x99, 0x44, 0x47,
-+			0xd6, 0x83, 0x85, 0x3b, 0x30, 0xea, 0xa9, 0x6b,
-+			0x63, 0xea, 0xc4, 0x07, 0xfb, 0x43, 0x2f, 0xa4,
-+			0xaa, 0xb0, 0xab, 0x03, 0x89, 0xce, 0x3f, 0x8c,
-+			0x02, 0x7c, 0x86, 0x54, 0xbc, 0x88, 0xaf, 0x75,
-+			0xd2, 0xdc, 0x63, 0x17, 0xd3, 0x26, 0xf6, 0x96,
-+			0xa9, 0x3c, 0xf1, 0x61, 0x8c, 0x11, 0x18, 0xcc,
-+			0xd6, 0xea, 0x5b, 0xe2, 0xcd, 0xf0, 0xf1, 0xb2,
-+			0xe5, 0x35, 0x90, 0x1f, 0x85, 0x4c, 0x76, 0x5b,
-+			0x66, 0xce, 0x44, 0xa4, 0x32, 0x9f, 0xe6, 0x7b,
-+			0x71, 0x6e, 0x9f, 0x58, 0x15, 0x67, 0x72, 0x87,
-+			0x64, 0x8e, 0x3a, 0x44, 0x45, 0xd4, 0x76, 0xfa,
-+			0xc2, 0xf6, 0xef, 0x85, 0x05, 0x18, 0x7a, 0x9b,
-+			0xba, 0x41, 0x54, 0xac, 0xf0, 0xfc, 0x59, 0x12,
-+			0x3f, 0xdf, 0xa0, 0xe5, 0x8a, 0x65, 0xfd, 0x3a,
-+			0x62, 0x8d, 0x83, 0x2c, 0x03, 0xbe, 0x05, 0x76,
-+			0x2e, 0x53, 0x49, 0x97, 0x94, 0x33, 0xae, 0x40,
-+			0x81, 0x15, 0xdb, 0x6e, 0xad, 0xaa, 0xf5, 0x4b,
-+			0xe3, 0x98, 0x70, 0xdf, 0xe0, 0x7c, 0xcd, 0xdb,
-+			0x02, 0xd4, 0x7d, 0x2f, 0xc1, 0xe6, 0xb4, 0xf3,
-+			0xd7, 0x0d, 0x7a, 0xd9, 0x23, 0x9e, 0x87, 0x2d,
-+			0xce, 0x87, 0xad, 0xcc, 0x72, 0x05, 0x00, 0x29,
-+			0xdc, 0x73, 0x7f, 0x64, 0xc1, 0x15, 0x0e, 0xc2,
-+			0xdf, 0xa7, 0x5f, 0xeb, 0x41, 0xa1, 0xcd, 0xef,
-+			0x5c, 0x50, 0x79, 0x2a, 0x56, 0x56, 0x71, 0x8c,
-+			0xac, 0xc0, 0x79, 0x50, 0x69, 0xca, 0x59, 0x32,
-+			0x65, 0xf2, 0x54, 0xe4, 0x52, 0x38, 0x76, 0xd1,
-+			0x5e, 0xde, 0x26, 0x9e, 0xfb, 0x75, 0x2e, 0x11,
-+			0xb5, 0x10, 0xf4, 0x17, 0x73, 0xf5, 0x89, 0xc7,
-+			0x4f, 0x43, 0x5c, 0x8e, 0x7c, 0xb9, 0x05, 0x52,
-+			0x24, 0x40, 0x99, 0xfe, 0x9b, 0x85, 0x0b, 0x6c,
-+			0x22, 0x3e, 0x8b, 0xae, 0x86, 0xa1, 0xd2, 0x79,
-+			0x05, 0x68, 0x6b, 0xab, 0xe3, 0x41, 0x49, 0xed,
-+			0x15, 0xa1, 0x8d, 0x40, 0x2d, 0x61, 0xdf, 0x1a,
-+			0x59, 0xc9, 0x26, 0x8b, 0xef, 0x30, 0x4c, 0x88,
-+			0x4b, 0x10, 0xf8, 0x8d, 0xa6, 0x92, 0x9f, 0x4b,
-+			0xf3, 0xc4, 0x53, 0x0b, 0x89, 0x5d, 0x28, 0x92,
-+			0xcf, 0x78, 0xb2, 0xc0, 0x5d, 0xed, 0x7e, 0xfc,
-+			0xc0, 0x12, 0x23, 0x5f, 0x5a, 0x78, 0x86, 0x43,
-+			0x6e, 0x27, 0xf7, 0x5a, 0xa7, 0x6a, 0xed, 0x19,
-+			0x04, 0xf0, 0xb3, 0x12, 0xd1, 0xbd, 0x0e, 0x89,
-+			0x6e, 0xbc, 0x96, 0xa8, 0xd8, 0x49, 0x39, 0x9f,
-+			0x7e, 0x67, 0xf0, 0x2e, 0x3e, 0x01, 0xa9, 0xba,
-+			0xec, 0x8b, 0x62, 0x8e, 0xcb, 0x4a, 0x70, 0x43,
-+			0xc7, 0xc2, 0xc4, 0xca, 0x82, 0x03, 0x73, 0xe9,
-+			0x11, 0xdf, 0xcf, 0x54, 0xea, 0xc9, 0xb0, 0x95,
-+			0x51, 0xc0, 0x13, 0x3d, 0x92, 0x05, 0xfa, 0xf4,
-+			0xa9, 0x34, 0xc8, 0xce, 0x6c, 0x3d, 0x54, 0xcc,
-+			0xc4, 0xaf, 0xf1, 0xdc, 0x11, 0x44, 0x26, 0xa2,
-+			0xaf, 0xf1, 0x85, 0x75, 0x7d, 0x03, 0x61, 0x68,
-+			0x4e, 0x78, 0xc6, 0x92, 0x7d, 0x86, 0x7d, 0x77,
-+			0xdc, 0x71, 0x72, 0xdb, 0xc6, 0xae, 0xa1, 0xcb,
-+			0x70, 0x9a, 0x0b, 0x19, 0xbe, 0x4a, 0x6c, 0x2a,
-+			0xe2, 0xba, 0x6c, 0x64, 0x9a, 0x13, 0x28, 0xdf,
-+			0x85, 0x75, 0xe6, 0x43, 0xf6, 0x87, 0x08, 0x68,
-+			0x6e, 0xba, 0x6e, 0x79, 0x9f, 0x04, 0xbc, 0x23,
-+			0x50, 0xf6, 0x33, 0x5c, 0x1f, 0x24, 0x25, 0xbe,
-+			0x33, 0x47, 0x80, 0x45, 0x56, 0xa3, 0xa7, 0xd7,
-+			0x7a, 0xb1, 0x34, 0x0b, 0x90, 0x3c, 0x9c, 0xad,
-+			0x44, 0x5f, 0x9e, 0x0e, 0x9d, 0xd4, 0xbd, 0x93,
-+			0x5e, 0xfa, 0x3c, 0xe0, 0xb0, 0xd9, 0xed, 0xf3,
-+			0xd6, 0x2e, 0xff, 0x24, 0xd8, 0x71, 0x6c, 0xed,
-+			0xaf, 0x55, 0xeb, 0x22, 0xac, 0x93, 0x68, 0x32,
-+			0x05, 0x5b, 0x47, 0xdd, 0xc6, 0x4a, 0xcb, 0xc7,
-+			0x10, 0xe1, 0x3c, 0x92, 0x1a, 0xf3, 0x23, 0x78,
-+			0x2b, 0xa1, 0xd2, 0x80, 0xf4, 0x12, 0xb1, 0x20,
-+			0x8f, 0xff, 0x26, 0x35, 0xdd, 0xfb, 0xc7, 0x4e,
-+			0x78, 0xf1, 0x2d, 0x50, 0x12, 0x77, 0xa8, 0x60,
-+			0x7c, 0x0f, 0xf5, 0x16, 0x2f, 0x63, 0x70, 0x2a,
-+			0xc0, 0x96, 0x80, 0x4e, 0x0a, 0xb4, 0x93, 0x35,
-+			0x5d, 0x1d, 0x3f, 0x56, 0xf7, 0x2f, 0xbb, 0x90,
-+			0x11, 0x16, 0x8f, 0xa2, 0xec, 0x47, 0xbe, 0xac,
-+			0x56, 0x01, 0x26, 0x56, 0xb1, 0x8c, 0xb2, 0x10,
-+			0xf9, 0x1a, 0xca, 0xf5, 0xd1, 0xb7, 0x39, 0x20,
-+			0x63, 0xf1, 0x69, 0x20, 0x4f, 0x13, 0x12, 0x1f,
-+			0x5b, 0x65, 0xfc, 0x98, 0xf7, 0xc4, 0x7a, 0xbe,
-+			0xf7, 0x26, 0x4d, 0x2b, 0x84, 0x7b, 0x42, 0xad,
-+			0xd8, 0x7a, 0x0a, 0xb4, 0xd8, 0x74, 0xbf, 0xc1,
-+			0xf0, 0x6e, 0xb4, 0x29, 0xa3, 0xbb, 0xca, 0x46,
-+			0x67, 0x70, 0x6a, 0x2d, 0xce, 0x0e, 0xa2, 0x8a,
-+			0xa9, 0x87, 0xbf, 0x05, 0xc4, 0xc1, 0x04, 0xa3,
-+			0xab, 0xd4, 0x45, 0x43, 0x8c, 0xb6, 0x02, 0xb0,
-+			0x41, 0xc8, 0xfc, 0x44, 0x3d, 0x59, 0xaa, 0x2e,
-+			0x44, 0x21, 0x2a, 0x8d, 0x88, 0x9d, 0x57, 0xf4,
-+			0xa0, 0x02, 0x77, 0xb8, 0xa6, 0xa0, 0xe6, 0x75,
-+			0x5c, 0x82, 0x65, 0x3e, 0x03, 0x5c, 0x29, 0x8f,
-+			0x38, 0x55, 0xab, 0x33, 0x26, 0xef, 0x9f, 0x43,
-+			0x52, 0xfd, 0x68, 0xaf, 0x36, 0xb4, 0xbb, 0x9a,
-+			0x58, 0x09, 0x09, 0x1b, 0xc3, 0x65, 0x46, 0x46,
-+			0x1d, 0xa7, 0x94, 0x18, 0x23, 0x50, 0x2c, 0xca,
-+			0x2c, 0x55, 0x19, 0x97, 0x01, 0x9d, 0x93, 0x3b,
-+			0x63, 0x86, 0xf2, 0x03, 0x67, 0x45, 0xd2, 0x72,
-+			0x28, 0x52, 0x6c, 0xf4, 0xe3, 0x1c, 0xb5, 0x11,
-+			0x13, 0xf1, 0xeb, 0x21, 0xc7, 0xd9, 0x56, 0x82,
-+			0x2b, 0x82, 0x39, 0xbd, 0x69, 0x54, 0xed, 0x62,
-+			0xc3, 0xe2, 0xde, 0x73, 0xd4, 0x6a, 0x12, 0xae,
-+			0x13, 0x21, 0x7f, 0x4b, 0x5b, 0xfc, 0xbf, 0xe8,
-+			0x2b, 0xbe, 0x56, 0xba, 0x68, 0x8b, 0x9a, 0xb1,
-+			0x6e, 0xfa, 0xbf, 0x7e, 0x5a, 0x4b, 0xf1, 0xac,
-+			0x98, 0x65, 0x85, 0xd1, 0x93, 0x53, 0xd3, 0x7b,
-+			0x09, 0xdd, 0x4b, 0x10, 0x6d, 0x84, 0xb0, 0x13,
-+			0x65, 0xbd, 0xcf, 0x52, 0x09, 0xc4, 0x85, 0xe2,
-+			0x84, 0x74, 0x15, 0x65, 0xb7, 0xf7, 0x51, 0xaf,
-+			0x55, 0xad, 0xa4, 0xd1, 0x22, 0x54, 0x70, 0x94,
-+			0xa0, 0x1c, 0x90, 0x41, 0xfd, 0x99, 0xd7, 0x5a,
-+			0x31, 0xef, 0xaa, 0x25, 0xd0, 0x7f, 0x4f, 0xea,
-+			0x1d, 0x55, 0x42, 0xe5, 0x49, 0xb0, 0xd0, 0x46,
-+			0x62, 0x36, 0x43, 0xb2, 0x82, 0x15, 0x75, 0x50,
-+			0xa4, 0x72, 0xeb, 0x54, 0x27, 0x1f, 0x8a, 0xe4,
-+			0x7d, 0xe9, 0x66, 0xc5, 0xf1, 0x53, 0xa4, 0xd1,
-+			0x0c, 0xeb, 0xb8, 0xf8, 0xbc, 0xd4, 0xe2, 0xe7,
-+			0xe1, 0xf8, 0x4b, 0xcb, 0xa9, 0xa1, 0xaf, 0x15,
-+			0x83, 0xcb, 0x72, 0xd0, 0x33, 0x79, 0x00, 0x2d,
-+			0x9f, 0xd7, 0xf1, 0x2e, 0x1e, 0x10, 0xe4, 0x45,
-+			0xc0, 0x75, 0x3a, 0x39, 0xea, 0x68, 0xf7, 0x5d,
-+			0x1b, 0x73, 0x8f, 0xe9, 0x8e, 0x0f, 0x72, 0x47,
-+			0xae, 0x35, 0x0a, 0x31, 0x7a, 0x14, 0x4d, 0x4a,
-+			0x6f, 0x47, 0xf7, 0x7e, 0x91, 0x6e, 0x74, 0x8b,
-+			0x26, 0x47, 0xf9, 0xc3, 0xf9, 0xde, 0x70, 0xf5,
-+			0x61, 0xab, 0xa9, 0x27, 0x9f, 0x82, 0xe4, 0x9c,
-+			0x89, 0x91, 0x3f, 0x2e, 0x6a, 0xfd, 0xb5, 0x49,
-+			0xe9, 0xfd, 0x59, 0x14, 0x36, 0x49, 0x40, 0x6d,
-+			0x32, 0xd8, 0x85, 0x42, 0xf3, 0xa5, 0xdf, 0x0c,
-+			0xa8, 0x27, 0xd7, 0x54, 0xe2, 0x63, 0x2f, 0xf2,
-+			0x7e, 0x8b, 0x8b, 0xe7, 0xf1, 0x9a, 0x95, 0x35,
-+			0x43, 0xdc, 0x3a, 0xe4, 0xb6, 0xf4, 0xd0, 0xdf,
-+			0x9c, 0xcb, 0x94, 0xf3, 0x21, 0xa0, 0x77, 0x50,
-+			0xe2, 0xc6, 0xc4, 0xc6, 0x5f, 0x09, 0x64, 0x5b,
-+			0x92, 0x90, 0xd8, 0xe1, 0xd1, 0xed, 0x4b, 0x42,
-+			0xd7, 0x37, 0xaf, 0x65, 0x3d, 0x11, 0x39, 0xb6,
-+			0x24, 0x8a, 0x60, 0xae, 0xd6, 0x1e, 0xbf, 0x0e,
-+			0x0d, 0xd7, 0xdc, 0x96, 0x0e, 0x65, 0x75, 0x4e,
-+			0x29, 0x06, 0x9d, 0xa4, 0x51, 0x3a, 0x10, 0x63,
-+			0x8f, 0x17, 0x07, 0xd5, 0x8e, 0x3c, 0xf4, 0x28,
-+			0x00, 0x5a, 0x5b, 0x05, 0x19, 0xd8, 0xc0, 0x6c,
-+			0xe5, 0x15, 0xe4, 0x9c, 0x9d, 0x71, 0x9d, 0x5e,
-+			0x94, 0x29, 0x1a, 0xa7, 0x80, 0xfa, 0x0e, 0x33,
-+			0x03, 0xdd, 0xb7, 0x3e, 0x9a, 0xa9, 0x26, 0x18,
-+			0x37, 0xa9, 0x64, 0x08, 0x4d, 0x94, 0x5a, 0x88,
-+			0xca, 0x35, 0xce, 0x81, 0x02, 0xe3, 0x1f, 0x1b,
-+			0x89, 0x1a, 0x77, 0x85, 0xe3, 0x41, 0x6d, 0x32,
-+			0x42, 0x19, 0x23, 0x7d, 0xc8, 0x73, 0xee, 0x25,
-+			0x85, 0x0d, 0xf8, 0x31, 0x25, 0x79, 0x1b, 0x6f,
-+			0x79, 0x25, 0xd2, 0xd8, 0xd4, 0x23, 0xfd, 0xf7,
-+			0x82, 0x36, 0x6a, 0x0c, 0x46, 0x22, 0x15, 0xe9,
-+			0xff, 0x72, 0x41, 0x91, 0x91, 0x7d, 0x3a, 0xb7,
-+			0xdd, 0x65, 0x99, 0x70, 0xf6, 0x8d, 0x84, 0xf8,
-+			0x67, 0x15, 0x20, 0x11, 0xd6, 0xb2, 0x55, 0x7b,
-+			0xdb, 0x87, 0xee, 0xef, 0x55, 0x89, 0x2a, 0x59,
-+			0x2b, 0x07, 0x8f, 0x43, 0x8a, 0x59, 0x3c, 0x01,
-+			0x8b, 0x65, 0x54, 0xa1, 0x66, 0xd5, 0x38, 0xbd,
-+			0xc6, 0x30, 0xa9, 0xcc, 0x49, 0xb6, 0xa8, 0x1b,
-+			0xb8, 0xc0, 0x0e, 0xe3, 0x45, 0x28, 0xe2, 0xff,
-+			0x41, 0x9f, 0x7e, 0x7c, 0xd1, 0xae, 0x9e, 0x25,
-+			0x3f, 0x4c, 0x7c, 0x7c, 0xf4, 0xa8, 0x26, 0x4d,
-+			0x5c, 0xfd, 0x4b, 0x27, 0x18, 0xf9, 0x61, 0x76,
-+			0x48, 0xba, 0x0c, 0x6b, 0xa9, 0x4d, 0xfc, 0xf5,
-+			0x3b, 0x35, 0x7e, 0x2f, 0x4a, 0xa9, 0xc2, 0x9a,
-+			0xae, 0xab, 0x86, 0x09, 0x89, 0xc9, 0xc2, 0x40,
-+			0x39, 0x2c, 0x81, 0xb3, 0xb8, 0x17, 0x67, 0xc2,
-+			0x0d, 0x32, 0x4a, 0x3a, 0x67, 0x81, 0xd7, 0x1a,
-+			0x34, 0x52, 0xc5, 0xdb, 0x0a, 0xf5, 0x63, 0x39,
-+			0xea, 0x1f, 0xe1, 0x7c, 0xa1, 0x9e, 0xc1, 0x35,
-+			0xe3, 0xb1, 0x18, 0x45, 0x67, 0xf9, 0x22, 0x38,
-+			0x95, 0xd9, 0x34, 0x34, 0x86, 0xc6, 0x41, 0x94,
-+			0x15, 0xf9, 0x5b, 0x41, 0xa6, 0x87, 0x8b, 0xf8,
-+			0xd5, 0xe1, 0x1b, 0xe2, 0x5b, 0xf3, 0x86, 0x10,
-+			0xff, 0xe6, 0xae, 0x69, 0x76, 0xbc, 0x0d, 0xb4,
-+			0x09, 0x90, 0x0c, 0xa2, 0x65, 0x0c, 0xad, 0x74,
-+			0xf5, 0xd7, 0xff, 0xda, 0xc1, 0xce, 0x85, 0xbe,
-+			0x00, 0xa7, 0xff, 0x4d, 0x2f, 0x65, 0xd3, 0x8c,
-+			0x86, 0x2d, 0x05, 0xe8, 0xed, 0x3e, 0x6b, 0x8b,
-+			0x0f, 0x3d, 0x83, 0x8c, 0xf1, 0x1d, 0x5b, 0x96,
-+			0x2e, 0xb1, 0x9c, 0xc2, 0x98, 0xe1, 0x70, 0xb9,
-+			0xba, 0x5c, 0x8a, 0x43, 0xd6, 0x34, 0xa7, 0x2d,
-+			0xc9, 0x92, 0xae, 0xf2, 0xa5, 0x7b, 0x05, 0x49,
-+			0xa7, 0x33, 0x34, 0x86, 0xca, 0xe4, 0x96, 0x23,
-+			0x76, 0x5b, 0xf2, 0xc6, 0xf1, 0x51, 0x28, 0x42,
-+			0x7b, 0xcc, 0x76, 0x8f, 0xfa, 0xa2, 0xad, 0x31,
-+			0xd4, 0xd6, 0x7a, 0x6d, 0x25, 0x25, 0x54, 0xe4,
-+			0x3f, 0x50, 0x59, 0xe1, 0x5c, 0x05, 0xb7, 0x27,
-+			0x48, 0xbf, 0x07, 0xec, 0x1b, 0x13, 0xbe, 0x2b,
-+			0xa1, 0x57, 0x2b, 0xd5, 0xab, 0xd7, 0xd0, 0x4c,
-+			0x1e, 0xcb, 0x71, 0x9b, 0xc5, 0x90, 0x85, 0xd3,
-+			0xde, 0x59, 0xec, 0x71, 0xeb, 0x89, 0xbb, 0xd0,
-+			0x09, 0x50, 0xe1, 0x16, 0x3f, 0xfd, 0x1c, 0x34,
-+			0xc3, 0x1c, 0xa1, 0x10, 0x77, 0x53, 0x98, 0xef,
-+			0xf2, 0xfd, 0xa5, 0x01, 0x59, 0xc2, 0x9b, 0x26,
-+			0xc7, 0x42, 0xd9, 0x49, 0xda, 0x58, 0x2b, 0x6e,
-+			0x9f, 0x53, 0x19, 0x76, 0x7e, 0xd9, 0xc9, 0x0e,
-+			0x68, 0xc8, 0x7f, 0x51, 0x22, 0x42, 0xef, 0x49,
-+			0xa4, 0x55, 0xb6, 0x36, 0xac, 0x09, 0xc7, 0x31,
-+			0x88, 0x15, 0x4b, 0x2e, 0x8f, 0x3a, 0x08, 0xf7,
-+			0xd8, 0xf7, 0xa8, 0xc5, 0xa9, 0x33, 0xa6, 0x45,
-+			0xe4, 0xc4, 0x94, 0x76, 0xf3, 0x0d, 0x8f, 0x7e,
-+			0xc8, 0xf6, 0xbc, 0x23, 0x0a, 0xb6, 0x4c, 0xd3,
-+			0x6a, 0xcd, 0x36, 0xc2, 0x90, 0x5c, 0x5c, 0x3c,
-+			0x65, 0x7b, 0xc2, 0xd6, 0xcc, 0xe6, 0x0d, 0x87,
-+			0x73, 0x2e, 0x71, 0x79, 0x16, 0x06, 0x63, 0x28,
-+			0x09, 0x15, 0xd8, 0x89, 0x38, 0x38, 0x3d, 0xb5,
-+			0x42, 0x1c, 0x08, 0x24, 0xf7, 0x2a, 0xd2, 0x9d,
-+			0xc8, 0xca, 0xef, 0xf9, 0x27, 0xd8, 0x07, 0x86,
-+			0xf7, 0x43, 0x0b, 0x55, 0x15, 0x3f, 0x9f, 0x83,
-+			0xef, 0xdc, 0x49, 0x9d, 0x2a, 0xc1, 0x54, 0x62,
-+			0xbd, 0x9b, 0x66, 0x55, 0x9f, 0xb7, 0x12, 0xf3,
-+			0x1b, 0x4d, 0x9d, 0x2a, 0x5c, 0xed, 0x87, 0x75,
-+			0x87, 0x26, 0xec, 0x61, 0x2c, 0xb4, 0x0f, 0x89,
-+			0xb0, 0xfb, 0x2e, 0x68, 0x5d, 0x15, 0xc7, 0x8d,
-+			0x2e, 0xc0, 0xd9, 0xec, 0xaf, 0x4f, 0xd2, 0x25,
-+			0x29, 0xe8, 0xd2, 0x26, 0x2b, 0x67, 0xe9, 0xfc,
-+			0x2b, 0xa8, 0x67, 0x96, 0x12, 0x1f, 0x5b, 0x96,
-+			0xc6, 0x14, 0x53, 0xaf, 0x44, 0xea, 0xd6, 0xe2,
-+			0x94, 0x98, 0xe4, 0x12, 0x93, 0x4c, 0x92, 0xe0,
-+			0x18, 0xa5, 0x8d, 0x2d, 0xe4, 0x71, 0x3c, 0x47,
-+			0x4c, 0xf7, 0xe6, 0x47, 0x9e, 0xc0, 0x68, 0xdf,
-+			0xd4, 0xf5, 0x5a, 0x74, 0xb1, 0x2b, 0x29, 0x03,
-+			0x19, 0x07, 0xaf, 0x90, 0x62, 0x5c, 0x68, 0x98,
-+			0x48, 0x16, 0x11, 0x02, 0x9d, 0xee, 0xb4, 0x9b,
-+			0xe5, 0x42, 0x7f, 0x08, 0xfd, 0x16, 0x32, 0x0b,
-+			0xd0, 0xb3, 0xfa, 0x2b, 0xb7, 0x99, 0xf9, 0x29,
-+			0xcd, 0x20, 0x45, 0x9f, 0xb3, 0x1a, 0x5d, 0xa2,
-+			0xaf, 0x4d, 0xe0, 0xbd, 0x42, 0x0d, 0xbc, 0x74,
-+			0x99, 0x9c, 0x8e, 0x53, 0x1a, 0xb4, 0x3e, 0xbd,
-+			0xa2, 0x9a, 0x2d, 0xf7, 0xf8, 0x39, 0x0f, 0x67,
-+			0x63, 0xfc, 0x6b, 0xc0, 0xaf, 0xb3, 0x4b, 0x4f,
-+			0x55, 0xc4, 0xcf, 0xa7, 0xc8, 0x04, 0x11, 0x3e,
-+			0x14, 0x32, 0xbb, 0x1b, 0x38, 0x77, 0xd6, 0x7f,
-+			0x54, 0x4c, 0xdf, 0x75, 0xf3, 0x07, 0x2d, 0x33,
-+			0x9b, 0xa8, 0x20, 0xe1, 0x7b, 0x12, 0xb5, 0xf3,
-+			0xef, 0x2f, 0xce, 0x72, 0xe5, 0x24, 0x60, 0xc1,
-+			0x30, 0xe2, 0xab, 0xa1, 0x8e, 0x11, 0x09, 0xa8,
-+			0x21, 0x33, 0x44, 0xfe, 0x7f, 0x35, 0x32, 0x93,
-+			0x39, 0xa7, 0xad, 0x8b, 0x79, 0x06, 0xb2, 0xcb,
-+			0x4e, 0xa9, 0x5f, 0xc7, 0xba, 0x74, 0x29, 0xec,
-+			0x93, 0xa0, 0x4e, 0x54, 0x93, 0xc0, 0xbc, 0x55,
-+			0x64, 0xf0, 0x48, 0xe5, 0x57, 0x99, 0xee, 0x75,
-+			0xd6, 0x79, 0x0f, 0x66, 0xb7, 0xc6, 0x57, 0x76,
-+			0xf7, 0xb7, 0xf3, 0x9c, 0xc5, 0x60, 0xe8, 0x7f,
-+			0x83, 0x76, 0xd6, 0x0e, 0xaa, 0xe6, 0x90, 0x39,
-+			0x1d, 0xa6, 0x32, 0x6a, 0x34, 0xe3, 0x55, 0xf8,
-+			0x58, 0xa0, 0x58, 0x7d, 0x33, 0xe0, 0x22, 0x39,
-+			0x44, 0x64, 0x87, 0x86, 0x5a, 0x2f, 0xa7, 0x7e,
-+			0x0f, 0x38, 0xea, 0xb0, 0x30, 0xcc, 0x61, 0xa5,
-+			0x6a, 0x32, 0xae, 0x1e, 0xf7, 0xe9, 0xd0, 0xa9,
-+			0x0c, 0x32, 0x4b, 0xb5, 0x49, 0x28, 0xab, 0x85,
-+			0x2f, 0x8e, 0x01, 0x36, 0x38, 0x52, 0xd0, 0xba,
-+			0xd6, 0x02, 0x78, 0xf8, 0x0e, 0x3e, 0x9c, 0x8b,
-+			0x6b, 0x45, 0x99, 0x3f, 0x5c, 0xfe, 0x58, 0xf1,
-+			0x5c, 0x94, 0x04, 0xe1, 0xf5, 0x18, 0x6d, 0x51,
-+			0xb2, 0x5d, 0x18, 0x20, 0xb6, 0xc2, 0x9a, 0x42,
-+			0x1d, 0xb3, 0xab, 0x3c, 0xb6, 0x3a, 0x13, 0x03,
-+			0xb2, 0x46, 0x82, 0x4f, 0xfc, 0x64, 0xbc, 0x4f,
-+			0xca, 0xfa, 0x9c, 0xc0, 0xd5, 0xa7, 0xbd, 0x11,
-+			0xb7, 0xe4, 0x5a, 0xf6, 0x6f, 0x4d, 0x4d, 0x54,
-+			0xea, 0xa4, 0x98, 0x66, 0xd4, 0x22, 0x3b, 0xd3,
-+			0x8f, 0x34, 0x47, 0xd9, 0x7c, 0xf4, 0x72, 0x3b,
-+			0x4d, 0x02, 0x77, 0xf6, 0xd6, 0xdd, 0x08, 0x0a,
-+			0x81, 0xe1, 0x86, 0x89, 0x3e, 0x56, 0x10, 0x3c,
-+			0xba, 0xd7, 0x81, 0x8c, 0x08, 0xbc, 0x8b, 0xe2,
-+			0x53, 0xec, 0xa7, 0x89, 0xee, 0xc8, 0x56, 0xb5,
-+			0x36, 0x2c, 0xb2, 0x03, 0xba, 0x99, 0xdd, 0x7c,
-+			0x48, 0xa0, 0xb0, 0xbc, 0x91, 0x33, 0xe9, 0xa8,
-+			0xcb, 0xcd, 0xcf, 0x59, 0x5f, 0x1f, 0x15, 0xe2,
-+			0x56, 0xf5, 0x4e, 0x01, 0x35, 0x27, 0x45, 0x77,
-+			0x47, 0xc8, 0xbc, 0xcb, 0x7e, 0x39, 0xc1, 0x97,
-+			0x28, 0xd3, 0x84, 0xfc, 0x2c, 0x3e, 0xc8, 0xad,
-+			0x9c, 0xf8, 0x8a, 0x61, 0x9c, 0x28, 0xaa, 0xc5,
-+			0x99, 0x20, 0x43, 0x85, 0x9d, 0xa5, 0xe2, 0x8b,
-+			0xb8, 0xae, 0xeb, 0xd0, 0x32, 0x0d, 0x52, 0x78,
-+			0x09, 0x56, 0x3f, 0xc7, 0xd8, 0x7e, 0x26, 0xfc,
-+			0x37, 0xfb, 0x6f, 0x04, 0xfc, 0xfa, 0x92, 0x10,
-+			0xac, 0xf8, 0x3e, 0x21, 0xdc, 0x8c, 0x21, 0x16,
-+			0x7d, 0x67, 0x6e, 0xf6, 0xcd, 0xda, 0xb6, 0x98,
-+			0x23, 0xab, 0x23, 0x3c, 0xb2, 0x10, 0xa0, 0x53,
-+			0x5a, 0x56, 0x9f, 0xc5, 0xd0, 0xff, 0xbb, 0xe4,
-+			0x98, 0x3c, 0x69, 0x1e, 0xdb, 0x38, 0x8f, 0x7e,
-+			0x0f, 0xd2, 0x98, 0x88, 0x81, 0x8b, 0x45, 0x67,
-+			0xea, 0x33, 0xf1, 0xeb, 0xe9, 0x97, 0x55, 0x2e,
-+			0xd9, 0xaa, 0xeb, 0x5a, 0xec, 0xda, 0xe1, 0x68,
-+			0xa8, 0x9d, 0x3c, 0x84, 0x7c, 0x05, 0x3d, 0x62,
-+			0x87, 0x8f, 0x03, 0x21, 0x28, 0x95, 0x0c, 0x89,
-+			0x25, 0x22, 0x4a, 0xb0, 0x93, 0xa9, 0x50, 0xa2,
-+			0x2f, 0x57, 0x6e, 0x18, 0x42, 0x19, 0x54, 0x0c,
-+			0x55, 0x67, 0xc6, 0x11, 0x49, 0xf4, 0x5c, 0xd2,
-+			0xe9, 0x3d, 0xdd, 0x8b, 0x48, 0x71, 0x21, 0x00,
-+			0xc3, 0x9a, 0x6c, 0x85, 0x74, 0x28, 0x83, 0x4a,
-+			0x1b, 0x31, 0x05, 0xe1, 0x06, 0x92, 0xe7, 0xda,
-+			0x85, 0x73, 0x78, 0x45, 0x20, 0x7f, 0xae, 0x13,
-+			0x7c, 0x33, 0x06, 0x22, 0xf4, 0x83, 0xf9, 0x35,
-+			0x3f, 0x6c, 0x71, 0xa8, 0x4e, 0x48, 0xbe, 0x9b,
-+			0xce, 0x8a, 0xba, 0xda, 0xbe, 0x28, 0x08, 0xf7,
-+			0xe2, 0x14, 0x8c, 0x71, 0xea, 0x72, 0xf9, 0x33,
-+			0xf2, 0x88, 0x3f, 0xd7, 0xbb, 0x69, 0x6c, 0x29,
-+			0x19, 0xdc, 0x84, 0xce, 0x1f, 0x12, 0x4f, 0xc8,
-+			0xaf, 0xa5, 0x04, 0xba, 0x5a, 0xab, 0xb0, 0xd9,
-+			0x14, 0x1f, 0x6c, 0x68, 0x98, 0x39, 0x89, 0x7a,
-+			0xd9, 0xd8, 0x2f, 0xdf, 0xa8, 0x47, 0x4a, 0x25,
-+			0xe2, 0xfb, 0x33, 0xf4, 0x59, 0x78, 0xe1, 0x68,
-+			0x85, 0xcf, 0xfe, 0x59, 0x20, 0xd4, 0x05, 0x1d,
-+			0x80, 0x99, 0xae, 0xbc, 0xca, 0xae, 0x0f, 0x2f,
-+			0x65, 0x43, 0x34, 0x8e, 0x7e, 0xac, 0xd3, 0x93,
-+			0x2f, 0xac, 0x6d, 0x14, 0x3d, 0x02, 0x07, 0x70,
-+			0x9d, 0xa4, 0xf3, 0x1b, 0x5c, 0x36, 0xfc, 0x01,
-+			0x73, 0x34, 0x85, 0x0c, 0x6c, 0xd6, 0xf1, 0xbd,
-+			0x3f, 0xdf, 0xee, 0xf5, 0xd9, 0xba, 0x56, 0xef,
-+			0xf4, 0x9b, 0x6b, 0xee, 0x9f, 0x5a, 0x78, 0x6d,
-+			0x32, 0x19, 0xf4, 0xf7, 0xf8, 0x4c, 0x69, 0x0b,
-+			0x4b, 0xbc, 0xbb, 0xb7, 0xf2, 0x85, 0xaf, 0x70,
-+			0x75, 0x24, 0x6c, 0x54, 0xa7, 0x0e, 0x4d, 0x1d,
-+			0x01, 0xbf, 0x08, 0xac, 0xcf, 0x7f, 0x2c, 0xe3,
-+			0x14, 0x89, 0x5e, 0x70, 0x5a, 0x99, 0x92, 0xcd,
-+			0x01, 0x84, 0xc8, 0xd2, 0xab, 0xe5, 0x4f, 0x58,
-+			0xe7, 0x0f, 0x2f, 0x0e, 0xff, 0x68, 0xea, 0xfd,
-+			0x15, 0xb3, 0x17, 0xe6, 0xb0, 0xe7, 0x85, 0xd8,
-+			0x23, 0x2e, 0x05, 0xc7, 0xc9, 0xc4, 0x46, 0x1f,
-+			0xe1, 0x9e, 0x49, 0x20, 0x23, 0x24, 0x4d, 0x7e,
-+			0x29, 0x65, 0xff, 0xf4, 0xb6, 0xfd, 0x1a, 0x85,
-+			0xc4, 0x16, 0xec, 0xfc, 0xea, 0x7b, 0xd6, 0x2c,
-+			0x43, 0xf8, 0xb7, 0xbf, 0x79, 0xc0, 0x85, 0xcd,
-+			0xef, 0xe1, 0x98, 0xd3, 0xa5, 0xf7, 0x90, 0x8c,
-+			0xe9, 0x7f, 0x80, 0x6b, 0xd2, 0xac, 0x4c, 0x30,
-+			0xa7, 0xc6, 0x61, 0x6c, 0xd2, 0xf9, 0x2c, 0xff,
-+			0x30, 0xbc, 0x22, 0x81, 0x7d, 0x93, 0x12, 0xe4,
-+			0x0a, 0xcd, 0xaf, 0xdd, 0xe8, 0xab, 0x0a, 0x1e,
-+			0x13, 0xa4, 0x27, 0xc3, 0x5f, 0xf7, 0x4b, 0xbb,
-+			0x37, 0x09, 0x4b, 0x91, 0x6f, 0x92, 0x4f, 0xaf,
-+			0x52, 0xee, 0xdf, 0xef, 0x09, 0x6f, 0xf7, 0x5c,
-+			0x6e, 0x12, 0x17, 0x72, 0x63, 0x57, 0xc7, 0xba,
-+			0x3b, 0x6b, 0x38, 0x32, 0x73, 0x1b, 0x9c, 0x80,
-+			0xc1, 0x7a, 0xc6, 0xcf, 0xcd, 0x35, 0xc0, 0x6b,
-+			0x31, 0x1a, 0x6b, 0xe9, 0xd8, 0x2c, 0x29, 0x3f,
-+			0x96, 0xfb, 0xb6, 0xcd, 0x13, 0x91, 0x3b, 0xc2,
-+			0xd2, 0xa3, 0x31, 0x8d, 0xa4, 0xcd, 0x57, 0xcd,
-+			0x13, 0x3d, 0x64, 0xfd, 0x06, 0xce, 0xe6, 0xdc,
-+			0x0c, 0x24, 0x43, 0x31, 0x40, 0x57, 0xf1, 0x72,
-+			0x17, 0xe3, 0x3a, 0x63, 0x6d, 0x35, 0xcf, 0x5d,
-+			0x97, 0x40, 0x59, 0xdd, 0xf7, 0x3c, 0x02, 0xf7,
-+			0x1c, 0x7e, 0x05, 0xbb, 0xa9, 0x0d, 0x01, 0xb1,
-+			0x8e, 0xc0, 0x30, 0xa9, 0x53, 0x24, 0xc9, 0x89,
-+			0x84, 0x6d, 0xaa, 0xd0, 0xcd, 0x91, 0xc2, 0x4d,
-+			0x91, 0xb0, 0x89, 0xe2, 0xbf, 0x83, 0x44, 0xaa,
-+			0x28, 0x72, 0x23, 0xa0, 0xc2, 0xad, 0xad, 0x1c,
-+			0xfc, 0x3f, 0x09, 0x7a, 0x0b, 0xdc, 0xc5, 0x1b,
-+			0x87, 0x13, 0xc6, 0x5b, 0x59, 0x8d, 0xf2, 0xc8,
-+			0xaf, 0xdf, 0x11, 0x95,
-+		},
-+		.rlen = 4100,
-+	},
++/* This stores all control uplink flags */
++struct ctrl_ul {
++	u8 port;
++	unsigned reserved:6;
++	unsigned RTS:1;
++	unsigned DTR:1;
++} __attribute__ ((packed));
++
++#else
++/* Little endian */
++
++/* This represents the toggle information */
++struct toggles {
++	unsigned mdm_ul:1;
++	unsigned mdm_dl:1;
++	unsigned diag_dl:1;
++	unsigned enabled:5;	/*
++				 * Toggle fields are valid if enabled is 0,
++				 * else A-channels must always be used.
++				 */
++} __attribute__ ((packed));
++
++/* Configuration table to read at startup of card */
++struct config_table {
++	u32 signature;
++	u16 version;
++	u16 product_information;
++	struct toggles toggle;
++	u8 pad1[7];
++	u16 dl_start;
++	u16 dl_mdm_len1;	/*
++				 * If this is 64, it can hold
++				 * 60 bytes plus the 4-byte length field
++				 */
++	u16 dl_mdm_len2;
++	u16 dl_diag_len1;
++	u16 dl_diag_len2;
++	u16 dl_app1_len;
++	u16 dl_app2_len;
++	u16 dl_ctrl_len;
++	u8 pad2[16];
++	u16 ul_start;
++	u16 ul_mdm_len2;
++	u16 ul_mdm_len1;
++	u16 ul_diag_len;
++	u16 ul_app1_len;
++	u16 ul_app2_len;
++	u16 ul_ctrl_len;
++} __attribute__ ((packed));
++
++/* This stores all control downlink flags */
++struct ctrl_dl {
++	unsigned DSR:1;
++	unsigned DCD:1;
++	unsigned RI:1;
++	unsigned CTS:1;
++	unsigned reserverd:4;
++	u8 port;
++} __attribute__ ((packed));
++
++/* This stores all control uplink flags */
++struct ctrl_ul {
++	unsigned DTR:1;
++	unsigned RTS:1;
++	unsigned reserved:6;
++	u8 port;
++} __attribute__ ((packed));
++#endif
++
++/* This holds all information that is needed regarding a port */
++struct port {
++	u8 update_flow_control;
++	struct ctrl_ul ctrl_ul;
++	struct ctrl_dl ctrl_dl;
++	struct kfifo *fifo_ul;
++	void __iomem *dl_addr[2];
++	u32 dl_size[2];
++	u8 toggle_dl;
++	void __iomem *ul_addr[2];
++	u32 ul_size[2];
++	u8 toggle_ul;
++	u16 token_dl;
++
++	struct tty_struct *tty;
++	int tty_open_count;
++	/* mutex to ensure one access path to this port */
++	struct mutex tty_sem;
++	wait_queue_head_t tty_wait;
++	struct async_icount tty_icount;
++};
++
++/* Private data one for each card in the system */
++struct nozomi {
++	void __iomem *base_addr;
++	unsigned long flip;
++
++	/* Pointers to registers */
++	void __iomem *reg_iir;
++	void __iomem *reg_fcr;
++	void __iomem *reg_ier;
++
++	u16 last_ier;
++	enum card_type card_type;
++	struct config_table config_table;	/* Configuration table */
++	struct pci_dev *pdev;
++	struct port port[NOZOMI_MAX_PORTS];
++	u8 *send_buf;
++
++	spinlock_t spin_mutex;	/* secures access to registers and tty */
++
++	unsigned int index_start;
++	u32 open_ttys;
++};
++
++/* This is a data packet that is read or written to/from card */
++struct buffer {
++	u32 size;		/* size is the length of the data buffer */
++	u8 *data;
++} __attribute__ ((packed));
++
++/*    Global variables */
++static struct pci_device_id nozomi_pci_tbl[] = {
++	{PCI_DEVICE(VENDOR1, DEVICE1)},
++	{},
 +};
 +
- /*
-  * Compression stuff.
-  */
-@@ -4408,6 +7721,88 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
- };
- 
- /*
-+ * LZO test vectors (null-terminated strings).
++MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
++
++static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
++static struct tty_driver *ntty_driver;
++
++/*
++ * find card by tty_index
++ */
++static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty)
++{
++	return tty ? ndevs[tty->index / MAX_PORT] : NULL;
++}
++
++static inline struct port *get_port_by_tty(const struct tty_struct *tty)
++{
++	struct nozomi *ndev = get_dc_by_tty(tty);
++	return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL;
++}
++
++/*
++ * TODO:
++ * -Optimize
++ * -Rewrite cleaner
++ */
++
++static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
++			u32 size_bytes)
++{
++	u32 i = 0;
++	const u32 *ptr = (__force u32 *) mem_addr_start;
++	u16 *buf16;
++
++	if (unlikely(!ptr || !buf))
++		goto out;
++
++	/* shortcut for extremely often used cases */
++	switch (size_bytes) {
++	case 2:	/* 2 bytes */
++		buf16 = (u16 *) buf;
++		*buf16 = __le16_to_cpu(readw((void __iomem *)ptr));
++		goto out;
++		break;
++	case 4:	/* 4 bytes */
++		*(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
++		goto out;
++		break;
++	}
++
++	while (i < size_bytes) {
++		if (size_bytes - i == 2) {
++			/* Handle 2 bytes in the end */
++			buf16 = (u16 *) buf;
++			*(buf16) = __le16_to_cpu(readw((void __iomem *)ptr));
++			i += 2;
++		} else {
++			/* Read 4 bytes */
++			*(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
++			i += 4;
++		}
++		buf++;
++		ptr++;
++	}
++out:
++	return;
++}
++
++/*
++ * TODO:
++ * -Optimize
++ * -Rewrite cleaner
++ */
++static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
++			u32 size_bytes)
++{
++	u32 i = 0;
++	u32 *ptr = (__force u32 *) mem_addr_start;
++	u16 *buf16;
++
++	if (unlikely(!ptr || !buf))
++		return 0;
++
++	/* shortcut for extremely often used cases */
++	switch (size_bytes) {
++	case 2:	/* 2 bytes */
++		buf16 = (u16 *) buf;
++		writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
++		return 2;
++		break;
++	case 1: /*
++		 * also needs to write 4 bytes in this case
++		 * so falling through..
++		 */
++	case 4: /* 4 bytes */
++		writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
++		return 4;
++		break;
++	}
++
++	while (i < size_bytes) {
++		if (size_bytes - i == 2) {
++			/* 2 bytes */
++			buf16 = (u16 *) buf;
++			writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
++			i += 2;
++		} else {
++			/* 4 bytes */
++			writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
++			i += 4;
++		}
++		buf++;
++		ptr++;
++	}
++	return i;
++}
++
++/* Setup pointers to different channels and also setup buffer sizes. */
++static void setup_memory(struct nozomi *dc)
++{
++	void __iomem *offset = dc->base_addr + dc->config_table.dl_start;
++	/* The length reported includes the 4-byte length field,
++	 * hence subtract 4.
++	 */
++	const u16 buff_offset = 4;
++
++	/* Modem port dl configuration */
++	dc->port[PORT_MDM].dl_addr[CH_A] = offset;
++	dc->port[PORT_MDM].dl_addr[CH_B] =
++				(offset += dc->config_table.dl_mdm_len1);
++	dc->port[PORT_MDM].dl_size[CH_A] =
++				dc->config_table.dl_mdm_len1 - buff_offset;
++	dc->port[PORT_MDM].dl_size[CH_B] =
++				dc->config_table.dl_mdm_len2 - buff_offset;
++
++	/* Diag port dl configuration */
++	dc->port[PORT_DIAG].dl_addr[CH_A] =
++				(offset += dc->config_table.dl_mdm_len2);
++	dc->port[PORT_DIAG].dl_size[CH_A] =
++				dc->config_table.dl_diag_len1 - buff_offset;
++	dc->port[PORT_DIAG].dl_addr[CH_B] =
++				(offset += dc->config_table.dl_diag_len1);
++	dc->port[PORT_DIAG].dl_size[CH_B] =
++				dc->config_table.dl_diag_len2 - buff_offset;
++
++	/* App1 port dl configuration */
++	dc->port[PORT_APP1].dl_addr[CH_A] =
++				(offset += dc->config_table.dl_diag_len2);
++	dc->port[PORT_APP1].dl_size[CH_A] =
++				dc->config_table.dl_app1_len - buff_offset;
++
++	/* App2 port dl configuration */
++	dc->port[PORT_APP2].dl_addr[CH_A] =
++				(offset += dc->config_table.dl_app1_len);
++	dc->port[PORT_APP2].dl_size[CH_A] =
++				dc->config_table.dl_app2_len - buff_offset;
++
++	/* Ctrl dl configuration */
++	dc->port[PORT_CTRL].dl_addr[CH_A] =
++				(offset += dc->config_table.dl_app2_len);
++	dc->port[PORT_CTRL].dl_size[CH_A] =
++				dc->config_table.dl_ctrl_len - buff_offset;
++
++	offset = dc->base_addr + dc->config_table.ul_start;
++
++	/* Modem Port ul configuration */
++	dc->port[PORT_MDM].ul_addr[CH_A] = offset;
++	dc->port[PORT_MDM].ul_size[CH_A] =
++				dc->config_table.ul_mdm_len1 - buff_offset;
++	dc->port[PORT_MDM].ul_addr[CH_B] =
++				(offset += dc->config_table.ul_mdm_len1);
++	dc->port[PORT_MDM].ul_size[CH_B] =
++				dc->config_table.ul_mdm_len2 - buff_offset;
++
++	/* Diag port ul configuration */
++	dc->port[PORT_DIAG].ul_addr[CH_A] =
++				(offset += dc->config_table.ul_mdm_len2);
++	dc->port[PORT_DIAG].ul_size[CH_A] =
++				dc->config_table.ul_diag_len - buff_offset;
++
++	/* App1 port ul configuration */
++	dc->port[PORT_APP1].ul_addr[CH_A] =
++				(offset += dc->config_table.ul_diag_len);
++	dc->port[PORT_APP1].ul_size[CH_A] =
++				dc->config_table.ul_app1_len - buff_offset;
++
++	/* App2 port ul configuration */
++	dc->port[PORT_APP2].ul_addr[CH_A] =
++				(offset += dc->config_table.ul_app1_len);
++	dc->port[PORT_APP2].ul_size[CH_A] =
++				dc->config_table.ul_app2_len - buff_offset;
++
++	/* Ctrl ul configuration */
++	dc->port[PORT_CTRL].ul_addr[CH_A] =
++				(offset += dc->config_table.ul_app2_len);
++	dc->port[PORT_CTRL].ul_size[CH_A] =
++				dc->config_table.ul_ctrl_len - buff_offset;
++}
++
++/* Dump config table during the initialization phase */
++#ifdef DEBUG
++static void dump_table(const struct nozomi *dc)
++{
++	DBG3("signature: 0x%08X", dc->config_table.signature);
++	DBG3("version: 0x%04X", dc->config_table.version);
++	DBG3("product_information: 0x%04X", \
++				dc->config_table.product_information);
++	DBG3("toggle enabled: %d", dc->config_table.toggle.enabled);
++	DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul);
++	DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl);
++	DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl);
++
++	DBG3("dl_start: 0x%04X", dc->config_table.dl_start);
++	DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1,
++	   dc->config_table.dl_mdm_len1);
++	DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2,
++	   dc->config_table.dl_mdm_len2);
++	DBG3("dl_diag_len0: 0x%04X, %d", dc->config_table.dl_diag_len1,
++	   dc->config_table.dl_diag_len1);
++	DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2,
++	   dc->config_table.dl_diag_len2);
++	DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len,
++	   dc->config_table.dl_app1_len);
++	DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len,
++	   dc->config_table.dl_app2_len);
++	DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len,
++	   dc->config_table.dl_ctrl_len);
++	DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start,
++	   dc->config_table.ul_start);
++	DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1,
++	   dc->config_table.ul_mdm_len1);
++	DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2,
++	   dc->config_table.ul_mdm_len2);
++	DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len,
++	   dc->config_table.ul_diag_len);
++	DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len,
++	   dc->config_table.ul_app1_len);
++	DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len,
++	   dc->config_table.ul_app2_len);
++	DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len,
++	   dc->config_table.ul_ctrl_len);
++}
++#else
++static __inline__ void dump_table(const struct nozomi *dc) { }
++#endif
++
++/*
++ * Read configuration table from card during the initialization phase
++ * Returns 1 if ok, else 0
++ */
++static int nozomi_read_config_table(struct nozomi *dc)
++{
++	read_mem32((u32 *) &dc->config_table, dc->base_addr + 0,
++						sizeof(struct config_table));
++
++	if (dc->config_table.signature != CONFIG_MAGIC) {
++		dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n",
++			dc->config_table.signature, CONFIG_MAGIC);
++		return 0;
++	}
++
++	if ((dc->config_table.version == 0)
++	    || (dc->config_table.toggle.enabled == TOGGLE_VALID)) {
++		int i;
++		DBG1("Second phase, configuring card");
++
++		setup_memory(dc);
++
++		dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul;
++		dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl;
++		dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl;
++		DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d",
++		   dc->port[PORT_MDM].toggle_ul,
++		   dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl);
++
++		dump_table(dc);
++
++		for (i = PORT_MDM; i < MAX_PORT; i++) {
++			dc->port[i].fifo_ul =
++			    kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
++			memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
++			memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
++		}
++
++		/* Enable control channel */
++		dc->last_ier = dc->last_ier | CTRL_DL;
++		writew(dc->last_ier, dc->reg_ier);
++
++		dev_info(&dc->pdev->dev, "Initialization OK!\n");
++		return 1;
++	}
++
++	if ((dc->config_table.version > 0)
++	    && (dc->config_table.toggle.enabled != TOGGLE_VALID)) {
++		u32 offset = 0;
++		DBG1("First phase: pushing upload buffers, clearing download");
++
++		dev_info(&dc->pdev->dev, "Version of card: %d\n",
++			 dc->config_table.version);
++
++		/* Here we should disable all I/O over F32. */
++		setup_memory(dc);
++
++		/*
++		 * We should send ALL channel pair tokens back along
++		 * with reset token
++		 */
++
++		/* push upload modem buffers */
++		write_mem32(dc->port[PORT_MDM].ul_addr[CH_A],
++			(u32 *) &offset, 4);
++		write_mem32(dc->port[PORT_MDM].ul_addr[CH_B],
++			(u32 *) &offset, 4);
++
++		writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr);
++
++		DBG1("First phase done");
++	}
++
++	return 1;
++}
++
++/* Enable uplink interrupts  */
++static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
++{
++	u16 mask[NOZOMI_MAX_PORTS] = \
++			{MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
++
++	if (port < NOZOMI_MAX_PORTS) {
++		dc->last_ier |= mask[port];
++		writew(dc->last_ier, dc->reg_ier);
++	} else {
++		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
++	}
++}
++
++/* Disable uplink interrupts  */
++static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
++{
++	u16 mask[NOZOMI_MAX_PORTS] = \
++			{~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
++
++	if (port < NOZOMI_MAX_PORTS) {
++		dc->last_ier &= mask[port];
++		writew(dc->last_ier, dc->reg_ier);
++	} else {
++		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
++	}
++}
++
++/* Enable downlink interrupts */
++static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
++{
++	u16 mask[NOZOMI_MAX_PORTS] = \
++			{MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
++
++	if (port < NOZOMI_MAX_PORTS) {
++		dc->last_ier |= mask[port];
++		writew(dc->last_ier, dc->reg_ier);
++	} else {
++		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
++	}
++}
++
++/* Disable downlink interrupts */
++static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
++{
++	u16 mask[NOZOMI_MAX_PORTS] = \
++			{~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
++
++	if (port < NOZOMI_MAX_PORTS) {
++		dc->last_ier &= mask[port];
++		writew(dc->last_ier, dc->reg_ier);
++	} else {
++		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
++	}
++}
++
++/*
++ * Return 1 - send buffer to card and ack.
++ * Return 0 - don't ack, don't send buffer to card.
 + */
-+#define LZO_COMP_TEST_VECTORS 2
-+#define LZO_DECOMP_TEST_VECTORS 2
++static int send_data(enum port_type index, struct nozomi *dc)
++{
++	u32 size = 0;
++	struct port *port = &dc->port[index];
++	u8 toggle = port->toggle_ul;
++	void __iomem *addr = port->ul_addr[toggle];
++	u32 ul_size = port->ul_size[toggle];
++	struct tty_struct *tty = port->tty;
 +
-+static struct comp_testvec lzo_comp_tv_template[] = {
-+	{
-+		.inlen	= 70,
-+		.outlen	= 46,
-+		.input	= "Join us now and share the software "
-+			  "Join us now and share the software ",
-+		.output	= {  0x00, 0x0d, 0x4a, 0x6f, 0x69, 0x6e, 0x20, 0x75,
-+			     0x73, 0x20, 0x6e, 0x6f, 0x77, 0x20, 0x61, 0x6e,
-+			     0x64, 0x20, 0x73, 0x68, 0x61, 0x72, 0x65, 0x20,
-+			     0x74, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x66, 0x74,
-+			     0x77, 0x70, 0x01, 0x01, 0x4a, 0x6f, 0x69, 0x6e,
-+			     0x3d, 0x88, 0x00, 0x11, 0x00, 0x00 },
-+	}, {
-+		.inlen	= 159,
-+		.outlen	= 133,
-+		.input	= "This document describes a compression method based on the LZO "
-+			  "compression algorithm.  This document defines the application of "
-+			  "the LZO algorithm used in UBIFS.",
-+		.output	= { 0x00, 0x2b, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64,
-+			    0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x20,
-+			    0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
-+			    0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x70,
-+			    0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20,
-+			    0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x62,
-+			    0x61, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20,
-+			    0x74, 0x68, 0x65, 0x20, 0x4c, 0x5a, 0x4f, 0x2b,
-+			    0x8c, 0x00, 0x0d, 0x61, 0x6c, 0x67, 0x6f, 0x72,
-+			    0x69, 0x74, 0x68, 0x6d, 0x2e, 0x20, 0x20, 0x54,
-+			    0x68, 0x69, 0x73, 0x2a, 0x54, 0x01, 0x02, 0x66,
-+			    0x69, 0x6e, 0x65, 0x73, 0x94, 0x06, 0x05, 0x61,
-+			    0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x76,
-+			    0x0a, 0x6f, 0x66, 0x88, 0x02, 0x60, 0x09, 0x27,
-+			    0xf0, 0x00, 0x0c, 0x20, 0x75, 0x73, 0x65, 0x64,
-+			    0x20, 0x69, 0x6e, 0x20, 0x55, 0x42, 0x49, 0x46,
-+			    0x53, 0x2e, 0x11, 0x00, 0x00 },
-+	},
-+};
++	/* Get data from tty and place in buf for now */
++	size = __kfifo_get(port->fifo_ul, dc->send_buf,
++			   ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
 +
-+static struct comp_testvec lzo_decomp_tv_template[] = {
-+	{
-+		.inlen	= 133,
-+		.outlen	= 159,
-+		.input	= { 0x00, 0x2b, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64,
-+			    0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x20,
-+			    0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
-+			    0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x70,
-+			    0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20,
-+			    0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x62,
-+			    0x61, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20,
-+			    0x74, 0x68, 0x65, 0x20, 0x4c, 0x5a, 0x4f, 0x2b,
-+			    0x8c, 0x00, 0x0d, 0x61, 0x6c, 0x67, 0x6f, 0x72,
-+			    0x69, 0x74, 0x68, 0x6d, 0x2e, 0x20, 0x20, 0x54,
-+			    0x68, 0x69, 0x73, 0x2a, 0x54, 0x01, 0x02, 0x66,
-+			    0x69, 0x6e, 0x65, 0x73, 0x94, 0x06, 0x05, 0x61,
-+			    0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x76,
-+			    0x0a, 0x6f, 0x66, 0x88, 0x02, 0x60, 0x09, 0x27,
-+			    0xf0, 0x00, 0x0c, 0x20, 0x75, 0x73, 0x65, 0x64,
-+			    0x20, 0x69, 0x6e, 0x20, 0x55, 0x42, 0x49, 0x46,
-+			    0x53, 0x2e, 0x11, 0x00, 0x00 },
-+		.output	= "This document describes a compression method based on the LZO "
-+			  "compression algorithm.  This document defines the application of "
-+			  "the LZO algorithm used in UBIFS.",
-+	}, {
-+		.inlen	= 46,
-+		.outlen	= 70,
-+		.input	= { 0x00, 0x0d, 0x4a, 0x6f, 0x69, 0x6e, 0x20, 0x75,
-+			    0x73, 0x20, 0x6e, 0x6f, 0x77, 0x20, 0x61, 0x6e,
-+			    0x64, 0x20, 0x73, 0x68, 0x61, 0x72, 0x65, 0x20,
-+			    0x74, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x66, 0x74,
-+			    0x77, 0x70, 0x01, 0x01, 0x4a, 0x6f, 0x69, 0x6e,
-+			    0x3d, 0x88, 0x00, 0x11, 0x00, 0x00 },
-+		.output	= "Join us now and share the software "
-+			  "Join us now and share the software ",
-+	},
-+};
++	if (size == 0) {
++		DBG4("No more data to send, disable link:");
++		return 0;
++	}
++
++	/* DUMP(buf, size); */
++
++	/* Write length + data */
++	write_mem32(addr, (u32 *) &size, 4);
++	write_mem32(addr + 4, (u32 *) dc->send_buf, size);
++
++	if (tty)
++		tty_wakeup(tty);
++
++	return 1;
++}
++
++/* If all data has been read, return 1, else 0 */
++static int receive_data(enum port_type index, struct nozomi *dc)
++{
++	u8 buf[RECEIVE_BUF_MAX] = { 0 };
++	int size;
++	u32 offset = 4;
++	struct port *port = &dc->port[index];
++	void __iomem *addr = port->dl_addr[port->toggle_dl];
++	struct tty_struct *tty = port->tty;
++	int i;
++
++	if (unlikely(!tty)) {
++		DBG1("tty not open for port: %d?", index);
++		return 1;
++	}
++
++	read_mem32((u32 *) &size, addr, 4);
++	/*  DBG1( "%d bytes port: %d", size, index); */
++
++	if (test_bit(TTY_THROTTLED, &tty->flags)) {
++		DBG1("No room in tty, don't read data, don't ack interrupt, "
++			"disable interrupt");
++
++		/* disable interrupt in downlink... */
++		disable_transmit_dl(index, dc);
++		return 0;
++	}
++
++	if (unlikely(size == 0)) {
++		dev_err(&dc->pdev->dev, "size == 0?\n");
++		return 1;
++	}
++
++	tty_buffer_request_room(tty, size);
++
++	while (size > 0) {
++		read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
++
++		if (size == 1) {
++			tty_insert_flip_char(tty, buf[0], TTY_NORMAL);
++			size = 0;
++		} else if (size < RECEIVE_BUF_MAX) {
++			size -= tty_insert_flip_string(tty, (char *) buf, size);
++		} else {
++			i = tty_insert_flip_string(tty, \
++						(char *) buf, RECEIVE_BUF_MAX);
++			size -= i;
++			offset += i;
++		}
++	}
++
++	set_bit(index, &dc->flip);
++
++	return 1;
++}
++
++/* Debug for interrupts */
++#ifdef DEBUG
++static char *interrupt2str(u16 interrupt)
++{
++	static char buf[TMP_BUF_MAX];
++	char *p = buf;
++
++	interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL;
++	interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"MDM_DL2 ") : NULL;
++
++	interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"MDM_UL1 ") : NULL;
++	interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"MDM_UL2 ") : NULL;
++
++	interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"DIAG_DL1 ") : NULL;
++	interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"DIAG_DL2 ") : NULL;
++
++	interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"DIAG_UL ") : NULL;
++
++	interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"APP1_DL ") : NULL;
++	interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"APP2_DL ") : NULL;
++
++	interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"APP1_UL ") : NULL;
++	interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"APP2_UL ") : NULL;
++
++	interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"CTRL_DL ") : NULL;
++	interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"CTRL_UL ") : NULL;
++
++	interrupt & RESET ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
++					"RESET ") : NULL;
++
++	return buf;
++}
++#endif
 +
 +/*
-  * Michael MIC test vectors from IEEE 802.11i
-  */
- #define MICHAEL_MIC_TEST_VECTORS 6
-@@ -4812,4 +8207,20 @@ static struct cipher_speed camellia_speed_template[] = {
-       {  .klen = 0, .blen = 0, }
- };
- 
-+static struct cipher_speed salsa20_speed_template[] = {
-+      { .klen = 16, .blen = 16, },
-+      { .klen = 16, .blen = 64, },
-+      { .klen = 16, .blen = 256, },
-+      { .klen = 16, .blen = 1024, },
-+      { .klen = 16, .blen = 8192, },
-+      { .klen = 32, .blen = 16, },
-+      { .klen = 32, .blen = 64, },
-+      { .klen = 32, .blen = 256, },
-+      { .klen = 32, .blen = 1024, },
-+      { .klen = 32, .blen = 8192, },
++ * Receive flow control
++ * Return 1 - If ok, else 0
++ */
++static int receive_flow_control(struct nozomi *dc)
++{
++	enum port_type port = PORT_MDM;
++	struct ctrl_dl ctrl_dl;
++	struct ctrl_dl old_ctrl;
++	u16 enable_ier = 0;
 +
-+      /* End marker */
-+      {  .klen = 0, .blen = 0, }
-+};
++	read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2);
 +
- #endif	/* _CRYPTO_TCRYPT_H */
-diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
-index b4b9c0c..0af216c 100644
---- a/crypto/twofish_common.c
-+++ b/crypto/twofish_common.c
-@@ -655,84 +655,48 @@ int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
- 			CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
- 		}
- 
--		/* Calculate whitening and round subkeys.  The constants are
--		 * indices of subkeys, preprocessed through q0 and q1. */
--		CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
--		CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
--		CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
--		CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
--		CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
--		CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
--		CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
--		CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
--		CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
--		CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
--		CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71);
--		CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
--		CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F);
--		CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
--		CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
--		CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F);
--		CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
--		CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
--		CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00);
--		CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
-+		/* CALC_K256/CALC_K192/CALC_K loops were unrolled.
-+		 * Unrolling produced x2.5 more code (+18k on i386),
-+		 * and speeded up key setup by 7%:
-+		 * unrolled: twofish_setkey/sec: 41128
-+		 *     loop: twofish_setkey/sec: 38148
-+		 * CALC_K256: ~100 insns each
-+		 * CALC_K192: ~90 insns
-+		 *    CALC_K: ~70 insns
-+		 */
-+		/* Calculate whitening and round subkeys */
-+		for ( i = 0; i < 8; i += 2 ) {
-+			CALC_K256 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
++	switch (ctrl_dl.port) {
++	case CTRL_CMD:
++		DBG1("The Base Band sends this value as a response to a "
++			"request for IMSI detach sent over the control "
++			"channel uplink (see section 7.6.1).");
++		break;
++	case CTRL_MDM:
++		port = PORT_MDM;
++		enable_ier = MDM_DL;
++		break;
++	case CTRL_DIAG:
++		port = PORT_DIAG;
++		enable_ier = DIAG_DL;
++		break;
++	case CTRL_APP1:
++		port = PORT_APP1;
++		enable_ier = APP1_DL;
++		break;
++	case CTRL_APP2:
++		port = PORT_APP2;
++		enable_ier = APP2_DL;
++		break;
++	default:
++		dev_err(&dc->pdev->dev,
++			"ERROR: flow control received for non-existing port\n");
++		return 0;
++	};
++
++	DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl),
++	   *((u16 *)&ctrl_dl));
++
++	old_ctrl = dc->port[port].ctrl_dl;
++	dc->port[port].ctrl_dl = ctrl_dl;
++
++	if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) {
++		DBG1("Disable interrupt (0x%04X) on port: %d",
++			enable_ier, port);
++		disable_transmit_ul(port, dc);
++
++	} else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
++
++		if (__kfifo_len(dc->port[port].fifo_ul)) {
++			DBG1("Enable interrupt (0x%04X) on port: %d",
++				enable_ier, port);
++			DBG1("Data in buffer [%d], enable transmit! ",
++				__kfifo_len(dc->port[port].fifo_ul));
++			enable_transmit_ul(port, dc);
++		} else {
++			DBG1("No data in buffer...");
 +		}
-+		for ( i = 0; i < 32; i += 2 ) {
-+			CALC_K256 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
++	}
++
++	if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) {
++		DBG1(" No change in mctrl");
++		return 1;
++	}
++	/* Update statistics */
++	if (old_ctrl.CTS != ctrl_dl.CTS)
++		dc->port[port].tty_icount.cts++;
++	if (old_ctrl.DSR != ctrl_dl.DSR)
++		dc->port[port].tty_icount.dsr++;
++	if (old_ctrl.RI != ctrl_dl.RI)
++		dc->port[port].tty_icount.rng++;
++	if (old_ctrl.DCD != ctrl_dl.DCD)
++		dc->port[port].tty_icount.dcd++;
++
++	wake_up_interruptible(&dc->port[port].tty_wait);
++
++	DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)",
++	   port,
++	   dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts,
++	   dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr);
++
++	return 1;
++}
++
++static enum ctrl_port_type port2ctrl(enum port_type port,
++					const struct nozomi *dc)
++{
++	switch (port) {
++	case PORT_MDM:
++		return CTRL_MDM;
++	case PORT_DIAG:
++		return CTRL_DIAG;
++	case PORT_APP1:
++		return CTRL_APP1;
++	case PORT_APP2:
++		return CTRL_APP2;
++	default:
++		dev_err(&dc->pdev->dev,
++			"ERROR: send flow control " \
++			"received for non-existing port\n");
++	};
++	return CTRL_ERROR;
++}
++
++/*
++ * Send flow control, can only update one channel at a time
++ * Return 0 - If we have updated all flow control
++ * Return 1 - If we need to update more flow control; ack the current one and enable more
++ */
++static int send_flow_control(struct nozomi *dc)
++{
++	u32 i, more_flow_control_to_be_updated = 0;
++	u16 *ctrl;
++
++	for (i = PORT_MDM; i < MAX_PORT; i++) {
++		if (dc->port[i].update_flow_control) {
++			if (more_flow_control_to_be_updated) {
++				/* We have more flow control to be updated */
++				return 1;
++			}
++			dc->port[i].ctrl_ul.port = port2ctrl(i, dc);
++			ctrl = (u16 *)&dc->port[i].ctrl_ul;
++			write_mem32(dc->port[PORT_CTRL].ul_addr[0], \
++				(u32 *) ctrl, 2);
++			dc->port[i].update_flow_control = 0;
++			more_flow_control_to_be_updated = 1;
 +		}
- 	} else if (key_len == 24) { /* 192-bit key */
- 		/* Compute the S-boxes. */
- 		for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
- 		        CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
- 		}
- 
--		/* Calculate whitening and round subkeys.  The constants are
--		 * indices of subkeys, preprocessed through q0 and q1. */
--		CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
--		CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
--		CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
--		CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
--		CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
--		CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
--		CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
--		CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
--		CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
--		CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
--		CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71);
--		CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
--		CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F);
--		CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
--		CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
--		CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F);
--		CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
--		CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
--		CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00);
--		CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
-+		/* Calculate whitening and round subkeys */
-+		for ( i = 0; i < 8; i += 2 ) {
-+			CALC_K192 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
++	}
++	return 0;
++}
++
++/*
++ * Handle downlink data, ports that are handled are modem and diagnostics
++ * Return 1 - ok
++ * Return 0 - toggle fields are out of sync
++ */
++static int handle_data_dl(struct nozomi *dc, enum port_type port, u8 *toggle,
++			u16 read_iir, u16 mask1, u16 mask2)
++{
++	if (*toggle == 0 && read_iir & mask1) {
++		if (receive_data(port, dc)) {
++			writew(mask1, dc->reg_fcr);
++			*toggle = !(*toggle);
 +		}
-+		for ( i = 0; i < 32; i += 2 ) {
-+			CALC_K192 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
++
++		if (read_iir & mask2) {
++			if (receive_data(port, dc)) {
++				writew(mask2, dc->reg_fcr);
++				*toggle = !(*toggle);
++			}
 +		}
- 	} else { /* 128-bit key */
- 		/* Compute the S-boxes. */
- 		for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
- 			CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
- 		}
- 
--		/* Calculate whitening and round subkeys.  The constants are
--		 * indices of subkeys, preprocessed through q0 and q1. */
--		CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3);
--		CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
--		CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
--		CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
--		CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
--		CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B);
--		CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
--		CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
--		CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
--		CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD);
--		CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71);
--		CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
--		CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F);
--		CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B);
--		CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA);
--		CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F);
--		CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
--		CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
--		CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00);
--		CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
-+		/* Calculate whitening and round subkeys */
-+		for ( i = 0; i < 8; i += 2 ) {
-+			CALC_K (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
++	} else if (*toggle == 1 && read_iir & mask2) {
++		if (receive_data(port, dc)) {
++			writew(mask2, dc->reg_fcr);
++			*toggle = !(*toggle);
 +		}
-+		for ( i = 0; i < 32; i += 2 ) {
-+			CALC_K (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
++
++		if (read_iir & mask1) {
++			if (receive_data(port, dc)) {
++				writew(mask1, dc->reg_fcr);
++				*toggle = !(*toggle);
++			}
 +		}
- 	}
- 
- 	return 0;
-diff --git a/crypto/xcbc.c b/crypto/xcbc.c
-index ac68f3b..a82959d 100644
---- a/crypto/xcbc.c
-+++ b/crypto/xcbc.c
-@@ -19,6 +19,7 @@
-  * 	Kazunori Miyazawa <miyazawa at linux-ipv6.org>
-  */
- 
-+#include <crypto/scatterwalk.h>
- #include <linux/crypto.h>
- #include <linux/err.h>
- #include <linux/hardirq.h>
-@@ -27,7 +28,6 @@
- #include <linux/rtnetlink.h>
- #include <linux/slab.h>
- #include <linux/scatterlist.h>
--#include "internal.h"
- 
- static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
- 			   0x02020202, 0x02020202, 0x02020202, 0x02020202,
-@@ -307,7 +307,8 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
- 	case 16:
- 		break;
- 	default:
--		return ERR_PTR(PTR_ERR(alg));
-+		inst = ERR_PTR(-EINVAL);
-+		goto out_put_alg;
- 	}
- 
- 	inst = crypto_alloc_instance("xcbc", alg);
-@@ -320,10 +321,7 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
- 	inst->alg.cra_alignmask = alg->cra_alignmask;
- 	inst->alg.cra_type = &crypto_hash_type;
- 
--	inst->alg.cra_hash.digestsize =
--		(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
--		CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
--				       alg->cra_blocksize;
-+	inst->alg.cra_hash.digestsize = alg->cra_blocksize;
- 	inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
- 				ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
- 	inst->alg.cra_init = xcbc_init_tfm;
-diff --git a/drivers/Makefile b/drivers/Makefile
-index 8cb37e3..d92d4d8 100644
---- a/drivers/Makefile
-+++ b/drivers/Makefile
-@@ -38,7 +38,7 @@ obj-$(CONFIG_SCSI)		+= scsi/
- obj-$(CONFIG_ATA)		+= ata/
- obj-$(CONFIG_FUSION)		+= message/
- obj-$(CONFIG_FIREWIRE)		+= firewire/
--obj-$(CONFIG_IEEE1394)		+= ieee1394/
-+obj-y				+= ieee1394/
- obj-$(CONFIG_UIO)		+= uio/
- obj-y				+= cdrom/
- obj-y				+= auxdisplay/
-diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
-index f4487c3..1b4cf98 100644
---- a/drivers/acpi/bus.c
-+++ b/drivers/acpi/bus.c
-@@ -743,7 +743,7 @@ static int __init acpi_bus_init(void)
- 	return -ENODEV;
- }
- 
--decl_subsys(acpi, NULL, NULL);
-+struct kobject *acpi_kobj;
- 
- static int __init acpi_init(void)
- {
-@@ -755,10 +755,11 @@ static int __init acpi_init(void)
- 		return -ENODEV;
- 	}
- 
--	result = firmware_register(&acpi_subsys);
--	if (result < 0)
--		printk(KERN_WARNING "%s: firmware_register error: %d\n",
--			__FUNCTION__, result);
-+	acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
-+	if (!acpi_kobj) {
-+		printk(KERN_WARNING "%s: kset create error\n", __FUNCTION__);
-+		acpi_kobj = NULL;
++	} else {
++		dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n",
++			*toggle);
++		return 0;
 +	}
- 
- 	result = acpi_bus_init();
- 
-diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
-index c9f526e..5400ea1 100644
---- a/drivers/acpi/pci_link.c
-+++ b/drivers/acpi/pci_link.c
-@@ -911,7 +911,7 @@ __setup("acpi_irq_balance", acpi_irq_balance_set);
- 
- /* FIXME: we will remove this interface after all drivers call pci_disable_device */
- static struct sysdev_class irqrouter_sysdev_class = {
--	set_kset_name("irqrouter"),
-+	.name = "irqrouter",
- 	.resume = irqrouter_resume,
- };
- 
-diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
-index 2235f4e..eb1f82f 100644
---- a/drivers/acpi/processor_idle.c
-+++ b/drivers/acpi/processor_idle.c
-@@ -357,6 +357,26 @@ int acpi_processor_resume(struct acpi_device * device)
- 	return 0;
- }
- 
-+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
-+static int tsc_halts_in_c(int state)
++	return 1;
++}
++
++/*
++ * Handle uplink data, this is currently for the modem port
++ * Return 1 - ok
++ * Return 0 - toggle field is out of sync
++ */
++static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir)
 +{
-+	switch (boot_cpu_data.x86_vendor) {
-+	case X86_VENDOR_AMD:
-+		/*
-+		 * AMD Fam10h TSC will tick in all
-+		 * C/P/S0/S1 states when this bit is set.
-+		 */
-+		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-+			return 0;
-+		/*FALL THROUGH*/
-+	case X86_VENDOR_INTEL:
-+		/* Several cases known where TSC halts in C2 too */
-+	default:
-+		return state > ACPI_STATE_C1;
++	u8 *toggle = &(dc->port[port].toggle_ul);
++
++	if (*toggle == 0 && read_iir & MDM_UL1) {
++		dc->last_ier &= ~MDM_UL;
++		writew(dc->last_ier, dc->reg_ier);
++		if (send_data(port, dc)) {
++			writew(MDM_UL1, dc->reg_fcr);
++			dc->last_ier = dc->last_ier | MDM_UL;
++			writew(dc->last_ier, dc->reg_ier);
++			*toggle = !*toggle;
++		}
++
++		if (read_iir & MDM_UL2) {
++			dc->last_ier &= ~MDM_UL;
++			writew(dc->last_ier, dc->reg_ier);
++			if (send_data(port, dc)) {
++				writew(MDM_UL2, dc->reg_fcr);
++				dc->last_ier = dc->last_ier | MDM_UL;
++				writew(dc->last_ier, dc->reg_ier);
++				*toggle = !*toggle;
++			}
++		}
++
++	} else if (*toggle == 1 && read_iir & MDM_UL2) {
++		dc->last_ier &= ~MDM_UL;
++		writew(dc->last_ier, dc->reg_ier);
++		if (send_data(port, dc)) {
++			writew(MDM_UL2, dc->reg_fcr);
++			dc->last_ier = dc->last_ier | MDM_UL;
++			writew(dc->last_ier, dc->reg_ier);
++			*toggle = !*toggle;
++		}
++
++		if (read_iir & MDM_UL1) {
++			dc->last_ier &= ~MDM_UL;
++			writew(dc->last_ier, dc->reg_ier);
++			if (send_data(port, dc)) {
++				writew(MDM_UL1, dc->reg_fcr);
++				dc->last_ier = dc->last_ier | MDM_UL;
++				writew(dc->last_ier, dc->reg_ier);
++				*toggle = !*toggle;
++			}
++		}
++	} else {
++		writew(read_iir & MDM_UL, dc->reg_fcr);
++		dev_err(&dc->pdev->dev, "port out of sync!\n");
++		return 0;
 +	}
++	return 1;
 +}
-+#endif
 +
- #ifndef CONFIG_CPU_IDLE
- static void acpi_processor_idle(void)
- {
-@@ -516,7 +536,8 @@ static void acpi_processor_idle(void)
- 
- #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
- 		/* TSC halts in C2, so notify users */
--		mark_tsc_unstable("possible TSC halt in C2");
-+		if (tsc_halts_in_c(ACPI_STATE_C2))
-+			mark_tsc_unstable("possible TSC halt in C2");
- #endif
- 		/* Compute time (ticks) that we were actually asleep */
- 		sleep_ticks = ticks_elapsed(t1, t2);
-@@ -534,6 +555,7 @@ static void acpi_processor_idle(void)
- 		break;
- 
- 	case ACPI_STATE_C3:
-+		acpi_unlazy_tlb(smp_processor_id());
- 		/*
- 		 * Must be done before busmaster disable as we might
- 		 * need to access HPET !
-@@ -579,7 +601,8 @@ static void acpi_processor_idle(void)
- 
- #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
- 		/* TSC halts in C3, so notify users */
--		mark_tsc_unstable("TSC halts in C3");
-+		if (tsc_halts_in_c(ACPI_STATE_C3))
-+			mark_tsc_unstable("TSC halts in C3");
- #endif
- 		/* Compute time (ticks) that we were actually asleep */
- 		sleep_ticks = ticks_elapsed(t1, t2);
-@@ -1423,6 +1446,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
- 		return 0;
- 	}
- 
-+	acpi_unlazy_tlb(smp_processor_id());
- 	/*
- 	 * Must be done before busmaster disable as we might need to
- 	 * access HPET !
-@@ -1443,7 +1467,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
- 
- #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
- 	/* TSC could halt in idle, so notify users */
--	mark_tsc_unstable("TSC halts in idle");;
-+	if (tsc_halts_in_c(cx->type))
-+		mark_tsc_unstable("TSC halts in idle");;
- #endif
- 	sleep_ticks = ticks_elapsed(t1, t2);
- 
-@@ -1554,7 +1579,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
- 
- #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
- 	/* TSC could halt in idle, so notify users */
--	mark_tsc_unstable("TSC halts in idle");
-+	if (tsc_halts_in_c(ACPI_STATE_C3))
-+		mark_tsc_unstable("TSC halts in idle");
- #endif
- 	sleep_ticks = ticks_elapsed(t1, t2);
- 	/* Tell the scheduler how much we idled: */
-diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
-index edee280..5ffe0ea 100644
---- a/drivers/acpi/system.c
-+++ b/drivers/acpi/system.c
-@@ -58,7 +58,7 @@ module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
-                               FS Interface (/sys)
-    -------------------------------------------------------------------------- */
- static LIST_HEAD(acpi_table_attr_list);
--static struct kobject tables_kobj;
-+static struct kobject *tables_kobj;
- 
- struct acpi_table_attr {
- 	struct bin_attribute attr;
-@@ -135,11 +135,9 @@ static int acpi_system_sysfs_init(void)
- 	int table_index = 0;
- 	int result;
- 
--	tables_kobj.parent = &acpi_subsys.kobj;
--	kobject_set_name(&tables_kobj, "tables");
--	result = kobject_register(&tables_kobj);
--	if (result)
--		return result;
-+	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
-+	if (!tables_kobj)
-+		return -ENOMEM;
- 
- 	do {
- 		result = acpi_get_table_by_index(table_index, &table_header);
-@@ -153,7 +151,7 @@ static int acpi_system_sysfs_init(void)
- 
- 			acpi_table_attr_init(table_attr, table_header);
- 			result =
--			    sysfs_create_bin_file(&tables_kobj,
-+			    sysfs_create_bin_file(tables_kobj,
- 						  &table_attr->attr);
- 			if (result) {
- 				kfree(table_attr);
-@@ -163,6 +161,7 @@ static int acpi_system_sysfs_init(void)
- 					      &acpi_table_attr_list);
- 		}
- 	} while (!result);
-+	kobject_uevent(tables_kobj, KOBJ_ADD);
- 
- 	return 0;
- }
-diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
-index ba63619..2478cca 100644
---- a/drivers/ata/Kconfig
-+++ b/drivers/ata/Kconfig
-@@ -459,6 +459,15 @@ config PATA_NETCELL
- 
- 	  If unsure, say N.
- 
-+config PATA_NINJA32
-+	tristate "Ninja32/Delkin Cardbus ATA support (Experimental)"
-+	depends on PCI && EXPERIMENTAL
-+	help
-+	  This option enables support for the Ninja32, Delkin and
-+	  possibly other brands of Cardbus ATA adapter
++static irqreturn_t interrupt_handler(int irq, void *dev_id)
++{
++	struct nozomi *dc = dev_id;
++	unsigned int a;
++	u16 read_iir;
 +
-+	  If unsure, say N.
++	if (!dc)
++		return IRQ_NONE;
 +
- config PATA_NS87410
- 	tristate "Nat Semi NS87410 PATA support (Experimental)"
- 	depends on PCI && EXPERIMENTAL
-diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
-index b13feb2..82550c1 100644
---- a/drivers/ata/Makefile
-+++ b/drivers/ata/Makefile
-@@ -41,6 +41,7 @@ obj-$(CONFIG_PATA_IT821X)	+= pata_it821x.o
- obj-$(CONFIG_PATA_IT8213)	+= pata_it8213.o
- obj-$(CONFIG_PATA_JMICRON)	+= pata_jmicron.o
- obj-$(CONFIG_PATA_NETCELL)	+= pata_netcell.o
-+obj-$(CONFIG_PATA_NINJA32)	+= pata_ninja32.o
- obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
- obj-$(CONFIG_PATA_NS87415)	+= pata_ns87415.o
- obj-$(CONFIG_PATA_OPTI)		+= pata_opti.o
-diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
-index 54f38c2..6f089b8 100644
---- a/drivers/ata/ahci.c
-+++ b/drivers/ata/ahci.c
-@@ -198,18 +198,18 @@ enum {
- };
- 
- struct ahci_cmd_hdr {
--	u32			opts;
--	u32			status;
--	u32			tbl_addr;
--	u32			tbl_addr_hi;
--	u32			reserved[4];
-+	__le32			opts;
-+	__le32			status;
-+	__le32			tbl_addr;
-+	__le32			tbl_addr_hi;
-+	__le32			reserved[4];
- };
- 
- struct ahci_sg {
--	u32			addr;
--	u32			addr_hi;
--	u32			reserved;
--	u32			flags_size;
-+	__le32			addr;
-+	__le32			addr_hi;
-+	__le32			reserved;
-+	__le32			flags_size;
- };
- 
- struct ahci_host_priv {
-@@ -597,6 +597,20 @@ static inline void __iomem *ahci_port_base(struct ata_port *ap)
- 	return __ahci_port_base(ap->host, ap->port_no);
- }
- 
-+static void ahci_enable_ahci(void __iomem *mmio)
++	spin_lock(&dc->spin_mutex);
++	read_iir = readw(dc->reg_iir);
++
++	/* Card removed */
++	if (read_iir == (u16)-1)
++		goto none;
++	/*
++	 * Just handle interrupt enabled in IER
++	 * (by masking with dc->last_ier)
++	 */
++	read_iir &= dc->last_ier;
++
++	if (read_iir == 0)
++		goto none;
++
++
++	DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
++		dc->last_ier);
++
++	if (read_iir & RESET) {
++		if (unlikely(!nozomi_read_config_table(dc))) {
++			dc->last_ier = 0x0;
++			writew(dc->last_ier, dc->reg_ier);
++			dev_err(&dc->pdev->dev, "Could not read status from "
++				"card, we should disable interface\n");
++		} else {
++			writew(RESET, dc->reg_fcr);
++		}
++		/* No more useful info if this was the reset interrupt. */
++		goto exit_handler;
++	}
++	if (read_iir & CTRL_UL) {
++		DBG1("CTRL_UL");
++		dc->last_ier &= ~CTRL_UL;
++		writew(dc->last_ier, dc->reg_ier);
++		if (send_flow_control(dc)) {
++			writew(CTRL_UL, dc->reg_fcr);
++			dc->last_ier = dc->last_ier | CTRL_UL;
++			writew(dc->last_ier, dc->reg_ier);
++		}
++	}
++	if (read_iir & CTRL_DL) {
++		receive_flow_control(dc);
++		writew(CTRL_DL, dc->reg_fcr);
++	}
++	if (read_iir & MDM_DL) {
++		if (!handle_data_dl(dc, PORT_MDM,
++				&(dc->port[PORT_MDM].toggle_dl), read_iir,
++				MDM_DL1, MDM_DL2)) {
++			dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
++			goto exit_handler;
++		}
++	}
++	if (read_iir & MDM_UL) {
++		if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
++			dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
++			goto exit_handler;
++		}
++	}
++	if (read_iir & DIAG_DL) {
++		if (!handle_data_dl(dc, PORT_DIAG,
++				&(dc->port[PORT_DIAG].toggle_dl), read_iir,
++				DIAG_DL1, DIAG_DL2)) {
++			dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
++			goto exit_handler;
++		}
++	}
++	if (read_iir & DIAG_UL) {
++		dc->last_ier &= ~DIAG_UL;
++		writew(dc->last_ier, dc->reg_ier);
++		if (send_data(PORT_DIAG, dc)) {
++			writew(DIAG_UL, dc->reg_fcr);
++			dc->last_ier = dc->last_ier | DIAG_UL;
++			writew(dc->last_ier, dc->reg_ier);
++		}
++	}
++	if (read_iir & APP1_DL) {
++		if (receive_data(PORT_APP1, dc))
++			writew(APP1_DL, dc->reg_fcr);
++	}
++	if (read_iir & APP1_UL) {
++		dc->last_ier &= ~APP1_UL;
++		writew(dc->last_ier, dc->reg_ier);
++		if (send_data(PORT_APP1, dc)) {
++			writew(APP1_UL, dc->reg_fcr);
++			dc->last_ier = dc->last_ier | APP1_UL;
++			writew(dc->last_ier, dc->reg_ier);
++		}
++	}
++	if (read_iir & APP2_DL) {
++		if (receive_data(PORT_APP2, dc))
++			writew(APP2_DL, dc->reg_fcr);
++	}
++	if (read_iir & APP2_UL) {
++		dc->last_ier &= ~APP2_UL;
++		writew(dc->last_ier, dc->reg_ier);
++		if (send_data(PORT_APP2, dc)) {
++			writew(APP2_UL, dc->reg_fcr);
++			dc->last_ier = dc->last_ier | APP2_UL;
++			writew(dc->last_ier, dc->reg_ier);
++		}
++	}
++
++exit_handler:
++	spin_unlock(&dc->spin_mutex);
++	for (a = 0; a < NOZOMI_MAX_PORTS; a++)
++		if (test_and_clear_bit(a, &dc->flip))
++			tty_flip_buffer_push(dc->port[a].tty);
++	return IRQ_HANDLED;
++none:
++	spin_unlock(&dc->spin_mutex);
++	return IRQ_NONE;
++}
++
++static void nozomi_get_card_type(struct nozomi *dc)
 +{
-+	u32 tmp;
++	int i;
++	u32 size = 0;
 +
-+	/* turn on AHCI_EN */
-+	tmp = readl(mmio + HOST_CTL);
-+	if (!(tmp & HOST_AHCI_EN)) {
-+		tmp |= HOST_AHCI_EN;
-+		writel(tmp, mmio + HOST_CTL);
-+		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
-+		WARN_ON(!(tmp & HOST_AHCI_EN));
++	for (i = 0; i < 6; i++)
++		size += pci_resource_len(dc->pdev, i);
++
++	/* Assume card type F32_8 if no match */
++	dc->card_type = size == 2048 ? F32_2 : F32_8;
++
++	dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type);
++}
++
++static void nozomi_setup_private_data(struct nozomi *dc)
++{
++	void __iomem *offset = dc->base_addr + dc->card_type / 2;
++	unsigned int i;
++
++	dc->reg_fcr = (void __iomem *)(offset + R_FCR);
++	dc->reg_iir = (void __iomem *)(offset + R_IIR);
++	dc->reg_ier = (void __iomem *)(offset + R_IER);
++	dc->last_ier = 0;
++	dc->flip = 0;
++
++	dc->port[PORT_MDM].token_dl = MDM_DL;
++	dc->port[PORT_DIAG].token_dl = DIAG_DL;
++	dc->port[PORT_APP1].token_dl = APP1_DL;
++	dc->port[PORT_APP2].token_dl = APP2_DL;
++
++	for (i = 0; i < MAX_PORT; i++)
++		init_waitqueue_head(&dc->port[i].tty_wait);
++}
++
++static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
++			  char *buf)
++{
++	struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
++
++	return sprintf(buf, "%d\n", dc->card_type);
++}
++static DEVICE_ATTR(card_type, 0444, card_type_show, NULL);
++
++static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
++			  char *buf)
++{
++	struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
++
++	return sprintf(buf, "%u\n", dc->open_ttys);
++}
++static DEVICE_ATTR(open_ttys, 0444, open_ttys_show, NULL);
++
++static void make_sysfs_files(struct nozomi *dc)
++{
++	if (device_create_file(&dc->pdev->dev, &dev_attr_card_type))
++		dev_err(&dc->pdev->dev,
++			"Could not create sysfs file for card_type\n");
++	if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys))
++		dev_err(&dc->pdev->dev,
++			"Could not create sysfs file for open_ttys\n");
++}
++
++static void remove_sysfs_files(struct nozomi *dc)
++{
++	device_remove_file(&dc->pdev->dev, &dev_attr_card_type);
++	device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys);
++}
++
++/* Allocate memory for one device */
++static int __devinit nozomi_card_init(struct pci_dev *pdev,
++				      const struct pci_device_id *ent)
++{
++	resource_size_t start;
++	int ret;
++	struct nozomi *dc = NULL;
++	int ndev_idx;
++	int i;
++
++	dev_dbg(&pdev->dev, "Init, new card found\n");
++
++	for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++)
++		if (!ndevs[ndev_idx])
++			break;
++
++	if (ndev_idx >= ARRAY_SIZE(ndevs)) {
++		dev_err(&pdev->dev, "no free tty range for this card left\n");
++		ret = -EIO;
++		goto err;
++	}
++
++	dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL);
++	if (unlikely(!dc)) {
++		dev_err(&pdev->dev, "Could not allocate memory\n");
++		ret = -ENOMEM;
++		goto err_free;
++	}
++
++	dc->pdev = pdev;
++
++	/* Find out what card type it is */
++	nozomi_get_card_type(dc);
++
++	ret = pci_enable_device(dc->pdev);
++	if (ret) {
++		dev_err(&pdev->dev, "Failed to enable PCI Device\n");
++		goto err_free;
++	}
++
++	start = pci_resource_start(dc->pdev, 0);
++	if (start == 0) {
++		dev_err(&pdev->dev, "No I/O address for card detected\n");
++		ret = -ENODEV;
++		goto err_disable_device;
++	}
++
++	ret = pci_request_regions(dc->pdev, NOZOMI_NAME);
++	if (ret) {
++		dev_err(&pdev->dev, "I/O address 0x%04x already in use\n",
++			(int) /* nozomi_private.io_addr */ 0);
++		goto err_disable_device;
++	}
++
++	dc->base_addr = ioremap(start, dc->card_type);
++	if (!dc->base_addr) {
++		dev_err(&pdev->dev, "Unable to map card MMIO\n");
++		ret = -ENODEV;
++		goto err_rel_regs;
++	}
++
++	dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL);
++	if (!dc->send_buf) {
++		dev_err(&pdev->dev, "Could not allocate send buffer?\n");
++		ret = -ENOMEM;
++		goto err_free_sbuf;
++	}
++
++	spin_lock_init(&dc->spin_mutex);
++
++	nozomi_setup_private_data(dc);
++
++	/* Disable all interrupts */
++	dc->last_ier = 0;
++	writew(dc->last_ier, dc->reg_ier);
++
++	ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED,
++			NOZOMI_NAME, dc);
++	if (unlikely(ret)) {
++		dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
++		goto err_free_sbuf;
++	}
++
++	DBG1("base_addr: %p", dc->base_addr);
++
++	make_sysfs_files(dc);
++
++	dc->index_start = ndev_idx * MAX_PORT;
++	ndevs[ndev_idx] = dc;
++
++	for (i = 0; i < MAX_PORT; i++) {
++		mutex_init(&dc->port[i].tty_sem);
++		dc->port[i].tty_open_count = 0;
++		dc->port[i].tty = NULL;
++		tty_register_device(ntty_driver, dc->index_start + i,
++							&pdev->dev);
 +	}
++
++	/* Enable RESET interrupt. */
++	dc->last_ier = RESET;
++	writew(dc->last_ier, dc->reg_ier);
++
++	pci_set_drvdata(pdev, dc);
++
++	return 0;
++
++err_free_sbuf:
++	kfree(dc->send_buf);
++	iounmap(dc->base_addr);
++err_rel_regs:
++	pci_release_regions(pdev);
++err_disable_device:
++	pci_disable_device(pdev);
++err_free:
++	kfree(dc);
++err:
++	return ret;
 +}
 +
- /**
-  *	ahci_save_initial_config - Save and fixup initial config values
-  *	@pdev: target PCI device
-@@ -619,6 +633,9 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
- 	u32 cap, port_map;
- 	int i;
- 
-+	/* make sure AHCI mode is enabled before accessing CAP */
-+	ahci_enable_ahci(mmio);
++static void __devexit tty_exit(struct nozomi *dc)
++{
++	unsigned int i;
 +
- 	/* Values prefixed with saved_ are written back to host after
- 	 * reset.  Values without are used for driver operation.
- 	 */
-@@ -1036,19 +1053,17 @@ static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
- static int ahci_reset_controller(struct ata_host *host)
- {
- 	struct pci_dev *pdev = to_pci_dev(host->dev);
-+	struct ahci_host_priv *hpriv = host->private_data;
- 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- 	u32 tmp;
- 
- 	/* we must be in AHCI mode, before using anything
- 	 * AHCI-specific, such as HOST_RESET.
- 	 */
--	tmp = readl(mmio + HOST_CTL);
--	if (!(tmp & HOST_AHCI_EN)) {
--		tmp |= HOST_AHCI_EN;
--		writel(tmp, mmio + HOST_CTL);
--	}
-+	ahci_enable_ahci(mmio);
- 
- 	/* global controller reset */
-+	tmp = readl(mmio + HOST_CTL);
- 	if ((tmp & HOST_RESET) == 0) {
- 		writel(tmp | HOST_RESET, mmio + HOST_CTL);
- 		readl(mmio + HOST_CTL); /* flush */
-@@ -1067,8 +1082,7 @@ static int ahci_reset_controller(struct ata_host *host)
- 	}
- 
- 	/* turn on AHCI mode */
--	writel(HOST_AHCI_EN, mmio + HOST_CTL);
--	(void) readl(mmio + HOST_CTL);	/* flush */
-+	ahci_enable_ahci(mmio);
- 
- 	/* some registers might be cleared on reset.  restore initial values */
- 	ahci_restore_initial_config(host);
-@@ -1078,8 +1092,10 @@ static int ahci_reset_controller(struct ata_host *host)
- 
- 		/* configure PCS */
- 		pci_read_config_word(pdev, 0x92, &tmp16);
--		tmp16 |= 0xf;
--		pci_write_config_word(pdev, 0x92, tmp16);
-+		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
-+			tmp16 |= hpriv->port_map;
-+			pci_write_config_word(pdev, 0x92, tmp16);
-+		}
- 	}
- 
- 	return 0;
-@@ -1480,35 +1496,31 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
- static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
- {
- 	struct scatterlist *sg;
--	struct ahci_sg *ahci_sg;
--	unsigned int n_sg = 0;
-+	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
-+	unsigned int si;
- 
- 	VPRINTK("ENTER\n");
- 
- 	/*
- 	 * Next, the S/G list.
- 	 */
--	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		dma_addr_t addr = sg_dma_address(sg);
- 		u32 sg_len = sg_dma_len(sg);
- 
--		ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
--		ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
--		ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
--
--		ahci_sg++;
--		n_sg++;
-+		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
-+		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-+		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
- 	}
- 
--	return n_sg;
-+	return si;
- }
- 
- static void ahci_qc_prep(struct ata_queued_cmd *qc)
- {
- 	struct ata_port *ap = qc->ap;
- 	struct ahci_port_priv *pp = ap->private_data;
--	int is_atapi = is_atapi_taskfile(&qc->tf);
-+	int is_atapi = ata_is_atapi(qc->tf.protocol);
- 	void *cmd_tbl;
- 	u32 opts;
- 	const u32 cmd_fis_len = 5; /* five dwords */
-diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
-index 9032998..2053420 100644
---- a/drivers/ata/ata_generic.c
-+++ b/drivers/ata/ata_generic.c
-@@ -26,7 +26,7 @@
- #include <linux/libata.h>
- 
- #define DRV_NAME "ata_generic"
--#define DRV_VERSION "0.2.13"
-+#define DRV_VERSION "0.2.15"
- 
- /*
-  *	A generic parallel ATA driver using libata
-@@ -48,27 +48,47 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
- 	struct ata_port *ap = link->ap;
- 	int dma_enabled = 0;
- 	struct ata_device *dev;
-+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- 
- 	/* Bits 5 and 6 indicate if DMA is active on master/slave */
- 	if (ap->ioaddr.bmdma_addr)
- 		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
- 
-+	if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
-+		dma_enabled = 0xFF;
++	DBG1(" ");
 +
- 	ata_link_for_each_dev(dev, link) {
--		if (ata_dev_enabled(dev)) {
--			/* We don't really care */
--			dev->pio_mode = XFER_PIO_0;
--			dev->dma_mode = XFER_MW_DMA_0;
--			/* We do need the right mode information for DMA or PIO
--			   and this comes from the current configuration flags */
--			if (dma_enabled & (1 << (5 + dev->devno))) {
--				ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
--				dev->flags &= ~ATA_DFLAG_PIO;
--			} else {
--				ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
--				dev->xfer_mode = XFER_PIO_0;
--				dev->xfer_shift = ATA_SHIFT_PIO;
--				dev->flags |= ATA_DFLAG_PIO;
-+		if (!ata_dev_enabled(dev))
-+			continue;
++	flush_scheduled_work();
 +
-+		/* We don't really care */
-+		dev->pio_mode = XFER_PIO_0;
-+		dev->dma_mode = XFER_MW_DMA_0;
-+		/* We do need the right mode information for DMA or PIO
-+		   and this comes from the current configuration flags */
-+		if (dma_enabled & (1 << (5 + dev->devno))) {
-+			unsigned int xfer_mask = ata_id_xfermask(dev->id);
-+			const char *name;
++	for (i = 0; i < MAX_PORT; ++i)
++		if (dc->port[i].tty && \
++				list_empty(&dc->port[i].tty->hangup_work.entry))
++			tty_hangup(dc->port[i].tty);
 +
-+			if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
-+				name = ata_mode_string(xfer_mask);
-+			else {
-+				/* SWDMA perhaps? */
-+				name = "DMA";
-+				xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0);
- 			}
++	while (dc->open_ttys)
++		msleep(1);
 +
-+			ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
-+				       name);
++	for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
++		tty_unregister_device(ntty_driver, i);
++}
 +
-+			dev->xfer_mode = ata_xfer_mask2mode(xfer_mask);
-+			dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode);
-+			dev->flags &= ~ATA_DFLAG_PIO;
-+		} else {
-+			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
-+			dev->xfer_mode = XFER_PIO_0;
-+			dev->xfer_shift = ATA_SHIFT_PIO;
-+			dev->flags |= ATA_DFLAG_PIO;
- 		}
- 	}
- 	return 0;
-@@ -185,6 +205,7 @@ static struct pci_device_id ata_generic[] = {
- 	{ PCI_DEVICE(PCI_VENDOR_ID_HINT,   PCI_DEVICE_ID_HINT_VXPROII_IDE), },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA,    PCI_DEVICE_ID_VIA_82C561), },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_OPTI,   PCI_DEVICE_ID_OPTI_82C558), },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2),  },
-diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
-index b406b39..a65c8ae 100644
---- a/drivers/ata/ata_piix.c
-+++ b/drivers/ata/ata_piix.c
-@@ -101,39 +101,21 @@ enum {
- 	ICH5_PMR		= 0x90, /* port mapping register */
- 	ICH5_PCS		= 0x92,	/* port control and status */
- 	PIIX_SCC		= 0x0A, /* sub-class code register */
-+	PIIX_SIDPR_BAR		= 5,
-+	PIIX_SIDPR_LEN		= 16,
-+	PIIX_SIDPR_IDX		= 0,
-+	PIIX_SIDPR_DATA		= 4,
- 
--	PIIX_FLAG_SCR		= (1 << 26), /* SCR available */
- 	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
- 	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
-+	PIIX_FLAG_SIDPR		= (1 << 29), /* SATA idx/data pair regs */
- 
- 	PIIX_PATA_FLAGS		= ATA_FLAG_SLAVE_POSS,
- 	PIIX_SATA_FLAGS		= ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
- 
--	/* combined mode.  if set, PATA is channel 0.
--	 * if clear, PATA is channel 1.
--	 */
--	PIIX_PORT_ENABLED	= (1 << 0),
--	PIIX_PORT_PRESENT	= (1 << 4),
--
- 	PIIX_80C_PRI		= (1 << 5) | (1 << 4),
- 	PIIX_80C_SEC		= (1 << 7) | (1 << 6),
- 
--	/* controller IDs */
--	piix_pata_mwdma		= 0,	/* PIIX3 MWDMA only */
--	piix_pata_33,			/* PIIX4 at 33Mhz */
--	ich_pata_33,			/* ICH up to UDMA 33 only */
--	ich_pata_66,			/* ICH up to 66 Mhz */
--	ich_pata_100,			/* ICH up to UDMA 100 */
--	ich5_sata,
--	ich6_sata,
--	ich6_sata_ahci,
--	ich6m_sata_ahci,
--	ich8_sata_ahci,
--	ich8_2port_sata,
--	ich8m_apple_sata_ahci,		/* locks up on second port enable */
--	tolapai_sata_ahci,
--	piix_pata_vmw,			/* PIIX4 for VMware, spurious DMA_ERR */
--
- 	/* constants for mapping table */
- 	P0			= 0,  /* port 0 */
- 	P1			= 1,  /* port 1 */
-@@ -149,6 +131,24 @@ enum {
- 	PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
- };
- 
-+enum piix_controller_ids {
-+	/* controller IDs */
-+	piix_pata_mwdma,	/* PIIX3 MWDMA only */
-+	piix_pata_33,		/* PIIX4 at 33Mhz */
-+	ich_pata_33,		/* ICH up to UDMA 33 only */
-+	ich_pata_66,		/* ICH up to 66 Mhz */
-+	ich_pata_100,		/* ICH up to UDMA 100 */
-+	ich5_sata,
-+	ich6_sata,
-+	ich6_sata_ahci,
-+	ich6m_sata_ahci,
-+	ich8_sata_ahci,
-+	ich8_2port_sata,
-+	ich8m_apple_sata_ahci,	/* locks up on second port enable */
-+	tolapai_sata_ahci,
-+	piix_pata_vmw,			/* PIIX4 for VMware, spurious DMA_ERR */
-+};
++/* Deallocate memory for one device */
++static void __devexit nozomi_card_exit(struct pci_dev *pdev)
++{
++	int i;
++	struct ctrl_ul ctrl;
++	struct nozomi *dc = pci_get_drvdata(pdev);
 +
- struct piix_map_db {
- 	const u32 mask;
- 	const u16 port_enable;
-@@ -157,6 +157,7 @@ struct piix_map_db {
- 
- struct piix_host_priv {
- 	const int *map;
-+	void __iomem *sidpr;
- };
- 
- static int piix_init_one(struct pci_dev *pdev,
-@@ -167,6 +168,9 @@ static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
- static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
- static int ich_pata_cable_detect(struct ata_port *ap);
- static u8 piix_vmw_bmdma_status(struct ata_port *ap);
-+static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val);
-+static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val);
-+static void piix_sidpr_error_handler(struct ata_port *ap);
- #ifdef CONFIG_PM
- static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
- static int piix_pci_device_resume(struct pci_dev *pdev);
-@@ -321,7 +325,6 @@ static const struct ata_port_operations piix_pata_ops = {
- 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
- 	.cable_detect		= ata_cable_40wire,
- 
--	.irq_handler		= ata_interrupt,
- 	.irq_clear		= ata_bmdma_irq_clear,
- 	.irq_on			= ata_irq_on,
- 
-@@ -353,7 +356,6 @@ static const struct ata_port_operations ich_pata_ops = {
- 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
- 	.cable_detect		= ich_pata_cable_detect,
- 
--	.irq_handler		= ata_interrupt,
- 	.irq_clear		= ata_bmdma_irq_clear,
- 	.irq_on			= ata_irq_on,
- 
-@@ -380,7 +382,6 @@ static const struct ata_port_operations piix_sata_ops = {
- 	.error_handler		= ata_bmdma_error_handler,
- 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
- 
--	.irq_handler		= ata_interrupt,
- 	.irq_clear		= ata_bmdma_irq_clear,
- 	.irq_on			= ata_irq_on,
- 
-@@ -419,6 +420,35 @@ static const struct ata_port_operations piix_vmw_ops = {
- 	.port_start		= ata_port_start,
- };
- 
-+static const struct ata_port_operations piix_sidpr_sata_ops = {
-+	.tf_load		= ata_tf_load,
-+	.tf_read		= ata_tf_read,
-+	.check_status		= ata_check_status,
-+	.exec_command		= ata_exec_command,
-+	.dev_select		= ata_std_dev_select,
++	/* Disable all interrupts */
++	dc->last_ier = 0;
++	writew(dc->last_ier, dc->reg_ier);
 +
-+	.bmdma_setup		= ata_bmdma_setup,
-+	.bmdma_start		= ata_bmdma_start,
-+	.bmdma_stop		= ata_bmdma_stop,
-+	.bmdma_status		= ata_bmdma_status,
-+	.qc_prep		= ata_qc_prep,
-+	.qc_issue		= ata_qc_issue_prot,
-+	.data_xfer		= ata_data_xfer,
++	tty_exit(dc);
 +
-+	.scr_read		= piix_sidpr_scr_read,
-+	.scr_write		= piix_sidpr_scr_write,
++	/* Send 0x0001, commanding the card to resend the reset token. */
++	/* This is to get the reset when the module is reloaded. */
++	ctrl.port = 0x00;
++	ctrl.reserved = 0;
++	ctrl.RTS = 0;
++	ctrl.DTR = 1;
++	DBG1("sending flow control 0x%04X", *((u16 *)&ctrl));
 +
-+	.freeze			= ata_bmdma_freeze,
-+	.thaw			= ata_bmdma_thaw,
-+	.error_handler		= piix_sidpr_error_handler,
-+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
++	/* Setup dc->reg addresses so we can use defines here */
++	write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2);
++	writew(CTRL_UL, dc->reg_fcr);	/* push the token to the card. */
 +
-+	.irq_clear		= ata_bmdma_irq_clear,
-+	.irq_on			= ata_irq_on,
++	remove_sysfs_files(dc);
 +
-+	.port_start		= ata_port_start,
-+};
++	free_irq(pdev->irq, dc);
++
++	for (i = 0; i < MAX_PORT; i++)
++		if (dc->port[i].fifo_ul)
++			kfifo_free(dc->port[i].fifo_ul);
++
++	kfree(dc->send_buf);
++
++	iounmap(dc->base_addr);
++
++	pci_release_regions(pdev);
++
++	pci_disable_device(pdev);
++
++	ndevs[dc->index_start / MAX_PORT] = NULL;
++
++	kfree(dc);
++}
++
++static void set_rts(const struct tty_struct *tty, int rts)
++{
++	struct port *port = get_port_by_tty(tty);
++
++	port->ctrl_ul.RTS = rts;
++	port->update_flow_control = 1;
++	enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
++}
++
++static void set_dtr(const struct tty_struct *tty, int dtr)
++{
++	struct port *port = get_port_by_tty(tty);
++
++	DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr);
++
++	port->ctrl_ul.DTR = dtr;
++	port->update_flow_control = 1;
++	enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
++}
 +
- static const struct piix_map_db ich5_map_db = {
- 	.mask = 0x7,
- 	.port_enable = 0x3,
-@@ -526,7 +556,6 @@ static const struct piix_map_db *piix_map_db_table[] = {
- static struct ata_port_info piix_port_info[] = {
- 	[piix_pata_mwdma] = 	/* PIIX3 MWDMA only */
- 	{
--		.sht		= &piix_sht,
- 		.flags		= PIIX_PATA_FLAGS,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
-@@ -535,7 +564,6 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[piix_pata_33] =	/* PIIX4 at 33MHz */
- 	{
--		.sht		= &piix_sht,
- 		.flags		= PIIX_PATA_FLAGS,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
-@@ -545,7 +573,6 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich_pata_33] = 	/* ICH0 - ICH at 33Mhz*/
- 	{
--		.sht		= &piix_sht,
- 		.flags		= PIIX_PATA_FLAGS,
- 		.pio_mask 	= 0x1f,	/* pio 0-4 */
- 		.mwdma_mask	= 0x06, /* Check: maybe 0x07  */
-@@ -555,7 +582,6 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich_pata_66] = 	/* ICH controllers up to 66MHz */
- 	{
--		.sht		= &piix_sht,
- 		.flags		= PIIX_PATA_FLAGS,
- 		.pio_mask 	= 0x1f,	/* pio 0-4 */
- 		.mwdma_mask	= 0x06, /* MWDMA0 is broken on chip */
-@@ -565,7 +591,6 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich_pata_100] =
- 	{
--		.sht		= &piix_sht,
- 		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x06, /* mwdma1-2 */
-@@ -575,7 +600,6 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich5_sata] =
- 	{
--		.sht		= &piix_sht,
- 		.flags		= PIIX_SATA_FLAGS,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-@@ -585,8 +609,7 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich6_sata] =
- 	{
--		.sht		= &piix_sht,
--		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
-+		.flags		= PIIX_SATA_FLAGS,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x07, /* mwdma0-2 */
- 		.udma_mask	= ATA_UDMA6,
-@@ -595,9 +618,7 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich6_sata_ahci] =
- 	{
--		.sht		= &piix_sht,
--		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
--				  PIIX_FLAG_AHCI,
-+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x07, /* mwdma0-2 */
- 		.udma_mask	= ATA_UDMA6,
-@@ -606,9 +627,7 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich6m_sata_ahci] =
- 	{
--		.sht		= &piix_sht,
--		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
--				  PIIX_FLAG_AHCI,
-+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x07, /* mwdma0-2 */
- 		.udma_mask	= ATA_UDMA6,
-@@ -617,9 +636,8 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich8_sata_ahci] =
- 	{
--		.sht		= &piix_sht,
--		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
--				  PIIX_FLAG_AHCI,
-+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
-+				  PIIX_FLAG_SIDPR,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x07, /* mwdma0-2 */
- 		.udma_mask	= ATA_UDMA6,
-@@ -628,9 +646,8 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich8_2port_sata] =
- 	{
--		.sht		= &piix_sht,
--		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
--				  PIIX_FLAG_AHCI,
-+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
-+				  PIIX_FLAG_SIDPR,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x07, /* mwdma0-2 */
- 		.udma_mask	= ATA_UDMA6,
-@@ -639,9 +656,7 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[tolapai_sata_ahci] =
- 	{
--		.sht		= &piix_sht,
--		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
--				  PIIX_FLAG_AHCI,
-+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x07, /* mwdma0-2 */
- 		.udma_mask	= ATA_UDMA6,
-@@ -650,9 +665,8 @@ static struct ata_port_info piix_port_info[] = {
- 
- 	[ich8m_apple_sata_ahci] =
- 	{
--		.sht		= &piix_sht,
--		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
--				  PIIX_FLAG_AHCI,
-+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_AHCI |
-+				  PIIX_FLAG_SIDPR,
- 		.pio_mask	= 0x1f,	/* pio0-4 */
- 		.mwdma_mask	= 0x07, /* mwdma0-2 */
- 		.udma_mask	= ATA_UDMA6,
-@@ -1001,6 +1015,180 @@ static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
- 	do_pata_set_dmamode(ap, adev, 1);
- }
- 
 +/*
-+ * Serial ATA Index/Data Pair Superset Registers access
-+ *
-+ * Beginning from ICH8, there's a sane way to access SCRs using index
-+ * and data register pair located at BAR5.  This creates an
-+ * interesting problem of mapping two SCRs to one port.
-+ *
-+ * Although they have separate SCRs, the master and slave aren't
-+ * independent enough to be treated as separate links - e.g. softreset
-+ * resets both.  Also, there's no protocol defined for hard resetting
-+ * singled device sharing the virtual port (no defined way to acquire
-+ * device signature).  This is worked around by merging the SCR values
-+ * into one sensible value and requesting follow-up SRST after
-+ * hardreset.
-+ *
-+ * SCR merging is perfomed in nibbles which is the unit contents in
-+ * SCRs are organized.  If two values are equal, the value is used.
-+ * When they differ, merge table which lists precedence of possible
-+ * values is consulted and the first match or the last entry when
-+ * nothing matches is used.  When there's no merge table for the
-+ * specific nibble, value from the first port is used.
++ * ----------------------------------------------------------------------------
++ * TTY code
++ * ----------------------------------------------------------------------------
 + */
-+static const int piix_sidx_map[] = {
-+	[SCR_STATUS]	= 0,
-+	[SCR_ERROR]	= 2,
-+	[SCR_CONTROL]	= 1,
-+};
 +
-+static void piix_sidpr_sel(struct ata_device *dev, unsigned int reg)
++/* Called when the userspace process opens the tty, /dev/noz*.  */
++static int ntty_open(struct tty_struct *tty, struct file *file)
 +{
-+	struct ata_port *ap = dev->link->ap;
-+	struct piix_host_priv *hpriv = ap->host->private_data;
++	struct port *port = get_port_by_tty(tty);
++	struct nozomi *dc = get_dc_by_tty(tty);
++	unsigned long flags;
++
++	if (!port || !dc)
++		return -ENODEV;
++
++	if (mutex_lock_interruptible(&port->tty_sem))
++		return -ERESTARTSYS;
++
++	port->tty_open_count++;
++	dc->open_ttys++;
++
++	/* Enable interrupt downlink for channel */
++	if (port->tty_open_count == 1) {
++		tty->low_latency = 1;
++		tty->driver_data = port;
++		port->tty = tty;
++		DBG1("open: %d", port->token_dl);
++		spin_lock_irqsave(&dc->spin_mutex, flags);
++		dc->last_ier = dc->last_ier | port->token_dl;
++		writew(dc->last_ier, dc->reg_ier);
++		spin_unlock_irqrestore(&dc->spin_mutex, flags);
++	}
++
++	mutex_unlock(&port->tty_sem);
++
++	return 0;
++}
++
++/* Called when the userspace process closes the tty, /dev/noz*. */
++static void ntty_close(struct tty_struct *tty, struct file *file)
++{
++	struct nozomi *dc = get_dc_by_tty(tty);
++	struct port *port = tty->driver_data;
++	unsigned long flags;
++
++	if (!dc || !port)
++		return;
++
++	if (mutex_lock_interruptible(&port->tty_sem))
++		return;
++
++	if (!port->tty_open_count)
++		goto exit;
++
++	dc->open_ttys--;
++	port->tty_open_count--;
++
++	if (port->tty_open_count == 0) {
++		DBG1("close: %d", port->token_dl);
++		spin_lock_irqsave(&dc->spin_mutex, flags);
++		dc->last_ier &= ~(port->token_dl);
++		writew(dc->last_ier, dc->reg_ier);
++		spin_unlock_irqrestore(&dc->spin_mutex, flags);
++	}
++
++exit:
++	mutex_unlock(&port->tty_sem);
++}
++
++/*
++ * Called when the userspace process writes to the tty (/dev/noz*).
++ * Data is inserted into a fifo, which is then read and transferred to the modem.
++ */
++static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
++		      int count)
++{
++	int rval = -EINVAL;
++	struct nozomi *dc = get_dc_by_tty(tty);
++	struct port *port = tty->driver_data;
++	unsigned long flags;
++
++	/* DBG1( "WRITEx: %d, index = %d", count, index); */
++
++	if (!dc || !port)
++		return -ENODEV;
++
++	if (unlikely(!mutex_trylock(&port->tty_sem))) {
++		/*
++		 * must test lock as tty layer wraps calls
++		 * to this function with BKL
++		 */
++		dev_err(&dc->pdev->dev, "Would have deadlocked - "
++			"return EAGAIN\n");
++		return -EAGAIN;
++	}
++
++	if (unlikely(!port->tty_open_count)) {
++		DBG1(" ");
++		goto exit;
++	}
++
++	rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count);
++
++	/* notify card */
++	if (unlikely(dc == NULL)) {
++		DBG1("No device context?");
++		goto exit;
++	}
++
++	spin_lock_irqsave(&dc->spin_mutex, flags);
++	/* CTS is only valid on the modem channel */
++	if (port == &(dc->port[PORT_MDM])) {
++		if (port->ctrl_dl.CTS) {
++			DBG4("Enable interrupt");
++			enable_transmit_ul(tty->index % MAX_PORT, dc);
++		} else {
++			dev_err(&dc->pdev->dev,
++				"CTS not active on modem port?\n");
++		}
++	} else {
++		enable_transmit_ul(tty->index % MAX_PORT, dc);
++	}
++	spin_unlock_irqrestore(&dc->spin_mutex, flags);
++
++exit:
++	mutex_unlock(&port->tty_sem);
++	return rval;
++}
++
++/*
++ * Calculate how much room is left in the device.
++ * This method is called by the upper tty layer.
++ *   According to the N_TTY.c sources it expects a value >= 0 and
++ *    does not check for negative values.
++ */
++static int ntty_write_room(struct tty_struct *tty)
++{
++	struct port *port = tty->driver_data;
++	int room = 0;
++	struct nozomi *dc = get_dc_by_tty(tty);
++
++	if (!dc || !port)
++		return 0;
++	if (!mutex_trylock(&port->tty_sem))
++		return 0;
++
++	if (!port->tty_open_count)
++		goto exit;
++
++	room = port->fifo_ul->size - __kfifo_len(port->fifo_ul);
++
++exit:
++	mutex_unlock(&port->tty_sem);
++	return room;
++}
++
++/* Gets io control parameters */
++static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
++{
++	struct port *port = tty->driver_data;
++	struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
++	struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
++
++	return	(ctrl_ul->RTS ? TIOCM_RTS : 0) |
++		(ctrl_ul->DTR ? TIOCM_DTR : 0) |
++		(ctrl_dl->DCD ? TIOCM_CAR : 0) |
++		(ctrl_dl->RI  ? TIOCM_RNG : 0) |
++		(ctrl_dl->DSR ? TIOCM_DSR : 0) |
++		(ctrl_dl->CTS ? TIOCM_CTS : 0);
++}
++
++/* Sets io control parameters */
++static int ntty_tiocmset(struct tty_struct *tty, struct file *file,
++	unsigned int set, unsigned int clear)
++{
++	if (set & TIOCM_RTS)
++		set_rts(tty, 1);
++	else if (clear & TIOCM_RTS)
++		set_rts(tty, 0);
++
++	if (set & TIOCM_DTR)
++		set_dtr(tty, 1);
++	else if (clear & TIOCM_DTR)
++		set_dtr(tty, 0);
++
++	return 0;
++}
++
++static int ntty_cflags_changed(struct port *port, unsigned long flags,
++		struct async_icount *cprev)
++{
++	struct async_icount cnow = port->tty_icount;
++	int ret;
++
++	ret =	((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
++		((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
++		((flags & TIOCM_CD)  && (cnow.dcd != cprev->dcd)) ||
++		((flags & TIOCM_CTS) && (cnow.cts != cprev->cts));
++
++	*cprev = cnow;
++
++	return ret;
++}
++
++static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
++{
++	struct async_icount cnow = port->tty_icount;
++	struct serial_icounter_struct icount;
++
++	icount.cts = cnow.cts;
++	icount.dsr = cnow.dsr;
++	icount.rng = cnow.rng;
++	icount.dcd = cnow.dcd;
++	icount.rx = cnow.rx;
++	icount.tx = cnow.tx;
++	icount.frame = cnow.frame;
++	icount.overrun = cnow.overrun;
++	icount.parity = cnow.parity;
++	icount.brk = cnow.brk;
++	icount.buf_overrun = cnow.buf_overrun;
 +
-+	iowrite32(((ap->port_no * 2 + dev->devno) << 8) | piix_sidx_map[reg],
-+		  hpriv->sidpr + PIIX_SIDPR_IDX);
++	return copy_to_user(argp, &icount, sizeof(icount));
 +}
 +
-+static int piix_sidpr_read(struct ata_device *dev, unsigned int reg)
++static int ntty_ioctl(struct tty_struct *tty, struct file *file,
++		      unsigned int cmd, unsigned long arg)
 +{
-+	struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
++	struct port *port = tty->driver_data;
++	void __user *argp = (void __user *)arg;
++	int rval = -ENOIOCTLCMD;
 +
-+	piix_sidpr_sel(dev, reg);
-+	return ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
-+}
++	DBG1("******** IOCTL, cmd: %d", cmd);
 +
-+static void piix_sidpr_write(struct ata_device *dev, unsigned int reg, u32 val)
-+{
-+	struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
++	switch (cmd) {
++	case TIOCMIWAIT: {
++		struct async_icount cprev = port->tty_icount;
 +
-+	piix_sidpr_sel(dev, reg);
-+	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
++		rval = wait_event_interruptible(port->tty_wait,
++				ntty_cflags_changed(port, arg, &cprev));
++		break;
++	} case TIOCGICOUNT:
++		rval = ntty_ioctl_tiocgicount(port, argp);
++		break;
++	default:
++		DBG1("ERR: 0x%08X, %d", cmd, cmd);
++		break;
++	};
++
++	return rval;
 +}
 +
-+u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
++/*
++ * Called by the upper tty layer when tty buffers are ready
++ * to receive data again after a call to throttle.
++ */
++static void ntty_unthrottle(struct tty_struct *tty)
 +{
-+	u32 val = 0;
-+	int i, mi;
-+
-+	for (i = 0, mi = 0; i < 32 / 4; i++) {
-+		u8 c0 = (val0 >> (i * 4)) & 0xf;
-+		u8 c1 = (val1 >> (i * 4)) & 0xf;
-+		u8 merged = c0;
-+		const int *cur;
++	struct nozomi *dc = get_dc_by_tty(tty);
++	unsigned long flags;
 +
-+		/* if no merge preference, assume the first value */
-+		cur = merge_tbl[mi];
-+		if (!cur)
-+			goto done;
-+		mi++;
++	DBG1("UNTHROTTLE");
++	spin_lock_irqsave(&dc->spin_mutex, flags);
++	enable_transmit_dl(tty->index % MAX_PORT, dc);
++	set_rts(tty, 1);
 +
-+		/* if two values equal, use it */
-+		if (c0 == c1)
-+			goto done;
++	spin_unlock_irqrestore(&dc->spin_mutex, flags);
++}
 +
-+		/* choose the first match or the last from the merge table */
-+		while (*cur != -1) {
-+			if (c0 == *cur || c1 == *cur)
-+				break;
-+			cur++;
-+		}
-+		if (*cur == -1)
-+			cur--;
-+		merged = *cur;
-+	done:
-+		val |= merged << (i * 4);
-+	}
++/*
++ * Called by the upper tty layer when the tty buffers are almost full.
++ * The driver should stop sending more data.
++ */
++static void ntty_throttle(struct tty_struct *tty)
++{
++	struct nozomi *dc = get_dc_by_tty(tty);
++	unsigned long flags;
 +
-+	return val;
++	DBG1("THROTTLE");
++	spin_lock_irqsave(&dc->spin_mutex, flags);
++	set_rts(tty, 0);
++	spin_unlock_irqrestore(&dc->spin_mutex, flags);
 +}
 +
-+static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val)
++/* just to discard single character writes */
++static void ntty_put_char(struct tty_struct *tty, unsigned char c)
 +{
-+	const int * const sstatus_merge_tbl[] = {
-+		/* DET */ (const int []){ 1, 3, 0, 4, 3, -1 },
-+		/* SPD */ (const int []){ 2, 1, 0, -1 },
-+		/* IPM */ (const int []){ 6, 2, 1, 0, -1 },
-+		NULL,
-+	};
-+	const int * const scontrol_merge_tbl[] = {
-+		/* DET */ (const int []){ 1, 0, 4, 0, -1 },
-+		/* SPD */ (const int []){ 0, 2, 1, 0, -1 },
-+		/* IPM */ (const int []){ 0, 1, 2, 3, 0, -1 },
-+		NULL,
-+	};
-+	u32 v0, v1;
++	/* FIXME !!! */
++	DBG2("PUT CHAR Function: %c", c);
++}
 +
-+	if (reg >= ARRAY_SIZE(piix_sidx_map))
-+		return -EINVAL;
++/* Returns number of chars in buffer, called by tty layer */
++static s32 ntty_chars_in_buffer(struct tty_struct *tty)
++{
++	struct port *port = tty->driver_data;
++	struct nozomi *dc = get_dc_by_tty(tty);
++	s32 rval;
 +
-+	if (!(ap->flags & ATA_FLAG_SLAVE_POSS)) {
-+		*val = piix_sidpr_read(&ap->link.device[0], reg);
-+		return 0;
++	if (unlikely(!dc || !port)) {
++		rval = -ENODEV;
++		goto exit_in_buffer;
 +	}
 +
-+	v0 = piix_sidpr_read(&ap->link.device[0], reg);
-+	v1 = piix_sidpr_read(&ap->link.device[1], reg);
-+
-+	switch (reg) {
-+	case SCR_STATUS:
-+		*val = piix_merge_scr(v0, v1, sstatus_merge_tbl);
-+		break;
-+	case SCR_ERROR:
-+		*val = v0 | v1;
-+		break;
-+	case SCR_CONTROL:
-+		*val = piix_merge_scr(v0, v1, scontrol_merge_tbl);
-+		break;
++	if (unlikely(!port->tty_open_count)) {
++		dev_err(&dc->pdev->dev, "No tty open?\n");
++		rval = -ENODEV;
++		goto exit_in_buffer;
 +	}
 +
-+	return 0;
++	rval = __kfifo_len(port->fifo_ul);
++
++exit_in_buffer:
++	return rval;
 +}
 +
-+static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val)
-+{
-+	if (reg >= ARRAY_SIZE(piix_sidx_map))
-+		return -EINVAL;
++static struct tty_operations tty_ops = {
++	.ioctl = ntty_ioctl,
++	.open = ntty_open,
++	.close = ntty_close,
++	.write = ntty_write,
++	.write_room = ntty_write_room,
++	.unthrottle = ntty_unthrottle,
++	.throttle = ntty_throttle,
++	.chars_in_buffer = ntty_chars_in_buffer,
++	.put_char = ntty_put_char,
++	.tiocmget = ntty_tiocmget,
++	.tiocmset = ntty_tiocmset,
++};
 +
-+	piix_sidpr_write(&ap->link.device[0], reg, val);
++/* Module initialization */
++static struct pci_driver nozomi_driver = {
++	.name = NOZOMI_NAME,
++	.id_table = nozomi_pci_tbl,
++	.probe = nozomi_card_init,
++	.remove = __devexit_p(nozomi_card_exit),
++};
 +
-+	if (ap->flags & ATA_FLAG_SLAVE_POSS)
-+		piix_sidpr_write(&ap->link.device[1], reg, val);
++static __init int nozomi_init(void)
++{
++	int ret;
 +
-+	return 0;
-+}
++	printk(KERN_INFO "Initializing %s\n", VERSION_STRING);
 +
-+static int piix_sidpr_hardreset(struct ata_link *link, unsigned int *class,
-+				unsigned long deadline)
-+{
-+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-+	int rc;
++	ntty_driver = alloc_tty_driver(NTTY_TTY_MAXMINORS);
++	if (!ntty_driver)
++		return -ENOMEM;
 +
-+	/* do hardreset */
-+	rc = sata_link_hardreset(link, timing, deadline);
-+	if (rc) {
-+		ata_link_printk(link, KERN_ERR,
-+				"COMRESET failed (errno=%d)\n", rc);
-+		return rc;
++	ntty_driver->owner = THIS_MODULE;
++	ntty_driver->driver_name = NOZOMI_NAME_TTY;
++	ntty_driver->name = "noz";
++	ntty_driver->major = 0;
++	ntty_driver->type = TTY_DRIVER_TYPE_SERIAL;
++	ntty_driver->subtype = SERIAL_TYPE_NORMAL;
++	ntty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
++	ntty_driver->init_termios = tty_std_termios;
++	ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD | \
++						HUPCL | CLOCAL;
++	ntty_driver->init_termios.c_ispeed = 115200;
++	ntty_driver->init_termios.c_ospeed = 115200;
++	tty_set_operations(ntty_driver, &tty_ops);
++
++	ret = tty_register_driver(ntty_driver);
++	if (ret) {
++		printk(KERN_ERR "Nozomi: failed to register ntty driver\n");
++		goto free_tty;
 +	}
 +
-+	/* TODO: phy layer with polling, timeouts, etc. */
-+	if (ata_link_offline(link)) {
-+		*class = ATA_DEV_NONE;
-+		return 0;
++	ret = pci_register_driver(&nozomi_driver);
++	if (ret) {
++		printk(KERN_ERR "Nozomi: can't register pci driver\n");
++		goto unr_tty;
 +	}
 +
-+	return -EAGAIN;
++	return 0;
++unr_tty:
++	tty_unregister_driver(ntty_driver);
++free_tty:
++	put_tty_driver(ntty_driver);
++	return ret;
 +}
 +
-+static void piix_sidpr_error_handler(struct ata_port *ap)
++static __exit void nozomi_exit(void)
 +{
-+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
-+			   piix_sidpr_hardreset, ata_std_postreset);
++	printk(KERN_INFO "Unloading %s\n", DRIVER_DESC);
++	pci_unregister_driver(&nozomi_driver);
++	tty_unregister_driver(ntty_driver);
++	put_tty_driver(ntty_driver);
 +}
 +
- #ifdef CONFIG_PM
- static int piix_broken_suspend(void)
- {
-@@ -1034,6 +1222,13 @@ static int piix_broken_suspend(void)
- 			},
- 		},
- 		{
-+			.ident = "TECRA M6",
-+			.matches = {
-+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
-+			},
-+		},
-+		{
- 			.ident = "TECRA M7",
- 			.matches = {
- 				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-@@ -1048,6 +1243,13 @@ static int piix_broken_suspend(void)
- 			},
- 		},
- 		{
-+			.ident = "Satellite R20",
-+			.matches = {
-+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
-+			},
-+		},
-+		{
- 			.ident = "Satellite R25",
- 			.matches = {
- 				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-@@ -1253,10 +1455,10 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
- 	return no_piix_dma;
- }
++module_init(nozomi_init);
++module_exit(nozomi_exit);
++
++module_param(debug, int, S_IRUGO | S_IWUSR);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION(DRIVER_DESC);
+diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
+index 0c66b80..78b151c 100644
+--- a/drivers/char/rtc.c
++++ b/drivers/char/rtc.c
+@@ -1,5 +1,5 @@
+ /*
+- *	Real Time Clock interface for Linux	
++ *	Real Time Clock interface for Linux
+  *
+  *	Copyright (C) 1996 Paul Gortmaker
+  *
+@@ -17,7 +17,7 @@
+  *	has been received. If a RTC interrupt has already happened,
+  *	it will output an unsigned long and then block. The output value
+  *	contains the interrupt status in the low byte and the number of
+- *	interrupts since the last read in the remaining high bytes. The 
++ *	interrupts since the last read in the remaining high bytes. The
+  *	/dev/rtc interface can also be used with the select(2) call.
+  *
+  *	This program is free software; you can redistribute it and/or
+@@ -104,12 +104,14 @@ static int rtc_has_irq = 1;
  
--static void __devinit piix_init_pcs(struct pci_dev *pdev,
--				    struct ata_port_info *pinfo,
-+static void __devinit piix_init_pcs(struct ata_host *host,
- 				    const struct piix_map_db *map_db)
+ #ifndef CONFIG_HPET_EMULATE_RTC
+ #define is_hpet_enabled()			0
+-#define hpet_set_alarm_time(hrs, min, sec) 	0
+-#define hpet_set_periodic_freq(arg) 		0
+-#define hpet_mask_rtc_irq_bit(arg) 		0
+-#define hpet_set_rtc_irq_bit(arg) 		0
+-#define hpet_rtc_timer_init() 			do { } while (0)
+-#define hpet_rtc_dropped_irq() 			0
++#define hpet_set_alarm_time(hrs, min, sec)	0
++#define hpet_set_periodic_freq(arg)		0
++#define hpet_mask_rtc_irq_bit(arg)		0
++#define hpet_set_rtc_irq_bit(arg)		0
++#define hpet_rtc_timer_init()			do { } while (0)
++#define hpet_rtc_dropped_irq()			0
++#define hpet_register_irq_handler(h)		0
++#define hpet_unregister_irq_handler(h)		0
+ #ifdef RTC_IRQ
+ static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
  {
-+	struct pci_dev *pdev = to_pci_dev(host->dev);
- 	u16 pcs, new_pcs;
+@@ -147,7 +149,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file,
+ static unsigned int rtc_poll(struct file *file, poll_table *wait);
+ #endif
  
- 	pci_read_config_word(pdev, ICH5_PCS, &pcs);
-@@ -1270,11 +1472,10 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
- 	}
- }
+-static void get_rtc_alm_time (struct rtc_time *alm_tm);
++static void get_rtc_alm_time(struct rtc_time *alm_tm);
+ #ifdef RTC_IRQ
+ static void set_rtc_irq_bit_locked(unsigned char bit);
+ static void mask_rtc_irq_bit_locked(unsigned char bit);
+@@ -185,9 +187,9 @@ static int rtc_proc_open(struct inode *inode, struct file *file);
+  * rtc_status but before mod_timer is called, which would then reenable the
+  * timer (but you would need to have an awful timing before you'd trip on it)
+  */
+-static unsigned long rtc_status = 0;	/* bitmapped status byte.	*/
+-static unsigned long rtc_freq = 0;	/* Current periodic IRQ rate	*/
+-static unsigned long rtc_irq_data = 0;	/* our output to the world	*/
++static unsigned long rtc_status;	/* bitmapped status byte.	*/
++static unsigned long rtc_freq;		/* Current periodic IRQ rate	*/
++static unsigned long rtc_irq_data;	/* our output to the world	*/
+ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
  
--static void __devinit piix_init_sata_map(struct pci_dev *pdev,
--					 struct ata_port_info *pinfo,
--					 const struct piix_map_db *map_db)
-+static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
-+					       struct ata_port_info *pinfo,
-+					       const struct piix_map_db *map_db)
- {
--	struct piix_host_priv *hpriv = pinfo[0].private_data;
- 	const int *map;
- 	int i, invalid_map = 0;
- 	u8 map_value;
-@@ -1298,7 +1499,6 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
- 		case IDE:
- 			WARN_ON((i & 1) || map[i + 1] != IDE);
- 			pinfo[i / 2] = piix_port_info[ich_pata_100];
--			pinfo[i / 2].private_data = hpriv;
- 			i++;
- 			printk(" IDE IDE");
- 			break;
-@@ -1316,7 +1516,33 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
- 		dev_printk(KERN_ERR, &pdev->dev,
- 			   "invalid MAP value %u\n", map_value);
+ #ifdef RTC_IRQ
+@@ -195,7 +197,7 @@ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
+  * rtc_task_lock nests inside rtc_lock.
+  */
+ static DEFINE_SPINLOCK(rtc_task_lock);
+-static rtc_task_t *rtc_callback = NULL;
++static rtc_task_t *rtc_callback;
+ #endif
  
--	hpriv->map = map;
-+	return map;
-+}
-+
-+static void __devinit piix_init_sidpr(struct ata_host *host)
-+{
-+	struct pci_dev *pdev = to_pci_dev(host->dev);
-+	struct piix_host_priv *hpriv = host->private_data;
-+	int i;
-+
-+	/* check for availability */
-+	for (i = 0; i < 4; i++)
-+		if (hpriv->map[i] == IDE)
-+			return;
-+
-+	if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
-+		return;
-+
-+	if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
-+	    pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
-+		return;
-+
-+	if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
-+		return;
-+
-+	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
-+	host->ports[0]->ops = &piix_sidpr_sata_ops;
-+	host->ports[1]->ops = &piix_sidpr_sata_ops;
- }
+ /*
+@@ -205,7 +207,7 @@ static rtc_task_t *rtc_callback = NULL;
  
- static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
-@@ -1375,8 +1601,10 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- 	struct device *dev = &pdev->dev;
- 	struct ata_port_info port_info[2];
- 	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
--	struct piix_host_priv *hpriv;
- 	unsigned long port_flags;
-+	struct ata_host *host;
-+	struct piix_host_priv *hpriv;
-+	int rc;
+ static unsigned long epoch = 1900;	/* year corresponding to 0x00	*/
  
- 	if (!printed_version++)
- 		dev_printk(KERN_DEBUG, &pdev->dev,
-@@ -1386,17 +1614,31 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- 	if (!in_module_init)
- 		return -ENODEV;
+-static const unsigned char days_in_mo[] = 
++static const unsigned char days_in_mo[] =
+ {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
  
-+	port_info[0] = piix_port_info[ent->driver_data];
-+	port_info[1] = piix_port_info[ent->driver_data];
-+
-+	port_flags = port_info[0].flags;
-+
-+	/* enable device and prepare host */
-+	rc = pcim_enable_device(pdev);
-+	if (rc)
-+		return rc;
-+
-+	/* SATA map init can change port_info, do it before prepping host */
- 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
- 	if (!hpriv)
- 		return -ENOMEM;
+ /*
+@@ -242,7 +244,7 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
+ 	 *	the last read in the remainder of rtc_irq_data.
+ 	 */
  
--	port_info[0] = piix_port_info[ent->driver_data];
--	port_info[1] = piix_port_info[ent->driver_data];
--	port_info[0].private_data = hpriv;
--	port_info[1].private_data = hpriv;
-+	if (port_flags & ATA_FLAG_SATA)
-+		hpriv->map = piix_init_sata_map(pdev, port_info,
-+					piix_map_db_table[ent->driver_data]);
+-	spin_lock (&rtc_lock);
++	spin_lock(&rtc_lock);
+ 	rtc_irq_data += 0x100;
+ 	rtc_irq_data &= ~0xff;
+ 	if (is_hpet_enabled()) {
+@@ -259,16 +261,16 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
+ 	if (rtc_status & RTC_TIMER_ON)
+ 		mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
  
--	port_flags = port_info[0].flags;
-+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
-+	if (rc)
-+		return rc;
-+	host->private_data = hpriv;
+-	spin_unlock (&rtc_lock);
++	spin_unlock(&rtc_lock);
  
-+	/* initialize controller */
- 	if (port_flags & PIIX_FLAG_AHCI) {
- 		u8 tmp;
- 		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
-@@ -1407,12 +1649,9 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- 		}
- 	}
+ 	/* Now do the rest of the actions */
+ 	spin_lock(&rtc_task_lock);
+ 	if (rtc_callback)
+ 		rtc_callback->func(rtc_callback->private_data);
+ 	spin_unlock(&rtc_task_lock);
+-	wake_up_interruptible(&rtc_wait);	
++	wake_up_interruptible(&rtc_wait);
  
--	/* Initialize SATA map */
- 	if (port_flags & ATA_FLAG_SATA) {
--		piix_init_sata_map(pdev, port_info,
--				   piix_map_db_table[ent->driver_data]);
--		piix_init_pcs(pdev, port_info,
--			      piix_map_db_table[ent->driver_data]);
-+		piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
-+		piix_init_sidpr(host);
- 	}
+-	kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
++	kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
  
- 	/* apply IOCFG bit18 quirk */
-@@ -1431,12 +1670,14 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- 		/* This writes into the master table but it does not
- 		   really matter for this errata as we will apply it to
- 		   all the PIIX devices on the board */
--		port_info[0].mwdma_mask = 0;
--		port_info[0].udma_mask = 0;
--		port_info[1].mwdma_mask = 0;
--		port_info[1].udma_mask = 0;
-+		host->ports[0]->mwdma_mask = 0;
-+		host->ports[0]->udma_mask = 0;
-+		host->ports[1]->mwdma_mask = 0;
-+		host->ports[1]->udma_mask = 0;
- 	}
--	return ata_pci_init_one(pdev, ppi);
-+
-+	pci_set_master(pdev);
-+	return ata_pci_activate_sff_host(host, ata_interrupt, &piix_sht);
+ 	return IRQ_HANDLED;
  }
+@@ -335,7 +337,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
+ 	DECLARE_WAITQUEUE(wait, current);
+ 	unsigned long data;
+ 	ssize_t retval;
+-	
++
+ 	if (rtc_has_irq == 0)
+ 		return -EIO;
  
- static int __init piix_init(void)
-diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
-index 7bf4bef..9e8ec19 100644
---- a/drivers/ata/libata-acpi.c
-+++ b/drivers/ata/libata-acpi.c
-@@ -442,40 +442,77 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
- }
+@@ -358,11 +360,11 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
+ 		 * confusing. And no, xchg() is not the answer. */
  
- /**
-+ * ata_acpi_gtm_xfermode - determine xfermode from GTM parameter
-+ * @dev: target device
-+ * @gtm: GTM parameter to use
-+ *
-+ * Determine xfermask for @dev from @gtm.
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * RETURNS:
-+ * Determined xfermask.
-+ */
-+unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
-+				    const struct ata_acpi_gtm *gtm)
-+{
-+	unsigned long xfer_mask = 0;
-+	unsigned int type;
-+	int unit;
-+	u8 mode;
-+
-+	/* we always use the 0 slot for crap hardware */
-+	unit = dev->devno;
-+	if (!(gtm->flags & 0x10))
-+		unit = 0;
-+
-+	/* PIO */
-+	mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, gtm->drive[unit].pio);
-+	xfer_mask |= ata_xfer_mode2mask(mode);
-+
-+	/* See if we have MWDMA or UDMA data. We don't bother with
-+	 * MWDMA if UDMA is available as this means the BIOS set UDMA
-+	 * and our error changedown if it works is UDMA to PIO anyway.
-+	 */
-+	if (!(gtm->flags & (1 << (2 * unit))))
-+		type = ATA_SHIFT_MWDMA;
-+	else
-+		type = ATA_SHIFT_UDMA;
-+
-+	mode = ata_timing_cycle2mode(type, gtm->drive[unit].dma);
-+	xfer_mask |= ata_xfer_mode2mask(mode);
-+
-+	return xfer_mask;
-+}
-+EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
+ 		__set_current_state(TASK_INTERRUPTIBLE);
+-		
+-		spin_lock_irq (&rtc_lock);
 +
-+/**
-  * ata_acpi_cbl_80wire		-	Check for 80 wire cable
-  * @ap: Port to check
-+ * @gtm: GTM data to use
-  *
-- * Return 1 if the ACPI mode data for this port indicates the BIOS selected
-- * an 80wire mode.
-+ * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
-  */
--
--int ata_acpi_cbl_80wire(struct ata_port *ap)
-+int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
++		spin_lock_irq(&rtc_lock);
+ 		data = rtc_irq_data;
+ 		rtc_irq_data = 0;
+-		spin_unlock_irq (&rtc_lock);
++		spin_unlock_irq(&rtc_lock);
+ 
+ 		if (data != 0)
+ 			break;
+@@ -378,10 +380,13 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
+ 		schedule();
+ 	} while (1);
+ 
+-	if (count == sizeof(unsigned int))
+-		retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int);
+-	else
+-		retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long);
++	if (count == sizeof(unsigned int)) {
++		retval = put_user(data,
++				  (unsigned int __user *)buf) ?: sizeof(int);
++	} else {
++		retval = put_user(data,
++				  (unsigned long __user *)buf) ?: sizeof(long);
++	}
+ 	if (!retval)
+ 		retval = count;
+  out:
+@@ -394,7 +399,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
+ 
+ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
  {
--	const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
--	int valid = 0;
-+	struct ata_device *dev;
+-	struct rtc_time wtime; 
++	struct rtc_time wtime;
  
--	if (!gtm)
--		return 0;
-+	ata_link_for_each_dev(dev, &ap->link) {
-+		unsigned long xfer_mask, udma_mask;
+ #ifdef RTC_IRQ
+ 	if (rtc_has_irq == 0) {
+@@ -426,35 +431,41 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+ 	}
+ 	case RTC_PIE_OFF:	/* Mask periodic int. enab. bit	*/
+ 	{
+-		unsigned long flags; /* can be called from isr via rtc_control() */
+-		spin_lock_irqsave (&rtc_lock, flags);
++		/* can be called from isr via rtc_control() */
++		unsigned long flags;
 +
-+		if (!ata_dev_enabled(dev))
-+			continue;
++		spin_lock_irqsave(&rtc_lock, flags);
+ 		mask_rtc_irq_bit_locked(RTC_PIE);
+ 		if (rtc_status & RTC_TIMER_ON) {
+ 			rtc_status &= ~RTC_TIMER_ON;
+ 			del_timer(&rtc_irq_timer);
+ 		}
+-		spin_unlock_irqrestore (&rtc_lock, flags);
++		spin_unlock_irqrestore(&rtc_lock, flags);
 +
-+		xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
-+		ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
+ 		return 0;
+ 	}
+ 	case RTC_PIE_ON:	/* Allow periodic ints		*/
+ 	{
+-		unsigned long flags; /* can be called from isr via rtc_control() */
++		/* can be called from isr via rtc_control() */
++		unsigned long flags;
 +
-+		if (udma_mask & ~ATA_UDMA_MASK_40C)
-+			return 1;
-+	}
+ 		/*
+ 		 * We don't really want Joe User enabling more
+ 		 * than 64Hz of interrupts on a multi-user machine.
+ 		 */
+ 		if (!kernel && (rtc_freq > rtc_max_user_freq) &&
+-			(!capable(CAP_SYS_RESOURCE)))
++						(!capable(CAP_SYS_RESOURCE)))
+ 			return -EACCES;
  
--	/* Split timing, DMA enabled */
--	if ((gtm->flags & 0x11) == 0x11 && gtm->drive[0].dma < 55)
--		valid |= 1;
--	if ((gtm->flags & 0x14) == 0x14 && gtm->drive[1].dma < 55)
--		valid |= 2;
--	/* Shared timing, DMA enabled */
--	if ((gtm->flags & 0x11) == 0x01 && gtm->drive[0].dma < 55)
--		valid |= 1;
--	if ((gtm->flags & 0x14) == 0x04 && gtm->drive[0].dma < 55)
--		valid |= 2;
+-		spin_lock_irqsave (&rtc_lock, flags);
++		spin_lock_irqsave(&rtc_lock, flags);
+ 		if (!(rtc_status & RTC_TIMER_ON)) {
+ 			mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
+ 					2*HZ/100);
+ 			rtc_status |= RTC_TIMER_ON;
+ 		}
+ 		set_rtc_irq_bit_locked(RTC_PIE);
+-		spin_unlock_irqrestore (&rtc_lock, flags);
++		spin_unlock_irqrestore(&rtc_lock, flags);
++
+ 		return 0;
+ 	}
+ 	case RTC_UIE_OFF:	/* Mask ints from RTC updates.	*/
+@@ -477,7 +488,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+ 		 */
+ 		memset(&wtime, 0, sizeof(struct rtc_time));
+ 		get_rtc_alm_time(&wtime);
+-		break; 
++		break;
+ 	}
+ 	case RTC_ALM_SET:	/* Store a time into the alarm */
+ 	{
+@@ -505,16 +516,21 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+ 			 */
+ 		}
+ 		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
+-		    RTC_ALWAYS_BCD)
+-		{
+-			if (sec < 60) BIN_TO_BCD(sec);
+-			else sec = 0xff;
 -
--	/* Drive check */
--	if ((valid & 1) && ata_dev_enabled(&ap->link.device[0]))
--		return 1;
--	if ((valid & 2) && ata_dev_enabled(&ap->link.device[1]))
--		return 1;
- 	return 0;
- }
+-			if (min < 60) BIN_TO_BCD(min);
+-			else min = 0xff;
 -
- EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
- 
- static void ata_acpi_gtf_to_tf(struct ata_device *dev,
-@@ -776,6 +813,36 @@ void ata_acpi_on_resume(struct ata_port *ap)
- }
- 
- /**
-+ * ata_acpi_set_state - set the port power state
-+ * @ap: target ATA port
-+ * @state: state, on/off
-+ *
-+ * This function executes the _PS0/_PS3 ACPI method to set the power state.
-+ * ACPI spec requires _PS0 when IDE power on and _PS3 when power off
-+ */
-+void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
-+{
-+	struct ata_device *dev;
-+
-+	if (!ap->acpi_handle || (ap->flags & ATA_FLAG_ACPI_SATA))
-+		return;
-+
-+	/* channel first and then drives for power on and vica versa
-+	   for power off */
-+	if (state.event == PM_EVENT_ON)
-+		acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D0);
+-			if (hrs < 24) BIN_TO_BCD(hrs);
+-			else hrs = 0xff;
++							RTC_ALWAYS_BCD) {
++			if (sec < 60)
++				BIN_TO_BCD(sec);
++			else
++				sec = 0xff;
 +
-+	ata_link_for_each_dev(dev, &ap->link) {
-+		if (dev->acpi_handle && ata_dev_enabled(dev))
-+			acpi_bus_set_power(dev->acpi_handle,
-+				state.event == PM_EVENT_ON ?
-+					ACPI_STATE_D0 : ACPI_STATE_D3);
-+	}
-+	if (state.event != PM_EVENT_ON)
-+		acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D3);
-+}
++			if (min < 60)
++				BIN_TO_BCD(min);
++			else
++				min = 0xff;
 +
-+/**
-  * ata_acpi_on_devcfg - ATA ACPI hook called on device donfiguration
-  * @dev: target ATA device
-  *
-diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 6380726..bdbd55a 100644
---- a/drivers/ata/libata-core.c
-+++ b/drivers/ata/libata-core.c
-@@ -119,6 +119,10 @@ int libata_noacpi = 0;
- module_param_named(noacpi, libata_noacpi, int, 0444);
- MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
++			if (hrs < 24)
++				BIN_TO_BCD(hrs);
++			else
++				hrs = 0xff;
+ 		}
+ 		CMOS_WRITE(hrs, RTC_HOURS_ALARM);
+ 		CMOS_WRITE(min, RTC_MINUTES_ALARM);
+@@ -563,11 +579,12 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
  
-+int libata_allow_tpm = 0;
-+module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
-+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
+ 		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
+ 			return -EINVAL;
+-			
 +
- MODULE_AUTHOR("Jeff Garzik");
- MODULE_DESCRIPTION("Library module for ATA devices");
- MODULE_LICENSE("GPL");
-@@ -450,9 +454,9 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
-  *	RETURNS:
-  *	Packed xfer_mask.
-  */
--static unsigned int ata_pack_xfermask(unsigned int pio_mask,
--				      unsigned int mwdma_mask,
--				      unsigned int udma_mask)
-+unsigned long ata_pack_xfermask(unsigned long pio_mask,
-+				unsigned long mwdma_mask,
-+				unsigned long udma_mask)
- {
- 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
- 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
-@@ -469,10 +473,8 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask,
-  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
-  *	Any NULL distination masks will be ignored.
-  */
--static void ata_unpack_xfermask(unsigned int xfer_mask,
--				unsigned int *pio_mask,
--				unsigned int *mwdma_mask,
--				unsigned int *udma_mask)
-+void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
-+			 unsigned long *mwdma_mask, unsigned long *udma_mask)
- {
- 	if (pio_mask)
- 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
-@@ -486,9 +488,9 @@ static const struct ata_xfer_ent {
- 	int shift, bits;
- 	u8 base;
- } ata_xfer_tbl[] = {
--	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
--	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
--	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
-+	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
-+	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
-+	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
- 	{ -1, },
- };
+ 		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
+ 			return -EINVAL;
  
-@@ -503,9 +505,9 @@ static const struct ata_xfer_ent {
-  *	None.
-  *
-  *	RETURNS:
-- *	Matching XFER_* value, 0 if no match found.
-+ *	Matching XFER_* value, 0xff if no match found.
-  */
--static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
-+u8 ata_xfer_mask2mode(unsigned long xfer_mask)
- {
- 	int highbit = fls(xfer_mask) - 1;
- 	const struct ata_xfer_ent *ent;
-@@ -513,7 +515,7 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
- 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
- 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
- 			return ent->base + highbit - ent->shift;
--	return 0;
-+	return 0xff;
- }
+-		if ((yrs -= epoch) > 255)    /* They are unsigned */
++		yrs -= epoch;
++		if (yrs > 255)		/* They are unsigned */
+ 			return -EINVAL;
  
- /**
-@@ -528,13 +530,14 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
-  *	RETURNS:
-  *	Matching xfer_mask, 0 if no match found.
-  */
--static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
-+unsigned long ata_xfer_mode2mask(u8 xfer_mode)
- {
- 	const struct ata_xfer_ent *ent;
+ 		spin_lock_irq(&rtc_lock);
+@@ -635,9 +652,10 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+ 	{
+ 		int tmp = 0;
+ 		unsigned char val;
+-		unsigned long flags; /* can be called from isr via rtc_control() */
++		/* can be called from isr via rtc_control() */
++		unsigned long flags;
  
- 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
- 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
--			return 1 << (ent->shift + xfer_mode - ent->base);
-+			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
-+				& ~((1 << ent->shift) - 1);
- 	return 0;
+-		/* 
++		/*
+ 		 * The max we can do is 8192Hz.
+ 		 */
+ 		if ((arg < 2) || (arg > 8192))
+@@ -646,7 +664,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+ 		 * We don't really want Joe User generating more
+ 		 * than 64Hz of interrupts on a multi-user machine.
+ 		 */
+-		if (!kernel && (arg > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE)))
++		if (!kernel && (arg > rtc_max_user_freq) &&
++					!capable(CAP_SYS_RESOURCE))
+ 			return -EACCES;
+ 
+ 		while (arg > (1<<tmp))
+@@ -674,11 +693,11 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+ #endif
+ 	case RTC_EPOCH_READ:	/* Read the epoch.	*/
+ 	{
+-		return put_user (epoch, (unsigned long __user *)arg);
++		return put_user(epoch, (unsigned long __user *)arg);
+ 	}
+ 	case RTC_EPOCH_SET:	/* Set the epoch.	*/
+ 	{
+-		/* 
++		/*
+ 		 * There were no RTC clocks before 1900.
+ 		 */
+ 		if (arg < 1900)
+@@ -693,7 +712,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+ 	default:
+ 		return -ENOTTY;
+ 	}
+-	return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
++	return copy_to_user((void __user *)arg,
++			    &wtime, sizeof wtime) ? -EFAULT : 0;
  }
  
-@@ -550,7 +553,7 @@ static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
-  *	RETURNS:
-  *	Matching xfer_shift, -1 if no match found.
-  */
--static int ata_xfer_mode2shift(unsigned int xfer_mode)
-+int ata_xfer_mode2shift(unsigned long xfer_mode)
+ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+@@ -712,26 +732,25 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+  * needed here. Or anywhere else in this driver. */
+ static int rtc_open(struct inode *inode, struct file *file)
  {
- 	const struct ata_xfer_ent *ent;
+-	spin_lock_irq (&rtc_lock);
++	spin_lock_irq(&rtc_lock);
  
-@@ -574,7 +577,7 @@ static int ata_xfer_mode2shift(unsigned int xfer_mode)
-  *	Constant C string representing highest speed listed in
-  *	@mode_mask, or the constant C string "<n/a>".
-  */
--static const char *ata_mode_string(unsigned int xfer_mask)
-+const char *ata_mode_string(unsigned long xfer_mask)
- {
- 	static const char * const xfer_mode_str[] = {
- 		"PIO0",
-@@ -947,8 +950,8 @@ unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
- 	if (r_err)
- 		*r_err = err;
+-	if(rtc_status & RTC_IS_OPEN)
++	if (rtc_status & RTC_IS_OPEN)
+ 		goto out_busy;
  
--	/* see if device passed diags: if master then continue and warn later */
--	if (err == 0 && dev->devno == 0)
-+	/* see if device passed diags: continue and warn later */
-+	if (err == 0)
- 		/* diagnostic fail : do nothing _YET_ */
- 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
- 	else if (err == 1)
-@@ -1286,48 +1289,6 @@ static int ata_hpa_resize(struct ata_device *dev)
- }
+ 	rtc_status |= RTC_IS_OPEN;
  
- /**
-- *	ata_id_to_dma_mode	-	Identify DMA mode from id block
-- *	@dev: device to identify
-- *	@unknown: mode to assume if we cannot tell
-- *
-- *	Set up the timing values for the device based upon the identify
-- *	reported values for the DMA mode. This function is used by drivers
-- *	which rely upon firmware configured modes, but wish to report the
-- *	mode correctly when possible.
-- *
-- *	In addition we emit similarly formatted messages to the default
-- *	ata_dev_set_mode handler, in order to provide consistency of
-- *	presentation.
-- */
--
--void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
--{
--	unsigned int mask;
--	u8 mode;
--
--	/* Pack the DMA modes */
--	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
--	if (dev->id[53] & 0x04)
--		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
--
--	/* Select the mode in use */
--	mode = ata_xfer_mask2mode(mask);
--
--	if (mode != 0) {
--		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
--		       ata_mode_string(mask));
--	} else {
--		/* SWDMA perhaps ? */
--		mode = unknown;
--		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
--	}
--
--	/* Configure the device reporting */
--	dev->xfer_mode = mode;
--	dev->xfer_shift = ata_xfer_mode2shift(mode);
--}
--
--/**
-  *	ata_noop_dev_select - Select device 0/1 on ATA bus
-  *	@ap: ATA channel to manipulate
-  *	@device: ATA device (numbered from zero) to select
-@@ -1464,9 +1425,9 @@ static inline void ata_dump_id(const u16 *id)
-  *	RETURNS:
-  *	Computed xfermask
-  */
--static unsigned int ata_id_xfermask(const u16 *id)
-+unsigned long ata_id_xfermask(const u16 *id)
- {
--	unsigned int pio_mask, mwdma_mask, udma_mask;
-+	unsigned long pio_mask, mwdma_mask, udma_mask;
+ 	rtc_irq_data = 0;
+-	spin_unlock_irq (&rtc_lock);
++	spin_unlock_irq(&rtc_lock);
+ 	return 0;
  
- 	/* Usual case. Word 53 indicates word 64 is valid */
- 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
-@@ -1519,7 +1480,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
+ out_busy:
+-	spin_unlock_irq (&rtc_lock);
++	spin_unlock_irq(&rtc_lock);
+ 	return -EBUSY;
  }
  
- /**
-- *	ata_port_queue_task - Queue port_task
-+ *	ata_pio_queue_task - Queue port_task
-  *	@ap: The ata_port to queue port_task for
-  *	@fn: workqueue function to be scheduled
-  *	@data: data for @fn to use
-@@ -1531,16 +1492,15 @@ static unsigned int ata_id_xfermask(const u16 *id)
-  *	one task is active at any given time.
-  *
-  *	libata core layer takes care of synchronization between
-- *	port_task and EH.  ata_port_queue_task() may be ignored for EH
-+ *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
-  *	synchronization.
-  *
-  *	LOCKING:
-  *	Inherited from caller.
-  */
--void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
--			 unsigned long delay)
-+static void ata_pio_queue_task(struct ata_port *ap, void *data,
-+			       unsigned long delay)
+-static int rtc_fasync (int fd, struct file *filp, int on)
+-
++static int rtc_fasync(int fd, struct file *filp, int on)
  {
--	PREPARE_DELAYED_WORK(&ap->port_task, fn);
- 	ap->port_task_data = data;
- 
- 	/* may fail if ata_port_flush_task() in progress */
-@@ -2090,7 +2050,7 @@ int ata_dev_configure(struct ata_device *dev)
- 	struct ata_eh_context *ehc = &dev->link->eh_context;
- 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
- 	const u16 *id = dev->id;
--	unsigned int xfer_mask;
-+	unsigned long xfer_mask;
- 	char revbuf[7];		/* XYZ-99\0 */
- 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
- 	char modelbuf[ATA_ID_PROD_LEN+1];
-@@ -2161,8 +2121,14 @@ int ata_dev_configure(struct ata_device *dev)
- 					       "supports DRM functions and may "
- 					       "not be fully accessable.\n");
- 			snprintf(revbuf, 7, "CFA");
--		} else
-+		} else {
- 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
-+			/* Warn the user if the device has TPM extensions */
-+			if (ata_id_has_tpm(id))
-+				ata_dev_printk(dev, KERN_WARNING,
-+					       "supports DRM functions and may "
-+					       "not be fully accessable.\n");
-+		}
- 
- 		dev->n_sectors = ata_id_n_sectors(id);
+-	return fasync_helper (fd, filp, on, &rtc_async_queue);
++	return fasync_helper(fd, filp, on, &rtc_async_queue);
+ }
  
-@@ -2295,19 +2261,8 @@ int ata_dev_configure(struct ata_device *dev)
- 			dev->flags |= ATA_DFLAG_DIPM;
+ static int rtc_release(struct inode *inode, struct file *file)
+@@ -762,16 +781,16 @@ static int rtc_release(struct inode *inode, struct file *file)
  	}
+ 	spin_unlock_irq(&rtc_lock);
  
--	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
--		/* Let the user know. We don't want to disallow opens for
--		   rescue purposes, or in case the vendor is just a blithering
--		   idiot */
--		if (print_info) {
--			ata_dev_printk(dev, KERN_WARNING,
--"Drive reports diagnostics failure. This may indicate a drive\n");
--			ata_dev_printk(dev, KERN_WARNING,
--"fault or invalid emulation. Contact drive vendor for information.\n");
--		}
+-	if (file->f_flags & FASYNC) {
+-		rtc_fasync (-1, file, 0);
 -	}
--
--	/* limit bridge transfers to udma5, 200 sectors */
-+	/* Limit PATA drive on SATA cable bridge transfers to udma5,
-+	   200 sectors */
- 	if (ata_dev_knobble(dev)) {
- 		if (ata_msg_drv(ap) && print_info)
- 			ata_dev_printk(dev, KERN_INFO,
-@@ -2336,6 +2291,21 @@ int ata_dev_configure(struct ata_device *dev)
- 	if (ap->ops->dev_config)
- 		ap->ops->dev_config(dev);
++	if (file->f_flags & FASYNC)
++		rtc_fasync(-1, file, 0);
+ no_irq:
+ #endif
  
-+	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
-+		/* Let the user know. We don't want to disallow opens for
-+		   rescue purposes, or in case the vendor is just a blithering
-+		   idiot. Do this after the dev_config call as some controllers
-+		   with buggy firmware may want to avoid reporting false device
-+		   bugs */
-+
-+		if (print_info) {
-+			ata_dev_printk(dev, KERN_WARNING,
-+"Drive reports diagnostics failure. This may indicate a drive\n");
-+			ata_dev_printk(dev, KERN_WARNING,
-+"fault or invalid emulation. Contact drive vendor for information.\n");
-+		}
-+	}
+-	spin_lock_irq (&rtc_lock);
++	spin_lock_irq(&rtc_lock);
+ 	rtc_irq_data = 0;
+ 	rtc_status &= ~RTC_IS_OPEN;
+-	spin_unlock_irq (&rtc_lock);
++	spin_unlock_irq(&rtc_lock);
 +
- 	if (ata_msg_probe(ap))
- 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
- 			__FUNCTION__, ata_chk_status(ap));
-@@ -2387,6 +2357,18 @@ int ata_cable_unknown(struct ata_port *ap)
+ 	return 0;
  }
  
- /**
-+ *	ata_cable_ignore	-	return ignored PATA cable.
-+ *	@ap: port
-+ *
-+ *	Helper method for drivers which don't use cable type to limit
-+ *	transfer mode.
-+ */
-+int ata_cable_ignore(struct ata_port *ap)
-+{
-+	return ATA_CBL_PATA_IGN;
-+}
-+
-+/**
-  *	ata_cable_sata	-	return SATA cable type
-  *	@ap: port
-  *
-@@ -2781,38 +2763,33 @@ int sata_set_spd(struct ata_link *link)
-  */
+@@ -786,9 +805,9 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
  
- static const struct ata_timing ata_timing[] = {
-+/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
-+	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
-+	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
-+	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
-+	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
-+	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
-+	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
-+	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
+ 	poll_wait(file, &rtc_wait, wait);
  
--	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
--	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
--	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
--	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
-+	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
-+	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
-+	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
+-	spin_lock_irq (&rtc_lock);
++	spin_lock_irq(&rtc_lock);
+ 	l = rtc_irq_data;
+-	spin_unlock_irq (&rtc_lock);
++	spin_unlock_irq(&rtc_lock);
  
--	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
-+	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
-+	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
-+	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
- 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
--	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
--	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
--	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
-+	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
+ 	if (l != 0)
+ 		return POLLIN | POLLRDNORM;
+@@ -796,14 +815,6 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
+ }
+ #endif
  
- /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
--
--	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
--	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
--	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
--
--	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
--	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
--	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
--
--	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
--	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
--	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
--	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
+-/*
+- * exported stuffs
+- */
 -
--	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
--	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
--	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
+-EXPORT_SYMBOL(rtc_register);
+-EXPORT_SYMBOL(rtc_unregister);
+-EXPORT_SYMBOL(rtc_control);
 -
--/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
-+	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
-+	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
-+	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
-+	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
-+	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
-+	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
-+	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
- 
- 	{ 0xFF }
- };
-@@ -2845,14 +2822,16 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
- 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
+ int rtc_register(rtc_task_t *task)
+ {
+ #ifndef RTC_IRQ
+@@ -829,6 +840,7 @@ int rtc_register(rtc_task_t *task)
+ 	return 0;
+ #endif
  }
++EXPORT_SYMBOL(rtc_register);
  
--static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
-+const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
+ int rtc_unregister(rtc_task_t *task)
  {
--	const struct ata_timing *t;
-+	const struct ata_timing *t = ata_timing;
+@@ -845,7 +857,7 @@ int rtc_unregister(rtc_task_t *task)
+ 		return -ENXIO;
+ 	}
+ 	rtc_callback = NULL;
+-	
 +
-+	while (xfer_mode > t->mode)
-+		t++;
- 
--	for (t = ata_timing; t->mode != speed; t++)
--		if (t->mode == 0xFF)
--			return NULL;
--	return t;
-+	if (xfer_mode == t->mode)
-+		return t;
-+	return NULL;
+ 	/* disable controls */
+ 	if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
+ 		tmp = CMOS_READ(RTC_CONTROL);
+@@ -865,6 +877,7 @@ int rtc_unregister(rtc_task_t *task)
+ 	return 0;
+ #endif
  }
++EXPORT_SYMBOL(rtc_unregister);
  
- int ata_timing_compute(struct ata_device *adev, unsigned short speed,
-@@ -2927,6 +2906,57 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
+ {
+@@ -883,7 +896,7 @@ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
+ 	return rtc_do_ioctl(cmd, arg, 1);
+ #endif
  }
+-
++EXPORT_SYMBOL(rtc_control);
  
- /**
-+ *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
-+ *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
-+ *	@cycle: cycle duration in ns
-+ *
-+ *	Return matching xfer mode for @cycle.  The returned mode is of
-+ *	the transfer type specified by @xfer_shift.  If @cycle is too
-+ *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
-+ *	than the fastest known mode, the fasted mode is returned.
-+ *
-+ *	LOCKING:
-+ *	None.
-+ *
-+ *	RETURNS:
-+ *	Matching xfer_mode, 0xff if no match found.
-+ */
-+u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
-+{
-+	u8 base_mode = 0xff, last_mode = 0xff;
-+	const struct ata_xfer_ent *ent;
-+	const struct ata_timing *t;
+ /*
+  *	The various file operations we support.
+@@ -910,11 +923,11 @@ static struct miscdevice rtc_dev = {
+ 
+ #ifdef CONFIG_PROC_FS
+ static const struct file_operations rtc_proc_fops = {
+-	.owner = THIS_MODULE,
+-	.open = rtc_proc_open,
+-	.read  = seq_read,
+-	.llseek = seq_lseek,
+-	.release = single_release,
++	.owner		= THIS_MODULE,
++	.open		= rtc_proc_open,
++	.read		= seq_read,
++	.llseek		= seq_lseek,
++	.release	= single_release,
+ };
+ #endif
+ 
+@@ -965,7 +978,7 @@ static int __init rtc_init(void)
+ #ifdef CONFIG_SPARC32
+ 	for_each_ebus(ebus) {
+ 		for_each_ebusdev(edev, ebus) {
+-			if(strcmp(edev->prom_node->name, "rtc") == 0) {
++			if (strcmp(edev->prom_node->name, "rtc") == 0) {
+ 				rtc_port = edev->resource[0].start;
+ 				rtc_irq = edev->irqs[0];
+ 				goto found;
+@@ -986,7 +999,8 @@ found:
+ 	 * XXX Interrupt pin #7 in Espresso is shared between RTC and
+ 	 * PCI Slot 2 INTA# (and some INTx# in Slot 1).
+ 	 */
+-	if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) {
++	if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
++			(void *)&rtc_port)) {
+ 		rtc_has_irq = 0;
+ 		printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
+ 		return -EIO;
+@@ -1015,16 +1029,26 @@ no_irq:
+ 
+ #ifdef RTC_IRQ
+ 	if (is_hpet_enabled()) {
++		int err;
 +
-+	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
-+		if (ent->shift == xfer_shift)
-+			base_mode = ent->base;
+ 		rtc_int_handler_ptr = hpet_rtc_interrupt;
++		err = hpet_register_irq_handler(rtc_interrupt);
++		if (err != 0) {
++			printk(KERN_WARNING "hpet_register_irq_handler failed "
++					"in rtc_init().");
++			return err;
++		}
+ 	} else {
+ 		rtc_int_handler_ptr = rtc_interrupt;
+ 	}
+ 
+-	if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) {
++	if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED,
++			"rtc", NULL)) {
+ 		/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
+ 		rtc_has_irq = 0;
+ 		printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
+ 		rtc_release_region();
 +
-+	for (t = ata_timing_find_mode(base_mode);
-+	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
-+		unsigned short this_cycle;
+ 		return -EIO;
+ 	}
+ 	hpet_rtc_timer_init();
+@@ -1036,6 +1060,7 @@ no_irq:
+ 	if (misc_register(&rtc_dev)) {
+ #ifdef RTC_IRQ
+ 		free_irq(RTC_IRQ, NULL);
++		hpet_unregister_irq_handler(rtc_interrupt);
+ 		rtc_has_irq = 0;
+ #endif
+ 		rtc_release_region();
+@@ -1052,21 +1077,21 @@ no_irq:
+ 
+ #if defined(__alpha__) || defined(__mips__)
+ 	rtc_freq = HZ;
+-	
 +
-+		switch (xfer_shift) {
-+		case ATA_SHIFT_PIO:
-+		case ATA_SHIFT_MWDMA:
-+			this_cycle = t->cycle;
-+			break;
-+		case ATA_SHIFT_UDMA:
-+			this_cycle = t->udma;
-+			break;
-+		default:
-+			return 0xff;
-+		}
+ 	/* Each operating system on an Alpha uses its own epoch.
+ 	   Let's try to guess which one we are using now. */
+-	
 +
-+		if (cycle > this_cycle)
-+			break;
+ 	if (rtc_is_updating() != 0)
+ 		msleep(20);
+-	
 +
-+		last_mode = t->mode;
-+	}
+ 	spin_lock_irq(&rtc_lock);
+ 	year = CMOS_READ(RTC_YEAR);
+ 	ctrl = CMOS_READ(RTC_CONTROL);
+ 	spin_unlock_irq(&rtc_lock);
+-	
 +
-+	return last_mode;
-+}
+ 	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ 		BCD_TO_BIN(year);       /* This should never happen... */
+-	
 +
-+/**
-  *	ata_down_xfermask_limit - adjust dev xfer masks downward
-  *	@dev: Device to adjust xfer masks
-  *	@sel: ATA_DNXFER_* selector
-@@ -2944,8 +2974,8 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
- int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
+ 	if (year < 20) {
+ 		epoch = 2000;
+ 		guess = "SRM (post-2000)";
+@@ -1087,7 +1112,8 @@ no_irq:
+ #endif
+ 	}
+ 	if (guess)
+-		printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch);
++		printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
++			guess, epoch);
+ #endif
+ #ifdef RTC_IRQ
+ 	if (rtc_has_irq == 0)
+@@ -1096,8 +1122,12 @@ no_irq:
+ 	spin_lock_irq(&rtc_lock);
+ 	rtc_freq = 1024;
+ 	if (!hpet_set_periodic_freq(rtc_freq)) {
+-		/* Initialize periodic freq. to CMOS reset default, which is 1024Hz */
+-		CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT);
++		/*
++		 * Initialize periodic frequency to CMOS reset default,
++		 * which is 1024Hz
++		 */
++		CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
++			   RTC_FREQ_SELECT);
+ 	}
+ 	spin_unlock_irq(&rtc_lock);
+ no_irq2:
+@@ -1110,20 +1140,22 @@ no_irq2:
+ 	return 0;
+ }
+ 
+-static void __exit rtc_exit (void)
++static void __exit rtc_exit(void)
  {
- 	char buf[32];
--	unsigned int orig_mask, xfer_mask;
--	unsigned int pio_mask, mwdma_mask, udma_mask;
-+	unsigned long orig_mask, xfer_mask;
-+	unsigned long pio_mask, mwdma_mask, udma_mask;
- 	int quiet, highbit;
+ 	cleanup_sysctl();
+-	remove_proc_entry ("driver/rtc", NULL);
++	remove_proc_entry("driver/rtc", NULL);
+ 	misc_deregister(&rtc_dev);
  
- 	quiet = !!(sel & ATA_DNXFER_QUIET);
-@@ -3039,7 +3069,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
+ #ifdef CONFIG_SPARC32
+ 	if (rtc_has_irq)
+-		free_irq (rtc_irq, &rtc_port);
++		free_irq(rtc_irq, &rtc_port);
+ #else
+ 	rtc_release_region();
+ #ifdef RTC_IRQ
+-	if (rtc_has_irq)
+-		free_irq (RTC_IRQ, NULL);
++	if (rtc_has_irq) {
++		free_irq(RTC_IRQ, NULL);
++		hpet_unregister_irq_handler(hpet_rtc_interrupt);
++	}
+ #endif
+ #endif /* CONFIG_SPARC32 */
+ }
+@@ -1133,14 +1165,14 @@ module_exit(rtc_exit);
  
- 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
- 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
--	if (dev->xfer_shift == ATA_SHIFT_MWDMA && 
-+	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
- 	    dev->dma_mode == XFER_MW_DMA_0 &&
- 	    (dev->id[63] >> 8) & 1)
- 		err_mask &= ~AC_ERR_DEV;
-@@ -3089,7 +3119,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+ #ifdef RTC_IRQ
+ /*
+- * 	At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
++ *	At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
+  *	(usually during an IDE disk interrupt, with IRQ unmasking off)
+  *	Since the interrupt handler doesn't get called, the IRQ status
+  *	byte doesn't get read, and the RTC stops generating interrupts.
+  *	A timer is set, and will call this function if/when that happens.
+  *	To get it out of this stalled state, we just read the status.
+  *	At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
+- *	(You *really* shouldn't be trying to use a non-realtime system 
++ *	(You *really* shouldn't be trying to use a non-realtime system
+  *	for something that requires a steady > 1KHz signal anyways.)
+  */
  
- 	/* step 1: calculate xfer_mask */
- 	ata_link_for_each_dev(dev, link) {
--		unsigned int pio_mask, dma_mask;
-+		unsigned long pio_mask, dma_mask;
- 		unsigned int mode_mask;
+@@ -1148,7 +1180,7 @@ static void rtc_dropped_irq(unsigned long data)
+ {
+ 	unsigned long freq;
  
- 		if (!ata_dev_enabled(dev))
-@@ -3115,7 +3145,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
- 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
+-	spin_lock_irq (&rtc_lock);
++	spin_lock_irq(&rtc_lock);
  
- 		found = 1;
--		if (dev->dma_mode)
-+		if (dev->dma_mode != 0xff)
- 			used_dma = 1;
+ 	if (hpet_rtc_dropped_irq()) {
+ 		spin_unlock_irq(&rtc_lock);
+@@ -1167,13 +1199,15 @@ static void rtc_dropped_irq(unsigned long data)
+ 
+ 	spin_unlock_irq(&rtc_lock);
+ 
+-	if (printk_ratelimit())
+-		printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq);
++	if (printk_ratelimit()) {
++		printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
++			freq);
++	}
+ 
+ 	/* Now we have new data */
+ 	wake_up_interruptible(&rtc_wait);
+ 
+-	kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
++	kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
+ }
+ #endif
+ 
+@@ -1277,7 +1311,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
+ 	 * can take just over 2ms. We wait 20ms. There is no need to
+ 	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
+ 	 * If you need to know *exactly* when a second has started, enable
+-	 * periodic update complete interrupts, (via ioctl) and then 
++	 * periodic update complete interrupts, (via ioctl) and then
+ 	 * immediately read /dev/rtc which will block until you get the IRQ.
+ 	 * Once the read clears, read the RTC time (again via ioctl). Easy.
+ 	 */
+@@ -1307,8 +1341,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
+ 	ctrl = CMOS_READ(RTC_CONTROL);
+ 	spin_unlock_irqrestore(&rtc_lock, flags);
+ 
+-	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+-	{
++	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ 		BCD_TO_BIN(rtc_tm->tm_sec);
+ 		BCD_TO_BIN(rtc_tm->tm_min);
+ 		BCD_TO_BIN(rtc_tm->tm_hour);
+@@ -1326,7 +1359,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
+ 	 * Account for differences between how the RTC uses the values
+ 	 * and how they are defined in a struct rtc_time;
+ 	 */
+-	if ((rtc_tm->tm_year += (epoch - 1900)) <= 69)
++	rtc_tm->tm_year += epoch - 1900;
++	if (rtc_tm->tm_year <= 69)
+ 		rtc_tm->tm_year += 100;
+ 
+ 	rtc_tm->tm_mon--;
+@@ -1347,8 +1381,7 @@ static void get_rtc_alm_time(struct rtc_time *alm_tm)
+ 	ctrl = CMOS_READ(RTC_CONTROL);
+ 	spin_unlock_irq(&rtc_lock);
+ 
+-	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+-	{
++	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ 		BCD_TO_BIN(alm_tm->tm_sec);
+ 		BCD_TO_BIN(alm_tm->tm_min);
+ 		BCD_TO_BIN(alm_tm->tm_hour);
+diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
+index 12ceed5..5732ca3 100644
+--- a/drivers/connector/cn_queue.c
++++ b/drivers/connector/cn_queue.c
+@@ -104,7 +104,6 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id
+ 		return -EINVAL;
  	}
- 	if (!found)
-@@ -3126,7 +3156,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
- 		if (!ata_dev_enabled(dev))
- 			continue;
  
--		if (!dev->pio_mode) {
-+		if (dev->pio_mode == 0xff) {
- 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
- 			rc = -EINVAL;
- 			goto out;
-@@ -3140,7 +3170,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+-	cbq->nls = dev->nls;
+ 	cbq->seq = 0;
+ 	cbq->group = cbq->id.id.idx;
  
- 	/* step 3: set host DMA timings */
- 	ata_link_for_each_dev(dev, link) {
--		if (!ata_dev_enabled(dev) || !dev->dma_mode)
-+		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
- 			continue;
+@@ -146,7 +145,6 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
+ 	spin_lock_init(&dev->queue_lock);
  
- 		dev->xfer_mode = dev->dma_mode;
-@@ -3173,31 +3203,6 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+ 	dev->nls = nls;
+-	dev->netlink_groups = 0;
+ 
+ 	dev->cn_queue = create_workqueue(dev->name);
+ 	if (!dev->cn_queue) {
+diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
+index bf9716b..fea2d3e 100644
+--- a/drivers/connector/connector.c
++++ b/drivers/connector/connector.c
+@@ -88,6 +88,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
+ 			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
+ 				found = 1;
+ 				group = __cbq->group;
++				break;
+ 			}
+ 		}
+ 		spin_unlock_bh(&dev->cbdev->queue_lock);
+@@ -181,33 +182,14 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
  }
  
- /**
-- *	ata_set_mode - Program timings and issue SET FEATURES - XFER
-- *	@link: link on which timings will be programmed
-- *	@r_failed_dev: out paramter for failed device
-- *
-- *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
-- *	ata_set_mode() fails, pointer to the failing device is
-- *	returned in @r_failed_dev.
-- *
-- *	LOCKING:
-- *	PCI/etc. bus probe sem.
-- *
-- *	RETURNS:
-- *	0 on success, negative errno otherwise
+ /*
+- * Skb receive helper - checks skb and msg size and calls callback
+- * helper.
 - */
--int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+-static int __cn_rx_skb(struct sk_buff *skb, struct nlmsghdr *nlh)
 -{
--	struct ata_port *ap = link->ap;
+-	u32 pid, uid, seq, group;
+-	struct cn_msg *msg;
 -
--	/* has private set_mode? */
--	if (ap->ops->set_mode)
--		return ap->ops->set_mode(link, r_failed_dev);
--	return ata_do_set_mode(link, r_failed_dev);
+-	pid = NETLINK_CREDS(skb)->pid;
+-	uid = NETLINK_CREDS(skb)->uid;
+-	seq = nlh->nlmsg_seq;
+-	group = NETLINK_CB((skb)).dst_group;
+-	msg = NLMSG_DATA(nlh);
+-
+-	return cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
 -}
 -
--/**
-  *	ata_tf_to_host - issue ATA taskfile to host controller
-  *	@ap: port to which command is being issued
-  *	@tf: ATA taskfile register set
-@@ -4363,7 +4368,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
- 	tf.feature = SETFEATURES_XFER;
- 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
- 	tf.protocol = ATA_PROT_NODATA;
--	tf.nsect = dev->xfer_mode;
-+	/* If we are using IORDY we must send the mode setting command */
-+	if (ata_pio_need_iordy(dev))
-+		tf.nsect = dev->xfer_mode;
-+	/* If the device has IORDY and the controller does not - turn it off */
-+ 	else if (ata_id_has_iordy(dev->id))
-+		tf.nsect = 0x01;
-+	else /* In the ancient relic department - skip all of this */
-+		return 0;
- 
- 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- 
-@@ -4462,17 +4474,13 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
- void ata_sg_clean(struct ata_queued_cmd *qc)
+-/*
+  * Main netlink receiving function.
+  *
+- * It checks skb and netlink header sizes and calls the skb receive
+- * helper with a shared skb.
++ * It checks skb, netlink header and msg sizes, and calls callback helper.
+  */
+ static void cn_rx_skb(struct sk_buff *__skb)
  {
- 	struct ata_port *ap = qc->ap;
--	struct scatterlist *sg = qc->__sg;
-+	struct scatterlist *sg = qc->sg;
- 	int dir = qc->dma_dir;
- 	void *pad_buf = NULL;
++	struct cn_msg *msg;
+ 	struct nlmsghdr *nlh;
+-	u32 len;
+ 	int err;
+ 	struct sk_buff *skb;
  
--	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
- 	WARN_ON(sg == NULL);
+@@ -223,11 +205,8 @@ static void cn_rx_skb(struct sk_buff *__skb)
+ 			return;
+ 		}
  
--	if (qc->flags & ATA_QCFLAG_SINGLE)
--		WARN_ON(qc->n_elem > 1);
+-		len = NLMSG_ALIGN(nlh->nlmsg_len);
+-		if (len > skb->len)
+-			len = skb->len;
 -
--	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
-+	VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
- 
- 	/* if we padded the buffer out to 32-bit bound, and data
- 	 * xfer direction is from-device, we must copy from the
-@@ -4481,31 +4489,20 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
- 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
- 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+-		err = __cn_rx_skb(skb, nlh);
++		msg = NLMSG_DATA(nlh);
++		err = cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
+ 		if (err < 0)
+ 			kfree_skb(skb);
+ 	}
+@@ -441,8 +420,7 @@ static int __devinit cn_init(void)
  
--	if (qc->flags & ATA_QCFLAG_SG) {
--		if (qc->n_elem)
--			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
--		/* restore last sg */
--		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
--		if (pad_buf) {
--			struct scatterlist *psg = &qc->pad_sgent;
--			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
--			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
--			kunmap_atomic(addr, KM_IRQ0);
--		}
--	} else {
--		if (qc->n_elem)
--			dma_unmap_single(ap->dev,
--				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
--				dir);
--		/* restore sg */
--		sg->length += qc->pad_len;
--		if (pad_buf)
--			memcpy(qc->buf_virt + sg->length - qc->pad_len,
--			       pad_buf, qc->pad_len);
-+	if (qc->mapped_n_elem)
-+		dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
-+	/* restore last sg */
-+	if (qc->last_sg)
-+		*qc->last_sg = qc->saved_last_sg;
-+	if (pad_buf) {
-+		struct scatterlist *psg = &qc->extra_sg[1];
-+		void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
-+		memcpy(addr + psg->offset, pad_buf, qc->pad_len);
-+		kunmap_atomic(addr, KM_IRQ0);
+ 	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
+ 	if (!dev->cbdev) {
+-		if (dev->nls->sk_socket)
+-			sock_release(dev->nls->sk_socket);
++		netlink_kernel_release(dev->nls);
+ 		return -EINVAL;
+ 	}
+ 	
+@@ -452,8 +430,7 @@ static int __devinit cn_init(void)
+ 	if (err) {
+ 		cn_already_initialized = 0;
+ 		cn_queue_free_dev(dev->cbdev);
+-		if (dev->nls->sk_socket)
+-			sock_release(dev->nls->sk_socket);
++		netlink_kernel_release(dev->nls);
+ 		return -EINVAL;
  	}
  
- 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
--	qc->__sg = NULL;
-+	qc->sg = NULL;
+@@ -468,8 +445,7 @@ static void __devexit cn_fini(void)
+ 
+ 	cn_del_callback(&dev->id);
+ 	cn_queue_free_dev(dev->cbdev);
+-	if (dev->nls->sk_socket)
+-		sock_release(dev->nls->sk_socket);
++	netlink_kernel_release(dev->nls);
  }
  
- /**
-@@ -4523,13 +4520,10 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
- {
- 	struct ata_port *ap = qc->ap;
- 	struct scatterlist *sg;
--	unsigned int idx;
-+	unsigned int si, pi;
+ subsys_initcall(cn_init);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 79581fa..b730d67 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -828,11 +828,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
+ 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
  
--	WARN_ON(qc->__sg == NULL);
--	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+ 	/* prepare interface data */
+-	policy->kobj.parent = &sys_dev->kobj;
+-	policy->kobj.ktype = &ktype_cpufreq;
+-	kobject_set_name(&policy->kobj, "cpufreq");
 -
--	idx = 0;
--	ata_for_each_sg(sg, qc) {
-+	pi = 0;
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		u32 addr, offset;
- 		u32 sg_len, len;
+-	ret = kobject_register(&policy->kobj);
++	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
++				   "cpufreq");
+ 	if (ret) {
+ 		unlock_policy_rwsem_write(cpu);
+ 		goto err_out_driver_exit;
+@@ -902,6 +899,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
+ 		goto err_out_unregister;
+ 	}
  
-@@ -4546,18 +4540,17 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
- 			if ((offset + sg_len) > 0x10000)
- 				len = 0x10000 - offset;
++	kobject_uevent(&policy->kobj, KOBJ_ADD);
+ 	module_put(cpufreq_driver->owner);
+ 	dprintk("initialization complete\n");
+ 	cpufreq_debug_enable_ratelimit();
+@@ -915,7 +913,7 @@ err_out_unregister:
+ 		cpufreq_cpu_data[j] = NULL;
+ 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  
--			ap->prd[idx].addr = cpu_to_le32(addr);
--			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
--			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
-+			ap->prd[pi].addr = cpu_to_le32(addr);
-+			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
-+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+-	kobject_unregister(&policy->kobj);
++	kobject_put(&policy->kobj);
+ 	wait_for_completion(&policy->kobj_unregister);
  
--			idx++;
-+			pi++;
- 			sg_len -= len;
- 			addr += len;
- 		}
- 	}
+ err_out_driver_exit:
+@@ -1032,8 +1030,6 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
  
--	if (idx)
--		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
- }
+ 	unlock_policy_rwsem_write(cpu);
  
- /**
-@@ -4577,13 +4570,10 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
- {
- 	struct ata_port *ap = qc->ap;
- 	struct scatterlist *sg;
--	unsigned int idx;
+-	kobject_unregister(&data->kobj);
 -
--	WARN_ON(qc->__sg == NULL);
--	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-+	unsigned int si, pi;
+ 	kobject_put(&data->kobj);
  
--	idx = 0;
--	ata_for_each_sg(sg, qc) {
-+	pi = 0;
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		u32 addr, offset;
- 		u32 sg_len, len, blen;
+ 	/* we need to make sure that the underlying kobj is actually
+@@ -1608,7 +1604,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
+ 	memcpy(&policy->cpuinfo, &data->cpuinfo,
+ 				sizeof(struct cpufreq_cpuinfo));
  
-@@ -4601,25 +4591,24 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
- 				len = 0x10000 - offset;
+-	if (policy->min > data->min && policy->min > policy->max) {
++	if (policy->min > data->max || policy->max < data->min) {
+ 		ret = -EINVAL;
+ 		goto error_out;
+ 	}
+diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
+index 0f3515e..088ea74 100644
+--- a/drivers/cpuidle/sysfs.c
++++ b/drivers/cpuidle/sysfs.c
+@@ -277,7 +277,7 @@ static struct kobj_type ktype_state_cpuidle = {
  
- 			blen = len & 0xffff;
--			ap->prd[idx].addr = cpu_to_le32(addr);
-+			ap->prd[pi].addr = cpu_to_le32(addr);
- 			if (blen == 0) {
- 			   /* Some PATA chipsets like the CS5530 can't
- 			      cope with 0x0000 meaning 64K as the spec says */
--				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
-+				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
- 				blen = 0x8000;
--				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
-+				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
- 			}
--			ap->prd[idx].flags_len = cpu_to_le32(blen);
--			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
-+			ap->prd[pi].flags_len = cpu_to_le32(blen);
-+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+ static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
+ {
+-	kobject_unregister(&device->kobjs[i]->kobj);
++	kobject_put(&device->kobjs[i]->kobj);
+ 	wait_for_completion(&device->kobjs[i]->kobj_unregister);
+ 	kfree(device->kobjs[i]);
+ 	device->kobjs[i] = NULL;
+@@ -300,14 +300,13 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
+ 		kobj->state = &device->states[i];
+ 		init_completion(&kobj->kobj_unregister);
  
--			idx++;
-+			pi++;
- 			sg_len -= len;
- 			addr += len;
+-		kobj->kobj.parent = &device->kobj;
+-		kobj->kobj.ktype = &ktype_state_cpuidle;
+-		kobject_set_name(&kobj->kobj, "state%d", i);
+-		ret = kobject_register(&kobj->kobj);
++		ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
++					   "state%d", i);
+ 		if (ret) {
+ 			kfree(kobj);
+ 			goto error_state;
  		}
++		kobject_uevent(&kobj->kobj, KOBJ_ADD);
+ 		device->kobjs[i] = kobj;
  	}
  
--	if (idx)
--		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
- }
- 
- /**
-@@ -4669,8 +4658,8 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
-  */
- static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
+@@ -339,12 +338,14 @@ int cpuidle_add_sysfs(struct sys_device *sysdev)
  {
--	if (qc->tf.protocol != ATA_PROT_ATAPI &&
--	    qc->tf.protocol != ATA_PROT_ATAPI_DMA)
-+	if (qc->tf.protocol != ATAPI_PROT_PIO &&
-+	    qc->tf.protocol != ATAPI_PROT_DMA)
- 		return 0;
+ 	int cpu = sysdev->id;
+ 	struct cpuidle_device *dev;
++	int error;
  
- 	if (qc->tf.flags & ATA_TFLAG_WRITE)
-@@ -4756,33 +4745,6 @@ void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
- void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+ 	dev = per_cpu(cpuidle_devices, cpu);
+-	dev->kobj.parent = &sysdev->kobj;
+-	dev->kobj.ktype = &ktype_cpuidle;
+-	kobject_set_name(&dev->kobj, "%s", "cpuidle");
+-	return kobject_register(&dev->kobj);
++	error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &sysdev->kobj,
++				     "cpuidle");
++	if (!error)
++		kobject_uevent(&dev->kobj, KOBJ_ADD);
++	return error;
+ }
  
  /**
-- *	ata_sg_init_one - Associate command with memory buffer
-- *	@qc: Command to be associated
-- *	@buf: Memory buffer
-- *	@buflen: Length of memory buffer, in bytes.
-- *
-- *	Initialize the data-related elements of queued_cmd @qc
-- *	to point to a single memory buffer, @buf of byte length @buflen.
-- *
-- *	LOCKING:
-- *	spin_lock_irqsave(host lock)
-- */
--
--void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
--{
--	qc->flags |= ATA_QCFLAG_SINGLE;
--
--	qc->__sg = &qc->sgent;
--	qc->n_elem = 1;
--	qc->orig_n_elem = 1;
--	qc->buf_virt = buf;
--	qc->nbytes = buflen;
--	qc->cursg = qc->__sg;
--
--	sg_init_one(&qc->sgent, buf, buflen);
--}
--
--/**
-  *	ata_sg_init - Associate command with scatter-gather table.
-  *	@qc: Command to be associated
-  *	@sg: Scatter-gather table.
-@@ -4795,84 +4757,103 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
-  *	LOCKING:
-  *	spin_lock_irqsave(host lock)
-  */
--
- void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
- 		 unsigned int n_elem)
- {
--	qc->flags |= ATA_QCFLAG_SG;
--	qc->__sg = sg;
-+	qc->sg = sg;
- 	qc->n_elem = n_elem;
--	qc->orig_n_elem = n_elem;
--	qc->cursg = qc->__sg;
-+	qc->cursg = qc->sg;
+@@ -357,5 +358,5 @@ void cpuidle_remove_sysfs(struct sys_device *sysdev)
+ 	struct cpuidle_device *dev;
+ 
+ 	dev = per_cpu(cpuidle_devices, cpu);
+-	kobject_unregister(&dev->kobj);
++	kobject_put(&dev->kobj);
  }
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index ddd3a25..6b658d8 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -48,8 +48,6 @@ config CRYPTO_DEV_PADLOCK_SHA
+ 	  If unsure say M. The compiled module will be
+ 	  called padlock-sha.ko
  
--/**
-- *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
-- *	@qc: Command with memory buffer to be mapped.
-- *
-- *	DMA-map the memory buffer associated with queued_cmd @qc.
-- *
-- *	LOCKING:
-- *	spin_lock_irqsave(host lock)
-- *
-- *	RETURNS:
-- *	Zero on success, negative on error.
-- */
+-source "arch/s390/crypto/Kconfig"
 -
--static int ata_sg_setup_one(struct ata_queued_cmd *qc)
-+static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
-+				       unsigned int *n_elem_extra,
-+				       unsigned int *nbytes_extra)
- {
- 	struct ata_port *ap = qc->ap;
--	int dir = qc->dma_dir;
--	struct scatterlist *sg = qc->__sg;
--	dma_addr_t dma_address;
--	int trim_sg = 0;
-+	unsigned int n_elem = qc->n_elem;
-+	struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
+ config CRYPTO_DEV_GEODE
+ 	tristate "Support for the Geode LX AES engine"
+ 	depends on X86_32 && PCI
+@@ -83,4 +81,82 @@ config ZCRYPT_MONOLITHIC
+ 	  that contains all parts of the crypto device driver (ap bus,
+ 	  request router and all the card drivers).
+ 
++config CRYPTO_SHA1_S390
++	tristate "SHA1 digest algorithm"
++	depends on S390
++	select CRYPTO_ALGAPI
++	help
++	  This is the s390 hardware accelerated implementation of the
++	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
++
++config CRYPTO_SHA256_S390
++	tristate "SHA256 digest algorithm"
++	depends on S390
++	select CRYPTO_ALGAPI
++	help
++	  This is the s390 hardware accelerated implementation of the
++	  SHA256 secure hash standard (DFIPS 180-2).
++
++	  This version of SHA implements a 256 bit hash with 128 bits of
++	  security against collision attacks.
++
++config CRYPTO_DES_S390
++	tristate "DES and Triple DES cipher algorithms"
++	depends on S390
++	select CRYPTO_ALGAPI
++	select CRYPTO_BLKCIPHER
++	help
++	  This us the s390 hardware accelerated implementation of the
++	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
 +
-+	*n_elem_extra = 0;
-+	*nbytes_extra = 0;
++config CRYPTO_AES_S390
++	tristate "AES cipher algorithms"
++	depends on S390
++	select CRYPTO_ALGAPI
++	select CRYPTO_BLKCIPHER
++	help
++	  This is the s390 hardware accelerated implementation of the
++	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
++	  algorithm.
 +
-+	/* needs padding? */
-+	qc->pad_len = qc->nbytes & 3;
++	  Rijndael appears to be consistently a very good performer in
++	  both hardware and software across a wide range of computing
++	  environments regardless of its use in feedback or non-feedback
++	  modes. Its key setup time is excellent, and its key agility is
++	  good. Rijndael's very low memory requirements make it very well
++	  suited for restricted-space environments, in which it also
++	  demonstrates excellent performance. Rijndael's operations are
++	  among the easiest to defend against power and timing attacks.
 +
-+	if (likely(!qc->pad_len))
-+		return n_elem;
++	  On s390 the System z9-109 currently only supports the key size
++	  of 128 bit.
 +
-+	/* locate last sg and save it */
-+	lsg = sg_last(qc->sg, n_elem);
-+	qc->last_sg = lsg;
-+	qc->saved_last_sg = *lsg;
++config S390_PRNG
++	tristate "Pseudo random number generator device driver"
++	depends on S390
++	default "m"
++	help
++	  Select this option if you want to use the s390 pseudo random number
++	  generator. The PRNG is part of the cryptographic processor functions
++	  and uses triple-DES to generate secure random numbers like the
++	  ANSI X9.17 standard. The PRNG is usable via the char device
++	  /dev/prandom.
 +
-+	sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
- 
--	/* we must lengthen transfers to end on a 32-bit boundary */
--	qc->pad_len = sg->length & 3;
- 	if (qc->pad_len) {
-+		struct scatterlist *psg = &qc->extra_sg[1];
- 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
--		struct scatterlist *psg = &qc->pad_sgent;
-+		unsigned int offset;
- 
- 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
- 
- 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
- 
--		if (qc->tf.flags & ATA_TFLAG_WRITE)
--			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
--			       qc->pad_len);
-+		/* psg->page/offset are used to copy to-be-written
-+		 * data in this function or read data in ata_sg_clean.
-+		 */
-+		offset = lsg->offset + lsg->length - qc->pad_len;
-+		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
-+			    qc->pad_len, offset_in_page(offset));
++config CRYPTO_DEV_HIFN_795X
++	tristate "Driver HIFN 795x crypto accelerator chips"
++	select CRYPTO_DES
++	select CRYPTO_ALGAPI
++	select CRYPTO_BLKCIPHER
++	select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
++	depends on PCI
++	help
++	  This option allows you to have support for HIFN 795x crypto adapters.
 +
-+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
-+			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
-+			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
-+			kunmap_atomic(addr, KM_IRQ0);
-+		}
- 
- 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
- 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
--		/* trim sg */
--		sg->length -= qc->pad_len;
--		if (sg->length == 0)
--			trim_sg = 1;
- 
--		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
--			sg->length, qc->pad_len);
--	}
-+		/* Trim the last sg entry and chain the original and
-+		 * padding sg lists.
-+		 *
-+		 * Because chaining consumes one sg entry, one extra
-+		 * sg entry is allocated and the last sg entry is
-+		 * copied to it if the length isn't zero after padded
-+		 * amount is removed.
-+		 *
-+		 * If the last sg entry is completely replaced by
-+		 * padding sg entry, the first sg entry is skipped
-+		 * while chaining.
-+		 */
-+		lsg->length -= qc->pad_len;
-+		if (lsg->length) {
-+			copy_lsg = &qc->extra_sg[0];
-+			tsg = &qc->extra_sg[0];
-+		} else {
-+			n_elem--;
-+			tsg = &qc->extra_sg[1];
-+		}
- 
--	if (trim_sg) {
--		qc->n_elem--;
--		goto skip_map;
--	}
-+		esg = &qc->extra_sg[1];
- 
--	dma_address = dma_map_single(ap->dev, qc->buf_virt,
--				     sg->length, dir);
--	if (dma_mapping_error(dma_address)) {
--		/* restore sg */
--		sg->length += qc->pad_len;
--		return -1;
-+		(*n_elem_extra)++;
-+		(*nbytes_extra) += 4 - qc->pad_len;
- 	}
- 
--	sg_dma_address(sg) = dma_address;
--	sg_dma_len(sg) = sg->length;
-+	if (copy_lsg)
-+		sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
- 
--skip_map:
--	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
--		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-+	sg_chain(lsg, 1, tsg);
-+	sg_mark_end(esg);
- 
--	return 0;
-+	/* sglist can't start with chaining sg entry, fast forward */
-+	if (qc->sg == lsg) {
-+		qc->sg = tsg;
-+		qc->cursg = tsg;
-+	}
++config CRYPTO_DEV_HIFN_795X_RNG
++	bool "HIFN 795x random number generator"
++	depends on CRYPTO_DEV_HIFN_795X
++	help
++	  Select this option if you want to enable the random number generator
++	  on the HIFN 795x crypto adapters.
 +
-+	return n_elem;
- }
+ endif # CRYPTO_HW
+diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
+index d070030..c0327f0 100644
+--- a/drivers/crypto/Makefile
++++ b/drivers/crypto/Makefile
+@@ -1,3 +1,4 @@
+ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
++obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
+index 711e246..4801162 100644
+--- a/drivers/crypto/geode-aes.c
++++ b/drivers/crypto/geode-aes.c
+@@ -13,44 +13,13 @@
+ #include <linux/crypto.h>
+ #include <linux/spinlock.h>
+ #include <crypto/algapi.h>
++#include <crypto/aes.h>
  
- /**
-@@ -4888,75 +4869,30 @@ skip_map:
-  *	Zero on success, negative on error.
-  *
-  */
--
- static int ata_sg_setup(struct ata_queued_cmd *qc)
- {
- 	struct ata_port *ap = qc->ap;
--	struct scatterlist *sg = qc->__sg;
--	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
--	int n_elem, pre_n_elem, dir, trim_sg = 0;
-+	unsigned int n_elem, n_elem_extra, nbytes_extra;
+ #include <asm/io.h>
+ #include <asm/delay.h>
  
- 	VPRINTK("ENTER, ata%u\n", ap->print_id);
--	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
+ #include "geode-aes.h"
  
--	/* we must lengthen transfers to end on a 32-bit boundary */
--	qc->pad_len = lsg->length & 3;
--	if (qc->pad_len) {
--		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
--		struct scatterlist *psg = &qc->pad_sgent;
--		unsigned int offset;
+-/* Register definitions */
 -
--		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
-+	n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);
- 
--		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+-#define AES_CTRLA_REG  0x0000
 -
--		/*
--		 * psg->page/offset are used to copy to-be-written
--		 * data in this function or read data in ata_sg_clean.
--		 */
--		offset = lsg->offset + lsg->length - qc->pad_len;
--		sg_init_table(psg, 1);
--		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
--				qc->pad_len, offset_in_page(offset));
+-#define AES_CTRL_START     0x01
+-#define AES_CTRL_DECRYPT   0x00
+-#define AES_CTRL_ENCRYPT   0x02
+-#define AES_CTRL_WRKEY     0x04
+-#define AES_CTRL_DCA       0x08
+-#define AES_CTRL_SCA       0x10
+-#define AES_CTRL_CBC       0x20
 -
--		if (qc->tf.flags & ATA_TFLAG_WRITE) {
--			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
--			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
--			kunmap_atomic(addr, KM_IRQ0);
-+	if (n_elem) {
-+		n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
-+		if (n_elem < 1) {
-+			/* restore last sg */
-+			if (qc->last_sg)
-+				*qc->last_sg = qc->saved_last_sg;
-+			return -1;
- 		}
+-#define AES_INTR_REG  0x0008
 -
--		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
--		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
--		/* trim last sg */
--		lsg->length -= qc->pad_len;
--		if (lsg->length == 0)
--			trim_sg = 1;
+-#define AES_INTRA_PENDING (1 << 16)
+-#define AES_INTRB_PENDING (1 << 17)
 -
--		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
--			qc->n_elem - 1, lsg->length, qc->pad_len);
--	}
+-#define AES_INTR_PENDING  (AES_INTRA_PENDING | AES_INTRB_PENDING)
+-#define AES_INTR_MASK     0x07
 -
--	pre_n_elem = qc->n_elem;
--	if (trim_sg && pre_n_elem)
--		pre_n_elem--;
+-#define AES_SOURCEA_REG   0x0010
+-#define AES_DSTA_REG      0x0014
+-#define AES_LENA_REG      0x0018
+-#define AES_WRITEKEY0_REG 0x0030
+-#define AES_WRITEIV0_REG  0x0040
 -
--	if (!pre_n_elem) {
--		n_elem = 0;
--		goto skip_map;
--	}
+-/*  A very large counter that is used to gracefully bail out of an
+- *  operation in case of trouble
+- */
 -
--	dir = qc->dma_dir;
--	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
--	if (n_elem < 1) {
--		/* restore last sg */
--		lsg->length += qc->pad_len;
--		return -1;
-+		DPRINTK("%d sg elements mapped\n", n_elem);
- 	}
- 
--	DPRINTK("%d sg elements mapped\n", n_elem);
+-#define AES_OP_TIMEOUT    0x50000
 -
--skip_map:
--	qc->n_elem = n_elem;
-+	qc->n_elem = qc->mapped_n_elem = n_elem;
-+	qc->n_elem += n_elem_extra;
-+	qc->nbytes += nbytes_extra;
-+	qc->flags |= ATA_QCFLAG_DMAMAP;
- 
- 	return 0;
- }
-@@ -4985,63 +4921,77 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+ /* Static structures */
  
- /**
-  *	ata_data_xfer - Transfer data by PIO
-- *	@adev: device to target
-+ *	@dev: device to target
-  *	@buf: data buffer
-  *	@buflen: buffer length
-- *	@write_data: read/write
-+ *	@rw: read/write
-  *
-  *	Transfer data from/to the device data register by PIO.
-  *
-  *	LOCKING:
-  *	Inherited from caller.
-+ *
-+ *	RETURNS:
-+ *	Bytes consumed.
-  */
--void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
--		   unsigned int buflen, int write_data)
-+unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
-+			   unsigned int buflen, int rw)
- {
--	struct ata_port *ap = adev->link->ap;
-+	struct ata_port *ap = dev->link->ap;
-+	void __iomem *data_addr = ap->ioaddr.data_addr;
- 	unsigned int words = buflen >> 1;
+ static void __iomem * _iobase;
+@@ -87,9 +56,10 @@ do_crypt(void *src, void *dst, int len, u32 flags)
+ 	/* Start the operation */
+ 	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
  
- 	/* Transfer multiple of 2 bytes */
--	if (write_data)
--		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
-+	if (rw == READ)
-+		ioread16_rep(data_addr, buf, words);
- 	else
--		ioread16_rep(ap->ioaddr.data_addr, buf, words);
-+		iowrite16_rep(data_addr, buf, words);
+-	do
++	do {
+ 		status = ioread32(_iobase + AES_INTR_REG);
+-	while(!(status & AES_INTRA_PENDING) && --counter);
++		cpu_relax();
++	} while(!(status & AES_INTRA_PENDING) && --counter);
  
- 	/* Transfer trailing 1 byte, if any. */
- 	if (unlikely(buflen & 0x01)) {
--		u16 align_buf[1] = { 0 };
-+		__le16 align_buf[1] = { 0 };
- 		unsigned char *trailing_buf = buf + buflen - 1;
+ 	/* Clear the event */
+ 	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
+@@ -101,6 +71,7 @@ geode_aes_crypt(struct geode_aes_op *op)
+ {
+ 	u32 flags = 0;
+ 	unsigned long iflags;
++	int ret;
  
--		if (write_data) {
--			memcpy(align_buf, trailing_buf, 1);
--			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
--		} else {
--			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
-+		if (rw == READ) {
-+			align_buf[0] = cpu_to_le16(ioread16(data_addr));
- 			memcpy(trailing_buf, align_buf, 1);
-+		} else {
-+			memcpy(align_buf, trailing_buf, 1);
-+			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
- 		}
-+		words++;
+ 	if (op->len == 0)
+ 		return 0;
+@@ -129,7 +100,8 @@ geode_aes_crypt(struct geode_aes_op *op)
+ 		_writefield(AES_WRITEKEY0_REG, op->key);
  	}
-+
-+	return words << 1;
- }
- 
- /**
-  *	ata_data_xfer_noirq - Transfer data by PIO
-- *	@adev: device to target
-+ *	@dev: device to target
-  *	@buf: data buffer
-  *	@buflen: buffer length
-- *	@write_data: read/write
-+ *	@rw: read/write
-  *
-  *	Transfer data from/to the device data register by PIO. Do the
-  *	transfer with interrupts disabled.
-  *
-  *	LOCKING:
-  *	Inherited from caller.
-+ *
-+ *	RETURNS:
-+ *	Bytes consumed.
-  */
--void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
--			 unsigned int buflen, int write_data)
-+unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
-+				 unsigned int buflen, int rw)
- {
- 	unsigned long flags;
-+	unsigned int consumed;
-+
- 	local_irq_save(flags);
--	ata_data_xfer(adev, buf, buflen, write_data);
-+	consumed = ata_data_xfer(dev, buf, buflen, rw);
- 	local_irq_restore(flags);
-+
-+	return consumed;
- }
  
+-	do_crypt(op->src, op->dst, op->len, flags);
++	ret = do_crypt(op->src, op->dst, op->len, flags);
++	BUG_ON(ret);
  
-@@ -5152,13 +5102,13 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
- 	ata_altstatus(ap); /* flush */
+ 	if (op->mode == AES_MODE_CBC)
+ 		_readfield(AES_WRITEIV0_REG, op->iv);
+@@ -141,18 +113,103 @@ geode_aes_crypt(struct geode_aes_op *op)
  
- 	switch (qc->tf.protocol) {
--	case ATA_PROT_ATAPI:
-+	case ATAPI_PROT_PIO:
- 		ap->hsm_task_state = HSM_ST;
- 		break;
--	case ATA_PROT_ATAPI_NODATA:
-+	case ATAPI_PROT_NODATA:
- 		ap->hsm_task_state = HSM_ST_LAST;
- 		break;
--	case ATA_PROT_ATAPI_DMA:
-+	case ATAPI_PROT_DMA:
- 		ap->hsm_task_state = HSM_ST_LAST;
- 		/* initiate bmdma */
- 		ap->ops->bmdma_start(qc);
-@@ -5300,12 +5250,15 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
- 	bytes = (bc_hi << 8) | bc_lo;
+ /* CRYPTO-API Functions */
  
- 	/* shall be cleared to zero, indicating xfer of data */
--	if (ireason & (1 << 0))
-+	if (unlikely(ireason & (1 << 0)))
- 		goto err_out;
+-static int
+-geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
++static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
++		unsigned int len)
+ {
+ 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++	unsigned int ret;
  
- 	/* make sure transfer direction matches expected */
- 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
--	if (do_write != i_write)
-+	if (unlikely(do_write != i_write))
-+		goto err_out;
+-	if (len != AES_KEY_LENGTH) {
++	op->keylen = len;
 +
-+	if (unlikely(!bytes))
- 		goto err_out;
- 
- 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
-@@ -5341,7 +5294,7 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *q
- 		    (qc->tf.flags & ATA_TFLAG_WRITE))
- 		    return 1;
- 
--		if (is_atapi_taskfile(&qc->tf) &&
-+		if (ata_is_atapi(qc->tf.protocol) &&
- 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- 			return 1;
- 	}
-@@ -5506,7 +5459,7 @@ fsm_start:
- 
- 	case HSM_ST:
- 		/* complete command or read/write the data register */
--		if (qc->tf.protocol == ATA_PROT_ATAPI) {
-+		if (qc->tf.protocol == ATAPI_PROT_PIO) {
- 			/* ATAPI PIO protocol */
- 			if ((status & ATA_DRQ) == 0) {
- 				/* No more data to transfer or device error.
-@@ -5664,7 +5617,7 @@ fsm_start:
- 		msleep(2);
- 		status = ata_busy_wait(ap, ATA_BUSY, 10);
- 		if (status & ATA_BUSY) {
--			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
-+			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
- 			return;
- 		}
++	if (len == AES_KEYSIZE_128) {
++		memcpy(op->key, key, len);
++		return 0;
++	}
++
++	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
++		/* not supported at all */
+ 		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ 		return -EINVAL;
  	}
-@@ -5805,6 +5758,22 @@ static void fill_result_tf(struct ata_queued_cmd *qc)
- 	ap->ops->tf_read(ap, &qc->result_tf);
- }
  
-+static void ata_verify_xfer(struct ata_queued_cmd *qc)
+-	memcpy(op->key, key, len);
+-	return 0;
++	/*
++	 * The requested key size is not supported by HW, do a fallback
++	 */
++	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
++	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
++
++	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
++	if (ret) {
++		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
++		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
++	}
++	return ret;
++}
++
++static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
++		unsigned int len)
 +{
-+	struct ata_device *dev = qc->dev;
++	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++	unsigned int ret;
 +
-+	if (ata_tag_internal(qc->tag))
-+		return;
++	op->keylen = len;
 +
-+	if (ata_is_nodata(qc->tf.protocol))
-+		return;
++	if (len == AES_KEYSIZE_128) {
++		memcpy(op->key, key, len);
++		return 0;
++	}
 +
-+	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
-+		return;
++	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
++		/* not supported at all */
++		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
++		return -EINVAL;
++	}
 +
-+	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
++	/*
++	 * The requested key size is not supported by HW, do a fallback
++	 */
++	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
++	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
++
++	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
++	if (ret) {
++		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
++		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
++	}
++	return ret;
 +}
 +
- /**
-  *	ata_qc_complete - Complete an active ATA command
-  *	@qc: Command to complete
-@@ -5876,6 +5845,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
- 			break;
- 		}
- 
-+		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
-+			ata_verify_xfer(qc);
++static int fallback_blk_dec(struct blkcipher_desc *desc,
++		struct scatterlist *dst, struct scatterlist *src,
++		unsigned int nbytes)
++{
++	unsigned int ret;
++	struct crypto_blkcipher *tfm;
++	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
 +
- 		__ata_qc_complete(qc);
- 	} else {
- 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
-@@ -5938,30 +5910,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
- 	return nr_done;
++	tfm = desc->tfm;
++	desc->tfm = op->fallback.blk;
++
++	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
++
++	desc->tfm = tfm;
++	return ret;
++}
++static int fallback_blk_enc(struct blkcipher_desc *desc,
++		struct scatterlist *dst, struct scatterlist *src,
++		unsigned int nbytes)
++{
++	unsigned int ret;
++	struct crypto_blkcipher *tfm;
++	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++
++	tfm = desc->tfm;
++	desc->tfm = op->fallback.blk;
++
++	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
++
++	desc->tfm = tfm;
++	return ret;
  }
  
--static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
--{
--	struct ata_port *ap = qc->ap;
--
--	switch (qc->tf.protocol) {
--	case ATA_PROT_NCQ:
--	case ATA_PROT_DMA:
--	case ATA_PROT_ATAPI_DMA:
--		return 1;
--
--	case ATA_PROT_ATAPI:
--	case ATA_PROT_PIO:
--		if (ap->flags & ATA_FLAG_PIO_DMA)
--			return 1;
--
--		/* fall through */
--
--	default:
--		return 0;
--	}
--
--	/* never reached */
--}
--
- /**
-  *	ata_qc_issue - issue taskfile to device
-  *	@qc: command to issue to device
-@@ -5978,6 +5926,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ static void
+@@ -160,8 +217,10 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
  {
- 	struct ata_port *ap = qc->ap;
- 	struct ata_link *link = qc->dev->link;
-+	u8 prot = qc->tf.protocol;
+ 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  
- 	/* Make sure only one non-NCQ command is outstanding.  The
- 	 * check is skipped for old EH because it reuses active qc to
-@@ -5985,7 +5934,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
- 	 */
- 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+-	if ((out == NULL) || (in == NULL))
++	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
++		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
+ 		return;
++	}
  
--	if (qc->tf.protocol == ATA_PROT_NCQ) {
-+	if (ata_is_ncq(prot)) {
- 		WARN_ON(link->sactive & (1 << qc->tag));
+ 	op->src = (void *) in;
+ 	op->dst = (void *) out;
+@@ -179,8 +238,10 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+ 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
  
- 		if (!link->sactive)
-@@ -6001,17 +5950,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
- 	qc->flags |= ATA_QCFLAG_ACTIVE;
- 	ap->qc_active |= 1 << qc->tag;
+-	if ((out == NULL) || (in == NULL))
++	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
++		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
+ 		return;
++	}
  
--	if (ata_should_dma_map(qc)) {
--		if (qc->flags & ATA_QCFLAG_SG) {
--			if (ata_sg_setup(qc))
--				goto sg_err;
--		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
--			if (ata_sg_setup_one(qc))
--				goto sg_err;
--		}
--	} else {
--		qc->flags &= ~ATA_QCFLAG_DMAMAP;
--	}
-+	/* We guarantee to LLDs that they will have at least one
-+	 * non-zero sg if the command is a data command.
-+	 */
-+	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+ 	op->src = (void *) in;
+ 	op->dst = (void *) out;
+@@ -192,24 +253,50 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ 	geode_aes_crypt(op);
+ }
+ 
++static int fallback_init_cip(struct crypto_tfm *tfm)
++{
++	const char *name = tfm->__crt_alg->cra_name;
++	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 +
-+	/* ata_sg_setup() may update nbytes */
-+	qc->raw_nbytes = qc->nbytes;
++	op->fallback.cip = crypto_alloc_cipher(name, 0,
++				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 +
-+	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
-+				 (ap->flags & ATA_FLAG_PIO_DMA)))
-+		if (ata_sg_setup(qc))
-+			goto sg_err;
++	if (IS_ERR(op->fallback.cip)) {
++		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
++		return PTR_ERR(op->fallback.cip);
++	}
++
++	return 0;
++}
++
++static void fallback_exit_cip(struct crypto_tfm *tfm)
++{
++	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++
++	crypto_free_cipher(op->fallback.cip);
++	op->fallback.cip = NULL;
++}
  
- 	/* if device is sleeping, schedule softreset and abort the link */
- 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
-@@ -6029,7 +5979,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
- 	return;
+ static struct crypto_alg geode_alg = {
+-	.cra_name               =       "aes",
+-	.cra_driver_name	=       "geode-aes-128",
+-	.cra_priority           =       300,
+-	.cra_alignmask          =       15,
+-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
++	.cra_name			=	"aes",
++	.cra_driver_name	=	"geode-aes",
++	.cra_priority		=	300,
++	.cra_alignmask		=	15,
++	.cra_flags			=	CRYPTO_ALG_TYPE_CIPHER |
++							CRYPTO_ALG_NEED_FALLBACK,
++	.cra_init			=	fallback_init_cip,
++	.cra_exit			=	fallback_exit_cip,
+ 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+ 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+-	.cra_module		=	THIS_MODULE,
+-	.cra_list		=	LIST_HEAD_INIT(geode_alg.cra_list),
+-	.cra_u			=	{
+-		.cipher = {
+-			.cia_min_keysize	=  AES_KEY_LENGTH,
+-			.cia_max_keysize	=  AES_KEY_LENGTH,
+-			.cia_setkey		=  geode_setkey,
+-			.cia_encrypt		=  geode_encrypt,
+-			.cia_decrypt		=  geode_decrypt
++	.cra_module			=	THIS_MODULE,
++	.cra_list			=	LIST_HEAD_INIT(geode_alg.cra_list),
++	.cra_u				=	{
++		.cipher	=	{
++			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
++			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
++			.cia_setkey			=	geode_setkey_cip,
++			.cia_encrypt		=	geode_encrypt,
++			.cia_decrypt		=	geode_decrypt
+ 		}
+ 	}
+ };
+@@ -223,8 +310,12 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
+ 	struct blkcipher_walk walk;
+ 	int err, ret;
  
- sg_err:
--	qc->flags &= ~ATA_QCFLAG_DMAMAP;
- 	qc->err_mask |= AC_ERR_SYSTEM;
- err:
- 	ata_qc_complete(qc);
-@@ -6064,11 +6013,11 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
- 		switch (qc->tf.protocol) {
- 		case ATA_PROT_PIO:
- 		case ATA_PROT_NODATA:
--		case ATA_PROT_ATAPI:
--		case ATA_PROT_ATAPI_NODATA:
-+		case ATAPI_PROT_PIO:
-+		case ATAPI_PROT_NODATA:
- 			qc->tf.flags |= ATA_TFLAG_POLLING;
- 			break;
--		case ATA_PROT_ATAPI_DMA:
-+		case ATAPI_PROT_DMA:
- 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
- 				/* see ata_dma_blacklisted() */
- 				BUG();
-@@ -6091,7 +6040,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
- 		ap->hsm_task_state = HSM_ST_LAST;
++	if (unlikely(op->keylen != AES_KEYSIZE_128))
++		return fallback_blk_dec(desc, dst, src, nbytes);
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	err = blkcipher_walk_virt(desc, &walk);
++	op->iv = walk.iv;
  
- 		if (qc->tf.flags & ATA_TFLAG_POLLING)
--			ata_port_queue_task(ap, ata_pio_task, qc, 0);
-+			ata_pio_queue_task(ap, qc, 0);
+ 	while((nbytes = walk.nbytes)) {
+ 		op->src = walk.src.virt.addr,
+@@ -233,13 +324,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
+ 		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+ 		op->dir = AES_DIR_DECRYPT;
  
- 		break;
+-		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
+-
+ 		ret = geode_aes_crypt(op);
  
-@@ -6113,7 +6062,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
- 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
- 			/* PIO data out protocol */
- 			ap->hsm_task_state = HSM_ST_FIRST;
--			ata_port_queue_task(ap, ata_pio_task, qc, 0);
-+			ata_pio_queue_task(ap, qc, 0);
+-		memcpy(walk.iv, op->iv, AES_IV_LENGTH);
+ 		nbytes -= ret;
+-
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	}
  
- 			/* always send first data block using
- 			 * the ata_pio_task() codepath.
-@@ -6123,7 +6072,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
- 			ap->hsm_task_state = HSM_ST;
+@@ -255,8 +342,12 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
+ 	struct blkcipher_walk walk;
+ 	int err, ret;
  
- 			if (qc->tf.flags & ATA_TFLAG_POLLING)
--				ata_port_queue_task(ap, ata_pio_task, qc, 0);
-+				ata_pio_queue_task(ap, qc, 0);
++	if (unlikely(op->keylen != AES_KEYSIZE_128))
++		return fallback_blk_enc(desc, dst, src, nbytes);
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	err = blkcipher_walk_virt(desc, &walk);
++	op->iv = walk.iv;
  
- 			/* if polling, ata_pio_task() handles the rest.
- 			 * otherwise, interrupt handler takes over from here.
-@@ -6132,8 +6081,8 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+ 	while((nbytes = walk.nbytes)) {
+ 		op->src = walk.src.virt.addr,
+@@ -265,8 +356,6 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
+ 		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+ 		op->dir = AES_DIR_ENCRYPT;
  
- 		break;
+-		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
+-
+ 		ret = geode_aes_crypt(op);
+ 		nbytes -= ret;
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+@@ -275,22 +364,49 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
+ 	return err;
+ }
  
--	case ATA_PROT_ATAPI:
--	case ATA_PROT_ATAPI_NODATA:
-+	case ATAPI_PROT_PIO:
-+	case ATAPI_PROT_NODATA:
- 		if (qc->tf.flags & ATA_TFLAG_POLLING)
- 			ata_qc_set_polling(qc);
++static int fallback_init_blk(struct crypto_tfm *tfm)
++{
++	const char *name = tfm->__crt_alg->cra_name;
++	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++
++	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
++			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
++
++	if (IS_ERR(op->fallback.blk)) {
++		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
++		return PTR_ERR(op->fallback.blk);
++	}
++
++	return 0;
++}
++
++static void fallback_exit_blk(struct crypto_tfm *tfm)
++{
++	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++
++	crypto_free_blkcipher(op->fallback.blk);
++	op->fallback.blk = NULL;
++}
++
+ static struct crypto_alg geode_cbc_alg = {
+ 	.cra_name		=	"cbc(aes)",
+-	.cra_driver_name	=	"cbc-aes-geode-128",
++	.cra_driver_name	=	"cbc-aes-geode",
+ 	.cra_priority		=	400,
+-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
++	.cra_flags			=	CRYPTO_ALG_TYPE_BLKCIPHER |
++							CRYPTO_ALG_NEED_FALLBACK,
++	.cra_init			=	fallback_init_blk,
++	.cra_exit			=	fallback_exit_blk,
+ 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+ 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+ 	.cra_alignmask		=	15,
+-	.cra_type		=	&crypto_blkcipher_type,
+-	.cra_module		=	THIS_MODULE,
+-	.cra_list		=	LIST_HEAD_INIT(geode_cbc_alg.cra_list),
+-	.cra_u			=	{
+-		.blkcipher = {
+-			.min_keysize		=	AES_KEY_LENGTH,
+-			.max_keysize		=	AES_KEY_LENGTH,
+-			.setkey			=	geode_setkey,
++	.cra_type			=	&crypto_blkcipher_type,
++	.cra_module			=	THIS_MODULE,
++	.cra_list			=	LIST_HEAD_INIT(geode_cbc_alg.cra_list),
++	.cra_u				=	{
++		.blkcipher	=	{
++			.min_keysize	=	AES_MIN_KEY_SIZE,
++			.max_keysize	=	AES_MAX_KEY_SIZE,
++			.setkey			=	geode_setkey_blk,
+ 			.encrypt		=	geode_cbc_encrypt,
+ 			.decrypt		=	geode_cbc_decrypt,
+ 			.ivsize			=	AES_IV_LENGTH,
+@@ -307,6 +423,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
+ 	struct blkcipher_walk walk;
+ 	int err, ret;
  
-@@ -6144,10 +6093,10 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
- 		/* send cdb by polling if no cdb interrupt */
- 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
- 		    (qc->tf.flags & ATA_TFLAG_POLLING))
--			ata_port_queue_task(ap, ata_pio_task, qc, 0);
-+			ata_pio_queue_task(ap, qc, 0);
- 		break;
++	if (unlikely(op->keylen != AES_KEYSIZE_128))
++		return fallback_blk_dec(desc, dst, src, nbytes);
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	err = blkcipher_walk_virt(desc, &walk);
  
--	case ATA_PROT_ATAPI_DMA:
-+	case ATAPI_PROT_DMA:
- 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+@@ -334,6 +453,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
+ 	struct blkcipher_walk walk;
+ 	int err, ret;
  
- 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
-@@ -6156,7 +6105,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
++	if (unlikely(op->keylen != AES_KEYSIZE_128))
++		return fallback_blk_enc(desc, dst, src, nbytes);
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	err = blkcipher_walk_virt(desc, &walk);
  
- 		/* send cdb by polling if no cdb interrupt */
- 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
--			ata_port_queue_task(ap, ata_pio_task, qc, 0);
-+			ata_pio_queue_task(ap, qc, 0);
- 		break;
+@@ -353,28 +475,31 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
+ }
  
- 	default:
-@@ -6200,15 +6149,15 @@ inline unsigned int ata_host_intr(struct ata_port *ap,
- 		 */
+ static struct crypto_alg geode_ecb_alg = {
+-	.cra_name		=	"ecb(aes)",
+-	.cra_driver_name	=	"ecb-aes-geode-128",
++	.cra_name			=	"ecb(aes)",
++	.cra_driver_name	=	"ecb-aes-geode",
+ 	.cra_priority		=	400,
+-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
++	.cra_flags			=	CRYPTO_ALG_TYPE_BLKCIPHER |
++							CRYPTO_ALG_NEED_FALLBACK,
++	.cra_init			=	fallback_init_blk,
++	.cra_exit			=	fallback_exit_blk,
+ 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+ 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+ 	.cra_alignmask		=	15,
+-	.cra_type		=	&crypto_blkcipher_type,
+-	.cra_module		=	THIS_MODULE,
+-	.cra_list		=	LIST_HEAD_INIT(geode_ecb_alg.cra_list),
+-	.cra_u			=	{
+-		.blkcipher = {
+-			.min_keysize		=	AES_KEY_LENGTH,
+-			.max_keysize		=	AES_KEY_LENGTH,
+-			.setkey			=	geode_setkey,
++	.cra_type			=	&crypto_blkcipher_type,
++	.cra_module			=	THIS_MODULE,
++	.cra_list			=	LIST_HEAD_INIT(geode_ecb_alg.cra_list),
++	.cra_u				=	{
++		.blkcipher	=	{
++			.min_keysize	=	AES_MIN_KEY_SIZE,
++			.max_keysize	=	AES_MAX_KEY_SIZE,
++			.setkey			=	geode_setkey_blk,
+ 			.encrypt		=	geode_ecb_encrypt,
+ 			.decrypt		=	geode_ecb_decrypt,
+ 		}
+ 	}
+ };
  
- 		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
--		 * The flag was turned on only for atapi devices.
--		 * No need to check is_atapi_taskfile(&qc->tf) again.
-+		 * The flag was turned on only for atapi devices.  No
-+		 * need to check ata_is_atapi(qc->tf.protocol) again.
- 		 */
- 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- 			goto idle_irq;
- 		break;
- 	case HSM_ST_LAST:
- 		if (qc->tf.protocol == ATA_PROT_DMA ||
--		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
-+		    qc->tf.protocol == ATAPI_PROT_DMA) {
- 			/* check status of DMA engine */
- 			host_stat = ap->ops->bmdma_status(ap);
- 			VPRINTK("ata%u: host_stat 0x%X\n",
-@@ -6250,7 +6199,7 @@ inline unsigned int ata_host_intr(struct ata_port *ap,
- 	ata_hsm_move(ap, qc, status, 0);
+-static void
++static void __devexit
+ geode_aes_remove(struct pci_dev *dev)
+ {
+ 	crypto_unregister_alg(&geode_alg);
+@@ -389,7 +514,7 @@ geode_aes_remove(struct pci_dev *dev)
+ }
  
- 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
--				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
-+				       qc->tf.protocol == ATAPI_PROT_DMA))
- 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
  
- 	return 1;	/* irq handled */
-@@ -6772,7 +6721,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
- 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
- #endif
+-static int
++static int __devinit
+ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+ 	int ret;
+@@ -397,7 +522,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if ((ret = pci_enable_device(dev)))
+ 		return ret;
  
--	INIT_DELAYED_WORK(&ap->port_task, NULL);
-+	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
- 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
- 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
- 	INIT_LIST_HEAD(&ap->eh_done_q);
-@@ -7589,7 +7538,6 @@ EXPORT_SYMBOL_GPL(ata_host_register);
- EXPORT_SYMBOL_GPL(ata_host_activate);
- EXPORT_SYMBOL_GPL(ata_host_detach);
- EXPORT_SYMBOL_GPL(ata_sg_init);
--EXPORT_SYMBOL_GPL(ata_sg_init_one);
- EXPORT_SYMBOL_GPL(ata_hsm_move);
- EXPORT_SYMBOL_GPL(ata_qc_complete);
- EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
-@@ -7601,6 +7549,13 @@ EXPORT_SYMBOL_GPL(ata_std_dev_select);
- EXPORT_SYMBOL_GPL(sata_print_link_status);
- EXPORT_SYMBOL_GPL(ata_tf_to_fis);
- EXPORT_SYMBOL_GPL(ata_tf_from_fis);
-+EXPORT_SYMBOL_GPL(ata_pack_xfermask);
-+EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
-+EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
-+EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
-+EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
-+EXPORT_SYMBOL_GPL(ata_mode_string);
-+EXPORT_SYMBOL_GPL(ata_id_xfermask);
- EXPORT_SYMBOL_GPL(ata_check_status);
- EXPORT_SYMBOL_GPL(ata_altstatus);
- EXPORT_SYMBOL_GPL(ata_exec_command);
-@@ -7643,7 +7598,6 @@ EXPORT_SYMBOL_GPL(ata_wait_register);
- EXPORT_SYMBOL_GPL(ata_busy_sleep);
- EXPORT_SYMBOL_GPL(ata_wait_after_reset);
- EXPORT_SYMBOL_GPL(ata_wait_ready);
--EXPORT_SYMBOL_GPL(ata_port_queue_task);
- EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
- EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
- EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
-@@ -7662,18 +7616,20 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
- #endif /* CONFIG_PM */
- EXPORT_SYMBOL_GPL(ata_id_string);
- EXPORT_SYMBOL_GPL(ata_id_c_string);
--EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
- EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+-	if ((ret = pci_request_regions(dev, "geode-aes-128")))
++	if ((ret = pci_request_regions(dev, "geode-aes")))
+ 		goto eenable;
  
- EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
-+EXPORT_SYMBOL_GPL(ata_timing_find_mode);
- EXPORT_SYMBOL_GPL(ata_timing_compute);
- EXPORT_SYMBOL_GPL(ata_timing_merge);
-+EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
+ 	_iobase = pci_iomap(dev, 0, 0);
+@@ -472,7 +597,6 @@ geode_aes_exit(void)
+ MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+ MODULE_DESCRIPTION("Geode LX Hardware AES driver");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("aes");
  
- #ifdef CONFIG_PCI
- EXPORT_SYMBOL_GPL(pci_test_config_bits);
- EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
- EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
- EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
-+EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
- EXPORT_SYMBOL_GPL(ata_pci_init_one);
- EXPORT_SYMBOL_GPL(ata_pci_remove_one);
- #ifdef CONFIG_PM
-@@ -7715,4 +7671,5 @@ EXPORT_SYMBOL_GPL(ata_dev_try_classify);
- EXPORT_SYMBOL_GPL(ata_cable_40wire);
- EXPORT_SYMBOL_GPL(ata_cable_80wire);
- EXPORT_SYMBOL_GPL(ata_cable_unknown);
-+EXPORT_SYMBOL_GPL(ata_cable_ignore);
- EXPORT_SYMBOL_GPL(ata_cable_sata);
-diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
-index 21a81cd..4e31071 100644
---- a/drivers/ata/libata-eh.c
-+++ b/drivers/ata/libata-eh.c
-@@ -46,9 +46,26 @@
- #include "libata.h"
+ module_init(geode_aes_init);
+ module_exit(geode_aes_exit);
+diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
+index f479686..f1855b5 100644
+--- a/drivers/crypto/geode-aes.h
++++ b/drivers/crypto/geode-aes.h
+@@ -9,9 +9,9 @@
+ #ifndef _GEODE_AES_H_
+ #define _GEODE_AES_H_
  
- enum {
-+	/* speed down verdicts */
- 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
- 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
- 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
-+	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
-+
-+	/* error flags */
-+	ATA_EFLAG_IS_IO			= (1 << 0),
-+	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
-+
-+	/* error categories */
-+	ATA_ECAT_NONE			= 0,
-+	ATA_ECAT_ATA_BUS		= 1,
-+	ATA_ECAT_TOUT_HSM		= 2,
-+	ATA_ECAT_UNK_DEV		= 3,
-+	ATA_ECAT_DUBIOUS_NONE		= 4,
-+	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
-+	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
-+	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
-+	ATA_ECAT_NR			= 8,
- };
+-#define AES_KEY_LENGTH 16
++/* driver logic flags */
+ #define AES_IV_LENGTH  16
+-
++#define AES_KEY_LENGTH 16
+ #define AES_MIN_BLOCK_SIZE 16
  
- /* Waiting in ->prereset can never be reliable.  It's sometimes nice
-@@ -213,12 +230,13 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
- 	if (offset < 0)
- 		ata_port_desc(ap, "%s %s%llu at 0x%llx", name, type, len, start);
- 	else
--		ata_port_desc(ap, "%s 0x%llx", name, start + offset);
-+		ata_port_desc(ap, "%s 0x%llx", name,
-+				start + (unsigned long long)offset);
- }
+ #define AES_MODE_ECB 0
+@@ -22,6 +22,38 @@
  
- #endif /* CONFIG_PCI */
+ #define AES_FLAGS_HIDDENKEY (1 << 0)
  
--static void ata_ering_record(struct ata_ering *ering, int is_io,
-+static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
- 			     unsigned int err_mask)
- {
- 	struct ata_ering_entry *ent;
-@@ -229,11 +247,20 @@ static void ata_ering_record(struct ata_ering *ering, int is_io,
- 	ering->cursor %= ATA_ERING_SIZE;
++/* Register definitions */
++
++#define AES_CTRLA_REG  0x0000
++
++#define AES_CTRL_START     0x01
++#define AES_CTRL_DECRYPT   0x00
++#define AES_CTRL_ENCRYPT   0x02
++#define AES_CTRL_WRKEY     0x04
++#define AES_CTRL_DCA       0x08
++#define AES_CTRL_SCA       0x10
++#define AES_CTRL_CBC       0x20
++
++#define AES_INTR_REG  0x0008
++
++#define AES_INTRA_PENDING (1 << 16)
++#define AES_INTRB_PENDING (1 << 17)
++
++#define AES_INTR_PENDING  (AES_INTRA_PENDING | AES_INTRB_PENDING)
++#define AES_INTR_MASK     0x07
++
++#define AES_SOURCEA_REG   0x0010
++#define AES_DSTA_REG      0x0014
++#define AES_LENA_REG      0x0018
++#define AES_WRITEKEY0_REG 0x0030
++#define AES_WRITEIV0_REG  0x0040
++
++/*  A very large counter that is used to gracefully bail out of an
++ *  operation in case of trouble
++ */
++
++#define AES_OP_TIMEOUT    0x50000
++
+ struct geode_aes_op {
  
- 	ent = &ering->ring[ering->cursor];
--	ent->is_io = is_io;
-+	ent->eflags = eflags;
- 	ent->err_mask = err_mask;
- 	ent->timestamp = get_jiffies_64();
- }
+ 	void *src;
+@@ -33,7 +65,13 @@ struct geode_aes_op {
+ 	int len;
  
-+static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
+ 	u8 key[AES_KEY_LENGTH];
+-	u8 iv[AES_IV_LENGTH];
++	u8 *iv;
++
++	union {
++		struct crypto_blkcipher *blk;
++		struct crypto_cipher *cip;
++	} fallback;
++	u32 keylen;
+ };
+ 
+ #endif
+diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
+new file mode 100644
+index 0000000..dfbf24c
+--- /dev/null
++++ b/drivers/crypto/hifn_795x.c
+@@ -0,0 +1,2838 @@
++/*
++ * 2007+ Copyright (c) Evgeniy Polyakov <johnpol at 2ka.mipt.ru>
++ * All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/mod_devicetable.h>
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/mm.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++#include <linux/highmem.h>
++#include <linux/interrupt.h>
++#include <linux/crypto.h>
++#include <linux/hw_random.h>
++#include <linux/ktime.h>
++
++#include <crypto/algapi.h>
++#include <crypto/des.h>
++
++#include <asm/kmap_types.h>
++
++#undef dprintk
++
++#define HIFN_TEST
++//#define HIFN_DEBUG
++
++#ifdef HIFN_DEBUG
++#define dprintk(f, a...) 	printk(f, ##a)
++#else
++#define dprintk(f, a...)	do {} while (0)
++#endif
++
++static char hifn_pll_ref[sizeof("extNNN")] = "ext";
++module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
++MODULE_PARM_DESC(hifn_pll_ref,
++		 "PLL reference clock (pci[freq] or ext[freq], default ext)");
++
++static atomic_t hifn_dev_number;
++
++#define ACRYPTO_OP_DECRYPT	0
++#define ACRYPTO_OP_ENCRYPT	1
++#define ACRYPTO_OP_HMAC		2
++#define ACRYPTO_OP_RNG		3
++
++#define ACRYPTO_MODE_ECB		0
++#define ACRYPTO_MODE_CBC		1
++#define ACRYPTO_MODE_CFB		2
++#define ACRYPTO_MODE_OFB		3
++
++#define ACRYPTO_TYPE_AES_128	0
++#define ACRYPTO_TYPE_AES_192	1
++#define ACRYPTO_TYPE_AES_256	2
++#define ACRYPTO_TYPE_3DES	3
++#define ACRYPTO_TYPE_DES	4
++
++#define PCI_VENDOR_ID_HIFN		0x13A3
++#define PCI_DEVICE_ID_HIFN_7955		0x0020
++#define	PCI_DEVICE_ID_HIFN_7956		0x001d
++
++/* I/O region sizes */
++
++#define HIFN_BAR0_SIZE			0x1000
++#define HIFN_BAR1_SIZE			0x2000
++#define HIFN_BAR2_SIZE			0x8000
++
++/* DMA registers */
++
++#define HIFN_DMA_CRA 			0x0C	/* DMA Command Ring Address */
++#define HIFN_DMA_SDRA 			0x1C	/* DMA Source Data Ring Address */
++#define HIFN_DMA_RRA			0x2C	/* DMA Result Ring Address */
++#define HIFN_DMA_DDRA			0x3C	/* DMA Destination Data Ring Address */
++#define HIFN_DMA_STCTL			0x40	/* DMA Status and Control */
++#define HIFN_DMA_INTREN 		0x44	/* DMA Interrupt Enable */
++#define HIFN_DMA_CFG1			0x48	/* DMA Configuration #1 */
++#define HIFN_DMA_CFG2			0x6C	/* DMA Configuration #2 */
++#define HIFN_CHIP_ID			0x98	/* Chip ID */
++
++/*
++ * Processing Unit Registers (offset from BASEREG0)
++ */
++#define	HIFN_0_PUDATA		0x00	/* Processing Unit Data */
++#define	HIFN_0_PUCTRL		0x04	/* Processing Unit Control */
++#define	HIFN_0_PUISR		0x08	/* Processing Unit Interrupt Status */
++#define	HIFN_0_PUCNFG		0x0c	/* Processing Unit Configuration */
++#define	HIFN_0_PUIER		0x10	/* Processing Unit Interrupt Enable */
++#define	HIFN_0_PUSTAT		0x14	/* Processing Unit Status/Chip ID */
++#define	HIFN_0_FIFOSTAT		0x18	/* FIFO Status */
++#define	HIFN_0_FIFOCNFG		0x1c	/* FIFO Configuration */
++#define	HIFN_0_SPACESIZE	0x20	/* Register space size */
++
++/* Processing Unit Control Register (HIFN_0_PUCTRL) */
++#define	HIFN_PUCTRL_CLRSRCFIFO	0x0010	/* clear source fifo */
++#define	HIFN_PUCTRL_STOP	0x0008	/* stop pu */
++#define	HIFN_PUCTRL_LOCKRAM	0x0004	/* lock ram */
++#define	HIFN_PUCTRL_DMAENA	0x0002	/* enable dma */
++#define	HIFN_PUCTRL_RESET	0x0001	/* Reset processing unit */
++
++/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
++#define	HIFN_PUISR_CMDINVAL	0x8000	/* Invalid command interrupt */
++#define	HIFN_PUISR_DATAERR	0x4000	/* Data error interrupt */
++#define	HIFN_PUISR_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
++#define	HIFN_PUISR_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
++#define	HIFN_PUISR_DSTOVER	0x0200	/* Destination overrun interrupt */
++#define	HIFN_PUISR_SRCCMD	0x0080	/* Source command interrupt */
++#define	HIFN_PUISR_SRCCTX	0x0040	/* Source context interrupt */
++#define	HIFN_PUISR_SRCDATA	0x0020	/* Source data interrupt */
++#define	HIFN_PUISR_DSTDATA	0x0010	/* Destination data interrupt */
++#define	HIFN_PUISR_DSTRESULT	0x0004	/* Destination result interrupt */
++
++/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
++#define	HIFN_PUCNFG_DRAMMASK	0xe000	/* DRAM size mask */
++#define	HIFN_PUCNFG_DSZ_256K	0x0000	/* 256k dram */
++#define	HIFN_PUCNFG_DSZ_512K	0x2000	/* 512k dram */
++#define	HIFN_PUCNFG_DSZ_1M	0x4000	/* 1m dram */
++#define	HIFN_PUCNFG_DSZ_2M	0x6000	/* 2m dram */
++#define	HIFN_PUCNFG_DSZ_4M	0x8000	/* 4m dram */
++#define	HIFN_PUCNFG_DSZ_8M	0xa000	/* 8m dram */
++#define	HIFN_PUNCFG_DSZ_16M	0xc000	/* 16m dram */
++#define	HIFN_PUCNFG_DSZ_32M	0xe000	/* 32m dram */
++#define	HIFN_PUCNFG_DRAMREFRESH	0x1800	/* DRAM refresh rate mask */
++#define	HIFN_PUCNFG_DRFR_512	0x0000	/* 512 divisor of ECLK */
++#define	HIFN_PUCNFG_DRFR_256	0x0800	/* 256 divisor of ECLK */
++#define	HIFN_PUCNFG_DRFR_128	0x1000	/* 128 divisor of ECLK */
++#define	HIFN_PUCNFG_TCALLPHASES	0x0200	/* your guess is as good as mine... */
++#define	HIFN_PUCNFG_TCDRVTOTEM	0x0100	/* your guess is as good as mine... */
++#define	HIFN_PUCNFG_BIGENDIAN	0x0080	/* DMA big endian mode */
++#define	HIFN_PUCNFG_BUS32	0x0040	/* Bus width 32bits */
++#define	HIFN_PUCNFG_BUS16	0x0000	/* Bus width 16 bits */
++#define	HIFN_PUCNFG_CHIPID	0x0020	/* Allow chipid from PUSTAT */
++#define	HIFN_PUCNFG_DRAM	0x0010	/* Context RAM is DRAM */
++#define	HIFN_PUCNFG_SRAM	0x0000	/* Context RAM is SRAM */
++#define	HIFN_PUCNFG_COMPSING	0x0004	/* Enable single compression context */
++#define	HIFN_PUCNFG_ENCCNFG	0x0002	/* Encryption configuration */
++
++/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
++#define	HIFN_PUIER_CMDINVAL	0x8000	/* Invalid command interrupt */
++#define	HIFN_PUIER_DATAERR	0x4000	/* Data error interrupt */
++#define	HIFN_PUIER_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
++#define	HIFN_PUIER_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
++#define	HIFN_PUIER_DSTOVER	0x0200	/* Destination overrun interrupt */
++#define	HIFN_PUIER_SRCCMD	0x0080	/* Source command interrupt */
++#define	HIFN_PUIER_SRCCTX	0x0040	/* Source context interrupt */
++#define	HIFN_PUIER_SRCDATA	0x0020	/* Source data interrupt */
++#define	HIFN_PUIER_DSTDATA	0x0010	/* Destination data interrupt */
++#define	HIFN_PUIER_DSTRESULT	0x0004	/* Destination result interrupt */
++
++/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
++#define	HIFN_PUSTAT_CMDINVAL	0x8000	/* Invalid command interrupt */
++#define	HIFN_PUSTAT_DATAERR	0x4000	/* Data error interrupt */
++#define	HIFN_PUSTAT_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
++#define	HIFN_PUSTAT_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
++#define	HIFN_PUSTAT_DSTOVER	0x0200	/* Destination overrun interrupt */
++#define	HIFN_PUSTAT_SRCCMD	0x0080	/* Source command interrupt */
++#define	HIFN_PUSTAT_SRCCTX	0x0040	/* Source context interrupt */
++#define	HIFN_PUSTAT_SRCDATA	0x0020	/* Source data interrupt */
++#define	HIFN_PUSTAT_DSTDATA	0x0010	/* Destination data interrupt */
++#define	HIFN_PUSTAT_DSTRESULT	0x0004	/* Destination result interrupt */
++#define	HIFN_PUSTAT_CHIPREV	0x00ff	/* Chip revision mask */
++#define	HIFN_PUSTAT_CHIPENA	0xff00	/* Chip enabled mask */
++#define	HIFN_PUSTAT_ENA_2	0x1100	/* Level 2 enabled */
++#define	HIFN_PUSTAT_ENA_1	0x1000	/* Level 1 enabled */
++#define	HIFN_PUSTAT_ENA_0	0x3000	/* Level 0 enabled */
++#define	HIFN_PUSTAT_REV_2	0x0020	/* 7751 PT6/2 */
++#define	HIFN_PUSTAT_REV_3	0x0030	/* 7751 PT6/3 */
++
++/* FIFO Status Register (HIFN_0_FIFOSTAT) */
++#define	HIFN_FIFOSTAT_SRC	0x7f00	/* Source FIFO available */
++#define	HIFN_FIFOSTAT_DST	0x007f	/* Destination FIFO available */
++
++/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
++#define	HIFN_FIFOCNFG_THRESHOLD	0x0400	/* must be written as 1 */
++
++/*
++ * DMA Interface Registers (offset from BASEREG1)
++ */
++#define	HIFN_1_DMA_CRAR		0x0c	/* DMA Command Ring Address */
++#define	HIFN_1_DMA_SRAR		0x1c	/* DMA Source Ring Address */
++#define	HIFN_1_DMA_RRAR		0x2c	/* DMA Result Ring Address */
++#define	HIFN_1_DMA_DRAR		0x3c	/* DMA Destination Ring Address */
++#define	HIFN_1_DMA_CSR		0x40	/* DMA Status and Control */
++#define	HIFN_1_DMA_IER		0x44	/* DMA Interrupt Enable */
++#define	HIFN_1_DMA_CNFG		0x48	/* DMA Configuration */
++#define	HIFN_1_PLL		0x4c	/* 795x: PLL config */
++#define	HIFN_1_7811_RNGENA	0x60	/* 7811: rng enable */
++#define	HIFN_1_7811_RNGCFG	0x64	/* 7811: rng config */
++#define	HIFN_1_7811_RNGDAT	0x68	/* 7811: rng data */
++#define	HIFN_1_7811_RNGSTS	0x6c	/* 7811: rng status */
++#define	HIFN_1_7811_MIPSRST	0x94	/* 7811: MIPS reset */
++#define	HIFN_1_REVID		0x98	/* Revision ID */
++#define	HIFN_1_UNLOCK_SECRET1	0xf4
++#define	HIFN_1_UNLOCK_SECRET2	0xfc
++#define	HIFN_1_PUB_RESET	0x204	/* Public/RNG Reset */
++#define	HIFN_1_PUB_BASE		0x300	/* Public Base Address */
++#define	HIFN_1_PUB_OPLEN	0x304	/* Public Operand Length */
++#define	HIFN_1_PUB_OP		0x308	/* Public Operand */
++#define	HIFN_1_PUB_STATUS	0x30c	/* Public Status */
++#define	HIFN_1_PUB_IEN		0x310	/* Public Interrupt enable */
++#define	HIFN_1_RNG_CONFIG	0x314	/* RNG config */
++#define	HIFN_1_RNG_DATA		0x318	/* RNG data */
++#define	HIFN_1_PUB_MEM		0x400	/* start of Public key memory */
++#define	HIFN_1_PUB_MEMEND	0xbff	/* end of Public key memory */
++
++/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
++#define	HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destination Ring Control */
++#define	HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
++#define	HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
++#define	HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
++#define	HIFN_DMACSR_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
++#define	HIFN_DMACSR_D_DONE	0x10000000	/* Destination Ring Done */
++#define	HIFN_DMACSR_D_LAST	0x08000000	/* Destination Ring Last */
++#define	HIFN_DMACSR_D_WAIT	0x04000000	/* Destination Ring Waiting */
++#define	HIFN_DMACSR_D_OVER	0x02000000	/* Destination Ring Overflow */
++#define	HIFN_DMACSR_R_CTRL	0x00c00000	/* Result Ring Control */
++#define	HIFN_DMACSR_R_CTRL_NOP	0x00000000	/* Result Control: no-op */
++#define	HIFN_DMACSR_R_CTRL_DIS	0x00400000	/* Result Control: disable */
++#define	HIFN_DMACSR_R_CTRL_ENA	0x00800000	/* Result Control: enable */
++#define	HIFN_DMACSR_R_ABORT	0x00200000	/* Result Ring PCI Abort */
++#define	HIFN_DMACSR_R_DONE	0x00100000	/* Result Ring Done */
++#define	HIFN_DMACSR_R_LAST	0x00080000	/* Result Ring Last */
++#define	HIFN_DMACSR_R_WAIT	0x00040000	/* Result Ring Waiting */
++#define	HIFN_DMACSR_R_OVER	0x00020000	/* Result Ring Overflow */
++#define	HIFN_DMACSR_S_CTRL	0x0000c000	/* Source Ring Control */
++#define	HIFN_DMACSR_S_CTRL_NOP	0x00000000	/* Source Control: no-op */
++#define	HIFN_DMACSR_S_CTRL_DIS	0x00004000	/* Source Control: disable */
++#define	HIFN_DMACSR_S_CTRL_ENA	0x00008000	/* Source Control: enable */
++#define	HIFN_DMACSR_S_ABORT	0x00002000	/* Source Ring PCI Abort */
++#define	HIFN_DMACSR_S_DONE	0x00001000	/* Source Ring Done */
++#define	HIFN_DMACSR_S_LAST	0x00000800	/* Source Ring Last */
++#define	HIFN_DMACSR_S_WAIT	0x00000400	/* Source Ring Waiting */
++#define	HIFN_DMACSR_ILLW	0x00000200	/* Illegal write (7811 only) */
++#define	HIFN_DMACSR_ILLR	0x00000100	/* Illegal read (7811 only) */
++#define	HIFN_DMACSR_C_CTRL	0x000000c0	/* Command Ring Control */
++#define	HIFN_DMACSR_C_CTRL_NOP	0x00000000	/* Command Control: no-op */
++#define	HIFN_DMACSR_C_CTRL_DIS	0x00000040	/* Command Control: disable */
++#define	HIFN_DMACSR_C_CTRL_ENA	0x00000080	/* Command Control: enable */
++#define	HIFN_DMACSR_C_ABORT	0x00000020	/* Command Ring PCI Abort */
++#define	HIFN_DMACSR_C_DONE	0x00000010	/* Command Ring Done */
++#define	HIFN_DMACSR_C_LAST	0x00000008	/* Command Ring Last */
++#define	HIFN_DMACSR_C_WAIT	0x00000004	/* Command Ring Waiting */
++#define	HIFN_DMACSR_PUBDONE	0x00000002	/* Public op done (7951 only) */
++#define	HIFN_DMACSR_ENGINE	0x00000001	/* Command Ring Engine IRQ */
++
++/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
++#define	HIFN_DMAIER_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
++#define	HIFN_DMAIER_D_DONE	0x10000000	/* Destination Ring Done */
++#define	HIFN_DMAIER_D_LAST	0x08000000	/* Destination Ring Last */
++#define	HIFN_DMAIER_D_WAIT	0x04000000	/* Destination Ring Waiting */
++#define	HIFN_DMAIER_D_OVER	0x02000000	/* Destination Ring Overflow */
++#define	HIFN_DMAIER_R_ABORT	0x00200000	/* Result Ring PCI Abort */
++#define	HIFN_DMAIER_R_DONE	0x00100000	/* Result Ring Done */
++#define	HIFN_DMAIER_R_LAST	0x00080000	/* Result Ring Last */
++#define	HIFN_DMAIER_R_WAIT	0x00040000	/* Result Ring Waiting */
++#define	HIFN_DMAIER_R_OVER	0x00020000	/* Result Ring Overflow */
++#define	HIFN_DMAIER_S_ABORT	0x00002000	/* Source Ring PCI Abort */
++#define	HIFN_DMAIER_S_DONE	0x00001000	/* Source Ring Done */
++#define	HIFN_DMAIER_S_LAST	0x00000800	/* Source Ring Last */
++#define	HIFN_DMAIER_S_WAIT	0x00000400	/* Source Ring Waiting */
++#define	HIFN_DMAIER_ILLW	0x00000200	/* Illegal write (7811 only) */
++#define	HIFN_DMAIER_ILLR	0x00000100	/* Illegal read (7811 only) */
++#define	HIFN_DMAIER_C_ABORT	0x00000020	/* Command Ring PCI Abort */
++#define	HIFN_DMAIER_C_DONE	0x00000010	/* Command Ring Done */
++#define	HIFN_DMAIER_C_LAST	0x00000008	/* Command Ring Last */
++#define	HIFN_DMAIER_C_WAIT	0x00000004	/* Command Ring Waiting */
++#define	HIFN_DMAIER_PUBDONE	0x00000002	/* public op done (7951 only) */
++#define	HIFN_DMAIER_ENGINE	0x00000001	/* Engine IRQ */
++
++/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
++#define	HIFN_DMACNFG_BIGENDIAN	0x10000000	/* big endian mode */
++#define	HIFN_DMACNFG_POLLFREQ	0x00ff0000	/* Poll frequency mask */
++#define	HIFN_DMACNFG_UNLOCK	0x00000800
++#define	HIFN_DMACNFG_POLLINVAL	0x00000700	/* Invalid Poll Scalar */
++#define	HIFN_DMACNFG_LAST	0x00000010	/* Host control LAST bit */
++#define	HIFN_DMACNFG_MODE	0x00000004	/* DMA mode */
++#define	HIFN_DMACNFG_DMARESET	0x00000002	/* DMA Reset # */
++#define	HIFN_DMACNFG_MSTRESET	0x00000001	/* Master Reset # */
++
++/* PLL configuration register */
++#define HIFN_PLL_REF_CLK_HBI	0x00000000	/* HBI reference clock */
++#define HIFN_PLL_REF_CLK_PLL	0x00000001	/* PLL reference clock */
++#define HIFN_PLL_BP		0x00000002	/* Reference clock bypass */
++#define HIFN_PLL_PK_CLK_HBI	0x00000000	/* PK engine HBI clock */
++#define HIFN_PLL_PK_CLK_PLL	0x00000008	/* PK engine PLL clock */
++#define HIFN_PLL_PE_CLK_HBI	0x00000000	/* PE engine HBI clock */
++#define HIFN_PLL_PE_CLK_PLL	0x00000010	/* PE engine PLL clock */
++#define HIFN_PLL_RESERVED_1	0x00000400	/* Reserved bit, must be 1 */
++#define HIFN_PLL_ND_SHIFT	11		/* Clock multiplier shift */
++#define HIFN_PLL_ND_MULT_2	0x00000000	/* PLL clock multiplier 2 */
++#define HIFN_PLL_ND_MULT_4	0x00000800	/* PLL clock multiplier 4 */
++#define HIFN_PLL_ND_MULT_6	0x00001000	/* PLL clock multiplier 6 */
++#define HIFN_PLL_ND_MULT_8	0x00001800	/* PLL clock multiplier 8 */
++#define HIFN_PLL_ND_MULT_10	0x00002000	/* PLL clock multiplier 10 */
++#define HIFN_PLL_ND_MULT_12	0x00002800	/* PLL clock multiplier 12 */
++#define HIFN_PLL_IS_1_8		0x00000000	/* charge pump (mult. 1-8) */
++#define HIFN_PLL_IS_9_12	0x00010000	/* charge pump (mult. 9-12) */
++
++#define HIFN_PLL_FCK_MAX	266		/* Maximum PLL frequency */
++
++/* Public key reset register (HIFN_1_PUB_RESET) */
++#define	HIFN_PUBRST_RESET	0x00000001	/* reset public/rng unit */
++
++/* Public base address register (HIFN_1_PUB_BASE) */
++#define	HIFN_PUBBASE_ADDR	0x00003fff	/* base address */
++
++/* Public operand length register (HIFN_1_PUB_OPLEN) */
++#define	HIFN_PUBOPLEN_MOD_M	0x0000007f	/* modulus length mask */
++#define	HIFN_PUBOPLEN_MOD_S	0		/* modulus length shift */
++#define	HIFN_PUBOPLEN_EXP_M	0x0003ff80	/* exponent length mask */
++#define	HIFN_PUBOPLEN_EXP_S	7		/* exponent length shift */
++#define	HIFN_PUBOPLEN_RED_M	0x003c0000	/* reducend length mask */
++#define	HIFN_PUBOPLEN_RED_S	18		/* reducend length shift */
++
++/* Public operation register (HIFN_1_PUB_OP) */
++#define	HIFN_PUBOP_AOFFSET_M	0x0000007f	/* A offset mask */
++#define	HIFN_PUBOP_AOFFSET_S	0		/* A offset shift */
++#define	HIFN_PUBOP_BOFFSET_M	0x00000f80	/* B offset mask */
++#define	HIFN_PUBOP_BOFFSET_S	7		/* B offset shift */
++#define	HIFN_PUBOP_MOFFSET_M	0x0003f000	/* M offset mask */
++#define	HIFN_PUBOP_MOFFSET_S	12		/* M offset shift */
++#define	HIFN_PUBOP_OP_MASK	0x003c0000	/* Opcode: */
++#define	HIFN_PUBOP_OP_NOP	0x00000000	/*  NOP */
++#define	HIFN_PUBOP_OP_ADD	0x00040000	/*  ADD */
++#define	HIFN_PUBOP_OP_ADDC	0x00080000	/*  ADD w/carry */
++#define	HIFN_PUBOP_OP_SUB	0x000c0000	/*  SUB */
++#define	HIFN_PUBOP_OP_SUBC	0x00100000	/*  SUB w/carry */
++#define	HIFN_PUBOP_OP_MODADD	0x00140000	/*  Modular ADD */
++#define	HIFN_PUBOP_OP_MODSUB	0x00180000	/*  Modular SUB */
++#define	HIFN_PUBOP_OP_INCA	0x001c0000	/*  INC A */
++#define	HIFN_PUBOP_OP_DECA	0x00200000	/*  DEC A */
++#define	HIFN_PUBOP_OP_MULT	0x00240000	/*  MULT */
++#define	HIFN_PUBOP_OP_MODMULT	0x00280000	/*  Modular MULT */
++#define	HIFN_PUBOP_OP_MODRED	0x002c0000	/*  Modular RED */
++#define	HIFN_PUBOP_OP_MODEXP	0x00300000	/*  Modular EXP */
++
++/* Public status register (HIFN_1_PUB_STATUS) */
++#define	HIFN_PUBSTS_DONE	0x00000001	/* operation done */
++#define	HIFN_PUBSTS_CARRY	0x00000002	/* carry */
++
++/* Public interrupt enable register (HIFN_1_PUB_IEN) */
++#define	HIFN_PUBIEN_DONE	0x00000001	/* operation done interrupt */
++
++/* Random number generator config register (HIFN_1_RNG_CONFIG) */
++#define	HIFN_RNGCFG_ENA		0x00000001	/* enable rng */
++
++#define HIFN_NAMESIZE			32
++#define HIFN_MAX_RESULT_ORDER		5
++
++#define	HIFN_D_CMD_RSIZE		24*4
++#define	HIFN_D_SRC_RSIZE		80*4
++#define	HIFN_D_DST_RSIZE		80*4
++#define	HIFN_D_RES_RSIZE		24*4
++
++#define HIFN_QUEUE_LENGTH		HIFN_D_CMD_RSIZE-5
++
++#define AES_MIN_KEY_SIZE		16
++#define AES_MAX_KEY_SIZE		32
++
++#define HIFN_DES_KEY_LENGTH		8
++#define HIFN_3DES_KEY_LENGTH		24
++#define HIFN_MAX_CRYPT_KEY_LENGTH	AES_MAX_KEY_SIZE
++#define HIFN_IV_LENGTH			8
++#define HIFN_AES_IV_LENGTH		16
++#define	HIFN_MAX_IV_LENGTH		HIFN_AES_IV_LENGTH
++
++#define HIFN_MAC_KEY_LENGTH		64
++#define HIFN_MD5_LENGTH			16
++#define HIFN_SHA1_LENGTH		20
++#define HIFN_MAC_TRUNC_LENGTH		12
++
++#define	HIFN_MAX_COMMAND		(8 + 8 + 8 + 64 + 260)
++#define	HIFN_MAX_RESULT			(8 + 4 + 4 + 20 + 4)
++#define HIFN_USED_RESULT		12
++
++struct hifn_desc
 +{
-+	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
++	volatile u32		l;
++	volatile u32		p;
++};
++
++struct hifn_dma {
++	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE+1];
++	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE+1];
++	struct hifn_desc	dstr[HIFN_D_DST_RSIZE+1];
++	struct hifn_desc	resr[HIFN_D_RES_RSIZE+1];
++
++	u8			command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
++	u8			result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
++
++	u64			test_src, test_dst;
++
++	/*
++	 *  Our current positions for insertion and removal from the descriptor
++	 *  rings.
++	 */
++	volatile int		cmdi, srci, dsti, resi;
++	volatile int		cmdu, srcu, dstu, resu;
++	int			cmdk, srck, dstk, resk;
++};
++
++#define HIFN_FLAG_CMD_BUSY	(1<<0)
++#define HIFN_FLAG_SRC_BUSY	(1<<1)
++#define HIFN_FLAG_DST_BUSY	(1<<2)
++#define HIFN_FLAG_RES_BUSY	(1<<3)
++#define HIFN_FLAG_OLD_KEY	(1<<4)
++
++#define HIFN_DEFAULT_ACTIVE_NUM	5
++
++struct hifn_device
++{
++	char			name[HIFN_NAMESIZE];
++
++	int			irq;
++
++	struct pci_dev		*pdev;
++	void __iomem		*bar[3];
++
++	unsigned long		result_mem;
++	dma_addr_t		dst;
++
++	void			*desc_virt;
++	dma_addr_t		desc_dma;
++
++	u32			dmareg;
++
++	void 			*sa[HIFN_D_RES_RSIZE];
++
++	spinlock_t		lock;
++
++	void 			*priv;
++
++	u32			flags;
++	int			active, started;
++	struct delayed_work	work;
++	unsigned long		reset;
++	unsigned long		success;
++	unsigned long		prev_success;
++
++	u8			snum;
++
++	struct tasklet_struct	tasklet;
++
++	struct crypto_queue 	queue;
++	struct list_head	alg_list;
++
++	unsigned int		pk_clk_freq;
++
++#ifdef CRYPTO_DEV_HIFN_795X_RNG
++	unsigned int		rng_wait_time;
++	ktime_t			rngtime;
++	struct hwrng		rng;
++#endif
++};
++
++#define	HIFN_D_LENGTH			0x0000ffff
++#define	HIFN_D_NOINVALID		0x01000000
++#define	HIFN_D_MASKDONEIRQ		0x02000000
++#define	HIFN_D_DESTOVER			0x04000000
++#define	HIFN_D_OVER			0x08000000
++#define	HIFN_D_LAST			0x20000000
++#define	HIFN_D_JUMP			0x40000000
++#define	HIFN_D_VALID			0x80000000
++
++struct hifn_base_command
++{
++	volatile u16		masks;
++	volatile u16		session_num;
++	volatile u16		total_source_count;
++	volatile u16		total_dest_count;
++};
++
++#define	HIFN_BASE_CMD_COMP		0x0100	/* enable compression engine */
++#define	HIFN_BASE_CMD_PAD		0x0200	/* enable padding engine */
++#define	HIFN_BASE_CMD_MAC		0x0400	/* enable MAC engine */
++#define	HIFN_BASE_CMD_CRYPT		0x0800	/* enable crypt engine */
++#define	HIFN_BASE_CMD_DECODE		0x2000
++#define	HIFN_BASE_CMD_SRCLEN_M		0xc000
++#define	HIFN_BASE_CMD_SRCLEN_S		14
++#define	HIFN_BASE_CMD_DSTLEN_M		0x3000
++#define	HIFN_BASE_CMD_DSTLEN_S		12
++#define	HIFN_BASE_CMD_LENMASK_HI	0x30000
++#define	HIFN_BASE_CMD_LENMASK_LO	0x0ffff
++
++/*
++ * Structure to help build up the command data structure.
++ */
++struct hifn_crypt_command
++{
++	volatile u16 		masks;
++	volatile u16 		header_skip;
++	volatile u16 		source_count;
++	volatile u16 		reserved;
++};
++
++#define	HIFN_CRYPT_CMD_ALG_MASK		0x0003		/* algorithm: */
++#define	HIFN_CRYPT_CMD_ALG_DES		0x0000		/*   DES */
++#define	HIFN_CRYPT_CMD_ALG_3DES		0x0001		/*   3DES */
++#define	HIFN_CRYPT_CMD_ALG_RC4		0x0002		/*   RC4 */
++#define	HIFN_CRYPT_CMD_ALG_AES		0x0003		/*   AES */
++#define	HIFN_CRYPT_CMD_MODE_MASK	0x0018		/* Encrypt mode: */
++#define	HIFN_CRYPT_CMD_MODE_ECB		0x0000		/*   ECB */
++#define	HIFN_CRYPT_CMD_MODE_CBC		0x0008		/*   CBC */
++#define	HIFN_CRYPT_CMD_MODE_CFB		0x0010		/*   CFB */
++#define	HIFN_CRYPT_CMD_MODE_OFB		0x0018		/*   OFB */
++#define	HIFN_CRYPT_CMD_CLR_CTX		0x0040		/* clear context */
++#define	HIFN_CRYPT_CMD_KSZ_MASK		0x0600		/* AES key size: */
++#define	HIFN_CRYPT_CMD_KSZ_128		0x0000		/*  128 bit */
++#define	HIFN_CRYPT_CMD_KSZ_192		0x0200		/*  192 bit */
++#define	HIFN_CRYPT_CMD_KSZ_256		0x0400		/*  256 bit */
++#define	HIFN_CRYPT_CMD_NEW_KEY		0x0800		/* expect new key */
++#define	HIFN_CRYPT_CMD_NEW_IV		0x1000		/* expect new iv */
++#define	HIFN_CRYPT_CMD_SRCLEN_M		0xc000
++#define	HIFN_CRYPT_CMD_SRCLEN_S		14
++
++/*
++ * Structure to help build up the command data structure.
++ */
++struct hifn_mac_command
++{
++	volatile u16 		masks;
++	volatile u16 		header_skip;
++	volatile u16 		source_count;
++	volatile u16 		reserved;
++};
++
++#define	HIFN_MAC_CMD_ALG_MASK		0x0001
++#define	HIFN_MAC_CMD_ALG_SHA1		0x0000
++#define	HIFN_MAC_CMD_ALG_MD5		0x0001
++#define	HIFN_MAC_CMD_MODE_MASK		0x000c
++#define	HIFN_MAC_CMD_MODE_HMAC		0x0000
++#define	HIFN_MAC_CMD_MODE_SSL_MAC	0x0004
++#define	HIFN_MAC_CMD_MODE_HASH		0x0008
++#define	HIFN_MAC_CMD_MODE_FULL		0x0004
++#define	HIFN_MAC_CMD_TRUNC		0x0010
++#define	HIFN_MAC_CMD_RESULT		0x0020
++#define	HIFN_MAC_CMD_APPEND		0x0040
++#define	HIFN_MAC_CMD_SRCLEN_M		0xc000
++#define	HIFN_MAC_CMD_SRCLEN_S		14
++
++/*
++ * MAC POS IPsec initiates authentication after encryption on encodes
++ * and before decryption on decodes.
++ */
++#define	HIFN_MAC_CMD_POS_IPSEC		0x0200
++#define	HIFN_MAC_CMD_NEW_KEY		0x0800
++
++struct hifn_comp_command
++{
++	volatile u16 		masks;
++	volatile u16 		header_skip;
++	volatile u16 		source_count;
++	volatile u16 		reserved;
++};
++
++#define	HIFN_COMP_CMD_SRCLEN_M		0xc000
++#define	HIFN_COMP_CMD_SRCLEN_S		14
++#define	HIFN_COMP_CMD_ONE		0x0100	/* must be one */
++#define	HIFN_COMP_CMD_CLEARHIST		0x0010	/* clear history */
++#define	HIFN_COMP_CMD_UPDATEHIST	0x0008	/* update history */
++#define	HIFN_COMP_CMD_LZS_STRIP0	0x0004	/* LZS: strip zero */
++#define	HIFN_COMP_CMD_MPPC_RESTART	0x0004	/* MPPC: restart */
++#define	HIFN_COMP_CMD_ALG_MASK		0x0001	/* compression mode: */
++#define	HIFN_COMP_CMD_ALG_MPPC		0x0001	/*   MPPC */
++#define	HIFN_COMP_CMD_ALG_LZS		0x0000	/*   LZS */
++
++struct hifn_base_result
++{
++	volatile u16 		flags;
++	volatile u16 		session;
++	volatile u16 		src_cnt;		/* 15:0 of source count */
++	volatile u16 		dst_cnt;		/* 15:0 of dest count */
++};
++
++#define	HIFN_BASE_RES_DSTOVERRUN	0x0200	/* destination overrun */
++#define	HIFN_BASE_RES_SRCLEN_M		0xc000	/* 17:16 of source count */
++#define	HIFN_BASE_RES_SRCLEN_S		14
++#define	HIFN_BASE_RES_DSTLEN_M		0x3000	/* 17:16 of dest count */
++#define	HIFN_BASE_RES_DSTLEN_S		12
++
++struct hifn_comp_result
++{
++	volatile u16 		flags;
++	volatile u16 		crc;
++};
++
++#define	HIFN_COMP_RES_LCB_M		0xff00	/* longitudinal check byte */
++#define	HIFN_COMP_RES_LCB_S		8
++#define	HIFN_COMP_RES_RESTART		0x0004	/* MPPC: restart */
++#define	HIFN_COMP_RES_ENDMARKER		0x0002	/* LZS: end marker seen */
++#define	HIFN_COMP_RES_SRC_NOTZERO	0x0001	/* source expired */
++
++struct hifn_mac_result
++{
++	volatile u16 		flags;
++	volatile u16 		reserved;
++	/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
++};
++
++#define	HIFN_MAC_RES_MISCOMPARE		0x0002	/* compare failed */
++#define	HIFN_MAC_RES_SRC_NOTZERO	0x0001	/* source expired */
++
++struct hifn_crypt_result
++{
++	volatile u16 		flags;
++	volatile u16 		reserved;
++};
++
++#define	HIFN_CRYPT_RES_SRC_NOTZERO	0x0001	/* source expired */
++
++#ifndef HIFN_POLL_FREQUENCY
++#define	HIFN_POLL_FREQUENCY	0x1
++#endif
++
++#ifndef HIFN_POLL_SCALAR
++#define	HIFN_POLL_SCALAR	0x0
++#endif
++
++#define	HIFN_MAX_SEGLEN 	0xffff		/* maximum dma segment len */
++#define	HIFN_MAX_DMALEN		0x3ffff		/* maximum dma length */
++
++struct hifn_crypto_alg
++{
++	struct list_head	entry;
++	struct crypto_alg	alg;
++	struct hifn_device	*dev;
++};
++
++#define ASYNC_SCATTERLIST_CACHE	16
++
++#define ASYNC_FLAGS_MISALIGNED	(1<<0)
++
++struct ablkcipher_walk
++{
++	struct scatterlist	cache[ASYNC_SCATTERLIST_CACHE];
++	u32			flags;
++	int			num;
++};
++
++struct hifn_context
++{
++	u8			key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv;
++	struct hifn_device	*dev;
++	unsigned int		keysize, ivsize;
++	u8			op, type, mode, unused;
++	struct ablkcipher_walk	walk;
++	atomic_t		sg_num;
++};
++
++#define crypto_alg_to_hifn(a)	container_of(a, struct hifn_crypto_alg, alg)
++
++static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
++{
++	u32 ret;
++
++	ret = readl((char *)(dev->bar[0]) + reg);
++
++	return ret;
++}
++
++static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
++{
++	u32 ret;
++
++	ret = readl((char *)(dev->bar[1]) + reg);
++
++	return ret;
++}
++
++static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
++{
++	writel(val, (char *)(dev->bar[0]) + reg);
++}
++
++static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
++{
++	writel(val, (char *)(dev->bar[1]) + reg);
++}
++
++static void hifn_wait_puc(struct hifn_device *dev)
++{
++	int i;
++	u32 ret;
++
++	for (i=10000; i > 0; --i) {
++		ret = hifn_read_0(dev, HIFN_0_PUCTRL);
++		if (!(ret & HIFN_PUCTRL_RESET))
++			break;
++
++		udelay(1);
++	}
++
++	if (!i)
++		dprintk("%s: Failed to reset PUC unit.\n", dev->name);
++}
++
++static void hifn_reset_puc(struct hifn_device *dev)
++{
++	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
++	hifn_wait_puc(dev);
++}
++
++static void hifn_stop_device(struct hifn_device *dev)
++{
++	hifn_write_1(dev, HIFN_1_DMA_CSR,
++		HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
++		HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
++	hifn_write_0(dev, HIFN_0_PUIER, 0);
++	hifn_write_1(dev, HIFN_1_DMA_IER, 0);
++}
++
++static void hifn_reset_dma(struct hifn_device *dev, int full)
++{
++	hifn_stop_device(dev);
++
++	/*
++	 * Setting poll frequency and others to 0.
++	 */
++	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++	mdelay(1);
++
++	/*
++	 * Reset DMA.
++	 */
++	if (full) {
++		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
++		mdelay(1);
++	} else {
++		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
++				HIFN_DMACNFG_MSTRESET);
++		hifn_reset_puc(dev);
++	}
++
++	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++
++	hifn_reset_puc(dev);
++}
++
++static u32 hifn_next_signature(u_int32_t a, u_int cnt)
++{
++	int i;
++	u32 v;
++
++	for (i = 0; i < cnt; i++) {
++
++		/* get the parity */
++		v = a & 0x80080125;
++		v ^= v >> 16;
++		v ^= v >> 8;
++		v ^= v >> 4;
++		v ^= v >> 2;
++		v ^= v >> 1;
++
++		a = (v & 1) ^ (a << 1);
++	}
++
++	return a;
++}
++
++static struct pci2id {
++	u_short		pci_vendor;
++	u_short		pci_prod;
++	char		card_id[13];
++} pci2id[] = {
++	{
++		PCI_VENDOR_ID_HIFN,
++		PCI_DEVICE_ID_HIFN_7955,
++		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++		  0x00, 0x00, 0x00, 0x00, 0x00 }
++	},
++	{
++		PCI_VENDOR_ID_HIFN,
++		PCI_DEVICE_ID_HIFN_7956,
++		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++		  0x00, 0x00, 0x00, 0x00, 0x00 }
++	}
++};
++
++#ifdef CRYPTO_DEV_HIFN_795X_RNG
++static int hifn_rng_data_present(struct hwrng *rng, int wait)
++{
++	struct hifn_device *dev = (struct hifn_device *)rng->priv;
++	s64 nsec;
++
++	nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime));
++	nsec -= dev->rng_wait_time;
++	if (nsec <= 0)
++		return 1;
++	if (!wait)
++		return 0;
++	ndelay(nsec);
++	return 1;
++}
++
++static int hifn_rng_data_read(struct hwrng *rng, u32 *data)
++{
++	struct hifn_device *dev = (struct hifn_device *)rng->priv;
++
++	*data = hifn_read_1(dev, HIFN_1_RNG_DATA);
++	dev->rngtime = ktime_get();
++	return 4;
++}
++
++static int hifn_register_rng(struct hifn_device *dev)
++{
++	/*
++	 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
++	 */
++	dev->rng_wait_time	= DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) *
++				  256;
++
++	dev->rng.name		= dev->name;
++	dev->rng.data_present	= hifn_rng_data_present,
++	dev->rng.data_read	= hifn_rng_data_read,
++	dev->rng.priv		= (unsigned long)dev;
++
++	return hwrng_register(&dev->rng);
++}
++
++static void hifn_unregister_rng(struct hifn_device *dev)
++{
++	hwrng_unregister(&dev->rng);
++}
++#else
++#define hifn_register_rng(dev)		0
++#define hifn_unregister_rng(dev)
++#endif
++
++static int hifn_init_pubrng(struct hifn_device *dev)
++{
++	int i;
++
++	hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
++			HIFN_PUBRST_RESET);
++
++	for (i=100; i > 0; --i) {
++		mdelay(1);
++
++		if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
++			break;
++	}
++
++	if (!i)
++		dprintk("Chip %s: Failed to initialise public key engine.\n",
++				dev->name);
++	else {
++		hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
++		dev->dmareg |= HIFN_DMAIER_PUBDONE;
++		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
++
++		dprintk("Chip %s: Public key engine has been successfully "
++				"initialised.\n", dev->name);
++	}
++
++	/*
++	 * Enable RNG engine.
++	 */
 +
-+	if (ent->err_mask)
-+		return ent;
-+	return NULL;
++	hifn_write_1(dev, HIFN_1_RNG_CONFIG,
++			hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
++	dprintk("Chip %s: RNG engine has been successfully initialised.\n",
++			dev->name);
++
++#ifdef CRYPTO_DEV_HIFN_795X_RNG
++	/* First value must be discarded */
++	hifn_read_1(dev, HIFN_1_RNG_DATA);
++	dev->rngtime = ktime_get();
++#endif
++	return 0;
 +}
 +
- static void ata_ering_clear(struct ata_ering *ering)
- {
- 	memset(ering, 0, sizeof(*ering));
-@@ -445,9 +472,20 @@ void ata_scsi_error(struct Scsi_Host *host)
- 		spin_lock_irqsave(ap->lock, flags);
- 
- 		__ata_port_for_each_link(link, ap) {
-+			struct ata_eh_context *ehc = &link->eh_context;
-+			struct ata_device *dev;
++static int hifn_enable_crypto(struct hifn_device *dev)
++{
++	u32 dmacfg, addr;
++	char *offtbl = NULL;
++	int i;
 +
- 			memset(&link->eh_context, 0, sizeof(link->eh_context));
- 			link->eh_context.i = link->eh_info;
- 			memset(&link->eh_info, 0, sizeof(link->eh_info));
++	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
++		if (pci2id[i].pci_vendor == dev->pdev->vendor &&
++				pci2id[i].pci_prod == dev->pdev->device) {
++			offtbl = pci2id[i].card_id;
++			break;
++		}
++	}
 +
-+			ata_link_for_each_dev(dev, link) {
-+				int devno = dev->devno;
++	if (offtbl == NULL) {
++		dprintk("Chip %s: Unknown card!\n", dev->name);
++		return -ENODEV;
++	}
 +
-+				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
-+				if (ata_ncq_enabled(dev))
-+					ehc->saved_ncq_enabled |= 1 << devno;
-+			}
- 		}
- 
- 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
-@@ -1260,10 +1298,10 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
- 
- 	/* is it pointless to prefer PIO for "safety reasons"? */
- 	if (ap->flags & ATA_FLAG_PIO_DMA) {
--		tf.protocol = ATA_PROT_ATAPI_DMA;
-+		tf.protocol = ATAPI_PROT_DMA;
- 		tf.feature |= ATAPI_PKT_DMA;
- 	} else {
--		tf.protocol = ATA_PROT_ATAPI;
-+		tf.protocol = ATAPI_PROT_PIO;
- 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
- 		tf.lbah = 0;
- 	}
-@@ -1451,20 +1489,29 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
- 	return action;
- }
- 
--static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
-+static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
-+				   int *xfer_ok)
- {
-+	int base = 0;
++	dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
 +
-+	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
-+		*xfer_ok = 1;
++	hifn_write_1(dev, HIFN_1_DMA_CNFG,
++			HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
++			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++	mdelay(1);
++	addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
++	mdelay(1);
++	hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
++	mdelay(1);
 +
-+	if (!*xfer_ok)
-+		base = ATA_ECAT_DUBIOUS_NONE;
++	for (i=0; i<12; ++i) {
++		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
++		hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
 +
- 	if (err_mask & AC_ERR_ATA_BUS)
--		return 1;
-+		return base + ATA_ECAT_ATA_BUS;
- 
- 	if (err_mask & AC_ERR_TIMEOUT)
--		return 2;
-+		return base + ATA_ECAT_TOUT_HSM;
- 
--	if (is_io) {
-+	if (eflags & ATA_EFLAG_IS_IO) {
- 		if (err_mask & AC_ERR_HSM)
--			return 2;
-+			return base + ATA_ECAT_TOUT_HSM;
- 		if ((err_mask &
- 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
--			return 3;
-+			return base + ATA_ECAT_UNK_DEV;
- 	}
- 
- 	return 0;
-@@ -1472,18 +1519,22 @@ static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
- 
- struct speed_down_verdict_arg {
- 	u64 since;
--	int nr_errors[4];
-+	int xfer_ok;
-+	int nr_errors[ATA_ECAT_NR];
- };
- 
- static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
- {
- 	struct speed_down_verdict_arg *arg = void_arg;
--	int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask);
-+	int cat;
- 
- 	if (ent->timestamp < arg->since)
- 		return -1;
- 
-+	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
-+				      &arg->xfer_ok);
- 	arg->nr_errors[cat]++;
++		mdelay(1);
++	}
++	hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
 +
- 	return 0;
- }
- 
-@@ -1495,22 +1546,48 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
-  *	whether NCQ needs to be turned off, transfer speed should be
-  *	stepped down, or falling back to PIO is necessary.
-  *
-- *	Cat-1 is ATA_BUS error for any command.
-+ *	ECAT_ATA_BUS	: ATA_BUS error for any command
-+ *
-+ *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
-+ *			  IO commands
-+ *
-+ *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
-+ *
-+ *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
-+ *			  data transfer hasn't been verified.
-+ *
-+ *	Verdicts are
-+ *
-+ *	NCQ_OFF		: Turn off NCQ.
-+ *
-+ *	SPEED_DOWN	: Speed down transfer speed but don't fall back
-+ *			  to PIO.
-+ *
-+ *	FALLBACK_TO_PIO	: Fall back to PIO.
-+ *
-+ *	Even if multiple verdicts are returned, only one action is
-+ *	taken per error.  An action triggered by non-DUBIOUS errors
-+ *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
-+ *	This is to expedite speed down decisions right after device is
-+ *	initially configured.
-  *
-- *	Cat-2 is TIMEOUT for any command or HSM violation for known
-- *	supported commands.
-+ *	The followings are speed down rules.  #1 and #2 deal with
-+ *	DUBIOUS errors.
-  *
-- *	Cat-3 is is unclassified DEV error for known supported
-- *	command.
-+ *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
-+ *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
-  *
-- *	NCQ needs to be turned off if there have been more than 3
-- *	Cat-2 + Cat-3 errors during last 10 minutes.
-+ *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
-+ *	   occurred during last 5 mins, NCQ_OFF.
-  *
-- *	Speed down is necessary if there have been more than 3 Cat-1 +
-- *	Cat-2 errors or 10 Cat-3 errors during last 10 minutes.
-+ *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
-+ *	   ocurred during last 5 mins, FALLBACK_TO_PIO
-  *
-- *	Falling back to PIO mode is necessary if there have been more
-- *	than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes.
-+ *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
-+ *	   during last 10 mins, NCQ_OFF.
-+ *
-+ *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
-+ *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
-  *
-  *	LOCKING:
-  *	Inherited from caller.
-@@ -1525,23 +1602,38 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
- 	struct speed_down_verdict_arg arg;
- 	unsigned int verdict = 0;
- 
--	/* scan past 10 mins of error history */
-+	/* scan past 5 mins of error history */
- 	memset(&arg, 0, sizeof(arg));
--	arg.since = j64 - min(j64, j10mins);
-+	arg.since = j64 - min(j64, j5mins);
- 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
- 
--	if (arg.nr_errors[2] + arg.nr_errors[3] > 3)
--		verdict |= ATA_EH_SPDN_NCQ_OFF;
--	if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10)
--		verdict |= ATA_EH_SPDN_SPEED_DOWN;
-+	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
-+	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
-+		verdict |= ATA_EH_SPDN_SPEED_DOWN |
-+			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
- 
--	/* scan past 3 mins of error history */
-+	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
-+	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
-+		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
++	dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));
 +
-+	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
-+	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
-+	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
-+		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
++	return 0;
++}
 +
-+	/* scan past 10 mins of error history */
- 	memset(&arg, 0, sizeof(arg));
--	arg.since = j64 - min(j64, j5mins);
-+	arg.since = j64 - min(j64, j10mins);
- 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
- 
--	if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10)
--		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
-+	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
-+	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
-+		verdict |= ATA_EH_SPDN_NCQ_OFF;
++static void hifn_init_dma(struct hifn_device *dev)
++{
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++	u32 dptr = dev->desc_dma;
++	int i;
 +
-+	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
-+	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
-+	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
-+		verdict |= ATA_EH_SPDN_SPEED_DOWN;
- 
- 	return verdict;
- }
-@@ -1549,7 +1641,7 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
- /**
-  *	ata_eh_speed_down - record error and speed down if necessary
-  *	@dev: Failed device
-- *	@is_io: Did the device fail during normal IO?
-+ *	@eflags: mask of ATA_EFLAG_* flags
-  *	@err_mask: err_mask of the error
-  *
-  *	Record error and examine error history to determine whether
-@@ -1563,18 +1655,20 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
-  *	RETURNS:
-  *	Determined recovery action.
-  */
--static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
--				      unsigned int err_mask)
-+static unsigned int ata_eh_speed_down(struct ata_device *dev,
-+				unsigned int eflags, unsigned int err_mask)
- {
-+	struct ata_link *link = dev->link;
-+	int xfer_ok = 0;
- 	unsigned int verdict;
- 	unsigned int action = 0;
- 
- 	/* don't bother if Cat-0 error */
--	if (ata_eh_categorize_error(is_io, err_mask) == 0)
-+	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
- 		return 0;
- 
- 	/* record error and determine whether speed down is necessary */
--	ata_ering_record(&dev->ering, is_io, err_mask);
-+	ata_ering_record(&dev->ering, eflags, err_mask);
- 	verdict = ata_eh_speed_down_verdict(dev);
- 
- 	/* turn off NCQ? */
-@@ -1590,7 +1684,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
- 	/* speed down? */
- 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
- 		/* speed down SATA link speed if possible */
--		if (sata_down_spd_limit(dev->link) == 0) {
-+		if (sata_down_spd_limit(link) == 0) {
- 			action |= ATA_EH_HARDRESET;
- 			goto done;
- 		}
-@@ -1618,10 +1712,10 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
- 	}
- 
- 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
--	 * SATA.  Consider it only for PATA.
-+	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
- 	 */
- 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
--	    (dev->link->ap->cbl != ATA_CBL_SATA) &&
-+	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
- 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
- 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
- 			dev->spdn_cnt = 0;
-@@ -1633,7 +1727,8 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
- 	return 0;
-  done:
- 	/* device has been slowed down, blow error history */
--	ata_ering_clear(&dev->ering);
-+	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
-+		ata_ering_clear(&dev->ering);
- 	return action;
- }
- 
-@@ -1653,8 +1748,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
- 	struct ata_port *ap = link->ap;
- 	struct ata_eh_context *ehc = &link->eh_context;
- 	struct ata_device *dev;
--	unsigned int all_err_mask = 0;
--	int tag, is_io = 0;
-+	unsigned int all_err_mask = 0, eflags = 0;
-+	int tag;
- 	u32 serror;
- 	int rc;
- 
-@@ -1713,15 +1808,15 @@ static void ata_eh_link_autopsy(struct ata_link *link)
- 		ehc->i.dev = qc->dev;
- 		all_err_mask |= qc->err_mask;
- 		if (qc->flags & ATA_QCFLAG_IO)
--			is_io = 1;
-+			eflags |= ATA_EFLAG_IS_IO;
- 	}
- 
- 	/* enforce default EH actions */
- 	if (ap->pflags & ATA_PFLAG_FROZEN ||
- 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
- 		ehc->i.action |= ATA_EH_SOFTRESET;
--	else if ((is_io && all_err_mask) ||
--		 (!is_io && (all_err_mask & ~AC_ERR_DEV)))
-+	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
-+		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
- 		ehc->i.action |= ATA_EH_REVALIDATE;
- 
- 	/* If we have offending qcs and the associated failed device,
-@@ -1743,8 +1838,11 @@ static void ata_eh_link_autopsy(struct ata_link *link)
- 		      ata_dev_enabled(link->device))))
- 	    dev = link->device;
- 
--	if (dev)
--		ehc->i.action |= ata_eh_speed_down(dev, is_io, all_err_mask);
-+	if (dev) {
-+		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
-+			eflags |= ATA_EFLAG_DUBIOUS_XFER;
-+		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
-+	}
- 
- 	DPRINTK("EXIT\n");
- }
-@@ -1880,8 +1978,8 @@ static void ata_eh_link_report(struct ata_link *link)
- 				[ATA_PROT_PIO]		= "pio",
- 				[ATA_PROT_DMA]		= "dma",
- 				[ATA_PROT_NCQ]		= "ncq",
--				[ATA_PROT_ATAPI]	= "pio",
--				[ATA_PROT_ATAPI_DMA]	= "dma",
-+				[ATAPI_PROT_PIO]	= "pio",
-+				[ATAPI_PROT_DMA]	= "dma",
- 			};
- 
- 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
-@@ -1889,7 +1987,7 @@ static void ata_eh_link_report(struct ata_link *link)
- 				 dma_str[qc->dma_dir]);
- 		}
- 
--		if (is_atapi_taskfile(&qc->tf))
-+		if (ata_is_atapi(qc->tf.protocol))
- 			snprintf(cdb_buf, sizeof(cdb_buf),
- 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
- 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
-@@ -2329,6 +2427,58 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
- 	return rc;
- }
- 
-+/**
-+ *	ata_set_mode - Program timings and issue SET FEATURES - XFER
-+ *	@link: link on which timings will be programmed
-+ *	@r_failed_dev: out paramter for failed device
-+ *
-+ *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
-+ *	ata_set_mode() fails, pointer to the failing device is
-+ *	returned in @r_failed_dev.
-+ *
-+ *	LOCKING:
-+ *	PCI/etc. bus probe sem.
++	for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
++		dma->cmdr[i].p = __cpu_to_le32(dptr +
++				offsetof(struct hifn_dma, command_bufs[i][0]));
++	for (i=0; i<HIFN_D_RES_RSIZE; ++i)
++		dma->resr[i].p = __cpu_to_le32(dptr +
++				offsetof(struct hifn_dma, result_bufs[i][0]));
++
++	/*
++	 * Setup LAST descriptors.
++	 */
++	dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
++			offsetof(struct hifn_dma, cmdr[0]));
++	dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
++			offsetof(struct hifn_dma, srcr[0]));
++	dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
++			offsetof(struct hifn_dma, dstr[0]));
++	dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
++			offsetof(struct hifn_dma, resr[0]));
++
++	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
++	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
++	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
++}
++
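/*
 * Editorial sketch, not part of the patch: all four rings plus their
 * command/result buffers live inside the single coherent allocation behind
 * desc_virt/desc_dma, so the bus address of any member is just the base DMA
 * handle plus offsetof(), which is exactly what hifn_init_dma() above does
 * by hand.  A generic helper for that pattern (the name member_dma() is
 * hypothetical) would be:
 */
#define member_dma(base_dma, type, member) \
	((base_dma) + offsetof(type, member))

/*
 * e.g. the bus address of the i-th command buffer:
 *	member_dma(dev->desc_dma, struct hifn_dma, command_bufs[i][0])
 */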
++/*
++ * Initialize the PLL. We need to know the frequency of the reference clock
++ * to calculate the optimal multiplier. For PCI we assume 66MHz, since that
++ * allows us to operate without the risk of overclocking the chip. If it
++ * actually uses 33MHz, the chip will operate at half the speed; this can be
++ * overridden by specifying the frequency as a module parameter (pci33).
 + *
-+ *	RETURNS:
-+ *	0 on success, negative errno otherwise
++ * Unfortunately the PCI clock is not very suitable since the HIFN needs a
++ * stable clock and the PCI clock frequency may vary, so the default is the
++ * external clock. There is no way to find out its frequency, so we default to
++ * 66MHz since, according to Mike Ham of HiFn, almost every board in existence
++ * has an external crystal populated at 66MHz.
 + */
-+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
++static void hifn_init_pll(struct hifn_device *dev)
 +{
-+	struct ata_port *ap = link->ap;
-+	struct ata_device *dev;
-+	int rc;
++	unsigned int freq, m;
++	u32 pllcfg;
 +
-+	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
-+	ata_link_for_each_dev(dev, link) {
-+		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
-+			struct ata_ering_entry *ent;
++	pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1;
 +
-+			ent = ata_ering_top(&dev->ering);
-+			if (ent)
-+				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
-+		}
++	if (strncmp(hifn_pll_ref, "ext", 3) == 0)
++		pllcfg |= HIFN_PLL_REF_CLK_PLL;
++	else
++		pllcfg |= HIFN_PLL_REF_CLK_HBI;
++
++	if (hifn_pll_ref[3] != '\0')
++		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
++	else {
++		freq = 66;
++		printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, "
++				 "override with hifn_pll_ref=%.3s<frequency>\n",
++		       freq, hifn_pll_ref);
 +	}
 +
-+	/* has private set_mode? */
-+	if (ap->ops->set_mode)
-+		rc = ap->ops->set_mode(link, r_failed_dev);
++	m = HIFN_PLL_FCK_MAX / freq;
++
++	pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT;
++	if (m <= 8)
++		pllcfg |= HIFN_PLL_IS_1_8;
 +	else
-+		rc = ata_do_set_mode(link, r_failed_dev);
++		pllcfg |= HIFN_PLL_IS_9_12;
 +
-+	/* if transfer mode has changed, set DUBIOUS_XFER on device */
-+	ata_link_for_each_dev(dev, link) {
-+		struct ata_eh_context *ehc = &link->eh_context;
-+		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
-+		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
++	/* Select clock source and enable clock bypass */
++	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
++		     HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP);
 +
-+		if (dev->xfer_mode != saved_xfer_mode ||
-+		    ata_ncq_enabled(dev) != saved_ncq)
-+			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
-+	}
++	/* Let the chip lock to the input clock */
++	mdelay(10);
 +
-+	return rc;
++	/* Disable clock bypass */
++	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
++		     HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI);
++
++	/* Switch the engines to the PLL */
++	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
++		     HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL);
++
++	/*
++	 * The Fpk_clk runs at half the total speed. Its frequency is needed to
++	 * calculate the minimum time between two reads of the rng. Since 33MHz
++	 * calculate the minimum time between two reads of the RNG. Since 33MHz
++	 * is actually 33.333..., we overestimate the frequency here, resulting
++	 */
++	dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2;
 +}
 +
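/*
 * Editorial worked example, not part of the patch: with the default 66MHz
 * reference clock and assuming HIFN_PLL_FCK_MAX is 266 (MHz), the code
 * above gives m = 266 / 66 = 4, an ND field of m/2 - 1 = 1, and
 * pk_clk_freq = 1000000 * (66 + 1) * 4 / 2 = 134000000 Hz.  The RNG pacing
 * derived from it in hifn_register_rng() is then
 * DIV_ROUND_UP(NSEC_PER_SEC, 134000000) * 256 = 8 * 256 = 2048 ns between
 * reads.  (The 266MHz figure is an assumption; check HIFN_PLL_FCK_MAX in
 * the driver before relying on it.)
 */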
- static int ata_link_nr_enabled(struct ata_link *link)
- {
- 	struct ata_device *dev;
-@@ -2375,6 +2525,24 @@ static int ata_eh_skip_recovery(struct ata_link *link)
- 	return 1;
- }
- 
-+static int ata_eh_schedule_probe(struct ata_device *dev)
++static void hifn_init_registers(struct hifn_device *dev)
 +{
-+	struct ata_eh_context *ehc = &dev->link->eh_context;
++	u32 dptr = dev->desc_dma;
 +
-+	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
-+	    (ehc->did_probe_mask & (1 << dev->devno)))
-+		return 0;
++	/* Initialization magic... */
++	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
++	hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
++	hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
 +
-+	ata_eh_detach_dev(dev);
-+	ata_dev_init(dev);
-+	ehc->did_probe_mask |= (1 << dev->devno);
-+	ehc->i.action |= ATA_EH_SOFTRESET;
-+	ehc->saved_xfer_mode[dev->devno] = 0;
-+	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
++	/* write all 4 ring address registers */
++	hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr +
++				offsetof(struct hifn_dma, cmdr[0])));
++	hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr +
++				offsetof(struct hifn_dma, srcr[0])));
++	hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr +
++				offsetof(struct hifn_dma, dstr[0])));
++	hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr +
++				offsetof(struct hifn_dma, resr[0])));
 +
-+	return 1;
-+}
++	mdelay(2);
++#if 0
++	hifn_write_1(dev, HIFN_1_DMA_CSR,
++	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
++	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
++	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
++	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
++	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
++	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
++	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
++	    HIFN_DMACSR_S_WAIT |
++	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
++	    HIFN_DMACSR_C_WAIT |
++	    HIFN_DMACSR_ENGINE |
++	    HIFN_DMACSR_PUBDONE);
++#else
++	hifn_write_1(dev, HIFN_1_DMA_CSR,
++	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
++	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
++	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
++	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
++	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
++	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
++	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
++	    HIFN_DMACSR_S_WAIT |
++	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
++	    HIFN_DMACSR_C_WAIT |
++	    HIFN_DMACSR_ENGINE |
++	    HIFN_DMACSR_PUBDONE);
++#endif
++	hifn_read_1(dev, HIFN_1_DMA_CSR);
 +
- static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
- {
- 	struct ata_eh_context *ehc = &dev->link->eh_context;
-@@ -2406,16 +2574,9 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
- 		if (ata_link_offline(dev->link))
- 			ata_eh_detach_dev(dev);
- 
--		/* probe if requested */
--		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
--		    !(ehc->did_probe_mask & (1 << dev->devno))) {
--			ata_eh_detach_dev(dev);
--			ata_dev_init(dev);
--
-+		/* schedule probe if necessary */
-+		if (ata_eh_schedule_probe(dev))
- 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
--			ehc->did_probe_mask |= (1 << dev->devno);
--			ehc->i.action |= ATA_EH_SOFTRESET;
--		}
- 
- 		return 1;
- 	} else {
-@@ -2492,14 +2653,9 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- 			if (dev->flags & ATA_DFLAG_DETACH)
- 				ata_eh_detach_dev(dev);
- 
--			if (!ata_dev_enabled(dev) &&
--			    ((ehc->i.probe_mask & (1 << dev->devno)) &&
--			     !(ehc->did_probe_mask & (1 << dev->devno)))) {
--				ata_eh_detach_dev(dev);
--				ata_dev_init(dev);
--				ehc->did_probe_mask |= (1 << dev->devno);
--				ehc->i.action |= ATA_EH_SOFTRESET;
--			}
-+			/* schedule probe if necessary */
-+			if (!ata_dev_enabled(dev))
-+				ata_eh_schedule_probe(dev);
- 		}
- 	}
- 
-@@ -2747,6 +2903,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
- 	if (ap->ops->port_suspend)
- 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
- 
-+	ata_acpi_set_state(ap, PMSG_SUSPEND);
-  out:
- 	/* report result */
- 	spin_lock_irqsave(ap->lock, flags);
-@@ -2792,6 +2949,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
- 
- 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
- 
-+	ata_acpi_set_state(ap, PMSG_ON);
++	dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
++	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
++	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
++	    HIFN_DMAIER_ENGINE;
++	dev->dmareg &= ~HIFN_DMAIER_C_WAIT;
 +
- 	if (ap->ops->port_resume)
- 		rc = ap->ops->port_resume(ap);
- 
-diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
-index 14daf48..c02c490 100644
---- a/drivers/ata/libata-scsi.c
-+++ b/drivers/ata/libata-scsi.c
-@@ -517,7 +517,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
- 		qc->scsicmd = cmd;
- 		qc->scsidone = done;
- 
--		qc->__sg = scsi_sglist(cmd);
-+		qc->sg = scsi_sglist(cmd);
- 		qc->n_elem = scsi_sg_count(cmd);
- 	} else {
- 		cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
-@@ -839,7 +839,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
- 	if (dev->class == ATA_DEV_ATAPI) {
- 		struct request_queue *q = sdev->request_queue;
- 		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
--	}
++	hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
++	hifn_read_1(dev, HIFN_1_DMA_IER);
++#if 0
++	hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
++		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
++		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
++		    HIFN_PUCNFG_DRAM);
++#else
++	hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
++#endif
++	hifn_init_pll(dev);
 +
-+		/* set the min alignment */
-+		blk_queue_update_dma_alignment(sdev->request_queue,
-+					       ATA_DMA_PAD_SZ - 1);
-+	} else
-+		/* ATA devices must be sector aligned */
-+		blk_queue_update_dma_alignment(sdev->request_queue,
-+					       ATA_SECT_SIZE - 1);
- 
- 	if (dev->class == ATA_DEV_ATA)
- 		sdev->manage_start_stop = 1;
-@@ -878,7 +885,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
- 	if (dev)
- 		ata_scsi_dev_config(sdev, dev);
- 
--	return 0;	/* scsi layer doesn't check return value, sigh */
-+	return 0;
- }
- 
- /**
-@@ -2210,7 +2217,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
- 
- 		/* sector size */
- 		ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8);
--		ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE);
-+		ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE & 0xff);
- 	} else {
- 		/* sector count, 64-bit */
- 		ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7));
-@@ -2224,7 +2231,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
- 
- 		/* sector size */
- 		ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8);
--		ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE);
-+		ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE & 0xff);
- 	}
- 
- 	return 0;
-@@ -2331,7 +2338,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
- 	DPRINTK("ATAPI request sense\n");
- 
- 	/* FIXME: is this needed? */
--	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
-+	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- 
- 	ap->ops->tf_read(ap, &qc->tf);
- 
-@@ -2341,7 +2348,9 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
- 
- 	ata_qc_reinit(qc);
- 
--	ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
-+	/* setup sg table and init transfer direction */
-+	sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
-+	ata_sg_init(qc, &qc->sgent, 1);
- 	qc->dma_dir = DMA_FROM_DEVICE;
- 
- 	memset(&qc->cdb, 0, qc->dev->cdb_len);
-@@ -2352,10 +2361,10 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
- 	qc->tf.command = ATA_CMD_PACKET;
- 
- 	if (ata_pio_use_silly(ap)) {
--		qc->tf.protocol = ATA_PROT_ATAPI_DMA;
-+		qc->tf.protocol = ATAPI_PROT_DMA;
- 		qc->tf.feature |= ATAPI_PKT_DMA;
- 	} else {
--		qc->tf.protocol = ATA_PROT_ATAPI;
-+		qc->tf.protocol = ATAPI_PROT_PIO;
- 		qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
- 		qc->tf.lbah = 0;
- 	}
-@@ -2526,12 +2535,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
- 	if (using_pio || nodata) {
- 		/* no data, or PIO data xfer */
- 		if (nodata)
--			qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
-+			qc->tf.protocol = ATAPI_PROT_NODATA;
- 		else
--			qc->tf.protocol = ATA_PROT_ATAPI;
-+			qc->tf.protocol = ATAPI_PROT_PIO;
- 	} else {
- 		/* DMA data xfer */
--		qc->tf.protocol = ATA_PROT_ATAPI_DMA;
-+		qc->tf.protocol = ATAPI_PROT_DMA;
- 		qc->tf.feature |= ATAPI_PKT_DMA;
- 
- 		if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE))
-@@ -2690,6 +2699,24 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
- 	if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
- 		goto invalid_fld;
- 
-+	/*
-+	 * Filter TPM commands by default. These provide an
-+	 * essentially uncontrolled encrypted "back door" between
-+	 * applications and the disk. Set libata.allow_tpm=1 if you
-+	 * have a real reason for wanting to use them. This ensures
-+	 * that installed software cannot easily mess stuff up without
-+	 * user intent. DVR type users will probably ship with this enabled
-+	 * for movie content management.
-+	 *
-+	 * Note that for ATA8 we can issue a DCS change and DCS freeze lock
-+	 * for this and should do in future but that it is not sufficient as
-+	 * DCS is an optional feature set. Thus we also do the software filter
-+	 * so that we comply with the TC consortium stated goal that the user
-+	 * can turn off TC features of their system.
-+	 */
-+	if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
-+		goto invalid_fld;
++	hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
++	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
++	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
++	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
++}
 +
- 	/* We may not issue DMA commands if no DMA mode is set */
- 	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
- 		goto invalid_fld;
-diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
-index b7ac80b..60cd4b1 100644
---- a/drivers/ata/libata-sff.c
-+++ b/drivers/ata/libata-sff.c
-@@ -147,7 +147,9 @@ void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
-  *	@tf: ATA taskfile register set for storing input
-  *
-  *	Reads ATA taskfile registers for currently-selected device
-- *	into @tf.
-+ *	into @tf. Assumes the device has a fully SFF compliant task file
-+ *	layout and behaviour. If you device does not (eg has a different
-+ *	status method) then you will need to provide a replacement tf_read
-  *
-  *	LOCKING:
-  *	Inherited from caller.
-@@ -156,7 +158,7 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
- {
- 	struct ata_ioports *ioaddr = &ap->ioaddr;
- 
--	tf->command = ata_chk_status(ap);
-+	tf->command = ata_check_status(ap);
- 	tf->feature = ioread8(ioaddr->error_addr);
- 	tf->nsect = ioread8(ioaddr->nsect_addr);
- 	tf->lbal = ioread8(ioaddr->lbal_addr);
-@@ -415,7 +417,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
- 	ap->hsm_task_state = HSM_ST_IDLE;
- 
- 	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
--		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
-+		   qc->tf.protocol == ATAPI_PROT_DMA)) {
- 		u8 host_stat;
- 
- 		host_stat = ap->ops->bmdma_status(ap);
-@@ -549,7 +551,7 @@ int ata_pci_init_bmdma(struct ata_host *host)
- 		return rc;
- 
- 	/* request and iomap DMA region */
--	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
-+	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
- 	if (rc) {
- 		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
- 		return -ENOMEM;
-@@ -619,7 +621,8 @@ int ata_pci_init_sff_host(struct ata_host *host)
- 			continue;
- 		}
- 
--		rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME);
-+		rc = pcim_iomap_regions(pdev, 0x3 << base,
-+					dev_driver_string(gdev));
- 		if (rc) {
- 			dev_printk(KERN_WARNING, gdev,
- 				   "failed to request/iomap BARs for port %d "
-@@ -711,6 +714,99 @@ int ata_pci_prepare_sff_host(struct pci_dev *pdev,
- }
- 
- /**
-+ *	ata_pci_activate_sff_host - start SFF host, request IRQ and register it
-+ *	@host: target SFF ATA host
-+ *	@irq_handler: irq_handler used when requesting IRQ(s)
-+ *	@sht: scsi_host_template to use when registering the host
-+ *
-+ *	This is the counterpart of ata_host_activate() for SFF ATA
-+ *	hosts.  This separate helper is necessary because SFF hosts
-+ *	use two separate interrupts in legacy mode.
-+ *
-+ *	LOCKING:
-+ *	Inherited from calling layer (may sleep).
-+ *
-+ *	RETURNS:
-+ *	0 on success, -errno otherwise.
-+ */
-+int ata_pci_activate_sff_host(struct ata_host *host,
-+			      irq_handler_t irq_handler,
-+			      struct scsi_host_template *sht)
++static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
++		unsigned dlen, unsigned slen, u16 mask, u8 snum)
 +{
-+	struct device *dev = host->dev;
-+	struct pci_dev *pdev = to_pci_dev(dev);
-+	const char *drv_name = dev_driver_string(host->dev);
-+	int legacy_mode = 0, rc;
++	struct hifn_base_command *base_cmd;
++	u8 *buf_pos = buf;
 +
-+	rc = ata_host_start(host);
-+	if (rc)
-+		return rc;
++	base_cmd = (struct hifn_base_command *)buf_pos;
++	base_cmd->masks = __cpu_to_le16(mask);
++	base_cmd->total_source_count =
++		__cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
++	base_cmd->total_dest_count =
++		__cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
 +
-+	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
-+		u8 tmp8, mask;
++	dlen >>= 16;
++	slen >>= 16;
++	base_cmd->session_num = __cpu_to_le16(snum |
++	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
++	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
 +
-+		/* TODO: What if one channel is in native mode ... */
-+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
-+		mask = (1 << 2) | (1 << 0);
-+		if ((tmp8 & mask) != mask)
-+			legacy_mode = 1;
-+#if defined(CONFIG_NO_ATA_LEGACY)
-+		/* Some platforms with PCI limits cannot address compat
-+		   port space. In that case we punt if their firmware has
-+		   left a device in compatibility mode */
-+		if (legacy_mode) {
-+			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
-+			return -EOPNOTSUPP;
-+		}
-+#endif
-+	}
++	return sizeof(struct hifn_base_command);
++}
 +
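/*
 * Editorial note, not part of the patch: hifn_setup_base_command() splits
 * lengths that do not fit in 16 bits.  Assuming HIFN_BASE_CMD_LENMASK_LO
 * covers the low 16 bits (as the ">>= 16" suggests), a 100000-byte
 * (0x186a0) source ends up as total_source_count = 0x86a0 with the
 * remaining 0x1 folded into session_num through the SRCLEN_S/SRCLEN_M
 * field, so a single command can describe transfers larger than 64 KiB.
 */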
-+	if (!devres_open_group(dev, NULL, GFP_KERNEL))
-+		return -ENOMEM;
++static int hifn_setup_crypto_command(struct hifn_device *dev,
++		u8 *buf, unsigned dlen, unsigned slen,
++		u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
++{
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++	struct hifn_crypt_command *cry_cmd;
++	u8 *buf_pos = buf;
++	u16 cmd_len;
 +
-+	if (!legacy_mode && pdev->irq) {
-+		rc = devm_request_irq(dev, pdev->irq, irq_handler,
-+				      IRQF_SHARED, drv_name, host);
-+		if (rc)
-+			goto out;
++	cry_cmd = (struct hifn_crypt_command *)buf_pos;
 +
-+		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
-+		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
-+	} else if (legacy_mode) {
-+		if (!ata_port_is_dummy(host->ports[0])) {
-+			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
-+					      irq_handler, IRQF_SHARED,
-+					      drv_name, host);
-+			if (rc)
-+				goto out;
++	cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
++	dlen >>= 16;
++	cry_cmd->masks = __cpu_to_le16(mode |
++			((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
++			 HIFN_CRYPT_CMD_SRCLEN_M));
++	cry_cmd->header_skip = 0;
++	cry_cmd->reserved = 0;
 +
-+			ata_port_desc(host->ports[0], "irq %d",
-+				      ATA_PRIMARY_IRQ(pdev));
-+		}
++	buf_pos += sizeof(struct hifn_crypt_command);
 +
-+		if (!ata_port_is_dummy(host->ports[1])) {
-+			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
-+					      irq_handler, IRQF_SHARED,
-+					      drv_name, host);
-+			if (rc)
-+				goto out;
++	dma->cmdu++;
++	if (dma->cmdu > 1) {
++		dev->dmareg |= HIFN_DMAIER_C_WAIT;
++		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
++	}
 +
-+			ata_port_desc(host->ports[1], "irq %d",
-+				      ATA_SECONDARY_IRQ(pdev));
-+		}
++	if (keylen) {
++		memcpy(buf_pos, key, keylen);
++		buf_pos += keylen;
++	}
++	if (ivsize) {
++		memcpy(buf_pos, iv, ivsize);
++		buf_pos += ivsize;
 +	}
 +
-+	rc = ata_host_register(host, sht);
-+ out:
-+	if (rc == 0)
-+		devres_remove_group(dev, NULL);
-+	else
-+		devres_release_group(dev, NULL);
++	cmd_len = buf_pos - buf;
 +
-+	return rc;
++	return cmd_len;
 +}
 +
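/*
 * Editorial sketch, not part of the patch: taken together, the two helpers
 * above build one contiguous command buffer which, for a cipher request,
 * is laid out as
 *
 *	struct hifn_base_command	(masks, split lengths, session number)
 *	struct hifn_crypt_command	(algorithm/mode bits, source length)
 *	key bytes			(when a new key is being loaded)
 *	IV bytes			(when a new IV is being loaded)
 *
 * and whose total size is what hifn_setup_dma() further down writes into
 * the length field of the current command descriptor.
 */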
-+/**
-  *	ata_pci_init_one - Initialize/register PCI IDE host controller
-  *	@pdev: Controller to be initialized
-  *	@ppi: array of port_info, must be enough for two ports
-@@ -739,8 +835,6 @@ int ata_pci_init_one(struct pci_dev *pdev,
- 	struct device *dev = &pdev->dev;
- 	const struct ata_port_info *pi = NULL;
- 	struct ata_host *host = NULL;
--	u8 mask;
--	int legacy_mode = 0;
- 	int i, rc;
- 
- 	DPRINTK("ENTER\n");
-@@ -762,95 +856,24 @@ int ata_pci_init_one(struct pci_dev *pdev,
- 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
- 		return -ENOMEM;
- 
--	/* FIXME: Really for ATA it isn't safe because the device may be
--	   multi-purpose and we want to leave it alone if it was already
--	   enabled. Secondly for shared use as Arjan says we want refcounting
--
--	   Checking dev->is_enabled is insufficient as this is not set at
--	   boot for the primary video which is BIOS enabled
--	  */
--
- 	rc = pcim_enable_device(pdev);
- 	if (rc)
--		goto err_out;
-+		goto out;
- 
--	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
--		u8 tmp8;
--
--		/* TODO: What if one channel is in native mode ... */
--		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
--		mask = (1 << 2) | (1 << 0);
--		if ((tmp8 & mask) != mask)
--			legacy_mode = 1;
--#if defined(CONFIG_NO_ATA_LEGACY)
--		/* Some platforms with PCI limits cannot address compat
--		   port space. In that case we punt if their firmware has
--		   left a device in compatibility mode */
--		if (legacy_mode) {
--			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
--			rc = -EOPNOTSUPP;
--			goto err_out;
--		}
--#endif
--	}
--
--	/* prepare host */
-+	/* prepare and activate SFF host */
- 	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
- 	if (rc)
--		goto err_out;
-+		goto out;
- 
- 	pci_set_master(pdev);
-+	rc = ata_pci_activate_sff_host(host, pi->port_ops->irq_handler,
-+				       pi->sht);
-+ out:
-+	if (rc == 0)
-+		devres_remove_group(&pdev->dev, NULL);
-+	else
-+		devres_release_group(&pdev->dev, NULL);
- 
--	/* start host and request IRQ */
--	rc = ata_host_start(host);
--	if (rc)
--		goto err_out;
--
--	if (!legacy_mode && pdev->irq) {
--		/* We may have no IRQ assigned in which case we can poll. This
--		   shouldn't happen on a sane system but robustness is cheap
--		   in this case */
--		rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
--				      IRQF_SHARED, DRV_NAME, host);
--		if (rc)
--			goto err_out;
--
--		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
--		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
--	} else if (legacy_mode) {
--		if (!ata_port_is_dummy(host->ports[0])) {
--			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
--					      pi->port_ops->irq_handler,
--					      IRQF_SHARED, DRV_NAME, host);
--			if (rc)
--				goto err_out;
--
--			ata_port_desc(host->ports[0], "irq %d",
--				      ATA_PRIMARY_IRQ(pdev));
--		}
--
--		if (!ata_port_is_dummy(host->ports[1])) {
--			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
--					      pi->port_ops->irq_handler,
--					      IRQF_SHARED, DRV_NAME, host);
--			if (rc)
--				goto err_out;
--
--			ata_port_desc(host->ports[1], "irq %d",
--				      ATA_SECONDARY_IRQ(pdev));
--		}
--	}
--
--	/* register */
--	rc = ata_host_register(host, pi->sht);
--	if (rc)
--		goto err_out;
--
--	devres_remove_group(dev, NULL);
--	return 0;
--
--err_out:
--	devres_release_group(dev, NULL);
- 	return rc;
- }
- 
-diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
-index bbe59c2..409ffb9 100644
---- a/drivers/ata/libata.h
-+++ b/drivers/ata/libata.h
-@@ -60,6 +60,7 @@ extern int atapi_dmadir;
- extern int atapi_passthru16;
- extern int libata_fua;
- extern int libata_noacpi;
-+extern int libata_allow_tpm;
- extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
- extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
- 			   u64 block, u32 n_block, unsigned int tf_flags,
-@@ -85,7 +86,6 @@ extern int ata_dev_configure(struct ata_device *dev);
- extern int sata_down_spd_limit(struct ata_link *link);
- extern int sata_set_spd_needed(struct ata_link *link);
- extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
--extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
- extern void ata_sg_clean(struct ata_queued_cmd *qc);
- extern void ata_qc_free(struct ata_queued_cmd *qc);
- extern void ata_qc_issue(struct ata_queued_cmd *qc);
-@@ -113,6 +113,7 @@ extern int ata_acpi_on_suspend(struct ata_port *ap);
- extern void ata_acpi_on_resume(struct ata_port *ap);
- extern int ata_acpi_on_devcfg(struct ata_device *dev);
- extern void ata_acpi_on_disable(struct ata_device *dev);
-+extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
- #else
- static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { }
- static inline void ata_acpi_associate(struct ata_host *host) { }
-@@ -121,6 +122,8 @@ static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
- static inline void ata_acpi_on_resume(struct ata_port *ap) { }
- static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
- static inline void ata_acpi_on_disable(struct ata_device *dev) { }
-+static inline void ata_acpi_set_state(struct ata_port *ap,
-+				      pm_message_t state) { }
- #endif
- 
- /* libata-scsi.c */
-@@ -183,6 +186,7 @@ extern void ata_eh_report(struct ata_port *ap);
- extern int ata_eh_reset(struct ata_link *link, int classify,
- 			ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- 			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
-+extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
- extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- 			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- 			  ata_postreset_fn_t postreset,
-diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
-index e4542ab..244098a 100644
---- a/drivers/ata/pata_acpi.c
-+++ b/drivers/ata/pata_acpi.c
-@@ -81,17 +81,6 @@ static void pacpi_error_handler(struct ata_port *ap)
- 				  NULL, ata_std_postreset);
- }
- 
--/* Welcome to ACPI, bring a bucket */
--static const unsigned int pio_cycle[7] = {
--	600, 383, 240, 180, 120, 100, 80
--};
--static const unsigned int mwdma_cycle[5] = {
--	480, 150, 120, 100, 80
--};
--static const unsigned int udma_cycle[7] = {
--	120, 80, 60, 45, 30, 20, 15
--};
--
- /**
-  *	pacpi_discover_modes	-	filter non ACPI modes
-  *	@adev: ATA device
-@@ -103,56 +92,20 @@ static const unsigned int udma_cycle[7] = {
- 
- static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev)
- {
--	int unit = adev->devno;
- 	struct pata_acpi *acpi = ap->private_data;
--	int i;
--	u32 t;
--	unsigned long mask = (0x7f << ATA_SHIFT_UDMA) | (0x7 << ATA_SHIFT_MWDMA) | (0x1F << ATA_SHIFT_PIO);
--
- 	struct ata_acpi_gtm probe;
-+	unsigned int xfer_mask;
- 
- 	probe = acpi->gtm;
- 
--	/* We always use the 0 slot for crap hardware */
--	if (!(probe.flags & 0x10))
--		unit = 0;
--
- 	ata_acpi_gtm(ap, &probe);
- 
--	/* Start by scanning for PIO modes */
--	for (i = 0; i < 7; i++) {
--		t = probe.drive[unit].pio;
--		if (t <= pio_cycle[i]) {
--			mask |= (2 << (ATA_SHIFT_PIO + i)) - 1;
--			break;
--		}
--	}
-+	xfer_mask = ata_acpi_gtm_xfermask(adev, &probe);
- 
--	/* See if we have MWDMA or UDMA data. We don't bother with MWDMA
--	   if UDMA is availabe as this means the BIOS set UDMA and our
--	   error changedown if it works is UDMA to PIO anyway */
--	if (probe.flags & (1 << (2 * unit))) {
--		/* MWDMA */
--		for (i = 0; i < 5; i++) {
--			t = probe.drive[unit].dma;
--			if (t <= mwdma_cycle[i]) {
--				mask |= (2 << (ATA_SHIFT_MWDMA + i)) - 1;
--				break;
--			}
--		}
--	} else {
--		/* UDMA */
--		for (i = 0; i < 7; i++) {
--			t = probe.drive[unit].dma;
--			if (t <= udma_cycle[i]) {
--				mask |= (2 << (ATA_SHIFT_UDMA + i)) - 1;
--				break;
--			}
--		}
--	}
--	if (mask & (0xF8 << ATA_SHIFT_UDMA))
-+	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
- 		ap->cbl = ATA_CBL_PATA80;
--	return mask;
-+
-+	return xfer_mask;
- }
- 
- /**
-@@ -180,12 +133,14 @@ static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
- {
- 	int unit = adev->devno;
- 	struct pata_acpi *acpi = ap->private_data;
-+	const struct ata_timing *t;
- 
- 	if (!(acpi->gtm.flags & 0x10))
- 		unit = 0;
- 
- 	/* Now stuff the nS values into the structure */
--	acpi->gtm.drive[unit].pio = pio_cycle[adev->pio_mode - XFER_PIO_0];
-+	t = ata_timing_find_mode(adev->pio_mode);
-+	acpi->gtm.drive[unit].pio = t->cycle;
- 	ata_acpi_stm(ap, &acpi->gtm);
- 	/* See what mode we actually got */
- 	ata_acpi_gtm(ap, &acpi->gtm);
-@@ -201,16 +156,18 @@ static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
- {
- 	int unit = adev->devno;
- 	struct pata_acpi *acpi = ap->private_data;
-+	const struct ata_timing *t;
- 
- 	if (!(acpi->gtm.flags & 0x10))
- 		unit = 0;
- 
- 	/* Now stuff the nS values into the structure */
-+	t = ata_timing_find_mode(adev->dma_mode);
- 	if (adev->dma_mode >= XFER_UDMA_0) {
--		acpi->gtm.drive[unit].dma = udma_cycle[adev->dma_mode - XFER_UDMA_0];
-+		acpi->gtm.drive[unit].dma = t->udma;
- 		acpi->gtm.flags |= (1 << (2 * unit));
- 	} else {
--		acpi->gtm.drive[unit].dma = mwdma_cycle[adev->dma_mode - XFER_MW_DMA_0];
-+		acpi->gtm.drive[unit].dma = t->cycle;
- 		acpi->gtm.flags &= ~(1 << (2 * unit));
- 	}
- 	ata_acpi_stm(ap, &acpi->gtm);
-diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
-index 8caf9af..7e68edf 100644
---- a/drivers/ata/pata_ali.c
-+++ b/drivers/ata/pata_ali.c
-@@ -64,7 +64,7 @@ static int ali_cable_override(struct pci_dev *pdev)
- 	if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
- 	   	return 1;
- 	/* Mitac 8317 (Winbook-A) and relatives */
--	if (pdev->subsystem_vendor == 0x1071  && pdev->subsystem_device == 0x8317)
-+	if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317)
- 		return 1;
- 	/* Systems by DMI */
- 	if (dmi_check_system(cable_dmi_table))
-diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
-index 3cc27b5..761a666 100644
---- a/drivers/ata/pata_amd.c
-+++ b/drivers/ata/pata_amd.c
-@@ -220,6 +220,62 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
- 	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
- }
- 
-+/* Both host-side and drive-side detection results are worthless on NV
-+ * PATAs.  Ignore them and just follow what BIOS configured.  Both the
-+ * current configuration in PCI config reg and ACPI GTM result are
-+ * cached during driver attach and are consulted to select transfer
-+ * mode.
-+ */
-+static unsigned long nv_mode_filter(struct ata_device *dev,
-+				    unsigned long xfer_mask)
++static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
++		unsigned int offset, unsigned int size)
 +{
-+	static const unsigned int udma_mask_map[] =
-+		{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
-+		  ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
-+	struct ata_port *ap = dev->link->ap;
-+	char acpi_str[32] = "";
-+	u32 saved_udma, udma;
-+	const struct ata_acpi_gtm *gtm;
-+	unsigned long bios_limit = 0, acpi_limit = 0, limit;
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++	int idx;
++	dma_addr_t addr;
 +
-+	/* find out what BIOS configured */
-+	udma = saved_udma = (unsigned long)ap->host->private_data;
++	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
 +
-+	if (ap->port_no == 0)
-+		udma >>= 16;
-+	if (dev->devno == 0)
-+		udma >>= 8;
++	idx = dma->srci;
 +
-+	if ((udma & 0xc0) == 0xc0)
-+		bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);
++	dma->srcr[idx].p = __cpu_to_le32(addr);
++	dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
++			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
 +
-+	/* consult ACPI GTM too */
-+	gtm = ata_acpi_init_gtm(ap);
-+	if (gtm) {
-+		acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);
++	if (++idx == HIFN_D_SRC_RSIZE) {
++		dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
++				HIFN_D_JUMP |
++				HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
++		idx = 0;
++	}
 +
-+		snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
-+			 gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
++	dma->srci = idx;
++	dma->srcu++;
++
++	if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
++		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
++		dev->flags |= HIFN_FLAG_SRC_BUSY;
 +	}
 +
-+	/* be optimistic, EH can take care of things if something goes wrong */
-+	limit = bios_limit | acpi_limit;
++	return size;
++}
 +
-+	/* If PIO or DMA isn't configured at all, don't limit.  Let EH
-+	 * handle it.
++static void hifn_setup_res_desc(struct hifn_device *dev)
++{
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++
++	dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
++			HIFN_D_VALID | HIFN_D_LAST);
++	/*
++	 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
++	 *					HIFN_D_LAST | HIFN_D_NOINVALID);
 +	 */
-+	if (!(limit & ATA_MASK_PIO))
-+		limit |= ATA_MASK_PIO;
-+	if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
-+		limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;
 +
-+	ata_port_printk(ap, KERN_DEBUG, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
-+			"BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
-+			xfer_mask, limit, xfer_mask & limit, bios_limit,
-+			saved_udma, acpi_limit, acpi_str);
++	if (++dma->resi == HIFN_D_RES_RSIZE) {
++		dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
++				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
++		dma->resi = 0;
++	}
 +
-+	return xfer_mask & limit;
++	dma->resu++;
++
++	if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
++		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
++		dev->flags |= HIFN_FLAG_RES_BUSY;
++	}
 +}
- 
- /**
-  *	nv_probe_init	-	cable detection
-@@ -252,31 +308,6 @@ static void nv_error_handler(struct ata_port *ap)
- 			       ata_std_postreset);
- }
- 
--static int nv_cable_detect(struct ata_port *ap)
--{
--	static const u8 bitmask[2] = {0x03, 0x0C};
--	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
--	u8 ata66;
--	u16 udma;
--	int cbl;
--
--	pci_read_config_byte(pdev, 0x52, &ata66);
--	if (ata66 & bitmask[ap->port_no])
--		cbl = ATA_CBL_PATA80;
--	else
--		cbl = ATA_CBL_PATA40;
--
-- 	/* We now have to double check because the Nvidia boxes BIOS
-- 	   doesn't always set the cable bits but does set mode bits */
-- 	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
-- 	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
--		cbl = ATA_CBL_PATA80;
--	/* And a triple check across suspend/resume with ACPI around */
--	if (ata_acpi_cbl_80wire(ap))
--		cbl = ATA_CBL_PATA80;
--	return cbl;
--}
--
- /**
-  *	nv100_set_piomode	-	set initial PIO mode data
-  *	@ap: ATA interface
-@@ -314,6 +345,14 @@ static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
- 	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
- }
- 
-+static void nv_host_stop(struct ata_host *host)
++
++static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
++		unsigned offset, unsigned size)
 +{
-+	u32 udma = (unsigned long)host->private_data;
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++	int idx;
++	dma_addr_t addr;
 +
-+	/* restore PCI config register 0x60 */
-+	pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
++	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
++
++	idx = dma->dsti;
++	dma->dstr[idx].p = __cpu_to_le32(addr);
++	dma->dstr[idx].l = __cpu_to_le32(size |	HIFN_D_VALID |
++			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
++
++	if (++idx == HIFN_D_DST_RSIZE) {
++		dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
++				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
++				HIFN_D_LAST | HIFN_D_NOINVALID);
++		idx = 0;
++	}
++	dma->dsti = idx;
++	dma->dstu++;
++
++	if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
++		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
++		dev->flags |= HIFN_FLAG_DST_BUSY;
++	}
 +}
 +
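/*
 * Editorial sketch, not part of the patch: each ring (cmd, src, dst, res)
 * has one extra slot past HIFN_D_*_RSIZE that hifn_init_dma() pointed back
 * at the ring base; when a producer index reaches the end, the setup
 * helpers above mark that slot VALID|JUMP so the engine loops back to
 * entry 0.  Stripped of the per-ring flag differences, the wrap step is
 * roughly (struct hifn_desc being the { p, l } descriptor pair used above):
 */
static unsigned int hifn_ring_wrap_sketch(struct hifn_desc *ring,
					  unsigned int idx, unsigned int rsize)
{
	if (++idx == rsize) {
		ring[rsize].l = __cpu_to_le32(HIFN_D_VALID | HIFN_D_JUMP |
					      HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
		idx = 0;
	}
	return idx;
}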
- static struct scsi_host_template amd_sht = {
- 	.module			= THIS_MODULE,
- 	.name			= DRV_NAME,
-@@ -478,7 +517,8 @@ static struct ata_port_operations nv100_port_ops = {
- 	.thaw		= ata_bmdma_thaw,
- 	.error_handler	= nv_error_handler,
- 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
--	.cable_detect	= nv_cable_detect,
-+	.cable_detect	= ata_cable_ignore,
-+	.mode_filter	= nv_mode_filter,
- 
- 	.bmdma_setup 	= ata_bmdma_setup,
- 	.bmdma_start 	= ata_bmdma_start,
-@@ -495,6 +535,7 @@ static struct ata_port_operations nv100_port_ops = {
- 	.irq_on		= ata_irq_on,
- 
- 	.port_start	= ata_sff_port_start,
-+	.host_stop	= nv_host_stop,
- };
- 
- static struct ata_port_operations nv133_port_ops = {
-@@ -511,7 +552,8 @@ static struct ata_port_operations nv133_port_ops = {
- 	.thaw		= ata_bmdma_thaw,
- 	.error_handler	= nv_error_handler,
- 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
--	.cable_detect	= nv_cable_detect,
-+	.cable_detect	= ata_cable_ignore,
-+	.mode_filter	= nv_mode_filter,
- 
- 	.bmdma_setup 	= ata_bmdma_setup,
- 	.bmdma_start 	= ata_bmdma_start,
-@@ -528,6 +570,7 @@ static struct ata_port_operations nv133_port_ops = {
- 	.irq_on		= ata_irq_on,
- 
- 	.port_start	= ata_sff_port_start,
-+	.host_stop	= nv_host_stop,
- };
- 
- static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
-@@ -614,7 +657,8 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
- 			.port_ops = &amd100_port_ops
- 		}
- 	};
--	const struct ata_port_info *ppi[] = { NULL, NULL };
-+	struct ata_port_info pi;
-+	const struct ata_port_info *ppi[] = { &pi, NULL };
- 	static int printed_version;
- 	int type = id->driver_data;
- 	u8 fifo;
-@@ -628,6 +672,19 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
- 	if (type == 1 && pdev->revision > 0x7)
- 		type = 2;
- 
-+	/* Serenade ? */
-+	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
-+			 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
-+		type = 6;	/* UDMA 100 only */
++static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
++		struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
++		struct hifn_context *ctx)
++{
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++	int cmd_len, sa_idx;
++	u8 *buf, *buf_pos;
++	u16 mask;
 +
-+	/*
-+	 * Okay, type is determined now.  Apply type-specific workarounds.
-+	 */
-+	pi = info[type];
++	dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
++			dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
 +
-+	if (type < 3)
-+		ata_pci_clear_simplex(pdev);
++	sa_idx = dma->resi;
 +
- 	/* Check for AMD7411 */
- 	if (type == 3)
- 		/* FIFO is broken */
-@@ -635,16 +692,17 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
- 	else
- 		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
- 
--	/* Serenade ? */
--	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
--			 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
--		type = 6;	/* UDMA 100 only */
-+	/* Cable detection on Nvidia chips doesn't work too well,
-+	 * cache BIOS programmed UDMA mode.
-+	 */
-+	if (type == 7 || type == 8) {
-+		u32 udma;
- 
--	if (type < 3)
--		ata_pci_clear_simplex(pdev);
-+		pci_read_config_dword(pdev, 0x60, &udma);
-+		pi.private_data = (void *)(unsigned long)udma;
++	hifn_setup_src_desc(dev, spage, soff, nbytes);
++
++	buf_pos = buf = dma->command_bufs[dma->cmdi];
++
++	mask = 0;
++	switch (ctx->op) {
++		case ACRYPTO_OP_DECRYPT:
++			mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
++			break;
++		case ACRYPTO_OP_ENCRYPT:
++			mask = HIFN_BASE_CMD_CRYPT;
++			break;
++		case ACRYPTO_OP_HMAC:
++			mask = HIFN_BASE_CMD_MAC;
++			break;
++		default:
++			goto err_out;
 +	}
- 
- 	/* And fire it up */
--	ppi[0] = &info[type];
- 	return ata_pci_init_one(pdev, ppi);
- }
- 
-diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
-index 7842cc4..a32e3c4 100644
---- a/drivers/ata/pata_bf54x.c
-+++ b/drivers/ata/pata_bf54x.c
-@@ -832,6 +832,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
- {
- 	unsigned short config = WDSIZE_16;
- 	struct scatterlist *sg;
-+	unsigned int si;
- 
- 	pr_debug("in atapi dma setup\n");
- 	/* Program the ATA_CTRL register with dir */
-@@ -839,7 +840,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
- 		/* fill the ATAPI DMA controller */
- 		set_dma_config(CH_ATAPI_TX, config);
- 		set_dma_x_modify(CH_ATAPI_TX, 2);
--		ata_for_each_sg(sg, qc) {
-+		for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 			set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
- 			set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
- 		}
-@@ -848,7 +849,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
- 		/* fill the ATAPI DMA controller */
- 		set_dma_config(CH_ATAPI_RX, config);
- 		set_dma_x_modify(CH_ATAPI_RX, 2);
--		ata_for_each_sg(sg, qc) {
-+		for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 			set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
- 			set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
- 		}
-@@ -867,6 +868,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
- 	struct ata_port *ap = qc->ap;
- 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- 	struct scatterlist *sg;
-+	unsigned int si;
- 
- 	pr_debug("in atapi dma start\n");
- 	if (!(ap->udma_mask || ap->mwdma_mask))
-@@ -881,7 +883,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
- 		 * data cache is enabled. Otherwise, this loop
- 		 * is an empty loop and optimized out.
- 		 */
--		ata_for_each_sg(sg, qc) {
-+		for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 			flush_dcache_range(sg_dma_address(sg),
- 				sg_dma_address(sg) + sg_dma_len(sg));
- 		}
-@@ -910,7 +912,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
- 	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
- 
- 		/* Set transfer length to buffer len */
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
- 	}
- 
-@@ -932,6 +934,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
- {
- 	struct ata_port *ap = qc->ap;
- 	struct scatterlist *sg;
-+	unsigned int si;
- 
- 	pr_debug("in atapi dma stop\n");
- 	if (!(ap->udma_mask || ap->mwdma_mask))
-@@ -950,7 +953,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
- 			 * data cache is enabled. Otherwise, this loop
- 			 * is an empty loop and optimized out.
- 			 */
--			ata_for_each_sg(sg, qc) {
-+			for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 				invalidate_dcache_range(
- 					sg_dma_address(sg),
- 					sg_dma_address(sg)
-@@ -1167,34 +1170,36 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
-  *	Note: Original code is ata_data_xfer().
-  */
- 
--static void bfin_data_xfer(struct ata_device *adev, unsigned char *buf,
--			   unsigned int buflen, int write_data)
-+static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
-+				   unsigned int buflen, int rw)
- {
--	struct ata_port *ap = adev->link->ap;
--	unsigned int words = buflen >> 1;
--	unsigned short *buf16 = (u16 *) buf;
-+	struct ata_port *ap = dev->link->ap;
- 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-+	unsigned int words = buflen >> 1;
-+	unsigned short *buf16 = (u16 *)buf;
- 
- 	/* Transfer multiple of 2 bytes */
--	if (write_data) {
--		write_atapi_data(base, words, buf16);
--	} else {
-+	if (rw == READ)
- 		read_atapi_data(base, words, buf16);
--	}
-+	else
-+		write_atapi_data(base, words, buf16);
- 
- 	/* Transfer trailing 1 byte, if any. */
- 	if (unlikely(buflen & 0x01)) {
- 		unsigned short align_buf[1] = { 0 };
- 		unsigned char *trailing_buf = buf + buflen - 1;
- 
--		if (write_data) {
--			memcpy(align_buf, trailing_buf, 1);
--			write_atapi_data(base, 1, align_buf);
--		} else {
-+		if (rw == READ) {
- 			read_atapi_data(base, 1, align_buf);
- 			memcpy(trailing_buf, align_buf, 1);
-+		} else {
-+			memcpy(align_buf, trailing_buf, 1);
-+			write_atapi_data(base, 1, align_buf);
- 		}
-+		words++;
- 	}
 +
-+	return words << 1;
- }
- 
- /**
-diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
-index 33f7f08..d4590f5 100644
---- a/drivers/ata/pata_cs5520.c
-+++ b/drivers/ata/pata_cs5520.c
-@@ -198,7 +198,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
- 	};
- 	const struct ata_port_info *ppi[2];
- 	u8 pcicfg;
--	void *iomap[5];
-+	void __iomem *iomap[5];
- 	struct ata_host *host;
- 	struct ata_ioports *ioaddr;
- 	int i, rc;
-diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
-index c79f066..68eb349 100644
---- a/drivers/ata/pata_hpt37x.c
-+++ b/drivers/ata/pata_hpt37x.c
-@@ -847,15 +847,16 @@ static u32 hpt374_read_freq(struct pci_dev *pdev)
- 	u32 freq;
- 	unsigned long io_base = pci_resource_start(pdev, 4);
- 	if (PCI_FUNC(pdev->devfn) & 1) {
--		struct pci_dev *pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
-+		struct pci_dev *pdev_0;
++	buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
++			nbytes, mask, dev->snum);
 +
-+		pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
- 		/* Someone hot plugged the controller on us ? */
- 		if (pdev_0 == NULL)
- 			return 0;
- 		io_base = pci_resource_start(pdev_0, 4);
- 		freq = inl(io_base + 0x90);
- 		pci_dev_put(pdev_0);
--	}
--	else
++	if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) {
++		u16 md = 0;
++
++		if (ctx->keysize)
++			md |= HIFN_CRYPT_CMD_NEW_KEY;
++		if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB)
++			md |= HIFN_CRYPT_CMD_NEW_IV;
++
++		switch (ctx->mode) {
++			case ACRYPTO_MODE_ECB:
++				md |= HIFN_CRYPT_CMD_MODE_ECB;
++				break;
++			case ACRYPTO_MODE_CBC:
++				md |= HIFN_CRYPT_CMD_MODE_CBC;
++				break;
++			case ACRYPTO_MODE_CFB:
++				md |= HIFN_CRYPT_CMD_MODE_CFB;
++				break;
++			case ACRYPTO_MODE_OFB:
++				md |= HIFN_CRYPT_CMD_MODE_OFB;
++				break;
++			default:
++				goto err_out;
++		}
++
++		switch (ctx->type) {
++			case ACRYPTO_TYPE_AES_128:
++				if (ctx->keysize != 16)
++					goto err_out;
++				md |= HIFN_CRYPT_CMD_KSZ_128 |
++					HIFN_CRYPT_CMD_ALG_AES;
++				break;
++			case ACRYPTO_TYPE_AES_192:
++				if (ctx->keysize != 24)
++					goto err_out;
++				md |= HIFN_CRYPT_CMD_KSZ_192 |
++					HIFN_CRYPT_CMD_ALG_AES;
++				break;
++			case ACRYPTO_TYPE_AES_256:
++				if (ctx->keysize != 32)
++					goto err_out;
++				md |= HIFN_CRYPT_CMD_KSZ_256 |
++					HIFN_CRYPT_CMD_ALG_AES;
++				break;
++			case ACRYPTO_TYPE_3DES:
++				if (ctx->keysize != 24)
++					goto err_out;
++				md |= HIFN_CRYPT_CMD_ALG_3DES;
++				break;
++			case ACRYPTO_TYPE_DES:
++				if (ctx->keysize != 8)
++					goto err_out;
++				md |= HIFN_CRYPT_CMD_ALG_DES;
++				break;
++			default:
++				goto err_out;
++		}
++
++		buf_pos += hifn_setup_crypto_command(dev, buf_pos,
++				nbytes, nbytes, ctx->key, ctx->keysize,
++				ctx->iv, ctx->ivsize, md);
++	}
++
++	dev->sa[sa_idx] = priv;
++
++	cmd_len = buf_pos - buf;
++	dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
++			HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
++
++	if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
++		dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND |
++			HIFN_D_VALID | HIFN_D_LAST |
++			HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
++		dma->cmdi = 0;
 +	} else
- 		freq = inl(io_base + 0x90);
- 	return freq;
- }
-diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
-index 842fe08..5b8586d 100644
---- a/drivers/ata/pata_icside.c
-+++ b/drivers/ata/pata_icside.c
-@@ -224,6 +224,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
- 	struct pata_icside_state *state = ap->host->private_data;
- 	struct scatterlist *sg, *rsg = state->sg;
- 	unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
-+	unsigned int si;
- 
- 	/*
- 	 * We are simplex; BUG if we try to fiddle with DMA
-@@ -234,7 +235,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
- 	/*
- 	 * Copy ATAs scattered sg list into a contiguous array of sg
- 	 */
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		memcpy(rsg, sg, sizeof(*sg));
- 		rsg++;
- 	}
-diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
-index ca9aae0..109ddd4 100644
---- a/drivers/ata/pata_it821x.c
-+++ b/drivers/ata/pata_it821x.c
-@@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc)
- 			return ata_qc_issue_prot(qc);
- 	}
- 	printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
--	return AC_ERR_INVALID;
-+	return AC_ERR_DEV;
- }
- 
- /**
-@@ -516,6 +516,37 @@ static void it821x_dev_config(struct ata_device *adev)
- 			printk("(%dK stripe)", adev->id[146]);
- 		printk(".\n");
- 	}
-+	/* This is a controller firmware triggered funny, don't
-+	   report the drive faulty! */
-+	adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
++		dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);
++
++	if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
++		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
++		dev->flags |= HIFN_FLAG_CMD_BUSY;
++	}
++
++	hifn_setup_dst_desc(dev, dpage, doff, nbytes);
++	hifn_setup_res_desc(dev);
++
++	return 0;
++
++err_out:
++	return -EINVAL;
 +}
 +
-+/**
-+ *	it821x_ident_hack	-	Hack identify data up
-+ *	@ap: Port
-+ *
-+ *	Walk the devices on this firmware driven port and slightly
-+ *	mash the identify data to stop us and common tools trying to
-+ *	use features not firmware supported. The firmware itself does
-+ *	some masking (eg SMART) but not enough.
-+ *
-+ *	This is a bit of an abuse of the cable method, but it is the
-+ *	only method called at the right time. We could modify the libata
-+ *	core specifically for ident hacking but while we have one offender
-+ *	it seems better to keep the fallout localised.
-+ */
++static int ablkcipher_walk_init(struct ablkcipher_walk *w,
++		int num, gfp_t gfp_flags)
++{
++	int i;
 +
-+static int it821x_ident_hack(struct ata_port *ap)
++	num = min(ASYNC_SCATTERLIST_CACHE, num);
++	sg_init_table(w->cache, num);
++
++	w->num = 0;
++	for (i=0; i<num; ++i) {
++		struct page *page = alloc_page(gfp_flags);
++		struct scatterlist *s;
++
++		if (!page)
++			break;
++
++		s = &w->cache[i];
++
++		sg_set_page(s, page, PAGE_SIZE, 0);
++		w->num++;
++	}
++
++	return i;
++}
++
++static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
 +{
-+	struct ata_device *adev;
-+	ata_link_for_each_dev(adev, &ap->link) {
-+		if (ata_dev_enabled(adev)) {
-+			adev->id[84] &= ~(1 << 6);	/* No FUA */
-+			adev->id[85] &= ~(1 << 10);	/* No HPA */
-+			adev->id[76] = 0;		/* No NCQ/AN etc */
-+		}
++	int i;
++
++	for (i=0; i<w->num; ++i) {
++		struct scatterlist *s = &w->cache[i];
++
++		__free_page(sg_page(s));
++
++		s->length = 0;
 +	}
-+	return ata_cable_unknown(ap);
- }
- 
- 
-@@ -634,7 +665,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
- 	.thaw		= ata_bmdma_thaw,
- 	.error_handler	= ata_bmdma_error_handler,
- 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
--	.cable_detect	= ata_cable_unknown,
-+	.cable_detect	= it821x_ident_hack,
- 
- 	.bmdma_setup 	= ata_bmdma_setup,
- 	.bmdma_start 	= ata_bmdma_start,
-diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
-index 120b5bf..030878f 100644
---- a/drivers/ata/pata_ixp4xx_cf.c
-+++ b/drivers/ata/pata_ixp4xx_cf.c
-@@ -42,13 +42,13 @@ static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
- 	return 0;
- }
- 
--static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
--				unsigned int buflen, int write_data)
-+static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev,
-+				unsigned char *buf, unsigned int buflen, int rw)
- {
- 	unsigned int i;
- 	unsigned int words = buflen >> 1;
- 	u16 *buf16 = (u16 *) buf;
--	struct ata_port *ap = adev->link->ap;
-+	struct ata_port *ap = dev->link->ap;
- 	void __iomem *mmio = ap->ioaddr.data_addr;
- 	struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
- 
-@@ -59,30 +59,32 @@ static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
- 	udelay(100);
- 
- 	/* Transfer multiple of 2 bytes */
--	if (write_data) {
--		for (i = 0; i < words; i++)
--			writew(buf16[i], mmio);
--	} else {
-+	if (rw == READ)
- 		for (i = 0; i < words; i++)
- 			buf16[i] = readw(mmio);
--	}
-+	else
-+		for (i = 0; i < words; i++)
-+			writew(buf16[i], mmio);
- 
- 	/* Transfer trailing 1 byte, if any. */
- 	if (unlikely(buflen & 0x01)) {
- 		u16 align_buf[1] = { 0 };
- 		unsigned char *trailing_buf = buf + buflen - 1;
- 
--		if (write_data) {
--			memcpy(align_buf, trailing_buf, 1);
--			writew(align_buf[0], mmio);
--		} else {
-+		if (rw == READ) {
- 			align_buf[0] = readw(mmio);
- 			memcpy(trailing_buf, align_buf, 1);
-+		} else {
-+			memcpy(align_buf, trailing_buf, 1);
-+			writew(align_buf[0], mmio);
- 		}
-+		words++;
- 	}
- 
- 	udelay(100);
- 	*data->cs0_cfg |= 0x01;
 +
-+	return words << 1;
- }
- 
- static struct scsi_host_template ixp4xx_sht = {
-diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
-index 17159b5..333dc15 100644
---- a/drivers/ata/pata_legacy.c
-+++ b/drivers/ata/pata_legacy.c
-@@ -28,7 +28,6 @@
-  *
-  *  Unsupported but docs exist:
-  *	Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
-- *	Winbond W83759A
-  *
-  *  This driver handles legacy (that is "ISA/VLB side") IDE ports found
-  *  on PC class systems. There are three hybrid devices that are exceptions
-@@ -36,7 +35,7 @@
-  *  the MPIIX where the tuning is PCI side but the IDE is "ISA side".
-  *
-  *  Specific support is included for the ht6560a/ht6560b/opti82c611a/
-- *  opti82c465mv/promise 20230c/20630
-+ *  opti82c465mv/promise 20230c/20630/winbond83759A
-  *
-  *  Use the autospeed and pio_mask options with:
-  *	Appian ADI/2 aka CLPD7220 or AIC25VL01.
-@@ -47,9 +46,6 @@
-  *  For now use autospeed and pio_mask as above with the W83759A. This may
-  *  change.
-  *
-- *  TODO
-- *	Merge existing pata_qdi driver
-- *
-  */
- 
- #include <linux/kernel.h>
-@@ -64,12 +60,13 @@
- #include <linux/platform_device.h>
- 
- #define DRV_NAME "pata_legacy"
--#define DRV_VERSION "0.5.5"
-+#define DRV_VERSION "0.6.5"
- 
- #define NR_HOST 6
- 
--static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
--static int legacy_irq[NR_HOST] = { 14, 15, 11, 10, 8, 12 };
-+static int all;
-+module_param(all, int, 0444);
-+MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI(0=off, 1=on)");
- 
- struct legacy_data {
- 	unsigned long timing;
-@@ -80,21 +77,107 @@ struct legacy_data {
- 
- };
- 
-+enum controller {
-+	BIOS = 0,
-+	SNOOP = 1,
-+	PDC20230 = 2,
-+	HT6560A = 3,
-+	HT6560B = 4,
-+	OPTI611A = 5,
-+	OPTI46X = 6,
-+	QDI6500 = 7,
-+	QDI6580 = 8,
-+	QDI6580DP = 9,		/* Dual channel mode is different */
-+	W83759A = 10,
++	w->num = 0;
++}
++
++static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src,
++		unsigned int size, unsigned int *nbytesp)
++{
++	unsigned int copy, drest = *drestp, nbytes = *nbytesp;
++	int idx = 0;
++	void *saddr;
++
++	if (drest < size || size > nbytes)
++		return -EINVAL;
++
++	while (size) {
++		copy = min(drest, src->length);
++
++		saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
++		memcpy(daddr, saddr + src->offset, copy);
++		kunmap_atomic(saddr, KM_SOFTIRQ1);
++
++		size -= copy;
++		drest -= copy;
++		nbytes -= copy;
++		daddr += copy;
++
++		dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
++				__func__, copy, size, drest, nbytes);
++
++		src++;
++		idx++;
++	}
++
++	*nbytesp = nbytes;
++	*drestp = drest;
++
++	return idx;
++}
++
++static int ablkcipher_walk(struct ablkcipher_request *req,
++		struct ablkcipher_walk *w)
++{
++	unsigned blocksize =
++		crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
++	unsigned alignmask =
++		crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
++	struct scatterlist *src, *dst, *t;
++	void *daddr;
++	unsigned int nbytes = req->nbytes, offset, copy, diff;
++	int idx, tidx, err;
++
++	tidx = idx = 0;
++	offset = 0;
++	while (nbytes) {
++		if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
++			return -EINVAL;
++
++		src = &req->src[idx];
++		dst = &req->dst[idx];
++
++		dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
++				"blocksize: %u, nbytes: %u.\n",
++				__func__, src->length, dst->length, src->offset,
++				dst->offset, offset, blocksize, nbytes);
++
++		if (src->length & (blocksize - 1) ||
++				src->offset & (alignmask - 1) ||
++				dst->length & (blocksize - 1) ||
++				dst->offset & (alignmask - 1) ||
++				offset) {
++			unsigned slen = src->length - offset;
++			unsigned dlen = PAGE_SIZE;
++
++			t = &w->cache[idx];
++
++			daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
++			err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes);
++			if (err < 0)
++				goto err_out_unmap;
++
++			idx += err;
++
++			copy = slen & ~(blocksize - 1);
++			diff = slen & (blocksize - 1);
++
++			if (dlen < nbytes) {
++				/*
++				 * Destination page does not have enough space
++				 * to put there additional blocksized chunk,
++				 * so we mark that page as containing only
++				 * blocksize aligned chunks:
++				 * 	t->length = (slen & ~(blocksize - 1));
++				 * and increase number of bytes to be processed
++				 * in next chunk:
++				 * 	nbytes += diff;
++				 */
++				nbytes += diff;
++
++				/*
++				 * Temporary of course...
++				 * Kick author if you will catch this one.
++				 */
++				printk(KERN_ERR "%s: dlen: %u, nbytes: %u,"
++					"slen: %u, offset: %u.\n",
++					__func__, dlen, nbytes, slen, offset);
++				printk(KERN_ERR "%s: please contact author to fix this "
++					"issue, generally you should not catch "
++					"this path under any condition but who "
++					"knows how did you use crypto code.\n"
++					"Thank you.\n",	__func__);
++				BUG();
++			} else {
++				copy += diff + nbytes;
++
++				src = &req->src[idx];
 +
-+	UNKNOWN = -1
-+};
++				err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes);
++				if (err < 0)
++					goto err_out_unmap;
 +
++				idx += err;
++			}
 +
-+struct legacy_probe {
-+	unsigned char *name;
-+	unsigned long port;
-+	unsigned int irq;
-+	unsigned int slot;
-+	enum controller type;
-+	unsigned long private;
-+};
++			t->length = copy;
++			t->offset = offset;
 +
-+struct legacy_controller {
-+	const char *name;
-+	struct ata_port_operations *ops;
-+	unsigned int pio_mask;
-+	unsigned int flags;
-+	int (*setup)(struct platform_device *, struct legacy_probe *probe,
-+		struct legacy_data *data);
-+};
++			kunmap_atomic(daddr, KM_SOFTIRQ0);
++		} else {
++			nbytes -= src->length;
++			idx++;
++		}
 +
-+static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
++		tidx++;
++	}
 +
-+static struct legacy_probe probe_list[NR_HOST];
- static struct legacy_data legacy_data[NR_HOST];
- static struct ata_host *legacy_host[NR_HOST];
- static int nr_legacy_host;
- 
- 
--static int probe_all;			/* Set to check all ISA port ranges */
--static int ht6560a;			/* HT 6560A on primary 1, secondary 2, both 3 */
--static int ht6560b;			/* HT 6560A on primary 1, secondary 2, both 3 */
--static int opti82c611a;			/* Opti82c611A on primary 1, secondary 2, both 3 */
--static int opti82c46x;			/* Opti 82c465MV present (pri/sec autodetect) */
--static int autospeed;			/* Chip present which snoops speed changes */
--static int pio_mask = 0x1F;		/* PIO range for autospeed devices */
-+static int probe_all;		/* Set to check all ISA port ranges */
-+static int ht6560a;		/* HT 6560A on primary 1, second 2, both 3 */
-+static int ht6560b;		/* HT 6560A on primary 1, second 2, both 3 */
-+static int opti82c611a;		/* Opti82c611A on primary 1, sec 2, both 3 */
-+static int opti82c46x;		/* Opti 82c465MV present(pri/sec autodetect) */
-+static int qdi;			/* Set to probe QDI controllers */
-+static int winbond;		/* Set to probe Winbond controllers,
-+					give I/O port if non standard */
-+static int autospeed;		/* Chip present which snoops speed changes */
-+static int pio_mask = 0x1F;	/* PIO range for autospeed devices */
- static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
- 
- /**
-+ *	legacy_probe_add	-	Add interface to probe list
-+ *	@port: Controller port
-+ *	@irq: IRQ number
-+ *	@type: Controller type
-+ *	@private: Controller specific info
-+ *
-+ *	Add an entry into the probe list for ATA controllers. This is used
-+ *	to add the default ISA slots and then to build up the table
-+ *	further according to other ISA/VLB/Weird device scans
-+ *
-+ *	An I/O port list is used to keep ordering stable and sane, as we
-+ *	don't have any good way to talk about ordering otherwise
-+ */
++	return tidx;
 +
-+static int legacy_probe_add(unsigned long port, unsigned int irq,
-+				enum controller type, unsigned long private)
++err_out_unmap:
++	kunmap_atomic(daddr, KM_SOFTIRQ0);
++	return err;
++}
++
++static int hifn_setup_session(struct ablkcipher_request *req)
 +{
-+	struct legacy_probe *lp = &probe_list[0];
-+	int i;
-+	struct legacy_probe *free = NULL;
++	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
++	struct hifn_device *dev = ctx->dev;
++	struct page *spage, *dpage;
++	unsigned long soff, doff, flags;
++	unsigned int nbytes = req->nbytes, idx = 0, len;
++	int err = -EINVAL, sg_num;
++	struct scatterlist *src, *dst, *t;
++	unsigned blocksize =
++		crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
++	unsigned alignmask =
++		crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
 +
-+	for (i = 0; i < NR_HOST; i++) {
-+		if (lp->port == 0 && free == NULL)
-+			free = lp;
-+		/* Matching port, or the correct slot for ordering */
-+		if (lp->port == port || legacy_port[i] == port) {
-+			free = lp;
-+			break;
++	if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
++		goto err_out_exit;
++
++	ctx->walk.flags = 0;
++
++	while (nbytes) {
++		src = &req->src[idx];
++		dst = &req->dst[idx];
++
++		if (src->length & (blocksize - 1) ||
++				src->offset & (alignmask - 1) ||
++				dst->length & (blocksize - 1) ||
++				dst->offset & (alignmask - 1)) {
++			ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
 +		}
-+		lp++;
++
++		nbytes -= src->length;
++		idx++;
 +	}
-+	if (free == NULL) {
-+		printk(KERN_ERR "pata_legacy: Too many interfaces.\n");
-+		return -1;
++
++	if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
++		err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC);
++		if (err < 0)
++			return err;
 +	}
-+	/* Fill in the entry for later probing */
-+	free->port = port;
-+	free->irq = irq;
-+	free->type = type;
-+	free->private = private;
-+	return 0;
-+}
 +
++	nbytes = req->nbytes;
++	idx = 0;
 +
-+/**
-  *	legacy_set_mode		-	mode setting
-  *	@link: IDE link
-  *	@unused: Device that failed when error is returned
-@@ -113,7 +196,8 @@ static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
- 
- 	ata_link_for_each_dev(dev, link) {
- 		if (ata_dev_enabled(dev)) {
--			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
-+			ata_dev_printk(dev, KERN_INFO,
-+						"configured for PIO\n");
- 			dev->pio_mode = XFER_PIO_0;
- 			dev->xfer_mode = XFER_PIO_0;
- 			dev->xfer_shift = ATA_SHIFT_PIO;
-@@ -171,7 +255,7 @@ static struct ata_port_operations simple_port_ops = {
- 	.irq_clear	= ata_bmdma_irq_clear,
- 	.irq_on		= ata_irq_on,
- 
--	.port_start	= ata_port_start,
-+	.port_start	= ata_sff_port_start,
- };
- 
- static struct ata_port_operations legacy_port_ops = {
-@@ -198,15 +282,16 @@ static struct ata_port_operations legacy_port_ops = {
- 	.irq_clear	= ata_bmdma_irq_clear,
- 	.irq_on		= ata_irq_on,
- 
--	.port_start	= ata_port_start,
-+	.port_start	= ata_sff_port_start,
- };
- 
- /*
-  *	Promise 20230C and 20620 support
-  *
-- *	This controller supports PIO0 to PIO2. We set PIO timings conservatively to
-- *	allow for 50MHz Vesa Local Bus. The 20620 DMA support is weird being DMA to
-- *	controller and PIO'd to the host and not supported.
-+ *	This controller supports PIO0 to PIO2. We set PIO timings
-+ *	conservatively to allow for 50MHz Vesa Local Bus. The 20620 DMA
-+ *	support is weird being DMA to controller and PIO'd to the host
-+ *	and not supported.
-  */
- 
- static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
-@@ -221,8 +306,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
- 	local_irq_save(flags);
- 
- 	/* Unlock the control interface */
--	do
--	{
-+	do {
- 		inb(0x1F5);
- 		outb(inb(0x1F2) | 0x80, 0x1F2);
- 		inb(0x1F2);
-@@ -231,7 +315,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
- 		inb(0x1F2);
- 		inb(0x1F2);
- 	}
--	while((inb(0x1F2) & 0x80) && --tries);
-+	while ((inb(0x1F2) & 0x80) && --tries);
- 
- 	local_irq_restore(flags);
- 
-@@ -249,13 +333,14 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
- 
- }
- 
--static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
-+static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
-+			unsigned char *buf, unsigned int buflen, int rw)
- {
--	struct ata_port *ap = adev->link->ap;
--	int slop = buflen & 3;
--	unsigned long flags;
-+	if (ata_id_has_dword_io(dev->id)) {
-+		struct ata_port *ap = dev->link->ap;
-+		int slop = buflen & 3;
-+		unsigned long flags;
- 
--	if (ata_id_has_dword_io(adev->id)) {
- 		local_irq_save(flags);
- 
- 		/* Perform the 32bit I/O synchronization sequence */
-@@ -264,26 +349,27 @@ static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsig
- 		ioread8(ap->ioaddr.nsect_addr);
- 
- 		/* Now the data */
--
--		if (write_data)
--			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
--		else
-+		if (rw == READ)
- 			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-+		else
-+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
- 
- 		if (unlikely(slop)) {
--			__le32 pad = 0;
--			if (write_data) {
--				memcpy(&pad, buf + buflen - slop, slop);
--				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
--			} else {
-+			u32 pad;
-+			if (rw == READ) {
- 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
- 				memcpy(buf + buflen - slop, &pad, slop);
-+			} else {
-+				memcpy(&pad, buf + buflen - slop, slop);
-+				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
- 			}
-+			buflen += 4 - slop;
- 		}
- 		local_irq_restore(flags);
--	}
--	else
--		ata_data_xfer_noirq(adev, buf, buflen, write_data);
-+	} else
-+		buflen = ata_data_xfer_noirq(dev, buf, buflen, rw);
++	sg_num = ablkcipher_walk(req, &ctx->walk);
 +
-+	return buflen;
- }
- 
- static struct ata_port_operations pdc20230_port_ops = {
-@@ -310,14 +396,14 @@ static struct ata_port_operations pdc20230_port_ops = {
- 	.irq_clear	= ata_bmdma_irq_clear,
- 	.irq_on		= ata_irq_on,
- 
--	.port_start	= ata_port_start,
-+	.port_start	= ata_sff_port_start,
- };
- 
- /*
-  *	Holtek 6560A support
-  *
-- *	This controller supports PIO0 to PIO2 (no IORDY even though higher timings
-- *	can be loaded).
-+ *	This controller supports PIO0 to PIO2 (no IORDY even though higher
-+ *	timings can be loaded).
-  */
- 
- static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
-@@ -364,14 +450,14 @@ static struct ata_port_operations ht6560a_port_ops = {
- 	.irq_clear	= ata_bmdma_irq_clear,
- 	.irq_on		= ata_irq_on,
- 
--	.port_start	= ata_port_start,
-+	.port_start	= ata_sff_port_start,
- };
- 
- /*
-  *	Holtek 6560B support
-  *
-- *	This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO setting
-- *	unless we see an ATAPI device in which case we force it off.
-+ *	This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO
-+ *	setting unless we see an ATAPI device in which case we force it off.
-  *
-  *	FIXME: need to implement 2nd channel support.
-  */
-@@ -398,7 +484,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
- 	if (adev->class != ATA_DEV_ATA) {
- 		u8 rconf = inb(0x3E6);
- 		if (rconf & 0x24) {
--			rconf &= ~ 0x24;
-+			rconf &= ~0x24;
- 			outb(rconf, 0x3E6);
- 		}
- 	}
-@@ -423,13 +509,13 @@ static struct ata_port_operations ht6560b_port_ops = {
- 	.qc_prep 	= ata_qc_prep,
- 	.qc_issue	= ata_qc_issue_prot,
- 
--	.data_xfer	= ata_data_xfer,	/* FIXME: Check 32bit and noirq */
-+	.data_xfer	= ata_data_xfer,    /* FIXME: Check 32bit and noirq */
- 
- 	.irq_handler	= ata_interrupt,
- 	.irq_clear	= ata_bmdma_irq_clear,
- 	.irq_on		= ata_irq_on,
- 
--	.port_start	= ata_port_start,
-+	.port_start	= ata_sff_port_start,
- };
- 
- /*
-@@ -462,7 +548,8 @@ static u8 opti_syscfg(u8 reg)
-  *	This controller supports PIO0 to PIO3.
-  */
- 
--static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev)
-+static void opti82c611a_set_piomode(struct ata_port *ap,
-+						struct ata_device *adev)
- {
- 	u8 active, recover, setup;
- 	struct ata_timing t;
-@@ -549,7 +636,7 @@ static struct ata_port_operations opti82c611a_port_ops = {
- 	.irq_clear	= ata_bmdma_irq_clear,
- 	.irq_on		= ata_irq_on,
- 
--	.port_start	= ata_port_start,
-+	.port_start	= ata_sff_port_start,
- };
- 
- /*
-@@ -681,77 +768,398 @@ static struct ata_port_operations opti82c46x_port_ops = {
- 	.irq_clear	= ata_bmdma_irq_clear,
- 	.irq_on		= ata_irq_on,
- 
--	.port_start	= ata_port_start,
-+	.port_start	= ata_sff_port_start,
- };
- 
-+static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
-+{
-+	struct ata_timing t;
-+	struct legacy_data *qdi = ap->host->private_data;
-+	int active, recovery;
-+	u8 timing;
++	atomic_set(&ctx->sg_num, sg_num);
 +
-+	/* Get the timing data in cycles */
-+	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
++	spin_lock_irqsave(&dev->lock, flags);
++	if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
++		err = -EAGAIN;
++		goto err_out;
++	}
 +
-+	if (qdi->fast) {
-+		active = 8 - FIT(t.active, 1, 8);
-+		recovery = 18 - FIT(t.recover, 3, 18);
-+	} else {
-+		active = 9 - FIT(t.active, 2, 9);
-+		recovery = 15 - FIT(t.recover, 0, 15);
++	dev->snum++;
++	dev->started += sg_num;
++
++	while (nbytes) {
++		src = &req->src[idx];
++		dst = &req->dst[idx];
++		t = &ctx->walk.cache[idx];
++
++		if (t->length) {
++			spage = dpage = sg_page(t);
++			soff = doff = 0;
++			len = t->length;
++		} else {
++			spage = sg_page(src);
++			soff = src->offset;
++
++			dpage = sg_page(dst);
++			doff = dst->offset;
++
++			len = dst->length;
++		}
++
++		idx++;
++
++		err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes,
++				req, ctx);
++		if (err)
++			goto err_out;
++
++		nbytes -= len;
 +	}
-+	timing = (recovery << 4) | active | 0x08;
 +
-+	qdi->clock[adev->devno] = timing;
++	dev->active = HIFN_DEFAULT_ACTIVE_NUM;
++	spin_unlock_irqrestore(&dev->lock, flags);
 +
-+	outb(timing, qdi->timing);
++	return 0;
++
++err_out:
++	spin_unlock_irqrestore(&dev->lock, flags);
++err_out_exit:
++	if (err && printk_ratelimit())
++		dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
++				"type: %u, err: %d.\n",
++			dev->name, ctx->iv, ctx->ivsize,
++			ctx->key, ctx->keysize,
++			ctx->mode, ctx->op, ctx->type, err);
++
++	return err;
 +}
- 
- /**
-- *	legacy_init_one		-	attach a legacy interface
-- *	@port: port number
-- *	@io: I/O port start
-- *	@ctrl: control port
-+ *	qdi6580dp_set_piomode		-	PIO setup for dual channel
-+ *	@ap: Port
-+ *	@adev: Device
-  *	@irq: interrupt line
-  *
-- *	Register an ISA bus IDE interface. Such interfaces are PIO and we
-- *	assume do not support IRQ sharing.
-+ *	In dual channel mode the 6580 has one clock per channel and we have
-+ *	to software clockswitch in qc_issue_prot.
-  */
- 
--static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl, int irq)
-+static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
- {
--	struct legacy_data *ld = &legacy_data[nr_legacy_host];
--	struct ata_host *host;
--	struct ata_port *ap;
--	struct platform_device *pdev;
--	struct ata_port_operations *ops = &legacy_port_ops;
--	void __iomem *io_addr, *ctrl_addr;
--	int pio_modes = pio_mask;
--	u32 mask = (1 << port);
--	u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
--	int ret;
-+	struct ata_timing t;
-+	struct legacy_data *qdi = ap->host->private_data;
-+	int active, recovery;
-+	u8 timing;
- 
--	pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0);
--	if (IS_ERR(pdev))
--		return PTR_ERR(pdev);
-+	/* Get the timing data in cycles */
-+	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 +
-+	if (qdi->fast) {
-+		active = 8 - FIT(t.active, 1, 8);
-+		recovery = 18 - FIT(t.recover, 3, 18);
-+	} else {
-+		active = 9 - FIT(t.active, 2, 9);
-+		recovery = 15 - FIT(t.recover, 0, 15);
++static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
++{
++	int n, err;
++	u8 src[16];
++	struct hifn_context ctx;
++	u8 fips_aes_ecb_from_zero[16] = {
++		0x66, 0xE9, 0x4B, 0xD4,
++		0xEF, 0x8A, 0x2C, 0x3B,
++		0x88, 0x4C, 0xFA, 0x59,
++		0xCA, 0x34, 0x2B, 0x2E};
++
++	memset(src, 0, sizeof(src));
++	memset(ctx.key, 0, sizeof(ctx.key));
++
++	ctx.dev = dev;
++	ctx.keysize = 16;
++	ctx.ivsize = 0;
++	ctx.iv = NULL;
++	ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
++	ctx.mode = ACRYPTO_MODE_ECB;
++	ctx.type = ACRYPTO_TYPE_AES_128;
++	atomic_set(&ctx.sg_num, 1);
++
++	err = hifn_setup_dma(dev,
++			virt_to_page(src), offset_in_page(src),
++			virt_to_page(src), offset_in_page(src),
++			sizeof(src), NULL, &ctx);
++	if (err)
++		goto err_out;
++
++	msleep(200);
++
++	dprintk("%s: decoded: ", dev->name);
++	for (n=0; n<sizeof(src); ++n)
++		dprintk("%02x ", src[n]);
++	dprintk("\n");
++	dprintk("%s: FIPS   : ", dev->name);
++	for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
++		dprintk("%02x ", fips_aes_ecb_from_zero[n]);
++	dprintk("\n");
++
++	if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
++		printk(KERN_INFO "%s: AES 128 ECB test has been successfully "
++				"passed.\n", dev->name);
++		return 0;
 +	}
-+	timing = (recovery << 4) | active | 0x08;
- 
--	ret = -EBUSY;
--	if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
--	    devm_request_region(&pdev->dev, ctrl, 1, "pata_legacy") == NULL)
--		goto fail;
-+	qdi->clock[adev->devno] = timing;
- 
--	ret = -ENOMEM;
--	io_addr = devm_ioport_map(&pdev->dev, io, 8);
--	ctrl_addr = devm_ioport_map(&pdev->dev, ctrl, 1);
--	if (!io_addr || !ctrl_addr)
--		goto fail;
-+	outb(timing, qdi->timing + 2 * ap->port_no);
-+	/* Clear the FIFO */
-+	if (adev->class != ATA_DEV_ATA)
-+		outb(0x5F, qdi->timing + 3);
++
++err_out:
++	printk(KERN_INFO "%s: AES 128 ECB test failed.\n", dev->name);
++	return -1;
 +}
- 
--	if (ht6560a & mask) {
--		ops = &ht6560a_port_ops;
--		pio_modes = 0x07;
--		iordy = ATA_FLAG_NO_IORDY;
--	}
--	if (ht6560b & mask) {
--		ops = &ht6560b_port_ops;
--		pio_modes = 0x1F;
--	}
--	if (opti82c611a & mask) {
--		ops = &opti82c611a_port_ops;
--		pio_modes = 0x0F;
-+/**
-+ *	qdi6580_set_piomode		-	PIO setup for single channel
-+ *	@ap: Port
-+ *	@adev: Device
-+ *
-+ *	In single channel mode the 6580 has one clock per device and we can
-+ *	avoid the requirement to clock switch. We also have to load the timing
-+ *	into the right clock according to whether we are master or slave.
-+ */
 +
-+static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
++static int hifn_start_device(struct hifn_device *dev)
 +{
-+	struct ata_timing t;
-+	struct legacy_data *qdi = ap->host->private_data;
-+	int active, recovery;
-+	u8 timing;
++	int err;
 +
-+	/* Get the timing data in cycles */
-+	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
++	hifn_reset_dma(dev, 1);
 +
-+	if (qdi->fast) {
-+		active = 8 - FIT(t.active, 1, 8);
-+		recovery = 18 - FIT(t.recover, 3, 18);
-+	} else {
-+		active = 9 - FIT(t.active, 2, 9);
-+		recovery = 15 - FIT(t.recover, 0, 15);
- 	}
--	if (opti82c46x & mask) {
--		ops = &opti82c46x_port_ops;
--		pio_modes = 0x0F;
-+	timing = (recovery << 4) | active | 0x08;
-+	qdi->clock[adev->devno] = timing;
-+	outb(timing, qdi->timing + 2 * adev->devno);
-+	/* Clear the FIFO */
-+	if (adev->class != ATA_DEV_ATA)
-+		outb(0x5F, qdi->timing + 3);
-+}
++	err = hifn_enable_crypto(dev);
++	if (err)
++		return err;
 +
-+/**
-+ *	qdi_qc_issue_prot	-	command issue
-+ *	@qc: command pending
-+ *
-+ *	Called when the libata layer is about to issue a command. We wrap
-+ *	this interface so that we can load the correct ATA timings.
-+ */
++	hifn_reset_puc(dev);
 +
-+static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
++	hifn_init_dma(dev);
++
++	hifn_init_registers(dev);
++
++	hifn_init_pubrng(dev);
++
++	return 0;
++}
++
++static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
++		struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
 +{
-+	struct ata_port *ap = qc->ap;
-+	struct ata_device *adev = qc->dev;
-+	struct legacy_data *qdi = ap->host->private_data;
++	unsigned int srest = *srestp, nbytes = *nbytesp, copy;
++	void *daddr;
++	int idx = 0;
 +
-+	if (qdi->clock[adev->devno] != qdi->last) {
-+		if (adev->pio_mode) {
-+			qdi->last = qdi->clock[adev->devno];
-+			outb(qdi->clock[adev->devno], qdi->timing +
-+							2 * ap->port_no);
-+		}
- 	}
-+	return ata_qc_issue_prot(qc);
++	if (srest < size || size > nbytes)
++		return -EINVAL;
++
++	while (size) {
++
++		copy = min(dst->length, srest);
++
++		daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
++		memcpy(daddr + dst->offset + offset, saddr, copy);
++		kunmap_atomic(daddr, KM_IRQ0);
++
++		nbytes -= copy;
++		size -= copy;
++		srest -= copy;
++		saddr += copy;
++		offset = 0;
++
++		dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
++				__func__, copy, size, srest, nbytes);
++
++		dst++;
++		idx++;
++	}
++
++	*nbytesp = nbytes;
++	*srestp = srest;
++
++	return idx;
 +}
- 
--	/* Probe for automatically detectable controllers */
-+static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
-+					unsigned int buflen, int rw)
++
++static void hifn_process_ready(struct ablkcipher_request *req, int error)
 +{
-+	struct ata_port *ap = adev->link->ap;
-+	int slop = buflen & 3;
- 
--	if (io == 0x1F0 && ops == &legacy_port_ops) {
--		unsigned long flags;
-+	if (ata_id_has_dword_io(adev->id)) {
-+		if (rw == WRITE)
-+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-+		else
-+			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
- 
--		local_irq_save(flags);
-+		if (unlikely(slop)) {
-+			u32 pad;
-+			if (rw == WRITE) {
-+				memcpy(&pad, buf + buflen - slop, slop);
-+				pad = le32_to_cpu(pad);
-+				iowrite32(pad, ap->ioaddr.data_addr);
-+			} else {
-+				pad = ioread32(ap->ioaddr.data_addr);
-+				pad = cpu_to_le32(pad);
-+				memcpy(buf + buflen - slop, &pad, slop);
++	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
++	struct hifn_device *dev;
++
++	dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx);
++
++	dev = ctx->dev;
++	dprintk("%s: req: %p, started: %d, sg_num: %d.\n",
++		__func__, req, dev->started, atomic_read(&ctx->sg_num));
++
++	if (--dev->started < 0)
++		BUG();
++
++	if (atomic_dec_and_test(&ctx->sg_num)) {
++		unsigned int nbytes = req->nbytes;
++		int idx = 0, err;
++		struct scatterlist *dst, *t;
++		void *saddr;
++
++		if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
++			while (nbytes) {
++				t = &ctx->walk.cache[idx];
++				dst = &req->dst[idx];
++
++				dprintk("\n%s: sg_page(t): %p, t->length: %u, "
++					"sg_page(dst): %p, dst->length: %u, "
++					"nbytes: %u.\n",
++					__func__, sg_page(t), t->length,
++					sg_page(dst), dst->length, nbytes);
++
++				if (!t->length) {
++					nbytes -= dst->length;
++					idx++;
++					continue;
++				}
++
++				saddr = kmap_atomic(sg_page(t), KM_IRQ1);
++
++				err = ablkcipher_get(saddr, &t->length, t->offset,
++						dst, nbytes, &nbytes);
++				if (err < 0) {
++					kunmap_atomic(saddr, KM_IRQ1);
++					break;
++				}
++
++				idx += err;
++				kunmap_atomic(saddr, KM_IRQ1);
 +			}
++
++			ablkcipher_walk_exit(&ctx->walk);
 +		}
-+		return (buflen + 3) & ~3;
-+	} else
-+		return ata_data_xfer(adev, buf, buflen, rw);
++
++		req->base.complete(&req->base, error);
++	}
 +}
 +
-+static int qdi_port(struct platform_device *dev,
-+			struct legacy_probe *lp, struct legacy_data *ld)
++static void hifn_check_for_completion(struct hifn_device *dev, int error)
 +{
-+	if (devm_request_region(&dev->dev, lp->private, 4, "qdi") == NULL)
-+		return -EBUSY;
-+	ld->timing = lp->private;
-+	return 0;
++	int i;
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++
++	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
++		struct hifn_desc *d = &dma->resr[i];
++
++		if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) {
++			dev->success++;
++			dev->reset = 0;
++			hifn_process_ready(dev->sa[i], error);
++			dev->sa[i] = NULL;
++		}
++
++		if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER))
++			if (printk_ratelimit())
++				printk("%s: overflow detected [d: %u, o: %u] "
++						"at %d resr: l: %08x, p: %08x.\n",
++					dev->name,
++					!!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)),
++					!!(d->l & __cpu_to_le32(HIFN_D_OVER)),
++					i, d->l, d->p);
++	}
 +}
 +
-+static struct ata_port_operations qdi6500_port_ops = {
-+	.set_piomode	= qdi6500_set_piomode,
++static void hifn_clear_rings(struct hifn_device *dev)
++{
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++	int i, u;
 +
-+	.tf_load	= ata_tf_load,
-+	.tf_read	= ata_tf_read,
-+	.check_status 	= ata_check_status,
-+	.exec_command	= ata_exec_command,
-+	.dev_select 	= ata_std_dev_select,
++	dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
++			"k: %d.%d.%d.%d.\n",
++			dev->name,
++			dma->cmdi, dma->srci, dma->dsti, dma->resi,
++			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
++			dma->cmdk, dma->srck, dma->dstk, dma->resk);
 +
-+	.freeze		= ata_bmdma_freeze,
-+	.thaw		= ata_bmdma_thaw,
-+	.error_handler	= ata_bmdma_error_handler,
-+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-+	.cable_detect	= ata_cable_40wire,
++	i = dma->resk; u = dma->resu;
++	while (u != 0) {
++		if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
++			break;
 +
-+	.qc_prep 	= ata_qc_prep,
-+	.qc_issue	= qdi_qc_issue_prot,
++		if (i != HIFN_D_RES_RSIZE)
++			u--;
 +
-+	.data_xfer	= vlb32_data_xfer,
++		if (++i == (HIFN_D_RES_RSIZE + 1))
++			i = 0;
++	}
++	dma->resk = i; dma->resu = u;
 +
-+	.irq_handler	= ata_interrupt,
-+	.irq_clear	= ata_bmdma_irq_clear,
-+	.irq_on		= ata_irq_on,
++	i = dma->srck; u = dma->srcu;
++	while (u != 0) {
++		if (i == HIFN_D_SRC_RSIZE)
++			i = 0;
++		if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
++			break;
++		i++, u--;
++	}
++	dma->srck = i; dma->srcu = u;
 +
-+	.port_start	= ata_sff_port_start,
-+};
++	i = dma->cmdk; u = dma->cmdu;
++	while (u != 0) {
++		if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
++			break;
++		if (i != HIFN_D_CMD_RSIZE)
++			u--;
++		if (++i == (HIFN_D_CMD_RSIZE + 1))
++			i = 0;
++	}
++	dma->cmdk = i; dma->cmdu = u;
 +
-+static struct ata_port_operations qdi6580_port_ops = {
-+	.set_piomode	= qdi6580_set_piomode,
++	i = dma->dstk; u = dma->dstu;
++	while (u != 0) {
++		if (i == HIFN_D_DST_RSIZE)
++			i = 0;
++		if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
++			break;
++		i++, u--;
++	}
++	dma->dstk = i; dma->dstu = u;
 +
-+	.tf_load	= ata_tf_load,
-+	.tf_read	= ata_tf_read,
-+	.check_status 	= ata_check_status,
-+	.exec_command	= ata_exec_command,
-+	.dev_select 	= ata_std_dev_select,
++	dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
++			"k: %d.%d.%d.%d.\n",
++			dev->name,
++			dma->cmdi, dma->srci, dma->dsti, dma->resi,
++			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
++			dma->cmdk, dma->srck, dma->dstk, dma->resk);
++}
 +
-+	.freeze		= ata_bmdma_freeze,
-+	.thaw		= ata_bmdma_thaw,
-+	.error_handler	= ata_bmdma_error_handler,
-+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-+	.cable_detect	= ata_cable_40wire,
++static void hifn_work(struct work_struct *work)
++{
++	struct delayed_work *dw = container_of(work, struct delayed_work, work);
++	struct hifn_device *dev = container_of(dw, struct hifn_device, work);
++	unsigned long flags;
++	int reset = 0;
++	u32 r = 0;
 +
-+	.qc_prep 	= ata_qc_prep,
-+	.qc_issue	= ata_qc_issue_prot,
++	spin_lock_irqsave(&dev->lock, flags);
++	if (dev->active == 0) {
++		struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 +
-+	.data_xfer	= vlb32_data_xfer,
++		if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
++			dev->flags &= ~HIFN_FLAG_CMD_BUSY;
++			r |= HIFN_DMACSR_C_CTRL_DIS;
++		}
++		if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
++			dev->flags &= ~HIFN_FLAG_SRC_BUSY;
++			r |= HIFN_DMACSR_S_CTRL_DIS;
++		}
++		if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
++			dev->flags &= ~HIFN_FLAG_DST_BUSY;
++			r |= HIFN_DMACSR_D_CTRL_DIS;
++		}
++		if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
++			dev->flags &= ~HIFN_FLAG_RES_BUSY;
++			r |= HIFN_DMACSR_R_CTRL_DIS;
++		}
++		if (r)
++			hifn_write_1(dev, HIFN_1_DMA_CSR, r);
++	} else
++		dev->active--;
 +
-+	.irq_handler	= ata_interrupt,
-+	.irq_clear	= ata_bmdma_irq_clear,
-+	.irq_on		= ata_irq_on,
++	if (dev->prev_success == dev->success && dev->started)
++		reset = 1;
++	dev->prev_success = dev->success;
++	spin_unlock_irqrestore(&dev->lock, flags);
 +
-+	.port_start	= ata_sff_port_start,
-+};
++	if (reset) {
++		dprintk("%s: r: %08x, active: %d, started: %d, "
++				"success: %lu: reset: %d.\n",
++			dev->name, r, dev->active, dev->started,
++			dev->success, reset);
 +
-+static struct ata_port_operations qdi6580dp_port_ops = {
-+	.set_piomode	= qdi6580dp_set_piomode,
++		if (++dev->reset >= 5) {
++			dprintk("%s: really hard reset.\n", dev->name);
++			hifn_reset_dma(dev, 1);
++			hifn_stop_device(dev);
++			hifn_start_device(dev);
++			dev->reset = 0;
++		}
 +
-+	.tf_load	= ata_tf_load,
-+	.tf_read	= ata_tf_read,
-+	.check_status 	= ata_check_status,
-+	.exec_command	= ata_exec_command,
-+	.dev_select 	= ata_std_dev_select,
++		spin_lock_irqsave(&dev->lock, flags);
++		hifn_check_for_completion(dev, -EBUSY);
++		hifn_clear_rings(dev);
++		dev->started = 0;
++		spin_unlock_irqrestore(&dev->lock, flags);
++	}
 +
-+	.freeze		= ata_bmdma_freeze,
-+	.thaw		= ata_bmdma_thaw,
-+	.error_handler	= ata_bmdma_error_handler,
-+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-+	.cable_detect	= ata_cable_40wire,
++	schedule_delayed_work(&dev->work, HZ);
++}
 +
-+	.qc_prep 	= ata_qc_prep,
-+	.qc_issue	= qdi_qc_issue_prot,
++static irqreturn_t hifn_interrupt(int irq, void *data)
++{
++	struct hifn_device *dev = (struct hifn_device *)data;
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++	u32 dmacsr, restart;
 +
-+	.data_xfer	= vlb32_data_xfer,
++	dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
 +
-+	.irq_handler	= ata_interrupt,
-+	.irq_clear	= ata_bmdma_irq_clear,
-+	.irq_on		= ata_irq_on,
++	dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
++			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
++		dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
++		dma->cmdu, dma->srcu, dma->dstu, dma->resu,
++		dma->cmdi, dma->srci, dma->dsti, dma->resi);
 +
-+	.port_start	= ata_sff_port_start,
-+};
++	if ((dmacsr & dev->dmareg) == 0)
++		return IRQ_NONE;
 +
-+static DEFINE_SPINLOCK(winbond_lock);
++	hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);
 +
-+static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
-+{
-+	unsigned long flags;
-+	spin_lock_irqsave(&winbond_lock, flags);
-+	outb(reg, port + 0x01);
-+	outb(val, port + 0x02);
-+	spin_unlock_irqrestore(&winbond_lock, flags);
++	if (dmacsr & HIFN_DMACSR_ENGINE)
++		hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
++	if (dmacsr & HIFN_DMACSR_PUBDONE)
++		hifn_write_1(dev, HIFN_1_PUB_STATUS,
++			hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
++
++	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
++	if (restart) {
++		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
++
++		if (printk_ratelimit())
++			printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
++				dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
++				!!(dmacsr & HIFN_DMACSR_D_OVER),
++				puisr, !!(puisr & HIFN_PUISR_DSTOVER));
++		if (!!(puisr & HIFN_PUISR_DSTOVER))
++			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
++		hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
++					HIFN_DMACSR_D_OVER));
++	}
++
++	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
++			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
++	if (restart) {
++		if (printk_ratelimit())
++			printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
++				dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
++				!!(dmacsr & HIFN_DMACSR_S_ABORT),
++				!!(dmacsr & HIFN_DMACSR_D_ABORT),
++				!!(dmacsr & HIFN_DMACSR_R_ABORT));
++		hifn_reset_dma(dev, 1);
++		hifn_init_dma(dev);
++		hifn_init_registers(dev);
++	}
++
++	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
++		dprintk("%s: wait on command.\n", dev->name);
++		dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
++		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
++	}
++
++	tasklet_schedule(&dev->tasklet);
++	hifn_clear_rings(dev);
++
++	return IRQ_HANDLED;
 +}
 +
-+static u8 winbond_readcfg(unsigned long port, u8 reg)
++static void hifn_flush(struct hifn_device *dev)
 +{
-+	u8 val;
-+
 +	unsigned long flags;
-+	spin_lock_irqsave(&winbond_lock, flags);
-+	outb(reg, port + 0x01);
-+	val = inb(port + 0x02);
-+	spin_unlock_irqrestore(&winbond_lock, flags);
++	struct crypto_async_request *async_req;
++	struct hifn_context *ctx;
++	struct ablkcipher_request *req;
++	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
++	int i;
 +
-+	return val;
++	spin_lock_irqsave(&dev->lock, flags);
++	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
++		struct hifn_desc *d = &dma->resr[i];
++
++		if (dev->sa[i]) {
++			hifn_process_ready(dev->sa[i],
++				(d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0);
++		}
++	}
++
++	while ((async_req = crypto_dequeue_request(&dev->queue))) {
++		ctx = crypto_tfm_ctx(async_req->tfm);
++		req = container_of(async_req, struct ablkcipher_request, base);
++
++		hifn_process_ready(req, -ENODEV);
++	}
++	spin_unlock_irqrestore(&dev->lock, flags);
 +}
 +
-+static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
++static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
++		unsigned int len)
 +{
-+	struct ata_timing t;
-+	struct legacy_data *winbond = ap->host->private_data;
-+	int active, recovery;
-+	u8 reg;
-+	int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
++	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
++	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
++	struct hifn_device *dev = ctx->dev;
 +
-+	reg = winbond_readcfg(winbond->timing, 0x81);
++	if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
++		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++		return -1;
++	}
 +
-+	/* Get the timing data in cycles */
-+	if (reg & 0x40)		/* Fast VLB bus, assume 50MHz */
-+		ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
-+	else
-+		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
++	if (len == HIFN_DES_KEY_LENGTH) {
++		u32 tmp[DES_EXPKEY_WORDS];
++		int ret = des_ekey(tmp, key);
++		
++		if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
++			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
++			return -EINVAL;
++		}
++	}
 +
-+	active = (FIT(t.active, 3, 17) - 1) & 0x0F;
-+	recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
-+	timing = (active << 4) | recovery;
-+	winbond_writecfg(winbond->timing, timing, reg);
++	dev->flags &= ~HIFN_FLAG_OLD_KEY;
 +
-+	/* Load the setup timing */
++	memcpy(ctx->key, key, len);
++	ctx->keysize = len;
 +
-+	reg = 0x35;
-+	if (adev->class != ATA_DEV_ATA)
-+		reg |= 0x08;	/* FIFO off */
-+	if (!ata_pio_need_iordy(adev))
-+		reg |= 0x02;	/* IORDY off */
-+	reg |= (FIT(t.setup, 0, 3) << 6);
-+	winbond_writecfg(winbond->timing, timing + 1, reg);
++	return 0;
 +}
 +
-+static int winbond_port(struct platform_device *dev,
-+			struct legacy_probe *lp, struct legacy_data *ld)
++static int hifn_handle_req(struct ablkcipher_request *req)
 +{
-+	if (devm_request_region(&dev->dev, lp->private, 4, "winbond") == NULL)
-+		return -EBUSY;
-+	ld->timing = lp->private;
-+	return 0;
-+}
++	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
++	struct hifn_device *dev = ctx->dev;
++	int err = -EAGAIN;
 +
-+static struct ata_port_operations winbond_port_ops = {
-+	.set_piomode	= winbond_set_piomode,
++	if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
++		err = hifn_setup_session(req);
 +
-+	.tf_load	= ata_tf_load,
-+	.tf_read	= ata_tf_read,
-+	.check_status 	= ata_check_status,
-+	.exec_command	= ata_exec_command,
-+	.dev_select 	= ata_std_dev_select,
++	if (err == -EAGAIN) {
++		unsigned long flags;
 +
-+	.freeze		= ata_bmdma_freeze,
-+	.thaw		= ata_bmdma_thaw,
-+	.error_handler	= ata_bmdma_error_handler,
-+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-+	.cable_detect	= ata_cable_40wire,
++		spin_lock_irqsave(&dev->lock, flags);
++		err = ablkcipher_enqueue_request(&dev->queue, req);
++		spin_unlock_irqrestore(&dev->lock, flags);
++	}
 +
-+	.qc_prep 	= ata_qc_prep,
-+	.qc_issue	= ata_qc_issue_prot,
++	return err;
++}
 +
-+	.data_xfer	= vlb32_data_xfer,
++static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
++		u8 type, u8 mode)
++{
++	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
++	unsigned ivsize;
 +
-+	.irq_clear	= ata_bmdma_irq_clear,
-+	.irq_on		= ata_irq_on,
- 
-+	.port_start	= ata_sff_port_start,
-+};
++	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
 +
-+static struct legacy_controller controllers[] = {
-+	{"BIOS",	&legacy_port_ops, 	0x1F,
-+						ATA_FLAG_NO_IORDY,	NULL },
-+	{"Snooping", 	&simple_port_ops, 	0x1F,
-+						0	       ,	NULL },
-+	{"PDC20230",	&pdc20230_port_ops,	0x7,
-+						ATA_FLAG_NO_IORDY,	NULL },
-+	{"HT6560A",	&ht6560a_port_ops,	0x07,
-+						ATA_FLAG_NO_IORDY,	NULL },
-+	{"HT6560B",	&ht6560b_port_ops,	0x1F,
-+						ATA_FLAG_NO_IORDY,	NULL },
-+	{"OPTI82C611A",	&opti82c611a_port_ops,	0x0F,
-+						0	       ,	NULL },
-+	{"OPTI82C46X",	&opti82c46x_port_ops,	0x0F,
-+						0	       ,	NULL },
-+	{"QDI6500",	&qdi6500_port_ops,	0x07,
-+					ATA_FLAG_NO_IORDY,	qdi_port },
-+	{"QDI6580",	&qdi6580_port_ops,	0x1F,
-+					0	       ,	qdi_port },
-+	{"QDI6580DP",	&qdi6580dp_port_ops,	0x1F,
-+					0	       ,	qdi_port },
-+	{"W83759A",	&winbond_port_ops,	0x1F,
-+					0	       ,	winbond_port }
-+};
++	if (req->info && mode != ACRYPTO_MODE_ECB) {
++		if (type == ACRYPTO_TYPE_AES_128)
++			ivsize = HIFN_AES_IV_LENGTH;
++		else if (type == ACRYPTO_TYPE_DES)
++			ivsize = HIFN_DES_KEY_LENGTH;
++		else if (type == ACRYPTO_TYPE_3DES)
++			ivsize = HIFN_3DES_KEY_LENGTH;
++	}
 +
-+/**
-+ *	probe_chip_type		-	Discover controller
-+ *	@probe: Probe entry to check
-+ *
-+ *	Probe an ATA port and identify the type of controller. We don't
-+ *	check if the controller appears to be driveless at this point.
-+ */
++	if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
++		if (ctx->keysize == 24)
++			type = ACRYPTO_TYPE_AES_192;
++		else if (ctx->keysize == 32)
++			type = ACRYPTO_TYPE_AES_256;
++	}
 +
-+static __init int probe_chip_type(struct legacy_probe *probe)
++	ctx->op = op;
++	ctx->mode = mode;
++	ctx->type = type;
++	ctx->iv = req->info;
++	ctx->ivsize = ivsize;
++
++	/*
++	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
++	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
++	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
++	 */
++
++	return hifn_handle_req(req);
++}
++
++static int hifn_process_queue(struct hifn_device *dev)
 +{
-+	int mask = 1 << probe->slot;
++	struct crypto_async_request *async_req;
++	struct hifn_context *ctx;
++	struct ablkcipher_request *req;
++	unsigned long flags;
++	int err = 0;
 +
-+	if (winbond && (probe->port == 0x1F0 || probe->port == 0x170)) {
-+		u8 reg = winbond_readcfg(winbond, 0x81);
-+		reg |= 0x80;	/* jumpered mode off */
-+		winbond_writecfg(winbond, 0x81, reg);
-+		reg = winbond_readcfg(winbond, 0x83);
-+		reg |= 0xF0;	/* local control */
-+		winbond_writecfg(winbond, 0x83, reg);
-+		reg = winbond_readcfg(winbond, 0x85);
-+		reg |= 0xF0;	/* programmable timing */
-+		winbond_writecfg(winbond, 0x85, reg);
++	while (dev->started < HIFN_QUEUE_LENGTH) {
++		spin_lock_irqsave(&dev->lock, flags);
++		async_req = crypto_dequeue_request(&dev->queue);
++		spin_unlock_irqrestore(&dev->lock, flags);
 +
-+		reg = winbond_readcfg(winbond, 0x81);
++		if (!async_req)
++			break;
 +
-+		if (reg & mask)
-+			return W83759A;
++		ctx = crypto_tfm_ctx(async_req->tfm);
++		req = container_of(async_req, struct ablkcipher_request, base);
++
++		err = hifn_handle_req(req);
++		if (err)
++			break;
 +	}
-+	if (probe->port == 0x1F0) {
-+		unsigned long flags;
-+		local_irq_save(flags);
- 		/* Probes */
--		inb(0x1F5);
- 		outb(inb(0x1F2) | 0x80, 0x1F2);
-+		inb(0x1F5);
- 		inb(0x1F2);
- 		inb(0x3F6);
- 		inb(0x3F6);
-@@ -760,29 +1168,83 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
- 
- 		if ((inb(0x1F2) & 0x80) == 0) {
- 			/* PDC20230c or 20630 ? */
--			printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n");
--				pio_modes = 0x07;
--			ops = &pdc20230_port_ops;
--			iordy = ATA_FLAG_NO_IORDY;
-+			printk(KERN_INFO  "PDC20230-C/20630 VLB ATA controller"
-+							" detected.\n");
- 			udelay(100);
- 			inb(0x1F5);
-+			local_irq_restore(flags);
-+			return PDC20230;
- 		} else {
- 			outb(0x55, 0x1F2);
- 			inb(0x1F2);
- 			inb(0x1F2);
--			if (inb(0x1F2) == 0x00) {
--				printk(KERN_INFO "PDC20230-B VLB ATA controller detected.\n");
--			}
-+			if (inb(0x1F2) == 0x00)
-+				printk(KERN_INFO "PDC20230-B VLB ATA "
-+						     "controller detected.\n");
-+			local_irq_restore(flags);
-+			return BIOS;
- 		}
- 		local_irq_restore(flags);
- 	}
- 
-+	if (ht6560a & mask)
-+		return HT6560A;
-+	if (ht6560b & mask)
-+		return HT6560B;
-+	if (opti82c611a & mask)
-+		return OPTI611A;
-+	if (opti82c46x & mask)
-+		return OPTI46X;
-+	if (autospeed & mask)
-+		return SNOOP;
-+	return BIOS;
++
++	return err;
 +}
 +
++static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
++		u8 type, u8 mode)
++{
++	int err;
++	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
++	struct hifn_device *dev = ctx->dev;
++
++	err = hifn_setup_crypto_req(req, op, type, mode);
++	if (err)
++		return err;
 +
-+/**
-+ *	legacy_init_one		-	attach a legacy interface
-+ *	@pl: probe record
-+ *
-+ *	Register an ISA bus IDE interface. Such interfaces are PIO and we
-+ *	assume do not support IRQ sharing.
-+ */
++	if (dev->started < HIFN_QUEUE_LENGTH &&	dev->queue.qlen)
++		err = hifn_process_queue(dev);
 +
-+static __init int legacy_init_one(struct legacy_probe *probe)
++	return err;
++}
++
++/*
++ * AES encryption functions.
++ */
++static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
 +{
-+	struct legacy_controller *controller = &controllers[probe->type];
-+	int pio_modes = controller->pio_mask;
-+	unsigned long io = probe->port;
-+	u32 mask = (1 << probe->slot);
-+	struct ata_port_operations *ops = controller->ops;
-+	struct legacy_data *ld = &legacy_data[probe->slot];
-+	struct ata_host *host = NULL;
-+	struct ata_port *ap;
-+	struct platform_device *pdev;
-+	struct ata_device *dev;
-+	void __iomem *io_addr, *ctrl_addr;
-+	u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
-+	int ret;
- 
--	/* Chip does mode setting by command snooping */
--	if (ops == &legacy_port_ops && (autospeed & mask))
--		ops = &simple_port_ops;
-+	iordy |= controller->flags;
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
++}
++static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
++}
++static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
++}
++static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
++}
 +
-+	pdev = platform_device_register_simple(DRV_NAME, probe->slot, NULL, 0);
-+	if (IS_ERR(pdev))
-+		return PTR_ERR(pdev);
++/*
++ * AES decryption functions.
++ */
++static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
++}
++static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
++}
++static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
++}
++static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
++}
 +
-+	ret = -EBUSY;
-+	if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
-+	    devm_request_region(&pdev->dev, io + 0x0206, 1,
-+							"pata_legacy") == NULL)
-+		goto fail;
- 
- 	ret = -ENOMEM;
-+	io_addr = devm_ioport_map(&pdev->dev, io, 8);
-+	ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
-+	if (!io_addr || !ctrl_addr)
-+		goto fail;
-+	if (controller->setup)
-+		if (controller->setup(pdev, probe, ld) < 0)
-+			goto fail;
- 	host = ata_host_alloc(&pdev->dev, 1);
- 	if (!host)
- 		goto fail;
-@@ -795,19 +1257,29 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
- 	ap->ioaddr.altstatus_addr = ctrl_addr;
- 	ap->ioaddr.ctl_addr = ctrl_addr;
- 	ata_std_ports(&ap->ioaddr);
--	ap->private_data = ld;
-+	ap->host->private_data = ld;
- 
--	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, ctrl);
-+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);
- 
--	ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht);
-+	ret = ata_host_activate(host, probe->irq, ata_interrupt, 0,
-+								&legacy_sht);
- 	if (ret)
- 		goto fail;
--
--	legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
- 	ld->platform_dev = pdev;
--	return 0;
- 
-+	/* Nothing found means we drop the port as its probably not there */
++/*
++ * DES encryption functions.
++ */
++static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
++}
++static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
++}
++static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
++}
++static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
++}
 +
-+	ret = -ENODEV;
-+	ata_link_for_each_dev(dev, &ap->link) {
-+		if (!ata_dev_absent(dev)) {
-+			legacy_host[probe->slot] = host;
-+			ld->platform_dev = pdev;
-+			return 0;
-+		}
-+	}
- fail:
-+	if (host)
-+		ata_host_detach(host);
- 	platform_device_unregister(pdev);
- 	return ret;
- }
-@@ -818,13 +1290,15 @@ fail:
-  *	@master: set this if we find an ATA master
-  *	@master: set this if we find an ATA secondary
-  *
-- *	A small number of vendors implemented early PCI ATA interfaces on bridge logic
-- *	without the ATA interface being PCI visible. Where we have a matching PCI driver
-- *	we must skip the relevant device here. If we don't know about it then the legacy
-- *	driver is the right driver anyway.
-+ *	A small number of vendors implemented early PCI ATA interfaces
-+ *	on bridge logic without the ATA interface being PCI visible.
-+ *	Where we have a matching PCI driver we must skip the relevant
-+ *	device here. If we don't know about it then the legacy driver
-+ *	is the right driver anyway.
-  */
- 
--static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *secondary)
-+static void __init legacy_check_special_cases(struct pci_dev *p, int *primary,
-+								int *secondary)
- {
- 	/* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
- 	if (p->vendor == 0x1078 && p->device == 0x0000) {
-@@ -840,7 +1314,8 @@ static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *sec
- 	if (p->vendor == 0x8086 && p->device == 0x1234) {
- 		u16 r;
- 		pci_read_config_word(p, 0x6C, &r);
--		if (r & 0x8000) {	/* ATA port enabled */
-+		if (r & 0x8000) {
-+			/* ATA port enabled */
- 			if (r & 0x4000)
- 				*secondary = 1;
- 			else
-@@ -850,6 +1325,114 @@ static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *sec
- 	}
- }
- 
-+static __init void probe_opti_vlb(void)
++/*
++ * DES decryption functions.
++ */
++static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
 +{
-+	/* If an OPTI 82C46X is present find out where the channels are */
-+	static const char *optis[4] = {
-+		"3/463MV", "5MV",
-+		"5MVA", "5MVB"
-+	};
-+	u8 chans = 1;
-+	u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
++}
++static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
++}
++static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
++}
++static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
++}
 +
-+	opti82c46x = 3;	/* Assume master and slave first */
-+	printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n",
-+								optis[ctrl]);
-+	if (ctrl == 3)
-+		chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
-+	ctrl = opti_syscfg(0xAC);
-+	/* Check enabled and this port is the 465MV port. On the
-+	   MVB we may have two channels */
-+	if (ctrl & 8) {
-+		if (chans == 2) {
-+			legacy_probe_add(0x1F0, 14, OPTI46X, 0);
-+			legacy_probe_add(0x170, 15, OPTI46X, 0);
-+		}
-+		if (ctrl & 4)
-+			legacy_probe_add(0x170, 15, OPTI46X, 0);
-+		else
-+			legacy_probe_add(0x1F0, 14, OPTI46X, 0);
-+	} else
-+		legacy_probe_add(0x1F0, 14, OPTI46X, 0);
++/*
++ * 3DES encryption functions.
++ */
++static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
++}
++static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
++}
++static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
++}
++static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
++			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
 +}
 +
-+static __init void qdi65_identify_port(u8 r, u8 res, unsigned long port)
++/*
++ * 3DES decryption functions.
++ */
++static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
 +{
-+	static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
-+	/* Check card type */
-+	if ((r & 0xF0) == 0xC0) {
-+		/* QD6500: single channel */
-+		if (r & 8)
-+			/* Disabled ? */
-+			return;
-+		legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
-+								QDI6500, port);
-+	}
-+	if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
-+		/* QD6580: dual channel */
-+		if (!request_region(port + 2 , 2, "pata_qdi")) {
-+			release_region(port, 2);
-+			return;
-+		}
-+		res = inb(port + 3);
-+		/* Single channel mode ? */
-+		if (res & 1)
-+			legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
-+								QDI6580, port);
-+		else { /* Dual channel mode */
-+			legacy_probe_add(0x1F0, 14, QDI6580DP, port);
-+			/* port + 0x02, r & 0x04 */
-+			legacy_probe_add(0x170, 15, QDI6580DP, port + 2);
-+		}
-+		release_region(port + 2, 2);
-+	}
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
++}
++static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
++}
++static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
++}
++static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
++{
++	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
++			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
 +}
 +
-+static __init void probe_qdi_vlb(void)
++struct hifn_alg_template
 +{
-+	unsigned long flags;
-+	static const unsigned long qd_port[2] = { 0x30, 0xB0 };
-+	int i;
++	char name[CRYPTO_MAX_ALG_NAME];
++	char drv_name[CRYPTO_MAX_ALG_NAME];
++	unsigned int bsize;
++	struct ablkcipher_alg ablkcipher;
++};
 +
++static struct hifn_alg_template hifn_alg_templates[] = {
 +	/*
-+	 *	Check each possible QD65xx base address
++	 * 3DES ECB, CBC, CFB and OFB modes.
 +	 */
++	{
++		.name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
++		.ablkcipher = {
++			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
++			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_3des_cfb,
++			.decrypt	=	hifn_decrypt_3des_cfb,
++		},
++	},
++	{
++		.name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
++		.ablkcipher = {
++			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
++			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_3des_ofb,
++			.decrypt	=	hifn_decrypt_3des_ofb,
++		},
++	},
++	{
++		.name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
++		.ablkcipher = {
++			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
++			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_3des_cbc,
++			.decrypt	=	hifn_decrypt_3des_cbc,
++		},
++	},
++	{
++		.name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
++		.ablkcipher = {
++			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
++			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_3des_ecb,
++			.decrypt	=	hifn_decrypt_3des_ecb,
++		},
++	},
 +
-+	for (i = 0; i < 2; i++) {
-+		unsigned long port = qd_port[i];
-+		u8 r, res;
++	/*
++	 * DES ECB, CBC, CFB and OFB modes.
++	 */
++	{
++		.name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8,
++		.ablkcipher = {
++			.min_keysize	=	HIFN_DES_KEY_LENGTH,
++			.max_keysize	=	HIFN_DES_KEY_LENGTH,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_des_cfb,
++			.decrypt	=	hifn_decrypt_des_cfb,
++		},
++	},
++	{
++		.name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8,
++		.ablkcipher = {
++			.min_keysize	=	HIFN_DES_KEY_LENGTH,
++			.max_keysize	=	HIFN_DES_KEY_LENGTH,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_des_ofb,
++			.decrypt	=	hifn_decrypt_des_ofb,
++		},
++	},
++	{
++		.name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8,
++		.ablkcipher = {
++			.min_keysize	=	HIFN_DES_KEY_LENGTH,
++			.max_keysize	=	HIFN_DES_KEY_LENGTH,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_des_cbc,
++			.decrypt	=	hifn_decrypt_des_cbc,
++		},
++	},
++	{
++		.name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8,
++		.ablkcipher = {
++			.min_keysize	=	HIFN_DES_KEY_LENGTH,
++			.max_keysize	=	HIFN_DES_KEY_LENGTH,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_des_ecb,
++			.decrypt	=	hifn_decrypt_des_ecb,
++		},
++	},
++
++	/*
++	 * AES ECB, CBC, CFB and OFB modes.
++	 */
++	{
++		.name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16,
++		.ablkcipher = {
++			.min_keysize	=	AES_MIN_KEY_SIZE,
++			.max_keysize	=	AES_MAX_KEY_SIZE,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_aes_ecb,
++			.decrypt	=	hifn_decrypt_aes_ecb,
++		},
++	},
++	{
++		.name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16,
++		.ablkcipher = {
++			.min_keysize	=	AES_MIN_KEY_SIZE,
++			.max_keysize	=	AES_MAX_KEY_SIZE,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_aes_cbc,
++			.decrypt	=	hifn_decrypt_aes_cbc,
++		},
++	},
++	{
++		.name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16,
++		.ablkcipher = {
++			.min_keysize	=	AES_MIN_KEY_SIZE,
++			.max_keysize	=	AES_MAX_KEY_SIZE,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_aes_cfb,
++			.decrypt	=	hifn_decrypt_aes_cfb,
++		},
++	},
++	{
++		.name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16,
++		.ablkcipher = {
++			.min_keysize	=	AES_MIN_KEY_SIZE,
++			.max_keysize	=	AES_MAX_KEY_SIZE,
++			.setkey		=	hifn_setkey,
++			.encrypt	=	hifn_encrypt_aes_ofb,
++			.decrypt	=	hifn_decrypt_aes_ofb,
++		},
++	},
++};
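
(Illustration only, not part of the patch.) The table above is what makes the engine reachable through the standard Crypto API names, so in-kernel users never call the hifn_* wrappers directly. A minimal consumer sketch against the stock 2.6.24 ablkcipher interface, assuming the card and driver are present, could look roughly like this:

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static void example_done(struct crypto_async_request *req, int err)
{
	if (err == -EINPROGRESS)
		return;		/* backlogged; the final completion follows */
	complete(req->data);
}

/* Encrypt 'len' bytes in place with AES-128-CBC through whatever
 * implementation the crypto core selects for "cbc(aes)". */
static int example_cbc_aes(void *buf, unsigned int len, u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct completion done;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, 16);
	if (err)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&done);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_done, &done);
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		/* asynchronous path: wait for the hardware completion */
		wait_for_completion(&done);
		err = 0;	/* simplified; real code re-checks the final status */
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}

Whether this driver actually services such a request depends on priority-based selection: hifn_alg_alloc() below registers each entry with cra_priority 300, which is meant to outrank the generic software ciphers.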
 +
++static int hifn_cra_init(struct crypto_tfm *tfm)
++{
++	struct crypto_alg *alg = tfm->__crt_alg;
++	struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
++	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
 +
-+		if (request_region(port, 2, "pata_qdi")) {
-+			/* Check for a card */
-+			local_irq_save(flags);
-+			/* I have no h/w that needs this delay but it
-+			   is present in the historic code */
-+			r = inb(port);
-+			udelay(1);
-+			outb(0x19, port);
-+			udelay(1);
-+			res = inb(port);
-+			udelay(1);
-+			outb(r, port);
-+			udelay(1);
-+			local_irq_restore(flags);
++	ctx->dev = ha->dev;
 +
-+			/* Fail */
-+			if (res == 0x19) {
-+				release_region(port, 2);
-+				continue;
-+			}
-+			/* Passes the presence test */
-+			r = inb(port + 1);
-+			udelay(1);
-+			/* Check port agrees with port set */
-+			if ((r & 2) >> 1 == i)
-+				qdi65_identify_port(r, res, port);
-+			release_region(port, 2);
-+		}
-+	}
++	return 0;
 +}
- 
- /**
-  *	legacy_init		-	attach legacy interfaces
-@@ -867,15 +1450,17 @@ static __init int legacy_init(void)
- 	int ct = 0;
- 	int primary = 0;
- 	int secondary = 0;
--	int last_port = NR_HOST;
-+	int pci_present = 0;
-+	struct legacy_probe *pl = &probe_list[0];
-+	int slot = 0;
- 
- 	struct pci_dev *p = NULL;
- 
- 	for_each_pci_dev(p) {
- 		int r;
--		/* Check for any overlap of the system ATA mappings. Native mode controllers
--		   stuck on these addresses or some devices in 'raid' mode won't be found by
--		   the storage class test */
-+		/* Check for any overlap of the system ATA mappings. Native
-+		   mode controllers stuck on these addresses or some devices
-+		   in 'raid' mode won't be found by the storage class test */
- 		for (r = 0; r < 6; r++) {
- 			if (pci_resource_start(p, r) == 0x1f0)
- 				primary = 1;
-@@ -885,49 +1470,39 @@ static __init int legacy_init(void)
- 		/* Check for special cases */
- 		legacy_check_special_cases(p, &primary, &secondary);
- 
--		/* If PCI bus is present then don't probe for tertiary legacy ports */
--		if (probe_all == 0)
--			last_port = 2;
-+		/* If PCI bus is present then don't probe for tertiary
-+		   legacy ports */
-+		pci_present = 1;
- 	}
- 
--	/* If an OPTI 82C46X is present find out where the channels are */
--	if (opti82c46x) {
--		static const char *optis[4] = {
--			"3/463MV", "5MV",
--			"5MVA", "5MVB"
--		};
--		u8 chans = 1;
--		u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
--
--		opti82c46x = 3;	/* Assume master and slave first */
--		printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", optis[ctrl]);
--		if (ctrl == 3)
--			chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
--		ctrl = opti_syscfg(0xAC);
--		/* Check enabled and this port is the 465MV port. On the
--		   MVB we may have two channels */
--		if (ctrl & 8) {
--			if (ctrl & 4)
--				opti82c46x = 2;	/* Slave */
--			else
--				opti82c46x = 1;	/* Master */
--			if (chans == 2)
--				opti82c46x = 3; /* Master and Slave */
--		}	/* Slave only */
--		else if (chans == 1)
--			opti82c46x = 1;
-+	if (winbond == 1)
-+		winbond = 0x130;	/* Default port, alt is 1B0 */
 +
-+	if (primary == 0 || all)
-+		legacy_probe_add(0x1F0, 14, UNKNOWN, 0);
-+	if (secondary == 0 || all)
-+		legacy_probe_add(0x170, 15, UNKNOWN, 0);
++static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
++{
++	struct hifn_crypto_alg *alg;
++	int err;
 +
-+	if (probe_all || !pci_present) {
-+		/* ISA/VLB extra ports */
-+		legacy_probe_add(0x1E8, 11, UNKNOWN, 0);
-+		legacy_probe_add(0x168, 10, UNKNOWN, 0);
-+		legacy_probe_add(0x1E0, 8, UNKNOWN, 0);
-+		legacy_probe_add(0x160, 12, UNKNOWN, 0);
- 	}
- 
--	for (i = 0; i < last_port; i++) {
--		/* Skip primary if we have seen a PCI one */
--		if (i == 0 && primary == 1)
--			continue;
--		/* Skip secondary if we have seen a PCI one */
--		if (i == 1 && secondary == 1)
-+	if (opti82c46x)
-+		probe_opti_vlb();
-+	if (qdi)
-+		probe_qdi_vlb();
++	alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
++	if (!alg)
++		return -ENOMEM;
 +
-+	for (i = 0; i < NR_HOST; i++, pl++) {
-+		if (pl->port == 0)
- 			continue;
--		if (legacy_init_one(i, legacy_port[i],
--				   legacy_port[i] + 0x0206,
--				   legacy_irq[i]) == 0)
-+		if (pl->type == UNKNOWN)
-+			pl->type = probe_chip_type(pl);
-+		pl->slot = slot++;
-+		if (legacy_init_one(pl) == 0)
- 			ct++;
- 	}
- 	if (ct != 0)
-@@ -941,11 +1516,8 @@ static __exit void legacy_exit(void)
- 
- 	for (i = 0; i < nr_legacy_host; i++) {
- 		struct legacy_data *ld = &legacy_data[i];
--
- 		ata_host_detach(legacy_host[i]);
- 		platform_device_unregister(ld->platform_dev);
--		if (ld->timing)
--			release_region(ld->timing, 2);
- 	}
- }
- 
-@@ -960,9 +1532,9 @@ module_param(ht6560a, int, 0);
- module_param(ht6560b, int, 0);
- module_param(opti82c611a, int, 0);
- module_param(opti82c46x, int, 0);
-+module_param(qdi, int, 0);
- module_param(pio_mask, int, 0);
- module_param(iordy_mask, int, 0);
- 
- module_init(legacy_init);
- module_exit(legacy_exit);
--
-diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
-index 50c56e2..dc40162 100644
---- a/drivers/ata/pata_mpc52xx.c
-+++ b/drivers/ata/pata_mpc52xx.c
-@@ -364,7 +364,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
- {
- 	unsigned int ipb_freq;
- 	struct resource res_mem;
--	int ata_irq = NO_IRQ;
-+	int ata_irq;
- 	struct mpc52xx_ata __iomem *ata_regs;
- 	struct mpc52xx_ata_priv *priv;
- 	int rv;
-diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
-new file mode 100644
-index 0000000..1c1b835
---- /dev/null
-+++ b/drivers/ata/pata_ninja32.c
-@@ -0,0 +1,214 @@
-+/*
-+ * pata_ninja32.c 	- Ninja32 PATA for new ATA layer
-+ *			  (C) 2007 Red Hat Inc
-+ *			  Alan Cox <alan at redhat.com>
-+ *
-+ * Note: The controller like many controllers has shared timings for
-+ * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
-+ * in the dma_stop function. Thus we actually don't need a set_dmamode
-+ * method as the PIO method is always called and will set the right PIO
-+ * timing parameters.
-+ *
-+ * The Ninja32 Cardbus is not a generic SFF controller. Instead it is
-+ * laid out as follows off BAR 0. This is based upon Mark Lord's delkin
-+ * driver and the extensive analysis done by the BSD developers, notably
-+ * ITOH Yasufumi.
-+ *
-+ *	Base + 0x00 IRQ Status
-+ *	Base + 0x01 IRQ control
-+ *	Base + 0x02 Chipset control
-+ *	Base + 0x04 VDMA and reset control + wait bits
-+ *	Base + 0x08 BMIMBA
-+ *	Base + 0x0C DMA Length
-+ *	Base + 0x10 Taskfile
-+ *	Base + 0x18 BMDMA Status ?
-+ *	Base + 0x1C
-+ *	Base + 0x1D Bus master control
-+ *		bit 0 = enable
-+ *		bit 1 = 0 write/1 read
-+ *		bit 2 = 1 sgtable
-+ *		bit 3 = go
-+ *		bit 4-6 wait bits
-+ *		bit 7 = done
-+ *	Base + 0x1E AltStatus
-+ *	Base + 0x1F timing register
-+ */
++	snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
++	snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name);
 +
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/blkdev.h>
-+#include <linux/delay.h>
-+#include <scsi/scsi_host.h>
-+#include <linux/libata.h>
++	alg->alg.cra_priority = 300;
++	alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
++	alg->alg.cra_blocksize = t->bsize;
++	alg->alg.cra_ctxsize = sizeof(struct hifn_context);
++	alg->alg.cra_alignmask = 15;
++	if (t->bsize == 8)
++		alg->alg.cra_alignmask = 3;
++	alg->alg.cra_type = &crypto_ablkcipher_type;
++	alg->alg.cra_module = THIS_MODULE;
++	alg->alg.cra_u.ablkcipher = t->ablkcipher;
++	alg->alg.cra_init = hifn_cra_init;
++
++	alg->dev = dev;
++
++	list_add_tail(&alg->entry, &dev->alg_list);
 +
-+#define DRV_NAME "pata_ninja32"
-+#define DRV_VERSION "0.0.1"
++	err = crypto_register_alg(&alg->alg);
++	if (err) {
++		list_del(&alg->entry);
++		kfree(alg);
++	}
 +
++	return err;
++}
 +
-+/**
-+ *	ninja32_set_piomode	-	set initial PIO mode data
-+ *	@ap: ATA interface
-+ *	@adev: ATA device
-+ *
-+ *	Called to do the PIO mode setup. Our timing registers are shared
-+ *	but we want to set the PIO timing by default.
-+ */
++static void hifn_unregister_alg(struct hifn_device *dev)
++{
++	struct hifn_crypto_alg *a, *n;
 +
-+static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev)
++	list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
++		list_del(&a->entry);
++		crypto_unregister_alg(&a->alg);
++		kfree(a);
++	}
++}
++
++static int hifn_register_alg(struct hifn_device *dev)
 +{
-+	static u16 pio_timing[5] = {
-+		0xd6, 0x85, 0x44, 0x33, 0x13
-+	};
-+	iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0],
-+		 ap->ioaddr.bmdma_addr + 0x1f);
-+	ap->private_data = adev;
++	int i, err;
++
++	for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) {
++		err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
++		if (err)
++			goto err_out_exit;
++	}
++
++	return 0;
++
++err_out_exit:
++	hifn_unregister_alg(dev);
++	return err;
 +}
 +
++static void hifn_tasklet_callback(unsigned long data)
++{
++	struct hifn_device *dev = (struct hifn_device *)data;
 +
-+static void ninja32_dev_select(struct ata_port *ap, unsigned int device)
++	/*
++	 * It is ok to call this without the lock held, although it
++	 * modifies some parameters used in parallel (like dev->success);
++	 * those are either used in process context or updated atomically
++	 * (like setting dev->sa[i] to NULL).
++	 */
++	hifn_check_for_completion(dev, 0);
++}
++
++static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 +{
-+	struct ata_device *adev = &ap->link.device[device];
-+	if (ap->private_data != adev) {
-+		iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f);
-+		ata_std_dev_select(ap, device);
-+		ninja32_set_piomode(ap, adev);
++	int err, i;
++	struct hifn_device *dev;
++	char name[8];
++
++	err = pci_enable_device(pdev);
++	if (err)
++		return err;
++	pci_set_master(pdev);
++
++	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
++	if (err)
++		goto err_out_disable_pci_device;
++
++	snprintf(name, sizeof(name), "hifn%d",
++			atomic_inc_return(&hifn_dev_number)-1);
++
++	err = pci_request_regions(pdev, name);
++	if (err)
++		goto err_out_disable_pci_device;
++
++	if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
++	    pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
++	    pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
++		dprintk("%s: Broken hardware - I/O regions are too small.\n",
++				pci_name(pdev));
++		err = -ENODEV;
++		goto err_out_free_regions;
 +	}
-+}
 +
-+static struct scsi_host_template ninja32_sht = {
-+	.module			= THIS_MODULE,
-+	.name			= DRV_NAME,
-+	.ioctl			= ata_scsi_ioctl,
-+	.queuecommand		= ata_scsi_queuecmd,
-+	.can_queue		= ATA_DEF_QUEUE,
-+	.this_id		= ATA_SHT_THIS_ID,
-+	.sg_tablesize		= LIBATA_MAX_PRD,
-+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-+	.emulated		= ATA_SHT_EMULATED,
-+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-+	.proc_name		= DRV_NAME,
-+	.dma_boundary		= ATA_DMA_BOUNDARY,
-+	.slave_configure	= ata_scsi_slave_config,
-+	.slave_destroy		= ata_scsi_slave_destroy,
-+	.bios_param		= ata_std_bios_param,
-+};
++	dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
++			GFP_KERNEL);
++	if (!dev) {
++		err = -ENOMEM;
++		goto err_out_free_regions;
++	}
 +
-+static struct ata_port_operations ninja32_port_ops = {
-+	.set_piomode	= ninja32_set_piomode,
-+	.mode_filter	= ata_pci_default_filter,
++	INIT_LIST_HEAD(&dev->alg_list);
 +
-+	.tf_load	= ata_tf_load,
-+	.tf_read	= ata_tf_read,
-+	.check_status 	= ata_check_status,
-+	.exec_command	= ata_exec_command,
-+	.dev_select 	= ninja32_dev_select,
++	snprintf(dev->name, sizeof(dev->name), "%s", name);
++	spin_lock_init(&dev->lock);
 +
-+	.freeze		= ata_bmdma_freeze,
-+	.thaw		= ata_bmdma_thaw,
-+	.error_handler	= ata_bmdma_error_handler,
-+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-+	.cable_detect	= ata_cable_40wire,
++	for (i=0; i<3; ++i) {
++		unsigned long addr, size;
 +
-+	.bmdma_setup 	= ata_bmdma_setup,
-+	.bmdma_start 	= ata_bmdma_start,
-+	.bmdma_stop	= ata_bmdma_stop,
-+	.bmdma_status 	= ata_bmdma_status,
++		addr = pci_resource_start(pdev, i);
++		size = pci_resource_len(pdev, i);
 +
-+	.qc_prep 	= ata_qc_prep,
-+	.qc_issue	= ata_qc_issue_prot,
++		dev->bar[i] = ioremap_nocache(addr, size);
++		if (!dev->bar[i])
++			goto err_out_unmap_bars;
++	}
 +
-+	.data_xfer	= ata_data_xfer,
++	dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER);
++	if (!dev->result_mem) {
++		dprintk("Failed to allocate %d pages for result_mem.\n",
++				HIFN_MAX_RESULT_ORDER);
++		goto err_out_unmap_bars;
++	}
++	memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER));
 +
-+	.irq_handler	= ata_interrupt,
-+	.irq_clear	= ata_bmdma_irq_clear,
-+	.irq_on		= ata_irq_on,
++	dev->dst = pci_map_single(pdev, (void *)dev->result_mem,
++			PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE);
 +
-+	.port_start	= ata_sff_port_start,
-+};
++	dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
++			&dev->desc_dma);
++	if (!dev->desc_virt) {
++		dprintk("Failed to allocate descriptor rings.\n");
++		goto err_out_free_result_pages;
++	}
++	memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
 +
-+static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-+{
-+	struct ata_host *host;
-+	struct ata_port *ap;
-+	void __iomem *base;
-+	int rc;
++	dev->pdev = pdev;
++	dev->irq = pdev->irq;
 +
-+	host = ata_host_alloc(&dev->dev, 1);
-+	if (!host)
-+		return -ENOMEM;
-+	ap = host->ports[0];
++	for (i=0; i<HIFN_D_RES_RSIZE; ++i)
++		dev->sa[i] = NULL;
 +
-+	/* Set up the PCI device */
-+	rc = pcim_enable_device(dev);
-+	if (rc)
-+		return rc;
-+	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
-+	if (rc == -EBUSY)
-+		pcim_pin_device(dev);
-+	if (rc)
-+		return rc;
++	pci_set_drvdata(pdev, dev);
 +
-+	host->iomap = pcim_iomap_table(dev);
-+	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
-+	if (rc)
-+		return rc;
-+	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
-+	if (rc)
-+		return rc;
-+	pci_set_master(dev);
++	tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev);
 +
-+	/* Set up the register mappings */
-+	base = host->iomap[0];
-+	if (!base)
-+		return -ENOMEM;
-+	ap->ops = &ninja32_port_ops;
-+	ap->pio_mask = 0x1F;
-+	ap->flags |= ATA_FLAG_SLAVE_POSS;
++	crypto_init_queue(&dev->queue, 1);
 +
-+	ap->ioaddr.cmd_addr = base + 0x10;
-+	ap->ioaddr.ctl_addr = base + 0x1E;
-+	ap->ioaddr.altstatus_addr = base + 0x1E;
-+	ap->ioaddr.bmdma_addr = base;
-+	ata_std_ports(&ap->ioaddr);
++	err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
++	if (err) {
++		dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
++		dev->irq = 0;
++		goto err_out_free_desc;
++	}
 +
-+	iowrite8(0x05, base + 0x01);	/* Enable interrupt lines */
-+	iowrite8(0xB3, base + 0x02);	/* Burst, ?? setup */
-+	iowrite8(0x00, base + 0x04);	/* WAIT0 ? */
-+	/* FIXME: Should we disable them at remove ? */
-+	return ata_host_activate(host, dev->irq, ata_interrupt,
-+				 IRQF_SHARED, &ninja32_sht);
-+}
++	err = hifn_start_device(dev);
++	if (err)
++		goto err_out_free_irq;
 +
-+static const struct pci_device_id ninja32[] = {
-+	{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ },
-+};
++	err = hifn_test(dev, 1, 0);
++	if (err)
++		goto err_out_stop_device;
 +
-+static struct pci_driver ninja32_pci_driver = {
-+	.name 		= DRV_NAME,
-+	.id_table	= ninja32,
-+	.probe 		= ninja32_init_one,
-+	.remove		= ata_pci_remove_one
-+};
++	err = hifn_register_rng(dev);
++	if (err)
++		goto err_out_stop_device;
 +
-+static int __init ninja32_init(void)
-+{
-+	return pci_register_driver(&ninja32_pci_driver);
++	err = hifn_register_alg(dev);
++	if (err)
++		goto err_out_unregister_rng;
++
++	INIT_DELAYED_WORK(&dev->work, hifn_work);
++	schedule_delayed_work(&dev->work, HZ);
++
++	dprintk("HIFN crypto accelerator card at %s has been "
++			"successfully registered as %s.\n",
++			pci_name(pdev), dev->name);
++
++	return 0;
++
++err_out_unregister_rng:
++	hifn_unregister_rng(dev);
++err_out_stop_device:
++	hifn_reset_dma(dev, 1);
++	hifn_stop_device(dev);
++err_out_free_irq:
++	free_irq(dev->irq, dev->name);
++	tasklet_kill(&dev->tasklet);
++err_out_free_desc:
++	pci_free_consistent(pdev, sizeof(struct hifn_dma),
++			dev->desc_virt, dev->desc_dma);
++
++err_out_free_result_pages:
++	pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
++			PCI_DMA_FROMDEVICE);
++	free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
++
++err_out_unmap_bars:
++	for (i=0; i<3; ++i)
++		if (dev->bar[i])
++			iounmap(dev->bar[i]);
++
++err_out_free_regions:
++	pci_release_regions(pdev);
++
++err_out_disable_pci_device:
++	pci_disable_device(pdev);
++
++	return err;
 +}
 +
-+static void __exit ninja32_exit(void)
++static void hifn_remove(struct pci_dev *pdev)
 +{
-+	pci_unregister_driver(&ninja32_pci_driver);
-+}
++	int i;
++	struct hifn_device *dev;
 +
-+MODULE_AUTHOR("Alan Cox");
-+MODULE_DESCRIPTION("low-level driver for Ninja32 ATA");
-+MODULE_LICENSE("GPL");
-+MODULE_DEVICE_TABLE(pci, ninja32);
-+MODULE_VERSION(DRV_VERSION);
++	dev = pci_get_drvdata(pdev);
 +
-+module_init(ninja32_init);
-+module_exit(ninja32_exit);
-diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
-index fd36099..3e7f6a9 100644
---- a/drivers/ata/pata_pcmcia.c
-+++ b/drivers/ata/pata_pcmcia.c
-@@ -42,7 +42,7 @@
- 
- 
- #define DRV_NAME "pata_pcmcia"
--#define DRV_VERSION "0.3.2"
-+#define DRV_VERSION "0.3.3"
- 
- /*
-  *	Private data structure to glue stuff together
-@@ -86,6 +86,47 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
- 	return ata_do_set_mode(link, r_failed_dev);
- }
- 
-+/**
-+ *	pcmcia_set_mode_8bit	-	PCMCIA specific mode setup
-+ *	@link: link
-+ *	@r_failed_dev: Return pointer for failed device
-+ *
-+ *	For the simple emulated 8bit stuff the less we do the better.
-+ */
++	if (dev) {
++		cancel_delayed_work(&dev->work);
++		flush_scheduled_work();
 +
-+static int pcmcia_set_mode_8bit(struct ata_link *link,
-+				struct ata_device **r_failed_dev)
-+{
-+	return 0;
++		hifn_unregister_rng(dev);
++		hifn_unregister_alg(dev);
++		hifn_reset_dma(dev, 1);
++		hifn_stop_device(dev);
++
++		free_irq(dev->irq, dev->name);
++		tasklet_kill(&dev->tasklet);
++
++		hifn_flush(dev);
++
++		pci_free_consistent(pdev, sizeof(struct hifn_dma),
++				dev->desc_virt, dev->desc_dma);
++		pci_unmap_single(pdev, dev->dst,
++				PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
++				PCI_DMA_FROMDEVICE);
++		free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
++		for (i=0; i<3; ++i)
++			if (dev->bar[i])
++				iounmap(dev->bar[i]);
++
++		kfree(dev);
++	}
++
++	pci_release_regions(pdev);
++	pci_disable_device(pdev);
 +}
 +
-+/**
-+ *	ata_data_xfer_8bit	 -	Transfer data by 8bit PIO
-+ *	@dev: device to target
-+ *	@buf: data buffer
-+ *	@buflen: buffer length
-+ *	@rw: read/write
-+ *
-+ *	Transfer data from/to the device data register by 8 bit PIO.
-+ *
-+ *	LOCKING:
-+ *	Inherited from caller.
-+ */
++static struct pci_device_id hifn_pci_tbl[] = {
++	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
++	{ 0 }
++};
++MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
 +
-+static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
-+				unsigned char *buf, unsigned int buflen, int rw)
++static struct pci_driver hifn_pci_driver = {
++	.name     = "hifn795x",
++	.id_table = hifn_pci_tbl,
++	.probe    = hifn_probe,
++	.remove   = __devexit_p(hifn_remove),
++};
++
++static int __devinit hifn_init(void)
 +{
-+	struct ata_port *ap = dev->link->ap;
++	unsigned int freq;
++	int err;
 +
-+	if (rw == READ)
-+		ioread8_rep(ap->ioaddr.data_addr, buf, buflen);
-+	else
-+		iowrite8_rep(ap->ioaddr.data_addr, buf, buflen);
++	if (strncmp(hifn_pll_ref, "ext", 3) &&
++	    strncmp(hifn_pll_ref, "pci", 3)) {
++		printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
++				"must be pci or ext\n");
++		return -EINVAL;
++	}
 +
-+	return buflen;
-+}
++	/*
++	 * For the 7955/7956 the reference clock frequency must be in the
++	 * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz,
++	 * but this chip is currently not supported.
++	 */
++	if (hifn_pll_ref[3] != '\0') {
++		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
++		if (freq < 20 || freq > 100) {
++			printk(KERN_ERR "hifn795x: invalid hifn_pll_ref "
++					"frequency, must be in the range "
++					"of 20-100\n");
++			return -EINVAL;
++		}
++	}
 +
++	err = pci_register_driver(&hifn_pci_driver);
++	if (err < 0) {
++		dprintk("Failed to register PCI driver for %s device.\n",
++				hifn_pci_driver.name);
++		return -ENODEV;
++	}
 +
- static struct scsi_host_template pcmcia_sht = {
- 	.module			= THIS_MODULE,
- 	.name			= DRV_NAME,
-@@ -129,6 +170,31 @@ static struct ata_port_operations pcmcia_port_ops = {
- 	.port_start	= ata_sff_port_start,
- };
- 
-+static struct ata_port_operations pcmcia_8bit_port_ops = {
-+	.set_mode	= pcmcia_set_mode_8bit,
-+	.tf_load	= ata_tf_load,
-+	.tf_read	= ata_tf_read,
-+	.check_status 	= ata_check_status,
-+	.exec_command	= ata_exec_command,
-+	.dev_select 	= ata_std_dev_select,
++	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
++			"has been successfully registered.\n");
 +
-+	.freeze		= ata_bmdma_freeze,
-+	.thaw		= ata_bmdma_thaw,
-+	.error_handler	= ata_bmdma_error_handler,
-+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-+	.cable_detect	= ata_cable_40wire,
++	return 0;
++}
 +
-+	.qc_prep 	= ata_qc_prep,
-+	.qc_issue	= ata_qc_issue_prot,
++static void __devexit hifn_fini(void)
++{
++	pci_unregister_driver(&hifn_pci_driver);
 +
-+	.data_xfer	= ata_data_xfer_8bit,
++	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
++			"has been successfully unregistered.\n");
++}
 +
-+	.irq_clear	= ata_bmdma_irq_clear,
-+	.irq_on		= ata_irq_on,
++module_init(hifn_init);
++module_exit(hifn_fini);
 +
-+	.port_start	= ata_sff_port_start,
-+};
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Evgeniy Polyakov <johnpol at 2ka.mipt.ru>");
++MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");
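
(Usage note, not part of the patch.) hifn_init() above only validates the hifn_pll_ref module parameter: it must start with "pci" or "ext" to name the PLL reference clock source, optionally followed by the reference frequency in MHz, which for the 7955/7956 has to lie in the 20-100 range. Assuming the module keeps the usual hifn_795x name, something like "modprobe hifn_795x hifn_pll_ref=ext66" would select a 66 MHz external reference; when no frequency is appended, the driver falls back to a built-in default that is not visible in this hunk.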
+diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
+index 5f7e718..2f3ad3f 100644
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -44,6 +44,7 @@
+  */
+ 
+ #include <crypto/algapi.h>
++#include <crypto/aes.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
+@@ -53,9 +54,6 @@
+ #include <asm/byteorder.h>
+ #include "padlock.h"
+ 
+-#define AES_MIN_KEY_SIZE	16	/* in uint8_t units */
+-#define AES_MAX_KEY_SIZE	32	/* ditto */
+-#define AES_BLOCK_SIZE		16	/* ditto */
+ #define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
+ #define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
+ 
+@@ -419,6 +417,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ /* ====== Encryption/decryption routines ====== */
+ 
+ /* These are the real call to PadLock. */
++static inline void padlock_reset_key(void)
++{
++	asm volatile ("pushfl; popfl");
++}
 +
- #define CS_CHECK(fn, ret) \
- do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+ static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
+ 				  void *control_word)
+ {
+@@ -439,8 +442,6 @@ static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+ static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
+ 			     struct cword *cword)
+ {
+-	asm volatile ("pushfl; popfl");
+-
+ 	/* padlock_xcrypt requires at least two blocks of data. */
+ 	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
+ 		       (PAGE_SIZE - 1)))) {
+@@ -459,7 +460,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+ 		return;
+ 	}
  
-@@ -153,9 +219,12 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
- 		cistpl_cftable_entry_t dflt;
- 	} *stk = NULL;
- 	cistpl_cftable_entry_t *cfg;
--	int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM;
-+	int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM, p;
- 	unsigned long io_base, ctl_base;
- 	void __iomem *io_addr, *ctl_addr;
-+	int n_ports = 1;
+-	asm volatile ("pushfl; popfl");		/* enforce key reload. */
+ 	asm volatile ("test $1, %%cl;"
+ 		      "je 1f;"
+ 		      "lea -1(%%ecx), %%eax;"
+@@ -476,8 +476,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+ 				     u8 *iv, void *control_word, u32 count)
+ {
+-	/* Enforce key reload. */
+-	asm volatile ("pushfl; popfl");
+ 	/* rep xcryptcbc */
+ 	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+ 		      : "+S" (input), "+D" (output), "+a" (iv)
+@@ -488,12 +486,14 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+ 	struct aes_ctx *ctx = aes_ctx(tfm);
++	padlock_reset_key();
+ 	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+ }
+ 
+ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+ 	struct aes_ctx *ctx = aes_ctx(tfm);
++	padlock_reset_key();
+ 	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+ }
+ 
+@@ -526,6 +526,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+ 	struct blkcipher_walk walk;
+ 	int err;
+ 
++	padlock_reset_key();
 +
-+	struct ata_port_operations *ops = &pcmcia_port_ops;
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	err = blkcipher_walk_virt(desc, &walk);
  
- 	info = kzalloc(sizeof(*info), GFP_KERNEL);
- 	if (info == NULL)
-@@ -282,27 +351,32 @@ next_entry:
- 	/* FIXME: Could be more ports at base + 0x10 but we only deal with
- 	   one right now */
- 	if (pdev->io.NumPorts1 >= 0x20)
--		printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
-+		n_ports = 2;
+@@ -548,6 +550,8 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+ 	struct blkcipher_walk walk;
+ 	int err;
  
-+	if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620)
-+		ops = &pcmcia_8bit_port_ops;
- 	/*
- 	 *	Having done the PCMCIA plumbing the ATA side is relatively
- 	 *	sane.
- 	 */
- 	ret = -ENOMEM;
--	host = ata_host_alloc(&pdev->dev, 1);
-+	host = ata_host_alloc(&pdev->dev, n_ports);
- 	if (!host)
- 		goto failed;
--	ap = host->ports[0];
++	padlock_reset_key();
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	err = blkcipher_walk_virt(desc, &walk);
  
--	ap->ops = &pcmcia_port_ops;
--	ap->pio_mask = 1;		/* ISA so PIO 0 cycles */
--	ap->flags |= ATA_FLAG_SLAVE_POSS;
--	ap->ioaddr.cmd_addr = io_addr;
--	ap->ioaddr.altstatus_addr = ctl_addr;
--	ap->ioaddr.ctl_addr = ctl_addr;
--	ata_std_ports(&ap->ioaddr);
-+	for (p = 0; p < n_ports; p++) {
-+		ap = host->ports[p];
+@@ -592,6 +596,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+ 	struct blkcipher_walk walk;
+ 	int err;
  
--	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
-+		ap->ops = ops;
-+		ap->pio_mask = 1;		/* ISA so PIO 0 cycles */
-+		ap->flags |= ATA_FLAG_SLAVE_POSS;
-+		ap->ioaddr.cmd_addr = io_addr + 0x10 * p;
-+		ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p;
-+		ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p;
-+		ata_std_ports(&ap->ioaddr);
++	padlock_reset_key();
 +
-+		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
-+	}
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	err = blkcipher_walk_virt(desc, &walk);
  
- 	/* activate */
- 	ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt,
-@@ -360,6 +434,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
- 	PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
- 	PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
- 	PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),	/* SanDisk CFA */
-+	PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), 	/* TI emulated */
- 	PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000),	/* Toshiba */
- 	PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
- 	PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000),	/* Samsung */
-diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
-index 2622577..028af5d 100644
---- a/drivers/ata/pata_pdc2027x.c
-+++ b/drivers/ata/pata_pdc2027x.c
-@@ -348,7 +348,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
- 	ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
- 			  ATA_ID_PROD_LEN + 1);
- 	/* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
--	if (strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
-+	if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
- 		mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
+@@ -616,6 +622,8 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+ 	struct blkcipher_walk walk;
+ 	int err;
  
- 	return ata_pci_default_filter(adev, mask);
-diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
-index 6c9689b..3ed8667 100644
---- a/drivers/ata/pata_pdc202xx_old.c
-+++ b/drivers/ata/pata_pdc202xx_old.c
-@@ -168,8 +168,7 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
- 	pdc202xx_set_dmamode(ap, qc->dev);
++	padlock_reset_key();
++
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+ 	err = blkcipher_walk_virt(desc, &walk);
  
- 	/* Cases the state machine will not complete correctly without help */
--	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATA_PROT_ATAPI_DMA)
--	{
-+	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATAPI_PROT_DMA) {
- 		len = qc->nbytes / 2;
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index d59b2f4..bcf52df 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -41,12 +41,12 @@
+  * the definition of dma_event_callback in dmaengine.h.
+  *
+  * Each device has a kref, which is initialized to 1 when the device is
+- * registered. A kref_get is done for each class_device registered.  When the
+- * class_device is released, the coresponding kref_put is done in the release
++ * registered. A kref_get is done for each device registered.  When the
++ * device is released, the corresponding kref_put is done in the release
+  * method. Every time one of the device's channels is allocated to a client,
+  * a kref_get occurs.  When the channel is freed, the coresponding kref_put
+  * happens. The device's release function does a completion, so
+- * unregister_device does a remove event, class_device_unregister, a kref_put
++ * unregister_device does a remove event, device_unregister, a kref_put
+  * for the first reference, then waits on the completion for all other
+  * references to finish.
+  *
+@@ -77,9 +77,9 @@ static LIST_HEAD(dma_client_list);
  
- 		if (tf->flags & ATA_TFLAG_WRITE)
-@@ -208,7 +207,7 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
- 	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
+ /* --- sysfs implementation --- */
  
- 	/* Cases the state machine will not complete correctly */
--	if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
-+	if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
- 		iowrite32(0, atapi_reg);
- 		iowrite8(ioread8(clock) & ~sel66, clock);
- 	}
-diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
-index a4c0e50..9f308ed 100644
---- a/drivers/ata/pata_qdi.c
-+++ b/drivers/ata/pata_qdi.c
-@@ -124,29 +124,33 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
- 	return ata_qc_issue_prot(qc);
+-static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
++static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+-	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
++	struct dma_chan *chan = to_dma_chan(dev);
+ 	unsigned long count = 0;
+ 	int i;
+ 
+@@ -89,9 +89,10 @@ static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
+ 	return sprintf(buf, "%lu\n", count);
  }
  
--static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
-+static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
-+				  unsigned int buflen, int rw)
+-static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
++static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
++				      char *buf)
  {
--	struct ata_port *ap = adev->link->ap;
--	int slop = buflen & 3;
-+	if (ata_id_has_dword_io(dev->id)) {
-+		struct ata_port *ap = dev->link->ap;
-+		int slop = buflen & 3;
+-	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
++	struct dma_chan *chan = to_dma_chan(dev);
+ 	unsigned long count = 0;
+ 	int i;
  
--	if (ata_id_has_dword_io(adev->id)) {
--		if (write_data)
--			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
--		else
-+		if (rw == READ)
- 			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-+		else
-+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+@@ -101,9 +102,9 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
+ 	return sprintf(buf, "%lu\n", count);
+ }
  
- 		if (unlikely(slop)) {
--			__le32 pad = 0;
--			if (write_data) {
--				memcpy(&pad, buf + buflen - slop, slop);
--				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
--			} else {
-+			u32 pad;
-+			if (rw == READ) {
- 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
- 				memcpy(buf + buflen - slop, &pad, slop);
-+			} else {
-+				memcpy(&pad, buf + buflen - slop, slop);
-+				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
- 			}
-+			buflen += 4 - slop;
- 		}
- 	} else
--		ata_data_xfer(adev, buf, buflen, write_data);
-+		buflen = ata_data_xfer(dev, buf, buflen, rw);
-+
-+	return buflen;
+-static ssize_t show_in_use(struct class_device *cd, char *buf)
++static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+-	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
++	struct dma_chan *chan = to_dma_chan(dev);
+ 	int in_use = 0;
+ 
+ 	if (unlikely(chan->slow_ref) &&
+@@ -119,7 +120,7 @@ static ssize_t show_in_use(struct class_device *cd, char *buf)
+ 	return sprintf(buf, "%d\n", in_use);
  }
  
- static struct scsi_host_template qdi_sht = {
-diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
-index ea2ef9f..55055b2 100644
---- a/drivers/ata/pata_scc.c
-+++ b/drivers/ata/pata_scc.c
-@@ -768,45 +768,47 @@ static u8 scc_bmdma_status (struct ata_port *ap)
+-static struct class_device_attribute dma_class_attrs[] = {
++static struct device_attribute dma_attrs[] = {
+ 	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
+ 	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
+ 	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
+@@ -128,16 +129,16 @@ static struct class_device_attribute dma_class_attrs[] = {
  
- /**
-  *	scc_data_xfer - Transfer data by PIO
-- *	@adev: device for this I/O
-+ *	@dev: device for this I/O
-  *	@buf: data buffer
-  *	@buflen: buffer length
-- *	@write_data: read/write
-+ *	@rw: read/write
-  *
-  *	Note: Original code is ata_data_xfer().
-  */
+ static void dma_async_device_cleanup(struct kref *kref);
  
--static void scc_data_xfer (struct ata_device *adev, unsigned char *buf,
--			   unsigned int buflen, int write_data)
-+static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
-+				   unsigned int buflen, int rw)
+-static void dma_class_dev_release(struct class_device *cd)
++static void dma_dev_release(struct device *dev)
  {
--	struct ata_port *ap = adev->link->ap;
-+	struct ata_port *ap = dev->link->ap;
- 	unsigned int words = buflen >> 1;
- 	unsigned int i;
- 	u16 *buf16 = (u16 *) buf;
- 	void __iomem *mmio = ap->ioaddr.data_addr;
+-	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
++	struct dma_chan *chan = to_dma_chan(dev);
+ 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
+ }
  
- 	/* Transfer multiple of 2 bytes */
--	if (write_data) {
--		for (i = 0; i < words; i++)
--			out_be32(mmio, cpu_to_le16(buf16[i]));
--	} else {
-+	if (rw == READ)
- 		for (i = 0; i < words; i++)
- 			buf16[i] = le16_to_cpu(in_be32(mmio));
--	}
-+	else
-+		for (i = 0; i < words; i++)
-+			out_be32(mmio, cpu_to_le16(buf16[i]));
+ static struct class dma_devclass = {
+-	.name            = "dma",
+-	.class_dev_attrs = dma_class_attrs,
+-	.release = dma_class_dev_release,
++	.name		= "dma",
++	.dev_attrs	= dma_attrs,
++	.dev_release	= dma_dev_release,
+ };
  
- 	/* Transfer trailing 1 byte, if any. */
- 	if (unlikely(buflen & 0x01)) {
- 		u16 align_buf[1] = { 0 };
- 		unsigned char *trailing_buf = buf + buflen - 1;
+ /* --- client and device registration --- */
+@@ -377,12 +378,12 @@ int dma_async_device_register(struct dma_device *device)
+ 			continue;
  
--		if (write_data) {
--			memcpy(align_buf, trailing_buf, 1);
--			out_be32(mmio, cpu_to_le16(align_buf[0]));
--		} else {
-+		if (rw == READ) {
- 			align_buf[0] = le16_to_cpu(in_be32(mmio));
- 			memcpy(trailing_buf, align_buf, 1);
-+		} else {
-+			memcpy(align_buf, trailing_buf, 1);
-+			out_be32(mmio, cpu_to_le16(align_buf[0]));
+ 		chan->chan_id = chancnt++;
+-		chan->class_dev.class = &dma_devclass;
+-		chan->class_dev.dev = NULL;
+-		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
++		chan->dev.class = &dma_devclass;
++		chan->dev.parent = NULL;
++		snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
+ 		         device->dev_id, chan->chan_id);
+ 
+-		rc = class_device_register(&chan->class_dev);
++		rc = device_register(&chan->dev);
+ 		if (rc) {
+ 			chancnt--;
+ 			free_percpu(chan->local);
+@@ -411,7 +412,7 @@ err_out:
+ 		if (chan->local == NULL)
+ 			continue;
+ 		kref_put(&device->refcount, dma_async_device_cleanup);
+-		class_device_unregister(&chan->class_dev);
++		device_unregister(&chan->dev);
+ 		chancnt--;
+ 		free_percpu(chan->local);
+ 	}
+@@ -445,7 +446,7 @@ void dma_async_device_unregister(struct dma_device *device)
+ 
+ 	list_for_each_entry(chan, &device->channels, device_node) {
+ 		dma_clients_notify_removed(chan);
+-		class_device_unregister(&chan->class_dev);
++		device_unregister(&chan->dev);
+ 		dma_chan_release(chan);
+ 	}
+ 
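
The sysfs conversion above leans on a to_dma_chan() helper that this hunk does not show; judging from the chan->dev member that dma_async_device_register() now initializes, it is presumably the usual container_of() wrapper along these lines:

/* Presumed shape of the helper (likely added to dmaengine.h, not shown
 * here): map the embedded struct device back to its containing dma_chan. */
static inline struct dma_chan *to_dma_chan(struct device *dev)
{
	return container_of(dev, struct dma_chan, dev);
}
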
+diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
+index 70b837f..5376457 100644
+--- a/drivers/edac/edac_device_sysfs.c
++++ b/drivers/edac/edac_device_sysfs.c
+@@ -246,16 +246,6 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
+ 
+ 	/* Init the devices's kobject */
+ 	memset(&edac_dev->kobj, 0, sizeof(struct kobject));
+-	edac_dev->kobj.ktype = &ktype_device_ctrl;
+-
+-	/* set this new device under the edac_class kobject */
+-	edac_dev->kobj.parent = &edac_class->kset.kobj;
+-
+-	/* generate sysfs "..../edac/<name>"   */
+-	debugf4("%s() set name of kobject to: %s\n", __func__, edac_dev->name);
+-	err = kobject_set_name(&edac_dev->kobj, "%s", edac_dev->name);
+-	if (err)
+-		goto err_out;
+ 
+ 	/* Record which module 'owns' this control structure
+ 	 * and bump the ref count of the module
+@@ -268,12 +258,15 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
+ 	}
+ 
+ 	/* register */
+-	err = kobject_register(&edac_dev->kobj);
++	err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
++				   &edac_class->kset.kobj,
++				   "%s", edac_dev->name);
+ 	if (err) {
+ 		debugf1("%s()Failed to register '.../edac/%s'\n",
+ 			__func__, edac_dev->name);
+ 		goto err_kobj_reg;
+ 	}
++	kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
+ 
+ 	/* At this point, to 'free' the control struct,
+ 	 * edac_device_unregister_sysfs_main_kobj() must be used
+@@ -310,7 +303,7 @@ void edac_device_unregister_sysfs_main_kobj(
+ 	 *   a) module_put() this module
+ 	 *   b) 'kfree' the memory
+ 	 */
+-	kobject_unregister(&edac_dev->kobj);
++	kobject_put(&edac_dev->kobj);
+ }
+ 
+ /* edac_dev -> instance information */
+@@ -533,12 +526,6 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
+ 
+ 	/* init this block's kobject */
+ 	memset(&block->kobj, 0, sizeof(struct kobject));
+-	block->kobj.parent = &instance->kobj;
+-	block->kobj.ktype = &ktype_block_ctrl;
+-
+-	err = kobject_set_name(&block->kobj, "%s", block->name);
+-	if (err)
+-		return err;
+ 
+ 	/* bump the main kobject's reference count for this controller
+ 	 * and this instance is dependant on the main
+@@ -550,7 +537,9 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
+ 	}
+ 
+ 	/* Add this block's kobject */
+-	err = kobject_register(&block->kobj);
++	err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl,
++				   &instance->kobj,
++				   "%s", block->name);
+ 	if (err) {
+ 		debugf1("%s() Failed to register instance '%s'\n",
+ 			__func__, block->name);
+@@ -579,12 +568,13 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
+ 				goto err_on_attrib;
  		}
-+		words++;
  	}
-+
-+	return words << 1;
++	kobject_uevent(&block->kobj, KOBJ_ADD);
+ 
+ 	return 0;
+ 
+ 	/* Error unwind stack */
+ err_on_attrib:
+-	kobject_unregister(&block->kobj);
++	kobject_put(&block->kobj);
+ 
+ err_out:
+ 	return err;
+@@ -615,7 +605,7 @@ static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
+ 	/* unregister this block's kobject, SEE:
+ 	 *	edac_device_ctrl_block_release() callback operation
+ 	 */
+-	kobject_unregister(&block->kobj);
++	kobject_put(&block->kobj);
  }
  
- /**
-diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
-index 8bed888..9c523fb 100644
---- a/drivers/ata/pata_serverworks.c
-+++ b/drivers/ata/pata_serverworks.c
-@@ -41,7 +41,7 @@
- #include <linux/libata.h>
+ /* instance ctor/dtor code */
+@@ -637,15 +627,8 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+ 	/* Init the instance's kobject */
+ 	memset(&instance->kobj, 0, sizeof(struct kobject));
  
- #define DRV_NAME "pata_serverworks"
--#define DRV_VERSION "0.4.2"
-+#define DRV_VERSION "0.4.3"
+-	/* set this new device under the edac_device main kobject */
+-	instance->kobj.parent = &edac_dev->kobj;
+-	instance->kobj.ktype = &ktype_instance_ctrl;
+ 	instance->ctl = edac_dev;
  
- #define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
- #define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
-@@ -102,7 +102,7 @@ static int osb4_cable(struct ata_port *ap) {
+-	err = kobject_set_name(&instance->kobj, "%s", instance->name);
+-	if (err)
+-		goto err_out;
+-
+ 	/* bump the main kobject's reference count for this controller
+ 	 * and this instance is dependant on the main
+ 	 */
+@@ -655,8 +638,9 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+ 		goto err_out;
+ 	}
+ 
+-	/* Formally register this instance's kobject */
+-	err = kobject_register(&instance->kobj);
++	/* Formally register this instance's kobject under the edac_device */
++	err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
++				   &edac_dev->kobj, "%s", instance->name);
+ 	if (err != 0) {
+ 		debugf2("%s() Failed to register instance '%s'\n",
+ 			__func__, instance->name);
+@@ -679,6 +663,7 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+ 			goto err_release_instance_kobj;
+ 		}
+ 	}
++	kobject_uevent(&instance->kobj, KOBJ_ADD);
+ 
+ 	debugf4("%s() Registered instance %d '%s' kobject\n",
+ 		__func__, idx, instance->name);
+@@ -687,7 +672,7 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+ 
+ 	/* error unwind stack */
+ err_release_instance_kobj:
+-	kobject_unregister(&instance->kobj);
++	kobject_put(&instance->kobj);
+ 
+ err_out:
+ 	return err;
+@@ -712,7 +697,7 @@ static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
+ 	/* unregister this instance's kobject, SEE:
+ 	 *	edac_device_ctrl_instance_release() for callback operation
+ 	 */
+-	kobject_unregister(&instance->kobj);
++	kobject_put(&instance->kobj);
  }
  
- /**
-- *	csb4_cable	-	CSB5/6 cable detect
-+ *	csb_cable	-	CSB5/6 cable detect
-  *	@ap: ATA port to check
-  *
-  *	Serverworks default arrangement is to use the drive side detection
-@@ -110,7 +110,7 @@ static int osb4_cable(struct ata_port *ap) {
+ /*
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 3706b2b..9aac880 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -380,13 +380,6 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ 	/* generate ..../edac/mc/mc<id>/csrow<index>   */
+ 	memset(&csrow->kobj, 0, sizeof(csrow->kobj));
+ 	csrow->mci = mci;	/* include container up link */
+-	csrow->kobj.parent = kobj_mci;
+-	csrow->kobj.ktype = &ktype_csrow;
+-
+-	/* name this instance of csrow<id> */
+-	err = kobject_set_name(&csrow->kobj, "csrow%d", index);
+-	if (err)
+-		goto err_out;
+ 
+ 	/* bump the mci instance's kobject's ref count */
+ 	kobj = kobject_get(&mci->edac_mci_kobj);
+@@ -396,12 +389,13 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ 	}
+ 
+ 	/* Instantiate the csrow object */
+-	err = kobject_register(&csrow->kobj);
++	err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci,
++				   "csrow%d", index);
+ 	if (err)
+ 		goto err_release_top_kobj;
+ 
+ 	/* At this point, to release a csrow kobj, one must
+-	 * call the kobject_unregister and allow that tear down
++	 * call the kobject_put and allow that tear down
+ 	 * to work the releasing
+ 	 */
+ 
+@@ -412,11 +406,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ 		err = edac_create_channel_files(&csrow->kobj, chan);
+ 		if (err) {
+ 			/* special case the unregister here */
+-			kobject_unregister(&csrow->kobj);
++			kobject_put(&csrow->kobj);
+ 			goto err_out;
+ 		}
+ 	}
+-
++	kobject_uevent(&csrow->kobj, KOBJ_ADD);
+ 	return 0;
+ 
+ 	/* error unwind stack */
+@@ -744,7 +738,6 @@ static struct kobj_type ktype_mc_set_attribs = {
   */
+ static struct kset mc_kset = {
+ 	.kobj = {.ktype = &ktype_mc_set_attribs },
+-	.ktype = &ktype_mci,
+ };
  
- static int csb_cable(struct ata_port *ap) {
--	return ATA_CBL_PATA80;
-+	return ATA_CBL_PATA_UNK;
+ 
+@@ -765,14 +758,6 @@ int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
+ 	/* Init the mci's kobject */
+ 	memset(kobj_mci, 0, sizeof(*kobj_mci));
+ 
+-	/* this instance becomes part of the mc_kset */
+-	kobj_mci->kset = &mc_kset;
+-
+-	/* set the name of the mc<id> object */
+-	err = kobject_set_name(kobj_mci, "mc%d", mci->mc_idx);
+-	if (err)
+-		goto fail_out;
+-
+ 	/* Record which module 'owns' this control structure
+ 	 * and bump the ref count of the module
+ 	 */
+@@ -784,13 +769,18 @@ int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
+ 		goto fail_out;
+ 	}
+ 
++	/* this instance becomes part of the mc_kset */
++	kobj_mci->kset = &mc_kset;
++
+ 	/* register the mc<id> kobject to the mc_kset */
+-	err = kobject_register(kobj_mci);
++	err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
++				   "mc%d", mci->mc_idx);
+ 	if (err) {
+ 		debugf1("%s()Failed to register '.../edac/mc%d'\n",
+ 			__func__, mci->mc_idx);
+ 		goto kobj_reg_fail;
+ 	}
++	kobject_uevent(kobj_mci, KOBJ_ADD);
+ 
+ 	/* At this point, to 'free' the control struct,
+ 	 * edac_mc_unregister_sysfs_main_kobj() must be used
+@@ -818,7 +808,7 @@ fail_out:
+ void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
+ {
+ 	/* delete the kobj from the mc_kset */
+-	kobject_unregister(&mci->edac_mci_kobj);
++	kobject_put(&mci->edac_mci_kobj);
  }
  
- struct sv_cable_table {
-@@ -231,7 +231,6 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
- 	return ata_pci_default_filter(adev, mask);
+ #define EDAC_DEVICE_SYMLINK	"device"
+@@ -933,7 +923,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+ fail1:
+ 	for (i--; i >= 0; i--) {
+ 		if (csrow->nr_pages > 0) {
+-			kobject_unregister(&mci->csrows[i].kobj);
++			kobject_put(&mci->csrows[i].kobj);
+ 		}
+ 	}
+ 
+@@ -960,7 +950,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+ 	for (i = 0; i < mci->nr_csrows; i++) {
+ 		if (mci->csrows[i].nr_pages > 0) {
+ 			debugf0("%s()  unreg csrow-%d\n", __func__, i);
+-			kobject_unregister(&mci->csrows[i].kobj);
++			kobject_put(&mci->csrows[i].kobj);
+ 		}
+ 	}
+ 
+@@ -977,7 +967,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+ 	debugf0("%s()  unregister this mci kobj\n", __func__);
+ 
+ 	/* unregister this instance's kobject */
+-	kobject_unregister(&mci->edac_mci_kobj);
++	kobject_put(&mci->edac_mci_kobj);
  }
  
+ 
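
The edac_mc_sysfs.c hunks above follow the same conversion pattern as the edac_device changes: the one-shot kobject_register()/kobject_unregister() pair is replaced by kobject_init_and_add() plus an explicit KOBJ_ADD uevent, and teardown goes through kobject_put(), which drops the reference taken at init and runs the ktype release callback on the last put. As a rough illustration of why put() is the matching teardown call, here is a minimal userspace refcount sketch in plain C; obj_init_and_add() and obj_put() are made-up names for illustration, not the kernel kobject API.

    #include <stdio.h>

    struct obj {
        int refcount;
        const char *name;
    };

    /* analogue of kobject_init_and_add(): set the object up with one reference */
    static int obj_init_and_add(struct obj *o, const char *name)
    {
        o->refcount = 1;
        o->name = name;
        printf("added %s\n", name);
        return 0;
    }

    /* analogue of kobject_put(): drop a reference, release on the last one */
    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0)
            printf("released %s\n", o->name);
    }

    int main(void)
    {
        struct obj csrow;

        if (obj_init_and_add(&csrow, "csrow0"))
            return 1;
        /* ... object is live and may be referenced ... */
        obj_put(&csrow);    /* the last put runs the release step */
        return 0;
    }
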
+diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
+index e0c4a40..7e1374a 100644
+--- a/drivers/edac/edac_module.c
++++ b/drivers/edac/edac_module.c
+@@ -31,7 +31,7 @@ struct workqueue_struct *edac_workqueue;
+  *	need to export to other files in this modules
+  */
+ static struct sysdev_class edac_class = {
+-	set_kset_name("edac"),
++	.name = "edac",
+ };
+ static int edac_class_valid;
+ 
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index 69f5ddd..5b075da 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -162,14 +162,6 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+ 
+ 	debugf0("%s()\n", __func__);
+ 
+-	/* Set the parent and the instance's ktype */
+-	pci->kobj.parent = &edac_pci_top_main_kobj;
+-	pci->kobj.ktype = &ktype_pci_instance;
 -
- /**
-  *	serverworks_set_piomode	-	set initial PIO mode data
-  *	@ap: ATA interface
-@@ -243,7 +242,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
- static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
- {
- 	static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
--	int offset = 1 + (2 * ap->port_no) - adev->devno;
-+	int offset = 1 + 2 * ap->port_no - adev->devno;
- 	int devbits = (2 * ap->port_no + adev->devno) * 4;
- 	u16 csb5_pio;
- 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
-index 453d72b..39627ab 100644
---- a/drivers/ata/pata_via.c
-+++ b/drivers/ata/pata_via.c
-@@ -185,7 +185,8 @@ static int via_cable_detect(struct ata_port *ap) {
- 	if (ata66 & (0x10100000 >> (16 * ap->port_no)))
- 		return ATA_CBL_PATA80;
- 	/* Check with ACPI so we can spot BIOS reported SATA bridges */
--	if (ata_acpi_cbl_80wire(ap))
-+	if (ata_acpi_init_gtm(ap) &&
-+	    ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
- 		return ATA_CBL_PATA80;
- 	return ATA_CBL_PATA40;
+-	err = kobject_set_name(&pci->kobj, "pci%d", idx);
+-	if (err)
+-		return err;
+-
+ 	/* First bump the ref count on the top main kobj, which will
+ 	 * track the number of PCI instances we have, and thus nest
+ 	 * properly on keeping the module loaded
+@@ -181,7 +173,8 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+ 	}
+ 
+ 	/* And now register this new kobject under the main kobj */
+-	err = kobject_register(&pci->kobj);
++	err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
++				   &edac_pci_top_main_kobj, "pci%d", idx);
+ 	if (err != 0) {
+ 		debugf2("%s() failed to register instance pci%d\n",
+ 			__func__, idx);
+@@ -189,6 +182,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+ 		goto error_out;
+ 	}
+ 
++	kobject_uevent(&pci->kobj, KOBJ_ADD);
+ 	debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
+ 
+ 	return 0;
+@@ -211,7 +205,7 @@ void edac_pci_unregister_sysfs_instance_kobj(struct edac_pci_ctl_info *pci)
+ 	 * function release the main reference count and then
+ 	 * kfree the memory
+ 	 */
+-	kobject_unregister(&pci->kobj);
++	kobject_put(&pci->kobj);
  }
-diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
-index 7116a9e..99c92ed 100644
---- a/drivers/ata/pata_winbond.c
-+++ b/drivers/ata/pata_winbond.c
-@@ -92,29 +92,33 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ 
+ /***************************** EDAC PCI sysfs root **********************/
+@@ -364,14 +358,6 @@ int edac_pci_main_kobj_setup(void)
+ 		goto decrement_count_fail;
+ 	}
+ 
+-	/* Need the kobject hook ups, and name setting */
+-	edac_pci_top_main_kobj.ktype = &ktype_edac_pci_main_kobj;
+-	edac_pci_top_main_kobj.parent = &edac_class->kset.kobj;
+-
+-	err = kobject_set_name(&edac_pci_top_main_kobj, "pci");
+-	if (err)
+-		goto decrement_count_fail;
+-
+ 	/* Bump the reference count on this module to ensure the
+ 	 * module isn't unloaded until we deconstruct the top
+ 	 * level main kobj for EDAC PCI
+@@ -383,23 +369,24 @@ int edac_pci_main_kobj_setup(void)
+ 	}
+ 
+ 	/* Instantiate the pci object */
+-	/* FIXME: maybe new sysdev_create_subdir() */
+-	err = kobject_register(&edac_pci_top_main_kobj);
++	err = kobject_init_and_add(&edac_pci_top_main_kobj, &ktype_edac_pci_main_kobj,
++				   &edac_class->kset.kobj, "pci");
+ 	if (err) {
+ 		debugf1("Failed to register '.../edac/pci'\n");
+-		goto kobject_register_fail;
++		goto kobject_init_and_add_fail;
+ 	}
+ 
+ 	/* At this point, to 'release' the top level kobject
+ 	 * for EDAC PCI, then edac_pci_main_kobj_teardown()
+ 	 * must be used, for resources to be cleaned up properly
+ 	 */
++	kobject_uevent(&edac_pci_top_main_kobj, KOBJ_ADD);
+ 	debugf1("Registered '.../edac/pci' kobject\n");
+ 
+ 	return 0;
+ 
+ 	/* Error unwind stack */
+-kobject_register_fail:
++kobject_init_and_add_fail:
+ 	module_put(THIS_MODULE);
+ 
+ decrement_count_fail:
+@@ -424,9 +411,9 @@ static void edac_pci_main_kobj_teardown(void)
+ 	 * main kobj
+ 	 */
+ 	if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
+-		debugf0("%s() called kobject_unregister on main kobj\n",
++		debugf0("%s() called kobject_put on main kobj\n",
+ 			__func__);
+-		kobject_unregister(&edac_pci_top_main_kobj);
++		kobject_put(&edac_pci_top_main_kobj);
+ 	}
  }
  
+diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
+index 9007d06..9032091 100644
+--- a/drivers/edac/pasemi_edac.c
++++ b/drivers/edac/pasemi_edac.c
+@@ -225,7 +225,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
+ 		EDAC_FLAG_NONE;
+ 	mci->mod_name = MODULE_NAME;
+ 	mci->dev_name = pci_name(pdev);
+-	mci->ctl_name = "pasemi,1682m-mc";
++	mci->ctl_name = "pasemi,pwrficient-mc";
+ 	mci->edac_check = pasemi_edac_check;
+ 	mci->ctl_page_to_phys = NULL;
+ 	pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub);
+@@ -297,4 +297,4 @@ module_exit(pasemi_edac_exit);
  
--static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
-+static unsigned int winbond_data_xfer(struct ata_device *dev,
-+			unsigned char *buf, unsigned int buflen, int rw)
- {
--	struct ata_port *ap = adev->link->ap;
-+	struct ata_port *ap = dev->link->ap;
- 	int slop = buflen & 3;
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Egor Martovetsky <egor at pasemi.com>");
+-MODULE_DESCRIPTION("MC support for PA Semi PA6T-1682M memory controller");
++MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
+diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
+index 60f1a89..7e73cba 100644
+--- a/drivers/firewire/fw-cdev.c
++++ b/drivers/firewire/fw-cdev.c
+@@ -206,12 +206,13 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+ 
+ 	event->closure	     = client->bus_reset_closure;
+ 	event->type          = FW_CDEV_EVENT_BUS_RESET;
++	event->generation    = client->device->generation;
++	smp_rmb();           /* node_id must not be older than generation */
+ 	event->node_id       = client->device->node_id;
+ 	event->local_node_id = card->local_node->node_id;
+ 	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
+ 	event->irm_node_id   = card->irm_node->node_id;
+ 	event->root_node_id  = card->root_node->node_id;
+-	event->generation    = card->generation;
+ }
  
--	if (ata_id_has_dword_io(adev->id)) {
--		if (write_data)
--			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
--		else
-+	if (ata_id_has_dword_io(dev->id)) {
-+		if (rw == READ)
- 			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-+		else
-+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ static void
+diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
+index 56681b3..de9066e 100644
+--- a/drivers/firewire/fw-device.c
++++ b/drivers/firewire/fw-device.c
+@@ -27,6 +27,7 @@
+ #include <linux/idr.h>
+ #include <linux/rwsem.h>
+ #include <asm/semaphore.h>
++#include <asm/system.h>
+ #include <linux/ctype.h>
+ #include "fw-transaction.h"
+ #include "fw-topology.h"
+@@ -182,9 +183,14 @@ static void fw_device_release(struct device *dev)
  
- 		if (unlikely(slop)) {
--			__le32 pad = 0;
--			if (write_data) {
--				memcpy(&pad, buf + buflen - slop, slop);
--				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
--			} else {
-+			u32 pad;
-+			if (rw == READ) {
- 				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
- 				memcpy(buf + buflen - slop, &pad, slop);
-+			} else {
-+				memcpy(&pad, buf + buflen - slop, slop);
-+				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
- 			}
-+			buflen += 4 - slop;
- 		}
- 	} else
--		ata_data_xfer(adev, buf, buflen, write_data);
-+		buflen = ata_data_xfer(dev, buf, buflen, rw);
+ int fw_device_enable_phys_dma(struct fw_device *device)
+ {
++	int generation = device->generation;
 +
-+	return buflen;
++	/* device->node_id, accessed below, must not be older than generation */
++	smp_rmb();
++
+ 	return device->card->driver->enable_phys_dma(device->card,
+ 						     device->node_id,
+-						     device->generation);
++						     generation);
  }
+ EXPORT_SYMBOL(fw_device_enable_phys_dma);
  
- static struct scsi_host_template winbond_sht = {
-@@ -191,7 +195,7 @@ static __init int winbond_init_one(unsigned long port)
- 	reg = winbond_readcfg(port, 0x81);
+@@ -384,17 +390,21 @@ complete_transaction(struct fw_card *card, int rcode,
+ 	complete(&callback_data->done);
+ }
  
- 	if (!(reg & 0x03))		/* Disabled */
--		return 0;
-+		return -ENODEV;
+-static int read_rom(struct fw_device *device, int index, u32 * data)
++static int
++read_rom(struct fw_device *device, int generation, int index, u32 *data)
+ {
+ 	struct read_quadlet_callback_data callback_data;
+ 	struct fw_transaction t;
+ 	u64 offset;
  
- 	for (i = 0; i < 2 ; i ++) {
- 		unsigned long cmd_port = 0x1F0 - (0x80 * i);
-diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
-index bd4c2a3..8e1b7e9 100644
---- a/drivers/ata/pdc_adma.c
-+++ b/drivers/ata/pdc_adma.c
-@@ -321,8 +321,9 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
- 	u8  *buf = pp->pkt, *last_buf = NULL;
- 	int i = (2 + buf[3]) * 8;
- 	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
-+	unsigned int si;
++	/* device->node_id, accessed below, must not be older than generation */
++	smp_rmb();
++
+ 	init_completion(&callback_data.done);
  
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		u32 addr;
- 		u32 len;
+ 	offset = 0xfffff0000400ULL + index * 4;
+ 	fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
+-			device->node_id, device->generation, device->max_speed,
++			device->node_id, generation, device->max_speed,
+ 			offset, NULL, 4, complete_transaction, &callback_data);
  
-@@ -455,7 +456,7 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
- 		adma_packet_start(qc);
- 		return 0;
+ 	wait_for_completion(&callback_data.done);
+@@ -404,7 +414,14 @@ static int read_rom(struct fw_device *device, int index, u32 * data)
+ 	return callback_data.rcode;
+ }
  
--	case ATA_PROT_ATAPI_DMA:
-+	case ATAPI_PROT_DMA:
- 		BUG();
- 		break;
+-static int read_bus_info_block(struct fw_device *device)
++/*
++ * Read the bus info block, perform a speed probe, and read all of the rest of
++ * the config ROM.  We do all this with a cached bus generation.  If the bus
++ * generation changes under us, read_bus_info_block will fail and get retried.
++ * It's better to start all over in this case because the node from which we
++ * are reading the ROM may have changed the ROM during the reset.
++ */
++static int read_bus_info_block(struct fw_device *device, int generation)
+ {
+ 	static u32 rom[256];
+ 	u32 stack[16], sp, key;
+@@ -414,7 +431,7 @@ static int read_bus_info_block(struct fw_device *device)
  
-diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
-index d015b4a..922d7b2 100644
---- a/drivers/ata/sata_fsl.c
-+++ b/drivers/ata/sata_fsl.c
-@@ -333,13 +333,14 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
- 	struct prde *prd_ptr_to_indirect_ext = NULL;
- 	unsigned indirect_ext_segment_sz = 0;
- 	dma_addr_t indirect_ext_segment_paddr;
-+	unsigned int si;
+ 	/* First read the bus info block. */
+ 	for (i = 0; i < 5; i++) {
+-		if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
++		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+ 			return -1;
+ 		/*
+ 		 * As per IEEE1212 7.2, during power-up, devices can
+@@ -449,7 +466,8 @@ static int read_bus_info_block(struct fw_device *device)
+ 			device->max_speed = device->card->link_speed;
+ 
+ 		while (device->max_speed > SCODE_100) {
+-			if (read_rom(device, 0, &dummy) == RCODE_COMPLETE)
++			if (read_rom(device, generation, 0, &dummy) ==
++			    RCODE_COMPLETE)
+ 				break;
+ 			device->max_speed--;
+ 		}
+@@ -482,7 +500,7 @@ static int read_bus_info_block(struct fw_device *device)
+ 			return -1;
  
- 	VPRINTK("SATA FSL : cd = 0x%x, prd = 0x%x\n", cmd_desc, prd);
+ 		/* Read header quadlet for the block to get the length. */
+-		if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
++		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+ 			return -1;
+ 		end = i + (rom[i] >> 16) + 1;
+ 		i++;
+@@ -501,7 +519,8 @@ static int read_bus_info_block(struct fw_device *device)
+ 		 * it references another block, and push it in that case.
+ 		 */
+ 		while (i < end) {
+-			if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
++			if (read_rom(device, generation, i, &rom[i]) !=
++			    RCODE_COMPLETE)
+ 				return -1;
+ 			if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
+ 			    sp < ARRAY_SIZE(stack))
+@@ -648,7 +667,7 @@ static void fw_device_init(struct work_struct *work)
+ 	 * device.
+ 	 */
+ 
+-	if (read_bus_info_block(device) < 0) {
++	if (read_bus_info_block(device, device->generation) < 0) {
+ 		if (device->config_rom_retries < MAX_RETRIES) {
+ 			device->config_rom_retries++;
+ 			schedule_delayed_work(&device->work, RETRY_DELAY);
+@@ -801,6 +820,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ 
+ 		device = node->data;
+ 		device->node_id = node->node_id;
++		smp_wmb();  /* update node_id before generation */
+ 		device->generation = card->generation;
+ 		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
+ 			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
+index 894d4a9..0854fe2 100644
+--- a/drivers/firewire/fw-device.h
++++ b/drivers/firewire/fw-device.h
+@@ -35,6 +35,18 @@ struct fw_attribute_group {
+ 	struct attribute *attrs[11];
+ };
  
- 	indirect_ext_segment_paddr = cmd_desc_paddr +
- 	    SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
++/*
++ * Note, fw_device.generation always has to be read before fw_device.node_id.
++ * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
++ * to an outdated node_id if the generation was updated in the meantime due
++ * to a bus reset.
++ *
++ * Likewise, fw-core will take care to update .node_id before .generation so
++ * that whenever fw_device.generation is current WRT the actual bus generation,
++ * fw_device.node_id is guaranteed to be current too.
++ *
++ * The same applies to fw_device.card->node_id vs. fw_device.generation.
++ */
+ struct fw_device {
+ 	atomic_t state;
+ 	struct fw_node *node;
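
The comment block added to fw-device.h above spells out the ordering contract behind the new smp_wmb()/smp_rmb() calls in fw-device.c and fw-cdev.c: the updater stores node_id before generation, and readers load generation before node_id, so whenever the observed generation is current the observed node_id is current as well. Below is a self-contained userspace analogue of that publish/read ordering using C11 release/acquire atomics instead of the kernel barriers; the variable and function names are illustrative only.

    #include <stdatomic.h>
    #include <stdio.h>

    static int node_id;                 /* plain data, published below */
    static _Atomic int generation;      /* acts as the publication marker */

    static void bus_reset_update(int new_node_id, int new_generation)
    {
        node_id = new_node_id;
        /* release: the node_id store is visible before the new generation */
        atomic_store_explicit(&generation, new_generation, memory_order_release);
    }

    static void send_request(void)
    {
        /* acquire: seeing generation N guarantees seeing the matching node_id */
        int gen = atomic_load_explicit(&generation, memory_order_acquire);
        printf("sending to node %d in generation %d\n", node_id, gen);
    }

    int main(void)
    {
        bus_reset_update(3, 7);
        send_request();
        return 0;
    }
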
+diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
+index 436a855..7ebad3c 100644
+--- a/drivers/firewire/fw-ohci.c
++++ b/drivers/firewire/fw-ohci.c
+@@ -98,17 +98,48 @@ struct context;
+ typedef int (*descriptor_callback_t)(struct context *ctx,
+ 				     struct descriptor *d,
+ 				     struct descriptor *last);
++
++/*
++ * A buffer that contains a block of DMA-able coherent memory used for
++ * storing a portion of a DMA descriptor program.
++ */
++struct descriptor_buffer {
++	struct list_head list;
++	dma_addr_t buffer_bus;
++	size_t buffer_size;
++	size_t used;
++	struct descriptor buffer[0];
++};
++
+ struct context {
+ 	struct fw_ohci *ohci;
+ 	u32 regs;
++	int total_allocation;
  
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		dma_addr_t sg_addr = sg_dma_address(sg);
- 		u32 sg_len = sg_dma_len(sg);
+-	struct descriptor *buffer;
+-	dma_addr_t buffer_bus;
+-	size_t buffer_size;
+-	struct descriptor *head_descriptor;
+-	struct descriptor *tail_descriptor;
+-	struct descriptor *tail_descriptor_last;
+-	struct descriptor *prev_descriptor;
++	/*
++	 * List of page-sized buffers for storing DMA descriptors.
++	 * Head of list contains buffers in use and tail of list contains
++	 * free buffers.
++	 */
++	struct list_head buffer_list;
++
++	/*
++	 * Pointer to a buffer inside buffer_list that contains the tail
++	 * end of the current DMA program.
++	 */
++	struct descriptor_buffer *buffer_tail;
++
++	/*
++	 * The descriptor containing the branch address of the first
++	 * descriptor that has not yet been filled by the device.
++	 */
++	struct descriptor *last;
++
++	/*
++	 * The last descriptor in the DMA program.  It contains the branch
++	 * address that must be updated upon appending a new descriptor.
++	 */
++	struct descriptor *prev;
  
-@@ -417,7 +418,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
- 	}
+ 	descriptor_callback_t callback;
  
- 	/* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
--	if (is_atapi_taskfile(&qc->tf)) {
-+	if (ata_is_atapi(qc->tf.protocol)) {
- 		desc_info |= ATAPI_CMD;
- 		memset((void *)&cd->acmd, 0, 32);
- 		memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len);
-diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
-index 323c087..96e614a 100644
---- a/drivers/ata/sata_inic162x.c
-+++ b/drivers/ata/sata_inic162x.c
-@@ -585,7 +585,7 @@ static struct ata_port_operations inic_port_ops = {
+@@ -125,6 +156,7 @@ struct context {
+ struct iso_context {
+ 	struct fw_iso_context base;
+ 	struct context context;
++	int excess_bytes;
+ 	void *header;
+ 	size_t header_length;
  };
+@@ -197,8 +229,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
+ #define SELF_ID_BUF_SIZE		0x800
+ #define OHCI_TCODE_PHY_PACKET		0x0e
+ #define OHCI_VERSION_1_1		0x010010
+-#define ISO_BUFFER_SIZE			(64 * 1024)
+-#define AT_BUFFER_SIZE			4096
  
- static struct ata_port_info inic_port_info = {
--	/* For some reason, ATA_PROT_ATAPI is broken on this
-+	/* For some reason, ATAPI_PROT_PIO is broken on this
- 	 * controller, and no, PIO_POLLING does't fix it.  It somehow
- 	 * manages to report the wrong ireason and ignoring ireason
- 	 * results in machine lock up.  Tell libata to always prefer
-diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
-index 37b850a..7e72463 100644
---- a/drivers/ata/sata_mv.c
-+++ b/drivers/ata/sata_mv.c
-@@ -1136,9 +1136,10 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
- 	struct mv_port_priv *pp = qc->ap->private_data;
- 	struct scatterlist *sg;
- 	struct mv_sg *mv_sg, *last_sg = NULL;
-+	unsigned int si;
- 
- 	mv_sg = pp->sg_tbl;
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		dma_addr_t addr = sg_dma_address(sg);
- 		u32 sg_len = sg_dma_len(sg);
+ static char ohci_driver_name[] = KBUILD_MODNAME;
  
-diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
-index ed5dc7c..a0f98fd 100644
---- a/drivers/ata/sata_nv.c
-+++ b/drivers/ata/sata_nv.c
-@@ -1336,21 +1336,18 @@ static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
- static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
+@@ -455,71 +485,108 @@ find_branch_descriptor(struct descriptor *d, int z)
+ static void context_tasklet(unsigned long data)
  {
- 	struct nv_adma_port_priv *pp = qc->ap->private_data;
--	unsigned int idx;
- 	struct nv_adma_prd *aprd;
- 	struct scatterlist *sg;
-+	unsigned int si;
- 
- 	VPRINTK("ENTER\n");
+ 	struct context *ctx = (struct context *) data;
+-	struct fw_ohci *ohci = ctx->ohci;
+ 	struct descriptor *d, *last;
+ 	u32 address;
+ 	int z;
++	struct descriptor_buffer *desc;
  
--	idx = 0;
+-	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
+-				ctx->buffer_size, DMA_TO_DEVICE);
 -
--	ata_for_each_sg(sg, qc) {
--		aprd = (idx < 5) ? &cpb->aprd[idx] :
--			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
--		nv_adma_fill_aprd(qc, sg, idx, aprd);
--		idx++;
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-+		aprd = (si < 5) ? &cpb->aprd[si] :
-+			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
-+		nv_adma_fill_aprd(qc, sg, si, aprd);
+-	d    = ctx->tail_descriptor;
+-	last = ctx->tail_descriptor_last;
+-
++	desc = list_entry(ctx->buffer_list.next,
++			struct descriptor_buffer, list);
++	last = ctx->last;
+ 	while (last->branch_address != 0) {
++		struct descriptor_buffer *old_desc = desc;
+ 		address = le32_to_cpu(last->branch_address);
+ 		z = address & 0xf;
+-		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
++		address &= ~0xf;
++
++		/* If the branch address points to a buffer outside of the
++		 * current buffer, advance to the next buffer. */
++		if (address < desc->buffer_bus ||
++				address >= desc->buffer_bus + desc->used)
++			desc = list_entry(desc->list.next,
++					struct descriptor_buffer, list);
++		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
+ 		last = find_branch_descriptor(d, z);
+ 
+ 		if (!ctx->callback(ctx, d, last))
+ 			break;
+ 
+-		ctx->tail_descriptor      = d;
+-		ctx->tail_descriptor_last = last;
++		if (old_desc != desc) {
++			/* If we've advanced to the next buffer, move the
++			 * previous buffer to the free list. */
++			unsigned long flags;
++			old_desc->used = 0;
++			spin_lock_irqsave(&ctx->ohci->lock, flags);
++			list_move_tail(&old_desc->list, &ctx->buffer_list);
++			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
++		}
++		ctx->last = last;
  	}
--	if (idx > 5)
-+	if (si > 5)
- 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
- 	else
- 		cpb->next_aprd = cpu_to_le64(0);
-@@ -1995,17 +1992,14 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
- {
- 	struct ata_port *ap = qc->ap;
- 	struct scatterlist *sg;
--	unsigned int idx;
- 	struct nv_swncq_port_priv *pp = ap->private_data;
- 	struct ata_prd *prd;
+ }
+ 
++/*
++ * Allocate a new buffer and add it to the list of free buffers for this
++ * context.  Must be called with ohci->lock held.
++ */
++static int
++context_add_buffer(struct context *ctx)
++{
++	struct descriptor_buffer *desc;
++	dma_addr_t bus_addr;
++	int offset;
++
++	/*
++	 * 16MB of descriptors should be far more than enough for any DMA
++	 * program.  This will catch run-away userspace or DoS attacks.
++	 */
++	if (ctx->total_allocation >= 16*1024*1024)
++		return -ENOMEM;
++
++	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
++			&bus_addr, GFP_ATOMIC);
++	if (!desc)
++		return -ENOMEM;
++
++	offset = (void *)&desc->buffer - (void *)desc;
++	desc->buffer_size = PAGE_SIZE - offset;
++	desc->buffer_bus = bus_addr + offset;
++	desc->used = 0;
++
++	list_add_tail(&desc->list, &ctx->buffer_list);
++	ctx->total_allocation += PAGE_SIZE;
++
++	return 0;
++}
++
+ static int
+ context_init(struct context *ctx, struct fw_ohci *ohci,
+-	     size_t buffer_size, u32 regs,
+-	     descriptor_callback_t callback)
++	     u32 regs, descriptor_callback_t callback)
+ {
+ 	ctx->ohci = ohci;
+ 	ctx->regs = regs;
+-	ctx->buffer_size = buffer_size;
+-	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
+-	if (ctx->buffer == NULL)
++	ctx->total_allocation = 0;
++
++	INIT_LIST_HEAD(&ctx->buffer_list);
++	if (context_add_buffer(ctx) < 0)
+ 		return -ENOMEM;
+ 
++	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
++			struct descriptor_buffer, list);
++
+ 	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
+ 	ctx->callback = callback;
+ 
+-	ctx->buffer_bus =
+-		dma_map_single(ohci->card.device, ctx->buffer,
+-			       buffer_size, DMA_TO_DEVICE);
+-	if (dma_mapping_error(ctx->buffer_bus)) {
+-		kfree(ctx->buffer);
+-		return -ENOMEM;
+-	}
 -
--	WARN_ON(qc->__sg == NULL);
--	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-+	unsigned int si, idx;
+-	ctx->head_descriptor      = ctx->buffer;
+-	ctx->prev_descriptor      = ctx->buffer;
+-	ctx->tail_descriptor      = ctx->buffer;
+-	ctx->tail_descriptor_last = ctx->buffer;
+-
+ 	/*
+ 	 * We put a dummy descriptor in the buffer that has a NULL
+ 	 * branch address and looks like it's been sent.  That way we
+-	 * have a descriptor to append DMA programs to.  Also, the
+-	 * ring buffer invariant is that it always has at least one
+-	 * element so that head == tail means buffer full.
++	 * have a descriptor to append DMA programs to.
+ 	 */
+-
+-	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
+-	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+-	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
+-	ctx->head_descriptor++;
++	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
++	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
++	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
++	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
++	ctx->last = ctx->buffer_tail->buffer;
++	ctx->prev = ctx->buffer_tail->buffer;
+ 
+ 	return 0;
+ }
+@@ -528,35 +595,42 @@ static void
+ context_release(struct context *ctx)
+ {
+ 	struct fw_card *card = &ctx->ohci->card;
++	struct descriptor_buffer *desc, *tmp;
+ 
+-	dma_unmap_single(card->device, ctx->buffer_bus,
+-			 ctx->buffer_size, DMA_TO_DEVICE);
+-	kfree(ctx->buffer);
++	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
++		dma_free_coherent(card->device, PAGE_SIZE, desc,
++			desc->buffer_bus -
++			((void *)&desc->buffer - (void *)desc));
+ }
+ 
++/* Must be called with ohci->lock held */
+ static struct descriptor *
+ context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+ {
+-	struct descriptor *d, *tail, *end;
+-
+-	d = ctx->head_descriptor;
+-	tail = ctx->tail_descriptor;
+-	end = ctx->buffer + ctx->buffer_size / sizeof(*d);
+-
+-	if (d + z <= tail) {
+-		goto has_space;
+-	} else if (d > tail && d + z <= end) {
+-		goto has_space;
+-	} else if (d > tail && ctx->buffer + z <= tail) {
+-		d = ctx->buffer;
+-		goto has_space;
+-	}
++	struct descriptor *d = NULL;
++	struct descriptor_buffer *desc = ctx->buffer_tail;
++
++	if (z * sizeof(*d) > desc->buffer_size)
++		return NULL;
  
- 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
+-	return NULL;
++	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
++		/* No room for the descriptor in this buffer, so advance to the
++		 * next one. */
++
++		if (desc->list.next == &ctx->buffer_list) {
++			/* If there is no free buffer next in the list,
++			 * allocate one. */
++			if (context_add_buffer(ctx) < 0)
++				return NULL;
++		}
++		desc = list_entry(desc->list.next,
++				struct descriptor_buffer, list);
++		ctx->buffer_tail = desc;
++	}
+ 
+- has_space:
++	d = desc->buffer + desc->used / sizeof(*d);
+ 	memset(d, 0, z * sizeof(*d));
+-	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
++	*d_bus = desc->buffer_bus + desc->used;
+ 
+ 	return d;
+ }
+@@ -566,7 +640,7 @@ static void context_run(struct context *ctx, u32 extra)
+ 	struct fw_ohci *ohci = ctx->ohci;
+ 
+ 	reg_write(ohci, COMMAND_PTR(ctx->regs),
+-		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
++		  le32_to_cpu(ctx->last->branch_address));
+ 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+ 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+ 	flush_writes(ohci);
+@@ -576,15 +650,13 @@ static void context_append(struct context *ctx,
+ 			   struct descriptor *d, int z, int extra)
+ {
+ 	dma_addr_t d_bus;
++	struct descriptor_buffer *desc = ctx->buffer_tail;
+ 
+-	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
++	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
+ 
+-	ctx->head_descriptor = d + z + extra;
+-	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
+-	ctx->prev_descriptor = find_branch_descriptor(d, z);
+-
+-	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
+-				   ctx->buffer_size, DMA_TO_DEVICE);
++	desc->used += (z + extra) * sizeof(*d);
++	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
++	ctx->prev = find_branch_descriptor(d, z);
+ 
+ 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+ 	flush_writes(ctx->ohci);
+@@ -1078,6 +1150,13 @@ static irqreturn_t irq_handler(int irq, void *data)
+ 	if (unlikely(event & OHCI1394_postedWriteErr))
+ 		fw_error("PCI posted write error\n");
  
- 	idx = 0;
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		u32 addr, offset;
- 		u32 sg_len, len;
++	if (unlikely(event & OHCI1394_cycleTooLong)) {
++		if (printk_ratelimit())
++			fw_notify("isochronous cycle too long\n");
++		reg_write(ohci, OHCI1394_LinkControlSet,
++			  OHCI1394_LinkControl_cycleMaster);
++	}
++
+ 	if (event & OHCI1394_cycle64Seconds) {
+ 		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ 		if ((cycle_time & 0x80000000) == 0)
+@@ -1151,8 +1230,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
+ 		  OHCI1394_RQPkt | OHCI1394_RSPkt |
+ 		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
+ 		  OHCI1394_isochRx | OHCI1394_isochTx |
+-		  OHCI1394_postedWriteErr | OHCI1394_cycle64Seconds |
+-		  OHCI1394_masterIntEnable);
++		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
++		  OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);
+ 
+ 	/* Activate link_on bit and contender bit in our self ID packets.*/
+ 	if (ohci_update_phy_reg(card, 4, 0,
+@@ -1408,9 +1487,13 @@ static int handle_ir_dualbuffer_packet(struct context *context,
+ 	void *p, *end;
+ 	int i;
  
-@@ -2027,8 +2021,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
- 		}
+-	if (db->first_res_count > 0 && db->second_res_count > 0)
+-		/* This descriptor isn't done yet, stop iteration. */
+-		return 0;
++	if (db->first_res_count > 0 && db->second_res_count > 0) {
++		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
++			/* This descriptor isn't done yet, stop iteration. */
++			return 0;
++		}
++		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
++	}
+ 
+ 	header_length = le16_to_cpu(db->first_req_count) -
+ 		le16_to_cpu(db->first_res_count);
+@@ -1429,11 +1512,15 @@ static int handle_ir_dualbuffer_packet(struct context *context,
+ 		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+ 		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
+ 		i += ctx->base.header_size;
++		ctx->excess_bytes +=
++			(le32_to_cpu(*(u32 *)(p + 4)) >> 16) & 0xffff;
+ 		p += ctx->base.header_size + 4;
+ 	}
+-
+ 	ctx->header_length = i;
+ 
++	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
++		le16_to_cpu(db->second_res_count);
++
+ 	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
+ 		ir_header = (__le32 *) (db + 1);
+ 		ctx->base.callback(&ctx->base,
+@@ -1452,24 +1539,24 @@ static int handle_ir_packet_per_buffer(struct context *context,
+ {
+ 	struct iso_context *ctx =
+ 		container_of(context, struct iso_context, context);
+-	struct descriptor *pd = d + 1;
++	struct descriptor *pd;
+ 	__le32 *ir_header;
+-	size_t header_length;
+-	void *p, *end;
+-	int i, z;
++	void *p;
++	int i;
+ 
+-	if (pd->res_count == pd->req_count)
++	for (pd = d; pd <= last; pd++) {
++		if (pd->transfer_status)
++			break;
++	}
++	if (pd > last)
+ 		/* Descriptor(s) not done yet, stop iteration */
+ 		return 0;
+ 
+-	header_length = le16_to_cpu(d->req_count);
+-
+ 	i   = ctx->header_length;
+-	z   = le32_to_cpu(pd->branch_address) & 0xf;
+-	p   = d + z;
+-	end = p + header_length;
++	p   = last + 1;
+ 
+-	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
++	if (ctx->base.header_size > 0 &&
++			i + ctx->base.header_size <= PAGE_SIZE) {
+ 		/*
+ 		 * The iso header is byteswapped to little endian by
+ 		 * the controller, but the remaining header quadlets
+@@ -1478,14 +1565,11 @@ static int handle_ir_packet_per_buffer(struct context *context,
+ 		 */
+ 		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+ 		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
+-		i += ctx->base.header_size;
+-		p += ctx->base.header_size + 4;
++		ctx->header_length += ctx->base.header_size;
+ 	}
+ 
+-	ctx->header_length = i;
+-
+-	if (le16_to_cpu(pd->control) & DESCRIPTOR_IRQ_ALWAYS) {
+-		ir_header = (__le32 *) (d + z);
++	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
++		ir_header = (__le32 *) p;
+ 		ctx->base.callback(&ctx->base,
+ 				   le32_to_cpu(ir_header[0]) & 0xffff,
+ 				   ctx->header_length, ctx->header,
+@@ -1493,7 +1577,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
+ 		ctx->header_length = 0;
  	}
  
--	if (idx)
--		prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-+	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+-
+ 	return 1;
  }
  
- static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
-diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
-index 7914def..a07d319 100644
---- a/drivers/ata/sata_promise.c
-+++ b/drivers/ata/sata_promise.c
-@@ -450,19 +450,19 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
- 	struct pdc_port_priv *pp = ap->private_data;
- 	u8 *buf = pp->pkt;
- 	u32 *buf32 = (u32 *) buf;
--	unsigned int dev_sel, feature, nbytes;
-+	unsigned int dev_sel, feature;
+@@ -1559,8 +1642,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
+ 	if (ctx->header == NULL)
+ 		goto out;
  
- 	/* set control bits (byte 0), zero delay seq id (byte 3),
- 	 * and seq id (byte 2)
+-	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
+-			      regs, callback);
++	retval = context_init(&ctx->context, ohci, regs, callback);
+ 	if (retval < 0)
+ 		goto out_with_header;
+ 
+@@ -1775,19 +1857,6 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+ 	 * packet, retransmit or terminate..
  	 */
- 	switch (qc->tf.protocol) {
--	case ATA_PROT_ATAPI_DMA:
-+	case ATAPI_PROT_DMA:
- 		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
- 			buf32[0] = cpu_to_le32(PDC_PKT_READ);
+ 
+-	if (packet->skip) {
+-		d = context_get_descriptors(&ctx->context, 2, &d_bus);
+-		if (d == NULL)
+-			return -ENOMEM;
+-
+-		db = (struct db_descriptor *) d;
+-		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+-					  DESCRIPTOR_BRANCH_ALWAYS |
+-					  DESCRIPTOR_WAIT);
+-		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+-		context_append(&ctx->context, d, 2, 0);
+-	}
+-
+ 	p = packet;
+ 	z = 2;
+ 
+@@ -1815,11 +1884,18 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+ 		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+ 					  DESCRIPTOR_BRANCH_ALWAYS);
+ 		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+-		db->first_req_count = cpu_to_le16(header_size);
++		if (p->skip && rest == p->payload_length) {
++			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
++			db->first_req_count = db->first_size;
++		} else {
++			db->first_req_count = cpu_to_le16(header_size);
++		}
+ 		db->first_res_count = db->first_req_count;
+ 		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
+ 
+-		if (offset + rest < PAGE_SIZE)
++		if (p->skip && rest == p->payload_length)
++			length = 4;
++		else if (offset + rest < PAGE_SIZE)
+ 			length = rest;
  		else
- 			buf32[0] = 0;
- 		break;
--	case ATA_PROT_ATAPI_NODATA:
-+	case ATAPI_PROT_NODATA:
- 		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
- 		break;
- 	default:
-@@ -473,45 +473,37 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
- 	buf32[2] = 0;				/* no next-packet */
+ 			length = PAGE_SIZE - offset;
+@@ -1835,7 +1911,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+ 		context_append(&ctx->context, d, z, header_z);
+ 		offset = (offset + length) & ~PAGE_MASK;
+ 		rest -= length;
+-		page++;
++		if (offset == 0)
++			page++;
+ 	}
  
- 	/* select drive */
--	if (sata_scr_valid(&ap->link)) {
-+	if (sata_scr_valid(&ap->link))
- 		dev_sel = PDC_DEVICE_SATA;
--	} else {
--		dev_sel = ATA_DEVICE_OBS;
--		if (qc->dev->devno != 0)
--			dev_sel |= ATA_DEV1;
--	}
-+	else
-+		dev_sel = qc->tf.device;
+ 	return 0;
+@@ -1849,67 +1926,70 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
+ {
+ 	struct iso_context *ctx = container_of(base, struct iso_context, base);
+ 	struct descriptor *d = NULL, *pd = NULL;
+-	struct fw_iso_packet *p;
++	struct fw_iso_packet *p = packet;
+ 	dma_addr_t d_bus, page_bus;
+ 	u32 z, header_z, rest;
+-	int i, page, offset, packet_count, header_size;
+-
+-	if (packet->skip) {
+-		d = context_get_descriptors(&ctx->context, 1, &d_bus);
+-		if (d == NULL)
+-			return -ENOMEM;
+-
+-		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
+-					 DESCRIPTOR_INPUT_LAST |
+-					 DESCRIPTOR_BRANCH_ALWAYS |
+-					 DESCRIPTOR_WAIT);
+-		context_append(&ctx->context, d, 1, 0);
+-	}
+-
+-	/* one descriptor for header, one for payload */
+-	/* FIXME: handle cases where we need multiple desc. for payload */
+-	z = 2;
+-	p = packet;
++	int i, j, length;
++	int page, offset, packet_count, header_size, payload_per_buffer;
+ 
+ 	/*
+ 	 * The OHCI controller puts the status word in the
+ 	 * buffer too, so we need 4 extra bytes per packet.
+ 	 */
+ 	packet_count = p->header_length / ctx->base.header_size;
+-	header_size  = packet_count * (ctx->base.header_size + 4);
++	header_size  = ctx->base.header_size + 4;
+ 
+ 	/* Get header size in number of descriptors. */
+ 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+ 	page     = payload >> PAGE_SHIFT;
+ 	offset   = payload & ~PAGE_MASK;
+-	rest     = p->payload_length;
++	payload_per_buffer = p->payload_length / packet_count;
+ 
+ 	for (i = 0; i < packet_count; i++) {
+ 		/* d points to the header descriptor */
++		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
+ 		d = context_get_descriptors(&ctx->context,
+-					    z + header_z, &d_bus);
++				z + header_z, &d_bus);
+ 		if (d == NULL)
+ 			return -ENOMEM;
+ 
+-		d->control      = cpu_to_le16(DESCRIPTOR_INPUT_MORE);
++		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
++					      DESCRIPTOR_INPUT_MORE);
++		if (p->skip && i == 0)
++			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+ 		d->req_count    = cpu_to_le16(header_size);
+ 		d->res_count    = d->req_count;
++		d->transfer_status = 0;
+ 		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
+ 
+-		/* pd points to the payload descriptor */
+-		pd = d + 1;
++		rest = payload_per_buffer;
++		for (j = 1; j < z; j++) {
++			pd = d + j;
++			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
++						  DESCRIPTOR_INPUT_MORE);
 +
- 	buf[12] = (1 << 5) | ATA_REG_DEVICE;
- 	buf[13] = dev_sel;
- 	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
- 	buf[15] = dev_sel; /* once more, waiting for BSY to clear */
++			if (offset + rest < PAGE_SIZE)
++				length = rest;
++			else
++				length = PAGE_SIZE - offset;
++			pd->req_count = cpu_to_le16(length);
++			pd->res_count = pd->req_count;
++			pd->transfer_status = 0;
++
++			page_bus = page_private(buffer->pages[page]);
++			pd->data_address = cpu_to_le32(page_bus + offset);
++
++			offset = (offset + length) & ~PAGE_MASK;
++			rest -= length;
++			if (offset == 0)
++				page++;
++		}
+ 		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
+ 					  DESCRIPTOR_INPUT_LAST |
+ 					  DESCRIPTOR_BRANCH_ALWAYS);
+-		if (p->interrupt)
++		if (p->interrupt && i == packet_count - 1)
+ 			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
  
- 	buf[16] = (1 << 5) | ATA_REG_NSECT;
--	buf[17] = 0x00;
-+	buf[17] = qc->tf.nsect;
- 	buf[18] = (1 << 5) | ATA_REG_LBAL;
--	buf[19] = 0x00;
-+	buf[19] = qc->tf.lbal;
+-		pd->req_count = cpu_to_le16(rest);
+-		pd->res_count = pd->req_count;
+-
+-		page_bus = page_private(buffer->pages[page]);
+-		pd->data_address = cpu_to_le32(page_bus + offset);
+-
+ 		context_append(&ctx->context, d, z, header_z);
+ 	}
  
- 	/* set feature and byte counter registers */
--	if (qc->tf.protocol != ATA_PROT_ATAPI_DMA) {
-+	if (qc->tf.protocol != ATAPI_PROT_DMA)
- 		feature = PDC_FEATURE_ATAPI_PIO;
--		/* set byte counter register to real transfer byte count */
--		nbytes = qc->nbytes;
--		if (nbytes > 0xffff)
--			nbytes = 0xffff;
--	} else {
-+	else
- 		feature = PDC_FEATURE_ATAPI_DMA;
--		/* set byte counter register to 0 */
--		nbytes = 0;
--	}
+@@ -1923,16 +2003,22 @@ ohci_queue_iso(struct fw_iso_context *base,
+ 	       unsigned long payload)
+ {
+ 	struct iso_context *ctx = container_of(base, struct iso_context, base);
++	unsigned long flags;
++	int retval;
+ 
++	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
+ 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
+-		return ohci_queue_iso_transmit(base, packet, buffer, payload);
++		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
+ 	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
+-		return ohci_queue_iso_receive_dualbuffer(base, packet,
++		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
+ 							 buffer, payload);
+ 	else
+-		return ohci_queue_iso_receive_packet_per_buffer(base, packet,
++		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+ 								buffer,
+ 								payload);
++	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
 +
- 	buf[20] = (1 << 5) | ATA_REG_FEATURE;
- 	buf[21] = feature;
- 	buf[22] = (1 << 5) | ATA_REG_BYTEL;
--	buf[23] = nbytes & 0xFF;
-+	buf[23] = qc->tf.lbam;
- 	buf[24] = (1 << 5) | ATA_REG_BYTEH;
--	buf[25] = (nbytes >> 8) & 0xFF;
-+	buf[25] = qc->tf.lbah;
++	return retval;
+ }
  
- 	/* send ATAPI packet command 0xA0 */
- 	buf[26] = (1 << 5) | ATA_REG_CMD;
--	buf[27] = ATA_CMD_PACKET;
-+	buf[27] = qc->tf.command;
+ static const struct fw_card_driver ohci_driver = {
+@@ -2004,10 +2090,10 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+ 	ar_context_init(&ohci->ar_response_ctx, ohci,
+ 			OHCI1394_AsRspRcvContextControlSet);
+ 
+-	context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
++	context_init(&ohci->at_request_ctx, ohci,
+ 		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+ 
+-	context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
++	context_init(&ohci->at_response_ctx, ohci,
+ 		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
  
- 	/* select drive and check DRQ */
- 	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
-@@ -541,17 +533,15 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
- {
- 	struct ata_port *ap = qc->ap;
- 	struct scatterlist *sg;
--	unsigned int idx;
- 	const u32 SG_COUNT_ASIC_BUG = 41*4;
-+	unsigned int si, idx;
-+	u32 len;
+ 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
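
The fw-ohci.c changes above replace the fixed-size DMA program buffer with a list of page-sized descriptor_buffer chunks: context_get_descriptors() hands out space from the buffer at the tail of the program, advances to the next buffer (allocating one if none is free) when a request does not fit, and context_add_buffer() caps total allocation to bound runaway programs. The following is a simplified userspace sketch of that allocation scheme in plain C; it only grows the list, leaves out the driver's recycling of exhausted buffers back to a free list, and all names and sizes are made up.

    #include <stdio.h>
    #include <stdlib.h>

    #define BUF_SIZE  4096          /* stands in for PAGE_SIZE */
    #define MAX_TOTAL (16 * 1024)   /* stands in for the 16 MB cap */

    struct buffer {
        struct buffer *next;
        size_t used;
        unsigned char data[BUF_SIZE];
    };

    struct pool {
        struct buffer *head, *tail; /* tail is the buffer currently being filled */
        size_t total;
    };

    static int pool_add_buffer(struct pool *p)
    {
        struct buffer *b;

        if (p->total >= MAX_TOTAL)
            return -1;              /* allocation budget exhausted */
        b = calloc(1, sizeof(*b));
        if (!b)
            return -1;
        if (p->tail)
            p->tail->next = b;
        else
            p->head = b;
        p->tail = b;
        p->total += sizeof(*b);
        return 0;
    }

    /* hand out len contiguous bytes, moving to a fresh buffer when needed */
    static void *pool_get(struct pool *p, size_t len)
    {
        void *ret;

        if (len > BUF_SIZE)
            return NULL;
        if (!p->tail || len > BUF_SIZE - p->tail->used) {
            if (pool_add_buffer(p) < 0)
                return NULL;
        }
        ret = p->tail->data + p->tail->used;
        p->tail->used += len;
        return ret;
    }

    int main(void)
    {
        struct pool p = { NULL, NULL, 0 };
        void *d = pool_get(&p, 64);

        printf("got %p, pool holds %zu bytes\n", d, p.total);
        return 0;
    }
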
+diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
+index 624ff3e..19ece9b 100644
+--- a/drivers/firewire/fw-sbp2.c
++++ b/drivers/firewire/fw-sbp2.c
+@@ -40,6 +40,7 @@
+ #include <linux/stringify.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
++#include <asm/system.h>
  
- 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- 		return;
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -148,18 +149,26 @@ struct sbp2_target {
  
--	WARN_ON(qc->__sg == NULL);
--	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+ 	unsigned workarounds;
+ 	struct list_head lu_list;
++
++	unsigned int mgt_orb_timeout;
+ };
+ 
+-#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
+-#define SBP2_MAX_SECTORS		255	/* Max sectors supported */
++/*
++ * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
++ * provided in the config rom. Most devices do provide a value, which
++ * we'll use for login management orbs, but with some sane limits.
++ */
++#define SBP2_MIN_LOGIN_ORB_TIMEOUT	5000U	/* Timeout in ms */
++#define SBP2_MAX_LOGIN_ORB_TIMEOUT	40000U	/* Timeout in ms */
+ #define SBP2_ORB_TIMEOUT		2000	/* Timeout in ms */
 -
- 	idx = 0;
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		u32 addr, offset;
- 		u32 sg_len, len;
+ #define SBP2_ORB_NULL			0x80000000
++#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
  
-@@ -578,29 +568,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
- 		}
- 	}
+ #define SBP2_DIRECTION_TO_MEDIA		0x0
+ #define SBP2_DIRECTION_FROM_MEDIA	0x1
  
--	if (idx) {
--		u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
-+	len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+ /* Unit directory keys */
++#define SBP2_CSR_UNIT_CHARACTERISTICS	0x3a
+ #define SBP2_CSR_FIRMWARE_REVISION	0x3c
+ #define SBP2_CSR_LOGICAL_UNIT_NUMBER	0x14
+ #define SBP2_CSR_LOGICAL_UNIT_DIRECTORY	0xd4
+@@ -489,6 +498,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+ {
+ 	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ 	struct sbp2_management_orb *orb;
++	unsigned int timeout;
+ 	int retval = -ENOMEM;
  
--		if (len > SG_COUNT_ASIC_BUG) {
--			u32 addr;
-+	if (len > SG_COUNT_ASIC_BUG) {
-+		u32 addr;
+ 	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+@@ -516,9 +526,13 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+ 	orb->request.status_fifo.low  = lu->address_handler.offset;
+ 
+ 	if (function == SBP2_LOGIN_REQUEST) {
++		/* Ask for 2^2 == 4 seconds reconnect grace period */
+ 		orb->request.misc |=
+-			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login) |
+-			MANAGEMENT_ORB_RECONNECT(0);
++			MANAGEMENT_ORB_RECONNECT(2) |
++			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login);
++		timeout = lu->tgt->mgt_orb_timeout;
++	} else {
++		timeout = SBP2_ORB_TIMEOUT;
+ 	}
+ 
+ 	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
+@@ -535,8 +549,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+ 	sbp2_send_orb(&orb->base, lu, node_id, generation,
+ 		      lu->tgt->management_agent_address);
+ 
+-	wait_for_completion_timeout(&orb->done,
+-				    msecs_to_jiffies(SBP2_ORB_TIMEOUT));
++	wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));
  
--			VPRINTK("Splitting last PRD.\n");
-+		VPRINTK("Splitting last PRD.\n");
+ 	retval = -EIO;
+ 	if (sbp2_cancel_orbs(lu) == 0) {
+@@ -608,13 +621,17 @@ static void sbp2_release_target(struct kref *kref)
+ 	struct sbp2_logical_unit *lu, *next;
+ 	struct Scsi_Host *shost =
+ 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
++	struct fw_device *device = fw_device(tgt->unit->device.parent);
  
--			addr = le32_to_cpu(ap->prd[idx - 1].addr);
--			ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
--			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
-+		addr = le32_to_cpu(ap->prd[idx - 1].addr);
-+		ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
-+		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+ 	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+ 		if (lu->sdev)
+ 			scsi_remove_device(lu->sdev);
  
--			addr = addr + len - SG_COUNT_ASIC_BUG;
--			len = SG_COUNT_ASIC_BUG;
--			ap->prd[idx].addr = cpu_to_le32(addr);
--			ap->prd[idx].flags_len = cpu_to_le32(len);
--			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
-+		addr = addr + len - SG_COUNT_ASIC_BUG;
-+		len = SG_COUNT_ASIC_BUG;
-+		ap->prd[idx].addr = cpu_to_le32(addr);
-+		ap->prd[idx].flags_len = cpu_to_le32(len);
-+		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+-		sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
+-				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
++		if (!fw_device_is_shutdown(device))
++			sbp2_send_management_orb(lu, tgt->node_id,
++					lu->generation, SBP2_LOGOUT_REQUEST,
++					lu->login_id, NULL);
++
+ 		fw_core_remove_address_handler(&lu->address_handler);
+ 		list_del(&lu->link);
+ 		kfree(lu);
+@@ -628,6 +645,21 @@ static void sbp2_release_target(struct kref *kref)
  
--			idx++;
--		}
--
--		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-+		idx++;
- 	}
+ static struct workqueue_struct *sbp2_wq;
+ 
++/*
++ * Always get the target's kref when scheduling work on one of its units.
++ * Each workqueue job is responsible for calling sbp2_target_put() upon return.
++ */
++static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
++{
++	if (queue_delayed_work(sbp2_wq, &lu->work, delay))
++		kref_get(&lu->tgt->kref);
++}
 +
-+	ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
- }
++static void sbp2_target_put(struct sbp2_target *tgt)
++{
++	kref_put(&tgt->kref, sbp2_release_target);
++}
++
+ static void sbp2_reconnect(struct work_struct *work);
  
- static void pdc_qc_prep(struct ata_queued_cmd *qc)
-@@ -627,14 +615,14 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
- 		pdc_pkt_footer(&qc->tf, pp->pkt, i);
- 		break;
+ static void sbp2_login(struct work_struct *work)
+@@ -643,22 +675,19 @@ static void sbp2_login(struct work_struct *work)
+ 	struct sbp2_login_response response;
+ 	int generation, node_id, local_node_id;
  
--	case ATA_PROT_ATAPI:
-+	case ATAPI_PROT_PIO:
- 		pdc_fill_sg(qc);
- 		break;
+-	generation    = device->card->generation;
+-	node_id       = device->node->node_id;
+-	local_node_id = device->card->local_node->node_id;
++	generation    = device->generation;
++	smp_rmb();    /* node_id must not be older than generation */
++	node_id       = device->node_id;
++	local_node_id = device->card->node_id;
  
--	case ATA_PROT_ATAPI_DMA:
-+	case ATAPI_PROT_DMA:
- 		pdc_fill_sg(qc);
- 		/*FALLTHROUGH*/
--	case ATA_PROT_ATAPI_NODATA:
-+	case ATAPI_PROT_NODATA:
- 		pdc_atapi_pkt(qc);
- 		break;
+ 	if (sbp2_send_management_orb(lu, node_id, generation,
+ 				SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
+-		if (lu->retries++ < 5) {
+-			if (queue_delayed_work(sbp2_wq, &lu->work,
+-					       DIV_ROUND_UP(HZ, 5)))
+-				kref_get(&lu->tgt->kref);
+-		} else {
++		if (lu->retries++ < 5)
++			sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
++		else
+ 			fw_error("failed to login to %s LUN %04x\n",
+ 				 unit->device.bus_id, lu->lun);
+-		}
+-		kref_put(&lu->tgt->kref, sbp2_release_target);
+-		return;
++		goto out;
+ 	}
  
-@@ -754,8 +742,8 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
- 	switch (qc->tf.protocol) {
- 	case ATA_PROT_DMA:
- 	case ATA_PROT_NODATA:
--	case ATA_PROT_ATAPI_DMA:
--	case ATA_PROT_ATAPI_NODATA:
-+	case ATAPI_PROT_DMA:
-+	case ATAPI_PROT_NODATA:
- 		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
- 		ata_qc_complete(qc);
- 		handled = 1;
-@@ -900,7 +888,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
- static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
+ 	lu->generation        = generation;
+@@ -700,7 +729,8 @@ static void sbp2_login(struct work_struct *work)
+ 		lu->sdev = sdev;
+ 		scsi_device_put(sdev);
+ 	}
+-	kref_put(&lu->tgt->kref, sbp2_release_target);
++ out:
++	sbp2_target_put(lu->tgt);
+ }
+ 
+ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+@@ -750,6 +780,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
  {
- 	switch (qc->tf.protocol) {
--	case ATA_PROT_ATAPI_NODATA:
-+	case ATAPI_PROT_NODATA:
- 		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
- 			break;
- 		/*FALLTHROUGH*/
-@@ -908,7 +896,7 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
- 		if (qc->tf.flags & ATA_TFLAG_POLLING)
+ 	struct fw_csr_iterator ci;
+ 	int key, value;
++	unsigned int timeout;
+ 
+ 	fw_csr_iterator_init(&ci, directory);
+ 	while (fw_csr_iterator_next(&ci, &key, &value)) {
+@@ -772,6 +803,21 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
+ 			*firmware_revision = value;
  			break;
- 		/*FALLTHROUGH*/
--	case ATA_PROT_ATAPI_DMA:
-+	case ATAPI_PROT_DMA:
- 	case ATA_PROT_DMA:
- 		pdc_packet_start(qc);
- 		return 0;
-@@ -922,16 +910,14 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
  
- static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
- {
--	WARN_ON(tf->protocol == ATA_PROT_DMA ||
--		tf->protocol == ATA_PROT_ATAPI_DMA);
-+	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
- 	ata_tf_load(ap, tf);
- }
++		case SBP2_CSR_UNIT_CHARACTERISTICS:
++			/* the timeout value is stored in 500ms units */
++			timeout = ((unsigned int) value >> 8 & 0xff) * 500;
++			timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
++			tgt->mgt_orb_timeout =
++				  min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
++
++			if (timeout > tgt->mgt_orb_timeout)
++				fw_notify("%s: config rom contains %ds "
++					  "management ORB timeout, limiting "
++					  "to %ds\n", tgt->unit->device.bus_id,
++					  timeout / 1000,
++					  tgt->mgt_orb_timeout / 1000);
++			break;
++
+ 		case SBP2_CSR_LOGICAL_UNIT_NUMBER:
+ 			if (sbp2_add_logical_unit(tgt, value) < 0)
+ 				return -ENOMEM;
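
The SBP2_CSR_UNIT_CHARACTERISTICS case above takes the mgt_ORB_timeout field from bits 8..15 of the key value, scales it from 500 ms units to milliseconds, and clamps the result to the 5 s to 40 s window defined by SBP2_MIN_LOGIN_ORB_TIMEOUT and SBP2_MAX_LOGIN_ORB_TIMEOUT. A standalone illustration of that arithmetic in plain C follows; the sample config-rom value 0x2000 is made up.

    #include <stdio.h>

    #define SBP2_MIN_LOGIN_ORB_TIMEOUT  5000U   /* ms */
    #define SBP2_MAX_LOGIN_ORB_TIMEOUT  40000U  /* ms */

    int main(void)
    {
        /* made-up unit_characteristics value: mgt_ORB_timeout field = 0x20 */
        unsigned int value = 0x2000;
        unsigned int timeout;

        timeout = ((value >> 8) & 0xff) * 500;  /* 0x20 * 500 ms = 16000 ms */
        if (timeout < SBP2_MIN_LOGIN_ORB_TIMEOUT)
            timeout = SBP2_MIN_LOGIN_ORB_TIMEOUT;
        if (timeout > SBP2_MAX_LOGIN_ORB_TIMEOUT)
            timeout = SBP2_MAX_LOGIN_ORB_TIMEOUT;

        printf("management ORB timeout: %u ms\n", timeout);  /* prints 16000 */
        return 0;
    }
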
+@@ -865,18 +911,13 @@ static int sbp2_probe(struct device *dev)
  
- static void pdc_exec_command_mmio(struct ata_port *ap,
- 				  const struct ata_taskfile *tf)
- {
--	WARN_ON(tf->protocol == ATA_PROT_DMA ||
--		tf->protocol == ATA_PROT_ATAPI_DMA);
-+	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
- 	ata_exec_command(ap, tf);
+ 	get_device(&unit->device);
+ 
+-	/*
+-	 * We schedule work to do the login so we can easily
+-	 * reschedule retries. Always get the ref before scheduling
+-	 * work.
+-	 */
++	/* Do the login in a workqueue so we can easily reschedule retries. */
+ 	list_for_each_entry(lu, &tgt->lu_list, link)
+-		if (queue_delayed_work(sbp2_wq, &lu->work, 0))
+-			kref_get(&tgt->kref);
++		sbp2_queue_work(lu, 0);
+ 	return 0;
+ 
+  fail_tgt_put:
+-	kref_put(&tgt->kref, sbp2_release_target);
++	sbp2_target_put(tgt);
+ 	return -ENOMEM;
+ 
+  fail_shost_put:
+@@ -889,7 +930,7 @@ static int sbp2_remove(struct device *dev)
+ 	struct fw_unit *unit = fw_unit(dev);
+ 	struct sbp2_target *tgt = unit->device.driver_data;
+ 
+-	kref_put(&tgt->kref, sbp2_release_target);
++	sbp2_target_put(tgt);
+ 	return 0;
  }
  
-diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
-index 6ee5e19..00d6000 100644
---- a/drivers/ata/sata_promise.h
-+++ b/drivers/ata/sata_promise.h
-@@ -46,7 +46,7 @@ static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
- 					  unsigned int devno, u8 *buf)
- {
- 	u8 dev_reg;
--	u32 *buf32 = (u32 *) buf;
-+	__le32 *buf32 = (__le32 *) buf;
+@@ -901,9 +942,10 @@ static void sbp2_reconnect(struct work_struct *work)
+ 	struct fw_device *device = fw_device(unit->device.parent);
+ 	int generation, node_id, local_node_id;
+ 
+-	generation    = device->card->generation;
+-	node_id       = device->node->node_id;
+-	local_node_id = device->card->local_node->node_id;
++	generation    = device->generation;
++	smp_rmb();    /* node_id must not be older than generation */
++	node_id       = device->node_id;
++	local_node_id = device->card->node_id;
+ 
+ 	if (sbp2_send_management_orb(lu, node_id, generation,
+ 				     SBP2_RECONNECT_REQUEST,
+@@ -915,10 +957,8 @@ static void sbp2_reconnect(struct work_struct *work)
+ 			lu->retries = 0;
+ 			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ 		}
+-		if (queue_delayed_work(sbp2_wq, &lu->work, DIV_ROUND_UP(HZ, 5)))
+-			kref_get(&lu->tgt->kref);
+-		kref_put(&lu->tgt->kref, sbp2_release_target);
+-		return;
++		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
++		goto out;
+ 	}
  
- 	/* set control bits (byte 0), zero delay seq id (byte 3),
- 	 * and seq id (byte 2)
-diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
-index c68b241..91cc12c 100644
---- a/drivers/ata/sata_qstor.c
-+++ b/drivers/ata/sata_qstor.c
-@@ -287,14 +287,10 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
- 	struct scatterlist *sg;
- 	struct ata_port *ap = qc->ap;
- 	struct qs_port_priv *pp = ap->private_data;
--	unsigned int nelem;
- 	u8 *prd = pp->pkt + QS_CPB_BYTES;
-+	unsigned int si;
+ 	lu->generation        = generation;
+@@ -930,8 +970,8 @@ static void sbp2_reconnect(struct work_struct *work)
  
--	WARN_ON(qc->__sg == NULL);
--	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+ 	sbp2_agent_reset(lu);
+ 	sbp2_cancel_orbs(lu);
 -
--	nelem = 0;
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		u64 addr;
- 		u32 len;
+-	kref_put(&lu->tgt->kref, sbp2_release_target);
++ out:
++	sbp2_target_put(lu->tgt);
+ }
  
-@@ -306,12 +302,11 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
- 		*(__le32 *)prd = cpu_to_le32(len);
- 		prd += sizeof(u64);
+ static void sbp2_update(struct fw_unit *unit)
+@@ -947,8 +987,7 @@ static void sbp2_update(struct fw_unit *unit)
+ 	 */
+ 	list_for_each_entry(lu, &tgt->lu_list, link) {
+ 		lu->retries = 0;
+-		if (queue_delayed_work(sbp2_wq, &lu->work, 0))
+-			kref_get(&tgt->kref);
++		sbp2_queue_work(lu, 0);
+ 	}
+ }
  
--		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
-+		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
- 					(unsigned long long)addr, len);
--		nelem++;
+@@ -1103,9 +1142,9 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
+ 	 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
+ 	 * during DMA mapping, and Linux currently doesn't prevent this.
+ 	 */
+-	for (i = 0, j = 0; i < count; i++) {
+-		sg_len = sg_dma_len(sg + i);
+-		sg_addr = sg_dma_address(sg + i);
++	for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
++		sg_len = sg_dma_len(sg);
++		sg_addr = sg_dma_address(sg);
+ 		while (sg_len) {
+ 			/* FIXME: This won't get us out of the pinch. */
+ 			if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
+@@ -1238,6 +1277,12 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+ 
+ 	sdev->allow_restart = 1;
+ 
++	/*
++	 * Update the dma alignment (minimum alignment requirements for
++	 * start and end of DMA transfers) to be a sector
++	 */
++	blk_queue_update_dma_alignment(sdev->request_queue, 511);
++
+ 	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
+ 		sdev->inquiry_len = 36;
+ 
+diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
+index 0fc9b00..172c186 100644
+--- a/drivers/firewire/fw-topology.c
++++ b/drivers/firewire/fw-topology.c
+@@ -21,6 +21,7 @@
+ #include <linux/module.h>
+ #include <linux/wait.h>
+ #include <linux/errno.h>
++#include <asm/system.h>
+ #include "fw-transaction.h"
+ #include "fw-topology.h"
+ 
+@@ -518,6 +519,11 @@ fw_core_handle_bus_reset(struct fw_card *card,
+ 		card->bm_retries = 0;
+ 
+ 	card->node_id = node_id;
++	/*
++	 * Update node_id before generation to prevent anybody from using
++	 * a stale node_id together with a current generation.
++	 */
++	smp_wmb();
+ 	card->generation = generation;
+ 	card->reset_jiffies = jiffies;
+ 	schedule_delayed_work(&card->work, 0);
+diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
+index c00d4a9..7fcc59d 100644
+--- a/drivers/firewire/fw-transaction.c
++++ b/drivers/firewire/fw-transaction.c
+@@ -153,7 +153,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+ 	int ext_tcode;
+ 
+ 	if (tcode > 0x10) {
+-		ext_tcode = tcode - 0x10;
++		ext_tcode = tcode & ~0x10;
+ 		tcode = TCODE_LOCK_REQUEST;
+ 	} else
+ 		ext_tcode = 0;
+@@ -650,7 +650,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+ 		 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
+ 	tcode       = HEADER_GET_TCODE(p->header[0]);
+ 	destination = HEADER_GET_DESTINATION(p->header[0]);
+-	source      = HEADER_GET_SOURCE(p->header[0]);
++	source      = HEADER_GET_SOURCE(p->header[1]);
+ 
+ 	spin_lock_irqsave(&address_handler_lock, flags);
+ 	handler = lookup_enclosing_address_handler(&address_handler_list,
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 5e596a7..9008ed5 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -8,6 +8,8 @@
+ #include <linux/slab.h>
+ #include <asm/dmi.h>
+ 
++static char dmi_empty_string[] = "        ";
++
+ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
+ {
+ 	const u8 *bp = ((u8 *) dm) + dm->length;
+@@ -21,11 +23,16 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
+ 		}
+ 
+ 		if (*bp != 0) {
+-			str = dmi_alloc(strlen(bp) + 1);
++			size_t len = strlen(bp)+1;
++			size_t cmp_len = len > 8 ? 8 : len;
++
++			if (!memcmp(bp, dmi_empty_string, cmp_len))
++				return dmi_empty_string;
++			str = dmi_alloc(len);
+ 			if (str != NULL)
+ 				strcpy(str, bp);
+ 			else
+-				printk(KERN_ERR "dmi_string: out of memory.\n");
++				printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
+ 		}
  	}
  
--	return nelem;
-+	return si;
+@@ -175,12 +182,23 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
+ 	}
  }
  
- static void qs_qc_prep(struct ata_queued_cmd *qc)
-@@ -376,7 +371,7 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
- 		qs_packet_start(qc);
- 		return 0;
++static struct dmi_device empty_oem_string_dev = {
++	.name = dmi_empty_string,
++};
++
+ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
+ {
+ 	int i, count = *(u8 *)(dm + 1);
+ 	struct dmi_device *dev;
  
--	case ATA_PROT_ATAPI_DMA:
-+	case ATAPI_PROT_DMA:
- 		BUG();
- 		break;
+ 	for (i = 1; i <= count; i++) {
++		char *devname = dmi_string(dm, i);
++
++		if (!strcmp(devname, dmi_empty_string)) {
++			list_add(&empty_oem_string_dev.list, &dmi_devices);
++			continue;
++		}
++
+ 		dev = dmi_alloc(sizeof(*dev));
+ 		if (!dev) {
+ 			printk(KERN_ERR
+@@ -189,7 +207,7 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
+ 		}
  
-diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
-index f5119bf..0b8191b 100644
---- a/drivers/ata/sata_sil.c
-+++ b/drivers/ata/sata_sil.c
-@@ -416,15 +416,14 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
- 		 */
+ 		dev->type = DMI_DEV_TYPE_OEM_STRING;
+-		dev->name = dmi_string(dm, i);
++		dev->name = devname;
+ 		dev->device_data = NULL;
  
- 		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
--		 * The flag was turned on only for atapi devices.
--		 * No need to check is_atapi_taskfile(&qc->tf) again.
-+		 * The flag was turned on only for atapi devices.  No
-+		 * need to check ata_is_atapi(qc->tf.protocol) again.
- 		 */
- 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- 			goto err_hsm;
- 		break;
- 	case HSM_ST_LAST:
--		if (qc->tf.protocol == ATA_PROT_DMA ||
--		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
-+		if (ata_is_dma(qc->tf.protocol)) {
- 			/* clear DMA-Start bit */
- 			ap->ops->bmdma_stop(qc);
+ 		list_add(&dev->list, &dmi_devices);
+@@ -331,9 +349,11 @@ void __init dmi_scan_machine(void)
+ 			rc = dmi_present(q);
+ 			if (!rc) {
+ 				dmi_available = 1;
++				dmi_iounmap(p, 0x10000);
+ 				return;
+ 			}
+ 		}
++		dmi_iounmap(p, 0x10000);
+ 	}
+  out:	printk(KERN_INFO "DMI not present or invalid.\n");
+ }
+diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
+index 6942e06..d168223 100644
+--- a/drivers/firmware/edd.c
++++ b/drivers/firmware/edd.c
+@@ -631,7 +631,7 @@ static struct kobj_type edd_ktype = {
+ 	.default_attrs	= def_attrs,
+ };
  
-@@ -451,8 +450,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
- 	/* kick HSM in the ass */
- 	ata_hsm_move(ap, qc, status, 0);
+-static decl_subsys(edd, &edd_ktype, NULL);
++static struct kset *edd_kset;
  
--	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
--				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
-+	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
- 		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
  
- 	return;
-diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
-index 864c1c1..b4b1f91 100644
---- a/drivers/ata/sata_sil24.c
-+++ b/drivers/ata/sata_sil24.c
-@@ -813,8 +813,9 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
+ /**
+@@ -693,7 +693,7 @@ edd_create_symlink_to_pcidev(struct edd_device *edev)
+ static inline void
+ edd_device_unregister(struct edd_device *edev)
  {
- 	struct scatterlist *sg;
- 	struct sil24_sge *last_sge = NULL;
-+	unsigned int si;
+-	kobject_unregister(&edev->kobj);
++	kobject_put(&edev->kobj);
+ }
+ 
+ static void edd_populate_dir(struct edd_device * edev)
+@@ -721,12 +721,13 @@ edd_device_register(struct edd_device *edev, int i)
+ 	if (!edev)
+ 		return 1;
+ 	edd_dev_set_info(edev, i);
+-	kobject_set_name(&edev->kobj, "int13_dev%02x",
+-			 0x80 + i);
+-	kobj_set_kset_s(edev,edd_subsys);
+-	error = kobject_register(&edev->kobj);
+-	if (!error)
++	edev->kobj.kset = edd_kset;
++	error = kobject_init_and_add(&edev->kobj, &edd_ktype, NULL,
++				     "int13_dev%02x", 0x80 + i);
++	if (!error) {
+ 		edd_populate_dir(edev);
++		kobject_uevent(&edev->kobj, KOBJ_ADD);
++	}
+ 	return error;
+ }
  
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		sge->addr = cpu_to_le64(sg_dma_address(sg));
- 		sge->cnt = cpu_to_le32(sg_dma_len(sg));
- 		sge->flags = 0;
-@@ -823,8 +824,7 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
- 		sge++;
+@@ -755,9 +756,9 @@ edd_init(void)
+ 		return 1;
  	}
  
--	if (likely(last_sge))
--		last_sge->flags = cpu_to_le32(SGE_TRM);
-+	last_sge->flags = cpu_to_le32(SGE_TRM);
+-	rc = firmware_register(&edd_subsys);
+-	if (rc)
+-		return rc;
++	edd_kset = kset_create_and_add("edd", NULL, firmware_kobj);
++	if (!edd_kset)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < edd_num_devices() && !rc; i++) {
+ 		edev = kzalloc(sizeof (*edev), GFP_KERNEL);
+@@ -773,7 +774,7 @@ edd_init(void)
+ 	}
+ 
+ 	if (rc)
+-		firmware_unregister(&edd_subsys);
++		kset_unregister(edd_kset);
+ 	return rc;
  }
  
- static int sil24_qc_defer(struct ata_queued_cmd *qc)
-@@ -852,9 +852,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
- 	 *   set.
- 	 *
-  	 */
--	int is_excl = (prot == ATA_PROT_ATAPI ||
--		       prot == ATA_PROT_ATAPI_NODATA ||
--		       prot == ATA_PROT_ATAPI_DMA ||
-+	int is_excl = (ata_is_atapi(prot) ||
- 		       (qc->flags & ATA_QCFLAG_RESULT_TF));
+@@ -787,7 +788,7 @@ edd_exit(void)
+ 		if ((edev = edd_devices[i]))
+ 			edd_device_unregister(edev);
+ 	}
+-	firmware_unregister(&edd_subsys);
++	kset_unregister(edd_kset);
+ }
  
- 	if (unlikely(ap->excl_link)) {
-@@ -885,35 +883,21 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
+ late_initcall(edd_init);
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 858a7b9..f4f709d 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -129,13 +129,6 @@ struct efivar_attribute {
+ };
  
- 	cb = &pp->cmd_block[sil24_tag(qc->tag)];
  
--	switch (qc->tf.protocol) {
--	case ATA_PROT_PIO:
--	case ATA_PROT_DMA:
--	case ATA_PROT_NCQ:
--	case ATA_PROT_NODATA:
-+	if (!ata_is_atapi(qc->tf.protocol)) {
- 		prb = &cb->ata.prb;
- 		sge = cb->ata.sge;
--		break;
+-#define EFI_ATTR(_name, _mode, _show, _store) \
+-struct subsys_attribute efi_attr_##_name = { \
+-	.attr = {.name = __stringify(_name), .mode = _mode}, \
+-	.show = _show, \
+-	.store = _store, \
+-};
 -
--	case ATA_PROT_ATAPI:
--	case ATA_PROT_ATAPI_DMA:
--	case ATA_PROT_ATAPI_NODATA:
-+	} else {
- 		prb = &cb->atapi.prb;
- 		sge = cb->atapi.sge;
- 		memset(cb->atapi.cdb, 0, 32);
- 		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
+ #define EFIVAR_ATTR(_name, _mode, _show, _store) \
+ struct efivar_attribute efivar_attr_##_name = { \
+ 	.attr = {.name = __stringify(_name), .mode = _mode}, \
+@@ -143,13 +136,6 @@ struct efivar_attribute efivar_attr_##_name = { \
+ 	.store = _store, \
+ };
  
--		if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
-+		if (ata_is_data(qc->tf.protocol)) {
- 			if (qc->tf.flags & ATA_TFLAG_WRITE)
- 				ctrl = PRB_CTRL_PACKET_WRITE;
- 			else
- 				ctrl = PRB_CTRL_PACKET_READ;
- 		}
--		break;
+-#define VAR_SUBSYS_ATTR(_name, _mode, _show, _store) \
+-struct subsys_attribute var_subsys_attr_##_name = { \
+-	.attr = {.name = __stringify(_name), .mode = _mode}, \
+-	.show = _show, \
+-	.store = _store, \
+-};
 -
--	default:
--		prb = NULL;	/* shut up, gcc */
--		sge = NULL;
--		BUG();
- 	}
+ #define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
+ #define to_efivar_entry(obj)  container_of(obj, struct efivar_entry, kobj)
  
- 	prb->ctrl = cpu_to_le16(ctrl);
-diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
-index 4d85718..e3d56bc 100644
---- a/drivers/ata/sata_sx4.c
-+++ b/drivers/ata/sata_sx4.c
-@@ -334,7 +334,7 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
+@@ -408,21 +394,16 @@ static struct kobj_type efivar_ktype = {
+ 	.default_attrs = def_attrs,
+ };
+ 
+-static ssize_t
+-dummy(struct kset *kset, char *buf)
+-{
+-	return -ENODEV;
+-}
+-
+ static inline void
+ efivar_unregister(struct efivar_entry *var)
  {
- 	u32 addr;
- 	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
--	u32 *buf32 = (u32 *) buf;
-+	__le32 *buf32 = (__le32 *) buf;
+-	kobject_unregister(&var->kobj);
++	kobject_put(&var->kobj);
+ }
  
- 	/* output ATA packet S/G table */
- 	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
-@@ -356,7 +356,7 @@ static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
+ 
+-static ssize_t
+-efivar_create(struct kset *kset, const char *buf, size_t count)
++static ssize_t efivar_create(struct kobject *kobj,
++			     struct bin_attribute *bin_attr,
++			     char *buf, loff_t pos, size_t count)
  {
- 	u32 addr;
- 	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
--	u32 *buf32 = (u32 *) buf;
-+	__le32 *buf32 = (__le32 *) buf;
+ 	struct efi_variable *new_var = (struct efi_variable *)buf;
+ 	struct efivar_entry *search_efivar, *n;
+@@ -479,8 +460,9 @@ efivar_create(struct kset *kset, const char *buf, size_t count)
+ 	return count;
+ }
  
- 	/* output Host DMA packet S/G table */
- 	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
-@@ -377,7 +377,7 @@ static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
- 					    unsigned int portno)
+-static ssize_t
+-efivar_delete(struct kset *kset, const char *buf, size_t count)
++static ssize_t efivar_delete(struct kobject *kobj,
++			     struct bin_attribute *bin_attr,
++			     char *buf, loff_t pos, size_t count)
  {
- 	unsigned int i, dw;
--	u32 *buf32 = (u32 *) buf;
-+	__le32 *buf32 = (__le32 *) buf;
- 	u8 dev_reg;
+ 	struct efi_variable *del_var = (struct efi_variable *)buf;
+ 	struct efivar_entry *search_efivar, *n;
+@@ -537,25 +519,26 @@ efivar_delete(struct kset *kset, const char *buf, size_t count)
+ 	return count;
+ }
  
- 	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
-@@ -429,7 +429,8 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
- 				     unsigned int portno)
+-static VAR_SUBSYS_ATTR(new_var, 0200, dummy, efivar_create);
+-static VAR_SUBSYS_ATTR(del_var, 0200, dummy, efivar_delete);
++static struct bin_attribute var_subsys_attr_new_var = {
++	.attr = {.name = "new_var", .mode = 0200},
++	.write = efivar_create,
++};
+ 
+-static struct subsys_attribute *var_subsys_attrs[] = {
+-	&var_subsys_attr_new_var,
+-	&var_subsys_attr_del_var,
+-	NULL,
++static struct bin_attribute var_subsys_attr_del_var = {
++	.attr = {.name = "del_var", .mode = 0200},
++	.write = efivar_delete,
+ };
+ 
+ /*
+  * Let's not leave out systab information that snuck into
+  * the efivars driver
+  */
+-static ssize_t
+-systab_read(struct kset *kset, char *buf)
++static ssize_t systab_show(struct kobject *kobj,
++			   struct kobj_attribute *attr, char *buf)
  {
- 	unsigned int dw;
--	u32 tmp, *buf32 = (u32 *) buf;
-+	u32 tmp;
-+	__le32 *buf32 = (__le32 *) buf;
+ 	char *str = buf;
  
- 	unsigned int host_sg = PDC_20621_DIMM_BASE +
- 			       (PDC_DIMM_WINDOW_STEP * portno) +
-@@ -473,7 +474,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
- 	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
- 	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
- 	unsigned int portno = ap->port_no;
--	unsigned int i, idx, total_len = 0, sgt_len;
-+	unsigned int i, si, idx, total_len = 0, sgt_len;
- 	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
+-	if (!kset || !buf)
++	if (!kobj || !buf)
+ 		return -EINVAL;
  
- 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
-@@ -487,7 +488,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
- 	 * Build S/G table
- 	 */
- 	idx = 0;
--	ata_for_each_sg(sg, qc) {
-+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
- 		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
- 		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
- 		total_len += sg_dma_len(sg);
-@@ -700,7 +701,7 @@ static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
- 		pdc20621_packet_start(qc);
- 		return 0;
+ 	if (efi.mps != EFI_INVALID_TABLE_ADDR)
+@@ -576,15 +559,21 @@ systab_read(struct kset *kset, char *buf)
+ 	return str - buf;
+ }
  
--	case ATA_PROT_ATAPI_DMA:
-+	case ATAPI_PROT_DMA:
- 		BUG();
- 		break;
+-static EFI_ATTR(systab, 0400, systab_read, NULL);
++static struct kobj_attribute efi_attr_systab =
++			__ATTR(systab, 0400, systab_show, NULL);
  
-diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
-index b34b382..7b44a59 100644
---- a/drivers/atm/ambassador.c
-+++ b/drivers/atm/ambassador.c
-@@ -2163,7 +2163,6 @@ static int __devinit amb_init (amb_dev * dev)
- static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev) 
- {
-       unsigned char pool;
--      memset (dev, 0, sizeof(amb_dev));
-       
-       // set up known dev items straight away
-       dev->pci_dev = pci_dev; 
-@@ -2253,7 +2252,7 @@ static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_
- 		goto out_disable;
- 	}
+-static struct subsys_attribute *efi_subsys_attrs[] = {
+-	&efi_attr_systab,
++static struct attribute *efi_subsys_attrs[] = {
++	&efi_attr_systab.attr,
+ 	NULL,	/* maybe more in the future? */
+ };
+ 
+-static decl_subsys(vars, &efivar_ktype, NULL);
+-static decl_subsys(efi, NULL, NULL);
++static struct attribute_group efi_subsys_attr_group = {
++	.attrs = efi_subsys_attrs,
++};
++
++
++static struct kset *vars_kset;
++static struct kobject *efi_kobj;
  
--	dev = kmalloc (sizeof(amb_dev), GFP_KERNEL);
-+	dev = kzalloc(sizeof(amb_dev), GFP_KERNEL);
- 	if (!dev) {
- 		PRINTK (KERN_ERR, "out of memory!");
- 		err = -ENOMEM;
-diff --git a/drivers/atm/he.c b/drivers/atm/he.c
-index 3b64a99..2e3395b 100644
---- a/drivers/atm/he.c
-+++ b/drivers/atm/he.c
-@@ -1,5 +1,3 @@
--/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
--
  /*
+  * efivar_create_sysfs_entry()
+@@ -628,15 +617,16 @@ efivar_create_sysfs_entry(unsigned long variable_name_size,
+ 	*(short_name + strlen(short_name)) = '-';
+ 	efi_guid_unparse(vendor_guid, short_name + strlen(short_name));
  
-   he.c
-@@ -99,10 +97,6 @@
- #define HPRINTK(fmt,args...)	do { } while (0)
- #endif /* HE_DEBUG */
+-	kobject_set_name(&new_efivar->kobj, "%s", short_name);
+-	kobj_set_kset_s(new_efivar, vars_subsys);
+-	i = kobject_register(&new_efivar->kobj);
++	new_efivar->kobj.kset = vars_kset;
++	i = kobject_init_and_add(&new_efivar->kobj, &efivar_ktype, NULL,
++				 "%s", short_name);
+ 	if (i) {
+ 		kfree(short_name);
+ 		kfree(new_efivar);
+ 		return 1;
+ 	}
  
--/* version definition */
++	kobject_uevent(&new_efivar->kobj, KOBJ_ADD);
+ 	kfree(short_name);
+ 	short_name = NULL;
+ 
+@@ -660,9 +650,8 @@ efivars_init(void)
+ 	efi_status_t status = EFI_NOT_FOUND;
+ 	efi_guid_t vendor_guid;
+ 	efi_char16_t *variable_name;
+-	struct subsys_attribute *attr;
+ 	unsigned long variable_name_size = 1024;
+-	int i, error = 0;
++	int error = 0;
+ 
+ 	if (!efi_enabled)
+ 		return -ENODEV;
+@@ -676,23 +665,18 @@ efivars_init(void)
+ 	printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
+ 	       EFIVARS_DATE);
+ 
+-	/*
+-	 * For now we'll register the efi subsys within this driver
+-	 */
 -
--static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
+-	error = firmware_register(&efi_subsys);
 -
- /* declarations */
+-	if (error) {
+-		printk(KERN_ERR "efivars: Firmware registration failed with error %d.\n", error);
++	/* For now we'll register the efi directory at /sys/firmware/efi */
++	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
++	if (!efi_kobj) {
++		printk(KERN_ERR "efivars: Firmware registration failed.\n");
++		error = -ENOMEM;
+ 		goto out_free;
+ 	}
  
- static int he_open(struct atm_vcc *vcc);
-@@ -366,7 +360,7 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
- 	struct he_dev *he_dev = NULL;
- 	int err = 0;
+-	kobj_set_kset_s(&vars_subsys, efi_subsys);
+-
+-	error = subsystem_register(&vars_subsys);
+-
+-	if (error) {
+-		printk(KERN_ERR "efivars: Subsystem registration failed with error %d.\n", error);
++	vars_kset = kset_create_and_add("vars", NULL, efi_kobj);
++	if (!vars_kset) {
++		printk(KERN_ERR "efivars: Subsystem registration failed.\n");
++		error = -ENOMEM;
+ 		goto out_firmware_unregister;
+ 	}
  
--	printk(KERN_INFO "he: %s\n", version);
-+	printk(KERN_INFO "ATM he driver\n");
+@@ -727,28 +711,28 @@ efivars_init(void)
+ 	 * Now add attributes to allow creation of new vars
+ 	 * and deletion of existing ones...
+ 	 */
+-
+-	for (i = 0; (attr = var_subsys_attrs[i]) && !error; i++) {
+-		if (attr->show && attr->store)
+-			error = subsys_create_file(&vars_subsys, attr);
+-	}
++	error = sysfs_create_bin_file(&vars_kset->kobj,
++				      &var_subsys_attr_new_var);
++	if (error)
++		printk(KERN_ERR "efivars: unable to create new_var sysfs file"
++			" due to error %d\n", error);
++	error = sysfs_create_bin_file(&vars_kset->kobj,
++				      &var_subsys_attr_del_var);
++	if (error)
++		printk(KERN_ERR "efivars: unable to create del_var sysfs file"
++			" due to error %d\n", error);
  
- 	if (pci_enable_device(pci_dev))
- 		return -EIO;
-@@ -1643,6 +1637,8 @@ he_stop(struct he_dev *he_dev)
+ 	/* Don't forget the systab entry */
+-
+-	for (i = 0; (attr = efi_subsys_attrs[i]) && !error; i++) {
+-		if (attr->show)
+-			error = subsys_create_file(&efi_subsys, attr);
+-	}
+-
++	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
+ 	if (error)
+ 		printk(KERN_ERR "efivars: Sysfs attribute export failed with error %d.\n", error);
+ 	else
+ 		goto out_free;
  
- 	if (he_dev->rbpl_base) {
- #ifdef USE_RBPL_POOL
-+		int i;
-+
- 		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
- 			void *cpuaddr = he_dev->rbpl_virt[i].virt;
- 			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
-@@ -1665,6 +1661,8 @@ he_stop(struct he_dev *he_dev)
- #ifdef USE_RBPS
- 	if (he_dev->rbps_base) {
- #ifdef USE_RBPS_POOL
-+		int i;
-+
- 		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
- 			void *cpuaddr = he_dev->rbps_virt[i].virt;
- 			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
-@@ -2933,7 +2931,7 @@ he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
+-	subsystem_unregister(&vars_subsys);
++	kset_unregister(vars_kset);
  
- 	left = *pos;
- 	if (!left--)
--		return sprintf(page, "%s\n", version);
-+		return sprintf(page, "ATM he driver\n");
+ out_firmware_unregister:
+-	firmware_unregister(&efi_subsys);
++	kobject_put(efi_kobj);
  
- 	if (!left--)
- 		return sprintf(page, "%s%s\n\n",
-diff --git a/drivers/base/Makefile b/drivers/base/Makefile
-index b39ea3f..63e09c0 100644
---- a/drivers/base/Makefile
-+++ b/drivers/base/Makefile
-@@ -11,6 +11,9 @@ obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
- obj-$(CONFIG_NUMA)	+= node.o
- obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
- obj-$(CONFIG_SMP)	+= topology.o
-+ifeq ($(CONFIG_SYSFS),y)
-+obj-$(CONFIG_MODULES)	+= module.o
-+endif
- obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+ out_free:
+ 	kfree(variable_name);
+@@ -768,8 +752,8 @@ efivars_exit(void)
+ 		efivar_unregister(entry);
+ 	}
  
- ifeq ($(CONFIG_DEBUG_DRIVER),y)
-diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
-index 7370d7c..3b43e8a 100644
---- a/drivers/base/attribute_container.c
-+++ b/drivers/base/attribute_container.c
-@@ -61,7 +61,7 @@ attribute_container_classdev_to_container(struct class_device *classdev)
+-	subsystem_unregister(&vars_subsys);
+-	firmware_unregister(&efi_subsys);
++	kset_unregister(vars_kset);
++	kobject_put(efi_kobj);
  }
- EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
  
--static struct list_head attribute_container_list;
-+static LIST_HEAD(attribute_container_list);
+ module_init(efivars_init);
+diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
+index 1ac5103..275dc52 100644
+--- a/drivers/hid/Makefile
++++ b/drivers/hid/Makefile
+@@ -1,7 +1,7 @@
+ #
+ # Makefile for the HID driver
+ #
+-hid-objs			:= hid-core.o hid-input.o
++hid-objs			:= hid-core.o hid-input.o hid-input-quirks.o
  
- static DEFINE_MUTEX(attribute_container_mutex);
+ obj-$(CONFIG_HID)		+= hid.o
  
-@@ -320,9 +320,14 @@ attribute_container_add_attrs(struct class_device *classdev)
- 	struct class_device_attribute **attrs =	cont->attrs;
- 	int i, error;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 2884b03..d73a768 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -26,6 +26,7 @@
+ #include <linux/input.h>
+ #include <linux/wait.h>
+ #include <linux/vmalloc.h>
++#include <linux/sched.h>
  
--	if (!attrs)
-+	BUG_ON(attrs && cont->grp);
-+
-+	if (!attrs && !cont->grp)
- 		return 0;
+ #include <linux/hid.h>
+ #include <linux/hiddev.h>
+@@ -758,7 +759,9 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
+ {
+ 	u64 x;
  
-+	if (cont->grp)
-+		return sysfs_create_group(&classdev->kobj, cont->grp);
-+
- 	for (i = 0; attrs[i]; i++) {
- 		error = class_device_create_file(classdev, attrs[i]);
- 		if (error)
-@@ -378,9 +383,14 @@ attribute_container_remove_attrs(struct class_device *classdev)
- 	struct class_device_attribute **attrs =	cont->attrs;
- 	int i;
+-	WARN_ON(n > 32);
++	if (n > 32)
++		printk(KERN_WARNING "HID: extract() called with n (%d) > 32! (%s)\n",
++				n, current->comm);
  
--	if (!attrs)
-+	if (!attrs && !cont->grp)
- 		return;
+ 	report += offset >> 3;  /* adjust byte index */
+ 	offset &= 7;            /* now only need bit offset into one byte */
+@@ -780,8 +783,13 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3
+ 	__le64 x;
+ 	u64 m = (1ULL << n) - 1;
  
-+	if (cont->grp) {
-+		sysfs_remove_group(&classdev->kobj, cont->grp);
-+		return ;
-+	}
-+
- 	for (i = 0; attrs[i]; i++)
- 		class_device_remove_file(classdev, attrs[i]);
- }
-@@ -429,10 +439,3 @@ attribute_container_find_class_device(struct attribute_container *cont,
- 	return cdev;
- }
- EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
--
--int __init
--attribute_container_init(void)
--{
--	INIT_LIST_HEAD(&attribute_container_list);
--	return 0;
--}
-diff --git a/drivers/base/base.h b/drivers/base/base.h
-index 10b2fb6..c044414 100644
---- a/drivers/base/base.h
-+++ b/drivers/base/base.h
-@@ -1,6 +1,42 @@
+-	WARN_ON(n > 32);
++	if (n > 32)
++		printk(KERN_WARNING "HID: implement() called with n (%d) > 32! (%s)\n",
++				n, current->comm);
  
--/* initialisation functions */
-+/**
-+ * struct bus_type_private - structure to hold the private to the driver core portions of the bus_type structure.
++	if (value > m)
++		printk(KERN_WARNING "HID: implement() called with too large value %d! (%s)\n",
++				value, current->comm);
+ 	WARN_ON(value > m);
+ 	value &= m;
+ 
+diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
+new file mode 100644
+index 0000000..a870ba5
+--- /dev/null
++++ b/drivers/hid/hid-input-quirks.c
+@@ -0,0 +1,423 @@
++/*
++ *  HID-input usage mapping quirks
 + *
-+ * @subsys - the struct kset that defines this bus.  This is the main kobject
-+ * @drivers_kset - the list of drivers associated with this bus
-+ * @devices_kset - the list of devices associated with this bus
-+ * @klist_devices - the klist to iterate over the @devices_kset
-+ * @klist_drivers - the klist to iterate over the @drivers_kset
-+ * @bus_notifier - the bus notifier list for anything that cares about things
-+ * on this bus.
-+ * @bus - pointer back to the struct bus_type that this structure is associated
-+ * with.
++ *  This is used to handle HID-input mappings for devices violating
++ *  HUT 1.12 specification.
 + *
-+ * This structure is the one that is the actual kobject allowing struct
-+ * bus_type to be statically allocated safely.  Nothing outside of the driver
-+ * core should ever touch these fields.
++ * Copyright (c) 2007-2008 Jiri Kosina
 + */
-+struct bus_type_private {
-+	struct kset subsys;
-+	struct kset *drivers_kset;
-+	struct kset *devices_kset;
-+	struct klist klist_devices;
-+	struct klist klist_drivers;
-+	struct blocking_notifier_head bus_notifier;
-+	unsigned int drivers_autoprobe:1;
-+	struct bus_type *bus;
-+};
 +
-+struct driver_private {
-+	struct kobject kobj;
-+	struct klist klist_devices;
-+	struct klist_node knode_bus;
-+	struct module_kobject *mkobj;
-+	struct device_driver *driver;
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License
++ */
++
++#include <linux/input.h>
++#include <linux/hid.h>
++
++#define map_abs(c)      do { usage->code = c; usage->type = EV_ABS; *bit = input->absbit; *max = ABS_MAX; } while (0)
++#define map_rel(c)      do { usage->code = c; usage->type = EV_REL; *bit = input->relbit; *max = REL_MAX; } while (0)
++#define map_key(c)      do { usage->code = c; usage->type = EV_KEY; *bit = input->keybit; *max = KEY_MAX; } while (0)
++#define map_led(c)      do { usage->code = c; usage->type = EV_LED; *bit = input->ledbit; *max = LED_MAX; } while (0)
++
++#define map_abs_clear(c)        do { map_abs(c); clear_bit(c, *bit); } while (0)
++#define map_key_clear(c)        do { map_key(c); clear_bit(c, *bit); } while (0)
++
++static int quirk_belkin_wkbd(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
++		return 0;
++
++	switch (usage->hid & HID_USAGE) {
++		case 0x03a: map_key_clear(KEY_SOUND);		break;
++		case 0x03b: map_key_clear(KEY_CAMERA);		break;
++		case 0x03c: map_key_clear(KEY_DOCUMENTS);	break;
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++static int quirk_cherry_cymotion(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
++		return 0;
++
++	switch (usage->hid & HID_USAGE) {
++		case 0x301: map_key_clear(KEY_PROG1);		break;
++		case 0x302: map_key_clear(KEY_PROG2);		break;
++		case 0x303: map_key_clear(KEY_PROG3);		break;
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++static int quirk_logitech_ultrax_remote(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
++		return 0;
++
++	set_bit(EV_REP, input->evbit);
++	switch(usage->hid & HID_USAGE) {
++		/* Reported on Logitech Ultra X Media Remote */
++		case 0x004: map_key_clear(KEY_AGAIN);		break;
++		case 0x00d: map_key_clear(KEY_HOME);		break;
++		case 0x024: map_key_clear(KEY_SHUFFLE);		break;
++		case 0x025: map_key_clear(KEY_TV);		break;
++		case 0x026: map_key_clear(KEY_MENU);		break;
++		case 0x031: map_key_clear(KEY_AUDIO);		break;
++		case 0x032: map_key_clear(KEY_TEXT);		break;
++		case 0x033: map_key_clear(KEY_LAST);		break;
++		case 0x047: map_key_clear(KEY_MP3);		break;
++		case 0x048: map_key_clear(KEY_DVD);		break;
++		case 0x049: map_key_clear(KEY_MEDIA);		break;
++		case 0x04a: map_key_clear(KEY_VIDEO);		break;
++		case 0x04b: map_key_clear(KEY_ANGLE);		break;
++		case 0x04c: map_key_clear(KEY_LANGUAGE);	break;
++		case 0x04d: map_key_clear(KEY_SUBTITLE);	break;
++		case 0x051: map_key_clear(KEY_RED);		break;
++		case 0x052: map_key_clear(KEY_CLOSE);		break;
++
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
++		return 0;
++
++	set_bit(EV_REP, input->evbit);
++	switch (usage->hid & HID_USAGE) {
++		case 0xff01: map_key_clear(BTN_1);		break;
++		case 0xff02: map_key_clear(BTN_2);		break;
++		case 0xff03: map_key_clear(BTN_3);		break;
++		case 0xff04: map_key_clear(BTN_4);		break;
++		case 0xff05: map_key_clear(BTN_5);		break;
++		case 0xff06: map_key_clear(BTN_6);		break;
++		case 0xff07: map_key_clear(BTN_7);		break;
++		case 0xff08: map_key_clear(BTN_8);		break;
++		case 0xff09: map_key_clear(BTN_9);		break;
++		case 0xff0a: map_key_clear(BTN_A);		break;
++		case 0xff0b: map_key_clear(BTN_B);		break;
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++static int quirk_microsoft_ergonomy_kb(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
++		return 0;
++
++	switch(usage->hid & HID_USAGE) {
++		case 0xfd06: map_key_clear(KEY_CHAT);		break;
++		case 0xfd07: map_key_clear(KEY_PHONE);		break;
++		case 0xff05:
++			set_bit(EV_REP, input->evbit);
++			map_key_clear(KEY_F13);
++			set_bit(KEY_F14, input->keybit);
++			set_bit(KEY_F15, input->keybit);
++			set_bit(KEY_F16, input->keybit);
++			set_bit(KEY_F17, input->keybit);
++			set_bit(KEY_F18, input->keybit);
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++static int quirk_microsoft_presenter_8k(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
++		return 0;
++
++	set_bit(EV_REP, input->evbit);
++	switch(usage->hid & HID_USAGE) {
++		case 0xfd08: map_key_clear(KEY_FORWARD);	break;
++		case 0xfd09: map_key_clear(KEY_BACK);		break;
++		case 0xfd0b: map_key_clear(KEY_PLAYPAUSE);	break;
++		case 0xfd0e: map_key_clear(KEY_CLOSE);		break;
++		case 0xfd0f: map_key_clear(KEY_PLAY);		break;
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++static int quirk_petalynx_remote(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if (((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) &&
++			((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER))
++		return 0;
++
++	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_LOGIVENDOR)
++		switch(usage->hid & HID_USAGE) {
++			case 0x05a: map_key_clear(KEY_TEXT);		break;
++			case 0x05b: map_key_clear(KEY_RED);		break;
++			case 0x05c: map_key_clear(KEY_GREEN);		break;
++			case 0x05d: map_key_clear(KEY_YELLOW);		break;
++			case 0x05e: map_key_clear(KEY_BLUE);		break;
++			default:
++				return 0;
++		}
++
++	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
++		switch(usage->hid & HID_USAGE) {
++			case 0x0f6: map_key_clear(KEY_NEXT);            break;
++			case 0x0fa: map_key_clear(KEY_BACK);            break;
++			default:
++				return 0;
++		}
++	return 1;
++}
++
++static int quirk_logitech_wireless(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
++		return 0;
++
++	switch (usage->hid & HID_USAGE) {
++		case 0x1001: map_key_clear(KEY_MESSENGER);	break;
++		case 0x1003: map_key_clear(KEY_SOUND);		break;
++		case 0x1004: map_key_clear(KEY_VIDEO);		break;
++		case 0x1005: map_key_clear(KEY_AUDIO);		break;
++		case 0x100a: map_key_clear(KEY_DOCUMENTS);	break;
++		case 0x1011: map_key_clear(KEY_PREVIOUSSONG);	break;
++		case 0x1012: map_key_clear(KEY_NEXTSONG);	break;
++		case 0x1013: map_key_clear(KEY_CAMERA);		break;
++		case 0x1014: map_key_clear(KEY_MESSENGER);	break;
++		case 0x1015: map_key_clear(KEY_RECORD);		break;
++		case 0x1016: map_key_clear(KEY_PLAYER);		break;
++		case 0x1017: map_key_clear(KEY_EJECTCD);	break;
++		case 0x1018: map_key_clear(KEY_MEDIA);		break;
++		case 0x1019: map_key_clear(KEY_PROG1);		break;
++		case 0x101a: map_key_clear(KEY_PROG2);		break;
++		case 0x101b: map_key_clear(KEY_PROG3);		break;
++		case 0x101f: map_key_clear(KEY_ZOOMIN);		break;
++		case 0x1020: map_key_clear(KEY_ZOOMOUT);	break;
++		case 0x1021: map_key_clear(KEY_ZOOMRESET);	break;
++		case 0x1023: map_key_clear(KEY_CLOSE);		break;
++		case 0x1027: map_key_clear(KEY_MENU);		break;
++		/* this one is marked as 'Rotate' */
++		case 0x1028: map_key_clear(KEY_ANGLE);		break;
++		case 0x1029: map_key_clear(KEY_SHUFFLE);	break;
++		case 0x102a: map_key_clear(KEY_BACK);		break;
++		case 0x102b: map_key_clear(KEY_CYCLEWINDOWS);	break;
++		case 0x1041: map_key_clear(KEY_BATTERY);	break;
++		case 0x1042: map_key_clear(KEY_WORDPROCESSOR);	break;
++		case 0x1043: map_key_clear(KEY_SPREADSHEET);	break;
++		case 0x1044: map_key_clear(KEY_PRESENTATION);	break;
++		case 0x1045: map_key_clear(KEY_UNDO);		break;
++		case 0x1046: map_key_clear(KEY_REDO);		break;
++		case 0x1047: map_key_clear(KEY_PRINT);		break;
++		case 0x1048: map_key_clear(KEY_SAVE);		break;
++		case 0x1049: map_key_clear(KEY_PROG1);		break;
++		case 0x104a: map_key_clear(KEY_PROG2);		break;
++		case 0x104b: map_key_clear(KEY_PROG3);		break;
++		case 0x104c: map_key_clear(KEY_PROG4);		break;
++
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++static int quirk_cherry_genius_29e(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
++		return 0;
++
++	switch (usage->hid & HID_USAGE) {
++		case 0x156: map_key_clear(KEY_WORDPROCESSOR);	break;
++		case 0x157: map_key_clear(KEY_SPREADSHEET);	break;
++		case 0x158: map_key_clear(KEY_PRESENTATION);	break;
++		case 0x15c: map_key_clear(KEY_STOP);		break;
++
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++static int quirk_btc_8193(struct hid_usage *usage, struct input_dev *input,
++			      unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
++		return 0;
++
++	switch (usage->hid & HID_USAGE) {
++		case 0x230: map_key(BTN_MOUSE);			break;
++		case 0x231: map_rel(REL_WHEEL);			break;
++		/* 
++		 * this keyboard has a scrollwheel implemented in
++		 * totally broken way. We map this usage temporarily
++		 * to HWHEEL and handle it in the event quirk handler
++		 */
++		case 0x232: map_rel(REL_HWHEEL);		break;
++
++		default:
++			return 0;
++	}
++	return 1;
++}
++
++#define VENDOR_ID_BELKIN			0x1020
++#define DEVICE_ID_BELKIN_WIRELESS_KEYBOARD	0x0006
++
++#define VENDOR_ID_CHERRY			0x046a
++#define DEVICE_ID_CHERRY_CYMOTION		0x0023
++
++#define VENDOR_ID_CHICONY			0x04f2
++#define DEVICE_ID_CHICONY_TACTICAL_PAD		0x0418
++
++#define VENDOR_ID_EZKEY				0x0518
++#define DEVICE_ID_BTC_8193			0x0002
++
++#define VENDOR_ID_LOGITECH			0x046d
++#define DEVICE_ID_LOGITECH_RECEIVER		0xc101
++#define DEVICE_ID_S510_RECEIVER			0xc50c
++#define DEVICE_ID_S510_RECEIVER_2		0xc517
++#define DEVICE_ID_MX3000_RECEIVER		0xc513
++
++#define VENDOR_ID_MICROSOFT			0x045e
++#define DEVICE_ID_MS4K				0x00db
++#define DEVICE_ID_MS6K				0x00f9
++#define DEVICE_IS_MS_PRESENTER_8K_BT		0x0701
++#define DEVICE_ID_MS_PRESENTER_8K_USB		0x0713
++
++#define VENDOR_ID_MONTEREY			0x0566
++#define DEVICE_ID_GENIUS_KB29E			0x3004
++
++#define VENDOR_ID_PETALYNX			0x18b1
++#define DEVICE_ID_PETALYNX_MAXTER_REMOTE	0x0037
++
++static const struct hid_input_blacklist {
++	__u16 idVendor;
++	__u16 idProduct;
++	int (*quirk)(struct hid_usage *, struct input_dev *, unsigned long **, int *);
++} hid_input_blacklist[] = {
++	{ VENDOR_ID_BELKIN, DEVICE_ID_BELKIN_WIRELESS_KEYBOARD, quirk_belkin_wkbd },
++
++	{ VENDOR_ID_CHERRY, DEVICE_ID_CHERRY_CYMOTION, quirk_cherry_cymotion },
++
++	{ VENDOR_ID_CHICONY, DEVICE_ID_CHICONY_TACTICAL_PAD, quirk_chicony_tactical_pad },
++
++	{ VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 },
++
++	{ VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote },
++	{ VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless },
++	{ VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless },
++	{ VENDOR_ID_LOGITECH, DEVICE_ID_MX3000_RECEIVER, quirk_logitech_wireless },
++
++	{ VENDOR_ID_MICROSOFT, DEVICE_ID_MS4K, quirk_microsoft_ergonomy_kb },
++	{ VENDOR_ID_MICROSOFT, DEVICE_ID_MS6K, quirk_microsoft_ergonomy_kb },
++	{ VENDOR_ID_MICROSOFT, DEVICE_IS_MS_PRESENTER_8K_BT, quirk_microsoft_presenter_8k },
++	{ VENDOR_ID_MICROSOFT, DEVICE_ID_MS_PRESENTER_8K_USB, quirk_microsoft_presenter_8k },
++
++	{ VENDOR_ID_MONTEREY, DEVICE_ID_GENIUS_KB29E, quirk_cherry_genius_29e },
++
++	{ VENDOR_ID_PETALYNX, DEVICE_ID_PETALYNX_MAXTER_REMOTE, quirk_petalynx_remote },
++	
++	{ 0, 0, 0 }
 +};
-+#define to_driver(obj) container_of(obj, struct driver_private, kobj)
- 
-+/* initialisation functions */
- extern int devices_init(void);
- extern int buses_init(void);
- extern int classes_init(void);
-@@ -13,17 +49,16 @@ static inline int hypervisor_init(void) { return 0; }
- extern int platform_bus_init(void);
- extern int system_bus_init(void);
- extern int cpu_dev_init(void);
--extern int attribute_container_init(void);
++
++int hidinput_mapping_quirks(struct hid_usage *usage, 
++				   struct input_dev *input, 
++				   unsigned long **bit, int *max)
++{
++	struct hid_device *device = input_get_drvdata(input);
++	int i = 0;
++	
++	while (hid_input_blacklist[i].quirk) {
++		if (hid_input_blacklist[i].idVendor == device->vendor &&
++				hid_input_blacklist[i].idProduct == device->product)
++			return hid_input_blacklist[i].quirk(usage, input, bit, max);
++		i++;
++	}
++	return 0;
++}
++
++void hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value)
++{
++	struct input_dev *input;
++
++	input = field->hidinput->input;
++
++	if (((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_5) && (usage->hid == 0x00090005))
++		|| ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_7) && (usage->hid == 0x00090007))) {
++		if (value) hid->quirks |=  HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
++		else       hid->quirks &= ~HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
++		return;
++	}
++
++	if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_B8) &&
++			(usage->type == EV_REL) &&
++			(usage->code == REL_WHEEL)) {
++		hid->delayed_value = value;
++		return;
++	}
++
++	if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_B8) &&
++			(usage->hid == 0x000100b8)) {
++		input_event(input, EV_REL, value ? REL_HWHEEL : REL_WHEEL, hid->delayed_value);
++		return;
++	}
++
++	if ((hid->quirks & HID_QUIRK_INVERT_HWHEEL) && (usage->code == REL_HWHEEL)) {
++		input_event(input, usage->type, usage->code, -value);
++		return;
++	}
++
++	if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_ON) && (usage->code == REL_WHEEL)) {
++		input_event(input, usage->type, REL_HWHEEL, value);
++		return;
++	}
++
++	if ((hid->quirks & HID_QUIRK_APPLE_HAS_FN) && hidinput_apple_event(hid, input, usage, value))
++		return;
++
++	/* Handling MS keyboards special buttons */
++	if (hid->quirks & HID_QUIRK_MICROSOFT_KEYS && 
++			usage->hid == (HID_UP_MSVENDOR | 0xff05)) {
++		int key = 0;
++		static int last_key = 0;
++		switch (value) {
++			case 0x01: key = KEY_F14; break;
++			case 0x02: key = KEY_F15; break;
++			case 0x04: key = KEY_F16; break;
++			case 0x08: key = KEY_F17; break;
++			case 0x10: key = KEY_F18; break;
++			default: break;
++		}
++		if (key) {
++			input_event(input, usage->type, key, 1);
++			last_key = key;
++		} else {
++			input_event(input, usage->type, last_key, 0);
++		}
++	}
++
++	/* handle the temporary quirky mapping to HWHEEL */
++	if (hid->quirks & HID_QUIRK_HWHEEL_WHEEL_INVERT &&
++			usage->type == EV_REL && usage->code == REL_HWHEEL) {
++		input_event(input, usage->type, REL_WHEEL, -value);
++		return;
++	}
++}
++
++
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 0b27da7..5325d98 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -34,10 +34,10 @@
+ #include <linux/hid.h>
+ #include <linux/hid-debug.h>
  
--extern int bus_add_device(struct device * dev);
--extern void bus_attach_device(struct device * dev);
--extern void bus_remove_device(struct device * dev);
-+extern int bus_add_device(struct device *dev);
-+extern void bus_attach_device(struct device *dev);
-+extern void bus_remove_device(struct device *dev);
+-static int hid_pb_fnmode = 1;
+-module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644);
++static int hid_apple_fnmode = 1;
++module_param_named(pb_fnmode, hid_apple_fnmode, int, 0644);
+ MODULE_PARM_DESC(pb_fnmode,
+-		"Mode of fn key on PowerBooks (0 = disabled, 1 = fkeyslast, 2 = fkeysfirst)");
++		"Mode of fn key on Apple keyboards (0 = disabled, 1 = fkeyslast, 2 = fkeysfirst)");
  
--extern int bus_add_driver(struct device_driver *);
--extern void bus_remove_driver(struct device_driver *);
-+extern int bus_add_driver(struct device_driver *drv);
-+extern void bus_remove_driver(struct device_driver *drv);
+ #define unk	KEY_UNKNOWN
  
--extern void driver_detach(struct device_driver * drv);
--extern int driver_probe_device(struct device_driver *, struct device *);
-+extern void driver_detach(struct device_driver *drv);
-+extern int driver_probe_device(struct device_driver *drv, struct device *dev);
+@@ -86,10 +86,6 @@ static const struct {
+ #define map_abs_clear(c)	do { map_abs(c); clear_bit(c, bit); } while (0)
+ #define map_key_clear(c)	do { map_key(c); clear_bit(c, bit); } while (0)
  
- extern void sysdev_shutdown(void);
- extern int sysdev_suspend(pm_message_t state);
-@@ -44,4 +79,13 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
+-/* hardware needing special handling due to colliding MSVENDOR page usages */
+-#define IS_CHICONY_TACTICAL_PAD(x) (x->vendor == 0x04f2 && device->product == 0x0418)
+-#define IS_MS_KB(x) (x->vendor == 0x045e && (x->product == 0x00db || x->product == 0x00f9))
+-
+ #ifdef CONFIG_USB_HIDINPUT_POWERBOOK
  
- extern int devres_release_all(struct device *dev);
+ struct hidinput_key_translation {
+@@ -98,20 +94,36 @@ struct hidinput_key_translation {
+ 	u8 flags;
+ };
  
--extern struct kset devices_subsys;
-+extern struct kset *devices_kset;
+-#define POWERBOOK_FLAG_FKEY 0x01
++#define APPLE_FLAG_FKEY 0x01
 +
-+#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
-+extern void module_add_driver(struct module *mod, struct device_driver *drv);
-+extern void module_remove_driver(struct device_driver *drv);
-+#else
-+static inline void module_add_driver(struct module *mod,
-+				     struct device_driver *drv) { }
-+static inline void module_remove_driver(struct device_driver *drv) { }
-+#endif
-diff --git a/drivers/base/bus.c b/drivers/base/bus.c
-index 9a19b07..f484495 100644
---- a/drivers/base/bus.c
-+++ b/drivers/base/bus.c
-@@ -3,6 +3,8 @@
-  *
-  * Copyright (c) 2002-3 Patrick Mochel
-  * Copyright (c) 2002-3 Open Source Development Labs
-+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh at suse.de>
-+ * Copyright (c) 2007 Novell Inc.
-  *
-  * This file is released under the GPLv2
-  *
-@@ -17,14 +19,13 @@
- #include "power/power.h"
++static struct hidinput_key_translation apple_fn_keys[] = {
++	{ KEY_F1,       KEY_BRIGHTNESSDOWN,     APPLE_FLAG_FKEY },
++	{ KEY_F2,       KEY_BRIGHTNESSUP,       APPLE_FLAG_FKEY },
++	{ KEY_F3,       KEY_CYCLEWINDOWS,       APPLE_FLAG_FKEY }, /* Exposé */
++	{ KEY_F4,       KEY_FN_F4,              APPLE_FLAG_FKEY }, /* Dashboard */
++	{ KEY_F5,       KEY_FN_F5 },
++	{ KEY_F6,       KEY_FN_F6 },
++	{ KEY_F7,       KEY_BACK,               APPLE_FLAG_FKEY },
++	{ KEY_F8,       KEY_PLAYPAUSE,          APPLE_FLAG_FKEY },
++	{ KEY_F9,       KEY_FORWARD,            APPLE_FLAG_FKEY },
++	{ KEY_F10,      KEY_MUTE,               APPLE_FLAG_FKEY },
++	{ KEY_F11,      KEY_VOLUMEDOWN,         APPLE_FLAG_FKEY },
++	{ KEY_F12,      KEY_VOLUMEUP,           APPLE_FLAG_FKEY },
++	{ }
++};
  
- #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
--#define to_bus(obj) container_of(obj, struct bus_type, subsys.kobj)
-+#define to_bus(obj) container_of(obj, struct bus_type_private, subsys.kobj)
+ static struct hidinput_key_translation powerbook_fn_keys[] = {
+ 	{ KEY_BACKSPACE, KEY_DELETE },
+-	{ KEY_F1,       KEY_BRIGHTNESSDOWN,     POWERBOOK_FLAG_FKEY },
+-	{ KEY_F2,       KEY_BRIGHTNESSUP,       POWERBOOK_FLAG_FKEY },
+-	{ KEY_F3,       KEY_MUTE,               POWERBOOK_FLAG_FKEY },
+-	{ KEY_F4,       KEY_VOLUMEDOWN,         POWERBOOK_FLAG_FKEY },
+-	{ KEY_F5,       KEY_VOLUMEUP,           POWERBOOK_FLAG_FKEY },
+-	{ KEY_F6,       KEY_NUMLOCK,            POWERBOOK_FLAG_FKEY },
+-	{ KEY_F7,       KEY_SWITCHVIDEOMODE,    POWERBOOK_FLAG_FKEY },
+-	{ KEY_F8,       KEY_KBDILLUMTOGGLE,     POWERBOOK_FLAG_FKEY },
+-	{ KEY_F9,       KEY_KBDILLUMDOWN,       POWERBOOK_FLAG_FKEY },
+-	{ KEY_F10,      KEY_KBDILLUMUP,         POWERBOOK_FLAG_FKEY },
++	{ KEY_F1,       KEY_BRIGHTNESSDOWN,     APPLE_FLAG_FKEY },
++	{ KEY_F2,       KEY_BRIGHTNESSUP,       APPLE_FLAG_FKEY },
++	{ KEY_F3,       KEY_MUTE,               APPLE_FLAG_FKEY },
++	{ KEY_F4,       KEY_VOLUMEDOWN,         APPLE_FLAG_FKEY },
++	{ KEY_F5,       KEY_VOLUMEUP,           APPLE_FLAG_FKEY },
++	{ KEY_F6,       KEY_NUMLOCK,            APPLE_FLAG_FKEY },
++	{ KEY_F7,       KEY_SWITCHVIDEOMODE,    APPLE_FLAG_FKEY },
++	{ KEY_F8,       KEY_KBDILLUMTOGGLE,     APPLE_FLAG_FKEY },
++	{ KEY_F9,       KEY_KBDILLUMDOWN,       APPLE_FLAG_FKEY },
++	{ KEY_F10,      KEY_KBDILLUMUP,         APPLE_FLAG_FKEY },
+ 	{ KEY_UP,       KEY_PAGEUP },
+ 	{ KEY_DOWN,     KEY_PAGEDOWN },
+ 	{ KEY_LEFT,     KEY_HOME },
+@@ -142,7 +154,7 @@ static struct hidinput_key_translation powerbook_numlock_keys[] = {
+ 	{ }
+ };
  
- /*
-  * sysfs bindings for drivers
-  */
+-static struct hidinput_key_translation powerbook_iso_keyboard[] = {
++static struct hidinput_key_translation apple_iso_keyboard[] = {
+ 	{ KEY_GRAVE,    KEY_102ND },
+ 	{ KEY_102ND,    KEY_GRAVE },
+ 	{ }
+@@ -160,39 +172,42 @@ static struct hidinput_key_translation *find_translation(struct hidinput_key_tra
+ 	return NULL;
+ }
  
- #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
--#define to_driver(obj) container_of(obj, struct device_driver, kobj)
+-static int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
++int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ 		struct hid_usage *usage, __s32 value)
+ {
+ 	struct hidinput_key_translation *trans;
  
+ 	if (usage->code == KEY_FN) {
+-		if (value) hid->quirks |=  HID_QUIRK_POWERBOOK_FN_ON;
+-		else       hid->quirks &= ~HID_QUIRK_POWERBOOK_FN_ON;
++		if (value) hid->quirks |=  HID_QUIRK_APPLE_FN_ON;
++		else       hid->quirks &= ~HID_QUIRK_APPLE_FN_ON;
  
- static int __must_check bus_rescan_devices_helper(struct device *dev,
-@@ -32,37 +33,40 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
+ 		input_event(input, usage->type, usage->code, value);
  
- static struct bus_type *bus_get(struct bus_type *bus)
- {
--	return bus ? container_of(kset_get(&bus->subsys),
--				struct bus_type, subsys) : NULL;
-+	if (bus) {
-+		kset_get(&bus->p->subsys);
-+		return bus;
-+	}
-+	return NULL;
- }
+ 		return 1;
+ 	}
  
- static void bus_put(struct bus_type *bus)
- {
--	kset_put(&bus->subsys);
-+	if (bus)
-+		kset_put(&bus->p->subsys);
- }
+-	if (hid_pb_fnmode) {
++	if (hid_apple_fnmode) {
+ 		int do_translate;
  
--static ssize_t
--drv_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
-+static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
-+			     char *buf)
- {
--	struct driver_attribute * drv_attr = to_drv_attr(attr);
--	struct device_driver * drv = to_driver(kobj);
-+	struct driver_attribute *drv_attr = to_drv_attr(attr);
-+	struct driver_private *drv_priv = to_driver(kobj);
- 	ssize_t ret = -EIO;
+-		trans = find_translation(powerbook_fn_keys, usage->code);
++		trans = find_translation((hid->product < 0x220 ||
++					  hid->product >= 0x300) ?
++					 powerbook_fn_keys : apple_fn_keys,
++					 usage->code);
+ 		if (trans) {
+-			if (test_bit(usage->code, hid->pb_pressed_fn))
++			if (test_bit(usage->code, hid->apple_pressed_fn))
+ 				do_translate = 1;
+-			else if (trans->flags & POWERBOOK_FLAG_FKEY)
++			else if (trans->flags & APPLE_FLAG_FKEY)
+ 				do_translate =
+-					(hid_pb_fnmode == 2 &&  (hid->quirks & HID_QUIRK_POWERBOOK_FN_ON)) ||
+-					(hid_pb_fnmode == 1 && !(hid->quirks & HID_QUIRK_POWERBOOK_FN_ON));
++					(hid_apple_fnmode == 2 &&  (hid->quirks & HID_QUIRK_APPLE_FN_ON)) ||
++					(hid_apple_fnmode == 1 && !(hid->quirks & HID_QUIRK_APPLE_FN_ON));
+ 			else
+-				do_translate = (hid->quirks & HID_QUIRK_POWERBOOK_FN_ON);
++				do_translate = (hid->quirks & HID_QUIRK_APPLE_FN_ON);
  
- 	if (drv_attr->show)
--		ret = drv_attr->show(drv, buf);
-+		ret = drv_attr->show(drv_priv->driver, buf);
- 	return ret;
- }
+ 			if (do_translate) {
+ 				if (value)
+-					set_bit(usage->code, hid->pb_pressed_fn);
++					set_bit(usage->code, hid->apple_pressed_fn);
+ 				else
+-					clear_bit(usage->code, hid->pb_pressed_fn);
++					clear_bit(usage->code, hid->apple_pressed_fn);
  
--static ssize_t
--drv_attr_store(struct kobject * kobj, struct attribute * attr,
--	       const char * buf, size_t count)
-+static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
-+			      const char *buf, size_t count)
- {
--	struct driver_attribute * drv_attr = to_drv_attr(attr);
--	struct device_driver * drv = to_driver(kobj);
-+	struct driver_attribute *drv_attr = to_drv_attr(attr);
-+	struct driver_private *drv_priv = to_driver(kobj);
- 	ssize_t ret = -EIO;
+ 				input_event(input, usage->type, trans->to, value);
  
- 	if (drv_attr->store)
--		ret = drv_attr->store(drv, buf, count);
-+		ret = drv_attr->store(drv_priv->driver, buf, count);
- 	return ret;
- }
+@@ -217,8 +232,8 @@ static int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
+ 		}
+ 	}
  
-@@ -71,22 +75,12 @@ static struct sysfs_ops driver_sysfs_ops = {
- 	.store	= drv_attr_store,
- };
+-	if (hid->quirks & HID_QUIRK_POWERBOOK_ISO_KEYBOARD) {
+-		trans = find_translation(powerbook_iso_keyboard, usage->code);
++	if (hid->quirks & HID_QUIRK_APPLE_ISO_KEYBOARD) {
++		trans = find_translation(apple_iso_keyboard, usage->code);
+ 		if (trans) {
+ 			input_event(input, usage->type, trans->to, value);
+ 			return 1;
+@@ -228,31 +243,35 @@ static int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
+ 	return 0;
+ }
  
--
--static void driver_release(struct kobject * kobj)
-+static void driver_release(struct kobject *kobj)
+-static void hidinput_pb_setup(struct input_dev *input)
++static void hidinput_apple_setup(struct input_dev *input)
  {
--	/*
--	 * Yes this is an empty release function, it is this way because struct
--	 * device is always a static object, not a dynamic one.  Yes, this is
--	 * not nice and bad, but remember, drivers are code, reference counted
--	 * by the module count, not a device, which is really data.  And yes,
--	 * in the future I do want to have all drivers be created dynamically,
--	 * and am working toward that goal, but it will take a bit longer...
--	 *
--	 * But do not let this example give _anyone_ the idea that they can
--	 * create a release function without any code in it at all, to do that
--	 * is almost always wrong.  If you have any questions about this,
--	 * please send an email to <greg at kroah.com>
--	 */
-+	struct driver_private *drv_priv = to_driver(kobj);
+ 	struct hidinput_key_translation *trans;
+ 
+ 	set_bit(KEY_NUMLOCK, input->keybit);
+ 
+ 	/* Enable all needed keys */
++	for (trans = apple_fn_keys; trans->from; trans++)
++		set_bit(trans->to, input->keybit);
 +
-+	pr_debug("driver: '%s': %s\n", kobject_name(kobj), __FUNCTION__);
-+	kfree(drv_priv);
- }
+ 	for (trans = powerbook_fn_keys; trans->from; trans++)
+ 		set_bit(trans->to, input->keybit);
  
- static struct kobj_type driver_ktype = {
-@@ -94,34 +88,30 @@ static struct kobj_type driver_ktype = {
- 	.release	= driver_release,
- };
+ 	for (trans = powerbook_numlock_keys; trans->from; trans++)
+ 		set_bit(trans->to, input->keybit);
  
--
- /*
-  * sysfs bindings for buses
-  */
--
--
--static ssize_t
--bus_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
-+static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
-+			     char *buf)
- {
--	struct bus_attribute * bus_attr = to_bus_attr(attr);
--	struct bus_type * bus = to_bus(kobj);
-+	struct bus_attribute *bus_attr = to_bus_attr(attr);
-+	struct bus_type_private *bus_priv = to_bus(kobj);
- 	ssize_t ret = 0;
+-	for (trans = powerbook_iso_keyboard; trans->from; trans++)
++	for (trans = apple_iso_keyboard; trans->from; trans++)
+ 		set_bit(trans->to, input->keybit);
  
- 	if (bus_attr->show)
--		ret = bus_attr->show(bus, buf);
-+		ret = bus_attr->show(bus_priv->bus, buf);
- 	return ret;
  }
- 
--static ssize_t
--bus_attr_store(struct kobject * kobj, struct attribute * attr,
--	       const char * buf, size_t count)
-+static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
-+			      const char *buf, size_t count)
+ #else
+-static inline int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
+-		struct hid_usage *usage, __s32 value)
++inline int hidinput_apple_event(struct hid_device *hid,
++				       struct input_dev *input,
++				       struct hid_usage *usage, __s32 value)
  {
--	struct bus_attribute * bus_attr = to_bus_attr(attr);
--	struct bus_type * bus = to_bus(kobj);
-+	struct bus_attribute *bus_attr = to_bus_attr(attr);
-+	struct bus_type_private *bus_priv = to_bus(kobj);
- 	ssize_t ret = 0;
- 
- 	if (bus_attr->store)
--		ret = bus_attr->store(bus, buf, count);
-+		ret = bus_attr->store(bus_priv->bus, buf, count);
- 	return ret;
+ 	return 0;
  }
  
-@@ -130,24 +120,26 @@ static struct sysfs_ops bus_sysfs_ops = {
- 	.store	= bus_attr_store,
- };
- 
--int bus_create_file(struct bus_type * bus, struct bus_attribute * attr)
-+int bus_create_file(struct bus_type *bus, struct bus_attribute *attr)
+-static inline void hidinput_pb_setup(struct input_dev *input)
++static inline void hidinput_apple_setup(struct input_dev *input)
  {
- 	int error;
- 	if (bus_get(bus)) {
--		error = sysfs_create_file(&bus->subsys.kobj, &attr->attr);
-+		error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr);
- 		bus_put(bus);
- 	} else
- 		error = -EINVAL;
- 	return error;
  }
-+EXPORT_SYMBOL_GPL(bus_create_file);
- 
--void bus_remove_file(struct bus_type * bus, struct bus_attribute * attr)
-+void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
+ #endif
+@@ -343,7 +362,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
  {
- 	if (bus_get(bus)) {
--		sysfs_remove_file(&bus->subsys.kobj, &attr->attr);
-+		sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr);
- 		bus_put(bus);
+ 	struct input_dev *input = hidinput->input;
+ 	struct hid_device *device = input_get_drvdata(input);
+-	int max = 0, code;
++	int max = 0, code, ret;
+ 	unsigned long *bit = NULL;
+ 
+ 	field->hidinput = hidinput;
+@@ -362,6 +381,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		goto ignore;
  	}
- }
-+EXPORT_SYMBOL_GPL(bus_remove_file);
  
- static struct kobj_type bus_ktype = {
- 	.sysfs_ops	= &bus_sysfs_ops,
-@@ -166,7 +158,7 @@ static struct kset_uevent_ops bus_uevent_ops = {
- 	.filter = bus_uevent_filter,
- };
++	/* handle input mappings for quirky devices */
++	ret = hidinput_mapping_quirks(usage, input, &bit, &max);
++	if (ret)
++		goto mapped;
++
+ 	switch (usage->hid & HID_USAGE_PAGE) {
  
--static decl_subsys(bus, &bus_ktype, &bus_uevent_ops);
-+static struct kset *bus_kset;
+ 		case HID_UP_UNDEFINED:
+@@ -549,14 +573,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 				case 0x000: goto ignore;
+ 				case 0x034: map_key_clear(KEY_SLEEP);		break;
+ 				case 0x036: map_key_clear(BTN_MISC);		break;
+-				/*
+-				 * The next three are reported by Belkin wireless
+-				 * keyboard (1020:0006). These values are "reserved"
+-				 * in HUT 1.12.
+-				 */
+-				case 0x03a: map_key_clear(KEY_SOUND);           break;
+-				case 0x03b: map_key_clear(KEY_CAMERA);          break;
+-				case 0x03c: map_key_clear(KEY_DOCUMENTS);       break;
  
+ 				case 0x040: map_key_clear(KEY_MENU);		break;
+ 				case 0x045: map_key_clear(KEY_RADIO);		break;
+@@ -602,10 +618,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 				case 0x0e9: map_key_clear(KEY_VOLUMEUP);	break;
+ 				case 0x0ea: map_key_clear(KEY_VOLUMEDOWN);	break;
  
- #ifdef CONFIG_HOTPLUG
-@@ -224,10 +216,13 @@ static ssize_t driver_bind(struct device_driver *drv,
- 		if (dev->parent)
- 			up(&dev->parent->sem);
+-				/* reserved in HUT 1.12. Reported on Petalynx remote */
+-				case 0x0f6: map_key_clear(KEY_NEXT);		break;
+-				case 0x0fa: map_key_clear(KEY_BACK);		break;
+-
+ 				case 0x182: map_key_clear(KEY_BOOKMARKS);	break;
+ 				case 0x183: map_key_clear(KEY_CONFIG);		break;
+ 				case 0x184: map_key_clear(KEY_WORDPROCESSOR);	break;
+@@ -665,51 +677,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 				case 0x28b: map_key_clear(KEY_FORWARDMAIL);	break;
+ 				case 0x28c: map_key_clear(KEY_SEND);		break;
  
--		if (err > 0) 		/* success */
-+		if (err > 0) {
-+			/* success */
- 			err = count;
--		else if (err == 0)	/* driver didn't accept device */
-+		} else if (err == 0) {
-+			/* driver didn't accept device */
- 			err = -ENODEV;
-+		}
- 	}
- 	put_device(dev);
- 	bus_put(bus);
-@@ -237,16 +232,16 @@ static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
+-				/* Reported on a Cherry Cymotion keyboard */
+-				case 0x301: map_key_clear(KEY_PROG1);		break;
+-				case 0x302: map_key_clear(KEY_PROG2);		break;
+-				case 0x303: map_key_clear(KEY_PROG3);		break;
+-
+-				/* Reported on certain Logitech wireless keyboards */
+-				case 0x1001: map_key_clear(KEY_MESSENGER);	break;
+-				case 0x1003: map_key_clear(KEY_SOUND);		break;
+-				case 0x1004: map_key_clear(KEY_VIDEO);		break;
+-				case 0x1005: map_key_clear(KEY_AUDIO);		break;
+-				case 0x100a: map_key_clear(KEY_DOCUMENTS);	break;
+-				case 0x1011: map_key_clear(KEY_PREVIOUSSONG);	break;
+-				case 0x1012: map_key_clear(KEY_NEXTSONG);	break;
+-				case 0x1013: map_key_clear(KEY_CAMERA);		break;
+-				case 0x1014: map_key_clear(KEY_MESSENGER);	break;
+-				case 0x1015: map_key_clear(KEY_RECORD);		break;
+-				case 0x1016: map_key_clear(KEY_PLAYER);		break;
+-				case 0x1017: map_key_clear(KEY_EJECTCD);	break;
+-				case 0x1018: map_key_clear(KEY_MEDIA);          break;
+-				case 0x1019: map_key_clear(KEY_PROG1);		break;
+-				case 0x101a: map_key_clear(KEY_PROG2);		break;
+-				case 0x101b: map_key_clear(KEY_PROG3);		break;
+-				case 0x101f: map_key_clear(KEY_ZOOMIN);		break;
+-				case 0x1020: map_key_clear(KEY_ZOOMOUT);	break;
+-				case 0x1021: map_key_clear(KEY_ZOOMRESET);	break;
+-				case 0x1023: map_key_clear(KEY_CLOSE);		break;
+-				case 0x1027: map_key_clear(KEY_MENU);           break;
+-				/* this one is marked as 'Rotate' */
+-				case 0x1028: map_key_clear(KEY_ANGLE);		break;
+-				case 0x1029: map_key_clear(KEY_SHUFFLE);	break;
+-				case 0x102a: map_key_clear(KEY_BACK);           break;
+-				case 0x102b: map_key_clear(KEY_CYCLEWINDOWS);   break;
+-				case 0x1041: map_key_clear(KEY_BATTERY);	break;
+-				case 0x1042: map_key_clear(KEY_WORDPROCESSOR);	break;
+-				case 0x1043: map_key_clear(KEY_SPREADSHEET);	break;
+-				case 0x1044: map_key_clear(KEY_PRESENTATION);	break;
+-				case 0x1045: map_key_clear(KEY_UNDO);		break;
+-				case 0x1046: map_key_clear(KEY_REDO);		break;
+-				case 0x1047: map_key_clear(KEY_PRINT);		break;
+-				case 0x1048: map_key_clear(KEY_SAVE);		break;
+-				case 0x1049: map_key_clear(KEY_PROG1);		break;
+-				case 0x104a: map_key_clear(KEY_PROG2);		break;
+-				case 0x104b: map_key_clear(KEY_PROG3);		break;
+-				case 0x104c: map_key_clear(KEY_PROG4);		break;
+-
+ 				default:    goto ignore;
+ 			}
+ 			break;
+@@ -736,63 +703,16 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
  
- static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
- {
--	return sprintf(buf, "%d\n", bus->drivers_autoprobe);
-+	return sprintf(buf, "%d\n", bus->p->drivers_autoprobe);
- }
+ 		case HID_UP_MSVENDOR:
  
- static ssize_t store_drivers_autoprobe(struct bus_type *bus,
- 				       const char *buf, size_t count)
- {
- 	if (buf[0] == '0')
--		bus->drivers_autoprobe = 0;
-+		bus->p->drivers_autoprobe = 0;
- 	else
--		bus->drivers_autoprobe = 1;
-+		bus->p->drivers_autoprobe = 1;
- 	return count;
- }
+-			/* Unfortunately, there are multiple devices which
+-			 * emit usages from MSVENDOR page that require different
+-			 * handling. If this list grows too much in the future,
+-			 * more general handling will have to be introduced here
+-			 * (i.e. another blacklist).
+-			 */
+-
+-			/* Chicony Chicony KU-0418 tactical pad */
+-			if (IS_CHICONY_TACTICAL_PAD(device)) {
+-				set_bit(EV_REP, input->evbit);
+-				switch(usage->hid & HID_USAGE) {
+-					case 0xff01: map_key_clear(BTN_1);		break;
+-					case 0xff02: map_key_clear(BTN_2);		break;
+-					case 0xff03: map_key_clear(BTN_3);		break;
+-					case 0xff04: map_key_clear(BTN_4);		break;
+-					case 0xff05: map_key_clear(BTN_5);		break;
+-					case 0xff06: map_key_clear(BTN_6);		break;
+-					case 0xff07: map_key_clear(BTN_7);		break;
+-					case 0xff08: map_key_clear(BTN_8);		break;
+-					case 0xff09: map_key_clear(BTN_9);		break;
+-					case 0xff0a: map_key_clear(BTN_A);		break;
+-					case 0xff0b: map_key_clear(BTN_B);		break;
+-					default:    goto ignore;
+-				}
+-
+-			/* Microsoft Natural Ergonomic Keyboard 4000 */
+-			} else if (IS_MS_KB(device)) {
+-				switch(usage->hid & HID_USAGE) {
+-					case 0xfd06:
+-						map_key_clear(KEY_CHAT);
+-						break;
+-					case 0xfd07:
+-						map_key_clear(KEY_PHONE);
+-						break;
+-					case 0xff05:
+-						set_bit(EV_REP, input->evbit);
+-						map_key_clear(KEY_F13);
+-						set_bit(KEY_F14, input->keybit);
+-						set_bit(KEY_F15, input->keybit);
+-						set_bit(KEY_F16, input->keybit);
+-						set_bit(KEY_F17, input->keybit);
+-						set_bit(KEY_F18, input->keybit);
+-					default:	goto ignore;
+-				}
+-			} else {
+-				goto ignore;
+-			}
+-			break;
++			goto ignore;
  
-@@ -264,49 +259,49 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
- }
- #endif
+-		case HID_UP_CUSTOM: /* Reported on Logitech and Powerbook USB keyboards */
++		case HID_UP_CUSTOM: /* Reported on Logitech and Apple USB keyboards */
  
--static struct device * next_device(struct klist_iter * i)
-+static struct device *next_device(struct klist_iter *i)
- {
--	struct klist_node * n = klist_next(i);
-+	struct klist_node *n = klist_next(i);
- 	return n ? container_of(n, struct device, knode_bus) : NULL;
- }
+ 			set_bit(EV_REP, input->evbit);
+ 			switch(usage->hid & HID_USAGE) {
+ 				case 0x003:
+-					/* The fn key on Apple PowerBooks */
++					/* The fn key on Apple USB keyboards */
+ 					map_key_clear(KEY_FN);
+-					hidinput_pb_setup(input);
++					hidinput_apple_setup(input);
+ 					break;
  
- /**
-- *	bus_for_each_dev - device iterator.
-- *	@bus:	bus type.
-- *	@start:	device to start iterating from.
-- *	@data:	data for the callback.
-- *	@fn:	function to be called for each device.
-+ * bus_for_each_dev - device iterator.
-+ * @bus: bus type.
-+ * @start: device to start iterating from.
-+ * @data: data for the callback.
-+ * @fn: function to be called for each device.
-  *
-- *	Iterate over @bus's list of devices, and call @fn for each,
-- *	passing it @data. If @start is not NULL, we use that device to
-- *	begin iterating from.
-+ * Iterate over @bus's list of devices, and call @fn for each,
-+ * passing it @data. If @start is not NULL, we use that device to
-+ * begin iterating from.
-  *
-- *	We check the return of @fn each time. If it returns anything
-- *	other than 0, we break out and return that value.
-+ * We check the return of @fn each time. If it returns anything
-+ * other than 0, we break out and return that value.
-  *
-- *	NOTE: The device that returns a non-zero value is not retained
-- *	in any way, nor is its refcount incremented. If the caller needs
-- *	to retain this data, it should do, and increment the reference
-- *	count in the supplied callback.
-+ * NOTE: The device that returns a non-zero value is not retained
-+ * in any way, nor is its refcount incremented. If the caller needs
-+ * to retain this data, it should do, and increment the reference
-+ * count in the supplied callback.
-  */
+ 				default:    goto ignore;
+@@ -800,38 +720,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			break;
+ 
+ 		case HID_UP_LOGIVENDOR:
+-			set_bit(EV_REP, input->evbit);
+-			switch(usage->hid & HID_USAGE) {
+-				/* Reported on Logitech Ultra X Media Remote */
+-				case 0x004: map_key_clear(KEY_AGAIN);		break;
+-				case 0x00d: map_key_clear(KEY_HOME);		break;
+-				case 0x024: map_key_clear(KEY_SHUFFLE);		break;
+-				case 0x025: map_key_clear(KEY_TV);		break;
+-				case 0x026: map_key_clear(KEY_MENU);		break;
+-				case 0x031: map_key_clear(KEY_AUDIO);		break;
+-				case 0x032: map_key_clear(KEY_TEXT);		break;
+-				case 0x033: map_key_clear(KEY_LAST);		break;
+-				case 0x047: map_key_clear(KEY_MP3);		break;
+-				case 0x048: map_key_clear(KEY_DVD);		break;
+-				case 0x049: map_key_clear(KEY_MEDIA);		break;
+-				case 0x04a: map_key_clear(KEY_VIDEO);		break;
+-				case 0x04b: map_key_clear(KEY_ANGLE);		break;
+-				case 0x04c: map_key_clear(KEY_LANGUAGE);	break;
+-				case 0x04d: map_key_clear(KEY_SUBTITLE);	break;
+-				case 0x051: map_key_clear(KEY_RED);		break;
+-				case 0x052: map_key_clear(KEY_CLOSE);		break;
 -
--int bus_for_each_dev(struct bus_type * bus, struct device * start,
--		     void * data, int (*fn)(struct device *, void *))
-+int bus_for_each_dev(struct bus_type *bus, struct device *start,
-+		     void *data, int (*fn)(struct device *, void *))
- {
- 	struct klist_iter i;
--	struct device * dev;
-+	struct device *dev;
- 	int error = 0;
+-				/* Reported on Petalynx Maxter remote */
+-				case 0x05a: map_key_clear(KEY_TEXT);		break;
+-				case 0x05b: map_key_clear(KEY_RED);		break;
+-				case 0x05c: map_key_clear(KEY_GREEN);		break;
+-				case 0x05d: map_key_clear(KEY_YELLOW);		break;
+-				case 0x05e: map_key_clear(KEY_BLUE);		break;
+-
+-				default:    goto ignore;
+-			}
+-			break;
  
- 	if (!bus)
- 		return -EINVAL;
++			goto ignore;
++		
+ 		case HID_UP_PID:
  
--	klist_iter_init_node(&bus->klist_devices, &i,
-+	klist_iter_init_node(&bus->p->klist_devices, &i,
- 			     (start ? &start->knode_bus : NULL));
- 	while ((dev = next_device(&i)) && !error)
- 		error = fn(dev, data);
- 	klist_iter_exit(&i);
- 	return error;
- }
-+EXPORT_SYMBOL_GPL(bus_for_each_dev);
+ 			switch(usage->hid & HID_USAGE) {
+@@ -858,6 +749,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			break;
+ 	}
  
- /**
-  * bus_find_device - device iterator for locating a particular device.
-@@ -323,9 +318,9 @@ int bus_for_each_dev(struct bus_type * bus, struct device * start,
-  * if it does.  If the callback returns non-zero, this function will
-  * return to the caller and not iterate over any more devices.
-  */
--struct device * bus_find_device(struct bus_type *bus,
--				struct device *start, void *data,
--				int (*match)(struct device *, void *))
-+struct device *bus_find_device(struct bus_type *bus,
-+			       struct device *start, void *data,
-+			       int (*match)(struct device *dev, void *data))
- {
- 	struct klist_iter i;
- 	struct device *dev;
-@@ -333,7 +328,7 @@ struct device * bus_find_device(struct bus_type *bus,
- 	if (!bus)
- 		return NULL;
++mapped:
+ 	if (device->quirks & HID_QUIRK_MIGHTYMOUSE) {
+ 		if (usage->hid == HID_GD_Z)
+ 			map_rel(REL_HWHEEL);
+@@ -867,9 +759,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			map_key(BTN_1);
+ 	}
  
--	klist_iter_init_node(&bus->klist_devices, &i,
-+	klist_iter_init_node(&bus->p->klist_devices, &i,
- 			     (start ? &start->knode_bus : NULL));
- 	while ((dev = next_device(&i)))
- 		if (match(dev, data) && get_device(dev))
-@@ -341,51 +336,57 @@ struct device * bus_find_device(struct bus_type *bus,
- 	klist_iter_exit(&i);
- 	return dev;
- }
-+EXPORT_SYMBOL_GPL(bus_find_device);
+-	if ((device->quirks & (HID_QUIRK_2WHEEL_MOUSE_HACK_7 | HID_QUIRK_2WHEEL_MOUSE_HACK_5)) &&
+-		 (usage->type == EV_REL) && (usage->code == REL_WHEEL))
+-			set_bit(REL_HWHEEL, bit);
++	if ((device->quirks & (HID_QUIRK_2WHEEL_MOUSE_HACK_7 | HID_QUIRK_2WHEEL_MOUSE_HACK_5 |
++			HID_QUIRK_2WHEEL_MOUSE_HACK_B8)) && (usage->type == EV_REL) &&
++			(usage->code == REL_WHEEL))
++		set_bit(REL_HWHEEL, bit);
  
--
--static struct device_driver * next_driver(struct klist_iter * i)
-+static struct device_driver *next_driver(struct klist_iter *i)
- {
--	struct klist_node * n = klist_next(i);
--	return n ? container_of(n, struct device_driver, knode_bus) : NULL;
-+	struct klist_node *n = klist_next(i);
-+	struct driver_private *drv_priv;
-+
-+	if (n) {
-+		drv_priv = container_of(n, struct driver_private, knode_bus);
-+		return drv_priv->driver;
-+	}
-+	return NULL;
- }
+ 	if (((device->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_5) && (usage->hid == 0x00090005))
+ 		|| ((device->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_7) && (usage->hid == 0x00090007)))
+@@ -960,25 +853,8 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 	if (!usage->type)
+ 		return;
  
- /**
-- *	bus_for_each_drv - driver iterator
-- *	@bus:	bus we're dealing with.
-- *	@start:	driver to start iterating on.
-- *	@data:	data to pass to the callback.
-- *	@fn:	function to call for each driver.
-+ * bus_for_each_drv - driver iterator
-+ * @bus: bus we're dealing with.
-+ * @start: driver to start iterating on.
-+ * @data: data to pass to the callback.
-+ * @fn: function to call for each driver.
-  *
-- *	This is nearly identical to the device iterator above.
-- *	We iterate over each driver that belongs to @bus, and call
-- *	@fn for each. If @fn returns anything but 0, we break out
-- *	and return it. If @start is not NULL, we use it as the head
-- *	of the list.
-+ * This is nearly identical to the device iterator above.
-+ * We iterate over each driver that belongs to @bus, and call
-+ * @fn for each. If @fn returns anything but 0, we break out
-+ * and return it. If @start is not NULL, we use it as the head
-+ * of the list.
-  *
-- *	NOTE: we don't return the driver that returns a non-zero
-- *	value, nor do we leave the reference count incremented for that
-- *	driver. If the caller needs to know that info, it must set it
-- *	in the callback. It must also be sure to increment the refcount
-- *	so it doesn't disappear before returning to the caller.
-+ * NOTE: we don't return the driver that returns a non-zero
-+ * value, nor do we leave the reference count incremented for that
-+ * driver. If the caller needs to know that info, it must set it
-+ * in the callback. It must also be sure to increment the refcount
-+ * so it doesn't disappear before returning to the caller.
-  */
+-	if (((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_5) && (usage->hid == 0x00090005))
+-		|| ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_7) && (usage->hid == 0x00090007))) {
+-		if (value) hid->quirks |=  HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
+-		else       hid->quirks &= ~HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
+-		return;
+-	}
 -
--int bus_for_each_drv(struct bus_type * bus, struct device_driver * start,
--		     void * data, int (*fn)(struct device_driver *, void *))
-+int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
-+		     void *data, int (*fn)(struct device_driver *, void *))
- {
- 	struct klist_iter i;
--	struct device_driver * drv;
-+	struct device_driver *drv;
- 	int error = 0;
- 
- 	if (!bus)
- 		return -EINVAL;
- 
--	klist_iter_init_node(&bus->klist_drivers, &i,
--			     start ? &start->knode_bus : NULL);
-+	klist_iter_init_node(&bus->p->klist_drivers, &i,
-+			     start ? &start->p->knode_bus : NULL);
- 	while ((drv = next_driver(&i)) && !error)
- 		error = fn(drv, data);
- 	klist_iter_exit(&i);
- 	return error;
- }
-+EXPORT_SYMBOL_GPL(bus_for_each_drv);
+-	if ((hid->quirks & HID_QUIRK_INVERT_HWHEEL) && (usage->code == REL_HWHEEL)) {
+-		input_event(input, usage->type, usage->code, -value);
+-		return;
+-	}
+-
+-	if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_ON) && (usage->code == REL_WHEEL)) {
+-		input_event(input, usage->type, REL_HWHEEL, value);
+-		return;
+-	}
+-
+-	if ((hid->quirks & HID_QUIRK_POWERBOOK_HAS_FN) && hidinput_pb_event(hid, input, usage, value))
+-		return;
++	/* handle input events for quirky devices */
++	hidinput_event_quirks(hid, field, usage, value);
  
- static int device_add_attrs(struct bus_type *bus, struct device *dev)
- {
-@@ -396,7 +397,7 @@ static int device_add_attrs(struct bus_type *bus, struct device *dev)
- 		return 0;
+ 	if (usage->hat_min < usage->hat_max || usage->hat_dir) {
+ 		int hat_dir = usage->hat_dir;
+@@ -1039,25 +915,6 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 		return;
+ 	}
  
- 	for (i = 0; attr_name(bus->dev_attrs[i]); i++) {
--		error = device_create_file(dev,&bus->dev_attrs[i]);
-+		error = device_create_file(dev, &bus->dev_attrs[i]);
- 		if (error) {
- 			while (--i >= 0)
- 				device_remove_file(dev, &bus->dev_attrs[i]);
-@@ -406,13 +407,13 @@ static int device_add_attrs(struct bus_type *bus, struct device *dev)
- 	return error;
- }
+-	/* Handling MS keyboards special buttons */
+-	if (IS_MS_KB(hid) && usage->hid == (HID_UP_MSVENDOR | 0xff05)) {
+-		int key = 0;
+-		static int last_key = 0;
+-		switch (value) {
+-			case 0x01: key = KEY_F14; break;
+-			case 0x02: key = KEY_F15; break;
+-			case 0x04: key = KEY_F16; break;
+-			case 0x08: key = KEY_F17; break;
+-			case 0x10: key = KEY_F18; break;
+-			default: break;
+-		}
+-		if (key) {
+-			input_event(input, usage->type, key, 1);
+-			last_key = key;
+-		} else {
+-			input_event(input, usage->type, last_key, 0);
+-		}
+-	}
+ 	/* report the usage code as scancode if the key status has changed */
+ 	if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
+ 		input_event(input, EV_MSC, MSC_SCAN, usage->hid);
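The hid-input.c hunks above strip the Microsoft, Logitech, Cherry and Chicony special cases out of hidinput_configure_usage() and hidinput_hid_event() and route device-specific handling through two hooks, hidinput_mapping_quirks() and hidinput_event_quirks(). A minimal sketch of the dispatch shape such a hook usually has; the struct, table and handler below are illustrative, not the patch's own definitions:

#include <linux/hid.h>
#include <linux/input.h>

/* Illustrative only: a per-device mapping hook keyed on vendor/product. */
struct sketch_mapping_quirk {
	u16 vendor;
	u16 product;
	int (*map)(struct hid_usage *usage, struct input_dev *input,
		   unsigned long **bit, int *max);
};

static int sketch_map_nothing(struct hid_usage *usage, struct input_dev *input,
			      unsigned long **bit, int *max)
{
	return 0;	/* 0 means "not handled": fall back to the generic tables */
}

static const struct sketch_mapping_quirk sketch_quirks[] = {
	{ 0x05ac, 0x0220, sketch_map_nothing },	/* IDs purely illustrative */
	{ }
};

/* Returns nonzero when a device-specific handler consumed the usage,
 * which is what lets the caller jump straight to its "mapped:" label. */
static int sketch_mapping_quirks(struct hid_device *hid,
				 struct hid_usage *usage,
				 struct input_dev *input,
				 unsigned long **bit, int *max)
{
	const struct sketch_mapping_quirk *q;

	for (q = sketch_quirks; q->map; q++)
		if (hid->vendor == q->vendor && hid->product == q->product)
			return q->map(usage, input, bit, max);
	return 0;
}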
+diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
+index c557d70..7160fa6 100644
+--- a/drivers/hid/usbhid/Kconfig
++++ b/drivers/hid/usbhid/Kconfig
+@@ -25,12 +25,13 @@ comment "Input core support is needed for USB HID input layer or HIDBP support"
+ 	depends on USB_HID && INPUT=n
  
--static void device_remove_attrs(struct bus_type * bus, struct device * dev)
-+static void device_remove_attrs(struct bus_type *bus, struct device *dev)
- {
- 	int i;
+ config USB_HIDINPUT_POWERBOOK
+-	bool "Enable support for iBook/PowerBook/MacBook/MacBookPro special keys"
++	bool "Enable support for Apple laptop/aluminum USB special keys"
+ 	default n
+ 	depends on USB_HID
+ 	help
+ 	  Say Y here if you want support for the special keys (Fn, Numlock) on
+-	  Apple iBooks, PowerBooks, MacBooks and MacBook Pros.
++	  Apple iBooks, PowerBooks, MacBooks, MacBook Pros and aluminum USB
++	  keyboards.
  
- 	if (bus->dev_attrs) {
- 		for (i = 0; attr_name(bus->dev_attrs[i]); i++)
--			device_remove_file(dev,&bus->dev_attrs[i]);
-+			device_remove_file(dev, &bus->dev_attrs[i]);
- 	}
- }
+ 	  If unsure, say N.
  
-@@ -420,7 +421,7 @@ static void device_remove_attrs(struct bus_type * bus, struct device * dev)
- static int make_deprecated_bus_links(struct device *dev)
- {
- 	return sysfs_create_link(&dev->kobj,
--				 &dev->bus->subsys.kobj, "bus");
-+				 &dev->bus->p->subsys.kobj, "bus");
- }
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index a255285..b77b61e 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -19,6 +19,7 @@
  
- static void remove_deprecated_bus_links(struct device *dev)
-@@ -433,28 +434,28 @@ static inline void remove_deprecated_bus_links(struct device *dev) { }
- #endif
+ #define USB_VENDOR_ID_A4TECH		0x09da
+ #define USB_DEVICE_ID_A4TECH_WCP32PU	0x0006
++#define USB_DEVICE_ID_A4TECH_X5_005D	0x000a
  
- /**
-- *	bus_add_device - add device to bus
-- *	@dev:	device being added
-+ * bus_add_device - add device to bus
-+ * @dev: device being added
-  *
-- *	- Add the device to its bus's list of devices.
-- *	- Create link to device's bus.
-+ * - Add the device to its bus's list of devices.
-+ * - Create link to device's bus.
-  */
--int bus_add_device(struct device * dev)
-+int bus_add_device(struct device *dev)
- {
--	struct bus_type * bus = bus_get(dev->bus);
-+	struct bus_type *bus = bus_get(dev->bus);
- 	int error = 0;
+ #define USB_VENDOR_ID_AASHIMA		0x06d6
+ #define USB_DEVICE_ID_AASHIMA_GAMEPAD	0x0025
+@@ -28,6 +29,9 @@
+ #define USB_DEVICE_ID_ACECAD_FLAIR	0x0004
+ #define USB_DEVICE_ID_ACECAD_302	0x0008
  
- 	if (bus) {
--		pr_debug("bus %s: add device %s\n", bus->name, dev->bus_id);
-+		pr_debug("bus: '%s': add device %s\n", bus->name, dev->bus_id);
- 		error = device_add_attrs(bus, dev);
- 		if (error)
- 			goto out_put;
--		error = sysfs_create_link(&bus->devices.kobj,
-+		error = sysfs_create_link(&bus->p->devices_kset->kobj,
- 						&dev->kobj, dev->bus_id);
- 		if (error)
- 			goto out_id;
- 		error = sysfs_create_link(&dev->kobj,
--				&dev->bus->subsys.kobj, "subsystem");
-+				&dev->bus->p->subsys.kobj, "subsystem");
- 		if (error)
- 			goto out_subsys;
- 		error = make_deprecated_bus_links(dev);
-@@ -466,7 +467,7 @@ int bus_add_device(struct device * dev)
- out_deprecated:
- 	sysfs_remove_link(&dev->kobj, "subsystem");
- out_subsys:
--	sysfs_remove_link(&bus->devices.kobj, dev->bus_id);
-+	sysfs_remove_link(&bus->p->devices_kset->kobj, dev->bus_id);
- out_id:
- 	device_remove_attrs(bus, dev);
- out_put:
-@@ -475,56 +476,58 @@ out_put:
- }
++#define USB_VENDOR_ID_ADS_TECH 		0x06e1
++#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X	0xa155
++
+ #define USB_VENDOR_ID_AIPTEK		0x08ca
+ #define USB_DEVICE_ID_AIPTEK_01		0x0001
+ #define USB_DEVICE_ID_AIPTEK_10		0x0010
+@@ -59,6 +63,9 @@
+ #define USB_DEVICE_ID_APPLE_GEYSER4_ANSI	0x021a
+ #define USB_DEVICE_ID_APPLE_GEYSER4_ISO	0x021b
+ #define USB_DEVICE_ID_APPLE_GEYSER4_JIS	0x021c
++#define USB_DEVICE_ID_APPLE_ALU_ANSI	0x0220
++#define USB_DEVICE_ID_APPLE_ALU_ISO	0x0221
++#define USB_DEVICE_ID_APPLE_ALU_JIS	0x0222
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY	0x030a
+ #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY	0x030b
+ #define USB_DEVICE_ID_APPLE_IRCONTROL4	0x8242
+@@ -94,6 +101,9 @@
+ #define USB_DEVICE_ID_CODEMERCS_IOW_FIRST	0x1500
+ #define USB_DEVICE_ID_CODEMERCS_IOW_LAST	0x15ff
  
- /**
-- *	bus_attach_device - add device to bus
-- *	@dev:	device tried to attach to a driver
-+ * bus_attach_device - add device to bus
-+ * @dev: device tried to attach to a driver
-  *
-- *	- Add device to bus's list of devices.
-- *	- Try to attach to driver.
-+ * - Add device to bus's list of devices.
-+ * - Try to attach to driver.
-  */
--void bus_attach_device(struct device * dev)
-+void bus_attach_device(struct device *dev)
- {
- 	struct bus_type *bus = dev->bus;
- 	int ret = 0;
++#define USB_VENDOR_ID_CYGNAL		0x10c4
++#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X	0x818a
++
+ #define USB_VENDOR_ID_CYPRESS		0x04b4
+ #define USB_DEVICE_ID_CYPRESS_MOUSE	0x0001
+ #define USB_DEVICE_ID_CYPRESS_HIDCOM	0x5500
+@@ -114,6 +124,9 @@
+ #define USB_VENDOR_ID_ESSENTIAL_REALITY	0x0d7f
+ #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
  
- 	if (bus) {
- 		dev->is_registered = 1;
--		if (bus->drivers_autoprobe)
-+		if (bus->p->drivers_autoprobe)
- 			ret = device_attach(dev);
- 		WARN_ON(ret < 0);
- 		if (ret >= 0)
--			klist_add_tail(&dev->knode_bus, &bus->klist_devices);
-+			klist_add_tail(&dev->knode_bus, &bus->p->klist_devices);
- 		else
- 			dev->is_registered = 0;
- 	}
- }
++#define USB_VENDOR_ID_EZKEY 		0x0518
++#define USB_DEVICE_ID_BTC_8193		0x0002
++
+ #define USB_VENDOR_ID_GAMERON		0x0810
+ #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR	0x0001
  
- /**
-- *	bus_remove_device - remove device from bus
-- *	@dev:	device to be removed
-+ * bus_remove_device - remove device from bus
-+ * @dev: device to be removed
-  *
-- *	- Remove symlink from bus's directory.
-- *	- Delete device from bus's list.
-- *	- Detach from its driver.
-- *	- Drop reference taken in bus_add_device().
-+ * - Remove symlink from bus's directory.
-+ * - Delete device from bus's list.
-+ * - Detach from its driver.
-+ * - Drop reference taken in bus_add_device().
-  */
--void bus_remove_device(struct device * dev)
-+void bus_remove_device(struct device *dev)
- {
- 	if (dev->bus) {
- 		sysfs_remove_link(&dev->kobj, "subsystem");
- 		remove_deprecated_bus_links(dev);
--		sysfs_remove_link(&dev->bus->devices.kobj, dev->bus_id);
-+		sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
-+				  dev->bus_id);
- 		device_remove_attrs(dev->bus, dev);
- 		if (dev->is_registered) {
- 			dev->is_registered = 0;
- 			klist_del(&dev->knode_bus);
- 		}
--		pr_debug("bus %s: remove device %s\n", dev->bus->name, dev->bus_id);
-+		pr_debug("bus: '%s': remove device %s\n",
-+			 dev->bus->name, dev->bus_id);
- 		device_release_driver(dev);
- 		bus_put(dev->bus);
- 	}
- }
+@@ -134,6 +147,9 @@
+ #define USB_DEVICE_ID_GOGOPEN		0x00ce
+ #define USB_DEVICE_ID_PENPOWER		0x00f4
  
--static int driver_add_attrs(struct bus_type * bus, struct device_driver * drv)
-+static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
- {
- 	int error = 0;
- 	int i;
-@@ -533,19 +536,19 @@ static int driver_add_attrs(struct bus_type * bus, struct device_driver * drv)
- 		for (i = 0; attr_name(bus->drv_attrs[i]); i++) {
- 			error = driver_create_file(drv, &bus->drv_attrs[i]);
- 			if (error)
--				goto Err;
-+				goto err;
- 		}
- 	}
-- Done:
-+done:
- 	return error;
-- Err:
-+err:
- 	while (--i >= 0)
- 		driver_remove_file(drv, &bus->drv_attrs[i]);
--	goto Done;
-+	goto done;
- }
++#define USB_VENDOR_ID_GRETAGMACBETH	0x0971
++#define USB_DEVICE_ID_GRETAGMACBETH_HUEY	0x2005
++
+ #define USB_VENDOR_ID_GRIFFIN		0x077d
+ #define USB_DEVICE_ID_POWERMATE		0x0410
+ #define USB_DEVICE_ID_SOUNDKNOB		0x04AA
+@@ -278,7 +294,9 @@
+ #define USB_DEVICE_ID_LOGITECH_HARMONY_62 0xc14d
+ #define USB_DEVICE_ID_LOGITECH_HARMONY_63 0xc14e
+ #define USB_DEVICE_ID_LOGITECH_HARMONY_64 0xc14f
++#define USB_DEVICE_ID_LOGITECH_EXTREME_3D	0xc215
+ #define USB_DEVICE_ID_LOGITECH_WHEEL	0xc294
++#define USB_DEVICE_ID_LOGITECH_ELITE_KBD	0xc30a
+ #define USB_DEVICE_ID_LOGITECH_KBD	0xc311
+ #define USB_DEVICE_ID_S510_RECEIVER	0xc50c
+ #define USB_DEVICE_ID_S510_RECEIVER_2	0xc517
+@@ -296,6 +314,12 @@
  
--
--static void driver_remove_attrs(struct bus_type * bus, struct device_driver * drv)
-+static void driver_remove_attrs(struct bus_type *bus,
-+				struct device_driver *drv)
- {
- 	int i;
+ #define USB_VENDOR_ID_MICROSOFT		0x045e
+ #define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
++#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
++#define USB_DEVICE_ID_MS_NE4K		0x00db
++#define USB_DEVICE_ID_MS_LK6K		0x00f9
++
++#define USB_VENDOR_ID_MONTEREY		0x0566
++#define USB_DEVICE_ID_GENIUS_KB29E	0x3004
  
-@@ -616,39 +619,46 @@ static ssize_t driver_uevent_store(struct device_driver *drv,
- 	enum kobject_action action;
+ #define USB_VENDOR_ID_NCR		0x0404
+ #define USB_DEVICE_ID_NCR_FIRST		0x0300
+@@ -324,6 +348,9 @@
+ #define USB_VENDOR_ID_SAITEK		0x06a3
+ #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
  
- 	if (kobject_action_type(buf, count, &action) == 0)
--		kobject_uevent(&drv->kobj, action);
-+		kobject_uevent(&drv->p->kobj, action);
- 	return count;
- }
- static DRIVER_ATTR(uevent, S_IWUSR, NULL, driver_uevent_store);
++#define USB_VENDOR_ID_SAMSUNG		0x0419
++#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001
++
+ #define USB_VENDOR_ID_SONY			0x054c
+ #define USB_DEVICE_ID_SONY_PS3_CONTROLLER	0x0268
  
- /**
-- *	bus_add_driver - Add a driver to the bus.
-- *	@drv:	driver.
-- *
-+ * bus_add_driver - Add a driver to the bus.
-+ * @drv: driver.
-  */
- int bus_add_driver(struct device_driver *drv)
- {
--	struct bus_type * bus = bus_get(drv->bus);
-+	struct bus_type *bus;
-+	struct driver_private *priv;
- 	int error = 0;
+@@ -368,6 +395,7 @@ static const struct hid_blacklist {
+ } hid_blacklist[] = {
  
-+	bus = bus_get(drv->bus);
- 	if (!bus)
- 		return -EINVAL;
+ 	{ USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU, HID_QUIRK_2WHEEL_MOUSE_HACK_7 },
++	{ USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D, HID_QUIRK_2WHEEL_MOUSE_HACK_B8 },
+ 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE, HID_QUIRK_2WHEEL_MOUSE_HACK_5 },
  
--	pr_debug("bus %s: add driver %s\n", bus->name, drv->name);
--	error = kobject_set_name(&drv->kobj, "%s", drv->name);
--	if (error)
--		goto out_put_bus;
--	drv->kobj.kset = &bus->drivers;
--	error = kobject_register(&drv->kobj);
-+	pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name);
-+
-+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-+	if (!priv)
-+		return -ENOMEM;
+ 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS },
+@@ -390,6 +418,9 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT },
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV, HID_QUIRK_HIDINPUT },
+ 
++	{ USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193, HID_QUIRK_HWHEEL_WHEEL_INVERT },
 +
-+	klist_init(&priv->klist_devices, NULL, NULL);
-+	priv->driver = drv;
-+	drv->p = priv;
-+	priv->kobj.kset = bus->p->drivers_kset;
-+	error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
-+				     "%s", drv->name);
- 	if (error)
- 		goto out_put_bus;
++	{ USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20, HID_QUIRK_IGNORE },
+@@ -402,6 +433,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM, HID_QUIRK_IGNORE},
+ 	{ USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_CIDC, 0x0103, HID_QUIRK_IGNORE },
++	{ USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE, HID_QUIRK_IGNORE },
+@@ -423,6 +455,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER, HID_QUIRK_IGNORE },
++	{ USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90, HID_QUIRK_IGNORE },
+@@ -516,14 +549,18 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR, HID_QUIRK_IGNORE },
+ 	{ USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302, HID_QUIRK_IGNORE },
  
--	if (drv->bus->drivers_autoprobe) {
-+	if (drv->bus->p->drivers_autoprobe) {
- 		error = driver_attach(drv);
- 		if (error)
- 			goto out_unregister;
- 	}
--	klist_add_tail(&drv->knode_bus, &bus->klist_drivers);
-+	klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
- 	module_add_driver(drv->owner, drv);
++	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
+ 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
  
- 	error = driver_create_file(drv, &driver_attr_uevent);
-@@ -669,24 +679,24 @@ int bus_add_driver(struct device_driver *drv)
- 			__FUNCTION__, drv->name);
- 	}
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS },
++
+ 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE, HID_QUIRK_MIGHTYMOUSE | HID_QUIRK_INVERT_HWHEEL },
  
-+	kobject_uevent(&priv->kobj, KOBJ_ADD);
- 	return error;
- out_unregister:
--	kobject_unregister(&drv->kobj);
-+	kobject_put(&priv->kobj);
- out_put_bus:
- 	bus_put(bus);
- 	return error;
- }
+ 	{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ 	{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
  
- /**
-- *	bus_remove_driver - delete driver from bus's knowledge.
-- *	@drv:	driver.
-+ * bus_remove_driver - delete driver from bus's knowledge.
-+ * @drv: driver.
-  *
-- *	Detach the driver from the devices it controls, and remove
-- *	it from its bus's list of drivers. Finally, we drop the reference
-- *	to the bus we took in bus_add_driver().
-+ * Detach the driver from the devices it controls, and remove
-+ * it from its bus's list of drivers. Finally, we drop the reference
-+ * to the bus we took in bus_add_driver().
-  */
--
--void bus_remove_driver(struct device_driver * drv)
-+void bus_remove_driver(struct device_driver *drv)
- {
- 	if (!drv->bus)
- 		return;
-@@ -694,18 +704,17 @@ void bus_remove_driver(struct device_driver * drv)
- 	remove_bind_files(drv);
- 	driver_remove_attrs(drv->bus, drv);
- 	driver_remove_file(drv, &driver_attr_uevent);
--	klist_remove(&drv->knode_bus);
--	pr_debug("bus %s: remove driver %s\n", drv->bus->name, drv->name);
-+	klist_remove(&drv->p->knode_bus);
-+	pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
- 	driver_detach(drv);
- 	module_remove_driver(drv);
--	kobject_unregister(&drv->kobj);
-+	kobject_put(&drv->p->kobj);
- 	bus_put(drv->bus);
- }
+-	{ USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER, HID_QUIRK_SONY_PS3_CONTROLLER },
++	{ USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER, HID_QUIRK_SONY_PS3_CONTROLLER | HID_QUIRK_HIDDEV },
  
--
- /* Helper for bus_rescan_devices's iter */
- static int __must_check bus_rescan_devices_helper(struct device *dev,
--						void *data)
-+						  void *data)
- {
- 	int ret = 0;
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
+@@ -531,7 +568,9 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
+@@ -540,19 +579,22 @@ static const struct hid_blacklist {
  
-@@ -727,10 +736,11 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
-  * attached and rescan it against existing drivers to see if it matches
-  * any by calling device_attach() for the unbound devices.
-  */
--int bus_rescan_devices(struct bus_type * bus)
-+int bus_rescan_devices(struct bus_type *bus)
- {
- 	return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
- }
-+EXPORT_SYMBOL_GPL(bus_rescan_devices);
+ 	{ USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
  
- /**
-  * device_reprobe - remove driver for a device and probe for a new driver
-@@ -755,55 +765,55 @@ int device_reprobe(struct device *dev)
- EXPORT_SYMBOL_GPL(device_reprobe);
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_APPLE_ISO_KEYBOARD},
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_APPLE_ISO_KEYBOARD},
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_APPLE_ISO_KEYBOARD},
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI, HID_QUIRK_APPLE_HAS_FN },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS, HID_QUIRK_APPLE_HAS_FN },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
++	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
  
- /**
-- *	find_bus - locate bus by name.
-- *	@name:	name of bus.
-+ * find_bus - locate bus by name.
-+ * @name: name of bus.
-  *
-- *	Call kset_find_obj() to iterate over list of buses to
-- *	find a bus by name. Return bus if found.
-+ * Call kset_find_obj() to iterate over list of buses to
-+ * find a bus by name. Return bus if found.
-  *
-- *	Note that kset_find_obj increments bus' reference count.
-+ * Note that kset_find_obj increments bus' reference count.
-  */
- #if 0
--struct bus_type * find_bus(char * name)
-+struct bus_type *find_bus(char *name)
- {
--	struct kobject * k = kset_find_obj(&bus_subsys.kset, name);
-+	struct kobject *k = kset_find_obj(bus_kset, name);
- 	return k ? to_bus(k) : NULL;
- }
- #endif  /*  0  */
+ 	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658, HID_QUIRK_RESET_LEDS },
+ 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KBD, HID_QUIRK_RESET_LEDS },
+@@ -638,10 +680,14 @@ static const struct hid_rdesc_blacklist {
+ 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
+ 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_RDESC_LOGITECH },
+ 
++	{ USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E, HID_QUIRK_RDESC_BUTTON_CONSUMER },
++
+ 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_RDESC_MACBOOK_JIS },
  
+ 	{ USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_RDESC_PETALYNX },
  
- /**
-- *	bus_add_attrs - Add default attributes for this bus.
-- *	@bus:	Bus that has just been registered.
-+ * bus_add_attrs - Add default attributes for this bus.
-+ * @bus: Bus that has just been registered.
-  */
++	{ USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_RDESC_SAMSUNG_REMOTE },
++
+ 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
+ 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
  
--static int bus_add_attrs(struct bus_type * bus)
-+static int bus_add_attrs(struct bus_type *bus)
- {
- 	int error = 0;
- 	int i;
+@@ -884,6 +930,8 @@ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
+ 	return quirks;
+ }
  
- 	if (bus->bus_attrs) {
- 		for (i = 0; attr_name(bus->bus_attrs[i]); i++) {
--			error = bus_create_file(bus,&bus->bus_attrs[i]);
-+			error = bus_create_file(bus, &bus->bus_attrs[i]);
- 			if (error)
--				goto Err;
-+				goto err;
- 		}
++EXPORT_SYMBOL_GPL(usbhid_lookup_quirk);
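With usbhid_lookup_quirk() exported, code outside hid-quirks.c can consult the same blacklist; the usbkbd and usbmouse probe hunks further down use it to refuse devices that usbhid is told to ignore. A sketch of such a caller follows; the driver skeleton and function name are hypothetical, only the lookup call and the HID_QUIRK_IGNORE flag come from the patch:

#include <linux/usb.h>
#include <linux/hid.h>

static int sketch_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usb_device *dev = interface_to_usbdev(intf);
	u32 quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
					 le16_to_cpu(dev->descriptor.idProduct));

	if (quirks & HID_QUIRK_IGNORE)
		return -ENODEV;	/* leave the device to its dedicated driver */

	/* normal probe work would follow here */
	return 0;
}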
++
+ /*
+  * Cherry Cymotion keyboard have an invalid HID report descriptor,
+  * that needs fixing before we can parse it.
+@@ -914,6 +962,33 @@ static void usbhid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize)
  	}
-- Done:
-+done:
- 	return error;
-- Err:
-+err:
- 	while (--i >= 0)
--		bus_remove_file(bus,&bus->bus_attrs[i]);
--	goto Done;
-+		bus_remove_file(bus, &bus->bus_attrs[i]);
-+	goto done;
  }
  
--static void bus_remove_attrs(struct bus_type * bus)
-+static void bus_remove_attrs(struct bus_type *bus)
++/*
++ * Samsung IrDA remote controller (reports as Cypress USB Mouse).
++ *
++ * Vendor specific report #4 has a size of 48 bit,
++ * and therefore is not accepted when inspecting the descriptors.
++ * As a workaround we reinterpret the report as:
++ *   Variable type, count 6, size 8 bit, log. maximum 255
++ * The burden to reconstruct the data is moved into user space.
++ */
++static void usbhid_fixup_samsung_irda_descriptor(unsigned char *rdesc,
++						  int rsize)
++{
++	if (rsize >= 182 && rdesc[175] == 0x25
++			 && rdesc[176] == 0x40
++			 && rdesc[177] == 0x75
++			 && rdesc[178] == 0x30
++			 && rdesc[179] == 0x95
++			 && rdesc[180] == 0x01
++			 && rdesc[182] == 0x40) {
++		printk(KERN_INFO "Fixing up Samsung IrDA report descriptor\n");
++		rdesc[176] = 0xff;
++		rdesc[178] = 0x08;
++		rdesc[180] = 0x06;
++		rdesc[182] = 0x42;
++	}
++}
++
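Decoded, the fixup above rewrites the fragment at rdesc[175..182], assuming the standard HID short-item layout of one tag byte followed by one data byte (the 0x81 Input prefix at offset 181 is inferred, since the function only tests the data byte after it):

/* before the fixup */
static const unsigned char samsung_frag_before[8] = {
	0x25, 0x40,	/* Logical Maximum (64)         */
	0x75, 0x30,	/* Report Size (48 bits)        */
	0x95, 0x01,	/* Report Count (1)             */
	0x81, 0x40,	/* Input (Array, Null state)    */
};

/* after the fixup */
static const unsigned char samsung_frag_after[8] = {
	0x25, 0xff,	/* Logical Maximum (255)        */
	0x75, 0x08,	/* Report Size (8 bits)         */
	0x95, 0x06,	/* Report Count (6)             */
	0x81, 0x42,	/* Input (Variable, Null state) */
};

One rejected 48-bit field becomes six 8-bit variable fields, which matches the comment at the top of the function.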
+ /* Petalynx Maxter Remote has maximum for consumer page set too low */
+ static void usbhid_fixup_petalynx_descriptor(unsigned char *rdesc, int rsize)
  {
- 	int i;
- 
- 	if (bus->bus_attrs) {
- 		for (i = 0; attr_name(bus->bus_attrs[i]); i++)
--			bus_remove_file(bus,&bus->bus_attrs[i]);
-+			bus_remove_file(bus, &bus->bus_attrs[i]);
+@@ -965,6 +1040,14 @@ static void usbhid_fixup_macbook_descriptor(unsigned char *rdesc, int rsize)
  	}
  }
  
-@@ -827,32 +837,42 @@ static ssize_t bus_uevent_store(struct bus_type *bus,
- 	enum kobject_action action;
++static void usbhid_fixup_button_consumer_descriptor(unsigned char *rdesc, int rsize)
++{
++	if (rsize >= 30 && rdesc[29] == 0x05
++			&& rdesc[30] == 0x09) {
++		printk(KERN_INFO "Fixing up button/consumer in HID report descriptor\n");
++		rdesc[30] = 0x0c;
++	}
++}
  
- 	if (kobject_action_type(buf, count, &action) == 0)
--		kobject_uevent(&bus->subsys.kobj, action);
-+		kobject_uevent(&bus->p->subsys.kobj, action);
- 	return count;
+ static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned rsize)
+ {
+@@ -982,6 +1065,13 @@ static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned
+ 
+ 	if (quirks & HID_QUIRK_RDESC_MACBOOK_JIS)
+ 		usbhid_fixup_macbook_descriptor(rdesc, rsize);
++
++	if (quirks & HID_QUIRK_RDESC_BUTTON_CONSUMER)
++		usbhid_fixup_button_consumer_descriptor(rdesc, rsize);
++
++	if (quirks & HID_QUIRK_RDESC_SAMSUNG_REMOTE)
++		usbhid_fixup_samsung_irda_descriptor(rdesc, rsize);
++
  }
- static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
  
  /**
-- *	bus_register - register a bus with the system.
-- *	@bus:	bus.
-+ * bus_register - register a bus with the system.
-+ * @bus: bus.
-  *
-- *	Once we have that, we registered the bus with the kobject
-- *	infrastructure, then register the children subsystems it has:
-- *	the devices and drivers that belong to the bus.
-+ * Once we have that, we registered the bus with the kobject
-+ * infrastructure, then register the children subsystems it has:
-+ * the devices and drivers that belong to the bus.
-  */
--int bus_register(struct bus_type * bus)
-+int bus_register(struct bus_type *bus)
+diff --git a/drivers/hid/usbhid/hid-tmff.c b/drivers/hid/usbhid/hid-tmff.c
+index 69882a7..144578b 100644
+--- a/drivers/hid/usbhid/hid-tmff.c
++++ b/drivers/hid/usbhid/hid-tmff.c
+@@ -137,7 +137,8 @@ static int hid_tmff_play(struct input_dev *dev, void *data, struct ff_effect *ef
+ int hid_tmff_init(struct hid_device *hid)
  {
- 	int retval;
-+	struct bus_type_private *priv;
-+
-+	priv = kzalloc(sizeof(struct bus_type_private), GFP_KERNEL);
-+	if (!priv)
-+		return -ENOMEM;
- 
--	BLOCKING_INIT_NOTIFIER_HEAD(&bus->bus_notifier);
-+	priv->bus = bus;
-+	bus->p = priv;
+ 	struct tmff_device *tmff;
+-	struct list_head *pos;
++	struct hid_report *report;
++	struct list_head *report_list;
+ 	struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ 	struct input_dev *input_dev = hidinput->input;
+ 	const signed short *ff_bits = ff_joystick;
+@@ -149,8 +150,8 @@ int hid_tmff_init(struct hid_device *hid)
+ 		return -ENOMEM;
  
--	retval = kobject_set_name(&bus->subsys.kobj, "%s", bus->name);
-+	BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
-+
-+	retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name);
- 	if (retval)
- 		goto out;
+ 	/* Find the report to use */
+-	list_for_each(pos, &hid->report_enum[HID_OUTPUT_REPORT].report_list) {
+-		struct hid_report *report = (struct hid_report *)pos;
++	report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
++	list_for_each_entry(report, report_list, list) {
+ 		int fieldnum;
  
--	bus->subsys.kobj.kset = &bus_subsys;
-+	priv->subsys.kobj.kset = bus_kset;
-+	priv->subsys.kobj.ktype = &bus_ktype;
-+	priv->drivers_autoprobe = 1;
+ 		for (fieldnum = 0; fieldnum < report->maxfield; ++fieldnum) {
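The hid-tmff.c change above replaces an open-coded cast of the returned list_head with the list_for_each_entry() idiom. The cast is only safe when the list_head happens to be the first member of struct hid_report; list_for_each_entry() goes through container_of() and stays correct for any member layout. A generic sketch of the safe pattern, with illustrative names:

#include <linux/list.h>

struct sketch_item {
	int payload;
	struct list_head node;	/* deliberately not the first member */
};

static int sketch_sum(struct list_head *head)
{
	struct sketch_item *it;
	int sum = 0;

	/* container_of() recovers the full struct from 'node', so this
	 * stays correct wherever 'node' sits inside sketch_item. */
	list_for_each_entry(it, head, node)
		sum += it->payload;

	return sum;
}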
+diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
+index 775a1ef..5d9dbb4 100644
+--- a/drivers/hid/usbhid/usbkbd.c
++++ b/drivers/hid/usbhid/usbkbd.c
+@@ -235,6 +235,14 @@ static int usb_kbd_probe(struct usb_interface *iface,
+ 	if (!usb_endpoint_is_int_in(endpoint))
+ 		return -ENODEV;
  
--	retval = subsystem_register(&bus->subsys);
-+	retval = kset_register(&priv->subsys);
- 	if (retval)
- 		goto out;
++#ifdef CONFIG_USB_HID
++	if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
++				le16_to_cpu(dev->descriptor.idProduct))
++			& HID_QUIRK_IGNORE) {
++		return -ENODEV;
++	}
++#endif
++
+ 	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
+ 	maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
  
-@@ -860,23 +880,23 @@ int bus_register(struct bus_type * bus)
- 	if (retval)
- 		goto bus_uevent_fail;
+diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
+index f8ad691..df0d96d 100644
+--- a/drivers/hid/usbhid/usbmouse.c
++++ b/drivers/hid/usbhid/usbmouse.c
+@@ -131,6 +131,14 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
+ 	if (!usb_endpoint_is_int_in(endpoint))
+ 		return -ENODEV;
  
--	kobject_set_name(&bus->devices.kobj, "devices");
--	bus->devices.kobj.parent = &bus->subsys.kobj;
--	retval = kset_register(&bus->devices);
--	if (retval)
-+	priv->devices_kset = kset_create_and_add("devices", NULL,
-+						 &priv->subsys.kobj);
-+	if (!priv->devices_kset) {
-+		retval = -ENOMEM;
- 		goto bus_devices_fail;
++#ifdef CONFIG_USB_HID
++	if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
++				le16_to_cpu(dev->descriptor.idProduct))
++			& (HID_QUIRK_IGNORE|HID_QUIRK_IGNORE_MOUSE)) {
++		return -ENODEV;
 +	}
++#endif
++
+ 	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
+ 	maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
  
--	kobject_set_name(&bus->drivers.kobj, "drivers");
--	bus->drivers.kobj.parent = &bus->subsys.kobj;
--	bus->drivers.ktype = &driver_ktype;
--	retval = kset_register(&bus->drivers);
--	if (retval)
-+	priv->drivers_kset = kset_create_and_add("drivers", NULL,
-+						 &priv->subsys.kobj);
-+	if (!priv->drivers_kset) {
-+		retval = -ENOMEM;
- 		goto bus_drivers_fail;
-+	}
+diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
+index a37cb6b..3581282 100644
+--- a/drivers/i2c/algos/i2c-algo-bit.c
++++ b/drivers/i2c/algos/i2c-algo-bit.c
+@@ -1,7 +1,7 @@
+-/* ------------------------------------------------------------------------- */
+-/* i2c-algo-bit.c i2c driver algorithms for bit-shift adapters		     */
+-/* ------------------------------------------------------------------------- */
+-/*   Copyright (C) 1995-2000 Simon G. Vogl
++/* -------------------------------------------------------------------------
++ * i2c-algo-bit.c i2c driver algorithms for bit-shift adapters
++ * -------------------------------------------------------------------------
++ *   Copyright (C) 1995-2000 Simon G. Vogl
  
--	klist_init(&bus->klist_devices, klist_devices_get, klist_devices_put);
--	klist_init(&bus->klist_drivers, NULL, NULL);
-+	klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
-+	klist_init(&priv->klist_drivers, NULL, NULL);
+     This program is free software; you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+@@ -15,8 +15,8 @@
  
--	bus->drivers_autoprobe = 1;
- 	retval = add_probe_files(bus);
- 	if (retval)
- 		goto bus_probe_files_fail;
-@@ -885,66 +905,73 @@ int bus_register(struct bus_type * bus)
- 	if (retval)
- 		goto bus_attrs_fail;
+     You should have received a copy of the GNU General Public License
+     along with this program; if not, write to the Free Software
+-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.		     */
+-/* ------------------------------------------------------------------------- */
++    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ * ------------------------------------------------------------------------- */
  
--	pr_debug("bus type '%s' registered\n", bus->name);
-+	pr_debug("bus: '%s': registered\n", bus->name);
- 	return 0;
+ /* With some changes from Frodo Looijaard <frodol at dds.nl>, Kyösti Mälkki
+    <kmalkki at cc.hut.fi> and Jean Delvare <khali at linux-fr.org> */
+@@ -60,26 +60,26 @@ MODULE_PARM_DESC(i2c_debug,
  
- bus_attrs_fail:
- 	remove_probe_files(bus);
- bus_probe_files_fail:
--	kset_unregister(&bus->drivers);
-+	kset_unregister(bus->p->drivers_kset);
- bus_drivers_fail:
--	kset_unregister(&bus->devices);
-+	kset_unregister(bus->p->devices_kset);
- bus_devices_fail:
- 	bus_remove_file(bus, &bus_attr_uevent);
- bus_uevent_fail:
--	subsystem_unregister(&bus->subsys);
-+	kset_unregister(&bus->p->subsys);
-+	kfree(bus->p);
- out:
- 	return retval;
- }
-+EXPORT_SYMBOL_GPL(bus_register);
+ /* --- setting states on the bus with the right timing: ---------------	*/
  
- /**
-- *	bus_unregister - remove a bus from the system
-- *	@bus:	bus.
-+ * bus_unregister - remove a bus from the system
-+ * @bus: bus.
-  *
-- *	Unregister the child subsystems and the bus itself.
-- *	Finally, we call bus_put() to release the refcount
-+ * Unregister the child subsystems and the bus itself.
-+ * Finally, we call bus_put() to release the refcount
-  */
--void bus_unregister(struct bus_type * bus)
-+void bus_unregister(struct bus_type *bus)
+-#define setsda(adap,val) adap->setsda(adap->data, val)
+-#define setscl(adap,val) adap->setscl(adap->data, val)
+-#define getsda(adap) adap->getsda(adap->data)
+-#define getscl(adap) adap->getscl(adap->data)
++#define setsda(adap, val)	adap->setsda(adap->data, val)
++#define setscl(adap, val)	adap->setscl(adap->data, val)
++#define getsda(adap)		adap->getsda(adap->data)
++#define getscl(adap)		adap->getscl(adap->data)
+ 
+ static inline void sdalo(struct i2c_algo_bit_data *adap)
  {
--	pr_debug("bus %s: unregistering\n", bus->name);
-+	pr_debug("bus: '%s': unregistering\n", bus->name);
- 	bus_remove_attrs(bus);
- 	remove_probe_files(bus);
--	kset_unregister(&bus->drivers);
--	kset_unregister(&bus->devices);
-+	kset_unregister(bus->p->drivers_kset);
-+	kset_unregister(bus->p->devices_kset);
- 	bus_remove_file(bus, &bus_attr_uevent);
--	subsystem_unregister(&bus->subsys);
-+	kset_unregister(&bus->p->subsys);
-+	kfree(bus->p);
+-	setsda(adap,0);
++	setsda(adap, 0);
+ 	udelay((adap->udelay + 1) / 2);
  }
-+EXPORT_SYMBOL_GPL(bus_unregister);
  
- int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb)
+ static inline void sdahi(struct i2c_algo_bit_data *adap)
  {
--	return blocking_notifier_chain_register(&bus->bus_notifier, nb);
-+	return blocking_notifier_chain_register(&bus->p->bus_notifier, nb);
+-	setsda(adap,1);
++	setsda(adap, 1);
+ 	udelay((adap->udelay + 1) / 2);
  }
- EXPORT_SYMBOL_GPL(bus_register_notifier);
  
- int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb)
+ static inline void scllo(struct i2c_algo_bit_data *adap)
  {
--	return blocking_notifier_chain_unregister(&bus->bus_notifier, nb);
-+	return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb);
+-	setscl(adap,0);
++	setscl(adap, 0);
+ 	udelay(adap->udelay / 2);
  }
- EXPORT_SYMBOL_GPL(bus_unregister_notifier);
  
--int __init buses_init(void)
-+struct kset *bus_get_kset(struct bus_type *bus)
+@@ -91,22 +91,21 @@ static int sclhi(struct i2c_algo_bit_data *adap)
  {
--	return subsystem_register(&bus_subsys);
-+	return &bus->p->subsys;
- }
-+EXPORT_SYMBOL_GPL(bus_get_kset);
+ 	unsigned long start;
  
-+struct klist *bus_get_device_klist(struct bus_type *bus)
-+{
-+	return &bus->p->klist_devices;
-+}
-+EXPORT_SYMBOL_GPL(bus_get_device_klist);
+-	setscl(adap,1);
++	setscl(adap, 1);
  
--EXPORT_SYMBOL_GPL(bus_for_each_dev);
--EXPORT_SYMBOL_GPL(bus_find_device);
--EXPORT_SYMBOL_GPL(bus_for_each_drv);
--
--EXPORT_SYMBOL_GPL(bus_register);
--EXPORT_SYMBOL_GPL(bus_unregister);
--EXPORT_SYMBOL_GPL(bus_rescan_devices);
--
--EXPORT_SYMBOL_GPL(bus_create_file);
--EXPORT_SYMBOL_GPL(bus_remove_file);
-+int __init buses_init(void)
-+{
-+	bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
-+	if (!bus_kset)
-+		return -ENOMEM;
-+	return 0;
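The drivers/base/bus.c lines interleaved above as removals carry the previous revision's driver-core rework: the statically embedded ksets registered through subsystem_register()/kset_register() give way to ksets allocated on demand with kset_create_and_add() and hung off a private structure, and bus_register()/bus_unregister() gain explicit EXPORT_SYMBOL_GPL markers. A minimal sketch of that allocation pattern, using only the calls visible in those lines (example_ names are invented, error handling trimmed):

/* Illustrative only, not part of the patch. */
#include <linux/kobject.h>
#include <linux/errno.h>

static struct kset *example_bus_kset;

static int __init example_buses_init(void)
{
	/* a NULL parent puts the new kset at the top level of sysfs */
	example_bus_kset = kset_create_and_add("bus", NULL, NULL);
	if (!example_bus_kset)
		return -ENOMEM;

	/* per-bus directories are then parented below an existing kobject:
	 * kset_create_and_add("devices", NULL, &subsys->kobj);
	 */
	return 0;
}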
+ 	/* Not all adapters have scl sense line... */
+ 	if (!adap->getscl)
+ 		goto done;
+ 
+-	start=jiffies;
+-	while (! getscl(adap) ) {	
+- 		/* the hw knows how to read the clock line,
+- 		 * so we wait until it actually gets high.
+- 		 * This is safer as some chips may hold it low
+- 		 * while they are processing data internally. 
+- 		 */
+-		if (time_after_eq(jiffies, start+adap->timeout)) {
++	start = jiffies;
++	while (!getscl(adap)) {
++		/* This hw knows how to read the clock line, so we wait
++		 * until it actually gets high.  This is safer as some
++		 * chips may hold it low ("clock stretching") while they
++		 * are processing data internally.
++		 */
++		if (time_after_eq(jiffies, start + adap->timeout))
+ 			return -ETIMEDOUT;
+-		}
+ 		cond_resched();
+ 	}
+ #ifdef DEBUG
+@@ -118,11 +117,11 @@ static int sclhi(struct i2c_algo_bit_data *adap)
+ done:
+ 	udelay(adap->udelay);
+ 	return 0;
+-} 
 +}
-diff --git a/drivers/base/class.c b/drivers/base/class.c
-index a863bb0..59cf358 100644
---- a/drivers/base/class.c
-+++ b/drivers/base/class.c
-@@ -17,16 +17,17 @@
- #include <linux/kdev_t.h>
- #include <linux/err.h>
- #include <linux/slab.h>
-+#include <linux/genhd.h>
- #include "base.h"
  
- #define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
- #define to_class(obj) container_of(obj, struct class, subsys.kobj)
  
--static ssize_t
--class_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
-+static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
-+			       char *buf)
+ /* --- other auxiliary functions --------------------------------------	*/
+-static void i2c_start(struct i2c_algo_bit_data *adap) 
++static void i2c_start(struct i2c_algo_bit_data *adap)
  {
--	struct class_attribute * class_attr = to_class_attr(attr);
--	struct class * dc = to_class(kobj);
-+	struct class_attribute *class_attr = to_class_attr(attr);
-+	struct class *dc = to_class(kobj);
- 	ssize_t ret = -EIO;
+ 	/* assert: scl, sda are high */
+ 	setsda(adap, 0);
+@@ -130,7 +129,7 @@ static void i2c_start(struct i2c_algo_bit_data *adap)
+ 	scllo(adap);
+ }
  
- 	if (class_attr->show)
-@@ -34,12 +35,11 @@ class_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
- 	return ret;
+-static void i2c_repstart(struct i2c_algo_bit_data *adap) 
++static void i2c_repstart(struct i2c_algo_bit_data *adap)
+ {
+ 	/* assert: scl is low */
+ 	sdahi(adap);
+@@ -141,18 +140,18 @@ static void i2c_repstart(struct i2c_algo_bit_data *adap)
  }
  
--static ssize_t
--class_attr_store(struct kobject * kobj, struct attribute * attr,
--		 const char * buf, size_t count)
-+static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
-+				const char *buf, size_t count)
+ 
+-static void i2c_stop(struct i2c_algo_bit_data *adap) 
++static void i2c_stop(struct i2c_algo_bit_data *adap)
  {
--	struct class_attribute * class_attr = to_class_attr(attr);
--	struct class * dc = to_class(kobj);
-+	struct class_attribute *class_attr = to_class_attr(attr);
-+	struct class *dc = to_class(kobj);
- 	ssize_t ret = -EIO;
+ 	/* assert: scl is low */
+ 	sdalo(adap);
+-	sclhi(adap); 
++	sclhi(adap);
+ 	setsda(adap, 1);
+ 	udelay(adap->udelay);
+ }
  
- 	if (class_attr->store)
-@@ -47,7 +47,7 @@ class_attr_store(struct kobject * kobj, struct attribute * attr,
- 	return ret;
+ 
+ 
+-/* send a byte without start cond., look for arbitration, 
++/* send a byte without start cond., look for arbitration,
+    check ackn. from slave */
+ /* returns:
+  * 1 if the device acknowledged
+@@ -167,27 +166,33 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
+ 	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+ 
+ 	/* assert: scl is low */
+-	for ( i=7 ; i>=0 ; i-- ) {
++	for (i = 7; i >= 0; i--) {
+ 		sb = (c >> i) & 1;
+-		setsda(adap,sb);
++		setsda(adap, sb);
+ 		udelay((adap->udelay + 1) / 2);
+-		if (sclhi(adap)<0) { /* timed out */
++		if (sclhi(adap) < 0) { /* timed out */
+ 			bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
+ 				"timeout at bit #%d\n", (int)c, i);
+ 			return -ETIMEDOUT;
+-		};
+-		/* do arbitration here: 
+-		 * if ( sb && ! getsda(adap) ) -> ouch! Get out of here.
++		}
++		/* FIXME do arbitration here:
++		 * if (sb && !getsda(adap)) -> ouch! Get out of here.
++		 *
++		 * Report a unique code, so higher level code can retry
++		 * the whole (combined) message and *NOT* issue STOP.
+ 		 */
+ 		scllo(adap);
+ 	}
+ 	sdahi(adap);
+-	if (sclhi(adap)<0){ /* timeout */
++	if (sclhi(adap) < 0) { /* timeout */
+ 		bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
+ 			"timeout at ack\n", (int)c);
+ 		return -ETIMEDOUT;
+-	};
+-	/* read ack: SDA should be pulled down by slave */
++	}
++
++	/* read ack: SDA should be pulled down by slave, or it may
++	 * NAK (usually to report problems with the data we wrote).
++	 */
+ 	ack = !getsda(adap);    /* ack: sda is pulled low -> success */
+ 	bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c,
+ 		ack ? "A" : "NA");
+@@ -198,24 +203,24 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
  }
  
--static void class_release(struct kobject * kobj)
-+static void class_release(struct kobject *kobj)
+ 
+-static int i2c_inb(struct i2c_adapter *i2c_adap) 
++static int i2c_inb(struct i2c_adapter *i2c_adap)
  {
- 	struct class *class = to_class(kobj);
+ 	/* read byte via i2c port, without start/stop sequence	*/
+ 	/* acknowledge is sent in i2c_read.			*/
+ 	int i;
+-	unsigned char indata=0;
++	unsigned char indata = 0;
+ 	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
  
-@@ -71,20 +71,20 @@ static struct kobj_type class_ktype = {
- };
+ 	/* assert: scl is low */
+ 	sdahi(adap);
+-	for (i=0;i<8;i++) {
+-		if (sclhi(adap)<0) { /* timeout */
++	for (i = 0; i < 8; i++) {
++		if (sclhi(adap) < 0) { /* timeout */
+ 			bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
+ 				"#%d\n", 7 - i);
+ 			return -ETIMEDOUT;
+-		};
++		}
+ 		indata *= 2;
+-		if ( getsda(adap) ) 
++		if (getsda(adap))
+ 			indata |= 0x01;
+ 		setscl(adap, 0);
+ 		udelay(i == 7 ? adap->udelay / 2 : adap->udelay);
+@@ -228,66 +233,67 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
+  * Sanity check for the adapter hardware - check the reaction of
+  * the bus lines only if it seems to be idle.
+  */
+-static int test_bus(struct i2c_algo_bit_data *adap, char* name) {
+-	int scl,sda;
++static int test_bus(struct i2c_algo_bit_data *adap, char *name)
++{
++	int scl, sda;
  
- /* Hotplug events for classes go to the class_obj subsys */
--static decl_subsys(class, &class_ktype, NULL);
-+static struct kset *class_kset;
+-	if (adap->getscl==NULL)
++	if (adap->getscl == NULL)
+ 		pr_info("%s: Testing SDA only, SCL is not readable\n", name);
  
+-	sda=getsda(adap);
+-	scl=(adap->getscl==NULL?1:getscl(adap));
+-	if (!scl || !sda ) {
++	sda = getsda(adap);
++	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
++	if (!scl || !sda) {
+ 		printk(KERN_WARNING "%s: bus seems to be busy\n", name);
+ 		goto bailout;
+ 	}
  
--int class_create_file(struct class * cls, const struct class_attribute * attr)
-+int class_create_file(struct class *cls, const struct class_attribute *attr)
- {
- 	int error;
--	if (cls) {
-+	if (cls)
- 		error = sysfs_create_file(&cls->subsys.kobj, &attr->attr);
--	} else
-+	else
- 		error = -EINVAL;
- 	return error;
- }
+ 	sdalo(adap);
+-	sda=getsda(adap);
+-	scl=(adap->getscl==NULL?1:getscl(adap));
+-	if ( 0 != sda ) {
++	sda = getsda(adap);
++	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
++	if (sda) {
+ 		printk(KERN_WARNING "%s: SDA stuck high!\n", name);
+ 		goto bailout;
+ 	}
+-	if ( 0 == scl ) {
++	if (!scl) {
+ 		printk(KERN_WARNING "%s: SCL unexpected low "
+ 		       "while pulling SDA low!\n", name);
+ 		goto bailout;
+-	}		
++	}
  
--void class_remove_file(struct class * cls, const struct class_attribute * attr)
-+void class_remove_file(struct class *cls, const struct class_attribute *attr)
+ 	sdahi(adap);
+-	sda=getsda(adap);
+-	scl=(adap->getscl==NULL?1:getscl(adap));
+-	if ( 0 == sda ) {
++	sda = getsda(adap);
++	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
++	if (!sda) {
+ 		printk(KERN_WARNING "%s: SDA stuck low!\n", name);
+ 		goto bailout;
+ 	}
+-	if ( 0 == scl ) {
++	if (!scl) {
+ 		printk(KERN_WARNING "%s: SCL unexpected low "
+ 		       "while pulling SDA high!\n", name);
+ 		goto bailout;
+ 	}
+ 
+ 	scllo(adap);
+-	sda=getsda(adap);
+-	scl=(adap->getscl==NULL?0:getscl(adap));
+-	if ( 0 != scl ) {
++	sda = getsda(adap);
++	scl = (adap->getscl == NULL) ? 0 : getscl(adap);
++	if (scl) {
+ 		printk(KERN_WARNING "%s: SCL stuck high!\n", name);
+ 		goto bailout;
+ 	}
+-	if ( 0 == sda ) {
++	if (!sda) {
+ 		printk(KERN_WARNING "%s: SDA unexpected low "
+ 		       "while pulling SCL low!\n", name);
+ 		goto bailout;
+ 	}
+-	
++
+ 	sclhi(adap);
+-	sda=getsda(adap);
+-	scl=(adap->getscl==NULL?1:getscl(adap));
+-	if ( 0 == scl ) {
++	sda = getsda(adap);
++	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
++	if (!scl) {
+ 		printk(KERN_WARNING "%s: SCL stuck low!\n", name);
+ 		goto bailout;
+ 	}
+-	if ( 0 == sda ) {
++	if (!sda) {
+ 		printk(KERN_WARNING "%s: SDA unexpected low "
+ 		       "while pulling SCL high!\n", name);
+ 		goto bailout;
+@@ -314,9 +320,10 @@ static int try_address(struct i2c_adapter *i2c_adap,
+ 		       unsigned char addr, int retries)
  {
- 	if (cls)
- 		sysfs_remove_file(&cls->subsys.kobj, &attr->attr);
-@@ -93,48 +93,48 @@ void class_remove_file(struct class * cls, const struct class_attribute * attr)
- static struct class *class_get(struct class *cls)
+ 	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+-	int i,ret = -1;
+-	for (i=0;i<=retries;i++) {
+-		ret = i2c_outb(i2c_adap,addr);
++	int i, ret = -1;
++
++	for (i = 0; i <= retries; i++) {
++		ret = i2c_outb(i2c_adap, addr);
+ 		if (ret == 1 || i == retries)
+ 			break;
+ 		bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
+@@ -338,20 +345,38 @@ static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
  {
- 	if (cls)
--		return container_of(kset_get(&cls->subsys), struct class, subsys);
-+		return container_of(kset_get(&cls->subsys),
-+				    struct class, subsys);
- 	return NULL;
- }
+ 	const unsigned char *temp = msg->buf;
+ 	int count = msg->len;
+-	unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK; 
++	unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK;
+ 	int retval;
+-	int wrcount=0;
++	int wrcount = 0;
  
--static void class_put(struct class * cls)
-+static void class_put(struct class *cls)
+ 	while (count > 0) {
+ 		retval = i2c_outb(i2c_adap, *temp);
+-		if ((retval>0) || (nak_ok && (retval==0)))  { /* ok or ignored NAK */
+-			count--; 
++
++		/* OK/ACK; or ignored NAK */
++		if ((retval > 0) || (nak_ok && (retval == 0))) {
++			count--;
+ 			temp++;
+ 			wrcount++;
+-		} else { /* arbitration or no acknowledge */
+-			dev_err(&i2c_adap->dev, "sendbytes: error - bailout.\n");
+-			return (retval<0)? retval : -EFAULT;
+-			        /* got a better one ?? */
++
++		/* A slave NAKing the master means the slave didn't like
++		 * something about the data it saw.  For example, maybe
++		 * the SMBus PEC was wrong.
++		 */
++		} else if (retval == 0) {
++			dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n");
++			return -EIO;
++
++		/* Timeout; or (someday) lost arbitration
++		 *
++		 * FIXME Lost ARB implies retrying the transaction from
++		 * the first message, after the "winning" master issues
++		 * its STOP.  As a rule, upper layer code has no reason
++		 * to know or care about this ... it is *NOT* an error.
++		 */
++		} else {
++			dev_err(&i2c_adap->dev, "sendbytes: error %d\n",
++					retval);
++			return retval;
+ 		}
+ 	}
+ 	return wrcount;
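The sendbytes() rework just above stops folding every failure into -EFAULT: a clean NAK from the slave (i2c_outb() returning 0, unless I2C_M_IGNORE_NAK is set) now yields -EIO, while a negative return from i2c_outb() (a timeout today; lost arbitration once the FIXME is addressed) is passed through unchanged. A hedged sketch of how a caller might tell the cases apart; the helper below is invented for this note:

/* Illustrative only, not part of the patch. */
#include <linux/device.h>
#include <linux/errno.h>

static void example_report_write(struct device *dev, int ret, int expected)
{
	if (ret == expected)
		return;				/* every byte was ACKed */
	if (ret == -EIO)
		dev_dbg(dev, "slave NAKed the data (bad value, PEC, ...)\n");
	else if (ret == -ETIMEDOUT)
		dev_dbg(dev, "bus timeout; SCL or SDA may be stuck\n");
	else
		dev_dbg(dev, "write failed: %d\n", ret);
}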
+@@ -376,14 +401,14 @@ static int acknak(struct i2c_adapter *i2c_adap, int is_ack)
+ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
  {
- 	if (cls)
- 		kset_put(&cls->subsys);
- }
+ 	int inval;
+-	int rdcount=0;   	/* counts bytes read */
++	int rdcount = 0;	/* counts bytes read */
+ 	unsigned char *temp = msg->buf;
+ 	int count = msg->len;
+ 	const unsigned flags = msg->flags;
  
--
--static int add_class_attrs(struct class * cls)
-+static int add_class_attrs(struct class *cls)
+ 	while (count > 0) {
+ 		inval = i2c_inb(i2c_adap);
+-		if (inval>=0) {
++		if (inval >= 0) {
+ 			*temp = inval;
+ 			rdcount++;
+ 		} else {   /* read timed out */
+@@ -431,7 +456,7 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
+  * returns:
+  *  0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set
+  * -x an error occurred (like: -EREMOTEIO if the device did not answer, or
+- *	-ETIMEDOUT, for example if the lines are stuck...) 
++ *	-ETIMEDOUT, for example if the lines are stuck...)
+  */
+ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
  {
- 	int i;
- 	int error = 0;
+@@ -443,10 +468,10 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
+ 	int ret, retries;
  
- 	if (cls->class_attrs) {
- 		for (i = 0; attr_name(cls->class_attrs[i]); i++) {
--			error = class_create_file(cls,&cls->class_attrs[i]);
-+			error = class_create_file(cls, &cls->class_attrs[i]);
- 			if (error)
--				goto Err;
-+				goto error;
+ 	retries = nak_ok ? 0 : i2c_adap->retries;
+-	
+-	if ( (flags & I2C_M_TEN)  ) { 
++
++	if (flags & I2C_M_TEN) {
+ 		/* a ten bit address */
+-		addr = 0xf0 | (( msg->addr >> 7) & 0x03);
++		addr = 0xf0 | ((msg->addr >> 7) & 0x03);
+ 		bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
+ 		/* try extended address code...*/
+ 		ret = try_address(i2c_adap, addr, retries);
+@@ -456,33 +481,33 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
+ 			return -EREMOTEIO;
  		}
+ 		/* the remaining 8 bit address */
+-		ret = i2c_outb(i2c_adap,msg->addr & 0x7f);
++		ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
+ 		if ((ret != 1) && !nak_ok) {
+ 			/* the chip did not ack / xmission error occurred */
+ 			dev_err(&i2c_adap->dev, "died at 2nd address code\n");
+ 			return -EREMOTEIO;
+ 		}
+-		if ( flags & I2C_M_RD ) {
++		if (flags & I2C_M_RD) {
+ 			bit_dbg(3, &i2c_adap->dev, "emitting repeated "
+ 				"start condition\n");
+ 			i2c_repstart(adap);
+ 			/* okay, now switch into reading mode */
+ 			addr |= 0x01;
+ 			ret = try_address(i2c_adap, addr, retries);
+-			if ((ret!=1) && !nak_ok) {
++			if ((ret != 1) && !nak_ok) {
+ 				dev_err(&i2c_adap->dev,
+ 					"died at repeated address code\n");
+ 				return -EREMOTEIO;
+ 			}
+ 		}
+ 	} else {		/* normal 7bit address	*/
+-		addr = ( msg->addr << 1 );
+-		if (flags & I2C_M_RD )
++		addr = msg->addr << 1;
++		if (flags & I2C_M_RD)
+ 			addr |= 1;
+-		if (flags & I2C_M_REV_DIR_ADDR )
++		if (flags & I2C_M_REV_DIR_ADDR)
+ 			addr ^= 1;
+ 		ret = try_address(i2c_adap, addr, retries);
+-		if ((ret!=1) && !nak_ok)
++		if ((ret != 1) && !nak_ok)
+ 			return -EREMOTEIO;
  	}
-- Done:
-+done:
- 	return error;
-- Err:
-+error:
- 	while (--i >= 0)
--		class_remove_file(cls,&cls->class_attrs[i]);
--	goto Done;
-+		class_remove_file(cls, &cls->class_attrs[i]);
-+	goto done;
- }
  
--static void remove_class_attrs(struct class * cls)
-+static void remove_class_attrs(struct class *cls)
+@@ -494,15 +519,14 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
  {
- 	int i;
+ 	struct i2c_msg *pmsg;
+ 	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+-	
+-	int i,ret;
++	int i, ret;
+ 	unsigned short nak_ok;
  
- 	if (cls->class_attrs) {
- 		for (i = 0; attr_name(cls->class_attrs[i]); i++)
--			class_remove_file(cls,&cls->class_attrs[i]);
-+			class_remove_file(cls, &cls->class_attrs[i]);
- 	}
- }
+ 	bit_dbg(3, &i2c_adap->dev, "emitting start condition\n");
+ 	i2c_start(adap);
+-	for (i=0;i<num;i++) {
++	for (i = 0; i < num; i++) {
+ 		pmsg = &msgs[i];
+-		nak_ok = pmsg->flags & I2C_M_IGNORE_NAK; 
++		nak_ok = pmsg->flags & I2C_M_IGNORE_NAK;
+ 		if (!(pmsg->flags & I2C_M_NOSTART)) {
+ 			if (i) {
+ 				bit_dbg(3, &i2c_adap->dev, "emitting "
+@@ -517,7 +541,7 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
+ 				goto bailout;
+ 			}
+ 		}
+-		if (pmsg->flags & I2C_M_RD ) {
++		if (pmsg->flags & I2C_M_RD) {
+ 			/* read bytes into buffer*/
+ 			ret = readbytes(i2c_adap, pmsg);
+ 			if (ret >= 1)
+@@ -551,7 +575,7 @@ bailout:
  
--int class_register(struct class * cls)
-+int class_register(struct class *cls)
+ static u32 bit_func(struct i2c_adapter *adap)
  {
- 	int error;
+-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 
++	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ 	       I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
+@@ -565,8 +589,8 @@ static const struct i2c_algorithm i2c_bit_algo = {
+ 	.functionality	= bit_func,
+ };
  
-@@ -149,9 +149,16 @@ int class_register(struct class * cls)
- 	if (error)
- 		return error;
+-/* 
+- * registering functions to load algorithms at runtime 
++/*
++ * registering functions to load algorithms at runtime
+  */
+ static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
+ {
+@@ -574,7 +598,7 @@ static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
  
--	cls->subsys.kobj.kset = &class_subsys;
-+#ifdef CONFIG_SYSFS_DEPRECATED
-+	/* let the block class directory show up in the root of sysfs */
-+	if (cls != &block_class)
-+		cls->subsys.kobj.kset = class_kset;
-+#else
-+	cls->subsys.kobj.kset = class_kset;
-+#endif
-+	cls->subsys.kobj.ktype = &class_ktype;
+ 	if (bit_test) {
+ 		int ret = test_bus(bit_adap, adap->name);
+-		if (ret<0)
++		if (ret < 0)
+ 			return -ENODEV;
+ 	}
  
--	error = subsystem_register(&cls->subsys);
-+	error = kset_register(&cls->subsys);
- 	if (!error) {
- 		error = add_class_attrs(class_get(cls));
- 		class_put(cls);
-@@ -159,11 +166,11 @@ int class_register(struct class * cls)
- 	return error;
- }
+diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
+index ab2e6f3..8907b01 100644
+--- a/drivers/i2c/algos/i2c-algo-pcf.c
++++ b/drivers/i2c/algos/i2c-algo-pcf.c
+@@ -203,35 +203,6 @@ static int pcf_init_8584 (struct i2c_algo_pcf_data *adap)
+ /* ----- Utility functions
+  */
  
--void class_unregister(struct class * cls)
-+void class_unregister(struct class *cls)
+-static inline int try_address(struct i2c_algo_pcf_data *adap,
+-		       unsigned char addr, int retries)
+-{
+-	int i, status, ret = -1;
+-	int wfp;
+-	for (i=0;i<retries;i++) {
+-		i2c_outb(adap, addr);
+-		i2c_start(adap);
+-		status = get_pcf(adap, 1);
+-		if ((wfp = wait_for_pin(adap, &status)) >= 0) {
+-			if ((status & I2C_PCF_LRB) == 0) { 
+-				i2c_stop(adap);
+-				break;	/* success! */
+-			}
+-		}
+-		if (wfp == -EINTR) {
+-			/* arbitration lost */
+-			udelay(adap->udelay);
+-			return -EINTR;
+-		}
+-		i2c_stop(adap);
+-		udelay(adap->udelay);
+-	}
+-	DEB2(if (i) printk(KERN_DEBUG "i2c-algo-pcf.o: needed %d retries for %d\n",i,
+-	                   addr));
+-	return ret;
+-}
+-
+-
+ static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf,
+                          int count, int last)
  {
- 	pr_debug("device class '%s': unregistering\n", cls->name);
- 	remove_class_attrs(cls);
--	subsystem_unregister(&cls->subsys);
-+	kset_unregister(&cls->subsys);
+@@ -321,47 +292,19 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf,
  }
  
- static void class_create_release(struct class *cls)
-@@ -241,8 +248,8 @@ void class_destroy(struct class *cls)
- 
- /* Class Device Stuff */
  
--int class_device_create_file(struct class_device * class_dev,
--			     const struct class_device_attribute * attr)
-+int class_device_create_file(struct class_device *class_dev,
-+			     const struct class_device_attribute *attr)
+-static inline int pcf_doAddress(struct i2c_algo_pcf_data *adap,
+-                                struct i2c_msg *msg, int retries) 
++static int pcf_doAddress(struct i2c_algo_pcf_data *adap,
++			 struct i2c_msg *msg)
  {
- 	int error = -EINVAL;
- 	if (class_dev)
-@@ -250,8 +257,8 @@ int class_device_create_file(struct class_device * class_dev,
- 	return error;
+ 	unsigned short flags = msg->flags;
+ 	unsigned char addr;
+-	int ret;
+-	if ( (flags & I2C_M_TEN)  ) { 
+-		/* a ten bit address */
+-		addr = 0xf0 | (( msg->addr >> 7) & 0x03);
+-		DEB2(printk(KERN_DEBUG "addr0: %d\n",addr));
+-		/* try extended address code...*/
+-		ret = try_address(adap, addr, retries);
+-		if (ret!=1) {
+-			printk(KERN_ERR "died at extended address code.\n");
+-			return -EREMOTEIO;
+-		}
+-		/* the remaining 8 bit address */
+-		i2c_outb(adap,msg->addr & 0x7f);
+-/* Status check comes here */
+-		if (ret != 1) {
+-			printk(KERN_ERR "died at 2nd address code.\n");
+-			return -EREMOTEIO;
+-		}
+-		if ( flags & I2C_M_RD ) {
+-			i2c_repstart(adap);
+-			/* okay, now switch into reading mode */
+-			addr |= 0x01;
+-			ret = try_address(adap, addr, retries);
+-			if (ret!=1) {
+-				printk(KERN_ERR "died at extended address code.\n");
+-				return -EREMOTEIO;
+-			}
+-		}
+-	} else {		/* normal 7bit address	*/
+-		addr = ( msg->addr << 1 );
+-		if (flags & I2C_M_RD )
+-			addr |= 1;
+-		if (flags & I2C_M_REV_DIR_ADDR )
+-			addr ^= 1;
+-		i2c_outb(adap, addr);
+-	}
++
++	addr = msg->addr << 1;
++	if (flags & I2C_M_RD)
++		addr |= 1;
++	if (flags & I2C_M_REV_DIR_ADDR)
++		addr ^= 1;
++	i2c_outb(adap, addr);
++
+ 	return 0;
  }
  
--void class_device_remove_file(struct class_device * class_dev,
--			      const struct class_device_attribute * attr)
-+void class_device_remove_file(struct class_device *class_dev,
-+			      const struct class_device_attribute *attr)
+@@ -390,7 +333,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
+ 		     pmsg->flags & I2C_M_RD ? "read" : "write",
+                      pmsg->len, pmsg->addr, i + 1, num);)
+     
+-		ret = pcf_doAddress(adap, pmsg, i2c_adap->retries);
++		ret = pcf_doAddress(adap, pmsg);
+ 
+ 		/* Send START */
+ 		if (i == 0) {
+@@ -453,7 +396,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
+ static u32 pcf_func(struct i2c_adapter *adap)
  {
- 	if (class_dev)
- 		sysfs_remove_file(&class_dev->kobj, &attr->attr);
-@@ -273,12 +280,11 @@ void class_device_remove_bin_file(struct class_device *class_dev,
- 		sysfs_remove_bin_file(&class_dev->kobj, attr);
+ 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 
+-	       I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; 
++	       I2C_FUNC_PROTOCOL_MANGLING;
  }
  
--static ssize_t
--class_device_attr_show(struct kobject * kobj, struct attribute * attr,
--		       char * buf)
-+static ssize_t class_device_attr_show(struct kobject *kobj,
-+				      struct attribute *attr, char *buf)
- {
--	struct class_device_attribute * class_dev_attr = to_class_dev_attr(attr);
--	struct class_device * cd = to_class_dev(kobj);
-+	struct class_device_attribute *class_dev_attr = to_class_dev_attr(attr);
-+	struct class_device *cd = to_class_dev(kobj);
- 	ssize_t ret = 0;
+ /* -----exported algorithm data: -------------------------------------	*/
+@@ -475,9 +418,7 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap)
  
- 	if (class_dev_attr->show)
-@@ -286,12 +292,12 @@ class_device_attr_show(struct kobject * kobj, struct attribute * attr,
- 	return ret;
- }
+ 	/* register new adapter to i2c module... */
+ 	adap->algo = &pcf_algo;
+-
+-	adap->timeout = 100;		/* default values, should	*/
+-	adap->retries = 3;		/* be replaced by defines	*/
++	adap->timeout = 100;
  
--static ssize_t
--class_device_attr_store(struct kobject * kobj, struct attribute * attr,
--			const char * buf, size_t count)
-+static ssize_t class_device_attr_store(struct kobject *kobj,
-+				       struct attribute *attr,
-+				       const char *buf, size_t count)
- {
--	struct class_device_attribute * class_dev_attr = to_class_dev_attr(attr);
--	struct class_device * cd = to_class_dev(kobj);
-+	struct class_device_attribute *class_dev_attr = to_class_dev_attr(attr);
-+	struct class_device *cd = to_class_dev(kobj);
- 	ssize_t ret = 0;
+ 	if ((rval = pcf_init_8584(pcf_adap)))
+ 		return rval;
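The pcf algorithm loses its 10-bit addressing path: try_address() is removed outright, pcf_doAddress() now only forms 7-bit address bytes, and pcf_func() stops advertising I2C_FUNC_10BIT_ADDR to match. Clients that need 10-bit addressing should probe for the capability rather than assume it; a short sketch using the stock i2c_check_functionality() helper from <linux/i2c.h> (the function name here is invented):

/* Illustrative only, not part of the patch. */
#include <linux/i2c.h>

static int example_need_10bit(struct i2c_adapter *adap)
{
	if (!i2c_check_functionality(adap, I2C_FUNC_10BIT_ADDR))
		return -ENODEV;	/* e.g. a PCF8584 bus after this change */
	return 0;
}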
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index c466c6c..b61f56b 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -182,7 +182,8 @@ config I2C_I801
+ 	  will be called i2c-i801.
  
- 	if (class_dev_attr->store)
-@@ -304,10 +310,10 @@ static struct sysfs_ops class_dev_sysfs_ops = {
- 	.store	= class_device_attr_store,
- };
+ config I2C_I810
+-	tristate "Intel 810/815"
++	tristate "Intel 810/815 (DEPRECATED)"
++	default n
+ 	depends on PCI
+ 	select I2C_ALGOBIT
+ 	help
+@@ -195,6 +196,8 @@ config I2C_I810
+ 	    i815
+ 	    i845G
  
--static void class_dev_release(struct kobject * kobj)
-+static void class_dev_release(struct kobject *kobj)
- {
- 	struct class_device *cd = to_class_dev(kobj);
--	struct class * cls = cd->class;
-+	struct class *cls = cd->class;
++	  This driver is deprecated in favor of the i810fb and intelfb drivers.
++
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called i2c-i810.
  
- 	pr_debug("device class '%s': release.\n", cd->class_id);
+@@ -259,20 +262,6 @@ config I2C_IOP3XX
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called i2c-iop3xx.
  
-@@ -316,8 +322,8 @@ static void class_dev_release(struct kobject * kobj)
- 	else if (cls->release)
- 		cls->release(cd);
- 	else {
--		printk(KERN_ERR "Class Device '%s' does not have a release() function, "
--			"it is broken and must be fixed.\n",
-+		printk(KERN_ERR "Class Device '%s' does not have a release() "
-+			"function, it is broken and must be fixed.\n",
- 			cd->class_id);
- 		WARN_ON(1);
- 	}
-@@ -428,7 +434,8 @@ static int class_uevent(struct kset *kset, struct kobject *kobj,
- 			add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
+-config I2C_IXP4XX
+-	tristate "IXP4xx GPIO-Based I2C Interface (DEPRECATED)"
+-	depends on ARCH_IXP4XX
+-	select I2C_ALGOBIT
+-	help
+-	  Say Y here if you have an Intel IXP4xx(420,421,422,425) based 
+-	  system and are using GPIO lines for an I2C bus.
+-
+-	  This support is also available as a module. If so, the module
+-	  will be called i2c-ixp4xx.
+-
+-	  This driver is deprecated and will be dropped soon. Use i2c-gpio
+-	  instead.
+-
+ config I2C_IXP2000
+ 	tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)"
+ 	depends on ARCH_IXP2000
+@@ -396,7 +385,8 @@ config I2C_PASEMI
+ 	  Supports the PA Semi PWRficient on-chip SMBus interfaces.
  
- 		if (dev->driver)
--			add_uevent_var(env, "PHYSDEVDRIVER=%s", dev->driver->name);
-+			add_uevent_var(env, "PHYSDEVDRIVER=%s",
-+				       dev->driver->name);
- 	}
+ config I2C_PROSAVAGE
+-	tristate "S3/VIA (Pro)Savage"
++	tristate "S3/VIA (Pro)Savage (DEPRECATED)"
++	default n
+ 	depends on PCI
+ 	select I2C_ALGOBIT
+ 	help
+@@ -407,6 +397,8 @@ config I2C_PROSAVAGE
+ 	    S3/VIA KM266/VT8375 aka ProSavage8
+ 	    S3/VIA KM133/VT8365 aka Savage4
  
- 	if (class_dev->uevent) {
-@@ -452,43 +459,49 @@ static struct kset_uevent_ops class_uevent_ops = {
- 	.uevent =	class_uevent,
++	  This driver is deprecated in favor of the savagefb driver.
++
+ 	  This support is also available as a module.  If so, the module 
+ 	  will be called i2c-prosavage.
+ 
+@@ -418,13 +410,16 @@ config I2C_S3C2410
+ 	  Samsung S3C2410 based System-on-Chip devices.
+ 
+ config I2C_SAVAGE4
+-	tristate "S3 Savage 4"
+-	depends on PCI && EXPERIMENTAL
++	tristate "S3 Savage 4 (DEPRECATED)"
++	default n
++	depends on PCI
+ 	select I2C_ALGOBIT
+ 	help
+ 	  If you say yes to this option, support will be included for the 
+ 	  S3 Savage 4 I2C interface.
+ 
++	  This driver is deprecated in favor of the savagefb driver.
++
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called i2c-savage4.
+ 
+@@ -611,7 +606,7 @@ config I2C_VIAPRO
+ 	    VT8231
+ 	    VT8233/A
+ 	    VT8235
+-	    VT8237R/A
++	    VT8237R/A/S
+ 	    VT8251
+ 	    CX700
+ 
+@@ -648,7 +643,7 @@ config I2C_PCA_ISA
+ 
+ config I2C_MV64XXX
+ 	tristate "Marvell mv64xxx I2C Controller"
+-	depends on MV64X60 && EXPERIMENTAL
++	depends on (MV64X60 || ARCH_ORION) && EXPERIMENTAL
+ 	help
+ 	  If you say yes to this option, support will be included for the
+ 	  built-in I2C interface on the Marvell 64xxx line of host bridges.
+diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
+index 81d43c2..ea7068f 100644
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -20,7 +20,6 @@ obj-$(CONFIG_I2C_I810)		+= i2c-i810.o
+ obj-$(CONFIG_I2C_IBM_IIC)	+= i2c-ibm_iic.o
+ obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
+ obj-$(CONFIG_I2C_IXP2000)	+= i2c-ixp2000.o
+-obj-$(CONFIG_I2C_IXP4XX)	+= i2c-ixp4xx.o
+ obj-$(CONFIG_I2C_POWERMAC)	+= i2c-powermac.o
+ obj-$(CONFIG_I2C_MPC)		+= i2c-mpc.o
+ obj-$(CONFIG_I2C_MV64XXX)	+= i2c-mv64xxx.o
+diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
+index 7490dc1..573abe4 100644
+--- a/drivers/i2c/busses/i2c-amd756.c
++++ b/drivers/i2c/busses/i2c-amd756.c
+@@ -334,6 +334,10 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
+ 	int error;
+ 	u8 temp;
+ 	
++	/* driver_data might come from user-space, so check it */
++	if (id->driver_data > ARRAY_SIZE(chipname))
++		return -EINVAL;
++
+ 	if (amd756_ioport) {
+ 		dev_err(&pdev->dev, "Only one device supported "
+ 		       "(you have a strange motherboard, btw)\n");
+@@ -405,6 +409,7 @@ static struct pci_driver amd756_driver = {
+ 	.id_table	= amd756_ids,
+ 	.probe		= amd756_probe,
+ 	.remove		= __devexit_p(amd756_remove),
++	.dynids.use_driver_data = 1,
  };
  
--static decl_subsys(class_obj, &class_device_ktype, &class_uevent_ops);
--
-+/*
-+ * DO NOT copy how this is created, kset_create_and_add() should be
-+ * called, but this is a hold-over from the old-way and will be deleted
-+ * entirely soon.
-+ */
-+static struct kset class_obj_subsys = {
-+	.uevent_ops = &class_uevent_ops,
+ static int __init amd756_init(void)
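The amd756 hunks pair the new dynids.use_driver_data flag with a bounds check on id->driver_data, since IDs added through sysfs carry a user-supplied value. One hedged observation: the check uses '>', so a value equal to ARRAY_SIZE(chipname) still gets through even though driver_data indexes the chipname[] table in this driver; a stricter form would look like the fragment below (a suggestion only, not what the patch applies):

	/* Illustrative only, a stricter variant of the check above. */
	if (id->driver_data >= ARRAY_SIZE(chipname))
		return -EINVAL;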
+diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
+index 2f68416..1953b26 100644
+--- a/drivers/i2c/busses/i2c-au1550.c
++++ b/drivers/i2c/busses/i2c-au1550.c
+@@ -30,14 +30,22 @@
+ #include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/platform_device.h>
+ #include <linux/init.h>
+ #include <linux/errno.h>
+ #include <linux/i2c.h>
++#include <linux/slab.h>
+ 
+ #include <asm/mach-au1x00/au1xxx.h>
+ #include <asm/mach-au1x00/au1xxx_psc.h>
+ 
+-#include "i2c-au1550.h"
++struct i2c_au1550_data {
++	u32	psc_base;
++	int	xfer_timeout;
++	int	ack_timeout;
++	struct i2c_adapter adap;
++	struct resource *ioarea;
 +};
  
--static int class_device_add_attrs(struct class_device * cd)
-+static int class_device_add_attrs(struct class_device *cd)
+ static int
+ wait_xfer_done(struct i2c_au1550_data *adap)
+@@ -105,7 +113,7 @@ wait_master_done(struct i2c_au1550_data *adap)
+ }
+ 
+ static int
+-do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd)
++do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q)
  {
- 	int i;
- 	int error = 0;
--	struct class * cls = cd->class;
-+	struct class *cls = cd->class;
+ 	volatile psc_smb_t	*sp;
+ 	u32			stat;
+@@ -134,6 +142,10 @@ do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd)
+ 	if (rd)
+ 		addr |= 1;
  
- 	if (cls->class_dev_attrs) {
- 		for (i = 0; attr_name(cls->class_dev_attrs[i]); i++) {
- 			error = class_device_create_file(cd,
--							 &cls->class_dev_attrs[i]);
-+						&cls->class_dev_attrs[i]);
- 			if (error)
--				goto Err;
-+				goto err;
- 		}
- 	}
-- Done:
-+done:
- 	return error;
-- Err:
-+err:
- 	while (--i >= 0)
--		class_device_remove_file(cd,&cls->class_dev_attrs[i]);
--	goto Done;
-+		class_device_remove_file(cd, &cls->class_dev_attrs[i]);
-+	goto done;
++	/* zero-byte xfers stop immediately */
++	if (q)
++		addr |= PSC_SMBTXRX_STP;
++
+ 	/* Put byte into fifo, start up master.
+ 	*/
+ 	sp->psc_smbtxrx = addr;
+@@ -142,7 +154,7 @@ do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd)
+ 	au_sync();
+ 	if (wait_ack(adap))
+ 		return -EIO;
+-	return 0;
++	return (q) ? wait_master_done(adap) : 0;
  }
  
--static void class_device_remove_attrs(struct class_device * cd)
-+static void class_device_remove_attrs(struct class_device *cd)
+ static u32
+@@ -262,7 +274,8 @@ au1550_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
+ 
+ 	for (i = 0; !err && i < num; i++) {
+ 		p = &msgs[i];
+-		err = do_address(adap, p->addr, p->flags & I2C_M_RD);
++		err = do_address(adap, p->addr, p->flags & I2C_M_RD,
++				 (p->len == 0));
+ 		if (err || !p->len)
+ 			continue;
+ 		if (p->flags & I2C_M_RD)
+@@ -294,18 +307,48 @@ static const struct i2c_algorithm au1550_algo = {
+  * Prior to calling us, the 50MHz clock frequency and routing
+  * must have been set up for the PSC indicated by the adapter.
+  */
+-int
+-i2c_au1550_add_bus(struct i2c_adapter *i2c_adap)
++static int __devinit
++i2c_au1550_probe(struct platform_device *pdev)
  {
- 	int i;
--	struct class * cls = cd->class;
-+	struct class *cls = cd->class;
+-	struct i2c_au1550_data *adap = i2c_adap->algo_data;
+-	volatile psc_smb_t	*sp;
+-	u32	stat;
++	struct i2c_au1550_data *priv;
++	volatile psc_smb_t *sp;
++	struct resource *r;
++	u32 stat;
++	int ret;
++
++	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!r) {
++		ret = -ENODEV;
++		goto out;
++	}
++
++	priv = kzalloc(sizeof(struct i2c_au1550_data), GFP_KERNEL);
++	if (!priv) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	priv->ioarea = request_mem_region(r->start, r->end - r->start + 1,
++					  pdev->name);
++	if (!priv->ioarea) {
++		ret = -EBUSY;
++		goto out_mem;
++	}
  
- 	if (cls->class_dev_attrs) {
- 		for (i = 0; attr_name(cls->class_dev_attrs[i]); i++)
--			class_device_remove_file(cd,&cls->class_dev_attrs[i]);
-+			class_device_remove_file(cd, &cls->class_dev_attrs[i]);
- 	}
+-	i2c_adap->algo = &au1550_algo;
++	priv->psc_base = r->start;
++	priv->xfer_timeout = 200;
++	priv->ack_timeout = 200;
++
++	priv->adap.id = I2C_HW_AU1550_PSC;
++	priv->adap.nr = pdev->id;
++	priv->adap.algo = &au1550_algo;
++	priv->adap.algo_data = priv;
++	priv->adap.dev.parent = &pdev->dev;
++	strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
+ 
+ 	/* Now, set up the PSC for SMBus PIO mode.
+ 	*/
+-	sp = (volatile psc_smb_t *)(adap->psc_base);
++	sp = (volatile psc_smb_t *)priv->psc_base;
+ 	sp->psc_ctrl = PSC_CTRL_DISABLE;
+ 	au_sync();
+ 	sp->psc_sel = PSC_SEL_PS_SMBUSMODE;
+@@ -343,87 +386,87 @@ i2c_au1550_add_bus(struct i2c_adapter *i2c_adap)
+ 		au_sync();
+ 	} while ((stat & PSC_SMBSTAT_DR) == 0);
+ 
+-	return i2c_add_adapter(i2c_adap);
+-}
++	ret = i2c_add_numbered_adapter(&priv->adap);
++	if (ret == 0) {
++		platform_set_drvdata(pdev, priv);
++		return 0;
++	}
+ 
++	/* disable the PSC */
++	sp->psc_smbcfg = 0;
++	sp->psc_ctrl = PSC_CTRL_DISABLE;
++	au_sync();
+ 
+-int
+-i2c_au1550_del_bus(struct i2c_adapter *adap)
++	release_resource(priv->ioarea);
++	kfree(priv->ioarea);
++out_mem:
++	kfree(priv);
++out:
++	return ret;
++}
++
++static int __devexit
++i2c_au1550_remove(struct platform_device *pdev)
+ {
+-	return i2c_del_adapter(adap);
++	struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
++	volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
++
++	platform_set_drvdata(pdev, NULL);
++	i2c_del_adapter(&priv->adap);
++	sp->psc_smbcfg = 0;
++	sp->psc_ctrl = PSC_CTRL_DISABLE;
++	au_sync();
++	release_resource(priv->ioarea);
++	kfree(priv->ioarea);
++	kfree(priv);
++	return 0;
  }
  
--static int class_device_add_groups(struct class_device * cd)
-+static int class_device_add_groups(struct class_device *cd)
+ static int
+-pb1550_reg(struct i2c_client *client)
++i2c_au1550_suspend(struct platform_device *pdev, pm_message_t state)
  {
- 	int i;
- 	int error = 0;
-@@ -498,7 +511,8 @@ static int class_device_add_groups(struct class_device * cd)
- 			error = sysfs_create_group(&cd->kobj, cd->groups[i]);
- 			if (error) {
- 				while (--i >= 0)
--					sysfs_remove_group(&cd->kobj, cd->groups[i]);
-+					sysfs_remove_group(&cd->kobj,
-+							   cd->groups[i]);
- 				goto out;
- 			}
- 		}
-@@ -507,14 +521,12 @@ out:
- 	return error;
++	struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
++	volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
++
++	sp->psc_ctrl = PSC_CTRL_SUSPEND;
++	au_sync();
+ 	return 0;
  }
  
--static void class_device_remove_groups(struct class_device * cd)
-+static void class_device_remove_groups(struct class_device *cd)
+ static int
+-pb1550_unreg(struct i2c_client *client)
++i2c_au1550_resume(struct platform_device *pdev)
  {
- 	int i;
--	if (cd->groups) {
--		for (i = 0; cd->groups[i]; i++) {
-+	if (cd->groups)
-+		for (i = 0; cd->groups[i]; i++)
- 			sysfs_remove_group(&cd->kobj, cd->groups[i]);
--		}
--	}
++	struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
++	volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
++
++	sp->psc_ctrl = PSC_CTRL_ENABLE;
++	au_sync();
++	while (!(sp->psc_smbstat & PSC_SMBSTAT_SR))
++		au_sync();
+ 	return 0;
  }
  
- static ssize_t show_dev(struct class_device *class_dev, char *buf)
-@@ -537,8 +549,8 @@ static struct class_device_attribute class_uevent_attr =
+-static struct i2c_au1550_data pb1550_i2c_info = {
+-	SMBUS_PSC_BASE, 200, 200
+-};
+-
+-static struct i2c_adapter pb1550_board_adapter = {
+-	name:              "pb1550 adapter",
+-	id:                I2C_HW_AU1550_PSC,
+-	algo:              NULL,
+-	algo_data:         &pb1550_i2c_info,
+-	client_register:   pb1550_reg,
+-	client_unregister: pb1550_unreg,
++static struct platform_driver au1xpsc_smbus_driver = {
++	.driver = {
++		.name	= "au1xpsc_smbus",
++		.owner	= THIS_MODULE,
++	},
++	.probe		= i2c_au1550_probe,
++	.remove		= __devexit_p(i2c_au1550_remove),
++	.suspend	= i2c_au1550_suspend,
++	.resume		= i2c_au1550_resume,
+ };
  
- void class_device_initialize(struct class_device *class_dev)
+-/* BIG hack to support the control interface on the Wolfson WM8731
+- * audio codec on the Pb1550 board.  We get an address and two data
+- * bytes to write, create an i2c message, and send it across the
+- * i2c transfer function.  We do this here because we have access to
+- * the i2c adapter structure.
+- */
+-static struct i2c_msg wm_i2c_msg;  /* We don't want this stuff on the stack */
+-static	u8 i2cbuf[2];
+-
+-int
+-pb1550_wm_codec_write(u8 addr, u8 reg, u8 val)
+-{
+-	wm_i2c_msg.addr = addr;
+-	wm_i2c_msg.flags = 0;
+-	wm_i2c_msg.buf = i2cbuf;
+-	wm_i2c_msg.len = 2;
+-	i2cbuf[0] = reg;
+-	i2cbuf[1] = val;
+-
+-	return pb1550_board_adapter.algo->master_xfer(&pb1550_board_adapter, &wm_i2c_msg, 1);
+-}
+-
+ static int __init
+ i2c_au1550_init(void)
  {
--	kobj_set_kset_s(class_dev, class_obj_subsys);
--	kobject_init(&class_dev->kobj);
-+	class_dev->kobj.kset = &class_obj_subsys;
-+	kobject_init(&class_dev->kobj, &class_device_ktype);
- 	INIT_LIST_HEAD(&class_dev->node);
+-	printk(KERN_INFO "Au1550 I2C: ");
+-
+-	/* This is where we would set up a 50MHz clock source
+-	 * and routing.  On the Pb1550, the SMBus is PSC2, which
+-	 * uses a shared clock with USB.  This has been already
+-	 * configured by Yamon as a 48MHz clock, close enough
+-	 * for our work.
+-	 */
+-        if (i2c_au1550_add_bus(&pb1550_board_adapter) < 0) {
+-		printk("failed to initialize.\n");
+-                return -ENODEV;
+-	}
+-
+-	printk("initialized.\n");
+-	return 0;
++	return platform_driver_register(&au1xpsc_smbus_driver);
  }
  
-@@ -566,16 +578,13 @@ int class_device_add(struct class_device *class_dev)
- 		 class_dev->class_id);
+ static void __exit
+ i2c_au1550_exit(void)
+ {
+-	i2c_au1550_del_bus(&pb1550_board_adapter);
++	platform_driver_unregister(&au1xpsc_smbus_driver);
+ }
  
- 	/* first, register with generic layer. */
--	error = kobject_set_name(&class_dev->kobj, "%s", class_dev->class_id);
--	if (error)
--		goto out2;
+ MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC.");
+diff --git a/drivers/i2c/busses/i2c-au1550.h b/drivers/i2c/busses/i2c-au1550.h
+deleted file mode 100644
+index fce15d1..0000000
+--- a/drivers/i2c/busses/i2c-au1550.h
++++ /dev/null
+@@ -1,32 +0,0 @@
+-/*
+- * Copyright (C) 2004 Embedded Edge, LLC <dan at embeddededge.com>
+- * 2.6 port by Matt Porter <mporter at kernel.crashing.org>
+- *
+- *  This program is free software; you can redistribute it and/or modify
+- *  it under the terms of the GNU General Public License as published by
+- *  the Free Software Foundation; either version 2 of the License, or
+- *  (at your option) any later version.
+- *
+- *  This program is distributed in the hope that it will be useful,
+- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- *  GNU General Public License for more details.
+- *
+- *  You should have received a copy of the GNU General Public License
+- *  along with this program; if not, write to the Free Software
+- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+- */
 -
- 	if (parent_class_dev)
- 		class_dev->kobj.parent = &parent_class_dev->kobj;
+-#ifndef I2C_AU1550_H
+-#define I2C_AU1550_H
+-
+-struct i2c_au1550_data {
+-	u32	psc_base;
+-	int	xfer_timeout;
+-	int	ack_timeout;
+-};
+-
+-int i2c_au1550_add_bus(struct i2c_adapter *);
+-int i2c_au1550_del_bus(struct i2c_adapter *);
+-
+-#endif /* I2C_AU1550_H */
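Taken together, the au1550 changes drop the exported i2c_au1550_add_bus()/i2c_au1550_del_bus() pair, the board-specific pb1550 adapter and the WM8731 codec hack, and turn the code into an ordinary platform driver: the register block comes from platform_get_resource(), per-device state is allocated in probe, and the adapter is registered with i2c_add_numbered_adapter() so the bus number follows pdev->id. For reference, a stripped-down sketch of that driver shape, with invented example_ names and no real hardware setup:

/* Illustrative only, not part of the patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/slab.h>

struct example_priv {
	struct resource *ioarea;	/* would hold the claimed MEM region */
};

static int example_probe(struct platform_device *pdev)
{
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct example_priv *priv;

	if (!r)
		return -ENODEV;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	/* claim the region, set up the adapter, then register it, e.g.
	 * with i2c_add_numbered_adapter() once adap.nr = pdev->id is set */
	platform_set_drvdata(pdev, priv);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	kfree(platform_get_drvdata(pdev));
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver example_smbus_driver = {
	.driver	= {
		.name	= "example-smbus",
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	return platform_driver_register(&example_smbus_driver);
}

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_smbus_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");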
+diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
+index 67224a4..7dbdaeb 100644
+--- a/drivers/i2c/busses/i2c-bfin-twi.c
++++ b/drivers/i2c/busses/i2c-bfin-twi.c
+@@ -550,6 +550,7 @@ static int i2c_bfin_twi_probe(struct platform_device *dev)
+ 
+ 	p_adap = &iface->adap;
+ 	p_adap->id = I2C_HW_BLACKFIN;
++	p_adap->nr = dev->id;
+ 	strlcpy(p_adap->name, dev->name, sizeof(p_adap->name));
+ 	p_adap->algo = &bfin_twi_algorithm;
+ 	p_adap->algo_data = iface;
+@@ -576,7 +577,7 @@ static int i2c_bfin_twi_probe(struct platform_device *dev)
+ 	bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA);
+ 	SSYNC();
+ 
+-	rc = i2c_add_adapter(p_adap);
++	rc = i2c_add_numbered_adapter(p_adap);
+ 	if (rc < 0)
+ 		free_irq(iface->irq, iface);
  	else
- 		class_dev->kobj.parent = &parent_class->subsys.kobj;
+diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
+index 6767988..cce5a61 100644
+--- a/drivers/i2c/busses/i2c-davinci.c
++++ b/drivers/i2c/busses/i2c-davinci.c
+@@ -510,7 +510,6 @@ static int davinci_i2c_probe(struct platform_device *pdev)
  
--	error = kobject_add(&class_dev->kobj);
-+	error = kobject_add(&class_dev->kobj, class_dev->kobj.parent,
-+			    "%s", class_dev->class_id);
- 	if (error)
- 		goto out2;
+ 	/* FIXME */
+ 	adap->timeout = 1;
+-	adap->retries = 1;
  
-@@ -642,7 +651,7 @@ int class_device_add(struct class_device *class_dev)
-  out3:
- 	kobject_del(&class_dev->kobj);
-  out2:
--	if(parent_class_dev)
-+	if (parent_class_dev)
- 		class_device_put(parent_class_dev);
- 	class_put(parent_class);
-  out1:
-@@ -659,9 +668,11 @@ int class_device_register(struct class_device *class_dev)
- /**
-  * class_device_create - creates a class device and registers it with sysfs
-  * @cls: pointer to the struct class that this device should be registered to.
-- * @parent: pointer to the parent struct class_device of this new device, if any.
-+ * @parent: pointer to the parent struct class_device of this new device, if
-+ * any.
-  * @devt: the dev_t for the char device to be added.
-- * @device: a pointer to a struct device that is assiociated with this class device.
-+ * @device: a pointer to a struct device that is assiociated with this class
-+ * device.
-  * @fmt: string for the class device's name
-  *
-  * This function can be used by char device classes.  A struct
-@@ -785,7 +796,7 @@ void class_device_destroy(struct class *cls, dev_t devt)
- 		class_device_unregister(class_dev);
- }
+ 	adap->nr = pdev->id;
+ 	r = i2c_add_numbered_adapter(adap);
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index ac27e5f..aa91579 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -4,6 +4,7 @@
+     Copyright (c) 1998 - 2002  Frodo Looijaard <frodol at dds.nl>,
+     Philip Edelbrock <phil at netroedge.com>, and Mark D. Studebaker
+     <mdsxyz123 at yahoo.com>
++    Copyright (C) 2007         Jean Delvare <khali at linux-fr.org>
  
--struct class_device * class_device_get(struct class_device *class_dev)
-+struct class_device *class_device_get(struct class_device *class_dev)
- {
- 	if (class_dev)
- 		return to_class_dev(kobject_get(&class_dev->kobj));
-@@ -798,6 +809,139 @@ void class_device_put(struct class_device *class_dev)
- 		kobject_put(&class_dev->kobj);
- }
+     This program is free software; you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+@@ -21,25 +22,34 @@
+ */
  
-+/**
-+ * class_for_each_device - device iterator
-+ * @class: the class we're iterating
-+ * @data: data for the callback
-+ * @fn: function to be called for each device
-+ *
-+ * Iterate over @class's list of devices, and call @fn for each,
-+ * passing it @data.
-+ *
-+ * We check the return of @fn each time. If it returns anything
-+ * other than 0, we break out and return that value.
-+ *
-+ * Note, we hold class->sem in this function, so it can not be
-+ * re-acquired in @fn, otherwise it will self-deadlocking. For
-+ * example, calls to add or remove class members would be verboten.
-+ */
-+int class_for_each_device(struct class *class, void *data,
-+			   int (*fn)(struct device *, void *))
-+{
-+	struct device *dev;
-+	int error = 0;
-+
-+	if (!class)
-+		return -EINVAL;
-+	down(&class->sem);
-+	list_for_each_entry(dev, &class->devices, node) {
-+		dev = get_device(dev);
-+		if (dev) {
-+			error = fn(dev, data);
-+			put_device(dev);
-+		} else
-+			error = -ENODEV;
-+		if (error)
-+			break;
-+	}
-+	up(&class->sem);
-+
-+	return error;
-+}
-+EXPORT_SYMBOL_GPL(class_for_each_device);
-+
-+/**
-+ * class_find_device - device iterator for locating a particular device
-+ * @class: the class we're iterating
-+ * @data: data for the match function
-+ * @match: function to check device
-+ *
-+ * This is similar to the class_for_each_dev() function above, but it
-+ * returns a reference to a device that is 'found' for later use, as
-+ * determined by the @match callback.
-+ *
-+ * The callback should return 0 if the device doesn't match and non-zero
-+ * if it does.  If the callback returns non-zero, this function will
-+ * return to the caller and not iterate over any more devices.
-+
-+ * Note, you will need to drop the reference with put_device() after use.
-+ *
-+ * We hold class->sem in this function, so it can not be
-+ * re-acquired in @match, otherwise it will self-deadlocking. For
-+ * example, calls to add or remove class members would be verboten.
-+ */
-+struct device *class_find_device(struct class *class, void *data,
-+				   int (*match)(struct device *, void *))
-+{
-+	struct device *dev;
-+	int found = 0;
-+
-+	if (!class)
-+		return NULL;
-+
-+	down(&class->sem);
-+	list_for_each_entry(dev, &class->devices, node) {
-+		dev = get_device(dev);
-+		if (dev) {
-+			if (match(dev, data)) {
-+				found = 1;
-+				break;
-+			} else
-+				put_device(dev);
-+		} else
-+			break;
-+	}
-+	up(&class->sem);
-+
-+	return found ? dev : NULL;
-+}
-+EXPORT_SYMBOL_GPL(class_find_device);
-+
-+/**
-+ * class_find_child - device iterator for locating a particular class_device
-+ * @class: the class we're iterating
-+ * @data: data for the match function
-+ * @match: function to check class_device
-+ *
-+ * This function returns a reference to a class_device that is 'found' for
-+ * later use, as determined by the @match callback.
-+ *
-+ * The callback should return 0 if the class_device doesn't match and non-zero
-+ * if it does.  If the callback returns non-zero, this function will
-+ * return to the caller and not iterate over any more class_devices.
-+ *
-+ * Note, you will need to drop the reference with class_device_put() after use.
-+ *
-+ * We hold class->sem in this function, so it can not be
-+ * re-acquired in @match, otherwise it will self-deadlocking. For
-+ * example, calls to add or remove class members would be verboten.
-+ */
-+struct class_device *class_find_child(struct class *class, void *data,
-+				   int (*match)(struct class_device *, void *))
-+{
-+	struct class_device *dev;
-+	int found = 0;
+ /*
+-    SUPPORTED DEVICES	PCI ID
+-    82801AA		2413
+-    82801AB		2423
+-    82801BA		2443
+-    82801CA/CAM		2483
+-    82801DB		24C3   (HW PEC supported)
+-    82801EB		24D3   (HW PEC supported)
+-    6300ESB		25A4
+-    ICH6		266A
+-    ICH7		27DA
+-    ESB2		269B
+-    ICH8		283E
+-    ICH9		2930
+-    Tolapai		5032
+-    This driver supports several versions of Intel's I/O Controller Hubs (ICH).
+-    For SMBus support, they are similar to the PIIX4 and are part
+-    of Intel's '810' and other chipsets.
+-    See the file Documentation/i2c/busses/i2c-i801 for details.
+-    I2C Block Read and Process Call are not supported.
++  Supports the following Intel I/O Controller Hubs (ICH):
 +
-+	if (!class)
-+		return NULL;
++                                  I/O                     Block   I2C
++                                  region  SMBus   Block   proc.   block
++  Chip name             PCI ID    size    PEC     buffer  call    read
++  ----------------------------------------------------------------------
++  82801AA  (ICH)        0x2413     16      no      no      no      no
++  82801AB  (ICH0)       0x2423     16      no      no      no      no
++  82801BA  (ICH2)       0x2443     16      no      no      no      no
++  82801CA  (ICH3)       0x2483     32     soft     no      no      no
++  82801DB  (ICH4)       0x24c3     32     hard     yes     no      no
++  82801E   (ICH5)       0x24d3     32     hard     yes     yes     yes
++  6300ESB               0x25a4     32     hard     yes     yes     yes
++  82801F   (ICH6)       0x266a     32     hard     yes     yes     yes
++  6310ESB/6320ESB       0x269b     32     hard     yes     yes     yes
++  82801G   (ICH7)       0x27da     32     hard     yes     yes     yes
++  82801H   (ICH8)       0x283e     32     hard     yes     yes     yes
++  82801I   (ICH9)       0x2930     32     hard     yes     yes     yes
++  Tolapai               0x5032     32     hard     yes     ?       ?
 +
-+	down(&class->sem);
-+	list_for_each_entry(dev, &class->children, node) {
-+		dev = class_device_get(dev);
-+		if (dev) {
-+			if (match(dev, data)) {
-+				found = 1;
-+				break;
-+			} else
-+				class_device_put(dev);
-+		} else
-+			break;
-+	}
-+	up(&class->sem);
++  Features supported by this driver:
++  Software PEC                     no
++  Hardware PEC                     yes
++  Block buffer                     yes
++  Block process call transaction   no
++  I2C block read transaction       yes  (doesn't use the block buffer)
 +
-+	return found ? dev : NULL;
-+}
-+EXPORT_SYMBOL_GPL(class_find_child);
++  See the file Documentation/i2c/busses/i2c-i801 for details.
+ */
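The class_for_each_device() and class_find_device() helpers documented in the drivers/base/class.c lines above walk the class's device list under class->sem and hand each device to a caller-supplied callback; per the kernel-doc, the callback must not re-take class->sem (so no adding or removing of class members from inside it), and class_find_device() returns a reference the caller later drops with put_device(). A small usage sketch matching the signatures shown above (the callback and counter are invented for this note):

/* Illustrative only, not part of the patch. */
#include <linux/device.h>

static int example_count_one(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;		/* returning non-zero would stop the walk */
}

static unsigned int example_count_devices(struct class *cls)
{
	unsigned int count = 0;

	class_for_each_device(cls, &count, example_count_one);
	return count;
}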
  
- int class_interface_register(struct class_interface *class_intf)
- {
-@@ -829,7 +973,7 @@ int class_interface_register(struct class_interface *class_intf)
+ /* Note: we assume there can only be one I801, with one SMBus interface */
+@@ -62,9 +72,9 @@
+ #define SMBHSTDAT0	(5 + i801_smba)
+ #define SMBHSTDAT1	(6 + i801_smba)
+ #define SMBBLKDAT	(7 + i801_smba)
+-#define SMBPEC		(8 + i801_smba)	/* ICH4 only */
+-#define SMBAUXSTS	(12 + i801_smba)	/* ICH4 only */
+-#define SMBAUXCTL	(13 + i801_smba)	/* ICH4 only */
++#define SMBPEC		(8 + i801_smba)		/* ICH3 and later */
++#define SMBAUXSTS	(12 + i801_smba)	/* ICH4 and later */
++#define SMBAUXCTL	(13 + i801_smba)	/* ICH4 and later */
  
- void class_interface_unregister(struct class_interface *class_intf)
- {
--	struct class * parent = class_intf->class;
-+	struct class *parent = class_intf->class;
- 	struct class_device *class_dev;
- 	struct device *dev;
+ /* PCI Address Constants */
+ #define SMBBAR		4
+@@ -91,13 +101,13 @@
+ #define I801_BYTE		0x04
+ #define I801_BYTE_DATA		0x08
+ #define I801_WORD_DATA		0x0C
+-#define I801_PROC_CALL		0x10	/* later chips only, unimplemented */
++#define I801_PROC_CALL		0x10	/* unimplemented */
+ #define I801_BLOCK_DATA		0x14
+-#define I801_I2C_BLOCK_DATA	0x18	/* unimplemented */
++#define I801_I2C_BLOCK_DATA	0x18	/* ICH5 and later */
+ #define I801_BLOCK_LAST		0x34
+-#define I801_I2C_BLOCK_LAST	0x38	/* unimplemented */
++#define I801_I2C_BLOCK_LAST	0x38	/* ICH5 and later */
+ #define I801_START		0x40
+-#define I801_PEC_EN		0x80	/* ICH4 only */
++#define I801_PEC_EN		0x80	/* ICH3 and later */
  
-@@ -853,15 +997,14 @@ void class_interface_unregister(struct class_interface *class_intf)
+ /* I801 Hosts Status register bits */
+ #define SMBHSTSTS_BYTE_DONE	0x80
+@@ -113,7 +123,12 @@ static unsigned long i801_smba;
+ static unsigned char i801_original_hstcfg;
+ static struct pci_driver i801_driver;
+ static struct pci_dev *I801_dev;
+-static int isich4;
++
++#define FEATURE_SMBUS_PEC	(1 << 0)
++#define FEATURE_BLOCK_BUFFER	(1 << 1)
++#define FEATURE_BLOCK_PROC	(1 << 2)
++#define FEATURE_I2C_BLOCK_READ	(1 << 3)
++static unsigned int i801_features;
  
- int __init classes_init(void)
+ static int i801_transaction(int xact)
  {
--	int retval;
--
--	retval = subsystem_register(&class_subsys);
--	if (retval)
--		return retval;
-+	class_kset = kset_create_and_add("class", NULL, NULL);
-+	if (!class_kset)
-+		return -ENOMEM;
- 
- 	/* ick, this is ugly, the things we go through to keep from showing up
- 	 * in sysfs... */
- 	kset_init(&class_obj_subsys);
-+	kobject_set_name(&class_obj_subsys.kobj, "class_obj");
- 	if (!class_obj_subsys.kobj.parent)
- 		class_obj_subsys.kobj.parent = &class_obj_subsys.kobj;
- 	return 0;
-diff --git a/drivers/base/core.c b/drivers/base/core.c
-index 2683eac..edf3bbe 100644
---- a/drivers/base/core.c
-+++ b/drivers/base/core.c
-@@ -18,14 +18,14 @@
- #include <linux/string.h>
- #include <linux/kdev_t.h>
- #include <linux/notifier.h>
--
-+#include <linux/genhd.h>
- #include <asm/semaphore.h>
- 
- #include "base.h"
- #include "power/power.h"
- 
--int (*platform_notify)(struct device * dev) = NULL;
--int (*platform_notify_remove)(struct device * dev) = NULL;
-+int (*platform_notify)(struct device *dev) = NULL;
-+int (*platform_notify_remove)(struct device *dev) = NULL;
- 
- /*
-  * sysfs bindings for devices.
-@@ -51,11 +51,11 @@ EXPORT_SYMBOL(dev_driver_string);
- #define to_dev(obj) container_of(obj, struct device, kobj)
- #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+@@ -242,7 +257,8 @@ static int i801_block_transaction_by_block(union i2c_smbus_data *data,
+ }
  
--static ssize_t
--dev_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
-+static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
-+			     char *buf)
+ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
+-					       char read_write, int hwpec)
++					       char read_write, int command,
++					       int hwpec)
  {
--	struct device_attribute * dev_attr = to_dev_attr(attr);
--	struct device * dev = to_dev(kobj);
-+	struct device_attribute *dev_attr = to_dev_attr(attr);
-+	struct device *dev = to_dev(kobj);
- 	ssize_t ret = -EIO;
+ 	int i, len;
+ 	int smbcmd;
+@@ -259,16 +275,24 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
+ 	}
  
- 	if (dev_attr->show)
-@@ -63,12 +63,11 @@ dev_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
- 	return ret;
- }
+ 	for (i = 1; i <= len; i++) {
+-		if (i == len && read_write == I2C_SMBUS_READ)
+-			smbcmd = I801_BLOCK_LAST;
+-		else
+-			smbcmd = I801_BLOCK_DATA;
++		if (i == len && read_write == I2C_SMBUS_READ) {
++			if (command == I2C_SMBUS_I2C_BLOCK_DATA)
++				smbcmd = I801_I2C_BLOCK_LAST;
++			else
++				smbcmd = I801_BLOCK_LAST;
++		} else {
++			if (command == I2C_SMBUS_I2C_BLOCK_DATA
++			 && read_write == I2C_SMBUS_READ)
++				smbcmd = I801_I2C_BLOCK_DATA;
++			else
++				smbcmd = I801_BLOCK_DATA;
++		}
+ 		outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT);
  
--static ssize_t
--dev_attr_store(struct kobject * kobj, struct attribute * attr,
--	       const char * buf, size_t count)
-+static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
-+			      const char *buf, size_t count)
- {
--	struct device_attribute * dev_attr = to_dev_attr(attr);
--	struct device * dev = to_dev(kobj);
-+	struct device_attribute *dev_attr = to_dev_attr(attr);
-+	struct device *dev = to_dev(kobj);
- 	ssize_t ret = -EIO;
+ 		dev_dbg(&I801_dev->dev, "Block (pre %d): CNT=%02x, CMD=%02x, "
+-			"ADD=%02x, DAT0=%02x, BLKDAT=%02x\n", i,
++			"ADD=%02x, DAT0=%02x, DAT1=%02x, BLKDAT=%02x\n", i,
+ 			inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD),
+-			inb_p(SMBHSTDAT0), inb_p(SMBBLKDAT));
++			inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1), inb_p(SMBBLKDAT));
  
- 	if (dev_attr->store)
-@@ -90,9 +89,9 @@ static struct sysfs_ops dev_sysfs_ops = {
-  *	reaches 0. We forward the call to the device's release
-  *	method, which should handle actually freeing the structure.
-  */
--static void device_release(struct kobject * kobj)
-+static void device_release(struct kobject *kobj)
- {
--	struct device * dev = to_dev(kobj);
-+	struct device *dev = to_dev(kobj);
+ 		/* Make sure the SMBus host is ready to start transmitting */
+ 		temp = inb_p(SMBHSTSTS);
+@@ -332,7 +356,8 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
+ 			dev_dbg(&I801_dev->dev, "Error: no response!\n");
+ 		}
  
- 	if (dev->release)
- 		dev->release(dev);
-@@ -101,8 +100,8 @@ static void device_release(struct kobject * kobj)
- 	else if (dev->class && dev->class->dev_release)
- 		dev->class->dev_release(dev);
- 	else {
--		printk(KERN_ERR "Device '%s' does not have a release() function, "
--			"it is broken and must be fixed.\n",
-+		printk(KERN_ERR "Device '%s' does not have a release() "
-+			"function, it is broken and must be fixed.\n",
- 			dev->bus_id);
- 		WARN_ON(1);
- 	}
-@@ -185,7 +184,8 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
- 		add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
+-		if (i == 1 && read_write == I2C_SMBUS_READ) {
++		if (i == 1 && read_write == I2C_SMBUS_READ
++		 && command != I2C_SMBUS_I2C_BLOCK_DATA) {
+ 			len = inb_p(SMBHSTDAT0);
+ 			if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
+ 				return -1;
+@@ -353,9 +378,9 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
+ 				temp);
+ 		}
+ 		dev_dbg(&I801_dev->dev, "Block (post %d): CNT=%02x, CMD=%02x, "
+-			"ADD=%02x, DAT0=%02x, BLKDAT=%02x\n", i,
++			"ADD=%02x, DAT0=%02x, DAT1=%02x, BLKDAT=%02x\n", i,
+ 			inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD),
+-			inb_p(SMBHSTDAT0), inb_p(SMBBLKDAT));
++			inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1), inb_p(SMBBLKDAT));
  
- 		if (dev->driver)
--			add_uevent_var(env, "PHYSDEVDRIVER=%s", dev->driver->name);
-+			add_uevent_var(env, "PHYSDEVDRIVER=%s",
-+				       dev->driver->name);
+ 		if (result < 0)
+ 			return result;
+@@ -384,33 +409,38 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
+ 			pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc);
+ 			pci_write_config_byte(I801_dev, SMBHSTCFG,
+ 					      hostc | SMBHSTCFG_I2C_EN);
+-		} else {
++		} else if (!(i801_features & FEATURE_I2C_BLOCK_READ)) {
+ 			dev_err(&I801_dev->dev,
+-				"I2C_SMBUS_I2C_BLOCK_READ not DB!\n");
++				"I2C block read is unsupported!\n");
+ 			return -1;
+ 		}
  	}
- #endif
  
-@@ -193,15 +193,16 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
- 	if (dev->bus && dev->bus->uevent) {
- 		retval = dev->bus->uevent(dev, env);
- 		if (retval)
--			pr_debug ("%s: bus uevent() returned %d\n",
--				  __FUNCTION__, retval);
-+			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
-+				 dev->bus_id, __FUNCTION__, retval);
+-	if (read_write == I2C_SMBUS_WRITE) {
++	if (read_write == I2C_SMBUS_WRITE
++	 || command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ 		if (data->block[0] < 1)
+ 			data->block[0] = 1;
+ 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ 			data->block[0] = I2C_SMBUS_BLOCK_MAX;
+ 	} else {
+-		data->block[0] = 32;	/* max for reads */
++		data->block[0] = 32;	/* max for SMBus block reads */
  	}
  
- 	/* have the class specific function add its stuff */
- 	if (dev->class && dev->class->dev_uevent) {
- 		retval = dev->class->dev_uevent(dev, env);
- 		if (retval)
--			pr_debug("%s: class uevent() returned %d\n",
-+			pr_debug("device: '%s': %s: class uevent() "
-+				 "returned %d\n", dev->bus_id,
- 				 __FUNCTION__, retval);
- 	}
+-	if (isich4 && i801_set_block_buffer_mode() == 0 )
++	if ((i801_features & FEATURE_BLOCK_BUFFER)
++	 && !(command == I2C_SMBUS_I2C_BLOCK_DATA
++	      && read_write == I2C_SMBUS_READ)
++	 && i801_set_block_buffer_mode() == 0)
+ 		result = i801_block_transaction_by_block(data, read_write,
+ 							 hwpec);
+ 	else
+ 		result = i801_block_transaction_byte_by_byte(data, read_write,
+-							     hwpec);
++							     command, hwpec);
  
-@@ -209,7 +210,8 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
- 	if (dev->type && dev->type->uevent) {
- 		retval = dev->type->uevent(dev, env);
- 		if (retval)
--			pr_debug("%s: dev_type uevent() returned %d\n",
-+			pr_debug("device: '%s': %s: dev_type uevent() "
-+				 "returned %d\n", dev->bus_id,
- 				 __FUNCTION__, retval);
+ 	if (result == 0 && hwpec)
+ 		i801_wait_hwpec();
+ 
+-	if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
++	if (command == I2C_SMBUS_I2C_BLOCK_DATA
++	 && read_write == I2C_SMBUS_WRITE) {
+ 		/* restore saved configuration register value */
+ 		pci_write_config_byte(I801_dev, SMBHSTCFG, hostc);
  	}
+@@ -426,7 +456,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+ 	int block = 0;
+ 	int ret, xact = 0;
  
-@@ -325,7 +327,8 @@ static int device_add_groups(struct device *dev,
- 			error = sysfs_create_group(&dev->kobj, groups[i]);
- 			if (error) {
- 				while (--i >= 0)
--					sysfs_remove_group(&dev->kobj, groups[i]);
-+					sysfs_remove_group(&dev->kobj,
-+							   groups[i]);
- 				break;
- 			}
- 		}
-@@ -401,20 +404,15 @@ static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
- static struct device_attribute devt_attr =
- 	__ATTR(dev, S_IRUGO, show_dev, NULL);
+-	hwpec = isich4 && (flags & I2C_CLIENT_PEC)
++	hwpec = (i801_features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
+ 		&& size != I2C_SMBUS_QUICK
+ 		&& size != I2C_SMBUS_I2C_BLOCK_DATA;
  
--/*
-- *	devices_subsys - structure to be registered with kobject core.
-- */
--
--decl_subsys(devices, &device_ktype, &device_uevent_ops);
--
-+/* kset to create /sys/devices/  */
-+struct kset *devices_kset;
+@@ -462,12 +492,23 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+ 		xact = I801_WORD_DATA;
+ 		break;
+ 	case I2C_SMBUS_BLOCK_DATA:
+-	case I2C_SMBUS_I2C_BLOCK_DATA:
+ 		outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ 		       SMBHSTADD);
+ 		outb_p(command, SMBHSTCMD);
+ 		block = 1;
+ 		break;
++	case I2C_SMBUS_I2C_BLOCK_DATA:
++		/* NB: page 240 of ICH5 datasheet shows that the R/#W
++		 * bit should be cleared here, even when reading */
++		outb_p((addr & 0x7f) << 1, SMBHSTADD);
++		if (read_write == I2C_SMBUS_READ) {
++			/* NB: page 240 of ICH5 datasheet also shows
++			 * that DATA1 is the cmd field when reading */
++			outb_p(command, SMBHSTDAT1);
++		} else
++			outb_p(command, SMBHSTCMD);
++		block = 1;
++		break;
+ 	case I2C_SMBUS_PROC_CALL:
+ 	default:
+ 		dev_err(&I801_dev->dev, "Unsupported transaction %d\n", size);
+@@ -487,7 +528,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+ 	/* Some BIOSes don't like it when PEC is enabled at reboot or resume
+ 	   time, so we forcibly disable it after every transaction. Turn off
+ 	   E32B for the same reason. */
+-	if (hwpec)
++	if (hwpec || block)
+ 		outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
+ 		       SMBAUXCTL);
  
- /**
-- *	device_create_file - create sysfs attribute file for device.
-- *	@dev:	device.
-- *	@attr:	device attribute descriptor.
-+ * device_create_file - create sysfs attribute file for device.
-+ * @dev: device.
-+ * @attr: device attribute descriptor.
-  */
--
--int device_create_file(struct device * dev, struct device_attribute * attr)
-+int device_create_file(struct device *dev, struct device_attribute *attr)
+@@ -514,9 +555,11 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+ static u32 i801_func(struct i2c_adapter *adapter)
  {
- 	int error = 0;
- 	if (get_device(dev)) {
-@@ -425,12 +423,11 @@ int device_create_file(struct device * dev, struct device_attribute * attr)
+ 	return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+-	    I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+-	    I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK
+-	     | (isich4 ? I2C_FUNC_SMBUS_PEC : 0);
++	       I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
++	       I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK |
++	       ((i801_features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
++	       ((i801_features & FEATURE_I2C_BLOCK_READ) ?
++		I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0);
  }
  
- /**
-- *	device_remove_file - remove sysfs attribute file.
-- *	@dev:	device.
-- *	@attr:	device attribute descriptor.
-+ * device_remove_file - remove sysfs attribute file.
-+ * @dev: device.
-+ * @attr: device attribute descriptor.
-  */
--
--void device_remove_file(struct device * dev, struct device_attribute * attr)
-+void device_remove_file(struct device *dev, struct device_attribute *attr)
- {
- 	if (get_device(dev)) {
- 		sysfs_remove_file(&dev->kobj, &attr->attr);
-@@ -511,22 +508,20 @@ static void klist_children_put(struct klist_node *n)
- 	put_device(dev);
+ static const struct i2c_algorithm smbus_algorithm = {
+@@ -556,8 +599,8 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
+ 	int err;
+ 
+ 	I801_dev = dev;
++	i801_features = 0;
+ 	switch (dev->device) {
+-	case PCI_DEVICE_ID_INTEL_82801DB_3:
+ 	case PCI_DEVICE_ID_INTEL_82801EB_3:
+ 	case PCI_DEVICE_ID_INTEL_ESB_4:
+ 	case PCI_DEVICE_ID_INTEL_ICH6_16:
+@@ -565,11 +608,13 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
+ 	case PCI_DEVICE_ID_INTEL_ESB2_17:
+ 	case PCI_DEVICE_ID_INTEL_ICH8_5:
+ 	case PCI_DEVICE_ID_INTEL_ICH9_6:
++		i801_features |= FEATURE_I2C_BLOCK_READ;
++		/* fall through */
++	case PCI_DEVICE_ID_INTEL_82801DB_3:
+ 	case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
+-		isich4 = 1;
++		i801_features |= FEATURE_SMBUS_PEC;
++		i801_features |= FEATURE_BLOCK_BUFFER;
+ 		break;
+-	default:
+-		isich4 = 0;
+ 	}
+ 
+ 	err = pci_enable_device(dev);
+@@ -610,6 +655,11 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
+ 	else
+ 		dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n");
+ 
++	/* Clear special mode bits */
++	if (i801_features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
++		outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
++		       SMBAUXCTL);
++
+ 	/* set up the sysfs linkage to our parent device */
+ 	i801_adapter.dev.parent = &dev->dev;
+ 
+@@ -678,9 +728,8 @@ static void __exit i2c_i801_exit(void)
+ 	pci_unregister_driver(&i801_driver);
  }
  
--
- /**
-- *	device_initialize - init device structure.
-- *	@dev:	device.
-+ * device_initialize - init device structure.
-+ * @dev: device.
+-MODULE_AUTHOR ("Frodo Looijaard <frodol at dds.nl>, "
+-		"Philip Edelbrock <phil at netroedge.com>, "
+-		"and Mark D. Studebaker <mdsxyz123 at yahoo.com>");
++MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123 at yahoo.com>, "
++	      "Jean Delvare <khali at linux-fr.org>");
+ MODULE_DESCRIPTION("I801 SMBus driver");
+ MODULE_LICENSE("GPL");
+ 
+diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
+index 9b43ff7..7c7eb0c 100644
+--- a/drivers/i2c/busses/i2c-ibm_iic.c
++++ b/drivers/i2c/busses/i2c-ibm_iic.c
+@@ -6,7 +6,7 @@
+  * Copyright (c) 2003, 2004 Zultys Technologies.
+  * Eugene Surovegin <eugene.surovegin at zultys.com> or <ebs at ebshome.net>
   *
-- *	This prepares the device for use by other layers,
-- *	including adding it to the device hierarchy.
-- *	It is the first half of device_register(), if called by
-- *	that, though it can also be called separately, so one
-- *	may use @dev's fields (e.g. the refcount).
-+ * This prepares the device for use by other layers,
-+ * including adding it to the device hierarchy.
-+ * It is the first half of device_register(), if called by
-+ * that, though it can also be called separately, so one
-+ * may use @dev's fields (e.g. the refcount).
-  */
--
- void device_initialize(struct device *dev)
- {
--	kobj_set_kset_s(dev, devices_subsys);
--	kobject_init(&dev->kobj);
-+	dev->kobj.kset = devices_kset;
-+	kobject_init(&dev->kobj, &device_ktype);
- 	klist_init(&dev->klist_children, klist_children_get,
- 		   klist_children_put);
- 	INIT_LIST_HEAD(&dev->dma_pools);
-@@ -539,36 +534,39 @@ void device_initialize(struct device *dev)
+- * Based on original work by 
++ * Based on original work by
+  * 	Ian DaSilva  <idasilva at mvista.com>
+  *      Armin Kuster <akuster at mvista.com>
+  * 	Matt Porter  <mporter at mvista.com>
+@@ -86,8 +86,8 @@ static void dump_iic_regs(const char* header, struct ibm_iic_private* dev)
+ 	       KERN_DEBUG "  sts      = 0x%02x, extsts = 0x%02x\n"
+ 	       KERN_DEBUG "  clkdiv   = 0x%02x, xfrcnt = 0x%02x\n"
+ 	       KERN_DEBUG "  xtcntlss = 0x%02x, directcntl = 0x%02x\n",
+-		in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts), 
+-		in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt), 
++		in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts),
++		in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt),
+ 		in_8(&iic->xtcntlss), in_8(&iic->directcntl));
  }
- 
- #ifdef CONFIG_SYSFS_DEPRECATED
--static struct kobject * get_device_parent(struct device *dev,
--					  struct device *parent)
-+static struct kobject *get_device_parent(struct device *dev,
-+					 struct device *parent)
+ #  define DUMP_REGS(h,dev)	dump_iic_regs((h),(dev))
+@@ -125,7 +125,7 @@ static inline void iic_interrupt_mode(struct ibm_iic_private* dev, int enable)
  {
--	/*
--	 * Set the parent to the class, not the parent device
--	 * for topmost devices in class hierarchy.
--	 * This keeps sysfs from having a symlink to make old
--	 * udevs happy
--	 */
-+	/* class devices without a parent live in /sys/class/<classname>/ */
- 	if (dev->class && (!parent || parent->class != dev->class))
- 		return &dev->class->subsys.kobj;
-+	/* all other devices keep their parent */
- 	else if (parent)
- 		return &parent->kobj;
- 
- 	return NULL;
+ 	out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0);
  }
+- 
 +
-+static inline void cleanup_device_parent(struct device *dev) {}
-+static inline void cleanup_glue_dir(struct device *dev,
-+				    struct kobject *glue_dir) {}
- #else
- static struct kobject *virtual_device_parent(struct device *dev)
- {
- 	static struct kobject *virtual_dir = NULL;
+ /*
+  * Initialize IIC interface.
+  */
+@@ -134,7 +134,7 @@ static void iic_dev_init(struct ibm_iic_private* dev)
+ 	volatile struct iic_regs __iomem *iic = dev->vaddr;
  
- 	if (!virtual_dir)
--		virtual_dir = kobject_add_dir(&devices_subsys.kobj, "virtual");
-+		virtual_dir = kobject_create_and_add("virtual",
-+						     &devices_kset->kobj);
+ 	DBG("%d: init\n", dev->idx);
+-	
++
+ 	/* Clear master address */
+ 	out_8(&iic->lmadr, 0);
+ 	out_8(&iic->hmadr, 0);
+@@ -160,7 +160,7 @@ static void iic_dev_init(struct ibm_iic_private* dev)
  
- 	return virtual_dir;
+ 	/* Clear control register */
+ 	out_8(&iic->cntl, 0);
+-	
++
+ 	/* Enable interrupts if possible */
+ 	iic_interrupt_mode(dev, dev->irq >= 0);
+ 
+@@ -171,7 +171,7 @@ static void iic_dev_init(struct ibm_iic_private* dev)
+ 	DUMP_REGS("iic_init", dev);
  }
  
--static struct kobject * get_device_parent(struct device *dev,
--					  struct device *parent)
-+static struct kobject *get_device_parent(struct device *dev,
-+					 struct device *parent)
+-/* 
++/*
+  * Reset IIC interface
+  */
+ static void iic_dev_reset(struct ibm_iic_private* dev)
+@@ -179,42 +179,42 @@ static void iic_dev_reset(struct ibm_iic_private* dev)
+ 	volatile struct iic_regs __iomem *iic = dev->vaddr;
+ 	int i;
+ 	u8 dc;
+-	
++
+ 	DBG("%d: soft reset\n", dev->idx);
+ 	DUMP_REGS("reset", dev);
+-	
++
+     	/* Place chip in the reset state */
+ 	out_8(&iic->xtcntlss, XTCNTLSS_SRST);
+-	
++
+ 	/* Check if bus is free */
+-	dc = in_8(&iic->directcntl);	
++	dc = in_8(&iic->directcntl);
+ 	if (!DIRCTNL_FREE(dc)){
+ 		DBG("%d: trying to regain bus control\n", dev->idx);
+-	
++
+ 		/* Try to set bus free state */
+-		out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC);	
+-	
++		out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC);
++
+ 		/* Wait until we regain bus control */
+ 		for (i = 0; i < 100; ++i){
+ 			dc = in_8(&iic->directcntl);
+ 			if (DIRCTNL_FREE(dc))
+ 				break;
+-			
++
+ 			/* Toggle SCL line */
+ 			dc ^= DIRCNTL_SCC;
+ 			out_8(&iic->directcntl, dc);
+ 			udelay(10);
+ 			dc ^= DIRCNTL_SCC;
+ 			out_8(&iic->directcntl, dc);
+-			
++
+ 			/* be nice */
+ 			cond_resched();
+ 		}
+ 	}
+-	
++
+ 	/* Remove reset */
+ 	out_8(&iic->xtcntlss, 0);
+-	
++
+ 	/* Reinitialize interface */
+ 	iic_dev_init(dev);
+ }
+@@ -324,14 +324,14 @@ static irqreturn_t iic_handler(int irq, void *dev_id)
  {
-+	int retval;
+ 	struct ibm_iic_private* dev = (struct ibm_iic_private*)dev_id;
+ 	volatile struct iic_regs __iomem *iic = dev->vaddr;
+-	
+-	DBG2("%d: irq handler, STS = 0x%02x, EXTSTS = 0x%02x\n", 
 +
- 	if (dev->class) {
- 		struct kobject *kobj = NULL;
- 		struct kobject *parent_kobj;
-@@ -576,8 +574,8 @@ static struct kobject * get_device_parent(struct device *dev,
++	DBG2("%d: irq handler, STS = 0x%02x, EXTSTS = 0x%02x\n",
+ 	     dev->idx, in_8(&iic->sts), in_8(&iic->extsts));
+-	
++
+ 	/* Acknowledge IRQ and wakeup iic_wait_for_tc */
+ 	out_8(&iic->sts, STS_IRQA | STS_SCMP);
+ 	wake_up_interruptible(&dev->wq);
+-	
++
+ 	return IRQ_HANDLED;
+ }
  
- 		/*
- 		 * If we have no parent, we live in "virtual".
--		 * Class-devices with a bus-device as parent, live
--		 * in a class-directory to prevent namespace collisions.
-+		 * Class-devices with a non class-device as parent, live
-+		 * in a "glue" directory to prevent namespace collisions.
- 		 */
- 		if (parent == NULL)
- 			parent_kobj = virtual_device_parent(dev);
-@@ -598,25 +596,45 @@ static struct kobject * get_device_parent(struct device *dev,
- 			return kobj;
+@@ -341,19 +341,19 @@ static irqreturn_t iic_handler(int irq, void *dev_id)
+  */
+ static int iic_xfer_result(struct ibm_iic_private* dev)
+ {
+-	volatile struct iic_regs __iomem *iic = dev->vaddr;	
+-	
++	volatile struct iic_regs __iomem *iic = dev->vaddr;
++
+ 	if (unlikely(in_8(&iic->sts) & STS_ERR)){
+-		DBG("%d: xfer error, EXTSTS = 0x%02x\n", dev->idx, 
++		DBG("%d: xfer error, EXTSTS = 0x%02x\n", dev->idx,
+ 			in_8(&iic->extsts));
+-				
++
+ 		/* Clear errors and possible pending IRQs */
+-		out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD | 
++		out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD |
+ 			EXTSTS_LA | EXTSTS_ICT | EXTSTS_XFRA);
+-			
++
+ 		/* Flush master data buffer */
+ 		out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB);
+-		
++
+ 		/* Is bus free?
+ 		 * If error happened during combined xfer
+ 		 * IIC interface is usually stuck in some strange
+@@ -376,11 +376,11 @@ static void iic_abort_xfer(struct ibm_iic_private* dev)
+ {
+ 	volatile struct iic_regs __iomem *iic = dev->vaddr;
+ 	unsigned long x;
+-	
++
+ 	DBG("%d: iic_abort_xfer\n", dev->idx);
+-	
++
+ 	out_8(&iic->cntl, CNTL_HMT);
+-	
++
+ 	/*
+ 	 * Wait for the abort command to complete.
+ 	 * It's not worth to be optimized, just poll (timeout >= 1 tick)
+@@ -405,13 +405,13 @@ static void iic_abort_xfer(struct ibm_iic_private* dev)
+  * Returns the number of transferred bytes or error (<0)
+  */
+ static int iic_wait_for_tc(struct ibm_iic_private* dev){
+-	
++
+ 	volatile struct iic_regs __iomem *iic = dev->vaddr;
+ 	int ret = 0;
+-	
++
+ 	if (dev->irq >= 0){
+ 		/* Interrupt mode */
+-		ret = wait_event_interruptible_timeout(dev->wq, 
++		ret = wait_event_interruptible_timeout(dev->wq,
+ 			!(in_8(&iic->sts) & STS_PT), dev->adap.timeout * HZ);
  
- 		/* or create a new class-directory at the parent device */
--		return kobject_kset_add_dir(&dev->class->class_dirs,
--					    parent_kobj, dev->class->name);
-+		k = kobject_create();
-+		if (!k)
-+			return NULL;
-+		k->kset = &dev->class->class_dirs;
-+		retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
-+		if (retval < 0) {
-+			kobject_put(k);
-+			return NULL;
+ 		if (unlikely(ret < 0))
+@@ -424,37 +424,37 @@ static int iic_wait_for_tc(struct ibm_iic_private* dev){
+ 	else {
+ 		/* Polling mode */
+ 		unsigned long x = jiffies + dev->adap.timeout * HZ;
+-		
++
+ 		while (in_8(&iic->sts) & STS_PT){
+ 			if (unlikely(time_after(jiffies, x))){
+ 				DBG("%d: poll timeout\n", dev->idx);
+ 				ret = -ETIMEDOUT;
+ 				break;
+ 			}
+-		
++
+ 			if (unlikely(signal_pending(current))){
+ 				DBG("%d: poll interrupted\n", dev->idx);
+ 				ret = -ERESTARTSYS;
+ 				break;
+ 			}
+ 			schedule();
+-		}	
 +		}
-+		/* do not emit an uevent for this simple "glue" directory */
-+		return k;
  	}
- 
- 	if (parent)
- 		return &parent->kobj;
- 	return NULL;
+-	
++
+ 	if (unlikely(ret < 0))
+ 		iic_abort_xfer(dev);
+ 	else
+ 		ret = iic_xfer_result(dev);
+-	
++
+ 	DBG2("%d: iic_wait_for_tc -> %d\n", dev->idx, ret);
+-	
++
+ 	return ret;
  }
+ 
+ /*
+  * Low level master transfer routine
+  */
+-static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm, 
++static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm,
+ 			  int combined_xfer)
+ {
+ 	volatile struct iic_regs __iomem *iic = dev->vaddr;
+@@ -465,48 +465,48 @@ static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm,
+ 	u8 cntl = (in_8(&iic->cntl) & CNTL_AMD) | CNTL_PT;
+ 	if (pm->flags & I2C_M_RD)
+ 		cntl |= CNTL_RW;
+-	
 +
-+static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
-+{
-+	/* see if we live in a "glue" directory */
-+	if (!dev->class || glue_dir->kset != &dev->class->class_dirs)
-+		return;
+ 	loops = (len + 3) / 4;
+ 	for (i = 0; i < loops; ++i, len -= 4){
+ 		int count = len > 4 ? 4 : len;
+ 		u8 cmd = cntl | ((count - 1) << CNTL_TCT_SHIFT);
+-		
 +
-+	kobject_put(glue_dir);
-+}
+ 		if (!(cntl & CNTL_RW))
+ 			for (j = 0; j < count; ++j)
+ 				out_8((void __iomem *)&iic->mdbuf, *buf++);
+-		
 +
-+static void cleanup_device_parent(struct device *dev)
-+{
-+	cleanup_glue_dir(dev, dev->kobj.parent);
-+}
- #endif
+ 		if (i < loops - 1)
+ 			cmd |= CNTL_CHT;
+ 		else if (combined_xfer)
+ 			cmd |= CNTL_RPST;
+-		
++
+ 		DBG2("%d: xfer_bytes, %d, CNTL = 0x%02x\n", dev->idx, count, cmd);
+-		
++
+ 		/* Start transfer */
+ 		out_8(&iic->cntl, cmd);
+-		
++
+ 		/* Wait for completion */
+ 		ret = iic_wait_for_tc(dev);
  
--static int setup_parent(struct device *dev, struct device *parent)
-+static void setup_parent(struct device *dev, struct device *parent)
+ 		if (unlikely(ret < 0))
+ 			break;
+ 		else if (unlikely(ret != count)){
+-			DBG("%d: xfer_bytes, requested %d, transfered %d\n", 
++			DBG("%d: xfer_bytes, requested %d, transfered %d\n",
+ 				dev->idx, count, ret);
+-			
++
+ 			/* If it's not a last part of xfer, abort it */
+ 			if (combined_xfer || (i < loops - 1))
+     				iic_abort_xfer(dev);
+-				
++
+ 			ret = -EREMOTEIO;
+-			break;				
++			break;
+ 		}
+-		
++
+ 		if (cntl & CNTL_RW)
+ 			for (j = 0; j < count; ++j)
+ 				*buf++ = in_8((void __iomem *)&iic->mdbuf);
+ 	}
+-	
++
+ 	return ret > 0 ? 0 : ret;
+ }
+ 
+@@ -517,10 +517,10 @@ static inline void iic_address(struct ibm_iic_private* dev, struct i2c_msg* msg)
  {
- 	struct kobject *kobj;
- 	kobj = get_device_parent(dev, parent);
--	if (IS_ERR(kobj))
--		return PTR_ERR(kobj);
- 	if (kobj)
- 		dev->kobj.parent = kobj;
--	return 0;
+ 	volatile struct iic_regs __iomem *iic = dev->vaddr;
+ 	u16 addr = msg->addr;
+-	
+-	DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx, 
++
++	DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx,
+ 		addr, msg->flags & I2C_M_TEN ? 10 : 7);
+-	
++
+ 	if (msg->flags & I2C_M_TEN){
+ 	    out_8(&iic->cntl, CNTL_AMD);
+ 	    out_8(&iic->lmadr, addr);
+@@ -537,15 +537,15 @@ static inline int iic_invalid_address(const struct i2c_msg* p)
+ 	return (p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN) && (p->addr > 0x7f));
  }
  
- static int device_add_class_symlinks(struct device *dev)
-@@ -625,65 +643,76 @@ static int device_add_class_symlinks(struct device *dev)
+-static inline int iic_address_neq(const struct i2c_msg* p1, 
++static inline int iic_address_neq(const struct i2c_msg* p1,
+ 				  const struct i2c_msg* p2)
+ {
+-	return (p1->addr != p2->addr) 
++	return (p1->addr != p2->addr)
+ 		|| ((p1->flags & I2C_M_TEN) != (p2->flags & I2C_M_TEN));
+-} 
++}
  
- 	if (!dev->class)
- 		return 0;
+ /*
+- * Generic master transfer entrypoint. 
++ * Generic master transfer entrypoint.
+  * Returns the number of processed messages or error (<0)
+  */
+ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+@@ -553,20 +553,20 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+     	struct ibm_iic_private* dev = (struct ibm_iic_private*)(i2c_get_adapdata(adap));
+ 	volatile struct iic_regs __iomem *iic = dev->vaddr;
+ 	int i, ret = 0;
+-	
 +
- 	error = sysfs_create_link(&dev->kobj, &dev->class->subsys.kobj,
- 				  "subsystem");
- 	if (error)
- 		goto out;
--	/*
--	 * If this is not a "fake" compatible device, then create the
--	 * symlink from the class to the device.
--	 */
--	if (dev->kobj.parent != &dev->class->subsys.kobj) {
+ 	DBG2("%d: iic_xfer, %d msg(s)\n", dev->idx, num);
+-	
 +
-+#ifdef CONFIG_SYSFS_DEPRECATED
-+	/* stacked class devices need a symlink in the class directory */
-+	if (dev->kobj.parent != &dev->class->subsys.kobj &&
-+	    dev->type != &part_type) {
- 		error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
- 					  dev->bus_id);
- 		if (error)
- 			goto out_subsys;
+ 	if (!num)
+ 		return 0;
+-	
++
+ 	/* Check the sanity of the passed messages.
+ 	 * Uhh, generic i2c layer is more suitable place for such code...
+ 	 */
+ 	if (unlikely(iic_invalid_address(&msgs[0]))){
+-		DBG("%d: invalid address 0x%03x (%d-bit)\n", dev->idx, 
++		DBG("%d: invalid address 0x%03x (%d-bit)\n", dev->idx,
+ 			msgs[0].addr, msgs[0].flags & I2C_M_TEN ? 10 : 7);
+ 		return -EINVAL;
+-	}		
++	}
+ 	for (i = 0; i < num; ++i){
+ 		if (unlikely(msgs[i].len <= 0)){
+ 			if (num == 1 && !msgs[0].len){
+@@ -576,7 +576,7 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 				 */
+ 				return iic_smbus_quick(dev, &msgs[0]);
+ 			}
+-			DBG("%d: invalid len %d in msg[%d]\n", dev->idx, 
++			DBG("%d: invalid len %d in msg[%d]\n", dev->idx,
+ 				msgs[i].len, i);
+ 			return -EINVAL;
+ 		}
+@@ -585,34 +585,34 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 			return -EINVAL;
+ 		}
  	}
--	if (dev->parent) {
--#ifdef CONFIG_SYSFS_DEPRECATED
--		{
--			struct device *parent = dev->parent;
--			char *class_name;
--
--			/*
--			 * In old sysfs stacked class devices had 'device'
--			 * link pointing to real device instead of parent
--			 */
--			while (parent->class && !parent->bus && parent->parent)
--				parent = parent->parent;
--
--			error = sysfs_create_link(&dev->kobj,
--						  &parent->kobj,
--						  "device");
--			if (error)
--				goto out_busid;
- 
--			class_name = make_class_name(dev->class->name,
--							&dev->kobj);
--			if (class_name)
--				error = sysfs_create_link(&dev->parent->kobj,
--							&dev->kobj, class_name);
--			kfree(class_name);
--			if (error)
--				goto out_device;
--		}
--#else
--		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
-+	if (dev->parent && dev->type != &part_type) {
-+		struct device *parent = dev->parent;
-+		char *class_name;
+-	
 +
-+		/*
-+		 * stacked class devices have the 'device' link
-+		 * pointing to the bus device instead of the parent
-+		 */
-+		while (parent->class && !parent->bus && parent->parent)
-+			parent = parent->parent;
+ 	/* Check bus state */
+ 	if (unlikely((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE)){
+ 		DBG("%d: iic_xfer, bus is not free\n", dev->idx);
+-		
 +
-+		error = sysfs_create_link(&dev->kobj,
-+					  &parent->kobj,
- 					  "device");
- 		if (error)
- 			goto out_busid;
--#endif
+ 		/* Usually it means something serious has happend.
+ 		 * We *cannot* have unfinished previous transfer
+ 		 * so it doesn't make any sense to try to stop it.
+-		 * Probably we were not able to recover from the 
++		 * Probably we were not able to recover from the
+ 		 * previous error.
+ 		 * The only *reasonable* thing I can think of here
+ 		 * is soft reset.  --ebs
+ 		 */
+ 		iic_dev_reset(dev);
+-		
 +
-+		class_name = make_class_name(dev->class->name,
-+						&dev->kobj);
-+		if (class_name)
-+			error = sysfs_create_link(&dev->parent->kobj,
-+						&dev->kobj, class_name);
-+		kfree(class_name);
-+		if (error)
-+			goto out_device;
+ 		if ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){
+ 			DBG("%d: iic_xfer, bus is still not free\n", dev->idx);
+ 			return -EREMOTEIO;
+ 		}
+-	} 
++	}
+ 	else {
+ 		/* Flush master data buffer (just in case) */
+ 		out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB);
  	}
- 	return 0;
+-	
++
+ 	/* Load slave address */
+ 	iic_address(dev, &msgs[0]);
+-	
++
+ 	/* Do real transfer */
+     	for (i = 0; i < num && !ret; ++i)
+ 		ret = iic_xfer_bytes(dev, &msgs[i], i < num - 1);
+@@ -648,7 +648,7 @@ static inline u8 iic_clckdiv(unsigned int opb)
  
--#ifdef CONFIG_SYSFS_DEPRECATED
- out_device:
--	if (dev->parent)
-+	if (dev->parent && dev->type != &part_type)
- 		sysfs_remove_link(&dev->kobj, "device");
--#endif
- out_busid:
--	if (dev->kobj.parent != &dev->class->subsys.kobj)
-+	if (dev->kobj.parent != &dev->class->subsys.kobj &&
-+	    dev->type != &part_type)
- 		sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
-+#else
-+	/* link in the class directory pointing to the device */
-+	error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
-+				  dev->bus_id);
-+	if (error)
-+		goto out_subsys;
+ 	/* Convert to MHz */
+ 	opb /= 1000000;
+-	
 +
-+	if (dev->parent && dev->type != &part_type) {
-+		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
-+					  "device");
-+		if (error)
-+			goto out_busid;
-+	}
-+	return 0;
+ 	if (opb < 20 || opb > 150){
+ 		printk(KERN_CRIT "ibm-iic: invalid OPB clock frequency %u MHz\n",
+ 			opb);
+@@ -666,7 +666,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
+ 	struct i2c_adapter* adap;
+ 	struct ocp_func_iic_data* iic_data = ocp->def->additions;
+ 	int ret;
+-	
 +
-+out_busid:
-+	sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
-+#endif
+ 	if (!iic_data)
+ 		printk(KERN_WARNING"ibm-iic%d: missing additional data!\n",
+ 			ocp->def->index);
+@@ -679,7 +679,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
+ 
+ 	dev->idx = ocp->def->index;
+ 	ocp_set_drvdata(ocp, dev);
+-	
 +
- out_subsys:
- 	sysfs_remove_link(&dev->kobj, "subsystem");
- out:
-@@ -694,8 +723,9 @@ static void device_remove_class_symlinks(struct device *dev)
- {
- 	if (!dev->class)
- 		return;
--	if (dev->parent) {
+ 	if (!request_mem_region(ocp->def->paddr, sizeof(struct iic_regs),
+ 				"ibm_iic")) {
+ 		ret = -EBUSY;
+@@ -692,7 +692,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
+ 		ret = -ENXIO;
+ 		goto fail2;
+ 	}
+-	
 +
- #ifdef CONFIG_SYSFS_DEPRECATED
-+	if (dev->parent && dev->type != &part_type) {
- 		char *class_name;
+ 	init_waitqueue_head(&dev->wq);
  
- 		class_name = make_class_name(dev->class->name, &dev->kobj);
-@@ -703,45 +733,59 @@ static void device_remove_class_symlinks(struct device *dev)
- 			sysfs_remove_link(&dev->parent->kobj, class_name);
- 			kfree(class_name);
+ 	dev->irq = iic_force_poll ? -1 : ocp->def->irq;
+@@ -702,29 +702,29 @@ static int __devinit iic_probe(struct ocp_device *ocp){
+ 		 */
+ 		iic_interrupt_mode(dev, 0);
+ 		if (request_irq(dev->irq, iic_handler, 0, "IBM IIC", dev)){
+-			printk(KERN_ERR "ibm-iic%d: request_irq %d failed\n", 
++			printk(KERN_ERR "ibm-iic%d: request_irq %d failed\n",
+ 				dev->idx, dev->irq);
+-			/* Fallback to the polling mode */	
++			/* Fallback to the polling mode */
+ 			dev->irq = -1;
  		}
--#endif
- 		sysfs_remove_link(&dev->kobj, "device");
  	}
--	if (dev->kobj.parent != &dev->class->subsys.kobj)
+-	
 +
-+	if (dev->kobj.parent != &dev->class->subsys.kobj &&
-+	    dev->type != &part_type)
- 		sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
-+#else
-+	if (dev->parent && dev->type != &part_type)
-+		sysfs_remove_link(&dev->kobj, "device");
+ 	if (dev->irq < 0)
+-		printk(KERN_WARNING "ibm-iic%d: using polling mode\n", 
++		printk(KERN_WARNING "ibm-iic%d: using polling mode\n",
+ 			dev->idx);
+-		
 +
-+	sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
-+#endif
+ 	/* Board specific settings */
+ 	dev->fast_mode = iic_force_fast ? 1 : (iic_data ? iic_data->fast_mode : 0);
+-	
+-	/* clckdiv is the same for *all* IIC interfaces, 
 +
- 	sysfs_remove_link(&dev->kobj, "subsystem");
- }
++	/* clckdiv is the same for *all* IIC interfaces,
+ 	 * but I'd rather make a copy than introduce another global. --ebs
+ 	 */
+ 	dev->clckdiv = iic_clckdiv(ocp_sys_info.opb_bus_freq);
+ 	DBG("%d: clckdiv = %d\n", dev->idx, dev->clckdiv);
+-	
++
+ 	/* Initialize IIC interface */
+ 	iic_dev_init(dev);
+-	
++
+ 	/* Register it with i2c layer */
+ 	adap = &dev->adap;
+ 	adap->dev.parent = &ocp->dev;
+@@ -736,7 +736,6 @@ static int __devinit iic_probe(struct ocp_device *ocp){
+ 	adap->client_register = NULL;
+ 	adap->client_unregister = NULL;
+ 	adap->timeout = 1;
+-	adap->retries = 1;
  
- /**
-- *	device_add - add device to device hierarchy.
-- *	@dev:	device.
-+ * device_add - add device to device hierarchy.
-+ * @dev: device.
-  *
-- *	This is part 2 of device_register(), though may be called
-- *	separately _iff_ device_initialize() has been called separately.
-+ * This is part 2 of device_register(), though may be called
-+ * separately _iff_ device_initialize() has been called separately.
-  *
-- *	This adds it to the kobject hierarchy via kobject_add(), adds it
-- *	to the global and sibling lists for the device, then
-- *	adds it to the other relevant subsystems of the driver model.
-+ * This adds it to the kobject hierarchy via kobject_add(), adds it
-+ * to the global and sibling lists for the device, then
-+ * adds it to the other relevant subsystems of the driver model.
-  */
- int device_add(struct device *dev)
- {
- 	struct device *parent = NULL;
- 	struct class_interface *class_intf;
--	int error = -EINVAL;
-+	int error;
+ 	/*
+ 	 * If "dev->idx" is negative we consider it as zero.
+@@ -750,24 +749,24 @@ static int __devinit iic_probe(struct ocp_device *ocp){
+ 			dev->idx);
+ 		goto fail;
+ 	}
+-	
 +
-+	error = pm_sleep_lock();
-+	if (error) {
-+		dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__);
-+		dump_stack();
-+		return error;
-+	}
+ 	printk(KERN_INFO "ibm-iic%d: using %s mode\n", dev->idx,
+ 		dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
  
- 	dev = get_device(dev);
--	if (!dev || !strlen(dev->bus_id))
-+	if (!dev || !strlen(dev->bus_id)) {
-+		error = -EINVAL;
- 		goto Error;
+ 	return 0;
+ 
+-fail:	
++fail:
+ 	if (dev->irq >= 0){
+ 		iic_interrupt_mode(dev, 0);
+ 		free_irq(dev->irq, dev);
+-	}	
 +	}
  
--	pr_debug("DEV: registering device: ID = '%s'\n", dev->bus_id);
-+	pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__);
+ 	iounmap(dev->vaddr);
+-fail2:	
++fail2:
+ 	release_mem_region(ocp->def->paddr, sizeof(struct iic_regs));
+ fail1:
+ 	ocp_set_drvdata(ocp, NULL);
+-	kfree(dev);	
++	kfree(dev);
+ 	return ret;
+ }
  
- 	parent = get_device(dev->parent);
--	error = setup_parent(dev, parent);
--	if (error)
--		goto Error;
-+	setup_parent(dev, parent);
+@@ -783,13 +782,13 @@ static void __devexit iic_remove(struct ocp_device *ocp)
+ 			dev->idx);
+ 		/* That's *very* bad, just shutdown IRQ ... */
+ 		if (dev->irq >= 0){
+-		    iic_interrupt_mode(dev, 0);	
++		    iic_interrupt_mode(dev, 0);
+ 		    free_irq(dev->irq, dev);
+ 		    dev->irq = -1;
+ 		}
+ 	} else {
+ 		if (dev->irq >= 0){
+-		    iic_interrupt_mode(dev, 0);	
++		    iic_interrupt_mode(dev, 0);
+ 		    free_irq(dev->irq, dev);
+ 		}
+ 		iounmap(dev->vaddr);
+@@ -798,7 +797,7 @@ static void __devexit iic_remove(struct ocp_device *ocp)
+ 	}
+ }
  
- 	/* first, register with generic layer. */
--	kobject_set_name(&dev->kobj, "%s", dev->bus_id);
--	error = kobject_add(&dev->kobj);
-+	error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev->bus_id);
- 	if (error)
- 		goto Error;
+-static struct ocp_device_id ibm_iic_ids[] __devinitdata = 
++static struct ocp_device_id ibm_iic_ids[] __devinitdata =
+ {
+ 	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_IIC },
+ 	{ .vendor = OCP_VENDOR_INVALID }
+diff --git a/drivers/i2c/busses/i2c-ibm_iic.h b/drivers/i2c/busses/i2c-ibm_iic.h
+index 59d7b43..fdaa482 100644
+--- a/drivers/i2c/busses/i2c-ibm_iic.h
++++ b/drivers/i2c/busses/i2c-ibm_iic.h
+@@ -2,11 +2,11 @@
+  * drivers/i2c/busses/i2c-ibm_iic.h
+  *
+  * Support for the IIC peripheral on IBM PPC 4xx
+- * 
++ *
+  * Copyright (c) 2003 Zultys Technologies.
+  * Eugene Surovegin <eugene.surovegin at zultys.com> or <ebs at ebshome.net>
+  *
+- * Based on original work by 
++ * Based on original work by
+  * 	Ian DaSilva  <idasilva at mvista.com>
+  *      Armin Kuster <akuster at mvista.com>
+  * 	Matt Porter  <mporter at mvista.com>
+@@ -22,7 +22,7 @@
+ #ifndef __I2C_IBM_IIC_H_
+ #define __I2C_IBM_IIC_H_
  
-@@ -751,7 +795,7 @@ int device_add(struct device *dev)
+-#include <linux/i2c.h> 
++#include <linux/i2c.h>
  
- 	/* notify clients of device entry (new way) */
- 	if (dev->bus)
--		blocking_notifier_call_chain(&dev->bus->bus_notifier,
-+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- 					     BUS_NOTIFY_ADD_DEVICE, dev);
+ struct iic_regs {
+ 	u16 mdbuf;
+@@ -58,7 +58,7 @@ struct ibm_iic_private {
+ #define CNTL_TCT_MASK	0x30
+ #define CNTL_TCT_SHIFT	4
+ #define CNTL_RPST	0x08
+-#define CNTL_CHT	0x04 
++#define CNTL_CHT	0x04
+ #define CNTL_RW		0x02
+ #define CNTL_PT		0x01
  
- 	error = device_create_file(dev, &uevent_attr);
-@@ -795,13 +839,14 @@ int device_add(struct device *dev)
- 	}
-  Done:
- 	put_device(dev);
-+	pm_sleep_unlock();
- 	return error;
-  BusError:
- 	device_pm_remove(dev);
- 	dpm_sysfs_remove(dev);
-  PMError:
- 	if (dev->bus)
--		blocking_notifier_call_chain(&dev->bus->bus_notifier,
-+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- 					     BUS_NOTIFY_DEL_DEVICE, dev);
- 	device_remove_attrs(dev);
-  AttrsError:
-@@ -809,124 +854,84 @@ int device_add(struct device *dev)
-  SymlinkError:
- 	if (MAJOR(dev->devt))
- 		device_remove_file(dev, &devt_attr);
+diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
+index c70146e..ab41400 100644
+--- a/drivers/i2c/busses/i2c-iop3xx.c
++++ b/drivers/i2c/busses/i2c-iop3xx.c
+@@ -490,7 +490,6 @@ iop3xx_i2c_probe(struct platform_device *pdev)
+ 	 * Default values...should these come in from board code?
+ 	 */
+ 	new_adapter->timeout = 100;	
+-	new_adapter->retries = 3;
+ 	new_adapter->algo = &iop3xx_i2c_algo;
+ 
+ 	init_waitqueue_head(&adapter_data->waitq);
+diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c
+deleted file mode 100644
+index 069ed7f..0000000
+--- a/drivers/i2c/busses/i2c-ixp4xx.c
++++ /dev/null
+@@ -1,178 +0,0 @@
+-/*
+- * drivers/i2c/busses/i2c-ixp4xx.c
+- *
+- * Intel's IXP4xx XScale NPU chipsets (IXP420, 421, 422, 425) do not have
+- * an on board I2C controller but provide 16 GPIO pins that are often
+- * used to create an I2C bus. This driver provides an i2c_adapter 
+- * interface that plugs in under algo_bit and drives the GPIO pins
+- * as instructed by the alogorithm driver.
+- *
+- * Author: Deepak Saxena <dsaxena at plexity.net>
+- *
+- * Copyright (c) 2003-2004 MontaVista Software Inc.
+- *
+- * This file is licensed under the terms of the GNU General Public 
+- * License version 2. This program is licensed "as is" without any 
+- * warranty of any kind, whether express or implied.
+- *
+- * NOTE: Since different platforms will use different GPIO pins for
+- *       I2C, this driver uses an IXP4xx-specific platform_data
+- *       pointer to pass the GPIO numbers to the driver. This 
+- *       allows us to support all the different IXP4xx platforms
+- *       w/o having to put #ifdefs in this driver.
+- *
+- *       See arch/arm/mach-ixp4xx/ixdp425.c for an example of building a 
+- *       device list and filling in the ixp4xx_i2c_pins data structure 
+- *       that is passed as the platform_data to this driver.
+- */
 -
--	if (dev->class) {
--		sysfs_remove_link(&dev->kobj, "subsystem");
--		/* If this is not a "fake" compatible device, remove the
--		 * symlink from the class to the device. */
--		if (dev->kobj.parent != &dev->class->subsys.kobj)
--			sysfs_remove_link(&dev->class->subsys.kobj,
--					  dev->bus_id);
--		if (parent) {
--#ifdef CONFIG_SYSFS_DEPRECATED
--			char *class_name = make_class_name(dev->class->name,
--							   &dev->kobj);
--			if (class_name)
--				sysfs_remove_link(&dev->parent->kobj,
--						  class_name);
--			kfree(class_name);
--#endif
--			sysfs_remove_link(&dev->kobj, "device");
--		}
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/platform_device.h>
+-#include <linux/module.h>
+-#include <linux/i2c.h>
+-#include <linux/i2c-algo-bit.h>
+-
+-#include <asm/hardware.h>	/* Pick up IXP4xx-specific bits */
+-
+-static inline int ixp4xx_scl_pin(void *data)
+-{
+-	return ((struct ixp4xx_i2c_pins*)data)->scl_pin;
+-}
+-
+-static inline int ixp4xx_sda_pin(void *data)
+-{
+-	return ((struct ixp4xx_i2c_pins*)data)->sda_pin;
+-}
+-
+-static void ixp4xx_bit_setscl(void *data, int val)
+-{
+-	gpio_line_set(ixp4xx_scl_pin(data), 0);
+-	gpio_line_config(ixp4xx_scl_pin(data),
+-		val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT );
+-}
+-
+-static void ixp4xx_bit_setsda(void *data, int val)
+-{
+-	gpio_line_set(ixp4xx_sda_pin(data), 0);
+-	gpio_line_config(ixp4xx_sda_pin(data),
+-		val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT );
+-}
+-
+-static int ixp4xx_bit_getscl(void *data)
+-{
+-	int scl;
+-
+-	gpio_line_config(ixp4xx_scl_pin(data), IXP4XX_GPIO_IN );
+-	gpio_line_get(ixp4xx_scl_pin(data), &scl);
+-
+-	return scl;
+-}	
+-
+-static int ixp4xx_bit_getsda(void *data)
+-{
+-	int sda;
+-
+-	gpio_line_config(ixp4xx_sda_pin(data), IXP4XX_GPIO_IN );
+-	gpio_line_get(ixp4xx_sda_pin(data), &sda);
+-
+-	return sda;
+-}	
+-
+-struct ixp4xx_i2c_data {
+-	struct ixp4xx_i2c_pins *gpio_pins;
+-	struct i2c_adapter adapter;
+-	struct i2c_algo_bit_data algo_data;
+-};
+-
+-static int ixp4xx_i2c_remove(struct platform_device *plat_dev)
+-{
+-	struct ixp4xx_i2c_data *drv_data = platform_get_drvdata(plat_dev);
+-
+-	platform_set_drvdata(plat_dev, NULL);
+-
+-	i2c_del_adapter(&drv_data->adapter);
+-
+-	kfree(drv_data);
+-
+-	return 0;
+-}
+-
+-static int ixp4xx_i2c_probe(struct platform_device *plat_dev)
+-{
+-	int err;
+-	struct ixp4xx_i2c_pins *gpio = plat_dev->dev.platform_data;
+-	struct ixp4xx_i2c_data *drv_data = 
+-		kzalloc(sizeof(struct ixp4xx_i2c_data), GFP_KERNEL);
+-
+-	if(!drv_data)
+-		return -ENOMEM;
+-
+-	drv_data->gpio_pins = gpio;
+-
+-	/*
+-	 * We could make a lot of these structures static, but
+-	 * certain platforms may have multiple GPIO-based I2C
+-	 * buses for various device domains, so we need per-device
+-	 * algo_data->data. 
+-	 */
+-	drv_data->algo_data.data = gpio;
+-	drv_data->algo_data.setsda = ixp4xx_bit_setsda;
+-	drv_data->algo_data.setscl = ixp4xx_bit_setscl;
+-	drv_data->algo_data.getsda = ixp4xx_bit_getsda;
+-	drv_data->algo_data.getscl = ixp4xx_bit_getscl;
+-	drv_data->algo_data.udelay = 10;
+-	drv_data->algo_data.timeout = 100;
+-
+-	drv_data->adapter.id = I2C_HW_B_IXP4XX;
+-	drv_data->adapter.class = I2C_CLASS_HWMON;
+-	strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
+-		sizeof(drv_data->adapter.name));
+-	drv_data->adapter.algo_data = &drv_data->algo_data;
+-
+-	drv_data->adapter.dev.parent = &plat_dev->dev;
+-
+-	gpio_line_config(gpio->scl_pin, IXP4XX_GPIO_IN);
+-	gpio_line_config(gpio->sda_pin, IXP4XX_GPIO_IN);
+-	gpio_line_set(gpio->scl_pin, 0);
+-	gpio_line_set(gpio->sda_pin, 0);
+-
+-	err = i2c_bit_add_bus(&drv_data->adapter);
+-	if (err) {
+-		printk(KERN_ERR "ERROR: Could not install %s\n", plat_dev->dev.bus_id);
+-
+-		kfree(drv_data);
+-		return err;
 -	}
-  ueventattrError:
- 	device_remove_file(dev, &uevent_attr);
-  attrError:
- 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
- 	kobject_del(&dev->kobj);
-  Error:
-+	cleanup_device_parent(dev);
- 	if (parent)
- 		put_device(parent);
- 	goto Done;
- }
- 
 -
- /**
-- *	device_register - register a device with the system.
-- *	@dev:	pointer to the device structure
-+ * device_register - register a device with the system.
-+ * @dev: pointer to the device structure
-  *
-- *	This happens in two clean steps - initialize the device
-- *	and add it to the system. The two steps can be called
-- *	separately, but this is the easiest and most common.
-- *	I.e. you should only call the two helpers separately if
-- *	have a clearly defined need to use and refcount the device
-- *	before it is added to the hierarchy.
-+ * This happens in two clean steps - initialize the device
-+ * and add it to the system. The two steps can be called
-+ * separately, but this is the easiest and most common.
-+ * I.e. you should only call the two helpers separately if
-+ * have a clearly defined need to use and refcount the device
-+ * before it is added to the hierarchy.
-  */
+-	platform_set_drvdata(plat_dev, drv_data);
 -
- int device_register(struct device *dev)
- {
- 	device_initialize(dev);
- 	return device_add(dev);
- }
- 
+-	return 0;
+-}
 -
- /**
-- *	get_device - increment reference count for device.
-- *	@dev:	device.
-+ * get_device - increment reference count for device.
-+ * @dev: device.
-  *
-- *	This simply forwards the call to kobject_get(), though
-- *	we do take care to provide for the case that we get a NULL
-- *	pointer passed in.
-+ * This simply forwards the call to kobject_get(), though
-+ * we do take care to provide for the case that we get a NULL
-+ * pointer passed in.
-  */
+-static struct platform_driver ixp4xx_i2c_driver = {
+-	.probe		= ixp4xx_i2c_probe,
+-	.remove		= ixp4xx_i2c_remove,
+-	.driver		= {
+-		.name	= "IXP4XX-I2C",
+-		.owner	= THIS_MODULE,
+-	},
+-};
 -
--struct device * get_device(struct device * dev)
-+struct device *get_device(struct device *dev)
- {
- 	return dev ? to_dev(kobject_get(&dev->kobj)) : NULL;
- }
- 
+-static int __init ixp4xx_i2c_init(void)
+-{
+-	return platform_driver_register(&ixp4xx_i2c_driver);
+-}
 -
- /**
-- *	put_device - decrement reference count.
-- *	@dev:	device in question.
-+ * put_device - decrement reference count.
-+ * @dev: device in question.
-  */
--void put_device(struct device * dev)
-+void put_device(struct device *dev)
- {
-+	/* might_sleep(); */
- 	if (dev)
- 		kobject_put(&dev->kobj);
- }
- 
+-static void __exit ixp4xx_i2c_exit(void)
+-{
+-	platform_driver_unregister(&ixp4xx_i2c_driver);
+-}
 -
- /**
-- *	device_del - delete device from system.
-- *	@dev:	device.
-+ * device_del - delete device from system.
-+ * @dev: device.
-  *
-- *	This is the first part of the device unregistration
-- *	sequence. This removes the device from the lists we control
-- *	from here, has it removed from the other driver model
-- *	subsystems it was added to in device_add(), and removes it
-- *	from the kobject hierarchy.
-+ * This is the first part of the device unregistration
-+ * sequence. This removes the device from the lists we control
-+ * from here, has it removed from the other driver model
-+ * subsystems it was added to in device_add(), and removes it
-+ * from the kobject hierarchy.
-  *
-- *	NOTE: this should be called manually _iff_ device_add() was
-- *	also called manually.
-+ * NOTE: this should be called manually _iff_ device_add() was
-+ * also called manually.
-  */
+-module_init(ixp4xx_i2c_init);
+-module_exit(ixp4xx_i2c_exit);
 -
--void device_del(struct device * dev)
-+void device_del(struct device *dev)
+-MODULE_DESCRIPTION("GPIO-based I2C adapter for IXP4xx systems");
+-MODULE_LICENSE("GPL");
+-MODULE_AUTHOR("Deepak Saxena <dsaxena at plexity.net>");
+-
+diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
+index d8de4ac..bbe787b 100644
+--- a/drivers/i2c/busses/i2c-mpc.c
++++ b/drivers/i2c/busses/i2c-mpc.c
+@@ -180,7 +180,7 @@ static void mpc_i2c_stop(struct mpc_i2c *i2c)
+ static int mpc_write(struct mpc_i2c *i2c, int target,
+ 		     const u8 * data, int length, int restart)
  {
--	struct device * parent = dev->parent;
-+	struct device *parent = dev->parent;
- 	struct class_interface *class_intf;
+-	int i;
++	int i, result;
+ 	unsigned timeout = i2c->adap.timeout;
+ 	u32 flags = restart ? CCR_RSTA : 0;
  
-+	device_pm_remove(dev);
- 	if (parent)
- 		klist_del(&dev->knode_parent);
- 	if (MAJOR(dev->devt))
- 		device_remove_file(dev, &devt_attr);
- 	if (dev->class) {
--		sysfs_remove_link(&dev->kobj, "subsystem");
--		/* If this is not a "fake" compatible device, remove the
--		 * symlink from the class to the device. */
--		if (dev->kobj.parent != &dev->class->subsys.kobj)
--			sysfs_remove_link(&dev->class->subsys.kobj,
--					  dev->bus_id);
--		if (parent) {
--#ifdef CONFIG_SYSFS_DEPRECATED
--			char *class_name = make_class_name(dev->class->name,
--							   &dev->kobj);
--			if (class_name)
--				sysfs_remove_link(&dev->parent->kobj,
--						  class_name);
--			kfree(class_name);
--#endif
--			sysfs_remove_link(&dev->kobj, "device");
--		}
-+		device_remove_class_symlinks(dev);
+@@ -192,15 +192,17 @@ static int mpc_write(struct mpc_i2c *i2c, int target,
+ 	/* Write target byte */
+ 	writeb((target << 1), i2c->base + MPC_I2C_DR);
  
- 		down(&dev->class->sem);
- 		/* notify any interfaces that the device is now gone */
-@@ -936,31 +941,6 @@ void device_del(struct device * dev)
- 		/* remove the device from the class list */
- 		list_del_init(&dev->node);
- 		up(&dev->class->sem);
--
--		/* If we live in a parent class-directory, unreference it */
--		if (dev->kobj.parent->kset == &dev->class->class_dirs) {
--			struct device *d;
--			int other = 0;
--
--			/*
--			 * if we are the last child of our class, delete
--			 * our class-directory at this parent
--			 */
--			down(&dev->class->sem);
--			list_for_each_entry(d, &dev->class->devices, node) {
--				if (d == dev)
--					continue;
--				if (d->kobj.parent == dev->kobj.parent) {
--					other = 1;
--					break;
--				}
--			}
--			if (!other)
--				kobject_del(dev->kobj.parent);
--
--			kobject_put(dev->kobj.parent);
--			up(&dev->class->sem);
--		}
+-	if (i2c_wait(i2c, timeout, 1) < 0)
+-		return -1;
++	result = i2c_wait(i2c, timeout, 1);
++	if (result < 0)
++		return result;
+ 
+ 	for (i = 0; i < length; i++) {
+ 		/* Write data byte */
+ 		writeb(data[i], i2c->base + MPC_I2C_DR);
+ 
+-		if (i2c_wait(i2c, timeout, 1) < 0)
+-			return -1;
++		result = i2c_wait(i2c, timeout, 1);
++		if (result < 0)
++			return result;
  	}
- 	device_remove_file(dev, &uevent_attr);
- 	device_remove_attrs(dev);
-@@ -979,57 +959,55 @@ void device_del(struct device * dev)
- 	if (platform_notify_remove)
- 		platform_notify_remove(dev);
- 	if (dev->bus)
--		blocking_notifier_call_chain(&dev->bus->bus_notifier,
-+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- 					     BUS_NOTIFY_DEL_DEVICE, dev);
--	device_pm_remove(dev);
- 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
-+	cleanup_device_parent(dev);
- 	kobject_del(&dev->kobj);
--	if (parent)
--		put_device(parent);
-+	put_device(parent);
- }
  
- /**
-- *	device_unregister - unregister device from system.
-- *	@dev:	device going away.
-+ * device_unregister - unregister device from system.
-+ * @dev: device going away.
-  *
-- *	We do this in two parts, like we do device_register(). First,
-- *	we remove it from all the subsystems with device_del(), then
-- *	we decrement the reference count via put_device(). If that
-- *	is the final reference count, the device will be cleaned up
-- *	via device_release() above. Otherwise, the structure will
-- *	stick around until the final reference to the device is dropped.
-+ * We do this in two parts, like we do device_register(). First,
-+ * we remove it from all the subsystems with device_del(), then
-+ * we decrement the reference count via put_device(). If that
-+ * is the final reference count, the device will be cleaned up
-+ * via device_release() above. Otherwise, the structure will
-+ * stick around until the final reference to the device is dropped.
-  */
--void device_unregister(struct device * dev)
-+void device_unregister(struct device *dev)
+ 	return 0;
+@@ -210,7 +212,7 @@ static int mpc_read(struct mpc_i2c *i2c, int target,
+ 		    u8 * data, int length, int restart)
  {
--	pr_debug("DEV: Unregistering device. ID = '%s'\n", dev->bus_id);
-+	pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__);
- 	device_del(dev);
- 	put_device(dev);
- }
+ 	unsigned timeout = i2c->adap.timeout;
+-	int i;
++	int i, result;
+ 	u32 flags = restart ? CCR_RSTA : 0;
  
--
--static struct device * next_device(struct klist_iter * i)
-+static struct device *next_device(struct klist_iter *i)
- {
--	struct klist_node * n = klist_next(i);
-+	struct klist_node *n = klist_next(i);
- 	return n ? container_of(n, struct device, knode_parent) : NULL;
- }
+ 	/* Start with MEN */
+@@ -221,8 +223,9 @@ static int mpc_read(struct mpc_i2c *i2c, int target,
+ 	/* Write target address byte - this time with the read flag set */
+ 	writeb((target << 1) | 1, i2c->base + MPC_I2C_DR);
  
- /**
-- *	device_for_each_child - device child iterator.
-- *	@parent: parent struct device.
-- *	@data:	data for the callback.
-- *	@fn:	function to be called for each device.
-+ * device_for_each_child - device child iterator.
-+ * @parent: parent struct device.
-+ * @data: data for the callback.
-+ * @fn: function to be called for each device.
+-	if (i2c_wait(i2c, timeout, 1) < 0)
+-		return -1;
++	result = i2c_wait(i2c, timeout, 1);
++	if (result < 0)
++		return result;
+ 
+ 	if (length) {
+ 		if (length == 1)
+@@ -234,8 +237,9 @@ static int mpc_read(struct mpc_i2c *i2c, int target,
+ 	}
+ 
+ 	for (i = 0; i < length; i++) {
+-		if (i2c_wait(i2c, timeout, 0) < 0)
+-			return -1;
++		result = i2c_wait(i2c, timeout, 0);
++		if (result < 0)
++			return result;
+ 
+ 		/* Generate txack on next to last byte */
+ 		if (i == length - 2)
+@@ -309,7 +313,6 @@ static struct i2c_adapter mpc_ops = {
+ 	.algo = &mpc_algo,
+ 	.class = I2C_CLASS_HWMON,
+ 	.timeout = 1,
+-	.retries = 1
+ };
+ 
+ static int fsl_i2c_probe(struct platform_device *pdev)
+@@ -321,9 +324,9 @@ static int fsl_i2c_probe(struct platform_device *pdev)
+ 
+ 	pdata = (struct fsl_i2c_platform_data *) pdev->dev.platform_data;
+ 
+-	if (!(i2c = kzalloc(sizeof(*i2c), GFP_KERNEL))) {
++	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
++	if (!i2c)
+ 		return -ENOMEM;
+-	}
+ 
+ 	i2c->irq = platform_get_irq(pdev, 0);
+ 	if (i2c->irq < 0) {
+diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
+index bb7bf68..036e6a8 100644
+--- a/drivers/i2c/busses/i2c-mv64xxx.c
++++ b/drivers/i2c/busses/i2c-mv64xxx.c
+@@ -1,6 +1,6 @@
+ /*
+- * Driver for the i2c controller on the Marvell line of host bridges for MIPS
+- * and PPC (e.g, gt642[46]0, mv643[46]0, mv644[46]0).
++ * Driver for the i2c controller on the Marvell line of host bridges
++ * (e.g, gt642[46]0, mv643[46]0, mv644[46]0, and Orion SoC family).
   *
-- *	Iterate over @parent's child devices, and call @fn for each,
-- *	passing it @data.
-+ * Iterate over @parent's child devices, and call @fn for each,
-+ * passing it @data.
+  * Author: Mark A. Greer <mgreer at mvista.com>
   *
-- *	We check the return of @fn each time. If it returns anything
-- *	other than 0, we break out and return that value.
-+ * We check the return of @fn each time. If it returns anything
-+ * other than 0, we break out and return that value.
-  */
--int device_for_each_child(struct device * parent, void * data,
--		     int (*fn)(struct device *, void *))
-+int device_for_each_child(struct device *parent, void *data,
-+			  int (*fn)(struct device *dev, void *data))
- {
- 	struct klist_iter i;
--	struct device * child;
-+	struct device *child;
- 	int error = 0;
+@@ -14,7 +14,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/i2c.h>
+ #include <linux/interrupt.h>
+-#include <linux/mv643xx.h>
++#include <linux/mv643xx_i2c.h>
+ #include <linux/platform_device.h>
  
- 	klist_iter_init(&parent->klist_children, &i);
-@@ -1054,8 +1032,8 @@ int device_for_each_child(struct device * parent, void * data,
-  * current device can be obtained, this function will return to the caller
-  * and not iterate over any more devices.
-  */
--struct device * device_find_child(struct device *parent, void *data,
--				  int (*match)(struct device *, void *))
-+struct device *device_find_child(struct device *parent, void *data,
-+				 int (*match)(struct device *dev, void *data))
+ #include <asm/io.h>
+@@ -86,6 +86,7 @@ struct mv64xxx_i2c_data {
+ 	u32			cntl_bits;
+ 	void __iomem		*reg_base;
+ 	u32			reg_base_p;
++	u32			reg_size;
+ 	u32			addr1;
+ 	u32			addr2;
+ 	u32			bytes_left;
+@@ -463,17 +464,20 @@ static int __devinit
+ mv64xxx_i2c_map_regs(struct platform_device *pd,
+ 	struct mv64xxx_i2c_data *drv_data)
  {
- 	struct klist_iter i;
- 	struct device *child;
-@@ -1073,7 +1051,10 @@ struct device * device_find_child(struct device *parent, void *data,
+-	struct resource	*r;
++	int size;
++	struct resource	*r = platform_get_resource(pd, IORESOURCE_MEM, 0);
  
- int __init devices_init(void)
- {
--	return subsystem_register(&devices_subsys);
-+	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
-+	if (!devices_kset)
-+		return -ENOMEM;
-+	return 0;
- }
+-	if ((r = platform_get_resource(pd, IORESOURCE_MEM, 0)) &&
+-		request_mem_region(r->start, MV64XXX_I2C_REG_BLOCK_SIZE,
+-			drv_data->adapter.name)) {
++	if (!r)
++		return -ENODEV;
  
- EXPORT_SYMBOL_GPL(device_for_each_child);
-@@ -1094,7 +1075,7 @@ EXPORT_SYMBOL_GPL(device_remove_file);
+-		drv_data->reg_base = ioremap(r->start,
+-			MV64XXX_I2C_REG_BLOCK_SIZE);
+-		drv_data->reg_base_p = r->start;
+-	} else
+-		return -ENOMEM;
++	size = r->end - r->start + 1;
++
++	if (!request_mem_region(r->start, size, drv_data->adapter.name))
++		return -EBUSY;
++
++	drv_data->reg_base = ioremap(r->start, size);
++	drv_data->reg_base_p = r->start;
++	drv_data->reg_size = size;
  
- static void device_create_release(struct device *dev)
- {
--	pr_debug("%s called for %s\n", __FUNCTION__, dev->bus_id);
-+	pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__);
- 	kfree(dev);
+ 	return 0;
  }
+@@ -483,8 +487,7 @@ mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
+ {
+ 	if (drv_data->reg_base) {
+ 		iounmap(drv_data->reg_base);
+-		release_mem_region(drv_data->reg_base_p,
+-			MV64XXX_I2C_REG_BLOCK_SIZE);
++		release_mem_region(drv_data->reg_base_p, drv_data->reg_size);
+ 	}
  
-@@ -1156,14 +1137,11 @@ error:
- EXPORT_SYMBOL_GPL(device_create);
+ 	drv_data->reg_base = NULL;
+@@ -529,7 +532,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
+ 	drv_data->adapter.owner = THIS_MODULE;
+ 	drv_data->adapter.class = I2C_CLASS_HWMON;
+ 	drv_data->adapter.timeout = pdata->timeout;
+-	drv_data->adapter.retries = pdata->retries;
+ 	drv_data->adapter.nr = pd->id;
+ 	platform_set_drvdata(pd, drv_data);
+ 	i2c_set_adapdata(&drv_data->adapter, drv_data);
+diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
+index 1bf590c..3dac920 100644
+--- a/drivers/i2c/busses/i2c-nforce2.c
++++ b/drivers/i2c/busses/i2c-nforce2.c
+@@ -351,6 +351,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
+ 	pci_set_drvdata(dev, smbuses);
  
- /**
-- * device_destroy - removes a device that was created with device_create()
-+ * find_device - finds a device that was created with device_create()
-  * @class: pointer to the struct class that this device was registered with
-  * @devt: the dev_t of the device that was previously registered
-- *
-- * This call unregisters and cleans up a device that was created with a
-- * call to device_create().
-  */
--void device_destroy(struct class *class, dev_t devt)
-+static struct device *find_device(struct class *class, dev_t devt)
+ 	switch(dev->device) {
++	case PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS:
+ 	case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS:
+ 	case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS:
+ 		smbuses[0].blockops = 1;
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index f2552b1..da66397 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -362,8 +362,6 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+ 
+ 	omap_i2c_enable_clocks(dev);
+ 
+-	/* REVISIT: initialize and use adap->retries. This is an optional
+-	 * feature */
+ 	if ((r = omap_i2c_wait_for_bb(dev)) < 0)
+ 		goto out;
+ 
+diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
+index ca18e0b..1603c81 100644
+--- a/drivers/i2c/busses/i2c-pasemi.c
++++ b/drivers/i2c/busses/i2c-pasemi.c
+@@ -368,6 +368,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev,
+ 	smbus->adapter.class = I2C_CLASS_HWMON;
+ 	smbus->adapter.algo = &smbus_algorithm;
+ 	smbus->adapter.algo_data = smbus;
++	smbus->adapter.nr = PCI_FUNC(dev->devfn);
+ 
+ 	/* set up the sysfs linkage to our parent device */
+ 	smbus->adapter.dev.parent = &dev->dev;
+@@ -375,7 +376,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev,
+ 	reg_write(smbus, REG_CTL, (CTL_MTR | CTL_MRR |
+ 		  (CLK_100K_DIV & CTL_CLK_M)));
+ 
+-	error = i2c_add_adapter(&smbus->adapter);
++	error = i2c_add_numbered_adapter(&smbus->adapter);
+ 	if (error)
+ 		goto out_release_region;
+ 
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index 167e413..9bbe96c 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -121,10 +121,6 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
  {
- 	struct device *dev = NULL;
- 	struct device *dev_tmp;
-@@ -1176,12 +1154,54 @@ void device_destroy(struct class *class, dev_t devt)
- 		}
- 	}
- 	up(&class->sem);
-+	return dev;
-+}
-+
-+/**
-+ * device_destroy - removes a device that was created with device_create()
-+ * @class: pointer to the struct class that this device was registered with
-+ * @devt: the dev_t of the device that was previously registered
-+ *
-+ * This call unregisters and cleans up a device that was created with a
-+ * call to device_create().
-+ */
-+void device_destroy(struct class *class, dev_t devt)
-+{
-+	struct device *dev;
+ 	unsigned char temp;
+ 
+-	/* match up the function */
+-	if (PCI_FUNC(PIIX4_dev->devfn) != id->driver_data)
+-		return -ENODEV;
+-
+ 	dev_info(&PIIX4_dev->dev, "Found %s device\n", pci_name(PIIX4_dev));
+ 
+ 	/* Don't access SMBus on IBM systems which get corrupted eeproms */
+@@ -389,28 +385,21 @@ static struct i2c_adapter piix4_adapter = {
+ };
+ 
+ static struct pci_device_id piix4_ids[] = {
+-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3),
+-	  .driver_data = 3 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS),
+-	  .driver_data = 0 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS),
+-	  .driver_data = 0 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS),
+-	  .driver_data = 0 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS),
+-	  .driver_data = 0 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4),
+-	  .driver_data = 0 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5),
+-	  .driver_data = 0 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6),
+-	  .driver_data = 0 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB),
+-	  .driver_data = 0 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3),
+-	  .driver_data = 3 },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3),
+-	  .driver_data = 0 },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
++		     PCI_DEVICE_ID_SERVERWORKS_OSB4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
++		     PCI_DEVICE_ID_SERVERWORKS_CSB5) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
++		     PCI_DEVICE_ID_SERVERWORKS_CSB6) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
++		     PCI_DEVICE_ID_SERVERWORKS_HT1000SB) },
+ 	{ 0, }
+ };
+ 
+diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
+index 6426a61..2598d29 100644
+--- a/drivers/i2c/busses/i2c-pxa.c
++++ b/drivers/i2c/busses/i2c-pxa.c
+@@ -65,6 +65,7 @@ struct pxa_i2c {
+ 	unsigned long		iosize;
  
-+	dev = find_device(class, devt);
- 	if (dev)
- 		device_unregister(dev);
+ 	int			irq;
++	int			use_pio;
+ };
+ 
+ #define _IBMR(i2c)	((i2c)->reg_base + 0)
+@@ -163,6 +164,7 @@ static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname)
+ #define eedbg(lvl, x...) do { if ((lvl) < 1) { printk(KERN_DEBUG "" x); } } while(0)
+ 
+ static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret);
++static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id);
+ 
+ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
+ {
+@@ -554,6 +556,71 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c)
+ 	writel(icr, _ICR(i2c));
  }
- EXPORT_SYMBOL_GPL(device_destroy);
  
-+#ifdef CONFIG_PM_SLEEP
-+/**
-+ * destroy_suspended_device - asks the PM core to remove a suspended device
-+ * @class: pointer to the struct class that this device was registered with
-+ * @devt: the dev_t of the device that was previously registered
-+ *
-+ * This call notifies the PM core of the necessity to unregister a suspended
-+ * device created with a call to device_create() (devices cannot be
-+ * unregistered directly while suspended, since the PM core holds their
-+ * semaphores at that time).
-+ *
-+ * It can only be called within the scope of a system sleep transition.  In
-+ * practice this means it has to be directly or indirectly invoked either by
-+ * a suspend or resume method, or by the PM core (e.g. via
-+ * disable_nonboot_cpus() or enable_nonboot_cpus()).
-+ */
-+void destroy_suspended_device(struct class *class, dev_t devt)
++static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c)
 +{
-+	struct device *dev;
++	/* make timeout the same as for interrupt based functions */
++	long timeout = 2 * DEF_TIMEOUT;
 +
-+	dev = find_device(class, devt);
-+	if (dev)
-+		device_pm_schedule_removal(dev);
-+}
-+EXPORT_SYMBOL_GPL(destroy_suspended_device);
-+#endif /* CONFIG_PM_SLEEP */
++	/*
++	 * Wait for the bus to become free.
++	 */
++	while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) {
++		udelay(1000);
++		show_state(i2c);
++	}
 +
- /**
-  * device_rename - renames a device
-  * @dev: the pointer to the struct device to be renamed
-@@ -1198,7 +1218,8 @@ int device_rename(struct device *dev, char *new_name)
- 	if (!dev)
- 		return -EINVAL;
- 
--	pr_debug("DEVICE: renaming '%s' to '%s'\n", dev->bus_id, new_name);
-+	pr_debug("device: '%s': %s: renaming to '%s'\n", dev->bus_id,
-+		 __FUNCTION__, new_name);
- 
- #ifdef CONFIG_SYSFS_DEPRECATED
- 	if ((dev->class) && (dev->parent))
-@@ -1279,8 +1300,7 @@ static int device_move_class_links(struct device *dev,
- 					  class_name);
- 		if (error)
- 			sysfs_remove_link(&dev->kobj, "device");
--	}
--	else
-+	} else
- 		error = 0;
- out:
- 	kfree(class_name);
-@@ -1311,16 +1331,13 @@ int device_move(struct device *dev, struct device *new_parent)
- 		return -EINVAL;
- 
- 	new_parent = get_device(new_parent);
--	new_parent_kobj = get_device_parent (dev, new_parent);
--	if (IS_ERR(new_parent_kobj)) {
--		error = PTR_ERR(new_parent_kobj);
--		put_device(new_parent);
--		goto out;
--	}
--	pr_debug("DEVICE: moving '%s' to '%s'\n", dev->bus_id,
--		 new_parent ? new_parent->bus_id : "<NULL>");
-+	new_parent_kobj = get_device_parent(dev, new_parent);
++	if (timeout <= 0) {
++		show_state(i2c);
++		dev_err(&i2c->adap.dev,
++			"i2c_pxa: timeout waiting for bus free\n");
++		return I2C_RETRY;
++	}
 +
-+	pr_debug("device: '%s': %s: moving to '%s'\n", dev->bus_id,
-+		 __FUNCTION__, new_parent ? new_parent->bus_id : "<NULL>");
- 	error = kobject_move(&dev->kobj, new_parent_kobj);
- 	if (error) {
-+		cleanup_glue_dir(dev, new_parent_kobj);
- 		put_device(new_parent);
- 		goto out;
- 	}
-@@ -1343,6 +1360,7 @@ int device_move(struct device *dev, struct device *new_parent)
- 				klist_add_tail(&dev->knode_parent,
- 					       &old_parent->klist_children);
- 		}
-+		cleanup_glue_dir(dev, new_parent_kobj);
- 		put_device(new_parent);
- 		goto out;
- 	}
-@@ -1352,5 +1370,23 @@ out:
- 	put_device(dev);
- 	return error;
- }
--
- EXPORT_SYMBOL_GPL(device_move);
++	/*
++	 * Set master mode.
++	 */
++	writel(readl(_ICR(i2c)) | ICR_SCLE, _ICR(i2c));
 +
-+/**
-+ * device_shutdown - call ->shutdown() on each device to shutdown.
-+ */
-+void device_shutdown(void)
++	return 0;
++}
++
++static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c,
++			       struct i2c_msg *msg, int num)
 +{
-+	struct device *dev, *devn;
++	unsigned long timeout = 500000; /* 5 seconds */
++	int ret = 0;
 +
-+	list_for_each_entry_safe_reverse(dev, devn, &devices_kset->list,
-+				kobj.entry) {
-+		if (dev->bus && dev->bus->shutdown) {
-+			dev_dbg(dev, "shutdown\n");
-+			dev->bus->shutdown(dev);
-+		} else if (dev->driver && dev->driver->shutdown) {
-+			dev_dbg(dev, "shutdown\n");
-+			dev->driver->shutdown(dev);
-+		}
++	ret = i2c_pxa_pio_set_master(i2c);
++	if (ret)
++		goto out;
++
++	i2c->msg = msg;
++	i2c->msg_num = num;
++	i2c->msg_idx = 0;
++	i2c->msg_ptr = 0;
++	i2c->irqlogidx = 0;
++
++	i2c_pxa_start_message(i2c);
++
++	while (timeout-- && i2c->msg_num > 0) {
++		i2c_pxa_handler(0, i2c);
++		udelay(10);
 +	}
++
++	i2c_pxa_stop_message(i2c);
++
++	/*
++	 * We place the return code in i2c->msg_idx.
++	 */
++	ret = i2c->msg_idx;
++
++out:
++	if (timeout == 0)
++		i2c_pxa_scream_blue_murder(i2c, "timeout");
++
++	return ret;
 +}
-diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
-index 4054507..c5885f5 100644
---- a/drivers/base/cpu.c
-+++ b/drivers/base/cpu.c
-@@ -14,7 +14,7 @@
- #include "base.h"
- 
- struct sysdev_class cpu_sysdev_class = {
--	set_kset_name("cpu"),
-+	.name = "cpu",
- };
- EXPORT_SYMBOL(cpu_sysdev_class);
- 
-diff --git a/drivers/base/dd.c b/drivers/base/dd.c
-index 7ac474d..a5cde94 100644
---- a/drivers/base/dd.c
-+++ b/drivers/base/dd.c
-@@ -1,18 +1,20 @@
++
  /*
-- *	drivers/base/dd.c - The core device/driver interactions.
-+ * drivers/base/dd.c - The core device/driver interactions.
-  *
-- * 	This file contains the (sometimes tricky) code that controls the
-- *	interactions between devices and drivers, which primarily includes
-- *	driver binding and unbinding.
-+ * This file contains the (sometimes tricky) code that controls the
-+ * interactions between devices and drivers, which primarily includes
-+ * driver binding and unbinding.
-  *
-- *	All of this code used to exist in drivers/base/bus.c, but was
-- *	relocated to here in the name of compartmentalization (since it wasn't
-- *	strictly code just for the 'struct bus_type'.
-+ * All of this code used to exist in drivers/base/bus.c, but was
-+ * relocated to here in the name of compartmentalization (since it wasn't
-+ * strictly code just for the 'struct bus_type'.
-  *
-- *	Copyright (c) 2002-5 Patrick Mochel
-- *	Copyright (c) 2002-3 Open Source Development Labs
-+ * Copyright (c) 2002-5 Patrick Mochel
-+ * Copyright (c) 2002-3 Open Source Development Labs
-+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh at suse.de>
-+ * Copyright (c) 2007 Novell Inc.
-  *
-- *	This file is released under the GPLv2
-+ * This file is released under the GPLv2
+  * We are protected by the adapter bus mutex.
   */
+@@ -610,6 +677,35 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
+ 	return ret;
+ }
  
- #include <linux/device.h>
-@@ -23,8 +25,6 @@
- #include "base.h"
- #include "power/power.h"
++static int i2c_pxa_pio_xfer(struct i2c_adapter *adap,
++			    struct i2c_msg msgs[], int num)
++{
++	struct pxa_i2c *i2c = adap->algo_data;
++	int ret, i;
++
++	/* If the I2C controller is disabled we need to reset it
++	  (probably due to a suspend/resume destroying state). We do
++	  this here as we can then avoid worrying about resuming the
++	  controller before its users. */
++	if (!(readl(_ICR(i2c)) & ICR_IUE))
++		i2c_pxa_reset(i2c);
++
++	for (i = adap->retries; i >= 0; i--) {
++		ret = i2c_pxa_do_pio_xfer(i2c, msgs, num);
++		if (ret != I2C_RETRY)
++			goto out;
++
++		if (i2c_debug)
++			dev_dbg(&adap->dev, "Retrying transmission\n");
++		udelay(100);
++	}
++	i2c_pxa_scream_blue_murder(i2c, "exhausted retries");
++	ret = -EREMOTEIO;
++ out:
++	i2c_pxa_set_slave(i2c, ret);
++	return ret;
++}
++
+ /*
+  * i2c_pxa_master_complete - complete the message and wake up.
+  */
+@@ -621,7 +717,8 @@ static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret)
+ 	i2c->msg_num = 0;
+ 	if (ret)
+ 		i2c->msg_idx = ret;
+-	wake_up(&i2c->wait);
++	if (!i2c->use_pio)
++		wake_up(&i2c->wait);
+ }
  
--#define to_drv(node) container_of(node, struct device_driver, kobj.entry)
--
+ static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr)
+@@ -840,6 +937,37 @@ static const struct i2c_algorithm i2c_pxa_algorithm = {
+ 	.functionality	= i2c_pxa_functionality,
+ };
  
- static void driver_bound(struct device *dev)
++static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
++	.master_xfer	= i2c_pxa_pio_xfer,
++	.functionality	= i2c_pxa_functionality,
++};
++
++static void i2c_pxa_enable(struct platform_device *dev)
++{
++	if (cpu_is_pxa27x()) {
++		switch (dev->id) {
++		case 0:
++			pxa_gpio_mode(GPIO117_I2CSCL_MD);
++			pxa_gpio_mode(GPIO118_I2CSDA_MD);
++			break;
++		case 1:
++			local_irq_disable();
++			PCFR |= PCFR_PI2CEN;
++			local_irq_enable();
++			break;
++		}
++	}
++}
++
++static void i2c_pxa_disable(struct platform_device *dev)
++{
++	if (cpu_is_pxa27x() && dev->id == 1) {
++		local_irq_disable();
++		PCFR &= ~PCFR_PI2CEN;
++		local_irq_enable();
++	}
++}
++
+ #define res_len(r)		((r)->end - (r)->start + 1)
+ static int i2c_pxa_probe(struct platform_device *dev)
  {
-@@ -34,27 +34,27 @@ static void driver_bound(struct device *dev)
- 		return;
+@@ -864,7 +992,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
  	}
  
--	pr_debug("bound device '%s' to driver '%s'\n",
--		 dev->bus_id, dev->driver->name);
-+	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->bus_id,
-+		 __FUNCTION__, dev->driver->name);
+ 	i2c->adap.owner   = THIS_MODULE;
+-	i2c->adap.algo    = &i2c_pxa_algorithm;
+ 	i2c->adap.retries = 5;
  
- 	if (dev->bus)
--		blocking_notifier_call_chain(&dev->bus->bus_notifier,
-+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- 					     BUS_NOTIFY_BOUND_DRIVER, dev);
+ 	spin_lock_init(&i2c->lock);
+@@ -899,34 +1026,28 @@ static int i2c_pxa_probe(struct platform_device *dev)
+ #endif
  
--	klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
-+	klist_add_tail(&dev->knode_driver, &dev->driver->p->klist_devices);
- }
+ 	clk_enable(i2c->clk);
+-#ifdef CONFIG_PXA27x
+-	switch (dev->id) {
+-	case 0:
+-		pxa_gpio_mode(GPIO117_I2CSCL_MD);
+-		pxa_gpio_mode(GPIO118_I2CSDA_MD);
+-		break;
+-	case 1:
+-		local_irq_disable();
+-		PCFR |= PCFR_PI2CEN;
+-		local_irq_enable();
+-	}
+-#endif
++	i2c_pxa_enable(dev);
  
- static int driver_sysfs_add(struct device *dev)
- {
- 	int ret;
+-	ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED,
+-			  i2c->adap.name, i2c);
+-	if (ret)
+-		goto ereqirq;
++	if (plat) {
++		i2c->adap.class = plat->class;
++		i2c->use_pio = plat->use_pio;
++	}
  
--	ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj,
-+	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
- 			  kobject_name(&dev->kobj));
- 	if (ret == 0) {
--		ret = sysfs_create_link(&dev->kobj, &dev->driver->kobj,
-+		ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
- 					"driver");
- 		if (ret)
--			sysfs_remove_link(&dev->driver->kobj,
-+			sysfs_remove_link(&dev->driver->p->kobj,
- 					kobject_name(&dev->kobj));
- 	}
- 	return ret;
-@@ -65,24 +65,24 @@ static void driver_sysfs_remove(struct device *dev)
- 	struct device_driver *drv = dev->driver;
++	if (i2c->use_pio) {
++		i2c->adap.algo = &i2c_pxa_pio_algorithm;
++	} else {
++		i2c->adap.algo = &i2c_pxa_algorithm;
++		ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED,
++				  i2c->adap.name, i2c);
++		if (ret)
++			goto ereqirq;
++	}
  
- 	if (drv) {
--		sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
-+		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
- 		sysfs_remove_link(&dev->kobj, "driver");
- 	}
- }
+ 	i2c_pxa_reset(i2c);
  
- /**
-- *	device_bind_driver - bind a driver to one device.
-- *	@dev:	device.
-+ * device_bind_driver - bind a driver to one device.
-+ * @dev: device.
-  *
-- *	Allow manual attachment of a driver to a device.
-- *	Caller must have already set @dev->driver.
-+ * Allow manual attachment of a driver to a device.
-+ * Caller must have already set @dev->driver.
-  *
-- *	Note that this does not modify the bus reference count
-- *	nor take the bus's rwsem. Please verify those are accounted
-- *	for before calling this. (It is ok to call with no other effort
-- *	from a driver's probe() method.)
-+ * Note that this does not modify the bus reference count
-+ * nor take the bus's rwsem. Please verify those are accounted
-+ * for before calling this. (It is ok to call with no other effort
-+ * from a driver's probe() method.)
-  *
-- *	This function must be called with @dev->sem held.
-+ * This function must be called with @dev->sem held.
-  */
- int device_bind_driver(struct device *dev)
- {
-@@ -93,6 +93,7 @@ int device_bind_driver(struct device *dev)
- 		driver_bound(dev);
- 	return ret;
- }
-+EXPORT_SYMBOL_GPL(device_bind_driver);
+ 	i2c->adap.algo_data = i2c;
+ 	i2c->adap.dev.parent = &dev->dev;
  
- static atomic_t probe_count = ATOMIC_INIT(0);
- static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
-@@ -102,8 +103,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
- 	int ret = 0;
+-	if (plat) {
+-		i2c->adap.class = plat->class;
+-	}
+-
+ 	/*
+ 	 * If "dev->id" is negative we consider it as zero.
+ 	 * The reason to do so is to avoid sysfs names that only make
+@@ -952,17 +1073,11 @@ static int i2c_pxa_probe(struct platform_device *dev)
+ 	return 0;
  
- 	atomic_inc(&probe_count);
--	pr_debug("%s: Probing driver %s with device %s\n",
--		 drv->bus->name, drv->name, dev->bus_id);
-+	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
-+		 drv->bus->name, __FUNCTION__, drv->name, dev->bus_id);
- 	WARN_ON(!list_empty(&dev->devres_head));
+ eadapt:
+-	free_irq(irq, i2c);
++	if (!i2c->use_pio)
++		free_irq(irq, i2c);
+ ereqirq:
+ 	clk_disable(i2c->clk);
+-
+-#ifdef CONFIG_PXA27x
+-	if (dev->id == 1) {
+-		local_irq_disable();
+-		PCFR &= ~PCFR_PI2CEN;
+-		local_irq_enable();
+-	}
+-#endif
++	i2c_pxa_disable(dev);
+ eremap:
+ 	clk_put(i2c->clk);
+ eclk:
+@@ -979,18 +1094,12 @@ static int i2c_pxa_remove(struct platform_device *dev)
+ 	platform_set_drvdata(dev, NULL);
  
- 	dev->driver = drv;
-@@ -125,8 +126,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
+ 	i2c_del_adapter(&i2c->adap);
+-	free_irq(i2c->irq, i2c);
++	if (!i2c->use_pio)
++		free_irq(i2c->irq, i2c);
  
- 	driver_bound(dev);
- 	ret = 1;
--	pr_debug("%s: Bound Device %s to Driver %s\n",
--		 drv->bus->name, dev->bus_id, drv->name);
-+	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
-+		 drv->bus->name, __FUNCTION__, dev->bus_id, drv->name);
- 	goto done;
+ 	clk_disable(i2c->clk);
+ 	clk_put(i2c->clk);
+-
+-#ifdef CONFIG_PXA27x
+-	if (dev->id == 1) {
+-		local_irq_disable();
+-		PCFR &= ~PCFR_PI2CEN;
+-		local_irq_enable();
+-	}
+-#endif
++	i2c_pxa_disable(dev);
  
- probe_failed:
-@@ -183,7 +184,7 @@ int driver_probe_done(void)
-  * This function must be called with @dev->sem held.  When called for a
-  * USB interface, @dev->parent->sem must be held as well.
-  */
--int driver_probe_device(struct device_driver * drv, struct device * dev)
-+int driver_probe_device(struct device_driver *drv, struct device *dev)
- {
- 	int ret = 0;
+ 	release_mem_region(i2c->iobase, i2c->iosize);
+ 	kfree(i2c);
+diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
+index 503a134..8fbbdb4 100644
+--- a/drivers/i2c/busses/i2c-sibyte.c
++++ b/drivers/i2c/busses/i2c-sibyte.c
+@@ -36,14 +36,6 @@ struct i2c_algo_sibyte_data {
+ /* ----- global defines ----------------------------------------------- */
+ #define SMB_CSR(a,r) ((long)(a->reg_base + r))
  
-@@ -192,8 +193,8 @@ int driver_probe_device(struct device_driver * drv, struct device * dev)
- 	if (drv->bus->match && !drv->bus->match(dev, drv))
- 		goto done;
+-/* ----- global variables --------------------------------------------- */
+-
+-/* module parameters:
+- */
+-static int bit_scan;	/* have a look at what's hanging 'round */
+-module_param(bit_scan, int, 0);
+-MODULE_PARM_DESC(bit_scan, "Scan for active chips on the bus");
+-
  
--	pr_debug("%s: Matched Device %s with Driver %s\n",
--		 drv->bus->name, dev->bus_id, drv->name);
-+	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
-+		 drv->bus->name, __FUNCTION__, dev->bus_id, drv->name);
+ static int smbus_xfer(struct i2c_adapter *i2c_adap, u16 addr,
+ 		      unsigned short flags, char read_write,
+@@ -140,9 +132,8 @@ static const struct i2c_algorithm i2c_sibyte_algo = {
+ /*
+  * registering functions to load algorithms at runtime
+  */
+-int i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
++int __init i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
+ {
+-	int i;
+ 	struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data;
  
- 	ret = really_probe(dev, drv);
+ 	/* register new adapter to i2c module... */
+@@ -152,24 +143,6 @@ int i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
+ 	csr_out32(speed, SMB_CSR(adap,R_SMB_FREQ));
+ 	csr_out32(0, SMB_CSR(adap,R_SMB_CONTROL));
  
-@@ -201,27 +202,27 @@ done:
- 	return ret;
+-	/* scan bus */
+-	if (bit_scan) {
+-		union i2c_smbus_data data;
+-		int rc;
+-		printk(KERN_INFO " i2c-algo-sibyte.o: scanning bus %s.\n",
+-		       i2c_adap->name);
+-		for (i = 0x00; i < 0x7f; i++) {
+-			/* XXXKW is this a realistic probe? */
+-			rc = smbus_xfer(i2c_adap, i, 0, I2C_SMBUS_READ, 0,
+-					I2C_SMBUS_BYTE_DATA, &data);
+-			if (!rc) {
+-				printk("(%02x)",i);
+-			} else
+-				printk(".");
+-		}
+-		printk("\n");
+-	}
+-
+ 	return i2c_add_adapter(i2c_adap);
  }
  
--static int __device_attach(struct device_driver * drv, void * data)
-+static int __device_attach(struct device_driver *drv, void *data)
- {
--	struct device * dev = data;
-+	struct device *dev = data;
- 	return driver_probe_device(drv, dev);
- }
+diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
+index 84df29d..c2a9f8c 100644
+--- a/drivers/i2c/busses/i2c-stub.c
++++ b/drivers/i2c/busses/i2c-stub.c
+@@ -1,8 +1,8 @@
+ /*
+-    i2c-stub.c - Part of lm_sensors, Linux kernel modules for hardware
+-              monitoring
++    i2c-stub.c - I2C/SMBus chip emulator
  
- /**
-- *	device_attach - try to attach device to a driver.
-- *	@dev:	device.
-+ * device_attach - try to attach device to a driver.
-+ * @dev: device.
-  *
-- *	Walk the list of drivers that the bus has and call
-- *	driver_probe_device() for each pair. If a compatible
-- *	pair is found, break out and return.
-+ * Walk the list of drivers that the bus has and call
-+ * driver_probe_device() for each pair. If a compatible
-+ * pair is found, break out and return.
-  *
-- *	Returns 1 if the device was bound to a driver;
-- *	0 if no matching device was found;
-- *	-ENODEV if the device is not registered.
-+ * Returns 1 if the device was bound to a driver;
-+ * 0 if no matching device was found;
-+ * -ENODEV if the device is not registered.
-  *
-- *	When called for a USB interface, @dev->parent->sem must be held.
-+ * When called for a USB interface, @dev->parent->sem must be held.
-  */
--int device_attach(struct device * dev)
-+int device_attach(struct device *dev)
- {
- 	int ret = 0;
+     Copyright (c) 2004 Mark M. Hoffman <mhoffman at lightlink.com>
++    Copyright (C) 2007 Jean Delvare <khali at linux-fr.org>
  
-@@ -240,10 +241,11 @@ int device_attach(struct device * dev)
- 	up(&dev->sem);
- 	return ret;
- }
-+EXPORT_SYMBOL_GPL(device_attach);
+     This program is free software; you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+@@ -37,8 +37,8 @@ MODULE_PARM_DESC(chip_addr,
  
--static int __driver_attach(struct device * dev, void * data)
-+static int __driver_attach(struct device *dev, void *data)
- {
--	struct device_driver * drv = data;
-+	struct device_driver *drv = data;
+ struct stub_chip {
+ 	u8 pointer;
+-	u8 bytes[256];
+-	u16 words[256];
++	u16 words[256];		/* Byte operations use the LSB as per SMBus
++				   specification */
+ };
  
- 	/*
- 	 * Lock device and try to bind to it. We drop the error
-@@ -268,35 +270,35 @@ static int __driver_attach(struct device * dev, void * data)
- }
+ static struct stub_chip *stub_chips;
+@@ -75,7 +75,7 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
+ 					"wrote 0x%02x.\n",
+ 					addr, command);
+ 		} else {
+-			data->byte = chip->bytes[chip->pointer++];
++			data->byte = chip->words[chip->pointer++] & 0xff;
+ 			dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, "
+ 					"read  0x%02x.\n",
+ 					addr, data->byte);
+@@ -86,12 +86,13 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
  
- /**
-- *	driver_attach - try to bind driver to devices.
-- *	@drv:	driver.
-+ * driver_attach - try to bind driver to devices.
-+ * @drv: driver.
-  *
-- *	Walk the list of devices that the bus has on it and try to
-- *	match the driver with each one.  If driver_probe_device()
-- *	returns 0 and the @dev->driver is set, we've found a
-- *	compatible pair.
-+ * Walk the list of devices that the bus has on it and try to
-+ * match the driver with each one.  If driver_probe_device()
-+ * returns 0 and the @dev->driver is set, we've found a
-+ * compatible pair.
-  */
--int driver_attach(struct device_driver * drv)
-+int driver_attach(struct device_driver *drv)
- {
- 	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
- }
-+EXPORT_SYMBOL_GPL(driver_attach);
+ 	case I2C_SMBUS_BYTE_DATA:
+ 		if (read_write == I2C_SMBUS_WRITE) {
+-			chip->bytes[command] = data->byte;
++			chip->words[command] &= 0xff00;
++			chip->words[command] |= data->byte;
+ 			dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
+ 					"wrote 0x%02x at 0x%02x.\n",
+ 					addr, data->byte, command);
+ 		} else {
+-			data->byte = chip->bytes[command];
++			data->byte = chip->words[command] & 0xff;
+ 			dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
+ 					"read  0x%02x at 0x%02x.\n",
+ 					addr, data->byte, command);
+diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
+index c9ce77f..77b13d0 100644
+--- a/drivers/i2c/busses/i2c-viapro.c
++++ b/drivers/i2c/busses/i2c-viapro.c
+@@ -4,7 +4,7 @@
+     Copyright (c) 1998 - 2002  Frodo Looijaard <frodol at dds.nl>,
+     Philip Edelbrock <phil at netroedge.com>, Kyösti Mälkki <kmalkki at cc.hut.fi>,
+     Mark D. Studebaker <mdsxyz123 at yahoo.com>
+-    Copyright (C) 2005 - 2007  Jean Delvare <khali at linux-fr.org>
++    Copyright (C) 2005 - 2008  Jean Delvare <khali at linux-fr.org>
  
- /*
-- *	__device_release_driver() must be called with @dev->sem held.
-- *	When called for a USB interface, @dev->parent->sem must be held as well.
-+ * __device_release_driver() must be called with @dev->sem held.
-+ * When called for a USB interface, @dev->parent->sem must be held as well.
-  */
--static void __device_release_driver(struct device * dev)
-+static void __device_release_driver(struct device *dev)
- {
--	struct device_driver * drv;
-+	struct device_driver *drv;
+     This program is free software; you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+@@ -35,6 +35,7 @@
+    VT8235             0x3177             yes
+    VT8237R            0x3227             yes
+    VT8237A            0x3337             yes
++   VT8237S            0x3372             yes
+    VT8251             0x3287             yes
+    CX700              0x8324             yes
  
--	drv = get_driver(dev->driver);
-+	drv = dev->driver;
- 	if (drv) {
- 		driver_sysfs_remove(dev);
- 		sysfs_remove_link(&dev->kobj, "driver");
--		klist_remove(&dev->knode_driver);
+@@ -318,6 +319,10 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
+ 	unsigned char temp;
+ 	int error = -ENODEV;
  
- 		if (dev->bus)
--			blocking_notifier_call_chain(&dev->bus->bus_notifier,
-+			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- 						     BUS_NOTIFY_UNBIND_DRIVER,
- 						     dev);
++	/* driver_data might come from user-space, so check it */
++	if (id->driver_data & 1 || id->driver_data > 0xff)
++		return -EINVAL;
++
+ 	/* Determine the address of the SMBus areas */
+ 	if (force_addr) {
+ 		vt596_smba = force_addr & 0xfff0;
+@@ -389,6 +394,7 @@ found:
+ 	case PCI_DEVICE_ID_VIA_8251:
+ 	case PCI_DEVICE_ID_VIA_8237:
+ 	case PCI_DEVICE_ID_VIA_8237A:
++	case PCI_DEVICE_ID_VIA_8237S:
+ 	case PCI_DEVICE_ID_VIA_8235:
+ 	case PCI_DEVICE_ID_VIA_8233A:
+ 	case PCI_DEVICE_ID_VIA_8233_0:
+@@ -440,6 +446,8 @@ static struct pci_device_id vt596_ids[] = {
+ 	  .driver_data = SMBBA3 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A),
+ 	  .driver_data = SMBBA3 },
++	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237S),
++	  .driver_data = SMBBA3 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4),
+ 	  .driver_data = SMBBA1 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8251),
+@@ -455,6 +463,7 @@ static struct pci_driver vt596_driver = {
+ 	.name		= "vt596_smbus",
+ 	.id_table	= vt596_ids,
+ 	.probe		= vt596_probe,
++	.dynids.use_driver_data = 1,
+ };
  
-@@ -306,18 +308,18 @@ static void __device_release_driver(struct device * dev)
- 			drv->remove(dev);
- 		devres_release_all(dev);
- 		dev->driver = NULL;
--		put_driver(drv);
-+		klist_remove(&dev->knode_driver);
- 	}
- }
+ static int __init i2c_vt596_init(void)
+diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
+index 2e1c24f..bd7082c 100644
+--- a/drivers/i2c/chips/Kconfig
++++ b/drivers/i2c/chips/Kconfig
+@@ -4,32 +4,6 @@
  
- /**
-- *	device_release_driver - manually detach device from driver.
-- *	@dev:	device.
-+ * device_release_driver - manually detach device from driver.
-+ * @dev: device.
-  *
-- *	Manually detach device from driver.
-- *	When called for a USB interface, @dev->parent->sem must be held.
-+ * Manually detach device from driver.
-+ * When called for a USB interface, @dev->parent->sem must be held.
-  */
--void device_release_driver(struct device * dev)
-+void device_release_driver(struct device *dev)
- {
- 	/*
- 	 * If anyone calls device_release_driver() recursively from
-@@ -328,26 +330,26 @@ void device_release_driver(struct device * dev)
- 	__device_release_driver(dev);
- 	up(&dev->sem);
- }
+ menu "Miscellaneous I2C Chip support"
+ 
+-config SENSORS_DS1337
+-	tristate "Dallas DS1337 and DS1339 Real Time Clock (DEPRECATED)"
+-	depends on EXPERIMENTAL
+-	help
+-	  If you say yes here you get support for Dallas Semiconductor
+-	  DS1337 and DS1339 real-time clock chips.
 -
-+EXPORT_SYMBOL_GPL(device_release_driver);
+-	  This driver can also be built as a module.  If so, the module
+-	  will be called ds1337.
+-
+-	  This driver is deprecated and will be dropped soon. Use
+-	  rtc-ds1307 instead.
+-
+-config SENSORS_DS1374
+-	tristate "Dallas DS1374 Real Time Clock (DEPRECATED)"
+-	depends on EXPERIMENTAL
+-	help
+-	  If you say yes here you get support for Dallas Semiconductor
+-	  DS1374 real-time clock chips.
+-
+-	  This driver can also be built as a module.  If so, the module
+-	  will be called ds1374.
+-
+-	  This driver is deprecated and will be dropped soon. Use
+-	  rtc-ds1374 instead.
+-
+ config DS1682
+ 	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
+ 	depends on EXPERIMENTAL
+@@ -57,7 +31,7 @@ config SENSORS_PCF8574
+ 	default n
+ 	help
+ 	  If you say yes here you get support for Philips PCF8574 and 
+-	  PCF8574A chips.
++	  PCF8574A chips. These chips are 8-bit I/O expanders for the I2C bus.
  
- /**
-  * driver_detach - detach driver from all devices it controls.
-  * @drv: driver.
-  */
--void driver_detach(struct device_driver * drv)
-+void driver_detach(struct device_driver *drv)
- {
--	struct device * dev;
-+	struct device *dev;
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called pcf8574.
+@@ -65,6 +39,20 @@ config SENSORS_PCF8574
+ 	  These devices are hard to detect and rarely found on mainstream
+ 	  hardware.  If unsure, say N.
  
- 	for (;;) {
--		spin_lock(&drv->klist_devices.k_lock);
--		if (list_empty(&drv->klist_devices.k_list)) {
--			spin_unlock(&drv->klist_devices.k_lock);
-+		spin_lock(&drv->p->klist_devices.k_lock);
-+		if (list_empty(&drv->p->klist_devices.k_list)) {
-+			spin_unlock(&drv->p->klist_devices.k_lock);
- 			break;
- 		}
--		dev = list_entry(drv->klist_devices.k_list.prev,
-+		dev = list_entry(drv->p->klist_devices.k_list.prev,
- 				struct device, knode_driver.n_node);
- 		get_device(dev);
--		spin_unlock(&drv->klist_devices.k_lock);
-+		spin_unlock(&drv->p->klist_devices.k_lock);
++config PCF8575
++	tristate "Philips PCF8575"
++	default n
++	help
++	  If you say yes here you get support for Philips PCF8575 chip.
++	  This chip is a 16-bit I/O expander for the I2C bus.  Several other
++	  chip manufacturers sell equivalent chips, e.g. Texas Instruments.
++
++	  This driver can also be built as a module.  If so, the module
++	  will be called pcf8575.
++
++	  This device is hard to detect and is rarely found on mainstream
++	  hardware.  If unsure, say N.
++
+ config SENSORS_PCA9539
+ 	tristate "Philips PCA9539 16-bit I/O port"
+ 	depends on EXPERIMENTAL
+@@ -100,12 +88,8 @@ config ISP1301_OMAP
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called isp1301_omap.
  
- 		if (dev->parent)	/* Needed for USB */
- 			down(&dev->parent->sem);
-@@ -360,9 +362,3 @@ void driver_detach(struct device_driver * drv)
- 		put_device(dev);
- 	}
- }
+-# NOTE:  This isn't really OMAP-specific, except for the current
+-# interface location in  <include/asm-arm/arch-omap/tps65010.h>
+-# and having mostly OMAP-specific board support
+ config TPS65010
+ 	tristate "TPS6501x Power Management chips"
+-	depends on ARCH_OMAP
+ 	default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK
+ 	help
+ 	  If you say yes here you get support for the TPS6501x series of
+@@ -116,18 +100,6 @@ config TPS65010
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called tps65010.
+ 
+-config SENSORS_M41T00
+-	tristate "ST M41T00 RTC chip (DEPRECATED)"
+-	depends on PPC32
+-	help
+-	  If you say yes here you get support for the ST M41T00 RTC chip.
 -
--EXPORT_SYMBOL_GPL(device_bind_driver);
--EXPORT_SYMBOL_GPL(device_release_driver);
--EXPORT_SYMBOL_GPL(device_attach);
--EXPORT_SYMBOL_GPL(driver_attach);
+-	  This driver can also be built as a module.  If so, the module
+-	  will be called m41t00.
 -
-diff --git a/drivers/base/driver.c b/drivers/base/driver.c
-index eb11475..a35f041 100644
---- a/drivers/base/driver.c
-+++ b/drivers/base/driver.c
-@@ -3,6 +3,8 @@
-  *
-  * Copyright (c) 2002-3 Patrick Mochel
-  * Copyright (c) 2002-3 Open Source Development Labs
-+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh at suse.de>
-+ * Copyright (c) 2007 Novell Inc.
-  *
-  * This file is released under the GPLv2
-  *
-@@ -15,46 +17,42 @@
- #include "base.h"
+-	  This driver is deprecated and will be dropped soon. Use
+-	  rtc-ds1307 or rtc-m41t80 instead.
+-
+ config SENSORS_MAX6875
+ 	tristate "Maxim MAX6875 Power supply supervisor"
+ 	depends on EXPERIMENTAL
+diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
+index ca924e1..501f00c 100644
+--- a/drivers/i2c/chips/Makefile
++++ b/drivers/i2c/chips/Makefile
+@@ -2,14 +2,12 @@
+ # Makefile for miscellaneous I2C chip drivers.
+ #
  
- #define to_dev(node) container_of(node, struct device, driver_list)
--#define to_drv(obj) container_of(obj, struct device_driver, kobj)
+-obj-$(CONFIG_SENSORS_DS1337)	+= ds1337.o
+-obj-$(CONFIG_SENSORS_DS1374)	+= ds1374.o
+ obj-$(CONFIG_DS1682)		+= ds1682.o
+ obj-$(CONFIG_SENSORS_EEPROM)	+= eeprom.o
+ obj-$(CONFIG_SENSORS_MAX6875)	+= max6875.o
+-obj-$(CONFIG_SENSORS_M41T00)	+= m41t00.o
+ obj-$(CONFIG_SENSORS_PCA9539)	+= pca9539.o
+ obj-$(CONFIG_SENSORS_PCF8574)	+= pcf8574.o
++obj-$(CONFIG_PCF8575)		+= pcf8575.o
+ obj-$(CONFIG_SENSORS_PCF8591)	+= pcf8591.o
+ obj-$(CONFIG_ISP1301_OMAP)	+= isp1301_omap.o
+ obj-$(CONFIG_TPS65010)		+= tps65010.o
+diff --git a/drivers/i2c/chips/ds1337.c b/drivers/i2c/chips/ds1337.c
+deleted file mode 100644
+index ec17d6b..0000000
+--- a/drivers/i2c/chips/ds1337.c
++++ /dev/null
+@@ -1,410 +0,0 @@
+-/*
+- *  linux/drivers/i2c/chips/ds1337.c
+- *
+- *  Copyright (C) 2005 James Chapman <jchapman at katalix.com>
+- *
+- *	based on linux/drivers/acorn/char/pcf8583.c
+- *  Copyright (C) 2000 Russell King
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * Driver for Dallas Semiconductor DS1337 and DS1339 real time clock chip
+- */
+-
+-#include <linux/module.h>
+-#include <linux/init.h>
+-#include <linux/slab.h>
+-#include <linux/i2c.h>
+-#include <linux/string.h>
+-#include <linux/rtc.h>		/* get the user-level API */
+-#include <linux/bcd.h>
+-#include <linux/list.h>
+-
+-/* Device registers */
+-#define DS1337_REG_HOUR		2
+-#define DS1337_REG_DAY		3
+-#define DS1337_REG_DATE		4
+-#define DS1337_REG_MONTH	5
+-#define DS1337_REG_CONTROL	14
+-#define DS1337_REG_STATUS	15
+-
+-/* FIXME - how do we export these interface constants? */
+-#define DS1337_GET_DATE		0
+-#define DS1337_SET_DATE		1
+-
+-/*
+- * Functions declaration
+- */
+-static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
+-
+-I2C_CLIENT_INSMOD_1(ds1337);
+-
+-static int ds1337_attach_adapter(struct i2c_adapter *adapter);
+-static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind);
+-static void ds1337_init_client(struct i2c_client *client);
+-static int ds1337_detach_client(struct i2c_client *client);
+-static int ds1337_command(struct i2c_client *client, unsigned int cmd,
+-			  void *arg);
+-
+-/*
+- * Driver data (common to all clients)
+- */
+-static struct i2c_driver ds1337_driver = {
+-	.driver = {
+-		.name	= "ds1337",
+-	},
+-	.attach_adapter	= ds1337_attach_adapter,
+-	.detach_client	= ds1337_detach_client,
+-	.command	= ds1337_command,
+-};
+-
+-/*
+- * Client data (each client gets its own)
+- */
+-struct ds1337_data {
+-	struct i2c_client client;
+-	struct list_head list;
+-};
+-
+-/*
+- * Internal variables
+- */
+-static LIST_HEAD(ds1337_clients);
+-
+-static inline int ds1337_read(struct i2c_client *client, u8 reg, u8 *value)
+-{
+-	s32 tmp = i2c_smbus_read_byte_data(client, reg);
+-
+-	if (tmp < 0)
+-		return -EIO;
+-
+-	*value = tmp;
+-
+-	return 0;
+-}
+-
+-/*
+- * Chip access functions
+- */
+-static int ds1337_get_datetime(struct i2c_client *client, struct rtc_time *dt)
+-{
+-	int result;
+-	u8 buf[7];
+-	u8 val;
+-	struct i2c_msg msg[2];
+-	u8 offs = 0;
+-
+-	if (!dt) {
+-		dev_dbg(&client->dev, "%s: EINVAL: dt=NULL\n", __FUNCTION__);
+-		return -EINVAL;
+-	}
+-
+-	msg[0].addr = client->addr;
+-	msg[0].flags = 0;
+-	msg[0].len = 1;
+-	msg[0].buf = &offs;
+-
+-	msg[1].addr = client->addr;
+-	msg[1].flags = I2C_M_RD;
+-	msg[1].len = sizeof(buf);
+-	msg[1].buf = &buf[0];
+-
+-	result = i2c_transfer(client->adapter, msg, 2);
+-
+-	dev_dbg(&client->dev, "%s: [%d] %02x %02x %02x %02x %02x %02x %02x\n",
+-		__FUNCTION__, result, buf[0], buf[1], buf[2], buf[3],
+-		buf[4], buf[5], buf[6]);
+-
+-	if (result == 2) {
+-		dt->tm_sec = BCD2BIN(buf[0]);
+-		dt->tm_min = BCD2BIN(buf[1]);
+-		val = buf[2] & 0x3f;
+-		dt->tm_hour = BCD2BIN(val);
+-		dt->tm_wday = BCD2BIN(buf[3]) - 1;
+-		dt->tm_mday = BCD2BIN(buf[4]);
+-		val = buf[5] & 0x7f;
+-		dt->tm_mon = BCD2BIN(val) - 1;
+-		dt->tm_year = BCD2BIN(buf[6]);
+-		if (buf[5] & 0x80)
+-			dt->tm_year += 100;
+-
+-		dev_dbg(&client->dev, "%s: secs=%d, mins=%d, "
+-			"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
+-			__FUNCTION__, dt->tm_sec, dt->tm_min,
+-			dt->tm_hour, dt->tm_mday,
+-			dt->tm_mon, dt->tm_year, dt->tm_wday);
+-
+-		return 0;
+-	}
+-
+-	dev_err(&client->dev, "error reading data! %d\n", result);
+-	return -EIO;
+-}
+-
+-static int ds1337_set_datetime(struct i2c_client *client, struct rtc_time *dt)
+-{
+-	int result;
+-	u8 buf[8];
+-	u8 val;
+-	struct i2c_msg msg[1];
+-
+-	if (!dt) {
+-		dev_dbg(&client->dev, "%s: EINVAL: dt=NULL\n", __FUNCTION__);
+-		return -EINVAL;
+-	}
+-
+-	dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
+-		"mday=%d, mon=%d, year=%d, wday=%d\n", __FUNCTION__,
+-		dt->tm_sec, dt->tm_min, dt->tm_hour,
+-		dt->tm_mday, dt->tm_mon, dt->tm_year, dt->tm_wday);
+-
+-	buf[0] = 0;		/* reg offset */
+-	buf[1] = BIN2BCD(dt->tm_sec);
+-	buf[2] = BIN2BCD(dt->tm_min);
+-	buf[3] = BIN2BCD(dt->tm_hour);
+-	buf[4] = BIN2BCD(dt->tm_wday + 1);
+-	buf[5] = BIN2BCD(dt->tm_mday);
+-	buf[6] = BIN2BCD(dt->tm_mon + 1);
+-	val = dt->tm_year;
+-	if (val >= 100) {
+-		val -= 100;
+-		buf[6] |= (1 << 7);
+-	}
+-	buf[7] = BIN2BCD(val);
+-
+-	msg[0].addr = client->addr;
+-	msg[0].flags = 0;
+-	msg[0].len = sizeof(buf);
+-	msg[0].buf = &buf[0];
+-
+-	result = i2c_transfer(client->adapter, msg, 1);
+-	if (result == 1)
+-		return 0;
+-
+-	dev_err(&client->dev, "error writing data! %d\n", result);
+-	return -EIO;
+-}
+-
+-static int ds1337_command(struct i2c_client *client, unsigned int cmd,
+-			  void *arg)
+-{
+-	dev_dbg(&client->dev, "%s: cmd=%d\n", __FUNCTION__, cmd);
+-
+-	switch (cmd) {
+-	case DS1337_GET_DATE:
+-		return ds1337_get_datetime(client, arg);
+-
+-	case DS1337_SET_DATE:
+-		return ds1337_set_datetime(client, arg);
+-
+-	default:
+-		return -EINVAL;
+-	}
+-}
+-
+-/*
+- * Public API for access to specific device. Useful for low-level
+- * RTC access from kernel code.
+- */
+-int ds1337_do_command(int bus, int cmd, void *arg)
+-{
+-	struct list_head *walk;
+-	struct list_head *tmp;
+-	struct ds1337_data *data;
+-
+-	list_for_each_safe(walk, tmp, &ds1337_clients) {
+-		data = list_entry(walk, struct ds1337_data, list);
+-		if (data->client.adapter->nr == bus)
+-			return ds1337_command(&data->client, cmd, arg);
+-	}
+-
+-	return -ENODEV;
+-}
+-
+-static int ds1337_attach_adapter(struct i2c_adapter *adapter)
+-{
+-	return i2c_probe(adapter, &addr_data, ds1337_detect);
+-}
+-
+-/*
+- * The following function does more than just detection. If detection
+- * succeeds, it also registers the new chip.
+- */
+-static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind)
+-{
+-	struct i2c_client *new_client;
+-	struct ds1337_data *data;
+-	int err = 0;
+-	const char *name = "";
+-
+-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+-				     I2C_FUNC_I2C))
+-		goto exit;
+-
+-	if (!(data = kzalloc(sizeof(struct ds1337_data), GFP_KERNEL))) {
+-		err = -ENOMEM;
+-		goto exit;
+-	}
+-	INIT_LIST_HEAD(&data->list);
+-
+-	/* The common I2C client data is placed right before the
+-	 * DS1337-specific data. 
+-	 */
+-	new_client = &data->client;
+-	i2c_set_clientdata(new_client, data);
+-	new_client->addr = address;
+-	new_client->adapter = adapter;
+-	new_client->driver = &ds1337_driver;
+-	new_client->flags = 0;
+-
+-	/*
+-	 * Now we do the remaining detection. A negative kind means that
+-	 * the driver was loaded with no force parameter (default), so we
+-	 * must both detect and identify the chip. A zero kind means that
+-	 * the driver was loaded with the force parameter, the detection
+-	 * step shall be skipped. A positive kind means that the driver
+-	 * was loaded with the force parameter and a given kind of chip is
+-	 * requested, so both the detection and the identification steps
+-	 * are skipped.
+-	 *
+-	 * For detection, we read registers that are most likely to cause
+-	 * detection failure, i.e. those that have more bits with fixed
+-	 * or reserved values.
+-	 */
+-
+-	/* Default to an DS1337 if forced */
+-	if (kind == 0)
+-		kind = ds1337;
+-
+-	if (kind < 0) {		/* detection and identification */
+-		u8 data;
+-
+-		/* Check that status register bits 6-2 are zero */
+-		if ((ds1337_read(new_client, DS1337_REG_STATUS, &data) < 0) ||
+-		    (data & 0x7c))
+-			goto exit_free;
+-
+-		/* Check for a valid day register value */
+-		if ((ds1337_read(new_client, DS1337_REG_DAY, &data) < 0) ||
+-		    (data == 0) || (data & 0xf8))
+-			goto exit_free;
+-
+-		/* Check for a valid date register value */
+-		if ((ds1337_read(new_client, DS1337_REG_DATE, &data) < 0) ||
+-		    (data == 0) || (data & 0xc0) || ((data & 0x0f) > 9) ||
+-		    (data >= 0x32))
+-			goto exit_free;
+-
+-		/* Check for a valid month register value */
+-		if ((ds1337_read(new_client, DS1337_REG_MONTH, &data) < 0) ||
+-		    (data == 0) || (data & 0x60) || ((data & 0x0f) > 9) ||
+-		    ((data >= 0x13) && (data <= 0x19)))
+-			goto exit_free;
+-
+-		/* Check that control register bits 6-5 are zero */
+-		if ((ds1337_read(new_client, DS1337_REG_CONTROL, &data) < 0) ||
+-		    (data & 0x60))
+-			goto exit_free;
+-
+-		kind = ds1337;
+-	}
+-
+-	if (kind == ds1337)
+-		name = "ds1337";
+-
+-	/* We can fill in the remaining client fields */
+-	strlcpy(new_client->name, name, I2C_NAME_SIZE);
+-
+-	/* Tell the I2C layer a new client has arrived */
+-	if ((err = i2c_attach_client(new_client)))
+-		goto exit_free;
+-
+-	/* Initialize the DS1337 chip */
+-	ds1337_init_client(new_client);
+-
+-	/* Add client to local list */
+-	list_add(&data->list, &ds1337_clients);
+-
+-	return 0;
+-
+-exit_free:
+-	kfree(data);
+-exit:
+-	return err;
+-}
+-
+-static void ds1337_init_client(struct i2c_client *client)
+-{
+-	u8 status, control;
+-
+-	/* On some boards, the RTC isn't configured by boot firmware.
+-	 * Handle that case by starting/configuring the RTC now.
+-	 */
+-	status = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
+-	control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+-
+-	if ((status & 0x80) || (control & 0x80)) {
+-		/* RTC not running */
+-		u8 buf[1+16];	/* First byte is interpreted as address */
+-		struct i2c_msg msg[1];
+-
+-		dev_dbg(&client->dev, "%s: RTC not running!\n", __FUNCTION__);
+-
+-		/* Initialize all, including STATUS and CONTROL to zero */
+-		memset(buf, 0, sizeof(buf));
+-
+-		/* Write valid values in the date/time registers */
+-		buf[1+DS1337_REG_DAY] = 1;
+-		buf[1+DS1337_REG_DATE] = 1;
+-		buf[1+DS1337_REG_MONTH] = 1;
+-
+-		msg[0].addr = client->addr;
+-		msg[0].flags = 0;
+-		msg[0].len = sizeof(buf);
+-		msg[0].buf = &buf[0];
+-
+-		i2c_transfer(client->adapter, msg, 1);
+-	} else {
+-		/* Running: ensure that device is set in 24-hour mode */
+-		s32 val;
+-
+-		val = i2c_smbus_read_byte_data(client, DS1337_REG_HOUR);
+-		if ((val >= 0) && (val & (1 << 6)))
+-			i2c_smbus_write_byte_data(client, DS1337_REG_HOUR,
+-						  val & 0x3f);
+-	}
+-}
+-
+-static int ds1337_detach_client(struct i2c_client *client)
+-{
+-	int err;
+-	struct ds1337_data *data = i2c_get_clientdata(client);
+-
+-	if ((err = i2c_detach_client(client)))
+-		return err;
+-
+-	list_del(&data->list);
+-	kfree(data);
+-	return 0;
+-}
+-
+-static int __init ds1337_init(void)
+-{
+-	return i2c_add_driver(&ds1337_driver);
+-}
+-
+-static void __exit ds1337_exit(void)
+-{
+-	i2c_del_driver(&ds1337_driver);
+-}
+-
+-MODULE_AUTHOR("James Chapman <jchapman at katalix.com>");
+-MODULE_DESCRIPTION("DS1337 RTC driver");
+-MODULE_LICENSE("GPL");
+-
+-EXPORT_SYMBOL_GPL(ds1337_do_command);
+-
+-module_init(ds1337_init);
+-module_exit(ds1337_exit);
+diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
+deleted file mode 100644
+index 8a2ff0c..0000000
+--- a/drivers/i2c/chips/ds1374.c
++++ /dev/null
+@@ -1,267 +0,0 @@
+-/*
+- * drivers/i2c/chips/ds1374.c
+- *
+- * I2C client/driver for the Maxim/Dallas DS1374 Real-Time Clock
+- *
+- * Author: Randy Vinson <rvinson at mvista.com>
+- *
+- * Based on the m41t00.c by Mark Greer <mgreer at mvista.com>
+- *
+- * 2005 (c) MontaVista Software, Inc. This file is licensed under
+- * the terms of the GNU General Public License version 2. This program
+- * is licensed "as is" without any warranty of any kind, whether express
+- * or implied.
+- */
+-/*
+- * This i2c client/driver wedges between the drivers/char/genrtc.c RTC
+- * interface and the SMBus interface of the i2c subsystem.
+- * It would be more efficient to use i2c msgs/i2c_transfer directly but, as
+- * recommened in .../Documentation/i2c/writing-clients section
+- * "Sending and receiving", using SMBus level communication is preferred.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/interrupt.h>
+-#include <linux/i2c.h>
+-#include <linux/rtc.h>
+-#include <linux/bcd.h>
+-#include <linux/mutex.h>
+-#include <linux/workqueue.h>
+-
+-#define DS1374_REG_TOD0		0x00
+-#define DS1374_REG_TOD1		0x01
+-#define DS1374_REG_TOD2		0x02
+-#define DS1374_REG_TOD3		0x03
+-#define DS1374_REG_WDALM0	0x04
+-#define DS1374_REG_WDALM1	0x05
+-#define DS1374_REG_WDALM2	0x06
+-#define DS1374_REG_CR		0x07
+-#define DS1374_REG_SR		0x08
+-#define DS1374_REG_SR_OSF	0x80
+-#define DS1374_REG_TCR		0x09
+-
+-#define	DS1374_DRV_NAME		"ds1374"
+-
+-static DEFINE_MUTEX(ds1374_mutex);
+-
+-static struct i2c_driver ds1374_driver;
+-static struct i2c_client *save_client;
+-
+-static unsigned short ignore[] = { I2C_CLIENT_END };
+-static unsigned short normal_addr[] = { 0x68, I2C_CLIENT_END };
+-
+-static struct i2c_client_address_data addr_data = {
+-	.normal_i2c = normal_addr,
+-	.probe = ignore,
+-	.ignore = ignore,
+-};
+-
+-static ulong ds1374_read_rtc(void)
+-{
+-	ulong time = 0;
+-	int reg = DS1374_REG_WDALM0;
+-
+-	while (reg--) {
+-		s32 tmp;
+-		if ((tmp = i2c_smbus_read_byte_data(save_client, reg)) < 0) {
+-			dev_warn(&save_client->dev,
+-				 "can't read from rtc chip\n");
+-			return 0;
+-		}
+-		time = (time << 8) | (tmp & 0xff);
+-	}
+-	return time;
+-}
+-
+-static void ds1374_write_rtc(ulong time)
+-{
+-	int reg;
+-
+-	for (reg = DS1374_REG_TOD0; reg < DS1374_REG_WDALM0; reg++) {
+-		if (i2c_smbus_write_byte_data(save_client, reg, time & 0xff)
+-		    < 0) {
+-			dev_warn(&save_client->dev,
+-				 "can't write to rtc chip\n");
+-			break;
+-		}
+-		time = time >> 8;
+-	}
+-}
+-
+-static void ds1374_check_rtc_status(void)
+-{
+-	s32 tmp;
+-
+-	tmp = i2c_smbus_read_byte_data(save_client, DS1374_REG_SR);
+-	if (tmp < 0) {
+-		dev_warn(&save_client->dev,
+-			 "can't read status from rtc chip\n");
+-		return;
+-	}
+-	if (tmp & DS1374_REG_SR_OSF) {
+-		dev_warn(&save_client->dev,
+-			 "oscillator discontinuity flagged, time unreliable\n");
+-		tmp &= ~DS1374_REG_SR_OSF;
+-		tmp = i2c_smbus_write_byte_data(save_client, DS1374_REG_SR,
+-						tmp & 0xff);
+-		if (tmp < 0)
+-			dev_warn(&save_client->dev,
+-				 "can't clear discontinuity notification\n");
+-	}
+-}
+-
+-ulong ds1374_get_rtc_time(void)
+-{
+-	ulong t1, t2;
+-	int limit = 10;		/* arbitrary retry limit */
+-
+-	mutex_lock(&ds1374_mutex);
+-
+-	/*
+-	 * Since the reads are being performed one byte at a time using
+-	 * the SMBus vs a 4-byte i2c transfer, there is a chance that a
+-	 * carry will occur during the read. To detect this, 2 reads are
+-	 * performed and compared.
+-	 */
+-	do {
+-		t1 = ds1374_read_rtc();
+-		t2 = ds1374_read_rtc();
+-	} while (t1 != t2 && limit--);
+-
+-	mutex_unlock(&ds1374_mutex);
+-
+-	if (t1 != t2) {
+-		dev_warn(&save_client->dev,
+-			 "can't get consistent time from rtc chip\n");
+-		t1 = 0;
+-	}
+-
+-	return t1;
+-}
+-
+-static ulong new_time;
+-
+-static void ds1374_set_work(struct work_struct *work)
+-{
+-	ulong t1, t2;
+-	int limit = 10;		/* arbitrary retry limit */
+-
+-	t1 = new_time;
+-
+-	mutex_lock(&ds1374_mutex);
+-
+-	/*
+-	 * Since the writes are being performed one byte at a time using
+-	 * the SMBus vs a 4-byte i2c transfer, there is a chance that a
+-	 * carry will occur during the write. To detect this, the write
+-	 * value is read back and compared.
+-	 */
+-	do {
+-		ds1374_write_rtc(t1);
+-		t2 = ds1374_read_rtc();
+-	} while (t1 != t2 && limit--);
+-
+-	mutex_unlock(&ds1374_mutex);
+-
+-	if (t1 != t2)
+-		dev_warn(&save_client->dev,
+-			 "can't confirm time set from rtc chip\n");
+-}
+-
+-static struct workqueue_struct *ds1374_workqueue;
+-
+-static DECLARE_WORK(ds1374_work, ds1374_set_work);
+-
+-int ds1374_set_rtc_time(ulong nowtime)
+-{
+-	new_time = nowtime;
+-
+-	if (in_interrupt())
+-		queue_work(ds1374_workqueue, &ds1374_work);
+-	else
+-		ds1374_set_work(NULL);
+-
+-	return 0;
+-}
+-
+-/*
+- *****************************************************************************
+- *
+- *	Driver Interface
+- *
+- *****************************************************************************
+- */
+-static int ds1374_probe(struct i2c_adapter *adap, int addr, int kind)
+-{
+-	struct i2c_client *client;
+-	int rc;
+-
+-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
+-	if (!client)
+-		return -ENOMEM;
+-
+-	strncpy(client->name, DS1374_DRV_NAME, I2C_NAME_SIZE);
+-	client->addr = addr;
+-	client->adapter = adap;
+-	client->driver = &ds1374_driver;
+-
+-	ds1374_workqueue = create_singlethread_workqueue("ds1374");
+-	if (!ds1374_workqueue) {
+-		kfree(client);
+-		return -ENOMEM;	/* most expected reason */
+-	}
+-
+-	if ((rc = i2c_attach_client(client)) != 0) {
+-		kfree(client);
+-		return rc;
+-	}
+-
+-	save_client = client;
+-
+-	ds1374_check_rtc_status();
+-
+-	return 0;
+-}
+-
+-static int ds1374_attach(struct i2c_adapter *adap)
+-{
+-	return i2c_probe(adap, &addr_data, ds1374_probe);
+-}
+-
+-static int ds1374_detach(struct i2c_client *client)
+-{
+-	int rc;
+-
+-	if ((rc = i2c_detach_client(client)) == 0) {
+-		kfree(i2c_get_clientdata(client));
+-		destroy_workqueue(ds1374_workqueue);
+-	}
+-	return rc;
+-}
+-
+-static struct i2c_driver ds1374_driver = {
+-	.driver = {
+-		.name	= DS1374_DRV_NAME,
+-	},
+-	.id = I2C_DRIVERID_DS1374,
+-	.attach_adapter = ds1374_attach,
+-	.detach_client = ds1374_detach,
+-};
+-
+-static int __init ds1374_init(void)
+-{
+-	return i2c_add_driver(&ds1374_driver);
+-}
+-
+-static void __exit ds1374_exit(void)
+-{
+-	i2c_del_driver(&ds1374_driver);
+-}
+-
+-module_init(ds1374_init);
+-module_exit(ds1374_exit);
+-
+-MODULE_AUTHOR("Randy Vinson <rvinson at mvista.com>");
+-MODULE_DESCRIPTION("Maxim/Dallas DS1374 RTC I2C Client Driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
+index 1a7eeeb..fde297b 100644
+--- a/drivers/i2c/chips/eeprom.c
++++ b/drivers/i2c/chips/eeprom.c
+@@ -35,7 +35,7 @@
+ #include <linux/mutex.h>
  
+ /* Addresses to scan */
+-static unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
++static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
+ 					0x55, 0x56, 0x57, I2C_CLIENT_END };
  
--static struct device * next_device(struct klist_iter * i)
-+static struct device *next_device(struct klist_iter *i)
- {
--	struct klist_node * n = klist_next(i);
-+	struct klist_node *n = klist_next(i);
- 	return n ? container_of(n, struct device, knode_driver) : NULL;
- }
+ /* Insmod parameters */
+diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
+index b767603..2a31601 100644
+--- a/drivers/i2c/chips/isp1301_omap.c
++++ b/drivers/i2c/chips/isp1301_omap.c
+@@ -100,7 +100,7 @@ struct isp1301 {
  
- /**
-- *	driver_for_each_device - Iterator for devices bound to a driver.
-- *	@drv:	Driver we're iterating.
-- *	@start: Device to begin with
-- *	@data:	Data to pass to the callback.
-- *	@fn:	Function to call for each device.
-+ * driver_for_each_device - Iterator for devices bound to a driver.
-+ * @drv: Driver we're iterating.
-+ * @start: Device to begin with
-+ * @data: Data to pass to the callback.
-+ * @fn: Function to call for each device.
-  *
-- *	Iterate over the @drv's list of devices calling @fn for each one.
-+ * Iterate over the @drv's list of devices calling @fn for each one.
-  */
--
--int driver_for_each_device(struct device_driver * drv, struct device * start, 
--			   void * data, int (*fn)(struct device *, void *))
-+int driver_for_each_device(struct device_driver *drv, struct device *start,
-+			   void *data, int (*fn)(struct device *, void *))
- {
- 	struct klist_iter i;
--	struct device * dev;
-+	struct device *dev;
- 	int error = 0;
+ #if	defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE)
  
- 	if (!drv)
- 		return -EINVAL;
+-#include <asm/arch/tps65010.h>
++#include <linux/i2c/tps65010.h>
  
--	klist_iter_init_node(&drv->klist_devices, &i,
-+	klist_iter_init_node(&drv->p->klist_devices, &i,
- 			     start ? &start->knode_driver : NULL);
- 	while ((dev = next_device(&i)) && !error)
- 		error = fn(dev, data);
- 	klist_iter_exit(&i);
- 	return error;
+ #else
+ 
+@@ -259,12 +259,6 @@ static inline const char *state_name(struct isp1301 *isp)
+ 	return state_string(isp->otg.state);
  }
--
- EXPORT_SYMBOL_GPL(driver_for_each_device);
  
+-#ifdef	VERBOSE
+-#define	dev_vdbg			dev_dbg
+-#else
+-#define	dev_vdbg(dev, fmt, arg...)	do{}while(0)
+-#endif
 -
- /**
-  * driver_find_device - device iterator for locating a particular device.
-  * @drv: The device's driver
-@@ -70,9 +68,9 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
-  * if it does.  If the callback returns non-zero, this function will
-  * return to the caller and not iterate over any more devices.
-  */
--struct device * driver_find_device(struct device_driver *drv,
--				   struct device * start, void * data,
--				   int (*match)(struct device *, void *))
-+struct device *driver_find_device(struct device_driver *drv,
-+				  struct device *start, void *data,
-+				  int (*match)(struct device *dev, void *data))
- {
- 	struct klist_iter i;
- 	struct device *dev;
-@@ -80,7 +78,7 @@ struct device * driver_find_device(struct device_driver *drv,
- 	if (!drv)
- 		return NULL;
- 
--	klist_iter_init_node(&drv->klist_devices, &i,
-+	klist_iter_init_node(&drv->p->klist_devices, &i,
- 			     (start ? &start->knode_driver : NULL));
- 	while ((dev = next_device(&i)))
- 		if (match(dev, data) && get_device(dev))
-@@ -91,111 +89,179 @@ struct device * driver_find_device(struct device_driver *drv,
- EXPORT_SYMBOL_GPL(driver_find_device);
+ /*-------------------------------------------------------------------------*/
  
- /**
-- *	driver_create_file - create sysfs file for driver.
-- *	@drv:	driver.
-- *	@attr:	driver attribute descriptor.
-+ * driver_create_file - create sysfs file for driver.
-+ * @drv: driver.
-+ * @attr: driver attribute descriptor.
-  */
+ /* NOTE:  some of this ISP1301 setup is specific to H2 boards;
+diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c
+deleted file mode 100644
+index 3fcb646..0000000
+--- a/drivers/i2c/chips/m41t00.c
++++ /dev/null
+@@ -1,413 +0,0 @@
+-/*
+- * I2C client/driver for the ST M41T00 family of i2c rtc chips.
+- *
+- * Author: Mark A. Greer <mgreer at mvista.com>
+- *
+- * 2005, 2006 (c) MontaVista Software, Inc. This file is licensed under
+- * the terms of the GNU General Public License version 2. This program
+- * is licensed "as is" without any warranty of any kind, whether express
+- * or implied.
+- */
+-/*
+- * This i2c client/driver wedges between the drivers/char/genrtc.c RTC
+- * interface and the SMBus interface of the i2c subsystem.
+- */
 -
--int driver_create_file(struct device_driver * drv, struct driver_attribute * attr)
-+int driver_create_file(struct device_driver *drv,
-+		       struct driver_attribute *attr)
- {
- 	int error;
- 	if (get_driver(drv)) {
--		error = sysfs_create_file(&drv->kobj, &attr->attr);
-+		error = sysfs_create_file(&drv->p->kobj, &attr->attr);
- 		put_driver(drv);
- 	} else
- 		error = -EINVAL;
- 	return error;
- }
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/interrupt.h>
+-#include <linux/i2c.h>
+-#include <linux/rtc.h>
+-#include <linux/bcd.h>
+-#include <linux/workqueue.h>
+-#include <linux/platform_device.h>
+-#include <linux/m41t00.h>
+-#include <asm/time.h>
+-#include <asm/rtc.h>
+-
+-static struct i2c_driver m41t00_driver;
+-static struct i2c_client *save_client;
+-
+-static unsigned short ignore[] = { I2C_CLIENT_END };
+-static unsigned short normal_addr[] = { I2C_CLIENT_END, I2C_CLIENT_END };
+-
+-static struct i2c_client_address_data addr_data = {
+-	.normal_i2c	= normal_addr,
+-	.probe		= ignore,
+-	.ignore		= ignore,
+-};
+-
+-struct m41t00_chip_info {
+-	u8	type;
+-	char	*name;
+-	u8	read_limit;
+-	u8	sec;		/* Offsets for chip regs */
+-	u8	min;
+-	u8	hour;
+-	u8	day;
+-	u8	mon;
+-	u8	year;
+-	u8	alarm_mon;
+-	u8	alarm_hour;
+-	u8	sqw;
+-	u8	sqw_freq;
+-};
+-
+-static struct m41t00_chip_info m41t00_chip_info_tbl[] = {
+-	{
+-		.type		= M41T00_TYPE_M41T00,
+-		.name		= "m41t00",
+-		.read_limit	= 5,
+-		.sec		= 0,
+-		.min		= 1,
+-		.hour		= 2,
+-		.day		= 4,
+-		.mon		= 5,
+-		.year		= 6,
+-	},
+-	{
+-		.type		= M41T00_TYPE_M41T81,
+-		.name		= "m41t81",
+-		.read_limit	= 1,
+-		.sec		= 1,
+-		.min		= 2,
+-		.hour		= 3,
+-		.day		= 5,
+-		.mon		= 6,
+-		.year		= 7,
+-		.alarm_mon	= 0xa,
+-		.alarm_hour	= 0xc,
+-		.sqw		= 0x13,
+-	},
+-	{
+-		.type		= M41T00_TYPE_M41T85,
+-		.name		= "m41t85",
+-		.read_limit	= 1,
+-		.sec		= 1,
+-		.min		= 2,
+-		.hour		= 3,
+-		.day		= 5,
+-		.mon		= 6,
+-		.year		= 7,
+-		.alarm_mon	= 0xa,
+-		.alarm_hour	= 0xc,
+-		.sqw		= 0x13,
+-	},
+-};
+-static struct m41t00_chip_info *m41t00_chip;
+-
+-ulong
+-m41t00_get_rtc_time(void)
+-{
+-	s32 sec, min, hour, day, mon, year;
+-	s32 sec1, min1, hour1, day1, mon1, year1;
+-	u8 reads = 0;
+-	u8 buf[8], msgbuf[1] = { 0 }; /* offset into rtc's regs */
+-	struct i2c_msg msgs[] = {
+-		{
+-			.addr	= save_client->addr,
+-			.flags	= 0,
+-			.len	= 1,
+-			.buf	= msgbuf,
+-		},
+-		{
+-			.addr	= save_client->addr,
+-			.flags	= I2C_M_RD,
+-			.len	= 8,
+-			.buf	= buf,
+-		},
+-	};
+-
+-	sec = min = hour = day = mon = year = 0;
+-
+-	do {
+-		if (i2c_transfer(save_client->adapter, msgs, 2) < 0)
+-			goto read_err;
+-
+-		sec1 = sec;
+-		min1 = min;
+-		hour1 = hour;
+-		day1 = day;
+-		mon1 = mon;
+-		year1 = year;
+-
+-		sec = buf[m41t00_chip->sec] & 0x7f;
+-		min = buf[m41t00_chip->min] & 0x7f;
+-		hour = buf[m41t00_chip->hour] & 0x3f;
+-		day = buf[m41t00_chip->day] & 0x3f;
+-		mon = buf[m41t00_chip->mon] & 0x1f;
+-		year = buf[m41t00_chip->year];
+-	} while ((++reads < m41t00_chip->read_limit) && ((sec != sec1)
+-			|| (min != min1) || (hour != hour1) || (day != day1)
+-			|| (mon != mon1) || (year != year1)));
+-
+-	if ((m41t00_chip->read_limit > 1) && ((sec != sec1) || (min != min1)
+-			|| (hour != hour1) || (day != day1) || (mon != mon1)
+-			|| (year != year1)))
+-		goto read_err;
+-
+-	sec = BCD2BIN(sec);
+-	min = BCD2BIN(min);
+-	hour = BCD2BIN(hour);
+-	day = BCD2BIN(day);
+-	mon = BCD2BIN(mon);
+-	year = BCD2BIN(year);
+-
+-	year += 1900;
+-	if (year < 1970)
+-		year += 100;
+-
+-	return mktime(year, mon, day, hour, min, sec);
+-
+-read_err:
+-	dev_err(&save_client->dev, "m41t00_get_rtc_time: Read error\n");
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(m41t00_get_rtc_time);
+-
+-static void
+-m41t00_set(void *arg)
+-{
+-	struct rtc_time	tm;
+-	int nowtime = *(int *)arg;
+-	s32 sec, min, hour, day, mon, year;
+-	u8 wbuf[9], *buf = &wbuf[1], msgbuf[1] = { 0 };
+-	struct i2c_msg msgs[] = {
+-		{
+-			.addr	= save_client->addr,
+-			.flags	= 0,
+-			.len	= 1,
+-			.buf	= msgbuf,
+-		},
+-		{
+-			.addr	= save_client->addr,
+-			.flags	= I2C_M_RD,
+-			.len	= 8,
+-			.buf	= buf,
+-		},
+-	};
+-
+-	to_tm(nowtime, &tm);
+-	tm.tm_year = (tm.tm_year - 1900) % 100;
+-
+-	sec = BIN2BCD(tm.tm_sec);
+-	min = BIN2BCD(tm.tm_min);
+-	hour = BIN2BCD(tm.tm_hour);
+-	day = BIN2BCD(tm.tm_mday);
+-	mon = BIN2BCD(tm.tm_mon);
+-	year = BIN2BCD(tm.tm_year);
+-
+-	/* Read reg values into buf[0..7]/wbuf[1..8] */
+-	if (i2c_transfer(save_client->adapter, msgs, 2) < 0) {
+-		dev_err(&save_client->dev, "m41t00_set: Read error\n");
+-		return;
+-	}
+-
+-	wbuf[0] = 0; /* offset into rtc's regs */
+-	buf[m41t00_chip->sec] = (buf[m41t00_chip->sec] & ~0x7f) | (sec & 0x7f);
+-	buf[m41t00_chip->min] = (buf[m41t00_chip->min] & ~0x7f) | (min & 0x7f);
+-	buf[m41t00_chip->hour] = (buf[m41t00_chip->hour] & ~0x3f) | (hour& 0x3f);
+-	buf[m41t00_chip->day] = (buf[m41t00_chip->day] & ~0x3f) | (day & 0x3f);
+-	buf[m41t00_chip->mon] = (buf[m41t00_chip->mon] & ~0x1f) | (mon & 0x1f);
+-	buf[m41t00_chip->year] = year;
+-
+-	if (i2c_master_send(save_client, wbuf, 9) < 0)
+-		dev_err(&save_client->dev, "m41t00_set: Write error\n");
+-}
+-
+-static ulong new_time;
+-/* well, isn't this API just _lovely_? */
+-static void
+-m41t00_barf(struct work_struct *unusable)
+-{
+-	m41t00_set(&new_time);
+-}
+-
+-static struct workqueue_struct *m41t00_wq;
+-static DECLARE_WORK(m41t00_work, m41t00_barf);
+-
+-int
+-m41t00_set_rtc_time(ulong nowtime)
+-{
+-	new_time = nowtime;
+-
+-	if (in_interrupt())
+-		queue_work(m41t00_wq, &m41t00_work);
+-	else
+-		m41t00_set(&new_time);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(m41t00_set_rtc_time);
+-
+-/*
+- *****************************************************************************
+- *
+- *	platform_data Driver Interface
+- *
+- *****************************************************************************
+- */
+-static int __init
+-m41t00_platform_probe(struct platform_device *pdev)
+-{
+-	struct m41t00_platform_data *pdata;
+-	int i;
+-
+-	if (pdev && (pdata = pdev->dev.platform_data)) {
+-		normal_addr[0] = pdata->i2c_addr;
+-
+-		for (i=0; i<ARRAY_SIZE(m41t00_chip_info_tbl); i++)
+-			if (m41t00_chip_info_tbl[i].type == pdata->type) {
+-				m41t00_chip = &m41t00_chip_info_tbl[i];
+-				m41t00_chip->sqw_freq = pdata->sqw_freq;
+-				return 0;
+-			}
+-	}
+-	return -ENODEV;
+-}
+-
+-static int __exit
+-m41t00_platform_remove(struct platform_device *pdev)
+-{
+-	return 0;
+-}
+-
+-static struct platform_driver m41t00_platform_driver = {
+-	.probe  = m41t00_platform_probe,
+-	.remove = m41t00_platform_remove,
+-	.driver = {
+-		.owner = THIS_MODULE,
+-		.name  = M41T00_DRV_NAME,
+-	},
+-};
+-
+-/*
+- *****************************************************************************
+- *
+- *	Driver Interface
+- *
+- *****************************************************************************
+- */
+-static int
+-m41t00_probe(struct i2c_adapter *adap, int addr, int kind)
+-{
+-	struct i2c_client *client;
+-	int rc;
+-
+-	if (!i2c_check_functionality(adap, I2C_FUNC_I2C
+-			| I2C_FUNC_SMBUS_BYTE_DATA))
+-		return 0;
+-
+-	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
+-	if (!client)
+-		return -ENOMEM;
+-
+-	strlcpy(client->name, m41t00_chip->name, I2C_NAME_SIZE);
+-	client->addr = addr;
+-	client->adapter = adap;
+-	client->driver = &m41t00_driver;
+-
+-	if ((rc = i2c_attach_client(client)))
+-		goto attach_err;
 -
-+EXPORT_SYMBOL_GPL(driver_create_file);
- 
- /**
-- *	driver_remove_file - remove sysfs file for driver.
-- *	@drv:	driver.
-- *	@attr:	driver attribute descriptor.
-+ * driver_remove_file - remove sysfs file for driver.
-+ * @drv: driver.
-+ * @attr: driver attribute descriptor.
-  */
+-	if (m41t00_chip->type != M41T00_TYPE_M41T00) {
+-		/* If asked, disable SQW, set SQW frequency & re-enable */
+-		if (m41t00_chip->sqw_freq)
+-			if (((rc = i2c_smbus_read_byte_data(client,
+-					m41t00_chip->alarm_mon)) < 0)
+-			 || ((rc = i2c_smbus_write_byte_data(client,
+-					m41t00_chip->alarm_mon, rc & ~0x40)) <0)
+-			 || ((rc = i2c_smbus_write_byte_data(client,
+-					m41t00_chip->sqw,
+-					m41t00_chip->sqw_freq)) < 0)
+-			 || ((rc = i2c_smbus_write_byte_data(client,
+-					m41t00_chip->alarm_mon, rc | 0x40)) <0))
+-				goto sqw_err;
 -
--void driver_remove_file(struct device_driver * drv, struct driver_attribute * attr)
-+void driver_remove_file(struct device_driver *drv,
-+			struct driver_attribute *attr)
- {
- 	if (get_driver(drv)) {
--		sysfs_remove_file(&drv->kobj, &attr->attr);
-+		sysfs_remove_file(&drv->p->kobj, &attr->attr);
- 		put_driver(drv);
- 	}
- }
+-		/* Make sure HT (Halt Update) bit is cleared */
+-		if ((rc = i2c_smbus_read_byte_data(client,
+-				m41t00_chip->alarm_hour)) < 0)
+-			goto ht_err;
 -
-+EXPORT_SYMBOL_GPL(driver_remove_file);
- 
- /**
-- *	get_driver - increment driver reference count.
-- *	@drv:	driver.
-+ * driver_add_kobj - add a kobject below the specified driver
-+ *
-+ * You really don't want to do this, this is only here due to one looney
-+ * iseries driver, go poke those developers if you are annoyed about
-+ * this...
-  */
--struct device_driver * get_driver(struct device_driver * drv)
-+int driver_add_kobj(struct device_driver *drv, struct kobject *kobj,
-+		    const char *fmt, ...)
- {
--	return drv ? to_drv(kobject_get(&drv->kobj)) : NULL;
-+	va_list args;
-+	char *name;
-+
-+	va_start(args, fmt);
-+	name = kvasprintf(GFP_KERNEL, fmt, args);
-+	va_end(args);
-+
-+	if (!name)
-+		return -ENOMEM;
-+
-+	return kobject_add(kobj, &drv->p->kobj, "%s", name);
- }
-+EXPORT_SYMBOL_GPL(driver_add_kobj);
-+
-+/**
-+ * get_driver - increment driver reference count.
-+ * @drv: driver.
-+ */
-+struct device_driver *get_driver(struct device_driver *drv)
-+{
-+	if (drv) {
-+		struct driver_private *priv;
-+		struct kobject *kobj;
- 
-+		kobj = kobject_get(&drv->p->kobj);
-+		priv = to_driver(kobj);
-+		return priv->driver;
-+	}
-+	return NULL;
-+}
-+EXPORT_SYMBOL_GPL(get_driver);
- 
- /**
-- *	put_driver - decrement driver's refcount.
-- *	@drv:	driver.
-+ * put_driver - decrement driver's refcount.
-+ * @drv: driver.
-  */
--void put_driver(struct device_driver * drv)
-+void put_driver(struct device_driver *drv)
-+{
-+	kobject_put(&drv->p->kobj);
-+}
-+EXPORT_SYMBOL_GPL(put_driver);
-+
-+static int driver_add_groups(struct device_driver *drv,
-+			     struct attribute_group **groups)
- {
--	kobject_put(&drv->kobj);
-+	int error = 0;
-+	int i;
-+
-+	if (groups) {
-+		for (i = 0; groups[i]; i++) {
-+			error = sysfs_create_group(&drv->p->kobj, groups[i]);
-+			if (error) {
-+				while (--i >= 0)
-+					sysfs_remove_group(&drv->p->kobj,
-+							   groups[i]);
-+				break;
-+			}
-+		}
-+	}
-+	return error;
-+}
-+
-+static void driver_remove_groups(struct device_driver *drv,
-+				 struct attribute_group **groups)
-+{
-+	int i;
-+
-+	if (groups)
-+		for (i = 0; groups[i]; i++)
-+			sysfs_remove_group(&drv->p->kobj, groups[i]);
- }
- 
- /**
-- *	driver_register - register driver with bus
-- *	@drv:	driver to register
-+ * driver_register - register driver with bus
-+ * @drv: driver to register
-  *
-- *	We pass off most of the work to the bus_add_driver() call,
-- *	since most of the things we have to do deal with the bus
-- *	structures.
-+ * We pass off most of the work to the bus_add_driver() call,
-+ * since most of the things we have to do deal with the bus
-+ * structures.
-  */
--int driver_register(struct device_driver * drv)
-+int driver_register(struct device_driver *drv)
- {
-+	int ret;
-+
- 	if ((drv->bus->probe && drv->probe) ||
- 	    (drv->bus->remove && drv->remove) ||
--	    (drv->bus->shutdown && drv->shutdown)) {
--		printk(KERN_WARNING "Driver '%s' needs updating - please use bus_type methods\n", drv->name);
+-		if (rc & 0x40)
+-			if ((rc = i2c_smbus_write_byte_data(client,
+-					m41t00_chip->alarm_hour, rc & ~0x40))<0)
+-				goto ht_err;
 -	}
--	klist_init(&drv->klist_devices, NULL, NULL);
--	return bus_add_driver(drv);
-+	    (drv->bus->shutdown && drv->shutdown))
-+		printk(KERN_WARNING "Driver '%s' needs updating - please use "
-+			"bus_type methods\n", drv->name);
-+	ret = bus_add_driver(drv);
-+	if (ret)
-+		return ret;
-+	ret = driver_add_groups(drv, drv->groups);
-+	if (ret)
-+		bus_remove_driver(drv);
-+	return ret;
- }
-+EXPORT_SYMBOL_GPL(driver_register);
- 
- /**
-- *	driver_unregister - remove driver from system.
-- *	@drv:	driver.
-+ * driver_unregister - remove driver from system.
-+ * @drv: driver.
-  *
-- *	Again, we pass off most of the work to the bus-level call.
-+ * Again, we pass off most of the work to the bus-level call.
-  */
 -
--void driver_unregister(struct device_driver * drv)
-+void driver_unregister(struct device_driver *drv)
- {
-+	driver_remove_groups(drv, drv->groups);
- 	bus_remove_driver(drv);
- }
-+EXPORT_SYMBOL_GPL(driver_unregister);
- 
- /**
-- *	driver_find - locate driver on a bus by its name.
-- *	@name:	name of the driver.
-- *	@bus:	bus to scan for the driver.
-+ * driver_find - locate driver on a bus by its name.
-+ * @name: name of the driver.
-+ * @bus: bus to scan for the driver.
-  *
-- *	Call kset_find_obj() to iterate over list of drivers on
-- *	a bus to find driver by name. Return driver if found.
-+ * Call kset_find_obj() to iterate over list of drivers on
-+ * a bus to find driver by name. Return driver if found.
-  *
-- *	Note that kset_find_obj increments driver's reference count.
-+ * Note that kset_find_obj increments driver's reference count.
-  */
- struct device_driver *driver_find(const char *name, struct bus_type *bus)
- {
--	struct kobject *k = kset_find_obj(&bus->drivers, name);
--	if (k)
--		return to_drv(k);
-+	struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
-+	struct driver_private *priv;
-+
-+	if (k) {
-+		priv = to_driver(k);
-+		return priv->driver;
-+	}
- 	return NULL;
- }
+-	/* Make sure ST (stop) bit is cleared */
+-	if ((rc = i2c_smbus_read_byte_data(client, m41t00_chip->sec)) < 0)
+-		goto st_err;
 -
--EXPORT_SYMBOL_GPL(driver_register);
--EXPORT_SYMBOL_GPL(driver_unregister);
--EXPORT_SYMBOL_GPL(get_driver);
--EXPORT_SYMBOL_GPL(put_driver);
- EXPORT_SYMBOL_GPL(driver_find);
+-	if (rc & 0x80)
+-		if ((rc = i2c_smbus_write_byte_data(client, m41t00_chip->sec,
+-				rc & ~0x80)) < 0)
+-			goto st_err;
 -
--EXPORT_SYMBOL_GPL(driver_create_file);
--EXPORT_SYMBOL_GPL(driver_remove_file);
-diff --git a/drivers/base/firmware.c b/drivers/base/firmware.c
-index 90c8629..1138155 100644
---- a/drivers/base/firmware.c
-+++ b/drivers/base/firmware.c
-@@ -3,11 +3,11 @@
-  *
-  * Copyright (c) 2002-3 Patrick Mochel
-  * Copyright (c) 2002-3 Open Source Development Labs
-+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh at suse.de>
-+ * Copyright (c) 2007 Novell Inc.
-  *
-  * This file is released under the GPLv2
-- *
-  */
+-	m41t00_wq = create_singlethread_workqueue(m41t00_chip->name);
+-	save_client = client;
+-	return 0;
 -
- #include <linux/kobject.h>
- #include <linux/module.h>
- #include <linux/init.h>
-@@ -15,23 +15,13 @@
- 
- #include "base.h"
- 
--static decl_subsys(firmware, NULL, NULL);
+-st_err:
+-	dev_err(&client->dev, "m41t00_probe: Can't clear ST bit\n");
+-	goto attach_err;
+-ht_err:
+-	dev_err(&client->dev, "m41t00_probe: Can't clear HT bit\n");
+-	goto attach_err;
+-sqw_err:
+-	dev_err(&client->dev, "m41t00_probe: Can't set SQW Frequency\n");
+-attach_err:
+-	kfree(client);
+-	return rc;
+-}
 -
--int firmware_register(struct kset *s)
+-static int
+-m41t00_attach(struct i2c_adapter *adap)
 -{
--	kobj_set_kset_s(s, firmware_subsys);
--	return subsystem_register(s);
+-	return i2c_probe(adap, &addr_data, m41t00_probe);
 -}
 -
--void firmware_unregister(struct kset *s)
+-static int
+-m41t00_detach(struct i2c_client *client)
 -{
--	subsystem_unregister(s);
+-	int rc;
+-
+-	if ((rc = i2c_detach_client(client)) == 0) {
+-		kfree(client);
+-		destroy_workqueue(m41t00_wq);
+-	}
+-	return rc;
 -}
-+struct kobject *firmware_kobj;
-+EXPORT_SYMBOL_GPL(firmware_kobj);
- 
- int __init firmware_init(void)
- {
--	return subsystem_register(&firmware_subsys);
-+	firmware_kobj = kobject_create_and_add("firmware", NULL);
-+	if (!firmware_kobj)
-+		return -ENOMEM;
-+	return 0;
- }
 -
--EXPORT_SYMBOL_GPL(firmware_register);
--EXPORT_SYMBOL_GPL(firmware_unregister);
-diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
-index 7080b41..6428cba 100644
---- a/drivers/base/hypervisor.c
-+++ b/drivers/base/hypervisor.c
-@@ -2,19 +2,23 @@
-  * hypervisor.c - /sys/hypervisor subsystem.
-  *
-  * Copyright (C) IBM Corp. 2006
-+ * Copyright (C) 2007 Greg Kroah-Hartman <gregkh at suse.de>
-+ * Copyright (C) 2007 Novell Inc.
-  *
-  * This file is released under the GPLv2
-  */
- 
- #include <linux/kobject.h>
- #include <linux/device.h>
+-static struct i2c_driver m41t00_driver = {
+-	.driver = {
+-		.name	= M41T00_DRV_NAME,
+-	},
+-	.id		= I2C_DRIVERID_STM41T00,
+-	.attach_adapter	= m41t00_attach,
+-	.detach_client	= m41t00_detach,
+-};
 -
- #include "base.h"
- 
--decl_subsys(hypervisor, NULL, NULL);
--EXPORT_SYMBOL_GPL(hypervisor_subsys);
-+struct kobject *hypervisor_kobj;
-+EXPORT_SYMBOL_GPL(hypervisor_kobj);
- 
- int __init hypervisor_init(void)
- {
--	return subsystem_register(&hypervisor_subsys);
-+	hypervisor_kobj = kobject_create_and_add("hypervisor", NULL);
-+	if (!hypervisor_kobj)
-+		return -ENOMEM;
-+	return 0;
- }
-diff --git a/drivers/base/init.c b/drivers/base/init.c
-index 3713815..7bd9b6a 100644
---- a/drivers/base/init.c
-+++ b/drivers/base/init.c
-@@ -1,10 +1,8 @@
- /*
-- *
-  * Copyright (c) 2002-3 Patrick Mochel
-  * Copyright (c) 2002-3 Open Source Development Labs
-  *
-  * This file is released under the GPLv2
-- *
-  */
+-static int __init
+-m41t00_init(void)
+-{
+-	int rc;
+-
+-	if (!(rc = platform_driver_register(&m41t00_platform_driver)))
+-		rc = i2c_add_driver(&m41t00_driver);
+-	return rc;
+-}
+-
+-static void __exit
+-m41t00_exit(void)
+-{
+-	i2c_del_driver(&m41t00_driver);
+-	platform_driver_unregister(&m41t00_platform_driver);
+-}
+-
+-module_init(m41t00_init);
+-module_exit(m41t00_exit);
+-
+-MODULE_AUTHOR("Mark A. Greer <mgreer at mvista.com>");
+-MODULE_DESCRIPTION("ST Microelectronics M41T00 RTC I2C Client Driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c
+index 64692f6..fb7ea56 100644
+--- a/drivers/i2c/chips/max6875.c
++++ b/drivers/i2c/chips/max6875.c
+@@ -34,7 +34,7 @@
+ #include <linux/mutex.h>
  
- #include <linux/device.h>
-@@ -14,12 +12,11 @@
- #include "base.h"
+ /* Do not scan - the MAX6875 access method will write to some EEPROM chips */
+-static unsigned short normal_i2c[] = {I2C_CLIENT_END};
++static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
  
- /**
-- *	driver_init - initialize driver model.
-+ * driver_init - initialize driver model.
-  *
-- *	Call the driver model init functions to initialize their
-- *	subsystems. Called early from init/main.c.
-+ * Call the driver model init functions to initialize their
-+ * subsystems. Called early from init/main.c.
-  */
--
- void __init driver_init(void)
- {
- 	/* These are the core pieces */
-@@ -36,5 +33,4 @@ void __init driver_init(void)
- 	system_bus_init();
- 	cpu_dev_init();
- 	memory_dev_init();
--	attribute_container_init();
- }
-diff --git a/drivers/base/memory.c b/drivers/base/memory.c
-index 7868707..7ae413f 100644
---- a/drivers/base/memory.c
-+++ b/drivers/base/memory.c
-@@ -26,7 +26,7 @@
- #define MEMORY_CLASS_NAME	"memory"
+ /* Insmod parameters */
+ I2C_CLIENT_INSMOD_1(max6875);
+diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c
+index 21c6dd6..b3b830c 100644
+--- a/drivers/i2c/chips/pcf8574.c
++++ b/drivers/i2c/chips/pcf8574.c
+@@ -41,9 +41,11 @@
+ #include <linux/i2c.h>
  
- static struct sysdev_class memory_sysdev_class = {
--	set_kset_name(MEMORY_CLASS_NAME),
-+	.name = MEMORY_CLASS_NAME,
- };
+ /* Addresses to scan */
+-static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+-					0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+-					I2C_CLIENT_END };
++static const unsigned short normal_i2c[] = {
++	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
++	I2C_CLIENT_END
++};
  
- static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
-diff --git a/drivers/base/module.c b/drivers/base/module.c
+ /* Insmod parameters */
+ I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a);
+diff --git a/drivers/i2c/chips/pcf8575.c b/drivers/i2c/chips/pcf8575.c
 new file mode 100644
-index 0000000..103be9c
+index 0000000..3ea08ac
 --- /dev/null
-+++ b/drivers/base/module.c
-@@ -0,0 +1,94 @@
++++ b/drivers/i2c/chips/pcf8575.c
+@@ -0,0 +1,214 @@
 +/*
-+ * module.c - module sysfs fun for drivers
-+ *
-+ * This file is released under the GPLv2
-+ *
-+ */
-+#include <linux/device.h>
++  pcf8575.c
++
++  About the PCF8575 chip: the PCF8575 is a 16-bit I/O expander for the I2C bus
++  produced by a.o. Philips Semiconductors.
++
++  Copyright (C) 2006 Michael Hennerich, Analog Devices Inc.
++  <hennerich at blackfin.uclinux.org>
++  Based on pcf8574.c.
++
++  Copyright (c) 2007 Bart Van Assche <bart.vanassche at gmail.com>.
++  Ported this driver from ucLinux to the mainstream Linux kernel.
++
++  This program is free software; you can redistribute it and/or modify
++  it under the terms of the GNU General Public License as published by
++  the Free Software Foundation; either version 2 of the License, or
++  (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*/
++
 +#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include "base.h"
++#include <linux/init.h>
++#include <linux/i2c.h>
++#include <linux/slab.h>  /* kzalloc() */
++#include <linux/sysfs.h> /* sysfs_create_group() */
 +
-+static char *make_driver_name(struct device_driver *drv)
++/* Addresses to scan */
++static const unsigned short normal_i2c[] = {
++	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++	I2C_CLIENT_END
++};
++
++/* Insmod parameters */
++I2C_CLIENT_INSMOD;
++
++
++/* Each client has this additional data */
++struct pcf8575_data {
++	struct i2c_client client;
++	int write;		/* last written value, or error code */
++};
++
++static int pcf8575_attach_adapter(struct i2c_adapter *adapter);
++static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind);
++static int pcf8575_detach_client(struct i2c_client *client);
++
++/* This is the driver that will be inserted */
++static struct i2c_driver pcf8575_driver = {
++	.driver = {
++		.owner	= THIS_MODULE,
++		.name	= "pcf8575",
++	},
++	.attach_adapter	= pcf8575_attach_adapter,
++	.detach_client	= pcf8575_detach_client,
++};
++
++/* following are the sysfs callback functions */
++static ssize_t show_read(struct device *dev, struct device_attribute *attr,
++			 char *buf)
 +{
-+	char *driver_name;
++	struct i2c_client *client = to_i2c_client(dev);
++	u16 val;
++	u8 iopin_state[2];
 +
-+	driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
-+			      GFP_KERNEL);
-+	if (!driver_name)
-+		return NULL;
++	i2c_master_recv(client, iopin_state, 2);
 +
-+	sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
-+	return driver_name;
++	val = iopin_state[0];
++	val |= iopin_state[1] << 8;
++
++	return sprintf(buf, "%u\n", val);
 +}
 +
-+static void module_create_drivers_dir(struct module_kobject *mk)
-+{
-+	if (!mk || mk->drivers_dir)
-+		return;
++static DEVICE_ATTR(read, S_IRUGO, show_read, NULL);
 +
-+	mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++static ssize_t show_write(struct device *dev, struct device_attribute *attr,
++			  char *buf)
++{
++	struct pcf8575_data *data = dev_get_drvdata(dev);
++	if (data->write < 0)
++		return data->write;
++	return sprintf(buf, "%d\n", data->write);
 +}
 +
-+void module_add_driver(struct module *mod, struct device_driver *drv)
++static ssize_t set_write(struct device *dev, struct device_attribute *attr,
++			 const char *buf, size_t count)
 +{
-+	char *driver_name;
-+	int no_warn;
-+	struct module_kobject *mk = NULL;
++	struct i2c_client *client = to_i2c_client(dev);
++	struct pcf8575_data *data = i2c_get_clientdata(client);
++	unsigned long val = simple_strtoul(buf, NULL, 10);
++	u8 iopin_state[2];
 +
-+	if (!drv)
-+		return;
++	if (val > 0xffff)
++		return -EINVAL;
 +
-+	if (mod)
-+		mk = &mod->mkobj;
-+	else if (drv->mod_name) {
-+		struct kobject *mkobj;
++	data->write = val;
 +
-+		/* Lookup built-in module entry in /sys/modules */
-+		mkobj = kset_find_obj(module_kset, drv->mod_name);
-+		if (mkobj) {
-+			mk = container_of(mkobj, struct module_kobject, kobj);
-+			/* remember our module structure */
-+			drv->p->mkobj = mk;
-+			/* kset_find_obj took a reference */
-+			kobject_put(mkobj);
-+		}
-+	}
++	iopin_state[0] = val & 0xFF;
++	iopin_state[1] = val >> 8;
 +
-+	if (!mk)
-+		return;
++	i2c_master_send(client, iopin_state, 2);
 +
-+	/* Don't check return codes; these calls are idempotent */
-+	no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
-+	driver_name = make_driver_name(drv);
-+	if (driver_name) {
-+		module_create_drivers_dir(mk);
-+		no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
-+					    driver_name);
-+		kfree(driver_name);
-+	}
++	return count;
 +}
 +
-+void module_remove_driver(struct device_driver *drv)
-+{
-+	struct module_kobject *mk = NULL;
-+	char *driver_name;
++static DEVICE_ATTR(write, S_IWUSR | S_IRUGO, show_write, set_write);
 +
-+	if (!drv)
-+		return;
++static struct attribute *pcf8575_attributes[] = {
++	&dev_attr_read.attr,
++	&dev_attr_write.attr,
++	NULL
++};
 +
-+	sysfs_remove_link(&drv->p->kobj, "module");
++static const struct attribute_group pcf8575_attr_group = {
++	.attrs = pcf8575_attributes,
++};
 +
-+	if (drv->owner)
-+		mk = &drv->owner->mkobj;
-+	else if (drv->p->mkobj)
-+		mk = drv->p->mkobj;
-+	if (mk && mk->drivers_dir) {
-+		driver_name = make_driver_name(drv);
-+		if (driver_name) {
-+			sysfs_remove_link(mk->drivers_dir, driver_name);
-+			kfree(driver_name);
-+		}
-+	}
-+}
-diff --git a/drivers/base/node.c b/drivers/base/node.c
-index 88eeed7..e59861f 100644
---- a/drivers/base/node.c
-+++ b/drivers/base/node.c
-@@ -15,7 +15,7 @@
- #include <linux/device.h>
- 
- static struct sysdev_class node_class = {
--	set_kset_name("node"),
-+	.name = "node",
- };
- 
- 
-diff --git a/drivers/base/platform.c b/drivers/base/platform.c
-index fb56092..efaf282 100644
---- a/drivers/base/platform.c
-+++ b/drivers/base/platform.c
-@@ -20,7 +20,8 @@
- 
- #include "base.h"
- 
--#define to_platform_driver(drv)	(container_of((drv), struct platform_driver, driver))
-+#define to_platform_driver(drv)	(container_of((drv), struct platform_driver, \
-+				 driver))
- 
- struct device platform_bus = {
- 	.bus_id		= "platform",
-@@ -28,14 +29,13 @@ struct device platform_bus = {
- EXPORT_SYMBOL_GPL(platform_bus);
- 
- /**
-- *	platform_get_resource - get a resource for a device
-- *	@dev: platform device
-- *	@type: resource type
-- *	@num: resource index
-+ * platform_get_resource - get a resource for a device
-+ * @dev: platform device
-+ * @type: resource type
-+ * @num: resource index
-  */
--struct resource *
--platform_get_resource(struct platform_device *dev, unsigned int type,
--		      unsigned int num)
-+struct resource *platform_get_resource(struct platform_device *dev,
-+				       unsigned int type, unsigned int num)
- {
- 	int i;
- 
-@@ -43,8 +43,7 @@ platform_get_resource(struct platform_device *dev, unsigned int type,
- 		struct resource *r = &dev->resource[i];
- 
- 		if ((r->flags & (IORESOURCE_IO|IORESOURCE_MEM|
--				 IORESOURCE_IRQ|IORESOURCE_DMA))
--		    == type)
-+				 IORESOURCE_IRQ|IORESOURCE_DMA)) == type)
- 			if (num-- == 0)
- 				return r;
- 	}
-@@ -53,9 +52,9 @@ platform_get_resource(struct platform_device *dev, unsigned int type,
- EXPORT_SYMBOL_GPL(platform_get_resource);
- 
- /**
-- *	platform_get_irq - get an IRQ for a device
-- *	@dev: platform device
-- *	@num: IRQ number index
-+ * platform_get_irq - get an IRQ for a device
-+ * @dev: platform device
-+ * @num: IRQ number index
-  */
- int platform_get_irq(struct platform_device *dev, unsigned int num)
- {
-@@ -66,14 +65,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
- EXPORT_SYMBOL_GPL(platform_get_irq);
- 
- /**
-- *	platform_get_resource_byname - get a resource for a device by name
-- *	@dev: platform device
-- *	@type: resource type
-- *	@name: resource name
-+ * platform_get_resource_byname - get a resource for a device by name
-+ * @dev: platform device
-+ * @type: resource type
-+ * @name: resource name
-  */
--struct resource *
--platform_get_resource_byname(struct platform_device *dev, unsigned int type,
--		      char *name)
-+struct resource *platform_get_resource_byname(struct platform_device *dev,
-+					      unsigned int type, char *name)
- {
- 	int i;
- 
-@@ -90,22 +88,23 @@ platform_get_resource_byname(struct platform_device *dev, unsigned int type,
- EXPORT_SYMBOL_GPL(platform_get_resource_byname);
- 
- /**
-- *	platform_get_irq - get an IRQ for a device
-- *	@dev: platform device
-- *	@name: IRQ name
-+ * platform_get_irq - get an IRQ for a device
-+ * @dev: platform device
-+ * @name: IRQ name
-  */
- int platform_get_irq_byname(struct platform_device *dev, char *name)
- {
--	struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
-+	struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
-+							  name);
- 
- 	return r ? r->start : -ENXIO;
- }
- EXPORT_SYMBOL_GPL(platform_get_irq_byname);
- 
- /**
-- *	platform_add_devices - add a numbers of platform devices
-- *	@devs: array of platform devices to add
-- *	@num: number of platform devices in array
-+ * platform_add_devices - add a numbers of platform devices
-+ * @devs: array of platform devices to add
-+ * @num: number of platform devices in array
-  */
- int platform_add_devices(struct platform_device **devs, int num)
- {
-@@ -130,12 +129,11 @@ struct platform_object {
- };
- 
- /**
-- *	platform_device_put
-- *	@pdev:	platform device to free
-+ * platform_device_put
-+ * @pdev: platform device to free
-  *
-- *	Free all memory associated with a platform device.  This function
-- *	must _only_ be externally called in error cases.  All other usage
-- *	is a bug.
-+ * Free all memory associated with a platform device.  This function must
-+ * _only_ be externally called in error cases.  All other usage is a bug.
-  */
- void platform_device_put(struct platform_device *pdev)
- {
-@@ -146,7 +144,8 @@ EXPORT_SYMBOL_GPL(platform_device_put);
- 
- static void platform_device_release(struct device *dev)
- {
--	struct platform_object *pa = container_of(dev, struct platform_object, pdev.dev);
-+	struct platform_object *pa = container_of(dev, struct platform_object,
-+						  pdev.dev);
- 
- 	kfree(pa->pdev.dev.platform_data);
- 	kfree(pa->pdev.resource);
-@@ -154,12 +153,12 @@ static void platform_device_release(struct device *dev)
- }
- 
- /**
-- *	platform_device_alloc
-- *	@name:	base name of the device we're adding
-- *	@id:    instance id
-+ * platform_device_alloc
-+ * @name: base name of the device we're adding
-+ * @id: instance id
-  *
-- *	Create a platform device object which can have other objects attached
-- *	to it, and which will have attached objects freed when it is released.
-+ * Create a platform device object which can have other objects attached
-+ * to it, and which will have attached objects freed when it is released.
-  */
- struct platform_device *platform_device_alloc(const char *name, int id)
- {
-@@ -179,16 +178,17 @@ struct platform_device *platform_device_alloc(const char *name, int id)
- EXPORT_SYMBOL_GPL(platform_device_alloc);
- 
- /**
-- *	platform_device_add_resources
-- *	@pdev:	platform device allocated by platform_device_alloc to add resources to
-- *	@res:   set of resources that needs to be allocated for the device
-- *	@num:	number of resources
-+ * platform_device_add_resources
-+ * @pdev: platform device allocated by platform_device_alloc to add resources to
-+ * @res: set of resources that needs to be allocated for the device
-+ * @num: number of resources
-  *
-- *	Add a copy of the resources to the platform device.  The memory
-- *	associated with the resources will be freed when the platform
-- *	device is released.
-+ * Add a copy of the resources to the platform device.  The memory
-+ * associated with the resources will be freed when the platform device is
-+ * released.
-  */
--int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num)
-+int platform_device_add_resources(struct platform_device *pdev,
-+				  struct resource *res, unsigned int num)
- {
- 	struct resource *r;
- 
-@@ -203,16 +203,17 @@ int platform_device_add_resources(struct platform_device *pdev, struct resource
- EXPORT_SYMBOL_GPL(platform_device_add_resources);
- 
- /**
-- *	platform_device_add_data
-- *	@pdev:	platform device allocated by platform_device_alloc to add resources to
-- *	@data:	platform specific data for this platform device
-- *	@size:	size of platform specific data
-+ * platform_device_add_data
-+ * @pdev: platform device allocated by platform_device_alloc to add resources to
-+ * @data: platform specific data for this platform device
-+ * @size: size of platform specific data
-  *
-- *	Add a copy of platform specific data to the platform device's platform_data
-- *	pointer.  The memory associated with the platform data will be freed
-- *	when the platform device is released.
-+ * Add a copy of platform specific data to the platform device's
-+ * platform_data pointer.  The memory associated with the platform data
-+ * will be freed when the platform device is released.
-  */
--int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size)
-+int platform_device_add_data(struct platform_device *pdev, const void *data,
-+			     size_t size)
- {
- 	void *d;
- 
-@@ -226,11 +227,11 @@ int platform_device_add_data(struct platform_device *pdev, const void *data, siz
- EXPORT_SYMBOL_GPL(platform_device_add_data);
- 
- /**
-- *	platform_device_add - add a platform device to device hierarchy
-- *	@pdev:	platform device we're adding
-+ * platform_device_add - add a platform device to device hierarchy
-+ * @pdev: platform device we're adding
-  *
-- *	This is part 2 of platform_device_register(), though may be called
-- *	separately _iff_ pdev was allocated by platform_device_alloc().
-+ * This is part 2 of platform_device_register(), though may be called
-+ * separately _iff_ pdev was allocated by platform_device_alloc().
-  */
- int platform_device_add(struct platform_device *pdev)
- {
-@@ -289,13 +290,12 @@ int platform_device_add(struct platform_device *pdev)
- EXPORT_SYMBOL_GPL(platform_device_add);
- 
- /**
-- *	platform_device_del - remove a platform-level device
-- *	@pdev:	platform device we're removing
-+ * platform_device_del - remove a platform-level device
-+ * @pdev: platform device we're removing
-  *
-- *	Note that this function will also release all memory- and port-based
-- *	resources owned by the device (@dev->resource).  This function
-- *	must _only_ be externally called in error cases.  All other usage
-- *	is a bug.
-+ * Note that this function will also release all memory- and port-based
-+ * resources owned by the device (@dev->resource).  This function must
-+ * _only_ be externally called in error cases.  All other usage is a bug.
-  */
- void platform_device_del(struct platform_device *pdev)
- {
-@@ -314,11 +314,10 @@ void platform_device_del(struct platform_device *pdev)
- EXPORT_SYMBOL_GPL(platform_device_del);
- 
- /**
-- *	platform_device_register - add a platform-level device
-- *	@pdev:	platform device we're adding
-- *
-+ * platform_device_register - add a platform-level device
-+ * @pdev: platform device we're adding
-  */
--int platform_device_register(struct platform_device * pdev)
-+int platform_device_register(struct platform_device *pdev)
- {
- 	device_initialize(&pdev->dev);
- 	return platform_device_add(pdev);
-@@ -326,14 +325,14 @@ int platform_device_register(struct platform_device * pdev)
- EXPORT_SYMBOL_GPL(platform_device_register);
- 
- /**
-- *	platform_device_unregister - unregister a platform-level device
-- *	@pdev:	platform device we're unregistering
-+ * platform_device_unregister - unregister a platform-level device
-+ * @pdev: platform device we're unregistering
-  *
-- *	Unregistration is done in 2 steps. First we release all resources
-- *	and remove it from the subsystem, then we drop reference count by
-- *	calling platform_device_put().
-+ * Unregistration is done in 2 steps. First we release all resources
-+ * and remove it from the subsystem, then we drop reference count by
-+ * calling platform_device_put().
-  */
--void platform_device_unregister(struct platform_device * pdev)
-+void platform_device_unregister(struct platform_device *pdev)
- {
- 	platform_device_del(pdev);
- 	platform_device_put(pdev);
-@@ -341,27 +340,29 @@ void platform_device_unregister(struct platform_device * pdev)
- EXPORT_SYMBOL_GPL(platform_device_unregister);
- 
- /**
-- *	platform_device_register_simple
-- *	@name:  base name of the device we're adding
-- *	@id:    instance id
-- *	@res:   set of resources that needs to be allocated for the device
-- *	@num:	number of resources
-+ * platform_device_register_simple
-+ * @name: base name of the device we're adding
-+ * @id: instance id
-+ * @res: set of resources that needs to be allocated for the device
-+ * @num: number of resources
-  *
-- *	This function creates a simple platform device that requires minimal
-- *	resource and memory management. Canned release function freeing
-- *	memory allocated for the device allows drivers using such devices
-- *	to be unloaded without waiting for the last reference to the device
-- *	to be dropped.
-+ * This function creates a simple platform device that requires minimal
-+ * resource and memory management. Canned release function freeing memory
-+ * allocated for the device allows drivers using such devices to be
-+ * unloaded without waiting for the last reference to the device to be
-+ * dropped.
-  *
-- *	This interface is primarily intended for use with legacy drivers
-- *	which probe hardware directly.  Because such drivers create sysfs
-- *	device nodes themselves, rather than letting system infrastructure
-- *	handle such device enumeration tasks, they don't fully conform to
-- *	the Linux driver model.  In particular, when such drivers are built
-- *	as modules, they can't be "hotplugged".
-+ * This interface is primarily intended for use with legacy drivers which
-+ * probe hardware directly.  Because such drivers create sysfs device nodes
-+ * themselves, rather than letting system infrastructure handle such device
-+ * enumeration tasks, they don't fully conform to the Linux driver model.
-+ * In particular, when such drivers are built as modules, they can't be
-+ * "hotplugged".
-  */
--struct platform_device *platform_device_register_simple(char *name, int id,
--							struct resource *res, unsigned int num)
-+struct platform_device *platform_device_register_simple(const char *name,
-+							int id,
-+							struct resource *res,
-+							unsigned int num)
- {
- 	struct platform_device *pdev;
- 	int retval;
-@@ -436,8 +437,8 @@ static int platform_drv_resume(struct device *_dev)
- }
- 
- /**
-- *	platform_driver_register
-- *	@drv: platform driver structure
-+ * platform_driver_register
-+ * @drv: platform driver structure
-  */
- int platform_driver_register(struct platform_driver *drv)
- {
-@@ -457,8 +458,8 @@ int platform_driver_register(struct platform_driver *drv)
- EXPORT_SYMBOL_GPL(platform_driver_register);
- 
- /**
-- *	platform_driver_unregister
-- *	@drv: platform driver structure
-+ * platform_driver_unregister
-+ * @drv: platform driver structure
-  */
- void platform_driver_unregister(struct platform_driver *drv)
- {
-@@ -497,12 +498,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
- 	 * if the probe was successful, and make sure any forced probes of
- 	 * new devices fail.
- 	 */
--	spin_lock(&platform_bus_type.klist_drivers.k_lock);
-+	spin_lock(&platform_bus_type.p->klist_drivers.k_lock);
- 	drv->probe = NULL;
--	if (code == 0 && list_empty(&drv->driver.klist_devices.k_list))
-+	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
- 		retval = -ENODEV;
- 	drv->driver.probe = platform_drv_probe_fail;
--	spin_unlock(&platform_bus_type.klist_drivers.k_lock);
-+	spin_unlock(&platform_bus_type.p->klist_drivers.k_lock);
- 
- 	if (code != retval)
- 		platform_driver_unregister(drv);
-@@ -516,8 +517,8 @@ EXPORT_SYMBOL_GPL(platform_driver_probe);
-  * (b) sysfs attribute lets new-style coldplug recover from hotplug events
-  *     mishandled before system is fully running:  "modprobe $(cat modalias)"
-  */
--static ssize_t
--modalias_show(struct device *dev, struct device_attribute *a, char *buf)
-+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
-+			     char *buf)
- {
- 	struct platform_device	*pdev = to_platform_device(dev);
- 	int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
-@@ -538,26 +539,24 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
- 	return 0;
- }
- 
--
- /**
-- *	platform_match - bind platform device to platform driver.
-- *	@dev:	device.
-- *	@drv:	driver.
-+ * platform_match - bind platform device to platform driver.
-+ * @dev: device.
-+ * @drv: driver.
-  *
-- *	Platform device IDs are assumed to be encoded like this:
-- *	"<name><instance>", where <name> is a short description of the
-- *	type of device, like "pci" or "floppy", and <instance> is the
-- *	enumerated instance of the device, like '0' or '42'.
-- *	Driver IDs are simply "<name>".
-- *	So, extract the <name> from the platform_device structure,
-- *	and compare it against the name of the driver. Return whether
-- *	they match or not.
-+ * Platform device IDs are assumed to be encoded like this:
-+ * "<name><instance>", where <name> is a short description of the type of
-+ * device, like "pci" or "floppy", and <instance> is the enumerated
-+ * instance of the device, like '0' or '42'.  Driver IDs are simply
-+ * "<name>".  So, extract the <name> from the platform_device structure,
-+ * and compare it against the name of the driver. Return whether they match
-+ * or not.
-  */
--
--static int platform_match(struct device * dev, struct device_driver * drv)
-+static int platform_match(struct device *dev, struct device_driver *drv)
- {
--	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
-+	struct platform_device *pdev;
- 
-+	pdev = container_of(dev, struct platform_device, dev);
- 	return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0);
- }
- 
-@@ -574,9 +573,10 @@ static int platform_suspend(struct device *dev, pm_message_t mesg)
- static int platform_suspend_late(struct device *dev, pm_message_t mesg)
- {
- 	struct platform_driver *drv = to_platform_driver(dev->driver);
--	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
-+	struct platform_device *pdev;
- 	int ret = 0;
- 
-+	pdev = container_of(dev, struct platform_device, dev);
- 	if (dev->driver && drv->suspend_late)
- 		ret = drv->suspend_late(pdev, mesg);
- 
-@@ -586,16 +586,17 @@ static int platform_suspend_late(struct device *dev, pm_message_t mesg)
- static int platform_resume_early(struct device *dev)
- {
- 	struct platform_driver *drv = to_platform_driver(dev->driver);
--	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
-+	struct platform_device *pdev;
- 	int ret = 0;
- 
-+	pdev = container_of(dev, struct platform_device, dev);
- 	if (dev->driver && drv->resume_early)
- 		ret = drv->resume_early(pdev);
- 
- 	return ret;
- }
- 
--static int platform_resume(struct device * dev)
-+static int platform_resume(struct device *dev)
- {
- 	int ret = 0;
- 
-diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
-index 44504e6..de28dfd 100644
---- a/drivers/base/power/Makefile
-+++ b/drivers/base/power/Makefile
-@@ -1,11 +1,6 @@
--obj-y			:= shutdown.o
- obj-$(CONFIG_PM)	+= sysfs.o
- obj-$(CONFIG_PM_SLEEP)	+= main.o
- obj-$(CONFIG_PM_TRACE)	+= trace.o
- 
--ifeq ($(CONFIG_DEBUG_DRIVER),y)
--EXTRA_CFLAGS += -DDEBUG
--endif
--ifeq ($(CONFIG_PM_VERBOSE),y)
--EXTRA_CFLAGS += -DDEBUG
--endif
-+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
-+ccflags-$(CONFIG_PM_VERBOSE)   += -DDEBUG
-diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
-index 691ffb6..200ed5f 100644
---- a/drivers/base/power/main.c
-+++ b/drivers/base/power/main.c
-@@ -24,20 +24,45 @@
- #include <linux/mutex.h>
- #include <linux/pm.h>
- #include <linux/resume-trace.h>
-+#include <linux/rwsem.h>
- 
- #include "../base.h"
- #include "power.h"
- 
 +/*
-+ * The entries in the dpm_active list are in a depth first order, simply
-+ * because children are guaranteed to be discovered after parents, and
-+ * are inserted at the back of the list on discovery.
-+ *
-+ * All the other lists are kept in the same order, for consistency.
-+ * However the lists aren't always traversed in the same order.
-+ * Semaphores must be acquired from the top (i.e., front) down
-+ * and released in the opposite order.  Devices must be suspended
-+ * from the bottom (i.e., end) up and resumed in the opposite order.
-+ * That way no parent will be suspended while it still has an active
-+ * child.
-+ *
-+ * Since device_pm_add() may be called with a device semaphore held,
-+ * we must never try to acquire a device semaphore while holding
-+ * dpm_list_mutex.
-+ */
-+
- LIST_HEAD(dpm_active);
-+static LIST_HEAD(dpm_locked);
- static LIST_HEAD(dpm_off);
- static LIST_HEAD(dpm_off_irq);
-+static LIST_HEAD(dpm_destroy);
- 
--static DEFINE_MUTEX(dpm_mtx);
- static DEFINE_MUTEX(dpm_list_mtx);
- 
--int (*platform_enable_wakeup)(struct device *dev, int is_on);
-+static DECLARE_RWSEM(pm_sleep_rwsem);
- 
-+int (*platform_enable_wakeup)(struct device *dev, int is_on);
- 
-+/**
-+ *	device_pm_add - add a device to the list of active devices
-+ *	@dev:	Device to be added to the list
-+ */
- void device_pm_add(struct device *dev)
- {
- 	pr_debug("PM: Adding info for %s:%s\n",
-@@ -48,8 +73,36 @@ void device_pm_add(struct device *dev)
- 	mutex_unlock(&dpm_list_mtx);
- }
- 
-+/**
-+ *	device_pm_remove - remove a device from the list of active devices
-+ *	@dev:	Device to be removed from the list
-+ *
-+ *	This function also removes the device's PM-related sysfs attributes.
++ * Real code
 + */
- void device_pm_remove(struct device *dev)
- {
-+	/*
-+	 * If this function is called during a suspend, it will be blocked,
-+	 * because we're holding the device's semaphore at that time, which may
-+	 * lead to a deadlock.  In that case we want to print a warning.
-+	 * However, it may also be called by unregister_dropped_devices() with
-+	 * the device's semaphore released, in which case the warning should
-+	 * not be printed.
-+	 */
-+	if (down_trylock(&dev->sem)) {
-+		if (down_read_trylock(&pm_sleep_rwsem)) {
-+			/* No suspend in progress, wait on dev->sem */
-+			down(&dev->sem);
-+			up_read(&pm_sleep_rwsem);
-+		} else {
-+			/* Suspend in progress, we may deadlock */
-+			dev_warn(dev, "Suspicious %s during suspend\n",
-+				__FUNCTION__);
-+			dump_stack();
-+			/* The user has been warned ... */
-+			down(&dev->sem);
-+		}
-+	}
- 	pr_debug("PM: Removing info for %s:%s\n",
- 		 dev->bus ? dev->bus->name : "No Bus",
- 		 kobject_name(&dev->kobj));
-@@ -57,25 +110,124 @@ void device_pm_remove(struct device *dev)
- 	dpm_sysfs_remove(dev);
- 	list_del_init(&dev->power.entry);
- 	mutex_unlock(&dpm_list_mtx);
-+	up(&dev->sem);
-+}
 +
-+/**
-+ *	device_pm_schedule_removal - schedule the removal of a suspended device
-+ *	@dev:	Device to destroy
-+ *
-+ *	Moves the device to the dpm_destroy list for further processing by
-+ *	unregister_dropped_devices().
-+ */
-+void device_pm_schedule_removal(struct device *dev)
++static int pcf8575_attach_adapter(struct i2c_adapter *adapter)
 +{
-+	pr_debug("PM: Preparing for removal: %s:%s\n",
-+		dev->bus ? dev->bus->name : "No Bus",
-+		kobject_name(&dev->kobj));
-+	mutex_lock(&dpm_list_mtx);
-+	list_move_tail(&dev->power.entry, &dpm_destroy);
-+	mutex_unlock(&dpm_list_mtx);
++	return i2c_probe(adapter, &addr_data, pcf8575_detect);
 +}
 +
-+/**
-+ *	pm_sleep_lock - mutual exclusion for registration and suspend
-+ *
-+ *	Returns 0 if no suspend is underway and device registration
-+ *	may proceed, otherwise -EBUSY.
-+ */
-+int pm_sleep_lock(void)
++/* This function is called by i2c_probe */
++static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind)
 +{
-+	if (down_read_trylock(&pm_sleep_rwsem))
-+		return 0;
++	struct i2c_client *client;
++	struct pcf8575_data *data;
++	int err = 0;
 +
-+	return -EBUSY;
-+}
++	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
++		goto exit;
 +
-+/**
-+ *	pm_sleep_unlock - mutual exclusion for registration and suspend
-+ *
-+ *	This routine undoes the effect of pm_sleep_lock()
-+ *	when a device's registration is complete.
-+ */
-+void pm_sleep_unlock(void)
-+{
-+	up_read(&pm_sleep_rwsem);
- }
- 
- 
- /*------------------------- Resume routines -------------------------*/
- 
- /**
-- *	resume_device - Restore state for one device.
-+ *	resume_device_early - Power on one device (early resume).
-  *	@dev:	Device.
-  *
-+ *	Must be called with interrupts disabled.
-  */
--
--static int resume_device(struct device * dev)
-+static int resume_device_early(struct device *dev)
- {
- 	int error = 0;
- 
- 	TRACE_DEVICE(dev);
- 	TRACE_RESUME(0);
- 
--	down(&dev->sem);
-+	if (dev->bus && dev->bus->resume_early) {
-+		dev_dbg(dev, "EARLY resume\n");
-+		error = dev->bus->resume_early(dev);
++	/* OK. For now, we presume we have a valid client. We now create the
++	   client structure, even though we cannot fill it completely yet. */
++	data = kzalloc(sizeof(struct pcf8575_data), GFP_KERNEL);
++	if (!data) {
++		err = -ENOMEM;
++		goto exit;
 +	}
 +
-+	TRACE_RESUME(error);
-+	return error;
++	client = &data->client;
++	i2c_set_clientdata(client, data);
++	client->addr = address;
++	client->adapter = adapter;
++	client->driver = &pcf8575_driver;
++	strlcpy(client->name, "pcf8575", I2C_NAME_SIZE);
++	data->write = -EAGAIN;
++
++	/* This is the place to detect whether the chip at the specified
++	   address really is a PCF8575 chip. However, there is no method known
++	   to detect whether an I2C chip is a PCF8575 or any other I2C chip. */
++
++	/* Tell the I2C layer a new client has arrived */
++	err = i2c_attach_client(client);
++	if (err)
++		goto exit_free;
++
++	/* Register sysfs hooks */
++	err = sysfs_create_group(&client->dev.kobj, &pcf8575_attr_group);
++	if (err)
++		goto exit_detach;
++
++	return 0;
++
++exit_detach:
++	i2c_detach_client(client);
++exit_free:
++	kfree(data);
++exit:
++	return err;
 +}
 +
-+/**
-+ *	dpm_power_up - Power on all regular (non-sysdev) devices.
-+ *
-+ *	Walk the dpm_off_irq list and power each device up. This
-+ *	is used for devices that required they be powered down with
-+ *	interrupts disabled. As devices are powered on, they are moved
-+ *	to the dpm_off list.
-+ *
-+ *	Must be called with interrupts disabled and only one CPU running.
-+ */
-+static void dpm_power_up(void)
++static int pcf8575_detach_client(struct i2c_client *client)
 +{
++	int err;
 +
-+	while (!list_empty(&dpm_off_irq)) {
-+		struct list_head *entry = dpm_off_irq.next;
-+		struct device *dev = to_device(entry);
++	sysfs_remove_group(&client->dev.kobj, &pcf8575_attr_group);
 +
-+		list_move_tail(entry, &dpm_off);
-+		resume_device_early(dev);
-+	}
++	err = i2c_detach_client(client);
++	if (err)
++		return err;
++
++	kfree(i2c_get_clientdata(client));
++	return 0;
 +}
 +
-+/**
-+ *	device_power_up - Turn on all devices that need special attention.
-+ *
-+ *	Power on system devices, then devices that required we shut them down
-+ *	with interrupts disabled.
-+ *
-+ *	Must be called with interrupts disabled.
-+ */
-+void device_power_up(void)
++static int __init pcf8575_init(void)
 +{
-+	sysdev_resume();
-+	dpm_power_up();
++	return i2c_add_driver(&pcf8575_driver);
 +}
-+EXPORT_SYMBOL_GPL(device_power_up);
 +
-+/**
-+ *	resume_device - Restore state for one device.
-+ *	@dev:	Device.
-+ *
-+ */
-+static int resume_device(struct device *dev)
++static void __exit pcf8575_exit(void)
 +{
-+	int error = 0;
-+
-+	TRACE_DEVICE(dev);
-+	TRACE_RESUME(0);
- 
- 	if (dev->bus && dev->bus->resume) {
- 		dev_dbg(dev,"resuming\n");
-@@ -92,126 +244,94 @@ static int resume_device(struct device * dev)
- 		error = dev->class->resume(dev);
- 	}
- 
--	up(&dev->sem);
--
- 	TRACE_RESUME(error);
- 	return error;
- }
- 
--
--static int resume_device_early(struct device * dev)
--{
--	int error = 0;
--
--	TRACE_DEVICE(dev);
--	TRACE_RESUME(0);
--	if (dev->bus && dev->bus->resume_early) {
--		dev_dbg(dev,"EARLY resume\n");
--		error = dev->bus->resume_early(dev);
--	}
--	TRACE_RESUME(error);
--	return error;
--}
--
--/*
-- * Resume the devices that have either not gone through
-- * the late suspend, or that did go through it but also
-- * went through the early resume
-+/**
-+ *	dpm_resume - Resume every device.
-+ *
-+ *	Resume the devices that have either not gone through
-+ *	the late suspend, or that did go through it but also
-+ *	went through the early resume.
-+ *
-+ *	Take devices from the dpm_off_list, resume them,
-+ *	and put them on the dpm_locked list.
-  */
- static void dpm_resume(void)
- {
- 	mutex_lock(&dpm_list_mtx);
- 	while(!list_empty(&dpm_off)) {
--		struct list_head * entry = dpm_off.next;
--		struct device * dev = to_device(entry);
--
--		get_device(dev);
--		list_move_tail(entry, &dpm_active);
-+		struct list_head *entry = dpm_off.next;
-+		struct device *dev = to_device(entry);
- 
-+		list_move_tail(entry, &dpm_locked);
- 		mutex_unlock(&dpm_list_mtx);
- 		resume_device(dev);
- 		mutex_lock(&dpm_list_mtx);
--		put_device(dev);
- 	}
- 	mutex_unlock(&dpm_list_mtx);
- }
- 
--
- /**
-- *	device_resume - Restore state of each device in system.
-+ *	unlock_all_devices - Release each device's semaphore
-  *
-- *	Walk the dpm_off list, remove each entry, resume the device,
-- *	then add it to the dpm_active list.
-+ *	Go through the dpm_locked list.  Put each device on the dpm_active
-+ *	list and unlock it.
-  */
--
--void device_resume(void)
-+static void unlock_all_devices(void)
- {
--	might_sleep();
--	mutex_lock(&dpm_mtx);
--	dpm_resume();
--	mutex_unlock(&dpm_mtx);
--}
--
--EXPORT_SYMBOL_GPL(device_resume);
-+	mutex_lock(&dpm_list_mtx);
-+	while (!list_empty(&dpm_locked)) {
-+		struct list_head *entry = dpm_locked.prev;
-+		struct device *dev = to_device(entry);
- 
-+		list_move(entry, &dpm_active);
-+		up(&dev->sem);
-+	}
-+	mutex_unlock(&dpm_list_mtx);
++	i2c_del_driver(&pcf8575_driver);
 +}
++
++MODULE_AUTHOR("Michael Hennerich <hennerich at blackfin.uclinux.org>, "
++	      "Bart Van Assche <bart.vanassche at gmail.com>");
++MODULE_DESCRIPTION("pcf8575 driver");
++MODULE_LICENSE("GPL");
++
++module_init(pcf8575_init);
++module_exit(pcf8575_exit);
+diff --git a/drivers/i2c/chips/pcf8591.c b/drivers/i2c/chips/pcf8591.c
+index 4dc3637..865f440 100644
+--- a/drivers/i2c/chips/pcf8591.c
++++ b/drivers/i2c/chips/pcf8591.c
+@@ -27,7 +27,7 @@
+ #include <linux/mutex.h>
  
- /**
-- *	dpm_power_up - Power on some devices.
-- *
-- *	Walk the dpm_off_irq list and power each device up. This
-- *	is used for devices that required they be powered down with
-- *	interrupts disabled. As devices are powered on, they are moved
-- *	to the dpm_active list.
-+ *	unregister_dropped_devices - Unregister devices scheduled for removal
-  *
-- *	Interrupts must be disabled when calling this.
-+ *	Unregister all devices on the dpm_destroy list.
-  */
--
--static void dpm_power_up(void)
-+static void unregister_dropped_devices(void)
- {
--	while(!list_empty(&dpm_off_irq)) {
--		struct list_head * entry = dpm_off_irq.next;
--		struct device * dev = to_device(entry);
-+	mutex_lock(&dpm_list_mtx);
-+	while (!list_empty(&dpm_destroy)) {
-+		struct list_head *entry = dpm_destroy.next;
-+		struct device *dev = to_device(entry);
- 
--		list_move_tail(entry, &dpm_off);
--		resume_device_early(dev);
-+		up(&dev->sem);
-+		mutex_unlock(&dpm_list_mtx);
-+		/* This also removes the device from the list */
-+		device_unregister(dev);
-+		mutex_lock(&dpm_list_mtx);
- 	}
-+	mutex_unlock(&dpm_list_mtx);
- }
- 
--
- /**
-- *	device_power_up - Turn on all devices that need special attention.
-+ *	device_resume - Restore state of each device in system.
-  *
-- *	Power on system devices then devices that required we shut them down
-- *	with interrupts disabled.
-- *	Called with interrupts disabled.
-+ *	Resume all the devices, unlock them all, and allow new
-+ *	devices to be registered once again.
-  */
--
--void device_power_up(void)
-+void device_resume(void)
- {
--	sysdev_resume();
--	dpm_power_up();
-+	might_sleep();
-+	dpm_resume();
-+	unlock_all_devices();
-+	unregister_dropped_devices();
-+	up_write(&pm_sleep_rwsem);
- }
--
--EXPORT_SYMBOL_GPL(device_power_up);
-+EXPORT_SYMBOL_GPL(device_resume);
+ /* Addresses to scan */
+-static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
++static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
+ 					0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
  
+ /* Insmod parameters */
+diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
+index e320994..4154a91 100644
+--- a/drivers/i2c/chips/tps65010.c
++++ b/drivers/i2c/chips/tps65010.c
+@@ -31,7 +31,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/mutex.h>
  
- /*------------------------- Suspend routines -------------------------*/
+-#include <asm/arch/tps65010.h>
++#include <linux/i2c/tps65010.h>
  
--/*
-- * The entries in the dpm_active list are in a depth first order, simply
-- * because children are guaranteed to be discovered after parents, and
-- * are inserted at the back of the list on discovery.
-- *
-- * All lists on the suspend path are done in reverse order, so we operate
-- * on the leaves of the device tree (or forests, depending on how you want
-- * to look at it ;) first. As nodes are removed from the back of the list,
-- * they are inserted into the front of their destination lists.
-- *
-- * Things are the reverse on the resume path - iterations are done in
-- * forward order, and nodes are inserted at the back of their destination
-- * lists. This way, the ancestors will be accessed before their descendents.
-- */
--
- static inline char *suspend_verb(u32 event)
- {
- 	switch (event) {
-@@ -222,7 +342,6 @@ static inline char *suspend_verb(u32 event)
- 	}
- }
+ /*-------------------------------------------------------------------------*/
  
--
- static void
- suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
- {
-@@ -232,16 +351,73 @@ suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
+diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
+index 3de4b19..a10fd27 100644
+--- a/drivers/i2c/chips/tsl2550.c
++++ b/drivers/i2c/chips/tsl2550.c
+@@ -432,11 +432,32 @@ static int __devexit tsl2550_remove(struct i2c_client *client)
+ 	return 0;
  }
  
- /**
-- *	suspend_device - Save state of one device.
-+ *	suspend_device_late - Shut down one device (late suspend).
-  *	@dev:	Device.
-  *	@state:	Power state device is entering.
-+ *
-+ *	This is called with interrupts off and only a single CPU running.
-  */
-+static int suspend_device_late(struct device *dev, pm_message_t state)
++#ifdef CONFIG_PM
++
++static int tsl2550_suspend(struct i2c_client *client, pm_message_t mesg)
 +{
-+	int error = 0;
- 
--static int suspend_device(struct device * dev, pm_message_t state)
-+	if (dev->bus && dev->bus->suspend_late) {
-+		suspend_device_dbg(dev, state, "LATE ");
-+		error = dev->bus->suspend_late(dev, state);
-+		suspend_report_result(dev->bus->suspend_late, error);
-+	}
-+	return error;
++	return tsl2550_set_power_state(client, 0);
 +}
 +
-+/**
-+ *	device_power_down - Shut down special devices.
-+ *	@state:		Power state to enter.
-+ *
-+ *	Power down devices that require interrupts to be disabled
-+ *	and move them from the dpm_off list to the dpm_off_irq list.
-+ *	Then power down system devices.
-+ *
-+ *	Must be called with interrupts disabled and only one CPU running.
-+ */
-+int device_power_down(pm_message_t state)
++static int tsl2550_resume(struct i2c_client *client)
 +{
-+	int error = 0;
++	return tsl2550_set_power_state(client, 1);
++}
 +
-+	while (!list_empty(&dpm_off)) {
-+		struct list_head *entry = dpm_off.prev;
-+		struct device *dev = to_device(entry);
++#else
 +
-+		list_del_init(&dev->power.entry);
-+		error = suspend_device_late(dev, state);
-+		if (error) {
-+			printk(KERN_ERR "Could not power down device %s: "
-+					"error %d\n",
-+					kobject_name(&dev->kobj), error);
-+			if (list_empty(&dev->power.entry))
-+				list_add(&dev->power.entry, &dpm_off);
-+			break;
-+		}
-+		if (list_empty(&dev->power.entry))
-+			list_add(&dev->power.entry, &dpm_off_irq);
-+	}
++#define tsl2550_suspend		NULL
++#define tsl2550_resume		NULL
 +
-+	if (!error)
-+		error = sysdev_suspend(state);
-+	if (error)
-+		dpm_power_up();
-+	return error;
-+}
-+EXPORT_SYMBOL_GPL(device_power_down);
++#endif /* CONFIG_PM */
 +
-+/**
-+ *	suspend_device - Save state of one device.
-+ *	@dev:	Device.
-+ *	@state:	Power state device is entering.
-+ */
-+int suspend_device(struct device *dev, pm_message_t state)
- {
- 	int error = 0;
- 
--	down(&dev->sem);
- 	if (dev->power.power_state.event) {
- 		dev_dbg(dev, "PM: suspend %d-->%d\n",
- 			dev->power.power_state.event, state.event);
-@@ -264,123 +440,105 @@ static int suspend_device(struct device * dev, pm_message_t state)
- 		error = dev->bus->suspend(dev, state);
- 		suspend_report_result(dev->bus->suspend, error);
- 	}
--	up(&dev->sem);
--	return error;
--}
--
--
--/*
-- * This is called with interrupts off, only a single CPU
-- * running. We can't acquire a mutex or semaphore (and we don't
-- * need the protection)
-- */
--static int suspend_device_late(struct device *dev, pm_message_t state)
--{
--	int error = 0;
--
--	if (dev->bus && dev->bus->suspend_late) {
--		suspend_device_dbg(dev, state, "LATE ");
--		error = dev->bus->suspend_late(dev, state);
--		suspend_report_result(dev->bus->suspend_late, error);
--	}
- 	return error;
- }
- 
- /**
-- *	device_suspend - Save state and stop all devices in system.
-- *	@state:		Power state to put each device in.
-+ *	dpm_suspend - Suspend every device.
-+ *	@state:	Power state to put each device in.
-  *
-- *	Walk the dpm_active list, call ->suspend() for each device, and move
-- *	it to the dpm_off list.
-+ *	Walk the dpm_locked list.  Suspend each device and move it
-+ *	to the dpm_off list.
-  *
-  *	(For historical reasons, if it returns -EAGAIN, that used to mean
-  *	that the device would be called again with interrupts disabled.
-  *	These days, we use the "suspend_late()" callback for that, so we
-  *	print a warning and consider it an error).
-- *
-- *	If we get a different error, try and back out.
-- *
-- *	If we hit a failure with any of the devices, call device_resume()
-- *	above to bring the suspended devices back to life.
-- *
-  */
--
--int device_suspend(pm_message_t state)
-+static int dpm_suspend(pm_message_t state)
- {
- 	int error = 0;
- 
--	might_sleep();
--	mutex_lock(&dpm_mtx);
- 	mutex_lock(&dpm_list_mtx);
--	while (!list_empty(&dpm_active) && error == 0) {
--		struct list_head * entry = dpm_active.prev;
--		struct device * dev = to_device(entry);
-+	while (!list_empty(&dpm_locked)) {
-+		struct list_head *entry = dpm_locked.prev;
-+		struct device *dev = to_device(entry);
+ static struct i2c_driver tsl2550_driver = {
+ 	.driver = {
+ 		.name	= TSL2550_DRV_NAME,
+ 		.owner	= THIS_MODULE,
+ 	},
++	.suspend = tsl2550_suspend,
++	.resume	= tsl2550_resume,
+ 	.probe	= tsl2550_probe,
+ 	.remove	= __devexit_p(tsl2550_remove),
+ };
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index b5e13e4..96da22e 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -33,14 +33,15 @@
+ #include <linux/platform_device.h>
+ #include <linux/mutex.h>
+ #include <linux/completion.h>
++#include <linux/hardirq.h>
++#include <linux/irqflags.h>
+ #include <asm/uaccess.h>
++#include <asm/semaphore.h>
  
--		get_device(dev);
-+		list_del_init(&dev->power.entry);
- 		mutex_unlock(&dpm_list_mtx);
--
- 		error = suspend_device(dev, state);
--
--		mutex_lock(&dpm_list_mtx);
--
--		/* Check if the device got removed */
--		if (!list_empty(&dev->power.entry)) {
--			/* Move it to the dpm_off list */
--			if (!error)
--				list_move(&dev->power.entry, &dpm_off);
--		}
--		if (error)
-+		if (error) {
- 			printk(KERN_ERR "Could not suspend device %s: "
--				"error %d%s\n",
--				kobject_name(&dev->kobj), error,
--				error == -EAGAIN ? " (please convert to suspend_late)" : "");
--		put_device(dev);
-+					"error %d%s\n",
-+					kobject_name(&dev->kobj),
-+					error,
-+					(error == -EAGAIN ?
-+					" (please convert to suspend_late)" :
-+					""));
-+			mutex_lock(&dpm_list_mtx);
-+			if (list_empty(&dev->power.entry))
-+				list_add(&dev->power.entry, &dpm_locked);
-+			mutex_unlock(&dpm_list_mtx);
-+			break;
-+		}
-+		mutex_lock(&dpm_list_mtx);
-+		if (list_empty(&dev->power.entry))
-+			list_add(&dev->power.entry, &dpm_off);
- 	}
- 	mutex_unlock(&dpm_list_mtx);
--	if (error)
--		dpm_resume();
+ #include "i2c-core.h"
  
--	mutex_unlock(&dpm_mtx);
- 	return error;
- }
  
--EXPORT_SYMBOL_GPL(device_suspend);
--
- /**
-- *	device_power_down - Shut down special devices.
-- *	@state:		Power state to enter.
-+ *	lock_all_devices - Acquire every device's semaphore
-  *
-- *	Walk the dpm_off_irq list, calling ->power_down() for each device that
-- *	couldn't power down the device with interrupts enabled. When we're
-- *	done, power down system devices.
-+ *	Go through the dpm_active list. Carefully lock each device's
-+ *	semaphore and put it on the dpm_locked list.
-  */
--
--int device_power_down(pm_message_t state)
-+static void lock_all_devices(void)
- {
--	int error = 0;
--	struct device * dev;
-+	mutex_lock(&dpm_list_mtx);
-+	while (!list_empty(&dpm_active)) {
-+		struct list_head *entry = dpm_active.next;
-+		struct device *dev = to_device(entry);
+-static LIST_HEAD(adapters);
+-static LIST_HEAD(drivers);
+-static DEFINE_MUTEX(core_lists);
++static DEFINE_MUTEX(core_lock);
+ static DEFINE_IDR(i2c_adapter_idr);
  
--	while (!list_empty(&dpm_off)) {
--		struct list_head * entry = dpm_off.prev;
-+		/* Required locking order is dev->sem first,
-+		 * then dpm_list_mutex.  Hence this awkward code.
-+		 */
-+		get_device(dev);
-+		mutex_unlock(&dpm_list_mtx);
-+		down(&dev->sem);
-+		mutex_lock(&dpm_list_mtx);
+ #define is_newstyle_driver(d) ((d)->probe || (d)->remove)
+@@ -198,6 +199,25 @@ static struct bus_type i2c_bus_type = {
+ 	.resume		= i2c_device_resume,
+ };
  
--		dev = to_device(entry);
--		error = suspend_device_late(dev, state);
--		if (error)
--			goto Error;
--		list_move(&dev->power.entry, &dpm_off_irq);
-+		if (list_empty(entry))
-+			up(&dev->sem);		/* Device was removed */
-+		else
-+			list_move_tail(entry, &dpm_locked);
-+		put_device(dev);
- 	}
-+	mutex_unlock(&dpm_list_mtx);
-+}
 +
 +/**
-+ *	device_suspend - Save state and stop all devices in system.
++ * i2c_verify_client - return parameter as i2c_client, or NULL
++ * @dev: device, probably from some driver model iterator
 + *
-+ *	Prevent new devices from being registered, then lock all devices
-+ *	and suspend them.
++ * When traversing the driver model tree, perhaps using driver model
++ * iterators like @device_for_each_child(), you can't assume very much
++ * about the nodes you find.  Use this function to avoid oopses caused
++ * by wrongly treating some non-I2C device as an i2c_client.
 + */
-+int device_suspend(pm_message_t state)
++struct i2c_client *i2c_verify_client(struct device *dev)
 +{
-+	int error;
- 
--	error = sysdev_suspend(state);
-- Done:
-+	might_sleep();
-+	down_write(&pm_sleep_rwsem);
-+	lock_all_devices();
-+	error = dpm_suspend(state);
-+	if (error)
-+		device_resume();
- 	return error;
-- Error:
--	printk(KERN_ERR "Could not power down device %s: "
--		"error %d\n", kobject_name(&dev->kobj), error);
--	dpm_power_up();
--	goto Done;
- }
--
--EXPORT_SYMBOL_GPL(device_power_down);
-+EXPORT_SYMBOL_GPL(device_suspend);
- 
- void __suspend_report_result(const char *function, void *fn, int ret)
- {
-diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
-index 379da4e..6f0dfca 100644
---- a/drivers/base/power/power.h
-+++ b/drivers/base/power/power.h
-@@ -1,10 +1,3 @@
--/*
-- * shutdown.c
-- */
--
--extern void device_shutdown(void);
--
--
- #ifdef CONFIG_PM_SLEEP
- 
- /*
-@@ -20,6 +13,9 @@ static inline struct device *to_device(struct list_head *entry)
- 
- extern void device_pm_add(struct device *);
- extern void device_pm_remove(struct device *);
-+extern void device_pm_schedule_removal(struct device *);
-+extern int pm_sleep_lock(void);
-+extern void pm_sleep_unlock(void);
- 
- #else /* CONFIG_PM_SLEEP */
++	return (dev->bus == &i2c_bus_type)
++			? to_i2c_client(dev)
++			: NULL;
++}
++EXPORT_SYMBOL(i2c_verify_client);
++
++
+ /**
+  * i2c_new_device - instantiate an i2c device for use with a new style driver
+  * @adap: the adapter managing the device
+@@ -276,6 +296,50 @@ void i2c_unregister_device(struct i2c_client *client)
+ EXPORT_SYMBOL_GPL(i2c_unregister_device);
  
-@@ -32,6 +28,15 @@ static inline void device_pm_remove(struct device *dev)
- {
- }
  
-+static inline int pm_sleep_lock(void)
++static int dummy_nop(struct i2c_client *client)
 +{
 +	return 0;
 +}
 +
-+static inline void pm_sleep_unlock(void)
-+{
-+}
-+
- #endif
- 
- #ifdef CONFIG_PM
-diff --git a/drivers/base/power/shutdown.c b/drivers/base/power/shutdown.c
-deleted file mode 100644
-index 56e8eaa..0000000
---- a/drivers/base/power/shutdown.c
-+++ /dev/null
-@@ -1,48 +0,0 @@
--/*
-- * shutdown.c - power management functions for the device tree.
-- *
-- * Copyright (c) 2002-3 Patrick Mochel
-- *		 2002-3 Open Source Development Lab
-- *
-- * This file is released under the GPLv2
-- *
-- */
--
--#include <linux/device.h>
--#include <asm/semaphore.h>
--
--#include "../base.h"
--#include "power.h"
--
--#define to_dev(node) container_of(node, struct device, kobj.entry)
--
--
--/**
-- * We handle system devices differently - we suspend and shut them
-- * down last and resume them first. That way, we don't do anything stupid like
-- * shutting down the interrupt controller before any devices..
-- *
-- * Note that there are not different stages for power management calls -
-- * they only get one called once when interrupts are disabled.
-- */
--
--
--/**
-- * device_shutdown - call ->shutdown() on each device to shutdown.
-- */
--void device_shutdown(void)
--{
--	struct device * dev, *devn;
--
--	list_for_each_entry_safe_reverse(dev, devn, &devices_subsys.list,
--				kobj.entry) {
--		if (dev->bus && dev->bus->shutdown) {
--			dev_dbg(dev, "shutdown\n");
--			dev->bus->shutdown(dev);
--		} else if (dev->driver && dev->driver->shutdown) {
--			dev_dbg(dev, "shutdown\n");
--			dev->driver->shutdown(dev);
--		}
--	}
--}
--
-diff --git a/drivers/base/sys.c b/drivers/base/sys.c
-index ac7ff6d..2f79c55 100644
---- a/drivers/base/sys.c
-+++ b/drivers/base/sys.c
-@@ -25,8 +25,6 @@
- 
- #include "base.h"
- 
--extern struct kset devices_subsys;
--
- #define to_sysdev(k) container_of(k, struct sys_device, kobj)
- #define to_sysdev_attr(a) container_of(a, struct sysdev_attribute, attr)
- 
-@@ -128,18 +126,17 @@ void sysdev_class_remove_file(struct sysdev_class *c,
- }
- EXPORT_SYMBOL_GPL(sysdev_class_remove_file);
- 
--/*
-- * declare system_subsys
-- */
--static decl_subsys(system, &ktype_sysdev_class, NULL);
-+static struct kset *system_kset;
- 
- int sysdev_class_register(struct sysdev_class * cls)
- {
- 	pr_debug("Registering sysdev class '%s'\n",
- 		 kobject_name(&cls->kset.kobj));
- 	INIT_LIST_HEAD(&cls->drivers);
--	cls->kset.kobj.parent = &system_subsys.kobj;
--	cls->kset.kobj.kset = &system_subsys;
-+	cls->kset.kobj.parent = &system_kset->kobj;
-+	cls->kset.kobj.ktype = &ktype_sysdev_class;
-+	cls->kset.kobj.kset = system_kset;
-+	kobject_set_name(&cls->kset.kobj, cls->name);
- 	return kset_register(&cls->kset);
- }
- 
-@@ -228,20 +225,15 @@ int sysdev_register(struct sys_device * sysdev)
- 	if (!cls)
- 		return -EINVAL;
- 
-+	pr_debug("Registering sys device '%s'\n", kobject_name(&sysdev->kobj));
++static struct i2c_driver dummy_driver = {
++	.driver.name	= "dummy",
++	.probe		= dummy_nop,
++	.remove		= dummy_nop,
++};
 +
- 	/* Make sure the kset is set */
- 	sysdev->kobj.kset = &cls->kset;
- 
--	/* But make sure we point to the right type for sysfs translation */
--	sysdev->kobj.ktype = &ktype_sysdev;
--	error = kobject_set_name(&sysdev->kobj, "%s%d",
--			 kobject_name(&cls->kset.kobj), sysdev->id);
--	if (error)
--		return error;
--
--	pr_debug("Registering sys device '%s'\n", kobject_name(&sysdev->kobj));
--
- 	/* Register the object */
--	error = kobject_register(&sysdev->kobj);
-+	error = kobject_init_and_add(&sysdev->kobj, &ktype_sysdev, NULL,
-+				     "%s%d", kobject_name(&cls->kset.kobj),
-+				     sysdev->id);
- 
- 	if (!error) {
- 		struct sysdev_driver * drv;
-@@ -258,6 +250,7 @@ int sysdev_register(struct sys_device * sysdev)
- 		}
- 		mutex_unlock(&sysdev_drivers_lock);
- 	}
-+	kobject_uevent(&sysdev->kobj, KOBJ_ADD);
- 	return error;
- }
- 
-@@ -272,7 +265,7 @@ void sysdev_unregister(struct sys_device * sysdev)
- 	}
- 	mutex_unlock(&sysdev_drivers_lock);
++/**
++ * i2c_new_dummy - return a new i2c device bound to a dummy driver
++ * @adapter: the adapter managing the device
++ * @address: seven bit address to be used
++ * @type: optional label used for i2c_client.name
++ * Context: can sleep
++ *
++ * This returns an I2C client bound to the "dummy" driver, intended for use
++ * with devices that consume multiple addresses.  Examples of such chips
++ * include various EEPROMS (like 24c04 and 24c08 models).
++ *
++ * These dummy devices have two main uses.  First, most I2C and SMBus calls
++ * except i2c_transfer() need a client handle; the dummy will be that handle.
++ * And second, this prevents the specified address from being bound to a
++ * different driver.
++ *
++ * This returns the new i2c client, which should be saved for later use with
++ * i2c_unregister_device(); or NULL to indicate an error.
++ */
++struct i2c_client *
++i2c_new_dummy(struct i2c_adapter *adapter, u16 address, const char *type)
++{
++	struct i2c_board_info info = {
++		.driver_name	= "dummy",
++		.addr		= address,
++	};
++
++	if (type)
++		strlcpy(info.type, type, sizeof info.type);
++	return i2c_new_device(adapter, &info);
++}
++EXPORT_SYMBOL_GPL(i2c_new_dummy);
++
+ /* ------------------------------------------------------------------------- */
  
--	kobject_unregister(&sysdev->kobj);
-+	kobject_put(&sysdev->kobj);
+ /* I2C bus adapters -- one roots each I2C or SMBUS segment */
+@@ -320,18 +384,27 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
+ 	mutex_unlock(&__i2c_board_lock);
  }
  
++static int i2c_do_add_adapter(struct device_driver *d, void *data)
++{
++	struct i2c_driver *driver = to_i2c_driver(d);
++	struct i2c_adapter *adap = data;
++
++	if (driver->attach_adapter) {
++		/* We ignore the return code; if it fails, too bad */
++		driver->attach_adapter(adap);
++	}
++	return 0;
++}
++
+ static int i2c_register_adapter(struct i2c_adapter *adap)
+ {
+-	int res = 0;
+-	struct list_head   *item;
+-	struct i2c_driver  *driver;
++	int res = 0, dummy;
  
-@@ -298,8 +291,7 @@ void sysdev_shutdown(void)
- 	pr_debug("Shutting Down System Devices\n");
- 
- 	mutex_lock(&sysdev_drivers_lock);
--	list_for_each_entry_reverse(cls, &system_subsys.list,
--				    kset.kobj.entry) {
-+	list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
- 		struct sys_device * sysdev;
+ 	mutex_init(&adap->bus_lock);
+ 	mutex_init(&adap->clist_lock);
+ 	INIT_LIST_HEAD(&adap->clients);
  
- 		pr_debug("Shutting down type '%s':\n",
-@@ -361,9 +353,7 @@ int sysdev_suspend(pm_message_t state)
+-	mutex_lock(&core_lists);
+-	list_add_tail(&adap->list, &adapters);
++	mutex_lock(&core_lock);
  
- 	pr_debug("Suspending System Devices\n");
+ 	/* Add the adapter to the driver core.
+ 	 * If the parent pointer is not set up,
+@@ -356,19 +429,14 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+ 		i2c_scan_static_board_info(adap);
  
--	list_for_each_entry_reverse(cls, &system_subsys.list,
--				    kset.kobj.entry) {
--
-+	list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
- 		pr_debug("Suspending type '%s':\n",
- 			 kobject_name(&cls->kset.kobj));
+ 	/* let legacy drivers scan this bus for matching devices */
+-	list_for_each(item,&drivers) {
+-		driver = list_entry(item, struct i2c_driver, list);
+-		if (driver->attach_adapter)
+-			/* We ignore the return code; if it fails, too bad */
+-			driver->attach_adapter(adap);
+-	}
++	dummy = bus_for_each_drv(&i2c_bus_type, NULL, adap,
++				 i2c_do_add_adapter);
  
-@@ -414,8 +404,7 @@ aux_driver:
- 	}
+ out_unlock:
+-	mutex_unlock(&core_lists);
++	mutex_unlock(&core_lock);
+ 	return res;
  
- 	/* resume other classes */
--	list_for_each_entry_continue(cls, &system_subsys.list,
--					kset.kobj.entry) {
-+	list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) {
- 		list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
- 			pr_debug(" %s\n", kobject_name(&err_dev->kobj));
- 			__sysdev_resume(err_dev);
-@@ -440,7 +429,7 @@ int sysdev_resume(void)
+ out_list:
+-	list_del(&adap->list);
+ 	idr_remove(&i2c_adapter_idr, adap->nr);
+ 	goto out_unlock;
+ }
+@@ -394,11 +462,11 @@ retry:
+ 	if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
+ 		return -ENOMEM;
  
- 	pr_debug("Resuming System Devices\n");
+-	mutex_lock(&core_lists);
++	mutex_lock(&core_lock);
+ 	/* "above" here means "above or equal to", sigh */
+ 	res = idr_get_new_above(&i2c_adapter_idr, adapter,
+ 				__i2c_first_dynamic_bus_num, &id);
+-	mutex_unlock(&core_lists);
++	mutex_unlock(&core_lock);
  
--	list_for_each_entry(cls, &system_subsys.list, kset.kobj.entry) {
-+	list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {
- 		struct sys_device * sysdev;
+ 	if (res < 0) {
+ 		if (res == -EAGAIN)
+@@ -443,7 +511,7 @@ retry:
+ 	if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
+ 		return -ENOMEM;
  
- 		pr_debug("Resuming type '%s':\n",
-@@ -458,8 +447,10 @@ int sysdev_resume(void)
+-	mutex_lock(&core_lists);
++	mutex_lock(&core_lock);
+ 	/* "above" here means "above or equal to", sigh;
+ 	 * we need the "equal to" result to force the result
+ 	 */
+@@ -452,7 +520,7 @@ retry:
+ 		status = -EBUSY;
+ 		idr_remove(&i2c_adapter_idr, id);
+ 	}
+-	mutex_unlock(&core_lists);
++	mutex_unlock(&core_lock);
+ 	if (status == -EAGAIN)
+ 		goto retry;
  
- int __init system_bus_init(void)
- {
--	system_subsys.kobj.parent = &devices_subsys.kobj;
--	return subsystem_register(&system_subsys);
-+	system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
-+	if (!system_kset)
-+		return -ENOMEM;
-+	return 0;
+@@ -462,6 +530,21 @@ retry:
  }
+ EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
  
- EXPORT_SYMBOL_GPL(sysdev_register);
-diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
-index 9030c37..cd03473 100644
---- a/drivers/block/DAC960.c
-+++ b/drivers/block/DAC960.c
-@@ -3455,19 +3455,12 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
- 						 bool SuccessfulIO)
- {
- 	struct request *Request = Command->Request;
--	int UpToDate;
--
--	UpToDate = 0;
--	if (SuccessfulIO)
--		UpToDate = 1;
-+	int Error = SuccessfulIO ? 0 : -EIO;
- 
- 	pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
- 		Command->SegmentCount, Command->DmaDirection);
- 
--	 if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
--		add_disk_randomness(Request->rq_disk);
-- 	 	end_that_request_last(Request, UpToDate);
--
-+	 if (!__blk_end_request(Request, Error, Command->BlockCount << 9)) {
- 		if (Command->Completion) {
- 			complete(Command->Completion);
- 			Command->Completion = NULL;
-diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
-index 4d0119e..f212285 100644
---- a/drivers/block/Kconfig
-+++ b/drivers/block/Kconfig
-@@ -105,6 +105,17 @@ config PARIDE
- 	  "MicroSolutions backpack protocol", "DataStor Commuter protocol"
- 	  etc.).
- 
-+config GDROM
-+	tristate "SEGA Dreamcast GD-ROM drive"
-+	depends on SH_DREAMCAST
-+	help
-+	  A standard SEGA Dreamcast comes with a modified CD ROM drive called a
-+	  "GD-ROM" by SEGA to signify it is capable of reading special disks
-+	  with up to 1 GB of data. This drive will also read standard CD ROM
-+	  disks. Select this option to access any disks in your GD ROM drive.
-+	  Most users will want to say "Y" here.
-+	  You can also build this as a module which will be called gdrom.ko
++static int i2c_do_del_adapter(struct device_driver *d, void *data)
++{
++	struct i2c_driver *driver = to_i2c_driver(d);
++	struct i2c_adapter *adapter = data;
++	int res;
 +
- source "drivers/block/paride/Kconfig"
- 
- config BLK_CPQ_DA
-diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
-index ad00b3d..826d123 100644
---- a/drivers/block/aoe/aoeblk.c
-+++ b/drivers/block/aoe/aoeblk.c
-@@ -15,8 +15,10 @@
++	if (!driver->detach_adapter)
++		return 0;
++	res = driver->detach_adapter(adapter);
++	if (res)
++		dev_err(&adapter->dev, "detach_adapter failed (%d) "
++			"for driver [%s]\n", res, driver->driver.name);
++	return res;
++}
++
+ /**
+  * i2c_del_adapter - unregister I2C adapter
+  * @adap: the adapter being unregistered
+@@ -473,35 +556,24 @@ EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
+ int i2c_del_adapter(struct i2c_adapter *adap)
+ {
+ 	struct list_head  *item, *_n;
+-	struct i2c_adapter *adap_from_list;
+-	struct i2c_driver *driver;
+ 	struct i2c_client *client;
+ 	int res = 0;
  
- static struct kmem_cache *buf_pool_cache;
+-	mutex_lock(&core_lists);
++	mutex_lock(&core_lock);
  
--static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
-+static ssize_t aoedisk_show_state(struct device *dev,
-+				  struct device_attribute *attr, char *page)
- {
-+	struct gendisk *disk = dev_to_disk(dev);
- 	struct aoedev *d = disk->private_data;
+ 	/* First make sure that this adapter was ever added */
+-	list_for_each_entry(adap_from_list, &adapters, list) {
+-		if (adap_from_list == adap)
+-			break;
+-	}
+-	if (adap_from_list != adap) {
++	if (idr_find(&i2c_adapter_idr, adap->nr) != adap) {
+ 		pr_debug("i2c-core: attempting to delete unregistered "
+ 			 "adapter [%s]\n", adap->name);
+ 		res = -EINVAL;
+ 		goto out_unlock;
+ 	}
  
- 	return snprintf(page, PAGE_SIZE,
-@@ -26,50 +28,47 @@ static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
- 			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
- 	/* I'd rather see nopen exported so we can ditch closewait */
- }
--static ssize_t aoedisk_show_mac(struct gendisk * disk, char *page)
-+static ssize_t aoedisk_show_mac(struct device *dev,
-+				struct device_attribute *attr, char *page)
- {
-+	struct gendisk *disk = dev_to_disk(dev);
- 	struct aoedev *d = disk->private_data;
+-	list_for_each(item,&drivers) {
+-		driver = list_entry(item, struct i2c_driver, list);
+-		if (driver->detach_adapter)
+-			if ((res = driver->detach_adapter(adap))) {
+-				dev_err(&adap->dev, "detach_adapter failed "
+-					"for driver [%s]\n",
+-					driver->driver.name);
+-				goto out_unlock;
+-			}
+-	}
++	/* Tell drivers about this removal */
++	res = bus_for_each_drv(&i2c_bus_type, NULL, adap,
++			       i2c_do_del_adapter);
++	if (res)
++		goto out_unlock;
  
- 	return snprintf(page, PAGE_SIZE, "%012llx\n",
- 			(unsigned long long)mac_addr(d->addr));
- }
--static ssize_t aoedisk_show_netif(struct gendisk * disk, char *page)
-+static ssize_t aoedisk_show_netif(struct device *dev,
-+				  struct device_attribute *attr, char *page)
- {
-+	struct gendisk *disk = dev_to_disk(dev);
- 	struct aoedev *d = disk->private_data;
+ 	/* detach any active clients. This must be done first, because
+ 	 * it can fail; in which case we give up. */
+@@ -529,7 +601,6 @@ int i2c_del_adapter(struct i2c_adapter *adap)
+ 	/* clean up the sysfs representation */
+ 	init_completion(&adap->dev_released);
+ 	device_unregister(&adap->dev);
+-	list_del(&adap->list);
  
- 	return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name);
- }
- /* firmware version */
--static ssize_t aoedisk_show_fwver(struct gendisk * disk, char *page)
-+static ssize_t aoedisk_show_fwver(struct device *dev,
-+				  struct device_attribute *attr, char *page)
- {
-+	struct gendisk *disk = dev_to_disk(dev);
- 	struct aoedev *d = disk->private_data;
+ 	/* wait for sysfs to drop all references */
+ 	wait_for_completion(&adap->dev_released);
+@@ -540,7 +611,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
+ 	dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
  
- 	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
+  out_unlock:
+-	mutex_unlock(&core_lists);
++	mutex_unlock(&core_lock);
+ 	return res;
  }
+ EXPORT_SYMBOL(i2c_del_adapter);
+@@ -583,21 +654,23 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
+ 	if (res)
+ 		return res;
  
--static struct disk_attribute disk_attr_state = {
--	.attr = {.name = "state", .mode = S_IRUGO },
--	.show = aoedisk_show_state
--};
--static struct disk_attribute disk_attr_mac = {
--	.attr = {.name = "mac", .mode = S_IRUGO },
--	.show = aoedisk_show_mac
--};
--static struct disk_attribute disk_attr_netif = {
--	.attr = {.name = "netif", .mode = S_IRUGO },
--	.show = aoedisk_show_netif
--};
--static struct disk_attribute disk_attr_fwver = {
--	.attr = {.name = "firmware-version", .mode = S_IRUGO },
--	.show = aoedisk_show_fwver
-+static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
-+static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
-+static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
-+static struct device_attribute dev_attr_firmware_version = {
-+	.attr = { .name = "firmware-version", .mode = S_IRUGO, .owner = THIS_MODULE },
-+	.show = aoedisk_show_fwver,
- };
+-	mutex_lock(&core_lists);
++	mutex_lock(&core_lock);
  
- static struct attribute *aoe_attrs[] = {
--	&disk_attr_state.attr,
--	&disk_attr_mac.attr,
--	&disk_attr_netif.attr,
--	&disk_attr_fwver.attr,
--	NULL
-+	&dev_attr_state.attr,
-+	&dev_attr_mac.attr,
-+	&dev_attr_netif.attr,
-+	&dev_attr_firmware_version.attr,
-+	NULL,
- };
+-	list_add_tail(&driver->list,&drivers);
+ 	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
  
- static const struct attribute_group attr_group = {
-@@ -79,12 +78,12 @@ static const struct attribute_group attr_group = {
- static int
- aoedisk_add_sysfs(struct aoedev *d)
- {
--	return sysfs_create_group(&d->gd->kobj, &attr_group);
-+	return sysfs_create_group(&d->gd->dev.kobj, &attr_group);
- }
- void
- aoedisk_rm_sysfs(struct aoedev *d)
- {
--	sysfs_remove_group(&d->gd->kobj, &attr_group);
-+	sysfs_remove_group(&d->gd->dev.kobj, &attr_group);
- }
+ 	/* legacy drivers scan i2c busses directly */
+ 	if (driver->attach_adapter) {
+ 		struct i2c_adapter *adapter;
  
- static int
-diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
-index 39e563e..d5480e3 100644
---- a/drivers/block/aoe/aoechr.c
-+++ b/drivers/block/aoe/aoechr.c
-@@ -259,9 +259,8 @@ aoechr_init(void)
- 		return PTR_ERR(aoe_class);
+-		list_for_each_entry(adapter, &adapters, list) {
++		down(&i2c_adapter_class.sem);
++		list_for_each_entry(adapter, &i2c_adapter_class.devices,
++				    dev.node) {
+ 			driver->attach_adapter(adapter);
+ 		}
++		up(&i2c_adapter_class.sem);
  	}
- 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
--		class_device_create(aoe_class, NULL,
--					MKDEV(AOE_MAJOR, chardevs[i].minor),
--					NULL, chardevs[i].name);
-+		device_create(aoe_class, NULL,
-+			      MKDEV(AOE_MAJOR, chardevs[i].minor), chardevs[i].name);
  
+-	mutex_unlock(&core_lists);
++	mutex_unlock(&core_lock);
  	return 0;
  }
-@@ -272,7 +271,7 @@ aoechr_exit(void)
- 	int i;
+ EXPORT_SYMBOL(i2c_register_driver);
+@@ -609,11 +682,11 @@ EXPORT_SYMBOL(i2c_register_driver);
+  */
+ void i2c_del_driver(struct i2c_driver *driver)
+ {
+-	struct list_head   *item1, *item2, *_n;
++	struct list_head   *item2, *_n;
+ 	struct i2c_client  *client;
+ 	struct i2c_adapter *adap;
  
- 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
--		class_device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
-+		device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
- 	class_destroy(aoe_class);
- 	unregister_chrdev(AOE_MAJOR, "aoechr");
- }
-diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
-index 509b649..855ce8e 100644
---- a/drivers/block/cciss.c
-+++ b/drivers/block/cciss.c
-@@ -1187,17 +1187,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
- 	}
- }
+-	mutex_lock(&core_lists);
++	mutex_lock(&core_lock);
  
--static inline void complete_buffers(struct bio *bio, int status)
--{
--	while (bio) {
--		struct bio *xbh = bio->bi_next;
--
--		bio->bi_next = NULL;
--		bio_endio(bio, status ? 0 : -EIO);
--		bio = xbh;
--	}
--}
--
- static void cciss_check_queues(ctlr_info_t *h)
- {
- 	int start_queue = h->next_to_run;
-@@ -1263,21 +1252,14 @@ static void cciss_softirq_done(struct request *rq)
- 		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
+ 	/* new-style driver? */
+ 	if (is_newstyle_driver(driver))
+@@ -623,8 +696,8 @@ void i2c_del_driver(struct i2c_driver *driver)
+ 	 * attached. If so, detach them to be able to kill the driver
+ 	 * afterwards.
+ 	 */
+-	list_for_each(item1,&adapters) {
+-		adap = list_entry(item1, struct i2c_adapter, list);
++	down(&i2c_adapter_class.sem);
++	list_for_each_entry(adap, &i2c_adapter_class.devices, dev.node) {
+ 		if (driver->detach_adapter) {
+ 			if (driver->detach_adapter(adap)) {
+ 				dev_err(&adap->dev, "detach_adapter failed "
+@@ -648,40 +721,31 @@ void i2c_del_driver(struct i2c_driver *driver)
+ 			}
+ 		}
  	}
++	up(&i2c_adapter_class.sem);
  
--	complete_buffers(rq->bio, (rq->errors == 0));
--
--	if (blk_fs_request(rq)) {
--		const int rw = rq_data_dir(rq);
--
--		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
--	}
--
- #ifdef CCISS_DEBUG
- 	printk("Done with %p\n", rq);
- #endif				/* CCISS_DEBUG */
+  unregister:
+ 	driver_unregister(&driver->driver);
+-	list_del(&driver->list);
+ 	pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name);
  
--	add_disk_randomness(rq->rq_disk);
-+	if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
-+		BUG();
-+
- 	spin_lock_irqsave(&h->lock, flags);
--	end_that_request_last(rq, (rq->errors == 0));
- 	cmd_free(h, cmd, 1);
- 	cciss_check_queues(h);
- 	spin_unlock_irqrestore(&h->lock, flags);
-@@ -2542,9 +2524,7 @@ after_error_processing:
- 		resend_cciss_cmd(h, cmd);
- 		return;
- 	}
--	cmd->rq->data_len = 0;
- 	cmd->rq->completion_data = cmd;
--	blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
- 	blk_complete_request(cmd->rq);
+-	mutex_unlock(&core_lists);
++	mutex_unlock(&core_lock);
  }
+ EXPORT_SYMBOL(i2c_del_driver);
  
-diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
-index c8132d9..6919918 100644
---- a/drivers/block/cpqarray.c
-+++ b/drivers/block/cpqarray.c
-@@ -167,7 +167,6 @@ static void start_io(ctlr_info_t *h);
+ /* ------------------------------------------------------------------------- */
  
- static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
- static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
--static inline void complete_buffers(struct bio *bio, int ok);
- static inline void complete_command(cmdlist_t *cmd, int timeout);
+-static int __i2c_check_addr(struct i2c_adapter *adapter, unsigned int addr)
++static int __i2c_check_addr(struct device *dev, void *addrp)
+ {
+-	struct list_head   *item;
+-	struct i2c_client  *client;
++	struct i2c_client	*client = i2c_verify_client(dev);
++	int			addr = *(int *)addrp;
  
- static irqreturn_t do_ida_intr(int irq, void *dev_id);
-@@ -980,26 +979,13 @@ static void start_io(ctlr_info_t *h)
- 	}
+-	list_for_each(item,&adapter->clients) {
+-		client = list_entry(item, struct i2c_client, list);
+-		if (client->addr == addr)
+-			return -EBUSY;
+-	}
++	if (client && client->addr == addr)
++		return -EBUSY;
+ 	return 0;
  }
  
--static inline void complete_buffers(struct bio *bio, int ok)
--{
--	struct bio *xbh;
+ static int i2c_check_addr(struct i2c_adapter *adapter, int addr)
+ {
+-	int rval;
 -
--	while (bio) {
--		xbh = bio->bi_next;
--		bio->bi_next = NULL;
--		
--		bio_endio(bio, ok ? 0 : -EIO);
+-	mutex_lock(&adapter->clist_lock);
+-	rval = __i2c_check_addr(adapter, addr);
+-	mutex_unlock(&adapter->clist_lock);
 -
--		bio = xbh;
--	}
--}
- /*
-  * Mark all buffers that cmd was responsible for
-  */
- static inline void complete_command(cmdlist_t *cmd, int timeout)
- {
- 	struct request *rq = cmd->rq;
--	int ok=1;
-+	int error = 0;
- 	int i, ddir;
+-	return rval;
++	return device_for_each_child(&adapter->dev, &addr, __i2c_check_addr);
+ }
  
- 	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
-@@ -1011,16 +997,17 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
- 	if (cmd->req.hdr.rcode & RCODE_FATAL) {
- 		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
- 				cmd->ctlr, cmd->hdr.unit);
--		ok = 0;
-+		error = -EIO;
- 	}
- 	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
- 				printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
- 				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
- 				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
- 				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
--		ok = 0;	
-+		error = -EIO;
- 	}
--	if (timeout) ok = 0;
-+	if (timeout)
-+		error = -EIO;
- 	/* unmap the DMA mapping for all the scatter gather elements */
- 	if (cmd->req.hdr.cmd == IDA_READ)
- 		ddir = PCI_DMA_FROMDEVICE;
-@@ -1030,18 +1017,9 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
-                 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
- 				cmd->req.sg[i].size, ddir);
+ int i2c_attach_client(struct i2c_client *client)
+@@ -689,15 +753,6 @@ int i2c_attach_client(struct i2c_client *client)
+ 	struct i2c_adapter *adapter = client->adapter;
+ 	int res = 0;
  
--	complete_buffers(rq->bio, ok);
--
--	if (blk_fs_request(rq)) {
--		const int rw = rq_data_dir(rq);
--
--		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
+-	mutex_lock(&adapter->clist_lock);
+-	if (__i2c_check_addr(client->adapter, client->addr)) {
+-		res = -EBUSY;
+-		goto out_unlock;
 -	}
+-	list_add_tail(&client->list,&adapter->clients);
 -
--	add_disk_randomness(rq->rq_disk);
+-	client->usage_count = 0;
 -
- 	DBGPX(printk("Done with %p\n", rq););
--	end_that_request_last(rq, ok ? 1 : -EIO);
-+	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
-+		BUG();
- }
- 
- /*
-diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
-index 639ed14..32c79a5 100644
---- a/drivers/block/floppy.c
-+++ b/drivers/block/floppy.c
-@@ -2287,21 +2287,19 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
-  * =============================
-  */
+ 	client->dev.parent = &client->adapter->dev;
+ 	client->dev.bus = &i2c_bus_type;
  
--static void floppy_end_request(struct request *req, int uptodate)
-+static void floppy_end_request(struct request *req, int error)
- {
- 	unsigned int nr_sectors = current_count_sectors;
-+	unsigned int drive = (unsigned long)req->rq_disk->private_data;
+@@ -712,13 +767,17 @@ int i2c_attach_client(struct i2c_client *client)
  
- 	/* current_count_sectors can be zero if transfer failed */
--	if (!uptodate)
-+	if (error)
- 		nr_sectors = req->current_nr_sectors;
--	if (end_that_request_first(req, uptodate, nr_sectors))
-+	if (__blk_end_request(req, error, nr_sectors << 9))
- 		return;
--	add_disk_randomness(req->rq_disk);
--	floppy_off((long)req->rq_disk->private_data);
--	blkdev_dequeue_request(req);
--	end_that_request_last(req, uptodate);
+ 	snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id),
+ 		"%d-%04x", i2c_adapter_id(adapter), client->addr);
+-	dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
+-		client->name, client->dev.bus_id);
+ 	res = device_register(&client->dev);
+ 	if (res)
+-		goto out_list;
++		goto out_err;
++
++	mutex_lock(&adapter->clist_lock);
++	list_add_tail(&client->list, &adapter->clients);
+ 	mutex_unlock(&adapter->clist_lock);
  
- 	/* We're done with the request */
-+	floppy_off(drive);
- 	current_req = NULL;
- }
++	dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
++		client->name, client->dev.bus_id);
++
+ 	if (adapter->client_register)  {
+ 		if (adapter->client_register(client)) {
+ 			dev_dbg(&adapter->dev, "client_register "
+@@ -729,12 +788,9 @@ int i2c_attach_client(struct i2c_client *client)
  
-@@ -2332,7 +2330,7 @@ static void request_done(int uptodate)
+ 	return 0;
  
- 		/* unlock chained buffers */
- 		spin_lock_irqsave(q->queue_lock, flags);
--		floppy_end_request(req, 1);
-+		floppy_end_request(req, 0);
- 		spin_unlock_irqrestore(q->queue_lock, flags);
- 	} else {
- 		if (rq_data_dir(req) == WRITE) {
-@@ -2346,7 +2344,7 @@ static void request_done(int uptodate)
- 			DRWE->last_error_generation = DRS->generation;
- 		}
- 		spin_lock_irqsave(q->queue_lock, flags);
--		floppy_end_request(req, 0);
-+		floppy_end_request(req, -EIO);
- 		spin_unlock_irqrestore(q->queue_lock, flags);
- 	}
+-out_list:
+-	list_del(&client->list);
++out_err:
+ 	dev_err(&adapter->dev, "Failed to attach i2c client %s at 0x%02x "
+ 		"(%d)\n", client->name, client->addr, res);
+-out_unlock:
+-	mutex_unlock(&adapter->clist_lock);
+ 	return res;
  }
-diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
-index b4c0888..ae31060 100644
---- a/drivers/block/nbd.c
-+++ b/drivers/block/nbd.c
-@@ -100,17 +100,15 @@ static const char *nbdcmd_to_ascii(int cmd)
- 
- static void nbd_end_request(struct request *req)
- {
--	int uptodate = (req->errors == 0) ? 1 : 0;
-+	int error = req->errors ? -EIO : 0;
- 	struct request_queue *q = req->q;
- 	unsigned long flags;
- 
- 	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
--			req, uptodate? "done": "failed");
-+			req, error ? "failed" : "done");
+ EXPORT_SYMBOL(i2c_attach_client);
+@@ -744,12 +800,6 @@ int i2c_detach_client(struct i2c_client *client)
+ 	struct i2c_adapter *adapter = client->adapter;
+ 	int res = 0;
  
- 	spin_lock_irqsave(q->queue_lock, flags);
--	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
--		end_that_request_last(req, uptodate);
+-	if (client->usage_count > 0) {
+-		dev_warn(&client->dev, "Client [%s] still busy, "
+-			 "can't detach\n", client->name);
+-		return -EBUSY;
 -	}
-+	__blk_end_request(req, error, req->nr_sectors << 9);
- 	spin_unlock_irqrestore(q->queue_lock, flags);
- }
- 
-@@ -375,14 +373,17 @@ harderror:
- 	return NULL;
- }
+-
+ 	if (adapter->client_unregister)  {
+ 		res = adapter->client_unregister(client);
+ 		if (res) {
+@@ -762,9 +812,10 @@ int i2c_detach_client(struct i2c_client *client)
  
--static ssize_t pid_show(struct gendisk *disk, char *page)
-+static ssize_t pid_show(struct device *dev,
-+			struct device_attribute *attr, char *buf)
- {
--	return sprintf(page, "%ld\n",
-+	struct gendisk *disk = dev_to_disk(dev);
+ 	mutex_lock(&adapter->clist_lock);
+ 	list_del(&client->list);
++	mutex_unlock(&adapter->clist_lock);
 +
-+	return sprintf(buf, "%ld\n",
- 		(long) ((struct nbd_device *)disk->private_data)->pid);
- }
- 
--static struct disk_attribute pid_attr = {
--	.attr = { .name = "pid", .mode = S_IRUGO },
-+static struct device_attribute pid_attr = {
-+	.attr = { .name = "pid", .mode = S_IRUGO, .owner = THIS_MODULE },
- 	.show = pid_show,
- };
- 
-@@ -394,7 +395,7 @@ static int nbd_do_it(struct nbd_device *lo)
- 	BUG_ON(lo->magic != LO_MAGIC);
- 
- 	lo->pid = current->pid;
--	ret = sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
-+	ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr);
- 	if (ret) {
- 		printk(KERN_ERR "nbd: sysfs_create_file failed!");
- 		return ret;
-@@ -403,7 +404,7 @@ static int nbd_do_it(struct nbd_device *lo)
- 	while ((req = nbd_read_stat(lo)) != NULL)
- 		nbd_end_request(req);
+ 	init_completion(&client->released);
+ 	device_unregister(&client->dev);
+-	mutex_unlock(&adapter->clist_lock);
+ 	wait_for_completion(&client->released);
  
--	sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
-+	sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr);
- 	return 0;
+  out:
+@@ -772,72 +823,58 @@ int i2c_detach_client(struct i2c_client *client)
  }
+ EXPORT_SYMBOL(i2c_detach_client);
  
-diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
-index d89e7d3..ab86e23 100644
---- a/drivers/block/paride/pg.c
-+++ b/drivers/block/paride/pg.c
-@@ -676,8 +676,8 @@ static int __init pg_init(void)
- 	for (unit = 0; unit < PG_UNITS; unit++) {
- 		struct pg *dev = &devices[unit];
- 		if (dev->present)
--			class_device_create(pg_class, NULL, MKDEV(major, unit),
--					NULL, "pg%u", unit);
-+			device_create(pg_class, NULL, MKDEV(major, unit),
-+				      "pg%u", unit);
- 	}
- 	err = 0;
- 	goto out;
-@@ -695,7 +695,7 @@ static void __exit pg_exit(void)
- 	for (unit = 0; unit < PG_UNITS; unit++) {
- 		struct pg *dev = &devices[unit];
- 		if (dev->present)
--			class_device_destroy(pg_class, MKDEV(major, unit));
-+			device_destroy(pg_class, MKDEV(major, unit));
- 	}
- 	class_destroy(pg_class);
- 	unregister_chrdev(major, name);
-diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
-index b91accf..76096ca 100644
---- a/drivers/block/paride/pt.c
-+++ b/drivers/block/paride/pt.c
-@@ -972,10 +972,10 @@ static int __init pt_init(void)
- 
- 	for (unit = 0; unit < PT_UNITS; unit++)
- 		if (pt[unit].present) {
--			class_device_create(pt_class, NULL, MKDEV(major, unit),
--					NULL, "pt%d", unit);
--			class_device_create(pt_class, NULL, MKDEV(major, unit + 128),
--					NULL, "pt%dn", unit);
-+			device_create(pt_class, NULL, MKDEV(major, unit),
-+				      "pt%d", unit);
-+			device_create(pt_class, NULL, MKDEV(major, unit + 128),
-+				      "pt%dn", unit);
- 		}
- 	goto out;
- 
-@@ -990,8 +990,8 @@ static void __exit pt_exit(void)
- 	int unit;
- 	for (unit = 0; unit < PT_UNITS; unit++)
- 		if (pt[unit].present) {
--			class_device_destroy(pt_class, MKDEV(major, unit));
--			class_device_destroy(pt_class, MKDEV(major, unit + 128));
-+			device_destroy(pt_class, MKDEV(major, unit));
-+			device_destroy(pt_class, MKDEV(major, unit + 128));
- 		}
- 	class_destroy(pt_class);
- 	unregister_chrdev(major, name);
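
The pg and pt hunks above, and the pktcdvd hunk just below, are the same mechanical conversion from the old class_device interface to plain struct device. A hedged before/after sketch, reusing the pg naming purely as an example (not part of the patch):

/* the fourth (struct device *) argument of class_device_create() is gone,
 * and the return value is a struct device * that real callers check with
 * IS_ERR(), as the pktcdvd hunk below does */
static void example_create_node(struct class *cls, int major, int unit)
{
	/* was: class_device_create(cls, NULL, MKDEV(major, unit),
	 *			     NULL, "pg%u", unit); */
	device_create(cls, NULL, MKDEV(major, unit), "pg%u", unit);
}

static void example_destroy_node(struct class *cls, int major, int unit)
{
	/* was: class_device_destroy(cls, MKDEV(major, unit)); */
	device_destroy(cls, MKDEV(major, unit));
}
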
-diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
-index 3535ef8..e9de171 100644
---- a/drivers/block/pktcdvd.c
-+++ b/drivers/block/pktcdvd.c
-@@ -110,17 +110,18 @@ static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
- 					struct kobj_type* ktype)
- {
- 	struct pktcdvd_kobj *p;
-+	int error;
-+
- 	p = kzalloc(sizeof(*p), GFP_KERNEL);
- 	if (!p)
- 		return NULL;
--	kobject_set_name(&p->kobj, "%s", name);
--	p->kobj.parent = parent;
--	p->kobj.ktype = ktype;
- 	p->pd = pd;
--	if (kobject_register(&p->kobj) != 0) {
-+	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
-+	if (error) {
- 		kobject_put(&p->kobj);
- 		return NULL;
- 	}
-+	kobject_uevent(&p->kobj, KOBJ_ADD);
- 	return p;
- }
- /*
-@@ -129,7 +130,7 @@ static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
- static void pkt_kobj_remove(struct pktcdvd_kobj *p)
- {
- 	if (p)
--		kobject_unregister(&p->kobj);
-+		kobject_put(&p->kobj);
- }
- /*
-  * default release function for pktcdvd kernel objects.
-@@ -301,18 +302,16 @@ static struct kobj_type kobj_pkt_type_wqueue = {
- static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
+-static int i2c_inc_use_client(struct i2c_client *client)
++/**
++ * i2c_use_client - increments the reference count of the i2c client structure
++ * @client: the client being referenced
++ *
++ * Each live reference to a client should be refcounted. The driver model does
++ * that automatically as part of driver binding, so that most drivers don't
++ * need to do this explicitly: they hold a reference until they're unbound
++ * from the device.
++ *
++ * A pointer to the client with the incremented reference counter is returned.
++ */
++struct i2c_client *i2c_use_client(struct i2c_client *client)
  {
- 	if (class_pktcdvd) {
--		pd->clsdev = class_device_create(class_pktcdvd,
--					NULL, pd->pkt_dev,
--					NULL, "%s", pd->name);
--		if (IS_ERR(pd->clsdev))
--			pd->clsdev = NULL;
-+		pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, "%s", pd->name);
-+		if (IS_ERR(pd->dev))
-+			pd->dev = NULL;
- 	}
--	if (pd->clsdev) {
-+	if (pd->dev) {
- 		pd->kobj_stat = pkt_kobj_create(pd, "stat",
--					&pd->clsdev->kobj,
-+					&pd->dev->kobj,
- 					&kobj_pkt_type_stat);
- 		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
--					&pd->clsdev->kobj,
-+					&pd->dev->kobj,
- 					&kobj_pkt_type_wqueue);
- 	}
- }
-@@ -322,7 +321,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
- 	pkt_kobj_remove(pd->kobj_stat);
- 	pkt_kobj_remove(pd->kobj_wqueue);
- 	if (class_pktcdvd)
--		class_device_destroy(class_pktcdvd, pd->pkt_dev);
-+		device_destroy(class_pktcdvd, pd->pkt_dev);
- }
- 
- 
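
The pktcdvd hunk above also drops kobject_register()/kobject_unregister() in favour of the newer kobject helpers. A rough sketch of the resulting pattern (illustrative only, not from the patch; it assumes a kobj_type whose release method frees the object, as pktcdvd provides):

static struct kobject *example_kobj_create(struct kobject *parent,
					   struct kobj_type *ktype,
					   const char *name)
{
	struct kobject *kobj;
	int error;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (!kobj)
		return NULL;

	/* name, type and parent are set and the object added in one call */
	error = kobject_init_and_add(kobj, ktype, parent, "%s", name);
	if (error) {
		/* initialisation got far enough that the reference must be
		 * dropped through kobject_put(), not freed directly */
		kobject_put(kobj);
		return NULL;
	}
	/* kobject_register() used to emit this uevent implicitly */
	kobject_uevent(kobj, KOBJ_ADD);
	return kobj;
}

static void example_kobj_remove(struct kobject *kobj)
{
	if (kobj)
		kobject_put(kobj);	/* replaces kobject_unregister() */
}
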
-diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
-index e354bfc..7483f94 100644
---- a/drivers/block/ps3disk.c
-+++ b/drivers/block/ps3disk.c
-@@ -229,7 +229,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
- 	struct ps3_storage_device *dev = data;
- 	struct ps3disk_private *priv;
- 	struct request *req;
--	int res, read, uptodate;
-+	int res, read, error;
- 	u64 tag, status;
- 	unsigned long num_sectors;
- 	const char *op;
-@@ -270,21 +270,17 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
- 	if (status) {
- 		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
- 			__LINE__, op, status);
--		uptodate = 0;
-+		error = -EIO;
- 	} else {
- 		dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
- 			__LINE__, op);
--		uptodate = 1;
-+		error = 0;
- 		if (read)
- 			ps3disk_scatter_gather(dev, req, 0);
- 	}
- 
- 	spin_lock(&priv->lock);
--	if (!end_that_request_first(req, uptodate, num_sectors)) {
--		add_disk_randomness(req->rq_disk);
--		blkdev_dequeue_request(req);
--		end_that_request_last(req, uptodate);
+-
+-	if (!try_module_get(client->driver->driver.owner))
+-		return -ENODEV;
+-	if (!try_module_get(client->adapter->owner)) {
+-		module_put(client->driver->driver.owner);
+-		return -ENODEV;
 -	}
-+	__blk_end_request(req, error, num_sectors << 9);
- 	priv->req = NULL;
- 	ps3disk_do_request(dev, priv->queue);
- 	spin_unlock(&priv->lock);
-diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
-index fac4c6c..66e3015 100644
---- a/drivers/block/sunvdc.c
-+++ b/drivers/block/sunvdc.c
-@@ -212,12 +212,9 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
- 	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
+-
+-	return 0;
++	get_device(&client->dev);
++	return client;
  }
++EXPORT_SYMBOL(i2c_use_client);
  
--static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
-+static void vdc_end_request(struct request *req, int error, int num_sectors)
+-static void i2c_dec_use_client(struct i2c_client *client)
++/**
++ * i2c_release_client - release a use of the i2c client structure
++ * @client: the client being no longer referenced
++ *
++ * Must be called when a user of a client is finished with it.
++ */
++void i2c_release_client(struct i2c_client *client)
  {
--	if (end_that_request_first(req, uptodate, num_sectors))
--		return;
--	add_disk_randomness(req->rq_disk);
--	end_that_request_last(req, uptodate);
-+	__blk_end_request(req, error, num_sectors << 9);
+-	module_put(client->driver->driver.owner);
+-	module_put(client->adapter->owner);
++	put_device(&client->dev);
  }
++EXPORT_SYMBOL(i2c_release_client);
  
- static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
-@@ -242,7 +239,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
+-int i2c_use_client(struct i2c_client *client)
+-{
+-	int ret;
+-
+-	ret = i2c_inc_use_client(client);
+-	if (ret)
+-		return ret;
+-
+-	client->usage_count++;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL(i2c_use_client);
++struct i2c_cmd_arg {
++	unsigned	cmd;
++	void		*arg;
++};
  
- 	rqe->req = NULL;
+-int i2c_release_client(struct i2c_client *client)
++static int i2c_cmd(struct device *dev, void *_arg)
+ {
+-	if (!client->usage_count) {
+-		pr_debug("i2c-core: %s used one too many times\n",
+-			 __FUNCTION__);
+-		return -EPERM;
+-	}
+-
+-	client->usage_count--;
+-	i2c_dec_use_client(client);
++	struct i2c_client	*client = i2c_verify_client(dev);
++	struct i2c_cmd_arg	*arg = _arg;
  
--	vdc_end_request(req, !desc->status, desc->size >> 9);
-+	vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
++	if (client && client->driver && client->driver->command)
++		client->driver->command(client, arg->cmd, arg->arg);
+ 	return 0;
+ }
+-EXPORT_SYMBOL(i2c_release_client);
  
- 	if (blk_queue_stopped(port->disk->queue))
- 		blk_start_queue(port->disk->queue);
-@@ -456,7 +453,7 @@ static void do_vdc_request(struct request_queue *q)
+ void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg)
+ {
+-	struct list_head  *item;
+-	struct i2c_client *client;
++	struct i2c_cmd_arg	cmd_arg;
  
- 		blkdev_dequeue_request(req);
- 		if (__send_request(req) < 0)
--			vdc_end_request(req, 0, req->hard_nr_sectors);
-+			vdc_end_request(req, -EIO, req->hard_nr_sectors);
- 	}
+-	mutex_lock(&adap->clist_lock);
+-	list_for_each(item,&adap->clients) {
+-		client = list_entry(item, struct i2c_client, list);
+-		if (!try_module_get(client->driver->driver.owner))
+-			continue;
+-		if (NULL != client->driver->command) {
+-			mutex_unlock(&adap->clist_lock);
+-			client->driver->command(client,cmd,arg);
+-			mutex_lock(&adap->clist_lock);
+-		}
+-		module_put(client->driver->driver.owner);
+-       }
+-       mutex_unlock(&adap->clist_lock);
++	cmd_arg.cmd = cmd;
++	cmd_arg.arg = arg;
++	device_for_each_child(&adap->dev, &cmd_arg, i2c_cmd);
  }
+ EXPORT_SYMBOL(i2c_clients_command);
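
The i2c_clients_command() rewrite above is a template for the driver-model walk that replaces the hand-rolled client-list iteration under clist_lock: pack the arguments into a small struct and let device_for_each_child() invoke a callback for every child device of the adapter. A hedged sketch of the same shape, with made-up names (the callback contract is int (*fn)(struct device *, void *); a non-zero return stops the walk early):

struct example_walk_arg {
	unsigned	cmd;
	void		*arg;
};

static int example_walk_one(struct device *dev, void *data)
{
	struct i2c_client	*client = i2c_verify_client(dev);
	struct example_walk_arg	*warg = data;

	/* skip children of the adapter that are not i2c clients */
	if (client && client->driver && client->driver->command)
		client->driver->command(client, warg->cmd, warg->arg);
	return 0;	/* keep iterating */
}

static void example_walk_clients(struct i2c_adapter *adap,
				 unsigned int cmd, void *arg)
{
	struct example_walk_arg warg = { .cmd = cmd, .arg = arg };

	device_for_each_child(&adap->dev, &warg, example_walk_one);
}
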
  
-diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
-index 52dc5e1..cd5674b 100644
---- a/drivers/block/sx8.c
-+++ b/drivers/block/sx8.c
-@@ -744,16 +744,14 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
+@@ -848,11 +885,24 @@ static int __init i2c_init(void)
+ 	retval = bus_register(&i2c_bus_type);
+ 	if (retval)
+ 		return retval;
+-	return class_register(&i2c_adapter_class);
++	retval = class_register(&i2c_adapter_class);
++	if (retval)
++		goto bus_err;
++	retval = i2c_add_driver(&dummy_driver);
++	if (retval)
++		goto class_err;
++	return 0;
++
++class_err:
++	class_unregister(&i2c_adapter_class);
++bus_err:
++	bus_unregister(&i2c_bus_type);
++	return retval;
+ }
  
- static inline void carm_end_request_queued(struct carm_host *host,
- 					   struct carm_request *crq,
--					   int uptodate)
-+					   int error)
+ static void __exit i2c_exit(void)
  {
- 	struct request *req = crq->rq;
- 	int rc;
++	i2c_del_driver(&dummy_driver);
+ 	class_unregister(&i2c_adapter_class);
+ 	bus_unregister(&i2c_bus_type);
+ }
+@@ -879,7 +929,15 @@ int i2c_transfer(struct i2c_adapter * adap, struct i2c_msg *msgs, int num)
+ 		}
+ #endif
  
--	rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
-+	rc = __blk_end_request(req, error, blk_rq_bytes(req));
- 	assert(rc == 0);
+-		mutex_lock_nested(&adap->bus_lock, adap->level);
++		if (in_atomic() || irqs_disabled()) {
++			ret = mutex_trylock(&adap->bus_lock);
++			if (!ret)
++				/* I2C activity is ongoing. */
++				return -EAGAIN;
++		} else {
++			mutex_lock_nested(&adap->bus_lock, adap->level);
++		}
++
+ 		ret = adap->algo->master_xfer(adap,msgs,num);
+ 		mutex_unlock(&adap->bus_lock);
  
--	end_that_request_last(req, uptodate);
--
- 	rc = carm_put_request(host, crq);
- 	assert(rc == 0);
- }
-@@ -793,9 +791,9 @@ static inline void carm_round_robin(struct carm_host *host)
+@@ -978,7 +1036,7 @@ static int i2c_probe_address(struct i2c_adapter *adapter, int addr, int kind,
  }
  
- static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
--			int is_ok)
-+			       int error)
+ int i2c_probe(struct i2c_adapter *adapter,
+-	      struct i2c_client_address_data *address_data,
++	      const struct i2c_client_address_data *address_data,
+ 	      int (*found_proc) (struct i2c_adapter *, int, int))
  {
--	carm_end_request_queued(host, crq, is_ok);
-+	carm_end_request_queued(host, crq, error);
- 	if (max_queue == 1)
- 		carm_round_robin(host);
- 	else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
-@@ -873,14 +871,14 @@ queue_one_request:
- 	sg = &crq->sg[0];
- 	n_elem = blk_rq_map_sg(q, rq, sg);
- 	if (n_elem <= 0) {
--		carm_end_rq(host, crq, 0);
-+		carm_end_rq(host, crq, -EIO);
- 		return;		/* request with no s/g entries? */
- 	}
+ 	int i, err;
+@@ -987,7 +1045,7 @@ int i2c_probe(struct i2c_adapter *adapter,
+ 	/* Force entries are done first, and are not affected by ignore
+ 	   entries */
+ 	if (address_data->forces) {
+-		unsigned short **forces = address_data->forces;
++		const unsigned short * const *forces = address_data->forces;
+ 		int kind;
  
- 	/* map scatterlist to PCI bus addresses */
- 	n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
- 	if (n_elem <= 0) {
--		carm_end_rq(host, crq, 0);
-+		carm_end_rq(host, crq, -EIO);
- 		return;		/* request with no s/g entries? */
+ 		for (kind = 0; forces[kind]; kind++) {
+@@ -1085,7 +1143,6 @@ i2c_new_probed_device(struct i2c_adapter *adap,
+ 		return NULL;
  	}
- 	crq->n_elem = n_elem;
-@@ -941,7 +939,7 @@ queue_one_request:
- 
- static void carm_handle_array_info(struct carm_host *host,
- 				   struct carm_request *crq, u8 *mem,
--				   int is_ok)
-+				   int error)
- {
- 	struct carm_port *port;
- 	u8 *msg_data = mem + sizeof(struct carm_array_info);
-@@ -952,9 +950,9 @@ static void carm_handle_array_info(struct carm_host *host,
- 
- 	DPRINTK("ENTER\n");
  
--	carm_end_rq(host, crq, is_ok);
-+	carm_end_rq(host, crq, error);
+-	mutex_lock(&adap->clist_lock);
+ 	for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
+ 		/* Check address validity */
+ 		if (addr_list[i] < 0x03 || addr_list[i] > 0x77) {
+@@ -1095,7 +1152,7 @@ i2c_new_probed_device(struct i2c_adapter *adap,
+ 		}
  
--	if (!is_ok)
-+	if (error)
- 		goto out;
- 	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
- 		goto out;
-@@ -1001,7 +999,7 @@ out:
+ 		/* Check address availability */
+-		if (__i2c_check_addr(adap, addr_list[i])) {
++		if (i2c_check_addr(adap, addr_list[i])) {
+ 			dev_dbg(&adap->dev, "Address 0x%02x already in "
+ 				"use, not probing\n", addr_list[i]);
+ 			continue;
+@@ -1123,7 +1180,6 @@ i2c_new_probed_device(struct i2c_adapter *adap,
+ 				break;
+ 		}
+ 	}
+-	mutex_unlock(&adap->clist_lock);
  
- static void carm_handle_scan_chan(struct carm_host *host,
- 				  struct carm_request *crq, u8 *mem,
--				  int is_ok)
-+				  int error)
+ 	if (addr_list[i] == I2C_CLIENT_END) {
+ 		dev_dbg(&adap->dev, "Probing failed, no device found\n");
+@@ -1139,12 +1195,12 @@ struct i2c_adapter* i2c_get_adapter(int id)
  {
- 	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
- 	unsigned int i, dev_count = 0;
-@@ -1009,9 +1007,9 @@ static void carm_handle_scan_chan(struct carm_host *host,
- 
- 	DPRINTK("ENTER\n");
+ 	struct i2c_adapter *adapter;
  
--	carm_end_rq(host, crq, is_ok);
-+	carm_end_rq(host, crq, error);
+-	mutex_lock(&core_lists);
++	mutex_lock(&core_lock);
+ 	adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id);
+ 	if (adapter && !try_module_get(adapter->owner))
+ 		adapter = NULL;
  
--	if (!is_ok) {
-+	if (error) {
- 		new_state = HST_ERROR;
- 		goto out;
- 	}
-@@ -1033,23 +1031,23 @@ out:
+-	mutex_unlock(&core_lists);
++	mutex_unlock(&core_lock);
+ 	return adapter;
+ }
+ EXPORT_SYMBOL(i2c_get_adapter);
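
One behavioural change worth noting from the i2c_transfer() hunk above: the transfer path no longer sleeps unconditionally on the adapter's bus_lock; when called in atomic context (or with interrupts disabled) it only tries the lock and returns -EAGAIN if the bus is busy. A hedged sketch of that locking decision, mirroring the hunk (illustrative, not taken from the patch):

static int example_acquire_bus(struct i2c_adapter *adap)
{
	if (in_atomic() || irqs_disabled()) {
		/* cannot sleep here: give up if I2C activity is ongoing */
		if (!mutex_trylock(&adap->bus_lock))
			return -EAGAIN;
	} else {
		mutex_lock_nested(&adap->bus_lock, adap->level);
	}
	return 0;
}

Callers that may run in atomic context should therefore be prepared to see -EAGAIN from i2c_transfer() and retry or defer the transfer.
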
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index df540d5..393e679 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -182,27 +182,22 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c
+ 	return ret;
  }
  
- static void carm_handle_generic(struct carm_host *host,
--				struct carm_request *crq, int is_ok,
-+				struct carm_request *crq, int error,
- 				int cur_state, int next_state)
++static int i2cdev_check(struct device *dev, void *addrp)
++{
++	struct i2c_client *client = i2c_verify_client(dev);
++
++	if (!client || client->addr != *(unsigned int *)addrp)
++		return 0;
++
++	return dev->driver ? -EBUSY : 0;
++}
++
+ /* This address checking function differs from the one in i2c-core
+    in that it considers an address with a registered device, but no
+-   bound driver, as NOT busy. */
++   driver bound to it, as NOT busy. */
+ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
  {
- 	DPRINTK("ENTER\n");
- 
--	carm_end_rq(host, crq, is_ok);
-+	carm_end_rq(host, crq, error);
- 
- 	assert(host->state == cur_state);
--	if (is_ok)
--		host->state = next_state;
--	else
-+	if (error)
- 		host->state = HST_ERROR;
-+	else
-+		host->state = next_state;
- 	schedule_work(&host->fsm_task);
+-	struct list_head *item;
+-	struct i2c_client *client;
+-	int res = 0;
+-
+-	mutex_lock(&adapter->clist_lock);
+-	list_for_each(item, &adapter->clients) {
+-		client = list_entry(item, struct i2c_client, list);
+-		if (client->addr == addr) {
+-			if (client->driver)
+-				res = -EBUSY;
+-			break;
+-		}
+-	}
+-	mutex_unlock(&adapter->clist_lock);
+-
+-	return res;
++	return device_for_each_child(&adapter->dev, &addr, i2cdev_check);
  }
  
- static inline void carm_handle_rw(struct carm_host *host,
--				  struct carm_request *crq, int is_ok)
-+				  struct carm_request *crq, int error)
- {
- 	int pci_dir;
+ static int i2cdev_ioctl(struct inode *inode, struct file *file,
+diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
+index fb06555..64df55e 100644
+--- a/drivers/ide/Kconfig
++++ b/drivers/ide/Kconfig
+@@ -325,7 +325,7 @@ config BLK_DEV_PLATFORM
+ 	  If unsure, say N.
  
-@@ -1062,7 +1060,7 @@ static inline void carm_handle_rw(struct carm_host *host,
+ config BLK_DEV_CMD640
+-	bool "CMD640 chipset bugfix/support"
++	tristate "CMD640 chipset bugfix/support"
+ 	depends on X86
+ 	---help---
+ 	  The CMD-Technologies CMD640 IDE chip is used on many common 486 and
+@@ -359,9 +359,8 @@ config BLK_DEV_CMD640_ENHANCED
+ 	  Otherwise say N.
  
- 	pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
+ config BLK_DEV_IDEPNP
+-	bool "PNP EIDE support"
++	tristate "PNP EIDE support"
+ 	depends on PNP
+-	select IDE_GENERIC
+ 	help
+ 	  If you have a PnP (Plug and Play) compatible EIDE card and
+ 	  would like the kernel to automatically detect and activate
+@@ -374,19 +373,20 @@ comment "PCI IDE chipsets support"
+ config BLK_DEV_IDEPCI
+ 	bool
  
--	carm_end_rq(host, crq, is_ok);
-+	carm_end_rq(host, crq, error);
- }
+-config IDEPCI_SHARE_IRQ
+-	bool "Sharing PCI IDE interrupts support"
+-	depends on BLK_DEV_IDEPCI
++config IDEPCI_PCIBUS_ORDER
++	bool "Probe IDE PCI devices in the PCI bus order (DEPRECATED)"
++	depends on BLK_DEV_IDE=y && BLK_DEV_IDEPCI
++	default y
+ 	help
+-	  Some ATA/IDE chipsets have hardware support which allows for
+-	  sharing a single IRQ with other cards. To enable support for
+-	  this in the ATA/IDE driver, say Y here.
++	  Probe IDE PCI devices in the order in which they appear on the
++	  PCI bus (i.e. 00:1f.1 PCI device before 02:01.0 PCI device)
++	  instead of the order in which IDE PCI host drivers are loaded.
  
- static inline void carm_handle_resp(struct carm_host *host,
-@@ -1071,7 +1069,7 @@ static inline void carm_handle_resp(struct carm_host *host,
- 	u32 handle = le32_to_cpu(ret_handle_le);
- 	unsigned int msg_idx;
- 	struct carm_request *crq;
--	int is_ok = (status == RMSG_OK);
-+	int error = (status == RMSG_OK) ? 0 : -EIO;
- 	u8 *mem;
+-	  It is safe to say Y to this question, in most cases.
+-	  If unsure, say N.
++	  Please note that this method of assuring stable naming of
++	  IDE devices is unreliable and use other means for achieving
++	  it (i.e. udev).
  
- 	VPRINTK("ENTER, handle == 0x%x\n", handle);
-@@ -1090,7 +1088,7 @@ static inline void carm_handle_resp(struct carm_host *host,
- 	/* fast path */
- 	if (likely(crq->msg_type == CARM_MSG_READ ||
- 		   crq->msg_type == CARM_MSG_WRITE)) {
--		carm_handle_rw(host, crq, is_ok);
-+		carm_handle_rw(host, crq, error);
- 		return;
- 	}
+-config IDEPCI_PCIBUS_ORDER
+-	def_bool BLK_DEV_IDE=y && BLK_DEV_IDEPCI
++	  If in doubt, say N.
  
-@@ -1100,7 +1098,7 @@ static inline void carm_handle_resp(struct carm_host *host,
- 	case CARM_MSG_IOCTL: {
- 		switch (crq->msg_subtype) {
- 		case CARM_IOC_SCAN_CHAN:
--			carm_handle_scan_chan(host, crq, mem, is_ok);
-+			carm_handle_scan_chan(host, crq, mem, error);
- 			break;
- 		default:
- 			/* unknown / invalid response */
-@@ -1112,21 +1110,21 @@ static inline void carm_handle_resp(struct carm_host *host,
- 	case CARM_MSG_MISC: {
- 		switch (crq->msg_subtype) {
- 		case MISC_ALLOC_MEM:
--			carm_handle_generic(host, crq, is_ok,
-+			carm_handle_generic(host, crq, error,
- 					    HST_ALLOC_BUF, HST_SYNC_TIME);
- 			break;
- 		case MISC_SET_TIME:
--			carm_handle_generic(host, crq, is_ok,
-+			carm_handle_generic(host, crq, error,
- 					    HST_SYNC_TIME, HST_GET_FW_VER);
- 			break;
- 		case MISC_GET_FW_VER: {
- 			struct carm_fw_ver *ver = (struct carm_fw_ver *)
- 				mem + sizeof(struct carm_msg_get_fw_ver);
--			if (is_ok) {
-+			if (!error) {
- 				host->fw_ver = le32_to_cpu(ver->version);
- 				host->flags |= (ver->features & FL_FW_VER_MASK);
- 			}
--			carm_handle_generic(host, crq, is_ok,
-+			carm_handle_generic(host, crq, error,
- 					    HST_GET_FW_VER, HST_PORT_SCAN);
- 			break;
- 		}
-@@ -1140,7 +1138,7 @@ static inline void carm_handle_resp(struct carm_host *host,
- 	case CARM_MSG_ARRAY: {
- 		switch (crq->msg_subtype) {
- 		case CARM_ARRAY_INFO:
--			carm_handle_array_info(host, crq, mem, is_ok);
-+			carm_handle_array_info(host, crq, mem, error);
- 			break;
- 		default:
- 			/* unknown / invalid response */
-@@ -1159,7 +1157,7 @@ static inline void carm_handle_resp(struct carm_host *host,
- err_out:
- 	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
- 	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
--	carm_end_rq(host, crq, 0);
-+	carm_end_rq(host, crq, -EIO);
- }
+ # TODO: split it on per host driver config options (or module parameters)
+ config BLK_DEV_OFFBOARD
+@@ -707,7 +707,6 @@ config BLK_DEV_SVWKS
+ config BLK_DEV_SGIIOC4
+ 	tristate "Silicon Graphics IOC4 chipset ATA/ATAPI support"
+ 	depends on (IA64_SGI_SN2 || IA64_GENERIC) && SGI_IOC4
+-	select IDEPCI_SHARE_IRQ
+ 	select BLK_DEV_IDEDMA_PCI
+ 	help
+ 	  This driver adds PIO & MultiMode DMA-2 support for the SGI IOC4
+@@ -801,7 +800,7 @@ config BLK_DEV_CELLEB
+ endif
  
- static inline void carm_handle_responses(struct carm_host *host)
-diff --git a/drivers/block/ub.c b/drivers/block/ub.c
-index 08e909d..c6179d6 100644
---- a/drivers/block/ub.c
-+++ b/drivers/block/ub.c
-@@ -808,16 +808,16 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+ config BLK_DEV_IDE_PMAC
+-	bool "Builtin PowerMac IDE support"
++	tristate "Builtin PowerMac IDE support"
+ 	depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y
+ 	help
+ 	  This driver provides support for the built-in IDE controller on
+@@ -855,8 +854,9 @@ config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
+        depends on BLK_DEV_IDE_AU1XXX
  
- static void ub_end_rq(struct request *rq, unsigned int scsi_status)
- {
--	int uptodate;
-+	int error;
+ config IDE_ARM
+-	def_bool ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
+-	select IDE_GENERIC
++	tristate "ARM IDE support"
++	depends on ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
++	default y
  
- 	if (scsi_status == 0) {
--		uptodate = 1;
-+		error = 0;
- 	} else {
--		uptodate = 0;
-+		error = -EIO;
- 		rq->errors = scsi_status;
- 	}
--	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
--	end_that_request_last(rq, uptodate);
-+	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
-+		BUG();
- }
+ config BLK_DEV_IDE_ICSIDE
+ 	tristate "ICS IDE interface support"
+@@ -888,10 +888,9 @@ config BLK_DEV_IDE_BAST
+ 	  Simtec BAST or the Thorcom VR1000
  
- static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
-diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
-index ab5d404..9e61fca 100644
---- a/drivers/block/viodasd.c
-+++ b/drivers/block/viodasd.c
-@@ -229,13 +229,10 @@ static struct block_device_operations viodasd_fops = {
- /*
-  * End a request
-  */
--static void viodasd_end_request(struct request *req, int uptodate,
-+static void viodasd_end_request(struct request *req, int error,
- 		int num_sectors)
- {
--	if (end_that_request_first(req, uptodate, num_sectors))
--		return;
--	add_disk_randomness(req->rq_disk);
--	end_that_request_last(req, uptodate);
-+	__blk_end_request(req, error, num_sectors << 9);
- }
+ config ETRAX_IDE
+-	bool "ETRAX IDE support"
++	tristate "ETRAX IDE support"
+ 	depends on CRIS && BROKEN
+ 	select BLK_DEV_IDEDMA
+-	select IDE_GENERIC
+ 	help
+ 	  Enables the ETRAX IDE driver.
  
- /*
-@@ -374,12 +371,12 @@ static void do_viodasd_request(struct request_queue *q)
- 		blkdev_dequeue_request(req);
- 		/* check that request contains a valid command */
- 		if (!blk_fs_request(req)) {
--			viodasd_end_request(req, 0, req->hard_nr_sectors);
-+			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
- 			continue;
- 		}
- 		/* Try sending the request */
- 		if (send_request(req) != 0)
--			viodasd_end_request(req, 0, req->hard_nr_sectors);
-+			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
- 	}
- }
+@@ -923,17 +922,15 @@ config ETRAX_IDE_G27_RESET
+ endchoice
  
-@@ -591,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
- 	num_req_outstanding--;
- 	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
+ config IDE_H8300
+-	bool "H8300 IDE support"
++	tristate "H8300 IDE support"
+ 	depends on H8300
+-	select IDE_GENERIC
+ 	default y
+ 	help
+ 	  Enables the H8300 IDE driver.
  
--	error = event->xRc != HvLpEvent_Rc_Good;
-+	error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
- 	if (error) {
- 		const struct vio_error_entry *err;
- 		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
-@@ -601,7 +598,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
- 	}
- 	qlock = req->q->queue_lock;
- 	spin_lock_irqsave(qlock, irq_flags);
--	viodasd_end_request(req, !error, num_sect);
-+	viodasd_end_request(req, error, num_sect);
- 	spin_unlock_irqrestore(qlock, irq_flags);
+ config BLK_DEV_GAYLE
+-	bool "Amiga Gayle IDE interface support"
++	tristate "Amiga Gayle IDE interface support"
+ 	depends on AMIGA
+-	select IDE_GENERIC
+ 	help
+ 	  This is the IDE driver for the Amiga Gayle IDE interface. It supports
+ 	  both the `A1200 style' and `A4000 style' of the Gayle IDE interface,
+@@ -963,9 +960,8 @@ config BLK_DEV_IDEDOUBLER
+ 	  runtime using the "ide=doubler" kernel boot parameter.
  
- 	/* Finally, try to get more requests off of this device's queue */
-diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
-index 2bdebcb..8afce67 100644
---- a/drivers/block/xen-blkfront.c
-+++ b/drivers/block/xen-blkfront.c
-@@ -452,7 +452,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
- 	RING_IDX i, rp;
- 	unsigned long flags;
- 	struct blkfront_info *info = (struct blkfront_info *)dev_id;
--	int uptodate;
-+	int error;
+ config BLK_DEV_BUDDHA
+-	bool "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
++	tristate "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
+ 	depends on ZORRO && EXPERIMENTAL
+-	select IDE_GENERIC
+ 	help
+ 	  This is the IDE driver for the IDE interfaces on the Buddha, 
+ 	  Catweasel and X-Surf expansion boards.  It supports up to two interfaces 
+@@ -976,9 +972,8 @@ config BLK_DEV_BUDDHA
+ 	  to one of its IDE interfaces.
  
- 	spin_lock_irqsave(&blkif_io_lock, flags);
+ config BLK_DEV_FALCON_IDE
+-	bool "Falcon IDE interface support"
++	tristate "Falcon IDE interface support"
+ 	depends on ATARI
+-	select IDE_GENERIC
+ 	help
+ 	  This is the IDE driver for the builtin IDE interface on the Atari
+ 	  Falcon. Say Y if you have a Falcon and want to use IDE devices (hard
+@@ -986,9 +981,8 @@ config BLK_DEV_FALCON_IDE
+ 	  interface.
  
-@@ -477,13 +477,13 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+ config BLK_DEV_MAC_IDE
+-	bool "Macintosh Quadra/Powerbook IDE interface support"
++	tristate "Macintosh Quadra/Powerbook IDE interface support"
+ 	depends on MAC
+-	select IDE_GENERIC
+ 	help
+ 	  This is the IDE driver for the builtin IDE interface on some m68k
+ 	  Macintosh models. It supports both the `Quadra style' (used in
+@@ -1000,18 +994,16 @@ config BLK_DEV_MAC_IDE
+ 	  builtin IDE interface.
  
- 		add_id_to_freelist(info, id);
+ config BLK_DEV_Q40IDE
+-	bool "Q40/Q60 IDE interface support"
++	tristate "Q40/Q60 IDE interface support"
+ 	depends on Q40
+-	select IDE_GENERIC
+ 	help
+ 	  Enable the on-board IDE controller in the Q40/Q60.  This should
+ 	  normally be on; disable it only if you are running a custom hard
+ 	  drive subsystem through an expansion card.
  
--		uptodate = (bret->status == BLKIF_RSP_OKAY);
-+		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
- 		switch (bret->operation) {
- 		case BLKIF_OP_WRITE_BARRIER:
- 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
- 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
- 				       info->gd->disk_name);
--				uptodate = -EOPNOTSUPP;
-+				error = -EOPNOTSUPP;
- 				info->feature_barrier = 0;
- 				xlvbd_barrier(info);
- 			}
-@@ -494,10 +494,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
- 				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
- 					"request: %x\n", bret->status);
+ config BLK_DEV_MPC8xx_IDE
+-	bool "MPC8xx IDE support"
++	tristate "MPC8xx IDE support"
+ 	depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE
+-	select IDE_GENERIC
+ 	help
+ 	  This option provides support for IDE on Motorola MPC8xx Systems.
+ 	  Please see 'Type of MPC8xx IDE interface' for details.
+diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
+index b181fc6..0d2da89 100644
+--- a/drivers/ide/Makefile
++++ b/drivers/ide/Makefile
+@@ -7,41 +7,37 @@
+ # Note : at this point, these files are compiled on all systems.
+ # In the future, some of these should be built conditionally.
+ #
+-# First come modules that register themselves with the core
++# link order is important here
  
--			ret = end_that_request_first(req, uptodate,
--				req->hard_nr_sectors);
-+			ret = __blk_end_request(req, error, blk_rq_bytes(req));
- 			BUG_ON(ret);
--			end_that_request_last(req, uptodate);
- 			break;
- 		default:
- 			BUG();
-diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
-index 82effce..78ebfff 100644
---- a/drivers/block/xsysace.c
-+++ b/drivers/block/xsysace.c
-@@ -483,7 +483,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
- 	u32 status;
- 	u16 val;
- 	int count;
--	int i;
+ EXTRA_CFLAGS				+= -Idrivers/ide
  
- #if defined(DEBUG)
- 	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
-@@ -688,7 +687,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
- 		}
+-obj-$(CONFIG_BLK_DEV_IDE)		+= pci/
+-
+ ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o
  
- 		/* Transfer the next buffer */
--		i = 16;
- 		if (ace->fsm_task == ACE_TASK_WRITE)
- 			ace->reg_ops->dataout(ace);
- 		else
-@@ -702,8 +700,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
- 		}
+-ide-core-$(CONFIG_BLK_DEV_CMD640)	+= pci/cmd640.o
+-
+-# Core IDE code - must come before legacy
++# core IDE code
+ ide-core-$(CONFIG_BLK_DEV_IDEPCI)	+= setup-pci.o
+ ide-core-$(CONFIG_BLK_DEV_IDEDMA)	+= ide-dma.o
+ ide-core-$(CONFIG_IDE_PROC_FS)		+= ide-proc.o
+-ide-core-$(CONFIG_BLK_DEV_IDEPNP)	+= ide-pnp.o
+ ide-core-$(CONFIG_BLK_DEV_IDEACPI)	+= ide-acpi.o
  
- 		/* bio finished; is there another one? */
--		i = ace->req->current_nr_sectors;
--		if (end_that_request_first(ace->req, 1, i)) {
-+		if (__blk_end_request(ace->req, 0,
-+					blk_rq_cur_bytes(ace->req))) {
- 			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
- 			 *      ace->req->hard_nr_sectors,
- 			 *      ace->req->current_nr_sectors);
-@@ -718,9 +716,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
- 		break;
+-# built-in only drivers from arm/
+-ide-core-$(CONFIG_IDE_ARM)		+= arm/ide_arm.o
++obj-$(CONFIG_BLK_DEV_IDE)		+= ide-core.o
  
- 	case ACE_FSM_STATE_REQ_COMPLETE:
--		/* Complete the block request */
--		blkdev_dequeue_request(ace->req);
--		end_that_request_last(ace->req, 1);
- 		ace->req = NULL;
+-# built-in only drivers from legacy/
+-ide-core-$(CONFIG_BLK_DEV_BUDDHA)	+= legacy/buddha.o
+-ide-core-$(CONFIG_BLK_DEV_FALCON_IDE)	+= legacy/falconide.o
+-ide-core-$(CONFIG_BLK_DEV_GAYLE)	+= legacy/gayle.o
+-ide-core-$(CONFIG_BLK_DEV_MAC_IDE)	+= legacy/macide.o
+-ide-core-$(CONFIG_BLK_DEV_Q40IDE)	+= legacy/q40ide.o
++ifeq ($(CONFIG_IDE_ARM), y)
++	ide-arm-core-y += arm/ide_arm.o
++	obj-y += ide-arm-core.o
++endif
  
- 		/* Finished request; go to idle state */
-diff --git a/drivers/cdrom/Makefile b/drivers/cdrom/Makefile
-index 774c180..ecf85fd 100644
---- a/drivers/cdrom/Makefile
-+++ b/drivers/cdrom/Makefile
-@@ -11,3 +11,4 @@ obj-$(CONFIG_PARIDE_PCD)	+=		cdrom.o
- obj-$(CONFIG_CDROM_PKTCDVD)	+=		cdrom.o
+-# built-in only drivers from ppc/
+-ide-core-$(CONFIG_BLK_DEV_MPC8xx_IDE)	+= ppc/mpc8xx.o
+-ide-core-$(CONFIG_BLK_DEV_IDE_PMAC)	+= ppc/pmac.o
++obj-$(CONFIG_BLK_DEV_IDE)		+= legacy/ pci/
  
- obj-$(CONFIG_VIOCD)		+= viocd.o      cdrom.o
-+obj-$(CONFIG_GDROM)		+= gdrom.o      cdrom.o
-diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
-new file mode 100644
-index 0000000..4e2bbcc
---- /dev/null
-+++ b/drivers/cdrom/gdrom.c
-@@ -0,0 +1,867 @@
-+/* GD ROM driver for the SEGA Dreamcast
-+ * copyright Adrian McMenamin, 2007
-+ * With thanks to Marcus Comstedt and Nathan Keynes
-+ * for work in reversing PIO and DMA
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along
-+ * with this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/kernel.h>
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/cdrom.h>
-+#include <linux/genhd.h>
-+#include <linux/bio.h>
-+#include <linux/blkdev.h>
-+#include <linux/interrupt.h>
-+#include <linux/device.h>
-+#include <linux/wait.h>
-+#include <linux/workqueue.h>
-+#include <linux/platform_device.h>
-+#include <scsi/scsi.h>
-+#include <asm/io.h>
-+#include <asm/dma.h>
-+#include <asm/delay.h>
-+#include <asm/mach/dma.h>
-+#include <asm/mach/sysasic.h>
-+
-+#define GDROM_DEV_NAME "gdrom"
-+#define GD_SESSION_OFFSET 150
-+
-+/* GD Rom commands */
-+#define GDROM_COM_SOFTRESET 0x08
-+#define GDROM_COM_EXECDIAG 0x90
-+#define GDROM_COM_PACKET 0xA0
-+#define GDROM_COM_IDDEV 0xA1
-+
-+/* GD Rom registers */
-+#define GDROM_BASE_REG			0xA05F7000
-+#define GDROM_ALTSTATUS_REG		(GDROM_BASE_REG + 0x18)
-+#define GDROM_DATA_REG			(GDROM_BASE_REG + 0x80)
-+#define GDROM_ERROR_REG		(GDROM_BASE_REG + 0x84)
-+#define GDROM_INTSEC_REG		(GDROM_BASE_REG + 0x88)
-+#define GDROM_SECNUM_REG		(GDROM_BASE_REG + 0x8C)
-+#define GDROM_BCL_REG			(GDROM_BASE_REG + 0x90)
-+#define GDROM_BCH_REG			(GDROM_BASE_REG + 0x94)
-+#define GDROM_DSEL_REG			(GDROM_BASE_REG + 0x98)
-+#define GDROM_STATUSCOMMAND_REG	(GDROM_BASE_REG + 0x9C)
-+#define GDROM_RESET_REG		(GDROM_BASE_REG + 0x4E4)
-+
-+#define GDROM_DMA_STARTADDR_REG	(GDROM_BASE_REG + 0x404)
-+#define GDROM_DMA_LENGTH_REG		(GDROM_BASE_REG + 0x408)
-+#define GDROM_DMA_DIRECTION_REG	(GDROM_BASE_REG + 0x40C)
-+#define GDROM_DMA_ENABLE_REG		(GDROM_BASE_REG + 0x414)
-+#define GDROM_DMA_STATUS_REG		(GDROM_BASE_REG + 0x418)
-+#define GDROM_DMA_WAIT_REG		(GDROM_BASE_REG + 0x4A0)
-+#define GDROM_DMA_ACCESS_CTRL_REG	(GDROM_BASE_REG + 0x4B8)
-+
-+#define GDROM_HARD_SECTOR	2048
-+#define BLOCK_LAYER_SECTOR	512
-+#define GD_TO_BLK		4
-+
-+#define GDROM_DEFAULT_TIMEOUT	(HZ * 7)
-+
-+static const struct {
-+	int sense_key;
-+	const char * const text;
-+} sense_texts[] = {
-+	{NO_SENSE, "OK"},
-+	{RECOVERED_ERROR, "Recovered from error"},
-+	{NOT_READY, "Device not ready"},
-+	{MEDIUM_ERROR, "Disk not ready"},
-+	{HARDWARE_ERROR, "Hardware error"},
-+	{ILLEGAL_REQUEST, "Command has failed"},
-+	{UNIT_ATTENTION, "Device needs attention - disk may have been changed"},
-+	{DATA_PROTECT, "Data protection error"},
-+	{ABORTED_COMMAND, "Command aborted"},
-+};
-+
-+static struct platform_device *pd;
-+static int gdrom_major;
-+static DECLARE_WAIT_QUEUE_HEAD(command_queue);
-+static DECLARE_WAIT_QUEUE_HEAD(request_queue);
-+
-+static DEFINE_SPINLOCK(gdrom_lock);
-+static void gdrom_readdisk_dma(struct work_struct *work);
-+static DECLARE_WORK(work, gdrom_readdisk_dma);
-+static LIST_HEAD(gdrom_deferred);
-+
-+struct gdromtoc {
-+	unsigned int entry[99];
-+	unsigned int first, last;
-+	unsigned int leadout;
-+};
-+
-+static struct gdrom_unit {
-+	struct gendisk *disk;
-+	struct cdrom_device_info *cd_info;
-+	int status;
-+	int pending;
-+	int transfer;
-+	char disk_type;
-+	struct gdromtoc *toc;
-+	struct request_queue *gdrom_rq;
-+} gd;
-+
-+struct gdrom_id {
-+	char mid;
-+	char modid;
-+	char verid;
-+	char padA[13];
-+	char mname[16];
-+	char modname[16];
-+	char firmver[16];
-+	char padB[16];
-+};
-+
-+static int gdrom_getsense(short *bufstring);
-+static int gdrom_packetcommand(struct cdrom_device_info *cd_info,
-+	struct packet_command *command);
-+static int gdrom_hardreset(struct cdrom_device_info *cd_info);
-+
-+static bool gdrom_is_busy(void)
-+{
-+	return (ctrl_inb(GDROM_ALTSTATUS_REG) & 0x80) != 0;
-+}
-+
-+static bool gdrom_data_request(void)
-+{
-+	return (ctrl_inb(GDROM_ALTSTATUS_REG) & 0x88) == 8;
-+}
-+
-+static bool gdrom_wait_clrbusy(void)
-+{
-+	unsigned long timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
-+	while ((ctrl_inb(GDROM_ALTSTATUS_REG) & 0x80) &&
-+		(time_before(jiffies, timeout)))
-+		cpu_relax();
-+	return time_before(jiffies, timeout + 1);
-+}
-+
-+static bool gdrom_wait_busy_sleeps(void)
-+{
-+	unsigned long timeout;
-+	/* Wait to get busy first */
-+	timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
-+	while (!gdrom_is_busy() && time_before(jiffies, timeout))
-+		cpu_relax();
-+	/* Now wait for busy to clear */
-+	return gdrom_wait_clrbusy();
-+}
-+
-+static void gdrom_identifydevice(void *buf)
-+{
-+	int c;
-+	short *data = buf;
-+	/* If the device won't clear it has probably
-+	* been hit by a serious failure - but we'll
-+	* try to return a sense key even so */
-+	if (!gdrom_wait_clrbusy()) {
-+		gdrom_getsense(NULL);
-+		return;
-+	}
-+	ctrl_outb(GDROM_COM_IDDEV, GDROM_STATUSCOMMAND_REG);
-+	if (!gdrom_wait_busy_sleeps()) {
-+		gdrom_getsense(NULL);
-+		return;
-+	}
-+	/* now read in the data */
-+	for (c = 0; c < 40; c++)
-+		data[c] = ctrl_inw(GDROM_DATA_REG);
-+}
-+
-+static void gdrom_spicommand(void *spi_string, int buflen)
-+{
-+	short *cmd = spi_string;
-+	unsigned long timeout;
-+
-+	/* ensure IRQ_WAIT is set */
-+	ctrl_outb(0x08, GDROM_ALTSTATUS_REG);
-+	/* specify how many bytes we expect back */
-+	ctrl_outb(buflen & 0xFF, GDROM_BCL_REG);
-+	ctrl_outb((buflen >> 8) & 0xFF, GDROM_BCH_REG);
-+	/* other parameters */
-+	ctrl_outb(0, GDROM_INTSEC_REG);
-+	ctrl_outb(0, GDROM_SECNUM_REG);
-+	ctrl_outb(0, GDROM_ERROR_REG);
-+	/* Wait until we can go */
-+	if (!gdrom_wait_clrbusy()) {
-+		gdrom_getsense(NULL);
-+		return;
-+	}
-+	timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
-+	ctrl_outb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
-+	while (!gdrom_data_request() && time_before(jiffies, timeout))
-+		cpu_relax();
-+	if (!time_before(jiffies, timeout + 1)) {
-+		gdrom_getsense(NULL);
-+		return;
-+	}
-+	outsw(PHYSADDR(GDROM_DATA_REG), cmd, 6);
-+}
-+
-+
-+/* gdrom_command_executediagnostic:
-+ * Used to probe for presence of working GDROM
-+ * Restarts GDROM device and then applies standard ATA 3
-+ * Execute Diagnostic Command: a return of '1' indicates device 0
-+ * present and device 1 absent
-+ */
-+static char gdrom_execute_diagnostic(void)
-+{
-+	gdrom_hardreset(gd.cd_info);
-+	if (!gdrom_wait_clrbusy())
-+		return 0;
-+	ctrl_outb(GDROM_COM_EXECDIAG, GDROM_STATUSCOMMAND_REG);
-+	if (!gdrom_wait_busy_sleeps())
-+		return 0;
-+	return ctrl_inb(GDROM_ERROR_REG);
-+}
-+
-+/*
-+ * Prepare disk command
-+ * byte 0 = 0x70
-+ * byte 1 = 0x1f
-+ */
-+static int gdrom_preparedisk_cmd(void)
-+{
-+	struct packet_command *spin_command;
-+	spin_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
-+	if (!spin_command)
-+		return -ENOMEM;
-+	spin_command->cmd[0] = 0x70;
-+	spin_command->cmd[2] = 0x1f;
-+	spin_command->buflen = 0;
-+	gd.pending = 1;
-+	gdrom_packetcommand(gd.cd_info, spin_command);
-+	/* 60 second timeout */
-+	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
-+		GDROM_DEFAULT_TIMEOUT);
-+	gd.pending = 0;
-+	kfree(spin_command);
-+	if (gd.status & 0x01) {
-+		/* log an error */
-+		gdrom_getsense(NULL);
-+		return -EIO;
-+	}
-+	return 0;
-+}
-+
-+/*
-+ * Read TOC command
-+ * byte 0 = 0x14
-+ * byte 1 = session
-+ * byte 3 = sizeof TOC >> 8  ie upper byte
-+ * byte 4 = sizeof TOC & 0xff ie lower byte
-+ */
-+static int gdrom_readtoc_cmd(struct gdromtoc *toc, int session)
-+{
-+	int tocsize;
-+	struct packet_command *toc_command;
-+	int err = 0;
-+
-+	toc_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
-+	if (!toc_command)
-+		return -ENOMEM;
-+	tocsize = sizeof(struct gdromtoc);
-+	toc_command->cmd[0] = 0x14;
-+	toc_command->cmd[1] = session;
-+	toc_command->cmd[3] = tocsize >> 8;
-+	toc_command->cmd[4] = tocsize & 0xff;
-+	toc_command->buflen = tocsize;
-+	if (gd.pending) {
-+		err = -EBUSY;
-+		goto cleanup_readtoc_final;
-+	}
-+	gd.pending = 1;
-+	gdrom_packetcommand(gd.cd_info, toc_command);
-+	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
-+		GDROM_DEFAULT_TIMEOUT);
-+	if (gd.pending) {
-+		err = -EINVAL;
-+		goto cleanup_readtoc;
-+	}
-+	insw(PHYSADDR(GDROM_DATA_REG), toc, tocsize/2);
-+	if (gd.status & 0x01)
-+		err = -EINVAL;
-+
-+cleanup_readtoc:
-+	gd.pending = 0;
-+cleanup_readtoc_final:
-+	kfree(toc_command);
-+	return err;
-+}
-+
-+/* TOC helpers */
-+static int get_entry_lba(int track)
-+{
-+	return (cpu_to_be32(track & 0xffffff00) - GD_SESSION_OFFSET);
-+}
-+
-+static int get_entry_q_ctrl(int track)
-+{
-+	return (track & 0x000000f0) >> 4;
-+}
-+
-+static int get_entry_track(int track)
-+{
-+	return (track & 0x0000ff00) >> 8;
-+}
-+
-+static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
-+	struct cdrom_multisession *ms_info)
-+{
-+	int fentry, lentry, track, data, tocuse, err;
-+	if (!gd.toc)
-+		return -ENOMEM;
-+	tocuse = 1;
-+	/* Check if GD-ROM */
-+	err = gdrom_readtoc_cmd(gd.toc, 1);
-+	/* Not a GD-ROM so check if standard CD-ROM */
-+	if (err) {
-+		tocuse = 0;
-+		err = gdrom_readtoc_cmd(gd.toc, 0);
-+		if (err) {
-+			printk(KERN_INFO "GDROM: Could not get CD "
-+				"table of contents\n");
-+			return -ENXIO;
-+		}
-+	}
-+
-+	fentry = get_entry_track(gd.toc->first);
-+	lentry = get_entry_track(gd.toc->last);
-+	/* Find the first data track */
-+	track = get_entry_track(gd.toc->last);
-+	do {
-+		data = gd.toc->entry[track - 1];
-+		if (get_entry_q_ctrl(data))
-+			break;	/* ie a real data track */
-+		track--;
-+	} while (track >= fentry);
-+
-+	if ((track > 100) || (track < get_entry_track(gd.toc->first))) {
-+		printk(KERN_INFO "GDROM: No data on the last "
-+			"session of the CD\n");
-+		gdrom_getsense(NULL);
-+		return -ENXIO;
-+	}
-+
-+	ms_info->addr_format = CDROM_LBA;
-+	ms_info->addr.lba = get_entry_lba(data);
-+	ms_info->xa_flag = 1;
-+	return 0;
-+}
-+
-+static int gdrom_open(struct cdrom_device_info *cd_info, int purpose)
-+{
-+	/* spin up the disk */
-+	return gdrom_preparedisk_cmd();
-+}
+-# built-in only drivers from h8300/
+-ide-core-$(CONFIG_IDE_H8300)		+= h8300/ide-h8300.o
++obj-$(CONFIG_IDEPCI_PCIBUS_ORDER)	+= ide-scan-pci.o
+ 
+-obj-$(CONFIG_BLK_DEV_IDE)		+= ide-core.o
++ifeq ($(CONFIG_BLK_DEV_CMD640), y)
++	cmd640-core-y += pci/cmd640.o
++	obj-y += cmd640-core.o
++endif
 +
-+/* this function is required even if empty */
-+static void gdrom_release(struct cdrom_device_info *cd_info)
-+{
-+}
++obj-$(CONFIG_BLK_DEV_IDE)		+= cris/ ppc/
++obj-$(CONFIG_BLK_DEV_IDEPNP)		+= ide-pnp.o
++obj-$(CONFIG_IDE_H8300)			+= h8300/
+ obj-$(CONFIG_IDE_GENERIC)		+= ide-generic.o
+ 
+ obj-$(CONFIG_BLK_DEV_IDEDISK)		+= ide-disk.o
+@@ -49,6 +45,20 @@ obj-$(CONFIG_BLK_DEV_IDECD)		+= ide-cd.o
+ obj-$(CONFIG_BLK_DEV_IDETAPE)		+= ide-tape.o
+ obj-$(CONFIG_BLK_DEV_IDEFLOPPY)		+= ide-floppy.o
+ 
+-obj-$(CONFIG_BLK_DEV_IDE)		+= legacy/ arm/ mips/
+-obj-$(CONFIG_BLK_DEV_HD)		+= legacy/
+-obj-$(CONFIG_ETRAX_IDE)		+= cris/
++ifeq ($(CONFIG_BLK_DEV_IDECS), y)
++	ide-cs-core-y += legacy/ide-cs.o
++	obj-y += ide-cs-core.o
++endif
 +
-+static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
-+{
-+	/* read the sense key */
-+	char sense = ctrl_inb(GDROM_ERROR_REG);
-+	sense &= 0xF0;
-+	if (sense == 0)
-+		return CDS_DISC_OK;
-+	if (sense == 0x20)
-+		return CDS_DRIVE_NOT_READY;
-+	/* default */
-+	return CDS_NO_INFO;
-+}
++ifeq ($(CONFIG_BLK_DEV_PLATFORM), y)
++	ide-platform-core-y += legacy/ide_platform.o
++	obj-y += ide-platform-core.o
++endif
 +
-+static int gdrom_mediachanged(struct cdrom_device_info *cd_info, int ignore)
-+{
-+	/* check the sense key */
-+	return (ctrl_inb(GDROM_ERROR_REG) & 0xF0) == 0x60;
-+}
++obj-$(CONFIG_BLK_DEV_IDE)		+= arm/ mips/
 +
-+/* reset the G1 bus */
-+static int gdrom_hardreset(struct cdrom_device_info *cd_info)
-+{
-+	int count;
-+	ctrl_outl(0x1fffff, GDROM_RESET_REG);
-+	for (count = 0xa0000000; count < 0xa0200000; count += 4)
-+		ctrl_inl(count);
-+	return 0;
-+}
++# old hd driver must be last
++ifeq ($(CONFIG_BLK_DEV_HD), y)
++	hd-core-y += legacy/hd.o
++	obj-y += hd-core.o
++endif
+diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
+index 6a78f07..5f63ad2 100644
+--- a/drivers/ide/arm/Makefile
++++ b/drivers/ide/arm/Makefile
+@@ -3,4 +3,8 @@ obj-$(CONFIG_BLK_DEV_IDE_ICSIDE)	+= icside.o
+ obj-$(CONFIG_BLK_DEV_IDE_RAPIDE)	+= rapide.o
+ obj-$(CONFIG_BLK_DEV_IDE_BAST)		+= bast-ide.o
+ 
++ifeq ($(CONFIG_IDE_ARM), m)
++	obj-m += ide_arm.o
++endif
 +
-+/* keep the function looking like the universal
-+ * CD Rom specification  - returning int */
-+static int gdrom_packetcommand(struct cdrom_device_info *cd_info,
-+	struct packet_command *command)
-+{
-+	gdrom_spicommand(&command->cmd, command->buflen);
-+	return 0;
-+}
+ EXTRA_CFLAGS	:= -Idrivers/ide
+diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
+index 48db616..45bf9c8 100644
+--- a/drivers/ide/arm/bast-ide.c
++++ b/drivers/ide/arm/bast-ide.c
+@@ -45,7 +45,7 @@ bastide_register(unsigned int base, unsigned int aux, int irq,
+ 	hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
+ 	hw.irq = irq;
+ 
+-	ide_register_hw(&hw, NULL, 0, hwif);
++	ide_register_hw(&hw, NULL, hwif);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
+index 93f71fc..8a5c720 100644
+--- a/drivers/ide/arm/icside.c
++++ b/drivers/ide/arm/icside.c
+@@ -272,8 +272,6 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
+ 	case XFER_SW_DMA_0:
+ 		cycle_time = 480;
+ 		break;
+-	default:
+-		return;
+ 	}
+ 
+ 	/*
+@@ -289,26 +287,10 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
+ 		ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
+ }
+ 
+-static void icside_dma_host_off(ide_drive_t *drive)
++static void icside_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+-static void icside_dma_off_quietly(ide_drive_t *drive)
+-{
+-	drive->using_dma = 0;
+-}
+-
+-static void icside_dma_host_on(ide_drive_t *drive)
+-{
+-}
+-
+-static int icside_dma_on(ide_drive_t *drive)
+-{
+-	drive->using_dma = 1;
+-
+-	return 0;
+-}
+-
+ static int icside_dma_end(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+@@ -424,10 +406,7 @@ static void icside_dma_init(ide_hwif_t *hwif)
+ 	hwif->dmatable_dma	= 0;
+ 	hwif->set_dma_mode	= icside_set_dma_mode;
+ 
+-	hwif->dma_host_off	= icside_dma_host_off;
+-	hwif->dma_off_quietly	= icside_dma_off_quietly;
+-	hwif->dma_host_on	= icside_dma_host_on;
+-	hwif->ide_dma_on	= icside_dma_on;
++	hwif->dma_host_set	= icside_dma_host_set;
+ 	hwif->dma_setup		= icside_dma_setup;
+ 	hwif->dma_exec_cmd	= icside_dma_exec_cmd;
+ 	hwif->dma_start		= icside_dma_start;
+diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
+index 8957cba..60f2497 100644
+--- a/drivers/ide/arm/ide_arm.c
++++ b/drivers/ide/arm/ide_arm.c
+@@ -24,12 +24,25 @@
+ # define IDE_ARM_IRQ	IRQ_HARDDISK
+ #endif
+ 
+-void __init ide_arm_init(void)
++static int __init ide_arm_init(void)
+ {
++	ide_hwif_t *hwif;
+ 	hw_regs_t hw;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	memset(&hw, 0, sizeof(hw));
+ 	ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206);
+ 	hw.irq = IDE_ARM_IRQ;
+-	ide_register_hw(&hw, NULL, 1, NULL);
 +
-+/* Get Sense SPI command
-+ * From Marcus Comstedt
-+ * cmd = 0x13
-+ * cmd + 4 = length of returned buffer
-+ * Returns 5 16 bit words
-+ */
-+static int gdrom_getsense(short *bufstring)
-+{
-+	struct packet_command *sense_command;
-+	short sense[5];
-+	int sense_key;
-+	int err = -EIO;
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		ide_init_port_hw(hwif, &hw);
++		idx[0] = hwif->index;
 +
-+	sense_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
-+	if (!sense_command)
-+		return -ENOMEM;
-+	sense_command->cmd[0] = 0x13;
-+	sense_command->cmd[4] = 10;
-+	sense_command->buflen = 10;
-+	/* even if something is pending try to get
-+	* the sense key if possible */
-+	if (gd.pending && !gdrom_wait_clrbusy()) {
-+		err = -EBUSY;
-+		goto cleanup_sense_final;
-+	}
-+	gd.pending = 1;
-+	gdrom_packetcommand(gd.cd_info, sense_command);
-+	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
-+		GDROM_DEFAULT_TIMEOUT);
-+	if (gd.pending)
-+		goto cleanup_sense;
-+	insw(PHYSADDR(GDROM_DATA_REG), &sense, sense_command->buflen/2);
-+	if (sense[1] & 40) {
-+		printk(KERN_INFO "GDROM: Drive not ready - command aborted\n");
-+		goto cleanup_sense;
++		ide_device_add(idx);
 +	}
-+	sense_key = sense[1] & 0x0F;
-+	if (sense_key < ARRAY_SIZE(sense_texts))
-+		printk(KERN_INFO "GDROM: %s\n", sense_texts[sense_key].text);
-+	else
-+		printk(KERN_ERR "GDROM: Unknown sense key: %d\n", sense_key);
-+	if (bufstring) /* return addional sense data */
-+		memcpy(bufstring, &sense[4], 2);
-+	if (sense_key < 2)
-+		err = 0;
 +
-+cleanup_sense:
-+	gd.pending = 0;
-+cleanup_sense_final:
-+	kfree(sense_command);
-+	return err;
-+}
++	return 0;
+ }
 +
-+static struct cdrom_device_ops gdrom_ops = {
-+	.open			= gdrom_open,
-+	.release		= gdrom_release,
-+	.drive_status		= gdrom_drivestatus,
-+	.media_changed		= gdrom_mediachanged,
-+	.get_last_session	= gdrom_get_last_session,
-+	.reset			= gdrom_hardreset,
-+	.capability		= CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
-+				  CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
-+	.n_minors		= 1,
-+};
++module_init(ide_arm_init);
+diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
+index 0775a3a..e6b56d1 100644
+--- a/drivers/ide/arm/rapide.c
++++ b/drivers/ide/arm/rapide.c
+@@ -13,26 +13,18 @@
+ 
+ #include <asm/ecard.h>
+ 
+-static ide_hwif_t *
+-rapide_locate_hwif(void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq)
++static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
++			       void __iomem *ctrl, unsigned int sz, int irq)
+ {
+ 	unsigned long port = (unsigned long)base;
+-	ide_hwif_t *hwif = ide_find_port(port);
+ 	int i;
+ 
+-	if (hwif == NULL)
+-		goto out;
+-
+ 	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
+-		hwif->io_ports[i] = port;
++		hw->io_ports[i] = port;
+ 		port += sz;
+ 	}
+-	hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+-	hwif->irq = irq;
+-	hwif->mmio = 1;
+-	default_hwif_mmiops(hwif);
+-out:
+-	return hwif;
++	hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
++	hw->irq = irq;
+ }
+ 
+ static int __devinit
+@@ -42,6 +34,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
+ 	void __iomem *base;
+ 	int ret;
+ 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
++	hw_regs_t hw;
+ 
+ 	ret = ecard_request_resources(ec);
+ 	if (ret)
+@@ -53,11 +46,17 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
+ 		goto release;
+ 	}
+ 
+-	hwif = rapide_locate_hwif(base, base + 0x818, 1 << 6, ec->irq);
++	hwif = ide_find_port((unsigned long)base);
+ 	if (hwif) {
+-		hwif->hwif_data = base;
+-		hwif->gendev.parent = &ec->dev;
+-		hwif->noprobe = 0;
++		memset(&hw, 0, sizeof(hw));
++		rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
++		hw.chipset = ide_generic;
++		hw.dev = &ec->dev;
 +
-+static int gdrom_bdops_open(struct inode *inode, struct file *file)
-+{
-+	return cdrom_open(gd.cd_info, inode, file);
-+}
++		ide_init_port_hw(hwif, &hw);
 +
-+static int gdrom_bdops_release(struct inode *inode, struct file *file)
-+{
-+	return cdrom_release(gd.cd_info, file);
-+}
++		hwif->mmio = 1;
++		default_hwif_mmiops(hwif);
+ 
+ 		idx[0] = hwif->index;
+ 
+diff --git a/drivers/ide/cris/Makefile b/drivers/ide/cris/Makefile
+index 6176e8d..20b9596 100644
+--- a/drivers/ide/cris/Makefile
++++ b/drivers/ide/cris/Makefile
+@@ -1,3 +1,3 @@
+ EXTRA_CFLAGS				+= -Idrivers/ide
+ 
+-obj-y					+= ide-cris.o
++obj-$(CONFIG_IDE_ETRAX)			+= ide-cris.o
+diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
+index 476e0d6..8c3294c 100644
+--- a/drivers/ide/cris/ide-cris.c
++++ b/drivers/ide/cris/ide-cris.c
+@@ -673,9 +673,8 @@ static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
+ static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
+ static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
+ static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
+-static int cris_dma_on (ide_drive_t *drive);
+ 
+-static void cris_dma_off(ide_drive_t *drive)
++static void cris_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+@@ -747,8 +746,6 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 			strobe = ATA_DMA2_STROBE;
+ 			hold = ATA_DMA2_HOLD;
+ 			break;
+-		default:
+-			return;
+ 	}
+ 
+ 	if (speed >= XFER_UDMA_0)
+@@ -757,13 +754,11 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 		cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
+ }
+ 
+-void __init
+-init_e100_ide (void)
++static int __init init_e100_ide(void)
+ {
+ 	hw_regs_t hw;
+-	int ide_offsets[IDE_NR_PORTS];
+-	int h;
+-	int i;
++	int ide_offsets[IDE_NR_PORTS], h, i;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	printk("ide: ETRAX FS built-in ATA DMA controller\n");
+ 
+@@ -780,9 +775,11 @@ init_e100_ide (void)
+ 		                ide_offsets,
+ 		                0, 0, cris_ide_ack_intr,
+ 		                ide_default_irq(0));
+-		ide_register_hw(&hw, NULL, 1, &hwif);
++		hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ 		if (hwif == NULL)
+ 			continue;
++		ide_init_port_data(hwif, hwif->index);
++		ide_init_port_hw(hwif, &hw);
+ 		hwif->mmio = 1;
+ 		hwif->chipset = ide_etrax100;
+ 		hwif->set_pio_mode = &cris_set_pio_mode;
+@@ -791,6 +788,7 @@ init_e100_ide (void)
+ 		hwif->ata_output_data = &cris_ide_output_data;
+ 		hwif->atapi_input_bytes = &cris_atapi_input_bytes;
+ 		hwif->atapi_output_bytes = &cris_atapi_output_bytes;
++		hwif->dma_host_set = &cris_dma_host_set;
+ 		hwif->ide_dma_end = &cris_dma_end;
+ 		hwif->dma_setup = &cris_dma_setup;
+ 		hwif->dma_exec_cmd = &cris_dma_exec_cmd;
+@@ -801,9 +799,6 @@ init_e100_ide (void)
+ 		hwif->OUTBSYNC = &cris_ide_outbsync;
+ 		hwif->INB = &cris_ide_inb;
+ 		hwif->INW = &cris_ide_inw;
+-		hwif->dma_host_off = &cris_dma_off;
+-		hwif->dma_host_on = &cris_dma_on;
+-		hwif->dma_off_quietly = &cris_dma_off;
+ 		hwif->cbl = ATA_CBL_PATA40;
+ 		hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
+ 		hwif->pio_mask = ATA_PIO4,
+@@ -811,6 +806,8 @@ init_e100_ide (void)
+ 		hwif->drives[1].autotune = 1;
+ 		hwif->ultra_mask = cris_ultra_mask;
+ 		hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
 +
-+static int gdrom_bdops_mediachanged(struct gendisk *disk)
-+{
-+	return cdrom_media_changed(gd.cd_info);
-+}
++		idx[h] = hwif->index;
+ 	}
+ 
+ 	/* Reset pulse */
+@@ -823,14 +820,12 @@ init_e100_ide (void)
+ 	cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD);
+ 	cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD);
+ 	cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
+-}
+ 
+-static int cris_dma_on (ide_drive_t *drive)
+-{
++	ide_device_add(idx);
 +
-+static int gdrom_bdops_ioctl(struct inode *inode, struct file *file,
-+	unsigned cmd, unsigned long arg)
-+{
-+	return cdrom_ioctl(file, gd.cd_info, inode, cmd, arg);
-+}
+ 	return 0;
+ }
+ 
+-
+ static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16)));
+ 
+ /*
+@@ -1062,3 +1057,5 @@ static void cris_dma_start(ide_drive_t *drive)
+ 		LED_DISK_READ(1);
+ 	}
+ }
 +
-+static struct block_device_operations gdrom_bdops = {
-+	.owner			= THIS_MODULE,
-+	.open			= gdrom_bdops_open,
-+	.release		= gdrom_bdops_release,
-+	.media_changed		= gdrom_bdops_mediachanged,
-+	.ioctl			= gdrom_bdops_ioctl,
-+};
++module_init(init_e100_ide);
+diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile
+new file mode 100644
+index 0000000..5eba16f
+--- /dev/null
++++ b/drivers/ide/h8300/Makefile
+@@ -0,0 +1,2 @@
 +
-+static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
-+{
-+	gd.status = ctrl_inb(GDROM_STATUSCOMMAND_REG);
-+	if (gd.pending != 1)
-+		return IRQ_HANDLED;
-+	gd.pending = 0;
-+	wake_up_interruptible(&command_queue);
-+	return IRQ_HANDLED;
-+}
++obj-$(CONFIG_IDE_H8300)			+= ide-h8300.o
+diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
+index 4a49b5c..4f6d019 100644
+--- a/drivers/ide/h8300/ide-h8300.c
++++ b/drivers/ide/h8300/ide-h8300.c
+@@ -84,11 +84,12 @@ static inline void hwif_setup(ide_hwif_t *hwif)
+ 	hwif->INSL  = NULL;
+ }
+ 
+-void __init h8300_ide_init(void)
++static int __init h8300_ide_init(void)
+ {
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int idx;
++	int index;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
+ 		goto out_busy;
+@@ -100,16 +101,28 @@ void __init h8300_ide_init(void)
+ 	hw_setup(&hw);
+ 
+ 	/* register if */
+-	idx = ide_register_hw(&hw, NULL, 1, &hwif);
+-	if (idx == -1) {
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif == NULL) {
+ 		printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
+-		return;
++		return -ENOENT;
+ 	}
+ 
++	index = hwif->index;
++	ide_init_port_data(hwif, index);
++	ide_init_port_hw(hwif, &hw);
+ 	hwif_setup(hwif);
+-	printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", idx);
+-	return;
++	printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", index);
 +
-+static irqreturn_t gdrom_dma_interrupt(int irq, void *dev_id)
-+{
-+	gd.status = ctrl_inb(GDROM_STATUSCOMMAND_REG);
-+	if (gd.transfer != 1)
-+		return IRQ_HANDLED;
-+	gd.transfer = 0;
-+	wake_up_interruptible(&request_queue);
-+	return IRQ_HANDLED;
-+}
++	idx[0] = index;
 +
-+static int __devinit gdrom_set_interrupt_handlers(void)
-+{
-+	int err;
++	ide_device_add(idx);
 +
-+	err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
-+		IRQF_DISABLED, "gdrom_command", &gd);
-+	if (err)
-+		return err;
-+	err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
-+		IRQF_DISABLED, "gdrom_dma", &gd);
-+	if (err)
-+		free_irq(HW_EVENT_GDROM_CMD, &gd);
-+	return err;
-+}
++	return 0;
+ 
+ out_busy:
+ 	printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
 +
-+/* Implement DMA read using SPI command
-+ * 0 -> 0x30
-+ * 1 -> mode
-+ * 2 -> block >> 16
-+ * 3 -> block >> 8
-+ * 4 -> block
-+ * 8 -> sectors >> 16
-+ * 9 -> sectors >> 8
-+ * 10 -> sectors
++	return -EBUSY;
+ }
++
++module_init(h8300_ide_init);
+diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
+index 899d565..e888fc3 100644
+--- a/drivers/ide/ide-acpi.c
++++ b/drivers/ide/ide-acpi.c
+@@ -383,27 +383,19 @@ static int taskfile_load_raw(ide_drive_t *drive,
+ 	       gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]);
+ 
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	args.command_type = IDE_DRIVE_TASK_NO_DATA;
+-	args.data_phase   = TASKFILE_NO_DATA;
+-	args.handler      = &task_no_data_intr;
+ 
+ 	/* convert gtf to IDE Taskfile */
+-	args.tfRegister[1] = gtf->tfa[0];	/* 0x1f1 */
+-	args.tfRegister[2] = gtf->tfa[1];	/* 0x1f2 */
+-	args.tfRegister[3] = gtf->tfa[2];	/* 0x1f3 */
+-	args.tfRegister[4] = gtf->tfa[3];	/* 0x1f4 */
+-	args.tfRegister[5] = gtf->tfa[4];	/* 0x1f5 */
+-	args.tfRegister[6] = gtf->tfa[5];	/* 0x1f6 */
+-	args.tfRegister[7] = gtf->tfa[6];	/* 0x1f7 */
++	memcpy(&args.tf_array[7], &gtf->tfa, 7);
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ 
+ 	if (ide_noacpitfs) {
+ 		DEBPRINT("_GTF execution disabled\n");
+ 		return err;
+ 	}
+ 
+-	err = ide_raw_taskfile(drive, &args, NULL);
++	err = ide_no_data_taskfile(drive, &args);
+ 	if (err)
+-		printk(KERN_ERR "%s: ide_raw_taskfile failed: %u\n",
++		printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
+ 		       __FUNCTION__, err);
+ 
+ 	return err;
+diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
+index c7d77f0..74c6087 100644
+--- a/drivers/ide/ide-cd.c
++++ b/drivers/ide/ide-cd.c
+@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
+ 					BUG();
+ 			} else {
+ 				spin_lock_irqsave(&ide_lock, flags);
+-				end_that_request_chunk(failed, 0,
+-							failed->data_len);
+-				end_that_request_last(failed, 0);
++				if (__blk_end_request(failed, -EIO,
++						      failed->data_len))
++					BUG();
+ 				spin_unlock_irqrestore(&ide_lock, flags);
+ 			}
+ 		} else
+@@ -917,19 +917,13 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
+ 	if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
+ 		return startstop;
+ 
++	/* FIXME: for Virtual DMA we must check harder */
+ 	if (info->dma)
+ 		info->dma = !hwif->dma_setup(drive);
+ 
+ 	/* Set up the controller registers. */
+-	/* FIXME: for Virtual DMA we must check harder */
+-	HWIF(drive)->OUTB(info->dma, IDE_FEATURE_REG);
+-	HWIF(drive)->OUTB(0, IDE_IREASON_REG);
+-	HWIF(drive)->OUTB(0, IDE_SECTOR_REG);
+-
+-	HWIF(drive)->OUTB(xferlen & 0xff, IDE_BCOUNTL_REG);
+-	HWIF(drive)->OUTB(xferlen >> 8  , IDE_BCOUNTH_REG);
+-	if (IDE_CONTROL_REG)
+-		HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
++	ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL |
++			   IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma);
+  
+ 	if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
+ 		/* waiting for CDB interrupt, not DMA yet. */
+@@ -1653,6 +1647,17 @@ static int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason)
+ 	return 1;
+ }
+ 
++/*
++ * Called from blk_end_request_callback() after the data of the request
++ * is completed and before the request is completed.
++ * By returning value '1', blk_end_request_callback() returns immediately
++ * without completing the request.
 + */
-+static void gdrom_readdisk_dma(struct work_struct *work)
++static int cdrom_newpc_intr_dummy_cb(struct request *rq)
 +{
-+	int err, block, block_cnt;
-+	struct packet_command *read_command;
-+	struct list_head *elem, *next;
-+	struct request *req;
-+	unsigned long timeout;
-+
-+	if (list_empty(&gdrom_deferred))
-+		return;
-+	read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
-+	if (!read_command)
-+		return; /* get more memory later? */
-+	read_command->cmd[0] = 0x30;
-+	read_command->cmd[1] = 0x20;
-+	spin_lock(&gdrom_lock);
-+	list_for_each_safe(elem, next, &gdrom_deferred) {
-+		req = list_entry(elem, struct request, queuelist);
-+		spin_unlock(&gdrom_lock);
-+		block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
-+		block_cnt = req->nr_sectors/GD_TO_BLK;
-+		ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
-+		ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
-+		ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
-+		ctrl_outl(1, GDROM_DMA_ENABLE_REG);
-+		read_command->cmd[2] = (block >> 16) & 0xFF;
-+		read_command->cmd[3] = (block >> 8) & 0xFF;
-+		read_command->cmd[4] = block & 0xFF;
-+		read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
-+		read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
-+		read_command->cmd[10] = block_cnt & 0xFF;
-+		/* set for DMA */
-+		ctrl_outb(1, GDROM_ERROR_REG);
-+		/* other registers */
-+		ctrl_outb(0, GDROM_SECNUM_REG);
-+		ctrl_outb(0, GDROM_BCL_REG);
-+		ctrl_outb(0, GDROM_BCH_REG);
-+		ctrl_outb(0, GDROM_DSEL_REG);
-+		ctrl_outb(0, GDROM_INTSEC_REG);
-+		/* Wait for registers to reset after any previous activity */
-+		timeout = jiffies + HZ / 2;
-+		while (gdrom_is_busy() && time_before(jiffies, timeout))
-+			cpu_relax();
-+		ctrl_outb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
-+		timeout = jiffies + HZ / 2;
-+		/* Wait for packet command to finish */
-+		while (gdrom_is_busy() && time_before(jiffies, timeout))
-+			cpu_relax();
-+		gd.pending = 1;
-+		gd.transfer = 1;
-+		outsw(PHYSADDR(GDROM_DATA_REG), &read_command->cmd, 6);
-+		timeout = jiffies + HZ / 2;
-+		/* Wait for any pending DMA to finish */
-+		while (ctrl_inb(GDROM_DMA_STATUS_REG) &&
-+			time_before(jiffies, timeout))
-+			cpu_relax();
-+		/* start transfer */
-+		ctrl_outb(1, GDROM_DMA_STATUS_REG);
-+		wait_event_interruptible_timeout(request_queue,
-+			gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
-+		err = gd.transfer;
-+		gd.transfer = 0;
-+		gd.pending = 0;
-+		/* now seek to take the request spinlock
-+		* before handling ending the request */
-+		spin_lock(&gdrom_lock);
-+		list_del_init(&req->queuelist);
-+		end_dequeued_request(req, 1 - err);
-+	}
-+	spin_unlock(&gdrom_lock);
-+	kfree(read_command);
++	return 1;
 +}
 +
-+static void gdrom_request_handler_dma(struct request *req)
-+{
-+	/* dequeue, add to list of deferred work
-+	* and then schedule workqueue */
-+	blkdev_dequeue_request(req);
-+	list_add_tail(&req->queuelist, &gdrom_deferred);
-+	schedule_work(&work);
-+}
+ typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
+ 
+ /*
+@@ -1691,9 +1696,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
+ 			return ide_error(drive, "dma error", stat);
+ 		}
+ 
+-		end_that_request_chunk(rq, 1, rq->data_len);
+-		rq->data_len = 0;
+-		goto end_request;
++		spin_lock_irqsave(&ide_lock, flags);
++		if (__blk_end_request(rq, 0, rq->data_len))
++			BUG();
++		HWGROUP(drive)->rq = NULL;
++		spin_unlock_irqrestore(&ide_lock, flags);
 +
-+static void gdrom_request(struct request_queue *rq)
-+{
-+	struct request *req;
++		return ide_stopped;
+ 	}
+ 
+ 	/*
+@@ -1711,8 +1720,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
+ 	/*
+ 	 * If DRQ is clear, the command has completed.
+ 	 */
+-	if ((stat & DRQ_STAT) == 0)
+-		goto end_request;
++	if ((stat & DRQ_STAT) == 0) {
++		spin_lock_irqsave(&ide_lock, flags);
++		if (__blk_end_request(rq, 0, 0))
++			BUG();
++		HWGROUP(drive)->rq = NULL;
++		spin_unlock_irqrestore(&ide_lock, flags);
 +
-+	while ((req = elv_next_request(rq)) != NULL) {
-+		if (!blk_fs_request(req)) {
-+			printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
-+			end_request(req, 0);
-+		}
-+		if (rq_data_dir(req) != READ) {
-+			printk(KERN_NOTICE "GDROM: Read only device -");
-+			printk(" write request ignored\n");
-+			end_request(req, 0);
-+		}
-+		if (req->nr_sectors)
-+			gdrom_request_handler_dma(req);
-+		else
-+			end_request(req, 0);
++		return ide_stopped;
 +	}
-+}
-+
-+/* Print string identifying GD ROM device */
-+static int __devinit gdrom_outputversion(void)
-+{
-+	struct gdrom_id *id;
-+	char *model_name, *manuf_name, *firmw_ver;
-+	int err = -ENOMEM;
-+
-+	/* query device ID */
-+	id = kzalloc(sizeof(struct gdrom_id), GFP_KERNEL);
-+	if (!id)
-+		return err;
-+	gdrom_identifydevice(id);
-+	model_name = kstrndup(id->modname, 16, GFP_KERNEL);
-+	if (!model_name)
-+		goto free_id;
-+	manuf_name = kstrndup(id->mname, 16, GFP_KERNEL);
-+	if (!manuf_name)
-+		goto free_model_name;
-+	firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL);
-+	if (!firmw_ver)
-+		goto free_manuf_name;
-+	printk(KERN_INFO "GDROM: %s from %s with firmware %s\n",
-+		model_name, manuf_name, firmw_ver);
-+	err = 0;
-+	kfree(firmw_ver);
-+free_manuf_name:
-+	kfree(manuf_name);
-+free_model_name:
-+	kfree(model_name);
-+free_id:
-+	kfree(id);
-+	return err;
-+}
-+
-+/* set the default mode for DMA transfer */
-+static int __devinit gdrom_init_dma_mode(void)
-+{
-+	ctrl_outb(0x13, GDROM_ERROR_REG);
-+	ctrl_outb(0x22, GDROM_INTSEC_REG);
-+	if (!gdrom_wait_clrbusy())
-+		return -EBUSY;
-+	ctrl_outb(0xEF, GDROM_STATUSCOMMAND_REG);
-+	if (!gdrom_wait_busy_sleeps())
-+		return -EBUSY;
-+	/* Memory protection setting for GDROM DMA
-+	* Bits 31 - 16 security: 0x8843
-+	* Bits 15 and 7 reserved (0)
-+	* Bits 14 - 8 start of transfer range in 1 MB blocks OR'ed with 0x80
-+	* Bits 6 - 0 end of transfer range in 1 MB blocks OR'ed with 0x80
-+	* (0x40 | 0x80) = start range at 0x0C000000
-+	* (0x7F | 0x80) = end range at 0x0FFFFFFF */
-+	ctrl_outl(0x8843407F, GDROM_DMA_ACCESS_CTRL_REG);
-+	ctrl_outl(9, GDROM_DMA_WAIT_REG); /* DMA word setting */
-+	return 0;
-+}
-+
-+static void __devinit probe_gdrom_setupcd(void)
-+{
-+	gd.cd_info->ops = &gdrom_ops;
-+	gd.cd_info->capacity = 1;
-+	strcpy(gd.cd_info->name, GDROM_DEV_NAME);
-+	gd.cd_info->mask = CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|
-+		CDC_SELECT_DISC;
-+}
-+
-+static void __devinit probe_gdrom_setupdisk(void)
-+{
-+	gd.disk->major = gdrom_major;
-+	gd.disk->first_minor = 1;
-+	gd.disk->minors = 1;
-+	strcpy(gd.disk->disk_name, GDROM_DEV_NAME);
-+}
+ 
+ 	/*
+ 	 * check which way to transfer data
+@@ -1765,7 +1781,14 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
+ 		rq->data_len -= blen;
+ 
+ 		if (rq->bio)
+-			end_that_request_chunk(rq, 1, blen);
++			/*
++			 * The request can't be completed until DRQ is cleared.
++			 * So complete the data, but don't complete the request
++			 * using the dummy function for the callback feature
++			 * of blk_end_request_callback().
++			 */
++			blk_end_request_callback(rq, 0, blen,
++						 cdrom_newpc_intr_dummy_cb);
+ 		else
+ 			rq->data += blen;
+ 	}
+@@ -1786,14 +1809,6 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
+ 
+ 	ide_set_handler(drive, cdrom_newpc_intr, rq->timeout, NULL);
+ 	return ide_started;
+-
+-end_request:
+-	spin_lock_irqsave(&ide_lock, flags);
+-	blkdev_dequeue_request(rq);
+-	end_that_request_last(rq, 1);
+-	HWGROUP(drive)->rq = NULL;
+-	spin_unlock_irqrestore(&ide_lock, flags);
+-	return ide_stopped;
+ }
+ 
+ static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
+diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
+index b178190..717e114 100644
+--- a/drivers/ide/ide-disk.c
++++ b/drivers/ide/ide-disk.c
+@@ -129,6 +129,50 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
+ 	return 0;	/* lba_capacity value may be bad */
+ }
+ 
++static const u8 ide_rw_cmds[] = {
++	WIN_MULTREAD,
++	WIN_MULTWRITE,
++	WIN_MULTREAD_EXT,
++	WIN_MULTWRITE_EXT,
++	WIN_READ,
++	WIN_WRITE,
++	WIN_READ_EXT,
++	WIN_WRITE_EXT,
++	WIN_READDMA,
++	WIN_WRITEDMA,
++	WIN_READDMA_EXT,
++	WIN_WRITEDMA_EXT,
++};
 +
-+static int __devinit probe_gdrom_setupqueue(void)
-+{
-+	blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
-+	/* using DMA so memory will need to be contiguous */
-+	blk_queue_max_hw_segments(gd.gdrom_rq, 1);
-+	/* set a large max size to get most from DMA */
-+	blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
-+	gd.disk->queue = gd.gdrom_rq;
-+	return gdrom_init_dma_mode();
-+}
++static const u8 ide_data_phases[] = {
++	TASKFILE_MULTI_IN,
++	TASKFILE_MULTI_OUT,
++	TASKFILE_IN,
++	TASKFILE_OUT,
++	TASKFILE_IN_DMA,
++	TASKFILE_OUT_DMA,
++};
 +
-+/*
-+ * register this as a block device and as compliant with the
-+ * universal CD Rom driver interface
-+ */
-+static int __devinit probe_gdrom(struct platform_device *devptr)
++static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
 +{
-+	int err;
-+	/* Start the device */
-+	if (gdrom_execute_diagnostic() != 1) {
-+		printk(KERN_WARNING "GDROM: ATA Probe for GDROM failed.\n");
-+		return -ENODEV;
-+	}
-+	/* Print out firmware ID */
-+	if (gdrom_outputversion())
-+		return -ENOMEM;
-+	/* Register GDROM */
-+	gdrom_major = register_blkdev(0, GDROM_DEV_NAME);
-+	if (gdrom_major <= 0)
-+		return gdrom_major;
-+	printk(KERN_INFO "GDROM: Registered with major number %d\n",
-+		gdrom_major);
-+	/* Specify basic properties of drive */
-+	gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL);
-+	if (!gd.cd_info) {
-+		err = -ENOMEM;
-+		goto probe_fail_no_mem;
-+	}
-+	probe_gdrom_setupcd();
-+	gd.disk = alloc_disk(1);
-+	if (!gd.disk) {
-+		err = -ENODEV;
-+		goto probe_fail_no_disk;
-+	}
-+	probe_gdrom_setupdisk();
-+	if (register_cdrom(gd.cd_info)) {
-+		err = -ENODEV;
-+		goto probe_fail_cdrom_register;
-+	}
-+	gd.disk->fops = &gdrom_bdops;
-+	/* latch on to the interrupt */
-+	err = gdrom_set_interrupt_handlers();
-+	if (err)
-+		goto probe_fail_cmdirq_register;
-+	gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
-+	if (!gd.gdrom_rq)
-+		goto probe_fail_requestq;
-+
-+	err = probe_gdrom_setupqueue();
-+	if (err)
-+		goto probe_fail_toc;
-+
-+	gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL);
-+	if (!gd.toc)
-+		goto probe_fail_toc;
-+	add_disk(gd.disk);
-+	return 0;
++	u8 index, lba48, write;
 +
-+probe_fail_toc:
-+	blk_cleanup_queue(gd.gdrom_rq);
-+probe_fail_requestq:
-+	free_irq(HW_EVENT_GDROM_DMA, &gd);
-+	free_irq(HW_EVENT_GDROM_CMD, &gd);
-+probe_fail_cmdirq_register:
-+probe_fail_cdrom_register:
-+	del_gendisk(gd.disk);
-+probe_fail_no_disk:
-+	kfree(gd.cd_info);
-+	unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
-+	gdrom_major = 0;
-+probe_fail_no_mem:
-+	printk(KERN_WARNING "GDROM: Probe failed - error is 0x%X\n", err);
-+	return err;
-+}
++	lba48 = (task->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
++	write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
 +
-+static int __devexit remove_gdrom(struct platform_device *devptr)
-+{
-+	flush_scheduled_work();
-+	blk_cleanup_queue(gd.gdrom_rq);
-+	free_irq(HW_EVENT_GDROM_CMD, &gd);
-+	free_irq(HW_EVENT_GDROM_DMA, &gd);
-+	del_gendisk(gd.disk);
-+	if (gdrom_major)
-+		unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
-+	return unregister_cdrom(gd.cd_info);
-+}
++	if (dma)
++		index = drive->vdma ? 4 : 8;
++	else
++		index = drive->mult_count ? 0 : 4;
 +
-+static struct platform_driver gdrom_driver = {
-+	.probe = probe_gdrom,
-+	.remove = __devexit_p(remove_gdrom),
-+	.driver = {
-+			.name = GDROM_DEV_NAME,
-+	},
-+};
++	task->tf.command = ide_rw_cmds[index + lba48 + write];
 +
-+static int __init init_gdrom(void)
-+{
-+	int rc;
-+	gd.toc = NULL;
-+	rc = platform_driver_register(&gdrom_driver);
-+	if (rc)
-+		return rc;
-+	pd = platform_device_register_simple(GDROM_DEV_NAME, -1, NULL, 0);
-+	if (IS_ERR(pd)) {
-+		platform_driver_unregister(&gdrom_driver);
-+		return PTR_ERR(pd);
-+	}
-+	return 0;
-+}
++	if (dma)
++		index = 8; /* fixup index */
 +
-+static void __exit exit_gdrom(void)
-+{
-+	platform_device_unregister(pd);
-+	platform_driver_unregister(&gdrom_driver);
-+	kfree(gd.toc);
++	task->data_phase = ide_data_phases[index / 2 + write];
 +}
 +
-+module_init(init_gdrom);
-+module_exit(exit_gdrom);
-+MODULE_AUTHOR("Adrian McMenamin <adrian at mcmen.demon.co.uk>");
-+MODULE_DESCRIPTION("SEGA Dreamcast GD-ROM Driver");
-+MODULE_LICENSE("GPL");
-diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
-index d8bb44b..8473b9f 100644
---- a/drivers/cdrom/viocd.c
-+++ b/drivers/cdrom/viocd.c
-@@ -289,7 +289,7 @@ static int send_request(struct request *req)
- 	return 0;
- }
- 
--static void viocd_end_request(struct request *req, int uptodate)
-+static void viocd_end_request(struct request *req, int error)
+ /*
+  * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
+  * using LBA if supported, or CHS otherwise, to address sectors.
+@@ -137,11 +181,11 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
  {
- 	int nsectors = req->hard_nr_sectors;
- 
-@@ -302,11 +302,8 @@ static void viocd_end_request(struct request *req, int uptodate)
- 	if (!nsectors)
- 		nsectors = 1;
- 
--	if (end_that_request_first(req, uptodate, nsectors))
-+	if (__blk_end_request(req, error, nsectors << 9))
- 		BUG();
--	add_disk_randomness(req->rq_disk);
--	blkdev_dequeue_request(req);
--	end_that_request_last(req, uptodate);
- }
- 
- static int rwreq;
-@@ -317,11 +314,11 @@ static void do_viocd_request(struct request_queue *q)
+ 	ide_hwif_t *hwif	= HWIF(drive);
+ 	unsigned int dma	= drive->using_dma;
++	u16 nsectors		= (u16)rq->nr_sectors;
+ 	u8 lba48		= (drive->addressing == 1) ? 1 : 0;
+-	task_ioreg_t command	= WIN_NOP;
+-	ata_nsector_t		nsectors;
+-
+-	nsectors.all		= (u16) rq->nr_sectors;
++	ide_task_t		task;
++	struct ide_taskfile	*tf = &task.tf;
++	ide_startstop_t		rc;
  
- 	while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
- 		if (!blk_fs_request(req))
--			viocd_end_request(req, 0);
-+			viocd_end_request(req, -EIO);
- 		else if (send_request(req) < 0) {
- 			printk(VIOCD_KERN_WARNING
- 					"unable to send message to OS/400!");
--			viocd_end_request(req, 0);
-+			viocd_end_request(req, -EIO);
- 		} else
- 			rwreq++;
+ 	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
+ 		if (block + rq->nr_sectors > 1ULL << 28)
+@@ -155,121 +199,71 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ 		ide_map_sg(drive, rq);
  	}
-@@ -532,9 +529,9 @@ return_complete:
- 					"with rc %d:0x%04X: %s\n",
- 					req, event->xRc,
- 					bevent->sub_result, err->msg);
--			viocd_end_request(req, 0);
-+			viocd_end_request(req, -EIO);
- 		} else
--			viocd_end_request(req, 1);
-+			viocd_end_request(req, 0);
  
- 		/* restart handling of incoming requests */
- 		spin_unlock_irqrestore(&viocd_reqlock, flags);
-diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
-index 2e3a0d4..4666295 100644
---- a/drivers/char/Kconfig
-+++ b/drivers/char/Kconfig
-@@ -373,6 +373,16 @@ config ISTALLION
- 	  To compile this driver as a module, choose M here: the
- 	  module will be called istallion.
+-	if (IDE_CONTROL_REG)
+-		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
+-
+-	/* FIXME: SELECT_MASK(drive, 0) ? */
++	memset(&task, 0, sizeof(task));
++	task.tf_flags = IDE_TFLAG_NO_SELECT_MASK;  /* FIXME? */
++	task.tf_flags |= (IDE_TFLAG_TF | IDE_TFLAG_DEVICE);
  
-+config NOZOMI
-+	tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
-+	depends on PCI && EXPERIMENTAL
-+	help
-+	  If you have a HSDPA driver Broadband Wireless Data Card -
-+	  Globe Trotter PCMCIA card, say Y here.
+ 	if (drive->select.b.lba) {
+ 		if (lba48) {
+-			task_ioreg_t tasklets[10];
+-
+ 			pr_debug("%s: LBA=0x%012llx\n", drive->name,
+ 					(unsigned long long)block);
+ 
+-			tasklets[0] = 0;
+-			tasklets[1] = 0;
+-			tasklets[2] = nsectors.b.low;
+-			tasklets[3] = nsectors.b.high;
+-			tasklets[4] = (task_ioreg_t) block;
+-			tasklets[5] = (task_ioreg_t) (block>>8);
+-			tasklets[6] = (task_ioreg_t) (block>>16);
+-			tasklets[7] = (task_ioreg_t) (block>>24);
+-			if (sizeof(block) == 4) {
+-				tasklets[8] = (task_ioreg_t) 0;
+-				tasklets[9] = (task_ioreg_t) 0;
+-			} else {
+-				tasklets[8] = (task_ioreg_t)((u64)block >> 32);
+-				tasklets[9] = (task_ioreg_t)((u64)block >> 40);
++			tf->hob_nsect = (nsectors >> 8) & 0xff;
++			tf->hob_lbal  = (u8)(block >> 24);
++			if (sizeof(block) != 4) {
++				tf->hob_lbam = (u8)((u64)block >> 32);
++				tf->hob_lbah = (u8)((u64)block >> 40);
+ 			}
+-#ifdef DEBUG
+-			printk("%s: 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n",
+-				drive->name, tasklets[3], tasklets[2],
+-				tasklets[9], tasklets[8], tasklets[7],
+-				tasklets[6], tasklets[5], tasklets[4]);
+-#endif
+-			hwif->OUTB(tasklets[1], IDE_FEATURE_REG);
+-			hwif->OUTB(tasklets[3], IDE_NSECTOR_REG);
+-			hwif->OUTB(tasklets[7], IDE_SECTOR_REG);
+-			hwif->OUTB(tasklets[8], IDE_LCYL_REG);
+-			hwif->OUTB(tasklets[9], IDE_HCYL_REG);
+-
+-			hwif->OUTB(tasklets[0], IDE_FEATURE_REG);
+-			hwif->OUTB(tasklets[2], IDE_NSECTOR_REG);
+-			hwif->OUTB(tasklets[4], IDE_SECTOR_REG);
+-			hwif->OUTB(tasklets[5], IDE_LCYL_REG);
+-			hwif->OUTB(tasklets[6], IDE_HCYL_REG);
+-			hwif->OUTB(0x00|drive->select.all,IDE_SELECT_REG);
 +
-+	  To compile this driver as a module, choose M here, the module
-+	  will be called nozomi.
++			tf->nsect  = nsectors & 0xff;
++			tf->lbal   = (u8) block;
++			tf->lbam   = (u8)(block >>  8);
++			tf->lbah   = (u8)(block >> 16);
 +
- config A2232
- 	tristate "Commodore A2232 serial support (EXPERIMENTAL)"
- 	depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
-diff --git a/drivers/char/Makefile b/drivers/char/Makefile
-index 07304d5..96fc01e 100644
---- a/drivers/char/Makefile
-+++ b/drivers/char/Makefile
-@@ -26,6 +26,7 @@ obj-$(CONFIG_SERIAL167)		+= serial167.o
- obj-$(CONFIG_CYCLADES)		+= cyclades.o
- obj-$(CONFIG_STALLION)		+= stallion.o
- obj-$(CONFIG_ISTALLION)		+= istallion.o
-+obj-$(CONFIG_NOZOMI)		+= nozomi.o
- obj-$(CONFIG_DIGIEPCA)		+= epca.o
- obj-$(CONFIG_SPECIALIX)		+= specialix.o
- obj-$(CONFIG_MOXA_INTELLIO)	+= moxa.o
-diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
-index aa5ddb7..1ffb381 100644
---- a/drivers/char/agp/ali-agp.c
-+++ b/drivers/char/agp/ali-agp.c
-@@ -145,7 +145,6 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
- 	void *addr = agp_generic_alloc_page(agp_bridge);
- 	u32 temp;
- 
--	global_flush_tlb();
- 	if (!addr)
- 		return NULL;
- 
-@@ -162,7 +161,6 @@ static void ali_destroy_page(void * addr, int flags)
- 		if (flags & AGP_PAGE_DESTROY_UNMAP) {
- 			global_cache_flush();	/* is this really needed?  --hch */
- 			agp_generic_destroy_page(addr, flags);
--			global_flush_tlb();
- 		} else
- 			agp_generic_destroy_page(addr, flags);
- 	}
-diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
-index 832ded2..2720882 100644
---- a/drivers/char/agp/backend.c
-+++ b/drivers/char/agp/backend.c
-@@ -147,7 +147,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
- 			printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
- 			return -ENOMEM;
++			task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ 		} else {
+-			hwif->OUTB(0x00, IDE_FEATURE_REG);
+-			hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
+-			hwif->OUTB(block, IDE_SECTOR_REG);
+-			hwif->OUTB(block>>=8, IDE_LCYL_REG);
+-			hwif->OUTB(block>>=8, IDE_HCYL_REG);
+-			hwif->OUTB(((block>>8)&0x0f)|drive->select.all,IDE_SELECT_REG);
++			tf->nsect  = nsectors & 0xff;
++			tf->lbal   = block;
++			tf->lbam   = block >>= 8;
++			tf->lbah   = block >>= 8;
++			tf->device = (block >> 8) & 0xf;
  		}
--		flush_agp_mappings();
+ 	} else {
+ 		unsigned int sect,head,cyl,track;
+ 		track = (int)block / drive->sect;
+ 		sect  = (int)block % drive->sect + 1;
+-		hwif->OUTB(sect, IDE_SECTOR_REG);
+ 		head  = track % drive->head;
+ 		cyl   = track / drive->head;
  
- 		bridge->scratch_page_real = virt_to_gart(addr);
- 		bridge->scratch_page =
-@@ -191,7 +190,6 @@ err_out:
- 	if (bridge->driver->needs_scratch_page) {
- 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
- 						 AGP_PAGE_DESTROY_UNMAP);
--		flush_agp_mappings();
- 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
- 						 AGP_PAGE_DESTROY_FREE);
- 	}
-@@ -219,7 +217,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
- 	    bridge->driver->needs_scratch_page) {
- 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
- 						 AGP_PAGE_DESTROY_UNMAP);
--		flush_agp_mappings();
- 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
- 						 AGP_PAGE_DESTROY_FREE);
- 	}
-diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
-index 64b2f6d..1a4674c 100644
---- a/drivers/char/agp/generic.c
-+++ b/drivers/char/agp/generic.c
-@@ -197,7 +197,6 @@ void agp_free_memory(struct agp_memory *curr)
- 		for (i = 0; i < curr->page_count; i++) {
- 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
- 		}
--		flush_agp_mappings();
- 		for (i = 0; i < curr->page_count; i++) {
- 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
- 		}
-@@ -267,8 +266,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
+ 		pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);
+ 
+-		hwif->OUTB(0x00, IDE_FEATURE_REG);
+-		hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
+-		hwif->OUTB(cyl, IDE_LCYL_REG);
+-		hwif->OUTB(cyl>>8, IDE_HCYL_REG);
+-		hwif->OUTB(head|drive->select.all,IDE_SELECT_REG);
++		tf->nsect  = nsectors & 0xff;
++		tf->lbal   = sect;
++		tf->lbam   = cyl;
++		tf->lbah   = cyl >> 8;
++		tf->device = head;
  	}
- 	new->bridge = bridge;
  
--	flush_agp_mappings();
+-	if (dma) {
+-		if (!hwif->dma_setup(drive)) {
+-			if (rq_data_dir(rq)) {
+-				command = lba48 ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
+-				if (drive->vdma)
+-					command = lba48 ? WIN_WRITE_EXT: WIN_WRITE;
+-			} else {
+-				command = lba48 ? WIN_READDMA_EXT : WIN_READDMA;
+-				if (drive->vdma)
+-					command = lba48 ? WIN_READ_EXT: WIN_READ;
+-			}
+-			hwif->dma_exec_cmd(drive, command);
+-			hwif->dma_start(drive);
+-			return ide_started;
+-		}
+-		/* fallback to PIO */
+-		ide_init_sg_cmd(drive, rq);
+-	}
 -
- 	return new;
- }
- EXPORT_SYMBOL(agp_allocate_memory);
-diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
-index e72a83e..76f581c 100644
---- a/drivers/char/agp/i460-agp.c
-+++ b/drivers/char/agp/i460-agp.c
-@@ -527,7 +527,6 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
+-	if (rq_data_dir(rq) == READ) {
+-
+-		if (drive->mult_count) {
+-			hwif->data_phase = TASKFILE_MULTI_IN;
+-			command = lba48 ? WIN_MULTREAD_EXT : WIN_MULTREAD;
+-		} else {
+-			hwif->data_phase = TASKFILE_IN;
+-			command = lba48 ? WIN_READ_EXT : WIN_READ;
+-		}
++	if (rq_data_dir(rq))
++		task.tf_flags |= IDE_TFLAG_WRITE;
  
- 	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
- 		page = agp_generic_alloc_page(agp_bridge);
--		global_flush_tlb();
- 	} else
- 		/* Returning NULL would cause problems */
- 		/* AK: really dubious code. */
-@@ -539,7 +538,6 @@ static void i460_destroy_page (void *page, int flags)
- {
- 	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
- 		agp_generic_destroy_page(page, flags);
--		global_flush_tlb();
- 	}
- }
+-		ide_execute_command(drive, command, &task_in_intr, WAIT_CMD, NULL);
+-		return ide_started;
+-	} else {
+-		if (drive->mult_count) {
+-			hwif->data_phase = TASKFILE_MULTI_OUT;
+-			command = lba48 ? WIN_MULTWRITE_EXT : WIN_MULTWRITE;
+-		} else {
+-			hwif->data_phase = TASKFILE_OUT;
+-			command = lba48 ? WIN_WRITE_EXT : WIN_WRITE;
+-		}
++	ide_tf_set_cmd(drive, &task, dma);
++	if (!dma)
++		hwif->data_phase = task.data_phase;
++	task.rq = rq;
  
-diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
-index 03eac1e..189efb6 100644
---- a/drivers/char/agp/intel-agp.c
-+++ b/drivers/char/agp/intel-agp.c
-@@ -210,13 +210,11 @@ static void *i8xx_alloc_pages(void)
- 	if (page == NULL)
- 		return NULL;
+-		/* FIXME: ->OUTBSYNC ? */
+-		hwif->OUTB(command, IDE_COMMAND_REG);
++	rc = do_rw_taskfile(drive, &task);
  
--	if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
--		change_page_attr(page, 4, PAGE_KERNEL);
--		global_flush_tlb();
-+	if (set_pages_uc(page, 4) < 0) {
-+		set_pages_wb(page, 4);
- 		__free_pages(page, 2);
- 		return NULL;
+-		return pre_task_out_intr(drive, rq);
++	if (rc == ide_stopped && dma) {
++		/* fallback to PIO */
++		task.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
++		ide_tf_set_cmd(drive, &task, 0);
++		hwif->data_phase = task.data_phase;
++		ide_init_sg_cmd(drive, rq);
++		rc = do_rw_taskfile(drive, &task);
  	}
--	global_flush_tlb();
- 	get_page(page);
- 	atomic_inc(&agp_bridge->current_memory_agp);
- 	return page_address(page);
-@@ -230,8 +228,7 @@ static void i8xx_destroy_pages(void *addr)
- 		return;
++
++	return rc;
+ }
  
- 	page = virt_to_page(addr);
--	change_page_attr(page, 4, PAGE_KERNEL);
--	global_flush_tlb();
-+	set_pages_wb(page, 4);
- 	put_page(page);
- 	__free_pages(page, 2);
- 	atomic_dec(&agp_bridge->current_memory_agp);
-@@ -341,7 +338,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+ /*
+@@ -307,57 +301,29 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
+  * Queries for true maximum capacity of the drive.
+  * Returns maximum LBA address (> 0) of the drive, 0 if failed.
+  */
+-static unsigned long idedisk_read_native_max_address(ide_drive_t *drive)
++static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
+ {
+ 	ide_task_t args;
+-	unsigned long addr = 0;
++	struct ide_taskfile *tf = &args.tf;
++	u64 addr = 0;
  
- 	switch (pg_count) {
- 	case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
--		global_flush_tlb();
- 		break;
- 	case 4:
- 		/* kludge to get 4 physical pages for ARGB cursor */
-@@ -404,7 +400,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
- 		else {
- 			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
- 							     AGP_PAGE_DESTROY_UNMAP);
--			global_flush_tlb();
- 			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
- 							     AGP_PAGE_DESTROY_FREE);
- 		}
-diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
-index 4c16778..465ad35 100644
---- a/drivers/char/hpet.c
-+++ b/drivers/char/hpet.c
-@@ -600,63 +600,6 @@ static int hpet_is_known(struct hpet_data *hdp)
- 	return 0;
- }
+ 	/* Create IDE/ATA command request structure */
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	args.tfRegister[IDE_SELECT_OFFSET]	= 0x40;
+-	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_READ_NATIVE_MAX;
+-	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
+-	args.handler				= &task_no_data_intr;
++	if (lba48)
++		tf->command = WIN_READ_NATIVE_MAX_EXT;
++	else
++		tf->command = WIN_READ_NATIVE_MAX;
++	tf->device  = ATA_LBA;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++	if (lba48)
++		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ 	/* submit command request */
+-	ide_raw_taskfile(drive, &args, NULL);
++	ide_no_data_taskfile(drive, &args);
  
--EXPORT_SYMBOL(hpet_alloc);
--EXPORT_SYMBOL(hpet_register);
--EXPORT_SYMBOL(hpet_unregister);
--EXPORT_SYMBOL(hpet_control);
--
--int hpet_register(struct hpet_task *tp, int periodic)
--{
--	unsigned int i;
--	u64 mask;
--	struct hpet_timer __iomem *timer;
--	struct hpet_dev *devp;
--	struct hpets *hpetp;
--
--	switch (periodic) {
--	case 1:
--		mask = Tn_PER_INT_CAP_MASK;
--		break;
--	case 0:
--		mask = 0;
--		break;
--	default:
--		return -EINVAL;
+ 	/* if OK, compute maximum address value */
+-	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
+-		addr = ((args.tfRegister[IDE_SELECT_OFFSET] & 0x0f) << 24)
+-		     | ((args.tfRegister[  IDE_HCYL_OFFSET]       ) << 16)
+-		     | ((args.tfRegister[  IDE_LCYL_OFFSET]       ) <<  8)
+-		     | ((args.tfRegister[IDE_SECTOR_OFFSET]       ));
+-		addr++;	/* since the return value is (maxlba - 1), we add 1 */
 -	}
+-	return addr;
+-}
++	if ((tf->status & 0x01) == 0)
++		addr = ide_get_lba_addr(tf, lba48) + 1;
+ 
+-static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive)
+-{
+-	ide_task_t args;
+-	unsigned long long addr = 0;
 -
--	tp->ht_opaque = NULL;
--
--	spin_lock_irq(&hpet_task_lock);
--	spin_lock(&hpet_lock);
--
--	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
--		for (timer = hpetp->hp_hpet->hpet_timers, i = 0;
--		     i < hpetp->hp_ntimer; i++, timer++) {
--			if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK)
--			    != mask)
--				continue;
--
--			devp = &hpetp->hp_dev[i];
--
--			if (devp->hd_flags & HPET_OPEN || devp->hd_task) {
--				devp = NULL;
--				continue;
--			}
--
--			tp->ht_opaque = devp;
--			devp->hd_task = tp;
--			break;
--		}
--
--	spin_unlock(&hpet_lock);
--	spin_unlock_irq(&hpet_task_lock);
+-	/* Create IDE/ATA command request structure */
+-	memset(&args, 0, sizeof(ide_task_t));
 -
--	if (tp->ht_opaque)
--		return 0;
--	else
--		return -EBUSY;
--}
+-	args.tfRegister[IDE_SELECT_OFFSET]	= 0x40;
+-	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_READ_NATIVE_MAX_EXT;
+-	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
+-	args.handler				= &task_no_data_intr;
+-        /* submit command request */
+-        ide_raw_taskfile(drive, &args, NULL);
 -
- static inline int hpet_tpcheck(struct hpet_task *tp)
- {
- 	struct hpet_dev *devp;
-@@ -706,24 +649,6 @@ int hpet_unregister(struct hpet_task *tp)
- 	return 0;
+-	/* if OK, compute maximum address value */
+-	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
+-		u32 high = (args.hobRegister[IDE_HCYL_OFFSET] << 16) |
+-			   (args.hobRegister[IDE_LCYL_OFFSET] <<  8) |
+-			    args.hobRegister[IDE_SECTOR_OFFSET];
+-		u32 low  = ((args.tfRegister[IDE_HCYL_OFFSET])<<16) |
+-			   ((args.tfRegister[IDE_LCYL_OFFSET])<<8) |
+-			    (args.tfRegister[IDE_SECTOR_OFFSET]);
+-		addr = ((__u64)high << 24) | low;
+-		addr++;	/* since the return value is (maxlba - 1), we add 1 */
+-	}
+ 	return addr;
  }
  
--int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
+@@ -365,67 +331,37 @@ static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive
+  * Sets maximum virtual LBA address of the drive.
+  * Returns new maximum virtual LBA address (> 0) or 0 on failure.
+  */
+-static unsigned long idedisk_set_max_address(ide_drive_t *drive, unsigned long addr_req)
 -{
--	struct hpet_dev *devp;
--	int err;
--
--	if ((err = hpet_tpcheck(tp)))
--		return err;
--
--	spin_lock_irq(&hpet_lock);
--	devp = tp->ht_opaque;
--	if (devp->hd_task != tp) {
--		spin_unlock_irq(&hpet_lock);
--		return -ENXIO;
+-	ide_task_t args;
+-	unsigned long addr_set = 0;
+-	
+-	addr_req--;
+-	/* Create IDE/ATA command request structure */
+-	memset(&args, 0, sizeof(ide_task_t));
+-	args.tfRegister[IDE_SECTOR_OFFSET]	= ((addr_req >>  0) & 0xff);
+-	args.tfRegister[IDE_LCYL_OFFSET]	= ((addr_req >>  8) & 0xff);
+-	args.tfRegister[IDE_HCYL_OFFSET]	= ((addr_req >> 16) & 0xff);
+-	args.tfRegister[IDE_SELECT_OFFSET]	= ((addr_req >> 24) & 0x0f) | 0x40;
+-	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SET_MAX;
+-	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
+-	args.handler				= &task_no_data_intr;
+-	/* submit command request */
+-	ide_raw_taskfile(drive, &args, NULL);
+-	/* if OK, read new maximum address value */
+-	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
+-		addr_set = ((args.tfRegister[IDE_SELECT_OFFSET] & 0x0f) << 24)
+-			 | ((args.tfRegister[  IDE_HCYL_OFFSET]       ) << 16)
+-			 | ((args.tfRegister[  IDE_LCYL_OFFSET]       ) <<  8)
+-			 | ((args.tfRegister[IDE_SECTOR_OFFSET]       ));
+-		addr_set++;
 -	}
--	spin_unlock_irq(&hpet_lock);
--	return hpet_ioctl_common(devp, cmd, arg, 1);
+-	return addr_set;
 -}
 -
- static ctl_table hpet_table[] = {
- 	{
- 	 .ctl_name = CTL_UNNUMBERED,
-@@ -806,14 +731,14 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
- 
- int hpet_alloc(struct hpet_data *hdp)
+-static unsigned long long idedisk_set_max_address_ext(ide_drive_t *drive, unsigned long long addr_req)
++static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
  {
--	u64 cap, mcfg;
-+	u64 cap, mcfg, hpet_config;
- 	struct hpet_dev *devp;
--	u32 i, ntimer;
-+	u32 i, ntimer, irq;
- 	struct hpets *hpetp;
- 	size_t siz;
- 	struct hpet __iomem *hpet;
- 	static struct hpets *last = NULL;
--	unsigned long period;
-+	unsigned long period, irq_bitmap;
- 	unsigned long long temp;
- 
- 	/*
-@@ -840,11 +765,47 @@ int hpet_alloc(struct hpet_data *hdp)
- 	hpetp->hp_hpet_phys = hdp->hd_phys_address;
+ 	ide_task_t args;
+-	unsigned long long addr_set = 0;
++	struct ide_taskfile *tf = &args.tf;
++	u64 addr_set = 0;
  
- 	hpetp->hp_ntimer = hdp->hd_nirqs;
-+	hpet = hpetp->hp_hpet;
+ 	addr_req--;
+ 	/* Create IDE/ATA command request structure */
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	args.tfRegister[IDE_SECTOR_OFFSET]	= ((addr_req >>  0) & 0xff);
+-	args.tfRegister[IDE_LCYL_OFFSET]	= ((addr_req >>= 8) & 0xff);
+-	args.tfRegister[IDE_HCYL_OFFSET]	= ((addr_req >>= 8) & 0xff);
+-	args.tfRegister[IDE_SELECT_OFFSET]      = 0x40;
+-	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SET_MAX_EXT;
+-	args.hobRegister[IDE_SECTOR_OFFSET]	= (addr_req >>= 8) & 0xff;
+-	args.hobRegister[IDE_LCYL_OFFSET]	= (addr_req >>= 8) & 0xff;
+-	args.hobRegister[IDE_HCYL_OFFSET]	= (addr_req >>= 8) & 0xff;
+-	args.hobRegister[IDE_SELECT_OFFSET]	= 0x40;
+-	args.hobRegister[IDE_CONTROL_OFFSET_HOB]= (drive->ctl|0x80);
+-	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
+-	args.handler				= &task_no_data_intr;
++	tf->lbal     = (addr_req >>  0) & 0xff;
++	tf->lbam     = (addr_req >>= 8) & 0xff;
++	tf->lbah     = (addr_req >>= 8) & 0xff;
++	if (lba48) {
++		tf->hob_lbal = (addr_req >>= 8) & 0xff;
++		tf->hob_lbam = (addr_req >>= 8) & 0xff;
++		tf->hob_lbah = (addr_req >>= 8) & 0xff;
++		tf->command  = WIN_SET_MAX_EXT;
++	} else {
++		tf->device   = (addr_req >>= 8) & 0x0f;
++		tf->command  = WIN_SET_MAX;
++	}
++	tf->device |= ATA_LBA;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++	if (lba48)
++		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ 	/* submit command request */
+-	ide_raw_taskfile(drive, &args, NULL);
++	ide_no_data_taskfile(drive, &args);
+ 	/* if OK, compute maximum address value */
+-	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
+-		u32 high = (args.hobRegister[IDE_HCYL_OFFSET] << 16) |
+-			   (args.hobRegister[IDE_LCYL_OFFSET] <<  8) |
+-			    args.hobRegister[IDE_SECTOR_OFFSET];
+-		u32 low  = ((args.tfRegister[IDE_HCYL_OFFSET])<<16) |
+-			   ((args.tfRegister[IDE_LCYL_OFFSET])<<8) |
+-			    (args.tfRegister[IDE_SECTOR_OFFSET]);
+-		addr_set = ((__u64)high << 24) | low;
+-		addr_set++;
+-	}
++	if ((tf->status & 0x01) == 0)
++		addr_set = ide_get_lba_addr(tf, lba48) + 1;
++
+ 	return addr_set;
+ }
  
--	for (i = 0; i < hdp->hd_nirqs; i++)
--		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
-+	/* Assign IRQs statically for legacy devices */
-+	hpetp->hp_dev[0].hd_hdwirq = hdp->hd_irq[0];
-+	hpetp->hp_dev[1].hd_hdwirq = hdp->hd_irq[1];
+@@ -471,10 +407,8 @@ static void idedisk_check_hpa(ide_drive_t *drive)
+ 	int lba48 = idedisk_supports_lba48(drive->id);
  
--	hpet = hpetp->hp_hpet;
-+	/* Assign IRQs dynamically for the others */
-+	for (i = 2, devp = &hpetp->hp_dev[2]; i < hdp->hd_nirqs; i++, devp++) {
-+		struct hpet_timer __iomem *timer;
-+
-+		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
-+
-+		/* Check if there's already an IRQ assigned to the timer */
-+		if (hdp->hd_irq[i]) {
-+			hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
-+			continue;
-+		}
+ 	capacity = drive->capacity64;
+-	if (lba48)
+-		set_max = idedisk_read_native_max_address_ext(drive);
+-	else
+-		set_max = idedisk_read_native_max_address(drive);
 +
-+		hpet_config = readq(&timer->hpet_config);
-+		irq_bitmap = (hpet_config & Tn_INT_ROUTE_CAP_MASK)
-+			>> Tn_INT_ROUTE_CAP_SHIFT;
-+		if (!irq_bitmap)
-+			irq = 0;        /* No valid IRQ Assignable */
-+		else {
-+			irq = find_first_bit(&irq_bitmap, 32);
-+			do {
-+				hpet_config |= irq << Tn_INT_ROUTE_CNF_SHIFT;
-+				writeq(hpet_config, &timer->hpet_config);
++	set_max = idedisk_read_native_max_address(drive, lba48);
+ 
+ 	if (ide_in_drive_list(drive->id, hpa_list)) {
+ 		/*
+@@ -495,10 +429,8 @@ static void idedisk_check_hpa(ide_drive_t *drive)
+ 			 capacity, sectors_to_MB(capacity),
+ 			 set_max, sectors_to_MB(set_max));
+ 
+-	if (lba48)
+-		set_max = idedisk_set_max_address_ext(drive, set_max);
+-	else
+-		set_max = idedisk_set_max_address(drive, set_max);
++	set_max = idedisk_set_max_address(drive, set_max, lba48);
 +
-+				/*
-+				 * Verify whether we have written a valid
-+				 * IRQ number by reading it back again
-+				 */
-+				hpet_config = readq(&timer->hpet_config);
-+				if (irq == (hpet_config & Tn_INT_ROUTE_CNF_MASK)
-+						>> Tn_INT_ROUTE_CNF_SHIFT)
-+					break;  /* Success */
-+			} while ((irq = (find_next_bit(&irq_bitmap, 32, irq))));
-+		}
-+		hpetp->hp_dev[i].hd_hdwirq = irq;
-+	}
+ 	if (set_max) {
+ 		drive->capacity64 = set_max;
+ 		printk(KERN_INFO "%s: Host Protected Area disabled.\n",
+@@ -556,32 +488,32 @@ static sector_t idedisk_capacity (ide_drive_t *drive)
+ static int smart_enable(ide_drive_t *drive)
+ {
+ 	ide_task_t args;
++	struct ide_taskfile *tf = &args.tf;
  
- 	cap = readq(&hpet->hpet_cap);
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	args.tfRegister[IDE_FEATURE_OFFSET]	= SMART_ENABLE;
+-	args.tfRegister[IDE_LCYL_OFFSET]	= SMART_LCYL_PASS;
+-	args.tfRegister[IDE_HCYL_OFFSET]	= SMART_HCYL_PASS;
+-	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SMART;
+-	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
+-	args.handler				= &task_no_data_intr;
+-	return ide_raw_taskfile(drive, &args, NULL);
++	tf->feature = SMART_ENABLE;
++	tf->lbam    = SMART_LCYL_PASS;
++	tf->lbah    = SMART_HCYL_PASS;
++	tf->command = WIN_SMART;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++	return ide_no_data_taskfile(drive, &args);
+ }
  
-@@ -875,7 +836,8 @@ int hpet_alloc(struct hpet_data *hdp)
- 		hpetp->hp_which, hdp->hd_phys_address,
- 		hpetp->hp_ntimer > 1 ? "s" : "");
- 	for (i = 0; i < hpetp->hp_ntimer; i++)
--		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
-+		printk("%s %d", i > 0 ? "," : "",
-+				hpetp->hp_dev[i].hd_hdwirq);
- 	printk("\n");
+ static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd)
+ {
+ 	ide_task_t args;
++	struct ide_taskfile *tf = &args.tf;
  
- 	printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
-diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
-index 8252f86..480fae2 100644
---- a/drivers/char/hvc_console.c
-+++ b/drivers/char/hvc_console.c
-@@ -27,7 +27,7 @@
- #include <linux/init.h>
- #include <linux/kbd_kern.h>
- #include <linux/kernel.h>
--#include <linux/kobject.h>
-+#include <linux/kref.h>
- #include <linux/kthread.h>
- #include <linux/list.h>
- #include <linux/module.h>
-@@ -89,7 +89,7 @@ struct hvc_struct {
- 	int irq_requested;
- 	int irq;
- 	struct list_head next;
--	struct kobject kobj; /* ref count & hvc_struct lifetime */
-+	struct kref kref; /* ref count & hvc_struct lifetime */
- };
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	args.tfRegister[IDE_FEATURE_OFFSET]	= sub_cmd;
+-	args.tfRegister[IDE_NSECTOR_OFFSET]	= 0x01;
+-	args.tfRegister[IDE_LCYL_OFFSET]	= SMART_LCYL_PASS;
+-	args.tfRegister[IDE_HCYL_OFFSET]	= SMART_HCYL_PASS;
+-	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SMART;
+-	args.command_type			= IDE_DRIVE_TASK_IN;
+-	args.data_phase				= TASKFILE_IN;
+-	args.handler				= &task_in_intr;
++	tf->feature = sub_cmd;
++	tf->nsect   = 0x01;
++	tf->lbam    = SMART_LCYL_PASS;
++	tf->lbah    = SMART_HCYL_PASS;
++	tf->command = WIN_SMART;
++	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++	args.data_phase	= TASKFILE_IN;
+ 	(void) smart_enable(drive);
+-	return ide_raw_taskfile(drive, &args, buf);
++	return ide_raw_taskfile(drive, &args, buf, 1);
+ }
  
- /* dynamic list of hvc_struct instances */
-@@ -110,7 +110,7 @@ static int last_hvc = -1;
+ static int proc_idedisk_read_cache
+@@ -659,19 +591,20 @@ static ide_proc_entry_t idedisk_proc[] = {
+ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+ {
+ 	ide_drive_t *drive = q->queuedata;
++	ide_task_t task;
  
- /*
-  * Do not call this function with either the hvc_structs_lock or the hvc_struct
-- * lock held.  If successful, this function increments the kobject reference
-+ * lock held.  If successful, this function increments the kref reference
-  * count against the target hvc_struct so it should be released when finished.
-  */
- static struct hvc_struct *hvc_get_by_index(int index)
-@@ -123,7 +123,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
- 	list_for_each_entry(hp, &hvc_structs, next) {
- 		spin_lock_irqsave(&hp->lock, flags);
- 		if (hp->index == index) {
--			kobject_get(&hp->kobj);
-+			kref_get(&hp->kref);
- 			spin_unlock_irqrestore(&hp->lock, flags);
- 			spin_unlock(&hvc_structs_lock);
- 			return hp;
-@@ -242,6 +242,23 @@ static int __init hvc_console_init(void)
+-	memset(rq->cmd, 0, sizeof(rq->cmd));
+-
++	memset(&task, 0, sizeof(task));
+ 	if (ide_id_has_flush_cache_ext(drive->id) &&
+ 	    (drive->capacity64 >= (1UL << 28)))
+-		rq->cmd[0] = WIN_FLUSH_CACHE_EXT;
++		task.tf.command = WIN_FLUSH_CACHE_EXT;
+ 	else
+-		rq->cmd[0] = WIN_FLUSH_CACHE;
++		task.tf.command = WIN_FLUSH_CACHE;
++	task.tf_flags	= IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	task.data_phase	= TASKFILE_NO_DATA;
+ 
+-
+-	rq->cmd_type = REQ_TYPE_ATA_TASK;
++	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ 	rq->cmd_flags |= REQ_SOFTBARRIER;
+-	rq->buffer = rq->cmd;
++	rq->special = &task;
  }
- console_initcall(hvc_console_init);
  
-+/* callback when the kboject ref count reaches zero. */
-+static void destroy_hvc_struct(struct kref *kref)
-+{
-+	struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref);
-+	unsigned long flags;
+ /*
+@@ -687,8 +620,10 @@ static int set_multcount(ide_drive_t *drive, int arg)
+ 
+ 	if (drive->special.b.set_multmode)
+ 		return -EBUSY;
 +
-+	spin_lock(&hvc_structs_lock);
+ 	ide_init_drive_cmd (&rq);
+-	rq.cmd_type = REQ_TYPE_ATA_CMD;
++	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
 +
-+	spin_lock_irqsave(&hp->lock, flags);
-+	list_del(&(hp->next));
-+	spin_unlock_irqrestore(&hp->lock, flags);
+ 	drive->mult_req = arg;
+ 	drive->special.b.set_multmode = 1;
+ 	(void) ide_do_drive_cmd (drive, &rq, ide_wait);
+@@ -753,12 +688,11 @@ static int write_cache(ide_drive_t *drive, int arg)
+ 
+ 	if (ide_id_has_flush_cache(drive->id)) {
+ 		memset(&args, 0, sizeof(ide_task_t));
+-		args.tfRegister[IDE_FEATURE_OFFSET]	= (arg) ?
++		args.tf.feature = arg ?
+ 			SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
+-		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SETFEATURES;
+-		args.command_type		= IDE_DRIVE_TASK_NO_DATA;
+-		args.handler			= &task_no_data_intr;
+-		err = ide_raw_taskfile(drive, &args, NULL);
++		args.tf.command = WIN_SETFEATURES;
++		args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++		err = ide_no_data_taskfile(drive, &args);
+ 		if (err == 0)
+ 			drive->wcache = arg;
+ 	}
+@@ -774,12 +708,11 @@ static int do_idedisk_flushcache (ide_drive_t *drive)
+ 
+ 	memset(&args, 0, sizeof(ide_task_t));
+ 	if (ide_id_has_flush_cache_ext(drive->id))
+-		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_FLUSH_CACHE_EXT;
++		args.tf.command = WIN_FLUSH_CACHE_EXT;
+ 	else
+-		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_FLUSH_CACHE;
+-	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
+-	args.handler				= &task_no_data_intr;
+-	return ide_raw_taskfile(drive, &args, NULL);
++		args.tf.command = WIN_FLUSH_CACHE;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++	return ide_no_data_taskfile(drive, &args);
+ }
+ 
+ static int set_acoustic (ide_drive_t *drive, int arg)
+@@ -790,13 +723,11 @@ static int set_acoustic (ide_drive_t *drive, int arg)
+ 		return -EINVAL;
+ 
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	args.tfRegister[IDE_FEATURE_OFFSET]	= (arg) ? SETFEATURES_EN_AAM :
+-							  SETFEATURES_DIS_AAM;
+-	args.tfRegister[IDE_NSECTOR_OFFSET]	= arg;
+-	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SETFEATURES;
+-	args.command_type = IDE_DRIVE_TASK_NO_DATA;
+-	args.handler	  = &task_no_data_intr;
+-	ide_raw_taskfile(drive, &args, NULL);
++	args.tf.feature = arg ? SETFEATURES_EN_AAM : SETFEATURES_DIS_AAM;
++	args.tf.nsect   = arg;
++	args.tf.command = WIN_SETFEATURES;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++	ide_no_data_taskfile(drive, &args);
+ 	drive->acoustic = arg;
+ 	return 0;
+ }
+@@ -832,7 +763,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
+ 	ide_add_setting(drive,	"bios_head",	SETTING_RW,	TYPE_BYTE,	0,	255,			1,	1,	&drive->bios_head,	NULL);
+ 	ide_add_setting(drive,	"bios_sect",	SETTING_RW,	TYPE_BYTE,	0,	63,			1,	1,	&drive->bios_sect,	NULL);
+ 	ide_add_setting(drive,	"address",	SETTING_RW,	TYPE_BYTE,	0,	2,			1,	1,	&drive->addressing,	set_lba_addressing);
+-	ide_add_setting(drive,	"bswap",	SETTING_READ,	TYPE_BYTE,	0,	1,			1,	1,	&drive->bswap,		NULL);
+ 	ide_add_setting(drive,	"multcount",	SETTING_RW,	TYPE_BYTE,	0,	id->max_multsect,	1,	1,	&drive->mult_count,	set_multcount);
+ 	ide_add_setting(drive,	"nowerr",	SETTING_RW,	TYPE_BYTE,	0,	1,			1,	1,	&drive->nowerr,		set_nowerr);
+ 	ide_add_setting(drive,	"lun",		SETTING_RW,	TYPE_INT,	0,	7,			1,	1,	&drive->lun,		NULL);
+@@ -1041,6 +971,17 @@ static ide_driver_t idedisk_driver = {
+ #endif
+ };
+ 
++static int idedisk_set_doorlock(ide_drive_t *drive, int on)
++{
++	ide_task_t task;
 +
-+	spin_unlock(&hvc_structs_lock);
++	memset(&task, 0, sizeof(task));
++	task.tf.command = on ? WIN_DOORLOCK : WIN_DOORUNLOCK;
++	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +
-+	kfree(hp);
++	return ide_no_data_taskfile(drive, &task);
 +}
 +
- /*
-  * hvc_instantiate() is an early console discovery method which locates
-  * consoles * prior to the vio subsystem discovering them.  Hotplugged
-@@ -261,7 +278,7 @@ int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
- 	/* make sure no no tty has been registered in this index */
- 	hp = hvc_get_by_index(index);
- 	if (hp) {
--		kobject_put(&hp->kobj);
-+		kref_put(&hp->kref, destroy_hvc_struct);
- 		return -1;
- 	}
+ static int idedisk_open(struct inode *inode, struct file *filp)
+ {
+ 	struct gendisk *disk = inode->i_bdev->bd_disk;
+@@ -1055,18 +996,13 @@ static int idedisk_open(struct inode *inode, struct file *filp)
+ 	idkp->openers++;
  
-@@ -318,9 +335,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
- 	unsigned long flags;
- 	int irq = 0;
- 	int rc = 0;
--	struct kobject *kobjp;
+ 	if (drive->removable && idkp->openers == 1) {
+-		ide_task_t args;
+-		memset(&args, 0, sizeof(ide_task_t));
+-		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORLOCK;
+-		args.command_type = IDE_DRIVE_TASK_NO_DATA;
+-		args.handler	  = &task_no_data_intr;
+ 		check_disk_change(inode->i_bdev);
+ 		/*
+ 		 * Ignore the return code from door_lock,
+ 		 * since the open() has already succeeded,
+ 		 * and the door_lock is irrelevant at this point.
+ 		 */
+-		if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL))
++		if (drive->doorlocking && idedisk_set_doorlock(drive, 1))
+ 			drive->doorlocking = 0;
+ 	}
+ 	return 0;
+@@ -1082,12 +1018,7 @@ static int idedisk_release(struct inode *inode, struct file *filp)
+ 		ide_cacheflush_p(drive);
  
--	/* Auto increments kobject reference if found. */
-+	/* Auto increments kref reference if found. */
- 	if (!(hp = hvc_get_by_index(tty->index)))
- 		return -ENODEV;
+ 	if (drive->removable && idkp->openers == 1) {
+-		ide_task_t args;
+-		memset(&args, 0, sizeof(ide_task_t));
+-		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORUNLOCK;
+-		args.command_type = IDE_DRIVE_TASK_NO_DATA;
+-		args.handler	  = &task_no_data_intr;
+-		if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL))
++		if (drive->doorlocking && idedisk_set_doorlock(drive, 0))
+ 			drive->doorlocking = 0;
+ 	}
  
-@@ -341,8 +357,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
- 	if (irq)
- 		hp->irq_requested = 1;
+diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
+index 4703837..5bf3203 100644
+--- a/drivers/ide/ide-dma.c
++++ b/drivers/ide/ide-dma.c
+@@ -153,13 +153,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
+ 		if (!dma_stat) {
+ 			struct request *rq = HWGROUP(drive)->rq;
  
--	kobjp = &hp->kobj;
+-			if (rq->rq_disk) {
+-				ide_driver_t *drv;
 -
- 	spin_unlock_irqrestore(&hp->lock, flags);
- 	/* check error, fallback to non-irq */
- 	if (irq)
-@@ -352,7 +366,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
- 	 * If the request_irq() fails and we return an error.  The tty layer
- 	 * will call hvc_close() after a failed open but we don't want to clean
- 	 * up there so we'll clean up here and clear out the previously set
--	 * tty fields and return the kobject reference.
-+	 * tty fields and return the kref reference.
- 	 */
- 	if (rc) {
- 		spin_lock_irqsave(&hp->lock, flags);
-@@ -360,7 +374,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
- 		hp->irq_requested = 0;
- 		spin_unlock_irqrestore(&hp->lock, flags);
- 		tty->driver_data = NULL;
--		kobject_put(kobjp);
-+		kref_put(&hp->kref, destroy_hvc_struct);
- 		printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
- 	}
- 	/* Force wakeup of the polling thread */
-@@ -372,7 +386,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
- static void hvc_close(struct tty_struct *tty, struct file * filp)
- {
- 	struct hvc_struct *hp;
--	struct kobject *kobjp;
- 	int irq = 0;
- 	unsigned long flags;
+-				drv = *(ide_driver_t **)rq->rq_disk->private_data;
+-				drv->end_request(drive, 1, rq->nr_sectors);
+-			} else
+-				ide_end_request(drive, 1, rq->nr_sectors);
++			task_end_request(drive, rq, stat);
+ 			return ide_stopped;
+ 		}
+ 		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", 
+@@ -408,23 +402,29 @@ static int dma_timer_expiry (ide_drive_t *drive)
+ }
  
-@@ -382,7 +395,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
- 	/*
- 	 * No driver_data means that this close was issued after a failed
- 	 * hvc_open by the tty layer's release_dev() function and we can just
--	 * exit cleanly because the kobject reference wasn't made.
-+	 * exit cleanly because the kref reference wasn't made.
- 	 */
- 	if (!tty->driver_data)
- 		return;
-@@ -390,7 +403,6 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
- 	hp = tty->driver_data;
- 	spin_lock_irqsave(&hp->lock, flags);
+ /**
+- *	ide_dma_host_off	-	Generic DMA kill
++ *	ide_dma_host_set	-	Enable/disable DMA on a host
+  *	@drive: drive to control
+  *
+- *	Perform the generic IDE controller DMA off operation. This
+- *	works for most IDE bus mastering controllers
++ *	Enable/disable DMA on an IDE controller following generic
++ *	bus-mastering IDE controller behaviour.
+  */
  
--	kobjp = &hp->kobj;
- 	if (--hp->count == 0) {
- 		if (hp->irq_requested)
- 			irq = hp->irq;
-@@ -417,7 +429,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
- 		spin_unlock_irqrestore(&hp->lock, flags);
- 	}
+-void ide_dma_host_off(ide_drive_t *drive)
++void ide_dma_host_set(ide_drive_t *drive, int on)
+ {
+ 	ide_hwif_t *hwif	= HWIF(drive);
+ 	u8 unit			= (drive->select.b.unit & 0x01);
+ 	u8 dma_stat		= hwif->INB(hwif->dma_status);
  
--	kobject_put(kobjp);
-+	kref_put(&hp->kref, destroy_hvc_struct);
+-	hwif->OUTB((dma_stat & ~(1<<(5+unit))), hwif->dma_status);
++	if (on)
++		dma_stat |= (1 << (5 + unit));
++	else
++		dma_stat &= ~(1 << (5 + unit));
++
++	hwif->OUTB(dma_stat, hwif->dma_status);
  }
  
- static void hvc_hangup(struct tty_struct *tty)
-@@ -426,7 +438,6 @@ static void hvc_hangup(struct tty_struct *tty)
- 	unsigned long flags;
- 	int irq = 0;
- 	int temp_open_count;
--	struct kobject *kobjp;
+-EXPORT_SYMBOL(ide_dma_host_off);
++EXPORT_SYMBOL_GPL(ide_dma_host_set);
++#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
  
- 	if (!hp)
- 		return;
-@@ -443,7 +454,6 @@ static void hvc_hangup(struct tty_struct *tty)
- 		return;
- 	}
+ /**
+  *	ide_dma_off_quietly	-	Generic DMA kill
+@@ -438,11 +438,10 @@ void ide_dma_off_quietly(ide_drive_t *drive)
+ 	drive->using_dma = 0;
+ 	ide_toggle_bounce(drive, 0);
  
--	kobjp = &hp->kobj;
- 	temp_open_count = hp->count;
- 	hp->count = 0;
- 	hp->n_outbuf = 0;
-@@ -457,7 +467,7 @@ static void hvc_hangup(struct tty_struct *tty)
- 		free_irq(irq, hp);
- 	while(temp_open_count) {
- 		--temp_open_count;
--		kobject_put(kobjp);
-+		kref_put(&hp->kref, destroy_hvc_struct);
- 	}
+-	drive->hwif->dma_host_off(drive);
++	drive->hwif->dma_host_set(drive, 0);
  }
  
-@@ -729,27 +739,6 @@ static const struct tty_operations hvc_ops = {
- 	.chars_in_buffer = hvc_chars_in_buffer,
- };
+ EXPORT_SYMBOL(ide_dma_off_quietly);
+-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
  
--/* callback when the kboject ref count reaches zero. */
--static void destroy_hvc_struct(struct kobject *kobj)
--{
--	struct hvc_struct *hp = container_of(kobj, struct hvc_struct, kobj);
--	unsigned long flags;
--
--	spin_lock(&hvc_structs_lock);
--
--	spin_lock_irqsave(&hp->lock, flags);
--	list_del(&(hp->next));
--	spin_unlock_irqrestore(&hp->lock, flags);
+ /**
+  *	ide_dma_off	-	disable DMA on a device
+@@ -455,56 +454,29 @@ EXPORT_SYMBOL(ide_dma_off_quietly);
+ void ide_dma_off(ide_drive_t *drive)
+ {
+ 	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
+-	drive->hwif->dma_off_quietly(drive);
++	ide_dma_off_quietly(drive);
+ }
+ 
+ EXPORT_SYMBOL(ide_dma_off);
+ 
+-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+ /**
+- *	ide_dma_host_on	-	Enable DMA on a host
+- *	@drive: drive to enable for DMA
+- *
+- *	Enable DMA on an IDE controller following generic bus mastering
+- *	IDE controller behaviour
+- */
 -
--	spin_unlock(&hvc_structs_lock);
+-void ide_dma_host_on(ide_drive_t *drive)
+-{
+-	if (drive->using_dma) {
+-		ide_hwif_t *hwif	= HWIF(drive);
+-		u8 unit			= (drive->select.b.unit & 0x01);
+-		u8 dma_stat		= hwif->INB(hwif->dma_status);
 -
--	kfree(hp);
+-		hwif->OUTB((dma_stat|(1<<(5+unit))), hwif->dma_status);
+-	}
 -}
 -
--static struct kobj_type hvc_kobj_type = {
--	.release = destroy_hvc_struct,
--};
+-EXPORT_SYMBOL(ide_dma_host_on);
 -
- struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
- 					struct hv_ops *ops, int outbuf_size)
- {
-@@ -776,8 +765,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
- 	hp->outbuf_size = outbuf_size;
- 	hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
- 
--	kobject_init(&hp->kobj);
--	hp->kobj.ktype = &hvc_kobj_type;
-+	kref_init(&hp->kref);
- 
- 	spin_lock_init(&hp->lock);
- 	spin_lock(&hvc_structs_lock);
-@@ -806,12 +794,10 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
- int __devexit hvc_remove(struct hvc_struct *hp)
- {
- 	unsigned long flags;
--	struct kobject *kobjp;
- 	struct tty_struct *tty;
- 
- 	spin_lock_irqsave(&hp->lock, flags);
- 	tty = hp->tty;
--	kobjp = &hp->kobj;
- 
- 	if (hp->index < MAX_NR_HVC_CONSOLES)
- 		vtermnos[hp->index] = -1;
-@@ -821,12 +807,12 @@ int __devexit hvc_remove(struct hvc_struct *hp)
- 	spin_unlock_irqrestore(&hp->lock, flags);
- 
- 	/*
--	 * We 'put' the instance that was grabbed when the kobject instance
--	 * was initialized using kobject_init().  Let the last holder of this
--	 * kobject cause it to be removed, which will probably be the tty_hangup
-+	 * We 'put' the instance that was grabbed when the kref instance
-+	 * was initialized using kref_init().  Let the last holder of this
-+	 * kref cause it to be removed, which will probably be the tty_hangup
- 	 * below.
- 	 */
--	kobject_put(kobjp);
-+	kref_put(&hp->kref, destroy_hvc_struct);
- 
- 	/*
- 	 * This function call will auto chain call hvc_hangup.  The tty should
-diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
-index 69d8866..fd75590 100644
---- a/drivers/char/hvcs.c
-+++ b/drivers/char/hvcs.c
-@@ -57,11 +57,7 @@
-  * rescanning partner information upon a user's request.
-  *
-  * Each vty-server, prior to being exposed to this driver is reference counted
-- * using the 2.6 Linux kernel kobject construct.  This kobject is also used by
-- * the vio bus to provide a vio device sysfs entry that this driver attaches
-- * device specific attributes to, including partner information.  The vio bus
-- * framework also provides a sysfs entry for each vio driver.  The hvcs driver
-- * provides driver attributes in this entry.
-+ * using the 2.6 Linux kernel kref construct.
+-/**
+- *	__ide_dma_on		-	Enable DMA on a device
++ *	ide_dma_on		-	Enable DMA on a device
+  *	@drive: drive to enable DMA on
   *
-  * For direction on installation and usage of this driver please reference
-  * Documentation/powerpc/hvcs.txt.
-@@ -71,7 +67,7 @@
- #include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/kernel.h>
--#include <linux/kobject.h>
-+#include <linux/kref.h>
- #include <linux/kthread.h>
- #include <linux/list.h>
- #include <linux/major.h>
-@@ -293,12 +289,12 @@ struct hvcs_struct {
- 	int chars_in_buffer;
- 
- 	/*
--	 * Any variable below the kobject is valid before a tty is connected and
-+	 * Any variable below the kref is valid before a tty is connected and
- 	 * stays valid after the tty is disconnected.  These shouldn't be
- 	 * whacked until the koject refcount reaches zero though some entries
- 	 * may be changed via sysfs initiatives.
- 	 */
--	struct kobject kobj; /* ref count & hvcs_struct lifetime */
-+	struct kref kref; /* ref count & hvcs_struct lifetime */
- 	int connected; /* is the vty-server currently connected to a vty? */
- 	uint32_t p_unit_address; /* partner unit address */
- 	uint32_t p_partition_ID; /* partner partition ID */
-@@ -307,8 +303,8 @@ struct hvcs_struct {
- 	struct vio_dev *vdev;
- };
- 
--/* Required to back map a kobject to its containing object */
--#define from_kobj(kobj) container_of(kobj, struct hvcs_struct, kobj)
-+/* Required to back map a kref to its containing object */
-+#define from_kref(k) container_of(k, struct hvcs_struct, kref)
- 
- static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs);
- static DEFINE_SPINLOCK(hvcs_structs_lock);
-@@ -334,7 +330,6 @@ static void hvcs_partner_free(struct hvcs_struct *hvcsd);
- static int hvcs_enable_device(struct hvcs_struct *hvcsd,
- 		uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
- 
--static void destroy_hvcs_struct(struct kobject *kobj);
- static int hvcs_open(struct tty_struct *tty, struct file *filp);
- static void hvcs_close(struct tty_struct *tty, struct file *filp);
- static void hvcs_hangup(struct tty_struct * tty);
-@@ -703,10 +698,10 @@ static void hvcs_return_index(int index)
- 		hvcs_index_list[index] = -1;
- }
+  *	Enable IDE DMA for a device on this IDE controller.
+  */
+- 
+-int __ide_dma_on (ide_drive_t *drive)
+-{
+-	/* consult the list of known "bad" drives */
+-	if (__ide_dma_bad_drive(drive))
+-		return 1;
  
--/* callback when the kboject ref count reaches zero */
--static void destroy_hvcs_struct(struct kobject *kobj)
-+/* callback when the kref ref count reaches zero */
-+static void destroy_hvcs_struct(struct kref *kref)
- {
--	struct hvcs_struct *hvcsd = from_kobj(kobj);
-+	struct hvcs_struct *hvcsd = from_kref(kref);
- 	struct vio_dev *vdev;
- 	unsigned long flags;
++void ide_dma_on(ide_drive_t *drive)
++{
+ 	drive->using_dma = 1;
+ 	ide_toggle_bounce(drive, 1);
  
-@@ -743,10 +738,6 @@ static void destroy_hvcs_struct(struct kobject *kobj)
- 	kfree(hvcsd);
+-	drive->hwif->dma_host_on(drive);
+-
+-	return 0;
++	drive->hwif->dma_host_set(drive, 1);
  }
  
--static struct kobj_type hvcs_kobj_type = {
--	.release = destroy_hvcs_struct,
--};
--
- static int hvcs_get_index(void)
- {
- 	int i;
-@@ -791,9 +782,7 @@ static int __devinit hvcs_probe(
+-EXPORT_SYMBOL(__ide_dma_on);
++EXPORT_SYMBOL(ide_dma_on);
  
- 	spin_lock_init(&hvcsd->lock);
- 	/* Automatically incs the refcount the first time */
--	kobject_init(&hvcsd->kobj);
--	/* Set up the callback for terminating the hvcs_struct's life */
--	hvcsd->kobj.ktype = &hvcs_kobj_type;
-+	kref_init(&hvcsd->kref);
++#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+ /**
+  *	ide_dma_setup	-	begin a DMA phase
+  *	@drive: target device
+@@ -759,6 +731,7 @@ EXPORT_SYMBOL_GPL(ide_find_dma_mode);
  
- 	hvcsd->vdev = dev;
- 	dev->dev.driver_data = hvcsd;
-@@ -844,7 +833,6 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
+ static int ide_tune_dma(ide_drive_t *drive)
  {
- 	struct hvcs_struct *hvcsd = dev->dev.driver_data;
- 	unsigned long flags;
--	struct kobject *kobjp;
- 	struct tty_struct *tty;
- 
- 	if (!hvcsd)
-@@ -856,15 +844,13 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
++	ide_hwif_t *hwif = drive->hwif;
+ 	u8 speed;
  
- 	tty = hvcsd->tty;
+ 	if (noautodma || drive->nodma || (drive->id->capability & 1) == 0)
+@@ -771,15 +744,21 @@ static int ide_tune_dma(ide_drive_t *drive)
+ 	if (ide_id_dma_bug(drive))
+ 		return 0;
  
--	kobjp = &hvcsd->kobj;
--
- 	spin_unlock_irqrestore(&hvcsd->lock, flags);
+-	if (drive->hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
++	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
+ 		return config_drive_for_dma(drive);
  
- 	/*
- 	 * Let the last holder of this object cause it to be removed, which
- 	 * would probably be tty_hangup below.
- 	 */
--	kobject_put (kobjp);
-+	kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ 	speed = ide_max_dma_mode(drive);
  
- 	/*
- 	 * The hangup is a scheduled function which will auto chain call
-@@ -1086,7 +1072,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
- }
+-	if (!speed)
+-		return 0;
++	if (!speed) {
++		 /* is this really correct/needed? */
++		if ((hwif->host_flags & IDE_HFLAG_CY82C693) &&
++		    ide_dma_good_drive(drive))
++			return 1;
++		else
++			return 0;
++	}
  
- /*
-- * This always increments the kobject ref count if the call is successful.
-+ * This always increments the kref ref count if the call is successful.
-  * Please remember to dec when you are done with the instance.
-  *
-  * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
-@@ -1103,7 +1089,7 @@ static struct hvcs_struct *hvcs_get_by_index(int index)
- 		list_for_each_entry(hvcsd, &hvcs_structs, next) {
- 			spin_lock_irqsave(&hvcsd->lock, flags);
- 			if (hvcsd->index == index) {
--				kobject_get(&hvcsd->kobj);
-+				kref_get(&hvcsd->kref);
- 				spin_unlock_irqrestore(&hvcsd->lock, flags);
- 				spin_unlock(&hvcs_structs_lock);
- 				return hvcsd;
-@@ -1129,14 +1115,13 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
- 	unsigned int irq;
- 	struct vio_dev *vdev;
- 	unsigned long unit_address;
--	struct kobject *kobjp;
+-	if (drive->hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
++	if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ 		return 0;
  
- 	if (tty->driver_data)
- 		goto fast_open;
+ 	if (ide_set_dma_mode(drive, speed))
+@@ -824,25 +803,23 @@ err_out:
  
- 	/*
- 	 * Is there a vty-server that shares the same index?
--	 * This function increments the kobject index.
-+	 * This function increments the kref index.
- 	 */
- 	if (!(hvcsd = hvcs_get_by_index(tty->index))) {
- 		printk(KERN_WARNING "HVCS: open failed, no device associated"
-@@ -1181,7 +1166,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
- 	 * and will grab the spinlock and free the connection if it fails.
- 	 */
- 	if (((rc = hvcs_enable_device(hvcsd, unit_address, irq, vdev)))) {
--		kobject_put(&hvcsd->kobj);
-+		kref_put(&hvcsd->kref, destroy_hvcs_struct);
- 		printk(KERN_WARNING "HVCS: enable device failed.\n");
- 		return rc;
- 	}
-@@ -1192,17 +1177,11 @@ fast_open:
- 	hvcsd = tty->driver_data;
+ int ide_set_dma(ide_drive_t *drive)
+ {
+-	ide_hwif_t *hwif = drive->hwif;
+ 	int rc;
  
- 	spin_lock_irqsave(&hvcsd->lock, flags);
--	if (!kobject_get(&hvcsd->kobj)) {
--		spin_unlock_irqrestore(&hvcsd->lock, flags);
--		printk(KERN_ERR "HVCS: Kobject of open"
--			" hvcs doesn't exist.\n");
--		return -EFAULT; /* Is this the right return value? */
--	}
--
-+	kref_get(&hvcsd->kref);
- 	hvcsd->open_count++;
--
- 	hvcsd->todo_mask |= HVCS_SCHED_READ;
- 	spin_unlock_irqrestore(&hvcsd->lock, flags);
++	/*
++	 * Force DMAing for the beginning of the check.
++	 * Some chipsets appear to do interesting
++	 * things, if not checked and cleared.
++	 *   PARANOIA!!!
++	 */
++	ide_dma_off_quietly(drive);
 +
- open_success:
- 	hvcs_kick();
- 
-@@ -1212,9 +1191,8 @@ open_success:
- 	return 0;
+ 	rc = ide_dma_check(drive);
++	if (rc)
++		return rc;
  
- error_release:
--	kobjp = &hvcsd->kobj;
- 	spin_unlock_irqrestore(&hvcsd->lock, flags);
--	kobject_put(&hvcsd->kobj);
-+	kref_put(&hvcsd->kref, destroy_hvcs_struct);
+-	switch(rc) {
+-	case -1: /* DMA needs to be disabled */
+-		hwif->dma_off_quietly(drive);
+-		return -1;
+-	case  0: /* DMA needs to be enabled */
+-		return hwif->ide_dma_on(drive);
+-	case  1: /* DMA setting cannot be changed */
+-		break;
+-	default:
+-		BUG();
+-		break;
+-	}
++	ide_dma_on(drive);
  
- 	printk(KERN_WARNING "HVCS: partner connect failed.\n");
- 	return retval;
-@@ -1224,7 +1202,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
- {
- 	struct hvcs_struct *hvcsd;
- 	unsigned long flags;
--	struct kobject *kobjp;
- 	int irq = NO_IRQ;
+-	return rc;
++	return 0;
+ }
  
- 	/*
-@@ -1245,7 +1222,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
- 	hvcsd = tty->driver_data;
+ #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+@@ -968,11 +945,6 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
  
- 	spin_lock_irqsave(&hvcsd->lock, flags);
--	kobjp = &hvcsd->kobj;
- 	if (--hvcsd->open_count == 0) {
+ 	hwif->dma_base = base;
  
- 		vio_disable_interrupts(hvcsd->vdev);
-@@ -1270,7 +1246,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
- 		tty->driver_data = NULL;
+-	if (hwif->mate)
+-		hwif->dma_master = hwif->channel ? hwif->mate->dma_base : base;
+-	else
+-		hwif->dma_master = base;
+-
+ 	if (!(hwif->dma_command))
+ 		hwif->dma_command	= hwif->dma_base;
+ 	if (!(hwif->dma_vendor1))
+@@ -984,14 +956,8 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
+ 	if (!(hwif->dma_prdtable))
+ 		hwif->dma_prdtable	= (hwif->dma_base + 4);
  
- 		free_irq(irq, hvcsd);
--		kobject_put(kobjp);
-+		kref_put(&hvcsd->kref, destroy_hvcs_struct);
- 		return;
- 	} else if (hvcsd->open_count < 0) {
- 		printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
-@@ -1279,7 +1255,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+-	if (!hwif->dma_off_quietly)
+-		hwif->dma_off_quietly = &ide_dma_off_quietly;
+-	if (!hwif->dma_host_off)
+-		hwif->dma_host_off = &ide_dma_host_off;
+-	if (!hwif->ide_dma_on)
+-		hwif->ide_dma_on = &__ide_dma_on;
+-	if (!hwif->dma_host_on)
+-		hwif->dma_host_on = &ide_dma_host_on;
++	if (!hwif->dma_host_set)
++		hwif->dma_host_set = &ide_dma_host_set;
+ 	if (!hwif->dma_setup)
+ 		hwif->dma_setup = &ide_dma_setup;
+ 	if (!hwif->dma_exec_cmd)
+@@ -1014,8 +980,6 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
+ 		       hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
  	}
- 
- 	spin_unlock_irqrestore(&hvcsd->lock, flags);
--	kobject_put(kobjp);
-+	kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ 	printk("\n");
+-
+-	BUG_ON(!hwif->dma_master);
  }
  
- static void hvcs_hangup(struct tty_struct * tty)
-@@ -1287,21 +1263,17 @@ static void hvcs_hangup(struct tty_struct * tty)
- 	struct hvcs_struct *hvcsd = tty->driver_data;
- 	unsigned long flags;
- 	int temp_open_count;
--	struct kobject *kobjp;
- 	int irq = NO_IRQ;
- 
- 	spin_lock_irqsave(&hvcsd->lock, flags);
--	/* Preserve this so that we know how many kobject refs to put */
-+	/* Preserve this so that we know how many kref refs to put */
- 	temp_open_count = hvcsd->open_count;
+ EXPORT_SYMBOL_GPL(ide_setup_dma);
+diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
+index 04a3578..ff8232e 100644
+--- a/drivers/ide/ide-floppy.c
++++ b/drivers/ide/ide-floppy.c
+@@ -369,27 +369,6 @@ typedef struct ide_floppy_obj {
+ #define	IDEFLOPPY_IOCTL_FORMAT_START		0x4602
+ #define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS	0x4603
  
- 	/*
--	 * Don't kobject put inside the spinlock because the destruction
-+	 * Don't kref put inside the spinlock because the destruction
- 	 * callback may use the spinlock and it may get called before the
--	 * spinlock has been released.  Get a pointer to the kobject and
--	 * kobject_put on that after releasing the spinlock.
-+	 * spinlock has been released.
- 	 */
--	kobjp = &hvcsd->kobj;
+-#if 0
+-/*
+- *	Special requests for our block device strategy routine.
+- */
+-#define	IDEFLOPPY_FIRST_RQ	90
 -
- 	vio_disable_interrupts(hvcsd->vdev);
+-/*
+- * 	IDEFLOPPY_PC_RQ is used to queue a packet command in the request queue.
+- */
+-#define	IDEFLOPPY_PC_RQ		90
+-
+-#define IDEFLOPPY_LAST_RQ	90
+-
+-/*
+- *	A macro which can be used to check if a given request command
+- *	originated in the driver or in the buffer cache layer.
+- */
+-#define IDEFLOPPY_RQ_CMD(cmd) 	((cmd >= IDEFLOPPY_FIRST_RQ) && (cmd <= IDEFLOPPY_LAST_RQ))
+-
+-#endif
+-
+ /*
+  *	Error codes which are returned in rq->errors to the higher part
+  *	of the driver.
+@@ -793,9 +772,8 @@ static void idefloppy_retry_pc (ide_drive_t *drive)
+ {
+ 	idefloppy_pc_t *pc;
+ 	struct request *rq;
+-	atapi_error_t error;
  
- 	hvcsd->todo_mask = 0;
-@@ -1324,7 +1296,7 @@ static void hvcs_hangup(struct tty_struct * tty)
- 	free_irq(irq, hvcsd);
+-	error.all = HWIF(drive)->INB(IDE_ERROR_REG);
++	(void)drive->hwif->INB(IDE_ERROR_REG);
+ 	pc = idefloppy_next_pc_storage(drive);
+ 	rq = idefloppy_next_rq_storage(drive);
+ 	idefloppy_create_request_sense_cmd(pc);
+@@ -809,12 +787,12 @@ static void idefloppy_retry_pc (ide_drive_t *drive)
+ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
+ {
+ 	idefloppy_floppy_t *floppy = drive->driver_data;
+-	atapi_status_t status;
+-	atapi_bcount_t bcount;
+-	atapi_ireason_t ireason;
++	ide_hwif_t *hwif = drive->hwif;
+ 	idefloppy_pc_t *pc = floppy->pc;
+ 	struct request *rq = pc->rq;
+ 	unsigned int temp;
++	u16 bcount;
++	u8 stat, ireason;
  
- 	/*
--	 * We need to kobject_put() for every open_count we have since the
-+	 * We need to kref_put() for every open_count we have since the
- 	 * tty_hangup() function doesn't invoke a close per open connection on a
- 	 * non-console device.
- 	 */
-@@ -1335,7 +1307,7 @@ static void hvcs_hangup(struct tty_struct * tty)
- 		 * NOTE:  If this hangup was signaled from user space then the
- 		 * final put will never happen.
- 		 */
--		kobject_put(kobjp);
-+		kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ 	debug_log(KERN_INFO "ide-floppy: Reached %s interrupt handler\n",
+ 		__FUNCTION__);
+@@ -830,16 +808,16 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
  	}
- }
  
-diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
-index 556fd81..c422e87 100644
---- a/drivers/char/hw_random/amd-rng.c
-+++ b/drivers/char/hw_random/amd-rng.c
-@@ -28,6 +28,7 @@
- #include <linux/kernel.h>
- #include <linux/pci.h>
- #include <linux/hw_random.h>
-+#include <linux/delay.h>
- #include <asm/io.h>
+ 	/* Clear the interrupt */
+-	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
++	stat = drive->hwif->INB(IDE_STATUS_REG);
  
+-	if (!status.b.drq) {			/* No more interrupts */
++	if ((stat & DRQ_STAT) == 0) {		/* No more interrupts */
+ 		debug_log(KERN_INFO "Packet command completed, %d bytes "
+ 			"transferred\n", pc->actually_transferred);
+ 		clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
  
-@@ -52,11 +53,18 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
- static struct pci_dev *amd_pdev;
+ 		local_irq_enable_in_hardirq();
  
+-		if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) {
++		if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) {
+ 			/* Error detected */
+ 			debug_log(KERN_INFO "ide-floppy: %s: I/O error\n",
+ 				drive->name);
+@@ -870,32 +848,32 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
+ 	}
  
--static int amd_rng_data_present(struct hwrng *rng)
-+static int amd_rng_data_present(struct hwrng *rng, int wait)
- {
- 	u32 pmbase = (u32)rng->priv;
-+	int data, i;
+ 	/* Get the number of bytes to transfer */
+-	bcount.b.high = HWIF(drive)->INB(IDE_BCOUNTH_REG);
+-	bcount.b.low = HWIF(drive)->INB(IDE_BCOUNTL_REG);
++	bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) |
++		  hwif->INB(IDE_BCOUNTL_REG);
+ 	/* on this interrupt */
+-	ireason.all = HWIF(drive)->INB(IDE_IREASON_REG);
++	ireason = hwif->INB(IDE_IREASON_REG);
  
--      	return !!(inl(pmbase + 0xF4) & 1);
-+	for (i = 0; i < 20; i++) {
-+		data = !!(inl(pmbase + 0xF4) & 1);
-+		if (data || !wait)
-+			break;
-+		udelay(10);
-+	}
-+	return data;
- }
+-	if (ireason.b.cod) {
++	if (ireason & CD) {
+ 		printk(KERN_ERR "ide-floppy: CoD != 0 in idefloppy_pc_intr\n");
+ 		return ide_do_reset(drive);
+ 	}
+-	if (ireason.b.io == test_bit(PC_WRITING, &pc->flags)) {
++	if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) {
+ 		/* Hopefully, we will never get here */
+ 		printk(KERN_ERR "ide-floppy: We wanted to %s, ",
+-			ireason.b.io ? "Write":"Read");
++				(ireason & IO) ? "Write" : "Read");
+ 		printk(KERN_ERR "but the floppy wants us to %s !\n",
+-			ireason.b.io ? "Read":"Write");
++				(ireason & IO) ? "Read" : "Write");
+ 		return ide_do_reset(drive);
+ 	}
+ 	if (!test_bit(PC_WRITING, &pc->flags)) {
+ 		/* Reading - Check that we have enough space */
+-		temp = pc->actually_transferred + bcount.all;
++		temp = pc->actually_transferred + bcount;
+ 		if (temp > pc->request_transfer) {
+ 			if (temp > pc->buffer_size) {
+ 				printk(KERN_ERR "ide-floppy: The floppy wants "
+ 					"to send us more data than expected "
+ 					"- discarding data\n");
+-				idefloppy_discard_data(drive,bcount.all);
++				idefloppy_discard_data(drive, bcount);
+ 				BUG_ON(HWGROUP(drive)->handler != NULL);
+ 				ide_set_handler(drive,
+ 						&idefloppy_pc_intr,
+@@ -911,23 +889,21 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
+ 	if (test_bit(PC_WRITING, &pc->flags)) {
+ 		if (pc->buffer != NULL)
+ 			/* Write the current buffer */
+-			HWIF(drive)->atapi_output_bytes(drive,
+-						pc->current_position,
+-						bcount.all);
++			hwif->atapi_output_bytes(drive, pc->current_position,
++						 bcount);
+ 		else
+-			idefloppy_output_buffers(drive, pc, bcount.all);
++			idefloppy_output_buffers(drive, pc, bcount);
+ 	} else {
+ 		if (pc->buffer != NULL)
+ 			/* Read the current buffer */
+-			HWIF(drive)->atapi_input_bytes(drive,
+-						pc->current_position,
+-						bcount.all);
++			hwif->atapi_input_bytes(drive, pc->current_position,
++						bcount);
+ 		else
+-			idefloppy_input_buffers(drive, pc, bcount.all);
++			idefloppy_input_buffers(drive, pc, bcount);
+ 	}
+ 	/* Update the current position */
+-	pc->actually_transferred += bcount.all;
+-	pc->current_position += bcount.all;
++	pc->actually_transferred += bcount;
++	pc->current_position += bcount;
  
- static int amd_rng_data_read(struct hwrng *rng, u32 *data)
-diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
-index 26a860a..0118b98 100644
---- a/drivers/char/hw_random/core.c
-+++ b/drivers/char/hw_random/core.c
-@@ -66,11 +66,11 @@ static inline void hwrng_cleanup(struct hwrng *rng)
- 		rng->cleanup(rng);
- }
+ 	BUG_ON(HWGROUP(drive)->handler != NULL);
+ 	ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL);		/* And set the interrupt handler again */
+@@ -943,15 +919,15 @@ static ide_startstop_t idefloppy_transfer_pc (ide_drive_t *drive)
+ {
+ 	ide_startstop_t startstop;
+ 	idefloppy_floppy_t *floppy = drive->driver_data;
+-	atapi_ireason_t ireason;
++	u8 ireason;
  
--static inline int hwrng_data_present(struct hwrng *rng)
-+static inline int hwrng_data_present(struct hwrng *rng, int wait)
+ 	if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
+ 		printk(KERN_ERR "ide-floppy: Strange, packet command "
+ 				"initiated yet DRQ isn't asserted\n");
+ 		return startstop;
+ 	}
+-	ireason.all = HWIF(drive)->INB(IDE_IREASON_REG);
+-	if (!ireason.b.cod || ireason.b.io) {
++	ireason = drive->hwif->INB(IDE_IREASON_REG);
++	if ((ireason & CD) == 0 || (ireason & IO)) {
+ 		printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while "
+ 				"issuing a packet command\n");
+ 		return ide_do_reset(drive);
+@@ -991,15 +967,15 @@ static ide_startstop_t idefloppy_transfer_pc1 (ide_drive_t *drive)
  {
- 	if (!rng->data_present)
- 		return 1;
--	return rng->data_present(rng);
-+	return rng->data_present(rng, wait);
- }
+ 	idefloppy_floppy_t *floppy = drive->driver_data;
+ 	ide_startstop_t startstop;
+-	atapi_ireason_t ireason;
++	u8 ireason;
  
- static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
-@@ -94,8 +94,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ 	if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
+ 		printk(KERN_ERR "ide-floppy: Strange, packet command "
+ 				"initiated yet DRQ isn't asserted\n");
+ 		return startstop;
+ 	}
+-	ireason.all = HWIF(drive)->INB(IDE_IREASON_REG);
+-	if (!ireason.b.cod || ireason.b.io) {
++	ireason = drive->hwif->INB(IDE_IREASON_REG);
++	if ((ireason & CD) == 0 || (ireason & IO)) {
+ 		printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) "
+ 				"while issuing a packet command\n");
+ 		return ide_do_reset(drive);
+@@ -1041,21 +1017,9 @@ static ide_startstop_t idefloppy_issue_pc (ide_drive_t *drive, idefloppy_pc_t *p
  {
- 	u32 data;
- 	ssize_t ret = 0;
--	int i, err = 0;
--	int data_present;
-+	int err = 0;
- 	int bytes_read;
+ 	idefloppy_floppy_t *floppy = drive->driver_data;
+ 	ide_hwif_t *hwif = drive->hwif;
+-	atapi_feature_t feature;
+-	atapi_bcount_t bcount;
+ 	ide_handler_t *pkt_xfer_routine;
+-
+-#if 0 /* Accessing floppy->pc is not valid here, the previous pc may be gone
+-         and have lived on another thread's stack; that stack may have become
+-         unmapped meanwhile (CONFIG_DEBUG_PAGEALLOC). */
+-#if IDEFLOPPY_DEBUG_BUGS
+-	if (floppy->pc->c[0] == IDEFLOPPY_REQUEST_SENSE_CMD &&
+-	    pc->c[0] == IDEFLOPPY_REQUEST_SENSE_CMD) {
+-		printk(KERN_ERR "ide-floppy: possible ide-floppy.c bug - "
+-			"Two request sense in serial were issued\n");
+-	}
+-#endif /* IDEFLOPPY_DEBUG_BUGS */
+-#endif
++	u16 bcount;
++	u8 dma;
  
- 	while (size) {
-@@ -107,21 +106,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
- 			err = -ENODEV;
- 			goto out;
- 		}
--		if (filp->f_flags & O_NONBLOCK) {
--			data_present = hwrng_data_present(current_rng);
--		} else {
--			/* Some RNG require some time between data_reads to gather
--			 * new entropy. Poll it.
--			 */
--			for (i = 0; i < 20; i++) {
--				data_present = hwrng_data_present(current_rng);
--				if (data_present)
--					break;
--				udelay(10);
--			}
--		}
-+
- 		bytes_read = 0;
--		if (data_present)
-+		if (hwrng_data_present(current_rng,
-+				       !(filp->f_flags & O_NONBLOCK)))
- 			bytes_read = hwrng_data_read(current_rng, &data);
- 		mutex_unlock(&rng_mutex);
+ 	if (floppy->failed_pc == NULL &&
+ 	    pc->c[0] != IDEFLOPPY_REQUEST_SENSE_CMD)
+@@ -1093,25 +1057,20 @@ static ide_startstop_t idefloppy_issue_pc (ide_drive_t *drive, idefloppy_pc_t *p
+ 	/* We haven't transferred any data yet */
+ 	pc->actually_transferred = 0;
+ 	pc->current_position = pc->buffer;
+-	bcount.all = min(pc->request_transfer, 63 * 1024);
++	bcount = min(pc->request_transfer, 63 * 1024);
  
-diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
-index 8e8658d..fed4ef5 100644
---- a/drivers/char/hw_random/geode-rng.c
-+++ b/drivers/char/hw_random/geode-rng.c
-@@ -28,6 +28,7 @@
- #include <linux/kernel.h>
- #include <linux/pci.h>
- #include <linux/hw_random.h>
-+#include <linux/delay.h>
- #include <asm/io.h>
+ 	if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags))
+ 		ide_dma_off(drive);
  
+-	feature.all = 0;
++	dma = 0;
  
-@@ -61,11 +62,18 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
- 	return 4;
- }
+ 	if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma)
+-		feature.b.dma = !hwif->dma_setup(drive);
++		dma = !hwif->dma_setup(drive);
  
--static int geode_rng_data_present(struct hwrng *rng)
-+static int geode_rng_data_present(struct hwrng *rng, int wait)
- {
- 	void __iomem *mem = (void __iomem *)rng->priv;
-+	int data, i;
+-	if (IDE_CONTROL_REG)
+-		HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
+-	/* Use PIO/DMA */
+-	HWIF(drive)->OUTB(feature.all, IDE_FEATURE_REG);
+-	HWIF(drive)->OUTB(bcount.b.high, IDE_BCOUNTH_REG);
+-	HWIF(drive)->OUTB(bcount.b.low, IDE_BCOUNTL_REG);
+-	HWIF(drive)->OUTB(drive->select.all, IDE_SELECT_REG);
++	ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
++			   IDE_TFLAG_OUT_DEVICE, bcount, dma);
  
--	return !!(readl(mem + GEODE_RNG_STATUS_REG));
-+	for (i = 0; i < 20; i++) {
-+		data = !!(readl(mem + GEODE_RNG_STATUS_REG));
-+		if (data || !wait)
-+			break;
-+		udelay(10);
-+	}
-+	return data;
- }
+-	if (feature.b.dma) {	/* Begin DMA, if necessary */
++	if (dma) {	/* Begin DMA, if necessary */
+ 		set_bit(PC_DMA_IN_PROGRESS, &pc->flags);
+ 		hwif->dma_start(drive);
+ 	}
+@@ -1665,14 +1624,14 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
+ 		/* Else assume format_unit has finished, and we're
+ 		** at 0x10000 */
+ 	} else {
+-		atapi_status_t status;
+ 		unsigned long flags;
++		u8 stat;
  
+ 		local_irq_save(flags);
+-		status.all = HWIF(drive)->INB(IDE_STATUS_REG);
++		stat = drive->hwif->INB(IDE_STATUS_REG);
+ 		local_irq_restore(flags);
  
-diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
-index 753f460..5cc651e 100644
---- a/drivers/char/hw_random/intel-rng.c
-+++ b/drivers/char/hw_random/intel-rng.c
-@@ -29,6 +29,7 @@
- #include <linux/module.h>
- #include <linux/pci.h>
- #include <linux/stop_machine.h>
-+#include <linux/delay.h>
- #include <asm/io.h>
+-		progress_indication = !status.b.dsc ? 0 : 0x10000;
++		progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
+ 	}
+ 	if (put_user(progress_indication, arg))
+ 		return (-EFAULT);
+diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
+index 0f72b98..bb30c29 100644
+--- a/drivers/ide/ide-generic.c
++++ b/drivers/ide/ide-generic.c
+@@ -14,10 +14,16 @@
  
+ static int __init ide_generic_init(void)
+ {
++	u8 idx[MAX_HWIFS];
++	int i;
++
+ 	if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
+ 		ide_get_lock(NULL, NULL); /* for atari only */
  
-@@ -162,11 +163,19 @@ static inline u8 hwstatus_set(void __iomem *mem,
- 	return hwstatus_get(mem);
- }
+-	(void)ideprobe_init();
++	for (i = 0; i < MAX_HWIFS; i++)
++		idx[i] = ide_hwifs[i].present ? 0xff : i;
++
++	ide_device_add_all(idx);
  
--static int intel_rng_data_present(struct hwrng *rng)
-+static int intel_rng_data_present(struct hwrng *rng, int wait)
+ 	if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
+ 		ide_release_lock();	/* for atari only */
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index bef781f..e6bb9cf 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
+ 			     int uptodate, unsigned int nr_bytes, int dequeue)
  {
- 	void __iomem *mem = (void __iomem *)rng->priv;
--
--	return !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT);
-+	int data, i;
+ 	int ret = 1;
++	int error = 0;
 +
-+	for (i = 0; i < 20; i++) {
-+		data = !!(readb(mem + INTEL_RNG_STATUS) &
-+			  INTEL_RNG_DATA_PRESENT);
-+		if (data || !wait)
-+			break;
-+		udelay(10);
-+	}
-+	return data;
- }
++	if (uptodate <= 0)
++		error = uptodate ? uptodate : -EIO;
  
- static int intel_rng_data_read(struct hwrng *rng, u32 *data)
-diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
-index 3f35a1c..7e31995 100644
---- a/drivers/char/hw_random/omap-rng.c
-+++ b/drivers/char/hw_random/omap-rng.c
-@@ -29,6 +29,7 @@
- #include <linux/err.h>
- #include <linux/platform_device.h>
- #include <linux/hw_random.h>
-+#include <linux/delay.h>
+ 	/*
+ 	 * if failfast is set on a request, override number of sectors and
+ 	 * complete the whole request right now
+ 	 */
+-	if (blk_noretry_request(rq) && end_io_error(uptodate))
++	if (blk_noretry_request(rq) && error)
+ 		nr_bytes = rq->hard_nr_sectors << 9;
  
- #include <asm/io.h>
+-	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
++	if (!blk_fs_request(rq) && error && !rq->errors)
+ 		rq->errors = -EIO;
  
-@@ -65,9 +66,17 @@ static void omap_rng_write_reg(int reg, u32 val)
- }
+ 	/*
+@@ -75,17 +79,12 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
+ 	 */
+ 	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
+ 		drive->state = 0;
+-		HWGROUP(drive)->hwif->ide_dma_on(drive);
++		ide_dma_on(drive);
+ 	}
  
- /* REVISIT: Does the status bit really work on 16xx? */
--static int omap_rng_data_present(struct hwrng *rng)
-+static int omap_rng_data_present(struct hwrng *rng, int wait)
- {
--	return omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
-+	int data, i;
-+
-+	for (i = 0; i < 20; i++) {
-+		data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
-+		if (data || !wait)
-+			break;
-+		udelay(10);
-+	}
-+	return data;
- }
+-	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+-		add_disk_randomness(rq->rq_disk);
+-		if (dequeue) {
+-			if (!list_empty(&rq->queuelist))
+-				blkdev_dequeue_request(rq);
++	if (!__blk_end_request(rq, error, nr_bytes)) {
++		if (dequeue)
+ 			HWGROUP(drive)->rq = NULL;
+-		}
+-		end_that_request_last(rq, uptodate);
+ 		ret = 0;
+ 	}
  
- static int omap_rng_data_read(struct hwrng *rng, u32 *data)
-diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
-index fa6040b..e2ea210 100644
---- a/drivers/char/hw_random/pasemi-rng.c
-+++ b/drivers/char/hw_random/pasemi-rng.c
-@@ -23,6 +23,7 @@
- #include <linux/kernel.h>
- #include <linux/platform_device.h>
- #include <linux/hw_random.h>
-+#include <linux/delay.h>
- #include <asm/of_platform.h>
- #include <asm/io.h>
+@@ -189,18 +188,14 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
+ 			return ide_stopped;
+ 		}
+ 		if (ide_id_has_flush_cache_ext(drive->id))
+-			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
++			args->tf.command = WIN_FLUSH_CACHE_EXT;
+ 		else
+-			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
+-		args->command_type = IDE_DRIVE_TASK_NO_DATA;
+-		args->handler	   = &task_no_data_intr;
+-		return do_rw_taskfile(drive, args);
++			args->tf.command = WIN_FLUSH_CACHE;
++		goto out_do_tf;
  
-@@ -41,12 +42,19 @@
+ 	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
+-		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1;
+-		args->command_type = IDE_DRIVE_TASK_NO_DATA;
+-		args->handler	   = &task_no_data_intr;
+-		return do_rw_taskfile(drive, args);
++		args->tf.command = WIN_STANDBYNOW1;
++		goto out_do_tf;
  
- #define MODULE_NAME "pasemi_rng"
+ 	case idedisk_pm_restore_pio:	/* Resume step 1 (restore PIO) */
+ 		ide_set_max_pio(drive);
+@@ -214,10 +209,8 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
+ 		return ide_stopped;
  
--static int pasemi_rng_data_present(struct hwrng *rng)
-+static int pasemi_rng_data_present(struct hwrng *rng, int wait)
- {
- 	void __iomem *rng_regs = (void __iomem *)rng->priv;
--
--	return (in_le32(rng_regs + SDCRNG_CTL_REG)
--		& SDCRNG_CTL_FVLD_M) ? 1 : 0;
-+	int data, i;
-+
-+	for (i = 0; i < 20; i++) {
-+		data = (in_le32(rng_regs + SDCRNG_CTL_REG)
-+			& SDCRNG_CTL_FVLD_M) ? 1 : 0;
-+		if (data || !wait)
-+			break;
-+		udelay(10);
-+	}
-+	return data;
- }
+ 	case idedisk_pm_idle:		/* Resume step 2 (idle) */
+-		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
+-		args->command_type = IDE_DRIVE_TASK_NO_DATA;
+-		args->handler = task_no_data_intr;
+-		return do_rw_taskfile(drive, args);
++		args->tf.command = WIN_IDLEIMMEDIATE;
++		goto out_do_tf;
  
- static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
-diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
-index ec435cb..868e39f 100644
---- a/drivers/char/hw_random/via-rng.c
-+++ b/drivers/char/hw_random/via-rng.c
-@@ -27,6 +27,7 @@
- #include <linux/module.h>
- #include <linux/kernel.h>
- #include <linux/hw_random.h>
-+#include <linux/delay.h>
- #include <asm/io.h>
- #include <asm/msr.h>
- #include <asm/cpufeature.h>
-@@ -77,10 +78,11 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
- 	return eax_out;
+ 	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
+ 		/*
+@@ -225,9 +218,8 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
+ 		 * we could be smarter and check for current xfer_speed
+ 		 * in struct drive etc...
+ 		 */
+-		if (drive->hwif->ide_dma_on == NULL)
++		if (drive->hwif->dma_host_set == NULL)
+ 			break;
+-		drive->hwif->dma_off_quietly(drive);
+ 		/*
+ 		 * TODO: respect ->using_dma setting
+ 		 */
+@@ -236,6 +228,11 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
+ 	}
+ 	pm->pm_step = ide_pm_state_completed;
+ 	return ide_stopped;
++
++out_do_tf:
++	args->tf_flags	 = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++	args->data_phase = TASKFILE_NO_DATA;
++	return do_rw_taskfile(drive, args);
  }
  
--static int via_rng_data_present(struct hwrng *rng)
-+static int via_rng_data_present(struct hwrng *rng, int wait)
- {
- 	u32 bytes_out;
- 	u32 *via_rng_datum = (u32 *)(&rng->priv);
-+	int i;
- 
- 	/* We choose the recommended 1-byte-per-instruction RNG rate,
- 	 * for greater randomness at the expense of speed.  Larger
-@@ -95,12 +97,15 @@ static int via_rng_data_present(struct hwrng *rng)
- 	 * completes.
- 	 */
- 
--	*via_rng_datum = 0; /* paranoia, not really necessary */
--	bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
--	bytes_out &= VIA_XSTORE_CNT_MASK;
--	if (bytes_out == 0)
--		return 0;
--	return 1;
-+	for (i = 0; i < 20; i++) {
-+		*via_rng_datum = 0; /* paranoia, not really necessary */
-+		bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
-+		bytes_out &= VIA_XSTORE_CNT_MASK;
-+		if (bytes_out || !wait)
-+			break;
-+		udelay(10);
-+	}
-+	return bytes_out ? 1 : 0;
+ /**
+@@ -292,12 +289,54 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
+ 		drive->blocked = 0;
+ 		blk_start_queue(drive->queue);
+ 	}
+-	blkdev_dequeue_request(rq);
+ 	HWGROUP(drive)->rq = NULL;
+-	end_that_request_last(rq, 1);
++	if (__blk_end_request(rq, 0, 0))
++		BUG();
+ 	spin_unlock_irqrestore(&ide_lock, flags);
  }
  
- static int via_rng_data_read(struct hwrng *rng, u32 *data)
-diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
-new file mode 100644
-index 0000000..6076e66
---- /dev/null
-+++ b/drivers/char/nozomi.c
-@@ -0,0 +1,1993 @@
-+/*
-+ * nozomi.c  -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
-+ *
-+ * Written by: Ulf Jakobsson,
-+ *             Jan Åkerfeldt,
-+ *             Stefan Thomasson,
-+ *
-+ * Maintained by: Paul Hardwick (p.hardwick at option.com)
-+ *
-+ * Patches:
-+ *          Locking code changes for Vodafone by Sphere Systems Ltd,
-+ *                              Andrew Bird (ajb at spheresystems.co.uk )
-+ *                              & Phil Sanderson
-+ *
-+ * Source has been ported from an implementation made by Filip Aben @ Option
-+ *
-+ * --------------------------------------------------------------------------
-+ *
-+ * Copyright (c) 2005,2006 Option Wireless Sweden AB
-+ * Copyright (c) 2006 Sphere Systems Ltd
-+ * Copyright (c) 2006 Option Wireless n/v
-+ * All rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-+ *
-+ * --------------------------------------------------------------------------
-+ */
-+
-+/*
-+ * CHANGELOG
-+ * Version 2.1d
-+ * 11-November-2007 Jiri Slaby, Frank Seidel
-+ * - Big rework of multicard support by Jiri
-+ * - Major cleanups (semaphore to mutex, endianess, no major reservation)
-+ * - Optimizations
-+ *
-+ * Version 2.1c
-+ * 30-October-2007 Frank Seidel
-+ * - Completed multicard support
-+ * - Minor cleanups
-+ *
-+ * Version 2.1b
-+ * 07-August-2007 Frank Seidel
-+ * - Minor cleanups
-+ * - theoretical multicard support
-+ *
-+ * Version 2.1
-+ * 03-July-2006 Paul Hardwick
-+ *
-+ * - Stability Improvements. Incorporated spinlock wraps patch.
-+ * - Updated for newer 2.6.14+ kernels (tty_buffer_request_room)
-+ * - using __devexit macro for tty
-+ *
-+ *
-+ * Version 2.0
-+ * 08-feb-2006 15:34:10:Ulf
-+ *
-+ * -Fixed issue when not waking up line disipine layer, could probably result
-+ *  in better uplink performance for 2.4.
-+ *
-+ * -Fixed issue with big endian during initalization, now proper toggle flags
-+ *  are handled between preloader and maincode.
-+ *
-+ * -Fixed flow control issue.
-+ *
-+ * -Added support for setting DTR.
-+ *
-+ * -For 2.4 kernels, removing temporary buffer that's not needed.
-+ *
-+ * -Reading CTS only for modem port (only port that supports it).
-+ *
-+ * -Return 0 in write_room instead of netative value, it's not handled in
-+ *  upper layer.
-+ *
-+ * --------------------------------------------------------------------------
-+ * Version 1.0
-+ *
-+ * First version of driver, only tested with card of type F32_2.
-+ * Works fine with 2.4 and 2.6 kernels.
-+ * Driver also support big endian architecture.
-+ */
-+
-+/* Enable this to have a lot of debug printouts */
-+#define DEBUG
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/ioport.h>
-+#include <linux/tty.h>
-+#include <linux/tty_driver.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/interrupt.h>
-+#include <linux/kmod.h>
-+#include <linux/init.h>
-+#include <linux/kfifo.h>
-+#include <linux/uaccess.h>
-+#include <asm/byteorder.h>
-+
-+#include <linux/delay.h>
-+
-+
-+#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \
-+					__DATE__ " " __TIME__ ")"
-+
-+/*    Macros definitions */
-+
-+/* Default debug printout level */
-+#define NOZOMI_DEBUG_LEVEL 0x00
-+
-+#define P_BUF_SIZE 128
-+#define NFO(_err_flag_, args...)				\
-+do {								\
-+	char tmp[P_BUF_SIZE];					\
-+	snprintf(tmp, sizeof(tmp), ##args);			\
-+	printk(_err_flag_ "[%d] %s(): %s\n", __LINE__,		\
-+		__FUNCTION__, tmp);				\
-+} while (0)
-+
-+#define DBG1(args...) D_(0x01, ##args)
-+#define DBG2(args...) D_(0x02, ##args)
-+#define DBG3(args...) D_(0x04, ##args)
-+#define DBG4(args...) D_(0x08, ##args)
-+#define DBG5(args...) D_(0x10, ##args)
-+#define DBG6(args...) D_(0x20, ##args)
-+#define DBG7(args...) D_(0x40, ##args)
-+#define DBG8(args...) D_(0x80, ##args)
-+
-+#ifdef DEBUG
-+/* Do we need this settable at runtime? */
-+static int debug = NOZOMI_DEBUG_LEVEL;
-+
-+#define D(lvl, args...)  do {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
-+				while (0)
-+#define D_(lvl, args...) D(lvl, ##args)
-+
-+/* These printouts are always printed */
-+
-+#else
-+static int debug;
-+#define D_(lvl, args...)
-+#endif
-+
-+/* TODO: rewrite to optimize macros... */
-+
-+#define TMP_BUF_MAX 256
-+
-+#define DUMP(buf__,len__) \
-+  do {  \
-+    char tbuf[TMP_BUF_MAX] = {0};\
-+    if (len__ > 1) {\
-+	snprintf(tbuf, len__ > TMP_BUF_MAX ? TMP_BUF_MAX : len__, "%s", buf__);\
-+	if (tbuf[len__-2] == '\r') {\
-+		tbuf[len__-2] = 'r';\
-+	} \
-+	DBG1("SENDING: '%s' (%d+n)", tbuf, len__);\
-+    } else {\
-+	DBG1("SENDING: '%s' (%d)", tbuf, len__);\
-+    } \
-+} while (0)
-+
-+/*    Defines */
-+#define NOZOMI_NAME		"nozomi"
-+#define NOZOMI_NAME_TTY		"nozomi_tty"
-+#define DRIVER_DESC		"Nozomi driver"
-+
-+#define NTTY_TTY_MAXMINORS	256
-+#define NTTY_FIFO_BUFFER_SIZE	8192
-+
-+/* Must be power of 2 */
-+#define FIFO_BUFFER_SIZE_UL	8192
-+
-+/* Size of tmp send buffer to card */
-+#define SEND_BUF_MAX		1024
-+#define RECEIVE_BUF_MAX		4
-+
-+
-+/* Define all types of vendors and devices to support */
-+#define VENDOR1		0x1931	/* Vendor Option */
-+#define DEVICE1		0x000c	/* HSDPA card */
-+
-+#define R_IIR		0x0000	/* Interrupt Identity Register */
-+#define R_FCR		0x0000	/* Flow Control Register */
-+#define R_IER		0x0004	/* Interrupt Enable Register */
-+
-+#define CONFIG_MAGIC	0xEFEFFEFE
-+#define TOGGLE_VALID	0x0000
-+
-+/* Definition of interrupt tokens */
-+#define MDM_DL1		0x0001
-+#define MDM_UL1		0x0002
-+#define MDM_DL2		0x0004
-+#define MDM_UL2		0x0008
-+#define DIAG_DL1	0x0010
-+#define DIAG_DL2	0x0020
-+#define DIAG_UL		0x0040
-+#define APP1_DL		0x0080
-+#define APP1_UL		0x0100
-+#define APP2_DL		0x0200
-+#define APP2_UL		0x0400
-+#define CTRL_DL		0x0800
-+#define CTRL_UL		0x1000
-+#define RESET		0x8000
-+
-+#define MDM_DL		(MDM_DL1  | MDM_DL2)
-+#define MDM_UL		(MDM_UL1  | MDM_UL2)
-+#define DIAG_DL		(DIAG_DL1 | DIAG_DL2)
-+
-+/* modem signal definition */
-+#define CTRL_DSR	0x0001
-+#define CTRL_DCD	0x0002
-+#define CTRL_RI		0x0004
-+#define CTRL_CTS	0x0008
-+
-+#define CTRL_DTR	0x0001
-+#define CTRL_RTS	0x0002
-+
-+#define MAX_PORT		4
-+#define NOZOMI_MAX_PORTS	5
-+#define NOZOMI_MAX_CARDS	(NTTY_TTY_MAXMINORS / MAX_PORT)
-+
-+/*    Type definitions */
-+
-+/*
-+ * There are two types of nozomi cards,
-+ * one with 2048 memory and with 8192 memory
-+ */
-+enum card_type {
-+	F32_2 = 2048,	/* 512 bytes downlink + uplink * 2 -> 2048 */
-+	F32_8 = 8192,	/* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */
-+};
-+
-+/* Two different toggle channels exist */
-+enum channel_type {
-+	CH_A = 0,
-+	CH_B = 1,
-+};
-+
-+/* Port definition for the card regarding flow control */
-+enum ctrl_port_type {
-+	CTRL_CMD	= 0,
-+	CTRL_MDM	= 1,
-+	CTRL_DIAG	= 2,
-+	CTRL_APP1	= 3,
-+	CTRL_APP2	= 4,
-+	CTRL_ERROR	= -1,
-+};
-+
-+/* Ports that the nozomi has */
-+enum port_type {
-+	PORT_MDM	= 0,
-+	PORT_DIAG	= 1,
-+	PORT_APP1	= 2,
-+	PORT_APP2	= 3,
-+	PORT_CTRL	= 4,
-+	PORT_ERROR	= -1,
-+};
-+
-+#ifdef __BIG_ENDIAN
-+/* Big endian */
-+
-+struct toggles {
-+	unsigned enabled:5;	/*
-+				 * Toggle fields are valid if enabled is 0,
-+				 * else A-channels must always be used.
-+				 */
-+	unsigned diag_dl:1;
-+	unsigned mdm_dl:1;
-+	unsigned mdm_ul:1;
-+} __attribute__ ((packed));
-+
-+/* Configuration table to read at startup of card */
-+/* Is for now only needed during initialization phase */
-+struct config_table {
-+	u32 signature;
-+	u16 product_information;
-+	u16 version;
-+	u8 pad3[3];
-+	struct toggles toggle;
-+	u8 pad1[4];
-+	u16 dl_mdm_len1;	/*
-+				 * If this is 64, it can hold
-+				 * 60 bytes + 4 that is length field
-+				 */
-+	u16 dl_start;
-+
-+	u16 dl_diag_len1;
-+	u16 dl_mdm_len2;	/*
-+				 * If this is 64, it can hold
-+				 * 60 bytes + 4 that is length field
-+				 */
-+	u16 dl_app1_len;
-+
-+	u16 dl_diag_len2;
-+	u16 dl_ctrl_len;
-+	u16 dl_app2_len;
-+	u8 pad2[16];
-+	u16 ul_mdm_len1;
-+	u16 ul_start;
-+	u16 ul_diag_len;
-+	u16 ul_mdm_len2;
-+	u16 ul_app1_len;
-+	u16 ul_app2_len;
-+	u16 ul_ctrl_len;
-+} __attribute__ ((packed));
-+
-+/* This stores all control downlink flags */
-+struct ctrl_dl {
-+	u8 port;
-+	unsigned reserved:4;
-+	unsigned CTS:1;
-+	unsigned RI:1;
-+	unsigned DCD:1;
-+	unsigned DSR:1;
-+} __attribute__ ((packed));
-+
-+/* This stores all control uplink flags */
-+struct ctrl_ul {
-+	u8 port;
-+	unsigned reserved:6;
-+	unsigned RTS:1;
-+	unsigned DTR:1;
-+} __attribute__ ((packed));
-+
-+#else
-+/* Little endian */
-+
-+/* This represents the toggle information */
-+struct toggles {
-+	unsigned mdm_ul:1;
-+	unsigned mdm_dl:1;
-+	unsigned diag_dl:1;
-+	unsigned enabled:5;	/*
-+				 * Toggle fields are valid if enabled is 0,
-+				 * else A-channels must always be used.
-+				 */
-+} __attribute__ ((packed));
-+
-+/* Configuration table to read at startup of card */
-+struct config_table {
-+	u32 signature;
-+	u16 version;
-+	u16 product_information;
-+	struct toggles toggle;
-+	u8 pad1[7];
-+	u16 dl_start;
-+	u16 dl_mdm_len1;	/*
-+				 * If this is 64, it can hold
-+				 * 60 bytes + 4 that is length field
-+				 */
-+	u16 dl_mdm_len2;
-+	u16 dl_diag_len1;
-+	u16 dl_diag_len2;
-+	u16 dl_app1_len;
-+	u16 dl_app2_len;
-+	u16 dl_ctrl_len;
-+	u8 pad2[16];
-+	u16 ul_start;
-+	u16 ul_mdm_len2;
-+	u16 ul_mdm_len1;
-+	u16 ul_diag_len;
-+	u16 ul_app1_len;
-+	u16 ul_app2_len;
-+	u16 ul_ctrl_len;
-+} __attribute__ ((packed));
-+
-+/* This stores all control downlink flags */
-+struct ctrl_dl {
-+	unsigned DSR:1;
-+	unsigned DCD:1;
-+	unsigned RI:1;
-+	unsigned CTS:1;
-+	unsigned reserverd:4;
-+	u8 port;
-+} __attribute__ ((packed));
-+
-+/* This stores all control uplink flags */
-+struct ctrl_ul {
-+	unsigned DTR:1;
-+	unsigned RTS:1;
-+	unsigned reserved:6;
-+	u8 port;
-+} __attribute__ ((packed));
-+#endif
-+
-+/* This holds all information that is needed regarding a port */
-+struct port {
-+	u8 update_flow_control;
-+	struct ctrl_ul ctrl_ul;
-+	struct ctrl_dl ctrl_dl;
-+	struct kfifo *fifo_ul;
-+	void __iomem *dl_addr[2];
-+	u32 dl_size[2];
-+	u8 toggle_dl;
-+	void __iomem *ul_addr[2];
-+	u32 ul_size[2];
-+	u8 toggle_ul;
-+	u16 token_dl;
-+
-+	struct tty_struct *tty;
-+	int tty_open_count;
-+	/* mutex to ensure one access patch to this port */
-+	struct mutex tty_sem;
-+	wait_queue_head_t tty_wait;
-+	struct async_icount tty_icount;
-+};
-+
-+/* Private data one for each card in the system */
-+struct nozomi {
-+	void __iomem *base_addr;
-+	unsigned long flip;
-+
-+	/* Pointers to registers */
-+	void __iomem *reg_iir;
-+	void __iomem *reg_fcr;
-+	void __iomem *reg_ier;
-+
-+	u16 last_ier;
-+	enum card_type card_type;
-+	struct config_table config_table;	/* Configuration table */
-+	struct pci_dev *pdev;
-+	struct port port[NOZOMI_MAX_PORTS];
-+	u8 *send_buf;
-+
-+	spinlock_t spin_mutex;	/* secures access to registers and tty */
-+
-+	unsigned int index_start;
-+	u32 open_ttys;
-+};
-+
-+/* This is a data packet that is read or written to/from card */
-+struct buffer {
-+	u32 size;		/* size is the length of the data buffer */
-+	u8 *data;
-+} __attribute__ ((packed));
-+
-+/*    Global variables */
-+static struct pci_device_id nozomi_pci_tbl[] = {
-+	{PCI_DEVICE(VENDOR1, DEVICE1)},
-+	{},
-+};
-+
-+MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
-+
-+static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
-+static struct tty_driver *ntty_driver;
-+
-+/*
-+ * find card by tty_index
-+ */
-+static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty)
-+{
-+	return tty ? ndevs[tty->index / MAX_PORT] : NULL;
-+}
-+
-+static inline struct port *get_port_by_tty(const struct tty_struct *tty)
-+{
-+	struct nozomi *ndev = get_dc_by_tty(tty);
-+	return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL;
-+}
-+
-+/*
-+ * TODO:
-+ * -Optimize
-+ * -Rewrite cleaner
-+ */
-+
-+static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
-+			u32 size_bytes)
++void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
 +{
-+	u32 i = 0;
-+	const u32 *ptr = (__force u32 *) mem_addr_start;
-+	u16 *buf16;
-+
-+	if (unlikely(!ptr || !buf))
-+		goto out;
++	ide_hwif_t *hwif = drive->hwif;
++	struct ide_taskfile *tf = &task->tf;
 +
-+	/* shortcut for extremely often used cases */
-+	switch (size_bytes) {
-+	case 2:	/* 2 bytes */
-+		buf16 = (u16 *) buf;
-+		*buf16 = __le16_to_cpu(readw((void __iomem *)ptr));
-+		goto out;
-+		break;
-+	case 4:	/* 4 bytes */
-+		*(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
-+		goto out;
-+		break;
-+	}
++	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
++		u16 data = hwif->INW(IDE_DATA_REG);
 +
-+	while (i < size_bytes) {
-+		if (size_bytes - i == 2) {
-+			/* Handle 2 bytes in the end */
-+			buf16 = (u16 *) buf;
-+			*(buf16) = __le16_to_cpu(readw((void __iomem *)ptr));
-+			i += 2;
-+		} else {
-+			/* Read 4 bytes */
-+			*(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
-+			i += 4;
-+		}
-+		buf++;
-+		ptr++;
++		tf->data = data & 0xff;
++		tf->hob_data = (data >> 8) & 0xff;
 +	}
-+out:
-+	return;
-+}
 +
-+/*
-+ * TODO:
-+ * -Optimize
-+ * -Rewrite cleaner
-+ */
-+static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
-+			u32 size_bytes)
-+{
-+	u32 i = 0;
-+	u32 *ptr = (__force u32 *) mem_addr_start;
-+	u16 *buf16;
++	/* be sure we're looking at the low order bits */
++	hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
 +
-+	if (unlikely(!ptr || !buf))
-+		return 0;
++	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
++		tf->nsect  = hwif->INB(IDE_NSECTOR_REG);
++	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
++		tf->lbal   = hwif->INB(IDE_SECTOR_REG);
++	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
++		tf->lbam   = hwif->INB(IDE_LCYL_REG);
++	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
++		tf->lbah   = hwif->INB(IDE_HCYL_REG);
++	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
++		tf->device = hwif->INB(IDE_SELECT_REG);
 +
-+	/* shortcut for extremely often used cases */
-+	switch (size_bytes) {
-+	case 2:	/* 2 bytes */
-+		buf16 = (u16 *) buf;
-+		writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
-+		return 2;
-+		break;
-+	case 1: /*
-+		 * also needs to write 4 bytes in this case
-+		 * so falling through..
-+		 */
-+	case 4: /* 4 bytes */
-+		writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
-+		return 4;
-+		break;
-+	}
++	if (task->tf_flags & IDE_TFLAG_LBA48) {
++		hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG);
 +
-+	while (i < size_bytes) {
-+		if (size_bytes - i == 2) {
-+			/* 2 bytes */
-+			buf16 = (u16 *) buf;
-+			writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
-+			i += 2;
-+		} else {
-+			/* 4 bytes */
-+			writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
-+			i += 4;
-+		}
-+		buf++;
-+		ptr++;
++		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
++			tf->hob_feature = hwif->INB(IDE_FEATURE_REG);
++		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
++			tf->hob_nsect   = hwif->INB(IDE_NSECTOR_REG);
++		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
++			tf->hob_lbal    = hwif->INB(IDE_SECTOR_REG);
++		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
++			tf->hob_lbam    = hwif->INB(IDE_LCYL_REG);
++		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
++			tf->hob_lbah    = hwif->INB(IDE_HCYL_REG);
 +	}
-+	return i;
-+}
-+
-+/* Setup pointers to different channels and also setup buffer sizes. */
-+static void setup_memory(struct nozomi *dc)
-+{
-+	void __iomem *offset = dc->base_addr + dc->config_table.dl_start;
-+	/* The length reported is including the length field of 4 bytes,
-+	 * hence subtract with 4.
-+	 */
-+	const u16 buff_offset = 4;
-+
-+	/* Modem port dl configuration */
-+	dc->port[PORT_MDM].dl_addr[CH_A] = offset;
-+	dc->port[PORT_MDM].dl_addr[CH_B] =
-+				(offset += dc->config_table.dl_mdm_len1);
-+	dc->port[PORT_MDM].dl_size[CH_A] =
-+				dc->config_table.dl_mdm_len1 - buff_offset;
-+	dc->port[PORT_MDM].dl_size[CH_B] =
-+				dc->config_table.dl_mdm_len2 - buff_offset;
-+
-+	/* Diag port dl configuration */
-+	dc->port[PORT_DIAG].dl_addr[CH_A] =
-+				(offset += dc->config_table.dl_mdm_len2);
-+	dc->port[PORT_DIAG].dl_size[CH_A] =
-+				dc->config_table.dl_diag_len1 - buff_offset;
-+	dc->port[PORT_DIAG].dl_addr[CH_B] =
-+				(offset += dc->config_table.dl_diag_len1);
-+	dc->port[PORT_DIAG].dl_size[CH_B] =
-+				dc->config_table.dl_diag_len2 - buff_offset;
-+
-+	/* App1 port dl configuration */
-+	dc->port[PORT_APP1].dl_addr[CH_A] =
-+				(offset += dc->config_table.dl_diag_len2);
-+	dc->port[PORT_APP1].dl_size[CH_A] =
-+				dc->config_table.dl_app1_len - buff_offset;
-+
-+	/* App2 port dl configuration */
-+	dc->port[PORT_APP2].dl_addr[CH_A] =
-+				(offset += dc->config_table.dl_app1_len);
-+	dc->port[PORT_APP2].dl_size[CH_A] =
-+				dc->config_table.dl_app2_len - buff_offset;
-+
-+	/* Ctrl dl configuration */
-+	dc->port[PORT_CTRL].dl_addr[CH_A] =
-+				(offset += dc->config_table.dl_app2_len);
-+	dc->port[PORT_CTRL].dl_size[CH_A] =
-+				dc->config_table.dl_ctrl_len - buff_offset;
-+
-+	offset = dc->base_addr + dc->config_table.ul_start;
-+
-+	/* Modem Port ul configuration */
-+	dc->port[PORT_MDM].ul_addr[CH_A] = offset;
-+	dc->port[PORT_MDM].ul_size[CH_A] =
-+				dc->config_table.ul_mdm_len1 - buff_offset;
-+	dc->port[PORT_MDM].ul_addr[CH_B] =
-+				(offset += dc->config_table.ul_mdm_len1);
-+	dc->port[PORT_MDM].ul_size[CH_B] =
-+				dc->config_table.ul_mdm_len2 - buff_offset;
-+
-+	/* Diag port ul configuration */
-+	dc->port[PORT_DIAG].ul_addr[CH_A] =
-+				(offset += dc->config_table.ul_mdm_len2);
-+	dc->port[PORT_DIAG].ul_size[CH_A] =
-+				dc->config_table.ul_diag_len - buff_offset;
-+
-+	/* App1 port ul configuration */
-+	dc->port[PORT_APP1].ul_addr[CH_A] =
-+				(offset += dc->config_table.ul_diag_len);
-+	dc->port[PORT_APP1].ul_size[CH_A] =
-+				dc->config_table.ul_app1_len - buff_offset;
-+
-+	/* App2 port ul configuration */
-+	dc->port[PORT_APP2].ul_addr[CH_A] =
-+				(offset += dc->config_table.ul_app1_len);
-+	dc->port[PORT_APP2].ul_size[CH_A] =
-+				dc->config_table.ul_app2_len - buff_offset;
-+
-+	/* Ctrl ul configuration */
-+	dc->port[PORT_CTRL].ul_addr[CH_A] =
-+				(offset += dc->config_table.ul_app2_len);
-+	dc->port[PORT_CTRL].ul_size[CH_A] =
-+				dc->config_table.ul_ctrl_len - buff_offset;
-+}
-+
-+/* Dump the config table during the initialization phase */
-+#ifdef DEBUG
-+static void dump_table(const struct nozomi *dc)
-+{
-+	DBG3("signature: 0x%08X", dc->config_table.signature);
-+	DBG3("version: 0x%04X", dc->config_table.version);
-+	DBG3("product_information: 0x%04X", \
-+				dc->config_table.product_information);
-+	DBG3("toggle enabled: %d", dc->config_table.toggle.enabled);
-+	DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul);
-+	DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl);
-+	DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl);
-+
-+	DBG3("dl_start: 0x%04X", dc->config_table.dl_start);
-+	DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1,
-+	   dc->config_table.dl_mdm_len1);
-+	DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2,
-+	   dc->config_table.dl_mdm_len2);
-+	DBG3("dl_diag_len0: 0x%04X, %d", dc->config_table.dl_diag_len1,
-+	   dc->config_table.dl_diag_len1);
-+	DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2,
-+	   dc->config_table.dl_diag_len2);
-+	DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len,
-+	   dc->config_table.dl_app1_len);
-+	DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len,
-+	   dc->config_table.dl_app2_len);
-+	DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len,
-+	   dc->config_table.dl_ctrl_len);
-+	DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start,
-+	   dc->config_table.ul_start);
-+	DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1,
-+	   dc->config_table.ul_mdm_len1);
-+	DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2,
-+	   dc->config_table.ul_mdm_len2);
-+	DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len,
-+	   dc->config_table.ul_diag_len);
-+	DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len,
-+	   dc->config_table.ul_app1_len);
-+	DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len,
-+	   dc->config_table.ul_app2_len);
-+	DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len,
-+	   dc->config_table.ul_ctrl_len);
 +}
-+#else
-+static __inline__ void dump_table(const struct nozomi *dc) { }
-+#endif
-+
-+/*
-+ * Read the configuration table from the card during the initialization phase.
-+ * Returns 1 if ok, else 0.
-+ */
-+static int nozomi_read_config_table(struct nozomi *dc)
-+{
-+	read_mem32((u32 *) &dc->config_table, dc->base_addr + 0,
-+						sizeof(struct config_table));
-+
-+	if (dc->config_table.signature != CONFIG_MAGIC) {
-+		dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n",
-+			dc->config_table.signature, CONFIG_MAGIC);
-+		return 0;
-+	}
-+
-+	if ((dc->config_table.version == 0)
-+	    || (dc->config_table.toggle.enabled == TOGGLE_VALID)) {
-+		int i;
-+		DBG1("Second phase, configuring card");
-+
-+		setup_memory(dc);
-+
-+		dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul;
-+		dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl;
-+		dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl;
-+		DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d",
-+		   dc->port[PORT_MDM].toggle_ul,
-+		   dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl);
 +
-+		dump_table(dc);
+ /**
+  *	ide_end_drive_cmd	-	end an explicit drive command
+  *	@drive: command 
+@@ -314,7 +353,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
+  
+ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+ {
+-	ide_hwif_t *hwif = HWIF(drive);
+ 	unsigned long flags;
+ 	struct request *rq;
+ 
+@@ -322,61 +360,18 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+ 	rq = HWGROUP(drive)->rq;
+ 	spin_unlock_irqrestore(&ide_lock, flags);
+ 
+-	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
+-		u8 *args = (u8 *) rq->buffer;
+-		if (rq->errors == 0)
+-			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+-
+-		if (args) {
+-			args[0] = stat;
+-			args[1] = err;
+-			args[2] = hwif->INB(IDE_NSECTOR_REG);
+-		}
+-	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
+-		u8 *args = (u8 *) rq->buffer;
+-		if (rq->errors == 0)
+-			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+-
+-		if (args) {
+-			args[0] = stat;
+-			args[1] = err;
+-			/* be sure we're looking at the low order bits */
+-			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
+-			args[2] = hwif->INB(IDE_NSECTOR_REG);
+-			args[3] = hwif->INB(IDE_SECTOR_REG);
+-			args[4] = hwif->INB(IDE_LCYL_REG);
+-			args[5] = hwif->INB(IDE_HCYL_REG);
+-			args[6] = hwif->INB(IDE_SELECT_REG);
+-		}
+-	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
++	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ 		ide_task_t *args = (ide_task_t *) rq->special;
+ 		if (rq->errors == 0)
+ 			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+ 			
+ 		if (args) {
+-			if (args->tf_in_flags.b.data) {
+-				u16 data				= hwif->INW(IDE_DATA_REG);
+-				args->tfRegister[IDE_DATA_OFFSET]	= (data) & 0xFF;
+-				args->hobRegister[IDE_DATA_OFFSET]	= (data >> 8) & 0xFF;
+-			}
+-			args->tfRegister[IDE_ERROR_OFFSET]   = err;
+-			/* be sure we're looking at the low order bits */
+-			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
+-			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
+-			args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
+-			args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
+-			args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
+-			args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
+-			args->tfRegister[IDE_STATUS_OFFSET]  = stat;
+-
+-			if (drive->addressing == 1) {
+-				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
+-				args->hobRegister[IDE_FEATURE_OFFSET]	= hwif->INB(IDE_FEATURE_REG);
+-				args->hobRegister[IDE_NSECTOR_OFFSET]	= hwif->INB(IDE_NSECTOR_REG);
+-				args->hobRegister[IDE_SECTOR_OFFSET]	= hwif->INB(IDE_SECTOR_REG);
+-				args->hobRegister[IDE_LCYL_OFFSET]	= hwif->INB(IDE_LCYL_REG);
+-				args->hobRegister[IDE_HCYL_OFFSET]	= hwif->INB(IDE_HCYL_REG);
+-			}
++			struct ide_taskfile *tf = &args->tf;
 +
-+		for (i = PORT_MDM; i < MAX_PORT; i++) {
-+			dc->port[i].fifo_ul =
-+			    kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
-+			memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
-+			memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
-+		}
++			tf->error = err;
++			tf->status = stat;
 +
-+		/* Enable control channel */
-+		dc->last_ier = dc->last_ier | CTRL_DL;
-+		writew(dc->last_ier, dc->reg_ier);
++			ide_tf_read(drive, args);
+ 		}
+ 	} else if (blk_pm_request(rq)) {
+ 		struct request_pm_state *pm = rq->data;
+@@ -391,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+ 	}
+ 
+ 	spin_lock_irqsave(&ide_lock, flags);
+-	blkdev_dequeue_request(rq);
+ 	HWGROUP(drive)->rq = NULL;
+ 	rq->errors = err;
+-	end_that_request_last(rq, !rq->errors);
++	if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
++		BUG();
+ 	spin_unlock_irqrestore(&ide_lock, flags);
+ }
+ 
+@@ -615,90 +610,26 @@ ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
+ 		return __ide_abort(drive, rq);
+ }
+ 
+-/**
+- *	ide_cmd		-	issue a simple drive command
+- *	@drive: drive the command is for
+- *	@cmd: command byte
+- *	@nsect: sector byte
+- *	@handler: handler for the command completion
+- *
+- *	Issue a simple drive command with interrupts.
+- *	The drive must be selected beforehand.
+- */
+-
+-static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
+-		ide_handler_t *handler)
++static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+ {
+-	ide_hwif_t *hwif = HWIF(drive);
+-	if (IDE_CONTROL_REG)
+-		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);	/* clear nIEN */
+-	SELECT_MASK(drive,0);
+-	hwif->OUTB(nsect,IDE_NSECTOR_REG);
+-	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
++	tf->nsect   = drive->sect;
++	tf->lbal    = drive->sect;
++	tf->lbam    = drive->cyl;
++	tf->lbah    = drive->cyl >> 8;
++	tf->device  = ((drive->head - 1) | drive->select.all) & ~ATA_LBA;
++	tf->command = WIN_SPECIFY;
+ }
+ 
+-/**
+- *	drive_cmd_intr		- 	drive command completion interrupt
+- *	@drive: drive the completion interrupt occurred on
+- *
+- *	drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
+- *	We do any necessary data reading and then wait for the drive to
+- *	go non busy. At that point we may read the error data and complete
+- *	the request
+- */
+- 
+-static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
++static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+ {
+-	struct request *rq = HWGROUP(drive)->rq;
+-	ide_hwif_t *hwif = HWIF(drive);
+-	u8 *args = (u8 *) rq->buffer;
+-	u8 stat = hwif->INB(IDE_STATUS_REG);
+-	int retries = 10;
+-
+-	local_irq_enable_in_hardirq();
+-	if (rq->cmd_type == REQ_TYPE_ATA_CMD &&
+-	    (stat & DRQ_STAT) && args && args[3]) {
+-		u8 io_32bit = drive->io_32bit;
+-		drive->io_32bit = 0;
+-		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
+-		drive->io_32bit = io_32bit;
+-		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
+-			udelay(100);
+-	}
+-
+-	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
+-		return ide_error(drive, "drive_cmd", stat);
+-		/* calls ide_end_drive_cmd */
+-	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
+-	return ide_stopped;
++	tf->nsect   = drive->sect;
++	tf->command = WIN_RESTORE;
+ }
+ 
+-static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
++static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+ {
+-	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
+-	task->tfRegister[IDE_SECTOR_OFFSET]  = drive->sect;
+-	task->tfRegister[IDE_LCYL_OFFSET]    = drive->cyl;
+-	task->tfRegister[IDE_HCYL_OFFSET]    = drive->cyl>>8;
+-	task->tfRegister[IDE_SELECT_OFFSET]  = ((drive->head-1)|drive->select.all)&0xBF;
+-	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;
+-
+-	task->handler = &set_geometry_intr;
+-}
+-
+-static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
+-{
+-	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
+-	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;
+-
+-	task->handler = &recal_intr;
+-}
+-
+-static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
+-{
+-	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
+-	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;
+-
+-	task->handler = &set_multmode_intr;
++	tf->nsect   = drive->mult_req;
++	tf->command = WIN_SETMULT;
+ }
+ 
+ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+@@ -707,19 +638,19 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+ 	ide_task_t args;
+ 
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	args.command_type = IDE_DRIVE_TASK_NO_DATA;
++	args.data_phase = TASKFILE_NO_DATA;
+ 
+ 	if (s->b.set_geometry) {
+ 		s->b.set_geometry = 0;
+-		ide_init_specify_cmd(drive, &args);
++		ide_tf_set_specify_cmd(drive, &args.tf);
+ 	} else if (s->b.recalibrate) {
+ 		s->b.recalibrate = 0;
+-		ide_init_restore_cmd(drive, &args);
++		ide_tf_set_restore_cmd(drive, &args.tf);
+ 	} else if (s->b.set_multmode) {
+ 		s->b.set_multmode = 0;
+ 		if (drive->mult_req > drive->id->max_multsect)
+ 			drive->mult_req = drive->id->max_multsect;
+-		ide_init_setmult_cmd(drive, &args);
++		ide_tf_set_setmult_cmd(drive, &args.tf);
+ 	} else if (s->all) {
+ 		int special = s->all;
+ 		s->all = 0;
+@@ -727,6 +658,9 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+ 		return ide_stopped;
+ 	}
+ 
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
++			IDE_TFLAG_CUSTOM_HANDLER;
 +
-+		dev_info(&dc->pdev->dev, "Initialization OK!\n");
-+		return 1;
+ 	do_rw_taskfile(drive, &args);
+ 
+ 	return ide_started;
+@@ -801,7 +735,7 @@ static ide_startstop_t do_special (ide_drive_t *drive)
+ 
+ 			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
+ 				if (keep_dma)
+-					hwif->ide_dma_on(drive);
++					ide_dma_on(drive);
+ 			}
+ 		}
+ 
+@@ -861,13 +795,10 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
+ 		struct request *rq)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+-	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+- 		ide_task_t *args = rq->special;
+- 
+-		if (!args)
+-			goto done;
++	ide_task_t *task = rq->special;
+ 
+-		hwif->data_phase = args->data_phase;
++	if (task) {
++		hwif->data_phase = task->data_phase;
+ 
+ 		switch (hwif->data_phase) {
+ 		case TASKFILE_MULTI_OUT:
+@@ -880,57 +811,9 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
+ 			break;
+ 		}
+ 
+-		if (args->tf_out_flags.all != 0) 
+-			return flagged_taskfile(drive, args);
+-		return do_rw_taskfile(drive, args);
+-	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
+-		u8 *args = rq->buffer;
+- 
+-		if (!args)
+-			goto done;
+-#ifdef DEBUG
+- 		printk("%s: DRIVE_TASK_CMD ", drive->name);
+- 		printk("cmd=0x%02x ", args[0]);
+- 		printk("fr=0x%02x ", args[1]);
+- 		printk("ns=0x%02x ", args[2]);
+- 		printk("sc=0x%02x ", args[3]);
+- 		printk("lcyl=0x%02x ", args[4]);
+- 		printk("hcyl=0x%02x ", args[5]);
+- 		printk("sel=0x%02x\n", args[6]);
+-#endif
+- 		hwif->OUTB(args[1], IDE_FEATURE_REG);
+- 		hwif->OUTB(args[3], IDE_SECTOR_REG);
+- 		hwif->OUTB(args[4], IDE_LCYL_REG);
+- 		hwif->OUTB(args[5], IDE_HCYL_REG);
+- 		hwif->OUTB((args[6] & 0xEF)|drive->select.all, IDE_SELECT_REG);
+- 		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
+- 		return ide_started;
+- 	} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
+- 		u8 *args = rq->buffer;
+-
+-		if (!args)
+-			goto done;
+-#ifdef DEBUG
+- 		printk("%s: DRIVE_CMD ", drive->name);
+- 		printk("cmd=0x%02x ", args[0]);
+- 		printk("sc=0x%02x ", args[1]);
+- 		printk("fr=0x%02x ", args[2]);
+- 		printk("xx=0x%02x\n", args[3]);
+-#endif
+- 		if (args[0] == WIN_SMART) {
+- 			hwif->OUTB(0x4f, IDE_LCYL_REG);
+- 			hwif->OUTB(0xc2, IDE_HCYL_REG);
+- 			hwif->OUTB(args[2],IDE_FEATURE_REG);
+- 			hwif->OUTB(args[1],IDE_SECTOR_REG);
+- 			ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
+- 			return ide_started;
+- 		}
+- 		hwif->OUTB(args[2],IDE_FEATURE_REG);
+- 		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
+- 		return ide_started;
+- 	}
+-
+-done:
++		return do_rw_taskfile(drive, task);
 +	}
 +
-+	if ((dc->config_table.version > 0)
-+	    && (dc->config_table.toggle.enabled != TOGGLE_VALID)) {
-+		u32 offset = 0;
-+		DBG1("First phase: pushing upload buffers, clearing download");
-+
-+		dev_info(&dc->pdev->dev, "Version of card: %d\n",
-+			 dc->config_table.version);
+  	/*
+  	 * NULL is actually a valid way of waiting for
+  	 * all current requests to be flushed from the queue.
+@@ -970,8 +853,7 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
+ 		if (rc)
+ 			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
+ 		SELECT_DRIVE(drive);
+-		if (IDE_CONTROL_REG)
+-			HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
++		ide_set_irq(drive, 1);
+ 		rc = ide_wait_not_busy(HWIF(drive), 100000);
+ 		if (rc)
+ 			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
+@@ -1003,6 +885,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
+ 
+ 	/* bail early if we've exceeded max_failures */
+ 	if (drive->max_failures && (drive->failures > drive->max_failures)) {
++		rq->cmd_flags |= REQ_FAILED;
+ 		goto kill_rq;
+ 	}
+ 
+@@ -1034,9 +917,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
+ 		if (drive->current_speed == 0xff)
+ 			ide_config_drive_speed(drive, drive->desired_speed);
+ 
+-		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
+-		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
+-		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
++		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+ 			return execute_drive_cmd(drive, rq);
+ 		else if (blk_pm_request(rq)) {
+ 			struct request_pm_state *pm = rq->data;
+@@ -1244,11 +1125,13 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
+ 		}
+ 	again:
+ 		hwif = HWIF(drive);
+-		if (hwgroup->hwif->sharing_irq &&
+-		    hwif != hwgroup->hwif &&
+-		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
+-			/* set nIEN for previous hwif */
+-			SELECT_INTERRUPT(drive);
++		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
++			/*
++			 * set nIEN for previous hwif, drives in the
++			 * quirk_list may not like intr setups/cleanups
++			 */
++			if (drive->quirk_list != 1)
++				ide_set_irq(drive, 0);
+ 		}
+ 		hwgroup->hwif = hwif;
+ 		hwgroup->drive = drive;
+@@ -1361,7 +1244,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
+ 	 */
+ 	drive->retry_pio++;
+ 	drive->state = DMA_PIO_RETRY;
+-	hwif->dma_off_quietly(drive);
++	ide_dma_off_quietly(drive);
+ 
+ 	/*
+ 	 * un-busy drive etc (hwgroup->busy is cleared on return) and
+@@ -1454,12 +1337,8 @@ void ide_timer_expiry (unsigned long data)
+ 			 */
+ 			spin_unlock(&ide_lock);
+ 			hwif  = HWIF(drive);
+-#if DISABLE_IRQ_NOSYNC
+-			disable_irq_nosync(hwif->irq);
+-#else
+ 			/* disable_irq_nosync ?? */
+ 			disable_irq(hwif->irq);
+-#endif /* DISABLE_IRQ_NOSYNC */
+ 			/* local CPU only,
+ 			 * as if we were handling an interrupt */
+ 			local_irq_disable();
+@@ -1710,7 +1589,6 @@ irqreturn_t ide_intr (int irq, void *dev_id)
+ void ide_init_drive_cmd (struct request *rq)
+ {
+ 	memset(rq, 0, sizeof(*rq));
+-	rq->cmd_type = REQ_TYPE_ATA_CMD;
+ 	rq->ref_count = 1;
+ }
+ 
+@@ -1785,3 +1663,19 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
+ }
+ 
+ EXPORT_SYMBOL(ide_do_drive_cmd);
 +
-+		/* Here we should disable all I/O over F32. */
-+		setup_memory(dc);
++void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
++{
++	ide_task_t task;
 +
-+		/*
-+		 * We should send ALL channel pair tokens back along
-+		 * with reset token
-+		 */
++	memset(&task, 0, sizeof(task));
++	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
++			IDE_TFLAG_OUT_FEATURE | tf_flags;
++	task.tf.feature = dma;		/* Use PIO/DMA */
++	task.tf.lbam    = bcount & 0xff;
++	task.tf.lbah    = (bcount >> 8) & 0xff;
 +
-+		/* push upload modem buffers */
-+		write_mem32(dc->port[PORT_MDM].ul_addr[CH_A],
-+			(u32 *) &offset, 4);
-+		write_mem32(dc->port[PORT_MDM].ul_addr[CH_B],
-+			(u32 *) &offset, 4);
++	ide_tf_load(drive, &task);
++}
 +
-+		writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr);
++EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
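[Not part of the patch: a minimal standalone sketch, in plain userspace C, of the register packing that the new ide_pktcmd_tf_load() helper above performs — the PIO/DMA flag goes into the feature register and the 16-bit byte count is split across the LBA mid/high registers. The names taskfile_sketch and pack_pktcmd are invented for illustration.]

#include <stdint.h>
#include <stdio.h>

struct taskfile_sketch {
	uint8_t feature;	/* 0 = PIO, 1 = DMA */
	uint8_t lbam;		/* byte count, low 8 bits */
	uint8_t lbah;		/* byte count, high 8 bits */
};

static struct taskfile_sketch pack_pktcmd(uint16_t bcount, uint8_t dma)
{
	struct taskfile_sketch tf = {
		.feature = dma,
		.lbam    = bcount & 0xff,
		.lbah    = (bcount >> 8) & 0xff,
	};
	return tf;
}

int main(void)
{
	struct taskfile_sketch tf = pack_pktcmd(0xfffe, 0);

	printf("feature=%u lbam=0x%02x lbah=0x%02x\n",
	       (unsigned)tf.feature, (unsigned)tf.lbam, (unsigned)tf.lbah);
	return 0;
}
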
+diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
+index bb9693d..e2a7e95 100644
+--- a/drivers/ide/ide-iops.c
++++ b/drivers/ide/ide-iops.c
+@@ -158,14 +158,6 @@ void default_hwif_mmiops (ide_hwif_t *hwif)
+ 
+ EXPORT_SYMBOL(default_hwif_mmiops);
+ 
+-u32 ide_read_24 (ide_drive_t *drive)
+-{
+-	u8 hcyl = HWIF(drive)->INB(IDE_HCYL_REG);
+-	u8 lcyl = HWIF(drive)->INB(IDE_LCYL_REG);
+-	u8 sect = HWIF(drive)->INB(IDE_SECTOR_REG);
+-	return (hcyl<<16)|(lcyl<<8)|sect;
+-}
+-
+ void SELECT_DRIVE (ide_drive_t *drive)
+ {
+ 	if (HWIF(drive)->selectproc)
+@@ -175,26 +167,12 @@ void SELECT_DRIVE (ide_drive_t *drive)
+ 
+ EXPORT_SYMBOL(SELECT_DRIVE);
+ 
+-void SELECT_INTERRUPT (ide_drive_t *drive)
+-{
+-	if (HWIF(drive)->intrproc)
+-		HWIF(drive)->intrproc(drive);
+-	else
+-		HWIF(drive)->OUTB(drive->ctl|2, IDE_CONTROL_REG);
+-}
+-
+ void SELECT_MASK (ide_drive_t *drive, int mask)
+ {
+ 	if (HWIF(drive)->maskproc)
+ 		HWIF(drive)->maskproc(drive, mask);
+ }
+ 
+-void QUIRK_LIST (ide_drive_t *drive)
+-{
+-	if (HWIF(drive)->quirkproc)
+-		drive->quirk_list = HWIF(drive)->quirkproc(drive);
+-}
+-
+ /*
+  * Some localbus EIDE interfaces require a special access sequence
+  * when using 32-bit I/O instructions to transfer data.  We call this
+@@ -449,7 +427,6 @@ int drive_is_ready (ide_drive_t *drive)
+ 	udelay(1);
+ #endif
+ 
+-#ifdef CONFIG_IDEPCI_SHARE_IRQ
+ 	/*
+ 	 * We do a passive status test under shared PCI interrupts on
+ 	 * cards that truly share the ATA side interrupt, but may also share
+@@ -459,7 +436,6 @@ int drive_is_ready (ide_drive_t *drive)
+ 	if (IDE_CONTROL_REG)
+ 		stat = hwif->INB(IDE_ALTSTATUS_REG);
+ 	else
+-#endif /* CONFIG_IDEPCI_SHARE_IRQ */
+ 		/* Note: this may clear a pending IRQ!! */
+ 		stat = hwif->INB(IDE_STATUS_REG);
+ 
+@@ -642,9 +618,9 @@ no_80w:
+ 
+ int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
+ {
+-	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
+-	    (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
+-	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
++	if (args->tf.command == WIN_SETFEATURES &&
++	    args->tf.nsect > XFER_UDMA_2 &&
++	    args->tf.feature == SETFEATURES_XFER) {
+ 		if (eighty_ninty_three(drive) == 0) {
+ 			printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
+ 					    "be set\n", drive->name);
+@@ -662,9 +638,9 @@ int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
+  */
+ int set_transfer (ide_drive_t *drive, ide_task_t *args)
+ {
+-	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
+-	    (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) &&
+-	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) &&
++	if (args->tf.command == WIN_SETFEATURES &&
++	    args->tf.nsect >= XFER_SW_DMA_0 &&
++	    args->tf.feature == SETFEATURES_XFER &&
+ 	    (drive->id->dma_ultra ||
+ 	     drive->id->dma_mword ||
+ 	     drive->id->dma_1word))
+@@ -712,8 +688,7 @@ int ide_driveid_update(ide_drive_t *drive)
+ 	 */
+ 
+ 	SELECT_MASK(drive, 1);
+-	if (IDE_CONTROL_REG)
+-		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
++	ide_set_irq(drive, 1);
+ 	msleep(50);
+ 	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
+ 	timeout = jiffies + WAIT_WORSTCASE;
+@@ -766,8 +741,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
+ //		msleep(50);
+ 
+ #ifdef CONFIG_BLK_DEV_IDEDMA
+-	if (hwif->ide_dma_on)	/* check if host supports DMA */
+-		hwif->dma_host_off(drive);
++	if (hwif->dma_host_set)	/* check if host supports DMA */
++		hwif->dma_host_set(drive, 0);
+ #endif
+ 
+ 	/* Skip setting PIO flow-control modes on pre-EIDE drives */
+@@ -796,13 +771,12 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
+ 	SELECT_DRIVE(drive);
+ 	SELECT_MASK(drive, 0);
+ 	udelay(1);
+-	if (IDE_CONTROL_REG)
+-		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
++	ide_set_irq(drive, 0);
+ 	hwif->OUTB(speed, IDE_NSECTOR_REG);
+ 	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
+ 	hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
+-	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
+-		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
++	if (drive->quirk_list == 2)
++		ide_set_irq(drive, 1);
+ 
+ 	error = __ide_wait_stat(drive, drive->ready_stat,
+ 				BUSY_STAT|DRQ_STAT|ERR_STAT,
+@@ -823,10 +797,11 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
+ 
+  skip:
+ #ifdef CONFIG_BLK_DEV_IDEDMA
+-	if (speed >= XFER_SW_DMA_0)
+-		hwif->dma_host_on(drive);
+-	else if (hwif->ide_dma_on)	/* check if host supports DMA */
+-		hwif->dma_off_quietly(drive);
++	if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
++	    drive->using_dma)
++		hwif->dma_host_set(drive, 1);
++	else if (hwif->dma_host_set)	/* check if host supports DMA */
++		ide_dma_off_quietly(drive);
+ #endif
+ 
+ 	switch(speed) {
+@@ -902,8 +877,9 @@ EXPORT_SYMBOL(ide_set_handler);
+  *	handler and IRQ setup do not race. All IDE command kick off
+  *	should go via this function or do equivalent locking.
+  */
+- 
+-void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *handler, unsigned timeout, ide_expiry_t *expiry)
 +
-+		DBG1("First phase done");
++void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
++			 unsigned timeout, ide_expiry_t *expiry)
+ {
+ 	unsigned long flags;
+ 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
+@@ -1035,10 +1011,10 @@ static void check_dma_crc(ide_drive_t *drive)
+ {
+ #ifdef CONFIG_BLK_DEV_IDEDMA
+ 	if (drive->crc_count) {
+-		drive->hwif->dma_off_quietly(drive);
++		ide_dma_off_quietly(drive);
+ 		ide_set_xfer_rate(drive, ide_auto_reduce_xfer(drive));
+ 		if (drive->current_speed >= XFER_SW_DMA_0)
+-			(void) HWIF(drive)->ide_dma_on(drive);
++			ide_dma_on(drive);
+ 	} else
+ 		ide_dma_off(drive);
+ #endif
+@@ -1051,8 +1027,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
+ 	drive->special.all = 0;
+ 	drive->special.b.set_geometry = legacy;
+ 	drive->special.b.recalibrate  = legacy;
+-	if (OK_TO_RESET_CONTROLLER)
+-		drive->mult_count = 0;
++	drive->mult_count = 0;
+ 	if (!drive->keep_settings && !drive->using_dma)
+ 		drive->mult_req = 0;
+ 	if (drive->mult_req != drive->mult_count)
+@@ -1137,7 +1112,6 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+ 	for (unit = 0; unit < MAX_DRIVES; ++unit)
+ 		pre_reset(&hwif->drives[unit]);
+ 
+-#if OK_TO_RESET_CONTROLLER
+ 	if (!IDE_CONTROL_REG) {
+ 		spin_unlock_irqrestore(&ide_lock, flags);
+ 		return ide_stopped;
+@@ -1174,11 +1148,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+ 	 * state when the disks are reset this way. At least, the Winbond
+ 	 * 553 documentation says that
+ 	 */
+-	if (hwif->resetproc != NULL) {
++	if (hwif->resetproc)
+ 		hwif->resetproc(drive);
+-	}
+-	
+-#endif	/* OK_TO_RESET_CONTROLLER */
+ 
+ 	spin_unlock_irqrestore(&ide_lock, flags);
+ 	return ide_started;
+diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
+index 062d3bc..9b44fbd 100644
+--- a/drivers/ide/ide-lib.c
++++ b/drivers/ide/ide-lib.c
+@@ -441,6 +441,12 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
+ 	 * case could happen iff the transfer mode has already been set on
+ 	 * the device by ide-proc.c::set_xfer_rate()).
+ 	 */
++	if (rate < XFER_PIO_0) {
++		if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE)
++			return ide_set_dma_mode(drive, rate);
++		else
++			return ide_config_drive_speed(drive, rate);
 +	}
+ 
+ 	return ide_set_dma_mode(drive, rate);
+ }
+@@ -448,8 +454,7 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
+ static void ide_dump_opcode(ide_drive_t *drive)
+ {
+ 	struct request *rq;
+-	u8 opcode = 0;
+-	int found = 0;
++	ide_task_t *task = NULL;
+ 
+ 	spin_lock(&ide_lock);
+ 	rq = NULL;
+@@ -458,164 +463,129 @@ static void ide_dump_opcode(ide_drive_t *drive)
+ 	spin_unlock(&ide_lock);
+ 	if (!rq)
+ 		return;
+-	if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
+-	    rq->cmd_type == REQ_TYPE_ATA_TASK) {
+-		char *args = rq->buffer;
+-		if (args) {
+-			opcode = args[0];
+-			found = 1;
+-		}
+-	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+-		ide_task_t *args = rq->special;
+-		if (args) {
+-			task_struct_t *tf = (task_struct_t *) args->tfRegister;
+-			opcode = tf->command;
+-			found = 1;
+-		}
+-	}
 +
-+	return 1;
++	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
++		task = rq->special;
+ 
+ 	printk("ide: failed opcode was: ");
+-	if (!found)
+-		printk("unknown\n");
++	if (task == NULL)
++		printk(KERN_CONT "unknown\n");
+ 	else
+-		printk("0x%02x\n", opcode);
++		printk(KERN_CONT "0x%02x\n", task->tf.command);
+ }
+ 
+-static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
++u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48)
+ {
+-	ide_hwif_t *hwif = HWIF(drive);
+-	unsigned long flags;
+-	u8 err = 0;
++	u32 high, low;
+ 
+-	local_irq_save(flags);
+-	printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
+-	if (stat & BUSY_STAT)
+-		printk("Busy ");
+-	else {
+-		if (stat & READY_STAT)	printk("DriveReady ");
+-		if (stat & WRERR_STAT)	printk("DeviceFault ");
+-		if (stat & SEEK_STAT)	printk("SeekComplete ");
+-		if (stat & DRQ_STAT)	printk("DataRequest ");
+-		if (stat & ECC_STAT)	printk("CorrectedError ");
+-		if (stat & INDEX_STAT)	printk("Index ");
+-		if (stat & ERR_STAT)	printk("Error ");
++	if (lba48)
++		high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) |
++			tf->hob_lbal;
++	else
++		high = tf->device & 0xf;
++	low  = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
++
++	return ((u64)high << 24) | low;
 +}
++EXPORT_SYMBOL_GPL(ide_get_lba_addr);
 +
-+/* Enable uplink interrupts  */
-+static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
++static void ide_dump_sector(ide_drive_t *drive)
 +{
-+	u16 mask[NOZOMI_MAX_PORTS] = \
-+			{MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
++	ide_task_t task;
++	struct ide_taskfile *tf = &task.tf;
++	int lba48 = (drive->addressing == 1) ? 1 : 0;
 +
-+	if (port < NOZOMI_MAX_PORTS) {
-+		dc->last_ier |= mask[port];
-+		writew(dc->last_ier, dc->reg_ier);
-+	} else {
-+		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
-+	}
-+}
++	memset(&task, 0, sizeof(task));
++	if (lba48)
++		task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_HOB_LBA |
++				IDE_TFLAG_LBA48;
++	else
++		task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
 +
-+/* Disable uplink interrupts  */
-+static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
-+{
-+	u16 mask[NOZOMI_MAX_PORTS] = \
-+			{~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
++	ide_tf_read(drive, &task);
 +
-+	if (port < NOZOMI_MAX_PORTS) {
-+		dc->last_ier &= mask[port];
-+		writew(dc->last_ier, dc->reg_ier);
-+	} else {
-+		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
-+	}
++	if (lba48 || (tf->device & ATA_LBA))
++		printk(", LBAsect=%llu",
++			(unsigned long long)ide_get_lba_addr(tf, lba48));
++	else
++		printk(", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
++					 tf->device & 0xf, tf->lbal);
 +}
 +
-+/* Enable downlink interrupts */
-+static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
++static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
 +{
-+	u16 mask[NOZOMI_MAX_PORTS] = \
-+			{MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
-+
-+	if (port < NOZOMI_MAX_PORTS) {
-+		dc->last_ier |= mask[port];
-+		writew(dc->last_ier, dc->reg_ier);
-+	} else {
-+		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
-+	}
++	printk("{ ");
++	if (err & ABRT_ERR)	printk("DriveStatusError ");
++	if (err & ICRC_ERR)
++		printk((err & ABRT_ERR) ? "BadCRC " : "BadSector ");
++	if (err & ECC_ERR)	printk("UncorrectableError ");
++	if (err & ID_ERR)	printk("SectorIdNotFound ");
++	if (err & TRK0_ERR)	printk("TrackZeroNotFound ");
++	if (err & MARK_ERR)	printk("AddrMarkNotFound ");
++	printk("}");
++	if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR ||
++	    (err & (ECC_ERR|ID_ERR|MARK_ERR))) {
++		ide_dump_sector(drive);
++		if (HWGROUP(drive) && HWGROUP(drive)->rq)
++			printk(", sector=%llu",
++			       (unsigned long long)HWGROUP(drive)->rq->sector);
+ 	}
++	printk("\n");
 +}
 +
-+/* Disable downlink interrupts */
-+static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
++static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
 +{
-+	u16 mask[NOZOMI_MAX_PORTS] = \
-+			{~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
++	printk("{ ");
++	if (err & ILI_ERR)	printk("IllegalLengthIndication ");
++	if (err & EOM_ERR)	printk("EndOfMedia ");
++	if (err & ABRT_ERR)	printk("AbortedCommand ");
++	if (err & MCR_ERR)	printk("MediaChangeRequested ");
++	if (err & LFS_ERR)	printk("LastFailedSense=0x%02x ",
++				       (err & LFS_ERR) >> 4);
+ 	printk("}\n");
+-	if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
+-		err = hwif->INB(IDE_ERROR_REG);
+-		printk("%s: %s: error=0x%02x { ", drive->name, msg, err);
+-		if (err & ABRT_ERR)	printk("DriveStatusError ");
+-		if (err & ICRC_ERR)
+-			printk((err & ABRT_ERR) ? "BadCRC " : "BadSector ");
+-		if (err & ECC_ERR)	printk("UncorrectableError ");
+-		if (err & ID_ERR)	printk("SectorIdNotFound ");
+-		if (err & TRK0_ERR)	printk("TrackZeroNotFound ");
+-		if (err & MARK_ERR)	printk("AddrMarkNotFound ");
+-		printk("}");
+-		if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR ||
+-		    (err & (ECC_ERR|ID_ERR|MARK_ERR))) {
+-			if (drive->addressing == 1) {
+-				__u64 sectors = 0;
+-				u32 low = 0, high = 0;
+-				hwif->OUTB(drive->ctl&~0x80, IDE_CONTROL_REG);
+-				low = ide_read_24(drive);
+-				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
+-				high = ide_read_24(drive);
+-				sectors = ((__u64)high << 24) | low;
+-				printk(", LBAsect=%llu, high=%d, low=%d",
+-				       (unsigned long long) sectors,
+-				       high, low);
+-			} else {
+-				u8 cur = hwif->INB(IDE_SELECT_REG);
+-				if (cur & 0x40) {	/* using LBA? */
+-					printk(", LBAsect=%ld", (unsigned long)
+-					 ((cur&0xf)<<24)
+-					 |(hwif->INB(IDE_HCYL_REG)<<16)
+-					 |(hwif->INB(IDE_LCYL_REG)<<8)
+-					 | hwif->INB(IDE_SECTOR_REG));
+-				} else {
+-					printk(", CHS=%d/%d/%d",
+-					 (hwif->INB(IDE_HCYL_REG)<<8) +
+-					  hwif->INB(IDE_LCYL_REG),
+-					  cur & 0xf,
+-					  hwif->INB(IDE_SECTOR_REG));
+-				}
+-			}
+-			if (HWGROUP(drive) && HWGROUP(drive)->rq)
+-				printk(", sector=%llu",
+-					(unsigned long long)HWGROUP(drive)->rq->sector);
+-		}
+-		printk("\n");
+-	}
+-	ide_dump_opcode(drive);
+-	local_irq_restore(flags);
+-	return err;
+ }
+ 
+ /**
+- *	ide_dump_atapi_status       -       print human readable atapi status
++ *	ide_dump_status		-	translate ATA/ATAPI error
+  *	@drive: drive that status applies to
+  *	@msg: text message to print
+  *	@stat: status byte to decode
+  *
+  *	Error reporting, in human readable form (luxurious, but a memory hog).
++ *	Combines the drive name, message and status byte to provide a
++ *	user understandable explanation of the device error.
+  */
+ 
+-static u8 ide_dump_atapi_status(ide_drive_t *drive, const char *msg, u8 stat)
++u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
+ {
+ 	unsigned long flags;
++	u8 err = 0;
+ 
+-	atapi_status_t status;
+-	atapi_error_t error;
+-
+-	status.all = stat;
+-	error.all = 0;
+ 	local_irq_save(flags);
+ 	printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
+-	if (status.b.bsy)
++	if (stat & BUSY_STAT)
+ 		printk("Busy ");
+ 	else {
+-		if (status.b.drdy)	printk("DriveReady ");
+-		if (status.b.df)	printk("DeviceFault ");
+-		if (status.b.dsc)	printk("SeekComplete ");
+-		if (status.b.drq)	printk("DataRequest ");
+-		if (status.b.corr)	printk("CorrectedError ");
+-		if (status.b.idx)	printk("Index ");
+-		if (status.b.check)	printk("Error ");
++		if (stat & READY_STAT)	printk("DriveReady ");
++		if (stat & WRERR_STAT)	printk("DeviceFault ");
++		if (stat & SEEK_STAT)	printk("SeekComplete ");
++		if (stat & DRQ_STAT)	printk("DataRequest ");
++		if (stat & ECC_STAT)	printk("CorrectedError ");
++		if (stat & INDEX_STAT)	printk("Index ");
++		if (stat & ERR_STAT)	printk("Error ");
+ 	}
+ 	printk("}\n");
+-	if (status.b.check && !status.b.bsy) {
+-		error.all = HWIF(drive)->INB(IDE_ERROR_REG);
+-		printk("%s: %s: error=0x%02x { ", drive->name, msg, error.all);
+-		if (error.b.ili)	printk("IllegalLengthIndication ");
+-		if (error.b.eom)	printk("EndOfMedia ");
+-		if (error.b.abrt)	printk("AbortedCommand ");
+-		if (error.b.mcr)	printk("MediaChangeRequested ");
+-		if (error.b.sense_key)	printk("LastFailedSense=0x%02x ",
+-						error.b.sense_key);
+-		printk("}\n");
++	if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
++		err = drive->hwif->INB(IDE_ERROR_REG);
++		printk("%s: %s: error=0x%02x ", drive->name, msg, err);
++		if (drive->media == ide_disk)
++			ide_dump_ata_error(drive, err);
++		else
++			ide_dump_atapi_error(drive, err);
+ 	}
+ 	ide_dump_opcode(drive);
+ 	local_irq_restore(flags);
+-	return error.all;
+-}
+-
+-/**
+- *	ide_dump_status		-	translate ATA/ATAPI error
+- *	@drive: drive the error occured on
+- *	@msg: information string
+- *	@stat: status byte
+- *
+- *	Error reporting, in human readable form (luxurious, but a memory hog).
+- *	Combines the drive name, message and status byte to provide a
+- *	user understandable explanation of the device error.
+- */
+-
+-u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
+-{
+-	if (drive->media == ide_disk)
+-		return ide_dump_ata_status(drive, msg, stat);
+-	return ide_dump_atapi_status(drive, msg, stat);
++	return err;
+ }
+ 
+ EXPORT_SYMBOL(ide_dump_status);
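[Not part of the patch: a self-contained sketch, in plain userspace C, of the address assembly done by the new ide_get_lba_addr() above — the low 24 bits come from lbal/lbam/lbah, and the upper bits come either from the HOB registers (LBA48) or from the low nibble of the device register (LBA28). The names tf_sketch and lba_addr are invented for illustration.]

#include <stdint.h>
#include <stdio.h>

struct tf_sketch {
	uint8_t lbal, lbam, lbah;
	uint8_t hob_lbal, hob_lbam, hob_lbah;
	uint8_t device;
};

static uint64_t lba_addr(const struct tf_sketch *tf, int lba48)
{
	uint32_t high, low;

	if (lba48)
		high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) | tf->hob_lbal;
	else
		high = tf->device & 0xf;

	low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;

	return ((uint64_t)high << 24) | low;
}

int main(void)
{
	struct tf_sketch tf = {
		.lbal = 0x01, .lbam = 0x02, .lbah = 0x03,
		.hob_lbal = 0x04, .hob_lbam = 0x05, .hob_lbah = 0x06,
		.device = 0x4f,
	};

	printf("LBA28 = %llu\n", (unsigned long long)lba_addr(&tf, 0));
	printf("LBA48 = %llu\n", (unsigned long long)lba_addr(&tf, 1));
	return 0;
}
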
+diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
+index e245521..cbbb0f7 100644
+--- a/drivers/ide/ide-pnp.c
++++ b/drivers/ide/ide-pnp.c
+@@ -31,7 +31,6 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
+ {
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int index;
+ 
+ 	if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
+ 		return -1;
+@@ -41,11 +40,19 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
+ 				pnp_port_start(dev, 1));
+ 	hw.irq = pnp_irq(dev, 0);
+ 
+-	index = ide_register_hw(&hw, NULL, 1, &hwif);
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		u8 index = hwif->index;
++		u8 idx[4] = { index, 0xff, 0xff, 0xff };
 +
-+	if (port < NOZOMI_MAX_PORTS) {
-+		dc->last_ier &= mask[port];
-+		writew(dc->last_ier, dc->reg_ier);
-+	} else {
-+		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
-+	}
-+}
++		ide_init_port_data(hwif, index);
++		ide_init_port_hw(hwif, &hw);
+ 
+-	if (index != -1) {
+-	    	printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
++		printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
+ 		pnp_set_drvdata(dev,hwif);
 +
-+/*
-+ * Return 1 - send buffer to card and ack.
-+ * Return 0 - don't ack, don't send buffer to card.
-+ */
-+static int send_data(enum port_type index, struct nozomi *dc)
-+{
-+	u32 size = 0;
-+	struct port *port = &dc->port[index];
-+	u8 toggle = port->toggle_ul;
-+	void __iomem *addr = port->ul_addr[toggle];
-+	u32 ul_size = port->ul_size[toggle];
-+	struct tty_struct *tty = port->tty;
++		ide_device_add(idx);
 +
-+	/* Get data from tty and place in buf for now */
-+	size = __kfifo_get(port->fifo_ul, dc->send_buf,
-+			   ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
+ 		return 0;
+ 	}
+ 
+@@ -68,12 +75,15 @@ static struct pnp_driver idepnp_driver = {
+ 	.remove		= idepnp_remove,
+ };
+ 
+-void __init pnpide_init(void)
++static int __init pnpide_init(void)
+ {
+-	pnp_register_driver(&idepnp_driver);
++	return pnp_register_driver(&idepnp_driver);
+ }
+ 
+-void __exit pnpide_exit(void)
++static void __exit pnpide_exit(void)
+ {
+ 	pnp_unregister_driver(&idepnp_driver);
+ }
 +
-+	if (size == 0) {
-+		DBG4("No more data to send, disable link:");
-+		return 0;
-+	}
++module_init(pnpide_init);
++module_exit(pnpide_exit);
+diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
+index 2994523..edf650b 100644
+--- a/drivers/ide/ide-probe.c
++++ b/drivers/ide/ide-probe.c
+@@ -95,10 +95,10 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
+ #ifdef CONFIG_IDEDISK_MULTI_MODE
+ 		id->multsect = ((id->max_multsect/2) > 1) ? id->max_multsect : 0;
+ 		id->multsect_valid = id->multsect ? 1 : 0;
+-		drive->mult_req = id->multsect_valid ? id->max_multsect : INITIAL_MULT_COUNT;
++		drive->mult_req = id->multsect_valid ? id->max_multsect : 0;
+ 		drive->special.b.set_multmode = drive->mult_req ? 1 : 0;
+ #else	/* original, pre IDE-NFG, per request of AC */
+-		drive->mult_req = INITIAL_MULT_COUNT;
++		drive->mult_req = 0;
+ 		if (drive->mult_req > id->max_multsect)
+ 			drive->mult_req = id->max_multsect;
+ 		if (drive->mult_req || ((id->multsect_valid & 1) && id->multsect))
+@@ -234,7 +234,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
+ 
+ 	drive->media = ide_disk;
+ 	printk("%s DISK drive\n", (id->config == 0x848a) ? "CFA" : "ATA" );
+-	QUIRK_LIST(drive);
 +
-+	/* DUMP(buf, size); */
+ 	return;
+ 
+ err_misc:
+@@ -350,22 +350,19 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
+ 	 * the irq handler isn't expecting.
+ 	 */
+ 	if (IDE_CONTROL_REG) {
+-		u8 ctl = drive->ctl | 2;
+ 		if (!hwif->irq) {
+ 			autoprobe = 1;
+ 			cookie = probe_irq_on();
+-			/* enable device irq */
+-			ctl &= ~2;
+ 		}
+-		hwif->OUTB(ctl, IDE_CONTROL_REG);
++		ide_set_irq(drive, autoprobe);
+ 	}
+ 
+ 	retval = actual_try_to_identify(drive, cmd);
+ 
+ 	if (autoprobe) {
+ 		int irq;
+-		/* mask device irq */
+-		hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG);
 +
-+	/* Write length + data */
-+	write_mem32(addr, (u32 *) &size, 4);
-+	write_mem32(addr + 4, (u32 *) dc->send_buf, size);
++		ide_set_irq(drive, 0);
+ 		/* clear drive IRQ */
+ 		(void) hwif->INB(IDE_STATUS_REG);
+ 		udelay(5);
+@@ -385,6 +382,20 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
+ 	return retval;
+ }
+ 
++static int ide_busy_sleep(ide_hwif_t *hwif)
++{
++	unsigned long timeout = jiffies + WAIT_WORSTCASE;
++	u8 stat;
 +
-+	if (tty)
-+		tty_wakeup(tty);
++	do {
++		msleep(50);
++		stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
++		if ((stat & BUSY_STAT) == 0)
++			return 0;
++	} while (time_before(jiffies, timeout));
 +
 +	return 1;
 +}
+ 
+ /**
+  *	do_probe		-	probe an IDE device
+@@ -453,7 +464,6 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
+ 		if ((rc == 1 && cmd == WIN_PIDENTIFY) &&
+ 			((drive->autotune == IDE_TUNE_DEFAULT) ||
+ 			(drive->autotune == IDE_TUNE_AUTO))) {
+-			unsigned long timeout;
+ 			printk("%s: no response (status = 0x%02x), "
+ 				"resetting drive\n", drive->name,
+ 				hwif->INB(IDE_STATUS_REG));
+@@ -461,10 +471,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
+ 			hwif->OUTB(drive->select.all, IDE_SELECT_REG);
+ 			msleep(50);
+ 			hwif->OUTB(WIN_SRST, IDE_COMMAND_REG);
+-			timeout = jiffies;
+-			while (((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) &&
+-			       time_before(jiffies, timeout + WAIT_WORSTCASE))
+-				msleep(50);
++			(void)ide_busy_sleep(hwif);
+ 			rc = try_to_identify(drive, cmd);
+ 		}
+ 		if (rc == 1)
+@@ -492,20 +499,16 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
+ static void enable_nest (ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+-	unsigned long timeout;
+ 
+ 	printk("%s: enabling %s -- ", hwif->name, drive->id->model);
+ 	SELECT_DRIVE(drive);
+ 	msleep(50);
+ 	hwif->OUTB(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG);
+-	timeout = jiffies + WAIT_WORSTCASE;
+-	do {
+-		if (time_after(jiffies, timeout)) {
+-			printk("failed (timeout)\n");
+-			return;
+-		}
+-		msleep(50);
+-	} while ((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT);
 +
-+/* If all data has been read, return 1, else 0 */
-+static int receive_data(enum port_type index, struct nozomi *dc)
-+{
-+	u8 buf[RECEIVE_BUF_MAX] = { 0 };
-+	int size;
-+	u32 offset = 4;
-+	struct port *port = &dc->port[index];
-+	void __iomem *addr = port->dl_addr[port->toggle_dl];
-+	struct tty_struct *tty = port->tty;
-+	int i;
-+
-+	if (unlikely(!tty)) {
-+		DBG1("tty not open for port: %d?", index);
-+		return 1;
++	if (ide_busy_sleep(hwif)) {
++		printk(KERN_CONT "failed (timeout)\n");
++		return;
 +	}
+ 
+ 	msleep(50);
+ 
+@@ -653,8 +656,7 @@ static int wait_hwif_ready(ide_hwif_t *hwif)
+ 		/* Ignore disks that we will not probe for later. */
+ 		if (!drive->noprobe || drive->present) {
+ 			SELECT_DRIVE(drive);
+-			if (IDE_CONTROL_REG)
+-				hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
++			ide_set_irq(drive, 1);
+ 			mdelay(2);
+ 			rc = ide_wait_not_busy(hwif, 35000);
+ 			if (rc)
+@@ -673,19 +675,18 @@ out:
+ 
+ /**
+  *	ide_undecoded_slave	-	look for bad CF adapters
+- *	@hwif: interface
++ *	@drive1: drive
+  *
+  *	Analyse the drives on the interface and attempt to decide if we
+  *	have the same drive viewed twice. This occurs with crap CF adapters
+  *	and PCMCIA sometimes.
+  */
+ 
+-void ide_undecoded_slave(ide_hwif_t *hwif)
++void ide_undecoded_slave(ide_drive_t *drive1)
+ {
+-	ide_drive_t *drive0 = &hwif->drives[0];
+-	ide_drive_t *drive1 = &hwif->drives[1];
++	ide_drive_t *drive0 = &drive1->hwif->drives[0];
+ 
+-	if (drive0->present == 0 || drive1->present == 0)
++	if ((drive1->dn & 1) == 0 || drive0->present == 0)
+ 		return;
+ 
+ 	/* If the models don't match they are not the same product */
+@@ -788,18 +789,11 @@ static void probe_hwif(ide_hwif_t *hwif)
+ 		}
+ 	}
+ 	if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) {
+-		unsigned long timeout = jiffies + WAIT_WORSTCASE;
+-		u8 stat;
+-
+ 		printk(KERN_WARNING "%s: reset\n", hwif->name);
+ 		hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ 		udelay(10);
+ 		hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
+-		do {
+-			msleep(50);
+-			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+-		} while ((stat & BUSY_STAT) && time_after(timeout, jiffies));
+-
++		(void)ide_busy_sleep(hwif);
+ 	}
+ 	local_irq_restore(flags);
+ 	/*
+@@ -814,8 +808,12 @@ static void probe_hwif(ide_hwif_t *hwif)
+ 		return;
+ 	}
+ 
+-	if (hwif->fixup)
+-		hwif->fixup(hwif);
++	for (unit = 0; unit < MAX_DRIVES; unit++) {
++		ide_drive_t *drive = &hwif->drives[unit];
 +
-+	read_mem32((u32 *) &size, addr, 4);
-+	/*  DBG1( "%d bytes port: %d", size, index); */
-+
-+	if (test_bit(TTY_THROTTLED, &tty->flags)) {
-+		DBG1("No room in tty, don't read data, don't ack interrupt, "
-+			"disable interrupt");
-+
-+		/* disable interrupt in downlink... */
-+		disable_transmit_dl(index, dc);
-+		return 0;
++		if (drive->present && hwif->quirkproc)
++			hwif->quirkproc(drive);
 +	}
+ 
+ 	for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ 		ide_drive_t *drive = &hwif->drives[unit];
+@@ -830,16 +828,8 @@ static void probe_hwif(ide_hwif_t *hwif)
+ 
+ 			drive->nice1 = 1;
+ 
+-			if (hwif->ide_dma_on) {
+-				/*
+-				 * Force DMAing for the beginning of the check.
+-				 * Some chipsets appear to do interesting
+-				 * things, if not checked and cleared.
+-				 *   PARANOIA!!!
+-				 */
+-				hwif->dma_off_quietly(drive);
++			if (hwif->dma_host_set)
+ 				ide_set_dma(drive);
+-			}
+ 		}
+ 	}
+ 
+@@ -853,25 +843,6 @@ static void probe_hwif(ide_hwif_t *hwif)
+ 	}
+ }
+ 
+-static int hwif_init(ide_hwif_t *hwif);
+-static void hwif_register_devices(ide_hwif_t *hwif);
+-
+-static int probe_hwif_init(ide_hwif_t *hwif)
+-{
+-	probe_hwif(hwif);
+-
+-	if (!hwif_init(hwif)) {
+-		printk(KERN_INFO "%s: failed to initialize IDE interface\n",
+-				 hwif->name);
+-		return -1;
+-	}
+-
+-	if (hwif->present)
+-		hwif_register_devices(hwif);
+-
+-	return 0;
+-}
+-
+ #if MAX_HWIFS > 1
+ /*
+  * save_match() is used to simplify logic in init_irq() below.
+@@ -968,11 +939,6 @@ static int ide_init_queue(ide_drive_t *drive)
+  * Much of the code is for correctly detecting/handling irq sharing
+  * and irq serialization situations.  This is somewhat complex because
+  * it handles static as well as dynamic (PCMCIA) IDE interfaces.
+- *
+- * The IRQF_DISABLED in sa_flags means ide_intr() is always entered with
+- * interrupts completely disabled.  This can be bad for interrupt latency,
+- * but anything else has led to problems on some machines.  We re-enable
+- * interrupts as much as we can safely do in most places.
+  */
+ static int init_irq (ide_hwif_t *hwif)
+ {
+@@ -1055,17 +1021,13 @@ static int init_irq (ide_hwif_t *hwif)
+ 	 * Allocate the irq, if not already obtained for another hwif
+ 	 */
+ 	if (!match || match->irq != hwif->irq) {
+-		int sa = IRQF_DISABLED;
++		int sa = 0;
+ #if defined(__mc68000__) || defined(CONFIG_APUS)
+ 		sa = IRQF_SHARED;
+ #endif /* __mc68000__ || CONFIG_APUS */
+ 
+-		if (IDE_CHIPSET_IS_PCI(hwif->chipset)) {
++		if (IDE_CHIPSET_IS_PCI(hwif->chipset))
+ 			sa = IRQF_SHARED;
+-#ifndef CONFIG_IDEPCI_SHARE_IRQ
+-			sa |= IRQF_DISABLED;
+-#endif /* CONFIG_IDEPCI_SHARE_IRQ */
+-		}
+ 
+ 		if (hwif->io_ports[IDE_CONTROL_OFFSET])
+ 			/* clear nIEN */
+@@ -1173,7 +1135,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
+ {
+ 	struct gendisk *p = data;
+ 	*part &= (1 << PARTN_BITS) - 1;
+-	return &p->kobj;
++	return &p->dev.kobj;
+ }
+ 
+ static int exact_lock(dev_t dev, void *data)
+@@ -1373,54 +1335,63 @@ static void hwif_register_devices(ide_hwif_t *hwif)
+ 	}
+ }
+ 
+-int ideprobe_init (void)
++int ide_device_add_all(u8 *idx)
+ {
+-	unsigned int index;
+-	int probe[MAX_HWIFS];
+-
+-	memset(probe, 0, MAX_HWIFS * sizeof(int));
+-	for (index = 0; index < MAX_HWIFS; ++index)
+-		probe[index] = !ide_hwifs[index].present;
+-
+-	for (index = 0; index < MAX_HWIFS; ++index)
+-		if (probe[index])
+-			probe_hwif(&ide_hwifs[index]);
+-	for (index = 0; index < MAX_HWIFS; ++index)
+-		if (probe[index])
+-			hwif_init(&ide_hwifs[index]);
+-	for (index = 0; index < MAX_HWIFS; ++index) {
+-		if (probe[index]) {
+-			ide_hwif_t *hwif = &ide_hwifs[index];
+-			if (!hwif->present)
+-				continue;
+-			if (hwif->chipset == ide_unknown || hwif->chipset == ide_forced)
+-				hwif->chipset = ide_generic;
+-			hwif_register_devices(hwif);
++	ide_hwif_t *hwif;
++	int i, rc = 0;
 +
-+	if (unlikely(size == 0)) {
-+		dev_err(&dc->pdev->dev, "size == 0?\n");
-+		return 1;
++	for (i = 0; i < MAX_HWIFS; i++) {
++		if (idx[i] == 0xff)
++			continue;
++
++		probe_hwif(&ide_hwifs[idx[i]]);
 +	}
 +
-+	tty_buffer_request_room(tty, size);
++	for (i = 0; i < MAX_HWIFS; i++) {
++		if (idx[i] == 0xff)
++			continue;
 +
-+	while (size > 0) {
-+		read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
++		hwif = &ide_hwifs[idx[i]];
 +
-+		if (size == 1) {
-+			tty_insert_flip_char(tty, buf[0], TTY_NORMAL);
-+			size = 0;
-+		} else if (size < RECEIVE_BUF_MAX) {
-+			size -= tty_insert_flip_string(tty, (char *) buf, size);
-+		} else {
-+			i = tty_insert_flip_string(tty, \
-+						(char *) buf, RECEIVE_BUF_MAX);
-+			size -= i;
-+			offset += i;
++		if (hwif_init(hwif) == 0) {
++			printk(KERN_INFO "%s: failed to initialize IDE "
++					 "interface\n", hwif->name);
++			rc = -1;
++			continue;
+ 		}
+ 	}
+-	for (index = 0; index < MAX_HWIFS; ++index)
+-		if (probe[index])
+-			ide_proc_register_port(&ide_hwifs[index]);
+-	return 0;
+-}
+ 
+-EXPORT_SYMBOL_GPL(ideprobe_init);
++	for (i = 0; i < MAX_HWIFS; i++) {
++		if (idx[i] == 0xff)
++			continue;
+ 
+-int ide_device_add(u8 idx[4])
+-{
+-	int i, rc = 0;
++		hwif = &ide_hwifs[idx[i]];
+ 
+-	for (i = 0; i < 4; i++) {
+-		if (idx[i] != 0xff)
+-			rc |= probe_hwif_init(&ide_hwifs[idx[i]]);
++		if (hwif->present) {
++			if (hwif->chipset == ide_unknown ||
++			    hwif->chipset == ide_forced)
++				hwif->chipset = ide_generic;
++			hwif_register_devices(hwif);
 +		}
-+	}
-+
-+	set_bit(index, &dc->flip);
-+
-+	return 1;
-+}
+ 	}
+ 
+-	for (i = 0; i < 4; i++) {
++	for (i = 0; i < MAX_HWIFS; i++) {
+ 		if (idx[i] != 0xff)
+ 			ide_proc_register_port(&ide_hwifs[idx[i]]);
+ 	}
+ 
+ 	return rc;
+ }
++EXPORT_SYMBOL_GPL(ide_device_add_all);
 +
-+/* Debug for interrupts */
-+#ifdef DEBUG
-+static char *interrupt2str(u16 interrupt)
++int ide_device_add(u8 idx[4])
 +{
-+	static char buf[TMP_BUF_MAX];
-+	char *p = buf;
-+
-+	interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL;
-+	interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"MDM_DL2 ") : NULL;
-+
-+	interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"MDM_UL1 ") : NULL;
-+	interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"MDM_UL2 ") : NULL;
-+
-+	interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"DIAG_DL1 ") : NULL;
-+	interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"DIAG_DL2 ") : NULL;
-+
-+	interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"DIAG_UL ") : NULL;
-+
-+	interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"APP1_DL ") : NULL;
-+	interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"APP2_DL ") : NULL;
-+
-+	interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"APP1_UL ") : NULL;
-+	interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"APP2_UL ") : NULL;
-+
-+	interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"CTRL_DL ") : NULL;
-+	interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"CTRL_UL ") : NULL;
-+
-+	interrupt & RESET ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
-+					"RESET ") : NULL;
++	u8 idx_all[MAX_HWIFS];
++	int i;
+ 
++	for (i = 0; i < MAX_HWIFS; i++)
++		idx_all[i] = (i < 4) ? idx[i] : 0xff;
 +
-+	return buf;
++	return ide_device_add_all(idx_all);
 +}
-+#endif
+ EXPORT_SYMBOL_GPL(ide_device_add);
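[Not part of the patch: a small standalone sketch, in plain C, of the idx[] padding that the new ide_device_add() wrapper above performs before calling ide_device_add_all() — the caller's four slots are copied and the remaining slots are marked unused with 0xff, which ide_device_add_all() skips. MAX_HWIFS_SKETCH and pad_idx are assumed names, and the array size here is only a stand-in for the real MAX_HWIFS.]

#include <stdint.h>
#include <stdio.h>

#define MAX_HWIFS_SKETCH 10	/* assumed value, for the sketch only */

static void pad_idx(const uint8_t idx[4], uint8_t idx_all[MAX_HWIFS_SKETCH])
{
	int i;

	for (i = 0; i < MAX_HWIFS_SKETCH; i++)
		idx_all[i] = (i < 4) ? idx[i] : 0xff;
}

int main(void)
{
	uint8_t idx[4] = { 0, 1, 0xff, 0xff };
	uint8_t idx_all[MAX_HWIFS_SKETCH];
	int i;

	pad_idx(idx, idx_all);

	for (i = 0; i < MAX_HWIFS_SKETCH; i++)
		printf("%d: 0x%02x\n", i, (unsigned)idx_all[i]);
	return 0;
}
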
+diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
+index a4007d3..aa663e7 100644
+--- a/drivers/ide/ide-proc.c
++++ b/drivers/ide/ide-proc.c
+@@ -346,14 +346,20 @@ static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int va
+ 
+ static int set_xfer_rate (ide_drive_t *drive, int arg)
+ {
++	ide_task_t task;
+ 	int err;
+ 
+ 	if (arg < 0 || arg > 70)
+ 		return -EINVAL;
+ 
+-	err = ide_wait_cmd(drive,
+-			WIN_SETFEATURES, (u8) arg,
+-			SETFEATURES_XFER, 0, NULL);
++	memset(&task, 0, sizeof(task));
++	task.tf.command = WIN_SETFEATURES;
++	task.tf.feature = SETFEATURES_XFER;
++	task.tf.nsect   = (u8)arg;
++	task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT |
++			IDE_TFLAG_IN_NSECT;
 +
++	err = ide_no_data_taskfile(drive, &task);
+ 
+ 	if (!err && arg) {
+ 		ide_set_xfer_rate(drive, (u8) arg);
+diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
+new file mode 100644
+index 0000000..7ffa332
+--- /dev/null
++++ b/drivers/ide/ide-scan-pci.c
+@@ -0,0 +1,121 @@
 +/*
-+ * Receive flow control
-+ * Return 1 - If ok, else 0
++ * support for probing IDE PCI devices in the PCI bus order
++ *
++ * Copyright (c) 1998-2000  Andre Hedrick <andre at linux-ide.org>
++ * Copyright (c) 1995-1998  Mark Lord
++ *
++ * May be copied or modified under the terms of the GNU General Public License
 + */
-+static int receive_flow_control(struct nozomi *dc)
-+{
-+	enum port_type port = PORT_MDM;
-+	struct ctrl_dl ctrl_dl;
-+	struct ctrl_dl old_ctrl;
-+	u16 enable_ier = 0;
-+
-+	read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2);
-+
-+	switch (ctrl_dl.port) {
-+	case CTRL_CMD:
-+		DBG1("The Base Band sends this value as a response to a "
-+			"request for IMSI detach sent over the control "
-+			"channel uplink (see section 7.6.1).");
-+		break;
-+	case CTRL_MDM:
-+		port = PORT_MDM;
-+		enable_ier = MDM_DL;
-+		break;
-+	case CTRL_DIAG:
-+		port = PORT_DIAG;
-+		enable_ier = DIAG_DL;
-+		break;
-+	case CTRL_APP1:
-+		port = PORT_APP1;
-+		enable_ier = APP1_DL;
-+		break;
-+	case CTRL_APP2:
-+		port = PORT_APP2;
-+		enable_ier = APP2_DL;
-+		break;
-+	default:
-+		dev_err(&dc->pdev->dev,
-+			"ERROR: flow control received for non-existing port\n");
-+		return 0;
-+	};
-+
-+	DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl),
-+	   *((u16 *)&ctrl_dl));
-+
-+	old_ctrl = dc->port[port].ctrl_dl;
-+	dc->port[port].ctrl_dl = ctrl_dl;
 +
-+	if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) {
-+		DBG1("Disable interrupt (0x%04X) on port: %d",
-+			enable_ier, port);
-+		disable_transmit_ul(port, dc);
-+
-+	} else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
-+
-+		if (__kfifo_len(dc->port[port].fifo_ul)) {
-+			DBG1("Enable interrupt (0x%04X) on port: %d",
-+				enable_ier, port);
-+			DBG1("Data in buffer [%d], enable transmit! ",
-+				__kfifo_len(dc->port[port].fifo_ul));
-+			enable_transmit_ul(port, dc);
-+		} else {
-+			DBG1("No data in buffer...");
-+		}
-+	}
-+
-+	if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) {
-+		DBG1(" No change in mctrl");
-+		return 1;
-+	}
-+	/* Update statistics */
-+	if (old_ctrl.CTS != ctrl_dl.CTS)
-+		dc->port[port].tty_icount.cts++;
-+	if (old_ctrl.DSR != ctrl_dl.DSR)
-+		dc->port[port].tty_icount.dsr++;
-+	if (old_ctrl.RI != ctrl_dl.RI)
-+		dc->port[port].tty_icount.rng++;
-+	if (old_ctrl.DCD != ctrl_dl.DCD)
-+		dc->port[port].tty_icount.dcd++;
-+
-+	wake_up_interruptible(&dc->port[port].tty_wait);
-+
-+	DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)",
-+	   port,
-+	   dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts,
-+	   dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr);
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/ide.h>
 +
-+	return 1;
-+}
++/*
++ *	Module interfaces
++ */
 +
-+static enum ctrl_port_type port2ctrl(enum port_type port,
-+					const struct nozomi *dc)
-+{
-+	switch (port) {
-+	case PORT_MDM:
-+		return CTRL_MDM;
-+	case PORT_DIAG:
-+		return CTRL_DIAG;
-+	case PORT_APP1:
-+		return CTRL_APP1;
-+	case PORT_APP2:
-+		return CTRL_APP2;
-+	default:
-+		dev_err(&dc->pdev->dev,
-+			"ERROR: send flow control " \
-+			"received for non-existing port\n");
-+	};
-+	return CTRL_ERROR;
-+}
++static int pre_init = 1;		/* Before first ordered IDE scan */
++static LIST_HEAD(ide_pci_drivers);
 +
 +/*
-+ * Send flow control, can only update one channel at a time
-+ * Return 0 - If we have updated all flow control
-+ * Return 1 - If we need to update more flow control, ack current enable more
++ *	__ide_pci_register_driver	-	attach IDE driver
++ *	@driver: pci driver
++ *	@module: owner module of the driver
++ *
++ *	Registers a driver with the IDE layer. The IDE layer arranges that
++ *	boot time setup is done in the expected device order and then
++ *	hands the controllers off to the core PCI code to do the rest of
++ *	the work.
++ *
++ *	Returns are the same as for pci_register_driver
 + */
-+static int send_flow_control(struct nozomi *dc)
-+{
-+	u32 i, more_flow_control_to_be_updated = 0;
-+	u16 *ctrl;
 +
-+	for (i = PORT_MDM; i < MAX_PORT; i++) {
-+		if (dc->port[i].update_flow_control) {
-+			if (more_flow_control_to_be_updated) {
-+				/* We have more flow control to be updated */
-+				return 1;
-+			}
-+			dc->port[i].ctrl_ul.port = port2ctrl(i, dc);
-+			ctrl = (u16 *)&dc->port[i].ctrl_ul;
-+			write_mem32(dc->port[PORT_CTRL].ul_addr[0], \
-+				(u32 *) ctrl, 2);
-+			dc->port[i].update_flow_control = 0;
-+			more_flow_control_to_be_updated = 1;
-+		}
-+	}
++int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
++			      const char *mod_name)
++{
++	if (!pre_init)
++		return __pci_register_driver(driver, module, mod_name);
++	driver->driver.owner = module;
++	list_add_tail(&driver->node, &ide_pci_drivers);
 +	return 0;
 +}
++EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
 +
-+/*
-+ * Handle donlink data, ports that are handled are modem and diagnostics
-+ * Return 1 - ok
-+ * Return 0 - toggle fields are out of sync
++/**
++ *	ide_scan_pcidev		-	find an IDE driver for a device
++ *	@dev: PCI device to check
++ *
++ *	Look for an IDE driver to handle the device we are considering.
++ *	This is only used during boot up to get the ordering correct. After
++ *	boot up the pci layer takes over the job.
 + */
-+static int handle_data_dl(struct nozomi *dc, enum port_type port, u8 *toggle,
-+			u16 read_iir, u16 mask1, u16 mask2)
++
++static int __init ide_scan_pcidev(struct pci_dev *dev)
 +{
-+	if (*toggle == 0 && read_iir & mask1) {
-+		if (receive_data(port, dc)) {
-+			writew(mask1, dc->reg_fcr);
-+			*toggle = !(*toggle);
-+		}
++	struct list_head *l;
++	struct pci_driver *d;
 +
-+		if (read_iir & mask2) {
-+			if (receive_data(port, dc)) {
-+				writew(mask2, dc->reg_fcr);
-+				*toggle = !(*toggle);
-+			}
-+		}
-+	} else if (*toggle == 1 && read_iir & mask2) {
-+		if (receive_data(port, dc)) {
-+			writew(mask2, dc->reg_fcr);
-+			*toggle = !(*toggle);
-+		}
++	list_for_each(l, &ide_pci_drivers) {
++		d = list_entry(l, struct pci_driver, node);
++		if (d->id_table) {
++			const struct pci_device_id *id =
++				pci_match_id(d->id_table, dev);
 +
-+		if (read_iir & mask1) {
-+			if (receive_data(port, dc)) {
-+				writew(mask1, dc->reg_fcr);
-+				*toggle = !(*toggle);
++			if (id != NULL && d->probe(dev, id) >= 0) {
++				dev->driver = d;
++				pci_dev_get(dev);
++				return 1;
 +			}
 +		}
-+	} else {
-+		dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n",
-+			*toggle);
-+		return 0;
 +	}
-+	return 1;
++	return 0;
 +}
 +
-+/*
-+ * Handle uplink data, this is currently for the modem port
-+ * Return 1 - ok
-+ * Return 0 - toggle field are out of sync
++/**
++ *	ide_scan_pcibus		-	perform the initial IDE driver scan
++ *
++ *	Perform the initial bus rather than driver ordered scan of the
++ *	PCI drivers. After this all IDE pci handling becomes standard
++ *	module ordering not traditionally ordered.
 + */
-+static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir)
-+{
-+	u8 *toggle = &(dc->port[port].toggle_ul);
-+
-+	if (*toggle == 0 && read_iir & MDM_UL1) {
-+		dc->last_ier &= ~MDM_UL;
-+		writew(dc->last_ier, dc->reg_ier);
-+		if (send_data(port, dc)) {
-+			writew(MDM_UL1, dc->reg_fcr);
-+			dc->last_ier = dc->last_ier | MDM_UL;
-+			writew(dc->last_ier, dc->reg_ier);
-+			*toggle = !*toggle;
-+		}
-+
-+		if (read_iir & MDM_UL2) {
-+			dc->last_ier &= ~MDM_UL;
-+			writew(dc->last_ier, dc->reg_ier);
-+			if (send_data(port, dc)) {
-+				writew(MDM_UL2, dc->reg_fcr);
-+				dc->last_ier = dc->last_ier | MDM_UL;
-+				writew(dc->last_ier, dc->reg_ier);
-+				*toggle = !*toggle;
-+			}
-+		}
-+
-+	} else if (*toggle == 1 && read_iir & MDM_UL2) {
-+		dc->last_ier &= ~MDM_UL;
-+		writew(dc->last_ier, dc->reg_ier);
-+		if (send_data(port, dc)) {
-+			writew(MDM_UL2, dc->reg_fcr);
-+			dc->last_ier = dc->last_ier | MDM_UL;
-+			writew(dc->last_ier, dc->reg_ier);
-+			*toggle = !*toggle;
-+		}
-+
-+		if (read_iir & MDM_UL1) {
-+			dc->last_ier &= ~MDM_UL;
-+			writew(dc->last_ier, dc->reg_ier);
-+			if (send_data(port, dc)) {
-+				writew(MDM_UL1, dc->reg_fcr);
-+				dc->last_ier = dc->last_ier | MDM_UL;
-+				writew(dc->last_ier, dc->reg_ier);
-+				*toggle = !*toggle;
-+			}
-+		}
-+	} else {
-+		writew(read_iir & MDM_UL, dc->reg_fcr);
-+		dev_err(&dc->pdev->dev, "port out of sync!\n");
-+		return 0;
-+	}
-+	return 1;
-+}
 +
-+static irqreturn_t interrupt_handler(int irq, void *dev_id)
++int __init ide_scan_pcibus(void)
 +{
-+	struct nozomi *dc = dev_id;
-+	unsigned int a;
-+	u16 read_iir;
-+
-+	if (!dc)
-+		return IRQ_NONE;
++	struct pci_dev *dev = NULL;
++	struct pci_driver *d;
++	struct list_head *l, *n;
 +
-+	spin_lock(&dc->spin_mutex);
-+	read_iir = readw(dc->reg_iir);
++	pre_init = 0;
++	if (!ide_scan_direction)
++		while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
++			ide_scan_pcidev(dev);
++	else
++		while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID,
++						     dev)))
++			ide_scan_pcidev(dev);
 +
-+	/* Card removed */
-+	if (read_iir == (u16)-1)
-+		goto none;
 +	/*
-+	 * Just handle interrupt enabled in IER
-+	 * (by masking with dc->last_ier)
++	 *	Hand the drivers over to the PCI layer now we
++	 *	are post init.
 +	 */
-+	read_iir &= dc->last_ier;
-+
-+	if (read_iir == 0)
-+		goto none;
-+
-+
-+	DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
-+		dc->last_ier);
 +
-+	if (read_iir & RESET) {
-+		if (unlikely(!nozomi_read_config_table(dc))) {
-+			dc->last_ier = 0x0;
-+			writew(dc->last_ier, dc->reg_ier);
-+			dev_err(&dc->pdev->dev, "Could not read status from "
-+				"card, we should disable interface\n");
-+		} else {
-+			writew(RESET, dc->reg_fcr);
-+		}
-+		/* No more useful info if this was the reset interrupt. */
-+		goto exit_handler;
-+	}
-+	if (read_iir & CTRL_UL) {
-+		DBG1("CTRL_UL");
-+		dc->last_ier &= ~CTRL_UL;
-+		writew(dc->last_ier, dc->reg_ier);
-+		if (send_flow_control(dc)) {
-+			writew(CTRL_UL, dc->reg_fcr);
-+			dc->last_ier = dc->last_ier | CTRL_UL;
-+			writew(dc->last_ier, dc->reg_ier);
-+		}
-+	}
-+	if (read_iir & CTRL_DL) {
-+		receive_flow_control(dc);
-+		writew(CTRL_DL, dc->reg_fcr);
-+	}
-+	if (read_iir & MDM_DL) {
-+		if (!handle_data_dl(dc, PORT_MDM,
-+				&(dc->port[PORT_MDM].toggle_dl), read_iir,
-+				MDM_DL1, MDM_DL2)) {
-+			dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
-+			goto exit_handler;
-+		}
-+	}
-+	if (read_iir & MDM_UL) {
-+		if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
-+			dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
-+			goto exit_handler;
-+		}
-+	}
-+	if (read_iir & DIAG_DL) {
-+		if (!handle_data_dl(dc, PORT_DIAG,
-+				&(dc->port[PORT_DIAG].toggle_dl), read_iir,
-+				DIAG_DL1, DIAG_DL2)) {
-+			dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
-+			goto exit_handler;
-+		}
-+	}
-+	if (read_iir & DIAG_UL) {
-+		dc->last_ier &= ~DIAG_UL;
-+		writew(dc->last_ier, dc->reg_ier);
-+		if (send_data(PORT_DIAG, dc)) {
-+			writew(DIAG_UL, dc->reg_fcr);
-+			dc->last_ier = dc->last_ier | DIAG_UL;
-+			writew(dc->last_ier, dc->reg_ier);
-+		}
-+	}
-+	if (read_iir & APP1_DL) {
-+		if (receive_data(PORT_APP1, dc))
-+			writew(APP1_DL, dc->reg_fcr);
-+	}
-+	if (read_iir & APP1_UL) {
-+		dc->last_ier &= ~APP1_UL;
-+		writew(dc->last_ier, dc->reg_ier);
-+		if (send_data(PORT_APP1, dc)) {
-+			writew(APP1_UL, dc->reg_fcr);
-+			dc->last_ier = dc->last_ier | APP1_UL;
-+			writew(dc->last_ier, dc->reg_ier);
-+		}
-+	}
-+	if (read_iir & APP2_DL) {
-+		if (receive_data(PORT_APP2, dc))
-+			writew(APP2_DL, dc->reg_fcr);
-+	}
-+	if (read_iir & APP2_UL) {
-+		dc->last_ier &= ~APP2_UL;
-+		writew(dc->last_ier, dc->reg_ier);
-+		if (send_data(PORT_APP2, dc)) {
-+			writew(APP2_UL, dc->reg_fcr);
-+			dc->last_ier = dc->last_ier | APP2_UL;
-+			writew(dc->last_ier, dc->reg_ier);
-+		}
++	list_for_each_safe(l, n, &ide_pci_drivers) {
++		list_del(l);
++		d = list_entry(l, struct pci_driver, node);
++		if (__pci_register_driver(d, d->driver.owner,
++					  d->driver.mod_name))
++			printk(KERN_ERR "%s: failed to register %s driver\n",
++					__FUNCTION__, d->driver.mod_name);
 +	}
 +
-+exit_handler:
-+	spin_unlock(&dc->spin_mutex);
-+	for (a = 0; a < NOZOMI_MAX_PORTS; a++)
-+		if (test_and_clear_bit(a, &dc->flip))
-+			tty_flip_buffer_push(dc->port[a].tty);
-+	return IRQ_HANDLED;
-+none:
-+	spin_unlock(&dc->spin_mutex);
-+	return IRQ_NONE;
-+}
-+
-+static void nozomi_get_card_type(struct nozomi *dc)
-+{
-+	int i;
-+	u32 size = 0;
-+
-+	for (i = 0; i < 6; i++)
-+		size += pci_resource_len(dc->pdev, i);
-+
-+	/* Assume card type F32_8 if no match */
-+	dc->card_type = size == 2048 ? F32_2 : F32_8;
-+
-+	dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type);
-+}
-+
-+static void nozomi_setup_private_data(struct nozomi *dc)
-+{
-+	void __iomem *offset = dc->base_addr + dc->card_type / 2;
-+	unsigned int i;
-+
-+	dc->reg_fcr = (void __iomem *)(offset + R_FCR);
-+	dc->reg_iir = (void __iomem *)(offset + R_IIR);
-+	dc->reg_ier = (void __iomem *)(offset + R_IER);
-+	dc->last_ier = 0;
-+	dc->flip = 0;
-+
-+	dc->port[PORT_MDM].token_dl = MDM_DL;
-+	dc->port[PORT_DIAG].token_dl = DIAG_DL;
-+	dc->port[PORT_APP1].token_dl = APP1_DL;
-+	dc->port[PORT_APP2].token_dl = APP2_DL;
-+
-+	for (i = 0; i < MAX_PORT; i++)
-+		init_waitqueue_head(&dc->port[i].tty_wait);
-+}
-+
-+static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
-+			  char *buf)
-+{
-+	struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
-+
-+	return sprintf(buf, "%d\n", dc->card_type);
-+}
-+static DEVICE_ATTR(card_type, 0444, card_type_show, NULL);
-+
-+static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
-+			  char *buf)
-+{
-+	struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
-+
-+	return sprintf(buf, "%u\n", dc->open_ttys);
-+}
-+static DEVICE_ATTR(open_ttys, 0444, open_ttys_show, NULL);
-+
-+static void make_sysfs_files(struct nozomi *dc)
-+{
-+	if (device_create_file(&dc->pdev->dev, &dev_attr_card_type))
-+		dev_err(&dc->pdev->dev,
-+			"Could not create sysfs file for card_type\n");
-+	if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys))
-+		dev_err(&dc->pdev->dev,
-+			"Could not create sysfs file for open_ttys\n");
++	return 0;
 +}
 +
-+static void remove_sysfs_files(struct nozomi *dc)
++static int __init ide_scan_pci(void)
 +{
-+	device_remove_file(&dc->pdev->dev, &dev_attr_card_type);
-+	device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys);
++	return ide_scan_pcibus();
 +}
 +
-+/* Allocate memory for one device */
-+static int __devinit nozomi_card_init(struct pci_dev *pdev,
-+				      const struct pci_device_id *ent)
-+{
-+	resource_size_t start;
-+	int ret;
-+	struct nozomi *dc = NULL;
-+	int ndev_idx;
-+	int i;
-+
-+	dev_dbg(&pdev->dev, "Init, new card found\n");
-+
-+	for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++)
-+		if (!ndevs[ndev_idx])
-+			break;
-+
-+	if (ndev_idx >= ARRAY_SIZE(ndevs)) {
-+		dev_err(&pdev->dev, "no free tty range for this card left\n");
-+		ret = -EIO;
-+		goto err;
-+	}
-+
-+	dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL);
-+	if (unlikely(!dc)) {
-+		dev_err(&pdev->dev, "Could not allocate memory\n");
-+		ret = -ENOMEM;
-+		goto err_free;
-+	}
-+
-+	dc->pdev = pdev;
-+
-+	/* Find out what card type it is */
-+	nozomi_get_card_type(dc);
-+
-+	ret = pci_enable_device(dc->pdev);
-+	if (ret) {
-+		dev_err(&pdev->dev, "Failed to enable PCI Device\n");
-+		goto err_free;
-+	}
-+
-+	start = pci_resource_start(dc->pdev, 0);
-+	if (start == 0) {
-+		dev_err(&pdev->dev, "No I/O address for card detected\n");
-+		ret = -ENODEV;
-+		goto err_disable_device;
-+	}
-+
-+	ret = pci_request_regions(dc->pdev, NOZOMI_NAME);
-+	if (ret) {
-+		dev_err(&pdev->dev, "I/O address 0x%04x already in use\n",
-+			(int) /* nozomi_private.io_addr */ 0);
-+		goto err_disable_device;
-+	}
-+
-+	dc->base_addr = ioremap(start, dc->card_type);
-+	if (!dc->base_addr) {
-+		dev_err(&pdev->dev, "Unable to map card MMIO\n");
-+		ret = -ENODEV;
-+		goto err_rel_regs;
-+	}
-+
-+	dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL);
-+	if (!dc->send_buf) {
-+		dev_err(&pdev->dev, "Could not allocate send buffer?\n");
-+		ret = -ENOMEM;
-+		goto err_free_sbuf;
++module_init(ide_scan_pci);
+diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
+index 7b9181b..d71a584 100644
+--- a/drivers/ide/ide-tape.c
++++ b/drivers/ide/ide-tape.c
+@@ -615,16 +615,6 @@ typedef struct os_dat_s {
+ /*************************** End of tunable parameters ***********************/
+ 
+ /*
+- *	Debugging/Performance analysis
+- *
+- *	I/O trace support
+- */
+-#define USE_IOTRACE	0
+-#if USE_IOTRACE
+-#define IO_IDETAPE_FIFO	500
+-#endif
+-
+-/*
+  *	Read/Write error simulation
+  */
+ #define SIMULATE_ERRORS			0
+@@ -1700,6 +1690,11 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
+ 	if (error)
+ 		tape->failed_pc = NULL;
+ 
++	if (!blk_special_request(rq)) {
++		ide_end_request(drive, uptodate, nr_sects);
++		return 0;
 +	}
 +
-+	spin_lock_init(&dc->spin_mutex);
-+
-+	nozomi_setup_private_data(dc);
+ 	spin_lock_irqsave(&tape->spinlock, flags);
+ 
+ 	/* The request was a pipelined data transfer request */
+@@ -1818,9 +1813,8 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
+ 	idetape_tape_t *tape = drive->driver_data;
+ 	idetape_pc_t *pc;
+ 	struct request *rq;
+-	atapi_error_t error;
+ 
+-	error.all = HWIF(drive)->INB(IDE_ERROR_REG);
++	(void)drive->hwif->INB(IDE_ERROR_REG);
+ 	pc = idetape_next_pc_storage(drive);
+ 	rq = idetape_next_rq_storage(drive);
+ 	idetape_create_request_sense_cmd(pc);
+@@ -1858,15 +1852,13 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = drive->hwif;
+ 	idetape_tape_t *tape = drive->driver_data;
+-	atapi_status_t status;
+-	atapi_bcount_t bcount;
+-	atapi_ireason_t ireason;
+ 	idetape_pc_t *pc = tape->pc;
+-
+ 	unsigned int temp;
+ #if SIMULATE_ERRORS
+ 	static int error_sim_count = 0;
+ #endif
++	u16 bcount;
++	u8 stat, ireason;
+ 
+ #if IDETAPE_DEBUG_LOG
+ 	if (tape->debug_level >= 4)
+@@ -1875,10 +1867,10 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+ #endif /* IDETAPE_DEBUG_LOG */	
+ 
+ 	/* Clear the interrupt */
+-	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
++	stat = hwif->INB(IDE_STATUS_REG);
+ 
+ 	if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
+-		if (HWIF(drive)->ide_dma_end(drive) || status.b.check) {
++		if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
+ 			/*
+ 			 * A DMA error is sometimes expected. For example,
+ 			 * if the tape is crossing a filemark during a
+@@ -1912,7 +1904,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+ 	}
+ 
+ 	/* No more interrupts */
+-	if (!status.b.drq) {
++	if ((stat & DRQ_STAT) == 0) {
+ #if IDETAPE_DEBUG_LOG
+ 		if (tape->debug_level >= 2)
+ 			printk(KERN_INFO "ide-tape: Packet command completed, %d bytes transferred\n", pc->actually_transferred);
+@@ -1927,12 +1919,13 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+ 		    (++error_sim_count % 100) == 0) {
+ 			printk(KERN_INFO "ide-tape: %s: simulating error\n",
+ 				tape->name);
+-			status.b.check = 1;
++			stat |= ERR_STAT;
+ 		}
+ #endif
+-		if (status.b.check && pc->c[0] == IDETAPE_REQUEST_SENSE_CMD)
+-			status.b.check = 0;
+-		if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) {	/* Error detected */
++		if ((stat & ERR_STAT) && pc->c[0] == IDETAPE_REQUEST_SENSE_CMD)
++			stat &= ~ERR_STAT;
++		if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) {
++			/* Error detected */
+ #if IDETAPE_DEBUG_LOG
+ 			if (tape->debug_level >= 1)
+ 				printk(KERN_INFO "ide-tape: %s: I/O error\n",
+@@ -1951,7 +1944,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+ 		}
+ 		pc->error = 0;
+ 		if (test_bit(PC_WAIT_FOR_DSC, &pc->flags) &&
+-		    !status.b.dsc) {
++		    (stat & SEEK_STAT) == 0) {
+ 			/* Media access command */
+ 			tape->dsc_polling_start = jiffies;
+ 			tape->dsc_polling_frequency = IDETAPE_DSC_MA_FAST;
+@@ -1973,30 +1966,30 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+ 		return ide_do_reset(drive);
+ 	}
+ 	/* Get the number of bytes to transfer on this interrupt. */
+-	bcount.b.high = hwif->INB(IDE_BCOUNTH_REG);
+-	bcount.b.low = hwif->INB(IDE_BCOUNTL_REG);
++	bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) |
++		  hwif->INB(IDE_BCOUNTL_REG);
+ 
+-	ireason.all = hwif->INB(IDE_IREASON_REG);
++	ireason = hwif->INB(IDE_IREASON_REG);
+ 
+-	if (ireason.b.cod) {
++	if (ireason & CD) {
+ 		printk(KERN_ERR "ide-tape: CoD != 0 in idetape_pc_intr\n");
+ 		return ide_do_reset(drive);
+ 	}
+-	if (ireason.b.io == test_bit(PC_WRITING, &pc->flags)) {
++	if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) {
+ 		/* Hopefully, we will never get here */
+ 		printk(KERN_ERR "ide-tape: We wanted to %s, ",
+-			ireason.b.io ? "Write":"Read");
++				(ireason & IO) ? "Write" : "Read");
+ 		printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
+-			ireason.b.io ? "Read":"Write");
++				(ireason & IO) ? "Read" : "Write");
+ 		return ide_do_reset(drive);
+ 	}
+ 	if (!test_bit(PC_WRITING, &pc->flags)) {
+ 		/* Reading - Check that we have enough space */
+-		temp = pc->actually_transferred + bcount.all;
++		temp = pc->actually_transferred + bcount;
+ 		if (temp > pc->request_transfer) {
+ 			if (temp > pc->buffer_size) {
+ 				printk(KERN_ERR "ide-tape: The tape wants to send us more data than expected - discarding data\n");
+-				idetape_discard_data(drive, bcount.all);
++				idetape_discard_data(drive, bcount);
+ 				ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
+ 				return ide_started;
+ 			}
+@@ -2008,23 +2001,26 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+ 	}
+ 	if (test_bit(PC_WRITING, &pc->flags)) {
+ 		if (pc->bh != NULL)
+-			idetape_output_buffers(drive, pc, bcount.all);
++			idetape_output_buffers(drive, pc, bcount);
+ 		else
+ 			/* Write the current buffer */
+-			HWIF(drive)->atapi_output_bytes(drive, pc->current_position, bcount.all);
++			hwif->atapi_output_bytes(drive, pc->current_position,
++						 bcount);
+ 	} else {
+ 		if (pc->bh != NULL)
+-			idetape_input_buffers(drive, pc, bcount.all);
++			idetape_input_buffers(drive, pc, bcount);
+ 		else
+ 			/* Read the current buffer */
+-			HWIF(drive)->atapi_input_bytes(drive, pc->current_position, bcount.all);
++			hwif->atapi_input_bytes(drive, pc->current_position,
++						bcount);
+ 	}
+ 	/* Update the current position */
+-	pc->actually_transferred += bcount.all;
+-	pc->current_position += bcount.all;
++	pc->actually_transferred += bcount;
++	pc->current_position += bcount;
+ #if IDETAPE_DEBUG_LOG
+ 	if (tape->debug_level >= 2)
+-		printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes on that interrupt\n", pc->c[0], bcount.all);
++		printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes "
++				 "on that interrupt\n", pc->c[0], bcount);
+ #endif
+ 	/* And set the interrupt handler again */
+ 	ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
+@@ -2078,28 +2074,28 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
+ 	ide_hwif_t *hwif = drive->hwif;
+ 	idetape_tape_t *tape = drive->driver_data;
+ 	idetape_pc_t *pc = tape->pc;
+-	atapi_ireason_t ireason;
+ 	int retries = 100;
+ 	ide_startstop_t startstop;
++	u8 ireason;
+ 
+ 	if (ide_wait_stat(&startstop,drive,DRQ_STAT,BUSY_STAT,WAIT_READY)) {
+ 		printk(KERN_ERR "ide-tape: Strange, packet command initiated yet DRQ isn't asserted\n");
+ 		return startstop;
+ 	}
+-	ireason.all = hwif->INB(IDE_IREASON_REG);
+-	while (retries-- && (!ireason.b.cod || ireason.b.io)) {
++	ireason = hwif->INB(IDE_IREASON_REG);
++	while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
+ 		printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
+ 				"a packet command, retrying\n");
+ 		udelay(100);
+-		ireason.all = hwif->INB(IDE_IREASON_REG);
++		ireason = hwif->INB(IDE_IREASON_REG);
+ 		if (retries == 0) {
+ 			printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
+ 					"issuing a packet command, ignoring\n");
+-			ireason.b.cod = 1;
+-			ireason.b.io = 0;
++			ireason |= CD;
++			ireason &= ~IO;
+ 		}
+ 	}
+-	if (!ireason.b.cod || ireason.b.io) {
++	if ((ireason & CD) == 0 || (ireason & IO)) {
+ 		printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
+ 				"a packet command\n");
+ 		return ide_do_reset(drive);
+@@ -2120,8 +2116,8 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
+ {
+ 	ide_hwif_t *hwif = drive->hwif;
+ 	idetape_tape_t *tape = drive->driver_data;
+-	atapi_bcount_t bcount;
+ 	int dma_ok = 0;
++	u16 bcount;
+ 
+ #if IDETAPE_DEBUG_BUGS
+ 	if (tape->pc->c[0] == IDETAPE_REQUEST_SENSE_CMD &&
+@@ -2170,7 +2166,7 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
+ 	pc->actually_transferred = 0;
+ 	pc->current_position = pc->buffer;
+ 	/* Request to transfer the entire buffer at once */
+-	bcount.all = pc->request_transfer;
++	bcount = pc->request_transfer;
+ 
+ 	if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags)) {
+ 		printk(KERN_WARNING "ide-tape: DMA disabled, "
+@@ -2180,12 +2176,9 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
+ 	if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma)
+ 		dma_ok = !hwif->dma_setup(drive);
+ 
+-	if (IDE_CONTROL_REG)
+-		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
+-	hwif->OUTB(dma_ok ? 1 : 0, IDE_FEATURE_REG);	/* Use PIO/DMA */
+-	hwif->OUTB(bcount.b.high, IDE_BCOUNTH_REG);
+-	hwif->OUTB(bcount.b.low, IDE_BCOUNTL_REG);
+-	hwif->OUTB(drive->select.all, IDE_SELECT_REG);
++	ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
++			   IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
 +
-+	/* Disable all interrupts */
-+	dc->last_ier = 0;
-+	writew(dc->last_ier, dc->reg_ier);
+ 	if (dma_ok)			/* Will begin DMA later */
+ 		set_bit(PC_DMA_IN_PROGRESS, &pc->flags);
+ 	if (test_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags)) {
+@@ -2295,11 +2288,11 @@ static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
+ {
+ 	idetape_tape_t *tape = drive->driver_data;
+ 	idetape_pc_t *pc = tape->pc;
+-	atapi_status_t status;
++	u8 stat;
+ 
+-	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
+-	if (status.b.dsc) {
+-		if (status.b.check) {
++	stat = drive->hwif->INB(IDE_STATUS_REG);
++	if (stat & SEEK_STAT) {
++		if (stat & ERR_STAT) {
+ 			/* Error detected */
+ 			if (pc->c[0] != IDETAPE_TEST_UNIT_READY_CMD)
+ 				printk(KERN_ERR "ide-tape: %s: I/O error, ",
+@@ -2417,7 +2410,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+ 	idetape_tape_t *tape = drive->driver_data;
+ 	idetape_pc_t *pc = NULL;
+ 	struct request *postponed_rq = tape->postponed_rq;
+-	atapi_status_t status;
++	u8 stat;
+ 
+ #if IDETAPE_DEBUG_LOG
+ #if 0
+@@ -2465,7 +2458,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+ 	 * If the tape is still busy, postpone our request and service
+ 	 * the other device meanwhile.
+ 	 */
+-	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
++	stat = drive->hwif->INB(IDE_STATUS_REG);
+ 
+ 	if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
+ 		set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
+@@ -2481,7 +2474,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+ 		tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
+ 	calculate_speeds(drive);
+ 	if (!test_and_clear_bit(IDETAPE_IGNORE_DSC, &tape->flags) &&
+-	    !status.b.dsc) {
++	    (stat & SEEK_STAT) == 0) {
+ 		if (postponed_rq == NULL) {
+ 			tape->dsc_polling_start = jiffies;
+ 			tape->dsc_polling_frequency = tape->best_dsc_rw_frequency;
+@@ -2502,9 +2495,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+ 	}
+ 	if (rq->cmd[0] & REQ_IDETAPE_READ) {
+ 		tape->buffer_head++;
+-#if USE_IOTRACE
+-		IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor);
+-#endif
+ 		tape->postpone_cnt = 0;
+ 		pc = idetape_next_pc_storage(drive);
+ 		idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
+@@ -2512,9 +2502,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+ 	}
+ 	if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
+ 		tape->buffer_head++;
+-#if USE_IOTRACE
+-		IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor);
+-#endif
+ 		tape->postpone_cnt = 0;
+ 		pc = idetape_next_pc_storage(drive);
+ 		idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
+@@ -3241,9 +3228,6 @@ static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
+ 	idetape_switch_buffers(tape, new_stage);
+ 	idetape_add_stage_tail(drive, new_stage);
+ 	tape->pipeline_head++;
+-#if USE_IOTRACE
+-	IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor);
+-#endif
+ 	calculate_speeds(drive);
+ 
+ 	/*
+@@ -3493,9 +3477,6 @@ static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
+ 		idetape_remove_stage_head(drive);
+ 		spin_unlock_irqrestore(&tape->spinlock, flags);
+ 		tape->pipeline_head++;
+-#if USE_IOTRACE
+-		IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor);
+-#endif
+ 		calculate_speeds(drive);
+ 	}
+ #if IDETAPE_DEBUG_BUGS
+@@ -4724,10 +4705,8 @@ static void ide_tape_release(struct kref *kref)
+ 
+ 	drive->dsc_overlap = 0;
+ 	drive->driver_data = NULL;
+-	class_device_destroy(idetape_sysfs_class,
+-			MKDEV(IDETAPE_MAJOR, tape->minor));
+-	class_device_destroy(idetape_sysfs_class,
+-			MKDEV(IDETAPE_MAJOR, tape->minor + 128));
++	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
++	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor + 128));
+ 	idetape_devs[tape->minor] = NULL;
+ 	g->private_data = NULL;
+ 	put_disk(g);
+@@ -4884,10 +4863,10 @@ static int ide_tape_probe(ide_drive_t *drive)
+ 
+ 	idetape_setup(drive, tape, minor);
+ 
+-	class_device_create(idetape_sysfs_class, NULL,
+-			MKDEV(IDETAPE_MAJOR, minor), &drive->gendev, "%s", tape->name);
+-	class_device_create(idetape_sysfs_class, NULL,
+-			MKDEV(IDETAPE_MAJOR, minor + 128), &drive->gendev, "n%s", tape->name);
++	device_create(idetape_sysfs_class, &drive->gendev,
++		      MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
++	device_create(idetape_sysfs_class, &drive->gendev,
++			MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);
+ 
+ 	g->fops = &idetape_block_ops;
+ 	ide_register_region(g);
+diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
+index 2b60f1b..5eb6fa1 100644
+--- a/drivers/ide/ide-taskfile.c
++++ b/drivers/ide/ide-taskfile.c
+@@ -35,93 +35,81 @@
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+ 
+-static void ata_bswap_data (void *buffer, int wcount)
++void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+ {
+-	u16 *p = buffer;
+-
+-	while (wcount--) {
+-		*p = *p << 8 | *p >> 8; p++;
+-		*p = *p << 8 | *p >> 8; p++;
+-	}
+-}
+-
+-static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
+-{
+-	HWIF(drive)->ata_input_data(drive, buffer, wcount);
+-	if (drive->bswap)
+-		ata_bswap_data(buffer, wcount);
+-}
++	ide_hwif_t *hwif = drive->hwif;
++	struct ide_taskfile *tf = &task->tf;
++	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
 +
-+	ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED,
-+			NOZOMI_NAME, dc);
-+	if (unlikely(ret)) {
-+		dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
-+		goto err_free_sbuf;
-+	}
++	if (task->tf_flags & IDE_TFLAG_FLAGGED)
++		HIHI = 0xFF;
 +
-+	DBG1("base_addr: %p", dc->base_addr);
++#ifdef DEBUG
++	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
++		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
++		drive->name, tf->feature, tf->nsect, tf->lbal,
++		tf->lbam, tf->lbah, tf->device, tf->command);
++	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
++		"lbam 0x%02x lbah 0x%02x\n",
++		drive->name, tf->hob_nsect, tf->hob_lbal,
++		tf->hob_lbam, tf->hob_lbah);
++#endif
+ 
+-static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
+-{
+-	if (drive->bswap) {
+-		ata_bswap_data(buffer, wcount);
+-		HWIF(drive)->ata_output_data(drive, buffer, wcount);
+-		ata_bswap_data(buffer, wcount);
+-	} else {
+-		HWIF(drive)->ata_output_data(drive, buffer, wcount);
+-	}
++	ide_set_irq(drive, 1);
 +
-+	make_sysfs_files(dc);
++	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
++		SELECT_MASK(drive, 0);
 +
-+	dc->index_start = ndev_idx * MAX_PORT;
-+	ndevs[ndev_idx] = dc;
++	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
++		hwif->OUTW((tf->hob_data << 8) | tf->data, IDE_DATA_REG);
 +
-+	for (i = 0; i < MAX_PORT; i++) {
-+		mutex_init(&dc->port[i].tty_sem);
-+		dc->port[i].tty_open_count = 0;
-+		dc->port[i].tty = NULL;
-+		tty_register_device(ntty_driver, dc->index_start + i,
-+							&pdev->dev);
-+	}
++	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
++		hwif->OUTB(tf->hob_feature, IDE_FEATURE_REG);
++	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
++		hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG);
++	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
++		hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG);
++	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
++		hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG);
++	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
++		hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG);
 +
-+	/* Enable  RESET interrupt. */
-+	dc->last_ier = RESET;
-+	writew(dc->last_ier, dc->reg_ier);
++	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
++		hwif->OUTB(tf->feature, IDE_FEATURE_REG);
++	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
++		hwif->OUTB(tf->nsect, IDE_NSECTOR_REG);
++	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
++		hwif->OUTB(tf->lbal, IDE_SECTOR_REG);
++	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
++		hwif->OUTB(tf->lbam, IDE_LCYL_REG);
++	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
++		hwif->OUTB(tf->lbah, IDE_HCYL_REG);
 +
-+	pci_set_drvdata(pdev, dc);
++	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
++		hwif->OUTB((tf->device & HIHI) | drive->select.all, IDE_SELECT_REG);
+ }
+ 
+ int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
+ {
+ 	ide_task_t args;
 +
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	args.tfRegister[IDE_NSECTOR_OFFSET]	= 0x01;
++	args.tf.nsect = 0x01;
+ 	if (drive->media == ide_disk)
+-		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_IDENTIFY;
++		args.tf.command = WIN_IDENTIFY;
+ 	else
+-		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_PIDENTIFY;
+-	args.command_type = IDE_DRIVE_TASK_IN;
+-	args.data_phase   = TASKFILE_IN;
+-	args.handler	  = &task_in_intr;
+-	return ide_raw_taskfile(drive, &args, buf);
++		args.tf.command = WIN_PIDENTIFY;
++	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++	args.data_phase	= TASKFILE_IN;
++	return ide_raw_taskfile(drive, &args, buf, 1);
+ }
+ 
+-ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
++static int inline task_dma_ok(ide_task_t *task)
+ {
+-	ide_hwif_t *hwif	= HWIF(drive);
+-	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
+-	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
+-	u8 HIHI			= (drive->addressing == 1) ? 0xE0 : 0xEF;
+-
+-	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
+-	if (IDE_CONTROL_REG) {
+-		/* clear nIEN */
+-		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
+-	}
+-	SELECT_MASK(drive, 0);
+-
+-	if (drive->addressing == 1) {
+-		hwif->OUTB(hobfile->feature, IDE_FEATURE_REG);
+-		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
+-		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
+-		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
+-		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
+-	}
+-
+-	hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
+-	hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
+-	hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
+-	hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
+-	hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);
+-
+-	hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG);
+-
+-	if (task->handler != NULL) {
+-		if (task->prehandler != NULL) {
+-			hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
+-			ndelay(400);	/* FIXME */
+-			return task->prehandler(drive, task->rq);
+-		}
+-		ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
+-		return ide_started;
+-	}
++	if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED))
++		return 1;
+ 
+-	if (!drive->using_dma)
+-		return ide_stopped;
+-
+-	switch (taskfile->command) {
++	switch (task->tf.command) {
+ 		case WIN_WRITEDMA_ONCE:
+ 		case WIN_WRITEDMA:
+ 		case WIN_WRITEDMA_EXT:
+@@ -129,24 +117,79 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
+ 		case WIN_READDMA:
+ 		case WIN_READDMA_EXT:
+ 		case WIN_IDENTIFY_DMA:
+-			if (!hwif->dma_setup(drive)) {
+-				hwif->dma_exec_cmd(drive, taskfile->command);
+-				hwif->dma_start(drive);
+-				return ide_started;
+-			}
+-			break;
+-		default:
+-			if (task->handler == NULL)
+-				return ide_stopped;
++			return 1;
+ 	}
+ 
+-	return ide_stopped;
 +	return 0;
+ }
+ 
++static ide_startstop_t task_no_data_intr(ide_drive_t *);
++static ide_startstop_t set_geometry_intr(ide_drive_t *);
++static ide_startstop_t recal_intr(ide_drive_t *);
++static ide_startstop_t set_multmode_intr(ide_drive_t *);
++static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
++static ide_startstop_t task_in_intr(ide_drive_t *);
 +
-+err_free_sbuf:
-+	kfree(dc->send_buf);
-+	iounmap(dc->base_addr);
-+err_rel_regs:
-+	pci_release_regions(pdev);
-+err_disable_device:
-+	pci_disable_device(pdev);
-+err_free:
-+	kfree(dc);
-+err:
-+	return ret;
-+}
-+
-+static void __devexit tty_exit(struct nozomi *dc)
++ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
 +{
-+	unsigned int i;
-+
-+	DBG1(" ");
++	ide_hwif_t *hwif	= HWIF(drive);
++	struct ide_taskfile *tf = &task->tf;
++	ide_handler_t *handler = NULL;
 +
-+	flush_scheduled_work();
++	if (task->data_phase == TASKFILE_MULTI_IN ||
++	    task->data_phase == TASKFILE_MULTI_OUT) {
++		if (!drive->mult_count) {
++			printk(KERN_ERR "%s: multimode not set!\n",
++					drive->name);
++			return ide_stopped;
++		}
++	}
 +
-+	for (i = 0; i < MAX_PORT; ++i)
-+		if (dc->port[i].tty && \
-+				list_empty(&dc->port[i].tty->hangup_work.entry))
-+			tty_hangup(dc->port[i].tty);
++	if (task->tf_flags & IDE_TFLAG_FLAGGED)
++		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;
 +
-+	while (dc->open_ttys)
-+		msleep(1);
++	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0)
++		ide_tf_load(drive, task);
 +
-+	for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
-+		tty_unregister_device(ntty_driver, i);
++	switch (task->data_phase) {
++	case TASKFILE_MULTI_OUT:
++	case TASKFILE_OUT:
++		hwif->OUTBSYNC(drive, tf->command, IDE_COMMAND_REG);
++		ndelay(400);	/* FIXME */
++		return pre_task_out_intr(drive, task->rq);
++	case TASKFILE_MULTI_IN:
++	case TASKFILE_IN:
++		handler = task_in_intr;
++		/* fall-through */
++	case TASKFILE_NO_DATA:
++		if (handler == NULL)
++			handler = task_no_data_intr;
++		/* WIN_{SPECIFY,RESTORE,SETMULT} use custom handlers */
++		if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) {
++			switch (tf->command) {
++			case WIN_SPECIFY: handler = set_geometry_intr;	break;
++			case WIN_RESTORE: handler = recal_intr;		break;
++			case WIN_SETMULT: handler = set_multmode_intr;	break;
++			}
++		}
++		ide_execute_command(drive, tf->command, handler,
++				    WAIT_WORSTCASE, NULL);
++		return ide_started;
++	default:
++		if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
++		    hwif->dma_setup(drive))
++			return ide_stopped;
++		hwif->dma_exec_cmd(drive, tf->command);
++		hwif->dma_start(drive);
++		return ide_started;
++	}
 +}
++EXPORT_SYMBOL_GPL(do_rw_taskfile);
 +
-+/* Deallocate memory for one device */
-+static void __devexit nozomi_card_exit(struct pci_dev *pdev)
-+{
-+	int i;
-+	struct ctrl_ul ctrl;
-+	struct nozomi *dc = pci_get_drvdata(pdev);
-+
-+	/* Disable all interrupts */
-+	dc->last_ier = 0;
-+	writew(dc->last_ier, dc->reg_ier);
-+
-+	tty_exit(dc);
-+
-+	/* Send 0x0001, command card to resend the reset token.  */
-+	/* This is to get the reset when the module is reloaded. */
-+	ctrl.port = 0x00;
-+	ctrl.reserved = 0;
-+	ctrl.RTS = 0;
-+	ctrl.DTR = 1;
-+	DBG1("sending flow control 0x%04X", *((u16 *)&ctrl));
-+
-+	/* Setup dc->reg addresses to we can use defines here */
-+	write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2);
-+	writew(CTRL_UL, dc->reg_fcr);	/* push the token to the card. */
-+
-+	remove_sysfs_files(dc);
-+
-+	free_irq(pdev->irq, dc);
-+
-+	for (i = 0; i < MAX_PORT; i++)
-+		if (dc->port[i].fifo_ul)
-+			kfifo_free(dc->port[i].fifo_ul);
-+
-+	kfree(dc->send_buf);
-+
-+	iounmap(dc->base_addr);
-+
-+	pci_release_regions(pdev);
-+
-+	pci_disable_device(pdev);
-+
-+	ndevs[dc->index_start / MAX_PORT] = NULL;
-+
-+	kfree(dc);
-+}
+ /*
+  * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
+  */
+-ide_startstop_t set_multmode_intr (ide_drive_t *drive)
++static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+ 	u8 stat;
+@@ -164,7 +207,7 @@ ide_startstop_t set_multmode_intr (ide_drive_t *drive)
+ /*
+  * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
+  */
+-ide_startstop_t set_geometry_intr (ide_drive_t *drive)
++static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+ 	int retries = 5;
+@@ -187,7 +230,7 @@ ide_startstop_t set_geometry_intr (ide_drive_t *drive)
+ /*
+  * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
+  */
+-ide_startstop_t recal_intr (ide_drive_t *drive)
++static ide_startstop_t recal_intr(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+ 	u8 stat;
+@@ -200,7 +243,7 @@ ide_startstop_t recal_intr (ide_drive_t *drive)
+ /*
+  * Handler for commands without a data phase
+  */
+-ide_startstop_t task_no_data_intr (ide_drive_t *drive)
++static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
+ {
+ 	ide_task_t *args	= HWGROUP(drive)->rq->special;
+ 	ide_hwif_t *hwif	= HWIF(drive);
+@@ -217,9 +260,7 @@ ide_startstop_t task_no_data_intr (ide_drive_t *drive)
+ 	return ide_stopped;
+ }
+ 
+-EXPORT_SYMBOL(task_no_data_intr);
+-
+-static u8 wait_drive_not_busy(ide_drive_t *drive)
++u8 wait_drive_not_busy(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+ 	int retries;
+@@ -227,8 +268,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
+ 
+ 	/*
+ 	 * Last sector was transfered, wait until drive is ready.
+-	 * This can take up to 10 usec, but we will wait max 1 ms
+-	 * (drive_cmd_intr() waits that long).
++	 * This can take up to 10 usec, but we will wait max 1 ms.
+ 	 */
+ 	for (retries = 0; retries < 100; retries++) {
+ 		if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT)
+@@ -283,9 +323,9 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
+ 
+ 	/* do the actual data transfer */
+ 	if (write)
+-		taskfile_output_data(drive, buf, SECTOR_WORDS);
++		hwif->ata_output_data(drive, buf, SECTOR_WORDS);
+ 	else
+-		taskfile_input_data(drive, buf, SECTOR_WORDS);
++		hwif->ata_input_data(drive, buf, SECTOR_WORDS);
+ 
+ 	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
+ #ifdef CONFIG_HIGHMEM
+@@ -305,9 +345,18 @@ static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
+ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
+ 				     unsigned int write)
+ {
++	u8 saved_io_32bit = drive->io_32bit;
 +
-+static void set_rts(const struct tty_struct *tty, int rts)
-+{
-+	struct port *port = get_port_by_tty(tty);
+ 	if (rq->bio)	/* fs request */
+ 		rq->errors = 0;
+ 
++	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
++		ide_task_t *task = rq->special;
 +
-+	port->ctrl_ul.RTS = rts;
-+	port->update_flow_control = 1;
-+	enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
-+}
++		if (task->tf_flags & IDE_TFLAG_IO_16BIT)
++			drive->io_32bit = 0;
++	}
 +
-+static void set_dtr(const struct tty_struct *tty, int dtr)
-+{
-+	struct port *port = get_port_by_tty(tty);
+ 	touch_softlockup_watchdog();
+ 
+ 	switch (drive->hwif->data_phase) {
+@@ -319,6 +368,8 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
+ 		ide_pio_sector(drive, write);
+ 		break;
+ 	}
 +
-+	DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr);
++	drive->io_32bit = saved_io_32bit;
+ }
+ 
+ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
+@@ -356,40 +407,35 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
+ 	return ide_error(drive, s, stat);
+ }
+ 
+-static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
++void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
+ {
+-	HWIF(drive)->cursg = NULL;
+-
+ 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+-		ide_task_t *task = rq->special;
++		u8 err = drive->hwif->INB(IDE_ERROR_REG);
+ 
+-		if (task->tf_out_flags.all) {
+-			u8 err = drive->hwif->INB(IDE_ERROR_REG);
+-			ide_end_drive_cmd(drive, stat, err);
+-			return;
+-		}
++		ide_end_drive_cmd(drive, stat, err);
++		return;
+ 	}
+ 
+ 	if (rq->rq_disk) {
+ 		ide_driver_t *drv;
+ 
+ 		drv = *(ide_driver_t **)rq->rq_disk->private_data;;
+-		drv->end_request(drive, 1, rq->hard_nr_sectors);
++		drv->end_request(drive, 1, rq->nr_sectors);
+ 	} else
+-		ide_end_request(drive, 1, rq->hard_nr_sectors);
++		ide_end_request(drive, 1, rq->nr_sectors);
+ }
+ 
+ /*
+  * Handler for command with PIO data-in phase (Read/Read Multiple).
+  */
+-ide_startstop_t task_in_intr (ide_drive_t *drive)
++static ide_startstop_t task_in_intr(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = drive->hwif;
+ 	struct request *rq = HWGROUP(drive)->rq;
+ 	u8 stat = hwif->INB(IDE_STATUS_REG);
+ 
+ 	/* new way for dealing with premature shared PCI interrupts */
+-	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
++	if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
+ 		if (stat & (ERR_STAT | DRQ_STAT))
+ 			return task_error(drive, rq, __FUNCTION__, stat);
+ 		/* No data yet, so wait for another IRQ. */
+@@ -402,7 +448,7 @@ ide_startstop_t task_in_intr (ide_drive_t *drive)
+ 	/* If it was the last datablock check status and finish transfer. */
+ 	if (!hwif->nleft) {
+ 		stat = wait_drive_not_busy(drive);
+-		if (!OK_STAT(stat, 0, BAD_R_STAT))
++		if (!OK_STAT(stat, 0, BAD_STAT))
+ 			return task_error(drive, rq, __FUNCTION__, stat);
+ 		task_end_request(drive, rq, stat);
+ 		return ide_stopped;
+@@ -413,7 +459,6 @@ ide_startstop_t task_in_intr (ide_drive_t *drive)
+ 
+ 	return ide_started;
+ }
+-EXPORT_SYMBOL(task_in_intr);
+ 
+ /*
+  * Handler for command with PIO data-out phase (Write/Write Multiple).
+@@ -443,11 +488,11 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
+ 	return ide_started;
+ }
+ 
+-ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
++static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
+ {
+ 	ide_startstop_t startstop;
+ 
+-	if (ide_wait_stat(&startstop, drive, DATA_READY,
++	if (ide_wait_stat(&startstop, drive, DRQ_STAT,
+ 			  drive->bad_wstat, WAIT_DRQ)) {
+ 		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
+ 				drive->name,
+@@ -464,9 +509,8 @@ ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
+ 
+ 	return ide_started;
+ }
+-EXPORT_SYMBOL(pre_task_out_intr);
+ 
+-static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
++int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
+ {
+ 	struct request rq;
+ 
+@@ -481,36 +525,27 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
+ 	 * if we would find a solution to transfer any size.
+ 	 * To support special commands like READ LONG.
+ 	 */
+-	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
+-		if (data_size == 0)
+-			rq.nr_sectors = (args->hobRegister[IDE_NSECTOR_OFFSET] << 8) | args->tfRegister[IDE_NSECTOR_OFFSET];
+-		else
+-			rq.nr_sectors = data_size / SECTOR_SIZE;
+-
+-		if (!rq.nr_sectors) {
+-			printk(KERN_ERR "%s: in/out command without data\n",
+-					drive->name);
+-			return -EFAULT;
+-		}
++	rq.hard_nr_sectors = rq.nr_sectors = nsect;
++	rq.hard_cur_sectors = rq.current_nr_sectors = nsect;
+ 
+-		rq.hard_nr_sectors = rq.nr_sectors;
+-		rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;
++	if (task->tf_flags & IDE_TFLAG_WRITE)
++		rq.cmd_flags |= REQ_RW;
+ 
+-		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
+-			rq.cmd_flags |= REQ_RW;
+-	}
++	rq.special = task;
++	task->rq = &rq;
+ 
+-	rq.special = args;
+-	args->rq = &rq;
+ 	return ide_do_drive_cmd(drive, &rq, ide_wait);
+ }
+ 
+-int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
++EXPORT_SYMBOL(ide_raw_taskfile);
 +
-+	port->ctrl_ul.DTR = dtr;
-+	port->update_flow_control = 1;
-+	enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
++int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
+ {
+-	return ide_diag_taskfile(drive, args, 0, buf);
+-}
++	task->data_phase = TASKFILE_NO_DATA;
+ 
+-EXPORT_SYMBOL(ide_raw_taskfile);
++	return ide_raw_taskfile(drive, task, NULL, 0);
 +}
++EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
+ 
+ #ifdef CONFIG_IDE_TASK_IOCTL
+ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+@@ -519,13 +554,12 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ 	ide_task_t		args;
+ 	u8 *outbuf		= NULL;
+ 	u8 *inbuf		= NULL;
+-	task_ioreg_t *argsptr	= args.tfRegister;
+-	task_ioreg_t *hobsptr	= args.hobRegister;
++	u8 *data_buf		= NULL;
+ 	int err			= 0;
+ 	int tasksize		= sizeof(struct ide_task_request_s);
+ 	unsigned int taskin	= 0;
+ 	unsigned int taskout	= 0;
+-	u8 io_32bit		= drive->io_32bit;
++	u16 nsect		= 0;
+ 	char __user *buf = (char __user *)arg;
+ 
+ //	printk("IDE Taskfile ...\n");
+@@ -572,24 +606,52 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ 	}
+ 
+ 	memset(&args, 0, sizeof(ide_task_t));
+-	memcpy(argsptr, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
+-	memcpy(hobsptr, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE);
+ 
+-	args.tf_in_flags  = req_task->in_flags;
+-	args.tf_out_flags = req_task->out_flags;
+-	args.data_phase   = req_task->data_phase;
+-	args.command_type = req_task->req_cmd;
++	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
++	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
 +
-+/*
-+ * ----------------------------------------------------------------------------
-+ * TTY code
-+ * ----------------------------------------------------------------------------
-+ */
++	args.data_phase = req_task->data_phase;
 +
-+/* Called when the userspace process opens the tty, /dev/noz*.  */
-+static int ntty_open(struct tty_struct *tty, struct file *file)
-+{
-+	struct port *port = get_port_by_tty(tty);
-+	struct nozomi *dc = get_dc_by_tty(tty);
-+	unsigned long flags;
++	args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
++			IDE_TFLAG_IN_TF;
++	if (drive->addressing == 1)
++		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);
 +
-+	if (!port || !dc)
-+		return -ENODEV;
++	if (req_task->out_flags.all) {
++		args.tf_flags |= IDE_TFLAG_FLAGGED;
 +
-+	if (mutex_lock_interruptible(&port->tty_sem))
-+		return -ERESTARTSYS;
++		if (req_task->out_flags.b.data)
++			args.tf_flags |= IDE_TFLAG_OUT_DATA;
 +
-+	port->tty_open_count++;
-+	dc->open_ttys++;
++		if (req_task->out_flags.b.nsector_hob)
++			args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
++		if (req_task->out_flags.b.sector_hob)
++			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
++		if (req_task->out_flags.b.lcyl_hob)
++			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
++		if (req_task->out_flags.b.hcyl_hob)
++			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;
 +
-+	/* Enable interrupt downlink for channel */
-+	if (port->tty_open_count == 1) {
-+		tty->low_latency = 1;
-+		tty->driver_data = port;
-+		port->tty = tty;
-+		DBG1("open: %d", port->token_dl);
-+		spin_lock_irqsave(&dc->spin_mutex, flags);
-+		dc->last_ier = dc->last_ier | port->token_dl;
-+		writew(dc->last_ier, dc->reg_ier);
-+		spin_unlock_irqrestore(&dc->spin_mutex, flags);
++		if (req_task->out_flags.b.error_feature)
++			args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
++		if (req_task->out_flags.b.nsector)
++			args.tf_flags |= IDE_TFLAG_OUT_NSECT;
++		if (req_task->out_flags.b.sector)
++			args.tf_flags |= IDE_TFLAG_OUT_LBAL;
++		if (req_task->out_flags.b.lcyl)
++			args.tf_flags |= IDE_TFLAG_OUT_LBAM;
++		if (req_task->out_flags.b.hcyl)
++			args.tf_flags |= IDE_TFLAG_OUT_LBAH;
++	} else {
++		args.tf_flags |= IDE_TFLAG_OUT_TF;
++		if (args.tf_flags & IDE_TFLAG_LBA48)
++			args.tf_flags |= IDE_TFLAG_OUT_HOB;
 +	}
 +
-+	mutex_unlock(&port->tty_sem);
-+
-+	return 0;
-+}
-+
-+/* Called when the userspace process close the tty, /dev/noz*. */
-+static void ntty_close(struct tty_struct *tty, struct file *file)
-+{
-+	struct nozomi *dc = get_dc_by_tty(tty);
-+	struct port *port = tty->driver_data;
-+	unsigned long flags;
-+
-+	if (!dc || !port)
-+		return;
-+
-+	if (mutex_lock_interruptible(&port->tty_sem))
-+		return;
-+
-+	if (!port->tty_open_count)
-+		goto exit;
-+
-+	dc->open_ttys--;
-+	port->tty_open_count--;
++	if (req_task->in_flags.b.data)
++		args.tf_flags |= IDE_TFLAG_IN_DATA;
+ 
+-	drive->io_32bit = 0;
+ 	switch(req_task->data_phase) {
+-		case TASKFILE_OUT_DMAQ:
+-		case TASKFILE_OUT_DMA:
+-			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
+-			break;
+-		case TASKFILE_IN_DMAQ:
+-		case TASKFILE_IN_DMA:
+-			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
+-			break;
+ 		case TASKFILE_MULTI_OUT:
+ 			if (!drive->mult_count) {
+ 				/* (hs): give up if multcount is not set */
+@@ -601,9 +663,11 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ 			}
+ 			/* fall through */
+ 		case TASKFILE_OUT:
+-			args.prehandler = &pre_task_out_intr;
+-			args.handler = &task_out_intr;
+-			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
++			/* fall through */
++		case TASKFILE_OUT_DMAQ:
++		case TASKFILE_OUT_DMA:
++			nsect = taskout / SECTOR_SIZE;
++			data_buf = outbuf;
+ 			break;
+ 		case TASKFILE_MULTI_IN:
+ 			if (!drive->mult_count) {
+@@ -616,22 +680,46 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ 			}
+ 			/* fall through */
+ 		case TASKFILE_IN:
+-			args.handler = &task_in_intr;
+-			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
++			/* fall through */
++		case TASKFILE_IN_DMAQ:
++		case TASKFILE_IN_DMA:
++			nsect = taskin / SECTOR_SIZE;
++			data_buf = inbuf;
+ 			break;
+ 		case TASKFILE_NO_DATA:
+-			args.handler = &task_no_data_intr;
+-			err = ide_diag_taskfile(drive, &args, 0, NULL);
+ 			break;
+ 		default:
+ 			err = -EFAULT;
+ 			goto abort;
+ 	}
+ 
+-	memcpy(req_task->io_ports, &(args.tfRegister), HDIO_DRIVE_TASK_HDR_SIZE);
+-	memcpy(req_task->hob_ports, &(args.hobRegister), HDIO_DRIVE_HOB_HDR_SIZE);
+-	req_task->in_flags  = args.tf_in_flags;
+-	req_task->out_flags = args.tf_out_flags;
++	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
++		nsect = 0;
++	else if (!nsect) {
++		nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;
 +
-+	if (port->tty_open_count == 0) {
-+		DBG1("close: %d", port->token_dl);
-+		spin_lock_irqsave(&dc->spin_mutex, flags);
-+		dc->last_ier &= ~(port->token_dl);
-+		writew(dc->last_ier, dc->reg_ier);
-+		spin_unlock_irqrestore(&dc->spin_mutex, flags);
++		if (!nsect) {
++			printk(KERN_ERR "%s: in/out command without data\n",
++					drive->name);
++			err = -EFAULT;
++			goto abort;
++		}
 +	}
 +
-+exit:
-+	mutex_unlock(&port->tty_sem);
-+}
-+
-+/*
-+ * called when the userspace process writes to the tty (/dev/noz*).
-+ * Data is inserted into a fifo, which is then read and transfered to the modem.
-+ */
-+static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
-+		      int count)
-+{
-+	int rval = -EINVAL;
-+	struct nozomi *dc = get_dc_by_tty(tty);
-+	struct port *port = tty->driver_data;
-+	unsigned long flags;
-+
-+	/* DBG1( "WRITEx: %d, index = %d", count, index); */
++	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
++		args.tf_flags |= IDE_TFLAG_WRITE;
 +
-+	if (!dc || !port)
-+		return -ENODEV;
++	err = ide_raw_taskfile(drive, &args, data_buf, nsect);
 +
-+	if (unlikely(!mutex_trylock(&port->tty_sem))) {
-+		/*
-+		 * must test lock as tty layer wraps calls
-+		 * to this function with BKL
-+		 */
-+		dev_err(&dc->pdev->dev, "Would have deadlocked - "
-+			"return EAGAIN\n");
-+		return -EAGAIN;
-+	}
++	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
++	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);
 +
-+	if (unlikely(!port->tty_open_count)) {
-+		DBG1(" ");
-+		goto exit;
++	if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
++	    req_task->in_flags.all == 0) {
++		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
++		if (drive->addressing == 1)
++			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
 +	}
+ 
+ 	if (copy_to_user(buf, req_task, tasksize)) {
+ 		err = -EFAULT;
+@@ -658,40 +746,24 @@ abort:
+ 
+ //	printk("IDE Taskfile ioctl ended. rc = %i\n", err);
+ 
+-	drive->io_32bit = io_32bit;
+-
+ 	return err;
+ }
+ #endif
+ 
+-int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
+-{
+-	struct request rq;
+-	u8 buffer[4];
+-
+-	if (!buf)
+-		buf = buffer;
+-	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
+-	ide_init_drive_cmd(&rq);
+-	rq.buffer = buf;
+-	*buf++ = cmd;
+-	*buf++ = nsect;
+-	*buf++ = feature;
+-	*buf++ = sectors;
+-	return ide_do_drive_cmd(drive, &rq, ide_wait);
+-}
+-
+ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ {
+-	int err = 0;
+-	u8 args[4], *argbuf = args;
+-	u8 xfer_rate = 0;
+-	int argsize = 4;
++	u8 *buf = NULL;
++	int bufsize = 0, err = 0;
++	u8 args[4], xfer_rate = 0;
+ 	ide_task_t tfargs;
++	struct ide_taskfile *tf = &tfargs.tf;
+ 
+ 	if (NULL == (void *) arg) {
+ 		struct request rq;
 +
-+	rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count);
-+
-+	/* notify card */
-+	if (unlikely(dc == NULL)) {
-+		DBG1("No device context?");
-+		goto exit;
-+	}
+ 		ide_init_drive_cmd(&rq);
++		rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
 +
-+	spin_lock_irqsave(&dc->spin_mutex, flags);
-+	/* CTS is only valid on the modem channel */
-+	if (port == &(dc->port[PORT_MDM])) {
-+		if (port->ctrl_dl.CTS) {
-+			DBG4("Enable interrupt");
-+			enable_transmit_ul(tty->index % MAX_PORT, dc);
-+		} else {
-+			dev_err(&dc->pdev->dev,
-+				"CTS not active on modem port?\n");
-+		}
+ 		return ide_do_drive_cmd(drive, &rq, ide_wait);
+ 	}
+ 
+@@ -699,27 +771,40 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ 		return -EFAULT;
+ 
+ 	memset(&tfargs, 0, sizeof(ide_task_t));
+-	tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
+-	tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
+-	tfargs.tfRegister[IDE_SECTOR_OFFSET]  = args[1];
+-	tfargs.tfRegister[IDE_LCYL_OFFSET]    = 0x00;
+-	tfargs.tfRegister[IDE_HCYL_OFFSET]    = 0x00;
+-	tfargs.tfRegister[IDE_SELECT_OFFSET]  = 0x00;
+-	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];
++	tf->feature = args[2];
++	if (args[0] == WIN_SMART) {
++		tf->nsect = args[3];
++		tf->lbal  = args[1];
++		tf->lbam  = 0x4f;
++		tf->lbah  = 0xc2;
++		tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT;
 +	} else {
-+		enable_transmit_ul(tty->index % MAX_PORT, dc);
++		tf->nsect = args[1];
++		tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE |
++				  IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT;
 +	}
-+	spin_unlock_irqrestore(&dc->spin_mutex, flags);
-+
-+exit:
-+	mutex_unlock(&port->tty_sem);
-+	return rval;
-+}
-+
-+/*
-+ * Calculate how much is left in device
-+ * This method is called by the upper tty layer.
-+ *   #according to sources N_TTY.c it expects a value >= 0 and
-+ *    does not check for negative values.
-+ */
-+static int ntty_write_room(struct tty_struct *tty)
-+{
-+	struct port *port = tty->driver_data;
-+	int room = 0;
-+	struct nozomi *dc = get_dc_by_tty(tty);
-+
-+	if (!dc || !port)
-+		return 0;
-+	if (!mutex_trylock(&port->tty_sem))
-+		return 0;
-+
-+	if (!port->tty_open_count)
-+		goto exit;
-+
-+	room = port->fifo_ul->size - __kfifo_len(port->fifo_ul);
-+
-+exit:
-+	mutex_unlock(&port->tty_sem);
-+	return room;
-+}
-+
-+/* Gets io control parameters */
-+static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
-+{
-+	struct port *port = tty->driver_data;
-+	struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
-+	struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
-+
-+	return	(ctrl_ul->RTS ? TIOCM_RTS : 0) |
-+		(ctrl_ul->DTR ? TIOCM_DTR : 0) |
-+		(ctrl_dl->DCD ? TIOCM_CAR : 0) |
-+		(ctrl_dl->RI  ? TIOCM_RNG : 0) |
-+		(ctrl_dl->DSR ? TIOCM_DSR : 0) |
-+		(ctrl_dl->CTS ? TIOCM_CTS : 0);
-+}
-+
-+/* Sets io controls parameters */
-+static int ntty_tiocmset(struct tty_struct *tty, struct file *file,
-+	unsigned int set, unsigned int clear)
-+{
-+	if (set & TIOCM_RTS)
-+		set_rts(tty, 1);
-+	else if (clear & TIOCM_RTS)
-+		set_rts(tty, 0);
-+
-+	if (set & TIOCM_DTR)
-+		set_dtr(tty, 1);
-+	else if (clear & TIOCM_DTR)
-+		set_dtr(tty, 0);
++	tf->command = args[0];
++	tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA;
+ 
+ 	if (args[3]) {
+-		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
+-		argbuf = kzalloc(argsize, GFP_KERNEL);
+-		if (argbuf == NULL)
++		tfargs.tf_flags |= IDE_TFLAG_IO_16BIT;
++		bufsize = SECTOR_WORDS * 4 * args[3];
++		buf = kzalloc(bufsize, GFP_KERNEL);
++		if (buf == NULL)
+ 			return -ENOMEM;
+ 	}
 +
-+	return 0;
-+}
+ 	if (set_transfer(drive, &tfargs)) {
+ 		xfer_rate = args[1];
+ 		if (ide_ata66_check(drive, &tfargs))
+ 			goto abort;
+ 	}
+ 
+-	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);
++	err = ide_raw_taskfile(drive, &tfargs, buf, args[3]);
 +
-+static int ntty_cflags_changed(struct port *port, unsigned long flags,
-+		struct async_icount *cprev)
++	args[0] = tf->status;
++	args[1] = tf->error;
++	args[2] = tf->nsect;
+ 
+ 	if (!err && xfer_rate) {
+ 		/* active-retuning-calls future */
+@@ -727,142 +812,38 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ 		ide_driveid_update(drive);
+ 	}
+ abort:
+-	if (copy_to_user((void __user *)arg, argbuf, argsize))
++	if (copy_to_user((void __user *)arg, &args, 4))
+ 		err = -EFAULT;
+-	if (argsize > 4)
+-		kfree(argbuf);
++	if (buf) {
++		if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
++			err = -EFAULT;
++		kfree(buf);
++	}
+ 	return err;
+ }
+ 
+-static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf)
+-{
+-	struct request rq;
+-
+-	ide_init_drive_cmd(&rq);
+-	rq.cmd_type = REQ_TYPE_ATA_TASK;
+-	rq.buffer = buf;
+-	return ide_do_drive_cmd(drive, &rq, ide_wait);
+-}
+-
+ int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ {
+ 	void __user *p = (void __user *)arg;
+ 	int err = 0;
+-	u8 args[7], *argbuf = args;
+-	int argsize = 7;
++	u8 args[7];
++	ide_task_t task;
+ 
+ 	if (copy_from_user(args, p, 7))
+ 		return -EFAULT;
+-	err = ide_wait_cmd_task(drive, argbuf);
+-	if (copy_to_user(p, argbuf, argsize))
+-		err = -EFAULT;
+-	return err;
+-}
+-
+-/*
+- * NOTICE: This is additions from IBM to provide a discrete interface,
+- * for selective taskregister access operations.  Nice JOB Klaus!!!
+- * Glad to be able to work and co-develop this with you and IBM.
+- */
+-ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
+-{
+-	ide_hwif_t *hwif	= HWIF(drive);
+-	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
+-	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
+-
+-	if (task->data_phase == TASKFILE_MULTI_IN ||
+-	    task->data_phase == TASKFILE_MULTI_OUT) {
+-		if (!drive->mult_count) {
+-			printk(KERN_ERR "%s: multimode not set!\n", drive->name);
+-			return ide_stopped;
+-		}
+-	}
+ 
+-	/*
+-	 * (ks) Check taskfile in flags.
+-	 * If set, then execute as it is defined.
+-	 * If not set, then define default settings.
+-	 * The default values are:
+-	 *	read all taskfile registers (except data)
+-	 *	read the hob registers (sector, nsector, lcyl, hcyl)
+-	 */
+-	if (task->tf_in_flags.all == 0) {
+-		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
+-		if (drive->addressing == 1)
+-			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS  << 8);
+-        }
+-
+-	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
+-	if (IDE_CONTROL_REG)
+-		/* clear nIEN */
+-		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
+-	SELECT_MASK(drive, 0);
+-
+-	if (task->tf_out_flags.b.data) {
+-		u16 data =  taskfile->data + (hobfile->data << 8);
+-		hwif->OUTW(data, IDE_DATA_REG);
+-	}
+-
+-	/* (ks) send hob registers first */
+-	if (task->tf_out_flags.b.nsector_hob)
+-		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
+-	if (task->tf_out_flags.b.sector_hob)
+-		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
+-	if (task->tf_out_flags.b.lcyl_hob)
+-		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
+-	if (task->tf_out_flags.b.hcyl_hob)
+-		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
+-
+-	/* (ks) Send now the standard registers */
+-	if (task->tf_out_flags.b.error_feature)
+-		hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
+-	/* refers to number of sectors to transfer */
+-	if (task->tf_out_flags.b.nsector)
+-		hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
+-	/* refers to sector offset or start sector */
+-	if (task->tf_out_flags.b.sector)
+-		hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
+-	if (task->tf_out_flags.b.lcyl)
+-		hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
+-	if (task->tf_out_flags.b.hcyl)
+-		hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);
+-
+-        /*
+-	 * (ks) In the flagged taskfile approch, we will use all specified
+-	 * registers and the register value will not be changed, except the
+-	 * select bit (master/slave) in the drive_head register. We must make
+-	 * sure that the desired drive is selected.
+-	 */
+-	hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG);
+-	switch(task->data_phase) {
++	memset(&task, 0, sizeof(task));
++	memcpy(&task.tf_array[7], &args[1], 6);
++	task.tf.command = args[0];
++	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ 
+-   	        case TASKFILE_OUT_DMAQ:
+-		case TASKFILE_OUT_DMA:
+-		case TASKFILE_IN_DMAQ:
+-		case TASKFILE_IN_DMA:
+-			if (!drive->using_dma)
+-				break;
++	err = ide_no_data_taskfile(drive, &task);
+ 
+-			if (!hwif->dma_setup(drive)) {
+-				hwif->dma_exec_cmd(drive, taskfile->command);
+-				hwif->dma_start(drive);
+-				return ide_started;
+-			}
+-			break;
++	args[0] = task.tf.command;
++	memcpy(&args[1], &task.tf_array[7], 6);
+ 
+-	        default:
+- 			if (task->handler == NULL)
+-				return ide_stopped;
+-
+-			/* Issue the command */
+-			if (task->prehandler) {
+-				hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
+-				ndelay(400);	/* FIXME */
+-				return task->prehandler(drive, task->rq);
+-			}
+-			ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
+-			return ide_started;
+-	}
++	if (copy_to_user(p, args, 7))
++		err = -EFAULT;
+ 
+-	return ide_stopped;
++	return err;
+ }
+diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
+index 54943da..97894ab 100644
+--- a/drivers/ide/ide.c
++++ b/drivers/ide/ide.c
+@@ -95,7 +95,7 @@ DEFINE_MUTEX(ide_cfg_mtx);
+  __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
+ 
+ #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+-static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
++int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
+ #endif
+ 
+ int noautodma = 0;
+@@ -116,7 +116,7 @@ EXPORT_SYMBOL(ide_hwifs);
+ /*
+  * Do not even *think* about calling this!
+  */
+-static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
++void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
+ {
+ 	unsigned int unit;
+ 
+@@ -159,6 +159,7 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
+ 		init_completion(&drive->gendev_rel_comp);
+ 	}
+ }
++EXPORT_SYMBOL_GPL(ide_init_port_data);
+ 
+ static void init_hwif_default(ide_hwif_t *hwif, unsigned int index)
+ {
+@@ -177,8 +178,6 @@ static void init_hwif_default(ide_hwif_t *hwif, unsigned int index)
+ #endif
+ }
+ 
+-extern void ide_arm_init(void);
+-
+ /*
+  * init_ide_data() sets reasonable default values into all fields
+  * of all instances of the hwifs and drives, but only on the first call.
+@@ -210,16 +209,13 @@ static void __init init_ide_data (void)
+ 	/* Initialise all interface structures */
+ 	for (index = 0; index < MAX_HWIFS; ++index) {
+ 		hwif = &ide_hwifs[index];
+-		init_hwif_data(hwif, index);
++		ide_init_port_data(hwif, index);
+ 		init_hwif_default(hwif, index);
+ #if !defined(CONFIG_PPC32) || !defined(CONFIG_PCI)
+ 		hwif->irq =
+ 			ide_init_default_irq(hwif->io_ports[IDE_DATA_OFFSET]);
+ #endif
+ 	}
+-#ifdef CONFIG_IDE_ARM
+-	ide_arm_init();
+-#endif
+ }
+ 
+ /**
+@@ -414,8 +410,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
+ 	hwif->cds			= tmp_hwif->cds;
+ #endif
+ 
+-	hwif->fixup			= tmp_hwif->fixup;
+-
+ 	hwif->set_pio_mode		= tmp_hwif->set_pio_mode;
+ 	hwif->set_dma_mode		= tmp_hwif->set_dma_mode;
+ 	hwif->mdma_filter		= tmp_hwif->mdma_filter;
+@@ -424,7 +418,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
+ 	hwif->reset_poll		= tmp_hwif->reset_poll;
+ 	hwif->pre_reset			= tmp_hwif->pre_reset;
+ 	hwif->resetproc			= tmp_hwif->resetproc;
+-	hwif->intrproc			= tmp_hwif->intrproc;
+ 	hwif->maskproc			= tmp_hwif->maskproc;
+ 	hwif->quirkproc			= tmp_hwif->quirkproc;
+ 	hwif->busproc			= tmp_hwif->busproc;
+@@ -434,16 +427,13 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
+ 	hwif->atapi_input_bytes		= tmp_hwif->atapi_input_bytes;
+ 	hwif->atapi_output_bytes	= tmp_hwif->atapi_output_bytes;
+ 
++	hwif->dma_host_set		= tmp_hwif->dma_host_set;
+ 	hwif->dma_setup			= tmp_hwif->dma_setup;
+ 	hwif->dma_exec_cmd		= tmp_hwif->dma_exec_cmd;
+ 	hwif->dma_start			= tmp_hwif->dma_start;
+ 	hwif->ide_dma_end		= tmp_hwif->ide_dma_end;
+-	hwif->ide_dma_on		= tmp_hwif->ide_dma_on;
+-	hwif->dma_off_quietly		= tmp_hwif->dma_off_quietly;
+ 	hwif->ide_dma_test_irq		= tmp_hwif->ide_dma_test_irq;
+ 	hwif->ide_dma_clear_irq		= tmp_hwif->ide_dma_clear_irq;
+-	hwif->dma_host_on		= tmp_hwif->dma_host_on;
+-	hwif->dma_host_off		= tmp_hwif->dma_host_off;
+ 	hwif->dma_lost_irq		= tmp_hwif->dma_lost_irq;
+ 	hwif->dma_timeout		= tmp_hwif->dma_timeout;
+ 
+@@ -468,7 +458,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
+ #endif
+ 
+ 	hwif->dma_base			= tmp_hwif->dma_base;
+-	hwif->dma_master		= tmp_hwif->dma_master;
+ 	hwif->dma_command		= tmp_hwif->dma_command;
+ 	hwif->dma_vendor1		= tmp_hwif->dma_vendor1;
+ 	hwif->dma_status		= tmp_hwif->dma_status;
+@@ -602,7 +591,6 @@ void ide_unregister(unsigned int index)
+ 		(void) ide_release_dma(hwif);
+ 
+ 		hwif->dma_base = 0;
+-		hwif->dma_master = 0;
+ 		hwif->dma_command = 0;
+ 		hwif->dma_vendor1 = 0;
+ 		hwif->dma_status = 0;
+@@ -617,7 +605,7 @@ void ide_unregister(unsigned int index)
+ 	tmp_hwif = *hwif;
+ 
+ 	/* restore hwif data to pristine status */
+-	init_hwif_data(hwif, index);
++	ide_init_port_data(hwif, index);
+ 	init_hwif_default(hwif, index);
+ 
+ 	ide_hwif_restore(hwif, &tmp_hwif);
+@@ -683,24 +671,34 @@ void ide_setup_ports (	hw_regs_t *hw,
+  */
+ }
+ 
++void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
 +{
-+	struct async_icount cnow = port->tty_icount;
-+	int ret;
-+
-+	ret =	((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
-+		((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
-+		((flags & TIOCM_CD)  && (cnow.dcd != cprev->dcd)) ||
-+		((flags & TIOCM_CTS) && (cnow.cts != cprev->cts));
-+
-+	*cprev = cnow;
-+
-+	return ret;
++	memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
++	hwif->irq = hw->irq;
++	hwif->noprobe = 0;
++	hwif->chipset = hw->chipset;
++	hwif->gendev.parent = hw->dev;
++	hwif->ack_intr = hw->ack_intr;
 +}
++EXPORT_SYMBOL_GPL(ide_init_port_hw);
 +
-+static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
-+{
-+	struct async_icount cnow = port->tty_icount;
-+	struct serial_icounter_struct icount;
+ /**
+  *	ide_register_hw		-	register IDE interface
+  *	@hw: hardware registers
+- *	@fixup: fixup function
+- *	@initializing: set while initializing built-in drivers
++ *	@quirkproc: quirkproc function
+  *	@hwifp: pointer to returned hwif
+  *
+  *	Register an IDE interface, specifying exactly the registers etc.
+- *	Set init=1 iff calling before probes have taken place.
+  *
+  *	Returns -1 on error.
+  */
+ 
+-int ide_register_hw(hw_regs_t *hw, void (*fixup)(ide_hwif_t *),
+-		    int initializing, ide_hwif_t **hwifp)
++int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *),
++		    ide_hwif_t **hwifp)
+ {
+ 	int index, retry = 1;
+ 	ide_hwif_t *hwif;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	do {
+ 		for (index = 0; index < MAX_HWIFS; ++index) {
+@@ -712,8 +710,7 @@ int ide_register_hw(hw_regs_t *hw, void (*fixup)(ide_hwif_t *),
+ 			hwif = &ide_hwifs[index];
+ 			if (hwif->hold)
+ 				continue;
+-			if ((!hwif->present && !hwif->mate && !initializing) ||
+-			    (!hwif->io_ports[IDE_DATA_OFFSET] && initializing))
++			if (!hwif->present && hwif->mate == NULL)
+ 				goto found;
+ 		}
+ 		for (index = 0; index < MAX_HWIFS; index++)
+@@ -724,29 +721,23 @@ found:
+ 	if (hwif->present)
+ 		ide_unregister(index);
+ 	else if (!hwif->hold) {
+-		init_hwif_data(hwif, index);
++		ide_init_port_data(hwif, index);
+ 		init_hwif_default(hwif, index);
+ 	}
+ 	if (hwif->present)
+ 		return -1;
+-	memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
+-	hwif->irq = hw->irq;
+-	hwif->noprobe = 0;
+-	hwif->fixup = fixup;
+-	hwif->chipset = hw->chipset;
+-	hwif->gendev.parent = hw->dev;
+-	hwif->ack_intr = hw->ack_intr;
+ 
+-	if (initializing == 0) {
+-		u8 idx[4] = { index, 0xff, 0xff, 0xff };
++	ide_init_port_hw(hwif, hw);
++	hwif->quirkproc = quirkproc;
+ 
+-		ide_device_add(idx);
+-	}
++	idx[0] = index;
 +
-+	icount.cts = cnow.cts;
-+	icount.dsr = cnow.dsr;
-+	icount.rng = cnow.rng;
-+	icount.dcd = cnow.dcd;
-+	icount.rx = cnow.rx;
-+	icount.tx = cnow.tx;
-+	icount.frame = cnow.frame;
-+	icount.overrun = cnow.overrun;
-+	icount.parity = cnow.parity;
-+	icount.brk = cnow.brk;
-+	icount.buf_overrun = cnow.buf_overrun;
++	ide_device_add(idx);
+ 
+ 	if (hwifp)
+ 		*hwifp = hwif;
+ 
+-	return (initializing || hwif->present) ? index : -1;
++	return hwif->present ? index : -1;
+ }
+ 
+ EXPORT_SYMBOL(ide_register_hw);
+@@ -839,7 +830,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
+ 	if (!drive->id || !(drive->id->capability & 1))
+ 		goto out;
+ 
+-	if (hwif->ide_dma_on == NULL)
++	if (hwif->dma_host_set == NULL)
+ 		goto out;
+ 
+ 	err = -EBUSY;
+@@ -854,8 +845,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
+ 	err = 0;
+ 
+ 	if (arg) {
+-		hwif->dma_off_quietly(drive);
+-		if (ide_set_dma(drive) || hwif->ide_dma_on(drive))
++		if (ide_set_dma(drive))
+ 			err = -EIO;
+ 	} else
+ 		ide_dma_off(drive);
+@@ -888,7 +878,10 @@ int set_pio_mode(ide_drive_t *drive, int arg)
+ 
+ 	if (drive->special.b.set_tune)
+ 		return -EBUSY;
 +
-+	return copy_to_user(argp, &icount, sizeof(icount));
-+}
+ 	ide_init_drive_cmd(&rq);
++	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
 +
-+static int ntty_ioctl(struct tty_struct *tty, struct file *file,
-+		      unsigned int cmd, unsigned long arg)
-+{
-+	struct port *port = tty->driver_data;
-+	void __user *argp = (void __user *)arg;
-+	int rval = -ENOIOCTLCMD;
+ 	drive->tune_req = (u8) arg;
+ 	drive->special.b.set_tune = 1;
+ 	(void) ide_do_drive_cmd(drive, &rq, ide_wait);
+@@ -1070,7 +1063,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
+ 			ide_init_hwif_ports(&hw, (unsigned long) args[0],
+ 					    (unsigned long) args[1], NULL);
+ 			hw.irq = args[2];
+-			if (ide_register_hw(&hw, NULL, 0, NULL) == -1)
++			if (ide_register_hw(&hw, NULL, NULL) == -1)
+ 				return -EIO;
+ 			return 0;
+ 		}
+@@ -1231,26 +1224,12 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
+ 	return 0;	/* zero = nothing matched */
+ }
+ 
+-#ifdef CONFIG_BLK_DEV_ALI14XX
+ extern int probe_ali14xx;
+-extern int ali14xx_init(void);
+-#endif
+-#ifdef CONFIG_BLK_DEV_UMC8672
+ extern int probe_umc8672;
+-extern int umc8672_init(void);
+-#endif
+-#ifdef CONFIG_BLK_DEV_DTC2278
+ extern int probe_dtc2278;
+-extern int dtc2278_init(void);
+-#endif
+-#ifdef CONFIG_BLK_DEV_HT6560B
+ extern int probe_ht6560b;
+-extern int ht6560b_init(void);
+-#endif
+-#ifdef CONFIG_BLK_DEV_QD65XX
+ extern int probe_qd65xx;
+-extern int qd65xx_init(void);
+-#endif
++extern int cmd640_vlb;
+ 
+ static int __initdata is_chipset_set[MAX_HWIFS];
+ 
+@@ -1327,7 +1306,7 @@ static int __init ide_setup(char *s)
+ 	if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
+ 		const char *hd_words[] = {
+ 			"none", "noprobe", "nowerr", "cdrom", "nodma",
+-			"autotune", "noautotune", "minus8", "swapdata", "bswap",
++			"autotune", "noautotune", "-8", "-9", "-10",
+ 			"noflush", "remap", "remap63", "scsi", NULL };
+ 		unit = s[2] - 'a';
+ 		hw   = unit / MAX_DRIVES;
+@@ -1363,10 +1342,6 @@ static int __init ide_setup(char *s)
+ 			case -7: /* "noautotune" */
+ 				drive->autotune = IDE_TUNE_NOAUTO;
+ 				goto obsolete_option;
+-			case -9: /* "swapdata" */
+-			case -10: /* "bswap" */
+-				drive->bswap = 1;
+-				goto done;
+ 			case -11: /* noflush */
+ 				drive->noflush = 1;
+ 				goto done;
+@@ -1466,11 +1441,8 @@ static int __init ide_setup(char *s)
+ #endif
+ #ifdef CONFIG_BLK_DEV_CMD640
+ 			case -14: /* "cmd640_vlb" */
+-			{
+-				extern int cmd640_vlb; /* flag for cmd640.c */
+ 				cmd640_vlb = 1;
+ 				goto done;
+-			}
+ #endif
+ #ifdef CONFIG_BLK_DEV_HT6560B
+ 			case -13: /* "ht6560b" */
+@@ -1560,79 +1532,6 @@ done:
+ 	return 1;
+ }
+ 
+-extern void __init pnpide_init(void);
+-extern void __exit pnpide_exit(void);
+-extern void __init h8300_ide_init(void);
+-
+-/*
+- * probe_for_hwifs() finds/initializes "known" IDE interfaces
+- */
+-static void __init probe_for_hwifs (void)
+-{
+-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+-	ide_scan_pcibus(ide_scan_direction);
+-#endif
+-
+-#ifdef CONFIG_ETRAX_IDE
+-	{
+-		extern void init_e100_ide(void);
+-		init_e100_ide();
+-	}
+-#endif /* CONFIG_ETRAX_IDE */
+-#ifdef CONFIG_BLK_DEV_CMD640
+-	{
+-		extern void ide_probe_for_cmd640x(void);
+-		ide_probe_for_cmd640x();
+-	}
+-#endif /* CONFIG_BLK_DEV_CMD640 */
+-#ifdef CONFIG_BLK_DEV_IDE_PMAC
+-	{
+-		extern int pmac_ide_probe(void);
+-		(void)pmac_ide_probe();
+-	}
+-#endif /* CONFIG_BLK_DEV_IDE_PMAC */
+-#ifdef CONFIG_BLK_DEV_GAYLE
+-	{
+-		extern void gayle_init(void);
+-		gayle_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_GAYLE */
+-#ifdef CONFIG_BLK_DEV_FALCON_IDE
+-	{
+-		extern void falconide_init(void);
+-		falconide_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_FALCON_IDE */
+-#ifdef CONFIG_BLK_DEV_MAC_IDE
+-	{
+-		extern void macide_init(void);
+-		macide_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_MAC_IDE */
+-#ifdef CONFIG_BLK_DEV_Q40IDE
+-	{
+-		extern void q40ide_init(void);
+-		q40ide_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_Q40IDE */
+-#ifdef CONFIG_BLK_DEV_BUDDHA
+-	{
+-		extern void buddha_init(void);
+-		buddha_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_BUDDHA */
+-#ifdef CONFIG_BLK_DEV_IDEPNP
+-	pnpide_init();
+-#endif
+-#ifdef CONFIG_H8300
+-	h8300_ide_init();
+-#endif
+-}
+-
+-/*
+- * Probe module
+- */
+-
+ EXPORT_SYMBOL(ide_lock);
+ 
+ static int ide_bus_match(struct device *dev, struct device_driver *drv)
+@@ -1779,30 +1678,6 @@ static int __init ide_init(void)
+ 
+ 	proc_ide_create();
+ 
+-#ifdef CONFIG_BLK_DEV_ALI14XX
+-	if (probe_ali14xx)
+-		(void)ali14xx_init();
+-#endif
+-#ifdef CONFIG_BLK_DEV_UMC8672
+-	if (probe_umc8672)
+-		(void)umc8672_init();
+-#endif
+-#ifdef CONFIG_BLK_DEV_DTC2278
+-	if (probe_dtc2278)
+-		(void)dtc2278_init();
+-#endif
+-#ifdef CONFIG_BLK_DEV_HT6560B
+-	if (probe_ht6560b)
+-		(void)ht6560b_init();
+-#endif
+-#ifdef CONFIG_BLK_DEV_QD65XX
+-	if (probe_qd65xx)
+-		(void)qd65xx_init();
+-#endif
+-
+-	/* Probe for special PCI and other "known" interface chipsets. */
+-	probe_for_hwifs();
+-
+ 	return 0;
+ }
+ 
+@@ -1838,10 +1713,6 @@ void __exit cleanup_module (void)
+ 	for (index = 0; index < MAX_HWIFS; ++index)
+ 		ide_unregister(index);
+ 
+-#ifdef CONFIG_BLK_DEV_IDEPNP
+-	pnpide_exit();
+-#endif
+-
+ 	proc_ide_destroy();
+ 
+ 	bus_unregister(&ide_bus_type);
+diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile
+index 4098223..7043ec7 100644
+--- a/drivers/ide/legacy/Makefile
++++ b/drivers/ide/legacy/Makefile
+@@ -1,15 +1,24 @@
+ 
++# link order is important here
 +
-+	DBG1("******** IOCTL, cmd: %d", cmd);
+ obj-$(CONFIG_BLK_DEV_ALI14XX)		+= ali14xx.o
++obj-$(CONFIG_BLK_DEV_UMC8672)		+= umc8672.o
+ obj-$(CONFIG_BLK_DEV_DTC2278)		+= dtc2278.o
+ obj-$(CONFIG_BLK_DEV_HT6560B)		+= ht6560b.o
+ obj-$(CONFIG_BLK_DEV_QD65XX)		+= qd65xx.o
+-obj-$(CONFIG_BLK_DEV_UMC8672)		+= umc8672.o
+ 
+-obj-$(CONFIG_BLK_DEV_IDECS)		+= ide-cs.o
++obj-$(CONFIG_BLK_DEV_GAYLE)		+= gayle.o
++obj-$(CONFIG_BLK_DEV_FALCON_IDE)	+= falconide.o
++obj-$(CONFIG_BLK_DEV_MAC_IDE)		+= macide.o
++obj-$(CONFIG_BLK_DEV_Q40IDE)		+= q40ide.o
++obj-$(CONFIG_BLK_DEV_BUDDHA)		+= buddha.o
+ 
+-obj-$(CONFIG_BLK_DEV_PLATFORM)		+= ide_platform.o
++ifeq ($(CONFIG_BLK_DEV_IDECS), m)
++	obj-m += ide-cs.o
++endif
+ 
+-# Last of all
+-obj-$(CONFIG_BLK_DEV_HD)		+= hd.o
++ifeq ($(CONFIG_BLK_DEV_PLATFORM), m)
++	obj-m += ide_platform.o
++endif
+ 
+ EXTRA_CFLAGS	:= -Idrivers/ide
+diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
+index 38c3a6d..5ec0be4 100644
+--- a/drivers/ide/legacy/ali14xx.c
++++ b/drivers/ide/legacy/ali14xx.c
+@@ -231,8 +231,7 @@ int probe_ali14xx = 0;
+ module_param_named(probe, probe_ali14xx, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
+ 
+-/* Can be called directly from ide.c. */
+-int __init ali14xx_init(void)
++static int __init ali14xx_init(void)
+ {
+ 	if (probe_ali14xx == 0)
+ 		goto out;
+@@ -248,9 +247,7 @@ out:
+ 	return -ENODEV;
+ }
+ 
+-#ifdef MODULE
+ module_init(ali14xx_init);
+-#endif
+ 
+ MODULE_AUTHOR("see local file");
+ MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets");
+diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
+index 4a0be25..74d28e0 100644
+--- a/drivers/ide/legacy/buddha.c
++++ b/drivers/ide/legacy/buddha.c
+@@ -112,6 +112,7 @@ typedef enum BuddhaType_Enum {
+     BOARD_BUDDHA, BOARD_CATWEASEL, BOARD_XSURF
+ } BuddhaType;
+ 
++static const char *buddha_board_name[] = { "Buddha", "Catweasel", "X-Surf" };
+ 
+     /*
+      *  Check and acknowledge the interrupt status
+@@ -143,11 +144,11 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
+      *  Probe for a Buddha or Catweasel IDE interface
+      */
+ 
+-void __init buddha_init(void)
++static int __init buddha_init(void)
+ {
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int i, index;
++	int i;
+ 
+ 	struct zorro_dev *z = NULL;
+ 	u_long buddha_board = 0;
+@@ -156,6 +157,8 @@ void __init buddha_init(void)
+ 
+ 	while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
+ 		unsigned long board;
++		u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
 +
-+	switch (cmd) {
-+	case TIOCMIWAIT: {
-+		struct async_icount cprev = port->tty_icount;
+ 		if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
+ 			buddha_num_hwifs = BUDDHA_NUM_HWIFS;
+ 			type=BOARD_BUDDHA;
+@@ -195,7 +198,10 @@ fail_base2:
+ 		/* X-Surf doesn't have this.  IRQs are always on */
+ 		if (type != BOARD_XSURF)
+ 			z_writeb(0, buddha_board+BUDDHA_IRQ_MR);
+-		
 +
-+		rval = wait_event_interruptible(port->tty_wait,
-+				ntty_cflags_changed(port, arg, &cprev));
-+		break;
-+	} case TIOCGICOUNT:
-+		rval = ntty_ioctl_tiocgicount(port, argp);
-+		break;
-+	default:
-+		DBG1("ERR: 0x%08X, %d", cmd, cmd);
-+		break;
-+	};
++		printk(KERN_INFO "ide: %s IDE controller\n",
++				 buddha_board_name[type]);
 +
-+	return rval;
-+}
+ 		for(i=0;i<buddha_num_hwifs;i++) {
+ 			if(type != BOARD_XSURF) {
+ 				ide_setup_ports(&hw, (buddha_board+buddha_bases[i]),
+@@ -213,23 +219,23 @@ fail_base2:
+ 						IRQ_AMIGA_PORTS);
+ 			}	
+ 
+-			index = ide_register_hw(&hw, NULL, 1, &hwif);
+-			if (index != -1) {
++			hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++			if (hwif) {
++				u8 index = hwif->index;
 +
-+/*
-+ * Called by the upper tty layer when tty buffers are ready
-+ * to receive data again after a call to throttle.
-+ */
-+static void ntty_unthrottle(struct tty_struct *tty)
-+{
-+	struct nozomi *dc = get_dc_by_tty(tty);
-+	unsigned long flags;
++				ide_init_port_data(hwif, index);
++				ide_init_port_hw(hwif, &hw);
 +
-+	DBG1("UNTHROTTLE");
-+	spin_lock_irqsave(&dc->spin_mutex, flags);
-+	enable_transmit_dl(tty->index % MAX_PORT, dc);
-+	set_rts(tty, 1);
+ 				hwif->mmio = 1;
+-				printk("ide%d: ", index);
+-				switch(type) {
+-				case BOARD_BUDDHA:
+-					printk("Buddha");
+-					break;
+-				case BOARD_CATWEASEL:
+-					printk("Catweasel");
+-					break;
+-				case BOARD_XSURF:
+-					printk("X-Surf");
+-					break;
+-				}
+-				printk(" IDE interface\n");	    
+-			}		      
 +
-+	spin_unlock_irqrestore(&dc->spin_mutex, flags);
-+}
++				idx[i] = index;
++			}
+ 		}
 +
-+/*
-+ * Called by the upper tty layer when the tty buffers are almost full.
-+ * The driver should stop send more data.
-+ */
-+static void ntty_throttle(struct tty_struct *tty)
-+{
-+	struct nozomi *dc = get_dc_by_tty(tty);
-+	unsigned long flags;
++		ide_device_add(idx);
+ 	}
 +
-+	DBG1("THROTTLE");
-+	spin_lock_irqsave(&dc->spin_mutex, flags);
-+	set_rts(tty, 0);
-+	spin_unlock_irqrestore(&dc->spin_mutex, flags);
-+}
++	return 0;
+ }
 +
-+/* just to discard single character writes */
-+static void ntty_put_char(struct tty_struct *tty, unsigned char c)
-+{
-+	/* FIXME !!! */
-+	DBG2("PUT CHAR Function: %c", c);
-+}
++module_init(buddha_init);
+diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
+index 24a845d..13eee6d 100644
+--- a/drivers/ide/legacy/dtc2278.c
++++ b/drivers/ide/legacy/dtc2278.c
+@@ -150,8 +150,7 @@ int probe_dtc2278 = 0;
+ module_param_named(probe, probe_dtc2278, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
+ 
+-/* Can be called directly from ide.c. */
+-int __init dtc2278_init(void)
++static int __init dtc2278_init(void)
+ {
+ 	if (probe_dtc2278 == 0)
+ 		return -ENODEV;
+@@ -163,9 +162,7 @@ int __init dtc2278_init(void)
+ 	return 0;
+ }
+ 
+-#ifdef MODULE
+ module_init(dtc2278_init);
+-#endif
+ 
+ MODULE_AUTHOR("See Local File");
+ MODULE_DESCRIPTION("support of DTC-2278 VLB IDE chipsets");
+diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
+index 7d7936f..2860956 100644
+--- a/drivers/ide/legacy/falconide.c
++++ b/drivers/ide/legacy/falconide.c
+@@ -62,19 +62,31 @@ EXPORT_SYMBOL(falconide_intr_lock);
+      *  Probe for a Falcon IDE interface
+      */
+ 
+-void __init falconide_init(void)
++static int __init falconide_init(void)
+ {
+     if (MACH_IS_ATARI && ATARIHW_PRESENT(IDE)) {
+ 	hw_regs_t hw;
+-	int index;
 +
-+/* Returns number of chars in buffer, called by tty layer */
-+static s32 ntty_chars_in_buffer(struct tty_struct *tty)
-+{
-+	struct port *port = tty->driver_data;
-+	struct nozomi *dc = get_dc_by_tty(tty);
-+	s32 rval;
++	printk(KERN_INFO "ide: Falcon IDE controller\n");
+ 
+ 	ide_setup_ports(&hw, ATA_HD_BASE, falconide_offsets,
+ 			0, 0, NULL,
+ //			falconide_iops,
+ 			IRQ_MFP_IDE);
+-	index = ide_register_hw(&hw, NULL, 1, NULL);
+ 
+-	if (index != -1)
+-	    printk("ide%d: Falcon IDE interface\n", index);
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		u8 index = hwif->index;
++		u8 idx[4] = { index, 0xff, 0xff, 0xff };
 +
-+	if (unlikely(!dc || !port)) {
-+		rval = -ENODEV;
-+		goto exit_in_buffer;
-+	}
++		ide_init_port_data(hwif, index);
++		ide_init_port_hw(hwif, &hw);
 +
-+	if (unlikely(!port->tty_open_count)) {
-+		dev_err(&dc->pdev->dev, "No tty open?\n");
-+		rval = -ENODEV;
-+		goto exit_in_buffer;
++		ide_device_add(idx);
 +	}
+     }
 +
-+	rval = __kfifo_len(port->fifo_ul);
-+
-+exit_in_buffer:
-+	return rval;
-+}
-+
-+static struct tty_operations tty_ops = {
-+	.ioctl = ntty_ioctl,
-+	.open = ntty_open,
-+	.close = ntty_close,
-+	.write = ntty_write,
-+	.write_room = ntty_write_room,
-+	.unthrottle = ntty_unthrottle,
-+	.throttle = ntty_throttle,
-+	.chars_in_buffer = ntty_chars_in_buffer,
-+	.put_char = ntty_put_char,
-+	.tiocmget = ntty_tiocmget,
-+	.tiocmset = ntty_tiocmset,
-+};
-+
-+/* Module initialization */
-+static struct pci_driver nozomi_driver = {
-+	.name = NOZOMI_NAME,
-+	.id_table = nozomi_pci_tbl,
-+	.probe = nozomi_card_init,
-+	.remove = __devexit_p(nozomi_card_exit),
-+};
-+
-+static __init int nozomi_init(void)
-+{
-+	int ret;
-+
-+	printk(KERN_INFO "Initializing %s\n", VERSION_STRING);
-+
-+	ntty_driver = alloc_tty_driver(NTTY_TTY_MAXMINORS);
-+	if (!ntty_driver)
-+		return -ENOMEM;
++    return 0;
+ }
 +
-+	ntty_driver->owner = THIS_MODULE;
-+	ntty_driver->driver_name = NOZOMI_NAME_TTY;
-+	ntty_driver->name = "noz";
-+	ntty_driver->major = 0;
-+	ntty_driver->type = TTY_DRIVER_TYPE_SERIAL;
-+	ntty_driver->subtype = SERIAL_TYPE_NORMAL;
-+	ntty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
-+	ntty_driver->init_termios = tty_std_termios;
-+	ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD | \
-+						HUPCL | CLOCAL;
-+	ntty_driver->init_termios.c_ispeed = 115200;
-+	ntty_driver->init_termios.c_ospeed = 115200;
-+	tty_set_operations(ntty_driver, &tty_ops);
++module_init(falconide_init);
+diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
+index 53331ee..492fa04 100644
+--- a/drivers/ide/legacy/gayle.c
++++ b/drivers/ide/legacy/gayle.c
+@@ -110,12 +110,13 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
+      *  Probe for a Gayle IDE interface (and optionally for an IDE doubler)
+      */
+ 
+-void __init gayle_init(void)
++static int __init gayle_init(void)
+ {
+     int a4000, i;
++    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+     if (!MACH_IS_AMIGA)
+-	return;
++	return -ENODEV;
+ 
+     if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE))
+ 	goto found;
+@@ -125,15 +126,21 @@ void __init gayle_init(void)
+ 			  NULL))
+ 	goto found;
+ #endif
+-    return;
++    return -ENODEV;
+ 
+ found:
++	printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n",
++			 a4000 ? 4000 : 1200,
++#ifdef CONFIG_BLK_DEV_IDEDOUBLER
++			 ide_doubler ? ", IDE doubler" :
++#endif
++			 "");
 +
-+	ret = tty_register_driver(ntty_driver);
-+	if (ret) {
-+		printk(KERN_ERR "Nozomi: failed to register ntty driver\n");
-+		goto free_tty;
-+	}
+     for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
+ 	unsigned long base, ctrlport, irqport;
+ 	ide_ack_intr_t *ack_intr;
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int index;
+ 	unsigned long phys_base, res_start, res_n;
+ 
+ 	if (a4000) {
+@@ -165,21 +172,23 @@ found:
+ //			&gayle_iops,
+ 			IRQ_AMIGA_PORTS);
+ 
+-	index = ide_register_hw(&hw, NULL, 1, &hwif);
+-	if (index != -1) {
++	hwif = ide_find_port(base);
++	if (hwif) {
++	    u8 index = hwif->index;
 +
-+	ret = pci_register_driver(&nozomi_driver);
-+	if (ret) {
-+		printk(KERN_ERR "Nozomi: can't register pci driver\n");
-+		goto unr_tty;
-+	}
++	    ide_init_port_data(hwif, index);
++	    ide_init_port_hw(hwif, &hw);
 +
-+	return 0;
-+unr_tty:
-+	tty_unregister_driver(ntty_driver);
-+free_tty:
-+	put_tty_driver(ntty_driver);
-+	return ret;
-+}
+ 	    hwif->mmio = 1;
+-	    switch (i) {
+-		case 0:
+-		    printk("ide%d: Gayle IDE interface (A%d style)\n", index,
+-			   a4000 ? 4000 : 1200);
+-		    break;
+-#ifdef CONFIG_BLK_DEV_IDEDOUBLER
+-		case 1:
+-		    printk("ide%d: IDE doubler\n", index);
+-		    break;
+-#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
+-	    }
 +
-+static __exit void nozomi_exit(void)
-+{
-+	printk(KERN_INFO "Unloading %s\n", DRIVER_DESC);
-+	pci_unregister_driver(&nozomi_driver);
-+	tty_unregister_driver(ntty_driver);
-+	put_tty_driver(ntty_driver);
-+}
++	    idx[i] = index;
+ 	} else
+ 	    release_mem_region(res_start, res_n);
+     }
 +
-+module_init(nozomi_init);
-+module_exit(nozomi_exit);
++    ide_device_add(idx);
 +
-+module_param(debug, int, S_IRUGO | S_IWUSR);
++    return 0;
+ }
 +
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_DESCRIPTION(DRIVER_DESC);
-diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
-index 0c66b80..78b151c 100644
---- a/drivers/char/rtc.c
-+++ b/drivers/char/rtc.c
-@@ -1,5 +1,5 @@
- /*
-- *	Real Time Clock interface for Linux	
-+ *	Real Time Clock interface for Linux
-  *
-  *	Copyright (C) 1996 Paul Gortmaker
-  *
-@@ -17,7 +17,7 @@
-  *	has been received. If a RTC interrupt has already happened,
-  *	it will output an unsigned long and then block. The output value
-  *	contains the interrupt status in the low byte and the number of
-- *	interrupts since the last read in the remaining high bytes. The 
-+ *	interrupts since the last read in the remaining high bytes. The
-  *	/dev/rtc interface can also be used with the select(2) call.
-  *
-  *	This program is free software; you can redistribute it and/or
-@@ -104,12 +104,14 @@ static int rtc_has_irq = 1;
++module_init(gayle_init);
+diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
+index a4245d1..8da5031 100644
+--- a/drivers/ide/legacy/ht6560b.c
++++ b/drivers/ide/legacy/ht6560b.c
+@@ -307,8 +307,7 @@ int probe_ht6560b = 0;
+ module_param_named(probe, probe_ht6560b, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
  
- #ifndef CONFIG_HPET_EMULATE_RTC
- #define is_hpet_enabled()			0
--#define hpet_set_alarm_time(hrs, min, sec) 	0
--#define hpet_set_periodic_freq(arg) 		0
--#define hpet_mask_rtc_irq_bit(arg) 		0
--#define hpet_set_rtc_irq_bit(arg) 		0
--#define hpet_rtc_timer_init() 			do { } while (0)
--#define hpet_rtc_dropped_irq() 			0
-+#define hpet_set_alarm_time(hrs, min, sec)	0
-+#define hpet_set_periodic_freq(arg)		0
-+#define hpet_mask_rtc_irq_bit(arg)		0
-+#define hpet_set_rtc_irq_bit(arg)		0
-+#define hpet_rtc_timer_init()			do { } while (0)
-+#define hpet_rtc_dropped_irq()			0
-+#define hpet_register_irq_handler(h)		0
-+#define hpet_unregister_irq_handler(h)		0
- #ifdef RTC_IRQ
- static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+-/* Can be called directly from ide.c. */
+-int __init ht6560b_init(void)
++static int __init ht6560b_init(void)
  {
-@@ -147,7 +149,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file,
- static unsigned int rtc_poll(struct file *file, poll_table *wait);
- #endif
+ 	ide_hwif_t *hwif, *mate;
+ 	static u8 idx[4] = { 0, 1, 0xff, 0xff };
+@@ -369,9 +368,7 @@ release_region:
+ 	return -ENODEV;
+ }
  
--static void get_rtc_alm_time (struct rtc_time *alm_tm);
-+static void get_rtc_alm_time(struct rtc_time *alm_tm);
- #ifdef RTC_IRQ
- static void set_rtc_irq_bit_locked(unsigned char bit);
- static void mask_rtc_irq_bit_locked(unsigned char bit);
-@@ -185,9 +187,9 @@ static int rtc_proc_open(struct inode *inode, struct file *file);
-  * rtc_status but before mod_timer is called, which would then reenable the
-  * timer (but you would need to have an awful timing before you'd trip on it)
-  */
--static unsigned long rtc_status = 0;	/* bitmapped status byte.	*/
--static unsigned long rtc_freq = 0;	/* Current periodic IRQ rate	*/
--static unsigned long rtc_irq_data = 0;	/* our output to the world	*/
-+static unsigned long rtc_status;	/* bitmapped status byte.	*/
-+static unsigned long rtc_freq;		/* Current periodic IRQ rate	*/
-+static unsigned long rtc_irq_data;	/* our output to the world	*/
- static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
+-#ifdef MODULE
+ module_init(ht6560b_init);
+-#endif
  
- #ifdef RTC_IRQ
-@@ -195,7 +197,7 @@ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
-  * rtc_task_lock nests inside rtc_lock.
-  */
- static DEFINE_SPINLOCK(rtc_task_lock);
--static rtc_task_t *rtc_callback = NULL;
-+static rtc_task_t *rtc_callback;
- #endif
+ MODULE_AUTHOR("See Local File");
+ MODULE_DESCRIPTION("HT-6560B EIDE-controller support");
+diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
+index 03715c0..f4ea15b 100644
+--- a/drivers/ide/legacy/ide-cs.c
++++ b/drivers/ide/legacy/ide-cs.c
+@@ -153,7 +153,7 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq
+     hw.irq = irq;
+     hw.chipset = ide_pci;
+     hw.dev = &handle->dev;
+-    return ide_register_hw(&hw, &ide_undecoded_slave, 0, NULL);
++    return ide_register_hw(&hw, &ide_undecoded_slave, NULL);
+ }
  
- /*
-@@ -205,7 +207,7 @@ static rtc_task_t *rtc_callback = NULL;
+ /*======================================================================
+diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
+index 7bb79f5..69a0fb0 100644
+--- a/drivers/ide/legacy/ide_platform.c
++++ b/drivers/ide/legacy/ide_platform.c
+@@ -28,39 +28,27 @@ static struct {
+ 	int index;
+ } hwif_prop;
  
- static unsigned long epoch = 1900;	/* year corresponding to 0x00	*/
+-static ide_hwif_t *__devinit plat_ide_locate_hwif(void __iomem *base,
+-	    void __iomem *ctrl, struct pata_platform_info *pdata, int irq,
+-	    int mmio)
++static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
++					   void __iomem *base,
++					   void __iomem *ctrl,
++					   struct pata_platform_info *pdata,
++					   int irq)
+ {
+ 	unsigned long port = (unsigned long)base;
+-	ide_hwif_t *hwif = ide_find_port(port);
+ 	int i;
  
--static const unsigned char days_in_mo[] = 
-+static const unsigned char days_in_mo[] =
- {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+-	if (hwif == NULL)
+-		goto out;
+-
+-	hwif->io_ports[IDE_DATA_OFFSET] = port;
++	hw->io_ports[IDE_DATA_OFFSET] = port;
  
- /*
-@@ -242,7 +244,7 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
- 	 *	the last read in the remainder of rtc_irq_data.
- 	 */
+ 	port += (1 << pdata->ioport_shift);
+ 	for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET;
+ 	     i++, port += (1 << pdata->ioport_shift))
+-		hwif->io_ports[i] = port;
+-
+-	hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
++		hw->io_ports[i] = port;
  
--	spin_lock (&rtc_lock);
-+	spin_lock(&rtc_lock);
- 	rtc_irq_data += 0x100;
- 	rtc_irq_data &= ~0xff;
- 	if (is_hpet_enabled()) {
-@@ -259,16 +261,16 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
- 	if (rtc_status & RTC_TIMER_ON)
- 		mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
+-	hwif->irq = irq;
++	hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
  
--	spin_unlock (&rtc_lock);
-+	spin_unlock(&rtc_lock);
+-	hwif->chipset = ide_generic;
++	hw->irq = irq;
  
- 	/* Now do the rest of the actions */
- 	spin_lock(&rtc_task_lock);
- 	if (rtc_callback)
- 		rtc_callback->func(rtc_callback->private_data);
- 	spin_unlock(&rtc_task_lock);
--	wake_up_interruptible(&rtc_wait);	
-+	wake_up_interruptible(&rtc_wait);
+-	if (mmio) {
+-		hwif->mmio = 1;
+-		default_hwif_mmiops(hwif);
+-	}
+-
+-	hwif_prop.hwif = hwif;
+-	hwif_prop.index = hwif->index;
+-out:
+-	return hwif;
++	hw->chipset = ide_generic;
+ }
  
--	kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
-+	kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
+ static int __devinit plat_ide_probe(struct platform_device *pdev)
+@@ -71,6 +59,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
+ 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 	int ret = 0;
+ 	int mmio = 0;
++	hw_regs_t hw;
  
- 	return IRQ_HANDLED;
- }
-@@ -335,7 +337,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
- 	DECLARE_WAITQUEUE(wait, current);
- 	unsigned long data;
- 	ssize_t retval;
--	
-+
- 	if (rtc_has_irq == 0)
- 		return -EIO;
+ 	pdata = pdev->dev.platform_data;
  
-@@ -358,11 +360,11 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
- 		 * confusing. And no, xchg() is not the answer. */
+@@ -106,15 +95,27 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
+ 			res_alt->start, res_alt->end - res_alt->start + 1);
+ 	}
  
- 		__set_current_state(TASK_INTERRUPTIBLE);
--		
--		spin_lock_irq (&rtc_lock);
+-	hwif = plat_ide_locate_hwif(hwif_prop.plat_ide_mapbase,
+-	         hwif_prop.plat_ide_alt_mapbase, pdata, res_irq->start, mmio);
+-
++	hwif = ide_find_port((unsigned long)hwif_prop.plat_ide_mapbase);
+ 	if (!hwif) {
+ 		ret = -ENODEV;
+ 		goto out;
+ 	}
+-	hwif->gendev.parent = &pdev->dev;
+-	hwif->noprobe = 0;
 +
-+		spin_lock_irq(&rtc_lock);
- 		data = rtc_irq_data;
- 		rtc_irq_data = 0;
--		spin_unlock_irq (&rtc_lock);
-+		spin_unlock_irq(&rtc_lock);
++	memset(&hw, 0, sizeof(hw));
++	plat_ide_setup_ports(&hw, hwif_prop.plat_ide_mapbase,
++			     hwif_prop.plat_ide_alt_mapbase,
++			     pdata, res_irq->start);
++	hw.dev = &pdev->dev;
++
++	ide_init_port_hw(hwif, &hw);
++
++	if (mmio) {
++		hwif->mmio = 1;
++		default_hwif_mmiops(hwif);
++	}
++
++	hwif_prop.hwif = hwif;
++	hwif_prop.index = hwif->index;
  
- 		if (data != 0)
- 			break;
-@@ -378,10 +380,13 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
- 		schedule();
- 	} while (1);
+ 	idx[0] = hwif->index;
  
--	if (count == sizeof(unsigned int))
--		retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int);
--	else
--		retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long);
-+	if (count == sizeof(unsigned int)) {
-+		retval = put_user(data,
-+				  (unsigned int __user *)buf) ?: sizeof(int);
-+	} else {
-+		retval = put_user(data,
-+				  (unsigned long __user *)buf) ?: sizeof(long);
-+	}
- 	if (!retval)
- 		retval = count;
-  out:
-@@ -394,7 +399,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
+diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
+index 5c6aa77..782d4c7 100644
+--- a/drivers/ide/legacy/macide.c
++++ b/drivers/ide/legacy/macide.c
+@@ -77,15 +77,17 @@ int macide_ack_intr(ide_hwif_t* hwif)
+ 	return 0;
+ }
  
- static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
++static const char *mac_ide_name[] =
++	{ "Quadra", "Powerbook", "Powerbook Baboon" };
++
+ /*
+  * Probe for a Macintosh IDE interface
+  */
+ 
+-void __init macide_init(void)
++static int __init macide_init(void)
  {
--	struct rtc_time wtime; 
-+	struct rtc_time wtime;
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int index = -1;
  
- #ifdef RTC_IRQ
- 	if (rtc_has_irq == 0) {
-@@ -426,35 +431,41 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
- 	}
- 	case RTC_PIE_OFF:	/* Mask periodic int. enab. bit	*/
- 	{
--		unsigned long flags; /* can be called from isr via rtc_control() */
--		spin_lock_irqsave (&rtc_lock, flags);
-+		/* can be called from isr via rtc_control() */
-+		unsigned long flags;
+ 	switch (macintosh_config->ide_type) {
+ 	case MAC_IDE_QUADRA:
+@@ -93,48 +95,50 @@ void __init macide_init(void)
+ 				0, 0, macide_ack_intr,
+ //				quadra_ide_iops,
+ 				IRQ_NUBUS_F);
+-		index = ide_register_hw(&hw, NULL, 1, &hwif);
+ 		break;
+ 	case MAC_IDE_PB:
+ 		ide_setup_ports(&hw, IDE_BASE, macide_offsets,
+ 				0, 0, macide_ack_intr,
+ //				macide_pb_iops,
+ 				IRQ_NUBUS_C);
+-		index = ide_register_hw(&hw, NULL, 1, &hwif);
+ 		break;
+ 	case MAC_IDE_BABOON:
+ 		ide_setup_ports(&hw, BABOON_BASE, macide_offsets,
+ 				0, 0, NULL,
+ //				macide_baboon_iops,
+ 				IRQ_BABOON_1);
+-		index = ide_register_hw(&hw, NULL, 1, &hwif);
+-		if (index == -1) break;
+-		if (macintosh_config->ident == MAC_MODEL_PB190) {
++		break;
++	default:
++		return -ENODEV;
++	}
 +
-+		spin_lock_irqsave(&rtc_lock, flags);
- 		mask_rtc_irq_bit_locked(RTC_PIE);
- 		if (rtc_status & RTC_TIMER_ON) {
- 			rtc_status &= ~RTC_TIMER_ON;
- 			del_timer(&rtc_irq_timer);
- 		}
--		spin_unlock_irqrestore (&rtc_lock, flags);
-+		spin_unlock_irqrestore(&rtc_lock, flags);
++	printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
++			 mac_ide_name[macintosh_config->ide_type - 1]);
+ 
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		u8 index = hwif->index;
++		u8 idx[4] = { index, 0xff, 0xff, 0xff };
 +
- 		return 0;
- 	}
- 	case RTC_PIE_ON:	/* Allow periodic ints		*/
- 	{
--		unsigned long flags; /* can be called from isr via rtc_control() */
-+		/* can be called from isr via rtc_control() */
-+		unsigned long flags;
++		ide_init_port_data(hwif, index);
++		ide_init_port_hw(hwif, &hw);
 +
- 		/*
- 		 * We don't really want Joe User enabling more
- 		 * than 64Hz of interrupts on a multi-user machine.
- 		 */
- 		if (!kernel && (rtc_freq > rtc_max_user_freq) &&
--			(!capable(CAP_SYS_RESOURCE)))
-+						(!capable(CAP_SYS_RESOURCE)))
- 			return -EACCES;
- 
--		spin_lock_irqsave (&rtc_lock, flags);
-+		spin_lock_irqsave(&rtc_lock, flags);
- 		if (!(rtc_status & RTC_TIMER_ON)) {
- 			mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
- 					2*HZ/100);
- 			rtc_status |= RTC_TIMER_ON;
++		if (macintosh_config->ide_type == MAC_IDE_BABOON &&
++		    macintosh_config->ident == MAC_MODEL_PB190) {
+ 			/* Fix breakage in ide-disk.c: drive capacity	*/
+ 			/* is not initialized for drives without a 	*/
+ 			/* hardware ID, and we can't get that without	*/
+ 			/* probing the drive which freezes a 190.	*/
+-
+-			ide_drive_t *drive = &ide_hwifs[index].drives[0];
++			ide_drive_t *drive = &hwif->drives[0];
+ 			drive->capacity64 = drive->cyl*drive->head*drive->sect;
+-
  		}
- 		set_rtc_irq_bit_locked(RTC_PIE);
--		spin_unlock_irqrestore (&rtc_lock, flags);
-+		spin_unlock_irqrestore(&rtc_lock, flags);
+-		break;
+-
+-	default:
+-	    return;
+-	}
+ 
+-        if (index != -1) {
+ 		hwif->mmio = 1;
+-		if (macintosh_config->ide_type == MAC_IDE_QUADRA)
+-			printk(KERN_INFO "ide%d: Macintosh Quadra IDE interface\n", index);
+-		else if (macintosh_config->ide_type == MAC_IDE_PB)
+-			printk(KERN_INFO "ide%d: Macintosh Powerbook IDE interface\n", index);
+-		else if (macintosh_config->ide_type == MAC_IDE_BABOON)
+-			printk(KERN_INFO "ide%d: Macintosh Powerbook Baboon IDE interface\n", index);
+-		else
+-			printk(KERN_INFO "ide%d: Unknown Macintosh IDE interface\n", index);
 +
- 		return 0;
- 	}
- 	case RTC_UIE_OFF:	/* Mask ints from RTC updates.	*/
-@@ -477,7 +488,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
- 		 */
- 		memset(&wtime, 0, sizeof(struct rtc_time));
- 		get_rtc_alm_time(&wtime);
--		break; 
-+		break;
++		ide_device_add(idx);
  	}
- 	case RTC_ALM_SET:	/* Store a time into the alarm */
- 	{
-@@ -505,16 +516,21 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
- 			 */
- 		}
- 		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
--		    RTC_ALWAYS_BCD)
--		{
--			if (sec < 60) BIN_TO_BCD(sec);
--			else sec = 0xff;
--
--			if (min < 60) BIN_TO_BCD(min);
--			else min = 0xff;
--
--			if (hrs < 24) BIN_TO_BCD(hrs);
--			else hrs = 0xff;
-+							RTC_ALWAYS_BCD) {
-+			if (sec < 60)
-+				BIN_TO_BCD(sec);
-+			else
-+				sec = 0xff;
 +
-+			if (min < 60)
-+				BIN_TO_BCD(min);
-+			else
-+				min = 0xff;
++	return 0;
+ }
 +
-+			if (hrs < 24)
-+				BIN_TO_BCD(hrs);
-+			else
-+				hrs = 0xff;
- 		}
- 		CMOS_WRITE(hrs, RTC_HOURS_ALARM);
- 		CMOS_WRITE(min, RTC_MINUTES_ALARM);
-@@ -563,11 +579,12 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
++module_init(macide_init);
+diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
+index 6ea46a6..f532973 100644
+--- a/drivers/ide/legacy/q40ide.c
++++ b/drivers/ide/legacy/q40ide.c
+@@ -111,15 +111,17 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
+  *  Probe for Q40 IDE interfaces
+  */
  
- 		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
- 			return -EINVAL;
--			
-+
- 		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- 			return -EINVAL;
+-void __init q40ide_init(void)
++static int __init q40ide_init(void)
+ {
+     int i;
+     ide_hwif_t *hwif;
+-    int index;
+     const char *name;
++    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
  
--		if ((yrs -= epoch) > 255)    /* They are unsigned */
-+		yrs -= epoch;
-+		if (yrs > 255)		/* They are unsigned */
- 			return -EINVAL;
+     if (!MACH_IS_Q40)
+-      return ;
++      return -ENODEV;
++
++    printk(KERN_INFO "ide: Q40 IDE controller\n");
  
- 		spin_lock_irq(&rtc_lock);
-@@ -635,9 +652,10 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
- 	{
- 		int tmp = 0;
- 		unsigned char val;
--		unsigned long flags; /* can be called from isr via rtc_control() */
-+		/* can be called from isr via rtc_control() */
-+		unsigned long flags;
+     for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
+ 	hw_regs_t hw;
+@@ -141,10 +143,20 @@ void __init q40ide_init(void)
+ 			0, NULL,
+ //			m68kide_iops,
+ 			q40ide_default_irq(pcide_bases[i]));
+-	index = ide_register_hw(&hw, NULL, 1, &hwif);
+-	// **FIXME**
+-	if (index != -1)
++
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		ide_init_port_data(hwif, hwif->index);
++		ide_init_port_hw(hwif, &hw);
+ 		hwif->mmio = 1;
++
++		idx[i] = hwif->index;
++	}
+     }
++
++    ide_device_add(idx);
++
++    return 0;
+ }
  
--		/* 
-+		/*
- 		 * The max we can do is 8192Hz.
- 		 */
- 		if ((arg < 2) || (arg > 8192))
-@@ -646,7 +664,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
- 		 * We don't really want Joe User generating more
- 		 * than 64Hz of interrupts on a multi-user machine.
- 		 */
--		if (!kernel && (arg > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE)))
-+		if (!kernel && (arg > rtc_max_user_freq) &&
-+					!capable(CAP_SYS_RESOURCE))
- 			return -EACCES;
++module_init(q40ide_init);
+diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
+index 912e738..2bac4c1 100644
+--- a/drivers/ide/legacy/qd65xx.c
++++ b/drivers/ide/legacy/qd65xx.c
+@@ -478,8 +478,7 @@ int probe_qd65xx = 0;
+ module_param_named(probe, probe_qd65xx, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
  
- 		while (arg > (1<<tmp))
-@@ -674,11 +693,11 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
- #endif
- 	case RTC_EPOCH_READ:	/* Read the epoch.	*/
- 	{
--		return put_user (epoch, (unsigned long __user *)arg);
-+		return put_user(epoch, (unsigned long __user *)arg);
- 	}
- 	case RTC_EPOCH_SET:	/* Set the epoch.	*/
- 	{
--		/* 
-+		/*
- 		 * There were no RTC clocks before 1900.
- 		 */
- 		if (arg < 1900)
-@@ -693,7 +712,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
- 	default:
- 		return -ENOTTY;
- 	}
--	return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
-+	return copy_to_user((void __user *)arg,
-+			    &wtime, sizeof wtime) ? -EFAULT : 0;
+-/* Can be called directly from ide.c. */
+-int __init qd65xx_init(void)
++static int __init qd65xx_init(void)
+ {
+ 	if (probe_qd65xx == 0)
+ 		return -ENODEV;
+@@ -492,9 +491,7 @@ int __init qd65xx_init(void)
+ 	return 0;
  }
  
- static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-@@ -712,26 +732,25 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-  * needed here. Or anywhere else in this driver. */
- static int rtc_open(struct inode *inode, struct file *file)
+-#ifdef MODULE
+ module_init(qd65xx_init);
+-#endif
+ 
+ MODULE_AUTHOR("Samuel Thibault");
+ MODULE_DESCRIPTION("support of qd65xx vlb ide chipset");
+diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
+index 79577b9..a1ae1ae 100644
+--- a/drivers/ide/legacy/umc8672.c
++++ b/drivers/ide/legacy/umc8672.c
+@@ -169,8 +169,7 @@ int probe_umc8672 = 0;
+ module_param_named(probe, probe_umc8672, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
+ 
+-/* Can be called directly from ide.c. */
+-int __init umc8672_init(void)
++static int __init umc8672_init(void)
  {
--	spin_lock_irq (&rtc_lock);
-+	spin_lock_irq(&rtc_lock);
+ 	if (probe_umc8672 == 0)
+ 		goto out;
+@@ -181,9 +180,7 @@ out:
+ 	return -ENODEV;;
+ }
  
--	if(rtc_status & RTC_IS_OPEN)
-+	if (rtc_status & RTC_IS_OPEN)
- 		goto out_busy;
+-#ifdef MODULE
+ module_init(umc8672_init);
+-#endif
  
- 	rtc_status |= RTC_IS_OPEN;
+ MODULE_AUTHOR("Wolfram Podien");
+ MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset");
+diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
+index a4ce3ba..2d3e511 100644
+--- a/drivers/ide/mips/au1xxx-ide.c
++++ b/drivers/ide/mips/au1xxx-ide.c
+@@ -198,8 +198,6 @@ static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
  
- 	rtc_irq_data = 0;
--	spin_unlock_irq (&rtc_lock);
-+	spin_unlock_irq(&rtc_lock);
- 	return 0;
+ 		break;
+ #endif
+-	default:
+-		return;
+ 	}
  
- out_busy:
--	spin_unlock_irq (&rtc_lock);
-+	spin_unlock_irq(&rtc_lock);
- 	return -EBUSY;
+ 	au_writel(mem_sttime,MEM_STTIME2);
+@@ -397,26 +395,10 @@ static int auide_dma_test_irq(ide_drive_t *drive)
+ 	return 0;
  }
  
--static int rtc_fasync (int fd, struct file *filp, int on)
+-static void auide_dma_host_on(ide_drive_t *drive)
+-{
+-}
 -
-+static int rtc_fasync(int fd, struct file *filp, int on)
+-static int auide_dma_on(ide_drive_t *drive)
+-{
+-	drive->using_dma = 1;
+-
+-	return 0;
+-}
+-
+-static void auide_dma_host_off(ide_drive_t *drive)
++static void auide_dma_host_set(ide_drive_t *drive, int on)
  {
--	return fasync_helper (fd, filp, on, &rtc_async_queue);
-+	return fasync_helper(fd, filp, on, &rtc_async_queue);
  }
  
- static int rtc_release(struct inode *inode, struct file *file)
-@@ -762,16 +781,16 @@ static int rtc_release(struct inode *inode, struct file *file)
- 	}
- 	spin_unlock_irq(&rtc_lock);
+-static void auide_dma_off_quietly(ide_drive_t *drive)
+-{
+-	drive->using_dma = 0;
+-}
+-
+ static void auide_dma_lost_irq(ide_drive_t *drive)
+ {
+ 	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
+@@ -643,12 +625,13 @@ static int au_ide_probe(struct device *dev)
+ 	/* FIXME:  This might possibly break PCMCIA IDE devices */
  
--	if (file->f_flags & FASYNC) {
--		rtc_fasync (-1, file, 0);
--	}
-+	if (file->f_flags & FASYNC)
-+		rtc_fasync(-1, file, 0);
- no_irq:
- #endif
+ 	hwif                            = &ide_hwifs[pdev->id];
+-	hwif->irq			= ahwif->irq;
+-	hwif->chipset                   = ide_au1xxx;
  
--	spin_lock_irq (&rtc_lock);
-+	spin_lock_irq(&rtc_lock);
- 	rtc_irq_data = 0;
- 	rtc_status &= ~RTC_IS_OPEN;
--	spin_unlock_irq (&rtc_lock);
-+	spin_unlock_irq(&rtc_lock);
+ 	memset(&hw, 0, sizeof(hw));
+ 	auide_setup_ports(&hw, ahwif);
+-	memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
++	hw.irq = ahwif->irq;
++	hw.chipset = ide_au1xxx;
 +
- 	return 0;
- }
++	ide_init_port_hw(hwif, &hw);
  
-@@ -786,9 +805,9 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
+ 	hwif->ultra_mask                = 0x0;  /* Disable Ultra DMA */
+ #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+@@ -662,7 +645,6 @@ static int au_ide_probe(struct device *dev)
+ 	hwif->pio_mask = ATA_PIO4;
+ 	hwif->host_flags = IDE_HFLAG_POST_SET_MODE;
  
- 	poll_wait(file, &rtc_wait, wait);
+-	hwif->noprobe = 0;
+ 	hwif->drives[0].unmask          = 1;
+ 	hwif->drives[1].unmask          = 1;
  
--	spin_lock_irq (&rtc_lock);
-+	spin_lock_irq(&rtc_lock);
- 	l = rtc_irq_data;
--	spin_unlock_irq (&rtc_lock);
-+	spin_unlock_irq(&rtc_lock);
+@@ -684,29 +666,25 @@ static int au_ide_probe(struct device *dev)
+ 	hwif->set_dma_mode		= &auide_set_dma_mode;
  
- 	if (l != 0)
- 		return POLLIN | POLLRDNORM;
-@@ -796,14 +815,6 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
- }
- #endif
+ #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+-	hwif->dma_off_quietly		= &auide_dma_off_quietly;
+ 	hwif->dma_timeout		= &auide_dma_timeout;
  
--/*
-- * exported stuffs
-- */
--
--EXPORT_SYMBOL(rtc_register);
--EXPORT_SYMBOL(rtc_unregister);
--EXPORT_SYMBOL(rtc_control);
--
- int rtc_register(rtc_task_t *task)
- {
- #ifndef RTC_IRQ
-@@ -829,6 +840,7 @@ int rtc_register(rtc_task_t *task)
- 	return 0;
- #endif
- }
-+EXPORT_SYMBOL(rtc_register);
+ 	hwif->mdma_filter		= &auide_mdma_filter;
  
- int rtc_unregister(rtc_task_t *task)
- {
-@@ -845,7 +857,7 @@ int rtc_unregister(rtc_task_t *task)
- 		return -ENXIO;
- 	}
- 	rtc_callback = NULL;
--	
++	hwif->dma_host_set		= &auide_dma_host_set;
+ 	hwif->dma_exec_cmd              = &auide_dma_exec_cmd;
+ 	hwif->dma_start                 = &auide_dma_start;
+ 	hwif->ide_dma_end               = &auide_dma_end;
+ 	hwif->dma_setup                 = &auide_dma_setup;
+ 	hwif->ide_dma_test_irq          = &auide_dma_test_irq;
+-	hwif->dma_host_off		= &auide_dma_host_off;
+-	hwif->dma_host_on		= &auide_dma_host_on;
+ 	hwif->dma_lost_irq		= &auide_dma_lost_irq;
+-	hwif->ide_dma_on                = &auide_dma_on;
+-#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
++#endif
+ 	hwif->channel                   = 0;
+-	hwif->hold                      = 1;
+ 	hwif->select_data               = 0;    /* no chipset-specific code */
+ 	hwif->config_data               = 0;    /* no chipset-specific code */
+ 
+ 	hwif->drives[0].autotune        = 1;    /* 1=autotune, 2=noautotune, 0=default */
+ 	hwif->drives[1].autotune	= 1;
+-#endif
 +
- 	/* disable controls */
- 	if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
- 		tmp = CMOS_READ(RTC_CONTROL);
-@@ -865,6 +877,7 @@ int rtc_unregister(rtc_task_t *task)
- 	return 0;
- #endif
- }
-+EXPORT_SYMBOL(rtc_unregister);
+ 	hwif->drives[0].no_io_32bit	= 1;
+ 	hwif->drives[1].no_io_32bit	= 1;
  
- int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
- {
-@@ -883,7 +896,7 @@ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
- 	return rtc_do_ioctl(cmd, arg, 1);
- #endif
- }
+diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
+index 521edd4..8b3959d 100644
+--- a/drivers/ide/mips/swarm.c
++++ b/drivers/ide/mips/swarm.c
+@@ -117,6 +117,7 @@ static int __devinit swarm_ide_probe(struct device *dev)
+ 	default_hwif_mmiops(hwif);
+ 	/* Prevent resource map manipulation.  */
+ 	hwif->mmio = 1;
++	hwif->chipset = ide_generic;
+ 	hwif->noprobe = 0;
+ 
+ 	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
+diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
+index 95d1ea8..9480325 100644
+--- a/drivers/ide/pci/Makefile
++++ b/drivers/ide/pci/Makefile
+@@ -36,4 +36,8 @@ obj-$(CONFIG_BLK_DEV_VIA82CXXX)		+= via82cxxx.o
+ # Must appear at the end of the block
+ obj-$(CONFIG_BLK_DEV_GENERIC)          += generic.o
+ 
++ifeq ($(CONFIG_BLK_DEV_CMD640), m)
++	obj-m += cmd640.o
++endif
++
+ EXTRA_CFLAGS	:= -Idrivers/ide
+diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
+index 4426850..7f4d185 100644
+--- a/drivers/ide/pci/aec62xx.c
++++ b/drivers/ide/pci/aec62xx.c
+@@ -202,6 +202,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
+ 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+ 		.host_flags	= IDE_HFLAG_SERIALIZE |
+ 				  IDE_HFLAG_NO_ATAPI_DMA |
++				  IDE_HFLAG_ABUSE_SET_DMA_MODE |
+ 				  IDE_HFLAG_OFF_BOARD,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+@@ -211,6 +212,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
+ 		.init_chipset	= init_chipset_aec62xx,
+ 		.init_hwif	= init_hwif_aec62xx,
+ 		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
++				  IDE_HFLAG_ABUSE_SET_DMA_MODE |
+ 				  IDE_HFLAG_OFF_BOARD,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+@@ -220,7 +222,8 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
+ 		.init_chipset	= init_chipset_aec62xx,
+ 		.init_hwif	= init_hwif_aec62xx,
+ 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA,
++		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
++				  IDE_HFLAG_ABUSE_SET_DMA_MODE,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= ATA_UDMA4,
+@@ -228,7 +231,9 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
+ 		.name		= "AEC6280",
+ 		.init_chipset	= init_chipset_aec62xx,
+ 		.init_hwif	= init_hwif_aec62xx,
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
++				  IDE_HFLAG_ABUSE_SET_DMA_MODE |
++				  IDE_HFLAG_OFF_BOARD,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= ATA_UDMA5,
+@@ -237,7 +242,9 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
+ 		.init_chipset	= init_chipset_aec62xx,
+ 		.init_hwif	= init_hwif_aec62xx,
+ 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
++				  IDE_HFLAG_ABUSE_SET_DMA_MODE |
++				  IDE_HFLAG_OFF_BOARD,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= ATA_UDMA5,
+diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
+index ce29393..49aa82e 100644
+--- a/drivers/ide/pci/alim15x3.c
++++ b/drivers/ide/pci/alim15x3.c
+@@ -402,9 +402,6 @@ static void ali_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	u8 tmpbyte		= 0x00;
+ 	int m5229_udma		= (hwif->channel) ? 0x57 : 0x56;
+ 
+-	if (speed < XFER_PIO_0)
+-		return;
 -
-+EXPORT_SYMBOL(rtc_control);
+ 	if (speed == XFER_UDMA_6)
+ 		speed1 = 0x47;
  
+diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
+index 8d4125e..cee51fd 100644
+--- a/drivers/ide/pci/amd74xx.c
++++ b/drivers/ide/pci/amd74xx.c
+@@ -266,6 +266,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
+ #define IDE_HFLAGS_AMD \
+ 	(IDE_HFLAG_PIO_NO_BLACKLIST | \
+ 	 IDE_HFLAG_PIO_NO_DOWNGRADE | \
++	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
+ 	 IDE_HFLAG_POST_SET_MODE | \
+ 	 IDE_HFLAG_IO_32BIT | \
+ 	 IDE_HFLAG_UNMASK_IRQS | \
+diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
+index ef8e016..4918719 100644
+--- a/drivers/ide/pci/atiixp.c
++++ b/drivers/ide/pci/atiixp.c
+@@ -1,5 +1,5 @@
  /*
-  *	The various file operations we support.
-@@ -910,11 +923,11 @@ static struct miscdevice rtc_dev = {
- 
- #ifdef CONFIG_PROC_FS
- static const struct file_operations rtc_proc_fops = {
--	.owner = THIS_MODULE,
--	.open = rtc_proc_open,
--	.read  = seq_read,
--	.llseek = seq_lseek,
--	.release = single_release,
-+	.owner		= THIS_MODULE,
-+	.open		= rtc_proc_open,
-+	.read		= seq_read,
-+	.llseek		= seq_lseek,
-+	.release	= single_release,
+- *  linux/drivers/ide/pci/atiixp.c	Version 0.03	Aug 3 2007
++ *  linux/drivers/ide/pci/atiixp.c	Version 0.05	Nov 9 2007
+  *
+  *  Copyright (C) 2003 ATI Inc. <hyu at ati.com>
+  *  Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz
+@@ -43,47 +43,8 @@ static atiixp_ide_timing mdma_timing[] = {
+ 	{ 0x02, 0x00 },
  };
- #endif
  
-@@ -965,7 +978,7 @@ static int __init rtc_init(void)
- #ifdef CONFIG_SPARC32
- 	for_each_ebus(ebus) {
- 		for_each_ebusdev(edev, ebus) {
--			if(strcmp(edev->prom_node->name, "rtc") == 0) {
-+			if (strcmp(edev->prom_node->name, "rtc") == 0) {
- 				rtc_port = edev->resource[0].start;
- 				rtc_irq = edev->irqs[0];
- 				goto found;
-@@ -986,7 +999,8 @@ found:
- 	 * XXX Interrupt pin #7 in Espresso is shared between RTC and
- 	 * PCI Slot 2 INTA# (and some INTx# in Slot 1).
- 	 */
--	if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) {
-+	if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
-+			(void *)&rtc_port)) {
- 		rtc_has_irq = 0;
- 		printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
- 		return -EIO;
-@@ -1015,16 +1029,26 @@ no_irq:
+-static int save_mdma_mode[4];
+-
+ static DEFINE_SPINLOCK(atiixp_lock);
  
- #ifdef RTC_IRQ
- 	if (is_hpet_enabled()) {
-+		int err;
-+
- 		rtc_int_handler_ptr = hpet_rtc_interrupt;
-+		err = hpet_register_irq_handler(rtc_interrupt);
-+		if (err != 0) {
-+			printk(KERN_WARNING "hpet_register_irq_handler failed "
-+					"in rtc_init().");
-+			return err;
-+		}
- 	} else {
- 		rtc_int_handler_ptr = rtc_interrupt;
- 	}
+-static void atiixp_dma_host_on(ide_drive_t *drive)
+-{
+-	struct pci_dev *dev = drive->hwif->pci_dev;
+-	unsigned long flags;
+-	u16 tmp16;
+-
+-	spin_lock_irqsave(&atiixp_lock, flags);
+-
+-	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+-	if (save_mdma_mode[drive->dn])
+-		tmp16 &= ~(1 << drive->dn);
+-	else
+-		tmp16 |= (1 << drive->dn);
+-	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+-
+-	spin_unlock_irqrestore(&atiixp_lock, flags);
+-
+-	ide_dma_host_on(drive);
+-}
+-
+-static void atiixp_dma_host_off(ide_drive_t *drive)
+-{
+-	struct pci_dev *dev = drive->hwif->pci_dev;
+-	unsigned long flags;
+-	u16 tmp16;
+-
+-	spin_lock_irqsave(&atiixp_lock, flags);
+-
+-	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+-	tmp16 &= ~(1 << drive->dn);
+-	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+-
+-	spin_unlock_irqrestore(&atiixp_lock, flags);
+-
+-	ide_dma_host_off(drive);
+-}
+-
+ /**
+  *	atiixp_set_pio_mode	-	set host controller for PIO mode
+  *	@drive: drive
+@@ -132,29 +93,33 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8;
+ 	u32 tmp32;
+ 	u16 tmp16;
+-
+-	if (speed < XFER_MW_DMA_0)
+-		return;
++	u16 udma_ctl = 0;
  
--	if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) {
-+	if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED,
-+			"rtc", NULL)) {
- 		/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
- 		rtc_has_irq = 0;
- 		printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
- 		rtc_release_region();
-+
- 		return -EIO;
- 	}
- 	hpet_rtc_timer_init();
-@@ -1036,6 +1060,7 @@ no_irq:
- 	if (misc_register(&rtc_dev)) {
- #ifdef RTC_IRQ
- 		free_irq(RTC_IRQ, NULL);
-+		hpet_unregister_irq_handler(rtc_interrupt);
- 		rtc_has_irq = 0;
- #endif
- 		rtc_release_region();
-@@ -1052,21 +1077,21 @@ no_irq:
+ 	spin_lock_irqsave(&atiixp_lock, flags);
  
- #if defined(__alpha__) || defined(__mips__)
- 	rtc_freq = HZ;
--	
-+
- 	/* Each operating system on an Alpha uses its own epoch.
- 	   Let's try to guess which one we are using now. */
--	
+-	save_mdma_mode[drive->dn] = 0;
++	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &udma_ctl);
 +
- 	if (rtc_is_updating() != 0)
- 		msleep(20);
--	
+ 	if (speed >= XFER_UDMA_0) {
+ 		pci_read_config_word(dev, ATIIXP_IDE_UDMA_MODE, &tmp16);
+ 		tmp16 &= ~(0x07 << (drive->dn * 4));
+ 		tmp16 |= ((speed & 0x07) << (drive->dn * 4));
+ 		pci_write_config_word(dev, ATIIXP_IDE_UDMA_MODE, tmp16);
+-	} else {
+-		if ((speed >= XFER_MW_DMA_0) && (speed <= XFER_MW_DMA_2)) {
+-			save_mdma_mode[drive->dn] = speed;
+-			pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
+-			tmp32 &= ~(0xff << timing_shift);
+-			tmp32 |= (mdma_timing[speed & 0x03].recover_width << timing_shift) |
+-				(mdma_timing[speed & 0x03].command_width << (timing_shift + 4));
+-			pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);
+-		}
 +
- 	spin_lock_irq(&rtc_lock);
- 	year = CMOS_READ(RTC_YEAR);
- 	ctrl = CMOS_READ(RTC_CONTROL);
- 	spin_unlock_irq(&rtc_lock);
--	
++		udma_ctl |= (1 << drive->dn);
++	} else if (speed >= XFER_MW_DMA_0) {
++		u8 i = speed & 0x03;
 +
- 	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- 		BCD_TO_BIN(year);       /* This should never happen... */
--	
++		pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
++		tmp32 &= ~(0xff << timing_shift);
++		tmp32 |= (mdma_timing[i].recover_width << timing_shift) |
++			 (mdma_timing[i].command_width << (timing_shift + 4));
++		pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);
 +
- 	if (year < 20) {
- 		epoch = 2000;
- 		guess = "SRM (post-2000)";
-@@ -1087,7 +1112,8 @@ no_irq:
- #endif
- 	}
- 	if (guess)
--		printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch);
-+		printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
-+			guess, epoch);
- #endif
- #ifdef RTC_IRQ
- 	if (rtc_has_irq == 0)
-@@ -1096,8 +1122,12 @@ no_irq:
- 	spin_lock_irq(&rtc_lock);
- 	rtc_freq = 1024;
- 	if (!hpet_set_periodic_freq(rtc_freq)) {
--		/* Initialize periodic freq. to CMOS reset default, which is 1024Hz */
--		CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT);
-+		/*
-+		 * Initialize periodic frequency to CMOS reset default,
-+		 * which is 1024Hz
-+		 */
-+		CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
-+			   RTC_FREQ_SELECT);
++		udma_ctl &= ~(1 << drive->dn);
  	}
- 	spin_unlock_irq(&rtc_lock);
- no_irq2:
-@@ -1110,20 +1140,22 @@ no_irq2:
- 	return 0;
+ 
++	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, udma_ctl);
++
+ 	spin_unlock_irqrestore(&atiixp_lock, flags);
  }
  
--static void __exit rtc_exit (void)
-+static void __exit rtc_exit(void)
+@@ -184,9 +149,6 @@ static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
+ 		hwif->cbl = ATA_CBL_PATA80;
+ 	else
+ 		hwif->cbl = ATA_CBL_PATA40;
+-
+-	hwif->dma_host_on = &atiixp_dma_host_on;
+-	hwif->dma_host_off = &atiixp_dma_host_off;
+ }
+ 
+ static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
+diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
+index 4aa4810..da3565e 100644
+--- a/drivers/ide/pci/cmd640.c
++++ b/drivers/ide/pci/cmd640.c
+@@ -706,9 +706,9 @@ static int pci_conf2(void)
+ }
+ 
+ /*
+- * Probe for a cmd640 chipset, and initialize it if found.  Called from ide.c
++ * Probe for a cmd640 chipset, and initialize it if found.
+  */
+-int __init ide_probe_for_cmd640x (void)
++static int __init cmd640x_init(void)
  {
- 	cleanup_sysctl();
--	remove_proc_entry ("driver/rtc", NULL);
-+	remove_proc_entry("driver/rtc", NULL);
- 	misc_deregister(&rtc_dev);
+ #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ 	int second_port_toggled = 0;
+@@ -717,6 +717,7 @@ int __init ide_probe_for_cmd640x (void)
+ 	const char *bus_type, *port2;
+ 	unsigned int index;
+ 	u8 b, cfr;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
  
- #ifdef CONFIG_SPARC32
- 	if (rtc_has_irq)
--		free_irq (rtc_irq, &rtc_port);
-+		free_irq(rtc_irq, &rtc_port);
- #else
- 	rtc_release_region();
- #ifdef RTC_IRQ
--	if (rtc_has_irq)
--		free_irq (RTC_IRQ, NULL);
-+	if (rtc_has_irq) {
-+		free_irq(RTC_IRQ, NULL);
-+		hpet_unregister_irq_handler(hpet_rtc_interrupt);
-+	}
+ 	if (cmd640_vlb && probe_for_cmd640_vlb()) {
+ 		bus_type = "VLB";
+@@ -769,6 +770,8 @@ int __init ide_probe_for_cmd640x (void)
+ 	cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode;
+ #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ 
++	idx[0] = cmd_hwif0->index;
++
+ 	/*
+ 	 * Ensure compatibility by always using the slowest timings
+ 	 * for access to the drive's command register block,
+@@ -826,6 +829,8 @@ int __init ide_probe_for_cmd640x (void)
+ 		cmd_hwif1->pio_mask = ATA_PIO5;
+ 		cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode;
+ #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
++
++		idx[1] = cmd_hwif1->index;
+ 	}
+ 	printk(KERN_INFO "%s: %sserialized, secondary interface %s\n", cmd_hwif1->name,
+ 		cmd_hwif0->serialized ? "" : "not ", port2);
+@@ -872,6 +877,13 @@ int __init ide_probe_for_cmd640x (void)
+ #ifdef CMD640_DUMP_REGS
+ 	cmd640_dump_regs();
  #endif
- #endif /* CONFIG_SPARC32 */
++
++	ide_device_add(idx);
++
+ 	return 1;
  }
-@@ -1133,14 +1165,14 @@ module_exit(rtc_exit);
  
- #ifdef RTC_IRQ
++module_param_named(probe_vlb, cmd640_vlb, bool, 0);
++MODULE_PARM_DESC(probe_vlb, "probe for VLB version of CMD640 chipset");
++
++module_init(cmd640x_init);
+diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
+index bc55333..cd4eb9d 100644
+--- a/drivers/ide/pci/cmd64x.c
++++ b/drivers/ide/pci/cmd64x.c
+@@ -1,5 +1,5 @@
  /*
-- * 	At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
-+ *	At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
-  *	(usually during an IDE disk interrupt, with IRQ unmasking off)
-  *	Since the interrupt handler doesn't get called, the IRQ status
-  *	byte doesn't get read, and the RTC stops generating interrupts.
-  *	A timer is set, and will call this function if/when that happens.
-  *	To get it out of this stalled state, we just read the status.
-  *	At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
-- *	(You *really* shouldn't be trying to use a non-realtime system 
-+ *	(You *really* shouldn't be trying to use a non-realtime system
-  *	for something that requires a steady > 1KHz signal anyways.)
+- * linux/drivers/ide/pci/cmd64x.c		Version 1.52	Dec 24, 2007
++ * linux/drivers/ide/pci/cmd64x.c		Version 1.53	Dec 24, 2007
+  *
+  * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
+  *           Due to massive hardware bugs, UltraDMA is only supported
+@@ -22,8 +22,6 @@
+ 
+ #include <asm/io.h>
+ 
+-#define DISPLAY_CMD64X_TIMINGS
+-
+ #define CMD_DEBUG 0
+ 
+ #if CMD_DEBUG
+@@ -37,11 +35,6 @@
   */
+ #define CFR		0x50
+ #define   CFR_INTR_CH0		0x04
+-#define CNTRL		0x51
+-#define   CNTRL_ENA_1ST 	0x04
+-#define   CNTRL_ENA_2ND 	0x08
+-#define   CNTRL_DIS_RA0 	0x40
+-#define   CNTRL_DIS_RA1 	0x80
  
-@@ -1148,7 +1180,7 @@ static void rtc_dropped_irq(unsigned long data)
+ #define	CMDTIM		0x52
+ #define	ARTTIM0		0x53
+@@ -60,108 +53,13 @@
+ #define MRDMODE		0x71
+ #define   MRDMODE_INTR_CH0	0x04
+ #define   MRDMODE_INTR_CH1	0x08
+-#define   MRDMODE_BLK_CH0	0x10
+-#define   MRDMODE_BLK_CH1	0x20
+-#define BMIDESR0	0x72
+ #define UDIDETCR0	0x73
+ #define DTPR0		0x74
+ #define BMIDECR1	0x78
+ #define BMIDECSR	0x79
+-#define BMIDESR1	0x7A
+ #define UDIDETCR1	0x7B
+ #define DTPR1		0x7C
+ 
+-#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
+-#include <linux/stat.h>
+-#include <linux/proc_fs.h>
+-
+-static u8 cmd64x_proc = 0;
+-
+-#define CMD_MAX_DEVS		5
+-
+-static struct pci_dev *cmd_devs[CMD_MAX_DEVS];
+-static int n_cmd_devs;
+-
+-static char * print_cmd64x_get_info (char *buf, struct pci_dev *dev, int index)
+-{
+-	char *p = buf;
+-	u8 reg72 = 0, reg73 = 0;			/* primary */
+-	u8 reg7a = 0, reg7b = 0;			/* secondary */
+-	u8 reg50 = 1, reg51 = 1, reg57 = 0, reg71 = 0;	/* extra */
+-
+-	p += sprintf(p, "\nController: %d\n", index);
+-	p += sprintf(p, "PCI-%x Chipset.\n", dev->device);
+-
+-	(void) pci_read_config_byte(dev, CFR,       &reg50);
+-	(void) pci_read_config_byte(dev, CNTRL,     &reg51);
+-	(void) pci_read_config_byte(dev, ARTTIM23,  &reg57);
+-	(void) pci_read_config_byte(dev, MRDMODE,   &reg71);
+-	(void) pci_read_config_byte(dev, BMIDESR0,  &reg72);
+-	(void) pci_read_config_byte(dev, UDIDETCR0, &reg73);
+-	(void) pci_read_config_byte(dev, BMIDESR1,  &reg7a);
+-	(void) pci_read_config_byte(dev, UDIDETCR1, &reg7b);
+-
+-	/* PCI0643/6 originally didn't have the primary channel enable bit */
+-	if ((dev->device == PCI_DEVICE_ID_CMD_643) ||
+-	    (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 3))
+-		reg51 |= CNTRL_ENA_1ST;
+-
+-	p += sprintf(p, "---------------- Primary Channel "
+-			"---------------- Secondary Channel ------------\n");
+-	p += sprintf(p, "                 %s                         %s\n",
+-		 (reg51 & CNTRL_ENA_1ST) ? "enabled " : "disabled",
+-		 (reg51 & CNTRL_ENA_2ND) ? "enabled " : "disabled");
+-	p += sprintf(p, "---------------- drive0 --------- drive1 "
+-			"-------- drive0 --------- drive1 ------\n");
+-	p += sprintf(p, "DMA enabled:     %s              %s"
+-			"             %s              %s\n",
+-		(reg72 & 0x20) ? "yes" : "no ", (reg72 & 0x40) ? "yes" : "no ",
+-		(reg7a & 0x20) ? "yes" : "no ", (reg7a & 0x40) ? "yes" : "no ");
+-	p += sprintf(p, "UltraDMA mode:   %s (%c)          %s (%c)",
+-		( reg73 & 0x01) ? " on" : "off",
+-		((reg73 & 0x30) == 0x30) ? ((reg73 & 0x04) ? '3' : '0') :
+-		((reg73 & 0x30) == 0x20) ? ((reg73 & 0x04) ? '3' : '1') :
+-		((reg73 & 0x30) == 0x10) ? ((reg73 & 0x04) ? '4' : '2') :
+-		((reg73 & 0x30) == 0x00) ? ((reg73 & 0x04) ? '5' : '2') : '?',
+-		( reg73 & 0x02) ? " on" : "off",
+-		((reg73 & 0xC0) == 0xC0) ? ((reg73 & 0x08) ? '3' : '0') :
+-		((reg73 & 0xC0) == 0x80) ? ((reg73 & 0x08) ? '3' : '1') :
+-		((reg73 & 0xC0) == 0x40) ? ((reg73 & 0x08) ? '4' : '2') :
+-		((reg73 & 0xC0) == 0x00) ? ((reg73 & 0x08) ? '5' : '2') : '?');
+-	p += sprintf(p, "         %s (%c)          %s (%c)\n",
+-		( reg7b & 0x01) ? " on" : "off",
+-		((reg7b & 0x30) == 0x30) ? ((reg7b & 0x04) ? '3' : '0') :
+-		((reg7b & 0x30) == 0x20) ? ((reg7b & 0x04) ? '3' : '1') :
+-		((reg7b & 0x30) == 0x10) ? ((reg7b & 0x04) ? '4' : '2') :
+-		((reg7b & 0x30) == 0x00) ? ((reg7b & 0x04) ? '5' : '2') : '?',
+-		( reg7b & 0x02) ? " on" : "off",
+-		((reg7b & 0xC0) == 0xC0) ? ((reg7b & 0x08) ? '3' : '0') :
+-		((reg7b & 0xC0) == 0x80) ? ((reg7b & 0x08) ? '3' : '1') :
+-		((reg7b & 0xC0) == 0x40) ? ((reg7b & 0x08) ? '4' : '2') :
+-		((reg7b & 0xC0) == 0x00) ? ((reg7b & 0x08) ? '5' : '2') : '?');
+-	p += sprintf(p, "Interrupt:       %s, %s                 %s, %s\n",
+-		(reg71 & MRDMODE_BLK_CH0  ) ? "blocked" : "enabled",
+-		(reg50 & CFR_INTR_CH0	  ) ? "pending" : "clear  ",
+-		(reg71 & MRDMODE_BLK_CH1  ) ? "blocked" : "enabled",
+-		(reg57 & ARTTIM23_INTR_CH1) ? "pending" : "clear  ");
+-
+-	return (char *)p;
+-}
+-
+-static int cmd64x_get_info (char *buffer, char **addr, off_t offset, int count)
+-{
+-	char *p = buffer;
+-	int i;
+-
+-	for (i = 0; i < n_cmd_devs; i++) {
+-		struct pci_dev *dev	= cmd_devs[i];
+-		p = print_cmd64x_get_info(p, dev, i);
+-	}
+-	return p-buffer;	/* => must be less than 4k! */
+-}
+-
+-#endif	/* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
+-
+ static u8 quantize_timing(int timing, int quant)
  {
- 	unsigned long freq;
+ 	return (timing + quant - 1) / quant;
+@@ -322,8 +220,6 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	case XFER_MW_DMA_0:
+ 		program_cycle_times(drive, 480, 215);
+ 		break;
+-	default:
+-		return;
+ 	}
  
--	spin_lock_irq (&rtc_lock);
-+	spin_lock_irq(&rtc_lock);
+ 	if (speed >= XFER_SW_DMA_0)
+@@ -333,14 +229,15 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ static int cmd648_ide_dma_end (ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif	= HWIF(drive);
++	unsigned long base	= hwif->dma_base - (hwif->channel * 8);
+ 	int err			= __ide_dma_end(drive);
+ 	u8  irq_mask		= hwif->channel ? MRDMODE_INTR_CH1 :
+ 						  MRDMODE_INTR_CH0;
+-	u8  mrdmode		= inb(hwif->dma_master + 0x01);
++	u8  mrdmode		= inb(base + 1);
  
- 	if (hpet_rtc_dropped_irq()) {
- 		spin_unlock_irq(&rtc_lock);
-@@ -1167,13 +1199,15 @@ static void rtc_dropped_irq(unsigned long data)
+ 	/* clear the interrupt bit */
+ 	outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
+-	     hwif->dma_master + 0x01);
++	     base + 1);
  
- 	spin_unlock_irq(&rtc_lock);
+ 	return err;
+ }
+@@ -365,10 +262,11 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
+ static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif	= HWIF(drive);
++	unsigned long base	= hwif->dma_base - (hwif->channel * 8);
+ 	u8 irq_mask		= hwif->channel ? MRDMODE_INTR_CH1 :
+ 						  MRDMODE_INTR_CH0;
+ 	u8 dma_stat		= inb(hwif->dma_status);
+-	u8 mrdmode		= inb(hwif->dma_master + 0x01);
++	u8 mrdmode		= inb(base + 1);
  
--	if (printk_ratelimit())
--		printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq);
-+	if (printk_ratelimit()) {
-+		printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
-+			freq);
-+	}
+ #ifdef DEBUG
+ 	printk("%s: dma_stat: 0x%02x mrdmode: 0x%02x irq_mask: 0x%02x\n",
+@@ -472,16 +370,6 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
+ 	mrdmode &= ~0x30;
+ 	(void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
  
- 	/* Now we have new data */
- 	wake_up_interruptible(&rtc_wait);
+-#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
+-
+-	cmd_devs[n_cmd_devs++] = dev;
+-
+-	if (!cmd64x_proc) {
+-		cmd64x_proc = 1;
+-		ide_pci_create_host_proc("cmd64x", cmd64x_get_info);
+-	}
+-#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_IDE_PROC_FS */
+-
+ 	return 0;
+ }
  
--	kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
-+	kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
+diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
+index 0466462..6ec00b8 100644
+--- a/drivers/ide/pci/cs5520.c
++++ b/drivers/ide/pci/cs5520.c
+@@ -71,7 +71,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	ide_hwif_t *hwif = HWIF(drive);
+ 	struct pci_dev *pdev = hwif->pci_dev;
+ 	int controller = drive->dn > 1 ? 1 : 0;
+-	u8 reg;
+ 
+ 	/* FIXME: if DMA = 1 do we need to set the DMA bit here ? */
+ 
+@@ -91,11 +90,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	pci_write_config_byte(pdev, 0x66 + 4*controller + (drive->dn&1),
+ 		(cs5520_pio_clocks[pio].recovery << 4) |
+ 		(cs5520_pio_clocks[pio].assert));
+-		
+-	/* Set the DMA enable/disable flag */
+-	reg = inb(hwif->dma_base + 0x02 + 8*controller);
+-	reg |= 1<<((drive->dn&1)+5);
+-	outb(reg, hwif->dma_base + 0x02 + 8*controller);
  }
- #endif
  
-@@ -1277,7 +1311,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
- 	 * can take just over 2ms. We wait 20ms. There is no need to
- 	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
- 	 * If you need to know *exactly* when a second has started, enable
--	 * periodic update complete interrupts, (via ioctl) and then 
-+	 * periodic update complete interrupts, (via ioctl) and then
- 	 * immediately read /dev/rtc which will block until you get the IRQ.
- 	 * Once the read clears, read the RTC time (again via ioctl). Easy.
- 	 */
-@@ -1307,8 +1341,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
- 	ctrl = CMOS_READ(RTC_CONTROL);
- 	spin_unlock_irqrestore(&rtc_lock, flags);
+ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
+@@ -109,13 +103,14 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
+  *	We wrap the DMA activate to set the vdma flag. This is needed
+  *	so that the IDE DMA layer issues PIO not DMA commands over the
+  *	DMA channel
++ *
++ *	ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA
+  */
+- 
+-static int cs5520_dma_on(ide_drive_t *drive)
++
++static void cs5520_dma_host_set(ide_drive_t *drive, int on)
+ {
+-	/* ATAPI is harder so leave it for now */
+-	drive->vdma = 1;
+-	return 0;
++	drive->vdma = on;
++	ide_dma_host_set(drive, on);
+ }
  
--	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
--	{
-+	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- 		BCD_TO_BIN(rtc_tm->tm_sec);
- 		BCD_TO_BIN(rtc_tm->tm_min);
- 		BCD_TO_BIN(rtc_tm->tm_hour);
-@@ -1326,7 +1359,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
- 	 * Account for differences between how the RTC uses the values
- 	 * and how they are defined in a struct rtc_time;
- 	 */
--	if ((rtc_tm->tm_year += (epoch - 1900)) <= 69)
-+	rtc_tm->tm_year += epoch - 1900;
-+	if (rtc_tm->tm_year <= 69)
- 		rtc_tm->tm_year += 100;
+ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
+@@ -126,7 +121,7 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
+ 	if (hwif->dma_base == 0)
+ 		return;
  
- 	rtc_tm->tm_mon--;
-@@ -1347,8 +1381,7 @@ static void get_rtc_alm_time(struct rtc_time *alm_tm)
- 	ctrl = CMOS_READ(RTC_CONTROL);
- 	spin_unlock_irq(&rtc_lock);
+-	hwif->ide_dma_on = &cs5520_dma_on;
++	hwif->dma_host_set = &cs5520_dma_host_set;
+ }
  
--	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
--	{
-+	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- 		BCD_TO_BIN(alm_tm->tm_sec);
- 		BCD_TO_BIN(alm_tm->tm_min);
- 		BCD_TO_BIN(alm_tm->tm_hour);
-diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
-index 12ceed5..5732ca3 100644
---- a/drivers/connector/cn_queue.c
-+++ b/drivers/connector/cn_queue.c
-@@ -104,7 +104,6 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id
- 		return -EINVAL;
+ #define DECLARE_CS_DEV(name_str)				\
+@@ -137,6 +132,7 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
+ 				  IDE_HFLAG_CS5520 |		\
+ 				  IDE_HFLAG_VDMA |		\
+ 				  IDE_HFLAG_NO_ATAPI_DMA |	\
++				  IDE_HFLAG_ABUSE_SET_DMA_MODE |\
+ 				  IDE_HFLAG_BOOTABLE,		\
+ 		.pio_mask	= ATA_PIO4,			\
+ 	}
+diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
+index 5476903..df5966b 100644
+--- a/drivers/ide/pci/cs5530.c
++++ b/drivers/ide/pci/cs5530.c
+@@ -116,8 +116,6 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
+ 		case XFER_MW_DMA_0:	timings = 0x00077771; break;
+ 		case XFER_MW_DMA_1:	timings = 0x00012121; break;
+ 		case XFER_MW_DMA_2:	timings = 0x00002020; break;
+-		default:
+-			return;
  	}
+ 	basereg = CS5530_BASEREG(drive->hwif);
+ 	reg = inl(basereg + 4);			/* get drive0 config register */
+diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
+index ddcbeba..50b3d77 100644
+--- a/drivers/ide/pci/cs5535.c
++++ b/drivers/ide/pci/cs5535.c
+@@ -190,7 +190,7 @@ static const struct ide_port_info cs5535_chipset __devinitdata = {
+ 	.name		= "CS5535",
+ 	.init_hwif	= init_hwif_cs5535,
+ 	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE |
+-			  IDE_HFLAG_BOOTABLE,
++			  IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_BOOTABLE,
+ 	.pio_mask	= ATA_PIO4,
+ 	.mwdma_mask	= ATA_MWDMA2,
+ 	.udma_mask	= ATA_UDMA4,
+diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
+index 1cd4e9c..3ec4c65 100644
+--- a/drivers/ide/pci/cy82c693.c
++++ b/drivers/ide/pci/cy82c693.c
+@@ -1,5 +1,5 @@
+ /*
+- * linux/drivers/ide/pci/cy82c693.c		Version 0.42	Oct 23, 2007
++ * linux/drivers/ide/pci/cy82c693.c		Version 0.44	Nov 8, 2007
+  *
+  *  Copyright (C) 1998-2000 Andreas S. Krebs (akrebs at altavista.net), Maintainer
+  *  Copyright (C) 1998-2002 Andre Hedrick <andre at linux-ide.org>, Integrator
+@@ -176,17 +176,12 @@ static void compute_clocks (u8 pio, pio_clocks_t *p_pclk)
+  * set DMA mode a specific channel for CY82C693
+  */
  
--	cbq->nls = dev->nls;
- 	cbq->seq = 0;
- 	cbq->group = cbq->id.id.idx;
+-static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
++static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
+ {
+-	u8 index = 0, data = 0;
++	ide_hwif_t *hwif = drive->hwif;
++	u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
  
-@@ -146,7 +145,6 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
- 	spin_lock_init(&dev->queue_lock);
+-	if (mode>2)	/* make sure we set a valid mode */
+-		mode = 2;
+-			   
+-	if (mode > drive->id->tDMA)  /* to be absolutly sure we have a valid mode */
+-		mode = drive->id->tDMA;
+-	
+-	index = (HWIF(drive)->channel==0) ? CY82_INDEX_CHANNEL0 : CY82_INDEX_CHANNEL1;
++	index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
  
- 	dev->nls = nls;
--	dev->netlink_groups = 0;
+ #if CY82C693_DEBUG_LOGS
+ 	/* for debug let's show the previous values */
+@@ -199,7 +194,7 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
+ 		(data&0x3), ((data>>2)&1));
+ #endif /* CY82C693_DEBUG_LOGS */
  
- 	dev->cn_queue = create_workqueue(dev->name);
- 	if (!dev->cn_queue) {
-diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
-index bf9716b..fea2d3e 100644
---- a/drivers/connector/connector.c
-+++ b/drivers/connector/connector.c
-@@ -88,6 +88,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
- 			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
- 				found = 1;
- 				group = __cbq->group;
-+				break;
- 			}
- 		}
- 		spin_unlock_bh(&dev->cbdev->queue_lock);
-@@ -181,33 +182,14 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
+-	data = (u8)mode|(u8)(single<<2);
++	data = (mode & 3) | (single << 2);
+ 
+ 	outb(index, CY82_INDEX_PORT);
+ 	outb(data, CY82_DATA_PORT);
+@@ -207,7 +202,7 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
+ #if CY82C693_DEBUG_INFO
+ 	printk(KERN_INFO "%s (ch=%d, dev=%d): set DMA mode to %d (single=%d)\n",
+ 		drive->name, HWIF(drive)->channel, drive->select.b.unit,
+-		mode, single);
++		mode & 3, single);
+ #endif /* CY82C693_DEBUG_INFO */
+ 
+ 	/* 
+@@ -230,39 +225,6 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
+ #endif /* CY82C693_DEBUG_INFO */
  }
  
- /*
-- * Skb receive helper - checks skb and msg size and calls callback
-- * helper.
+-/* 
+- * used to set DMA mode for CY82C693 (single and multi modes)
 - */
--static int __cn_rx_skb(struct sk_buff *skb, struct nlmsghdr *nlh)
+-static int cy82c693_ide_dma_on (ide_drive_t *drive)
 -{
--	u32 pid, uid, seq, group;
--	struct cn_msg *msg;
+-	struct hd_driveid *id = drive->id;
 -
--	pid = NETLINK_CREDS(skb)->pid;
--	uid = NETLINK_CREDS(skb)->uid;
--	seq = nlh->nlmsg_seq;
--	group = NETLINK_CB((skb)).dst_group;
--	msg = NLMSG_DATA(nlh);
+-#if CY82C693_DEBUG_INFO
+-	printk (KERN_INFO "dma_on: %s\n", drive->name);
+-#endif /* CY82C693_DEBUG_INFO */
 -
--	return cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
+-	if (id != NULL) {		
+-		/* Enable DMA on any drive that has DMA
+-		 * (multi or single) enabled
+-		 */
+-		if (id->field_valid & 2) {	/* regular DMA */
+-			int mmode, smode;
+-
+-			mmode = id->dma_mword & (id->dma_mword >> 8);
+-			smode = id->dma_1word & (id->dma_1word >> 8);
+-			       		      
+-			if (mmode != 0) {
+-				/* enable multi */
+-				cy82c693_dma_enable(drive, (mmode >> 1), 0);
+-			} else if (smode != 0) {
+-				/* enable single */
+-				cy82c693_dma_enable(drive, (smode >> 1), 1);
+-			}
+-		}
+-	}
+-        return __ide_dma_on(drive);
 -}
 -
--/*
-  * Main netlink receiving function.
-  *
-- * It checks skb and netlink header sizes and calls the skb receive
-- * helper with a shared skb.
-+ * It checks skb, netlink header and msg sizes, and calls callback helper.
-  */
- static void cn_rx_skb(struct sk_buff *__skb)
+ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
  {
-+	struct cn_msg *msg;
- 	struct nlmsghdr *nlh;
--	u32 len;
- 	int err;
- 	struct sk_buff *skb;
- 
-@@ -223,11 +205,8 @@ static void cn_rx_skb(struct sk_buff *__skb)
- 			return;
- 		}
- 
--		len = NLMSG_ALIGN(nlh->nlmsg_len);
--		if (len > skb->len)
--			len = skb->len;
+ 	ide_hwif_t *hwif = HWIF(drive);
+@@ -429,11 +391,7 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
+ static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
+ {
+ 	hwif->set_pio_mode = &cy82c693_set_pio_mode;
 -
--		err = __cn_rx_skb(skb, nlh);
-+		msg = NLMSG_DATA(nlh);
-+		err = cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
- 		if (err < 0)
- 			kfree_skb(skb);
- 	}
-@@ -441,8 +420,7 @@ static int __devinit cn_init(void)
+-	if (hwif->dma_base == 0)
+-		return;
+-
+-	hwif->ide_dma_on = &cy82c693_ide_dma_on;
++	hwif->set_dma_mode = &cy82c693_set_dma_mode;
+ }
  
- 	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
- 	if (!dev->cbdev) {
--		if (dev->nls->sk_socket)
--			sock_release(dev->nls->sk_socket);
-+		netlink_kernel_release(dev->nls);
- 		return -EINVAL;
- 	}
- 	
-@@ -452,8 +430,7 @@ static int __devinit cn_init(void)
- 	if (err) {
- 		cn_already_initialized = 0;
- 		cn_queue_free_dev(dev->cbdev);
--		if (dev->nls->sk_socket)
--			sock_release(dev->nls->sk_socket);
-+		netlink_kernel_release(dev->nls);
- 		return -EINVAL;
- 	}
+ static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
+@@ -454,11 +412,11 @@ static const struct ide_port_info cy82c693_chipset __devinitdata = {
+ 	.init_iops	= init_iops_cy82c693,
+ 	.init_hwif	= init_hwif_cy82c693,
+ 	.chipset	= ide_cy82c693,
+-	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_TRUST_BIOS_FOR_DMA |
++	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_CY82C693 |
+ 			  IDE_HFLAG_BOOTABLE,
+ 	.pio_mask	= ATA_PIO4,
+-	.swdma_mask	= ATA_SWDMA2_ONLY,
+-	.mwdma_mask	= ATA_MWDMA2_ONLY,
++	.swdma_mask	= ATA_SWDMA2,
++	.mwdma_mask	= ATA_MWDMA2,
+ };
  
-@@ -468,8 +445,7 @@ static void __devexit cn_fini(void)
+ static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
+index 8382908..26aa492 100644
+--- a/drivers/ide/pci/delkin_cb.c
++++ b/drivers/ide/pci/delkin_cb.c
+@@ -80,7 +80,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
+ 	hw.irq = dev->irq;
+ 	hw.chipset = ide_pci;		/* this enables IRQ sharing */
  
- 	cn_del_callback(&dev->id);
- 	cn_queue_free_dev(dev->cbdev);
--	if (dev->nls->sk_socket)
--		sock_release(dev->nls->sk_socket);
-+	netlink_kernel_release(dev->nls);
+-	rc = ide_register_hw(&hw, &ide_undecoded_slave, 0, &hwif);
++	rc = ide_register_hw(&hw, &ide_undecoded_slave, &hwif);
+ 	if (rc < 0) {
+ 		printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc);
+ 		pci_disable_device(dev);
+diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
+index ae6307f..dfba0d1 100644
+--- a/drivers/ide/pci/hpt34x.c
++++ b/drivers/ide/pci/hpt34x.c
+@@ -129,14 +129,18 @@ static void __devinit init_hwif_hpt34x(ide_hwif_t *hwif)
+ 	hwif->set_dma_mode = &hpt34x_set_mode;
  }
  
- subsys_initcall(cn_init);
-diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 79581fa..b730d67 100644
---- a/drivers/cpufreq/cpufreq.c
-+++ b/drivers/cpufreq/cpufreq.c
-@@ -828,11 +828,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
- 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
++#define IDE_HFLAGS_HPT34X \
++	(IDE_HFLAG_NO_ATAPI_DMA | \
++	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
++	 IDE_HFLAG_NO_AUTODMA)
++
+ static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
+ 	{ /* 0 */
+ 		.name		= "HPT343",
+ 		.init_chipset	= init_chipset_hpt34x,
+ 		.init_hwif	= init_hwif_hpt34x,
+ 		.extra		= 16,
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
+-				  IDE_HFLAG_NO_AUTODMA,
++		.host_flags	= IDE_HFLAGS_HPT34X,
+ 		.pio_mask	= ATA_PIO5,
+ 	},
+ 	{ /* 1 */
+@@ -144,9 +148,7 @@ static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
+ 		.init_chipset	= init_chipset_hpt34x,
+ 		.init_hwif	= init_hwif_hpt34x,
+ 		.extra		= 16,
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
+-				  IDE_HFLAG_NO_AUTODMA |
+-				  IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD,
+ 		.pio_mask	= ATA_PIO5,
+ #ifdef CONFIG_HPT34X_AUTODMA
+ 		.swdma_mask	= ATA_SWDMA2,
+diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
+index 9fce25b..1268593 100644
+--- a/drivers/ide/pci/hpt366.c
++++ b/drivers/ide/pci/hpt366.c
+@@ -1,5 +1,5 @@
+ /*
+- * linux/drivers/ide/pci/hpt366.c		Version 1.22	Dec 4, 2007
++ * linux/drivers/ide/pci/hpt366.c		Version 1.30	Dec 12, 2007
+  *
+  * Copyright (C) 1999-2003		Andre Hedrick <andre at linux-ide.org>
+  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
+@@ -88,7 +88,7 @@
+  * - rename all the register related variables consistently
+  * - move all the interrupt twiddling code from the speedproc handlers into
+  *   init_hwif_hpt366(), also grouping all the DMA related code together there
+- * - merge two HPT37x speedproc handlers, fix the PIO timing register mask and
++ * - merge HPT36x/HPT37x speedproc handlers, fix PIO timing register mask and
+  *   separate the UltraDMA and MWDMA masks there to avoid changing PIO timings
+  *   when setting an UltraDMA mode
+  * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select
+@@ -458,6 +458,13 @@ enum ata_clock {
+ 	NUM_ATA_CLOCKS
+ };
  
- 	/* prepare interface data */
--	policy->kobj.parent = &sys_dev->kobj;
--	policy->kobj.ktype = &ktype_cpufreq;
--	kobject_set_name(&policy->kobj, "cpufreq");
--
--	ret = kobject_register(&policy->kobj);
-+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
-+				   "cpufreq");
- 	if (ret) {
- 		unlock_policy_rwsem_write(cpu);
- 		goto err_out_driver_exit;
-@@ -902,6 +899,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
- 		goto err_out_unregister;
- 	}
++struct hpt_timings {
++	u32 pio_mask;
++	u32 dma_mask;
++	u32 ultra_mask;
++	u32 *clock_table[NUM_ATA_CLOCKS];
++};
++
+ /*
+  *	Hold all the HighPoint chip information in one place.
+  */
+@@ -468,7 +475,8 @@ struct hpt_info {
+ 	u8 udma_mask;		/* Allowed UltraDMA modes mask. */
+ 	u8 dpll_clk;		/* DPLL clock in MHz */
+ 	u8 pci_clk;		/* PCI  clock in MHz */
+-	u32 **settings; 	/* Chipset settings table */
++	struct hpt_timings *timings; /* Chipset timing data */
++	u8 clock;		/* ATA clock selected */
+ };
  
-+	kobject_uevent(&policy->kobj, KOBJ_ADD);
- 	module_put(cpufreq_driver->owner);
- 	dprintk("initialization complete\n");
- 	cpufreq_debug_enable_ratelimit();
-@@ -915,7 +913,7 @@ err_out_unregister:
- 		cpufreq_cpu_data[j] = NULL;
- 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ /* Supported HighPoint chips */
+@@ -486,20 +494,30 @@ enum {
+ 	HPT371N
+ };
  
--	kobject_unregister(&policy->kobj);
-+	kobject_put(&policy->kobj);
- 	wait_for_completion(&policy->kobj_unregister);
+-static u32 *hpt36x_settings[NUM_ATA_CLOCKS] = {
+-	twenty_five_base_hpt36x,
+-	thirty_three_base_hpt36x,
+-	forty_base_hpt36x,
+-	NULL,
+-	NULL
++static struct hpt_timings hpt36x_timings = {
++	.pio_mask	= 0xc1f8ffff,
++	.dma_mask	= 0x303800ff,
++	.ultra_mask	= 0x30070000,
++	.clock_table	= {
++		[ATA_CLOCK_25MHZ] = twenty_five_base_hpt36x,
++		[ATA_CLOCK_33MHZ] = thirty_three_base_hpt36x,
++		[ATA_CLOCK_40MHZ] = forty_base_hpt36x,
++		[ATA_CLOCK_50MHZ] = NULL,
++		[ATA_CLOCK_66MHZ] = NULL
++	}
+ };
  
- err_out_driver_exit:
-@@ -1032,8 +1030,6 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
+-static u32 *hpt37x_settings[NUM_ATA_CLOCKS] = {
+-	NULL,
+-	thirty_three_base_hpt37x,
+-	NULL,
+-	fifty_base_hpt37x,
+-	sixty_six_base_hpt37x
++static struct hpt_timings hpt37x_timings = {
++	.pio_mask	= 0xcfc3ffff,
++	.dma_mask	= 0x31c001ff,
++	.ultra_mask	= 0x303c0000,
++	.clock_table	= {
++		[ATA_CLOCK_25MHZ] = NULL,
++		[ATA_CLOCK_33MHZ] = thirty_three_base_hpt37x,
++		[ATA_CLOCK_40MHZ] = NULL,
++		[ATA_CLOCK_50MHZ] = fifty_base_hpt37x,
++		[ATA_CLOCK_66MHZ] = sixty_six_base_hpt37x
++	}
+ };
  
- 	unlock_policy_rwsem_write(cpu);
+ static const struct hpt_info hpt36x __devinitdata = {
+@@ -507,7 +525,7 @@ static const struct hpt_info hpt36x __devinitdata = {
+ 	.chip_type	= HPT36x,
+ 	.udma_mask	= HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
+ 	.dpll_clk	= 0,	/* no DPLL */
+-	.settings	= hpt36x_settings
++	.timings	= &hpt36x_timings
+ };
  
--	kobject_unregister(&data->kobj);
--
- 	kobject_put(&data->kobj);
+ static const struct hpt_info hpt370 __devinitdata = {
+@@ -515,7 +533,7 @@ static const struct hpt_info hpt370 __devinitdata = {
+ 	.chip_type	= HPT370,
+ 	.udma_mask	= HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
+ 	.dpll_clk	= 48,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
  
- 	/* we need to make sure that the underlying kobj is actually
-@@ -1608,7 +1604,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
- 	memcpy(&policy->cpuinfo, &data->cpuinfo,
- 				sizeof(struct cpufreq_cpuinfo));
+ static const struct hpt_info hpt370a __devinitdata = {
+@@ -523,7 +541,7 @@ static const struct hpt_info hpt370a __devinitdata = {
+ 	.chip_type	= HPT370A,
+ 	.udma_mask	= HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
+ 	.dpll_clk	= 48,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
  
--	if (policy->min > data->min && policy->min > policy->max) {
-+	if (policy->min > data->max || policy->max < data->min) {
- 		ret = -EINVAL;
- 		goto error_out;
- 	}
-diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
-index 0f3515e..088ea74 100644
---- a/drivers/cpuidle/sysfs.c
-+++ b/drivers/cpuidle/sysfs.c
-@@ -277,7 +277,7 @@ static struct kobj_type ktype_state_cpuidle = {
+ static const struct hpt_info hpt374 __devinitdata = {
+@@ -531,7 +549,7 @@ static const struct hpt_info hpt374 __devinitdata = {
+ 	.chip_type	= HPT374,
+ 	.udma_mask	= ATA_UDMA5,
+ 	.dpll_clk	= 48,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
  
- static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
- {
--	kobject_unregister(&device->kobjs[i]->kobj);
-+	kobject_put(&device->kobjs[i]->kobj);
- 	wait_for_completion(&device->kobjs[i]->kobj_unregister);
- 	kfree(device->kobjs[i]);
- 	device->kobjs[i] = NULL;
-@@ -300,14 +300,13 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
- 		kobj->state = &device->states[i];
- 		init_completion(&kobj->kobj_unregister);
+ static const struct hpt_info hpt372 __devinitdata = {
+@@ -539,7 +557,7 @@ static const struct hpt_info hpt372 __devinitdata = {
+ 	.chip_type	= HPT372,
+ 	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+ 	.dpll_clk	= 55,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
  
--		kobj->kobj.parent = &device->kobj;
--		kobj->kobj.ktype = &ktype_state_cpuidle;
--		kobject_set_name(&kobj->kobj, "state%d", i);
--		ret = kobject_register(&kobj->kobj);
-+		ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
-+					   "state%d", i);
- 		if (ret) {
- 			kfree(kobj);
- 			goto error_state;
- 		}
-+		kobject_uevent(&kobj->kobj, KOBJ_ADD);
- 		device->kobjs[i] = kobj;
- 	}
+ static const struct hpt_info hpt372a __devinitdata = {
+@@ -547,7 +565,7 @@ static const struct hpt_info hpt372a __devinitdata = {
+ 	.chip_type	= HPT372A,
+ 	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+ 	.dpll_clk	= 66,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
  
-@@ -339,12 +338,14 @@ int cpuidle_add_sysfs(struct sys_device *sysdev)
- {
- 	int cpu = sysdev->id;
- 	struct cpuidle_device *dev;
-+	int error;
+ static const struct hpt_info hpt302 __devinitdata = {
+@@ -555,7 +573,7 @@ static const struct hpt_info hpt302 __devinitdata = {
+ 	.chip_type	= HPT302,
+ 	.udma_mask	= HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+ 	.dpll_clk	= 66,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
  
- 	dev = per_cpu(cpuidle_devices, cpu);
--	dev->kobj.parent = &sysdev->kobj;
--	dev->kobj.ktype = &ktype_cpuidle;
--	kobject_set_name(&dev->kobj, "%s", "cpuidle");
--	return kobject_register(&dev->kobj);
-+	error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &sysdev->kobj,
-+				     "cpuidle");
-+	if (!error)
-+		kobject_uevent(&dev->kobj, KOBJ_ADD);
-+	return error;
- }
+ static const struct hpt_info hpt371 __devinitdata = {
+@@ -563,7 +581,7 @@ static const struct hpt_info hpt371 __devinitdata = {
+ 	.chip_type	= HPT371,
+ 	.udma_mask	= HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+ 	.dpll_clk	= 66,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
+ 
+ static const struct hpt_info hpt372n __devinitdata = {
+@@ -571,7 +589,7 @@ static const struct hpt_info hpt372n __devinitdata = {
+ 	.chip_type	= HPT372N,
+ 	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+ 	.dpll_clk	= 77,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
+ 
+ static const struct hpt_info hpt302n __devinitdata = {
+@@ -579,7 +597,7 @@ static const struct hpt_info hpt302n __devinitdata = {
+ 	.chip_type	= HPT302N,
+ 	.udma_mask	= HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+ 	.dpll_clk	= 77,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
  
- /**
-@@ -357,5 +358,5 @@ void cpuidle_remove_sysfs(struct sys_device *sysdev)
- 	struct cpuidle_device *dev;
+ static const struct hpt_info hpt371n __devinitdata = {
+@@ -587,7 +605,7 @@ static const struct hpt_info hpt371n __devinitdata = {
+ 	.chip_type	= HPT371N,
+ 	.udma_mask	= HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+ 	.dpll_clk	= 77,
+-	.settings	= hpt37x_settings
++	.timings	= &hpt37x_timings
+ };
  
- 	dev = per_cpu(cpuidle_devices, cpu);
--	kobject_unregister(&dev->kobj);
-+	kobject_put(&dev->kobj);
+ static int check_in_drive_list(ide_drive_t *drive, const char **list)
+@@ -675,94 +693,50 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
+ 	for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
+ 		if (xfer_speeds[i] == speed)
+ 			break;
+-	/*
+-	 * NOTE: info->settings only points to the pointer
+-	 * to the list of the actual register values
+-	 */
+-	return (*info->settings)[i];
++
++	return info->timings->clock_table[info->clock][i];
  }
-diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
-index ddd3a25..6b658d8 100644
---- a/drivers/crypto/Kconfig
-+++ b/drivers/crypto/Kconfig
-@@ -48,8 +48,6 @@ config CRYPTO_DEV_PADLOCK_SHA
- 	  If unsure say M. The compiled module will be
- 	  called padlock-sha.ko
  
--source "arch/s390/crypto/Kconfig"
+-static void hpt36x_set_mode(ide_drive_t *drive, const u8 speed)
++static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
+ {
+-	ide_hwif_t *hwif	= HWIF(drive);
+-	struct pci_dev  *dev	= hwif->pci_dev;
++	struct pci_dev  *dev	= HWIF(drive)->pci_dev;
+ 	struct hpt_info	*info	= pci_get_drvdata(dev);
+-	u8  itr_addr		= drive->dn ? 0x44 : 0x40;
++	struct hpt_timings *t	= info->timings;
++	u8  itr_addr		= 0x40 + (drive->dn * 4);
+ 	u32 old_itr		= 0;
+-	u32 itr_mask, new_itr;
 -
- config CRYPTO_DEV_GEODE
- 	tristate "Support for the Geode LX AES engine"
- 	depends on X86_32 && PCI
-@@ -83,4 +81,82 @@ config ZCRYPT_MONOLITHIC
- 	  that contains all parts of the crypto device driver (ap bus,
- 	  request router and all the card drivers).
- 
-+config CRYPTO_SHA1_S390
-+	tristate "SHA1 digest algorithm"
-+	depends on S390
-+	select CRYPTO_ALGAPI
-+	help
-+	  This is the s390 hardware accelerated implementation of the
-+	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-+
-+config CRYPTO_SHA256_S390
-+	tristate "SHA256 digest algorithm"
-+	depends on S390
-+	select CRYPTO_ALGAPI
-+	help
-+	  This is the s390 hardware accelerated implementation of the
-+	  SHA256 secure hash standard (DFIPS 180-2).
-+
-+	  This version of SHA implements a 256 bit hash with 128 bits of
-+	  security against collision attacks.
-+
-+config CRYPTO_DES_S390
-+	tristate "DES and Triple DES cipher algorithms"
-+	depends on S390
-+	select CRYPTO_ALGAPI
-+	select CRYPTO_BLKCIPHER
-+	help
-+	  This us the s390 hardware accelerated implementation of the
-+	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
-+
-+config CRYPTO_AES_S390
-+	tristate "AES cipher algorithms"
-+	depends on S390
-+	select CRYPTO_ALGAPI
-+	select CRYPTO_BLKCIPHER
-+	help
-+	  This is the s390 hardware accelerated implementation of the
-+	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-+	  algorithm.
-+
-+	  Rijndael appears to be consistently a very good performer in
-+	  both hardware and software across a wide range of computing
-+	  environments regardless of its use in feedback or non-feedback
-+	  modes. Its key setup time is excellent, and its key agility is
-+	  good. Rijndael's very low memory requirements make it very well
-+	  suited for restricted-space environments, in which it also
-+	  demonstrates excellent performance. Rijndael's operations are
-+	  among the easiest to defend against power and timing attacks.
-+
-+	  On s390 the System z9-109 currently only supports the key size
-+	  of 128 bit.
-+
-+config S390_PRNG
-+	tristate "Pseudo random number generator device driver"
-+	depends on S390
-+	default "m"
-+	help
-+	  Select this option if you want to use the s390 pseudo random number
-+	  generator. The PRNG is part of the cryptographic processor functions
-+	  and uses triple-DES to generate secure random numbers like the
-+	  ANSI X9.17 standard. The PRNG is usable via the char device
-+	  /dev/prandom.
-+
-+config CRYPTO_DEV_HIFN_795X
-+	tristate "Driver HIFN 795x crypto accelerator chips"
-+	select CRYPTO_DES
-+	select CRYPTO_ALGAPI
-+	select CRYPTO_BLKCIPHER
-+	select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
-+	depends on PCI
-+	help
-+	  This option allows you to have support for HIFN 795x crypto adapters.
-+
-+config CRYPTO_DEV_HIFN_795X_RNG
-+	bool "HIFN 795x random number generator"
-+	depends on CRYPTO_DEV_HIFN_795X
-+	help
-+	  Select this option if you want to enable the random number generator
-+	  on the HIFN 795x crypto adapters.
-+
- endif # CRYPTO_HW
-diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
-index d070030..c0327f0 100644
---- a/drivers/crypto/Makefile
-+++ b/drivers/crypto/Makefile
-@@ -1,3 +1,4 @@
- obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
- obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
- obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
-+obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
-diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
-index 711e246..4801162 100644
---- a/drivers/crypto/geode-aes.c
-+++ b/drivers/crypto/geode-aes.c
-@@ -13,44 +13,13 @@
- #include <linux/crypto.h>
- #include <linux/spinlock.h>
- #include <crypto/algapi.h>
-+#include <crypto/aes.h>
+-	itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 :
+-		  (speed < XFER_UDMA_0   ? 0xc0070000 : 0xc03800ff);
+-
+-	new_itr = get_speed_setting(speed, info);
++	u32 new_itr		= get_speed_setting(speed, info);
++	u32 itr_mask		= speed < XFER_MW_DMA_0 ? t->pio_mask :
++				 (speed < XFER_UDMA_0   ? t->dma_mask :
++							  t->ultra_mask);
  
- #include <asm/io.h>
- #include <asm/delay.h>
++	pci_read_config_dword(dev, itr_addr, &old_itr);
++	new_itr = (old_itr & ~itr_mask) | (new_itr & itr_mask);
+ 	/*
+ 	 * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
+ 	 * to avoid problems handling I/O errors later
+ 	 */
+-	pci_read_config_dword(dev, itr_addr, &old_itr);
+-	new_itr  = (new_itr & ~itr_mask) | (old_itr & itr_mask);
+ 	new_itr &= ~0xc0000000;
  
- #include "geode-aes.h"
+ 	pci_write_config_dword(dev, itr_addr, new_itr);
+ }
  
--/* Register definitions */
--
--#define AES_CTRLA_REG  0x0000
--
--#define AES_CTRL_START     0x01
--#define AES_CTRL_DECRYPT   0x00
--#define AES_CTRL_ENCRYPT   0x02
--#define AES_CTRL_WRKEY     0x04
--#define AES_CTRL_DCA       0x08
--#define AES_CTRL_SCA       0x10
--#define AES_CTRL_CBC       0x20
+-static void hpt37x_set_mode(ide_drive_t *drive, const u8 speed)
+-{
+-	ide_hwif_t *hwif	= HWIF(drive);
+-	struct pci_dev  *dev	= hwif->pci_dev;
+-	struct hpt_info	*info	= pci_get_drvdata(dev);
+-	u8  itr_addr		= 0x40 + (drive->dn * 4);
+-	u32 old_itr		= 0;
+-	u32 itr_mask, new_itr;
 -
--#define AES_INTR_REG  0x0008
+-	itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 :
+-		  (speed < XFER_UDMA_0   ? 0xc03c0000 : 0xc1c001ff);
 -
--#define AES_INTRA_PENDING (1 << 16)
--#define AES_INTRB_PENDING (1 << 17)
+-	new_itr = get_speed_setting(speed, info);
 -
--#define AES_INTR_PENDING  (AES_INTRA_PENDING | AES_INTRB_PENDING)
--#define AES_INTR_MASK     0x07
+-	pci_read_config_dword(dev, itr_addr, &old_itr);
+-	new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask);
+-	
+-	if (speed < XFER_MW_DMA_0)
+-		new_itr &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
+-	pci_write_config_dword(dev, itr_addr, new_itr);
+-}
 -
--#define AES_SOURCEA_REG   0x0010
--#define AES_DSTA_REG      0x0014
--#define AES_LENA_REG      0x0018
--#define AES_WRITEKEY0_REG 0x0030
--#define AES_WRITEIV0_REG  0x0040
+-static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
+-{
+-	ide_hwif_t *hwif	= HWIF(drive);
+-	struct hpt_info	*info	= pci_get_drvdata(hwif->pci_dev);
 -
--/*  A very large counter that is used to gracefully bail out of an
-- *  operation in case of trouble
-- */
+-	if (info->chip_type >= HPT370)
+-		hpt37x_set_mode(drive, speed);
+-	else	/* hpt368: hpt_minimum_revision(dev, 2) */
+-		hpt36x_set_mode(drive, speed);
+-}
 -
--#define AES_OP_TIMEOUT    0x50000
+ static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ {
+ 	hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
+ }
+ 
+-static int hpt3xx_quirkproc(ide_drive_t *drive)
++static void hpt3xx_quirkproc(ide_drive_t *drive)
+ {
+ 	struct hd_driveid *id	= drive->id;
+ 	const  char **list	= quirk_drives;
+ 
+ 	while (*list)
+-		if (strstr(id->model, *list++))
+-			return 1;
+-	return 0;
+-}
 -
- /* Static structures */
+-static void hpt3xx_intrproc(ide_drive_t *drive)
+-{
+-	if (drive->quirk_list)
+-		return;
++		if (strstr(id->model, *list++)) {
++			drive->quirk_list = 1;
++			return;
++		}
  
- static void __iomem * _iobase;
-@@ -87,9 +56,10 @@ do_crypt(void *src, void *dst, int len, u32 flags)
- 	/* Start the operation */
- 	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
+-	/* drives in the quirk_list may not like intr setups/cleanups */
+-	outb(drive->ctl | 2, IDE_CONTROL_REG);
++	drive->quirk_list = 0;
+ }
  
--	do
-+	do {
- 		status = ioread32(_iobase + AES_INTR_REG);
--	while(!(status & AES_INTRA_PENDING) && --counter);
-+		cpu_relax();
-+	} while(!(status & AES_INTRA_PENDING) && --counter);
+ static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
+@@ -914,32 +888,33 @@ static int hpt374_ide_dma_end(ide_drive_t *drive)
  
- 	/* Clear the event */
- 	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
-@@ -101,6 +71,7 @@ geode_aes_crypt(struct geode_aes_op *op)
+ static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
  {
- 	u32 flags = 0;
- 	unsigned long iflags;
-+	int ret;
- 
- 	if (op->len == 0)
- 		return 0;
-@@ -129,7 +100,8 @@ geode_aes_crypt(struct geode_aes_op *op)
- 		_writefield(AES_WRITEKEY0_REG, op->key);
- 	}
+-	u8 scr2 = inb(hwif->dma_master + 0x7b);
++	unsigned long base = hwif->extra_base;
++	u8 scr2 = inb(base + 0x6b);
  
--	do_crypt(op->src, op->dst, op->len, flags);
-+	ret = do_crypt(op->src, op->dst, op->len, flags);
-+	BUG_ON(ret);
+ 	if ((scr2 & 0x7f) == mode)
+ 		return;
  
- 	if (op->mode == AES_MODE_CBC)
- 		_readfield(AES_WRITEIV0_REG, op->iv);
-@@ -141,18 +113,103 @@ geode_aes_crypt(struct geode_aes_op *op)
+ 	/* Tristate the bus */
+-	outb(0x80, hwif->dma_master + 0x73);
+-	outb(0x80, hwif->dma_master + 0x77);
++	outb(0x80, base + 0x63);
++	outb(0x80, base + 0x67);
  
- /* CRYPTO-API Functions */
+ 	/* Switch clock and reset channels */
+-	outb(mode, hwif->dma_master + 0x7b);
+-	outb(0xc0, hwif->dma_master + 0x79);
++	outb(mode, base + 0x6b);
++	outb(0xc0, base + 0x69);
  
--static int
--geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
-+static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
-+		unsigned int len)
- {
- 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
-+	unsigned int ret;
+ 	/*
+ 	 * Reset the state machines.
+ 	 * NOTE: avoid accidentally enabling the disabled channels.
+ 	 */
+-	outb(inb(hwif->dma_master + 0x70) | 0x32, hwif->dma_master + 0x70);
+-	outb(inb(hwif->dma_master + 0x74) | 0x32, hwif->dma_master + 0x74);
++	outb(inb(base + 0x60) | 0x32, base + 0x60);
++	outb(inb(base + 0x64) | 0x32, base + 0x64);
  
--	if (len != AES_KEY_LENGTH) {
-+	op->keylen = len;
-+
-+	if (len == AES_KEYSIZE_128) {
-+		memcpy(op->key, key, len);
-+		return 0;
-+	}
-+
-+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
-+		/* not supported at all */
- 		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- 		return -EINVAL;
- 	}
+ 	/* Complete reset */
+-	outb(0x00, hwif->dma_master + 0x79);
++	outb(0x00, base + 0x69);
  
--	memcpy(op->key, key, len);
--	return 0;
-+	/*
-+	 * The requested key size is not supported by HW, do a fallback
-+	 */
-+	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-+	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
-+
-+	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
-+	if (ret) {
-+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-+		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
-+	}
-+	return ret;
-+}
-+
-+static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
-+		unsigned int len)
-+{
-+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
-+	unsigned int ret;
-+
-+	op->keylen = len;
-+
-+	if (len == AES_KEYSIZE_128) {
-+		memcpy(op->key, key, len);
-+		return 0;
-+	}
-+
-+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
-+		/* not supported at all */
-+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-+		return -EINVAL;
-+	}
-+
-+	/*
-+	 * The requested key size is not supported by HW, do a fallback
-+	 */
-+	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-+	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
-+
-+	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
-+	if (ret) {
-+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-+		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
-+	}
-+	return ret;
-+}
-+
-+static int fallback_blk_dec(struct blkcipher_desc *desc,
-+		struct scatterlist *dst, struct scatterlist *src,
-+		unsigned int nbytes)
-+{
-+	unsigned int ret;
-+	struct crypto_blkcipher *tfm;
-+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
-+
-+	tfm = desc->tfm;
-+	desc->tfm = op->fallback.blk;
-+
-+	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
-+
-+	desc->tfm = tfm;
-+	return ret;
-+}
-+static int fallback_blk_enc(struct blkcipher_desc *desc,
-+		struct scatterlist *dst, struct scatterlist *src,
-+		unsigned int nbytes)
-+{
-+	unsigned int ret;
-+	struct crypto_blkcipher *tfm;
-+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
-+
-+	tfm = desc->tfm;
-+	desc->tfm = op->fallback.blk;
-+
-+	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
-+
-+	desc->tfm = tfm;
-+	return ret;
+ 	/* Reconnect channels to bus */
+-	outb(0x00, hwif->dma_master + 0x73);
+-	outb(0x00, hwif->dma_master + 0x77);
++	outb(0x00, base + 0x63);
++	outb(0x00, base + 0x67);
  }
  
- static void
-@@ -160,8 +217,10 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- {
- 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ /**
+@@ -1210,7 +1185,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
+ 	 * We also  don't like using  the DPLL because this causes glitches
+ 	 * on PRST-/SRST- when the state engine gets reset...
+ 	 */
+-	if (chip_type >= HPT374 || info->settings[clock] == NULL) {
++	if (chip_type >= HPT374 || info->timings->clock_table[clock] == NULL) {
+ 		u16 f_low, delta = pci_clk < 50 ? 2 : 4;
+ 		int adjust;
  
--	if ((out == NULL) || (in == NULL))
-+	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
-+		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
- 		return;
-+	}
+@@ -1226,7 +1201,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
+ 			clock = ATA_CLOCK_50MHZ;
+ 		}
  
- 	op->src = (void *) in;
- 	op->dst = (void *) out;
-@@ -179,8 +238,10 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- {
- 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+-		if (info->settings[clock] == NULL) {
++		if (info->timings->clock_table[clock] == NULL) {
+ 			printk(KERN_ERR "%s: unknown bus timing!\n", name);
+ 			kfree(info);
+ 			return -EIO;
+@@ -1267,15 +1242,10 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
+ 		printk("%s: using %d MHz PCI clock\n", name, pci_clk);
+ 	}
  
--	if ((out == NULL) || (in == NULL))
-+	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
-+		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
- 		return;
-+	}
+-	/*
+-	 * Advance the table pointer to a slot which points to the list
+-	 * of the register values settings matching the clock being used.
+-	 */
+-	info->settings += clock;
+-
+ 	/* Store the clock frequencies. */
+ 	info->dpll_clk	= dpll_clk;
+ 	info->pci_clk	= pci_clk;
++	info->clock	= clock;
  
- 	op->src = (void *) in;
- 	op->dst = (void *) out;
-@@ -192,24 +253,50 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- 	geode_aes_crypt(op);
- }
+ 	/* Point to this chip's own instance of the hpt_info structure. */
+ 	pci_set_drvdata(dev, info);
+@@ -1320,8 +1290,8 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
  
-+static int fallback_init_cip(struct crypto_tfm *tfm)
-+{
-+	const char *name = tfm->__crt_alg->cra_name;
-+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
-+
-+	op->fallback.cip = crypto_alloc_cipher(name, 0,
-+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ 	hwif->set_pio_mode	= &hpt3xx_set_pio_mode;
+ 	hwif->set_dma_mode	= &hpt3xx_set_mode;
 +
-+	if (IS_ERR(op->fallback.cip)) {
-+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
-+		return PTR_ERR(op->fallback.blk);
-+	}
+ 	hwif->quirkproc		= &hpt3xx_quirkproc;
+-	hwif->intrproc		= &hpt3xx_intrproc;
+ 	hwif->maskproc		= &hpt3xx_maskproc;
+ 	hwif->busproc		= &hpt3xx_busproc;
+ 
+@@ -1494,6 +1464,11 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
+ 	return 0;
+ }
+ 
++#define IDE_HFLAGS_HPT3XX \
++	(IDE_HFLAG_NO_ATAPI_DMA | \
++	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
++	 IDE_HFLAG_OFF_BOARD)
 +
-+	return 0;
-+}
+ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+ 	{	/* 0 */
+ 		.name		= "HPT36x",
+@@ -1508,9 +1483,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+ 		 */
+ 		.enablebits	= {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
+ 		.extra		= 240,
+-		.host_flags	= IDE_HFLAG_SINGLE |
+-				  IDE_HFLAG_NO_ATAPI_DMA |
+-				  IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 	},{	/* 1 */
+@@ -1520,7 +1493,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+ 		.init_dma	= init_dma_hpt366,
+ 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+ 		.extra		= 240,
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAGS_HPT3XX,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 	},{	/* 2 */
+@@ -1530,7 +1503,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+ 		.init_dma	= init_dma_hpt366,
+ 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+ 		.extra		= 240,
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAGS_HPT3XX,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 	},{	/* 3 */
+@@ -1540,7 +1513,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+ 		.init_dma	= init_dma_hpt366,
+ 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+ 		.extra		= 240,
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAGS_HPT3XX,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 	},{	/* 4 */
+@@ -1551,7 +1524,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+ 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+ 		.udma_mask	= ATA_UDMA5,
+ 		.extra		= 240,
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAGS_HPT3XX,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 	},{	/* 5 */
+@@ -1561,7 +1534,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+ 		.init_dma	= init_dma_hpt366,
+ 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+ 		.extra		= 240,
+-		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAGS_HPT3XX,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 	}
+diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
+index 90b52ed..2a0f45c 100644
+--- a/drivers/ide/pci/it8213.c
++++ b/drivers/ide/pci/it8213.c
+@@ -101,24 +101,11 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	pci_read_config_byte(dev, 0x54, &reg54);
+ 	pci_read_config_byte(dev, 0x55, &reg55);
+ 
+-	switch(speed) {
+-		case XFER_UDMA_6:
+-		case XFER_UDMA_4:
+-		case XFER_UDMA_2:	u_speed = 2 << (drive->dn * 4); break;
+-		case XFER_UDMA_5:
+-		case XFER_UDMA_3:
+-		case XFER_UDMA_1:	u_speed = 1 << (drive->dn * 4); break;
+-		case XFER_UDMA_0:	u_speed = 0 << (drive->dn * 4); break;
+-			break;
+-		case XFER_MW_DMA_2:
+-		case XFER_MW_DMA_1:
+-		case XFER_SW_DMA_2:
+-			break;
+-		default:
+-			return;
+-	}
+-
+ 	if (speed >= XFER_UDMA_0) {
++		u8 udma = speed - XFER_UDMA_0;
 +
-+static void fallback_exit_cip(struct crypto_tfm *tfm)
-+{
-+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++		u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
 +
-+	crypto_free_cipher(op->fallback.cip);
-+	op->fallback.cip = NULL;
-+}
+ 		if (!(reg48 & u_flag))
+ 			pci_write_config_byte(dev, 0x48, reg48 | u_flag);
+ 		if (speed >= XFER_UDMA_5) {
+diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
+index 99b7d76..e610a53 100644
+--- a/drivers/ide/pci/it821x.c
++++ b/drivers/ide/pci/it821x.c
+@@ -431,33 +431,29 @@ static u8 __devinit ata66_it821x(ide_hwif_t *hwif)
+ }
  
- static struct crypto_alg geode_alg = {
--	.cra_name               =       "aes",
--	.cra_driver_name	=       "geode-aes-128",
--	.cra_priority           =       300,
--	.cra_alignmask          =       15,
--	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
-+	.cra_name			=	"aes",
-+	.cra_driver_name	=	"geode-aes",
-+	.cra_priority		=	300,
-+	.cra_alignmask		=	15,
-+	.cra_flags			=	CRYPTO_ALG_TYPE_CIPHER |
-+							CRYPTO_ALG_NEED_FALLBACK,
-+	.cra_init			=	fallback_init_cip,
-+	.cra_exit			=	fallback_exit_cip,
- 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
- 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
--	.cra_module		=	THIS_MODULE,
--	.cra_list		=	LIST_HEAD_INIT(geode_alg.cra_list),
--	.cra_u			=	{
--		.cipher = {
--			.cia_min_keysize	=  AES_KEY_LENGTH,
--			.cia_max_keysize	=  AES_KEY_LENGTH,
--			.cia_setkey		=  geode_setkey,
--			.cia_encrypt		=  geode_encrypt,
--			.cia_decrypt		=  geode_decrypt
-+	.cra_module			=	THIS_MODULE,
-+	.cra_list			=	LIST_HEAD_INIT(geode_alg.cra_list),
-+	.cra_u				=	{
-+		.cipher	=	{
-+			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
-+			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
-+			.cia_setkey			=	geode_setkey_cip,
-+			.cia_encrypt		=	geode_encrypt,
-+			.cia_decrypt		=	geode_decrypt
- 		}
- 	}
- };
-@@ -223,8 +310,12 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
- 	struct blkcipher_walk walk;
- 	int err, ret;
+ /**
+- *	it821x_fixup	-	post init callback
+- *	@hwif: interface
++ *	it821x_quirkproc	-	post init callback
++ *	@drive: drive
+  *
+- *	This callback is run after the drives have been probed but
++ *	This callback is run after the drive has been probed but
+  *	before anything gets attached. It allows drivers to do any
+  *	final tuning that is needed, or fixups to work around bugs.
+  */
  
-+	if (unlikely(op->keylen != AES_KEYSIZE_128))
-+		return fallback_blk_dec(desc, dst, src, nbytes);
-+
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	err = blkcipher_walk_virt(desc, &walk);
-+	op->iv = walk.iv;
+-static void __devinit it821x_fixups(ide_hwif_t *hwif)
++static void __devinit it821x_quirkproc(ide_drive_t *drive)
+ {
+-	struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+-	int i;
++	struct it821x_dev *itdev = ide_get_hwifdata(drive->hwif);
++	struct hd_driveid *id = drive->id;
++	u16 *idbits = (u16 *)drive->id;
  
- 	while((nbytes = walk.nbytes)) {
- 		op->src = walk.src.virt.addr,
-@@ -233,13 +324,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
- 		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
- 		op->dir = AES_DIR_DECRYPT;
+-	if(!itdev->smart) {
++	if (!itdev->smart) {
+ 		/*
+ 		 *	If we are in pass through mode then not much
+ 		 *	needs to be done, but we do bother to clear the
+ 		 *	IRQ mask as we may well be in PIO (eg rev 0x10)
+ 		 *	for now and we know unmasking is safe on this chipset.
+ 		 */
+-		for (i = 0; i < 2; i++) {
+-			ide_drive_t *drive = &hwif->drives[i];
+-			if(drive->present)
+-				drive->unmask = 1;
+-		}
+-		return;
+-	}
++		drive->unmask = 1;
++	} else {
+ 	/*
+ 	 *	Perform fixups on smart mode. We need to "lose" some
+ 	 *	capabilities the firmware lacks but does not filter, and
+@@ -465,16 +461,6 @@ static void __devinit it821x_fixups(ide_hwif_t *hwif)
+ 	 *	in RAID mode.
+ 	 */
  
--		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
+-	for(i = 0; i < 2; i++) {
+-		ide_drive_t *drive = &hwif->drives[i];
+-		struct hd_driveid *id;
+-		u16 *idbits;
 -
- 		ret = geode_aes_crypt(op);
- 
--		memcpy(walk.iv, op->iv, AES_IV_LENGTH);
- 		nbytes -= ret;
+-		if(!drive->present)
+-			continue;
+-		id = drive->id;
+-		idbits = (u16 *)drive->id;
 -
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
- 	}
- 
-@@ -255,8 +342,12 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
- 	struct blkcipher_walk walk;
- 	int err, ret;
+ 		/* Check for RAID v native */
+ 		if(strstr(id->model, "Integrated Technology Express")) {
+ 			/* In raid mode the ident block is slightly buggy
+@@ -537,6 +523,8 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
+ 	struct it821x_dev *idev = kzalloc(sizeof(struct it821x_dev), GFP_KERNEL);
+ 	u8 conf;
  
-+	if (unlikely(op->keylen != AES_KEYSIZE_128))
-+		return fallback_blk_enc(desc, dst, src, nbytes);
++	hwif->quirkproc = &it821x_quirkproc;
 +
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	err = blkcipher_walk_virt(desc, &walk);
-+	op->iv = walk.iv;
+ 	if (idev == NULL) {
+ 		printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n");
+ 		return;
+@@ -633,7 +621,6 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha
+ 		.name		= name_str,		\
+ 		.init_chipset	= init_chipset_it821x,	\
+ 		.init_hwif	= init_hwif_it821x,	\
+-		.fixup	 	= it821x_fixups,	\
+ 		.host_flags	= IDE_HFLAG_BOOTABLE,	\
+ 		.pio_mask	= ATA_PIO4,		\
+ 	}
+diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
+index 2b4f44e..89d2363 100644
+--- a/drivers/ide/pci/pdc202xx_new.c
++++ b/drivers/ide/pci/pdc202xx_new.c
+@@ -146,7 +146,7 @@ static struct udma_timing {
+ 	{ 0x1a, 0x01, 0xcb },	/* UDMA mode 6 */
+ };
  
- 	while((nbytes = walk.nbytes)) {
- 		op->src = walk.src.virt.addr,
-@@ -265,8 +356,6 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
- 		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
- 		op->dir = AES_DIR_ENCRYPT;
+-static void pdcnew_set_mode(ide_drive_t *drive, const u8 speed)
++static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ {
+ 	ide_hwif_t *hwif	= HWIF(drive);
+ 	u8 adj			= (drive->dn & 1) ? 0x08 : 0x00;
+@@ -162,45 +162,18 @@ static void pdcnew_set_mode(ide_drive_t *drive, const u8 speed)
+ 	if (max_dma_rate(hwif->pci_dev) == 4) {
+ 		u8 mode = speed & 0x07;
  
--		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
+-		switch (speed) {
+-			case XFER_UDMA_6:
+-			case XFER_UDMA_5:
+-			case XFER_UDMA_4:
+-			case XFER_UDMA_3:
+-			case XFER_UDMA_2:
+-			case XFER_UDMA_1:
+-			case XFER_UDMA_0:
+-				set_indexed_reg(hwif, 0x10 + adj,
+-						udma_timings[mode].reg10);
+-				set_indexed_reg(hwif, 0x11 + adj,
+-						udma_timings[mode].reg11);
+-				set_indexed_reg(hwif, 0x12 + adj,
+-						udma_timings[mode].reg12);
+-				break;
 -
- 		ret = geode_aes_crypt(op);
- 		nbytes -= ret;
- 		err = blkcipher_walk_done(desc, &walk, nbytes);
-@@ -275,22 +364,49 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
- 	return err;
- }
+-			case XFER_MW_DMA_2:
+-			case XFER_MW_DMA_1:
+-			case XFER_MW_DMA_0:
+-				set_indexed_reg(hwif, 0x0e + adj,
+-						mwdma_timings[mode].reg0e);
+-				set_indexed_reg(hwif, 0x0f + adj,
+-						mwdma_timings[mode].reg0f);
+-				break;
+-			case XFER_PIO_4:
+-			case XFER_PIO_3:
+-			case XFER_PIO_2:
+-			case XFER_PIO_1:
+-			case XFER_PIO_0:
+-				set_indexed_reg(hwif, 0x0c + adj,
+-						pio_timings[mode].reg0c);
+-				set_indexed_reg(hwif, 0x0d + adj,
+-						pio_timings[mode].reg0d);
+-				set_indexed_reg(hwif, 0x13 + adj,
+-						pio_timings[mode].reg13);
+-				break;
+-			default:
+-				printk(KERN_ERR "pdc202xx_new: "
+-				       "Unknown speed %d ignored\n", speed);
++		if (speed >= XFER_UDMA_0) {
++			set_indexed_reg(hwif, 0x10 + adj,
++					udma_timings[mode].reg10);
++			set_indexed_reg(hwif, 0x11 + adj,
++					udma_timings[mode].reg11);
++			set_indexed_reg(hwif, 0x12 + adj,
++					udma_timings[mode].reg12);
++		} else {
++			set_indexed_reg(hwif, 0x0e + adj,
++					mwdma_timings[mode].reg0e);
++			set_indexed_reg(hwif, 0x0f + adj,
++					mwdma_timings[mode].reg0f);
+ 		}
+ 	} else if (speed == XFER_UDMA_2) {
+ 		/* Set tHOLD bit to 0 if using UDMA mode 2 */
+@@ -212,7 +185,14 @@ static void pdcnew_set_mode(ide_drive_t *drive, const u8 speed)
  
-+static int fallback_init_blk(struct crypto_tfm *tfm)
-+{
-+	const char *name = tfm->__crt_alg->cra_name;
-+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
-+
-+	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
-+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ {
+-	pdcnew_set_mode(drive, XFER_PIO_0 + pio);
++	ide_hwif_t *hwif = drive->hwif;
++	u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
 +
-+	if (IS_ERR(op->fallback.blk)) {
-+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
-+		return PTR_ERR(op->fallback.blk);
++	if (max_dma_rate(hwif->pci_dev) == 4) {
++		set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
++		set_indexed_reg(hwif, 0x0d + adj, pio_timings[pio].reg0d);
++		set_indexed_reg(hwif, 0x13 + adj, pio_timings[pio].reg13);
 +	}
-+
-+	return 0;
-+}
-+
-+static void fallback_exit_blk(struct crypto_tfm *tfm)
-+{
-+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
-+
-+	crypto_free_blkcipher(op->fallback.blk);
-+	op->fallback.blk = NULL;
-+}
-+
- static struct crypto_alg geode_cbc_alg = {
- 	.cra_name		=	"cbc(aes)",
--	.cra_driver_name	=	"cbc-aes-geode-128",
-+	.cra_driver_name	=	"cbc-aes-geode",
- 	.cra_priority		=	400,
--	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-+	.cra_flags			=	CRYPTO_ALG_TYPE_BLKCIPHER |
-+							CRYPTO_ALG_NEED_FALLBACK,
-+	.cra_init			=	fallback_init_blk,
-+	.cra_exit			=	fallback_exit_blk,
- 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
- 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
- 	.cra_alignmask		=	15,
--	.cra_type		=	&crypto_blkcipher_type,
--	.cra_module		=	THIS_MODULE,
--	.cra_list		=	LIST_HEAD_INIT(geode_cbc_alg.cra_list),
--	.cra_u			=	{
--		.blkcipher = {
--			.min_keysize		=	AES_KEY_LENGTH,
--			.max_keysize		=	AES_KEY_LENGTH,
--			.setkey			=	geode_setkey,
-+	.cra_type			=	&crypto_blkcipher_type,
-+	.cra_module			=	THIS_MODULE,
-+	.cra_list			=	LIST_HEAD_INIT(geode_cbc_alg.cra_list),
-+	.cra_u				=	{
-+		.blkcipher	=	{
-+			.min_keysize	=	AES_MIN_KEY_SIZE,
-+			.max_keysize	=	AES_MAX_KEY_SIZE,
-+			.setkey			=	geode_setkey_blk,
- 			.encrypt		=	geode_cbc_encrypt,
- 			.decrypt		=	geode_cbc_decrypt,
- 			.ivsize			=	AES_IV_LENGTH,
-@@ -307,6 +423,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
- 	struct blkcipher_walk walk;
- 	int err, ret;
+ }
  
-+	if (unlikely(op->keylen != AES_KEYSIZE_128))
-+		return fallback_blk_dec(desc, dst, src, nbytes);
-+
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	err = blkcipher_walk_virt(desc, &walk);
+ static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
+@@ -223,14 +203,17 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
+ 		return ATA_CBL_PATA80;
+ }
  
-@@ -334,6 +453,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
- 	struct blkcipher_walk walk;
- 	int err, ret;
+-static int pdcnew_quirkproc(ide_drive_t *drive)
++static void pdcnew_quirkproc(ide_drive_t *drive)
+ {
+ 	const char **list, *model = drive->id->model;
  
-+	if (unlikely(op->keylen != AES_KEYSIZE_128))
-+		return fallback_blk_enc(desc, dst, src, nbytes);
+ 	for (list = pdc_quirk_drives; *list != NULL; list++)
+-		if (strstr(model, *list) != NULL)
+-			return 2;
+-	return 0;
++		if (strstr(model, *list) != NULL) {
++			drive->quirk_list = 2;
++			return;
++		}
 +
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	err = blkcipher_walk_virt(desc, &walk);
- 
-@@ -353,28 +475,31 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
++	drive->quirk_list = 0;
  }
  
- static struct crypto_alg geode_ecb_alg = {
--	.cra_name		=	"ecb(aes)",
--	.cra_driver_name	=	"ecb-aes-geode-128",
-+	.cra_name			=	"ecb(aes)",
-+	.cra_driver_name	=	"ecb-aes-geode",
- 	.cra_priority		=	400,
--	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-+	.cra_flags			=	CRYPTO_ALG_TYPE_BLKCIPHER |
-+							CRYPTO_ALG_NEED_FALLBACK,
-+	.cra_init			=	fallback_init_blk,
-+	.cra_exit			=	fallback_exit_blk,
- 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
- 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
- 	.cra_alignmask		=	15,
--	.cra_type		=	&crypto_blkcipher_type,
--	.cra_module		=	THIS_MODULE,
--	.cra_list		=	LIST_HEAD_INIT(geode_ecb_alg.cra_list),
--	.cra_u			=	{
--		.blkcipher = {
--			.min_keysize		=	AES_KEY_LENGTH,
--			.max_keysize		=	AES_KEY_LENGTH,
--			.setkey			=	geode_setkey,
-+	.cra_type			=	&crypto_blkcipher_type,
-+	.cra_module			=	THIS_MODULE,
-+	.cra_list			=	LIST_HEAD_INIT(geode_ecb_alg.cra_list),
-+	.cra_u				=	{
-+		.blkcipher	=	{
-+			.min_keysize	=	AES_MIN_KEY_SIZE,
-+			.max_keysize	=	AES_MAX_KEY_SIZE,
-+			.setkey			=	geode_setkey_blk,
- 			.encrypt		=	geode_ecb_encrypt,
- 			.decrypt		=	geode_ecb_decrypt,
- 		}
- 	}
- };
- 
--static void
-+static void __devexit
- geode_aes_remove(struct pci_dev *dev)
+ static void pdcnew_reset(ide_drive_t *drive)
+@@ -466,7 +449,7 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
+ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
  {
- 	crypto_unregister_alg(&geode_alg);
-@@ -389,7 +514,7 @@ geode_aes_remove(struct pci_dev *dev)
- }
- 
+ 	hwif->set_pio_mode = &pdcnew_set_pio_mode;
+-	hwif->set_dma_mode = &pdcnew_set_mode;
++	hwif->set_dma_mode = &pdcnew_set_dma_mode;
  
--static int
-+static int __devinit
- geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	hwif->quirkproc = &pdcnew_quirkproc;
+ 	hwif->resetproc = &pdcnew_reset;
+diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
+index e09742e..3a1e081 100644
+--- a/drivers/ide/pci/pdc202xx_old.c
++++ b/drivers/ide/pci/pdc202xx_old.c
+@@ -162,7 +162,7 @@ static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif)
+  */
+ static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
  {
- 	int ret;
-@@ -397,7 +522,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
- 	if ((ret = pci_enable_device(dev)))
- 		return ret;
- 
--	if ((ret = pci_request_regions(dev, "geode-aes-128")))
-+	if ((ret = pci_request_regions(dev, "geode-aes")))
- 		goto eenable;
- 
- 	_iobase = pci_iomap(dev, 0, 0);
-@@ -472,7 +597,6 @@ geode_aes_exit(void)
- MODULE_AUTHOR("Advanced Micro Devices, Inc.");
- MODULE_DESCRIPTION("Geode LX Hardware AES driver");
- MODULE_LICENSE("GPL");
--MODULE_ALIAS("aes");
+-	unsigned long clock_reg = hwif->dma_master + 0x11;
++	unsigned long clock_reg = hwif->extra_base + 0x01;
+ 	u8 clock = inb(clock_reg);
  
- module_init(geode_aes_init);
- module_exit(geode_aes_exit);
-diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
-index f479686..f1855b5 100644
---- a/drivers/crypto/geode-aes.h
-+++ b/drivers/crypto/geode-aes.h
-@@ -9,9 +9,9 @@
- #ifndef _GEODE_AES_H_
- #define _GEODE_AES_H_
+ 	outb(clock | (hwif->channel ? 0x08 : 0x02), clock_reg);
+@@ -170,20 +170,23 @@ static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
  
--#define AES_KEY_LENGTH 16
-+/* driver logic flags */
- #define AES_IV_LENGTH  16
--
-+#define AES_KEY_LENGTH 16
- #define AES_MIN_BLOCK_SIZE 16
+ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
+ {
+-	unsigned long clock_reg = hwif->dma_master + 0x11;
++	unsigned long clock_reg = hwif->extra_base + 0x01;
+ 	u8 clock = inb(clock_reg);
  
- #define AES_MODE_ECB 0
-@@ -22,6 +22,38 @@
+ 	outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
+ }
  
- #define AES_FLAGS_HIDDENKEY (1 << 0)
+-static int pdc202xx_quirkproc (ide_drive_t *drive)
++static void pdc202xx_quirkproc(ide_drive_t *drive)
+ {
+ 	const char **list, *model = drive->id->model;
  
-+/* Register definitions */
-+
-+#define AES_CTRLA_REG  0x0000
-+
-+#define AES_CTRL_START     0x01
-+#define AES_CTRL_DECRYPT   0x00
-+#define AES_CTRL_ENCRYPT   0x02
-+#define AES_CTRL_WRKEY     0x04
-+#define AES_CTRL_DCA       0x08
-+#define AES_CTRL_SCA       0x10
-+#define AES_CTRL_CBC       0x20
-+
-+#define AES_INTR_REG  0x0008
-+
-+#define AES_INTRA_PENDING (1 << 16)
-+#define AES_INTRB_PENDING (1 << 17)
-+
-+#define AES_INTR_PENDING  (AES_INTRA_PENDING | AES_INTRB_PENDING)
-+#define AES_INTR_MASK     0x07
-+
-+#define AES_SOURCEA_REG   0x0010
-+#define AES_DSTA_REG      0x0014
-+#define AES_LENA_REG      0x0018
-+#define AES_WRITEKEY0_REG 0x0030
-+#define AES_WRITEIV0_REG  0x0040
-+
-+/*  A very large counter that is used to gracefully bail out of an
-+ *  operation in case of trouble
-+ */
-+
-+#define AES_OP_TIMEOUT    0x50000
+ 	for (list = pdc_quirk_drives; *list != NULL; list++)
+-		if (strstr(model, *list) != NULL)
+-			return 2;
+-	return 0;
++		if (strstr(model, *list) != NULL) {
++			drive->quirk_list = 2;
++			return;
++		}
 +
- struct geode_aes_op {
++	drive->quirk_list = 0;
+ }
  
- 	void *src;
-@@ -33,7 +65,13 @@ struct geode_aes_op {
- 	int len;
+ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
+@@ -193,7 +196,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
+ 	if (drive->media != ide_disk || drive->addressing == 1) {
+ 		struct request *rq	= HWGROUP(drive)->rq;
+ 		ide_hwif_t *hwif	= HWIF(drive);
+-		unsigned long high_16   = hwif->dma_master;
++		unsigned long high_16	= hwif->extra_base - 16;
+ 		unsigned long atapi_reg	= high_16 + (hwif->channel ? 0x24 : 0x20);
+ 		u32 word_count	= 0;
+ 		u8 clock = inb(high_16 + 0x11);
+@@ -212,7 +215,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
+ {
+ 	if (drive->media != ide_disk || drive->addressing == 1) {
+ 		ide_hwif_t *hwif	= HWIF(drive);
+-		unsigned long high_16	= hwif->dma_master;
++		unsigned long high_16	= hwif->extra_base - 16;
+ 		unsigned long atapi_reg	= high_16 + (hwif->channel ? 0x24 : 0x20);
+ 		u8 clock		= 0;
  
- 	u8 key[AES_KEY_LENGTH];
--	u8 iv[AES_IV_LENGTH];
-+	u8 *iv;
-+
-+	union {
-+		struct crypto_blkcipher *blk;
-+		struct crypto_cipher *cip;
-+	} fallback;
-+	u32 keylen;
- };
+@@ -228,7 +231,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
+ static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif	= HWIF(drive);
+-	unsigned long high_16	= hwif->dma_master;
++	unsigned long high_16	= hwif->extra_base - 16;
+ 	u8 dma_stat		= inb(hwif->dma_status);
+ 	u8 sc1d			= inb(high_16 + 0x001d);
  
- #endif
-diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
-new file mode 100644
-index 0000000..dfbf24c
---- /dev/null
-+++ b/drivers/crypto/hifn_795x.c
-@@ -0,0 +1,2838 @@
-+/*
-+ * 2007+ Copyright (c) Evgeniy Polyakov <johnpol at 2ka.mipt.ru>
-+ * All rights reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/mod_devicetable.h>
-+#include <linux/interrupt.h>
-+#include <linux/pci.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/mm.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/scatterlist.h>
-+#include <linux/highmem.h>
-+#include <linux/interrupt.h>
-+#include <linux/crypto.h>
-+#include <linux/hw_random.h>
-+#include <linux/ktime.h>
-+
-+#include <crypto/algapi.h>
-+#include <crypto/des.h>
-+
-+#include <asm/kmap_types.h>
-+
-+#undef dprintk
-+
-+#define HIFN_TEST
-+//#define HIFN_DEBUG
-+
-+#ifdef HIFN_DEBUG
-+#define dprintk(f, a...) 	printk(f, ##a)
-+#else
-+#define dprintk(f, a...)	do {} while (0)
-+#endif
-+
-+static char hifn_pll_ref[sizeof("extNNN")] = "ext";
-+module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
-+MODULE_PARM_DESC(hifn_pll_ref,
-+		 "PLL reference clock (pci[freq] or ext[freq], default ext)");
-+
-+static atomic_t hifn_dev_number;
-+
-+#define ACRYPTO_OP_DECRYPT	0
-+#define ACRYPTO_OP_ENCRYPT	1
-+#define ACRYPTO_OP_HMAC		2
-+#define ACRYPTO_OP_RNG		3
-+
-+#define ACRYPTO_MODE_ECB		0
-+#define ACRYPTO_MODE_CBC		1
-+#define ACRYPTO_MODE_CFB		2
-+#define ACRYPTO_MODE_OFB		3
-+
-+#define ACRYPTO_TYPE_AES_128	0
-+#define ACRYPTO_TYPE_AES_192	1
-+#define ACRYPTO_TYPE_AES_256	2
-+#define ACRYPTO_TYPE_3DES	3
-+#define ACRYPTO_TYPE_DES	4
-+
-+#define PCI_VENDOR_ID_HIFN		0x13A3
-+#define PCI_DEVICE_ID_HIFN_7955		0x0020
-+#define	PCI_DEVICE_ID_HIFN_7956		0x001d
-+
-+/* I/O region sizes */
-+
-+#define HIFN_BAR0_SIZE			0x1000
-+#define HIFN_BAR1_SIZE			0x2000
-+#define HIFN_BAR2_SIZE			0x8000
-+
-+/* DMA registres */
-+
-+#define HIFN_DMA_CRA 			0x0C	/* DMA Command Ring Address */
-+#define HIFN_DMA_SDRA 			0x1C	/* DMA Source Data Ring Address */
-+#define HIFN_DMA_RRA			0x2C	/* DMA Result Ring Address */
-+#define HIFN_DMA_DDRA			0x3C	/* DMA Destination Data Ring Address */
-+#define HIFN_DMA_STCTL			0x40	/* DMA Status and Control */
-+#define HIFN_DMA_INTREN 		0x44	/* DMA Interrupt Enable */
-+#define HIFN_DMA_CFG1			0x48	/* DMA Configuration #1 */
-+#define HIFN_DMA_CFG2			0x6C	/* DMA Configuration #2 */
-+#define HIFN_CHIP_ID			0x98	/* Chip ID */
-+
-+/*
-+ * Processing Unit Registers (offset from BASEREG0)
-+ */
-+#define	HIFN_0_PUDATA		0x00	/* Processing Unit Data */
-+#define	HIFN_0_PUCTRL		0x04	/* Processing Unit Control */
-+#define	HIFN_0_PUISR		0x08	/* Processing Unit Interrupt Status */
-+#define	HIFN_0_PUCNFG		0x0c	/* Processing Unit Configuration */
-+#define	HIFN_0_PUIER		0x10	/* Processing Unit Interrupt Enable */
-+#define	HIFN_0_PUSTAT		0x14	/* Processing Unit Status/Chip ID */
-+#define	HIFN_0_FIFOSTAT		0x18	/* FIFO Status */
-+#define	HIFN_0_FIFOCNFG		0x1c	/* FIFO Configuration */
-+#define	HIFN_0_SPACESIZE	0x20	/* Register space size */
-+
-+/* Processing Unit Control Register (HIFN_0_PUCTRL) */
-+#define	HIFN_PUCTRL_CLRSRCFIFO	0x0010	/* clear source fifo */
-+#define	HIFN_PUCTRL_STOP	0x0008	/* stop pu */
-+#define	HIFN_PUCTRL_LOCKRAM	0x0004	/* lock ram */
-+#define	HIFN_PUCTRL_DMAENA	0x0002	/* enable dma */
-+#define	HIFN_PUCTRL_RESET	0x0001	/* Reset processing unit */
-+
-+/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
-+#define	HIFN_PUISR_CMDINVAL	0x8000	/* Invalid command interrupt */
-+#define	HIFN_PUISR_DATAERR	0x4000	/* Data error interrupt */
-+#define	HIFN_PUISR_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
-+#define	HIFN_PUISR_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
-+#define	HIFN_PUISR_DSTOVER	0x0200	/* Destination overrun interrupt */
-+#define	HIFN_PUISR_SRCCMD	0x0080	/* Source command interrupt */
-+#define	HIFN_PUISR_SRCCTX	0x0040	/* Source context interrupt */
-+#define	HIFN_PUISR_SRCDATA	0x0020	/* Source data interrupt */
-+#define	HIFN_PUISR_DSTDATA	0x0010	/* Destination data interrupt */
-+#define	HIFN_PUISR_DSTRESULT	0x0004	/* Destination result interrupt */
-+
-+/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
-+#define	HIFN_PUCNFG_DRAMMASK	0xe000	/* DRAM size mask */
-+#define	HIFN_PUCNFG_DSZ_256K	0x0000	/* 256k dram */
-+#define	HIFN_PUCNFG_DSZ_512K	0x2000	/* 512k dram */
-+#define	HIFN_PUCNFG_DSZ_1M	0x4000	/* 1m dram */
-+#define	HIFN_PUCNFG_DSZ_2M	0x6000	/* 2m dram */
-+#define	HIFN_PUCNFG_DSZ_4M	0x8000	/* 4m dram */
-+#define	HIFN_PUCNFG_DSZ_8M	0xa000	/* 8m dram */
-+#define	HIFN_PUNCFG_DSZ_16M	0xc000	/* 16m dram */
-+#define	HIFN_PUCNFG_DSZ_32M	0xe000	/* 32m dram */
-+#define	HIFN_PUCNFG_DRAMREFRESH	0x1800	/* DRAM refresh rate mask */
-+#define	HIFN_PUCNFG_DRFR_512	0x0000	/* 512 divisor of ECLK */
-+#define	HIFN_PUCNFG_DRFR_256	0x0800	/* 256 divisor of ECLK */
-+#define	HIFN_PUCNFG_DRFR_128	0x1000	/* 128 divisor of ECLK */
-+#define	HIFN_PUCNFG_TCALLPHASES	0x0200	/* your guess is as good as mine... */
-+#define	HIFN_PUCNFG_TCDRVTOTEM	0x0100	/* your guess is as good as mine... */
-+#define	HIFN_PUCNFG_BIGENDIAN	0x0080	/* DMA big endian mode */
-+#define	HIFN_PUCNFG_BUS32	0x0040	/* Bus width 32bits */
-+#define	HIFN_PUCNFG_BUS16	0x0000	/* Bus width 16 bits */
-+#define	HIFN_PUCNFG_CHIPID	0x0020	/* Allow chipid from PUSTAT */
-+#define	HIFN_PUCNFG_DRAM	0x0010	/* Context RAM is DRAM */
-+#define	HIFN_PUCNFG_SRAM	0x0000	/* Context RAM is SRAM */
-+#define	HIFN_PUCNFG_COMPSING	0x0004	/* Enable single compression context */
-+#define	HIFN_PUCNFG_ENCCNFG	0x0002	/* Encryption configuration */
-+
-+/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
-+#define	HIFN_PUIER_CMDINVAL	0x8000	/* Invalid command interrupt */
-+#define	HIFN_PUIER_DATAERR	0x4000	/* Data error interrupt */
-+#define	HIFN_PUIER_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
-+#define	HIFN_PUIER_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
-+#define	HIFN_PUIER_DSTOVER	0x0200	/* Destination overrun interrupt */
-+#define	HIFN_PUIER_SRCCMD	0x0080	/* Source command interrupt */
-+#define	HIFN_PUIER_SRCCTX	0x0040	/* Source context interrupt */
-+#define	HIFN_PUIER_SRCDATA	0x0020	/* Source data interrupt */
-+#define	HIFN_PUIER_DSTDATA	0x0010	/* Destination data interrupt */
-+#define	HIFN_PUIER_DSTRESULT	0x0004	/* Destination result interrupt */
-+
-+/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
-+#define	HIFN_PUSTAT_CMDINVAL	0x8000	/* Invalid command interrupt */
-+#define	HIFN_PUSTAT_DATAERR	0x4000	/* Data error interrupt */
-+#define	HIFN_PUSTAT_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
-+#define	HIFN_PUSTAT_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
-+#define	HIFN_PUSTAT_DSTOVER	0x0200	/* Destination overrun interrupt */
-+#define	HIFN_PUSTAT_SRCCMD	0x0080	/* Source command interrupt */
-+#define	HIFN_PUSTAT_SRCCTX	0x0040	/* Source context interrupt */
-+#define	HIFN_PUSTAT_SRCDATA	0x0020	/* Source data interrupt */
-+#define	HIFN_PUSTAT_DSTDATA	0x0010	/* Destination data interrupt */
-+#define	HIFN_PUSTAT_DSTRESULT	0x0004	/* Destination result interrupt */
-+#define	HIFN_PUSTAT_CHIPREV	0x00ff	/* Chip revision mask */
-+#define	HIFN_PUSTAT_CHIPENA	0xff00	/* Chip enabled mask */
-+#define	HIFN_PUSTAT_ENA_2	0x1100	/* Level 2 enabled */
-+#define	HIFN_PUSTAT_ENA_1	0x1000	/* Level 1 enabled */
-+#define	HIFN_PUSTAT_ENA_0	0x3000	/* Level 0 enabled */
-+#define	HIFN_PUSTAT_REV_2	0x0020	/* 7751 PT6/2 */
-+#define	HIFN_PUSTAT_REV_3	0x0030	/* 7751 PT6/3 */
-+
-+/* FIFO Status Register (HIFN_0_FIFOSTAT) */
-+#define	HIFN_FIFOSTAT_SRC	0x7f00	/* Source FIFO available */
-+#define	HIFN_FIFOSTAT_DST	0x007f	/* Destination FIFO available */
-+
-+/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
-+#define	HIFN_FIFOCNFG_THRESHOLD	0x0400	/* must be written as 1 */
-+
-+/*
-+ * DMA Interface Registers (offset from BASEREG1)
-+ */
-+#define	HIFN_1_DMA_CRAR		0x0c	/* DMA Command Ring Address */
-+#define	HIFN_1_DMA_SRAR		0x1c	/* DMA Source Ring Address */
-+#define	HIFN_1_DMA_RRAR		0x2c	/* DMA Result Ring Address */
-+#define	HIFN_1_DMA_DRAR		0x3c	/* DMA Destination Ring Address */
-+#define	HIFN_1_DMA_CSR		0x40	/* DMA Status and Control */
-+#define	HIFN_1_DMA_IER		0x44	/* DMA Interrupt Enable */
-+#define	HIFN_1_DMA_CNFG		0x48	/* DMA Configuration */
-+#define	HIFN_1_PLL		0x4c	/* 795x: PLL config */
-+#define	HIFN_1_7811_RNGENA	0x60	/* 7811: rng enable */
-+#define	HIFN_1_7811_RNGCFG	0x64	/* 7811: rng config */
-+#define	HIFN_1_7811_RNGDAT	0x68	/* 7811: rng data */
-+#define	HIFN_1_7811_RNGSTS	0x6c	/* 7811: rng status */
-+#define	HIFN_1_7811_MIPSRST	0x94	/* 7811: MIPS reset */
-+#define	HIFN_1_REVID		0x98	/* Revision ID */
-+#define	HIFN_1_UNLOCK_SECRET1	0xf4
-+#define	HIFN_1_UNLOCK_SECRET2	0xfc
-+#define	HIFN_1_PUB_RESET	0x204	/* Public/RNG Reset */
-+#define	HIFN_1_PUB_BASE		0x300	/* Public Base Address */
-+#define	HIFN_1_PUB_OPLEN	0x304	/* Public Operand Length */
-+#define	HIFN_1_PUB_OP		0x308	/* Public Operand */
-+#define	HIFN_1_PUB_STATUS	0x30c	/* Public Status */
-+#define	HIFN_1_PUB_IEN		0x310	/* Public Interrupt enable */
-+#define	HIFN_1_RNG_CONFIG	0x314	/* RNG config */
-+#define	HIFN_1_RNG_DATA		0x318	/* RNG data */
-+#define	HIFN_1_PUB_MEM		0x400	/* start of Public key memory */
-+#define	HIFN_1_PUB_MEMEND	0xbff	/* end of Public key memory */
-+
-+/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
-+#define	HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destinition Ring Control */
-+#define	HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
-+#define	HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
-+#define	HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
-+#define	HIFN_DMACSR_D_ABORT	0x20000000	/* Destinition Ring PCIAbort */
-+#define	HIFN_DMACSR_D_DONE	0x10000000	/* Destinition Ring Done */
-+#define	HIFN_DMACSR_D_LAST	0x08000000	/* Destinition Ring Last */
-+#define	HIFN_DMACSR_D_WAIT	0x04000000	/* Destinition Ring Waiting */
-+#define	HIFN_DMACSR_D_OVER	0x02000000	/* Destinition Ring Overflow */
-+#define	HIFN_DMACSR_R_CTRL	0x00c00000	/* Result Ring Control */
-+#define	HIFN_DMACSR_R_CTRL_NOP	0x00000000	/* Result Control: no-op */
-+#define	HIFN_DMACSR_R_CTRL_DIS	0x00400000	/* Result Control: disable */
-+#define	HIFN_DMACSR_R_CTRL_ENA	0x00800000	/* Result Control: enable */
-+#define	HIFN_DMACSR_R_ABORT	0x00200000	/* Result Ring PCI Abort */
-+#define	HIFN_DMACSR_R_DONE	0x00100000	/* Result Ring Done */
-+#define	HIFN_DMACSR_R_LAST	0x00080000	/* Result Ring Last */
-+#define	HIFN_DMACSR_R_WAIT	0x00040000	/* Result Ring Waiting */
-+#define	HIFN_DMACSR_R_OVER	0x00020000	/* Result Ring Overflow */
-+#define	HIFN_DMACSR_S_CTRL	0x0000c000	/* Source Ring Control */
-+#define	HIFN_DMACSR_S_CTRL_NOP	0x00000000	/* Source Control: no-op */
-+#define	HIFN_DMACSR_S_CTRL_DIS	0x00004000	/* Source Control: disable */
-+#define	HIFN_DMACSR_S_CTRL_ENA	0x00008000	/* Source Control: enable */
-+#define	HIFN_DMACSR_S_ABORT	0x00002000	/* Source Ring PCI Abort */
-+#define	HIFN_DMACSR_S_DONE	0x00001000	/* Source Ring Done */
-+#define	HIFN_DMACSR_S_LAST	0x00000800	/* Source Ring Last */
-+#define	HIFN_DMACSR_S_WAIT	0x00000400	/* Source Ring Waiting */
-+#define	HIFN_DMACSR_ILLW	0x00000200	/* Illegal write (7811 only) */
-+#define	HIFN_DMACSR_ILLR	0x00000100	/* Illegal read (7811 only) */
-+#define	HIFN_DMACSR_C_CTRL	0x000000c0	/* Command Ring Control */
-+#define	HIFN_DMACSR_C_CTRL_NOP	0x00000000	/* Command Control: no-op */
-+#define	HIFN_DMACSR_C_CTRL_DIS	0x00000040	/* Command Control: disable */
-+#define	HIFN_DMACSR_C_CTRL_ENA	0x00000080	/* Command Control: enable */
-+#define	HIFN_DMACSR_C_ABORT	0x00000020	/* Command Ring PCI Abort */
-+#define	HIFN_DMACSR_C_DONE	0x00000010	/* Command Ring Done */
-+#define	HIFN_DMACSR_C_LAST	0x00000008	/* Command Ring Last */
-+#define	HIFN_DMACSR_C_WAIT	0x00000004	/* Command Ring Waiting */
-+#define	HIFN_DMACSR_PUBDONE	0x00000002	/* Public op done (7951 only) */
-+#define	HIFN_DMACSR_ENGINE	0x00000001	/* Command Ring Engine IRQ */
-+
-+/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
-+#define	HIFN_DMAIER_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
-+#define	HIFN_DMAIER_D_DONE	0x10000000	/* Destination Ring Done */
-+#define	HIFN_DMAIER_D_LAST	0x08000000	/* Destination Ring Last */
-+#define	HIFN_DMAIER_D_WAIT	0x04000000	/* Destination Ring Waiting */
-+#define	HIFN_DMAIER_D_OVER	0x02000000	/* Destination Ring Overflow */
-+#define	HIFN_DMAIER_R_ABORT	0x00200000	/* Result Ring PCI Abort */
-+#define	HIFN_DMAIER_R_DONE	0x00100000	/* Result Ring Done */
-+#define	HIFN_DMAIER_R_LAST	0x00080000	/* Result Ring Last */
-+#define	HIFN_DMAIER_R_WAIT	0x00040000	/* Result Ring Waiting */
-+#define	HIFN_DMAIER_R_OVER	0x00020000	/* Result Ring Overflow */
-+#define	HIFN_DMAIER_S_ABORT	0x00002000	/* Source Ring PCI Abort */
-+#define	HIFN_DMAIER_S_DONE	0x00001000	/* Source Ring Done */
-+#define	HIFN_DMAIER_S_LAST	0x00000800	/* Source Ring Last */
-+#define	HIFN_DMAIER_S_WAIT	0x00000400	/* Source Ring Waiting */
-+#define	HIFN_DMAIER_ILLW	0x00000200	/* Illegal write (7811 only) */
-+#define	HIFN_DMAIER_ILLR	0x00000100	/* Illegal read (7811 only) */
-+#define	HIFN_DMAIER_C_ABORT	0x00000020	/* Command Ring PCI Abort */
-+#define	HIFN_DMAIER_C_DONE	0x00000010	/* Command Ring Done */
-+#define	HIFN_DMAIER_C_LAST	0x00000008	/* Command Ring Last */
-+#define	HIFN_DMAIER_C_WAIT	0x00000004	/* Command Ring Waiting */
-+#define	HIFN_DMAIER_PUBDONE	0x00000002	/* public op done (7951 only) */
-+#define	HIFN_DMAIER_ENGINE	0x00000001	/* Engine IRQ */
-+
-+/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
-+#define	HIFN_DMACNFG_BIGENDIAN	0x10000000	/* big endian mode */
-+#define	HIFN_DMACNFG_POLLFREQ	0x00ff0000	/* Poll frequency mask */
-+#define	HIFN_DMACNFG_UNLOCK	0x00000800
-+#define	HIFN_DMACNFG_POLLINVAL	0x00000700	/* Invalid Poll Scalar */
-+#define	HIFN_DMACNFG_LAST	0x00000010	/* Host control LAST bit */
-+#define	HIFN_DMACNFG_MODE	0x00000004	/* DMA mode */
-+#define	HIFN_DMACNFG_DMARESET	0x00000002	/* DMA Reset # */
-+#define	HIFN_DMACNFG_MSTRESET	0x00000001	/* Master Reset # */
-+
-+/* PLL configuration register */
-+#define HIFN_PLL_REF_CLK_HBI	0x00000000	/* HBI reference clock */
-+#define HIFN_PLL_REF_CLK_PLL	0x00000001	/* PLL reference clock */
-+#define HIFN_PLL_BP		0x00000002	/* Reference clock bypass */
-+#define HIFN_PLL_PK_CLK_HBI	0x00000000	/* PK engine HBI clock */
-+#define HIFN_PLL_PK_CLK_PLL	0x00000008	/* PK engine PLL clock */
-+#define HIFN_PLL_PE_CLK_HBI	0x00000000	/* PE engine HBI clock */
-+#define HIFN_PLL_PE_CLK_PLL	0x00000010	/* PE engine PLL clock */
-+#define HIFN_PLL_RESERVED_1	0x00000400	/* Reserved bit, must be 1 */
-+#define HIFN_PLL_ND_SHIFT	11		/* Clock multiplier shift */
-+#define HIFN_PLL_ND_MULT_2	0x00000000	/* PLL clock multiplier 2 */
-+#define HIFN_PLL_ND_MULT_4	0x00000800	/* PLL clock multiplier 4 */
-+#define HIFN_PLL_ND_MULT_6	0x00001000	/* PLL clock multiplier 6 */
-+#define HIFN_PLL_ND_MULT_8	0x00001800	/* PLL clock multiplier 8 */
-+#define HIFN_PLL_ND_MULT_10	0x00002000	/* PLL clock multiplier 10 */
-+#define HIFN_PLL_ND_MULT_12	0x00002800	/* PLL clock multiplier 12 */
-+#define HIFN_PLL_IS_1_8		0x00000000	/* charge pump (mult. 1-8) */
-+#define HIFN_PLL_IS_9_12	0x00010000	/* charge pump (mult. 9-12) */
-+
-+#define HIFN_PLL_FCK_MAX	266		/* Maximum PLL frequency */
-+
-+/* Public key reset register (HIFN_1_PUB_RESET) */
-+#define	HIFN_PUBRST_RESET	0x00000001	/* reset public/rng unit */
-+
-+/* Public base address register (HIFN_1_PUB_BASE) */
-+#define	HIFN_PUBBASE_ADDR	0x00003fff	/* base address */
-+
-+/* Public operand length register (HIFN_1_PUB_OPLEN) */
-+#define	HIFN_PUBOPLEN_MOD_M	0x0000007f	/* modulus length mask */
-+#define	HIFN_PUBOPLEN_MOD_S	0		/* modulus length shift */
-+#define	HIFN_PUBOPLEN_EXP_M	0x0003ff80	/* exponent length mask */
-+#define	HIFN_PUBOPLEN_EXP_S	7		/* exponent lenght shift */
-+#define	HIFN_PUBOPLEN_RED_M	0x003c0000	/* reducend length mask */
-+#define	HIFN_PUBOPLEN_RED_S	18		/* reducend length shift */
-+
-+/* Public operation register (HIFN_1_PUB_OP) */
-+#define	HIFN_PUBOP_AOFFSET_M	0x0000007f	/* A offset mask */
-+#define	HIFN_PUBOP_AOFFSET_S	0		/* A offset shift */
-+#define	HIFN_PUBOP_BOFFSET_M	0x00000f80	/* B offset mask */
-+#define	HIFN_PUBOP_BOFFSET_S	7		/* B offset shift */
-+#define	HIFN_PUBOP_MOFFSET_M	0x0003f000	/* M offset mask */
-+#define	HIFN_PUBOP_MOFFSET_S	12		/* M offset shift */
-+#define	HIFN_PUBOP_OP_MASK	0x003c0000	/* Opcode: */
-+#define	HIFN_PUBOP_OP_NOP	0x00000000	/*  NOP */
-+#define	HIFN_PUBOP_OP_ADD	0x00040000	/*  ADD */
-+#define	HIFN_PUBOP_OP_ADDC	0x00080000	/*  ADD w/carry */
-+#define	HIFN_PUBOP_OP_SUB	0x000c0000	/*  SUB */
-+#define	HIFN_PUBOP_OP_SUBC	0x00100000	/*  SUB w/carry */
-+#define	HIFN_PUBOP_OP_MODADD	0x00140000	/*  Modular ADD */
-+#define	HIFN_PUBOP_OP_MODSUB	0x00180000	/*  Modular SUB */
-+#define	HIFN_PUBOP_OP_INCA	0x001c0000	/*  INC A */
-+#define	HIFN_PUBOP_OP_DECA	0x00200000	/*  DEC A */
-+#define	HIFN_PUBOP_OP_MULT	0x00240000	/*  MULT */
-+#define	HIFN_PUBOP_OP_MODMULT	0x00280000	/*  Modular MULT */
-+#define	HIFN_PUBOP_OP_MODRED	0x002c0000	/*  Modular RED */
-+#define	HIFN_PUBOP_OP_MODEXP	0x00300000	/*  Modular EXP */
-+
-+/* Public status register (HIFN_1_PUB_STATUS) */
-+#define	HIFN_PUBSTS_DONE	0x00000001	/* operation done */
-+#define	HIFN_PUBSTS_CARRY	0x00000002	/* carry */
-+
-+/* Public interrupt enable register (HIFN_1_PUB_IEN) */
-+#define	HIFN_PUBIEN_DONE	0x00000001	/* operation done interrupt */
-+
-+/* Random number generator config register (HIFN_1_RNG_CONFIG) */
-+#define	HIFN_RNGCFG_ENA		0x00000001	/* enable rng */
-+
-+#define HIFN_NAMESIZE			32
-+#define HIFN_MAX_RESULT_ORDER		5
-+
-+#define	HIFN_D_CMD_RSIZE		24*4
-+#define	HIFN_D_SRC_RSIZE		80*4
-+#define	HIFN_D_DST_RSIZE		80*4
-+#define	HIFN_D_RES_RSIZE		24*4
-+
-+#define HIFN_QUEUE_LENGTH		HIFN_D_CMD_RSIZE-5
-+
-+#define AES_MIN_KEY_SIZE		16
-+#define AES_MAX_KEY_SIZE		32
-+
-+#define HIFN_DES_KEY_LENGTH		8
-+#define HIFN_3DES_KEY_LENGTH		24
-+#define HIFN_MAX_CRYPT_KEY_LENGTH	AES_MAX_KEY_SIZE
-+#define HIFN_IV_LENGTH			8
-+#define HIFN_AES_IV_LENGTH		16
-+#define	HIFN_MAX_IV_LENGTH		HIFN_AES_IV_LENGTH
-+
-+#define HIFN_MAC_KEY_LENGTH		64
-+#define HIFN_MD5_LENGTH			16
-+#define HIFN_SHA1_LENGTH		20
-+#define HIFN_MAC_TRUNC_LENGTH		12
-+
-+#define	HIFN_MAX_COMMAND		(8 + 8 + 8 + 64 + 260)
-+#define	HIFN_MAX_RESULT			(8 + 4 + 4 + 20 + 4)
-+#define HIFN_USED_RESULT		12
-+
-+struct hifn_desc
-+{
-+	volatile u32		l;
-+	volatile u32		p;
-+};
-+
-+struct hifn_dma {
-+	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE+1];
-+	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE+1];
-+	struct hifn_desc	dstr[HIFN_D_DST_RSIZE+1];
-+	struct hifn_desc	resr[HIFN_D_RES_RSIZE+1];
-+
-+	u8			command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
-+	u8			result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
-+
-+	u64			test_src, test_dst;
-+
-+	/*
-+	 *  Our current positions for insertion and removal from the descriptor
-+	 *  rings.
-+	 */
-+	volatile int		cmdi, srci, dsti, resi;
-+	volatile int		cmdu, srcu, dstu, resu;
-+	int			cmdk, srck, dstk, resk;
-+};
-+
-+#define HIFN_FLAG_CMD_BUSY	(1<<0)
-+#define HIFN_FLAG_SRC_BUSY	(1<<1)
-+#define HIFN_FLAG_DST_BUSY	(1<<2)
-+#define HIFN_FLAG_RES_BUSY	(1<<3)
-+#define HIFN_FLAG_OLD_KEY	(1<<4)
-+
-+#define HIFN_DEFAULT_ACTIVE_NUM	5
-+
-+struct hifn_device
-+{
-+	char			name[HIFN_NAMESIZE];
-+
-+	int			irq;
-+
-+	struct pci_dev		*pdev;
-+	void __iomem		*bar[3];
-+
-+	unsigned long		result_mem;
-+	dma_addr_t		dst;
-+
-+	void			*desc_virt;
-+	dma_addr_t		desc_dma;
-+
-+	u32			dmareg;
-+
-+	void 			*sa[HIFN_D_RES_RSIZE];
-+
-+	spinlock_t		lock;
-+
-+	void 			*priv;
-+
-+	u32			flags;
-+	int			active, started;
-+	struct delayed_work	work;
-+	unsigned long		reset;
-+	unsigned long		success;
-+	unsigned long		prev_success;
-+
-+	u8			snum;
-+
-+	struct tasklet_struct	tasklet;
-+
-+	struct crypto_queue 	queue;
-+	struct list_head	alg_list;
-+
-+	unsigned int		pk_clk_freq;
-+
-+#ifdef CRYPTO_DEV_HIFN_795X_RNG
-+	unsigned int		rng_wait_time;
-+	ktime_t			rngtime;
-+	struct hwrng		rng;
-+#endif
-+};
-+
-+#define	HIFN_D_LENGTH			0x0000ffff
-+#define	HIFN_D_NOINVALID		0x01000000
-+#define	HIFN_D_MASKDONEIRQ		0x02000000
-+#define	HIFN_D_DESTOVER			0x04000000
-+#define	HIFN_D_OVER			0x08000000
-+#define	HIFN_D_LAST			0x20000000
-+#define	HIFN_D_JUMP			0x40000000
-+#define	HIFN_D_VALID			0x80000000
-+
-+struct hifn_base_command
-+{
-+	volatile u16		masks;
-+	volatile u16		session_num;
-+	volatile u16		total_source_count;
-+	volatile u16		total_dest_count;
-+};
-+
-+#define	HIFN_BASE_CMD_COMP		0x0100	/* enable compression engine */
-+#define	HIFN_BASE_CMD_PAD		0x0200	/* enable padding engine */
-+#define	HIFN_BASE_CMD_MAC		0x0400	/* enable MAC engine */
-+#define	HIFN_BASE_CMD_CRYPT		0x0800	/* enable crypt engine */
-+#define	HIFN_BASE_CMD_DECODE		0x2000
-+#define	HIFN_BASE_CMD_SRCLEN_M		0xc000
-+#define	HIFN_BASE_CMD_SRCLEN_S		14
-+#define	HIFN_BASE_CMD_DSTLEN_M		0x3000
-+#define	HIFN_BASE_CMD_DSTLEN_S		12
-+#define	HIFN_BASE_CMD_LENMASK_HI	0x30000
-+#define	HIFN_BASE_CMD_LENMASK_LO	0x0ffff
-+
-+/*
-+ * Structure to help build up the command data structure.
-+ */
-+struct hifn_crypt_command
-+{
-+	volatile u16 		masks;
-+	volatile u16 		header_skip;
-+	volatile u16 		source_count;
-+	volatile u16 		reserved;
-+};
-+
-+#define	HIFN_CRYPT_CMD_ALG_MASK		0x0003		/* algorithm: */
-+#define	HIFN_CRYPT_CMD_ALG_DES		0x0000		/*   DES */
-+#define	HIFN_CRYPT_CMD_ALG_3DES		0x0001		/*   3DES */
-+#define	HIFN_CRYPT_CMD_ALG_RC4		0x0002		/*   RC4 */
-+#define	HIFN_CRYPT_CMD_ALG_AES		0x0003		/*   AES */
-+#define	HIFN_CRYPT_CMD_MODE_MASK	0x0018		/* Encrypt mode: */
-+#define	HIFN_CRYPT_CMD_MODE_ECB		0x0000		/*   ECB */
-+#define	HIFN_CRYPT_CMD_MODE_CBC		0x0008		/*   CBC */
-+#define	HIFN_CRYPT_CMD_MODE_CFB		0x0010		/*   CFB */
-+#define	HIFN_CRYPT_CMD_MODE_OFB		0x0018		/*   OFB */
-+#define	HIFN_CRYPT_CMD_CLR_CTX		0x0040		/* clear context */
-+#define	HIFN_CRYPT_CMD_KSZ_MASK		0x0600		/* AES key size: */
-+#define	HIFN_CRYPT_CMD_KSZ_128		0x0000		/*  128 bit */
-+#define	HIFN_CRYPT_CMD_KSZ_192		0x0200		/*  192 bit */
-+#define	HIFN_CRYPT_CMD_KSZ_256		0x0400		/*  256 bit */
-+#define	HIFN_CRYPT_CMD_NEW_KEY		0x0800		/* expect new key */
-+#define	HIFN_CRYPT_CMD_NEW_IV		0x1000		/* expect new iv */
-+#define	HIFN_CRYPT_CMD_SRCLEN_M		0xc000
-+#define	HIFN_CRYPT_CMD_SRCLEN_S		14
-+
-+/*
-+ * Structure to help build up the command data structure.
-+ */
-+struct hifn_mac_command
-+{
-+	volatile u16 		masks;
-+	volatile u16 		header_skip;
-+	volatile u16 		source_count;
-+	volatile u16 		reserved;
-+};
-+
-+#define	HIFN_MAC_CMD_ALG_MASK		0x0001
-+#define	HIFN_MAC_CMD_ALG_SHA1		0x0000
-+#define	HIFN_MAC_CMD_ALG_MD5		0x0001
-+#define	HIFN_MAC_CMD_MODE_MASK		0x000c
-+#define	HIFN_MAC_CMD_MODE_HMAC		0x0000
-+#define	HIFN_MAC_CMD_MODE_SSL_MAC	0x0004
-+#define	HIFN_MAC_CMD_MODE_HASH		0x0008
-+#define	HIFN_MAC_CMD_MODE_FULL		0x0004
-+#define	HIFN_MAC_CMD_TRUNC		0x0010
-+#define	HIFN_MAC_CMD_RESULT		0x0020
-+#define	HIFN_MAC_CMD_APPEND		0x0040
-+#define	HIFN_MAC_CMD_SRCLEN_M		0xc000
-+#define	HIFN_MAC_CMD_SRCLEN_S		14
-+
-+/*
-+ * MAC POS IPsec initiates authentication after encryption on encodes
-+ * and before decryption on decodes.
-+ */
-+#define	HIFN_MAC_CMD_POS_IPSEC		0x0200
-+#define	HIFN_MAC_CMD_NEW_KEY		0x0800
-+
-+struct hifn_comp_command
-+{
-+	volatile u16 		masks;
-+	volatile u16 		header_skip;
-+	volatile u16 		source_count;
-+	volatile u16 		reserved;
-+};
-+
-+#define	HIFN_COMP_CMD_SRCLEN_M		0xc000
-+#define	HIFN_COMP_CMD_SRCLEN_S		14
-+#define	HIFN_COMP_CMD_ONE		0x0100	/* must be one */
-+#define	HIFN_COMP_CMD_CLEARHIST		0x0010	/* clear history */
-+#define	HIFN_COMP_CMD_UPDATEHIST	0x0008	/* update history */
-+#define	HIFN_COMP_CMD_LZS_STRIP0	0x0004	/* LZS: strip zero */
-+#define	HIFN_COMP_CMD_MPPC_RESTART	0x0004	/* MPPC: restart */
-+#define	HIFN_COMP_CMD_ALG_MASK		0x0001	/* compression mode: */
-+#define	HIFN_COMP_CMD_ALG_MPPC		0x0001	/*   MPPC */
-+#define	HIFN_COMP_CMD_ALG_LZS		0x0000	/*   LZS */
-+
-+struct hifn_base_result
-+{
-+	volatile u16 		flags;
-+	volatile u16 		session;
-+	volatile u16 		src_cnt;		/* 15:0 of source count */
-+	volatile u16 		dst_cnt;		/* 15:0 of dest count */
-+};
-+
-+#define	HIFN_BASE_RES_DSTOVERRUN	0x0200	/* destination overrun */
-+#define	HIFN_BASE_RES_SRCLEN_M		0xc000	/* 17:16 of source count */
-+#define	HIFN_BASE_RES_SRCLEN_S		14
-+#define	HIFN_BASE_RES_DSTLEN_M		0x3000	/* 17:16 of dest count */
-+#define	HIFN_BASE_RES_DSTLEN_S		12
-+
-+struct hifn_comp_result
-+{
-+	volatile u16 		flags;
-+	volatile u16 		crc;
-+};
-+
-+#define	HIFN_COMP_RES_LCB_M		0xff00	/* longitudinal check byte */
-+#define	HIFN_COMP_RES_LCB_S		8
-+#define	HIFN_COMP_RES_RESTART		0x0004	/* MPPC: restart */
-+#define	HIFN_COMP_RES_ENDMARKER		0x0002	/* LZS: end marker seen */
-+#define	HIFN_COMP_RES_SRC_NOTZERO	0x0001	/* source expired */
-+
-+struct hifn_mac_result
-+{
-+	volatile u16 		flags;
-+	volatile u16 		reserved;
-+	/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
-+};
-+
-+#define	HIFN_MAC_RES_MISCOMPARE		0x0002	/* compare failed */
-+#define	HIFN_MAC_RES_SRC_NOTZERO	0x0001	/* source expired */
-+
-+struct hifn_crypt_result
-+{
-+	volatile u16 		flags;
-+	volatile u16 		reserved;
-+};
-+
-+#define	HIFN_CRYPT_RES_SRC_NOTZERO	0x0001	/* source expired */
-+
-+#ifndef HIFN_POLL_FREQUENCY
-+#define	HIFN_POLL_FREQUENCY	0x1
-+#endif
-+
-+#ifndef HIFN_POLL_SCALAR
-+#define	HIFN_POLL_SCALAR	0x0
-+#endif
-+
-+#define	HIFN_MAX_SEGLEN 	0xffff		/* maximum dma segment len */
-+#define	HIFN_MAX_DMALEN		0x3ffff		/* maximum dma length */
-+
-+struct hifn_crypto_alg
-+{
-+	struct list_head	entry;
-+	struct crypto_alg	alg;
-+	struct hifn_device	*dev;
-+};
-+
-+#define ASYNC_SCATTERLIST_CACHE	16
-+
-+#define ASYNC_FLAGS_MISALIGNED	(1<<0)
-+
-+struct ablkcipher_walk
-+{
-+	struct scatterlist	cache[ASYNC_SCATTERLIST_CACHE];
-+	u32			flags;
-+	int			num;
-+};
-+
-+struct hifn_context
-+{
-+	u8			key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv;
-+	struct hifn_device	*dev;
-+	unsigned int		keysize, ivsize;
-+	u8			op, type, mode, unused;
-+	struct ablkcipher_walk	walk;
-+	atomic_t		sg_num;
-+};
-+
-+#define crypto_alg_to_hifn(a)	container_of(a, struct hifn_crypto_alg, alg)
-+
-+static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
-+{
-+	u32 ret;
-+
-+	ret = readl((char *)(dev->bar[0]) + reg);
-+
-+	return ret;
-+}
-+
-+static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
-+{
-+	u32 ret;
-+
-+	ret = readl((char *)(dev->bar[1]) + reg);
-+
-+	return ret;
-+}
-+
-+static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
-+{
-+	writel(val, (char *)(dev->bar[0]) + reg);
-+}
-+
-+static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
-+{
-+	writel(val, (char *)(dev->bar[1]) + reg);
-+}
-+
-+static void hifn_wait_puc(struct hifn_device *dev)
-+{
-+	int i;
-+	u32 ret;
-+
-+	for (i=10000; i > 0; --i) {
-+		ret = hifn_read_0(dev, HIFN_0_PUCTRL);
-+		if (!(ret & HIFN_PUCTRL_RESET))
-+			break;
-+
-+		udelay(1);
-+	}
-+
-+	if (!i)
-+		dprintk("%s: Failed to reset PUC unit.\n", dev->name);
-+}
-+
-+static void hifn_reset_puc(struct hifn_device *dev)
-+{
-+	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
-+	hifn_wait_puc(dev);
-+}
-+
-+static void hifn_stop_device(struct hifn_device *dev)
-+{
-+	hifn_write_1(dev, HIFN_1_DMA_CSR,
-+		HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
-+		HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
-+	hifn_write_0(dev, HIFN_0_PUIER, 0);
-+	hifn_write_1(dev, HIFN_1_DMA_IER, 0);
-+}
-+
-+static void hifn_reset_dma(struct hifn_device *dev, int full)
-+{
-+	hifn_stop_device(dev);
-+
-+	/*
-+	 * Setting poll frequency and others to 0.
-+	 */
-+	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
-+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
-+	mdelay(1);
-+
-+	/*
-+	 * Reset DMA.
-+	 */
-+	if (full) {
-+		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
-+		mdelay(1);
-+	} else {
-+		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
-+				HIFN_DMACNFG_MSTRESET);
-+		hifn_reset_puc(dev);
-+	}
-+
-+	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
-+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
-+
-+	hifn_reset_puc(dev);
-+}
-+
-+static u32 hifn_next_signature(u_int32_t a, u_int cnt)
-+{
-+	int i;
-+	u32 v;
-+
-+	for (i = 0; i < cnt; i++) {
-+
-+		/* get the parity */
-+		v = a & 0x80080125;
-+		v ^= v >> 16;
-+		v ^= v >> 8;
-+		v ^= v >> 4;
-+		v ^= v >> 2;
-+		v ^= v >> 1;
-+
-+		a = (v & 1) ^ (a << 1);
-+	}
-+
-+	return a;
-+}
-+
-+static struct pci2id {
-+	u_short		pci_vendor;
-+	u_short		pci_prod;
-+	char		card_id[13];
-+} pci2id[] = {
-+	{
-+		PCI_VENDOR_ID_HIFN,
-+		PCI_DEVICE_ID_HIFN_7955,
-+		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+		  0x00, 0x00, 0x00, 0x00, 0x00 }
-+	},
-+	{
-+		PCI_VENDOR_ID_HIFN,
-+		PCI_DEVICE_ID_HIFN_7956,
-+		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-+		  0x00, 0x00, 0x00, 0x00, 0x00 }
-+	}
-+};
-+
-+#ifdef CRYPTO_DEV_HIFN_795X_RNG
-+static int hifn_rng_data_present(struct hwrng *rng, int wait)
-+{
-+	struct hifn_device *dev = (struct hifn_device *)rng->priv;
-+	s64 nsec;
-+
-+	nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime));
-+	nsec -= dev->rng_wait_time;
-+	if (nsec <= 0)
-+		return 1;
-+	if (!wait)
-+		return 0;
-+	ndelay(nsec);
-+	return 1;
-+}
-+
-+static int hifn_rng_data_read(struct hwrng *rng, u32 *data)
-+{
-+	struct hifn_device *dev = (struct hifn_device *)rng->priv;
-+
-+	*data = hifn_read_1(dev, HIFN_1_RNG_DATA);
-+	dev->rngtime = ktime_get();
-+	return 4;
-+}
-+
-+static int hifn_register_rng(struct hifn_device *dev)
-+{
-+	/*
-+	 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
-+	 */
-+	dev->rng_wait_time	= DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) *
-+				  256;
-+
-+	dev->rng.name		= dev->name;
-+	dev->rng.data_present	= hifn_rng_data_present,
-+	dev->rng.data_read	= hifn_rng_data_read,
-+	dev->rng.priv		= (unsigned long)dev;
-+
-+	return hwrng_register(&dev->rng);
-+}
-+
-+static void hifn_unregister_rng(struct hifn_device *dev)
-+{
-+	hwrng_unregister(&dev->rng);
-+}
-+#else
-+#define hifn_register_rng(dev)		0
-+#define hifn_unregister_rng(dev)
-+#endif
-+
-+static int hifn_init_pubrng(struct hifn_device *dev)
-+{
-+	int i;
-+
-+	hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
-+			HIFN_PUBRST_RESET);
-+
-+	for (i=100; i > 0; --i) {
-+		mdelay(1);
-+
-+		if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
-+			break;
-+	}
-+
-+	if (!i)
-+		dprintk("Chip %s: Failed to initialise public key engine.\n",
-+				dev->name);
-+	else {
-+		hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
-+		dev->dmareg |= HIFN_DMAIER_PUBDONE;
-+		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
-+
-+		dprintk("Chip %s: Public key engine has been successfully "
-+				"initialised.\n", dev->name);
-+	}
-+
-+	/*
-+	 * Enable RNG engine.
-+	 */
-+
-+	hifn_write_1(dev, HIFN_1_RNG_CONFIG,
-+			hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
-+	dprintk("Chip %s: RNG engine has been successfully initialised.\n",
-+			dev->name);
-+
-+#ifdef CRYPTO_DEV_HIFN_795X_RNG
-+	/* First value must be discarded */
-+	hifn_read_1(dev, HIFN_1_RNG_DATA);
-+	dev->rngtime = ktime_get();
-+#endif
-+	return 0;
-+}
-+
-+static int hifn_enable_crypto(struct hifn_device *dev)
-+{
-+	u32 dmacfg, addr;
-+	char *offtbl = NULL;
-+	int i;
-+
-+	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
-+		if (pci2id[i].pci_vendor == dev->pdev->vendor &&
-+				pci2id[i].pci_prod == dev->pdev->device) {
-+			offtbl = pci2id[i].card_id;
-+			break;
-+		}
-+	}
-+
-+	if (offtbl == NULL) {
-+		dprintk("Chip %s: Unknown card!\n", dev->name);
-+		return -ENODEV;
-+	}
-+
-+	dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
-+
-+	hifn_write_1(dev, HIFN_1_DMA_CNFG,
-+			HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
-+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
-+	mdelay(1);
-+	addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
-+	mdelay(1);
-+	hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
-+	mdelay(1);
-+
-+	for (i=0; i<12; ++i) {
-+		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
-+		hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
+@@ -271,7 +274,7 @@ static void pdc202xx_dma_timeout(ide_drive_t *drive)
+ 
+ static void pdc202xx_reset_host (ide_hwif_t *hwif)
+ {
+-	unsigned long high_16	= hwif->dma_master;
++	unsigned long high_16	= hwif->extra_base - 16;
+ 	u8 udma_speed_flag	= inb(high_16 | 0x001f);
+ 
+ 	outb(udma_speed_flag | 0x10, high_16 | 0x001f);
+@@ -375,6 +378,11 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
+ 	}
+ }
+ 
++#define IDE_HFLAGS_PDC202XX \
++	(IDE_HFLAG_ERROR_STOPS_FIFO | \
++	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
++	 IDE_HFLAG_OFF_BOARD)
 +
-+		mdelay(1);
-+	}
-+	hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
+ #define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \
+ 	{ \
+ 		.name		= name_str, \
+@@ -382,9 +390,7 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
+ 		.init_hwif	= init_hwif_pdc202xx, \
+ 		.init_dma	= init_dma_pdc202xx, \
+ 		.extra		= 48, \
+-		.host_flags	= IDE_HFLAG_ERROR_STOPS_FIFO | \
+-				  extra_flags | \
+-				  IDE_HFLAG_OFF_BOARD, \
++		.host_flags	= IDE_HFLAGS_PDC202XX | extra_flags, \
+ 		.pio_mask	= ATA_PIO4, \
+ 		.mwdma_mask	= ATA_MWDMA2, \
+ 		.udma_mask	= udma, \
+@@ -397,8 +403,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
+ 		.init_hwif	= init_hwif_pdc202xx,
+ 		.init_dma	= init_dma_pdc202xx,
+ 		.extra		= 16,
+-		.host_flags	= IDE_HFLAG_ERROR_STOPS_FIFO |
+-				  IDE_HFLAG_OFF_BOARD,
++		.host_flags	= IDE_HFLAGS_PDC202XX,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= ATA_UDMA2,
+diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
+index 27781d2..bd6d3f7 100644
+--- a/drivers/ide/pci/piix.c
++++ b/drivers/ide/pci/piix.c
+@@ -203,20 +203,11 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	pci_read_config_byte(dev, 0x54, &reg54);
+ 	pci_read_config_byte(dev, 0x55, &reg55);
+ 
+-	switch(speed) {
+-		case XFER_UDMA_4:
+-		case XFER_UDMA_2:	u_speed = 2 << (drive->dn * 4); break;
+-		case XFER_UDMA_5:
+-		case XFER_UDMA_3:
+-		case XFER_UDMA_1:	u_speed = 1 << (drive->dn * 4); break;
+-		case XFER_UDMA_0:	u_speed = 0 << (drive->dn * 4); break;
+-		case XFER_MW_DMA_2:
+-		case XFER_MW_DMA_1:
+-		case XFER_SW_DMA_2:	break;
+-		default:		return;
+-	}
+-
+ 	if (speed >= XFER_UDMA_0) {
++		u8 udma = speed - XFER_UDMA_0;
 +
-+	dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));
++		u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
 +
-+	return 0;
-+}
+ 		if (!(reg48 & u_flag))
+ 			pci_write_config_byte(dev, 0x48, reg48 | u_flag);
+ 		if (speed == XFER_UDMA_5) {
+diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
+index 707d5ff..32fdf53 100644
+--- a/drivers/ide/pci/sc1200.c
++++ b/drivers/ide/pci/sc1200.c
+@@ -135,59 +135,29 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
+ 	unsigned short		pci_clock;
+ 	unsigned int		basereg = hwif->channel ? 0x50 : 0x40;
+ 
++	static const u32 udma_timing[3][3] = {
++		{ 0x00921250, 0x00911140, 0x00911030 },
++		{ 0x00932470, 0x00922260, 0x00922140 },
++		{ 0x009436a1, 0x00933481, 0x00923261 },
++	};
 +
-+static void hifn_init_dma(struct hifn_device *dev)
-+{
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+	u32 dptr = dev->desc_dma;
-+	int i;
++	static const u32 mwdma_timing[3][3] = {
++		{ 0x00077771, 0x00012121, 0x00002020 },
++		{ 0x000bbbb2, 0x00024241, 0x00013131 },
++		{ 0x000ffff3, 0x00035352, 0x00015151 },
++	};
 +
-+	for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
-+		dma->cmdr[i].p = __cpu_to_le32(dptr +
-+				offsetof(struct hifn_dma, command_bufs[i][0]));
-+	for (i=0; i<HIFN_D_RES_RSIZE; ++i)
-+		dma->resr[i].p = __cpu_to_le32(dptr +
-+				offsetof(struct hifn_dma, result_bufs[i][0]));
+ 	pci_clock = sc1200_get_pci_clock();
+ 
+ 	/*
+ 	 * Note that each DMA mode has several timings associated with it.
+ 	 * The correct timing depends on the fast PCI clock freq.
+ 	 */
+-	timings = 0;
+-	switch (mode) {
+-		case XFER_UDMA_0:
+-			switch (pci_clock) {
+-				case PCI_CLK_33:	timings = 0x00921250;	break;
+-				case PCI_CLK_48:	timings = 0x00932470;	break;
+-				case PCI_CLK_66:	timings = 0x009436a1;	break;
+-			}
+-			break;
+-		case XFER_UDMA_1:
+-			switch (pci_clock) {
+-				case PCI_CLK_33:	timings = 0x00911140;	break;
+-				case PCI_CLK_48:	timings = 0x00922260;	break;
+-				case PCI_CLK_66:	timings = 0x00933481;	break;
+-			}
+-			break;
+-		case XFER_UDMA_2:
+-			switch (pci_clock) {
+-				case PCI_CLK_33:	timings = 0x00911030;	break;
+-				case PCI_CLK_48:	timings = 0x00922140;	break;
+-				case PCI_CLK_66:	timings = 0x00923261;	break;
+-			}
+-			break;
+-		case XFER_MW_DMA_0:
+-			switch (pci_clock) {
+-				case PCI_CLK_33:	timings = 0x00077771;	break;
+-				case PCI_CLK_48:	timings = 0x000bbbb2;	break;
+-				case PCI_CLK_66:	timings = 0x000ffff3;	break;
+-			}
+-			break;
+-		case XFER_MW_DMA_1:
+-			switch (pci_clock) {
+-				case PCI_CLK_33:	timings = 0x00012121;	break;
+-				case PCI_CLK_48:	timings = 0x00024241;	break;
+-				case PCI_CLK_66:	timings = 0x00035352;	break;
+-			}
+-			break;
+-		case XFER_MW_DMA_2:
+-			switch (pci_clock) {
+-				case PCI_CLK_33:	timings = 0x00002020;	break;
+-				case PCI_CLK_48:	timings = 0x00013131;	break;
+-				case PCI_CLK_66:	timings = 0x00015151;	break;
+-			}
+-			break;
+-		default:
+-			return;
+-	}
 +
++	if (mode >= XFER_UDMA_0)
++		timings =  udma_timing[pci_clock][mode - XFER_UDMA_0];
++	else
++		timings = mwdma_timing[pci_clock][mode - XFER_MW_DMA_0];
+ 
+ 	if (unit == 0) {			/* are we configuring drive0? */
+ 		pci_read_config_dword(hwif->pci_dev, basereg+4, &reg);
+@@ -250,9 +220,9 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	}
+ 	if (mode != -1) {
+ 		printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
+-		hwif->dma_off_quietly(drive);
+-		if (ide_set_dma_mode(drive, mode) == 0)
+-			hwif->dma_host_on(drive);
++		ide_dma_off_quietly(drive);
++		if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma)
++			hwif->dma_host_set(drive, 1);
+ 		return;
+ 	}
+ 
+@@ -260,66 +230,39 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ }
+ 
+ #ifdef CONFIG_PM
+-static ide_hwif_t *lookup_pci_dev (ide_hwif_t *prev, struct pci_dev *dev)
+-{
+-	int	h;
+-
+-	for (h = 0; h < MAX_HWIFS; h++) {
+-		ide_hwif_t *hwif = &ide_hwifs[h];
+-		if (prev) {
+-			if (hwif == prev)
+-				prev = NULL;	// found previous, now look for next match
+-		} else {
+-			if (hwif && hwif->pci_dev == dev)
+-				return hwif;	// found next match
+-		}
+-	}
+-	return NULL;	// not found
+-}
+-
+-typedef struct sc1200_saved_state_s {
+-	__u32		regs[4];
+-} sc1200_saved_state_t;
+-
++struct sc1200_saved_state {
++	u32 regs[8];
++};
+ 
+ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
+ {
+-	ide_hwif_t		*hwif = NULL;
+-
+ 	printk("SC1200: suspend(%u)\n", state.event);
+ 
 +	/*
-+	 * Setup LAST descriptors.
++	 * we only save state when going from full power to less
 +	 */
-+	dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
-+			offsetof(struct hifn_dma, cmdr[0]));
-+	dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
-+			offsetof(struct hifn_dma, srcr[0]));
-+	dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
-+			offsetof(struct hifn_dma, dstr[0]));
-+	dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
-+			offsetof(struct hifn_dma, resr[0]));
-+
-+	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
-+	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
-+	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
-+}
-+
-+/*
-+ * Initialize the PLL. We need to know the frequency of the reference clock
-+ * to calculate the optimal multiplier. For PCI we assume 66MHz, since that
-+ * allows us to operate without the risk of overclocking the chip. If it
-+ * actually uses 33MHz, the chip will operate at half the speed; this can be
-+ * overridden by specifying the frequency as a module parameter (pci33).
-+ *
-+ * Unfortunately the PCI clock is not very suitable since the HIFN needs a
-+ * stable clock and the PCI clock frequency may vary, so the default is the
-+ * external clock. There is no way to find out its frequency, so we default to
-+ * 66MHz since according to Mike Ham of HiFn, almost every board in existence
-+ * has an external crystal populated at 66MHz.
-+ */
-+static void hifn_init_pll(struct hifn_device *dev)
-+{
-+	unsigned int freq, m;
-+	u32 pllcfg;
-+
-+	pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1;
-+
-+	if (strncmp(hifn_pll_ref, "ext", 3) == 0)
-+		pllcfg |= HIFN_PLL_REF_CLK_PLL;
-+	else
-+		pllcfg |= HIFN_PLL_REF_CLK_HBI;
+ 	if (state.event == PM_EVENT_ON) {
+-		// we only save state when going from full power to less
+-
+-		//
+-		// Loop over all interfaces that are part of this PCI device:
+-		//
+-		while ((hwif = lookup_pci_dev(hwif, dev)) != NULL) {
+-			sc1200_saved_state_t	*ss;
+-			unsigned int		basereg, r;
+-			//
+-			// allocate a permanent save area, if not already allocated
+-			//
+-			ss = (sc1200_saved_state_t *)hwif->config_data;
+-			if (ss == NULL) {
+-				ss = kmalloc(sizeof(sc1200_saved_state_t), GFP_KERNEL);
+-				if (ss == NULL)
+-					return -ENOMEM;
+-				hwif->config_data = (unsigned long)ss;
+-			}
+-			ss = (sc1200_saved_state_t *)hwif->config_data;
+-			//
+-			// Save timing registers:  this may be unnecessary if 
+-			// BIOS also does it
+-			//
+-			basereg = hwif->channel ? 0x50 : 0x40;
+-			for (r = 0; r < 4; ++r) {
+-				pci_read_config_dword (hwif->pci_dev, basereg + (r<<2), &ss->regs[r]);
+-			}
++		struct sc1200_saved_state *ss;
++		unsigned int r;
 +
-+	if (hifn_pll_ref[3] != '\0')
-+		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
-+	else {
-+		freq = 66;
-+		printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, "
-+				 "override with hifn_pll_ref=%.3s<frequency>\n",
-+		       freq, hifn_pll_ref);
++		/*
++		 * allocate a permanent save area, if not already allocated
++		 */
++		ss = (struct sc1200_saved_state *)pci_get_drvdata(dev);
++		if (ss == NULL) {
++			ss = kmalloc(sizeof(*ss), GFP_KERNEL);
++			if (ss == NULL)
++				return -ENOMEM;
++			pci_set_drvdata(dev, ss);
+ 		}
+-	}
+ 
+-	/* You don't need to iterate over disks -- sysfs should have done that for you already */ 
++		/*
++		 * save timing registers
++		 * (this may be unnecessary if BIOS also does it)
++		 */
++		for (r = 0; r < 8; r++)
++			pci_read_config_dword(dev, 0x40 + r * 4, &ss->regs[r]);
 +	}
-+
-+	m = HIFN_PLL_FCK_MAX / freq;
-+
-+	pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT;
-+	if (m <= 8)
-+		pllcfg |= HIFN_PLL_IS_1_8;
-+	else
-+		pllcfg |= HIFN_PLL_IS_9_12;
-+
-+	/* Select clock source and enable clock bypass */
-+	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
-+		     HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP);
-+
-+	/* Let the chip lock to the input clock */
-+	mdelay(10);
-+
-+	/* Disable clock bypass */
-+	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
-+		     HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI);
-+
-+	/* Switch the engines to the PLL */
-+	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
-+		     HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL);
+ 
+ 	pci_disable_device(dev);
+ 	pci_set_power_state(dev, pci_choose_state(dev, state));
+@@ -328,30 +271,25 @@ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
+ 
+ static int sc1200_resume (struct pci_dev *dev)
+ {
+-	ide_hwif_t	*hwif = NULL;
+-	int		i;
++	struct sc1200_saved_state *ss;
++	unsigned int r;
++	int i;
+ 
+ 	i = pci_enable_device(dev);
+ 	if (i)
+ 		return i;
+ 
+-	//
+-	// loop over all interfaces that are part of this pci device:
+-	//
+-	while ((hwif = lookup_pci_dev(hwif, dev)) != NULL) {
+-		unsigned int		basereg, r;
+-		sc1200_saved_state_t	*ss = (sc1200_saved_state_t *)hwif->config_data;
+-
+-		//
+-		// Restore timing registers:  this may be unnecessary if BIOS also does it
+-		//
+-		basereg = hwif->channel ? 0x50 : 0x40;
+-		if (ss != NULL) {
+-			for (r = 0; r < 4; ++r) {
+-				pci_write_config_dword(hwif->pci_dev, basereg + (r<<2), ss->regs[r]);
+-			}
+-		}
++	ss = (struct sc1200_saved_state *)pci_get_drvdata(dev);
 +
 +	/*
-+	 * The Fpk_clk runs at half the total speed. Its frequency is needed to
-+	 * calculate the minimum time between two reads of the rng. Since 33MHz
-+	 * is actually 33.333... we overestimate the frequency here, resulting
-+	 * in slightly larger intervals.
++	 * restore timing registers
++	 * (this may be unnecessary if BIOS also does it)
 +	 */
-+	dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2;
-+}
-+
-+static void hifn_init_registers(struct hifn_device *dev)
-+{
-+	u32 dptr = dev->desc_dma;
-+
-+	/* Initialization magic... */
-+	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
-+	hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
-+	hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
-+
-+	/* write all 4 ring address registers */
-+	hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr +
-+				offsetof(struct hifn_dma, cmdr[0])));
-+	hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr +
-+				offsetof(struct hifn_dma, srcr[0])));
-+	hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr +
-+				offsetof(struct hifn_dma, dstr[0])));
-+	hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr +
-+				offsetof(struct hifn_dma, resr[0])));
-+
-+	mdelay(2);
-+#if 0
-+	hifn_write_1(dev, HIFN_1_DMA_CSR,
-+	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
-+	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
-+	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
-+	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
-+	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
-+	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
-+	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
-+	    HIFN_DMACSR_S_WAIT |
-+	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
-+	    HIFN_DMACSR_C_WAIT |
-+	    HIFN_DMACSR_ENGINE |
-+	    HIFN_DMACSR_PUBDONE);
-+#else
-+	hifn_write_1(dev, HIFN_1_DMA_CSR,
-+	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
-+	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
-+	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
-+	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
-+	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
-+	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
-+	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
-+	    HIFN_DMACSR_S_WAIT |
-+	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
-+	    HIFN_DMACSR_C_WAIT |
-+	    HIFN_DMACSR_ENGINE |
-+	    HIFN_DMACSR_PUBDONE);
-+#endif
-+	hifn_read_1(dev, HIFN_1_DMA_CSR);
-+
-+	dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
-+	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
-+	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
-+	    HIFN_DMAIER_ENGINE;
-+	dev->dmareg &= ~HIFN_DMAIER_C_WAIT;
-+
-+	hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
-+	hifn_read_1(dev, HIFN_1_DMA_IER);
-+#if 0
-+	hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
-+		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
-+		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
-+		    HIFN_PUCNFG_DRAM);
-+#else
-+	hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
-+#endif
-+	hifn_init_pll(dev);
++	if (ss) {
++		for (r = 0; r < 8; r++)
++			pci_write_config_dword(dev, 0x40 + r * 4, ss->regs[r]);
+ 	}
 +
-+	hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
-+	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
-+	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
-+	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
-+	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
-+}
+ 	return 0;
+ }
+ #endif
+diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
+index ebb7132..24a85bb 100644
+--- a/drivers/ide/pci/scc_pata.c
++++ b/drivers/ide/pci/scc_pata.c
+@@ -254,19 +254,7 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 		offset = 0; /* 100MHz */
+ 	}
+ 
+-	switch (speed) {
+-	case XFER_UDMA_6:
+-	case XFER_UDMA_5:
+-	case XFER_UDMA_4:
+-	case XFER_UDMA_3:
+-	case XFER_UDMA_2:
+-	case XFER_UDMA_1:
+-	case XFER_UDMA_0:
+-		idx = speed - XFER_UDMA_0;
+-		break;
+-	default:
+-		return;
+-	}
++	idx = speed - XFER_UDMA_0;
+ 
+ 	jcactsel = JCACTSELtbl[offset][idx];
+ 	if (is_slave) {
+diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
+index a728031..877c09b 100644
+--- a/drivers/ide/pci/serverworks.c
++++ b/drivers/ide/pci/serverworks.c
+@@ -164,25 +164,12 @@ static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	ultra_timing	&= ~(0x0F << (4*unit));
+ 	ultra_enable	&= ~(0x01 << drive->dn);
+ 
+-	switch(speed) {
+-		case XFER_MW_DMA_2:
+-		case XFER_MW_DMA_1:
+-		case XFER_MW_DMA_0:
+-			dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
+-			break;
+-
+-		case XFER_UDMA_5:
+-		case XFER_UDMA_4:
+-		case XFER_UDMA_3:
+-		case XFER_UDMA_2:
+-		case XFER_UDMA_1:
+-		case XFER_UDMA_0:
+-			dma_timing   |= dma_modes[2];
+-			ultra_timing |= ((udma_modes[speed - XFER_UDMA_0]) << (4*unit));
+-			ultra_enable |= (0x01 << drive->dn);
+-		default:
+-			break;
+-	}
++	if (speed >= XFER_UDMA_0) {
++		dma_timing   |= dma_modes[2];
++		ultra_timing |= (udma_modes[speed - XFER_UDMA_0] << (4 * unit));
++		ultra_enable |= (0x01 << drive->dn);
++	} else if (speed >= XFER_MW_DMA_0)
++		dma_timing   |= dma_modes[speed - XFER_MW_DMA_0];
+ 
+ 	pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
+ 	pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
+@@ -366,12 +353,17 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
+ 	}
+ }
+ 
++#define IDE_HFLAGS_SVWKS \
++	(IDE_HFLAG_LEGACY_IRQS | \
++	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
++	 IDE_HFLAG_BOOTABLE)
 +
-+static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
-+		unsigned dlen, unsigned slen, u16 mask, u8 snum)
-+{
-+	struct hifn_base_command *base_cmd;
-+	u8 *buf_pos = buf;
+ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+ 	{	/* 0 */
+ 		.name		= "SvrWks OSB4",
+ 		.init_chipset	= init_chipset_svwks,
+ 		.init_hwif	= init_hwif_svwks,
+-		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
++		.host_flags	= IDE_HFLAGS_SVWKS,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= 0x00, /* UDMA is problematic on OSB4 */
+@@ -379,7 +371,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+ 		.name		= "SvrWks CSB5",
+ 		.init_chipset	= init_chipset_svwks,
+ 		.init_hwif	= init_hwif_svwks,
+-		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
++		.host_flags	= IDE_HFLAGS_SVWKS,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= ATA_UDMA5,
+@@ -387,7 +379,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+ 		.name		= "SvrWks CSB6",
+ 		.init_chipset	= init_chipset_svwks,
+ 		.init_hwif	= init_hwif_svwks,
+-		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
++		.host_flags	= IDE_HFLAGS_SVWKS,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= ATA_UDMA5,
+@@ -395,8 +387,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+ 		.name		= "SvrWks CSB6",
+ 		.init_chipset	= init_chipset_svwks,
+ 		.init_hwif	= init_hwif_svwks,
+-		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_SINGLE |
+-				  IDE_HFLAG_BOOTABLE,
++		.host_flags	= IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= ATA_UDMA5,
+@@ -404,8 +395,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+ 		.name		= "SvrWks HT1000",
+ 		.init_chipset	= init_chipset_svwks,
+ 		.init_hwif	= init_hwif_svwks,
+-		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_SINGLE |
+-				  IDE_HFLAG_BOOTABLE,
++		.host_flags	= IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
+ 		.pio_mask	= ATA_PIO4,
+ 		.mwdma_mask	= ATA_MWDMA2,
+ 		.udma_mask	= ATA_UDMA5,
+diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
+index de820aa..9e0be7d 100644
+--- a/drivers/ide/pci/sgiioc4.c
++++ b/drivers/ide/pci/sgiioc4.c
+@@ -277,21 +277,6 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
+ 	return dma_stat;
+ }
+ 
+-static int
+-sgiioc4_ide_dma_on(ide_drive_t * drive)
+-{
+-	drive->using_dma = 1;
+-
+-	return 0;
+-}
+-
+-static void sgiioc4_dma_off_quietly(ide_drive_t *drive)
+-{
+-	drive->using_dma = 0;
+-
+-	drive->hwif->dma_host_off(drive);
+-}
+-
+ static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ {
+ }
+@@ -303,13 +288,10 @@ sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
+ 	return sgiioc4_checkirq(HWIF(drive));
+ }
+ 
+-static void sgiioc4_dma_host_on(ide_drive_t * drive)
+-{
+-}
+-
+-static void sgiioc4_dma_host_off(ide_drive_t * drive)
++static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
+ {
+-	sgiioc4_clearirq(drive);
++	if (!on)
++		sgiioc4_clearirq(drive);
+ }
+ 
+ static void
+@@ -582,7 +564,6 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
+ 	hwif->pre_reset = NULL;	/* No HBA specific pre_set needed */
+ 	hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
+ 						clear interrupts */
+-	hwif->intrproc = NULL;	/* Enable or Disable interrupt from drive */
+ 	hwif->maskproc = &sgiioc4_maskproc;	/* Mask on/off NIEN register */
+ 	hwif->quirkproc = NULL;
+ 	hwif->busproc = NULL;
+@@ -594,14 +575,11 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
+ 
+ 	hwif->mwdma_mask = ATA_MWDMA2_ONLY;
+ 
++	hwif->dma_host_set = &sgiioc4_dma_host_set;
+ 	hwif->dma_setup = &sgiioc4_ide_dma_setup;
+ 	hwif->dma_start = &sgiioc4_ide_dma_start;
+ 	hwif->ide_dma_end = &sgiioc4_ide_dma_end;
+-	hwif->ide_dma_on = &sgiioc4_ide_dma_on;
+-	hwif->dma_off_quietly = &sgiioc4_dma_off_quietly;
+ 	hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
+-	hwif->dma_host_on = &sgiioc4_dma_host_on;
+-	hwif->dma_host_off = &sgiioc4_dma_host_off;
+ 	hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
+ 	hwif->dma_timeout = &ide_dma_timeout;
+ }
+@@ -615,6 +593,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
+ 	ide_hwif_t *hwif;
+ 	int h;
+ 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
++	hw_regs_t hw;
+ 
+ 	/*
+ 	 * Find an empty HWIF; if none available, return -ENOMEM.
+@@ -654,21 +633,16 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (hwif->io_ports[IDE_DATA_OFFSET] != cmd_base) {
+-		hw_regs_t hw;
+-
+-		/* Initialize the IO registers */
+-		memset(&hw, 0, sizeof(hw));
+-		sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
+-		memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
+-		hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
+-	}
++	/* Initialize the IO registers */
++	memset(&hw, 0, sizeof(hw));
++	sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
++	hw.irq = dev->irq;
++	hw.chipset = ide_pci;
++	hw.dev = &dev->dev;
++	ide_init_port_hw(hwif, &hw);
+ 
+-	hwif->irq = dev->irq;
+-	hwif->chipset = ide_pci;
+ 	hwif->pci_dev = dev;
+ 	hwif->channel = 0;	/* Single Channel chip */
+-	hwif->gendev.parent = &dev->dev;/* setup proper ancestral information */
+ 
+ 	/* The IOC4 uses MMIO rather than Port IO. */
+ 	default_hwif_mmiops(hwif);
+diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
+index 5709c25..908f37b 100644
+--- a/drivers/ide/pci/siimage.c
++++ b/drivers/ide/pci/siimage.c
+@@ -278,27 +278,14 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 
+ 	scsc = is_sata(hwif) ? 1 : scsc;
+ 
+-	switch(speed) {
+-		case XFER_MW_DMA_2:
+-		case XFER_MW_DMA_1:
+-		case XFER_MW_DMA_0:
+-			multi = dma[speed - XFER_MW_DMA_0];
+-			mode |= ((unit) ? 0x20 : 0x02);
+-			break;
+-		case XFER_UDMA_6:
+-		case XFER_UDMA_5:
+-		case XFER_UDMA_4:
+-		case XFER_UDMA_3:
+-		case XFER_UDMA_2:
+-		case XFER_UDMA_1:
+-		case XFER_UDMA_0:
+-			multi = dma[2];
+-			ultra |= ((scsc) ? (ultra6[speed - XFER_UDMA_0]) :
+-					   (ultra5[speed - XFER_UDMA_0]));
+-			mode |= ((unit) ? 0x30 : 0x03);
+-			break;
+-		default:
+-			return;
++	if (speed >= XFER_UDMA_0) {
++		multi = dma[2];
++		ultra |= (scsc ? ultra6[speed - XFER_UDMA_0] :
++				 ultra5[speed - XFER_UDMA_0]);
++		mode |= (unit ? 0x30 : 0x03);
++	} else {
++		multi = dma[speed - XFER_MW_DMA_0];
++		mode |= (unit ? 0x20 : 0x02);
+ 	}
+ 
+ 	if (hwif->mmio) {
+@@ -726,9 +713,6 @@ static int is_dev_seagate_sata(ide_drive_t *drive)
+ 	const char *s = &drive->id->model[0];
+ 	unsigned len;
+ 
+-	if (!drive->present)
+-		return 0;
+-
+ 	len = strnlen(s, sizeof(drive->id->model));
+ 
+ 	if ((len > 4) && (!memcmp(s, "ST", 2))) {
+@@ -743,18 +727,20 @@ static int is_dev_seagate_sata(ide_drive_t *drive)
+ }
+ 
+ /**
+- *	siimage_fixup		-	post probe fixups
+- *	@hwif: interface to fix up
++ *	sil_quirkproc		-	post probe fixups
++ *	@drive: drive
+  *
+  *	Called after drive probe we use this to decide whether the
+  *	Seagate fixup must be applied. This used to be in init_iops but
+  *	that can occur before we know what drives are present.
+  */
+ 
+-static void __devinit siimage_fixup(ide_hwif_t *hwif)
++static void __devinit sil_quirkproc(ide_drive_t *drive)
+ {
++	ide_hwif_t *hwif = drive->hwif;
 +
-+	base_cmd = (struct hifn_base_command *)buf_pos;
-+	base_cmd->masks = __cpu_to_le16(mask);
-+	base_cmd->total_source_count =
-+		__cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
-+	base_cmd->total_dest_count =
-+		__cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
+ 	/* Try and raise the rqsize */
+-	if (!is_sata(hwif) || !is_dev_seagate_sata(&hwif->drives[0]))
++	if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
+ 		hwif->rqsize = 128;
+ }
+ 
+@@ -817,6 +803,7 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
+ 
+ 	hwif->set_pio_mode = &sil_set_pio_mode;
+ 	hwif->set_dma_mode = &sil_set_dma_mode;
++	hwif->quirkproc = &sil_quirkproc;
+ 
+ 	if (sata) {
+ 		static int first = 1;
+@@ -855,7 +842,6 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
+ 		.init_chipset	= init_chipset_siimage,	\
+ 		.init_iops	= init_iops_siimage,	\
+ 		.init_hwif	= init_hwif_siimage,	\
+-		.fixup		= siimage_fixup,	\
+ 		.host_flags	= IDE_HFLAG_BOOTABLE,	\
+ 		.pio_mask	= ATA_PIO4,		\
+ 		.mwdma_mask	= ATA_MWDMA2,		\
+diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
+index d90b429..85d3699 100644
+--- a/drivers/ide/pci/sis5513.c
++++ b/drivers/ide/pci/sis5513.c
+@@ -305,59 +305,56 @@ static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	sis_program_timings(drive, XFER_PIO_0 + pio);
+ }
+ 
+-static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
++static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode)
+ {
+-	ide_hwif_t *hwif	= HWIF(drive);
+-	struct pci_dev *dev	= hwif->pci_dev;
++	struct pci_dev *dev = drive->hwif->pci_dev;
++	u32 regdw = 0;
++	u8 drive_pci = sis_ata133_get_base(drive), clk, idx;
+ 
+-	/* Config chip for mode */
+-	switch(speed) {
+-		case XFER_UDMA_6:
+-		case XFER_UDMA_5:
+-		case XFER_UDMA_4:
+-		case XFER_UDMA_3:
+-		case XFER_UDMA_2:
+-		case XFER_UDMA_1:
+-		case XFER_UDMA_0:
+-			if (chipset_family >= ATA_133) {
+-				u32 regdw = 0;
+-				u8 drive_pci = sis_ata133_get_base(drive);
+-
+-				pci_read_config_dword(dev, drive_pci, &regdw);
+-				regdw |= 0x04;
+-				regdw &= 0xfffff00f;
+-				/* check if ATA133 enable */
+-				if (regdw & 0x08) {
+-					regdw |= (unsigned long)cycle_time_value[ATA_133][speed-XFER_UDMA_0] << 4;
+-					regdw |= (unsigned long)cvs_time_value[ATA_133][speed-XFER_UDMA_0] << 8;
+-				} else {
+-					regdw |= (unsigned long)cycle_time_value[ATA_100][speed-XFER_UDMA_0] << 4;
+-					regdw |= (unsigned long)cvs_time_value[ATA_100][speed-XFER_UDMA_0] << 8;
+-				}
+-				pci_write_config_dword(dev, (unsigned long)drive_pci, regdw);
+-			} else {
+-				u8 drive_pci = 0x40 + drive->dn * 2, reg = 0;
+-
+-				pci_read_config_byte(dev, drive_pci+1, &reg);
+-				/* Force the UDMA bit on if we want to use UDMA */
+-				reg |= 0x80;
+-				/* clean reg cycle time bits */
+-				reg &= ~((0xFF >> (8 - cycle_time_range[chipset_family]))
+-					 << cycle_time_offset[chipset_family]);
+-				/* set reg cycle time bits */
+-				reg |= cycle_time_value[chipset_family][speed-XFER_UDMA_0]
+-					<< cycle_time_offset[chipset_family];
+-				pci_write_config_byte(dev, drive_pci+1, reg);
+-			}
+-			break;
+-		case XFER_MW_DMA_2:
+-		case XFER_MW_DMA_1:
+-		case XFER_MW_DMA_0:
+-			sis_program_timings(drive, speed);
+-			break;
+-		default:
+-			break;
+-	}
++	pci_read_config_dword(dev, drive_pci, &regdw);
 +
-+	dlen >>= 16;
-+	slen >>= 16;
-+	base_cmd->session_num = __cpu_to_le16(snum |
-+	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
-+	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
++	regdw |= 0x04;
++	regdw &= 0xfffff00f;
++	/* check if ATA133 is enabled */
++	clk = (regdw & 0x08) ? ATA_133 : ATA_100;
++	idx = mode - XFER_UDMA_0;
++	regdw |= cycle_time_value[clk][idx] << 4;
++	regdw |= cvs_time_value[clk][idx] << 8;
 +
-+	return sizeof(struct hifn_base_command);
++	pci_write_config_dword(dev, drive_pci, regdw);
 +}
 +
-+static int hifn_setup_crypto_command(struct hifn_device *dev,
-+		u8 *buf, unsigned dlen, unsigned slen,
-+		u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
++static void sis_ata33_program_udma_timings(ide_drive_t *drive, const u8 mode)
 +{
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+	struct hifn_crypt_command *cry_cmd;
-+	u8 *buf_pos = buf;
-+	u16 cmd_len;
-+
-+	cry_cmd = (struct hifn_crypt_command *)buf_pos;
-+
-+	cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
-+	dlen >>= 16;
-+	cry_cmd->masks = __cpu_to_le16(mode |
-+			((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
-+			 HIFN_CRYPT_CMD_SRCLEN_M));
-+	cry_cmd->header_skip = 0;
-+	cry_cmd->reserved = 0;
-+
-+	buf_pos += sizeof(struct hifn_crypt_command);
-+
-+	dma->cmdu++;
-+	if (dma->cmdu > 1) {
-+		dev->dmareg |= HIFN_DMAIER_C_WAIT;
-+		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
-+	}
++	struct pci_dev *dev = drive->hwif->pci_dev;
++	u8 drive_pci = 0x40 + drive->dn * 2, reg = 0, i = chipset_family;
 +
-+	if (keylen) {
-+		memcpy(buf_pos, key, keylen);
-+		buf_pos += keylen;
-+	}
-+	if (ivsize) {
-+		memcpy(buf_pos, iv, ivsize);
-+		buf_pos += ivsize;
-+	}
++	pci_read_config_byte(dev, drive_pci + 1, &reg);
 +
-+	cmd_len = buf_pos - buf;
++	/* force the UDMA bit on if we want to use UDMA */
++	reg |= 0x80;
++	/* clean reg cycle time bits */
++	reg &= ~((0xff >> (8 - cycle_time_range[i])) << cycle_time_offset[i]);
++	/* set reg cycle time bits */
++	reg |= cycle_time_value[i][mode - XFER_UDMA_0] << cycle_time_offset[i];
 +
-+	return cmd_len;
++	pci_write_config_byte(dev, drive_pci + 1, reg);
 +}
 +
-+static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
-+		unsigned int offset, unsigned int size)
++static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode)
 +{
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+	int idx;
-+	dma_addr_t addr;
-+
-+	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
-+
-+	idx = dma->srci;
-+
-+	dma->srcr[idx].p = __cpu_to_le32(addr);
-+	dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
-+			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
-+
-+	if (++idx == HIFN_D_SRC_RSIZE) {
-+		dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
-+				HIFN_D_JUMP |
-+				HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
-+		idx = 0;
-+	}
-+
-+	dma->srci = idx;
-+	dma->srcu++;
-+
-+	if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
-+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
-+		dev->flags |= HIFN_FLAG_SRC_BUSY;
-+	}
-+
-+	return size;
++	if (chipset_family >= ATA_133)	/* ATA_133 */
++		sis_ata133_program_udma_timings(drive, mode);
++	else				/* ATA_33/66/100a/100/133a */
++		sis_ata33_program_udma_timings(drive, mode);
 +}
 +
-+static void hifn_setup_res_desc(struct hifn_device *dev)
++static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
 +{
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+
-+	dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
-+			HIFN_D_VALID | HIFN_D_LAST);
++	if (speed >= XFER_UDMA_0)
++		sis_program_udma_timings(drive, speed);
++	else
++		sis_program_timings(drive, speed);
+ }
+ 
+ static u8 sis5513_ata133_udma_filter(ide_drive_t *drive)
+diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
+index 147d783..c7a125b 100644
+--- a/drivers/ide/pci/sl82c105.c
++++ b/drivers/ide/pci/sl82c105.c
+@@ -13,6 +13,7 @@
+  *  -- Benjamin Herrenschmidt (01/11/03) benh at kernel.crashing.org
+  *
+  * Copyright (C) 2006-2007 MontaVista Software, Inc. <source at mvista.com>
++ * Copyright (C)      2007 Bartlomiej Zolnierkiewicz
+  */
+ 
+ #include <linux/types.h>
+@@ -90,14 +91,8 @@ static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	drive->drive_data &= 0xffff0000;
+ 	drive->drive_data |= drv_ctrl;
+ 
+-	if (!drive->using_dma) {
+-		/*
+-		 * If we are actually using MW DMA, then we can not
+-		 * reprogram the interface drive control register.
+-		 */
+-		pci_write_config_word(dev, reg,  drv_ctrl);
+-		pci_read_config_word (dev, reg, &drv_ctrl);
+-	}
++	pci_write_config_word(dev, reg,  drv_ctrl);
++	pci_read_config_word (dev, reg, &drv_ctrl);
+ 
+ 	printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name,
+ 			  ide_xfer_verbose(pio + XFER_PIO_0),
+@@ -115,33 +110,14 @@ static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed)
+  	DBG(("sl82c105_tune_chipset(drive:%s, speed:%s)\n",
+ 	     drive->name, ide_xfer_verbose(speed)));
+ 
+-	switch (speed) {
+-	case XFER_MW_DMA_2:
+-	case XFER_MW_DMA_1:
+-	case XFER_MW_DMA_0:
+-		drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
++	drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
+ 
+-		/*
+-		 * Store the DMA timings so that we can actually program
+-		 * them when DMA will be turned on...
+-		 */
+-		drive->drive_data &= 0x0000ffff;
+-		drive->drive_data |= (unsigned long)drv_ctrl << 16;
+-
+-		/*
+-		 * If we are already using DMA, we just reprogram
+-		 * the drive control register.
+-		 */
+-		if (drive->using_dma) {
+-			struct pci_dev *dev	= HWIF(drive)->pci_dev;
+-			int reg 		= 0x44 + drive->dn * 4;
+-
+-			pci_write_config_word(dev, reg, drv_ctrl);
+-		}
+-		break;
+-	default:
+-		return;
+-	}
 +	/*
-+	 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
-+	 *					HIFN_D_LAST | HIFN_D_NOINVALID);
++	 * Store the DMA timings so that we can actually program
++	 * them when DMA will be turned on...
 +	 */
++	drive->drive_data &= 0x0000ffff;
++	drive->drive_data |= (unsigned long)drv_ctrl << 16;
+ }
+ 
+ /*
+@@ -209,6 +185,11 @@ static void sl82c105_dma_start(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif	= HWIF(drive);
+ 	struct pci_dev *dev	= hwif->pci_dev;
++	int reg 		= 0x44 + drive->dn * 4;
 +
-+	if (++dma->resi == HIFN_D_RES_RSIZE) {
-+		dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
-+				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
-+		dma->resi = 0;
-+	}
-+
-+	dma->resu++;
-+
-+	if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
-+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
-+		dev->flags |= HIFN_FLAG_RES_BUSY;
-+	}
-+}
-+
-+static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
-+		unsigned offset, unsigned size)
-+{
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+	int idx;
-+	dma_addr_t addr;
-+
-+	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
++	DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
 +
-+	idx = dma->dsti;
-+	dma->dstr[idx].p = __cpu_to_le32(addr);
-+	dma->dstr[idx].l = __cpu_to_le32(size |	HIFN_D_VALID |
-+			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
++	pci_write_config_word(dev, reg, drive->drive_data >> 16);
+ 
+ 	sl82c105_reset_host(dev);
+ 	ide_dma_start(drive);
+@@ -222,64 +203,24 @@ static void sl82c105_dma_timeout(ide_drive_t *drive)
+ 	ide_dma_timeout(drive);
+ }
+ 
+-static int sl82c105_ide_dma_on(ide_drive_t *drive)
+-{
+-	struct pci_dev *dev	= HWIF(drive)->pci_dev;
+-	int rc, reg 		= 0x44 + drive->dn * 4;
+-
+-	DBG(("sl82c105_ide_dma_on(drive:%s)\n", drive->name));
+-
+-	rc = __ide_dma_on(drive);
+-	if (rc == 0) {
+-		pci_write_config_word(dev, reg, drive->drive_data >> 16);
+-
+-		printk(KERN_INFO "%s: DMA enabled\n", drive->name);
+-	}
+-	return rc;
+-}
+-
+-static void sl82c105_dma_off_quietly(ide_drive_t *drive)
++static int sl82c105_dma_end(ide_drive_t *drive)
+ {
+ 	struct pci_dev *dev	= HWIF(drive)->pci_dev;
+ 	int reg 		= 0x44 + drive->dn * 4;
++	int ret;
+ 
+-	DBG(("sl82c105_dma_off_quietly(drive:%s)\n", drive->name));
++	DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
+ 
+-	pci_write_config_word(dev, reg, drive->drive_data);
++	ret = __ide_dma_end(drive);
+ 
+-	ide_dma_off_quietly(drive);
+-}
++	pci_write_config_word(dev, reg, drive->drive_data);
+ 
+-/*
+- * Ok, that is nasty, but we must make sure the DMA timings
+- * won't be used for a PIO access. The solution here is
+- * to make sure the 16 bits mode is diabled on the channel
+- * when DMA is enabled, thus causing the chip to use PIO0
+- * timings for those operations.
+- */
+-static void sl82c105_selectproc(ide_drive_t *drive)
+-{
+-	ide_hwif_t *hwif	= HWIF(drive);
+-	struct pci_dev *dev	= hwif->pci_dev;
+-	u32 val, old, mask;
+-
+-	//DBG(("sl82c105_selectproc(drive:%s)\n", drive->name));
+-
+-	mask = hwif->channel ? CTRL_P1F16 : CTRL_P0F16;
+-	old = val = (u32)pci_get_drvdata(dev);
+-	if (drive->using_dma)
+-		val &= ~mask;
+-	else
+-		val |= mask;
+-	if (old != val) {
+-		pci_write_config_dword(dev, 0x40, val);	
+-		pci_set_drvdata(dev, (void *)val);
+-	}
++	return ret;
+ }
+ 
+ /*
+  * ATA reset will clear the 16 bits mode in the control
+- * register, we need to update our cache
++ * register, we need to reprogram it
+  */
+ static void sl82c105_resetproc(ide_drive_t *drive)
+ {
+@@ -289,7 +230,8 @@ static void sl82c105_resetproc(ide_drive_t *drive)
+ 	DBG(("sl82c105_resetproc(drive:%s)\n", drive->name));
+ 
+ 	pci_read_config_dword(dev, 0x40, &val);
+-	pci_set_drvdata(dev, (void *)val);
++	val |= (CTRL_P1F16 | CTRL_P0F16);
++	pci_write_config_dword(dev, 0x40, val);
+ }
+ 
+ /*
+@@ -342,7 +284,6 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c
+ 	pci_read_config_dword(dev, 0x40, &val);
+ 	val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
+ 	pci_write_config_dword(dev, 0x40, val);
+-	pci_set_drvdata(dev, (void *)val);
+ 
+ 	return dev->irq;
+ }
+@@ -358,7 +299,6 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
+ 
+ 	hwif->set_pio_mode	= &sl82c105_set_pio_mode;
+ 	hwif->set_dma_mode	= &sl82c105_set_dma_mode;
+-	hwif->selectproc	= &sl82c105_selectproc;
+ 	hwif->resetproc 	= &sl82c105_resetproc;
+ 
+ 	if (!hwif->dma_base)
+@@ -377,10 +317,9 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
+ 
+ 	hwif->mwdma_mask = ATA_MWDMA2;
+ 
+-	hwif->ide_dma_on		= &sl82c105_ide_dma_on;
+-	hwif->dma_off_quietly		= &sl82c105_dma_off_quietly;
+ 	hwif->dma_lost_irq		= &sl82c105_dma_lost_irq;
+ 	hwif->dma_start			= &sl82c105_dma_start;
++	hwif->ide_dma_end		= &sl82c105_dma_end;
+ 	hwif->dma_timeout		= &sl82c105_dma_timeout;
+ 
+ 	if (hwif->mate)
+diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
+index eb4445b..dbbb468 100644
+--- a/drivers/ide/pci/slc90e66.c
++++ b/drivers/ide/pci/slc90e66.c
+@@ -91,19 +91,9 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	pci_read_config_word(dev, 0x48, &reg48);
+ 	pci_read_config_word(dev, 0x4a, &reg4a);
+ 
+-	switch(speed) {
+-		case XFER_UDMA_4:	u_speed = 4 << (drive->dn * 4); break;
+-		case XFER_UDMA_3:	u_speed = 3 << (drive->dn * 4); break;
+-		case XFER_UDMA_2:	u_speed = 2 << (drive->dn * 4); break;
+-		case XFER_UDMA_1:	u_speed = 1 << (drive->dn * 4); break;
+-		case XFER_UDMA_0:	u_speed = 0 << (drive->dn * 4); break;
+-		case XFER_MW_DMA_2:
+-		case XFER_MW_DMA_1:
+-		case XFER_SW_DMA_2:	break;
+-		default:		return;
+-	}
+-
+ 	if (speed >= XFER_UDMA_0) {
++		u_speed = (speed - XFER_UDMA_0) << (drive->dn * 4);
 +
-+	if (++idx == HIFN_D_DST_RSIZE) {
-+		dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
-+				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
-+				HIFN_D_LAST | HIFN_D_NOINVALID);
-+		idx = 0;
-+	}
-+	dma->dsti = idx;
-+	dma->dstu++;
+ 		if (!(reg48 & u_flag))
+ 			pci_write_config_word(dev, 0x48, reg48|u_flag);
+ 		/* FIXME: (reg4a & a_speed) ? */
+diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
+index a66ebd1..e1faf6c 100644
+--- a/drivers/ide/pci/tc86c001.c
++++ b/drivers/ide/pci/tc86c001.c
+@@ -222,7 +222,8 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = {
+ 	.name		= "TC86C001",
+ 	.init_chipset	= init_chipset_tc86c001,
+ 	.init_hwif	= init_hwif_tc86c001,
+-	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
++	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
++			  IDE_HFLAG_ABUSE_SET_DMA_MODE,
+ 	.pio_mask	= ATA_PIO4,
+ 	.mwdma_mask	= ATA_MWDMA2,
+ 	.udma_mask	= ATA_UDMA4,
+diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
+index a227c41..ae52a96 100644
+--- a/drivers/ide/pci/triflex.c
++++ b/drivers/ide/pci/triflex.c
+@@ -81,8 +81,6 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
+ 		case XFER_PIO_0:
+ 			timing = 0x0808;
+ 			break;
+-		default:
+-			return;
+ 	}
+ 
+ 	triflex_timings &= ~(0xFFFF << (16 * unit));
+diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
+index 0151d7f..04cd893 100644
+--- a/drivers/ide/pci/trm290.c
++++ b/drivers/ide/pci/trm290.c
+@@ -241,11 +241,7 @@ static int trm290_ide_dma_test_irq (ide_drive_t *drive)
+ 	return (status == 0x00ff);
+ }
+ 
+-static void trm290_dma_host_on(ide_drive_t *drive)
+-{
+-}
+-
+-static void trm290_dma_host_off(ide_drive_t *drive)
++static void trm290_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+@@ -289,8 +285,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
+ 
+ 	ide_setup_dma(hwif, (hwif->config_data + 4) ^ (hwif->channel ? 0x0080 : 0x0000), 3);
+ 
+-	hwif->dma_host_off	= &trm290_dma_host_off;
+-	hwif->dma_host_on	= &trm290_dma_host_on;
++	hwif->dma_host_set	= &trm290_dma_host_set;
+ 	hwif->dma_setup 	= &trm290_dma_setup;
+ 	hwif->dma_exec_cmd	= &trm290_dma_exec_cmd;
+ 	hwif->dma_start 	= &trm290_dma_start;
+diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
+index a0d3c16..4b32c90 100644
+--- a/drivers/ide/pci/via82cxxx.c
++++ b/drivers/ide/pci/via82cxxx.c
+@@ -439,6 +439,7 @@ static const struct ide_port_info via82cxxx_chipset __devinitdata = {
+ 	.enablebits	= { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
+ 	.host_flags	= IDE_HFLAG_PIO_NO_BLACKLIST |
+ 			  IDE_HFLAG_PIO_NO_DOWNGRADE |
++			  IDE_HFLAG_ABUSE_SET_DMA_MODE |
+ 			  IDE_HFLAG_POST_SET_MODE |
+ 			  IDE_HFLAG_IO_32BIT |
+ 			  IDE_HFLAG_BOOTABLE,
+diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile
+new file mode 100644
+index 0000000..65af584
+--- /dev/null
++++ b/drivers/ide/ppc/Makefile
+@@ -0,0 +1,3 @@
 +
-+	if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
-+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
-+		dev->flags |= HIFN_FLAG_DST_BUSY;
-+	}
-+}
++obj-$(CONFIG_BLK_DEV_IDE_PMAC)		+= pmac.o
++obj-$(CONFIG_BLK_DEV_MPC8xx_IDE)	+= mpc8xx.o
+diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
+index 5f0da35..3fd5d45 100644
+--- a/drivers/ide/ppc/mpc8xx.c
++++ b/drivers/ide/ppc/mpc8xx.c
+@@ -838,3 +838,21 @@ void m8xx_ide_init(void)
+ 	ppc_ide_md.default_io_base      = m8xx_ide_default_io_base;
+ 	ppc_ide_md.ide_init_hwif        = m8xx_ide_init_hwif_ports;
+ }
 +
-+static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
-+		struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
-+		struct hifn_context *ctx)
++static int __init mpc8xx_ide_probe(void)
 +{
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+	int cmd_len, sa_idx;
-+	u8 *buf, *buf_pos;
-+	u16 mask;
-+
-+	dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
-+			dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
-+
-+	sa_idx = dma->resi;
-+
-+	hifn_setup_src_desc(dev, spage, soff, nbytes);
-+
-+	buf_pos = buf = dma->command_bufs[dma->cmdi];
-+
-+	mask = 0;
-+	switch (ctx->op) {
-+		case ACRYPTO_OP_DECRYPT:
-+			mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
-+			break;
-+		case ACRYPTO_OP_ENCRYPT:
-+			mask = HIFN_BASE_CMD_CRYPT;
-+			break;
-+		case ACRYPTO_OP_HMAC:
-+			mask = HIFN_BASE_CMD_MAC;
-+			break;
-+		default:
-+			goto err_out;
-+	}
-+
-+	buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
-+			nbytes, mask, dev->snum);
-+
-+	if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) {
-+		u16 md = 0;
-+
-+		if (ctx->keysize)
-+			md |= HIFN_CRYPT_CMD_NEW_KEY;
-+		if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB)
-+			md |= HIFN_CRYPT_CMD_NEW_IV;
-+
-+		switch (ctx->mode) {
-+			case ACRYPTO_MODE_ECB:
-+				md |= HIFN_CRYPT_CMD_MODE_ECB;
-+				break;
-+			case ACRYPTO_MODE_CBC:
-+				md |= HIFN_CRYPT_CMD_MODE_CBC;
-+				break;
-+			case ACRYPTO_MODE_CFB:
-+				md |= HIFN_CRYPT_CMD_MODE_CFB;
-+				break;
-+			case ACRYPTO_MODE_OFB:
-+				md |= HIFN_CRYPT_CMD_MODE_OFB;
-+				break;
-+			default:
-+				goto err_out;
-+		}
-+
-+		switch (ctx->type) {
-+			case ACRYPTO_TYPE_AES_128:
-+				if (ctx->keysize != 16)
-+					goto err_out;
-+				md |= HIFN_CRYPT_CMD_KSZ_128 |
-+					HIFN_CRYPT_CMD_ALG_AES;
-+				break;
-+			case ACRYPTO_TYPE_AES_192:
-+				if (ctx->keysize != 24)
-+					goto err_out;
-+				md |= HIFN_CRYPT_CMD_KSZ_192 |
-+					HIFN_CRYPT_CMD_ALG_AES;
-+				break;
-+			case ACRYPTO_TYPE_AES_256:
-+				if (ctx->keysize != 32)
-+					goto err_out;
-+				md |= HIFN_CRYPT_CMD_KSZ_256 |
-+					HIFN_CRYPT_CMD_ALG_AES;
-+				break;
-+			case ACRYPTO_TYPE_3DES:
-+				if (ctx->keysize != 24)
-+					goto err_out;
-+				md |= HIFN_CRYPT_CMD_ALG_3DES;
-+				break;
-+			case ACRYPTO_TYPE_DES:
-+				if (ctx->keysize != 8)
-+					goto err_out;
-+				md |= HIFN_CRYPT_CMD_ALG_DES;
-+				break;
-+			default:
-+				goto err_out;
-+		}
-+
-+		buf_pos += hifn_setup_crypto_command(dev, buf_pos,
-+				nbytes, nbytes, ctx->key, ctx->keysize,
-+				ctx->iv, ctx->ivsize, md);
-+	}
-+
-+	dev->sa[sa_idx] = priv;
-+
-+	cmd_len = buf_pos - buf;
-+	dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
-+			HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
-+
-+	if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
-+		dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND |
-+			HIFN_D_VALID | HIFN_D_LAST |
-+			HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
-+		dma->cmdi = 0;
-+	} else
-+		dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
 +
-+	if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
-+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
-+		dev->flags |= HIFN_FLAG_CMD_BUSY;
-+	}
++#ifdef IDE0_BASE_OFFSET
++	idx[0] = 0;
++#ifdef IDE1_BASE_OFFSET
++	idx[1] = 1;
++#endif
++#endif
 +
-+	hifn_setup_dst_desc(dev, dpage, doff, nbytes);
-+	hifn_setup_res_desc(dev);
++	ide_device_add(idx);
 +
 +	return 0;
-+
-+err_out:
-+	return -EINVAL;
-+}
-+
-+static int ablkcipher_walk_init(struct ablkcipher_walk *w,
-+		int num, gfp_t gfp_flags)
-+{
-+	int i;
-+
-+	num = min(ASYNC_SCATTERLIST_CACHE, num);
-+	sg_init_table(w->cache, num);
-+
-+	w->num = 0;
-+	for (i=0; i<num; ++i) {
-+		struct page *page = alloc_page(gfp_flags);
-+		struct scatterlist *s;
-+
-+		if (!page)
-+			break;
-+
-+		s = &w->cache[i];
-+
-+		sg_set_page(s, page, PAGE_SIZE, 0);
-+		w->num++;
-+	}
-+
-+	return i;
-+}
-+
-+static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
-+{
-+	int i;
-+
-+	for (i=0; i<w->num; ++i) {
-+		struct scatterlist *s = &w->cache[i];
-+
-+		__free_page(sg_page(s));
-+
-+		s->length = 0;
-+	}
-+
-+	w->num = 0;
 +}
 +
-+static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src,
-+		unsigned int size, unsigned int *nbytesp)
-+{
-+	unsigned int copy, drest = *drestp, nbytes = *nbytesp;
-+	int idx = 0;
-+	void *saddr;
-+
-+	if (drest < size || size > nbytes)
-+		return -EINVAL;
-+
-+	while (size) {
-+		copy = min(drest, src->length);
-+
-+		saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
-+		memcpy(daddr, saddr + src->offset, copy);
-+		kunmap_atomic(saddr, KM_SOFTIRQ1);
-+
-+		size -= copy;
-+		drest -= copy;
-+		nbytes -= copy;
-+		daddr += copy;
-+
-+		dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
-+				__func__, copy, size, drest, nbytes);
-+
-+		src++;
-+		idx++;
-+	}
-+
-+	*nbytesp = nbytes;
-+	*drestp = drest;
++module_init(mpc8xx_ide_probe);
+diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
+index 7f7a598..736d12c 100644
+--- a/drivers/ide/ppc/pmac.c
++++ b/drivers/ide/ppc/pmac.c
+@@ -438,13 +438,8 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw,
+ 		if (data_port == pmac_ide[ix].regbase)
+ 			break;
+ 
+-	if (ix >= MAX_HWIFS) {
+-		/* Probably a PCI interface... */
+-		for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; ++i)
+-			hw->io_ports[i] = data_port + i - IDE_DATA_OFFSET;
+-		hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+-		return;
+-	}
++	if (ix >= MAX_HWIFS)
++		return;		/* not an IDE PMAC interface */
+ 
+ 	for (i = 0; i < 8; ++i)
+ 		hw->io_ports[i] = data_port + i * 0x10;
+@@ -833,38 +828,20 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	tl[0] = *timings;
+ 	tl[1] = *timings2;
+ 
+-	switch(speed) {
+ #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+-		case XFER_UDMA_6:
+-		case XFER_UDMA_5:
+-		case XFER_UDMA_4:
+-		case XFER_UDMA_3:
+-		case XFER_UDMA_2:
+-		case XFER_UDMA_1:
+-		case XFER_UDMA_0:
+-			if (pmif->kind == controller_kl_ata4)
+-				ret = set_timings_udma_ata4(&tl[0], speed);
+-			else if (pmif->kind == controller_un_ata6
+-				 || pmif->kind == controller_k2_ata6)
+-				ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
+-			else if (pmif->kind == controller_sh_ata6)
+-				ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
+-			else
+-				ret = 1;
+-			break;
+-		case XFER_MW_DMA_2:
+-		case XFER_MW_DMA_1:
+-		case XFER_MW_DMA_0:
+-			set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
+-			break;
+-		case XFER_SW_DMA_2:
+-		case XFER_SW_DMA_1:
+-		case XFER_SW_DMA_0:
+-			return;
++	if (speed >= XFER_UDMA_0) {
++		if (pmif->kind == controller_kl_ata4)
++			ret = set_timings_udma_ata4(&tl[0], speed);
++		else if (pmif->kind == controller_un_ata6
++			 || pmif->kind == controller_k2_ata6)
++			ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
++		else if (pmif->kind == controller_sh_ata6)
++			ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
++		else
++			ret = -1;
++	} else
++		set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
+ #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+-		default:
+-			ret = 1;
+-	}
+ 	if (ret)
+ 		return;
+ 
+@@ -1035,12 +1012,11 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
+  * rare machines unfortunately, but it's better this way.
+  */
+ static int
+-pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
++pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
+ {
+ 	struct device_node *np = pmif->node;
+ 	const int *bidp;
+ 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+-	hw_regs_t hw;
+ 
+ 	pmif->cable_80 = 0;
+ 	pmif->broken_dma = pmif->broken_dma_warn = 0;
+@@ -1126,11 +1102,9 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+ 	/* Tell common code _not_ to mess with resources */
+ 	hwif->mmio = 1;
+ 	hwif->hwif_data = pmif;
+-	memset(&hw, 0, sizeof(hw));
+-	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, &hwif->irq);
+-	memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
+-	hwif->chipset = ide_pmac;
+-	hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay;
++	hw->chipset = ide_pmac;
++	ide_init_port_hw(hwif, hw);
++	hwif->noprobe = pmif->mediabay;
+ 	hwif->hold = pmif->mediabay;
+ 	hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
+ 	hwif->drives[0].unmask = 1;
+@@ -1159,8 +1133,6 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+ 		hwif->noprobe = 0;
+ #endif /* CONFIG_PMAC_MEDIABAY */
+ 
+-	hwif->sg_max_nents = MAX_DCMDS;
+-
+ #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+ 	/* has a DBDMA controller channel */
+ 	if (pmif->dma_regs)
+@@ -1186,6 +1158,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
+ 	ide_hwif_t *hwif;
+ 	pmac_ide_hwif_t *pmif;
+ 	int i, rc;
++	hw_regs_t hw;
+ 
+ 	i = 0;
+ 	while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
+@@ -1228,7 +1201,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
+ 	regbase = (unsigned long) base;
+ 
+ 	hwif->pci_dev = mdev->bus->pdev;
+-	hwif->gendev.parent = &mdev->ofdev.dev;
+ 
+ 	pmif->mdev = mdev;
+ 	pmif->node = mdev->ofdev.node;
+@@ -1246,7 +1218,12 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
+ #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+ 	dev_set_drvdata(&mdev->ofdev.dev, hwif);
+ 
+-	rc = pmac_ide_setup_device(pmif, hwif);
++	memset(&hw, 0, sizeof(hw));
++	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
++	hw.irq = irq;
++	hw.dev = &mdev->ofdev.dev;
 +
-+	return idx;
-+}
++	rc = pmac_ide_setup_device(pmif, hwif, &hw);
+ 	if (rc != 0) {
+ 		/* The interface is released to the common IDE layer */
+ 		dev_set_drvdata(&mdev->ofdev.dev, NULL);
+@@ -1305,6 +1282,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	void __iomem *base;
+ 	unsigned long rbase, rlen;
+ 	int i, rc;
++	hw_regs_t hw;
+ 
+ 	np = pci_device_to_OF_node(pdev);
+ 	if (np == NULL) {
+@@ -1338,7 +1316,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	}
+ 
+ 	hwif->pci_dev = pdev;
+-	hwif->gendev.parent = &pdev->dev;
+ 	pmif->mdev = NULL;
+ 	pmif->node = np;
+ 
+@@ -1355,7 +1332,12 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	pci_set_drvdata(pdev, hwif);
+ 
+-	rc = pmac_ide_setup_device(pmif, hwif);
++	memset(&hw, 0, sizeof(hw));
++	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
++	hw.irq = pdev->irq;
++	hw.dev = &pdev->dev;
 +
-+static int ablkcipher_walk(struct ablkcipher_request *req,
-+		struct ablkcipher_walk *w)
-+{
-+	unsigned blocksize =
-+		crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
-+	unsigned alignmask =
-+		crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
-+	struct scatterlist *src, *dst, *t;
-+	void *daddr;
-+	unsigned int nbytes = req->nbytes, offset, copy, diff;
-+	int idx, tidx, err;
++	rc = pmac_ide_setup_device(pmif, hwif, &hw);
+ 	if (rc != 0) {
+ 		/* The interface is released to the common IDE layer */
+ 		pci_set_drvdata(pdev, NULL);
+@@ -1721,11 +1703,7 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
+ 	return 1;
+ }
+ 
+-static void pmac_ide_dma_host_off(ide_drive_t *drive)
+-{
+-}
+-
+-static void pmac_ide_dma_host_on(ide_drive_t *drive)
++static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+@@ -1771,15 +1749,14 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+ 		return;
+ 	}
+ 
+-	hwif->dma_off_quietly = &ide_dma_off_quietly;
+-	hwif->ide_dma_on = &__ide_dma_on;
++	hwif->sg_max_nents = MAX_DCMDS;
 +
-+	tidx = idx = 0;
-+	offset = 0;
-+	while (nbytes) {
-+		if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
-+			return -EINVAL;
++	hwif->dma_host_set = &pmac_ide_dma_host_set;
+ 	hwif->dma_setup = &pmac_ide_dma_setup;
+ 	hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd;
+ 	hwif->dma_start = &pmac_ide_dma_start;
+ 	hwif->ide_dma_end = &pmac_ide_dma_end;
+ 	hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
+-	hwif->dma_host_off = &pmac_ide_dma_host_off;
+-	hwif->dma_host_on = &pmac_ide_dma_host_on;
+ 	hwif->dma_timeout = &ide_dma_timeout;
+ 	hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
+ 
+@@ -1809,3 +1786,5 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+ }
+ 
+ #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 +
-+		src = &req->src[idx];
-+		dst = &req->dst[idx];
++module_init(pmac_ide_probe);
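
The pmac.c hunks above move hw_regs_t construction out of pmac_ide_setup_device() and into its callers: both the macio and the PCI attach paths now fill the structure and pass it down, and the parent device is carried in hw.dev instead of being poked into hwif->gendev.parent. A minimal sketch of that caller-side pattern, using only names that appear in the hunks (the function name example_pmac_attach is illustrative, not part of the patch):

/* Illustrative only: how the attach paths now prepare hw_regs_t. */
static int example_pmac_attach(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif,
			       struct device *parent, int irq)
{
	hw_regs_t hw;

	memset(&hw, 0, sizeof(hw));
	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
	hw.irq = irq;
	hw.dev = parent;	/* replaces the old hwif->gendev.parent assignment */

	return pmac_ide_setup_device(pmif, hwif, &hw);
}
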
+diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
+index d2cd5a3..676c66e 100644
+--- a/drivers/ide/setup-pci.c
++++ b/drivers/ide/setup-pci.c
+@@ -165,13 +165,17 @@ static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_
+ 
+ 		dma_base = pci_resource_start(dev, baridx);
+ 
+-		if (dma_base == 0)
++		if (dma_base == 0) {
+ 			printk(KERN_ERR "%s: DMA base is invalid\n", d->name);
++			return 0;
++		}
+ 	}
+ 
+-	if ((d->host_flags & IDE_HFLAG_CS5520) == 0 && dma_base) {
++	if (hwif->channel)
++		dma_base += 8;
 +
-+		dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
-+				"blocksize: %u, nbytes: %u.\n",
-+				__func__, src->length, dst->length, src->offset,
-+				dst->offset, offset, blocksize, nbytes);
++	if ((d->host_flags & IDE_HFLAG_CS5520) == 0) {
+ 		u8 simplex_stat = 0;
+-		dma_base += hwif->channel ? 8 : 0;
+ 
+ 		switch(dev->device) {
+ 			case PCI_DEVICE_ID_AL_M5219:
+@@ -359,6 +363,8 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, const struct ide_port
+ 	unsigned long ctl = 0, base = 0;
+ 	ide_hwif_t *hwif;
+ 	u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0;
++	u8 oldnoprobe = 0;
++	struct hw_regs_s hw;
+ 
+ 	if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
+ 		/*  Possibly we should fail if these checks report true */
+@@ -381,26 +387,25 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, const struct ide_port
+ 	}
+ 	if ((hwif = ide_match_hwif(base, bootable, d->name)) == NULL)
+ 		return NULL;	/* no room in ide_hwifs[] */
+-	if (hwif->io_ports[IDE_DATA_OFFSET] != base ||
+-	    hwif->io_ports[IDE_CONTROL_OFFSET] != (ctl | 2)) {
+-		hw_regs_t hw;
+-
+-		memset(&hw, 0, sizeof(hw));
+-#ifndef CONFIG_IDE_ARCH_OBSOLETE_INIT
+-		ide_std_init_ports(&hw, base, ctl | 2);
+-#else
+-		ide_init_hwif_ports(&hw, base, ctl | 2, NULL);
+-#endif
+-		memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
+-		hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
+-	}
+-	hwif->chipset = d->chipset ? d->chipset : ide_pci;
 +
-+		if (src->length & (blocksize - 1) ||
-+				src->offset & (alignmask - 1) ||
-+				dst->length & (blocksize - 1) ||
-+				dst->offset & (alignmask - 1) ||
-+				offset) {
-+			unsigned slen = src->length - offset;
-+			unsigned dlen = PAGE_SIZE;
++	memset(&hw, 0, sizeof(hw));
++	hw.irq = hwif->irq ? hwif->irq : irq;
++	hw.dev = &dev->dev;
++	hw.chipset = d->chipset ? d->chipset : ide_pci;
++	ide_std_init_ports(&hw, base, ctl | 2);
 +
-+			t = &w->cache[idx];
++	if (hwif->io_ports[IDE_DATA_OFFSET] == base &&
++	    hwif->io_ports[IDE_CONTROL_OFFSET] == (ctl | 2))
++		oldnoprobe = hwif->noprobe;
 +
-+			daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
-+			err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes);
-+			if (err < 0)
-+				goto err_out_unmap;
++	ide_init_port_hw(hwif, &hw);
 +
-+			idx += err;
++	hwif->noprobe = oldnoprobe;
 +
-+			copy = slen & ~(blocksize - 1);
-+			diff = slen & (blocksize - 1);
+ 	hwif->pci_dev = dev;
+ 	hwif->cds = d;
+ 	hwif->channel = port;
+ 
+-	if (!hwif->irq)
+-		hwif->irq = irq;
+ 	if (mate) {
+ 		hwif->mate = mate;
+ 		mate->mate = hwif;
+@@ -535,12 +540,8 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
+ 		if ((hwif = ide_hwif_configure(dev, d, mate, port, pciirq)) == NULL)
+ 			continue;
+ 
+-		/* setup proper ancestral information */
+-		hwif->gendev.parent = &dev->dev;
+-
+ 		*(idx + port) = hwif->index;
+ 
+-		
+ 		if (d->init_iops)
+ 			d->init_iops(hwif);
+ 
+@@ -551,8 +552,6 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
+ 		    (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
+ 			hwif->irq = port ? 15 : 14;
+ 
+-		hwif->fixup = d->fixup;
+-
+ 		hwif->host_flags = d->host_flags;
+ 		hwif->pio_mask = d->pio_mask;
+ 
+@@ -699,105 +698,3 @@ out:
+ }
+ 
+ EXPORT_SYMBOL_GPL(ide_setup_pci_devices);
+-
+-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+-/*
+- *	Module interfaces
+- */
+-
+-static int pre_init = 1;		/* Before first ordered IDE scan */
+-static LIST_HEAD(ide_pci_drivers);
+-
+-/*
+- *	__ide_pci_register_driver	-	attach IDE driver
+- *	@driver: pci driver
+- *	@module: owner module of the driver
+- *
+- *	Registers a driver with the IDE layer. The IDE layer arranges that
+- *	boot time setup is done in the expected device order and then
+- *	hands the controllers off to the core PCI code to do the rest of
+- *	the work.
+- *
+- *	Returns are the same as for pci_register_driver
+- */
+-
+-int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
+-			      const char *mod_name)
+-{
+-	if (!pre_init)
+-		return __pci_register_driver(driver, module, mod_name);
+-	driver->driver.owner = module;
+-	list_add_tail(&driver->node, &ide_pci_drivers);
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
+-
+-/**
+- *	ide_scan_pcidev		-	find an IDE driver for a device
+- *	@dev: PCI device to check
+- *
+- *	Look for an IDE driver to handle the device we are considering.
+- *	This is only used during boot up to get the ordering correct. After
+- *	boot up the pci layer takes over the job.
+- */
+-
+-static int __init ide_scan_pcidev(struct pci_dev *dev)
+-{
+-	struct list_head *l;
+-	struct pci_driver *d;
+-
+-	list_for_each(l, &ide_pci_drivers) {
+-		d = list_entry(l, struct pci_driver, node);
+-		if (d->id_table) {
+-			const struct pci_device_id *id =
+-				pci_match_id(d->id_table, dev);
+-
+-			if (id != NULL && d->probe(dev, id) >= 0) {
+-				dev->driver = d;
+-				pci_dev_get(dev);
+-				return 1;
+-			}
+-		}
+-	}
+-	return 0;
+-}
+-
+-/**
+- *	ide_scan_pcibus		-	perform the initial IDE driver scan
+- *	@scan_direction: set for reverse order scanning
+- *
+- *	Perform the initial bus rather than driver ordered scan of the
+- *	PCI drivers. After this all IDE pci handling becomes standard
+- *	module ordering not traditionally ordered.
+- */
+- 	
+-void __init ide_scan_pcibus (int scan_direction)
+-{
+-	struct pci_dev *dev = NULL;
+-	struct pci_driver *d;
+-	struct list_head *l, *n;
+-
+-	pre_init = 0;
+-	if (!scan_direction)
+-		while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
+-			ide_scan_pcidev(dev);
+-	else
+-		while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID,
+-						     dev)))
+-			ide_scan_pcidev(dev);
+-
+-	/*
+-	 *	Hand the drivers over to the PCI layer now we
+-	 *	are post init.
+-	 */
+-
+-	list_for_each_safe(l, n, &ide_pci_drivers) {
+-		list_del(l);
+-		d = list_entry(l, struct pci_driver, node);
+-		if (__pci_register_driver(d, d->driver.owner,
+-					  d->driver.mod_name))
+-			printk(KERN_ERR "%s: failed to register %s driver\n",
+-					__FUNCTION__, d->driver.mod_name);
+-	}
+-}
+-#endif
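
In the ide_get_or_set_dma_base() hunk above, an invalid BAR now returns 0 immediately instead of falling through, and the +8 offset for the secondary channel is applied before the simplex handling rather than inside it. A condensed view of the new flow (example_dma_base is a sketch, not a drop-in replacement for the real function):

/* Condensed sketch of the reworked DMA base handling. */
static unsigned long example_dma_base(struct pci_dev *dev, ide_hwif_t *hwif,
				      const struct ide_port_info *d, int baridx)
{
	unsigned long dma_base = pci_resource_start(dev, baridx);

	if (dma_base == 0) {
		printk(KERN_ERR "%s: DMA base is invalid\n", d->name);
		return 0;		/* new: bail out instead of continuing */
	}

	if (hwif->channel)
		dma_base += 8;		/* secondary channel uses the upper half */

	/* simplex checks for non-CS5520 chips follow here, as before */
	return dma_base;
}
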
+diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
+index 489c133..1f8153b 100644
+--- a/drivers/ieee1394/Makefile
++++ b/drivers/ieee1394/Makefile
+@@ -15,3 +15,4 @@ obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
+ obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
+ obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
+ 
++obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
+diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
+index 7c4eb39..73685e7 100644
+--- a/drivers/ieee1394/dma.c
++++ b/drivers/ieee1394/dma.c
+@@ -231,37 +231,24 @@ void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
+ 
+ #ifdef CONFIG_MMU
+ 
+-/* nopage() handler for mmap access */
+-
+-static struct page *dma_region_pagefault(struct vm_area_struct *area,
+-					 unsigned long address, int *type)
++static int dma_region_pagefault(struct vm_area_struct *vma,
++				struct vm_fault *vmf)
+ {
+-	unsigned long offset;
+-	unsigned long kernel_virt_addr;
+-	struct page *ret = NOPAGE_SIGBUS;
+-
+-	struct dma_region *dma = (struct dma_region *)area->vm_private_data;
++	struct dma_region *dma = (struct dma_region *)vma->vm_private_data;
+ 
+ 	if (!dma->kvirt)
+-		goto out;
+-
+-	if ((address < (unsigned long)area->vm_start) ||
+-	    (address >
+-	     (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
+-		goto out;
+-
+-	if (type)
+-		*type = VM_FAULT_MINOR;
+-	offset = address - area->vm_start;
+-	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
+-	ret = vmalloc_to_page((void *)kernel_virt_addr);
+-	get_page(ret);
+-      out:
+-	return ret;
++		return VM_FAULT_SIGBUS;
 +
-+			if (dlen < nbytes) {
-+				/*
-+				 * Destination page does not have enough space
-+				 * to put there additional blocksized chunk,
-+				 * so we mark that page as containing only
-+				 * blocksize aligned chunks:
-+				 * 	t->length = (slen & ~(blocksize - 1));
-+				 * and increase number of bytes to be processed
-+				 * in next chunk:
-+				 * 	nbytes += diff;
-+				 */
-+				nbytes += diff;
++	if (vmf->pgoff >= dma->n_pages)
++		return VM_FAULT_SIGBUS;
 +
-+				/*
-+				 * Temporary of course...
-+				 * Kick author if you will catch this one.
-+				 */
-+				printk(KERN_ERR "%s: dlen: %u, nbytes: %u,"
-+					"slen: %u, offset: %u.\n",
-+					__func__, dlen, nbytes, slen, offset);
-+				printk(KERN_ERR "%s: please contact author to fix this "
-+					"issue, generally you should not catch "
-+					"this path under any condition but who "
-+					"knows how did you use crypto code.\n"
-+					"Thank you.\n",	__func__);
-+				BUG();
-+			} else {
-+				copy += diff + nbytes;
++	vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
++	get_page(vmf->page);
++	return 0;
+ }
+ 
+ static struct vm_operations_struct dma_region_vm_ops = {
+-	.nopage = dma_region_pagefault,
++	.fault = dma_region_pagefault,
+ };
+ 
+ /**
+@@ -275,7 +262,7 @@ int dma_region_mmap(struct dma_region *dma, struct file *file,
+ 	if (!dma->kvirt)
+ 		return -EINVAL;
+ 
+-	/* must be page-aligned */
++	/* must be page-aligned (XXX: comment is wrong, we could allow pgoff) */
+ 	if (vma->vm_pgoff != 0)
+ 		return -EINVAL;
+ 
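
The dma.c hunk above converts the mmap handler from the old ->nopage callback to the ->fault interface: range checking is done against vmf->pgoff, and the handler fills vmf->page and returns 0 or VM_FAULT_SIGBUS instead of returning a page pointer. The shape of such a handler, mirroring the code added above (sketch only):

/* Sketch of a ->fault handler in the style used in the hunk above. */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct dma_region *dma = vma->vm_private_data;

	if (!dma->kvirt || vmf->pgoff >= dma->n_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);	/* the fault core expects a referenced page */
	return 0;
}

static struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
};
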
+diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
+index 6779893..10c3d9f 100644
+--- a/drivers/ieee1394/ieee1394_transactions.c
++++ b/drivers/ieee1394/ieee1394_transactions.c
+@@ -570,71 +570,3 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+ 
+ 	return retval;
+ }
+-
+-#if 0
+-
+-int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
+-	      u64 addr, int extcode, quadlet_t * data, quadlet_t arg)
+-{
+-	struct hpsb_packet *packet;
+-	int retval = 0;
+-
+-	BUG_ON(in_interrupt());	// We can't be called in an interrupt, yet
+-
+-	packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
+-	if (!packet)
+-		return -ENOMEM;
+-
+-	packet->generation = generation;
+-	retval = hpsb_send_packet_and_wait(packet);
+-	if (retval < 0)
+-		goto hpsb_lock_fail;
+-
+-	retval = hpsb_packet_success(packet);
+-
+-	if (retval == 0) {
+-		*data = packet->data[0];
+-	}
+-
+-      hpsb_lock_fail:
+-	hpsb_free_tlabel(packet);
+-	hpsb_free_packet(packet);
+-
+-	return retval;
+-}
+-
+-int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
+-		   quadlet_t * buffer, size_t length, u32 specifier_id,
+-		   unsigned int version)
+-{
+-	struct hpsb_packet *packet;
+-	int retval = 0;
+-	u16 specifier_id_hi = (specifier_id & 0x00ffff00) >> 8;
+-	u8 specifier_id_lo = specifier_id & 0xff;
+-
+-	HPSB_VERBOSE("Send GASP: channel = %d, length = %Zd", channel, length);
+-
+-	length += 8;
+-
+-	packet = hpsb_make_streampacket(host, NULL, length, channel, 3, 0);
+-	if (!packet)
+-		return -ENOMEM;
+-
+-	packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi);
+-	packet->data[1] =
+-	    cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));
+-
+-	memcpy(&(packet->data[2]), buffer, length - 8);
+-
+-	packet->generation = generation;
+-
+-	packet->no_waiter = 1;
+-
+-	retval = hpsb_send_packet(packet);
+-	if (retval < 0)
+-		hpsb_free_packet(packet);
+-
+-	return retval;
+-}
+-
+-#endif				/*  0  */
+diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
+new file mode 100644
+index 0000000..ddaab6e
+--- /dev/null
++++ b/drivers/ieee1394/init_ohci1394_dma.c
+@@ -0,0 +1,285 @@
++/*
++ * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers
++ *
++ * Copyright (C) 2006-2007      Bernhard Kaindl <bk at suse.de>
++ *
++ * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c
++ * this file has functions to:
++ * - scan the PCI very early on boot for all OHCI 1394-compliant controllers
++ * - reset and initialize them and make them join the IEEE1394 bus and
++ * - enable physical DMA on them to allow remote debugging
++ *
++ * All code and data is marked as __init and __initdata, respectively, as
++ * during boot all OHCI1394 controllers may be claimed by the firewire
++ * stack, and at that point this code should not touch them anymore.

++ *
++ * To use physical DMA after the initialization of the firewire stack,
++ * be sure that the stack enables it and (re-)attach after the bus reset
++ * which may be caused by the firewire stack initialization.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software Foundation,
++ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ */
 +
-+				src = &req->src[idx];
++#include <linux/interrupt.h>	/* for ohci1394.h */
++#include <linux/delay.h>
++#include <linux/pci.h>		/* for PCI defines */
++#include <linux/init_ohci1394_dma.h>
++#include <asm/pci-direct.h>	/* for direct PCI config space access */
++#include <asm/fixmap.h>
 +
-+				err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes);
-+				if (err < 0)
-+					goto err_out_unmap;
++#include "ieee1394_types.h"
++#include "ohci1394.h"
 +
-+				idx += err;
-+			}
++int __initdata init_ohci1394_dma_early;
 +
-+			t->length = copy;
-+			t->offset = offset;
++/* Reads a PHY register of an OHCI-1394 controller */
++static inline u8 __init get_phy_reg(struct ti_ohci *ohci, u8 addr)
++{
++	int i;
++	quadlet_t r;
 +
-+			kunmap_atomic(daddr, KM_SOFTIRQ0);
-+		} else {
-+			nbytes -= src->length;
-+			idx++;
-+		}
++	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
 +
-+		tidx++;
++	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
++		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
++			break;
++		mdelay(1);
 +	}
++	r = reg_read(ohci, OHCI1394_PhyControl);
 +
-+	return tidx;
-+
-+err_out_unmap:
-+	kunmap_atomic(daddr, KM_SOFTIRQ0);
-+	return err;
++	return (r & 0x00ff0000) >> 16;
 +}
 +
-+static int hifn_setup_session(struct ablkcipher_request *req)
++/* Writes to a PHY register of an OHCI-1394 controller */
++static inline void __init set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
 +{
-+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
-+	struct hifn_device *dev = ctx->dev;
-+	struct page *spage, *dpage;
-+	unsigned long soff, doff, flags;
-+	unsigned int nbytes = req->nbytes, idx = 0, len;
-+	int err = -EINVAL, sg_num;
-+	struct scatterlist *src, *dst, *t;
-+	unsigned blocksize =
-+		crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
-+	unsigned alignmask =
-+		crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
-+
-+	if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
-+		goto err_out_exit;
++	int i;
 +
-+	ctx->walk.flags = 0;
++	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
 +
-+	while (nbytes) {
-+		src = &req->src[idx];
-+		dst = &req->dst[idx];
++	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
++		u32 r = reg_read(ohci, OHCI1394_PhyControl);
++		if (!(r & 0x00004000))
++			break;
++		mdelay(1);
++	}
++}
 +
-+		if (src->length & (blocksize - 1) ||
-+				src->offset & (alignmask - 1) ||
-+				dst->length & (blocksize - 1) ||
-+				dst->offset & (alignmask - 1)) {
-+			ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
-+		}
++/* Resets an OHCI-1394 controller (for sane state before initialization) */
++static inline void __init init_ohci1394_soft_reset(struct ti_ohci *ohci) {
++	int i;
 +
-+		nbytes -= src->length;
-+		idx++;
-+	}
++	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
 +
-+	if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
-+		err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC);
-+		if (err < 0)
-+			return err;
++	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
++		if (!(reg_read(ohci, OHCI1394_HCControlSet)
++				   & OHCI1394_HCControl_softReset))
++			break;
++		mdelay(1);
 +	}
++}
 +
-+	nbytes = req->nbytes;
-+	idx = 0;
-+
-+	sg_num = ablkcipher_walk(req, &ctx->walk);
++/* Basic OHCI-1394 register and port initialization */
++static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
++{
++	quadlet_t bus_options;
++	int num_ports, i;
 +
-+	atomic_set(&ctx->sg_num, sg_num);
++	/* Put some defaults to these undefined bus options */
++	bus_options = reg_read(ohci, OHCI1394_BusOptions);
++	bus_options |=  0x60000000; /* Enable CMC and ISC */
++	bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
++	bus_options &= ~0x18000000; /* Disable PMC and BMC */
++	reg_write(ohci, OHCI1394_BusOptions, bus_options);
 +
-+	spin_lock_irqsave(&dev->lock, flags);
-+	if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
-+		err = -EAGAIN;
-+		goto err_out;
-+	}
++	/* Set the bus number */
++	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
 +
-+	dev->snum++;
-+	dev->started += sg_num;
++	/* Enable posted writes */
++	reg_write(ohci, OHCI1394_HCControlSet,
++			OHCI1394_HCControl_postedWriteEnable);
 +
-+	while (nbytes) {
-+		src = &req->src[idx];
-+		dst = &req->dst[idx];
-+		t = &ctx->walk.cache[idx];
++	/* Clear link control register */
++	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
 +
-+		if (t->length) {
-+			spage = dpage = sg_page(t);
-+			soff = doff = 0;
-+			len = t->length;
-+		} else {
-+			spage = sg_page(src);
-+			soff = src->offset;
++	/* enable phys */
++	reg_write(ohci, OHCI1394_LinkControlSet,
++			OHCI1394_LinkControl_RcvPhyPkt);
 +
-+			dpage = sg_page(dst);
-+			doff = dst->offset;
++	/* Don't accept phy packets into AR request context */
++	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
 +
-+			len = dst->length;
-+		}
++	/* Clear the isochronous interrupt masks */
++	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
++	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
++	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
++	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
 +
-+		idx++;
++	/* Accept asynchronous transfer requests from all nodes for now */
++	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
 +
-+		err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes,
-+				req, ctx);
-+		if (err)
-+			goto err_out;
++	/* Specify asynchronous transfer retries */
++	reg_write(ohci, OHCI1394_ATRetries,
++		  OHCI1394_MAX_AT_REQ_RETRIES |
++		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
++		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
 +
-+		nbytes -= len;
-+	}
++	/* We don't want hardware swapping */
++	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
 +
-+	dev->active = HIFN_DEFAULT_ACTIVE_NUM;
-+	spin_unlock_irqrestore(&dev->lock, flags);
++	/* Enable link */
++	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
 +
-+	return 0;
++	/* If anything is connected to a port, make sure it is enabled */
++	num_ports = get_phy_reg(ohci, 2) & 0xf;
++	for (i = 0; i < num_ports; i++) {
++		unsigned int status;
 +
-+err_out:
-+	spin_unlock_irqrestore(&dev->lock, flags);
-+err_out_exit:
-+	if (err && printk_ratelimit())
-+		dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
-+				"type: %u, err: %d.\n",
-+			dev->name, ctx->iv, ctx->ivsize,
-+			ctx->key, ctx->keysize,
-+			ctx->mode, ctx->op, ctx->type, err);
++		set_phy_reg(ohci, 7, i);
++		status = get_phy_reg(ohci, 8);
 +
-+	return err;
++		if (status & 0x20)
++			set_phy_reg(ohci, 8, status & ~1);
++	}
 +}
 +
-+static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
++/**
++ * init_ohci1394_wait_for_busresets - wait until bus resets are completed
++ *
++ * OHCI1394 initialization itself and any device going on- or offline
++ * and any cable issue cause an IEEE1394 bus reset. The OHCI1394 spec
++ * specifies that physical DMA is disabled on each bus reset and it
++ * has to be enabled after each bus reset when needed. We resort
++ * to polling here because on early boot, we have no interrupts.
++ */
++static inline void __init init_ohci1394_wait_for_busresets(struct ti_ohci *ohci)
 +{
-+	int n, err;
-+	u8 src[16];
-+	struct hifn_context ctx;
-+	u8 fips_aes_ecb_from_zero[16] = {
-+		0x66, 0xE9, 0x4B, 0xD4,
-+		0xEF, 0x8A, 0x2C, 0x3B,
-+		0x88, 0x4C, 0xFA, 0x59,
-+		0xCA, 0x34, 0x2B, 0x2E};
-+
-+	memset(src, 0, sizeof(src));
-+	memset(ctx.key, 0, sizeof(ctx.key));
-+
-+	ctx.dev = dev;
-+	ctx.keysize = 16;
-+	ctx.ivsize = 0;
-+	ctx.iv = NULL;
-+	ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
-+	ctx.mode = ACRYPTO_MODE_ECB;
-+	ctx.type = ACRYPTO_TYPE_AES_128;
-+	atomic_set(&ctx.sg_num, 1);
-+
-+	err = hifn_setup_dma(dev,
-+			virt_to_page(src), offset_in_page(src),
-+			virt_to_page(src), offset_in_page(src),
-+			sizeof(src), NULL, &ctx);
-+	if (err)
-+		goto err_out;
-+
-+	msleep(200);
-+
-+	dprintk("%s: decoded: ", dev->name);
-+	for (n=0; n<sizeof(src); ++n)
-+		dprintk("%02x ", src[n]);
-+	dprintk("\n");
-+	dprintk("%s: FIPS   : ", dev->name);
-+	for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
-+		dprintk("%02x ", fips_aes_ecb_from_zero[n]);
-+	dprintk("\n");
++	int i, events;
 +
-+	if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
-+		printk(KERN_INFO "%s: AES 128 ECB test has been successfully "
-+				"passed.\n", dev->name);
-+		return 0;
++	for (i=0; i < 9; i++) {
++		mdelay(200);
++		events = reg_read(ohci, OHCI1394_IntEventSet);
++		if (events & OHCI1394_busReset)
++			reg_write(ohci, OHCI1394_IntEventClear,
++					OHCI1394_busReset);
 +	}
-+
-+err_out:
-+	printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name);
-+	return -1;
 +}
 +
-+static int hifn_start_device(struct hifn_device *dev)
++/**
++ * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
++ * This enables remote DMA access over IEEE1394 from every host for the low
++ * 4GB of address space. DMA accesses above 4GB are not available currently.
++ */
++static inline void __init init_ohci1394_enable_physical_dma(struct ti_ohci *hci)
 +{
-+	int err;
-+
-+	hifn_reset_dma(dev, 1);
-+
-+	err = hifn_enable_crypto(dev);
-+	if (err)
-+		return err;
-+
-+	hifn_reset_puc(dev);
-+
-+	hifn_init_dma(dev);
-+
-+	hifn_init_registers(dev);
-+
-+	hifn_init_pubrng(dev);
-+
-+	return 0;
++	reg_write(hci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
++	reg_write(hci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
++	reg_write(hci, OHCI1394_PhyUpperBound, 0xffff0000);
 +}
 +
-+static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
-+		struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
++/**
++ * init_ohci1394_reset_and_init_dma - init controller and enable DMA
++ * This initializes the given controller and enables physical DMA engine in it.
++ */
++static inline void __init init_ohci1394_reset_and_init_dma(struct ti_ohci *ohci)
 +{
-+	unsigned int srest = *srestp, nbytes = *nbytesp, copy;
-+	void *daddr;
-+	int idx = 0;
-+
-+	if (srest < size || size > nbytes)
-+		return -EINVAL;
-+
-+	while (size) {
-+
-+		copy = min(dst->length, srest);
-+
-+		daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
-+		memcpy(daddr + dst->offset + offset, saddr, copy);
-+		kunmap_atomic(daddr, KM_IRQ0);
++	/* Start off with a soft reset, clears everything to a sane state. */
++	init_ohci1394_soft_reset(ohci);
 +
-+		nbytes -= copy;
-+		size -= copy;
-+		srest -= copy;
-+		saddr += copy;
-+		offset = 0;
++	/* Accessing some registers without LPS enabled may cause lock up */
++	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
 +
-+		dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
-+				__func__, copy, size, srest, nbytes);
++	/* Disable and clear interrupts */
++	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
++	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
 +
-+		dst++;
-+		idx++;
-+	}
++	mdelay(50); /* Wait 50msec to make sure we have full link enabled */
 +
-+	*nbytesp = nbytes;
-+	*srestp = srest;
++	init_ohci1394_initialize(ohci);
++	/*
++	 * The initialization causes at least one IEEE1394 bus reset. Enabling
++	 * physical DMA only works *after* *all* bus resets have calmed down:
++	 */
++	init_ohci1394_wait_for_busresets(ohci);
 +
-+	return idx;
++	/* We had to wait and do this now if we want to debug early problems */
++	init_ohci1394_enable_physical_dma(ohci);
 +}
 +
-+static void hifn_process_ready(struct ablkcipher_request *req, int error)
++/**
++ * init_ohci1394_controller - Map the registers of the controller and init DMA
++ * This maps the registers of the specified controller and initializes it
++ */
++static inline void __init init_ohci1394_controller(int num, int slot, int func)
 +{
-+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
-+	struct hifn_device *dev;
++	unsigned long ohci_base;
++	struct ti_ohci ohci;
 +
-+	dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx);
++	printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394"
++			 " at %02x:%02x.%x\n", num, slot, func);
 +
-+	dev = ctx->dev;
-+	dprintk("%s: req: %p, started: %d, sg_num: %d.\n",
-+		__func__, req, dev->started, atomic_read(&ctx->sg_num));
++	ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2))
++						   & PCI_BASE_ADDRESS_MEM_MASK;
 +
-+	if (--dev->started < 0)
-+		BUG();
++	set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base);
 +
-+	if (atomic_dec_and_test(&ctx->sg_num)) {
-+		unsigned int nbytes = req->nbytes;
-+		int idx = 0, err;
-+		struct scatterlist *dst, *t;
-+		void *saddr;
++	ohci.registers = (void *)fix_to_virt(FIX_OHCI1394_BASE);
 +
-+		if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
-+			while (nbytes) {
-+				t = &ctx->walk.cache[idx];
-+				dst = &req->dst[idx];
++	init_ohci1394_reset_and_init_dma(&ohci);
++}
 +
-+				dprintk("\n%s: sg_page(t): %p, t->length: %u, "
-+					"sg_page(dst): %p, dst->length: %u, "
-+					"nbytes: %u.\n",
-+					__func__, sg_page(t), t->length,
-+					sg_page(dst), dst->length, nbytes);
++/**
++ * debug_init_ohci1394_dma - scan for OHCI1394 controllers and init DMA on them
++ * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them
++ */
++void __init init_ohci1394_dma_on_all_controllers(void)
++{
++	int num, slot, func;
 +
-+				if (!t->length) {
-+					nbytes -= dst->length;
-+					idx++;
-+					continue;
-+				}
++	if (!early_pci_allowed())
++		return;
 +
-+				saddr = kmap_atomic(sg_page(t), KM_IRQ1);
++	/* Poor man's PCI discovery, the only thing we can do at early boot */
++	for (num = 0; num < 32; num++) {
++		for (slot = 0; slot < 32; slot++) {
++			for (func = 0; func < 8; func++) {
++				u32 class = read_pci_config(num,slot,func,
++							PCI_CLASS_REVISION);
++				if ((class == 0xffffffff))
++					continue; /* No device at this func */
 +
-+				err = ablkcipher_get(saddr, &t->length, t->offset,
-+						dst, nbytes, &nbytes);
-+				if (err < 0) {
-+					kunmap_atomic(saddr, KM_IRQ1);
-+					break;
-+				}
++				if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
++					continue; /* Not an OHCI-1394 device */
 +
-+				idx += err;
-+				kunmap_atomic(saddr, KM_IRQ1);
++				init_ohci1394_controller(num, slot, func);
++				break; /* Assume one controller per device */
 +			}
-+
-+			ablkcipher_walk_exit(&ctx->walk);
 +		}
-+
-+		req->base.complete(&req->base, error);
 +	}
++	printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n");
 +}
 +
-+static void hifn_check_for_completion(struct hifn_device *dev, int error)
++/**
++ * setup_init_ohci1394_early - enables early OHCI1394 DMA initialization
++ */
++static int __init setup_ohci1394_dma(char *opt)
 +{
-+	int i;
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+
-+	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
-+		struct hifn_desc *d = &dma->resr[i];
-+
-+		if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) {
-+			dev->success++;
-+			dev->reset = 0;
-+			hifn_process_ready(dev->sa[i], error);
-+			dev->sa[i] = NULL;
-+		}
-+
-+		if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER))
-+			if (printk_ratelimit())
-+				printk("%s: overflow detected [d: %u, o: %u] "
-+						"at %d resr: l: %08x, p: %08x.\n",
-+					dev->name,
-+					!!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)),
-+					!!(d->l & __cpu_to_le32(HIFN_D_OVER)),
-+					i, d->l, d->p);
-+	}
++	if (!strcmp(opt, "early"))
++		init_ohci1394_dma_early = 1;
++	return 0;
 +}
 +
-+static void hifn_clear_rings(struct hifn_device *dev)
++/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */
++early_param("ohci1394_dma", setup_ohci1394_dma);
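
For reference, the new init_ohci1394_dma.c file registers init_ohci1394_dma_early through early_param(), so early physical DMA (for FireWire-based remote debugging) is requested from the kernel command line:

	ohci1394_dma=early

The flag itself is only a request: the actual scan, init_ohci1394_dma_on_all_controllers(), is presumably invoked from early architecture setup code, which is not part of this hunk.
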
+diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
+index 90dc75b..511e432 100644
+--- a/drivers/ieee1394/nodemgr.c
++++ b/drivers/ieee1394/nodemgr.c
+@@ -727,33 +727,31 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
+ 
+ static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
+ 
++static int __match_ne(struct device *dev, void *data)
 +{
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+	int i, u;
-+
-+	dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
-+			"k: %d.%d.%d.%d.\n",
-+			dev->name,
-+			dma->cmdi, dma->srci, dma->dsti, dma->resi,
-+			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
-+			dma->cmdk, dma->srck, dma->dstk, dma->resk);
-+
-+	i = dma->resk; u = dma->resu;
-+	while (u != 0) {
-+		if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
-+			break;
-+
-+		if (i != HIFN_D_RES_RSIZE)
-+			u--;
-+
-+		if (++i == (HIFN_D_RES_RSIZE + 1))
-+			i = 0;
-+	}
-+	dma->resk = i; dma->resu = u;
++	struct unit_directory *ud;
++	struct node_entry *ne = (struct node_entry *)data;
 +
-+	i = dma->srck; u = dma->srcu;
-+	while (u != 0) {
-+		if (i == HIFN_D_SRC_RSIZE)
-+			i = 0;
-+		if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
-+			break;
-+		i++, u--;
-+	}
-+	dma->srck = i; dma->srcu = u;
++	ud = container_of(dev, struct unit_directory, unit_dev);
++	return ud->ne == ne;
++}
 +
-+	i = dma->cmdk; u = dma->cmdu;
-+	while (u != 0) {
-+		if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
-+			break;
-+		if (i != HIFN_D_CMD_RSIZE)
-+			u--;
-+		if (++i == (HIFN_D_CMD_RSIZE + 1))
-+			i = 0;
-+	}
-+	dma->cmdk = i; dma->cmdu = u;
+ static void nodemgr_remove_uds(struct node_entry *ne)
+ {
+ 	struct device *dev;
+-	struct unit_directory *tmp, *ud;
+-
+-	/* Iteration over nodemgr_ud_class.devices has to be protected by
+-	 * nodemgr_ud_class.sem, but device_unregister() will eventually
+-	 * take nodemgr_ud_class.sem too. Therefore pick out one ud at a time,
+-	 * release the semaphore, and then unregister the ud. Since this code
+-	 * may be called from other contexts besides the knodemgrds, protect the
+-	 * gap after release of the semaphore by nodemgr_serialize_remove_uds.
++	struct unit_directory *ud;
 +
-+	i = dma->dstk; u = dma->dstu;
-+	while (u != 0) {
-+		if (i == HIFN_D_DST_RSIZE)
-+			i = 0;
-+		if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
-+			break;
-+		i++, u--;
-+	}
-+	dma->dstk = i; dma->dstu = u;
++	/* Use class_find_device() to iterate the devices. Since this code
++	 * may be called from other contexts besides the knodemgrds,
++	 * protect it by nodemgr_serialize_remove_uds.
+ 	 */
+ 	mutex_lock(&nodemgr_serialize_remove_uds);
+ 	for (;;) {
+-		ud = NULL;
+-		down(&nodemgr_ud_class.sem);
+-		list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
+-			tmp = container_of(dev, struct unit_directory,
+-					   unit_dev);
+-			if (tmp->ne == ne) {
+-				ud = tmp;
+-				break;
+-			}
+-		}
+-		up(&nodemgr_ud_class.sem);
+-		if (ud == NULL)
++		dev = class_find_device(&nodemgr_ud_class, ne, __match_ne);
++		if (!dev)
+ 			break;
++		ud = container_of(dev, struct unit_directory, unit_dev);
++		put_device(dev);
+ 		device_unregister(&ud->unit_dev);
+ 		device_unregister(&ud->device);
+ 	}
+@@ -882,45 +880,66 @@ fail_alloc:
+ 	return NULL;
+ }
+ 
++static int __match_ne_guid(struct device *dev, void *data)
++{
++	struct node_entry *ne;
++	u64 *guid = (u64 *)data;
 +
-+	dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
-+			"k: %d.%d.%d.%d.\n",
-+			dev->name,
-+			dma->cmdi, dma->srci, dma->dsti, dma->resi,
-+			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
-+			dma->cmdk, dma->srck, dma->dstk, dma->resk);
++	ne = container_of(dev, struct node_entry, node_dev);
++	return ne->guid == *guid;
 +}
+ 
+ static struct node_entry *find_entry_by_guid(u64 guid)
+ {
+ 	struct device *dev;
+-	struct node_entry *ne, *ret_ne = NULL;
+-
+-	down(&nodemgr_ne_class.sem);
+-	list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
+-		ne = container_of(dev, struct node_entry, node_dev);
++	struct node_entry *ne;
+ 
+-		if (ne->guid == guid) {
+-			ret_ne = ne;
+-			break;
+-		}
+-	}
+-	up(&nodemgr_ne_class.sem);
++	dev = class_find_device(&nodemgr_ne_class, &guid, __match_ne_guid);
++	if (!dev)
++		return NULL;
++	ne = container_of(dev, struct node_entry, node_dev);
++	put_device(dev);
+ 
+-	return ret_ne;
++	return ne;
+ }
+ 
++struct match_nodeid_param {
++	struct hpsb_host *host;
++	nodeid_t nodeid;
++};
 +
-+static void hifn_work(struct work_struct *work)
++static int __match_ne_nodeid(struct device *dev, void *data)
 +{
-+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
-+	struct hifn_device *dev = container_of(dw, struct hifn_device, work);
-+	unsigned long flags;
-+	int reset = 0;
-+	u32 r = 0;
-+
-+	spin_lock_irqsave(&dev->lock, flags);
-+	if (dev->active == 0) {
-+		struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+
-+		if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
-+			dev->flags &= ~HIFN_FLAG_CMD_BUSY;
-+			r |= HIFN_DMACSR_C_CTRL_DIS;
-+		}
-+		if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
-+			dev->flags &= ~HIFN_FLAG_SRC_BUSY;
-+			r |= HIFN_DMACSR_S_CTRL_DIS;
-+		}
-+		if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
-+			dev->flags &= ~HIFN_FLAG_DST_BUSY;
-+			r |= HIFN_DMACSR_D_CTRL_DIS;
-+		}
-+		if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
-+			dev->flags &= ~HIFN_FLAG_RES_BUSY;
-+			r |= HIFN_DMACSR_R_CTRL_DIS;
-+		}
-+		if (r)
-+			hifn_write_1(dev, HIFN_1_DMA_CSR, r);
-+	} else
-+		dev->active--;
-+
-+	if (dev->prev_success == dev->success && dev->started)
-+		reset = 1;
-+	dev->prev_success = dev->success;
-+	spin_unlock_irqrestore(&dev->lock, flags);
-+
-+	if (reset) {
-+		dprintk("%s: r: %08x, active: %d, started: %d, "
-+				"success: %lu: reset: %d.\n",
-+			dev->name, r, dev->active, dev->started,
-+			dev->success, reset);
++	int found = 0;
++	struct node_entry *ne;
++	struct match_nodeid_param *param = (struct match_nodeid_param *)data;
 +
-+		if (++dev->reset >= 5) {
-+			dprintk("%s: really hard reset.\n", dev->name);
-+			hifn_reset_dma(dev, 1);
-+			hifn_stop_device(dev);
-+			hifn_start_device(dev);
-+			dev->reset = 0;
++	if (!dev)
++		goto ret;
++	ne = container_of(dev, struct node_entry, node_dev);
++	if (ne->host == param->host && ne->nodeid == param->nodeid)
++		found = 1;
++ret:
++	return found;
++}
+ 
+ static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
+ 					       nodeid_t nodeid)
+ {
+ 	struct device *dev;
+-	struct node_entry *ne, *ret_ne = NULL;
++	struct node_entry *ne;
++	struct match_nodeid_param param;
+ 
+-	down(&nodemgr_ne_class.sem);
+-	list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
+-		ne = container_of(dev, struct node_entry, node_dev);
++	param.host = host;
++	param.nodeid = nodeid;
+ 
+-		if (ne->host == host && ne->nodeid == nodeid) {
+-			ret_ne = ne;
+-			break;
+-		}
+-	}
+-	up(&nodemgr_ne_class.sem);
++	dev = class_find_device(&nodemgr_ne_class, &param, __match_ne_nodeid);
++	if (!dev)
++		return NULL;
++	ne = container_of(dev, struct node_entry, node_dev);
++	put_device(dev);
+ 
+-	return ret_ne;
++	return ne;
+ }
+ 
+ 
+@@ -1370,107 +1389,109 @@ static void nodemgr_node_scan(struct host_info *hi, int generation)
+ 	}
+ }
+ 
+-
+-static void nodemgr_suspend_ne(struct node_entry *ne)
++static int __nodemgr_driver_suspend(struct device *dev, void *data)
+ {
+-	struct device *dev;
+ 	struct unit_directory *ud;
+ 	struct device_driver *drv;
++	struct node_entry *ne = (struct node_entry *)data;
+ 	int error;
+ 
+-	HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
+-		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
++	ud = container_of(dev, struct unit_directory, unit_dev);
++	if (ud->ne == ne) {
++		drv = get_driver(ud->device.driver);
++		if (drv) {
++			error = 1; /* release if suspend is not implemented */
++			if (drv->suspend) {
++				down(&ud->device.sem);
++				error = drv->suspend(&ud->device, PMSG_SUSPEND);
++				up(&ud->device.sem);
++			}
++			if (error)
++				device_release_driver(&ud->device);
++			put_driver(drv);
 +		}
-+
-+		spin_lock_irqsave(&dev->lock, flags);
-+		hifn_check_for_completion(dev, -EBUSY);
-+		hifn_clear_rings(dev);
-+		dev->started = 0;
-+		spin_unlock_irqrestore(&dev->lock, flags);
 +	}
+ 
+-	ne->in_limbo = 1;
+-	WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
++	return 0;
++}
+ 
+-	down(&nodemgr_ud_class.sem);
+-	list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
+-		ud = container_of(dev, struct unit_directory, unit_dev);
+-		if (ud->ne != ne)
+-			continue;
++static int __nodemgr_driver_resume(struct device *dev, void *data)
++{
++	struct unit_directory *ud;
++	struct device_driver *drv;
++	struct node_entry *ne = (struct node_entry *)data;
+ 
++	ud = container_of(dev, struct unit_directory, unit_dev);
++	if (ud->ne == ne) {
+ 		drv = get_driver(ud->device.driver);
+-		if (!drv)
+-			continue;
+-
+-		error = 1; /* release if suspend is not implemented */
+-		if (drv->suspend) {
+-			down(&ud->device.sem);
+-			error = drv->suspend(&ud->device, PMSG_SUSPEND);
+-			up(&ud->device.sem);
++		if (drv) {
++			if (drv->resume) {
++				down(&ud->device.sem);
++				drv->resume(&ud->device);
++				up(&ud->device.sem);
++			}
++			put_driver(drv);
+ 		}
+-		if (error)
+-			device_release_driver(&ud->device);
+-		put_driver(drv);
+ 	}
+-	up(&nodemgr_ud_class.sem);
+-}
+ 
++	return 0;
++}
+ 
+-static void nodemgr_resume_ne(struct node_entry *ne)
++static void nodemgr_suspend_ne(struct node_entry *ne)
+ {
+-	struct device *dev;
+-	struct unit_directory *ud;
+-	struct device_driver *drv;
++	HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
++		   NODE_BUS_ARGS(ne->host, ne->nodeid),
++		   (unsigned long long)ne->guid);
+ 
+-	ne->in_limbo = 0;
+-	device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
++	ne->in_limbo = 1;
++	WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
+ 
+-	down(&nodemgr_ud_class.sem);
+-	list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
+-		ud = container_of(dev, struct unit_directory, unit_dev);
+-		if (ud->ne != ne)
+-			continue;
++	class_for_each_device(&nodemgr_ud_class, ne, __nodemgr_driver_suspend);
++}
+ 
+-		drv = get_driver(ud->device.driver);
+-		if (!drv)
+-			continue;
+ 
+-		if (drv->resume) {
+-			down(&ud->device.sem);
+-			drv->resume(&ud->device);
+-			up(&ud->device.sem);
+-		}
+-		put_driver(drv);
+-	}
+-	up(&nodemgr_ud_class.sem);
++static void nodemgr_resume_ne(struct node_entry *ne)
++{
++	ne->in_limbo = 0;
++	device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
+ 
++	class_for_each_device(&nodemgr_ud_class, ne, __nodemgr_driver_resume);
+ 	HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
+ 		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
+ }
+ 
+-
+-static void nodemgr_update_pdrv(struct node_entry *ne)
++static int __nodemgr_update_pdrv(struct device *dev, void *data)
+ {
+-	struct device *dev;
+ 	struct unit_directory *ud;
+ 	struct device_driver *drv;
+ 	struct hpsb_protocol_driver *pdrv;
++	struct node_entry *ne = (struct node_entry *)data;
+ 	int error;
+ 
+-	down(&nodemgr_ud_class.sem);
+-	list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
+-		ud = container_of(dev, struct unit_directory, unit_dev);
+-		if (ud->ne != ne)
+-			continue;
+-
++	ud = container_of(dev, struct unit_directory, unit_dev);
++	if (ud->ne == ne) {
+ 		drv = get_driver(ud->device.driver);
+-		if (!drv)
+-			continue;
+-
+-		error = 0;
+-		pdrv = container_of(drv, struct hpsb_protocol_driver, driver);
+-		if (pdrv->update) {
+-			down(&ud->device.sem);
+-			error = pdrv->update(ud);
+-			up(&ud->device.sem);
++		if (drv) {
++			error = 0;
++			pdrv = container_of(drv, struct hpsb_protocol_driver,
++					    driver);
++			if (pdrv->update) {
++				down(&ud->device.sem);
++				error = pdrv->update(ud);
++				up(&ud->device.sem);
++			}
++			if (error)
++				device_release_driver(&ud->device);
++			put_driver(drv);
+ 		}
+-		if (error)
+-			device_release_driver(&ud->device);
+-		put_driver(drv);
+ 	}
+-	up(&nodemgr_ud_class.sem);
 +
-+	schedule_delayed_work(&dev->work, HZ);
++	return 0;
 +}
 +
-+static irqreturn_t hifn_interrupt(int irq, void *data)
++static void nodemgr_update_pdrv(struct node_entry *ne)
 +{
-+	struct hifn_device *dev = (struct hifn_device *)data;
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+	u32 dmacsr, restart;
++	class_for_each_device(&nodemgr_ud_class, ne, __nodemgr_update_pdrv);
+ }
+ 
+ 
+@@ -1529,13 +1550,31 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge
+ 	put_device(dev);
+ }
+ 
++struct probe_param {
++	struct host_info *hi;
++	int generation;
++};
 +
-+	dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
++static int __nodemgr_node_probe(struct device *dev, void *data)
++{
++	struct probe_param *param = (struct probe_param *)data;
++	struct node_entry *ne;
 +
-+	dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
-+			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
-+		dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
-+		dma->cmdu, dma->srcu, dma->dstu, dma->resu,
-+		dma->cmdi, dma->srci, dma->dsti, dma->resi);
++	ne = container_of(dev, struct node_entry, node_dev);
++	if (!ne->needs_probe)
++		nodemgr_probe_ne(param->hi, ne, param->generation);
++	if (ne->needs_probe)
++		nodemgr_probe_ne(param->hi, ne, param->generation);
++	return 0;
++}
+ 
+ static void nodemgr_node_probe(struct host_info *hi, int generation)
+ {
+ 	struct hpsb_host *host = hi->host;
+-	struct device *dev;
+-	struct node_entry *ne;
++	struct probe_param param;
+ 
++	param.hi = hi;
++	param.generation = generation;
+ 	/* Do some processing of the nodes we've probed. This pulls them
+ 	 * into the sysfs layer if needed, and can result in processing of
+ 	 * unit-directories, or just updating the node and it's
+@@ -1545,19 +1584,7 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
+ 	 * while probes are time-consuming. (Well, those probes need some
+ 	 * improvement...) */
+ 
+-	down(&nodemgr_ne_class.sem);
+-	list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
+-		ne = container_of(dev, struct node_entry, node_dev);
+-		if (!ne->needs_probe)
+-			nodemgr_probe_ne(hi, ne, generation);
+-	}
+-	list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
+-		ne = container_of(dev, struct node_entry, node_dev);
+-		if (ne->needs_probe)
+-			nodemgr_probe_ne(hi, ne, generation);
+-	}
+-	up(&nodemgr_ne_class.sem);
+-
++	class_for_each_device(&nodemgr_ne_class, &param, __nodemgr_node_probe);
+ 
+ 	/* If we had a bus reset while we were scanning the bus, it is
+ 	 * possible that we did not probe all nodes.  In that case, we
+@@ -1757,6 +1784,22 @@ exit:
+ 	return 0;
+ }
+ 
++struct host_iter_param {
++	void *data;
++	int (*cb)(struct hpsb_host *, void *);
++};
 +
-+	if ((dmacsr & dev->dmareg) == 0)
-+		return IRQ_NONE;
++static int __nodemgr_for_each_host(struct device *dev, void *data)
++{
++	struct hpsb_host *host;
++	struct host_iter_param *hip = (struct host_iter_param *)data;
++	int error = 0;
 +
-+	hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);
++	host = container_of(dev, struct hpsb_host, host_dev);
++	error = hip->cb(host, hip->data);
 +
-+	if (dmacsr & HIFN_DMACSR_ENGINE)
-+		hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
-+	if (dmacsr & HIFN_DMACSR_PUBDONE)
-+		hifn_write_1(dev, HIFN_1_PUB_STATUS,
-+			hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
++	return error;
++}
+ /**
+  * nodemgr_for_each_host - call a function for each IEEE 1394 host
+  * @data: an address to supply to the callback
+@@ -1771,18 +1814,13 @@ exit:
+  */
+ int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
+ {
+-	struct device *dev;
+-	struct hpsb_host *host;
+-	int error = 0;
+-
+-	down(&hpsb_host_class.sem);
+-	list_for_each_entry(dev, &hpsb_host_class.devices, node) {
+-		host = container_of(dev, struct hpsb_host, host_dev);
++	struct host_iter_param hip;
++	int error;
+ 
+-		if ((error = cb(host, data)))
+-			break;
+-	}
+-	up(&hpsb_host_class.sem);
++	hip.cb = cb;
++	hip.data = data;
++	error = class_for_each_device(&hpsb_host_class, &hip,
++				      __nodemgr_for_each_host);
+ 
+ 	return error;
+ }
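
The nodemgr.c conversion above replaces open-coded walks of the class device lists under the class semaphore with class_find_device()/class_for_each_device() plus small match and callback helpers. The lookup pattern, as used in the hunk (example_* names are illustrative):

/* Match callback: compare the device's node_entry GUID against *data. */
static int example_match_guid(struct device *dev, void *data)
{
	struct node_entry *ne = container_of(dev, struct node_entry, node_dev);

	return ne->guid == *(u64 *)data;
}

static struct node_entry *example_find_by_guid(u64 guid)
{
	struct device *dev;
	struct node_entry *ne;

	dev = class_find_device(&nodemgr_ne_class, &guid, example_match_guid);
	if (!dev)
		return NULL;
	ne = container_of(dev, struct node_entry, node_dev);
	put_device(dev);	/* drop the reference class_find_device() took */
	return ne;
}
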
+diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
+index 372c5c1..969de2a 100644
+--- a/drivers/ieee1394/ohci1394.c
++++ b/drivers/ieee1394/ohci1394.c
+@@ -2126,10 +2126,14 @@ static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
+ 	list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
+ 		mask = 1 << t->context;
+ 
+-		if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
+-			tasklet_schedule(&t->tasklet);
+-		else if (rx_event & mask)
+-			tasklet_schedule(&t->tasklet);
++		if (t->type == OHCI_ISO_TRANSMIT) {
++			if (tx_event & mask)
++				tasklet_schedule(&t->tasklet);
++		} else {
++			/* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
++			if (rx_event & mask)
++				tasklet_schedule(&t->tasklet);
++		}
+ 	}
+ 
+ 	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
+diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
+index cadf047..37e7e10 100644
+--- a/drivers/ieee1394/raw1394.c
++++ b/drivers/ieee1394/raw1394.c
+@@ -858,7 +858,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
+ 	int found = 0, size = 0, rcode = -1;
+ 	struct arm_request_response *arm_req_resp = NULL;
+ 
+-	DBGMSG("arm_read  called by node: %X"
++	DBGMSG("arm_read  called by node: %X "
+ 	       "addr: %4.4x %8.8x length: %Zu", nodeid,
+ 	       (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
+ 	       length);
+@@ -1012,7 +1012,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
+ 	int found = 0, size = 0, rcode = -1, length_conflict = 0;
+ 	struct arm_request_response *arm_req_resp = NULL;
+ 
+-	DBGMSG("arm_write called by node: %X"
++	DBGMSG("arm_write called by node: %X "
+ 	       "addr: %4.4x %8.8x length: %Zu", nodeid,
+ 	       (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
+ 	       length);
+diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
+index b83d254..2b889d9 100644
+--- a/drivers/ieee1394/sbp2.c
++++ b/drivers/ieee1394/sbp2.c
+@@ -51,6 +51,7 @@
+  * Grep for inline FIXME comments below.
+  */
+ 
++#include <linux/blkdev.h>
+ #include <linux/compiler.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+@@ -127,17 +128,21 @@ MODULE_PARM_DESC(serialize_io, "Serialize requests coming from SCSI drivers "
+ 		 "(default = Y, faster but buggy = N)");
+ 
+ /*
+- * Bump up max_sectors if you'd like to support very large sized
+- * transfers. Please note that some older sbp2 bridge chips are broken for
+- * transfers greater or equal to 128KB.  Default is a value of 255
+- * sectors, or just under 128KB (at 512 byte sector size). I can note that
+- * the Oxsemi sbp2 chipsets have no problems supporting very large
+- * transfer sizes.
++ * Adjust max_sectors if you'd like to influence how many sectors each SCSI
++ * command can transfer at most. Please note that some older SBP-2 bridge
++ * chips are broken for transfers greater or equal to 128KB, therefore
++ * max_sectors used to be a safe 255 sectors for many years. We now have a
++ * default of 0 here which means that we let the SCSI stack choose a limit.
++ *
++ * The SBP2_WORKAROUND_128K_MAX_TRANS flag, if set either in the workarounds
++ * module parameter or in the sbp2_workarounds_table[], will override the
++ * value of max_sectors. We should use sbp2_workarounds_table[] to cover any
++ * bridge chip which becomes known to need the 255 sectors limit.
+  */
+-static int sbp2_max_sectors = SBP2_MAX_SECTORS;
++static int sbp2_max_sectors;
+ module_param_named(max_sectors, sbp2_max_sectors, int, 0444);
+ MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported "
+-		 "(default = " __stringify(SBP2_MAX_SECTORS) ")");
++		 "(default = 0 = use SCSI stack's default)");
+ 
+ /*
+  * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
+@@ -1451,7 +1456,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
+ 				     struct sbp2_fwhost_info *hi,
+ 				     struct sbp2_command_info *cmd,
+ 				     unsigned int scsi_use_sg,
+-				     struct scatterlist *sgpnt,
++				     struct scatterlist *sg,
+ 				     u32 orb_direction,
+ 				     enum dma_data_direction dma_dir)
+ {
+@@ -1461,12 +1466,12 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
+ 
+ 	/* special case if only one element (and less than 64KB in size) */
+ 	if ((scsi_use_sg == 1) &&
+-	    (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
++	    (sg_dma_len(sg) <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
+ 
+-		cmd->dma_size = sgpnt[0].length;
++		cmd->dma_size = sg_dma_len(sg);
+ 		cmd->dma_type = CMD_DMA_PAGE;
+ 		cmd->cmd_dma = dma_map_page(hi->host->device.parent,
+-					    sg_page(&sgpnt[0]), sgpnt[0].offset,
++					    sg_page(sg), sg->offset,
+ 					    cmd->dma_size, cmd->dma_dir);
+ 
+ 		orb->data_descriptor_lo = cmd->cmd_dma;
+@@ -1477,11 +1482,11 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
+ 						&cmd->scatter_gather_element[0];
+ 		u32 sg_count, sg_len;
+ 		dma_addr_t sg_addr;
+-		int i, count = dma_map_sg(hi->host->device.parent, sgpnt,
++		int i, count = dma_map_sg(hi->host->device.parent, sg,
+ 					  scsi_use_sg, dma_dir);
+ 
+ 		cmd->dma_size = scsi_use_sg;
+-		cmd->sge_buffer = sgpnt;
++		cmd->sge_buffer = sg;
+ 
+ 		/* use page tables (s/g) */
+ 		orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
+@@ -1489,9 +1494,9 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
+ 
+ 		/* loop through and fill out our SBP-2 page tables
+ 		 * (and split up anything too large) */
+-		for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
+-			sg_len = sg_dma_len(sgpnt);
+-			sg_addr = sg_dma_address(sgpnt);
++		for (i = 0, sg_count = 0; i < count; i++, sg = sg_next(sg)) {
++			sg_len = sg_dma_len(sg);
++			sg_addr = sg_dma_address(sg);
+ 			while (sg_len) {
+ 				sg_element[sg_count].segment_base_lo = sg_addr;
+ 				if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
+@@ -1521,11 +1526,10 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
+ 				    unchar *scsi_cmd,
+ 				    unsigned int scsi_use_sg,
+ 				    unsigned int scsi_request_bufflen,
+-				    void *scsi_request_buffer,
++				    struct scatterlist *sg,
+ 				    enum dma_data_direction dma_dir)
+ {
+ 	struct sbp2_fwhost_info *hi = lu->hi;
+-	struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
+ 	struct sbp2_command_orb *orb = &cmd->command_orb;
+ 	u32 orb_direction;
+ 
+@@ -1560,7 +1564,7 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
+ 		orb->data_descriptor_lo = 0x0;
+ 		orb->misc |= ORB_SET_DIRECTION(1);
+ 	} else
+-		sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
++		sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sg,
+ 					 orb_direction, dma_dir);
+ 
+ 	sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
+@@ -1650,7 +1654,6 @@ static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
+ 			     void (*done)(struct scsi_cmnd *))
+ {
+ 	unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
+-	unsigned int request_bufflen = scsi_bufflen(SCpnt);
+ 	struct sbp2_command_info *cmd;
+ 
+ 	cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
+@@ -1658,7 +1661,7 @@ static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
+ 		return -EIO;
+ 
+ 	sbp2_create_command_orb(lu, cmd, scsi_cmd, scsi_sg_count(SCpnt),
+-				request_bufflen, scsi_sglist(SCpnt),
++				scsi_bufflen(SCpnt), scsi_sglist(SCpnt),
+ 				SCpnt->sc_data_direction);
+ 	sbp2_link_orb_command(lu, cmd);
+ 
+@@ -1963,6 +1966,12 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
+ 	lu->sdev = sdev;
+ 	sdev->allow_restart = 1;
+ 
++	/*
++	 * Update the dma alignment (minimum alignment requirements for
++	 * start and end of DMA transfers) to be a sector
++	 */
++	blk_queue_update_dma_alignment(sdev->request_queue, 511);
 +
-+	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
-+	if (restart) {
-+		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
+ 	if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
+ 		sdev->inquiry_len = 36;
+ 	return 0;
+@@ -1981,6 +1990,8 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
+ 		sdev->skip_ms_page_8 = 1;
+ 	if (lu->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+ 		sdev->fix_capacity = 1;
++	if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
++		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+ 	return 0;
+ }
+ 
+@@ -2087,9 +2098,6 @@ static int sbp2_module_init(void)
+ 		sbp2_shost_template.cmd_per_lun = 1;
+ 	}
+ 
+-	if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+-	    (sbp2_max_sectors * 512) > (128 * 1024))
+-		sbp2_max_sectors = 128 * 1024 / 512;
+ 	sbp2_shost_template.max_sectors = sbp2_max_sectors;
+ 
+ 	hpsb_register_highlevel(&sbp2_highlevel);
+diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
+index 333a4bb..d2ecb0d 100644
+--- a/drivers/ieee1394/sbp2.h
++++ b/drivers/ieee1394/sbp2.h
+@@ -222,7 +222,6 @@ struct sbp2_status_block {
+  */
+ 
+ #define SBP2_MAX_SG_ELEMENT_LENGTH		0xf000
+-#define SBP2_MAX_SECTORS			255
+ /* There is no real limitation of the queue depth (i.e. length of the linked
+  * list of command ORBs) at the target. The chosen depth is merely an
+  * implementation detail of the sbp2 driver. */
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 5381c80..a58ad8a 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -110,7 +110,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
+ 	__be32 ip = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
+ 	int ret;
+ 
+-	dev = ip_dev_find(ip);
++	dev = ip_dev_find(&init_net, ip);
+ 	if (!dev)
+ 		return -EADDRNOTAVAIL;
+ 
+@@ -158,7 +158,7 @@ static void addr_send_arp(struct sockaddr_in *dst_in)
+ 
+ 	memset(&fl, 0, sizeof fl);
+ 	fl.nl_u.ip4_u.daddr = dst_ip;
+-	if (ip_route_output_key(&rt, &fl))
++	if (ip_route_output_key(&init_net, &rt, &fl))
+ 		return;
+ 
+ 	neigh_event_send(rt->u.dst.neighbour, NULL);
+@@ -179,7 +179,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
+ 	memset(&fl, 0, sizeof fl);
+ 	fl.nl_u.ip4_u.daddr = dst_ip;
+ 	fl.nl_u.ip4_u.saddr = src_ip;
+-	ret = ip_route_output_key(&rt, &fl);
++	ret = ip_route_output_key(&init_net, &rt, &fl);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -261,15 +261,15 @@ static int addr_resolve_local(struct sockaddr_in *src_in,
+ 	__be32 dst_ip = dst_in->sin_addr.s_addr;
+ 	int ret;
+ 
+-	dev = ip_dev_find(dst_ip);
++	dev = ip_dev_find(&init_net, dst_ip);
+ 	if (!dev)
+ 		return -EADDRNOTAVAIL;
+ 
+-	if (ZERONET(src_ip)) {
++	if (ipv4_is_zeronet(src_ip)) {
+ 		src_in->sin_family = dst_in->sin_family;
+ 		src_in->sin_addr.s_addr = dst_ip;
+ 		ret = rdma_copy_addr(addr, dev, dev->dev_addr);
+-	} else if (LOOPBACK(src_ip)) {
++	} else if (ipv4_is_loopback(src_ip)) {
+ 		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr);
+ 		if (!ret)
+ 			memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 2e39236..c015014 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
++ * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
+  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
+  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+@@ -37,12 +37,14 @@
+ 
+ #include <linux/completion.h>
+ #include <linux/dma-mapping.h>
++#include <linux/device.h>
+ #include <linux/err.h>
+ #include <linux/idr.h>
+ #include <linux/interrupt.h>
+ #include <linux/random.h>
+ #include <linux/rbtree.h>
+ #include <linux/spinlock.h>
++#include <linux/sysfs.h>
+ #include <linux/workqueue.h>
+ 
+ #include <rdma/ib_cache.h>
+@@ -78,17 +80,94 @@ static struct ib_cm {
+ 	struct workqueue_struct *wq;
+ } cm;
+ 
++/* Counter indexes ordered by attribute ID */
++enum {
++	CM_REQ_COUNTER,
++	CM_MRA_COUNTER,
++	CM_REJ_COUNTER,
++	CM_REP_COUNTER,
++	CM_RTU_COUNTER,
++	CM_DREQ_COUNTER,
++	CM_DREP_COUNTER,
++	CM_SIDR_REQ_COUNTER,
++	CM_SIDR_REP_COUNTER,
++	CM_LAP_COUNTER,
++	CM_APR_COUNTER,
++	CM_ATTR_COUNT,
++	CM_ATTR_ID_OFFSET = 0x0010,
++};
 +
-+		if (printk_ratelimit())
-+			printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
-+				dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
-+				!!(dmacsr & HIFN_DMACSR_D_OVER),
-+				puisr, !!(puisr & HIFN_PUISR_DSTOVER));
-+		if (!!(puisr & HIFN_PUISR_DSTOVER))
-+			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
-+		hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
-+					HIFN_DMACSR_D_OVER));
-+	}
++enum {
++	CM_XMIT,
++	CM_XMIT_RETRIES,
++	CM_RECV,
++	CM_RECV_DUPLICATES,
++	CM_COUNTER_GROUPS
++};
 +
-+	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
-+			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
-+	if (restart) {
-+		if (printk_ratelimit())
-+			printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
-+				dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
-+				!!(dmacsr & HIFN_DMACSR_S_ABORT),
-+				!!(dmacsr & HIFN_DMACSR_D_ABORT),
-+				!!(dmacsr & HIFN_DMACSR_R_ABORT));
-+		hifn_reset_dma(dev, 1);
-+		hifn_init_dma(dev);
-+		hifn_init_registers(dev);
-+	}
++static char const counter_group_names[CM_COUNTER_GROUPS]
++				     [sizeof("cm_rx_duplicates")] = {
++	"cm_tx_msgs", "cm_tx_retries",
++	"cm_rx_msgs", "cm_rx_duplicates"
++};
 +
-+	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
-+		dprintk("%s: wait on command.\n", dev->name);
-+		dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
-+		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
-+	}
++struct cm_counter_group {
++	struct kobject obj;
++	atomic_long_t counter[CM_ATTR_COUNT];
++};
 +
-+	tasklet_schedule(&dev->tasklet);
-+	hifn_clear_rings(dev);
++struct cm_counter_attribute {
++	struct attribute attr;
++	int index;
++};
 +
-+	return IRQ_HANDLED;
++#define CM_COUNTER_ATTR(_name, _index) \
++struct cm_counter_attribute cm_##_name##_counter_attr = { \
++	.attr = { .name = __stringify(_name), .mode = 0444, .owner = THIS_MODULE }, \
++	.index = _index \
 +}
 +
-+static void hifn_flush(struct hifn_device *dev)
-+{
-+	unsigned long flags;
-+	struct crypto_async_request *async_req;
-+	struct hifn_context *ctx;
-+	struct ablkcipher_request *req;
-+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-+	int i;
-+
-+	spin_lock_irqsave(&dev->lock, flags);
-+	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
-+		struct hifn_desc *d = &dma->resr[i];
-+
-+		if (dev->sa[i]) {
-+			hifn_process_ready(dev->sa[i],
-+				(d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0);
-+		}
-+	}
-+
-+	while ((async_req = crypto_dequeue_request(&dev->queue))) {
-+		ctx = crypto_tfm_ctx(async_req->tfm);
-+		req = container_of(async_req, struct ablkcipher_request, base);
-+
-+		hifn_process_ready(req, -ENODEV);
-+	}
-+	spin_unlock_irqrestore(&dev->lock, flags);
-+}
++static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
++static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
++static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
++static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
++static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
++static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
++static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
++static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
++static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
++static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
++static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
 +
-+static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
-+		unsigned int len)
-+{
-+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
-+	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
-+	struct hifn_device *dev = ctx->dev;
++static struct attribute *cm_counter_default_attrs[] = {
++	&cm_req_counter_attr.attr,
++	&cm_mra_counter_attr.attr,
++	&cm_rej_counter_attr.attr,
++	&cm_rep_counter_attr.attr,
++	&cm_rtu_counter_attr.attr,
++	&cm_dreq_counter_attr.attr,
++	&cm_drep_counter_attr.attr,
++	&cm_sidr_req_counter_attr.attr,
++	&cm_sidr_rep_counter_attr.attr,
++	&cm_lap_counter_attr.attr,
++	&cm_apr_counter_attr.attr,
++	NULL
++};
 +
-+	if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
-+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+		return -1;
-+	}
+ struct cm_port {
+ 	struct cm_device *cm_dev;
+ 	struct ib_mad_agent *mad_agent;
++	struct kobject port_obj;
+ 	u8 port_num;
++	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
+ };
+ 
+ struct cm_device {
+ 	struct list_head list;
+ 	struct ib_device *device;
++	struct kobject dev_obj;
+ 	u8 ack_delay;
+-	struct cm_port port[0];
++	struct cm_port *port[0];
+ };
+ 
+ struct cm_av {
+@@ -278,7 +357,7 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+ 	list_for_each_entry(cm_dev, &cm.device_list, list) {
+ 		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
+ 					&p, NULL)) {
+-			port = &cm_dev->port[p-1];
++			port = cm_dev->port[p-1];
+ 			break;
+ 		}
+ 	}
+@@ -1270,6 +1349,9 @@ static void cm_dup_req_handler(struct cm_work *work,
+ 	struct ib_mad_send_buf *msg = NULL;
+ 	int ret;
+ 
++	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++			counter[CM_REQ_COUNTER]);
 +
-+	if (len == HIFN_DES_KEY_LENGTH) {
-+		u32 tmp[DES_EXPKEY_WORDS];
-+		int ret = des_ekey(tmp, key);
-+		
-+		if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
-+			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-+			return -EINVAL;
+ 	/* Quick state check to discard duplicate REQs. */
+ 	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
+ 		return;
+@@ -1616,6 +1698,8 @@ static void cm_dup_rep_handler(struct cm_work *work)
+ 	if (!cm_id_priv)
+ 		return;
+ 
++	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++			counter[CM_REP_COUNTER]);
+ 	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+ 	if (ret)
+ 		goto deref;
+@@ -1781,6 +1865,8 @@ static int cm_rtu_handler(struct cm_work *work)
+ 	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
+ 	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
+ 		spin_unlock_irq(&cm_id_priv->lock);
++		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++				counter[CM_RTU_COUNTER]);
+ 		goto out;
+ 	}
+ 	cm_id_priv->id.state = IB_CM_ESTABLISHED;
+@@ -1958,6 +2044,8 @@ static int cm_dreq_handler(struct cm_work *work)
+ 	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
+ 				   dreq_msg->local_comm_id);
+ 	if (!cm_id_priv) {
++		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++				counter[CM_DREQ_COUNTER]);
+ 		cm_issue_drep(work->port, work->mad_recv_wc);
+ 		return -EINVAL;
+ 	}
+@@ -1977,6 +2065,8 @@ static int cm_dreq_handler(struct cm_work *work)
+ 	case IB_CM_MRA_REP_RCVD:
+ 		break;
+ 	case IB_CM_TIMEWAIT:
++		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++				counter[CM_DREQ_COUNTER]);
+ 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ 			goto unlock;
+ 
+@@ -1988,6 +2078,10 @@ static int cm_dreq_handler(struct cm_work *work)
+ 		if (ib_post_send_mad(msg, NULL))
+ 			cm_free_msg(msg);
+ 		goto deref;
++	case IB_CM_DREQ_RCVD:
++		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++				counter[CM_DREQ_COUNTER]);
++		goto unlock;
+ 	default:
+ 		goto unlock;
+ 	}
+@@ -2339,10 +2433,20 @@ static int cm_mra_handler(struct cm_work *work)
+ 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
+ 		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
+ 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
+-				  cm_id_priv->msg, timeout))
++				  cm_id_priv->msg, timeout)) {
++			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
++				atomic_long_inc(&work->port->
++						counter_group[CM_RECV_DUPLICATES].
++						counter[CM_MRA_COUNTER]);
+ 			goto out;
 +		}
-+	}
+ 		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
+ 		break;
++	case IB_CM_MRA_REQ_RCVD:
++	case IB_CM_MRA_REP_RCVD:
++		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++				counter[CM_MRA_COUNTER]);
++		/* fall through */
+ 	default:
+ 		goto out;
+ 	}
+@@ -2502,6 +2606,8 @@ static int cm_lap_handler(struct cm_work *work)
+ 	case IB_CM_LAP_IDLE:
+ 		break;
+ 	case IB_CM_MRA_LAP_SENT:
++		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++				counter[CM_LAP_COUNTER]);
+ 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ 			goto unlock;
+ 
+@@ -2515,6 +2621,10 @@ static int cm_lap_handler(struct cm_work *work)
+ 		if (ib_post_send_mad(msg, NULL))
+ 			cm_free_msg(msg);
+ 		goto deref;
++	case IB_CM_LAP_RCVD:
++		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++				counter[CM_LAP_COUNTER]);
++		goto unlock;
+ 	default:
+ 		goto unlock;
+ 	}
+@@ -2796,6 +2906,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
+ 	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+ 	if (cur_cm_id_priv) {
+ 		spin_unlock_irq(&cm.lock);
++		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++				counter[CM_SIDR_REQ_COUNTER]);
+ 		goto out; /* Duplicate message. */
+ 	}
+ 	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
+@@ -2990,6 +3102,27 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
+ 			    struct ib_mad_send_wc *mad_send_wc)
+ {
+ 	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
++	struct cm_port *port;
++	u16 attr_index;
 +
-+	dev->flags &= ~HIFN_FLAG_OLD_KEY;
++	port = mad_agent->context;
++	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
++				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
 +
-+	memcpy(ctx->key, key, len);
-+	ctx->keysize = len;
++	/*
++	 * If the send was in response to a received message (context[0] is not
++	 * set to a cm_id), and is not a REJ, then it is a send that was
++	 * manually retried.
++	 */
++	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
++		msg->retries = 1;
 +
-+	return 0;
-+}
++	atomic_long_add(1 + msg->retries,
++			&port->counter_group[CM_XMIT].counter[attr_index]);
++	if (msg->retries)
++		atomic_long_add(msg->retries,
++				&port->counter_group[CM_XMIT_RETRIES].
++				counter[attr_index]);
+ 
+ 	switch (mad_send_wc->status) {
+ 	case IB_WC_SUCCESS:
+@@ -3148,8 +3281,10 @@ EXPORT_SYMBOL(ib_cm_notify);
+ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+ 			    struct ib_mad_recv_wc *mad_recv_wc)
+ {
++	struct cm_port *port = mad_agent->context;
+ 	struct cm_work *work;
+ 	enum ib_cm_event_type event;
++	u16 attr_id;
+ 	int paths = 0;
+ 
+ 	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
+@@ -3194,6 +3329,10 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+ 		return;
+ 	}
+ 
++	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
++	atomic_long_inc(&port->counter_group[CM_RECV].
++			counter[attr_id - CM_ATTR_ID_OFFSET]);
 +
-+static int hifn_handle_req(struct ablkcipher_request *req)
+ 	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
+ 		       GFP_KERNEL);
+ 	if (!work) {
+@@ -3204,7 +3343,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+ 	INIT_DELAYED_WORK(&work->work, cm_work_handler);
+ 	work->cm_event.event = event;
+ 	work->mad_recv_wc = mad_recv_wc;
+-	work->port = (struct cm_port *)mad_agent->context;
++	work->port = port;
+ 	queue_delayed_work(cm.wq, &work->work, 0);
+ }
+ 
+@@ -3379,6 +3518,108 @@ static void cm_get_ack_delay(struct cm_device *cm_dev)
+ 		cm_dev->ack_delay = attr.local_ca_ack_delay;
+ }
+ 
++static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
++			       char *buf)
 +{
-+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
-+	struct hifn_device *dev = ctx->dev;
-+	int err = -EAGAIN;
++	struct cm_counter_group *group;
++	struct cm_counter_attribute *cm_attr;
 +
-+	if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
-+		err = hifn_setup_session(req);
++	group = container_of(obj, struct cm_counter_group, obj);
++	cm_attr = container_of(attr, struct cm_counter_attribute, attr);
 +
-+	if (err == -EAGAIN) {
-+		unsigned long flags;
++	return sprintf(buf, "%ld\n",
++		       atomic_long_read(&group->counter[cm_attr->index]));
++}
 +
-+		spin_lock_irqsave(&dev->lock, flags);
-+		err = ablkcipher_enqueue_request(&dev->queue, req);
-+		spin_unlock_irqrestore(&dev->lock, flags);
-+	}
++static struct sysfs_ops cm_counter_ops = {
++	.show = cm_show_counter
++};
 +
-+	return err;
-+}
++static struct kobj_type cm_counter_obj_type = {
++	.sysfs_ops = &cm_counter_ops,
++	.default_attrs = cm_counter_default_attrs
++};
 +
-+static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
-+		u8 type, u8 mode)
++static void cm_release_port_obj(struct kobject *obj)
 +{
-+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
-+	unsigned ivsize;
++	struct cm_port *cm_port;
 +
-+	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
++	printk(KERN_ERR "free cm port\n");
 +
-+	if (req->info && mode != ACRYPTO_MODE_ECB) {
-+		if (type == ACRYPTO_TYPE_AES_128)
-+			ivsize = HIFN_AES_IV_LENGTH;
-+		else if (type == ACRYPTO_TYPE_DES)
-+			ivsize = HIFN_DES_KEY_LENGTH;
-+		else if (type == ACRYPTO_TYPE_3DES)
-+			ivsize = HIFN_3DES_KEY_LENGTH;
-+	}
++	cm_port = container_of(obj, struct cm_port, port_obj);
++	kfree(cm_port);
++}
 +
-+	if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
-+		if (ctx->keysize == 24)
-+			type = ACRYPTO_TYPE_AES_192;
-+		else if (ctx->keysize == 32)
-+			type = ACRYPTO_TYPE_AES_256;
-+	}
++static struct kobj_type cm_port_obj_type = {
++	.release = cm_release_port_obj
++};
 +
-+	ctx->op = op;
-+	ctx->mode = mode;
-+	ctx->type = type;
-+	ctx->iv = req->info;
-+	ctx->ivsize = ivsize;
++static void cm_release_dev_obj(struct kobject *obj)
++{
++	struct cm_device *cm_dev;
 +
-+	/*
-+	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
-+	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
-+	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
-+	 */
++	printk(KERN_ERR "free cm dev\n");
 +
-+	return hifn_handle_req(req);
++	cm_dev = container_of(obj, struct cm_device, dev_obj);
++	kfree(cm_dev);
 +}
 +
-+static int hifn_process_queue(struct hifn_device *dev)
-+{
-+	struct crypto_async_request *async_req;
-+	struct hifn_context *ctx;
-+	struct ablkcipher_request *req;
-+	unsigned long flags;
-+	int err = 0;
++static struct kobj_type cm_dev_obj_type = {
++	.release = cm_release_dev_obj
++};
 +
-+	while (dev->started < HIFN_QUEUE_LENGTH) {
-+		spin_lock_irqsave(&dev->lock, flags);
-+		async_req = crypto_dequeue_request(&dev->queue);
-+		spin_unlock_irqrestore(&dev->lock, flags);
++struct class cm_class = {
++	.name    = "infiniband_cm",
++};
++EXPORT_SYMBOL(cm_class);
 +
-+		if (!async_req)
-+			break;
++static void cm_remove_fs_obj(struct kobject *obj)
++{
++	kobject_put(obj->parent);
++	kobject_put(obj);
++}
 +
-+		ctx = crypto_tfm_ctx(async_req->tfm);
-+		req = container_of(async_req, struct ablkcipher_request, base);
++static int cm_create_port_fs(struct cm_port *port)
++{
++	int i, ret;
 +
-+		err = hifn_handle_req(req);
-+		if (err)
-+			break;
++	ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
++				   kobject_get(&port->cm_dev->dev_obj),
++				   "%d", port->port_num);
++	if (ret) {
++		kfree(port);
++		return ret;
 +	}
 +
-+	return err;
-+}
-+
-+static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
-+		u8 type, u8 mode)
-+{
-+	int err;
-+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
-+	struct hifn_device *dev = ctx->dev;
++	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
++		ret = kobject_init_and_add(&port->counter_group[i].obj,
++					   &cm_counter_obj_type,
++					   kobject_get(&port->port_obj),
++					   "%s", counter_group_names[i]);
++		if (ret)
++			goto error;
++	}
 +
-+	err = hifn_setup_crypto_req(req, op, type, mode);
-+	if (err)
-+		return err;
++	return 0;
 +
-+	if (dev->started < HIFN_QUEUE_LENGTH &&	dev->queue.qlen)
-+		err = hifn_process_queue(dev);
++error:
++	while (i--)
++		cm_remove_fs_obj(&port->counter_group[i].obj);
++	cm_remove_fs_obj(&port->port_obj);
++	return ret;
 +
-+	return err;
 +}
 +
-+/*
-+ * AES ecryption functions.
-+ */
-+static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
-+}
-+static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
-+}
-+static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
-+}
-+static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
++static void cm_remove_port_fs(struct cm_port *port)
 +{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
-+}
++	int i;
 +
-+/*
-+ * AES decryption functions.
-+ */
-+static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
-+}
-+static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
-+}
-+static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
-+}
-+static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
-+}
++	for (i = 0; i < CM_COUNTER_GROUPS; i++)
++		cm_remove_fs_obj(&port->counter_group[i].obj);
 +
-+/*
-+ * DES ecryption functions.
-+ */
-+static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
-+}
-+static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
-+}
-+static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
-+}
-+static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
++	cm_remove_fs_obj(&port->port_obj);
 +}
 +
-+/*
-+ * DES decryption functions.
-+ */
-+static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
-+}
-+static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
-+}
-+static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
-+}
-+static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
-+}
+ static void cm_add_one(struct ib_device *device)
+ {
+ 	struct cm_device *cm_dev;
+@@ -3397,7 +3638,7 @@ static void cm_add_one(struct ib_device *device)
+ 	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ 		return;
+ 
+-	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
++	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
+ 			 device->phys_port_cnt, GFP_KERNEL);
+ 	if (!cm_dev)
+ 		return;
+@@ -3405,11 +3646,27 @@ static void cm_add_one(struct ib_device *device)
+ 	cm_dev->device = device;
+ 	cm_get_ack_delay(cm_dev);
+ 
++	ret = kobject_init_and_add(&cm_dev->dev_obj, &cm_dev_obj_type,
++				   &cm_class.subsys.kobj, "%s", device->name);
++	if (ret) {
++		kfree(cm_dev);
++		return;
++	}
 +
-+/*
-+ * 3DES ecryption functions.
-+ */
-+static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
-+}
-+static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
-+}
-+static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
-+}
-+static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
-+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
-+}
+ 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
+ 	for (i = 1; i <= device->phys_port_cnt; i++) {
+-		port = &cm_dev->port[i-1];
++		port = kzalloc(sizeof *port, GFP_KERNEL);
++		if (!port)
++			goto error1;
 +
-+/*
-+ * 3DES decryption functions.
-+ */
-+static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
-+}
-+static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
-+}
-+static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
-+}
-+static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
-+{
-+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
-+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
-+}
++		cm_dev->port[i-1] = port;
+ 		port->cm_dev = cm_dev;
+ 		port->port_num = i;
 +
-+struct hifn_alg_template
-+{
-+	char name[CRYPTO_MAX_ALG_NAME];
-+	char drv_name[CRYPTO_MAX_ALG_NAME];
-+	unsigned int bsize;
-+	struct ablkcipher_alg ablkcipher;
-+};
++		ret = cm_create_port_fs(port);
++		if (ret)
++			goto error1;
 +
-+static struct hifn_alg_template hifn_alg_templates[] = {
-+	/*
-+	 * 3DES ECB, CBC, CFB and OFB modes.
-+	 */
-+	{
-+		.name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
-+		.ablkcipher = {
-+			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
-+			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_3des_cfb,
-+			.decrypt	=	hifn_decrypt_3des_cfb,
-+		},
-+	},
-+	{
-+		.name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
-+		.ablkcipher = {
-+			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
-+			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_3des_ofb,
-+			.decrypt	=	hifn_decrypt_3des_ofb,
-+		},
-+	},
-+	{
-+		.name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
-+		.ablkcipher = {
-+			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
-+			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_3des_cbc,
-+			.decrypt	=	hifn_decrypt_3des_cbc,
-+		},
-+	},
-+	{
-+		.name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
-+		.ablkcipher = {
-+			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
-+			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_3des_ecb,
-+			.decrypt	=	hifn_decrypt_3des_ecb,
-+		},
-+	},
+ 		port->mad_agent = ib_register_mad_agent(device, i,
+ 							IB_QPT_GSI,
+ 							&reg_req,
+@@ -3418,11 +3675,11 @@ static void cm_add_one(struct ib_device *device)
+ 							cm_recv_handler,
+ 							port);
+ 		if (IS_ERR(port->mad_agent))
+-			goto error1;
++			goto error2;
+ 
+ 		ret = ib_modify_port(device, i, 0, &port_modify);
+ 		if (ret)
+-			goto error2;
++			goto error3;
+ 	}
+ 	ib_set_client_data(device, &cm_client, cm_dev);
+ 
+@@ -3431,17 +3688,20 @@ static void cm_add_one(struct ib_device *device)
+ 	write_unlock_irqrestore(&cm.device_lock, flags);
+ 	return;
+ 
+-error2:
++error3:
+ 	ib_unregister_mad_agent(port->mad_agent);
++error2:
++	cm_remove_port_fs(port);
+ error1:
+ 	port_modify.set_port_cap_mask = 0;
+ 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
+ 	while (--i) {
+-		port = &cm_dev->port[i-1];
++		port = cm_dev->port[i-1];
+ 		ib_modify_port(device, port->port_num, 0, &port_modify);
+ 		ib_unregister_mad_agent(port->mad_agent);
++		cm_remove_port_fs(port);
+ 	}
+-	kfree(cm_dev);
++	cm_remove_fs_obj(&cm_dev->dev_obj);
+ }
+ 
+ static void cm_remove_one(struct ib_device *device)
+@@ -3463,11 +3723,12 @@ static void cm_remove_one(struct ib_device *device)
+ 	write_unlock_irqrestore(&cm.device_lock, flags);
+ 
+ 	for (i = 1; i <= device->phys_port_cnt; i++) {
+-		port = &cm_dev->port[i-1];
++		port = cm_dev->port[i-1];
+ 		ib_modify_port(device, port->port_num, 0, &port_modify);
+ 		ib_unregister_mad_agent(port->mad_agent);
++		cm_remove_port_fs(port);
+ 	}
+-	kfree(cm_dev);
++	cm_remove_fs_obj(&cm_dev->dev_obj);
+ }
+ 
+ static int __init ib_cm_init(void)
+@@ -3488,17 +3749,25 @@ static int __init ib_cm_init(void)
+ 	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
+ 	INIT_LIST_HEAD(&cm.timewait_list);
+ 
+-	cm.wq = create_workqueue("ib_cm");
+-	if (!cm.wq)
++	ret = class_register(&cm_class);
++	if (ret)
+ 		return -ENOMEM;
+ 
++	cm.wq = create_workqueue("ib_cm");
++	if (!cm.wq) {
++		ret = -ENOMEM;
++		goto error1;
++	}
 +
-+	/*
-+	 * DES ECB, CBC, CFB and OFB modes.
-+	 */
-+	{
-+		.name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8,
-+		.ablkcipher = {
-+			.min_keysize	=	HIFN_DES_KEY_LENGTH,
-+			.max_keysize	=	HIFN_DES_KEY_LENGTH,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_des_cfb,
-+			.decrypt	=	hifn_decrypt_des_cfb,
-+		},
-+	},
-+	{
-+		.name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8,
-+		.ablkcipher = {
-+			.min_keysize	=	HIFN_DES_KEY_LENGTH,
-+			.max_keysize	=	HIFN_DES_KEY_LENGTH,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_des_ofb,
-+			.decrypt	=	hifn_decrypt_des_ofb,
-+		},
-+	},
-+	{
-+		.name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8,
-+		.ablkcipher = {
-+			.min_keysize	=	HIFN_DES_KEY_LENGTH,
-+			.max_keysize	=	HIFN_DES_KEY_LENGTH,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_des_cbc,
-+			.decrypt	=	hifn_decrypt_des_cbc,
-+		},
-+	},
-+	{
-+		.name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8,
-+		.ablkcipher = {
-+			.min_keysize	=	HIFN_DES_KEY_LENGTH,
-+			.max_keysize	=	HIFN_DES_KEY_LENGTH,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_des_ecb,
-+			.decrypt	=	hifn_decrypt_des_ecb,
-+		},
-+	},
+ 	ret = ib_register_client(&cm_client);
+ 	if (ret)
+-		goto error;
++		goto error2;
+ 
+ 	return 0;
+-error:
++error2:
+ 	destroy_workqueue(cm.wq);
++error1:
++	class_unregister(&cm_class);
+ 	return ret;
+ }
+ 
+@@ -3519,6 +3788,7 @@ static void __exit ib_cm_cleanup(void)
+ 	}
+ 
+ 	ib_unregister_client(&cm_client);
++	class_unregister(&cm_class);
+ 	idr_destroy(&cm.local_id_table);
+ }
+ 
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 0751697..1eff1b2 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -488,7 +488,8 @@ void rdma_destroy_qp(struct rdma_cm_id *id)
+ }
+ EXPORT_SYMBOL(rdma_destroy_qp);
+ 
+-static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
++static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
++			     struct rdma_conn_param *conn_param)
+ {
+ 	struct ib_qp_attr qp_attr;
+ 	int qp_attr_mask, ret;
+@@ -514,13 +515,16 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
+ 	if (ret)
+ 		goto out;
+ 
++	if (conn_param)
++		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
+ 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+ out:
+ 	mutex_unlock(&id_priv->qp_mutex);
+ 	return ret;
+ }
+ 
+-static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
++static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
++			     struct rdma_conn_param *conn_param)
+ {
+ 	struct ib_qp_attr qp_attr;
+ 	int qp_attr_mask, ret;
+@@ -536,6 +540,8 @@ static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
+ 	if (ret)
+ 		goto out;
+ 
++	if (conn_param)
++		qp_attr.max_rd_atomic = conn_param->initiator_depth;
+ 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+ out:
+ 	mutex_unlock(&id_priv->qp_mutex);
+@@ -624,7 +630,8 @@ static inline int cma_zero_addr(struct sockaddr *addr)
+ 	struct in6_addr *ip6;
+ 
+ 	if (addr->sa_family == AF_INET)
+-		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
++		return ipv4_is_zeronet(
++			((struct sockaddr_in *)addr)->sin_addr.s_addr);
+ 	else {
+ 		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
+ 		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
+@@ -634,7 +641,7 @@ static inline int cma_zero_addr(struct sockaddr *addr)
+ 
+ static inline int cma_loopback_addr(struct sockaddr *addr)
+ {
+-	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
++	return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
+ }
+ 
+ static inline int cma_any_addr(struct sockaddr *addr)
+@@ -866,11 +873,11 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
+ {
+ 	int ret;
+ 
+-	ret = cma_modify_qp_rtr(id_priv);
++	ret = cma_modify_qp_rtr(id_priv, NULL);
+ 	if (ret)
+ 		goto reject;
+ 
+-	ret = cma_modify_qp_rts(id_priv);
++	ret = cma_modify_qp_rts(id_priv, NULL);
+ 	if (ret)
+ 		goto reject;
+ 
+@@ -1122,8 +1129,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
+ 	cm_id->cm_handler = cma_ib_handler;
+ 
+ 	ret = conn_id->id.event_handler(&conn_id->id, &event);
+-	if (!ret)
++	if (!ret) {
++		cma_enable_remove(conn_id);
+ 		goto out;
++	}
+ 
+ 	/* Destroy the CM ID by returning a non-zero value. */
+ 	conn_id->cm_id.ib = NULL;
+@@ -1262,6 +1271,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ 	struct net_device *dev = NULL;
+ 	struct rdma_cm_event event;
+ 	int ret;
++	struct ib_device_attr attr;
+ 
+ 	listen_id = cm_id->context;
+ 	if (cma_disable_remove(listen_id, CMA_LISTEN))
+@@ -1279,7 +1289,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ 	atomic_inc(&conn_id->dev_remove);
+ 	conn_id->state = CMA_CONNECT;
+ 
+-	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
++	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
+ 	if (!dev) {
+ 		ret = -EADDRNOTAVAIL;
+ 		cma_enable_remove(conn_id);
+@@ -1311,10 +1321,19 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ 	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
+ 	*sin = iw_event->remote_addr;
+ 
++	ret = ib_query_device(conn_id->id.device, &attr);
++	if (ret) {
++		cma_enable_remove(conn_id);
++		rdma_destroy_id(new_cm_id);
++		goto out;
++	}
 +
+ 	memset(&event, 0, sizeof event);
+ 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+ 	event.param.conn.private_data = iw_event->private_data;
+ 	event.param.conn.private_data_len = iw_event->private_data_len;
++	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
++	event.param.conn.responder_resources = attr.max_qp_rd_atom;
+ 	ret = conn_id->id.event_handler(&conn_id->id, &event);
+ 	if (ret) {
+ 		/* User wants to destroy the CM ID */
+@@ -2272,7 +2291,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
+ 	sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
+ 	cm_id->remote_addr = *sin;
+ 
+-	ret = cma_modify_qp_rtr(id_priv);
++	ret = cma_modify_qp_rtr(id_priv, conn_param);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -2335,25 +2354,15 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
+ 			 struct rdma_conn_param *conn_param)
+ {
+ 	struct ib_cm_rep_param rep;
+-	struct ib_qp_attr qp_attr;
+-	int qp_attr_mask, ret;
+-
+-	if (id_priv->id.qp) {
+-		ret = cma_modify_qp_rtr(id_priv);
+-		if (ret)
+-			goto out;
++	int ret;
+ 
+-		qp_attr.qp_state = IB_QPS_RTS;
+-		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
+-					 &qp_attr_mask);
+-		if (ret)
+-			goto out;
++	ret = cma_modify_qp_rtr(id_priv, conn_param);
++	if (ret)
++		goto out;
+ 
+-		qp_attr.max_rd_atomic = conn_param->initiator_depth;
+-		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+-		if (ret)
+-			goto out;
+-	}
++	ret = cma_modify_qp_rts(id_priv, conn_param);
++	if (ret)
++		goto out;
+ 
+ 	memset(&rep, 0, sizeof rep);
+ 	rep.qp_num = id_priv->qp_num;
+@@ -2378,7 +2387,7 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
+ 	struct iw_cm_conn_param iw_param;
+ 	int ret;
+ 
+-	ret = cma_modify_qp_rtr(id_priv);
++	ret = cma_modify_qp_rtr(id_priv, conn_param);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -2598,11 +2607,9 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
+ 		/* IPv6 address is an SA assigned MGID. */
+ 		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+ 	} else {
+-		ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
++		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
+ 		if (id_priv->id.ps == RDMA_PS_UDP)
+ 			mc_map[7] = 0x01;	/* Use RDMA CM signature */
+-		mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
+-		mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
+ 		*mgid = *(union ib_gid *) (mc_map + 4);
+ 	}
+ }
+diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
+index e8d5f6b..6c7aa59 100644
+--- a/drivers/infiniband/core/fmr_pool.c
++++ b/drivers/infiniband/core/fmr_pool.c
+@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
+ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
+ {
+ 	int                 ret;
+-	struct ib_pool_fmr *fmr;
++	struct ib_pool_fmr *fmr, *next;
+ 	LIST_HEAD(unmap_list);
+ 	LIST_HEAD(fmr_list);
+ 
+@@ -158,6 +158,20 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
+ #endif
+ 	}
+ 
 +	/*
-+	 * AES ECB, CBC, CFB and OFB modes.
++	 * The free_list may hold FMRs that have been put there
++	 * because they haven't reached the max_remap count.
++	 * Invalidate their mapping as well.
 +	 */
-+	{
-+		.name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16,
-+		.ablkcipher = {
-+			.min_keysize	=	AES_MIN_KEY_SIZE,
-+			.max_keysize	=	AES_MAX_KEY_SIZE,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_aes_ecb,
-+			.decrypt	=	hifn_decrypt_aes_ecb,
-+		},
-+	},
-+	{
-+		.name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16,
-+		.ablkcipher = {
-+			.min_keysize	=	AES_MIN_KEY_SIZE,
-+			.max_keysize	=	AES_MAX_KEY_SIZE,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_aes_cbc,
-+			.decrypt	=	hifn_decrypt_aes_cbc,
-+		},
-+	},
-+	{
-+		.name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16,
-+		.ablkcipher = {
-+			.min_keysize	=	AES_MIN_KEY_SIZE,
-+			.max_keysize	=	AES_MAX_KEY_SIZE,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_aes_cfb,
-+			.decrypt	=	hifn_decrypt_aes_cfb,
-+		},
-+	},
-+	{
-+		.name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16,
-+		.ablkcipher = {
-+			.min_keysize	=	AES_MIN_KEY_SIZE,
-+			.max_keysize	=	AES_MAX_KEY_SIZE,
-+			.setkey		=	hifn_setkey,
-+			.encrypt	=	hifn_encrypt_aes_ofb,
-+			.decrypt	=	hifn_decrypt_aes_ofb,
-+		},
-+	},
-+};
-+
-+static int hifn_cra_init(struct crypto_tfm *tfm)
-+{
-+	struct crypto_alg *alg = tfm->__crt_alg;
-+	struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
-+	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
-+
-+	ctx->dev = ha->dev;
-+
-+	return 0;
-+}
++	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
++		if (fmr->remap_count == 0)
++			continue;
++		hlist_del_init(&fmr->cache_node);
++		fmr->remap_count = 0;
++		list_add_tail(&fmr->fmr->list, &fmr_list);
++		list_move(&fmr->list, &unmap_list);
++	}
 +
-+static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
-+{
-+	struct hifn_crypto_alg *alg;
-+	int err;
+ 	list_splice(&pool->dirty_list, &unmap_list);
+ 	INIT_LIST_HEAD(&pool->dirty_list);
+ 	pool->dirty_len = 0;
+@@ -182,8 +196,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
+ 	struct ib_fmr_pool *pool = pool_ptr;
+ 
+ 	do {
+-		if (pool->dirty_len >= pool->dirty_watermark ||
+-		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
++		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
+ 			ib_fmr_batch_release(pool);
+ 
+ 			atomic_inc(&pool->flush_ser);
+@@ -194,8 +207,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
+ 		}
+ 
+ 		set_current_state(TASK_INTERRUPTIBLE);
+-		if (pool->dirty_len < pool->dirty_watermark &&
+-		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
++		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
+ 		    !kthread_should_stop())
+ 			schedule();
+ 		__set_current_state(TASK_RUNNING);
+@@ -369,11 +381,6 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
+ 
+ 	i = 0;
+ 	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+-		if (fmr->remap_count) {
+-			INIT_LIST_HEAD(&fmr_list);
+-			list_add_tail(&fmr->fmr->list, &fmr_list);
+-			ib_unmap_fmr(&fmr_list);
+-		}
+ 		ib_dealloc_fmr(fmr->fmr);
+ 		list_del(&fmr->list);
+ 		kfree(fmr);
+@@ -511,8 +518,10 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
+ 			list_add_tail(&fmr->list, &pool->free_list);
+ 		} else {
+ 			list_add_tail(&fmr->list, &pool->dirty_list);
+-			++pool->dirty_len;
+-			wake_up_process(pool->thread);
++			if (++pool->dirty_len >= pool->dirty_watermark) {
++				atomic_inc(&pool->req_ser);
++				wake_up_process(pool->thread);
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 6f42877..fbe16d5 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -701,7 +701,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
+ 	}
+ 
+ 	/* Check to post send on QP or process locally */
+-	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD)
++	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
++	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
+ 		goto out;
+ 
+ 	local = kmalloc(sizeof *local, GFP_ATOMIC);
+@@ -752,8 +753,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
+ 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
+ 					    mad_agent_priv->agent.port_num);
+ 		if (port_priv) {
+-			mad_priv->mad.mad.mad_hdr.tid =
+-				((struct ib_mad *)smp)->mad_hdr.tid;
++			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
+ 			recv_mad_agent = find_mad_agent(port_priv,
+ 						        &mad_priv->mad.mad);
+ 		}
+@@ -1100,7 +1100,9 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
+ 		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
+ 		/* Timeout will be updated after send completes */
+ 		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
+-		mad_send_wr->retries = send_buf->retries;
++		mad_send_wr->max_retries = send_buf->retries;
++		mad_send_wr->retries_left = send_buf->retries;
++		send_buf->retries = 0;
+ 		/* Reference for work request to QP + response */
+ 		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
+ 		mad_send_wr->status = IB_WC_SUCCESS;
+@@ -1931,15 +1933,6 @@ local:
+ 	if (port_priv->device->process_mad) {
+ 		int ret;
+ 
+-		if (!response) {
+-			printk(KERN_ERR PFX "No memory for response MAD\n");
+-			/*
+-			 * Is it better to assume that
+-			 * it wouldn't be processed ?
+-			 */
+-			goto out;
+-		}
+-
+ 		ret = port_priv->device->process_mad(port_priv->device, 0,
+ 						     port_priv->port_num,
+ 						     wc, &recv->grh,
+@@ -2282,8 +2275,6 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
+ 
+ 	/* Empty wait list to prevent receives from finding a request */
+ 	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
+-	/* Empty local completion list as well */
+-	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
+ 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ 
+ 	/* Report all cancelled requests */
+@@ -2445,9 +2436,12 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
+ {
+ 	int ret;
+ 
+-	if (!mad_send_wr->retries--)
++	if (!mad_send_wr->retries_left)
+ 		return -ETIMEDOUT;
+ 
++	mad_send_wr->retries_left--;
++	mad_send_wr->send_buf.retries++;
 +
-+	alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
-+	if (!alg)
-+		return -ENOMEM;
+ 	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
+ 
+ 	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
+diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
+index 9be5cc0..8b75010 100644
+--- a/drivers/infiniband/core/mad_priv.h
++++ b/drivers/infiniband/core/mad_priv.h
+@@ -131,7 +131,8 @@ struct ib_mad_send_wr_private {
+ 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
+ 	__be64 tid;
+ 	unsigned long timeout;
+-	int retries;
++	int max_retries;
++	int retries_left;
+ 	int retry;
+ 	int refcount;
+ 	enum ib_wc_status status;
+diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
+index d43bc62..a5e2a31 100644
+--- a/drivers/infiniband/core/mad_rmpp.c
++++ b/drivers/infiniband/core/mad_rmpp.c
+@@ -684,7 +684,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
+ 
+ 	if (seg_num > mad_send_wr->last_ack) {
+ 		adjust_last_ack(mad_send_wr, seg_num);
+-		mad_send_wr->retries = mad_send_wr->send_buf.retries;
++		mad_send_wr->retries_left = mad_send_wr->max_retries;
+ 	}
+ 	mad_send_wr->newwin = newwin;
+ 	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
+diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
+index 1bc1fe6..107f170 100644
+--- a/drivers/infiniband/core/multicast.c
++++ b/drivers/infiniband/core/multicast.c
+@@ -73,11 +73,20 @@ struct mcast_device {
+ };
+ 
+ enum mcast_state {
+-	MCAST_IDLE,
+ 	MCAST_JOINING,
+ 	MCAST_MEMBER,
++	MCAST_ERROR,
++};
 +
-+	snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
-+	snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name);
++enum mcast_group_state {
++	MCAST_IDLE,
+ 	MCAST_BUSY,
+-	MCAST_ERROR
++	MCAST_GROUP_ERROR,
++	MCAST_PKEY_EVENT
++};
 +
-+	alg->alg.cra_priority = 300;
-+	alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
-+	alg->alg.cra_blocksize = t->bsize;
-+	alg->alg.cra_ctxsize = sizeof(struct hifn_context);
-+	alg->alg.cra_alignmask = 15;
-+	if (t->bsize == 8)
-+		alg->alg.cra_alignmask = 3;
-+	alg->alg.cra_type = &crypto_ablkcipher_type;
-+	alg->alg.cra_module = THIS_MODULE;
-+	alg->alg.cra_u.ablkcipher = t->ablkcipher;
-+	alg->alg.cra_init = hifn_cra_init;
++enum {
++	MCAST_INVALID_PKEY_INDEX = 0xFFFF
+ };
+ 
+ struct mcast_member;
+@@ -93,9 +102,10 @@ struct mcast_group {
+ 	struct mcast_member	*last_join;
+ 	int			members[3];
+ 	atomic_t		refcount;
+-	enum mcast_state	state;
++	enum mcast_group_state	state;
+ 	struct ib_sa_query	*query;
+ 	int			query_id;
++	u16			pkey_index;
+ };
+ 
+ struct mcast_member {
+@@ -378,9 +388,19 @@ static int fail_join(struct mcast_group *group, struct mcast_member *member,
+ static void process_group_error(struct mcast_group *group)
+ {
+ 	struct mcast_member *member;
+-	int ret;
++	int ret = 0;
++	u16 pkey_index;
 +
-+	alg->dev = dev;
++	if (group->state == MCAST_PKEY_EVENT)
++		ret = ib_find_pkey(group->port->dev->device,
++				   group->port->port_num,
++				   be16_to_cpu(group->rec.pkey), &pkey_index);
+ 
+ 	spin_lock_irq(&group->lock);
++	if (group->state == MCAST_PKEY_EVENT && !ret &&
++	    group->pkey_index == pkey_index)
++		goto out;
 +
-+	list_add_tail(&alg->entry, &dev->alg_list);
+ 	while (!list_empty(&group->active_list)) {
+ 		member = list_entry(group->active_list.next,
+ 				    struct mcast_member, list);
+@@ -399,6 +419,7 @@ static void process_group_error(struct mcast_group *group)
+ 	}
+ 
+ 	group->rec.join_state = 0;
++out:
+ 	group->state = MCAST_BUSY;
+ 	spin_unlock_irq(&group->lock);
+ }
+@@ -415,9 +436,9 @@ static void mcast_work_handler(struct work_struct *work)
+ retest:
+ 	spin_lock_irq(&group->lock);
+ 	while (!list_empty(&group->pending_list) ||
+-	       (group->state == MCAST_ERROR)) {
++	       (group->state != MCAST_BUSY)) {
+ 
+-		if (group->state == MCAST_ERROR) {
++		if (group->state != MCAST_BUSY) {
+ 			spin_unlock_irq(&group->lock);
+ 			process_group_error(group);
+ 			goto retest;
+@@ -494,12 +515,19 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
+ 			 void *context)
+ {
+ 	struct mcast_group *group = context;
++	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;
+ 
+ 	if (status)
+ 		process_join_error(group, status);
+ 	else {
++		ib_find_pkey(group->port->dev->device, group->port->port_num,
++			     be16_to_cpu(rec->pkey), &pkey_index);
 +
-+	err = crypto_register_alg(&alg->alg);
-+	if (err) {
-+		list_del(&alg->entry);
-+		kfree(alg);
-+	}
+ 		spin_lock_irq(&group->port->lock);
+ 		group->rec = *rec;
++		if (group->state == MCAST_BUSY &&
++		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
++			group->pkey_index = pkey_index;
+ 		if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
+ 			rb_erase(&group->node, &group->port->table);
+ 			mcast_insert(group->port, group, 1);
+@@ -539,6 +567,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
+ 
+ 	group->port = port;
+ 	group->rec.mgid = *mgid;
++	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
+ 	INIT_LIST_HEAD(&group->pending_list);
+ 	INIT_LIST_HEAD(&group->active_list);
+ 	INIT_WORK(&group->work, mcast_work_handler);
+@@ -707,7 +736,8 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+ }
+ EXPORT_SYMBOL(ib_init_ah_from_mcmember);
+ 
+-static void mcast_groups_lost(struct mcast_port *port)
++static void mcast_groups_event(struct mcast_port *port,
++			       enum mcast_group_state state)
+ {
+ 	struct mcast_group *group;
+ 	struct rb_node *node;
+@@ -721,7 +751,8 @@ static void mcast_groups_lost(struct mcast_port *port)
+ 			atomic_inc(&group->refcount);
+ 			queue_work(mcast_wq, &group->work);
+ 		}
+-		group->state = MCAST_ERROR;
++		if (group->state != MCAST_GROUP_ERROR)
++			group->state = state;
+ 		spin_unlock(&group->lock);
+ 	}
+ 	spin_unlock_irqrestore(&port->lock, flags);
+@@ -731,16 +762,20 @@ static void mcast_event_handler(struct ib_event_handler *handler,
+ 				struct ib_event *event)
+ {
+ 	struct mcast_device *dev;
++	int index;
+ 
+ 	dev = container_of(handler, struct mcast_device, event_handler);
++	index = event->element.port_num - dev->start_port;
+ 
+ 	switch (event->event) {
+ 	case IB_EVENT_PORT_ERR:
+ 	case IB_EVENT_LID_CHANGE:
+ 	case IB_EVENT_SM_CHANGE:
+ 	case IB_EVENT_CLIENT_REREGISTER:
+-		mcast_groups_lost(&dev->port[event->element.port_num -
+-					     dev->start_port]);
++		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
++		break;
++	case IB_EVENT_PKEY_CHANGE:
++		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
+ 		break;
+ 	default:
+ 		break;
+diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
+index 1cfc298..aff96ba 100644
+--- a/drivers/infiniband/core/smi.h
++++ b/drivers/infiniband/core/smi.h
+@@ -59,7 +59,8 @@ extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
+ 					      u8 node_type, int port_num);
+ 
+ /*
+- * Return 1 if the SMP should be handled by the local SMA/SM via process_mad
++ * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
++ * via process_mad
+  */
+ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
+ 						  struct ib_device *device)
+@@ -71,4 +72,19 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
+ 		(smp->hop_ptr == smp->hop_cnt + 1)) ?
+ 		IB_SMI_HANDLE : IB_SMI_DISCARD);
+ }
 +
-+	return err;
++/*
++ * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
++ * via process_mad
++ */
++static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp,
++						   struct ib_device *device)
++{
++	/* C14-13:3 -- We're at the end of the DR segment of path */
++	/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
++	return ((device->process_mad &&
++		ib_get_smp_direction(smp) &&
++		!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 +}
 +
-+static void hifn_unregister_alg(struct hifn_device *dev)
-+{
-+	struct hifn_crypto_alg *a, *n;
+ #endif	/* __SMI_H_ */
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index 3d40506..c864ef7 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -508,19 +508,10 @@ static int add_port(struct ib_device *device, int port_num)
+ 
+ 	p->ibdev      = device;
+ 	p->port_num   = port_num;
+-	p->kobj.ktype = &port_type;
+ 
+-	p->kobj.parent = kobject_get(&device->ports_parent);
+-	if (!p->kobj.parent) {
+-		ret = -EBUSY;
+-		goto err;
+-	}
+-
+-	ret = kobject_set_name(&p->kobj, "%d", port_num);
+-	if (ret)
+-		goto err_put;
+-
+-	ret = kobject_register(&p->kobj);
++	ret = kobject_init_and_add(&p->kobj, &port_type,
++				   kobject_get(device->ports_parent),
++				   "%d", port_num);
+ 	if (ret)
+ 		goto err_put;
+ 
+@@ -549,6 +540,7 @@ static int add_port(struct ib_device *device, int port_num)
+ 
+ 	list_add_tail(&p->kobj.entry, &device->port_list);
+ 
++	kobject_uevent(&p->kobj, KOBJ_ADD);
+ 	return 0;
+ 
+ err_free_pkey:
+@@ -570,9 +562,7 @@ err_remove_pma:
+ 	sysfs_remove_group(&p->kobj, &pma_group);
+ 
+ err_put:
+-	kobject_put(&device->ports_parent);
+-
+-err:
++	kobject_put(device->ports_parent);
+ 	kfree(p);
+ 	return ret;
+ }
+@@ -694,16 +684,9 @@ int ib_device_register_sysfs(struct ib_device *device)
+ 			goto err_unregister;
+ 	}
+ 
+-	device->ports_parent.parent = kobject_get(&class_dev->kobj);
+-	if (!device->ports_parent.parent) {
+-		ret = -EBUSY;
+-		goto err_unregister;
+-	}
+-	ret = kobject_set_name(&device->ports_parent, "ports");
+-	if (ret)
+-		goto err_put;
+-	ret = kobject_register(&device->ports_parent);
+-	if (ret)
++	device->ports_parent = kobject_create_and_add("ports",
++					kobject_get(&class_dev->kobj));
++	if (!device->ports_parent)
+ 		goto err_put;
+ 
+ 	if (device->node_type == RDMA_NODE_IB_SWITCH) {
+@@ -731,7 +714,7 @@ err_put:
+ 			sysfs_remove_group(p, &pma_group);
+ 			sysfs_remove_group(p, &port->pkey_group);
+ 			sysfs_remove_group(p, &port->gid_group);
+-			kobject_unregister(p);
++			kobject_put(p);
+ 		}
+ 	}
+ 
+@@ -755,10 +738,10 @@ void ib_device_unregister_sysfs(struct ib_device *device)
+ 		sysfs_remove_group(p, &pma_group);
+ 		sysfs_remove_group(p, &port->pkey_group);
+ 		sysfs_remove_group(p, &port->gid_group);
+-		kobject_unregister(p);
++		kobject_put(p);
+ 	}
+ 
+-	kobject_unregister(&device->ports_parent);
++	kobject_put(device->ports_parent);
+ 	class_device_unregister(&device->class_dev);
+ }
+ 
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index 424983f..4291ab4 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -106,6 +106,9 @@ enum {
+ 	IB_UCM_MAX_DEVICES = 32
+ };
+ 
++/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
++extern struct class cm_class;
 +
-+	list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
-+		list_del(&a->entry);
-+		crypto_unregister_alg(&a->alg);
-+		kfree(a);
+ #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
+ 
+ static void ib_ucm_add_one(struct ib_device *device);
+@@ -1199,7 +1202,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
+ 	return 0;
+ }
+ 
+-static void ib_ucm_release_class_dev(struct class_device *class_dev)
++static void ucm_release_class_dev(struct class_device *class_dev)
+ {
+ 	struct ib_ucm_device *dev;
+ 
+@@ -1217,11 +1220,6 @@ static const struct file_operations ucm_fops = {
+ 	.poll    = ib_ucm_poll,
+ };
+ 
+-static struct class ucm_class = {
+-	.name    = "infiniband_cm",
+-	.release = ib_ucm_release_class_dev
+-};
+-
+ static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
+ {
+ 	struct ib_ucm_device *dev;
+@@ -1257,9 +1255,10 @@ static void ib_ucm_add_one(struct ib_device *device)
+ 	if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
+ 		goto err;
+ 
+-	ucm_dev->class_dev.class = &ucm_class;
++	ucm_dev->class_dev.class = &cm_class;
+ 	ucm_dev->class_dev.dev = device->dma_device;
+ 	ucm_dev->class_dev.devt = ucm_dev->dev.dev;
++	ucm_dev->class_dev.release = ucm_release_class_dev;
+ 	snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d",
+ 		 ucm_dev->devnum);
+ 	if (class_device_register(&ucm_dev->class_dev))
+@@ -1306,40 +1305,34 @@ static int __init ib_ucm_init(void)
+ 				     "infiniband_cm");
+ 	if (ret) {
+ 		printk(KERN_ERR "ucm: couldn't register device number\n");
+-		goto err;
++		goto error1;
+ 	}
+ 
+-	ret = class_register(&ucm_class);
+-	if (ret) {
+-		printk(KERN_ERR "ucm: couldn't create class infiniband_cm\n");
+-		goto err_chrdev;
+-	}
+-
+-	ret = class_create_file(&ucm_class, &class_attr_abi_version);
++	ret = class_create_file(&cm_class, &class_attr_abi_version);
+ 	if (ret) {
+ 		printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
+-		goto err_class;
++		goto error2;
+ 	}
+ 
+ 	ret = ib_register_client(&ucm_client);
+ 	if (ret) {
+ 		printk(KERN_ERR "ucm: couldn't register client\n");
+-		goto err_class;
++		goto error3;
+ 	}
+ 	return 0;
+ 
+-err_class:
+-	class_unregister(&ucm_class);
+-err_chrdev:
++error3:
++	class_remove_file(&cm_class, &class_attr_abi_version);
++error2:
+ 	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+-err:
++error1:
+ 	return ret;
+ }
+ 
+ static void __exit ib_ucm_cleanup(void)
+ {
+ 	ib_unregister_client(&ucm_client);
+-	class_unregister(&ucm_class);
++	class_remove_file(&cm_class, &class_attr_abi_version);
+ 	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+ 	idr_destroy(&ctx_id_table);
+ }
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 90d675a..15937eb 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -31,6 +31,7 @@
+  */
+ 
+ #include <linux/completion.h>
++#include <linux/file.h>
+ #include <linux/mutex.h>
+ #include <linux/poll.h>
+ #include <linux/idr.h>
+@@ -991,6 +992,96 @@ out:
+ 	return ret;
+ }
+ 
++static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
++{
++	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
++	if (file1 < file2) {
++		mutex_lock(&file1->mut);
++		mutex_lock(&file2->mut);
++	} else {
++		mutex_lock(&file2->mut);
++		mutex_lock(&file1->mut);
 +	}
 +}
 +
-+static int hifn_register_alg(struct hifn_device *dev)
++static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
 +{
-+	int i, err;
-+
-+	for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) {
-+		err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
-+		if (err)
-+			goto err_out_exit;
++	if (file1 < file2) {
++		mutex_unlock(&file2->mut);
++		mutex_unlock(&file1->mut);
++	} else {
++		mutex_unlock(&file1->mut);
++		mutex_unlock(&file2->mut);
 +	}
-+
-+	return 0;
-+
-+err_out_exit:
-+	hifn_unregister_alg(dev);
-+	return err;
 +}
 +
-+static void hifn_tasklet_callback(unsigned long data)
++static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
 +{
-+	struct hifn_device *dev = (struct hifn_device *)data;
++	struct ucma_event *uevent, *tmp;
 +
-+	/*
-+	 * This is ok to call this without lock being held,
-+	 * althogh it modifies some parameters used in parallel,
-+	 * (like dev->success), but they are used in process
-+	 * context or update is atomic (like setting dev->sa[i] to NULL).
-+	 */
-+	hifn_check_for_completion(dev, 0);
++	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
++		if (uevent->ctx == ctx)
++			list_move_tail(&uevent->list, &file->event_list);
 +}
 +
-+static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++static ssize_t ucma_migrate_id(struct ucma_file *new_file,
++			       const char __user *inbuf,
++			       int in_len, int out_len)
 +{
-+	int err, i;
-+	struct hifn_device *dev;
-+	char name[8];
-+
-+	err = pci_enable_device(pdev);
-+	if (err)
-+		return err;
-+	pci_set_master(pdev);
-+
-+	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-+	if (err)
-+		goto err_out_disable_pci_device;
-+
-+	snprintf(name, sizeof(name), "hifn%d",
-+			atomic_inc_return(&hifn_dev_number)-1);
-+
-+	err = pci_request_regions(pdev, name);
-+	if (err)
-+		goto err_out_disable_pci_device;
-+
-+	if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
-+	    pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
-+	    pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
-+		dprintk("%s: Broken hardware - I/O regions are too small.\n",
-+				pci_name(pdev));
-+		err = -ENODEV;
-+		goto err_out_free_regions;
-+	}
-+
-+	dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
-+			GFP_KERNEL);
-+	if (!dev) {
-+		err = -ENOMEM;
-+		goto err_out_free_regions;
-+	}
-+
-+	INIT_LIST_HEAD(&dev->alg_list);
-+
-+	snprintf(dev->name, sizeof(dev->name), "%s", name);
-+	spin_lock_init(&dev->lock);
-+
-+	for (i=0; i<3; ++i) {
-+		unsigned long addr, size;
-+
-+		addr = pci_resource_start(pdev, i);
-+		size = pci_resource_len(pdev, i);
-+
-+		dev->bar[i] = ioremap_nocache(addr, size);
-+		if (!dev->bar[i])
-+			goto err_out_unmap_bars;
-+	}
++	struct rdma_ucm_migrate_id cmd;
++	struct rdma_ucm_migrate_resp resp;
++	struct ucma_context *ctx;
++	struct file *filp;
++	struct ucma_file *cur_file;
++	int ret = 0;
 +
-+	dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER);
-+	if (!dev->result_mem) {
-+		dprintk("Failed to allocate %d pages for result_mem.\n",
-+				HIFN_MAX_RESULT_ORDER);
-+		goto err_out_unmap_bars;
-+	}
-+	memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER));
++	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
++		return -EFAULT;
 +
-+	dev->dst = pci_map_single(pdev, (void *)dev->result_mem,
-+			PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE);
++	/* Get current fd to protect against it being closed */
++	filp = fget(cmd.fd);
++	if (!filp)
++		return -ENOENT;
 +
-+	dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
-+			&dev->desc_dma);
-+	if (!dev->desc_virt) {
-+		dprintk("Failed to allocate descriptor rings.\n");
-+		goto err_out_free_result_pages;
++	/* Validate current fd and prevent destruction of id. */
++	ctx = ucma_get_ctx(filp->private_data, cmd.id);
++	if (IS_ERR(ctx)) {
++		ret = PTR_ERR(ctx);
++		goto file_put;
 +	}
-+	memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
-+
-+	dev->pdev = pdev;
-+	dev->irq = pdev->irq;
-+
-+	for (i=0; i<HIFN_D_RES_RSIZE; ++i)
-+		dev->sa[i] = NULL;
-+
-+	pci_set_drvdata(pdev, dev);
-+
-+	tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev);
-+
-+	crypto_init_queue(&dev->queue, 1);
 +
-+	err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
-+	if (err) {
-+		dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
-+		dev->irq = 0;
-+		goto err_out_free_desc;
++	cur_file = ctx->file;
++	if (cur_file == new_file) {
++		resp.events_reported = ctx->events_reported;
++		goto response;
 +	}
 +
-+	err = hifn_start_device(dev);
-+	if (err)
-+		goto err_out_free_irq;
-+
-+	err = hifn_test(dev, 1, 0);
-+	if (err)
-+		goto err_out_stop_device;
++	/*
++	 * Migrate events between fd's, maintaining order, and avoiding new
++	 * events being added before existing events.
++	 */
++	ucma_lock_files(cur_file, new_file);
++	mutex_lock(&mut);
 +
-+	err = hifn_register_rng(dev);
-+	if (err)
-+		goto err_out_stop_device;
++	list_move_tail(&ctx->list, &new_file->ctx_list);
++	ucma_move_events(ctx, new_file);
++	ctx->file = new_file;
++	resp.events_reported = ctx->events_reported;
 +
-+	err = hifn_register_alg(dev);
-+	if (err)
-+		goto err_out_unregister_rng;
++	mutex_unlock(&mut);
++	ucma_unlock_files(cur_file, new_file);
 +
-+	INIT_DELAYED_WORK(&dev->work, hifn_work);
-+	schedule_delayed_work(&dev->work, HZ);
++response:
++	if (copy_to_user((void __user *)(unsigned long)cmd.response,
++			 &resp, sizeof(resp)))
++		ret = -EFAULT;
 +
-+	dprintk("HIFN crypto accelerator card at %s has been "
-+			"successfully registered as %s.\n",
-+			pci_name(pdev), dev->name);
++	ucma_put_ctx(ctx);
++file_put:
++	fput(filp);
++	return ret;
++}
 +
-+	return 0;
+ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
+ 				   const char __user *inbuf,
+ 				   int in_len, int out_len) = {
+@@ -1012,6 +1103,7 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
+ 	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
+ 	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
+ 	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
++	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
+ };
+ 
+ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index b53eac4..4e91510 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -2,6 +2,7 @@
+  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
++ * Copyright (c) 2008 Cisco. All rights reserved.
+  *
+  * This software is available to you under a choice of one of two
+  * licenses.  You may choose to be licensed under the terms of the GNU
+@@ -42,7 +43,7 @@
+ #include <linux/cdev.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/poll.h>
+-#include <linux/rwsem.h>
++#include <linux/mutex.h>
+ #include <linux/kref.h>
+ #include <linux/compat.h>
+ 
+@@ -94,7 +95,7 @@ struct ib_umad_port {
+ 	struct class_device   *sm_class_dev;
+ 	struct semaphore       sm_sem;
+ 
+-	struct rw_semaphore    mutex;
++	struct mutex	       file_mutex;
+ 	struct list_head       file_list;
+ 
+ 	struct ib_device      *ib_dev;
+@@ -110,11 +111,11 @@ struct ib_umad_device {
+ };
+ 
+ struct ib_umad_file {
++	struct mutex		mutex;
+ 	struct ib_umad_port    *port;
+ 	struct list_head	recv_list;
+ 	struct list_head	send_list;
+ 	struct list_head	port_list;
+-	spinlock_t		recv_lock;
+ 	spinlock_t		send_lock;
+ 	wait_queue_head_t	recv_wait;
+ 	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
+@@ -156,7 +157,7 @@ static int hdr_size(struct ib_umad_file *file)
+ 		sizeof (struct ib_user_mad_hdr_old);
+ }
+ 
+-/* caller must hold port->mutex at least for reading */
++/* caller must hold file->mutex */
+ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
+ {
+ 	return file->agents_dead ? NULL : file->agent[id];
+@@ -168,32 +169,30 @@ static int queue_packet(struct ib_umad_file *file,
+ {
+ 	int ret = 1;
+ 
+-	down_read(&file->port->mutex);
++	mutex_lock(&file->mutex);
+ 
+ 	for (packet->mad.hdr.id = 0;
+ 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
+ 	     packet->mad.hdr.id++)
+ 		if (agent == __get_agent(file, packet->mad.hdr.id)) {
+-			spin_lock_irq(&file->recv_lock);
+ 			list_add_tail(&packet->list, &file->recv_list);
+-			spin_unlock_irq(&file->recv_lock);
+ 			wake_up_interruptible(&file->recv_wait);
+ 			ret = 0;
+ 			break;
+ 		}
+ 
+-	up_read(&file->port->mutex);
++	mutex_unlock(&file->mutex);
+ 
+ 	return ret;
+ }
+ 
+ static void dequeue_send(struct ib_umad_file *file,
+ 			 struct ib_umad_packet *packet)
+- {
++{
+ 	spin_lock_irq(&file->send_lock);
+ 	list_del(&packet->list);
+ 	spin_unlock_irq(&file->send_lock);
+- }
++}
+ 
+ static void send_handler(struct ib_mad_agent *agent,
+ 			 struct ib_mad_send_wc *send_wc)
+@@ -341,10 +340,10 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 	if (count < hdr_size(file))
+ 		return -EINVAL;
+ 
+-	spin_lock_irq(&file->recv_lock);
++	mutex_lock(&file->mutex);
+ 
+ 	while (list_empty(&file->recv_list)) {
+-		spin_unlock_irq(&file->recv_lock);
++		mutex_unlock(&file->mutex);
+ 
+ 		if (filp->f_flags & O_NONBLOCK)
+ 			return -EAGAIN;
+@@ -353,13 +352,13 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 					     !list_empty(&file->recv_list)))
+ 			return -ERESTARTSYS;
+ 
+-		spin_lock_irq(&file->recv_lock);
++		mutex_lock(&file->mutex);
+ 	}
+ 
+ 	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+ 	list_del(&packet->list);
+ 
+-	spin_unlock_irq(&file->recv_lock);
++	mutex_unlock(&file->mutex);
+ 
+ 	if (packet->recv_wc)
+ 		ret = copy_recv_mad(file, buf, packet, count);
+@@ -368,9 +367,9 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 
+ 	if (ret < 0) {
+ 		/* Requeue packet */
+-		spin_lock_irq(&file->recv_lock);
++		mutex_lock(&file->mutex);
+ 		list_add(&packet->list, &file->recv_list);
+-		spin_unlock_irq(&file->recv_lock);
++		mutex_unlock(&file->mutex);
+ 	} else {
+ 		if (packet->recv_wc)
+ 			ib_free_recv_mad(packet->recv_wc);
+@@ -481,7 +480,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ 		goto err;
+ 	}
+ 
+-	down_read(&file->port->mutex);
++	mutex_lock(&file->mutex);
+ 
+ 	agent = __get_agent(file, packet->mad.hdr.id);
+ 	if (!agent) {
+@@ -577,7 +576,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ 	if (ret)
+ 		goto err_send;
+ 
+-	up_read(&file->port->mutex);
++	mutex_unlock(&file->mutex);
+ 	return count;
+ 
+ err_send:
+@@ -587,7 +586,7 @@ err_msg:
+ err_ah:
+ 	ib_destroy_ah(ah);
+ err_up:
+-	up_read(&file->port->mutex);
++	mutex_unlock(&file->mutex);
+ err:
+ 	kfree(packet);
+ 	return ret;
+@@ -613,11 +612,12 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
+ {
+ 	struct ib_user_mad_reg_req ureq;
+ 	struct ib_mad_reg_req req;
+-	struct ib_mad_agent *agent;
++	struct ib_mad_agent *agent = NULL;
+ 	int agent_id;
+ 	int ret;
+ 
+-	down_write(&file->port->mutex);
++	mutex_lock(&file->port->file_mutex);
++	mutex_lock(&file->mutex);
+ 
+ 	if (!file->port->ib_dev) {
+ 		ret = -EPIPE;
+@@ -666,13 +666,13 @@ found:
+ 				      send_handler, recv_handler, file);
+ 	if (IS_ERR(agent)) {
+ 		ret = PTR_ERR(agent);
++		agent = NULL;
+ 		goto out;
+ 	}
+ 
+ 	if (put_user(agent_id,
+ 		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
+ 		ret = -EFAULT;
+-		ib_unregister_mad_agent(agent);
+ 		goto out;
+ 	}
+ 
+@@ -690,7 +690,13 @@ found:
+ 	ret = 0;
+ 
+ out:
+-	up_write(&file->port->mutex);
++	mutex_unlock(&file->mutex);
 +
-+err_out_unregister_rng:
-+	hifn_unregister_rng(dev);
-+err_out_stop_device:
-+	hifn_reset_dma(dev, 1);
-+	hifn_stop_device(dev);
-+err_out_free_irq:
-+	free_irq(dev->irq, dev->name);
-+	tasklet_kill(&dev->tasklet);
-+err_out_free_desc:
-+	pci_free_consistent(pdev, sizeof(struct hifn_dma),
-+			dev->desc_virt, dev->desc_dma);
++	if (ret && agent)
++		ib_unregister_mad_agent(agent);
 +
-+err_out_free_result_pages:
-+	pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
-+			PCI_DMA_FROMDEVICE);
-+	free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
++	mutex_unlock(&file->port->file_mutex);
 +
-+err_out_unmap_bars:
-+	for (i=0; i<3; ++i)
-+		if (dev->bar[i])
-+			iounmap(dev->bar[i]);
+ 	return ret;
+ }
+ 
+@@ -703,7 +709,8 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
+ 	if (get_user(id, arg))
+ 		return -EFAULT;
+ 
+-	down_write(&file->port->mutex);
++	mutex_lock(&file->port->file_mutex);
++	mutex_lock(&file->mutex);
+ 
+ 	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+ 		ret = -EINVAL;
+@@ -714,11 +721,13 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
+ 	file->agent[id] = NULL;
+ 
+ out:
+-	up_write(&file->port->mutex);
++	mutex_unlock(&file->mutex);
+ 
+ 	if (agent)
+ 		ib_unregister_mad_agent(agent);
+ 
++	mutex_unlock(&file->port->file_mutex);
 +
-+err_out_free_regions:
-+	pci_release_regions(pdev);
+ 	return ret;
+ }
+ 
+@@ -726,12 +735,12 @@ static long ib_umad_enable_pkey(struct ib_umad_file *file)
+ {
+ 	int ret = 0;
+ 
+-	down_write(&file->port->mutex);
++	mutex_lock(&file->mutex);
+ 	if (file->already_used)
+ 		ret = -EINVAL;
+ 	else
+ 		file->use_pkey_index = 1;
+-	up_write(&file->port->mutex);
++	mutex_unlock(&file->mutex);
+ 
+ 	return ret;
+ }
+@@ -783,7 +792,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
+ 	if (!port)
+ 		return -ENXIO;
+ 
+-	down_write(&port->mutex);
++	mutex_lock(&port->file_mutex);
+ 
+ 	if (!port->ib_dev) {
+ 		ret = -ENXIO;
+@@ -797,7 +806,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
+ 		goto out;
+ 	}
+ 
+-	spin_lock_init(&file->recv_lock);
++	mutex_init(&file->mutex);
+ 	spin_lock_init(&file->send_lock);
+ 	INIT_LIST_HEAD(&file->recv_list);
+ 	INIT_LIST_HEAD(&file->send_list);
+@@ -809,7 +818,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
+ 	list_add_tail(&file->port_list, &port->file_list);
+ 
+ out:
+-	up_write(&port->mutex);
++	mutex_unlock(&port->file_mutex);
+ 	return ret;
+ }
+ 
+@@ -821,7 +830,8 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
+ 	int already_dead;
+ 	int i;
+ 
+-	down_write(&file->port->mutex);
++	mutex_lock(&file->port->file_mutex);
++	mutex_lock(&file->mutex);
+ 
+ 	already_dead = file->agents_dead;
+ 	file->agents_dead = 1;
+@@ -834,14 +844,14 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
+ 
+ 	list_del(&file->port_list);
+ 
+-	downgrade_write(&file->port->mutex);
++	mutex_unlock(&file->mutex);
+ 
+ 	if (!already_dead)
+ 		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
+ 			if (file->agent[i])
+ 				ib_unregister_mad_agent(file->agent[i]);
+ 
+-	up_read(&file->port->mutex);
++	mutex_unlock(&file->port->file_mutex);
+ 
+ 	kfree(file);
+ 	kref_put(&dev->ref, ib_umad_release_dev);
+@@ -914,10 +924,10 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
+ 	};
+ 	int ret = 0;
+ 
+-	down_write(&port->mutex);
++	mutex_lock(&port->file_mutex);
+ 	if (port->ib_dev)
+ 		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+-	up_write(&port->mutex);
++	mutex_unlock(&port->file_mutex);
+ 
+ 	up(&port->sm_sem);
+ 
+@@ -981,7 +991,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
+ 	port->ib_dev   = device;
+ 	port->port_num = port_num;
+ 	init_MUTEX(&port->sm_sem);
+-	init_rwsem(&port->mutex);
++	mutex_init(&port->file_mutex);
+ 	INIT_LIST_HEAD(&port->file_list);
+ 
+ 	port->dev = cdev_alloc();
+@@ -1052,6 +1062,7 @@ err_cdev:
+ static void ib_umad_kill_port(struct ib_umad_port *port)
+ {
+ 	struct ib_umad_file *file;
++	int already_dead;
+ 	int id;
+ 
+ 	class_set_devdata(port->class_dev,    NULL);
+@@ -1067,42 +1078,22 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+ 	umad_port[port->dev_num] = NULL;
+ 	spin_unlock(&port_lock);
+ 
+-	down_write(&port->mutex);
++	mutex_lock(&port->file_mutex);
+ 
+ 	port->ib_dev = NULL;
+ 
+-	/*
+-	 * Now go through the list of files attached to this port and
+-	 * unregister all of their MAD agents.  We need to hold
+-	 * port->mutex while doing this to avoid racing with
+-	 * ib_umad_close(), but we can't hold the mutex for writing
+-	 * while calling ib_unregister_mad_agent(), since that might
+-	 * deadlock by calling back into queue_packet().  So we
+-	 * downgrade our lock to a read lock, and then drop and
+-	 * reacquire the write lock for the next iteration.
+-	 *
+-	 * We do list_del_init() on the file's list_head so that the
+-	 * list_del in ib_umad_close() is still OK, even after the
+-	 * file is removed from the list.
+-	 */
+-	while (!list_empty(&port->file_list)) {
+-		file = list_entry(port->file_list.next, struct ib_umad_file,
+-				  port_list);
+-
++	list_for_each_entry(file, &port->file_list, port_list) {
++		mutex_lock(&file->mutex);
++		already_dead = file->agents_dead;
+ 		file->agents_dead = 1;
+-		list_del_init(&file->port_list);
+-
+-		downgrade_write(&port->mutex);
++		mutex_unlock(&file->mutex);
+ 
+ 		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
+ 			if (file->agent[id])
+ 				ib_unregister_mad_agent(file->agent[id]);
+-
+-		up_read(&port->mutex);
+-		down_write(&port->mutex);
+ 	}
+ 
+-	up_write(&port->mutex);
++	mutex_unlock(&port->file_mutex);
+ 
+ 	clear_bit(port->dev_num, dev_map);
+ }
+diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
+index 36b9898..7e7b5a6 100644
+--- a/drivers/infiniband/hw/cxgb3/Makefile
++++ b/drivers/infiniband/hw/cxgb3/Makefile
+@@ -1,5 +1,4 @@
+-EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3 \
+-		-I$(TOPDIR)/drivers/infiniband/hw/cxgb3/core
++EXTRA_CFLAGS += -Idrivers/net/cxgb3
+ 
+ obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
+ 
+diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
+index eec6a30..03c5ff6 100644
+--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
++++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
+@@ -179,7 +179,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ 	setup.size = 1UL << cq->size_log2;
+ 	setup.credits = 65535;
+ 	setup.credit_thres = 1;
+-	if (rdev_p->t3cdev_p->type == T3B)
++	if (rdev_p->t3cdev_p->type != T3A)
+ 		setup.ovfl_mode = 0;
+ 	else
+ 		setup.ovfl_mode = 1;
+@@ -584,7 +584,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
+ {
+ 	u32 i, nr_wqe, copy_len;
+ 	u8 *copy_data;
+-	u8 wr_len, utx_len;	/* lenght in 8 byte flit */
++	u8 wr_len, utx_len;	/* length in 8 byte flit */
+ 	enum t3_wr_flags flag;
+ 	__be64 *wqe;
+ 	u64 utx_cmd;
+diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
+index c84d4ac..969d4d9 100644
+--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
++++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
+@@ -315,7 +315,7 @@ struct t3_rdma_init_wr {
+ 	__be32 ird;
+ 	__be64 qp_dma_addr;	/* 7 */
+ 	__be32 qp_dma_size;	/* 8 */
+-	u32 irs;
++	__be32 irs;
+ };
+ 
+ struct t3_genbit {
+@@ -324,7 +324,8 @@ struct t3_genbit {
+ };
+ 
+ enum rdma_init_wr_flags {
+-	RECVS_POSTED = 1,
++	RECVS_POSTED = (1<<0),
++	PRIV_QP = (1<<1),
+ };
+ 
+ union t3_wr {
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index 20ba372..e9a08fa 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -332,7 +332,7 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
+ 			  }
+ 	};
+ 
+-	if (ip_route_output_flow(&rt, &fl, NULL, 0))
++	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ 		return NULL;
+ 	return rt;
+ }
+@@ -1118,7 +1118,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+ 	     status2errno(rpl->status));
+ 	connect_reply_upcall(ep, status2errno(rpl->status));
+ 	state_set(&ep->com, DEAD);
+-	if (ep->com.tdev->type == T3B && act_open_has_tid(rpl->status))
++	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
+ 		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
+ 	cxgb3_free_atid(ep->com.tdev, ep->atid);
+ 	dst_release(ep->dst);
+@@ -1249,7 +1249,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
+ 	skb_trim(skb, sizeof(struct cpl_tid_release));
+ 	skb_get(skb);
+ 
+-	if (tdev->type == T3B)
++	if (tdev->type != T3A)
+ 		release_tid(tdev, hwtid, skb);
+ 	else {
+ 		struct cpl_pass_accept_rpl *rpl;
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
+index a6c2c4b..73bfd16 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
+@@ -122,6 +122,13 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
+ 		*total_size += buffer_list[i].size;
+ 		if (i > 0)
+ 			mask |= buffer_list[i].addr;
++		else
++			mask |= buffer_list[i].addr & PAGE_MASK;
++		if (i != num_phys_buf - 1)
++			mask |= buffer_list[i].addr + buffer_list[i].size;
++		else
++			mask |= (buffer_list[i].addr + buffer_list[i].size +
++				PAGE_SIZE - 1) & PAGE_MASK;
+ 	}
+ 
+ 	if (*total_size > 0xFFFFFFFFULL)
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
+index b5436ca..df1838f 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
+@@ -39,6 +39,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
+ 
+ #include <asm/io.h>
+ #include <asm/irq.h>
+@@ -645,7 +646,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ 	if (err)
+ 		goto err;
+ 
+-	if (udata && t3b_device(rhp)) {
++	if (udata && !t3a_device(rhp)) {
+ 		uresp.pbl_addr = (mhp->attr.pbl_addr -
+ 	                         rhp->rdev.rnic_info.pbl_base) >> 3;
+ 		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
+@@ -1053,7 +1054,9 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
+ 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
+ 
+ 	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
++	rtnl_lock();
+ 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
++	rtnl_unlock();
+ 	return sprintf(buf, "%s\n", info.fw_version);
+ }
+ 
+@@ -1065,7 +1068,9 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
+ 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
+ 
+ 	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
++	rtnl_lock();
+ 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
++	rtnl_unlock();
+ 	return sprintf(buf, "%s\n", info.driver);
+ }
+ 
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
+index dd89b6b..ea2cdd7 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
+@@ -208,36 +208,19 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
+ static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
+ 				struct ib_recv_wr *wr)
+ {
+-	int i, err = 0;
+-	u32 pbl_addr[4];
+-	u8 page_size[4];
++	int i;
+ 	if (wr->num_sge > T3_MAX_SGE)
+ 		return -EINVAL;
+-	err = iwch_sgl2pbl_map(rhp, wr->sg_list, wr->num_sge, pbl_addr,
+-			       page_size);
+-	if (err)
+-		return err;
+-	wqe->recv.pagesz[0] = page_size[0];
+-	wqe->recv.pagesz[1] = page_size[1];
+-	wqe->recv.pagesz[2] = page_size[2];
+-	wqe->recv.pagesz[3] = page_size[3];
+ 	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
+ 	for (i = 0; i < wr->num_sge; i++) {
+ 		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
+ 		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
+-
+-		/* to in the WQE == the offset into the page */
+-		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
+-				(1UL << (12 + page_size[i])));
+-
+-		/* pbl_addr is the adapters address in the PBL */
+-		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
++		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
+ 	}
+ 	for (; i < T3_MAX_SGE; i++) {
+ 		wqe->recv.sgl[i].stag = 0;
+ 		wqe->recv.sgl[i].len = 0;
+ 		wqe->recv.sgl[i].to = 0;
+-		wqe->recv.pbl_addr[i] = 0;
+ 	}
+ 	return 0;
+ }
+@@ -659,6 +642,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+ 	cxio_flush_rq(&qhp->wq, &rchp->cq, count);
+ 	spin_unlock(&qhp->lock);
+ 	spin_unlock_irqrestore(&rchp->lock, *flag);
++	(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ 
+ 	/* locking heirarchy: cq lock first, then qp lock. */
+ 	spin_lock_irqsave(&schp->lock, *flag);
+@@ -668,6 +652,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+ 	cxio_flush_sq(&qhp->wq, &schp->cq, count);
+ 	spin_unlock(&qhp->lock);
+ 	spin_unlock_irqrestore(&schp->lock, *flag);
++	(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+ 
+ 	/* deref */
+ 	if (atomic_dec_and_test(&qhp->refcnt))
+@@ -678,7 +663,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+ 
+ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+ {
+-	if (t3b_device(qhp->rhp))
++	if (qhp->ibqp.uobject)
+ 		cxio_set_wq_in_error(&qhp->wq);
+ 	else
+ 		__flush_qp(qhp, flag);
+@@ -732,6 +717,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
+ 	init_attr.qp_dma_addr = qhp->wq.dma_addr;
+ 	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
+ 	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
++	init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
+ 	init_attr.irs = qhp->ep->rcv_seq;
+ 	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
+ 	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
+@@ -847,10 +833,11 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
+ 				disconnect = 1;
+ 				ep = qhp->ep;
+ 			}
++			flush_qp(qhp, &flag);
+ 			break;
+ 		case IWCH_QP_STATE_TERMINATE:
+ 			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
+-			if (t3b_device(qhp->rhp))
++			if (qhp->ibqp.uobject)
+ 				cxio_set_wq_in_error(&qhp->wq);
+ 			if (!internal)
+ 				terminate = 1;
+diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
+index f7782c8..194c1c3 100644
+--- a/drivers/infiniband/hw/ehca/ehca_av.c
++++ b/drivers/infiniband/hw/ehca/ehca_av.c
+@@ -1,7 +1,7 @@
+ /*
+  *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+  *
+- *  adress vector functions
++ *  address vector functions
+  *
+  *  Authors: Hoang-Nam Nguyen <hnguyen at de.ibm.com>
+  *           Khadija Souissi <souissik at de.ibm.com>
+diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
+index 74d2b72..f281d16 100644
+--- a/drivers/infiniband/hw/ehca/ehca_classes.h
++++ b/drivers/infiniband/hw/ehca/ehca_classes.h
+@@ -94,7 +94,11 @@ struct ehca_sma_attr {
+ 
+ struct ehca_sport {
+ 	struct ib_cq *ibcq_aqp1;
+-	struct ib_qp *ibqp_aqp1;
++	struct ib_qp *ibqp_sqp[2];
++	/* lock to serialize modify_qp() calls for sqp in normal
++	 * and irq path (when event PORT_ACTIVE is received first time)
++	 */
++	spinlock_t mod_sqp_lock;
+ 	enum ib_port_state port_state;
+ 	struct ehca_sma_attr saved_attr;
+ };
+@@ -141,6 +145,14 @@ enum ehca_ext_qp_type {
+ 	EQPT_SRQ       = 3,
+ };
+ 
++/* struct to cache modify_qp()'s parms for GSI/SMI qp */
++struct ehca_mod_qp_parm {
++	int mask;
++	struct ib_qp_attr attr;
++};
 +
-+err_out_disable_pci_device:
-+	pci_disable_device(pdev);
++#define EHCA_MOD_QP_PARM_MAX 4
 +
-+	return err;
-+}
+ struct ehca_qp {
+ 	union {
+ 		struct ib_qp ib_qp;
+@@ -164,10 +176,18 @@ struct ehca_qp {
+ 	struct ehca_cq *recv_cq;
+ 	unsigned int sqerr_purgeflag;
+ 	struct hlist_node list_entries;
++	/* array to cache modify_qp()'s parms for GSI/SMI qp */
++	struct ehca_mod_qp_parm *mod_qp_parm;
++	int mod_qp_parm_idx;
+ 	/* mmap counter for resources mapped into user space */
+ 	u32 mm_count_squeue;
+ 	u32 mm_count_rqueue;
+ 	u32 mm_count_galpa;
++	/* unsolicited ack circumvention */
++	int unsol_ack_circ;
++	int mtu_shift;
++	u32 message_count;
++	u32 packet_count;
+ };
+ 
+ #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
+@@ -323,6 +343,7 @@ extern int ehca_port_act_time;
+ extern int ehca_use_hp_mr;
+ extern int ehca_scaling_code;
+ extern int ehca_lock_hcalls;
++extern int ehca_nr_ports;
+ 
+ struct ipzu_queue_resp {
+ 	u32 qe_size;      /* queue entry size */
+diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
+index 79c25f5..0467c15 100644
+--- a/drivers/infiniband/hw/ehca/ehca_cq.c
++++ b/drivers/infiniband/hw/ehca/ehca_cq.c
+@@ -246,7 +246,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
+ 		} else {
+ 			if (h_ret != H_PAGE_REGISTERED) {
+ 				ehca_err(device, "Registration of page failed "
+-					 "ehca_cq=%p cq_num=%x h_ret=%li"
++					 "ehca_cq=%p cq_num=%x h_ret=%li "
+ 					 "counter=%i act_pages=%i",
+ 					 my_cq, my_cq->cq_number,
+ 					 h_ret, counter, param.act_pages);
+diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
+index 3f617b2..863b34f 100644
+--- a/drivers/infiniband/hw/ehca/ehca_irq.c
++++ b/drivers/infiniband/hw/ehca/ehca_irq.c
+@@ -62,6 +62,7 @@
+ #define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
+ #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
+ #define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
++#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)
+ 
+ #define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
+ #define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
+@@ -354,17 +355,34 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
+ {
+ 	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
+ 	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
++	u8 spec_event;
++	struct ehca_sport *sport = &shca->sport[port - 1];
++	unsigned long flags;
+ 
+ 	switch (ec) {
+ 	case 0x30: /* port availability change */
+ 		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
+-			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
++			int suppress_event;
++			/* replay modify_qp for sqps */
++			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
++			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
++			if (sport->ibqp_sqp[IB_QPT_SMI])
++				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
++			if (!suppress_event)
++				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
++			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
 +
-+static void hifn_remove(struct pci_dev *pdev)
-+{
-+	int i;
-+	struct hifn_device *dev;
++			/* AQP1 was destroyed, ignore this event */
++			if (suppress_event)
++				break;
 +
-+	dev = pci_get_drvdata(pdev);
++			sport->port_state = IB_PORT_ACTIVE;
+ 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
+ 					    "is active");
+ 			ehca_query_sma_attr(shca, port,
+-					    &shca->sport[port - 1].saved_attr);
++					    &sport->saved_attr);
+ 		} else {
+-			shca->sport[port - 1].port_state = IB_PORT_DOWN;
++			sport->port_state = IB_PORT_DOWN;
+ 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
+ 					    "is inactive");
+ 		}
+@@ -378,11 +396,11 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
+ 			ehca_warn(&shca->ib_device, "disruptive port "
+ 				  "%d configuration change", port);
+ 
+-			shca->sport[port - 1].port_state = IB_PORT_DOWN;
++			sport->port_state = IB_PORT_DOWN;
+ 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
+ 					    "is inactive");
+ 
+-			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
++			sport->port_state = IB_PORT_ACTIVE;
+ 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
+ 					    "is active");
+ 		} else
+@@ -394,6 +412,16 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
+ 	case 0x33:  /* trace stopped */
+ 		ehca_err(&shca->ib_device, "Traced stopped.");
+ 		break;
++	case 0x34: /* util async event */
++		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
++		if (spec_event == 0x80) /* client reregister required */
++			dispatch_port_event(shca, port,
++					    IB_EVENT_CLIENT_REREGISTER,
++					    "client reregister req.");
++		else
++			ehca_warn(&shca->ib_device, "Unknown util async "
++				  "event %x on port %x", spec_event, port);
++		break;
+ 	default:
+ 		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
+ 			 ec, shca->ib_device.name);
+diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
+index 5485799..c469bfd 100644
+--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
++++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
+@@ -200,4 +200,6 @@ void ehca_free_fw_ctrlblock(void *ptr);
+ #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
+ #endif
+ 
++void ehca_recover_sqp(struct ib_qp *sqp);
 +
-+	if (dev) {
-+		cancel_delayed_work(&dev->work);
-+		flush_scheduled_work();
+ #endif
+diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
+index 6a56d86..84c9b7b 100644
+--- a/drivers/infiniband/hw/ehca/ehca_main.c
++++ b/drivers/infiniband/hw/ehca/ehca_main.c
+@@ -90,7 +90,8 @@ MODULE_PARM_DESC(hw_level,
+ 		 "hardware level"
+ 		 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
+ MODULE_PARM_DESC(nr_ports,
+-		 "number of connected ports (default: 2)");
++		 "number of connected ports (-1: autodetect, 1: port one only, "
++		 "2: two ports (default))");
+ MODULE_PARM_DESC(use_hp_mr,
+ 		 "high performance MRs (0: no (default), 1: yes)");
+ MODULE_PARM_DESC(port_act_time,
+@@ -511,7 +512,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
+ 	}
+ 	sport->ibcq_aqp1 = ibcq;
+ 
+-	if (sport->ibqp_aqp1) {
++	if (sport->ibqp_sqp[IB_QPT_GSI]) {
+ 		ehca_err(&shca->ib_device, "AQP1 QP is already created.");
+ 		ret = -EPERM;
+ 		goto create_aqp1;
+@@ -537,7 +538,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
+ 		ret = PTR_ERR(ibqp);
+ 		goto create_aqp1;
+ 	}
+-	sport->ibqp_aqp1 = ibqp;
++	sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
+ 
+ 	return 0;
+ 
+@@ -550,7 +551,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
+ {
+ 	int ret;
+ 
+-	ret = ib_destroy_qp(sport->ibqp_aqp1);
++	ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
+ 	if (ret) {
+ 		ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
+ 		return ret;
+@@ -590,6 +591,11 @@ static struct attribute_group ehca_drv_attr_grp = {
+ 	.attrs = ehca_drv_attrs
+ };
+ 
++static struct attribute_group *ehca_drv_attr_groups[] = {
++	&ehca_drv_attr_grp,
++	NULL,
++};
 +
-+		hifn_unregister_rng(dev);
-+		hifn_unregister_alg(dev);
-+		hifn_reset_dma(dev, 1);
-+		hifn_stop_device(dev);
+ #define EHCA_RESOURCE_ATTR(name)                                           \
+ static ssize_t  ehca_show_##name(struct device *dev,                       \
+ 				 struct device_attribute *attr,            \
+@@ -688,7 +694,7 @@ static int __devinit ehca_probe(struct of_device *dev,
+ 	struct ehca_shca *shca;
+ 	const u64 *handle;
+ 	struct ib_pd *ibpd;
+-	int ret;
++	int ret, i;
+ 
+ 	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
+ 	if (!handle) {
+@@ -709,6 +715,8 @@ static int __devinit ehca_probe(struct of_device *dev,
+ 		return -ENOMEM;
+ 	}
+ 	mutex_init(&shca->modify_mutex);
++	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
++		spin_lock_init(&shca->sport[i].mod_sqp_lock);
+ 
+ 	shca->ofdev = dev;
+ 	shca->ipz_hca_handle.handle = *handle;
+@@ -899,6 +907,9 @@ static struct of_platform_driver ehca_driver = {
+ 	.match_table = ehca_device_table,
+ 	.probe       = ehca_probe,
+ 	.remove      = ehca_remove,
++	.driver	     = {
++		.groups = ehca_drv_attr_groups,
++	},
+ };
+ 
+ void ehca_poll_eqs(unsigned long data)
+@@ -926,7 +937,7 @@ void ehca_poll_eqs(unsigned long data)
+ 				ehca_process_eq(shca, 0);
+ 		}
+ 	}
+-	mod_timer(&poll_eqs_timer, jiffies + HZ);
++	mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
+ 	spin_unlock(&shca_list_lock);
+ }
+ 
+@@ -957,10 +968,6 @@ int __init ehca_module_init(void)
+ 		goto module_init2;
+ 	}
+ 
+-	ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
+-	if (ret) /* only complain; we can live without attributes */
+-		ehca_gen_err("Cannot create driver attributes  ret=%d", ret);
+-
+ 	if (ehca_poll_all_eqs != 1) {
+ 		ehca_gen_err("WARNING!!!");
+ 		ehca_gen_err("It is possible to lose interrupts.");
+@@ -986,7 +993,6 @@ void __exit ehca_module_exit(void)
+ 	if (ehca_poll_all_eqs == 1)
+ 		del_timer_sync(&poll_eqs_timer);
+ 
+-	sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
+ 	ibmebus_unregister_driver(&ehca_driver);
+ 
+ 	ehca_destroy_slab_caches();
+diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
+index eff5fb5..1012f15 100644
+--- a/drivers/infiniband/hw/ehca/ehca_qp.c
++++ b/drivers/infiniband/hw/ehca/ehca_qp.c
+@@ -592,10 +592,8 @@ static struct ehca_qp *internal_create_qp(
+ 		goto create_qp_exit1;
+ 	}
+ 
+-	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+-		parms.sigtype = HCALL_SIGT_EVERY;
+-	else
+-		parms.sigtype = HCALL_SIGT_BY_WQE;
++	/* Always signal by WQE so we can hide circ. WQEs */
++	parms.sigtype = HCALL_SIGT_BY_WQE;
+ 
+ 	/* UD_AV CIRCUMVENTION */
+ 	max_send_sge = init_attr->cap.max_send_sge;
+@@ -618,6 +616,10 @@ static struct ehca_qp *internal_create_qp(
+ 	parms.squeue.max_sge = max_send_sge;
+ 	parms.rqueue.max_sge = max_recv_sge;
+ 
++	/* RC QPs need one more SWQE for unsolicited ack circumvention */
++	if (qp_type == IB_QPT_RC)
++		parms.squeue.max_wr++;
 +
-+		free_irq(dev->irq, dev->name);
-+		tasklet_kill(&dev->tasklet);
+ 	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
+ 		if (HAS_SQ(my_qp))
+ 			ehca_determine_small_queue(
+@@ -650,6 +652,8 @@ static struct ehca_qp *internal_create_qp(
+ 			parms.squeue.act_nr_sges = 1;
+ 			parms.rqueue.act_nr_sges = 1;
+ 		}
++		/* hide the extra WQE */
++		parms.squeue.act_nr_wqes--;
+ 		break;
+ 	case IB_QPT_UD:
+ 	case IB_QPT_GSI:
+@@ -729,12 +733,31 @@ static struct ehca_qp *internal_create_qp(
+ 	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
+ 	my_qp->init_attr = *init_attr;
+ 
++	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
++		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
++			&my_qp->ib_qp;
++		if (ehca_nr_ports < 0) {
++			/* alloc array to cache subsequent modify qp parms
++			 * for autodetect mode
++			 */
++			my_qp->mod_qp_parm =
++				kzalloc(EHCA_MOD_QP_PARM_MAX *
++					sizeof(*my_qp->mod_qp_parm),
++					GFP_KERNEL);
++			if (!my_qp->mod_qp_parm) {
++				ehca_err(pd->device,
++					 "Could not alloc mod_qp_parm");
++				goto create_qp_exit4;
++			}
++		}
++	}
 +
-+		hifn_flush(dev);
+ 	/* NOTE: define_apq0() not supported yet */
+ 	if (qp_type == IB_QPT_GSI) {
+ 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
+ 		if (h_ret != H_SUCCESS) {
+ 			ret = ehca2ib_return_code(h_ret);
+-			goto create_qp_exit4;
++			goto create_qp_exit5;
+ 		}
+ 	}
+ 
+@@ -743,7 +766,7 @@ static struct ehca_qp *internal_create_qp(
+ 		if (ret) {
+ 			ehca_err(pd->device,
+ 				 "Couldn't assign qp to send_cq ret=%i", ret);
+-			goto create_qp_exit4;
++			goto create_qp_exit5;
+ 		}
+ 	}
+ 
+@@ -769,12 +792,18 @@ static struct ehca_qp *internal_create_qp(
+ 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+ 			ehca_err(pd->device, "Copy to udata failed");
+ 			ret = -EINVAL;
+-			goto create_qp_exit4;
++			goto create_qp_exit6;
+ 		}
+ 	}
+ 
+ 	return my_qp;
+ 
++create_qp_exit6:
++	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
 +
-+		pci_free_consistent(pdev, sizeof(struct hifn_dma),
-+				dev->desc_virt, dev->desc_dma);
-+		pci_unmap_single(pdev, dev->dst,
-+				PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
-+				PCI_DMA_FROMDEVICE);
-+		free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
-+		for (i=0; i<3; ++i)
-+			if (dev->bar[i])
-+				iounmap(dev->bar[i]);
++create_qp_exit5:
++	kfree(my_qp->mod_qp_parm);
 +
-+		kfree(dev);
+ create_qp_exit4:
+ 	if (HAS_RQ(my_qp))
+ 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
+@@ -858,7 +887,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
+ 				update_mask,
+ 				mqpcb, my_qp->galpas.kernel);
+ 	if (hret != H_SUCCESS) {
+-		ehca_err(pd->device, "Could not modify SRQ to INIT"
++		ehca_err(pd->device, "Could not modify SRQ to INIT "
+ 			 "ehca_qp=%p qp_num=%x h_ret=%li",
+ 			 my_qp, my_qp->real_qp_num, hret);
+ 		goto create_srq2;
+@@ -872,7 +901,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
+ 				update_mask,
+ 				mqpcb, my_qp->galpas.kernel);
+ 	if (hret != H_SUCCESS) {
+-		ehca_err(pd->device, "Could not enable SRQ"
++		ehca_err(pd->device, "Could not enable SRQ "
+ 			 "ehca_qp=%p qp_num=%x h_ret=%li",
+ 			 my_qp, my_qp->real_qp_num, hret);
+ 		goto create_srq2;
+@@ -886,7 +915,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
+ 				update_mask,
+ 				mqpcb, my_qp->galpas.kernel);
+ 	if (hret != H_SUCCESS) {
+-		ehca_err(pd->device, "Could not modify SRQ to RTR"
++		ehca_err(pd->device, "Could not modify SRQ to RTR "
+ 			 "ehca_qp=%p qp_num=%x h_ret=%li",
+ 			 my_qp, my_qp->real_qp_num, hret);
+ 		goto create_srq2;
+@@ -992,7 +1021,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
+ 	unsigned long flags = 0;
+ 
+ 	/* do query_qp to obtain current attr values */
+-	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
++	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
+ 	if (!mqpcb) {
+ 		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
+ 			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
+@@ -1180,6 +1209,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
+ 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
+ 	}
+ 	if (attr_mask & IB_QP_PORT) {
++		struct ehca_sport *sport;
++		struct ehca_qp *aqp1;
+ 		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
+ 			ret = -EINVAL;
+ 			ehca_err(ibqp->device, "Invalid port=%x. "
+@@ -1188,6 +1219,29 @@ static int internal_modify_qp(struct ib_qp *ibqp,
+ 				 shca->num_ports);
+ 			goto modify_qp_exit2;
+ 		}
++		sport = &shca->sport[attr->port_num - 1];
++		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
++			/* should not occur */
++			ret = -EFAULT;
++			ehca_err(ibqp->device, "AQP1 was not created for "
++				 "port=%x", attr->port_num);
++			goto modify_qp_exit2;
++		}
++		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
++				    struct ehca_qp, ib_qp);
++		if (ibqp->qp_type != IB_QPT_GSI &&
++		    ibqp->qp_type != IB_QPT_SMI &&
++		    aqp1->mod_qp_parm) {
++			/*
++			 * firmware will reject this modify_qp() because
++			 * port is not activated/initialized fully
++			 */
++			ret = -EFAULT;
++			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
++				  "either port is being activated (try again) "
++				  "or cabling issue", attr->port_num);
++			goto modify_qp_exit2;
++		}
+ 		mqpcb->prim_phys_port = attr->port_num;
+ 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
+ 	}
+@@ -1244,6 +1298,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
+ 	}
+ 
+ 	if (attr_mask & IB_QP_PATH_MTU) {
++		/* store ld(MTU) */
++		my_qp->mtu_shift = attr->path_mtu + 7;
+ 		mqpcb->path_mtu = attr->path_mtu;
+ 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
+ 	}
+@@ -1467,6 +1523,8 @@ modify_qp_exit1:
+ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ 		   struct ib_udata *udata)
+ {
++	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
++					      ib_device);
+ 	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+ 					     ib_pd);
+@@ -1479,9 +1537,100 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ 		return -EINVAL;
+ 	}
+ 
++	/* The if-block below caches qp_attr to be modified for GSI and SMI
++	 * qps during the initialization by ib_mad. When the respective port
++	 * is activated, i.e. we got an event PORT_ACTIVE, we'll replay the
++	 * cached modify calls sequence, see ehca_recover_sqp() below.
++	 * Why that is required:
++	 * 1) If only one port is connected, older code requires that port
++	 *    one be connected and the module option nr_ports=1 be given by
++	 *    the user, which is very inconvenient for the end user.
++	 * 2) Firmware accepts modify_qp() only if the respective port has
++	 *    become active. Older code had a 30sec wait loop in create_qp()/
++	 *    define_aqp1(), which is not appropriate in practice. This
++	 *    code now removes that wait loop, see define_aqp1(), and always
++	 *    reports all ports to ib_mad and to users. Only activated ports
++	 *    will then be usable for the users.
++	 */
++	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
++		int port = my_qp->init_attr.port_num;
++		struct ehca_sport *sport = &shca->sport[port - 1];
++		unsigned long flags;
++		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
++		/* cache qp_attr only during init */
++		if (my_qp->mod_qp_parm) {
++			struct ehca_mod_qp_parm *p;
++			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
++				ehca_err(&shca->ib_device,
++					 "mod_qp_parm overflow state=%x port=%x"
++					 " type=%x", attr->qp_state,
++					 my_qp->init_attr.port_num,
++					 ibqp->qp_type);
++				spin_unlock_irqrestore(&sport->mod_sqp_lock,
++						       flags);
++				return -EINVAL;
++			}
++			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
++			p->mask = attr_mask;
++			p->attr = *attr;
++			my_qp->mod_qp_parm_idx++;
++			ehca_dbg(&shca->ib_device,
++				 "Saved qp_attr for state=%x port=%x type=%x",
++				 attr->qp_state, my_qp->init_attr.port_num,
++				 ibqp->qp_type);
++			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
++			return 0;
++		}
++		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
 +	}
 +
-+	pci_release_regions(pdev);
-+	pci_disable_device(pdev);
-+}
-+
-+static struct pci_device_id hifn_pci_tbl[] = {
-+	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
-+	{ 0 }
-+};
-+MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
-+
-+static struct pci_driver hifn_pci_driver = {
-+	.name     = "hifn795x",
-+	.id_table = hifn_pci_tbl,
-+	.probe    = hifn_probe,
-+	.remove   = __devexit_p(hifn_remove),
-+};
-+
-+static int __devinit hifn_init(void)
+ 	return internal_modify_qp(ibqp, attr, attr_mask, 0);
+ }
+ 
++void ehca_recover_sqp(struct ib_qp *sqp)
 +{
-+	unsigned int freq;
-+	int err;
++	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
++	int port = my_sqp->init_attr.port_num;
++	struct ib_qp_attr attr;
++	struct ehca_mod_qp_parm *qp_parm;
++	int i, qp_parm_idx, ret;
++	unsigned long flags, wr_cnt;
 +
-+	if (strncmp(hifn_pll_ref, "ext", 3) &&
-+	    strncmp(hifn_pll_ref, "pci", 3)) {
-+		printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
-+				"must be pci or ext");
-+		return -EINVAL;
-+	}
++	if (!my_sqp->mod_qp_parm)
++		return;
++	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
 +
-+	/*
-+	 * For the 7955/7956 the reference clock frequency must be in the
-+	 * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz,
-+	 * but this chip is currently not supported.
-+	 */
-+	if (hifn_pll_ref[3] != '\0') {
-+		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
-+		if (freq < 20 || freq > 100) {
-+			printk(KERN_ERR "hifn795x: invalid hifn_pll_ref "
-+					"frequency, must be in the range "
-+					"of 20-100");
-+			return -EINVAL;
++	qp_parm = my_sqp->mod_qp_parm;
++	qp_parm_idx = my_sqp->mod_qp_parm_idx;
++	for (i = 0; i < qp_parm_idx; i++) {
++		attr = qp_parm[i].attr;
++		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
++		if (ret) {
++			ehca_err(sqp->device, "Could not modify SQP port=%x "
++				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
++			goto free_qp_parm;
 +		}
++		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
++			 port, sqp->qp_num, attr.qp_state);
 +	}
 +
-+	err = pci_register_driver(&hifn_pci_driver);
-+	if (err < 0) {
-+		dprintk("Failed to register PCI driver for %s device.\n",
-+				hifn_pci_driver.name);
-+		return -ENODEV;
++	/* re-trigger posted recv wrs */
++	wr_cnt =  my_sqp->ipz_rqueue.current_q_offset /
++		my_sqp->ipz_rqueue.qe_size;
++	if (wr_cnt) {
++		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
++		hipz_update_rqa(my_sqp, wr_cnt);
++		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
++		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
++			 port, sqp->qp_num, wr_cnt);
 +	}
 +
-+	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
-+			"has been successfully registered.\n");
-+
-+	return 0;
-+}
-+
-+static void __devexit hifn_fini(void)
-+{
-+	pci_unregister_driver(&hifn_pci_driver);
-+
-+	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
-+			"has been successfully unregistered.\n");
++free_qp_parm:
++	kfree(qp_parm);
++	/* this prevents subsequent calls to modify_qp() from caching qp_attr */
++	my_sqp->mod_qp_parm = NULL;
 +}
 +
-+module_init(hifn_init);
-+module_exit(hifn_fini);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Evgeniy Polyakov <johnpol at 2ka.mipt.ru>");
-+MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");
-diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
-index 5f7e718..2f3ad3f 100644
---- a/drivers/crypto/padlock-aes.c
-+++ b/drivers/crypto/padlock-aes.c
-@@ -44,6 +44,7 @@
-  */
- 
- #include <crypto/algapi.h>
-+#include <crypto/aes.h>
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/types.h>
-@@ -53,9 +54,6 @@
- #include <asm/byteorder.h>
- #include "padlock.h"
- 
--#define AES_MIN_KEY_SIZE	16	/* in uint8_t units */
--#define AES_MAX_KEY_SIZE	32	/* ditto */
--#define AES_BLOCK_SIZE		16	/* ditto */
- #define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
- #define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
+ int ehca_query_qp(struct ib_qp *qp,
+ 		  struct ib_qp_attr *qp_attr,
+ 		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+@@ -1769,6 +1918,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
+ 	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
+ 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+ 					     ib_pd);
++	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
+ 	u32 cur_pid = current->tgid;
+ 	u32 qp_num = my_qp->real_qp_num;
+ 	int ret;
+@@ -1815,6 +1965,14 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
+ 	port_num = my_qp->init_attr.port_num;
+ 	qp_type  = my_qp->init_attr.qp_type;
  
-@@ -419,6 +417,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- /* ====== Encryption/decryption routines ====== */
++	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
++		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
++		kfree(my_qp->mod_qp_parm);
++		my_qp->mod_qp_parm = NULL;
++		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
++		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
++	}
++
+ 	/* no support for IB_QPT_SMI yet */
+ 	if (qp_type == IB_QPT_GSI) {
+ 		struct ib_event event;
+diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
+index ea91360..3aacc8c 100644
+--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
++++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
+@@ -50,6 +50,9 @@
+ #include "hcp_if.h"
+ #include "hipz_fns.h"
  
- /* These are the real call to PadLock. */
-+static inline void padlock_reset_key(void)
-+{
-+	asm volatile ("pushfl; popfl");
-+}
++/* in RC traffic, insert an empty RDMA READ every this many packets */
++#define ACK_CIRC_THRESHOLD 2000000
 +
- static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
- 				  void *control_word)
- {
-@@ -439,8 +442,6 @@ static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
- static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
- 			     struct cword *cword)
- {
--	asm volatile ("pushfl; popfl");
--
- 	/* padlock_xcrypt requires at least two blocks of data. */
- 	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
- 		       (PAGE_SIZE - 1)))) {
-@@ -459,7 +460,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
- 		return;
+ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
+ 				  struct ehca_wqe *wqe_p,
+ 				  struct ib_recv_wr *recv_wr)
+@@ -81,7 +84,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
+ 	if (ehca_debug_level) {
+ 		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
+ 			     ipz_rqueue);
+-		ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
++		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
  	}
  
--	asm volatile ("pushfl; popfl");		/* enforce key reload. */
- 	asm volatile ("test $1, %%cl;"
- 		      "je 1f;"
- 		      "lea -1(%%ecx), %%eax;"
-@@ -476,8 +476,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
- static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
- 				     u8 *iv, void *control_word, u32 count)
- {
--	/* Enforce key reload. */
--	asm volatile ("pushfl; popfl");
- 	/* rep xcryptcbc */
- 	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
- 		      : "+S" (input), "+D" (output), "+a" (iv)
-@@ -488,12 +486,14 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
- static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- {
- 	struct aes_ctx *ctx = aes_ctx(tfm);
-+	padlock_reset_key();
- 	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
- }
+ 	return 0;
+@@ -135,7 +138,8 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
  
- static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ static inline int ehca_write_swqe(struct ehca_qp *qp,
+ 				  struct ehca_wqe *wqe_p,
+-				  const struct ib_send_wr *send_wr)
++				  const struct ib_send_wr *send_wr,
++				  int hidden)
  {
- 	struct aes_ctx *ctx = aes_ctx(tfm);
-+	padlock_reset_key();
- 	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
- }
- 
-@@ -526,6 +526,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
- 	struct blkcipher_walk walk;
- 	int err;
+ 	u32 idx;
+ 	u64 dma_length;
+@@ -176,7 +180,9 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
  
-+	padlock_reset_key();
-+
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	err = blkcipher_walk_virt(desc, &walk);
+ 	wqe_p->wr_flag = 0;
  
-@@ -548,6 +550,8 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
- 	struct blkcipher_walk walk;
- 	int err;
+-	if (send_wr->send_flags & IB_SEND_SIGNALED)
++	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
++	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
++	    && !hidden)
+ 		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
  
-+	padlock_reset_key();
-+
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	err = blkcipher_walk_virt(desc, &walk);
+ 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
+@@ -199,7 +205,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
  
-@@ -592,6 +596,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
- 	struct blkcipher_walk walk;
- 	int err;
+ 		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
+ 		wqe_p->local_ee_context_qkey = remote_qkey;
+-		if (!send_wr->wr.ud.ah) {
++		if (unlikely(!send_wr->wr.ud.ah)) {
+ 			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
+ 			return -EINVAL;
+ 		}
+@@ -255,6 +261,15 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
+ 		} /* eof idx */
+ 		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
  
-+	padlock_reset_key();
++		/* unsolicited ack circumvention */
++		if (send_wr->opcode == IB_WR_RDMA_READ) {
++			/* on RDMA read, switch on and reset counters */
++			qp->message_count = qp->packet_count = 0;
++			qp->unsol_ack_circ = 1;
++		} else
++			/* else estimate #packets */
++			qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
 +
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	err = blkcipher_walk_virt(desc, &walk);
+ 		break;
  
-@@ -616,6 +622,8 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
- 	struct blkcipher_walk walk;
- 	int err;
+ 	default:
+@@ -355,13 +370,49 @@ static inline void map_ib_wc_status(u32 cqe_status,
+ 		*wc_status = IB_WC_SUCCESS;
+ }
  
-+	padlock_reset_key();
++static inline int post_one_send(struct ehca_qp *my_qp,
++			 struct ib_send_wr *cur_send_wr,
++			 struct ib_send_wr **bad_send_wr,
++			 int hidden)
++{
++	struct ehca_wqe *wqe_p;
++	int ret;
++	u64 start_offset = my_qp->ipz_squeue.current_q_offset;
 +
- 	blkcipher_walk_init(&walk, dst, src, nbytes);
- 	err = blkcipher_walk_virt(desc, &walk);
- 
-diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
-index d59b2f4..bcf52df 100644
---- a/drivers/dma/dmaengine.c
-+++ b/drivers/dma/dmaengine.c
-@@ -41,12 +41,12 @@
-  * the definition of dma_event_callback in dmaengine.h.
-  *
-  * Each device has a kref, which is initialized to 1 when the device is
-- * registered. A kref_get is done for each class_device registered.  When the
-- * class_device is released, the coresponding kref_put is done in the release
-+ * registered. A kref_get is done for each device registered.  When the
-+ * device is released, the coresponding kref_put is done in the release
-  * method. Every time one of the device's channels is allocated to a client,
-  * a kref_get occurs.  When the channel is freed, the coresponding kref_put
-  * happens. The device's release function does a completion, so
-- * unregister_device does a remove event, class_device_unregister, a kref_put
-+ * unregister_device does a remove event, device_unregister, a kref_put
-  * for the first reference, then waits on the completion for all other
-  * references to finish.
-  *
-@@ -77,9 +77,9 @@ static LIST_HEAD(dma_client_list);
- 
- /* --- sysfs implementation --- */
- 
--static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
-+static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
++	/* get pointer next to free WQE */
++	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
++	if (unlikely(!wqe_p)) {
++		/* too many posted work requests: queue overflow */
++		if (bad_send_wr)
++			*bad_send_wr = cur_send_wr;
++		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
++			 "qp_num=%x", my_qp->ib_qp.qp_num);
++		return -ENOMEM;
++	}
++	/* write a SEND WQE into the QUEUE */
++	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
++	/*
++	 * if something failed,
++	 * reset the free entry pointer to the start value
++	 */
++	if (unlikely(ret)) {
++		my_qp->ipz_squeue.current_q_offset = start_offset;
++		if (bad_send_wr)
++			*bad_send_wr = cur_send_wr;
++		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
++			 "qp_num=%x", my_qp->ib_qp.qp_num);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ int ehca_post_send(struct ib_qp *qp,
+ 		   struct ib_send_wr *send_wr,
+ 		   struct ib_send_wr **bad_send_wr)
  {
--	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
-+	struct dma_chan *chan = to_dma_chan(dev);
- 	unsigned long count = 0;
- 	int i;
+ 	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+ 	struct ib_send_wr *cur_send_wr;
+-	struct ehca_wqe *wqe_p;
+ 	int wqe_cnt = 0;
+ 	int ret = 0;
+ 	unsigned long flags;
+@@ -369,37 +420,33 @@ int ehca_post_send(struct ib_qp *qp,
+ 	/* LOCK the QUEUE */
+ 	spin_lock_irqsave(&my_qp->spinlock_s, flags);
  
-@@ -89,9 +89,10 @@ static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
- 	return sprintf(buf, "%lu\n", count);
++	/* Send an empty extra RDMA read if:
++	 *  1) there has been an RDMA read on this connection before
++	 *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
++	 *  3) we can be sure that any previous extra RDMA read has been
++	 *     processed so we don't overflow the SQ
++	 */
++	if (unlikely(my_qp->unsol_ack_circ &&
++		     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
++		     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
++		/* insert an empty RDMA READ to fix up the remote QP state */
++		struct ib_send_wr circ_wr;
++		memset(&circ_wr, 0, sizeof(circ_wr));
++		circ_wr.opcode = IB_WR_RDMA_READ;
++		post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
++		wqe_cnt++;
++		ehca_dbg(qp->device, "posted circ wr  qp_num=%x", qp->qp_num);
++		my_qp->message_count = my_qp->packet_count = 0;
++	}
++
+ 	/* loop processes list of send reqs */
+ 	for (cur_send_wr = send_wr; cur_send_wr != NULL;
+ 	     cur_send_wr = cur_send_wr->next) {
+-		u64 start_offset = my_qp->ipz_squeue.current_q_offset;
+-		/* get pointer next to free WQE */
+-		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
+-		if (unlikely(!wqe_p)) {
+-			/* too many posted work requests: queue overflow */
+-			if (bad_send_wr)
+-				*bad_send_wr = cur_send_wr;
+-			if (wqe_cnt == 0) {
+-				ret = -ENOMEM;
+-				ehca_err(qp->device, "Too many posted WQEs "
+-					 "qp_num=%x", qp->qp_num);
+-			}
+-			goto post_send_exit0;
+-		}
+-		/* write a SEND WQE into the QUEUE */
+-		ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
+-		/*
+-		 * if something failed,
+-		 * reset the free entry pointer to the start value
+-		 */
++		ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
+ 		if (unlikely(ret)) {
+-			my_qp->ipz_squeue.current_q_offset = start_offset;
+-			*bad_send_wr = cur_send_wr;
+-			if (wqe_cnt == 0) {
+-				ret = -EINVAL;
+-				ehca_err(qp->device, "Could not write WQE "
+-					 "qp_num=%x", qp->qp_num);
+-			}
++			/* if one or more WQEs were successful, don't fail */
++			if (wqe_cnt)
++				ret = 0;
+ 			goto post_send_exit0;
+ 		}
+ 		wqe_cnt++;
+@@ -410,6 +457,7 @@ int ehca_post_send(struct ib_qp *qp,
+ post_send_exit0:
+ 	iosync(); /* serialize GAL register access */
+ 	hipz_update_sqa(my_qp, wqe_cnt);
++	my_qp->message_count += wqe_cnt;
+ 	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
+ 	return ret;
  }
+diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
+index f0792e5..79e72b2 100644
+--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
++++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
+@@ -40,11 +40,8 @@
+  */
  
--static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
-+static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
-+				      char *buf)
- {
--	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
-+	struct dma_chan *chan = to_dma_chan(dev);
- 	unsigned long count = 0;
- 	int i;
- 
-@@ -101,9 +102,9 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
- 	return sprintf(buf, "%lu\n", count);
- }
  
--static ssize_t show_in_use(struct class_device *cd, char *buf)
-+static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
- {
--	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
-+	struct dma_chan *chan = to_dma_chan(dev);
- 	int in_use = 0;
+-#include <linux/module.h>
+-#include <linux/err.h>
+ #include "ehca_classes.h"
+ #include "ehca_tools.h"
+-#include "ehca_qes.h"
+ #include "ehca_iverbs.h"
+ #include "hcp_if.h"
  
- 	if (unlikely(chan->slow_ref) &&
-@@ -119,7 +120,7 @@ static ssize_t show_in_use(struct class_device *cd, char *buf)
- 	return sprintf(buf, "%d\n", in_use);
- }
+@@ -93,6 +90,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
+ 		return H_PARAMETER;
+ 	}
  
--static struct class_device_attribute dma_class_attrs[] = {
-+static struct device_attribute dma_attrs[] = {
- 	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
- 	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
- 	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
-@@ -128,16 +129,16 @@ static struct class_device_attribute dma_class_attrs[] = {
++	if (ehca_nr_ports < 0) /* autodetect mode */
++		return H_SUCCESS;
++
+ 	for (counter = 0;
+ 	     shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
+ 		     counter < ehca_port_act_time;
+diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
+index 851df8a..4146210 100644
+--- a/drivers/infiniband/hw/ipath/ipath_common.h
++++ b/drivers/infiniband/hw/ipath/ipath_common.h
+@@ -82,6 +82,16 @@
+ #define IPATH_IB_LINK_EXTERNAL	7 /* normal, disable local loopback */
  
- static void dma_async_device_cleanup(struct kref *kref);
+ /*
++ * These 3 values (SDR and DDR may be ORed for auto-speed
++ * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
++ * with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs.  They
++ * are also the the possible values for ipath_link_speed_enabled and active
++ * The values were chosen to match values used within the IB spec.
++ */
++#define IPATH_IB_SDR 1
++#define IPATH_IB_DDR 2
++
++/*
+  * stats maintained by the driver.  For now, at least, this is global
+  * to all minor devices.
+  */
+@@ -433,8 +443,9 @@ struct ipath_user_info {
+ #define IPATH_CMD_UNUSED_2	26
+ #define IPATH_CMD_PIOAVAILUPD	27	/* force an update of PIOAvail reg */
+ #define IPATH_CMD_POLL_TYPE	28	/* set the kind of polling we want */
++#define IPATH_CMD_ARMLAUNCH_CTRL	29 /* armlaunch detection control */
  
--static void dma_class_dev_release(struct class_device *cd)
-+static void dma_dev_release(struct device *dev)
- {
--	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
-+	struct dma_chan *chan = to_dma_chan(dev);
- 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
- }
+-#define IPATH_CMD_MAX		28
++#define IPATH_CMD_MAX		29
  
- static struct class dma_devclass = {
--	.name            = "dma",
--	.class_dev_attrs = dma_class_attrs,
--	.release = dma_class_dev_release,
-+	.name		= "dma",
-+	.dev_attrs	= dma_attrs,
-+	.dev_release	= dma_dev_release,
+ /*
+  * Poll types
+@@ -477,6 +488,8 @@ struct ipath_cmd {
+ 		__u64 port_info;
+ 		/* enable/disable receipt of packets */
+ 		__u32 recv_ctrl;
++		/* enable/disable armlaunch errors (non-zero to enable) */
++		__u32 armlaunch_ctrl;
+ 		/* partition key to set */
+ 		__u16 part_key;
+ 		/* user address of __u32 bitmask of active slaves */
+@@ -579,7 +592,7 @@ struct ipath_flash {
+ struct infinipath_counters {
+ 	__u64 LBIntCnt;
+ 	__u64 LBFlowStallCnt;
+-	__u64 Reserved1;
++	__u64 TxSDmaDescCnt;	/* was Reserved1 */
+ 	__u64 TxUnsupVLErrCnt;
+ 	__u64 TxDataPktCnt;
+ 	__u64 TxFlowPktCnt;
+@@ -615,12 +628,26 @@ struct infinipath_counters {
+ 	__u64 RxP6HdrEgrOvflCnt;
+ 	__u64 RxP7HdrEgrOvflCnt;
+ 	__u64 RxP8HdrEgrOvflCnt;
+-	__u64 Reserved6;
+-	__u64 Reserved7;
++	__u64 RxP9HdrEgrOvflCnt;	/* was Reserved6 */
++	__u64 RxP10HdrEgrOvflCnt;	/* was Reserved7 */
++	__u64 RxP11HdrEgrOvflCnt;	/* new for IBA7220 */
++	__u64 RxP12HdrEgrOvflCnt;	/* new for IBA7220 */
++	__u64 RxP13HdrEgrOvflCnt;	/* new for IBA7220 */
++	__u64 RxP14HdrEgrOvflCnt;	/* new for IBA7220 */
++	__u64 RxP15HdrEgrOvflCnt;	/* new for IBA7220 */
++	__u64 RxP16HdrEgrOvflCnt;	/* new for IBA7220 */
+ 	__u64 IBStatusChangeCnt;
+ 	__u64 IBLinkErrRecoveryCnt;
+ 	__u64 IBLinkDownedCnt;
+ 	__u64 IBSymbolErrCnt;
++	/* The following are new for IBA7220 */
++	__u64 RxVL15DroppedPktCnt;
++	__u64 RxOtherLocalPhyErrCnt;
++	__u64 PcieRetryBufDiagQwordCnt;
++	__u64 ExcessBufferOvflCnt;
++	__u64 LocalLinkIntegrityErrCnt;
++	__u64 RxVlErrCnt;
++	__u64 RxDlidFltrCnt;
  };
  
- /* --- client and device registration --- */
-@@ -377,12 +378,12 @@ int dma_async_device_register(struct dma_device *device)
- 			continue;
- 
- 		chan->chan_id = chancnt++;
--		chan->class_dev.class = &dma_devclass;
--		chan->class_dev.dev = NULL;
--		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
-+		chan->dev.class = &dma_devclass;
-+		chan->dev.parent = NULL;
-+		snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
- 		         device->dev_id, chan->chan_id);
- 
--		rc = class_device_register(&chan->class_dev);
-+		rc = device_register(&chan->dev);
- 		if (rc) {
- 			chancnt--;
- 			free_percpu(chan->local);
-@@ -411,7 +412,7 @@ err_out:
- 		if (chan->local == NULL)
- 			continue;
- 		kref_put(&device->refcount, dma_async_device_cleanup);
--		class_device_unregister(&chan->class_dev);
-+		device_unregister(&chan->dev);
- 		chancnt--;
- 		free_percpu(chan->local);
+ /*
+diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
+index d1380c7..a03bd28 100644
+--- a/drivers/infiniband/hw/ipath/ipath_cq.c
++++ b/drivers/infiniband/hw/ipath/ipath_cq.c
+@@ -421,7 +421,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+ 	else
+ 		n = head - tail;
+ 	if (unlikely((u32)cqe < n)) {
+-		ret = -EOVERFLOW;
++		ret = -EINVAL;
+ 		goto bail_unlock;
  	}
-@@ -445,7 +446,7 @@ void dma_async_device_unregister(struct dma_device *device)
+ 	for (n = 0; tail != head; n++) {
+diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
+index 19c56e6..d6f6953 100644
+--- a/drivers/infiniband/hw/ipath/ipath_debug.h
++++ b/drivers/infiniband/hw/ipath/ipath_debug.h
+@@ -55,7 +55,7 @@
+ #define __IPATH_PKTDBG      0x80	/* print packet data */
+ /* print process startup (init)/exit messages */
+ #define __IPATH_PROCDBG     0x100
+-/* print mmap/nopage stuff, not using VDBG any more */
++/* print mmap/fault stuff, not using VDBG any more */
+ #define __IPATH_MMDBG       0x200
+ #define __IPATH_ERRPKTDBG   0x400
+ #define __IPATH_USER_SEND   0x1000	/* use user mode send */
+@@ -81,7 +81,7 @@
+ #define __IPATH_VERBDBG   0x0	/* very verbose debug */
+ #define __IPATH_PKTDBG    0x0	/* print packet data */
+ #define __IPATH_PROCDBG   0x0	/* process startup (init)/exit messages */
+-/* print mmap/nopage stuff, not using VDBG any more */
++/* print mmap/fault stuff, not using VDBG any more */
+ #define __IPATH_MMDBG     0x0
+ #define __IPATH_EPKTDBG   0x0	/* print ethernet packet data */
+ #define __IPATH_IPATHDBG  0x0	/* Ethernet (IPATH) table dump on */
+diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
+index 1f152de..d5ff6ca 100644
+--- a/drivers/infiniband/hw/ipath/ipath_driver.c
++++ b/drivers/infiniband/hw/ipath/ipath_driver.c
+@@ -121,6 +121,9 @@ static struct pci_driver ipath_driver = {
+ 	.probe = ipath_init_one,
+ 	.remove = __devexit_p(ipath_remove_one),
+ 	.id_table = ipath_pci_tbl,
++	.driver = {
++		.groups = ipath_driver_attr_groups,
++	},
+ };
  
- 	list_for_each_entry(chan, &device->channels, device_node) {
- 		dma_clients_notify_removed(chan);
--		class_device_unregister(&chan->class_dev);
-+		device_unregister(&chan->dev);
- 		dma_chan_release(chan);
+ static void ipath_check_status(struct work_struct *work)
+@@ -331,6 +334,8 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
+ 		udelay(1);
  	}
  
-diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
-index 70b837f..5376457 100644
---- a/drivers/edac/edac_device_sysfs.c
-+++ b/drivers/edac/edac_device_sysfs.c
-@@ -246,16 +246,6 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
++	ipath_disable_armlaunch(dd);
++
+ 	writeq(0, piobuf); /* length 0, no dwords actually sent */
+ 	ipath_flush_wc();
  
- 	/* Init the devices's kobject */
- 	memset(&edac_dev->kobj, 0, sizeof(struct kobject));
--	edac_dev->kobj.ktype = &ktype_device_ctrl;
--
--	/* set this new device under the edac_class kobject */
--	edac_dev->kobj.parent = &edac_class->kset.kobj;
--
--	/* generate sysfs "..../edac/<name>"   */
--	debugf4("%s() set name of kobject to: %s\n", __func__, edac_dev->name);
--	err = kobject_set_name(&edac_dev->kobj, "%s", edac_dev->name);
--	if (err)
--		goto err_out;
+@@ -362,6 +367,7 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
+ done:
+ 	/* disarm piobuf, so it's available again */
+ 	ipath_disarm_piobufs(dd, pbnum, 1);
++	ipath_enable_armlaunch(dd);
+ }
  
- 	/* Record which module 'owns' this control structure
- 	 * and bump the ref count of the module
-@@ -268,12 +258,15 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
- 	}
+ static int __devinit ipath_init_one(struct pci_dev *pdev,
+@@ -800,31 +806,37 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
+ 			  unsigned cnt)
+ {
+ 	unsigned i, last = first + cnt;
+-	u64 sendctrl, sendorig;
++	unsigned long flags;
  
- 	/* register */
--	err = kobject_register(&edac_dev->kobj);
-+	err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
-+				   &edac_class->kset.kobj,
-+				   "%s", edac_dev->name);
- 	if (err) {
- 		debugf1("%s()Failed to register '.../edac/%s'\n",
- 			__func__, edac_dev->name);
- 		goto err_kobj_reg;
+ 	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
+-	sendorig = dd->ipath_sendctrl;
+ 	for (i = first; i < last; i++) {
+-		sendctrl = sendorig  | INFINIPATH_S_DISARM |
+-			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
++		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
++		/*
++		 * The disarm-related bits are write-only, so it
++		 * is ok to OR them in with our copy of sendctrl
++		 * while we hold the lock.
++		 */
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+-				 sendctrl);
++			dd->ipath_sendctrl | INFINIPATH_S_DISARM |
++			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
++		/* can't disarm bufs back-to-back per iba7220 spec */
++		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
  	}
-+	kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
  
- 	/* At this point, to 'free' the control struct,
- 	 * edac_device_unregister_sysfs_main_kobj() must be used
-@@ -310,7 +303,7 @@ void edac_device_unregister_sysfs_main_kobj(
- 	 *   a) module_put() this module
- 	 *   b) 'kfree' the memory
+ 	/*
+-	 * Write it again with current value, in case ipath_sendctrl changed
+-	 * while we were looping; no critical bits that would require
+-	 * locking.
+-	 *
+-	 * disable PIOAVAILUPD, then re-enable, reading scratch in
++	 * Disable PIOAVAILUPD, then re-enable, reading scratch in
+ 	 * between.  This seems to avoid a chip timing race that causes
+-	 * pioavail updates to memory to stop.
++	 * pioavail updates to memory to stop.  We xor as we don't
++	 * know the state of the bit when we're called.
  	 */
--	kobject_unregister(&edac_dev->kobj);
-+	kobject_put(&edac_dev->kobj);
++	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+-			 sendorig & ~INFINIPATH_S_PIOBUFAVAILUPD);
+-	sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++		dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
++	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ 			 dd->ipath_sendctrl);
++	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
  }
  
- /* edac_dev -> instance information */
-@@ -533,12 +526,6 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
- 
- 	/* init this block's kobject */
- 	memset(&block->kobj, 0, sizeof(struct kobject));
--	block->kobj.parent = &instance->kobj;
--	block->kobj.ktype = &ktype_block_ctrl;
--
--	err = kobject_set_name(&block->kobj, "%s", block->name);
--	if (err)
--		return err;
- 
- 	/* bump the main kobject's reference count for this controller
- 	 * and this instance is dependant on the main
-@@ -550,7 +537,9 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
- 	}
+ /**
+@@ -1000,12 +1012,10 @@ static void get_rhf_errstring(u32 err, char *msg, size_t len)
+  * ipath_get_egrbuf - get an eager buffer
+  * @dd: the infinipath device
+  * @bufnum: the eager buffer to get
+- * @err: unused
+  *
+  * must only be called if ipath_pd[port] is known to be allocated
+  */
+-static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
+-				     int err)
++static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
+ {
+ 	return dd->ipath_port0_skbinfo ?
+ 		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
+@@ -1097,13 +1107,14 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
  
- 	/* Add this block's kobject */
--	err = kobject_register(&block->kobj);
-+	err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl,
-+				   &instance->kobj,
-+				   "%s", block->name);
- 	if (err) {
- 		debugf1("%s() Failed to register instance '%s'\n",
- 			__func__, block->name);
-@@ -579,12 +568,13 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
- 				goto err_on_attrib;
- 		}
+ /*
+  * ipath_kreceive - receive a packet
+- * @dd: the infinipath device
++ * @pd: the infinipath port
+  *
+  * called from interrupt handler for errors or receive interrupt
+  */
+-void ipath_kreceive(struct ipath_devdata *dd)
++void ipath_kreceive(struct ipath_portdata *pd)
+ {
+ 	u64 *rc;
++	struct ipath_devdata *dd = pd->port_dd;
+ 	void *ebuf;
+ 	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
+ 	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
+@@ -1118,8 +1129,8 @@ void ipath_kreceive(struct ipath_devdata *dd)
+ 		goto bail;
  	}
-+	kobject_uevent(&block->kobj, KOBJ_ADD);
- 
- 	return 0;
  
- 	/* Error unwind stack */
- err_on_attrib:
--	kobject_unregister(&block->kobj);
-+	kobject_put(&block->kobj);
+-	l = dd->ipath_port0head;
+-	hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
++	l = pd->port_head;
++	hdrqtail = ipath_get_rcvhdrtail(pd);
+ 	if (l == hdrqtail)
+ 		goto bail;
  
- err_out:
- 	return err;
-@@ -615,7 +605,7 @@ static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
- 	/* unregister this block's kobject, SEE:
- 	 *	edac_device_ctrl_block_release() callback operation
- 	 */
--	kobject_unregister(&block->kobj);
-+	kobject_put(&block->kobj);
- }
+@@ -1128,7 +1139,7 @@ reloop:
+ 		u32 qp;
+ 		u8 *bthbytes;
  
- /* instance ctor/dtor code */
-@@ -637,15 +627,8 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
- 	/* Init the instance's kobject */
- 	memset(&instance->kobj, 0, sizeof(struct kobject));
+-		rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
++		rc = (u64 *) (pd->port_rcvhdrq + (l << 2));
+ 		hdr = (struct ipath_message_header *)&rc[1];
+ 		/*
+ 		 * could make a network order version of IPATH_KD_QP, and
+@@ -1153,7 +1164,7 @@ reloop:
+ 			etail = ipath_hdrget_index((__le32 *) rc);
+ 			if (tlen > sizeof(*hdr) ||
+ 			    etype == RCVHQ_RCV_TYPE_NON_KD)
+-				ebuf = ipath_get_egrbuf(dd, etail, 0);
++				ebuf = ipath_get_egrbuf(dd, etail);
+ 		}
  
--	/* set this new device under the edac_device main kobject */
--	instance->kobj.parent = &edac_dev->kobj;
--	instance->kobj.ktype = &ktype_instance_ctrl;
- 	instance->ctl = edac_dev;
+ 		/*
+@@ -1188,7 +1199,7 @@ reloop:
+ 				  be32_to_cpu(hdr->bth[0]) & 0xff);
+ 		else {
+ 			/*
+-			 * error packet, type of error	unknown.
++			 * error packet, type of error unknown.
+ 			 * Probably type 3, but we don't know, so don't
+ 			 * even try to print the opcode, etc.
+ 			 */
+@@ -1238,7 +1249,7 @@ reloop:
+ 		 * earlier packets, we "almost" guarantee we have covered
+ 		 * that case.
+ 		 */
+-		u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
++		u32 hqtail = ipath_get_rcvhdrtail(pd);
+ 		if (hqtail != hdrqtail) {
+ 			hdrqtail = hqtail;
+ 			reloop = 1; /* loop 1 extra time at most */
+@@ -1248,7 +1259,7 @@ reloop:
  
--	err = kobject_set_name(&instance->kobj, "%s", instance->name);
--	if (err)
--		goto err_out;
--
- 	/* bump the main kobject's reference count for this controller
- 	 * and this instance is dependant on the main
- 	 */
-@@ -655,8 +638,9 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
- 		goto err_out;
- 	}
+ 	pkttot += i;
  
--	/* Formally register this instance's kobject */
--	err = kobject_register(&instance->kobj);
-+	/* Formally register this instance's kobject under the edac_device */
-+	err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
-+				   &edac_dev->kobj, "%s", instance->name);
- 	if (err != 0) {
- 		debugf2("%s() Failed to register instance '%s'\n",
- 			__func__, instance->name);
-@@ -679,6 +663,7 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
- 			goto err_release_instance_kobj;
- 		}
- 	}
-+	kobject_uevent(&instance->kobj, KOBJ_ADD);
+-	dd->ipath_port0head = l;
++	pd->port_head = l;
  
- 	debugf4("%s() Registered instance %d '%s' kobject\n",
- 		__func__, idx, instance->name);
-@@ -687,7 +672,7 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+ 	if (pkttot > ipath_stats.sps_maxpkts_call)
+ 		ipath_stats.sps_maxpkts_call = pkttot;
+@@ -1332,14 +1343,9 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
+ 		/*
+ 		 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
+ 		 */
+-		if (i > 3) {
+-			if (i & 1)
+-				piov = le64_to_cpu(
+-					dd->ipath_pioavailregs_dma[i - 1]);
+-			else
+-				piov = le64_to_cpu(
+-					dd->ipath_pioavailregs_dma[i + 1]);
+-		} else
++		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
++			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
++		else
+ 			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
+ 		pchg = _IPATH_ALL_CHECKBITS &
+ 			~(dd->ipath_pioavailshadow[i] ^ piov);
+@@ -1598,7 +1604,8 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
  
- 	/* error unwind stack */
- err_release_instance_kobj:
--	kobject_unregister(&instance->kobj);
-+	kobject_put(&instance->kobj);
+ 	/* clear for security and sanity on each use */
+ 	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
+-	memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
++	if (pd->port_rcvhdrtail_kvaddr)
++		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
  
- err_out:
- 	return err;
-@@ -712,7 +697,7 @@ static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
- 	/* unregister this instance's kobject, SEE:
- 	 *	edac_device_ctrl_instance_release() for callback operation
- 	 */
--	kobject_unregister(&instance->kobj);
-+	kobject_put(&instance->kobj);
+ 	/*
+ 	 * tell chip each time we init it, even if we are re-using previous
+@@ -1614,77 +1621,6 @@ bail:
+ 	return ret;
  }
  
- /*
-diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
-index 3706b2b..9aac880 100644
---- a/drivers/edac/edac_mc_sysfs.c
-+++ b/drivers/edac/edac_mc_sysfs.c
-@@ -380,13 +380,6 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
- 	/* generate ..../edac/mc/mc<id>/csrow<index>   */
- 	memset(&csrow->kobj, 0, sizeof(csrow->kobj));
- 	csrow->mci = mci;	/* include container up link */
--	csrow->kobj.parent = kobj_mci;
--	csrow->kobj.ktype = &ktype_csrow;
+-int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
+-			   u64 bits_to_wait_for, u64 * valp)
+-{
+-	unsigned long timeout;
+-	u64 lastval, val;
+-	int ret;
 -
--	/* name this instance of csrow<id> */
--	err = kobject_set_name(&csrow->kobj, "csrow%d", index);
--	if (err)
--		goto err_out;
- 
- 	/* bump the mci instance's kobject's ref count */
- 	kobj = kobject_get(&mci->edac_mci_kobj);
-@@ -396,12 +389,13 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
- 	}
- 
- 	/* Instanstiate the csrow object */
--	err = kobject_register(&csrow->kobj);
-+	err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci,
-+				   "csrow%d", index);
- 	if (err)
- 		goto err_release_top_kobj;
- 
- 	/* At this point, to release a csrow kobj, one must
--	 * call the kobject_unregister and allow that tear down
-+	 * call the kobject_put and allow that tear down
- 	 * to work the releasing
- 	 */
- 
-@@ -412,11 +406,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
- 		err = edac_create_channel_files(&csrow->kobj, chan);
- 		if (err) {
- 			/* special case the unregister here */
--			kobject_unregister(&csrow->kobj);
-+			kobject_put(&csrow->kobj);
- 			goto err_out;
- 		}
- 	}
+-	lastval = ipath_read_kreg64(dd, reg_id);
+-	/* wait a ridiculously long time */
+-	timeout = jiffies + msecs_to_jiffies(5);
+-	do {
+-		val = ipath_read_kreg64(dd, reg_id);
+-		/* set so they have something, even on failures. */
+-		*valp = val;
+-		if ((val & bits_to_wait_for) == bits_to_wait_for) {
+-			ret = 0;
+-			break;
+-		}
+-		if (val != lastval)
+-			ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
+-				   "waiting for %llx bits\n",
+-				   (unsigned long long) lastval,
+-				   (unsigned long long) val,
+-				   (unsigned long long) bits_to_wait_for);
+-		cond_resched();
+-		if (time_after(jiffies, timeout)) {
+-			ipath_dbg("Didn't get bits %llx in register 0x%x, "
+-				  "got %llx\n",
+-				  (unsigned long long) bits_to_wait_for,
+-				  reg_id, (unsigned long long) *valp);
+-			ret = -ENODEV;
+-			break;
+-		}
+-	} while (1);
 -
-+	kobject_uevent(&csrow->kobj, KOBJ_ADD);
- 	return 0;
- 
- 	/* error unwind stack */
-@@ -744,7 +738,6 @@ static struct kobj_type ktype_mc_set_attribs = {
-  */
- static struct kset mc_kset = {
- 	.kobj = {.ktype = &ktype_mc_set_attribs },
--	.ktype = &ktype_mci,
- };
- 
- 
-@@ -765,14 +758,6 @@ int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
- 	/* Init the mci's kobject */
- 	memset(kobj_mci, 0, sizeof(*kobj_mci));
- 
--	/* this instance become part of the mc_kset */
--	kobj_mci->kset = &mc_kset;
+-	return ret;
+-}
 -
--	/* set the name of the mc<id> object */
--	err = kobject_set_name(kobj_mci, "mc%d", mci->mc_idx);
--	if (err)
--		goto fail_out;
+-/**
+- * ipath_waitfor_mdio_cmdready - wait for last command to complete
+- * @dd: the infinipath device
+- *
+- * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
+- * away indicating the last command has completed.  It doesn't return data
+- */
+-int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
+-{
+-	unsigned long timeout;
+-	u64 val;
+-	int ret;
+-
+-	/* wait a ridiculously long time */
+-	timeout = jiffies + msecs_to_jiffies(5);
+-	do {
+-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
+-		if (!(val & IPATH_MDIO_CMDVALID)) {
+-			ret = 0;
+-			break;
+-		}
+-		cond_resched();
+-		if (time_after(jiffies, timeout)) {
+-			ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
+-				  (unsigned long long) val);
+-			ret = -ENODEV;
+-			break;
+-		}
+-	} while (1);
+-
+-	return ret;
+-}
 -
- 	/* Record which module 'owns' this control structure
- 	 * and bump the ref count of the module
- 	 */
-@@ -784,13 +769,18 @@ int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
- 		goto fail_out;
- 	}
- 
-+	/* this instance become part of the mc_kset */
-+	kobj_mci->kset = &mc_kset;
-+
- 	/* register the mc<id> kobject to the mc_kset */
--	err = kobject_register(kobj_mci);
-+	err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
-+				   "mc%d", mci->mc_idx);
- 	if (err) {
- 		debugf1("%s()Failed to register '.../edac/mc%d'\n",
- 			__func__, mci->mc_idx);
- 		goto kobj_reg_fail;
- 	}
-+	kobject_uevent(kobj_mci, KOBJ_ADD);
  
- 	/* At this point, to 'free' the control struct,
- 	 * edac_mc_unregister_sysfs_main_kobj() must be used
-@@ -818,7 +808,7 @@ fail_out:
- void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
+ /*
+  * Flush all sends that might be in the ready to send state, as well as any
+@@ -2053,6 +1989,8 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
+  */
+ void ipath_shutdown_device(struct ipath_devdata *dd)
  {
- 	/* delete the kobj from the mc_kset */
--	kobject_unregister(&mci->edac_mci_kobj);
-+	kobject_put(&mci->edac_mci_kobj);
- }
++	unsigned long flags;
++
+ 	ipath_dbg("Shutting down the device\n");
  
- #define EDAC_DEVICE_SYMLINK	"device"
-@@ -933,7 +923,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
- fail1:
- 	for (i--; i >= 0; i--) {
- 		if (csrow->nr_pages > 0) {
--			kobject_unregister(&mci->csrows[i].kobj);
-+			kobject_put(&mci->csrows[i].kobj);
- 		}
+ 	dd->ipath_flags |= IPATH_LINKUNK;
+@@ -2073,9 +2011,13 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
+ 	 * gracefully stop all sends allowing any in progress to trickle out
+ 	 * first.
+ 	 */
+-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
++	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
++	dd->ipath_sendctrl = 0;
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+ 	/* flush it */
+ 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
++
+ 	/*
+ 	 * enough for anything that's going to trickle out to have actually
+ 	 * done so.
+@@ -2217,25 +2159,15 @@ static int __init infinipath_init(void)
+ 		goto bail_unit;
  	}
  
-@@ -960,7 +950,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
- 	for (i = 0; i < mci->nr_csrows; i++) {
- 		if (mci->csrows[i].nr_pages > 0) {
- 			debugf0("%s()  unreg csrow-%d\n", __func__, i);
--			kobject_unregister(&mci->csrows[i].kobj);
-+			kobject_put(&mci->csrows[i].kobj);
- 		}
+-	ret = ipath_driver_create_group(&ipath_driver.driver);
+-	if (ret < 0) {
+-		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
+-		       "sysfs entries: error %d\n", -ret);
+-		goto bail_pci;
+-	}
+-
+ 	ret = ipath_init_ipathfs();
+ 	if (ret < 0) {
+ 		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
+ 		       "ipathfs: error %d\n", -ret);
+-		goto bail_group;
++		goto bail_pci;
  	}
  
-@@ -977,7 +967,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
- 	debugf0("%s()  unregister this mci kobj\n", __func__);
- 
- 	/* unregister this instance's kobject */
--	kobject_unregister(&mci->edac_mci_kobj);
-+	kobject_put(&mci->edac_mci_kobj);
- }
- 
- 
-diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
-index e0c4a40..7e1374a 100644
---- a/drivers/edac/edac_module.c
-+++ b/drivers/edac/edac_module.c
-@@ -31,7 +31,7 @@ struct workqueue_struct *edac_workqueue;
-  *	need to export to other files in this modules
-  */
- static struct sysdev_class edac_class = {
--	set_kset_name("edac"),
-+	.name = "edac",
- };
- static int edac_class_valid;
+ 	goto bail;
  
-diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
-index 69f5ddd..5b075da 100644
---- a/drivers/edac/edac_pci_sysfs.c
-+++ b/drivers/edac/edac_pci_sysfs.c
-@@ -162,14 +162,6 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+-bail_group:
+-	ipath_driver_remove_group(&ipath_driver.driver);
+-
+ bail_pci:
+ 	pci_unregister_driver(&ipath_driver);
  
- 	debugf0("%s()\n", __func__);
+@@ -2250,8 +2182,6 @@ static void __exit infinipath_cleanup(void)
+ {
+ 	ipath_exit_ipathfs();
  
--	/* Set the parent and the instance's ktype */
--	pci->kobj.parent = &edac_pci_top_main_kobj;
--	pci->kobj.ktype = &ktype_pci_instance;
--
--	err = kobject_set_name(&pci->kobj, "pci%d", idx);
--	if (err)
--		return err;
+-	ipath_driver_remove_group(&ipath_driver.driver);
 -
- 	/* First bump the ref count on the top main kobj, which will
- 	 * track the number of PCI instances we have, and thus nest
- 	 * properly on keeping the module loaded
-@@ -181,7 +173,8 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
- 	}
+ 	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
+ 	pci_unregister_driver(&ipath_driver);
  
- 	/* And now register this new kobject under the main kobj */
--	err = kobject_register(&pci->kobj);
-+	err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
-+				   &edac_pci_top_main_kobj, "pci%d", idx);
- 	if (err != 0) {
- 		debugf2("%s() failed to register instance pci%d\n",
- 			__func__, idx);
-@@ -189,6 +182,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
- 		goto error_out;
+@@ -2344,5 +2274,34 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
  	}
- 
-+	kobject_uevent(&pci->kobj, KOBJ_ADD);
- 	debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
- 
  	return 0;
-@@ -211,7 +205,7 @@ void edac_pci_unregister_sysfs_instance_kobj(struct edac_pci_ctl_info *pci)
- 	 * function release the main reference count and then
- 	 * kfree the memory
- 	 */
--	kobject_unregister(&pci->kobj);
-+	kobject_put(&pci->kobj);
  }
++
++/*
++ * Disable and enable the armlaunch error.  Used for PIO bandwidth testing on
++ * the 7220, which is count-based, rather than trigger-based.  Safe for the
++ * driver check, since it's at init.   Not completely safe when used for
++ * user-mode checking, since some error checking can be lost, but not
++ * particularly risky, and only has problematic side-effects in the face of
++ * very buggy user code.  There is no reference counting, but that's also
++ * fine, given the intended use.
++ */
++void ipath_enable_armlaunch(struct ipath_devdata *dd)
++{
++	dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
++		INFINIPATH_E_SPIOARMLAUNCH);
++	dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
++		dd->ipath_errormask);
++}
++
++void ipath_disable_armlaunch(struct ipath_devdata *dd)
++{
++	/* so don't re-enable if already set */
++	dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
++	dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
++		dd->ipath_errormask);
++}
++
+ module_init(infinipath_init);
+ module_exit(infinipath_cleanup);
+diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
+index e7c25db..e28a42f 100644
+--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
++++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
+@@ -510,10 +510,10 @@ int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
+ {
+ 	int ret;
  
- /***************************** EDAC PCI sysfs root **********************/
-@@ -364,14 +358,6 @@ int edac_pci_main_kobj_setup(void)
- 		goto decrement_count_fail;
+-	ret = down_interruptible(&dd->ipath_eep_sem);
++	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
+ 	if (!ret) {
+ 		ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
+-		up(&dd->ipath_eep_sem);
++		mutex_unlock(&dd->ipath_eep_lock);
  	}
  
--	/* Need the kobject hook ups, and name setting */
--	edac_pci_top_main_kobj.ktype = &ktype_edac_pci_main_kobj;
--	edac_pci_top_main_kobj.parent = &edac_class->kset.kobj;
--
--	err = kobject_set_name(&edac_pci_top_main_kobj, "pci");
--	if (err)
--		goto decrement_count_fail;
--
- 	/* Bump the reference count on this module to ensure the
- 	 * modules isn't unloaded until we deconstruct the top
- 	 * level main kobj for EDAC PCI
-@@ -383,23 +369,24 @@ int edac_pci_main_kobj_setup(void)
- 	}
+ 	return ret;
+@@ -524,10 +524,10 @@ int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
+ {
+ 	int ret;
  
- 	/* Instanstiate the pci object */
--	/* FIXME: maybe new sysdev_create_subdir() */
--	err = kobject_register(&edac_pci_top_main_kobj);
-+	err = kobject_init_and_add(&edac_pci_top_main_kobj, &ktype_edac_pci_main_kobj,
-+				   &edac_class->kset.kobj, "pci");
- 	if (err) {
- 		debugf1("Failed to register '.../edac/pci'\n");
--		goto kobject_register_fail;
-+		goto kobject_init_and_add_fail;
+-	ret = down_interruptible(&dd->ipath_eep_sem);
++	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
+ 	if (!ret) {
+ 		ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
+-		up(&dd->ipath_eep_sem);
++		mutex_unlock(&dd->ipath_eep_lock);
  	}
  
- 	/* At this point, to 'release' the top level kobject
- 	 * for EDAC PCI, then edac_pci_main_kobj_teardown()
- 	 * must be used, for resources to be cleaned up properly
- 	 */
-+	kobject_uevent(&edac_pci_top_main_kobj, KOBJ_ADD);
- 	debugf1("Registered '.../edac/pci' kobject\n");
- 
- 	return 0;
+ 	return ret;
+@@ -574,7 +574,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
+ 	struct ipath_devdata *dd0 = ipath_lookup(0);
  
- 	/* Error unwind statck */
--kobject_register_fail:
-+kobject_init_and_add_fail:
- 	module_put(THIS_MODULE);
+ 	if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
+-		u8 *bguid, oguid;
++		u8 oguid;
+ 		dd->ipath_guid = dd0->ipath_guid;
+ 		bguid = (u8 *) & dd->ipath_guid;
  
- decrement_count_fail:
-@@ -424,9 +411,9 @@ static void edac_pci_main_kobj_teardown(void)
- 	 * main kobj
- 	 */
- 	if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
--		debugf0("%s() called kobject_unregister on main kobj\n",
-+		debugf0("%s() called kobject_put on main kobj\n",
- 			__func__);
--		kobject_unregister(&edac_pci_top_main_kobj);
-+		kobject_put(&edac_pci_top_main_kobj);
+@@ -616,9 +616,9 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
+ 		goto bail;
  	}
- }
  
-diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
-index 624ff3e..c2169d2 100644
---- a/drivers/firewire/fw-sbp2.c
-+++ b/drivers/firewire/fw-sbp2.c
-@@ -1238,6 +1238,12 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+-	down(&dd->ipath_eep_sem);
++	mutex_lock(&dd->ipath_eep_lock);
+ 	eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
+-	up(&dd->ipath_eep_sem);
++	mutex_unlock(&dd->ipath_eep_lock);
  
- 	sdev->allow_restart = 1;
+ 	if (eep_stat) {
+ 		ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
+@@ -674,7 +674,6 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
+ 		 * elsewhere for backward-compatibility.
+ 		 */
+ 		char *snp = dd->ipath_serial;
+-		int len;
+ 		memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
+ 		snp[sizeof ifp->if_sprefix] = '\0';
+ 		len = strlen(snp);
+@@ -764,14 +763,14 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
+ 	/* Grab semaphore and read current EEPROM. If we get an
+ 	 * error, let go, but if not, keep it until we finish write.
+ 	 */
+-	ret = down_interruptible(&dd->ipath_eep_sem);
++	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
+ 	if (ret) {
+ 		ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+ 		goto free_bail;
+ 	}
+ 	ret = ipath_eeprom_internal_read(dd, 0, buf, len);
+ 	if (ret) {
+-		up(&dd->ipath_eep_sem);
++		mutex_unlock(&dd->ipath_eep_lock);
+ 		ipath_dev_err(dd, "Unable read EEPROM for logging\n");
+ 		goto free_bail;
+ 	}
+@@ -779,7 +778,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
  
-+	/*
-+	 * Update the dma alignment (minimum alignment requirements for
-+	 * start and end of DMA transfers) to be a sector
-+	 */
-+	blk_queue_update_dma_alignment(sdev->request_queue, 511);
-+
- 	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
- 		sdev->inquiry_len = 36;
+ 	csum = flash_csum(ifp, 0);
+ 	if (csum != ifp->if_csum) {
+-		up(&dd->ipath_eep_sem);
++		mutex_unlock(&dd->ipath_eep_lock);
+ 		ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+ 				csum, ifp->if_csum);
+ 		ret = 1;
+@@ -849,7 +848,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
+ 		csum = flash_csum(ifp, 1);
+ 		ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
+ 	}
+-	up(&dd->ipath_eep_sem);
++	mutex_unlock(&dd->ipath_eep_lock);
+ 	if (ret)
+ 		ipath_dev_err(dd, "Failed updating EEPROM\n");
  
-diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
-index 5e596a7..9008ed5 100644
---- a/drivers/firmware/dmi_scan.c
-+++ b/drivers/firmware/dmi_scan.c
-@@ -8,6 +8,8 @@
- #include <linux/slab.h>
- #include <asm/dmi.h>
+diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
+index 5de3243..7e025c8 100644
+--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
++++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
+@@ -169,7 +169,7 @@ static int ipath_get_base_info(struct file *fp,
+ 		kinfo->spi_piocnt = dd->ipath_pbufsport;
+ 		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
+ 		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
+-			dd->ipath_palign * pd->port_port;
++			dd->ipath_ureg_align * pd->port_port;
+ 	} else if (master) {
+ 		kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
+ 				    (dd->ipath_pbufsport % subport_cnt);
+@@ -186,7 +186,7 @@ static int ipath_get_base_info(struct file *fp,
+ 	}
+ 	if (shared) {
+ 		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
+-			dd->ipath_palign * pd->port_port;
++			dd->ipath_ureg_align * pd->port_port;
+ 		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
+ 		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
+ 		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
+@@ -742,11 +742,12 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
+ 		 * updated and correct itself, even in the face of software
+ 		 * bugs.
+ 		 */
+-		*(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0;
+-		set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
++		if (pd->port_rcvhdrtail_kvaddr)
++			ipath_clear_rcvhdrtail(pd);
++		set_bit(dd->ipath_r_portenable_shift + pd->port_port,
+ 			&dd->ipath_rcvctrl);
+ 	} else
+-		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
++		clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
+ 			  &dd->ipath_rcvctrl);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ 			 dd->ipath_rcvctrl);
+@@ -881,7 +882,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
  
-+static char dmi_empty_string[] = "        ";
-+
- static char * __init dmi_string(const struct dmi_header *dm, u8 s)
- {
- 	const u8 *bp = ((u8 *) dm) + dm->length;
-@@ -21,11 +23,16 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
- 		}
+ 	egrcnt = dd->ipath_rcvegrcnt;
+ 	/* TID number offset for this port */
+-	egroff = pd->port_port * egrcnt;
++	egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
+ 	egrsize = dd->ipath_rcvegrbufsize;
+ 	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
+ 		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
+@@ -1049,11 +1050,6 @@ static int mmap_piobufs(struct vm_area_struct *vma,
  
- 		if (*bp != 0) {
--			str = dmi_alloc(strlen(bp) + 1);
-+			size_t len = strlen(bp)+1;
-+			size_t cmp_len = len > 8 ? 8 : len;
-+
-+			if (!memcmp(bp, dmi_empty_string, cmp_len))
-+				return dmi_empty_string;
-+			str = dmi_alloc(len);
- 			if (str != NULL)
- 				strcpy(str, bp);
- 			else
--				printk(KERN_ERR "dmi_string: out of memory.\n");
-+				printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
- 		}
- 	}
+ 	phys = dd->ipath_physaddr + piobufs;
  
-@@ -175,12 +182,23 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
- 	}
+-	/*
+-	 * Don't mark this as non-cached, or we don't get the
+-	 * write combining behavior we want on the PIO buffers!
+-	 */
+-
+ #if defined(__powerpc__)
+ 	/* There isn't a generic way to specify writethrough mappings */
+ 	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+@@ -1120,33 +1116,24 @@ bail:
  }
  
-+static struct dmi_device empty_oem_string_dev = {
-+	.name = dmi_empty_string,
-+};
-+
- static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
+ /*
+- * ipath_file_vma_nopage - handle a VMA page fault.
++ * ipath_file_vma_fault - handle a VMA page fault.
+  */
+-static struct page *ipath_file_vma_nopage(struct vm_area_struct *vma,
+-					  unsigned long address, int *type)
++static int ipath_file_vma_fault(struct vm_area_struct *vma,
++					struct vm_fault *vmf)
  {
- 	int i, count = *(u8 *)(dm + 1);
- 	struct dmi_device *dev;
+-	unsigned long offset = address - vma->vm_start;
+-	struct page *page = NOPAGE_SIGBUS;
+-	void *pageptr;
++	struct page *page;
  
- 	for (i = 1; i <= count; i++) {
-+		char *devname = dmi_string(dm, i);
-+
-+		if (!strcmp(devname, dmi_empty_string)) {
-+			list_add(&empty_oem_string_dev.list, &dmi_devices);
-+			continue;
-+		}
+-	/*
+-	 * Convert the vmalloc address into a struct page.
+-	 */
+-	pageptr = (void *)(offset + (vma->vm_pgoff << PAGE_SHIFT));
+-	page = vmalloc_to_page(pageptr);
++	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+ 	if (!page)
+-		goto out;
+-
+-	/* Increment the reference count. */
++		return VM_FAULT_SIGBUS;
+ 	get_page(page);
+-	if (type)
+-		*type = VM_FAULT_MINOR;
+-out:
+-	return page;
++	vmf->page = page;
 +
- 		dev = dmi_alloc(sizeof(*dev));
- 		if (!dev) {
- 			printk(KERN_ERR
-@@ -189,7 +207,7 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
- 		}
++	return 0;
+ }
  
- 		dev->type = DMI_DEV_TYPE_OEM_STRING;
--		dev->name = dmi_string(dm, i);
-+		dev->name = devname;
- 		dev->device_data = NULL;
+ static struct vm_operations_struct ipath_file_vm_ops = {
+-	.nopage = ipath_file_vma_nopage,
++	.fault = ipath_file_vma_fault,
+ };
  
- 		list_add(&dev->list, &dmi_devices);
-@@ -331,9 +349,11 @@ void __init dmi_scan_machine(void)
- 			rc = dmi_present(q);
- 			if (!rc) {
- 				dmi_available = 1;
-+				dmi_iounmap(p, 0x10000);
- 				return;
- 			}
- 		}
-+		dmi_iounmap(p, 0x10000);
+ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
+@@ -1284,7 +1271,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
+ 		goto bail;
  	}
-  out:	printk(KERN_INFO "DMI not present or invalid.\n");
- }
-diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
-index 6942e06..d168223 100644
---- a/drivers/firmware/edd.c
-+++ b/drivers/firmware/edd.c
-@@ -631,7 +631,7 @@ static struct kobj_type edd_ktype = {
- 	.default_attrs	= def_attrs,
- };
  
--static decl_subsys(edd, &edd_ktype, NULL);
-+static struct kset *edd_kset;
+-	ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
++	ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
+ 	if (!pd->port_subport_cnt) {
+ 		/* port is not shared */
+ 		piocnt = dd->ipath_pbufsport;
+@@ -1400,7 +1387,10 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
+ 	pollflag = ipath_poll_hdrqfull(pd);
  
+ 	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
+-	tail = *(volatile u64 *)pd->port_rcvhdrtail_kvaddr;
++	if (pd->port_rcvhdrtail_kvaddr)
++		tail = ipath_get_rcvhdrtail(pd);
++	else
++		tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
  
- /**
-@@ -693,7 +693,7 @@ edd_create_symlink_to_pcidev(struct edd_device *edev)
- static inline void
- edd_device_unregister(struct edd_device *edev)
- {
--	kobject_unregister(&edev->kobj);
-+	kobject_put(&edev->kobj);
- }
+ 	if (head != tail)
+ 		pollflag |= POLLIN | POLLRDNORM;
+@@ -1410,7 +1400,7 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
+ 		/* flush waiting flag so we don't miss an event */
+ 		wmb();
  
- static void edd_populate_dir(struct edd_device * edev)
-@@ -721,12 +721,13 @@ edd_device_register(struct edd_device *edev, int i)
- 	if (!edev)
- 		return 1;
- 	edd_dev_set_info(edev, i);
--	kobject_set_name(&edev->kobj, "int13_dev%02x",
--			 0x80 + i);
--	kobj_set_kset_s(edev,edd_subsys);
--	error = kobject_register(&edev->kobj);
--	if (!error)
-+	edev->kobj.kset = edd_kset;
-+	error = kobject_init_and_add(&edev->kobj, &edd_ktype, NULL,
-+				     "int13_dev%02x", 0x80 + i);
-+	if (!error) {
- 		edd_populate_dir(edev);
-+		kobject_uevent(&edev->kobj, KOBJ_ADD);
-+	}
- 	return error;
- }
+-		set_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT,
++		set_bit(pd->port_port + dd->ipath_r_intravail_shift,
+ 			&dd->ipath_rcvctrl);
  
-@@ -755,9 +756,9 @@ edd_init(void)
- 		return 1;
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+@@ -1790,6 +1780,7 @@ static int find_shared_port(struct file *fp,
+ 			}
+ 			port_fp(fp) = pd;
+ 			subport_fp(fp) = pd->port_cnt++;
++			pd->port_subpid[subport_fp(fp)] = current->pid;
+ 			tidcursor_fp(fp) = 0;
+ 			pd->active_slaves |= 1 << subport_fp(fp);
+ 			ipath_cdbg(PROC,
+@@ -1920,8 +1911,7 @@ static int ipath_do_user_init(struct file *fp,
+ 	 */
+ 	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
+ 	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
+-	dd->ipath_lastegrheads[pd->port_port] = -1;
+-	dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
++	pd->port_lastrcvhdrqtail = -1;
+ 	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
+ 		pd->port_port, head32);
+ 	pd->port_tidcursor = 0;	/* start at beginning after open */
+@@ -1941,11 +1931,13 @@ static int ipath_do_user_init(struct file *fp,
+ 	 * We explictly set the in-memory copy to 0 beforehand, so we don't
+ 	 * have to wait to be sure the DMA update has happened.
+ 	 */
+-	*(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0ULL;
+-	set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
++	if (pd->port_rcvhdrtail_kvaddr)
++		ipath_clear_rcvhdrtail(pd);
++	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
+ 		&dd->ipath_rcvctrl);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+-			 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
++			dd->ipath_rcvctrl &
++			~(1ULL << dd->ipath_r_tailupd_shift));
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ 			 dd->ipath_rcvctrl);
+ 	/* Notify any waiting slaves */
+@@ -2022,6 +2014,7 @@ static int ipath_close(struct inode *in, struct file *fp)
+ 		 * the slave(s) don't wait for receive data forever.
+ 		 */
+ 		pd->active_slaves &= ~(1 << fd->subport);
++		pd->port_subpid[fd->subport] = 0;
+ 		mutex_unlock(&ipath_mutex);
+ 		goto bail;
  	}
+@@ -2054,9 +2047,9 @@ static int ipath_close(struct inode *in, struct file *fp)
+ 	if (dd->ipath_kregbase) {
+ 		int i;
+ 		/* atomically clear receive enable port and intr avail. */
+-		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
++		clear_bit(dd->ipath_r_portenable_shift + port,
+ 			  &dd->ipath_rcvctrl);
+-		clear_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT,
++		clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
+ 			  &dd->ipath_rcvctrl);
+ 		ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
+ 			dd->ipath_rcvctrl);
+@@ -2149,11 +2142,15 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
  
--	rc = firmware_register(&edd_subsys);
--	if (rc)
--		return rc;
-+	edd_kset = kset_create_and_add("edd", NULL, firmware_kobj);
-+	if (!edd_kset)
-+		return -ENOMEM;
+ static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
+ {
+-	u64 reg = dd->ipath_sendctrl;
++	unsigned long flags;
  
- 	for (i = 0; i < edd_num_devices() && !rc; i++) {
- 		edev = kzalloc(sizeof (*edev), GFP_KERNEL);
-@@ -773,7 +774,7 @@ edd_init(void)
- 	}
+-	clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
+-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
++	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
++		dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
++	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
++	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
  
- 	if (rc)
--		firmware_unregister(&edd_subsys);
-+		kset_unregister(edd_kset);
- 	return rc;
+ 	return 0;
  }
- 
-@@ -787,7 +788,7 @@ edd_exit(void)
- 		if ((edev = edd_devices[i]))
- 			edd_device_unregister(edev);
+@@ -2227,6 +2224,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
+ 		dest = &cmd.cmd.poll_type;
+ 		src = &ucmd->cmd.poll_type;
+ 		break;
++	case IPATH_CMD_ARMLAUNCH_CTRL:
++		copy = sizeof(cmd.cmd.armlaunch_ctrl);
++		dest = &cmd.cmd.armlaunch_ctrl;
++		src = &ucmd->cmd.armlaunch_ctrl;
++		break;
+ 	default:
+ 		ret = -EINVAL;
+ 		goto bail;
+@@ -2302,6 +2304,12 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
+ 	case IPATH_CMD_POLL_TYPE:
+ 		pd->poll_type = cmd.cmd.poll_type;
+ 		break;
++	case IPATH_CMD_ARMLAUNCH_CTRL:
++		if (cmd.cmd.armlaunch_ctrl)
++			ipath_enable_armlaunch(pd->port_dd);
++		else
++			ipath_disable_armlaunch(pd->port_dd);
++		break;
  	}
--	firmware_unregister(&edd_subsys);
-+	kset_unregister(edd_kset);
- }
- 
- late_initcall(edd_init);
-diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
-index 858a7b9..f4f709d 100644
---- a/drivers/firmware/efivars.c
-+++ b/drivers/firmware/efivars.c
-@@ -129,13 +129,6 @@ struct efivar_attribute {
- };
- 
  
--#define EFI_ATTR(_name, _mode, _show, _store) \
--struct subsys_attribute efi_attr_##_name = { \
--	.attr = {.name = __stringify(_name), .mode = _mode}, \
--	.show = _show, \
--	.store = _store, \
--};
--
- #define EFIVAR_ATTR(_name, _mode, _show, _store) \
- struct efivar_attribute efivar_attr_##_name = { \
- 	.attr = {.name = __stringify(_name), .mode = _mode}, \
-@@ -143,13 +136,6 @@ struct efivar_attribute efivar_attr_##_name = { \
- 	.store = _store, \
+ 	if (ret >= 0)
+diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
+index 262c25d..23faba9 100644
+--- a/drivers/infiniband/hw/ipath/ipath_fs.c
++++ b/drivers/infiniband/hw/ipath/ipath_fs.c
+@@ -108,21 +108,16 @@ static const struct file_operations atomic_stats_ops = {
+ 	.read = atomic_stats_read,
  };
  
--#define VAR_SUBSYS_ATTR(_name, _mode, _show, _store) \
--struct subsys_attribute var_subsys_attr_##_name = { \
--	.attr = {.name = __stringify(_name), .mode = _mode}, \
--	.show = _show, \
--	.store = _store, \
--};
+-#define NUM_COUNTERS sizeof(struct infinipath_counters) / sizeof(u64)
 -
- #define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
- #define to_efivar_entry(obj)  container_of(obj, struct efivar_entry, kobj)
+ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
+ 				    size_t count, loff_t *ppos)
+ {
+-	u64 counters[NUM_COUNTERS];
+-	u16 i;
++	struct infinipath_counters counters;
+ 	struct ipath_devdata *dd;
  
-@@ -408,21 +394,16 @@ static struct kobj_type efivar_ktype = {
- 	.default_attrs = def_attrs,
- };
+ 	dd = file->f_path.dentry->d_inode->i_private;
++	dd->ipath_f_read_counters(dd, &counters);
  
--static ssize_t
--dummy(struct kset *kset, char *buf)
--{
--	return -ENODEV;
--}
+-	for (i = 0; i < NUM_COUNTERS; i++)
+-		counters[i] = ipath_snap_cntr(dd, i);
 -
- static inline void
- efivar_unregister(struct efivar_entry *var)
- {
--	kobject_unregister(&var->kobj);
-+	kobject_put(&var->kobj);
+-	return simple_read_from_buffer(buf, count, ppos, counters,
++	return simple_read_from_buffer(buf, count, ppos, &counters,
+ 				       sizeof counters);
  }
  
+@@ -243,8 +238,7 @@ static int create_device_files(struct super_block *sb,
  
--static ssize_t
--efivar_create(struct kset *kset, const char *buf, size_t count)
-+static ssize_t efivar_create(struct kobject *kobj,
-+			     struct bin_attribute *bin_attr,
-+			     char *buf, loff_t pos, size_t count)
- {
- 	struct efi_variable *new_var = (struct efi_variable *)buf;
- 	struct efivar_entry *search_efivar, *n;
-@@ -479,8 +460,9 @@ efivar_create(struct kset *kset, const char *buf, size_t count)
- 	return count;
- }
- 
--static ssize_t
--efivar_delete(struct kset *kset, const char *buf, size_t count)
-+static ssize_t efivar_delete(struct kobject *kobj,
-+			     struct bin_attribute *bin_attr,
-+			     char *buf, loff_t pos, size_t count)
- {
- 	struct efi_variable *del_var = (struct efi_variable *)buf;
- 	struct efivar_entry *search_efivar, *n;
-@@ -537,25 +519,26 @@ efivar_delete(struct kset *kset, const char *buf, size_t count)
- 	return count;
- }
+ 	snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
+ 	ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
+-			  (struct file_operations *) &simple_dir_operations,
+-			  dd);
++			  &simple_dir_operations, dd);
+ 	if (ret) {
+ 		printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
+ 		goto bail;
+diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
+index ddbebe4..9e2ced3 100644
+--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
++++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
+@@ -148,10 +148,57 @@ struct _infinipath_do_not_use_kernel_regs {
+ 	unsigned long long ReservedSW2[4];
+ };
  
--static VAR_SUBSYS_ATTR(new_var, 0200, dummy, efivar_create);
--static VAR_SUBSYS_ATTR(del_var, 0200, dummy, efivar_delete);
-+static struct bin_attribute var_subsys_attr_new_var = {
-+	.attr = {.name = "new_var", .mode = 0200},
-+	.write = efivar_create,
+-#define IPATH_KREG_OFFSET(field) (offsetof(struct \
+-    _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
++struct _infinipath_do_not_use_counters {
++	__u64 LBIntCnt;
++	__u64 LBFlowStallCnt;
++	__u64 Reserved1;
++	__u64 TxUnsupVLErrCnt;
++	__u64 TxDataPktCnt;
++	__u64 TxFlowPktCnt;
++	__u64 TxDwordCnt;
++	__u64 TxLenErrCnt;
++	__u64 TxMaxMinLenErrCnt;
++	__u64 TxUnderrunCnt;
++	__u64 TxFlowStallCnt;
++	__u64 TxDroppedPktCnt;
++	__u64 RxDroppedPktCnt;
++	__u64 RxDataPktCnt;
++	__u64 RxFlowPktCnt;
++	__u64 RxDwordCnt;
++	__u64 RxLenErrCnt;
++	__u64 RxMaxMinLenErrCnt;
++	__u64 RxICRCErrCnt;
++	__u64 RxVCRCErrCnt;
++	__u64 RxFlowCtrlErrCnt;
++	__u64 RxBadFormatCnt;
++	__u64 RxLinkProblemCnt;
++	__u64 RxEBPCnt;
++	__u64 RxLPCRCErrCnt;
++	__u64 RxBufOvflCnt;
++	__u64 RxTIDFullErrCnt;
++	__u64 RxTIDValidErrCnt;
++	__u64 RxPKeyMismatchCnt;
++	__u64 RxP0HdrEgrOvflCnt;
++	__u64 RxP1HdrEgrOvflCnt;
++	__u64 RxP2HdrEgrOvflCnt;
++	__u64 RxP3HdrEgrOvflCnt;
++	__u64 RxP4HdrEgrOvflCnt;
++	__u64 RxP5HdrEgrOvflCnt;
++	__u64 RxP6HdrEgrOvflCnt;
++	__u64 RxP7HdrEgrOvflCnt;
++	__u64 RxP8HdrEgrOvflCnt;
++	__u64 Reserved6;
++	__u64 Reserved7;
++	__u64 IBStatusChangeCnt;
++	__u64 IBLinkErrRecoveryCnt;
++	__u64 IBLinkDownedCnt;
++	__u64 IBSymbolErrCnt;
 +};
++
++#define IPATH_KREG_OFFSET(field) (offsetof( \
++	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
+ #define IPATH_CREG_OFFSET(field) (offsetof( \
+-    struct infinipath_counters, field) / sizeof(u64))
++	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
  
--static struct subsys_attribute *var_subsys_attrs[] = {
--	&var_subsys_attr_new_var,
--	&var_subsys_attr_del_var,
--	NULL,
-+static struct bin_attribute var_subsys_attr_del_var = {
-+	.attr = {.name = "del_var", .mode = 0200},
-+	.write = efivar_delete,
- };
+ static const struct ipath_kregs ipath_ht_kregs = {
+ 	.kr_control = IPATH_KREG_OFFSET(Control),
+@@ -282,6 +329,9 @@ static const struct ipath_cregs ipath_ht_cregs = {
+ #define INFINIPATH_HWE_HTAPLL_RFSLIP        0x1000000000000000ULL
+ #define INFINIPATH_HWE_SERDESPLLFAILED      0x2000000000000000ULL
  
- /*
-  * Let's not leave out systab information that snuck into
-  * the efivars driver
-  */
--static ssize_t
--systab_read(struct kset *kset, char *buf)
-+static ssize_t systab_show(struct kobject *kobj,
-+			   struct kobj_attribute *attr, char *buf)
- {
- 	char *str = buf;
++#define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf
++#define IBA6110_IBCS_LINKSTATE_SHIFT 4
++
+ /* kr_extstatus bits */
+ #define INFINIPATH_EXTS_FREQSEL 0x2
+ #define INFINIPATH_EXTS_SERDESSEL 0x4
+@@ -296,6 +346,12 @@ static const struct ipath_cregs ipath_ht_cregs = {
+ #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
+ #define INFINIPATH_RT_BUFSIZE_SHIFT 48
  
--	if (!kset || !buf)
-+	if (!kobj || !buf)
- 		return -EINVAL;
++#define INFINIPATH_R_INTRAVAIL_SHIFT 16
++#define INFINIPATH_R_TAILUPD_SHIFT 31
++
++/* kr_xgxsconfig bits */
++#define INFINIPATH_XGXS_RESET          0x7ULL
++
+ /*
+  * masks and bits that are different in different chips, or present only
+  * in one
+@@ -652,7 +708,6 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
+ 			      "with ID %u\n", boardrev);
+ 		snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
+ 			 boardrev);
+-		ret = 1;
+ 		break;
+ 	}
+ 	if (n)
+@@ -686,6 +741,13 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
+ 			      dd->ipath_htspeed);
+ 	ret = 0;
  
- 	if (efi.mps != EFI_INVALID_TABLE_ADDR)
-@@ -576,15 +559,21 @@ systab_read(struct kset *kset, char *buf)
- 	return str - buf;
++	/*
++	 * set here, not in ipath_init_*_funcs because we have to do
++	 * it after we can read chip registers.
++	 */
++	dd->ipath_ureg_align =
++		ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
++
+ bail:
+ 	return ret;
  }
+@@ -969,7 +1031,8 @@ static int ipath_setup_ht_config(struct ipath_devdata *dd,
+ 	do {
+ 		u8 cap_type;
  
--static EFI_ATTR(systab, 0400, systab_read, NULL);
-+static struct kobj_attribute efi_attr_systab =
-+			__ATTR(systab, 0400, systab_show, NULL);
+-		/* the HT capability type byte is 3 bytes after the
++		/*
++		 * The HT capability type byte is 3 bytes after the
+ 		 * capability byte.
+ 		 */
+ 		if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
+@@ -982,6 +1045,8 @@ static int ipath_setup_ht_config(struct ipath_devdata *dd,
+ 	} while ((pos = pci_find_next_capability(pdev, pos,
+ 						 PCI_CAP_ID_HT)));
  
--static struct subsys_attribute *efi_subsys_attrs[] = {
--	&efi_attr_systab,
-+static struct attribute *efi_subsys_attrs[] = {
-+	&efi_attr_systab.attr,
- 	NULL,	/* maybe more in the future? */
- };
++	dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
++
+ bail:
+ 	return ret;
+ }
+@@ -1074,11 +1139,55 @@ static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
+ 
+ static void ipath_init_ht_variables(struct ipath_devdata *dd)
+ {
++	/*
++	 * setup the register offsets, since they are different for each
++	 * chip
++	 */
++	dd->ipath_kregs = &ipath_ht_kregs;
++	dd->ipath_cregs = &ipath_ht_cregs;
++
+ 	dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
+ 	dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
+ 	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
+ 	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
  
--static decl_subsys(vars, &efivar_ktype, NULL);
--static decl_subsys(efi, NULL, NULL);
-+static struct attribute_group efi_subsys_attr_group = {
-+	.attrs = efi_subsys_attrs,
-+};
++	/*
++	 * Fill in data for field-values that change in newer chips.
++	 * We dynamically specify only the mask for LINKTRAININGSTATE
++	 * and only the shift for LINKSTATE, as they are the only ones
++	 * that change.  Also precalculate the 3 link states of interest
++	 * and the combined mask.
++	 */
++	dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT;
++	dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK;
++	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
++		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
++	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
++		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
++		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
++	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
++		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
++		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
++	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
++		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
++		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
++
++	/*
++	 * Fill in data for ibcc field-values that change in newer chips.
++	 * We dynamically specify only the mask for LINKINITCMD
++	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
++	 * the only ones that change.
++	 */
++	dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
++	dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
++	dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
 +
++	/* Fill in shifts for RcvCtrl. */
++	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
++	dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
++	dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
++	dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */
 +
-+static struct kset *vars_kset;
-+static struct kobject *efi_kobj;
+ 	dd->ipath_i_bitsextant =
+ 		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
+ 		(INFINIPATH_I_RCVAVAIL_MASK <<
+@@ -1135,6 +1244,8 @@ static void ipath_init_ht_variables(struct ipath_devdata *dd)
  
- /*
-  * efivar_create_sysfs_entry()
-@@ -628,15 +617,16 @@ efivar_create_sysfs_entry(unsigned long variable_name_size,
- 	*(short_name + strlen(short_name)) = '-';
- 	efi_guid_unparse(vendor_guid, short_name + strlen(short_name));
+ 	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
+ 	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
++	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
++	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
  
--	kobject_set_name(&new_efivar->kobj, "%s", short_name);
--	kobj_set_kset_s(new_efivar, vars_subsys);
--	i = kobject_register(&new_efivar->kobj);
-+	new_efivar->kobj.kset = vars_kset;
-+	i = kobject_init_and_add(&new_efivar->kobj, &efivar_ktype, NULL,
-+				 "%s", short_name);
- 	if (i) {
- 		kfree(short_name);
- 		kfree(new_efivar);
- 		return 1;
- 	}
+ 	/*
+ 	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+@@ -1148,9 +1259,17 @@ static void ipath_init_ht_variables(struct ipath_devdata *dd)
+ 		INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+ 		INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
  
-+	kobject_uevent(&new_efivar->kobj, KOBJ_ADD);
- 	kfree(short_name);
- 	short_name = NULL;
+-	dd->ipath_eep_st_masks[2].errs_to_log =
+-		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
++	dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
  
-@@ -660,9 +650,8 @@ efivars_init(void)
- 	efi_status_t status = EFI_NOT_FOUND;
- 	efi_guid_t vendor_guid;
- 	efi_char16_t *variable_name;
--	struct subsys_attribute *attr;
- 	unsigned long variable_name_size = 1024;
--	int i, error = 0;
-+	int error = 0;
++	dd->delay_mult = 2; /* SDR, 4X, can't change */
++
++	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
++	dd->ipath_link_speed_supported = IPATH_IB_SDR;
++	dd->ipath_link_width_enabled = IB_WIDTH_4X;
++	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
++	/* these can't change for this chip, so set once */
++	dd->ipath_link_width_active = dd->ipath_link_width_enabled;
++	dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
+ }
  
- 	if (!efi_enabled)
- 		return -ENODEV;
-@@ -676,23 +665,18 @@ efivars_init(void)
- 	printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
- 	       EFIVARS_DATE);
+ /**
+@@ -1205,14 +1324,16 @@ static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
+ 	val &= ~INFINIPATH_HWE_HTCMISCERR4;
  
--	/*
--	 * For now we'll register the efi subsys within this driver
--	 */
--
--	error = firmware_register(&efi_subsys);
--
--	if (error) {
--		printk(KERN_ERR "efivars: Firmware registration failed with error %d.\n", error);
-+	/* For now we'll register the efi directory at /sys/firmware/efi */
-+	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
-+	if (!efi_kobj) {
-+		printk(KERN_ERR "efivars: Firmware registration failed.\n");
-+		error = -ENOMEM;
- 		goto out_free;
- 	}
+ 	/*
+-	 * PLL ignored because MDIO interface has a logic problem
+-	 * for reads, on Comstock and Ponderosa.  BRINGUP
++	 * PLL ignored because unused MDIO interface has a logic problem
+ 	 */
+ 	if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
+ 		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+ 	dd->ipath_hwerrmask = val;
+ }
  
--	kobj_set_kset_s(&vars_subsys, efi_subsys);
--
--	error = subsystem_register(&vars_subsys);
--
--	if (error) {
--		printk(KERN_ERR "efivars: Subsystem registration failed with error %d.\n", error);
-+	vars_kset = kset_create_and_add("vars", NULL, efi_kobj);
-+	if (!vars_kset) {
-+		printk(KERN_ERR "efivars: Subsystem registration failed.\n");
-+		error = -ENOMEM;
- 		goto out_firmware_unregister;
++
++
++
+ /**
+  * ipath_ht_bringup_serdes - bring up the serdes
+  * @dd: the infinipath device
+@@ -1284,16 +1405,6 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
  	}
  
-@@ -727,28 +711,28 @@ efivars_init(void)
- 	 * Now add attributes to allow creation of new vars
- 	 * and deletion of existing ones...
- 	 */
--
--	for (i = 0; (attr = var_subsys_attrs[i]) && !error; i++) {
--		if (attr->show && attr->store)
--			error = subsys_create_file(&vars_subsys, attr);
+ 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+-	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
+-	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
+-		val &= ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
+-			 INFINIPATH_XGXS_MDIOADDR_SHIFT);
+-		/*
+-		 * we use address 3
+-		 */
+-		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
+-		change = 1;
 -	}
-+	error = sysfs_create_bin_file(&vars_kset->kobj,
-+				      &var_subsys_attr_new_var);
-+	if (error)
-+		printk(KERN_ERR "efivars: unable to create new_var sysfs file"
-+			" due to error %d\n", error);
-+	error = sysfs_create_bin_file(&vars_kset->kobj,
-+				      &var_subsys_attr_del_var);
-+	if (error)
-+		printk(KERN_ERR "efivars: unable to create del_var sysfs file"
-+			" due to error %d\n", error);
+ 	if (val & INFINIPATH_XGXS_RESET) {
+ 		/* normally true after boot */
+ 		val &= ~INFINIPATH_XGXS_RESET;
+@@ -1329,21 +1440,6 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
+ 		   (unsigned long long)
+ 		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
  
- 	/* Don't forget the systab entry */
--
--	for (i = 0; (attr = efi_subsys_attrs[i]) && !error; i++) {
--		if (attr->show)
--			error = subsys_create_file(&efi_subsys, attr);
--	}
+-	if (!ipath_waitfor_mdio_cmdready(dd)) {
+-		ipath_write_kreg(dd, dd->ipath_kregs->kr_mdio,
+-				 ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
+-						IPATH_MDIO_CTRL_XGXS_REG_8,
+-						0));
+-		if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
+-					   IPATH_MDIO_DATAVALID, &val))
+-			ipath_dbg("Never got MDIO data for XGXS status "
+-				  "read\n");
+-		else
+-			ipath_cdbg(VERBOSE, "MDIO Read reg8, "
+-				   "'bank' 31 %x\n", (u32) val);
+-	} else
+-		ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
 -
-+	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
- 	if (error)
- 		printk(KERN_ERR "efivars: Sysfs attribute export failed with error %d.\n", error);
- 	else
- 		goto out_free;
- 
--	subsystem_unregister(&vars_subsys);
-+	kset_unregister(vars_kset);
- 
- out_firmware_unregister:
--	firmware_unregister(&efi_subsys);
-+	kobject_put(efi_kobj);
+ 	return ret;		/* for now, say we always succeeded */
+ }
  
- out_free:
- 	kfree(variable_name);
-@@ -768,8 +752,8 @@ efivars_exit(void)
- 		efivar_unregister(entry);
+@@ -1396,6 +1492,7 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
+ 			pa |= lenvalid | INFINIPATH_RT_VALID;
+ 		}
  	}
- 
--	subsystem_unregister(&vars_subsys);
--	firmware_unregister(&efi_subsys);
-+	kset_unregister(vars_kset);
-+	kobject_put(efi_kobj);
++
+ 	writeq(pa, tidptr);
  }
  
- module_init(efivars_init);
-diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
-index 1ac5103..275dc52 100644
---- a/drivers/hid/Makefile
-+++ b/drivers/hid/Makefile
-@@ -1,7 +1,7 @@
- #
- # Makefile for the HID driver
- #
--hid-objs			:= hid-core.o hid-input.o
-+hid-objs			:= hid-core.o hid-input.o hid-input-quirks.o
- 
- obj-$(CONFIG_HID)		+= hid.o
- 
-diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index 2884b03..d73a768 100644
---- a/drivers/hid/hid-core.c
-+++ b/drivers/hid/hid-core.c
-@@ -26,6 +26,7 @@
- #include <linux/input.h>
- #include <linux/wait.h>
- #include <linux/vmalloc.h>
-+#include <linux/sched.h>
- 
- #include <linux/hid.h>
- #include <linux/hiddev.h>
-@@ -758,7 +759,9 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
- {
- 	u64 x;
- 
--	WARN_ON(n > 32);
-+	if (n > 32)
-+		printk(KERN_WARNING "HID: extract() called with n (%d) > 32! (%s)\n",
-+				n, current->comm);
- 
- 	report += offset >> 3;  /* adjust byte index */
- 	offset &= 7;            /* now only need bit offset into one byte */
-@@ -780,8 +783,13 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3
- 	__le64 x;
- 	u64 m = (1ULL << n) - 1;
- 
--	WARN_ON(n > 32);
-+	if (n > 32)
-+		printk(KERN_WARNING "HID: implement() called with n (%d) > 32! (%s)\n",
-+				n, current->comm);
+@@ -1526,8 +1623,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
+ 	}
  
-+	if (value > m)
-+		printk(KERN_WARNING "HID: implement() called with too large value %d! (%s)\n",
-+				value, current->comm);
- 	WARN_ON(value > m);
- 	value &= m;
+ 	ipath_get_eeprom_info(dd);
+-	if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
+-		dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
++	if (dd->ipath_boardrev == 5) {
+ 		/*
+ 		 * Later production QHT7040 has same changes as QHT7140, so
+ 		 * can use GPIO interrupts.  They have serial #'s starting
+@@ -1602,6 +1698,210 @@ static void ipath_ht_free_irq(struct ipath_devdata *dd)
+ 	dd->ipath_intconfig = 0;
+ }
  
-diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
-new file mode 100644
-index 0000000..a870ba5
---- /dev/null
-+++ b/drivers/hid/hid-input-quirks.c
-@@ -0,0 +1,423 @@
-+/*
-+ *  HID-input usage mapping quirks
-+ *
-+ *  This is used to handle HID-input mappings for devices violating
-+ *  HUT 1.12 specification.
-+ *
-+ * Copyright (c) 2007-2008 Jiri Kosina
-+ */
-+
-+/*
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License
-+ */
-+
-+#include <linux/input.h>
-+#include <linux/hid.h>
-+
-+#define map_abs(c)      do { usage->code = c; usage->type = EV_ABS; *bit = input->absbit; *max = ABS_MAX; } while (0)
-+#define map_rel(c)      do { usage->code = c; usage->type = EV_REL; *bit = input->relbit; *max = REL_MAX; } while (0)
-+#define map_key(c)      do { usage->code = c; usage->type = EV_KEY; *bit = input->keybit; *max = KEY_MAX; } while (0)
-+#define map_led(c)      do { usage->code = c; usage->type = EV_LED; *bit = input->ledbit; *max = LED_MAX; } while (0)
-+
-+#define map_abs_clear(c)        do { map_abs(c); clear_bit(c, *bit); } while (0)
-+#define map_key_clear(c)        do { map_key(c); clear_bit(c, *bit); } while (0)
-+
-+static int quirk_belkin_wkbd(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
++static struct ipath_message_header *
++ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
 +{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
-+		return 0;
-+
-+	switch (usage->hid & HID_USAGE) {
-+		case 0x03a: map_key_clear(KEY_SOUND);		break;
-+		case 0x03b: map_key_clear(KEY_CAMERA);		break;
-+		case 0x03c: map_key_clear(KEY_DOCUMENTS);	break;
-+		default:
-+			return 0;
-+	}
-+	return 1;
++	return (struct ipath_message_header *)
++		&rhf_addr[sizeof(u64) / sizeof(u32)];
 +}
 +
-+static int quirk_cherry_cymotion(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
++static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports)
 +{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
-+		return 0;
-+
-+	switch (usage->hid & HID_USAGE) {
-+		case 0x301: map_key_clear(KEY_PROG1);		break;
-+		case 0x302: map_key_clear(KEY_PROG2);		break;
-+		case 0x303: map_key_clear(KEY_PROG3);		break;
-+		default:
-+			return 0;
-+	}
-+	return 1;
++	dd->ipath_portcnt =
++		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
++	dd->ipath_p0_rcvegrcnt =
++		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
 +}
 +
-+static int quirk_logitech_ultrax_remote(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
++static void ipath_ht_read_counters(struct ipath_devdata *dd,
++				   struct infinipath_counters *cntrs)
 +{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
-+		return 0;
-+
-+	set_bit(EV_REP, input->evbit);
-+	switch(usage->hid & HID_USAGE) {
-+		/* Reported on Logitech Ultra X Media Remote */
-+		case 0x004: map_key_clear(KEY_AGAIN);		break;
-+		case 0x00d: map_key_clear(KEY_HOME);		break;
-+		case 0x024: map_key_clear(KEY_SHUFFLE);		break;
-+		case 0x025: map_key_clear(KEY_TV);		break;
-+		case 0x026: map_key_clear(KEY_MENU);		break;
-+		case 0x031: map_key_clear(KEY_AUDIO);		break;
-+		case 0x032: map_key_clear(KEY_TEXT);		break;
-+		case 0x033: map_key_clear(KEY_LAST);		break;
-+		case 0x047: map_key_clear(KEY_MP3);		break;
-+		case 0x048: map_key_clear(KEY_DVD);		break;
-+		case 0x049: map_key_clear(KEY_MEDIA);		break;
-+		case 0x04a: map_key_clear(KEY_VIDEO);		break;
-+		case 0x04b: map_key_clear(KEY_ANGLE);		break;
-+		case 0x04c: map_key_clear(KEY_LANGUAGE);	break;
-+		case 0x04d: map_key_clear(KEY_SUBTITLE);	break;
-+		case 0x051: map_key_clear(KEY_RED);		break;
-+		case 0x052: map_key_clear(KEY_CLOSE);		break;
-+
-+		default:
-+			return 0;
-+	}
-+	return 1;
++	cntrs->LBIntCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
++	cntrs->LBFlowStallCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
++	cntrs->TxSDmaDescCnt = 0;
++	cntrs->TxUnsupVLErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
++	cntrs->TxDataPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
++	cntrs->TxFlowPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
++	cntrs->TxDwordCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
++	cntrs->TxLenErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
++	cntrs->TxMaxMinLenErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
++	cntrs->TxUnderrunCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
++	cntrs->TxFlowStallCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
++	cntrs->TxDroppedPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
++	cntrs->RxDroppedPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
++	cntrs->RxDataPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
++	cntrs->RxFlowPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
++	cntrs->RxDwordCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
++	cntrs->RxLenErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
++	cntrs->RxMaxMinLenErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
++	cntrs->RxICRCErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
++	cntrs->RxVCRCErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
++	cntrs->RxFlowCtrlErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
++	cntrs->RxBadFormatCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
++	cntrs->RxLinkProblemCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
++	cntrs->RxEBPCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
++	cntrs->RxLPCRCErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
++	cntrs->RxBufOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
++	cntrs->RxTIDFullErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
++	cntrs->RxTIDValidErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
++	cntrs->RxPKeyMismatchCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
++	cntrs->RxP0HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
++	cntrs->RxP1HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
++	cntrs->RxP2HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
++	cntrs->RxP3HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
++	cntrs->RxP4HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
++	cntrs->RxP5HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt));
++	cntrs->RxP6HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt));
++	cntrs->RxP7HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt));
++	cntrs->RxP8HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt));
++	cntrs->RxP9HdrEgrOvflCnt = 0;
++	cntrs->RxP10HdrEgrOvflCnt = 0;
++	cntrs->RxP11HdrEgrOvflCnt = 0;
++	cntrs->RxP12HdrEgrOvflCnt = 0;
++	cntrs->RxP13HdrEgrOvflCnt = 0;
++	cntrs->RxP14HdrEgrOvflCnt = 0;
++	cntrs->RxP15HdrEgrOvflCnt = 0;
++	cntrs->RxP16HdrEgrOvflCnt = 0;
++	cntrs->IBStatusChangeCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
++	cntrs->IBLinkErrRecoveryCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
++	cntrs->IBLinkDownedCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
++	cntrs->IBSymbolErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
++	cntrs->RxVL15DroppedPktCnt = 0;
++	cntrs->RxOtherLocalPhyErrCnt = 0;
++	cntrs->PcieRetryBufDiagQwordCnt = 0;
++	cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
++	cntrs->LocalLinkIntegrityErrCnt =
++		(dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
++		dd->ipath_lli_errs : dd->ipath_lli_errors;
++	cntrs->RxVlErrCnt = 0;
++	cntrs->RxDlidFltrCnt = 0;
 +}
 +
-+static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
-+{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
-+		return 0;
 +
-+	set_bit(EV_REP, input->evbit);
-+	switch (usage->hid & HID_USAGE) {
-+		case 0xff01: map_key_clear(BTN_1);		break;
-+		case 0xff02: map_key_clear(BTN_2);		break;
-+		case 0xff03: map_key_clear(BTN_3);		break;
-+		case 0xff04: map_key_clear(BTN_4);		break;
-+		case 0xff05: map_key_clear(BTN_5);		break;
-+		case 0xff06: map_key_clear(BTN_6);		break;
-+		case 0xff07: map_key_clear(BTN_7);		break;
-+		case 0xff08: map_key_clear(BTN_8);		break;
-+		case 0xff09: map_key_clear(BTN_9);		break;
-+		case 0xff0a: map_key_clear(BTN_A);		break;
-+		case 0xff0b: map_key_clear(BTN_B);		break;
-+		default:
-+			return 0;
-+	}
-+	return 1;
++/* no interrupt fallback for these chips */
++static int ipath_ht_nointr_fallback(struct ipath_devdata *dd)
++{
++	return 0;
 +}
 +
-+static int quirk_microsoft_ergonomy_kb(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
++
++/*
++ * reset the XGXS (between serdes and IBC).  Slightly less intrusive
++ * than resetting the IBC or external link state, and useful in some
++ * cases to cause some retraining.  To do this right, we reset IBC
++ * as well.
++ */
++static void ipath_ht_xgxs_reset(struct ipath_devdata *dd)
 +{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
-+		return 0;
++	u64 val, prev_val;
 +
-+	switch(usage->hid & HID_USAGE) {
-+		case 0xfd06: map_key_clear(KEY_CHAT);		break;
-+		case 0xfd07: map_key_clear(KEY_PHONE);		break;
-+		case 0xff05:
-+			set_bit(EV_REP, input->evbit);
-+			map_key_clear(KEY_F13);
-+			set_bit(KEY_F14, input->keybit);
-+			set_bit(KEY_F15, input->keybit);
-+			set_bit(KEY_F16, input->keybit);
-+			set_bit(KEY_F17, input->keybit);
-+			set_bit(KEY_F18, input->keybit);
-+		default:
-+			return 0;
-+	}
-+	return 1;
++	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
++	val = prev_val | INFINIPATH_XGXS_RESET;
++	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
++			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
++	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
++			 dd->ipath_control);
 +}
 +
-+static int quirk_microsoft_presenter_8k(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
++
++static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which)
 +{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR)
-+		return 0;
++	int ret;
 +
-+	set_bit(EV_REP, input->evbit);
-+	switch(usage->hid & HID_USAGE) {
-+		case 0xfd08: map_key_clear(KEY_FORWARD);	break;
-+		case 0xfd09: map_key_clear(KEY_BACK);		break;
-+		case 0xfd0b: map_key_clear(KEY_PLAYPAUSE);	break;
-+		case 0xfd0e: map_key_clear(KEY_CLOSE);		break;
-+		case 0xfd0f: map_key_clear(KEY_PLAY);		break;
-+		default:
-+			return 0;
++	switch (which) {
++	case IPATH_IB_CFG_LWID:
++		ret = dd->ipath_link_width_active;
++		break;
++	case IPATH_IB_CFG_SPD:
++		ret = dd->ipath_link_speed_active;
++		break;
++	case IPATH_IB_CFG_LWID_ENB:
++		ret = dd->ipath_link_width_enabled;
++		break;
++	case IPATH_IB_CFG_SPD_ENB:
++		ret = dd->ipath_link_speed_enabled;
++		break;
++	default:
++		ret =  -ENOTSUPP;
++		break;
 +	}
-+	return 1;
++	return ret;
 +}
 +
-+static int quirk_petalynx_remote(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
-+{
-+	if (((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) &&
-+			((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER))
-+		return 0;
 +
-+	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_LOGIVENDOR)
-+		switch(usage->hid & HID_USAGE) {
-+			case 0x05a: map_key_clear(KEY_TEXT);		break;
-+			case 0x05b: map_key_clear(KEY_RED);		break;
-+			case 0x05c: map_key_clear(KEY_GREEN);		break;
-+			case 0x05d: map_key_clear(KEY_YELLOW);		break;
-+			case 0x05e: map_key_clear(KEY_BLUE);		break;
-+			default:
-+				return 0;
-+		}
++/* we assume range checking is already done, if needed */
++static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
++{
++	int ret = 0;
 +
-+	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
-+		switch(usage->hid & HID_USAGE) {
-+			case 0x0f6: map_key_clear(KEY_NEXT);            break;
-+			case 0x0fa: map_key_clear(KEY_BACK);            break;
-+			default:
-+				return 0;
-+		}
-+	return 1;
++	if (which == IPATH_IB_CFG_LWID_ENB)
++		dd->ipath_link_width_enabled = val;
++	else if (which == IPATH_IB_CFG_SPD_ENB)
++		dd->ipath_link_speed_enabled = val;
++	else
++		ret = -ENOTSUPP;
++	return ret;
 +}
 +
-+static int quirk_logitech_wireless(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
-+{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
-+		return 0;
-+
-+	switch (usage->hid & HID_USAGE) {
-+		case 0x1001: map_key_clear(KEY_MESSENGER);	break;
-+		case 0x1003: map_key_clear(KEY_SOUND);		break;
-+		case 0x1004: map_key_clear(KEY_VIDEO);		break;
-+		case 0x1005: map_key_clear(KEY_AUDIO);		break;
-+		case 0x100a: map_key_clear(KEY_DOCUMENTS);	break;
-+		case 0x1011: map_key_clear(KEY_PREVIOUSSONG);	break;
-+		case 0x1012: map_key_clear(KEY_NEXTSONG);	break;
-+		case 0x1013: map_key_clear(KEY_CAMERA);		break;
-+		case 0x1014: map_key_clear(KEY_MESSENGER);	break;
-+		case 0x1015: map_key_clear(KEY_RECORD);		break;
-+		case 0x1016: map_key_clear(KEY_PLAYER);		break;
-+		case 0x1017: map_key_clear(KEY_EJECTCD);	break;
-+		case 0x1018: map_key_clear(KEY_MEDIA);		break;
-+		case 0x1019: map_key_clear(KEY_PROG1);		break;
-+		case 0x101a: map_key_clear(KEY_PROG2);		break;
-+		case 0x101b: map_key_clear(KEY_PROG3);		break;
-+		case 0x101f: map_key_clear(KEY_ZOOMIN);		break;
-+		case 0x1020: map_key_clear(KEY_ZOOMOUT);	break;
-+		case 0x1021: map_key_clear(KEY_ZOOMRESET);	break;
-+		case 0x1023: map_key_clear(KEY_CLOSE);		break;
-+		case 0x1027: map_key_clear(KEY_MENU);		break;
-+		/* this one is marked as 'Rotate' */
-+		case 0x1028: map_key_clear(KEY_ANGLE);		break;
-+		case 0x1029: map_key_clear(KEY_SHUFFLE);	break;
-+		case 0x102a: map_key_clear(KEY_BACK);		break;
-+		case 0x102b: map_key_clear(KEY_CYCLEWINDOWS);	break;
-+		case 0x1041: map_key_clear(KEY_BATTERY);	break;
-+		case 0x1042: map_key_clear(KEY_WORDPROCESSOR);	break;
-+		case 0x1043: map_key_clear(KEY_SPREADSHEET);	break;
-+		case 0x1044: map_key_clear(KEY_PRESENTATION);	break;
-+		case 0x1045: map_key_clear(KEY_UNDO);		break;
-+		case 0x1046: map_key_clear(KEY_REDO);		break;
-+		case 0x1047: map_key_clear(KEY_PRINT);		break;
-+		case 0x1048: map_key_clear(KEY_SAVE);		break;
-+		case 0x1049: map_key_clear(KEY_PROG1);		break;
-+		case 0x104a: map_key_clear(KEY_PROG2);		break;
-+		case 0x104b: map_key_clear(KEY_PROG3);		break;
-+		case 0x104c: map_key_clear(KEY_PROG4);		break;
 +
-+		default:
-+			return 0;
-+	}
-+	return 1;
++static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
++{
 +}
 +
-+static int quirk_cherry_genius_29e(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
++
++static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
 +{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
-+		return 0;
++	ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs),
++		ipath_ib_linktrstate(dd, ibcs));
++	return 0;
++}
 +
-+	switch (usage->hid & HID_USAGE) {
-+		case 0x156: map_key_clear(KEY_WORDPROCESSOR);	break;
-+		case 0x157: map_key_clear(KEY_SPREADSHEET);	break;
-+		case 0x158: map_key_clear(KEY_PRESENTATION);	break;
-+		case 0x15c: map_key_clear(KEY_STOP);		break;
 +
-+		default:
-+			return 0;
-+	}
-+	return 1;
-+}
+ /**
+  * ipath_init_iba6110_funcs - set up the chip-specific function pointers
+  * @dd: the infinipath device
+@@ -1626,22 +1926,19 @@ void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
+ 	dd->ipath_f_setextled = ipath_setup_ht_setextled;
+ 	dd->ipath_f_get_base_info = ipath_ht_get_base_info;
+ 	dd->ipath_f_free_irq = ipath_ht_free_irq;
+-
+-	/*
+-	 * initialize chip-specific variables
+-	 */
+ 	dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
++	dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback;
++	dd->ipath_f_get_msgheader = ipath_ht_get_msgheader;
++	dd->ipath_f_config_ports = ipath_ht_config_ports;
++	dd->ipath_f_read_counters = ipath_ht_read_counters;
++	dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset;
++	dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg;
++	dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg;
++	dd->ipath_f_config_jint = ipath_ht_config_jint;
++	dd->ipath_f_ib_updown = ipath_ht_ib_updown;
+ 
+ 	/*
+-	 * setup the register offsets, since they are different for each
+-	 * chip
+-	 */
+-	dd->ipath_kregs = &ipath_ht_kregs;
+-	dd->ipath_cregs = &ipath_ht_cregs;
+-
+-	/*
+-	 * do very early init that is needed before ipath_f_bus is
+-	 * called
++	 * initialize chip-specific variables
+ 	 */
+ 	ipath_init_ht_variables(dd);
+ }
+diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
+index 0103d6f..c7a2f50 100644
+--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
++++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
+@@ -145,10 +145,57 @@ struct _infinipath_do_not_use_kernel_regs {
+ 	unsigned long long Reserved12;
+ };
+ 
+-#define IPATH_KREG_OFFSET(field) (offsetof(struct \
+-    _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
++struct _infinipath_do_not_use_counters {
++	__u64 LBIntCnt;
++	__u64 LBFlowStallCnt;
++	__u64 Reserved1;
++	__u64 TxUnsupVLErrCnt;
++	__u64 TxDataPktCnt;
++	__u64 TxFlowPktCnt;
++	__u64 TxDwordCnt;
++	__u64 TxLenErrCnt;
++	__u64 TxMaxMinLenErrCnt;
++	__u64 TxUnderrunCnt;
++	__u64 TxFlowStallCnt;
++	__u64 TxDroppedPktCnt;
++	__u64 RxDroppedPktCnt;
++	__u64 RxDataPktCnt;
++	__u64 RxFlowPktCnt;
++	__u64 RxDwordCnt;
++	__u64 RxLenErrCnt;
++	__u64 RxMaxMinLenErrCnt;
++	__u64 RxICRCErrCnt;
++	__u64 RxVCRCErrCnt;
++	__u64 RxFlowCtrlErrCnt;
++	__u64 RxBadFormatCnt;
++	__u64 RxLinkProblemCnt;
++	__u64 RxEBPCnt;
++	__u64 RxLPCRCErrCnt;
++	__u64 RxBufOvflCnt;
++	__u64 RxTIDFullErrCnt;
++	__u64 RxTIDValidErrCnt;
++	__u64 RxPKeyMismatchCnt;
++	__u64 RxP0HdrEgrOvflCnt;
++	__u64 RxP1HdrEgrOvflCnt;
++	__u64 RxP2HdrEgrOvflCnt;
++	__u64 RxP3HdrEgrOvflCnt;
++	__u64 RxP4HdrEgrOvflCnt;
++	__u64 RxP5HdrEgrOvflCnt;
++	__u64 RxP6HdrEgrOvflCnt;
++	__u64 RxP7HdrEgrOvflCnt;
++	__u64 RxP8HdrEgrOvflCnt;
++	__u64 Reserved6;
++	__u64 Reserved7;
++	__u64 IBStatusChangeCnt;
++	__u64 IBLinkErrRecoveryCnt;
++	__u64 IBLinkDownedCnt;
++	__u64 IBSymbolErrCnt;
++};
 +
-+static int quirk_btc_8193(struct hid_usage *usage, struct input_dev *input,
-+			      unsigned long **bit, int *max)
-+{
-+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
-+		return 0;
++#define IPATH_KREG_OFFSET(field) (offsetof( \
++	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
+ #define IPATH_CREG_OFFSET(field) (offsetof( \
+-    struct infinipath_counters, field) / sizeof(u64))
++	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
+ 
+ static const struct ipath_kregs ipath_pe_kregs = {
+ 	.kr_control = IPATH_KREG_OFFSET(Control),
+@@ -282,6 +329,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
+ #define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
+ #define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL
+ 
++#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
++#define IBA6120_IBCS_LINKSTATE_SHIFT 4
 +
-+	switch (usage->hid & HID_USAGE) {
-+		case 0x230: map_key(BTN_MOUSE);			break;
-+		case 0x231: map_rel(REL_WHEEL);			break;
-+		/* 
-+		 * this keyboard has a scrollwheel implemented in
-+		 * totally broken way. We map this usage temporarily
-+		 * to HWHEEL and handle it in the event quirk handler
-+		 */
-+		case 0x232: map_rel(REL_HWHEEL);		break;
+ /* kr_extstatus bits */
+ #define INFINIPATH_EXTS_FREQSEL 0x2
+ #define INFINIPATH_EXTS_SERDESSEL 0x4
+@@ -296,6 +346,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
+ #define IPATH_GPIO_SCL (1ULL << \
+ 	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+ 
++#define INFINIPATH_R_INTRAVAIL_SHIFT 16
++#define INFINIPATH_R_TAILUPD_SHIFT 31
 +
-+		default:
-+			return 0;
+ /* 6120 specific hardware errors... */
+ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
+ 	INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
+@@ -320,10 +373,28 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
+ 		        INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
+ 		        << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
+ 
+-static int ipath_pe_txe_recover(struct ipath_devdata *);
+ static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
+ 			       u32, unsigned long);
+ 
++/*
++ * On platforms using this chip, and not having ordered WC stores, we
++ * can get TXE parity errors due to speculative reads to the PIO buffers,
++ * and this, due to a chip bug can result in (many) false parity error
++ * reports.  So it's a debug print on those, and an info print on systems
++ * where the speculative reads don't occur.
++ */
++static void ipath_pe_txe_recover(struct ipath_devdata *dd)
++{
++	if (ipath_unordered_wc())
++		ipath_dbg("Recovering from TXE PIO parity error\n");
++	else {
++		++ipath_stats.sps_txeparity;
++		dev_info(&dd->pcidev->dev,
++			"Recovering from TXE PIO parity error\n");
 +	}
-+	return 1;
 +}
 +
-+#define VENDOR_ID_BELKIN			0x1020
-+#define DEVICE_ID_BELKIN_WIRELESS_KEYBOARD	0x0006
-+
-+#define VENDOR_ID_CHERRY			0x046a
-+#define DEVICE_ID_CHERRY_CYMOTION		0x0023
-+
-+#define VENDOR_ID_CHICONY			0x04f2
-+#define DEVICE_ID_CHICONY_TACTICAL_PAD		0x0418
-+
-+#define VENDOR_ID_EZKEY				0x0518
-+#define DEVICE_ID_BTC_8193			0x0002
 +
-+#define VENDOR_ID_LOGITECH			0x046d
-+#define DEVICE_ID_LOGITECH_RECEIVER		0xc101
-+#define DEVICE_ID_S510_RECEIVER			0xc50c
-+#define DEVICE_ID_S510_RECEIVER_2		0xc517
-+#define DEVICE_ID_MX3000_RECEIVER		0xc513
+ /**
+  * ipath_pe_handle_hwerrors - display hardware errors.
+  * @dd: the infinipath device
+@@ -403,35 +474,11 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+ 		 * occur if a processor speculative read is done to the PIO
+ 		 * buffer while we are sending a packet, for example.
+ 		 */
+-		if ((hwerrs & TXE_PIO_PARITY) && ipath_pe_txe_recover(dd))
++		if (hwerrs & TXE_PIO_PARITY) {
++			ipath_pe_txe_recover(dd);
+ 			hwerrs &= ~TXE_PIO_PARITY;
+-		if (hwerrs) {
+-			/*
+-			 * if any set that we aren't ignoring only make the
+-			 * complaint once, in case it's stuck or recurring,
+-			 * and we get here multiple times
+-			 * Force link down, so switch knows, and
+-			 * LEDs are turned off
+-			 */
+-			if (dd->ipath_flags & IPATH_INITTED) {
+-				ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
+-				ipath_setup_pe_setextled(dd,
+-					INFINIPATH_IBCS_L_STATE_DOWN,
+-					INFINIPATH_IBCS_LT_STATE_DISABLED);
+-				ipath_dev_err(dd, "Fatal Hardware Error (freeze "
+-					      "mode), no longer usable, SN %.16s\n",
+-						  dd->ipath_serial);
+-				isfatal = 1;
+-			}
+-			/*
+-			 * Mark as having had an error for driver, and also
+-			 * for /sys and status word mapped to user programs.
+-			 * This marks unit as not usable, until reset
+-			 */
+-			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+-			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+-			dd->ipath_flags &= ~IPATH_INITTED;
+-		} else {
++		}
++		if (!hwerrs) {
+ 			static u32 freeze_cnt;
+ 
+ 			freeze_cnt++;
+@@ -485,7 +532,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+ 
+ 	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
+ 		/*
+-		 * If it occurs, it is left masked since the eternal
++		 * If it occurs, it is left masked since the external
+ 		 * interface is unused
+ 		 */
+ 		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+@@ -563,6 +610,14 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
+ 			dd->ipath_f_put_tid = ipath_pe_put_tid_2;
+ 	}
+ 
 +
-+#define VENDOR_ID_MICROSOFT			0x045e
-+#define DEVICE_ID_MS4K				0x00db
-+#define DEVICE_ID_MS6K				0x00f9
-+#define DEVICE_IS_MS_PRESENTER_8K_BT		0x0701
-+#define DEVICE_ID_MS_PRESENTER_8K_USB		0x0713
++	/*
++	 * set here, not in ipath_init_*_funcs because we have to do
++	 * it after we can read chip registers.
++	 */
++	dd->ipath_ureg_align =
++		ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
 +
-+#define VENDOR_ID_MONTEREY			0x0566
-+#define DEVICE_ID_GENIUS_KB29E			0x3004
+ 	return ret;
+ }
+ 
+@@ -667,17 +722,8 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
+ 
+ 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+ 	prev_val = val;
+-	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
+-	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
+-		val &=
+-			~(INFINIPATH_XGXS_MDIOADDR_MASK <<
+-			  INFINIPATH_XGXS_MDIOADDR_SHIFT);
+-		/* MDIO address 3 */
+-		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
+-	}
+-	if (val & INFINIPATH_XGXS_RESET) {
++	if (val & INFINIPATH_XGXS_RESET)
+ 		val &= ~INFINIPATH_XGXS_RESET;
+-	}
+ 	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
+ 	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
+ 		/* need to compensate for Tx inversion in partner */
+@@ -707,21 +753,6 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
+ 		   (unsigned long long)
+ 		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+ 
+-	if (!ipath_waitfor_mdio_cmdready(dd)) {
+-		ipath_write_kreg(
+-			dd, dd->ipath_kregs->kr_mdio,
+-			ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
+-				       IPATH_MDIO_CTRL_XGXS_REG_8, 0));
+-		if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
+-					   IPATH_MDIO_DATAVALID, &val))
+-			ipath_dbg("Never got MDIO data for XGXS "
+-				  "status read\n");
+-		else
+-			ipath_cdbg(VERBOSE, "MDIO Read reg8, "
+-				   "'bank' 31 %x\n", (u32) val);
+-	} else
+-		ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
+-
+ 	return ret;
+ }
+ 
+@@ -902,12 +933,27 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
+ 	else
+ 		ipath_dev_err(dd, "Can't find PCI Express "
+ 			      "capability!\n");
 +
-+#define VENDOR_ID_PETALYNX			0x18b1
-+#define DEVICE_ID_PETALYNX_MAXTER_REMOTE	0x0037
++	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
++	dd->ipath_link_speed_supported = IPATH_IB_SDR;
++	dd->ipath_link_width_enabled = IB_WIDTH_4X;
++	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
++	/* these can't change for this chip, so set once */
++	dd->ipath_link_width_active = dd->ipath_link_width_enabled;
++	dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
+ 	return 0;
+ }
+ 
+ static void ipath_init_pe_variables(struct ipath_devdata *dd)
+ {
+ 	/*
++	 * setup the register offsets, since they are different for each
++	 * chip
++	 */
++	dd->ipath_kregs = &ipath_pe_kregs;
++	dd->ipath_cregs = &ipath_pe_cregs;
 +
-+static const struct hid_input_blacklist {
-+	__u16 idVendor;
-+	__u16 idProduct;
-+	int (*quirk)(struct hid_usage *, struct input_dev *, unsigned long **, int *);
-+} hid_input_blacklist[] = {
-+	{ VENDOR_ID_BELKIN, DEVICE_ID_BELKIN_WIRELESS_KEYBOARD, quirk_belkin_wkbd },
++	/*
+ 	 * bits for selecting i2c direction and values,
+ 	 * used for I2C serial flash
+ 	 */
+@@ -916,6 +962,43 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
+ 	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
+ 	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
+ 
++	/*
++	 * Fill in data for field-values that change in newer chips.
++	 * We dynamically specify only the mask for LINKTRAININGSTATE
++	 * and only the shift for LINKSTATE, as they are the only ones
++	 * that change.  Also precalculate the 3 link states of interest
++	 * and the combined mask.
++	 */
++	dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
++	dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
++	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
++		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
++	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
++		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
++		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
++	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
++		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
++		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
++	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
++		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
++		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
 +
-+	{ VENDOR_ID_CHERRY, DEVICE_ID_CHERRY_CYMOTION, quirk_cherry_cymotion },
++	/*
++	 * Fill in data for ibcc field-values that change in newer chips.
++	 * We dynamically specify only the mask for LINKINITCMD
++	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
++	 * the only ones that change.
++	 */
++	dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
++	dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
++	dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
 +
-+	{ VENDOR_ID_CHICONY, DEVICE_ID_CHICONY_TACTICAL_PAD, quirk_chicony_tactical_pad },
++	/* Fill in shifts for RcvCtrl. */
++	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
++	dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
++	dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
++	dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */
 +
-+	{ VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 },
+ 	/* variables for sanity checking interrupt and errors */
+ 	dd->ipath_hwe_bitsextant =
+ 		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+@@ -963,6 +1046,8 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
+ 
+ 	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
+ 	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
++	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
++	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
+ 
+ 	/*
+ 	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+@@ -984,6 +1069,7 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
+ 		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
+ 
+ 
++	dd->delay_mult = 2; /* SDR, 4X, can't change */
+ }
+ 
+ /* setup the MSI stuff again after a reset.  I'd like to just call
+@@ -1289,6 +1375,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
+ 	 */
+ 	dd->ipath_rcvhdrentsize = 24;
+ 	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
++	dd->ipath_rhf_offset = 0;
++	dd->ipath_egrtidbase = (u64 __iomem *)
++		((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
+ 
+ 	/*
+ 	 * To truly support a 4KB MTU (for usermode), we need to
+@@ -1359,34 +1448,204 @@ static void ipath_pe_free_irq(struct ipath_devdata *dd)
+ 	dd->ipath_irq = 0;
+ }
+ 
 +
-+	{ VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote },
-+	{ VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless },
-+	{ VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless },
-+	{ VENDOR_ID_LOGITECH, DEVICE_ID_MX3000_RECEIVER, quirk_logitech_wireless },
++static struct ipath_message_header *
++ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
++{
++	return (struct ipath_message_header *)
++		&rhf_addr[sizeof(u64) / sizeof(u32)];
++}
 +
-+	{ VENDOR_ID_MICROSOFT, DEVICE_ID_MS4K, quirk_microsoft_ergonomy_kb },
-+	{ VENDOR_ID_MICROSOFT, DEVICE_ID_MS6K, quirk_microsoft_ergonomy_kb },
-+	{ VENDOR_ID_MICROSOFT, DEVICE_IS_MS_PRESENTER_8K_BT, quirk_microsoft_presenter_8k },
-+	{ VENDOR_ID_MICROSOFT, DEVICE_ID_MS_PRESENTER_8K_USB, quirk_microsoft_presenter_8k },
++static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
++{
++	dd->ipath_portcnt =
++		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
++	dd->ipath_p0_rcvegrcnt =
++		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
++}
 +
-+	{ VENDOR_ID_MONTEREY, DEVICE_ID_GENIUS_KB29E, quirk_cherry_genius_29e },
++static void ipath_pe_read_counters(struct ipath_devdata *dd,
++				   struct infinipath_counters *cntrs)
++{
++	cntrs->LBIntCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
++	cntrs->LBFlowStallCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
++	cntrs->TxSDmaDescCnt = 0;
++	cntrs->TxUnsupVLErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
++	cntrs->TxDataPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
++	cntrs->TxFlowPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
++	cntrs->TxDwordCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
++	cntrs->TxLenErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
++	cntrs->TxMaxMinLenErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
++	cntrs->TxUnderrunCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
++	cntrs->TxFlowStallCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
++	cntrs->TxDroppedPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
++	cntrs->RxDroppedPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
++	cntrs->RxDataPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
++	cntrs->RxFlowPktCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
++	cntrs->RxDwordCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
++	cntrs->RxLenErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
++	cntrs->RxMaxMinLenErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
++	cntrs->RxICRCErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
++	cntrs->RxVCRCErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
++	cntrs->RxFlowCtrlErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
++	cntrs->RxBadFormatCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
++	cntrs->RxLinkProblemCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
++	cntrs->RxEBPCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
++	cntrs->RxLPCRCErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
++	cntrs->RxBufOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
++	cntrs->RxTIDFullErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
++	cntrs->RxTIDValidErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
++	cntrs->RxPKeyMismatchCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
++	cntrs->RxP0HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
++	cntrs->RxP1HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
++	cntrs->RxP2HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
++	cntrs->RxP3HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
++	cntrs->RxP4HdrEgrOvflCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
++	cntrs->RxP5HdrEgrOvflCnt = 0;
++	cntrs->RxP6HdrEgrOvflCnt = 0;
++	cntrs->RxP7HdrEgrOvflCnt = 0;
++	cntrs->RxP8HdrEgrOvflCnt = 0;
++	cntrs->RxP9HdrEgrOvflCnt = 0;
++	cntrs->RxP10HdrEgrOvflCnt = 0;
++	cntrs->RxP11HdrEgrOvflCnt = 0;
++	cntrs->RxP12HdrEgrOvflCnt = 0;
++	cntrs->RxP13HdrEgrOvflCnt = 0;
++	cntrs->RxP14HdrEgrOvflCnt = 0;
++	cntrs->RxP15HdrEgrOvflCnt = 0;
++	cntrs->RxP16HdrEgrOvflCnt = 0;
++	cntrs->IBStatusChangeCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
++	cntrs->IBLinkErrRecoveryCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
++	cntrs->IBLinkDownedCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
++	cntrs->IBSymbolErrCnt =
++		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
++	cntrs->RxVL15DroppedPktCnt = 0;
++	cntrs->RxOtherLocalPhyErrCnt = 0;
++	cntrs->PcieRetryBufDiagQwordCnt = 0;
++	cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
++	cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
++	cntrs->RxVlErrCnt = 0;
++	cntrs->RxDlidFltrCnt = 0;
++}
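
Every counter above is read through ipath_snap_cntr() at an offset produced by IPATH_CREG_OFFSET(). The macro itself is not part of this hunk; assuming it maps a field of the chip's counter layout to a register index with offsetof(), the idiom looks roughly like the stand-alone sketch below. The struct, macro and field names here are invented for illustration only.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical counter layout; the real one lives in the ipath headers. */
struct fake_cntrs {
	unsigned long long TxDataPktCnt;
	unsigned long long TxDwordCnt;
	unsigned long long RxDataPktCnt;
};

/* Offset of a counter within the block, in units of 64-bit registers. */
#define FAKE_CREG_OFFSET(field) \
	(offsetof(struct fake_cntrs, field) / sizeof(unsigned long long))

int main(void)
{
	printf("TxDataPktCnt is register %zu\n", FAKE_CREG_OFFSET(TxDataPktCnt));
	printf("RxDataPktCnt is register %zu\n", FAKE_CREG_OFFSET(RxDataPktCnt));
	return 0;
}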
 +
-+	{ VENDOR_ID_PETALYNX, DEVICE_ID_PETALYNX_MAXTER_REMOTE, quirk_petalynx_remote },
-+	
-+	{ 0, 0, 0 }
-+};
 +
-+int hidinput_mapping_quirks(struct hid_usage *usage, 
-+				   struct input_dev *input, 
-+				   unsigned long **bit, int *max)
++/* no interrupt fallback for these chips */
++static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
 +{
-+	struct hid_device *device = input_get_drvdata(input);
-+	int i = 0;
-+	
-+	while (hid_input_blacklist[i].quirk) {
-+		if (hid_input_blacklist[i].idVendor == device->vendor &&
-+				hid_input_blacklist[i].idProduct == device->product)
-+			return hid_input_blacklist[i].quirk(usage, input, bit, max);
-+		i++;
-+	}
 +	return 0;
 +}
 +
-+void hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value)
-+{
-+	struct input_dev *input;
 +
-+	input = field->hidinput->input;
+ /*
+- * On platforms using this chip, and not having ordered WC stores, we
+- * can get TXE parity errors due to speculative reads to the PIO buffers,
+- * and this, due to a chip bug can result in (many) false parity error
+- * reports.  So it's a debug print on those, and an info print on systems
+- * where the speculative reads don't occur.
+- * Because we can get lots of false errors, we have no upper limit
+- * on recovery attempts on those platforms.
++ * reset the XGXS (between serdes and IBC).  Slightly less intrusive
++ * than resetting the IBC or external link state, and useful in some
++ * cases to cause some retraining.  To do this right, we reset IBC
++ * as well.
+  */
+-static int ipath_pe_txe_recover(struct ipath_devdata *dd)
++static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
+ {
+-	if (ipath_unordered_wc())
+-		ipath_dbg("Recovering from TXE PIO parity error\n");
+-	else {
+-		int cnt = ++ipath_stats.sps_txeparity;
+-		if (cnt >= IPATH_MAX_PARITY_ATTEMPTS)  {
+-			if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
+-				ipath_dev_err(dd,
+-					"Too many attempts to recover from "
+-					"TXE parity, giving up\n");
+-			return 0;
+-		}
+-		dev_info(&dd->pcidev->dev,
+-			"Recovering from TXE PIO parity error\n");
++	u64 val, prev_val;
 +
-+	if (((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_5) && (usage->hid == 0x00090005))
-+		|| ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_7) && (usage->hid == 0x00090007))) {
-+		if (value) hid->quirks |=  HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
-+		else       hid->quirks &= ~HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
-+		return;
-+	}
++	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
++	val = prev_val | INFINIPATH_XGXS_RESET;
++	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
++			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
++	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
++			 dd->ipath_control);
++}
 +
-+	if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_B8) &&
-+			(usage->type == EV_REL) &&
-+			(usage->code == REL_WHEEL)) {
-+		hid->delayed_value = value;
-+		return;
-+	}
 +
-+	if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_B8) &&
-+			(usage->hid == 0x000100b8)) {
-+		input_event(input, EV_REL, value ? REL_HWHEEL : REL_WHEEL, hid->delayed_value);
-+		return;
-+	}
++static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which)
++{
++	int ret;
 +
-+	if ((hid->quirks & HID_QUIRK_INVERT_HWHEEL) && (usage->code == REL_HWHEEL)) {
-+		input_event(input, usage->type, usage->code, -value);
-+		return;
-+	}
++	switch (which) {
++	case IPATH_IB_CFG_LWID:
++		ret = dd->ipath_link_width_active;
++		break;
++	case IPATH_IB_CFG_SPD:
++		ret = dd->ipath_link_speed_active;
++		break;
++	case IPATH_IB_CFG_LWID_ENB:
++		ret = dd->ipath_link_width_enabled;
++		break;
++	case IPATH_IB_CFG_SPD_ENB:
++		ret = dd->ipath_link_speed_enabled;
++		break;
++	default:
++		ret =  -ENOTSUPP;
++		break;
+ 	}
+-	return 1;
++	return ret;
++}
 +
-+	if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_ON) && (usage->code == REL_WHEEL)) {
-+		input_event(input, usage->type, REL_HWHEEL, value);
-+		return;
-+	}
 +
-+	if ((hid->quirks & HID_QUIRK_APPLE_HAS_FN) && hidinput_apple_event(hid, input, usage, value))
-+		return;
++/* we assume range checking is already done, if needed */
++static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
++{
++	int ret = 0;
 +
-+	/* Handling MS keyboards special buttons */
-+	if (hid->quirks & HID_QUIRK_MICROSOFT_KEYS && 
-+			usage->hid == (HID_UP_MSVENDOR | 0xff05)) {
-+		int key = 0;
-+		static int last_key = 0;
-+		switch (value) {
-+			case 0x01: key = KEY_F14; break;
-+			case 0x02: key = KEY_F15; break;
-+			case 0x04: key = KEY_F16; break;
-+			case 0x08: key = KEY_F17; break;
-+			case 0x10: key = KEY_F18; break;
-+			default: break;
-+		}
-+		if (key) {
-+			input_event(input, usage->type, key, 1);
-+			last_key = key;
-+		} else {
-+			input_event(input, usage->type, last_key, 0);
-+		}
-+	}
++	if (which == IPATH_IB_CFG_LWID_ENB)
++		dd->ipath_link_width_enabled = val;
++	else if (which == IPATH_IB_CFG_SPD_ENB)
++		dd->ipath_link_speed_enabled = val;
++	else
++		ret = -ENOTSUPP;
++	return ret;
+ }
+ 
++static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
++{
++}
 +
-+	/* handle the temporary quirky mapping to HWHEEL */
-+	if (hid->quirks & HID_QUIRK_HWHEEL_WHEEL_INVERT &&
-+			usage->type == EV_REL && usage->code == REL_HWHEEL) {
-+		input_event(input, usage->type, REL_WHEEL, -value);
-+		return;
-+	}
++
++static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
++{
++	ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs),
++		ipath_ib_linktrstate(dd, ibcs));
++	return 0;
 +}
 +
 +
-diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
-index 0b27da7..5325d98 100644
---- a/drivers/hid/hid-input.c
-+++ b/drivers/hid/hid-input.c
-@@ -34,10 +34,10 @@
- #include <linux/hid.h>
- #include <linux/hid-debug.h>
- 
--static int hid_pb_fnmode = 1;
--module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644);
-+static int hid_apple_fnmode = 1;
-+module_param_named(pb_fnmode, hid_apple_fnmode, int, 0644);
- MODULE_PARM_DESC(pb_fnmode,
--		"Mode of fn key on PowerBooks (0 = disabled, 1 = fkeyslast, 2 = fkeysfirst)");
-+		"Mode of fn key on Apple keyboards (0 = disabled, 1 = fkeyslast, 2 = fkeysfirst)");
- 
- #define unk	KEY_UNKNOWN
- 
-@@ -86,10 +86,6 @@ static const struct {
- #define map_abs_clear(c)	do { map_abs(c); clear_bit(c, bit); } while (0)
- #define map_key_clear(c)	do { map_key(c); clear_bit(c, bit); } while (0)
- 
--/* hardware needing special handling due to colliding MSVENDOR page usages */
--#define IS_CHICONY_TACTICAL_PAD(x) (x->vendor == 0x04f2 && device->product == 0x0418)
--#define IS_MS_KB(x) (x->vendor == 0x045e && (x->product == 0x00db || x->product == 0x00f9))
+ /**
+  * ipath_init_iba6120_funcs - set up the chip-specific function pointers
+  * @dd: the infinipath device
+@@ -1407,7 +1666,7 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
+ 	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
+ 	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
+ 	/*
+-	 * this may get changed after we read the chip revision,
++	 * _f_put_tid may get changed after we read the chip revision,
+ 	 * but we start with the safe version for all revs
+ 	 */
+ 	dd->ipath_f_put_tid = ipath_pe_put_tid;
+@@ -1415,17 +1674,19 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
+ 	dd->ipath_f_setextled = ipath_setup_pe_setextled;
+ 	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
+ 	dd->ipath_f_free_irq = ipath_pe_free_irq;
 -
- #ifdef CONFIG_USB_HIDINPUT_POWERBOOK
- 
- struct hidinput_key_translation {
-@@ -98,20 +94,36 @@ struct hidinput_key_translation {
- 	u8 flags;
- };
- 
--#define POWERBOOK_FLAG_FKEY 0x01
-+#define APPLE_FLAG_FKEY 0x01
-+
-+static struct hidinput_key_translation apple_fn_keys[] = {
-+	{ KEY_F1,       KEY_BRIGHTNESSDOWN,     APPLE_FLAG_FKEY },
-+	{ KEY_F2,       KEY_BRIGHTNESSUP,       APPLE_FLAG_FKEY },
-+	{ KEY_F3,       KEY_CYCLEWINDOWS,       APPLE_FLAG_FKEY }, /* Exposé */
-+	{ KEY_F4,       KEY_FN_F4,              APPLE_FLAG_FKEY }, /* Dashboard */
-+	{ KEY_F5,       KEY_FN_F5 },
-+	{ KEY_F6,       KEY_FN_F6 },
-+	{ KEY_F7,       KEY_BACK,               APPLE_FLAG_FKEY },
-+	{ KEY_F8,       KEY_PLAYPAUSE,          APPLE_FLAG_FKEY },
-+	{ KEY_F9,       KEY_FORWARD,            APPLE_FLAG_FKEY },
-+	{ KEY_F10,      KEY_MUTE,               APPLE_FLAG_FKEY },
-+	{ KEY_F11,      KEY_VOLUMEDOWN,         APPLE_FLAG_FKEY },
-+	{ KEY_F12,      KEY_VOLUMEUP,           APPLE_FLAG_FKEY },
-+	{ }
-+};
+-	/* initialize chip-specific variables */
+ 	dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
++	dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
++	dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
++	dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
++	dd->ipath_f_config_ports = ipath_pe_config_ports;
++	dd->ipath_f_read_counters = ipath_pe_read_counters;
++	dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
++	dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
++	dd->ipath_f_config_jint = ipath_pe_config_jint;
++	dd->ipath_f_ib_updown = ipath_pe_ib_updown;
  
- static struct hidinput_key_translation powerbook_fn_keys[] = {
- 	{ KEY_BACKSPACE, KEY_DELETE },
--	{ KEY_F1,       KEY_BRIGHTNESSDOWN,     POWERBOOK_FLAG_FKEY },
--	{ KEY_F2,       KEY_BRIGHTNESSUP,       POWERBOOK_FLAG_FKEY },
--	{ KEY_F3,       KEY_MUTE,               POWERBOOK_FLAG_FKEY },
--	{ KEY_F4,       KEY_VOLUMEDOWN,         POWERBOOK_FLAG_FKEY },
--	{ KEY_F5,       KEY_VOLUMEUP,           POWERBOOK_FLAG_FKEY },
--	{ KEY_F6,       KEY_NUMLOCK,            POWERBOOK_FLAG_FKEY },
--	{ KEY_F7,       KEY_SWITCHVIDEOMODE,    POWERBOOK_FLAG_FKEY },
--	{ KEY_F8,       KEY_KBDILLUMTOGGLE,     POWERBOOK_FLAG_FKEY },
--	{ KEY_F9,       KEY_KBDILLUMDOWN,       POWERBOOK_FLAG_FKEY },
--	{ KEY_F10,      KEY_KBDILLUMUP,         POWERBOOK_FLAG_FKEY },
-+	{ KEY_F1,       KEY_BRIGHTNESSDOWN,     APPLE_FLAG_FKEY },
-+	{ KEY_F2,       KEY_BRIGHTNESSUP,       APPLE_FLAG_FKEY },
-+	{ KEY_F3,       KEY_MUTE,               APPLE_FLAG_FKEY },
-+	{ KEY_F4,       KEY_VOLUMEDOWN,         APPLE_FLAG_FKEY },
-+	{ KEY_F5,       KEY_VOLUMEUP,           APPLE_FLAG_FKEY },
-+	{ KEY_F6,       KEY_NUMLOCK,            APPLE_FLAG_FKEY },
-+	{ KEY_F7,       KEY_SWITCHVIDEOMODE,    APPLE_FLAG_FKEY },
-+	{ KEY_F8,       KEY_KBDILLUMTOGGLE,     APPLE_FLAG_FKEY },
-+	{ KEY_F9,       KEY_KBDILLUMDOWN,       APPLE_FLAG_FKEY },
-+	{ KEY_F10,      KEY_KBDILLUMUP,         APPLE_FLAG_FKEY },
- 	{ KEY_UP,       KEY_PAGEUP },
- 	{ KEY_DOWN,     KEY_PAGEDOWN },
- 	{ KEY_LEFT,     KEY_HOME },
-@@ -142,7 +154,7 @@ static struct hidinput_key_translation powerbook_numlock_keys[] = {
- 	{ }
- };
+-	/*
+-	 * setup the register offsets, since they are different for each
+-	 * chip
+-	 */
+-	dd->ipath_kregs = &ipath_pe_kregs;
+-	dd->ipath_cregs = &ipath_pe_cregs;
  
--static struct hidinput_key_translation powerbook_iso_keyboard[] = {
-+static struct hidinput_key_translation apple_iso_keyboard[] = {
- 	{ KEY_GRAVE,    KEY_102ND },
- 	{ KEY_102ND,    KEY_GRAVE },
- 	{ }
-@@ -160,39 +172,42 @@ static struct hidinput_key_translation *find_translation(struct hidinput_key_tra
- 	return NULL;
++	/* initialize chip-specific variables */
+ 	ipath_init_pe_variables(dd);
  }
  
--static int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
-+int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
- 		struct hid_usage *usage, __s32 value)
- {
- 	struct hidinput_key_translation *trans;
- 
- 	if (usage->code == KEY_FN) {
--		if (value) hid->quirks |=  HID_QUIRK_POWERBOOK_FN_ON;
--		else       hid->quirks &= ~HID_QUIRK_POWERBOOK_FN_ON;
-+		if (value) hid->quirks |=  HID_QUIRK_APPLE_FN_ON;
-+		else       hid->quirks &= ~HID_QUIRK_APPLE_FN_ON;
+diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
+index 9dd0bac..4471674 100644
+--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
++++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
+@@ -91,7 +91,7 @@ static int create_port0_egr(struct ipath_devdata *dd)
+ 	struct ipath_skbinfo *skbinfo;
+ 	int ret;
  
- 		input_event(input, usage->type, usage->code, value);
+-	egrcnt = dd->ipath_rcvegrcnt;
++	egrcnt = dd->ipath_p0_rcvegrcnt;
  
- 		return 1;
+ 	skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
+ 	if (skbinfo == NULL) {
+@@ -244,8 +244,7 @@ static int init_chip_first(struct ipath_devdata *dd,
+ 	 * cfgports.  We do still check and report a difference, if
+ 	 * not same (should be impossible).
+ 	 */
+-	dd->ipath_portcnt =
+-		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
++	dd->ipath_f_config_ports(dd, ipath_cfgports);
+ 	if (!ipath_cfgports)
+ 		dd->ipath_cfgports = dd->ipath_portcnt;
+ 	else if (ipath_cfgports <= dd->ipath_portcnt) {
+@@ -272,22 +271,7 @@ static int init_chip_first(struct ipath_devdata *dd,
+ 		goto done;
  	}
  
--	if (hid_pb_fnmode) {
-+	if (hid_apple_fnmode) {
- 		int do_translate;
- 
--		trans = find_translation(powerbook_fn_keys, usage->code);
-+		trans = find_translation((hid->product < 0x220 ||
-+					  hid->product >= 0x300) ?
-+					 powerbook_fn_keys : apple_fn_keys,
-+					 usage->code);
- 		if (trans) {
--			if (test_bit(usage->code, hid->pb_pressed_fn))
-+			if (test_bit(usage->code, hid->apple_pressed_fn))
- 				do_translate = 1;
--			else if (trans->flags & POWERBOOK_FLAG_FKEY)
-+			else if (trans->flags & APPLE_FLAG_FKEY)
- 				do_translate =
--					(hid_pb_fnmode == 2 &&  (hid->quirks & HID_QUIRK_POWERBOOK_FN_ON)) ||
--					(hid_pb_fnmode == 1 && !(hid->quirks & HID_QUIRK_POWERBOOK_FN_ON));
-+					(hid_apple_fnmode == 2 &&  (hid->quirks & HID_QUIRK_APPLE_FN_ON)) ||
-+					(hid_apple_fnmode == 1 && !(hid->quirks & HID_QUIRK_APPLE_FN_ON));
- 			else
--				do_translate = (hid->quirks & HID_QUIRK_POWERBOOK_FN_ON);
-+				do_translate = (hid->quirks & HID_QUIRK_APPLE_FN_ON);
- 
- 			if (do_translate) {
- 				if (value)
--					set_bit(usage->code, hid->pb_pressed_fn);
-+					set_bit(usage->code, hid->apple_pressed_fn);
- 				else
--					clear_bit(usage->code, hid->pb_pressed_fn);
-+					clear_bit(usage->code, hid->apple_pressed_fn);
- 
- 				input_event(input, usage->type, trans->to, value);
+-	dd->ipath_lastegrheads = kzalloc(sizeof(*dd->ipath_lastegrheads)
+-					 * dd->ipath_cfgports,
+-					 GFP_KERNEL);
+-	dd->ipath_lastrcvhdrqtails =
+-		kzalloc(sizeof(*dd->ipath_lastrcvhdrqtails)
+-			* dd->ipath_cfgports, GFP_KERNEL);
+-
+-	if (!dd->ipath_lastegrheads || !dd->ipath_lastrcvhdrqtails) {
+-		ipath_dev_err(dd, "Unable to allocate head arrays, "
+-			      "failing\n");
+-		ret = -ENOMEM;
+-		goto done;
+-	}
+-
+ 	pd = create_portdata0(dd);
+-
+ 	if (!pd) {
+ 		ipath_dev_err(dd, "Unable to allocate portdata for port "
+ 			      "0, failing\n");
+@@ -345,10 +329,10 @@ static int init_chip_first(struct ipath_devdata *dd,
+ 		       dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
  
-@@ -217,8 +232,8 @@ static int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
- 		}
- 	}
+ 	spin_lock_init(&dd->ipath_tid_lock);
+-
++	spin_lock_init(&dd->ipath_sendctrl_lock);
+ 	spin_lock_init(&dd->ipath_gpio_lock);
+ 	spin_lock_init(&dd->ipath_eep_st_lock);
+-	sema_init(&dd->ipath_eep_sem, 1);
++	mutex_init(&dd->ipath_eep_lock);
  
--	if (hid->quirks & HID_QUIRK_POWERBOOK_ISO_KEYBOARD) {
--		trans = find_translation(powerbook_iso_keyboard, usage->code);
-+	if (hid->quirks & HID_QUIRK_APPLE_ISO_KEYBOARD) {
-+		trans = find_translation(apple_iso_keyboard, usage->code);
- 		if (trans) {
- 			input_event(input, usage->type, trans->to, value);
- 			return 1;
-@@ -228,31 +243,35 @@ static int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
- 	return 0;
- }
+ done:
+ 	*pdp = pd;
+@@ -372,9 +356,9 @@ static int init_chip_reset(struct ipath_devdata *dd,
+ 	*pdp = dd->ipath_pd[0];
+ 	/* ensure chip does no sends or receives while we re-initialize */
+ 	dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
+-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
+-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
+-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
  
--static void hidinput_pb_setup(struct input_dev *input)
-+static void hidinput_apple_setup(struct input_dev *input)
+ 	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
+ 	if (dd->ipath_portcnt != rtmp)
+@@ -487,6 +471,7 @@ static void enable_chip(struct ipath_devdata *dd,
+ 			struct ipath_portdata *pd, int reinit)
  {
- 	struct hidinput_key_translation *trans;
+ 	u32 val;
++	unsigned long flags;
+ 	int i;
  
- 	set_bit(KEY_NUMLOCK, input->keybit);
+ 	if (!reinit)
+@@ -495,19 +480,21 @@ static void enable_chip(struct ipath_devdata *dd,
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ 			 dd->ipath_rcvctrl);
  
- 	/* Enable all needed keys */
-+	for (trans = apple_fn_keys; trans->from; trans++)
-+		set_bit(trans->to, input->keybit);
-+
- 	for (trans = powerbook_fn_keys; trans->from; trans++)
- 		set_bit(trans->to, input->keybit);
++	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+ 	/* Enable PIO send, and update of PIOavail regs to memory. */
+ 	dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
+ 		INFINIPATH_S_PIOBUFAVAILUPD;
+-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+-			 dd->ipath_sendctrl);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
++	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
  
- 	for (trans = powerbook_numlock_keys; trans->from; trans++)
- 		set_bit(trans->to, input->keybit);
+ 	/*
+ 	 * enable port 0 receive, and receive interrupt.  other ports
+ 	 * done as user opens and inits them.
+ 	 */
+-	dd->ipath_rcvctrl = INFINIPATH_R_TAILUPD |
+-		(1ULL << INFINIPATH_R_PORTENABLE_SHIFT) |
+-		(1ULL << INFINIPATH_R_INTRAVAIL_SHIFT);
++	dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
++		(1ULL << dd->ipath_r_portenable_shift) |
++		(1ULL << dd->ipath_r_intravail_shift);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ 			 dd->ipath_rcvctrl);
  
--	for (trans = powerbook_iso_keyboard; trans->from; trans++)
-+	for (trans = apple_iso_keyboard; trans->from; trans++)
- 		set_bit(trans->to, input->keybit);
+@@ -523,12 +510,11 @@ static void enable_chip(struct ipath_devdata *dd,
+ 	 */
+ 	val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
+ 	(void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
+-	dd->ipath_port0head = ipath_read_ureg32(dd, ur_rcvhdrtail, 0);
  
- }
- #else
--static inline int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
--		struct hid_usage *usage, __s32 value)
-+inline int hidinput_apple_event(struct hid_device *hid,
-+				       struct input_dev *input,
-+				       struct hid_usage *usage, __s32 value)
- {
- 	return 0;
- }
+ 	/* Initialize so we interrupt on next packet received */
+ 	(void)ipath_write_ureg(dd, ur_rcvhdrhead,
+ 			       dd->ipath_rhdrhead_intr_off |
+-			       dd->ipath_port0head, 0);
++			       dd->ipath_pd[0]->port_head, 0);
  
--static inline void hidinput_pb_setup(struct input_dev *input)
-+static inline void hidinput_apple_setup(struct input_dev *input)
- {
- }
- #endif
-@@ -343,7 +362,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 	/*
+ 	 * by now pioavail updates to memory should have occurred, so
+@@ -542,12 +528,8 @@ static void enable_chip(struct ipath_devdata *dd,
+ 		/*
+ 		 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
+ 		 */
+-		if (i > 3) {
+-			if (i & 1)
+-				val = dd->ipath_pioavailregs_dma[i - 1];
+-			else
+-				val = dd->ipath_pioavailregs_dma[i + 1];
+-		}
++		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
++			val = dd->ipath_pioavailregs_dma[i ^ 1];
+ 		else
+ 			val = dd->ipath_pioavailregs_dma[i];
+ 		dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
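
The rewritten errata-6641 workaround above folds the old even/odd branch into a single "i ^ 1" index (and additionally gates the swap on IPATH_SWAP_PIOBUFS). For i > 3 the XOR just flips the low bit, giving i - 1 for odd i and i + 1 for even i, exactly as before; a throwaway check of that identity:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Old form of the chip-bug-6641 swap vs. the new "i ^ 1" form. */
	for (unsigned i = 4; i < 64; i++) {
		unsigned old = (i & 1) ? i - 1 : i + 1;
		assert(old == (i ^ 1));
	}
	printf("i ^ 1 matches the old even/odd swap for all i > 3\n");
	return 0;
}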
+@@ -690,12 +672,13 @@ done:
+  */
+ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
  {
- 	struct input_dev *input = hidinput->input;
- 	struct hid_device *device = input_get_drvdata(input);
--	int max = 0, code;
-+	int max = 0, code, ret;
- 	unsigned long *bit = NULL;
+-	int ret = 0, i;
++	int ret = 0;
+ 	u32 val32, kpiobufs;
+ 	u32 piobufs, uports;
+ 	u64 val;
+ 	struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
+ 	gfp_t gfp_flags = GFP_USER | __GFP_COMP;
++	unsigned long flags;
  
- 	field->hidinput = hidinput;
-@@ -362,6 +381,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
- 		goto ignore;
- 	}
+ 	ret = init_housekeeping(dd, &pd, reinit);
+ 	if (ret)
+@@ -746,7 +729,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
+ 		kpiobufs = ipath_kpiobufs;
  
-+	/* handle input mappings for quirky devices */
-+	ret = hidinput_mapping_quirks(usage, input, &bit, &max);
-+	if (ret)
-+		goto mapped;
+ 	if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
+-		i = (int) piobufs -
++		int i = (int) piobufs -
+ 			(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
+ 		if (i < 0)
+ 			i = 0;
+@@ -827,8 +810,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+ 			 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
+-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+-			 INFINIPATH_S_PIOENABLE);
 +
- 	switch (usage->hid & HID_USAGE_PAGE) {
- 
- 		case HID_UP_UNDEFINED:
-@@ -549,14 +573,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
- 				case 0x000: goto ignore;
- 				case 0x034: map_key_clear(KEY_SLEEP);		break;
- 				case 0x036: map_key_clear(BTN_MISC);		break;
--				/*
--				 * The next three are reported by Belkin wireless
--				 * keyboard (1020:0006). These values are "reserved"
--				 * in HUT 1.12.
--				 */
--				case 0x03a: map_key_clear(KEY_SOUND);           break;
--				case 0x03b: map_key_clear(KEY_CAMERA);          break;
--				case 0x03c: map_key_clear(KEY_DOCUMENTS);       break;
- 
- 				case 0x040: map_key_clear(KEY_MENU);		break;
- 				case 0x045: map_key_clear(KEY_RADIO);		break;
-@@ -602,10 +618,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
- 				case 0x0e9: map_key_clear(KEY_VOLUMEUP);	break;
- 				case 0x0ea: map_key_clear(KEY_VOLUMEDOWN);	break;
- 
--				/* reserved in HUT 1.12. Reported on Petalynx remote */
--				case 0x0f6: map_key_clear(KEY_NEXT);		break;
--				case 0x0fa: map_key_clear(KEY_BACK);		break;
--
- 				case 0x182: map_key_clear(KEY_BOOKMARKS);	break;
- 				case 0x183: map_key_clear(KEY_CONFIG);		break;
- 				case 0x184: map_key_clear(KEY_WORDPROCESSOR);	break;
-@@ -665,51 +677,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
- 				case 0x28b: map_key_clear(KEY_FORWARDMAIL);	break;
- 				case 0x28c: map_key_clear(KEY_SEND);		break;
- 
--				/* Reported on a Cherry Cymotion keyboard */
--				case 0x301: map_key_clear(KEY_PROG1);		break;
--				case 0x302: map_key_clear(KEY_PROG2);		break;
--				case 0x303: map_key_clear(KEY_PROG3);		break;
--
--				/* Reported on certain Logitech wireless keyboards */
--				case 0x1001: map_key_clear(KEY_MESSENGER);	break;
--				case 0x1003: map_key_clear(KEY_SOUND);		break;
--				case 0x1004: map_key_clear(KEY_VIDEO);		break;
--				case 0x1005: map_key_clear(KEY_AUDIO);		break;
--				case 0x100a: map_key_clear(KEY_DOCUMENTS);	break;
--				case 0x1011: map_key_clear(KEY_PREVIOUSSONG);	break;
--				case 0x1012: map_key_clear(KEY_NEXTSONG);	break;
--				case 0x1013: map_key_clear(KEY_CAMERA);		break;
--				case 0x1014: map_key_clear(KEY_MESSENGER);	break;
--				case 0x1015: map_key_clear(KEY_RECORD);		break;
--				case 0x1016: map_key_clear(KEY_PLAYER);		break;
--				case 0x1017: map_key_clear(KEY_EJECTCD);	break;
--				case 0x1018: map_key_clear(KEY_MEDIA);          break;
--				case 0x1019: map_key_clear(KEY_PROG1);		break;
--				case 0x101a: map_key_clear(KEY_PROG2);		break;
--				case 0x101b: map_key_clear(KEY_PROG3);		break;
--				case 0x101f: map_key_clear(KEY_ZOOMIN);		break;
--				case 0x1020: map_key_clear(KEY_ZOOMOUT);	break;
--				case 0x1021: map_key_clear(KEY_ZOOMRESET);	break;
--				case 0x1023: map_key_clear(KEY_CLOSE);		break;
--				case 0x1027: map_key_clear(KEY_MENU);           break;
--				/* this one is marked as 'Rotate' */
--				case 0x1028: map_key_clear(KEY_ANGLE);		break;
--				case 0x1029: map_key_clear(KEY_SHUFFLE);	break;
--				case 0x102a: map_key_clear(KEY_BACK);           break;
--				case 0x102b: map_key_clear(KEY_CYCLEWINDOWS);   break;
--				case 0x1041: map_key_clear(KEY_BATTERY);	break;
--				case 0x1042: map_key_clear(KEY_WORDPROCESSOR);	break;
--				case 0x1043: map_key_clear(KEY_SPREADSHEET);	break;
--				case 0x1044: map_key_clear(KEY_PRESENTATION);	break;
--				case 0x1045: map_key_clear(KEY_UNDO);		break;
--				case 0x1046: map_key_clear(KEY_REDO);		break;
--				case 0x1047: map_key_clear(KEY_PRINT);		break;
--				case 0x1048: map_key_clear(KEY_SAVE);		break;
--				case 0x1049: map_key_clear(KEY_PROG1);		break;
--				case 0x104a: map_key_clear(KEY_PROG2);		break;
--				case 0x104b: map_key_clear(KEY_PROG3);		break;
--				case 0x104c: map_key_clear(KEY_PROG4);		break;
--
- 				default:    goto ignore;
- 			}
- 			break;
-@@ -736,63 +703,16 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
- 
- 		case HID_UP_MSVENDOR:
- 
--			/* Unfortunately, there are multiple devices which
--			 * emit usages from MSVENDOR page that require different
--			 * handling. If this list grows too much in the future,
--			 * more general handling will have to be introduced here
--			 * (i.e. another blacklist).
--			 */
--
--			/* Chicony Chicony KU-0418 tactical pad */
--			if (IS_CHICONY_TACTICAL_PAD(device)) {
--				set_bit(EV_REP, input->evbit);
--				switch(usage->hid & HID_USAGE) {
--					case 0xff01: map_key_clear(BTN_1);		break;
--					case 0xff02: map_key_clear(BTN_2);		break;
--					case 0xff03: map_key_clear(BTN_3);		break;
--					case 0xff04: map_key_clear(BTN_4);		break;
--					case 0xff05: map_key_clear(BTN_5);		break;
--					case 0xff06: map_key_clear(BTN_6);		break;
--					case 0xff07: map_key_clear(BTN_7);		break;
--					case 0xff08: map_key_clear(BTN_8);		break;
--					case 0xff09: map_key_clear(BTN_9);		break;
--					case 0xff0a: map_key_clear(BTN_A);		break;
--					case 0xff0b: map_key_clear(BTN_B);		break;
--					default:    goto ignore;
--				}
--
--			/* Microsoft Natural Ergonomic Keyboard 4000 */
--			} else if (IS_MS_KB(device)) {
--				switch(usage->hid & HID_USAGE) {
--					case 0xfd06:
--						map_key_clear(KEY_CHAT);
--						break;
--					case 0xfd07:
--						map_key_clear(KEY_PHONE);
--						break;
--					case 0xff05:
--						set_bit(EV_REP, input->evbit);
--						map_key_clear(KEY_F13);
--						set_bit(KEY_F14, input->keybit);
--						set_bit(KEY_F15, input->keybit);
--						set_bit(KEY_F16, input->keybit);
--						set_bit(KEY_F17, input->keybit);
--						set_bit(KEY_F18, input->keybit);
--					default:	goto ignore;
--				}
--			} else {
--				goto ignore;
--			}
--			break;
-+			goto ignore;
- 
--		case HID_UP_CUSTOM: /* Reported on Logitech and Powerbook USB keyboards */
-+		case HID_UP_CUSTOM: /* Reported on Logitech and Apple USB keyboards */
- 
- 			set_bit(EV_REP, input->evbit);
- 			switch(usage->hid & HID_USAGE) {
- 				case 0x003:
--					/* The fn key on Apple PowerBooks */
-+					/* The fn key on Apple USB keyboards */
- 					map_key_clear(KEY_FN);
--					hidinput_pb_setup(input);
-+					hidinput_apple_setup(input);
- 					break;
++	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
++	dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
++	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
  
- 				default:    goto ignore;
-@@ -800,38 +720,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
- 			break;
+ 	/*
+ 	 * before error clears, since we expect serdes pll errors during
+diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
+index c61f9da..92e58c9 100644
+--- a/drivers/infiniband/hw/ipath/ipath_intr.c
++++ b/drivers/infiniband/hw/ipath/ipath_intr.c
+@@ -683,7 +683,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+ 		for (i = 0; i < dd->ipath_cfgports; i++) {
+ 			struct ipath_portdata *pd = dd->ipath_pd[i];
+ 			if (i == 0) {
+-				hd = dd->ipath_port0head;
++				hd = pd->port_head;
+ 				tl = (u32) le64_to_cpu(
+ 					*dd->ipath_hdrqtailptr);
+ 			} else if (pd && pd->port_cnt &&
+@@ -693,7 +693,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+ 				 * except kernel
+ 				 */
+ 				tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
+-				if (tl == dd->ipath_lastrcvhdrqtails[i])
++				if (tl == pd->port_lastrcvhdrqtail)
+ 					continue;
+ 				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
+ 						       i);
+@@ -703,7 +703,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+ 			    (!hd && tl == dd->ipath_hdrqlast)) {
+ 				if (i == 0)
+ 					chkerrpkts = 1;
+-				dd->ipath_lastrcvhdrqtails[i] = tl;
++				pd->port_lastrcvhdrqtail = tl;
+ 				pd->port_hdrqfull++;
+ 				/* flush hdrqfull so that poll() sees it */
+ 				wmb();
+@@ -712,6 +712,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+ 		}
+ 	}
+ 	if (errs & INFINIPATH_E_RRCVEGRFULL) {
++		struct ipath_portdata *pd = dd->ipath_pd[0];
++
+ 		/*
+ 		 * since this is of less importance and not likely to
+ 		 * happen without also getting hdrfull, only count
+@@ -719,7 +721,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+ 		 * vs user)
+ 		 */
+ 		ipath_stats.sps_etidfull++;
+-		if (dd->ipath_port0head !=
++		if (pd->port_head !=
+ 		    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
+ 			chkerrpkts = 1;
+ 	}
+@@ -795,6 +797,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
+ {
+ 	int i, im;
+ 	__le64 val;
++	unsigned long flags;
  
- 		case HID_UP_LOGIVENDOR:
--			set_bit(EV_REP, input->evbit);
--			switch(usage->hid & HID_USAGE) {
--				/* Reported on Logitech Ultra X Media Remote */
--				case 0x004: map_key_clear(KEY_AGAIN);		break;
--				case 0x00d: map_key_clear(KEY_HOME);		break;
--				case 0x024: map_key_clear(KEY_SHUFFLE);		break;
--				case 0x025: map_key_clear(KEY_TV);		break;
--				case 0x026: map_key_clear(KEY_MENU);		break;
--				case 0x031: map_key_clear(KEY_AUDIO);		break;
--				case 0x032: map_key_clear(KEY_TEXT);		break;
--				case 0x033: map_key_clear(KEY_LAST);		break;
--				case 0x047: map_key_clear(KEY_MP3);		break;
--				case 0x048: map_key_clear(KEY_DVD);		break;
--				case 0x049: map_key_clear(KEY_MEDIA);		break;
--				case 0x04a: map_key_clear(KEY_VIDEO);		break;
--				case 0x04b: map_key_clear(KEY_ANGLE);		break;
--				case 0x04c: map_key_clear(KEY_LANGUAGE);	break;
--				case 0x04d: map_key_clear(KEY_SUBTITLE);	break;
--				case 0x051: map_key_clear(KEY_RED);		break;
--				case 0x052: map_key_clear(KEY_CLOSE);		break;
--
--				/* Reported on Petalynx Maxter remote */
--				case 0x05a: map_key_clear(KEY_TEXT);		break;
--				case 0x05b: map_key_clear(KEY_RED);		break;
--				case 0x05c: map_key_clear(KEY_GREEN);		break;
--				case 0x05d: map_key_clear(KEY_YELLOW);		break;
--				case 0x05e: map_key_clear(KEY_BLUE);		break;
--
--				default:    goto ignore;
--			}
--			break;
+ 	/* disable error interrupts, to avoid confusion */
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
+@@ -813,11 +816,14 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
+ 			 dd->ipath_control);
  
-+			goto ignore;
-+		
- 		case HID_UP_PID:
+ 	/* ensure pio avail updates continue */
++	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ 		 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
+ 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+-		 dd->ipath_sendctrl);
++			 dd->ipath_sendctrl);
++	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
  
- 			switch(usage->hid & HID_USAGE) {
-@@ -858,6 +749,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
- 			break;
+ 	/*
+ 	 * We just enabled pioavailupdate, so dma copy is almost certainly
+@@ -825,8 +831,8 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
+ 	 */
+ 	for (i = 0; i < dd->ipath_pioavregs; i++) {
+ 		/* deal with 6110 chip bug */
+-		im = i > 3 ? ((i&1) ? i-1 : i+1) : i;
+-		val = ipath_read_kreg64(dd, (0x1000/sizeof(u64))+im);
++		im = i > 3 ? i ^ 1 : i;
++		val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
+ 		dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
+ 			= le64_to_cpu(val);
  	}
+@@ -849,7 +855,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
  
-+mapped:
- 	if (device->quirks & HID_QUIRK_MIGHTYMOUSE) {
- 		if (usage->hid == HID_GD_Z)
- 			map_rel(REL_HWHEEL);
-@@ -867,9 +759,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
- 			map_key(BTN_1);
- 	}
+ /* this is separate to allow for better optimization of ipath_intr() */
  
--	if ((device->quirks & (HID_QUIRK_2WHEEL_MOUSE_HACK_7 | HID_QUIRK_2WHEEL_MOUSE_HACK_5)) &&
--		 (usage->type == EV_REL) && (usage->code == REL_WHEEL))
--			set_bit(REL_HWHEEL, bit);
-+	if ((device->quirks & (HID_QUIRK_2WHEEL_MOUSE_HACK_7 | HID_QUIRK_2WHEEL_MOUSE_HACK_5 |
-+			HID_QUIRK_2WHEEL_MOUSE_HACK_B8)) && (usage->type == EV_REL) &&
-+			(usage->code == REL_WHEEL))
-+		set_bit(REL_HWHEEL, bit);
+-static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
++static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
+ {
+ 	/*
+ 	 * sometimes happen during driver init and unload, don't want
+@@ -877,7 +883,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
+ 				dd->ipath_f_free_irq(dd);
+ 			}
+ 		}
+-		if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) {
++		if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
+ 			ipath_dev_err(dd, "%u unexpected interrupts, "
+ 				      "disabling interrupts completely\n",
+ 				      *unexpectp);
+@@ -892,7 +898,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
+ 			  "ignoring\n");
+ }
  
- 	if (((device->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_5) && (usage->hid == 0x00090005))
- 		|| ((device->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_7) && (usage->hid == 0x00090007)))
-@@ -960,25 +853,8 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
- 	if (!usage->type)
- 		return;
+-static void ipath_bad_regread(struct ipath_devdata *dd)
++static noinline void ipath_bad_regread(struct ipath_devdata *dd)
+ {
+ 	static int allbits;
  
--	if (((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_5) && (usage->hid == 0x00090005))
--		|| ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_7) && (usage->hid == 0x00090007))) {
--		if (value) hid->quirks |=  HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
--		else       hid->quirks &= ~HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
--		return;
--	}
--
--	if ((hid->quirks & HID_QUIRK_INVERT_HWHEEL) && (usage->code == REL_HWHEEL)) {
--		input_event(input, usage->type, usage->code, -value);
--		return;
--	}
+@@ -920,31 +926,9 @@ static void ipath_bad_regread(struct ipath_devdata *dd)
+ 	}
+ }
+ 
+-static void handle_port_pioavail(struct ipath_devdata *dd)
+-{
+-	u32 i;
+-	/*
+-	 * start from port 1, since for now port 0  is never using
+-	 * wait_event for PIO
+-	 */
+-	for (i = 1; dd->ipath_portpiowait && i < dd->ipath_cfgports; i++) {
+-		struct ipath_portdata *pd = dd->ipath_pd[i];
 -
--	if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_ON) && (usage->code == REL_WHEEL)) {
--		input_event(input, usage->type, REL_HWHEEL, value);
--		return;
+-		if (pd && pd->port_cnt &&
+-		    dd->ipath_portpiowait & (1U << i)) {
+-			clear_bit(i, &dd->ipath_portpiowait);
+-			if (test_bit(IPATH_PORT_WAITING_PIO,
+-				     &pd->port_flag)) {
+-				clear_bit(IPATH_PORT_WAITING_PIO,
+-					  &pd->port_flag);
+-				wake_up_interruptible(&pd->port_wait);
+-			}
+-		}
 -	}
+-}
 -
--	if ((hid->quirks & HID_QUIRK_POWERBOOK_HAS_FN) && hidinput_pb_event(hid, input, usage, value))
--		return;
-+	/* handle input events for quirky devices */
-+	hidinput_event_quirks(hid, field, usage, value);
+ static void handle_layer_pioavail(struct ipath_devdata *dd)
+ {
++	unsigned long flags;
+ 	int ret;
  
- 	if (usage->hat_min < usage->hat_max || usage->hat_dir) {
- 		int hat_dir = usage->hat_dir;
-@@ -1039,25 +915,6 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
- 		return;
- 	}
+ 	ret = ipath_ib_piobufavail(dd->verbs_dev);
+@@ -953,9 +937,12 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
  
--	/* Handling MS keyboards special buttons */
--	if (IS_MS_KB(hid) && usage->hid == (HID_UP_MSVENDOR | 0xff05)) {
--		int key = 0;
--		static int last_key = 0;
--		switch (value) {
--			case 0x01: key = KEY_F14; break;
--			case 0x02: key = KEY_F15; break;
--			case 0x04: key = KEY_F16; break;
--			case 0x08: key = KEY_F17; break;
--			case 0x10: key = KEY_F18; break;
--			default: break;
--		}
--		if (key) {
--			input_event(input, usage->type, key, 1);
--			last_key = key;
--		} else {
--			input_event(input, usage->type, last_key, 0);
--		}
--	}
- 	/* report the usage code as scancode if the key status has changed */
- 	if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
- 		input_event(input, EV_MSC, MSC_SCAN, usage->hid);
-diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
-index c557d70..7160fa6 100644
---- a/drivers/hid/usbhid/Kconfig
-+++ b/drivers/hid/usbhid/Kconfig
-@@ -25,12 +25,13 @@ comment "Input core support is needed for USB HID input layer or HIDBP support"
- 	depends on USB_HID && INPUT=n
+ 	return;
+ set:
+-	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
++	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
++	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ 			 dd->ipath_sendctrl);
++	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+ }
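
handle_layer_pioavail, like the other converted call sites in this patch, now does the sendctrl update as a locked read-modify-write of the shadow value, writes the whole shadow to the register, and reads kr_scratch to flush the write before dropping ipath_sendctrl_lock. The snippet below is only a user-space analogue of that shape (a pthread mutex instead of spin_lock_irqsave, plain variables instead of MMIO, made-up bit names), sketching why the shadow update and the register write stay under one lock.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t sendctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t sendctrl_shadow;		/* software copy of the register */
static volatile uint64_t fake_kr_sendctrl;	/* stand-in for the MMIO register */

#define S_PIOINTBUFAVAIL (1ULL << 2)		/* invented bit position */

static void set_piointbufavail(int on)
{
	pthread_mutex_lock(&sendctrl_lock);
	if (on)
		sendctrl_shadow |= S_PIOINTBUFAVAIL;
	else
		sendctrl_shadow &= ~S_PIOINTBUFAVAIL;
	fake_kr_sendctrl = sendctrl_shadow;	/* write the whole shadow out */
	/* the real code re-reads kr_scratch here to push the write to the chip */
	pthread_mutex_unlock(&sendctrl_lock);
}

int main(void)
{
	set_piointbufavail(1);
	printf("sendctrl = %#llx\n", (unsigned long long)fake_kr_sendctrl);
	set_piointbufavail(0);
	printf("sendctrl = %#llx\n", (unsigned long long)fake_kr_sendctrl);
	return 0;
}

Without a lock, two paths updating different bits could each write a stale full word over the other's change, which is exactly the race the old bare set_bit()/clear_bit() on ipath_sendctrl left open once the whole register was written back.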
  
- config USB_HIDINPUT_POWERBOOK
--	bool "Enable support for iBook/PowerBook/MacBook/MacBookPro special keys"
-+	bool "Enable support for Apple laptop/aluminum USB special keys"
- 	default n
- 	depends on USB_HID
- 	help
- 	  Say Y here if you want support for the special keys (Fn, Numlock) on
--	  Apple iBooks, PowerBooks, MacBooks and MacBook Pros.
-+	  Apple iBooks, PowerBooks, MacBooks, MacBook Pros and aluminum USB
-+	  keyboards.
+ /*
+@@ -969,7 +956,15 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
+ 	int i;
+ 	int rcvdint = 0;
  
- 	  If unsure, say N.
+-	/* test_bit below needs this... */
++	/*
++	 * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
++	 * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
++	 * would both like timely updates of the bits so that
++	 * we don't pass them by unnecessarily.  the rmb()
++	 * here ensures that we see them promptly -- the
++	 * corresponding wmb()'s are in ipath_poll_urgent()
++	 * and ipath_poll_next()...
++	 */
+ 	rmb();
+ 	portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
+ 		 dd->ipath_i_rcvavail_mask)
+@@ -980,7 +975,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
+ 		if (portr & (1 << i) && pd && pd->port_cnt) {
+ 			if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
+ 					       &pd->port_flag)) {
+-				clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
++				clear_bit(i + dd->ipath_r_intravail_shift,
+ 					  &dd->ipath_rcvctrl);
+ 				wake_up_interruptible(&pd->port_wait);
+ 				rcvdint = 1;
+@@ -1039,7 +1034,7 @@ irqreturn_t ipath_intr(int irq, void *data)
+ 		goto bail;
+ 	}
  
-diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
-index a255285..b77b61e 100644
---- a/drivers/hid/usbhid/hid-quirks.c
-+++ b/drivers/hid/usbhid/hid-quirks.c
-@@ -19,6 +19,7 @@
+-	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
++	istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
  
- #define USB_VENDOR_ID_A4TECH		0x09da
- #define USB_DEVICE_ID_A4TECH_WCP32PU	0x0006
-+#define USB_DEVICE_ID_A4TECH_X5_005D	0x000a
+ 	if (unlikely(!istat)) {
+ 		ipath_stats.sps_nullintr++;
+@@ -1180,7 +1175,7 @@ irqreturn_t ipath_intr(int irq, void *data)
+ 	 * for receive are at the bottom.
+ 	 */
+ 	if (chk0rcv) {
+-		ipath_kreceive(dd);
++		ipath_kreceive(dd->ipath_pd[0]);
+ 		istat &= ~port0rbits;
+ 	}
  
- #define USB_VENDOR_ID_AASHIMA		0x06d6
- #define USB_DEVICE_ID_AASHIMA_GAMEPAD	0x0025
-@@ -28,6 +29,9 @@
- #define USB_DEVICE_ID_ACECAD_FLAIR	0x0004
- #define USB_DEVICE_ID_ACECAD_302	0x0008
+@@ -1191,12 +1186,14 @@ irqreturn_t ipath_intr(int irq, void *data)
+ 		handle_urcv(dd, istat);
  
-+#define USB_VENDOR_ID_ADS_TECH 		0x06e1
-+#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X	0xa155
+ 	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
+-		clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
++		unsigned long flags;
 +
- #define USB_VENDOR_ID_AIPTEK		0x08ca
- #define USB_DEVICE_ID_AIPTEK_01		0x0001
- #define USB_DEVICE_ID_AIPTEK_10		0x0010
-@@ -59,6 +63,9 @@
- #define USB_DEVICE_ID_APPLE_GEYSER4_ANSI	0x021a
- #define USB_DEVICE_ID_APPLE_GEYSER4_ISO	0x021b
- #define USB_DEVICE_ID_APPLE_GEYSER4_JIS	0x021c
-+#define USB_DEVICE_ID_APPLE_ALU_ANSI	0x0220
-+#define USB_DEVICE_ID_APPLE_ALU_ISO	0x0221
-+#define USB_DEVICE_ID_APPLE_ALU_JIS	0x0222
- #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY	0x030a
- #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY	0x030b
- #define USB_DEVICE_ID_APPLE_IRCONTROL4	0x8242
-@@ -94,6 +101,9 @@
- #define USB_DEVICE_ID_CODEMERCS_IOW_FIRST	0x1500
- #define USB_DEVICE_ID_CODEMERCS_IOW_LAST	0x15ff
++		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
++		dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ 				 dd->ipath_sendctrl);
+-
+-		if (dd->ipath_portpiowait)
+-			handle_port_pioavail(dd);
++		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
  
-+#define USB_VENDOR_ID_CYGNAL		0x10c4
-+#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X	0x818a
-+
- #define USB_VENDOR_ID_CYPRESS		0x04b4
- #define USB_DEVICE_ID_CYPRESS_MOUSE	0x0001
- #define USB_DEVICE_ID_CYPRESS_HIDCOM	0x5500
-@@ -114,6 +124,9 @@
- #define USB_VENDOR_ID_ESSENTIAL_REALITY	0x0d7f
- #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
+ 		handle_layer_pioavail(dd);
+ 	}
+diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
+index 8786dd7..4cc0f95 100644
+--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
++++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
+@@ -41,6 +41,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
+ #include <linux/dma-mapping.h>
++#include <linux/mutex.h>
+ #include <asm/io.h>
+ #include <rdma/ib_verbs.h>
  
-+#define USB_VENDOR_ID_EZKEY 		0x0518
-+#define USB_DEVICE_ID_BTC_8193		0x0002
-+
- #define USB_VENDOR_ID_GAMERON		0x0810
- #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR	0x0001
+@@ -140,6 +141,11 @@ struct ipath_portdata {
+ 	u32 port_pionowait;
+ 	/* total number of rcvhdrqfull errors */
+ 	u32 port_hdrqfull;
++	/*
++	 * Used to suppress multiple instances of same
++	 * port staying stuck at same point.
++	 */
++	u32 port_lastrcvhdrqtail;
+ 	/* saved total number of rcvhdrqfull errors for poll edge trigger */
+ 	u32 port_hdrqfull_poll;
+ 	/* total number of polled urgent packets */
+@@ -148,6 +154,7 @@ struct ipath_portdata {
+ 	u32 port_urgent_poll;
+ 	/* pid of process using this port */
+ 	pid_t port_pid;
++	pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
+ 	/* same size as task_struct .comm[] */
+ 	char port_comm[16];
+ 	/* pkeys set by this use of this port */
+@@ -166,6 +173,8 @@ struct ipath_portdata {
+ 	u32 active_slaves;
+ 	/* Type of packets or conditions we want to poll for */
+ 	u16 poll_type;
++	/* port rcvhdrq head offset */
++	u32 port_head;
+ };
  
-@@ -134,6 +147,9 @@
- #define USB_DEVICE_ID_GOGOPEN		0x00ce
- #define USB_DEVICE_ID_PENPOWER		0x00f4
+ struct sk_buff;
+@@ -182,6 +191,22 @@ struct ipath_skbinfo {
+ 	dma_addr_t phys;
+ };
  
-+#define USB_VENDOR_ID_GRETAGMACBETH	0x0971
-+#define USB_DEVICE_ID_GRETAGMACBETH_HUEY	0x2005
++/*
++ * Possible IB config parameters for ipath_f_get/set_ib_cfg()
++ */
++#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
++#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
++#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
++#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
++#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
++#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
++#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
++#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
++#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
++#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
++#define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
 +
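
These IPATH_IB_CFG_* values are the "which" argument of the new per-chip ipath_f_get_ib_cfg/ipath_f_set_ib_cfg hooks (see ipath_pe_get_ib_cfg earlier in the patch). Purely as illustration, a user-space mock of that dispatch, with invented struct and function names and only the four keys the 6120 handles:

#include <errno.h>
#include <stdio.h>

#define IB_CFG_LWID_ENB 2	/* Get/set allowed Link-width */
#define IB_CFG_LWID     3	/* Get currently active Link-width */
#define IB_CFG_SPD_ENB  4	/* Get/set allowed Link speeds */
#define IB_CFG_SPD      5	/* Get current Link speed */

struct mock_dev {
	unsigned char link_width_enabled, link_width_active;
	unsigned char link_speed_enabled, link_speed_active;
};

static int mock_get_ib_cfg(struct mock_dev *dd, int which)
{
	switch (which) {
	case IB_CFG_LWID:     return dd->link_width_active;
	case IB_CFG_SPD:      return dd->link_speed_active;
	case IB_CFG_LWID_ENB: return dd->link_width_enabled;
	case IB_CFG_SPD_ENB:  return dd->link_speed_enabled;
	default:              return -ENOTSUP;	/* the driver returns -ENOTSUPP */
	}
}

int main(void)
{
	struct mock_dev dd = { .link_width_enabled = 3, .link_width_active = 1,
			       .link_speed_enabled = 1, .link_speed_active = 1 };

	printf("active width: %d\n", mock_get_ib_cfg(&dd, IB_CFG_LWID));
	printf("unknown key:  %d\n", mock_get_ib_cfg(&dd, 99));
	return 0;
}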
- #define USB_VENDOR_ID_GRIFFIN		0x077d
- #define USB_DEVICE_ID_POWERMATE		0x0410
- #define USB_DEVICE_ID_SOUNDKNOB		0x04AA
-@@ -278,7 +294,9 @@
- #define USB_DEVICE_ID_LOGITECH_HARMONY_62 0xc14d
- #define USB_DEVICE_ID_LOGITECH_HARMONY_63 0xc14e
- #define USB_DEVICE_ID_LOGITECH_HARMONY_64 0xc14f
-+#define USB_DEVICE_ID_LOGITECH_EXTREME_3D	0xc215
- #define USB_DEVICE_ID_LOGITECH_WHEEL	0xc294
-+#define USB_DEVICE_ID_LOGITECH_ELITE_KBD	0xc30a
- #define USB_DEVICE_ID_LOGITECH_KBD	0xc311
- #define USB_DEVICE_ID_S510_RECEIVER	0xc50c
- #define USB_DEVICE_ID_S510_RECEIVER_2	0xc517
-@@ -296,6 +314,12 @@
- 
- #define USB_VENDOR_ID_MICROSOFT		0x045e
- #define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
-+#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
-+#define USB_DEVICE_ID_MS_NE4K		0x00db
-+#define USB_DEVICE_ID_MS_LK6K		0x00f9
 +
-+#define USB_VENDOR_ID_MONTEREY		0x0566
-+#define USB_DEVICE_ID_GENIUS_KB29E	0x3004
- 
- #define USB_VENDOR_ID_NCR		0x0404
- #define USB_DEVICE_ID_NCR_FIRST		0x0300
-@@ -324,6 +348,9 @@
- #define USB_VENDOR_ID_SAITEK		0x06a3
- #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
+ struct ipath_devdata {
+ 	struct list_head ipath_list;
  
-+#define USB_VENDOR_ID_SAMSUNG		0x0419
-+#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001
+@@ -222,6 +247,8 @@ struct ipath_devdata {
+ 	struct _ipath_layer ipath_layer;
+ 	/* setup intr */
+ 	int (*ipath_f_intrsetup)(struct ipath_devdata *);
++	/* fallback to alternate interrupt type if possible */
++	int (*ipath_f_intr_fallback)(struct ipath_devdata *);
+ 	/* setup on-chip bus config */
+ 	int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
+ 	/* hard reset chip */
+@@ -244,6 +271,18 @@ struct ipath_devdata {
+ 	int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
+ 	/* free irq */
+ 	void (*ipath_f_free_irq)(struct ipath_devdata *);
++	struct ipath_message_header *(*ipath_f_get_msgheader)
++					(struct ipath_devdata *, __le32 *);
++	void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
++	int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
++	int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
++	void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
++	void (*ipath_f_read_counters)(struct ipath_devdata *,
++					struct infinipath_counters *);
++	void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
++	/* per chip actions needed for IB Link up/down changes */
++	int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
 +
- #define USB_VENDOR_ID_SONY			0x054c
- #define USB_DEVICE_ID_SONY_PS3_CONTROLLER	0x0268
- 
-@@ -368,6 +395,7 @@ static const struct hid_blacklist {
- } hid_blacklist[] = {
+ 	struct ipath_ibdev *verbs_dev;
+ 	struct timer_list verbs_timer;
+ 	/* total dwords sent (summed from counter) */
+@@ -313,22 +352,12 @@ struct ipath_devdata {
+ 	 * supports, less gives more pio bufs/port, etc.
+ 	 */
+ 	u32 ipath_cfgports;
+-	/* port0 rcvhdrq head offset */
+-	u32 ipath_port0head;
+ 	/* count of port 0 hdrqfull errors */
+ 	u32 ipath_p0_hdrqfull;
++	/* port 0 number of receive eager buffers */
++	u32 ipath_p0_rcvegrcnt;
  
- 	{ USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU, HID_QUIRK_2WHEEL_MOUSE_HACK_7 },
-+	{ USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D, HID_QUIRK_2WHEEL_MOUSE_HACK_B8 },
- 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE, HID_QUIRK_2WHEEL_MOUSE_HACK_5 },
+ 	/*
+-	 * (*cfgports) used to suppress multiple instances of same
+-	 * port staying stuck at same point
+-	 */
+-	u32 *ipath_lastrcvhdrqtails;
+-	/*
+-	 * (*cfgports) used to suppress multiple instances of same
+-	 * port staying stuck at same point
+-	 */
+-	u32 *ipath_lastegrheads;
+-	/*
+ 	 * index of last piobuffer we used.  Speeds up searching, by
+ 	 * starting at this point.  Doesn't matter if multiple cpu's use and
+ 	 * update, last updater is only write that matters.  Whenever it
+@@ -367,14 +396,15 @@ struct ipath_devdata {
+ 	unsigned long ipath_wc_len;
+ 	/* ref count for each pkey */
+ 	atomic_t ipath_pkeyrefs[4];
+-	/* shadow copy of all exptids physaddr; used only by funcsim */
+-	u64 *ipath_tidsimshadow;
+ 	/* shadow copy of struct page *'s for exp tid pages */
+ 	struct page **ipath_pageshadow;
+ 	/* shadow copy of dma handles for exp tid pages */
+ 	dma_addr_t *ipath_physshadow;
+-	/* lock to workaround chip bug 9437 */
++	u64 __iomem *ipath_egrtidbase;
++	/* lock to workaround chip bug 9437 and others */
++	spinlock_t ipath_kernel_tid_lock;
+ 	spinlock_t ipath_tid_lock;
++	spinlock_t ipath_sendctrl_lock;
  
- 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS },
-@@ -390,6 +418,9 @@ static const struct hid_blacklist {
- 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT },
- 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV, HID_QUIRK_HIDINPUT },
+ 	/*
+ 	 * IPATH_STATUS_*,
+@@ -395,6 +425,8 @@ struct ipath_devdata {
+ 	void *ipath_dummy_hdrq;	/* used after port close */
+ 	dma_addr_t ipath_dummy_hdrq_phys;
  
-+	{ USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193, HID_QUIRK_HWHEEL_WHEEL_INVERT },
++	unsigned long ipath_ureg_align; /* user register alignment */
 +
-+	{ USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20, HID_QUIRK_IGNORE },
-@@ -402,6 +433,7 @@ static const struct hid_blacklist {
- 	{ USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM, HID_QUIRK_IGNORE},
- 	{ USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_CIDC, 0x0103, HID_QUIRK_IGNORE },
-+	{ USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE, HID_QUIRK_IGNORE },
-@@ -423,6 +455,7 @@ static const struct hid_blacklist {
- 	{ USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER, HID_QUIRK_IGNORE },
-+	{ USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90, HID_QUIRK_IGNORE },
-@@ -516,14 +549,18 @@ static const struct hid_blacklist {
- 	{ USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR, HID_QUIRK_IGNORE },
- 	{ USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302, HID_QUIRK_IGNORE },
- 
-+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
- 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
+ 	/*
+ 	 * Shadow copies of registers; size indicates read access size.
+ 	 * Most of them are readonly, but some are write-only register,
+@@ -456,8 +488,6 @@ struct ipath_devdata {
+ 	unsigned long ipath_rcvctrl;
+ 	/* shadow kr_sendctrl */
+ 	unsigned long ipath_sendctrl;
+-	/* ports waiting for PIOavail intr */
+-	unsigned long ipath_portpiowait;
+ 	unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */
  
-+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS },
-+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS },
+ 	/* value we put in kr_rcvhdrcnt */
+@@ -550,12 +580,26 @@ struct ipath_devdata {
+ 	u8 ipath_minrev;
+ 	/* board rev, from ipath_revision */
+ 	u8 ipath_boardrev;
 +
- 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE, HID_QUIRK_MIGHTYMOUSE | HID_QUIRK_INVERT_HWHEEL },
++	u8 ipath_r_portenable_shift;
++	u8 ipath_r_intravail_shift;
++	u8 ipath_r_tailupd_shift;
++	u8 ipath_r_portcfg_shift;
++
+ 	/* unit # of this chip, if present */
+ 	int ipath_unit;
+ 	/* saved for restore after reset */
+ 	u8 ipath_pci_cacheline;
+ 	/* LID mask control */
+ 	u8 ipath_lmc;
++	/* link width supported */
++	u8 ipath_link_width_supported;
++	/* link speed supported */
++	u8 ipath_link_speed_supported;
++	u8 ipath_link_width_enabled;
++	u8 ipath_link_speed_enabled;
++	u8 ipath_link_width_active;
++	u8 ipath_link_speed_active;
+ 	/* Rx Polarity inversion (compensate for ~tx on partner) */
+ 	u8 ipath_rx_pol_inv;
  
- 	{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
- 	{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
+@@ -590,6 +634,8 @@ struct ipath_devdata {
+ 	 */
+ 	u32 ipath_i_rcvavail_mask;
+ 	u32 ipath_i_rcvurg_mask;
++	u16 ipath_i_rcvurg_shift;
++	u16 ipath_i_rcvavail_shift;
  
--	{ USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER, HID_QUIRK_SONY_PS3_CONTROLLER },
-+	{ USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER, HID_QUIRK_SONY_PS3_CONTROLLER | HID_QUIRK_HIDDEV },
+ 	/*
+ 	 * Register bits for selecting i2c direction and values, used for
+@@ -603,6 +649,29 @@ struct ipath_devdata {
+ 	/* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
+ 	spinlock_t ipath_gpio_lock;
  
- 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
-@@ -531,7 +568,9 @@ static const struct hid_blacklist {
- 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
-+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET },
-+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
-@@ -540,19 +579,22 @@ static const struct hid_blacklist {
++	/*
++	 * IB link and linktraining states and masks that vary per chip in
++	 * some way.  Set at init, to avoid each IB status change interrupt
++	 */
++	u8 ibcs_ls_shift;
++	u8 ibcs_lts_mask;
++	u32 ibcs_mask;
++	u32 ib_init;
++	u32 ib_arm;
++	u32 ib_active;
++
++	u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
++
++	/*
++	 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontrol
++	 * reg. Changes for IBA7220
++	 */
++	u8 ibcc_lic_mask; /* LinkInitCmd */
++	u8 ibcc_lc_shift; /* LinkCmd */
++	u8 ibcc_mpl_shift; /* Maxpktlen */
++
++	u8 delay_mult;
++
+ 	/* used to override LED behavior */
+ 	u8 ipath_led_override;  /* Substituted for normal value, if non-zero */
+ 	u16 ipath_led_override_timeoff; /* delta to next timer event */
+@@ -616,7 +685,7 @@ struct ipath_devdata {
+ 	/* control access to actual counters, timer */
+ 	spinlock_t ipath_eep_st_lock;
+ 	/* control high-level access to EEPROM */
+-	struct semaphore ipath_eep_sem;
++	struct mutex ipath_eep_lock;
+ 	/* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
+ 	uint64_t ipath_traffic_wds;
+ 	/* active time is kept in seconds, but logged in hours */
+@@ -630,6 +699,10 @@ struct ipath_devdata {
+ 	 * each of the counters to increment.
+ 	 */
+ 	struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
++
++	/* interrupt mitigation reload register info */
++	u16 ipath_jint_idle_ticks;	/* idle clock ticks */
++	u16 ipath_jint_max_packets;	/* max packets across all ports */
+ };
  
- 	{ USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ /* Private data for file operations */
+@@ -690,7 +763,7 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
  
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
--	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_APPLE_ISO_KEYBOARD},
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_APPLE_ISO_KEYBOARD},
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_APPLE_ISO_KEYBOARD},
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI, HID_QUIRK_APPLE_HAS_FN },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS, HID_QUIRK_APPLE_HAS_FN },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
-+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+ int ipath_parse_ushort(const char *str, unsigned short *valp);
  
- 	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658, HID_QUIRK_RESET_LEDS },
- 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KBD, HID_QUIRK_RESET_LEDS },
-@@ -638,10 +680,14 @@ static const struct hid_rdesc_blacklist {
- 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
- 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_RDESC_LOGITECH },
+-void ipath_kreceive(struct ipath_devdata *);
++void ipath_kreceive(struct ipath_portdata *);
+ int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
+ int ipath_reset_device(int);
+ void ipath_get_faststats(unsigned long);
+@@ -698,6 +771,8 @@ int ipath_set_linkstate(struct ipath_devdata *, u8);
+ int ipath_set_mtu(struct ipath_devdata *, u16);
+ int ipath_set_lid(struct ipath_devdata *, u32, u8);
+ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
++void ipath_enable_armlaunch(struct ipath_devdata *);
++void ipath_disable_armlaunch(struct ipath_devdata *);
  
-+	{ USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E, HID_QUIRK_RDESC_BUTTON_CONSUMER },
-+
- 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_RDESC_MACBOOK_JIS },
+ /* for use in system calls, where we want to know device type, etc. */
+ #define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
+@@ -744,9 +819,15 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
+ 		 * are 64bit */
+ #define IPATH_32BITCOUNTERS 0x20000
+ 		/* can miss port0 rx interrupts */
++		/* Interrupt register is 64 bits */
++#define IPATH_INTREG_64     0x40000
+ #define IPATH_DISABLED      0x80000 /* administratively disabled */
+ 		/* Use GPIO interrupts for new counters */
+ #define IPATH_GPIO_ERRINTRS 0x100000
++#define IPATH_SWAP_PIOBUFS  0x200000
++		/* Suppress heartbeat, even if turning off loopback */
++#define IPATH_NO_HRTBT      0x1000000
++#define IPATH_HAS_MULT_IB_SPEED 0x8000000
  
- 	{ USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_RDESC_PETALYNX },
+ /* Bits in GPIO for the added interrupts */
+ #define IPATH_GPIO_PORT0_BIT 2
+@@ -758,8 +839,6 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
+ /* portdata flag bit offsets */
+ 		/* waiting for a packet to arrive */
+ #define IPATH_PORT_WAITING_RCV   2
+-		/* waiting for a PIO buffer to be available */
+-#define IPATH_PORT_WAITING_PIO   3
+ 		/* master has not finished initializing */
+ #define IPATH_PORT_MASTER_UNINIT 4
+ 		/* waiting for an urgent packet to arrive */
+@@ -767,8 +846,6 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
  
-+	{ USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_RDESC_SAMSUNG_REMOTE },
-+
- 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
- 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
+ /* free up any allocated data at closes */
+ void ipath_free_data(struct ipath_portdata *dd);
+-int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
+-int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
+ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
+ void ipath_init_iba6120_funcs(struct ipath_devdata *);
+ void ipath_init_iba6110_funcs(struct ipath_devdata *);
+@@ -792,33 +869,6 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
+  */
+ #define IPATH_DFLT_RCVHDRSIZE 9
  
-@@ -884,6 +930,8 @@ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
- 	return quirks;
+-#define IPATH_MDIO_CMD_WRITE   1
+-#define IPATH_MDIO_CMD_READ    2
+-#define IPATH_MDIO_CLD_DIV     25	/* to get 2.5 Mhz mdio clock */
+-#define IPATH_MDIO_CMDVALID    0x40000000	/* bit 30 */
+-#define IPATH_MDIO_DATAVALID   0x80000000	/* bit 31 */
+-#define IPATH_MDIO_CTRL_STD    0x0
+-
+-static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
+-{
+-	return (((u64) IPATH_MDIO_CLD_DIV) << 32) |
+-		(cmd << 26) |
+-		(dev << 21) |
+-		(reg << 16) |
+-		(data & 0xFFFF);
+-}
+-
+-		/* signal and fifo status, in bank 31 */
+-#define IPATH_MDIO_CTRL_XGXS_REG_8  0x8
+-		/* controls loopback, redundancy */
+-#define IPATH_MDIO_CTRL_8355_REG_1  0x10
+-		/* premph, encdec, etc. */
+-#define IPATH_MDIO_CTRL_8355_REG_2  0x11
+-		/* Kchars, etc. */
+-#define IPATH_MDIO_CTRL_8355_REG_6  0x15
+-#define IPATH_MDIO_CTRL_8355_REG_9  0x18
+-#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
+-
+ int ipath_get_user_pages(unsigned long, size_t, struct page **);
+ void ipath_release_user_pages(struct page **, size_t);
+ void ipath_release_user_pages_on_close(struct page **, size_t);
+@@ -863,7 +913,7 @@ static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
+ 	return readl(regno + (u64 __iomem *)
+ 		     (dd->ipath_uregbase +
+ 		      (char __iomem *)dd->ipath_kregbase +
+-		      dd->ipath_palign * port));
++		      dd->ipath_ureg_align * port));
  }
  
-+EXPORT_SYMBOL_GPL(usbhid_lookup_quirk);
-+
- /*
-  * Cherry Cymotion keyboard have an invalid HID report descriptor,
-  * that needs fixing before we can parse it.
-@@ -914,6 +962,33 @@ static void usbhid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize)
- 	}
+ /**
+@@ -880,7 +930,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd,
+ {
+ 	u64 __iomem *ubase = (u64 __iomem *)
+ 		(dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
+-		 dd->ipath_palign * port);
++		 dd->ipath_ureg_align * port);
+ 	if (dd->ipath_kregbase)
+ 		writeq(value, &ubase[regno]);
+ }
+@@ -930,6 +980,53 @@ static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
+ 		      (char __iomem *)dd->ipath_kregbase));
  }
  
-+/*
-+ * Samsung IrDA remote controller (reports as Cypress USB Mouse).
-+ *
-+ * Vendor specific report #4 has a size of 48 bit,
-+ * and therefore is not accepted when inspecting the descriptors.
-+ * As a workaround we reinterpret the report as:
-+ *   Variable type, count 6, size 8 bit, log. maximum 255
-+ * The burden to reconstruct the data is moved into user space.
-+ */
-+static void usbhid_fixup_samsung_irda_descriptor(unsigned char *rdesc,
-+						  int rsize)
++static inline void ipath_write_creg(const struct ipath_devdata *dd,
++				    ipath_creg regno, u64 value)
 +{
-+	if (rsize >= 182 && rdesc[175] == 0x25
-+			 && rdesc[176] == 0x40
-+			 && rdesc[177] == 0x75
-+			 && rdesc[178] == 0x30
-+			 && rdesc[179] == 0x95
-+			 && rdesc[180] == 0x01
-+			 && rdesc[182] == 0x40) {
-+		printk(KERN_INFO "Fixing up Samsung IrDA report descriptor\n");
-+		rdesc[176] = 0xff;
-+		rdesc[178] = 0x08;
-+		rdesc[180] = 0x06;
-+		rdesc[182] = 0x42;
-+	}
++	if (dd->ipath_kregbase)
++		writeq(value, regno + (u64 __iomem *)
++		       (dd->ipath_cregbase +
++			(char __iomem *)dd->ipath_kregbase));
 +}
 +
- /* Petalynx Maxter Remote has maximum for consumer page set too low */
- static void usbhid_fixup_petalynx_descriptor(unsigned char *rdesc, int rsize)
- {
-@@ -965,6 +1040,14 @@ static void usbhid_fixup_macbook_descriptor(unsigned char *rdesc, int rsize)
- 	}
- }
- 
-+static void usbhid_fixup_button_consumer_descriptor(unsigned char *rdesc, int rsize)
++static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
 +{
-+	if (rsize >= 30 && rdesc[29] == 0x05
-+			&& rdesc[30] == 0x09) {
-+		printk(KERN_INFO "Fixing up button/consumer in HID report descriptor\n");
-+		rdesc[30] = 0x0c;
-+	}
++	*((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
 +}
- 
- static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned rsize)
- {
-@@ -982,6 +1065,13 @@ static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned
- 
- 	if (quirks & HID_QUIRK_RDESC_MACBOOK_JIS)
- 		usbhid_fixup_macbook_descriptor(rdesc, rsize);
 +
-+	if (quirks & HID_QUIRK_RDESC_BUTTON_CONSUMER)
-+		usbhid_fixup_button_consumer_descriptor(rdesc, rsize);
++static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
++{
++	return (u32) le64_to_cpu(*((volatile __le64 *)
++				pd->port_rcvhdrtail_kvaddr));
++}
 +
-+	if (quirks & HID_QUIRK_RDESC_SAMSUNG_REMOTE)
-+		usbhid_fixup_samsung_irda_descriptor(rdesc, rsize);
++static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
++{
++	return (dd->ipath_flags & IPATH_INTREG_64) ?
++		ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
++}
 +
- }
- 
- /**
-diff --git a/drivers/hid/usbhid/hid-tmff.c b/drivers/hid/usbhid/hid-tmff.c
-index 69882a7..144578b 100644
---- a/drivers/hid/usbhid/hid-tmff.c
-+++ b/drivers/hid/usbhid/hid-tmff.c
-@@ -137,7 +137,8 @@ static int hid_tmff_play(struct input_dev *dev, void *data, struct ff_effect *ef
- int hid_tmff_init(struct hid_device *hid)
- {
- 	struct tmff_device *tmff;
--	struct list_head *pos;
-+	struct hid_report *report;
-+	struct list_head *report_list;
- 	struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- 	struct input_dev *input_dev = hidinput->input;
- 	const signed short *ff_bits = ff_joystick;
-@@ -149,8 +150,8 @@ int hid_tmff_init(struct hid_device *hid)
- 		return -ENOMEM;
- 
- 	/* Find the report to use */
--	list_for_each(pos, &hid->report_enum[HID_OUTPUT_REPORT].report_list) {
--		struct hid_report *report = (struct hid_report *)pos;
-+	report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-+	list_for_each_entry(report, report_list, list) {
- 		int fieldnum;
- 
- 		for (fieldnum = 0; fieldnum < report->maxfield; ++fieldnum) {
-diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
-index 775a1ef..5d9dbb4 100644
---- a/drivers/hid/usbhid/usbkbd.c
-+++ b/drivers/hid/usbhid/usbkbd.c
-@@ -235,6 +235,14 @@ static int usb_kbd_probe(struct usb_interface *iface,
- 	if (!usb_endpoint_is_int_in(endpoint))
- 		return -ENODEV;
- 
-+#ifdef CONFIG_USB_HID
-+	if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
-+				le16_to_cpu(dev->descriptor.idProduct))
-+			& HID_QUIRK_IGNORE) {
-+		return -ENODEV;
-+	}
-+#endif
++/*
++ * from contents of IBCStatus (or a saved copy), return linkstate
++ * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
++ * everywhere, anyway (and should be, for almost all purposes).
++ */
++static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
++{
++	u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
++		INFINIPATH_IBCS_LINKSTATE_MASK;
++	if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
++		state = INFINIPATH_IBCS_L_STATE_ACTIVE;
++	return state;
++}
 +
- 	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- 	maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
- 
-diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
-index f8ad691..df0d96d 100644
---- a/drivers/hid/usbhid/usbmouse.c
-+++ b/drivers/hid/usbhid/usbmouse.c
-@@ -131,6 +131,14 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
- 	if (!usb_endpoint_is_int_in(endpoint))
- 		return -ENODEV;
- 
-+#ifdef CONFIG_USB_HID
-+	if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
-+				le16_to_cpu(dev->descriptor.idProduct))
-+			& (HID_QUIRK_IGNORE|HID_QUIRK_IGNORE_MOUSE)) {
-+		return -ENODEV;
-+	}
-+#endif
++/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
++static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
++{
++	return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
++		dd->ibcs_lts_mask;
++}
 +
- 	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
- 	maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
- 
-diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
-index a37cb6b..3581282 100644
---- a/drivers/i2c/algos/i2c-algo-bit.c
-+++ b/drivers/i2c/algos/i2c-algo-bit.c
-@@ -1,7 +1,7 @@
--/* ------------------------------------------------------------------------- */
--/* i2c-algo-bit.c i2c driver algorithms for bit-shift adapters		     */
--/* ------------------------------------------------------------------------- */
--/*   Copyright (C) 1995-2000 Simon G. Vogl
-+/* -------------------------------------------------------------------------
-+ * i2c-algo-bit.c i2c driver algorithms for bit-shift adapters
-+ * -------------------------------------------------------------------------
-+ *   Copyright (C) 1995-2000 Simon G. Vogl
+ /*
+  * sysfs interface.
+  */
+@@ -938,8 +1035,7 @@ struct device_driver;
  
-     This program is free software; you can redistribute it and/or modify
-     it under the terms of the GNU General Public License as published by
-@@ -15,8 +15,8 @@
+ extern const char ib_ipath_version[];
  
-     You should have received a copy of the GNU General Public License
-     along with this program; if not, write to the Free Software
--    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.		     */
--/* ------------------------------------------------------------------------- */
-+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-+ * ------------------------------------------------------------------------- */
+-int ipath_driver_create_group(struct device_driver *);
+-void ipath_driver_remove_group(struct device_driver *);
++extern struct attribute_group *ipath_driver_attr_groups[];
  
- /* With some changes from Frodo Looijaard <frodol at dds.nl>, Kyösti Mälkki
-    <kmalkki at cc.hut.fi> and Jean Delvare <khali at linux-fr.org> */
-@@ -60,26 +60,26 @@ MODULE_PARM_DESC(i2c_debug,
+ int ipath_device_create_group(struct device *, struct ipath_devdata *);
+ void ipath_device_remove_group(struct device *, struct ipath_devdata *);
+diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
+index 85a4aef..8f32b17 100644
+--- a/drivers/infiniband/hw/ipath/ipath_keys.c
++++ b/drivers/infiniband/hw/ipath/ipath_keys.c
+@@ -128,9 +128,8 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
+ 	int ret;
  
- /* --- setting states on the bus with the right timing: ---------------	*/
+ 	/*
+-	 * We use LKEY == zero to mean a physical kmalloc() address.
+-	 * This is a bit of a hack since we rely on dma_map_single()
+-	 * being reversible by calling bus_to_virt().
++	 * We use LKEY == zero for kernel virtual addresses
++	 * (see ipath_get_dma_mr and ipath_dma.c).
+ 	 */
+ 	if (sge->lkey == 0) {
+ 		struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
+diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
+index 3d1432d..d98d5f1 100644
+--- a/drivers/infiniband/hw/ipath/ipath_mad.c
++++ b/drivers/infiniband/hw/ipath/ipath_mad.c
+@@ -934,6 +934,7 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
+ 	struct ib_pma_portsamplescontrol *p =
+ 		(struct ib_pma_portsamplescontrol *)pmp->data;
+ 	struct ipath_ibdev *dev = to_idev(ibdev);
++	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
+ 	unsigned long flags;
+ 	u8 port_select = p->port_select;
  
--#define setsda(adap,val) adap->setsda(adap->data, val)
--#define setscl(adap,val) adap->setscl(adap->data, val)
--#define getsda(adap) adap->getsda(adap->data)
--#define getscl(adap) adap->getscl(adap->data)
-+#define setsda(adap, val)	adap->setsda(adap->data, val)
-+#define setscl(adap, val)	adap->setscl(adap->data, val)
-+#define getsda(adap)		adap->getsda(adap->data)
-+#define getscl(adap)		adap->getscl(adap->data)
+@@ -955,7 +956,10 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
+ 	p->counter_width = 4;	/* 32 bit counters */
+ 	p->counter_mask0_9 = COUNTER_MASK0_9;
+ 	spin_lock_irqsave(&dev->pending_lock, flags);
+-	p->sample_status = dev->pma_sample_status;
++	if (crp->cr_psstat)
++		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
++	else
++		p->sample_status = dev->pma_sample_status;
+ 	p->sample_start = cpu_to_be32(dev->pma_sample_start);
+ 	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
+ 	p->tag = cpu_to_be16(dev->pma_tag);
+@@ -975,8 +979,9 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
+ 	struct ib_pma_portsamplescontrol *p =
+ 		(struct ib_pma_portsamplescontrol *)pmp->data;
+ 	struct ipath_ibdev *dev = to_idev(ibdev);
++	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
+ 	unsigned long flags;
+-	u32 start;
++	u8 status;
+ 	int ret;
  
- static inline void sdalo(struct i2c_algo_bit_data *adap)
- {
--	setsda(adap,0);
-+	setsda(adap, 0);
- 	udelay((adap->udelay + 1) / 2);
- }
+ 	if (pmp->attr_mod != 0 ||
+@@ -986,59 +991,67 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
+ 		goto bail;
+ 	}
  
- static inline void sdahi(struct i2c_algo_bit_data *adap)
- {
--	setsda(adap,1);
-+	setsda(adap, 1);
- 	udelay((adap->udelay + 1) / 2);
- }
+-	start = be32_to_cpu(p->sample_start);
+-	if (start != 0) {
+-		spin_lock_irqsave(&dev->pending_lock, flags);
+-		if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_DONE) {
+-			dev->pma_sample_status =
+-				IB_PMA_SAMPLE_STATUS_STARTED;
+-			dev->pma_sample_start = start;
+-			dev->pma_sample_interval =
+-				be32_to_cpu(p->sample_interval);
+-			dev->pma_tag = be16_to_cpu(p->tag);
+-			if (p->counter_select[0])
+-				dev->pma_counter_select[0] =
+-					p->counter_select[0];
+-			if (p->counter_select[1])
+-				dev->pma_counter_select[1] =
+-					p->counter_select[1];
+-			if (p->counter_select[2])
+-				dev->pma_counter_select[2] =
+-					p->counter_select[2];
+-			if (p->counter_select[3])
+-				dev->pma_counter_select[3] =
+-					p->counter_select[3];
+-			if (p->counter_select[4])
+-				dev->pma_counter_select[4] =
+-					p->counter_select[4];
+-		}
+-		spin_unlock_irqrestore(&dev->pending_lock, flags);
++	spin_lock_irqsave(&dev->pending_lock, flags);
++	if (crp->cr_psstat)
++		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
++	else
++		status = dev->pma_sample_status;
++	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
++		dev->pma_sample_start = be32_to_cpu(p->sample_start);
++		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
++		dev->pma_tag = be16_to_cpu(p->tag);
++		dev->pma_counter_select[0] = p->counter_select[0];
++		dev->pma_counter_select[1] = p->counter_select[1];
++		dev->pma_counter_select[2] = p->counter_select[2];
++		dev->pma_counter_select[3] = p->counter_select[3];
++		dev->pma_counter_select[4] = p->counter_select[4];
++		if (crp->cr_psstat) {
++			ipath_write_creg(dev->dd, crp->cr_psinterval,
++					 dev->pma_sample_interval);
++			ipath_write_creg(dev->dd, crp->cr_psstart,
++					 dev->pma_sample_start);
++		} else
++			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
+ 	}
++	spin_unlock_irqrestore(&dev->pending_lock, flags);
++
+ 	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
  
- static inline void scllo(struct i2c_algo_bit_data *adap)
- {
--	setscl(adap,0);
-+	setscl(adap, 0);
- 	udelay(adap->udelay / 2);
+ bail:
+ 	return ret;
  }
  
-@@ -91,22 +91,21 @@ static int sclhi(struct i2c_algo_bit_data *adap)
+-static u64 get_counter(struct ipath_ibdev *dev, __be16 sel)
++static u64 get_counter(struct ipath_ibdev *dev,
++		       struct ipath_cregs const *crp,
++		       __be16 sel)
  {
- 	unsigned long start;
+ 	u64 ret;
  
--	setscl(adap,1);
-+	setscl(adap, 1);
+ 	switch (sel) {
+ 	case IB_PMA_PORT_XMIT_DATA:
+-		ret = dev->ipath_sword;
++		ret = (crp->cr_psxmitdatacount) ?
++			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
++			dev->ipath_sword;
+ 		break;
+ 	case IB_PMA_PORT_RCV_DATA:
+-		ret = dev->ipath_rword;
++		ret = (crp->cr_psrcvdatacount) ?
++			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
++			dev->ipath_rword;
+ 		break;
+ 	case IB_PMA_PORT_XMIT_PKTS:
+-		ret = dev->ipath_spkts;
++		ret = (crp->cr_psxmitpktscount) ?
++			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
++			dev->ipath_spkts;
+ 		break;
+ 	case IB_PMA_PORT_RCV_PKTS:
+-		ret = dev->ipath_rpkts;
++		ret = (crp->cr_psrcvpktscount) ?
++			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
++			dev->ipath_rpkts;
+ 		break;
+ 	case IB_PMA_PORT_XMIT_WAIT:
+-		ret = dev->ipath_xmit_wait;
++		ret = (crp->cr_psxmitwaitcount) ?
++			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
++			dev->ipath_xmit_wait;
+ 		break;
+ 	default:
+ 		ret = 0;
+@@ -1053,14 +1066,21 @@ static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
+ 	struct ib_pma_portsamplesresult *p =
+ 		(struct ib_pma_portsamplesresult *)pmp->data;
+ 	struct ipath_ibdev *dev = to_idev(ibdev);
++	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
++	u8 status;
+ 	int i;
  
- 	/* Not all adapters have scl sense line... */
- 	if (!adap->getscl)
- 		goto done;
+ 	memset(pmp->data, 0, sizeof(pmp->data));
+ 	p->tag = cpu_to_be16(dev->pma_tag);
+-	p->sample_status = cpu_to_be16(dev->pma_sample_status);
++	if (crp->cr_psstat)
++		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
++	else
++		status = dev->pma_sample_status;
++	p->sample_status = cpu_to_be16(status);
+ 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
+-		p->counter[i] = cpu_to_be32(
+-			get_counter(dev, dev->pma_counter_select[i]));
++		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
++		    cpu_to_be32(
++			get_counter(dev, crp, dev->pma_counter_select[i]));
  
--	start=jiffies;
--	while (! getscl(adap) ) {	
-- 		/* the hw knows how to read the clock line,
-- 		 * so we wait until it actually gets high.
-- 		 * This is safer as some chips may hold it low
-- 		 * while they are processing data internally. 
-- 		 */
--		if (time_after_eq(jiffies, start+adap->timeout)) {
-+	start = jiffies;
-+	while (!getscl(adap)) {
-+		/* This hw knows how to read the clock line, so we wait
-+		 * until it actually gets high.  This is safer as some
-+		 * chips may hold it low ("clock stretching") while they
-+		 * are processing data internally.
-+		 */
-+		if (time_after_eq(jiffies, start + adap->timeout))
- 			return -ETIMEDOUT;
--		}
- 		cond_resched();
- 	}
- #ifdef DEBUG
-@@ -118,11 +117,11 @@ static int sclhi(struct i2c_algo_bit_data *adap)
- done:
- 	udelay(adap->udelay);
- 	return 0;
--} 
-+}
+ 	return reply((struct ib_smp *) pmp);
+ }
+@@ -1071,16 +1091,23 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+ 	struct ib_pma_portsamplesresult_ext *p =
+ 		(struct ib_pma_portsamplesresult_ext *)pmp->data;
+ 	struct ipath_ibdev *dev = to_idev(ibdev);
++	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
++	u8 status;
+ 	int i;
  
+ 	memset(pmp->data, 0, sizeof(pmp->data));
+ 	p->tag = cpu_to_be16(dev->pma_tag);
+-	p->sample_status = cpu_to_be16(dev->pma_sample_status);
++	if (crp->cr_psstat)
++		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
++	else
++		status = dev->pma_sample_status;
++	p->sample_status = cpu_to_be16(status);
+ 	/* 64 bits */
+ 	p->extended_width = __constant_cpu_to_be32(0x80000000);
+ 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
+-		p->counter[i] = cpu_to_be64(
+-			get_counter(dev, dev->pma_counter_select[i]));
++		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
++		    cpu_to_be64(
++			get_counter(dev, crp, dev->pma_counter_select[i]));
  
- /* --- other auxiliary functions --------------------------------------	*/
--static void i2c_start(struct i2c_algo_bit_data *adap) 
-+static void i2c_start(struct i2c_algo_bit_data *adap)
- {
- 	/* assert: scl, sda are high */
- 	setsda(adap, 0);
-@@ -130,7 +129,7 @@ static void i2c_start(struct i2c_algo_bit_data *adap)
- 	scllo(adap);
+ 	return reply((struct ib_smp *) pmp);
  }
+@@ -1113,6 +1140,8 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
+ 		dev->z_local_link_integrity_errors;
+ 	cntrs.excessive_buffer_overrun_errors -=
+ 		dev->z_excessive_buffer_overrun_errors;
++	cntrs.vl15_dropped -= dev->z_vl15_dropped;
++	cntrs.vl15_dropped += dev->n_vl15_dropped;
  
--static void i2c_repstart(struct i2c_algo_bit_data *adap) 
-+static void i2c_repstart(struct i2c_algo_bit_data *adap)
- {
- 	/* assert: scl is low */
- 	sdahi(adap);
-@@ -141,18 +140,18 @@ static void i2c_repstart(struct i2c_algo_bit_data *adap)
- }
+ 	memset(pmp->data, 0, sizeof(pmp->data));
+ 
+@@ -1156,10 +1185,10 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
+ 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
+ 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ 		cntrs.excessive_buffer_overrun_errors;
+-	if (dev->n_vl15_dropped > 0xFFFFUL)
++	if (cntrs.vl15_dropped > 0xFFFFUL)
+ 		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+ 	else
+-		p->vl15_dropped = cpu_to_be16((u16)dev->n_vl15_dropped);
++		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+ 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
+ 		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+ 	else
+@@ -1262,8 +1291,10 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
+ 		dev->z_excessive_buffer_overrun_errors =
+ 			cntrs.excessive_buffer_overrun_errors;
  
+-	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED)
++	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
+ 		dev->n_vl15_dropped = 0;
++		dev->z_vl15_dropped = cntrs.vl15_dropped;
++	}
  
--static void i2c_stop(struct i2c_algo_bit_data *adap) 
-+static void i2c_stop(struct i2c_algo_bit_data *adap)
- {
- 	/* assert: scl is low */
- 	sdalo(adap);
--	sclhi(adap); 
-+	sclhi(adap);
- 	setsda(adap, 1);
- 	udelay(adap->udelay);
- }
+ 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
+ 		dev->z_port_xmit_data = cntrs.port_xmit_data;
+@@ -1434,7 +1465,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
+ 		 * before checking for other consumers.
+ 		 * Just tell the caller to process it normally.
+ 		 */
+-		ret = IB_MAD_RESULT_FAILURE;
++		ret = IB_MAD_RESULT_SUCCESS;
+ 		goto bail;
+ 	default:
+ 		smp->status |= IB_SMP_UNSUP_METHOD;
+@@ -1516,7 +1547,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
+ 		 * before checking for other consumers.
+ 		 * Just tell the caller to process it normally.
+ 		 */
+-		ret = IB_MAD_RESULT_FAILURE;
++		ret = IB_MAD_RESULT_SUCCESS;
+ 		goto bail;
+ 	default:
+ 		pmp->status |= IB_SMP_UNSUP_METHOD;
+diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
+index b997ff8..80dc623 100644
+--- a/drivers/infiniband/hw/ipath/ipath_qp.c
++++ b/drivers/infiniband/hw/ipath/ipath_qp.c
+@@ -387,8 +387,8 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
+ 	struct ib_wc wc;
+ 	int ret = 0;
  
+-	ipath_dbg("QP%d/%d in error state\n",
+-		  qp->ibqp.qp_num, qp->remote_qpn);
++	ipath_dbg("QP%d/%d in error state (%d)\n",
++		  qp->ibqp.qp_num, qp->remote_qpn, err);
  
+ 	spin_lock(&dev->pending_lock);
+ 	/* XXX What if its already removed by the timeout code? */
+@@ -855,8 +855,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
+ 	 * See ipath_mmap() for details.
+ 	 */
+ 	if (udata && udata->outlen >= sizeof(__u64)) {
+-		int err;
+-
+ 		if (!qp->r_rq.wq) {
+ 			__u64 offset = 0;
  
--/* send a byte without start cond., look for arbitration, 
-+/* send a byte without start cond., look for arbitration,
-    check ackn. from slave */
- /* returns:
-  * 1 if the device acknowledged
-@@ -167,27 +166,33 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
- 	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
+index 120a61b..459e46e 100644
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c
+@@ -647,6 +647,7 @@ static void send_rc_ack(struct ipath_qp *qp)
  
- 	/* assert: scl is low */
--	for ( i=7 ; i>=0 ; i-- ) {
-+	for (i = 7; i >= 0; i--) {
- 		sb = (c >> i) & 1;
--		setsda(adap,sb);
-+		setsda(adap, sb);
- 		udelay((adap->udelay + 1) / 2);
--		if (sclhi(adap)<0) { /* timed out */
-+		if (sclhi(adap) < 0) { /* timed out */
- 			bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
- 				"timeout at bit #%d\n", (int)c, i);
- 			return -ETIMEDOUT;
--		};
--		/* do arbitration here: 
--		 * if ( sb && ! getsda(adap) ) -> ouch! Get out of here.
+ queue_ack:
+ 	spin_lock_irqsave(&qp->s_lock, flags);
++	dev->n_rc_qacks++;
+ 	qp->s_flags |= IPATH_S_ACK_PENDING;
+ 	qp->s_nak_state = qp->r_nak_state;
+ 	qp->s_ack_psn = qp->r_ack_psn;
+@@ -798,11 +799,13 @@ bail:
+ 
+ static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
+ {
+-	if (qp->s_wait_credit) {
+-		qp->s_wait_credit = 0;
+-		tasklet_hi_schedule(&qp->s_task);
++	if (qp->s_last_psn != psn) {
++		qp->s_last_psn = psn;
++		if (qp->s_wait_credit) {
++			qp->s_wait_credit = 0;
++			tasklet_hi_schedule(&qp->s_task);
 +		}
-+		/* FIXME do arbitration here:
-+		 * if (sb && !getsda(adap)) -> ouch! Get out of here.
-+		 *
-+		 * Report a unique code, so higher level code can retry
-+		 * the whole (combined) message and *NOT* issue STOP.
- 		 */
- 		scllo(adap);
  	}
- 	sdahi(adap);
--	if (sclhi(adap)<0){ /* timeout */
-+	if (sclhi(adap) < 0) { /* timeout */
- 		bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
- 			"timeout at ack\n", (int)c);
- 		return -ETIMEDOUT;
--	};
--	/* read ack: SDA should be pulled down by slave */
-+	}
-+
-+	/* read ack: SDA should be pulled down by slave, or it may
-+	 * NAK (usually to report problems with the data we wrote).
-+	 */
- 	ack = !getsda(adap);    /* ack: sda is pulled low -> success */
- 	bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c,
- 		ack ? "A" : "NA");
-@@ -198,24 +203,24 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
+-	qp->s_last_psn = psn;
  }
  
+ /**
+@@ -1653,13 +1656,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ 	case OP(SEND_FIRST):
+ 		if (!ipath_get_rwqe(qp, 0)) {
+ 		rnr_nak:
+-			/*
+-			 * A RNR NAK will ACK earlier sends and RDMA writes.
+-			 * Don't queue the NAK if a RDMA read or atomic
+-			 * is pending though.
+-			 */
+-			if (qp->r_nak_state)
+-				goto done;
+ 			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+ 			qp->r_ack_psn = qp->r_psn;
+ 			goto send_ack;
+diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
+index 708eba3..6d2a17f 100644
+--- a/drivers/infiniband/hw/ipath/ipath_registers.h
++++ b/drivers/infiniband/hw/ipath/ipath_registers.h
+@@ -82,8 +82,7 @@
  
--static int i2c_inb(struct i2c_adapter *i2c_adap) 
-+static int i2c_inb(struct i2c_adapter *i2c_adap)
- {
- 	/* read byte via i2c port, without start/stop sequence	*/
- 	/* acknowledge is sent in i2c_read.			*/
- 	int i;
--	unsigned char indata=0;
-+	unsigned char indata = 0;
- 	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+ /* kr_rcvctrl bits */
+ #define INFINIPATH_R_PORTENABLE_SHIFT 0
+-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
+-#define INFINIPATH_R_TAILUPD   0x80000000
++#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
  
- 	/* assert: scl is low */
- 	sdahi(adap);
--	for (i=0;i<8;i++) {
--		if (sclhi(adap)<0) { /* timeout */
-+	for (i = 0; i < 8; i++) {
-+		if (sclhi(adap) < 0) { /* timeout */
- 			bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
- 				"#%d\n", 7 - i);
- 			return -ETIMEDOUT;
--		};
-+		}
- 		indata *= 2;
--		if ( getsda(adap) ) 
-+		if (getsda(adap))
- 			indata |= 0x01;
- 		setscl(adap, 0);
- 		udelay(i == 7 ? adap->udelay / 2 : adap->udelay);
-@@ -228,66 +233,67 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
-  * Sanity check for the adapter hardware - check the reaction of
-  * the bus lines only if it seems to be idle.
-  */
--static int test_bus(struct i2c_algo_bit_data *adap, char* name) {
--	int scl,sda;
-+static int test_bus(struct i2c_algo_bit_data *adap, char *name)
-+{
-+	int scl, sda;
+ /* kr_intstatus, kr_intclear, kr_intmask bits */
+ #define INFINIPATH_I_RCVURG_SHIFT 0
+@@ -272,20 +271,6 @@
+ #define INFINIPATH_EXTC_LEDGBLOK_ON          0x00000002ULL
+ #define INFINIPATH_EXTC_LEDGBLERR_OFF        0x00000001ULL
  
--	if (adap->getscl==NULL)
-+	if (adap->getscl == NULL)
- 		pr_info("%s: Testing SDA only, SCL is not readable\n", name);
+-/* kr_mdio bits */
+-#define INFINIPATH_MDIO_CLKDIV_MASK 0x7FULL
+-#define INFINIPATH_MDIO_CLKDIV_SHIFT 32
+-#define INFINIPATH_MDIO_COMMAND_MASK 0x7ULL
+-#define INFINIPATH_MDIO_COMMAND_SHIFT 26
+-#define INFINIPATH_MDIO_DEVADDR_MASK 0x1FULL
+-#define INFINIPATH_MDIO_DEVADDR_SHIFT 21
+-#define INFINIPATH_MDIO_REGADDR_MASK 0x1FULL
+-#define INFINIPATH_MDIO_REGADDR_SHIFT 16
+-#define INFINIPATH_MDIO_DATA_MASK 0xFFFFULL
+-#define INFINIPATH_MDIO_DATA_SHIFT 0
+-#define INFINIPATH_MDIO_CMDVALID    0x0000000040000000ULL
+-#define INFINIPATH_MDIO_RDDATAVALID 0x0000000080000000ULL
+-
+ /* kr_partitionkey bits */
+ #define INFINIPATH_PKEY_SIZE 16
+ #define INFINIPATH_PKEY_MASK 0xFFFF
+@@ -303,8 +288,6 @@
  
--	sda=getsda(adap);
--	scl=(adap->getscl==NULL?1:getscl(adap));
--	if (!scl || !sda ) {
-+	sda = getsda(adap);
-+	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
-+	if (!scl || !sda) {
- 		printk(KERN_WARNING "%s: bus seems to be busy\n", name);
- 		goto bailout;
- 	}
+ /* kr_xgxsconfig bits */
+ #define INFINIPATH_XGXS_RESET          0x7ULL
+-#define INFINIPATH_XGXS_MDIOADDR_MASK  0xfULL
+-#define INFINIPATH_XGXS_MDIOADDR_SHIFT 4
+ #define INFINIPATH_XGXS_RX_POL_SHIFT 19
+ #define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
  
- 	sdalo(adap);
--	sda=getsda(adap);
--	scl=(adap->getscl==NULL?1:getscl(adap));
--	if ( 0 != sda ) {
-+	sda = getsda(adap);
-+	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
-+	if (sda) {
- 		printk(KERN_WARNING "%s: SDA stuck high!\n", name);
- 		goto bailout;
- 	}
--	if ( 0 == scl ) {
-+	if (!scl) {
- 		printk(KERN_WARNING "%s: SCL unexpected low "
- 		       "while pulling SDA low!\n", name);
- 		goto bailout;
--	}		
-+	}
+@@ -470,6 +453,20 @@ struct ipath_cregs {
+ 	ipath_creg cr_unsupvlcnt;
+ 	ipath_creg cr_wordrcvcnt;
+ 	ipath_creg cr_wordsendcnt;
++	ipath_creg cr_vl15droppedpktcnt;
++	ipath_creg cr_rxotherlocalphyerrcnt;
++	ipath_creg cr_excessbufferovflcnt;
++	ipath_creg cr_locallinkintegrityerrcnt;
++	ipath_creg cr_rxvlerrcnt;
++	ipath_creg cr_rxdlidfltrcnt;
++	ipath_creg cr_psstat;
++	ipath_creg cr_psstart;
++	ipath_creg cr_psinterval;
++	ipath_creg cr_psrcvdatacount;
++	ipath_creg cr_psrcvpktscount;
++	ipath_creg cr_psxmitdatacount;
++	ipath_creg cr_psxmitpktscount;
++	ipath_creg cr_psxmitwaitcount;
+ };
  
- 	sdahi(adap);
--	sda=getsda(adap);
--	scl=(adap->getscl==NULL?1:getscl(adap));
--	if ( 0 == sda ) {
-+	sda = getsda(adap);
-+	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
-+	if (!sda) {
- 		printk(KERN_WARNING "%s: SDA stuck low!\n", name);
- 		goto bailout;
- 	}
--	if ( 0 == scl ) {
-+	if (!scl) {
- 		printk(KERN_WARNING "%s: SCL unexpected low "
- 		       "while pulling SDA high!\n", name);
- 		goto bailout;
+ #endif				/* _IPATH_REGISTERS_H */
+diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
+index 54c61a9..a59bdbd 100644
+--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
+@@ -98,11 +98,15 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
+ 		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
+ 			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
+ 			l = l->next;
+-			if (l->next == &dev->rnrwait)
++			if (l->next == &dev->rnrwait) {
++				nqp = NULL;
+ 				break;
++			}
+ 			nqp = list_entry(l->next, struct ipath_qp,
+ 					 timerwait);
+ 		}
++		if (nqp)
++			nqp->s_rnr_timeout -= qp->s_rnr_timeout;
+ 		list_add(&qp->timerwait, l);
  	}
+ 	spin_unlock_irqrestore(&dev->pending_lock, flags);
+@@ -479,9 +483,14 @@ done:
  
- 	scllo(adap);
--	sda=getsda(adap);
--	scl=(adap->getscl==NULL?0:getscl(adap));
--	if ( 0 != scl ) {
-+	sda = getsda(adap);
-+	scl = (adap->getscl == NULL) ? 0 : getscl(adap);
-+	if (scl) {
- 		printk(KERN_WARNING "%s: SCL stuck high!\n", name);
- 		goto bailout;
- 	}
--	if ( 0 == sda ) {
-+	if (!sda) {
- 		printk(KERN_WARNING "%s: SDA unexpected low "
- 		       "while pulling SCL low!\n", name);
- 		goto bailout;
- 	}
--	
-+
- 	sclhi(adap);
--	sda=getsda(adap);
--	scl=(adap->getscl==NULL?1:getscl(adap));
--	if ( 0 == scl ) {
-+	sda = getsda(adap);
-+	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
-+	if (!scl) {
- 		printk(KERN_WARNING "%s: SCL stuck low!\n", name);
- 		goto bailout;
- 	}
--	if ( 0 == sda ) {
-+	if (!sda) {
- 		printk(KERN_WARNING "%s: SDA unexpected low "
- 		       "while pulling SCL high!\n", name);
- 		goto bailout;
-@@ -314,9 +320,10 @@ static int try_address(struct i2c_adapter *i2c_adap,
- 		       unsigned char addr, int retries)
+ static void want_buffer(struct ipath_devdata *dd)
  {
- 	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
--	int i,ret = -1;
--	for (i=0;i<=retries;i++) {
--		ret = i2c_outb(i2c_adap,addr);
-+	int i, ret = -1;
+-	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
++	unsigned long flags;
 +
-+	for (i = 0; i <= retries; i++) {
-+		ret = i2c_outb(i2c_adap, addr);
- 		if (ret == 1 || i == retries)
- 			break;
- 		bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
-@@ -338,20 +345,38 @@ static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
++	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
++	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ 			 dd->ipath_sendctrl);
++	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
++	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
+index 2fef36f..f772102 100644
+--- a/drivers/infiniband/hw/ipath/ipath_srq.c
++++ b/drivers/infiniband/hw/ipath/ipath_srq.c
+@@ -94,8 +94,8 @@ bail:
+ /**
+  * ipath_create_srq - create a shared receive queue
+  * @ibpd: the protection domain of the SRQ to create
+- * @attr: the attributes of the SRQ
+- * @udata: not used by the InfiniPath verbs driver
++ * @srq_init_attr: the attributes of the SRQ
++ * @udata: data from libipathverbs when creating a user SRQ
+  */
+ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
+ 				struct ib_srq_init_attr *srq_init_attr,
+diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
+index f027141..d2725cd 100644
+--- a/drivers/infiniband/hw/ipath/ipath_stats.c
++++ b/drivers/infiniband/hw/ipath/ipath_stats.c
+@@ -133,15 +133,16 @@ bail:
+ static void ipath_qcheck(struct ipath_devdata *dd)
  {
- 	const unsigned char *temp = msg->buf;
- 	int count = msg->len;
--	unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK; 
-+	unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK;
- 	int retval;
--	int wrcount=0;
-+	int wrcount = 0;
+ 	static u64 last_tot_hdrqfull;
++	struct ipath_portdata *pd = dd->ipath_pd[0];
+ 	size_t blen = 0;
+ 	char buf[128];
  
- 	while (count > 0) {
- 		retval = i2c_outb(i2c_adap, *temp);
--		if ((retval>0) || (nak_ok && (retval==0)))  { /* ok or ignored NAK */
--			count--; 
-+
-+		/* OK/ACK; or ignored NAK */
-+		if ((retval > 0) || (nak_ok && (retval == 0))) {
-+			count--;
- 			temp++;
- 			wrcount++;
--		} else { /* arbitration or no acknowledge */
--			dev_err(&i2c_adap->dev, "sendbytes: error - bailout.\n");
--			return (retval<0)? retval : -EFAULT;
--			        /* got a better one ?? */
-+
-+		/* A slave NAKing the master means the slave didn't like
-+		 * something about the data it saw.  For example, maybe
-+		 * the SMBus PEC was wrong.
-+		 */
-+		} else if (retval == 0) {
-+			dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n");
-+			return -EIO;
-+
-+		/* Timeout; or (someday) lost arbitration
-+		 *
-+		 * FIXME Lost ARB implies retrying the transaction from
-+		 * the first message, after the "winning" master issues
-+		 * its STOP.  As a rule, upper layer code has no reason
-+		 * to know or care about this ... it is *NOT* an error.
-+		 */
-+		} else {
-+			dev_err(&i2c_adap->dev, "sendbytes: error %d\n",
-+					retval);
-+			return retval;
- 		}
+ 	*buf = 0;
+-	if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
++	if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
+ 		blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
+-				dd->ipath_pd[0]->port_hdrqfull -
++				pd->port_hdrqfull -
+ 				dd->ipath_p0_hdrqfull);
+-		dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
++		dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
  	}
- 	return wrcount;
-@@ -376,14 +401,14 @@ static int acknak(struct i2c_adapter *i2c_adap, int is_ack)
- static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
- {
- 	int inval;
--	int rdcount=0;   	/* counts bytes read */
-+	int rdcount = 0;	/* counts bytes read */
- 	unsigned char *temp = msg->buf;
- 	int count = msg->len;
- 	const unsigned flags = msg->flags;
+ 	if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
+ 		blen += snprintf(buf + blen, sizeof buf - blen,
+@@ -173,7 +174,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
+ 	if (blen)
+ 		ipath_dbg("%s\n", buf);
  
- 	while (count > 0) {
- 		inval = i2c_inb(i2c_adap);
--		if (inval>=0) {
-+		if (inval >= 0) {
- 			*temp = inval;
- 			rdcount++;
- 		} else {   /* read timed out */
-@@ -431,7 +456,7 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
-  * returns:
-  *  0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set
-  * -x an error occurred (like: -EREMOTEIO if the device did not answer, or
-- *	-ETIMEDOUT, for example if the lines are stuck...) 
-+ *	-ETIMEDOUT, for example if the lines are stuck...)
-  */
- static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
+-	if (dd->ipath_port0head != (u32)
++	if (pd->port_head != (u32)
+ 	    le64_to_cpu(*dd->ipath_hdrqtailptr)) {
+ 		if (dd->ipath_lastport0rcv_cnt ==
+ 		    ipath_stats.sps_port0pkts) {
+@@ -181,7 +182,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
+ 				   "port0 hd=%llx tl=%x; port0pkts %llx\n",
+ 				   (unsigned long long)
+ 				   le64_to_cpu(*dd->ipath_hdrqtailptr),
+-				   dd->ipath_port0head,
++				   pd->port_head,
+ 				   (unsigned long long)
+ 				   ipath_stats.sps_port0pkts);
+ 		}
+@@ -237,7 +238,7 @@ static void ipath_chk_errormask(struct ipath_devdata *dd)
+ void ipath_get_faststats(unsigned long opaque)
  {
-@@ -443,10 +468,10 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
- 	int ret, retries;
+ 	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
+-	u32 val;
++	int i;
+ 	static unsigned cnt;
+ 	unsigned long flags;
+ 	u64 traffic_wds;
+@@ -321,12 +322,11 @@ void ipath_get_faststats(unsigned long opaque)
  
- 	retries = nak_ok ? 0 : i2c_adap->retries;
--	
--	if ( (flags & I2C_M_TEN)  ) { 
+ 	/* limit qfull messages to ~one per minute per port */
+ 	if ((++cnt & 0x10)) {
+-		for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
+-		     val--) {
+-			if (dd->ipath_lastegrheads[val] != -1)
+-				dd->ipath_lastegrheads[val] = -1;
+-			if (dd->ipath_lastrcvhdrqtails[val] != -1)
+-				dd->ipath_lastrcvhdrqtails[val] = -1;
++		for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
++			struct ipath_portdata *pd = dd->ipath_pd[i];
 +
-+	if (flags & I2C_M_TEN) {
- 		/* a ten bit address */
--		addr = 0xf0 | (( msg->addr >> 7) & 0x03);
-+		addr = 0xf0 | ((msg->addr >> 7) & 0x03);
- 		bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
- 		/* try extended address code...*/
- 		ret = try_address(i2c_adap, addr, retries);
-@@ -456,33 +481,33 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
- 			return -EREMOTEIO;
- 		}
- 		/* the remaining 8 bit address */
--		ret = i2c_outb(i2c_adap,msg->addr & 0x7f);
-+		ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
- 		if ((ret != 1) && !nak_ok) {
- 			/* the chip did not ack / xmission error occurred */
- 			dev_err(&i2c_adap->dev, "died at 2nd address code\n");
- 			return -EREMOTEIO;
- 		}
--		if ( flags & I2C_M_RD ) {
-+		if (flags & I2C_M_RD) {
- 			bit_dbg(3, &i2c_adap->dev, "emitting repeated "
- 				"start condition\n");
- 			i2c_repstart(adap);
- 			/* okay, now switch into reading mode */
- 			addr |= 0x01;
- 			ret = try_address(i2c_adap, addr, retries);
--			if ((ret!=1) && !nak_ok) {
-+			if ((ret != 1) && !nak_ok) {
- 				dev_err(&i2c_adap->dev,
- 					"died at repeated address code\n");
- 				return -EREMOTEIO;
- 			}
++			if (pd && pd->port_lastrcvhdrqtail != -1)
++				pd->port_lastrcvhdrqtail = -1;
  		}
- 	} else {		/* normal 7bit address	*/
--		addr = ( msg->addr << 1 );
--		if (flags & I2C_M_RD )
-+		addr = msg->addr << 1;
-+		if (flags & I2C_M_RD)
- 			addr |= 1;
--		if (flags & I2C_M_REV_DIR_ADDR )
-+		if (flags & I2C_M_REV_DIR_ADDR)
- 			addr ^= 1;
- 		ret = try_address(i2c_adap, addr, retries);
--		if ((ret!=1) && !nak_ok)
-+		if ((ret != 1) && !nak_ok)
- 			return -EREMOTEIO;
  	}
  
-@@ -494,15 +519,14 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
- {
- 	struct i2c_msg *pmsg;
- 	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
--	
--	int i,ret;
-+	int i, ret;
- 	unsigned short nak_ok;
+diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
+index e1ad7cf..56dfc8a 100644
+--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
++++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
+@@ -363,6 +363,60 @@ static ssize_t show_unit(struct device *dev,
+ 	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
+ }
  
- 	bit_dbg(3, &i2c_adap->dev, "emitting start condition\n");
- 	i2c_start(adap);
--	for (i=0;i<num;i++) {
-+	for (i = 0; i < num; i++) {
- 		pmsg = &msgs[i];
--		nak_ok = pmsg->flags & I2C_M_IGNORE_NAK; 
-+		nak_ok = pmsg->flags & I2C_M_IGNORE_NAK;
- 		if (!(pmsg->flags & I2C_M_NOSTART)) {
- 			if (i) {
- 				bit_dbg(3, &i2c_adap->dev, "emitting "
-@@ -517,7 +541,7 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
- 				goto bailout;
- 			}
- 		}
--		if (pmsg->flags & I2C_M_RD ) {
-+		if (pmsg->flags & I2C_M_RD) {
- 			/* read bytes into buffer*/
- 			ret = readbytes(i2c_adap, pmsg);
- 			if (ret >= 1)
-@@ -551,7 +575,7 @@ bailout:
++static ssize_t show_jint_max_packets(struct device *dev,
++				     struct device_attribute *attr,
++				     char *buf)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++
++	return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets);
++}
++
++static ssize_t store_jint_max_packets(struct device *dev,
++				      struct device_attribute *attr,
++				      const char *buf,
++				      size_t count)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	u16 v = 0;
++	int ret;
++
++	ret = ipath_parse_ushort(buf, &v);
++	if (ret < 0)
++		ipath_dev_err(dd, "invalid jint_max_packets.\n");
++	else
++		dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v);
++
++	return ret;
++}
++
++static ssize_t show_jint_idle_ticks(struct device *dev,
++				    struct device_attribute *attr,
++				    char *buf)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++
++	return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks);
++}
++
++static ssize_t store_jint_idle_ticks(struct device *dev,
++				     struct device_attribute *attr,
++				     const char *buf,
++				     size_t count)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	u16 v = 0;
++	int ret;
++
++	ret = ipath_parse_ushort(buf, &v);
++	if (ret < 0)
++		ipath_dev_err(dd, "invalid jint_idle_ticks.\n");
++	else
++		dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets);
++
++	return ret;
++}
++
+ #define DEVICE_COUNTER(name, attr) \
+ 	static ssize_t show_counter_##name(struct device *dev, \
+ 					   struct device_attribute *attr, \
+@@ -670,6 +724,257 @@ static ssize_t show_logged_errs(struct device *dev,
+ 	return count;
+ }
+ 
++/*
++ * New sysfs entries to control various IB config. These all turn into
++ * accesses via ipath_f_get/set_ib_cfg.
++ *
++ * Get/Set heartbeat enable. Or of 1=enabled, 2=auto
++ */
++static ssize_t show_hrtbt_enb(struct device *dev,
++			 struct device_attribute *attr,
++			 char *buf)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret;
++
++	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
++	if (ret >= 0)
++		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
++	return ret;
++}
++
++static ssize_t store_hrtbt_enb(struct device *dev,
++			  struct device_attribute *attr,
++			  const char *buf,
++			  size_t count)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret, r;
++	u16 val;
++
++	ret = ipath_parse_ushort(buf, &val);
++	if (ret >= 0 && val > 3)
++		ret = -EINVAL;
++	if (ret < 0) {
++		ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
++		goto bail;
++	}
++
++	/*
++	 * Set the "intentional" heartbeat enable per either of
++	 * "Enable" and "Auto", as these are normally set together.
++	 * This bit is consulted when leaving loopback mode,
++	 * because entering loopback mode overrides it and automatically
++	 * disables heartbeat.
++	 */
++	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val);
++	if (r < 0)
++		ret = r;
++	else if (val == IPATH_IB_HRTBT_OFF)
++		dd->ipath_flags |= IPATH_NO_HRTBT;
++	else
++		dd->ipath_flags &= ~IPATH_NO_HRTBT;
++
++bail:
++	return ret;
++}
++
++/*
++ * Get/Set Link-widths enabled. Or of 1=1x, 2=4x (this is human/IB centric,
++ * _not_ the particular encoding of any given chip)
++ */
++static ssize_t show_lwid_enb(struct device *dev,
++			 struct device_attribute *attr,
++			 char *buf)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret;
++
++	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);
++	if (ret >= 0)
++		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
++	return ret;
++}
++
++static ssize_t store_lwid_enb(struct device *dev,
++			  struct device_attribute *attr,
++			  const char *buf,
++			  size_t count)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret, r;
++	u16 val;
++
++	ret = ipath_parse_ushort(buf, &val);
++	if (ret >= 0 && (val == 0 || val > 3))
++		ret = -EINVAL;
++	if (ret < 0) {
++		ipath_dev_err(dd,
++			"attempt to set invalid Link Width (enable)\n");
++		goto bail;
++	}
++
++	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
++	if (r < 0)
++		ret = r;
++
++bail:
++	return ret;
++}
++
++/* Get current link width */
++static ssize_t show_lwid(struct device *dev,
++			 struct device_attribute *attr,
++			 char *buf)
++
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret;
++
++	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
++	if (ret >= 0)
++		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
++	return ret;
++}
++
++/*
++ * Get/Set Link-speeds enabled. Or of 1=SDR 2=DDR.
++ */
++static ssize_t show_spd_enb(struct device *dev,
++			 struct device_attribute *attr,
++			 char *buf)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret;
++
++	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB);
++	if (ret >= 0)
++		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
++	return ret;
++}
++
++static ssize_t store_spd_enb(struct device *dev,
++			  struct device_attribute *attr,
++			  const char *buf,
++			  size_t count)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret, r;
++	u16 val;
++
++	ret = ipath_parse_ushort(buf, &val);
++	if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR)))
++		ret = -EINVAL;
++	if (ret < 0) {
++		ipath_dev_err(dd,
++			"attempt to set invalid Link Speed (enable)\n");
++		goto bail;
++	}
++
++	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val);
++	if (r < 0)
++		ret = r;
++
++bail:
++	return ret;
++}
++
++/* Get current link speed */
++static ssize_t show_spd(struct device *dev,
++			 struct device_attribute *attr,
++			 char *buf)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret;
++
++	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD);
++	if (ret >= 0)
++		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
++	return ret;
++}
++
++/*
++ * Get/Set RX polarity-invert enable. 0=no, 1=yes.
++ */
++static ssize_t show_rx_polinv_enb(struct device *dev,
++			 struct device_attribute *attr,
++			 char *buf)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret;
++
++	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB);
++	if (ret >= 0)
++		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
++	return ret;
++}
++
++static ssize_t store_rx_polinv_enb(struct device *dev,
++			  struct device_attribute *attr,
++			  const char *buf,
++			  size_t count)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret, r;
++	u16 val;
++
++	ret = ipath_parse_ushort(buf, &val);
++	if (ret < 0 || val > 1)
++		goto invalid;
++
++	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
++	if (r < 0) {
++		ret = r;
++		goto bail;
++	}
++
++	goto bail;
++invalid:
++	ipath_dev_err(dd, "attempt to set invalid Rx Polarity (enable)\n");
++bail:
++	return ret;
++}
++/*
++ * Get/Set RX lane-reversal enable. 0=no, 1=yes.
++ */
++static ssize_t show_lanerev_enb(struct device *dev,
++			 struct device_attribute *attr,
++			 char *buf)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret;
++
++	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB);
++	if (ret >= 0)
++		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
++	return ret;
++}
++
++static ssize_t store_lanerev_enb(struct device *dev,
++			  struct device_attribute *attr,
++			  const char *buf,
++			  size_t count)
++{
++	struct ipath_devdata *dd = dev_get_drvdata(dev);
++	int ret, r;
++	u16 val;
++
++	ret = ipath_parse_ushort(buf, &val);
++	if (ret >= 0 && val > 1) {
++		ret = -EINVAL;
++		ipath_dev_err(dd,
++			"attempt to set invalid Lane reversal (enable)\n");
++		goto bail;
++	}
++
++	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val);
++	if (r < 0)
++		ret = r;
++
++bail:
++	return ret;
++}
++
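(Aside on the hunk above.) The store_* handlers added here all follow one pattern: parse the user's string as an unsigned short, reject out-of-range values with -EINVAL, and only then hand the value to the chip-specific ipath_f_set_ib_cfg hook. A minimal user-space sketch of that parse-and-validate step; ipath_parse_ushort itself is not shown in this hunk, so the strtoul-based helper below is an illustrative stand-in, not the kernel code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for ipath_parse_ushort(): parse an unsigned
 * short from a sysfs-style buffer, return a negative errno on error. */
static int parse_ushort(const char *buf, unsigned short *valp)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(buf, &end, 0);
	if (errno || end == buf || v > 0xffff)
		return -EINVAL;
	*valp = (unsigned short)v;
	return 0;
}

/* Mirrors store_lwid_enb(): only 1 (1x), 2 (4x) or 3 (1x|4x) are legal. */
static int store_link_width_enable(const char *buf)
{
	unsigned short val;
	int ret = parse_ushort(buf, &val);

	if (ret >= 0 && (val == 0 || val > 3))
		ret = -EINVAL;
	if (ret < 0)
		fprintf(stderr, "invalid Link Width (enable): %s\n", buf);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", store_link_width_enable("2"),
	       store_link_width_enable("0"),
	       store_link_width_enable("junk"));
	return 0;
}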
+ static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
+ static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
  
- static u32 bit_func(struct i2c_adapter *adap)
- {
--	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 
-+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
- 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
- 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
- 	       I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
-@@ -565,8 +589,8 @@ static const struct i2c_algorithm i2c_bit_algo = {
- 	.functionality	= bit_func,
+@@ -683,6 +988,11 @@ static struct attribute_group driver_attr_group = {
+ 	.attrs = driver_attributes
  };
  
--/* 
-- * registering functions to load algorithms at runtime 
-+/*
-+ * registering functions to load algorithms at runtime
-  */
- static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
- {
-@@ -574,7 +598,7 @@ static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
++struct attribute_group *ipath_driver_attr_groups[] = {
++	&driver_attr_group,
++	NULL,
++};
++
+ static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
+ static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
+ static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
+@@ -701,6 +1011,10 @@ static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
+ static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
+ static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
+ static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
++static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
++		   show_jint_max_packets, store_jint_max_packets);
++static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
++		   show_jint_idle_ticks, store_jint_idle_ticks);
  
- 	if (bit_test) {
- 		int ret = test_bus(bit_adap, adap->name);
--		if (ret<0)
-+		if (ret < 0)
- 			return -ENODEV;
- 	}
+ static struct attribute *dev_attributes[] = {
+ 	&dev_attr_guid.attr,
+@@ -727,6 +1041,34 @@ static struct attribute_group dev_attr_group = {
+ 	.attrs = dev_attributes
+ };
  
-diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
-index ab2e6f3..8907b01 100644
---- a/drivers/i2c/algos/i2c-algo-pcf.c
-+++ b/drivers/i2c/algos/i2c-algo-pcf.c
-@@ -203,35 +203,6 @@ static int pcf_init_8584 (struct i2c_algo_pcf_data *adap)
- /* ----- Utility functions
-  */
++static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
++		   store_hrtbt_enb);
++static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb,
++		   store_lwid_enb);
++static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL);
++static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb,
++		   store_spd_enb);
++static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL);
++static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb,
++		   store_rx_polinv_enb);
++static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb,
++		   store_lanerev_enb);
++
++static struct attribute *dev_ibcfg_attributes[] = {
++	&dev_attr_hrtbt_enable.attr,
++	&dev_attr_link_width_enable.attr,
++	&dev_attr_link_width.attr,
++	&dev_attr_link_speed_enable.attr,
++	&dev_attr_link_speed.attr,
++	&dev_attr_rx_pol_inv_enable.attr,
++	&dev_attr_rx_lane_rev_enable.attr,
++	NULL
++};
++
++static struct attribute_group dev_ibcfg_attr_group = {
++	.attrs = dev_ibcfg_attributes
++};
++
+ /**
+  * ipath_expose_reset - create a device reset file
+  * @dev: the device structure
+@@ -753,24 +1095,9 @@ int ipath_expose_reset(struct device *dev)
+ 	return ret;
+ }
  
--static inline int try_address(struct i2c_algo_pcf_data *adap,
--		       unsigned char addr, int retries)
+-int ipath_driver_create_group(struct device_driver *drv)
 -{
--	int i, status, ret = -1;
--	int wfp;
--	for (i=0;i<retries;i++) {
--		i2c_outb(adap, addr);
--		i2c_start(adap);
--		status = get_pcf(adap, 1);
--		if ((wfp = wait_for_pin(adap, &status)) >= 0) {
--			if ((status & I2C_PCF_LRB) == 0) { 
--				i2c_stop(adap);
--				break;	/* success! */
--			}
--		}
--		if (wfp == -EINTR) {
--			/* arbitration lost */
--			udelay(adap->udelay);
--			return -EINTR;
--		}
--		i2c_stop(adap);
--		udelay(adap->udelay);
--	}
--	DEB2(if (i) printk(KERN_DEBUG "i2c-algo-pcf.o: needed %d retries for %d\n",i,
--	                   addr));
+-	int ret;
+-
+-	ret = sysfs_create_group(&drv->kobj, &driver_attr_group);
+-
 -	return ret;
 -}
 -
+-void ipath_driver_remove_group(struct device_driver *drv)
+-{
+-	sysfs_remove_group(&drv->kobj, &driver_attr_group);
+-}
 -
- static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf,
-                          int count, int last)
+ int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
  {
-@@ -321,47 +292,19 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf,
- }
+ 	int ret;
+-	char unit[5];
  
+ 	ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
+ 	if (ret)
+@@ -780,11 +1107,26 @@ int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
+ 	if (ret)
+ 		goto bail_attrs;
  
--static inline int pcf_doAddress(struct i2c_algo_pcf_data *adap,
--                                struct i2c_msg *msg, int retries) 
-+static int pcf_doAddress(struct i2c_algo_pcf_data *adap,
-+			 struct i2c_msg *msg)
- {
- 	unsigned short flags = msg->flags;
- 	unsigned char addr;
--	int ret;
--	if ( (flags & I2C_M_TEN)  ) { 
--		/* a ten bit address */
--		addr = 0xf0 | (( msg->addr >> 7) & 0x03);
--		DEB2(printk(KERN_DEBUG "addr0: %d\n",addr));
--		/* try extended address code...*/
--		ret = try_address(adap, addr, retries);
--		if (ret!=1) {
--			printk(KERN_ERR "died at extended address code.\n");
--			return -EREMOTEIO;
--		}
--		/* the remaining 8 bit address */
--		i2c_outb(adap,msg->addr & 0x7f);
--/* Status check comes here */
--		if (ret != 1) {
--			printk(KERN_ERR "died at 2nd address code.\n");
--			return -EREMOTEIO;
--		}
--		if ( flags & I2C_M_RD ) {
--			i2c_repstart(adap);
--			/* okay, now switch into reading mode */
--			addr |= 0x01;
--			ret = try_address(adap, addr, retries);
--			if (ret!=1) {
--				printk(KERN_ERR "died at extended address code.\n");
--				return -EREMOTEIO;
--			}
--		}
--	} else {		/* normal 7bit address	*/
--		addr = ( msg->addr << 1 );
--		if (flags & I2C_M_RD )
--			addr |= 1;
--		if (flags & I2C_M_REV_DIR_ADDR )
--			addr ^= 1;
--		i2c_outb(adap, addr);
--	}
+-	snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
+-	ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj, unit);
+-	if (ret == 0)
+-		goto bail;
++	if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
++		ret = device_create_file(dev, &dev_attr_jint_idle_ticks);
++		if (ret)
++			goto bail_counter;
++		ret = device_create_file(dev, &dev_attr_jint_max_packets);
++		if (ret)
++			goto bail_idle;
+ 
++		ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group);
++		if (ret)
++			goto bail_max;
++	}
 +
-+	addr = msg->addr << 1;
-+	if (flags & I2C_M_RD)
-+		addr |= 1;
-+	if (flags & I2C_M_REV_DIR_ADDR)
-+		addr ^= 1;
-+	i2c_outb(adap, addr);
++	return 0;
 +
++bail_max:
++	device_remove_file(dev, &dev_attr_jint_max_packets);
++bail_idle:
++	device_remove_file(dev, &dev_attr_jint_idle_ticks);
++bail_counter:
+ 	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
+ bail_attrs:
+ 	sysfs_remove_group(&dev->kobj, &dev_attr_group);
+@@ -794,12 +1136,14 @@ bail:
+ 
+ void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
+ {
+-	char unit[5];
++	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
+ 
+-	snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
+-	sysfs_remove_link(&dev->driver->kobj, unit);
++	if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
++		sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group);
++		device_remove_file(dev, &dev_attr_jint_idle_ticks);
++		device_remove_file(dev, &dev_attr_jint_max_packets);
++	}
+ 
+-	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
+ 	sysfs_remove_group(&dev->kobj, &dev_attr_group);
+ 
+ 	device_remove_file(dev, &dev_attr_reset);
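(Aside on the ipath_sysfs.c hunks above.) The reworked create/remove group pair uses the usual staged-setup idiom: each sysfs file or group is created in order, every failure label undoes exactly what was created before it, and teardown mirrors the setup in reverse. A self-contained sketch of that goto-unwind structure, with generic resources standing in for the sysfs calls:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for sysfs_create_group()/device_create_file(): return 0 on
 * success, a negative errno on failure, and hand back something to free. */
static int create(const char *name, void **res)
{
	*res = malloc(16);
	if (!*res)
		return -ENOMEM;
	printf("created %s\n", name);
	return 0;
}

static void destroy(const char *name, void *res)
{
	printf("removed %s\n", name);
	free(res);
}

static int device_create_groups(void **a, void **b, void **c)
{
	int ret;

	ret = create("attr_group", a);
	if (ret)
		goto bail;
	ret = create("counter_group", b);
	if (ret)
		goto bail_attrs;
	ret = create("ibcfg_group", c);	/* only on capable hardware */
	if (ret)
		goto bail_counter;
	return 0;

bail_counter:
	destroy("counter_group", *b);
bail_attrs:
	destroy("attr_group", *a);
bail:
	return ret;
}

int main(void)
{
	void *a, *b, *c;

	if (device_create_groups(&a, &b, &c) == 0) {
		/* removal runs in reverse order of creation */
		destroy("ibcfg_group", c);
		destroy("counter_group", b);
		destroy("attr_group", a);
	}
	return 0;
}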
+diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
+index b3df6f3..de67eed 100644
+--- a/drivers/infiniband/hw/ipath/ipath_ud.c
++++ b/drivers/infiniband/hw/ipath/ipath_ud.c
+@@ -301,8 +301,6 @@ int ipath_make_ud_req(struct ipath_qp *qp)
+ 
+ 	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
+ 	qp->s_hdrwords = 7;
+-	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+-		qp->s_hdrwords++;
+ 	qp->s_cur_size = wqe->length;
+ 	qp->s_cur_sge = &qp->s_sge;
+ 	qp->s_wqe = wqe;
+@@ -327,6 +325,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
+ 		ohdr = &qp->s_hdr.u.oth;
+ 	}
+ 	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
++		qp->s_hdrwords++;
+ 		ohdr->u.ud.imm_data = wqe->wr.imm_data;
+ 		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+ 	} else
+diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
+index c4c9984..32d8f88 100644
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
++++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
+@@ -943,7 +943,7 @@ bail:
+  * ipath_verbs_send - send a packet
+  * @qp: the QP to send on
+  * @hdr: the packet header
+- * @hdrwords: the number of words in the header
++ * @hdrwords: the number of 32-bit words in the header
+  * @ss: the SGE to send
+  * @len: the length of the packet in bytes
+  */
+@@ -955,7 +955,10 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
+ 	int ret;
+ 	u32 dwords = (len + 3) >> 2;
+ 
+-	/* +1 is for the qword padding of pbc */
++	/*
++	 * Calculate the send buffer trigger address.
++	 * The +1 counts for the pbc control dword following the pbc length.
++	 */
+ 	plen = hdrwords + dwords + 1;
+ 
+ 	/* Drop non-VL15 packets if we are not in the active state */
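(Aside on the arithmetic in these ipath hunks.) The UD header LRH+BTH+DETH is (8+12+8)/4 = 7 32-bit words, one extra word is counted only when immediate data is actually emitted, the payload is rounded up to whole 32-bit words with (len + 3) >> 2, and the trigger length adds one more word for the PBC control dword. A small standalone check of those formulas; the payload length is an arbitrary example value:

#include <stdio.h>

/* Byte sizes of the InfiniBand UD header components. */
#define LRH_BYTES   8
#define BTH_BYTES  12
#define DETH_BYTES  8

static unsigned int ud_hdrwords(int has_immediate)
{
	unsigned int words = (LRH_BYTES + BTH_BYTES + DETH_BYTES) / 4; /* 7 */

	if (has_immediate)
		words++;	/* one 32-bit immediate-data word */
	return words;
}

int main(void)
{
	unsigned int len = 70;				/* payload bytes */
	unsigned int dwords = (len + 3) >> 2;		/* round up: 18 */
	unsigned int plen = ud_hdrwords(1) + dwords + 1; /* +1: PBC control dword */

	printf("hdrwords=%u dwords=%u plen=%u\n", ud_hdrwords(1), dwords, plen);
	return 0;
}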
+@@ -1130,20 +1133,34 @@ static int ipath_query_device(struct ib_device *ibdev,
  	return 0;
  }
  
-@@ -390,7 +333,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
- 		     pmsg->flags & I2C_M_RD ? "read" : "write",
-                      pmsg->len, pmsg->addr, i + 1, num);)
-     
--		ret = pcf_doAddress(adap, pmsg, i2c_adap->retries);
-+		ret = pcf_doAddress(adap, pmsg);
+-const u8 ipath_cvt_physportstate[16] = {
+-	[INFINIPATH_IBCS_LT_STATE_DISABLED] = 3,
+-	[INFINIPATH_IBCS_LT_STATE_LINKUP] = 5,
+-	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2,
+-	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2,
+-	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1,
+-	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1,
+-	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4,
+-	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4,
+-	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4,
+-	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4,
+-	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6,
+-	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6,
+-	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
++const u8 ipath_cvt_physportstate[32] = {
++	[INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
++	[INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
++	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
++	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
++	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
++	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
++	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
++		IB_PHYSPORTSTATE_CFG_TRAIN,
++	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
++		IB_PHYSPORTSTATE_CFG_TRAIN,
++	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
++		IB_PHYSPORTSTATE_CFG_TRAIN,
++	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
++	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
++		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
++	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
++		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
++	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
++		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
++	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
++	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
++	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
++	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
++	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
++	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
++	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
++	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+ };
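(Aside on the replacement table above.) It relies on C99 designated array initializers: entries are keyed by the hardware link-training state, unmentioned slots default to zero, and widening the array to 32 entries lets the masked state field index it directly. The same idiom in a standalone example; the hardware state codes below are invented for illustration, not the ipath ones:

#include <stdio.h>

/* IB physical port states, as named in the hunk above. */
enum {
	PHYS_SLEEP = 1, PHYS_POLL = 2, PHYS_DISABLED = 3,
	PHYS_CFG_TRAIN = 4, PHYS_LINKUP = 5, PHYS_ERR_RECOVER = 6,
};

/* Hypothetical hardware link-training state codes. */
enum { HW_DISABLED = 0x00, HW_POLL_ACTIVE = 0x02, HW_LINKUP = 0x11 };

/* Sparse table: indexes not listed are implicitly 0 ("unknown"). */
static const unsigned char hw_to_ib_state[32] = {
	[HW_DISABLED]    = PHYS_DISABLED,
	[HW_POLL_ACTIVE] = PHYS_POLL,
	[HW_LINKUP]      = PHYS_LINKUP,
};

int main(void)
{
	unsigned int raw = 0x11;	/* 5-bit state field from a status word */

	/* Mask to the table size so any raw value indexes safely. */
	printf("ib phys state = %d\n", hw_to_ib_state[raw & 0x1f]);
	return 0;
}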
  
- 		/* Send START */
- 		if (i == 0) {
-@@ -453,7 +396,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
- static u32 pcf_func(struct i2c_adapter *adap)
- {
- 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 
--	       I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; 
-+	       I2C_FUNC_PROTOCOL_MANGLING;
- }
+ u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
+@@ -1168,8 +1185,9 @@ static int ipath_query_port(struct ib_device *ibdev,
+ 	ibcstat = dd->ipath_lastibcstat;
+ 	props->state = ((ibcstat >> 4) & 0x3) + 1;
+ 	/* See phys_state_show() */
+-	props->phys_state = ipath_cvt_physportstate[
+-		dd->ipath_lastibcstat & 0xf];
++	props->phys_state = /* MEA: assumes shift == 0 */
++		ipath_cvt_physportstate[dd->ipath_lastibcstat &
++		dd->ibcs_lts_mask];
+ 	props->port_cap_flags = dev->port_cap_flags;
+ 	props->gid_tbl_len = 1;
+ 	props->max_msg_sz = 0x80000000;
+@@ -1641,6 +1659,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
+ 		cntrs.local_link_integrity_errors;
+ 	idev->z_excessive_buffer_overrun_errors =
+ 		cntrs.excessive_buffer_overrun_errors;
++	idev->z_vl15_dropped = cntrs.vl15_dropped;
  
- /* -----exported algorithm data: -------------------------------------	*/
-@@ -475,9 +418,7 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap)
+ 	/*
+ 	 * The system image GUID is supposed to be the same for all
+diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
+index 6ccb54f..3d59736 100644
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
++++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
+@@ -554,6 +554,7 @@ struct ipath_ibdev {
+ 	u32 z_pkey_violations;			/* starting count for PMA */
+ 	u32 z_local_link_integrity_errors;	/* starting count for PMA */
+ 	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
++	u32 z_vl15_dropped;			/* starting count for PMA */
+ 	u32 n_rc_resends;
+ 	u32 n_rc_acks;
+ 	u32 n_rc_qacks;
+@@ -598,6 +599,7 @@ struct ipath_verbs_counters {
+ 	u64 port_rcv_packets;
+ 	u32 local_link_integrity_errors;
+ 	u32 excessive_buffer_overrun_errors;
++	u32 vl15_dropped;
+ };
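(Aside on the new z_vl15_dropped field.) It follows the driver's existing z_ convention visible in the neighbouring lines: sample the hardware counter once when the IB device is registered, keep that sample as a baseline, and report current value minus baseline afterwards. A sketch of that baseline-and-delta pattern:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a monotonically increasing hardware counter. */
static uint32_t hw_vl15_dropped;

struct dev_counters {
	uint32_t z_vl15_dropped;	/* baseline taken at registration */
};

static void register_device(struct dev_counters *c)
{
	c->z_vl15_dropped = hw_vl15_dropped;	/* snapshot, like idev->z_* */
}

static uint32_t query_vl15_dropped(const struct dev_counters *c)
{
	/* Report only what happened since registration. */
	return hw_vl15_dropped - c->z_vl15_dropped;
}

int main(void)
{
	struct dev_counters c;

	hw_vl15_dropped = 40;
	register_device(&c);
	hw_vl15_dropped = 47;	/* seven more drops after registration */
	printf("vl15 dropped since registration: %u\n",
	       (unsigned)query_vl15_dropped(&c));
	return 0;
}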
  
- 	/* register new adapter to i2c module... */
- 	adap->algo = &pcf_algo;
--
--	adap->timeout = 100;		/* default values, should	*/
--	adap->retries = 3;		/* be replaced by defines	*/
-+	adap->timeout = 100;
+ static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
+@@ -830,7 +832,17 @@ unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
  
- 	if ((rval = pcf_init_8584(pcf_adap)))
- 		return rval;
-diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
-index c466c6c..b61f56b 100644
---- a/drivers/i2c/busses/Kconfig
-+++ b/drivers/i2c/busses/Kconfig
-@@ -182,7 +182,8 @@ config I2C_I801
- 	  will be called i2c-i801.
+ extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
  
- config I2C_I810
--	tristate "Intel 810/815"
-+	tristate "Intel 810/815 (DEPRECATED)"
-+	default n
- 	depends on PCI
- 	select I2C_ALGOBIT
- 	help
-@@ -195,6 +196,8 @@ config I2C_I810
- 	    i815
- 	    i845G
++/*
++ * Below converts HCA-specific LinkTrainingState to IB PhysPortState
++ * values.
++ */
+ extern const u8 ipath_cvt_physportstate[];
++#define IB_PHYSPORTSTATE_SLEEP 1
++#define IB_PHYSPORTSTATE_POLL 2
++#define IB_PHYSPORTSTATE_DISABLED 3
++#define IB_PHYSPORTSTATE_CFG_TRAIN 4
++#define IB_PHYSPORTSTATE_LINKUP 5
++#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
  
-+	  This driver is deprecated in favor of the i810fb and intelfb drivers.
-+
- 	  This driver can also be built as a module.  If so, the module
- 	  will be called i2c-i810.
+ extern const int ib_ipath_state_ops[];
  
-@@ -259,20 +262,6 @@ config I2C_IOP3XX
- 	  This driver can also be built as a module.  If so, the module
- 	  will be called i2c-iop3xx.
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index 9d32c49..7950aa6 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -313,6 +313,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
+ 	struct mlx4_ib_srq *srq;
+ 	int is_send;
+ 	int is_error;
++	u32 g_mlpath_rqpn;
+ 	u16 wqe_ctr;
  
--config I2C_IXP4XX
--	tristate "IXP4xx GPIO-Based I2C Interface (DEPRECATED)"
--	depends on ARCH_IXP4XX
--	select I2C_ALGOBIT
--	help
--	  Say Y here if you have an Intel IXP4xx(420,421,422,425) based 
--	  system and are using GPIO lines for an I2C bus.
--
--	  This support is also available as a module. If so, the module
--	  will be called i2c-ixp4xx.
--
--	  This driver is deprecated and will be dropped soon. Use i2c-gpio
--	  instead.
--
- config I2C_IXP2000
- 	tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)"
- 	depends on ARCH_IXP2000
-@@ -396,7 +385,8 @@ config I2C_PASEMI
- 	  Supports the PA Semi PWRficient on-chip SMBus interfaces.
+ 	cqe = next_cqe_sw(cq);
+@@ -426,10 +427,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
  
- config I2C_PROSAVAGE
--	tristate "S3/VIA (Pro)Savage"
-+	tristate "S3/VIA (Pro)Savage (DEPRECATED)"
-+	default n
- 	depends on PCI
- 	select I2C_ALGOBIT
- 	help
-@@ -407,6 +397,8 @@ config I2C_PROSAVAGE
- 	    S3/VIA KM266/VT8375 aka ProSavage8
- 	    S3/VIA KM133/VT8365 aka Savage4
+ 		wc->slid	   = be16_to_cpu(cqe->rlid);
+ 		wc->sl		   = cqe->sl >> 4;
+-		wc->src_qp	   = be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff;
+-		wc->dlid_path_bits = (be32_to_cpu(cqe->g_mlpath_rqpn) >> 24) & 0x7f;
+-		wc->wc_flags      |= be32_to_cpu(cqe->g_mlpath_rqpn) & 0x80000000 ?
+-			IB_WC_GRH : 0;
++		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
++		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
++		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
++		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
+ 		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
+ 	}
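(Aside on the mlx4 change above.) It is mostly a readability fix: byte-swap the packed CQE word once into g_mlpath_rqpn, then pull the fields out of the cached value. The layout implied by the masks is bits 0-23 source QPN, bits 24-30 DLID path bits, and bit 31 the GRH-present flag. A standalone demonstration of that unpacking; the sample word is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend this came from be32_to_cpu(cqe->g_mlpath_rqpn). */
	uint32_t g_mlpath_rqpn = 0x85000abcu;

	unsigned int src_qp    = g_mlpath_rqpn & 0xffffff;          /* bits 0..23  */
	unsigned int path_bits = (g_mlpath_rqpn >> 24) & 0x7f;      /* bits 24..30 */
	int          has_grh   = (g_mlpath_rqpn & 0x80000000u) != 0; /* bit 31     */

	printf("src_qp=0x%06x path_bits=0x%02x grh=%d\n",
	       src_qp, path_bits, has_grh);
	return 0;
}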
  
-+	  This driver is deprecated in favor of the savagefb driver.
-+
- 	  This support is also available as a module.  If so, the module 
- 	  will be called i2c-prosavage.
+diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
+index 15aa32e..7bbdd1f 100644
+--- a/drivers/infiniband/hw/mthca/mthca_dev.h
++++ b/drivers/infiniband/hw/mthca/mthca_dev.h
+@@ -60,13 +60,12 @@
+ enum {
+ 	MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
+ 	MTHCA_FLAG_SRQ        = 1 << 2,
+-	MTHCA_FLAG_MSI        = 1 << 3,
+-	MTHCA_FLAG_MSI_X      = 1 << 4,
+-	MTHCA_FLAG_NO_LAM     = 1 << 5,
+-	MTHCA_FLAG_FMR        = 1 << 6,
+-	MTHCA_FLAG_MEMFREE    = 1 << 7,
+-	MTHCA_FLAG_PCIE       = 1 << 8,
+-	MTHCA_FLAG_SINAI_OPT  = 1 << 9
++	MTHCA_FLAG_MSI_X      = 1 << 3,
++	MTHCA_FLAG_NO_LAM     = 1 << 4,
++	MTHCA_FLAG_FMR        = 1 << 5,
++	MTHCA_FLAG_MEMFREE    = 1 << 6,
++	MTHCA_FLAG_PCIE       = 1 << 7,
++	MTHCA_FLAG_SINAI_OPT  = 1 << 8
+ };
  
-@@ -418,13 +410,16 @@ config I2C_S3C2410
- 	  Samsung S3C2410 based System-on-Chip devices.
+ enum {
+diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
+index b29de51..b60eb5d 100644
+--- a/drivers/infiniband/hw/mthca/mthca_eq.c
++++ b/drivers/infiniband/hw/mthca/mthca_eq.c
+@@ -827,8 +827,7 @@ int mthca_init_eq_table(struct mthca_dev *dev)
+ 	if (err)
+ 		goto err_out_free;
  
- config I2C_SAVAGE4
--	tristate "S3 Savage 4"
--	depends on PCI && EXPERIMENTAL
-+	tristate "S3 Savage 4 (DEPRECATED)"
-+	default n
-+	depends on PCI
- 	select I2C_ALGOBIT
- 	help
- 	  If you say yes to this option, support will be included for the 
- 	  S3 Savage 4 I2C interface.
+-	if (dev->mthca_flags & MTHCA_FLAG_MSI ||
+-	    dev->mthca_flags & MTHCA_FLAG_MSI_X) {
++	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
+ 		dev->eq_table.clr_mask = 0;
+ 	} else {
+ 		dev->eq_table.clr_mask =
+@@ -839,8 +838,7 @@ int mthca_init_eq_table(struct mthca_dev *dev)
  
-+	  This driver is deprecated in favor of the savagefb driver.
-+
- 	  This driver can also be built as a module.  If so, the module
- 	  will be called i2c-savage4.
+ 	dev->eq_table.arm_mask = 0;
  
-@@ -611,7 +606,7 @@ config I2C_VIAPRO
- 	    VT8231
- 	    VT8233/A
- 	    VT8235
--	    VT8237R/A
-+	    VT8237R/A/S
- 	    VT8251
- 	    CX700
+-	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
+-		128 : dev->eq_table.inta_pin;
++	intr = dev->eq_table.inta_pin;
  
-@@ -648,7 +643,7 @@ config I2C_PCA_ISA
+ 	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
+ 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
+diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
+index 60de6f9..5cf8250 100644
+--- a/drivers/infiniband/hw/mthca/mthca_main.c
++++ b/drivers/infiniband/hw/mthca/mthca_main.c
+@@ -65,14 +65,9 @@ static int msi_x = 1;
+ module_param(msi_x, int, 0444);
+ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
  
- config I2C_MV64XXX
- 	tristate "Marvell mv64xxx I2C Controller"
--	depends on MV64X60 && EXPERIMENTAL
-+	depends on (MV64X60 || ARCH_ORION) && EXPERIMENTAL
- 	help
- 	  If you say yes to this option, support will be included for the
- 	  built-in I2C interface on the Marvell 64xxx line of host bridges.
-diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
-index 81d43c2..ea7068f 100644
---- a/drivers/i2c/busses/Makefile
-+++ b/drivers/i2c/busses/Makefile
-@@ -20,7 +20,6 @@ obj-$(CONFIG_I2C_I810)		+= i2c-i810.o
- obj-$(CONFIG_I2C_IBM_IIC)	+= i2c-ibm_iic.o
- obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
- obj-$(CONFIG_I2C_IXP2000)	+= i2c-ixp2000.o
--obj-$(CONFIG_I2C_IXP4XX)	+= i2c-ixp4xx.o
- obj-$(CONFIG_I2C_POWERMAC)	+= i2c-powermac.o
- obj-$(CONFIG_I2C_MPC)		+= i2c-mpc.o
- obj-$(CONFIG_I2C_MV64XXX)	+= i2c-mv64xxx.o
-diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
-index 7490dc1..573abe4 100644
---- a/drivers/i2c/busses/i2c-amd756.c
-+++ b/drivers/i2c/busses/i2c-amd756.c
-@@ -334,6 +334,10 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
- 	int error;
- 	u8 temp;
- 	
-+	/* driver_data might come from user-space, so check it */
-+	if (id->driver_data > ARRAY_SIZE(chipname))
-+		return -EINVAL;
-+
- 	if (amd756_ioport) {
- 		dev_err(&pdev->dev, "Only one device supported "
- 		       "(you have a strange motherboard, btw)\n");
-@@ -405,6 +409,7 @@ static struct pci_driver amd756_driver = {
- 	.id_table	= amd756_ids,
- 	.probe		= amd756_probe,
- 	.remove		= __devexit_p(amd756_remove),
-+	.dynids.use_driver_data = 1,
- };
+-static int msi = 0;
+-module_param(msi, int, 0444);
+-MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero (deprecated, use MSI-X instead)");
+-
+ #else /* CONFIG_PCI_MSI */
  
- static int __init amd756_init(void)
-diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
-index 2f68416..1953b26 100644
---- a/drivers/i2c/busses/i2c-au1550.c
-+++ b/drivers/i2c/busses/i2c-au1550.c
-@@ -30,14 +30,22 @@
- #include <linux/delay.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
-+#include <linux/platform_device.h>
- #include <linux/init.h>
- #include <linux/errno.h>
- #include <linux/i2c.h>
-+#include <linux/slab.h>
+ #define msi_x (0)
+-#define msi   (0)
  
- #include <asm/mach-au1x00/au1xxx.h>
- #include <asm/mach-au1x00/au1xxx_psc.h>
+ #endif /* CONFIG_PCI_MSI */
  
--#include "i2c-au1550.h"
-+struct i2c_au1550_data {
-+	u32	psc_base;
-+	int	xfer_timeout;
-+	int	ack_timeout;
-+	struct i2c_adapter adap;
-+	struct resource *ioarea;
-+};
+@@ -816,13 +811,11 @@ static int mthca_setup_hca(struct mthca_dev *dev)
  
- static int
- wait_xfer_done(struct i2c_au1550_data *adap)
-@@ -105,7 +113,7 @@ wait_master_done(struct i2c_au1550_data *adap)
- }
+ 	err = mthca_NOP(dev, &status);
+ 	if (err || status) {
+-		if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X)) {
++		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
+ 			mthca_warn(dev, "NOP command failed to generate interrupt "
+ 				   "(IRQ %d).\n",
+-				   dev->mthca_flags & MTHCA_FLAG_MSI_X ?
+-				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector :
+-				   dev->pdev->irq);
+-			mthca_warn(dev, "Trying again with MSI/MSI-X disabled.\n");
++				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
++			mthca_warn(dev, "Trying again with MSI-X disabled.\n");
+ 		} else {
+ 			mthca_err(dev, "NOP command failed to generate interrupt "
+ 				  "(IRQ %d), aborting.\n",
+@@ -1005,7 +998,7 @@ static struct {
+ 			   .flags     = 0 },
+ 	[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
+ 			   .flags     = MTHCA_FLAG_PCIE },
+-	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 2, 0),
++	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
+ 			   .flags     = MTHCA_FLAG_MEMFREE |
+ 					MTHCA_FLAG_PCIE },
+ 	[SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
+@@ -1128,29 +1121,12 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
  
- static int
--do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd)
-+do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q)
- {
- 	volatile psc_smb_t	*sp;
- 	u32			stat;
-@@ -134,6 +142,10 @@ do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd)
- 	if (rd)
- 		addr |= 1;
+ 	if (msi_x && !mthca_enable_msi_x(mdev))
+ 		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
+-	else if (msi) {
+-		static int warned;
+-
+-		if (!warned) {
+-			printk(KERN_WARNING PFX "WARNING: MSI support will be "
+-			       "removed from the ib_mthca driver in January 2008.\n");
+-			printk(KERN_WARNING "    If you are using MSI and cannot "
+-			       "switch to MSI-X, please tell "
+-			       "<general at lists.openfabrics.org>.\n");
+-			++warned;
+-		}
+-
+-		if (!pci_enable_msi(pdev))
+-			mdev->mthca_flags |= MTHCA_FLAG_MSI;
+-	}
  
-+	/* zero-byte xfers stop immediately */
-+	if (q)
-+		addr |= PSC_SMBTXRX_STP;
-+
- 	/* Put byte into fifo, start up master.
- 	*/
- 	sp->psc_smbtxrx = addr;
-@@ -142,7 +154,7 @@ do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd)
- 	au_sync();
- 	if (wait_ack(adap))
- 		return -EIO;
--	return 0;
-+	return (q) ? wait_master_done(adap) : 0;
- }
+ 	err = mthca_setup_hca(mdev);
+-	if (err == -EBUSY && (mdev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X))) {
++	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
+ 		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
+ 			pci_disable_msix(pdev);
+-		if (mdev->mthca_flags & MTHCA_FLAG_MSI)
+-			pci_disable_msi(pdev);
+-		mdev->mthca_flags &= ~(MTHCA_FLAG_MSI_X | MTHCA_FLAG_MSI);
++		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
  
- static u32
-@@ -262,7 +274,8 @@ au1550_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
+ 		err = mthca_setup_hca(mdev);
+ 	}
+@@ -1192,8 +1168,6 @@ err_cleanup:
+ err_close:
+ 	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
+ 		pci_disable_msix(pdev);
+-	if (mdev->mthca_flags & MTHCA_FLAG_MSI)
+-		pci_disable_msi(pdev);
  
- 	for (i = 0; !err && i < num; i++) {
- 		p = &msgs[i];
--		err = do_address(adap, p->addr, p->flags & I2C_M_RD);
-+		err = do_address(adap, p->addr, p->flags & I2C_M_RD,
-+				 (p->len == 0));
- 		if (err || !p->len)
- 			continue;
- 		if (p->flags & I2C_M_RD)
-@@ -294,18 +307,48 @@ static const struct i2c_algorithm au1550_algo = {
-  * Prior to calling us, the 50MHz clock frequency and routing
-  * must have been set up for the PSC indicated by the adapter.
-  */
--int
--i2c_au1550_add_bus(struct i2c_adapter *i2c_adap)
-+static int __devinit
-+i2c_au1550_probe(struct platform_device *pdev)
- {
--	struct i2c_au1550_data *adap = i2c_adap->algo_data;
--	volatile psc_smb_t	*sp;
--	u32	stat;
-+	struct i2c_au1550_data *priv;
-+	volatile psc_smb_t *sp;
-+	struct resource *r;
-+	u32 stat;
-+	int ret;
-+
-+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+	if (!r) {
-+		ret = -ENODEV;
-+		goto out;
-+	}
-+
-+	priv = kzalloc(sizeof(struct i2c_au1550_data), GFP_KERNEL);
-+	if (!priv) {
-+		ret = -ENOMEM;
-+		goto out;
-+	}
-+
-+	priv->ioarea = request_mem_region(r->start, r->end - r->start + 1,
-+					  pdev->name);
-+	if (!priv->ioarea) {
-+		ret = -EBUSY;
-+		goto out_mem;
-+	}
+ 	mthca_close_hca(mdev);
  
--	i2c_adap->algo = &au1550_algo;
-+	priv->psc_base = r->start;
-+	priv->xfer_timeout = 200;
-+	priv->ack_timeout = 200;
-+
-+	priv->adap.id = I2C_HW_AU1550_PSC;
-+	priv->adap.nr = pdev->id;
-+	priv->adap.algo = &au1550_algo;
-+	priv->adap.algo_data = priv;
-+	priv->adap.dev.parent = &pdev->dev;
-+	strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
+@@ -1246,8 +1220,6 @@ static void __mthca_remove_one(struct pci_dev *pdev)
  
- 	/* Now, set up the PSC for SMBus PIO mode.
- 	*/
--	sp = (volatile psc_smb_t *)(adap->psc_base);
-+	sp = (volatile psc_smb_t *)priv->psc_base;
- 	sp->psc_ctrl = PSC_CTRL_DISABLE;
- 	au_sync();
- 	sp->psc_sel = PSC_SEL_PS_SMBUSMODE;
-@@ -343,87 +386,87 @@ i2c_au1550_add_bus(struct i2c_adapter *i2c_adap)
- 		au_sync();
- 	} while ((stat & PSC_SMBSTAT_DR) == 0);
+ 		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
+ 			pci_disable_msix(pdev);
+-		if (mdev->mthca_flags & MTHCA_FLAG_MSI)
+-			pci_disable_msi(pdev);
  
--	return i2c_add_adapter(i2c_adap);
--}
-+	ret = i2c_add_numbered_adapter(&priv->adap);
-+	if (ret == 0) {
-+		platform_set_drvdata(pdev, priv);
-+		return 0;
-+	}
+ 		ib_dealloc_device(&mdev->ib_dev);
+ 		mthca_release_regions(pdev, mdev->mthca_flags &
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
+index eb7edab..fe250c6 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -56,42 +56,43 @@
+ /* constants */
  
-+	/* disable the PSC */
-+	sp->psc_smbcfg = 0;
-+	sp->psc_ctrl = PSC_CTRL_DISABLE;
-+	au_sync();
+ enum {
+-	IPOIB_PACKET_SIZE         = 2048,
+-	IPOIB_BUF_SIZE 		  = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
++	IPOIB_PACKET_SIZE	  = 2048,
++	IPOIB_BUF_SIZE		  = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
  
--int
--i2c_au1550_del_bus(struct i2c_adapter *adap)
-+	release_resource(priv->ioarea);
-+	kfree(priv->ioarea);
-+out_mem:
-+	kfree(priv);
-+out:
-+	return ret;
-+}
-+
-+static int __devexit
-+i2c_au1550_remove(struct platform_device *pdev)
- {
--	return i2c_del_adapter(adap);
-+	struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
-+	volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
-+
-+	platform_set_drvdata(pdev, NULL);
-+	i2c_del_adapter(&priv->adap);
-+	sp->psc_smbcfg = 0;
-+	sp->psc_ctrl = PSC_CTRL_DISABLE;
-+	au_sync();
-+	release_resource(priv->ioarea);
-+	kfree(priv->ioarea);
-+	kfree(priv);
-+	return 0;
- }
+-	IPOIB_ENCAP_LEN 	  = 4,
++	IPOIB_ENCAP_LEN		  = 4,
  
- static int
--pb1550_reg(struct i2c_client *client)
-+i2c_au1550_suspend(struct platform_device *pdev, pm_message_t state)
- {
-+	struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
-+	volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
-+
-+	sp->psc_ctrl = PSC_CTRL_SUSPEND;
-+	au_sync();
- 	return 0;
- }
+-	IPOIB_CM_MTU              = 0x10000 - 0x10, /* padding to align header to 16 */
+-	IPOIB_CM_BUF_SIZE         = IPOIB_CM_MTU  + IPOIB_ENCAP_LEN,
+-	IPOIB_CM_HEAD_SIZE 	  = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
+-	IPOIB_CM_RX_SG            = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
+-	IPOIB_RX_RING_SIZE 	  = 128,
+-	IPOIB_TX_RING_SIZE 	  = 64,
++	IPOIB_CM_MTU		  = 0x10000 - 0x10, /* padding to align header to 16 */
++	IPOIB_CM_BUF_SIZE	  = IPOIB_CM_MTU  + IPOIB_ENCAP_LEN,
++	IPOIB_CM_HEAD_SIZE	  = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
++	IPOIB_CM_RX_SG		  = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
++	IPOIB_RX_RING_SIZE	  = 128,
++	IPOIB_TX_RING_SIZE	  = 64,
+ 	IPOIB_MAX_QUEUE_SIZE	  = 8192,
+ 	IPOIB_MIN_QUEUE_SIZE	  = 2,
++	IPOIB_CM_MAX_CONN_QP	  = 4096,
  
- static int
--pb1550_unreg(struct i2c_client *client)
-+i2c_au1550_resume(struct platform_device *pdev)
- {
-+	struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
-+	volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
-+
-+	sp->psc_ctrl = PSC_CTRL_ENABLE;
-+	au_sync();
-+	while (!(sp->psc_smbstat & PSC_SMBSTAT_SR))
-+		au_sync();
- 	return 0;
- }
+-	IPOIB_NUM_WC 		  = 4,
++	IPOIB_NUM_WC		  = 4,
  
--static struct i2c_au1550_data pb1550_i2c_info = {
--	SMBUS_PSC_BASE, 200, 200
--};
+ 	IPOIB_MAX_PATH_REC_QUEUE  = 3,
+-	IPOIB_MAX_MCAST_QUEUE     = 3,
 -
--static struct i2c_adapter pb1550_board_adapter = {
--	name:              "pb1550 adapter",
--	id:                I2C_HW_AU1550_PSC,
--	algo:              NULL,
--	algo_data:         &pb1550_i2c_info,
--	client_register:   pb1550_reg,
--	client_unregister: pb1550_unreg,
-+static struct platform_driver au1xpsc_smbus_driver = {
-+	.driver = {
-+		.name	= "au1xpsc_smbus",
-+		.owner	= THIS_MODULE,
-+	},
-+	.probe		= i2c_au1550_probe,
-+	.remove		= __devexit_p(i2c_au1550_remove),
-+	.suspend	= i2c_au1550_suspend,
-+	.resume		= i2c_au1550_resume,
- };
+-	IPOIB_FLAG_OPER_UP 	  = 0,
+-	IPOIB_FLAG_INITIALIZED    = 1,
+-	IPOIB_FLAG_ADMIN_UP 	  = 2,
+-	IPOIB_PKEY_ASSIGNED 	  = 3,
+-	IPOIB_PKEY_STOP 	  = 4,
+-	IPOIB_FLAG_SUBINTERFACE   = 5,
+-	IPOIB_MCAST_RUN 	  = 6,
+-	IPOIB_STOP_REAPER         = 7,
+-	IPOIB_MCAST_STARTED       = 8,
+-	IPOIB_FLAG_ADMIN_CM 	  = 9,
++	IPOIB_MAX_MCAST_QUEUE	  = 3,
++
++	IPOIB_FLAG_OPER_UP	  = 0,
++	IPOIB_FLAG_INITIALIZED	  = 1,
++	IPOIB_FLAG_ADMIN_UP	  = 2,
++	IPOIB_PKEY_ASSIGNED	  = 3,
++	IPOIB_PKEY_STOP		  = 4,
++	IPOIB_FLAG_SUBINTERFACE	  = 5,
++	IPOIB_MCAST_RUN		  = 6,
++	IPOIB_STOP_REAPER	  = 7,
++	IPOIB_MCAST_STARTED	  = 8,
++	IPOIB_FLAG_ADMIN_CM	  = 9,
+ 	IPOIB_FLAG_UMCAST	  = 10,
  
--/* BIG hack to support the control interface on the Wolfson WM8731
-- * audio codec on the Pb1550 board.  We get an address and two data
-- * bytes to write, create an i2c message, and send it across the
-- * i2c transfer function.  We do this here because we have access to
-- * the i2c adapter structure.
-- */
--static struct i2c_msg wm_i2c_msg;  /* We don't want this stuff on the stack */
--static	u8 i2cbuf[2];
--
--int
--pb1550_wm_codec_write(u8 addr, u8 reg, u8 val)
--{
--	wm_i2c_msg.addr = addr;
--	wm_i2c_msg.flags = 0;
--	wm_i2c_msg.buf = i2cbuf;
--	wm_i2c_msg.len = 2;
--	i2cbuf[0] = reg;
--	i2cbuf[1] = val;
--
--	return pb1550_board_adapter.algo->master_xfer(&pb1550_board_adapter, &wm_i2c_msg, 1);
--}
--
- static int __init
- i2c_au1550_init(void)
- {
--	printk(KERN_INFO "Au1550 I2C: ");
--
--	/* This is where we would set up a 50MHz clock source
--	 * and routing.  On the Pb1550, the SMBus is PSC2, which
--	 * uses a shared clock with USB.  This has been already
--	 * configured by Yamon as a 48MHz clock, close enough
--	 * for our work.
--	 */
--        if (i2c_au1550_add_bus(&pb1550_board_adapter) < 0) {
--		printk("failed to initialize.\n");
--                return -ENODEV;
--	}
--
--	printk("initialized.\n");
--	return 0;
-+	return platform_driver_register(&au1xpsc_smbus_driver);
- }
+ 	IPOIB_MAX_BACKOFF_SECONDS = 16,
  
- static void __exit
- i2c_au1550_exit(void)
- {
--	i2c_au1550_del_bus(&pb1550_board_adapter);
-+	platform_driver_unregister(&au1xpsc_smbus_driver);
- }
+-	IPOIB_MCAST_FLAG_FOUND 	  = 0,	/* used in set_multicast_list */
++	IPOIB_MCAST_FLAG_FOUND	  = 0,	/* used in set_multicast_list */
+ 	IPOIB_MCAST_FLAG_SENDONLY = 1,
+-	IPOIB_MCAST_FLAG_BUSY 	  = 2,	/* joining or already joined */
++	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
+ 	IPOIB_MCAST_FLAG_ATTACHED = 3,
+ };
  
- MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC.");
-diff --git a/drivers/i2c/busses/i2c-au1550.h b/drivers/i2c/busses/i2c-au1550.h
-deleted file mode 100644
-index fce15d1..0000000
---- a/drivers/i2c/busses/i2c-au1550.h
-+++ /dev/null
-@@ -1,32 +0,0 @@
--/*
-- * Copyright (C) 2004 Embedded Edge, LLC <dan at embeddededge.com>
-- * 2.6 port by Matt Porter <mporter at kernel.crashing.org>
-- *
-- *  This program is free software; you can redistribute it and/or modify
-- *  it under the terms of the GNU General Public License as published by
-- *  the Free Software Foundation; either version 2 of the License, or
-- *  (at your option) any later version.
-- *
-- *  This program is distributed in the hope that it will be useful,
-- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
-- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-- *  GNU General Public License for more details.
-- *
-- *  You should have received a copy of the GNU General Public License
-- *  along with this program; if not, write to the Free Software
-- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-- */
--
--#ifndef I2C_AU1550_H
--#define I2C_AU1550_H
--
--struct i2c_au1550_data {
--	u32	psc_base;
--	int	xfer_timeout;
--	int	ack_timeout;
--};
--
--int i2c_au1550_add_bus(struct i2c_adapter *);
--int i2c_au1550_del_bus(struct i2c_adapter *);
--
--#endif /* I2C_AU1550_H */
-diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
-index 67224a4..7dbdaeb 100644
---- a/drivers/i2c/busses/i2c-bfin-twi.c
-+++ b/drivers/i2c/busses/i2c-bfin-twi.c
-@@ -550,6 +550,7 @@ static int i2c_bfin_twi_probe(struct platform_device *dev)
+@@ -117,7 +118,7 @@ struct ipoib_pseudoheader {
+ struct ipoib_mcast {
+ 	struct ib_sa_mcmember_rec mcmember;
+ 	struct ib_sa_multicast	 *mc;
+-	struct ipoib_ah          *ah;
++	struct ipoib_ah		 *ah;
  
- 	p_adap = &iface->adap;
- 	p_adap->id = I2C_HW_BLACKFIN;
-+	p_adap->nr = dev->id;
- 	strlcpy(p_adap->name, dev->name, sizeof(p_adap->name));
- 	p_adap->algo = &bfin_twi_algorithm;
- 	p_adap->algo_data = iface;
-@@ -576,7 +577,7 @@ static int i2c_bfin_twi_probe(struct platform_device *dev)
- 	bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA);
- 	SSYNC();
+ 	struct rb_node    rb_node;
+ 	struct list_head  list;
+@@ -186,27 +187,29 @@ enum ipoib_cm_state {
+ };
  
--	rc = i2c_add_adapter(p_adap);
-+	rc = i2c_add_numbered_adapter(p_adap);
- 	if (rc < 0)
- 		free_irq(iface->irq, iface);
- 	else
-diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
-index 6767988..cce5a61 100644
---- a/drivers/i2c/busses/i2c-davinci.c
-+++ b/drivers/i2c/busses/i2c-davinci.c
-@@ -510,7 +510,6 @@ static int davinci_i2c_probe(struct platform_device *pdev)
+ struct ipoib_cm_rx {
+-	struct ib_cm_id     *id;
+-	struct ib_qp        *qp;
+-	struct list_head     list;
+-	struct net_device   *dev;
+-	unsigned long        jiffies;
+-	enum ipoib_cm_state  state;
++	struct ib_cm_id	       *id;
++	struct ib_qp	       *qp;
++	struct ipoib_cm_rx_buf *rx_ring;
++	struct list_head	list;
++	struct net_device      *dev;
++	unsigned long		jiffies;
++	enum ipoib_cm_state	state;
++	int			recv_count;
+ };
  
- 	/* FIXME */
- 	adap->timeout = 1;
--	adap->retries = 1;
+ struct ipoib_cm_tx {
+-	struct ib_cm_id     *id;
+-	struct ib_qp        *qp;
++	struct ib_cm_id	    *id;
++	struct ib_qp	    *qp;
+ 	struct list_head     list;
+ 	struct net_device   *dev;
+ 	struct ipoib_neigh  *neigh;
+ 	struct ipoib_path   *path;
+ 	struct ipoib_tx_buf *tx_ring;
+-	unsigned             tx_head;
+-	unsigned             tx_tail;
+-	unsigned long        flags;
+-	u32                  mtu;
+-	struct ib_wc         ibwc[IPOIB_NUM_WC];
++	unsigned	     tx_head;
++	unsigned	     tx_tail;
++	unsigned long	     flags;
++	u32		     mtu;
++	struct ib_wc	     ibwc[IPOIB_NUM_WC];
+ };
  
- 	adap->nr = pdev->id;
- 	r = i2c_add_numbered_adapter(adap);
-diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
-index ac27e5f..aa91579 100644
---- a/drivers/i2c/busses/i2c-i801.c
-+++ b/drivers/i2c/busses/i2c-i801.c
-@@ -4,6 +4,7 @@
-     Copyright (c) 1998 - 2002  Frodo Looijaard <frodol at dds.nl>,
-     Philip Edelbrock <phil at netroedge.com>, and Mark D. Studebaker
-     <mdsxyz123 at yahoo.com>
-+    Copyright (C) 2007         Jean Delvare <khali at linux-fr.org>
+ struct ipoib_cm_rx_buf {
+@@ -215,25 +218,28 @@ struct ipoib_cm_rx_buf {
+ };
  
-     This program is free software; you can redistribute it and/or modify
-     it under the terms of the GNU General Public License as published by
-@@ -21,25 +22,34 @@
- */
+ struct ipoib_cm_dev_priv {
+-	struct ib_srq  	       *srq;
++	struct ib_srq	       *srq;
+ 	struct ipoib_cm_rx_buf *srq_ring;
+-	struct ib_cm_id        *id;
+-	struct list_head        passive_ids;   /* state: LIVE */
+-	struct list_head        rx_error_list; /* state: ERROR */
+-	struct list_head        rx_flush_list; /* state: FLUSH, drain not started */
+-	struct list_head        rx_drain_list; /* state: FLUSH, drain started */
+-	struct list_head        rx_reap_list;  /* state: FLUSH, drain done */
++	struct ib_cm_id	       *id;
++	struct list_head	passive_ids;   /* state: LIVE */
++	struct list_head	rx_error_list; /* state: ERROR */
++	struct list_head	rx_flush_list; /* state: FLUSH, drain not started */
++	struct list_head	rx_drain_list; /* state: FLUSH, drain started */
++	struct list_head	rx_reap_list;  /* state: FLUSH, drain done */
+ 	struct work_struct      start_task;
+ 	struct work_struct      reap_task;
+ 	struct work_struct      skb_task;
+ 	struct work_struct      rx_reap_task;
+ 	struct delayed_work     stale_task;
+ 	struct sk_buff_head     skb_queue;
+-	struct list_head        start_list;
+-	struct list_head        reap_list;
+-	struct ib_wc            ibwc[IPOIB_NUM_WC];
+-	struct ib_sge           rx_sge[IPOIB_CM_RX_SG];
++	struct list_head	start_list;
++	struct list_head	reap_list;
++	struct ib_wc		ibwc[IPOIB_NUM_WC];
++	struct ib_sge		rx_sge[IPOIB_CM_RX_SG];
+ 	struct ib_recv_wr       rx_wr;
++	int			nonsrq_conn_qp;
++	int			max_cm_mtu;
++	int			num_frags;
+ };
  
  /*
--    SUPPORTED DEVICES	PCI ID
--    82801AA		2413
--    82801AB		2423
--    82801BA		2443
--    82801CA/CAM		2483
--    82801DB		24C3   (HW PEC supported)
--    82801EB		24D3   (HW PEC supported)
--    6300ESB		25A4
--    ICH6		266A
--    ICH7		27DA
--    ESB2		269B
--    ICH8		283E
--    ICH9		2930
--    Tolapai		5032
--    This driver supports several versions of Intel's I/O Controller Hubs (ICH).
--    For SMBus support, they are similar to the PIIX4 and are part
--    of Intel's '810' and other chipsets.
--    See the file Documentation/i2c/busses/i2c-i801 for details.
--    I2C Block Read and Process Call are not supported.
-+  Supports the following Intel I/O Controller Hubs (ICH):
-+
-+                                  I/O                     Block   I2C
-+                                  region  SMBus   Block   proc.   block
-+  Chip name             PCI ID    size    PEC     buffer  call    read
-+  ----------------------------------------------------------------------
-+  82801AA  (ICH)        0x2413     16      no      no      no      no
-+  82801AB  (ICH0)       0x2423     16      no      no      no      no
-+  82801BA  (ICH2)       0x2443     16      no      no      no      no
-+  82801CA  (ICH3)       0x2483     32     soft     no      no      no
-+  82801DB  (ICH4)       0x24c3     32     hard     yes     no      no
-+  82801E   (ICH5)       0x24d3     32     hard     yes     yes     yes
-+  6300ESB               0x25a4     32     hard     yes     yes     yes
-+  82801F   (ICH6)       0x266a     32     hard     yes     yes     yes
-+  6310ESB/6320ESB       0x269b     32     hard     yes     yes     yes
-+  82801G   (ICH7)       0x27da     32     hard     yes     yes     yes
-+  82801H   (ICH8)       0x283e     32     hard     yes     yes     yes
-+  82801I   (ICH9)       0x2930     32     hard     yes     yes     yes
-+  Tolapai               0x5032     32     hard     yes     ?       ?
-+
-+  Features supported by this driver:
-+  Software PEC                     no
-+  Hardware PEC                     yes
-+  Block buffer                     yes
-+  Block process call transaction   no
-+  I2C block read transaction       yes  (doesn't use the block buffer)
-+
-+  See the file Documentation/i2c/busses/i2c-i801 for details.
- */
+@@ -269,30 +275,30 @@ struct ipoib_dev_priv {
+ 	struct work_struct pkey_event_task;
  
- /* Note: we assume there can only be one I801, with one SMBus interface */
-@@ -62,9 +72,9 @@
- #define SMBHSTDAT0	(5 + i801_smba)
- #define SMBHSTDAT1	(6 + i801_smba)
- #define SMBBLKDAT	(7 + i801_smba)
--#define SMBPEC		(8 + i801_smba)	/* ICH4 only */
--#define SMBAUXSTS	(12 + i801_smba)	/* ICH4 only */
--#define SMBAUXCTL	(13 + i801_smba)	/* ICH4 only */
-+#define SMBPEC		(8 + i801_smba)		/* ICH3 and later */
-+#define SMBAUXSTS	(12 + i801_smba)	/* ICH4 and later */
-+#define SMBAUXCTL	(13 + i801_smba)	/* ICH4 and later */
+ 	struct ib_device *ca;
+-	u8            	  port;
+-	u16           	  pkey;
+-	u16               pkey_index;
+-	struct ib_pd  	 *pd;
+-	struct ib_mr  	 *mr;
+-	struct ib_cq  	 *cq;
+-	struct ib_qp  	 *qp;
+-	u32           	  qkey;
++	u8		  port;
++	u16		  pkey;
++	u16		  pkey_index;
++	struct ib_pd	 *pd;
++	struct ib_mr	 *mr;
++	struct ib_cq	 *cq;
++	struct ib_qp	 *qp;
++	u32		  qkey;
  
- /* PCI Address Constants */
- #define SMBBAR		4
-@@ -91,13 +101,13 @@
- #define I801_BYTE		0x04
- #define I801_BYTE_DATA		0x08
- #define I801_WORD_DATA		0x0C
--#define I801_PROC_CALL		0x10	/* later chips only, unimplemented */
-+#define I801_PROC_CALL		0x10	/* unimplemented */
- #define I801_BLOCK_DATA		0x14
--#define I801_I2C_BLOCK_DATA	0x18	/* unimplemented */
-+#define I801_I2C_BLOCK_DATA	0x18	/* ICH5 and later */
- #define I801_BLOCK_LAST		0x34
--#define I801_I2C_BLOCK_LAST	0x38	/* unimplemented */
-+#define I801_I2C_BLOCK_LAST	0x38	/* ICH5 and later */
- #define I801_START		0x40
--#define I801_PEC_EN		0x80	/* ICH4 only */
-+#define I801_PEC_EN		0x80	/* ICH3 and later */
+ 	union ib_gid local_gid;
+-	u16          local_lid;
++	u16	     local_lid;
  
- /* I801 Hosts Status register bits */
- #define SMBHSTSTS_BYTE_DONE	0x80
-@@ -113,7 +123,12 @@ static unsigned long i801_smba;
- static unsigned char i801_original_hstcfg;
- static struct pci_driver i801_driver;
- static struct pci_dev *I801_dev;
--static int isich4;
-+
-+#define FEATURE_SMBUS_PEC	(1 << 0)
-+#define FEATURE_BLOCK_BUFFER	(1 << 1)
-+#define FEATURE_BLOCK_PROC	(1 << 2)
-+#define FEATURE_I2C_BLOCK_READ	(1 << 3)
-+static unsigned int i801_features;
+ 	unsigned int admin_mtu;
+ 	unsigned int mcast_mtu;
  
- static int i801_transaction(int xact)
- {
-@@ -242,7 +257,8 @@ static int i801_block_transaction_by_block(union i2c_smbus_data *data,
- }
+ 	struct ipoib_rx_buf *rx_ring;
  
- static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
--					       char read_write, int hwpec)
-+					       char read_write, int command,
-+					       int hwpec)
- {
- 	int i, len;
- 	int smbcmd;
-@@ -259,16 +275,24 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
- 	}
+-	spinlock_t           tx_lock;
++	spinlock_t	     tx_lock;
+ 	struct ipoib_tx_buf *tx_ring;
+-	unsigned             tx_head;
+-	unsigned             tx_tail;
+-	struct ib_sge        tx_sge;
++	unsigned	     tx_head;
++	unsigned	     tx_tail;
++	struct ib_sge	     tx_sge;
+ 	struct ib_send_wr    tx_wr;
+-	unsigned             tx_outstanding;
++	unsigned	     tx_outstanding;
  
- 	for (i = 1; i <= len; i++) {
--		if (i == len && read_write == I2C_SMBUS_READ)
--			smbcmd = I801_BLOCK_LAST;
--		else
--			smbcmd = I801_BLOCK_DATA;
-+		if (i == len && read_write == I2C_SMBUS_READ) {
-+			if (command == I2C_SMBUS_I2C_BLOCK_DATA)
-+				smbcmd = I801_I2C_BLOCK_LAST;
-+			else
-+				smbcmd = I801_BLOCK_LAST;
-+		} else {
-+			if (command == I2C_SMBUS_I2C_BLOCK_DATA
-+			 && read_write == I2C_SMBUS_READ)
-+				smbcmd = I801_I2C_BLOCK_DATA;
-+			else
-+				smbcmd = I801_BLOCK_DATA;
-+		}
- 		outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT);
+ 	struct ib_wc ibwc[IPOIB_NUM_WC];
  
- 		dev_dbg(&I801_dev->dev, "Block (pre %d): CNT=%02x, CMD=%02x, "
--			"ADD=%02x, DAT0=%02x, BLKDAT=%02x\n", i,
-+			"ADD=%02x, DAT0=%02x, DAT1=%02x, BLKDAT=%02x\n", i,
- 			inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD),
--			inb_p(SMBHSTDAT0), inb_p(SMBBLKDAT));
-+			inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1), inb_p(SMBBLKDAT));
+@@ -317,10 +323,10 @@ struct ipoib_dev_priv {
  
- 		/* Make sure the SMBus host is ready to start transmitting */
- 		temp = inb_p(SMBHSTSTS);
-@@ -332,7 +356,8 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
- 			dev_dbg(&I801_dev->dev, "Error: no response!\n");
- 		}
+ struct ipoib_ah {
+ 	struct net_device *dev;
+-	struct ib_ah      *ah;
++	struct ib_ah	  *ah;
+ 	struct list_head   list;
+-	struct kref        ref;
+-	unsigned           last_send;
++	struct kref	   ref;
++	unsigned	   last_send;
+ };
  
--		if (i == 1 && read_write == I2C_SMBUS_READ) {
-+		if (i == 1 && read_write == I2C_SMBUS_READ
-+		 && command != I2C_SMBUS_I2C_BLOCK_DATA) {
- 			len = inb_p(SMBHSTDAT0);
- 			if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
- 				return -1;
-@@ -353,9 +378,9 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
- 				temp);
- 		}
- 		dev_dbg(&I801_dev->dev, "Block (post %d): CNT=%02x, CMD=%02x, "
--			"ADD=%02x, DAT0=%02x, BLKDAT=%02x\n", i,
-+			"ADD=%02x, DAT0=%02x, DAT1=%02x, BLKDAT=%02x\n", i,
- 			inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD),
--			inb_p(SMBHSTDAT0), inb_p(SMBBLKDAT));
-+			inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1), inb_p(SMBBLKDAT));
+ struct ipoib_path {
+@@ -331,11 +337,11 @@ struct ipoib_path {
  
- 		if (result < 0)
- 			return result;
-@@ -384,33 +409,38 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
- 			pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc);
- 			pci_write_config_byte(I801_dev, SMBHSTCFG,
- 					      hostc | SMBHSTCFG_I2C_EN);
--		} else {
-+		} else if (!(i801_features & FEATURE_I2C_BLOCK_READ)) {
- 			dev_err(&I801_dev->dev,
--				"I2C_SMBUS_I2C_BLOCK_READ not DB!\n");
-+				"I2C block read is unsupported!\n");
- 			return -1;
- 		}
- 	}
+ 	struct list_head      neigh_list;
  
--	if (read_write == I2C_SMBUS_WRITE) {
-+	if (read_write == I2C_SMBUS_WRITE
-+	 || command == I2C_SMBUS_I2C_BLOCK_DATA) {
- 		if (data->block[0] < 1)
- 			data->block[0] = 1;
- 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
- 			data->block[0] = I2C_SMBUS_BLOCK_MAX;
- 	} else {
--		data->block[0] = 32;	/* max for reads */
-+		data->block[0] = 32;	/* max for SMBus block reads */
- 	}
+-	int                   query_id;
++	int		      query_id;
+ 	struct ib_sa_query   *query;
+ 	struct completion     done;
  
--	if (isich4 && i801_set_block_buffer_mode() == 0 )
-+	if ((i801_features & FEATURE_BLOCK_BUFFER)
-+	 && !(command == I2C_SMBUS_I2C_BLOCK_DATA
-+	      && read_write == I2C_SMBUS_READ)
-+	 && i801_set_block_buffer_mode() == 0)
- 		result = i801_block_transaction_by_block(data, read_write,
- 							 hwpec);
- 	else
- 		result = i801_block_transaction_byte_by_byte(data, read_write,
--							     hwpec);
-+							     command, hwpec);
+-	struct rb_node        rb_node;
++	struct rb_node	      rb_node;
+ 	struct list_head      list;
+ };
  
- 	if (result == 0 && hwpec)
- 		i801_wait_hwpec();
+@@ -344,7 +350,7 @@ struct ipoib_neigh {
+ #ifdef CONFIG_INFINIBAND_IPOIB_CM
+ 	struct ipoib_cm_tx *cm;
+ #endif
+-	union ib_gid        dgid;
++	union ib_gid	    dgid;
+ 	struct sk_buff_head queue;
  
--	if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
-+	if (command == I2C_SMBUS_I2C_BLOCK_DATA
-+	 && read_write == I2C_SMBUS_WRITE) {
- 		/* restore saved configuration register value */
- 		pci_write_config_byte(I801_dev, SMBHSTCFG, hostc);
- 	}
-@@ -426,7 +456,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
- 	int block = 0;
- 	int ret, xact = 0;
+ 	struct neighbour   *neighbour;
+@@ -455,12 +461,14 @@ void ipoib_drain_cq(struct net_device *dev);
  
--	hwpec = isich4 && (flags & I2C_CLIENT_PEC)
-+	hwpec = (i801_features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
- 		&& size != I2C_SMBUS_QUICK
- 		&& size != I2C_SMBUS_I2C_BLOCK_DATA;
+ #ifdef CONFIG_INFINIBAND_IPOIB_CM
  
-@@ -462,12 +492,23 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
- 		xact = I801_WORD_DATA;
- 		break;
- 	case I2C_SMBUS_BLOCK_DATA:
--	case I2C_SMBUS_I2C_BLOCK_DATA:
- 		outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- 		       SMBHSTADD);
- 		outb_p(command, SMBHSTCMD);
- 		block = 1;
- 		break;
-+	case I2C_SMBUS_I2C_BLOCK_DATA:
-+		/* NB: page 240 of ICH5 datasheet shows that the R/#W
-+		 * bit should be cleared here, even when reading */
-+		outb_p((addr & 0x7f) << 1, SMBHSTADD);
-+		if (read_write == I2C_SMBUS_READ) {
-+			/* NB: page 240 of ICH5 datasheet also shows
-+			 * that DATA1 is the cmd field when reading */
-+			outb_p(command, SMBHSTDAT1);
-+		} else
-+			outb_p(command, SMBHSTCMD);
-+		block = 1;
-+		break;
- 	case I2C_SMBUS_PROC_CALL:
- 	default:
- 		dev_err(&I801_dev->dev, "Unsupported transaction %d\n", size);
-@@ -487,7 +528,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
- 	/* Some BIOSes don't like it when PEC is enabled at reboot or resume
- 	   time, so we forcibly disable it after every transaction. Turn off
- 	   E32B for the same reason. */
--	if (hwpec)
-+	if (hwpec || block)
- 		outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
- 		       SMBAUXCTL);
+-#define IPOIB_FLAGS_RC          0x80
+-#define IPOIB_FLAGS_UC          0x40
++#define IPOIB_FLAGS_RC		0x80
++#define IPOIB_FLAGS_UC		0x40
  
-@@ -514,9 +555,11 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
- static u32 i801_func(struct i2c_adapter *adapter)
+ /* We don't support UC connections at the moment */
+ #define IPOIB_CM_SUPPORTED(ha)   (ha[0] & (IPOIB_FLAGS_RC))
+ 
++extern int ipoib_max_conn_qp;
++
+ static inline int ipoib_cm_admin_enabled(struct net_device *dev)
  {
- 	return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
--	    I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
--	    I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK
--	     | (isich4 ? I2C_FUNC_SMBUS_PEC : 0);
-+	       I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
-+	       I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK |
-+	       ((i801_features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
-+	       ((i801_features & FEATURE_I2C_BLOCK_READ) ?
-+		I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0);
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -491,6 +499,18 @@ static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *t
+ 	neigh->cm = tx;
  }
  
- static const struct i2c_algorithm smbus_algorithm = {
-@@ -556,8 +599,8 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
- 	int err;
- 
- 	I801_dev = dev;
-+	i801_features = 0;
- 	switch (dev->device) {
--	case PCI_DEVICE_ID_INTEL_82801DB_3:
- 	case PCI_DEVICE_ID_INTEL_82801EB_3:
- 	case PCI_DEVICE_ID_INTEL_ESB_4:
- 	case PCI_DEVICE_ID_INTEL_ICH6_16:
-@@ -565,11 +608,13 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
- 	case PCI_DEVICE_ID_INTEL_ESB2_17:
- 	case PCI_DEVICE_ID_INTEL_ICH8_5:
- 	case PCI_DEVICE_ID_INTEL_ICH9_6:
-+		i801_features |= FEATURE_I2C_BLOCK_READ;
-+		/* fall through */
-+	case PCI_DEVICE_ID_INTEL_82801DB_3:
- 	case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
--		isich4 = 1;
-+		i801_features |= FEATURE_SMBUS_PEC;
-+		i801_features |= FEATURE_BLOCK_BUFFER;
- 		break;
--	default:
--		isich4 = 0;
- 	}
++static inline int ipoib_cm_has_srq(struct net_device *dev)
++{
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	return !!priv->cm.srq;
++}
++
++static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
++{
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	return priv->cm.max_cm_mtu;
++}
++
+ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx);
+ int ipoib_cm_dev_open(struct net_device *dev);
+ void ipoib_cm_dev_stop(struct net_device *dev);
+@@ -500,7 +520,7 @@ void ipoib_cm_dev_cleanup(struct net_device *dev);
+ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
+ 				    struct ipoib_neigh *neigh);
+ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx);
+-void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
++void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ 			   unsigned int mtu);
+ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
+ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
+@@ -508,6 +528,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
  
- 	err = pci_enable_device(dev);
-@@ -610,6 +655,11 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
- 	else
- 		dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n");
+ struct ipoib_cm_tx;
  
-+	/* Clear special mode bits */
-+	if (i801_features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
-+		outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
-+		       SMBAUXCTL);
++#define ipoib_max_conn_qp 0
 +
- 	/* set up the sysfs linkage to our parent device */
- 	i801_adapter.dev.parent = &dev->dev;
+ static inline int ipoib_cm_admin_enabled(struct net_device *dev)
+ {
+ 	return 0;
+@@ -533,6 +555,16 @@ static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *t
+ {
+ }
  
-@@ -678,9 +728,8 @@ static void __exit i2c_i801_exit(void)
- 	pci_unregister_driver(&i801_driver);
++static inline int ipoib_cm_has_srq(struct net_device *dev)
++{
++	return 0;
++}
++
++static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
++{
++	return 0;
++}
++
+ static inline
+ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
+ {
+@@ -582,7 +614,7 @@ int ipoib_cm_add_mode_attr(struct net_device *dev)
+ 	return 0;
  }
  
--MODULE_AUTHOR ("Frodo Looijaard <frodol at dds.nl>, "
--		"Philip Edelbrock <phil at netroedge.com>, "
--		"and Mark D. Studebaker <mdsxyz123 at yahoo.com>");
-+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123 at yahoo.com>, "
-+	      "Jean Delvare <khali at linux-fr.org>");
- MODULE_DESCRIPTION("I801 SMBus driver");
- MODULE_LICENSE("GPL");
+-static inline void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
++static inline void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ 					 unsigned int mtu)
+ {
+ 	dev_kfree_skb_any(skb);
+@@ -624,12 +656,12 @@ extern struct ib_sa_client ipoib_sa_client;
+ extern int ipoib_debug_level;
  
-diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
-index 9b43ff7..7c7eb0c 100644
---- a/drivers/i2c/busses/i2c-ibm_iic.c
-+++ b/drivers/i2c/busses/i2c-ibm_iic.c
-@@ -6,7 +6,7 @@
-  * Copyright (c) 2003, 2004 Zultys Technologies.
-  * Eugene Surovegin <eugene.surovegin at zultys.com> or <ebs at ebshome.net>
-  *
-- * Based on original work by 
-+ * Based on original work by
-  * 	Ian DaSilva  <idasilva at mvista.com>
-  *      Armin Kuster <akuster at mvista.com>
-  * 	Matt Porter  <mporter at mvista.com>
-@@ -86,8 +86,8 @@ static void dump_iic_regs(const char* header, struct ibm_iic_private* dev)
- 	       KERN_DEBUG "  sts      = 0x%02x, extsts = 0x%02x\n"
- 	       KERN_DEBUG "  clkdiv   = 0x%02x, xfrcnt = 0x%02x\n"
- 	       KERN_DEBUG "  xtcntlss = 0x%02x, directcntl = 0x%02x\n",
--		in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts), 
--		in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt), 
-+		in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts),
-+		in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt),
- 		in_8(&iic->xtcntlss), in_8(&iic->directcntl));
+ #define ipoib_dbg(priv, format, arg...)			\
+-	do {					        \
++	do {						\
+ 		if (ipoib_debug_level > 0)			\
+ 			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ 	} while (0)
+ #define ipoib_dbg_mcast(priv, format, arg...)		\
+-	do {					        \
++	do {						\
+ 		if (mcast_debug_level > 0)		\
+ 			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ 	} while (0)
+@@ -642,7 +674,7 @@ extern int ipoib_debug_level;
+ 
+ #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+ #define ipoib_dbg_data(priv, format, arg...)		\
+-	do {					        \
++	do {						\
+ 		if (data_debug_level > 0)		\
+ 			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ 	} while (0)
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 059cf92..1818f95 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -39,6 +39,15 @@
+ #include <linux/icmpv6.h>
+ #include <linux/delay.h>
+ 
++#include "ipoib.h"
++
++int ipoib_max_conn_qp = 128;
++
++module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
++MODULE_PARM_DESC(max_nonsrq_conn_qp,
++		 "Max number of connected-mode QPs per interface "
++		 "(applied only if shared receive queue is not available)");
++
+ #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+ static int data_debug_level;
+ 
+@@ -47,8 +56,6 @@ MODULE_PARM_DESC(cm_data_debug_level,
+ 		 "Enable data path debug tracing for connected mode if > 0");
+ #endif
+ 
+-#include "ipoib.h"
+-
+ #define IPOIB_CM_IETF_ID 0x1000000000000000ULL
+ 
+ #define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
+@@ -81,7 +88,7 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
+ 		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
  }
- #  define DUMP_REGS(h,dev)	dump_iic_regs((h),(dev))
-@@ -125,7 +125,7 @@ static inline void iic_interrupt_mode(struct ibm_iic_private* dev, int enable)
+ 
+-static int ipoib_cm_post_receive(struct net_device *dev, int id)
++static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
  {
- 	out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0);
- }
-- 
-+
- /*
-  * Initialize IIC interface.
-  */
-@@ -134,7 +134,7 @@ static void iic_dev_init(struct ibm_iic_private* dev)
- 	volatile struct iic_regs __iomem *iic = dev->vaddr;
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+ 	struct ib_recv_wr *bad_wr;
+@@ -89,13 +96,13 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
  
- 	DBG("%d: init\n", dev->idx);
--	
-+
- 	/* Clear master address */
- 	out_8(&iic->lmadr, 0);
- 	out_8(&iic->hmadr, 0);
-@@ -160,7 +160,7 @@ static void iic_dev_init(struct ibm_iic_private* dev)
+ 	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
  
- 	/* Clear control register */
- 	out_8(&iic->cntl, 0);
--	
-+
- 	/* Enable interrupts if possible */
- 	iic_interrupt_mode(dev, dev->irq >= 0);
+-	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
++	for (i = 0; i < priv->cm.num_frags; ++i)
+ 		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
  
-@@ -171,7 +171,7 @@ static void iic_dev_init(struct ibm_iic_private* dev)
- 	DUMP_REGS("iic_init", dev);
+ 	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
+ 	if (unlikely(ret)) {
+ 		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
+-		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
++		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
+ 				      priv->cm.srq_ring[id].mapping);
+ 		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
+ 		priv->cm.srq_ring[id].skb = NULL;
+@@ -104,7 +111,33 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
+ 	return ret;
  }
  
--/* 
-+/*
-  * Reset IIC interface
-  */
- static void iic_dev_reset(struct ibm_iic_private* dev)
-@@ -179,42 +179,42 @@ static void iic_dev_reset(struct ibm_iic_private* dev)
- 	volatile struct iic_regs __iomem *iic = dev->vaddr;
- 	int i;
- 	u8 dc;
--	
-+
- 	DBG("%d: soft reset\n", dev->idx);
- 	DUMP_REGS("reset", dev);
--	
-+
-     	/* Place chip in the reset state */
- 	out_8(&iic->xtcntlss, XTCNTLSS_SRST);
--	
+-static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
++static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
++					struct ipoib_cm_rx *rx, int id)
++{
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	struct ib_recv_wr *bad_wr;
++	int i, ret;
 +
- 	/* Check if bus is free */
--	dc = in_8(&iic->directcntl);	
-+	dc = in_8(&iic->directcntl);
- 	if (!DIRCTNL_FREE(dc)){
- 		DBG("%d: trying to regain bus control\n", dev->idx);
--	
++	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
 +
- 		/* Try to set bus free state */
--		out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC);	
--	
-+		out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC);
++	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
++		priv->cm.rx_sge[i].addr = rx->rx_ring[id].mapping[i];
 +
- 		/* Wait until we regain bus control */
- 		for (i = 0; i < 100; ++i){
- 			dc = in_8(&iic->directcntl);
- 			if (DIRCTNL_FREE(dc))
- 				break;
--			
++	ret = ib_post_recv(rx->qp, &priv->cm.rx_wr, &bad_wr);
++	if (unlikely(ret)) {
++		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
++		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
++				      rx->rx_ring[id].mapping);
++		dev_kfree_skb_any(rx->rx_ring[id].skb);
++		rx->rx_ring[id].skb = NULL;
++	}
 +
- 			/* Toggle SCL line */
- 			dc ^= DIRCNTL_SCC;
- 			out_8(&iic->directcntl, dc);
- 			udelay(10);
- 			dc ^= DIRCNTL_SCC;
- 			out_8(&iic->directcntl, dc);
--			
++	return ret;
++}
 +
- 			/* be nice */
- 			cond_resched();
- 		}
++static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
++					     struct ipoib_cm_rx_buf *rx_ring,
++					     int id, int frags,
+ 					     u64 mapping[IPOIB_CM_RX_SG])
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -141,7 +174,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int
+ 			goto partial_error;
  	}
--	
+ 
+-	priv->cm.srq_ring[id].skb = skb;
++	rx_ring[id].skb = skb;
+ 	return skb;
+ 
+ partial_error:
+@@ -155,7 +188,23 @@ partial_error:
+ 	return NULL;
+ }
+ 
+-static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv* priv)
++static void ipoib_cm_free_rx_ring(struct net_device *dev,
++				  struct ipoib_cm_rx_buf *rx_ring)
++{
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	int i;
 +
- 	/* Remove reset */
- 	out_8(&iic->xtcntlss, 0);
--	
++	for (i = 0; i < ipoib_recvq_size; ++i)
++		if (rx_ring[i].skb) {
++			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
++					      rx_ring[i].mapping);
++			dev_kfree_skb_any(rx_ring[i].skb);
++		}
 +
- 	/* Reinitialize interface */
- 	iic_dev_init(dev);
- }
-@@ -324,14 +324,14 @@ static irqreturn_t iic_handler(int irq, void *dev_id)
- {
- 	struct ibm_iic_private* dev = (struct ibm_iic_private*)dev_id;
- 	volatile struct iic_regs __iomem *iic = dev->vaddr;
--	
--	DBG2("%d: irq handler, STS = 0x%02x, EXTSTS = 0x%02x\n", 
++	kfree(rx_ring);
++}
 +
-+	DBG2("%d: irq handler, STS = 0x%02x, EXTSTS = 0x%02x\n",
- 	     dev->idx, in_8(&iic->sts), in_8(&iic->extsts));
--	
++static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
+ {
+ 	struct ib_send_wr *bad_wr;
+ 	struct ipoib_cm_rx *p;
+@@ -208,12 +257,18 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
+ 		.qp_type = IB_QPT_RC,
+ 		.qp_context = p,
+ 	};
 +
- 	/* Acknowledge IRQ and wakeup iic_wait_for_tc */
- 	out_8(&iic->sts, STS_IRQA | STS_SCMP);
- 	wake_up_interruptible(&dev->wq);
--	
++	if (!ipoib_cm_has_srq(dev)) {
++		attr.cap.max_recv_wr  = ipoib_recvq_size;
++		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
++	}
 +
- 	return IRQ_HANDLED;
+ 	return ib_create_qp(priv->pd, &attr);
  }
  
-@@ -341,19 +341,19 @@ static irqreturn_t iic_handler(int irq, void *dev_id)
-  */
- static int iic_xfer_result(struct ibm_iic_private* dev)
+ static int ipoib_cm_modify_rx_qp(struct net_device *dev,
+-				  struct ib_cm_id *cm_id, struct ib_qp *qp,
+-				  unsigned psn)
++				 struct ib_cm_id *cm_id, struct ib_qp *qp,
++				 unsigned psn)
  {
--	volatile struct iic_regs __iomem *iic = dev->vaddr;	
--	
-+	volatile struct iic_regs __iomem *iic = dev->vaddr;
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+ 	struct ib_qp_attr qp_attr;
+@@ -266,6 +321,60 @@ static int ipoib_cm_modify_rx_qp(struct net_device *dev,
+ 	return 0;
+ }
+ 
++static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
++				   struct ipoib_cm_rx *rx)
++{
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	int ret;
++	int i;
 +
- 	if (unlikely(in_8(&iic->sts) & STS_ERR)){
--		DBG("%d: xfer error, EXTSTS = 0x%02x\n", dev->idx, 
-+		DBG("%d: xfer error, EXTSTS = 0x%02x\n", dev->idx,
- 			in_8(&iic->extsts));
--				
++	rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
++	if (!rx->rx_ring)
++		return -ENOMEM;
 +
- 		/* Clear errors and possible pending IRQs */
--		out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD | 
-+		out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD |
- 			EXTSTS_LA | EXTSTS_ICT | EXTSTS_XFRA);
--			
++	spin_lock_irq(&priv->lock);
 +
- 		/* Flush master data buffer */
- 		out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB);
--		
++	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
++		spin_unlock_irq(&priv->lock);
++		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
++		ret = -EINVAL;
++		goto err_free;
++	} else
++		++priv->cm.nonsrq_conn_qp;
 +
- 		/* Is bus free?
- 		 * If error happened during combined xfer
- 		 * IIC interface is usually stuck in some strange
-@@ -376,11 +376,11 @@ static void iic_abort_xfer(struct ibm_iic_private* dev)
- {
- 	volatile struct iic_regs __iomem *iic = dev->vaddr;
- 	unsigned long x;
--	
++	spin_unlock_irq(&priv->lock);
 +
- 	DBG("%d: iic_abort_xfer\n", dev->idx);
--	
++	for (i = 0; i < ipoib_recvq_size; ++i) {
++		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
++					   rx->rx_ring[i].mapping)) {
++			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
++				ret = -ENOMEM;
++				goto err_count;
++			}
++		ret = ipoib_cm_post_receive_nonsrq(dev, rx, i);
++		if (ret) {
++			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
++				   "failed for buf %d\n", i);
++			ret = -EIO;
++			goto err_count;
++		}
++	}
 +
- 	out_8(&iic->cntl, CNTL_HMT);
--	
++	rx->recv_count = ipoib_recvq_size;
 +
- 	/*
- 	 * Wait for the abort command to complete.
- 	 * It's not worth to be optimized, just poll (timeout >= 1 tick)
-@@ -405,13 +405,13 @@ static void iic_abort_xfer(struct ibm_iic_private* dev)
-  * Returns the number of transferred bytes or error (<0)
-  */
- static int iic_wait_for_tc(struct ibm_iic_private* dev){
--	
++	return 0;
 +
- 	volatile struct iic_regs __iomem *iic = dev->vaddr;
- 	int ret = 0;
--	
++err_count:
++	spin_lock_irq(&priv->lock);
++	--priv->cm.nonsrq_conn_qp;
++	spin_unlock_irq(&priv->lock);
 +
- 	if (dev->irq >= 0){
- 		/* Interrupt mode */
--		ret = wait_event_interruptible_timeout(dev->wq, 
-+		ret = wait_event_interruptible_timeout(dev->wq,
- 			!(in_8(&iic->sts) & STS_PT), dev->adap.timeout * HZ);
++err_free:
++	ipoib_cm_free_rx_ring(dev, rx->rx_ring);
++
++	return ret;
++}
++
+ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
+ 			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
+ 			     unsigned psn)
+@@ -281,7 +390,7 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
+ 	rep.private_data_len = sizeof data;
+ 	rep.flow_control = 0;
+ 	rep.rnr_retry_count = req->rnr_retry_count;
+-	rep.srq = 1;
++	rep.srq = ipoib_cm_has_srq(dev);
+ 	rep.qp_num = qp->qp_num;
+ 	rep.starting_psn = psn;
+ 	return ib_send_cm_rep(cm_id, &rep);
+@@ -317,6 +426,12 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
+ 	if (ret)
+ 		goto err_modify;
  
- 		if (unlikely(ret < 0))
-@@ -424,37 +424,37 @@ static int iic_wait_for_tc(struct ibm_iic_private* dev){
- 	else {
- 		/* Polling mode */
- 		unsigned long x = jiffies + dev->adap.timeout * HZ;
--		
++	if (!ipoib_cm_has_srq(dev)) {
++		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
++		if (ret)
++			goto err_modify;
++	}
 +
- 		while (in_8(&iic->sts) & STS_PT){
- 			if (unlikely(time_after(jiffies, x))){
- 				DBG("%d: poll timeout\n", dev->idx);
- 				ret = -ETIMEDOUT;
- 				break;
- 			}
--		
+ 	spin_lock_irq(&priv->lock);
+ 	queue_delayed_work(ipoib_workqueue,
+ 			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+@@ -401,12 +516,14 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	struct ipoib_cm_rx_buf *rx_ring;
+ 	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
+ 	struct sk_buff *skb, *newskb;
+ 	struct ipoib_cm_rx *p;
+ 	unsigned long flags;
+ 	u64 mapping[IPOIB_CM_RX_SG];
+ 	int frags;
++	int has_srq;
+ 
+ 	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
+ 		       wr_id, wc->status);
+@@ -424,18 +541,32 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+ 		return;
+ 	}
+ 
+-	skb  = priv->cm.srq_ring[wr_id].skb;
++	p = wc->qp->qp_context;
 +
- 			if (unlikely(signal_pending(current))){
- 				DBG("%d: poll interrupted\n", dev->idx);
- 				ret = -ERESTARTSYS;
- 				break;
- 			}
- 			schedule();
--		}	
++	has_srq = ipoib_cm_has_srq(dev);
++	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
++
++	skb = rx_ring[wr_id].skb;
+ 
+ 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ 		ipoib_dbg(priv, "cm recv error "
+ 			   "(status=%d, wrid=%d vend_err %x)\n",
+ 			   wc->status, wr_id, wc->vendor_err);
+ 		++dev->stats.rx_dropped;
+-		goto repost;
++		if (has_srq)
++			goto repost;
++		else {
++			if (!--p->recv_count) {
++				spin_lock_irqsave(&priv->lock, flags);
++				list_move(&p->list, &priv->cm.rx_reap_list);
++				spin_unlock_irqrestore(&priv->lock, flags);
++				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
++			}
++			return;
++		}
+ 	}
+ 
+ 	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
+-		p = wc->qp->qp_context;
+ 		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
+ 			spin_lock_irqsave(&priv->lock, flags);
+ 			p->jiffies = jiffies;
+@@ -450,7 +581,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+ 	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+ 					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+ 
+-	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
++	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
+ 	if (unlikely(!newskb)) {
+ 		/*
+ 		 * If we can't allocate a new RX buffer, dump
+@@ -461,8 +592,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+ 		goto repost;
+ 	}
+ 
+-	ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
+-	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
++	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
++	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
+ 
+ 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+ 		       wc->byte_len, wc->slid);
+@@ -483,9 +614,17 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+ 	netif_receive_skb(skb);
+ 
+ repost:
+-	if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
+-		ipoib_warn(priv, "ipoib_cm_post_receive failed "
+-			   "for buf %d\n", wr_id);
++	if (has_srq) {
++		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
++			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
++				   "for buf %d\n", wr_id);
++	} else {
++		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p, wr_id))) {
++			--p->recv_count;
++			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
++				   "for buf %d\n", wr_id);
 +		}
- 	}
--	
-+
- 	if (unlikely(ret < 0))
- 		iic_abort_xfer(dev);
- 	else
- 		ret = iic_xfer_result(dev);
--	
-+
- 	DBG2("%d: iic_wait_for_tc -> %d\n", dev->idx, ret);
--	
-+
- 	return ret;
++	}
  }
  
- /*
-  * Low level master transfer routine
-  */
--static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm, 
-+static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm,
- 			  int combined_xfer)
+ static inline int post_send(struct ipoib_dev_priv *priv,
+@@ -495,10 +634,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
  {
- 	volatile struct iic_regs __iomem *iic = dev->vaddr;
-@@ -465,48 +465,48 @@ static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm,
- 	u8 cntl = (in_8(&iic->cntl) & CNTL_AMD) | CNTL_PT;
- 	if (pm->flags & I2C_M_RD)
- 		cntl |= CNTL_RW;
--	
-+
- 	loops = (len + 3) / 4;
- 	for (i = 0; i < loops; ++i, len -= 4){
- 		int count = len > 4 ? 4 : len;
- 		u8 cmd = cntl | ((count - 1) << CNTL_TCT_SHIFT);
--		
-+
- 		if (!(cntl & CNTL_RW))
- 			for (j = 0; j < count; ++j)
- 				out_8((void __iomem *)&iic->mdbuf, *buf++);
--		
-+
- 		if (i < loops - 1)
- 			cmd |= CNTL_CHT;
- 		else if (combined_xfer)
- 			cmd |= CNTL_RPST;
--		
-+
- 		DBG2("%d: xfer_bytes, %d, CNTL = 0x%02x\n", dev->idx, count, cmd);
--		
-+
- 		/* Start transfer */
- 		out_8(&iic->cntl, cmd);
--		
-+
- 		/* Wait for completion */
- 		ret = iic_wait_for_tc(dev);
+ 	struct ib_send_wr *bad_wr;
  
- 		if (unlikely(ret < 0))
- 			break;
- 		else if (unlikely(ret != count)){
--			DBG("%d: xfer_bytes, requested %d, transfered %d\n", 
-+			DBG("%d: xfer_bytes, requested %d, transfered %d\n",
- 				dev->idx, count, ret);
--			
+-	priv->tx_sge.addr             = addr;
+-	priv->tx_sge.length           = len;
++	priv->tx_sge.addr	= addr;
++	priv->tx_sge.length	= len;
+ 
+-	priv->tx_wr.wr_id 	      = wr_id | IPOIB_OP_CM;
++	priv->tx_wr.wr_id	= wr_id | IPOIB_OP_CM;
+ 
+ 	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
+ }
+@@ -540,7 +679,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
+ 	tx_req->mapping = addr;
+ 
+ 	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
+-			        addr, skb->len))) {
++			       addr, skb->len))) {
+ 		ipoib_warn(priv, "post_send failed\n");
+ 		++dev->stats.tx_errors;
+ 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+@@ -657,10 +796,33 @@ err_cm:
+ 	return ret;
+ }
+ 
++static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
++{
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	struct ipoib_cm_rx *rx, *n;
++	LIST_HEAD(list);
 +
- 			/* If it's not a last part of xfer, abort it */
- 			if (combined_xfer || (i < loops - 1))
-     				iic_abort_xfer(dev);
--				
++	spin_lock_irq(&priv->lock);
++	list_splice_init(&priv->cm.rx_reap_list, &list);
++	spin_unlock_irq(&priv->lock);
 +
- 			ret = -EREMOTEIO;
--			break;				
-+			break;
- 		}
--		
++	list_for_each_entry_safe(rx, n, &list, list) {
++		ib_destroy_cm_id(rx->id);
++		ib_destroy_qp(rx->qp);
++		if (!ipoib_cm_has_srq(dev)) {
++			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
++			spin_lock_irq(&priv->lock);
++			--priv->cm.nonsrq_conn_qp;
++			spin_unlock_irq(&priv->lock);
++		}
++		kfree(rx);
++	}
++}
 +
- 		if (cntl & CNTL_RW)
- 			for (j = 0; j < count; ++j)
- 				*buf++ = in_8((void __iomem *)&iic->mdbuf);
+ void ipoib_cm_dev_stop(struct net_device *dev)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+-	struct ipoib_cm_rx *p, *n;
++	struct ipoib_cm_rx *p;
+ 	unsigned long begin;
+ 	LIST_HEAD(list);
+ 	int ret;
+@@ -706,15 +868,9 @@ void ipoib_cm_dev_stop(struct net_device *dev)
+ 		spin_lock_irq(&priv->lock);
  	}
--	
-+
- 	return ret > 0 ? 0 : ret;
+ 
+-	list_splice_init(&priv->cm.rx_reap_list, &list);
+-
+ 	spin_unlock_irq(&priv->lock);
+ 
+-	list_for_each_entry_safe(p, n, &list, list) {
+-		ib_destroy_cm_id(p->id);
+-		ib_destroy_qp(p->qp);
+-		kfree(p);
+-	}
++	ipoib_cm_free_rx_reap_list(dev);
+ 
+ 	cancel_delayed_work(&priv->cm.stale_task);
  }
+@@ -799,7 +955,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
+ 		.sq_sig_type		= IB_SIGNAL_ALL_WR,
+ 		.qp_type		= IB_QPT_RC,
+ 		.qp_context		= tx
+-        };
++	};
  
-@@ -517,10 +517,10 @@ static inline void iic_address(struct ibm_iic_private* dev, struct i2c_msg* msg)
+ 	return ib_create_qp(priv->pd, &attr);
+ }
+@@ -816,28 +972,28 @@ static int ipoib_cm_send_req(struct net_device *dev,
+ 	data.qpn = cpu_to_be32(priv->qp->qp_num);
+ 	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
+ 
+-	req.primary_path 	      = pathrec;
+-	req.alternate_path 	      = NULL;
+-	req.service_id                = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
+-	req.qp_num 		      = qp->qp_num;
+-	req.qp_type 		      = qp->qp_type;
+-	req.private_data 	      = &data;
+-	req.private_data_len 	      = sizeof data;
+-	req.flow_control 	      = 0;
++	req.primary_path		= pathrec;
++	req.alternate_path		= NULL;
++	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
++	req.qp_num			= qp->qp_num;
++	req.qp_type			= qp->qp_type;
++	req.private_data		= &data;
++	req.private_data_len		= sizeof data;
++	req.flow_control		= 0;
+ 
+-	req.starting_psn              = 0; /* FIXME */
++	req.starting_psn		= 0; /* FIXME */
+ 
+ 	/*
+ 	 * Pick some arbitrary defaults here; we could make these
+ 	 * module parameters if anyone cared about setting them.
+ 	 */
+-	req.responder_resources	      = 4;
+-	req.remote_cm_response_timeout = 20;
+-	req.local_cm_response_timeout  = 20;
+-	req.retry_count 	      = 0; /* RFC draft warns against retries */
+-	req.rnr_retry_count 	      = 0; /* RFC draft warns against retries */
+-	req.max_cm_retries 	      = 15;
+-	req.srq 	              = 1;
++	req.responder_resources		= 4;
++	req.remote_cm_response_timeout	= 20;
++	req.local_cm_response_timeout	= 20;
++	req.retry_count			= 0; /* RFC draft warns against retries */
++	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
++	req.max_cm_retries		= 15;
++	req.srq				= ipoib_cm_has_srq(dev);
+ 	return ib_send_cm_req(id, &req);
+ }
+ 
+@@ -1150,7 +1306,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
+ 	spin_unlock_irq(&priv->tx_lock);
+ }
+ 
+-void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
++void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ 			   unsigned int mtu)
  {
- 	volatile struct iic_regs __iomem *iic = dev->vaddr;
- 	u16 addr = msg->addr;
--	
--	DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx, 
-+
-+	DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx,
- 		addr, msg->flags & I2C_M_TEN ? 10 : 7);
--	
-+
- 	if (msg->flags & I2C_M_TEN){
- 	    out_8(&iic->cntl, CNTL_AMD);
- 	    out_8(&iic->lmadr, addr);
-@@ -537,15 +537,15 @@ static inline int iic_invalid_address(const struct i2c_msg* p)
- 	return (p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN) && (p->addr > 0x7f));
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -1166,20 +1322,8 @@ void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
+ 
+ static void ipoib_cm_rx_reap(struct work_struct *work)
+ {
+-	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+-						   cm.rx_reap_task);
+-	struct ipoib_cm_rx *p, *n;
+-	LIST_HEAD(list);
+-
+-	spin_lock_irq(&priv->lock);
+-	list_splice_init(&priv->cm.rx_reap_list, &list);
+-	spin_unlock_irq(&priv->lock);
+-
+-	list_for_each_entry_safe(p, n, &list, list) {
+-		ib_destroy_cm_id(p->id);
+-		ib_destroy_qp(p->qp);
+-		kfree(p);
+-	}
++	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
++						cm.rx_reap_task)->dev);
  }
  
--static inline int iic_address_neq(const struct i2c_msg* p1, 
-+static inline int iic_address_neq(const struct i2c_msg* p1,
- 				  const struct i2c_msg* p2)
+ static void ipoib_cm_stale_task(struct work_struct *work)
+@@ -1212,7 +1356,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
+ }
+ 
+ 
+-static ssize_t show_mode(struct device *d, struct device_attribute *attr, 
++static ssize_t show_mode(struct device *d, struct device_attribute *attr,
+ 			 char *buf)
  {
--	return (p1->addr != p2->addr) 
-+	return (p1->addr != p2->addr)
- 		|| ((p1->flags & I2C_M_TEN) != (p2->flags & I2C_M_TEN));
--} 
-+}
+ 	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));
+@@ -1255,16 +1399,40 @@ int ipoib_cm_add_mode_attr(struct net_device *dev)
+ 	return device_create_file(&dev->dev, &dev_attr_mode);
+ }
  
- /*
-- * Generic master transfer entrypoint. 
-+ * Generic master transfer entrypoint.
-  * Returns the number of processed messages or error (<0)
-  */
- static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-@@ -553,20 +553,20 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-     	struct ibm_iic_private* dev = (struct ibm_iic_private*)(i2c_get_adapdata(adap));
- 	volatile struct iic_regs __iomem *iic = dev->vaddr;
- 	int i, ret = 0;
--	
-+
- 	DBG2("%d: iic_xfer, %d msg(s)\n", dev->idx, num);
--	
-+
- 	if (!num)
- 		return 0;
--	
-+
- 	/* Check the sanity of the passed messages.
- 	 * Uhh, generic i2c layer is more suitable place for such code...
- 	 */
- 	if (unlikely(iic_invalid_address(&msgs[0]))){
--		DBG("%d: invalid address 0x%03x (%d-bit)\n", dev->idx, 
-+		DBG("%d: invalid address 0x%03x (%d-bit)\n", dev->idx,
- 			msgs[0].addr, msgs[0].flags & I2C_M_TEN ? 10 : 7);
- 		return -EINVAL;
--	}		
-+	}
- 	for (i = 0; i < num; ++i){
- 		if (unlikely(msgs[i].len <= 0)){
- 			if (num == 1 && !msgs[0].len){
-@@ -576,7 +576,7 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
- 				 */
- 				return iic_smbus_quick(dev, &msgs[0]);
- 			}
--			DBG("%d: invalid len %d in msg[%d]\n", dev->idx, 
-+			DBG("%d: invalid len %d in msg[%d]\n", dev->idx,
- 				msgs[i].len, i);
- 			return -EINVAL;
- 		}
-@@ -585,34 +585,34 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
- 			return -EINVAL;
+-int ipoib_cm_dev_init(struct net_device *dev)
++static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+ 	struct ib_srq_init_attr srq_init_attr = {
+ 		.attr = {
+ 			.max_wr  = ipoib_recvq_size,
+-			.max_sge = IPOIB_CM_RX_SG
++			.max_sge = max_sge
  		}
- 	}
--	
-+
- 	/* Check bus state */
- 	if (unlikely((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE)){
- 		DBG("%d: iic_xfer, bus is not free\n", dev->idx);
--		
-+
- 		/* Usually it means something serious has happend.
- 		 * We *cannot* have unfinished previous transfer
- 		 * so it doesn't make any sense to try to stop it.
--		 * Probably we were not able to recover from the 
-+		 * Probably we were not able to recover from the
- 		 * previous error.
- 		 * The only *reasonable* thing I can think of here
- 		 * is soft reset.  --ebs
- 		 */
- 		iic_dev_reset(dev);
--		
+ 	};
+-	int ret, i;
 +
- 		if ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){
- 			DBG("%d: iic_xfer, bus is still not free\n", dev->idx);
- 			return -EREMOTEIO;
- 		}
--	} 
++	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
++	if (IS_ERR(priv->cm.srq)) {
++		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
++			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
++			       priv->ca->name, PTR_ERR(priv->cm.srq));
++		priv->cm.srq = NULL;
++		return;
 +	}
- 	else {
- 		/* Flush master data buffer (just in case) */
- 		out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB);
- 	}
--	
 +
- 	/* Load slave address */
- 	iic_address(dev, &msgs[0]);
--	
++	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
++				    GFP_KERNEL);
++	if (!priv->cm.srq_ring) {
++		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
++		       priv->ca->name, ipoib_recvq_size);
++		ib_destroy_srq(priv->cm.srq);
++		priv->cm.srq = NULL;
++	}
++}
 +
- 	/* Do real transfer */
-     	for (i = 0; i < num && !ret; ++i)
- 		ret = iic_xfer_bytes(dev, &msgs[i], i < num - 1);
-@@ -648,7 +648,7 @@ static inline u8 iic_clckdiv(unsigned int opb)
++int ipoib_cm_dev_init(struct net_device *dev)
++{
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	int i, ret;
++	struct ib_device_attr attr;
  
- 	/* Convert to MHz */
- 	opb /= 1000000;
--	
-+
- 	if (opb < 20 || opb > 150){
- 		printk(KERN_CRIT "ibm-iic: invalid OPB clock frequency %u MHz\n",
- 			opb);
-@@ -666,7 +666,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
- 	struct i2c_adapter* adap;
- 	struct ocp_func_iic_data* iic_data = ocp->def->additions;
- 	int ret;
--	
-+
- 	if (!iic_data)
- 		printk(KERN_WARNING"ibm-iic%d: missing additional data!\n",
- 			ocp->def->index);
-@@ -679,7 +679,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
+ 	INIT_LIST_HEAD(&priv->cm.passive_ids);
+ 	INIT_LIST_HEAD(&priv->cm.reap_list);
+@@ -1281,43 +1449,53 @@ int ipoib_cm_dev_init(struct net_device *dev)
  
- 	dev->idx = ocp->def->index;
- 	ocp_set_drvdata(ocp, dev);
--	
+ 	skb_queue_head_init(&priv->cm.skb_queue);
+ 
+-	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
+-	if (IS_ERR(priv->cm.srq)) {
+-		ret = PTR_ERR(priv->cm.srq);
+-		priv->cm.srq = NULL;
++	ret = ib_query_device(priv->ca, &attr);
++	if (ret) {
++		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
+ 		return ret;
+ 	}
+ 
+-	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
+-				    GFP_KERNEL);
+-	if (!priv->cm.srq_ring) {
+-		printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
+-		       priv->ca->name, ipoib_recvq_size);
+-		ipoib_cm_dev_cleanup(dev);
+-		return -ENOMEM;
++	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);
 +
- 	if (!request_mem_region(ocp->def->paddr, sizeof(struct iic_regs),
- 				"ibm_iic")) {
- 		ret = -EBUSY;
-@@ -692,7 +692,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
- 		ret = -ENXIO;
- 		goto fail2;
++	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
++	ipoib_cm_create_srq(dev, attr.max_srq_sge);
++	if (ipoib_cm_has_srq(dev)) {
++		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
++		priv->cm.num_frags  = attr.max_srq_sge;
++		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
++			  priv->cm.max_cm_mtu, priv->cm.num_frags);
++	} else {
++		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
++		priv->cm.num_frags  = IPOIB_CM_RX_SG;
  	}
--	
+ 
+-	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
++	for (i = 0; i < priv->cm.num_frags; ++i)
+ 		priv->cm.rx_sge[i].lkey	= priv->mr->lkey;
+ 
+ 	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
+-	for (i = 1; i < IPOIB_CM_RX_SG; ++i)
++	for (i = 1; i < priv->cm.num_frags; ++i)
+ 		priv->cm.rx_sge[i].length = PAGE_SIZE;
+ 	priv->cm.rx_wr.next = NULL;
+ 	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
+-	priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;
++	priv->cm.rx_wr.num_sge = priv->cm.num_frags;
 +
- 	init_waitqueue_head(&dev->wq);
++	if (ipoib_cm_has_srq(dev)) {
++		for (i = 0; i < ipoib_recvq_size; ++i) {
++			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
++						   priv->cm.num_frags - 1,
++						   priv->cm.srq_ring[i].mapping)) {
++				ipoib_warn(priv, "failed to allocate "
++					   "receive buffer %d\n", i);
++				ipoib_cm_dev_cleanup(dev);
++				return -ENOMEM;
++			}
  
- 	dev->irq = iic_force_poll ? -1 : ocp->def->irq;
-@@ -702,29 +702,29 @@ static int __devinit iic_probe(struct ocp_device *ocp){
- 		 */
- 		iic_interrupt_mode(dev, 0);
- 		if (request_irq(dev->irq, iic_handler, 0, "IBM IIC", dev)){
--			printk(KERN_ERR "ibm-iic%d: request_irq %d failed\n", 
-+			printk(KERN_ERR "ibm-iic%d: request_irq %d failed\n",
- 				dev->idx, dev->irq);
--			/* Fallback to the polling mode */	
-+			/* Fallback to the polling mode */
- 			dev->irq = -1;
+-	for (i = 0; i < ipoib_recvq_size; ++i) {
+-		if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
+-					   priv->cm.srq_ring[i].mapping)) {
+-			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+-			ipoib_cm_dev_cleanup(dev);
+-			return -ENOMEM;
+-		}
+-		if (ipoib_cm_post_receive(dev, i)) {
+-			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
+-			ipoib_cm_dev_cleanup(dev);
+-			return -EIO;
++			if (ipoib_cm_post_receive_srq(dev, i)) {
++				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
++					   "failed for buf %d\n", i);
++				ipoib_cm_dev_cleanup(dev);
++				return -EIO;
++			}
  		}
  	}
--	
-+
- 	if (dev->irq < 0)
--		printk(KERN_WARNING "ibm-iic%d: using polling mode\n", 
-+		printk(KERN_WARNING "ibm-iic%d: using polling mode\n",
- 			dev->idx);
--		
-+
- 	/* Board specific settings */
- 	dev->fast_mode = iic_force_fast ? 1 : (iic_data ? iic_data->fast_mode : 0);
--	
--	/* clckdiv is the same for *all* IIC interfaces, 
-+
-+	/* clckdiv is the same for *all* IIC interfaces,
- 	 * but I'd rather make a copy than introduce another global. --ebs
- 	 */
- 	dev->clckdiv = iic_clckdiv(ocp_sys_info.opb_bus_freq);
- 	DBG("%d: clckdiv = %d\n", dev->idx, dev->clckdiv);
--	
-+
- 	/* Initialize IIC interface */
- 	iic_dev_init(dev);
--	
-+
- 	/* Register it with i2c layer */
- 	adap = &dev->adap;
- 	adap->dev.parent = &ocp->dev;
-@@ -736,7 +736,6 @@ static int __devinit iic_probe(struct ocp_device *ocp){
- 	adap->client_register = NULL;
- 	adap->client_unregister = NULL;
- 	adap->timeout = 1;
--	adap->retries = 1;
  
- 	/*
- 	 * If "dev->idx" is negative we consider it as zero.
-@@ -750,24 +749,24 @@ static int __devinit iic_probe(struct ocp_device *ocp){
- 			dev->idx);
- 		goto fail;
- 	}
--	
+@@ -1328,7 +1506,7 @@ int ipoib_cm_dev_init(struct net_device *dev)
+ void ipoib_cm_dev_cleanup(struct net_device *dev)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+-	int i, ret;
++	int ret;
+ 
+ 	if (!priv->cm.srq)
+ 		return;
+@@ -1342,13 +1520,7 @@ void ipoib_cm_dev_cleanup(struct net_device *dev)
+ 	priv->cm.srq = NULL;
+ 	if (!priv->cm.srq_ring)
+ 		return;
+-	for (i = 0; i < ipoib_recvq_size; ++i)
+-		if (priv->cm.srq_ring[i].skb) {
+-			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+-					      priv->cm.srq_ring[i].mapping);
+-			dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
+-			priv->cm.srq_ring[i].skb = NULL;
+-		}
+-	kfree(priv->cm.srq_ring);
 +
- 	printk(KERN_INFO "ibm-iic%d: using %s mode\n", dev->idx,
- 		dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
++	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
+ 	priv->cm.srq_ring = NULL;
+ }
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+index 44c1741..8b882bb 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -124,7 +124,7 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
+ 	return 0;
+ }
  
+-static struct seq_operations ipoib_mcg_seq_ops = {
++static const struct seq_operations ipoib_mcg_seq_ops = {
+ 	.start = ipoib_mcg_seq_start,
+ 	.next  = ipoib_mcg_seq_next,
+ 	.stop  = ipoib_mcg_seq_stop,
+@@ -230,7 +230,7 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
  	return 0;
+ }
  
--fail:	
-+fail:
- 	if (dev->irq >= 0){
- 		iic_interrupt_mode(dev, 0);
- 		free_irq(dev->irq, dev);
--	}	
-+	}
+-static struct seq_operations ipoib_path_seq_ops = {
++static const struct seq_operations ipoib_path_seq_ops = {
+ 	.start = ipoib_path_seq_start,
+ 	.next  = ipoib_path_seq_next,
+ 	.stop  = ipoib_path_seq_stop,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index 5063dd5..52bc2bd 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -345,12 +345,12 @@ static inline int post_send(struct ipoib_dev_priv *priv,
+ {
+ 	struct ib_send_wr *bad_wr;
  
- 	iounmap(dev->vaddr);
--fail2:	
-+fail2:
- 	release_mem_region(ocp->def->paddr, sizeof(struct iic_regs));
- fail1:
- 	ocp_set_drvdata(ocp, NULL);
--	kfree(dev);	
-+	kfree(dev);
- 	return ret;
+-	priv->tx_sge.addr             = addr;
+-	priv->tx_sge.length           = len;
++	priv->tx_sge.addr	      = addr;
++	priv->tx_sge.length	      = len;
+ 
+-	priv->tx_wr.wr_id 	      = wr_id;
++	priv->tx_wr.wr_id	      = wr_id;
+ 	priv->tx_wr.wr.ud.remote_qpn  = qpn;
+-	priv->tx_wr.wr.ud.ah 	      = address;
++	priv->tx_wr.wr.ud.ah	      = address;
+ 
+ 	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
  }
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index c9f6077..a082466 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -182,17 +182,20 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
  
-@@ -783,13 +782,13 @@ static void __devexit iic_remove(struct ocp_device *ocp)
- 			dev->idx);
- 		/* That's *very* bad, just shutdown IRQ ... */
- 		if (dev->irq >= 0){
--		    iic_interrupt_mode(dev, 0);	
-+		    iic_interrupt_mode(dev, 0);
- 		    free_irq(dev->irq, dev);
- 		    dev->irq = -1;
- 		}
- 	} else {
- 		if (dev->irq >= 0){
--		    iic_interrupt_mode(dev, 0);	
-+		    iic_interrupt_mode(dev, 0);
- 		    free_irq(dev->irq, dev);
- 		}
- 		iounmap(dev->vaddr);
-@@ -798,7 +797,7 @@ static void __devexit iic_remove(struct ocp_device *ocp)
+ 	/* dev->mtu > 2K ==> connected mode */
+-	if (ipoib_cm_admin_enabled(dev) && new_mtu <= IPOIB_CM_MTU) {
++	if (ipoib_cm_admin_enabled(dev)) {
++		if (new_mtu > ipoib_cm_max_mtu(dev))
++			return -EINVAL;
++
+ 		if (new_mtu > priv->mcast_mtu)
+ 			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
+ 				   priv->mcast_mtu);
++
+ 		dev->mtu = new_mtu;
+ 		return 0;
  	}
- }
  
--static struct ocp_device_id ibm_iic_ids[] __devinitdata = 
-+static struct ocp_device_id ibm_iic_ids[] __devinitdata =
- {
- 	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_IIC },
- 	{ .vendor = OCP_VENDOR_INVALID }
-diff --git a/drivers/i2c/busses/i2c-ibm_iic.h b/drivers/i2c/busses/i2c-ibm_iic.h
-index 59d7b43..fdaa482 100644
---- a/drivers/i2c/busses/i2c-ibm_iic.h
-+++ b/drivers/i2c/busses/i2c-ibm_iic.h
-@@ -2,11 +2,11 @@
-  * drivers/i2c/busses/i2c-ibm_iic.h
-  *
-  * Support for the IIC peripheral on IBM PPC 4xx
-- * 
-+ *
-  * Copyright (c) 2003 Zultys Technologies.
-  * Eugene Surovegin <eugene.surovegin at zultys.com> or <ebs at ebshome.net>
-  *
-- * Based on original work by 
-+ * Based on original work by
-  * 	Ian DaSilva  <idasilva at mvista.com>
-  *      Armin Kuster <akuster at mvista.com>
-  * 	Matt Porter  <mporter at mvista.com>
-@@ -22,7 +22,7 @@
- #ifndef __I2C_IBM_IIC_H_
- #define __I2C_IBM_IIC_H_
+-	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) {
++	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+ 		return -EINVAL;
+-	}
  
--#include <linux/i2c.h> 
-+#include <linux/i2c.h>
+ 	priv->admin_mtu = new_mtu;
  
- struct iic_regs {
- 	u16 mdbuf;
-@@ -58,7 +58,7 @@ struct ibm_iic_private {
- #define CNTL_TCT_MASK	0x30
- #define CNTL_TCT_SHIFT	4
- #define CNTL_RPST	0x08
--#define CNTL_CHT	0x04 
-+#define CNTL_CHT	0x04
- #define CNTL_RW		0x02
- #define CNTL_PT		0x01
+@@ -474,8 +477,8 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
+ 	INIT_LIST_HEAD(&path->neigh_list);
  
-diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
-index c70146e..ab41400 100644
---- a/drivers/i2c/busses/i2c-iop3xx.c
-+++ b/drivers/i2c/busses/i2c-iop3xx.c
-@@ -490,7 +490,6 @@ iop3xx_i2c_probe(struct platform_device *pdev)
- 	 * Default values...should these come in from board code?
- 	 */
- 	new_adapter->timeout = 100;	
--	new_adapter->retries = 3;
- 	new_adapter->algo = &iop3xx_i2c_algo;
+ 	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
+-	path->pathrec.sgid          = priv->local_gid;
+-	path->pathrec.pkey          = cpu_to_be16(priv->pkey);
++	path->pathrec.sgid	    = priv->local_gid;
++	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
+ 	path->pathrec.numb_path     = 1;
+ 	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ 
+@@ -669,16 +672,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
+ 		return NETDEV_TX_LOCKED;
  
- 	init_waitqueue_head(&adapter_data->waitq);
-diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c
-deleted file mode 100644
-index 069ed7f..0000000
---- a/drivers/i2c/busses/i2c-ixp4xx.c
-+++ /dev/null
-@@ -1,178 +0,0 @@
--/*
-- * drivers/i2c/busses/i2c-ixp4xx.c
-- *
-- * Intel's IXP4xx XScale NPU chipsets (IXP420, 421, 422, 425) do not have
-- * an on board I2C controller but provide 16 GPIO pins that are often
-- * used to create an I2C bus. This driver provides an i2c_adapter 
-- * interface that plugs in under algo_bit and drives the GPIO pins
-- * as instructed by the alogorithm driver.
-- *
-- * Author: Deepak Saxena <dsaxena at plexity.net>
-- *
-- * Copyright (c) 2003-2004 MontaVista Software Inc.
-- *
-- * This file is licensed under the terms of the GNU General Public 
-- * License version 2. This program is licensed "as is" without any 
-- * warranty of any kind, whether express or implied.
-- *
-- * NOTE: Since different platforms will use different GPIO pins for
-- *       I2C, this driver uses an IXP4xx-specific platform_data
-- *       pointer to pass the GPIO numbers to the driver. This 
-- *       allows us to support all the different IXP4xx platforms
-- *       w/o having to put #ifdefs in this driver.
-- *
-- *       See arch/arm/mach-ixp4xx/ixdp425.c for an example of building a 
-- *       device list and filling in the ixp4xx_i2c_pins data structure 
-- *       that is passed as the platform_data to this driver.
-- */
--
--#include <linux/kernel.h>
--#include <linux/init.h>
--#include <linux/platform_device.h>
--#include <linux/module.h>
--#include <linux/i2c.h>
--#include <linux/i2c-algo-bit.h>
--
--#include <asm/hardware.h>	/* Pick up IXP4xx-specific bits */
--
--static inline int ixp4xx_scl_pin(void *data)
--{
--	return ((struct ixp4xx_i2c_pins*)data)->scl_pin;
--}
--
--static inline int ixp4xx_sda_pin(void *data)
--{
--	return ((struct ixp4xx_i2c_pins*)data)->sda_pin;
--}
--
--static void ixp4xx_bit_setscl(void *data, int val)
--{
--	gpio_line_set(ixp4xx_scl_pin(data), 0);
--	gpio_line_config(ixp4xx_scl_pin(data),
--		val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT );
--}
--
--static void ixp4xx_bit_setsda(void *data, int val)
--{
--	gpio_line_set(ixp4xx_sda_pin(data), 0);
--	gpio_line_config(ixp4xx_sda_pin(data),
--		val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT );
--}
--
--static int ixp4xx_bit_getscl(void *data)
--{
--	int scl;
--
--	gpio_line_config(ixp4xx_scl_pin(data), IXP4XX_GPIO_IN );
--	gpio_line_get(ixp4xx_scl_pin(data), &scl);
--
--	return scl;
--}	
--
--static int ixp4xx_bit_getsda(void *data)
--{
--	int sda;
--
--	gpio_line_config(ixp4xx_sda_pin(data), IXP4XX_GPIO_IN );
--	gpio_line_get(ixp4xx_sda_pin(data), &sda);
--
--	return sda;
--}	
--
--struct ixp4xx_i2c_data {
--	struct ixp4xx_i2c_pins *gpio_pins;
--	struct i2c_adapter adapter;
--	struct i2c_algo_bit_data algo_data;
--};
--
--static int ixp4xx_i2c_remove(struct platform_device *plat_dev)
--{
--	struct ixp4xx_i2c_data *drv_data = platform_get_drvdata(plat_dev);
--
--	platform_set_drvdata(plat_dev, NULL);
--
--	i2c_del_adapter(&drv_data->adapter);
--
--	kfree(drv_data);
--
--	return 0;
--}
--
--static int ixp4xx_i2c_probe(struct platform_device *plat_dev)
--{
--	int err;
--	struct ixp4xx_i2c_pins *gpio = plat_dev->dev.platform_data;
--	struct ixp4xx_i2c_data *drv_data = 
--		kzalloc(sizeof(struct ixp4xx_i2c_data), GFP_KERNEL);
--
--	if(!drv_data)
--		return -ENOMEM;
--
--	drv_data->gpio_pins = gpio;
--
 -	/*
--	 * We could make a lot of these structures static, but
--	 * certain platforms may have multiple GPIO-based I2C
--	 * buses for various device domains, so we need per-device
--	 * algo_data->data. 
+-	 * Check if our queue is stopped.  Since we have the LLTX bit
+-	 * set, we can't rely on netif_stop_queue() preventing our
+-	 * xmit function from being called with a full queue.
 -	 */
--	drv_data->algo_data.data = gpio;
--	drv_data->algo_data.setsda = ixp4xx_bit_setsda;
--	drv_data->algo_data.setscl = ixp4xx_bit_setscl;
--	drv_data->algo_data.getsda = ixp4xx_bit_getsda;
--	drv_data->algo_data.getscl = ixp4xx_bit_getscl;
--	drv_data->algo_data.udelay = 10;
--	drv_data->algo_data.timeout = 100;
--
--	drv_data->adapter.id = I2C_HW_B_IXP4XX;
--	drv_data->adapter.class = I2C_CLASS_HWMON;
--	strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
--		sizeof(drv_data->adapter.name));
--	drv_data->adapter.algo_data = &drv_data->algo_data;
--
--	drv_data->adapter.dev.parent = &plat_dev->dev;
--
--	gpio_line_config(gpio->scl_pin, IXP4XX_GPIO_IN);
--	gpio_line_config(gpio->sda_pin, IXP4XX_GPIO_IN);
--	gpio_line_set(gpio->scl_pin, 0);
--	gpio_line_set(gpio->sda_pin, 0);
--
--	err = i2c_bit_add_bus(&drv_data->adapter);
--	if (err) {
--		printk(KERN_ERR "ERROR: Could not install %s\n", plat_dev->dev.bus_id);
--
--		kfree(drv_data);
--		return err;
+-	if (unlikely(netif_queue_stopped(dev))) {
+-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+-		return NETDEV_TX_BUSY;
 -	}
 -
--	platform_set_drvdata(plat_dev, drv_data);
--
--	return 0;
--}
--
--static struct platform_driver ixp4xx_i2c_driver = {
--	.probe		= ixp4xx_i2c_probe,
--	.remove		= ixp4xx_i2c_remove,
--	.driver		= {
--		.name	= "IXP4XX-I2C",
--		.owner	= THIS_MODULE,
--	},
--};
--
--static int __init ixp4xx_i2c_init(void)
--{
--	return platform_driver_register(&ixp4xx_i2c_driver);
--}
--
--static void __exit ixp4xx_i2c_exit(void)
--{
--	platform_driver_unregister(&ixp4xx_i2c_driver);
--}
--
--module_init(ixp4xx_i2c_init);
--module_exit(ixp4xx_i2c_exit);
--
--MODULE_DESCRIPTION("GPIO-based I2C adapter for IXP4xx systems");
--MODULE_LICENSE("GPL");
--MODULE_AUTHOR("Deepak Saxena <dsaxena at plexity.net>");
--
-diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
-index d8de4ac..bbe787b 100644
---- a/drivers/i2c/busses/i2c-mpc.c
-+++ b/drivers/i2c/busses/i2c-mpc.c
-@@ -180,7 +180,7 @@ static void mpc_i2c_stop(struct mpc_i2c *i2c)
- static int mpc_write(struct mpc_i2c *i2c, int target,
- 		     const u8 * data, int length, int restart)
+ 	if (likely(skb->dst && skb->dst->neighbour)) {
+ 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
+ 			ipoib_path_lookup(skb, dev);
+@@ -950,34 +943,34 @@ static void ipoib_setup(struct net_device *dev)
  {
--	int i;
-+	int i, result;
- 	unsigned timeout = i2c->adap.timeout;
- 	u32 flags = restart ? CCR_RSTA : 0;
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
  
-@@ -192,15 +192,17 @@ static int mpc_write(struct mpc_i2c *i2c, int target,
- 	/* Write target byte */
- 	writeb((target << 1), i2c->base + MPC_I2C_DR);
+-	dev->open 		 = ipoib_open;
+-	dev->stop 		 = ipoib_stop;
+-	dev->change_mtu 	 = ipoib_change_mtu;
+-	dev->hard_start_xmit 	 = ipoib_start_xmit;
+-	dev->tx_timeout 	 = ipoib_timeout;
+-	dev->header_ops 	 = &ipoib_header_ops;
+-	dev->set_multicast_list  = ipoib_set_mcast_list;
+-	dev->neigh_setup         = ipoib_neigh_setup_dev;
++	dev->open		 = ipoib_open;
++	dev->stop		 = ipoib_stop;
++	dev->change_mtu		 = ipoib_change_mtu;
++	dev->hard_start_xmit	 = ipoib_start_xmit;
++	dev->tx_timeout		 = ipoib_timeout;
++	dev->header_ops		 = &ipoib_header_ops;
++	dev->set_multicast_list	 = ipoib_set_mcast_list;
++	dev->neigh_setup	 = ipoib_neigh_setup_dev;
  
--	if (i2c_wait(i2c, timeout, 1) < 0)
--		return -1;
-+	result = i2c_wait(i2c, timeout, 1);
-+	if (result < 0)
-+		return result;
+ 	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
  
- 	for (i = 0; i < length; i++) {
- 		/* Write data byte */
- 		writeb(data[i], i2c->base + MPC_I2C_DR);
+-	dev->watchdog_timeo 	 = HZ;
++	dev->watchdog_timeo	 = HZ;
  
--		if (i2c_wait(i2c, timeout, 1) < 0)
--			return -1;
-+		result = i2c_wait(i2c, timeout, 1);
-+		if (result < 0)
-+			return result;
- 	}
+-	dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;
++	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;
  
- 	return 0;
-@@ -210,7 +212,7 @@ static int mpc_read(struct mpc_i2c *i2c, int target,
- 		    u8 * data, int length, int restart)
- {
- 	unsigned timeout = i2c->adap.timeout;
--	int i;
-+	int i, result;
- 	u32 flags = restart ? CCR_RSTA : 0;
+ 	/*
+ 	 * We add in INFINIBAND_ALEN to allow for the destination
+ 	 * address "pseudoheader" for skbs without neighbour struct.
+ 	 */
+-	dev->hard_header_len 	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
+-	dev->addr_len 		 = INFINIBAND_ALEN;
+-	dev->type 		 = ARPHRD_INFINIBAND;
+-	dev->tx_queue_len 	 = ipoib_sendq_size * 2;
+-	dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
++	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
++	dev->addr_len		 = INFINIBAND_ALEN;
++	dev->type		 = ARPHRD_INFINIBAND;
++	dev->tx_queue_len	 = ipoib_sendq_size * 2;
++	dev->features		 = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
  
- 	/* Start with MEN */
-@@ -221,8 +223,9 @@ static int mpc_read(struct mpc_i2c *i2c, int target,
- 	/* Write target address byte - this time with the read flag set */
- 	writeb((target << 1) | 1, i2c->base + MPC_I2C_DR);
+ 	/* MTU will be reset when mcast join happens */
+-	dev->mtu 		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
+-	priv->mcast_mtu 	 = priv->admin_mtu = dev->mtu;
++	dev->mtu		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
++	priv->mcast_mtu		 = priv->admin_mtu = dev->mtu;
  
--	if (i2c_wait(i2c, timeout, 1) < 0)
--		return -1;
-+	result = i2c_wait(i2c, timeout, 1);
-+	if (result < 0)
-+		return result;
+ 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
  
- 	if (length) {
- 		if (length == 1)
-@@ -234,8 +237,9 @@ static int mpc_read(struct mpc_i2c *i2c, int target,
- 	}
+@@ -1268,6 +1261,9 @@ static int __init ipoib_init_module(void)
+ 	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
+ 	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
+ 	ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);
++#ifdef CONFIG_INFINIBAND_IPOIB_CM
++	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
++#endif
  
- 	for (i = 0; i < length; i++) {
--		if (i2c_wait(i2c, timeout, 0) < 0)
--			return -1;
-+		result = i2c_wait(i2c, timeout, 0);
-+		if (result < 0)
-+			return result;
+ 	ret = ipoib_register_debugfs();
+ 	if (ret)
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index 9bcfc7a..2628339 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -702,7 +702,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
  
- 		/* Generate txack on next to last byte */
- 		if (i == length - 2)
-@@ -309,7 +313,6 @@ static struct i2c_adapter mpc_ops = {
- 	.algo = &mpc_algo,
- 	.class = I2C_CLASS_HWMON,
- 	.timeout = 1,
--	.retries = 1
- };
+ out:
+ 	if (mcast && mcast->ah) {
+-		if (skb->dst            &&
++		if (skb->dst		&&
+ 		    skb->dst->neighbour &&
+ 		    !*to_ipoib_neigh(skb->dst->neighbour)) {
+ 			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour,
+@@ -710,7 +710,7 @@ out:
  
- static int fsl_i2c_probe(struct platform_device *pdev)
-@@ -321,9 +324,9 @@ static int fsl_i2c_probe(struct platform_device *pdev)
+ 			if (neigh) {
+ 				kref_get(&mcast->ah->ref);
+-				neigh->ah  	= mcast->ah;
++				neigh->ah	= mcast->ah;
+ 				list_add_tail(&neigh->list, &mcast->neigh_list);
+ 			}
+ 		}
+@@ -788,10 +788,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
  
- 	pdata = (struct fsl_i2c_platform_data *) pdev->dev.platform_data;
+ 		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
  
--	if (!(i2c = kzalloc(sizeof(*i2c), GFP_KERNEL))) {
-+	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
-+	if (!i2c)
- 		return -ENOMEM;
--	}
+-		/* Add in the P_Key */
+-		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
+-		mgid.raw[5] = priv->pkey & 0xff;
+-
+ 		mcast = __ipoib_mcast_find(dev, &mgid);
+ 		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ 			struct ipoib_mcast *nmcast;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+index 3c6e45d..433e99a 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+@@ -172,8 +172,12 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
  
- 	i2c->irq = platform_get_irq(pdev, 0);
- 	if (i2c->irq < 0) {
-diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
-index bb7bf68..036e6a8 100644
---- a/drivers/i2c/busses/i2c-mv64xxx.c
-+++ b/drivers/i2c/busses/i2c-mv64xxx.c
-@@ -1,6 +1,6 @@
- /*
-- * Driver for the i2c controller on the Marvell line of host bridges for MIPS
-- * and PPC (e.g, gt642[46]0, mv643[46]0, mv644[46]0).
-+ * Driver for the i2c controller on the Marvell line of host bridges
-+ * (e.g, gt642[46]0, mv643[46]0, mv644[46]0, and Orion SoC family).
-  *
-  * Author: Mark A. Greer <mgreer at mvista.com>
-  *
-@@ -14,7 +14,7 @@
- #include <linux/spinlock.h>
- #include <linux/i2c.h>
- #include <linux/interrupt.h>
--#include <linux/mv643xx.h>
-+#include <linux/mv643xx_i2c.h>
- #include <linux/platform_device.h>
+ 	size = ipoib_sendq_size + ipoib_recvq_size + 1;
+ 	ret = ipoib_cm_dev_init(dev);
+-	if (!ret)
+-		size += ipoib_recvq_size + 1 /* 1 extra for rx_drain_qp */;
++	if (!ret) {
++		if (ipoib_cm_has_srq(dev))
++			size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
++		else
++			size += ipoib_recvq_size * ipoib_max_conn_qp;
++	}
  
- #include <asm/io.h>
-@@ -86,6 +86,7 @@ struct mv64xxx_i2c_data {
- 	u32			cntl_bits;
- 	void __iomem		*reg_base;
- 	u32			reg_base_p;
-+	u32			reg_size;
- 	u32			addr1;
- 	u32			addr2;
- 	u32			bytes_left;
-@@ -463,17 +464,20 @@ static int __devinit
- mv64xxx_i2c_map_regs(struct platform_device *pd,
- 	struct mv64xxx_i2c_data *drv_data)
- {
--	struct resource	*r;
-+	int size;
-+	struct resource	*r = platform_get_resource(pd, IORESOURCE_MEM, 0);
+ 	priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
+ 	if (IS_ERR(priv->cq)) {
+@@ -197,12 +201,12 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
+ 	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
+ 	priv->dev->dev_addr[3] = (priv->qp->qp_num      ) & 0xff;
  
--	if ((r = platform_get_resource(pd, IORESOURCE_MEM, 0)) &&
--		request_mem_region(r->start, MV64XXX_I2C_REG_BLOCK_SIZE,
--			drv_data->adapter.name)) {
-+	if (!r)
-+		return -ENODEV;
+-	priv->tx_sge.lkey 	= priv->mr->lkey;
++	priv->tx_sge.lkey	= priv->mr->lkey;
  
--		drv_data->reg_base = ioremap(r->start,
--			MV64XXX_I2C_REG_BLOCK_SIZE);
--		drv_data->reg_base_p = r->start;
--	} else
--		return -ENOMEM;
-+	size = r->end - r->start + 1;
-+
-+	if (!request_mem_region(r->start, size, drv_data->adapter.name))
-+		return -EBUSY;
-+
-+	drv_data->reg_base = ioremap(r->start, size);
-+	drv_data->reg_base_p = r->start;
-+	drv_data->reg_size = size;
+-	priv->tx_wr.opcode 	= IB_WR_SEND;
+-	priv->tx_wr.sg_list 	= &priv->tx_sge;
+-	priv->tx_wr.num_sge 	= 1;
+-	priv->tx_wr.send_flags 	= IB_SEND_SIGNALED;
++	priv->tx_wr.opcode	= IB_WR_SEND;
++	priv->tx_wr.sg_list	= &priv->tx_sge;
++	priv->tx_wr.num_sge	= 1;
++	priv->tx_wr.send_flags	= IB_SEND_SIGNALED;
  
  	return 0;
- }
-@@ -483,8 +487,7 @@ mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
+ 
+diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
+index fe604c8..77dedba 100644
+--- a/drivers/infiniband/ulp/iser/Kconfig
++++ b/drivers/infiniband/ulp/iser/Kconfig
+@@ -8,5 +8,5 @@ config INFINIBAND_ISER
+           that speak iSCSI over iSER over InfiniBand.
+ 
+ 	  The iSER protocol is defined by IETF.
+-	  See <http://www.ietf.org/internet-drafts/draft-ietf-ips-iser-05.txt>
+-	  and <http://www.infinibandta.org/members/spec/iser_annex_060418.pdf>
++	  See <http://www.ietf.org/rfc/rfc5046.txt>
++	  and <http://www.infinibandta.org/members/spec/Annex_iSER.PDF>
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..be1b9fb 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -129,7 +129,7 @@ error:
+  * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+  *
+  **/
+-static void
++static int
+ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
  {
- 	if (drv_data->reg_base) {
- 		iounmap(drv_data->reg_base);
--		release_mem_region(drv_data->reg_base_p,
--			MV64XXX_I2C_REG_BLOCK_SIZE);
-+		release_mem_region(drv_data->reg_base_p, drv_data->reg_size);
+ 	struct iscsi_iser_conn     *iser_conn  = ctask->conn->dd_data;
+@@ -138,6 +138,7 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ 	iser_ctask->command_sent = 0;
+ 	iser_ctask->iser_conn    = iser_conn;
+ 	iser_ctask_rdma_init(iser_ctask);
++	return 0;
+ }
+ 
+ /**
+@@ -220,12 +221,6 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ 	debug_scsi("ctask deq [cid %d itt 0x%x]\n",
+ 		   conn->id, ctask->itt);
+ 
+-	/*
+-	 * serialize with TMF AbortTask
+-	 */
+-	if (ctask->mtask)
+-		return error;
+-
+ 	/* Send the cmd PDU */
+ 	if (!iser_ctask->command_sent) {
+ 		error = iser_send_command(conn, ctask);
+@@ -406,6 +401,7 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ 		ctask      = session->cmds[i];
+ 		iser_ctask = ctask->dd_data;
+ 		ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++		ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
  	}
  
- 	drv_data->reg_base = NULL;
-@@ -529,7 +532,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
- 	drv_data->adapter.owner = THIS_MODULE;
- 	drv_data->adapter.class = I2C_CLASS_HWMON;
- 	drv_data->adapter.timeout = pdata->timeout;
--	drv_data->adapter.retries = pdata->retries;
- 	drv_data->adapter.nr = pd->id;
- 	platform_set_drvdata(pd, drv_data);
- 	i2c_set_adapdata(&drv_data->adapter, drv_data);
-diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
-index 1bf590c..3dac920 100644
---- a/drivers/i2c/busses/i2c-nforce2.c
-+++ b/drivers/i2c/busses/i2c-nforce2.c
-@@ -351,6 +351,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
- 	pci_set_drvdata(dev, smbuses);
+ 	for (i = 0; i < session->mgmtpool_max; i++) {
+@@ -551,11 +547,13 @@ static struct scsi_host_template iscsi_iser_sht = {
+ 	.module                 = THIS_MODULE,
+ 	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
+ 	.queuecommand           = iscsi_queuecommand,
++	.change_queue_depth	= iscsi_change_queue_depth,
+ 	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
+ 	.max_sectors		= 1024,
+ 	.cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
+ 	.eh_abort_handler       = iscsi_eh_abort,
++	.eh_device_reset_handler= iscsi_eh_device_reset,
+ 	.eh_host_reset_handler	= iscsi_eh_host_reset,
+ 	.use_clustering         = DISABLE_CLUSTERING,
+ 	.proc_name              = "iscsi_iser",
+@@ -582,7 +580,9 @@ static struct iscsi_transport iscsi_iser_transport = {
+ 				  ISCSI_PERSISTENT_ADDRESS |
+ 				  ISCSI_TARGET_NAME | ISCSI_TPGT |
+ 				  ISCSI_USERNAME | ISCSI_PASSWORD |
+-				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
++				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
++				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
++				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
+ 				  ISCSI_HOST_NETDEV_NAME |
+ 				  ISCSI_HOST_INITIATOR_NAME,
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index a6f2303..83247f1 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -561,7 +561,7 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ 	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+ 	        itt = get_itt(hdr->itt); /* mask out cid and age bits */
+ 		if (!(itt < session->cmds_max))
+-			iser_err("itt can't be matched to task!!!"
++			iser_err("itt can't be matched to task!!! "
+ 				 "conn %p opcode %d cmds_max %d itt %d\n",
+ 				 conn->iscsi_conn,opcode,session->cmds_max,itt);
+ 		/* use the mapping given with the cmds array indexed by itt */
+@@ -621,9 +621,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ 			struct iscsi_session *session = conn->session;
  
- 	switch(dev->device) {
-+	case PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS:
- 	case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS:
- 	case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS:
- 		smbuses[0].blockops = 1;
-diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
-index f2552b1..da66397 100644
---- a/drivers/i2c/busses/i2c-omap.c
-+++ b/drivers/i2c/busses/i2c-omap.c
-@@ -362,8 +362,6 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+ 			spin_lock(&conn->session->lock);
+-			list_del(&mtask->running);
+-			__kfifo_put(session->mgmtpool.queue, (void*)&mtask,
+-				    sizeof(void*));
++			iscsi_free_mgmt_task(conn, mtask);
+ 			spin_unlock(&session->lock);
+ 		}
+ 	}
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 654a4dc..714b8db 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -105,7 +105,7 @@ pd_err:
+ }
  
- 	omap_i2c_enable_clocks(dev);
+ /**
+- * iser_free_device_ib_res - destory/dealloc/dereg the DMA MR,
++ * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
+  * CQ and PD created with the device associated with the adapator.
+  */
+ static void iser_free_device_ib_res(struct iser_device *device)
+@@ -475,13 +475,11 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ 		iser_disconnected_handler(cma_id);
+ 		break;
+ 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
++		iser_err("Device removal is currently unsupported\n");
+ 		BUG();
+ 		break;
+-	case RDMA_CM_EVENT_CONNECT_RESPONSE:
+-		BUG();
+-		break;
+-	case RDMA_CM_EVENT_CONNECT_REQUEST:
+ 	default:
++		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
+ 		break;
+ 	}
+ 	return ret;
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index bdb6f85..195ce7c 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -272,7 +272,8 @@ static void srp_path_rec_completion(int status,
  
--	/* REVISIT: initialize and use adap->retries. This is an optional
--	 * feature */
- 	if ((r = omap_i2c_wait_for_bb(dev)) < 0)
- 		goto out;
+ 	target->status = status;
+ 	if (status)
+-		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
++		shost_printk(KERN_ERR, target->scsi_host,
++			     PFX "Got failed path rec status %d\n", status);
+ 	else
+ 		target->path = *pathrec;
+ 	complete(&target->done);
+@@ -303,7 +304,8 @@ static int srp_lookup_path(struct srp_target_port *target)
+ 	wait_for_completion(&target->done);
  
-diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
-index ca18e0b..1603c81 100644
---- a/drivers/i2c/busses/i2c-pasemi.c
-+++ b/drivers/i2c/busses/i2c-pasemi.c
-@@ -368,6 +368,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev,
- 	smbus->adapter.class = I2C_CLASS_HWMON;
- 	smbus->adapter.algo = &smbus_algorithm;
- 	smbus->adapter.algo_data = smbus;
-+	smbus->adapter.nr = PCI_FUNC(dev->devfn);
+ 	if (target->status < 0)
+-		printk(KERN_WARNING PFX "Path record query failed\n");
++		shost_printk(KERN_WARNING, target->scsi_host,
++			     PFX "Path record query failed\n");
  
- 	/* set up the sysfs linkage to our parent device */
- 	smbus->adapter.dev.parent = &dev->dev;
-@@ -375,7 +376,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev,
- 	reg_write(smbus, REG_CTL, (CTL_MTR | CTL_MRR |
- 		  (CLK_100K_DIV & CTL_CLK_M)));
+ 	return target->status;
+ }
+@@ -379,9 +381,10 @@ static int srp_send_req(struct srp_target_port *target)
+ 	 * the second 8 bytes to the local node GUID.
+ 	 */
+ 	if (srp_target_is_topspin(target)) {
+-		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
+-		       "activated for target GUID %016llx\n",
+-		       (unsigned long long) be64_to_cpu(target->ioc_guid));
++		shost_printk(KERN_DEBUG, target->scsi_host,
++			     PFX "Topspin/Cisco initiator port ID workaround "
++			     "activated for target GUID %016llx\n",
++			     (unsigned long long) be64_to_cpu(target->ioc_guid));
+ 		memset(req->priv.initiator_port_id, 0, 8);
+ 		memcpy(req->priv.initiator_port_id + 8,
+ 		       &target->srp_host->dev->dev->node_guid, 8);
+@@ -400,7 +403,8 @@ static void srp_disconnect_target(struct srp_target_port *target)
  
--	error = i2c_add_adapter(&smbus->adapter);
-+	error = i2c_add_numbered_adapter(&smbus->adapter);
- 	if (error)
- 		goto out_release_region;
+ 	init_completion(&target->done);
+ 	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+-		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
++		shost_printk(KERN_DEBUG, target->scsi_host,
++			     PFX "Sending CM DREQ failed\n");
+ 		return;
+ 	}
+ 	wait_for_completion(&target->done);
+@@ -568,7 +572,8 @@ static int srp_reconnect_target(struct srp_target_port *target)
+ 	return ret;
  
-diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
-index 167e413..9bbe96c 100644
---- a/drivers/i2c/busses/i2c-piix4.c
-+++ b/drivers/i2c/busses/i2c-piix4.c
-@@ -121,10 +121,6 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
- {
- 	unsigned char temp;
+ err:
+-	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);
++	shost_printk(KERN_ERR, target->scsi_host,
++		     PFX "reconnect failed (%d), removing target port.\n", ret);
  
--	/* match up the function */
--	if (PCI_FUNC(PIIX4_dev->devfn) != id->driver_data)
--		return -ENODEV;
--
- 	dev_info(&PIIX4_dev->dev, "Found %s device\n", pci_name(PIIX4_dev));
+ 	/*
+ 	 * We couldn't reconnect, so kill our target port off.
+@@ -683,8 +688,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
  
- 	/* Don't access SMBus on IBM systems which get corrupted eeproms */
-@@ -389,28 +385,21 @@ static struct i2c_adapter piix4_adapter = {
- };
+ 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
+ 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
+-		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
+-		       scmnd->sc_data_direction);
++		shost_printk(KERN_WARNING, target->scsi_host,
++			     PFX "Unhandled data direction %d\n",
++			     scmnd->sc_data_direction);
+ 		return -EINVAL;
+ 	}
  
- static struct pci_device_id piix4_ids[] = {
--	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3),
--	  .driver_data = 3 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS),
--	  .driver_data = 0 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS),
--	  .driver_data = 0 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS),
--	  .driver_data = 0 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS),
--	  .driver_data = 0 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4),
--	  .driver_data = 0 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5),
--	  .driver_data = 0 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6),
--	  .driver_data = 0 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB),
--	  .driver_data = 0 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3),
--	  .driver_data = 3 },
--	{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3),
--	  .driver_data = 0 },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
-+		     PCI_DEVICE_ID_SERVERWORKS_OSB4) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
-+		     PCI_DEVICE_ID_SERVERWORKS_CSB5) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
-+		     PCI_DEVICE_ID_SERVERWORKS_CSB6) },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
-+		     PCI_DEVICE_ID_SERVERWORKS_HT1000SB) },
- 	{ 0, }
- };
+@@ -786,8 +792,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
+ 	} else {
+ 		scmnd = req->scmnd;
+ 		if (!scmnd)
+-			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
+-			       (unsigned long long) rsp->tag);
++			shost_printk(KERN_ERR, target->scsi_host,
++				     "Null scmnd for RSP w/tag %016llx\n",
++				     (unsigned long long) rsp->tag);
+ 		scmnd->result = rsp->status;
  
-diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
-index 6426a61..2598d29 100644
---- a/drivers/i2c/busses/i2c-pxa.c
-+++ b/drivers/i2c/busses/i2c-pxa.c
-@@ -65,6 +65,7 @@ struct pxa_i2c {
- 	unsigned long		iosize;
+ 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
+@@ -831,7 +838,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
+ 	if (0) {
+ 		int i;
  
- 	int			irq;
-+	int			use_pio;
- };
+-		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);
++		shost_printk(KERN_ERR, target->scsi_host,
++			     PFX "recv completion, opcode 0x%02x\n", opcode);
  
- #define _IBMR(i2c)	((i2c)->reg_base + 0)
-@@ -163,6 +164,7 @@ static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname)
- #define eedbg(lvl, x...) do { if ((lvl) < 1) { printk(KERN_DEBUG "" x); } } while(0)
+ 		for (i = 0; i < wc->byte_len; ++i) {
+ 			if (i % 8 == 0)
+@@ -852,11 +860,13 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
  
- static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret);
-+static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id);
+ 	case SRP_T_LOGOUT:
+ 		/* XXX Handle target logout */
+-		printk(KERN_WARNING PFX "Got target logout request\n");
++		shost_printk(KERN_WARNING, target->scsi_host,
++			     PFX "Got target logout request\n");
+ 		break;
  
- static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
- {
-@@ -554,6 +556,71 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c)
- 	writel(icr, _ICR(i2c));
- }
+ 	default:
+-		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
++		shost_printk(KERN_WARNING, target->scsi_host,
++			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
+ 		break;
+ 	}
  
-+static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c)
-+{
-+	/* make timeout the same as for interrupt based functions */
-+	long timeout = 2 * DEF_TIMEOUT;
-+
-+	/*
-+	 * Wait for the bus to become free.
-+	 */
-+	while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) {
-+		udelay(1000);
-+		show_state(i2c);
-+	}
-+
-+	if (timeout <= 0) {
-+		show_state(i2c);
-+		dev_err(&i2c->adap.dev,
-+			"i2c_pxa: timeout waiting for bus free\n");
-+		return I2C_RETRY;
-+	}
-+
-+	/*
-+	 * Set master mode.
-+	 */
-+	writel(readl(_ICR(i2c)) | ICR_SCLE, _ICR(i2c));
-+
-+	return 0;
-+}
-+
-+static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c,
-+			       struct i2c_msg *msg, int num)
-+{
-+	unsigned long timeout = 500000; /* 5 seconds */
-+	int ret = 0;
-+
-+	ret = i2c_pxa_pio_set_master(i2c);
-+	if (ret)
-+		goto out;
-+
-+	i2c->msg = msg;
-+	i2c->msg_num = num;
-+	i2c->msg_idx = 0;
-+	i2c->msg_ptr = 0;
-+	i2c->irqlogidx = 0;
-+
-+	i2c_pxa_start_message(i2c);
-+
-+	while (timeout-- && i2c->msg_num > 0) {
-+		i2c_pxa_handler(0, i2c);
-+		udelay(10);
-+	}
-+
-+	i2c_pxa_stop_message(i2c);
-+
-+	/*
-+	 * We place the return code in i2c->msg_idx.
-+	 */
-+	ret = i2c->msg_idx;
-+
-+out:
-+	if (timeout == 0)
-+		i2c_pxa_scream_blue_murder(i2c, "timeout");
-+
-+	return ret;
-+}
-+
- /*
-  * We are protected by the adapter bus mutex.
+@@ -872,9 +882,10 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
+ 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ 	while (ib_poll_cq(cq, 1, &wc) > 0) {
+ 		if (wc.status) {
+-			printk(KERN_ERR PFX "failed %s status %d\n",
+-			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+-			       wc.status);
++			shost_printk(KERN_ERR, target->scsi_host,
++				     PFX "failed %s status %d\n",
++				     wc.wr_id & SRP_OP_RECV ? "receive" : "send",
++				     wc.status);
+ 			target->qp_in_error = 1;
+ 			break;
+ 		}
+@@ -930,13 +941,18 @@ static int srp_post_recv(struct srp_target_port *target)
+  * req_lim and tx_head.  Lock cannot be dropped between call here and
+  * call to __srp_post_send().
   */
-@@ -610,6 +677,35 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
- 	return ret;
- }
- 
-+static int i2c_pxa_pio_xfer(struct i2c_adapter *adap,
-+			    struct i2c_msg msgs[], int num)
-+{
-+	struct pxa_i2c *i2c = adap->algo_data;
-+	int ret, i;
-+
-+	/* If the I2C controller is disabled we need to reset it
-+	  (probably due to a suspend/resume destroying state). We do
-+	  this here as we can then avoid worrying about resuming the
-+	  controller before its users. */
-+	if (!(readl(_ICR(i2c)) & ICR_IUE))
-+		i2c_pxa_reset(i2c);
-+
-+	for (i = adap->retries; i >= 0; i--) {
-+		ret = i2c_pxa_do_pio_xfer(i2c, msgs, num);
-+		if (ret != I2C_RETRY)
-+			goto out;
+-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
++static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
++					enum srp_request_type req_type)
+ {
++	s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
 +
-+		if (i2c_debug)
-+			dev_dbg(&adap->dev, "Retrying transmission\n");
-+		udelay(100);
+ 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+ 		return NULL;
+ 
+-	if (unlikely(target->req_lim < 1))
++	if (target->req_lim < min) {
+ 		++target->zero_req_lim;
++		return NULL;
 +	}
-+	i2c_pxa_scream_blue_murder(i2c, "exhausted retries");
-+	ret = -EREMOTEIO;
-+ out:
-+	i2c_pxa_set_slave(i2c, ret);
-+	return ret;
-+}
-+
- /*
-  * i2c_pxa_master_complete - complete the message and wake up.
-  */
-@@ -621,7 +717,8 @@ static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret)
- 	i2c->msg_num = 0;
- 	if (ret)
- 		i2c->msg_idx = ret;
--	wake_up(&i2c->wait);
-+	if (!i2c->use_pio)
-+		wake_up(&i2c->wait);
+ 
+ 	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
  }
+@@ -993,7 +1009,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
+ 		return 0;
+ 	}
  
- static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr)
-@@ -840,6 +937,37 @@ static const struct i2c_algorithm i2c_pxa_algorithm = {
- 	.functionality	= i2c_pxa_functionality,
- };
+-	iu = __srp_get_tx_iu(target);
++	iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
+ 	if (!iu)
+ 		goto err;
  
-+static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
-+	.master_xfer	= i2c_pxa_pio_xfer,
-+	.functionality	= i2c_pxa_functionality,
-+};
-+
-+static void i2c_pxa_enable(struct platform_device *dev)
-+{
-+	if (cpu_is_pxa27x()) {
-+		switch (dev->id) {
-+		case 0:
-+			pxa_gpio_mode(GPIO117_I2CSCL_MD);
-+			pxa_gpio_mode(GPIO118_I2CSDA_MD);
-+			break;
-+		case 1:
-+			local_irq_disable();
-+			PCFR |= PCFR_PI2CEN;
-+			local_irq_enable();
-+			break;
-+		}
-+	}
-+}
-+
-+static void i2c_pxa_disable(struct platform_device *dev)
-+{
-+	if (cpu_is_pxa27x() && dev->id == 1) {
-+		local_irq_disable();
-+		PCFR &= ~PCFR_PI2CEN;
-+		local_irq_enable();
-+	}
-+}
-+
- #define res_len(r)		((r)->end - (r)->start + 1)
- static int i2c_pxa_probe(struct platform_device *dev)
+@@ -1022,12 +1038,13 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
+ 
+ 	len = srp_map_data(scmnd, target, req);
+ 	if (len < 0) {
+-		printk(KERN_ERR PFX "Failed to map data\n");
++		shost_printk(KERN_ERR, target->scsi_host,
++			     PFX "Failed to map data\n");
+ 		goto err;
+ 	}
+ 
+ 	if (__srp_post_recv(target)) {
+-		printk(KERN_ERR PFX "Recv failed\n");
++		shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n");
+ 		goto err_unmap;
+ 	}
+ 
+@@ -1035,7 +1052,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
+ 				      DMA_TO_DEVICE);
+ 
+ 	if (__srp_post_send(target, iu, len)) {
+-		printk(KERN_ERR PFX "Send failed\n");
++		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
+ 		goto err_unmap;
+ 	}
+ 
+@@ -1090,6 +1107,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
+ 			       struct ib_cm_event *event,
+ 			       struct srp_target_port *target)
  {
-@@ -864,7 +992,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
++	struct Scsi_Host *shost = target->scsi_host;
+ 	struct ib_class_port_info *cpi;
+ 	int opcode;
+ 
+@@ -1115,19 +1133,22 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
+ 			memcpy(target->path.dgid.raw,
+ 			       event->param.rej_rcvd.ari, 16);
+ 
+-			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
+-			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
+-			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
++			shost_printk(KERN_DEBUG, shost,
++				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
++				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
++				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
+ 
+ 			target->status = SRP_PORT_REDIRECT;
+ 		} else {
+-			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
++			shost_printk(KERN_WARNING, shost,
++				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
+ 			target->status = -ECONNRESET;
+ 		}
+ 		break;
+ 
+ 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
+-		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
++		shost_printk(KERN_WARNING, shost,
++			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
+ 		target->status = -ECONNRESET;
+ 		break;
+ 
+@@ -1138,20 +1159,21 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
+ 			u32 reason = be32_to_cpu(rej->reason);
+ 
+ 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
+-				printk(KERN_WARNING PFX
+-				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
++				shost_printk(KERN_WARNING, shost,
++					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
+ 			else
+-				printk(KERN_WARNING PFX
+-				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
++				shost_printk(KERN_WARNING, shost,
++					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
+ 		} else
+-			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
+-			       " opcode 0x%02x\n", opcode);
++			shost_printk(KERN_WARNING, shost,
++				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
++				     " opcode 0x%02x\n", opcode);
+ 		target->status = -ECONNRESET;
+ 		break;
+ 
+ 	default:
+-		printk(KERN_WARNING "  REJ reason 0x%x\n",
+-		       event->param.rej_rcvd.reason);
++		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
++			     event->param.rej_rcvd.reason);
+ 		target->status = -ECONNRESET;
  	}
+ }
+@@ -1166,7 +1188,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
  
- 	i2c->adap.owner   = THIS_MODULE;
--	i2c->adap.algo    = &i2c_pxa_algorithm;
- 	i2c->adap.retries = 5;
+ 	switch (event->event) {
+ 	case IB_CM_REQ_ERROR:
+-		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
++		shost_printk(KERN_DEBUG, target->scsi_host,
++			     PFX "Sending CM REQ failed\n");
+ 		comp = 1;
+ 		target->status = -ECONNRESET;
+ 		break;
+@@ -1184,7 +1207,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+ 			target->scsi_host->can_queue = min(target->req_lim,
+ 							   target->scsi_host->can_queue);
+ 		} else {
+-			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
++			shost_printk(KERN_WARNING, target->scsi_host,
++				    PFX "Unhandled RSP opcode %#x\n", opcode);
+ 			target->status = -ECONNRESET;
+ 			break;
+ 		}
+@@ -1230,20 +1254,23 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+ 		break;
  
- 	spin_lock_init(&i2c->lock);
-@@ -899,34 +1026,28 @@ static int i2c_pxa_probe(struct platform_device *dev)
- #endif
+ 	case IB_CM_REJ_RECEIVED:
+-		printk(KERN_DEBUG PFX "REJ received\n");
++		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
+ 		comp = 1;
  
- 	clk_enable(i2c->clk);
--#ifdef CONFIG_PXA27x
--	switch (dev->id) {
--	case 0:
--		pxa_gpio_mode(GPIO117_I2CSCL_MD);
--		pxa_gpio_mode(GPIO118_I2CSDA_MD);
--		break;
--	case 1:
--		local_irq_disable();
--		PCFR |= PCFR_PI2CEN;
--		local_irq_enable();
--	}
--#endif
-+	i2c_pxa_enable(dev);
+ 		srp_cm_rej_handler(cm_id, event, target);
+ 		break;
  
--	ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED,
--			  i2c->adap.name, i2c);
--	if (ret)
--		goto ereqirq;
-+	if (plat) {
-+		i2c->adap.class = plat->class;
-+		i2c->use_pio = plat->use_pio;
-+	}
+ 	case IB_CM_DREQ_RECEIVED:
+-		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
++		shost_printk(KERN_WARNING, target->scsi_host,
++			     PFX "DREQ received - connection closed\n");
+ 		if (ib_send_cm_drep(cm_id, NULL, 0))
+-			printk(KERN_ERR PFX "Sending CM DREP failed\n");
++			shost_printk(KERN_ERR, target->scsi_host,
++				     PFX "Sending CM DREP failed\n");
+ 		break;
  
-+	if (i2c->use_pio) {
-+		i2c->adap.algo = &i2c_pxa_pio_algorithm;
-+	} else {
-+		i2c->adap.algo = &i2c_pxa_algorithm;
-+		ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED,
-+				  i2c->adap.name, i2c);
-+		if (ret)
-+			goto ereqirq;
-+	}
+ 	case IB_CM_TIMEWAIT_EXIT:
+-		printk(KERN_ERR PFX "connection closed\n");
++		shost_printk(KERN_ERR, target->scsi_host,
++			     PFX "connection closed\n");
  
- 	i2c_pxa_reset(i2c);
+ 		comp = 1;
+ 		target->status = 0;
+@@ -1255,7 +1282,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+ 		break;
  
- 	i2c->adap.algo_data = i2c;
- 	i2c->adap.dev.parent = &dev->dev;
+ 	default:
+-		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
++		shost_printk(KERN_WARNING, target->scsi_host,
++			     PFX "Unhandled CM event %d\n", event->event);
+ 		break;
+ 	}
  
--	if (plat) {
--		i2c->adap.class = plat->class;
--	}
--
- 	/*
- 	 * If "dev->id" is negative we consider it as zero.
- 	 * The reason to do so is to avoid sysfs names that only make
-@@ -952,17 +1073,11 @@ static int i2c_pxa_probe(struct platform_device *dev)
- 	return 0;
+@@ -1283,7 +1311,7 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
  
- eadapt:
--	free_irq(irq, i2c);
-+	if (!i2c->use_pio)
-+		free_irq(irq, i2c);
- ereqirq:
- 	clk_disable(i2c->clk);
--
--#ifdef CONFIG_PXA27x
--	if (dev->id == 1) {
--		local_irq_disable();
--		PCFR &= ~PCFR_PI2CEN;
--		local_irq_enable();
--	}
--#endif
-+	i2c_pxa_disable(dev);
- eremap:
- 	clk_put(i2c->clk);
- eclk:
-@@ -979,18 +1094,12 @@ static int i2c_pxa_remove(struct platform_device *dev)
- 	platform_set_drvdata(dev, NULL);
+ 	init_completion(&req->done);
  
- 	i2c_del_adapter(&i2c->adap);
--	free_irq(i2c->irq, i2c);
-+	if (!i2c->use_pio)
-+		free_irq(i2c->irq, i2c);
+-	iu = __srp_get_tx_iu(target);
++	iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
+ 	if (!iu)
+ 		goto out;
  
- 	clk_disable(i2c->clk);
- 	clk_put(i2c->clk);
--
--#ifdef CONFIG_PXA27x
--	if (dev->id == 1) {
--		local_irq_disable();
--		PCFR &= ~PCFR_PI2CEN;
--		local_irq_enable();
--	}
--#endif
-+	i2c_pxa_disable(dev);
+@@ -1332,7 +1360,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ 	struct srp_request *req;
+ 	int ret = SUCCESS;
  
- 	release_mem_region(i2c->iobase, i2c->iosize);
- 	kfree(i2c);
-diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
-index 503a134..8fbbdb4 100644
---- a/drivers/i2c/busses/i2c-sibyte.c
-+++ b/drivers/i2c/busses/i2c-sibyte.c
-@@ -36,14 +36,6 @@ struct i2c_algo_sibyte_data {
- /* ----- global defines ----------------------------------------------- */
- #define SMB_CSR(a,r) ((long)(a->reg_base + r))
+-	printk(KERN_ERR "SRP abort called\n");
++	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
  
--/* ----- global variables --------------------------------------------- */
--
--/* module parameters:
-- */
--static int bit_scan;	/* have a look at what's hanging 'round */
--module_param(bit_scan, int, 0);
--MODULE_PARM_DESC(bit_scan, "Scan for active chips on the bus");
--
+ 	if (target->qp_in_error)
+ 		return FAILED;
+@@ -1362,7 +1390,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
+ 	struct srp_target_port *target = host_to_target(scmnd->device->host);
+ 	struct srp_request *req, *tmp;
  
- static int smbus_xfer(struct i2c_adapter *i2c_adap, u16 addr,
- 		      unsigned short flags, char read_write,
-@@ -140,9 +132,8 @@ static const struct i2c_algorithm i2c_sibyte_algo = {
- /*
-  * registering functions to load algorithms at runtime
-  */
--int i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
-+int __init i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
- {
--	int i;
- 	struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data;
+-	printk(KERN_ERR "SRP reset_device called\n");
++	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
  
- 	/* register new adapter to i2c module... */
-@@ -152,24 +143,6 @@ int i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
- 	csr_out32(speed, SMB_CSR(adap,R_SMB_FREQ));
- 	csr_out32(0, SMB_CSR(adap,R_SMB_CONTROL));
+ 	if (target->qp_in_error)
+ 		return FAILED;
+@@ -1389,7 +1417,7 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
+ 	struct srp_target_port *target = host_to_target(scmnd->device->host);
+ 	int ret = FAILED;
  
--	/* scan bus */
--	if (bit_scan) {
--		union i2c_smbus_data data;
--		int rc;
--		printk(KERN_INFO " i2c-algo-sibyte.o: scanning bus %s.\n",
--		       i2c_adap->name);
--		for (i = 0x00; i < 0x7f; i++) {
--			/* XXXKW is this a realistic probe? */
--			rc = smbus_xfer(i2c_adap, i, 0, I2C_SMBUS_READ, 0,
--					I2C_SMBUS_BYTE_DATA, &data);
--			if (!rc) {
--				printk("(%02x)",i);
--			} else
--				printk(".");
--		}
--		printk("\n");
--	}
--
- 	return i2c_add_adapter(i2c_adap);
- }
+-	printk(KERN_ERR PFX "SRP reset_host called\n");
++	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
  
-diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
-index 84df29d..c2a9f8c 100644
---- a/drivers/i2c/busses/i2c-stub.c
-+++ b/drivers/i2c/busses/i2c-stub.c
-@@ -1,8 +1,8 @@
- /*
--    i2c-stub.c - Part of lm_sensors, Linux kernel modules for hardware
--              monitoring
-+    i2c-stub.c - I2C/SMBus chip emulator
+ 	if (!srp_reconnect_target(target))
+ 		ret = SUCCESS;
+@@ -1814,8 +1842,9 @@ static ssize_t srp_create_target(struct class_device *class_dev,
+ 
+ 	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);
+ 
+-	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
+-	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++	shost_printk(KERN_DEBUG, target->scsi_host, PFX
++		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
++		     "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ 	       (unsigned long long) be64_to_cpu(target->id_ext),
+ 	       (unsigned long long) be64_to_cpu(target->ioc_guid),
+ 	       be16_to_cpu(target->path.pkey),
+@@ -1842,7 +1871,8 @@ static ssize_t srp_create_target(struct class_device *class_dev,
+ 	target->qp_in_error = 0;
+ 	ret = srp_connect_target(target);
+ 	if (ret) {
+-		printk(KERN_ERR PFX "Connection failed\n");
++		shost_printk(KERN_ERR, target->scsi_host,
++			     PFX "Connection failed\n");
+ 		goto err_cm_id;
+ 	}
+ 
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
+index e3573e7..4a3c1f3 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.h
++++ b/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -79,6 +79,11 @@ enum srp_target_state {
+ 	SRP_TARGET_REMOVED
+ };
+ 
++enum srp_request_type {
++	SRP_REQ_NORMAL,
++	SRP_REQ_TASK_MGMT,
++};
++
+ struct srp_device {
+ 	struct list_head	dev_list;
+ 	struct ib_device       *dev;
+diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
+index 8991ab0..61cff83 100644
+--- a/drivers/input/mouse/pc110pad.c
++++ b/drivers/input/mouse/pc110pad.c
+@@ -39,6 +39,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
++#include <linux/delay.h>
+ 
+ #include <asm/io.h>
+ #include <asm/irq.h>
+@@ -62,8 +63,10 @@ static irqreturn_t pc110pad_interrupt(int irq, void *ptr)
+ 	int value     = inb_p(pc110pad_io);
+ 	int handshake = inb_p(pc110pad_io + 2);
+ 
+-	outb_p(handshake |  1, pc110pad_io + 2);
+-	outb_p(handshake & ~1, pc110pad_io + 2);
++	outb(handshake |  1, pc110pad_io + 2);
++	udelay(2);
++	outb(handshake & ~1, pc110pad_io + 2);
++	udelay(2);
+ 	inb_p(0x64);
+ 
+ 	pc110pad_data[pc110pad_count++] = value;
+diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
+index b1b2e07..99d92f5 100644
+--- a/drivers/input/touchscreen/corgi_ts.c
++++ b/drivers/input/touchscreen/corgi_ts.c
+@@ -74,10 +74,10 @@ extern unsigned int get_clk_frequency_khz(int info);
  
-     Copyright (c) 2004 Mark M. Hoffman <mhoffman at lightlink.com>
-+    Copyright (C) 2007 Jean Delvare <khali at linux-fr.org>
+ static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
+ {
+-	unsigned long hsync_len = corgi_ts->machinfo->get_hsync_len();
++	unsigned long hsync_invperiod = corgi_ts->machinfo->get_hsync_invperiod();
  
-     This program is free software; you can redistribute it and/or modify
-     it under the terms of the GNU General Public License as published by
-@@ -37,8 +37,8 @@ MODULE_PARM_DESC(chip_addr,
+-	if (hsync_len)
+-		return get_clk_frequency_khz(0)*1000/hsync_len;
++	if (hsync_invperiod)
++		return get_clk_frequency_khz(0)*1000/hsync_invperiod;
+ 	else
+ 		return 0;
+ }
+@@ -114,7 +114,7 @@ static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doRecive, i
+ 			if (timer2-timer1 > wait_time) {
+ 				/* too slow - timeout, try again */
+ 				corgi_ts->machinfo->wait_hsync();
+-				/* get OSCR */
++				/* get CCNT */
+ 				CCNT(timer1);
+ 				/* Wait after HSync */
+ 				CCNT(timer2);
+diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
+index f449dae..23ae66c 100644
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -1544,11 +1544,11 @@ static int __init capi_init(void)
+ 		return PTR_ERR(capi_class);
+ 	}
  
- struct stub_chip {
- 	u8 pointer;
--	u8 bytes[256];
--	u16 words[256];
-+	u16 words[256];		/* Byte operations use the LSB as per SMBus
-+				   specification */
- };
+-	class_device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi");
++	device_create(capi_class, NULL, MKDEV(capi_major, 0), "capi");
  
- static struct stub_chip *stub_chips;
-@@ -75,7 +75,7 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
- 					"wrote 0x%02x.\n",
- 					addr, command);
- 		} else {
--			data->byte = chip->bytes[chip->pointer++];
-+			data->byte = chip->words[chip->pointer++] & 0xff;
- 			dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, "
- 					"read  0x%02x.\n",
- 					addr, data->byte);
-@@ -86,12 +86,13 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
+ #ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
+ 	if (capinc_tty_init() < 0) {
+-		class_device_destroy(capi_class, MKDEV(capi_major, 0));
++		device_destroy(capi_class, MKDEV(capi_major, 0));
+ 		class_destroy(capi_class);
+ 		unregister_chrdev(capi_major, "capi20");
+ 		return -ENOMEM;
+@@ -1576,7 +1576,7 @@ static void __exit capi_exit(void)
+ {
+ 	proc_exit();
  
- 	case I2C_SMBUS_BYTE_DATA:
- 		if (read_write == I2C_SMBUS_WRITE) {
--			chip->bytes[command] = data->byte;
-+			chip->words[command] &= 0xff00;
-+			chip->words[command] |= data->byte;
- 			dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
- 					"wrote 0x%02x at 0x%02x.\n",
- 					addr, data->byte, command);
- 		} else {
--			data->byte = chip->bytes[command];
-+			data->byte = chip->words[command] & 0xff;
- 			dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
- 					"read  0x%02x at 0x%02x.\n",
- 					addr, data->byte, command);
-diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
-index c9ce77f..77b13d0 100644
---- a/drivers/i2c/busses/i2c-viapro.c
-+++ b/drivers/i2c/busses/i2c-viapro.c
-@@ -4,7 +4,7 @@
-     Copyright (c) 1998 - 2002  Frodo Looijaard <frodol at dds.nl>,
-     Philip Edelbrock <phil at netroedge.com>, Kyösti Mälkki <kmalkki at cc.hut.fi>,
-     Mark D. Studebaker <mdsxyz123 at yahoo.com>
--    Copyright (C) 2005 - 2007  Jean Delvare <khali at linux-fr.org>
-+    Copyright (C) 2005 - 2008  Jean Delvare <khali at linux-fr.org>
+-	class_device_destroy(capi_class, MKDEV(capi_major, 0));
++	device_destroy(capi_class, MKDEV(capi_major, 0));
+ 	class_destroy(capi_class);
+ 	unregister_chrdev(capi_major, "capi20");
  
-     This program is free software; you can redistribute it and/or modify
-     it under the terms of the GNU General Public License as published by
-@@ -35,6 +35,7 @@
-    VT8235             0x3177             yes
-    VT8237R            0x3227             yes
-    VT8237A            0x3337             yes
-+   VT8237S            0x3372             yes
-    VT8251             0x3287             yes
-    CX700              0x8324             yes
+diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
+index 48c1775..cb42b69 100644
+--- a/drivers/isdn/capi/capidrv.c
++++ b/drivers/isdn/capi/capidrv.c
+@@ -2332,13 +2332,14 @@ static int __init capidrv_init(void)
  
-@@ -318,6 +319,10 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
- 	unsigned char temp;
- 	int error = -ENODEV;
+ static void __exit capidrv_exit(void)
+ {
+-	char rev[10];
++	char rev[32];
+ 	char *p;
  
-+	/* driver_data might come from user-space, so check it */
-+	if (id->driver_data & 1 || id->driver_data > 0xff)
-+		return -EINVAL;
-+
- 	/* Determine the address of the SMBus areas */
- 	if (force_addr) {
- 		vt596_smba = force_addr & 0xfff0;
-@@ -389,6 +394,7 @@ found:
- 	case PCI_DEVICE_ID_VIA_8251:
- 	case PCI_DEVICE_ID_VIA_8237:
- 	case PCI_DEVICE_ID_VIA_8237A:
-+	case PCI_DEVICE_ID_VIA_8237S:
- 	case PCI_DEVICE_ID_VIA_8235:
- 	case PCI_DEVICE_ID_VIA_8233A:
- 	case PCI_DEVICE_ID_VIA_8233_0:
-@@ -440,6 +446,8 @@ static struct pci_device_id vt596_ids[] = {
- 	  .driver_data = SMBBA3 },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A),
- 	  .driver_data = SMBBA3 },
-+	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237S),
-+	  .driver_data = SMBBA3 },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4),
- 	  .driver_data = SMBBA1 },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8251),
-@@ -455,6 +463,7 @@ static struct pci_driver vt596_driver = {
- 	.name		= "vt596_smbus",
- 	.id_table	= vt596_ids,
- 	.probe		= vt596_probe,
-+	.dynids.use_driver_data = 1,
+ 	if ((p = strchr(revision, ':')) != 0) {
+-		strcpy(rev, p + 1);
+-		p = strchr(rev, '$');
+-		*p = 0;
++		strncpy(rev, p + 1, sizeof(rev));
++		rev[sizeof(rev)-1] = 0;
++		if ((p = strchr(rev, '$')) != 0)
++			*p = 0;
+ 	} else {
+ 		strcpy(rev, " ??? ");
+ 	}
+diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
+index a0317ab..02bdaf2 100644
+--- a/drivers/isdn/gigaset/gigaset.h
++++ b/drivers/isdn/gigaset/gigaset.h
+@@ -106,12 +106,6 @@ enum debuglevel {
+ 					 activated */
  };
  
- static int __init i2c_vt596_init(void)
-diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
-index 2e1c24f..bd7082c 100644
---- a/drivers/i2c/chips/Kconfig
-+++ b/drivers/i2c/chips/Kconfig
-@@ -4,32 +4,6 @@
- 
- menu "Miscellaneous I2C Chip support"
- 
--config SENSORS_DS1337
--	tristate "Dallas DS1337 and DS1339 Real Time Clock (DEPRECATED)"
--	depends on EXPERIMENTAL
--	help
--	  If you say yes here you get support for Dallas Semiconductor
--	  DS1337 and DS1339 real-time clock chips.
--
--	  This driver can also be built as a module.  If so, the module
--	  will be called ds1337.
+-/* missing from linux/device.h ... */
+-#ifndef dev_notice
+-#define dev_notice(dev, format, arg...)		\
+-	dev_printk(KERN_NOTICE , dev , format , ## arg)
+-#endif
 -
--	  This driver is deprecated and will be dropped soon. Use
--	  rtc-ds1307 instead.
+ /* Kernel message macros for situations where dev_printk and friends cannot be
+  * used for lack of reliable access to a device structure.
+  * linux/usb.h already contains these but in an obsolete form which clutters
+diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
+deleted file mode 100644
+index 6569206..0000000
+--- a/drivers/kvm/Kconfig
++++ /dev/null
+@@ -1,54 +0,0 @@
+-#
+-# KVM configuration
+-#
+-menuconfig VIRTUALIZATION
+-	bool "Virtualization"
+-	depends on X86
+-	default y
+-	---help---
+-	  Say Y here to get to see options for using your Linux host to run other
+-	  operating systems inside virtual machines (guests).
+-	  This option alone does not add any kernel code.
+-
+-	  If you say N, all options in this submenu will be skipped and disabled.
+-
+-if VIRTUALIZATION
+-
+-config KVM
+-	tristate "Kernel-based Virtual Machine (KVM) support"
+-	depends on X86 && EXPERIMENTAL
+-	select PREEMPT_NOTIFIERS
+-	select ANON_INODES
+-	---help---
+-	  Support hosting fully virtualized guest machines using hardware
+-	  virtualization extensions.  You will need a fairly recent
+-	  processor equipped with virtualization extensions. You will also
+-	  need to select one or more of the processor modules below.
 -
--config SENSORS_DS1374
--	tristate "Dallas DS1374 Real Time Clock (DEPRECATED)"
--	depends on EXPERIMENTAL
--	help
--	  If you say yes here you get support for Dallas Semiconductor
--	  DS1374 real-time clock chips.
+-	  This module provides access to the hardware capabilities through
+-	  a character device node named /dev/kvm.
 -
--	  This driver can also be built as a module.  If so, the module
--	  will be called ds1374.
+-	  To compile this as a module, choose M here: the module
+-	  will be called kvm.
 -
--	  This driver is deprecated and will be dropped soon. Use
--	  rtc-ds1374 instead.
+-	  If unsure, say N.
 -
- config DS1682
- 	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
- 	depends on EXPERIMENTAL
-@@ -57,7 +31,7 @@ config SENSORS_PCF8574
- 	default n
- 	help
- 	  If you say yes here you get support for Philips PCF8574 and 
--	  PCF8574A chips.
-+	  PCF8574A chips. These chips are 8-bit I/O expanders for the I2C bus.
- 
- 	  This driver can also be built as a module.  If so, the module
- 	  will be called pcf8574.
-@@ -65,6 +39,20 @@ config SENSORS_PCF8574
- 	  These devices are hard to detect and rarely found on mainstream
- 	  hardware.  If unsure, say N.
- 
-+config PCF8575
-+	tristate "Philips PCF8575"
-+	default n
-+	help
-+	  If you say yes here you get support for Philips PCF8575 chip.
-+	  This chip is a 16-bit I/O expander for the I2C bus.  Several other
-+	  chip manufacturers sell equivalent chips, e.g. Texas Instruments.
-+
-+	  This driver can also be built as a module.  If so, the module
-+	  will be called pcf8575.
-+
-+	  This device is hard to detect and is rarely found on mainstream
-+	  hardware.  If unsure, say N.
-+
- config SENSORS_PCA9539
- 	tristate "Philips PCA9539 16-bit I/O port"
- 	depends on EXPERIMENTAL
-@@ -100,12 +88,8 @@ config ISP1301_OMAP
- 	  This driver can also be built as a module.  If so, the module
- 	  will be called isp1301_omap.
- 
--# NOTE:  This isn't really OMAP-specific, except for the current
--# interface location in  <include/asm-arm/arch-omap/tps65010.h>
--# and having mostly OMAP-specific board support
- config TPS65010
- 	tristate "TPS6501x Power Management chips"
--	depends on ARCH_OMAP
- 	default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK
- 	help
- 	  If you say yes here you get support for the TPS6501x series of
-@@ -116,18 +100,6 @@ config TPS65010
- 	  This driver can also be built as a module.  If so, the module
- 	  will be called tps65010.
- 
--config SENSORS_M41T00
--	tristate "ST M41T00 RTC chip (DEPRECATED)"
--	depends on PPC32
--	help
--	  If you say yes here you get support for the ST M41T00 RTC chip.
+-config KVM_INTEL
+-	tristate "KVM for Intel processors support"
+-	depends on KVM
+-	---help---
+-	  Provides support for KVM on Intel processors equipped with the VT
+-	  extensions.
 -
--	  This driver can also be built as a module.  If so, the module
--	  will be called m41t00.
+-config KVM_AMD
+-	tristate "KVM for AMD processors support"
+-	depends on KVM
+-	---help---
+-	  Provides support for KVM on AMD processors equipped with the AMD-V
+-	  (SVM) extensions.
 -
--	  This driver is deprecated and will be dropped soon. Use
--	  rtc-ds1307 or rtc-m41t80 instead.
+-# OK, it's a little counter-intuitive to do this, but it puts it neatly under
+-# the virtualization menu.
+-source drivers/lguest/Kconfig
 -
- config SENSORS_MAX6875
- 	tristate "Maxim MAX6875 Power supply supervisor"
- 	depends on EXPERIMENTAL
-diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
-index ca924e1..501f00c 100644
---- a/drivers/i2c/chips/Makefile
-+++ b/drivers/i2c/chips/Makefile
-@@ -2,14 +2,12 @@
- # Makefile for miscellaneous I2C chip drivers.
- #
- 
--obj-$(CONFIG_SENSORS_DS1337)	+= ds1337.o
--obj-$(CONFIG_SENSORS_DS1374)	+= ds1374.o
- obj-$(CONFIG_DS1682)		+= ds1682.o
- obj-$(CONFIG_SENSORS_EEPROM)	+= eeprom.o
- obj-$(CONFIG_SENSORS_MAX6875)	+= max6875.o
--obj-$(CONFIG_SENSORS_M41T00)	+= m41t00.o
- obj-$(CONFIG_SENSORS_PCA9539)	+= pca9539.o
- obj-$(CONFIG_SENSORS_PCF8574)	+= pcf8574.o
-+obj-$(CONFIG_PCF8575)		+= pcf8575.o
- obj-$(CONFIG_SENSORS_PCF8591)	+= pcf8591.o
- obj-$(CONFIG_ISP1301_OMAP)	+= isp1301_omap.o
- obj-$(CONFIG_TPS65010)		+= tps65010.o
-diff --git a/drivers/i2c/chips/ds1337.c b/drivers/i2c/chips/ds1337.c
+-endif # VIRTUALIZATION
+diff --git a/drivers/kvm/Makefile b/drivers/kvm/Makefile
 deleted file mode 100644
-index ec17d6b..0000000
---- a/drivers/i2c/chips/ds1337.c
+index e5a8f4d..0000000
+--- a/drivers/kvm/Makefile
 +++ /dev/null
-@@ -1,410 +0,0 @@
--/*
-- *  linux/drivers/i2c/chips/ds1337.c
-- *
-- *  Copyright (C) 2005 James Chapman <jchapman at katalix.com>
-- *
-- *	based on linux/drivers/acorn/char/pcf8583.c
-- *  Copyright (C) 2000 Russell King
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 as
-- * published by the Free Software Foundation.
-- *
-- * Driver for Dallas Semiconductor DS1337 and DS1339 real time clock chip
-- */
--
--#include <linux/module.h>
--#include <linux/init.h>
--#include <linux/slab.h>
--#include <linux/i2c.h>
--#include <linux/string.h>
--#include <linux/rtc.h>		/* get the user-level API */
--#include <linux/bcd.h>
--#include <linux/list.h>
--
--/* Device registers */
--#define DS1337_REG_HOUR		2
--#define DS1337_REG_DAY		3
--#define DS1337_REG_DATE		4
--#define DS1337_REG_MONTH	5
--#define DS1337_REG_CONTROL	14
--#define DS1337_REG_STATUS	15
+@@ -1,10 +0,0 @@
+-#
+-# Makefile for Kernel-based Virtual Machine module
+-#
 -
--/* FIXME - how do we export these interface constants? */
--#define DS1337_GET_DATE		0
--#define DS1337_SET_DATE		1
+-kvm-objs := kvm_main.o mmu.o x86_emulate.o i8259.o irq.o lapic.o ioapic.o
+-obj-$(CONFIG_KVM) += kvm.o
+-kvm-intel-objs = vmx.o
+-obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
+-kvm-amd-objs = svm.o
+-obj-$(CONFIG_KVM_AMD) += kvm-amd.o
+diff --git a/drivers/kvm/i8259.c b/drivers/kvm/i8259.c
+deleted file mode 100644
+index a679157..0000000
+--- a/drivers/kvm/i8259.c
++++ /dev/null
+@@ -1,450 +0,0 @@
+-/*
+- * 8259 interrupt controller emulation
+- *
+- * Copyright (c) 2003-2004 Fabrice Bellard
+- * Copyright (c) 2007 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a copy
+- * of this software and associated documentation files (the "Software"), to deal
+- * in the Software without restriction, including without limitation the rights
+- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+- * copies of the Software, and to permit persons to whom the Software is
+- * furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+- * THE SOFTWARE.
+- * Authors:
+- *   Yaozu (Eddie) Dong <Eddie.dong at intel.com>
+- *   Port from Qemu.
+- */
+-#include <linux/mm.h>
+-#include "irq.h"
 -
 -/*
-- * Functions declaration
+- * set irq level. If an edge is detected, then the IRR is set to 1
 - */
--static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
--
--I2C_CLIENT_INSMOD_1(ds1337);
--
--static int ds1337_attach_adapter(struct i2c_adapter *adapter);
--static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind);
--static void ds1337_init_client(struct i2c_client *client);
--static int ds1337_detach_client(struct i2c_client *client);
--static int ds1337_command(struct i2c_client *client, unsigned int cmd,
--			  void *arg);
+-static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
+-{
+-	int mask;
+-	mask = 1 << irq;
+-	if (s->elcr & mask)	/* level triggered */
+-		if (level) {
+-			s->irr |= mask;
+-			s->last_irr |= mask;
+-		} else {
+-			s->irr &= ~mask;
+-			s->last_irr &= ~mask;
+-		}
+-	else	/* edge triggered */
+-		if (level) {
+-			if ((s->last_irr & mask) == 0)
+-				s->irr |= mask;
+-			s->last_irr |= mask;
+-		} else
+-			s->last_irr &= ~mask;
+-}
 -
 -/*
-- * Driver data (common to all clients)
+- * return the highest priority found in mask (highest = smallest
+- * number). Return 8 if no irq
 - */
--static struct i2c_driver ds1337_driver = {
--	.driver = {
--		.name	= "ds1337",
--	},
--	.attach_adapter	= ds1337_attach_adapter,
--	.detach_client	= ds1337_detach_client,
--	.command	= ds1337_command,
--};
+-static inline int get_priority(struct kvm_kpic_state *s, int mask)
+-{
+-	int priority;
+-	if (mask == 0)
+-		return 8;
+-	priority = 0;
+-	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
+-		priority++;
+-	return priority;
+-}
 -
 -/*
-- * Client data (each client gets its own)
+- * return the pic wanted interrupt. return -1 if none
 - */
--struct ds1337_data {
--	struct i2c_client client;
--	struct list_head list;
--};
+-static int pic_get_irq(struct kvm_kpic_state *s)
+-{
+-	int mask, cur_priority, priority;
+-
+-	mask = s->irr & ~s->imr;
+-	priority = get_priority(s, mask);
+-	if (priority == 8)
+-		return -1;
+-	/*
+-	 * compute current priority. If special fully nested mode on the
+-	 * master, the IRQ coming from the slave is not taken into account
+-	 * for the priority computation.
+-	 */
+-	mask = s->isr;
+-	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
+-		mask &= ~(1 << 2);
+-	cur_priority = get_priority(s, mask);
+-	if (priority < cur_priority)
+-		/*
+-		 * higher priority found: an irq should be generated
+-		 */
+-		return (priority + s->priority_add) & 7;
+-	else
+-		return -1;
+-}
 -
 -/*
-- * Internal variables
+- * raise irq to CPU if necessary. must be called every time the active
+- * irq may change
 - */
--static LIST_HEAD(ds1337_clients);
--
--static inline int ds1337_read(struct i2c_client *client, u8 reg, u8 *value)
+-static void pic_update_irq(struct kvm_pic *s)
 -{
--	s32 tmp = i2c_smbus_read_byte_data(client, reg);
+-	int irq2, irq;
 -
--	if (tmp < 0)
--		return -EIO;
+-	irq2 = pic_get_irq(&s->pics[1]);
+-	if (irq2 >= 0) {
+-		/*
+-		 * if irq request by slave pic, signal master PIC
+-		 */
+-		pic_set_irq1(&s->pics[0], 2, 1);
+-		pic_set_irq1(&s->pics[0], 2, 0);
+-	}
+-	irq = pic_get_irq(&s->pics[0]);
+-	if (irq >= 0)
+-		s->irq_request(s->irq_request_opaque, 1);
+-	else
+-		s->irq_request(s->irq_request_opaque, 0);
+-}
 -
--	*value = tmp;
+-void kvm_pic_update_irq(struct kvm_pic *s)
+-{
+-	pic_update_irq(s);
+-}
 -
--	return 0;
+-void kvm_pic_set_irq(void *opaque, int irq, int level)
+-{
+-	struct kvm_pic *s = opaque;
+-
+-	pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
+-	pic_update_irq(s);
 -}
 -
 -/*
-- * Chip access functions
+- * acknowledge interrupt 'irq'
 - */
--static int ds1337_get_datetime(struct i2c_client *client, struct rtc_time *dt)
+-static inline void pic_intack(struct kvm_kpic_state *s, int irq)
 -{
--	int result;
--	u8 buf[7];
--	u8 val;
--	struct i2c_msg msg[2];
--	u8 offs = 0;
+-	if (s->auto_eoi) {
+-		if (s->rotate_on_auto_eoi)
+-			s->priority_add = (irq + 1) & 7;
+-	} else
+-		s->isr |= (1 << irq);
+-	/*
+-	 * We don't clear a level sensitive interrupt here
+-	 */
+-	if (!(s->elcr & (1 << irq)))
+-		s->irr &= ~(1 << irq);
+-}
 -
--	if (!dt) {
--		dev_dbg(&client->dev, "%s: EINVAL: dt=NULL\n", __FUNCTION__);
--		return -EINVAL;
+-int kvm_pic_read_irq(struct kvm_pic *s)
+-{
+-	int irq, irq2, intno;
+-
+-	irq = pic_get_irq(&s->pics[0]);
+-	if (irq >= 0) {
+-		pic_intack(&s->pics[0], irq);
+-		if (irq == 2) {
+-			irq2 = pic_get_irq(&s->pics[1]);
+-			if (irq2 >= 0)
+-				pic_intack(&s->pics[1], irq2);
+-			else
+-				/*
+-				 * spurious IRQ on slave controller
+-				 */
+-				irq2 = 7;
+-			intno = s->pics[1].irq_base + irq2;
+-			irq = irq2 + 8;
+-		} else
+-			intno = s->pics[0].irq_base + irq;
+-	} else {
+-		/*
+-		 * spurious IRQ on host controller
+-		 */
+-		irq = 7;
+-		intno = s->pics[0].irq_base + irq;
 -	}
+-	pic_update_irq(s);
 -
--	msg[0].addr = client->addr;
--	msg[0].flags = 0;
--	msg[0].len = 1;
--	msg[0].buf = &offs;
+-	return intno;
+-}
 -
--	msg[1].addr = client->addr;
--	msg[1].flags = I2C_M_RD;
--	msg[1].len = sizeof(buf);
--	msg[1].buf = &buf[0];
+-static void pic_reset(void *opaque)
+-{
+-	struct kvm_kpic_state *s = opaque;
 -
--	result = i2c_transfer(client->adapter, msg, 2);
+-	s->last_irr = 0;
+-	s->irr = 0;
+-	s->imr = 0;
+-	s->isr = 0;
+-	s->priority_add = 0;
+-	s->irq_base = 0;
+-	s->read_reg_select = 0;
+-	s->poll = 0;
+-	s->special_mask = 0;
+-	s->init_state = 0;
+-	s->auto_eoi = 0;
+-	s->rotate_on_auto_eoi = 0;
+-	s->special_fully_nested_mode = 0;
+-	s->init4 = 0;
+-}
 -
--	dev_dbg(&client->dev, "%s: [%d] %02x %02x %02x %02x %02x %02x %02x\n",
--		__FUNCTION__, result, buf[0], buf[1], buf[2], buf[3],
--		buf[4], buf[5], buf[6]);
+-static void pic_ioport_write(void *opaque, u32 addr, u32 val)
+-{
+-	struct kvm_kpic_state *s = opaque;
+-	int priority, cmd, irq;
 -
--	if (result == 2) {
--		dt->tm_sec = BCD2BIN(buf[0]);
--		dt->tm_min = BCD2BIN(buf[1]);
--		val = buf[2] & 0x3f;
--		dt->tm_hour = BCD2BIN(val);
--		dt->tm_wday = BCD2BIN(buf[3]) - 1;
--		dt->tm_mday = BCD2BIN(buf[4]);
--		val = buf[5] & 0x7f;
--		dt->tm_mon = BCD2BIN(val) - 1;
--		dt->tm_year = BCD2BIN(buf[6]);
--		if (buf[5] & 0x80)
--			dt->tm_year += 100;
+-	addr &= 1;
+-	if (addr == 0) {
+-		if (val & 0x10) {
+-			pic_reset(s);	/* init */
+-			/*
+-			 * deassert a pending interrupt
+-			 */
+-			s->pics_state->irq_request(s->pics_state->
+-						   irq_request_opaque, 0);
+-			s->init_state = 1;
+-			s->init4 = val & 1;
+-			if (val & 0x02)
+-				printk(KERN_ERR "single mode not supported");
+-			if (val & 0x08)
+-				printk(KERN_ERR
+-				       "level sensitive irq not supported");
+-		} else if (val & 0x08) {
+-			if (val & 0x04)
+-				s->poll = 1;
+-			if (val & 0x02)
+-				s->read_reg_select = val & 1;
+-			if (val & 0x40)
+-				s->special_mask = (val >> 5) & 1;
+-		} else {
+-			cmd = val >> 5;
+-			switch (cmd) {
+-			case 0:
+-			case 4:
+-				s->rotate_on_auto_eoi = cmd >> 2;
+-				break;
+-			case 1:	/* end of interrupt */
+-			case 5:
+-				priority = get_priority(s, s->isr);
+-				if (priority != 8) {
+-					irq = (priority + s->priority_add) & 7;
+-					s->isr &= ~(1 << irq);
+-					if (cmd == 5)
+-						s->priority_add = (irq + 1) & 7;
+-					pic_update_irq(s->pics_state);
+-				}
+-				break;
+-			case 3:
+-				irq = val & 7;
+-				s->isr &= ~(1 << irq);
+-				pic_update_irq(s->pics_state);
+-				break;
+-			case 6:
+-				s->priority_add = (val + 1) & 7;
+-				pic_update_irq(s->pics_state);
+-				break;
+-			case 7:
+-				irq = val & 7;
+-				s->isr &= ~(1 << irq);
+-				s->priority_add = (irq + 1) & 7;
+-				pic_update_irq(s->pics_state);
+-				break;
+-			default:
+-				break;	/* no operation */
+-			}
+-		}
+-	} else
+-		switch (s->init_state) {
+-		case 0:		/* normal mode */
+-			s->imr = val;
+-			pic_update_irq(s->pics_state);
+-			break;
+-		case 1:
+-			s->irq_base = val & 0xf8;
+-			s->init_state = 2;
+-			break;
+-		case 2:
+-			if (s->init4)
+-				s->init_state = 3;
+-			else
+-				s->init_state = 0;
+-			break;
+-		case 3:
+-			s->special_fully_nested_mode = (val >> 4) & 1;
+-			s->auto_eoi = (val >> 1) & 1;
+-			s->init_state = 0;
+-			break;
+-		}
+-}
 -
--		dev_dbg(&client->dev, "%s: secs=%d, mins=%d, "
--			"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
--			__FUNCTION__, dt->tm_sec, dt->tm_min,
--			dt->tm_hour, dt->tm_mday,
--			dt->tm_mon, dt->tm_year, dt->tm_wday);
+-static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
+-{
+-	int ret;
 -
--		return 0;
+-	ret = pic_get_irq(s);
+-	if (ret >= 0) {
+-		if (addr1 >> 7) {
+-			s->pics_state->pics[0].isr &= ~(1 << 2);
+-			s->pics_state->pics[0].irr &= ~(1 << 2);
+-		}
+-		s->irr &= ~(1 << ret);
+-		s->isr &= ~(1 << ret);
+-		if (addr1 >> 7 || ret != 2)
+-			pic_update_irq(s->pics_state);
+-	} else {
+-		ret = 0x07;
+-		pic_update_irq(s->pics_state);
 -	}
 -
--	dev_err(&client->dev, "error reading data! %d\n", result);
--	return -EIO;
+-	return ret;
 -}
 -
--static int ds1337_set_datetime(struct i2c_client *client, struct rtc_time *dt)
+-static u32 pic_ioport_read(void *opaque, u32 addr1)
 -{
--	int result;
--	u8 buf[8];
--	u8 val;
--	struct i2c_msg msg[1];
--
--	if (!dt) {
--		dev_dbg(&client->dev, "%s: EINVAL: dt=NULL\n", __FUNCTION__);
--		return -EINVAL;
--	}
+-	struct kvm_kpic_state *s = opaque;
+-	unsigned int addr;
+-	int ret;
 -
--	dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
--		"mday=%d, mon=%d, year=%d, wday=%d\n", __FUNCTION__,
--		dt->tm_sec, dt->tm_min, dt->tm_hour,
--		dt->tm_mday, dt->tm_mon, dt->tm_year, dt->tm_wday);
+-	addr = addr1;
+-	addr &= 1;
+-	if (s->poll) {
+-		ret = pic_poll_read(s, addr1);
+-		s->poll = 0;
+-	} else
+-		if (addr == 0)
+-			if (s->read_reg_select)
+-				ret = s->isr;
+-			else
+-				ret = s->irr;
+-		else
+-			ret = s->imr;
+-	return ret;
+-}
 -
--	buf[0] = 0;		/* reg offset */
--	buf[1] = BIN2BCD(dt->tm_sec);
--	buf[2] = BIN2BCD(dt->tm_min);
--	buf[3] = BIN2BCD(dt->tm_hour);
--	buf[4] = BIN2BCD(dt->tm_wday + 1);
--	buf[5] = BIN2BCD(dt->tm_mday);
--	buf[6] = BIN2BCD(dt->tm_mon + 1);
--	val = dt->tm_year;
--	if (val >= 100) {
--		val -= 100;
--		buf[6] |= (1 << 7);
--	}
--	buf[7] = BIN2BCD(val);
+-static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
+-{
+-	struct kvm_kpic_state *s = opaque;
+-	s->elcr = val & s->elcr_mask;
+-}
 -
--	msg[0].addr = client->addr;
--	msg[0].flags = 0;
--	msg[0].len = sizeof(buf);
--	msg[0].buf = &buf[0];
+-static u32 elcr_ioport_read(void *opaque, u32 addr1)
+-{
+-	struct kvm_kpic_state *s = opaque;
+-	return s->elcr;
+-}
 -
--	result = i2c_transfer(client->adapter, msg, 1);
--	if (result == 1)
+-static int picdev_in_range(struct kvm_io_device *this, gpa_t addr)
+-{
+-	switch (addr) {
+-	case 0x20:
+-	case 0x21:
+-	case 0xa0:
+-	case 0xa1:
+-	case 0x4d0:
+-	case 0x4d1:
+-		return 1;
+-	default:
 -		return 0;
--
--	dev_err(&client->dev, "error writing data! %d\n", result);
--	return -EIO;
+-	}
 -}
 -
--static int ds1337_command(struct i2c_client *client, unsigned int cmd,
--			  void *arg)
+-static void picdev_write(struct kvm_io_device *this,
+-			 gpa_t addr, int len, const void *val)
 -{
--	dev_dbg(&client->dev, "%s: cmd=%d\n", __FUNCTION__, cmd);
+-	struct kvm_pic *s = this->private;
+-	unsigned char data = *(unsigned char *)val;
 -
--	switch (cmd) {
--	case DS1337_GET_DATE:
--		return ds1337_get_datetime(client, arg);
+-	if (len != 1) {
+-		if (printk_ratelimit())
+-			printk(KERN_ERR "PIC: non byte write\n");
+-		return;
+-	}
+-	switch (addr) {
+-	case 0x20:
+-	case 0x21:
+-	case 0xa0:
+-	case 0xa1:
+-		pic_ioport_write(&s->pics[addr >> 7], addr, data);
+-		break;
+-	case 0x4d0:
+-	case 0x4d1:
+-		elcr_ioport_write(&s->pics[addr & 1], addr, data);
+-		break;
+-	}
+-}
 -
--	case DS1337_SET_DATE:
--		return ds1337_set_datetime(client, arg);
+-static void picdev_read(struct kvm_io_device *this,
+-			gpa_t addr, int len, void *val)
+-{
+-	struct kvm_pic *s = this->private;
+-	unsigned char data = 0;
 -
--	default:
--		return -EINVAL;
+-	if (len != 1) {
+-		if (printk_ratelimit())
+-			printk(KERN_ERR "PIC: non byte read\n");
+-		return;
+-	}
+-	switch (addr) {
+-	case 0x20:
+-	case 0x21:
+-	case 0xa0:
+-	case 0xa1:
+-		data = pic_ioport_read(&s->pics[addr >> 7], addr);
+-		break;
+-	case 0x4d0:
+-	case 0x4d1:
+-		data = elcr_ioport_read(&s->pics[addr & 1], addr);
+-		break;
 -	}
+-	*(unsigned char *)val = data;
 -}
 -
 -/*
-- * Public API for access to specific device. Useful for low-level
-- * RTC access from kernel code.
+- * callback when PIC0 irq status changed
 - */
--int ds1337_do_command(int bus, int cmd, void *arg)
+-static void pic_irq_request(void *opaque, int level)
 -{
--	struct list_head *walk;
--	struct list_head *tmp;
--	struct ds1337_data *data;
--
--	list_for_each_safe(walk, tmp, &ds1337_clients) {
--		data = list_entry(walk, struct ds1337_data, list);
--		if (data->client.adapter->nr == bus)
--			return ds1337_command(&data->client, cmd, arg);
--	}
+-	struct kvm *kvm = opaque;
+-	struct kvm_vcpu *vcpu = kvm->vcpus[0];
 -
--	return -ENODEV;
+-	pic_irqchip(kvm)->output = level;
+-	if (vcpu)
+-		kvm_vcpu_kick(vcpu);
 -}
 -
--static int ds1337_attach_adapter(struct i2c_adapter *adapter)
+-struct kvm_pic *kvm_create_pic(struct kvm *kvm)
 -{
--	return i2c_probe(adapter, &addr_data, ds1337_detect);
--}
+-	struct kvm_pic *s;
+-	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
+-	if (!s)
+-		return NULL;
+-	s->pics[0].elcr_mask = 0xf8;
+-	s->pics[1].elcr_mask = 0xde;
+-	s->irq_request = pic_irq_request;
+-	s->irq_request_opaque = kvm;
+-	s->pics[0].pics_state = s;
+-	s->pics[1].pics_state = s;
 -
+-	/*
+-	 * Initialize PIO device
+-	 */
+-	s->dev.read = picdev_read;
+-	s->dev.write = picdev_write;
+-	s->dev.in_range = picdev_in_range;
+-	s->dev.private = s;
+-	kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev);
+-	return s;
+-}
+diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
+deleted file mode 100644
+index c7992e6..0000000
+--- a/drivers/kvm/ioapic.c
++++ /dev/null
+@@ -1,388 +0,0 @@
 -/*
-- * The following function does more than just detection. If detection
-- * succeeds, it also registers the new chip.
+- *  Copyright (C) 2001  MandrakeSoft S.A.
+- *
+- *    MandrakeSoft S.A.
+- *    43, rue d'Aboukir
+- *    75002 Paris - France
+- *    http://www.linux-mandrake.com/
+- *    http://www.mandrakesoft.com/
+- *
+- *  This library is free software; you can redistribute it and/or
+- *  modify it under the terms of the GNU Lesser General Public
+- *  License as published by the Free Software Foundation; either
+- *  version 2 of the License, or (at your option) any later version.
+- *
+- *  This library is distributed in the hope that it will be useful,
+- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+- *  Lesser General Public License for more details.
+- *
+- *  You should have received a copy of the GNU Lesser General Public
+- *  License along with this library; if not, write to the Free Software
+- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+- *
+- *  Yunhong Jiang <yunhong.jiang at intel.com>
+- *  Yaozu (Eddie) Dong <eddie.dong at intel.com>
+- *  Based on Xen 3.1 code.
 - */
--static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind)
--{
--	struct i2c_client *new_client;
--	struct ds1337_data *data;
--	int err = 0;
--	const char *name = "";
 -
--	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
--				     I2C_FUNC_I2C))
--		goto exit;
+-#include "kvm.h"
+-#include <linux/kvm.h>
+-#include <linux/mm.h>
+-#include <linux/highmem.h>
+-#include <linux/smp.h>
+-#include <linux/hrtimer.h>
+-#include <linux/io.h>
+-#include <asm/processor.h>
+-#include <asm/msr.h>
+-#include <asm/page.h>
+-#include <asm/current.h>
+-#include <asm/apicdef.h>
+-#include <asm/io_apic.h>
+-#include "irq.h"
+-/* #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define ioapic_debug(fmt, arg...)
+-static void ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
+-
+-static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
+-					  unsigned long addr,
+-					  unsigned long length)
+-{
+-	unsigned long result = 0;
 -
--	if (!(data = kzalloc(sizeof(struct ds1337_data), GFP_KERNEL))) {
--		err = -ENOMEM;
--		goto exit;
--	}
--	INIT_LIST_HEAD(&data->list);
+-	switch (ioapic->ioregsel) {
+-	case IOAPIC_REG_VERSION:
+-		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
+-			  | (IOAPIC_VERSION_ID & 0xff));
+-		break;
 -
--	/* The common I2C client data is placed right before the
--	 * DS1337-specific data. 
--	 */
--	new_client = &data->client;
--	i2c_set_clientdata(new_client, data);
--	new_client->addr = address;
--	new_client->adapter = adapter;
--	new_client->driver = &ds1337_driver;
--	new_client->flags = 0;
+-	case IOAPIC_REG_APIC_ID:
+-	case IOAPIC_REG_ARB_ID:
+-		result = ((ioapic->id & 0xf) << 24);
+-		break;
 -
--	/*
--	 * Now we do the remaining detection. A negative kind means that
--	 * the driver was loaded with no force parameter (default), so we
--	 * must both detect and identify the chip. A zero kind means that
--	 * the driver was loaded with the force parameter, the detection
--	 * step shall be skipped. A positive kind means that the driver
--	 * was loaded with the force parameter and a given kind of chip is
--	 * requested, so both the detection and the identification steps
--	 * are skipped.
--	 *
--	 * For detection, we read registers that are most likely to cause
--	 * detection failure, i.e. those that have more bits with fixed
--	 * or reserved values.
--	 */
+-	default:
+-		{
+-			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
+-			u64 redir_content;
 -
--	/* Default to an DS1337 if forced */
--	if (kind == 0)
--		kind = ds1337;
+-			ASSERT(redir_index < IOAPIC_NUM_PINS);
 -
--	if (kind < 0) {		/* detection and identification */
--		u8 data;
+-			redir_content = ioapic->redirtbl[redir_index].bits;
+-			result = (ioapic->ioregsel & 0x1) ?
+-			    (redir_content >> 32) & 0xffffffff :
+-			    redir_content & 0xffffffff;
+-			break;
+-		}
+-	}
 -
--		/* Check that status register bits 6-2 are zero */
--		if ((ds1337_read(new_client, DS1337_REG_STATUS, &data) < 0) ||
--		    (data & 0x7c))
--			goto exit_free;
+-	return result;
+-}
 -
--		/* Check for a valid day register value */
--		if ((ds1337_read(new_client, DS1337_REG_DAY, &data) < 0) ||
--		    (data == 0) || (data & 0xf8))
--			goto exit_free;
+-static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
+-{
+-	union ioapic_redir_entry *pent;
 -
--		/* Check for a valid date register value */
--		if ((ds1337_read(new_client, DS1337_REG_DATE, &data) < 0) ||
--		    (data == 0) || (data & 0xc0) || ((data & 0x0f) > 9) ||
--		    (data >= 0x32))
--			goto exit_free;
+-	pent = &ioapic->redirtbl[idx];
 -
--		/* Check for a valid month register value */
--		if ((ds1337_read(new_client, DS1337_REG_MONTH, &data) < 0) ||
--		    (data == 0) || (data & 0x60) || ((data & 0x0f) > 9) ||
--		    ((data >= 0x13) && (data <= 0x19)))
--			goto exit_free;
+-	if (!pent->fields.mask) {
+-		ioapic_deliver(ioapic, idx);
+-		if (pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
+-			pent->fields.remote_irr = 1;
+-	}
+-	if (!pent->fields.trig_mode)
+-		ioapic->irr &= ~(1 << idx);
+-}
 -
--		/* Check that control register bits 6-5 are zero */
--		if ((ds1337_read(new_client, DS1337_REG_CONTROL, &data) < 0) ||
--		    (data & 0x60))
--			goto exit_free;
+-static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+-{
+-	unsigned index;
 -
--		kind = ds1337;
--	}
+-	switch (ioapic->ioregsel) {
+-	case IOAPIC_REG_VERSION:
+-		/* Writes are ignored. */
+-		break;
 -
--	if (kind == ds1337)
--		name = "ds1337";
+-	case IOAPIC_REG_APIC_ID:
+-		ioapic->id = (val >> 24) & 0xf;
+-		break;
 -
--	/* We can fill in the remaining client fields */
--	strlcpy(new_client->name, name, I2C_NAME_SIZE);
+-	case IOAPIC_REG_ARB_ID:
+-		break;
 -
--	/* Tell the I2C layer a new client has arrived */
--	if ((err = i2c_attach_client(new_client)))
--		goto exit_free;
+-	default:
+-		index = (ioapic->ioregsel - 0x10) >> 1;
 -
--	/* Initialize the DS1337 chip */
--	ds1337_init_client(new_client);
+-		ioapic_debug("change redir index %x val %x", index, val);
+-		if (index >= IOAPIC_NUM_PINS)
+-			return;
+-		if (ioapic->ioregsel & 1) {
+-			ioapic->redirtbl[index].bits &= 0xffffffff;
+-			ioapic->redirtbl[index].bits |= (u64) val << 32;
+-		} else {
+-			ioapic->redirtbl[index].bits &= ~0xffffffffULL;
+-			ioapic->redirtbl[index].bits |= (u32) val;
+-			ioapic->redirtbl[index].fields.remote_irr = 0;
+-		}
+-		if (ioapic->irr & (1 << index))
+-			ioapic_service(ioapic, index);
+-		break;
+-	}
+-}
 -
--	/* Add client to local list */
--	list_add(&data->list, &ds1337_clients);
+-static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
+-			   struct kvm_lapic *target,
+-			   u8 vector, u8 trig_mode, u8 delivery_mode)
+-{
+-	ioapic_debug("irq %d trig %d deliv %d", vector, trig_mode,
+-		     delivery_mode);
 -
--	return 0;
+-	ASSERT((delivery_mode == dest_Fixed) ||
+-	       (delivery_mode == dest_LowestPrio));
 -
--exit_free:
--	kfree(data);
--exit:
--	return err;
+-	kvm_apic_set_irq(target, vector, trig_mode);
 -}
 -
--static void ds1337_init_client(struct i2c_client *client)
+-static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
+-				       u8 dest_mode)
 -{
--	u8 status, control;
--
--	/* On some boards, the RTC isn't configured by boot firmware.
--	 * Handle that case by starting/configuring the RTC now.
--	 */
--	status = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
--	control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+-	u32 mask = 0;
+-	int i;
+-	struct kvm *kvm = ioapic->kvm;
+-	struct kvm_vcpu *vcpu;
 -
--	if ((status & 0x80) || (control & 0x80)) {
--		/* RTC not running */
--		u8 buf[1+16];	/* First byte is interpreted as address */
--		struct i2c_msg msg[1];
+-	ioapic_debug("dest %d dest_mode %d", dest, dest_mode);
 -
--		dev_dbg(&client->dev, "%s: RTC not running!\n", __FUNCTION__);
+-	if (dest_mode == 0) {	/* Physical mode. */
+-		if (dest == 0xFF) {	/* Broadcast. */
+-			for (i = 0; i < KVM_MAX_VCPUS; ++i)
+-				if (kvm->vcpus[i] && kvm->vcpus[i]->apic)
+-					mask |= 1 << i;
+-			return mask;
+-		}
+-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+-			vcpu = kvm->vcpus[i];
+-			if (!vcpu)
+-				continue;
+-			if (kvm_apic_match_physical_addr(vcpu->apic, dest)) {
+-				if (vcpu->apic)
+-					mask = 1 << i;
+-				break;
+-			}
+-		}
+-	} else if (dest != 0)	/* Logical mode, MDA non-zero. */
+-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+-			vcpu = kvm->vcpus[i];
+-			if (!vcpu)
+-				continue;
+-			if (vcpu->apic &&
+-			    kvm_apic_match_logical_addr(vcpu->apic, dest))
+-				mask |= 1 << vcpu->vcpu_id;
+-		}
+-	ioapic_debug("mask %x", mask);
+-	return mask;
+-}
 -
--		/* Initialize all, including STATUS and CONTROL to zero */
--		memset(buf, 0, sizeof(buf));
+-static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+-{
+-	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
+-	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
+-	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
+-	u8 vector = ioapic->redirtbl[irq].fields.vector;
+-	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
+-	u32 deliver_bitmask;
+-	struct kvm_lapic *target;
+-	struct kvm_vcpu *vcpu;
+-	int vcpu_id;
+-
+-	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
+-		     "vector=%x trig_mode=%x",
+-		     dest, dest_mode, delivery_mode, vector, trig_mode);
+-
+-	deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
+-	if (!deliver_bitmask) {
+-		ioapic_debug("no target on destination");
+-		return;
+-	}
 -
--		/* Write valid values in the date/time registers */
--		buf[1+DS1337_REG_DAY] = 1;
--		buf[1+DS1337_REG_DATE] = 1;
--		buf[1+DS1337_REG_MONTH] = 1;
+-	switch (delivery_mode) {
+-	case dest_LowestPrio:
+-		target =
+-		    kvm_apic_round_robin(ioapic->kvm, vector, deliver_bitmask);
+-		if (target != NULL)
+-			ioapic_inj_irq(ioapic, target, vector,
+-				       trig_mode, delivery_mode);
+-		else
+-			ioapic_debug("null round robin: "
+-				     "mask=%x vector=%x delivery_mode=%x",
+-				     deliver_bitmask, vector, dest_LowestPrio);
+-		break;
+-	case dest_Fixed:
+-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+-			if (!(deliver_bitmask & (1 << vcpu_id)))
+-				continue;
+-			deliver_bitmask &= ~(1 << vcpu_id);
+-			vcpu = ioapic->kvm->vcpus[vcpu_id];
+-			if (vcpu) {
+-				target = vcpu->apic;
+-				ioapic_inj_irq(ioapic, target, vector,
+-					       trig_mode, delivery_mode);
+-			}
+-		}
+-		break;
 -
--		msg[0].addr = client->addr;
--		msg[0].flags = 0;
--		msg[0].len = sizeof(buf);
--		msg[0].buf = &buf[0];
+-		/* TODO: NMI */
+-	default:
+-		printk(KERN_WARNING "Unsupported delivery mode %d\n",
+-		       delivery_mode);
+-		break;
+-	}
+-}
 -
--		i2c_transfer(client->adapter, msg, 1);
--	} else {
--		/* Running: ensure that device is set in 24-hour mode */
--		s32 val;
+-void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
+-{
+-	u32 old_irr = ioapic->irr;
+-	u32 mask = 1 << irq;
+-	union ioapic_redir_entry entry;
 -
--		val = i2c_smbus_read_byte_data(client, DS1337_REG_HOUR);
--		if ((val >= 0) && (val & (1 << 6)))
--			i2c_smbus_write_byte_data(client, DS1337_REG_HOUR,
--						  val & 0x3f);
+-	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
+-		entry = ioapic->redirtbl[irq];
+-		level ^= entry.fields.polarity;
+-		if (!level)
+-			ioapic->irr &= ~mask;
+-		else {
+-			ioapic->irr |= mask;
+-			if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
+-			    || !entry.fields.remote_irr)
+-				ioapic_service(ioapic, irq);
+-		}
 -	}
 -}
 -
--static int ds1337_detach_client(struct i2c_client *client)
+-static int get_eoi_gsi(struct kvm_ioapic *ioapic, int vector)
 -{
--	int err;
--	struct ds1337_data *data = i2c_get_clientdata(client);
+-	int i;
 -
--	if ((err = i2c_detach_client(client)))
--		return err;
+-	for (i = 0; i < IOAPIC_NUM_PINS; i++)
+-		if (ioapic->redirtbl[i].fields.vector == vector)
+-			return i;
+-	return -1;
+-}
 -
--	list_del(&data->list);
--	kfree(data);
--	return 0;
+-void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
+-{
+-	struct kvm_ioapic *ioapic = kvm->vioapic;
+-	union ioapic_redir_entry *ent;
+-	int gsi;
+-
+-	gsi = get_eoi_gsi(ioapic, vector);
+-	if (gsi == -1) {
+-		printk(KERN_WARNING "Can't find redir item for %d EOI\n",
+-		       vector);
+-		return;
+-	}
+-
+-	ent = &ioapic->redirtbl[gsi];
+-	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
+-
+-	ent->fields.remote_irr = 0;
+-	if (!ent->fields.mask && (ioapic->irr & (1 << gsi)))
+-		ioapic_deliver(ioapic, gsi);
 -}
 -
--static int __init ds1337_init(void)
+-static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr)
 -{
--	return i2c_add_driver(&ds1337_driver);
+-	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
+-
+-	return ((addr >= ioapic->base_address &&
+-		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
 -}
 -
--static void __exit ds1337_exit(void)
+-static void ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
+-			     void *val)
 -{
--	i2c_del_driver(&ds1337_driver);
+-	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
+-	u32 result;
+-
+-	ioapic_debug("addr %lx", (unsigned long)addr);
+-	ASSERT(!(addr & 0xf));	/* check alignment */
+-
+-	addr &= 0xff;
+-	switch (addr) {
+-	case IOAPIC_REG_SELECT:
+-		result = ioapic->ioregsel;
+-		break;
+-
+-	case IOAPIC_REG_WINDOW:
+-		result = ioapic_read_indirect(ioapic, addr, len);
+-		break;
+-
+-	default:
+-		result = 0;
+-		break;
+-	}
+-	switch (len) {
+-	case 8:
+-		*(u64 *) val = result;
+-		break;
+-	case 1:
+-	case 2:
+-	case 4:
+-		memcpy(val, (char *)&result, len);
+-		break;
+-	default:
+-		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
+-	}
 -}
 -
--MODULE_AUTHOR("James Chapman <jchapman at katalix.com>");
--MODULE_DESCRIPTION("DS1337 RTC driver");
--MODULE_LICENSE("GPL");
+-static void ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
+-			      const void *val)
+-{
+-	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
+-	u32 data;
 -
--EXPORT_SYMBOL_GPL(ds1337_do_command);
+-	ioapic_debug("ioapic_mmio_write addr=%lx len=%d val=%p\n",
+-		     addr, len, val);
+-	ASSERT(!(addr & 0xf));	/* check alignment */
+-	if (len == 4 || len == 8)
+-		data = *(u32 *) val;
+-	else {
+-		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
+-		return;
+-	}
 -
--module_init(ds1337_init);
--module_exit(ds1337_exit);
-diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
+-	addr &= 0xff;
+-	switch (addr) {
+-	case IOAPIC_REG_SELECT:
+-		ioapic->ioregsel = data;
+-		break;
+-
+-	case IOAPIC_REG_WINDOW:
+-		ioapic_write_indirect(ioapic, data);
+-		break;
+-
+-	default:
+-		break;
+-	}
+-}
+-
+-int kvm_ioapic_init(struct kvm *kvm)
+-{
+-	struct kvm_ioapic *ioapic;
+-	int i;
+-
+-	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
+-	if (!ioapic)
+-		return -ENOMEM;
+-	kvm->vioapic = ioapic;
+-	for (i = 0; i < IOAPIC_NUM_PINS; i++)
+-		ioapic->redirtbl[i].fields.mask = 1;
+-	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
+-	ioapic->dev.read = ioapic_mmio_read;
+-	ioapic->dev.write = ioapic_mmio_write;
+-	ioapic->dev.in_range = ioapic_in_range;
+-	ioapic->dev.private = ioapic;
+-	ioapic->kvm = kvm;
+-	kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
+-	return 0;
+-}
+diff --git a/drivers/kvm/irq.c b/drivers/kvm/irq.c
 deleted file mode 100644
-index 8a2ff0c..0000000
---- a/drivers/i2c/chips/ds1374.c
+index 7628c7f..0000000
+--- a/drivers/kvm/irq.c
 +++ /dev/null
-@@ -1,267 +0,0 @@
+@@ -1,98 +0,0 @@
 -/*
-- * drivers/i2c/chips/ds1374.c
+- * irq.c: API for in kernel interrupt controller
+- * Copyright (c) 2007, Intel Corporation.
 - *
-- * I2C client/driver for the Maxim/Dallas DS1374 Real-Time Clock
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
 - *
-- * Author: Randy Vinson <rvinson at mvista.com>
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
 - *
-- * Based on the m41t00.c by Mark Greer <mgreer at mvista.com>
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+- * Place - Suite 330, Boston, MA 02111-1307 USA.
+- * Authors:
+- *   Yaozu (Eddie) Dong <Eddie.dong at intel.com>
 - *
-- * 2005 (c) MontaVista Software, Inc. This file is licensed under
-- * the terms of the GNU General Public License version 2. This program
-- * is licensed "as is" without any warranty of any kind, whether express
-- * or implied.
-- */
--/*
-- * This i2c client/driver wedges between the drivers/char/genrtc.c RTC
-- * interface and the SMBus interface of the i2c subsystem.
-- * It would be more efficient to use i2c msgs/i2c_transfer directly but, as
-- * recommened in .../Documentation/i2c/writing-clients section
-- * "Sending and receiving", using SMBus level communication is preferred.
 - */
 -
--#include <linux/kernel.h>
 -#include <linux/module.h>
--#include <linux/interrupt.h>
--#include <linux/i2c.h>
--#include <linux/rtc.h>
--#include <linux/bcd.h>
--#include <linux/mutex.h>
--#include <linux/workqueue.h>
--
--#define DS1374_REG_TOD0		0x00
--#define DS1374_REG_TOD1		0x01
--#define DS1374_REG_TOD2		0x02
--#define DS1374_REG_TOD3		0x03
--#define DS1374_REG_WDALM0	0x04
--#define DS1374_REG_WDALM1	0x05
--#define DS1374_REG_WDALM2	0x06
--#define DS1374_REG_CR		0x07
--#define DS1374_REG_SR		0x08
--#define DS1374_REG_SR_OSF	0x80
--#define DS1374_REG_TCR		0x09
 -
--#define	DS1374_DRV_NAME		"ds1374"
--
--static DEFINE_MUTEX(ds1374_mutex);
--
--static struct i2c_driver ds1374_driver;
--static struct i2c_client *save_client;
--
--static unsigned short ignore[] = { I2C_CLIENT_END };
--static unsigned short normal_addr[] = { 0x68, I2C_CLIENT_END };
--
--static struct i2c_client_address_data addr_data = {
--	.normal_i2c = normal_addr,
--	.probe = ignore,
--	.ignore = ignore,
--};
+-#include "kvm.h"
+-#include "irq.h"
 -
--static ulong ds1374_read_rtc(void)
+-/*
+- * check if there is pending interrupt without
+- * intack.
+- */
+-int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 -{
--	ulong time = 0;
--	int reg = DS1374_REG_WDALM0;
+-	struct kvm_pic *s;
 -
--	while (reg--) {
--		s32 tmp;
--		if ((tmp = i2c_smbus_read_byte_data(save_client, reg)) < 0) {
--			dev_warn(&save_client->dev,
--				 "can't read from rtc chip\n");
+-	if (kvm_apic_has_interrupt(v) == -1) {	/* LAPIC */
+-		if (kvm_apic_accept_pic_intr(v)) {
+-			s = pic_irqchip(v->kvm);	/* PIC */
+-			return s->output;
+-		} else
 -			return 0;
--		}
--		time = (time << 8) | (tmp & 0xff);
 -	}
--	return time;
+-	return 1;
 -}
+-EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
 -
--static void ds1374_write_rtc(ulong time)
+-/*
+- * Read pending interrupt vector and intack.
+- */
+-int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 -{
--	int reg;
+-	struct kvm_pic *s;
+-	int vector;
 -
--	for (reg = DS1374_REG_TOD0; reg < DS1374_REG_WDALM0; reg++) {
--		if (i2c_smbus_write_byte_data(save_client, reg, time & 0xff)
--		    < 0) {
--			dev_warn(&save_client->dev,
--				 "can't write to rtc chip\n");
--			break;
+-	vector = kvm_get_apic_interrupt(v);	/* APIC */
+-	if (vector == -1) {
+-		if (kvm_apic_accept_pic_intr(v)) {
+-			s = pic_irqchip(v->kvm);
+-			s->output = 0;		/* PIC */
+-			vector = kvm_pic_read_irq(s);
 -		}
--		time = time >> 8;
 -	}
+-	return vector;
 -}
+-EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 -
--static void ds1374_check_rtc_status(void)
+-static void vcpu_kick_intr(void *info)
 -{
--	s32 tmp;
--
--	tmp = i2c_smbus_read_byte_data(save_client, DS1374_REG_SR);
--	if (tmp < 0) {
--		dev_warn(&save_client->dev,
--			 "can't read status from rtc chip\n");
--		return;
--	}
--	if (tmp & DS1374_REG_SR_OSF) {
--		dev_warn(&save_client->dev,
--			 "oscillator discontinuity flagged, time unreliable\n");
--		tmp &= ~DS1374_REG_SR_OSF;
--		tmp = i2c_smbus_write_byte_data(save_client, DS1374_REG_SR,
--						tmp & 0xff);
--		if (tmp < 0)
--			dev_warn(&save_client->dev,
--				 "can't clear discontinuity notification\n");
--	}
+-#ifdef DEBUG
+-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
+-	printk(KERN_DEBUG "vcpu_kick_intr %p \n", vcpu);
+-#endif
 -}
 -
--ulong ds1374_get_rtc_time(void)
+-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 -{
--	ulong t1, t2;
--	int limit = 10;		/* arbitrary retry limit */
--
--	mutex_lock(&ds1374_mutex);
--
--	/*
--	 * Since the reads are being performed one byte at a time using
--	 * the SMBus vs a 4-byte i2c transfer, there is a chance that a
--	 * carry will occur during the read. To detect this, 2 reads are
--	 * performed and compared.
--	 */
--	do {
--		t1 = ds1374_read_rtc();
--		t2 = ds1374_read_rtc();
--	} while (t1 != t2 && limit--);
--
--	mutex_unlock(&ds1374_mutex);
+-	int ipi_pcpu = vcpu->cpu;
 -
--	if (t1 != t2) {
--		dev_warn(&save_client->dev,
--			 "can't get consistent time from rtc chip\n");
--		t1 = 0;
+-	if (waitqueue_active(&vcpu->wq)) {
+-		wake_up_interruptible(&vcpu->wq);
+-		++vcpu->stat.halt_wakeup;
 -	}
--
--	return t1;
+-	if (vcpu->guest_mode)
+-		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
 -}
 -
--static ulong new_time;
--
--static void ds1374_set_work(struct work_struct *work)
+-void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 -{
--	ulong t1, t2;
--	int limit = 10;		/* arbitrary retry limit */
--
--	t1 = new_time;
--
--	mutex_lock(&ds1374_mutex);
--
--	/*
--	 * Since the writes are being performed one byte at a time using
--	 * the SMBus vs a 4-byte i2c transfer, there is a chance that a
--	 * carry will occur during the write. To detect this, the write
--	 * value is read back and compared.
--	 */
--	do {
--		ds1374_write_rtc(t1);
--		t2 = ds1374_read_rtc();
--	} while (t1 != t2 && limit--);
--
--	mutex_unlock(&ds1374_mutex);
--
--	if (t1 != t2)
--		dev_warn(&save_client->dev,
--			 "can't confirm time set from rtc chip\n");
+-	kvm_inject_apic_timer_irqs(vcpu);
+-	/* TODO: PIT, RTC etc. */
 -}
+-EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
 -
--static struct workqueue_struct *ds1374_workqueue;
--
--static DECLARE_WORK(ds1374_work, ds1374_set_work);
--
--int ds1374_set_rtc_time(ulong nowtime)
+-void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 -{
--	new_time = nowtime;
--
--	if (in_interrupt())
--		queue_work(ds1374_workqueue, &ds1374_work);
--	else
--		ds1374_set_work(NULL);
--
--	return 0;
+-	kvm_apic_timer_intr_post(vcpu, vec);
+-	/* TODO: PIT, RTC etc. */
 -}
--
+-EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
+diff --git a/drivers/kvm/irq.h b/drivers/kvm/irq.h
+deleted file mode 100644
+index 11fc014..0000000
+--- a/drivers/kvm/irq.h
++++ /dev/null
+@@ -1,165 +0,0 @@
 -/*
-- *****************************************************************************
+- * irq.h: in kernel interrupt controller related definitions
+- * Copyright (c) 2007, Intel Corporation.
 - *
-- *	Driver Interface
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
 - *
-- *****************************************************************************
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+- * Place - Suite 330, Boston, MA 02111-1307 USA.
+- * Authors:
+- *   Yaozu (Eddie) Dong <Eddie.dong at intel.com>
+- *
+- */
+-
+-#ifndef __IRQ_H
+-#define __IRQ_H
+-
+-#include "kvm.h"
+-
+-typedef void irq_request_func(void *opaque, int level);
+-
+-struct kvm_kpic_state {
+-	u8 last_irr;	/* edge detection */
+-	u8 irr;		/* interrupt request register */
+-	u8 imr;		/* interrupt mask register */
+-	u8 isr;		/* interrupt service register */
+-	u8 priority_add;	/* highest irq priority */
+-	u8 irq_base;
+-	u8 read_reg_select;
+-	u8 poll;
+-	u8 special_mask;
+-	u8 init_state;
+-	u8 auto_eoi;
+-	u8 rotate_on_auto_eoi;
+-	u8 special_fully_nested_mode;
+-	u8 init4;		/* true if 4 byte init */
+-	u8 elcr;		/* PIIX edge/trigger selection */
+-	u8 elcr_mask;
+-	struct kvm_pic *pics_state;
+-};
+-
+-struct kvm_pic {
+-	struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
+-	irq_request_func *irq_request;
+-	void *irq_request_opaque;
+-	int output;		/* intr from master PIC */
+-	struct kvm_io_device dev;
+-};
+-
+-struct kvm_pic *kvm_create_pic(struct kvm *kvm);
+-void kvm_pic_set_irq(void *opaque, int irq, int level);
+-int kvm_pic_read_irq(struct kvm_pic *s);
+-int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+-int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
+-void kvm_pic_update_irq(struct kvm_pic *s);
+-
+-#define IOAPIC_NUM_PINS  KVM_IOAPIC_NUM_PINS
+-#define IOAPIC_VERSION_ID 0x11	/* IOAPIC version */
+-#define IOAPIC_EDGE_TRIG  0
+-#define IOAPIC_LEVEL_TRIG 1
+-
+-#define IOAPIC_DEFAULT_BASE_ADDRESS  0xfec00000
+-#define IOAPIC_MEM_LENGTH            0x100
+-
+-/* Direct registers. */
+-#define IOAPIC_REG_SELECT  0x00
+-#define IOAPIC_REG_WINDOW  0x10
+-#define IOAPIC_REG_EOI     0x40	/* IA64 IOSAPIC only */
+-
+-/* Indirect registers. */
+-#define IOAPIC_REG_APIC_ID 0x00	/* x86 IOAPIC only */
+-#define IOAPIC_REG_VERSION 0x01
+-#define IOAPIC_REG_ARB_ID  0x02	/* x86 IOAPIC only */
+-
+-struct kvm_ioapic {
+-	u64 base_address;
+-	u32 ioregsel;
+-	u32 id;
+-	u32 irr;
+-	u32 pad;
+-	union ioapic_redir_entry {
+-		u64 bits;
+-		struct {
+-			u8 vector;
+-			u8 delivery_mode:3;
+-			u8 dest_mode:1;
+-			u8 delivery_status:1;
+-			u8 polarity:1;
+-			u8 remote_irr:1;
+-			u8 trig_mode:1;
+-			u8 mask:1;
+-			u8 reserve:7;
+-			u8 reserved[4];
+-			u8 dest_id;
+-		} fields;
+-	} redirtbl[IOAPIC_NUM_PINS];
+-	struct kvm_io_device dev;
+-	struct kvm *kvm;
+-};
+-
+-struct kvm_lapic {
+-	unsigned long base_address;
+-	struct kvm_io_device dev;
+-	struct {
+-		atomic_t pending;
+-		s64 period;	/* unit: ns */
+-		u32 divide_count;
+-		ktime_t last_update;
+-		struct hrtimer dev;
+-	} timer;
+-	struct kvm_vcpu *vcpu;
+-	struct page *regs_page;
+-	void *regs;
+-};
+-
+-#ifdef DEBUG
+-#define ASSERT(x)  							\
+-do {									\
+-	if (!(x)) {							\
+-		printk(KERN_EMERG "assertion failed %s: %d: %s\n",	\
+-		       __FILE__, __LINE__, #x);				\
+-		BUG();							\
+-	}								\
+-} while (0)
+-#else
+-#define ASSERT(x) do { } while (0)
+-#endif
+-
+-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+-int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
+-int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
+-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
+-int kvm_create_lapic(struct kvm_vcpu *vcpu);
+-void kvm_lapic_reset(struct kvm_vcpu *vcpu);
+-void kvm_free_apic(struct kvm_lapic *apic);
+-u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
+-void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
+-void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
+-struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
+-				       unsigned long bitmap);
+-u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
+-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
+-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
+-void kvm_ioapic_update_eoi(struct kvm *kvm, int vector);
+-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
+-int kvm_apic_set_irq(struct kvm_lapic *apic, u8 vec, u8 trig);
+-void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
+-int kvm_ioapic_init(struct kvm *kvm);
+-void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
+-int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
+-int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+-void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
+-void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
+-void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
+-void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
+-void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+-
+-#endif
+diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
+deleted file mode 100644
+index 3b0bc4b..0000000
+--- a/drivers/kvm/kvm.h
++++ /dev/null
+@@ -1,796 +0,0 @@
+-#ifndef __KVM_H
+-#define __KVM_H
+-
+-/*
+- * This work is licensed under the terms of the GNU GPL, version 2.  See
+- * the COPYING file in the top-level directory.
 - */
--static int ds1374_probe(struct i2c_adapter *adap, int addr, int kind)
--{
--	struct i2c_client *client;
--	int rc;
 -
--	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
--	if (!client)
--		return -ENOMEM;
+-#include <linux/types.h>
+-#include <linux/list.h>
+-#include <linux/mutex.h>
+-#include <linux/spinlock.h>
+-#include <linux/signal.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/preempt.h>
+-#include <asm/signal.h>
 -
--	strncpy(client->name, DS1374_DRV_NAME, I2C_NAME_SIZE);
--	client->addr = addr;
--	client->adapter = adap;
--	client->driver = &ds1374_driver;
+-#include <linux/kvm.h>
+-#include <linux/kvm_para.h>
 -
--	ds1374_workqueue = create_singlethread_workqueue("ds1374");
--	if (!ds1374_workqueue) {
--		kfree(client);
--		return -ENOMEM;	/* most expected reason */
--	}
+-#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
+-#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
+-#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
 -
--	if ((rc = i2c_attach_client(client)) != 0) {
--		kfree(client);
--		return rc;
--	}
+-#define KVM_GUEST_CR0_MASK \
+-	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
+-	 | X86_CR0_NW | X86_CR0_CD)
+-#define KVM_VM_CR0_ALWAYS_ON \
+-	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
+-	 | X86_CR0_MP)
+-#define KVM_GUEST_CR4_MASK \
+-	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
+-#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+-#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
 -
--	save_client = client;
+-#define INVALID_PAGE (~(hpa_t)0)
+-#define UNMAPPED_GVA (~(gpa_t)0)
 -
--	ds1374_check_rtc_status();
+-#define KVM_MAX_VCPUS 4
+-#define KVM_ALIAS_SLOTS 4
+-#define KVM_MEMORY_SLOTS 8
+-#define KVM_NUM_MMU_PAGES 1024
+-#define KVM_MIN_FREE_MMU_PAGES 5
+-#define KVM_REFILL_PAGES 25
+-#define KVM_MAX_CPUID_ENTRIES 40
 -
--	return 0;
--}
+-#define DE_VECTOR 0
+-#define NM_VECTOR 7
+-#define DF_VECTOR 8
+-#define TS_VECTOR 10
+-#define NP_VECTOR 11
+-#define SS_VECTOR 12
+-#define GP_VECTOR 13
+-#define PF_VECTOR 14
 -
--static int ds1374_attach(struct i2c_adapter *adap)
--{
--	return i2c_probe(adap, &addr_data, ds1374_probe);
--}
+-#define SELECTOR_TI_MASK (1 << 2)
+-#define SELECTOR_RPL_MASK 0x03
 -
--static int ds1374_detach(struct i2c_client *client)
--{
--	int rc;
+-#define IOPL_SHIFT 12
 -
--	if ((rc = i2c_detach_client(client)) == 0) {
--		kfree(i2c_get_clientdata(client));
--		destroy_workqueue(ds1374_workqueue);
--	}
--	return rc;
--}
+-#define KVM_PIO_PAGE_OFFSET 1
 -
--static struct i2c_driver ds1374_driver = {
--	.driver = {
--		.name	= DS1374_DRV_NAME,
--	},
--	.id = I2C_DRIVERID_DS1374,
--	.attach_adapter = ds1374_attach,
--	.detach_client = ds1374_detach,
--};
+-/*
+- * vcpu->requests bit members
+- */
+-#define KVM_TLB_FLUSH 0
 -
--static int __init ds1374_init(void)
--{
--	return i2c_add_driver(&ds1374_driver);
--}
+-/*
+- * Address types:
+- *
+- *  gva - guest virtual address
+- *  gpa - guest physical address
+- *  gfn - guest frame number
+- *  hva - host virtual address
+- *  hpa - host physical address
+- *  hfn - host frame number
+- */
 -
--static void __exit ds1374_exit(void)
--{
--	i2c_del_driver(&ds1374_driver);
--}
+-typedef unsigned long  gva_t;
+-typedef u64            gpa_t;
+-typedef unsigned long  gfn_t;
 -
--module_init(ds1374_init);
--module_exit(ds1374_exit);
+-typedef unsigned long  hva_t;
+-typedef u64            hpa_t;
+-typedef unsigned long  hfn_t;
 -
--MODULE_AUTHOR("Randy Vinson <rvinson at mvista.com>");
--MODULE_DESCRIPTION("Maxim/Dallas DS1374 RTC I2C Client Driver");
--MODULE_LICENSE("GPL");
-diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
-index 1a7eeeb..fde297b 100644
---- a/drivers/i2c/chips/eeprom.c
-+++ b/drivers/i2c/chips/eeprom.c
-@@ -35,7 +35,7 @@
- #include <linux/mutex.h>
- 
- /* Addresses to scan */
--static unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
-+static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
- 					0x55, 0x56, 0x57, I2C_CLIENT_END };
- 
- /* Insmod parameters */
-diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
-index b767603..2a31601 100644
---- a/drivers/i2c/chips/isp1301_omap.c
-+++ b/drivers/i2c/chips/isp1301_omap.c
-@@ -100,7 +100,7 @@ struct isp1301 {
- 
- #if	defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE)
- 
--#include <asm/arch/tps65010.h>
-+#include <linux/i2c/tps65010.h>
- 
- #else
- 
-@@ -259,12 +259,6 @@ static inline const char *state_name(struct isp1301 *isp)
- 	return state_string(isp->otg.state);
- }
- 
--#ifdef	VERBOSE
--#define	dev_vdbg			dev_dbg
--#else
--#define	dev_vdbg(dev, fmt, arg...)	do{}while(0)
--#endif
+-#define NR_PTE_CHAIN_ENTRIES 5
+-
+-struct kvm_pte_chain {
+-	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
+-	struct hlist_node link;
+-};
 -
- /*-------------------------------------------------------------------------*/
- 
- /* NOTE:  some of this ISP1301 setup is specific to H2 boards;
-diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c
-deleted file mode 100644
-index 3fcb646..0000000
---- a/drivers/i2c/chips/m41t00.c
-+++ /dev/null
-@@ -1,413 +0,0 @@
 -/*
-- * I2C client/driver for the ST M41T00 family of i2c rtc chips.
-- *
-- * Author: Mark A. Greer <mgreer at mvista.com>
+- * kvm_mmu_page_role, below, is defined as:
 - *
-- * 2005, 2006 (c) MontaVista Software, Inc. This file is licensed under
-- * the terms of the GNU General Public License version 2. This program
-- * is licensed "as is" without any warranty of any kind, whether express
-- * or implied.
+- *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
+- *   bits 4:7 - page table level for this shadow (1-4)
+- *   bits 8:9 - page table quadrant for 2-level guests
+- *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
+- *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 - */
+-union kvm_mmu_page_role {
+-	unsigned word;
+-	struct {
+-		unsigned glevels : 4;
+-		unsigned level : 4;
+-		unsigned quadrant : 2;
+-		unsigned pad_for_nice_hex_output : 6;
+-		unsigned metaphysical : 1;
+-		unsigned hugepage_access : 3;
+-	};
+-};
+-
+-struct kvm_mmu_page {
+-	struct list_head link;
+-	struct hlist_node hash_link;
+-
+-	/*
+-	 * The following two entries are used to key the shadow page in the
+-	 * hash table.
+-	 */
+-	gfn_t gfn;
+-	union kvm_mmu_page_role role;
+-
+-	u64 *spt;
+-	unsigned long slot_bitmap; /* One bit set per slot which has memory
+-				    * in this shadow page.
+-				    */
+-	int multimapped;         /* More than one parent_pte? */
+-	int root_count;          /* Currently serving as active root */
+-	union {
+-		u64 *parent_pte;               /* !multimapped */
+-		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
+-	};
+-};
+-
+-struct kvm_vcpu;
+-extern struct kmem_cache *kvm_vcpu_cache;
+-
 -/*
-- * This i2c client/driver wedges between the drivers/char/genrtc.c RTC
-- * interface and the SMBus interface of the i2c subsystem.
+- * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
+- * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
+- * mode.
 - */
+-struct kvm_mmu {
+-	void (*new_cr3)(struct kvm_vcpu *vcpu);
+-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+-	void (*free)(struct kvm_vcpu *vcpu);
+-	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
+-	hpa_t root_hpa;
+-	int root_level;
+-	int shadow_root_level;
 -
--#include <linux/kernel.h>
--#include <linux/module.h>
--#include <linux/interrupt.h>
--#include <linux/i2c.h>
--#include <linux/rtc.h>
--#include <linux/bcd.h>
--#include <linux/workqueue.h>
--#include <linux/platform_device.h>
--#include <linux/m41t00.h>
--#include <asm/time.h>
--#include <asm/rtc.h>
+-	u64 *pae_root;
+-};
 -
--static struct i2c_driver m41t00_driver;
--static struct i2c_client *save_client;
+-#define KVM_NR_MEM_OBJS 20
 -
--static unsigned short ignore[] = { I2C_CLIENT_END };
--static unsigned short normal_addr[] = { I2C_CLIENT_END, I2C_CLIENT_END };
+-struct kvm_mmu_memory_cache {
+-	int nobjs;
+-	void *objects[KVM_NR_MEM_OBJS];
+-};
 -
--static struct i2c_client_address_data addr_data = {
--	.normal_i2c	= normal_addr,
--	.probe		= ignore,
--	.ignore		= ignore,
+-/*
+- * We don't want allocation failures within the mmu code, so we preallocate
+- * enough memory for a single page fault in a cache.
+- */
+-struct kvm_guest_debug {
+-	int enabled;
+-	unsigned long bp[4];
+-	int singlestep;
 -};
 -
--struct m41t00_chip_info {
--	u8	type;
--	char	*name;
--	u8	read_limit;
--	u8	sec;		/* Offsets for chip regs */
--	u8	min;
--	u8	hour;
--	u8	day;
--	u8	mon;
--	u8	year;
--	u8	alarm_mon;
--	u8	alarm_hour;
--	u8	sqw;
--	u8	sqw_freq;
+-enum {
+-	VCPU_REGS_RAX = 0,
+-	VCPU_REGS_RCX = 1,
+-	VCPU_REGS_RDX = 2,
+-	VCPU_REGS_RBX = 3,
+-	VCPU_REGS_RSP = 4,
+-	VCPU_REGS_RBP = 5,
+-	VCPU_REGS_RSI = 6,
+-	VCPU_REGS_RDI = 7,
+-#ifdef CONFIG_X86_64
+-	VCPU_REGS_R8 = 8,
+-	VCPU_REGS_R9 = 9,
+-	VCPU_REGS_R10 = 10,
+-	VCPU_REGS_R11 = 11,
+-	VCPU_REGS_R12 = 12,
+-	VCPU_REGS_R13 = 13,
+-	VCPU_REGS_R14 = 14,
+-	VCPU_REGS_R15 = 15,
+-#endif
+-	NR_VCPU_REGS
 -};
 -
--static struct m41t00_chip_info m41t00_chip_info_tbl[] = {
--	{
--		.type		= M41T00_TYPE_M41T00,
--		.name		= "m41t00",
--		.read_limit	= 5,
--		.sec		= 0,
--		.min		= 1,
--		.hour		= 2,
--		.day		= 4,
--		.mon		= 5,
--		.year		= 6,
--	},
--	{
--		.type		= M41T00_TYPE_M41T81,
--		.name		= "m41t81",
--		.read_limit	= 1,
--		.sec		= 1,
--		.min		= 2,
--		.hour		= 3,
--		.day		= 5,
--		.mon		= 6,
--		.year		= 7,
--		.alarm_mon	= 0xa,
--		.alarm_hour	= 0xc,
--		.sqw		= 0x13,
--	},
--	{
--		.type		= M41T00_TYPE_M41T85,
--		.name		= "m41t85",
--		.read_limit	= 1,
--		.sec		= 1,
--		.min		= 2,
--		.hour		= 3,
--		.day		= 5,
--		.mon		= 6,
--		.year		= 7,
--		.alarm_mon	= 0xa,
--		.alarm_hour	= 0xc,
--		.sqw		= 0x13,
--	},
+-enum {
+-	VCPU_SREG_CS,
+-	VCPU_SREG_DS,
+-	VCPU_SREG_ES,
+-	VCPU_SREG_FS,
+-	VCPU_SREG_GS,
+-	VCPU_SREG_SS,
+-	VCPU_SREG_TR,
+-	VCPU_SREG_LDTR,
+-};
+-
+-struct kvm_pio_request {
+-	unsigned long count;
+-	int cur_count;
+-	struct page *guest_pages[2];
+-	unsigned guest_page_offset;
+-	int in;
+-	int port;
+-	int size;
+-	int string;
+-	int down;
+-	int rep;
 -};
--static struct m41t00_chip_info *m41t00_chip;
 -
--ulong
--m41t00_get_rtc_time(void)
--{
--	s32 sec, min, hour, day, mon, year;
--	s32 sec1, min1, hour1, day1, mon1, year1;
--	u8 reads = 0;
--	u8 buf[8], msgbuf[1] = { 0 }; /* offset into rtc's regs */
--	struct i2c_msg msgs[] = {
--		{
--			.addr	= save_client->addr,
--			.flags	= 0,
--			.len	= 1,
--			.buf	= msgbuf,
--		},
--		{
--			.addr	= save_client->addr,
--			.flags	= I2C_M_RD,
--			.len	= 8,
--			.buf	= buf,
--		},
--	};
+-struct kvm_stat {
+-	u32 pf_fixed;
+-	u32 pf_guest;
+-	u32 tlb_flush;
+-	u32 invlpg;
 -
--	sec = min = hour = day = mon = year = 0;
+-	u32 exits;
+-	u32 io_exits;
+-	u32 mmio_exits;
+-	u32 signal_exits;
+-	u32 irq_window_exits;
+-	u32 halt_exits;
+-	u32 halt_wakeup;
+-	u32 request_irq_exits;
+-	u32 irq_exits;
+-	u32 light_exits;
+-	u32 efer_reload;
+-};
 -
--	do {
--		if (i2c_transfer(save_client->adapter, msgs, 2) < 0)
--			goto read_err;
+-struct kvm_io_device {
+-	void (*read)(struct kvm_io_device *this,
+-		     gpa_t addr,
+-		     int len,
+-		     void *val);
+-	void (*write)(struct kvm_io_device *this,
+-		      gpa_t addr,
+-		      int len,
+-		      const void *val);
+-	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+-	void (*destructor)(struct kvm_io_device *this);
 -
--		sec1 = sec;
--		min1 = min;
--		hour1 = hour;
--		day1 = day;
--		mon1 = mon;
--		year1 = year;
+-	void             *private;
+-};
 -
--		sec = buf[m41t00_chip->sec] & 0x7f;
--		min = buf[m41t00_chip->min] & 0x7f;
--		hour = buf[m41t00_chip->hour] & 0x3f;
--		day = buf[m41t00_chip->day] & 0x3f;
--		mon = buf[m41t00_chip->mon] & 0x1f;
--		year = buf[m41t00_chip->year];
--	} while ((++reads < m41t00_chip->read_limit) && ((sec != sec1)
--			|| (min != min1) || (hour != hour1) || (day != day1)
--			|| (mon != mon1) || (year != year1)));
+-static inline void kvm_iodevice_read(struct kvm_io_device *dev,
+-				     gpa_t addr,
+-				     int len,
+-				     void *val)
+-{
+-	dev->read(dev, addr, len, val);
+-}
 -
--	if ((m41t00_chip->read_limit > 1) && ((sec != sec1) || (min != min1)
--			|| (hour != hour1) || (day != day1) || (mon != mon1)
--			|| (year != year1)))
--		goto read_err;
+-static inline void kvm_iodevice_write(struct kvm_io_device *dev,
+-				      gpa_t addr,
+-				      int len,
+-				      const void *val)
+-{
+-	dev->write(dev, addr, len, val);
+-}
 -
--	sec = BCD2BIN(sec);
--	min = BCD2BIN(min);
--	hour = BCD2BIN(hour);
--	day = BCD2BIN(day);
--	mon = BCD2BIN(mon);
--	year = BCD2BIN(year);
+-static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
+-{
+-	return dev->in_range(dev, addr);
+-}
 -
--	year += 1900;
--	if (year < 1970)
--		year += 100;
+-static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
+-{
+-	if (dev->destructor)
+-		dev->destructor(dev);
+-}
 -
--	return mktime(year, mon, day, hour, min, sec);
+-/*
+- * It would be nice to use something smarter than a linear search, TBD...
+- * Thankfully we dont expect many devices to register (famous last words :),
+- * so until then it will suffice.  At least its abstracted so we can change
+- * in one place.
+- */
+-struct kvm_io_bus {
+-	int                   dev_count;
+-#define NR_IOBUS_DEVS 6
+-	struct kvm_io_device *devs[NR_IOBUS_DEVS];
+-};
 -
--read_err:
--	dev_err(&save_client->dev, "m41t00_get_rtc_time: Read error\n");
--	return 0;
--}
--EXPORT_SYMBOL_GPL(m41t00_get_rtc_time);
+-void kvm_io_bus_init(struct kvm_io_bus *bus);
+-void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+-void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+-			     struct kvm_io_device *dev);
 -
--static void
--m41t00_set(void *arg)
--{
--	struct rtc_time	tm;
--	int nowtime = *(int *)arg;
--	s32 sec, min, hour, day, mon, year;
--	u8 wbuf[9], *buf = &wbuf[1], msgbuf[1] = { 0 };
--	struct i2c_msg msgs[] = {
--		{
--			.addr	= save_client->addr,
--			.flags	= 0,
--			.len	= 1,
--			.buf	= msgbuf,
--		},
--		{
--			.addr	= save_client->addr,
--			.flags	= I2C_M_RD,
--			.len	= 8,
--			.buf	= buf,
--		},
--	};
+-struct kvm_vcpu {
+-	struct kvm *kvm;
+-	struct preempt_notifier preempt_notifier;
+-	int vcpu_id;
+-	struct mutex mutex;
+-	int   cpu;
+-	u64 host_tsc;
+-	struct kvm_run *run;
+-	int interrupt_window_open;
+-	int guest_mode;
+-	unsigned long requests;
+-	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
+-	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
+-	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
+-	unsigned long rip;      /* needs vcpu_load_rsp_rip() */
 -
--	to_tm(nowtime, &tm);
--	tm.tm_year = (tm.tm_year - 1900) % 100;
+-	unsigned long cr0;
+-	unsigned long cr2;
+-	unsigned long cr3;
+-	gpa_t para_state_gpa;
+-	struct page *para_state_page;
+-	gpa_t hypercall_gpa;
+-	unsigned long cr4;
+-	unsigned long cr8;
+-	u64 pdptrs[4]; /* pae */
+-	u64 shadow_efer;
+-	u64 apic_base;
+-	struct kvm_lapic *apic;    /* kernel irqchip context */
+-#define VCPU_MP_STATE_RUNNABLE          0
+-#define VCPU_MP_STATE_UNINITIALIZED     1
+-#define VCPU_MP_STATE_INIT_RECEIVED     2
+-#define VCPU_MP_STATE_SIPI_RECEIVED     3
+-#define VCPU_MP_STATE_HALTED            4
+-	int mp_state;
+-	int sipi_vector;
+-	u64 ia32_misc_enable_msr;
+-
+-	struct kvm_mmu mmu;
+-
+-	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
+-	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+-	struct kvm_mmu_memory_cache mmu_page_cache;
+-	struct kvm_mmu_memory_cache mmu_page_header_cache;
+-
+-	gfn_t last_pt_write_gfn;
+-	int   last_pt_write_count;
+-
+-	struct kvm_guest_debug guest_debug;
+-
+-	struct i387_fxsave_struct host_fx_image;
+-	struct i387_fxsave_struct guest_fx_image;
+-	int fpu_active;
+-	int guest_fpu_loaded;
+-
+-	int mmio_needed;
+-	int mmio_read_completed;
+-	int mmio_is_write;
+-	int mmio_size;
+-	unsigned char mmio_data[8];
+-	gpa_t mmio_phys_addr;
+-	gva_t mmio_fault_cr2;
+-	struct kvm_pio_request pio;
+-	void *pio_data;
+-	wait_queue_head_t wq;
 -
--	sec = BIN2BCD(tm.tm_sec);
--	min = BIN2BCD(tm.tm_min);
--	hour = BIN2BCD(tm.tm_hour);
--	day = BIN2BCD(tm.tm_mday);
--	mon = BIN2BCD(tm.tm_mon);
--	year = BIN2BCD(tm.tm_year);
+-	int sigset_active;
+-	sigset_t sigset;
 -
--	/* Read reg values into buf[0..7]/wbuf[1..8] */
--	if (i2c_transfer(save_client->adapter, msgs, 2) < 0) {
--		dev_err(&save_client->dev, "m41t00_set: Read error\n");
--		return;
--	}
+-	struct kvm_stat stat;
 -
--	wbuf[0] = 0; /* offset into rtc's regs */
--	buf[m41t00_chip->sec] = (buf[m41t00_chip->sec] & ~0x7f) | (sec & 0x7f);
--	buf[m41t00_chip->min] = (buf[m41t00_chip->min] & ~0x7f) | (min & 0x7f);
--	buf[m41t00_chip->hour] = (buf[m41t00_chip->hour] & ~0x3f) | (hour& 0x3f);
--	buf[m41t00_chip->day] = (buf[m41t00_chip->day] & ~0x3f) | (day & 0x3f);
--	buf[m41t00_chip->mon] = (buf[m41t00_chip->mon] & ~0x1f) | (mon & 0x1f);
--	buf[m41t00_chip->year] = year;
+-	struct {
+-		int active;
+-		u8 save_iopl;
+-		struct kvm_save_segment {
+-			u16 selector;
+-			unsigned long base;
+-			u32 limit;
+-			u32 ar;
+-		} tr, es, ds, fs, gs;
+-	} rmode;
+-	int halt_request; /* real mode on Intel only */
+-
+-	int cpuid_nent;
+-	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+-};
+-
+-struct kvm_mem_alias {
+-	gfn_t base_gfn;
+-	unsigned long npages;
+-	gfn_t target_gfn;
+-};
+-
+-struct kvm_memory_slot {
+-	gfn_t base_gfn;
+-	unsigned long npages;
+-	unsigned long flags;
+-	struct page **phys_mem;
+-	unsigned long *dirty_bitmap;
+-};
 -
--	if (i2c_master_send(save_client, wbuf, 9) < 0)
--		dev_err(&save_client->dev, "m41t00_set: Write error\n");
+-struct kvm {
+-	struct mutex lock; /* protects everything except vcpus */
+-	int naliases;
+-	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+-	int nmemslots;
+-	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
+-	/*
+-	 * Hash table of struct kvm_mmu_page.
+-	 */
+-	struct list_head active_mmu_pages;
+-	int n_free_mmu_pages;
+-	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+-	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+-	unsigned long rmap_overflow;
+-	struct list_head vm_list;
+-	struct file *filp;
+-	struct kvm_io_bus mmio_bus;
+-	struct kvm_io_bus pio_bus;
+-	struct kvm_pic *vpic;
+-	struct kvm_ioapic *vioapic;
+-	int round_robin_prev_vcpu;
+-};
+-
+-static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
+-{
+-	return kvm->vpic;
 -}
 -
--static ulong new_time;
--/* well, isn't this API just _lovely_? */
--static void
--m41t00_barf(struct work_struct *unusable)
+-static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
 -{
--	m41t00_set(&new_time);
+-	return kvm->vioapic;
 -}
 -
--static struct workqueue_struct *m41t00_wq;
--static DECLARE_WORK(m41t00_work, m41t00_barf);
+-static inline int irqchip_in_kernel(struct kvm *kvm)
+-{
+-	return pic_irqchip(kvm) != 0;
+-}
 -
--int
--m41t00_set_rtc_time(ulong nowtime)
+-struct descriptor_table {
+-	u16 limit;
+-	unsigned long base;
+-} __attribute__((packed));
+-
+-struct kvm_x86_ops {
+-	int (*cpu_has_kvm_support)(void);          /* __init */
+-	int (*disabled_by_bios)(void);             /* __init */
+-	void (*hardware_enable)(void *dummy);      /* __init */
+-	void (*hardware_disable)(void *dummy);
+-	void (*check_processor_compatibility)(void *rtn);
+-	int (*hardware_setup)(void);               /* __init */
+-	void (*hardware_unsetup)(void);            /* __exit */
+-
+-	/* Create, but do not attach this VCPU */
+-	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
+-	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+-	void (*vcpu_reset)(struct kvm_vcpu *vcpu);
+-
+-	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
+-	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+-	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+-	void (*vcpu_decache)(struct kvm_vcpu *vcpu);
+-
+-	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
+-			       struct kvm_debug_guest *dbg);
+-	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
+-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+-	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+-	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
+-	void (*get_segment)(struct kvm_vcpu *vcpu,
+-			    struct kvm_segment *var, int seg);
+-	void (*set_segment)(struct kvm_vcpu *vcpu,
+-			    struct kvm_segment *var, int seg);
+-	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
+-	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
+-	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
+-	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
+-	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+-	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
+-	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+-	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+-	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+-	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+-	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
+-	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
+-		       int *exception);
+-	void (*cache_regs)(struct kvm_vcpu *vcpu);
+-	void (*decache_regs)(struct kvm_vcpu *vcpu);
+-	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+-	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+-
+-	void (*tlb_flush)(struct kvm_vcpu *vcpu);
+-	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+-				  unsigned long addr, u32 err_code);
+-
+-	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
+-
+-	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
+-	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+-	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+-	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
+-				unsigned char *hypercall_addr);
+-	int (*get_irq)(struct kvm_vcpu *vcpu);
+-	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+-	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
+-	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
+-				       struct kvm_run *run);
+-};
+-
+-extern struct kvm_x86_ops *kvm_x86_ops;
+-
+-/* The guest did something we don't support. */
+-#define pr_unimpl(vcpu, fmt, ...)					\
+- do {									\
+-	if (printk_ratelimit())						\
+-		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
+-		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
+- } while(0)
+-
+-#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
+-#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
+-
+-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
+-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+-
+-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+-		  struct module *module);
+-void kvm_exit_x86(void);
+-
+-int kvm_mmu_module_init(void);
+-void kvm_mmu_module_exit(void);
+-
+-void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
+-int kvm_mmu_create(struct kvm_vcpu *vcpu);
+-int kvm_mmu_setup(struct kvm_vcpu *vcpu);
+-
+-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+-void kvm_mmu_zap_all(struct kvm *kvm);
+-
+-hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
+-#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
+-#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
+-static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
+-hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
+-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
+-
+-extern hpa_t bad_page_address;
+-
+-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+-void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+-
+-enum emulation_result {
+-	EMULATE_DONE,       /* no further processing */
+-	EMULATE_DO_MMIO,      /* kvm_run filled with mmio request */
+-	EMULATE_FAIL,         /* can't emulate this instruction */
+-};
+-
+-int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
+-			unsigned long cr2, u16 error_code);
+-void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
+-void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
+-void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
+-void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
+-		   unsigned long *rflags);
+-
+-unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
+-void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
+-		     unsigned long *rflags);
+-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
+-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+-
+-struct x86_emulate_ctxt;
+-
+-int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+-		     int size, unsigned port);
+-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+-			   int size, unsigned long count, int down,
+-			    gva_t address, int rep, unsigned port);
+-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+-int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+-int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
+-int emulate_clts(struct kvm_vcpu *vcpu);
+-int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
+-		    unsigned long *dest);
+-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
+-		    unsigned long value);
+-
+-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
+-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
+-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
+-unsigned long get_cr8(struct kvm_vcpu *vcpu);
+-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+-void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
+-
+-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+-
+-void fx_init(struct kvm_vcpu *vcpu);
+-
+-void kvm_resched(struct kvm_vcpu *vcpu);
+-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
+-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+-void kvm_flush_remote_tlbs(struct kvm *kvm);
+-
+-int emulator_read_std(unsigned long addr,
+-                      void *val,
+-		      unsigned int bytes,
+-		      struct kvm_vcpu *vcpu);
+-int emulator_write_emulated(unsigned long addr,
+-			    const void *val,
+-			    unsigned int bytes,
+-			    struct kvm_vcpu *vcpu);
+-
+-unsigned long segment_base(u16 selector);
+-
+-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+-		       const u8 *new, int bytes);
+-int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
+-void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+-int kvm_mmu_load(struct kvm_vcpu *vcpu);
+-void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+-
+-int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+-
+-static inline void kvm_guest_enter(void)
+-{
+-	current->flags |= PF_VCPU;
+-}
+-
+-static inline void kvm_guest_exit(void)
+-{
+-	current->flags &= ~PF_VCPU;
+-}
+-
+-static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+-				     u32 error_code)
+-{
+-	return vcpu->mmu.page_fault(vcpu, gva, error_code);
+-}
+-
+-static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+-{
+-	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+-		__kvm_mmu_free_some_pages(vcpu);
+-}
+-
+-static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 -{
--	new_time = nowtime;
+-	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+-		return 0;
 -
--	if (in_interrupt())
--		queue_work(m41t00_wq, &m41t00_work);
--	else
--		m41t00_set(&new_time);
+-	return kvm_mmu_load(vcpu);
+-}
 -
+-static inline int is_long_mode(struct kvm_vcpu *vcpu)
+-{
+-#ifdef CONFIG_X86_64
+-	return vcpu->shadow_efer & EFER_LME;
+-#else
 -	return 0;
+-#endif
 -}
--EXPORT_SYMBOL_GPL(m41t00_set_rtc_time);
 -
--/*
-- *****************************************************************************
-- *
-- *	platform_data Driver Interface
-- *
-- *****************************************************************************
-- */
--static int __init
--m41t00_platform_probe(struct platform_device *pdev)
+-static inline int is_pae(struct kvm_vcpu *vcpu)
 -{
--	struct m41t00_platform_data *pdata;
--	int i;
--
--	if (pdev && (pdata = pdev->dev.platform_data)) {
--		normal_addr[0] = pdata->i2c_addr;
--
--		for (i=0; i<ARRAY_SIZE(m41t00_chip_info_tbl); i++)
--			if (m41t00_chip_info_tbl[i].type == pdata->type) {
--				m41t00_chip = &m41t00_chip_info_tbl[i];
--				m41t00_chip->sqw_freq = pdata->sqw_freq;
--				return 0;
--			}
--	}
--	return -ENODEV;
+-	return vcpu->cr4 & X86_CR4_PAE;
 -}
 -
--static int __exit
--m41t00_platform_remove(struct platform_device *pdev)
+-static inline int is_pse(struct kvm_vcpu *vcpu)
 -{
--	return 0;
+-	return vcpu->cr4 & X86_CR4_PSE;
 -}
 -
--static struct platform_driver m41t00_platform_driver = {
--	.probe  = m41t00_platform_probe,
--	.remove = m41t00_platform_remove,
--	.driver = {
--		.owner = THIS_MODULE,
--		.name  = M41T00_DRV_NAME,
--	},
--};
--
--/*
-- *****************************************************************************
-- *
-- *	Driver Interface
-- *
-- *****************************************************************************
-- */
--static int
--m41t00_probe(struct i2c_adapter *adap, int addr, int kind)
+-static inline int is_paging(struct kvm_vcpu *vcpu)
 -{
--	struct i2c_client *client;
--	int rc;
+-	return vcpu->cr0 & X86_CR0_PG;
+-}
 -
--	if (!i2c_check_functionality(adap, I2C_FUNC_I2C
--			| I2C_FUNC_SMBUS_BYTE_DATA))
--		return 0;
+-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
+-{
+-	return slot - kvm->memslots;
+-}
 -
--	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
--	if (!client)
--		return -ENOMEM;
+-static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+-{
+-	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
 -
--	strlcpy(client->name, m41t00_chip->name, I2C_NAME_SIZE);
--	client->addr = addr;
--	client->adapter = adap;
--	client->driver = &m41t00_driver;
+-	return (struct kvm_mmu_page *)page_private(page);
+-}
 -
--	if ((rc = i2c_attach_client(client)))
--		goto attach_err;
+-static inline u16 read_fs(void)
+-{
+-	u16 seg;
+-	asm ("mov %%fs, %0" : "=g"(seg));
+-	return seg;
+-}
 -
--	if (m41t00_chip->type != M41T00_TYPE_M41T00) {
--		/* If asked, disable SQW, set SQW frequency & re-enable */
--		if (m41t00_chip->sqw_freq)
--			if (((rc = i2c_smbus_read_byte_data(client,
--					m41t00_chip->alarm_mon)) < 0)
--			 || ((rc = i2c_smbus_write_byte_data(client,
--					m41t00_chip->alarm_mon, rc & ~0x40)) <0)
--			 || ((rc = i2c_smbus_write_byte_data(client,
--					m41t00_chip->sqw,
--					m41t00_chip->sqw_freq)) < 0)
--			 || ((rc = i2c_smbus_write_byte_data(client,
--					m41t00_chip->alarm_mon, rc | 0x40)) <0))
--				goto sqw_err;
+-static inline u16 read_gs(void)
+-{
+-	u16 seg;
+-	asm ("mov %%gs, %0" : "=g"(seg));
+-	return seg;
+-}
 -
--		/* Make sure HT (Halt Update) bit is cleared */
--		if ((rc = i2c_smbus_read_byte_data(client,
--				m41t00_chip->alarm_hour)) < 0)
--			goto ht_err;
+-static inline u16 read_ldt(void)
+-{
+-	u16 ldt;
+-	asm ("sldt %0" : "=g"(ldt));
+-	return ldt;
+-}
 -
--		if (rc & 0x40)
--			if ((rc = i2c_smbus_write_byte_data(client,
--					m41t00_chip->alarm_hour, rc & ~0x40))<0)
--				goto ht_err;
--	}
+-static inline void load_fs(u16 sel)
+-{
+-	asm ("mov %0, %%fs" : : "rm"(sel));
+-}
 -
--	/* Make sure ST (stop) bit is cleared */
--	if ((rc = i2c_smbus_read_byte_data(client, m41t00_chip->sec)) < 0)
--		goto st_err;
+-static inline void load_gs(u16 sel)
+-{
+-	asm ("mov %0, %%gs" : : "rm"(sel));
+-}
 -
--	if (rc & 0x80)
--		if ((rc = i2c_smbus_write_byte_data(client, m41t00_chip->sec,
--				rc & ~0x80)) < 0)
--			goto st_err;
+-#ifndef load_ldt
+-static inline void load_ldt(u16 sel)
+-{
+-	asm ("lldt %0" : : "rm"(sel));
+-}
+-#endif
 -
--	m41t00_wq = create_singlethread_workqueue(m41t00_chip->name);
--	save_client = client;
--	return 0;
+-static inline void get_idt(struct descriptor_table *table)
+-{
+-	asm ("sidt %0" : "=m"(*table));
+-}
 -
--st_err:
--	dev_err(&client->dev, "m41t00_probe: Can't clear ST bit\n");
--	goto attach_err;
--ht_err:
--	dev_err(&client->dev, "m41t00_probe: Can't clear HT bit\n");
--	goto attach_err;
--sqw_err:
--	dev_err(&client->dev, "m41t00_probe: Can't set SQW Frequency\n");
--attach_err:
--	kfree(client);
--	return rc;
+-static inline void get_gdt(struct descriptor_table *table)
+-{
+-	asm ("sgdt %0" : "=m"(*table));
 -}
 -
--static int
--m41t00_attach(struct i2c_adapter *adap)
+-static inline unsigned long read_tr_base(void)
 -{
--	return i2c_probe(adap, &addr_data, m41t00_probe);
+-	u16 tr;
+-	asm ("str %0" : "=g"(tr));
+-	return segment_base(tr);
 -}
 -
--static int
--m41t00_detach(struct i2c_client *client)
+-#ifdef CONFIG_X86_64
+-static inline unsigned long read_msr(unsigned long msr)
 -{
--	int rc;
+-	u64 value;
 -
--	if ((rc = i2c_detach_client(client)) == 0) {
--		kfree(client);
--		destroy_workqueue(m41t00_wq);
--	}
--	return rc;
+-	rdmsrl(msr, value);
+-	return value;
 -}
+-#endif
 -
--static struct i2c_driver m41t00_driver = {
--	.driver = {
--		.name	= M41T00_DRV_NAME,
--	},
--	.id		= I2C_DRIVERID_STM41T00,
--	.attach_adapter	= m41t00_attach,
--	.detach_client	= m41t00_detach,
--};
+-static inline void fx_save(struct i387_fxsave_struct *image)
+-{
+-	asm ("fxsave (%0)":: "r" (image));
+-}
 -
--static int __init
--m41t00_init(void)
+-static inline void fx_restore(struct i387_fxsave_struct *image)
 -{
--	int rc;
+-	asm ("fxrstor (%0)":: "r" (image));
+-}
 -
--	if (!(rc = platform_driver_register(&m41t00_platform_driver)))
--		rc = i2c_add_driver(&m41t00_driver);
--	return rc;
+-static inline void fpu_init(void)
+-{
+-	asm ("finit");
 -}
 -
--static void __exit
--m41t00_exit(void)
+-static inline u32 get_rdx_init_val(void)
 -{
--	i2c_del_driver(&m41t00_driver);
--	platform_driver_unregister(&m41t00_platform_driver);
+-	return 0x600; /* P6 family */
 -}
 -
--module_init(m41t00_init);
--module_exit(m41t00_exit);
+-#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
+-#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
+-#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
+-#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
+-#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
+-#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
+-#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
+-#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
+-#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
 -
--MODULE_AUTHOR("Mark A. Greer <mgreer at mvista.com>");
--MODULE_DESCRIPTION("ST Microelectronics M41T00 RTC I2C Client Driver");
+-#define MSR_IA32_TIME_STAMP_COUNTER		0x010
+-
+-#define TSS_IOPB_BASE_OFFSET 0x66
+-#define TSS_BASE_SIZE 0x68
+-#define TSS_IOPB_SIZE (65536 / 8)
+-#define TSS_REDIRECTION_SIZE (256 / 8)
+-#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+-
+-#endif
+diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
+deleted file mode 100644
+index 47c10b8..0000000
+--- a/drivers/kvm/kvm_main.c
++++ /dev/null
+@@ -1,3628 +0,0 @@
+-/*
+- * Kernel-based Virtual Machine driver for Linux
+- *
+- * This module enables machines with Intel VT-x extensions to run virtual
+- * machines without emulation or binary translation.
+- *
+- * Copyright (C) 2006 Qumranet, Inc.
+- *
+- * Authors:
+- *   Avi Kivity   <avi at qumranet.com>
+- *   Yaniv Kamay  <yaniv at qumranet.com>
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2.  See
+- * the COPYING file in the top-level directory.
+- *
+- */
+-
+-#include "kvm.h"
+-#include "x86_emulate.h"
+-#include "segment_descriptor.h"
+-#include "irq.h"
+-
+-#include <linux/kvm.h>
+-#include <linux/module.h>
+-#include <linux/errno.h>
+-#include <linux/percpu.h>
+-#include <linux/gfp.h>
+-#include <linux/mm.h>
+-#include <linux/miscdevice.h>
+-#include <linux/vmalloc.h>
+-#include <linux/reboot.h>
+-#include <linux/debugfs.h>
+-#include <linux/highmem.h>
+-#include <linux/file.h>
+-#include <linux/sysdev.h>
+-#include <linux/cpu.h>
+-#include <linux/sched.h>
+-#include <linux/cpumask.h>
+-#include <linux/smp.h>
+-#include <linux/anon_inodes.h>
+-#include <linux/profile.h>
+-
+-#include <asm/processor.h>
+-#include <asm/msr.h>
+-#include <asm/io.h>
+-#include <asm/uaccess.h>
+-#include <asm/desc.h>
+-
+-MODULE_AUTHOR("Qumranet");
 -MODULE_LICENSE("GPL");
-diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c
-index 64692f6..fb7ea56 100644
---- a/drivers/i2c/chips/max6875.c
-+++ b/drivers/i2c/chips/max6875.c
-@@ -34,7 +34,7 @@
- #include <linux/mutex.h>
- 
- /* Do not scan - the MAX6875 access method will write to some EEPROM chips */
--static unsigned short normal_i2c[] = {I2C_CLIENT_END};
-+static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
- 
- /* Insmod parameters */
- I2C_CLIENT_INSMOD_1(max6875);
-diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c
-index 21c6dd6..b3b830c 100644
---- a/drivers/i2c/chips/pcf8574.c
-+++ b/drivers/i2c/chips/pcf8574.c
-@@ -41,9 +41,11 @@
- #include <linux/i2c.h>
- 
- /* Addresses to scan */
--static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
--					0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
--					I2C_CLIENT_END };
-+static const unsigned short normal_i2c[] = {
-+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
-+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
-+	I2C_CLIENT_END
-+};
- 
- /* Insmod parameters */
- I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a);
-diff --git a/drivers/i2c/chips/pcf8575.c b/drivers/i2c/chips/pcf8575.c
-new file mode 100644
-index 0000000..3ea08ac
---- /dev/null
-+++ b/drivers/i2c/chips/pcf8575.c
-@@ -0,0 +1,214 @@
-+/*
-+  pcf8575.c
-+
-+  About the PCF8575 chip: the PCF8575 is a 16-bit I/O expander for the I2C bus
-+  produced by a.o. Philips Semiconductors.
-+
-+  Copyright (C) 2006 Michael Hennerich, Analog Devices Inc.
-+  <hennerich at blackfin.uclinux.org>
-+  Based on pcf8574.c.
-+
-+  Copyright (c) 2007 Bart Van Assche <bart.vanassche at gmail.com>.
-+  Ported this driver from ucLinux to the mainstream Linux kernel.
-+
-+  This program is free software; you can redistribute it and/or modify
-+  it under the terms of the GNU General Public License as published by
-+  the Free Software Foundation; either version 2 of the License, or
-+  (at your option) any later version.
-+
-+  This program is distributed in the hope that it will be useful,
-+  but WITHOUT ANY WARRANTY; without even the implied warranty of
-+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+  GNU General Public License for more details.
-+
-+  You should have received a copy of the GNU General Public License
-+  along with this program; if not, write to the Free Software
-+  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-+*/
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/i2c.h>
-+#include <linux/slab.h>  /* kzalloc() */
-+#include <linux/sysfs.h> /* sysfs_create_group() */
-+
-+/* Addresses to scan */
-+static const unsigned short normal_i2c[] = {
-+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
-+	I2C_CLIENT_END
-+};
-+
-+/* Insmod parameters */
-+I2C_CLIENT_INSMOD;
-+
-+
-+/* Each client has this additional data */
-+struct pcf8575_data {
-+	struct i2c_client client;
-+	int write;		/* last written value, or error code */
-+};
-+
-+static int pcf8575_attach_adapter(struct i2c_adapter *adapter);
-+static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind);
-+static int pcf8575_detach_client(struct i2c_client *client);
-+
-+/* This is the driver that will be inserted */
-+static struct i2c_driver pcf8575_driver = {
-+	.driver = {
-+		.owner	= THIS_MODULE,
-+		.name	= "pcf8575",
-+	},
-+	.attach_adapter	= pcf8575_attach_adapter,
-+	.detach_client	= pcf8575_detach_client,
-+};
-+
-+/* following are the sysfs callback functions */
-+static ssize_t show_read(struct device *dev, struct device_attribute *attr,
-+			 char *buf)
-+{
-+	struct i2c_client *client = to_i2c_client(dev);
-+	u16 val;
-+	u8 iopin_state[2];
-+
-+	i2c_master_recv(client, iopin_state, 2);
-+
-+	val = iopin_state[0];
-+	val |= iopin_state[1] << 8;
-+
-+	return sprintf(buf, "%u\n", val);
-+}
-+
-+static DEVICE_ATTR(read, S_IRUGO, show_read, NULL);
-+
-+static ssize_t show_write(struct device *dev, struct device_attribute *attr,
-+			  char *buf)
-+{
-+	struct pcf8575_data *data = dev_get_drvdata(dev);
-+	if (data->write < 0)
-+		return data->write;
-+	return sprintf(buf, "%d\n", data->write);
-+}
-+
-+static ssize_t set_write(struct device *dev, struct device_attribute *attr,
-+			 const char *buf, size_t count)
-+{
-+	struct i2c_client *client = to_i2c_client(dev);
-+	struct pcf8575_data *data = i2c_get_clientdata(client);
-+	unsigned long val = simple_strtoul(buf, NULL, 10);
-+	u8 iopin_state[2];
-+
-+	if (val > 0xffff)
-+		return -EINVAL;
-+
-+	data->write = val;
-+
-+	iopin_state[0] = val & 0xFF;
-+	iopin_state[1] = val >> 8;
-+
-+	i2c_master_send(client, iopin_state, 2);
-+
-+	return count;
-+}
-+
-+static DEVICE_ATTR(write, S_IWUSR | S_IRUGO, show_write, set_write);
-+
-+static struct attribute *pcf8575_attributes[] = {
-+	&dev_attr_read.attr,
-+	&dev_attr_write.attr,
-+	NULL
-+};
-+
-+static const struct attribute_group pcf8575_attr_group = {
-+	.attrs = pcf8575_attributes,
-+};
-+
-+/*
-+ * Real code
-+ */
-+
-+static int pcf8575_attach_adapter(struct i2c_adapter *adapter)
-+{
-+	return i2c_probe(adapter, &addr_data, pcf8575_detect);
-+}
-+
-+/* This function is called by i2c_probe */
-+static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind)
-+{
-+	struct i2c_client *client;
-+	struct pcf8575_data *data;
-+	int err = 0;
-+
-+	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
-+		goto exit;
-+
-+	/* OK. For now, we presume we have a valid client. We now create the
-+	   client structure, even though we cannot fill it completely yet. */
-+	data = kzalloc(sizeof(struct pcf8575_data), GFP_KERNEL);
-+	if (!data) {
-+		err = -ENOMEM;
-+		goto exit;
-+	}
-+
-+	client = &data->client;
-+	i2c_set_clientdata(client, data);
-+	client->addr = address;
-+	client->adapter = adapter;
-+	client->driver = &pcf8575_driver;
-+	strlcpy(client->name, "pcf8575", I2C_NAME_SIZE);
-+	data->write = -EAGAIN;
-+
-+	/* This is the place to detect whether the chip at the specified
-+	   address really is a PCF8575 chip. However, there is no method known
-+	   to detect whether an I2C chip is a PCF8575 or any other I2C chip. */
-+
-+	/* Tell the I2C layer a new client has arrived */
-+	err = i2c_attach_client(client);
-+	if (err)
-+		goto exit_free;
-+
-+	/* Register sysfs hooks */
-+	err = sysfs_create_group(&client->dev.kobj, &pcf8575_attr_group);
-+	if (err)
-+		goto exit_detach;
-+
-+	return 0;
-+
-+exit_detach:
-+	i2c_detach_client(client);
-+exit_free:
-+	kfree(data);
-+exit:
-+	return err;
-+}
-+
-+static int pcf8575_detach_client(struct i2c_client *client)
-+{
-+	int err;
-+
-+	sysfs_remove_group(&client->dev.kobj, &pcf8575_attr_group);
-+
-+	err = i2c_detach_client(client);
-+	if (err)
-+		return err;
-+
-+	kfree(i2c_get_clientdata(client));
-+	return 0;
-+}
-+
-+static int __init pcf8575_init(void)
-+{
-+	return i2c_add_driver(&pcf8575_driver);
-+}
-+
-+static void __exit pcf8575_exit(void)
-+{
-+	i2c_del_driver(&pcf8575_driver);
-+}
-+
-+MODULE_AUTHOR("Michael Hennerich <hennerich at blackfin.uclinux.org>, "
-+	      "Bart Van Assche <bart.vanassche at gmail.com>");
-+MODULE_DESCRIPTION("pcf8575 driver");
-+MODULE_LICENSE("GPL");
-+
-+module_init(pcf8575_init);
-+module_exit(pcf8575_exit);
-diff --git a/drivers/i2c/chips/pcf8591.c b/drivers/i2c/chips/pcf8591.c
-index 4dc3637..865f440 100644
---- a/drivers/i2c/chips/pcf8591.c
-+++ b/drivers/i2c/chips/pcf8591.c
-@@ -27,7 +27,7 @@
- #include <linux/mutex.h>
- 
- /* Addresses to scan */
--static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
-+static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
- 					0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
- 
- /* Insmod parameters */
-diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
-index e320994..4154a91 100644
---- a/drivers/i2c/chips/tps65010.c
-+++ b/drivers/i2c/chips/tps65010.c
-@@ -31,7 +31,7 @@
- #include <linux/seq_file.h>
- #include <linux/mutex.h>
- 
--#include <asm/arch/tps65010.h>
-+#include <linux/i2c/tps65010.h>
- 
- /*-------------------------------------------------------------------------*/
- 
-diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
-index 3de4b19..a10fd27 100644
---- a/drivers/i2c/chips/tsl2550.c
-+++ b/drivers/i2c/chips/tsl2550.c
-@@ -432,11 +432,32 @@ static int __devexit tsl2550_remove(struct i2c_client *client)
- 	return 0;
- }
- 
-+#ifdef CONFIG_PM
-+
-+static int tsl2550_suspend(struct i2c_client *client, pm_message_t mesg)
-+{
-+	return tsl2550_set_power_state(client, 0);
-+}
-+
-+static int tsl2550_resume(struct i2c_client *client)
-+{
-+	return tsl2550_set_power_state(client, 1);
-+}
-+
-+#else
-+
-+#define tsl2550_suspend		NULL
-+#define tsl2550_resume		NULL
-+
-+#endif /* CONFIG_PM */
-+
- static struct i2c_driver tsl2550_driver = {
- 	.driver = {
- 		.name	= TSL2550_DRV_NAME,
- 		.owner	= THIS_MODULE,
- 	},
-+	.suspend = tsl2550_suspend,
-+	.resume	= tsl2550_resume,
- 	.probe	= tsl2550_probe,
- 	.remove	= __devexit_p(tsl2550_remove),
- };
-diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
-index b5e13e4..96da22e 100644
---- a/drivers/i2c/i2c-core.c
-+++ b/drivers/i2c/i2c-core.c
-@@ -33,14 +33,15 @@
- #include <linux/platform_device.h>
- #include <linux/mutex.h>
- #include <linux/completion.h>
-+#include <linux/hardirq.h>
-+#include <linux/irqflags.h>
- #include <asm/uaccess.h>
-+#include <asm/semaphore.h>
- 
- #include "i2c-core.h"
- 
- 
--static LIST_HEAD(adapters);
--static LIST_HEAD(drivers);
--static DEFINE_MUTEX(core_lists);
-+static DEFINE_MUTEX(core_lock);
- static DEFINE_IDR(i2c_adapter_idr);
- 
- #define is_newstyle_driver(d) ((d)->probe || (d)->remove)
-@@ -198,6 +199,25 @@ static struct bus_type i2c_bus_type = {
- 	.resume		= i2c_device_resume,
- };
- 
-+
-+/**
-+ * i2c_verify_client - return parameter as i2c_client, or NULL
-+ * @dev: device, probably from some driver model iterator
-+ *
-+ * When traversing the driver model tree, perhaps using driver model
-+ * iterators like @device_for_each_child(), you can't assume very much
-+ * about the nodes you find.  Use this function to avoid oopses caused
-+ * by wrongly treating some non-I2C device as an i2c_client.
-+ */
-+struct i2c_client *i2c_verify_client(struct device *dev)
-+{
-+	return (dev->bus == &i2c_bus_type)
-+			? to_i2c_client(dev)
-+			: NULL;
-+}
-+EXPORT_SYMBOL(i2c_verify_client);
-+
-+
- /**
-  * i2c_new_device - instantiate an i2c device for use with a new style driver
-  * @adap: the adapter managing the device
-@@ -276,6 +296,50 @@ void i2c_unregister_device(struct i2c_client *client)
- EXPORT_SYMBOL_GPL(i2c_unregister_device);
- 
- 
-+static int dummy_nop(struct i2c_client *client)
-+{
-+	return 0;
-+}
-+
-+static struct i2c_driver dummy_driver = {
-+	.driver.name	= "dummy",
-+	.probe		= dummy_nop,
-+	.remove		= dummy_nop,
-+};
-+
-+/**
-+ * i2c_new_dummy - return a new i2c device bound to a dummy driver
-+ * @adapter: the adapter managing the device
-+ * @address: seven bit address to be used
-+ * @type: optional label used for i2c_client.name
-+ * Context: can sleep
-+ *
-+ * This returns an I2C client bound to the "dummy" driver, intended for use
-+ * with devices that consume multiple addresses.  Examples of such chips
-+ * include various EEPROMS (like 24c04 and 24c08 models).
-+ *
-+ * These dummy devices have two main uses.  First, most I2C and SMBus calls
-+ * except i2c_transfer() need a client handle; the dummy will be that handle.
-+ * And second, this prevents the specified address from being bound to a
-+ * different driver.
-+ *
-+ * This returns the new i2c client, which should be saved for later use with
-+ * i2c_unregister_device(); or NULL to indicate an error.
-+ */
-+struct i2c_client *
-+i2c_new_dummy(struct i2c_adapter *adapter, u16 address, const char *type)
-+{
-+	struct i2c_board_info info = {
-+		.driver_name	= "dummy",
-+		.addr		= address,
-+	};
-+
-+	if (type)
-+		strlcpy(info.type, type, sizeof info.type);
-+	return i2c_new_device(adapter, &info);
-+}
-+EXPORT_SYMBOL_GPL(i2c_new_dummy);
-+
- /* ------------------------------------------------------------------------- */
- 
- /* I2C bus adapters -- one roots each I2C or SMBUS segment */
-@@ -320,18 +384,27 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
- 	mutex_unlock(&__i2c_board_lock);
- }
- 
-+static int i2c_do_add_adapter(struct device_driver *d, void *data)
-+{
-+	struct i2c_driver *driver = to_i2c_driver(d);
-+	struct i2c_adapter *adap = data;
-+
-+	if (driver->attach_adapter) {
-+		/* We ignore the return code; if it fails, too bad */
-+		driver->attach_adapter(adap);
-+	}
-+	return 0;
-+}
-+
- static int i2c_register_adapter(struct i2c_adapter *adap)
- {
--	int res = 0;
--	struct list_head   *item;
--	struct i2c_driver  *driver;
-+	int res = 0, dummy;
- 
- 	mutex_init(&adap->bus_lock);
- 	mutex_init(&adap->clist_lock);
- 	INIT_LIST_HEAD(&adap->clients);
- 
--	mutex_lock(&core_lists);
--	list_add_tail(&adap->list, &adapters);
-+	mutex_lock(&core_lock);
- 
- 	/* Add the adapter to the driver core.
- 	 * If the parent pointer is not set up,
-@@ -356,19 +429,14 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
- 		i2c_scan_static_board_info(adap);
- 
- 	/* let legacy drivers scan this bus for matching devices */
--	list_for_each(item,&drivers) {
--		driver = list_entry(item, struct i2c_driver, list);
--		if (driver->attach_adapter)
--			/* We ignore the return code; if it fails, too bad */
--			driver->attach_adapter(adap);
+-
+-static DEFINE_SPINLOCK(kvm_lock);
+-static LIST_HEAD(vm_list);
+-
+-static cpumask_t cpus_hardware_enabled;
+-
+-struct kvm_x86_ops *kvm_x86_ops;
+-struct kmem_cache *kvm_vcpu_cache;
+-EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+-
+-static __read_mostly struct preempt_ops kvm_preempt_ops;
+-
+-#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
+-
+-static struct kvm_stats_debugfs_item {
+-	const char *name;
+-	int offset;
+-	struct dentry *dentry;
+-} debugfs_entries[] = {
+-	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
+-	{ "pf_guest", STAT_OFFSET(pf_guest) },
+-	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
+-	{ "invlpg", STAT_OFFSET(invlpg) },
+-	{ "exits", STAT_OFFSET(exits) },
+-	{ "io_exits", STAT_OFFSET(io_exits) },
+-	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
+-	{ "signal_exits", STAT_OFFSET(signal_exits) },
+-	{ "irq_window", STAT_OFFSET(irq_window_exits) },
+-	{ "halt_exits", STAT_OFFSET(halt_exits) },
+-	{ "halt_wakeup", STAT_OFFSET(halt_wakeup) },
+-	{ "request_irq", STAT_OFFSET(request_irq_exits) },
+-	{ "irq_exits", STAT_OFFSET(irq_exits) },
+-	{ "light_exits", STAT_OFFSET(light_exits) },
+-	{ "efer_reload", STAT_OFFSET(efer_reload) },
+-	{ NULL }
+-};
+-
+-static struct dentry *debugfs_dir;
+-
+-#define MAX_IO_MSRS 256
+-
+-#define CR0_RESERVED_BITS						\
+-	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
+-			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
+-			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
+-#define CR4_RESERVED_BITS						\
+-	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
+-			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
+-			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
+-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+-
+-#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
+-#define EFER_RESERVED_BITS 0xfffffffffffff2fe
+-
+-#ifdef CONFIG_X86_64
+-// LDT or TSS descriptor in the GDT. 16 bytes.
+-struct segment_descriptor_64 {
+-	struct segment_descriptor s;
+-	u32 base_higher;
+-	u32 pad_zero;
+-};
+-
+-#endif
+-
+-static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
+-			   unsigned long arg);
+-
+-unsigned long segment_base(u16 selector)
+-{
+-	struct descriptor_table gdt;
+-	struct segment_descriptor *d;
+-	unsigned long table_base;
+-	typedef unsigned long ul;
+-	unsigned long v;
+-
+-	if (selector == 0)
+-		return 0;
+-
+-	asm ("sgdt %0" : "=m"(gdt));
+-	table_base = gdt.base;
+-
+-	if (selector & 4) {           /* from ldt */
+-		u16 ldt_selector;
+-
+-		asm ("sldt %0" : "=g"(ldt_selector));
+-		table_base = segment_base(ldt_selector);
 -	}
-+	dummy = bus_for_each_drv(&i2c_bus_type, NULL, adap,
-+				 i2c_do_add_adapter);
- 
- out_unlock:
--	mutex_unlock(&core_lists);
-+	mutex_unlock(&core_lock);
- 	return res;
- 
- out_list:
--	list_del(&adap->list);
- 	idr_remove(&i2c_adapter_idr, adap->nr);
- 	goto out_unlock;
- }
-@@ -394,11 +462,11 @@ retry:
- 	if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
- 		return -ENOMEM;
- 
--	mutex_lock(&core_lists);
-+	mutex_lock(&core_lock);
- 	/* "above" here means "above or equal to", sigh */
- 	res = idr_get_new_above(&i2c_adapter_idr, adapter,
- 				__i2c_first_dynamic_bus_num, &id);
--	mutex_unlock(&core_lists);
-+	mutex_unlock(&core_lock);
- 
- 	if (res < 0) {
- 		if (res == -EAGAIN)
-@@ -443,7 +511,7 @@ retry:
- 	if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
- 		return -ENOMEM;
- 
--	mutex_lock(&core_lists);
-+	mutex_lock(&core_lock);
- 	/* "above" here means "above or equal to", sigh;
- 	 * we need the "equal to" result to force the result
- 	 */
-@@ -452,7 +520,7 @@ retry:
- 		status = -EBUSY;
- 		idr_remove(&i2c_adapter_idr, id);
- 	}
--	mutex_unlock(&core_lists);
-+	mutex_unlock(&core_lock);
- 	if (status == -EAGAIN)
- 		goto retry;
- 
-@@ -462,6 +530,21 @@ retry:
- }
- EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
- 
-+static int i2c_do_del_adapter(struct device_driver *d, void *data)
-+{
-+	struct i2c_driver *driver = to_i2c_driver(d);
-+	struct i2c_adapter *adapter = data;
-+	int res;
-+
-+	if (!driver->detach_adapter)
-+		return 0;
-+	res = driver->detach_adapter(adapter);
-+	if (res)
-+		dev_err(&adapter->dev, "detach_adapter failed (%d) "
-+			"for driver [%s]\n", res, driver->driver.name);
-+	return res;
-+}
-+
- /**
-  * i2c_del_adapter - unregister I2C adapter
-  * @adap: the adapter being unregistered
-@@ -473,35 +556,24 @@ EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
- int i2c_del_adapter(struct i2c_adapter *adap)
- {
- 	struct list_head  *item, *_n;
--	struct i2c_adapter *adap_from_list;
--	struct i2c_driver *driver;
- 	struct i2c_client *client;
- 	int res = 0;
- 
--	mutex_lock(&core_lists);
-+	mutex_lock(&core_lock);
- 
- 	/* First make sure that this adapter was ever added */
--	list_for_each_entry(adap_from_list, &adapters, list) {
--		if (adap_from_list == adap)
--			break;
+-	d = (struct segment_descriptor *)(table_base + (selector & ~7));
+-	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
+-#ifdef CONFIG_X86_64
+-	if (d->system == 0
+-	    && (d->type == 2 || d->type == 9 || d->type == 11))
+-		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
+-#endif
+-	return v;
+-}
+-EXPORT_SYMBOL_GPL(segment_base);
+-
+-static inline int valid_vcpu(int n)
+-{
+-	return likely(n >= 0 && n < KVM_MAX_VCPUS);
+-}
+-
+-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+-{
+-	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+-		return;
+-
+-	vcpu->guest_fpu_loaded = 1;
+-	fx_save(&vcpu->host_fx_image);
+-	fx_restore(&vcpu->guest_fx_image);
+-}
+-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
+-
+-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+-{
+-	if (!vcpu->guest_fpu_loaded)
+-		return;
+-
+-	vcpu->guest_fpu_loaded = 0;
+-	fx_save(&vcpu->guest_fx_image);
+-	fx_restore(&vcpu->host_fx_image);
+-}
+-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
+-
+-/*
+- * Switches to specified vcpu, until a matching vcpu_put()
+- */
+-static void vcpu_load(struct kvm_vcpu *vcpu)
+-{
+-	int cpu;
+-
+-	mutex_lock(&vcpu->mutex);
+-	cpu = get_cpu();
+-	preempt_notifier_register(&vcpu->preempt_notifier);
+-	kvm_x86_ops->vcpu_load(vcpu, cpu);
+-	put_cpu();
+-}
+-
+-static void vcpu_put(struct kvm_vcpu *vcpu)
+-{
+-	preempt_disable();
+-	kvm_x86_ops->vcpu_put(vcpu);
+-	preempt_notifier_unregister(&vcpu->preempt_notifier);
+-	preempt_enable();
+-	mutex_unlock(&vcpu->mutex);
+-}
+-
+-static void ack_flush(void *_completed)
+-{
+-}
+-
+-void kvm_flush_remote_tlbs(struct kvm *kvm)
+-{
+-	int i, cpu;
+-	cpumask_t cpus;
+-	struct kvm_vcpu *vcpu;
+-
+-	cpus_clear(cpus);
+-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+-		vcpu = kvm->vcpus[i];
+-		if (!vcpu)
+-			continue;
+-		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
+-			continue;
+-		cpu = vcpu->cpu;
+-		if (cpu != -1 && cpu != raw_smp_processor_id())
+-			cpu_set(cpu, cpus);
 -	}
--	if (adap_from_list != adap) {
-+	if (idr_find(&i2c_adapter_idr, adap->nr) != adap) {
- 		pr_debug("i2c-core: attempting to delete unregistered "
- 			 "adapter [%s]\n", adap->name);
- 		res = -EINVAL;
- 		goto out_unlock;
- 	}
- 
--	list_for_each(item,&drivers) {
--		driver = list_entry(item, struct i2c_driver, list);
--		if (driver->detach_adapter)
--			if ((res = driver->detach_adapter(adap))) {
--				dev_err(&adap->dev, "detach_adapter failed "
--					"for driver [%s]\n",
--					driver->driver.name);
--				goto out_unlock;
--			}
+-	smp_call_function_mask(cpus, ack_flush, NULL, 1);
+-}
+-
+-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
+-{
+-	struct page *page;
+-	int r;
+-
+-	mutex_init(&vcpu->mutex);
+-	vcpu->cpu = -1;
+-	vcpu->mmu.root_hpa = INVALID_PAGE;
+-	vcpu->kvm = kvm;
+-	vcpu->vcpu_id = id;
+-	if (!irqchip_in_kernel(kvm) || id == 0)
+-		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+-	else
+-		vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
+-	init_waitqueue_head(&vcpu->wq);
+-
+-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+-	if (!page) {
+-		r = -ENOMEM;
+-		goto fail;
 -	}
-+	/* Tell drivers about this removal */
-+	res = bus_for_each_drv(&i2c_bus_type, NULL, adap,
-+			       i2c_do_del_adapter);
-+	if (res)
-+		goto out_unlock;
- 
- 	/* detach any active clients. This must be done first, because
- 	 * it can fail; in which case we give up. */
-@@ -529,7 +601,6 @@ int i2c_del_adapter(struct i2c_adapter *adap)
- 	/* clean up the sysfs representation */
- 	init_completion(&adap->dev_released);
- 	device_unregister(&adap->dev);
--	list_del(&adap->list);
- 
- 	/* wait for sysfs to drop all references */
- 	wait_for_completion(&adap->dev_released);
-@@ -540,7 +611,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
- 	dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
- 
-  out_unlock:
--	mutex_unlock(&core_lists);
-+	mutex_unlock(&core_lock);
- 	return res;
- }
- EXPORT_SYMBOL(i2c_del_adapter);
-@@ -583,21 +654,23 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
- 	if (res)
- 		return res;
- 
--	mutex_lock(&core_lists);
-+	mutex_lock(&core_lock);
- 
--	list_add_tail(&driver->list,&drivers);
- 	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
- 
- 	/* legacy drivers scan i2c busses directly */
- 	if (driver->attach_adapter) {
- 		struct i2c_adapter *adapter;
- 
--		list_for_each_entry(adapter, &adapters, list) {
-+		down(&i2c_adapter_class.sem);
-+		list_for_each_entry(adapter, &i2c_adapter_class.devices,
-+				    dev.node) {
- 			driver->attach_adapter(adapter);
- 		}
-+		up(&i2c_adapter_class.sem);
- 	}
- 
--	mutex_unlock(&core_lists);
-+	mutex_unlock(&core_lock);
- 	return 0;
- }
- EXPORT_SYMBOL(i2c_register_driver);
-@@ -609,11 +682,11 @@ EXPORT_SYMBOL(i2c_register_driver);
-  */
- void i2c_del_driver(struct i2c_driver *driver)
- {
--	struct list_head   *item1, *item2, *_n;
-+	struct list_head   *item2, *_n;
- 	struct i2c_client  *client;
- 	struct i2c_adapter *adap;
- 
--	mutex_lock(&core_lists);
-+	mutex_lock(&core_lock);
- 
- 	/* new-style driver? */
- 	if (is_newstyle_driver(driver))
-@@ -623,8 +696,8 @@ void i2c_del_driver(struct i2c_driver *driver)
- 	 * attached. If so, detach them to be able to kill the driver
- 	 * afterwards.
- 	 */
--	list_for_each(item1,&adapters) {
--		adap = list_entry(item1, struct i2c_adapter, list);
-+	down(&i2c_adapter_class.sem);
-+	list_for_each_entry(adap, &i2c_adapter_class.devices, dev.node) {
- 		if (driver->detach_adapter) {
- 			if (driver->detach_adapter(adap)) {
- 				dev_err(&adap->dev, "detach_adapter failed "
-@@ -648,40 +721,31 @@ void i2c_del_driver(struct i2c_driver *driver)
- 			}
- 		}
- 	}
-+	up(&i2c_adapter_class.sem);
- 
-  unregister:
- 	driver_unregister(&driver->driver);
--	list_del(&driver->list);
- 	pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name);
- 
--	mutex_unlock(&core_lists);
-+	mutex_unlock(&core_lock);
- }
- EXPORT_SYMBOL(i2c_del_driver);
- 
- /* ------------------------------------------------------------------------- */
- 
--static int __i2c_check_addr(struct i2c_adapter *adapter, unsigned int addr)
-+static int __i2c_check_addr(struct device *dev, void *addrp)
- {
--	struct list_head   *item;
--	struct i2c_client  *client;
-+	struct i2c_client	*client = i2c_verify_client(dev);
-+	int			addr = *(int *)addrp;
- 
--	list_for_each(item,&adapter->clients) {
--		client = list_entry(item, struct i2c_client, list);
--		if (client->addr == addr)
--			return -EBUSY;
+-	vcpu->run = page_address(page);
+-
+-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+-	if (!page) {
+-		r = -ENOMEM;
+-		goto fail_free_run;
 -	}
-+	if (client && client->addr == addr)
-+		return -EBUSY;
- 	return 0;
- }
- 
- static int i2c_check_addr(struct i2c_adapter *adapter, int addr)
- {
--	int rval;
+-	vcpu->pio_data = page_address(page);
 -
--	mutex_lock(&adapter->clist_lock);
--	rval = __i2c_check_addr(adapter, addr);
--	mutex_unlock(&adapter->clist_lock);
+-	r = kvm_mmu_create(vcpu);
+-	if (r < 0)
+-		goto fail_free_pio_data;
 -
--	return rval;
-+	return device_for_each_child(&adapter->dev, &addr, __i2c_check_addr);
- }
- 
- int i2c_attach_client(struct i2c_client *client)
-@@ -689,15 +753,6 @@ int i2c_attach_client(struct i2c_client *client)
- 	struct i2c_adapter *adapter = client->adapter;
- 	int res = 0;
- 
--	mutex_lock(&adapter->clist_lock);
--	if (__i2c_check_addr(client->adapter, client->addr)) {
--		res = -EBUSY;
--		goto out_unlock;
--	}
--	list_add_tail(&client->list,&adapter->clients);
+-	return 0;
 -
--	client->usage_count = 0;
+-fail_free_pio_data:
+-	free_page((unsigned long)vcpu->pio_data);
+-fail_free_run:
+-	free_page((unsigned long)vcpu->run);
+-fail:
+-	return -ENOMEM;
+-}
+-EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 -
- 	client->dev.parent = &client->adapter->dev;
- 	client->dev.bus = &i2c_bus_type;
- 
-@@ -712,13 +767,17 @@ int i2c_attach_client(struct i2c_client *client)
- 
- 	snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id),
- 		"%d-%04x", i2c_adapter_id(adapter), client->addr);
--	dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
--		client->name, client->dev.bus_id);
- 	res = device_register(&client->dev);
- 	if (res)
--		goto out_list;
-+		goto out_err;
-+
-+	mutex_lock(&adapter->clist_lock);
-+	list_add_tail(&client->list, &adapter->clients);
- 	mutex_unlock(&adapter->clist_lock);
- 
-+	dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
-+		client->name, client->dev.bus_id);
-+
- 	if (adapter->client_register)  {
- 		if (adapter->client_register(client)) {
- 			dev_dbg(&adapter->dev, "client_register "
-@@ -729,12 +788,9 @@ int i2c_attach_client(struct i2c_client *client)
- 
- 	return 0;
- 
--out_list:
--	list_del(&client->list);
-+out_err:
- 	dev_err(&adapter->dev, "Failed to attach i2c client %s at 0x%02x "
- 		"(%d)\n", client->name, client->addr, res);
--out_unlock:
--	mutex_unlock(&adapter->clist_lock);
- 	return res;
- }
- EXPORT_SYMBOL(i2c_attach_client);
-@@ -744,12 +800,6 @@ int i2c_detach_client(struct i2c_client *client)
- 	struct i2c_adapter *adapter = client->adapter;
- 	int res = 0;
- 
--	if (client->usage_count > 0) {
--		dev_warn(&client->dev, "Client [%s] still busy, "
--			 "can't detach\n", client->name);
--		return -EBUSY;
--	}
+-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
+-{
+-	kvm_mmu_destroy(vcpu);
+-	if (vcpu->apic)
+-		hrtimer_cancel(&vcpu->apic->timer.dev);
+-	kvm_free_apic(vcpu->apic);
+-	free_page((unsigned long)vcpu->pio_data);
+-	free_page((unsigned long)vcpu->run);
+-}
+-EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
 -
- 	if (adapter->client_unregister)  {
- 		res = adapter->client_unregister(client);
- 		if (res) {
-@@ -762,9 +812,10 @@ int i2c_detach_client(struct i2c_client *client)
- 
- 	mutex_lock(&adapter->clist_lock);
- 	list_del(&client->list);
-+	mutex_unlock(&adapter->clist_lock);
-+
- 	init_completion(&client->released);
- 	device_unregister(&client->dev);
--	mutex_unlock(&adapter->clist_lock);
- 	wait_for_completion(&client->released);
- 
-  out:
-@@ -772,72 +823,58 @@ int i2c_detach_client(struct i2c_client *client)
- }
- EXPORT_SYMBOL(i2c_detach_client);
- 
--static int i2c_inc_use_client(struct i2c_client *client)
-+/**
-+ * i2c_use_client - increments the reference count of the i2c client structure
-+ * @client: the client being referenced
-+ *
-+ * Each live reference to a client should be refcounted. The driver model does
-+ * that automatically as part of driver binding, so that most drivers don't
-+ * need to do this explicitly: they hold a reference until they're unbound
-+ * from the device.
-+ *
-+ * A pointer to the client with the incremented reference counter is returned.
-+ */
-+struct i2c_client *i2c_use_client(struct i2c_client *client)
- {
+-static struct kvm *kvm_create_vm(void)
+-{
+-	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
 -
--	if (!try_module_get(client->driver->driver.owner))
--		return -ENODEV;
--	if (!try_module_get(client->adapter->owner)) {
--		module_put(client->driver->driver.owner);
--		return -ENODEV;
+-	if (!kvm)
+-		return ERR_PTR(-ENOMEM);
+-
+-	kvm_io_bus_init(&kvm->pio_bus);
+-	mutex_init(&kvm->lock);
+-	INIT_LIST_HEAD(&kvm->active_mmu_pages);
+-	kvm_io_bus_init(&kvm->mmio_bus);
+-	spin_lock(&kvm_lock);
+-	list_add(&kvm->vm_list, &vm_list);
+-	spin_unlock(&kvm_lock);
+-	return kvm;
+-}
+-
+-/*
+- * Free any memory in @free but not in @dont.
+- */
+-static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
+-				  struct kvm_memory_slot *dont)
+-{
+-	int i;
+-
+-	if (!dont || free->phys_mem != dont->phys_mem)
+-		if (free->phys_mem) {
+-			for (i = 0; i < free->npages; ++i)
+-				if (free->phys_mem[i])
+-					__free_page(free->phys_mem[i]);
+-			vfree(free->phys_mem);
+-		}
+-
+-	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
+-		vfree(free->dirty_bitmap);
+-
+-	free->phys_mem = NULL;
+-	free->npages = 0;
+-	free->dirty_bitmap = NULL;
+-}
+-
+-static void kvm_free_physmem(struct kvm *kvm)
+-{
+-	int i;
+-
+-	for (i = 0; i < kvm->nmemslots; ++i)
+-		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+-}
+-
+-static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
+-		if (vcpu->pio.guest_pages[i]) {
+-			__free_page(vcpu->pio.guest_pages[i]);
+-			vcpu->pio.guest_pages[i] = NULL;
+-		}
+-}
+-
+-static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
+-{
+-	vcpu_load(vcpu);
+-	kvm_mmu_unload(vcpu);
+-	vcpu_put(vcpu);
+-}
+-
+-static void kvm_free_vcpus(struct kvm *kvm)
+-{
+-	unsigned int i;
+-
+-	/*
+-	 * Unpin any mmu pages first.
+-	 */
+-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
+-		if (kvm->vcpus[i])
+-			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
+-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+-		if (kvm->vcpus[i]) {
+-			kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+-			kvm->vcpus[i] = NULL;
+-		}
 -	}
 -
+-}
+-
+-static void kvm_destroy_vm(struct kvm *kvm)
+-{
+-	spin_lock(&kvm_lock);
+-	list_del(&kvm->vm_list);
+-	spin_unlock(&kvm_lock);
+-	kvm_io_bus_destroy(&kvm->pio_bus);
+-	kvm_io_bus_destroy(&kvm->mmio_bus);
+-	kfree(kvm->vpic);
+-	kfree(kvm->vioapic);
+-	kvm_free_vcpus(kvm);
+-	kvm_free_physmem(kvm);
+-	kfree(kvm);
+-}
+-
+-static int kvm_vm_release(struct inode *inode, struct file *filp)
+-{
+-	struct kvm *kvm = filp->private_data;
+-
+-	kvm_destroy_vm(kvm);
 -	return 0;
-+	get_device(&client->dev);
-+	return client;
- }
-+EXPORT_SYMBOL(i2c_use_client);
- 
--static void i2c_dec_use_client(struct i2c_client *client)
-+/**
-+ * i2c_release_client - release a use of the i2c client structure
-+ * @client: the client being no longer referenced
-+ *
-+ * Must be called when a user of a client is finished with it.
-+ */
-+void i2c_release_client(struct i2c_client *client)
- {
--	module_put(client->driver->driver.owner);
--	module_put(client->adapter->owner);
-+	put_device(&client->dev);
- }
-+EXPORT_SYMBOL(i2c_release_client);
- 
--int i2c_use_client(struct i2c_client *client)
+-}
+-
+-static void inject_gp(struct kvm_vcpu *vcpu)
 -{
+-	kvm_x86_ops->inject_gp(vcpu, 0);
+-}
+-
+-/*
+- * Load the pae pdptrs.  Return true is they are all valid.
+- */
+-static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+-{
+-	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
+-	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
+-	int i;
+-	u64 *pdpt;
 -	int ret;
+-	struct page *page;
+-	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
 -
--	ret = i2c_inc_use_client(client);
--	if (ret)
--		return ret;
+-	mutex_lock(&vcpu->kvm->lock);
+-	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
+-	if (!page) {
+-		ret = 0;
+-		goto out;
+-	}
 -
--	client->usage_count++;
+-	pdpt = kmap_atomic(page, KM_USER0);
+-	memcpy(pdpte, pdpt+offset, sizeof(pdpte));
+-	kunmap_atomic(pdpt, KM_USER0);
 -
--	return 0;
+-	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
+-		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
+-			ret = 0;
+-			goto out;
+-		}
+-	}
+-	ret = 1;
+-
+-	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
+-out:
+-	mutex_unlock(&vcpu->kvm->lock);
+-
+-	return ret;
 -}
--EXPORT_SYMBOL(i2c_use_client);
-+struct i2c_cmd_arg {
-+	unsigned	cmd;
-+	void		*arg;
-+};
- 
--int i2c_release_client(struct i2c_client *client)
-+static int i2c_cmd(struct device *dev, void *_arg)
- {
--	if (!client->usage_count) {
--		pr_debug("i2c-core: %s used one too many times\n",
--			 __FUNCTION__);
--		return -EPERM;
+-
+-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+-{
+-	if (cr0 & CR0_RESERVED_BITS) {
+-		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
+-		       cr0, vcpu->cr0);
+-		inject_gp(vcpu);
+-		return;
 -	}
 -
--	client->usage_count--;
--	i2c_dec_use_client(client);
-+	struct i2c_client	*client = i2c_verify_client(dev);
-+	struct i2c_cmd_arg	*arg = _arg;
- 
-+	if (client && client->driver && client->driver->command)
-+		client->driver->command(client, arg->cmd, arg->arg);
- 	return 0;
- }
--EXPORT_SYMBOL(i2c_release_client);
- 
- void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg)
- {
--	struct list_head  *item;
--	struct i2c_client *client;
-+	struct i2c_cmd_arg	cmd_arg;
- 
--	mutex_lock(&adap->clist_lock);
--	list_for_each(item,&adap->clients) {
--		client = list_entry(item, struct i2c_client, list);
--		if (!try_module_get(client->driver->driver.owner))
--			continue;
--		if (NULL != client->driver->command) {
--			mutex_unlock(&adap->clist_lock);
--			client->driver->command(client,cmd,arg);
--			mutex_lock(&adap->clist_lock);
+-	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
+-		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
+-		inject_gp(vcpu);
+-		return;
+-	}
+-
+-	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
+-		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
+-		       "and a clear PE flag\n");
+-		inject_gp(vcpu);
+-		return;
+-	}
+-
+-	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
+-#ifdef CONFIG_X86_64
+-		if ((vcpu->shadow_efer & EFER_LME)) {
+-			int cs_db, cs_l;
+-
+-			if (!is_pae(vcpu)) {
+-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
+-				       "in long mode while PAE is disabled\n");
+-				inject_gp(vcpu);
+-				return;
+-			}
+-			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+-			if (cs_l) {
+-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
+-				       "in long mode while CS.L == 1\n");
+-				inject_gp(vcpu);
+-				return;
+-
+-			}
+-		} else
+-#endif
+-		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
+-			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
+-			       "reserved bits\n");
+-			inject_gp(vcpu);
+-			return;
 -		}
--		module_put(client->driver->driver.owner);
--       }
--       mutex_unlock(&adap->clist_lock);
-+	cmd_arg.cmd = cmd;
-+	cmd_arg.arg = arg;
-+	device_for_each_child(&adap->dev, &cmd_arg, i2c_cmd);
- }
- EXPORT_SYMBOL(i2c_clients_command);
- 
-@@ -848,11 +885,24 @@ static int __init i2c_init(void)
- 	retval = bus_register(&i2c_bus_type);
- 	if (retval)
- 		return retval;
--	return class_register(&i2c_adapter_class);
-+	retval = class_register(&i2c_adapter_class);
-+	if (retval)
-+		goto bus_err;
-+	retval = i2c_add_driver(&dummy_driver);
-+	if (retval)
-+		goto class_err;
-+	return 0;
-+
-+class_err:
-+	class_unregister(&i2c_adapter_class);
-+bus_err:
-+	bus_unregister(&i2c_bus_type);
-+	return retval;
- }
- 
- static void __exit i2c_exit(void)
- {
-+	i2c_del_driver(&dummy_driver);
- 	class_unregister(&i2c_adapter_class);
- 	bus_unregister(&i2c_bus_type);
- }
-@@ -879,7 +929,15 @@ int i2c_transfer(struct i2c_adapter * adap, struct i2c_msg *msgs, int num)
- 		}
- #endif
- 
--		mutex_lock_nested(&adap->bus_lock, adap->level);
-+		if (in_atomic() || irqs_disabled()) {
-+			ret = mutex_trylock(&adap->bus_lock);
-+			if (!ret)
-+				/* I2C activity is ongoing. */
-+				return -EAGAIN;
-+		} else {
-+			mutex_lock_nested(&adap->bus_lock, adap->level);
-+		}
-+
- 		ret = adap->algo->master_xfer(adap,msgs,num);
- 		mutex_unlock(&adap->bus_lock);
- 
-@@ -978,7 +1036,7 @@ static int i2c_probe_address(struct i2c_adapter *adapter, int addr, int kind,
- }
- 
- int i2c_probe(struct i2c_adapter *adapter,
--	      struct i2c_client_address_data *address_data,
-+	      const struct i2c_client_address_data *address_data,
- 	      int (*found_proc) (struct i2c_adapter *, int, int))
- {
- 	int i, err;
-@@ -987,7 +1045,7 @@ int i2c_probe(struct i2c_adapter *adapter,
- 	/* Force entries are done first, and are not affected by ignore
- 	   entries */
- 	if (address_data->forces) {
--		unsigned short **forces = address_data->forces;
-+		const unsigned short * const *forces = address_data->forces;
- 		int kind;
- 
- 		for (kind = 0; forces[kind]; kind++) {
-@@ -1085,7 +1143,6 @@ i2c_new_probed_device(struct i2c_adapter *adap,
- 		return NULL;
- 	}
- 
--	mutex_lock(&adap->clist_lock);
- 	for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
- 		/* Check address validity */
- 		if (addr_list[i] < 0x03 || addr_list[i] > 0x77) {
-@@ -1095,7 +1152,7 @@ i2c_new_probed_device(struct i2c_adapter *adap,
- 		}
- 
- 		/* Check address availability */
--		if (__i2c_check_addr(adap, addr_list[i])) {
-+		if (i2c_check_addr(adap, addr_list[i])) {
- 			dev_dbg(&adap->dev, "Address 0x%02x already in "
- 				"use, not probing\n", addr_list[i]);
- 			continue;
-@@ -1123,7 +1180,6 @@ i2c_new_probed_device(struct i2c_adapter *adap,
- 				break;
- 		}
- 	}
--	mutex_unlock(&adap->clist_lock);
- 
- 	if (addr_list[i] == I2C_CLIENT_END) {
- 		dev_dbg(&adap->dev, "Probing failed, no device found\n");
-@@ -1139,12 +1195,12 @@ struct i2c_adapter* i2c_get_adapter(int id)
- {
- 	struct i2c_adapter *adapter;
- 
--	mutex_lock(&core_lists);
-+	mutex_lock(&core_lock);
- 	adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id);
- 	if (adapter && !try_module_get(adapter->owner))
- 		adapter = NULL;
- 
--	mutex_unlock(&core_lists);
-+	mutex_unlock(&core_lock);
- 	return adapter;
- }
- EXPORT_SYMBOL(i2c_get_adapter);
-diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
-index df540d5..393e679 100644
---- a/drivers/i2c/i2c-dev.c
-+++ b/drivers/i2c/i2c-dev.c
-@@ -182,27 +182,22 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c
- 	return ret;
- }
- 
-+static int i2cdev_check(struct device *dev, void *addrp)
-+{
-+	struct i2c_client *client = i2c_verify_client(dev);
-+
-+	if (!client || client->addr != *(unsigned int *)addrp)
-+		return 0;
-+
-+	return dev->driver ? -EBUSY : 0;
-+}
-+
- /* This address checking function differs from the one in i2c-core
-    in that it considers an address with a registered device, but no
--   bound driver, as NOT busy. */
-+   driver bound to it, as NOT busy. */
- static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
- {
--	struct list_head *item;
--	struct i2c_client *client;
--	int res = 0;
 -
--	mutex_lock(&adapter->clist_lock);
--	list_for_each(item, &adapter->clients) {
--		client = list_entry(item, struct i2c_client, list);
--		if (client->addr == addr) {
--			if (client->driver)
--				res = -EBUSY;
--			break;
+-	}
+-
+-	kvm_x86_ops->set_cr0(vcpu, cr0);
+-	vcpu->cr0 = cr0;
+-
+-	mutex_lock(&vcpu->kvm->lock);
+-	kvm_mmu_reset_context(vcpu);
+-	mutex_unlock(&vcpu->kvm->lock);
+-	return;
+-}
+-EXPORT_SYMBOL_GPL(set_cr0);
+-
+-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
+-{
+-	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
+-}
+-EXPORT_SYMBOL_GPL(lmsw);
+-
+-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+-{
+-	if (cr4 & CR4_RESERVED_BITS) {
+-		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
+-		inject_gp(vcpu);
+-		return;
+-	}
+-
+-	if (is_long_mode(vcpu)) {
+-		if (!(cr4 & X86_CR4_PAE)) {
+-			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
+-			       "in long mode\n");
+-			inject_gp(vcpu);
+-			return;
 -		}
+-	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
+-		   && !load_pdptrs(vcpu, vcpu->cr3)) {
+-		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
+-		inject_gp(vcpu);
+-		return;
 -	}
--	mutex_unlock(&adapter->clist_lock);
 -
--	return res;
-+	return device_for_each_child(&adapter->dev, &addr, i2cdev_check);
- }
- 
- static int i2cdev_ioctl(struct inode *inode, struct file *file,
-diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
-index fb06555..64df55e 100644
---- a/drivers/ide/Kconfig
-+++ b/drivers/ide/Kconfig
-@@ -325,7 +325,7 @@ config BLK_DEV_PLATFORM
- 	  If unsure, say N.
- 
- config BLK_DEV_CMD640
--	bool "CMD640 chipset bugfix/support"
-+	tristate "CMD640 chipset bugfix/support"
- 	depends on X86
- 	---help---
- 	  The CMD-Technologies CMD640 IDE chip is used on many common 486 and
-@@ -359,9 +359,8 @@ config BLK_DEV_CMD640_ENHANCED
- 	  Otherwise say N.
- 
- config BLK_DEV_IDEPNP
--	bool "PNP EIDE support"
-+	tristate "PNP EIDE support"
- 	depends on PNP
--	select IDE_GENERIC
- 	help
- 	  If you have a PnP (Plug and Play) compatible EIDE card and
- 	  would like the kernel to automatically detect and activate
-@@ -374,19 +373,20 @@ comment "PCI IDE chipsets support"
- config BLK_DEV_IDEPCI
- 	bool
- 
--config IDEPCI_SHARE_IRQ
--	bool "Sharing PCI IDE interrupts support"
--	depends on BLK_DEV_IDEPCI
-+config IDEPCI_PCIBUS_ORDER
-+	bool "Probe IDE PCI devices in the PCI bus order (DEPRECATED)"
-+	depends on BLK_DEV_IDE=y && BLK_DEV_IDEPCI
-+	default y
- 	help
--	  Some ATA/IDE chipsets have hardware support which allows for
--	  sharing a single IRQ with other cards. To enable support for
--	  this in the ATA/IDE driver, say Y here.
-+	  Probe IDE PCI devices in the order in which they appear on the
-+	  PCI bus (i.e. 00:1f.1 PCI device before 02:01.0 PCI device)
-+	  instead of the order in which IDE PCI host drivers are loaded.
- 
--	  It is safe to say Y to this question, in most cases.
--	  If unsure, say N.
-+	  Please note that this method of assuring stable naming of
-+	  IDE devices is unreliable and use other means for achieving
-+	  it (i.e. udev).
- 
--config IDEPCI_PCIBUS_ORDER
--	def_bool BLK_DEV_IDE=y && BLK_DEV_IDEPCI
-+	  If in doubt, say N.
- 
- # TODO: split it on per host driver config options (or module parameters)
- config BLK_DEV_OFFBOARD
-@@ -707,7 +707,6 @@ config BLK_DEV_SVWKS
- config BLK_DEV_SGIIOC4
- 	tristate "Silicon Graphics IOC4 chipset ATA/ATAPI support"
- 	depends on (IA64_SGI_SN2 || IA64_GENERIC) && SGI_IOC4
--	select IDEPCI_SHARE_IRQ
- 	select BLK_DEV_IDEDMA_PCI
- 	help
- 	  This driver adds PIO & MultiMode DMA-2 support for the SGI IOC4
-@@ -801,7 +800,7 @@ config BLK_DEV_CELLEB
- endif
- 
- config BLK_DEV_IDE_PMAC
--	bool "Builtin PowerMac IDE support"
-+	tristate "Builtin PowerMac IDE support"
- 	depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y
- 	help
- 	  This driver provides support for the built-in IDE controller on
-@@ -855,8 +854,9 @@ config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
-        depends on BLK_DEV_IDE_AU1XXX
- 
- config IDE_ARM
--	def_bool ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
--	select IDE_GENERIC
-+	tristate "ARM IDE support"
-+	depends on ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
-+	default y
- 
- config BLK_DEV_IDE_ICSIDE
- 	tristate "ICS IDE interface support"
-@@ -888,10 +888,9 @@ config BLK_DEV_IDE_BAST
- 	  Simtec BAST or the Thorcom VR1000
- 
- config ETRAX_IDE
--	bool "ETRAX IDE support"
-+	tristate "ETRAX IDE support"
- 	depends on CRIS && BROKEN
- 	select BLK_DEV_IDEDMA
--	select IDE_GENERIC
- 	help
- 	  Enables the ETRAX IDE driver.
- 
-@@ -923,17 +922,15 @@ config ETRAX_IDE_G27_RESET
- endchoice
- 
- config IDE_H8300
--	bool "H8300 IDE support"
-+	tristate "H8300 IDE support"
- 	depends on H8300
--	select IDE_GENERIC
- 	default y
- 	help
- 	  Enables the H8300 IDE driver.
- 
- config BLK_DEV_GAYLE
--	bool "Amiga Gayle IDE interface support"
-+	tristate "Amiga Gayle IDE interface support"
- 	depends on AMIGA
--	select IDE_GENERIC
- 	help
- 	  This is the IDE driver for the Amiga Gayle IDE interface. It supports
- 	  both the `A1200 style' and `A4000 style' of the Gayle IDE interface,
-@@ -963,9 +960,8 @@ config BLK_DEV_IDEDOUBLER
- 	  runtime using the "ide=doubler" kernel boot parameter.
- 
- config BLK_DEV_BUDDHA
--	bool "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
-+	tristate "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
- 	depends on ZORRO && EXPERIMENTAL
--	select IDE_GENERIC
- 	help
- 	  This is the IDE driver for the IDE interfaces on the Buddha, 
- 	  Catweasel and X-Surf expansion boards.  It supports up to two interfaces 
-@@ -976,9 +972,8 @@ config BLK_DEV_BUDDHA
- 	  to one of its IDE interfaces.
- 
- config BLK_DEV_FALCON_IDE
--	bool "Falcon IDE interface support"
-+	tristate "Falcon IDE interface support"
- 	depends on ATARI
--	select IDE_GENERIC
- 	help
- 	  This is the IDE driver for the builtin IDE interface on the Atari
- 	  Falcon. Say Y if you have a Falcon and want to use IDE devices (hard
-@@ -986,9 +981,8 @@ config BLK_DEV_FALCON_IDE
- 	  interface.
- 
- config BLK_DEV_MAC_IDE
--	bool "Macintosh Quadra/Powerbook IDE interface support"
-+	tristate "Macintosh Quadra/Powerbook IDE interface support"
- 	depends on MAC
--	select IDE_GENERIC
- 	help
- 	  This is the IDE driver for the builtin IDE interface on some m68k
- 	  Macintosh models. It supports both the `Quadra style' (used in
-@@ -1000,18 +994,16 @@ config BLK_DEV_MAC_IDE
- 	  builtin IDE interface.
- 
- config BLK_DEV_Q40IDE
--	bool "Q40/Q60 IDE interface support"
-+	tristate "Q40/Q60 IDE interface support"
- 	depends on Q40
--	select IDE_GENERIC
- 	help
- 	  Enable the on-board IDE controller in the Q40/Q60.  This should
- 	  normally be on; disable it only if you are running a custom hard
- 	  drive subsystem through an expansion card.
- 
- config BLK_DEV_MPC8xx_IDE
--	bool "MPC8xx IDE support"
-+	tristate "MPC8xx IDE support"
- 	depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE
--	select IDE_GENERIC
- 	help
- 	  This option provides support for IDE on Motorola MPC8xx Systems.
- 	  Please see 'Type of MPC8xx IDE interface' for details.
-diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
-index b181fc6..0d2da89 100644
---- a/drivers/ide/Makefile
-+++ b/drivers/ide/Makefile
-@@ -7,41 +7,37 @@
- # Note : at this point, these files are compiled on all systems.
- # In the future, some of these should be built conditionally.
- #
--# First come modules that register themselves with the core
-+# link order is important here
- 
- EXTRA_CFLAGS				+= -Idrivers/ide
- 
--obj-$(CONFIG_BLK_DEV_IDE)		+= pci/
+-	if (cr4 & X86_CR4_VMXE) {
+-		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
+-		inject_gp(vcpu);
+-		return;
+-	}
+-	kvm_x86_ops->set_cr4(vcpu, cr4);
+-	vcpu->cr4 = cr4;
+-	mutex_lock(&vcpu->kvm->lock);
+-	kvm_mmu_reset_context(vcpu);
+-	mutex_unlock(&vcpu->kvm->lock);
+-}
+-EXPORT_SYMBOL_GPL(set_cr4);
+-
+-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+-{
+-	if (is_long_mode(vcpu)) {
+-		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
+-			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
+-			inject_gp(vcpu);
+-			return;
+-		}
+-	} else {
+-		if (is_pae(vcpu)) {
+-			if (cr3 & CR3_PAE_RESERVED_BITS) {
+-				printk(KERN_DEBUG
+-				       "set_cr3: #GP, reserved bits\n");
+-				inject_gp(vcpu);
+-				return;
+-			}
+-			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
+-				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
+-				       "reserved bits\n");
+-				inject_gp(vcpu);
+-				return;
+-			}
+-		} else {
+-			if (cr3 & CR3_NONPAE_RESERVED_BITS) {
+-				printk(KERN_DEBUG
+-				       "set_cr3: #GP, reserved bits\n");
+-				inject_gp(vcpu);
+-				return;
+-			}
+-		}
+-	}
 -
- ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o
- 
--ide-core-$(CONFIG_BLK_DEV_CMD640)	+= pci/cmd640.o
+-	mutex_lock(&vcpu->kvm->lock);
+-	/*
+-	 * Does the new cr3 value map to physical memory? (Note, we
+-	 * catch an invalid cr3 even in real-mode, because it would
+-	 * cause trouble later on when we turn on paging anyway.)
+-	 *
+-	 * A real CPU would silently accept an invalid cr3 and would
+-	 * attempt to use it - with largely undefined (and often hard
+-	 * to debug) behavior on the guest side.
+-	 */
+-	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
+-		inject_gp(vcpu);
+-	else {
+-		vcpu->cr3 = cr3;
+-		vcpu->mmu.new_cr3(vcpu);
+-	}
+-	mutex_unlock(&vcpu->kvm->lock);
+-}
+-EXPORT_SYMBOL_GPL(set_cr3);
 -
--# Core IDE code - must come before legacy
-+# core IDE code
- ide-core-$(CONFIG_BLK_DEV_IDEPCI)	+= setup-pci.o
- ide-core-$(CONFIG_BLK_DEV_IDEDMA)	+= ide-dma.o
- ide-core-$(CONFIG_IDE_PROC_FS)		+= ide-proc.o
--ide-core-$(CONFIG_BLK_DEV_IDEPNP)	+= ide-pnp.o
- ide-core-$(CONFIG_BLK_DEV_IDEACPI)	+= ide-acpi.o
- 
--# built-in only drivers from arm/
--ide-core-$(CONFIG_IDE_ARM)		+= arm/ide_arm.o
-+obj-$(CONFIG_BLK_DEV_IDE)		+= ide-core.o
- 
--# built-in only drivers from legacy/
--ide-core-$(CONFIG_BLK_DEV_BUDDHA)	+= legacy/buddha.o
--ide-core-$(CONFIG_BLK_DEV_FALCON_IDE)	+= legacy/falconide.o
--ide-core-$(CONFIG_BLK_DEV_GAYLE)	+= legacy/gayle.o
--ide-core-$(CONFIG_BLK_DEV_MAC_IDE)	+= legacy/macide.o
--ide-core-$(CONFIG_BLK_DEV_Q40IDE)	+= legacy/q40ide.o
-+ifeq ($(CONFIG_IDE_ARM), y)
-+	ide-arm-core-y += arm/ide_arm.o
-+	obj-y += ide-arm-core.o
-+endif
- 
--# built-in only drivers from ppc/
--ide-core-$(CONFIG_BLK_DEV_MPC8xx_IDE)	+= ppc/mpc8xx.o
--ide-core-$(CONFIG_BLK_DEV_IDE_PMAC)	+= ppc/pmac.o
-+obj-$(CONFIG_BLK_DEV_IDE)		+= legacy/ pci/
- 
--# built-in only drivers from h8300/
--ide-core-$(CONFIG_IDE_H8300)		+= h8300/ide-h8300.o
-+obj-$(CONFIG_IDEPCI_PCIBUS_ORDER)	+= ide-scan-pci.o
- 
--obj-$(CONFIG_BLK_DEV_IDE)		+= ide-core.o
-+ifeq ($(CONFIG_BLK_DEV_CMD640), y)
-+	cmd640-core-y += pci/cmd640.o
-+	obj-y += cmd640-core.o
-+endif
-+
-+obj-$(CONFIG_BLK_DEV_IDE)		+= cris/ ppc/
-+obj-$(CONFIG_BLK_DEV_IDEPNP)		+= ide-pnp.o
-+obj-$(CONFIG_IDE_H8300)			+= h8300/
- obj-$(CONFIG_IDE_GENERIC)		+= ide-generic.o
- 
- obj-$(CONFIG_BLK_DEV_IDEDISK)		+= ide-disk.o
-@@ -49,6 +45,20 @@ obj-$(CONFIG_BLK_DEV_IDECD)		+= ide-cd.o
- obj-$(CONFIG_BLK_DEV_IDETAPE)		+= ide-tape.o
- obj-$(CONFIG_BLK_DEV_IDEFLOPPY)		+= ide-floppy.o
- 
--obj-$(CONFIG_BLK_DEV_IDE)		+= legacy/ arm/ mips/
--obj-$(CONFIG_BLK_DEV_HD)		+= legacy/
--obj-$(CONFIG_ETRAX_IDE)		+= cris/
-+ifeq ($(CONFIG_BLK_DEV_IDECS), y)
-+	ide-cs-core-y += legacy/ide-cs.o
-+	obj-y += ide-cs-core.o
-+endif
-+
-+ifeq ($(CONFIG_BLK_DEV_PLATFORM), y)
-+	ide-platform-core-y += legacy/ide_platform.o
-+	obj-y += ide-platform-core.o
-+endif
-+
-+obj-$(CONFIG_BLK_DEV_IDE)		+= arm/ mips/
-+
-+# old hd driver must be last
-+ifeq ($(CONFIG_BLK_DEV_HD), y)
-+	hd-core-y += legacy/hd.o
-+	obj-y += hd-core.o
-+endif
-diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
-index 6a78f07..5f63ad2 100644
---- a/drivers/ide/arm/Makefile
-+++ b/drivers/ide/arm/Makefile
-@@ -3,4 +3,8 @@ obj-$(CONFIG_BLK_DEV_IDE_ICSIDE)	+= icside.o
- obj-$(CONFIG_BLK_DEV_IDE_RAPIDE)	+= rapide.o
- obj-$(CONFIG_BLK_DEV_IDE_BAST)		+= bast-ide.o
- 
-+ifeq ($(CONFIG_IDE_ARM), m)
-+	obj-m += ide_arm.o
-+endif
-+
- EXTRA_CFLAGS	:= -Idrivers/ide
-diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
-index 48db616..45bf9c8 100644
---- a/drivers/ide/arm/bast-ide.c
-+++ b/drivers/ide/arm/bast-ide.c
-@@ -45,7 +45,7 @@ bastide_register(unsigned int base, unsigned int aux, int irq,
- 	hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
- 	hw.irq = irq;
- 
--	ide_register_hw(&hw, NULL, 0, hwif);
-+	ide_register_hw(&hw, NULL, hwif);
- 
- 	return 0;
- }
-diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
-index 93f71fc..8a5c720 100644
---- a/drivers/ide/arm/icside.c
-+++ b/drivers/ide/arm/icside.c
-@@ -272,8 +272,6 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
- 	case XFER_SW_DMA_0:
- 		cycle_time = 480;
- 		break;
--	default:
+-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+-{
+-	if (cr8 & CR8_RESERVED_BITS) {
+-		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
+-		inject_gp(vcpu);
 -		return;
- 	}
- 
- 	/*
-@@ -289,26 +287,10 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
- 		ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
- }
- 
--static void icside_dma_host_off(ide_drive_t *drive)
-+static void icside_dma_host_set(ide_drive_t *drive, int on)
- {
- }
- 
--static void icside_dma_off_quietly(ide_drive_t *drive)
+-	}
+-	if (irqchip_in_kernel(vcpu->kvm))
+-		kvm_lapic_set_tpr(vcpu, cr8);
+-	else
+-		vcpu->cr8 = cr8;
+-}
+-EXPORT_SYMBOL_GPL(set_cr8);
+-
+-unsigned long get_cr8(struct kvm_vcpu *vcpu)
 -{
--	drive->using_dma = 0;
+-	if (irqchip_in_kernel(vcpu->kvm))
+-		return kvm_lapic_get_cr8(vcpu);
+-	else
+-		return vcpu->cr8;
 -}
+-EXPORT_SYMBOL_GPL(get_cr8);
 -
--static void icside_dma_host_on(ide_drive_t *drive)
+-u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
 -{
+-	if (irqchip_in_kernel(vcpu->kvm))
+-		return vcpu->apic_base;
+-	else
+-		return vcpu->apic_base;
 -}
+-EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 -
--static int icside_dma_on(ide_drive_t *drive)
+-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 -{
--	drive->using_dma = 1;
+-	/* TODO: reserve bits check */
+-	if (irqchip_in_kernel(vcpu->kvm))
+-		kvm_lapic_set_base(vcpu, data);
+-	else
+-		vcpu->apic_base = data;
+-}
+-EXPORT_SYMBOL_GPL(kvm_set_apic_base);
+-
+-void fx_init(struct kvm_vcpu *vcpu)
+-{
+-	unsigned after_mxcsr_mask;
+-
+-	/* Initialize guest FPU by resetting ours and saving into guest's */
+-	preempt_disable();
+-	fx_save(&vcpu->host_fx_image);
+-	fpu_init();
+-	fx_save(&vcpu->guest_fx_image);
+-	fx_restore(&vcpu->host_fx_image);
+-	preempt_enable();
+-
+-	vcpu->cr0 |= X86_CR0_ET;
+-	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
+-	vcpu->guest_fx_image.mxcsr = 0x1f80;
+-	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+-	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
+-}
+-EXPORT_SYMBOL_GPL(fx_init);
+-
+-/*
+- * Allocate some memory and give it an address in the guest physical address
+- * space.
+- *
+- * Discontiguous memory is allowed, mostly for framebuffers.
+- */
+-static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+-					  struct kvm_memory_region *mem)
+-{
+-	int r;
+-	gfn_t base_gfn;
+-	unsigned long npages;
+-	unsigned long i;
+-	struct kvm_memory_slot *memslot;
+-	struct kvm_memory_slot old, new;
+-
+-	r = -EINVAL;
+-	/* General sanity checks */
+-	if (mem->memory_size & (PAGE_SIZE - 1))
+-		goto out;
+-	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
+-		goto out;
+-	if (mem->slot >= KVM_MEMORY_SLOTS)
+-		goto out;
+-	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
+-		goto out;
+-
+-	memslot = &kvm->memslots[mem->slot];
+-	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
+-	npages = mem->memory_size >> PAGE_SHIFT;
+-
+-	if (!npages)
+-		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
+-
+-	mutex_lock(&kvm->lock);
+-
+-	new = old = *memslot;
+-
+-	new.base_gfn = base_gfn;
+-	new.npages = npages;
+-	new.flags = mem->flags;
+-
+-	/* Disallow changing a memory slot's size. */
+-	r = -EINVAL;
+-	if (npages && old.npages && npages != old.npages)
+-		goto out_unlock;
+-
+-	/* Check for overlaps */
+-	r = -EEXIST;
+-	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
+-		struct kvm_memory_slot *s = &kvm->memslots[i];
+-
+-		if (s == memslot)
+-			continue;
+-		if (!((base_gfn + npages <= s->base_gfn) ||
+-		      (base_gfn >= s->base_gfn + s->npages)))
+-			goto out_unlock;
+-	}
+-
+-	/* Deallocate if slot is being removed */
+-	if (!npages)
+-		new.phys_mem = NULL;
+-
+-	/* Free page dirty bitmap if unneeded */
+-	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
+-		new.dirty_bitmap = NULL;
+-
+-	r = -ENOMEM;
+-
+-	/* Allocate if a slot is being created */
+-	if (npages && !new.phys_mem) {
+-		new.phys_mem = vmalloc(npages * sizeof(struct page *));
+-
+-		if (!new.phys_mem)
+-			goto out_unlock;
+-
+-		memset(new.phys_mem, 0, npages * sizeof(struct page *));
+-		for (i = 0; i < npages; ++i) {
+-			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
+-						     | __GFP_ZERO);
+-			if (!new.phys_mem[i])
+-				goto out_unlock;
+-			set_page_private(new.phys_mem[i],0);
+-		}
+-	}
+-
+-	/* Allocate page dirty bitmap if needed */
+-	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
+-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
 -
+-		new.dirty_bitmap = vmalloc(dirty_bytes);
+-		if (!new.dirty_bitmap)
+-			goto out_unlock;
+-		memset(new.dirty_bitmap, 0, dirty_bytes);
+-	}
+-
+-	if (mem->slot >= kvm->nmemslots)
+-		kvm->nmemslots = mem->slot + 1;
+-
+-	*memslot = new;
+-
+-	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+-	kvm_flush_remote_tlbs(kvm);
+-
+-	mutex_unlock(&kvm->lock);
+-
+-	kvm_free_physmem_slot(&old, &new);
 -	return 0;
+-
+-out_unlock:
+-	mutex_unlock(&kvm->lock);
+-	kvm_free_physmem_slot(&new, &old);
+-out:
+-	return r;
 -}
 -
- static int icside_dma_end(ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif = HWIF(drive);
-@@ -424,10 +406,7 @@ static void icside_dma_init(ide_hwif_t *hwif)
- 	hwif->dmatable_dma	= 0;
- 	hwif->set_dma_mode	= icside_set_dma_mode;
- 
--	hwif->dma_host_off	= icside_dma_host_off;
--	hwif->dma_off_quietly	= icside_dma_off_quietly;
--	hwif->dma_host_on	= icside_dma_host_on;
--	hwif->ide_dma_on	= icside_dma_on;
-+	hwif->dma_host_set	= icside_dma_host_set;
- 	hwif->dma_setup		= icside_dma_setup;
- 	hwif->dma_exec_cmd	= icside_dma_exec_cmd;
- 	hwif->dma_start		= icside_dma_start;
-diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
-index 8957cba..60f2497 100644
---- a/drivers/ide/arm/ide_arm.c
-+++ b/drivers/ide/arm/ide_arm.c
-@@ -24,12 +24,25 @@
- # define IDE_ARM_IRQ	IRQ_HARDDISK
- #endif
- 
--void __init ide_arm_init(void)
-+static int __init ide_arm_init(void)
- {
-+	ide_hwif_t *hwif;
- 	hw_regs_t hw;
-+	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- 
- 	memset(&hw, 0, sizeof(hw));
- 	ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206);
- 	hw.irq = IDE_ARM_IRQ;
--	ide_register_hw(&hw, NULL, 1, NULL);
-+
-+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
-+	if (hwif) {
-+		ide_init_port_hw(hwif, &hw);
-+		idx[0] = hwif->index;
-+
-+		ide_device_add(idx);
-+	}
-+
-+	return 0;
- }
-+
-+module_init(ide_arm_init);
-diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
-index 0775a3a..e6b56d1 100644
---- a/drivers/ide/arm/rapide.c
-+++ b/drivers/ide/arm/rapide.c
-@@ -13,26 +13,18 @@
- 
- #include <asm/ecard.h>
- 
--static ide_hwif_t *
--rapide_locate_hwif(void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq)
-+static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
-+			       void __iomem *ctrl, unsigned int sz, int irq)
- {
- 	unsigned long port = (unsigned long)base;
--	ide_hwif_t *hwif = ide_find_port(port);
- 	int i;
- 
--	if (hwif == NULL)
+-/*
+- * Get (and clear) the dirty memory log for a memory slot.
+- */
+-static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+-				      struct kvm_dirty_log *log)
+-{
+-	struct kvm_memory_slot *memslot;
+-	int r, i;
+-	int n;
+-	unsigned long any = 0;
+-
+-	mutex_lock(&kvm->lock);
+-
+-	r = -EINVAL;
+-	if (log->slot >= KVM_MEMORY_SLOTS)
 -		goto out;
 -
- 	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
--		hwif->io_ports[i] = port;
-+		hw->io_ports[i] = port;
- 		port += sz;
- 	}
--	hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
--	hwif->irq = irq;
--	hwif->mmio = 1;
--	default_hwif_mmiops(hwif);
+-	memslot = &kvm->memslots[log->slot];
+-	r = -ENOENT;
+-	if (!memslot->dirty_bitmap)
+-		goto out;
+-
+-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+-
+-	for (i = 0; !any && i < n/sizeof(long); ++i)
+-		any = memslot->dirty_bitmap[i];
+-
+-	r = -EFAULT;
+-	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
+-		goto out;
+-
+-	/* If nothing is dirty, don't bother messing with page tables. */
+-	if (any) {
+-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+-		kvm_flush_remote_tlbs(kvm);
+-		memset(memslot->dirty_bitmap, 0, n);
+-	}
+-
+-	r = 0;
+-
 -out:
--	return hwif;
-+	hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
-+	hw->irq = irq;
- }
- 
- static int __devinit
-@@ -42,6 +34,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
- 	void __iomem *base;
- 	int ret;
- 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-+	hw_regs_t hw;
- 
- 	ret = ecard_request_resources(ec);
- 	if (ret)
-@@ -53,11 +46,17 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
- 		goto release;
- 	}
- 
--	hwif = rapide_locate_hwif(base, base + 0x818, 1 << 6, ec->irq);
-+	hwif = ide_find_port((unsigned long)base);
- 	if (hwif) {
--		hwif->hwif_data = base;
--		hwif->gendev.parent = &ec->dev;
--		hwif->noprobe = 0;
-+		memset(&hw, 0, sizeof(hw));
-+		rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
-+		hw.chipset = ide_generic;
-+		hw.dev = &ec->dev;
-+
-+		ide_init_port_hw(hwif, &hw);
-+
-+		hwif->mmio = 1;
-+		default_hwif_mmiops(hwif);
- 
- 		idx[0] = hwif->index;
- 
-diff --git a/drivers/ide/cris/Makefile b/drivers/ide/cris/Makefile
-index 6176e8d..20b9596 100644
---- a/drivers/ide/cris/Makefile
-+++ b/drivers/ide/cris/Makefile
-@@ -1,3 +1,3 @@
- EXTRA_CFLAGS				+= -Idrivers/ide
- 
--obj-y					+= ide-cris.o
-+obj-$(CONFIG_IDE_ETRAX)			+= ide-cris.o
-diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
-index 476e0d6..8c3294c 100644
---- a/drivers/ide/cris/ide-cris.c
-+++ b/drivers/ide/cris/ide-cris.c
-@@ -673,9 +673,8 @@ static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
- static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
- static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
- static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
--static int cris_dma_on (ide_drive_t *drive);
- 
--static void cris_dma_off(ide_drive_t *drive)
-+static void cris_dma_host_set(ide_drive_t *drive, int on)
- {
- }
- 
-@@ -747,8 +746,6 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 			strobe = ATA_DMA2_STROBE;
- 			hold = ATA_DMA2_HOLD;
- 			break;
--		default:
--			return;
- 	}
- 
- 	if (speed >= XFER_UDMA_0)
-@@ -757,13 +754,11 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 		cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
- }
- 
--void __init
--init_e100_ide (void)
-+static int __init init_e100_ide(void)
- {
- 	hw_regs_t hw;
--	int ide_offsets[IDE_NR_PORTS];
--	int h;
+-	mutex_unlock(&kvm->lock);
+-	return r;
+-}
+-
+-/*
+- * Set a new alias region.  Aliases map a portion of physical memory into
+- * another portion.  This is useful for memory windows, for example the PC
+- * VGA region.
+- */
+-static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
+-					 struct kvm_memory_alias *alias)
+-{
+-	int r, n;
+-	struct kvm_mem_alias *p;
+-
+-	r = -EINVAL;
+-	/* General sanity checks */
+-	if (alias->memory_size & (PAGE_SIZE - 1))
+-		goto out;
+-	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
+-		goto out;
+-	if (alias->slot >= KVM_ALIAS_SLOTS)
+-		goto out;
+-	if (alias->guest_phys_addr + alias->memory_size
+-	    < alias->guest_phys_addr)
+-		goto out;
+-	if (alias->target_phys_addr + alias->memory_size
+-	    < alias->target_phys_addr)
+-		goto out;
+-
+-	mutex_lock(&kvm->lock);
+-
+-	p = &kvm->aliases[alias->slot];
+-	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
+-	p->npages = alias->memory_size >> PAGE_SHIFT;
+-	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+-
+-	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
+-		if (kvm->aliases[n - 1].npages)
+-			break;
+-	kvm->naliases = n;
+-
+-	kvm_mmu_zap_all(kvm);
+-
+-	mutex_unlock(&kvm->lock);
+-
+-	return 0;
+-
+-out:
+-	return r;
+-}
+-
+-static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+-{
+-	int r;
+-
+-	r = 0;
+-	switch (chip->chip_id) {
+-	case KVM_IRQCHIP_PIC_MASTER:
+-		memcpy (&chip->chip.pic,
+-			&pic_irqchip(kvm)->pics[0],
+-			sizeof(struct kvm_pic_state));
+-		break;
+-	case KVM_IRQCHIP_PIC_SLAVE:
+-		memcpy (&chip->chip.pic,
+-			&pic_irqchip(kvm)->pics[1],
+-			sizeof(struct kvm_pic_state));
+-		break;
+-	case KVM_IRQCHIP_IOAPIC:
+-		memcpy (&chip->chip.ioapic,
+-			ioapic_irqchip(kvm),
+-			sizeof(struct kvm_ioapic_state));
+-		break;
+-	default:
+-		r = -EINVAL;
+-		break;
+-	}
+-	return r;
+-}
+-
+-static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+-{
+-	int r;
+-
+-	r = 0;
+-	switch (chip->chip_id) {
+-	case KVM_IRQCHIP_PIC_MASTER:
+-		memcpy (&pic_irqchip(kvm)->pics[0],
+-			&chip->chip.pic,
+-			sizeof(struct kvm_pic_state));
+-		break;
+-	case KVM_IRQCHIP_PIC_SLAVE:
+-		memcpy (&pic_irqchip(kvm)->pics[1],
+-			&chip->chip.pic,
+-			sizeof(struct kvm_pic_state));
+-		break;
+-	case KVM_IRQCHIP_IOAPIC:
+-		memcpy (ioapic_irqchip(kvm),
+-			&chip->chip.ioapic,
+-			sizeof(struct kvm_ioapic_state));
+-		break;
+-	default:
+-		r = -EINVAL;
+-		break;
+-	}
+-	kvm_pic_update_irq(pic_irqchip(kvm));
+-	return r;
+-}
+-
+-static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+-{
 -	int i;
-+	int ide_offsets[IDE_NR_PORTS], h, i;
-+	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- 
- 	printk("ide: ETRAX FS built-in ATA DMA controller\n");
- 
-@@ -780,9 +775,11 @@ init_e100_ide (void)
- 		                ide_offsets,
- 		                0, 0, cris_ide_ack_intr,
- 		                ide_default_irq(0));
--		ide_register_hw(&hw, NULL, 1, &hwif);
-+		hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
- 		if (hwif == NULL)
- 			continue;
-+		ide_init_port_data(hwif, hwif->index);
-+		ide_init_port_hw(hwif, &hw);
- 		hwif->mmio = 1;
- 		hwif->chipset = ide_etrax100;
- 		hwif->set_pio_mode = &cris_set_pio_mode;
-@@ -791,6 +788,7 @@ init_e100_ide (void)
- 		hwif->ata_output_data = &cris_ide_output_data;
- 		hwif->atapi_input_bytes = &cris_atapi_input_bytes;
- 		hwif->atapi_output_bytes = &cris_atapi_output_bytes;
-+		hwif->dma_host_set = &cris_dma_host_set;
- 		hwif->ide_dma_end = &cris_dma_end;
- 		hwif->dma_setup = &cris_dma_setup;
- 		hwif->dma_exec_cmd = &cris_dma_exec_cmd;
-@@ -801,9 +799,6 @@ init_e100_ide (void)
- 		hwif->OUTBSYNC = &cris_ide_outbsync;
- 		hwif->INB = &cris_ide_inb;
- 		hwif->INW = &cris_ide_inw;
--		hwif->dma_host_off = &cris_dma_off;
--		hwif->dma_host_on = &cris_dma_on;
--		hwif->dma_off_quietly = &cris_dma_off;
- 		hwif->cbl = ATA_CBL_PATA40;
- 		hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
- 		hwif->pio_mask = ATA_PIO4,
-@@ -811,6 +806,8 @@ init_e100_ide (void)
- 		hwif->drives[1].autotune = 1;
- 		hwif->ultra_mask = cris_ultra_mask;
- 		hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
-+
-+		idx[h] = hwif->index;
- 	}
- 
- 	/* Reset pulse */
-@@ -823,14 +820,12 @@ init_e100_ide (void)
- 	cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD);
- 	cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD);
- 	cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
+-	struct kvm_mem_alias *alias;
+-
+-	for (i = 0; i < kvm->naliases; ++i) {
+-		alias = &kvm->aliases[i];
+-		if (gfn >= alias->base_gfn
+-		    && gfn < alias->base_gfn + alias->npages)
+-			return alias->target_gfn + gfn - alias->base_gfn;
+-	}
+-	return gfn;
 -}
- 
--static int cris_dma_on (ide_drive_t *drive)
+-
+-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 -{
-+	ide_device_add(idx);
-+
- 	return 0;
- }
- 
+-	int i;
 -
- static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16)));
- 
- /*
-@@ -1062,3 +1057,5 @@ static void cris_dma_start(ide_drive_t *drive)
- 		LED_DISK_READ(1);
- 	}
- }
-+
-+module_init(init_e100_ide);
-diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile
-new file mode 100644
-index 0000000..5eba16f
---- /dev/null
-+++ b/drivers/ide/h8300/Makefile
-@@ -0,0 +1,2 @@
-+
-+obj-$(CONFIG_IDE_H8300)			+= ide-h8300.o
-diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
-index 4a49b5c..4f6d019 100644
---- a/drivers/ide/h8300/ide-h8300.c
-+++ b/drivers/ide/h8300/ide-h8300.c
-@@ -84,11 +84,12 @@ static inline void hwif_setup(ide_hwif_t *hwif)
- 	hwif->INSL  = NULL;
- }
- 
--void __init h8300_ide_init(void)
-+static int __init h8300_ide_init(void)
- {
- 	hw_regs_t hw;
- 	ide_hwif_t *hwif;
--	int idx;
-+	int index;
-+	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- 
- 	if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
- 		goto out_busy;
-@@ -100,16 +101,28 @@ void __init h8300_ide_init(void)
- 	hw_setup(&hw);
- 
- 	/* register if */
--	idx = ide_register_hw(&hw, NULL, 1, &hwif);
--	if (idx == -1) {
-+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
-+	if (hwif == NULL) {
- 		printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
+-	for (i = 0; i < kvm->nmemslots; ++i) {
+-		struct kvm_memory_slot *memslot = &kvm->memslots[i];
+-
+-		if (gfn >= memslot->base_gfn
+-		    && gfn < memslot->base_gfn + memslot->npages)
+-			return memslot;
+-	}
+-	return NULL;
+-}
+-
+-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+-{
+-	gfn = unalias_gfn(kvm, gfn);
+-	return __gfn_to_memslot(kvm, gfn);
+-}
+-
+-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+-{
+-	struct kvm_memory_slot *slot;
+-
+-	gfn = unalias_gfn(kvm, gfn);
+-	slot = __gfn_to_memslot(kvm, gfn);
+-	if (!slot)
+-		return NULL;
+-	return slot->phys_mem[gfn - slot->base_gfn];
+-}
+-EXPORT_SYMBOL_GPL(gfn_to_page);
+-
+-/* WARNING: Does not work on aliased pages. */
+-void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+-{
+-	struct kvm_memory_slot *memslot;
+-
+-	memslot = __gfn_to_memslot(kvm, gfn);
+-	if (memslot && memslot->dirty_bitmap) {
+-		unsigned long rel_gfn = gfn - memslot->base_gfn;
+-
+-		/* avoid RMW */
+-		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
+-			set_bit(rel_gfn, memslot->dirty_bitmap);
+-	}
+-}
+-
+-int emulator_read_std(unsigned long addr,
+-			     void *val,
+-			     unsigned int bytes,
+-			     struct kvm_vcpu *vcpu)
+-{
+-	void *data = val;
+-
+-	while (bytes) {
+-		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+-		unsigned offset = addr & (PAGE_SIZE-1);
+-		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
+-		unsigned long pfn;
+-		struct page *page;
+-		void *page_virt;
+-
+-		if (gpa == UNMAPPED_GVA)
+-			return X86EMUL_PROPAGATE_FAULT;
+-		pfn = gpa >> PAGE_SHIFT;
+-		page = gfn_to_page(vcpu->kvm, pfn);
+-		if (!page)
+-			return X86EMUL_UNHANDLEABLE;
+-		page_virt = kmap_atomic(page, KM_USER0);
+-
+-		memcpy(data, page_virt + offset, tocopy);
+-
+-		kunmap_atomic(page_virt, KM_USER0);
+-
+-		bytes -= tocopy;
+-		data += tocopy;
+-		addr += tocopy;
+-	}
+-
+-	return X86EMUL_CONTINUE;
+-}
+-EXPORT_SYMBOL_GPL(emulator_read_std);
+-
+-static int emulator_write_std(unsigned long addr,
+-			      const void *val,
+-			      unsigned int bytes,
+-			      struct kvm_vcpu *vcpu)
+-{
+-	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
+-	return X86EMUL_UNHANDLEABLE;
+-}
+-
+-/*
+- * Only apic need an MMIO device hook, so shortcut now..
+- */
+-static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
+-						gpa_t addr)
+-{
+-	struct kvm_io_device *dev;
+-
+-	if (vcpu->apic) {
+-		dev = &vcpu->apic->dev;
+-		if (dev->in_range(dev, addr))
+-			return dev;
+-	}
+-	return NULL;
+-}
+-
+-static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+-						gpa_t addr)
+-{
+-	struct kvm_io_device *dev;
+-
+-	dev = vcpu_find_pervcpu_dev(vcpu, addr);
+-	if (dev == NULL)
+-		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+-	return dev;
+-}
+-
+-static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
+-					       gpa_t addr)
+-{
+-	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
+-}
+-
+-static int emulator_read_emulated(unsigned long addr,
+-				  void *val,
+-				  unsigned int bytes,
+-				  struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_io_device *mmio_dev;
+-	gpa_t                 gpa;
+-
+-	if (vcpu->mmio_read_completed) {
+-		memcpy(val, vcpu->mmio_data, bytes);
+-		vcpu->mmio_read_completed = 0;
+-		return X86EMUL_CONTINUE;
+-	} else if (emulator_read_std(addr, val, bytes, vcpu)
+-		   == X86EMUL_CONTINUE)
+-		return X86EMUL_CONTINUE;
+-
+-	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+-	if (gpa == UNMAPPED_GVA)
+-		return X86EMUL_PROPAGATE_FAULT;
+-
+-	/*
+-	 * Is this MMIO handled locally?
+-	 */
+-	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+-	if (mmio_dev) {
+-		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
+-		return X86EMUL_CONTINUE;
+-	}
+-
+-	vcpu->mmio_needed = 1;
+-	vcpu->mmio_phys_addr = gpa;
+-	vcpu->mmio_size = bytes;
+-	vcpu->mmio_is_write = 0;
+-
+-	return X86EMUL_UNHANDLEABLE;
+-}
+-
+-static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+-			       const void *val, int bytes)
+-{
+-	struct page *page;
+-	void *virt;
+-
+-	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
+-		return 0;
+-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+-	if (!page)
+-		return 0;
+-	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
+-	virt = kmap_atomic(page, KM_USER0);
+-	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+-	memcpy(virt + offset_in_page(gpa), val, bytes);
+-	kunmap_atomic(virt, KM_USER0);
+-	return 1;
+-}
+-
+-static int emulator_write_emulated_onepage(unsigned long addr,
+-					   const void *val,
+-					   unsigned int bytes,
+-					   struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_io_device *mmio_dev;
+-	gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+-
+-	if (gpa == UNMAPPED_GVA) {
+-		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
+-		return X86EMUL_PROPAGATE_FAULT;
+-	}
+-
+-	if (emulator_write_phys(vcpu, gpa, val, bytes))
+-		return X86EMUL_CONTINUE;
+-
+-	/*
+-	 * Is this MMIO handled locally?
+-	 */
+-	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+-	if (mmio_dev) {
+-		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
+-		return X86EMUL_CONTINUE;
+-	}
+-
+-	vcpu->mmio_needed = 1;
+-	vcpu->mmio_phys_addr = gpa;
+-	vcpu->mmio_size = bytes;
+-	vcpu->mmio_is_write = 1;
+-	memcpy(vcpu->mmio_data, val, bytes);
+-
+-	return X86EMUL_CONTINUE;
+-}
+-
+-int emulator_write_emulated(unsigned long addr,
+-				   const void *val,
+-				   unsigned int bytes,
+-				   struct kvm_vcpu *vcpu)
+-{
+-	/* Crossing a page boundary? */
+-	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
+-		int rc, now;
+-
+-		now = -addr & ~PAGE_MASK;
+-		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
+-		if (rc != X86EMUL_CONTINUE)
+-			return rc;
+-		addr += now;
+-		val += now;
+-		bytes -= now;
+-	}
+-	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
+-}
+-EXPORT_SYMBOL_GPL(emulator_write_emulated);
+-
+-static int emulator_cmpxchg_emulated(unsigned long addr,
+-				     const void *old,
+-				     const void *new,
+-				     unsigned int bytes,
+-				     struct kvm_vcpu *vcpu)
+-{
+-	static int reported;
+-
+-	if (!reported) {
+-		reported = 1;
+-		printk(KERN_WARNING "kvm: emulating exchange as write\n");
+-	}
+-	return emulator_write_emulated(addr, new, bytes, vcpu);
+-}
+-
+-static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
+-{
+-	return kvm_x86_ops->get_segment_base(vcpu, seg);
+-}
+-
+-int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
+-{
+-	return X86EMUL_CONTINUE;
+-}
+-
+-int emulate_clts(struct kvm_vcpu *vcpu)
+-{
+-	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
+-	return X86EMUL_CONTINUE;
+-}
+-
+-int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
+-{
+-	struct kvm_vcpu *vcpu = ctxt->vcpu;
+-
+-	switch (dr) {
+-	case 0 ... 3:
+-		*dest = kvm_x86_ops->get_dr(vcpu, dr);
+-		return X86EMUL_CONTINUE;
+-	default:
+-		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
+-		return X86EMUL_UNHANDLEABLE;
+-	}
+-}
+-
+-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
+-{
+-	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
+-	int exception;
+-
+-	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
+-	if (exception) {
+-		/* FIXME: better handling */
+-		return X86EMUL_UNHANDLEABLE;
+-	}
+-	return X86EMUL_CONTINUE;
+-}
+-
+-void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
+-{
+-	static int reported;
+-	u8 opcodes[4];
+-	unsigned long rip = vcpu->rip;
+-	unsigned long rip_linear;
+-
+-	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
+-
+-	if (reported)
 -		return;
-+		return -ENOENT;
- 	}
- 
-+	index = hwif->index;
-+	ide_init_port_data(hwif, index);
-+	ide_init_port_hw(hwif, &hw);
- 	hwif_setup(hwif);
--	printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", idx);
--	return;
-+	printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", index);
-+
-+	idx[0] = index;
-+
-+	ide_device_add(idx);
-+
-+	return 0;
- 
- out_busy:
- 	printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
-+
-+	return -EBUSY;
- }
-+
-+module_init(h8300_ide_init);
-diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
-index 899d565..e888fc3 100644
---- a/drivers/ide/ide-acpi.c
-+++ b/drivers/ide/ide-acpi.c
-@@ -383,27 +383,19 @@ static int taskfile_load_raw(ide_drive_t *drive,
- 	       gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]);
- 
- 	memset(&args, 0, sizeof(ide_task_t));
--	args.command_type = IDE_DRIVE_TASK_NO_DATA;
--	args.data_phase   = TASKFILE_NO_DATA;
--	args.handler      = &task_no_data_intr;
- 
- 	/* convert gtf to IDE Taskfile */
--	args.tfRegister[1] = gtf->tfa[0];	/* 0x1f1 */
--	args.tfRegister[2] = gtf->tfa[1];	/* 0x1f2 */
--	args.tfRegister[3] = gtf->tfa[2];	/* 0x1f3 */
--	args.tfRegister[4] = gtf->tfa[3];	/* 0x1f4 */
--	args.tfRegister[5] = gtf->tfa[4];	/* 0x1f5 */
--	args.tfRegister[6] = gtf->tfa[5];	/* 0x1f6 */
--	args.tfRegister[7] = gtf->tfa[6];	/* 0x1f7 */
-+	memcpy(&args.tf_array[7], &gtf->tfa, 7);
-+	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
- 
- 	if (ide_noacpitfs) {
- 		DEBPRINT("_GTF execution disabled\n");
- 		return err;
- 	}
- 
--	err = ide_raw_taskfile(drive, &args, NULL);
-+	err = ide_no_data_taskfile(drive, &args);
- 	if (err)
--		printk(KERN_ERR "%s: ide_raw_taskfile failed: %u\n",
-+		printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
- 		       __FUNCTION__, err);
- 
- 	return err;
-diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
-index c7d77f0..74c6087 100644
---- a/drivers/ide/ide-cd.c
-+++ b/drivers/ide/ide-cd.c
-@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
- 					BUG();
- 			} else {
- 				spin_lock_irqsave(&ide_lock, flags);
--				end_that_request_chunk(failed, 0,
--							failed->data_len);
--				end_that_request_last(failed, 0);
-+				if (__blk_end_request(failed, -EIO,
-+						      failed->data_len))
-+					BUG();
- 				spin_unlock_irqrestore(&ide_lock, flags);
- 			}
- 		} else
-@@ -917,19 +917,13 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
- 	if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
- 		return startstop;
- 
-+	/* FIXME: for Virtual DMA we must check harder */
- 	if (info->dma)
- 		info->dma = !hwif->dma_setup(drive);
- 
- 	/* Set up the controller registers. */
--	/* FIXME: for Virtual DMA we must check harder */
--	HWIF(drive)->OUTB(info->dma, IDE_FEATURE_REG);
--	HWIF(drive)->OUTB(0, IDE_IREASON_REG);
--	HWIF(drive)->OUTB(0, IDE_SECTOR_REG);
 -
--	HWIF(drive)->OUTB(xferlen & 0xff, IDE_BCOUNTL_REG);
--	HWIF(drive)->OUTB(xferlen >> 8  , IDE_BCOUNTH_REG);
--	if (IDE_CONTROL_REG)
--		HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
-+	ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL |
-+			   IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma);
-  
- 	if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
- 		/* waiting for CDB interrupt, not DMA yet. */
-@@ -1653,6 +1647,17 @@ static int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason)
- 	return 1;
- }
- 
-+/*
-+ * Called from blk_end_request_callback() after the data of the request
-+ * is completed and before the request is completed.
-+ * By returning value '1', blk_end_request_callback() returns immediately
-+ * without completing the request.
-+ */
-+static int cdrom_newpc_intr_dummy_cb(struct request *rq)
-+{
-+	return 1;
-+}
-+
- typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
- 
- /*
-@@ -1691,9 +1696,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
- 			return ide_error(drive, "dma error", stat);
- 		}
- 
--		end_that_request_chunk(rq, 1, rq->data_len);
--		rq->data_len = 0;
--		goto end_request;
-+		spin_lock_irqsave(&ide_lock, flags);
-+		if (__blk_end_request(rq, 0, rq->data_len))
-+			BUG();
-+		HWGROUP(drive)->rq = NULL;
-+		spin_unlock_irqrestore(&ide_lock, flags);
-+
-+		return ide_stopped;
- 	}
- 
- 	/*
-@@ -1711,8 +1720,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
- 	/*
- 	 * If DRQ is clear, the command has completed.
- 	 */
--	if ((stat & DRQ_STAT) == 0)
--		goto end_request;
-+	if ((stat & DRQ_STAT) == 0) {
-+		spin_lock_irqsave(&ide_lock, flags);
-+		if (__blk_end_request(rq, 0, 0))
-+			BUG();
-+		HWGROUP(drive)->rq = NULL;
-+		spin_unlock_irqrestore(&ide_lock, flags);
-+
-+		return ide_stopped;
-+	}
- 
- 	/*
- 	 * check which way to transfer data
-@@ -1765,7 +1781,14 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
- 		rq->data_len -= blen;
- 
- 		if (rq->bio)
--			end_that_request_chunk(rq, 1, blen);
-+			/*
-+			 * The request can't be completed until DRQ is cleared.
-+			 * So complete the data, but don't complete the request
-+			 * using the dummy function for the callback feature
-+			 * of blk_end_request_callback().
-+			 */
-+			blk_end_request_callback(rq, 0, blen,
-+						 cdrom_newpc_intr_dummy_cb);
- 		else
- 			rq->data += blen;
- 	}
-@@ -1786,14 +1809,6 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
- 
- 	ide_set_handler(drive, cdrom_newpc_intr, rq->timeout, NULL);
- 	return ide_started;
+-	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
 -
--end_request:
--	spin_lock_irqsave(&ide_lock, flags);
--	blkdev_dequeue_request(rq);
--	end_that_request_last(rq, 1);
--	HWGROUP(drive)->rq = NULL;
--	spin_unlock_irqrestore(&ide_lock, flags);
--	return ide_stopped;
- }
- 
- static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
-diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
-index b178190..717e114 100644
---- a/drivers/ide/ide-disk.c
-+++ b/drivers/ide/ide-disk.c
-@@ -129,6 +129,50 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
- 	return 0;	/* lba_capacity value may be bad */
- }
- 
-+static const u8 ide_rw_cmds[] = {
-+	WIN_MULTREAD,
-+	WIN_MULTWRITE,
-+	WIN_MULTREAD_EXT,
-+	WIN_MULTWRITE_EXT,
-+	WIN_READ,
-+	WIN_WRITE,
-+	WIN_READ_EXT,
-+	WIN_WRITE_EXT,
-+	WIN_READDMA,
-+	WIN_WRITEDMA,
-+	WIN_READDMA_EXT,
-+	WIN_WRITEDMA_EXT,
-+};
-+
-+static const u8 ide_data_phases[] = {
-+	TASKFILE_MULTI_IN,
-+	TASKFILE_MULTI_OUT,
-+	TASKFILE_IN,
-+	TASKFILE_OUT,
-+	TASKFILE_IN_DMA,
-+	TASKFILE_OUT_DMA,
-+};
-+
-+static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
-+{
-+	u8 index, lba48, write;
-+
-+	lba48 = (task->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
-+	write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
-+
-+	if (dma)
-+		index = drive->vdma ? 4 : 8;
-+	else
-+		index = drive->mult_count ? 0 : 4;
-+
-+	task->tf.command = ide_rw_cmds[index + lba48 + write];
-+
-+	if (dma)
-+		index = 8; /* fixup index */
-+
-+	task->data_phase = ide_data_phases[index / 2 + write];
-+}
-+
- /*
-  * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
-  * using LBA if supported, or CHS otherwise, to address sectors.
-@@ -137,11 +181,11 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
- {
- 	ide_hwif_t *hwif	= HWIF(drive);
- 	unsigned int dma	= drive->using_dma;
-+	u16 nsectors		= (u16)rq->nr_sectors;
- 	u8 lba48		= (drive->addressing == 1) ? 1 : 0;
--	task_ioreg_t command	= WIN_NOP;
--	ata_nsector_t		nsectors;
+-	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
+-	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+-	reported = 1;
+-}
+-EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 -
--	nsectors.all		= (u16) rq->nr_sectors;
-+	ide_task_t		task;
-+	struct ide_taskfile	*tf = &task.tf;
-+	ide_startstop_t		rc;
- 
- 	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
- 		if (block + rq->nr_sectors > 1ULL << 28)
-@@ -155,121 +199,71 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
- 		ide_map_sg(drive, rq);
- 	}
- 
--	if (IDE_CONTROL_REG)
--		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
+-struct x86_emulate_ops emulate_ops = {
+-	.read_std            = emulator_read_std,
+-	.write_std           = emulator_write_std,
+-	.read_emulated       = emulator_read_emulated,
+-	.write_emulated      = emulator_write_emulated,
+-	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
+-};
 -
--	/* FIXME: SELECT_MASK(drive, 0) ? */
-+	memset(&task, 0, sizeof(task));
-+	task.tf_flags = IDE_TFLAG_NO_SELECT_MASK;  /* FIXME? */
-+	task.tf_flags |= (IDE_TFLAG_TF | IDE_TFLAG_DEVICE);
- 
- 	if (drive->select.b.lba) {
- 		if (lba48) {
--			task_ioreg_t tasklets[10];
+-int emulate_instruction(struct kvm_vcpu *vcpu,
+-			struct kvm_run *run,
+-			unsigned long cr2,
+-			u16 error_code)
+-{
+-	struct x86_emulate_ctxt emulate_ctxt;
+-	int r;
+-	int cs_db, cs_l;
 -
- 			pr_debug("%s: LBA=0x%012llx\n", drive->name,
- 					(unsigned long long)block);
- 
--			tasklets[0] = 0;
--			tasklets[1] = 0;
--			tasklets[2] = nsectors.b.low;
--			tasklets[3] = nsectors.b.high;
--			tasklets[4] = (task_ioreg_t) block;
--			tasklets[5] = (task_ioreg_t) (block>>8);
--			tasklets[6] = (task_ioreg_t) (block>>16);
--			tasklets[7] = (task_ioreg_t) (block>>24);
--			if (sizeof(block) == 4) {
--				tasklets[8] = (task_ioreg_t) 0;
--				tasklets[9] = (task_ioreg_t) 0;
--			} else {
--				tasklets[8] = (task_ioreg_t)((u64)block >> 32);
--				tasklets[9] = (task_ioreg_t)((u64)block >> 40);
-+			tf->hob_nsect = (nsectors >> 8) & 0xff;
-+			tf->hob_lbal  = (u8)(block >> 24);
-+			if (sizeof(block) != 4) {
-+				tf->hob_lbam = (u8)((u64)block >> 32);
-+				tf->hob_lbah = (u8)((u64)block >> 40);
- 			}
--#ifdef DEBUG
--			printk("%s: 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n",
--				drive->name, tasklets[3], tasklets[2],
--				tasklets[9], tasklets[8], tasklets[7],
--				tasklets[6], tasklets[5], tasklets[4]);
--#endif
--			hwif->OUTB(tasklets[1], IDE_FEATURE_REG);
--			hwif->OUTB(tasklets[3], IDE_NSECTOR_REG);
--			hwif->OUTB(tasklets[7], IDE_SECTOR_REG);
--			hwif->OUTB(tasklets[8], IDE_LCYL_REG);
--			hwif->OUTB(tasklets[9], IDE_HCYL_REG);
+-	vcpu->mmio_fault_cr2 = cr2;
+-	kvm_x86_ops->cache_regs(vcpu);
 -
--			hwif->OUTB(tasklets[0], IDE_FEATURE_REG);
--			hwif->OUTB(tasklets[2], IDE_NSECTOR_REG);
--			hwif->OUTB(tasklets[4], IDE_SECTOR_REG);
--			hwif->OUTB(tasklets[5], IDE_LCYL_REG);
--			hwif->OUTB(tasklets[6], IDE_HCYL_REG);
--			hwif->OUTB(0x00|drive->select.all,IDE_SELECT_REG);
-+
-+			tf->nsect  = nsectors & 0xff;
-+			tf->lbal   = (u8) block;
-+			tf->lbam   = (u8)(block >>  8);
-+			tf->lbah   = (u8)(block >> 16);
-+
-+			task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
- 		} else {
--			hwif->OUTB(0x00, IDE_FEATURE_REG);
--			hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
--			hwif->OUTB(block, IDE_SECTOR_REG);
--			hwif->OUTB(block>>=8, IDE_LCYL_REG);
--			hwif->OUTB(block>>=8, IDE_HCYL_REG);
--			hwif->OUTB(((block>>8)&0x0f)|drive->select.all,IDE_SELECT_REG);
-+			tf->nsect  = nsectors & 0xff;
-+			tf->lbal   = block;
-+			tf->lbam   = block >>= 8;
-+			tf->lbah   = block >>= 8;
-+			tf->device = (block >> 8) & 0xf;
- 		}
- 	} else {
- 		unsigned int sect,head,cyl,track;
- 		track = (int)block / drive->sect;
- 		sect  = (int)block % drive->sect + 1;
--		hwif->OUTB(sect, IDE_SECTOR_REG);
- 		head  = track % drive->head;
- 		cyl   = track / drive->head;
- 
- 		pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);
- 
--		hwif->OUTB(0x00, IDE_FEATURE_REG);
--		hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
--		hwif->OUTB(cyl, IDE_LCYL_REG);
--		hwif->OUTB(cyl>>8, IDE_HCYL_REG);
--		hwif->OUTB(head|drive->select.all,IDE_SELECT_REG);
-+		tf->nsect  = nsectors & 0xff;
-+		tf->lbal   = sect;
-+		tf->lbam   = cyl;
-+		tf->lbah   = cyl >> 8;
-+		tf->device = head;
- 	}
- 
--	if (dma) {
--		if (!hwif->dma_setup(drive)) {
--			if (rq_data_dir(rq)) {
--				command = lba48 ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
--				if (drive->vdma)
--					command = lba48 ? WIN_WRITE_EXT: WIN_WRITE;
--			} else {
--				command = lba48 ? WIN_READDMA_EXT : WIN_READDMA;
--				if (drive->vdma)
--					command = lba48 ? WIN_READ_EXT: WIN_READ;
--			}
--			hwif->dma_exec_cmd(drive, command);
--			hwif->dma_start(drive);
--			return ide_started;
+-	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+-
+-	emulate_ctxt.vcpu = vcpu;
+-	emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+-	emulate_ctxt.cr2 = cr2;
+-	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
+-		? X86EMUL_MODE_REAL : cs_l
+-		? X86EMUL_MODE_PROT64 :	cs_db
+-		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+-
+-	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
+-		emulate_ctxt.cs_base = 0;
+-		emulate_ctxt.ds_base = 0;
+-		emulate_ctxt.es_base = 0;
+-		emulate_ctxt.ss_base = 0;
+-	} else {
+-		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
+-		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
+-		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
+-		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
+-	}
+-
+-	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
+-	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);
+-
+-	vcpu->mmio_is_write = 0;
+-	vcpu->pio.string = 0;
+-	r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);
+-	if (vcpu->pio.string)
+-		return EMULATE_DO_MMIO;
+-
+-	if ((r || vcpu->mmio_is_write) && run) {
+-		run->exit_reason = KVM_EXIT_MMIO;
+-		run->mmio.phys_addr = vcpu->mmio_phys_addr;
+-		memcpy(run->mmio.data, vcpu->mmio_data, 8);
+-		run->mmio.len = vcpu->mmio_size;
+-		run->mmio.is_write = vcpu->mmio_is_write;
+-	}
+-
+-	if (r) {
+-		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
+-			return EMULATE_DONE;
+-		if (!vcpu->mmio_needed) {
+-			kvm_report_emulation_failure(vcpu, "mmio");
+-			return EMULATE_FAIL;
 -		}
--		/* fallback to PIO */
--		ide_init_sg_cmd(drive, rq);
+-		return EMULATE_DO_MMIO;
+-	}
+-
+-	kvm_x86_ops->decache_regs(vcpu);
+-	kvm_x86_ops->set_rflags(vcpu, emulate_ctxt.eflags);
+-
+-	if (vcpu->mmio_is_write) {
+-		vcpu->mmio_needed = 0;
+-		return EMULATE_DO_MMIO;
+-	}
+-
+-	return EMULATE_DONE;
+-}
+-EXPORT_SYMBOL_GPL(emulate_instruction);
+-
+-/*
+- * The vCPU has executed a HLT instruction with in-kernel mode enabled.
+- */
+-static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
+-{
+-	DECLARE_WAITQUEUE(wait, current);
+-
+-	add_wait_queue(&vcpu->wq, &wait);
+-
+-	/*
+-	 * We will block until either an interrupt or a signal wakes us up
+-	 */
+-	while (!kvm_cpu_has_interrupt(vcpu)
+-	       && !signal_pending(current)
+-	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
+-	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		vcpu_put(vcpu);
+-		schedule();
+-		vcpu_load(vcpu);
+-	}
+-
+-	__set_current_state(TASK_RUNNING);
+-	remove_wait_queue(&vcpu->wq, &wait);
+-}
+-
+-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+-{
+-	++vcpu->stat.halt_exits;
+-	if (irqchip_in_kernel(vcpu->kvm)) {
+-		vcpu->mp_state = VCPU_MP_STATE_HALTED;
+-		kvm_vcpu_block(vcpu);
+-		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
+-			return -EINTR;
+-		return 1;
+-	} else {
+-		vcpu->run->exit_reason = KVM_EXIT_HLT;
+-		return 0;
+-	}
+-}
+-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+-
+-int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
+-{
+-	unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
+-
+-	kvm_x86_ops->cache_regs(vcpu);
+-	ret = -KVM_EINVAL;
+-#ifdef CONFIG_X86_64
+-	if (is_long_mode(vcpu)) {
+-		nr = vcpu->regs[VCPU_REGS_RAX];
+-		a0 = vcpu->regs[VCPU_REGS_RDI];
+-		a1 = vcpu->regs[VCPU_REGS_RSI];
+-		a2 = vcpu->regs[VCPU_REGS_RDX];
+-		a3 = vcpu->regs[VCPU_REGS_RCX];
+-		a4 = vcpu->regs[VCPU_REGS_R8];
+-		a5 = vcpu->regs[VCPU_REGS_R9];
+-	} else
+-#endif
+-	{
+-		nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
+-		a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
+-		a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
+-		a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
+-		a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
+-		a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
+-		a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
+-	}
+-	switch (nr) {
+-	default:
+-		run->hypercall.nr = nr;
+-		run->hypercall.args[0] = a0;
+-		run->hypercall.args[1] = a1;
+-		run->hypercall.args[2] = a2;
+-		run->hypercall.args[3] = a3;
+-		run->hypercall.args[4] = a4;
+-		run->hypercall.args[5] = a5;
+-		run->hypercall.ret = ret;
+-		run->hypercall.longmode = is_long_mode(vcpu);
+-		kvm_x86_ops->decache_regs(vcpu);
+-		return 0;
+-	}
+-	vcpu->regs[VCPU_REGS_RAX] = ret;
+-	kvm_x86_ops->decache_regs(vcpu);
+-	return 1;
+-}
+-EXPORT_SYMBOL_GPL(kvm_hypercall);
+-
+-static u64 mk_cr_64(u64 curr_cr, u32 new_val)
+-{
+-	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
+-}
+-
+-void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
+-{
+-	struct descriptor_table dt = { limit, base };
+-
+-	kvm_x86_ops->set_gdt(vcpu, &dt);
+-}
+-
+-void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
+-{
+-	struct descriptor_table dt = { limit, base };
+-
+-	kvm_x86_ops->set_idt(vcpu, &dt);
+-}
+-
+-void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
+-		   unsigned long *rflags)
+-{
+-	lmsw(vcpu, msw);
+-	*rflags = kvm_x86_ops->get_rflags(vcpu);
+-}
+-
+-unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
+-{
+-	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+-	switch (cr) {
+-	case 0:
+-		return vcpu->cr0;
+-	case 2:
+-		return vcpu->cr2;
+-	case 3:
+-		return vcpu->cr3;
+-	case 4:
+-		return vcpu->cr4;
+-	default:
+-		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+-		return 0;
+-	}
+-}
+-
+-void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
+-		     unsigned long *rflags)
+-{
+-	switch (cr) {
+-	case 0:
+-		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
+-		*rflags = kvm_x86_ops->get_rflags(vcpu);
+-		break;
+-	case 2:
+-		vcpu->cr2 = val;
+-		break;
+-	case 3:
+-		set_cr3(vcpu, val);
+-		break;
+-	case 4:
+-		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
+-		break;
+-	default:
+-		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+-	}
+-}
+-
+-/*
+- * Register the para guest with the host:
+- */
+-static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
+-{
+-	struct kvm_vcpu_para_state *para_state;
+-	hpa_t para_state_hpa, hypercall_hpa;
+-	struct page *para_state_page;
+-	unsigned char *hypercall;
+-	gpa_t hypercall_gpa;
+-
+-	printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
+-	printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);
+-
+-	/*
+-	 * Needs to be page aligned:
+-	 */
+-	if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
+-		goto err_gp;
+-
+-	para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
+-	printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
+-	if (is_error_hpa(para_state_hpa))
+-		goto err_gp;
+-
+-	mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
+-	para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
+-	para_state = kmap(para_state_page);
+-
+-	printk(KERN_DEBUG "....  guest version: %d\n", para_state->guest_version);
+-	printk(KERN_DEBUG "....           size: %d\n", para_state->size);
+-
+-	para_state->host_version = KVM_PARA_API_VERSION;
+-	/*
+-	 * We cannot support guests that try to register themselves
+-	 * with a newer API version than the host supports:
+-	 */
+-	if (para_state->guest_version > KVM_PARA_API_VERSION) {
+-		para_state->ret = -KVM_EINVAL;
+-		goto err_kunmap_skip;
+-	}
+-
+-	hypercall_gpa = para_state->hypercall_gpa;
+-	hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
+-	printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
+-	if (is_error_hpa(hypercall_hpa)) {
+-		para_state->ret = -KVM_EINVAL;
+-		goto err_kunmap_skip;
+-	}
+-
+-	printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
+-	vcpu->para_state_page = para_state_page;
+-	vcpu->para_state_gpa = para_state_gpa;
+-	vcpu->hypercall_gpa = hypercall_gpa;
+-
+-	mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
+-	hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
+-				KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
+-	kvm_x86_ops->patch_hypercall(vcpu, hypercall);
+-	kunmap_atomic(hypercall, KM_USER1);
+-
+-	para_state->ret = 0;
+-err_kunmap_skip:
+-	kunmap(para_state_page);
+-	return 0;
+-err_gp:
+-	return 1;
+-}
+-
+-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+-{
+-	u64 data;
+-
+-	switch (msr) {
+-	case 0xc0010010: /* SYSCFG */
+-	case 0xc0010015: /* HWCR */
+-	case MSR_IA32_PLATFORM_ID:
+-	case MSR_IA32_P5_MC_ADDR:
+-	case MSR_IA32_P5_MC_TYPE:
+-	case MSR_IA32_MC0_CTL:
+-	case MSR_IA32_MCG_STATUS:
+-	case MSR_IA32_MCG_CAP:
+-	case MSR_IA32_MC0_MISC:
+-	case MSR_IA32_MC0_MISC+4:
+-	case MSR_IA32_MC0_MISC+8:
+-	case MSR_IA32_MC0_MISC+12:
+-	case MSR_IA32_MC0_MISC+16:
+-	case MSR_IA32_UCODE_REV:
+-	case MSR_IA32_PERF_STATUS:
+-	case MSR_IA32_EBL_CR_POWERON:
+-		/* MTRR registers */
+-	case 0xfe:
+-	case 0x200 ... 0x2ff:
+-		data = 0;
+-		break;
+-	case 0xcd: /* fsb frequency */
+-		data = 3;
+-		break;
+-	case MSR_IA32_APICBASE:
+-		data = kvm_get_apic_base(vcpu);
+-		break;
+-	case MSR_IA32_MISC_ENABLE:
+-		data = vcpu->ia32_misc_enable_msr;
+-		break;
+-#ifdef CONFIG_X86_64
+-	case MSR_EFER:
+-		data = vcpu->shadow_efer;
+-		break;
+-#endif
+-	default:
+-		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
+-		return 1;
+-	}
+-	*pdata = data;
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(kvm_get_msr_common);
+-
+-/*
+- * Reads an msr value (of 'msr_index') into 'pdata'.
+- * Returns 0 on success, non-0 otherwise.
+- * Assumes vcpu_load() was already called.
+- */
+-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+-{
+-	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
+-}
+-
+-#ifdef CONFIG_X86_64
+-
+-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+-{
+-	if (efer & EFER_RESERVED_BITS) {
+-		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
+-		       efer);
+-		inject_gp(vcpu);
+-		return;
+-	}
+-
+-	if (is_paging(vcpu)
+-	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+-		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
+-		inject_gp(vcpu);
+-		return;
+-	}
+-
+-	kvm_x86_ops->set_efer(vcpu, efer);
+-
+-	efer &= ~EFER_LMA;
+-	efer |= vcpu->shadow_efer & EFER_LMA;
+-
+-	vcpu->shadow_efer = efer;
+-}
+-
+-#endif
+-
+-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+-{
+-	switch (msr) {
+-#ifdef CONFIG_X86_64
+-	case MSR_EFER:
+-		set_efer(vcpu, data);
+-		break;
+-#endif
+-	case MSR_IA32_MC0_STATUS:
+-		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
+-		       __FUNCTION__, data);
+-		break;
+-	case MSR_IA32_MCG_STATUS:
+-		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
+-			__FUNCTION__, data);
+-		break;
+-	case MSR_IA32_UCODE_REV:
+-	case MSR_IA32_UCODE_WRITE:
+-	case 0x200 ... 0x2ff: /* MTRRs */
+-		break;
+-	case MSR_IA32_APICBASE:
+-		kvm_set_apic_base(vcpu, data);
+-		break;
+-	case MSR_IA32_MISC_ENABLE:
+-		vcpu->ia32_misc_enable_msr = data;
+-		break;
+-	/*
+-	 * This is the 'probe whether the host is KVM' logic:
+-	 */
+-	case MSR_KVM_API_MAGIC:
+-		return vcpu_register_para(vcpu, data);
+-
+-	default:
+-		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
+-		return 1;
 -	}
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(kvm_set_msr_common);
 -
--	if (rq_data_dir(rq) == READ) {
+-/*
+- * Writes msr value into into the appropriate "register".
+- * Returns 0 on success, non-0 otherwise.
+- * Assumes vcpu_load() was already called.
+- */
+-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+-{
+-	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+-}
 -
--		if (drive->mult_count) {
--			hwif->data_phase = TASKFILE_MULTI_IN;
--			command = lba48 ? WIN_MULTREAD_EXT : WIN_MULTREAD;
--		} else {
--			hwif->data_phase = TASKFILE_IN;
--			command = lba48 ? WIN_READ_EXT : WIN_READ;
--		}
-+	if (rq_data_dir(rq))
-+		task.tf_flags |= IDE_TFLAG_WRITE;
- 
--		ide_execute_command(drive, command, &task_in_intr, WAIT_CMD, NULL);
--		return ide_started;
--	} else {
--		if (drive->mult_count) {
--			hwif->data_phase = TASKFILE_MULTI_OUT;
--			command = lba48 ? WIN_MULTWRITE_EXT : WIN_MULTWRITE;
--		} else {
--			hwif->data_phase = TASKFILE_OUT;
--			command = lba48 ? WIN_WRITE_EXT : WIN_WRITE;
+-void kvm_resched(struct kvm_vcpu *vcpu)
+-{
+-	if (!need_resched())
+-		return;
+-	cond_resched();
+-}
+-EXPORT_SYMBOL_GPL(kvm_resched);
+-
+-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+-{
+-	int i;
+-	u32 function;
+-	struct kvm_cpuid_entry *e, *best;
+-
+-	kvm_x86_ops->cache_regs(vcpu);
+-	function = vcpu->regs[VCPU_REGS_RAX];
+-	vcpu->regs[VCPU_REGS_RAX] = 0;
+-	vcpu->regs[VCPU_REGS_RBX] = 0;
+-	vcpu->regs[VCPU_REGS_RCX] = 0;
+-	vcpu->regs[VCPU_REGS_RDX] = 0;
+-	best = NULL;
+-	for (i = 0; i < vcpu->cpuid_nent; ++i) {
+-		e = &vcpu->cpuid_entries[i];
+-		if (e->function == function) {
+-			best = e;
+-			break;
 -		}
-+	ide_tf_set_cmd(drive, &task, dma);
-+	if (!dma)
-+		hwif->data_phase = task.data_phase;
-+	task.rq = rq;
- 
--		/* FIXME: ->OUTBSYNC ? */
--		hwif->OUTB(command, IDE_COMMAND_REG);
-+	rc = do_rw_taskfile(drive, &task);
- 
--		return pre_task_out_intr(drive, rq);
-+	if (rc == ide_stopped && dma) {
-+		/* fallback to PIO */
-+		task.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
-+		ide_tf_set_cmd(drive, &task, 0);
-+		hwif->data_phase = task.data_phase;
-+		ide_init_sg_cmd(drive, rq);
-+		rc = do_rw_taskfile(drive, &task);
- 	}
-+
-+	return rc;
- }
- 
- /*
-@@ -307,57 +301,29 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
-  * Queries for true maximum capacity of the drive.
-  * Returns maximum LBA address (> 0) of the drive, 0 if failed.
-  */
--static unsigned long idedisk_read_native_max_address(ide_drive_t *drive)
-+static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
- {
- 	ide_task_t args;
--	unsigned long addr = 0;
-+	struct ide_taskfile *tf = &args.tf;
-+	u64 addr = 0;
- 
- 	/* Create IDE/ATA command request structure */
- 	memset(&args, 0, sizeof(ide_task_t));
--	args.tfRegister[IDE_SELECT_OFFSET]	= 0x40;
--	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_READ_NATIVE_MAX;
--	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
--	args.handler				= &task_no_data_intr;
-+	if (lba48)
-+		tf->command = WIN_READ_NATIVE_MAX_EXT;
-+	else
-+		tf->command = WIN_READ_NATIVE_MAX;
-+	tf->device  = ATA_LBA;
-+	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+	if (lba48)
-+		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
- 	/* submit command request */
--	ide_raw_taskfile(drive, &args, NULL);
-+	ide_no_data_taskfile(drive, &args);
- 
- 	/* if OK, compute maximum address value */
--	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
--		addr = ((args.tfRegister[IDE_SELECT_OFFSET] & 0x0f) << 24)
--		     | ((args.tfRegister[  IDE_HCYL_OFFSET]       ) << 16)
--		     | ((args.tfRegister[  IDE_LCYL_OFFSET]       ) <<  8)
--		     | ((args.tfRegister[IDE_SECTOR_OFFSET]       ));
--		addr++;	/* since the return value is (maxlba - 1), we add 1 */
+-		/*
+-		 * Both basic or both extended?
+-		 */
+-		if (((e->function ^ function) & 0x80000000) == 0)
+-			if (!best || e->function > best->function)
+-				best = e;
+-	}
+-	if (best) {
+-		vcpu->regs[VCPU_REGS_RAX] = best->eax;
+-		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
+-		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
+-		vcpu->regs[VCPU_REGS_RDX] = best->edx;
+-	}
+-	kvm_x86_ops->decache_regs(vcpu);
+-	kvm_x86_ops->skip_emulated_instruction(vcpu);
+-}
+-EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
+-
+-static int pio_copy_data(struct kvm_vcpu *vcpu)
+-{
+-	void *p = vcpu->pio_data;
+-	void *q;
+-	unsigned bytes;
+-	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
+-
+-	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
+-		 PAGE_KERNEL);
+-	if (!q) {
+-		free_pio_guest_pages(vcpu);
+-		return -ENOMEM;
 -	}
--	return addr;
+-	q += vcpu->pio.guest_page_offset;
+-	bytes = vcpu->pio.size * vcpu->pio.cur_count;
+-	if (vcpu->pio.in)
+-		memcpy(q, p, bytes);
+-	else
+-		memcpy(p, q, bytes);
+-	q -= vcpu->pio.guest_page_offset;
+-	vunmap(q);
+-	free_pio_guest_pages(vcpu);
+-	return 0;
 -}
-+	if ((tf->status & 0x01) == 0)
-+		addr = ide_get_lba_addr(tf, lba48) + 1;
- 
--static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive)
+-
+-static int complete_pio(struct kvm_vcpu *vcpu)
 -{
--	ide_task_t args;
--	unsigned long long addr = 0;
+-	struct kvm_pio_request *io = &vcpu->pio;
+-	long delta;
+-	int r;
 -
--	/* Create IDE/ATA command request structure */
--	memset(&args, 0, sizeof(ide_task_t));
+-	kvm_x86_ops->cache_regs(vcpu);
 -
--	args.tfRegister[IDE_SELECT_OFFSET]	= 0x40;
--	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_READ_NATIVE_MAX_EXT;
--	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
--	args.handler				= &task_no_data_intr;
--        /* submit command request */
--        ide_raw_taskfile(drive, &args, NULL);
+-	if (!io->string) {
+-		if (io->in)
+-			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
+-			       io->size);
+-	} else {
+-		if (io->in) {
+-			r = pio_copy_data(vcpu);
+-			if (r) {
+-				kvm_x86_ops->cache_regs(vcpu);
+-				return r;
+-			}
+-		}
 -
--	/* if OK, compute maximum address value */
--	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
--		u32 high = (args.hobRegister[IDE_HCYL_OFFSET] << 16) |
--			   (args.hobRegister[IDE_LCYL_OFFSET] <<  8) |
--			    args.hobRegister[IDE_SECTOR_OFFSET];
--		u32 low  = ((args.tfRegister[IDE_HCYL_OFFSET])<<16) |
--			   ((args.tfRegister[IDE_LCYL_OFFSET])<<8) |
--			    (args.tfRegister[IDE_SECTOR_OFFSET]);
--		addr = ((__u64)high << 24) | low;
--		addr++;	/* since the return value is (maxlba - 1), we add 1 */
+-		delta = 1;
+-		if (io->rep) {
+-			delta *= io->cur_count;
+-			/*
+-			 * The size of the register should really depend on
+-			 * current address size.
+-			 */
+-			vcpu->regs[VCPU_REGS_RCX] -= delta;
+-		}
+-		if (io->down)
+-			delta = -delta;
+-		delta *= io->size;
+-		if (io->in)
+-			vcpu->regs[VCPU_REGS_RDI] += delta;
+-		else
+-			vcpu->regs[VCPU_REGS_RSI] += delta;
 -	}
- 	return addr;
- }
- 
-@@ -365,67 +331,37 @@ static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive
-  * Sets maximum virtual LBA address of the drive.
-  * Returns new maximum virtual LBA address (> 0) or 0 on failure.
-  */
--static unsigned long idedisk_set_max_address(ide_drive_t *drive, unsigned long addr_req)
+-
+-	kvm_x86_ops->decache_regs(vcpu);
+-
+-	io->count -= io->cur_count;
+-	io->cur_count = 0;
+-
+-	return 0;
+-}
+-
+-static void kernel_pio(struct kvm_io_device *pio_dev,
+-		       struct kvm_vcpu *vcpu,
+-		       void *pd)
 -{
--	ide_task_t args;
--	unsigned long addr_set = 0;
--	
--	addr_req--;
--	/* Create IDE/ATA command request structure */
--	memset(&args, 0, sizeof(ide_task_t));
--	args.tfRegister[IDE_SECTOR_OFFSET]	= ((addr_req >>  0) & 0xff);
--	args.tfRegister[IDE_LCYL_OFFSET]	= ((addr_req >>  8) & 0xff);
--	args.tfRegister[IDE_HCYL_OFFSET]	= ((addr_req >> 16) & 0xff);
--	args.tfRegister[IDE_SELECT_OFFSET]	= ((addr_req >> 24) & 0x0f) | 0x40;
--	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SET_MAX;
--	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
--	args.handler				= &task_no_data_intr;
--	/* submit command request */
--	ide_raw_taskfile(drive, &args, NULL);
--	/* if OK, read new maximum address value */
--	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
--		addr_set = ((args.tfRegister[IDE_SELECT_OFFSET] & 0x0f) << 24)
--			 | ((args.tfRegister[  IDE_HCYL_OFFSET]       ) << 16)
--			 | ((args.tfRegister[  IDE_LCYL_OFFSET]       ) <<  8)
--			 | ((args.tfRegister[IDE_SECTOR_OFFSET]       ));
--		addr_set++;
+-	/* TODO: String I/O for in kernel device */
+-
+-	mutex_lock(&vcpu->kvm->lock);
+-	if (vcpu->pio.in)
+-		kvm_iodevice_read(pio_dev, vcpu->pio.port,
+-				  vcpu->pio.size,
+-				  pd);
+-	else
+-		kvm_iodevice_write(pio_dev, vcpu->pio.port,
+-				   vcpu->pio.size,
+-				   pd);
+-	mutex_unlock(&vcpu->kvm->lock);
+-}
+-
+-static void pio_string_write(struct kvm_io_device *pio_dev,
+-			     struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_pio_request *io = &vcpu->pio;
+-	void *pd = vcpu->pio_data;
+-	int i;
+-
+-	mutex_lock(&vcpu->kvm->lock);
+-	for (i = 0; i < io->cur_count; i++) {
+-		kvm_iodevice_write(pio_dev, io->port,
+-				   io->size,
+-				   pd);
+-		pd += io->size;
+-	}
+-	mutex_unlock(&vcpu->kvm->lock);
+-}
+-
+-int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+-		  int size, unsigned port)
+-{
+-	struct kvm_io_device *pio_dev;
+-
+-	vcpu->run->exit_reason = KVM_EXIT_IO;
+-	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
+-	vcpu->run->io.size = vcpu->pio.size = size;
+-	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+-	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
+-	vcpu->run->io.port = vcpu->pio.port = port;
+-	vcpu->pio.in = in;
+-	vcpu->pio.string = 0;
+-	vcpu->pio.down = 0;
+-	vcpu->pio.guest_page_offset = 0;
+-	vcpu->pio.rep = 0;
+-
+-	kvm_x86_ops->cache_regs(vcpu);
+-	memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
+-	kvm_x86_ops->decache_regs(vcpu);
+-
+-	kvm_x86_ops->skip_emulated_instruction(vcpu);
+-
+-	pio_dev = vcpu_find_pio_dev(vcpu, port);
+-	if (pio_dev) {
+-		kernel_pio(pio_dev, vcpu, vcpu->pio_data);
+-		complete_pio(vcpu);
+-		return 1;
 -	}
--	return addr_set;
+-	return 0;
 -}
+-EXPORT_SYMBOL_GPL(kvm_emulate_pio);
 -
--static unsigned long long idedisk_set_max_address_ext(ide_drive_t *drive, unsigned long long addr_req)
-+static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
- {
- 	ide_task_t args;
--	unsigned long long addr_set = 0;
-+	struct ide_taskfile *tf = &args.tf;
-+	u64 addr_set = 0;
- 
- 	addr_req--;
- 	/* Create IDE/ATA command request structure */
- 	memset(&args, 0, sizeof(ide_task_t));
--	args.tfRegister[IDE_SECTOR_OFFSET]	= ((addr_req >>  0) & 0xff);
--	args.tfRegister[IDE_LCYL_OFFSET]	= ((addr_req >>= 8) & 0xff);
--	args.tfRegister[IDE_HCYL_OFFSET]	= ((addr_req >>= 8) & 0xff);
--	args.tfRegister[IDE_SELECT_OFFSET]      = 0x40;
--	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SET_MAX_EXT;
--	args.hobRegister[IDE_SECTOR_OFFSET]	= (addr_req >>= 8) & 0xff;
--	args.hobRegister[IDE_LCYL_OFFSET]	= (addr_req >>= 8) & 0xff;
--	args.hobRegister[IDE_HCYL_OFFSET]	= (addr_req >>= 8) & 0xff;
--	args.hobRegister[IDE_SELECT_OFFSET]	= 0x40;
--	args.hobRegister[IDE_CONTROL_OFFSET_HOB]= (drive->ctl|0x80);
--	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
--	args.handler				= &task_no_data_intr;
-+	tf->lbal     = (addr_req >>  0) & 0xff;
-+	tf->lbam     = (addr_req >>= 8) & 0xff;
-+	tf->lbah     = (addr_req >>= 8) & 0xff;
-+	if (lba48) {
-+		tf->hob_lbal = (addr_req >>= 8) & 0xff;
-+		tf->hob_lbam = (addr_req >>= 8) & 0xff;
-+		tf->hob_lbah = (addr_req >>= 8) & 0xff;
-+		tf->command  = WIN_SET_MAX_EXT;
-+	} else {
-+		tf->device   = (addr_req >>= 8) & 0x0f;
-+		tf->command  = WIN_SET_MAX;
-+	}
-+	tf->device |= ATA_LBA;
-+	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+	if (lba48)
-+		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
- 	/* submit command request */
--	ide_raw_taskfile(drive, &args, NULL);
-+	ide_no_data_taskfile(drive, &args);
- 	/* if OK, compute maximum address value */
--	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
--		u32 high = (args.hobRegister[IDE_HCYL_OFFSET] << 16) |
--			   (args.hobRegister[IDE_LCYL_OFFSET] <<  8) |
--			    args.hobRegister[IDE_SECTOR_OFFSET];
--		u32 low  = ((args.tfRegister[IDE_HCYL_OFFSET])<<16) |
--			   ((args.tfRegister[IDE_LCYL_OFFSET])<<8) |
--			    (args.tfRegister[IDE_SECTOR_OFFSET]);
--		addr_set = ((__u64)high << 24) | low;
--		addr_set++;
+-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+-		  int size, unsigned long count, int down,
+-		  gva_t address, int rep, unsigned port)
+-{
+-	unsigned now, in_page;
+-	int i, ret = 0;
+-	int nr_pages = 1;
+-	struct page *page;
+-	struct kvm_io_device *pio_dev;
+-
+-	vcpu->run->exit_reason = KVM_EXIT_IO;
+-	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
+-	vcpu->run->io.size = vcpu->pio.size = size;
+-	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+-	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
+-	vcpu->run->io.port = vcpu->pio.port = port;
+-	vcpu->pio.in = in;
+-	vcpu->pio.string = 1;
+-	vcpu->pio.down = down;
+-	vcpu->pio.guest_page_offset = offset_in_page(address);
+-	vcpu->pio.rep = rep;
+-
+-	if (!count) {
+-		kvm_x86_ops->skip_emulated_instruction(vcpu);
+-		return 1;
 -	}
-+	if ((tf->status & 0x01) == 0)
-+		addr_set = ide_get_lba_addr(tf, lba48) + 1;
-+
- 	return addr_set;
- }
- 
-@@ -471,10 +407,8 @@ static void idedisk_check_hpa(ide_drive_t *drive)
- 	int lba48 = idedisk_supports_lba48(drive->id);
- 
- 	capacity = drive->capacity64;
--	if (lba48)
--		set_max = idedisk_read_native_max_address_ext(drive);
--	else
--		set_max = idedisk_read_native_max_address(drive);
-+
-+	set_max = idedisk_read_native_max_address(drive, lba48);
- 
- 	if (ide_in_drive_list(drive->id, hpa_list)) {
- 		/*
-@@ -495,10 +429,8 @@ static void idedisk_check_hpa(ide_drive_t *drive)
- 			 capacity, sectors_to_MB(capacity),
- 			 set_max, sectors_to_MB(set_max));
- 
--	if (lba48)
--		set_max = idedisk_set_max_address_ext(drive, set_max);
+-
+-	if (!down)
+-		in_page = PAGE_SIZE - offset_in_page(address);
 -	else
--		set_max = idedisk_set_max_address(drive, set_max);
-+	set_max = idedisk_set_max_address(drive, set_max, lba48);
-+
- 	if (set_max) {
- 		drive->capacity64 = set_max;
- 		printk(KERN_INFO "%s: Host Protected Area disabled.\n",
-@@ -556,32 +488,32 @@ static sector_t idedisk_capacity (ide_drive_t *drive)
- static int smart_enable(ide_drive_t *drive)
- {
- 	ide_task_t args;
-+	struct ide_taskfile *tf = &args.tf;
- 
- 	memset(&args, 0, sizeof(ide_task_t));
--	args.tfRegister[IDE_FEATURE_OFFSET]	= SMART_ENABLE;
--	args.tfRegister[IDE_LCYL_OFFSET]	= SMART_LCYL_PASS;
--	args.tfRegister[IDE_HCYL_OFFSET]	= SMART_HCYL_PASS;
--	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SMART;
--	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
--	args.handler				= &task_no_data_intr;
--	return ide_raw_taskfile(drive, &args, NULL);
-+	tf->feature = SMART_ENABLE;
-+	tf->lbam    = SMART_LCYL_PASS;
-+	tf->lbah    = SMART_HCYL_PASS;
-+	tf->command = WIN_SMART;
-+	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+	return ide_no_data_taskfile(drive, &args);
- }
- 
- static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd)
- {
- 	ide_task_t args;
-+	struct ide_taskfile *tf = &args.tf;
- 
- 	memset(&args, 0, sizeof(ide_task_t));
--	args.tfRegister[IDE_FEATURE_OFFSET]	= sub_cmd;
--	args.tfRegister[IDE_NSECTOR_OFFSET]	= 0x01;
--	args.tfRegister[IDE_LCYL_OFFSET]	= SMART_LCYL_PASS;
--	args.tfRegister[IDE_HCYL_OFFSET]	= SMART_HCYL_PASS;
--	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SMART;
--	args.command_type			= IDE_DRIVE_TASK_IN;
--	args.data_phase				= TASKFILE_IN;
--	args.handler				= &task_in_intr;
-+	tf->feature = sub_cmd;
-+	tf->nsect   = 0x01;
-+	tf->lbam    = SMART_LCYL_PASS;
-+	tf->lbah    = SMART_HCYL_PASS;
-+	tf->command = WIN_SMART;
-+	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+	args.data_phase	= TASKFILE_IN;
- 	(void) smart_enable(drive);
--	return ide_raw_taskfile(drive, &args, buf);
-+	return ide_raw_taskfile(drive, &args, buf, 1);
- }
- 
- static int proc_idedisk_read_cache
-@@ -659,19 +591,20 @@ static ide_proc_entry_t idedisk_proc[] = {
- static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
- {
- 	ide_drive_t *drive = q->queuedata;
-+	ide_task_t task;
- 
--	memset(rq->cmd, 0, sizeof(rq->cmd));
+-		in_page = offset_in_page(address) + size;
+-	now = min(count, (unsigned long)in_page / size);
+-	if (!now) {
+-		/*
+-		 * String I/O straddles page boundary.  Pin two guest pages
+-		 * so that we satisfy atomicity constraints.  Do just one
+-		 * transaction to avoid complexity.
+-		 */
+-		nr_pages = 2;
+-		now = 1;
+-	}
+-	if (down) {
+-		/*
+-		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
+-		 */
+-		pr_unimpl(vcpu, "guest string pio down\n");
+-		inject_gp(vcpu);
+-		return 1;
+-	}
+-	vcpu->run->io.count = now;
+-	vcpu->pio.cur_count = now;
 -
-+	memset(&task, 0, sizeof(task));
- 	if (ide_id_has_flush_cache_ext(drive->id) &&
- 	    (drive->capacity64 >= (1UL << 28)))
--		rq->cmd[0] = WIN_FLUSH_CACHE_EXT;
-+		task.tf.command = WIN_FLUSH_CACHE_EXT;
- 	else
--		rq->cmd[0] = WIN_FLUSH_CACHE;
-+		task.tf.command = WIN_FLUSH_CACHE;
-+	task.tf_flags	= IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
-+	task.data_phase	= TASKFILE_NO_DATA;
- 
+-	if (vcpu->pio.cur_count == vcpu->pio.count)
+-		kvm_x86_ops->skip_emulated_instruction(vcpu);
 -
--	rq->cmd_type = REQ_TYPE_ATA_TASK;
-+	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
- 	rq->cmd_flags |= REQ_SOFTBARRIER;
--	rq->buffer = rq->cmd;
-+	rq->special = &task;
- }
- 
- /*
-@@ -687,8 +620,10 @@ static int set_multcount(ide_drive_t *drive, int arg)
- 
- 	if (drive->special.b.set_multmode)
- 		return -EBUSY;
-+
- 	ide_init_drive_cmd (&rq);
--	rq.cmd_type = REQ_TYPE_ATA_CMD;
-+	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
-+
- 	drive->mult_req = arg;
- 	drive->special.b.set_multmode = 1;
- 	(void) ide_do_drive_cmd (drive, &rq, ide_wait);
-@@ -753,12 +688,11 @@ static int write_cache(ide_drive_t *drive, int arg)
- 
- 	if (ide_id_has_flush_cache(drive->id)) {
- 		memset(&args, 0, sizeof(ide_task_t));
--		args.tfRegister[IDE_FEATURE_OFFSET]	= (arg) ?
-+		args.tf.feature = arg ?
- 			SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
--		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SETFEATURES;
--		args.command_type		= IDE_DRIVE_TASK_NO_DATA;
--		args.handler			= &task_no_data_intr;
--		err = ide_raw_taskfile(drive, &args, NULL);
-+		args.tf.command = WIN_SETFEATURES;
-+		args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+		err = ide_no_data_taskfile(drive, &args);
- 		if (err == 0)
- 			drive->wcache = arg;
- 	}
-@@ -774,12 +708,11 @@ static int do_idedisk_flushcache (ide_drive_t *drive)
- 
- 	memset(&args, 0, sizeof(ide_task_t));
- 	if (ide_id_has_flush_cache_ext(drive->id))
--		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_FLUSH_CACHE_EXT;
-+		args.tf.command = WIN_FLUSH_CACHE_EXT;
- 	else
--		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_FLUSH_CACHE;
--	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
--	args.handler				= &task_no_data_intr;
--	return ide_raw_taskfile(drive, &args, NULL);
-+		args.tf.command = WIN_FLUSH_CACHE;
-+	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+	return ide_no_data_taskfile(drive, &args);
- }
- 
- static int set_acoustic (ide_drive_t *drive, int arg)
-@@ -790,13 +723,11 @@ static int set_acoustic (ide_drive_t *drive, int arg)
- 		return -EINVAL;
- 
- 	memset(&args, 0, sizeof(ide_task_t));
--	args.tfRegister[IDE_FEATURE_OFFSET]	= (arg) ? SETFEATURES_EN_AAM :
--							  SETFEATURES_DIS_AAM;
--	args.tfRegister[IDE_NSECTOR_OFFSET]	= arg;
--	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SETFEATURES;
--	args.command_type = IDE_DRIVE_TASK_NO_DATA;
--	args.handler	  = &task_no_data_intr;
--	ide_raw_taskfile(drive, &args, NULL);
-+	args.tf.feature = arg ? SETFEATURES_EN_AAM : SETFEATURES_DIS_AAM;
-+	args.tf.nsect   = arg;
-+	args.tf.command = WIN_SETFEATURES;
-+	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+	ide_no_data_taskfile(drive, &args);
- 	drive->acoustic = arg;
- 	return 0;
- }
-@@ -832,7 +763,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
- 	ide_add_setting(drive,	"bios_head",	SETTING_RW,	TYPE_BYTE,	0,	255,			1,	1,	&drive->bios_head,	NULL);
- 	ide_add_setting(drive,	"bios_sect",	SETTING_RW,	TYPE_BYTE,	0,	63,			1,	1,	&drive->bios_sect,	NULL);
- 	ide_add_setting(drive,	"address",	SETTING_RW,	TYPE_BYTE,	0,	2,			1,	1,	&drive->addressing,	set_lba_addressing);
--	ide_add_setting(drive,	"bswap",	SETTING_READ,	TYPE_BYTE,	0,	1,			1,	1,	&drive->bswap,		NULL);
- 	ide_add_setting(drive,	"multcount",	SETTING_RW,	TYPE_BYTE,	0,	id->max_multsect,	1,	1,	&drive->mult_count,	set_multcount);
- 	ide_add_setting(drive,	"nowerr",	SETTING_RW,	TYPE_BYTE,	0,	1,			1,	1,	&drive->nowerr,		set_nowerr);
- 	ide_add_setting(drive,	"lun",		SETTING_RW,	TYPE_INT,	0,	7,			1,	1,	&drive->lun,		NULL);
-@@ -1041,6 +971,17 @@ static ide_driver_t idedisk_driver = {
- #endif
- };
- 
-+static int idedisk_set_doorlock(ide_drive_t *drive, int on)
-+{
-+	ide_task_t task;
-+
-+	memset(&task, 0, sizeof(task));
-+	task.tf.command = on ? WIN_DOORLOCK : WIN_DOORUNLOCK;
-+	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+
-+	return ide_no_data_taskfile(drive, &task);
-+}
-+
- static int idedisk_open(struct inode *inode, struct file *filp)
- {
- 	struct gendisk *disk = inode->i_bdev->bd_disk;
-@@ -1055,18 +996,13 @@ static int idedisk_open(struct inode *inode, struct file *filp)
- 	idkp->openers++;
- 
- 	if (drive->removable && idkp->openers == 1) {
--		ide_task_t args;
--		memset(&args, 0, sizeof(ide_task_t));
--		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORLOCK;
--		args.command_type = IDE_DRIVE_TASK_NO_DATA;
--		args.handler	  = &task_no_data_intr;
- 		check_disk_change(inode->i_bdev);
- 		/*
- 		 * Ignore the return code from door_lock,
- 		 * since the open() has already succeeded,
- 		 * and the door_lock is irrelevant at this point.
- 		 */
--		if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL))
-+		if (drive->doorlocking && idedisk_set_doorlock(drive, 1))
- 			drive->doorlocking = 0;
- 	}
- 	return 0;
-@@ -1082,12 +1018,7 @@ static int idedisk_release(struct inode *inode, struct file *filp)
- 		ide_cacheflush_p(drive);
- 
- 	if (drive->removable && idkp->openers == 1) {
--		ide_task_t args;
--		memset(&args, 0, sizeof(ide_task_t));
--		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORUNLOCK;
--		args.command_type = IDE_DRIVE_TASK_NO_DATA;
--		args.handler	  = &task_no_data_intr;
--		if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL))
-+		if (drive->doorlocking && idedisk_set_doorlock(drive, 0))
- 			drive->doorlocking = 0;
- 	}
- 
-diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
-index 4703837..5bf3203 100644
---- a/drivers/ide/ide-dma.c
-+++ b/drivers/ide/ide-dma.c
-@@ -153,13 +153,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
- 		if (!dma_stat) {
- 			struct request *rq = HWGROUP(drive)->rq;
- 
--			if (rq->rq_disk) {
--				ide_driver_t *drv;
+-	for (i = 0; i < nr_pages; ++i) {
+-		mutex_lock(&vcpu->kvm->lock);
+-		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
+-		if (page)
+-			get_page(page);
+-		vcpu->pio.guest_pages[i] = page;
+-		mutex_unlock(&vcpu->kvm->lock);
+-		if (!page) {
+-			inject_gp(vcpu);
+-			free_pio_guest_pages(vcpu);
+-			return 1;
+-		}
+-	}
 -
--				drv = *(ide_driver_t **)rq->rq_disk->private_data;
--				drv->end_request(drive, 1, rq->nr_sectors);
--			} else
--				ide_end_request(drive, 1, rq->nr_sectors);
-+			task_end_request(drive, rq, stat);
- 			return ide_stopped;
- 		}
- 		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", 
-@@ -408,23 +402,29 @@ static int dma_timer_expiry (ide_drive_t *drive)
- }
- 
- /**
-- *	ide_dma_host_off	-	Generic DMA kill
-+ *	ide_dma_host_set	-	Enable/disable DMA on a host
-  *	@drive: drive to control
-  *
-- *	Perform the generic IDE controller DMA off operation. This
-- *	works for most IDE bus mastering controllers
-+ *	Enable/disable DMA on an IDE controller following generic
-+ *	bus-mastering IDE controller behaviour.
-  */
- 
--void ide_dma_host_off(ide_drive_t *drive)
-+void ide_dma_host_set(ide_drive_t *drive, int on)
- {
- 	ide_hwif_t *hwif	= HWIF(drive);
- 	u8 unit			= (drive->select.b.unit & 0x01);
- 	u8 dma_stat		= hwif->INB(hwif->dma_status);
- 
--	hwif->OUTB((dma_stat & ~(1<<(5+unit))), hwif->dma_status);
-+	if (on)
-+		dma_stat |= (1 << (5 + unit));
-+	else
-+		dma_stat &= ~(1 << (5 + unit));
-+
-+	hwif->OUTB(dma_stat, hwif->dma_status);
- }
- 
--EXPORT_SYMBOL(ide_dma_host_off);
-+EXPORT_SYMBOL_GPL(ide_dma_host_set);
-+#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
- 
- /**
-  *	ide_dma_off_quietly	-	Generic DMA kill
-@@ -438,11 +438,10 @@ void ide_dma_off_quietly(ide_drive_t *drive)
- 	drive->using_dma = 0;
- 	ide_toggle_bounce(drive, 0);
- 
--	drive->hwif->dma_host_off(drive);
-+	drive->hwif->dma_host_set(drive, 0);
- }
- 
- EXPORT_SYMBOL(ide_dma_off_quietly);
--#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
- 
- /**
-  *	ide_dma_off	-	disable DMA on a device
-@@ -455,56 +454,29 @@ EXPORT_SYMBOL(ide_dma_off_quietly);
- void ide_dma_off(ide_drive_t *drive)
- {
- 	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
--	drive->hwif->dma_off_quietly(drive);
-+	ide_dma_off_quietly(drive);
- }
- 
- EXPORT_SYMBOL(ide_dma_off);
- 
--#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
- /**
-- *	ide_dma_host_on	-	Enable DMA on a host
-- *	@drive: drive to enable for DMA
+-	pio_dev = vcpu_find_pio_dev(vcpu, port);
+-	if (!vcpu->pio.in) {
+-		/* string PIO write */
+-		ret = pio_copy_data(vcpu);
+-		if (ret >= 0 && pio_dev) {
+-			pio_string_write(pio_dev, vcpu);
+-			complete_pio(vcpu);
+-			if (vcpu->pio.count == 0)
+-				ret = 1;
+-		}
+-	} else if (pio_dev)
+-		pr_unimpl(vcpu, "no string pio read support yet, "
+-		       "port %x size %d count %ld\n",
+-			port, size, count);
+-
+-	return ret;
+-}
+-EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
+-
+-/*
+- * Check if userspace requested an interrupt window, and that the
+- * interrupt window is open.
 - *
-- *	Enable DMA on an IDE controller following generic bus mastering
-- *	IDE controller behaviour
+- * No need to exit to userspace if we already have an interrupt queued.
 - */
+-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+-					  struct kvm_run *kvm_run)
+-{
+-	return (!vcpu->irq_summary &&
+-		kvm_run->request_interrupt_window &&
+-		vcpu->interrupt_window_open &&
+-		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
+-}
 -
--void ide_dma_host_on(ide_drive_t *drive)
+-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+-			      struct kvm_run *kvm_run)
 -{
--	if (drive->using_dma) {
--		ide_hwif_t *hwif	= HWIF(drive);
--		u8 unit			= (drive->select.b.unit & 0x01);
--		u8 dma_stat		= hwif->INB(hwif->dma_status);
+-	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+-	kvm_run->cr8 = get_cr8(vcpu);
+-	kvm_run->apic_base = kvm_get_apic_base(vcpu);
+-	if (irqchip_in_kernel(vcpu->kvm))
+-		kvm_run->ready_for_interrupt_injection = 1;
+-	else
+-		kvm_run->ready_for_interrupt_injection =
+-					(vcpu->interrupt_window_open &&
+-					 vcpu->irq_summary == 0);
+-}
 -
--		hwif->OUTB((dma_stat|(1<<(5+unit))), hwif->dma_status);
+-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	int r;
+-
+-	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+-		printk("vcpu %d received sipi with vector # %x\n",
+-		       vcpu->vcpu_id, vcpu->sipi_vector);
+-		kvm_lapic_reset(vcpu);
+-		kvm_x86_ops->vcpu_reset(vcpu);
+-		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+-	}
+-
+-preempted:
+-	if (vcpu->guest_debug.enabled)
+-		kvm_x86_ops->guest_debug_pre(vcpu);
+-
+-again:
+-	r = kvm_mmu_reload(vcpu);
+-	if (unlikely(r))
+-		goto out;
+-
+-	preempt_disable();
+-
+-	kvm_x86_ops->prepare_guest_switch(vcpu);
+-	kvm_load_guest_fpu(vcpu);
+-
+-	local_irq_disable();
+-
+-	if (signal_pending(current)) {
+-		local_irq_enable();
+-		preempt_enable();
+-		r = -EINTR;
+-		kvm_run->exit_reason = KVM_EXIT_INTR;
+-		++vcpu->stat.signal_exits;
+-		goto out;
+-	}
+-
+-	if (irqchip_in_kernel(vcpu->kvm))
+-		kvm_x86_ops->inject_pending_irq(vcpu);
+-	else if (!vcpu->mmio_read_completed)
+-		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
+-
+-	vcpu->guest_mode = 1;
+-	kvm_guest_enter();
+-
+-	if (vcpu->requests)
+-		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+-			kvm_x86_ops->tlb_flush(vcpu);
+-
+-	kvm_x86_ops->run(vcpu, kvm_run);
+-
+-	vcpu->guest_mode = 0;
+-	local_irq_enable();
+-
+-	++vcpu->stat.exits;
+-
+-	/*
+-	 * We must have an instruction between local_irq_enable() and
+-	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
+-	 * the interrupt shadow.  The stat.exits increment will do nicely.
+-	 * But we need to prevent reordering, hence this barrier():
+-	 */
+-	barrier();
+-
+-	kvm_guest_exit();
+-
+-	preempt_enable();
+-
+-	/*
+-	 * Profile KVM exit RIPs:
+-	 */
+-	if (unlikely(prof_on == KVM_PROFILING)) {
+-		kvm_x86_ops->cache_regs(vcpu);
+-		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
+-	}
+-
+-	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+-
+-	if (r > 0) {
+-		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+-			r = -EINTR;
+-			kvm_run->exit_reason = KVM_EXIT_INTR;
+-			++vcpu->stat.request_irq_exits;
+-			goto out;
+-		}
+-		if (!need_resched()) {
+-			++vcpu->stat.light_exits;
+-			goto again;
+-		}
+-	}
+-
+-out:
+-	if (r > 0) {
+-		kvm_resched(vcpu);
+-		goto preempted;
 -	}
+-
+-	post_kvm_run_save(vcpu, kvm_run);
+-
+-	return r;
 -}
 -
--EXPORT_SYMBOL(ide_dma_host_on);
 -
--/**
-- *	__ide_dma_on		-	Enable DMA on a device
-+ *	ide_dma_on		-	Enable DMA on a device
-  *	@drive: drive to enable DMA on
-  *
-  *	Enable IDE DMA for a device on this IDE controller.
-  */
-- 
--int __ide_dma_on (ide_drive_t *drive)
+-static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 -{
--	/* consult the list of known "bad" drives */
--	if (__ide_dma_bad_drive(drive))
--		return 1;
- 
-+void ide_dma_on(ide_drive_t *drive)
-+{
- 	drive->using_dma = 1;
- 	ide_toggle_bounce(drive, 1);
- 
--	drive->hwif->dma_host_on(drive);
+-	int r;
+-	sigset_t sigsaved;
+-
+-	vcpu_load(vcpu);
+-
+-	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+-		kvm_vcpu_block(vcpu);
+-		vcpu_put(vcpu);
+-		return -EAGAIN;
+-	}
+-
+-	if (vcpu->sigset_active)
+-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+-
+-	/* re-sync apic's tpr */
+-	if (!irqchip_in_kernel(vcpu->kvm))
+-		set_cr8(vcpu, kvm_run->cr8);
+-
+-	if (vcpu->pio.cur_count) {
+-		r = complete_pio(vcpu);
+-		if (r)
+-			goto out;
+-	}
+-
+-	if (vcpu->mmio_needed) {
+-		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+-		vcpu->mmio_read_completed = 1;
+-		vcpu->mmio_needed = 0;
+-		r = emulate_instruction(vcpu, kvm_run,
+-					vcpu->mmio_fault_cr2, 0);
+-		if (r == EMULATE_DO_MMIO) {
+-			/*
+-			 * Read-modify-write.  Back to userspace.
+-			 */
+-			r = 0;
+-			goto out;
+-		}
+-	}
+-
+-	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
+-		kvm_x86_ops->cache_regs(vcpu);
+-		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
+-		kvm_x86_ops->decache_regs(vcpu);
+-	}
+-
+-	r = __vcpu_run(vcpu, kvm_run);
+-
+-out:
+-	if (vcpu->sigset_active)
+-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+-
+-	vcpu_put(vcpu);
+-	return r;
+-}
+-
+-static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
+-				   struct kvm_regs *regs)
+-{
+-	vcpu_load(vcpu);
+-
+-	kvm_x86_ops->cache_regs(vcpu);
+-
+-	regs->rax = vcpu->regs[VCPU_REGS_RAX];
+-	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
+-	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
+-	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
+-	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
+-	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
+-	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
+-	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
+-#ifdef CONFIG_X86_64
+-	regs->r8 = vcpu->regs[VCPU_REGS_R8];
+-	regs->r9 = vcpu->regs[VCPU_REGS_R9];
+-	regs->r10 = vcpu->regs[VCPU_REGS_R10];
+-	regs->r11 = vcpu->regs[VCPU_REGS_R11];
+-	regs->r12 = vcpu->regs[VCPU_REGS_R12];
+-	regs->r13 = vcpu->regs[VCPU_REGS_R13];
+-	regs->r14 = vcpu->regs[VCPU_REGS_R14];
+-	regs->r15 = vcpu->regs[VCPU_REGS_R15];
+-#endif
+-
+-	regs->rip = vcpu->rip;
+-	regs->rflags = kvm_x86_ops->get_rflags(vcpu);
+-
+-	/*
+-	 * Don't leak debug flags in case they were set for guest debugging
+-	 */
+-	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
+-		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+-
+-	vcpu_put(vcpu);
 -
 -	return 0;
-+	drive->hwif->dma_host_set(drive, 1);
- }
- 
--EXPORT_SYMBOL(__ide_dma_on);
-+EXPORT_SYMBOL(ide_dma_on);
- 
-+#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
- /**
-  *	ide_dma_setup	-	begin a DMA phase
-  *	@drive: target device
-@@ -759,6 +731,7 @@ EXPORT_SYMBOL_GPL(ide_find_dma_mode);
- 
- static int ide_tune_dma(ide_drive_t *drive)
- {
-+	ide_hwif_t *hwif = drive->hwif;
- 	u8 speed;
- 
- 	if (noautodma || drive->nodma || (drive->id->capability & 1) == 0)
-@@ -771,15 +744,21 @@ static int ide_tune_dma(ide_drive_t *drive)
- 	if (ide_id_dma_bug(drive))
- 		return 0;
- 
--	if (drive->hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
-+	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
- 		return config_drive_for_dma(drive);
- 
- 	speed = ide_max_dma_mode(drive);
- 
--	if (!speed)
--		return 0;
-+	if (!speed) {
-+		 /* is this really correct/needed? */
-+		if ((hwif->host_flags & IDE_HFLAG_CY82C693) &&
-+		    ide_dma_good_drive(drive))
-+			return 1;
-+		else
-+			return 0;
-+	}
- 
--	if (drive->hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
-+	if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
- 		return 0;
- 
- 	if (ide_set_dma_mode(drive, speed))
-@@ -824,25 +803,23 @@ err_out:
- 
- int ide_set_dma(ide_drive_t *drive)
- {
--	ide_hwif_t *hwif = drive->hwif;
- 	int rc;
- 
-+	/*
-+	 * Force DMAing for the beginning of the check.
-+	 * Some chipsets appear to do interesting
-+	 * things, if not checked and cleared.
-+	 *   PARANOIA!!!
-+	 */
-+	ide_dma_off_quietly(drive);
-+
- 	rc = ide_dma_check(drive);
-+	if (rc)
-+		return rc;
- 
--	switch(rc) {
--	case -1: /* DMA needs to be disabled */
--		hwif->dma_off_quietly(drive);
--		return -1;
--	case  0: /* DMA needs to be enabled */
--		return hwif->ide_dma_on(drive);
--	case  1: /* DMA setting cannot be changed */
--		break;
--	default:
--		BUG();
--		break;
+-}
+-
+-static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
+-				   struct kvm_regs *regs)
+-{
+-	vcpu_load(vcpu);
+-
+-	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
+-	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
+-	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
+-	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
+-	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
+-	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
+-	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
+-	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
+-#ifdef CONFIG_X86_64
+-	vcpu->regs[VCPU_REGS_R8] = regs->r8;
+-	vcpu->regs[VCPU_REGS_R9] = regs->r9;
+-	vcpu->regs[VCPU_REGS_R10] = regs->r10;
+-	vcpu->regs[VCPU_REGS_R11] = regs->r11;
+-	vcpu->regs[VCPU_REGS_R12] = regs->r12;
+-	vcpu->regs[VCPU_REGS_R13] = regs->r13;
+-	vcpu->regs[VCPU_REGS_R14] = regs->r14;
+-	vcpu->regs[VCPU_REGS_R15] = regs->r15;
+-#endif
+-
+-	vcpu->rip = regs->rip;
+-	kvm_x86_ops->set_rflags(vcpu, regs->rflags);
+-
+-	kvm_x86_ops->decache_regs(vcpu);
+-
+-	vcpu_put(vcpu);
+-
+-	return 0;
+-}
+-
+-static void get_segment(struct kvm_vcpu *vcpu,
+-			struct kvm_segment *var, int seg)
+-{
+-	return kvm_x86_ops->get_segment(vcpu, var, seg);
+-}
+-
+-static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+-				    struct kvm_sregs *sregs)
+-{
+-	struct descriptor_table dt;
+-	int pending_vec;
+-
+-	vcpu_load(vcpu);
+-
+-	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+-	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+-	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+-	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+-	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+-	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+-
+-	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+-	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+-
+-	kvm_x86_ops->get_idt(vcpu, &dt);
+-	sregs->idt.limit = dt.limit;
+-	sregs->idt.base = dt.base;
+-	kvm_x86_ops->get_gdt(vcpu, &dt);
+-	sregs->gdt.limit = dt.limit;
+-	sregs->gdt.base = dt.base;
+-
+-	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+-	sregs->cr0 = vcpu->cr0;
+-	sregs->cr2 = vcpu->cr2;
+-	sregs->cr3 = vcpu->cr3;
+-	sregs->cr4 = vcpu->cr4;
+-	sregs->cr8 = get_cr8(vcpu);
+-	sregs->efer = vcpu->shadow_efer;
+-	sregs->apic_base = kvm_get_apic_base(vcpu);
+-
+-	if (irqchip_in_kernel(vcpu->kvm)) {
+-		memset(sregs->interrupt_bitmap, 0,
+-		       sizeof sregs->interrupt_bitmap);
+-		pending_vec = kvm_x86_ops->get_irq(vcpu);
+-		if (pending_vec >= 0)
+-			set_bit(pending_vec, (unsigned long *)sregs->interrupt_bitmap);
+-	} else
+-		memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
+-		       sizeof sregs->interrupt_bitmap);
+-
+-	vcpu_put(vcpu);
+-
+-	return 0;
+-}
+-
+-static void set_segment(struct kvm_vcpu *vcpu,
+-			struct kvm_segment *var, int seg)
+-{
+-	return kvm_x86_ops->set_segment(vcpu, var, seg);
+-}
+-
+-static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+-				    struct kvm_sregs *sregs)
+-{
+-	int mmu_reset_needed = 0;
+-	int i, pending_vec, max_bits;
+-	struct descriptor_table dt;
+-
+-	vcpu_load(vcpu);
+-
+-	dt.limit = sregs->idt.limit;
+-	dt.base = sregs->idt.base;
+-	kvm_x86_ops->set_idt(vcpu, &dt);
+-	dt.limit = sregs->gdt.limit;
+-	dt.base = sregs->gdt.base;
+-	kvm_x86_ops->set_gdt(vcpu, &dt);
+-
+-	vcpu->cr2 = sregs->cr2;
+-	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
+-	vcpu->cr3 = sregs->cr3;
+-
+-	set_cr8(vcpu, sregs->cr8);
+-
+-	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
+-#ifdef CONFIG_X86_64
+-	kvm_x86_ops->set_efer(vcpu, sregs->efer);
+-#endif
+-	kvm_set_apic_base(vcpu, sregs->apic_base);
+-
+-	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+-
+-	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
+-	vcpu->cr0 = sregs->cr0;
+-	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
+-
+-	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
+-	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+-	if (!is_long_mode(vcpu) && is_pae(vcpu))
+-		load_pdptrs(vcpu, vcpu->cr3);
+-
+-	if (mmu_reset_needed)
+-		kvm_mmu_reset_context(vcpu);
+-
+-	if (!irqchip_in_kernel(vcpu->kvm)) {
+-		memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
+-		       sizeof vcpu->irq_pending);
+-		vcpu->irq_summary = 0;
+-		for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
+-			if (vcpu->irq_pending[i])
+-				__set_bit(i, &vcpu->irq_summary);
+-	} else {
+-		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
+-		pending_vec = find_first_bit(
+-			(const unsigned long *)sregs->interrupt_bitmap,
+-			max_bits);
+-		/* Only pending external irq is handled here */
+-		if (pending_vec < max_bits) {
+-			kvm_x86_ops->set_irq(vcpu, pending_vec);
+-			printk("Set back pending irq %d\n", pending_vec);
+-		}
 -	}
-+	ide_dma_on(drive);
- 
--	return rc;
-+	return 0;
- }
- 
- #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-@@ -968,11 +945,6 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
- 
- 	hwif->dma_base = base;
- 
--	if (hwif->mate)
--		hwif->dma_master = hwif->channel ? hwif->mate->dma_base : base;
--	else
--		hwif->dma_master = base;
 -
- 	if (!(hwif->dma_command))
- 		hwif->dma_command	= hwif->dma_base;
- 	if (!(hwif->dma_vendor1))
-@@ -984,14 +956,8 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
- 	if (!(hwif->dma_prdtable))
- 		hwif->dma_prdtable	= (hwif->dma_base + 4);
- 
--	if (!hwif->dma_off_quietly)
--		hwif->dma_off_quietly = &ide_dma_off_quietly;
--	if (!hwif->dma_host_off)
--		hwif->dma_host_off = &ide_dma_host_off;
--	if (!hwif->ide_dma_on)
--		hwif->ide_dma_on = &__ide_dma_on;
--	if (!hwif->dma_host_on)
--		hwif->dma_host_on = &ide_dma_host_on;
-+	if (!hwif->dma_host_set)
-+		hwif->dma_host_set = &ide_dma_host_set;
- 	if (!hwif->dma_setup)
- 		hwif->dma_setup = &ide_dma_setup;
- 	if (!hwif->dma_exec_cmd)
-@@ -1014,8 +980,6 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
- 		       hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
- 	}
- 	printk("\n");
+-	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+-	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+-	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+-	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+-	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+-	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+-
+-	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+-	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+-
+-	vcpu_put(vcpu);
+-
+-	return 0;
+-}
+-
+-void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+-{
+-	struct kvm_segment cs;
+-
+-	get_segment(vcpu, &cs, VCPU_SREG_CS);
+-	*db = cs.db;
+-	*l = cs.l;
+-}
+-EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 -
--	BUG_ON(!hwif->dma_master);
- }
- 
- EXPORT_SYMBOL_GPL(ide_setup_dma);
-diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
-index 04a3578..ff8232e 100644
---- a/drivers/ide/ide-floppy.c
-+++ b/drivers/ide/ide-floppy.c
-@@ -369,27 +369,6 @@ typedef struct ide_floppy_obj {
- #define	IDEFLOPPY_IOCTL_FORMAT_START		0x4602
- #define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS	0x4603
- 
--#if 0
 -/*
-- *	Special requests for our block device strategy routine.
+- * List of msr numbers which we expose to userspace through KVM_GET_MSRS
+- * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
+- *
+- * This list is modified at module load time to reflect the
+- * capabilities of the host cpu.
 - */
--#define	IDEFLOPPY_FIRST_RQ	90
+-static u32 msrs_to_save[] = {
+-	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
+-	MSR_K6_STAR,
+-#ifdef CONFIG_X86_64
+-	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
+-#endif
+-	MSR_IA32_TIME_STAMP_COUNTER,
+-};
+-
+-static unsigned num_msrs_to_save;
+-
+-static u32 emulated_msrs[] = {
+-	MSR_IA32_MISC_ENABLE,
+-};
+-
+-static __init void kvm_init_msr_list(void)
+-{
+-	u32 dummy[2];
+-	unsigned i, j;
+-
+-	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
+-		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
+-			continue;
+-		if (j < i)
+-			msrs_to_save[j] = msrs_to_save[i];
+-		j++;
+-	}
+-	num_msrs_to_save = j;
+-}
 -
 -/*
-- * 	IDEFLOPPY_PC_RQ is used to queue a packet command in the request queue.
+- * Adapt set_msr() to msr_io()'s calling convention
 - */
--#define	IDEFLOPPY_PC_RQ		90
+-static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+-{
+-	return kvm_set_msr(vcpu, index, *data);
+-}
 -
--#define IDEFLOPPY_LAST_RQ	90
+-/*
+- * Read or write a bunch of msrs. All parameters are kernel addresses.
+- *
+- * @return number of msrs set successfully.
+- */
+-static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
+-		    struct kvm_msr_entry *entries,
+-		    int (*do_msr)(struct kvm_vcpu *vcpu,
+-				  unsigned index, u64 *data))
+-{
+-	int i;
+-
+-	vcpu_load(vcpu);
+-
+-	for (i = 0; i < msrs->nmsrs; ++i)
+-		if (do_msr(vcpu, entries[i].index, &entries[i].data))
+-			break;
+-
+-	vcpu_put(vcpu);
+-
+-	return i;
+-}
 -
 -/*
-- *	A macro which can be used to check if a given request command
-- *	originated in the driver or in the buffer cache layer.
+- * Read or write a bunch of msrs. Parameters are user addresses.
+- *
+- * @return number of msrs set successfully.
 - */
--#define IDEFLOPPY_RQ_CMD(cmd) 	((cmd >= IDEFLOPPY_FIRST_RQ) && (cmd <= IDEFLOPPY_LAST_RQ))
+-static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
+-		  int (*do_msr)(struct kvm_vcpu *vcpu,
+-				unsigned index, u64 *data),
+-		  int writeback)
+-{
+-	struct kvm_msrs msrs;
+-	struct kvm_msr_entry *entries;
+-	int r, n;
+-	unsigned size;
 -
--#endif
+-	r = -EFAULT;
+-	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
+-		goto out;
 -
- /*
-  *	Error codes which are returned in rq->errors to the higher part
-  *	of the driver.
-@@ -793,9 +772,8 @@ static void idefloppy_retry_pc (ide_drive_t *drive)
- {
- 	idefloppy_pc_t *pc;
- 	struct request *rq;
--	atapi_error_t error;
- 
--	error.all = HWIF(drive)->INB(IDE_ERROR_REG);
-+	(void)drive->hwif->INB(IDE_ERROR_REG);
- 	pc = idefloppy_next_pc_storage(drive);
- 	rq = idefloppy_next_rq_storage(drive);
- 	idefloppy_create_request_sense_cmd(pc);
-@@ -809,12 +787,12 @@ static void idefloppy_retry_pc (ide_drive_t *drive)
- static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
- {
- 	idefloppy_floppy_t *floppy = drive->driver_data;
--	atapi_status_t status;
--	atapi_bcount_t bcount;
--	atapi_ireason_t ireason;
-+	ide_hwif_t *hwif = drive->hwif;
- 	idefloppy_pc_t *pc = floppy->pc;
- 	struct request *rq = pc->rq;
- 	unsigned int temp;
-+	u16 bcount;
-+	u8 stat, ireason;
- 
- 	debug_log(KERN_INFO "ide-floppy: Reached %s interrupt handler\n",
- 		__FUNCTION__);
-@@ -830,16 +808,16 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
- 	}
- 
- 	/* Clear the interrupt */
--	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
-+	stat = drive->hwif->INB(IDE_STATUS_REG);
- 
--	if (!status.b.drq) {			/* No more interrupts */
-+	if ((stat & DRQ_STAT) == 0) {		/* No more interrupts */
- 		debug_log(KERN_INFO "Packet command completed, %d bytes "
- 			"transferred\n", pc->actually_transferred);
- 		clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
- 
- 		local_irq_enable_in_hardirq();
- 
--		if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) {
-+		if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) {
- 			/* Error detected */
- 			debug_log(KERN_INFO "ide-floppy: %s: I/O error\n",
- 				drive->name);
-@@ -870,32 +848,32 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
- 	}
- 
- 	/* Get the number of bytes to transfer */
--	bcount.b.high = HWIF(drive)->INB(IDE_BCOUNTH_REG);
--	bcount.b.low = HWIF(drive)->INB(IDE_BCOUNTL_REG);
-+	bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) |
-+		  hwif->INB(IDE_BCOUNTL_REG);
- 	/* on this interrupt */
--	ireason.all = HWIF(drive)->INB(IDE_IREASON_REG);
-+	ireason = hwif->INB(IDE_IREASON_REG);
- 
--	if (ireason.b.cod) {
-+	if (ireason & CD) {
- 		printk(KERN_ERR "ide-floppy: CoD != 0 in idefloppy_pc_intr\n");
- 		return ide_do_reset(drive);
- 	}
--	if (ireason.b.io == test_bit(PC_WRITING, &pc->flags)) {
-+	if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) {
- 		/* Hopefully, we will never get here */
- 		printk(KERN_ERR "ide-floppy: We wanted to %s, ",
--			ireason.b.io ? "Write":"Read");
-+				(ireason & IO) ? "Write" : "Read");
- 		printk(KERN_ERR "but the floppy wants us to %s !\n",
--			ireason.b.io ? "Read":"Write");
-+				(ireason & IO) ? "Read" : "Write");
- 		return ide_do_reset(drive);
- 	}
- 	if (!test_bit(PC_WRITING, &pc->flags)) {
- 		/* Reading - Check that we have enough space */
--		temp = pc->actually_transferred + bcount.all;
-+		temp = pc->actually_transferred + bcount;
- 		if (temp > pc->request_transfer) {
- 			if (temp > pc->buffer_size) {
- 				printk(KERN_ERR "ide-floppy: The floppy wants "
- 					"to send us more data than expected "
- 					"- discarding data\n");
--				idefloppy_discard_data(drive,bcount.all);
-+				idefloppy_discard_data(drive, bcount);
- 				BUG_ON(HWGROUP(drive)->handler != NULL);
- 				ide_set_handler(drive,
- 						&idefloppy_pc_intr,
-@@ -911,23 +889,21 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
- 	if (test_bit(PC_WRITING, &pc->flags)) {
- 		if (pc->buffer != NULL)
- 			/* Write the current buffer */
--			HWIF(drive)->atapi_output_bytes(drive,
--						pc->current_position,
--						bcount.all);
-+			hwif->atapi_output_bytes(drive, pc->current_position,
-+						 bcount);
- 		else
--			idefloppy_output_buffers(drive, pc, bcount.all);
-+			idefloppy_output_buffers(drive, pc, bcount);
- 	} else {
- 		if (pc->buffer != NULL)
- 			/* Read the current buffer */
--			HWIF(drive)->atapi_input_bytes(drive,
--						pc->current_position,
--						bcount.all);
-+			hwif->atapi_input_bytes(drive, pc->current_position,
-+						bcount);
- 		else
--			idefloppy_input_buffers(drive, pc, bcount.all);
-+			idefloppy_input_buffers(drive, pc, bcount);
- 	}
- 	/* Update the current position */
--	pc->actually_transferred += bcount.all;
--	pc->current_position += bcount.all;
-+	pc->actually_transferred += bcount;
-+	pc->current_position += bcount;
- 
- 	BUG_ON(HWGROUP(drive)->handler != NULL);
- 	ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL);		/* And set the interrupt handler again */
-@@ -943,15 +919,15 @@ static ide_startstop_t idefloppy_transfer_pc (ide_drive_t *drive)
- {
- 	ide_startstop_t startstop;
- 	idefloppy_floppy_t *floppy = drive->driver_data;
--	atapi_ireason_t ireason;
-+	u8 ireason;
- 
- 	if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
- 		printk(KERN_ERR "ide-floppy: Strange, packet command "
- 				"initiated yet DRQ isn't asserted\n");
- 		return startstop;
- 	}
--	ireason.all = HWIF(drive)->INB(IDE_IREASON_REG);
--	if (!ireason.b.cod || ireason.b.io) {
-+	ireason = drive->hwif->INB(IDE_IREASON_REG);
-+	if ((ireason & CD) == 0 || (ireason & IO)) {
- 		printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while "
- 				"issuing a packet command\n");
- 		return ide_do_reset(drive);
-@@ -991,15 +967,15 @@ static ide_startstop_t idefloppy_transfer_pc1 (ide_drive_t *drive)
- {
- 	idefloppy_floppy_t *floppy = drive->driver_data;
- 	ide_startstop_t startstop;
--	atapi_ireason_t ireason;
-+	u8 ireason;
- 
- 	if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
- 		printk(KERN_ERR "ide-floppy: Strange, packet command "
- 				"initiated yet DRQ isn't asserted\n");
- 		return startstop;
- 	}
--	ireason.all = HWIF(drive)->INB(IDE_IREASON_REG);
--	if (!ireason.b.cod || ireason.b.io) {
-+	ireason = drive->hwif->INB(IDE_IREASON_REG);
-+	if ((ireason & CD) == 0 || (ireason & IO)) {
- 		printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) "
- 				"while issuing a packet command\n");
- 		return ide_do_reset(drive);
-@@ -1041,21 +1017,9 @@ static ide_startstop_t idefloppy_issue_pc (ide_drive_t *drive, idefloppy_pc_t *p
- {
- 	idefloppy_floppy_t *floppy = drive->driver_data;
- 	ide_hwif_t *hwif = drive->hwif;
--	atapi_feature_t feature;
--	atapi_bcount_t bcount;
- 	ide_handler_t *pkt_xfer_routine;
+-	r = -E2BIG;
+-	if (msrs.nmsrs >= MAX_IO_MSRS)
+-		goto out;
 -
--#if 0 /* Accessing floppy->pc is not valid here, the previous pc may be gone
--         and have lived on another thread's stack; that stack may have become
--         unmapped meanwhile (CONFIG_DEBUG_PAGEALLOC). */
--#if IDEFLOPPY_DEBUG_BUGS
--	if (floppy->pc->c[0] == IDEFLOPPY_REQUEST_SENSE_CMD &&
--	    pc->c[0] == IDEFLOPPY_REQUEST_SENSE_CMD) {
--		printk(KERN_ERR "ide-floppy: possible ide-floppy.c bug - "
--			"Two request sense in serial were issued\n");
--	}
--#endif /* IDEFLOPPY_DEBUG_BUGS */
--#endif
-+	u16 bcount;
-+	u8 dma;
- 
- 	if (floppy->failed_pc == NULL &&
- 	    pc->c[0] != IDEFLOPPY_REQUEST_SENSE_CMD)
-@@ -1093,25 +1057,20 @@ static ide_startstop_t idefloppy_issue_pc (ide_drive_t *drive, idefloppy_pc_t *p
- 	/* We haven't transferred any data yet */
- 	pc->actually_transferred = 0;
- 	pc->current_position = pc->buffer;
--	bcount.all = min(pc->request_transfer, 63 * 1024);
-+	bcount = min(pc->request_transfer, 63 * 1024);
- 
- 	if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags))
- 		ide_dma_off(drive);
- 
--	feature.all = 0;
-+	dma = 0;
- 
- 	if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma)
--		feature.b.dma = !hwif->dma_setup(drive);
-+		dma = !hwif->dma_setup(drive);
- 
--	if (IDE_CONTROL_REG)
--		HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
--	/* Use PIO/DMA */
--	HWIF(drive)->OUTB(feature.all, IDE_FEATURE_REG);
--	HWIF(drive)->OUTB(bcount.b.high, IDE_BCOUNTH_REG);
--	HWIF(drive)->OUTB(bcount.b.low, IDE_BCOUNTL_REG);
--	HWIF(drive)->OUTB(drive->select.all, IDE_SELECT_REG);
-+	ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
-+			   IDE_TFLAG_OUT_DEVICE, bcount, dma);
- 
--	if (feature.b.dma) {	/* Begin DMA, if necessary */
-+	if (dma) {	/* Begin DMA, if necessary */
- 		set_bit(PC_DMA_IN_PROGRESS, &pc->flags);
- 		hwif->dma_start(drive);
- 	}
-@@ -1665,14 +1624,14 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
- 		/* Else assume format_unit has finished, and we're
- 		** at 0x10000 */
- 	} else {
--		atapi_status_t status;
- 		unsigned long flags;
-+		u8 stat;
- 
- 		local_irq_save(flags);
--		status.all = HWIF(drive)->INB(IDE_STATUS_REG);
-+		stat = drive->hwif->INB(IDE_STATUS_REG);
- 		local_irq_restore(flags);
- 
--		progress_indication = !status.b.dsc ? 0 : 0x10000;
-+		progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
- 	}
- 	if (put_user(progress_indication, arg))
- 		return (-EFAULT);
-diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
-index 0f72b98..bb30c29 100644
---- a/drivers/ide/ide-generic.c
-+++ b/drivers/ide/ide-generic.c
-@@ -14,10 +14,16 @@
- 
- static int __init ide_generic_init(void)
- {
-+	u8 idx[MAX_HWIFS];
-+	int i;
-+
- 	if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
- 		ide_get_lock(NULL, NULL); /* for atari only */
- 
--	(void)ideprobe_init();
-+	for (i = 0; i < MAX_HWIFS; i++)
-+		idx[i] = ide_hwifs[i].present ? 0xff : i;
-+
-+	ide_device_add_all(idx);
- 
- 	if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
- 		ide_release_lock();	/* for atari only */
-diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
-index bef781f..e6bb9cf 100644
---- a/drivers/ide/ide-io.c
-+++ b/drivers/ide/ide-io.c
-@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
- 			     int uptodate, unsigned int nr_bytes, int dequeue)
- {
- 	int ret = 1;
-+	int error = 0;
-+
-+	if (uptodate <= 0)
-+		error = uptodate ? uptodate : -EIO;
- 
- 	/*
- 	 * if failfast is set on a request, override number of sectors and
- 	 * complete the whole request right now
- 	 */
--	if (blk_noretry_request(rq) && end_io_error(uptodate))
-+	if (blk_noretry_request(rq) && error)
- 		nr_bytes = rq->hard_nr_sectors << 9;
- 
--	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
-+	if (!blk_fs_request(rq) && error && !rq->errors)
- 		rq->errors = -EIO;
- 
- 	/*
-@@ -75,17 +79,12 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
- 	 */
- 	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
- 		drive->state = 0;
--		HWGROUP(drive)->hwif->ide_dma_on(drive);
-+		ide_dma_on(drive);
- 	}
- 
--	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
--		add_disk_randomness(rq->rq_disk);
--		if (dequeue) {
--			if (!list_empty(&rq->queuelist))
--				blkdev_dequeue_request(rq);
-+	if (!__blk_end_request(rq, error, nr_bytes)) {
-+		if (dequeue)
- 			HWGROUP(drive)->rq = NULL;
--		}
--		end_that_request_last(rq, uptodate);
- 		ret = 0;
- 	}
- 
-@@ -189,18 +188,14 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
- 			return ide_stopped;
- 		}
- 		if (ide_id_has_flush_cache_ext(drive->id))
--			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
-+			args->tf.command = WIN_FLUSH_CACHE_EXT;
- 		else
--			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
--		args->command_type = IDE_DRIVE_TASK_NO_DATA;
--		args->handler	   = &task_no_data_intr;
--		return do_rw_taskfile(drive, args);
-+			args->tf.command = WIN_FLUSH_CACHE;
-+		goto out_do_tf;
- 
- 	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
--		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1;
--		args->command_type = IDE_DRIVE_TASK_NO_DATA;
--		args->handler	   = &task_no_data_intr;
--		return do_rw_taskfile(drive, args);
-+		args->tf.command = WIN_STANDBYNOW1;
-+		goto out_do_tf;
- 
- 	case idedisk_pm_restore_pio:	/* Resume step 1 (restore PIO) */
- 		ide_set_max_pio(drive);
-@@ -214,10 +209,8 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
- 		return ide_stopped;
- 
- 	case idedisk_pm_idle:		/* Resume step 2 (idle) */
--		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
--		args->command_type = IDE_DRIVE_TASK_NO_DATA;
--		args->handler = task_no_data_intr;
--		return do_rw_taskfile(drive, args);
-+		args->tf.command = WIN_IDLEIMMEDIATE;
-+		goto out_do_tf;
- 
- 	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
- 		/*
-@@ -225,9 +218,8 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
- 		 * we could be smarter and check for current xfer_speed
- 		 * in struct drive etc...
- 		 */
--		if (drive->hwif->ide_dma_on == NULL)
-+		if (drive->hwif->dma_host_set == NULL)
- 			break;
--		drive->hwif->dma_off_quietly(drive);
- 		/*
- 		 * TODO: respect ->using_dma setting
- 		 */
-@@ -236,6 +228,11 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
- 	}
- 	pm->pm_step = ide_pm_state_completed;
- 	return ide_stopped;
-+
-+out_do_tf:
-+	args->tf_flags	 = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+	args->data_phase = TASKFILE_NO_DATA;
-+	return do_rw_taskfile(drive, args);
- }
- 
- /**
-@@ -292,12 +289,54 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
- 		drive->blocked = 0;
- 		blk_start_queue(drive->queue);
- 	}
--	blkdev_dequeue_request(rq);
- 	HWGROUP(drive)->rq = NULL;
--	end_that_request_last(rq, 1);
-+	if (__blk_end_request(rq, 0, 0))
-+		BUG();
- 	spin_unlock_irqrestore(&ide_lock, flags);
- }
- 
-+void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
-+{
-+	ide_hwif_t *hwif = drive->hwif;
-+	struct ide_taskfile *tf = &task->tf;
-+
-+	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
-+		u16 data = hwif->INW(IDE_DATA_REG);
-+
-+		tf->data = data & 0xff;
-+		tf->hob_data = (data >> 8) & 0xff;
-+	}
-+
-+	/* be sure we're looking at the low order bits */
-+	hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
-+
-+	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
-+		tf->nsect  = hwif->INB(IDE_NSECTOR_REG);
-+	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
-+		tf->lbal   = hwif->INB(IDE_SECTOR_REG);
-+	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
-+		tf->lbam   = hwif->INB(IDE_LCYL_REG);
-+	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
-+		tf->lbah   = hwif->INB(IDE_HCYL_REG);
-+	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
-+		tf->device = hwif->INB(IDE_SELECT_REG);
-+
-+	if (task->tf_flags & IDE_TFLAG_LBA48) {
-+		hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG);
-+
-+		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
-+			tf->hob_feature = hwif->INB(IDE_FEATURE_REG);
-+		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
-+			tf->hob_nsect   = hwif->INB(IDE_NSECTOR_REG);
-+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
-+			tf->hob_lbal    = hwif->INB(IDE_SECTOR_REG);
-+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
-+			tf->hob_lbam    = hwif->INB(IDE_LCYL_REG);
-+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
-+			tf->hob_lbah    = hwif->INB(IDE_HCYL_REG);
-+	}
-+}
-+
- /**
-  *	ide_end_drive_cmd	-	end an explicit drive command
-  *	@drive: command 
-@@ -314,7 +353,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
-  
- void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
- {
--	ide_hwif_t *hwif = HWIF(drive);
- 	unsigned long flags;
- 	struct request *rq;
- 
-@@ -322,61 +360,18 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
- 	rq = HWGROUP(drive)->rq;
- 	spin_unlock_irqrestore(&ide_lock, flags);
- 
--	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
--		u8 *args = (u8 *) rq->buffer;
--		if (rq->errors == 0)
--			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+-	r = -ENOMEM;
+-	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
+-	entries = vmalloc(size);
+-	if (!entries)
+-		goto out;
 -
--		if (args) {
--			args[0] = stat;
--			args[1] = err;
--			args[2] = hwif->INB(IDE_NSECTOR_REG);
--		}
--	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
--		u8 *args = (u8 *) rq->buffer;
--		if (rq->errors == 0)
--			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+-	r = -EFAULT;
+-	if (copy_from_user(entries, user_msrs->entries, size))
+-		goto out_free;
 -
--		if (args) {
--			args[0] = stat;
--			args[1] = err;
--			/* be sure we're looking at the low order bits */
--			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
--			args[2] = hwif->INB(IDE_NSECTOR_REG);
--			args[3] = hwif->INB(IDE_SECTOR_REG);
--			args[4] = hwif->INB(IDE_LCYL_REG);
--			args[5] = hwif->INB(IDE_HCYL_REG);
--			args[6] = hwif->INB(IDE_SELECT_REG);
--		}
--	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-+	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- 		ide_task_t *args = (ide_task_t *) rq->special;
- 		if (rq->errors == 0)
- 			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
- 			
- 		if (args) {
--			if (args->tf_in_flags.b.data) {
--				u16 data				= hwif->INW(IDE_DATA_REG);
--				args->tfRegister[IDE_DATA_OFFSET]	= (data) & 0xFF;
--				args->hobRegister[IDE_DATA_OFFSET]	= (data >> 8) & 0xFF;
--			}
--			args->tfRegister[IDE_ERROR_OFFSET]   = err;
--			/* be sure we're looking at the low order bits */
--			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
--			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
--			args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
--			args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
--			args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
--			args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
--			args->tfRegister[IDE_STATUS_OFFSET]  = stat;
+-	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
+-	if (r < 0)
+-		goto out_free;
 -
--			if (drive->addressing == 1) {
--				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
--				args->hobRegister[IDE_FEATURE_OFFSET]	= hwif->INB(IDE_FEATURE_REG);
--				args->hobRegister[IDE_NSECTOR_OFFSET]	= hwif->INB(IDE_NSECTOR_REG);
--				args->hobRegister[IDE_SECTOR_OFFSET]	= hwif->INB(IDE_SECTOR_REG);
--				args->hobRegister[IDE_LCYL_OFFSET]	= hwif->INB(IDE_LCYL_REG);
--				args->hobRegister[IDE_HCYL_OFFSET]	= hwif->INB(IDE_HCYL_REG);
--			}
-+			struct ide_taskfile *tf = &args->tf;
-+
-+			tf->error = err;
-+			tf->status = stat;
-+
-+			ide_tf_read(drive, args);
- 		}
- 	} else if (blk_pm_request(rq)) {
- 		struct request_pm_state *pm = rq->data;
-@@ -391,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
- 	}
- 
- 	spin_lock_irqsave(&ide_lock, flags);
--	blkdev_dequeue_request(rq);
- 	HWGROUP(drive)->rq = NULL;
- 	rq->errors = err;
--	end_that_request_last(rq, !rq->errors);
-+	if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
-+		BUG();
- 	spin_unlock_irqrestore(&ide_lock, flags);
- }
- 
-@@ -615,90 +610,26 @@ ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
- 		return __ide_abort(drive, rq);
- }
- 
--/**
-- *	ide_cmd		-	issue a simple drive command
-- *	@drive: drive the command is for
-- *	@cmd: command byte
-- *	@nsect: sector byte
-- *	@handler: handler for the command completion
-- *
-- *	Issue a simple drive command with interrupts.
-- *	The drive must be selected beforehand.
+-	r = -EFAULT;
+-	if (writeback && copy_to_user(user_msrs->entries, entries, size))
+-		goto out_free;
+-
+-	r = n;
+-
+-out_free:
+-	vfree(entries);
+-out:
+-	return r;
+-}
+-
+-/*
+- * Translate a guest virtual address to a guest physical address.
 - */
+-static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+-				    struct kvm_translation *tr)
+-{
+-	unsigned long vaddr = tr->linear_address;
+-	gpa_t gpa;
 -
--static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
--		ide_handler_t *handler)
-+static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
- {
--	ide_hwif_t *hwif = HWIF(drive);
--	if (IDE_CONTROL_REG)
--		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);	/* clear nIEN */
--	SELECT_MASK(drive,0);
--	hwif->OUTB(nsect,IDE_NSECTOR_REG);
--	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
-+	tf->nsect   = drive->sect;
-+	tf->lbal    = drive->sect;
-+	tf->lbam    = drive->cyl;
-+	tf->lbah    = drive->cyl >> 8;
-+	tf->device  = ((drive->head - 1) | drive->select.all) & ~ATA_LBA;
-+	tf->command = WIN_SPECIFY;
- }
- 
--/**
-- *	drive_cmd_intr		- 	drive command completion interrupt
-- *	@drive: drive the completion interrupt occurred on
-- *
-- *	drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
-- *	We do any necessary data reading and then wait for the drive to
-- *	go non busy. At that point we may read the error data and complete
-- *	the request
+-	vcpu_load(vcpu);
+-	mutex_lock(&vcpu->kvm->lock);
+-	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
+-	tr->physical_address = gpa;
+-	tr->valid = gpa != UNMAPPED_GVA;
+-	tr->writeable = 1;
+-	tr->usermode = 0;
+-	mutex_unlock(&vcpu->kvm->lock);
+-	vcpu_put(vcpu);
+-
+-	return 0;
+-}
+-
+-static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+-				    struct kvm_interrupt *irq)
+-{
+-	if (irq->irq < 0 || irq->irq >= 256)
+-		return -EINVAL;
+-	if (irqchip_in_kernel(vcpu->kvm))
+-		return -ENXIO;
+-	vcpu_load(vcpu);
+-
+-	set_bit(irq->irq, vcpu->irq_pending);
+-	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
+-
+-	vcpu_put(vcpu);
+-
+-	return 0;
+-}
+-
+-static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+-				      struct kvm_debug_guest *dbg)
+-{
+-	int r;
+-
+-	vcpu_load(vcpu);
+-
+-	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
+-
+-	vcpu_put(vcpu);
+-
+-	return r;
+-}
+-
+-static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
+-				    unsigned long address,
+-				    int *type)
+-{
+-	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
+-	unsigned long pgoff;
+-	struct page *page;
+-
+-	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+-	if (pgoff == 0)
+-		page = virt_to_page(vcpu->run);
+-	else if (pgoff == KVM_PIO_PAGE_OFFSET)
+-		page = virt_to_page(vcpu->pio_data);
+-	else
+-		return NOPAGE_SIGBUS;
+-	get_page(page);
+-	if (type != NULL)
+-		*type = VM_FAULT_MINOR;
+-
+-	return page;
+-}
+-
+-static struct vm_operations_struct kvm_vcpu_vm_ops = {
+-	.nopage = kvm_vcpu_nopage,
+-};
+-
+-static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
+-{
+-	vma->vm_ops = &kvm_vcpu_vm_ops;
+-	return 0;
+-}
+-
+-static int kvm_vcpu_release(struct inode *inode, struct file *filp)
+-{
+-	struct kvm_vcpu *vcpu = filp->private_data;
+-
+-	fput(vcpu->kvm->filp);
+-	return 0;
+-}
+-
+-static struct file_operations kvm_vcpu_fops = {
+-	.release        = kvm_vcpu_release,
+-	.unlocked_ioctl = kvm_vcpu_ioctl,
+-	.compat_ioctl   = kvm_vcpu_ioctl,
+-	.mmap           = kvm_vcpu_mmap,
+-};
+-
+-/*
+- * Allocates an inode for the vcpu.
 - */
-- 
--static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
-+static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
- {
--	struct request *rq = HWGROUP(drive)->rq;
--	ide_hwif_t *hwif = HWIF(drive);
--	u8 *args = (u8 *) rq->buffer;
--	u8 stat = hwif->INB(IDE_STATUS_REG);
--	int retries = 10;
+-static int create_vcpu_fd(struct kvm_vcpu *vcpu)
+-{
+-	int fd, r;
+-	struct inode *inode;
+-	struct file *file;
 -
--	local_irq_enable_in_hardirq();
--	if (rq->cmd_type == REQ_TYPE_ATA_CMD &&
--	    (stat & DRQ_STAT) && args && args[3]) {
--		u8 io_32bit = drive->io_32bit;
--		drive->io_32bit = 0;
--		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
--		drive->io_32bit = io_32bit;
--		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
--			udelay(100);
+-	r = anon_inode_getfd(&fd, &inode, &file,
+-			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
+-	if (r)
+-		return r;
+-	atomic_inc(&vcpu->kvm->filp->f_count);
+-	return fd;
+-}
+-
+-/*
+- * Creates some virtual cpus.  Good luck creating more than one.
+- */
+-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+-{
+-	int r;
+-	struct kvm_vcpu *vcpu;
+-
+-	if (!valid_vcpu(n))
+-		return -EINVAL;
+-
+-	vcpu = kvm_x86_ops->vcpu_create(kvm, n);
+-	if (IS_ERR(vcpu))
+-		return PTR_ERR(vcpu);
+-
+-	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
+-
+-	/* We do fxsave: this must be aligned. */
+-	BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+-
+-	vcpu_load(vcpu);
+-	r = kvm_mmu_setup(vcpu);
+-	vcpu_put(vcpu);
+-	if (r < 0)
+-		goto free_vcpu;
+-
+-	mutex_lock(&kvm->lock);
+-	if (kvm->vcpus[n]) {
+-		r = -EEXIST;
+-		mutex_unlock(&kvm->lock);
+-		goto mmu_unload;
 -	}
+-	kvm->vcpus[n] = vcpu;
+-	mutex_unlock(&kvm->lock);
 -
--	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
--		return ide_error(drive, "drive_cmd", stat);
--		/* calls ide_end_drive_cmd */
--	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
--	return ide_stopped;
-+	tf->nsect   = drive->sect;
-+	tf->command = WIN_RESTORE;
- }
- 
--static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
-+static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
- {
--	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
--	task->tfRegister[IDE_SECTOR_OFFSET]  = drive->sect;
--	task->tfRegister[IDE_LCYL_OFFSET]    = drive->cyl;
--	task->tfRegister[IDE_HCYL_OFFSET]    = drive->cyl>>8;
--	task->tfRegister[IDE_SELECT_OFFSET]  = ((drive->head-1)|drive->select.all)&0xBF;
--	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;
+-	/* Now it's all set up, let userspace reach it */
+-	r = create_vcpu_fd(vcpu);
+-	if (r < 0)
+-		goto unlink;
+-	return r;
 -
--	task->handler = &set_geometry_intr;
+-unlink:
+-	mutex_lock(&kvm->lock);
+-	kvm->vcpus[n] = NULL;
+-	mutex_unlock(&kvm->lock);
+-
+-mmu_unload:
+-	vcpu_load(vcpu);
+-	kvm_mmu_unload(vcpu);
+-	vcpu_put(vcpu);
+-
+-free_vcpu:
+-	kvm_x86_ops->vcpu_free(vcpu);
+-	return r;
 -}
 -
--static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
+-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
 -{
--	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
--	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;
+-	u64 efer;
+-	int i;
+-	struct kvm_cpuid_entry *e, *entry;
 -
--	task->handler = &recal_intr;
+-	rdmsrl(MSR_EFER, efer);
+-	entry = NULL;
+-	for (i = 0; i < vcpu->cpuid_nent; ++i) {
+-		e = &vcpu->cpuid_entries[i];
+-		if (e->function == 0x80000001) {
+-			entry = e;
+-			break;
+-		}
+-	}
+-	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
+-		entry->edx &= ~(1 << 20);
+-		printk(KERN_INFO "kvm: guest NX capability removed\n");
+-	}
 -}
 -
--static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
+-static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+-				    struct kvm_cpuid *cpuid,
+-				    struct kvm_cpuid_entry __user *entries)
 -{
--	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
--	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;
+-	int r;
 -
--	task->handler = &set_multmode_intr;
-+	tf->nsect   = drive->mult_req;
-+	tf->command = WIN_SETMULT;
- }
- 
- static ide_startstop_t ide_disk_special(ide_drive_t *drive)
-@@ -707,19 +638,19 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
- 	ide_task_t args;
- 
- 	memset(&args, 0, sizeof(ide_task_t));
--	args.command_type = IDE_DRIVE_TASK_NO_DATA;
-+	args.data_phase = TASKFILE_NO_DATA;
- 
- 	if (s->b.set_geometry) {
- 		s->b.set_geometry = 0;
--		ide_init_specify_cmd(drive, &args);
-+		ide_tf_set_specify_cmd(drive, &args.tf);
- 	} else if (s->b.recalibrate) {
- 		s->b.recalibrate = 0;
--		ide_init_restore_cmd(drive, &args);
-+		ide_tf_set_restore_cmd(drive, &args.tf);
- 	} else if (s->b.set_multmode) {
- 		s->b.set_multmode = 0;
- 		if (drive->mult_req > drive->id->max_multsect)
- 			drive->mult_req = drive->id->max_multsect;
--		ide_init_setmult_cmd(drive, &args);
-+		ide_tf_set_setmult_cmd(drive, &args.tf);
- 	} else if (s->all) {
- 		int special = s->all;
- 		s->all = 0;
-@@ -727,6 +658,9 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
- 		return ide_stopped;
- 	}
- 
-+	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
-+			IDE_TFLAG_CUSTOM_HANDLER;
-+
- 	do_rw_taskfile(drive, &args);
- 
- 	return ide_started;
-@@ -801,7 +735,7 @@ static ide_startstop_t do_special (ide_drive_t *drive)
- 
- 			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
- 				if (keep_dma)
--					hwif->ide_dma_on(drive);
-+					ide_dma_on(drive);
- 			}
- 		}
- 
-@@ -861,13 +795,10 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
- 		struct request *rq)
- {
- 	ide_hwif_t *hwif = HWIF(drive);
--	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-- 		ide_task_t *args = rq->special;
-- 
--		if (!args)
--			goto done;
-+	ide_task_t *task = rq->special;
- 
--		hwif->data_phase = args->data_phase;
-+	if (task) {
-+		hwif->data_phase = task->data_phase;
- 
- 		switch (hwif->data_phase) {
- 		case TASKFILE_MULTI_OUT:
-@@ -880,57 +811,9 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
- 			break;
- 		}
- 
--		if (args->tf_out_flags.all != 0) 
--			return flagged_taskfile(drive, args);
--		return do_rw_taskfile(drive, args);
--	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
--		u8 *args = rq->buffer;
-- 
--		if (!args)
--			goto done;
--#ifdef DEBUG
-- 		printk("%s: DRIVE_TASK_CMD ", drive->name);
-- 		printk("cmd=0x%02x ", args[0]);
-- 		printk("fr=0x%02x ", args[1]);
-- 		printk("ns=0x%02x ", args[2]);
-- 		printk("sc=0x%02x ", args[3]);
-- 		printk("lcyl=0x%02x ", args[4]);
-- 		printk("hcyl=0x%02x ", args[5]);
-- 		printk("sel=0x%02x\n", args[6]);
--#endif
-- 		hwif->OUTB(args[1], IDE_FEATURE_REG);
-- 		hwif->OUTB(args[3], IDE_SECTOR_REG);
-- 		hwif->OUTB(args[4], IDE_LCYL_REG);
-- 		hwif->OUTB(args[5], IDE_HCYL_REG);
-- 		hwif->OUTB((args[6] & 0xEF)|drive->select.all, IDE_SELECT_REG);
-- 		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
-- 		return ide_started;
-- 	} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
-- 		u8 *args = rq->buffer;
+-	r = -E2BIG;
+-	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+-		goto out;
+-	r = -EFAULT;
+-	if (copy_from_user(&vcpu->cpuid_entries, entries,
+-			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+-		goto out;
+-	vcpu->cpuid_nent = cpuid->nent;
+-	cpuid_fix_nx_cap(vcpu);
+-	return 0;
 -
--		if (!args)
--			goto done;
--#ifdef DEBUG
-- 		printk("%s: DRIVE_CMD ", drive->name);
-- 		printk("cmd=0x%02x ", args[0]);
-- 		printk("sc=0x%02x ", args[1]);
-- 		printk("fr=0x%02x ", args[2]);
-- 		printk("xx=0x%02x\n", args[3]);
--#endif
-- 		if (args[0] == WIN_SMART) {
-- 			hwif->OUTB(0x4f, IDE_LCYL_REG);
-- 			hwif->OUTB(0xc2, IDE_HCYL_REG);
-- 			hwif->OUTB(args[2],IDE_FEATURE_REG);
-- 			hwif->OUTB(args[1],IDE_SECTOR_REG);
-- 			ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
-- 			return ide_started;
-- 		}
-- 		hwif->OUTB(args[2],IDE_FEATURE_REG);
-- 		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
-- 		return ide_started;
-- 	}
+-out:
+-	return r;
+-}
 -
--done:
-+		return do_rw_taskfile(drive, task);
-+	}
-+
-  	/*
-  	 * NULL is actually a valid way of waiting for
-  	 * all current requests to be flushed from the queue.
-@@ -970,8 +853,7 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
- 		if (rc)
- 			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
- 		SELECT_DRIVE(drive);
--		if (IDE_CONTROL_REG)
--			HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
-+		ide_set_irq(drive, 1);
- 		rc = ide_wait_not_busy(HWIF(drive), 100000);
- 		if (rc)
- 			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
-@@ -1003,6 +885,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
- 
- 	/* bail early if we've exceeded max_failures */
- 	if (drive->max_failures && (drive->failures > drive->max_failures)) {
-+		rq->cmd_flags |= REQ_FAILED;
- 		goto kill_rq;
- 	}
- 
-@@ -1034,9 +917,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
- 		if (drive->current_speed == 0xff)
- 			ide_config_drive_speed(drive, drive->desired_speed);
- 
--		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
--		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
--		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
-+		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
- 			return execute_drive_cmd(drive, rq);
- 		else if (blk_pm_request(rq)) {
- 			struct request_pm_state *pm = rq->data;
-@@ -1244,11 +1125,13 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
- 		}
- 	again:
- 		hwif = HWIF(drive);
--		if (hwgroup->hwif->sharing_irq &&
--		    hwif != hwgroup->hwif &&
--		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
--			/* set nIEN for previous hwif */
--			SELECT_INTERRUPT(drive);
-+		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
-+			/*
-+			 * set nIEN for previous hwif, drives in the
-+			 * quirk_list may not like intr setups/cleanups
-+			 */
-+			if (drive->quirk_list != 1)
-+				ide_set_irq(drive, 0);
- 		}
- 		hwgroup->hwif = hwif;
- 		hwgroup->drive = drive;
-@@ -1361,7 +1244,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
- 	 */
- 	drive->retry_pio++;
- 	drive->state = DMA_PIO_RETRY;
--	hwif->dma_off_quietly(drive);
-+	ide_dma_off_quietly(drive);
- 
- 	/*
- 	 * un-busy drive etc (hwgroup->busy is cleared on return) and
-@@ -1454,12 +1337,8 @@ void ide_timer_expiry (unsigned long data)
- 			 */
- 			spin_unlock(&ide_lock);
- 			hwif  = HWIF(drive);
--#if DISABLE_IRQ_NOSYNC
--			disable_irq_nosync(hwif->irq);
+-static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
+-{
+-	if (sigset) {
+-		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+-		vcpu->sigset_active = 1;
+-		vcpu->sigset = *sigset;
+-	} else
+-		vcpu->sigset_active = 0;
+-	return 0;
+-}
+-
+-/*
+- * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
+- * we have asm/x86/processor.h
+- */
+-struct fxsave {
+-	u16	cwd;
+-	u16	swd;
+-	u16	twd;
+-	u16	fop;
+-	u64	rip;
+-	u64	rdp;
+-	u32	mxcsr;
+-	u32	mxcsr_mask;
+-	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
+-#ifdef CONFIG_X86_64
+-	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
 -#else
- 			/* disable_irq_nosync ?? */
- 			disable_irq(hwif->irq);
--#endif /* DISABLE_IRQ_NOSYNC */
- 			/* local CPU only,
- 			 * as if we were handling an interrupt */
- 			local_irq_disable();
-@@ -1710,7 +1589,6 @@ irqreturn_t ide_intr (int irq, void *dev_id)
- void ide_init_drive_cmd (struct request *rq)
- {
- 	memset(rq, 0, sizeof(*rq));
--	rq->cmd_type = REQ_TYPE_ATA_CMD;
- 	rq->ref_count = 1;
- }
- 
-@@ -1785,3 +1663,19 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
- }
- 
- EXPORT_SYMBOL(ide_do_drive_cmd);
-+
-+void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
-+{
-+	ide_task_t task;
-+
-+	memset(&task, 0, sizeof(task));
-+	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
-+			IDE_TFLAG_OUT_FEATURE | tf_flags;
-+	task.tf.feature = dma;		/* Use PIO/DMA */
-+	task.tf.lbam    = bcount & 0xff;
-+	task.tf.lbah    = (bcount >> 8) & 0xff;
-+
-+	ide_tf_load(drive, &task);
-+}
-+
-+EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
-diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
-index bb9693d..e2a7e95 100644
---- a/drivers/ide/ide-iops.c
-+++ b/drivers/ide/ide-iops.c
-@@ -158,14 +158,6 @@ void default_hwif_mmiops (ide_hwif_t *hwif)
- 
- EXPORT_SYMBOL(default_hwif_mmiops);
- 
--u32 ide_read_24 (ide_drive_t *drive)
+-	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
+-#endif
+-};
+-
+-static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 -{
--	u8 hcyl = HWIF(drive)->INB(IDE_HCYL_REG);
--	u8 lcyl = HWIF(drive)->INB(IDE_LCYL_REG);
--	u8 sect = HWIF(drive)->INB(IDE_SECTOR_REG);
--	return (hcyl<<16)|(lcyl<<8)|sect;
+-	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
+-
+-	vcpu_load(vcpu);
+-
+-	memcpy(fpu->fpr, fxsave->st_space, 128);
+-	fpu->fcw = fxsave->cwd;
+-	fpu->fsw = fxsave->swd;
+-	fpu->ftwx = fxsave->twd;
+-	fpu->last_opcode = fxsave->fop;
+-	fpu->last_ip = fxsave->rip;
+-	fpu->last_dp = fxsave->rdp;
+-	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
+-
+-	vcpu_put(vcpu);
+-
+-	return 0;
 -}
 -
- void SELECT_DRIVE (ide_drive_t *drive)
- {
- 	if (HWIF(drive)->selectproc)
-@@ -175,26 +167,12 @@ void SELECT_DRIVE (ide_drive_t *drive)
- 
- EXPORT_SYMBOL(SELECT_DRIVE);
- 
--void SELECT_INTERRUPT (ide_drive_t *drive)
+-static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 -{
--	if (HWIF(drive)->intrproc)
--		HWIF(drive)->intrproc(drive);
--	else
--		HWIF(drive)->OUTB(drive->ctl|2, IDE_CONTROL_REG);
+-	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
+-
+-	vcpu_load(vcpu);
+-
+-	memcpy(fxsave->st_space, fpu->fpr, 128);
+-	fxsave->cwd = fpu->fcw;
+-	fxsave->swd = fpu->fsw;
+-	fxsave->twd = fpu->ftwx;
+-	fxsave->fop = fpu->last_opcode;
+-	fxsave->rip = fpu->last_ip;
+-	fxsave->rdp = fpu->last_dp;
+-	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
+-
+-	vcpu_put(vcpu);
+-
+-	return 0;
+-}
+-
+-static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
+-				    struct kvm_lapic_state *s)
+-{
+-	vcpu_load(vcpu);
+-	memcpy(s->regs, vcpu->apic->regs, sizeof *s);
+-	vcpu_put(vcpu);
+-
+-	return 0;
+-}
+-
+-static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+-				    struct kvm_lapic_state *s)
+-{
+-	vcpu_load(vcpu);
+-	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
+-	kvm_apic_post_state_restore(vcpu);
+-	vcpu_put(vcpu);
+-
+-	return 0;
+-}
+-
+-static long kvm_vcpu_ioctl(struct file *filp,
+-			   unsigned int ioctl, unsigned long arg)
+-{
+-	struct kvm_vcpu *vcpu = filp->private_data;
+-	void __user *argp = (void __user *)arg;
+-	int r = -EINVAL;
+-
+-	switch (ioctl) {
+-	case KVM_RUN:
+-		r = -EINVAL;
+-		if (arg)
+-			goto out;
+-		r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
+-		break;
+-	case KVM_GET_REGS: {
+-		struct kvm_regs kvm_regs;
+-
+-		memset(&kvm_regs, 0, sizeof kvm_regs);
+-		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
+-		if (r)
+-			goto out;
+-		r = -EFAULT;
+-		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_SET_REGS: {
+-		struct kvm_regs kvm_regs;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
+-			goto out;
+-		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
+-		if (r)
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_GET_SREGS: {
+-		struct kvm_sregs kvm_sregs;
+-
+-		memset(&kvm_sregs, 0, sizeof kvm_sregs);
+-		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
+-		if (r)
+-			goto out;
+-		r = -EFAULT;
+-		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_SET_SREGS: {
+-		struct kvm_sregs kvm_sregs;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
+-			goto out;
+-		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
+-		if (r)
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_TRANSLATE: {
+-		struct kvm_translation tr;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&tr, argp, sizeof tr))
+-			goto out;
+-		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
+-		if (r)
+-			goto out;
+-		r = -EFAULT;
+-		if (copy_to_user(argp, &tr, sizeof tr))
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_INTERRUPT: {
+-		struct kvm_interrupt irq;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&irq, argp, sizeof irq))
+-			goto out;
+-		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+-		if (r)
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_DEBUG_GUEST: {
+-		struct kvm_debug_guest dbg;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&dbg, argp, sizeof dbg))
+-			goto out;
+-		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
+-		if (r)
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_GET_MSRS:
+-		r = msr_io(vcpu, argp, kvm_get_msr, 1);
+-		break;
+-	case KVM_SET_MSRS:
+-		r = msr_io(vcpu, argp, do_set_msr, 0);
+-		break;
+-	case KVM_SET_CPUID: {
+-		struct kvm_cpuid __user *cpuid_arg = argp;
+-		struct kvm_cpuid cpuid;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+-			goto out;
+-		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
+-		if (r)
+-			goto out;
+-		break;
+-	}
+-	case KVM_SET_SIGNAL_MASK: {
+-		struct kvm_signal_mask __user *sigmask_arg = argp;
+-		struct kvm_signal_mask kvm_sigmask;
+-		sigset_t sigset, *p;
+-
+-		p = NULL;
+-		if (argp) {
+-			r = -EFAULT;
+-			if (copy_from_user(&kvm_sigmask, argp,
+-					   sizeof kvm_sigmask))
+-				goto out;
+-			r = -EINVAL;
+-			if (kvm_sigmask.len != sizeof sigset)
+-				goto out;
+-			r = -EFAULT;
+-			if (copy_from_user(&sigset, sigmask_arg->sigset,
+-					   sizeof sigset))
+-				goto out;
+-			p = &sigset;
+-		}
+-		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+-		break;
+-	}
+-	case KVM_GET_FPU: {
+-		struct kvm_fpu fpu;
+-
+-		memset(&fpu, 0, sizeof fpu);
+-		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
+-		if (r)
+-			goto out;
+-		r = -EFAULT;
+-		if (copy_to_user(argp, &fpu, sizeof fpu))
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_SET_FPU: {
+-		struct kvm_fpu fpu;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&fpu, argp, sizeof fpu))
+-			goto out;
+-		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
+-		if (r)
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_GET_LAPIC: {
+-		struct kvm_lapic_state lapic;
+-
+-		memset(&lapic, 0, sizeof lapic);
+-		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
+-		if (r)
+-			goto out;
+-		r = -EFAULT;
+-		if (copy_to_user(argp, &lapic, sizeof lapic))
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_SET_LAPIC: {
+-		struct kvm_lapic_state lapic;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&lapic, argp, sizeof lapic))
+-			goto out;
+-		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
+-		if (r)
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	default:
+-		;
+-	}
+-out:
+-	return r;
+-}
+-
+-static long kvm_vm_ioctl(struct file *filp,
+-			   unsigned int ioctl, unsigned long arg)
+-{
+-	struct kvm *kvm = filp->private_data;
+-	void __user *argp = (void __user *)arg;
+-	int r = -EINVAL;
+-
+-	switch (ioctl) {
+-	case KVM_CREATE_VCPU:
+-		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
+-		if (r < 0)
+-			goto out;
+-		break;
+-	case KVM_SET_MEMORY_REGION: {
+-		struct kvm_memory_region kvm_mem;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
+-			goto out;
+-		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
+-		if (r)
+-			goto out;
+-		break;
+-	}
+-	case KVM_GET_DIRTY_LOG: {
+-		struct kvm_dirty_log log;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&log, argp, sizeof log))
+-			goto out;
+-		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
+-		if (r)
+-			goto out;
+-		break;
+-	}
+-	case KVM_SET_MEMORY_ALIAS: {
+-		struct kvm_memory_alias alias;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&alias, argp, sizeof alias))
+-			goto out;
+-		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
+-		if (r)
+-			goto out;
+-		break;
+-	}
+-	case KVM_CREATE_IRQCHIP:
+-		r = -ENOMEM;
+-		kvm->vpic = kvm_create_pic(kvm);
+-		if (kvm->vpic) {
+-			r = kvm_ioapic_init(kvm);
+-			if (r) {
+-				kfree(kvm->vpic);
+-				kvm->vpic = NULL;
+-				goto out;
+-			}
+-		}
+-		else
+-			goto out;
+-		break;
+-	case KVM_IRQ_LINE: {
+-		struct kvm_irq_level irq_event;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&irq_event, argp, sizeof irq_event))
+-			goto out;
+-		if (irqchip_in_kernel(kvm)) {
+-			mutex_lock(&kvm->lock);
+-			if (irq_event.irq < 16)
+-				kvm_pic_set_irq(pic_irqchip(kvm),
+-					irq_event.irq,
+-					irq_event.level);
+-			kvm_ioapic_set_irq(kvm->vioapic,
+-					irq_event.irq,
+-					irq_event.level);
+-			mutex_unlock(&kvm->lock);
+-			r = 0;
+-		}
+-		break;
+-	}
+-	case KVM_GET_IRQCHIP: {
+-		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+-		struct kvm_irqchip chip;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&chip, argp, sizeof chip))
+-			goto out;
+-		r = -ENXIO;
+-		if (!irqchip_in_kernel(kvm))
+-			goto out;
+-		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
+-		if (r)
+-			goto out;
+-		r = -EFAULT;
+-		if (copy_to_user(argp, &chip, sizeof chip))
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	case KVM_SET_IRQCHIP: {
+-		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+-		struct kvm_irqchip chip;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&chip, argp, sizeof chip))
+-			goto out;
+-		r = -ENXIO;
+-		if (!irqchip_in_kernel(kvm))
+-			goto out;
+-		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
+-		if (r)
+-			goto out;
+-		r = 0;
+-		break;
+-	}
+-	default:
+-		;
+-	}
+-out:
+-	return r;
+-}
+-
+-static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
+-				  unsigned long address,
+-				  int *type)
+-{
+-	struct kvm *kvm = vma->vm_file->private_data;
+-	unsigned long pgoff;
+-	struct page *page;
+-
+-	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+-	page = gfn_to_page(kvm, pgoff);
+-	if (!page)
+-		return NOPAGE_SIGBUS;
+-	get_page(page);
+-	if (type != NULL)
+-		*type = VM_FAULT_MINOR;
+-
+-	return page;
 -}
 -
- void SELECT_MASK (ide_drive_t *drive, int mask)
- {
- 	if (HWIF(drive)->maskproc)
- 		HWIF(drive)->maskproc(drive, mask);
- }
- 
--void QUIRK_LIST (ide_drive_t *drive)
+-static struct vm_operations_struct kvm_vm_vm_ops = {
+-	.nopage = kvm_vm_nopage,
+-};
+-
+-static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
+-{
+-	vma->vm_ops = &kvm_vm_vm_ops;
+-	return 0;
+-}
+-
+-static struct file_operations kvm_vm_fops = {
+-	.release        = kvm_vm_release,
+-	.unlocked_ioctl = kvm_vm_ioctl,
+-	.compat_ioctl   = kvm_vm_ioctl,
+-	.mmap           = kvm_vm_mmap,
+-};
+-
+-static int kvm_dev_ioctl_create_vm(void)
 -{
--	if (HWIF(drive)->quirkproc)
--		drive->quirk_list = HWIF(drive)->quirkproc(drive);
+-	int fd, r;
+-	struct inode *inode;
+-	struct file *file;
+-	struct kvm *kvm;
+-
+-	kvm = kvm_create_vm();
+-	if (IS_ERR(kvm))
+-		return PTR_ERR(kvm);
+-	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
+-	if (r) {
+-		kvm_destroy_vm(kvm);
+-		return r;
+-	}
+-
+-	kvm->filp = file;
+-
+-	return fd;
 -}
 -
- /*
-  * Some localbus EIDE interfaces require a special access sequence
-  * when using 32-bit I/O instructions to transfer data.  We call this
-@@ -449,7 +427,6 @@ int drive_is_ready (ide_drive_t *drive)
- 	udelay(1);
- #endif
- 
--#ifdef CONFIG_IDEPCI_SHARE_IRQ
- 	/*
- 	 * We do a passive status test under shared PCI interrupts on
- 	 * cards that truly share the ATA side interrupt, but may also share
-@@ -459,7 +436,6 @@ int drive_is_ready (ide_drive_t *drive)
- 	if (IDE_CONTROL_REG)
- 		stat = hwif->INB(IDE_ALTSTATUS_REG);
- 	else
--#endif /* CONFIG_IDEPCI_SHARE_IRQ */
- 		/* Note: this may clear a pending IRQ!! */
- 		stat = hwif->INB(IDE_STATUS_REG);
- 
-@@ -642,9 +618,9 @@ no_80w:
- 
- int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
- {
--	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
--	    (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
--	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
-+	if (args->tf.command == WIN_SETFEATURES &&
-+	    args->tf.nsect > XFER_UDMA_2 &&
-+	    args->tf.feature == SETFEATURES_XFER) {
- 		if (eighty_ninty_three(drive) == 0) {
- 			printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
- 					    "be set\n", drive->name);
-@@ -662,9 +638,9 @@ int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
-  */
- int set_transfer (ide_drive_t *drive, ide_task_t *args)
- {
--	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
--	    (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) &&
--	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) &&
-+	if (args->tf.command == WIN_SETFEATURES &&
-+	    args->tf.nsect >= XFER_SW_DMA_0 &&
-+	    args->tf.feature == SETFEATURES_XFER &&
- 	    (drive->id->dma_ultra ||
- 	     drive->id->dma_mword ||
- 	     drive->id->dma_1word))
-@@ -712,8 +688,7 @@ int ide_driveid_update(ide_drive_t *drive)
- 	 */
- 
- 	SELECT_MASK(drive, 1);
--	if (IDE_CONTROL_REG)
--		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
-+	ide_set_irq(drive, 1);
- 	msleep(50);
- 	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
- 	timeout = jiffies + WAIT_WORSTCASE;
-@@ -766,8 +741,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
- //		msleep(50);
- 
- #ifdef CONFIG_BLK_DEV_IDEDMA
--	if (hwif->ide_dma_on)	/* check if host supports DMA */
--		hwif->dma_host_off(drive);
-+	if (hwif->dma_host_set)	/* check if host supports DMA */
-+		hwif->dma_host_set(drive, 0);
- #endif
- 
- 	/* Skip setting PIO flow-control modes on pre-EIDE drives */
-@@ -796,13 +771,12 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
- 	SELECT_DRIVE(drive);
- 	SELECT_MASK(drive, 0);
- 	udelay(1);
--	if (IDE_CONTROL_REG)
--		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
-+	ide_set_irq(drive, 0);
- 	hwif->OUTB(speed, IDE_NSECTOR_REG);
- 	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
- 	hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
--	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
--		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
-+	if (drive->quirk_list == 2)
-+		ide_set_irq(drive, 1);
- 
- 	error = __ide_wait_stat(drive, drive->ready_stat,
- 				BUSY_STAT|DRQ_STAT|ERR_STAT,
-@@ -823,10 +797,11 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
- 
-  skip:
- #ifdef CONFIG_BLK_DEV_IDEDMA
--	if (speed >= XFER_SW_DMA_0)
--		hwif->dma_host_on(drive);
--	else if (hwif->ide_dma_on)	/* check if host supports DMA */
--		hwif->dma_off_quietly(drive);
-+	if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
-+	    drive->using_dma)
-+		hwif->dma_host_set(drive, 1);
-+	else if (hwif->dma_host_set)	/* check if host supports DMA */
-+		ide_dma_off_quietly(drive);
- #endif
- 
- 	switch(speed) {
-@@ -902,8 +877,9 @@ EXPORT_SYMBOL(ide_set_handler);
-  *	handler and IRQ setup do not race. All IDE command kick off
-  *	should go via this function or do equivalent locking.
-  */
-- 
--void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *handler, unsigned timeout, ide_expiry_t *expiry)
-+
-+void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
-+			 unsigned timeout, ide_expiry_t *expiry)
- {
- 	unsigned long flags;
- 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
-@@ -1035,10 +1011,10 @@ static void check_dma_crc(ide_drive_t *drive)
- {
- #ifdef CONFIG_BLK_DEV_IDEDMA
- 	if (drive->crc_count) {
--		drive->hwif->dma_off_quietly(drive);
-+		ide_dma_off_quietly(drive);
- 		ide_set_xfer_rate(drive, ide_auto_reduce_xfer(drive));
- 		if (drive->current_speed >= XFER_SW_DMA_0)
--			(void) HWIF(drive)->ide_dma_on(drive);
-+			ide_dma_on(drive);
- 	} else
- 		ide_dma_off(drive);
- #endif
-@@ -1051,8 +1027,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
- 	drive->special.all = 0;
- 	drive->special.b.set_geometry = legacy;
- 	drive->special.b.recalibrate  = legacy;
--	if (OK_TO_RESET_CONTROLLER)
--		drive->mult_count = 0;
-+	drive->mult_count = 0;
- 	if (!drive->keep_settings && !drive->using_dma)
- 		drive->mult_req = 0;
- 	if (drive->mult_req != drive->mult_count)
-@@ -1137,7 +1112,6 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
- 	for (unit = 0; unit < MAX_DRIVES; ++unit)
- 		pre_reset(&hwif->drives[unit]);
- 
--#if OK_TO_RESET_CONTROLLER
- 	if (!IDE_CONTROL_REG) {
- 		spin_unlock_irqrestore(&ide_lock, flags);
- 		return ide_stopped;
-@@ -1174,11 +1148,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
- 	 * state when the disks are reset this way. At least, the Winbond
- 	 * 553 documentation says that
- 	 */
--	if (hwif->resetproc != NULL) {
-+	if (hwif->resetproc)
- 		hwif->resetproc(drive);
+-static long kvm_dev_ioctl(struct file *filp,
+-			  unsigned int ioctl, unsigned long arg)
+-{
+-	void __user *argp = (void __user *)arg;
+-	long r = -EINVAL;
+-
+-	switch (ioctl) {
+-	case KVM_GET_API_VERSION:
+-		r = -EINVAL;
+-		if (arg)
+-			goto out;
+-		r = KVM_API_VERSION;
+-		break;
+-	case KVM_CREATE_VM:
+-		r = -EINVAL;
+-		if (arg)
+-			goto out;
+-		r = kvm_dev_ioctl_create_vm();
+-		break;
+-	case KVM_GET_MSR_INDEX_LIST: {
+-		struct kvm_msr_list __user *user_msr_list = argp;
+-		struct kvm_msr_list msr_list;
+-		unsigned n;
+-
+-		r = -EFAULT;
+-		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
+-			goto out;
+-		n = msr_list.nmsrs;
+-		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
+-		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
+-			goto out;
+-		r = -E2BIG;
+-		if (n < num_msrs_to_save)
+-			goto out;
+-		r = -EFAULT;
+-		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
+-				 num_msrs_to_save * sizeof(u32)))
+-			goto out;
+-		if (copy_to_user(user_msr_list->indices
+-				 + num_msrs_to_save * sizeof(u32),
+-				 &emulated_msrs,
+-				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
+-			goto out;
+-		r = 0;
+-		break;
 -	}
--	
--#endif	/* OK_TO_RESET_CONTROLLER */
- 
- 	spin_unlock_irqrestore(&ide_lock, flags);
- 	return ide_started;
-diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
-index 062d3bc..9b44fbd 100644
---- a/drivers/ide/ide-lib.c
-+++ b/drivers/ide/ide-lib.c
-@@ -441,6 +441,12 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
- 	 * case could happen iff the transfer mode has already been set on
- 	 * the device by ide-proc.c::set_xfer_rate()).
- 	 */
-+	if (rate < XFER_PIO_0) {
-+		if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE)
-+			return ide_set_dma_mode(drive, rate);
-+		else
-+			return ide_config_drive_speed(drive, rate);
-+	}
- 
- 	return ide_set_dma_mode(drive, rate);
- }
-@@ -448,8 +454,7 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
- static void ide_dump_opcode(ide_drive_t *drive)
- {
- 	struct request *rq;
--	u8 opcode = 0;
--	int found = 0;
-+	ide_task_t *task = NULL;
- 
- 	spin_lock(&ide_lock);
- 	rq = NULL;
-@@ -458,164 +463,129 @@ static void ide_dump_opcode(ide_drive_t *drive)
- 	spin_unlock(&ide_lock);
- 	if (!rq)
- 		return;
--	if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
--	    rq->cmd_type == REQ_TYPE_ATA_TASK) {
--		char *args = rq->buffer;
--		if (args) {
--			opcode = args[0];
--			found = 1;
--		}
--	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
--		ide_task_t *args = rq->special;
--		if (args) {
--			task_struct_t *tf = (task_struct_t *) args->tfRegister;
--			opcode = tf->command;
--			found = 1;
+-	case KVM_CHECK_EXTENSION: {
+-		int ext = (long)argp;
+-
+-		switch (ext) {
+-		case KVM_CAP_IRQCHIP:
+-		case KVM_CAP_HLT:
+-			r = 1;
+-			break;
+-		default:
+-			r = 0;
+-			break;
 -		}
+-		break;
 -	}
-+
-+	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
-+		task = rq->special;
- 
- 	printk("ide: failed opcode was: ");
--	if (!found)
--		printk("unknown\n");
-+	if (task == NULL)
-+		printk(KERN_CONT "unknown\n");
- 	else
--		printk("0x%02x\n", opcode);
-+		printk(KERN_CONT "0x%02x\n", task->tf.command);
- }
- 
--static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
-+u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48)
- {
--	ide_hwif_t *hwif = HWIF(drive);
--	unsigned long flags;
--	u8 err = 0;
-+	u32 high, low;
- 
--	local_irq_save(flags);
--	printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
--	if (stat & BUSY_STAT)
--		printk("Busy ");
--	else {
--		if (stat & READY_STAT)	printk("DriveReady ");
--		if (stat & WRERR_STAT)	printk("DeviceFault ");
--		if (stat & SEEK_STAT)	printk("SeekComplete ");
--		if (stat & DRQ_STAT)	printk("DataRequest ");
--		if (stat & ECC_STAT)	printk("CorrectedError ");
--		if (stat & INDEX_STAT)	printk("Index ");
--		if (stat & ERR_STAT)	printk("Error ");
-+	if (lba48)
-+		high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) |
-+			tf->hob_lbal;
-+	else
-+		high = tf->device & 0xf;
-+	low  = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
-+
-+	return ((u64)high << 24) | low;
-+}
-+EXPORT_SYMBOL_GPL(ide_get_lba_addr);
-+
-+static void ide_dump_sector(ide_drive_t *drive)
-+{
-+	ide_task_t task;
-+	struct ide_taskfile *tf = &task.tf;
-+	int lba48 = (drive->addressing == 1) ? 1 : 0;
-+
-+	memset(&task, 0, sizeof(task));
-+	if (lba48)
-+		task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_HOB_LBA |
-+				IDE_TFLAG_LBA48;
-+	else
-+		task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
-+
-+	ide_tf_read(drive, &task);
-+
-+	if (lba48 || (tf->device & ATA_LBA))
-+		printk(", LBAsect=%llu",
-+			(unsigned long long)ide_get_lba_addr(tf, lba48));
-+	else
-+		printk(", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
-+					 tf->device & 0xf, tf->lbal);
-+}
-+
-+static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
-+{
-+	printk("{ ");
-+	if (err & ABRT_ERR)	printk("DriveStatusError ");
-+	if (err & ICRC_ERR)
-+		printk((err & ABRT_ERR) ? "BadCRC " : "BadSector ");
-+	if (err & ECC_ERR)	printk("UncorrectableError ");
-+	if (err & ID_ERR)	printk("SectorIdNotFound ");
-+	if (err & TRK0_ERR)	printk("TrackZeroNotFound ");
-+	if (err & MARK_ERR)	printk("AddrMarkNotFound ");
-+	printk("}");
-+	if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR ||
-+	    (err & (ECC_ERR|ID_ERR|MARK_ERR))) {
-+		ide_dump_sector(drive);
-+		if (HWGROUP(drive) && HWGROUP(drive)->rq)
-+			printk(", sector=%llu",
-+			       (unsigned long long)HWGROUP(drive)->rq->sector);
- 	}
-+	printk("\n");
-+}
-+
-+static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
-+{
-+	printk("{ ");
-+	if (err & ILI_ERR)	printk("IllegalLengthIndication ");
-+	if (err & EOM_ERR)	printk("EndOfMedia ");
-+	if (err & ABRT_ERR)	printk("AbortedCommand ");
-+	if (err & MCR_ERR)	printk("MediaChangeRequested ");
-+	if (err & LFS_ERR)	printk("LastFailedSense=0x%02x ",
-+				       (err & LFS_ERR) >> 4);
- 	printk("}\n");
--	if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
--		err = hwif->INB(IDE_ERROR_REG);
--		printk("%s: %s: error=0x%02x { ", drive->name, msg, err);
--		if (err & ABRT_ERR)	printk("DriveStatusError ");
--		if (err & ICRC_ERR)
--			printk((err & ABRT_ERR) ? "BadCRC " : "BadSector ");
--		if (err & ECC_ERR)	printk("UncorrectableError ");
--		if (err & ID_ERR)	printk("SectorIdNotFound ");
--		if (err & TRK0_ERR)	printk("TrackZeroNotFound ");
--		if (err & MARK_ERR)	printk("AddrMarkNotFound ");
--		printk("}");
--		if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR ||
--		    (err & (ECC_ERR|ID_ERR|MARK_ERR))) {
--			if (drive->addressing == 1) {
--				__u64 sectors = 0;
--				u32 low = 0, high = 0;
--				hwif->OUTB(drive->ctl&~0x80, IDE_CONTROL_REG);
--				low = ide_read_24(drive);
--				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
--				high = ide_read_24(drive);
--				sectors = ((__u64)high << 24) | low;
--				printk(", LBAsect=%llu, high=%d, low=%d",
--				       (unsigned long long) sectors,
--				       high, low);
--			} else {
--				u8 cur = hwif->INB(IDE_SELECT_REG);
--				if (cur & 0x40) {	/* using LBA? */
--					printk(", LBAsect=%ld", (unsigned long)
--					 ((cur&0xf)<<24)
--					 |(hwif->INB(IDE_HCYL_REG)<<16)
--					 |(hwif->INB(IDE_LCYL_REG)<<8)
--					 | hwif->INB(IDE_SECTOR_REG));
--				} else {
--					printk(", CHS=%d/%d/%d",
--					 (hwif->INB(IDE_HCYL_REG)<<8) +
--					  hwif->INB(IDE_LCYL_REG),
--					  cur & 0xf,
--					  hwif->INB(IDE_SECTOR_REG));
+-	case KVM_GET_VCPU_MMAP_SIZE:
+-		r = -EINVAL;
+-		if (arg)
+-			goto out;
+-		r = 2 * PAGE_SIZE;
+-		break;
+-	default:
+-		;
+-	}
+-out:
+-	return r;
+-}
+-
+-static struct file_operations kvm_chardev_ops = {
+-	.unlocked_ioctl = kvm_dev_ioctl,
+-	.compat_ioctl   = kvm_dev_ioctl,
+-};
+-
+-static struct miscdevice kvm_dev = {
+-	KVM_MINOR,
+-	"kvm",
+-	&kvm_chardev_ops,
+-};
+-
+-/*
+- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+- * cached on it.
+- */
+-static void decache_vcpus_on_cpu(int cpu)
+-{
+-	struct kvm *vm;
+-	struct kvm_vcpu *vcpu;
+-	int i;
+-
+-	spin_lock(&kvm_lock);
+-	list_for_each_entry(vm, &vm_list, vm_list)
+-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+-			vcpu = vm->vcpus[i];
+-			if (!vcpu)
+-				continue;
+-			/*
+-			 * If the vcpu is locked, then it is running on some
+-			 * other cpu and therefore it is not cached on the
+-			 * cpu in question.
+-			 *
+-			 * If it's not locked, check the last cpu it executed
+-			 * on.
+-			 */
+-			if (mutex_trylock(&vcpu->mutex)) {
+-				if (vcpu->cpu == cpu) {
+-					kvm_x86_ops->vcpu_decache(vcpu);
+-					vcpu->cpu = -1;
 -				}
+-				mutex_unlock(&vcpu->mutex);
 -			}
--			if (HWGROUP(drive) && HWGROUP(drive)->rq)
--				printk(", sector=%llu",
--					(unsigned long long)HWGROUP(drive)->rq->sector);
 -		}
--		printk("\n");
+-	spin_unlock(&kvm_lock);
+-}
+-
+-static void hardware_enable(void *junk)
+-{
+-	int cpu = raw_smp_processor_id();
+-
+-	if (cpu_isset(cpu, cpus_hardware_enabled))
+-		return;
+-	cpu_set(cpu, cpus_hardware_enabled);
+-	kvm_x86_ops->hardware_enable(NULL);
+-}
+-
+-static void hardware_disable(void *junk)
+-{
+-	int cpu = raw_smp_processor_id();
+-
+-	if (!cpu_isset(cpu, cpus_hardware_enabled))
+-		return;
+-	cpu_clear(cpu, cpus_hardware_enabled);
+-	decache_vcpus_on_cpu(cpu);
+-	kvm_x86_ops->hardware_disable(NULL);
+-}
+-
+-static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
+-			   void *v)
+-{
+-	int cpu = (long)v;
+-
+-	switch (val) {
+-	case CPU_DYING:
+-	case CPU_DYING_FROZEN:
+-		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
+-		       cpu);
+-		hardware_disable(NULL);
+-		break;
+-	case CPU_UP_CANCELED:
+-	case CPU_UP_CANCELED_FROZEN:
+-		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
+-		       cpu);
+-		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
+-		break;
+-	case CPU_ONLINE:
+-	case CPU_ONLINE_FROZEN:
+-		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
+-		       cpu);
+-		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
+-		break;
 -	}
--	ide_dump_opcode(drive);
--	local_irq_restore(flags);
--	return err;
- }
- 
- /**
-- *	ide_dump_atapi_status       -       print human readable atapi status
-+ *	ide_dump_status		-	translate ATA/ATAPI error
-  *	@drive: drive that status applies to
-  *	@msg: text message to print
-  *	@stat: status byte to decode
-  *
-  *	Error reporting, in human readable form (luxurious, but a memory hog).
-+ *	Combines the drive name, message and status byte to provide a
-+ *	user understandable explanation of the device error.
-  */
- 
--static u8 ide_dump_atapi_status(ide_drive_t *drive, const char *msg, u8 stat)
-+u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
- {
- 	unsigned long flags;
-+	u8 err = 0;
- 
--	atapi_status_t status;
--	atapi_error_t error;
+-	return NOTIFY_OK;
+-}
 -
--	status.all = stat;
--	error.all = 0;
- 	local_irq_save(flags);
- 	printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
--	if (status.b.bsy)
-+	if (stat & BUSY_STAT)
- 		printk("Busy ");
- 	else {
--		if (status.b.drdy)	printk("DriveReady ");
--		if (status.b.df)	printk("DeviceFault ");
--		if (status.b.dsc)	printk("SeekComplete ");
--		if (status.b.drq)	printk("DataRequest ");
--		if (status.b.corr)	printk("CorrectedError ");
--		if (status.b.idx)	printk("Index ");
--		if (status.b.check)	printk("Error ");
-+		if (stat & READY_STAT)	printk("DriveReady ");
-+		if (stat & WRERR_STAT)	printk("DeviceFault ");
-+		if (stat & SEEK_STAT)	printk("SeekComplete ");
-+		if (stat & DRQ_STAT)	printk("DataRequest ");
-+		if (stat & ECC_STAT)	printk("CorrectedError ");
-+		if (stat & INDEX_STAT)	printk("Index ");
-+		if (stat & ERR_STAT)	printk("Error ");
- 	}
- 	printk("}\n");
--	if (status.b.check && !status.b.bsy) {
--		error.all = HWIF(drive)->INB(IDE_ERROR_REG);
--		printk("%s: %s: error=0x%02x { ", drive->name, msg, error.all);
--		if (error.b.ili)	printk("IllegalLengthIndication ");
--		if (error.b.eom)	printk("EndOfMedia ");
--		if (error.b.abrt)	printk("AbortedCommand ");
--		if (error.b.mcr)	printk("MediaChangeRequested ");
--		if (error.b.sense_key)	printk("LastFailedSense=0x%02x ",
--						error.b.sense_key);
--		printk("}\n");
-+	if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
-+		err = drive->hwif->INB(IDE_ERROR_REG);
-+		printk("%s: %s: error=0x%02x ", drive->name, msg, err);
-+		if (drive->media == ide_disk)
-+			ide_dump_ata_error(drive, err);
-+		else
-+			ide_dump_atapi_error(drive, err);
- 	}
- 	ide_dump_opcode(drive);
- 	local_irq_restore(flags);
--	return error.all;
+-static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
+-                       void *v)
+-{
+-	if (val == SYS_RESTART) {
+-		/*
+-		 * Some (well, at least mine) BIOSes hang on reboot if
+-		 * in vmx root mode.
+-		 */
+-		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
+-		on_each_cpu(hardware_disable, NULL, 0, 1);
+-	}
+-	return NOTIFY_OK;
 -}
 -
--/**
-- *	ide_dump_status		-	translate ATA/ATAPI error
-- *	@drive: drive the error occured on
-- *	@msg: information string
-- *	@stat: status byte
-- *
-- *	Error reporting, in human readable form (luxurious, but a memory hog).
-- *	Combines the drive name, message and status byte to provide a
-- *	user understandable explanation of the device error.
-- */
+-static struct notifier_block kvm_reboot_notifier = {
+-	.notifier_call = kvm_reboot,
+-	.priority = 0,
+-};
 -
--u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
+-void kvm_io_bus_init(struct kvm_io_bus *bus)
 -{
--	if (drive->media == ide_disk)
--		return ide_dump_ata_status(drive, msg, stat);
--	return ide_dump_atapi_status(drive, msg, stat);
-+	return err;
- }
- 
- EXPORT_SYMBOL(ide_dump_status);
-diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
-index e245521..cbbb0f7 100644
---- a/drivers/ide/ide-pnp.c
-+++ b/drivers/ide/ide-pnp.c
-@@ -31,7 +31,6 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
- {
- 	hw_regs_t hw;
- 	ide_hwif_t *hwif;
--	int index;
- 
- 	if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
- 		return -1;
-@@ -41,11 +40,19 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
- 				pnp_port_start(dev, 1));
- 	hw.irq = pnp_irq(dev, 0);
- 
--	index = ide_register_hw(&hw, NULL, 1, &hwif);
-+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
-+	if (hwif) {
-+		u8 index = hwif->index;
-+		u8 idx[4] = { index, 0xff, 0xff, 0xff };
-+
-+		ide_init_port_data(hwif, index);
-+		ide_init_port_hw(hwif, &hw);
- 
--	if (index != -1) {
--	    	printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
-+		printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
- 		pnp_set_drvdata(dev,hwif);
-+
-+		ide_device_add(idx);
-+
- 		return 0;
- 	}
- 
-@@ -68,12 +75,15 @@ static struct pnp_driver idepnp_driver = {
- 	.remove		= idepnp_remove,
- };
- 
--void __init pnpide_init(void)
-+static int __init pnpide_init(void)
- {
--	pnp_register_driver(&idepnp_driver);
-+	return pnp_register_driver(&idepnp_driver);
- }
- 
--void __exit pnpide_exit(void)
-+static void __exit pnpide_exit(void)
- {
- 	pnp_unregister_driver(&idepnp_driver);
- }
-+
-+module_init(pnpide_init);
-+module_exit(pnpide_exit);
-diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
-index 2994523..edf650b 100644
---- a/drivers/ide/ide-probe.c
-+++ b/drivers/ide/ide-probe.c
-@@ -95,10 +95,10 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
- #ifdef CONFIG_IDEDISK_MULTI_MODE
- 		id->multsect = ((id->max_multsect/2) > 1) ? id->max_multsect : 0;
- 		id->multsect_valid = id->multsect ? 1 : 0;
--		drive->mult_req = id->multsect_valid ? id->max_multsect : INITIAL_MULT_COUNT;
-+		drive->mult_req = id->multsect_valid ? id->max_multsect : 0;
- 		drive->special.b.set_multmode = drive->mult_req ? 1 : 0;
- #else	/* original, pre IDE-NFG, per request of AC */
--		drive->mult_req = INITIAL_MULT_COUNT;
-+		drive->mult_req = 0;
- 		if (drive->mult_req > id->max_multsect)
- 			drive->mult_req = id->max_multsect;
- 		if (drive->mult_req || ((id->multsect_valid & 1) && id->multsect))
-@@ -234,7 +234,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
- 
- 	drive->media = ide_disk;
- 	printk("%s DISK drive\n", (id->config == 0x848a) ? "CFA" : "ATA" );
--	QUIRK_LIST(drive);
-+
- 	return;
- 
- err_misc:
-@@ -350,22 +350,19 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
- 	 * the irq handler isn't expecting.
- 	 */
- 	if (IDE_CONTROL_REG) {
--		u8 ctl = drive->ctl | 2;
- 		if (!hwif->irq) {
- 			autoprobe = 1;
- 			cookie = probe_irq_on();
--			/* enable device irq */
--			ctl &= ~2;
- 		}
--		hwif->OUTB(ctl, IDE_CONTROL_REG);
-+		ide_set_irq(drive, autoprobe);
- 	}
- 
- 	retval = actual_try_to_identify(drive, cmd);
- 
- 	if (autoprobe) {
- 		int irq;
--		/* mask device irq */
--		hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG);
-+
-+		ide_set_irq(drive, 0);
- 		/* clear drive IRQ */
- 		(void) hwif->INB(IDE_STATUS_REG);
- 		udelay(5);
-@@ -385,6 +382,20 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
- 	return retval;
- }
- 
-+static int ide_busy_sleep(ide_hwif_t *hwif)
-+{
-+	unsigned long timeout = jiffies + WAIT_WORSTCASE;
-+	u8 stat;
-+
-+	do {
-+		msleep(50);
-+		stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
-+		if ((stat & BUSY_STAT) == 0)
-+			return 0;
-+	} while (time_before(jiffies, timeout));
-+
-+	return 1;
-+}
- 
- /**
-  *	do_probe		-	probe an IDE device
-@@ -453,7 +464,6 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
- 		if ((rc == 1 && cmd == WIN_PIDENTIFY) &&
- 			((drive->autotune == IDE_TUNE_DEFAULT) ||
- 			(drive->autotune == IDE_TUNE_AUTO))) {
--			unsigned long timeout;
- 			printk("%s: no response (status = 0x%02x), "
- 				"resetting drive\n", drive->name,
- 				hwif->INB(IDE_STATUS_REG));
-@@ -461,10 +471,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
- 			hwif->OUTB(drive->select.all, IDE_SELECT_REG);
- 			msleep(50);
- 			hwif->OUTB(WIN_SRST, IDE_COMMAND_REG);
--			timeout = jiffies;
--			while (((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) &&
--			       time_before(jiffies, timeout + WAIT_WORSTCASE))
--				msleep(50);
-+			(void)ide_busy_sleep(hwif);
- 			rc = try_to_identify(drive, cmd);
- 		}
- 		if (rc == 1)
-@@ -492,20 +499,16 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
- static void enable_nest (ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif = HWIF(drive);
--	unsigned long timeout;
- 
- 	printk("%s: enabling %s -- ", hwif->name, drive->id->model);
- 	SELECT_DRIVE(drive);
- 	msleep(50);
- 	hwif->OUTB(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG);
--	timeout = jiffies + WAIT_WORSTCASE;
--	do {
--		if (time_after(jiffies, timeout)) {
--			printk("failed (timeout)\n");
--			return;
--		}
--		msleep(50);
--	} while ((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT);
-+
-+	if (ide_busy_sleep(hwif)) {
-+		printk(KERN_CONT "failed (timeout)\n");
-+		return;
-+	}
- 
- 	msleep(50);
- 
-@@ -653,8 +656,7 @@ static int wait_hwif_ready(ide_hwif_t *hwif)
- 		/* Ignore disks that we will not probe for later. */
- 		if (!drive->noprobe || drive->present) {
- 			SELECT_DRIVE(drive);
--			if (IDE_CONTROL_REG)
--				hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
-+			ide_set_irq(drive, 1);
- 			mdelay(2);
- 			rc = ide_wait_not_busy(hwif, 35000);
- 			if (rc)
-@@ -673,19 +675,18 @@ out:
- 
- /**
-  *	ide_undecoded_slave	-	look for bad CF adapters
-- *	@hwif: interface
-+ *	@drive1: drive
-  *
-  *	Analyse the drives on the interface and attempt to decide if we
-  *	have the same drive viewed twice. This occurs with crap CF adapters
-  *	and PCMCIA sometimes.
-  */
- 
--void ide_undecoded_slave(ide_hwif_t *hwif)
-+void ide_undecoded_slave(ide_drive_t *drive1)
- {
--	ide_drive_t *drive0 = &hwif->drives[0];
--	ide_drive_t *drive1 = &hwif->drives[1];
-+	ide_drive_t *drive0 = &drive1->hwif->drives[0];
- 
--	if (drive0->present == 0 || drive1->present == 0)
-+	if ((drive1->dn & 1) == 0 || drive0->present == 0)
- 		return;
- 
- 	/* If the models don't match they are not the same product */
-@@ -788,18 +789,11 @@ static void probe_hwif(ide_hwif_t *hwif)
- 		}
- 	}
- 	if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) {
--		unsigned long timeout = jiffies + WAIT_WORSTCASE;
--		u8 stat;
+-	memset(bus, 0, sizeof(*bus));
+-}
 -
- 		printk(KERN_WARNING "%s: reset\n", hwif->name);
- 		hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
- 		udelay(10);
- 		hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
--		do {
--			msleep(50);
--			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
--		} while ((stat & BUSY_STAT) && time_after(timeout, jiffies));
+-void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+-{
+-	int i;
 -
-+		(void)ide_busy_sleep(hwif);
- 	}
- 	local_irq_restore(flags);
- 	/*
-@@ -814,8 +808,12 @@ static void probe_hwif(ide_hwif_t *hwif)
- 		return;
- 	}
- 
--	if (hwif->fixup)
--		hwif->fixup(hwif);
-+	for (unit = 0; unit < MAX_DRIVES; unit++) {
-+		ide_drive_t *drive = &hwif->drives[unit];
-+
-+		if (drive->present && hwif->quirkproc)
-+			hwif->quirkproc(drive);
-+	}
- 
- 	for (unit = 0; unit < MAX_DRIVES; ++unit) {
- 		ide_drive_t *drive = &hwif->drives[unit];
-@@ -830,16 +828,8 @@ static void probe_hwif(ide_hwif_t *hwif)
- 
- 			drive->nice1 = 1;
- 
--			if (hwif->ide_dma_on) {
--				/*
--				 * Force DMAing for the beginning of the check.
--				 * Some chipsets appear to do interesting
--				 * things, if not checked and cleared.
--				 *   PARANOIA!!!
--				 */
--				hwif->dma_off_quietly(drive);
-+			if (hwif->dma_host_set)
- 				ide_set_dma(drive);
--			}
- 		}
- 	}
- 
-@@ -853,25 +843,6 @@ static void probe_hwif(ide_hwif_t *hwif)
- 	}
- }
- 
--static int hwif_init(ide_hwif_t *hwif);
--static void hwif_register_devices(ide_hwif_t *hwif);
+-	for (i = 0; i < bus->dev_count; i++) {
+-		struct kvm_io_device *pos = bus->devs[i];
 -
--static int probe_hwif_init(ide_hwif_t *hwif)
+-		kvm_iodevice_destructor(pos);
+-	}
+-}
+-
+-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
 -{
--	probe_hwif(hwif);
+-	int i;
 -
--	if (!hwif_init(hwif)) {
--		printk(KERN_INFO "%s: failed to initialize IDE interface\n",
--				 hwif->name);
--		return -1;
+-	for (i = 0; i < bus->dev_count; i++) {
+-		struct kvm_io_device *pos = bus->devs[i];
+-
+-		if (pos->in_range(pos, addr))
+-			return pos;
 -	}
 -
--	if (hwif->present)
--		hwif_register_devices(hwif);
+-	return NULL;
+-}
 -
--	return 0;
+-void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+-{
+-	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
+-
+-	bus->devs[bus->dev_count++] = dev;
 -}
 -
- #if MAX_HWIFS > 1
- /*
-  * save_match() is used to simplify logic in init_irq() below.
-@@ -968,11 +939,6 @@ static int ide_init_queue(ide_drive_t *drive)
-  * Much of the code is for correctly detecting/handling irq sharing
-  * and irq serialization situations.  This is somewhat complex because
-  * it handles static as well as dynamic (PCMCIA) IDE interfaces.
-- *
-- * The IRQF_DISABLED in sa_flags means ide_intr() is always entered with
-- * interrupts completely disabled.  This can be bad for interrupt latency,
-- * but anything else has led to problems on some machines.  We re-enable
-- * interrupts as much as we can safely do in most places.
-  */
- static int init_irq (ide_hwif_t *hwif)
- {
-@@ -1055,17 +1021,13 @@ static int init_irq (ide_hwif_t *hwif)
- 	 * Allocate the irq, if not already obtained for another hwif
- 	 */
- 	if (!match || match->irq != hwif->irq) {
--		int sa = IRQF_DISABLED;
-+		int sa = 0;
- #if defined(__mc68000__) || defined(CONFIG_APUS)
- 		sa = IRQF_SHARED;
- #endif /* __mc68000__ || CONFIG_APUS */
- 
--		if (IDE_CHIPSET_IS_PCI(hwif->chipset)) {
-+		if (IDE_CHIPSET_IS_PCI(hwif->chipset))
- 			sa = IRQF_SHARED;
--#ifndef CONFIG_IDEPCI_SHARE_IRQ
--			sa |= IRQF_DISABLED;
--#endif /* CONFIG_IDEPCI_SHARE_IRQ */
+-static struct notifier_block kvm_cpu_notifier = {
+-	.notifier_call = kvm_cpu_hotplug,
+-	.priority = 20, /* must be > scheduler priority */
+-};
+-
+-static u64 stat_get(void *_offset)
+-{
+-	unsigned offset = (long)_offset;
+-	u64 total = 0;
+-	struct kvm *kvm;
+-	struct kvm_vcpu *vcpu;
+-	int i;
+-
+-	spin_lock(&kvm_lock);
+-	list_for_each_entry(kvm, &vm_list, vm_list)
+-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+-			vcpu = kvm->vcpus[i];
+-			if (vcpu)
+-				total += *(u32 *)((void *)vcpu + offset);
 -		}
- 
- 		if (hwif->io_ports[IDE_CONTROL_OFFSET])
- 			/* clear nIEN */
-@@ -1173,7 +1135,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
- {
- 	struct gendisk *p = data;
- 	*part &= (1 << PARTN_BITS) - 1;
--	return &p->kobj;
-+	return &p->dev.kobj;
- }
- 
- static int exact_lock(dev_t dev, void *data)
-@@ -1373,54 +1335,63 @@ static void hwif_register_devices(ide_hwif_t *hwif)
- 	}
- }
- 
--int ideprobe_init (void)
-+int ide_device_add_all(u8 *idx)
- {
--	unsigned int index;
--	int probe[MAX_HWIFS];
+-	spin_unlock(&kvm_lock);
+-	return total;
+-}
 -
--	memset(probe, 0, MAX_HWIFS * sizeof(int));
--	for (index = 0; index < MAX_HWIFS; ++index)
--		probe[index] = !ide_hwifs[index].present;
+-DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
 -
--	for (index = 0; index < MAX_HWIFS; ++index)
--		if (probe[index])
--			probe_hwif(&ide_hwifs[index]);
--	for (index = 0; index < MAX_HWIFS; ++index)
--		if (probe[index])
--			hwif_init(&ide_hwifs[index]);
--	for (index = 0; index < MAX_HWIFS; ++index) {
--		if (probe[index]) {
--			ide_hwif_t *hwif = &ide_hwifs[index];
--			if (!hwif->present)
--				continue;
--			if (hwif->chipset == ide_unknown || hwif->chipset == ide_forced)
--				hwif->chipset = ide_generic;
--			hwif_register_devices(hwif);
-+	ide_hwif_t *hwif;
-+	int i, rc = 0;
-+
-+	for (i = 0; i < MAX_HWIFS; i++) {
-+		if (idx[i] == 0xff)
-+			continue;
-+
-+		probe_hwif(&ide_hwifs[idx[i]]);
-+	}
-+
-+	for (i = 0; i < MAX_HWIFS; i++) {
-+		if (idx[i] == 0xff)
-+			continue;
-+
-+		hwif = &ide_hwifs[idx[i]];
-+
-+		if (hwif_init(hwif) == 0) {
-+			printk(KERN_INFO "%s: failed to initialize IDE "
-+					 "interface\n", hwif->name);
-+			rc = -1;
-+			continue;
- 		}
- 	}
--	for (index = 0; index < MAX_HWIFS; ++index)
--		if (probe[index])
--			ide_proc_register_port(&ide_hwifs[index]);
+-static __init void kvm_init_debug(void)
+-{
+-	struct kvm_stats_debugfs_item *p;
+-
+-	debugfs_dir = debugfs_create_dir("kvm", NULL);
+-	for (p = debugfs_entries; p->name; ++p)
+-		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
+-						(void *)(long)p->offset,
+-						&stat_fops);
+-}
+-
+-static void kvm_exit_debug(void)
+-{
+-	struct kvm_stats_debugfs_item *p;
+-
+-	for (p = debugfs_entries; p->name; ++p)
+-		debugfs_remove(p->dentry);
+-	debugfs_remove(debugfs_dir);
+-}
+-
+-static int kvm_suspend(struct sys_device *dev, pm_message_t state)
+-{
+-	hardware_disable(NULL);
 -	return 0;
 -}
- 
--EXPORT_SYMBOL_GPL(ideprobe_init);
-+	for (i = 0; i < MAX_HWIFS; i++) {
-+		if (idx[i] == 0xff)
-+			continue;
- 
--int ide_device_add(u8 idx[4])
+-
+-static int kvm_resume(struct sys_device *dev)
 -{
--	int i, rc = 0;
-+		hwif = &ide_hwifs[idx[i]];
- 
--	for (i = 0; i < 4; i++) {
--		if (idx[i] != 0xff)
--			rc |= probe_hwif_init(&ide_hwifs[idx[i]]);
-+		if (hwif->present) {
-+			if (hwif->chipset == ide_unknown ||
-+			    hwif->chipset == ide_forced)
-+				hwif->chipset = ide_generic;
-+			hwif_register_devices(hwif);
-+		}
- 	}
- 
--	for (i = 0; i < 4; i++) {
-+	for (i = 0; i < MAX_HWIFS; i++) {
- 		if (idx[i] != 0xff)
- 			ide_proc_register_port(&ide_hwifs[idx[i]]);
- 	}
- 
- 	return rc;
- }
-+EXPORT_SYMBOL_GPL(ide_device_add_all);
-+
-+int ide_device_add(u8 idx[4])
-+{
-+	u8 idx_all[MAX_HWIFS];
-+	int i;
- 
-+	for (i = 0; i < MAX_HWIFS; i++)
-+		idx_all[i] = (i < 4) ? idx[i] : 0xff;
-+
-+	return ide_device_add_all(idx_all);
-+}
- EXPORT_SYMBOL_GPL(ide_device_add);
-diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
-index a4007d3..aa663e7 100644
---- a/drivers/ide/ide-proc.c
-+++ b/drivers/ide/ide-proc.c
-@@ -346,14 +346,20 @@ static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int va
- 
- static int set_xfer_rate (ide_drive_t *drive, int arg)
- {
-+	ide_task_t task;
- 	int err;
- 
- 	if (arg < 0 || arg > 70)
- 		return -EINVAL;
- 
--	err = ide_wait_cmd(drive,
--			WIN_SETFEATURES, (u8) arg,
--			SETFEATURES_XFER, 0, NULL);
-+	memset(&task, 0, sizeof(task));
-+	task.tf.command = WIN_SETFEATURES;
-+	task.tf.feature = SETFEATURES_XFER;
-+	task.tf.nsect   = (u8)arg;
-+	task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT |
-+			IDE_TFLAG_IN_NSECT;
-+
-+	err = ide_no_data_taskfile(drive, &task);
- 
- 	if (!err && arg) {
- 		ide_set_xfer_rate(drive, (u8) arg);
-diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
-new file mode 100644
-index 0000000..7ffa332
---- /dev/null
-+++ b/drivers/ide/ide-scan-pci.c
-@@ -0,0 +1,121 @@
-+/*
-+ * support for probing IDE PCI devices in the PCI bus order
-+ *
-+ * Copyright (c) 1998-2000  Andre Hedrick <andre at linux-ide.org>
-+ * Copyright (c) 1995-1998  Mark Lord
-+ *
-+ * May be copied or modified under the terms of the GNU General Public License
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/ide.h>
-+
-+/*
-+ *	Module interfaces
-+ */
-+
-+static int pre_init = 1;		/* Before first ordered IDE scan */
-+static LIST_HEAD(ide_pci_drivers);
-+
-+/*
-+ *	__ide_pci_register_driver	-	attach IDE driver
-+ *	@driver: pci driver
-+ *	@module: owner module of the driver
-+ *
-+ *	Registers a driver with the IDE layer. The IDE layer arranges that
-+ *	boot time setup is done in the expected device order and then
-+ *	hands the controllers off to the core PCI code to do the rest of
-+ *	the work.
-+ *
-+ *	Returns are the same as for pci_register_driver
-+ */
-+
-+int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
-+			      const char *mod_name)
-+{
-+	if (!pre_init)
-+		return __pci_register_driver(driver, module, mod_name);
-+	driver->driver.owner = module;
-+	list_add_tail(&driver->node, &ide_pci_drivers);
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
-+
-+/**
-+ *	ide_scan_pcidev		-	find an IDE driver for a device
-+ *	@dev: PCI device to check
-+ *
-+ *	Look for an IDE driver to handle the device we are considering.
-+ *	This is only used during boot up to get the ordering correct. After
-+ *	boot up the pci layer takes over the job.
-+ */
-+
-+static int __init ide_scan_pcidev(struct pci_dev *dev)
-+{
-+	struct list_head *l;
-+	struct pci_driver *d;
-+
-+	list_for_each(l, &ide_pci_drivers) {
-+		d = list_entry(l, struct pci_driver, node);
-+		if (d->id_table) {
-+			const struct pci_device_id *id =
-+				pci_match_id(d->id_table, dev);
-+
-+			if (id != NULL && d->probe(dev, id) >= 0) {
-+				dev->driver = d;
-+				pci_dev_get(dev);
-+				return 1;
-+			}
-+		}
-+	}
-+	return 0;
-+}
-+
-+/**
-+ *	ide_scan_pcibus		-	perform the initial IDE driver scan
-+ *
-+ *	Perform the initial bus rather than driver ordered scan of the
-+ *	PCI drivers. After this all IDE pci handling becomes standard
-+ *	module ordering not traditionally ordered.
-+ */
-+
-+int __init ide_scan_pcibus(void)
-+{
-+	struct pci_dev *dev = NULL;
-+	struct pci_driver *d;
-+	struct list_head *l, *n;
-+
-+	pre_init = 0;
-+	if (!ide_scan_direction)
-+		while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
-+			ide_scan_pcidev(dev);
-+	else
-+		while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID,
-+						     dev)))
-+			ide_scan_pcidev(dev);
-+
-+	/*
-+	 *	Hand the drivers over to the PCI layer now we
-+	 *	are post init.
-+	 */
-+
-+	list_for_each_safe(l, n, &ide_pci_drivers) {
-+		list_del(l);
-+		d = list_entry(l, struct pci_driver, node);
-+		if (__pci_register_driver(d, d->driver.owner,
-+					  d->driver.mod_name))
-+			printk(KERN_ERR "%s: failed to register %s driver\n",
-+					__FUNCTION__, d->driver.mod_name);
-+	}
-+
-+	return 0;
-+}
-+
-+static int __init ide_scan_pci(void)
-+{
-+	return ide_scan_pcibus();
-+}
-+
-+module_init(ide_scan_pci);
-diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
-index 7b9181b..d71a584 100644
---- a/drivers/ide/ide-tape.c
-+++ b/drivers/ide/ide-tape.c
-@@ -615,16 +615,6 @@ typedef struct os_dat_s {
- /*************************** End of tunable parameters ***********************/
- 
- /*
-- *	Debugging/Performance analysis
-- *
-- *	I/O trace support
-- */
--#define USE_IOTRACE	0
--#if USE_IOTRACE
--#define IO_IDETAPE_FIFO	500
--#endif
+-	hardware_enable(NULL);
+-	return 0;
+-}
 -
--/*
-  *	Read/Write error simulation
-  */
- #define SIMULATE_ERRORS			0
-@@ -1700,6 +1690,11 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
- 	if (error)
- 		tape->failed_pc = NULL;
- 
-+	if (!blk_special_request(rq)) {
-+		ide_end_request(drive, uptodate, nr_sects);
-+		return 0;
-+	}
-+
- 	spin_lock_irqsave(&tape->spinlock, flags);
- 
- 	/* The request was a pipelined data transfer request */
-@@ -1818,9 +1813,8 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
- 	idetape_tape_t *tape = drive->driver_data;
- 	idetape_pc_t *pc;
- 	struct request *rq;
--	atapi_error_t error;
- 
--	error.all = HWIF(drive)->INB(IDE_ERROR_REG);
-+	(void)drive->hwif->INB(IDE_ERROR_REG);
- 	pc = idetape_next_pc_storage(drive);
- 	rq = idetape_next_rq_storage(drive);
- 	idetape_create_request_sense_cmd(pc);
-@@ -1858,15 +1852,13 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif = drive->hwif;
- 	idetape_tape_t *tape = drive->driver_data;
--	atapi_status_t status;
--	atapi_bcount_t bcount;
--	atapi_ireason_t ireason;
- 	idetape_pc_t *pc = tape->pc;
+-static struct sysdev_class kvm_sysdev_class = {
+-	set_kset_name("kvm"),
+-	.suspend = kvm_suspend,
+-	.resume = kvm_resume,
+-};
 -
- 	unsigned int temp;
- #if SIMULATE_ERRORS
- 	static int error_sim_count = 0;
- #endif
-+	u16 bcount;
-+	u8 stat, ireason;
- 
- #if IDETAPE_DEBUG_LOG
- 	if (tape->debug_level >= 4)
-@@ -1875,10 +1867,10 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
- #endif /* IDETAPE_DEBUG_LOG */	
- 
- 	/* Clear the interrupt */
--	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
-+	stat = hwif->INB(IDE_STATUS_REG);
- 
- 	if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
--		if (HWIF(drive)->ide_dma_end(drive) || status.b.check) {
-+		if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
- 			/*
- 			 * A DMA error is sometimes expected. For example,
- 			 * if the tape is crossing a filemark during a
-@@ -1912,7 +1904,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
- 	}
- 
- 	/* No more interrupts */
--	if (!status.b.drq) {
-+	if ((stat & DRQ_STAT) == 0) {
- #if IDETAPE_DEBUG_LOG
- 		if (tape->debug_level >= 2)
- 			printk(KERN_INFO "ide-tape: Packet command completed, %d bytes transferred\n", pc->actually_transferred);
-@@ -1927,12 +1919,13 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
- 		    (++error_sim_count % 100) == 0) {
- 			printk(KERN_INFO "ide-tape: %s: simulating error\n",
- 				tape->name);
--			status.b.check = 1;
-+			stat |= ERR_STAT;
- 		}
- #endif
--		if (status.b.check && pc->c[0] == IDETAPE_REQUEST_SENSE_CMD)
--			status.b.check = 0;
--		if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) {	/* Error detected */
-+		if ((stat & ERR_STAT) && pc->c[0] == IDETAPE_REQUEST_SENSE_CMD)
-+			stat &= ~ERR_STAT;
-+		if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) {
-+			/* Error detected */
- #if IDETAPE_DEBUG_LOG
- 			if (tape->debug_level >= 1)
- 				printk(KERN_INFO "ide-tape: %s: I/O error\n",
-@@ -1951,7 +1944,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
- 		}
- 		pc->error = 0;
- 		if (test_bit(PC_WAIT_FOR_DSC, &pc->flags) &&
--		    !status.b.dsc) {
-+		    (stat & SEEK_STAT) == 0) {
- 			/* Media access command */
- 			tape->dsc_polling_start = jiffies;
- 			tape->dsc_polling_frequency = IDETAPE_DSC_MA_FAST;
-@@ -1973,30 +1966,30 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
- 		return ide_do_reset(drive);
- 	}
- 	/* Get the number of bytes to transfer on this interrupt. */
--	bcount.b.high = hwif->INB(IDE_BCOUNTH_REG);
--	bcount.b.low = hwif->INB(IDE_BCOUNTL_REG);
-+	bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) |
-+		  hwif->INB(IDE_BCOUNTL_REG);
- 
--	ireason.all = hwif->INB(IDE_IREASON_REG);
-+	ireason = hwif->INB(IDE_IREASON_REG);
- 
--	if (ireason.b.cod) {
-+	if (ireason & CD) {
- 		printk(KERN_ERR "ide-tape: CoD != 0 in idetape_pc_intr\n");
- 		return ide_do_reset(drive);
- 	}
--	if (ireason.b.io == test_bit(PC_WRITING, &pc->flags)) {
-+	if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) {
- 		/* Hopefully, we will never get here */
- 		printk(KERN_ERR "ide-tape: We wanted to %s, ",
--			ireason.b.io ? "Write":"Read");
-+				(ireason & IO) ? "Write" : "Read");
- 		printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
--			ireason.b.io ? "Read":"Write");
-+				(ireason & IO) ? "Read" : "Write");
- 		return ide_do_reset(drive);
- 	}
- 	if (!test_bit(PC_WRITING, &pc->flags)) {
- 		/* Reading - Check that we have enough space */
--		temp = pc->actually_transferred + bcount.all;
-+		temp = pc->actually_transferred + bcount;
- 		if (temp > pc->request_transfer) {
- 			if (temp > pc->buffer_size) {
- 				printk(KERN_ERR "ide-tape: The tape wants to send us more data than expected - discarding data\n");
--				idetape_discard_data(drive, bcount.all);
-+				idetape_discard_data(drive, bcount);
- 				ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
- 				return ide_started;
- 			}
-@@ -2008,23 +2001,26 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
- 	}
- 	if (test_bit(PC_WRITING, &pc->flags)) {
- 		if (pc->bh != NULL)
--			idetape_output_buffers(drive, pc, bcount.all);
-+			idetape_output_buffers(drive, pc, bcount);
- 		else
- 			/* Write the current buffer */
--			HWIF(drive)->atapi_output_bytes(drive, pc->current_position, bcount.all);
-+			hwif->atapi_output_bytes(drive, pc->current_position,
-+						 bcount);
- 	} else {
- 		if (pc->bh != NULL)
--			idetape_input_buffers(drive, pc, bcount.all);
-+			idetape_input_buffers(drive, pc, bcount);
- 		else
- 			/* Read the current buffer */
--			HWIF(drive)->atapi_input_bytes(drive, pc->current_position, bcount.all);
-+			hwif->atapi_input_bytes(drive, pc->current_position,
-+						bcount);
- 	}
- 	/* Update the current position */
--	pc->actually_transferred += bcount.all;
--	pc->current_position += bcount.all;
-+	pc->actually_transferred += bcount;
-+	pc->current_position += bcount;
- #if IDETAPE_DEBUG_LOG
- 	if (tape->debug_level >= 2)
--		printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes on that interrupt\n", pc->c[0], bcount.all);
-+		printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes "
-+				 "on that interrupt\n", pc->c[0], bcount);
- #endif
- 	/* And set the interrupt handler again */
- 	ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
-@@ -2078,28 +2074,28 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
- 	ide_hwif_t *hwif = drive->hwif;
- 	idetape_tape_t *tape = drive->driver_data;
- 	idetape_pc_t *pc = tape->pc;
--	atapi_ireason_t ireason;
- 	int retries = 100;
- 	ide_startstop_t startstop;
-+	u8 ireason;
- 
- 	if (ide_wait_stat(&startstop,drive,DRQ_STAT,BUSY_STAT,WAIT_READY)) {
- 		printk(KERN_ERR "ide-tape: Strange, packet command initiated yet DRQ isn't asserted\n");
- 		return startstop;
- 	}
--	ireason.all = hwif->INB(IDE_IREASON_REG);
--	while (retries-- && (!ireason.b.cod || ireason.b.io)) {
-+	ireason = hwif->INB(IDE_IREASON_REG);
-+	while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
- 		printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
- 				"a packet command, retrying\n");
- 		udelay(100);
--		ireason.all = hwif->INB(IDE_IREASON_REG);
-+		ireason = hwif->INB(IDE_IREASON_REG);
- 		if (retries == 0) {
- 			printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
- 					"issuing a packet command, ignoring\n");
--			ireason.b.cod = 1;
--			ireason.b.io = 0;
-+			ireason |= CD;
-+			ireason &= ~IO;
- 		}
- 	}
--	if (!ireason.b.cod || ireason.b.io) {
-+	if ((ireason & CD) == 0 || (ireason & IO)) {
- 		printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
- 				"a packet command\n");
- 		return ide_do_reset(drive);
-@@ -2120,8 +2116,8 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
- {
- 	ide_hwif_t *hwif = drive->hwif;
- 	idetape_tape_t *tape = drive->driver_data;
--	atapi_bcount_t bcount;
- 	int dma_ok = 0;
-+	u16 bcount;
- 
- #if IDETAPE_DEBUG_BUGS
- 	if (tape->pc->c[0] == IDETAPE_REQUEST_SENSE_CMD &&
-@@ -2170,7 +2166,7 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
- 	pc->actually_transferred = 0;
- 	pc->current_position = pc->buffer;
- 	/* Request to transfer the entire buffer at once */
--	bcount.all = pc->request_transfer;
-+	bcount = pc->request_transfer;
- 
- 	if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags)) {
- 		printk(KERN_WARNING "ide-tape: DMA disabled, "
-@@ -2180,12 +2176,9 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
- 	if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma)
- 		dma_ok = !hwif->dma_setup(drive);
- 
--	if (IDE_CONTROL_REG)
--		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
--	hwif->OUTB(dma_ok ? 1 : 0, IDE_FEATURE_REG);	/* Use PIO/DMA */
--	hwif->OUTB(bcount.b.high, IDE_BCOUNTH_REG);
--	hwif->OUTB(bcount.b.low, IDE_BCOUNTL_REG);
--	hwif->OUTB(drive->select.all, IDE_SELECT_REG);
-+	ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
-+			   IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
-+
- 	if (dma_ok)			/* Will begin DMA later */
- 		set_bit(PC_DMA_IN_PROGRESS, &pc->flags);
- 	if (test_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags)) {
-@@ -2295,11 +2288,11 @@ static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
- {
- 	idetape_tape_t *tape = drive->driver_data;
- 	idetape_pc_t *pc = tape->pc;
--	atapi_status_t status;
-+	u8 stat;
- 
--	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
--	if (status.b.dsc) {
--		if (status.b.check) {
-+	stat = drive->hwif->INB(IDE_STATUS_REG);
-+	if (stat & SEEK_STAT) {
-+		if (stat & ERR_STAT) {
- 			/* Error detected */
- 			if (pc->c[0] != IDETAPE_TEST_UNIT_READY_CMD)
- 				printk(KERN_ERR "ide-tape: %s: I/O error, ",
-@@ -2417,7 +2410,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
- 	idetape_tape_t *tape = drive->driver_data;
- 	idetape_pc_t *pc = NULL;
- 	struct request *postponed_rq = tape->postponed_rq;
--	atapi_status_t status;
-+	u8 stat;
- 
- #if IDETAPE_DEBUG_LOG
- #if 0
-@@ -2465,7 +2458,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
- 	 * If the tape is still busy, postpone our request and service
- 	 * the other device meanwhile.
- 	 */
--	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
-+	stat = drive->hwif->INB(IDE_STATUS_REG);
- 
- 	if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
- 		set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
-@@ -2481,7 +2474,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
- 		tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
- 	calculate_speeds(drive);
- 	if (!test_and_clear_bit(IDETAPE_IGNORE_DSC, &tape->flags) &&
--	    !status.b.dsc) {
-+	    (stat & SEEK_STAT) == 0) {
- 		if (postponed_rq == NULL) {
- 			tape->dsc_polling_start = jiffies;
- 			tape->dsc_polling_frequency = tape->best_dsc_rw_frequency;
-@@ -2502,9 +2495,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
- 	}
- 	if (rq->cmd[0] & REQ_IDETAPE_READ) {
- 		tape->buffer_head++;
--#if USE_IOTRACE
--		IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor);
--#endif
- 		tape->postpone_cnt = 0;
- 		pc = idetape_next_pc_storage(drive);
- 		idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
-@@ -2512,9 +2502,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
- 	}
- 	if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
- 		tape->buffer_head++;
--#if USE_IOTRACE
--		IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor);
--#endif
- 		tape->postpone_cnt = 0;
- 		pc = idetape_next_pc_storage(drive);
- 		idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
-@@ -3241,9 +3228,6 @@ static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
- 	idetape_switch_buffers(tape, new_stage);
- 	idetape_add_stage_tail(drive, new_stage);
- 	tape->pipeline_head++;
--#if USE_IOTRACE
--	IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor);
--#endif
- 	calculate_speeds(drive);
- 
- 	/*
-@@ -3493,9 +3477,6 @@ static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
- 		idetape_remove_stage_head(drive);
- 		spin_unlock_irqrestore(&tape->spinlock, flags);
- 		tape->pipeline_head++;
--#if USE_IOTRACE
--		IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor);
--#endif
- 		calculate_speeds(drive);
- 	}
- #if IDETAPE_DEBUG_BUGS
-@@ -4724,10 +4705,8 @@ static void ide_tape_release(struct kref *kref)
- 
- 	drive->dsc_overlap = 0;
- 	drive->driver_data = NULL;
--	class_device_destroy(idetape_sysfs_class,
--			MKDEV(IDETAPE_MAJOR, tape->minor));
--	class_device_destroy(idetape_sysfs_class,
--			MKDEV(IDETAPE_MAJOR, tape->minor + 128));
-+	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
-+	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor + 128));
- 	idetape_devs[tape->minor] = NULL;
- 	g->private_data = NULL;
- 	put_disk(g);
-@@ -4884,10 +4863,10 @@ static int ide_tape_probe(ide_drive_t *drive)
- 
- 	idetape_setup(drive, tape, minor);
- 
--	class_device_create(idetape_sysfs_class, NULL,
--			MKDEV(IDETAPE_MAJOR, minor), &drive->gendev, "%s", tape->name);
--	class_device_create(idetape_sysfs_class, NULL,
--			MKDEV(IDETAPE_MAJOR, minor + 128), &drive->gendev, "n%s", tape->name);
-+	device_create(idetape_sysfs_class, &drive->gendev,
-+		      MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
-+	device_create(idetape_sysfs_class, &drive->gendev,
-+			MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);
- 
- 	g->fops = &idetape_block_ops;
- 	ide_register_region(g);
-diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
-index 2b60f1b..5eb6fa1 100644
---- a/drivers/ide/ide-taskfile.c
-+++ b/drivers/ide/ide-taskfile.c
-@@ -35,93 +35,81 @@
- #include <asm/uaccess.h>
- #include <asm/io.h>
- 
--static void ata_bswap_data (void *buffer, int wcount)
-+void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
- {
--	u16 *p = buffer;
+-static struct sys_device kvm_sysdev = {
+-	.id = 0,
+-	.cls = &kvm_sysdev_class,
+-};
 -
--	while (wcount--) {
--		*p = *p << 8 | *p >> 8; p++;
--		*p = *p << 8 | *p >> 8; p++;
--	}
+-hpa_t bad_page_address;
+-
+-static inline
+-struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
+-{
+-	return container_of(pn, struct kvm_vcpu, preempt_notifier);
 -}
 -
--static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
+-static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 -{
--	HWIF(drive)->ata_input_data(drive, buffer, wcount);
--	if (drive->bswap)
--		ata_bswap_data(buffer, wcount);
+-	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
+-
+-	kvm_x86_ops->vcpu_load(vcpu, cpu);
 -}
-+	ide_hwif_t *hwif = drive->hwif;
-+	struct ide_taskfile *tf = &task->tf;
-+	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
-+
-+	if (task->tf_flags & IDE_TFLAG_FLAGGED)
-+		HIHI = 0xFF;
-+
-+#ifdef DEBUG
-+	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
-+		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
-+		drive->name, tf->feature, tf->nsect, tf->lbal,
-+		tf->lbam, tf->lbah, tf->device, tf->command);
-+	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
-+		"lbam 0x%02x lbah 0x%02x\n",
-+		drive->name, tf->hob_nsect, tf->hob_lbal,
-+		tf->hob_lbam, tf->hob_lbah);
-+#endif
- 
--static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
+-
+-static void kvm_sched_out(struct preempt_notifier *pn,
+-			  struct task_struct *next)
 -{
--	if (drive->bswap) {
--		ata_bswap_data(buffer, wcount);
--		HWIF(drive)->ata_output_data(drive, buffer, wcount);
--		ata_bswap_data(buffer, wcount);
--	} else {
--		HWIF(drive)->ata_output_data(drive, buffer, wcount);
+-	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
+-
+-	kvm_x86_ops->vcpu_put(vcpu);
+-}
+-
+-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+-		  struct module *module)
+-{
+-	int r;
+-	int cpu;
+-
+-	if (kvm_x86_ops) {
+-		printk(KERN_ERR "kvm: already loaded the other module\n");
+-		return -EEXIST;
 -	}
-+	ide_set_irq(drive, 1);
-+
-+	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
-+		SELECT_MASK(drive, 0);
-+
-+	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
-+		hwif->OUTW((tf->hob_data << 8) | tf->data, IDE_DATA_REG);
-+
-+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
-+		hwif->OUTB(tf->hob_feature, IDE_FEATURE_REG);
-+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
-+		hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG);
-+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
-+		hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG);
-+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
-+		hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG);
-+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
-+		hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG);
-+
-+	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
-+		hwif->OUTB(tf->feature, IDE_FEATURE_REG);
-+	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
-+		hwif->OUTB(tf->nsect, IDE_NSECTOR_REG);
-+	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
-+		hwif->OUTB(tf->lbal, IDE_SECTOR_REG);
-+	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
-+		hwif->OUTB(tf->lbam, IDE_LCYL_REG);
-+	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
-+		hwif->OUTB(tf->lbah, IDE_HCYL_REG);
-+
-+	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
-+		hwif->OUTB((tf->device & HIHI) | drive->select.all, IDE_SELECT_REG);
- }
- 
- int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
- {
- 	ide_task_t args;
-+
- 	memset(&args, 0, sizeof(ide_task_t));
--	args.tfRegister[IDE_NSECTOR_OFFSET]	= 0x01;
-+	args.tf.nsect = 0x01;
- 	if (drive->media == ide_disk)
--		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_IDENTIFY;
-+		args.tf.command = WIN_IDENTIFY;
- 	else
--		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_PIDENTIFY;
--	args.command_type = IDE_DRIVE_TASK_IN;
--	args.data_phase   = TASKFILE_IN;
--	args.handler	  = &task_in_intr;
--	return ide_raw_taskfile(drive, &args, buf);
-+		args.tf.command = WIN_PIDENTIFY;
-+	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-+	args.data_phase	= TASKFILE_IN;
-+	return ide_raw_taskfile(drive, &args, buf, 1);
- }
- 
--ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
-+static int inline task_dma_ok(ide_task_t *task)
- {
--	ide_hwif_t *hwif	= HWIF(drive);
--	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
--	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
--	u8 HIHI			= (drive->addressing == 1) ? 0xE0 : 0xEF;
 -
--	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
--	if (IDE_CONTROL_REG) {
--		/* clear nIEN */
--		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
+-	if (!ops->cpu_has_kvm_support()) {
+-		printk(KERN_ERR "kvm: no hardware support\n");
+-		return -EOPNOTSUPP;
+-	}
+-	if (ops->disabled_by_bios()) {
+-		printk(KERN_ERR "kvm: disabled by bios\n");
+-		return -EOPNOTSUPP;
 -	}
--	SELECT_MASK(drive, 0);
 -
--	if (drive->addressing == 1) {
--		hwif->OUTB(hobfile->feature, IDE_FEATURE_REG);
--		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
--		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
--		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
--		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
+-	kvm_x86_ops = ops;
+-
+-	r = kvm_x86_ops->hardware_setup();
+-	if (r < 0)
+-		goto out;
+-
+-	for_each_online_cpu(cpu) {
+-		smp_call_function_single(cpu,
+-				kvm_x86_ops->check_processor_compatibility,
+-				&r, 0, 1);
+-		if (r < 0)
+-			goto out_free_0;
 -	}
 -
--	hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
--	hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
--	hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
--	hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
--	hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);
+-	on_each_cpu(hardware_enable, NULL, 0, 1);
+-	r = register_cpu_notifier(&kvm_cpu_notifier);
+-	if (r)
+-		goto out_free_1;
+-	register_reboot_notifier(&kvm_reboot_notifier);
 -
--	hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG);
+-	r = sysdev_class_register(&kvm_sysdev_class);
+-	if (r)
+-		goto out_free_2;
 -
--	if (task->handler != NULL) {
--		if (task->prehandler != NULL) {
--			hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
--			ndelay(400);	/* FIXME */
--			return task->prehandler(drive, task->rq);
--		}
--		ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
--		return ide_started;
+-	r = sysdev_register(&kvm_sysdev);
+-	if (r)
+-		goto out_free_3;
+-
+-	/* A kmem cache lets us meet the alignment requirements of fx_save. */
+-	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
+-					   __alignof__(struct kvm_vcpu), 0, 0);
+-	if (!kvm_vcpu_cache) {
+-		r = -ENOMEM;
+-		goto out_free_4;
 -	}
-+	if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED))
-+		return 1;
- 
--	if (!drive->using_dma)
--		return ide_stopped;
 -
--	switch (taskfile->command) {
-+	switch (task->tf.command) {
- 		case WIN_WRITEDMA_ONCE:
- 		case WIN_WRITEDMA:
- 		case WIN_WRITEDMA_EXT:
-@@ -129,24 +117,79 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
- 		case WIN_READDMA:
- 		case WIN_READDMA_EXT:
- 		case WIN_IDENTIFY_DMA:
--			if (!hwif->dma_setup(drive)) {
--				hwif->dma_exec_cmd(drive, taskfile->command);
--				hwif->dma_start(drive);
--				return ide_started;
--			}
--			break;
--		default:
--			if (task->handler == NULL)
--				return ide_stopped;
-+			return 1;
- 	}
- 
--	return ide_stopped;
-+	return 0;
- }
- 
-+static ide_startstop_t task_no_data_intr(ide_drive_t *);
-+static ide_startstop_t set_geometry_intr(ide_drive_t *);
-+static ide_startstop_t recal_intr(ide_drive_t *);
-+static ide_startstop_t set_multmode_intr(ide_drive_t *);
-+static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
-+static ide_startstop_t task_in_intr(ide_drive_t *);
-+
-+ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
-+{
-+	ide_hwif_t *hwif	= HWIF(drive);
-+	struct ide_taskfile *tf = &task->tf;
-+	ide_handler_t *handler = NULL;
-+
-+	if (task->data_phase == TASKFILE_MULTI_IN ||
-+	    task->data_phase == TASKFILE_MULTI_OUT) {
-+		if (!drive->mult_count) {
-+			printk(KERN_ERR "%s: multimode not set!\n",
-+					drive->name);
-+			return ide_stopped;
-+		}
-+	}
-+
-+	if (task->tf_flags & IDE_TFLAG_FLAGGED)
-+		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;
-+
-+	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0)
-+		ide_tf_load(drive, task);
-+
-+	switch (task->data_phase) {
-+	case TASKFILE_MULTI_OUT:
-+	case TASKFILE_OUT:
-+		hwif->OUTBSYNC(drive, tf->command, IDE_COMMAND_REG);
-+		ndelay(400);	/* FIXME */
-+		return pre_task_out_intr(drive, task->rq);
-+	case TASKFILE_MULTI_IN:
-+	case TASKFILE_IN:
-+		handler = task_in_intr;
-+		/* fall-through */
-+	case TASKFILE_NO_DATA:
-+		if (handler == NULL)
-+			handler = task_no_data_intr;
-+		/* WIN_{SPECIFY,RESTORE,SETMULT} use custom handlers */
-+		if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) {
-+			switch (tf->command) {
-+			case WIN_SPECIFY: handler = set_geometry_intr;	break;
-+			case WIN_RESTORE: handler = recal_intr;		break;
-+			case WIN_SETMULT: handler = set_multmode_intr;	break;
-+			}
-+		}
-+		ide_execute_command(drive, tf->command, handler,
-+				    WAIT_WORSTCASE, NULL);
-+		return ide_started;
-+	default:
-+		if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
-+		    hwif->dma_setup(drive))
-+			return ide_stopped;
-+		hwif->dma_exec_cmd(drive, tf->command);
-+		hwif->dma_start(drive);
-+		return ide_started;
-+	}
-+}
-+EXPORT_SYMBOL_GPL(do_rw_taskfile);
-+
- /*
-  * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
-  */
--ide_startstop_t set_multmode_intr (ide_drive_t *drive)
-+static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif = HWIF(drive);
- 	u8 stat;
-@@ -164,7 +207,7 @@ ide_startstop_t set_multmode_intr (ide_drive_t *drive)
- /*
-  * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
-  */
--ide_startstop_t set_geometry_intr (ide_drive_t *drive)
-+static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif = HWIF(drive);
- 	int retries = 5;
-@@ -187,7 +230,7 @@ ide_startstop_t set_geometry_intr (ide_drive_t *drive)
- /*
-  * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
-  */
--ide_startstop_t recal_intr (ide_drive_t *drive)
-+static ide_startstop_t recal_intr(ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif = HWIF(drive);
- 	u8 stat;
-@@ -200,7 +243,7 @@ ide_startstop_t recal_intr (ide_drive_t *drive)
- /*
-  * Handler for commands without a data phase
-  */
--ide_startstop_t task_no_data_intr (ide_drive_t *drive)
-+static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
- {
- 	ide_task_t *args	= HWGROUP(drive)->rq->special;
- 	ide_hwif_t *hwif	= HWIF(drive);
-@@ -217,9 +260,7 @@ ide_startstop_t task_no_data_intr (ide_drive_t *drive)
- 	return ide_stopped;
- }
- 
--EXPORT_SYMBOL(task_no_data_intr);
+-	kvm_chardev_ops.owner = module;
 -
--static u8 wait_drive_not_busy(ide_drive_t *drive)
-+u8 wait_drive_not_busy(ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif = HWIF(drive);
- 	int retries;
-@@ -227,8 +268,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
- 
- 	/*
- 	 * Last sector was transfered, wait until drive is ready.
--	 * This can take up to 10 usec, but we will wait max 1 ms
--	 * (drive_cmd_intr() waits that long).
-+	 * This can take up to 10 usec, but we will wait max 1 ms.
- 	 */
- 	for (retries = 0; retries < 100; retries++) {
- 		if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT)
-@@ -283,9 +323,9 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
- 
- 	/* do the actual data transfer */
- 	if (write)
--		taskfile_output_data(drive, buf, SECTOR_WORDS);
-+		hwif->ata_output_data(drive, buf, SECTOR_WORDS);
- 	else
--		taskfile_input_data(drive, buf, SECTOR_WORDS);
-+		hwif->ata_input_data(drive, buf, SECTOR_WORDS);
- 
- 	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
- #ifdef CONFIG_HIGHMEM
-@@ -305,9 +345,18 @@ static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
- static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
- 				     unsigned int write)
- {
-+	u8 saved_io_32bit = drive->io_32bit;
-+
- 	if (rq->bio)	/* fs request */
- 		rq->errors = 0;
- 
-+	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-+		ide_task_t *task = rq->special;
-+
-+		if (task->tf_flags & IDE_TFLAG_IO_16BIT)
-+			drive->io_32bit = 0;
-+	}
-+
- 	touch_softlockup_watchdog();
- 
- 	switch (drive->hwif->data_phase) {
-@@ -319,6 +368,8 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
- 		ide_pio_sector(drive, write);
- 		break;
- 	}
-+
-+	drive->io_32bit = saved_io_32bit;
- }
- 
- static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
-@@ -356,40 +407,35 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
- 	return ide_error(drive, s, stat);
- }
- 
--static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
-+void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
- {
--	HWIF(drive)->cursg = NULL;
+-	r = misc_register(&kvm_dev);
+-	if (r) {
+-		printk (KERN_ERR "kvm: misc device register failed\n");
+-		goto out_free;
+-	}
 -
- 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
--		ide_task_t *task = rq->special;
-+		u8 err = drive->hwif->INB(IDE_ERROR_REG);
- 
--		if (task->tf_out_flags.all) {
--			u8 err = drive->hwif->INB(IDE_ERROR_REG);
--			ide_end_drive_cmd(drive, stat, err);
--			return;
--		}
-+		ide_end_drive_cmd(drive, stat, err);
-+		return;
- 	}
- 
- 	if (rq->rq_disk) {
- 		ide_driver_t *drv;
- 
- 		drv = *(ide_driver_t **)rq->rq_disk->private_data;;
--		drv->end_request(drive, 1, rq->hard_nr_sectors);
-+		drv->end_request(drive, 1, rq->nr_sectors);
- 	} else
--		ide_end_request(drive, 1, rq->hard_nr_sectors);
-+		ide_end_request(drive, 1, rq->nr_sectors);
- }
- 
- /*
-  * Handler for command with PIO data-in phase (Read/Read Multiple).
-  */
--ide_startstop_t task_in_intr (ide_drive_t *drive)
-+static ide_startstop_t task_in_intr(ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif = drive->hwif;
- 	struct request *rq = HWGROUP(drive)->rq;
- 	u8 stat = hwif->INB(IDE_STATUS_REG);
- 
- 	/* new way for dealing with premature shared PCI interrupts */
--	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
-+	if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
- 		if (stat & (ERR_STAT | DRQ_STAT))
- 			return task_error(drive, rq, __FUNCTION__, stat);
- 		/* No data yet, so wait for another IRQ. */
-@@ -402,7 +448,7 @@ ide_startstop_t task_in_intr (ide_drive_t *drive)
- 	/* If it was the last datablock check status and finish transfer. */
- 	if (!hwif->nleft) {
- 		stat = wait_drive_not_busy(drive);
--		if (!OK_STAT(stat, 0, BAD_R_STAT))
-+		if (!OK_STAT(stat, 0, BAD_STAT))
- 			return task_error(drive, rq, __FUNCTION__, stat);
- 		task_end_request(drive, rq, stat);
- 		return ide_stopped;
-@@ -413,7 +459,6 @@ ide_startstop_t task_in_intr (ide_drive_t *drive)
- 
- 	return ide_started;
- }
--EXPORT_SYMBOL(task_in_intr);
- 
- /*
-  * Handler for command with PIO data-out phase (Write/Write Multiple).
-@@ -443,11 +488,11 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
- 	return ide_started;
- }
- 
--ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
-+static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
- {
- 	ide_startstop_t startstop;
- 
--	if (ide_wait_stat(&startstop, drive, DATA_READY,
-+	if (ide_wait_stat(&startstop, drive, DRQ_STAT,
- 			  drive->bad_wstat, WAIT_DRQ)) {
- 		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
- 				drive->name,
-@@ -464,9 +509,8 @@ ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
- 
- 	return ide_started;
- }
--EXPORT_SYMBOL(pre_task_out_intr);
- 
--static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
-+int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
- {
- 	struct request rq;
- 
-@@ -481,36 +525,27 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
- 	 * if we would find a solution to transfer any size.
- 	 * To support special commands like READ LONG.
- 	 */
--	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
--		if (data_size == 0)
--			rq.nr_sectors = (args->hobRegister[IDE_NSECTOR_OFFSET] << 8) | args->tfRegister[IDE_NSECTOR_OFFSET];
--		else
--			rq.nr_sectors = data_size / SECTOR_SIZE;
+-	kvm_preempt_ops.sched_in = kvm_sched_in;
+-	kvm_preempt_ops.sched_out = kvm_sched_out;
+-
+-	return r;
+-
+-out_free:
+-	kmem_cache_destroy(kvm_vcpu_cache);
+-out_free_4:
+-	sysdev_unregister(&kvm_sysdev);
+-out_free_3:
+-	sysdev_class_unregister(&kvm_sysdev_class);
+-out_free_2:
+-	unregister_reboot_notifier(&kvm_reboot_notifier);
+-	unregister_cpu_notifier(&kvm_cpu_notifier);
+-out_free_1:
+-	on_each_cpu(hardware_disable, NULL, 0, 1);
+-out_free_0:
+-	kvm_x86_ops->hardware_unsetup();
+-out:
+-	kvm_x86_ops = NULL;
+-	return r;
+-}
+-
+-void kvm_exit_x86(void)
+-{
+-	misc_deregister(&kvm_dev);
+-	kmem_cache_destroy(kvm_vcpu_cache);
+-	sysdev_unregister(&kvm_sysdev);
+-	sysdev_class_unregister(&kvm_sysdev_class);
+-	unregister_reboot_notifier(&kvm_reboot_notifier);
+-	unregister_cpu_notifier(&kvm_cpu_notifier);
+-	on_each_cpu(hardware_disable, NULL, 0, 1);
+-	kvm_x86_ops->hardware_unsetup();
+-	kvm_x86_ops = NULL;
+-}
+-
+-static __init int kvm_init(void)
+-{
+-	static struct page *bad_page;
+-	int r;
+-
+-	r = kvm_mmu_module_init();
+-	if (r)
+-		goto out4;
+-
+-	kvm_init_debug();
+-
+-	kvm_init_msr_list();
+-
+-	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
+-		r = -ENOMEM;
+-		goto out;
+-	}
+-
+-	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
+-	memset(__va(bad_page_address), 0, PAGE_SIZE);
+-
+-	return 0;
+-
+-out:
+-	kvm_exit_debug();
+-	kvm_mmu_module_exit();
+-out4:
+-	return r;
+-}
+-
+-static __exit void kvm_exit(void)
+-{
+-	kvm_exit_debug();
+-	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
+-	kvm_mmu_module_exit();
+-}
+-
+-module_init(kvm_init)
+-module_exit(kvm_exit)
+-
+-EXPORT_SYMBOL_GPL(kvm_init_x86);
+-EXPORT_SYMBOL_GPL(kvm_exit_x86);
+diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
+deleted file mode 100644
+index a0e415d..0000000
+--- a/drivers/kvm/kvm_svm.h
++++ /dev/null
+@@ -1,45 +0,0 @@
+-#ifndef __KVM_SVM_H
+-#define __KVM_SVM_H
+-
+-#include <linux/kernel.h>
+-#include <linux/types.h>
+-#include <linux/list.h>
+-#include <asm/msr.h>
+-
+-#include "svm.h"
+-#include "kvm.h"
+-
+-static const u32 host_save_user_msrs[] = {
+-#ifdef CONFIG_X86_64
+-	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
+-	MSR_FS_BASE,
+-#endif
+-	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
+-};
+-
+-#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
+-#define NUM_DB_REGS 4
+-
+-struct kvm_vcpu;
+-
+-struct vcpu_svm {
+-	struct kvm_vcpu vcpu;
+-	struct vmcb *vmcb;
+-	unsigned long vmcb_pa;
+-	struct svm_cpu_data *svm_data;
+-	uint64_t asid_generation;
+-
+-	unsigned long db_regs[NUM_DB_REGS];
+-
+-	u64 next_rip;
+-
+-	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
+-	u64 host_gs_base;
+-	unsigned long host_cr2;
+-	unsigned long host_db_regs[NUM_DB_REGS];
+-	unsigned long host_dr6;
+-	unsigned long host_dr7;
+-};
+-
+-#endif
+-
+diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
+deleted file mode 100644
+index 238fcad..0000000
+--- a/drivers/kvm/lapic.c
++++ /dev/null
+@@ -1,1080 +0,0 @@
+-
+-/*
+- * Local APIC virtualization
+- *
+- * Copyright (C) 2006 Qumranet, Inc.
+- * Copyright (C) 2007 Novell
+- * Copyright (C) 2007 Intel
+- *
+- * Authors:
+- *   Dor Laor <dor.laor at qumranet.com>
+- *   Gregory Haskins <ghaskins at novell.com>
+- *   Yaozu (Eddie) Dong <eddie.dong at intel.com>
+- *
+- * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2.  See
+- * the COPYING file in the top-level directory.
+- */
+-
+-#include "kvm.h"
+-#include <linux/kvm.h>
+-#include <linux/mm.h>
+-#include <linux/highmem.h>
+-#include <linux/smp.h>
+-#include <linux/hrtimer.h>
+-#include <linux/io.h>
+-#include <linux/module.h>
+-#include <asm/processor.h>
+-#include <asm/msr.h>
+-#include <asm/page.h>
+-#include <asm/current.h>
+-#include <asm/apicdef.h>
+-#include <asm/atomic.h>
+-#include <asm/div64.h>
+-#include "irq.h"
+-
+-#define PRId64 "d"
+-#define PRIx64 "llx"
+-#define PRIu64 "u"
+-#define PRIo64 "o"
+-
+-#define APIC_BUS_CYCLE_NS 1
+-
+-/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define apic_debug(fmt, arg...)
+-
+-#define APIC_LVT_NUM			6
+-/* 14 is the version for Xeon and Pentium 8.4.8*/
+-#define APIC_VERSION			(0x14UL | ((APIC_LVT_NUM - 1) << 16))
+-#define LAPIC_MMIO_LENGTH		(1 << 12)
+-/* followed define is not in apicdef.h */
+-#define APIC_SHORT_MASK			0xc0000
+-#define APIC_DEST_NOSHORT		0x0
+-#define APIC_DEST_MASK			0x800
+-#define MAX_APIC_VECTOR			256
+-
+-#define VEC_POS(v) ((v) & (32 - 1))
+-#define REG_POS(v) (((v) >> 5) << 4)
+-static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
+-{
+-	return *((u32 *) (apic->regs + reg_off));
+-}
+-
+-static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
+-{
+-	*((u32 *) (apic->regs + reg_off)) = val;
+-}
+-
+-static inline int apic_test_and_set_vector(int vec, void *bitmap)
+-{
+-	return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+-}
+-
+-static inline int apic_test_and_clear_vector(int vec, void *bitmap)
+-{
+-	return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+-}
+-
+-static inline void apic_set_vector(int vec, void *bitmap)
+-{
+-	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+-}
+-
+-static inline void apic_clear_vector(int vec, void *bitmap)
+-{
+-	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+-}
+-
+-static inline int apic_hw_enabled(struct kvm_lapic *apic)
+-{
+-	return (apic)->vcpu->apic_base & MSR_IA32_APICBASE_ENABLE;
+-}
+-
+-static inline int  apic_sw_enabled(struct kvm_lapic *apic)
+-{
+-	return apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
+-}
+-
+-static inline int apic_enabled(struct kvm_lapic *apic)
+-{
+-	return apic_sw_enabled(apic) &&	apic_hw_enabled(apic);
+-}
+-
+-#define LVT_MASK	\
+-	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
+-
+-#define LINT_MASK	\
+-	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
+-	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
+-
+-static inline int kvm_apic_id(struct kvm_lapic *apic)
+-{
+-	return (apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
+-}
+-
+-static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
+-{
+-	return !(apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
+-}
+-
+-static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
+-{
+-	return apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
+-}
+-
+-static inline int apic_lvtt_period(struct kvm_lapic *apic)
+-{
+-	return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC;
+-}
+-
+-static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
+-	LVT_MASK | APIC_LVT_TIMER_PERIODIC,	/* LVTT */
+-	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
+-	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
+-	LINT_MASK, LINT_MASK,	/* LVT0-1 */
+-	LVT_MASK		/* LVTERR */
+-};
 -
--		if (!rq.nr_sectors) {
--			printk(KERN_ERR "%s: in/out command without data\n",
--					drive->name);
--			return -EFAULT;
--		}
-+	rq.hard_nr_sectors = rq.nr_sectors = nsect;
-+	rq.hard_cur_sectors = rq.current_nr_sectors = nsect;
- 
--		rq.hard_nr_sectors = rq.nr_sectors;
--		rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;
-+	if (task->tf_flags & IDE_TFLAG_WRITE)
-+		rq.cmd_flags |= REQ_RW;
- 
--		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
--			rq.cmd_flags |= REQ_RW;
+-static int find_highest_vector(void *bitmap)
+-{
+-	u32 *word = bitmap;
+-	int word_offset = MAX_APIC_VECTOR >> 5;
+-
+-	while ((word_offset != 0) && (word[(--word_offset) << 2] == 0))
+-		continue;
+-
+-	if (likely(!word_offset && !word[0]))
+-		return -1;
+-	else
+-		return fls(word[word_offset << 2]) - 1 + (word_offset << 5);
+-}
+-
+-static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
+-{
+-	return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
+-}
+-
+-static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
+-{
+-	apic_clear_vector(vec, apic->regs + APIC_IRR);
+-}
+-
+-static inline int apic_find_highest_irr(struct kvm_lapic *apic)
+-{
+-	int result;
+-
+-	result = find_highest_vector(apic->regs + APIC_IRR);
+-	ASSERT(result == -1 || result >= 16);
+-
+-	return result;
+-}
+-
+-int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
+-	int highest_irr;
+-
+-	if (!apic)
+-		return 0;
+-	highest_irr = apic_find_highest_irr(apic);
+-
+-	return highest_irr;
+-}
+-EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
+-
+-int kvm_apic_set_irq(struct kvm_lapic *apic, u8 vec, u8 trig)
+-{
+-	if (!apic_test_and_set_irr(vec, apic)) {
+-		/* a new pending irq is set in IRR */
+-		if (trig)
+-			apic_set_vector(vec, apic->regs + APIC_TMR);
+-		else
+-			apic_clear_vector(vec, apic->regs + APIC_TMR);
+-		kvm_vcpu_kick(apic->vcpu);
+-		return 1;
 -	}
-+	rq.special = task;
-+	task->rq = &rq;
- 
--	rq.special = args;
--	args->rq = &rq;
- 	return ide_do_drive_cmd(drive, &rq, ide_wait);
- }
- 
--int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
-+EXPORT_SYMBOL(ide_raw_taskfile);
-+
-+int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
- {
--	return ide_diag_taskfile(drive, args, 0, buf);
+-	return 0;
 -}
-+	task->data_phase = TASKFILE_NO_DATA;
- 
--EXPORT_SYMBOL(ide_raw_taskfile);
-+	return ide_raw_taskfile(drive, task, NULL, 0);
-+}
-+EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
- 
- #ifdef CONFIG_IDE_TASK_IOCTL
- int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
-@@ -519,13 +554,12 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- 	ide_task_t		args;
- 	u8 *outbuf		= NULL;
- 	u8 *inbuf		= NULL;
--	task_ioreg_t *argsptr	= args.tfRegister;
--	task_ioreg_t *hobsptr	= args.hobRegister;
-+	u8 *data_buf		= NULL;
- 	int err			= 0;
- 	int tasksize		= sizeof(struct ide_task_request_s);
- 	unsigned int taskin	= 0;
- 	unsigned int taskout	= 0;
--	u8 io_32bit		= drive->io_32bit;
-+	u16 nsect		= 0;
- 	char __user *buf = (char __user *)arg;
- 
- //	printk("IDE Taskfile ...\n");
-@@ -572,24 +606,52 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- 	}
- 
- 	memset(&args, 0, sizeof(ide_task_t));
--	memcpy(argsptr, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
--	memcpy(hobsptr, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE);
- 
--	args.tf_in_flags  = req_task->in_flags;
--	args.tf_out_flags = req_task->out_flags;
--	args.data_phase   = req_task->data_phase;
--	args.command_type = req_task->req_cmd;
-+	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
-+	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
-+
-+	args.data_phase = req_task->data_phase;
-+
-+	args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
-+			IDE_TFLAG_IN_TF;
-+	if (drive->addressing == 1)
-+		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);
-+
-+	if (req_task->out_flags.all) {
-+		args.tf_flags |= IDE_TFLAG_FLAGGED;
-+
-+		if (req_task->out_flags.b.data)
-+			args.tf_flags |= IDE_TFLAG_OUT_DATA;
-+
-+		if (req_task->out_flags.b.nsector_hob)
-+			args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
-+		if (req_task->out_flags.b.sector_hob)
-+			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
-+		if (req_task->out_flags.b.lcyl_hob)
-+			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
-+		if (req_task->out_flags.b.hcyl_hob)
-+			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;
-+
-+		if (req_task->out_flags.b.error_feature)
-+			args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
-+		if (req_task->out_flags.b.nsector)
-+			args.tf_flags |= IDE_TFLAG_OUT_NSECT;
-+		if (req_task->out_flags.b.sector)
-+			args.tf_flags |= IDE_TFLAG_OUT_LBAL;
-+		if (req_task->out_flags.b.lcyl)
-+			args.tf_flags |= IDE_TFLAG_OUT_LBAM;
-+		if (req_task->out_flags.b.hcyl)
-+			args.tf_flags |= IDE_TFLAG_OUT_LBAH;
-+	} else {
-+		args.tf_flags |= IDE_TFLAG_OUT_TF;
-+		if (args.tf_flags & IDE_TFLAG_LBA48)
-+			args.tf_flags |= IDE_TFLAG_OUT_HOB;
-+	}
-+
-+	if (req_task->in_flags.b.data)
-+		args.tf_flags |= IDE_TFLAG_IN_DATA;
- 
--	drive->io_32bit = 0;
- 	switch(req_task->data_phase) {
--		case TASKFILE_OUT_DMAQ:
--		case TASKFILE_OUT_DMA:
--			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
--			break;
--		case TASKFILE_IN_DMAQ:
--		case TASKFILE_IN_DMA:
--			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
--			break;
- 		case TASKFILE_MULTI_OUT:
- 			if (!drive->mult_count) {
- 				/* (hs): give up if multcount is not set */
-@@ -601,9 +663,11 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- 			}
- 			/* fall through */
- 		case TASKFILE_OUT:
--			args.prehandler = &pre_task_out_intr;
--			args.handler = &task_out_intr;
--			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
-+			/* fall through */
-+		case TASKFILE_OUT_DMAQ:
-+		case TASKFILE_OUT_DMA:
-+			nsect = taskout / SECTOR_SIZE;
-+			data_buf = outbuf;
- 			break;
- 		case TASKFILE_MULTI_IN:
- 			if (!drive->mult_count) {
-@@ -616,22 +680,46 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- 			}
- 			/* fall through */
- 		case TASKFILE_IN:
--			args.handler = &task_in_intr;
--			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
-+			/* fall through */
-+		case TASKFILE_IN_DMAQ:
-+		case TASKFILE_IN_DMA:
-+			nsect = taskin / SECTOR_SIZE;
-+			data_buf = inbuf;
- 			break;
- 		case TASKFILE_NO_DATA:
--			args.handler = &task_no_data_intr;
--			err = ide_diag_taskfile(drive, &args, 0, NULL);
- 			break;
- 		default:
- 			err = -EFAULT;
- 			goto abort;
- 	}
- 
--	memcpy(req_task->io_ports, &(args.tfRegister), HDIO_DRIVE_TASK_HDR_SIZE);
--	memcpy(req_task->hob_ports, &(args.hobRegister), HDIO_DRIVE_HOB_HDR_SIZE);
--	req_task->in_flags  = args.tf_in_flags;
--	req_task->out_flags = args.tf_out_flags;
-+	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
-+		nsect = 0;
-+	else if (!nsect) {
-+		nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;
-+
-+		if (!nsect) {
-+			printk(KERN_ERR "%s: in/out command without data\n",
-+					drive->name);
-+			err = -EFAULT;
-+			goto abort;
-+		}
-+	}
-+
-+	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
-+		args.tf_flags |= IDE_TFLAG_WRITE;
-+
-+	err = ide_raw_taskfile(drive, &args, data_buf, nsect);
-+
-+	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
-+	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);
-+
-+	if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
-+	    req_task->in_flags.all == 0) {
-+		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
-+		if (drive->addressing == 1)
-+			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
-+	}
- 
- 	if (copy_to_user(buf, req_task, tasksize)) {
- 		err = -EFAULT;
-@@ -658,40 +746,24 @@ abort:
- 
- //	printk("IDE Taskfile ioctl ended. rc = %i\n", err);
- 
--	drive->io_32bit = io_32bit;
 -
- 	return err;
- }
- #endif
- 
--int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
+-static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 -{
--	struct request rq;
--	u8 buffer[4];
+-	int result;
 -
--	if (!buf)
--		buf = buffer;
--	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
--	ide_init_drive_cmd(&rq);
--	rq.buffer = buf;
--	*buf++ = cmd;
--	*buf++ = nsect;
--	*buf++ = feature;
--	*buf++ = sectors;
--	return ide_do_drive_cmd(drive, &rq, ide_wait);
+-	result = find_highest_vector(apic->regs + APIC_ISR);
+-	ASSERT(result == -1 || result >= 16);
+-
+-	return result;
 -}
 -
- int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- {
--	int err = 0;
--	u8 args[4], *argbuf = args;
--	u8 xfer_rate = 0;
--	int argsize = 4;
-+	u8 *buf = NULL;
-+	int bufsize = 0, err = 0;
-+	u8 args[4], xfer_rate = 0;
- 	ide_task_t tfargs;
-+	struct ide_taskfile *tf = &tfargs.tf;
- 
- 	if (NULL == (void *) arg) {
- 		struct request rq;
-+
- 		ide_init_drive_cmd(&rq);
-+		rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
-+
- 		return ide_do_drive_cmd(drive, &rq, ide_wait);
- 	}
- 
-@@ -699,27 +771,40 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- 		return -EFAULT;
- 
- 	memset(&tfargs, 0, sizeof(ide_task_t));
--	tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
--	tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
--	tfargs.tfRegister[IDE_SECTOR_OFFSET]  = args[1];
--	tfargs.tfRegister[IDE_LCYL_OFFSET]    = 0x00;
--	tfargs.tfRegister[IDE_HCYL_OFFSET]    = 0x00;
--	tfargs.tfRegister[IDE_SELECT_OFFSET]  = 0x00;
--	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];
-+	tf->feature = args[2];
-+	if (args[0] == WIN_SMART) {
-+		tf->nsect = args[3];
-+		tf->lbal  = args[1];
-+		tf->lbam  = 0x4f;
-+		tf->lbah  = 0xc2;
-+		tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT;
-+	} else {
-+		tf->nsect = args[1];
-+		tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE |
-+				  IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT;
-+	}
-+	tf->command = args[0];
-+	tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA;
- 
- 	if (args[3]) {
--		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
--		argbuf = kzalloc(argsize, GFP_KERNEL);
--		if (argbuf == NULL)
-+		tfargs.tf_flags |= IDE_TFLAG_IO_16BIT;
-+		bufsize = SECTOR_WORDS * 4 * args[3];
-+		buf = kzalloc(bufsize, GFP_KERNEL);
-+		if (buf == NULL)
- 			return -ENOMEM;
- 	}
-+
- 	if (set_transfer(drive, &tfargs)) {
- 		xfer_rate = args[1];
- 		if (ide_ata66_check(drive, &tfargs))
- 			goto abort;
- 	}
- 
--	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);
-+	err = ide_raw_taskfile(drive, &tfargs, buf, args[3]);
-+
-+	args[0] = tf->status;
-+	args[1] = tf->error;
-+	args[2] = tf->nsect;
- 
- 	if (!err && xfer_rate) {
- 		/* active-retuning-calls future */
-@@ -727,142 +812,38 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- 		ide_driveid_update(drive);
- 	}
- abort:
--	if (copy_to_user((void __user *)arg, argbuf, argsize))
-+	if (copy_to_user((void __user *)arg, &args, 4))
- 		err = -EFAULT;
--	if (argsize > 4)
--		kfree(argbuf);
-+	if (buf) {
-+		if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
-+			err = -EFAULT;
-+		kfree(buf);
-+	}
- 	return err;
- }
- 
--static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf)
+-static void apic_update_ppr(struct kvm_lapic *apic)
 -{
--	struct request rq;
+-	u32 tpr, isrv, ppr;
+-	int isr;
 -
--	ide_init_drive_cmd(&rq);
--	rq.cmd_type = REQ_TYPE_ATA_TASK;
--	rq.buffer = buf;
--	return ide_do_drive_cmd(drive, &rq, ide_wait);
+-	tpr = apic_get_reg(apic, APIC_TASKPRI);
+-	isr = apic_find_highest_isr(apic);
+-	isrv = (isr != -1) ? isr : 0;
+-
+-	if ((tpr & 0xf0) >= (isrv & 0xf0))
+-		ppr = tpr & 0xff;
+-	else
+-		ppr = isrv & 0xf0;
+-
+-	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
+-		   apic, ppr, isr, isrv);
+-
+-	apic_set_reg(apic, APIC_PROCPRI, ppr);
 -}
 -
- int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- {
- 	void __user *p = (void __user *)arg;
- 	int err = 0;
--	u8 args[7], *argbuf = args;
--	int argsize = 7;
-+	u8 args[7];
-+	ide_task_t task;
- 
- 	if (copy_from_user(args, p, 7))
- 		return -EFAULT;
--	err = ide_wait_cmd_task(drive, argbuf);
--	if (copy_to_user(p, argbuf, argsize))
--		err = -EFAULT;
--	return err;
+-static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
+-{
+-	apic_set_reg(apic, APIC_TASKPRI, tpr);
+-	apic_update_ppr(apic);
+-}
+-
+-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
+-{
+-	return kvm_apic_id(apic) == dest;
+-}
+-
+-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
+-{
+-	int result = 0;
+-	u8 logical_id;
+-
+-	logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR));
+-
+-	switch (apic_get_reg(apic, APIC_DFR)) {
+-	case APIC_DFR_FLAT:
+-		if (logical_id & mda)
+-			result = 1;
+-		break;
+-	case APIC_DFR_CLUSTER:
+-		if (((logical_id >> 4) == (mda >> 0x4))
+-		    && (logical_id & mda & 0xf))
+-			result = 1;
+-		break;
+-	default:
+-		printk(KERN_WARNING "Bad DFR vcpu %d: %08x\n",
+-		       apic->vcpu->vcpu_id, apic_get_reg(apic, APIC_DFR));
+-		break;
+-	}
+-
+-	return result;
+-}
+-
+-static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+-			   int short_hand, int dest, int dest_mode)
+-{
+-	int result = 0;
+-	struct kvm_lapic *target = vcpu->apic;
+-
+-	apic_debug("target %p, source %p, dest 0x%x, "
+-		   "dest_mode 0x%x, short_hand 0x%x",
+-		   target, source, dest, dest_mode, short_hand);
+-
+-	ASSERT(!target);
+-	switch (short_hand) {
+-	case APIC_DEST_NOSHORT:
+-		if (dest_mode == 0) {
+-			/* Physical mode. */
+-			if ((dest == 0xFF) || (dest == kvm_apic_id(target)))
+-				result = 1;
+-		} else
+-			/* Logical mode. */
+-			result = kvm_apic_match_logical_addr(target, dest);
+-		break;
+-	case APIC_DEST_SELF:
+-		if (target == source)
+-			result = 1;
+-		break;
+-	case APIC_DEST_ALLINC:
+-		result = 1;
+-		break;
+-	case APIC_DEST_ALLBUT:
+-		if (target != source)
+-			result = 1;
+-		break;
+-	default:
+-		printk(KERN_WARNING "Bad dest shorthand value %x\n",
+-		       short_hand);
+-		break;
+-	}
+-
+-	return result;
 -}
 -
 -/*
-- * NOTICE: This is additions from IBM to provide a discrete interface,
-- * for selective taskregister access operations.  Nice JOB Klaus!!!
-- * Glad to be able to work and co-develop this with you and IBM.
+- * Add a pending IRQ into lapic.
+- * Return 1 if successfully added and 0 if discarded.
 - */
--ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
+-static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
+-			     int vector, int level, int trig_mode)
 -{
--	ide_hwif_t *hwif	= HWIF(drive);
--	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
--	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
+-	int orig_irr, result = 0;
+-	struct kvm_vcpu *vcpu = apic->vcpu;
 -
--	if (task->data_phase == TASKFILE_MULTI_IN ||
--	    task->data_phase == TASKFILE_MULTI_OUT) {
--		if (!drive->mult_count) {
--			printk(KERN_ERR "%s: multimode not set!\n", drive->name);
--			return ide_stopped;
+-	switch (delivery_mode) {
+-	case APIC_DM_FIXED:
+-	case APIC_DM_LOWEST:
+-		/* FIXME add logic for vcpu on reset */
+-		if (unlikely(!apic_enabled(apic)))
+-			break;
+-
+-		orig_irr = apic_test_and_set_irr(vector, apic);
+-		if (orig_irr && trig_mode) {
+-			apic_debug("level trig mode repeatedly for vector %d",
+-				   vector);
+-			break;
+-		}
+-
+-		if (trig_mode) {
+-			apic_debug("level trig mode for vector %d", vector);
+-			apic_set_vector(vector, apic->regs + APIC_TMR);
+-		} else
+-			apic_clear_vector(vector, apic->regs + APIC_TMR);
+-
+-		if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
+-			kvm_vcpu_kick(vcpu);
+-		else if (vcpu->mp_state == VCPU_MP_STATE_HALTED) {
+-			vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+-			if (waitqueue_active(&vcpu->wq))
+-				wake_up_interruptible(&vcpu->wq);
+-		}
+-
+-		result = (orig_irr == 0);
+-		break;
+-
+-	case APIC_DM_REMRD:
+-		printk(KERN_DEBUG "Ignoring delivery mode 3\n");
+-		break;
+-
+-	case APIC_DM_SMI:
+-		printk(KERN_DEBUG "Ignoring guest SMI\n");
+-		break;
+-	case APIC_DM_NMI:
+-		printk(KERN_DEBUG "Ignoring guest NMI\n");
+-		break;
+-
+-	case APIC_DM_INIT:
+-		if (level) {
+-			if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
+-				printk(KERN_DEBUG
+-				       "INIT on a runnable vcpu %d\n",
+-				       vcpu->vcpu_id);
+-			vcpu->mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+-			kvm_vcpu_kick(vcpu);
+-		} else {
+-			printk(KERN_DEBUG
+-			       "Ignoring de-assert INIT to vcpu %d\n",
+-			       vcpu->vcpu_id);
+-		}
+-
+-		break;
+-
+-	case APIC_DM_STARTUP:
+-		printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
+-		       vcpu->vcpu_id, vector);
+-		if (vcpu->mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+-			vcpu->sipi_vector = vector;
+-			vcpu->mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+-			if (waitqueue_active(&vcpu->wq))
+-				wake_up_interruptible(&vcpu->wq);
 -		}
+-		break;
+-
+-	default:
+-		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
+-		       delivery_mode);
+-		break;
 -	}
- 
+-	return result;
+-}
+-
+-struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
+-				       unsigned long bitmap)
+-{
+-	int vcpu_id;
+-	int last;
+-	int next;
+-	struct kvm_lapic *apic;
+-
+-	last = kvm->round_robin_prev_vcpu;
+-	next = last;
+-
+-	do {
+-		if (++next == KVM_MAX_VCPUS)
+-			next = 0;
+-		if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
+-			continue;
+-		apic = kvm->vcpus[next]->apic;
+-		if (apic && apic_enabled(apic))
+-			break;
+-		apic = NULL;
+-	} while (next != last);
+-	kvm->round_robin_prev_vcpu = next;
+-
+-	if (!apic) {
+-		vcpu_id = ffs(bitmap) - 1;
+-		if (vcpu_id < 0) {
+-			vcpu_id = 0;
+-			printk(KERN_DEBUG "vcpu not ready for apic_round_robin\n");
+-		}
+-		apic = kvm->vcpus[vcpu_id]->apic;
+-	}
+-
+-	return apic;
+-}
+-
+-static void apic_set_eoi(struct kvm_lapic *apic)
+-{
+-	int vector = apic_find_highest_isr(apic);
+-
 -	/*
--	 * (ks) Check taskfile in flags.
--	 * If set, then execute as it is defined.
--	 * If not set, then define default settings.
--	 * The default values are:
--	 *	read all taskfile registers (except data)
--	 *	read the hob registers (sector, nsector, lcyl, hcyl)
+-	 * Not every write EOI will has corresponding ISR,
+-	 * one example is when Kernel check timer on setup_IO_APIC
 -	 */
--	if (task->tf_in_flags.all == 0) {
--		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
--		if (drive->addressing == 1)
--			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS  << 8);
--        }
+-	if (vector == -1)
+-		return;
 -
--	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
--	if (IDE_CONTROL_REG)
--		/* clear nIEN */
--		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
--	SELECT_MASK(drive, 0);
+-	apic_clear_vector(vector, apic->regs + APIC_ISR);
+-	apic_update_ppr(apic);
 -
--	if (task->tf_out_flags.b.data) {
--		u16 data =  taskfile->data + (hobfile->data << 8);
--		hwif->OUTW(data, IDE_DATA_REG);
+-	if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
+-		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector);
+-}
+-
+-static void apic_send_ipi(struct kvm_lapic *apic)
+-{
+-	u32 icr_low = apic_get_reg(apic, APIC_ICR);
+-	u32 icr_high = apic_get_reg(apic, APIC_ICR2);
+-
+-	unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
+-	unsigned int short_hand = icr_low & APIC_SHORT_MASK;
+-	unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
+-	unsigned int level = icr_low & APIC_INT_ASSERT;
+-	unsigned int dest_mode = icr_low & APIC_DEST_MASK;
+-	unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
+-	unsigned int vector = icr_low & APIC_VECTOR_MASK;
+-
+-	struct kvm_lapic *target;
+-	struct kvm_vcpu *vcpu;
+-	unsigned long lpr_map = 0;
+-	int i;
+-
+-	apic_debug("icr_high 0x%x, icr_low 0x%x, "
+-		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
+-		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
+-		   icr_high, icr_low, short_hand, dest,
+-		   trig_mode, level, dest_mode, delivery_mode, vector);
+-
+-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+-		vcpu = apic->vcpu->kvm->vcpus[i];
+-		if (!vcpu)
+-			continue;
+-
+-		if (vcpu->apic &&
+-		    apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
+-			if (delivery_mode == APIC_DM_LOWEST)
+-				set_bit(vcpu->vcpu_id, &lpr_map);
+-			else
+-				__apic_accept_irq(vcpu->apic, delivery_mode,
+-						  vector, level, trig_mode);
+-		}
 -	}
 -
--	/* (ks) send hob registers first */
--	if (task->tf_out_flags.b.nsector_hob)
--		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
--	if (task->tf_out_flags.b.sector_hob)
--		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
--	if (task->tf_out_flags.b.lcyl_hob)
--		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
--	if (task->tf_out_flags.b.hcyl_hob)
--		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
+-	if (delivery_mode == APIC_DM_LOWEST) {
+-		target = kvm_apic_round_robin(vcpu->kvm, vector, lpr_map);
+-		if (target != NULL)
+-			__apic_accept_irq(target, delivery_mode,
+-					  vector, level, trig_mode);
+-	}
+-}
 -
--	/* (ks) Send now the standard registers */
--	if (task->tf_out_flags.b.error_feature)
--		hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
--	/* refers to number of sectors to transfer */
--	if (task->tf_out_flags.b.nsector)
--		hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
--	/* refers to sector offset or start sector */
--	if (task->tf_out_flags.b.sector)
--		hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
--	if (task->tf_out_flags.b.lcyl)
--		hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
--	if (task->tf_out_flags.b.hcyl)
--		hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);
+-static u32 apic_get_tmcct(struct kvm_lapic *apic)
+-{
+-	u64 counter_passed;
+-	ktime_t passed, now;
+-	u32 tmcct;
 -
--        /*
--	 * (ks) In the flagged taskfile approch, we will use all specified
--	 * registers and the register value will not be changed, except the
--	 * select bit (master/slave) in the drive_head register. We must make
--	 * sure that the desired drive is selected.
+-	ASSERT(apic != NULL);
+-
+-	now = apic->timer.dev.base->get_time();
+-	tmcct = apic_get_reg(apic, APIC_TMICT);
+-
+-	/* if initial count is 0, current count should also be 0 */
+-	if (tmcct == 0)
+-		return 0;
+-
+-	if (unlikely(ktime_to_ns(now) <=
+-		ktime_to_ns(apic->timer.last_update))) {
+-		/* Wrap around */
+-		passed = ktime_add(( {
+-				    (ktime_t) {
+-				    .tv64 = KTIME_MAX -
+-				    (apic->timer.last_update).tv64}; }
+-				   ), now);
+-		apic_debug("time elapsed\n");
+-	} else
+-		passed = ktime_sub(now, apic->timer.last_update);
+-
+-	counter_passed = div64_64(ktime_to_ns(passed),
+-				  (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+-
+-	if (counter_passed > tmcct) {
+-		if (unlikely(!apic_lvtt_period(apic))) {
+-			/* one-shot timers stick at 0 until reset */
+-			tmcct = 0;
+-		} else {
+-			/*
+-			 * periodic timers reset to APIC_TMICT when they
+-			 * hit 0. The while loop simulates this happening N
+-			 * times. (counter_passed %= tmcct) would also work,
+-			 * but might be slower or not work on 32-bit??
+-			 */
+-			while (counter_passed > tmcct)
+-				counter_passed -= tmcct;
+-			tmcct -= counter_passed;
+-		}
+-	} else {
+-		tmcct -= counter_passed;
+-	}
+-
+-	return tmcct;
+-}
+-
+-static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
+-{
+-	u32 val = 0;
+-
+-	if (offset >= LAPIC_MMIO_LENGTH)
+-		return 0;
+-
+-	switch (offset) {
+-	case APIC_ARBPRI:
+-		printk(KERN_WARNING "Access APIC ARBPRI register "
+-		       "which is for P6\n");
+-		break;
+-
+-	case APIC_TMCCT:	/* Timer CCR */
+-		val = apic_get_tmcct(apic);
+-		break;
+-
+-	default:
+-		apic_update_ppr(apic);
+-		val = apic_get_reg(apic, offset);
+-		break;
+-	}
+-
+-	return val;
+-}
+-
+-static void apic_mmio_read(struct kvm_io_device *this,
+-			   gpa_t address, int len, void *data)
+-{
+-	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
+-	unsigned int offset = address - apic->base_address;
+-	unsigned char alignment = offset & 0xf;
+-	u32 result;
+-
+-	if ((alignment + len) > 4) {
+-		printk(KERN_ERR "KVM_APIC_READ: alignment error %lx %d",
+-		       (unsigned long)address, len);
+-		return;
+-	}
+-	result = __apic_read(apic, offset & ~0xf);
+-
+-	switch (len) {
+-	case 1:
+-	case 2:
+-	case 4:
+-		memcpy(data, (char *)&result + alignment, len);
+-		break;
+-	default:
+-		printk(KERN_ERR "Local APIC read with len = %x, "
+-		       "should be 1,2, or 4 instead\n", len);
+-		break;
+-	}
+-}
+-
+-static void update_divide_count(struct kvm_lapic *apic)
+-{
+-	u32 tmp1, tmp2, tdcr;
+-
+-	tdcr = apic_get_reg(apic, APIC_TDCR);
+-	tmp1 = tdcr & 0xf;
+-	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
+-	apic->timer.divide_count = 0x1 << (tmp2 & 0x7);
+-
+-	apic_debug("timer divide count is 0x%x\n",
+-				   apic->timer.divide_count);
+-}
+-
+-static void start_apic_timer(struct kvm_lapic *apic)
+-{
+-	ktime_t now = apic->timer.dev.base->get_time();
+-
+-	apic->timer.last_update = now;
+-
+-	apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
+-		    APIC_BUS_CYCLE_NS * apic->timer.divide_count;
+-	atomic_set(&apic->timer.pending, 0);
+-	hrtimer_start(&apic->timer.dev,
+-		      ktime_add_ns(now, apic->timer.period),
+-		      HRTIMER_MODE_ABS);
+-
+-	apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
+-			   PRIx64 ", "
+-			   "timer initial count 0x%x, period %lldns, "
+-			   "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__,
+-			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
+-			   apic_get_reg(apic, APIC_TMICT),
+-			   apic->timer.period,
+-			   ktime_to_ns(ktime_add_ns(now,
+-					apic->timer.period)));
+-}
+-
+-static void apic_mmio_write(struct kvm_io_device *this,
+-			    gpa_t address, int len, const void *data)
+-{
+-	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
+-	unsigned int offset = address - apic->base_address;
+-	unsigned char alignment = offset & 0xf;
+-	u32 val;
+-
+-	/*
+-	 * APIC register must be aligned on 128-bits boundary.
+-	 * 32/64/128 bits registers must be accessed thru 32 bits.
+-	 * Refer SDM 8.4.1
 -	 */
--	hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG);
--	switch(task->data_phase) {
-+	memset(&task, 0, sizeof(task));
-+	memcpy(&task.tf_array[7], &args[1], 6);
-+	task.tf.command = args[0];
-+	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
- 
--   	        case TASKFILE_OUT_DMAQ:
--		case TASKFILE_OUT_DMA:
--		case TASKFILE_IN_DMAQ:
--		case TASKFILE_IN_DMA:
--			if (!drive->using_dma)
--				break;
-+	err = ide_no_data_taskfile(drive, &task);
- 
--			if (!hwif->dma_setup(drive)) {
--				hwif->dma_exec_cmd(drive, taskfile->command);
--				hwif->dma_start(drive);
--				return ide_started;
--			}
--			break;
-+	args[0] = task.tf.command;
-+	memcpy(&args[1], &task.tf_array[7], 6);
- 
--	        default:
-- 			if (task->handler == NULL)
--				return ide_stopped;
+-	if (len != 4 || alignment) {
+-		if (printk_ratelimit())
+-			printk(KERN_ERR "apic write: bad size=%d %lx\n",
+-			       len, (long)address);
+-		return;
+-	}
 -
--			/* Issue the command */
--			if (task->prehandler) {
--				hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
--				ndelay(400);	/* FIXME */
--				return task->prehandler(drive, task->rq);
+-	val = *(u32 *) data;
+-
+-	/* too common printing */
+-	if (offset != APIC_EOI)
+-		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
+-			   "0x%x\n", __FUNCTION__, offset, len, val);
+-
+-	offset &= 0xff0;
+-
+-	switch (offset) {
+-	case APIC_ID:		/* Local APIC ID */
+-		apic_set_reg(apic, APIC_ID, val);
+-		break;
+-
+-	case APIC_TASKPRI:
+-		apic_set_tpr(apic, val & 0xff);
+-		break;
+-
+-	case APIC_EOI:
+-		apic_set_eoi(apic);
+-		break;
+-
+-	case APIC_LDR:
+-		apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
+-		break;
+-
+-	case APIC_DFR:
+-		apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
+-		break;
+-
+-	case APIC_SPIV:
+-		apic_set_reg(apic, APIC_SPIV, val & 0x3ff);
+-		if (!(val & APIC_SPIV_APIC_ENABLED)) {
+-			int i;
+-			u32 lvt_val;
+-
+-			for (i = 0; i < APIC_LVT_NUM; i++) {
+-				lvt_val = apic_get_reg(apic,
+-						       APIC_LVTT + 0x10 * i);
+-				apic_set_reg(apic, APIC_LVTT + 0x10 * i,
+-					     lvt_val | APIC_LVT_MASKED);
 -			}
--			ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
--			return ide_started;
+-			atomic_set(&apic->timer.pending, 0);
+-
+-		}
+-		break;
+-
+-	case APIC_ICR:
+-		/* No delay here, so we always clear the pending bit */
+-		apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
+-		apic_send_ipi(apic);
+-		break;
+-
+-	case APIC_ICR2:
+-		apic_set_reg(apic, APIC_ICR2, val & 0xff000000);
+-		break;
+-
+-	case APIC_LVTT:
+-	case APIC_LVTTHMR:
+-	case APIC_LVTPC:
+-	case APIC_LVT0:
+-	case APIC_LVT1:
+-	case APIC_LVTERR:
+-		/* TODO: Check vector */
+-		if (!apic_sw_enabled(apic))
+-			val |= APIC_LVT_MASKED;
+-
+-		val &= apic_lvt_mask[(offset - APIC_LVTT) >> 4];
+-		apic_set_reg(apic, offset, val);
+-
+-		break;
+-
+-	case APIC_TMICT:
+-		hrtimer_cancel(&apic->timer.dev);
+-		apic_set_reg(apic, APIC_TMICT, val);
+-		start_apic_timer(apic);
+-		return;
+-
+-	case APIC_TDCR:
+-		if (val & 4)
+-			printk(KERN_ERR "KVM_WRITE:TDCR %x\n", val);
+-		apic_set_reg(apic, APIC_TDCR, val);
+-		update_divide_count(apic);
+-		break;
+-
+-	default:
+-		apic_debug("Local APIC Write to read-only register %x\n",
+-			   offset);
+-		break;
 -	}
-+	if (copy_to_user(p, args, 7))
-+		err = -EFAULT;
- 
--	return ide_stopped;
-+	return err;
- }
-diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
-index 54943da..97894ab 100644
---- a/drivers/ide/ide.c
-+++ b/drivers/ide/ide.c
-@@ -95,7 +95,7 @@ DEFINE_MUTEX(ide_cfg_mtx);
-  __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
- 
- #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
--static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
-+int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
- #endif
- 
- int noautodma = 0;
-@@ -116,7 +116,7 @@ EXPORT_SYMBOL(ide_hwifs);
- /*
-  * Do not even *think* about calling this!
-  */
--static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
-+void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
- {
- 	unsigned int unit;
- 
-@@ -159,6 +159,7 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
- 		init_completion(&drive->gendev_rel_comp);
- 	}
- }
-+EXPORT_SYMBOL_GPL(ide_init_port_data);
- 
- static void init_hwif_default(ide_hwif_t *hwif, unsigned int index)
- {
-@@ -177,8 +178,6 @@ static void init_hwif_default(ide_hwif_t *hwif, unsigned int index)
- #endif
- }
- 
--extern void ide_arm_init(void);
 -
- /*
-  * init_ide_data() sets reasonable default values into all fields
-  * of all instances of the hwifs and drives, but only on the first call.
-@@ -210,16 +209,13 @@ static void __init init_ide_data (void)
- 	/* Initialise all interface structures */
- 	for (index = 0; index < MAX_HWIFS; ++index) {
- 		hwif = &ide_hwifs[index];
--		init_hwif_data(hwif, index);
-+		ide_init_port_data(hwif, index);
- 		init_hwif_default(hwif, index);
- #if !defined(CONFIG_PPC32) || !defined(CONFIG_PCI)
- 		hwif->irq =
- 			ide_init_default_irq(hwif->io_ports[IDE_DATA_OFFSET]);
- #endif
- 	}
--#ifdef CONFIG_IDE_ARM
--	ide_arm_init();
--#endif
- }
- 
- /**
-@@ -414,8 +410,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
- 	hwif->cds			= tmp_hwif->cds;
- #endif
- 
--	hwif->fixup			= tmp_hwif->fixup;
+-}
 -
- 	hwif->set_pio_mode		= tmp_hwif->set_pio_mode;
- 	hwif->set_dma_mode		= tmp_hwif->set_dma_mode;
- 	hwif->mdma_filter		= tmp_hwif->mdma_filter;
-@@ -424,7 +418,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
- 	hwif->reset_poll		= tmp_hwif->reset_poll;
- 	hwif->pre_reset			= tmp_hwif->pre_reset;
- 	hwif->resetproc			= tmp_hwif->resetproc;
--	hwif->intrproc			= tmp_hwif->intrproc;
- 	hwif->maskproc			= tmp_hwif->maskproc;
- 	hwif->quirkproc			= tmp_hwif->quirkproc;
- 	hwif->busproc			= tmp_hwif->busproc;
-@@ -434,16 +427,13 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
- 	hwif->atapi_input_bytes		= tmp_hwif->atapi_input_bytes;
- 	hwif->atapi_output_bytes	= tmp_hwif->atapi_output_bytes;
- 
-+	hwif->dma_host_set		= tmp_hwif->dma_host_set;
- 	hwif->dma_setup			= tmp_hwif->dma_setup;
- 	hwif->dma_exec_cmd		= tmp_hwif->dma_exec_cmd;
- 	hwif->dma_start			= tmp_hwif->dma_start;
- 	hwif->ide_dma_end		= tmp_hwif->ide_dma_end;
--	hwif->ide_dma_on		= tmp_hwif->ide_dma_on;
--	hwif->dma_off_quietly		= tmp_hwif->dma_off_quietly;
- 	hwif->ide_dma_test_irq		= tmp_hwif->ide_dma_test_irq;
- 	hwif->ide_dma_clear_irq		= tmp_hwif->ide_dma_clear_irq;
--	hwif->dma_host_on		= tmp_hwif->dma_host_on;
--	hwif->dma_host_off		= tmp_hwif->dma_host_off;
- 	hwif->dma_lost_irq		= tmp_hwif->dma_lost_irq;
- 	hwif->dma_timeout		= tmp_hwif->dma_timeout;
- 
-@@ -468,7 +458,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
- #endif
- 
- 	hwif->dma_base			= tmp_hwif->dma_base;
--	hwif->dma_master		= tmp_hwif->dma_master;
- 	hwif->dma_command		= tmp_hwif->dma_command;
- 	hwif->dma_vendor1		= tmp_hwif->dma_vendor1;
- 	hwif->dma_status		= tmp_hwif->dma_status;
-@@ -602,7 +591,6 @@ void ide_unregister(unsigned int index)
- 		(void) ide_release_dma(hwif);
- 
- 		hwif->dma_base = 0;
--		hwif->dma_master = 0;
- 		hwif->dma_command = 0;
- 		hwif->dma_vendor1 = 0;
- 		hwif->dma_status = 0;
-@@ -617,7 +605,7 @@ void ide_unregister(unsigned int index)
- 	tmp_hwif = *hwif;
- 
- 	/* restore hwif data to pristine status */
--	init_hwif_data(hwif, index);
-+	ide_init_port_data(hwif, index);
- 	init_hwif_default(hwif, index);
- 
- 	ide_hwif_restore(hwif, &tmp_hwif);
-@@ -683,24 +671,34 @@ void ide_setup_ports (	hw_regs_t *hw,
-  */
- }
- 
-+void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
-+{
-+	memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
-+	hwif->irq = hw->irq;
-+	hwif->noprobe = 0;
-+	hwif->chipset = hw->chipset;
-+	hwif->gendev.parent = hw->dev;
-+	hwif->ack_intr = hw->ack_intr;
-+}
-+EXPORT_SYMBOL_GPL(ide_init_port_hw);
-+
- /**
-  *	ide_register_hw		-	register IDE interface
-  *	@hw: hardware registers
-- *	@fixup: fixup function
-- *	@initializing: set while initializing built-in drivers
-+ *	@quirkproc: quirkproc function
-  *	@hwifp: pointer to returned hwif
-  *
-  *	Register an IDE interface, specifying exactly the registers etc.
-- *	Set init=1 iff calling before probes have taken place.
-  *
-  *	Returns -1 on error.
-  */
- 
--int ide_register_hw(hw_regs_t *hw, void (*fixup)(ide_hwif_t *),
--		    int initializing, ide_hwif_t **hwifp)
-+int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *),
-+		    ide_hwif_t **hwifp)
- {
- 	int index, retry = 1;
- 	ide_hwif_t *hwif;
-+	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- 
- 	do {
- 		for (index = 0; index < MAX_HWIFS; ++index) {
-@@ -712,8 +710,7 @@ int ide_register_hw(hw_regs_t *hw, void (*fixup)(ide_hwif_t *),
- 			hwif = &ide_hwifs[index];
- 			if (hwif->hold)
- 				continue;
--			if ((!hwif->present && !hwif->mate && !initializing) ||
--			    (!hwif->io_ports[IDE_DATA_OFFSET] && initializing))
-+			if (!hwif->present && hwif->mate == NULL)
- 				goto found;
- 		}
- 		for (index = 0; index < MAX_HWIFS; index++)
-@@ -724,29 +721,23 @@ found:
- 	if (hwif->present)
- 		ide_unregister(index);
- 	else if (!hwif->hold) {
--		init_hwif_data(hwif, index);
-+		ide_init_port_data(hwif, index);
- 		init_hwif_default(hwif, index);
- 	}
- 	if (hwif->present)
- 		return -1;
--	memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
--	hwif->irq = hw->irq;
--	hwif->noprobe = 0;
--	hwif->fixup = fixup;
--	hwif->chipset = hw->chipset;
--	hwif->gendev.parent = hw->dev;
--	hwif->ack_intr = hw->ack_intr;
- 
--	if (initializing == 0) {
--		u8 idx[4] = { index, 0xff, 0xff, 0xff };
-+	ide_init_port_hw(hwif, hw);
-+	hwif->quirkproc = quirkproc;
- 
--		ide_device_add(idx);
+-static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
+-{
+-	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
+-	int ret = 0;
+-
+-
+-	if (apic_hw_enabled(apic) &&
+-	    (addr >= apic->base_address) &&
+-	    (addr < (apic->base_address + LAPIC_MMIO_LENGTH)))
+-		ret = 1;
+-
+-	return ret;
+-}
+-
+-void kvm_free_apic(struct kvm_lapic *apic)
+-{
+-	if (!apic)
+-		return;
+-
+-	hrtimer_cancel(&apic->timer.dev);
+-
+-	if (apic->regs_page) {
+-		__free_page(apic->regs_page);
+-		apic->regs_page = 0;
 -	}
-+	idx[0] = index;
-+
-+	ide_device_add(idx);
- 
- 	if (hwifp)
- 		*hwifp = hwif;
- 
--	return (initializing || hwif->present) ? index : -1;
-+	return hwif->present ? index : -1;
- }
- 
- EXPORT_SYMBOL(ide_register_hw);
-@@ -839,7 +830,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
- 	if (!drive->id || !(drive->id->capability & 1))
- 		goto out;
- 
--	if (hwif->ide_dma_on == NULL)
-+	if (hwif->dma_host_set == NULL)
- 		goto out;
- 
- 	err = -EBUSY;
-@@ -854,8 +845,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
- 	err = 0;
- 
- 	if (arg) {
--		hwif->dma_off_quietly(drive);
--		if (ide_set_dma(drive) || hwif->ide_dma_on(drive))
-+		if (ide_set_dma(drive))
- 			err = -EIO;
- 	} else
- 		ide_dma_off(drive);
-@@ -888,7 +878,10 @@ int set_pio_mode(ide_drive_t *drive, int arg)
- 
- 	if (drive->special.b.set_tune)
- 		return -EBUSY;
-+
- 	ide_init_drive_cmd(&rq);
-+	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
-+
- 	drive->tune_req = (u8) arg;
- 	drive->special.b.set_tune = 1;
- 	(void) ide_do_drive_cmd(drive, &rq, ide_wait);
-@@ -1070,7 +1063,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
- 			ide_init_hwif_ports(&hw, (unsigned long) args[0],
- 					    (unsigned long) args[1], NULL);
- 			hw.irq = args[2];
--			if (ide_register_hw(&hw, NULL, 0, NULL) == -1)
-+			if (ide_register_hw(&hw, NULL, NULL) == -1)
- 				return -EIO;
- 			return 0;
- 		}
-@@ -1231,26 +1224,12 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
- 	return 0;	/* zero = nothing matched */
- }
- 
--#ifdef CONFIG_BLK_DEV_ALI14XX
- extern int probe_ali14xx;
--extern int ali14xx_init(void);
--#endif
--#ifdef CONFIG_BLK_DEV_UMC8672
- extern int probe_umc8672;
--extern int umc8672_init(void);
--#endif
--#ifdef CONFIG_BLK_DEV_DTC2278
- extern int probe_dtc2278;
--extern int dtc2278_init(void);
--#endif
--#ifdef CONFIG_BLK_DEV_HT6560B
- extern int probe_ht6560b;
--extern int ht6560b_init(void);
--#endif
--#ifdef CONFIG_BLK_DEV_QD65XX
- extern int probe_qd65xx;
--extern int qd65xx_init(void);
--#endif
-+extern int cmd640_vlb;
- 
- static int __initdata is_chipset_set[MAX_HWIFS];
- 
-@@ -1327,7 +1306,7 @@ static int __init ide_setup(char *s)
- 	if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
- 		const char *hd_words[] = {
- 			"none", "noprobe", "nowerr", "cdrom", "nodma",
--			"autotune", "noautotune", "minus8", "swapdata", "bswap",
-+			"autotune", "noautotune", "-8", "-9", "-10",
- 			"noflush", "remap", "remap63", "scsi", NULL };
- 		unit = s[2] - 'a';
- 		hw   = unit / MAX_DRIVES;
-@@ -1363,10 +1342,6 @@ static int __init ide_setup(char *s)
- 			case -7: /* "noautotune" */
- 				drive->autotune = IDE_TUNE_NOAUTO;
- 				goto obsolete_option;
--			case -9: /* "swapdata" */
--			case -10: /* "bswap" */
--				drive->bswap = 1;
--				goto done;
- 			case -11: /* noflush */
- 				drive->noflush = 1;
- 				goto done;
-@@ -1466,11 +1441,8 @@ static int __init ide_setup(char *s)
- #endif
- #ifdef CONFIG_BLK_DEV_CMD640
- 			case -14: /* "cmd640_vlb" */
--			{
--				extern int cmd640_vlb; /* flag for cmd640.c */
- 				cmd640_vlb = 1;
- 				goto done;
--			}
- #endif
- #ifdef CONFIG_BLK_DEV_HT6560B
- 			case -13: /* "ht6560b" */
-@@ -1560,79 +1532,6 @@ done:
- 	return 1;
- }
- 
--extern void __init pnpide_init(void);
--extern void __exit pnpide_exit(void);
--extern void __init h8300_ide_init(void);
+-
+-	kfree(apic);
+-}
 -
 -/*
-- * probe_for_hwifs() finds/initializes "known" IDE interfaces
+- *----------------------------------------------------------------------
+- * LAPIC interface
+- *----------------------------------------------------------------------
 - */
--static void __init probe_for_hwifs (void)
+-
+-void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 -{
--#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
--	ide_scan_pcibus(ide_scan_direction);
--#endif
+-	struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
 -
--#ifdef CONFIG_ETRAX_IDE
--	{
--		extern void init_e100_ide(void);
--		init_e100_ide();
--	}
--#endif /* CONFIG_ETRAX_IDE */
--#ifdef CONFIG_BLK_DEV_CMD640
--	{
--		extern void ide_probe_for_cmd640x(void);
--		ide_probe_for_cmd640x();
--	}
--#endif /* CONFIG_BLK_DEV_CMD640 */
--#ifdef CONFIG_BLK_DEV_IDE_PMAC
--	{
--		extern int pmac_ide_probe(void);
--		(void)pmac_ide_probe();
+-	if (!apic)
+-		return;
+-	apic_set_tpr(apic, ((cr8 & 0x0f) << 4));
+-}
+-
+-u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
+-	u64 tpr;
+-
+-	if (!apic)
+-		return 0;
+-	tpr = (u64) apic_get_reg(apic, APIC_TASKPRI);
+-
+-	return (tpr & 0xf0) >> 4;
+-}
+-EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
+-
+-void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
+-{
+-	struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
+-
+-	if (!apic) {
+-		value |= MSR_IA32_APICBASE_BSP;
+-		vcpu->apic_base = value;
+-		return;
 -	}
--#endif /* CONFIG_BLK_DEV_IDE_PMAC */
--#ifdef CONFIG_BLK_DEV_GAYLE
+-	if (apic->vcpu->vcpu_id)
+-		value &= ~MSR_IA32_APICBASE_BSP;
+-
+-	vcpu->apic_base = value;
+-	apic->base_address = apic->vcpu->apic_base &
+-			     MSR_IA32_APICBASE_BASE;
+-
+-	/* with FSB delivery interrupt, we can restart APIC functionality */
+-	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
+-		   "0x%lx.\n", apic->apic_base, apic->base_address);
+-
+-}
+-
+-u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
+-{
+-	return vcpu->apic_base;
+-}
+-EXPORT_SYMBOL_GPL(kvm_lapic_get_base);
+-
+-void kvm_lapic_reset(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic;
+-	int i;
+-
+-	apic_debug("%s\n", __FUNCTION__);
+-
+-	ASSERT(vcpu);
+-	apic = vcpu->apic;
+-	ASSERT(apic != NULL);
+-
+-	/* Stop the timer in case it's a reset to an active apic */
+-	hrtimer_cancel(&apic->timer.dev);
+-
+-	apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
+-	apic_set_reg(apic, APIC_LVR, APIC_VERSION);
+-
+-	for (i = 0; i < APIC_LVT_NUM; i++)
+-		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
+-	apic_set_reg(apic, APIC_LVT0,
+-		     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
+-
+-	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
+-	apic_set_reg(apic, APIC_SPIV, 0xff);
+-	apic_set_reg(apic, APIC_TASKPRI, 0);
+-	apic_set_reg(apic, APIC_LDR, 0);
+-	apic_set_reg(apic, APIC_ESR, 0);
+-	apic_set_reg(apic, APIC_ICR, 0);
+-	apic_set_reg(apic, APIC_ICR2, 0);
+-	apic_set_reg(apic, APIC_TDCR, 0);
+-	apic_set_reg(apic, APIC_TMICT, 0);
+-	for (i = 0; i < 8; i++) {
+-		apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
+-		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
+-		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
+-	}
+-	update_divide_count(apic);
+-	atomic_set(&apic->timer.pending, 0);
+-	if (vcpu->vcpu_id == 0)
+-		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+-	apic_update_ppr(apic);
+-
+-	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
+-		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
+-		   vcpu, kvm_apic_id(apic),
+-		   vcpu->apic_base, apic->base_address);
+-}
+-EXPORT_SYMBOL_GPL(kvm_lapic_reset);
+-
+-int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
+-	int ret = 0;
+-
+-	if (!apic)
+-		return 0;
+-	ret = apic_enabled(apic);
+-
+-	return ret;
+-}
+-EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
+-
+-/*
+- *----------------------------------------------------------------------
+- * timer interface
+- *----------------------------------------------------------------------
+- */
+-
+-/* TODO: make sure __apic_timer_fn runs in current pCPU */
+-static int __apic_timer_fn(struct kvm_lapic *apic)
+-{
+-	int result = 0;
+-	wait_queue_head_t *q = &apic->vcpu->wq;
+-
+-	atomic_inc(&apic->timer.pending);
+-	if (waitqueue_active(q))
 -	{
--		extern void gayle_init(void);
--		gayle_init();
+-		apic->vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+-		wake_up_interruptible(q);
 -	}
--#endif /* CONFIG_BLK_DEV_GAYLE */
--#ifdef CONFIG_BLK_DEV_FALCON_IDE
--	{
--		extern void falconide_init(void);
--		falconide_init();
+-	if (apic_lvtt_period(apic)) {
+-		result = 1;
+-		apic->timer.dev.expires = ktime_add_ns(
+-					apic->timer.dev.expires,
+-					apic->timer.period);
 -	}
--#endif /* CONFIG_BLK_DEV_FALCON_IDE */
--#ifdef CONFIG_BLK_DEV_MAC_IDE
--	{
--		extern void macide_init(void);
--		macide_init();
+-	return result;
+-}
+-
+-static int __inject_apic_timer_irq(struct kvm_lapic *apic)
+-{
+-	int vector;
+-
+-	vector = apic_lvt_vector(apic, APIC_LVTT);
+-	return __apic_accept_irq(apic, APIC_DM_FIXED, vector, 1, 0);
+-}
+-
+-static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
+-{
+-	struct kvm_lapic *apic;
+-	int restart_timer = 0;
+-
+-	apic = container_of(data, struct kvm_lapic, timer.dev);
+-
+-	restart_timer = __apic_timer_fn(apic);
+-
+-	if (restart_timer)
+-		return HRTIMER_RESTART;
+-	else
+-		return HRTIMER_NORESTART;
+-}
+-
+-int kvm_create_lapic(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic;
+-
+-	ASSERT(vcpu != NULL);
+-	apic_debug("apic_init %d\n", vcpu->vcpu_id);
+-
+-	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
+-	if (!apic)
+-		goto nomem;
+-
+-	vcpu->apic = apic;
+-
+-	apic->regs_page = alloc_page(GFP_KERNEL);
+-	if (apic->regs_page == NULL) {
+-		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
+-		       vcpu->vcpu_id);
+-		goto nomem;
 -	}
--#endif /* CONFIG_BLK_DEV_MAC_IDE */
--#ifdef CONFIG_BLK_DEV_Q40IDE
--	{
--		extern void q40ide_init(void);
--		q40ide_init();
+-	apic->regs = page_address(apic->regs_page);
+-	memset(apic->regs, 0, PAGE_SIZE);
+-	apic->vcpu = vcpu;
+-
+-	hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+-	apic->timer.dev.function = apic_timer_fn;
+-	apic->base_address = APIC_DEFAULT_PHYS_BASE;
+-	vcpu->apic_base = APIC_DEFAULT_PHYS_BASE;
+-
+-	kvm_lapic_reset(vcpu);
+-	apic->dev.read = apic_mmio_read;
+-	apic->dev.write = apic_mmio_write;
+-	apic->dev.in_range = apic_mmio_range;
+-	apic->dev.private = apic;
+-
+-	return 0;
+-nomem:
+-	kvm_free_apic(apic);
+-	return -ENOMEM;
+-}
+-EXPORT_SYMBOL_GPL(kvm_create_lapic);
+-
+-int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic = vcpu->apic;
+-	int highest_irr;
+-
+-	if (!apic || !apic_enabled(apic))
+-		return -1;
+-
+-	apic_update_ppr(apic);
+-	highest_irr = apic_find_highest_irr(apic);
+-	if ((highest_irr == -1) ||
+-	    ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)))
+-		return -1;
+-	return highest_irr;
+-}
+-
+-int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
+-{
+-	u32 lvt0 = apic_get_reg(vcpu->apic, APIC_LVT0);
+-	int r = 0;
+-
+-	if (vcpu->vcpu_id == 0) {
+-		if (!apic_hw_enabled(vcpu->apic))
+-			r = 1;
+-		if ((lvt0 & APIC_LVT_MASKED) == 0 &&
+-		    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
+-			r = 1;
 -	}
--#endif /* CONFIG_BLK_DEV_Q40IDE */
--#ifdef CONFIG_BLK_DEV_BUDDHA
--	{
--		extern void buddha_init(void);
--		buddha_init();
+-	return r;
+-}
+-
+-void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic = vcpu->apic;
+-
+-	if (apic && apic_lvt_enabled(apic, APIC_LVTT) &&
+-		atomic_read(&apic->timer.pending) > 0) {
+-		if (__inject_apic_timer_irq(apic))
+-			atomic_dec(&apic->timer.pending);
 -	}
--#endif /* CONFIG_BLK_DEV_BUDDHA */
--#ifdef CONFIG_BLK_DEV_IDEPNP
--	pnpide_init();
--#endif
--#ifdef CONFIG_H8300
--	h8300_ide_init();
--#endif
 -}
 -
+-void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
+-{
+-	struct kvm_lapic *apic = vcpu->apic;
+-
+-	if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
+-		apic->timer.last_update = ktime_add_ns(
+-				apic->timer.last_update,
+-				apic->timer.period);
+-}
+-
+-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+-{
+-	int vector = kvm_apic_has_interrupt(vcpu);
+-	struct kvm_lapic *apic = vcpu->apic;
+-
+-	if (vector == -1)
+-		return -1;
+-
+-	apic_set_vector(vector, apic->regs + APIC_ISR);
+-	apic_update_ppr(apic);
+-	apic_clear_irr(vector, apic);
+-	return vector;
+-}
+-
+-void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic = vcpu->apic;
+-
+-	apic->base_address = vcpu->apic_base &
+-			     MSR_IA32_APICBASE_BASE;
+-	apic_set_reg(apic, APIC_LVR, APIC_VERSION);
+-	apic_update_ppr(apic);
+-	hrtimer_cancel(&apic->timer.dev);
+-	update_divide_count(apic);
+-	start_apic_timer(apic);
+-}
+-
+-void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_lapic *apic = vcpu->apic;
+-	struct hrtimer *timer;
+-
+-	if (!apic)
+-		return;
+-
+-	timer = &apic->timer.dev;
+-	if (hrtimer_cancel(timer))
+-		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+-}
+-EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer);
+diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
+deleted file mode 100644
+index feb5ac9..0000000
+--- a/drivers/kvm/mmu.c
++++ /dev/null
+@@ -1,1498 +0,0 @@
 -/*
-- * Probe module
+- * Kernel-based Virtual Machine driver for Linux
+- *
+- * This module enables machines with Intel VT-x extensions to run virtual
+- * machines without emulation or binary translation.
+- *
+- * MMU support
+- *
+- * Copyright (C) 2006 Qumranet, Inc.
+- *
+- * Authors:
+- *   Yaniv Kamay  <yaniv at qumranet.com>
+- *   Avi Kivity   <avi at qumranet.com>
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2.  See
+- * the COPYING file in the top-level directory.
+- *
 - */
 -
- EXPORT_SYMBOL(ide_lock);
- 
- static int ide_bus_match(struct device *dev, struct device_driver *drv)
-@@ -1779,30 +1678,6 @@ static int __init ide_init(void)
- 
- 	proc_ide_create();
- 
--#ifdef CONFIG_BLK_DEV_ALI14XX
--	if (probe_ali14xx)
--		(void)ali14xx_init();
--#endif
--#ifdef CONFIG_BLK_DEV_UMC8672
--	if (probe_umc8672)
--		(void)umc8672_init();
--#endif
--#ifdef CONFIG_BLK_DEV_DTC2278
--	if (probe_dtc2278)
--		(void)dtc2278_init();
--#endif
--#ifdef CONFIG_BLK_DEV_HT6560B
--	if (probe_ht6560b)
--		(void)ht6560b_init();
--#endif
--#ifdef CONFIG_BLK_DEV_QD65XX
--	if (probe_qd65xx)
--		(void)qd65xx_init();
+-#include "vmx.h"
+-#include "kvm.h"
+-
+-#include <linux/types.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
+-#include <linux/highmem.h>
+-#include <linux/module.h>
+-
+-#include <asm/page.h>
+-#include <asm/cmpxchg.h>
+-
+-#undef MMU_DEBUG
+-
+-#undef AUDIT
+-
+-#ifdef AUDIT
+-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
+-#else
+-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 -#endif
 -
--	/* Probe for special PCI and other "known" interface chipsets. */
--	probe_for_hwifs();
+-#ifdef MMU_DEBUG
+-
+-#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
+-#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
+-
+-#else
+-
+-#define pgprintk(x...) do { } while (0)
+-#define rmap_printk(x...) do { } while (0)
 -
- 	return 0;
- }
- 
-@@ -1838,10 +1713,6 @@ void __exit cleanup_module (void)
- 	for (index = 0; index < MAX_HWIFS; ++index)
- 		ide_unregister(index);
- 
--#ifdef CONFIG_BLK_DEV_IDEPNP
--	pnpide_exit();
 -#endif
 -
- 	proc_ide_destroy();
- 
- 	bus_unregister(&ide_bus_type);
-diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile
-index 4098223..7043ec7 100644
---- a/drivers/ide/legacy/Makefile
-+++ b/drivers/ide/legacy/Makefile
-@@ -1,15 +1,24 @@
- 
-+# link order is important here
-+
- obj-$(CONFIG_BLK_DEV_ALI14XX)		+= ali14xx.o
-+obj-$(CONFIG_BLK_DEV_UMC8672)		+= umc8672.o
- obj-$(CONFIG_BLK_DEV_DTC2278)		+= dtc2278.o
- obj-$(CONFIG_BLK_DEV_HT6560B)		+= ht6560b.o
- obj-$(CONFIG_BLK_DEV_QD65XX)		+= qd65xx.o
--obj-$(CONFIG_BLK_DEV_UMC8672)		+= umc8672.o
- 
--obj-$(CONFIG_BLK_DEV_IDECS)		+= ide-cs.o
-+obj-$(CONFIG_BLK_DEV_GAYLE)		+= gayle.o
-+obj-$(CONFIG_BLK_DEV_FALCON_IDE)	+= falconide.o
-+obj-$(CONFIG_BLK_DEV_MAC_IDE)		+= macide.o
-+obj-$(CONFIG_BLK_DEV_Q40IDE)		+= q40ide.o
-+obj-$(CONFIG_BLK_DEV_BUDDHA)		+= buddha.o
- 
--obj-$(CONFIG_BLK_DEV_PLATFORM)		+= ide_platform.o
-+ifeq ($(CONFIG_BLK_DEV_IDECS), m)
-+	obj-m += ide-cs.o
-+endif
- 
--# Last of all
--obj-$(CONFIG_BLK_DEV_HD)		+= hd.o
-+ifeq ($(CONFIG_BLK_DEV_PLATFORM), m)
-+	obj-m += ide_platform.o
-+endif
- 
- EXTRA_CFLAGS	:= -Idrivers/ide
-diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
-index 38c3a6d..5ec0be4 100644
---- a/drivers/ide/legacy/ali14xx.c
-+++ b/drivers/ide/legacy/ali14xx.c
-@@ -231,8 +231,7 @@ int probe_ali14xx = 0;
- module_param_named(probe, probe_ali14xx, bool, 0);
- MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
- 
--/* Can be called directly from ide.c. */
--int __init ali14xx_init(void)
-+static int __init ali14xx_init(void)
- {
- 	if (probe_ali14xx == 0)
- 		goto out;
-@@ -248,9 +247,7 @@ out:
- 	return -ENODEV;
- }
- 
--#ifdef MODULE
- module_init(ali14xx_init);
+-#if defined(MMU_DEBUG) || defined(AUDIT)
+-static int dbg = 1;
 -#endif
- 
- MODULE_AUTHOR("see local file");
- MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets");
-diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
-index 4a0be25..74d28e0 100644
---- a/drivers/ide/legacy/buddha.c
-+++ b/drivers/ide/legacy/buddha.c
-@@ -112,6 +112,7 @@ typedef enum BuddhaType_Enum {
-     BOARD_BUDDHA, BOARD_CATWEASEL, BOARD_XSURF
- } BuddhaType;
- 
-+static const char *buddha_board_name[] = { "Buddha", "Catweasel", "X-Surf" };
- 
-     /*
-      *  Check and acknowledge the interrupt status
-@@ -143,11 +144,11 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
-      *  Probe for a Buddha or Catweasel IDE interface
-      */
- 
--void __init buddha_init(void)
-+static int __init buddha_init(void)
- {
- 	hw_regs_t hw;
- 	ide_hwif_t *hwif;
--	int i, index;
-+	int i;
- 
- 	struct zorro_dev *z = NULL;
- 	u_long buddha_board = 0;
-@@ -156,6 +157,8 @@ void __init buddha_init(void)
- 
- 	while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- 		unsigned long board;
-+		u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-+
- 		if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
- 			buddha_num_hwifs = BUDDHA_NUM_HWIFS;
- 			type=BOARD_BUDDHA;
-@@ -195,7 +198,10 @@ fail_base2:
- 		/* X-Surf doesn't have this.  IRQs are always on */
- 		if (type != BOARD_XSURF)
- 			z_writeb(0, buddha_board+BUDDHA_IRQ_MR);
--		
-+
-+		printk(KERN_INFO "ide: %s IDE controller\n",
-+				 buddha_board_name[type]);
-+
- 		for(i=0;i<buddha_num_hwifs;i++) {
- 			if(type != BOARD_XSURF) {
- 				ide_setup_ports(&hw, (buddha_board+buddha_bases[i]),
-@@ -213,23 +219,23 @@ fail_base2:
- 						IRQ_AMIGA_PORTS);
- 			}	
- 
--			index = ide_register_hw(&hw, NULL, 1, &hwif);
--			if (index != -1) {
-+			hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
-+			if (hwif) {
-+				u8 index = hwif->index;
-+
-+				ide_init_port_data(hwif, index);
-+				ide_init_port_hw(hwif, &hw);
-+
- 				hwif->mmio = 1;
--				printk("ide%d: ", index);
--				switch(type) {
--				case BOARD_BUDDHA:
--					printk("Buddha");
--					break;
--				case BOARD_CATWEASEL:
--					printk("Catweasel");
--					break;
--				case BOARD_XSURF:
--					printk("X-Surf");
--					break;
--				}
--				printk(" IDE interface\n");	    
--			}		      
-+
-+				idx[i] = index;
-+			}
- 		}
-+
-+		ide_device_add(idx);
- 	}
-+
-+	return 0;
- }
-+
-+module_init(buddha_init);
-diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
-index 24a845d..13eee6d 100644
---- a/drivers/ide/legacy/dtc2278.c
-+++ b/drivers/ide/legacy/dtc2278.c
-@@ -150,8 +150,7 @@ int probe_dtc2278 = 0;
- module_param_named(probe, probe_dtc2278, bool, 0);
- MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
- 
--/* Can be called directly from ide.c. */
--int __init dtc2278_init(void)
-+static int __init dtc2278_init(void)
- {
- 	if (probe_dtc2278 == 0)
- 		return -ENODEV;
-@@ -163,9 +162,7 @@ int __init dtc2278_init(void)
- 	return 0;
- }
- 
--#ifdef MODULE
- module_init(dtc2278_init);
+-
+-#ifndef MMU_DEBUG
+-#define ASSERT(x) do { } while (0)
+-#else
+-#define ASSERT(x)							\
+-	if (!(x)) {							\
+-		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
+-		       __FILE__, __LINE__, #x);				\
+-	}
 -#endif
- 
- MODULE_AUTHOR("See Local File");
- MODULE_DESCRIPTION("support of DTC-2278 VLB IDE chipsets");
-diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
-index 7d7936f..2860956 100644
---- a/drivers/ide/legacy/falconide.c
-+++ b/drivers/ide/legacy/falconide.c
-@@ -62,19 +62,31 @@ EXPORT_SYMBOL(falconide_intr_lock);
-      *  Probe for a Falcon IDE interface
-      */
- 
--void __init falconide_init(void)
-+static int __init falconide_init(void)
- {
-     if (MACH_IS_ATARI && ATARIHW_PRESENT(IDE)) {
- 	hw_regs_t hw;
--	int index;
-+
-+	printk(KERN_INFO "ide: Falcon IDE controller\n");
- 
- 	ide_setup_ports(&hw, ATA_HD_BASE, falconide_offsets,
- 			0, 0, NULL,
- //			falconide_iops,
- 			IRQ_MFP_IDE);
--	index = ide_register_hw(&hw, NULL, 1, NULL);
- 
--	if (index != -1)
--	    printk("ide%d: Falcon IDE interface\n", index);
-+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
-+	if (hwif) {
-+		u8 index = hwif->index;
-+		u8 idx[4] = { index, 0xff, 0xff, 0xff };
-+
-+		ide_init_port_data(hwif, index);
-+		ide_init_port_hw(hwif, &hw);
-+
-+		ide_device_add(idx);
-+	}
-     }
-+
-+    return 0;
- }
-+
-+module_init(falconide_init);
-diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
-index 53331ee..492fa04 100644
---- a/drivers/ide/legacy/gayle.c
-+++ b/drivers/ide/legacy/gayle.c
-@@ -110,12 +110,13 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
-      *  Probe for a Gayle IDE interface (and optionally for an IDE doubler)
-      */
- 
--void __init gayle_init(void)
-+static int __init gayle_init(void)
- {
-     int a4000, i;
-+    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- 
-     if (!MACH_IS_AMIGA)
--	return;
-+	return -ENODEV;
- 
-     if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE))
- 	goto found;
-@@ -125,15 +126,21 @@ void __init gayle_init(void)
- 			  NULL))
- 	goto found;
- #endif
--    return;
-+    return -ENODEV;
- 
- found:
-+	printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n",
-+			 a4000 ? 4000 : 1200,
-+#ifdef CONFIG_BLK_DEV_IDEDOUBLER
-+			 ide_doubler ? ", IDE doubler" :
-+#endif
-+			 "");
-+
-     for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
- 	unsigned long base, ctrlport, irqport;
- 	ide_ack_intr_t *ack_intr;
- 	hw_regs_t hw;
- 	ide_hwif_t *hwif;
--	int index;
- 	unsigned long phys_base, res_start, res_n;
- 
- 	if (a4000) {
-@@ -165,21 +172,23 @@ found:
- //			&gayle_iops,
- 			IRQ_AMIGA_PORTS);
- 
--	index = ide_register_hw(&hw, NULL, 1, &hwif);
--	if (index != -1) {
-+	hwif = ide_find_port(base);
-+	if (hwif) {
-+	    u8 index = hwif->index;
-+
-+	    ide_init_port_data(hwif, index);
-+	    ide_init_port_hw(hwif, &hw);
-+
- 	    hwif->mmio = 1;
--	    switch (i) {
--		case 0:
--		    printk("ide%d: Gayle IDE interface (A%d style)\n", index,
--			   a4000 ? 4000 : 1200);
--		    break;
--#ifdef CONFIG_BLK_DEV_IDEDOUBLER
--		case 1:
--		    printk("ide%d: IDE doubler\n", index);
--		    break;
--#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
--	    }
-+
-+	    idx[i] = index;
- 	} else
- 	    release_mem_region(res_start, res_n);
-     }
-+
-+    ide_device_add(idx);
-+
-+    return 0;
- }
-+
-+module_init(gayle_init);
-diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
-index a4245d1..8da5031 100644
---- a/drivers/ide/legacy/ht6560b.c
-+++ b/drivers/ide/legacy/ht6560b.c
-@@ -307,8 +307,7 @@ int probe_ht6560b = 0;
- module_param_named(probe, probe_ht6560b, bool, 0);
- MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
- 
--/* Can be called directly from ide.c. */
--int __init ht6560b_init(void)
-+static int __init ht6560b_init(void)
- {
- 	ide_hwif_t *hwif, *mate;
- 	static u8 idx[4] = { 0, 1, 0xff, 0xff };
-@@ -369,9 +368,7 @@ release_region:
- 	return -ENODEV;
- }
- 
--#ifdef MODULE
- module_init(ht6560b_init);
+-
+-#define PT64_PT_BITS 9
+-#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
+-#define PT32_PT_BITS 10
+-#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
+-
+-#define PT_WRITABLE_SHIFT 1
+-
+-#define PT_PRESENT_MASK (1ULL << 0)
+-#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
+-#define PT_USER_MASK (1ULL << 2)
+-#define PT_PWT_MASK (1ULL << 3)
+-#define PT_PCD_MASK (1ULL << 4)
+-#define PT_ACCESSED_MASK (1ULL << 5)
+-#define PT_DIRTY_MASK (1ULL << 6)
+-#define PT_PAGE_SIZE_MASK (1ULL << 7)
+-#define PT_PAT_MASK (1ULL << 7)
+-#define PT_GLOBAL_MASK (1ULL << 8)
+-#define PT64_NX_MASK (1ULL << 63)
+-
+-#define PT_PAT_SHIFT 7
+-#define PT_DIR_PAT_SHIFT 12
+-#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
+-
+-#define PT32_DIR_PSE36_SIZE 4
+-#define PT32_DIR_PSE36_SHIFT 13
+-#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
+-
+-
+-#define PT_FIRST_AVAIL_BITS_SHIFT 9
+-#define PT64_SECOND_AVAIL_BITS_SHIFT 52
+-
+-#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+-
+-#define VALID_PAGE(x) ((x) != INVALID_PAGE)
+-
+-#define PT64_LEVEL_BITS 9
+-
+-#define PT64_LEVEL_SHIFT(level) \
+-		( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )
+-
+-#define PT64_LEVEL_MASK(level) \
+-		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
+-
+-#define PT64_INDEX(address, level)\
+-	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
+-
+-
+-#define PT32_LEVEL_BITS 10
+-
+-#define PT32_LEVEL_SHIFT(level) \
+-		( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )
+-
+-#define PT32_LEVEL_MASK(level) \
+-		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
+-
+-#define PT32_INDEX(address, level)\
+-	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
+-
+-
+-#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+-#define PT64_DIR_BASE_ADDR_MASK \
+-	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
+-
+-#define PT32_BASE_ADDR_MASK PAGE_MASK
+-#define PT32_DIR_BASE_ADDR_MASK \
+-	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
+-
+-
+-#define PFERR_PRESENT_MASK (1U << 0)
+-#define PFERR_WRITE_MASK (1U << 1)
+-#define PFERR_USER_MASK (1U << 2)
+-#define PFERR_FETCH_MASK (1U << 4)
+-
+-#define PT64_ROOT_LEVEL 4
+-#define PT32_ROOT_LEVEL 2
+-#define PT32E_ROOT_LEVEL 3
+-
+-#define PT_DIRECTORY_LEVEL 2
+-#define PT_PAGE_TABLE_LEVEL 1
+-
+-#define RMAP_EXT 4
+-
+-struct kvm_rmap_desc {
+-	u64 *shadow_ptes[RMAP_EXT];
+-	struct kvm_rmap_desc *more;
+-};
+-
+-static struct kmem_cache *pte_chain_cache;
+-static struct kmem_cache *rmap_desc_cache;
+-static struct kmem_cache *mmu_page_header_cache;
+-
+-static int is_write_protection(struct kvm_vcpu *vcpu)
+-{
+-	return vcpu->cr0 & X86_CR0_WP;
+-}
+-
+-static int is_cpuid_PSE36(void)
+-{
+-	return 1;
+-}
+-
+-static int is_nx(struct kvm_vcpu *vcpu)
+-{
+-	return vcpu->shadow_efer & EFER_NX;
+-}
+-
+-static int is_present_pte(unsigned long pte)
+-{
+-	return pte & PT_PRESENT_MASK;
+-}
+-
+-static int is_writeble_pte(unsigned long pte)
+-{
+-	return pte & PT_WRITABLE_MASK;
+-}
+-
+-static int is_io_pte(unsigned long pte)
+-{
+-	return pte & PT_SHADOW_IO_MARK;
+-}
+-
+-static int is_rmap_pte(u64 pte)
+-{
+-	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
+-		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
+-}
+-
+-static void set_shadow_pte(u64 *sptep, u64 spte)
+-{
+-#ifdef CONFIG_X86_64
+-	set_64bit((unsigned long *)sptep, spte);
+-#else
+-	set_64bit((unsigned long long *)sptep, spte);
 -#endif
- 
- MODULE_AUTHOR("See Local File");
- MODULE_DESCRIPTION("HT-6560B EIDE-controller support");
-diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
-index 03715c0..f4ea15b 100644
---- a/drivers/ide/legacy/ide-cs.c
-+++ b/drivers/ide/legacy/ide-cs.c
-@@ -153,7 +153,7 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq
-     hw.irq = irq;
-     hw.chipset = ide_pci;
-     hw.dev = &handle->dev;
--    return ide_register_hw(&hw, &ide_undecoded_slave, 0, NULL);
-+    return ide_register_hw(&hw, &ide_undecoded_slave, NULL);
- }
- 
- /*======================================================================
-diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
-index 7bb79f5..69a0fb0 100644
---- a/drivers/ide/legacy/ide_platform.c
-+++ b/drivers/ide/legacy/ide_platform.c
-@@ -28,39 +28,27 @@ static struct {
- 	int index;
- } hwif_prop;
- 
--static ide_hwif_t *__devinit plat_ide_locate_hwif(void __iomem *base,
--	    void __iomem *ctrl, struct pata_platform_info *pdata, int irq,
--	    int mmio)
-+static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
-+					   void __iomem *base,
-+					   void __iomem *ctrl,
-+					   struct pata_platform_info *pdata,
-+					   int irq)
- {
- 	unsigned long port = (unsigned long)base;
--	ide_hwif_t *hwif = ide_find_port(port);
- 	int i;
- 
--	if (hwif == NULL)
--		goto out;
+-}
 -
--	hwif->io_ports[IDE_DATA_OFFSET] = port;
-+	hw->io_ports[IDE_DATA_OFFSET] = port;
- 
- 	port += (1 << pdata->ioport_shift);
- 	for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET;
- 	     i++, port += (1 << pdata->ioport_shift))
--		hwif->io_ports[i] = port;
+-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+-				  struct kmem_cache *base_cache, int min)
+-{
+-	void *obj;
 -
--	hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
-+		hw->io_ports[i] = port;
- 
--	hwif->irq = irq;
-+	hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
- 
--	hwif->chipset = ide_generic;
-+	hw->irq = irq;
- 
--	if (mmio) {
--		hwif->mmio = 1;
--		default_hwif_mmiops(hwif);
+-	if (cache->nobjs >= min)
+-		return 0;
+-	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+-		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
+-		if (!obj)
+-			return -ENOMEM;
+-		cache->objects[cache->nobjs++] = obj;
 -	}
+-	return 0;
+-}
 -
--	hwif_prop.hwif = hwif;
--	hwif_prop.index = hwif->index;
+-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+-{
+-	while (mc->nobjs)
+-		kfree(mc->objects[--mc->nobjs]);
+-}
+-
+-static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
+-				       int min)
+-{
+-	struct page *page;
+-
+-	if (cache->nobjs >= min)
+-		return 0;
+-	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+-		page = alloc_page(GFP_KERNEL);
+-		if (!page)
+-			return -ENOMEM;
+-		set_page_private(page, 0);
+-		cache->objects[cache->nobjs++] = page_address(page);
+-	}
+-	return 0;
+-}
+-
+-static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
+-{
+-	while (mc->nobjs)
+-		free_page((unsigned long)mc->objects[--mc->nobjs]);
+-}
+-
+-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+-{
+-	int r;
+-
+-	kvm_mmu_free_some_pages(vcpu);
+-	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+-				   pte_chain_cache, 4);
+-	if (r)
+-		goto out;
+-	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+-				   rmap_desc_cache, 1);
+-	if (r)
+-		goto out;
+-	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4);
+-	if (r)
+-		goto out;
+-	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+-				   mmu_page_header_cache, 4);
 -out:
--	return hwif;
-+	hw->chipset = ide_generic;
- }
- 
- static int __devinit plat_ide_probe(struct platform_device *pdev)
-@@ -71,6 +59,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
- 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- 	int ret = 0;
- 	int mmio = 0;
-+	hw_regs_t hw;
- 
- 	pdata = pdev->dev.platform_data;
- 
-@@ -106,15 +95,27 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
- 			res_alt->start, res_alt->end - res_alt->start + 1);
- 	}
- 
--	hwif = plat_ide_locate_hwif(hwif_prop.plat_ide_mapbase,
--	         hwif_prop.plat_ide_alt_mapbase, pdata, res_irq->start, mmio);
+-	return r;
+-}
 -
-+	hwif = ide_find_port((unsigned long)hwif_prop.plat_ide_mapbase);
- 	if (!hwif) {
- 		ret = -ENODEV;
- 		goto out;
- 	}
--	hwif->gendev.parent = &pdev->dev;
--	hwif->noprobe = 0;
-+
-+	memset(&hw, 0, sizeof(hw));
-+	plat_ide_setup_ports(&hw, hwif_prop.plat_ide_mapbase,
-+			     hwif_prop.plat_ide_alt_mapbase,
-+			     pdata, res_irq->start);
-+	hw.dev = &pdev->dev;
-+
-+	ide_init_port_hw(hwif, &hw);
-+
-+	if (mmio) {
-+		hwif->mmio = 1;
-+		default_hwif_mmiops(hwif);
-+	}
-+
-+	hwif_prop.hwif = hwif;
-+	hwif_prop.index = hwif->index;
- 
- 	idx[0] = hwif->index;
- 
-diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
-index 5c6aa77..782d4c7 100644
---- a/drivers/ide/legacy/macide.c
-+++ b/drivers/ide/legacy/macide.c
-@@ -77,15 +77,17 @@ int macide_ack_intr(ide_hwif_t* hwif)
- 	return 0;
- }
- 
-+static const char *mac_ide_name[] =
-+	{ "Quadra", "Powerbook", "Powerbook Baboon" };
-+
- /*
-  * Probe for a Macintosh IDE interface
-  */
- 
--void __init macide_init(void)
-+static int __init macide_init(void)
- {
- 	hw_regs_t hw;
- 	ide_hwif_t *hwif;
--	int index = -1;
- 
- 	switch (macintosh_config->ide_type) {
- 	case MAC_IDE_QUADRA:
-@@ -93,48 +95,50 @@ void __init macide_init(void)
- 				0, 0, macide_ack_intr,
- //				quadra_ide_iops,
- 				IRQ_NUBUS_F);
--		index = ide_register_hw(&hw, NULL, 1, &hwif);
- 		break;
- 	case MAC_IDE_PB:
- 		ide_setup_ports(&hw, IDE_BASE, macide_offsets,
- 				0, 0, macide_ack_intr,
- //				macide_pb_iops,
- 				IRQ_NUBUS_C);
--		index = ide_register_hw(&hw, NULL, 1, &hwif);
- 		break;
- 	case MAC_IDE_BABOON:
- 		ide_setup_ports(&hw, BABOON_BASE, macide_offsets,
- 				0, 0, NULL,
- //				macide_baboon_iops,
- 				IRQ_BABOON_1);
--		index = ide_register_hw(&hw, NULL, 1, &hwif);
--		if (index == -1) break;
--		if (macintosh_config->ident == MAC_MODEL_PB190) {
-+		break;
-+	default:
-+		return -ENODEV;
-+	}
-+
-+	printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
-+			 mac_ide_name[macintosh_config->ide_type - 1]);
- 
-+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
-+	if (hwif) {
-+		u8 index = hwif->index;
-+		u8 idx[4] = { index, 0xff, 0xff, 0xff };
-+
-+		ide_init_port_data(hwif, index);
-+		ide_init_port_hw(hwif, &hw);
-+
-+		if (macintosh_config->ide_type == MAC_IDE_BABOON &&
-+		    macintosh_config->ident == MAC_MODEL_PB190) {
- 			/* Fix breakage in ide-disk.c: drive capacity	*/
- 			/* is not initialized for drives without a 	*/
- 			/* hardware ID, and we can't get that without	*/
- 			/* probing the drive which freezes a 190.	*/
+-static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+-{
+-	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
+-	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+-	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
+-	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
+-}
 -
--			ide_drive_t *drive = &ide_hwifs[index].drives[0];
-+			ide_drive_t *drive = &hwif->drives[0];
- 			drive->capacity64 = drive->cyl*drive->head*drive->sect;
+-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
+-				    size_t size)
+-{
+-	void *p;
 -
- 		}
--		break;
+-	BUG_ON(!mc->nobjs);
+-	p = mc->objects[--mc->nobjs];
+-	memset(p, 0, size);
+-	return p;
+-}
 -
--	default:
--	    return;
+-static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
+-{
+-	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+-				      sizeof(struct kvm_pte_chain));
+-}
+-
+-static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
+-{
+-	kfree(pc);
+-}
+-
+-static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
+-{
+-	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+-				      sizeof(struct kvm_rmap_desc));
+-}
+-
+-static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
+-{
+-	kfree(rd);
+-}
+-
+-/*
+- * Reverse mapping data structures:
+- *
+- * If page->private bit zero is zero, then page->private points to the
+- * shadow page table entry that points to page_address(page).
+- *
+- * If page->private bit zero is one, (then page->private & ~1) points
+- * to a struct kvm_rmap_desc containing more mappings.
+- */
+-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
+-{
+-	struct page *page;
+-	struct kvm_rmap_desc *desc;
+-	int i;
+-
+-	if (!is_rmap_pte(*spte))
+-		return;
+-	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+-	if (!page_private(page)) {
+-		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
+-		set_page_private(page,(unsigned long)spte);
+-	} else if (!(page_private(page) & 1)) {
+-		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
+-		desc = mmu_alloc_rmap_desc(vcpu);
+-		desc->shadow_ptes[0] = (u64 *)page_private(page);
+-		desc->shadow_ptes[1] = spte;
+-		set_page_private(page,(unsigned long)desc | 1);
+-	} else {
+-		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
+-		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
+-		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
+-			desc = desc->more;
+-		if (desc->shadow_ptes[RMAP_EXT-1]) {
+-			desc->more = mmu_alloc_rmap_desc(vcpu);
+-			desc = desc->more;
+-		}
+-		for (i = 0; desc->shadow_ptes[i]; ++i)
+-			;
+-		desc->shadow_ptes[i] = spte;
 -	}
- 
--        if (index != -1) {
- 		hwif->mmio = 1;
--		if (macintosh_config->ide_type == MAC_IDE_QUADRA)
--			printk(KERN_INFO "ide%d: Macintosh Quadra IDE interface\n", index);
--		else if (macintosh_config->ide_type == MAC_IDE_PB)
--			printk(KERN_INFO "ide%d: Macintosh Powerbook IDE interface\n", index);
--		else if (macintosh_config->ide_type == MAC_IDE_BABOON)
--			printk(KERN_INFO "ide%d: Macintosh Powerbook Baboon IDE interface\n", index);
+-}
+-
+-static void rmap_desc_remove_entry(struct page *page,
+-				   struct kvm_rmap_desc *desc,
+-				   int i,
+-				   struct kvm_rmap_desc *prev_desc)
+-{
+-	int j;
+-
+-	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
+-		;
+-	desc->shadow_ptes[i] = desc->shadow_ptes[j];
+-	desc->shadow_ptes[j] = NULL;
+-	if (j != 0)
+-		return;
+-	if (!prev_desc && !desc->more)
+-		set_page_private(page,(unsigned long)desc->shadow_ptes[0]);
+-	else
+-		if (prev_desc)
+-			prev_desc->more = desc->more;
 -		else
--			printk(KERN_INFO "ide%d: Unknown Macintosh IDE interface\n", index);
-+
-+		ide_device_add(idx);
- 	}
-+
-+	return 0;
- }
-+
-+module_init(macide_init);
-diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
-index 6ea46a6..f532973 100644
---- a/drivers/ide/legacy/q40ide.c
-+++ b/drivers/ide/legacy/q40ide.c
-@@ -111,15 +111,17 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
-  *  Probe for Q40 IDE interfaces
-  */
- 
--void __init q40ide_init(void)
-+static int __init q40ide_init(void)
- {
-     int i;
-     ide_hwif_t *hwif;
--    int index;
-     const char *name;
-+    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- 
-     if (!MACH_IS_Q40)
--      return ;
-+      return -ENODEV;
-+
-+    printk(KERN_INFO "ide: Q40 IDE controller\n");
- 
-     for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
- 	hw_regs_t hw;
-@@ -141,10 +143,20 @@ void __init q40ide_init(void)
- 			0, NULL,
- //			m68kide_iops,
- 			q40ide_default_irq(pcide_bases[i]));
--	index = ide_register_hw(&hw, NULL, 1, &hwif);
--	// **FIXME**
--	if (index != -1)
-+
-+	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
-+	if (hwif) {
-+		ide_init_port_data(hwif, hwif->index);
-+		ide_init_port_hw(hwif, &hw);
- 		hwif->mmio = 1;
-+
-+		idx[i] = hwif->index;
-+	}
-     }
-+
-+    ide_device_add(idx);
-+
-+    return 0;
- }
- 
-+module_init(q40ide_init);
-diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
-index 912e738..2bac4c1 100644
---- a/drivers/ide/legacy/qd65xx.c
-+++ b/drivers/ide/legacy/qd65xx.c
-@@ -478,8 +478,7 @@ int probe_qd65xx = 0;
- module_param_named(probe, probe_qd65xx, bool, 0);
- MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
- 
--/* Can be called directly from ide.c. */
--int __init qd65xx_init(void)
-+static int __init qd65xx_init(void)
- {
- 	if (probe_qd65xx == 0)
- 		return -ENODEV;
-@@ -492,9 +491,7 @@ int __init qd65xx_init(void)
- 	return 0;
- }
- 
--#ifdef MODULE
- module_init(qd65xx_init);
--#endif
- 
- MODULE_AUTHOR("Samuel Thibault");
- MODULE_DESCRIPTION("support of qd65xx vlb ide chipset");
-diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
-index 79577b9..a1ae1ae 100644
---- a/drivers/ide/legacy/umc8672.c
-+++ b/drivers/ide/legacy/umc8672.c
-@@ -169,8 +169,7 @@ int probe_umc8672 = 0;
- module_param_named(probe, probe_umc8672, bool, 0);
- MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
- 
--/* Can be called directly from ide.c. */
--int __init umc8672_init(void)
-+static int __init umc8672_init(void)
- {
- 	if (probe_umc8672 == 0)
- 		goto out;
-@@ -181,9 +180,7 @@ out:
- 	return -ENODEV;;
- }
- 
--#ifdef MODULE
- module_init(umc8672_init);
+-			set_page_private(page,(unsigned long)desc->more | 1);
+-	mmu_free_rmap_desc(desc);
+-}
+-
+-static void rmap_remove(u64 *spte)
+-{
+-	struct page *page;
+-	struct kvm_rmap_desc *desc;
+-	struct kvm_rmap_desc *prev_desc;
+-	int i;
+-
+-	if (!is_rmap_pte(*spte))
+-		return;
+-	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+-	if (!page_private(page)) {
+-		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
+-		BUG();
+-	} else if (!(page_private(page) & 1)) {
+-		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
+-		if ((u64 *)page_private(page) != spte) {
+-			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
+-			       spte, *spte);
+-			BUG();
+-		}
+-		set_page_private(page,0);
+-	} else {
+-		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
+-		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
+-		prev_desc = NULL;
+-		while (desc) {
+-			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
+-				if (desc->shadow_ptes[i] == spte) {
+-					rmap_desc_remove_entry(page,
+-							       desc, i,
+-							       prev_desc);
+-					return;
+-				}
+-			prev_desc = desc;
+-			desc = desc->more;
+-		}
+-		BUG();
+-	}
+-}
+-
+-static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+-{
+-	struct kvm *kvm = vcpu->kvm;
+-	struct page *page;
+-	struct kvm_rmap_desc *desc;
+-	u64 *spte;
+-
+-	page = gfn_to_page(kvm, gfn);
+-	BUG_ON(!page);
+-
+-	while (page_private(page)) {
+-		if (!(page_private(page) & 1))
+-			spte = (u64 *)page_private(page);
+-		else {
+-			desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
+-			spte = desc->shadow_ptes[0];
+-		}
+-		BUG_ON(!spte);
+-		BUG_ON((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT
+-		       != page_to_pfn(page));
+-		BUG_ON(!(*spte & PT_PRESENT_MASK));
+-		BUG_ON(!(*spte & PT_WRITABLE_MASK));
+-		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
+-		rmap_remove(spte);
+-		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+-		kvm_flush_remote_tlbs(vcpu->kvm);
+-	}
+-}
+-
+-#ifdef MMU_DEBUG
+-static int is_empty_shadow_page(u64 *spt)
+-{
+-	u64 *pos;
+-	u64 *end;
+-
+-	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
+-		if (*pos != 0) {
+-			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
+-			       pos, *pos);
+-			return 0;
+-		}
+-	return 1;
+-}
 -#endif
- 
- MODULE_AUTHOR("Wolfram Podien");
- MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset");
-diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
-index a4ce3ba..2d3e511 100644
---- a/drivers/ide/mips/au1xxx-ide.c
-+++ b/drivers/ide/mips/au1xxx-ide.c
-@@ -198,8 +198,6 @@ static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 
- 		break;
- #endif
--	default:
+-
+-static void kvm_mmu_free_page(struct kvm *kvm,
+-			      struct kvm_mmu_page *page_head)
+-{
+-	ASSERT(is_empty_shadow_page(page_head->spt));
+-	list_del(&page_head->link);
+-	__free_page(virt_to_page(page_head->spt));
+-	kfree(page_head);
+-	++kvm->n_free_mmu_pages;
+-}
+-
+-static unsigned kvm_page_table_hashfn(gfn_t gfn)
+-{
+-	return gfn;
+-}
+-
+-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
+-					       u64 *parent_pte)
+-{
+-	struct kvm_mmu_page *page;
+-
+-	if (!vcpu->kvm->n_free_mmu_pages)
+-		return NULL;
+-
+-	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+-				      sizeof *page);
+-	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+-	set_page_private(virt_to_page(page->spt), (unsigned long)page);
+-	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+-	ASSERT(is_empty_shadow_page(page->spt));
+-	page->slot_bitmap = 0;
+-	page->multimapped = 0;
+-	page->parent_pte = parent_pte;
+-	--vcpu->kvm->n_free_mmu_pages;
+-	return page;
+-}
+-
+-static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
+-				    struct kvm_mmu_page *page, u64 *parent_pte)
+-{
+-	struct kvm_pte_chain *pte_chain;
+-	struct hlist_node *node;
+-	int i;
+-
+-	if (!parent_pte)
 -		return;
- 	}
- 
- 	au_writel(mem_sttime,MEM_STTIME2);
-@@ -397,26 +395,10 @@ static int auide_dma_test_irq(ide_drive_t *drive)
- 	return 0;
- }
- 
--static void auide_dma_host_on(ide_drive_t *drive)
+-	if (!page->multimapped) {
+-		u64 *old = page->parent_pte;
+-
+-		if (!old) {
+-			page->parent_pte = parent_pte;
+-			return;
+-		}
+-		page->multimapped = 1;
+-		pte_chain = mmu_alloc_pte_chain(vcpu);
+-		INIT_HLIST_HEAD(&page->parent_ptes);
+-		hlist_add_head(&pte_chain->link, &page->parent_ptes);
+-		pte_chain->parent_ptes[0] = old;
+-	}
+-	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
+-		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
+-			continue;
+-		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
+-			if (!pte_chain->parent_ptes[i]) {
+-				pte_chain->parent_ptes[i] = parent_pte;
+-				return;
+-			}
+-	}
+-	pte_chain = mmu_alloc_pte_chain(vcpu);
+-	BUG_ON(!pte_chain);
+-	hlist_add_head(&pte_chain->link, &page->parent_ptes);
+-	pte_chain->parent_ptes[0] = parent_pte;
+-}
+-
+-static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
+-				       u64 *parent_pte)
+-{
+-	struct kvm_pte_chain *pte_chain;
+-	struct hlist_node *node;
+-	int i;
+-
+-	if (!page->multimapped) {
+-		BUG_ON(page->parent_pte != parent_pte);
+-		page->parent_pte = NULL;
+-		return;
+-	}
+-	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
+-		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
+-			if (!pte_chain->parent_ptes[i])
+-				break;
+-			if (pte_chain->parent_ptes[i] != parent_pte)
+-				continue;
+-			while (i + 1 < NR_PTE_CHAIN_ENTRIES
+-				&& pte_chain->parent_ptes[i + 1]) {
+-				pte_chain->parent_ptes[i]
+-					= pte_chain->parent_ptes[i + 1];
+-				++i;
+-			}
+-			pte_chain->parent_ptes[i] = NULL;
+-			if (i == 0) {
+-				hlist_del(&pte_chain->link);
+-				mmu_free_pte_chain(pte_chain);
+-				if (hlist_empty(&page->parent_ptes)) {
+-					page->multimapped = 0;
+-					page->parent_pte = NULL;
+-				}
+-			}
+-			return;
+-		}
+-	BUG();
+-}
+-
+-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
+-						gfn_t gfn)
+-{
+-	unsigned index;
+-	struct hlist_head *bucket;
+-	struct kvm_mmu_page *page;
+-	struct hlist_node *node;
+-
+-	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+-	bucket = &vcpu->kvm->mmu_page_hash[index];
+-	hlist_for_each_entry(page, node, bucket, hash_link)
+-		if (page->gfn == gfn && !page->role.metaphysical) {
+-			pgprintk("%s: found role %x\n",
+-				 __FUNCTION__, page->role.word);
+-			return page;
+-		}
+-	return NULL;
+-}
+-
+-static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
+-					     gfn_t gfn,
+-					     gva_t gaddr,
+-					     unsigned level,
+-					     int metaphysical,
+-					     unsigned hugepage_access,
+-					     u64 *parent_pte)
+-{
+-	union kvm_mmu_page_role role;
+-	unsigned index;
+-	unsigned quadrant;
+-	struct hlist_head *bucket;
+-	struct kvm_mmu_page *page;
+-	struct hlist_node *node;
+-
+-	role.word = 0;
+-	role.glevels = vcpu->mmu.root_level;
+-	role.level = level;
+-	role.metaphysical = metaphysical;
+-	role.hugepage_access = hugepage_access;
+-	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+-		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
+-		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
+-		role.quadrant = quadrant;
+-	}
+-	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
+-		 gfn, role.word);
+-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+-	bucket = &vcpu->kvm->mmu_page_hash[index];
+-	hlist_for_each_entry(page, node, bucket, hash_link)
+-		if (page->gfn == gfn && page->role.word == role.word) {
+-			mmu_page_add_parent_pte(vcpu, page, parent_pte);
+-			pgprintk("%s: found\n", __FUNCTION__);
+-			return page;
+-		}
+-	page = kvm_mmu_alloc_page(vcpu, parent_pte);
+-	if (!page)
+-		return page;
+-	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
+-	page->gfn = gfn;
+-	page->role = role;
+-	hlist_add_head(&page->hash_link, bucket);
+-	if (!metaphysical)
+-		rmap_write_protect(vcpu, gfn);
+-	return page;
+-}
+-
+-static void kvm_mmu_page_unlink_children(struct kvm *kvm,
+-					 struct kvm_mmu_page *page)
+-{
+-	unsigned i;
+-	u64 *pt;
+-	u64 ent;
+-
+-	pt = page->spt;
+-
+-	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
+-		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+-			if (pt[i] & PT_PRESENT_MASK)
+-				rmap_remove(&pt[i]);
+-			pt[i] = 0;
+-		}
+-		kvm_flush_remote_tlbs(kvm);
+-		return;
+-	}
+-
+-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+-		ent = pt[i];
+-
+-		pt[i] = 0;
+-		if (!(ent & PT_PRESENT_MASK))
+-			continue;
+-		ent &= PT64_BASE_ADDR_MASK;
+-		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
+-	}
+-	kvm_flush_remote_tlbs(kvm);
+-}
+-
+-static void kvm_mmu_put_page(struct kvm_mmu_page *page,
+-			     u64 *parent_pte)
+-{
+-	mmu_page_remove_parent_pte(page, parent_pte);
+-}
+-
+-static void kvm_mmu_zap_page(struct kvm *kvm,
+-			     struct kvm_mmu_page *page)
+-{
+-	u64 *parent_pte;
+-
+-	while (page->multimapped || page->parent_pte) {
+-		if (!page->multimapped)
+-			parent_pte = page->parent_pte;
+-		else {
+-			struct kvm_pte_chain *chain;
+-
+-			chain = container_of(page->parent_ptes.first,
+-					     struct kvm_pte_chain, link);
+-			parent_pte = chain->parent_ptes[0];
+-		}
+-		BUG_ON(!parent_pte);
+-		kvm_mmu_put_page(page, parent_pte);
+-		set_shadow_pte(parent_pte, 0);
+-	}
+-	kvm_mmu_page_unlink_children(kvm, page);
+-	if (!page->root_count) {
+-		hlist_del(&page->hash_link);
+-		kvm_mmu_free_page(kvm, page);
+-	} else
+-		list_move(&page->link, &kvm->active_mmu_pages);
+-}
+-
+-static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+-{
+-	unsigned index;
+-	struct hlist_head *bucket;
+-	struct kvm_mmu_page *page;
+-	struct hlist_node *node, *n;
+-	int r;
+-
+-	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+-	r = 0;
+-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+-	bucket = &vcpu->kvm->mmu_page_hash[index];
+-	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
+-		if (page->gfn == gfn && !page->role.metaphysical) {
+-			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+-				 page->role.word);
+-			kvm_mmu_zap_page(vcpu->kvm, page);
+-			r = 1;
+-		}
+-	return r;
+-}
+-
+-static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
 -{
+-	struct kvm_mmu_page *page;
+-
+-	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+-		pgprintk("%s: zap %lx %x\n",
+-			 __FUNCTION__, gfn, page->role.word);
+-		kvm_mmu_zap_page(vcpu->kvm, page);
+-	}
 -}
 -
--static int auide_dma_on(ide_drive_t *drive)
+-static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
 -{
--	drive->using_dma = 1;
+-	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
+-	struct kvm_mmu_page *page_head = page_header(__pa(pte));
 -
--	return 0;
+-	__set_bit(slot, &page_head->slot_bitmap);
 -}
 -
--static void auide_dma_host_off(ide_drive_t *drive)
-+static void auide_dma_host_set(ide_drive_t *drive, int on)
- {
- }
- 
--static void auide_dma_off_quietly(ide_drive_t *drive)
+-hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 -{
--	drive->using_dma = 0;
+-	hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+-
+-	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
 -}
 -
- static void auide_dma_lost_irq(ide_drive_t *drive)
- {
- 	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
-@@ -643,12 +625,13 @@ static int au_ide_probe(struct device *dev)
- 	/* FIXME:  This might possibly break PCMCIA IDE devices */
- 
- 	hwif                            = &ide_hwifs[pdev->id];
--	hwif->irq			= ahwif->irq;
--	hwif->chipset                   = ide_au1xxx;
- 
- 	memset(&hw, 0, sizeof(hw));
- 	auide_setup_ports(&hw, ahwif);
--	memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
-+	hw.irq = ahwif->irq;
-+	hw.chipset = ide_au1xxx;
-+
-+	ide_init_port_hw(hwif, &hw);
- 
- 	hwif->ultra_mask                = 0x0;  /* Disable Ultra DMA */
- #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-@@ -662,7 +645,6 @@ static int au_ide_probe(struct device *dev)
- 	hwif->pio_mask = ATA_PIO4;
- 	hwif->host_flags = IDE_HFLAG_POST_SET_MODE;
- 
--	hwif->noprobe = 0;
- 	hwif->drives[0].unmask          = 1;
- 	hwif->drives[1].unmask          = 1;
- 
-@@ -684,29 +666,25 @@ static int au_ide_probe(struct device *dev)
- 	hwif->set_dma_mode		= &auide_set_dma_mode;
- 
- #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
--	hwif->dma_off_quietly		= &auide_dma_off_quietly;
- 	hwif->dma_timeout		= &auide_dma_timeout;
- 
- 	hwif->mdma_filter		= &auide_mdma_filter;
- 
-+	hwif->dma_host_set		= &auide_dma_host_set;
- 	hwif->dma_exec_cmd              = &auide_dma_exec_cmd;
- 	hwif->dma_start                 = &auide_dma_start;
- 	hwif->ide_dma_end               = &auide_dma_end;
- 	hwif->dma_setup                 = &auide_dma_setup;
- 	hwif->ide_dma_test_irq          = &auide_dma_test_irq;
--	hwif->dma_host_off		= &auide_dma_host_off;
--	hwif->dma_host_on		= &auide_dma_host_on;
- 	hwif->dma_lost_irq		= &auide_dma_lost_irq;
--	hwif->ide_dma_on                = &auide_dma_on;
--#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
-+#endif
- 	hwif->channel                   = 0;
--	hwif->hold                      = 1;
- 	hwif->select_data               = 0;    /* no chipset-specific code */
- 	hwif->config_data               = 0;    /* no chipset-specific code */
- 
- 	hwif->drives[0].autotune        = 1;    /* 1=autotune, 2=noautotune, 0=default */
- 	hwif->drives[1].autotune	= 1;
--#endif
-+
- 	hwif->drives[0].no_io_32bit	= 1;
- 	hwif->drives[1].no_io_32bit	= 1;
- 
-diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
-index 521edd4..8b3959d 100644
---- a/drivers/ide/mips/swarm.c
-+++ b/drivers/ide/mips/swarm.c
-@@ -117,6 +117,7 @@ static int __devinit swarm_ide_probe(struct device *dev)
- 	default_hwif_mmiops(hwif);
- 	/* Prevent resource map manipulation.  */
- 	hwif->mmio = 1;
-+	hwif->chipset = ide_generic;
- 	hwif->noprobe = 0;
- 
- 	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
-diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
-index 95d1ea8..9480325 100644
---- a/drivers/ide/pci/Makefile
-+++ b/drivers/ide/pci/Makefile
-@@ -36,4 +36,8 @@ obj-$(CONFIG_BLK_DEV_VIA82CXXX)		+= via82cxxx.o
- # Must appear at the end of the block
- obj-$(CONFIG_BLK_DEV_GENERIC)          += generic.o
- 
-+ifeq ($(CONFIG_BLK_DEV_CMD640), m)
-+	obj-m += cmd640.o
-+endif
-+
- EXTRA_CFLAGS	:= -Idrivers/ide
-diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
-index 4426850..7f4d185 100644
---- a/drivers/ide/pci/aec62xx.c
-+++ b/drivers/ide/pci/aec62xx.c
-@@ -202,6 +202,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
- 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
- 		.host_flags	= IDE_HFLAG_SERIALIZE |
- 				  IDE_HFLAG_NO_ATAPI_DMA |
-+				  IDE_HFLAG_ABUSE_SET_DMA_MODE |
- 				  IDE_HFLAG_OFF_BOARD,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
-@@ -211,6 +212,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
- 		.init_chipset	= init_chipset_aec62xx,
- 		.init_hwif	= init_hwif_aec62xx,
- 		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
-+				  IDE_HFLAG_ABUSE_SET_DMA_MODE |
- 				  IDE_HFLAG_OFF_BOARD,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
-@@ -220,7 +222,8 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
- 		.init_chipset	= init_chipset_aec62xx,
- 		.init_hwif	= init_hwif_aec62xx,
- 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA,
-+		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
-+				  IDE_HFLAG_ABUSE_SET_DMA_MODE,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= ATA_UDMA4,
-@@ -228,7 +231,9 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
- 		.name		= "AEC6280",
- 		.init_chipset	= init_chipset_aec62xx,
- 		.init_hwif	= init_hwif_aec62xx,
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
-+				  IDE_HFLAG_ABUSE_SET_DMA_MODE |
-+				  IDE_HFLAG_OFF_BOARD,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= ATA_UDMA5,
-@@ -237,7 +242,9 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
- 		.init_chipset	= init_chipset_aec62xx,
- 		.init_hwif	= init_hwif_aec62xx,
- 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
-+				  IDE_HFLAG_ABUSE_SET_DMA_MODE |
-+				  IDE_HFLAG_OFF_BOARD,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= ATA_UDMA5,
-diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
-index ce29393..49aa82e 100644
---- a/drivers/ide/pci/alim15x3.c
-+++ b/drivers/ide/pci/alim15x3.c
-@@ -402,9 +402,6 @@ static void ali_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 	u8 tmpbyte		= 0x00;
- 	int m5229_udma		= (hwif->channel) ? 0x57 : 0x56;
- 
--	if (speed < XFER_PIO_0)
--		return;
+-hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+-{
+-	struct page *page;
 -
- 	if (speed == XFER_UDMA_6)
- 		speed1 = 0x47;
- 
-diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
-index 8d4125e..cee51fd 100644
---- a/drivers/ide/pci/amd74xx.c
-+++ b/drivers/ide/pci/amd74xx.c
-@@ -266,6 +266,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
- #define IDE_HFLAGS_AMD \
- 	(IDE_HFLAG_PIO_NO_BLACKLIST | \
- 	 IDE_HFLAG_PIO_NO_DOWNGRADE | \
-+	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
- 	 IDE_HFLAG_POST_SET_MODE | \
- 	 IDE_HFLAG_IO_32BIT | \
- 	 IDE_HFLAG_UNMASK_IRQS | \
-diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
-index ef8e016..4918719 100644
---- a/drivers/ide/pci/atiixp.c
-+++ b/drivers/ide/pci/atiixp.c
-@@ -1,5 +1,5 @@
- /*
-- *  linux/drivers/ide/pci/atiixp.c	Version 0.03	Aug 3 2007
-+ *  linux/drivers/ide/pci/atiixp.c	Version 0.05	Nov 9 2007
-  *
-  *  Copyright (C) 2003 ATI Inc. <hyu at ati.com>
-  *  Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz
-@@ -43,47 +43,8 @@ static atiixp_ide_timing mdma_timing[] = {
- 	{ 0x02, 0x00 },
- };
- 
--static int save_mdma_mode[4];
+-	ASSERT((gpa & HPA_ERR_MASK) == 0);
+-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+-	if (!page)
+-		return gpa | HPA_ERR_MASK;
+-	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
+-		| (gpa & (PAGE_SIZE-1));
+-}
 -
- static DEFINE_SPINLOCK(atiixp_lock);
- 
--static void atiixp_dma_host_on(ide_drive_t *drive)
+-hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
 -{
--	struct pci_dev *dev = drive->hwif->pci_dev;
--	unsigned long flags;
--	u16 tmp16;
+-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 -
--	spin_lock_irqsave(&atiixp_lock, flags);
+-	if (gpa == UNMAPPED_GVA)
+-		return UNMAPPED_GVA;
+-	return gpa_to_hpa(vcpu, gpa);
+-}
 -
--	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
--	if (save_mdma_mode[drive->dn])
--		tmp16 &= ~(1 << drive->dn);
--	else
--		tmp16 |= (1 << drive->dn);
--	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
+-{
+-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 -
--	spin_unlock_irqrestore(&atiixp_lock, flags);
+-	if (gpa == UNMAPPED_GVA)
+-		return NULL;
+-	return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+-}
 -
--	ide_dma_host_on(drive);
+-static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
+-{
 -}
 -
--static void atiixp_dma_host_off(ide_drive_t *drive)
+-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 -{
--	struct pci_dev *dev = drive->hwif->pci_dev;
--	unsigned long flags;
--	u16 tmp16;
+-	int level = PT32E_ROOT_LEVEL;
+-	hpa_t table_addr = vcpu->mmu.root_hpa;
 -
--	spin_lock_irqsave(&atiixp_lock, flags);
+-	for (; ; level--) {
+-		u32 index = PT64_INDEX(v, level);
+-		u64 *table;
+-		u64 pte;
 -
--	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
--	tmp16 &= ~(1 << drive->dn);
--	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+-		ASSERT(VALID_PAGE(table_addr));
+-		table = __va(table_addr);
 -
--	spin_unlock_irqrestore(&atiixp_lock, flags);
+-		if (level == 1) {
+-			pte = table[index];
+-			if (is_present_pte(pte) && is_writeble_pte(pte))
+-				return 0;
+-			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
+-			page_header_update_slot(vcpu->kvm, table, v);
+-			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
+-								PT_USER_MASK;
+-			rmap_add(vcpu, &table[index]);
+-			return 0;
+-		}
 -
--	ide_dma_host_off(drive);
+-		if (table[index] == 0) {
+-			struct kvm_mmu_page *new_table;
+-			gfn_t pseudo_gfn;
+-
+-			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
+-				>> PAGE_SHIFT;
+-			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
+-						     v, level - 1,
+-						     1, 0, &table[index]);
+-			if (!new_table) {
+-				pgprintk("nonpaging_map: ENOMEM\n");
+-				return -ENOMEM;
+-			}
+-
+-			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
+-				| PT_WRITABLE_MASK | PT_USER_MASK;
+-		}
+-		table_addr = table[index] & PT64_BASE_ADDR_MASK;
+-	}
 -}
 -
- /**
-  *	atiixp_set_pio_mode	-	set host controller for PIO mode
-  *	@drive: drive
-@@ -132,29 +93,33 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 	int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8;
- 	u32 tmp32;
- 	u16 tmp16;
+-static void mmu_free_roots(struct kvm_vcpu *vcpu)
+-{
+-	int i;
+-	struct kvm_mmu_page *page;
 -
--	if (speed < XFER_MW_DMA_0)
+-	if (!VALID_PAGE(vcpu->mmu.root_hpa))
 -		return;
-+	u16 udma_ctl = 0;
- 
- 	spin_lock_irqsave(&atiixp_lock, flags);
- 
--	save_mdma_mode[drive->dn] = 0;
-+	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &udma_ctl);
-+
- 	if (speed >= XFER_UDMA_0) {
- 		pci_read_config_word(dev, ATIIXP_IDE_UDMA_MODE, &tmp16);
- 		tmp16 &= ~(0x07 << (drive->dn * 4));
- 		tmp16 |= ((speed & 0x07) << (drive->dn * 4));
- 		pci_write_config_word(dev, ATIIXP_IDE_UDMA_MODE, tmp16);
--	} else {
--		if ((speed >= XFER_MW_DMA_0) && (speed <= XFER_MW_DMA_2)) {
--			save_mdma_mode[drive->dn] = speed;
--			pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
--			tmp32 &= ~(0xff << timing_shift);
--			tmp32 |= (mdma_timing[speed & 0x03].recover_width << timing_shift) |
--				(mdma_timing[speed & 0x03].command_width << (timing_shift + 4));
--			pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);
+-#ifdef CONFIG_X86_64
+-	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+-		hpa_t root = vcpu->mmu.root_hpa;
+-
+-		page = page_header(root);
+-		--page->root_count;
+-		vcpu->mmu.root_hpa = INVALID_PAGE;
+-		return;
+-	}
+-#endif
+-	for (i = 0; i < 4; ++i) {
+-		hpa_t root = vcpu->mmu.pae_root[i];
+-
+-		if (root) {
+-			root &= PT64_BASE_ADDR_MASK;
+-			page = page_header(root);
+-			--page->root_count;
 -		}
-+
-+		udma_ctl |= (1 << drive->dn);
-+	} else if (speed >= XFER_MW_DMA_0) {
-+		u8 i = speed & 0x03;
-+
-+		pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
-+		tmp32 &= ~(0xff << timing_shift);
-+		tmp32 |= (mdma_timing[i].recover_width << timing_shift) |
-+			 (mdma_timing[i].command_width << (timing_shift + 4));
-+		pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);
-+
-+		udma_ctl &= ~(1 << drive->dn);
- 	}
- 
-+	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, udma_ctl);
-+
- 	spin_unlock_irqrestore(&atiixp_lock, flags);
- }
- 
-@@ -184,9 +149,6 @@ static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
- 		hwif->cbl = ATA_CBL_PATA80;
- 	else
- 		hwif->cbl = ATA_CBL_PATA40;
+-		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+-	}
+-	vcpu->mmu.root_hpa = INVALID_PAGE;
+-}
 -
--	hwif->dma_host_on = &atiixp_dma_host_on;
--	hwif->dma_host_off = &atiixp_dma_host_off;
- }
- 
- static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
-diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
-index 4aa4810..da3565e 100644
---- a/drivers/ide/pci/cmd640.c
-+++ b/drivers/ide/pci/cmd640.c
-@@ -706,9 +706,9 @@ static int pci_conf2(void)
- }
- 
- /*
-- * Probe for a cmd640 chipset, and initialize it if found.  Called from ide.c
-+ * Probe for a cmd640 chipset, and initialize it if found.
-  */
--int __init ide_probe_for_cmd640x (void)
-+static int __init cmd640x_init(void)
- {
- #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- 	int second_port_toggled = 0;
-@@ -717,6 +717,7 @@ int __init ide_probe_for_cmd640x (void)
- 	const char *bus_type, *port2;
- 	unsigned int index;
- 	u8 b, cfr;
-+	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- 
- 	if (cmd640_vlb && probe_for_cmd640_vlb()) {
- 		bus_type = "VLB";
-@@ -769,6 +770,8 @@ int __init ide_probe_for_cmd640x (void)
- 	cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode;
- #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
- 
-+	idx[0] = cmd_hwif0->index;
-+
- 	/*
- 	 * Ensure compatibility by always using the slowest timings
- 	 * for access to the drive's command register block,
-@@ -826,6 +829,8 @@ int __init ide_probe_for_cmd640x (void)
- 		cmd_hwif1->pio_mask = ATA_PIO5;
- 		cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode;
- #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-+
-+		idx[1] = cmd_hwif1->index;
- 	}
- 	printk(KERN_INFO "%s: %sserialized, secondary interface %s\n", cmd_hwif1->name,
- 		cmd_hwif0->serialized ? "" : "not ", port2);
-@@ -872,6 +877,13 @@ int __init ide_probe_for_cmd640x (void)
- #ifdef CMD640_DUMP_REGS
- 	cmd640_dump_regs();
- #endif
-+
-+	ide_device_add(idx);
-+
- 	return 1;
- }
- 
-+module_param_named(probe_vlb, cmd640_vlb, bool, 0);
-+MODULE_PARM_DESC(probe_vlb, "probe for VLB version of CMD640 chipset");
-+
-+module_init(cmd640x_init);
-diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
-index bc55333..cd4eb9d 100644
---- a/drivers/ide/pci/cmd64x.c
-+++ b/drivers/ide/pci/cmd64x.c
-@@ -1,5 +1,5 @@
- /*
-- * linux/drivers/ide/pci/cmd64x.c		Version 1.52	Dec 24, 2007
-+ * linux/drivers/ide/pci/cmd64x.c		Version 1.53	Dec 24, 2007
-  *
-  * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
-  *           Due to massive hardware bugs, UltraDMA is only supported
-@@ -22,8 +22,6 @@
- 
- #include <asm/io.h>
- 
--#define DISPLAY_CMD64X_TIMINGS
+-static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+-{
+-	int i;
+-	gfn_t root_gfn;
+-	struct kvm_mmu_page *page;
 -
- #define CMD_DEBUG 0
- 
- #if CMD_DEBUG
-@@ -37,11 +35,6 @@
-  */
- #define CFR		0x50
- #define   CFR_INTR_CH0		0x04
--#define CNTRL		0x51
--#define   CNTRL_ENA_1ST 	0x04
--#define   CNTRL_ENA_2ND 	0x08
--#define   CNTRL_DIS_RA0 	0x40
--#define   CNTRL_DIS_RA1 	0x80
- 
- #define	CMDTIM		0x52
- #define	ARTTIM0		0x53
-@@ -60,108 +53,13 @@
- #define MRDMODE		0x71
- #define   MRDMODE_INTR_CH0	0x04
- #define   MRDMODE_INTR_CH1	0x08
--#define   MRDMODE_BLK_CH0	0x10
--#define   MRDMODE_BLK_CH1	0x20
--#define BMIDESR0	0x72
- #define UDIDETCR0	0x73
- #define DTPR0		0x74
- #define BMIDECR1	0x78
- #define BMIDECSR	0x79
--#define BMIDESR1	0x7A
- #define UDIDETCR1	0x7B
- #define DTPR1		0x7C
- 
--#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
--#include <linux/stat.h>
--#include <linux/proc_fs.h>
+-	root_gfn = vcpu->cr3 >> PAGE_SHIFT;
 -
--static u8 cmd64x_proc = 0;
+-#ifdef CONFIG_X86_64
+-	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+-		hpa_t root = vcpu->mmu.root_hpa;
 -
--#define CMD_MAX_DEVS		5
+-		ASSERT(!VALID_PAGE(root));
+-		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
+-					PT64_ROOT_LEVEL, 0, 0, NULL);
+-		root = __pa(page->spt);
+-		++page->root_count;
+-		vcpu->mmu.root_hpa = root;
+-		return;
+-	}
+-#endif
+-	for (i = 0; i < 4; ++i) {
+-		hpa_t root = vcpu->mmu.pae_root[i];
 -
--static struct pci_dev *cmd_devs[CMD_MAX_DEVS];
--static int n_cmd_devs;
+-		ASSERT(!VALID_PAGE(root));
+-		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
+-			if (!is_present_pte(vcpu->pdptrs[i])) {
+-				vcpu->mmu.pae_root[i] = 0;
+-				continue;
+-			}
+-			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
+-		} else if (vcpu->mmu.root_level == 0)
+-			root_gfn = 0;
+-		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
+-					PT32_ROOT_LEVEL, !is_paging(vcpu),
+-					0, NULL);
+-		root = __pa(page->spt);
+-		++page->root_count;
+-		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+-	}
+-	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+-}
 -
--static char * print_cmd64x_get_info (char *buf, struct pci_dev *dev, int index)
+-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 -{
--	char *p = buf;
--	u8 reg72 = 0, reg73 = 0;			/* primary */
--	u8 reg7a = 0, reg7b = 0;			/* secondary */
--	u8 reg50 = 1, reg51 = 1, reg57 = 0, reg71 = 0;	/* extra */
+-	return vaddr;
+-}
 -
--	p += sprintf(p, "\nController: %d\n", index);
--	p += sprintf(p, "PCI-%x Chipset.\n", dev->device);
+-static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+-			       u32 error_code)
+-{
+-	gpa_t addr = gva;
+-	hpa_t paddr;
+-	int r;
 -
--	(void) pci_read_config_byte(dev, CFR,       &reg50);
--	(void) pci_read_config_byte(dev, CNTRL,     &reg51);
--	(void) pci_read_config_byte(dev, ARTTIM23,  &reg57);
--	(void) pci_read_config_byte(dev, MRDMODE,   &reg71);
--	(void) pci_read_config_byte(dev, BMIDESR0,  &reg72);
--	(void) pci_read_config_byte(dev, UDIDETCR0, &reg73);
--	(void) pci_read_config_byte(dev, BMIDESR1,  &reg7a);
--	(void) pci_read_config_byte(dev, UDIDETCR1, &reg7b);
+-	r = mmu_topup_memory_caches(vcpu);
+-	if (r)
+-		return r;
 -
--	/* PCI0643/6 originally didn't have the primary channel enable bit */
--	if ((dev->device == PCI_DEVICE_ID_CMD_643) ||
--	    (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 3))
--		reg51 |= CNTRL_ENA_1ST;
+-	ASSERT(vcpu);
+-	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 -
--	p += sprintf(p, "---------------- Primary Channel "
--			"---------------- Secondary Channel ------------\n");
--	p += sprintf(p, "                 %s                         %s\n",
--		 (reg51 & CNTRL_ENA_1ST) ? "enabled " : "disabled",
--		 (reg51 & CNTRL_ENA_2ND) ? "enabled " : "disabled");
--	p += sprintf(p, "---------------- drive0 --------- drive1 "
--			"-------- drive0 --------- drive1 ------\n");
--	p += sprintf(p, "DMA enabled:     %s              %s"
--			"             %s              %s\n",
--		(reg72 & 0x20) ? "yes" : "no ", (reg72 & 0x40) ? "yes" : "no ",
--		(reg7a & 0x20) ? "yes" : "no ", (reg7a & 0x40) ? "yes" : "no ");
--	p += sprintf(p, "UltraDMA mode:   %s (%c)          %s (%c)",
--		( reg73 & 0x01) ? " on" : "off",
--		((reg73 & 0x30) == 0x30) ? ((reg73 & 0x04) ? '3' : '0') :
--		((reg73 & 0x30) == 0x20) ? ((reg73 & 0x04) ? '3' : '1') :
--		((reg73 & 0x30) == 0x10) ? ((reg73 & 0x04) ? '4' : '2') :
--		((reg73 & 0x30) == 0x00) ? ((reg73 & 0x04) ? '5' : '2') : '?',
--		( reg73 & 0x02) ? " on" : "off",
--		((reg73 & 0xC0) == 0xC0) ? ((reg73 & 0x08) ? '3' : '0') :
--		((reg73 & 0xC0) == 0x80) ? ((reg73 & 0x08) ? '3' : '1') :
--		((reg73 & 0xC0) == 0x40) ? ((reg73 & 0x08) ? '4' : '2') :
--		((reg73 & 0xC0) == 0x00) ? ((reg73 & 0x08) ? '5' : '2') : '?');
--	p += sprintf(p, "         %s (%c)          %s (%c)\n",
--		( reg7b & 0x01) ? " on" : "off",
--		((reg7b & 0x30) == 0x30) ? ((reg7b & 0x04) ? '3' : '0') :
--		((reg7b & 0x30) == 0x20) ? ((reg7b & 0x04) ? '3' : '1') :
--		((reg7b & 0x30) == 0x10) ? ((reg7b & 0x04) ? '4' : '2') :
--		((reg7b & 0x30) == 0x00) ? ((reg7b & 0x04) ? '5' : '2') : '?',
--		( reg7b & 0x02) ? " on" : "off",
--		((reg7b & 0xC0) == 0xC0) ? ((reg7b & 0x08) ? '3' : '0') :
--		((reg7b & 0xC0) == 0x80) ? ((reg7b & 0x08) ? '3' : '1') :
--		((reg7b & 0xC0) == 0x40) ? ((reg7b & 0x08) ? '4' : '2') :
--		((reg7b & 0xC0) == 0x00) ? ((reg7b & 0x08) ? '5' : '2') : '?');
--	p += sprintf(p, "Interrupt:       %s, %s                 %s, %s\n",
--		(reg71 & MRDMODE_BLK_CH0  ) ? "blocked" : "enabled",
--		(reg50 & CFR_INTR_CH0	  ) ? "pending" : "clear  ",
--		(reg71 & MRDMODE_BLK_CH1  ) ? "blocked" : "enabled",
--		(reg57 & ARTTIM23_INTR_CH1) ? "pending" : "clear  ");
 -
--	return (char *)p;
+-	paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
+-
+-	if (is_error_hpa(paddr))
+-		return 1;
+-
+-	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
 -}
 -
--static int cmd64x_get_info (char *buffer, char **addr, off_t offset, int count)
+-static void nonpaging_free(struct kvm_vcpu *vcpu)
 -{
--	char *p = buffer;
--	int i;
+-	mmu_free_roots(vcpu);
+-}
 -
--	for (i = 0; i < n_cmd_devs; i++) {
--		struct pci_dev *dev	= cmd_devs[i];
--		p = print_cmd64x_get_info(p, dev, i);
--	}
--	return p-buffer;	/* => must be less than 4k! */
+-static int nonpaging_init_context(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_mmu *context = &vcpu->mmu;
+-
+-	context->new_cr3 = nonpaging_new_cr3;
+-	context->page_fault = nonpaging_page_fault;
+-	context->gva_to_gpa = nonpaging_gva_to_gpa;
+-	context->free = nonpaging_free;
+-	context->root_level = 0;
+-	context->shadow_root_level = PT32E_ROOT_LEVEL;
+-	context->root_hpa = INVALID_PAGE;
+-	return 0;
 -}
 -
--#endif	/* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
+-static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
+-{
+-	++vcpu->stat.tlb_flush;
+-	kvm_x86_ops->tlb_flush(vcpu);
+-}
 -
- static u8 quantize_timing(int timing, int quant)
- {
- 	return (timing + quant - 1) / quant;
-@@ -322,8 +220,6 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 	case XFER_MW_DMA_0:
- 		program_cycle_times(drive, 480, 215);
- 		break;
--	default:
--		return;
- 	}
- 
- 	if (speed >= XFER_SW_DMA_0)
-@@ -333,14 +229,15 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
- static int cmd648_ide_dma_end (ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif	= HWIF(drive);
-+	unsigned long base	= hwif->dma_base - (hwif->channel * 8);
- 	int err			= __ide_dma_end(drive);
- 	u8  irq_mask		= hwif->channel ? MRDMODE_INTR_CH1 :
- 						  MRDMODE_INTR_CH0;
--	u8  mrdmode		= inb(hwif->dma_master + 0x01);
-+	u8  mrdmode		= inb(base + 1);
- 
- 	/* clear the interrupt bit */
- 	outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
--	     hwif->dma_master + 0x01);
-+	     base + 1);
- 
- 	return err;
- }
-@@ -365,10 +262,11 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
- static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif	= HWIF(drive);
-+	unsigned long base	= hwif->dma_base - (hwif->channel * 8);
- 	u8 irq_mask		= hwif->channel ? MRDMODE_INTR_CH1 :
- 						  MRDMODE_INTR_CH0;
- 	u8 dma_stat		= inb(hwif->dma_status);
--	u8 mrdmode		= inb(hwif->dma_master + 0x01);
-+	u8 mrdmode		= inb(base + 1);
- 
- #ifdef DEBUG
- 	printk("%s: dma_stat: 0x%02x mrdmode: 0x%02x irq_mask: 0x%02x\n",
-@@ -472,16 +370,6 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
- 	mrdmode &= ~0x30;
- 	(void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
- 
--#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
+-static void paging_new_cr3(struct kvm_vcpu *vcpu)
+-{
+-	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
+-	mmu_free_roots(vcpu);
+-}
 -
--	cmd_devs[n_cmd_devs++] = dev;
+-static void inject_page_fault(struct kvm_vcpu *vcpu,
+-			      u64 addr,
+-			      u32 err_code)
+-{
+-	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
+-}
 -
--	if (!cmd64x_proc) {
--		cmd64x_proc = 1;
--		ide_pci_create_host_proc("cmd64x", cmd64x_get_info);
--	}
--#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_IDE_PROC_FS */
+-static void paging_free(struct kvm_vcpu *vcpu)
+-{
+-	nonpaging_free(vcpu);
+-}
 -
- 	return 0;
- }
- 
-diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
-index 0466462..6ec00b8 100644
---- a/drivers/ide/pci/cs5520.c
-+++ b/drivers/ide/pci/cs5520.c
-@@ -71,7 +71,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
- 	ide_hwif_t *hwif = HWIF(drive);
- 	struct pci_dev *pdev = hwif->pci_dev;
- 	int controller = drive->dn > 1 ? 1 : 0;
--	u8 reg;
- 
- 	/* FIXME: if DMA = 1 do we need to set the DMA bit here ? */
- 
-@@ -91,11 +90,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
- 	pci_write_config_byte(pdev, 0x66 + 4*controller + (drive->dn&1),
- 		(cs5520_pio_clocks[pio].recovery << 4) |
- 		(cs5520_pio_clocks[pio].assert));
--		
--	/* Set the DMA enable/disable flag */
--	reg = inb(hwif->dma_base + 0x02 + 8*controller);
--	reg |= 1<<((drive->dn&1)+5);
--	outb(reg, hwif->dma_base + 0x02 + 8*controller);
- }
- 
- static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
-@@ -109,13 +103,14 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
-  *	We wrap the DMA activate to set the vdma flag. This is needed
-  *	so that the IDE DMA layer issues PIO not DMA commands over the
-  *	DMA channel
-+ *
-+ *	ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA
-  */
-- 
--static int cs5520_dma_on(ide_drive_t *drive)
-+
-+static void cs5520_dma_host_set(ide_drive_t *drive, int on)
- {
--	/* ATAPI is harder so leave it for now */
--	drive->vdma = 1;
+-#define PTTYPE 64
+-#include "paging_tmpl.h"
+-#undef PTTYPE
+-
+-#define PTTYPE 32
+-#include "paging_tmpl.h"
+-#undef PTTYPE
+-
+-static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
+-{
+-	struct kvm_mmu *context = &vcpu->mmu;
+-
+-	ASSERT(is_pae(vcpu));
+-	context->new_cr3 = paging_new_cr3;
+-	context->page_fault = paging64_page_fault;
+-	context->gva_to_gpa = paging64_gva_to_gpa;
+-	context->free = paging_free;
+-	context->root_level = level;
+-	context->shadow_root_level = level;
+-	context->root_hpa = INVALID_PAGE;
 -	return 0;
-+	drive->vdma = on;
-+	ide_dma_host_set(drive, on);
- }
- 
- static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
-@@ -126,7 +121,7 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
- 	if (hwif->dma_base == 0)
- 		return;
- 
--	hwif->ide_dma_on = &cs5520_dma_on;
-+	hwif->dma_host_set = &cs5520_dma_host_set;
- }
- 
- #define DECLARE_CS_DEV(name_str)				\
-@@ -137,6 +132,7 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
- 				  IDE_HFLAG_CS5520 |		\
- 				  IDE_HFLAG_VDMA |		\
- 				  IDE_HFLAG_NO_ATAPI_DMA |	\
-+				  IDE_HFLAG_ABUSE_SET_DMA_MODE |\
- 				  IDE_HFLAG_BOOTABLE,		\
- 		.pio_mask	= ATA_PIO4,			\
- 	}
-diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
-index 5476903..df5966b 100644
---- a/drivers/ide/pci/cs5530.c
-+++ b/drivers/ide/pci/cs5530.c
-@@ -116,8 +116,6 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
- 		case XFER_MW_DMA_0:	timings = 0x00077771; break;
- 		case XFER_MW_DMA_1:	timings = 0x00012121; break;
- 		case XFER_MW_DMA_2:	timings = 0x00002020; break;
--		default:
--			return;
- 	}
- 	basereg = CS5530_BASEREG(drive->hwif);
- 	reg = inl(basereg + 4);			/* get drive0 config register */
-diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
-index ddcbeba..50b3d77 100644
---- a/drivers/ide/pci/cs5535.c
-+++ b/drivers/ide/pci/cs5535.c
-@@ -190,7 +190,7 @@ static const struct ide_port_info cs5535_chipset __devinitdata = {
- 	.name		= "CS5535",
- 	.init_hwif	= init_hwif_cs5535,
- 	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE |
--			  IDE_HFLAG_BOOTABLE,
-+			  IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_BOOTABLE,
- 	.pio_mask	= ATA_PIO4,
- 	.mwdma_mask	= ATA_MWDMA2,
- 	.udma_mask	= ATA_UDMA4,
-diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
-index 1cd4e9c..3ec4c65 100644
---- a/drivers/ide/pci/cy82c693.c
-+++ b/drivers/ide/pci/cy82c693.c
-@@ -1,5 +1,5 @@
- /*
-- * linux/drivers/ide/pci/cy82c693.c		Version 0.42	Oct 23, 2007
-+ * linux/drivers/ide/pci/cy82c693.c		Version 0.44	Nov 8, 2007
-  *
-  *  Copyright (C) 1998-2000 Andreas S. Krebs (akrebs at altavista.net), Maintainer
-  *  Copyright (C) 1998-2002 Andre Hedrick <andre at linux-ide.org>, Integrator
-@@ -176,17 +176,12 @@ static void compute_clocks (u8 pio, pio_clocks_t *p_pclk)
-  * set DMA mode a specific channel for CY82C693
-  */
- 
--static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
-+static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
- {
--	u8 index = 0, data = 0;
-+	ide_hwif_t *hwif = drive->hwif;
-+	u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
- 
--	if (mode>2)	/* make sure we set a valid mode */
--		mode = 2;
--			   
--	if (mode > drive->id->tDMA)  /* to be absolutly sure we have a valid mode */
--		mode = drive->id->tDMA;
--	
--	index = (HWIF(drive)->channel==0) ? CY82_INDEX_CHANNEL0 : CY82_INDEX_CHANNEL1;
-+	index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
- 
- #if CY82C693_DEBUG_LOGS
- 	/* for debug let's show the previous values */
-@@ -199,7 +194,7 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
- 		(data&0x3), ((data>>2)&1));
- #endif /* CY82C693_DEBUG_LOGS */
- 
--	data = (u8)mode|(u8)(single<<2);
-+	data = (mode & 3) | (single << 2);
- 
- 	outb(index, CY82_INDEX_PORT);
- 	outb(data, CY82_DATA_PORT);
-@@ -207,7 +202,7 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
- #if CY82C693_DEBUG_INFO
- 	printk(KERN_INFO "%s (ch=%d, dev=%d): set DMA mode to %d (single=%d)\n",
- 		drive->name, HWIF(drive)->channel, drive->select.b.unit,
--		mode, single);
-+		mode & 3, single);
- #endif /* CY82C693_DEBUG_INFO */
- 
- 	/* 
-@@ -230,39 +225,6 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
- #endif /* CY82C693_DEBUG_INFO */
- }
- 
--/* 
-- * used to set DMA mode for CY82C693 (single and multi modes)
-- */
--static int cy82c693_ide_dma_on (ide_drive_t *drive)
+-}
+-
+-static int paging64_init_context(struct kvm_vcpu *vcpu)
 -{
--	struct hd_driveid *id = drive->id;
+-	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
+-}
 -
--#if CY82C693_DEBUG_INFO
--	printk (KERN_INFO "dma_on: %s\n", drive->name);
--#endif /* CY82C693_DEBUG_INFO */
+-static int paging32_init_context(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_mmu *context = &vcpu->mmu;
 -
--	if (id != NULL) {		
--		/* Enable DMA on any drive that has DMA
--		 * (multi or single) enabled
--		 */
--		if (id->field_valid & 2) {	/* regular DMA */
--			int mmode, smode;
+-	context->new_cr3 = paging_new_cr3;
+-	context->page_fault = paging32_page_fault;
+-	context->gva_to_gpa = paging32_gva_to_gpa;
+-	context->free = paging_free;
+-	context->root_level = PT32_ROOT_LEVEL;
+-	context->shadow_root_level = PT32E_ROOT_LEVEL;
+-	context->root_hpa = INVALID_PAGE;
+-	return 0;
+-}
 -
--			mmode = id->dma_mword & (id->dma_mword >> 8);
--			smode = id->dma_1word & (id->dma_1word >> 8);
--			       		      
--			if (mmode != 0) {
--				/* enable multi */
--				cy82c693_dma_enable(drive, (mmode >> 1), 0);
--			} else if (smode != 0) {
--				/* enable single */
--				cy82c693_dma_enable(drive, (smode >> 1), 1);
+-static int paging32E_init_context(struct kvm_vcpu *vcpu)
+-{
+-	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
+-}
+-
+-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+-{
+-	ASSERT(vcpu);
+-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+-
+-	if (!is_paging(vcpu))
+-		return nonpaging_init_context(vcpu);
+-	else if (is_long_mode(vcpu))
+-		return paging64_init_context(vcpu);
+-	else if (is_pae(vcpu))
+-		return paging32E_init_context(vcpu);
+-	else
+-		return paging32_init_context(vcpu);
+-}
+-
+-static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
+-{
+-	ASSERT(vcpu);
+-	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
+-		vcpu->mmu.free(vcpu);
+-		vcpu->mmu.root_hpa = INVALID_PAGE;
+-	}
+-}
+-
+-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
+-{
+-	destroy_kvm_mmu(vcpu);
+-	return init_kvm_mmu(vcpu);
+-}
+-EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
+-
+-int kvm_mmu_load(struct kvm_vcpu *vcpu)
+-{
+-	int r;
+-
+-	mutex_lock(&vcpu->kvm->lock);
+-	r = mmu_topup_memory_caches(vcpu);
+-	if (r)
+-		goto out;
+-	mmu_alloc_roots(vcpu);
+-	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+-	kvm_mmu_flush_tlb(vcpu);
+-out:
+-	mutex_unlock(&vcpu->kvm->lock);
+-	return r;
+-}
+-EXPORT_SYMBOL_GPL(kvm_mmu_load);
+-
+-void kvm_mmu_unload(struct kvm_vcpu *vcpu)
+-{
+-	mmu_free_roots(vcpu);
+-}
+-
+-static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
+-				  struct kvm_mmu_page *page,
+-				  u64 *spte)
+-{
+-	u64 pte;
+-	struct kvm_mmu_page *child;
+-
+-	pte = *spte;
+-	if (is_present_pte(pte)) {
+-		if (page->role.level == PT_PAGE_TABLE_LEVEL)
+-			rmap_remove(spte);
+-		else {
+-			child = page_header(pte & PT64_BASE_ADDR_MASK);
+-			mmu_page_remove_parent_pte(child, spte);
+-		}
+-	}
+-	set_shadow_pte(spte, 0);
+-	kvm_flush_remote_tlbs(vcpu->kvm);
+-}
+-
+-static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+-				  struct kvm_mmu_page *page,
+-				  u64 *spte,
+-				  const void *new, int bytes)
+-{
+-	if (page->role.level != PT_PAGE_TABLE_LEVEL)
+-		return;
+-
+-	if (page->role.glevels == PT32_ROOT_LEVEL)
+-		paging32_update_pte(vcpu, page, spte, new, bytes);
+-	else
+-		paging64_update_pte(vcpu, page, spte, new, bytes);
+-}
+-
+-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+-		       const u8 *new, int bytes)
+-{
+-	gfn_t gfn = gpa >> PAGE_SHIFT;
+-	struct kvm_mmu_page *page;
+-	struct hlist_node *node, *n;
+-	struct hlist_head *bucket;
+-	unsigned index;
+-	u64 *spte;
+-	unsigned offset = offset_in_page(gpa);
+-	unsigned pte_size;
+-	unsigned page_offset;
+-	unsigned misaligned;
+-	unsigned quadrant;
+-	int level;
+-	int flooded = 0;
+-	int npte;
+-
+-	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+-	if (gfn == vcpu->last_pt_write_gfn) {
+-		++vcpu->last_pt_write_count;
+-		if (vcpu->last_pt_write_count >= 3)
+-			flooded = 1;
+-	} else {
+-		vcpu->last_pt_write_gfn = gfn;
+-		vcpu->last_pt_write_count = 1;
+-	}
+-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+-	bucket = &vcpu->kvm->mmu_page_hash[index];
+-	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
+-		if (page->gfn != gfn || page->role.metaphysical)
+-			continue;
+-		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+-		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+-		misaligned |= bytes < 4;
+-		if (misaligned || flooded) {
+-			/*
+-			 * Misaligned accesses are too much trouble to fix
+-			 * up; also, they usually indicate a page is not used
+-			 * as a page table.
+-			 *
+-			 * If we're seeing too many writes to a page,
+-			 * it may no longer be a page table, or we may be
+-			 * forking, in which case it is better to unmap the
+-			 * page.
+-			 */
+-			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
+-				 gpa, bytes, page->role.word);
+-			kvm_mmu_zap_page(vcpu->kvm, page);
+-			continue;
+-		}
+-		page_offset = offset;
+-		level = page->role.level;
+-		npte = 1;
+-		if (page->role.glevels == PT32_ROOT_LEVEL) {
+-			page_offset <<= 1;	/* 32->64 */
+-			/*
+-			 * A 32-bit pde maps 4MB while the shadow pdes map
+-			 * only 2MB.  So we need to double the offset again
+-			 * and zap two pdes instead of one.
+-			 */
+-			if (level == PT32_ROOT_LEVEL) {
+-				page_offset &= ~7; /* kill rounding error */
+-				page_offset <<= 1;
+-				npte = 2;
 -			}
+-			quadrant = page_offset >> PAGE_SHIFT;
+-			page_offset &= ~PAGE_MASK;
+-			if (quadrant != page->role.quadrant)
+-				continue;
+-		}
+-		spte = &page->spt[page_offset / sizeof(*spte)];
+-		while (npte--) {
+-			mmu_pte_write_zap_pte(vcpu, page, spte);
+-			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
+-			++spte;
 -		}
 -	}
--        return __ide_dma_on(drive);
 -}
 -
- static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
- {
- 	ide_hwif_t *hwif = HWIF(drive);
-@@ -429,11 +391,7 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
- static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
- {
- 	hwif->set_pio_mode = &cy82c693_set_pio_mode;
+-int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
+-{
+-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 -
--	if (hwif->dma_base == 0)
--		return;
+-	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+-}
+-
+-void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+-{
+-	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+-		struct kvm_mmu_page *page;
+-
+-		page = container_of(vcpu->kvm->active_mmu_pages.prev,
+-				    struct kvm_mmu_page, link);
+-		kvm_mmu_zap_page(vcpu->kvm, page);
+-	}
+-}
+-
+-static void free_mmu_pages(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_mmu_page *page;
+-
+-	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
+-		page = container_of(vcpu->kvm->active_mmu_pages.next,
+-				    struct kvm_mmu_page, link);
+-		kvm_mmu_zap_page(vcpu->kvm, page);
+-	}
+-	free_page((unsigned long)vcpu->mmu.pae_root);
+-}
+-
+-static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
+-{
+-	struct page *page;
+-	int i;
+-
+-	ASSERT(vcpu);
+-
+-	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
 -
--	hwif->ide_dma_on = &cy82c693_ide_dma_on;
-+	hwif->set_dma_mode = &cy82c693_set_dma_mode;
- }
- 
- static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
-@@ -454,11 +412,11 @@ static const struct ide_port_info cy82c693_chipset __devinitdata = {
- 	.init_iops	= init_iops_cy82c693,
- 	.init_hwif	= init_hwif_cy82c693,
- 	.chipset	= ide_cy82c693,
--	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_TRUST_BIOS_FOR_DMA |
-+	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_CY82C693 |
- 			  IDE_HFLAG_BOOTABLE,
- 	.pio_mask	= ATA_PIO4,
--	.swdma_mask	= ATA_SWDMA2_ONLY,
--	.mwdma_mask	= ATA_MWDMA2_ONLY,
-+	.swdma_mask	= ATA_SWDMA2,
-+	.mwdma_mask	= ATA_MWDMA2,
- };
- 
- static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
-index 8382908..26aa492 100644
---- a/drivers/ide/pci/delkin_cb.c
-+++ b/drivers/ide/pci/delkin_cb.c
-@@ -80,7 +80,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
- 	hw.irq = dev->irq;
- 	hw.chipset = ide_pci;		/* this enables IRQ sharing */
- 
--	rc = ide_register_hw(&hw, &ide_undecoded_slave, 0, &hwif);
-+	rc = ide_register_hw(&hw, &ide_undecoded_slave, &hwif);
- 	if (rc < 0) {
- 		printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc);
- 		pci_disable_device(dev);
-diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
-index ae6307f..dfba0d1 100644
---- a/drivers/ide/pci/hpt34x.c
-+++ b/drivers/ide/pci/hpt34x.c
-@@ -129,14 +129,18 @@ static void __devinit init_hwif_hpt34x(ide_hwif_t *hwif)
- 	hwif->set_dma_mode = &hpt34x_set_mode;
- }
- 
-+#define IDE_HFLAGS_HPT34X \
-+	(IDE_HFLAG_NO_ATAPI_DMA | \
-+	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
-+	 IDE_HFLAG_NO_AUTODMA)
-+
- static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
- 	{ /* 0 */
- 		.name		= "HPT343",
- 		.init_chipset	= init_chipset_hpt34x,
- 		.init_hwif	= init_hwif_hpt34x,
- 		.extra		= 16,
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
--				  IDE_HFLAG_NO_AUTODMA,
-+		.host_flags	= IDE_HFLAGS_HPT34X,
- 		.pio_mask	= ATA_PIO5,
- 	},
- 	{ /* 1 */
-@@ -144,9 +148,7 @@ static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
- 		.init_chipset	= init_chipset_hpt34x,
- 		.init_hwif	= init_hwif_hpt34x,
- 		.extra		= 16,
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA |
--				  IDE_HFLAG_NO_AUTODMA |
--				  IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD,
- 		.pio_mask	= ATA_PIO5,
- #ifdef CONFIG_HPT34X_AUTODMA
- 		.swdma_mask	= ATA_SWDMA2,
-diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
-index 9fce25b..1268593 100644
---- a/drivers/ide/pci/hpt366.c
-+++ b/drivers/ide/pci/hpt366.c
-@@ -1,5 +1,5 @@
- /*
-- * linux/drivers/ide/pci/hpt366.c		Version 1.22	Dec 4, 2007
-+ * linux/drivers/ide/pci/hpt366.c		Version 1.30	Dec 12, 2007
-  *
-  * Copyright (C) 1999-2003		Andre Hedrick <andre at linux-ide.org>
-  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
-@@ -88,7 +88,7 @@
-  * - rename all the register related variables consistently
-  * - move all the interrupt twiddling code from the speedproc handlers into
-  *   init_hwif_hpt366(), also grouping all the DMA related code together there
-- * - merge two HPT37x speedproc handlers, fix the PIO timing register mask and
-+ * - merge HPT36x/HPT37x speedproc handlers, fix PIO timing register mask and
-  *   separate the UltraDMA and MWDMA masks there to avoid changing PIO timings
-  *   when setting an UltraDMA mode
-  * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select
-@@ -458,6 +458,13 @@ enum ata_clock {
- 	NUM_ATA_CLOCKS
- };
- 
-+struct hpt_timings {
-+	u32 pio_mask;
-+	u32 dma_mask;
-+	u32 ultra_mask;
-+	u32 *clock_table[NUM_ATA_CLOCKS];
-+};
-+
- /*
-  *	Hold all the HighPoint chip information in one place.
-  */
-@@ -468,7 +475,8 @@ struct hpt_info {
- 	u8 udma_mask;		/* Allowed UltraDMA modes mask. */
- 	u8 dpll_clk;		/* DPLL clock in MHz */
- 	u8 pci_clk;		/* PCI  clock in MHz */
--	u32 **settings; 	/* Chipset settings table */
-+	struct hpt_timings *timings; /* Chipset timing data */
-+	u8 clock;		/* ATA clock selected */
- };
- 
- /* Supported HighPoint chips */
-@@ -486,20 +494,30 @@ enum {
- 	HPT371N
- };
- 
--static u32 *hpt36x_settings[NUM_ATA_CLOCKS] = {
--	twenty_five_base_hpt36x,
--	thirty_three_base_hpt36x,
--	forty_base_hpt36x,
--	NULL,
--	NULL
-+static struct hpt_timings hpt36x_timings = {
-+	.pio_mask	= 0xc1f8ffff,
-+	.dma_mask	= 0x303800ff,
-+	.ultra_mask	= 0x30070000,
-+	.clock_table	= {
-+		[ATA_CLOCK_25MHZ] = twenty_five_base_hpt36x,
-+		[ATA_CLOCK_33MHZ] = thirty_three_base_hpt36x,
-+		[ATA_CLOCK_40MHZ] = forty_base_hpt36x,
-+		[ATA_CLOCK_50MHZ] = NULL,
-+		[ATA_CLOCK_66MHZ] = NULL
-+	}
- };
- 
--static u32 *hpt37x_settings[NUM_ATA_CLOCKS] = {
--	NULL,
--	thirty_three_base_hpt37x,
--	NULL,
--	fifty_base_hpt37x,
--	sixty_six_base_hpt37x
-+static struct hpt_timings hpt37x_timings = {
-+	.pio_mask	= 0xcfc3ffff,
-+	.dma_mask	= 0x31c001ff,
-+	.ultra_mask	= 0x303c0000,
-+	.clock_table	= {
-+		[ATA_CLOCK_25MHZ] = NULL,
-+		[ATA_CLOCK_33MHZ] = thirty_three_base_hpt37x,
-+		[ATA_CLOCK_40MHZ] = NULL,
-+		[ATA_CLOCK_50MHZ] = fifty_base_hpt37x,
-+		[ATA_CLOCK_66MHZ] = sixty_six_base_hpt37x
-+	}
- };
- 
- static const struct hpt_info hpt36x __devinitdata = {
-@@ -507,7 +525,7 @@ static const struct hpt_info hpt36x __devinitdata = {
- 	.chip_type	= HPT36x,
- 	.udma_mask	= HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
- 	.dpll_clk	= 0,	/* no DPLL */
--	.settings	= hpt36x_settings
-+	.timings	= &hpt36x_timings
- };
- 
- static const struct hpt_info hpt370 __devinitdata = {
-@@ -515,7 +533,7 @@ static const struct hpt_info hpt370 __devinitdata = {
- 	.chip_type	= HPT370,
- 	.udma_mask	= HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
- 	.dpll_clk	= 48,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt370a __devinitdata = {
-@@ -523,7 +541,7 @@ static const struct hpt_info hpt370a __devinitdata = {
- 	.chip_type	= HPT370A,
- 	.udma_mask	= HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
- 	.dpll_clk	= 48,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt374 __devinitdata = {
-@@ -531,7 +549,7 @@ static const struct hpt_info hpt374 __devinitdata = {
- 	.chip_type	= HPT374,
- 	.udma_mask	= ATA_UDMA5,
- 	.dpll_clk	= 48,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt372 __devinitdata = {
-@@ -539,7 +557,7 @@ static const struct hpt_info hpt372 __devinitdata = {
- 	.chip_type	= HPT372,
- 	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- 	.dpll_clk	= 55,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt372a __devinitdata = {
-@@ -547,7 +565,7 @@ static const struct hpt_info hpt372a __devinitdata = {
- 	.chip_type	= HPT372A,
- 	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- 	.dpll_clk	= 66,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt302 __devinitdata = {
-@@ -555,7 +573,7 @@ static const struct hpt_info hpt302 __devinitdata = {
- 	.chip_type	= HPT302,
- 	.udma_mask	= HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- 	.dpll_clk	= 66,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt371 __devinitdata = {
-@@ -563,7 +581,7 @@ static const struct hpt_info hpt371 __devinitdata = {
- 	.chip_type	= HPT371,
- 	.udma_mask	= HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- 	.dpll_clk	= 66,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt372n __devinitdata = {
-@@ -571,7 +589,7 @@ static const struct hpt_info hpt372n __devinitdata = {
- 	.chip_type	= HPT372N,
- 	.udma_mask	= HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- 	.dpll_clk	= 77,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt302n __devinitdata = {
-@@ -579,7 +597,7 @@ static const struct hpt_info hpt302n __devinitdata = {
- 	.chip_type	= HPT302N,
- 	.udma_mask	= HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- 	.dpll_clk	= 77,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static const struct hpt_info hpt371n __devinitdata = {
-@@ -587,7 +605,7 @@ static const struct hpt_info hpt371n __devinitdata = {
- 	.chip_type	= HPT371N,
- 	.udma_mask	= HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- 	.dpll_clk	= 77,
--	.settings	= hpt37x_settings
-+	.timings	= &hpt37x_timings
- };
- 
- static int check_in_drive_list(ide_drive_t *drive, const char **list)
-@@ -675,94 +693,50 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
- 	for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
- 		if (xfer_speeds[i] == speed)
- 			break;
 -	/*
--	 * NOTE: info->settings only points to the pointer
--	 * to the list of the actual register values
+-	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
+-	 * Therefore we need to allocate shadow page tables in the first
+-	 * 4GB of memory, which happens to fit the DMA32 zone.
 -	 */
--	return (*info->settings)[i];
-+
-+	return info->timings->clock_table[info->clock][i];
- }
- 
--static void hpt36x_set_mode(ide_drive_t *drive, const u8 speed)
-+static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
- {
--	ide_hwif_t *hwif	= HWIF(drive);
--	struct pci_dev  *dev	= hwif->pci_dev;
-+	struct pci_dev  *dev	= HWIF(drive)->pci_dev;
- 	struct hpt_info	*info	= pci_get_drvdata(dev);
--	u8  itr_addr		= drive->dn ? 0x44 : 0x40;
-+	struct hpt_timings *t	= info->timings;
-+	u8  itr_addr		= 0x40 + (drive->dn * 4);
- 	u32 old_itr		= 0;
--	u32 itr_mask, new_itr;
+-	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+-	if (!page)
+-		goto error_1;
+-	vcpu->mmu.pae_root = page_address(page);
+-	for (i = 0; i < 4; ++i)
+-		vcpu->mmu.pae_root[i] = INVALID_PAGE;
 -
--	itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 :
--		  (speed < XFER_UDMA_0   ? 0xc0070000 : 0xc03800ff);
+-	return 0;
 -
--	new_itr = get_speed_setting(speed, info);
-+	u32 new_itr		= get_speed_setting(speed, info);
-+	u32 itr_mask		= speed < XFER_MW_DMA_0 ? t->pio_mask :
-+				 (speed < XFER_UDMA_0   ? t->dma_mask :
-+							  t->ultra_mask);
- 
-+	pci_read_config_dword(dev, itr_addr, &old_itr);
-+	new_itr = (old_itr & ~itr_mask) | (new_itr & itr_mask);
- 	/*
- 	 * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
- 	 * to avoid problems handling I/O errors later
- 	 */
--	pci_read_config_dword(dev, itr_addr, &old_itr);
--	new_itr  = (new_itr & ~itr_mask) | (old_itr & itr_mask);
- 	new_itr &= ~0xc0000000;
- 
- 	pci_write_config_dword(dev, itr_addr, new_itr);
- }
- 
--static void hpt37x_set_mode(ide_drive_t *drive, const u8 speed)
+-error_1:
+-	free_mmu_pages(vcpu);
+-	return -ENOMEM;
+-}
+-
+-int kvm_mmu_create(struct kvm_vcpu *vcpu)
 -{
--	ide_hwif_t *hwif	= HWIF(drive);
--	struct pci_dev  *dev	= hwif->pci_dev;
--	struct hpt_info	*info	= pci_get_drvdata(dev);
--	u8  itr_addr		= 0x40 + (drive->dn * 4);
--	u32 old_itr		= 0;
--	u32 itr_mask, new_itr;
+-	ASSERT(vcpu);
+-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 -
--	itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 :
--		  (speed < XFER_UDMA_0   ? 0xc03c0000 : 0xc1c001ff);
+-	return alloc_mmu_pages(vcpu);
+-}
 -
--	new_itr = get_speed_setting(speed, info);
+-int kvm_mmu_setup(struct kvm_vcpu *vcpu)
+-{
+-	ASSERT(vcpu);
+-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 -
--	pci_read_config_dword(dev, itr_addr, &old_itr);
--	new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask);
--	
--	if (speed < XFER_MW_DMA_0)
--		new_itr &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
--	pci_write_config_dword(dev, itr_addr, new_itr);
+-	return init_kvm_mmu(vcpu);
 -}
 -
--static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
+-void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 -{
--	ide_hwif_t *hwif	= HWIF(drive);
--	struct hpt_info	*info	= pci_get_drvdata(hwif->pci_dev);
+-	ASSERT(vcpu);
 -
--	if (info->chip_type >= HPT370)
--		hpt37x_set_mode(drive, speed);
--	else	/* hpt368: hpt_minimum_revision(dev, 2) */
--		hpt36x_set_mode(drive, speed);
+-	destroy_kvm_mmu(vcpu);
+-	free_mmu_pages(vcpu);
+-	mmu_free_memory_caches(vcpu);
 -}
 -
- static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
- {
- 	hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
- }
- 
--static int hpt3xx_quirkproc(ide_drive_t *drive)
-+static void hpt3xx_quirkproc(ide_drive_t *drive)
- {
- 	struct hd_driveid *id	= drive->id;
- 	const  char **list	= quirk_drives;
- 
- 	while (*list)
--		if (strstr(id->model, *list++))
--			return 1;
+-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
+-{
+-	struct kvm_mmu_page *page;
+-
+-	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
+-		int i;
+-		u64 *pt;
+-
+-		if (!test_bit(slot, &page->slot_bitmap))
+-			continue;
+-
+-		pt = page->spt;
+-		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+-			/* avoid RMW */
+-			if (pt[i] & PT_WRITABLE_MASK) {
+-				rmap_remove(&pt[i]);
+-				pt[i] &= ~PT_WRITABLE_MASK;
+-			}
+-	}
+-}
+-
+-void kvm_mmu_zap_all(struct kvm *kvm)
+-{
+-	struct kvm_mmu_page *page, *node;
+-
+-	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+-		kvm_mmu_zap_page(kvm, page);
+-
+-	kvm_flush_remote_tlbs(kvm);
+-}
+-
+-void kvm_mmu_module_exit(void)
+-{
+-	if (pte_chain_cache)
+-		kmem_cache_destroy(pte_chain_cache);
+-	if (rmap_desc_cache)
+-		kmem_cache_destroy(rmap_desc_cache);
+-	if (mmu_page_header_cache)
+-		kmem_cache_destroy(mmu_page_header_cache);
+-}
+-
+-int kvm_mmu_module_init(void)
+-{
+-	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
+-					    sizeof(struct kvm_pte_chain),
+-					    0, 0, NULL);
+-	if (!pte_chain_cache)
+-		goto nomem;
+-	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
+-					    sizeof(struct kvm_rmap_desc),
+-					    0, 0, NULL);
+-	if (!rmap_desc_cache)
+-		goto nomem;
+-
+-	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
+-						  sizeof(struct kvm_mmu_page),
+-						  0, 0, NULL);
+-	if (!mmu_page_header_cache)
+-		goto nomem;
+-
 -	return 0;
+-
+-nomem:
+-	kvm_mmu_module_exit();
+-	return -ENOMEM;
 -}
 -
--static void hpt3xx_intrproc(ide_drive_t *drive)
+-#ifdef AUDIT
+-
+-static const char *audit_msg;
+-
+-static gva_t canonicalize(gva_t gva)
 -{
--	if (drive->quirk_list)
--		return;
-+		if (strstr(id->model, *list++)) {
-+			drive->quirk_list = 1;
-+			return;
-+		}
- 
--	/* drives in the quirk_list may not like intr setups/cleanups */
--	outb(drive->ctl | 2, IDE_CONTROL_REG);
-+	drive->quirk_list = 0;
- }
- 
- static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
-@@ -914,32 +888,33 @@ static int hpt374_ide_dma_end(ide_drive_t *drive)
- 
- static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
- {
--	u8 scr2 = inb(hwif->dma_master + 0x7b);
-+	unsigned long base = hwif->extra_base;
-+	u8 scr2 = inb(base + 0x6b);
- 
- 	if ((scr2 & 0x7f) == mode)
- 		return;
- 
- 	/* Tristate the bus */
--	outb(0x80, hwif->dma_master + 0x73);
--	outb(0x80, hwif->dma_master + 0x77);
-+	outb(0x80, base + 0x63);
-+	outb(0x80, base + 0x67);
- 
- 	/* Switch clock and reset channels */
--	outb(mode, hwif->dma_master + 0x7b);
--	outb(0xc0, hwif->dma_master + 0x79);
-+	outb(mode, base + 0x6b);
-+	outb(0xc0, base + 0x69);
- 
- 	/*
- 	 * Reset the state machines.
- 	 * NOTE: avoid accidentally enabling the disabled channels.
- 	 */
--	outb(inb(hwif->dma_master + 0x70) | 0x32, hwif->dma_master + 0x70);
--	outb(inb(hwif->dma_master + 0x74) | 0x32, hwif->dma_master + 0x74);
-+	outb(inb(base + 0x60) | 0x32, base + 0x60);
-+	outb(inb(base + 0x64) | 0x32, base + 0x64);
- 
- 	/* Complete reset */
--	outb(0x00, hwif->dma_master + 0x79);
-+	outb(0x00, base + 0x69);
- 
- 	/* Reconnect channels to bus */
--	outb(0x00, hwif->dma_master + 0x73);
--	outb(0x00, hwif->dma_master + 0x77);
-+	outb(0x00, base + 0x63);
-+	outb(0x00, base + 0x67);
- }
- 
- /**
-@@ -1210,7 +1185,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
- 	 * We also  don't like using  the DPLL because this causes glitches
- 	 * on PRST-/SRST- when the state engine gets reset...
- 	 */
--	if (chip_type >= HPT374 || info->settings[clock] == NULL) {
-+	if (chip_type >= HPT374 || info->timings->clock_table[clock] == NULL) {
- 		u16 f_low, delta = pci_clk < 50 ? 2 : 4;
- 		int adjust;
- 
-@@ -1226,7 +1201,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
- 			clock = ATA_CLOCK_50MHZ;
- 		}
- 
--		if (info->settings[clock] == NULL) {
-+		if (info->timings->clock_table[clock] == NULL) {
- 			printk(KERN_ERR "%s: unknown bus timing!\n", name);
- 			kfree(info);
- 			return -EIO;
-@@ -1267,15 +1242,10 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
- 		printk("%s: using %d MHz PCI clock\n", name, pci_clk);
- 	}
- 
--	/*
--	 * Advance the table pointer to a slot which points to the list
--	 * of the register values settings matching the clock being used.
--	 */
--	info->settings += clock;
+-#ifdef CONFIG_X86_64
+-	gva = (long long)(gva << 16) >> 16;
+-#endif
+-	return gva;
+-}
 -
- 	/* Store the clock frequencies. */
- 	info->dpll_clk	= dpll_clk;
- 	info->pci_clk	= pci_clk;
-+	info->clock	= clock;
- 
- 	/* Point to this chip's own instance of the hpt_info structure. */
- 	pci_set_drvdata(dev, info);
-@@ -1320,8 +1290,8 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
- 
- 	hwif->set_pio_mode	= &hpt3xx_set_pio_mode;
- 	hwif->set_dma_mode	= &hpt3xx_set_mode;
-+
- 	hwif->quirkproc		= &hpt3xx_quirkproc;
--	hwif->intrproc		= &hpt3xx_intrproc;
- 	hwif->maskproc		= &hpt3xx_maskproc;
- 	hwif->busproc		= &hpt3xx_busproc;
- 
-@@ -1494,6 +1464,11 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
- 	return 0;
- }
- 
-+#define IDE_HFLAGS_HPT3XX \
-+	(IDE_HFLAG_NO_ATAPI_DMA | \
-+	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
-+	 IDE_HFLAG_OFF_BOARD)
-+
- static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
- 	{	/* 0 */
- 		.name		= "HPT36x",
-@@ -1508,9 +1483,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
- 		 */
- 		.enablebits	= {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
- 		.extra		= 240,
--		.host_flags	= IDE_HFLAG_SINGLE |
--				  IDE_HFLAG_NO_ATAPI_DMA |
--				  IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 	},{	/* 1 */
-@@ -1520,7 +1493,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
- 		.init_dma	= init_dma_hpt366,
- 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- 		.extra		= 240,
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAGS_HPT3XX,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 	},{	/* 2 */
-@@ -1530,7 +1503,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
- 		.init_dma	= init_dma_hpt366,
- 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- 		.extra		= 240,
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAGS_HPT3XX,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 	},{	/* 3 */
-@@ -1540,7 +1513,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
- 		.init_dma	= init_dma_hpt366,
- 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- 		.extra		= 240,
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAGS_HPT3XX,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 	},{	/* 4 */
-@@ -1551,7 +1524,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
- 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- 		.udma_mask	= ATA_UDMA5,
- 		.extra		= 240,
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAGS_HPT3XX,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 	},{	/* 5 */
-@@ -1561,7 +1534,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
- 		.init_dma	= init_dma_hpt366,
- 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- 		.extra		= 240,
--		.host_flags	= IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAGS_HPT3XX,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 	}
-diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
-index 90b52ed..2a0f45c 100644
---- a/drivers/ide/pci/it8213.c
-+++ b/drivers/ide/pci/it8213.c
-@@ -101,24 +101,11 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 	pci_read_config_byte(dev, 0x54, &reg54);
- 	pci_read_config_byte(dev, 0x55, &reg55);
- 
--	switch(speed) {
--		case XFER_UDMA_6:
--		case XFER_UDMA_4:
--		case XFER_UDMA_2:	u_speed = 2 << (drive->dn * 4); break;
--		case XFER_UDMA_5:
--		case XFER_UDMA_3:
--		case XFER_UDMA_1:	u_speed = 1 << (drive->dn * 4); break;
--		case XFER_UDMA_0:	u_speed = 0 << (drive->dn * 4); break;
--			break;
--		case XFER_MW_DMA_2:
--		case XFER_MW_DMA_1:
--		case XFER_SW_DMA_2:
--			break;
--		default:
--			return;
+-static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
+-				gva_t va, int level)
+-{
+-	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
+-	int i;
+-	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
+-
+-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
+-		u64 ent = pt[i];
+-
+-		if (!(ent & PT_PRESENT_MASK))
+-			continue;
+-
+-		va = canonicalize(va);
+-		if (level > 1)
+-			audit_mappings_page(vcpu, ent, va, level - 1);
+-		else {
+-			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
+-			hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+-
+-			if ((ent & PT_PRESENT_MASK)
+-			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
+-				printk(KERN_ERR "audit error: (%s) levels %d"
+-				       " gva %lx gpa %llx hpa %llx ent %llx\n",
+-				       audit_msg, vcpu->mmu.root_level,
+-				       va, gpa, hpa, ent);
+-		}
 -	}
+-}
 -
- 	if (speed >= XFER_UDMA_0) {
-+		u8 udma = speed - XFER_UDMA_0;
-+
-+		u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
-+
- 		if (!(reg48 & u_flag))
- 			pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- 		if (speed >= XFER_UDMA_5) {
-diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
-index 99b7d76..e610a53 100644
---- a/drivers/ide/pci/it821x.c
-+++ b/drivers/ide/pci/it821x.c
-@@ -431,33 +431,29 @@ static u8 __devinit ata66_it821x(ide_hwif_t *hwif)
- }
- 
- /**
-- *	it821x_fixup	-	post init callback
-- *	@hwif: interface
-+ *	it821x_quirkproc	-	post init callback
-+ *	@drive: drive
-  *
-- *	This callback is run after the drives have been probed but
-+ *	This callback is run after the drive has been probed but
-  *	before anything gets attached. It allows drivers to do any
-  *	final tuning that is needed, or fixups to work around bugs.
-  */
- 
--static void __devinit it821x_fixups(ide_hwif_t *hwif)
-+static void __devinit it821x_quirkproc(ide_drive_t *drive)
- {
--	struct it821x_dev *itdev = ide_get_hwifdata(hwif);
--	int i;
-+	struct it821x_dev *itdev = ide_get_hwifdata(drive->hwif);
-+	struct hd_driveid *id = drive->id;
-+	u16 *idbits = (u16 *)drive->id;
- 
--	if(!itdev->smart) {
-+	if (!itdev->smart) {
- 		/*
- 		 *	If we are in pass through mode then not much
- 		 *	needs to be done, but we do bother to clear the
- 		 *	IRQ mask as we may well be in PIO (eg rev 0x10)
- 		 *	for now and we know unmasking is safe on this chipset.
- 		 */
--		for (i = 0; i < 2; i++) {
--			ide_drive_t *drive = &hwif->drives[i];
--			if(drive->present)
--				drive->unmask = 1;
+-static void audit_mappings(struct kvm_vcpu *vcpu)
+-{
+-	unsigned i;
+-
+-	if (vcpu->mmu.root_level == 4)
+-		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+-	else
+-		for (i = 0; i < 4; ++i)
+-			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+-				audit_mappings_page(vcpu,
+-						    vcpu->mmu.pae_root[i],
+-						    i << 30,
+-						    2);
+-}
+-
+-static int count_rmaps(struct kvm_vcpu *vcpu)
+-{
+-	int nmaps = 0;
+-	int i, j, k;
+-
+-	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
+-		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
+-		struct kvm_rmap_desc *d;
+-
+-		for (j = 0; j < m->npages; ++j) {
+-			struct page *page = m->phys_mem[j];
+-
+-			if (!page->private)
+-				continue;
+-			if (!(page->private & 1)) {
+-				++nmaps;
+-				continue;
+-			}
+-			d = (struct kvm_rmap_desc *)(page->private & ~1ul);
+-			while (d) {
+-				for (k = 0; k < RMAP_EXT; ++k)
+-					if (d->shadow_ptes[k])
+-						++nmaps;
+-					else
+-						break;
+-				d = d->more;
+-			}
 -		}
--		return;
 -	}
-+		drive->unmask = 1;
-+	} else {
- 	/*
- 	 *	Perform fixups on smart mode. We need to "lose" some
- 	 *	capabilities the firmware lacks but does not filter, and
-@@ -465,16 +461,6 @@ static void __devinit it821x_fixups(ide_hwif_t *hwif)
- 	 *	in RAID mode.
- 	 */
- 
--	for(i = 0; i < 2; i++) {
--		ide_drive_t *drive = &hwif->drives[i];
--		struct hd_driveid *id;
--		u16 *idbits;
+-	return nmaps;
+-}
 -
--		if(!drive->present)
+-static int count_writable_mappings(struct kvm_vcpu *vcpu)
+-{
+-	int nmaps = 0;
+-	struct kvm_mmu_page *page;
+-	int i;
+-
+-	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+-		u64 *pt = page->spt;
+-
+-		if (page->role.level != PT_PAGE_TABLE_LEVEL)
 -			continue;
--		id = drive->id;
--		idbits = (u16 *)drive->id;
 -
- 		/* Check for RAID v native */
- 		if(strstr(id->model, "Integrated Technology Express")) {
- 			/* In raid mode the ident block is slightly buggy
-@@ -537,6 +523,8 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
- 	struct it821x_dev *idev = kzalloc(sizeof(struct it821x_dev), GFP_KERNEL);
- 	u8 conf;
- 
-+	hwif->quirkproc = &it821x_quirkproc;
-+
- 	if (idev == NULL) {
- 		printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n");
- 		return;
-@@ -633,7 +621,6 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha
- 		.name		= name_str,		\
- 		.init_chipset	= init_chipset_it821x,	\
- 		.init_hwif	= init_hwif_it821x,	\
--		.fixup	 	= it821x_fixups,	\
- 		.host_flags	= IDE_HFLAG_BOOTABLE,	\
- 		.pio_mask	= ATA_PIO4,		\
- 	}
-diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
-index 2b4f44e..89d2363 100644
---- a/drivers/ide/pci/pdc202xx_new.c
-+++ b/drivers/ide/pci/pdc202xx_new.c
-@@ -146,7 +146,7 @@ static struct udma_timing {
- 	{ 0x1a, 0x01, 0xcb },	/* UDMA mode 6 */
- };
- 
--static void pdcnew_set_mode(ide_drive_t *drive, const u8 speed)
-+static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed)
- {
- 	ide_hwif_t *hwif	= HWIF(drive);
- 	u8 adj			= (drive->dn & 1) ? 0x08 : 0x00;
-@@ -162,45 +162,18 @@ static void pdcnew_set_mode(ide_drive_t *drive, const u8 speed)
- 	if (max_dma_rate(hwif->pci_dev) == 4) {
- 		u8 mode = speed & 0x07;
- 
--		switch (speed) {
--			case XFER_UDMA_6:
--			case XFER_UDMA_5:
--			case XFER_UDMA_4:
--			case XFER_UDMA_3:
--			case XFER_UDMA_2:
--			case XFER_UDMA_1:
--			case XFER_UDMA_0:
--				set_indexed_reg(hwif, 0x10 + adj,
--						udma_timings[mode].reg10);
--				set_indexed_reg(hwif, 0x11 + adj,
--						udma_timings[mode].reg11);
--				set_indexed_reg(hwif, 0x12 + adj,
--						udma_timings[mode].reg12);
--				break;
+-		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+-			u64 ent = pt[i];
 -
--			case XFER_MW_DMA_2:
--			case XFER_MW_DMA_1:
--			case XFER_MW_DMA_0:
--				set_indexed_reg(hwif, 0x0e + adj,
--						mwdma_timings[mode].reg0e);
--				set_indexed_reg(hwif, 0x0f + adj,
--						mwdma_timings[mode].reg0f);
--				break;
--			case XFER_PIO_4:
--			case XFER_PIO_3:
--			case XFER_PIO_2:
--			case XFER_PIO_1:
--			case XFER_PIO_0:
--				set_indexed_reg(hwif, 0x0c + adj,
--						pio_timings[mode].reg0c);
--				set_indexed_reg(hwif, 0x0d + adj,
--						pio_timings[mode].reg0d);
--				set_indexed_reg(hwif, 0x13 + adj,
--						pio_timings[mode].reg13);
--				break;
--			default:
--				printk(KERN_ERR "pdc202xx_new: "
--				       "Unknown speed %d ignored\n", speed);
-+		if (speed >= XFER_UDMA_0) {
-+			set_indexed_reg(hwif, 0x10 + adj,
-+					udma_timings[mode].reg10);
-+			set_indexed_reg(hwif, 0x11 + adj,
-+					udma_timings[mode].reg11);
-+			set_indexed_reg(hwif, 0x12 + adj,
-+					udma_timings[mode].reg12);
-+		} else {
-+			set_indexed_reg(hwif, 0x0e + adj,
-+					mwdma_timings[mode].reg0e);
-+			set_indexed_reg(hwif, 0x0f + adj,
-+					mwdma_timings[mode].reg0f);
- 		}
- 	} else if (speed == XFER_UDMA_2) {
- 		/* Set tHOLD bit to 0 if using UDMA mode 2 */
-@@ -212,7 +185,14 @@ static void pdcnew_set_mode(ide_drive_t *drive, const u8 speed)
- 
- static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio)
- {
--	pdcnew_set_mode(drive, XFER_PIO_0 + pio);
-+	ide_hwif_t *hwif = drive->hwif;
-+	u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
-+
-+	if (max_dma_rate(hwif->pci_dev) == 4) {
-+		set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
-+		set_indexed_reg(hwif, 0x0d + adj, pio_timings[pio].reg0d);
-+		set_indexed_reg(hwif, 0x13 + adj, pio_timings[pio].reg13);
-+	}
- }
- 
- static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
-@@ -223,14 +203,17 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
- 		return ATA_CBL_PATA80;
- }
- 
--static int pdcnew_quirkproc(ide_drive_t *drive)
-+static void pdcnew_quirkproc(ide_drive_t *drive)
- {
- 	const char **list, *model = drive->id->model;
- 
- 	for (list = pdc_quirk_drives; *list != NULL; list++)
--		if (strstr(model, *list) != NULL)
--			return 2;
--	return 0;
-+		if (strstr(model, *list) != NULL) {
-+			drive->quirk_list = 2;
-+			return;
-+		}
-+
-+	drive->quirk_list = 0;
- }
- 
- static void pdcnew_reset(ide_drive_t *drive)
-@@ -466,7 +449,7 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
- static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
- {
- 	hwif->set_pio_mode = &pdcnew_set_pio_mode;
--	hwif->set_dma_mode = &pdcnew_set_mode;
-+	hwif->set_dma_mode = &pdcnew_set_dma_mode;
- 
- 	hwif->quirkproc = &pdcnew_quirkproc;
- 	hwif->resetproc = &pdcnew_reset;
-diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
-index e09742e..3a1e081 100644
---- a/drivers/ide/pci/pdc202xx_old.c
-+++ b/drivers/ide/pci/pdc202xx_old.c
-@@ -162,7 +162,7 @@ static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif)
-  */
- static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
- {
--	unsigned long clock_reg = hwif->dma_master + 0x11;
-+	unsigned long clock_reg = hwif->extra_base + 0x01;
- 	u8 clock = inb(clock_reg);
- 
- 	outb(clock | (hwif->channel ? 0x08 : 0x02), clock_reg);
-@@ -170,20 +170,23 @@ static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
- 
- static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
- {
--	unsigned long clock_reg = hwif->dma_master + 0x11;
-+	unsigned long clock_reg = hwif->extra_base + 0x01;
- 	u8 clock = inb(clock_reg);
- 
- 	outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
- }
- 
--static int pdc202xx_quirkproc (ide_drive_t *drive)
-+static void pdc202xx_quirkproc(ide_drive_t *drive)
- {
- 	const char **list, *model = drive->id->model;
- 
- 	for (list = pdc_quirk_drives; *list != NULL; list++)
--		if (strstr(model, *list) != NULL)
--			return 2;
--	return 0;
-+		if (strstr(model, *list) != NULL) {
-+			drive->quirk_list = 2;
-+			return;
-+		}
-+
-+	drive->quirk_list = 0;
- }
- 
- static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
-@@ -193,7 +196,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
- 	if (drive->media != ide_disk || drive->addressing == 1) {
- 		struct request *rq	= HWGROUP(drive)->rq;
- 		ide_hwif_t *hwif	= HWIF(drive);
--		unsigned long high_16   = hwif->dma_master;
-+		unsigned long high_16	= hwif->extra_base - 16;
- 		unsigned long atapi_reg	= high_16 + (hwif->channel ? 0x24 : 0x20);
- 		u32 word_count	= 0;
- 		u8 clock = inb(high_16 + 0x11);
-@@ -212,7 +215,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
- {
- 	if (drive->media != ide_disk || drive->addressing == 1) {
- 		ide_hwif_t *hwif	= HWIF(drive);
--		unsigned long high_16	= hwif->dma_master;
-+		unsigned long high_16	= hwif->extra_base - 16;
- 		unsigned long atapi_reg	= high_16 + (hwif->channel ? 0x24 : 0x20);
- 		u8 clock		= 0;
- 
-@@ -228,7 +231,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
- static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif	= HWIF(drive);
--	unsigned long high_16	= hwif->dma_master;
-+	unsigned long high_16	= hwif->extra_base - 16;
- 	u8 dma_stat		= inb(hwif->dma_status);
- 	u8 sc1d			= inb(high_16 + 0x001d);
- 
-@@ -271,7 +274,7 @@ static void pdc202xx_dma_timeout(ide_drive_t *drive)
- 
- static void pdc202xx_reset_host (ide_hwif_t *hwif)
- {
--	unsigned long high_16	= hwif->dma_master;
-+	unsigned long high_16	= hwif->extra_base - 16;
- 	u8 udma_speed_flag	= inb(high_16 | 0x001f);
- 
- 	outb(udma_speed_flag | 0x10, high_16 | 0x001f);
-@@ -375,6 +378,11 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
- 	}
- }
- 
-+#define IDE_HFLAGS_PDC202XX \
-+	(IDE_HFLAG_ERROR_STOPS_FIFO | \
-+	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
-+	 IDE_HFLAG_OFF_BOARD)
-+
- #define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \
- 	{ \
- 		.name		= name_str, \
-@@ -382,9 +390,7 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
- 		.init_hwif	= init_hwif_pdc202xx, \
- 		.init_dma	= init_dma_pdc202xx, \
- 		.extra		= 48, \
--		.host_flags	= IDE_HFLAG_ERROR_STOPS_FIFO | \
--				  extra_flags | \
--				  IDE_HFLAG_OFF_BOARD, \
-+		.host_flags	= IDE_HFLAGS_PDC202XX | extra_flags, \
- 		.pio_mask	= ATA_PIO4, \
- 		.mwdma_mask	= ATA_MWDMA2, \
- 		.udma_mask	= udma, \
-@@ -397,8 +403,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
- 		.init_hwif	= init_hwif_pdc202xx,
- 		.init_dma	= init_dma_pdc202xx,
- 		.extra		= 16,
--		.host_flags	= IDE_HFLAG_ERROR_STOPS_FIFO |
--				  IDE_HFLAG_OFF_BOARD,
-+		.host_flags	= IDE_HFLAGS_PDC202XX,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= ATA_UDMA2,
-diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
-index 27781d2..bd6d3f7 100644
---- a/drivers/ide/pci/piix.c
-+++ b/drivers/ide/pci/piix.c
-@@ -203,20 +203,11 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 	pci_read_config_byte(dev, 0x54, &reg54);
- 	pci_read_config_byte(dev, 0x55, &reg55);
- 
--	switch(speed) {
--		case XFER_UDMA_4:
--		case XFER_UDMA_2:	u_speed = 2 << (drive->dn * 4); break;
--		case XFER_UDMA_5:
--		case XFER_UDMA_3:
--		case XFER_UDMA_1:	u_speed = 1 << (drive->dn * 4); break;
--		case XFER_UDMA_0:	u_speed = 0 << (drive->dn * 4); break;
--		case XFER_MW_DMA_2:
--		case XFER_MW_DMA_1:
--		case XFER_SW_DMA_2:	break;
--		default:		return;
+-			if (!(ent & PT_PRESENT_MASK))
+-				continue;
+-			if (!(ent & PT_WRITABLE_MASK))
+-				continue;
+-			++nmaps;
+-		}
 -	}
+-	return nmaps;
+-}
 -
- 	if (speed >= XFER_UDMA_0) {
-+		u8 udma = speed - XFER_UDMA_0;
-+
-+		u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
-+
- 		if (!(reg48 & u_flag))
- 			pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- 		if (speed == XFER_UDMA_5) {
-diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
-index 707d5ff..32fdf53 100644
---- a/drivers/ide/pci/sc1200.c
-+++ b/drivers/ide/pci/sc1200.c
-@@ -135,59 +135,29 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
- 	unsigned short		pci_clock;
- 	unsigned int		basereg = hwif->channel ? 0x50 : 0x40;
- 
-+	static const u32 udma_timing[3][3] = {
-+		{ 0x00921250, 0x00911140, 0x00911030 },
-+		{ 0x00932470, 0x00922260, 0x00922140 },
-+		{ 0x009436a1, 0x00933481, 0x00923261 },
-+	};
-+
-+	static const u32 mwdma_timing[3][3] = {
-+		{ 0x00077771, 0x00012121, 0x00002020 },
-+		{ 0x000bbbb2, 0x00024241, 0x00013131 },
-+		{ 0x000ffff3, 0x00035352, 0x00015151 },
-+	};
-+
- 	pci_clock = sc1200_get_pci_clock();
- 
- 	/*
- 	 * Note that each DMA mode has several timings associated with it.
- 	 * The correct timing depends on the fast PCI clock freq.
- 	 */
--	timings = 0;
--	switch (mode) {
--		case XFER_UDMA_0:
--			switch (pci_clock) {
--				case PCI_CLK_33:	timings = 0x00921250;	break;
--				case PCI_CLK_48:	timings = 0x00932470;	break;
--				case PCI_CLK_66:	timings = 0x009436a1;	break;
--			}
--			break;
--		case XFER_UDMA_1:
--			switch (pci_clock) {
--				case PCI_CLK_33:	timings = 0x00911140;	break;
--				case PCI_CLK_48:	timings = 0x00922260;	break;
--				case PCI_CLK_66:	timings = 0x00933481;	break;
--			}
--			break;
--		case XFER_UDMA_2:
--			switch (pci_clock) {
--				case PCI_CLK_33:	timings = 0x00911030;	break;
--				case PCI_CLK_48:	timings = 0x00922140;	break;
--				case PCI_CLK_66:	timings = 0x00923261;	break;
--			}
--			break;
--		case XFER_MW_DMA_0:
--			switch (pci_clock) {
--				case PCI_CLK_33:	timings = 0x00077771;	break;
--				case PCI_CLK_48:	timings = 0x000bbbb2;	break;
--				case PCI_CLK_66:	timings = 0x000ffff3;	break;
--			}
+-static void audit_rmap(struct kvm_vcpu *vcpu)
+-{
+-	int n_rmap = count_rmaps(vcpu);
+-	int n_actual = count_writable_mappings(vcpu);
+-
+-	if (n_rmap != n_actual)
+-		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
+-		       __FUNCTION__, audit_msg, n_rmap, n_actual);
+-}
+-
+-static void audit_write_protection(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_mmu_page *page;
+-
+-	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+-		hfn_t hfn;
+-		struct page *pg;
+-
+-		if (page->role.metaphysical)
+-			continue;
+-
+-		hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
+-			>> PAGE_SHIFT;
+-		pg = pfn_to_page(hfn);
+-		if (pg->private)
+-			printk(KERN_ERR "%s: (%s) shadow page has writable"
+-			       " mappings: gfn %lx role %x\n",
+-			       __FUNCTION__, audit_msg, page->gfn,
+-			       page->role.word);
+-	}
+-}
+-
+-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
+-{
+-	int olddbg = dbg;
+-
+-	dbg = 0;
+-	audit_msg = msg;
+-	audit_rmap(vcpu);
+-	audit_write_protection(vcpu);
+-	audit_mappings(vcpu);
+-	dbg = olddbg;
+-}
+-
+-#endif
+diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
+deleted file mode 100644
+index 6b094b4..0000000
+--- a/drivers/kvm/paging_tmpl.h
++++ /dev/null
+@@ -1,511 +0,0 @@
+-/*
+- * Kernel-based Virtual Machine driver for Linux
+- *
+- * This module enables machines with Intel VT-x extensions to run virtual
+- * machines without emulation or binary translation.
+- *
+- * MMU support
+- *
+- * Copyright (C) 2006 Qumranet, Inc.
+- *
+- * Authors:
+- *   Yaniv Kamay  <yaniv at qumranet.com>
+- *   Avi Kivity   <avi at qumranet.com>
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2.  See
+- * the COPYING file in the top-level directory.
+- *
+- */
+-
+-/*
+- * We need the mmu code to access both 32-bit and 64-bit guest ptes,
+- * so the code in this file is compiled twice, once per pte size.
+- */
+-
+-#if PTTYPE == 64
+-	#define pt_element_t u64
+-	#define guest_walker guest_walker64
+-	#define FNAME(name) paging##64_##name
+-	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+-	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
+-	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
+-	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+-	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
+-	#ifdef CONFIG_X86_64
+-	#define PT_MAX_FULL_LEVELS 4
+-	#else
+-	#define PT_MAX_FULL_LEVELS 2
+-	#endif
+-#elif PTTYPE == 32
+-	#define pt_element_t u32
+-	#define guest_walker guest_walker32
+-	#define FNAME(name) paging##32_##name
+-	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
+-	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
+-	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
+-	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+-	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
+-	#define PT_MAX_FULL_LEVELS 2
+-#else
+-	#error Invalid PTTYPE value
+-#endif
+-
+-/*
+- * The guest_walker structure emulates the behavior of the hardware page
+- * table walker.
+- */
+-struct guest_walker {
+-	int level;
+-	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
+-	pt_element_t *table;
+-	pt_element_t pte;
+-	pt_element_t *ptep;
+-	struct page *page;
+-	int index;
+-	pt_element_t inherited_ar;
+-	gfn_t gfn;
+-	u32 error_code;
+-};
+-
+-/*
+- * Fetch a guest pte for a guest virtual address
+- */
+-static int FNAME(walk_addr)(struct guest_walker *walker,
+-			    struct kvm_vcpu *vcpu, gva_t addr,
+-			    int write_fault, int user_fault, int fetch_fault)
+-{
+-	hpa_t hpa;
+-	struct kvm_memory_slot *slot;
+-	pt_element_t *ptep;
+-	pt_element_t root;
+-	gfn_t table_gfn;
+-
+-	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
+-	walker->level = vcpu->mmu.root_level;
+-	walker->table = NULL;
+-	walker->page = NULL;
+-	walker->ptep = NULL;
+-	root = vcpu->cr3;
+-#if PTTYPE == 64
+-	if (!is_long_mode(vcpu)) {
+-		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
+-		root = *walker->ptep;
+-		walker->pte = root;
+-		if (!(root & PT_PRESENT_MASK))
+-			goto not_present;
+-		--walker->level;
+-	}
+-#endif
+-	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+-	walker->table_gfn[walker->level - 1] = table_gfn;
+-	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+-		 walker->level - 1, table_gfn);
+-	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
+-	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
+-	walker->page = pfn_to_page(hpa >> PAGE_SHIFT);
+-	walker->table = kmap_atomic(walker->page, KM_USER0);
+-
+-	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
+-	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
+-
+-	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
+-
+-	for (;;) {
+-		int index = PT_INDEX(addr, walker->level);
+-		hpa_t paddr;
+-
+-		ptep = &walker->table[index];
+-		walker->index = index;
+-		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
+-		       ((unsigned long)ptep & PAGE_MASK));
+-
+-		if (!is_present_pte(*ptep))
+-			goto not_present;
+-
+-		if (write_fault && !is_writeble_pte(*ptep))
+-			if (user_fault || is_write_protection(vcpu))
+-				goto access_error;
+-
+-		if (user_fault && !(*ptep & PT_USER_MASK))
+-			goto access_error;
+-
+-#if PTTYPE == 64
+-		if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
+-			goto access_error;
+-#endif
+-
+-		if (!(*ptep & PT_ACCESSED_MASK)) {
+-			mark_page_dirty(vcpu->kvm, table_gfn);
+-			*ptep |= PT_ACCESSED_MASK;
+-		}
+-
+-		if (walker->level == PT_PAGE_TABLE_LEVEL) {
+-			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
+-				>> PAGE_SHIFT;
 -			break;
--		case XFER_MW_DMA_1:
--			switch (pci_clock) {
--				case PCI_CLK_33:	timings = 0x00012121;	break;
--				case PCI_CLK_48:	timings = 0x00024241;	break;
--				case PCI_CLK_66:	timings = 0x00035352;	break;
--			}
+-		}
+-
+-		if (walker->level == PT_DIRECTORY_LEVEL
+-		    && (*ptep & PT_PAGE_SIZE_MASK)
+-		    && (PTTYPE == 64 || is_pse(vcpu))) {
+-			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
+-				>> PAGE_SHIFT;
+-			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
 -			break;
--		case XFER_MW_DMA_2:
--			switch (pci_clock) {
--				case PCI_CLK_33:	timings = 0x00002020;	break;
--				case PCI_CLK_48:	timings = 0x00013131;	break;
--				case PCI_CLK_66:	timings = 0x00015151;	break;
+-		}
+-
+-		walker->inherited_ar &= walker->table[index];
+-		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
+-		kunmap_atomic(walker->table, KM_USER0);
+-		paddr = safe_gpa_to_hpa(vcpu, table_gfn << PAGE_SHIFT);
+-		walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
+-		walker->table = kmap_atomic(walker->page, KM_USER0);
+-		--walker->level;
+-		walker->table_gfn[walker->level - 1 ] = table_gfn;
+-		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+-			 walker->level - 1, table_gfn);
+-	}
+-	walker->pte = *ptep;
+-	if (walker->page)
+-		walker->ptep = NULL;
+-	if (walker->table)
+-		kunmap_atomic(walker->table, KM_USER0);
+-	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
+-	return 1;
+-
+-not_present:
+-	walker->error_code = 0;
+-	goto err;
+-
+-access_error:
+-	walker->error_code = PFERR_PRESENT_MASK;
+-
+-err:
+-	if (write_fault)
+-		walker->error_code |= PFERR_WRITE_MASK;
+-	if (user_fault)
+-		walker->error_code |= PFERR_USER_MASK;
+-	if (fetch_fault)
+-		walker->error_code |= PFERR_FETCH_MASK;
+-	if (walker->table)
+-		kunmap_atomic(walker->table, KM_USER0);
+-	return 0;
+-}
+-
+-static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
+-					struct guest_walker *walker)
+-{
+-	mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
+-}
+-
+-static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
+-				  u64 *shadow_pte,
+-				  gpa_t gaddr,
+-				  pt_element_t gpte,
+-				  u64 access_bits,
+-				  int user_fault,
+-				  int write_fault,
+-				  int *ptwrite,
+-				  struct guest_walker *walker,
+-				  gfn_t gfn)
+-{
+-	hpa_t paddr;
+-	int dirty = gpte & PT_DIRTY_MASK;
+-	u64 spte = *shadow_pte;
+-	int was_rmapped = is_rmap_pte(spte);
+-
+-	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
+-		 " user_fault %d gfn %lx\n",
+-		 __FUNCTION__, spte, (u64)gpte, access_bits,
+-		 write_fault, user_fault, gfn);
+-
+-	if (write_fault && !dirty) {
+-		pt_element_t *guest_ent, *tmp = NULL;
+-
+-		if (walker->ptep)
+-			guest_ent = walker->ptep;
+-		else {
+-			tmp = kmap_atomic(walker->page, KM_USER0);
+-			guest_ent = &tmp[walker->index];
+-		}
+-
+-		*guest_ent |= PT_DIRTY_MASK;
+-		if (!walker->ptep)
+-			kunmap_atomic(tmp, KM_USER0);
+-		dirty = 1;
+-		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
+-	}
+-
+-	spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
+-	spte |= gpte & PT64_NX_MASK;
+-	if (!dirty)
+-		access_bits &= ~PT_WRITABLE_MASK;
+-
+-	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
+-
+-	spte |= PT_PRESENT_MASK;
+-	if (access_bits & PT_USER_MASK)
+-		spte |= PT_USER_MASK;
+-
+-	if (is_error_hpa(paddr)) {
+-		spte |= gaddr;
+-		spte |= PT_SHADOW_IO_MARK;
+-		spte &= ~PT_PRESENT_MASK;
+-		set_shadow_pte(shadow_pte, spte);
+-		return;
+-	}
+-
+-	spte |= paddr;
+-
+-	if ((access_bits & PT_WRITABLE_MASK)
+-	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+-		struct kvm_mmu_page *shadow;
+-
+-		spte |= PT_WRITABLE_MASK;
+-		if (user_fault) {
+-			mmu_unshadow(vcpu, gfn);
+-			goto unshadowed;
+-		}
+-
+-		shadow = kvm_mmu_lookup_page(vcpu, gfn);
+-		if (shadow) {
+-			pgprintk("%s: found shadow page for %lx, marking ro\n",
+-				 __FUNCTION__, gfn);
+-			access_bits &= ~PT_WRITABLE_MASK;
+-			if (is_writeble_pte(spte)) {
+-				spte &= ~PT_WRITABLE_MASK;
+-				kvm_x86_ops->tlb_flush(vcpu);
 -			}
+-			if (write_fault)
+-				*ptwrite = 1;
+-		}
+-	}
+-
+-unshadowed:
+-
+-	if (access_bits & PT_WRITABLE_MASK)
+-		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+-
+-	set_shadow_pte(shadow_pte, spte);
+-	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
+-	if (!was_rmapped)
+-		rmap_add(vcpu, shadow_pte);
+-}
+-
+-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
+-			   u64 *shadow_pte, u64 access_bits,
+-			   int user_fault, int write_fault, int *ptwrite,
+-			   struct guest_walker *walker, gfn_t gfn)
+-{
+-	access_bits &= gpte;
+-	FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
+-			      gpte, access_bits, user_fault, write_fault,
+-			      ptwrite, walker, gfn);
+-}
+-
+-static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
+-			      u64 *spte, const void *pte, int bytes)
+-{
+-	pt_element_t gpte;
+-
+-	if (bytes < sizeof(pt_element_t))
+-		return;
+-	gpte = *(const pt_element_t *)pte;
+-	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
+-		return;
+-	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+-	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
+-		       0, NULL, NULL,
+-		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
+-}
+-
+-static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
+-			   u64 *shadow_pte, u64 access_bits,
+-			   int user_fault, int write_fault, int *ptwrite,
+-			   struct guest_walker *walker, gfn_t gfn)
+-{
+-	gpa_t gaddr;
+-
+-	access_bits &= gpde;
+-	gaddr = (gpa_t)gfn << PAGE_SHIFT;
+-	if (PTTYPE == 32 && is_cpuid_PSE36())
+-		gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
+-			(32 - PT32_DIR_PSE36_SHIFT);
+-	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
+-			      gpde, access_bits, user_fault, write_fault,
+-			      ptwrite, walker, gfn);
+-}
+-
+-/*
+- * Fetch a shadow pte for a specific level in the paging hierarchy.
+- */
+-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+-			 struct guest_walker *walker,
+-			 int user_fault, int write_fault, int *ptwrite)
+-{
+-	hpa_t shadow_addr;
+-	int level;
+-	u64 *shadow_ent;
+-	u64 *prev_shadow_ent = NULL;
+-
+-	if (!is_present_pte(walker->pte))
+-		return NULL;
+-
+-	shadow_addr = vcpu->mmu.root_hpa;
+-	level = vcpu->mmu.shadow_root_level;
+-	if (level == PT32E_ROOT_LEVEL) {
+-		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
+-		shadow_addr &= PT64_BASE_ADDR_MASK;
+-		--level;
+-	}
+-
+-	for (; ; level--) {
+-		u32 index = SHADOW_PT_INDEX(addr, level);
+-		struct kvm_mmu_page *shadow_page;
+-		u64 shadow_pte;
+-		int metaphysical;
+-		gfn_t table_gfn;
+-		unsigned hugepage_access = 0;
+-
+-		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
+-		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
+-			if (level == PT_PAGE_TABLE_LEVEL)
+-				break;
+-			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
+-			prev_shadow_ent = shadow_ent;
+-			continue;
+-		}
+-
+-		if (level == PT_PAGE_TABLE_LEVEL)
 -			break;
--		default:
--			return;
+-
+-		if (level - 1 == PT_PAGE_TABLE_LEVEL
+-		    && walker->level == PT_DIRECTORY_LEVEL) {
+-			metaphysical = 1;
+-			hugepage_access = walker->pte;
+-			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
+-			if (walker->pte & PT64_NX_MASK)
+-				hugepage_access |= (1 << 2);
+-			hugepage_access >>= PT_WRITABLE_SHIFT;
+-			table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
+-				>> PAGE_SHIFT;
+-		} else {
+-			metaphysical = 0;
+-			table_gfn = walker->table_gfn[level - 2];
+-		}
+-		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+-					       metaphysical, hugepage_access,
+-					       shadow_ent);
+-		shadow_addr = __pa(shadow_page->spt);
+-		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
+-			| PT_WRITABLE_MASK | PT_USER_MASK;
+-		*shadow_ent = shadow_pte;
+-		prev_shadow_ent = shadow_ent;
+-	}
+-
+-	if (walker->level == PT_DIRECTORY_LEVEL) {
+-		FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
+-			       walker->inherited_ar, user_fault, write_fault,
+-			       ptwrite, walker, walker->gfn);
+-	} else {
+-		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
+-		FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
+-			       walker->inherited_ar, user_fault, write_fault,
+-			       ptwrite, walker, walker->gfn);
+-	}
+-	return shadow_ent;
+-}
+-
+-/*
+- * Page fault handler.  There are several causes for a page fault:
+- *   - there is no shadow pte for the guest pte
+- *   - write access through a shadow pte marked read only so that we can set
+- *     the dirty bit
+- *   - write access to a shadow pte marked read only so we can update the page
+- *     dirty bitmap, when userspace requests it
+- *   - mmio access; in this case we will never install a present shadow pte
+- *   - normal guest page fault due to the guest pte marked not present, not
+- *     writable, or not executable
+- *
+- *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
+- *           a negative value on error.
+- */
+-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
+-			       u32 error_code)
+-{
+-	int write_fault = error_code & PFERR_WRITE_MASK;
+-	int user_fault = error_code & PFERR_USER_MASK;
+-	int fetch_fault = error_code & PFERR_FETCH_MASK;
+-	struct guest_walker walker;
+-	u64 *shadow_pte;
+-	int write_pt = 0;
+-	int r;
+-
+-	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
+-	kvm_mmu_audit(vcpu, "pre page fault");
+-
+-	r = mmu_topup_memory_caches(vcpu);
+-	if (r)
+-		return r;
+-
+-	/*
+-	 * Look up the shadow pte for the faulting address.
+-	 */
+-	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
+-			     fetch_fault);
+-
+-	/*
+-	 * The page is not mapped by the guest.  Let the guest handle it.
+-	 */
+-	if (!r) {
+-		pgprintk("%s: guest page fault\n", __FUNCTION__);
+-		inject_page_fault(vcpu, addr, walker.error_code);
+-		vcpu->last_pt_write_count = 0; /* reset fork detector */
+-		return 0;
+-	}
+-
+-	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+-				  &write_pt);
+-	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+-		 shadow_pte, *shadow_pte, write_pt);
+-
+-	if (!write_pt)
+-		vcpu->last_pt_write_count = 0; /* reset fork detector */
+-
+-	/*
+-	 * mmio: emulate if accessible, otherwise its a guest fault.
+-	 */
+-	if (is_io_pte(*shadow_pte))
+-		return 1;
+-
+-	++vcpu->stat.pf_fixed;
+-	kvm_mmu_audit(vcpu, "post page fault (fixed)");
+-
+-	return write_pt;
+-}
+-
+-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
+-{
+-	struct guest_walker walker;
+-	gpa_t gpa = UNMAPPED_GVA;
+-	int r;
+-
+-	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
+-
+-	if (r) {
+-		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
+-		gpa |= vaddr & ~PAGE_MASK;
+-	}
+-
+-	return gpa;
+-}
+-
+-#undef pt_element_t
+-#undef guest_walker
+-#undef FNAME
+-#undef PT_BASE_ADDR_MASK
+-#undef PT_INDEX
+-#undef SHADOW_PT_INDEX
+-#undef PT_LEVEL_MASK
+-#undef PT_DIR_BASE_ADDR_MASK
+-#undef PT_MAX_FULL_LEVELS
+diff --git a/drivers/kvm/segment_descriptor.h b/drivers/kvm/segment_descriptor.h
+deleted file mode 100644
+index 71fdf45..0000000
+--- a/drivers/kvm/segment_descriptor.h
++++ /dev/null
+@@ -1,17 +0,0 @@
+-struct segment_descriptor {
+-	u16 limit_low;
+-	u16 base_low;
+-	u8  base_mid;
+-	u8  type : 4;
+-	u8  system : 1;
+-	u8  dpl : 2;
+-	u8  present : 1;
+-	u8  limit_high : 4;
+-	u8  avl : 1;
+-	u8  long_mode : 1;
+-	u8  default_op : 1;
+-	u8  granularity : 1;
+-	u8  base_high;
+-} __attribute__((packed));
+-
+-
+diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
+deleted file mode 100644
+index 4e04e49..0000000
+--- a/drivers/kvm/svm.c
++++ /dev/null
+@@ -1,1754 +0,0 @@
+-/*
+- * Kernel-based Virtual Machine driver for Linux
+- *
+- * AMD SVM support
+- *
+- * Copyright (C) 2006 Qumranet, Inc.
+- *
+- * Authors:
+- *   Yaniv Kamay  <yaniv at qumranet.com>
+- *   Avi Kivity   <avi at qumranet.com>
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2.  See
+- * the COPYING file in the top-level directory.
+- *
+- */
+-
+-#include "kvm_svm.h"
+-#include "x86_emulate.h"
+-#include "irq.h"
+-
+-#include <linux/module.h>
+-#include <linux/kernel.h>
+-#include <linux/vmalloc.h>
+-#include <linux/highmem.h>
+-#include <linux/sched.h>
+-
+-#include <asm/desc.h>
+-
+-MODULE_AUTHOR("Qumranet");
+-MODULE_LICENSE("GPL");
+-
+-#define IOPM_ALLOC_ORDER 2
+-#define MSRPM_ALLOC_ORDER 1
+-
+-#define DB_VECTOR 1
+-#define UD_VECTOR 6
+-#define GP_VECTOR 13
+-
+-#define DR7_GD_MASK (1 << 13)
+-#define DR6_BD_MASK (1 << 13)
+-
+-#define SEG_TYPE_LDT 2
+-#define SEG_TYPE_BUSY_TSS16 3
+-
+-#define KVM_EFER_LMA (1 << 10)
+-#define KVM_EFER_LME (1 << 8)
+-
+-#define SVM_FEATURE_NPT  (1 << 0)
+-#define SVM_FEATURE_LBRV (1 << 1)
+-#define SVM_DEATURE_SVML (1 << 2)
+-
+-static void kvm_reput_irq(struct vcpu_svm *svm);
+-
+-static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
+-{
+-	return container_of(vcpu, struct vcpu_svm, vcpu);
+-}
+-
+-unsigned long iopm_base;
+-unsigned long msrpm_base;
+-
+-struct kvm_ldttss_desc {
+-	u16 limit0;
+-	u16 base0;
+-	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+-	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+-	u32 base3;
+-	u32 zero1;
+-} __attribute__((packed));
+-
+-struct svm_cpu_data {
+-	int cpu;
+-
+-	u64 asid_generation;
+-	u32 max_asid;
+-	u32 next_asid;
+-	struct kvm_ldttss_desc *tss_desc;
+-
+-	struct page *save_area;
+-};
+-
+-static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
+-static uint32_t svm_features;
+-
+-struct svm_init_data {
+-	int cpu;
+-	int r;
+-};
+-
+-static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
+-
+-#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
+-#define MSRS_RANGE_SIZE 2048
+-#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
+-
+-#define MAX_INST_SIZE 15
+-
+-static inline u32 svm_has(u32 feat)
+-{
+-	return svm_features & feat;
+-}
+-
+-static inline u8 pop_irq(struct kvm_vcpu *vcpu)
+-{
+-	int word_index = __ffs(vcpu->irq_summary);
+-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
+-	int irq = word_index * BITS_PER_LONG + bit_index;
+-
+-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
+-	if (!vcpu->irq_pending[word_index])
+-		clear_bit(word_index, &vcpu->irq_summary);
+-	return irq;
+-}
+-
+-static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
+-{
+-	set_bit(irq, vcpu->irq_pending);
+-	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
+-}
+-
+-static inline void clgi(void)
+-{
+-	asm volatile (SVM_CLGI);
+-}
+-
+-static inline void stgi(void)
+-{
+-	asm volatile (SVM_STGI);
+-}
+-
+-static inline void invlpga(unsigned long addr, u32 asid)
+-{
+-	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
+-}
+-
+-static inline unsigned long kvm_read_cr2(void)
+-{
+-	unsigned long cr2;
+-
+-	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
+-	return cr2;
+-}
+-
+-static inline void kvm_write_cr2(unsigned long val)
+-{
+-	asm volatile ("mov %0, %%cr2" :: "r" (val));
+-}
+-
+-static inline unsigned long read_dr6(void)
+-{
+-	unsigned long dr6;
+-
+-	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
+-	return dr6;
+-}
+-
+-static inline void write_dr6(unsigned long val)
+-{
+-	asm volatile ("mov %0, %%dr6" :: "r" (val));
+-}
+-
+-static inline unsigned long read_dr7(void)
+-{
+-	unsigned long dr7;
+-
+-	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
+-	return dr7;
+-}
+-
+-static inline void write_dr7(unsigned long val)
+-{
+-	asm volatile ("mov %0, %%dr7" :: "r" (val));
+-}
+-
+-static inline void force_new_asid(struct kvm_vcpu *vcpu)
+-{
+-	to_svm(vcpu)->asid_generation--;
+-}
+-
+-static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
+-{
+-	force_new_asid(vcpu);
+-}
+-
+-static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+-{
+-	if (!(efer & KVM_EFER_LMA))
+-		efer &= ~KVM_EFER_LME;
+-
+-	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
+-	vcpu->shadow_efer = efer;
+-}
+-
+-static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	svm->vmcb->control.event_inj =		SVM_EVTINJ_VALID |
+-						SVM_EVTINJ_VALID_ERR |
+-						SVM_EVTINJ_TYPE_EXEPT |
+-						GP_VECTOR;
+-	svm->vmcb->control.event_inj_err = error_code;
+-}
+-
+-static void inject_ud(struct kvm_vcpu *vcpu)
+-{
+-	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
+-						SVM_EVTINJ_TYPE_EXEPT |
+-						UD_VECTOR;
+-}
+-
+-static int is_page_fault(uint32_t info)
+-{
+-	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
+-	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
+-}
+-
+-static int is_external_interrupt(u32 info)
+-{
+-	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
+-	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
+-}
+-
+-static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	if (!svm->next_rip) {
+-		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
+-		return;
 -	}
-+
-+	if (mode >= XFER_UDMA_0)
-+		timings =  udma_timing[pci_clock][mode - XFER_UDMA_0];
-+	else
-+		timings = mwdma_timing[pci_clock][mode - XFER_MW_DMA_0];
- 
- 	if (unit == 0) {			/* are we configuring drive0? */
- 		pci_read_config_dword(hwif->pci_dev, basereg+4, &reg);
-@@ -250,9 +220,9 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
- 	}
- 	if (mode != -1) {
- 		printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
--		hwif->dma_off_quietly(drive);
--		if (ide_set_dma_mode(drive, mode) == 0)
--			hwif->dma_host_on(drive);
-+		ide_dma_off_quietly(drive);
-+		if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma)
-+			hwif->dma_host_set(drive, 1);
- 		return;
- 	}
- 
-@@ -260,66 +230,39 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
- }
- 
- #ifdef CONFIG_PM
--static ide_hwif_t *lookup_pci_dev (ide_hwif_t *prev, struct pci_dev *dev)
+-	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) {
+-		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
+-		       __FUNCTION__,
+-		       svm->vmcb->save.rip,
+-		       svm->next_rip);
+-	}
+-
+-	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
+-	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+-
+-	vcpu->interrupt_window_open = 1;
+-}
+-
+-static int has_svm(void)
 -{
--	int	h;
+-	uint32_t eax, ebx, ecx, edx;
 -
--	for (h = 0; h < MAX_HWIFS; h++) {
--		ide_hwif_t *hwif = &ide_hwifs[h];
--		if (prev) {
--			if (hwif == prev)
--				prev = NULL;	// found previous, now look for next match
--		} else {
--			if (hwif && hwif->pci_dev == dev)
--				return hwif;	// found next match
--		}
+-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+-		printk(KERN_INFO "has_svm: not amd\n");
+-		return 0;
 -	}
--	return NULL;	// not found
+-
+-	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
+-	if (eax < SVM_CPUID_FUNC) {
+-		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
+-		return 0;
+-	}
+-
+-	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+-	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
+-		printk(KERN_DEBUG "has_svm: svm not available\n");
+-		return 0;
+-	}
+-	return 1;
 -}
 -
--typedef struct sc1200_saved_state_s {
--	__u32		regs[4];
--} sc1200_saved_state_t;
+-static void svm_hardware_disable(void *garbage)
+-{
+-	struct svm_cpu_data *svm_data
+-		= per_cpu(svm_data, raw_smp_processor_id());
 -
-+struct sc1200_saved_state {
-+	u32 regs[8];
-+};
- 
- static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
- {
--	ide_hwif_t		*hwif = NULL;
+-	if (svm_data) {
+-		uint64_t efer;
 -
- 	printk("SC1200: suspend(%u)\n", state.event);
- 
-+	/*
-+	 * we only save state when going from full power to less
-+	 */
- 	if (state.event == PM_EVENT_ON) {
--		// we only save state when going from full power to less
+-		wrmsrl(MSR_VM_HSAVE_PA, 0);
+-		rdmsrl(MSR_EFER, efer);
+-		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
+-		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+-		__free_page(svm_data->save_area);
+-		kfree(svm_data);
+-	}
+-}
 -
--		//
--		// Loop over all interfaces that are part of this PCI device:
--		//
--		while ((hwif = lookup_pci_dev(hwif, dev)) != NULL) {
--			sc1200_saved_state_t	*ss;
--			unsigned int		basereg, r;
--			//
--			// allocate a permanent save area, if not already allocated
--			//
--			ss = (sc1200_saved_state_t *)hwif->config_data;
--			if (ss == NULL) {
--				ss = kmalloc(sizeof(sc1200_saved_state_t), GFP_KERNEL);
--				if (ss == NULL)
--					return -ENOMEM;
--				hwif->config_data = (unsigned long)ss;
--			}
--			ss = (sc1200_saved_state_t *)hwif->config_data;
--			//
--			// Save timing registers:  this may be unnecessary if 
--			// BIOS also does it
--			//
--			basereg = hwif->channel ? 0x50 : 0x40;
--			for (r = 0; r < 4; ++r) {
--				pci_read_config_dword (hwif->pci_dev, basereg + (r<<2), &ss->regs[r]);
--			}
-+		struct sc1200_saved_state *ss;
-+		unsigned int r;
-+
-+		/*
-+		 * allocate a permanent save area, if not already allocated
-+		 */
-+		ss = (struct sc1200_saved_state *)pci_get_drvdata(dev);
-+		if (ss == NULL) {
-+			ss = kmalloc(sizeof(*ss), GFP_KERNEL);
-+			if (ss == NULL)
-+				return -ENOMEM;
-+			pci_set_drvdata(dev, ss);
- 		}
+-static void svm_hardware_enable(void *garbage)
+-{
+-
+-	struct svm_cpu_data *svm_data;
+-	uint64_t efer;
+-#ifdef CONFIG_X86_64
+-	struct desc_ptr gdt_descr;
+-#else
+-	struct Xgt_desc_struct gdt_descr;
+-#endif
+-	struct desc_struct *gdt;
+-	int me = raw_smp_processor_id();
+-
+-	if (!has_svm()) {
+-		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
+-		return;
 -	}
- 
--	/* You don't need to iterate over disks -- sysfs should have done that for you already */ 
-+		/*
-+		 * save timing registers
-+		 * (this may be unnecessary if BIOS also does it)
-+		 */
-+		for (r = 0; r < 8; r++)
-+			pci_read_config_dword(dev, 0x40 + r * 4, &ss->regs[r]);
-+	}
- 
- 	pci_disable_device(dev);
- 	pci_set_power_state(dev, pci_choose_state(dev, state));
-@@ -328,30 +271,25 @@ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
- 
- static int sc1200_resume (struct pci_dev *dev)
- {
--	ide_hwif_t	*hwif = NULL;
--	int		i;
-+	struct sc1200_saved_state *ss;
-+	unsigned int r;
-+	int i;
- 
- 	i = pci_enable_device(dev);
- 	if (i)
- 		return i;
- 
--	//
--	// loop over all interfaces that are part of this pci device:
--	//
--	while ((hwif = lookup_pci_dev(hwif, dev)) != NULL) {
--		unsigned int		basereg, r;
--		sc1200_saved_state_t	*ss = (sc1200_saved_state_t *)hwif->config_data;
+-	svm_data = per_cpu(svm_data, me);
 -
--		//
--		// Restore timing registers:  this may be unnecessary if BIOS also does it
--		//
--		basereg = hwif->channel ? 0x50 : 0x40;
--		if (ss != NULL) {
--			for (r = 0; r < 4; ++r) {
--				pci_write_config_dword(hwif->pci_dev, basereg + (r<<2), ss->regs[r]);
--			}
--		}
-+	ss = (struct sc1200_saved_state *)pci_get_drvdata(dev);
-+
-+	/*
-+	 * restore timing registers
-+	 * (this may be unnecessary if BIOS also does it)
-+	 */
-+	if (ss) {
-+		for (r = 0; r < 8; r++)
-+			pci_write_config_dword(dev, 0x40 + r * 4, ss->regs[r]);
- 	}
-+
- 	return 0;
- }
- #endif
-diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
-index ebb7132..24a85bb 100644
---- a/drivers/ide/pci/scc_pata.c
-+++ b/drivers/ide/pci/scc_pata.c
-@@ -254,19 +254,7 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 		offset = 0; /* 100MHz */
- 	}
- 
--	switch (speed) {
--	case XFER_UDMA_6:
--	case XFER_UDMA_5:
--	case XFER_UDMA_4:
--	case XFER_UDMA_3:
--	case XFER_UDMA_2:
--	case XFER_UDMA_1:
--	case XFER_UDMA_0:
--		idx = speed - XFER_UDMA_0;
--		break;
--	default:
+-	if (!svm_data) {
+-		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
+-		       me);
 -		return;
 -	}
-+	idx = speed - XFER_UDMA_0;
- 
- 	jcactsel = JCACTSELtbl[offset][idx];
- 	if (is_slave) {
-diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
-index a728031..877c09b 100644
---- a/drivers/ide/pci/serverworks.c
-+++ b/drivers/ide/pci/serverworks.c
-@@ -164,25 +164,12 @@ static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 	ultra_timing	&= ~(0x0F << (4*unit));
- 	ultra_enable	&= ~(0x01 << drive->dn);
- 
--	switch(speed) {
--		case XFER_MW_DMA_2:
--		case XFER_MW_DMA_1:
--		case XFER_MW_DMA_0:
--			dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
--			break;
 -
--		case XFER_UDMA_5:
--		case XFER_UDMA_4:
--		case XFER_UDMA_3:
--		case XFER_UDMA_2:
--		case XFER_UDMA_1:
--		case XFER_UDMA_0:
--			dma_timing   |= dma_modes[2];
--			ultra_timing |= ((udma_modes[speed - XFER_UDMA_0]) << (4*unit));
--			ultra_enable |= (0x01 << drive->dn);
--		default:
--			break;
+-	svm_data->asid_generation = 1;
+-	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+-	svm_data->next_asid = svm_data->max_asid + 1;
+-	svm_features = cpuid_edx(SVM_CPUID_FUNC);
+-
+-	asm volatile ( "sgdt %0" : "=m"(gdt_descr) );
+-	gdt = (struct desc_struct *)gdt_descr.address;
+-	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+-
+-	rdmsrl(MSR_EFER, efer);
+-	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);
+-
+-	wrmsrl(MSR_VM_HSAVE_PA,
+-	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+-}
+-
+-static int svm_cpu_init(int cpu)
+-{
+-	struct svm_cpu_data *svm_data;
+-	int r;
+-
+-	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
+-	if (!svm_data)
+-		return -ENOMEM;
+-	svm_data->cpu = cpu;
+-	svm_data->save_area = alloc_page(GFP_KERNEL);
+-	r = -ENOMEM;
+-	if (!svm_data->save_area)
+-		goto err_1;
+-
+-	per_cpu(svm_data, cpu) = svm_data;
+-
+-	return 0;
+-
+-err_1:
+-	kfree(svm_data);
+-	return r;
+-
+-}
+-
+-static void set_msr_interception(u32 *msrpm, unsigned msr,
+-				 int read, int write)
+-{
+-	int i;
+-
+-	for (i = 0; i < NUM_MSR_MAPS; i++) {
+-		if (msr >= msrpm_ranges[i] &&
+-		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
+-			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
+-					  msrpm_ranges[i]) * 2;
+-
+-			u32 *base = msrpm + (msr_offset / 32);
+-			u32 msr_shift = msr_offset % 32;
+-			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
+-			*base = (*base & ~(0x3 << msr_shift)) |
+-				(mask << msr_shift);
+-			return;
+-		}
 -	}
-+	if (speed >= XFER_UDMA_0) {
-+		dma_timing   |= dma_modes[2];
-+		ultra_timing |= (udma_modes[speed - XFER_UDMA_0] << (4 * unit));
-+		ultra_enable |= (0x01 << drive->dn);
-+	} else if (speed >= XFER_MW_DMA_0)
-+		dma_timing   |= dma_modes[speed - XFER_MW_DMA_0];
- 
- 	pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
- 	pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
-@@ -366,12 +353,17 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
- 	}
- }
- 
-+#define IDE_HFLAGS_SVWKS \
-+	(IDE_HFLAG_LEGACY_IRQS | \
-+	 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
-+	 IDE_HFLAG_BOOTABLE)
-+
- static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
- 	{	/* 0 */
- 		.name		= "SvrWks OSB4",
- 		.init_chipset	= init_chipset_svwks,
- 		.init_hwif	= init_hwif_svwks,
--		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
-+		.host_flags	= IDE_HFLAGS_SVWKS,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= 0x00, /* UDMA is problematic on OSB4 */
-@@ -379,7 +371,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
- 		.name		= "SvrWks CSB5",
- 		.init_chipset	= init_chipset_svwks,
- 		.init_hwif	= init_hwif_svwks,
--		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
-+		.host_flags	= IDE_HFLAGS_SVWKS,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= ATA_UDMA5,
-@@ -387,7 +379,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
- 		.name		= "SvrWks CSB6",
- 		.init_chipset	= init_chipset_svwks,
- 		.init_hwif	= init_hwif_svwks,
--		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
-+		.host_flags	= IDE_HFLAGS_SVWKS,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= ATA_UDMA5,
-@@ -395,8 +387,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
- 		.name		= "SvrWks CSB6",
- 		.init_chipset	= init_chipset_svwks,
- 		.init_hwif	= init_hwif_svwks,
--		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_SINGLE |
--				  IDE_HFLAG_BOOTABLE,
-+		.host_flags	= IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= ATA_UDMA5,
-@@ -404,8 +395,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
- 		.name		= "SvrWks HT1000",
- 		.init_chipset	= init_chipset_svwks,
- 		.init_hwif	= init_hwif_svwks,
--		.host_flags	= IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_SINGLE |
--				  IDE_HFLAG_BOOTABLE,
-+		.host_flags	= IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
- 		.pio_mask	= ATA_PIO4,
- 		.mwdma_mask	= ATA_MWDMA2,
- 		.udma_mask	= ATA_UDMA5,
-diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
-index de820aa..9e0be7d 100644
---- a/drivers/ide/pci/sgiioc4.c
-+++ b/drivers/ide/pci/sgiioc4.c
-@@ -277,21 +277,6 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
- 	return dma_stat;
- }
- 
--static int
--sgiioc4_ide_dma_on(ide_drive_t * drive)
+-	BUG();
+-}
+-
+-static __init int svm_hardware_setup(void)
 -{
--	drive->using_dma = 1;
+-	int cpu;
+-	struct page *iopm_pages;
+-	struct page *msrpm_pages;
+-	void *iopm_va, *msrpm_va;
+-	int r;
+-
+-	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
+-
+-	if (!iopm_pages)
+-		return -ENOMEM;
+-
+-	iopm_va = page_address(iopm_pages);
+-	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+-	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
+-	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
 -
+-
+-	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+-
+-	r = -ENOMEM;
+-	if (!msrpm_pages)
+-		goto err_1;
+-
+-	msrpm_va = page_address(msrpm_pages);
+-	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+-	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
+-
+-#ifdef CONFIG_X86_64
+-	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
+-	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
+-	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
+-	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
+-	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
+-	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
+-#endif
+-	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
+-	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
+-	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
+-	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
+-
+-	for_each_online_cpu(cpu) {
+-		r = svm_cpu_init(cpu);
+-		if (r)
+-			goto err_2;
+-	}
 -	return 0;
+-
+-err_2:
+-	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+-	msrpm_base = 0;
+-err_1:
+-	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
+-	iopm_base = 0;
+-	return r;
 -}
 -
--static void sgiioc4_dma_off_quietly(ide_drive_t *drive)
+-static __exit void svm_hardware_unsetup(void)
 -{
--	drive->using_dma = 0;
+-	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
+-	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
+-	iopm_base = msrpm_base = 0;
+-}
 -
--	drive->hwif->dma_host_off(drive);
+-static void init_seg(struct vmcb_seg *seg)
+-{
+-	seg->selector = 0;
+-	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
+-		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
+-	seg->limit = 0xffff;
+-	seg->base = 0;
 -}
 -
- static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
- {
- }
-@@ -303,13 +288,10 @@ sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
- 	return sgiioc4_checkirq(HWIF(drive));
- }
- 
--static void sgiioc4_dma_host_on(ide_drive_t * drive)
+-static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
 -{
+-	seg->selector = 0;
+-	seg->attrib = SVM_SELECTOR_P_MASK | type;
+-	seg->limit = 0xffff;
+-	seg->base = 0;
 -}
 -
--static void sgiioc4_dma_host_off(ide_drive_t * drive)
-+static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
- {
--	sgiioc4_clearirq(drive);
-+	if (!on)
-+		sgiioc4_clearirq(drive);
- }
- 
- static void
-@@ -582,7 +564,6 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
- 	hwif->pre_reset = NULL;	/* No HBA specific pre_set needed */
- 	hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
- 						clear interrupts */
--	hwif->intrproc = NULL;	/* Enable or Disable interrupt from drive */
- 	hwif->maskproc = &sgiioc4_maskproc;	/* Mask on/off NIEN register */
- 	hwif->quirkproc = NULL;
- 	hwif->busproc = NULL;
-@@ -594,14 +575,11 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
- 
- 	hwif->mwdma_mask = ATA_MWDMA2_ONLY;
- 
-+	hwif->dma_host_set = &sgiioc4_dma_host_set;
- 	hwif->dma_setup = &sgiioc4_ide_dma_setup;
- 	hwif->dma_start = &sgiioc4_ide_dma_start;
- 	hwif->ide_dma_end = &sgiioc4_ide_dma_end;
--	hwif->ide_dma_on = &sgiioc4_ide_dma_on;
--	hwif->dma_off_quietly = &sgiioc4_dma_off_quietly;
- 	hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
--	hwif->dma_host_on = &sgiioc4_dma_host_on;
--	hwif->dma_host_off = &sgiioc4_dma_host_off;
- 	hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
- 	hwif->dma_timeout = &ide_dma_timeout;
- }
-@@ -615,6 +593,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
- 	ide_hwif_t *hwif;
- 	int h;
- 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-+	hw_regs_t hw;
- 
- 	/*
- 	 * Find an empty HWIF; if none available, return -ENOMEM.
-@@ -654,21 +633,16 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
- 		return -ENOMEM;
- 	}
- 
--	if (hwif->io_ports[IDE_DATA_OFFSET] != cmd_base) {
--		hw_regs_t hw;
+-static void init_vmcb(struct vmcb *vmcb)
+-{
+-	struct vmcb_control_area *control = &vmcb->control;
+-	struct vmcb_save_area *save = &vmcb->save;
 -
--		/* Initialize the IO registers */
--		memset(&hw, 0, sizeof(hw));
--		sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
--		memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
--		hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
--	}
-+	/* Initialize the IO registers */
-+	memset(&hw, 0, sizeof(hw));
-+	sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
-+	hw.irq = dev->irq;
-+	hw.chipset = ide_pci;
-+	hw.dev = &dev->dev;
-+	ide_init_port_hw(hwif, &hw);
- 
--	hwif->irq = dev->irq;
--	hwif->chipset = ide_pci;
- 	hwif->pci_dev = dev;
- 	hwif->channel = 0;	/* Single Channel chip */
--	hwif->gendev.parent = &dev->dev;/* setup proper ancestral information */
- 
- 	/* The IOC4 uses MMIO rather than Port IO. */
- 	default_hwif_mmiops(hwif);
-diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
-index 5709c25..908f37b 100644
---- a/drivers/ide/pci/siimage.c
-+++ b/drivers/ide/pci/siimage.c
-@@ -278,27 +278,14 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 
- 	scsc = is_sata(hwif) ? 1 : scsc;
- 
--	switch(speed) {
--		case XFER_MW_DMA_2:
--		case XFER_MW_DMA_1:
--		case XFER_MW_DMA_0:
--			multi = dma[speed - XFER_MW_DMA_0];
--			mode |= ((unit) ? 0x20 : 0x02);
--			break;
--		case XFER_UDMA_6:
--		case XFER_UDMA_5:
--		case XFER_UDMA_4:
--		case XFER_UDMA_3:
--		case XFER_UDMA_2:
--		case XFER_UDMA_1:
--		case XFER_UDMA_0:
--			multi = dma[2];
--			ultra |= ((scsc) ? (ultra6[speed - XFER_UDMA_0]) :
--					   (ultra5[speed - XFER_UDMA_0]));
--			mode |= ((unit) ? 0x30 : 0x03);
--			break;
--		default:
--			return;
-+	if (speed >= XFER_UDMA_0) {
-+		multi = dma[2];
-+		ultra |= (scsc ? ultra6[speed - XFER_UDMA_0] :
-+				 ultra5[speed - XFER_UDMA_0]);
-+		mode |= (unit ? 0x30 : 0x03);
-+	} else {
-+		multi = dma[speed - XFER_MW_DMA_0];
-+		mode |= (unit ? 0x20 : 0x02);
- 	}
- 
- 	if (hwif->mmio) {
-@@ -726,9 +713,6 @@ static int is_dev_seagate_sata(ide_drive_t *drive)
- 	const char *s = &drive->id->model[0];
- 	unsigned len;
- 
--	if (!drive->present)
--		return 0;
+-	control->intercept_cr_read = 	INTERCEPT_CR0_MASK |
+-					INTERCEPT_CR3_MASK |
+-					INTERCEPT_CR4_MASK;
 -
- 	len = strnlen(s, sizeof(drive->id->model));
- 
- 	if ((len > 4) && (!memcmp(s, "ST", 2))) {
-@@ -743,18 +727,20 @@ static int is_dev_seagate_sata(ide_drive_t *drive)
- }
- 
- /**
-- *	siimage_fixup		-	post probe fixups
-- *	@hwif: interface to fix up
-+ *	sil_quirkproc		-	post probe fixups
-+ *	@drive: drive
-  *
-  *	Called after drive probe we use this to decide whether the
-  *	Seagate fixup must be applied. This used to be in init_iops but
-  *	that can occur before we know what drives are present.
-  */
- 
--static void __devinit siimage_fixup(ide_hwif_t *hwif)
-+static void __devinit sil_quirkproc(ide_drive_t *drive)
- {
-+	ide_hwif_t *hwif = drive->hwif;
-+
- 	/* Try and raise the rqsize */
--	if (!is_sata(hwif) || !is_dev_seagate_sata(&hwif->drives[0]))
-+	if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
- 		hwif->rqsize = 128;
- }
- 
-@@ -817,6 +803,7 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
- 
- 	hwif->set_pio_mode = &sil_set_pio_mode;
- 	hwif->set_dma_mode = &sil_set_dma_mode;
-+	hwif->quirkproc = &sil_quirkproc;
- 
- 	if (sata) {
- 		static int first = 1;
-@@ -855,7 +842,6 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
- 		.init_chipset	= init_chipset_siimage,	\
- 		.init_iops	= init_iops_siimage,	\
- 		.init_hwif	= init_hwif_siimage,	\
--		.fixup		= siimage_fixup,	\
- 		.host_flags	= IDE_HFLAG_BOOTABLE,	\
- 		.pio_mask	= ATA_PIO4,		\
- 		.mwdma_mask	= ATA_MWDMA2,		\
-diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
-index d90b429..85d3699 100644
---- a/drivers/ide/pci/sis5513.c
-+++ b/drivers/ide/pci/sis5513.c
-@@ -305,59 +305,56 @@ static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
- 	sis_program_timings(drive, XFER_PIO_0 + pio);
- }
- 
--static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
-+static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode)
- {
--	ide_hwif_t *hwif	= HWIF(drive);
--	struct pci_dev *dev	= hwif->pci_dev;
-+	struct pci_dev *dev = drive->hwif->pci_dev;
-+	u32 regdw = 0;
-+	u8 drive_pci = sis_ata133_get_base(drive), clk, idx;
- 
--	/* Config chip for mode */
--	switch(speed) {
--		case XFER_UDMA_6:
--		case XFER_UDMA_5:
--		case XFER_UDMA_4:
--		case XFER_UDMA_3:
--		case XFER_UDMA_2:
--		case XFER_UDMA_1:
--		case XFER_UDMA_0:
--			if (chipset_family >= ATA_133) {
--				u32 regdw = 0;
--				u8 drive_pci = sis_ata133_get_base(drive);
+-	control->intercept_cr_write = 	INTERCEPT_CR0_MASK |
+-					INTERCEPT_CR3_MASK |
+-					INTERCEPT_CR4_MASK;
 -
--				pci_read_config_dword(dev, drive_pci, &regdw);
--				regdw |= 0x04;
--				regdw &= 0xfffff00f;
--				/* check if ATA133 enable */
--				if (regdw & 0x08) {
--					regdw |= (unsigned long)cycle_time_value[ATA_133][speed-XFER_UDMA_0] << 4;
--					regdw |= (unsigned long)cvs_time_value[ATA_133][speed-XFER_UDMA_0] << 8;
--				} else {
--					regdw |= (unsigned long)cycle_time_value[ATA_100][speed-XFER_UDMA_0] << 4;
--					regdw |= (unsigned long)cvs_time_value[ATA_100][speed-XFER_UDMA_0] << 8;
--				}
--				pci_write_config_dword(dev, (unsigned long)drive_pci, regdw);
--			} else {
--				u8 drive_pci = 0x40 + drive->dn * 2, reg = 0;
+-	control->intercept_dr_read = 	INTERCEPT_DR0_MASK |
+-					INTERCEPT_DR1_MASK |
+-					INTERCEPT_DR2_MASK |
+-					INTERCEPT_DR3_MASK;
 -
--				pci_read_config_byte(dev, drive_pci+1, &reg);
--				/* Force the UDMA bit on if we want to use UDMA */
--				reg |= 0x80;
--				/* clean reg cycle time bits */
--				reg &= ~((0xFF >> (8 - cycle_time_range[chipset_family]))
--					 << cycle_time_offset[chipset_family]);
--				/* set reg cycle time bits */
--				reg |= cycle_time_value[chipset_family][speed-XFER_UDMA_0]
--					<< cycle_time_offset[chipset_family];
--				pci_write_config_byte(dev, drive_pci+1, reg);
--			}
--			break;
--		case XFER_MW_DMA_2:
--		case XFER_MW_DMA_1:
--		case XFER_MW_DMA_0:
--			sis_program_timings(drive, speed);
--			break;
--		default:
--			break;
--	}
-+	pci_read_config_dword(dev, drive_pci, &regdw);
-+
-+	regdw |= 0x04;
-+	regdw &= 0xfffff00f;
-+	/* check if ATA133 enable */
-+	clk = (regdw & 0x08) ? ATA_133 : ATA_100;
-+	idx = mode - XFER_UDMA_0;
-+	regdw |= cycle_time_value[clk][idx] << 4;
-+	regdw |= cvs_time_value[clk][idx] << 8;
-+
-+	pci_write_config_dword(dev, drive_pci, regdw);
-+}
-+
-+static void sis_ata33_program_udma_timings(ide_drive_t *drive, const u8 mode)
-+{
-+	struct pci_dev *dev = drive->hwif->pci_dev;
-+	u8 drive_pci = 0x40 + drive->dn * 2, reg = 0, i = chipset_family;
-+
-+	pci_read_config_byte(dev, drive_pci + 1, &reg);
-+
-+	/* force the UDMA bit on if we want to use UDMA */
-+	reg |= 0x80;
-+	/* clean reg cycle time bits */
-+	reg &= ~((0xff >> (8 - cycle_time_range[i])) << cycle_time_offset[i]);
-+	/* set reg cycle time bits */
-+	reg |= cycle_time_value[i][mode - XFER_UDMA_0] << cycle_time_offset[i];
-+
-+	pci_write_config_byte(dev, drive_pci + 1, reg);
-+}
-+
-+static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode)
-+{
-+	if (chipset_family >= ATA_133)	/* ATA_133 */
-+		sis_ata133_program_udma_timings(drive, mode);
-+	else				/* ATA_33/66/100a/100/133a */
-+		sis_ata33_program_udma_timings(drive, mode);
-+}
-+
-+static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
-+{
-+	if (speed >= XFER_UDMA_0)
-+		sis_program_udma_timings(drive, speed);
-+	else
-+		sis_program_timings(drive, speed);
- }
- 
- static u8 sis5513_ata133_udma_filter(ide_drive_t *drive)
-diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
-index 147d783..c7a125b 100644
---- a/drivers/ide/pci/sl82c105.c
-+++ b/drivers/ide/pci/sl82c105.c
-@@ -13,6 +13,7 @@
-  *  -- Benjamin Herrenschmidt (01/11/03) benh at kernel.crashing.org
-  *
-  * Copyright (C) 2006-2007 MontaVista Software, Inc. <source at mvista.com>
-+ * Copyright (C)      2007 Bartlomiej Zolnierkiewicz
-  */
- 
- #include <linux/types.h>
-@@ -90,14 +91,8 @@ static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
- 	drive->drive_data &= 0xffff0000;
- 	drive->drive_data |= drv_ctrl;
- 
--	if (!drive->using_dma) {
--		/*
--		 * If we are actually using MW DMA, then we can not
--		 * reprogram the interface drive control register.
--		 */
--		pci_write_config_word(dev, reg,  drv_ctrl);
--		pci_read_config_word (dev, reg, &drv_ctrl);
--	}
-+	pci_write_config_word(dev, reg,  drv_ctrl);
-+	pci_read_config_word (dev, reg, &drv_ctrl);
- 
- 	printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name,
- 			  ide_xfer_verbose(pio + XFER_PIO_0),
-@@ -115,33 +110,14 @@ static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed)
-  	DBG(("sl82c105_tune_chipset(drive:%s, speed:%s)\n",
- 	     drive->name, ide_xfer_verbose(speed)));
- 
--	switch (speed) {
--	case XFER_MW_DMA_2:
--	case XFER_MW_DMA_1:
--	case XFER_MW_DMA_0:
--		drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
-+	drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
- 
--		/*
--		 * Store the DMA timings so that we can actually program
--		 * them when DMA will be turned on...
--		 */
--		drive->drive_data &= 0x0000ffff;
--		drive->drive_data |= (unsigned long)drv_ctrl << 16;
+-	control->intercept_dr_write = 	INTERCEPT_DR0_MASK |
+-					INTERCEPT_DR1_MASK |
+-					INTERCEPT_DR2_MASK |
+-					INTERCEPT_DR3_MASK |
+-					INTERCEPT_DR5_MASK |
+-					INTERCEPT_DR7_MASK;
 -
+-	control->intercept_exceptions = 1 << PF_VECTOR;
+-
+-
+-	control->intercept = 	(1ULL << INTERCEPT_INTR) |
+-				(1ULL << INTERCEPT_NMI) |
+-				(1ULL << INTERCEPT_SMI) |
 -		/*
--		 * If we are already using DMA, we just reprogram
--		 * the drive control register.
+-		 * selective cr0 intercept bug?
+-		 *    	0:   0f 22 d8                mov    %eax,%cr3
+-		 *	3:   0f 20 c0                mov    %cr0,%eax
+-		 *	6:   0d 00 00 00 80          or     $0x80000000,%eax
+-		 *	b:   0f 22 c0                mov    %eax,%cr0
+-		 * set cr3 ->interception
+-		 * get cr0 ->interception
+-		 * set cr0 -> no interception
 -		 */
--		if (drive->using_dma) {
--			struct pci_dev *dev	= HWIF(drive)->pci_dev;
--			int reg 		= 0x44 + drive->dn * 4;
+-		/*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
+-				(1ULL << INTERCEPT_CPUID) |
+-				(1ULL << INTERCEPT_INVD) |
+-				(1ULL << INTERCEPT_HLT) |
+-				(1ULL << INTERCEPT_INVLPGA) |
+-				(1ULL << INTERCEPT_IOIO_PROT) |
+-				(1ULL << INTERCEPT_MSR_PROT) |
+-				(1ULL << INTERCEPT_TASK_SWITCH) |
+-				(1ULL << INTERCEPT_SHUTDOWN) |
+-				(1ULL << INTERCEPT_VMRUN) |
+-				(1ULL << INTERCEPT_VMMCALL) |
+-				(1ULL << INTERCEPT_VMLOAD) |
+-				(1ULL << INTERCEPT_VMSAVE) |
+-				(1ULL << INTERCEPT_STGI) |
+-				(1ULL << INTERCEPT_CLGI) |
+-				(1ULL << INTERCEPT_SKINIT) |
+-				(1ULL << INTERCEPT_WBINVD) |
+-				(1ULL << INTERCEPT_MONITOR) |
+-				(1ULL << INTERCEPT_MWAIT);
+-
+-	control->iopm_base_pa = iopm_base;
+-	control->msrpm_base_pa = msrpm_base;
+-	control->tsc_offset = 0;
+-	control->int_ctl = V_INTR_MASKING_MASK;
+-
+-	init_seg(&save->es);
+-	init_seg(&save->ss);
+-	init_seg(&save->ds);
+-	init_seg(&save->fs);
+-	init_seg(&save->gs);
+-
+-	save->cs.selector = 0xf000;
+-	/* Executable/Readable Code Segment */
+-	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
+-		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
+-	save->cs.limit = 0xffff;
+-	/*
+-	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
+-	 * be consistent with it.
+-	 *
+-	 * Replace when we have real mode working for vmx.
+-	 */
+-	save->cs.base = 0xf0000;
 -
--			pci_write_config_word(dev, reg, drv_ctrl);
--		}
--		break;
--	default:
--		return;
+-	save->gdtr.limit = 0xffff;
+-	save->idtr.limit = 0xffff;
+-
+-	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
+-	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
+-
+-	save->efer = MSR_EFER_SVME_MASK;
+-
+-        save->dr6 = 0xffff0ff0;
+-	save->dr7 = 0x400;
+-	save->rflags = 2;
+-	save->rip = 0x0000fff0;
+-
+-	/*
+-	 * cr0 val on cpu init should be 0x60000010, we enable cpu
+-	 * cache by default. the orderly way is to enable cache in bios.
+-	 */
+-	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
+-	save->cr4 = X86_CR4_PAE;
+-	/* rdx = ?? */
+-}
+-
+-static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	init_vmcb(svm->vmcb);
+-
+-	if (vcpu->vcpu_id != 0) {
+-		svm->vmcb->save.rip = 0;
+-		svm->vmcb->save.cs.base = svm->vcpu.sipi_vector << 12;
+-		svm->vmcb->save.cs.selector = svm->vcpu.sipi_vector << 8;
 -	}
-+	/*
-+	 * Store the DMA timings so that we can actually program
-+	 * them when DMA will be turned on...
-+	 */
-+	drive->drive_data &= 0x0000ffff;
-+	drive->drive_data |= (unsigned long)drv_ctrl << 16;
- }
- 
- /*
-@@ -209,6 +185,11 @@ static void sl82c105_dma_start(ide_drive_t *drive)
- {
- 	ide_hwif_t *hwif	= HWIF(drive);
- 	struct pci_dev *dev	= hwif->pci_dev;
-+	int reg 		= 0x44 + drive->dn * 4;
-+
-+	DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
-+
-+	pci_write_config_word(dev, reg, drive->drive_data >> 16);
- 
- 	sl82c105_reset_host(dev);
- 	ide_dma_start(drive);
-@@ -222,64 +203,24 @@ static void sl82c105_dma_timeout(ide_drive_t *drive)
- 	ide_dma_timeout(drive);
- }
- 
--static int sl82c105_ide_dma_on(ide_drive_t *drive)
+-}
+-
+-static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 -{
--	struct pci_dev *dev	= HWIF(drive)->pci_dev;
--	int rc, reg 		= 0x44 + drive->dn * 4;
+-	struct vcpu_svm *svm;
+-	struct page *page;
+-	int err;
 -
--	DBG(("sl82c105_ide_dma_on(drive:%s)\n", drive->name));
+-	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+-	if (!svm) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
 -
--	rc = __ide_dma_on(drive);
--	if (rc == 0) {
--		pci_write_config_word(dev, reg, drive->drive_data >> 16);
+-	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
+-	if (err)
+-		goto free_svm;
 -
--		printk(KERN_INFO "%s: DMA enabled\n", drive->name);
+-	if (irqchip_in_kernel(kvm)) {
+-		err = kvm_create_lapic(&svm->vcpu);
+-		if (err < 0)
+-			goto free_svm;
 -	}
--	return rc;
+-
+-	page = alloc_page(GFP_KERNEL);
+-	if (!page) {
+-		err = -ENOMEM;
+-		goto uninit;
+-	}
+-
+-	svm->vmcb = page_address(page);
+-	clear_page(svm->vmcb);
+-	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
+-	svm->asid_generation = 0;
+-	memset(svm->db_regs, 0, sizeof(svm->db_regs));
+-	init_vmcb(svm->vmcb);
+-
+-	fx_init(&svm->vcpu);
+-	svm->vcpu.fpu_active = 1;
+-	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+-	if (svm->vcpu.vcpu_id == 0)
+-		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
+-
+-	return &svm->vcpu;
+-
+-uninit:
+-	kvm_vcpu_uninit(&svm->vcpu);
+-free_svm:
+-	kmem_cache_free(kvm_vcpu_cache, svm);
+-out:
+-	return ERR_PTR(err);
 -}
 -
--static void sl82c105_dma_off_quietly(ide_drive_t *drive)
-+static int sl82c105_dma_end(ide_drive_t *drive)
- {
- 	struct pci_dev *dev	= HWIF(drive)->pci_dev;
- 	int reg 		= 0x44 + drive->dn * 4;
-+	int ret;
- 
--	DBG(("sl82c105_dma_off_quietly(drive:%s)\n", drive->name));
-+	DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
- 
--	pci_write_config_word(dev, reg, drive->drive_data);
-+	ret = __ide_dma_end(drive);
- 
--	ide_dma_off_quietly(drive);
+-static void svm_free_vcpu(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+-	kvm_vcpu_uninit(vcpu);
+-	kmem_cache_free(kvm_vcpu_cache, svm);
 -}
-+	pci_write_config_word(dev, reg, drive->drive_data);
- 
--/*
-- * Ok, that is nasty, but we must make sure the DMA timings
-- * won't be used for a PIO access. The solution here is
-- * to make sure the 16 bits mode is diabled on the channel
-- * when DMA is enabled, thus causing the chip to use PIO0
-- * timings for those operations.
-- */
--static void sl82c105_selectproc(ide_drive_t *drive)
+-
+-static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 -{
--	ide_hwif_t *hwif	= HWIF(drive);
--	struct pci_dev *dev	= hwif->pci_dev;
--	u32 val, old, mask;
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	int i;
 -
--	//DBG(("sl82c105_selectproc(drive:%s)\n", drive->name));
+-	if (unlikely(cpu != vcpu->cpu)) {
+-		u64 tsc_this, delta;
 -
--	mask = hwif->channel ? CTRL_P1F16 : CTRL_P0F16;
--	old = val = (u32)pci_get_drvdata(dev);
--	if (drive->using_dma)
--		val &= ~mask;
--	else
--		val |= mask;
--	if (old != val) {
--		pci_write_config_dword(dev, 0x40, val);	
--		pci_set_drvdata(dev, (void *)val);
--	}
-+	return ret;
- }
- 
- /*
-  * ATA reset will clear the 16 bits mode in the control
-- * register, we need to update our cache
-+ * register, we need to reprogram it
-  */
- static void sl82c105_resetproc(ide_drive_t *drive)
- {
-@@ -289,7 +230,8 @@ static void sl82c105_resetproc(ide_drive_t *drive)
- 	DBG(("sl82c105_resetproc(drive:%s)\n", drive->name));
- 
- 	pci_read_config_dword(dev, 0x40, &val);
--	pci_set_drvdata(dev, (void *)val);
-+	val |= (CTRL_P1F16 | CTRL_P0F16);
-+	pci_write_config_dword(dev, 0x40, val);
- }
- 
- /*
-@@ -342,7 +284,6 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c
- 	pci_read_config_dword(dev, 0x40, &val);
- 	val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
- 	pci_write_config_dword(dev, 0x40, val);
--	pci_set_drvdata(dev, (void *)val);
- 
- 	return dev->irq;
- }
-@@ -358,7 +299,6 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
- 
- 	hwif->set_pio_mode	= &sl82c105_set_pio_mode;
- 	hwif->set_dma_mode	= &sl82c105_set_dma_mode;
--	hwif->selectproc	= &sl82c105_selectproc;
- 	hwif->resetproc 	= &sl82c105_resetproc;
- 
- 	if (!hwif->dma_base)
-@@ -377,10 +317,9 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
- 
- 	hwif->mwdma_mask = ATA_MWDMA2;
- 
--	hwif->ide_dma_on		= &sl82c105_ide_dma_on;
--	hwif->dma_off_quietly		= &sl82c105_dma_off_quietly;
- 	hwif->dma_lost_irq		= &sl82c105_dma_lost_irq;
- 	hwif->dma_start			= &sl82c105_dma_start;
-+	hwif->ide_dma_end		= &sl82c105_dma_end;
- 	hwif->dma_timeout		= &sl82c105_dma_timeout;
- 
- 	if (hwif->mate)
-diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
-index eb4445b..dbbb468 100644
---- a/drivers/ide/pci/slc90e66.c
-+++ b/drivers/ide/pci/slc90e66.c
-@@ -91,19 +91,9 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 	pci_read_config_word(dev, 0x48, &reg48);
- 	pci_read_config_word(dev, 0x4a, &reg4a);
- 
--	switch(speed) {
--		case XFER_UDMA_4:	u_speed = 4 << (drive->dn * 4); break;
--		case XFER_UDMA_3:	u_speed = 3 << (drive->dn * 4); break;
--		case XFER_UDMA_2:	u_speed = 2 << (drive->dn * 4); break;
--		case XFER_UDMA_1:	u_speed = 1 << (drive->dn * 4); break;
--		case XFER_UDMA_0:	u_speed = 0 << (drive->dn * 4); break;
--		case XFER_MW_DMA_2:
--		case XFER_MW_DMA_1:
--		case XFER_SW_DMA_2:	break;
--		default:		return;
+-		/*
+-		 * Make sure that the guest sees a monotonically
+-		 * increasing TSC.
+-		 */
+-		rdtscll(tsc_this);
+-		delta = vcpu->host_tsc - tsc_this;
+-		svm->vmcb->control.tsc_offset += delta;
+-		vcpu->cpu = cpu;
+-		kvm_migrate_apic_timer(vcpu);
 -	}
 -
- 	if (speed >= XFER_UDMA_0) {
-+		u_speed = (speed - XFER_UDMA_0) << (drive->dn * 4);
-+
- 		if (!(reg48 & u_flag))
- 			pci_write_config_word(dev, 0x48, reg48|u_flag);
- 		/* FIXME: (reg4a & a_speed) ? */
-diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
-index a66ebd1..e1faf6c 100644
---- a/drivers/ide/pci/tc86c001.c
-+++ b/drivers/ide/pci/tc86c001.c
-@@ -222,7 +222,8 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = {
- 	.name		= "TC86C001",
- 	.init_chipset	= init_chipset_tc86c001,
- 	.init_hwif	= init_hwif_tc86c001,
--	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
-+	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
-+			  IDE_HFLAG_ABUSE_SET_DMA_MODE,
- 	.pio_mask	= ATA_PIO4,
- 	.mwdma_mask	= ATA_MWDMA2,
- 	.udma_mask	= ATA_UDMA4,
-diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
-index a227c41..ae52a96 100644
---- a/drivers/ide/pci/triflex.c
-+++ b/drivers/ide/pci/triflex.c
-@@ -81,8 +81,6 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
- 		case XFER_PIO_0:
- 			timing = 0x0808;
- 			break;
--		default:
--			return;
- 	}
- 
- 	triflex_timings &= ~(0xFFFF << (16 * unit));
-diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
-index 0151d7f..04cd893 100644
---- a/drivers/ide/pci/trm290.c
-+++ b/drivers/ide/pci/trm290.c
-@@ -241,11 +241,7 @@ static int trm290_ide_dma_test_irq (ide_drive_t *drive)
- 	return (status == 0x00ff);
- }
- 
--static void trm290_dma_host_on(ide_drive_t *drive)
+-	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+-		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+-}
+-
+-static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 -{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	int i;
+-
+-	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+-		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+-
+-	rdtscll(vcpu->host_tsc);
+-	kvm_put_guest_fpu(vcpu);
 -}
 -
--static void trm290_dma_host_off(ide_drive_t *drive)
-+static void trm290_dma_host_set(ide_drive_t *drive, int on)
- {
- }
- 
-@@ -289,8 +285,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
- 
- 	ide_setup_dma(hwif, (hwif->config_data + 4) ^ (hwif->channel ? 0x0080 : 0x0000), 3);
- 
--	hwif->dma_host_off	= &trm290_dma_host_off;
--	hwif->dma_host_on	= &trm290_dma_host_on;
-+	hwif->dma_host_set	= &trm290_dma_host_set;
- 	hwif->dma_setup 	= &trm290_dma_setup;
- 	hwif->dma_exec_cmd	= &trm290_dma_exec_cmd;
- 	hwif->dma_start 	= &trm290_dma_start;
-diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
-index a0d3c16..4b32c90 100644
---- a/drivers/ide/pci/via82cxxx.c
-+++ b/drivers/ide/pci/via82cxxx.c
-@@ -439,6 +439,7 @@ static const struct ide_port_info via82cxxx_chipset __devinitdata = {
- 	.enablebits	= { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
- 	.host_flags	= IDE_HFLAG_PIO_NO_BLACKLIST |
- 			  IDE_HFLAG_PIO_NO_DOWNGRADE |
-+			  IDE_HFLAG_ABUSE_SET_DMA_MODE |
- 			  IDE_HFLAG_POST_SET_MODE |
- 			  IDE_HFLAG_IO_32BIT |
- 			  IDE_HFLAG_BOOTABLE,
-diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile
-new file mode 100644
-index 0000000..65af584
---- /dev/null
-+++ b/drivers/ide/ppc/Makefile
-@@ -0,0 +1,3 @@
-+
-+obj-$(CONFIG_BLK_DEV_IDE_PMAC)		+= pmac.o
-+obj-$(CONFIG_BLK_DEV_MPC8xx_IDE)	+= mpc8xx.o
-diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
-index 5f0da35..3fd5d45 100644
---- a/drivers/ide/ppc/mpc8xx.c
-+++ b/drivers/ide/ppc/mpc8xx.c
-@@ -838,3 +838,21 @@ void m8xx_ide_init(void)
- 	ppc_ide_md.default_io_base      = m8xx_ide_default_io_base;
- 	ppc_ide_md.ide_init_hwif        = m8xx_ide_init_hwif_ports;
- }
-+
-+static int __init mpc8xx_ide_probe(void)
-+{
-+	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-+
-+#ifdef IDE0_BASE_OFFSET
-+	idx[0] = 0;
-+#ifdef IDE1_BASE_OFFSET
-+	idx[1] = 1;
-+#endif
-+#endif
-+
-+	ide_device_add(idx);
-+
-+	return 0;
-+}
-+
-+module_init(mpc8xx_ide_probe);
-diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
-index 7f7a598..736d12c 100644
---- a/drivers/ide/ppc/pmac.c
-+++ b/drivers/ide/ppc/pmac.c
-@@ -438,13 +438,8 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw,
- 		if (data_port == pmac_ide[ix].regbase)
- 			break;
- 
--	if (ix >= MAX_HWIFS) {
--		/* Probably a PCI interface... */
--		for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; ++i)
--			hw->io_ports[i] = data_port + i - IDE_DATA_OFFSET;
--		hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
--		return;
--	}
-+	if (ix >= MAX_HWIFS)
-+		return;		/* not an IDE PMAC interface */
- 
- 	for (i = 0; i < 8; ++i)
- 		hw->io_ports[i] = data_port + i * 0x10;
-@@ -833,38 +828,20 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
- 	tl[0] = *timings;
- 	tl[1] = *timings2;
- 
--	switch(speed) {
- #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
--		case XFER_UDMA_6:
--		case XFER_UDMA_5:
--		case XFER_UDMA_4:
--		case XFER_UDMA_3:
--		case XFER_UDMA_2:
--		case XFER_UDMA_1:
--		case XFER_UDMA_0:
--			if (pmif->kind == controller_kl_ata4)
--				ret = set_timings_udma_ata4(&tl[0], speed);
--			else if (pmif->kind == controller_un_ata6
--				 || pmif->kind == controller_k2_ata6)
--				ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
--			else if (pmif->kind == controller_sh_ata6)
--				ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
--			else
--				ret = 1;
--			break;
--		case XFER_MW_DMA_2:
--		case XFER_MW_DMA_1:
--		case XFER_MW_DMA_0:
--			set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
--			break;
--		case XFER_SW_DMA_2:
--		case XFER_SW_DMA_1:
--		case XFER_SW_DMA_0:
--			return;
-+	if (speed >= XFER_UDMA_0) {
-+		if (pmif->kind == controller_kl_ata4)
-+			ret = set_timings_udma_ata4(&tl[0], speed);
-+		else if (pmif->kind == controller_un_ata6
-+			 || pmif->kind == controller_k2_ata6)
-+			ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
-+		else if (pmif->kind == controller_sh_ata6)
-+			ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
-+		else
-+			ret = -1;
-+	} else
-+		set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
- #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
--		default:
--			ret = 1;
--	}
- 	if (ret)
- 		return;
- 
-@@ -1035,12 +1012,11 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
-  * rare machines unfortunately, but it's better this way.
-  */
- static int
--pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
-+pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
- {
- 	struct device_node *np = pmif->node;
- 	const int *bidp;
- 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
--	hw_regs_t hw;
- 
- 	pmif->cable_80 = 0;
- 	pmif->broken_dma = pmif->broken_dma_warn = 0;
-@@ -1126,11 +1102,9 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
- 	/* Tell common code _not_ to mess with resources */
- 	hwif->mmio = 1;
- 	hwif->hwif_data = pmif;
--	memset(&hw, 0, sizeof(hw));
--	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, &hwif->irq);
--	memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
--	hwif->chipset = ide_pmac;
--	hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay;
-+	hw->chipset = ide_pmac;
-+	ide_init_port_hw(hwif, hw);
-+	hwif->noprobe = pmif->mediabay;
- 	hwif->hold = pmif->mediabay;
- 	hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
- 	hwif->drives[0].unmask = 1;
-@@ -1159,8 +1133,6 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
- 		hwif->noprobe = 0;
- #endif /* CONFIG_PMAC_MEDIABAY */
- 
--	hwif->sg_max_nents = MAX_DCMDS;
+-static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
+-{
+-}
 -
- #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
- 	/* has a DBDMA controller channel */
- 	if (pmif->dma_regs)
-@@ -1186,6 +1158,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
- 	ide_hwif_t *hwif;
- 	pmac_ide_hwif_t *pmif;
- 	int i, rc;
-+	hw_regs_t hw;
- 
- 	i = 0;
- 	while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
-@@ -1228,7 +1201,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
- 	regbase = (unsigned long) base;
- 
- 	hwif->pci_dev = mdev->bus->pdev;
--	hwif->gendev.parent = &mdev->ofdev.dev;
- 
- 	pmif->mdev = mdev;
- 	pmif->node = mdev->ofdev.node;
-@@ -1246,7 +1218,12 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
- #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
- 	dev_set_drvdata(&mdev->ofdev.dev, hwif);
- 
--	rc = pmac_ide_setup_device(pmif, hwif);
-+	memset(&hw, 0, sizeof(hw));
-+	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
-+	hw.irq = irq;
-+	hw.dev = &mdev->ofdev.dev;
-+
-+	rc = pmac_ide_setup_device(pmif, hwif, &hw);
- 	if (rc != 0) {
- 		/* The inteface is released to the common IDE layer */
- 		dev_set_drvdata(&mdev->ofdev.dev, NULL);
-@@ -1305,6 +1282,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
- 	void __iomem *base;
- 	unsigned long rbase, rlen;
- 	int i, rc;
-+	hw_regs_t hw;
- 
- 	np = pci_device_to_OF_node(pdev);
- 	if (np == NULL) {
-@@ -1338,7 +1316,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
- 	}
- 
- 	hwif->pci_dev = pdev;
--	hwif->gendev.parent = &pdev->dev;
- 	pmif->mdev = NULL;
- 	pmif->node = np;
- 
-@@ -1355,7 +1332,12 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
- 
- 	pci_set_drvdata(pdev, hwif);
- 
--	rc = pmac_ide_setup_device(pmif, hwif);
-+	memset(&hw, 0, sizeof(hw));
-+	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
-+	hw.irq = pdev->irq;
-+	hw.dev = &pdev->dev;
-+
-+	rc = pmac_ide_setup_device(pmif, hwif, &hw);
- 	if (rc != 0) {
- 		/* The inteface is released to the common IDE layer */
- 		pci_set_drvdata(pdev, NULL);
-@@ -1721,11 +1703,7 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
- 	return 1;
- }
- 
--static void pmac_ide_dma_host_off(ide_drive_t *drive)
+-static void svm_cache_regs(struct kvm_vcpu *vcpu)
 -{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+-	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+-	vcpu->rip = svm->vmcb->save.rip;
 -}
 -
--static void pmac_ide_dma_host_on(ide_drive_t *drive)
-+static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
- {
- }
- 
-@@ -1771,15 +1749,14 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
- 		return;
- 	}
- 
--	hwif->dma_off_quietly = &ide_dma_off_quietly;
--	hwif->ide_dma_on = &__ide_dma_on;
-+	hwif->sg_max_nents = MAX_DCMDS;
-+
-+	hwif->dma_host_set = &pmac_ide_dma_host_set;
- 	hwif->dma_setup = &pmac_ide_dma_setup;
- 	hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd;
- 	hwif->dma_start = &pmac_ide_dma_start;
- 	hwif->ide_dma_end = &pmac_ide_dma_end;
- 	hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
--	hwif->dma_host_off = &pmac_ide_dma_host_off;
--	hwif->dma_host_on = &pmac_ide_dma_host_on;
- 	hwif->dma_timeout = &ide_dma_timeout;
- 	hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
- 
-@@ -1809,3 +1786,5 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
- }
- 
- #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
-+
-+module_init(pmac_ide_probe);
-diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
-index d2cd5a3..676c66e 100644
---- a/drivers/ide/setup-pci.c
-+++ b/drivers/ide/setup-pci.c
-@@ -165,13 +165,17 @@ static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_
- 
- 		dma_base = pci_resource_start(dev, baridx);
- 
--		if (dma_base == 0)
-+		if (dma_base == 0) {
- 			printk(KERN_ERR "%s: DMA base is invalid\n", d->name);
-+			return 0;
-+		}
- 	}
- 
--	if ((d->host_flags & IDE_HFLAG_CS5520) == 0 && dma_base) {
-+	if (hwif->channel)
-+		dma_base += 8;
-+
-+	if ((d->host_flags & IDE_HFLAG_CS5520) == 0) {
- 		u8 simplex_stat = 0;
--		dma_base += hwif->channel ? 8 : 0;
- 
- 		switch(dev->device) {
- 			case PCI_DEVICE_ID_AL_M5219:
-@@ -359,6 +363,8 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, const struct ide_port
- 	unsigned long ctl = 0, base = 0;
- 	ide_hwif_t *hwif;
- 	u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0;
-+	u8 oldnoprobe = 0;
-+	struct hw_regs_s hw;
- 
- 	if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
- 		/*  Possibly we should fail if these checks report true */
-@@ -381,26 +387,25 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, const struct ide_port
- 	}
- 	if ((hwif = ide_match_hwif(base, bootable, d->name)) == NULL)
- 		return NULL;	/* no room in ide_hwifs[] */
--	if (hwif->io_ports[IDE_DATA_OFFSET] != base ||
--	    hwif->io_ports[IDE_CONTROL_OFFSET] != (ctl | 2)) {
--		hw_regs_t hw;
+-static void svm_decache_regs(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
+-	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
+-	svm->vmcb->save.rip = vcpu->rip;
+-}
 -
--		memset(&hw, 0, sizeof(hw));
--#ifndef CONFIG_IDE_ARCH_OBSOLETE_INIT
--		ide_std_init_ports(&hw, base, ctl | 2);
--#else
--		ide_init_hwif_ports(&hw, base, ctl | 2, NULL);
--#endif
--		memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
--		hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
+-static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
+-{
+-	return to_svm(vcpu)->vmcb->save.rflags;
+-}
+-
+-static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+-{
+-	to_svm(vcpu)->vmcb->save.rflags = rflags;
+-}
+-
+-static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
+-{
+-	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
+-
+-	switch (seg) {
+-	case VCPU_SREG_CS: return &save->cs;
+-	case VCPU_SREG_DS: return &save->ds;
+-	case VCPU_SREG_ES: return &save->es;
+-	case VCPU_SREG_FS: return &save->fs;
+-	case VCPU_SREG_GS: return &save->gs;
+-	case VCPU_SREG_SS: return &save->ss;
+-	case VCPU_SREG_TR: return &save->tr;
+-	case VCPU_SREG_LDTR: return &save->ldtr;
 -	}
--	hwif->chipset = d->chipset ? d->chipset : ide_pci;
-+
-+	memset(&hw, 0, sizeof(hw));
-+	hw.irq = hwif->irq ? hwif->irq : irq;
-+	hw.dev = &dev->dev;
-+	hw.chipset = d->chipset ? d->chipset : ide_pci;
-+	ide_std_init_ports(&hw, base, ctl | 2);
-+
-+	if (hwif->io_ports[IDE_DATA_OFFSET] == base &&
-+	    hwif->io_ports[IDE_CONTROL_OFFSET] == (ctl | 2))
-+		oldnoprobe = hwif->noprobe;
-+
-+	ide_init_port_hw(hwif, &hw);
-+
-+	hwif->noprobe = oldnoprobe;
-+
- 	hwif->pci_dev = dev;
- 	hwif->cds = d;
- 	hwif->channel = port;
- 
--	if (!hwif->irq)
--		hwif->irq = irq;
- 	if (mate) {
- 		hwif->mate = mate;
- 		mate->mate = hwif;
-@@ -535,12 +540,8 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
- 		if ((hwif = ide_hwif_configure(dev, d, mate, port, pciirq)) == NULL)
- 			continue;
- 
--		/* setup proper ancestral information */
--		hwif->gendev.parent = &dev->dev;
+-	BUG();
+-	return NULL;
+-}
 -
- 		*(idx + port) = hwif->index;
- 
--		
- 		if (d->init_iops)
- 			d->init_iops(hwif);
- 
-@@ -551,8 +552,6 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
- 		    (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
- 			hwif->irq = port ? 15 : 14;
- 
--		hwif->fixup = d->fixup;
+-static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+-{
+-	struct vmcb_seg *s = svm_seg(vcpu, seg);
 -
- 		hwif->host_flags = d->host_flags;
- 		hwif->pio_mask = d->pio_mask;
- 
-@@ -699,105 +698,3 @@ out:
- }
- 
- EXPORT_SYMBOL_GPL(ide_setup_pci_devices);
+-	return s->base;
+-}
 -
--#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
--/*
-- *	Module interfaces
-- */
+-static void svm_get_segment(struct kvm_vcpu *vcpu,
+-			    struct kvm_segment *var, int seg)
+-{
+-	struct vmcb_seg *s = svm_seg(vcpu, seg);
 -
--static int pre_init = 1;		/* Before first ordered IDE scan */
--static LIST_HEAD(ide_pci_drivers);
+-	var->base = s->base;
+-	var->limit = s->limit;
+-	var->selector = s->selector;
+-	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
+-	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
+-	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+-	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
+-	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
+-	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
+-	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
+-	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
+-	var->unusable = !var->present;
+-}
 -
--/*
-- *	__ide_pci_register_driver	-	attach IDE driver
-- *	@driver: pci driver
-- *	@module: owner module of the driver
-- *
-- *	Registers a driver with the IDE layer. The IDE layer arranges that
-- *	boot time setup is done in the expected device order and then
-- *	hands the controllers off to the core PCI code to do the rest of
-- *	the work.
-- *
-- *	Returns are the same as for pci_register_driver
-- */
+-static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
 -
--int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
--			      const char *mod_name)
+-	dt->limit = svm->vmcb->save.idtr.limit;
+-	dt->base = svm->vmcb->save.idtr.base;
+-}
+-
+-static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 -{
--	if (!pre_init)
--		return __pci_register_driver(driver, module, mod_name);
--	driver->driver.owner = module;
--	list_add_tail(&driver->node, &ide_pci_drivers);
--	return 0;
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	svm->vmcb->save.idtr.limit = dt->limit;
+-	svm->vmcb->save.idtr.base = dt->base ;
 -}
--EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
 -
--/**
-- *	ide_scan_pcidev		-	find an IDE driver for a device
-- *	@dev: PCI device to check
-- *
-- *	Look for an IDE driver to handle the device we are considering.
-- *	This is only used during boot up to get the ordering correct. After
-- *	boot up the pci layer takes over the job.
-- */
+-static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
 -
--static int __init ide_scan_pcidev(struct pci_dev *dev)
+-	dt->limit = svm->vmcb->save.gdtr.limit;
+-	dt->base = svm->vmcb->save.gdtr.base;
+-}
+-
+-static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 -{
--	struct list_head *l;
--	struct pci_driver *d;
+-	struct vcpu_svm *svm = to_svm(vcpu);
 -
--	list_for_each(l, &ide_pci_drivers) {
--		d = list_entry(l, struct pci_driver, node);
--		if (d->id_table) {
--			const struct pci_device_id *id =
--				pci_match_id(d->id_table, dev);
+-	svm->vmcb->save.gdtr.limit = dt->limit;
+-	svm->vmcb->save.gdtr.base = dt->base ;
+-}
 -
--			if (id != NULL && d->probe(dev, id) >= 0) {
--				dev->driver = d;
--				pci_dev_get(dev);
--				return 1;
--			}
+-static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
+-{
+-}
+-
+-static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-#ifdef CONFIG_X86_64
+-	if (vcpu->shadow_efer & KVM_EFER_LME) {
+-		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
+-			vcpu->shadow_efer |= KVM_EFER_LMA;
+-			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
+-		}
+-
+-		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
+-			vcpu->shadow_efer &= ~KVM_EFER_LMA;
+-			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
 -		}
 -	}
--	return 0;
+-#endif
+-	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+-		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+-		vcpu->fpu_active = 1;
+-	}
+-
+-	vcpu->cr0 = cr0;
+-	cr0 |= X86_CR0_PG | X86_CR0_WP;
+-	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+-	svm->vmcb->save.cr0 = cr0;
 -}
 -
--/**
-- *	ide_scan_pcibus		-	perform the initial IDE driver scan
-- *	@scan_direction: set for reverse order scanning
-- *
-- *	Perform the initial bus rather than driver ordered scan of the
-- *	PCI drivers. After this all IDE pci handling becomes standard
-- *	module ordering not traditionally ordered.
-- */
-- 	
--void __init ide_scan_pcibus (int scan_direction)
+-static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 -{
--	struct pci_dev *dev = NULL;
--	struct pci_driver *d;
--	struct list_head *l, *n;
+-       vcpu->cr4 = cr4;
+-       to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
+-}
 -
--	pre_init = 0;
--	if (!scan_direction)
--		while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
--			ide_scan_pcidev(dev);
--	else
--		while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID,
--						     dev)))
--			ide_scan_pcidev(dev);
+-static void svm_set_segment(struct kvm_vcpu *vcpu,
+-			    struct kvm_segment *var, int seg)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	struct vmcb_seg *s = svm_seg(vcpu, seg);
 -
--	/*
--	 *	Hand the drivers over to the PCI layer now we
--	 *	are post init.
--	 */
+-	s->base = var->base;
+-	s->limit = var->limit;
+-	s->selector = var->selector;
+-	if (var->unusable)
+-		s->attrib = 0;
+-	else {
+-		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+-		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+-		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+-		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
+-		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+-		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+-		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+-		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
+-	}
+-	if (seg == VCPU_SREG_CS)
+-		svm->vmcb->save.cpl
+-			= (svm->vmcb->save.cs.attrib
+-			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
 -
--	list_for_each_safe(l, n, &ide_pci_drivers) {
--		list_del(l);
--		d = list_entry(l, struct pci_driver, node);
--		if (__pci_register_driver(d, d->driver.owner,
--					  d->driver.mod_name))
--			printk(KERN_ERR "%s: failed to register %s driver\n",
--					__FUNCTION__, d->driver.mod_name);
--	}
 -}
+-
+-/* FIXME:
+-
+-	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
+-	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
+-
+-*/
+-
+-static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
+-{
+-	return -EOPNOTSUPP;
+-}
+-
+-static int svm_get_irq(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	u32 exit_int_info = svm->vmcb->control.exit_int_info;
+-
+-	if (is_external_interrupt(exit_int_info))
+-		return exit_int_info & SVM_EVTINJ_VEC_MASK;
+-	return -1;
+-}
+-
+-static void load_host_msrs(struct kvm_vcpu *vcpu)
+-{
+-#ifdef CONFIG_X86_64
+-	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
 -#endif
-diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
-index 489c133..1f8153b 100644
---- a/drivers/ieee1394/Makefile
-+++ b/drivers/ieee1394/Makefile
-@@ -15,3 +15,4 @@ obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
- obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
- obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
- 
-+obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
-diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
-new file mode 100644
-index 0000000..ddaab6e
---- /dev/null
-+++ b/drivers/ieee1394/init_ohci1394_dma.c
-@@ -0,0 +1,285 @@
-+/*
-+ * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers
-+ *
-+ * Copyright (C) 2006-2007      Bernhard Kaindl <bk at suse.de>
-+ *
-+ * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c
-+ * this file has functions to:
-+ * - scan the PCI very early on boot for all OHCI 1394-compliant controllers
-+ * - reset and initialize them and make them join the IEEE1394 bus and
-+ * - enable physical DMA on them to allow remote debugging
-+ *
-+ * All code and data is marked as __init and __initdata, respective as
-+ * during boot, all OHCI1394 controllers may be claimed by the firewire
-+ * stack and at this point, this code should not touch them anymore.
-+ *
-+ * To use physical DMA after the initialization of the firewire stack,
-+ * be sure that the stack enables it and (re-)attach after the bus reset
-+ * which may be caused by the firewire stack initialization.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software Foundation,
-+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ */
-+
-+#include <linux/interrupt.h>	/* for ohci1394.h */
-+#include <linux/delay.h>
-+#include <linux/pci.h>		/* for PCI defines */
-+#include <linux/init_ohci1394_dma.h>
-+#include <asm/pci-direct.h>	/* for direct PCI config space access */
-+#include <asm/fixmap.h>
-+
-+#include "ieee1394_types.h"
-+#include "ohci1394.h"
-+
-+int __initdata init_ohci1394_dma_early;
-+
-+/* Reads a PHY register of an OHCI-1394 controller */
-+static inline u8 __init get_phy_reg(struct ti_ohci *ohci, u8 addr)
-+{
-+	int i;
-+	quadlet_t r;
-+
-+	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
-+
-+	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
-+		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
-+			break;
-+		mdelay(1);
-+	}
-+	r = reg_read(ohci, OHCI1394_PhyControl);
-+
-+	return (r & 0x00ff0000) >> 16;
-+}
-+
-+/* Writes to a PHY register of an OHCI-1394 controller */
-+static inline void __init set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
-+{
-+	int i;
-+
-+	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
-+
-+	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
-+		u32 r = reg_read(ohci, OHCI1394_PhyControl);
-+		if (!(r & 0x00004000))
-+			break;
-+		mdelay(1);
-+	}
-+}
-+
-+/* Resets an OHCI-1394 controller (for sane state before initialization) */
-+static inline void __init init_ohci1394_soft_reset(struct ti_ohci *ohci) {
-+	int i;
-+
-+	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
-+
-+	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
-+		if (!(reg_read(ohci, OHCI1394_HCControlSet)
-+				   & OHCI1394_HCControl_softReset))
-+			break;
-+		mdelay(1);
-+	}
-+}
-+
-+/* Basic OHCI-1394 register and port inititalization */
-+static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
-+{
-+	quadlet_t bus_options;
-+	int num_ports, i;
-+
-+	/* Put some defaults to these undefined bus options */
-+	bus_options = reg_read(ohci, OHCI1394_BusOptions);
-+	bus_options |=  0x60000000; /* Enable CMC and ISC */
-+	bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
-+	bus_options &= ~0x18000000; /* Disable PMC and BMC */
-+	reg_write(ohci, OHCI1394_BusOptions, bus_options);
-+
-+	/* Set the bus number */
-+	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
-+
-+	/* Enable posted writes */
-+	reg_write(ohci, OHCI1394_HCControlSet,
-+			OHCI1394_HCControl_postedWriteEnable);
-+
-+	/* Clear link control register */
-+	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
-+
-+	/* enable phys */
-+	reg_write(ohci, OHCI1394_LinkControlSet,
-+			OHCI1394_LinkControl_RcvPhyPkt);
-+
-+	/* Don't accept phy packets into AR request context */
-+	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
-+
-+	/* Clear the Isochonouys interrupt masks */
-+	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
-+	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
-+	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
-+	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
-+
-+	/* Accept asyncronous transfer requests from all nodes for now */
-+	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
-+
-+	/* Specify asyncronous transfer retries */
-+	reg_write(ohci, OHCI1394_ATRetries,
-+		  OHCI1394_MAX_AT_REQ_RETRIES |
-+		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
-+		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
-+
-+	/* We don't want hardware swapping */
-+	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
-+
-+	/* Enable link */
-+	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
-+
-+	/* If anything is connected to a port, make sure it is enabled */
-+	num_ports = get_phy_reg(ohci, 2) & 0xf;
-+	for (i = 0; i < num_ports; i++) {
-+		unsigned int status;
-+
-+		set_phy_reg(ohci, 7, i);
-+		status = get_phy_reg(ohci, 8);
-+
-+		if (status & 0x20)
-+			set_phy_reg(ohci, 8, status & ~1);
-+	}
-+}
-+
-+/**
-+ * init_ohci1394_wait_for_busresets - wait until bus resets are completed
-+ *
-+ * OHCI1394 initialization itself and any device going on- or offline
-+ * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec
-+ * specifies that physical DMA is disabled on each bus reset and it
-+ * has to be enabled after each bus reset when needed. We resort
-+ * to polling here because on early boot, we have no interrupts.
-+ */
-+static inline void __init init_ohci1394_wait_for_busresets(struct ti_ohci *ohci)
-+{
-+	int i, events;
-+
-+	for (i=0; i < 9; i++) {
-+		mdelay(200);
-+		events = reg_read(ohci, OHCI1394_IntEventSet);
-+		if (events & OHCI1394_busReset)
-+			reg_write(ohci, OHCI1394_IntEventClear,
-+					OHCI1394_busReset);
-+	}
-+}
-+
-+/**
-+ * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
-+ * This enables remote DMA access over IEEE1394 from every host for the low
-+ * 4GB of address space. DMA accesses above 4GB are not available currently.
-+ */
-+static inline void __init init_ohci1394_enable_physical_dma(struct ti_ohci *hci)
-+{
-+	reg_write(hci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
-+	reg_write(hci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
-+	reg_write(hci, OHCI1394_PhyUpperBound, 0xffff0000);
-+}
-+
-+/**
-+ * init_ohci1394_reset_and_init_dma - init controller and enable DMA
-+ * This initializes the given controller and enables physical DMA engine in it.
-+ */
-+static inline void __init init_ohci1394_reset_and_init_dma(struct ti_ohci *ohci)
-+{
-+	/* Start off with a soft reset, clears everything to a sane state. */
-+	init_ohci1394_soft_reset(ohci);
-+
-+	/* Accessing some registers without LPS enabled may cause lock up */
-+	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
-+
-+	/* Disable and clear interrupts */
-+	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
-+	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
-+
-+	mdelay(50); /* Wait 50msec to make sure we have full link enabled */
-+
-+	init_ohci1394_initialize(ohci);
-+	/*
-+	 * The initialization causes at least one IEEE1394 bus reset. Enabling
-+	 * physical DMA only works *after* *all* bus resets have calmed down:
-+	 */
-+	init_ohci1394_wait_for_busresets(ohci);
-+
-+	/* We had to wait and do this now if we want to debug early problems */
-+	init_ohci1394_enable_physical_dma(ohci);
-+}
-+
-+/**
-+ * init_ohci1394_controller - Map the registers of the controller and init DMA
-+ * This maps the registers of the specified controller and initializes it
-+ */
-+static inline void __init init_ohci1394_controller(int num, int slot, int func)
-+{
-+	unsigned long ohci_base;
-+	struct ti_ohci ohci;
-+
-+	printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394"
-+			 " at %02x:%02x.%x\n", num, slot, func);
-+
-+	ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2))
-+						   & PCI_BASE_ADDRESS_MEM_MASK;
-+
-+	set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base);
-+
-+	ohci.registers = (void *)fix_to_virt(FIX_OHCI1394_BASE);
-+
-+	init_ohci1394_reset_and_init_dma(&ohci);
-+}
-+
-+/**
-+ * debug_init_ohci1394_dma - scan for OHCI1394 controllers and init DMA on them
-+ * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them
-+ */
-+void __init init_ohci1394_dma_on_all_controllers(void)
-+{
-+	int num, slot, func;
-+
-+	if (!early_pci_allowed())
-+		return;
-+
-+	/* Poor man's PCI discovery, the only thing we can do at early boot */
-+	for (num = 0; num < 32; num++) {
-+		for (slot = 0; slot < 32; slot++) {
-+			for (func = 0; func < 8; func++) {
-+				u32 class = read_pci_config(num,slot,func,
-+							PCI_CLASS_REVISION);
-+				if ((class == 0xffffffff))
-+					continue; /* No device at this func */
-+
-+				if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
-+					continue; /* Not an OHCI-1394 device */
-+
-+				init_ohci1394_controller(num, slot, func);
-+				break; /* Assume one controller per device */
-+			}
-+		}
-+	}
-+	printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n");
-+}
-+
-+/**
-+ * setup_init_ohci1394_early - enables early OHCI1394 DMA initialization
-+ */
-+static int __init setup_ohci1394_dma(char *opt)
-+{
-+	if (!strcmp(opt, "early"))
-+		init_ohci1394_dma_early = 1;
-+	return 0;
-+}
-+
-+/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */
-+early_param("ohci1394_dma", setup_ohci1394_dma);
-diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
-index 90dc75b..511e432 100644
---- a/drivers/ieee1394/nodemgr.c
-+++ b/drivers/ieee1394/nodemgr.c
-@@ -727,33 +727,31 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
- 
- static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
- 
-+static int __match_ne(struct device *dev, void *data)
-+{
-+	struct unit_directory *ud;
-+	struct node_entry *ne = (struct node_entry *)data;
-+
-+	ud = container_of(dev, struct unit_directory, unit_dev);
-+	return ud->ne == ne;
-+}
-+
- static void nodemgr_remove_uds(struct node_entry *ne)
- {
- 	struct device *dev;
--	struct unit_directory *tmp, *ud;
+-}
 -
--	/* Iteration over nodemgr_ud_class.devices has to be protected by
--	 * nodemgr_ud_class.sem, but device_unregister() will eventually
--	 * take nodemgr_ud_class.sem too. Therefore pick out one ud at a time,
--	 * release the semaphore, and then unregister the ud. Since this code
--	 * may be called from other contexts besides the knodemgrds, protect the
--	 * gap after release of the semaphore by nodemgr_serialize_remove_uds.
-+	struct unit_directory *ud;
-+
-+	/* Use class_find device to iterate the devices. Since this code
-+	 * may be called from other contexts besides the knodemgrds,
-+	 * protect it by nodemgr_serialize_remove_uds.
- 	 */
- 	mutex_lock(&nodemgr_serialize_remove_uds);
- 	for (;;) {
--		ud = NULL;
--		down(&nodemgr_ud_class.sem);
--		list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
--			tmp = container_of(dev, struct unit_directory,
--					   unit_dev);
--			if (tmp->ne == ne) {
--				ud = tmp;
--				break;
--			}
--		}
--		up(&nodemgr_ud_class.sem);
--		if (ud == NULL)
-+		dev = class_find_device(&nodemgr_ud_class, ne, __match_ne);
-+		if (!dev)
- 			break;
-+		ud = container_of(dev, struct unit_directory, unit_dev);
-+		put_device(dev);
- 		device_unregister(&ud->unit_dev);
- 		device_unregister(&ud->device);
- 	}
-@@ -882,45 +880,66 @@ fail_alloc:
- 	return NULL;
- }
- 
-+static int __match_ne_guid(struct device *dev, void *data)
-+{
-+	struct node_entry *ne;
-+	u64 *guid = (u64 *)data;
-+
-+	ne = container_of(dev, struct node_entry, node_dev);
-+	return ne->guid == *guid;
-+}
- 
- static struct node_entry *find_entry_by_guid(u64 guid)
- {
- 	struct device *dev;
--	struct node_entry *ne, *ret_ne = NULL;
+-static void save_host_msrs(struct kvm_vcpu *vcpu)
+-{
+-#ifdef CONFIG_X86_64
+-	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
+-#endif
+-}
 -
--	down(&nodemgr_ne_class.sem);
--	list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
--		ne = container_of(dev, struct node_entry, node_dev);
-+	struct node_entry *ne;
- 
--		if (ne->guid == guid) {
--			ret_ne = ne;
--			break;
--		}
+-static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+-{
+-	if (svm_data->next_asid > svm_data->max_asid) {
+-		++svm_data->asid_generation;
+-		svm_data->next_asid = 1;
+-		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 -	}
--	up(&nodemgr_ne_class.sem);
-+	dev = class_find_device(&nodemgr_ne_class, &guid, __match_ne_guid);
-+	if (!dev)
-+		return NULL;
-+	ne = container_of(dev, struct node_entry, node_dev);
-+	put_device(dev);
- 
--	return ret_ne;
-+	return ne;
- }
- 
-+struct match_nodeid_param {
-+	struct hpsb_host *host;
-+	nodeid_t nodeid;
-+};
-+
-+static int __match_ne_nodeid(struct device *dev, void *data)
-+{
-+	int found = 0;
-+	struct node_entry *ne;
-+	struct match_nodeid_param *param = (struct match_nodeid_param *)data;
-+
-+	if (!dev)
-+		goto ret;
-+	ne = container_of(dev, struct node_entry, node_dev);
-+	if (ne->host == param->host && ne->nodeid == param->nodeid)
-+		found = 1;
-+ret:
-+	return found;
-+}
- 
- static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
- 					       nodeid_t nodeid)
- {
- 	struct device *dev;
--	struct node_entry *ne, *ret_ne = NULL;
-+	struct node_entry *ne;
-+	struct match_nodeid_param param;
- 
--	down(&nodemgr_ne_class.sem);
--	list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
--		ne = container_of(dev, struct node_entry, node_dev);
-+	param.host = host;
-+	param.nodeid = nodeid;
- 
--		if (ne->host == host && ne->nodeid == nodeid) {
--			ret_ne = ne;
--			break;
+-
+-	svm->vcpu.cpu = svm_data->cpu;
+-	svm->asid_generation = svm_data->asid_generation;
+-	svm->vmcb->control.asid = svm_data->next_asid++;
+-}
+-
+-static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
+-{
+-	return to_svm(vcpu)->db_regs[dr];
+-}
+-
+-static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
+-		       int *exception)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	*exception = 0;
+-
+-	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
+-		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
+-		svm->vmcb->save.dr6 |= DR6_BD_MASK;
+-		*exception = DB_VECTOR;
+-		return;
+-	}
+-
+-	switch (dr) {
+-	case 0 ... 3:
+-		svm->db_regs[dr] = value;
+-		return;
+-	case 4 ... 5:
+-		if (vcpu->cr4 & X86_CR4_DE) {
+-			*exception = UD_VECTOR;
+-			return;
 -		}
+-	case 7: {
+-		if (value & ~((1ULL << 32) - 1)) {
+-			*exception = GP_VECTOR;
+-			return;
+-		}
+-		svm->vmcb->save.dr7 = value;
+-		return;
 -	}
--	up(&nodemgr_ne_class.sem);
-+	dev = class_find_device(&nodemgr_ne_class, &param, __match_ne_nodeid);
-+	if (!dev)
-+		return NULL;
-+	ne = container_of(dev, struct node_entry, node_dev);
-+	put_device(dev);
- 
--	return ret_ne;
-+	return ne;
- }
- 
- 
-@@ -1370,107 +1389,109 @@ static void nodemgr_node_scan(struct host_info *hi, int generation)
- 	}
- }
- 
+-	default:
+-		printk(KERN_DEBUG "%s: unexpected dr %u\n",
+-		       __FUNCTION__, dr);
+-		*exception = UD_VECTOR;
+-		return;
+-	}
+-}
 -
--static void nodemgr_suspend_ne(struct node_entry *ne)
-+static int __nodemgr_driver_suspend(struct device *dev, void *data)
- {
--	struct device *dev;
- 	struct unit_directory *ud;
- 	struct device_driver *drv;
-+	struct node_entry *ne = (struct node_entry *)data;
- 	int error;
- 
--	HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
--		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
-+	ud = container_of(dev, struct unit_directory, unit_dev);
-+	if (ud->ne == ne) {
-+		drv = get_driver(ud->device.driver);
-+		if (drv) {
-+			error = 1; /* release if suspend is not implemented */
-+			if (drv->suspend) {
-+				down(&ud->device.sem);
-+				error = drv->suspend(&ud->device, PMSG_SUSPEND);
-+				up(&ud->device.sem);
-+			}
-+			if (error)
-+				device_release_driver(&ud->device);
-+			put_driver(drv);
-+		}
-+	}
- 
--	ne->in_limbo = 1;
--	WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
-+	return 0;
-+}
- 
--	down(&nodemgr_ud_class.sem);
--	list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
--		ud = container_of(dev, struct unit_directory, unit_dev);
--		if (ud->ne != ne)
--			continue;
-+static int __nodemgr_driver_resume(struct device *dev, void *data)
-+{
-+	struct unit_directory *ud;
-+	struct device_driver *drv;
-+	struct node_entry *ne = (struct node_entry *)data;
- 
-+	ud = container_of(dev, struct unit_directory, unit_dev);
-+	if (ud->ne == ne) {
- 		drv = get_driver(ud->device.driver);
--		if (!drv)
--			continue;
+-static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	u32 exit_int_info = svm->vmcb->control.exit_int_info;
+-	struct kvm *kvm = svm->vcpu.kvm;
+-	u64 fault_address;
+-	u32 error_code;
+-	enum emulation_result er;
+-	int r;
 -
--		error = 1; /* release if suspend is not implemented */
--		if (drv->suspend) {
--			down(&ud->device.sem);
--			error = drv->suspend(&ud->device, PMSG_SUSPEND);
--			up(&ud->device.sem);
-+		if (drv) {
-+			if (drv->resume) {
-+				down(&ud->device.sem);
-+				drv->resume(&ud->device);
-+				up(&ud->device.sem);
-+			}
-+			put_driver(drv);
- 		}
--		if (error)
--			device_release_driver(&ud->device);
--		put_driver(drv);
- 	}
--	up(&nodemgr_ud_class.sem);
+-	if (!irqchip_in_kernel(kvm) &&
+-		is_external_interrupt(exit_int_info))
+-		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+-
+-	mutex_lock(&kvm->lock);
+-
+-	fault_address  = svm->vmcb->control.exit_info_2;
+-	error_code = svm->vmcb->control.exit_info_1;
+-	r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+-	if (r < 0) {
+-		mutex_unlock(&kvm->lock);
+-		return r;
+-	}
+-	if (!r) {
+-		mutex_unlock(&kvm->lock);
+-		return 1;
+-	}
+-	er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
+-				 error_code);
+-	mutex_unlock(&kvm->lock);
+-
+-	switch (er) {
+-	case EMULATE_DONE:
+-		return 1;
+-	case EMULATE_DO_MMIO:
+-		++svm->vcpu.stat.mmio_exits;
+-		return 0;
+-	case EMULATE_FAIL:
+-		kvm_report_emulation_failure(&svm->vcpu, "pagetable");
+-		break;
+-	default:
+-		BUG();
+-	}
+-
+-	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+-	return 0;
 -}
- 
-+	return 0;
-+}
- 
--static void nodemgr_resume_ne(struct node_entry *ne)
-+static void nodemgr_suspend_ne(struct node_entry *ne)
- {
--	struct device *dev;
--	struct unit_directory *ud;
--	struct device_driver *drv;
-+	HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
-+		   NODE_BUS_ARGS(ne->host, ne->nodeid),
-+		   (unsigned long long)ne->guid);
- 
--	ne->in_limbo = 0;
--	device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
-+	ne->in_limbo = 1;
-+	WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
- 
--	down(&nodemgr_ud_class.sem);
--	list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
--		ud = container_of(dev, struct unit_directory, unit_dev);
--		if (ud->ne != ne)
--			continue;
-+	class_for_each_device(&nodemgr_ud_class, ne, __nodemgr_driver_suspend);
-+}
- 
--		drv = get_driver(ud->device.driver);
--		if (!drv)
--			continue;
- 
--		if (drv->resume) {
--			down(&ud->device.sem);
--			drv->resume(&ud->device);
--			up(&ud->device.sem);
--		}
--		put_driver(drv);
+-
+-static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+-	if (!(svm->vcpu.cr0 & X86_CR0_TS))
+-		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
+-	svm->vcpu.fpu_active = 1;
+-
+-	return 1;
+-}
+-
+-static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	/*
+-	 * VMCB is undefined after a SHUTDOWN intercept
+-	 * so reinitialize it.
+-	 */
+-	clear_page(svm->vmcb);
+-	init_vmcb(svm->vmcb);
+-
+-	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+-	return 0;
+-}
+-
+-static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
+-	int size, down, in, string, rep;
+-	unsigned port;
+-
+-	++svm->vcpu.stat.io_exits;
+-
+-	svm->next_rip = svm->vmcb->control.exit_info_2;
+-
+-	string = (io_info & SVM_IOIO_STR_MASK) != 0;
+-
+-	if (string) {
+-		if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+-			return 0;
+-		return 1;
 -	}
--	up(&nodemgr_ud_class.sem);
-+static void nodemgr_resume_ne(struct node_entry *ne)
-+{
-+	ne->in_limbo = 0;
-+	device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
- 
-+	class_for_each_device(&nodemgr_ud_class, ne, __nodemgr_driver_resume);
- 	HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "]  GUID[%016Lx]",
- 		   NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
- }
- 
 -
--static void nodemgr_update_pdrv(struct node_entry *ne)
-+static int __nodemgr_update_pdrv(struct device *dev, void *data)
- {
--	struct device *dev;
- 	struct unit_directory *ud;
- 	struct device_driver *drv;
- 	struct hpsb_protocol_driver *pdrv;
-+	struct node_entry *ne = (struct node_entry *)data;
- 	int error;
- 
--	down(&nodemgr_ud_class.sem);
--	list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
--		ud = container_of(dev, struct unit_directory, unit_dev);
--		if (ud->ne != ne)
--			continue;
+-	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
+-	port = io_info >> 16;
+-	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
+-	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
+-	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
 -
-+	ud = container_of(dev, struct unit_directory, unit_dev);
-+	if (ud->ne == ne) {
- 		drv = get_driver(ud->device.driver);
--		if (!drv)
--			continue;
+-	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
+-}
 -
--		error = 0;
--		pdrv = container_of(drv, struct hpsb_protocol_driver, driver);
--		if (pdrv->update) {
--			down(&ud->device.sem);
--			error = pdrv->update(ud);
--			up(&ud->device.sem);
-+		if (drv) {
-+			error = 0;
-+			pdrv = container_of(drv, struct hpsb_protocol_driver,
-+					    driver);
-+			if (pdrv->update) {
-+				down(&ud->device.sem);
-+				error = pdrv->update(ud);
-+				up(&ud->device.sem);
-+			}
-+			if (error)
-+				device_release_driver(&ud->device);
-+			put_driver(drv);
- 		}
--		if (error)
--			device_release_driver(&ud->device);
--		put_driver(drv);
- 	}
--	up(&nodemgr_ud_class.sem);
-+
-+	return 0;
-+}
-+
-+static void nodemgr_update_pdrv(struct node_entry *ne)
-+{
-+	class_for_each_device(&nodemgr_ud_class, ne, __nodemgr_update_pdrv);
- }
- 
- 
-@@ -1529,13 +1550,31 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge
- 	put_device(dev);
- }
- 
-+struct probe_param {
-+	struct host_info *hi;
-+	int generation;
-+};
-+
-+static int __nodemgr_node_probe(struct device *dev, void *data)
-+{
-+	struct probe_param *param = (struct probe_param *)data;
-+	struct node_entry *ne;
-+
-+	ne = container_of(dev, struct node_entry, node_dev);
-+	if (!ne->needs_probe)
-+		nodemgr_probe_ne(param->hi, ne, param->generation);
-+	if (ne->needs_probe)
-+		nodemgr_probe_ne(param->hi, ne, param->generation);
-+	return 0;
-+}
- 
- static void nodemgr_node_probe(struct host_info *hi, int generation)
- {
- 	struct hpsb_host *host = hi->host;
--	struct device *dev;
--	struct node_entry *ne;
-+	struct probe_param param;
- 
-+	param.hi = hi;
-+	param.generation = generation;
- 	/* Do some processing of the nodes we've probed. This pulls them
- 	 * into the sysfs layer if needed, and can result in processing of
- 	 * unit-directories, or just updating the node and it's
-@@ -1545,19 +1584,7 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
- 	 * while probes are time-consuming. (Well, those probes need some
- 	 * improvement...) */
- 
--	down(&nodemgr_ne_class.sem);
--	list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
--		ne = container_of(dev, struct node_entry, node_dev);
--		if (!ne->needs_probe)
--			nodemgr_probe_ne(hi, ne, generation);
+-static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	return 1;
+-}
+-
+-static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	svm->next_rip = svm->vmcb->save.rip + 1;
+-	skip_emulated_instruction(&svm->vcpu);
+-	return kvm_emulate_halt(&svm->vcpu);
+-}
+-
+-static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	svm->next_rip = svm->vmcb->save.rip + 3;
+-	skip_emulated_instruction(&svm->vcpu);
+-	return kvm_hypercall(&svm->vcpu, kvm_run);
+-}
+-
+-static int invalid_op_interception(struct vcpu_svm *svm,
+-				   struct kvm_run *kvm_run)
+-{
+-	inject_ud(&svm->vcpu);
+-	return 1;
+-}
+-
+-static int task_switch_interception(struct vcpu_svm *svm,
+-				    struct kvm_run *kvm_run)
+-{
+-	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
+-	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+-	return 0;
+-}
+-
+-static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	svm->next_rip = svm->vmcb->save.rip + 2;
+-	kvm_emulate_cpuid(&svm->vcpu);
+-	return 1;
+-}
+-
+-static int emulate_on_interception(struct vcpu_svm *svm,
+-				   struct kvm_run *kvm_run)
+-{
+-	if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
+-		pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
+-	return 1;
+-}
+-
+-static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	switch (ecx) {
+-	case MSR_IA32_TIME_STAMP_COUNTER: {
+-		u64 tsc;
+-
+-		rdtscll(tsc);
+-		*data = svm->vmcb->control.tsc_offset + tsc;
+-		break;
 -	}
--	list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
--		ne = container_of(dev, struct node_entry, node_dev);
--		if (ne->needs_probe)
--			nodemgr_probe_ne(hi, ne, generation);
+-	case MSR_K6_STAR:
+-		*data = svm->vmcb->save.star;
+-		break;
+-#ifdef CONFIG_X86_64
+-	case MSR_LSTAR:
+-		*data = svm->vmcb->save.lstar;
+-		break;
+-	case MSR_CSTAR:
+-		*data = svm->vmcb->save.cstar;
+-		break;
+-	case MSR_KERNEL_GS_BASE:
+-		*data = svm->vmcb->save.kernel_gs_base;
+-		break;
+-	case MSR_SYSCALL_MASK:
+-		*data = svm->vmcb->save.sfmask;
+-		break;
+-#endif
+-	case MSR_IA32_SYSENTER_CS:
+-		*data = svm->vmcb->save.sysenter_cs;
+-		break;
+-	case MSR_IA32_SYSENTER_EIP:
+-		*data = svm->vmcb->save.sysenter_eip;
+-		break;
+-	case MSR_IA32_SYSENTER_ESP:
+-		*data = svm->vmcb->save.sysenter_esp;
+-		break;
+-	default:
+-		return kvm_get_msr_common(vcpu, ecx, data);
 -	}
--	up(&nodemgr_ne_class.sem);
+-	return 0;
+-}
 -
-+	class_for_each_device(&nodemgr_ne_class, &param, __nodemgr_node_probe);
- 
- 	/* If we had a bus reset while we were scanning the bus, it is
- 	 * possible that we did not probe all nodes.  In that case, we
-@@ -1757,6 +1784,22 @@ exit:
- 	return 0;
- }
- 
-+struct host_iter_param {
-+	void *data;
-+	int (*cb)(struct hpsb_host *, void *);
-+};
-+
-+static int __nodemgr_for_each_host(struct device *dev, void *data)
-+{
-+	struct hpsb_host *host;
-+	struct host_iter_param *hip = (struct host_iter_param *)data;
-+	int error = 0;
-+
-+	host = container_of(dev, struct hpsb_host, host_dev);
-+	error = hip->cb(host, hip->data);
-+
-+	return error;
-+}
- /**
-  * nodemgr_for_each_host - call a function for each IEEE 1394 host
-  * @data: an address to supply to the callback
-@@ -1771,18 +1814,13 @@ exit:
-  */
- int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
- {
--	struct device *dev;
--	struct hpsb_host *host;
--	int error = 0;
+-static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+-	u64 data;
 -
--	down(&hpsb_host_class.sem);
--	list_for_each_entry(dev, &hpsb_host_class.devices, node) {
--		host = container_of(dev, struct hpsb_host, host_dev);
-+	struct host_iter_param hip;
-+	int error;
- 
--		if ((error = cb(host, data)))
--			break;
+-	if (svm_get_msr(&svm->vcpu, ecx, &data))
+-		svm_inject_gp(&svm->vcpu, 0);
+-	else {
+-		svm->vmcb->save.rax = data & 0xffffffff;
+-		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
+-		svm->next_rip = svm->vmcb->save.rip + 2;
+-		skip_emulated_instruction(&svm->vcpu);
 -	}
--	up(&hpsb_host_class.sem);
-+	hip.cb = cb;
-+	hip.data = data;
-+	error = class_for_each_device(&hpsb_host_class, &hip,
-+				      __nodemgr_for_each_host);
- 
- 	return error;
- }
-diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
-index b83d254..1eda11a 100644
---- a/drivers/ieee1394/sbp2.c
-+++ b/drivers/ieee1394/sbp2.c
-@@ -1963,6 +1963,12 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
- 	lu->sdev = sdev;
- 	sdev->allow_restart = 1;
- 
-+	/*
-+	 * Update the dma alignment (minimum alignment requirements for
-+	 * start and end of DMA transfers) to be a sector
-+	 */
-+	blk_queue_update_dma_alignment(sdev->request_queue, 511);
-+
- 	if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
- 		sdev->inquiry_len = 36;
- 	return 0;
-diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
-index 5381c80..a58ad8a 100644
---- a/drivers/infiniband/core/addr.c
-+++ b/drivers/infiniband/core/addr.c
-@@ -110,7 +110,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
- 	__be32 ip = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
- 	int ret;
- 
--	dev = ip_dev_find(ip);
-+	dev = ip_dev_find(&init_net, ip);
- 	if (!dev)
- 		return -EADDRNOTAVAIL;
- 
-@@ -158,7 +158,7 @@ static void addr_send_arp(struct sockaddr_in *dst_in)
- 
- 	memset(&fl, 0, sizeof fl);
- 	fl.nl_u.ip4_u.daddr = dst_ip;
--	if (ip_route_output_key(&rt, &fl))
-+	if (ip_route_output_key(&init_net, &rt, &fl))
- 		return;
- 
- 	neigh_event_send(rt->u.dst.neighbour, NULL);
-@@ -179,7 +179,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
- 	memset(&fl, 0, sizeof fl);
- 	fl.nl_u.ip4_u.daddr = dst_ip;
- 	fl.nl_u.ip4_u.saddr = src_ip;
--	ret = ip_route_output_key(&rt, &fl);
-+	ret = ip_route_output_key(&init_net, &rt, &fl);
- 	if (ret)
- 		goto out;
- 
-@@ -261,15 +261,15 @@ static int addr_resolve_local(struct sockaddr_in *src_in,
- 	__be32 dst_ip = dst_in->sin_addr.s_addr;
- 	int ret;
- 
--	dev = ip_dev_find(dst_ip);
-+	dev = ip_dev_find(&init_net, dst_ip);
- 	if (!dev)
- 		return -EADDRNOTAVAIL;
- 
--	if (ZERONET(src_ip)) {
-+	if (ipv4_is_zeronet(src_ip)) {
- 		src_in->sin_family = dst_in->sin_family;
- 		src_in->sin_addr.s_addr = dst_ip;
- 		ret = rdma_copy_addr(addr, dev, dev->dev_addr);
--	} else if (LOOPBACK(src_ip)) {
-+	} else if (ipv4_is_loopback(src_ip)) {
- 		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr);
- 		if (!ret)
- 			memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
-diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
-index 2e39236..c015014 100644
---- a/drivers/infiniband/core/cm.c
-+++ b/drivers/infiniband/core/cm.c
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
-+ * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
-  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
-  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
-  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
-@@ -37,12 +37,14 @@
- 
- #include <linux/completion.h>
- #include <linux/dma-mapping.h>
-+#include <linux/device.h>
- #include <linux/err.h>
- #include <linux/idr.h>
- #include <linux/interrupt.h>
- #include <linux/random.h>
- #include <linux/rbtree.h>
- #include <linux/spinlock.h>
-+#include <linux/sysfs.h>
- #include <linux/workqueue.h>
- 
- #include <rdma/ib_cache.h>
-@@ -78,17 +80,94 @@ static struct ib_cm {
- 	struct workqueue_struct *wq;
- } cm;
- 
-+/* Counter indexes ordered by attribute ID */
-+enum {
-+	CM_REQ_COUNTER,
-+	CM_MRA_COUNTER,
-+	CM_REJ_COUNTER,
-+	CM_REP_COUNTER,
-+	CM_RTU_COUNTER,
-+	CM_DREQ_COUNTER,
-+	CM_DREP_COUNTER,
-+	CM_SIDR_REQ_COUNTER,
-+	CM_SIDR_REP_COUNTER,
-+	CM_LAP_COUNTER,
-+	CM_APR_COUNTER,
-+	CM_ATTR_COUNT,
-+	CM_ATTR_ID_OFFSET = 0x0010,
-+};
-+
-+enum {
-+	CM_XMIT,
-+	CM_XMIT_RETRIES,
-+	CM_RECV,
-+	CM_RECV_DUPLICATES,
-+	CM_COUNTER_GROUPS
-+};
-+
-+static char const counter_group_names[CM_COUNTER_GROUPS]
-+				     [sizeof("cm_rx_duplicates")] = {
-+	"cm_tx_msgs", "cm_tx_retries",
-+	"cm_rx_msgs", "cm_rx_duplicates"
-+};
-+
-+struct cm_counter_group {
-+	struct kobject obj;
-+	atomic_long_t counter[CM_ATTR_COUNT];
-+};
-+
-+struct cm_counter_attribute {
-+	struct attribute attr;
-+	int index;
-+};
-+
-+#define CM_COUNTER_ATTR(_name, _index) \
-+struct cm_counter_attribute cm_##_name##_counter_attr = { \
-+	.attr = { .name = __stringify(_name), .mode = 0444, .owner = THIS_MODULE }, \
-+	.index = _index \
-+}
-+
-+static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
-+static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
-+static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
-+static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
-+static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
-+static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
-+static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
-+static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
-+static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
-+static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
-+static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
-+
-+static struct attribute *cm_counter_default_attrs[] = {
-+	&cm_req_counter_attr.attr,
-+	&cm_mra_counter_attr.attr,
-+	&cm_rej_counter_attr.attr,
-+	&cm_rep_counter_attr.attr,
-+	&cm_rtu_counter_attr.attr,
-+	&cm_dreq_counter_attr.attr,
-+	&cm_drep_counter_attr.attr,
-+	&cm_sidr_req_counter_attr.attr,
-+	&cm_sidr_rep_counter_attr.attr,
-+	&cm_lap_counter_attr.attr,
-+	&cm_apr_counter_attr.attr,
-+	NULL
-+};
-+
- struct cm_port {
- 	struct cm_device *cm_dev;
- 	struct ib_mad_agent *mad_agent;
-+	struct kobject port_obj;
- 	u8 port_num;
-+	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
- };
- 
- struct cm_device {
- 	struct list_head list;
- 	struct ib_device *device;
-+	struct kobject dev_obj;
- 	u8 ack_delay;
--	struct cm_port port[0];
-+	struct cm_port *port[0];
- };
- 
- struct cm_av {
-@@ -278,7 +357,7 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
- 	list_for_each_entry(cm_dev, &cm.device_list, list) {
- 		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
- 					&p, NULL)) {
--			port = &cm_dev->port[p-1];
-+			port = cm_dev->port[p-1];
- 			break;
- 		}
- 	}
-@@ -1270,6 +1349,9 @@ static void cm_dup_req_handler(struct cm_work *work,
- 	struct ib_mad_send_buf *msg = NULL;
- 	int ret;
- 
-+	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+			counter[CM_REQ_COUNTER]);
-+
- 	/* Quick state check to discard duplicate REQs. */
- 	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
- 		return;
-@@ -1616,6 +1698,8 @@ static void cm_dup_rep_handler(struct cm_work *work)
- 	if (!cm_id_priv)
- 		return;
- 
-+	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+			counter[CM_REP_COUNTER]);
- 	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
- 	if (ret)
- 		goto deref;
-@@ -1781,6 +1865,8 @@ static int cm_rtu_handler(struct cm_work *work)
- 	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
- 	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
- 		spin_unlock_irq(&cm_id_priv->lock);
-+		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+				counter[CM_RTU_COUNTER]);
- 		goto out;
- 	}
- 	cm_id_priv->id.state = IB_CM_ESTABLISHED;
-@@ -1958,6 +2044,8 @@ static int cm_dreq_handler(struct cm_work *work)
- 	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
- 				   dreq_msg->local_comm_id);
- 	if (!cm_id_priv) {
-+		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+				counter[CM_DREQ_COUNTER]);
- 		cm_issue_drep(work->port, work->mad_recv_wc);
- 		return -EINVAL;
- 	}
-@@ -1977,6 +2065,8 @@ static int cm_dreq_handler(struct cm_work *work)
- 	case IB_CM_MRA_REP_RCVD:
- 		break;
- 	case IB_CM_TIMEWAIT:
-+		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+				counter[CM_DREQ_COUNTER]);
- 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
- 			goto unlock;
- 
-@@ -1988,6 +2078,10 @@ static int cm_dreq_handler(struct cm_work *work)
- 		if (ib_post_send_mad(msg, NULL))
- 			cm_free_msg(msg);
- 		goto deref;
-+	case IB_CM_DREQ_RCVD:
-+		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+				counter[CM_DREQ_COUNTER]);
-+		goto unlock;
- 	default:
- 		goto unlock;
- 	}
-@@ -2339,10 +2433,20 @@ static int cm_mra_handler(struct cm_work *work)
- 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
- 		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
- 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
--				  cm_id_priv->msg, timeout))
-+				  cm_id_priv->msg, timeout)) {
-+			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
-+				atomic_long_inc(&work->port->
-+						counter_group[CM_RECV_DUPLICATES].
-+						counter[CM_MRA_COUNTER]);
- 			goto out;
-+		}
- 		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
- 		break;
-+	case IB_CM_MRA_REQ_RCVD:
-+	case IB_CM_MRA_REP_RCVD:
-+		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+				counter[CM_MRA_COUNTER]);
-+		/* fall through */
- 	default:
- 		goto out;
- 	}
-@@ -2502,6 +2606,8 @@ static int cm_lap_handler(struct cm_work *work)
- 	case IB_CM_LAP_IDLE:
- 		break;
- 	case IB_CM_MRA_LAP_SENT:
-+		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+				counter[CM_LAP_COUNTER]);
- 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
- 			goto unlock;
- 
-@@ -2515,6 +2621,10 @@ static int cm_lap_handler(struct cm_work *work)
- 		if (ib_post_send_mad(msg, NULL))
- 			cm_free_msg(msg);
- 		goto deref;
-+	case IB_CM_LAP_RCVD:
-+		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+				counter[CM_LAP_COUNTER]);
-+		goto unlock;
- 	default:
- 		goto unlock;
- 	}
-@@ -2796,6 +2906,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
- 	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
- 	if (cur_cm_id_priv) {
- 		spin_unlock_irq(&cm.lock);
-+		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+				counter[CM_SIDR_REQ_COUNTER]);
- 		goto out; /* Duplicate message. */
- 	}
- 	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
-@@ -2990,6 +3102,27 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
- 			    struct ib_mad_send_wc *mad_send_wc)
- {
- 	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
-+	struct cm_port *port;
-+	u16 attr_index;
-+
-+	port = mad_agent->context;
-+	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
-+				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
-+
-+	/*
-+	 * If the send was in response to a received message (context[0] is not
-+	 * set to a cm_id), and is not a REJ, then it is a send that was
-+	 * manually retried.
-+	 */
-+	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
-+		msg->retries = 1;
-+
-+	atomic_long_add(1 + msg->retries,
-+			&port->counter_group[CM_XMIT].counter[attr_index]);
-+	if (msg->retries)
-+		atomic_long_add(msg->retries,
-+				&port->counter_group[CM_XMIT_RETRIES].
-+				counter[attr_index]);
- 
- 	switch (mad_send_wc->status) {
- 	case IB_WC_SUCCESS:
-@@ -3148,8 +3281,10 @@ EXPORT_SYMBOL(ib_cm_notify);
- static void cm_recv_handler(struct ib_mad_agent *mad_agent,
- 			    struct ib_mad_recv_wc *mad_recv_wc)
- {
-+	struct cm_port *port = mad_agent->context;
- 	struct cm_work *work;
- 	enum ib_cm_event_type event;
-+	u16 attr_id;
- 	int paths = 0;
- 
- 	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
-@@ -3194,6 +3329,10 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
- 		return;
- 	}
- 
-+	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
-+	atomic_long_inc(&port->counter_group[CM_RECV].
-+			counter[attr_id - CM_ATTR_ID_OFFSET]);
-+
- 	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
- 		       GFP_KERNEL);
- 	if (!work) {
-@@ -3204,7 +3343,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
- 	INIT_DELAYED_WORK(&work->work, cm_work_handler);
- 	work->cm_event.event = event;
- 	work->mad_recv_wc = mad_recv_wc;
--	work->port = (struct cm_port *)mad_agent->context;
-+	work->port = port;
- 	queue_delayed_work(cm.wq, &work->work, 0);
- }
- 
-@@ -3379,6 +3518,108 @@ static void cm_get_ack_delay(struct cm_device *cm_dev)
- 		cm_dev->ack_delay = attr.local_ca_ack_delay;
- }
- 
-+static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
-+			       char *buf)
-+{
-+	struct cm_counter_group *group;
-+	struct cm_counter_attribute *cm_attr;
-+
-+	group = container_of(obj, struct cm_counter_group, obj);
-+	cm_attr = container_of(attr, struct cm_counter_attribute, attr);
-+
-+	return sprintf(buf, "%ld\n",
-+		       atomic_long_read(&group->counter[cm_attr->index]));
-+}
-+
-+static struct sysfs_ops cm_counter_ops = {
-+	.show = cm_show_counter
-+};
-+
-+static struct kobj_type cm_counter_obj_type = {
-+	.sysfs_ops = &cm_counter_ops,
-+	.default_attrs = cm_counter_default_attrs
-+};
-+
-+static void cm_release_port_obj(struct kobject *obj)
-+{
-+	struct cm_port *cm_port;
-+
-+	printk(KERN_ERR "free cm port\n");
-+
-+	cm_port = container_of(obj, struct cm_port, port_obj);
-+	kfree(cm_port);
-+}
-+
-+static struct kobj_type cm_port_obj_type = {
-+	.release = cm_release_port_obj
-+};
-+
-+static void cm_release_dev_obj(struct kobject *obj)
-+{
-+	struct cm_device *cm_dev;
-+
-+	printk(KERN_ERR "free cm dev\n");
-+
-+	cm_dev = container_of(obj, struct cm_device, dev_obj);
-+	kfree(cm_dev);
-+}
-+
-+static struct kobj_type cm_dev_obj_type = {
-+	.release = cm_release_dev_obj
-+};
-+
-+struct class cm_class = {
-+	.name    = "infiniband_cm",
-+};
-+EXPORT_SYMBOL(cm_class);
-+
-+static void cm_remove_fs_obj(struct kobject *obj)
-+{
-+	kobject_put(obj->parent);
-+	kobject_put(obj);
-+}
-+
-+static int cm_create_port_fs(struct cm_port *port)
-+{
-+	int i, ret;
-+
-+	ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
-+				   kobject_get(&port->cm_dev->dev_obj),
-+				   "%d", port->port_num);
-+	if (ret) {
-+		kfree(port);
-+		return ret;
-+	}
-+
-+	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
-+		ret = kobject_init_and_add(&port->counter_group[i].obj,
-+					   &cm_counter_obj_type,
-+					   kobject_get(&port->port_obj),
-+					   "%s", counter_group_names[i]);
-+		if (ret)
-+			goto error;
-+	}
-+
-+	return 0;
-+
-+error:
-+	while (i--)
-+		cm_remove_fs_obj(&port->counter_group[i].obj);
-+	cm_remove_fs_obj(&port->port_obj);
-+	return ret;
-+
-+}
-+
-+static void cm_remove_port_fs(struct cm_port *port)
-+{
-+	int i;
-+
-+	for (i = 0; i < CM_COUNTER_GROUPS; i++)
-+		cm_remove_fs_obj(&port->counter_group[i].obj);
-+
-+	cm_remove_fs_obj(&port->port_obj);
-+}
-+
- static void cm_add_one(struct ib_device *device)
- {
- 	struct cm_device *cm_dev;
-@@ -3397,7 +3638,7 @@ static void cm_add_one(struct ib_device *device)
- 	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
- 		return;
- 
--	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
-+	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
- 			 device->phys_port_cnt, GFP_KERNEL);
- 	if (!cm_dev)
- 		return;
-@@ -3405,11 +3646,27 @@ static void cm_add_one(struct ib_device *device)
- 	cm_dev->device = device;
- 	cm_get_ack_delay(cm_dev);
- 
-+	ret = kobject_init_and_add(&cm_dev->dev_obj, &cm_dev_obj_type,
-+				   &cm_class.subsys.kobj, "%s", device->name);
-+	if (ret) {
-+		kfree(cm_dev);
-+		return;
-+	}
-+
- 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
- 	for (i = 1; i <= device->phys_port_cnt; i++) {
--		port = &cm_dev->port[i-1];
-+		port = kzalloc(sizeof *port, GFP_KERNEL);
-+		if (!port)
-+			goto error1;
-+
-+		cm_dev->port[i-1] = port;
- 		port->cm_dev = cm_dev;
- 		port->port_num = i;
-+
-+		ret = cm_create_port_fs(port);
-+		if (ret)
-+			goto error1;
-+
- 		port->mad_agent = ib_register_mad_agent(device, i,
- 							IB_QPT_GSI,
- 							&reg_req,
-@@ -3418,11 +3675,11 @@ static void cm_add_one(struct ib_device *device)
- 							cm_recv_handler,
- 							port);
- 		if (IS_ERR(port->mad_agent))
--			goto error1;
-+			goto error2;
- 
- 		ret = ib_modify_port(device, i, 0, &port_modify);
- 		if (ret)
--			goto error2;
-+			goto error3;
- 	}
- 	ib_set_client_data(device, &cm_client, cm_dev);
- 
-@@ -3431,17 +3688,20 @@ static void cm_add_one(struct ib_device *device)
- 	write_unlock_irqrestore(&cm.device_lock, flags);
- 	return;
- 
--error2:
-+error3:
- 	ib_unregister_mad_agent(port->mad_agent);
-+error2:
-+	cm_remove_port_fs(port);
- error1:
- 	port_modify.set_port_cap_mask = 0;
- 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
- 	while (--i) {
--		port = &cm_dev->port[i-1];
-+		port = cm_dev->port[i-1];
- 		ib_modify_port(device, port->port_num, 0, &port_modify);
- 		ib_unregister_mad_agent(port->mad_agent);
-+		cm_remove_port_fs(port);
- 	}
--	kfree(cm_dev);
-+	cm_remove_fs_obj(&cm_dev->dev_obj);
- }
- 
- static void cm_remove_one(struct ib_device *device)
-@@ -3463,11 +3723,12 @@ static void cm_remove_one(struct ib_device *device)
- 	write_unlock_irqrestore(&cm.device_lock, flags);
- 
- 	for (i = 1; i <= device->phys_port_cnt; i++) {
--		port = &cm_dev->port[i-1];
-+		port = cm_dev->port[i-1];
- 		ib_modify_port(device, port->port_num, 0, &port_modify);
- 		ib_unregister_mad_agent(port->mad_agent);
-+		cm_remove_port_fs(port);
- 	}
--	kfree(cm_dev);
-+	cm_remove_fs_obj(&cm_dev->dev_obj);
- }
- 
- static int __init ib_cm_init(void)
-@@ -3488,17 +3749,25 @@ static int __init ib_cm_init(void)
- 	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
- 	INIT_LIST_HEAD(&cm.timewait_list);
- 
--	cm.wq = create_workqueue("ib_cm");
--	if (!cm.wq)
-+	ret = class_register(&cm_class);
-+	if (ret)
- 		return -ENOMEM;
- 
-+	cm.wq = create_workqueue("ib_cm");
-+	if (!cm.wq) {
-+		ret = -ENOMEM;
-+		goto error1;
-+	}
-+
- 	ret = ib_register_client(&cm_client);
- 	if (ret)
--		goto error;
-+		goto error2;
- 
- 	return 0;
--error:
-+error2:
- 	destroy_workqueue(cm.wq);
-+error1:
-+	class_unregister(&cm_class);
- 	return ret;
- }
- 
-@@ -3519,6 +3788,7 @@ static void __exit ib_cm_cleanup(void)
- 	}
- 
- 	ib_unregister_client(&cm_client);
-+	class_unregister(&cm_class);
- 	idr_destroy(&cm.local_id_table);
- }
- 
-diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
-index 0751697..1eff1b2 100644
---- a/drivers/infiniband/core/cma.c
-+++ b/drivers/infiniband/core/cma.c
-@@ -488,7 +488,8 @@ void rdma_destroy_qp(struct rdma_cm_id *id)
- }
- EXPORT_SYMBOL(rdma_destroy_qp);
- 
--static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
-+static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
-+			     struct rdma_conn_param *conn_param)
- {
- 	struct ib_qp_attr qp_attr;
- 	int qp_attr_mask, ret;
-@@ -514,13 +515,16 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
- 	if (ret)
- 		goto out;
- 
-+	if (conn_param)
-+		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
- 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
- out:
- 	mutex_unlock(&id_priv->qp_mutex);
- 	return ret;
- }
- 
--static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
-+static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
-+			     struct rdma_conn_param *conn_param)
- {
- 	struct ib_qp_attr qp_attr;
- 	int qp_attr_mask, ret;
-@@ -536,6 +540,8 @@ static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
- 	if (ret)
- 		goto out;
- 
-+	if (conn_param)
-+		qp_attr.max_rd_atomic = conn_param->initiator_depth;
- 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
- out:
- 	mutex_unlock(&id_priv->qp_mutex);
-@@ -624,7 +630,8 @@ static inline int cma_zero_addr(struct sockaddr *addr)
- 	struct in6_addr *ip6;
- 
- 	if (addr->sa_family == AF_INET)
--		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
-+		return ipv4_is_zeronet(
-+			((struct sockaddr_in *)addr)->sin_addr.s_addr);
- 	else {
- 		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
- 		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
-@@ -634,7 +641,7 @@ static inline int cma_zero_addr(struct sockaddr *addr)
- 
- static inline int cma_loopback_addr(struct sockaddr *addr)
- {
--	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
-+	return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
- }
- 
- static inline int cma_any_addr(struct sockaddr *addr)
-@@ -866,11 +873,11 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
- {
- 	int ret;
- 
--	ret = cma_modify_qp_rtr(id_priv);
-+	ret = cma_modify_qp_rtr(id_priv, NULL);
- 	if (ret)
- 		goto reject;
- 
--	ret = cma_modify_qp_rts(id_priv);
-+	ret = cma_modify_qp_rts(id_priv, NULL);
- 	if (ret)
- 		goto reject;
- 
-@@ -1122,8 +1129,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
- 	cm_id->cm_handler = cma_ib_handler;
- 
- 	ret = conn_id->id.event_handler(&conn_id->id, &event);
--	if (!ret)
-+	if (!ret) {
-+		cma_enable_remove(conn_id);
- 		goto out;
-+	}
- 
- 	/* Destroy the CM ID by returning a non-zero value. */
- 	conn_id->cm_id.ib = NULL;
-@@ -1262,6 +1271,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
- 	struct net_device *dev = NULL;
- 	struct rdma_cm_event event;
- 	int ret;
-+	struct ib_device_attr attr;
- 
- 	listen_id = cm_id->context;
- 	if (cma_disable_remove(listen_id, CMA_LISTEN))
-@@ -1279,7 +1289,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
- 	atomic_inc(&conn_id->dev_remove);
- 	conn_id->state = CMA_CONNECT;
- 
--	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
-+	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
- 	if (!dev) {
- 		ret = -EADDRNOTAVAIL;
- 		cma_enable_remove(conn_id);
-@@ -1311,10 +1321,19 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
- 	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
- 	*sin = iw_event->remote_addr;
- 
-+	ret = ib_query_device(conn_id->id.device, &attr);
-+	if (ret) {
-+		cma_enable_remove(conn_id);
-+		rdma_destroy_id(new_cm_id);
-+		goto out;
-+	}
-+
- 	memset(&event, 0, sizeof event);
- 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
- 	event.param.conn.private_data = iw_event->private_data;
- 	event.param.conn.private_data_len = iw_event->private_data_len;
-+	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
-+	event.param.conn.responder_resources = attr.max_qp_rd_atom;
- 	ret = conn_id->id.event_handler(&conn_id->id, &event);
- 	if (ret) {
- 		/* User wants to destroy the CM ID */
-@@ -2272,7 +2291,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
- 	sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
- 	cm_id->remote_addr = *sin;
- 
--	ret = cma_modify_qp_rtr(id_priv);
-+	ret = cma_modify_qp_rtr(id_priv, conn_param);
- 	if (ret)
- 		goto out;
- 
-@@ -2335,25 +2354,15 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
- 			 struct rdma_conn_param *conn_param)
- {
- 	struct ib_cm_rep_param rep;
--	struct ib_qp_attr qp_attr;
--	int qp_attr_mask, ret;
+-	return 1;
+-}
 -
--	if (id_priv->id.qp) {
--		ret = cma_modify_qp_rtr(id_priv);
--		if (ret)
--			goto out;
-+	int ret;
- 
--		qp_attr.qp_state = IB_QPS_RTS;
--		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
--					 &qp_attr_mask);
--		if (ret)
--			goto out;
-+	ret = cma_modify_qp_rtr(id_priv, conn_param);
-+	if (ret)
-+		goto out;
- 
--		qp_attr.max_rd_atomic = conn_param->initiator_depth;
--		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
--		if (ret)
--			goto out;
--	}
-+	ret = cma_modify_qp_rts(id_priv, conn_param);
-+	if (ret)
-+		goto out;
- 
- 	memset(&rep, 0, sizeof rep);
- 	rep.qp_num = id_priv->qp_num;
-@@ -2378,7 +2387,7 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
- 	struct iw_cm_conn_param iw_param;
- 	int ret;
- 
--	ret = cma_modify_qp_rtr(id_priv);
-+	ret = cma_modify_qp_rtr(id_priv, conn_param);
- 	if (ret)
- 		return ret;
- 
-@@ -2598,11 +2607,9 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
- 		/* IPv6 address is an SA assigned MGID. */
- 		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
- 	} else {
--		ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
-+		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
- 		if (id_priv->id.ps == RDMA_PS_UDP)
- 			mc_map[7] = 0x01;	/* Use RDMA CM signature */
--		mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
--		mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
- 		*mgid = *(union ib_gid *) (mc_map + 4);
- 	}
- }
-diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
-index e8d5f6b..6c7aa59 100644
---- a/drivers/infiniband/core/fmr_pool.c
-+++ b/drivers/infiniband/core/fmr_pool.c
-@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
- static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
- {
- 	int                 ret;
--	struct ib_pool_fmr *fmr;
-+	struct ib_pool_fmr *fmr, *next;
- 	LIST_HEAD(unmap_list);
- 	LIST_HEAD(fmr_list);
- 
-@@ -158,6 +158,20 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
- #endif
- 	}
- 
-+	/*
-+	 * The free_list may hold FMRs that have been put there
-+	 * because they haven't reached the max_remap count.
-+	 * Invalidate their mapping as well.
-+	 */
-+	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
-+		if (fmr->remap_count == 0)
-+			continue;
-+		hlist_del_init(&fmr->cache_node);
-+		fmr->remap_count = 0;
-+		list_add_tail(&fmr->fmr->list, &fmr_list);
-+		list_move(&fmr->list, &unmap_list);
-+	}
-+
- 	list_splice(&pool->dirty_list, &unmap_list);
- 	INIT_LIST_HEAD(&pool->dirty_list);
- 	pool->dirty_len = 0;
-@@ -182,8 +196,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
- 	struct ib_fmr_pool *pool = pool_ptr;
- 
- 	do {
--		if (pool->dirty_len >= pool->dirty_watermark ||
--		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
-+		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
- 			ib_fmr_batch_release(pool);
- 
- 			atomic_inc(&pool->flush_ser);
-@@ -194,8 +207,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
- 		}
- 
- 		set_current_state(TASK_INTERRUPTIBLE);
--		if (pool->dirty_len < pool->dirty_watermark &&
--		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
-+		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
- 		    !kthread_should_stop())
- 			schedule();
- 		__set_current_state(TASK_RUNNING);
-@@ -369,11 +381,6 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
- 
- 	i = 0;
- 	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
--		if (fmr->remap_count) {
--			INIT_LIST_HEAD(&fmr_list);
--			list_add_tail(&fmr->fmr->list, &fmr_list);
--			ib_unmap_fmr(&fmr_list);
--		}
- 		ib_dealloc_fmr(fmr->fmr);
- 		list_del(&fmr->list);
- 		kfree(fmr);
-@@ -511,8 +518,10 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
- 			list_add_tail(&fmr->list, &pool->free_list);
- 		} else {
- 			list_add_tail(&fmr->list, &pool->dirty_list);
--			++pool->dirty_len;
--			wake_up_process(pool->thread);
-+			if (++pool->dirty_len >= pool->dirty_watermark) {
-+				atomic_inc(&pool->req_ser);
-+				wake_up_process(pool->thread);
-+			}
- 		}
- 	}
- 
-diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
-index 6f42877..fbe16d5 100644
---- a/drivers/infiniband/core/mad.c
-+++ b/drivers/infiniband/core/mad.c
-@@ -701,7 +701,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
- 	}
- 
- 	/* Check to post send on QP or process locally */
--	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD)
-+	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
-+	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
- 		goto out;
- 
- 	local = kmalloc(sizeof *local, GFP_ATOMIC);
-@@ -752,8 +753,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
- 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
- 					    mad_agent_priv->agent.port_num);
- 		if (port_priv) {
--			mad_priv->mad.mad.mad_hdr.tid =
--				((struct ib_mad *)smp)->mad_hdr.tid;
-+			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
- 			recv_mad_agent = find_mad_agent(port_priv,
- 						        &mad_priv->mad.mad);
- 		}
-@@ -1100,7 +1100,9 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
- 		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
- 		/* Timeout will be updated after send completes */
- 		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
--		mad_send_wr->retries = send_buf->retries;
-+		mad_send_wr->max_retries = send_buf->retries;
-+		mad_send_wr->retries_left = send_buf->retries;
-+		send_buf->retries = 0;
- 		/* Reference for work request to QP + response */
- 		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
- 		mad_send_wr->status = IB_WC_SUCCESS;
-@@ -1931,15 +1933,6 @@ local:
- 	if (port_priv->device->process_mad) {
- 		int ret;
- 
--		if (!response) {
--			printk(KERN_ERR PFX "No memory for response MAD\n");
--			/*
--			 * Is it better to assume that
--			 * it wouldn't be processed ?
--			 */
--			goto out;
--		}
+-static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
 -
- 		ret = port_priv->device->process_mad(port_priv->device, 0,
- 						     port_priv->port_num,
- 						     wc, &recv->grh,
-@@ -2282,8 +2275,6 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
- 
- 	/* Empty wait list to prevent receives from finding a request */
- 	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
--	/* Empty local completion list as well */
--	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
- 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- 
- 	/* Report all cancelled requests */
-@@ -2445,9 +2436,12 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
- {
- 	int ret;
- 
--	if (!mad_send_wr->retries--)
-+	if (!mad_send_wr->retries_left)
- 		return -ETIMEDOUT;
- 
-+	mad_send_wr->retries_left--;
-+	mad_send_wr->send_buf.retries++;
-+
- 	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
- 
- 	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
-diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
-index 9be5cc0..8b75010 100644
---- a/drivers/infiniband/core/mad_priv.h
-+++ b/drivers/infiniband/core/mad_priv.h
-@@ -131,7 +131,8 @@ struct ib_mad_send_wr_private {
- 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
- 	__be64 tid;
- 	unsigned long timeout;
--	int retries;
-+	int max_retries;
-+	int retries_left;
- 	int retry;
- 	int refcount;
- 	enum ib_wc_status status;
-diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
-index d43bc62..a5e2a31 100644
---- a/drivers/infiniband/core/mad_rmpp.c
-+++ b/drivers/infiniband/core/mad_rmpp.c
-@@ -684,7 +684,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
- 
- 	if (seg_num > mad_send_wr->last_ack) {
- 		adjust_last_ack(mad_send_wr, seg_num);
--		mad_send_wr->retries = mad_send_wr->send_buf.retries;
-+		mad_send_wr->retries_left = mad_send_wr->max_retries;
- 	}
- 	mad_send_wr->newwin = newwin;
- 	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
-diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
-index 1bc1fe6..107f170 100644
---- a/drivers/infiniband/core/multicast.c
-+++ b/drivers/infiniband/core/multicast.c
-@@ -73,11 +73,20 @@ struct mcast_device {
- };
- 
- enum mcast_state {
--	MCAST_IDLE,
- 	MCAST_JOINING,
- 	MCAST_MEMBER,
-+	MCAST_ERROR,
-+};
-+
-+enum mcast_group_state {
-+	MCAST_IDLE,
- 	MCAST_BUSY,
--	MCAST_ERROR
-+	MCAST_GROUP_ERROR,
-+	MCAST_PKEY_EVENT
-+};
-+
-+enum {
-+	MCAST_INVALID_PKEY_INDEX = 0xFFFF
- };
- 
- struct mcast_member;
-@@ -93,9 +102,10 @@ struct mcast_group {
- 	struct mcast_member	*last_join;
- 	int			members[3];
- 	atomic_t		refcount;
--	enum mcast_state	state;
-+	enum mcast_group_state	state;
- 	struct ib_sa_query	*query;
- 	int			query_id;
-+	u16			pkey_index;
- };
- 
- struct mcast_member {
-@@ -378,9 +388,19 @@ static int fail_join(struct mcast_group *group, struct mcast_member *member,
- static void process_group_error(struct mcast_group *group)
- {
- 	struct mcast_member *member;
--	int ret;
-+	int ret = 0;
-+	u16 pkey_index;
-+
-+	if (group->state == MCAST_PKEY_EVENT)
-+		ret = ib_find_pkey(group->port->dev->device,
-+				   group->port->port_num,
-+				   be16_to_cpu(group->rec.pkey), &pkey_index);
- 
- 	spin_lock_irq(&group->lock);
-+	if (group->state == MCAST_PKEY_EVENT && !ret &&
-+	    group->pkey_index == pkey_index)
-+		goto out;
-+
- 	while (!list_empty(&group->active_list)) {
- 		member = list_entry(group->active_list.next,
- 				    struct mcast_member, list);
-@@ -399,6 +419,7 @@ static void process_group_error(struct mcast_group *group)
- 	}
- 
- 	group->rec.join_state = 0;
-+out:
- 	group->state = MCAST_BUSY;
- 	spin_unlock_irq(&group->lock);
- }
-@@ -415,9 +436,9 @@ static void mcast_work_handler(struct work_struct *work)
- retest:
- 	spin_lock_irq(&group->lock);
- 	while (!list_empty(&group->pending_list) ||
--	       (group->state == MCAST_ERROR)) {
-+	       (group->state != MCAST_BUSY)) {
- 
--		if (group->state == MCAST_ERROR) {
-+		if (group->state != MCAST_BUSY) {
- 			spin_unlock_irq(&group->lock);
- 			process_group_error(group);
- 			goto retest;
-@@ -494,12 +515,19 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
- 			 void *context)
- {
- 	struct mcast_group *group = context;
-+	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;
- 
- 	if (status)
- 		process_join_error(group, status);
- 	else {
-+		ib_find_pkey(group->port->dev->device, group->port->port_num,
-+			     be16_to_cpu(rec->pkey), &pkey_index);
-+
- 		spin_lock_irq(&group->port->lock);
- 		group->rec = *rec;
-+		if (group->state == MCAST_BUSY &&
-+		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
-+			group->pkey_index = pkey_index;
- 		if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
- 			rb_erase(&group->node, &group->port->table);
- 			mcast_insert(group->port, group, 1);
-@@ -539,6 +567,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
- 
- 	group->port = port;
- 	group->rec.mgid = *mgid;
-+	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
- 	INIT_LIST_HEAD(&group->pending_list);
- 	INIT_LIST_HEAD(&group->active_list);
- 	INIT_WORK(&group->work, mcast_work_handler);
-@@ -707,7 +736,8 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
- }
- EXPORT_SYMBOL(ib_init_ah_from_mcmember);
- 
--static void mcast_groups_lost(struct mcast_port *port)
-+static void mcast_groups_event(struct mcast_port *port,
-+			       enum mcast_group_state state)
- {
- 	struct mcast_group *group;
- 	struct rb_node *node;
-@@ -721,7 +751,8 @@ static void mcast_groups_lost(struct mcast_port *port)
- 			atomic_inc(&group->refcount);
- 			queue_work(mcast_wq, &group->work);
- 		}
--		group->state = MCAST_ERROR;
-+		if (group->state != MCAST_GROUP_ERROR)
-+			group->state = state;
- 		spin_unlock(&group->lock);
- 	}
- 	spin_unlock_irqrestore(&port->lock, flags);
-@@ -731,16 +762,20 @@ static void mcast_event_handler(struct ib_event_handler *handler,
- 				struct ib_event *event)
- {
- 	struct mcast_device *dev;
-+	int index;
- 
- 	dev = container_of(handler, struct mcast_device, event_handler);
-+	index = event->element.port_num - dev->start_port;
- 
- 	switch (event->event) {
- 	case IB_EVENT_PORT_ERR:
- 	case IB_EVENT_LID_CHANGE:
- 	case IB_EVENT_SM_CHANGE:
- 	case IB_EVENT_CLIENT_REREGISTER:
--		mcast_groups_lost(&dev->port[event->element.port_num -
--					     dev->start_port]);
-+		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
-+		break;
-+	case IB_EVENT_PKEY_CHANGE:
-+		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
- 		break;
- 	default:
- 		break;
-diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
-index 1cfc298..aff96ba 100644
---- a/drivers/infiniband/core/smi.h
-+++ b/drivers/infiniband/core/smi.h
-@@ -59,7 +59,8 @@ extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- 					      u8 node_type, int port_num);
- 
- /*
-- * Return 1 if the SMP should be handled by the local SMA/SM via process_mad
-+ * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
-+ * via process_mad
-  */
- static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
- 						  struct ib_device *device)
-@@ -71,4 +72,19 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
- 		(smp->hop_ptr == smp->hop_cnt + 1)) ?
- 		IB_SMI_HANDLE : IB_SMI_DISCARD);
- }
-+
-+/*
-+ * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
-+ * via process_mad
-+ */
-+static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp,
-+						   struct ib_device *device)
-+{
-+	/* C14-13:3 -- We're at the end of the DR segment of path */
-+	/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
-+	return ((device->process_mad &&
-+		ib_get_smp_direction(smp) &&
-+		!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD);
-+}
-+
- #endif	/* __SMI_H_ */
-diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
-index 3d40506..c864ef7 100644
---- a/drivers/infiniband/core/sysfs.c
-+++ b/drivers/infiniband/core/sysfs.c
-@@ -508,19 +508,10 @@ static int add_port(struct ib_device *device, int port_num)
- 
- 	p->ibdev      = device;
- 	p->port_num   = port_num;
--	p->kobj.ktype = &port_type;
- 
--	p->kobj.parent = kobject_get(&device->ports_parent);
--	if (!p->kobj.parent) {
--		ret = -EBUSY;
--		goto err;
+-	switch (ecx) {
+-	case MSR_IA32_TIME_STAMP_COUNTER: {
+-		u64 tsc;
+-
+-		rdtscll(tsc);
+-		svm->vmcb->control.tsc_offset = data - tsc;
+-		break;
+-	}
+-	case MSR_K6_STAR:
+-		svm->vmcb->save.star = data;
+-		break;
+-#ifdef CONFIG_X86_64
+-	case MSR_LSTAR:
+-		svm->vmcb->save.lstar = data;
+-		break;
+-	case MSR_CSTAR:
+-		svm->vmcb->save.cstar = data;
+-		break;
+-	case MSR_KERNEL_GS_BASE:
+-		svm->vmcb->save.kernel_gs_base = data;
+-		break;
+-	case MSR_SYSCALL_MASK:
+-		svm->vmcb->save.sfmask = data;
+-		break;
+-#endif
+-	case MSR_IA32_SYSENTER_CS:
+-		svm->vmcb->save.sysenter_cs = data;
+-		break;
+-	case MSR_IA32_SYSENTER_EIP:
+-		svm->vmcb->save.sysenter_eip = data;
+-		break;
+-	case MSR_IA32_SYSENTER_ESP:
+-		svm->vmcb->save.sysenter_esp = data;
+-		break;
+-	default:
+-		return kvm_set_msr_common(vcpu, ecx, data);
 -	}
+-	return 0;
+-}
 -
--	ret = kobject_set_name(&p->kobj, "%d", port_num);
--	if (ret)
--		goto err_put;
+-static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+-	u64 data = (svm->vmcb->save.rax & -1u)
+-		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
+-	svm->next_rip = svm->vmcb->save.rip + 2;
+-	if (svm_set_msr(&svm->vcpu, ecx, data))
+-		svm_inject_gp(&svm->vcpu, 0);
+-	else
+-		skip_emulated_instruction(&svm->vcpu);
+-	return 1;
+-}
 -
--	ret = kobject_register(&p->kobj);
-+	ret = kobject_init_and_add(&p->kobj, &port_type,
-+				   kobject_get(device->ports_parent),
-+				   "%d", port_num);
- 	if (ret)
- 		goto err_put;
- 
-@@ -549,6 +540,7 @@ static int add_port(struct ib_device *device, int port_num)
- 
- 	list_add_tail(&p->kobj.entry, &device->port_list);
- 
-+	kobject_uevent(&p->kobj, KOBJ_ADD);
- 	return 0;
- 
- err_free_pkey:
-@@ -570,9 +562,7 @@ err_remove_pma:
- 	sysfs_remove_group(&p->kobj, &pma_group);
- 
- err_put:
--	kobject_put(&device->ports_parent);
+-static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+-{
+-	if (svm->vmcb->control.exit_info_1)
+-		return wrmsr_interception(svm, kvm_run);
+-	else
+-		return rdmsr_interception(svm, kvm_run);
+-}
 -
--err:
-+	kobject_put(device->ports_parent);
- 	kfree(p);
- 	return ret;
- }
-@@ -694,16 +684,9 @@ int ib_device_register_sysfs(struct ib_device *device)
- 			goto err_unregister;
- 	}
- 
--	device->ports_parent.parent = kobject_get(&class_dev->kobj);
--	if (!device->ports_parent.parent) {
--		ret = -EBUSY;
--		goto err_unregister;
+-static int interrupt_window_interception(struct vcpu_svm *svm,
+-				   struct kvm_run *kvm_run)
+-{
+-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+-	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+-	/*
+-	 * If the user space waits to inject interrupts, exit as soon as
+-	 * possible
+-	 */
+-	if (kvm_run->request_interrupt_window &&
+-	    !svm->vcpu.irq_summary) {
+-		++svm->vcpu.stat.irq_window_exits;
+-		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+-		return 0;
 -	}
--	ret = kobject_set_name(&device->ports_parent, "ports");
--	if (ret)
--		goto err_put;
--	ret = kobject_register(&device->ports_parent);
--	if (ret)
-+	device->ports_parent = kobject_create_and_add("ports",
-+					kobject_get(&class_dev->kobj));
-+	if (!device->ports_parent)
- 		goto err_put;
- 
- 	if (device->node_type == RDMA_NODE_IB_SWITCH) {
-@@ -731,7 +714,7 @@ err_put:
- 			sysfs_remove_group(p, &pma_group);
- 			sysfs_remove_group(p, &port->pkey_group);
- 			sysfs_remove_group(p, &port->gid_group);
--			kobject_unregister(p);
-+			kobject_put(p);
- 		}
- 	}
- 
-@@ -755,10 +738,10 @@ void ib_device_unregister_sysfs(struct ib_device *device)
- 		sysfs_remove_group(p, &pma_group);
- 		sysfs_remove_group(p, &port->pkey_group);
- 		sysfs_remove_group(p, &port->gid_group);
--		kobject_unregister(p);
-+		kobject_put(p);
- 	}
- 
--	kobject_unregister(&device->ports_parent);
-+	kobject_put(device->ports_parent);
- 	class_device_unregister(&device->class_dev);
- }
- 
-diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
-index 424983f..4291ab4 100644
---- a/drivers/infiniband/core/ucm.c
-+++ b/drivers/infiniband/core/ucm.c
-@@ -106,6 +106,9 @@ enum {
- 	IB_UCM_MAX_DEVICES = 32
- };
- 
-+/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
-+extern struct class cm_class;
-+
- #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
- 
- static void ib_ucm_add_one(struct ib_device *device);
-@@ -1199,7 +1202,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
- 	return 0;
- }
- 
--static void ib_ucm_release_class_dev(struct class_device *class_dev)
-+static void ucm_release_class_dev(struct class_device *class_dev)
- {
- 	struct ib_ucm_device *dev;
- 
-@@ -1217,11 +1220,6 @@ static const struct file_operations ucm_fops = {
- 	.poll    = ib_ucm_poll,
- };
- 
--static struct class ucm_class = {
--	.name    = "infiniband_cm",
--	.release = ib_ucm_release_class_dev
+-
+-	return 1;
+-}
+-
+-static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
+-				      struct kvm_run *kvm_run) = {
+-	[SVM_EXIT_READ_CR0]           		= emulate_on_interception,
+-	[SVM_EXIT_READ_CR3]           		= emulate_on_interception,
+-	[SVM_EXIT_READ_CR4]           		= emulate_on_interception,
+-	/* for now: */
+-	[SVM_EXIT_WRITE_CR0]          		= emulate_on_interception,
+-	[SVM_EXIT_WRITE_CR3]          		= emulate_on_interception,
+-	[SVM_EXIT_WRITE_CR4]          		= emulate_on_interception,
+-	[SVM_EXIT_READ_DR0] 			= emulate_on_interception,
+-	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
+-	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
+-	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
+-	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
+-	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
+-	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
+-	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
+-	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
+-	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+-	[SVM_EXIT_EXCP_BASE + PF_VECTOR] 	= pf_interception,
+-	[SVM_EXIT_EXCP_BASE + NM_VECTOR] 	= nm_interception,
+-	[SVM_EXIT_INTR] 			= nop_on_interception,
+-	[SVM_EXIT_NMI]				= nop_on_interception,
+-	[SVM_EXIT_SMI]				= nop_on_interception,
+-	[SVM_EXIT_INIT]				= nop_on_interception,
+-	[SVM_EXIT_VINTR]			= interrupt_window_interception,
+-	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
+-	[SVM_EXIT_CPUID]			= cpuid_interception,
+-	[SVM_EXIT_INVD]                         = emulate_on_interception,
+-	[SVM_EXIT_HLT]				= halt_interception,
+-	[SVM_EXIT_INVLPG]			= emulate_on_interception,
+-	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
+-	[SVM_EXIT_IOIO] 		  	= io_interception,
+-	[SVM_EXIT_MSR]				= msr_interception,
+-	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
+-	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
+-	[SVM_EXIT_VMRUN]			= invalid_op_interception,
+-	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
+-	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
+-	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
+-	[SVM_EXIT_STGI]				= invalid_op_interception,
+-	[SVM_EXIT_CLGI]				= invalid_op_interception,
+-	[SVM_EXIT_SKINIT]			= invalid_op_interception,
+-	[SVM_EXIT_WBINVD]                       = emulate_on_interception,
+-	[SVM_EXIT_MONITOR]			= invalid_op_interception,
+-	[SVM_EXIT_MWAIT]			= invalid_op_interception,
 -};
 -
- static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
- {
- 	struct ib_ucm_device *dev;
-@@ -1257,9 +1255,10 @@ static void ib_ucm_add_one(struct ib_device *device)
- 	if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
- 		goto err;
- 
--	ucm_dev->class_dev.class = &ucm_class;
-+	ucm_dev->class_dev.class = &cm_class;
- 	ucm_dev->class_dev.dev = device->dma_device;
- 	ucm_dev->class_dev.devt = ucm_dev->dev.dev;
-+	ucm_dev->class_dev.release = ucm_release_class_dev;
- 	snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d",
- 		 ucm_dev->devnum);
- 	if (class_device_register(&ucm_dev->class_dev))
-@@ -1306,40 +1305,34 @@ static int __init ib_ucm_init(void)
- 				     "infiniband_cm");
- 	if (ret) {
- 		printk(KERN_ERR "ucm: couldn't register device number\n");
--		goto err;
-+		goto error1;
- 	}
- 
--	ret = class_register(&ucm_class);
--	if (ret) {
--		printk(KERN_ERR "ucm: couldn't create class infiniband_cm\n");
--		goto err_chrdev;
+-
+-static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	u32 exit_code = svm->vmcb->control.exit_code;
+-
+-	kvm_reput_irq(svm);
+-
+-	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
+-		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+-		kvm_run->fail_entry.hardware_entry_failure_reason
+-			= svm->vmcb->control.exit_code;
+-		return 0;
 -	}
 -
--	ret = class_create_file(&ucm_class, &class_attr_abi_version);
-+	ret = class_create_file(&cm_class, &class_attr_abi_version);
- 	if (ret) {
- 		printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
--		goto err_class;
-+		goto error2;
- 	}
- 
- 	ret = ib_register_client(&ucm_client);
- 	if (ret) {
- 		printk(KERN_ERR "ucm: couldn't register client\n");
--		goto err_class;
-+		goto error3;
- 	}
- 	return 0;
- 
--err_class:
--	class_unregister(&ucm_class);
--err_chrdev:
-+error3:
-+	class_remove_file(&cm_class, &class_attr_abi_version);
-+error2:
- 	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
--err:
-+error1:
- 	return ret;
- }
- 
- static void __exit ib_ucm_cleanup(void)
- {
- 	ib_unregister_client(&ucm_client);
--	class_unregister(&ucm_class);
-+	class_remove_file(&cm_class, &class_attr_abi_version);
- 	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
- 	idr_destroy(&ctx_id_table);
- }
-diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
-index 90d675a..15937eb 100644
---- a/drivers/infiniband/core/ucma.c
-+++ b/drivers/infiniband/core/ucma.c
-@@ -31,6 +31,7 @@
-  */
- 
- #include <linux/completion.h>
-+#include <linux/file.h>
- #include <linux/mutex.h>
- #include <linux/poll.h>
- #include <linux/idr.h>
-@@ -991,6 +992,96 @@ out:
- 	return ret;
- }
- 
-+static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
-+{
-+	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
-+	if (file1 < file2) {
-+		mutex_lock(&file1->mut);
-+		mutex_lock(&file2->mut);
-+	} else {
-+		mutex_lock(&file2->mut);
-+		mutex_lock(&file1->mut);
-+	}
-+}
-+
-+static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
-+{
-+	if (file1 < file2) {
-+		mutex_unlock(&file2->mut);
-+		mutex_unlock(&file1->mut);
-+	} else {
-+		mutex_unlock(&file1->mut);
-+		mutex_unlock(&file2->mut);
-+	}
-+}
-+
-+static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
-+{
-+	struct ucma_event *uevent, *tmp;
-+
-+	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
-+		if (uevent->ctx == ctx)
-+			list_move_tail(&uevent->list, &file->event_list);
-+}
-+
-+static ssize_t ucma_migrate_id(struct ucma_file *new_file,
-+			       const char __user *inbuf,
-+			       int in_len, int out_len)
-+{
-+	struct rdma_ucm_migrate_id cmd;
-+	struct rdma_ucm_migrate_resp resp;
-+	struct ucma_context *ctx;
-+	struct file *filp;
-+	struct ucma_file *cur_file;
-+	int ret = 0;
-+
-+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
-+		return -EFAULT;
-+
-+	/* Get current fd to protect against it being closed */
-+	filp = fget(cmd.fd);
-+	if (!filp)
-+		return -ENOENT;
-+
-+	/* Validate current fd and prevent destruction of id. */
-+	ctx = ucma_get_ctx(filp->private_data, cmd.id);
-+	if (IS_ERR(ctx)) {
-+		ret = PTR_ERR(ctx);
-+		goto file_put;
-+	}
-+
-+	cur_file = ctx->file;
-+	if (cur_file == new_file) {
-+		resp.events_reported = ctx->events_reported;
-+		goto response;
-+	}
-+
-+	/*
-+	 * Migrate events between fd's, maintaining order, and avoiding new
-+	 * events being added before existing events.
-+	 */
-+	ucma_lock_files(cur_file, new_file);
-+	mutex_lock(&mut);
-+
-+	list_move_tail(&ctx->list, &new_file->ctx_list);
-+	ucma_move_events(ctx, new_file);
-+	ctx->file = new_file;
-+	resp.events_reported = ctx->events_reported;
-+
-+	mutex_unlock(&mut);
-+	ucma_unlock_files(cur_file, new_file);
-+
-+response:
-+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-+			 &resp, sizeof(resp)))
-+		ret = -EFAULT;
-+
-+	ucma_put_ctx(ctx);
-+file_put:
-+	fput(filp);
-+	return ret;
-+}
-+
- static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
- 				   const char __user *inbuf,
- 				   int in_len, int out_len) = {
-@@ -1012,6 +1103,7 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
- 	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
- 	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
- 	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
-+	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
- };
- 
- static ssize_t ucma_write(struct file *filp, const char __user *buf,
-diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
-index b53eac4..4e91510 100644
---- a/drivers/infiniband/core/user_mad.c
-+++ b/drivers/infiniband/core/user_mad.c
-@@ -2,6 +2,7 @@
-  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
-  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
-  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
-+ * Copyright (c) 2008 Cisco. All rights reserved.
-  *
-  * This software is available to you under a choice of one of two
-  * licenses.  You may choose to be licensed under the terms of the GNU
-@@ -42,7 +43,7 @@
- #include <linux/cdev.h>
- #include <linux/dma-mapping.h>
- #include <linux/poll.h>
--#include <linux/rwsem.h>
-+#include <linux/mutex.h>
- #include <linux/kref.h>
- #include <linux/compat.h>
- 
-@@ -94,7 +95,7 @@ struct ib_umad_port {
- 	struct class_device   *sm_class_dev;
- 	struct semaphore       sm_sem;
- 
--	struct rw_semaphore    mutex;
-+	struct mutex	       file_mutex;
- 	struct list_head       file_list;
- 
- 	struct ib_device      *ib_dev;
-@@ -110,11 +111,11 @@ struct ib_umad_device {
- };
- 
- struct ib_umad_file {
-+	struct mutex		mutex;
- 	struct ib_umad_port    *port;
- 	struct list_head	recv_list;
- 	struct list_head	send_list;
- 	struct list_head	port_list;
--	spinlock_t		recv_lock;
- 	spinlock_t		send_lock;
- 	wait_queue_head_t	recv_wait;
- 	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
-@@ -156,7 +157,7 @@ static int hdr_size(struct ib_umad_file *file)
- 		sizeof (struct ib_user_mad_hdr_old);
- }
- 
--/* caller must hold port->mutex at least for reading */
-+/* caller must hold file->mutex */
- static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
- {
- 	return file->agents_dead ? NULL : file->agent[id];
-@@ -168,32 +169,30 @@ static int queue_packet(struct ib_umad_file *file,
- {
- 	int ret = 1;
- 
--	down_read(&file->port->mutex);
-+	mutex_lock(&file->mutex);
- 
- 	for (packet->mad.hdr.id = 0;
- 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
- 	     packet->mad.hdr.id++)
- 		if (agent == __get_agent(file, packet->mad.hdr.id)) {
--			spin_lock_irq(&file->recv_lock);
- 			list_add_tail(&packet->list, &file->recv_list);
--			spin_unlock_irq(&file->recv_lock);
- 			wake_up_interruptible(&file->recv_wait);
- 			ret = 0;
- 			break;
- 		}
- 
--	up_read(&file->port->mutex);
-+	mutex_unlock(&file->mutex);
- 
- 	return ret;
- }
- 
- static void dequeue_send(struct ib_umad_file *file,
- 			 struct ib_umad_packet *packet)
-- {
-+{
- 	spin_lock_irq(&file->send_lock);
- 	list_del(&packet->list);
- 	spin_unlock_irq(&file->send_lock);
-- }
-+}
- 
- static void send_handler(struct ib_mad_agent *agent,
- 			 struct ib_mad_send_wc *send_wc)
-@@ -341,10 +340,10 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
- 	if (count < hdr_size(file))
- 		return -EINVAL;
- 
--	spin_lock_irq(&file->recv_lock);
-+	mutex_lock(&file->mutex);
- 
- 	while (list_empty(&file->recv_list)) {
--		spin_unlock_irq(&file->recv_lock);
-+		mutex_unlock(&file->mutex);
- 
- 		if (filp->f_flags & O_NONBLOCK)
- 			return -EAGAIN;
-@@ -353,13 +352,13 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
- 					     !list_empty(&file->recv_list)))
- 			return -ERESTARTSYS;
- 
--		spin_lock_irq(&file->recv_lock);
-+		mutex_lock(&file->mutex);
- 	}
- 
- 	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
- 	list_del(&packet->list);
- 
--	spin_unlock_irq(&file->recv_lock);
-+	mutex_unlock(&file->mutex);
- 
- 	if (packet->recv_wc)
- 		ret = copy_recv_mad(file, buf, packet, count);
-@@ -368,9 +367,9 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
- 
- 	if (ret < 0) {
- 		/* Requeue packet */
--		spin_lock_irq(&file->recv_lock);
-+		mutex_lock(&file->mutex);
- 		list_add(&packet->list, &file->recv_list);
--		spin_unlock_irq(&file->recv_lock);
-+		mutex_unlock(&file->mutex);
- 	} else {
- 		if (packet->recv_wc)
- 			ib_free_recv_mad(packet->recv_wc);
-@@ -481,7 +480,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
- 		goto err;
- 	}
- 
--	down_read(&file->port->mutex);
-+	mutex_lock(&file->mutex);
- 
- 	agent = __get_agent(file, packet->mad.hdr.id);
- 	if (!agent) {
-@@ -577,7 +576,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
- 	if (ret)
- 		goto err_send;
- 
--	up_read(&file->port->mutex);
-+	mutex_unlock(&file->mutex);
- 	return count;
- 
- err_send:
-@@ -587,7 +586,7 @@ err_msg:
- err_ah:
- 	ib_destroy_ah(ah);
- err_up:
--	up_read(&file->port->mutex);
-+	mutex_unlock(&file->mutex);
- err:
- 	kfree(packet);
- 	return ret;
-@@ -613,11 +612,12 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
- {
- 	struct ib_user_mad_reg_req ureq;
- 	struct ib_mad_reg_req req;
--	struct ib_mad_agent *agent;
-+	struct ib_mad_agent *agent = NULL;
- 	int agent_id;
- 	int ret;
- 
--	down_write(&file->port->mutex);
-+	mutex_lock(&file->port->file_mutex);
-+	mutex_lock(&file->mutex);
- 
- 	if (!file->port->ib_dev) {
- 		ret = -EPIPE;
-@@ -666,13 +666,13 @@ found:
- 				      send_handler, recv_handler, file);
- 	if (IS_ERR(agent)) {
- 		ret = PTR_ERR(agent);
-+		agent = NULL;
- 		goto out;
- 	}
- 
- 	if (put_user(agent_id,
- 		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
- 		ret = -EFAULT;
--		ib_unregister_mad_agent(agent);
- 		goto out;
- 	}
- 
-@@ -690,7 +690,13 @@ found:
- 	ret = 0;
- 
- out:
--	up_write(&file->port->mutex);
-+	mutex_unlock(&file->mutex);
-+
-+	if (ret && agent)
-+		ib_unregister_mad_agent(agent);
-+
-+	mutex_unlock(&file->port->file_mutex);
-+
- 	return ret;
- }
- 
-@@ -703,7 +709,8 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
- 	if (get_user(id, arg))
- 		return -EFAULT;
- 
--	down_write(&file->port->mutex);
-+	mutex_lock(&file->port->file_mutex);
-+	mutex_lock(&file->mutex);
- 
- 	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
- 		ret = -EINVAL;
-@@ -714,11 +721,13 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
- 	file->agent[id] = NULL;
- 
- out:
--	up_write(&file->port->mutex);
-+	mutex_unlock(&file->mutex);
- 
- 	if (agent)
- 		ib_unregister_mad_agent(agent);
- 
-+	mutex_unlock(&file->port->file_mutex);
-+
- 	return ret;
- }
- 
-@@ -726,12 +735,12 @@ static long ib_umad_enable_pkey(struct ib_umad_file *file)
- {
- 	int ret = 0;
- 
--	down_write(&file->port->mutex);
-+	mutex_lock(&file->mutex);
- 	if (file->already_used)
- 		ret = -EINVAL;
- 	else
- 		file->use_pkey_index = 1;
--	up_write(&file->port->mutex);
-+	mutex_unlock(&file->mutex);
- 
- 	return ret;
- }
-@@ -783,7 +792,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
- 	if (!port)
- 		return -ENXIO;
- 
--	down_write(&port->mutex);
-+	mutex_lock(&port->file_mutex);
- 
- 	if (!port->ib_dev) {
- 		ret = -ENXIO;
-@@ -797,7 +806,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
- 		goto out;
- 	}
- 
--	spin_lock_init(&file->recv_lock);
-+	mutex_init(&file->mutex);
- 	spin_lock_init(&file->send_lock);
- 	INIT_LIST_HEAD(&file->recv_list);
- 	INIT_LIST_HEAD(&file->send_list);
-@@ -809,7 +818,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
- 	list_add_tail(&file->port_list, &port->file_list);
- 
- out:
--	up_write(&port->mutex);
-+	mutex_unlock(&port->file_mutex);
- 	return ret;
- }
- 
-@@ -821,7 +830,8 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
- 	int already_dead;
- 	int i;
- 
--	down_write(&file->port->mutex);
-+	mutex_lock(&file->port->file_mutex);
-+	mutex_lock(&file->mutex);
- 
- 	already_dead = file->agents_dead;
- 	file->agents_dead = 1;
-@@ -834,14 +844,14 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
- 
- 	list_del(&file->port_list);
- 
--	downgrade_write(&file->port->mutex);
-+	mutex_unlock(&file->mutex);
- 
- 	if (!already_dead)
- 		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
- 			if (file->agent[i])
- 				ib_unregister_mad_agent(file->agent[i]);
- 
--	up_read(&file->port->mutex);
-+	mutex_unlock(&file->port->file_mutex);
- 
- 	kfree(file);
- 	kref_put(&dev->ref, ib_umad_release_dev);
-@@ -914,10 +924,10 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
- 	};
- 	int ret = 0;
- 
--	down_write(&port->mutex);
-+	mutex_lock(&port->file_mutex);
- 	if (port->ib_dev)
- 		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
--	up_write(&port->mutex);
-+	mutex_unlock(&port->file_mutex);
- 
- 	up(&port->sm_sem);
- 
-@@ -981,7 +991,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
- 	port->ib_dev   = device;
- 	port->port_num = port_num;
- 	init_MUTEX(&port->sm_sem);
--	init_rwsem(&port->mutex);
-+	mutex_init(&port->file_mutex);
- 	INIT_LIST_HEAD(&port->file_list);
- 
- 	port->dev = cdev_alloc();
-@@ -1052,6 +1062,7 @@ err_cdev:
- static void ib_umad_kill_port(struct ib_umad_port *port)
- {
- 	struct ib_umad_file *file;
-+	int already_dead;
- 	int id;
- 
- 	class_set_devdata(port->class_dev,    NULL);
-@@ -1067,42 +1078,22 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
- 	umad_port[port->dev_num] = NULL;
- 	spin_unlock(&port_lock);
- 
--	down_write(&port->mutex);
-+	mutex_lock(&port->file_mutex);
- 
- 	port->ib_dev = NULL;
- 
+-	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
+-	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
+-		printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
+-		       "exit_code 0x%x\n",
+-		       __FUNCTION__, svm->vmcb->control.exit_int_info,
+-		       exit_code);
+-
+-	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+-	    || svm_exit_handlers[exit_code] == 0) {
+-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+-		kvm_run->hw.hardware_exit_reason = exit_code;
+-		return 0;
+-	}
+-
+-	return svm_exit_handlers[exit_code](svm, kvm_run);
+-}
+-
+-static void reload_tss(struct kvm_vcpu *vcpu)
+-{
+-	int cpu = raw_smp_processor_id();
+-
+-	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+-	svm_data->tss_desc->type = 9; //available 32/64-bit TSS
+-	load_TR_desc();
+-}
+-
+-static void pre_svm_run(struct vcpu_svm *svm)
+-{
+-	int cpu = raw_smp_processor_id();
+-
+-	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+-
+-	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+-	if (svm->vcpu.cpu != cpu ||
+-	    svm->asid_generation != svm_data->asid_generation)
+-		new_asid(svm, svm_data);
+-}
+-
+-
+-static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
+-{
+-	struct vmcb_control_area *control;
+-
+-	control = &svm->vmcb->control;
+-	control->int_vector = irq;
+-	control->int_ctl &= ~V_INTR_PRIO_MASK;
+-	control->int_ctl |= V_IRQ_MASK |
+-		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
+-}
+-
+-static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	svm_inject_irq(svm, irq);
+-}
+-
+-static void svm_intr_assist(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	struct vmcb *vmcb = svm->vmcb;
+-	int intr_vector = -1;
+-
+-	kvm_inject_pending_timer_irqs(vcpu);
+-	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
+-	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
+-		intr_vector = vmcb->control.exit_int_info &
+-			      SVM_EVTINJ_VEC_MASK;
+-		vmcb->control.exit_int_info = 0;
+-		svm_inject_irq(svm, intr_vector);
+-		return;
+-	}
+-
+-	if (vmcb->control.int_ctl & V_IRQ_MASK)
+-		return;
+-
+-	if (!kvm_cpu_has_interrupt(vcpu))
+-		return;
+-
+-	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
+-	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
+-	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
+-		/* unable to deliver irq, set pending irq */
+-		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
+-		svm_inject_irq(svm, 0x0);
+-		return;
+-	}
+-	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
+-	intr_vector = kvm_cpu_get_interrupt(vcpu);
+-	svm_inject_irq(svm, intr_vector);
+-	kvm_timer_intr_post(vcpu, intr_vector);
+-}
+-
+-static void kvm_reput_irq(struct vcpu_svm *svm)
+-{
+-	struct vmcb_control_area *control = &svm->vmcb->control;
+-
+-	if ((control->int_ctl & V_IRQ_MASK)
+-	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
+-		control->int_ctl &= ~V_IRQ_MASK;
+-		push_irq(&svm->vcpu, control->int_vector);
+-	}
+-
+-	svm->vcpu.interrupt_window_open =
+-		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
+-}
+-
+-static void svm_do_inject_vector(struct vcpu_svm *svm)
+-{
+-	struct kvm_vcpu *vcpu = &svm->vcpu;
+-	int word_index = __ffs(vcpu->irq_summary);
+-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
+-	int irq = word_index * BITS_PER_LONG + bit_index;
+-
+-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
+-	if (!vcpu->irq_pending[word_index])
+-		clear_bit(word_index, &vcpu->irq_summary);
+-	svm_inject_irq(svm, irq);
+-}
+-
+-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+-				       struct kvm_run *kvm_run)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	struct vmcb_control_area *control = &svm->vmcb->control;
+-
+-	svm->vcpu.interrupt_window_open =
+-		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+-		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
+-
+-	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
+-		/*
+-		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
+-		 */
+-		svm_do_inject_vector(svm);
+-
 -	/*
--	 * Now go through the list of files attached to this port and
--	 * unregister all of their MAD agents.  We need to hold
--	 * port->mutex while doing this to avoid racing with
--	 * ib_umad_close(), but we can't hold the mutex for writing
--	 * while calling ib_unregister_mad_agent(), since that might
--	 * deadlock by calling back into queue_packet().  So we
--	 * downgrade our lock to a read lock, and then drop and
--	 * reacquire the write lock for the next iteration.
--	 *
--	 * We do list_del_init() on the file's list_head so that the
--	 * list_del in ib_umad_close() is still OK, even after the
--	 * file is removed from the list.
+-	 * Interrupts blocked.  Wait for unblock.
 -	 */
--	while (!list_empty(&port->file_list)) {
--		file = list_entry(port->file_list.next, struct ib_umad_file,
--				  port_list);
+-	if (!svm->vcpu.interrupt_window_open &&
+-	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
+-		control->intercept |= 1ULL << INTERCEPT_VINTR;
+-	} else
+-		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
+-}
 -
-+	list_for_each_entry(file, &port->file_list, port_list) {
-+		mutex_lock(&file->mutex);
-+		already_dead = file->agents_dead;
- 		file->agents_dead = 1;
--		list_del_init(&file->port_list);
+-static void save_db_regs(unsigned long *db_regs)
+-{
+-	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
+-	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
+-	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
+-	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
+-}
 -
--		downgrade_write(&port->mutex);
-+		mutex_unlock(&file->mutex);
- 
- 		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
- 			if (file->agent[id])
- 				ib_unregister_mad_agent(file->agent[id]);
+-static void load_db_regs(unsigned long *db_regs)
+-{
+-	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
+-	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
+-	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
+-	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
+-}
 -
--		up_read(&port->mutex);
--		down_write(&port->mutex);
- 	}
- 
--	up_write(&port->mutex);
-+	mutex_unlock(&port->file_mutex);
- 
- 	clear_bit(port->dev_num, dev_map);
- }
-diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
-index 36b9898..7e7b5a6 100644
---- a/drivers/infiniband/hw/cxgb3/Makefile
-+++ b/drivers/infiniband/hw/cxgb3/Makefile
-@@ -1,5 +1,4 @@
--EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3 \
--		-I$(TOPDIR)/drivers/infiniband/hw/cxgb3/core
-+EXTRA_CFLAGS += -Idrivers/net/cxgb3
- 
- obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
- 
-diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
-index eec6a30..03c5ff6 100644
---- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
-+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
-@@ -179,7 +179,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
- 	setup.size = 1UL << cq->size_log2;
- 	setup.credits = 65535;
- 	setup.credit_thres = 1;
--	if (rdev_p->t3cdev_p->type == T3B)
-+	if (rdev_p->t3cdev_p->type != T3A)
- 		setup.ovfl_mode = 0;
- 	else
- 		setup.ovfl_mode = 1;
-@@ -584,7 +584,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
- {
- 	u32 i, nr_wqe, copy_len;
- 	u8 *copy_data;
--	u8 wr_len, utx_len;	/* lenght in 8 byte flit */
-+	u8 wr_len, utx_len;	/* length in 8 byte flit */
- 	enum t3_wr_flags flag;
- 	__be64 *wqe;
- 	u64 utx_cmd;
-diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
-index c84d4ac..969d4d9 100644
---- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
-+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
-@@ -315,7 +315,7 @@ struct t3_rdma_init_wr {
- 	__be32 ird;
- 	__be64 qp_dma_addr;	/* 7 */
- 	__be32 qp_dma_size;	/* 8 */
--	u32 irs;
-+	__be32 irs;
- };
- 
- struct t3_genbit {
-@@ -324,7 +324,8 @@ struct t3_genbit {
- };
- 
- enum rdma_init_wr_flags {
--	RECVS_POSTED = 1,
-+	RECVS_POSTED = (1<<0),
-+	PRIV_QP = (1<<1),
- };
- 
- union t3_wr {
-diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
-index 20ba372..e9a08fa 100644
---- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
-+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
-@@ -332,7 +332,7 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
- 			  }
- 	};
- 
--	if (ip_route_output_flow(&rt, &fl, NULL, 0))
-+	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
- 		return NULL;
- 	return rt;
- }
-@@ -1118,7 +1118,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
- 	     status2errno(rpl->status));
- 	connect_reply_upcall(ep, status2errno(rpl->status));
- 	state_set(&ep->com, DEAD);
--	if (ep->com.tdev->type == T3B && act_open_has_tid(rpl->status))
-+	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
- 		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
- 	cxgb3_free_atid(ep->com.tdev, ep->atid);
- 	dst_release(ep->dst);
-@@ -1249,7 +1249,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
- 	skb_trim(skb, sizeof(struct cpl_tid_release));
- 	skb_get(skb);
- 
--	if (tdev->type == T3B)
-+	if (tdev->type != T3A)
- 		release_tid(tdev, hwtid, skb);
- 	else {
- 		struct cpl_pass_accept_rpl *rpl;
-diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
-index a6c2c4b..73bfd16 100644
---- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
-+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
-@@ -122,6 +122,13 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
- 		*total_size += buffer_list[i].size;
- 		if (i > 0)
- 			mask |= buffer_list[i].addr;
-+		else
-+			mask |= buffer_list[i].addr & PAGE_MASK;
-+		if (i != num_phys_buf - 1)
-+			mask |= buffer_list[i].addr + buffer_list[i].size;
-+		else
-+			mask |= (buffer_list[i].addr + buffer_list[i].size +
-+				PAGE_SIZE - 1) & PAGE_MASK;
- 	}
- 
- 	if (*total_size > 0xFFFFFFFFULL)
-diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
-index b5436ca..df1838f 100644
---- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
-+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
-@@ -39,6 +39,7 @@
- #include <linux/list.h>
- #include <linux/spinlock.h>
- #include <linux/ethtool.h>
-+#include <linux/rtnetlink.h>
- 
- #include <asm/io.h>
- #include <asm/irq.h>
-@@ -645,7 +646,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- 	if (err)
- 		goto err;
- 
--	if (udata && t3b_device(rhp)) {
-+	if (udata && !t3a_device(rhp)) {
- 		uresp.pbl_addr = (mhp->attr.pbl_addr -
- 	                         rhp->rdev.rnic_info.pbl_base) >> 3;
- 		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
-@@ -1053,7 +1054,9 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
- 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
- 
- 	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
-+	rtnl_lock();
- 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-+	rtnl_unlock();
- 	return sprintf(buf, "%s\n", info.fw_version);
- }
- 
-@@ -1065,7 +1068,9 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
- 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
- 
- 	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
-+	rtnl_lock();
- 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-+	rtnl_unlock();
- 	return sprintf(buf, "%s\n", info.driver);
- }
- 
-diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
-index dd89b6b..ea2cdd7 100644
---- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
-+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
-@@ -208,36 +208,19 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
- static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
- 				struct ib_recv_wr *wr)
- {
--	int i, err = 0;
--	u32 pbl_addr[4];
--	u8 page_size[4];
-+	int i;
- 	if (wr->num_sge > T3_MAX_SGE)
- 		return -EINVAL;
--	err = iwch_sgl2pbl_map(rhp, wr->sg_list, wr->num_sge, pbl_addr,
--			       page_size);
--	if (err)
--		return err;
--	wqe->recv.pagesz[0] = page_size[0];
--	wqe->recv.pagesz[1] = page_size[1];
--	wqe->recv.pagesz[2] = page_size[2];
--	wqe->recv.pagesz[3] = page_size[3];
- 	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
- 	for (i = 0; i < wr->num_sge; i++) {
- 		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- 		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
+-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+-{
+-	force_new_asid(vcpu);
+-}
 -
--		/* to in the WQE == the offset into the page */
--		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
--				(1UL << (12 + page_size[i])));
+-static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
+-{
+-}
 -
--		/* pbl_addr is the adapters address in the PBL */
--		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
-+		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- 	}
- 	for (; i < T3_MAX_SGE; i++) {
- 		wqe->recv.sgl[i].stag = 0;
- 		wqe->recv.sgl[i].len = 0;
- 		wqe->recv.sgl[i].to = 0;
--		wqe->recv.pbl_addr[i] = 0;
- 	}
- 	return 0;
- }
-@@ -659,6 +642,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
- 	cxio_flush_rq(&qhp->wq, &rchp->cq, count);
- 	spin_unlock(&qhp->lock);
- 	spin_unlock_irqrestore(&rchp->lock, *flag);
-+	(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- 
- 	/* locking heirarchy: cq lock first, then qp lock. */
- 	spin_lock_irqsave(&schp->lock, *flag);
-@@ -668,6 +652,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
- 	cxio_flush_sq(&qhp->wq, &schp->cq, count);
- 	spin_unlock(&qhp->lock);
- 	spin_unlock_irqrestore(&schp->lock, *flag);
-+	(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
- 
- 	/* deref */
- 	if (atomic_dec_and_test(&qhp->refcnt))
-@@ -678,7 +663,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
- 
- static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
- {
--	if (t3b_device(qhp->rhp))
-+	if (qhp->ibqp.uobject)
- 		cxio_set_wq_in_error(&qhp->wq);
- 	else
- 		__flush_qp(qhp, flag);
-@@ -732,6 +717,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
- 	init_attr.qp_dma_addr = qhp->wq.dma_addr;
- 	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
- 	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
-+	init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
- 	init_attr.irs = qhp->ep->rcv_seq;
- 	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
- 	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
-@@ -847,10 +833,11 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
- 				disconnect = 1;
- 				ep = qhp->ep;
- 			}
-+			flush_qp(qhp, &flag);
- 			break;
- 		case IWCH_QP_STATE_TERMINATE:
- 			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
--			if (t3b_device(qhp->rhp))
-+			if (qhp->ibqp.uobject)
- 				cxio_set_wq_in_error(&qhp->wq);
- 			if (!internal)
- 				terminate = 1;
-diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
-index f7782c8..194c1c3 100644
---- a/drivers/infiniband/hw/ehca/ehca_av.c
-+++ b/drivers/infiniband/hw/ehca/ehca_av.c
-@@ -1,7 +1,7 @@
- /*
-  *  IBM eServer eHCA Infiniband device driver for Linux on POWER
-  *
-- *  adress vector functions
-+ *  address vector functions
-  *
-  *  Authors: Hoang-Nam Nguyen <hnguyen at de.ibm.com>
-  *           Khadija Souissi <souissik at de.ibm.com>
-diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
-index 74d2b72..f281d16 100644
---- a/drivers/infiniband/hw/ehca/ehca_classes.h
-+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
-@@ -94,7 +94,11 @@ struct ehca_sma_attr {
- 
- struct ehca_sport {
- 	struct ib_cq *ibcq_aqp1;
--	struct ib_qp *ibqp_aqp1;
-+	struct ib_qp *ibqp_sqp[2];
-+	/* lock to serialze modify_qp() calls for sqp in normal
-+	 * and irq path (when event PORT_ACTIVE is received first time)
-+	 */
-+	spinlock_t mod_sqp_lock;
- 	enum ib_port_state port_state;
- 	struct ehca_sma_attr saved_attr;
- };
-@@ -141,6 +145,14 @@ enum ehca_ext_qp_type {
- 	EQPT_SRQ       = 3,
- };
- 
-+/* struct to cache modify_qp()'s parms for GSI/SMI qp */
-+struct ehca_mod_qp_parm {
-+	int mask;
-+	struct ib_qp_attr attr;
-+};
-+
-+#define EHCA_MOD_QP_PARM_MAX 4
-+
- struct ehca_qp {
- 	union {
- 		struct ib_qp ib_qp;
-@@ -164,10 +176,18 @@ struct ehca_qp {
- 	struct ehca_cq *recv_cq;
- 	unsigned int sqerr_purgeflag;
- 	struct hlist_node list_entries;
-+	/* array to cache modify_qp()'s parms for GSI/SMI qp */
-+	struct ehca_mod_qp_parm *mod_qp_parm;
-+	int mod_qp_parm_idx;
- 	/* mmap counter for resources mapped into user space */
- 	u32 mm_count_squeue;
- 	u32 mm_count_rqueue;
- 	u32 mm_count_galpa;
-+	/* unsolicited ack circumvention */
-+	int unsol_ack_circ;
-+	int mtu_shift;
-+	u32 message_count;
-+	u32 packet_count;
- };
- 
- #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
-@@ -323,6 +343,7 @@ extern int ehca_port_act_time;
- extern int ehca_use_hp_mr;
- extern int ehca_scaling_code;
- extern int ehca_lock_hcalls;
-+extern int ehca_nr_ports;
- 
- struct ipzu_queue_resp {
- 	u32 qe_size;      /* queue entry size */
-diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
-index 79c25f5..0467c15 100644
---- a/drivers/infiniband/hw/ehca/ehca_cq.c
-+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
-@@ -246,7 +246,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
- 		} else {
- 			if (h_ret != H_PAGE_REGISTERED) {
- 				ehca_err(device, "Registration of page failed "
--					 "ehca_cq=%p cq_num=%x h_ret=%li"
-+					 "ehca_cq=%p cq_num=%x h_ret=%li "
- 					 "counter=%i act_pages=%i",
- 					 my_cq, my_cq->cq_number,
- 					 h_ret, counter, param.act_pages);
-diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
-index 3f617b2..863b34f 100644
---- a/drivers/infiniband/hw/ehca/ehca_irq.c
-+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
-@@ -62,6 +62,7 @@
- #define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
- #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
- #define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
-+#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)
- 
- #define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
- #define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
-@@ -354,17 +355,34 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
- {
- 	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
- 	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
-+	u8 spec_event;
-+	struct ehca_sport *sport = &shca->sport[port - 1];
-+	unsigned long flags;
- 
- 	switch (ec) {
- 	case 0x30: /* port availability change */
- 		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
--			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
-+			int suppress_event;
-+			/* replay modify_qp for sqps */
-+			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-+			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
-+			if (sport->ibqp_sqp[IB_QPT_SMI])
-+				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-+			if (!suppress_event)
-+				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-+			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-+
-+			/* AQP1 was destroyed, ignore this event */
-+			if (suppress_event)
-+				break;
-+
-+			sport->port_state = IB_PORT_ACTIVE;
- 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
- 					    "is active");
- 			ehca_query_sma_attr(shca, port,
--					    &shca->sport[port - 1].saved_attr);
-+					    &sport->saved_attr);
- 		} else {
--			shca->sport[port - 1].port_state = IB_PORT_DOWN;
-+			sport->port_state = IB_PORT_DOWN;
- 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
- 					    "is inactive");
- 		}
-@@ -378,11 +396,11 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
- 			ehca_warn(&shca->ib_device, "disruptive port "
- 				  "%d configuration change", port);
- 
--			shca->sport[port - 1].port_state = IB_PORT_DOWN;
-+			sport->port_state = IB_PORT_DOWN;
- 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
- 					    "is inactive");
- 
--			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
-+			sport->port_state = IB_PORT_ACTIVE;
- 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
- 					    "is active");
- 		} else
-@@ -394,6 +412,16 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
- 	case 0x33:  /* trace stopped */
- 		ehca_err(&shca->ib_device, "Traced stopped.");
- 		break;
-+	case 0x34: /* util async event */
-+		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
-+		if (spec_event == 0x80) /* client reregister required */
-+			dispatch_port_event(shca, port,
-+					    IB_EVENT_CLIENT_REREGISTER,
-+					    "client reregister req.");
-+		else
-+			ehca_warn(&shca->ib_device, "Unknown util async "
-+				  "event %x on port %x", spec_event, port);
-+		break;
- 	default:
- 		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
- 			 ec, shca->ib_device.name);
-diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
-index 5485799..c469bfd 100644
---- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
-+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
-@@ -200,4 +200,6 @@ void ehca_free_fw_ctrlblock(void *ptr);
- #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
- #endif
- 
-+void ehca_recover_sqp(struct ib_qp *sqp);
-+
- #endif
-diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
-index 6a56d86..84c9b7b 100644
---- a/drivers/infiniband/hw/ehca/ehca_main.c
-+++ b/drivers/infiniband/hw/ehca/ehca_main.c
-@@ -90,7 +90,8 @@ MODULE_PARM_DESC(hw_level,
- 		 "hardware level"
- 		 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
- MODULE_PARM_DESC(nr_ports,
--		 "number of connected ports (default: 2)");
-+		 "number of connected ports (-1: autodetect, 1: port one only, "
-+		 "2: two ports (default)");
- MODULE_PARM_DESC(use_hp_mr,
- 		 "high performance MRs (0: no (default), 1: yes)");
- MODULE_PARM_DESC(port_act_time,
-@@ -511,7 +512,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
- 	}
- 	sport->ibcq_aqp1 = ibcq;
- 
--	if (sport->ibqp_aqp1) {
-+	if (sport->ibqp_sqp[IB_QPT_GSI]) {
- 		ehca_err(&shca->ib_device, "AQP1 QP is already created.");
- 		ret = -EPERM;
- 		goto create_aqp1;
-@@ -537,7 +538,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
- 		ret = PTR_ERR(ibqp);
- 		goto create_aqp1;
- 	}
--	sport->ibqp_aqp1 = ibqp;
-+	sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
- 
- 	return 0;
- 
-@@ -550,7 +551,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
- {
- 	int ret;
- 
--	ret = ib_destroy_qp(sport->ibqp_aqp1);
-+	ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
- 	if (ret) {
- 		ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
- 		return ret;
-@@ -590,6 +591,11 @@ static struct attribute_group ehca_drv_attr_grp = {
- 	.attrs = ehca_drv_attrs
- };
- 
-+static struct attribute_group *ehca_drv_attr_groups[] = {
-+	&ehca_drv_attr_grp,
-+	NULL,
-+};
-+
- #define EHCA_RESOURCE_ATTR(name)                                           \
- static ssize_t  ehca_show_##name(struct device *dev,                       \
- 				 struct device_attribute *attr,            \
-@@ -688,7 +694,7 @@ static int __devinit ehca_probe(struct of_device *dev,
- 	struct ehca_shca *shca;
- 	const u64 *handle;
- 	struct ib_pd *ibpd;
--	int ret;
-+	int ret, i;
- 
- 	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
- 	if (!handle) {
-@@ -709,6 +715,8 @@ static int __devinit ehca_probe(struct of_device *dev,
- 		return -ENOMEM;
- 	}
- 	mutex_init(&shca->modify_mutex);
-+	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
-+		spin_lock_init(&shca->sport[i].mod_sqp_lock);
- 
- 	shca->ofdev = dev;
- 	shca->ipz_hca_handle.handle = *handle;
-@@ -899,6 +907,9 @@ static struct of_platform_driver ehca_driver = {
- 	.match_table = ehca_device_table,
- 	.probe       = ehca_probe,
- 	.remove      = ehca_remove,
-+	.driver	     = {
-+		.groups = ehca_drv_attr_groups,
-+	},
- };
- 
- void ehca_poll_eqs(unsigned long data)
-@@ -926,7 +937,7 @@ void ehca_poll_eqs(unsigned long data)
- 				ehca_process_eq(shca, 0);
- 		}
- 	}
--	mod_timer(&poll_eqs_timer, jiffies + HZ);
-+	mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
- 	spin_unlock(&shca_list_lock);
- }
- 
-@@ -957,10 +968,6 @@ int __init ehca_module_init(void)
- 		goto module_init2;
- 	}
- 
--	ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
--	if (ret) /* only complain; we can live without attributes */
--		ehca_gen_err("Cannot create driver attributes  ret=%d", ret);
+-static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	u16 fs_selector;
+-	u16 gs_selector;
+-	u16 ldt_selector;
 -
- 	if (ehca_poll_all_eqs != 1) {
- 		ehca_gen_err("WARNING!!!");
- 		ehca_gen_err("It is possible to lose interrupts.");
-@@ -986,7 +993,6 @@ void __exit ehca_module_exit(void)
- 	if (ehca_poll_all_eqs == 1)
- 		del_timer_sync(&poll_eqs_timer);
- 
--	sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
- 	ibmebus_unregister_driver(&ehca_driver);
- 
- 	ehca_destroy_slab_caches();
-diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
-index eff5fb5..1012f15 100644
---- a/drivers/infiniband/hw/ehca/ehca_qp.c
-+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
-@@ -592,10 +592,8 @@ static struct ehca_qp *internal_create_qp(
- 		goto create_qp_exit1;
- 	}
- 
--	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
--		parms.sigtype = HCALL_SIGT_EVERY;
+-	pre_svm_run(svm);
+-
+-	save_host_msrs(vcpu);
+-	fs_selector = read_fs();
+-	gs_selector = read_gs();
+-	ldt_selector = read_ldt();
+-	svm->host_cr2 = kvm_read_cr2();
+-	svm->host_dr6 = read_dr6();
+-	svm->host_dr7 = read_dr7();
+-	svm->vmcb->save.cr2 = vcpu->cr2;
+-
+-	if (svm->vmcb->save.dr7 & 0xff) {
+-		write_dr7(0);
+-		save_db_regs(svm->host_db_regs);
+-		load_db_regs(svm->db_regs);
+-	}
+-
+-	clgi();
+-
+-	local_irq_enable();
+-
+-	asm volatile (
+-#ifdef CONFIG_X86_64
+-		"push %%rbx; push %%rcx; push %%rdx;"
+-		"push %%rsi; push %%rdi; push %%rbp;"
+-		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
+-		"push %%r12; push %%r13; push %%r14; push %%r15;"
+-#else
+-		"push %%ebx; push %%ecx; push %%edx;"
+-		"push %%esi; push %%edi; push %%ebp;"
+-#endif
+-
+-#ifdef CONFIG_X86_64
+-		"mov %c[rbx](%[svm]), %%rbx \n\t"
+-		"mov %c[rcx](%[svm]), %%rcx \n\t"
+-		"mov %c[rdx](%[svm]), %%rdx \n\t"
+-		"mov %c[rsi](%[svm]), %%rsi \n\t"
+-		"mov %c[rdi](%[svm]), %%rdi \n\t"
+-		"mov %c[rbp](%[svm]), %%rbp \n\t"
+-		"mov %c[r8](%[svm]),  %%r8  \n\t"
+-		"mov %c[r9](%[svm]),  %%r9  \n\t"
+-		"mov %c[r10](%[svm]), %%r10 \n\t"
+-		"mov %c[r11](%[svm]), %%r11 \n\t"
+-		"mov %c[r12](%[svm]), %%r12 \n\t"
+-		"mov %c[r13](%[svm]), %%r13 \n\t"
+-		"mov %c[r14](%[svm]), %%r14 \n\t"
+-		"mov %c[r15](%[svm]), %%r15 \n\t"
+-#else
+-		"mov %c[rbx](%[svm]), %%ebx \n\t"
+-		"mov %c[rcx](%[svm]), %%ecx \n\t"
+-		"mov %c[rdx](%[svm]), %%edx \n\t"
+-		"mov %c[rsi](%[svm]), %%esi \n\t"
+-		"mov %c[rdi](%[svm]), %%edi \n\t"
+-		"mov %c[rbp](%[svm]), %%ebp \n\t"
+-#endif
+-
+-#ifdef CONFIG_X86_64
+-		/* Enter guest mode */
+-		"push %%rax \n\t"
+-		"mov %c[vmcb](%[svm]), %%rax \n\t"
+-		SVM_VMLOAD "\n\t"
+-		SVM_VMRUN "\n\t"
+-		SVM_VMSAVE "\n\t"
+-		"pop %%rax \n\t"
+-#else
+-		/* Enter guest mode */
+-		"push %%eax \n\t"
+-		"mov %c[vmcb](%[svm]), %%eax \n\t"
+-		SVM_VMLOAD "\n\t"
+-		SVM_VMRUN "\n\t"
+-		SVM_VMSAVE "\n\t"
+-		"pop %%eax \n\t"
+-#endif
+-
+-		/* Save guest registers, load host registers */
+-#ifdef CONFIG_X86_64
+-		"mov %%rbx, %c[rbx](%[svm]) \n\t"
+-		"mov %%rcx, %c[rcx](%[svm]) \n\t"
+-		"mov %%rdx, %c[rdx](%[svm]) \n\t"
+-		"mov %%rsi, %c[rsi](%[svm]) \n\t"
+-		"mov %%rdi, %c[rdi](%[svm]) \n\t"
+-		"mov %%rbp, %c[rbp](%[svm]) \n\t"
+-		"mov %%r8,  %c[r8](%[svm]) \n\t"
+-		"mov %%r9,  %c[r9](%[svm]) \n\t"
+-		"mov %%r10, %c[r10](%[svm]) \n\t"
+-		"mov %%r11, %c[r11](%[svm]) \n\t"
+-		"mov %%r12, %c[r12](%[svm]) \n\t"
+-		"mov %%r13, %c[r13](%[svm]) \n\t"
+-		"mov %%r14, %c[r14](%[svm]) \n\t"
+-		"mov %%r15, %c[r15](%[svm]) \n\t"
+-
+-		"pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
+-		"pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
+-		"pop  %%rbp; pop  %%rdi; pop  %%rsi;"
+-		"pop  %%rdx; pop  %%rcx; pop  %%rbx; \n\t"
+-#else
+-		"mov %%ebx, %c[rbx](%[svm]) \n\t"
+-		"mov %%ecx, %c[rcx](%[svm]) \n\t"
+-		"mov %%edx, %c[rdx](%[svm]) \n\t"
+-		"mov %%esi, %c[rsi](%[svm]) \n\t"
+-		"mov %%edi, %c[rdi](%[svm]) \n\t"
+-		"mov %%ebp, %c[rbp](%[svm]) \n\t"
+-
+-		"pop  %%ebp; pop  %%edi; pop  %%esi;"
+-		"pop  %%edx; pop  %%ecx; pop  %%ebx; \n\t"
+-#endif
+-		:
+-		: [svm]"a"(svm),
+-		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
+-		  [rbx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBX])),
+-		  [rcx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RCX])),
+-		  [rdx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDX])),
+-		  [rsi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RSI])),
+-		  [rdi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDI])),
+-		  [rbp]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBP]))
+-#ifdef CONFIG_X86_64
+-		  ,[r8 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R8])),
+-		  [r9 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R9 ])),
+-		  [r10]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R10])),
+-		  [r11]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R11])),
+-		  [r12]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R12])),
+-		  [r13]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R13])),
+-		  [r14]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R14])),
+-		  [r15]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R15]))
+-#endif
+-		: "cc", "memory" );
+-
+-	if ((svm->vmcb->save.dr7 & 0xff))
+-		load_db_regs(svm->host_db_regs);
+-
+-	vcpu->cr2 = svm->vmcb->save.cr2;
+-
+-	write_dr6(svm->host_dr6);
+-	write_dr7(svm->host_dr7);
+-	kvm_write_cr2(svm->host_cr2);
+-
+-	load_fs(fs_selector);
+-	load_gs(gs_selector);
+-	load_ldt(ldt_selector);
+-	load_host_msrs(vcpu);
+-
+-	reload_tss(vcpu);
+-
+-	local_irq_disable();
+-
+-	stgi();
+-
+-	svm->next_rip = 0;
+-}
+-
+-static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-
+-	svm->vmcb->save.cr3 = root;
+-	force_new_asid(vcpu);
+-
+-	if (vcpu->fpu_active) {
+-		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+-		svm->vmcb->save.cr0 |= X86_CR0_TS;
+-		vcpu->fpu_active = 0;
+-	}
+-}
+-
+-static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
+-				  unsigned long  addr,
+-				  uint32_t err_code)
+-{
+-	struct vcpu_svm *svm = to_svm(vcpu);
+-	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;
+-
+-	++vcpu->stat.pf_guest;
+-
+-	if (is_page_fault(exit_int_info)) {
+-
+-		svm->vmcb->control.event_inj_err = 0;
+-		svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
+-						SVM_EVTINJ_VALID_ERR |
+-						SVM_EVTINJ_TYPE_EXEPT |
+-						DF_VECTOR;
+-		return;
+-	}
+-	vcpu->cr2 = addr;
+-	svm->vmcb->save.cr2 = addr;
+-	svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
+-					SVM_EVTINJ_VALID_ERR |
+-					SVM_EVTINJ_TYPE_EXEPT |
+-					PF_VECTOR;
+-	svm->vmcb->control.event_inj_err = err_code;
+-}
+-
+-
+-static int is_disabled(void)
+-{
+-	u64 vm_cr;
+-
+-	rdmsrl(MSR_VM_CR, vm_cr);
+-	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
+-		return 1;
+-
+-	return 0;
+-}
+-
+-static void
+-svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+-{
+-	/*
+-	 * Patch in the VMMCALL instruction:
+-	 */
+-	hypercall[0] = 0x0f;
+-	hypercall[1] = 0x01;
+-	hypercall[2] = 0xd9;
+-	hypercall[3] = 0xc3;
+-}
+-
+-static void svm_check_processor_compat(void *rtn)
+-{
+-	*(int *)rtn = 0;
+-}
+-
+-static struct kvm_x86_ops svm_x86_ops = {
+-	.cpu_has_kvm_support = has_svm,
+-	.disabled_by_bios = is_disabled,
+-	.hardware_setup = svm_hardware_setup,
+-	.hardware_unsetup = svm_hardware_unsetup,
+-	.check_processor_compatibility = svm_check_processor_compat,
+-	.hardware_enable = svm_hardware_enable,
+-	.hardware_disable = svm_hardware_disable,
+-
+-	.vcpu_create = svm_create_vcpu,
+-	.vcpu_free = svm_free_vcpu,
+-	.vcpu_reset = svm_vcpu_reset,
+-
+-	.prepare_guest_switch = svm_prepare_guest_switch,
+-	.vcpu_load = svm_vcpu_load,
+-	.vcpu_put = svm_vcpu_put,
+-	.vcpu_decache = svm_vcpu_decache,
+-
+-	.set_guest_debug = svm_guest_debug,
+-	.get_msr = svm_get_msr,
+-	.set_msr = svm_set_msr,
+-	.get_segment_base = svm_get_segment_base,
+-	.get_segment = svm_get_segment,
+-	.set_segment = svm_set_segment,
+-	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
+-	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
+-	.set_cr0 = svm_set_cr0,
+-	.set_cr3 = svm_set_cr3,
+-	.set_cr4 = svm_set_cr4,
+-	.set_efer = svm_set_efer,
+-	.get_idt = svm_get_idt,
+-	.set_idt = svm_set_idt,
+-	.get_gdt = svm_get_gdt,
+-	.set_gdt = svm_set_gdt,
+-	.get_dr = svm_get_dr,
+-	.set_dr = svm_set_dr,
+-	.cache_regs = svm_cache_regs,
+-	.decache_regs = svm_decache_regs,
+-	.get_rflags = svm_get_rflags,
+-	.set_rflags = svm_set_rflags,
+-
+-	.tlb_flush = svm_flush_tlb,
+-	.inject_page_fault = svm_inject_page_fault,
+-
+-	.inject_gp = svm_inject_gp,
+-
+-	.run = svm_vcpu_run,
+-	.handle_exit = handle_exit,
+-	.skip_emulated_instruction = skip_emulated_instruction,
+-	.patch_hypercall = svm_patch_hypercall,
+-	.get_irq = svm_get_irq,
+-	.set_irq = svm_set_irq,
+-	.inject_pending_irq = svm_intr_assist,
+-	.inject_pending_vectors = do_interrupt_requests,
+-};
+-
+-static int __init svm_init(void)
+-{
+-	return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm),
+-			      THIS_MODULE);
+-}
+-
+-static void __exit svm_exit(void)
+-{
+-	kvm_exit_x86();
+-}
+-
+-module_init(svm_init)
+-module_exit(svm_exit)
+diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
+deleted file mode 100644
+index 3b1b0f3..0000000
+--- a/drivers/kvm/svm.h
++++ /dev/null
+@@ -1,324 +0,0 @@
+-#ifndef __SVM_H
+-#define __SVM_H
+-
+-enum {
+-	INTERCEPT_INTR,
+-	INTERCEPT_NMI,
+-	INTERCEPT_SMI,
+-	INTERCEPT_INIT,
+-	INTERCEPT_VINTR,
+-	INTERCEPT_SELECTIVE_CR0,
+-	INTERCEPT_STORE_IDTR,
+-	INTERCEPT_STORE_GDTR,
+-	INTERCEPT_STORE_LDTR,
+-	INTERCEPT_STORE_TR,
+-	INTERCEPT_LOAD_IDTR,
+-	INTERCEPT_LOAD_GDTR,
+-	INTERCEPT_LOAD_LDTR,
+-	INTERCEPT_LOAD_TR,
+-	INTERCEPT_RDTSC,
+-	INTERCEPT_RDPMC,
+-	INTERCEPT_PUSHF,
+-	INTERCEPT_POPF,
+-	INTERCEPT_CPUID,
+-	INTERCEPT_RSM,
+-	INTERCEPT_IRET,
+-	INTERCEPT_INTn,
+-	INTERCEPT_INVD,
+-	INTERCEPT_PAUSE,
+-	INTERCEPT_HLT,
+-	INTERCEPT_INVLPG,
+-	INTERCEPT_INVLPGA,
+-	INTERCEPT_IOIO_PROT,
+-	INTERCEPT_MSR_PROT,
+-	INTERCEPT_TASK_SWITCH,
+-	INTERCEPT_FERR_FREEZE,
+-	INTERCEPT_SHUTDOWN,
+-	INTERCEPT_VMRUN,
+-	INTERCEPT_VMMCALL,
+-	INTERCEPT_VMLOAD,
+-	INTERCEPT_VMSAVE,
+-	INTERCEPT_STGI,
+-	INTERCEPT_CLGI,
+-	INTERCEPT_SKINIT,
+-	INTERCEPT_RDTSCP,
+-	INTERCEPT_ICEBP,
+-	INTERCEPT_WBINVD,
+-	INTERCEPT_MONITOR,
+-	INTERCEPT_MWAIT,
+-	INTERCEPT_MWAIT_COND,
+-};
+-
+-
+-struct __attribute__ ((__packed__)) vmcb_control_area {
+-	u16 intercept_cr_read;
+-	u16 intercept_cr_write;
+-	u16 intercept_dr_read;
+-	u16 intercept_dr_write;
+-	u32 intercept_exceptions;
+-	u64 intercept;
+-	u8 reserved_1[44];
+-	u64 iopm_base_pa;
+-	u64 msrpm_base_pa;
+-	u64 tsc_offset;
+-	u32 asid;
+-	u8 tlb_ctl;
+-	u8 reserved_2[3];
+-	u32 int_ctl;
+-	u32 int_vector;
+-	u32 int_state;
+-	u8 reserved_3[4];
+-	u32 exit_code;
+-	u32 exit_code_hi;
+-	u64 exit_info_1;
+-	u64 exit_info_2;
+-	u32 exit_int_info;
+-	u32 exit_int_info_err;
+-	u64 nested_ctl;
+-	u8 reserved_4[16];
+-	u32 event_inj;
+-	u32 event_inj_err;
+-	u64 nested_cr3;
+-	u64 lbr_ctl;
+-	u8 reserved_5[832];
+-};
+-
+-
+-#define TLB_CONTROL_DO_NOTHING 0
+-#define TLB_CONTROL_FLUSH_ALL_ASID 1
+-
+-#define V_TPR_MASK 0x0f
+-
+-#define V_IRQ_SHIFT 8
+-#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
+-
+-#define V_INTR_PRIO_SHIFT 16
+-#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
+-
+-#define V_IGN_TPR_SHIFT 20
+-#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+-
+-#define V_INTR_MASKING_SHIFT 24
+-#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+-
+-#define SVM_INTERRUPT_SHADOW_MASK 1
+-
+-#define SVM_IOIO_STR_SHIFT 2
+-#define SVM_IOIO_REP_SHIFT 3
+-#define SVM_IOIO_SIZE_SHIFT 4
+-#define SVM_IOIO_ASIZE_SHIFT 7
+-
+-#define SVM_IOIO_TYPE_MASK 1
+-#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
+-#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
+-#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
+-#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+-
+-struct __attribute__ ((__packed__)) vmcb_seg {
+-	u16 selector;
+-	u16 attrib;
+-	u32 limit;
+-	u64 base;
+-};
+-
+-struct __attribute__ ((__packed__)) vmcb_save_area {
+-	struct vmcb_seg es;
+-	struct vmcb_seg cs;
+-	struct vmcb_seg ss;
+-	struct vmcb_seg ds;
+-	struct vmcb_seg fs;
+-	struct vmcb_seg gs;
+-	struct vmcb_seg gdtr;
+-	struct vmcb_seg ldtr;
+-	struct vmcb_seg idtr;
+-	struct vmcb_seg tr;
+-	u8 reserved_1[43];
+-	u8 cpl;
+-	u8 reserved_2[4];
+-	u64 efer;
+-	u8 reserved_3[112];
+-	u64 cr4;
+-	u64 cr3;
+-	u64 cr0;
+-	u64 dr7;
+-	u64 dr6;
+-	u64 rflags;
+-	u64 rip;
+-	u8 reserved_4[88];
+-	u64 rsp;
+-	u8 reserved_5[24];
+-	u64 rax;
+-	u64 star;
+-	u64 lstar;
+-	u64 cstar;
+-	u64 sfmask;
+-	u64 kernel_gs_base;
+-	u64 sysenter_cs;
+-	u64 sysenter_esp;
+-	u64 sysenter_eip;
+-	u64 cr2;
+-	u8 reserved_6[32];
+-	u64 g_pat;
+-	u64 dbgctl;
+-	u64 br_from;
+-	u64 br_to;
+-	u64 last_excp_from;
+-	u64 last_excp_to;
+-};
+-
+-struct __attribute__ ((__packed__)) vmcb {
+-	struct vmcb_control_area control;
+-	struct vmcb_save_area save;
+-};
+-
+-#define SVM_CPUID_FEATURE_SHIFT 2
+-#define SVM_CPUID_FUNC 0x8000000a
+-
+-#define MSR_EFER_SVME_MASK (1ULL << 12)
+-#define MSR_VM_CR       0xc0010114
+-#define MSR_VM_HSAVE_PA 0xc0010117ULL
+-
+-#define SVM_VM_CR_SVM_DISABLE 4
+-
+-#define SVM_SELECTOR_S_SHIFT 4
+-#define SVM_SELECTOR_DPL_SHIFT 5
+-#define SVM_SELECTOR_P_SHIFT 7
+-#define SVM_SELECTOR_AVL_SHIFT 8
+-#define SVM_SELECTOR_L_SHIFT 9
+-#define SVM_SELECTOR_DB_SHIFT 10
+-#define SVM_SELECTOR_G_SHIFT 11
+-
+-#define SVM_SELECTOR_TYPE_MASK (0xf)
+-#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
+-#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
+-#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
+-#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
+-#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
+-#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
+-#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)
+-
+-#define SVM_SELECTOR_WRITE_MASK (1 << 1)
+-#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
+-#define SVM_SELECTOR_CODE_MASK (1 << 3)
+-
+-#define INTERCEPT_CR0_MASK 1
+-#define INTERCEPT_CR3_MASK (1 << 3)
+-#define INTERCEPT_CR4_MASK (1 << 4)
+-
+-#define INTERCEPT_DR0_MASK 1
+-#define INTERCEPT_DR1_MASK (1 << 1)
+-#define INTERCEPT_DR2_MASK (1 << 2)
+-#define INTERCEPT_DR3_MASK (1 << 3)
+-#define INTERCEPT_DR4_MASK (1 << 4)
+-#define INTERCEPT_DR5_MASK (1 << 5)
+-#define INTERCEPT_DR6_MASK (1 << 6)
+-#define INTERCEPT_DR7_MASK (1 << 7)
+-
+-#define SVM_EVTINJ_VEC_MASK 0xff
+-
+-#define SVM_EVTINJ_TYPE_SHIFT 8
+-#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
+-
+-#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
+-#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
+-#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
+-#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
+-
+-#define SVM_EVTINJ_VALID (1 << 31)
+-#define SVM_EVTINJ_VALID_ERR (1 << 11)
+-
+-#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
+-
+-#define	SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
+-#define	SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
+-#define	SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
+-#define	SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
+-
+-#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
+-#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+-
+-#define	SVM_EXIT_READ_CR0 	0x000
+-#define	SVM_EXIT_READ_CR3 	0x003
+-#define	SVM_EXIT_READ_CR4 	0x004
+-#define	SVM_EXIT_READ_CR8 	0x008
+-#define	SVM_EXIT_WRITE_CR0 	0x010
+-#define	SVM_EXIT_WRITE_CR3 	0x013
+-#define	SVM_EXIT_WRITE_CR4 	0x014
+-#define	SVM_EXIT_WRITE_CR8 	0x018
+-#define	SVM_EXIT_READ_DR0 	0x020
+-#define	SVM_EXIT_READ_DR1 	0x021
+-#define	SVM_EXIT_READ_DR2 	0x022
+-#define	SVM_EXIT_READ_DR3 	0x023
+-#define	SVM_EXIT_READ_DR4 	0x024
+-#define	SVM_EXIT_READ_DR5 	0x025
+-#define	SVM_EXIT_READ_DR6 	0x026
+-#define	SVM_EXIT_READ_DR7 	0x027
+-#define	SVM_EXIT_WRITE_DR0 	0x030
+-#define	SVM_EXIT_WRITE_DR1 	0x031
+-#define	SVM_EXIT_WRITE_DR2 	0x032
+-#define	SVM_EXIT_WRITE_DR3 	0x033
+-#define	SVM_EXIT_WRITE_DR4 	0x034
+-#define	SVM_EXIT_WRITE_DR5 	0x035
+-#define	SVM_EXIT_WRITE_DR6 	0x036
+-#define	SVM_EXIT_WRITE_DR7 	0x037
+-#define SVM_EXIT_EXCP_BASE      0x040
+-#define SVM_EXIT_INTR		0x060
+-#define SVM_EXIT_NMI		0x061
+-#define SVM_EXIT_SMI		0x062
+-#define SVM_EXIT_INIT		0x063
+-#define SVM_EXIT_VINTR		0x064
+-#define SVM_EXIT_CR0_SEL_WRITE	0x065
+-#define SVM_EXIT_IDTR_READ	0x066
+-#define SVM_EXIT_GDTR_READ	0x067
+-#define SVM_EXIT_LDTR_READ	0x068
+-#define SVM_EXIT_TR_READ	0x069
+-#define SVM_EXIT_IDTR_WRITE	0x06a
+-#define SVM_EXIT_GDTR_WRITE	0x06b
+-#define SVM_EXIT_LDTR_WRITE	0x06c
+-#define SVM_EXIT_TR_WRITE	0x06d
+-#define SVM_EXIT_RDTSC		0x06e
+-#define SVM_EXIT_RDPMC		0x06f
+-#define SVM_EXIT_PUSHF		0x070
+-#define SVM_EXIT_POPF		0x071
+-#define SVM_EXIT_CPUID		0x072
+-#define SVM_EXIT_RSM		0x073
+-#define SVM_EXIT_IRET		0x074
+-#define SVM_EXIT_SWINT		0x075
+-#define SVM_EXIT_INVD		0x076
+-#define SVM_EXIT_PAUSE		0x077
+-#define SVM_EXIT_HLT		0x078
+-#define SVM_EXIT_INVLPG		0x079
+-#define SVM_EXIT_INVLPGA	0x07a
+-#define SVM_EXIT_IOIO		0x07b
+-#define SVM_EXIT_MSR		0x07c
+-#define SVM_EXIT_TASK_SWITCH	0x07d
+-#define SVM_EXIT_FERR_FREEZE	0x07e
+-#define SVM_EXIT_SHUTDOWN	0x07f
+-#define SVM_EXIT_VMRUN		0x080
+-#define SVM_EXIT_VMMCALL	0x081
+-#define SVM_EXIT_VMLOAD		0x082
+-#define SVM_EXIT_VMSAVE		0x083
+-#define SVM_EXIT_STGI		0x084
+-#define SVM_EXIT_CLGI		0x085
+-#define SVM_EXIT_SKINIT		0x086
+-#define SVM_EXIT_RDTSCP		0x087
+-#define SVM_EXIT_ICEBP		0x088
+-#define SVM_EXIT_WBINVD		0x089
+-#define SVM_EXIT_MONITOR	0x08a
+-#define SVM_EXIT_MWAIT		0x08b
+-#define SVM_EXIT_MWAIT_COND	0x08c
+-#define SVM_EXIT_NPF  		0x400
+-
+-#define SVM_EXIT_ERR		-1
+-
+-#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) // TS and MP
+-
+-#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
+-#define SVM_VMRUN  ".byte 0x0f, 0x01, 0xd8"
+-#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
+-#define SVM_CLGI   ".byte 0x0f, 0x01, 0xdd"
+-#define SVM_STGI   ".byte 0x0f, 0x01, 0xdc"
+-#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
+-
+-#endif
+-
+diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
+deleted file mode 100644
+index bb56ae3..0000000
+--- a/drivers/kvm/vmx.c
++++ /dev/null
+@@ -1,2566 +0,0 @@
+-/*
+- * Kernel-based Virtual Machine driver for Linux
+- *
+- * This module enables machines with Intel VT-x extensions to run virtual
+- * machines without emulation or binary translation.
+- *
+- * Copyright (C) 2006 Qumranet, Inc.
+- *
+- * Authors:
+- *   Avi Kivity   <avi at qumranet.com>
+- *   Yaniv Kamay  <yaniv at qumranet.com>
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2.  See
+- * the COPYING file in the top-level directory.
+- *
+- */
+-
+-#include "kvm.h"
+-#include "x86_emulate.h"
+-#include "irq.h"
+-#include "vmx.h"
+-#include "segment_descriptor.h"
+-
+-#include <linux/module.h>
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/highmem.h>
+-#include <linux/sched.h>
+-
+-#include <asm/io.h>
+-#include <asm/desc.h>
+-
+-MODULE_AUTHOR("Qumranet");
+-MODULE_LICENSE("GPL");
+-
+-struct vmcs {
+-	u32 revision_id;
+-	u32 abort;
+-	char data[0];
+-};
+-
+-struct vcpu_vmx {
+-	struct kvm_vcpu       vcpu;
+-	int                   launched;
+-	u8                    fail;
+-	struct kvm_msr_entry *guest_msrs;
+-	struct kvm_msr_entry *host_msrs;
+-	int                   nmsrs;
+-	int                   save_nmsrs;
+-	int                   msr_offset_efer;
+-#ifdef CONFIG_X86_64
+-	int                   msr_offset_kernel_gs_base;
+-#endif
+-	struct vmcs          *vmcs;
+-	struct {
+-		int           loaded;
+-		u16           fs_sel, gs_sel, ldt_sel;
+-		int           gs_ldt_reload_needed;
+-		int           fs_reload_needed;
+-	}host_state;
+-
+-};
+-
+-static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
+-{
+-	return container_of(vcpu, struct vcpu_vmx, vcpu);
+-}
+-
+-static int init_rmode_tss(struct kvm *kvm);
+-
+-static DEFINE_PER_CPU(struct vmcs *, vmxarea);
+-static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+-
+-static struct page *vmx_io_bitmap_a;
+-static struct page *vmx_io_bitmap_b;
+-
+-#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
+-
+-static struct vmcs_config {
+-	int size;
+-	int order;
+-	u32 revision_id;
+-	u32 pin_based_exec_ctrl;
+-	u32 cpu_based_exec_ctrl;
+-	u32 vmexit_ctrl;
+-	u32 vmentry_ctrl;
+-} vmcs_config;
+-
+-#define VMX_SEGMENT_FIELD(seg)					\
+-	[VCPU_SREG_##seg] = {                                   \
+-		.selector = GUEST_##seg##_SELECTOR,		\
+-		.base = GUEST_##seg##_BASE,		   	\
+-		.limit = GUEST_##seg##_LIMIT,		   	\
+-		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
+-	}
+-
+-static struct kvm_vmx_segment_field {
+-	unsigned selector;
+-	unsigned base;
+-	unsigned limit;
+-	unsigned ar_bytes;
+-} kvm_vmx_segment_fields[] = {
+-	VMX_SEGMENT_FIELD(CS),
+-	VMX_SEGMENT_FIELD(DS),
+-	VMX_SEGMENT_FIELD(ES),
+-	VMX_SEGMENT_FIELD(FS),
+-	VMX_SEGMENT_FIELD(GS),
+-	VMX_SEGMENT_FIELD(SS),
+-	VMX_SEGMENT_FIELD(TR),
+-	VMX_SEGMENT_FIELD(LDTR),
+-};
+-
+-/*
+- * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
+- * away by decrementing the array size.
+- */
+-static const u32 vmx_msr_index[] = {
+-#ifdef CONFIG_X86_64
+-	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
+-#endif
+-	MSR_EFER, MSR_K6_STAR,
+-};
+-#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
+-
+-static void load_msrs(struct kvm_msr_entry *e, int n)
+-{
+-	int i;
+-
+-	for (i = 0; i < n; ++i)
+-		wrmsrl(e[i].index, e[i].data);
+-}
+-
+-static void save_msrs(struct kvm_msr_entry *e, int n)
+-{
+-	int i;
+-
+-	for (i = 0; i < n; ++i)
+-		rdmsrl(e[i].index, e[i].data);
+-}
+-
+-static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
+-{
+-	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+-}
+-
+-static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
+-{
+-	int efer_offset = vmx->msr_offset_efer;
+-	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
+-		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
+-}
+-
+-static inline int is_page_fault(u32 intr_info)
+-{
+-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+-			     INTR_INFO_VALID_MASK)) ==
+-		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
+-}
+-
+-static inline int is_no_device(u32 intr_info)
+-{
+-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+-			     INTR_INFO_VALID_MASK)) ==
+-		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
+-}
+-
+-static inline int is_external_interrupt(u32 intr_info)
+-{
+-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+-		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
+-}
+-
+-static inline int cpu_has_vmx_tpr_shadow(void)
+-{
+-	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
+-}
+-
+-static inline int vm_need_tpr_shadow(struct kvm *kvm)
+-{
+-	return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
+-}
+-
+-static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
+-{
+-	int i;
+-
+-	for (i = 0; i < vmx->nmsrs; ++i)
+-		if (vmx->guest_msrs[i].index == msr)
+-			return i;
+-	return -1;
+-}
+-
+-static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+-{
+-	int i;
+-
+-	i = __find_msr_index(vmx, msr);
+-	if (i >= 0)
+-		return &vmx->guest_msrs[i];
+-	return NULL;
+-}
+-
+-static void vmcs_clear(struct vmcs *vmcs)
+-{
+-	u64 phys_addr = __pa(vmcs);
+-	u8 error;
+-
+-	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
+-		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+-		      : "cc", "memory");
+-	if (error)
+-		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
+-		       vmcs, phys_addr);
+-}
+-
+-static void __vcpu_clear(void *arg)
+-{
+-	struct vcpu_vmx *vmx = arg;
+-	int cpu = raw_smp_processor_id();
+-
+-	if (vmx->vcpu.cpu == cpu)
+-		vmcs_clear(vmx->vmcs);
+-	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
+-		per_cpu(current_vmcs, cpu) = NULL;
+-	rdtscll(vmx->vcpu.host_tsc);
+-}
+-
+-static void vcpu_clear(struct vcpu_vmx *vmx)
+-{
+-	if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
+-		smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
+-					 vmx, 0, 1);
 -	else
--		parms.sigtype = HCALL_SIGT_BY_WQE;
-+	/* Always signal by WQE so we can hide circ. WQEs */
-+	parms.sigtype = HCALL_SIGT_BY_WQE;
- 
- 	/* UD_AV CIRCUMVENTION */
- 	max_send_sge = init_attr->cap.max_send_sge;
-@@ -618,6 +616,10 @@ static struct ehca_qp *internal_create_qp(
- 	parms.squeue.max_sge = max_send_sge;
- 	parms.rqueue.max_sge = max_recv_sge;
- 
-+	/* RC QPs need one more SWQE for unsolicited ack circumvention */
-+	if (qp_type == IB_QPT_RC)
-+		parms.squeue.max_wr++;
-+
- 	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
- 		if (HAS_SQ(my_qp))
- 			ehca_determine_small_queue(
-@@ -650,6 +652,8 @@ static struct ehca_qp *internal_create_qp(
- 			parms.squeue.act_nr_sges = 1;
- 			parms.rqueue.act_nr_sges = 1;
- 		}
-+		/* hide the extra WQE */
-+		parms.squeue.act_nr_wqes--;
- 		break;
- 	case IB_QPT_UD:
- 	case IB_QPT_GSI:
-@@ -729,12 +733,31 @@ static struct ehca_qp *internal_create_qp(
- 	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
- 	my_qp->init_attr = *init_attr;
- 
-+	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
-+		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
-+			&my_qp->ib_qp;
-+		if (ehca_nr_ports < 0) {
-+			/* alloc array to cache subsequent modify qp parms
-+			 * for autodetect mode
-+			 */
-+			my_qp->mod_qp_parm =
-+				kzalloc(EHCA_MOD_QP_PARM_MAX *
-+					sizeof(*my_qp->mod_qp_parm),
-+					GFP_KERNEL);
-+			if (!my_qp->mod_qp_parm) {
-+				ehca_err(pd->device,
-+					 "Could not alloc mod_qp_parm");
-+				goto create_qp_exit4;
-+			}
-+		}
-+	}
-+
- 	/* NOTE: define_apq0() not supported yet */
- 	if (qp_type == IB_QPT_GSI) {
- 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
- 		if (h_ret != H_SUCCESS) {
- 			ret = ehca2ib_return_code(h_ret);
--			goto create_qp_exit4;
-+			goto create_qp_exit5;
- 		}
- 	}
- 
-@@ -743,7 +766,7 @@ static struct ehca_qp *internal_create_qp(
- 		if (ret) {
- 			ehca_err(pd->device,
- 				 "Couldn't assign qp to send_cq ret=%i", ret);
--			goto create_qp_exit4;
-+			goto create_qp_exit5;
- 		}
- 	}
- 
-@@ -769,12 +792,18 @@ static struct ehca_qp *internal_create_qp(
- 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
- 			ehca_err(pd->device, "Copy to udata failed");
- 			ret = -EINVAL;
--			goto create_qp_exit4;
-+			goto create_qp_exit6;
- 		}
- 	}
- 
- 	return my_qp;
- 
-+create_qp_exit6:
-+	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
-+
-+create_qp_exit5:
-+	kfree(my_qp->mod_qp_parm);
-+
- create_qp_exit4:
- 	if (HAS_RQ(my_qp))
- 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-@@ -858,7 +887,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
- 				update_mask,
- 				mqpcb, my_qp->galpas.kernel);
- 	if (hret != H_SUCCESS) {
--		ehca_err(pd->device, "Could not modify SRQ to INIT"
-+		ehca_err(pd->device, "Could not modify SRQ to INIT "
- 			 "ehca_qp=%p qp_num=%x h_ret=%li",
- 			 my_qp, my_qp->real_qp_num, hret);
- 		goto create_srq2;
-@@ -872,7 +901,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
- 				update_mask,
- 				mqpcb, my_qp->galpas.kernel);
- 	if (hret != H_SUCCESS) {
--		ehca_err(pd->device, "Could not enable SRQ"
-+		ehca_err(pd->device, "Could not enable SRQ "
- 			 "ehca_qp=%p qp_num=%x h_ret=%li",
- 			 my_qp, my_qp->real_qp_num, hret);
- 		goto create_srq2;
-@@ -886,7 +915,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
- 				update_mask,
- 				mqpcb, my_qp->galpas.kernel);
- 	if (hret != H_SUCCESS) {
--		ehca_err(pd->device, "Could not modify SRQ to RTR"
-+		ehca_err(pd->device, "Could not modify SRQ to RTR "
- 			 "ehca_qp=%p qp_num=%x h_ret=%li",
- 			 my_qp, my_qp->real_qp_num, hret);
- 		goto create_srq2;
-@@ -992,7 +1021,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
- 	unsigned long flags = 0;
- 
- 	/* do query_qp to obtain current attr values */
--	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-+	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
- 	if (!mqpcb) {
- 		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
- 			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
-@@ -1180,6 +1209,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
- 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
- 	}
- 	if (attr_mask & IB_QP_PORT) {
-+		struct ehca_sport *sport;
-+		struct ehca_qp *aqp1;
- 		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
- 			ret = -EINVAL;
- 			ehca_err(ibqp->device, "Invalid port=%x. "
-@@ -1188,6 +1219,29 @@ static int internal_modify_qp(struct ib_qp *ibqp,
- 				 shca->num_ports);
- 			goto modify_qp_exit2;
- 		}
-+		sport = &shca->sport[attr->port_num - 1];
-+		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
-+			/* should not occur */
-+			ret = -EFAULT;
-+			ehca_err(ibqp->device, "AQP1 was not created for "
-+				 "port=%x", attr->port_num);
-+			goto modify_qp_exit2;
-+		}
-+		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
-+				    struct ehca_qp, ib_qp);
-+		if (ibqp->qp_type != IB_QPT_GSI &&
-+		    ibqp->qp_type != IB_QPT_SMI &&
-+		    aqp1->mod_qp_parm) {
-+			/*
-+			 * firmware will reject this modify_qp() because
-+			 * port is not activated/initialized fully
-+			 */
-+			ret = -EFAULT;
-+			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
-+				  "either port is being activated (try again) "
-+				  "or cabling issue", attr->port_num);
-+			goto modify_qp_exit2;
-+		}
- 		mqpcb->prim_phys_port = attr->port_num;
- 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
- 	}
-@@ -1244,6 +1298,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
- 	}
- 
- 	if (attr_mask & IB_QP_PATH_MTU) {
-+		/* store ld(MTU) */
-+		my_qp->mtu_shift = attr->path_mtu + 7;
- 		mqpcb->path_mtu = attr->path_mtu;
- 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
- 	}
-@@ -1467,6 +1523,8 @@ modify_qp_exit1:
- int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
- 		   struct ib_udata *udata)
- {
-+	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
-+					      ib_device);
- 	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
- 					     ib_pd);
-@@ -1479,9 +1537,100 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
- 		return -EINVAL;
- 	}
- 
-+	/* The if-block below caches qp_attr to be modified for GSI and SMI
-+	 * qps during the initialization by ib_mad. When the respective port
-+	 * is activated, ie we got an event PORT_ACTIVE, we'll replay the
-+	 * cached modify calls sequence, see ehca_recover_sqs() below.
-+	 * Why that is required:
-+	 * 1) If one port is connected, older code requires that port one
-+	 *    to be connected and module option nr_ports=1 to be given by
-+	 *    user, which is very inconvenient for end user.
-+	 * 2) Firmware accepts modify_qp() only if respective port has become
-+	 *    active. Older code had a wait loop of 30sec create_qp()/
-+	 *    define_aqp1(), which is not appropriate in practice. This
-+	 *    code now removes that wait loop, see define_aqp1(), and always
-+	 *    reports all ports to ib_mad resp. users. Only activated ports
-+	 *    will then usable for the users.
-+	 */
-+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
-+		int port = my_qp->init_attr.port_num;
-+		struct ehca_sport *sport = &shca->sport[port - 1];
-+		unsigned long flags;
-+		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-+		/* cache qp_attr only during init */
-+		if (my_qp->mod_qp_parm) {
-+			struct ehca_mod_qp_parm *p;
-+			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
-+				ehca_err(&shca->ib_device,
-+					 "mod_qp_parm overflow state=%x port=%x"
-+					 " type=%x", attr->qp_state,
-+					 my_qp->init_attr.port_num,
-+					 ibqp->qp_type);
-+				spin_unlock_irqrestore(&sport->mod_sqp_lock,
-+						       flags);
-+				return -EINVAL;
-+			}
-+			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
-+			p->mask = attr_mask;
-+			p->attr = *attr;
-+			my_qp->mod_qp_parm_idx++;
-+			ehca_dbg(&shca->ib_device,
-+				 "Saved qp_attr for state=%x port=%x type=%x",
-+				 attr->qp_state, my_qp->init_attr.port_num,
-+				 ibqp->qp_type);
-+			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-+			return 0;
-+		}
-+		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-+	}
-+
- 	return internal_modify_qp(ibqp, attr, attr_mask, 0);
- }
- 
-+void ehca_recover_sqp(struct ib_qp *sqp)
-+{
-+	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
-+	int port = my_sqp->init_attr.port_num;
-+	struct ib_qp_attr attr;
-+	struct ehca_mod_qp_parm *qp_parm;
-+	int i, qp_parm_idx, ret;
-+	unsigned long flags, wr_cnt;
-+
-+	if (!my_sqp->mod_qp_parm)
-+		return;
-+	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
-+
-+	qp_parm = my_sqp->mod_qp_parm;
-+	qp_parm_idx = my_sqp->mod_qp_parm_idx;
-+	for (i = 0; i < qp_parm_idx; i++) {
-+		attr = qp_parm[i].attr;
-+		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
-+		if (ret) {
-+			ehca_err(sqp->device, "Could not modify SQP port=%x "
-+				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
-+			goto free_qp_parm;
-+		}
-+		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
-+			 port, sqp->qp_num, attr.qp_state);
-+	}
-+
-+	/* re-trigger posted recv wrs */
-+	wr_cnt =  my_sqp->ipz_rqueue.current_q_offset /
-+		my_sqp->ipz_rqueue.qe_size;
-+	if (wr_cnt) {
-+		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
-+		hipz_update_rqa(my_sqp, wr_cnt);
-+		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
-+		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
-+			 port, sqp->qp_num, wr_cnt);
-+	}
-+
-+free_qp_parm:
-+	kfree(qp_parm);
-+	/* this prevents subsequent calls to modify_qp() to cache qp_attr */
-+	my_sqp->mod_qp_parm = NULL;
-+}
-+
- int ehca_query_qp(struct ib_qp *qp,
- 		  struct ib_qp_attr *qp_attr,
- 		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
-@@ -1769,6 +1918,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
- 	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
- 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
- 					     ib_pd);
-+	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
- 	u32 cur_pid = current->tgid;
- 	u32 qp_num = my_qp->real_qp_num;
- 	int ret;
-@@ -1815,6 +1965,14 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
- 	port_num = my_qp->init_attr.port_num;
- 	qp_type  = my_qp->init_attr.qp_type;
- 
-+	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
-+		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-+		kfree(my_qp->mod_qp_parm);
-+		my_qp->mod_qp_parm = NULL;
-+		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
-+		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-+	}
-+
- 	/* no support for IB_QPT_SMI yet */
- 	if (qp_type == IB_QPT_GSI) {
- 		struct ib_event event;
-diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
-index ea91360..3aacc8c 100644
---- a/drivers/infiniband/hw/ehca/ehca_reqs.c
-+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
-@@ -50,6 +50,9 @@
- #include "hcp_if.h"
- #include "hipz_fns.h"
- 
-+/* in RC traffic, insert an empty RDMA READ every this many packets */
-+#define ACK_CIRC_THRESHOLD 2000000
-+
- static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
- 				  struct ehca_wqe *wqe_p,
- 				  struct ib_recv_wr *recv_wr)
-@@ -81,7 +84,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
- 	if (ehca_debug_level) {
- 		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
- 			     ipz_rqueue);
--		ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
-+		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
- 	}
- 
- 	return 0;
-@@ -135,7 +138,8 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
- 
- static inline int ehca_write_swqe(struct ehca_qp *qp,
- 				  struct ehca_wqe *wqe_p,
--				  const struct ib_send_wr *send_wr)
-+				  const struct ib_send_wr *send_wr,
-+				  int hidden)
- {
- 	u32 idx;
- 	u64 dma_length;
-@@ -176,7 +180,9 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
- 
- 	wqe_p->wr_flag = 0;
- 
--	if (send_wr->send_flags & IB_SEND_SIGNALED)
-+	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
-+	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-+	    && !hidden)
- 		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
- 
- 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
-@@ -199,7 +205,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
- 
- 		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
- 		wqe_p->local_ee_context_qkey = remote_qkey;
--		if (!send_wr->wr.ud.ah) {
-+		if (unlikely(!send_wr->wr.ud.ah)) {
- 			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
- 			return -EINVAL;
- 		}
-@@ -255,6 +261,15 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
- 		} /* eof idx */
- 		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
- 
-+		/* unsolicited ack circumvention */
-+		if (send_wr->opcode == IB_WR_RDMA_READ) {
-+			/* on RDMA read, switch on and reset counters */
-+			qp->message_count = qp->packet_count = 0;
-+			qp->unsol_ack_circ = 1;
-+		} else
-+			/* else estimate #packets */
-+			qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
-+
- 		break;
- 
- 	default:
-@@ -355,13 +370,49 @@ static inline void map_ib_wc_status(u32 cqe_status,
- 		*wc_status = IB_WC_SUCCESS;
- }
- 
-+static inline int post_one_send(struct ehca_qp *my_qp,
-+			 struct ib_send_wr *cur_send_wr,
-+			 struct ib_send_wr **bad_send_wr,
-+			 int hidden)
-+{
-+	struct ehca_wqe *wqe_p;
-+	int ret;
-+	u64 start_offset = my_qp->ipz_squeue.current_q_offset;
-+
-+	/* get pointer next to free WQE */
-+	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
-+	if (unlikely(!wqe_p)) {
-+		/* too many posted work requests: queue overflow */
-+		if (bad_send_wr)
-+			*bad_send_wr = cur_send_wr;
-+		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
-+			 "qp_num=%x", my_qp->ib_qp.qp_num);
-+		return -ENOMEM;
-+	}
-+	/* write a SEND WQE into the QUEUE */
-+	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
-+	/*
-+	 * if something failed,
-+	 * reset the free entry pointer to the start value
-+	 */
-+	if (unlikely(ret)) {
-+		my_qp->ipz_squeue.current_q_offset = start_offset;
-+		if (bad_send_wr)
-+			*bad_send_wr = cur_send_wr;
-+		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
-+			 "qp_num=%x", my_qp->ib_qp.qp_num);
-+		return -EINVAL;
-+	}
-+
-+	return 0;
-+}
-+
- int ehca_post_send(struct ib_qp *qp,
- 		   struct ib_send_wr *send_wr,
- 		   struct ib_send_wr **bad_send_wr)
- {
- 	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
- 	struct ib_send_wr *cur_send_wr;
--	struct ehca_wqe *wqe_p;
- 	int wqe_cnt = 0;
- 	int ret = 0;
- 	unsigned long flags;
-@@ -369,37 +420,33 @@ int ehca_post_send(struct ib_qp *qp,
- 	/* LOCK the QUEUE */
- 	spin_lock_irqsave(&my_qp->spinlock_s, flags);
- 
-+	/* Send an empty extra RDMA read if:
-+	 *  1) there has been an RDMA read on this connection before
-+	 *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
-+	 *  3) we can be sure that any previous extra RDMA read has been
-+	 *     processed so we don't overflow the SQ
-+	 */
-+	if (unlikely(my_qp->unsol_ack_circ &&
-+		     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
-+		     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
-+		/* insert an empty RDMA READ to fix up the remote QP state */
-+		struct ib_send_wr circ_wr;
-+		memset(&circ_wr, 0, sizeof(circ_wr));
-+		circ_wr.opcode = IB_WR_RDMA_READ;
-+		post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
-+		wqe_cnt++;
-+		ehca_dbg(qp->device, "posted circ wr  qp_num=%x", qp->qp_num);
-+		my_qp->message_count = my_qp->packet_count = 0;
-+	}
-+
- 	/* loop processes list of send reqs */
- 	for (cur_send_wr = send_wr; cur_send_wr != NULL;
- 	     cur_send_wr = cur_send_wr->next) {
--		u64 start_offset = my_qp->ipz_squeue.current_q_offset;
--		/* get pointer next to free WQE */
--		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
--		if (unlikely(!wqe_p)) {
--			/* too many posted work requests: queue overflow */
--			if (bad_send_wr)
--				*bad_send_wr = cur_send_wr;
--			if (wqe_cnt == 0) {
--				ret = -ENOMEM;
--				ehca_err(qp->device, "Too many posted WQEs "
--					 "qp_num=%x", qp->qp_num);
--			}
--			goto post_send_exit0;
--		}
--		/* write a SEND WQE into the QUEUE */
--		ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
+-		__vcpu_clear(vmx);
+-	vmx->launched = 0;
+-}
+-
+-static unsigned long vmcs_readl(unsigned long field)
+-{
+-	unsigned long value;
+-
+-	asm volatile (ASM_VMX_VMREAD_RDX_RAX
+-		      : "=a"(value) : "d"(field) : "cc");
+-	return value;
+-}
+-
+-static u16 vmcs_read16(unsigned long field)
+-{
+-	return vmcs_readl(field);
+-}
+-
+-static u32 vmcs_read32(unsigned long field)
+-{
+-	return vmcs_readl(field);
+-}
+-
+-static u64 vmcs_read64(unsigned long field)
+-{
+-#ifdef CONFIG_X86_64
+-	return vmcs_readl(field);
+-#else
+-	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
+-#endif
+-}
+-
+-static noinline void vmwrite_error(unsigned long field, unsigned long value)
+-{
+-	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
+-	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+-	dump_stack();
+-}
+-
+-static void vmcs_writel(unsigned long field, unsigned long value)
+-{
+-	u8 error;
+-
+-	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
+-		       : "=q"(error) : "a"(value), "d"(field) : "cc" );
+-	if (unlikely(error))
+-		vmwrite_error(field, value);
+-}
+-
+-static void vmcs_write16(unsigned long field, u16 value)
+-{
+-	vmcs_writel(field, value);
+-}
+-
+-static void vmcs_write32(unsigned long field, u32 value)
+-{
+-	vmcs_writel(field, value);
+-}
+-
+-static void vmcs_write64(unsigned long field, u64 value)
+-{
+-#ifdef CONFIG_X86_64
+-	vmcs_writel(field, value);
+-#else
+-	vmcs_writel(field, value);
+-	asm volatile ("");
+-	vmcs_writel(field+1, value >> 32);
+-#endif
+-}
+-
+-static void vmcs_clear_bits(unsigned long field, u32 mask)
+-{
+-	vmcs_writel(field, vmcs_readl(field) & ~mask);
+-}
+-
+-static void vmcs_set_bits(unsigned long field, u32 mask)
+-{
+-	vmcs_writel(field, vmcs_readl(field) | mask);
+-}
+-
+-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+-{
+-	u32 eb;
+-
+-	eb = 1u << PF_VECTOR;
+-	if (!vcpu->fpu_active)
+-		eb |= 1u << NM_VECTOR;
+-	if (vcpu->guest_debug.enabled)
+-		eb |= 1u << 1;
+-	if (vcpu->rmode.active)
+-		eb = ~0;
+-	vmcs_write32(EXCEPTION_BITMAP, eb);
+-}
+-
+-static void reload_tss(void)
+-{
+-#ifndef CONFIG_X86_64
+-
+-	/*
+-	 * VT restores TR but not its size.  Useless.
+-	 */
+-	struct descriptor_table gdt;
+-	struct segment_descriptor *descs;
+-
+-	get_gdt(&gdt);
+-	descs = (void *)gdt.base;
+-	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
+-	load_TR_desc();
+-#endif
+-}
+-
+-static void load_transition_efer(struct vcpu_vmx *vmx)
+-{
+-	u64 trans_efer;
+-	int efer_offset = vmx->msr_offset_efer;
+-
+-	trans_efer = vmx->host_msrs[efer_offset].data;
+-	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
+-	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
+-	wrmsrl(MSR_EFER, trans_efer);
+-	vmx->vcpu.stat.efer_reload++;
+-}
+-
+-static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+-	if (vmx->host_state.loaded)
+-		return;
+-
+-	vmx->host_state.loaded = 1;
+-	/*
+-	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
+-	 * allow segment selectors with cpl > 0 or ti == 1.
+-	 */
+-	vmx->host_state.ldt_sel = read_ldt();
+-	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+-	vmx->host_state.fs_sel = read_fs();
+-	if (!(vmx->host_state.fs_sel & 7)) {
+-		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
+-		vmx->host_state.fs_reload_needed = 0;
+-	} else {
+-		vmcs_write16(HOST_FS_SELECTOR, 0);
+-		vmx->host_state.fs_reload_needed = 1;
+-	}
+-	vmx->host_state.gs_sel = read_gs();
+-	if (!(vmx->host_state.gs_sel & 7))
+-		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
+-	else {
+-		vmcs_write16(HOST_GS_SELECTOR, 0);
+-		vmx->host_state.gs_ldt_reload_needed = 1;
+-	}
+-
+-#ifdef CONFIG_X86_64
+-	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
+-	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
+-#else
+-	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
+-	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
+-#endif
+-
+-#ifdef CONFIG_X86_64
+-	if (is_long_mode(&vmx->vcpu)) {
+-		save_msrs(vmx->host_msrs +
+-			  vmx->msr_offset_kernel_gs_base, 1);
+-	}
+-#endif
+-	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+-	if (msr_efer_need_save_restore(vmx))
+-		load_transition_efer(vmx);
+-}
+-
+-static void vmx_load_host_state(struct vcpu_vmx *vmx)
+-{
+-	unsigned long flags;
+-
+-	if (!vmx->host_state.loaded)
+-		return;
+-
+-	vmx->host_state.loaded = 0;
+-	if (vmx->host_state.fs_reload_needed)
+-		load_fs(vmx->host_state.fs_sel);
+-	if (vmx->host_state.gs_ldt_reload_needed) {
+-		load_ldt(vmx->host_state.ldt_sel);
 -		/*
--		 * if something failed,
--		 * reset the free entry pointer to the start value
+-		 * If we have to reload gs, we must take care to
+-		 * preserve our gs base.
 -		 */
-+		ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
- 		if (unlikely(ret)) {
--			my_qp->ipz_squeue.current_q_offset = start_offset;
--			*bad_send_wr = cur_send_wr;
--			if (wqe_cnt == 0) {
--				ret = -EINVAL;
--				ehca_err(qp->device, "Could not write WQE "
--					 "qp_num=%x", qp->qp_num);
--			}
-+			/* if one or more WQEs were successful, don't fail */
-+			if (wqe_cnt)
-+				ret = 0;
- 			goto post_send_exit0;
- 		}
- 		wqe_cnt++;
-@@ -410,6 +457,7 @@ int ehca_post_send(struct ib_qp *qp,
- post_send_exit0:
- 	iosync(); /* serialize GAL register access */
- 	hipz_update_sqa(my_qp, wqe_cnt);
-+	my_qp->message_count += wqe_cnt;
- 	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
- 	return ret;
- }
-diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
-index f0792e5..79e72b2 100644
---- a/drivers/infiniband/hw/ehca/ehca_sqp.c
-+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
-@@ -40,11 +40,8 @@
-  */
- 
- 
--#include <linux/module.h>
--#include <linux/err.h>
- #include "ehca_classes.h"
- #include "ehca_tools.h"
--#include "ehca_qes.h"
- #include "ehca_iverbs.h"
- #include "hcp_if.h"
- 
-@@ -93,6 +90,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
- 		return H_PARAMETER;
- 	}
- 
-+	if (ehca_nr_ports < 0) /* autodetect mode */
-+		return H_SUCCESS;
-+
- 	for (counter = 0;
- 	     shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
- 		     counter < ehca_port_act_time;
-diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
-index 851df8a..4146210 100644
---- a/drivers/infiniband/hw/ipath/ipath_common.h
-+++ b/drivers/infiniband/hw/ipath/ipath_common.h
-@@ -82,6 +82,16 @@
- #define IPATH_IB_LINK_EXTERNAL	7 /* normal, disable local loopback */
- 
- /*
-+ * These 3 values (SDR and DDR may be ORed for auto-speed
-+ * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
-+ * with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs.  They
-+ * are also the the possible values for ipath_link_speed_enabled and active
-+ * The values were chosen to match values used within the IB spec.
-+ */
-+#define IPATH_IB_SDR 1
-+#define IPATH_IB_DDR 2
-+
-+/*
-  * stats maintained by the driver.  For now, at least, this is global
-  * to all minor devices.
-  */
-@@ -433,8 +443,9 @@ struct ipath_user_info {
- #define IPATH_CMD_UNUSED_2	26
- #define IPATH_CMD_PIOAVAILUPD	27	/* force an update of PIOAvail reg */
- #define IPATH_CMD_POLL_TYPE	28	/* set the kind of polling we want */
-+#define IPATH_CMD_ARMLAUNCH_CTRL	29 /* armlaunch detection control */
- 
--#define IPATH_CMD_MAX		28
-+#define IPATH_CMD_MAX		29
- 
- /*
-  * Poll types
-@@ -477,6 +488,8 @@ struct ipath_cmd {
- 		__u64 port_info;
- 		/* enable/disable receipt of packets */
- 		__u32 recv_ctrl;
-+		/* enable/disable armlaunch errors (non-zero to enable) */
-+		__u32 armlaunch_ctrl;
- 		/* partition key to set */
- 		__u16 part_key;
- 		/* user address of __u32 bitmask of active slaves */
-@@ -579,7 +592,7 @@ struct ipath_flash {
- struct infinipath_counters {
- 	__u64 LBIntCnt;
- 	__u64 LBFlowStallCnt;
--	__u64 Reserved1;
-+	__u64 TxSDmaDescCnt;	/* was Reserved1 */
- 	__u64 TxUnsupVLErrCnt;
- 	__u64 TxDataPktCnt;
- 	__u64 TxFlowPktCnt;
-@@ -615,12 +628,26 @@ struct infinipath_counters {
- 	__u64 RxP6HdrEgrOvflCnt;
- 	__u64 RxP7HdrEgrOvflCnt;
- 	__u64 RxP8HdrEgrOvflCnt;
--	__u64 Reserved6;
--	__u64 Reserved7;
-+	__u64 RxP9HdrEgrOvflCnt;	/* was Reserved6 */
-+	__u64 RxP10HdrEgrOvflCnt;	/* was Reserved7 */
-+	__u64 RxP11HdrEgrOvflCnt;	/* new for IBA7220 */
-+	__u64 RxP12HdrEgrOvflCnt;	/* new for IBA7220 */
-+	__u64 RxP13HdrEgrOvflCnt;	/* new for IBA7220 */
-+	__u64 RxP14HdrEgrOvflCnt;	/* new for IBA7220 */
-+	__u64 RxP15HdrEgrOvflCnt;	/* new for IBA7220 */
-+	__u64 RxP16HdrEgrOvflCnt;	/* new for IBA7220 */
- 	__u64 IBStatusChangeCnt;
- 	__u64 IBLinkErrRecoveryCnt;
- 	__u64 IBLinkDownedCnt;
- 	__u64 IBSymbolErrCnt;
-+	/* The following are new for IBA7220 */
-+	__u64 RxVL15DroppedPktCnt;
-+	__u64 RxOtherLocalPhyErrCnt;
-+	__u64 PcieRetryBufDiagQwordCnt;
-+	__u64 ExcessBufferOvflCnt;
-+	__u64 LocalLinkIntegrityErrCnt;
-+	__u64 RxVlErrCnt;
-+	__u64 RxDlidFltrCnt;
- };
- 
- /*
-diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
-index d1380c7..a03bd28 100644
---- a/drivers/infiniband/hw/ipath/ipath_cq.c
-+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
-@@ -421,7 +421,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
- 	else
- 		n = head - tail;
- 	if (unlikely((u32)cqe < n)) {
--		ret = -EOVERFLOW;
-+		ret = -EINVAL;
- 		goto bail_unlock;
- 	}
- 	for (n = 0; tail != head; n++) {
-diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
-index 19c56e6..d6f6953 100644
---- a/drivers/infiniband/hw/ipath/ipath_debug.h
-+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
-@@ -55,7 +55,7 @@
- #define __IPATH_PKTDBG      0x80	/* print packet data */
- /* print process startup (init)/exit messages */
- #define __IPATH_PROCDBG     0x100
--/* print mmap/nopage stuff, not using VDBG any more */
-+/* print mmap/fault stuff, not using VDBG any more */
- #define __IPATH_MMDBG       0x200
- #define __IPATH_ERRPKTDBG   0x400
- #define __IPATH_USER_SEND   0x1000	/* use user mode send */
-@@ -81,7 +81,7 @@
- #define __IPATH_VERBDBG   0x0	/* very verbose debug */
- #define __IPATH_PKTDBG    0x0	/* print packet data */
- #define __IPATH_PROCDBG   0x0	/* process startup (init)/exit messages */
--/* print mmap/nopage stuff, not using VDBG any more */
-+/* print mmap/fault stuff, not using VDBG any more */
- #define __IPATH_MMDBG     0x0
- #define __IPATH_EPKTDBG   0x0	/* print ethernet packet data */
- #define __IPATH_IPATHDBG  0x0	/* Ethernet (IPATH) table dump on */
-diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
-index 1f152de..d5ff6ca 100644
---- a/drivers/infiniband/hw/ipath/ipath_driver.c
-+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
-@@ -121,6 +121,9 @@ static struct pci_driver ipath_driver = {
- 	.probe = ipath_init_one,
- 	.remove = __devexit_p(ipath_remove_one),
- 	.id_table = ipath_pci_tbl,
-+	.driver = {
-+		.groups = ipath_driver_attr_groups,
-+	},
- };
- 
- static void ipath_check_status(struct work_struct *work)
-@@ -331,6 +334,8 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
- 		udelay(1);
- 	}
- 
-+	ipath_disable_armlaunch(dd);
-+
- 	writeq(0, piobuf); /* length 0, no dwords actually sent */
- 	ipath_flush_wc();
- 
-@@ -362,6 +367,7 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
- done:
- 	/* disarm piobuf, so it's available again */
- 	ipath_disarm_piobufs(dd, pbnum, 1);
-+	ipath_enable_armlaunch(dd);
- }
- 
- static int __devinit ipath_init_one(struct pci_dev *pdev,
-@@ -800,31 +806,37 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
- 			  unsigned cnt)
- {
- 	unsigned i, last = first + cnt;
--	u64 sendctrl, sendorig;
-+	unsigned long flags;
- 
- 	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
--	sendorig = dd->ipath_sendctrl;
- 	for (i = first; i < last; i++) {
--		sendctrl = sendorig  | INFINIPATH_S_DISARM |
--			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
-+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-+		/*
-+		 * The disarm-related bits are write-only, so it
-+		 * is ok to OR them in with our copy of sendctrl
-+		 * while we hold the lock.
-+		 */
- 		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
--				 sendctrl);
-+			dd->ipath_sendctrl | INFINIPATH_S_DISARM |
-+			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
-+		/* can't disarm bufs back-to-back per iba7220 spec */
-+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- 	}
- 
- 	/*
--	 * Write it again with current value, in case ipath_sendctrl changed
--	 * while we were looping; no critical bits that would require
--	 * locking.
--	 *
--	 * disable PIOAVAILUPD, then re-enable, reading scratch in
-+	 * Disable PIOAVAILUPD, then re-enable, reading scratch in
- 	 * between.  This seems to avoid a chip timing race that causes
--	 * pioavail updates to memory to stop.
-+	 * pioavail updates to memory to stop.  We xor as we don't
-+	 * know the state of the bit when we're called.
- 	 */
-+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
--			 sendorig & ~INFINIPATH_S_PIOBUFAVAILUPD);
--	sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+		dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
-+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- 			 dd->ipath_sendctrl);
-+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- }
- 
- /**
-@@ -1000,12 +1012,10 @@ static void get_rhf_errstring(u32 err, char *msg, size_t len)
-  * ipath_get_egrbuf - get an eager buffer
-  * @dd: the infinipath device
-  * @bufnum: the eager buffer to get
-- * @err: unused
-  *
-  * must only be called if ipath_pd[port] is known to be allocated
-  */
--static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
--				     int err)
-+static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
- {
- 	return dd->ipath_port0_skbinfo ?
- 		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
-@@ -1097,13 +1107,14 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
- 
- /*
-  * ipath_kreceive - receive a packet
-- * @dd: the infinipath device
-+ * @pd: the infinipath port
-  *
-  * called from interrupt handler for errors or receive interrupt
-  */
--void ipath_kreceive(struct ipath_devdata *dd)
-+void ipath_kreceive(struct ipath_portdata *pd)
- {
- 	u64 *rc;
-+	struct ipath_devdata *dd = pd->port_dd;
- 	void *ebuf;
- 	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
- 	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
-@@ -1118,8 +1129,8 @@ void ipath_kreceive(struct ipath_devdata *dd)
- 		goto bail;
- 	}
- 
--	l = dd->ipath_port0head;
--	hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
-+	l = pd->port_head;
-+	hdrqtail = ipath_get_rcvhdrtail(pd);
- 	if (l == hdrqtail)
- 		goto bail;
- 
-@@ -1128,7 +1139,7 @@ reloop:
- 		u32 qp;
- 		u8 *bthbytes;
- 
--		rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
-+		rc = (u64 *) (pd->port_rcvhdrq + (l << 2));
- 		hdr = (struct ipath_message_header *)&rc[1];
- 		/*
- 		 * could make a network order version of IPATH_KD_QP, and
-@@ -1153,7 +1164,7 @@ reloop:
- 			etail = ipath_hdrget_index((__le32 *) rc);
- 			if (tlen > sizeof(*hdr) ||
- 			    etype == RCVHQ_RCV_TYPE_NON_KD)
--				ebuf = ipath_get_egrbuf(dd, etail, 0);
-+				ebuf = ipath_get_egrbuf(dd, etail);
- 		}
- 
- 		/*
-@@ -1188,7 +1199,7 @@ reloop:
- 				  be32_to_cpu(hdr->bth[0]) & 0xff);
- 		else {
- 			/*
--			 * error packet, type of error	unknown.
-+			 * error packet, type of error unknown.
- 			 * Probably type 3, but we don't know, so don't
- 			 * even try to print the opcode, etc.
- 			 */
-@@ -1238,7 +1249,7 @@ reloop:
- 		 * earlier packets, we "almost" guarantee we have covered
- 		 * that case.
- 		 */
--		u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
-+		u32 hqtail = ipath_get_rcvhdrtail(pd);
- 		if (hqtail != hdrqtail) {
- 			hdrqtail = hqtail;
- 			reloop = 1; /* loop 1 extra time at most */
-@@ -1248,7 +1259,7 @@ reloop:
- 
- 	pkttot += i;
- 
--	dd->ipath_port0head = l;
-+	pd->port_head = l;
- 
- 	if (pkttot > ipath_stats.sps_maxpkts_call)
- 		ipath_stats.sps_maxpkts_call = pkttot;
-@@ -1332,14 +1343,9 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
- 		/*
- 		 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
- 		 */
--		if (i > 3) {
--			if (i & 1)
--				piov = le64_to_cpu(
--					dd->ipath_pioavailregs_dma[i - 1]);
--			else
--				piov = le64_to_cpu(
--					dd->ipath_pioavailregs_dma[i + 1]);
--		} else
-+		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-+			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
-+		else
- 			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
- 		pchg = _IPATH_ALL_CHECKBITS &
- 			~(dd->ipath_pioavailshadow[i] ^ piov);
-@@ -1598,7 +1604,8 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
- 
- 	/* clear for security and sanity on each use */
- 	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
--	memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
-+	if (pd->port_rcvhdrtail_kvaddr)
-+		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
- 
- 	/*
- 	 * tell chip each time we init it, even if we are re-using previous
-@@ -1614,77 +1621,6 @@ bail:
- 	return ret;
- }
- 
--int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
--			   u64 bits_to_wait_for, u64 * valp)
+-		local_irq_save(flags);
+-		load_gs(vmx->host_state.gs_sel);
+-#ifdef CONFIG_X86_64
+-		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+-#endif
+-		local_irq_restore(flags);
+-	}
+-	reload_tss();
+-	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+-	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
+-	if (msr_efer_need_save_restore(vmx))
+-		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+-}
+-
+-/*
+- * Switches to specified vcpu, until a matching vcpu_put(), but assumes
+- * vcpu mutex is already taken.
+- */
+-static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+-{
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-	u64 phys_addr = __pa(vmx->vmcs);
+-	u64 tsc_this, delta;
+-
+-	if (vcpu->cpu != cpu) {
+-		vcpu_clear(vmx);
+-		kvm_migrate_apic_timer(vcpu);
+-	}
+-
+-	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
+-		u8 error;
+-
+-		per_cpu(current_vmcs, cpu) = vmx->vmcs;
+-		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
+-			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+-			      : "cc");
+-		if (error)
+-			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
+-			       vmx->vmcs, phys_addr);
+-	}
+-
+-	if (vcpu->cpu != cpu) {
+-		struct descriptor_table dt;
+-		unsigned long sysenter_esp;
+-
+-		vcpu->cpu = cpu;
+-		/*
+-		 * Linux uses per-cpu TSS and GDT, so set these when switching
+-		 * processors.
+-		 */
+-		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
+-		get_gdt(&dt);
+-		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
+-
+-		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
+-		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+-
+-		/*
+-		 * Make sure the time stamp counter is monotonous.
+-		 */
+-		rdtscll(tsc_this);
+-		delta = vcpu->host_tsc - tsc_this;
+-		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
+-	}
+-}
+-
+-static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
+-{
+-	vmx_load_host_state(to_vmx(vcpu));
+-	kvm_put_guest_fpu(vcpu);
+-}
+-
+-static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
+-{
+-	if (vcpu->fpu_active)
+-		return;
+-	vcpu->fpu_active = 1;
+-	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
+-	if (vcpu->cr0 & X86_CR0_TS)
+-		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
+-	update_exception_bitmap(vcpu);
+-}
+-
+-static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+-{
+-	if (!vcpu->fpu_active)
+-		return;
+-	vcpu->fpu_active = 0;
+-	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
+-	update_exception_bitmap(vcpu);
+-}
+-
+-static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
+-{
+-	vcpu_clear(to_vmx(vcpu));
+-}
+-
+-static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
+-{
+-	return vmcs_readl(GUEST_RFLAGS);
+-}
+-
+-static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+-{
+-	if (vcpu->rmode.active)
+-		rflags |= IOPL_MASK | X86_EFLAGS_VM;
+-	vmcs_writel(GUEST_RFLAGS, rflags);
+-}
+-
+-static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+-{
+-	unsigned long rip;
+-	u32 interruptibility;
+-
+-	rip = vmcs_readl(GUEST_RIP);
+-	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+-	vmcs_writel(GUEST_RIP, rip);
+-
+-	/*
+-	 * We emulated an instruction, so temporary interrupt blocking
+-	 * should be removed, if set.
+-	 */
+-	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+-	if (interruptibility & 3)
+-		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
+-			     interruptibility & ~3);
+-	vcpu->interrupt_window_open = 1;
+-}
+-
+-static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
+-{
+-	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
+-	       vmcs_readl(GUEST_RIP));
+-	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+-		     GP_VECTOR |
+-		     INTR_TYPE_EXCEPTION |
+-		     INTR_INFO_DELIEVER_CODE_MASK |
+-		     INTR_INFO_VALID_MASK);
+-}
+-
+-/*
+- * Swap MSR entry in host/guest MSR entry array.
+- */
+-#ifdef CONFIG_X86_64
+-static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
+-{
+-	struct kvm_msr_entry tmp;
+-
+-	tmp = vmx->guest_msrs[to];
+-	vmx->guest_msrs[to] = vmx->guest_msrs[from];
+-	vmx->guest_msrs[from] = tmp;
+-	tmp = vmx->host_msrs[to];
+-	vmx->host_msrs[to] = vmx->host_msrs[from];
+-	vmx->host_msrs[from] = tmp;
+-}
+-#endif
+-
+-/*
+- * Set up the vmcs to automatically save and restore system
+- * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
+- * mode, as fiddling with msrs is very expensive.
+- */
+-static void setup_msrs(struct vcpu_vmx *vmx)
+-{
+-	int save_nmsrs;
+-
+-	save_nmsrs = 0;
+-#ifdef CONFIG_X86_64
+-	if (is_long_mode(&vmx->vcpu)) {
+-		int index;
+-
+-		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
+-		if (index >= 0)
+-			move_msr_up(vmx, index, save_nmsrs++);
+-		index = __find_msr_index(vmx, MSR_LSTAR);
+-		if (index >= 0)
+-			move_msr_up(vmx, index, save_nmsrs++);
+-		index = __find_msr_index(vmx, MSR_CSTAR);
+-		if (index >= 0)
+-			move_msr_up(vmx, index, save_nmsrs++);
+-		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
+-		if (index >= 0)
+-			move_msr_up(vmx, index, save_nmsrs++);
+-		/*
+-		 * MSR_K6_STAR is only needed on long mode guests, and only
+-		 * if efer.sce is enabled.
+-		 */
+-		index = __find_msr_index(vmx, MSR_K6_STAR);
+-		if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+-			move_msr_up(vmx, index, save_nmsrs++);
+-	}
+-#endif
+-	vmx->save_nmsrs = save_nmsrs;
+-
+-#ifdef CONFIG_X86_64
+-	vmx->msr_offset_kernel_gs_base =
+-		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
+-#endif
+-	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
+-}
+-
+-/*
+- * reads and returns guest's timestamp counter "register"
+- * guest_tsc = host_tsc + tsc_offset    -- 21.3
+- */
+-static u64 guest_read_tsc(void)
+-{
+-	u64 host_tsc, tsc_offset;
+-
+-	rdtscll(host_tsc);
+-	tsc_offset = vmcs_read64(TSC_OFFSET);
+-	return host_tsc + tsc_offset;
+-}
+-
+-/*
+- * writes 'guest_tsc' into guest's timestamp counter "register"
+- * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
+- */
+-static void guest_write_tsc(u64 guest_tsc)
+-{
+-	u64 host_tsc;
+-
+-	rdtscll(host_tsc);
+-	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
+-}
+-
+-/*
+- * Reads an msr value (of 'msr_index') into 'pdata'.
+- * Returns 0 on success, non-0 otherwise.
+- * Assumes vcpu_load() was already called.
+- */
+-static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+-{
+-	u64 data;
+-	struct kvm_msr_entry *msr;
+-
+-	if (!pdata) {
+-		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
+-		return -EINVAL;
+-	}
+-
+-	switch (msr_index) {
+-#ifdef CONFIG_X86_64
+-	case MSR_FS_BASE:
+-		data = vmcs_readl(GUEST_FS_BASE);
+-		break;
+-	case MSR_GS_BASE:
+-		data = vmcs_readl(GUEST_GS_BASE);
+-		break;
+-	case MSR_EFER:
+-		return kvm_get_msr_common(vcpu, msr_index, pdata);
+-#endif
+-	case MSR_IA32_TIME_STAMP_COUNTER:
+-		data = guest_read_tsc();
+-		break;
+-	case MSR_IA32_SYSENTER_CS:
+-		data = vmcs_read32(GUEST_SYSENTER_CS);
+-		break;
+-	case MSR_IA32_SYSENTER_EIP:
+-		data = vmcs_readl(GUEST_SYSENTER_EIP);
+-		break;
+-	case MSR_IA32_SYSENTER_ESP:
+-		data = vmcs_readl(GUEST_SYSENTER_ESP);
+-		break;
+-	default:
+-		msr = find_msr_entry(to_vmx(vcpu), msr_index);
+-		if (msr) {
+-			data = msr->data;
+-			break;
+-		}
+-		return kvm_get_msr_common(vcpu, msr_index, pdata);
+-	}
+-
+-	*pdata = data;
+-	return 0;
+-}
+-
+-/*
+- * Writes msr value into into the appropriate "register".
+- * Returns 0 on success, non-0 otherwise.
+- * Assumes vcpu_load() was already called.
+- */
+-static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+-{
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-	struct kvm_msr_entry *msr;
+-	int ret = 0;
+-
+-	switch (msr_index) {
+-#ifdef CONFIG_X86_64
+-	case MSR_EFER:
+-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+-		if (vmx->host_state.loaded)
+-			load_transition_efer(vmx);
+-		break;
+-	case MSR_FS_BASE:
+-		vmcs_writel(GUEST_FS_BASE, data);
+-		break;
+-	case MSR_GS_BASE:
+-		vmcs_writel(GUEST_GS_BASE, data);
+-		break;
+-#endif
+-	case MSR_IA32_SYSENTER_CS:
+-		vmcs_write32(GUEST_SYSENTER_CS, data);
+-		break;
+-	case MSR_IA32_SYSENTER_EIP:
+-		vmcs_writel(GUEST_SYSENTER_EIP, data);
+-		break;
+-	case MSR_IA32_SYSENTER_ESP:
+-		vmcs_writel(GUEST_SYSENTER_ESP, data);
+-		break;
+-	case MSR_IA32_TIME_STAMP_COUNTER:
+-		guest_write_tsc(data);
+-		break;
+-	default:
+-		msr = find_msr_entry(vmx, msr_index);
+-		if (msr) {
+-			msr->data = data;
+-			if (vmx->host_state.loaded)
+-				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+-			break;
+-		}
+-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+-	}
+-
+-	return ret;
+-}
+-
+-/*
+- * Sync the rsp and rip registers into the vcpu structure.  This allows
+- * registers to be accessed by indexing vcpu->regs.
+- */
+-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
+-{
+-	vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+-	vcpu->rip = vmcs_readl(GUEST_RIP);
+-}
+-
+-/*
+- * Syncs rsp and rip back into the vmcs.  Should be called after possible
+- * modification.
+- */
+-static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
+-{
+-	vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
+-	vmcs_writel(GUEST_RIP, vcpu->rip);
+-}
+-
+-static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
+-{
+-	unsigned long dr7 = 0x400;
+-	int old_singlestep;
+-
+-	old_singlestep = vcpu->guest_debug.singlestep;
+-
+-	vcpu->guest_debug.enabled = dbg->enabled;
+-	if (vcpu->guest_debug.enabled) {
+-		int i;
+-
+-		dr7 |= 0x200;  /* exact */
+-		for (i = 0; i < 4; ++i) {
+-			if (!dbg->breakpoints[i].enabled)
+-				continue;
+-			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
+-			dr7 |= 2 << (i*2);    /* global enable */
+-			dr7 |= 0 << (i*4+16); /* execution breakpoint */
+-		}
+-
+-		vcpu->guest_debug.singlestep = dbg->singlestep;
+-	} else
+-		vcpu->guest_debug.singlestep = 0;
+-
+-	if (old_singlestep && !vcpu->guest_debug.singlestep) {
+-		unsigned long flags;
+-
+-		flags = vmcs_readl(GUEST_RFLAGS);
+-		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+-		vmcs_writel(GUEST_RFLAGS, flags);
+-	}
+-
+-	update_exception_bitmap(vcpu);
+-	vmcs_writel(GUEST_DR7, dr7);
+-
+-	return 0;
+-}
+-
+-static int vmx_get_irq(struct kvm_vcpu *vcpu)
+-{
+-	u32 idtv_info_field;
+-
+-	idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+-	if (idtv_info_field & INTR_INFO_VALID_MASK) {
+-		if (is_external_interrupt(idtv_info_field))
+-			return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
+-		else
+-			printk("pending exception: not handled yet\n");
+-	}
+-	return -1;
+-}
+-
+-static __init int cpu_has_kvm_support(void)
+-{
+-	unsigned long ecx = cpuid_ecx(1);
+-	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
+-}
+-
+-static __init int vmx_disabled_by_bios(void)
+-{
+-	u64 msr;
+-
+-	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+-	return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
+-		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
+-	    == MSR_IA32_FEATURE_CONTROL_LOCKED;
+-	/* locked but not enabled */
+-}
+-
+-static void hardware_enable(void *garbage)
+-{
+-	int cpu = raw_smp_processor_id();
+-	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+-	u64 old;
+-
+-	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
+-	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
+-		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
+-	    != (MSR_IA32_FEATURE_CONTROL_LOCKED |
+-		MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
+-		/* enable and lock */
+-		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
+-		       MSR_IA32_FEATURE_CONTROL_LOCKED |
+-		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
+-	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
+-	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
+-		      : "memory", "cc");
+-}
+-
+-static void hardware_disable(void *garbage)
+-{
+-	asm volatile (ASM_VMX_VMXOFF : : : "cc");
+-}
+-
+-static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
+-				      u32 msr, u32* result)
+-{
+-	u32 vmx_msr_low, vmx_msr_high;
+-	u32 ctl = ctl_min | ctl_opt;
+-
+-	rdmsr(msr, vmx_msr_low, vmx_msr_high);
+-
+-	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
+-	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
+-
+-	/* Ensure minimum (required) set of control bits are supported. */
+-	if (ctl_min & ~ctl)
+-		return -EIO;
+-
+-	*result = ctl;
+-	return 0;
+-}
+-
+-static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
+-{
+-	u32 vmx_msr_low, vmx_msr_high;
+-	u32 min, opt;
+-	u32 _pin_based_exec_control = 0;
+-	u32 _cpu_based_exec_control = 0;
+-	u32 _vmexit_control = 0;
+-	u32 _vmentry_control = 0;
+-
+-	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+-	opt = 0;
+-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
+-				&_pin_based_exec_control) < 0)
+-		return -EIO;
+-
+-	min = CPU_BASED_HLT_EXITING |
+-#ifdef CONFIG_X86_64
+-	      CPU_BASED_CR8_LOAD_EXITING |
+-	      CPU_BASED_CR8_STORE_EXITING |
+-#endif
+-	      CPU_BASED_USE_IO_BITMAPS |
+-	      CPU_BASED_MOV_DR_EXITING |
+-	      CPU_BASED_USE_TSC_OFFSETING;
+-#ifdef CONFIG_X86_64
+-	opt = CPU_BASED_TPR_SHADOW;
+-#else
+-	opt = 0;
+-#endif
+-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
+-				&_cpu_based_exec_control) < 0)
+-		return -EIO;
+-#ifdef CONFIG_X86_64
+-	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
+-		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
+-					   ~CPU_BASED_CR8_STORE_EXITING;
+-#endif
+-
+-	min = 0;
+-#ifdef CONFIG_X86_64
+-	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
+-#endif
+-	opt = 0;
+-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
+-				&_vmexit_control) < 0)
+-		return -EIO;
+-
+-	min = opt = 0;
+-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
+-				&_vmentry_control) < 0)
+-		return -EIO;
+-
+-	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
+-
+-	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
+-	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
+-		return -EIO;
+-
+-#ifdef CONFIG_X86_64
+-	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
+-	if (vmx_msr_high & (1u<<16))
+-		return -EIO;
+-#endif
+-
+-	/* Require Write-Back (WB) memory type for VMCS accesses. */
+-	if (((vmx_msr_high >> 18) & 15) != 6)
+-		return -EIO;
+-
+-	vmcs_conf->size = vmx_msr_high & 0x1fff;
+-	vmcs_conf->order = get_order(vmcs_config.size);
+-	vmcs_conf->revision_id = vmx_msr_low;
+-
+-	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
+-	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
+-	vmcs_conf->vmexit_ctrl         = _vmexit_control;
+-	vmcs_conf->vmentry_ctrl        = _vmentry_control;
+-
+-	return 0;
+-}
+-
+-static struct vmcs *alloc_vmcs_cpu(int cpu)
+-{
+-	int node = cpu_to_node(cpu);
+-	struct page *pages;
+-	struct vmcs *vmcs;
+-
+-	pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+-	if (!pages)
+-		return NULL;
+-	vmcs = page_address(pages);
+-	memset(vmcs, 0, vmcs_config.size);
+-	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
+-	return vmcs;
+-}
+-
+-static struct vmcs *alloc_vmcs(void)
+-{
+-	return alloc_vmcs_cpu(raw_smp_processor_id());
+-}
+-
+-static void free_vmcs(struct vmcs *vmcs)
+-{
+-	free_pages((unsigned long)vmcs, vmcs_config.order);
+-}
+-
+-static void free_kvm_area(void)
+-{
+-	int cpu;
+-
+-	for_each_online_cpu(cpu)
+-		free_vmcs(per_cpu(vmxarea, cpu));
+-}
+-
+-static __init int alloc_kvm_area(void)
+-{
+-	int cpu;
+-
+-	for_each_online_cpu(cpu) {
+-		struct vmcs *vmcs;
+-
+-		vmcs = alloc_vmcs_cpu(cpu);
+-		if (!vmcs) {
+-			free_kvm_area();
+-			return -ENOMEM;
+-		}
+-
+-		per_cpu(vmxarea, cpu) = vmcs;
+-	}
+-	return 0;
+-}
+-
+-static __init int hardware_setup(void)
+-{
+-	if (setup_vmcs_config(&vmcs_config) < 0)
+-		return -EIO;
+-	return alloc_kvm_area();
+-}
+-
+-static __exit void hardware_unsetup(void)
+-{
+-	free_kvm_area();
+-}
+-
+-static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
+-{
+-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+-
+-	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
+-		vmcs_write16(sf->selector, save->selector);
+-		vmcs_writel(sf->base, save->base);
+-		vmcs_write32(sf->limit, save->limit);
+-		vmcs_write32(sf->ar_bytes, save->ar);
+-	} else {
+-		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
+-			<< AR_DPL_SHIFT;
+-		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
+-	}
+-}
+-
+-static void enter_pmode(struct kvm_vcpu *vcpu)
+-{
+-	unsigned long flags;
+-
+-	vcpu->rmode.active = 0;
+-
+-	vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
+-	vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
+-	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
+-
+-	flags = vmcs_readl(GUEST_RFLAGS);
+-	flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
+-	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
+-	vmcs_writel(GUEST_RFLAGS, flags);
+-
+-	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
+-			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
+-
+-	update_exception_bitmap(vcpu);
+-
+-	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
+-	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
+-	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
+-	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);
+-
+-	vmcs_write16(GUEST_SS_SELECTOR, 0);
+-	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
+-
+-	vmcs_write16(GUEST_CS_SELECTOR,
+-		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
+-	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
+-}
+-
+-static gva_t rmode_tss_base(struct kvm* kvm)
+-{
+-	gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
+-	return base_gfn << PAGE_SHIFT;
+-}
+-
+-static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
+-{
+-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+-
+-	save->selector = vmcs_read16(sf->selector);
+-	save->base = vmcs_readl(sf->base);
+-	save->limit = vmcs_read32(sf->limit);
+-	save->ar = vmcs_read32(sf->ar_bytes);
+-	vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
+-	vmcs_write32(sf->limit, 0xffff);
+-	vmcs_write32(sf->ar_bytes, 0xf3);
+-}
+-
+-static void enter_rmode(struct kvm_vcpu *vcpu)
+-{
+-	unsigned long flags;
+-
+-	vcpu->rmode.active = 1;
+-
+-	vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
+-	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
+-
+-	vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
+-	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
+-
+-	vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
+-	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
+-
+-	flags = vmcs_readl(GUEST_RFLAGS);
+-	vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
+-
+-	flags |= IOPL_MASK | X86_EFLAGS_VM;
+-
+-	vmcs_writel(GUEST_RFLAGS, flags);
+-	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
+-	update_exception_bitmap(vcpu);
+-
+-	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
+-	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
+-	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
+-
+-	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
+-	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
+-	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
+-		vmcs_writel(GUEST_CS_BASE, 0xf0000);
+-	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
+-
+-	fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
+-	fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
+-	fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
+-	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+-
+-	kvm_mmu_reset_context(vcpu);
+-	init_rmode_tss(vcpu->kvm);
+-}
+-
+-#ifdef CONFIG_X86_64
+-
+-static void enter_lmode(struct kvm_vcpu *vcpu)
+-{
+-	u32 guest_tr_ar;
+-
+-	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
+-	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
+-		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
+-		       __FUNCTION__);
+-		vmcs_write32(GUEST_TR_AR_BYTES,
+-			     (guest_tr_ar & ~AR_TYPE_MASK)
+-			     | AR_TYPE_BUSY_64_TSS);
+-	}
+-
+-	vcpu->shadow_efer |= EFER_LMA;
+-
+-	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
+-	vmcs_write32(VM_ENTRY_CONTROLS,
+-		     vmcs_read32(VM_ENTRY_CONTROLS)
+-		     | VM_ENTRY_IA32E_MODE);
+-}
+-
+-static void exit_lmode(struct kvm_vcpu *vcpu)
 -{
--	unsigned long timeout;
--	u64 lastval, val;
--	int ret;
+-	vcpu->shadow_efer &= ~EFER_LMA;
 -
--	lastval = ipath_read_kreg64(dd, reg_id);
--	/* wait a ridiculously long time */
--	timeout = jiffies + msecs_to_jiffies(5);
--	do {
--		val = ipath_read_kreg64(dd, reg_id);
--		/* set so they have something, even on failures. */
--		*valp = val;
--		if ((val & bits_to_wait_for) == bits_to_wait_for) {
--			ret = 0;
--			break;
--		}
--		if (val != lastval)
--			ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
--				   "waiting for %llx bits\n",
--				   (unsigned long long) lastval,
--				   (unsigned long long) val,
--				   (unsigned long long) bits_to_wait_for);
--		cond_resched();
--		if (time_after(jiffies, timeout)) {
--			ipath_dbg("Didn't get bits %llx in register 0x%x, "
--				  "got %llx\n",
--				  (unsigned long long) bits_to_wait_for,
--				  reg_id, (unsigned long long) *valp);
--			ret = -ENODEV;
--			break;
--		}
--	} while (1);
+-	vmcs_write32(VM_ENTRY_CONTROLS,
+-		     vmcs_read32(VM_ENTRY_CONTROLS)
+-		     & ~VM_ENTRY_IA32E_MODE);
+-}
 -
--	return ret;
+-#endif
+-
+-static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
+-{
+-	vcpu->cr4 &= KVM_GUEST_CR4_MASK;
+-	vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
 -}
 -
--/**
-- * ipath_waitfor_mdio_cmdready - wait for last command to complete
-- * @dd: the infinipath device
-- *
-- * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
-- * away indicating the last command has completed.  It doesn't return data
-- */
--int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
+-static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 -{
--	unsigned long timeout;
--	u64 val;
--	int ret;
+-	vmx_fpu_deactivate(vcpu);
 -
--	/* wait a ridiculously long time */
--	timeout = jiffies + msecs_to_jiffies(5);
--	do {
--		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
--		if (!(val & IPATH_MDIO_CMDVALID)) {
--			ret = 0;
--			break;
--		}
--		cond_resched();
--		if (time_after(jiffies, timeout)) {
--			ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
--				  (unsigned long long) val);
--			ret = -ENODEV;
--			break;
--		}
--	} while (1);
+-	if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
+-		enter_pmode(vcpu);
 -
--	return ret;
+-	if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
+-		enter_rmode(vcpu);
+-
+-#ifdef CONFIG_X86_64
+-	if (vcpu->shadow_efer & EFER_LME) {
+-		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
+-			enter_lmode(vcpu);
+-		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
+-			exit_lmode(vcpu);
+-	}
+-#endif
+-
+-	vmcs_writel(CR0_READ_SHADOW, cr0);
+-	vmcs_writel(GUEST_CR0,
+-		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
+-	vcpu->cr0 = cr0;
+-
+-	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
+-		vmx_fpu_activate(vcpu);
 -}
 -
- 
- /*
-  * Flush all sends that might be in the ready to send state, as well as any
-@@ -2053,6 +1989,8 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
-  */
- void ipath_shutdown_device(struct ipath_devdata *dd)
- {
-+	unsigned long flags;
-+
- 	ipath_dbg("Shutting down the device\n");
- 
- 	dd->ipath_flags |= IPATH_LINKUNK;
-@@ -2073,9 +2011,13 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
- 	 * gracefully stop all sends allowing any in progress to trickle out
- 	 * first.
- 	 */
--	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
-+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-+	dd->ipath_sendctrl = 0;
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
- 	/* flush it */
- 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-+
- 	/*
- 	 * enough for anything that's going to trickle out to have actually
- 	 * done so.
-@@ -2217,25 +2159,15 @@ static int __init infinipath_init(void)
- 		goto bail_unit;
- 	}
- 
--	ret = ipath_driver_create_group(&ipath_driver.driver);
--	if (ret < 0) {
--		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
--		       "sysfs entries: error %d\n", -ret);
--		goto bail_pci;
+-static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+-{
+-	vmcs_writel(GUEST_CR3, cr3);
+-	if (vcpu->cr0 & X86_CR0_PE)
+-		vmx_fpu_deactivate(vcpu);
+-}
+-
+-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+-{
+-	vmcs_writel(CR4_READ_SHADOW, cr4);
+-	vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
+-		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
+-	vcpu->cr4 = cr4;
+-}
+-
+-#ifdef CONFIG_X86_64
+-
+-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+-{
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+-
+-	vcpu->shadow_efer = efer;
+-	if (efer & EFER_LMA) {
+-		vmcs_write32(VM_ENTRY_CONTROLS,
+-				     vmcs_read32(VM_ENTRY_CONTROLS) |
+-				     VM_ENTRY_IA32E_MODE);
+-		msr->data = efer;
+-
+-	} else {
+-		vmcs_write32(VM_ENTRY_CONTROLS,
+-				     vmcs_read32(VM_ENTRY_CONTROLS) &
+-				     ~VM_ENTRY_IA32E_MODE);
+-
+-		msr->data = efer & ~EFER_LME;
 -	}
+-	setup_msrs(vmx);
+-}
 -
- 	ret = ipath_init_ipathfs();
- 	if (ret < 0) {
- 		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
- 		       "ipathfs: error %d\n", -ret);
--		goto bail_group;
-+		goto bail_pci;
- 	}
- 
- 	goto bail;
- 
--bail_group:
--	ipath_driver_remove_group(&ipath_driver.driver);
+-#endif
 -
- bail_pci:
- 	pci_unregister_driver(&ipath_driver);
- 
-@@ -2250,8 +2182,6 @@ static void __exit infinipath_cleanup(void)
- {
- 	ipath_exit_ipathfs();
- 
--	ipath_driver_remove_group(&ipath_driver.driver);
+-static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+-{
+-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 -
- 	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
- 	pci_unregister_driver(&ipath_driver);
- 
-@@ -2344,5 +2274,34 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
- 	}
- 	return 0;
- }
-+
-+/*
-+ * Disable and enable the armlaunch error.  Used for PIO bandwidth testing on
-+ * the 7220, which is count-based, rather than trigger-based.  Safe for the
-+ * driver check, since it's at init.   Not completely safe when used for
-+ * user-mode checking, since some error checking can be lost, but not
-+ * particularly risky, and only has problematic side-effects in the face of
-+ * very buggy user code.  There is no reference counting, but that's also
-+ * fine, given the intended use.
-+ */
-+void ipath_enable_armlaunch(struct ipath_devdata *dd)
-+{
-+	dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-+		INFINIPATH_E_SPIOARMLAUNCH);
-+	dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-+		dd->ipath_errormask);
-+}
-+
-+void ipath_disable_armlaunch(struct ipath_devdata *dd)
-+{
-+	/* so don't re-enable if already set */
-+	dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
-+	dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-+		dd->ipath_errormask);
-+}
-+
- module_init(infinipath_init);
- module_exit(infinipath_cleanup);
-diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
-index e7c25db..e28a42f 100644
---- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
-+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
-@@ -510,10 +510,10 @@ int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
- {
- 	int ret;
- 
--	ret = down_interruptible(&dd->ipath_eep_sem);
-+	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
- 	if (!ret) {
- 		ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
--		up(&dd->ipath_eep_sem);
-+		mutex_unlock(&dd->ipath_eep_lock);
- 	}
- 
- 	return ret;
-@@ -524,10 +524,10 @@ int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
- {
- 	int ret;
- 
--	ret = down_interruptible(&dd->ipath_eep_sem);
-+	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
- 	if (!ret) {
- 		ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
--		up(&dd->ipath_eep_sem);
-+		mutex_unlock(&dd->ipath_eep_lock);
- 	}
- 
- 	return ret;
-@@ -574,7 +574,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
- 	struct ipath_devdata *dd0 = ipath_lookup(0);
- 
- 	if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
--		u8 *bguid, oguid;
-+		u8 oguid;
- 		dd->ipath_guid = dd0->ipath_guid;
- 		bguid = (u8 *) & dd->ipath_guid;
- 
-@@ -616,9 +616,9 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
- 		goto bail;
- 	}
- 
--	down(&dd->ipath_eep_sem);
-+	mutex_lock(&dd->ipath_eep_lock);
- 	eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
--	up(&dd->ipath_eep_sem);
-+	mutex_unlock(&dd->ipath_eep_lock);
- 
- 	if (eep_stat) {
- 		ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
-@@ -674,7 +674,6 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
- 		 * elsewhere for backward-compatibility.
- 		 */
- 		char *snp = dd->ipath_serial;
--		int len;
- 		memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
- 		snp[sizeof ifp->if_sprefix] = '\0';
- 		len = strlen(snp);
-@@ -764,14 +763,14 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
- 	/* Grab semaphore and read current EEPROM. If we get an
- 	 * error, let go, but if not, keep it until we finish write.
- 	 */
--	ret = down_interruptible(&dd->ipath_eep_sem);
-+	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
- 	if (ret) {
- 		ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
- 		goto free_bail;
- 	}
- 	ret = ipath_eeprom_internal_read(dd, 0, buf, len);
- 	if (ret) {
--		up(&dd->ipath_eep_sem);
-+		mutex_unlock(&dd->ipath_eep_lock);
- 		ipath_dev_err(dd, "Unable read EEPROM for logging\n");
- 		goto free_bail;
- 	}
-@@ -779,7 +778,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
- 
- 	csum = flash_csum(ifp, 0);
- 	if (csum != ifp->if_csum) {
--		up(&dd->ipath_eep_sem);
-+		mutex_unlock(&dd->ipath_eep_lock);
- 		ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
- 				csum, ifp->if_csum);
- 		ret = 1;
-@@ -849,7 +848,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
- 		csum = flash_csum(ifp, 1);
- 		ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
- 	}
--	up(&dd->ipath_eep_sem);
-+	mutex_unlock(&dd->ipath_eep_lock);
- 	if (ret)
- 		ipath_dev_err(dd, "Failed updating EEPROM\n");
- 
-diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
-index 5de3243..7e025c8 100644
---- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
-+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
-@@ -169,7 +169,7 @@ static int ipath_get_base_info(struct file *fp,
- 		kinfo->spi_piocnt = dd->ipath_pbufsport;
- 		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
- 		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
--			dd->ipath_palign * pd->port_port;
-+			dd->ipath_ureg_align * pd->port_port;
- 	} else if (master) {
- 		kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
- 				    (dd->ipath_pbufsport % subport_cnt);
-@@ -186,7 +186,7 @@ static int ipath_get_base_info(struct file *fp,
- 	}
- 	if (shared) {
- 		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
--			dd->ipath_palign * pd->port_port;
-+			dd->ipath_ureg_align * pd->port_port;
- 		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
- 		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
- 		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
-@@ -742,11 +742,12 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
- 		 * updated and correct itself, even in the face of software
- 		 * bugs.
- 		 */
--		*(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0;
--		set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
-+		if (pd->port_rcvhdrtail_kvaddr)
-+			ipath_clear_rcvhdrtail(pd);
-+		set_bit(dd->ipath_r_portenable_shift + pd->port_port,
- 			&dd->ipath_rcvctrl);
- 	} else
--		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
-+		clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
- 			  &dd->ipath_rcvctrl);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- 			 dd->ipath_rcvctrl);
-@@ -881,7 +882,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
- 
- 	egrcnt = dd->ipath_rcvegrcnt;
- 	/* TID number offset for this port */
--	egroff = pd->port_port * egrcnt;
-+	egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
- 	egrsize = dd->ipath_rcvegrbufsize;
- 	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
- 		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
-@@ -1049,11 +1050,6 @@ static int mmap_piobufs(struct vm_area_struct *vma,
- 
- 	phys = dd->ipath_physaddr + piobufs;
- 
--	/*
--	 * Don't mark this as non-cached, or we don't get the
--	 * write combining behavior we want on the PIO buffers!
--	 */
+-	return vmcs_readl(sf->base);
+-}
 -
- #if defined(__powerpc__)
- 	/* There isn't a generic way to specify writethrough mappings */
- 	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-@@ -1120,33 +1116,24 @@ bail:
- }
- 
- /*
-- * ipath_file_vma_nopage - handle a VMA page fault.
-+ * ipath_file_vma_fault - handle a VMA page fault.
-  */
--static struct page *ipath_file_vma_nopage(struct vm_area_struct *vma,
--					  unsigned long address, int *type)
-+static int ipath_file_vma_fault(struct vm_area_struct *vma,
-+					struct vm_fault *vmf)
- {
--	unsigned long offset = address - vma->vm_start;
--	struct page *page = NOPAGE_SIGBUS;
--	void *pageptr;
-+	struct page *page;
- 
--	/*
--	 * Convert the vmalloc address into a struct page.
--	 */
--	pageptr = (void *)(offset + (vma->vm_pgoff << PAGE_SHIFT));
--	page = vmalloc_to_page(pageptr);
-+	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
- 	if (!page)
--		goto out;
+-static void vmx_get_segment(struct kvm_vcpu *vcpu,
+-			    struct kvm_segment *var, int seg)
+-{
+-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+-	u32 ar;
 -
--	/* Increment the reference count. */
-+		return VM_FAULT_SIGBUS;
- 	get_page(page);
--	if (type)
--		*type = VM_FAULT_MINOR;
--out:
--	return page;
-+	vmf->page = page;
-+
-+	return 0;
- }
- 
- static struct vm_operations_struct ipath_file_vm_ops = {
--	.nopage = ipath_file_vma_nopage,
-+	.fault = ipath_file_vma_fault,
- };
- 
- static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
-@@ -1284,7 +1271,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
- 		goto bail;
- 	}
- 
--	ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
-+	ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
- 	if (!pd->port_subport_cnt) {
- 		/* port is not shared */
- 		piocnt = dd->ipath_pbufsport;
-@@ -1400,7 +1387,10 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
- 	pollflag = ipath_poll_hdrqfull(pd);
- 
- 	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
--	tail = *(volatile u64 *)pd->port_rcvhdrtail_kvaddr;
-+	if (pd->port_rcvhdrtail_kvaddr)
-+		tail = ipath_get_rcvhdrtail(pd);
-+	else
-+		tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
- 
- 	if (head != tail)
- 		pollflag |= POLLIN | POLLRDNORM;
-@@ -1410,7 +1400,7 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
- 		/* flush waiting flag so we don't miss an event */
- 		wmb();
- 
--		set_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT,
-+		set_bit(pd->port_port + dd->ipath_r_intravail_shift,
- 			&dd->ipath_rcvctrl);
- 
- 		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-@@ -1790,6 +1780,7 @@ static int find_shared_port(struct file *fp,
- 			}
- 			port_fp(fp) = pd;
- 			subport_fp(fp) = pd->port_cnt++;
-+			pd->port_subpid[subport_fp(fp)] = current->pid;
- 			tidcursor_fp(fp) = 0;
- 			pd->active_slaves |= 1 << subport_fp(fp);
- 			ipath_cdbg(PROC,
-@@ -1920,8 +1911,7 @@ static int ipath_do_user_init(struct file *fp,
- 	 */
- 	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
- 	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
--	dd->ipath_lastegrheads[pd->port_port] = -1;
--	dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
-+	pd->port_lastrcvhdrqtail = -1;
- 	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
- 		pd->port_port, head32);
- 	pd->port_tidcursor = 0;	/* start at beginning after open */
-@@ -1941,11 +1931,13 @@ static int ipath_do_user_init(struct file *fp,
- 	 * We explictly set the in-memory copy to 0 beforehand, so we don't
- 	 * have to wait to be sure the DMA update has happened.
- 	 */
--	*(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0ULL;
--	set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
-+	if (pd->port_rcvhdrtail_kvaddr)
-+		ipath_clear_rcvhdrtail(pd);
-+	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
- 		&dd->ipath_rcvctrl);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
--			 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
-+			dd->ipath_rcvctrl &
-+			~(1ULL << dd->ipath_r_tailupd_shift));
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- 			 dd->ipath_rcvctrl);
- 	/* Notify any waiting slaves */
-@@ -2022,6 +2014,7 @@ static int ipath_close(struct inode *in, struct file *fp)
- 		 * the slave(s) don't wait for receive data forever.
- 		 */
- 		pd->active_slaves &= ~(1 << fd->subport);
-+		pd->port_subpid[fd->subport] = 0;
- 		mutex_unlock(&ipath_mutex);
- 		goto bail;
- 	}
-@@ -2054,9 +2047,9 @@ static int ipath_close(struct inode *in, struct file *fp)
- 	if (dd->ipath_kregbase) {
- 		int i;
- 		/* atomically clear receive enable port and intr avail. */
--		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
-+		clear_bit(dd->ipath_r_portenable_shift + port,
- 			  &dd->ipath_rcvctrl);
--		clear_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT,
-+		clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
- 			  &dd->ipath_rcvctrl);
- 		ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
- 			dd->ipath_rcvctrl);
-@@ -2149,11 +2142,15 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
- 
- static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
- {
--	u64 reg = dd->ipath_sendctrl;
-+	unsigned long flags;
- 
--	clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
--	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
-+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-+		dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- 
- 	return 0;
- }
-@@ -2227,6 +2224,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
- 		dest = &cmd.cmd.poll_type;
- 		src = &ucmd->cmd.poll_type;
- 		break;
-+	case IPATH_CMD_ARMLAUNCH_CTRL:
-+		copy = sizeof(cmd.cmd.armlaunch_ctrl);
-+		dest = &cmd.cmd.armlaunch_ctrl;
-+		src = &ucmd->cmd.armlaunch_ctrl;
-+		break;
- 	default:
- 		ret = -EINVAL;
- 		goto bail;
-@@ -2302,6 +2304,12 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
- 	case IPATH_CMD_POLL_TYPE:
- 		pd->poll_type = cmd.cmd.poll_type;
- 		break;
-+	case IPATH_CMD_ARMLAUNCH_CTRL:
-+		if (cmd.cmd.armlaunch_ctrl)
-+			ipath_enable_armlaunch(pd->port_dd);
-+		else
-+			ipath_disable_armlaunch(pd->port_dd);
-+		break;
- 	}
- 
- 	if (ret >= 0)
-diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
-index 262c25d..23faba9 100644
---- a/drivers/infiniband/hw/ipath/ipath_fs.c
-+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
-@@ -108,21 +108,16 @@ static const struct file_operations atomic_stats_ops = {
- 	.read = atomic_stats_read,
- };
- 
--#define NUM_COUNTERS sizeof(struct infinipath_counters) / sizeof(u64)
+-	var->base = vmcs_readl(sf->base);
+-	var->limit = vmcs_read32(sf->limit);
+-	var->selector = vmcs_read16(sf->selector);
+-	ar = vmcs_read32(sf->ar_bytes);
+-	if (ar & AR_UNUSABLE_MASK)
+-		ar = 0;
+-	var->type = ar & 15;
+-	var->s = (ar >> 4) & 1;
+-	var->dpl = (ar >> 5) & 3;
+-	var->present = (ar >> 7) & 1;
+-	var->avl = (ar >> 12) & 1;
+-	var->l = (ar >> 13) & 1;
+-	var->db = (ar >> 14) & 1;
+-	var->g = (ar >> 15) & 1;
+-	var->unusable = (ar >> 16) & 1;
+-}
 -
- static ssize_t atomic_counters_read(struct file *file, char __user *buf,
- 				    size_t count, loff_t *ppos)
- {
--	u64 counters[NUM_COUNTERS];
--	u16 i;
-+	struct infinipath_counters counters;
- 	struct ipath_devdata *dd;
- 
- 	dd = file->f_path.dentry->d_inode->i_private;
-+	dd->ipath_f_read_counters(dd, &counters);
- 
--	for (i = 0; i < NUM_COUNTERS; i++)
--		counters[i] = ipath_snap_cntr(dd, i);
+-static u32 vmx_segment_access_rights(struct kvm_segment *var)
+-{
+-	u32 ar;
 -
--	return simple_read_from_buffer(buf, count, ppos, counters,
-+	return simple_read_from_buffer(buf, count, ppos, &counters,
- 				       sizeof counters);
- }
- 
-@@ -243,8 +238,7 @@ static int create_device_files(struct super_block *sb,
- 
- 	snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
- 	ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
--			  (struct file_operations *) &simple_dir_operations,
--			  dd);
-+			  &simple_dir_operations, dd);
- 	if (ret) {
- 		printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
- 		goto bail;
-diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
-index ddbebe4..9e2ced3 100644
---- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
-+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
-@@ -148,10 +148,57 @@ struct _infinipath_do_not_use_kernel_regs {
- 	unsigned long long ReservedSW2[4];
- };
- 
--#define IPATH_KREG_OFFSET(field) (offsetof(struct \
--    _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-+struct _infinipath_do_not_use_counters {
-+	__u64 LBIntCnt;
-+	__u64 LBFlowStallCnt;
-+	__u64 Reserved1;
-+	__u64 TxUnsupVLErrCnt;
-+	__u64 TxDataPktCnt;
-+	__u64 TxFlowPktCnt;
-+	__u64 TxDwordCnt;
-+	__u64 TxLenErrCnt;
-+	__u64 TxMaxMinLenErrCnt;
-+	__u64 TxUnderrunCnt;
-+	__u64 TxFlowStallCnt;
-+	__u64 TxDroppedPktCnt;
-+	__u64 RxDroppedPktCnt;
-+	__u64 RxDataPktCnt;
-+	__u64 RxFlowPktCnt;
-+	__u64 RxDwordCnt;
-+	__u64 RxLenErrCnt;
-+	__u64 RxMaxMinLenErrCnt;
-+	__u64 RxICRCErrCnt;
-+	__u64 RxVCRCErrCnt;
-+	__u64 RxFlowCtrlErrCnt;
-+	__u64 RxBadFormatCnt;
-+	__u64 RxLinkProblemCnt;
-+	__u64 RxEBPCnt;
-+	__u64 RxLPCRCErrCnt;
-+	__u64 RxBufOvflCnt;
-+	__u64 RxTIDFullErrCnt;
-+	__u64 RxTIDValidErrCnt;
-+	__u64 RxPKeyMismatchCnt;
-+	__u64 RxP0HdrEgrOvflCnt;
-+	__u64 RxP1HdrEgrOvflCnt;
-+	__u64 RxP2HdrEgrOvflCnt;
-+	__u64 RxP3HdrEgrOvflCnt;
-+	__u64 RxP4HdrEgrOvflCnt;
-+	__u64 RxP5HdrEgrOvflCnt;
-+	__u64 RxP6HdrEgrOvflCnt;
-+	__u64 RxP7HdrEgrOvflCnt;
-+	__u64 RxP8HdrEgrOvflCnt;
-+	__u64 Reserved6;
-+	__u64 Reserved7;
-+	__u64 IBStatusChangeCnt;
-+	__u64 IBLinkErrRecoveryCnt;
-+	__u64 IBLinkDownedCnt;
-+	__u64 IBSymbolErrCnt;
-+};
-+
-+#define IPATH_KREG_OFFSET(field) (offsetof( \
-+	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
- #define IPATH_CREG_OFFSET(field) (offsetof( \
--    struct infinipath_counters, field) / sizeof(u64))
-+	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
- 
- static const struct ipath_kregs ipath_ht_kregs = {
- 	.kr_control = IPATH_KREG_OFFSET(Control),
-@@ -282,6 +329,9 @@ static const struct ipath_cregs ipath_ht_cregs = {
- #define INFINIPATH_HWE_HTAPLL_RFSLIP        0x1000000000000000ULL
- #define INFINIPATH_HWE_SERDESPLLFAILED      0x2000000000000000ULL
- 
-+#define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf
-+#define IBA6110_IBCS_LINKSTATE_SHIFT 4
-+
- /* kr_extstatus bits */
- #define INFINIPATH_EXTS_FREQSEL 0x2
- #define INFINIPATH_EXTS_SERDESSEL 0x4
-@@ -296,6 +346,12 @@ static const struct ipath_cregs ipath_ht_cregs = {
- #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
- #define INFINIPATH_RT_BUFSIZE_SHIFT 48
- 
-+#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-+#define INFINIPATH_R_TAILUPD_SHIFT 31
-+
-+/* kr_xgxsconfig bits */
-+#define INFINIPATH_XGXS_RESET          0x7ULL
-+
- /*
-  * masks and bits that are different in different chips, or present only
-  * in one
-@@ -652,7 +708,6 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
- 			      "with ID %u\n", boardrev);
- 		snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
- 			 boardrev);
--		ret = 1;
- 		break;
- 	}
- 	if (n)
-@@ -686,6 +741,13 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
- 			      dd->ipath_htspeed);
- 	ret = 0;
- 
-+	/*
-+	 * set here, not in ipath_init_*_funcs because we have to do
-+	 * it after we can read chip registers.
-+	 */
-+	dd->ipath_ureg_align =
-+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-+
- bail:
- 	return ret;
- }
-@@ -969,7 +1031,8 @@ static int ipath_setup_ht_config(struct ipath_devdata *dd,
- 	do {
- 		u8 cap_type;
- 
--		/* the HT capability type byte is 3 bytes after the
-+		/*
-+		 * The HT capability type byte is 3 bytes after the
- 		 * capability byte.
- 		 */
- 		if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
-@@ -982,6 +1045,8 @@ static int ipath_setup_ht_config(struct ipath_devdata *dd,
- 	} while ((pos = pci_find_next_capability(pdev, pos,
- 						 PCI_CAP_ID_HT)));
- 
-+	dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
-+
- bail:
- 	return ret;
- }
-@@ -1074,11 +1139,55 @@ static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
- 
- static void ipath_init_ht_variables(struct ipath_devdata *dd)
- {
-+	/*
-+	 * setup the register offsets, since they are different for each
-+	 * chip
-+	 */
-+	dd->ipath_kregs = &ipath_ht_kregs;
-+	dd->ipath_cregs = &ipath_ht_cregs;
-+
- 	dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
- 	dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
- 	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- 	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
- 
-+	/*
-+	 * Fill in data for field-values that change in newer chips.
-+	 * We dynamically specify only the mask for LINKTRAININGSTATE
-+	 * and only the shift for LINKSTATE, as they are the only ones
-+	 * that change.  Also precalculate the 3 link states of interest
-+	 * and the combined mask.
-+	 */
-+	dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT;
-+	dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK;
-+	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
-+		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
-+	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-+		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
-+	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-+		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
-+	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-+		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-+
-+	/*
-+	 * Fill in data for ibcc field-values that change in newer chips.
-+	 * We dynamically specify only the mask for LINKINITCMD
-+	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
-+	 * the only ones that change.
-+	 */
-+	dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
-+	dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
-+	dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-+
-+	/* Fill in shifts for RcvCtrl. */
-+	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
-+	dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
-+	dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
-+	dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */
-+
- 	dd->ipath_i_bitsextant =
- 		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
- 		(INFINIPATH_I_RCVAVAIL_MASK <<
-@@ -1135,6 +1244,8 @@ static void ipath_init_ht_variables(struct ipath_devdata *dd)
- 
- 	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- 	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-+	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
-+	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
- 
- 	/*
- 	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
-@@ -1148,9 +1259,17 @@ static void ipath_init_ht_variables(struct ipath_devdata *dd)
- 		INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- 		INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
- 
--	dd->ipath_eep_st_masks[2].errs_to_log =
--		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
-+	dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
- 
-+	dd->delay_mult = 2; /* SDR, 4X, can't change */
-+
-+	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
-+	dd->ipath_link_speed_supported = IPATH_IB_SDR;
-+	dd->ipath_link_width_enabled = IB_WIDTH_4X;
-+	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
-+	/* these can't change for this chip, so set once */
-+	dd->ipath_link_width_active = dd->ipath_link_width_enabled;
-+	dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
- }
- 
- /**
-@@ -1205,14 +1324,16 @@ static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
- 	val &= ~INFINIPATH_HWE_HTCMISCERR4;
- 
- 	/*
--	 * PLL ignored because MDIO interface has a logic problem
--	 * for reads, on Comstock and Ponderosa.  BRINGUP
-+	 * PLL ignored because unused MDIO interface has a logic problem
- 	 */
- 	if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
- 		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- 	dd->ipath_hwerrmask = val;
- }
- 
-+
-+
-+
- /**
-  * ipath_ht_bringup_serdes - bring up the serdes
-  * @dd: the infinipath device
-@@ -1284,16 +1405,6 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
- 	}
- 
- 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
--	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
--	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
--		val &= ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
--			 INFINIPATH_XGXS_MDIOADDR_SHIFT);
+-	if (var->unusable)
+-		ar = 1 << 16;
+-	else {
+-		ar = var->type & 15;
+-		ar |= (var->s & 1) << 4;
+-		ar |= (var->dpl & 3) << 5;
+-		ar |= (var->present & 1) << 7;
+-		ar |= (var->avl & 1) << 12;
+-		ar |= (var->l & 1) << 13;
+-		ar |= (var->db & 1) << 14;
+-		ar |= (var->g & 1) << 15;
+-	}
+-	if (ar == 0) /* a 0 value means unusable */
+-		ar = AR_UNUSABLE_MASK;
+-
+-	return ar;
+-}
+-
+-static void vmx_set_segment(struct kvm_vcpu *vcpu,
+-			    struct kvm_segment *var, int seg)
+-{
+-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+-	u32 ar;
+-
+-	if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
+-		vcpu->rmode.tr.selector = var->selector;
+-		vcpu->rmode.tr.base = var->base;
+-		vcpu->rmode.tr.limit = var->limit;
+-		vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+-		return;
+-	}
+-	vmcs_writel(sf->base, var->base);
+-	vmcs_write32(sf->limit, var->limit);
+-	vmcs_write16(sf->selector, var->selector);
+-	if (vcpu->rmode.active && var->s) {
 -		/*
--		 * we use address 3
+-		 * Hack real-mode segments into vm86 compatibility.
 -		 */
--		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
--		change = 1;
--	}
- 	if (val & INFINIPATH_XGXS_RESET) {
- 		/* normally true after boot */
- 		val &= ~INFINIPATH_XGXS_RESET;
-@@ -1329,21 +1440,6 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
- 		   (unsigned long long)
- 		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
- 
--	if (!ipath_waitfor_mdio_cmdready(dd)) {
--		ipath_write_kreg(dd, dd->ipath_kregs->kr_mdio,
--				 ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
--						IPATH_MDIO_CTRL_XGXS_REG_8,
--						0));
--		if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
--					   IPATH_MDIO_DATAVALID, &val))
--			ipath_dbg("Never got MDIO data for XGXS status "
--				  "read\n");
--		else
--			ipath_cdbg(VERBOSE, "MDIO Read reg8, "
--				   "'bank' 31 %x\n", (u32) val);
+-		if (var->base == 0xffff0000 && var->selector == 0xf000)
+-			vmcs_writel(sf->base, 0xf0000);
+-		ar = 0xf3;
 -	} else
--		ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
+-		ar = vmx_segment_access_rights(var);
+-	vmcs_write32(sf->ar_bytes, ar);
+-}
 -
- 	return ret;		/* for now, say we always succeeded */
- }
- 
-@@ -1396,6 +1492,7 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
- 			pa |= lenvalid | INFINIPATH_RT_VALID;
- 		}
- 	}
-+
- 	writeq(pa, tidptr);
- }
- 
-@@ -1526,8 +1623,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
- 	}
- 
- 	ipath_get_eeprom_info(dd);
--	if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
--		dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
-+	if (dd->ipath_boardrev == 5) {
- 		/*
- 		 * Later production QHT7040 has same changes as QHT7140, so
- 		 * can use GPIO interrupts.  They have serial #'s starting
-@@ -1602,6 +1698,210 @@ static void ipath_ht_free_irq(struct ipath_devdata *dd)
- 	dd->ipath_intconfig = 0;
- }
- 
-+static struct ipath_message_header *
-+ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-+{
-+	return (struct ipath_message_header *)
-+		&rhf_addr[sizeof(u64) / sizeof(u32)];
-+}
-+
-+static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports)
-+{
-+	dd->ipath_portcnt =
-+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-+	dd->ipath_p0_rcvegrcnt =
-+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-+}
-+
-+static void ipath_ht_read_counters(struct ipath_devdata *dd,
-+				   struct infinipath_counters *cntrs)
-+{
-+	cntrs->LBIntCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
-+	cntrs->LBFlowStallCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
-+	cntrs->TxSDmaDescCnt = 0;
-+	cntrs->TxUnsupVLErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
-+	cntrs->TxDataPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
-+	cntrs->TxFlowPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
-+	cntrs->TxDwordCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
-+	cntrs->TxLenErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
-+	cntrs->TxMaxMinLenErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
-+	cntrs->TxUnderrunCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
-+	cntrs->TxFlowStallCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
-+	cntrs->TxDroppedPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
-+	cntrs->RxDroppedPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
-+	cntrs->RxDataPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
-+	cntrs->RxFlowPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
-+	cntrs->RxDwordCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
-+	cntrs->RxLenErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
-+	cntrs->RxMaxMinLenErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
-+	cntrs->RxICRCErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
-+	cntrs->RxVCRCErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
-+	cntrs->RxFlowCtrlErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
-+	cntrs->RxBadFormatCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
-+	cntrs->RxLinkProblemCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
-+	cntrs->RxEBPCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
-+	cntrs->RxLPCRCErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
-+	cntrs->RxBufOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
-+	cntrs->RxTIDFullErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
-+	cntrs->RxTIDValidErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
-+	cntrs->RxPKeyMismatchCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
-+	cntrs->RxP0HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
-+	cntrs->RxP1HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
-+	cntrs->RxP2HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
-+	cntrs->RxP3HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
-+	cntrs->RxP4HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
-+	cntrs->RxP5HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt));
-+	cntrs->RxP6HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt));
-+	cntrs->RxP7HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt));
-+	cntrs->RxP8HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt));
-+	cntrs->RxP9HdrEgrOvflCnt = 0;
-+	cntrs->RxP10HdrEgrOvflCnt = 0;
-+	cntrs->RxP11HdrEgrOvflCnt = 0;
-+	cntrs->RxP12HdrEgrOvflCnt = 0;
-+	cntrs->RxP13HdrEgrOvflCnt = 0;
-+	cntrs->RxP14HdrEgrOvflCnt = 0;
-+	cntrs->RxP15HdrEgrOvflCnt = 0;
-+	cntrs->RxP16HdrEgrOvflCnt = 0;
-+	cntrs->IBStatusChangeCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
-+	cntrs->IBLinkErrRecoveryCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
-+	cntrs->IBLinkDownedCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
-+	cntrs->IBSymbolErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
-+	cntrs->RxVL15DroppedPktCnt = 0;
-+	cntrs->RxOtherLocalPhyErrCnt = 0;
-+	cntrs->PcieRetryBufDiagQwordCnt = 0;
-+	cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
-+	cntrs->LocalLinkIntegrityErrCnt =
-+		(dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-+		dd->ipath_lli_errs : dd->ipath_lli_errors;
-+	cntrs->RxVlErrCnt = 0;
-+	cntrs->RxDlidFltrCnt = 0;
-+}
-+
-+
-+/* no interrupt fallback for these chips */
-+static int ipath_ht_nointr_fallback(struct ipath_devdata *dd)
-+{
-+	return 0;
-+}
-+
-+
-+/*
-+ * reset the XGXS (between serdes and IBC).  Slightly less intrusive
-+ * than resetting the IBC or external link state, and useful in some
-+ * cases to cause some retraining.  To do this right, we reset IBC
-+ * as well.
-+ */
-+static void ipath_ht_xgxs_reset(struct ipath_devdata *dd)
-+{
-+	u64 val, prev_val;
-+
-+	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-+	val = prev_val | INFINIPATH_XGXS_RESET;
-+	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-+			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-+			 dd->ipath_control);
-+}
-+
-+
-+static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which)
-+{
-+	int ret;
-+
-+	switch (which) {
-+	case IPATH_IB_CFG_LWID:
-+		ret = dd->ipath_link_width_active;
-+		break;
-+	case IPATH_IB_CFG_SPD:
-+		ret = dd->ipath_link_speed_active;
-+		break;
-+	case IPATH_IB_CFG_LWID_ENB:
-+		ret = dd->ipath_link_width_enabled;
-+		break;
-+	case IPATH_IB_CFG_SPD_ENB:
-+		ret = dd->ipath_link_speed_enabled;
-+		break;
-+	default:
-+		ret =  -ENOTSUPP;
-+		break;
-+	}
-+	return ret;
-+}
-+
-+
-+/* we assume range checking is already done, if needed */
-+static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-+{
-+	int ret = 0;
-+
-+	if (which == IPATH_IB_CFG_LWID_ENB)
-+		dd->ipath_link_width_enabled = val;
-+	else if (which == IPATH_IB_CFG_SPD_ENB)
-+		dd->ipath_link_speed_enabled = val;
-+	else
-+		ret = -ENOTSUPP;
-+	return ret;
-+}
-+
-+
-+static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-+{
-+}
-+
-+
-+static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-+{
-+	ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs),
-+		ipath_ib_linktrstate(dd, ibcs));
-+	return 0;
-+}
-+
-+
- /**
-  * ipath_init_iba6110_funcs - set up the chip-specific function pointers
-  * @dd: the infinipath device
-@@ -1626,22 +1926,19 @@ void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
- 	dd->ipath_f_setextled = ipath_setup_ht_setextled;
- 	dd->ipath_f_get_base_info = ipath_ht_get_base_info;
- 	dd->ipath_f_free_irq = ipath_ht_free_irq;
+-static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+-{
+-	u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
 -
--	/*
--	 * initialize chip-specific variables
--	 */
- 	dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
-+	dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback;
-+	dd->ipath_f_get_msgheader = ipath_ht_get_msgheader;
-+	dd->ipath_f_config_ports = ipath_ht_config_ports;
-+	dd->ipath_f_read_counters = ipath_ht_read_counters;
-+	dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset;
-+	dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg;
-+	dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg;
-+	dd->ipath_f_config_jint = ipath_ht_config_jint;
-+	dd->ipath_f_ib_updown = ipath_ht_ib_updown;
- 
- 	/*
--	 * setup the register offsets, since they are different for each
--	 * chip
--	 */
--	dd->ipath_kregs = &ipath_ht_kregs;
--	dd->ipath_cregs = &ipath_ht_cregs;
+-	*db = (ar >> 14) & 1;
+-	*l = (ar >> 13) & 1;
+-}
 -
--	/*
--	 * do very early init that is needed before ipath_f_bus is
--	 * called
-+	 * initialize chip-specific variables
- 	 */
- 	ipath_init_ht_variables(dd);
- }
-diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
-index 0103d6f..c7a2f50 100644
---- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
-+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
-@@ -145,10 +145,57 @@ struct _infinipath_do_not_use_kernel_regs {
- 	unsigned long long Reserved12;
- };
- 
--#define IPATH_KREG_OFFSET(field) (offsetof(struct \
--    _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-+struct _infinipath_do_not_use_counters {
-+	__u64 LBIntCnt;
-+	__u64 LBFlowStallCnt;
-+	__u64 Reserved1;
-+	__u64 TxUnsupVLErrCnt;
-+	__u64 TxDataPktCnt;
-+	__u64 TxFlowPktCnt;
-+	__u64 TxDwordCnt;
-+	__u64 TxLenErrCnt;
-+	__u64 TxMaxMinLenErrCnt;
-+	__u64 TxUnderrunCnt;
-+	__u64 TxFlowStallCnt;
-+	__u64 TxDroppedPktCnt;
-+	__u64 RxDroppedPktCnt;
-+	__u64 RxDataPktCnt;
-+	__u64 RxFlowPktCnt;
-+	__u64 RxDwordCnt;
-+	__u64 RxLenErrCnt;
-+	__u64 RxMaxMinLenErrCnt;
-+	__u64 RxICRCErrCnt;
-+	__u64 RxVCRCErrCnt;
-+	__u64 RxFlowCtrlErrCnt;
-+	__u64 RxBadFormatCnt;
-+	__u64 RxLinkProblemCnt;
-+	__u64 RxEBPCnt;
-+	__u64 RxLPCRCErrCnt;
-+	__u64 RxBufOvflCnt;
-+	__u64 RxTIDFullErrCnt;
-+	__u64 RxTIDValidErrCnt;
-+	__u64 RxPKeyMismatchCnt;
-+	__u64 RxP0HdrEgrOvflCnt;
-+	__u64 RxP1HdrEgrOvflCnt;
-+	__u64 RxP2HdrEgrOvflCnt;
-+	__u64 RxP3HdrEgrOvflCnt;
-+	__u64 RxP4HdrEgrOvflCnt;
-+	__u64 RxP5HdrEgrOvflCnt;
-+	__u64 RxP6HdrEgrOvflCnt;
-+	__u64 RxP7HdrEgrOvflCnt;
-+	__u64 RxP8HdrEgrOvflCnt;
-+	__u64 Reserved6;
-+	__u64 Reserved7;
-+	__u64 IBStatusChangeCnt;
-+	__u64 IBLinkErrRecoveryCnt;
-+	__u64 IBLinkDownedCnt;
-+	__u64 IBSymbolErrCnt;
-+};
-+
-+#define IPATH_KREG_OFFSET(field) (offsetof( \
-+	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
- #define IPATH_CREG_OFFSET(field) (offsetof( \
--    struct infinipath_counters, field) / sizeof(u64))
-+	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
- 
- static const struct ipath_kregs ipath_pe_kregs = {
- 	.kr_control = IPATH_KREG_OFFSET(Control),
-@@ -282,6 +329,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
- #define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
- #define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL
- 
-+#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
-+#define IBA6120_IBCS_LINKSTATE_SHIFT 4
-+
- /* kr_extstatus bits */
- #define INFINIPATH_EXTS_FREQSEL 0x2
- #define INFINIPATH_EXTS_SERDESSEL 0x4
-@@ -296,6 +346,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
- #define IPATH_GPIO_SCL (1ULL << \
- 	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
- 
-+#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-+#define INFINIPATH_R_TAILUPD_SHIFT 31
-+
- /* 6120 specific hardware errors... */
- static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
- 	INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
-@@ -320,10 +373,28 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
- 		        INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
- 		        << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
- 
--static int ipath_pe_txe_recover(struct ipath_devdata *);
- static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
- 			       u32, unsigned long);
- 
-+/*
-+ * On platforms using this chip, and not having ordered WC stores, we
-+ * can get TXE parity errors due to speculative reads to the PIO buffers,
-+ * and this, due to a chip bug can result in (many) false parity error
-+ * reports.  So it's a debug print on those, and an info print on systems
-+ * where the speculative reads don't occur.
-+ */
-+static void ipath_pe_txe_recover(struct ipath_devdata *dd)
-+{
-+	if (ipath_unordered_wc())
-+		ipath_dbg("Recovering from TXE PIO parity error\n");
-+	else {
-+		++ipath_stats.sps_txeparity;
-+		dev_info(&dd->pcidev->dev,
-+			"Recovering from TXE PIO parity error\n");
-+	}
-+}
-+
-+
- /**
-  * ipath_pe_handle_hwerrors - display hardware errors.
-  * @dd: the infinipath device
-@@ -403,35 +474,11 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- 		 * occur if a processor speculative read is done to the PIO
- 		 * buffer while we are sending a packet, for example.
- 		 */
--		if ((hwerrs & TXE_PIO_PARITY) && ipath_pe_txe_recover(dd))
-+		if (hwerrs & TXE_PIO_PARITY) {
-+			ipath_pe_txe_recover(dd);
- 			hwerrs &= ~TXE_PIO_PARITY;
--		if (hwerrs) {
--			/*
--			 * if any set that we aren't ignoring only make the
--			 * complaint once, in case it's stuck or recurring,
--			 * and we get here multiple times
--			 * Force link down, so switch knows, and
--			 * LEDs are turned off
--			 */
--			if (dd->ipath_flags & IPATH_INITTED) {
--				ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
--				ipath_setup_pe_setextled(dd,
--					INFINIPATH_IBCS_L_STATE_DOWN,
--					INFINIPATH_IBCS_LT_STATE_DISABLED);
--				ipath_dev_err(dd, "Fatal Hardware Error (freeze "
--					      "mode), no longer usable, SN %.16s\n",
--						  dd->ipath_serial);
--				isfatal = 1;
--			}
--			/*
--			 * Mark as having had an error for driver, and also
--			 * for /sys and status word mapped to user programs.
--			 * This marks unit as not usable, until reset
--			 */
--			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
--			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
--			dd->ipath_flags &= ~IPATH_INITTED;
--		} else {
-+		}
-+		if (!hwerrs) {
- 			static u32 freeze_cnt;
- 
- 			freeze_cnt++;
-@@ -485,7 +532,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- 
- 	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
- 		/*
--		 * If it occurs, it is left masked since the eternal
-+		 * If it occurs, it is left masked since the external
- 		 * interface is unused
- 		 */
- 		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-@@ -563,6 +610,14 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
- 			dd->ipath_f_put_tid = ipath_pe_put_tid_2;
- 	}
- 
-+
-+	/*
-+	 * set here, not in ipath_init_*_funcs because we have to do
-+	 * it after we can read chip registers.
-+	 */
-+	dd->ipath_ureg_align =
-+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-+
- 	return ret;
- }
- 
-@@ -667,17 +722,8 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
- 
- 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- 	prev_val = val;
--	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
--	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
--		val &=
--			~(INFINIPATH_XGXS_MDIOADDR_MASK <<
--			  INFINIPATH_XGXS_MDIOADDR_SHIFT);
--		/* MDIO address 3 */
--		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
--	}
--	if (val & INFINIPATH_XGXS_RESET) {
-+	if (val & INFINIPATH_XGXS_RESET)
- 		val &= ~INFINIPATH_XGXS_RESET;
+-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+-{
+-	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
+-	dt->base = vmcs_readl(GUEST_IDTR_BASE);
+-}
+-
+-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+-{
+-	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
+-	vmcs_writel(GUEST_IDTR_BASE, dt->base);
+-}
+-
+-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+-{
+-	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
+-	dt->base = vmcs_readl(GUEST_GDTR_BASE);
+-}
+-
+-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+-{
+-	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
+-	vmcs_writel(GUEST_GDTR_BASE, dt->base);
+-}
+-
+-static int init_rmode_tss(struct kvm* kvm)
+-{
+-	struct page *p1, *p2, *p3;
+-	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
+-	char *page;
+-
+-	p1 = gfn_to_page(kvm, fn++);
+-	p2 = gfn_to_page(kvm, fn++);
+-	p3 = gfn_to_page(kvm, fn);
+-
+-	if (!p1 || !p2 || !p3) {
+-		kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);
+-		return 0;
 -	}
- 	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
- 	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
- 		/* need to compensate for Tx inversion in partner */
-@@ -707,21 +753,6 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
- 		   (unsigned long long)
- 		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
- 
--	if (!ipath_waitfor_mdio_cmdready(dd)) {
--		ipath_write_kreg(
--			dd, dd->ipath_kregs->kr_mdio,
--			ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
--				       IPATH_MDIO_CTRL_XGXS_REG_8, 0));
--		if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
--					   IPATH_MDIO_DATAVALID, &val))
--			ipath_dbg("Never got MDIO data for XGXS "
--				  "status read\n");
--		else
--			ipath_cdbg(VERBOSE, "MDIO Read reg8, "
--				   "'bank' 31 %x\n", (u32) val);
--	} else
--		ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
 -
- 	return ret;
- }
- 
-@@ -902,12 +933,27 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
- 	else
- 		ipath_dev_err(dd, "Can't find PCI Express "
- 			      "capability!\n");
-+
-+	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
-+	dd->ipath_link_speed_supported = IPATH_IB_SDR;
-+	dd->ipath_link_width_enabled = IB_WIDTH_4X;
-+	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
-+	/* these can't change for this chip, so set once */
-+	dd->ipath_link_width_active = dd->ipath_link_width_enabled;
-+	dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
- 	return 0;
- }
- 
- static void ipath_init_pe_variables(struct ipath_devdata *dd)
- {
- 	/*
-+	 * setup the register offsets, since they are different for each
-+	 * chip
-+	 */
-+	dd->ipath_kregs = &ipath_pe_kregs;
-+	dd->ipath_cregs = &ipath_pe_cregs;
-+
-+	/*
- 	 * bits for selecting i2c direction and values,
- 	 * used for I2C serial flash
- 	 */
-@@ -916,6 +962,43 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
- 	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- 	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
- 
-+	/*
-+	 * Fill in data for field-values that change in newer chips.
-+	 * We dynamically specify only the mask for LINKTRAININGSTATE
-+	 * and only the shift for LINKSTATE, as they are the only ones
-+	 * that change.  Also precalculate the 3 link states of interest
-+	 * and the combined mask.
-+	 */
-+	dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
-+	dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
-+	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
-+		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
-+	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-+		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
-+	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-+		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
-+	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-+		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-+		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-+
-+	/*
-+	 * Fill in data for ibcc field-values that change in newer chips.
-+	 * We dynamically specify only the mask for LINKINITCMD
-+	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
-+	 * the only ones that change.
-+	 */
-+	dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
-+	dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
-+	dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-+
-+	/* Fill in shifts for RcvCtrl. */
-+	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
-+	dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
-+	dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
-+	dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */
-+
- 	/* variables for sanity checking interrupt and errors */
- 	dd->ipath_hwe_bitsextant =
- 		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-@@ -963,6 +1046,8 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
- 
- 	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- 	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-+	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
-+	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
- 
- 	/*
- 	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
-@@ -984,6 +1069,7 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
- 		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
- 
- 
-+	dd->delay_mult = 2; /* SDR, 4X, can't change */
- }
- 
- /* setup the MSI stuff again after a reset.  I'd like to just call
-@@ -1289,6 +1375,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
- 	 */
- 	dd->ipath_rcvhdrentsize = 24;
- 	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-+	dd->ipath_rhf_offset = 0;
-+	dd->ipath_egrtidbase = (u64 __iomem *)
-+		((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
- 
- 	/*
- 	 * To truly support a 4KB MTU (for usermode), we need to
-@@ -1359,34 +1448,204 @@ static void ipath_pe_free_irq(struct ipath_devdata *dd)
- 	dd->ipath_irq = 0;
- }
- 
-+
-+static struct ipath_message_header *
-+ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-+{
-+	return (struct ipath_message_header *)
-+		&rhf_addr[sizeof(u64) / sizeof(u32)];
-+}
-+
-+static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
-+{
-+	dd->ipath_portcnt =
-+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-+	dd->ipath_p0_rcvegrcnt =
-+		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-+}
-+
-+static void ipath_pe_read_counters(struct ipath_devdata *dd,
-+				   struct infinipath_counters *cntrs)
-+{
-+	cntrs->LBIntCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
-+	cntrs->LBFlowStallCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
-+	cntrs->TxSDmaDescCnt = 0;
-+	cntrs->TxUnsupVLErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
-+	cntrs->TxDataPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
-+	cntrs->TxFlowPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
-+	cntrs->TxDwordCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
-+	cntrs->TxLenErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
-+	cntrs->TxMaxMinLenErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
-+	cntrs->TxUnderrunCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
-+	cntrs->TxFlowStallCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
-+	cntrs->TxDroppedPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
-+	cntrs->RxDroppedPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
-+	cntrs->RxDataPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
-+	cntrs->RxFlowPktCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
-+	cntrs->RxDwordCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
-+	cntrs->RxLenErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
-+	cntrs->RxMaxMinLenErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
-+	cntrs->RxICRCErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
-+	cntrs->RxVCRCErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
-+	cntrs->RxFlowCtrlErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
-+	cntrs->RxBadFormatCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
-+	cntrs->RxLinkProblemCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
-+	cntrs->RxEBPCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
-+	cntrs->RxLPCRCErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
-+	cntrs->RxBufOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
-+	cntrs->RxTIDFullErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
-+	cntrs->RxTIDValidErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
-+	cntrs->RxPKeyMismatchCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
-+	cntrs->RxP0HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
-+	cntrs->RxP1HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
-+	cntrs->RxP2HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
-+	cntrs->RxP3HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
-+	cntrs->RxP4HdrEgrOvflCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
-+	cntrs->RxP5HdrEgrOvflCnt = 0;
-+	cntrs->RxP6HdrEgrOvflCnt = 0;
-+	cntrs->RxP7HdrEgrOvflCnt = 0;
-+	cntrs->RxP8HdrEgrOvflCnt = 0;
-+	cntrs->RxP9HdrEgrOvflCnt = 0;
-+	cntrs->RxP10HdrEgrOvflCnt = 0;
-+	cntrs->RxP11HdrEgrOvflCnt = 0;
-+	cntrs->RxP12HdrEgrOvflCnt = 0;
-+	cntrs->RxP13HdrEgrOvflCnt = 0;
-+	cntrs->RxP14HdrEgrOvflCnt = 0;
-+	cntrs->RxP15HdrEgrOvflCnt = 0;
-+	cntrs->RxP16HdrEgrOvflCnt = 0;
-+	cntrs->IBStatusChangeCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
-+	cntrs->IBLinkErrRecoveryCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
-+	cntrs->IBLinkDownedCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
-+	cntrs->IBSymbolErrCnt =
-+		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
-+	cntrs->RxVL15DroppedPktCnt = 0;
-+	cntrs->RxOtherLocalPhyErrCnt = 0;
-+	cntrs->PcieRetryBufDiagQwordCnt = 0;
-+	cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
-+	cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
-+	cntrs->RxVlErrCnt = 0;
-+	cntrs->RxDlidFltrCnt = 0;
-+}
-+
-+
-+/* no interrupt fallback for these chips */
-+static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
-+{
-+	return 0;
-+}
-+
-+
- /*
-- * On platforms using this chip, and not having ordered WC stores, we
-- * can get TXE parity errors due to speculative reads to the PIO buffers,
-- * and this, due to a chip bug can result in (many) false parity error
-- * reports.  So it's a debug print on those, and an info print on systems
-- * where the speculative reads don't occur.
-- * Because we can get lots of false errors, we have no upper limit
-- * on recovery attempts on those platforms.
-+ * reset the XGXS (between serdes and IBC).  Slightly less intrusive
-+ * than resetting the IBC or external link state, and useful in some
-+ * cases to cause some retraining.  To do this right, we reset IBC
-+ * as well.
-  */
--static int ipath_pe_txe_recover(struct ipath_devdata *dd)
-+static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
- {
--	if (ipath_unordered_wc())
--		ipath_dbg("Recovering from TXE PIO parity error\n");
--	else {
--		int cnt = ++ipath_stats.sps_txeparity;
--		if (cnt >= IPATH_MAX_PARITY_ATTEMPTS)  {
--			if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
--				ipath_dev_err(dd,
--					"Too many attempts to recover from "
--					"TXE parity, giving up\n");
--			return 0;
--		}
--		dev_info(&dd->pcidev->dev,
--			"Recovering from TXE PIO parity error\n");
-+	u64 val, prev_val;
-+
-+	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-+	val = prev_val | INFINIPATH_XGXS_RESET;
-+	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-+			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-+	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-+			 dd->ipath_control);
-+}
-+
-+
-+static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which)
-+{
-+	int ret;
-+
-+	switch (which) {
-+	case IPATH_IB_CFG_LWID:
-+		ret = dd->ipath_link_width_active;
-+		break;
-+	case IPATH_IB_CFG_SPD:
-+		ret = dd->ipath_link_speed_active;
-+		break;
-+	case IPATH_IB_CFG_LWID_ENB:
-+		ret = dd->ipath_link_width_enabled;
-+		break;
-+	case IPATH_IB_CFG_SPD_ENB:
-+		ret = dd->ipath_link_speed_enabled;
-+		break;
-+	default:
-+		ret =  -ENOTSUPP;
-+		break;
- 	}
+-	page = kmap_atomic(p1, KM_USER0);
+-	clear_page(page);
+-	*(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
+-	kunmap_atomic(page, KM_USER0);
+-
+-	page = kmap_atomic(p2, KM_USER0);
+-	clear_page(page);
+-	kunmap_atomic(page, KM_USER0);
+-
+-	page = kmap_atomic(p3, KM_USER0);
+-	clear_page(page);
+-	*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
+-	kunmap_atomic(page, KM_USER0);
+-
 -	return 1;
-+	return ret;
-+}
-+
-+
-+/* we assume range checking is already done, if needed */
-+static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-+{
-+	int ret = 0;
-+
-+	if (which == IPATH_IB_CFG_LWID_ENB)
-+		dd->ipath_link_width_enabled = val;
-+	else if (which == IPATH_IB_CFG_SPD_ENB)
-+		dd->ipath_link_speed_enabled = val;
-+	else
-+		ret = -ENOTSUPP;
-+	return ret;
- }
- 
-+static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-+{
-+}
-+
-+
-+static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-+{
-+	ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs),
-+		ipath_ib_linktrstate(dd, ibcs));
-+	return 0;
-+}
-+
-+
- /**
-  * ipath_init_iba6120_funcs - set up the chip-specific function pointers
-  * @dd: the infinipath device
-@@ -1407,7 +1666,7 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
- 	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
- 	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
- 	/*
--	 * this may get changed after we read the chip revision,
-+	 * _f_put_tid may get changed after we read the chip revision,
- 	 * but we start with the safe version for all revs
- 	 */
- 	dd->ipath_f_put_tid = ipath_pe_put_tid;
-@@ -1415,17 +1674,19 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
- 	dd->ipath_f_setextled = ipath_setup_pe_setextled;
- 	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
- 	dd->ipath_f_free_irq = ipath_pe_free_irq;
+-}
+-
+-static void seg_setup(int seg)
+-{
+-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+-
+-	vmcs_write16(sf->selector, 0);
+-	vmcs_writel(sf->base, 0);
+-	vmcs_write32(sf->limit, 0xffff);
+-	vmcs_write32(sf->ar_bytes, 0x93);
+-}
+-
+-/*
+- * Sets up the vmcs for emulated real mode.
+- */
+-static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+-{
+-	u32 host_sysenter_cs;
+-	u32 junk;
+-	unsigned long a;
+-	struct descriptor_table dt;
+-	int i;
+-	int ret = 0;
+-	unsigned long kvm_vmx_return;
+-	u64 msr;
+-	u32 exec_control;
+-
+-	if (!init_rmode_tss(vmx->vcpu.kvm)) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+-	vmx->vcpu.rmode.active = 0;
+-
+-	vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+-	set_cr8(&vmx->vcpu, 0);
+-	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+-	if (vmx->vcpu.vcpu_id == 0)
+-		msr |= MSR_IA32_APICBASE_BSP;
+-	kvm_set_apic_base(&vmx->vcpu, msr);
+-
+-	fx_init(&vmx->vcpu);
 -
--	/* initialize chip-specific variables */
- 	dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
-+	dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
-+	dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
-+	dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
-+	dd->ipath_f_config_ports = ipath_pe_config_ports;
-+	dd->ipath_f_read_counters = ipath_pe_read_counters;
-+	dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
-+	dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
-+	dd->ipath_f_config_jint = ipath_pe_config_jint;
-+	dd->ipath_f_ib_updown = ipath_pe_ib_updown;
- 
 -	/*
--	 * setup the register offsets, since they are different for each
--	 * chip
+-	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
+-	 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
 -	 */
--	dd->ipath_kregs = &ipath_pe_kregs;
--	dd->ipath_cregs = &ipath_pe_cregs;
- 
-+	/* initialize chip-specific variables */
- 	ipath_init_pe_variables(dd);
- }
- 
-diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
-index 9dd0bac..4471674 100644
---- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
-+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
-@@ -91,7 +91,7 @@ static int create_port0_egr(struct ipath_devdata *dd)
- 	struct ipath_skbinfo *skbinfo;
- 	int ret;
- 
--	egrcnt = dd->ipath_rcvegrcnt;
-+	egrcnt = dd->ipath_p0_rcvegrcnt;
- 
- 	skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
- 	if (skbinfo == NULL) {
-@@ -244,8 +244,7 @@ static int init_chip_first(struct ipath_devdata *dd,
- 	 * cfgports.  We do still check and report a difference, if
- 	 * not same (should be impossible).
- 	 */
--	dd->ipath_portcnt =
--		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-+	dd->ipath_f_config_ports(dd, ipath_cfgports);
- 	if (!ipath_cfgports)
- 		dd->ipath_cfgports = dd->ipath_portcnt;
- 	else if (ipath_cfgports <= dd->ipath_portcnt) {
-@@ -272,22 +271,7 @@ static int init_chip_first(struct ipath_devdata *dd,
- 		goto done;
- 	}
- 
--	dd->ipath_lastegrheads = kzalloc(sizeof(*dd->ipath_lastegrheads)
--					 * dd->ipath_cfgports,
--					 GFP_KERNEL);
--	dd->ipath_lastrcvhdrqtails =
--		kzalloc(sizeof(*dd->ipath_lastrcvhdrqtails)
--			* dd->ipath_cfgports, GFP_KERNEL);
+-	if (vmx->vcpu.vcpu_id == 0) {
+-		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
+-		vmcs_writel(GUEST_CS_BASE, 0x000f0000);
+-	} else {
+-		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
+-		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
+-	}
+-	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
+-	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
 -
--	if (!dd->ipath_lastegrheads || !dd->ipath_lastrcvhdrqtails) {
--		ipath_dev_err(dd, "Unable to allocate head arrays, "
--			      "failing\n");
--		ret = -ENOMEM;
--		goto done;
+-	seg_setup(VCPU_SREG_DS);
+-	seg_setup(VCPU_SREG_ES);
+-	seg_setup(VCPU_SREG_FS);
+-	seg_setup(VCPU_SREG_GS);
+-	seg_setup(VCPU_SREG_SS);
+-
+-	vmcs_write16(GUEST_TR_SELECTOR, 0);
+-	vmcs_writel(GUEST_TR_BASE, 0);
+-	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
+-	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
+-
+-	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
+-	vmcs_writel(GUEST_LDTR_BASE, 0);
+-	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
+-	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
+-
+-	vmcs_write32(GUEST_SYSENTER_CS, 0);
+-	vmcs_writel(GUEST_SYSENTER_ESP, 0);
+-	vmcs_writel(GUEST_SYSENTER_EIP, 0);
+-
+-	vmcs_writel(GUEST_RFLAGS, 0x02);
+-	if (vmx->vcpu.vcpu_id == 0)
+-		vmcs_writel(GUEST_RIP, 0xfff0);
+-	else
+-		vmcs_writel(GUEST_RIP, 0);
+-	vmcs_writel(GUEST_RSP, 0);
+-
+-	//todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
+-	vmcs_writel(GUEST_DR7, 0x400);
+-
+-	vmcs_writel(GUEST_GDTR_BASE, 0);
+-	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
+-
+-	vmcs_writel(GUEST_IDTR_BASE, 0);
+-	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
+-
+-	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
+-	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
+-	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
+-
+-	/* I/O */
+-	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
+-	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
+-
+-	guest_write_tsc(0);
+-
+-	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
+-
+-	/* Special registers */
+-	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+-
+-	/* Control */
+-	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
+-		vmcs_config.pin_based_exec_ctrl);
+-
+-	exec_control = vmcs_config.cpu_based_exec_ctrl;
+-	if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+-		exec_control &= ~CPU_BASED_TPR_SHADOW;
+-#ifdef CONFIG_X86_64
+-		exec_control |= CPU_BASED_CR8_STORE_EXITING |
+-				CPU_BASED_CR8_LOAD_EXITING;
+-#endif
 -	}
+-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
 -
- 	pd = create_portdata0(dd);
+-	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
+-	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
+-	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
+-
+-	vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
+-	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
+-	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+-
+-	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+-	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+-	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+-	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
+-	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
+-	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+-#ifdef CONFIG_X86_64
+-	rdmsrl(MSR_FS_BASE, a);
+-	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
+-	rdmsrl(MSR_GS_BASE, a);
+-	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
+-#else
+-	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
+-	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
+-#endif
 -
- 	if (!pd) {
- 		ipath_dev_err(dd, "Unable to allocate portdata for port "
- 			      "0, failing\n");
-@@ -345,10 +329,10 @@ static int init_chip_first(struct ipath_devdata *dd,
- 		       dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
- 
- 	spin_lock_init(&dd->ipath_tid_lock);
+-	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 -
-+	spin_lock_init(&dd->ipath_sendctrl_lock);
- 	spin_lock_init(&dd->ipath_gpio_lock);
- 	spin_lock_init(&dd->ipath_eep_st_lock);
--	sema_init(&dd->ipath_eep_sem, 1);
-+	mutex_init(&dd->ipath_eep_lock);
- 
- done:
- 	*pdp = pd;
-@@ -372,9 +356,9 @@ static int init_chip_reset(struct ipath_devdata *dd,
- 	*pdp = dd->ipath_pd[0];
- 	/* ensure chip does no sends or receives while we re-initialize */
- 	dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
--	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
--	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
--	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
- 
- 	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
- 	if (dd->ipath_portcnt != rtmp)
-@@ -487,6 +471,7 @@ static void enable_chip(struct ipath_devdata *dd,
- 			struct ipath_portdata *pd, int reinit)
- {
- 	u32 val;
-+	unsigned long flags;
- 	int i;
- 
- 	if (!reinit)
-@@ -495,19 +480,21 @@ static void enable_chip(struct ipath_devdata *dd,
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- 			 dd->ipath_rcvctrl);
- 
-+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- 	/* Enable PIO send, and update of PIOavail regs to memory. */
- 	dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
- 		INFINIPATH_S_PIOBUFAVAILUPD;
--	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
--			 dd->ipath_sendctrl);
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- 
- 	/*
- 	 * enable port 0 receive, and receive interrupt.  other ports
- 	 * done as user opens and inits them.
- 	 */
--	dd->ipath_rcvctrl = INFINIPATH_R_TAILUPD |
--		(1ULL << INFINIPATH_R_PORTENABLE_SHIFT) |
--		(1ULL << INFINIPATH_R_INTRAVAIL_SHIFT);
-+	dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
-+		(1ULL << dd->ipath_r_portenable_shift) |
-+		(1ULL << dd->ipath_r_intravail_shift);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- 			 dd->ipath_rcvctrl);
- 
-@@ -523,12 +510,11 @@ static void enable_chip(struct ipath_devdata *dd,
- 	 */
- 	val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
- 	(void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
--	dd->ipath_port0head = ipath_read_ureg32(dd, ur_rcvhdrtail, 0);
- 
- 	/* Initialize so we interrupt on next packet received */
- 	(void)ipath_write_ureg(dd, ur_rcvhdrhead,
- 			       dd->ipath_rhdrhead_intr_off |
--			       dd->ipath_port0head, 0);
-+			       dd->ipath_pd[0]->port_head, 0);
- 
- 	/*
- 	 * by now pioavail updates to memory should have occurred, so
-@@ -542,12 +528,8 @@ static void enable_chip(struct ipath_devdata *dd,
- 		/*
- 		 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
- 		 */
--		if (i > 3) {
--			if (i & 1)
--				val = dd->ipath_pioavailregs_dma[i - 1];
--			else
--				val = dd->ipath_pioavailregs_dma[i + 1];
--		}
-+		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-+			val = dd->ipath_pioavailregs_dma[i ^ 1];
- 		else
- 			val = dd->ipath_pioavailregs_dma[i];
- 		dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
-@@ -690,12 +672,13 @@ done:
-  */
- int ipath_init_chip(struct ipath_devdata *dd, int reinit)
- {
--	int ret = 0, i;
-+	int ret = 0;
- 	u32 val32, kpiobufs;
- 	u32 piobufs, uports;
- 	u64 val;
- 	struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
- 	gfp_t gfp_flags = GFP_USER | __GFP_COMP;
-+	unsigned long flags;
- 
- 	ret = init_housekeeping(dd, &pd, reinit);
- 	if (ret)
-@@ -746,7 +729,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
- 		kpiobufs = ipath_kpiobufs;
- 
- 	if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
--		i = (int) piobufs -
-+		int i = (int) piobufs -
- 			(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
- 		if (i < 0)
- 			i = 0;
-@@ -827,8 +810,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- 			 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
--	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
--			 INFINIPATH_S_PIOENABLE);
-+
-+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-+	dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
-+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- 
- 	/*
- 	 * before error clears, since we expect serdes pll errors during
-diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
-index c61f9da..92e58c9 100644
---- a/drivers/infiniband/hw/ipath/ipath_intr.c
-+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
-@@ -683,7 +683,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
- 		for (i = 0; i < dd->ipath_cfgports; i++) {
- 			struct ipath_portdata *pd = dd->ipath_pd[i];
- 			if (i == 0) {
--				hd = dd->ipath_port0head;
-+				hd = pd->port_head;
- 				tl = (u32) le64_to_cpu(
- 					*dd->ipath_hdrqtailptr);
- 			} else if (pd && pd->port_cnt &&
-@@ -693,7 +693,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
- 				 * except kernel
- 				 */
- 				tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
--				if (tl == dd->ipath_lastrcvhdrqtails[i])
-+				if (tl == pd->port_lastrcvhdrqtail)
- 					continue;
- 				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
- 						       i);
-@@ -703,7 +703,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
- 			    (!hd && tl == dd->ipath_hdrqlast)) {
- 				if (i == 0)
- 					chkerrpkts = 1;
--				dd->ipath_lastrcvhdrqtails[i] = tl;
-+				pd->port_lastrcvhdrqtail = tl;
- 				pd->port_hdrqfull++;
- 				/* flush hdrqfull so that poll() sees it */
- 				wmb();
-@@ -712,6 +712,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
- 		}
- 	}
- 	if (errs & INFINIPATH_E_RRCVEGRFULL) {
-+		struct ipath_portdata *pd = dd->ipath_pd[0];
-+
- 		/*
- 		 * since this is of less importance and not likely to
- 		 * happen without also getting hdrfull, only count
-@@ -719,7 +721,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
- 		 * vs user)
- 		 */
- 		ipath_stats.sps_etidfull++;
--		if (dd->ipath_port0head !=
-+		if (pd->port_head !=
- 		    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
- 			chkerrpkts = 1;
- 	}
-@@ -795,6 +797,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
- {
- 	int i, im;
- 	__le64 val;
-+	unsigned long flags;
- 
- 	/* disable error interrupts, to avoid confusion */
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
-@@ -813,11 +816,14 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
- 			 dd->ipath_control);
- 
- 	/* ensure pio avail updates continue */
-+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- 		 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
- 	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
--		 dd->ipath_sendctrl);
-+			 dd->ipath_sendctrl);
-+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- 
- 	/*
- 	 * We just enabled pioavailupdate, so dma copy is almost certainly
-@@ -825,8 +831,8 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
- 	 */
- 	for (i = 0; i < dd->ipath_pioavregs; i++) {
- 		/* deal with 6110 chip bug */
--		im = i > 3 ? ((i&1) ? i-1 : i+1) : i;
--		val = ipath_read_kreg64(dd, (0x1000/sizeof(u64))+im);
-+		im = i > 3 ? i ^ 1 : i;
-+		val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
- 		dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
- 			= le64_to_cpu(val);
- 	}
-@@ -849,7 +855,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
- 
- /* this is separate to allow for better optimization of ipath_intr() */
- 
--static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
-+static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
- {
- 	/*
- 	 * sometimes happen during driver init and unload, don't want
-@@ -877,7 +883,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
- 				dd->ipath_f_free_irq(dd);
- 			}
- 		}
--		if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) {
-+		if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
- 			ipath_dev_err(dd, "%u unexpected interrupts, "
- 				      "disabling interrupts completely\n",
- 				      *unexpectp);
-@@ -892,7 +898,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
- 			  "ignoring\n");
- }
- 
--static void ipath_bad_regread(struct ipath_devdata *dd)
-+static noinline void ipath_bad_regread(struct ipath_devdata *dd)
- {
- 	static int allbits;
- 
-@@ -920,31 +926,9 @@ static void ipath_bad_regread(struct ipath_devdata *dd)
- 	}
- }
- 
--static void handle_port_pioavail(struct ipath_devdata *dd)
+-	get_idt(&dt);
+-	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
+-
+-	asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+-	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
+-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+-
+-	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
+-	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
+-	rdmsrl(MSR_IA32_SYSENTER_ESP, a);
+-	vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
+-	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
+-	vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
+-
+-	for (i = 0; i < NR_VMX_MSR; ++i) {
+-		u32 index = vmx_msr_index[i];
+-		u32 data_low, data_high;
+-		u64 data;
+-		int j = vmx->nmsrs;
+-
+-		if (rdmsr_safe(index, &data_low, &data_high) < 0)
+-			continue;
+-		if (wrmsr_safe(index, data_low, data_high) < 0)
+-			continue;
+-		data = data_low | ((u64)data_high << 32);
+-		vmx->host_msrs[j].index = index;
+-		vmx->host_msrs[j].reserved = 0;
+-		vmx->host_msrs[j].data = data;
+-		vmx->guest_msrs[j] = vmx->host_msrs[j];
+-		++vmx->nmsrs;
+-	}
+-
+-	setup_msrs(vmx);
+-
+-	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+-
+-	/* 22.2.1, 20.8.1 */
+-	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
+-
+-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
+-
+-#ifdef CONFIG_X86_64
+-	vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
+-	if (vm_need_tpr_shadow(vmx->vcpu.kvm))
+-		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+-			     page_to_phys(vmx->vcpu.apic->regs_page));
+-	vmcs_write32(TPR_THRESHOLD, 0);
+-#endif
+-
+-	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
+-	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
+-
+-	vmx->vcpu.cr0 = 0x60000010;
+-	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+-	vmx_set_cr4(&vmx->vcpu, 0);
+-#ifdef CONFIG_X86_64
+-	vmx_set_efer(&vmx->vcpu, 0);
+-#endif
+-	vmx_fpu_activate(&vmx->vcpu);
+-	update_exception_bitmap(&vmx->vcpu);
+-
+-	return 0;
+-
+-out:
+-	return ret;
+-}
+-
+-static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+-	vmx_vcpu_setup(vmx);
+-}
+-
+-static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
+-{
+-	u16 ent[2];
+-	u16 cs;
+-	u16 ip;
+-	unsigned long flags;
+-	unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
+-	u16 sp =  vmcs_readl(GUEST_RSP);
+-	u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);
+-
+-	if (sp > ss_limit || sp < 6 ) {
+-		vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
+-			    __FUNCTION__,
+-			    vmcs_readl(GUEST_RSP),
+-			    vmcs_readl(GUEST_SS_BASE),
+-			    vmcs_read32(GUEST_SS_LIMIT));
+-		return;
+-	}
+-
+-	if (emulator_read_std(irq * sizeof(ent), &ent, sizeof(ent), vcpu) !=
+-							X86EMUL_CONTINUE) {
+-		vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
+-		return;
+-	}
+-
+-	flags =  vmcs_readl(GUEST_RFLAGS);
+-	cs =  vmcs_readl(GUEST_CS_BASE) >> 4;
+-	ip =  vmcs_readl(GUEST_RIP);
+-
+-
+-	if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
+-	    emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
+-	    emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
+-		vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
+-		return;
+-	}
+-
+-	vmcs_writel(GUEST_RFLAGS, flags &
+-		    ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
+-	vmcs_write16(GUEST_CS_SELECTOR, ent[1]) ;
+-	vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
+-	vmcs_writel(GUEST_RIP, ent[0]);
+-	vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
+-}
+-
+-static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
+-{
+-	if (vcpu->rmode.active) {
+-		inject_rmode_irq(vcpu, irq);
+-		return;
+-	}
+-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+-			irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
+-}
+-
+-static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
+-{
+-	int word_index = __ffs(vcpu->irq_summary);
+-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
+-	int irq = word_index * BITS_PER_LONG + bit_index;
+-
+-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
+-	if (!vcpu->irq_pending[word_index])
+-		clear_bit(word_index, &vcpu->irq_summary);
+-	vmx_inject_irq(vcpu, irq);
+-}
+-
+-
+-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+-				       struct kvm_run *kvm_run)
 -{
--	u32 i;
+-	u32 cpu_based_vm_exec_control;
+-
+-	vcpu->interrupt_window_open =
+-		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+-		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+-
+-	if (vcpu->interrupt_window_open &&
+-	    vcpu->irq_summary &&
+-	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
+-		/*
+-		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
+-		 */
+-		kvm_do_inject_irq(vcpu);
+-
+-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+-	if (!vcpu->interrupt_window_open &&
+-	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
+-		/*
+-		 * Interrupts blocked.  Wait for unblock.
+-		 */
+-		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+-	else
+-		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+-}
+-
+-static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
+-{
+-	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
+-
+-	set_debugreg(dbg->bp[0], 0);
+-	set_debugreg(dbg->bp[1], 1);
+-	set_debugreg(dbg->bp[2], 2);
+-	set_debugreg(dbg->bp[3], 3);
+-
+-	if (dbg->singlestep) {
+-		unsigned long flags;
+-
+-		flags = vmcs_readl(GUEST_RFLAGS);
+-		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+-		vmcs_writel(GUEST_RFLAGS, flags);
+-	}
+-}
+-
+-static int handle_rmode_exception(struct kvm_vcpu *vcpu,
+-				  int vec, u32 err_code)
+-{
+-	if (!vcpu->rmode.active)
+-		return 0;
+-
 -	/*
--	 * start from port 1, since for now port 0  is never using
--	 * wait_event for PIO
+-	 * Instruction with address size override prefix opcode 0x67
+-	 * Cause the #SS fault with 0 error code in VM86 mode.
 -	 */
--	for (i = 1; dd->ipath_portpiowait && i < dd->ipath_cfgports; i++) {
--		struct ipath_portdata *pd = dd->ipath_pd[i];
+-	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
+-		if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
+-			return 1;
+-	return 0;
+-}
 -
--		if (pd && pd->port_cnt &&
--		    dd->ipath_portpiowait & (1U << i)) {
--			clear_bit(i, &dd->ipath_portpiowait);
--			if (test_bit(IPATH_PORT_WAITING_PIO,
--				     &pd->port_flag)) {
--				clear_bit(IPATH_PORT_WAITING_PIO,
--					  &pd->port_flag);
--				wake_up_interruptible(&pd->port_wait);
--			}
+-static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	u32 intr_info, error_code;
+-	unsigned long cr2, rip;
+-	u32 vect_info;
+-	enum emulation_result er;
+-	int r;
+-
+-	vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+-	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+-
+-	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
+-						!is_page_fault(intr_info)) {
+-		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
+-		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
+-	}
+-
+-	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
+-		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
+-		set_bit(irq, vcpu->irq_pending);
+-		set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
+-	}
+-
+-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+-		return 1;  /* already handled by vmx_vcpu_run() */
+-
+-	if (is_no_device(intr_info)) {
+-		vmx_fpu_activate(vcpu);
+-		return 1;
+-	}
+-
+-	error_code = 0;
+-	rip = vmcs_readl(GUEST_RIP);
+-	if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
+-		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+-	if (is_page_fault(intr_info)) {
+-		cr2 = vmcs_readl(EXIT_QUALIFICATION);
+-
+-		mutex_lock(&vcpu->kvm->lock);
+-		r = kvm_mmu_page_fault(vcpu, cr2, error_code);
+-		if (r < 0) {
+-			mutex_unlock(&vcpu->kvm->lock);
+-			return r;
+-		}
+-		if (!r) {
+-			mutex_unlock(&vcpu->kvm->lock);
+-			return 1;
+-		}
+-
+-		er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
+-		mutex_unlock(&vcpu->kvm->lock);
+-
+-		switch (er) {
+-		case EMULATE_DONE:
+-			return 1;
+-		case EMULATE_DO_MMIO:
+-			++vcpu->stat.mmio_exits;
+-			return 0;
+-		 case EMULATE_FAIL:
+-			kvm_report_emulation_failure(vcpu, "pagetable");
+-			break;
+-		default:
+-			BUG();
 -		}
 -	}
+-
+-	if (vcpu->rmode.active &&
+-	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
+-								error_code)) {
+-		if (vcpu->halt_request) {
+-			vcpu->halt_request = 0;
+-			return kvm_emulate_halt(vcpu);
+-		}
+-		return 1;
+-	}
+-
+-	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
+-		kvm_run->exit_reason = KVM_EXIT_DEBUG;
+-		return 0;
+-	}
+-	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
+-	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
+-	kvm_run->ex.error_code = error_code;
+-	return 0;
 -}
 -
- static void handle_layer_pioavail(struct ipath_devdata *dd)
- {
-+	unsigned long flags;
- 	int ret;
- 
- 	ret = ipath_ib_piobufavail(dd->verbs_dev);
-@@ -953,9 +937,12 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
- 
- 	return;
- set:
--	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
-+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-+	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- 			 dd->ipath_sendctrl);
-+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- }
- 
- /*
-@@ -969,7 +956,15 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
- 	int i;
- 	int rcvdint = 0;
- 
--	/* test_bit below needs this... */
-+	/*
-+	 * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
-+	 * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
-+	 * would both like timely updates of the bits so that
-+	 * we don't pass them by unnecessarily.  the rmb()
-+	 * here ensures that we see them promptly -- the
-+	 * corresponding wmb()'s are in ipath_poll_urgent()
-+	 * and ipath_poll_next()...
-+	 */
- 	rmb();
- 	portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
- 		 dd->ipath_i_rcvavail_mask)
-@@ -980,7 +975,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
- 		if (portr & (1 << i) && pd && pd->port_cnt) {
- 			if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
- 					       &pd->port_flag)) {
--				clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
-+				clear_bit(i + dd->ipath_r_intravail_shift,
- 					  &dd->ipath_rcvctrl);
- 				wake_up_interruptible(&pd->port_wait);
- 				rcvdint = 1;
-@@ -1039,7 +1034,7 @@ irqreturn_t ipath_intr(int irq, void *data)
- 		goto bail;
- 	}
- 
--	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
-+	istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
- 
- 	if (unlikely(!istat)) {
- 		ipath_stats.sps_nullintr++;
-@@ -1180,7 +1175,7 @@ irqreturn_t ipath_intr(int irq, void *data)
- 	 * for receive are at the bottom.
- 	 */
- 	if (chk0rcv) {
--		ipath_kreceive(dd);
-+		ipath_kreceive(dd->ipath_pd[0]);
- 		istat &= ~port0rbits;
- 	}
- 
-@@ -1191,12 +1186,14 @@ irqreturn_t ipath_intr(int irq, void *data)
- 		handle_urcv(dd, istat);
- 
- 	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
--		clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
-+		unsigned long flags;
-+
-+		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-+		dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
- 		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- 				 dd->ipath_sendctrl);
+-static int handle_external_interrupt(struct kvm_vcpu *vcpu,
+-				     struct kvm_run *kvm_run)
+-{
+-	++vcpu->stat.irq_exits;
+-	return 1;
+-}
 -
--		if (dd->ipath_portpiowait)
--			handle_port_pioavail(dd);
-+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- 
- 		handle_layer_pioavail(dd);
- 	}
-diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
-index 8786dd7..4cc0f95 100644
---- a/drivers/infiniband/hw/ipath/ipath_kernel.h
-+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
-@@ -41,6 +41,7 @@
- #include <linux/interrupt.h>
- #include <linux/pci.h>
- #include <linux/dma-mapping.h>
-+#include <linux/mutex.h>
- #include <asm/io.h>
- #include <rdma/ib_verbs.h>
- 
-@@ -140,6 +141,11 @@ struct ipath_portdata {
- 	u32 port_pionowait;
- 	/* total number of rcvhdrqfull errors */
- 	u32 port_hdrqfull;
-+	/*
-+	 * Used to suppress multiple instances of same
-+	 * port staying stuck at same point.
-+	 */
-+	u32 port_lastrcvhdrqtail;
- 	/* saved total number of rcvhdrqfull errors for poll edge trigger */
- 	u32 port_hdrqfull_poll;
- 	/* total number of polled urgent packets */
-@@ -148,6 +154,7 @@ struct ipath_portdata {
- 	u32 port_urgent_poll;
- 	/* pid of process using this port */
- 	pid_t port_pid;
-+	pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
- 	/* same size as task_struct .comm[] */
- 	char port_comm[16];
- 	/* pkeys set by this use of this port */
-@@ -166,6 +173,8 @@ struct ipath_portdata {
- 	u32 active_slaves;
- 	/* Type of packets or conditions we want to poll for */
- 	u16 poll_type;
-+	/* port rcvhdrq head offset */
-+	u32 port_head;
- };
- 
- struct sk_buff;
-@@ -182,6 +191,22 @@ struct ipath_skbinfo {
- 	dma_addr_t phys;
- };
- 
-+/*
-+ * Possible IB config parameters for ipath_f_get/set_ib_cfg()
-+ */
-+#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
-+#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
-+#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
-+#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
-+#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
-+#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
-+#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
-+#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
-+#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
-+#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
-+#define IPATH_IB_CFG_LINKLATENCY 8 /* Get Auto-Lane-reversal enable */
-+
-+
- struct ipath_devdata {
- 	struct list_head ipath_list;
- 
-@@ -222,6 +247,8 @@ struct ipath_devdata {
- 	struct _ipath_layer ipath_layer;
- 	/* setup intr */
- 	int (*ipath_f_intrsetup)(struct ipath_devdata *);
-+	/* fallback to alternate interrupt type if possible */
-+	int (*ipath_f_intr_fallback)(struct ipath_devdata *);
- 	/* setup on-chip bus config */
- 	int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
- 	/* hard reset chip */
-@@ -244,6 +271,18 @@ struct ipath_devdata {
- 	int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
- 	/* free irq */
- 	void (*ipath_f_free_irq)(struct ipath_devdata *);
-+	struct ipath_message_header *(*ipath_f_get_msgheader)
-+					(struct ipath_devdata *, __le32 *);
-+	void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
-+	int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
-+	int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
-+	void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
-+	void (*ipath_f_read_counters)(struct ipath_devdata *,
-+					struct infinipath_counters *);
-+	void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
-+	/* per chip actions needed for IB Link up/down changes */
-+	int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
-+
- 	struct ipath_ibdev *verbs_dev;
- 	struct timer_list verbs_timer;
- 	/* total dwords sent (summed from counter) */
-@@ -313,22 +352,12 @@ struct ipath_devdata {
- 	 * supports, less gives more pio bufs/port, etc.
- 	 */
- 	u32 ipath_cfgports;
--	/* port0 rcvhdrq head offset */
--	u32 ipath_port0head;
- 	/* count of port 0 hdrqfull errors */
- 	u32 ipath_p0_hdrqfull;
-+	/* port 0 number of receive eager buffers */
-+	u32 ipath_p0_rcvegrcnt;
- 
- 	/*
--	 * (*cfgports) used to suppress multiple instances of same
--	 * port staying stuck at same point
+-static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+-	return 0;
+-}
+-
+-static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	unsigned long exit_qualification;
+-	int size, down, in, string, rep;
+-	unsigned port;
+-
+-	++vcpu->stat.io_exits;
+-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+-	string = (exit_qualification & 16) != 0;
+-
+-	if (string) {
+-		if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+-			return 0;
+-		return 1;
+-	}
+-
+-	size = (exit_qualification & 7) + 1;
+-	in = (exit_qualification & 8) != 0;
+-	down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
+-	rep = (exit_qualification & 32) != 0;
+-	port = exit_qualification >> 16;
+-
+-	return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
+-}
+-
+-static void
+-vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+-{
+-	/*
+-	 * Patch in the VMCALL instruction:
 -	 */
--	u32 *ipath_lastrcvhdrqtails;
+-	hypercall[0] = 0x0f;
+-	hypercall[1] = 0x01;
+-	hypercall[2] = 0xc1;
+-	hypercall[3] = 0xc3;
+-}
+-
+-static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	unsigned long exit_qualification;
+-	int cr;
+-	int reg;
+-
+-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+-	cr = exit_qualification & 15;
+-	reg = (exit_qualification >> 8) & 15;
+-	switch ((exit_qualification >> 4) & 3) {
+-	case 0: /* mov to cr */
+-		switch (cr) {
+-		case 0:
+-			vcpu_load_rsp_rip(vcpu);
+-			set_cr0(vcpu, vcpu->regs[reg]);
+-			skip_emulated_instruction(vcpu);
+-			return 1;
+-		case 3:
+-			vcpu_load_rsp_rip(vcpu);
+-			set_cr3(vcpu, vcpu->regs[reg]);
+-			skip_emulated_instruction(vcpu);
+-			return 1;
+-		case 4:
+-			vcpu_load_rsp_rip(vcpu);
+-			set_cr4(vcpu, vcpu->regs[reg]);
+-			skip_emulated_instruction(vcpu);
+-			return 1;
+-		case 8:
+-			vcpu_load_rsp_rip(vcpu);
+-			set_cr8(vcpu, vcpu->regs[reg]);
+-			skip_emulated_instruction(vcpu);
+-			kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+-			return 0;
+-		};
+-		break;
+-	case 2: /* clts */
+-		vcpu_load_rsp_rip(vcpu);
+-		vmx_fpu_deactivate(vcpu);
+-		vcpu->cr0 &= ~X86_CR0_TS;
+-		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+-		vmx_fpu_activate(vcpu);
+-		skip_emulated_instruction(vcpu);
+-		return 1;
+-	case 1: /*mov from cr*/
+-		switch (cr) {
+-		case 3:
+-			vcpu_load_rsp_rip(vcpu);
+-			vcpu->regs[reg] = vcpu->cr3;
+-			vcpu_put_rsp_rip(vcpu);
+-			skip_emulated_instruction(vcpu);
+-			return 1;
+-		case 8:
+-			vcpu_load_rsp_rip(vcpu);
+-			vcpu->regs[reg] = get_cr8(vcpu);
+-			vcpu_put_rsp_rip(vcpu);
+-			skip_emulated_instruction(vcpu);
+-			return 1;
+-		}
+-		break;
+-	case 3: /* lmsw */
+-		lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+-
+-		skip_emulated_instruction(vcpu);
+-		return 1;
+-	default:
+-		break;
+-	}
+-	kvm_run->exit_reason = 0;
+-	pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
+-	       (int)(exit_qualification >> 4) & 3, cr);
+-	return 0;
+-}
+-
+-static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	unsigned long exit_qualification;
+-	unsigned long val;
+-	int dr, reg;
+-
 -	/*
--	 * (*cfgports) used to suppress multiple instances of same
--	 * port staying stuck at same point
+-	 * FIXME: this code assumes the host is debugging the guest.
+-	 *        need to deal with guest debugging itself too.
 -	 */
--	u32 *ipath_lastegrheads;
+-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+-	dr = exit_qualification & 7;
+-	reg = (exit_qualification >> 8) & 15;
+-	vcpu_load_rsp_rip(vcpu);
+-	if (exit_qualification & 16) {
+-		/* mov from dr */
+-		switch (dr) {
+-		case 6:
+-			val = 0xffff0ff0;
+-			break;
+-		case 7:
+-			val = 0x400;
+-			break;
+-		default:
+-			val = 0;
+-		}
+-		vcpu->regs[reg] = val;
+-	} else {
+-		/* mov to dr */
+-	}
+-	vcpu_put_rsp_rip(vcpu);
+-	skip_emulated_instruction(vcpu);
+-	return 1;
+-}
+-
+-static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	kvm_emulate_cpuid(vcpu);
+-	return 1;
+-}
+-
+-static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+-	u64 data;
+-
+-	if (vmx_get_msr(vcpu, ecx, &data)) {
+-		vmx_inject_gp(vcpu, 0);
+-		return 1;
+-	}
+-
+-	/* FIXME: handling of bits 32:63 of rax, rdx */
+-	vcpu->regs[VCPU_REGS_RAX] = data & -1u;
+-	vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
+-	skip_emulated_instruction(vcpu);
+-	return 1;
+-}
+-
+-static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+-	u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
+-		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
+-
+-	if (vmx_set_msr(vcpu, ecx, data) != 0) {
+-		vmx_inject_gp(vcpu, 0);
+-		return 1;
+-	}
+-
+-	skip_emulated_instruction(vcpu);
+-	return 1;
+-}
+-
+-static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
+-				      struct kvm_run *kvm_run)
+-{
+-	return 1;
+-}
+-
+-static int handle_interrupt_window(struct kvm_vcpu *vcpu,
+-				   struct kvm_run *kvm_run)
+-{
+-	u32 cpu_based_vm_exec_control;
+-
+-	/* clear pending irq */
+-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+-	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+-	/*
+-	 * If the user space waits to inject interrupts, exit as soon as
+-	 * possible
+-	 */
+-	if (kvm_run->request_interrupt_window &&
+-	    !vcpu->irq_summary) {
+-		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+-		++vcpu->stat.irq_window_exits;
+-		return 0;
+-	}
+-	return 1;
+-}
+-
+-static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	skip_emulated_instruction(vcpu);
+-	return kvm_emulate_halt(vcpu);
+-}
+-
+-static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	skip_emulated_instruction(vcpu);
+-	return kvm_hypercall(vcpu, kvm_run);
+-}
+-
+-/*
+- * The exit handlers return 1 if the exit was handled fully and guest execution
+- * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
+- * to be done to userspace and return 0.
+- */
+-static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
+-				      struct kvm_run *kvm_run) = {
+-	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
+-	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
+-	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
+-	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
+-	[EXIT_REASON_CR_ACCESS]               = handle_cr,
+-	[EXIT_REASON_DR_ACCESS]               = handle_dr,
+-	[EXIT_REASON_CPUID]                   = handle_cpuid,
+-	[EXIT_REASON_MSR_READ]                = handle_rdmsr,
+-	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
+-	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
+-	[EXIT_REASON_HLT]                     = handle_halt,
+-	[EXIT_REASON_VMCALL]                  = handle_vmcall,
+-	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold
+-};
+-
+-static const int kvm_vmx_max_exit_handlers =
+-	ARRAY_SIZE(kvm_vmx_exit_handlers);
+-
+-/*
+- * The guest has exited.  See if we can fix it or if we need userspace
+- * assistance.
+- */
+-static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+-{
+-	u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+-	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+-	if (unlikely(vmx->fail)) {
+-		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+-		kvm_run->fail_entry.hardware_entry_failure_reason
+-			= vmcs_read32(VM_INSTRUCTION_ERROR);
+-		return 0;
+-	}
+-
+-	if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
+-				exit_reason != EXIT_REASON_EXCEPTION_NMI )
+-		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
+-		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
+-	if (exit_reason < kvm_vmx_max_exit_handlers
+-	    && kvm_vmx_exit_handlers[exit_reason])
+-		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
+-	else {
+-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+-		kvm_run->hw.hardware_exit_reason = exit_reason;
+-	}
+-	return 0;
+-}
+-
+-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+-{
+-}
+-
+-static void update_tpr_threshold(struct kvm_vcpu *vcpu)
+-{
+-	int max_irr, tpr;
+-
+-	if (!vm_need_tpr_shadow(vcpu->kvm))
+-		return;
+-
+-	if (!kvm_lapic_enabled(vcpu) ||
+-	    ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
+-		vmcs_write32(TPR_THRESHOLD, 0);
+-		return;
+-	}
+-
+-	tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
+-	vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
+-}
+-
+-static void enable_irq_window(struct kvm_vcpu *vcpu)
+-{
+-	u32 cpu_based_vm_exec_control;
+-
+-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+-	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+-}
+-
+-static void vmx_intr_assist(struct kvm_vcpu *vcpu)
+-{
+-	u32 idtv_info_field, intr_info_field;
+-	int has_ext_irq, interrupt_window_open;
+-	int vector;
+-
+-	kvm_inject_pending_timer_irqs(vcpu);
+-	update_tpr_threshold(vcpu);
+-
+-	has_ext_irq = kvm_cpu_has_interrupt(vcpu);
+-	intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
+-	idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+-	if (intr_info_field & INTR_INFO_VALID_MASK) {
+-		if (idtv_info_field & INTR_INFO_VALID_MASK) {
+-			/* TODO: fault when IDT_Vectoring */
+-			printk(KERN_ERR "Fault when IDT_Vectoring\n");
+-		}
+-		if (has_ext_irq)
+-			enable_irq_window(vcpu);
+-		return;
+-	}
+-	if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
+-		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+-		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+-				vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
+-
+-		if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
+-			vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+-				vmcs_read32(IDT_VECTORING_ERROR_CODE));
+-		if (unlikely(has_ext_irq))
+-			enable_irq_window(vcpu);
+-		return;
+-	}
+-	if (!has_ext_irq)
+-		return;
+-	interrupt_window_open =
+-		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+-		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+-	if (interrupt_window_open) {
+-		vector = kvm_cpu_get_interrupt(vcpu);
+-		vmx_inject_irq(vcpu, vector);
+-		kvm_timer_intr_post(vcpu, vector);
+-	} else
+-		enable_irq_window(vcpu);
+-}
+-
+-static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+-{
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-	u32 intr_info;
+-
 -	/*
- 	 * index of last piobuffer we used.  Speeds up searching, by
- 	 * starting at this point.  Doesn't matter if multiple cpu's use and
- 	 * update, last updater is only write that matters.  Whenever it
-@@ -367,14 +396,15 @@ struct ipath_devdata {
- 	unsigned long ipath_wc_len;
- 	/* ref count for each pkey */
- 	atomic_t ipath_pkeyrefs[4];
--	/* shadow copy of all exptids physaddr; used only by funcsim */
--	u64 *ipath_tidsimshadow;
- 	/* shadow copy of struct page *'s for exp tid pages */
- 	struct page **ipath_pageshadow;
- 	/* shadow copy of dma handles for exp tid pages */
- 	dma_addr_t *ipath_physshadow;
--	/* lock to workaround chip bug 9437 */
-+	u64 __iomem *ipath_egrtidbase;
-+	/* lock to workaround chip bug 9437 and others */
-+	spinlock_t ipath_kernel_tid_lock;
- 	spinlock_t ipath_tid_lock;
-+	spinlock_t ipath_sendctrl_lock;
- 
- 	/*
- 	 * IPATH_STATUS_*,
-@@ -395,6 +425,8 @@ struct ipath_devdata {
- 	void *ipath_dummy_hdrq;	/* used after port close */
- 	dma_addr_t ipath_dummy_hdrq_phys;
- 
-+	unsigned long ipath_ureg_align; /* user register alignment */
-+
- 	/*
- 	 * Shadow copies of registers; size indicates read access size.
- 	 * Most of them are readonly, but some are write-only register,
-@@ -456,8 +488,6 @@ struct ipath_devdata {
- 	unsigned long ipath_rcvctrl;
- 	/* shadow kr_sendctrl */
- 	unsigned long ipath_sendctrl;
--	/* ports waiting for PIOavail intr */
--	unsigned long ipath_portpiowait;
- 	unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */
- 
- 	/* value we put in kr_rcvhdrcnt */
-@@ -550,12 +580,26 @@ struct ipath_devdata {
- 	u8 ipath_minrev;
- 	/* board rev, from ipath_revision */
- 	u8 ipath_boardrev;
-+
-+	u8 ipath_r_portenable_shift;
-+	u8 ipath_r_intravail_shift;
-+	u8 ipath_r_tailupd_shift;
-+	u8 ipath_r_portcfg_shift;
-+
- 	/* unit # of this chip, if present */
- 	int ipath_unit;
- 	/* saved for restore after reset */
- 	u8 ipath_pci_cacheline;
- 	/* LID mask control */
- 	u8 ipath_lmc;
-+	/* link width supported */
-+	u8 ipath_link_width_supported;
-+	/* link speed supported */
-+	u8 ipath_link_speed_supported;
-+	u8 ipath_link_width_enabled;
-+	u8 ipath_link_speed_enabled;
-+	u8 ipath_link_width_active;
-+	u8 ipath_link_speed_active;
- 	/* Rx Polarity inversion (compensate for ~tx on partner) */
- 	u8 ipath_rx_pol_inv;
- 
-@@ -590,6 +634,8 @@ struct ipath_devdata {
- 	 */
- 	u32 ipath_i_rcvavail_mask;
- 	u32 ipath_i_rcvurg_mask;
-+	u16 ipath_i_rcvurg_shift;
-+	u16 ipath_i_rcvavail_shift;
- 
- 	/*
- 	 * Register bits for selecting i2c direction and values, used for
-@@ -603,6 +649,29 @@ struct ipath_devdata {
- 	/* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
- 	spinlock_t ipath_gpio_lock;
- 
-+	/*
-+	 * IB link and linktraining states and masks that vary per chip in
-+	 * some way.  Set at init, to avoid each IB status change interrupt
-+	 */
-+	u8 ibcs_ls_shift;
-+	u8 ibcs_lts_mask;
-+	u32 ibcs_mask;
-+	u32 ib_init;
-+	u32 ib_arm;
-+	u32 ib_active;
-+
-+	u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
-+
-+	/*
-+	 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontol
-+	 * reg. Changes for IBA7220
-+	 */
-+	u8 ibcc_lic_mask; /* LinkInitCmd */
-+	u8 ibcc_lc_shift; /* LinkCmd */
-+	u8 ibcc_mpl_shift; /* Maxpktlen */
-+
-+	u8 delay_mult;
-+
- 	/* used to override LED behavior */
- 	u8 ipath_led_override;  /* Substituted for normal value, if non-zero */
- 	u16 ipath_led_override_timeoff; /* delta to next timer event */
-@@ -616,7 +685,7 @@ struct ipath_devdata {
- 	/* control access to actual counters, timer */
- 	spinlock_t ipath_eep_st_lock;
- 	/* control high-level access to EEPROM */
--	struct semaphore ipath_eep_sem;
-+	struct mutex ipath_eep_lock;
- 	/* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
- 	uint64_t ipath_traffic_wds;
- 	/* active time is kept in seconds, but logged in hours */
-@@ -630,6 +699,10 @@ struct ipath_devdata {
- 	 * each of the counters to increment.
- 	 */
- 	struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
-+
-+	/* interrupt mitigation reload register info */
-+	u16 ipath_jint_idle_ticks;	/* idle clock ticks */
-+	u16 ipath_jint_max_packets;	/* max packets across all ports */
- };
- 
- /* Private data for file operations */
-@@ -690,7 +763,7 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
- 
- int ipath_parse_ushort(const char *str, unsigned short *valp);
- 
--void ipath_kreceive(struct ipath_devdata *);
-+void ipath_kreceive(struct ipath_portdata *);
- int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
- int ipath_reset_device(int);
- void ipath_get_faststats(unsigned long);
-@@ -698,6 +771,8 @@ int ipath_set_linkstate(struct ipath_devdata *, u8);
- int ipath_set_mtu(struct ipath_devdata *, u16);
- int ipath_set_lid(struct ipath_devdata *, u32, u8);
- int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
-+void ipath_enable_armlaunch(struct ipath_devdata *);
-+void ipath_disable_armlaunch(struct ipath_devdata *);
- 
- /* for use in system calls, where we want to know device type, etc. */
- #define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
-@@ -744,9 +819,15 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
- 		 * are 64bit */
- #define IPATH_32BITCOUNTERS 0x20000
- 		/* can miss port0 rx interrupts */
-+		/* Interrupt register is 64 bits */
-+#define IPATH_INTREG_64     0x40000
- #define IPATH_DISABLED      0x80000 /* administratively disabled */
- 		/* Use GPIO interrupts for new counters */
- #define IPATH_GPIO_ERRINTRS 0x100000
-+#define IPATH_SWAP_PIOBUFS  0x200000
-+		/* Suppress heartbeat, even if turning off loopback */
-+#define IPATH_NO_HRTBT      0x1000000
-+#define IPATH_HAS_MULT_IB_SPEED 0x8000000
- 
- /* Bits in GPIO for the added interrupts */
- #define IPATH_GPIO_PORT0_BIT 2
-@@ -758,8 +839,6 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
- /* portdata flag bit offsets */
- 		/* waiting for a packet to arrive */
- #define IPATH_PORT_WAITING_RCV   2
--		/* waiting for a PIO buffer to be available */
--#define IPATH_PORT_WAITING_PIO   3
- 		/* master has not finished initializing */
- #define IPATH_PORT_MASTER_UNINIT 4
- 		/* waiting for an urgent packet to arrive */
-@@ -767,8 +846,6 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
- 
- /* free up any allocated data at closes */
- void ipath_free_data(struct ipath_portdata *dd);
--int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
--int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
- u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
- void ipath_init_iba6120_funcs(struct ipath_devdata *);
- void ipath_init_iba6110_funcs(struct ipath_devdata *);
-@@ -792,33 +869,6 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
-  */
- #define IPATH_DFLT_RCVHDRSIZE 9
- 
--#define IPATH_MDIO_CMD_WRITE   1
--#define IPATH_MDIO_CMD_READ    2
--#define IPATH_MDIO_CLD_DIV     25	/* to get 2.5 Mhz mdio clock */
--#define IPATH_MDIO_CMDVALID    0x40000000	/* bit 30 */
--#define IPATH_MDIO_DATAVALID   0x80000000	/* bit 31 */
--#define IPATH_MDIO_CTRL_STD    0x0
+-	 * Loading guest fpu may have cleared host cr0.ts
+-	 */
+-	vmcs_writel(HOST_CR0, read_cr0());
 -
--static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
+-	asm (
+-		/* Store host registers */
+-#ifdef CONFIG_X86_64
+-		"push %%rax; push %%rbx; push %%rdx;"
+-		"push %%rsi; push %%rdi; push %%rbp;"
+-		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
+-		"push %%r12; push %%r13; push %%r14; push %%r15;"
+-		"push %%rcx \n\t"
+-		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
+-#else
+-		"pusha; push %%ecx \n\t"
+-		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
+-#endif
+-		/* Check if vmlaunch of vmresume is needed */
+-		"cmp $0, %1 \n\t"
+-		/* Load guest registers.  Don't clobber flags. */
+-#ifdef CONFIG_X86_64
+-		"mov %c[cr2](%3), %%rax \n\t"
+-		"mov %%rax, %%cr2 \n\t"
+-		"mov %c[rax](%3), %%rax \n\t"
+-		"mov %c[rbx](%3), %%rbx \n\t"
+-		"mov %c[rdx](%3), %%rdx \n\t"
+-		"mov %c[rsi](%3), %%rsi \n\t"
+-		"mov %c[rdi](%3), %%rdi \n\t"
+-		"mov %c[rbp](%3), %%rbp \n\t"
+-		"mov %c[r8](%3),  %%r8  \n\t"
+-		"mov %c[r9](%3),  %%r9  \n\t"
+-		"mov %c[r10](%3), %%r10 \n\t"
+-		"mov %c[r11](%3), %%r11 \n\t"
+-		"mov %c[r12](%3), %%r12 \n\t"
+-		"mov %c[r13](%3), %%r13 \n\t"
+-		"mov %c[r14](%3), %%r14 \n\t"
+-		"mov %c[r15](%3), %%r15 \n\t"
+-		"mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
+-#else
+-		"mov %c[cr2](%3), %%eax \n\t"
+-		"mov %%eax,   %%cr2 \n\t"
+-		"mov %c[rax](%3), %%eax \n\t"
+-		"mov %c[rbx](%3), %%ebx \n\t"
+-		"mov %c[rdx](%3), %%edx \n\t"
+-		"mov %c[rsi](%3), %%esi \n\t"
+-		"mov %c[rdi](%3), %%edi \n\t"
+-		"mov %c[rbp](%3), %%ebp \n\t"
+-		"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
+-#endif
+-		/* Enter guest mode */
+-		"jne .Llaunched \n\t"
+-		ASM_VMX_VMLAUNCH "\n\t"
+-		"jmp .Lkvm_vmx_return \n\t"
+-		".Llaunched: " ASM_VMX_VMRESUME "\n\t"
+-		".Lkvm_vmx_return: "
+-		/* Save guest registers, load host registers, keep flags */
+-#ifdef CONFIG_X86_64
+-		"xchg %3,     (%%rsp) \n\t"
+-		"mov %%rax, %c[rax](%3) \n\t"
+-		"mov %%rbx, %c[rbx](%3) \n\t"
+-		"pushq (%%rsp); popq %c[rcx](%3) \n\t"
+-		"mov %%rdx, %c[rdx](%3) \n\t"
+-		"mov %%rsi, %c[rsi](%3) \n\t"
+-		"mov %%rdi, %c[rdi](%3) \n\t"
+-		"mov %%rbp, %c[rbp](%3) \n\t"
+-		"mov %%r8,  %c[r8](%3) \n\t"
+-		"mov %%r9,  %c[r9](%3) \n\t"
+-		"mov %%r10, %c[r10](%3) \n\t"
+-		"mov %%r11, %c[r11](%3) \n\t"
+-		"mov %%r12, %c[r12](%3) \n\t"
+-		"mov %%r13, %c[r13](%3) \n\t"
+-		"mov %%r14, %c[r14](%3) \n\t"
+-		"mov %%r15, %c[r15](%3) \n\t"
+-		"mov %%cr2, %%rax   \n\t"
+-		"mov %%rax, %c[cr2](%3) \n\t"
+-		"mov (%%rsp), %3 \n\t"
+-
+-		"pop  %%rcx; pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
+-		"pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
+-		"pop  %%rbp; pop  %%rdi; pop  %%rsi;"
+-		"pop  %%rdx; pop  %%rbx; pop  %%rax \n\t"
+-#else
+-		"xchg %3, (%%esp) \n\t"
+-		"mov %%eax, %c[rax](%3) \n\t"
+-		"mov %%ebx, %c[rbx](%3) \n\t"
+-		"pushl (%%esp); popl %c[rcx](%3) \n\t"
+-		"mov %%edx, %c[rdx](%3) \n\t"
+-		"mov %%esi, %c[rsi](%3) \n\t"
+-		"mov %%edi, %c[rdi](%3) \n\t"
+-		"mov %%ebp, %c[rbp](%3) \n\t"
+-		"mov %%cr2, %%eax  \n\t"
+-		"mov %%eax, %c[cr2](%3) \n\t"
+-		"mov (%%esp), %3 \n\t"
+-
+-		"pop %%ecx; popa \n\t"
+-#endif
+-		"setbe %0 \n\t"
+-	      : "=q" (vmx->fail)
+-	      : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
+-		"c"(vcpu),
+-		[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
+-		[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
+-		[rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
+-		[rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
+-		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
+-		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
+-		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
+-#ifdef CONFIG_X86_64
+-		[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
+-		[r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
+-		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
+-		[r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
+-		[r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
+-		[r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
+-		[r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
+-		[r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
+-#endif
+-		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
+-	      : "cc", "memory" );
+-
+-	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
+-
+-	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+-	vmx->launched = 1;
+-
+-	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+-
+-	/* We need to handle NMIs before interrupts are enabled */
+-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+-		asm("int $2");
+-}
+-
+-static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
+-				  unsigned long addr,
+-				  u32 err_code)
+-{
+-	u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+-
+-	++vcpu->stat.pf_guest;
+-
+-	if (is_page_fault(vect_info)) {
+-		printk(KERN_DEBUG "inject_page_fault: "
+-		       "double fault 0x%lx @ 0x%lx\n",
+-		       addr, vmcs_readl(GUEST_RIP));
+-		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
+-		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+-			     DF_VECTOR |
+-			     INTR_TYPE_EXCEPTION |
+-			     INTR_INFO_DELIEVER_CODE_MASK |
+-			     INTR_INFO_VALID_MASK);
+-		return;
+-	}
+-	vcpu->cr2 = addr;
+-	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
+-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+-		     PF_VECTOR |
+-		     INTR_TYPE_EXCEPTION |
+-		     INTR_INFO_DELIEVER_CODE_MASK |
+-		     INTR_INFO_VALID_MASK);
+-
+-}
+-
+-static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 -{
--	return (((u64) IPATH_MDIO_CLD_DIV) << 32) |
--		(cmd << 26) |
--		(dev << 21) |
--		(reg << 16) |
--		(data & 0xFFFF);
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+-	if (vmx->vmcs) {
+-		on_each_cpu(__vcpu_clear, vmx, 0, 1);
+-		free_vmcs(vmx->vmcs);
+-		vmx->vmcs = NULL;
+-	}
 -}
 -
--		/* signal and fifo status, in bank 31 */
--#define IPATH_MDIO_CTRL_XGXS_REG_8  0x8
--		/* controls loopback, redundancy */
--#define IPATH_MDIO_CTRL_8355_REG_1  0x10
--		/* premph, encdec, etc. */
--#define IPATH_MDIO_CTRL_8355_REG_2  0x11
--		/* Kchars, etc. */
--#define IPATH_MDIO_CTRL_8355_REG_6  0x15
--#define IPATH_MDIO_CTRL_8355_REG_9  0x18
--#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
+-static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+-{
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 -
- int ipath_get_user_pages(unsigned long, size_t, struct page **);
- void ipath_release_user_pages(struct page **, size_t);
- void ipath_release_user_pages_on_close(struct page **, size_t);
-@@ -863,7 +913,7 @@ static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
- 	return readl(regno + (u64 __iomem *)
- 		     (dd->ipath_uregbase +
- 		      (char __iomem *)dd->ipath_kregbase +
--		      dd->ipath_palign * port));
-+		      dd->ipath_ureg_align * port));
- }
- 
- /**
-@@ -880,7 +930,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd,
- {
- 	u64 __iomem *ubase = (u64 __iomem *)
- 		(dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
--		 dd->ipath_palign * port);
-+		 dd->ipath_ureg_align * port);
- 	if (dd->ipath_kregbase)
- 		writeq(value, &ubase[regno]);
- }
-@@ -930,6 +980,53 @@ static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
- 		      (char __iomem *)dd->ipath_kregbase));
- }
- 
-+static inline void ipath_write_creg(const struct ipath_devdata *dd,
-+				    ipath_creg regno, u64 value)
-+{
-+	if (dd->ipath_kregbase)
-+		writeq(value, regno + (u64 __iomem *)
-+		       (dd->ipath_cregbase +
-+			(char __iomem *)dd->ipath_kregbase));
-+}
-+
-+static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
-+{
-+	*((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
-+}
-+
-+static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
-+{
-+	return (u32) le64_to_cpu(*((volatile __le64 *)
-+				pd->port_rcvhdrtail_kvaddr));
-+}
-+
-+static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
-+{
-+	return (dd->ipath_flags & IPATH_INTREG_64) ?
-+		ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
-+}
-+
-+/*
-+ * from contents of IBCStatus (or a saved copy), return linkstate
-+ * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
-+ * everywhere, anyway (and should be, for almost all purposes).
-+ */
-+static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-+{
-+	u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
-+		INFINIPATH_IBCS_LINKSTATE_MASK;
-+	if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
-+		state = INFINIPATH_IBCS_L_STATE_ACTIVE;
-+	return state;
-+}
-+
-+/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
-+static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
-+{
-+	return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-+		dd->ibcs_lts_mask;
-+}
-+
- /*
-  * sysfs interface.
-  */
-@@ -938,8 +1035,7 @@ struct device_driver;
- 
- extern const char ib_ipath_version[];
- 
--int ipath_driver_create_group(struct device_driver *);
--void ipath_driver_remove_group(struct device_driver *);
-+extern struct attribute_group *ipath_driver_attr_groups[];
- 
- int ipath_device_create_group(struct device *, struct ipath_devdata *);
- void ipath_device_remove_group(struct device *, struct ipath_devdata *);
-diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
-index 85a4aef..8f32b17 100644
---- a/drivers/infiniband/hw/ipath/ipath_keys.c
-+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
-@@ -128,9 +128,8 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
- 	int ret;
- 
- 	/*
--	 * We use LKEY == zero to mean a physical kmalloc() address.
--	 * This is a bit of a hack since we rely on dma_map_single()
--	 * being reversible by calling bus_to_virt().
-+	 * We use LKEY == zero for kernel virtual addresses
-+	 * (see ipath_get_dma_mr and ipath_dma.c).
- 	 */
- 	if (sge->lkey == 0) {
- 		struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
-index 3d1432d..d98d5f1 100644
---- a/drivers/infiniband/hw/ipath/ipath_mad.c
-+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
-@@ -934,6 +934,7 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
- 	struct ib_pma_portsamplescontrol *p =
- 		(struct ib_pma_portsamplescontrol *)pmp->data;
- 	struct ipath_ibdev *dev = to_idev(ibdev);
-+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
- 	unsigned long flags;
- 	u8 port_select = p->port_select;
- 
-@@ -955,7 +956,10 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
- 	p->counter_width = 4;	/* 32 bit counters */
- 	p->counter_mask0_9 = COUNTER_MASK0_9;
- 	spin_lock_irqsave(&dev->pending_lock, flags);
--	p->sample_status = dev->pma_sample_status;
-+	if (crp->cr_psstat)
-+		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-+	else
-+		p->sample_status = dev->pma_sample_status;
- 	p->sample_start = cpu_to_be32(dev->pma_sample_start);
- 	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
- 	p->tag = cpu_to_be16(dev->pma_tag);
-@@ -975,8 +979,9 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
- 	struct ib_pma_portsamplescontrol *p =
- 		(struct ib_pma_portsamplescontrol *)pmp->data;
- 	struct ipath_ibdev *dev = to_idev(ibdev);
-+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
- 	unsigned long flags;
--	u32 start;
-+	u8 status;
- 	int ret;
- 
- 	if (pmp->attr_mod != 0 ||
-@@ -986,59 +991,67 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
- 		goto bail;
- 	}
- 
--	start = be32_to_cpu(p->sample_start);
--	if (start != 0) {
--		spin_lock_irqsave(&dev->pending_lock, flags);
--		if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_DONE) {
--			dev->pma_sample_status =
--				IB_PMA_SAMPLE_STATUS_STARTED;
--			dev->pma_sample_start = start;
--			dev->pma_sample_interval =
--				be32_to_cpu(p->sample_interval);
--			dev->pma_tag = be16_to_cpu(p->tag);
--			if (p->counter_select[0])
--				dev->pma_counter_select[0] =
--					p->counter_select[0];
--			if (p->counter_select[1])
--				dev->pma_counter_select[1] =
--					p->counter_select[1];
--			if (p->counter_select[2])
--				dev->pma_counter_select[2] =
--					p->counter_select[2];
--			if (p->counter_select[3])
--				dev->pma_counter_select[3] =
--					p->counter_select[3];
--			if (p->counter_select[4])
--				dev->pma_counter_select[4] =
--					p->counter_select[4];
+-	vmx_free_vmcs(vcpu);
+-	kfree(vmx->host_msrs);
+-	kfree(vmx->guest_msrs);
+-	kvm_vcpu_uninit(vcpu);
+-	kmem_cache_free(kvm_vcpu_cache, vmx);
+-}
+-
+-static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+-{
+-	int err;
+-	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+-	int cpu;
+-
+-	if (!vmx)
+-		return ERR_PTR(-ENOMEM);
+-
+-	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
+-	if (err)
+-		goto free_vcpu;
+-
+-	if (irqchip_in_kernel(kvm)) {
+-		err = kvm_create_lapic(&vmx->vcpu);
+-		if (err < 0)
+-			goto free_vcpu;
+-	}
+-
+-	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+-	if (!vmx->guest_msrs) {
+-		err = -ENOMEM;
+-		goto uninit_vcpu;
+-	}
+-
+-	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+-	if (!vmx->host_msrs)
+-		goto free_guest_msrs;
+-
+-	vmx->vmcs = alloc_vmcs();
+-	if (!vmx->vmcs)
+-		goto free_msrs;
+-
+-	vmcs_clear(vmx->vmcs);
+-
+-	cpu = get_cpu();
+-	vmx_vcpu_load(&vmx->vcpu, cpu);
+-	err = vmx_vcpu_setup(vmx);
+-	vmx_vcpu_put(&vmx->vcpu);
+-	put_cpu();
+-	if (err)
+-		goto free_vmcs;
+-
+-	return &vmx->vcpu;
+-
+-free_vmcs:
+-	free_vmcs(vmx->vmcs);
+-free_msrs:
+-	kfree(vmx->host_msrs);
+-free_guest_msrs:
+-	kfree(vmx->guest_msrs);
+-uninit_vcpu:
+-	kvm_vcpu_uninit(&vmx->vcpu);
+-free_vcpu:
+-	kmem_cache_free(kvm_vcpu_cache, vmx);
+-	return ERR_PTR(err);
+-}
+-
+-static void __init vmx_check_processor_compat(void *rtn)
+-{
+-	struct vmcs_config vmcs_conf;
+-
+-	*(int *)rtn = 0;
+-	if (setup_vmcs_config(&vmcs_conf) < 0)
+-		*(int *)rtn = -EIO;
+-	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
+-		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
+-				smp_processor_id());
+-		*(int *)rtn = -EIO;
+-	}
+-}
+-
+-static struct kvm_x86_ops vmx_x86_ops = {
+-	.cpu_has_kvm_support = cpu_has_kvm_support,
+-	.disabled_by_bios = vmx_disabled_by_bios,
+-	.hardware_setup = hardware_setup,
+-	.hardware_unsetup = hardware_unsetup,
+-	.check_processor_compatibility = vmx_check_processor_compat,
+-	.hardware_enable = hardware_enable,
+-	.hardware_disable = hardware_disable,
+-
+-	.vcpu_create = vmx_create_vcpu,
+-	.vcpu_free = vmx_free_vcpu,
+-	.vcpu_reset = vmx_vcpu_reset,
+-
+-	.prepare_guest_switch = vmx_save_host_state,
+-	.vcpu_load = vmx_vcpu_load,
+-	.vcpu_put = vmx_vcpu_put,
+-	.vcpu_decache = vmx_vcpu_decache,
+-
+-	.set_guest_debug = set_guest_debug,
+-	.guest_debug_pre = kvm_guest_debug_pre,
+-	.get_msr = vmx_get_msr,
+-	.set_msr = vmx_set_msr,
+-	.get_segment_base = vmx_get_segment_base,
+-	.get_segment = vmx_get_segment,
+-	.set_segment = vmx_set_segment,
+-	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+-	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
+-	.set_cr0 = vmx_set_cr0,
+-	.set_cr3 = vmx_set_cr3,
+-	.set_cr4 = vmx_set_cr4,
+-#ifdef CONFIG_X86_64
+-	.set_efer = vmx_set_efer,
+-#endif
+-	.get_idt = vmx_get_idt,
+-	.set_idt = vmx_set_idt,
+-	.get_gdt = vmx_get_gdt,
+-	.set_gdt = vmx_set_gdt,
+-	.cache_regs = vcpu_load_rsp_rip,
+-	.decache_regs = vcpu_put_rsp_rip,
+-	.get_rflags = vmx_get_rflags,
+-	.set_rflags = vmx_set_rflags,
+-
+-	.tlb_flush = vmx_flush_tlb,
+-	.inject_page_fault = vmx_inject_page_fault,
+-
+-	.inject_gp = vmx_inject_gp,
+-
+-	.run = vmx_vcpu_run,
+-	.handle_exit = kvm_handle_exit,
+-	.skip_emulated_instruction = skip_emulated_instruction,
+-	.patch_hypercall = vmx_patch_hypercall,
+-	.get_irq = vmx_get_irq,
+-	.set_irq = vmx_inject_irq,
+-	.inject_pending_irq = vmx_intr_assist,
+-	.inject_pending_vectors = do_interrupt_requests,
+-};
+-
+-static int __init vmx_init(void)
+-{
+-	void *iova;
+-	int r;
+-
+-	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+-	if (!vmx_io_bitmap_a)
+-		return -ENOMEM;
+-
+-	vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+-	if (!vmx_io_bitmap_b) {
+-		r = -ENOMEM;
+-		goto out;
+-	}
+-
+-	/*
+-	 * Allow direct access to the PC debug port (it is often used for I/O
+-	 * delays, but the vmexits simply slow things down).
+-	 */
+-	iova = kmap(vmx_io_bitmap_a);
+-	memset(iova, 0xff, PAGE_SIZE);
+-	clear_bit(0x80, iova);
+-	kunmap(vmx_io_bitmap_a);
+-
+-	iova = kmap(vmx_io_bitmap_b);
+-	memset(iova, 0xff, PAGE_SIZE);
+-	kunmap(vmx_io_bitmap_b);
+-
+-	r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+-	if (r)
+-		goto out1;
+-
+-	return 0;
+-
+-out1:
+-	__free_page(vmx_io_bitmap_b);
+-out:
+-	__free_page(vmx_io_bitmap_a);
+-	return r;
+-}
+-
+-static void __exit vmx_exit(void)
+-{
+-	__free_page(vmx_io_bitmap_b);
+-	__free_page(vmx_io_bitmap_a);
+-
+-	kvm_exit_x86();
+-}
+-
+-module_init(vmx_init)
+-module_exit(vmx_exit)
+diff --git a/drivers/kvm/vmx.h b/drivers/kvm/vmx.h
+deleted file mode 100644
+index fd4e146..0000000
+--- a/drivers/kvm/vmx.h
++++ /dev/null
+@@ -1,310 +0,0 @@
+-#ifndef VMX_H
+-#define VMX_H
+-
+-/*
+- * vmx.h: VMX Architecture related definitions
+- * Copyright (c) 2004, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+- * Place - Suite 330, Boston, MA 02111-1307 USA.
+- *
+- * A few random additions are:
+- * Copyright (C) 2006 Qumranet
+- *    Avi Kivity <avi at qumranet.com>
+- *    Yaniv Kamay <yaniv at qumranet.com>
+- *
+- */
+-
+-#define CPU_BASED_VIRTUAL_INTR_PENDING          0x00000004
+-#define CPU_BASED_USE_TSC_OFFSETING             0x00000008
+-#define CPU_BASED_HLT_EXITING                   0x00000080
+-#define CPU_BASED_INVLPG_EXITING                0x00000200
+-#define CPU_BASED_MWAIT_EXITING                 0x00000400
+-#define CPU_BASED_RDPMC_EXITING                 0x00000800
+-#define CPU_BASED_RDTSC_EXITING                 0x00001000
+-#define CPU_BASED_CR8_LOAD_EXITING              0x00080000
+-#define CPU_BASED_CR8_STORE_EXITING             0x00100000
+-#define CPU_BASED_TPR_SHADOW                    0x00200000
+-#define CPU_BASED_MOV_DR_EXITING                0x00800000
+-#define CPU_BASED_UNCOND_IO_EXITING             0x01000000
+-#define CPU_BASED_USE_IO_BITMAPS                0x02000000
+-#define CPU_BASED_USE_MSR_BITMAPS               0x10000000
+-#define CPU_BASED_MONITOR_EXITING               0x20000000
+-#define CPU_BASED_PAUSE_EXITING                 0x40000000
+-#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS   0x80000000
+-
+-#define PIN_BASED_EXT_INTR_MASK                 0x00000001
+-#define PIN_BASED_NMI_EXITING                   0x00000008
+-#define PIN_BASED_VIRTUAL_NMIS                  0x00000020
+-
+-#define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
+-#define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
+-
+-#define VM_ENTRY_IA32E_MODE                     0x00000200
+-#define VM_ENTRY_SMM                            0x00000400
+-#define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
+-
+-#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
+-
+-/* VMCS Encodings */
+-enum vmcs_field {
+-	GUEST_ES_SELECTOR               = 0x00000800,
+-	GUEST_CS_SELECTOR               = 0x00000802,
+-	GUEST_SS_SELECTOR               = 0x00000804,
+-	GUEST_DS_SELECTOR               = 0x00000806,
+-	GUEST_FS_SELECTOR               = 0x00000808,
+-	GUEST_GS_SELECTOR               = 0x0000080a,
+-	GUEST_LDTR_SELECTOR             = 0x0000080c,
+-	GUEST_TR_SELECTOR               = 0x0000080e,
+-	HOST_ES_SELECTOR                = 0x00000c00,
+-	HOST_CS_SELECTOR                = 0x00000c02,
+-	HOST_SS_SELECTOR                = 0x00000c04,
+-	HOST_DS_SELECTOR                = 0x00000c06,
+-	HOST_FS_SELECTOR                = 0x00000c08,
+-	HOST_GS_SELECTOR                = 0x00000c0a,
+-	HOST_TR_SELECTOR                = 0x00000c0c,
+-	IO_BITMAP_A                     = 0x00002000,
+-	IO_BITMAP_A_HIGH                = 0x00002001,
+-	IO_BITMAP_B                     = 0x00002002,
+-	IO_BITMAP_B_HIGH                = 0x00002003,
+-	MSR_BITMAP                      = 0x00002004,
+-	MSR_BITMAP_HIGH                 = 0x00002005,
+-	VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
+-	VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
+-	VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
+-	VM_EXIT_MSR_LOAD_ADDR_HIGH      = 0x00002009,
+-	VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
+-	VM_ENTRY_MSR_LOAD_ADDR_HIGH     = 0x0000200b,
+-	TSC_OFFSET                      = 0x00002010,
+-	TSC_OFFSET_HIGH                 = 0x00002011,
+-	VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
+-	VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
+-	VMCS_LINK_POINTER               = 0x00002800,
+-	VMCS_LINK_POINTER_HIGH          = 0x00002801,
+-	GUEST_IA32_DEBUGCTL             = 0x00002802,
+-	GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
+-	PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
+-	CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
+-	EXCEPTION_BITMAP                = 0x00004004,
+-	PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
+-	PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
+-	CR3_TARGET_COUNT                = 0x0000400a,
+-	VM_EXIT_CONTROLS                = 0x0000400c,
+-	VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
+-	VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
+-	VM_ENTRY_CONTROLS               = 0x00004012,
+-	VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
+-	VM_ENTRY_INTR_INFO_FIELD        = 0x00004016,
+-	VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
+-	VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
+-	TPR_THRESHOLD                   = 0x0000401c,
+-	SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
+-	VM_INSTRUCTION_ERROR            = 0x00004400,
+-	VM_EXIT_REASON                  = 0x00004402,
+-	VM_EXIT_INTR_INFO               = 0x00004404,
+-	VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
+-	IDT_VECTORING_INFO_FIELD        = 0x00004408,
+-	IDT_VECTORING_ERROR_CODE        = 0x0000440a,
+-	VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
+-	VMX_INSTRUCTION_INFO            = 0x0000440e,
+-	GUEST_ES_LIMIT                  = 0x00004800,
+-	GUEST_CS_LIMIT                  = 0x00004802,
+-	GUEST_SS_LIMIT                  = 0x00004804,
+-	GUEST_DS_LIMIT                  = 0x00004806,
+-	GUEST_FS_LIMIT                  = 0x00004808,
+-	GUEST_GS_LIMIT                  = 0x0000480a,
+-	GUEST_LDTR_LIMIT                = 0x0000480c,
+-	GUEST_TR_LIMIT                  = 0x0000480e,
+-	GUEST_GDTR_LIMIT                = 0x00004810,
+-	GUEST_IDTR_LIMIT                = 0x00004812,
+-	GUEST_ES_AR_BYTES               = 0x00004814,
+-	GUEST_CS_AR_BYTES               = 0x00004816,
+-	GUEST_SS_AR_BYTES               = 0x00004818,
+-	GUEST_DS_AR_BYTES               = 0x0000481a,
+-	GUEST_FS_AR_BYTES               = 0x0000481c,
+-	GUEST_GS_AR_BYTES               = 0x0000481e,
+-	GUEST_LDTR_AR_BYTES             = 0x00004820,
+-	GUEST_TR_AR_BYTES               = 0x00004822,
+-	GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
+-	GUEST_ACTIVITY_STATE            = 0X00004826,
+-	GUEST_SYSENTER_CS               = 0x0000482A,
+-	HOST_IA32_SYSENTER_CS           = 0x00004c00,
+-	CR0_GUEST_HOST_MASK             = 0x00006000,
+-	CR4_GUEST_HOST_MASK             = 0x00006002,
+-	CR0_READ_SHADOW                 = 0x00006004,
+-	CR4_READ_SHADOW                 = 0x00006006,
+-	CR3_TARGET_VALUE0               = 0x00006008,
+-	CR3_TARGET_VALUE1               = 0x0000600a,
+-	CR3_TARGET_VALUE2               = 0x0000600c,
+-	CR3_TARGET_VALUE3               = 0x0000600e,
+-	EXIT_QUALIFICATION              = 0x00006400,
+-	GUEST_LINEAR_ADDRESS            = 0x0000640a,
+-	GUEST_CR0                       = 0x00006800,
+-	GUEST_CR3                       = 0x00006802,
+-	GUEST_CR4                       = 0x00006804,
+-	GUEST_ES_BASE                   = 0x00006806,
+-	GUEST_CS_BASE                   = 0x00006808,
+-	GUEST_SS_BASE                   = 0x0000680a,
+-	GUEST_DS_BASE                   = 0x0000680c,
+-	GUEST_FS_BASE                   = 0x0000680e,
+-	GUEST_GS_BASE                   = 0x00006810,
+-	GUEST_LDTR_BASE                 = 0x00006812,
+-	GUEST_TR_BASE                   = 0x00006814,
+-	GUEST_GDTR_BASE                 = 0x00006816,
+-	GUEST_IDTR_BASE                 = 0x00006818,
+-	GUEST_DR7                       = 0x0000681a,
+-	GUEST_RSP                       = 0x0000681c,
+-	GUEST_RIP                       = 0x0000681e,
+-	GUEST_RFLAGS                    = 0x00006820,
+-	GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
+-	GUEST_SYSENTER_ESP              = 0x00006824,
+-	GUEST_SYSENTER_EIP              = 0x00006826,
+-	HOST_CR0                        = 0x00006c00,
+-	HOST_CR3                        = 0x00006c02,
+-	HOST_CR4                        = 0x00006c04,
+-	HOST_FS_BASE                    = 0x00006c06,
+-	HOST_GS_BASE                    = 0x00006c08,
+-	HOST_TR_BASE                    = 0x00006c0a,
+-	HOST_GDTR_BASE                  = 0x00006c0c,
+-	HOST_IDTR_BASE                  = 0x00006c0e,
+-	HOST_IA32_SYSENTER_ESP          = 0x00006c10,
+-	HOST_IA32_SYSENTER_EIP          = 0x00006c12,
+-	HOST_RSP                        = 0x00006c14,
+-	HOST_RIP                        = 0x00006c16,
+-};
+-
+-#define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000
+-
+-#define EXIT_REASON_EXCEPTION_NMI       0
+-#define EXIT_REASON_EXTERNAL_INTERRUPT  1
+-#define EXIT_REASON_TRIPLE_FAULT        2
+-
+-#define EXIT_REASON_PENDING_INTERRUPT   7
+-
+-#define EXIT_REASON_TASK_SWITCH         9
+-#define EXIT_REASON_CPUID               10
+-#define EXIT_REASON_HLT                 12
+-#define EXIT_REASON_INVLPG              14
+-#define EXIT_REASON_RDPMC               15
+-#define EXIT_REASON_RDTSC               16
+-#define EXIT_REASON_VMCALL              18
+-#define EXIT_REASON_VMCLEAR             19
+-#define EXIT_REASON_VMLAUNCH            20
+-#define EXIT_REASON_VMPTRLD             21
+-#define EXIT_REASON_VMPTRST             22
+-#define EXIT_REASON_VMREAD              23
+-#define EXIT_REASON_VMRESUME            24
+-#define EXIT_REASON_VMWRITE             25
+-#define EXIT_REASON_VMOFF               26
+-#define EXIT_REASON_VMON                27
+-#define EXIT_REASON_CR_ACCESS           28
+-#define EXIT_REASON_DR_ACCESS           29
+-#define EXIT_REASON_IO_INSTRUCTION      30
+-#define EXIT_REASON_MSR_READ            31
+-#define EXIT_REASON_MSR_WRITE           32
+-#define EXIT_REASON_MWAIT_INSTRUCTION   36
+-#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
+-
+-/*
+- * Interruption-information format
+- */
+-#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
+-#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
+-#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
+-#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
+-
+-#define VECTORING_INFO_VECTOR_MASK           	INTR_INFO_VECTOR_MASK
+-#define VECTORING_INFO_TYPE_MASK        	INTR_INFO_INTR_TYPE_MASK
+-#define VECTORING_INFO_DELIEVER_CODE_MASK    	INTR_INFO_DELIEVER_CODE_MASK
+-#define VECTORING_INFO_VALID_MASK       	INTR_INFO_VALID_MASK
+-
+-#define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
+-#define INTR_TYPE_EXCEPTION             (3 << 8) /* processor exception */
+-
+-/*
+- * Exit Qualifications for MOV for Control Register Access
+- */
+-#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
+-#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
+-#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
+-#define LMSW_SOURCE_DATA_SHIFT 16
+-#define LMSW_SOURCE_DATA  (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
+-#define REG_EAX                         (0 << 8)
+-#define REG_ECX                         (1 << 8)
+-#define REG_EDX                         (2 << 8)
+-#define REG_EBX                         (3 << 8)
+-#define REG_ESP                         (4 << 8)
+-#define REG_EBP                         (5 << 8)
+-#define REG_ESI                         (6 << 8)
+-#define REG_EDI                         (7 << 8)
+-#define REG_R8                         (8 << 8)
+-#define REG_R9                         (9 << 8)
+-#define REG_R10                        (10 << 8)
+-#define REG_R11                        (11 << 8)
+-#define REG_R12                        (12 << 8)
+-#define REG_R13                        (13 << 8)
+-#define REG_R14                        (14 << 8)
+-#define REG_R15                        (15 << 8)
+-
+-/*
+- * Exit Qualifications for MOV for Debug Register Access
+- */
+-#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
+-#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
+-#define TYPE_MOV_TO_DR                  (0 << 4)
+-#define TYPE_MOV_FROM_DR                (1 << 4)
+-#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */
+-
+-
+-/* segment AR */
+-#define SEGMENT_AR_L_MASK (1 << 13)
+-
+-#define AR_TYPE_ACCESSES_MASK 1
+-#define AR_TYPE_READABLE_MASK (1 << 1)
+-#define AR_TYPE_WRITEABLE_MASK (1 << 2)
+-#define AR_TYPE_CODE_MASK (1 << 3)
+-#define AR_TYPE_MASK 0x0f
+-#define AR_TYPE_BUSY_64_TSS 11
+-#define AR_TYPE_BUSY_32_TSS 11
+-#define AR_TYPE_BUSY_16_TSS 3
+-#define AR_TYPE_LDT 2
+-
+-#define AR_UNUSABLE_MASK (1 << 16)
+-#define AR_S_MASK (1 << 4)
+-#define AR_P_MASK (1 << 7)
+-#define AR_L_MASK (1 << 13)
+-#define AR_DB_MASK (1 << 14)
+-#define AR_G_MASK (1 << 15)
+-#define AR_DPL_SHIFT 5
+-#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)
+-
+-#define AR_RESERVD_MASK 0xfffe0f00
+-
+-#define MSR_IA32_VMX_BASIC                      0x480
+-#define MSR_IA32_VMX_PINBASED_CTLS              0x481
+-#define MSR_IA32_VMX_PROCBASED_CTLS             0x482
+-#define MSR_IA32_VMX_EXIT_CTLS                  0x483
+-#define MSR_IA32_VMX_ENTRY_CTLS                 0x484
+-#define MSR_IA32_VMX_MISC                       0x485
+-#define MSR_IA32_VMX_CR0_FIXED0                 0x486
+-#define MSR_IA32_VMX_CR0_FIXED1                 0x487
+-#define MSR_IA32_VMX_CR4_FIXED0                 0x488
+-#define MSR_IA32_VMX_CR4_FIXED1                 0x489
+-#define MSR_IA32_VMX_VMCS_ENUM                  0x48a
+-#define MSR_IA32_VMX_PROCBASED_CTLS2            0x48b
+-
+-#define MSR_IA32_FEATURE_CONTROL                0x3a
+-#define MSR_IA32_FEATURE_CONTROL_LOCKED         0x1
+-#define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED  0x4
+-
+-#endif
+diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
+deleted file mode 100644
+index bd46de6..0000000
+--- a/drivers/kvm/x86_emulate.c
++++ /dev/null
+@@ -1,1662 +0,0 @@
+-/******************************************************************************
+- * x86_emulate.c
+- *
+- * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
+- *
+- * Copyright (c) 2005 Keir Fraser
+- *
+- * Linux coding style, mod r/m decoder, segment base fixes, real-mode
+- * privileged instructions:
+- *
+- * Copyright (C) 2006 Qumranet
+- *
+- *   Avi Kivity <avi at qumranet.com>
+- *   Yaniv Kamay <yaniv at qumranet.com>
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2.  See
+- * the COPYING file in the top-level directory.
+- *
+- * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
+- */
+-
+-#ifndef __KERNEL__
+-#include <stdio.h>
+-#include <stdint.h>
+-#include <public/xen.h>
+-#define DPRINTF(_f, _a ...) printf( _f , ## _a )
+-#else
+-#include "kvm.h"
+-#define DPRINTF(x...) do {} while (0)
+-#endif
+-#include "x86_emulate.h"
+-#include <linux/module.h>
+-
+-/*
+- * Opcode effective-address decode tables.
+- * Note that we only emulate instructions that have at least one memory
+- * operand (excluding implicit stack references). We assume that stack
+- * references and instruction fetches will never occur in special memory
+- * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
+- * not be handled.
+- */
+-
+-/* Operand sizes: 8-bit operands or specified/overridden size. */
+-#define ByteOp      (1<<0)	/* 8-bit operands. */
+-/* Destination operand type. */
+-#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
+-#define DstReg      (2<<1)	/* Register operand. */
+-#define DstMem      (3<<1)	/* Memory operand. */
+-#define DstMask     (3<<1)
+-/* Source operand type. */
+-#define SrcNone     (0<<3)	/* No source operand. */
+-#define SrcImplicit (0<<3)	/* Source operand is implicit in the opcode. */
+-#define SrcReg      (1<<3)	/* Register operand. */
+-#define SrcMem      (2<<3)	/* Memory operand. */
+-#define SrcMem16    (3<<3)	/* Memory operand (16-bit). */
+-#define SrcMem32    (4<<3)	/* Memory operand (32-bit). */
+-#define SrcImm      (5<<3)	/* Immediate operand. */
+-#define SrcImmByte  (6<<3)	/* 8-bit sign-extended immediate operand. */
+-#define SrcMask     (7<<3)
+-/* Generic ModRM decode. */
+-#define ModRM       (1<<6)
+-/* Destination is only written; never read. */
+-#define Mov         (1<<7)
+-#define BitOp       (1<<8)
+-
+-static u8 opcode_table[256] = {
+-	/* 0x00 - 0x07 */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
+-	0, 0, 0, 0,
+-	/* 0x08 - 0x0F */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
+-	0, 0, 0, 0,
+-	/* 0x10 - 0x17 */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
+-	0, 0, 0, 0,
+-	/* 0x18 - 0x1F */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
+-	0, 0, 0, 0,
+-	/* 0x20 - 0x27 */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
+-	SrcImmByte, SrcImm, 0, 0,
+-	/* 0x28 - 0x2F */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
+-	0, 0, 0, 0,
+-	/* 0x30 - 0x37 */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
+-	0, 0, 0, 0,
+-	/* 0x38 - 0x3F */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
+-	0, 0, 0, 0,
+-	/* 0x40 - 0x4F */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0x50 - 0x57 */
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	/* 0x58 - 0x5F */
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	/* 0x60 - 0x67 */
+-	0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
+-	0, 0, 0, 0,
+-	/* 0x68 - 0x6F */
+-	0, 0, ImplicitOps|Mov, 0,
+-	SrcNone  | ByteOp  | ImplicitOps, SrcNone  | ImplicitOps, /* insb, insw/insd */
+-	SrcNone  | ByteOp  | ImplicitOps, SrcNone  | ImplicitOps, /* outsb, outsw/outsd */
+-	/* 0x70 - 0x77 */
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	/* 0x78 - 0x7F */
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	/* 0x80 - 0x87 */
+-	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+-	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
+-	/* 0x88 - 0x8F */
+-	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
+-	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	0, ModRM | DstReg, 0, DstMem | SrcNone | ModRM | Mov,
+-	/* 0x90 - 0x9F */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps, ImplicitOps, 0, 0,
+-	/* 0xA0 - 0xA7 */
+-	ByteOp | DstReg | SrcMem | Mov, DstReg | SrcMem | Mov,
+-	ByteOp | DstMem | SrcReg | Mov, DstMem | SrcReg | Mov,
+-	ByteOp | ImplicitOps | Mov, ImplicitOps | Mov,
+-	ByteOp | ImplicitOps, ImplicitOps,
+-	/* 0xA8 - 0xAF */
+-	0, 0, ByteOp | ImplicitOps | Mov, ImplicitOps | Mov,
+-	ByteOp | ImplicitOps | Mov, ImplicitOps | Mov,
+-	ByteOp | ImplicitOps, ImplicitOps,
+-	/* 0xB0 - 0xBF */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0xC0 - 0xC7 */
+-	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
+-	0, ImplicitOps, 0, 0,
+-	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
+-	/* 0xC8 - 0xCF */
+-	0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0xD0 - 0xD7 */
+-	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
+-	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
+-	0, 0, 0, 0,
+-	/* 0xD8 - 0xDF */
+-	0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0xE0 - 0xE7 */
+-	0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0xE8 - 0xEF */
+-	ImplicitOps, SrcImm|ImplicitOps, 0, SrcImmByte|ImplicitOps, 0, 0, 0, 0,
+-	/* 0xF0 - 0xF7 */
+-	0, 0, 0, 0,
+-	ImplicitOps, 0,
+-	ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+-	/* 0xF8 - 0xFF */
+-	0, 0, 0, 0,
+-	0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
+-};
+-
+-static u16 twobyte_table[256] = {
+-	/* 0x00 - 0x0F */
+-	0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
+-	ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
+-	/* 0x10 - 0x1F */
+-	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0x20 - 0x2F */
+-	ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
+-	0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0x30 - 0x3F */
+-	ImplicitOps, 0, ImplicitOps, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0x40 - 0x47 */
+-	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	/* 0x48 - 0x4F */
+-	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
+-	/* 0x50 - 0x5F */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0x60 - 0x6F */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0x70 - 0x7F */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0x80 - 0x8F */
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+-	/* 0x90 - 0x9F */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0xA0 - 0xA7 */
+-	0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0,
+-	/* 0xA8 - 0xAF */
+-	0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0,
+-	/* 0xB0 - 0xB7 */
+-	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
+-	    DstMem | SrcReg | ModRM | BitOp,
+-	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
+-	    DstReg | SrcMem16 | ModRM | Mov,
+-	/* 0xB8 - 0xBF */
+-	0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp,
+-	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
+-	    DstReg | SrcMem16 | ModRM | Mov,
+-	/* 0xC0 - 0xCF */
+-	0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
+-	0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0xD0 - 0xDF */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0xE0 - 0xEF */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-	/* 0xF0 - 0xFF */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+-};
+-
+-/* Type, address-of, and value of an instruction's operand. */
+-struct operand {
+-	enum { OP_REG, OP_MEM, OP_IMM } type;
+-	unsigned int bytes;
+-	unsigned long val, orig_val, *ptr;
+-};
+-
+-/* EFLAGS bit definitions. */
+-#define EFLG_OF (1<<11)
+-#define EFLG_DF (1<<10)
+-#define EFLG_SF (1<<7)
+-#define EFLG_ZF (1<<6)
+-#define EFLG_AF (1<<4)
+-#define EFLG_PF (1<<2)
+-#define EFLG_CF (1<<0)
+-
+-/*
+- * Instruction emulation:
+- * Most instructions are emulated directly via a fragment of inline assembly
+- * code. This allows us to save/restore EFLAGS and thus very easily pick up
+- * any modified flags.
+- */
+-
+-#if defined(CONFIG_X86_64)
+-#define _LO32 "k"		/* force 32-bit operand */
+-#define _STK  "%%rsp"		/* stack pointer */
+-#elif defined(__i386__)
+-#define _LO32 ""		/* force 32-bit operand */
+-#define _STK  "%%esp"		/* stack pointer */
+-#endif
+-
+-/*
+- * These EFLAGS bits are restored from saved value during emulation, and
+- * any changes are written back to the saved value after emulation.
+- */
+-#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
+-
+-/* Before executing instruction: restore necessary bits in EFLAGS. */
+-#define _PRE_EFLAGS(_sav, _msk, _tmp) \
+-	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); */	\
+-	"push %"_sav"; "					\
+-	"movl %"_msk",%"_LO32 _tmp"; "				\
+-	"andl %"_LO32 _tmp",("_STK"); "				\
+-	"pushf; "						\
+-	"notl %"_LO32 _tmp"; "					\
+-	"andl %"_LO32 _tmp",("_STK"); "				\
+-	"pop  %"_tmp"; "					\
+-	"orl  %"_LO32 _tmp",("_STK"); "				\
+-	"popf; "						\
+-	/* _sav &= ~msk; */					\
+-	"movl %"_msk",%"_LO32 _tmp"; "				\
+-	"notl %"_LO32 _tmp"; "					\
+-	"andl %"_LO32 _tmp",%"_sav"; "
+-
+-/* After executing instruction: write-back necessary bits in EFLAGS. */
+-#define _POST_EFLAGS(_sav, _msk, _tmp) \
+-	/* _sav |= EFLAGS & _msk; */		\
+-	"pushf; "				\
+-	"pop  %"_tmp"; "			\
+-	"andl %"_msk",%"_LO32 _tmp"; "		\
+-	"orl  %"_LO32 _tmp",%"_sav"; "
+-
+-/* Raw emulation: instruction has two explicit operands. */
+-#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
+-	do { 								    \
+-		unsigned long _tmp;					    \
+-									    \
+-		switch ((_dst).bytes) {					    \
+-		case 2:							    \
+-			__asm__ __volatile__ (				    \
+-				_PRE_EFLAGS("0","4","2")		    \
+-				_op"w %"_wx"3,%1; "			    \
+-				_POST_EFLAGS("0","4","2")		    \
+-				: "=m" (_eflags), "=m" ((_dst).val),        \
+-				  "=&r" (_tmp)				    \
+-				: _wy ((_src).val), "i" (EFLAGS_MASK) );    \
+-			break;						    \
+-		case 4:							    \
+-			__asm__ __volatile__ (				    \
+-				_PRE_EFLAGS("0","4","2")		    \
+-				_op"l %"_lx"3,%1; "			    \
+-				_POST_EFLAGS("0","4","2")		    \
+-				: "=m" (_eflags), "=m" ((_dst).val),	    \
+-				  "=&r" (_tmp)				    \
+-				: _ly ((_src).val), "i" (EFLAGS_MASK) );    \
+-			break;						    \
+-		case 8:							    \
+-			__emulate_2op_8byte(_op, _src, _dst,		    \
+-					    _eflags, _qx, _qy);		    \
+-			break;						    \
+-		}							    \
+-	} while (0)
+-
+-#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
+-	do {								     \
+-		unsigned long _tmp;					     \
+-		switch ( (_dst).bytes )					     \
+-		{							     \
+-		case 1:							     \
+-			__asm__ __volatile__ (				     \
+-				_PRE_EFLAGS("0","4","2")		     \
+-				_op"b %"_bx"3,%1; "			     \
+-				_POST_EFLAGS("0","4","2")		     \
+-				: "=m" (_eflags), "=m" ((_dst).val),	     \
+-				  "=&r" (_tmp)				     \
+-				: _by ((_src).val), "i" (EFLAGS_MASK) );     \
+-			break;						     \
+-		default:						     \
+-			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
+-					     _wx, _wy, _lx, _ly, _qx, _qy);  \
+-			break;						     \
+-		}							     \
+-	} while (0)
+-
+-/* Source operand is byte-sized and may be restricted to just %cl. */
+-#define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
+-	__emulate_2op(_op, _src, _dst, _eflags,				\
+-		      "b", "c", "b", "c", "b", "c", "b", "c")
+-
+-/* Source operand is byte, word, long or quad sized. */
+-#define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
+-	__emulate_2op(_op, _src, _dst, _eflags,				\
+-		      "b", "q", "w", "r", _LO32, "r", "", "r")
+-
+-/* Source operand is word, long or quad sized. */
+-#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
+-	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
+-			     "w", "r", _LO32, "r", "", "r")
+-
+-/* Instruction has only one explicit operand (no source operand). */
+-#define emulate_1op(_op, _dst, _eflags)                                    \
+-	do {								\
+-		unsigned long _tmp;					\
+-									\
+-		switch ( (_dst).bytes )					\
+-		{							\
+-		case 1:							\
+-			__asm__ __volatile__ (				\
+-				_PRE_EFLAGS("0","3","2")		\
+-				_op"b %1; "				\
+-				_POST_EFLAGS("0","3","2")		\
+-				: "=m" (_eflags), "=m" ((_dst).val),	\
+-				  "=&r" (_tmp)				\
+-				: "i" (EFLAGS_MASK) );			\
+-			break;						\
+-		case 2:							\
+-			__asm__ __volatile__ (				\
+-				_PRE_EFLAGS("0","3","2")		\
+-				_op"w %1; "				\
+-				_POST_EFLAGS("0","3","2")		\
+-				: "=m" (_eflags), "=m" ((_dst).val),	\
+-				  "=&r" (_tmp)				\
+-				: "i" (EFLAGS_MASK) );			\
+-			break;						\
+-		case 4:							\
+-			__asm__ __volatile__ (				\
+-				_PRE_EFLAGS("0","3","2")		\
+-				_op"l %1; "				\
+-				_POST_EFLAGS("0","3","2")		\
+-				: "=m" (_eflags), "=m" ((_dst).val),	\
+-				  "=&r" (_tmp)				\
+-				: "i" (EFLAGS_MASK) );			\
+-			break;						\
+-		case 8:							\
+-			__emulate_1op_8byte(_op, _dst, _eflags);	\
+-			break;						\
+-		}							\
+-	} while (0)
+-
+-/* Emulate an instruction with quadword operands (x86/64 only). */
+-#if defined(CONFIG_X86_64)
+-#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy)           \
+-	do {								  \
+-		__asm__ __volatile__ (					  \
+-			_PRE_EFLAGS("0","4","2")			  \
+-			_op"q %"_qx"3,%1; "				  \
+-			_POST_EFLAGS("0","4","2")			  \
+-			: "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \
+-			: _qy ((_src).val), "i" (EFLAGS_MASK) );	  \
+-	} while (0)
+-
+-#define __emulate_1op_8byte(_op, _dst, _eflags)                           \
+-	do {								  \
+-		__asm__ __volatile__ (					  \
+-			_PRE_EFLAGS("0","3","2")			  \
+-			_op"q %1; "					  \
+-			_POST_EFLAGS("0","3","2")			  \
+-			: "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \
+-			: "i" (EFLAGS_MASK) );				  \
+-	} while (0)
+-
+-#elif defined(__i386__)
+-#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy)
+-#define __emulate_1op_8byte(_op, _dst, _eflags)
+-#endif				/* __i386__ */
+-
+-/* Fetch next part of the instruction being emulated. */
+-#define insn_fetch(_type, _size, _eip)                                  \
+-({	unsigned long _x;						\
+-	rc = ops->read_std((unsigned long)(_eip) + ctxt->cs_base, &_x,	\
+-                                                  (_size), ctxt->vcpu); \
+-	if ( rc != 0 )							\
+-		goto done;						\
+-	(_eip) += (_size);						\
+-	(_type)_x;							\
+-})
+-
+-/* Access/update address held in a register, based on addressing mode. */
+-#define address_mask(reg)						\
+-	((ad_bytes == sizeof(unsigned long)) ? 				\
+-		(reg) :	((reg) & ((1UL << (ad_bytes << 3)) - 1)))
+-#define register_address(base, reg)                                     \
+-	((base) + address_mask(reg))
+-#define register_address_increment(reg, inc)                            \
+-	do {								\
+-		/* signed type ensures sign extension to long */        \
+-		int _inc = (inc);					\
+-		if ( ad_bytes == sizeof(unsigned long) )		\
+-			(reg) += _inc;					\
+-		else							\
+-			(reg) = ((reg) & ~((1UL << (ad_bytes << 3)) - 1)) | \
+-			   (((reg) + _inc) & ((1UL << (ad_bytes << 3)) - 1)); \
+-	} while (0)
+-
+-#define JMP_REL(rel) 							\
+-	do {								\
+-		register_address_increment(_eip, rel);			\
+-	} while (0)
+-
+-/*
+- * Given the 'reg' portion of a ModRM byte, and a register block, return a
+- * pointer into the block that addresses the relevant register.
+- * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
+- */
+-static void *decode_register(u8 modrm_reg, unsigned long *regs,
+-			     int highbyte_regs)
+-{
+-	void *p;
+-
+-	p = &regs[modrm_reg];
+-	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
+-		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
+-	return p;
+-}
+-
+-static int read_descriptor(struct x86_emulate_ctxt *ctxt,
+-			   struct x86_emulate_ops *ops,
+-			   void *ptr,
+-			   u16 *size, unsigned long *address, int op_bytes)
+-{
+-	int rc;
+-
+-	if (op_bytes == 2)
+-		op_bytes = 3;
+-	*address = 0;
+-	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
+-			   ctxt->vcpu);
+-	if (rc)
+-		return rc;
+-	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
+-			   ctxt->vcpu);
+-	return rc;
+-}
+-
+-static int test_cc(unsigned int condition, unsigned int flags)
+-{
+-	int rc = 0;
+-
+-	switch ((condition & 15) >> 1) {
+-	case 0: /* o */
+-		rc |= (flags & EFLG_OF);
+-		break;
+-	case 1: /* b/c/nae */
+-		rc |= (flags & EFLG_CF);
+-		break;
+-	case 2: /* z/e */
+-		rc |= (flags & EFLG_ZF);
+-		break;
+-	case 3: /* be/na */
+-		rc |= (flags & (EFLG_CF|EFLG_ZF));
+-		break;
+-	case 4: /* s */
+-		rc |= (flags & EFLG_SF);
+-		break;
+-	case 5: /* p/pe */
+-		rc |= (flags & EFLG_PF);
+-		break;
+-	case 7: /* le/ng */
+-		rc |= (flags & EFLG_ZF);
+-		/* fall through */
+-	case 6: /* l/nge */
+-		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
+-		break;
+-	}
+-
+-	/* Odd condition identifiers (lsb == 1) have inverted sense. */
+-	return (!!rc ^ (condition & 1));
+-}
+-
+-int
+-x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+-{
+-	unsigned d;
+-	u8 b, sib, twobyte = 0, rex_prefix = 0;
+-	u8 modrm, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
+-	unsigned long *override_base = NULL;
+-	unsigned int op_bytes, ad_bytes, lock_prefix = 0, rep_prefix = 0, i;
+-	int rc = 0;
+-	struct operand src, dst;
+-	unsigned long cr2 = ctxt->cr2;
+-	int mode = ctxt->mode;
+-	unsigned long modrm_ea;
+-	int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0;
+-	int no_wb = 0;
+-	u64 msr_data;
+-
+-	/* Shadow copy of register state. Committed on successful emulation. */
+-	unsigned long _regs[NR_VCPU_REGS];
+-	unsigned long _eip = ctxt->vcpu->rip, _eflags = ctxt->eflags;
+-	unsigned long modrm_val = 0;
+-
+-	memcpy(_regs, ctxt->vcpu->regs, sizeof _regs);
+-
+-	switch (mode) {
+-	case X86EMUL_MODE_REAL:
+-	case X86EMUL_MODE_PROT16:
+-		op_bytes = ad_bytes = 2;
+-		break;
+-	case X86EMUL_MODE_PROT32:
+-		op_bytes = ad_bytes = 4;
+-		break;
+-#ifdef CONFIG_X86_64
+-	case X86EMUL_MODE_PROT64:
+-		op_bytes = 4;
+-		ad_bytes = 8;
+-		break;
+-#endif
+-	default:
+-		return -1;
+-	}
+-
+-	/* Legacy prefixes. */
+-	for (i = 0; i < 8; i++) {
+-		switch (b = insn_fetch(u8, 1, _eip)) {
+-		case 0x66:	/* operand-size override */
+-			op_bytes ^= 6;	/* switch between 2/4 bytes */
+-			break;
+-		case 0x67:	/* address-size override */
+-			if (mode == X86EMUL_MODE_PROT64)
+-				ad_bytes ^= 12;	/* switch between 4/8 bytes */
+-			else
+-				ad_bytes ^= 6;	/* switch between 2/4 bytes */
+-			break;
+-		case 0x2e:	/* CS override */
+-			override_base = &ctxt->cs_base;
+-			break;
+-		case 0x3e:	/* DS override */
+-			override_base = &ctxt->ds_base;
+-			break;
+-		case 0x26:	/* ES override */
+-			override_base = &ctxt->es_base;
+-			break;
+-		case 0x64:	/* FS override */
+-			override_base = &ctxt->fs_base;
+-			break;
+-		case 0x65:	/* GS override */
+-			override_base = &ctxt->gs_base;
+-			break;
+-		case 0x36:	/* SS override */
+-			override_base = &ctxt->ss_base;
+-			break;
+-		case 0xf0:	/* LOCK */
+-			lock_prefix = 1;
+-			break;
+-		case 0xf2:	/* REPNE/REPNZ */
+-		case 0xf3:	/* REP/REPE/REPZ */
+-			rep_prefix = 1;
+-			break;
+-		default:
+-			goto done_prefixes;
 -		}
--		spin_unlock_irqrestore(&dev->pending_lock, flags);
-+	spin_lock_irqsave(&dev->pending_lock, flags);
-+	if (crp->cr_psstat)
-+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-+	else
-+		status = dev->pma_sample_status;
-+	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
-+		dev->pma_sample_start = be32_to_cpu(p->sample_start);
-+		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
-+		dev->pma_tag = be16_to_cpu(p->tag);
-+		dev->pma_counter_select[0] = p->counter_select[0];
-+		dev->pma_counter_select[1] = p->counter_select[1];
-+		dev->pma_counter_select[2] = p->counter_select[2];
-+		dev->pma_counter_select[3] = p->counter_select[3];
-+		dev->pma_counter_select[4] = p->counter_select[4];
-+		if (crp->cr_psstat) {
-+			ipath_write_creg(dev->dd, crp->cr_psinterval,
-+					 dev->pma_sample_interval);
-+			ipath_write_creg(dev->dd, crp->cr_psstart,
-+					 dev->pma_sample_start);
-+		} else
-+			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
- 	}
-+	spin_unlock_irqrestore(&dev->pending_lock, flags);
-+
- 	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
- 
- bail:
- 	return ret;
- }
- 
--static u64 get_counter(struct ipath_ibdev *dev, __be16 sel)
-+static u64 get_counter(struct ipath_ibdev *dev,
-+		       struct ipath_cregs const *crp,
-+		       __be16 sel)
- {
- 	u64 ret;
- 
- 	switch (sel) {
- 	case IB_PMA_PORT_XMIT_DATA:
--		ret = dev->ipath_sword;
-+		ret = (crp->cr_psxmitdatacount) ?
-+			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
-+			dev->ipath_sword;
- 		break;
- 	case IB_PMA_PORT_RCV_DATA:
--		ret = dev->ipath_rword;
-+		ret = (crp->cr_psrcvdatacount) ?
-+			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
-+			dev->ipath_rword;
- 		break;
- 	case IB_PMA_PORT_XMIT_PKTS:
--		ret = dev->ipath_spkts;
-+		ret = (crp->cr_psxmitpktscount) ?
-+			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
-+			dev->ipath_spkts;
- 		break;
- 	case IB_PMA_PORT_RCV_PKTS:
--		ret = dev->ipath_rpkts;
-+		ret = (crp->cr_psrcvpktscount) ?
-+			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
-+			dev->ipath_rpkts;
- 		break;
- 	case IB_PMA_PORT_XMIT_WAIT:
--		ret = dev->ipath_xmit_wait;
-+		ret = (crp->cr_psxmitwaitcount) ?
-+			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
-+			dev->ipath_xmit_wait;
- 		break;
- 	default:
- 		ret = 0;
-@@ -1053,14 +1066,21 @@ static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
- 	struct ib_pma_portsamplesresult *p =
- 		(struct ib_pma_portsamplesresult *)pmp->data;
- 	struct ipath_ibdev *dev = to_idev(ibdev);
-+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-+	u8 status;
- 	int i;
- 
- 	memset(pmp->data, 0, sizeof(pmp->data));
- 	p->tag = cpu_to_be16(dev->pma_tag);
--	p->sample_status = cpu_to_be16(dev->pma_sample_status);
-+	if (crp->cr_psstat)
-+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-+	else
-+		status = dev->pma_sample_status;
-+	p->sample_status = cpu_to_be16(status);
- 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
--		p->counter[i] = cpu_to_be32(
--			get_counter(dev, dev->pma_counter_select[i]));
-+		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
-+		    cpu_to_be32(
-+			get_counter(dev, crp, dev->pma_counter_select[i]));
- 
- 	return reply((struct ib_smp *) pmp);
- }
-@@ -1071,16 +1091,23 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
- 	struct ib_pma_portsamplesresult_ext *p =
- 		(struct ib_pma_portsamplesresult_ext *)pmp->data;
- 	struct ipath_ibdev *dev = to_idev(ibdev);
-+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-+	u8 status;
- 	int i;
- 
- 	memset(pmp->data, 0, sizeof(pmp->data));
- 	p->tag = cpu_to_be16(dev->pma_tag);
--	p->sample_status = cpu_to_be16(dev->pma_sample_status);
-+	if (crp->cr_psstat)
-+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-+	else
-+		status = dev->pma_sample_status;
-+	p->sample_status = cpu_to_be16(status);
- 	/* 64 bits */
- 	p->extended_width = __constant_cpu_to_be32(0x80000000);
- 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
--		p->counter[i] = cpu_to_be64(
--			get_counter(dev, dev->pma_counter_select[i]));
-+		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
-+		    cpu_to_be64(
-+			get_counter(dev, crp, dev->pma_counter_select[i]));
- 
- 	return reply((struct ib_smp *) pmp);
- }
-@@ -1113,6 +1140,8 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
- 		dev->z_local_link_integrity_errors;
- 	cntrs.excessive_buffer_overrun_errors -=
- 		dev->z_excessive_buffer_overrun_errors;
-+	cntrs.vl15_dropped -= dev->z_vl15_dropped;
-+	cntrs.vl15_dropped += dev->n_vl15_dropped;
- 
- 	memset(pmp->data, 0, sizeof(pmp->data));
- 
-@@ -1156,10 +1185,10 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
- 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
- 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
- 		cntrs.excessive_buffer_overrun_errors;
--	if (dev->n_vl15_dropped > 0xFFFFUL)
-+	if (cntrs.vl15_dropped > 0xFFFFUL)
- 		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
- 	else
--		p->vl15_dropped = cpu_to_be16((u16)dev->n_vl15_dropped);
-+		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
- 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
- 		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
- 	else
-@@ -1262,8 +1291,10 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
- 		dev->z_excessive_buffer_overrun_errors =
- 			cntrs.excessive_buffer_overrun_errors;
- 
--	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED)
-+	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
- 		dev->n_vl15_dropped = 0;
-+		dev->z_vl15_dropped = cntrs.vl15_dropped;
-+	}
- 
- 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
- 		dev->z_port_xmit_data = cntrs.port_xmit_data;
-@@ -1434,7 +1465,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
- 		 * before checking for other consumers.
- 		 * Just tell the caller to process it normally.
- 		 */
--		ret = IB_MAD_RESULT_FAILURE;
-+		ret = IB_MAD_RESULT_SUCCESS;
- 		goto bail;
- 	default:
- 		smp->status |= IB_SMP_UNSUP_METHOD;
-@@ -1516,7 +1547,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
- 		 * before checking for other consumers.
- 		 * Just tell the caller to process it normally.
- 		 */
--		ret = IB_MAD_RESULT_FAILURE;
-+		ret = IB_MAD_RESULT_SUCCESS;
- 		goto bail;
- 	default:
- 		pmp->status |= IB_SMP_UNSUP_METHOD;
-diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
-index b997ff8..80dc623 100644
---- a/drivers/infiniband/hw/ipath/ipath_qp.c
-+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
-@@ -387,8 +387,8 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
- 	struct ib_wc wc;
- 	int ret = 0;
- 
--	ipath_dbg("QP%d/%d in error state\n",
--		  qp->ibqp.qp_num, qp->remote_qpn);
-+	ipath_dbg("QP%d/%d in error state (%d)\n",
-+		  qp->ibqp.qp_num, qp->remote_qpn, err);
- 
- 	spin_lock(&dev->pending_lock);
- 	/* XXX What if its already removed by the timeout code? */
-@@ -855,8 +855,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
- 	 * See ipath_mmap() for details.
- 	 */
- 	if (udata && udata->outlen >= sizeof(__u64)) {
--		int err;
+-	}
 -
- 		if (!qp->r_rq.wq) {
- 			__u64 offset = 0;
- 
-diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
-index 120a61b..459e46e 100644
---- a/drivers/infiniband/hw/ipath/ipath_rc.c
-+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
-@@ -647,6 +647,7 @@ static void send_rc_ack(struct ipath_qp *qp)
- 
- queue_ack:
- 	spin_lock_irqsave(&qp->s_lock, flags);
-+	dev->n_rc_qacks++;
- 	qp->s_flags |= IPATH_S_ACK_PENDING;
- 	qp->s_nak_state = qp->r_nak_state;
- 	qp->s_ack_psn = qp->r_ack_psn;
-@@ -798,11 +799,13 @@ bail:
- 
- static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
- {
--	if (qp->s_wait_credit) {
--		qp->s_wait_credit = 0;
--		tasklet_hi_schedule(&qp->s_task);
-+	if (qp->s_last_psn != psn) {
-+		qp->s_last_psn = psn;
-+		if (qp->s_wait_credit) {
-+			qp->s_wait_credit = 0;
-+			tasklet_hi_schedule(&qp->s_task);
-+		}
- 	}
--	qp->s_last_psn = psn;
- }
- 
- /**
-@@ -1653,13 +1656,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- 	case OP(SEND_FIRST):
- 		if (!ipath_get_rwqe(qp, 0)) {
- 		rnr_nak:
+-done_prefixes:
+-
+-	/* REX prefix. */
+-	if ((mode == X86EMUL_MODE_PROT64) && ((b & 0xf0) == 0x40)) {
+-		rex_prefix = b;
+-		if (b & 8)
+-			op_bytes = 8;	/* REX.W */
+-		modrm_reg = (b & 4) << 1;	/* REX.R */
+-		index_reg = (b & 2) << 2; /* REX.X */
+-		modrm_rm = base_reg = (b & 1) << 3; /* REG.B */
+-		b = insn_fetch(u8, 1, _eip);
+-	}
+-
+-	/* Opcode byte(s). */
+-	d = opcode_table[b];
+-	if (d == 0) {
+-		/* Two-byte opcode? */
+-		if (b == 0x0f) {
+-			twobyte = 1;
+-			b = insn_fetch(u8, 1, _eip);
+-			d = twobyte_table[b];
+-		}
+-
+-		/* Unrecognised? */
+-		if (d == 0)
+-			goto cannot_emulate;
+-	}
+-
+-	/* ModRM and SIB bytes. */
+-	if (d & ModRM) {
+-		modrm = insn_fetch(u8, 1, _eip);
+-		modrm_mod |= (modrm & 0xc0) >> 6;
+-		modrm_reg |= (modrm & 0x38) >> 3;
+-		modrm_rm |= (modrm & 0x07);
+-		modrm_ea = 0;
+-		use_modrm_ea = 1;
+-
+-		if (modrm_mod == 3) {
+-			modrm_val = *(unsigned long *)
+-				decode_register(modrm_rm, _regs, d & ByteOp);
+-			goto modrm_done;
+-		}
+-
+-		if (ad_bytes == 2) {
+-			unsigned bx = _regs[VCPU_REGS_RBX];
+-			unsigned bp = _regs[VCPU_REGS_RBP];
+-			unsigned si = _regs[VCPU_REGS_RSI];
+-			unsigned di = _regs[VCPU_REGS_RDI];
+-
+-			/* 16-bit ModR/M decode. */
+-			switch (modrm_mod) {
+-			case 0:
+-				if (modrm_rm == 6)
+-					modrm_ea += insn_fetch(u16, 2, _eip);
+-				break;
+-			case 1:
+-				modrm_ea += insn_fetch(s8, 1, _eip);
+-				break;
+-			case 2:
+-				modrm_ea += insn_fetch(u16, 2, _eip);
+-				break;
+-			}
+-			switch (modrm_rm) {
+-			case 0:
+-				modrm_ea += bx + si;
+-				break;
+-			case 1:
+-				modrm_ea += bx + di;
+-				break;
+-			case 2:
+-				modrm_ea += bp + si;
+-				break;
+-			case 3:
+-				modrm_ea += bp + di;
+-				break;
+-			case 4:
+-				modrm_ea += si;
+-				break;
+-			case 5:
+-				modrm_ea += di;
+-				break;
+-			case 6:
+-				if (modrm_mod != 0)
+-					modrm_ea += bp;
+-				break;
+-			case 7:
+-				modrm_ea += bx;
+-				break;
+-			}
+-			if (modrm_rm == 2 || modrm_rm == 3 ||
+-			    (modrm_rm == 6 && modrm_mod != 0))
+-				if (!override_base)
+-					override_base = &ctxt->ss_base;
+-			modrm_ea = (u16)modrm_ea;
+-		} else {
+-			/* 32/64-bit ModR/M decode. */
+-			switch (modrm_rm) {
+-			case 4:
+-			case 12:
+-				sib = insn_fetch(u8, 1, _eip);
+-				index_reg |= (sib >> 3) & 7;
+-				base_reg |= sib & 7;
+-				scale = sib >> 6;
+-
+-				switch (base_reg) {
+-				case 5:
+-					if (modrm_mod != 0)
+-						modrm_ea += _regs[base_reg];
+-					else
+-						modrm_ea += insn_fetch(s32, 4, _eip);
+-					break;
+-				default:
+-					modrm_ea += _regs[base_reg];
+-				}
+-				switch (index_reg) {
+-				case 4:
+-					break;
+-				default:
+-					modrm_ea += _regs[index_reg] << scale;
+-
+-				}
+-				break;
+-			case 5:
+-				if (modrm_mod != 0)
+-					modrm_ea += _regs[modrm_rm];
+-				else if (mode == X86EMUL_MODE_PROT64)
+-					rip_relative = 1;
+-				break;
+-			default:
+-				modrm_ea += _regs[modrm_rm];
+-				break;
+-			}
+-			switch (modrm_mod) {
+-			case 0:
+-				if (modrm_rm == 5)
+-					modrm_ea += insn_fetch(s32, 4, _eip);
+-				break;
+-			case 1:
+-				modrm_ea += insn_fetch(s8, 1, _eip);
+-				break;
+-			case 2:
+-				modrm_ea += insn_fetch(s32, 4, _eip);
+-				break;
+-			}
+-		}
+-		if (!override_base)
+-			override_base = &ctxt->ds_base;
+-		if (mode == X86EMUL_MODE_PROT64 &&
+-		    override_base != &ctxt->fs_base &&
+-		    override_base != &ctxt->gs_base)
+-			override_base = NULL;
+-
+-		if (override_base)
+-			modrm_ea += *override_base;
+-
+-		if (rip_relative) {
+-			modrm_ea += _eip;
+-			switch (d & SrcMask) {
+-			case SrcImmByte:
+-				modrm_ea += 1;
+-				break;
+-			case SrcImm:
+-				if (d & ByteOp)
+-					modrm_ea += 1;
+-				else
+-					if (op_bytes == 8)
+-						modrm_ea += 4;
+-					else
+-						modrm_ea += op_bytes;
+-			}
+-		}
+-		if (ad_bytes != 8)
+-			modrm_ea = (u32)modrm_ea;
+-		cr2 = modrm_ea;
+-	modrm_done:
+-		;
+-	}
+-
+-	/*
+-	 * Decode and fetch the source operand: register, memory
+-	 * or immediate.
+-	 */
+-	switch (d & SrcMask) {
+-	case SrcNone:
+-		break;
+-	case SrcReg:
+-		src.type = OP_REG;
+-		if (d & ByteOp) {
+-			src.ptr = decode_register(modrm_reg, _regs,
+-						  (rex_prefix == 0));
+-			src.val = src.orig_val = *(u8 *) src.ptr;
+-			src.bytes = 1;
+-		} else {
+-			src.ptr = decode_register(modrm_reg, _regs, 0);
+-			switch ((src.bytes = op_bytes)) {
+-			case 2:
+-				src.val = src.orig_val = *(u16 *) src.ptr;
+-				break;
+-			case 4:
+-				src.val = src.orig_val = *(u32 *) src.ptr;
+-				break;
+-			case 8:
+-				src.val = src.orig_val = *(u64 *) src.ptr;
+-				break;
+-			}
+-		}
+-		break;
+-	case SrcMem16:
+-		src.bytes = 2;
+-		goto srcmem_common;
+-	case SrcMem32:
+-		src.bytes = 4;
+-		goto srcmem_common;
+-	case SrcMem:
+-		src.bytes = (d & ByteOp) ? 1 : op_bytes;
+-		/* Don't fetch the address for invlpg: it could be unmapped. */
+-		if (twobyte && b == 0x01 && modrm_reg == 7)
+-			break;
+-	      srcmem_common:
+-		/*
+-		 * For instructions with a ModR/M byte, switch to register
+-		 * access if Mod = 3.
+-		 */
+-		if ((d & ModRM) && modrm_mod == 3) {
+-			src.type = OP_REG;
+-			break;
+-		}
+-		src.type = OP_MEM;
+-		src.ptr = (unsigned long *)cr2;
+-		src.val = 0;
+-		if ((rc = ops->read_emulated((unsigned long)src.ptr,
+-					     &src.val, src.bytes, ctxt->vcpu)) != 0)
+-			goto done;
+-		src.orig_val = src.val;
+-		break;
+-	case SrcImm:
+-		src.type = OP_IMM;
+-		src.ptr = (unsigned long *)_eip;
+-		src.bytes = (d & ByteOp) ? 1 : op_bytes;
+-		if (src.bytes == 8)
+-			src.bytes = 4;
+-		/* NB. Immediates are sign-extended as necessary. */
+-		switch (src.bytes) {
+-		case 1:
+-			src.val = insn_fetch(s8, 1, _eip);
+-			break;
+-		case 2:
+-			src.val = insn_fetch(s16, 2, _eip);
+-			break;
+-		case 4:
+-			src.val = insn_fetch(s32, 4, _eip);
+-			break;
+-		}
+-		break;
+-	case SrcImmByte:
+-		src.type = OP_IMM;
+-		src.ptr = (unsigned long *)_eip;
+-		src.bytes = 1;
+-		src.val = insn_fetch(s8, 1, _eip);
+-		break;
+-	}
+-
+-	/* Decode and fetch the destination operand: register or memory. */
+-	switch (d & DstMask) {
+-	case ImplicitOps:
+-		/* Special instructions do their own operand decoding. */
+-		goto special_insn;
+-	case DstReg:
+-		dst.type = OP_REG;
+-		if ((d & ByteOp)
+-		    && !(twobyte && (b == 0xb6 || b == 0xb7))) {
+-			dst.ptr = decode_register(modrm_reg, _regs,
+-						  (rex_prefix == 0));
+-			dst.val = *(u8 *) dst.ptr;
+-			dst.bytes = 1;
+-		} else {
+-			dst.ptr = decode_register(modrm_reg, _regs, 0);
+-			switch ((dst.bytes = op_bytes)) {
+-			case 2:
+-				dst.val = *(u16 *)dst.ptr;
+-				break;
+-			case 4:
+-				dst.val = *(u32 *)dst.ptr;
+-				break;
+-			case 8:
+-				dst.val = *(u64 *)dst.ptr;
+-				break;
+-			}
+-		}
+-		break;
+-	case DstMem:
+-		dst.type = OP_MEM;
+-		dst.ptr = (unsigned long *)cr2;
+-		dst.bytes = (d & ByteOp) ? 1 : op_bytes;
+-		dst.val = 0;
+-		/*
+-		 * For instructions with a ModR/M byte, switch to register
+-		 * access if Mod = 3.
+-		 */
+-		if ((d & ModRM) && modrm_mod == 3) {
+-			dst.type = OP_REG;
+-			break;
+-		}
+-		if (d & BitOp) {
+-			unsigned long mask = ~(dst.bytes * 8 - 1);
+-
+-			dst.ptr = (void *)dst.ptr + (src.val & mask) / 8;
+-		}
+-		if (!(d & Mov) && /* optimisation - avoid slow emulated read */
+-		    ((rc = ops->read_emulated((unsigned long)dst.ptr,
+-					      &dst.val, dst.bytes, ctxt->vcpu)) != 0))
+-			goto done;
+-		break;
+-	}
+-	dst.orig_val = dst.val;
+-
+-	if (twobyte)
+-		goto twobyte_insn;
+-
+-	switch (b) {
+-	case 0x00 ... 0x05:
+-	      add:		/* add */
+-		emulate_2op_SrcV("add", src, dst, _eflags);
+-		break;
+-	case 0x08 ... 0x0d:
+-	      or:		/* or */
+-		emulate_2op_SrcV("or", src, dst, _eflags);
+-		break;
+-	case 0x10 ... 0x15:
+-	      adc:		/* adc */
+-		emulate_2op_SrcV("adc", src, dst, _eflags);
+-		break;
+-	case 0x18 ... 0x1d:
+-	      sbb:		/* sbb */
+-		emulate_2op_SrcV("sbb", src, dst, _eflags);
+-		break;
+-	case 0x20 ... 0x23:
+-	      and:		/* and */
+-		emulate_2op_SrcV("and", src, dst, _eflags);
+-		break;
+-	case 0x24:              /* and al imm8 */
+-		dst.type = OP_REG;
+-		dst.ptr = &_regs[VCPU_REGS_RAX];
+-		dst.val = *(u8 *)dst.ptr;
+-		dst.bytes = 1;
+-		dst.orig_val = dst.val;
+-		goto and;
+-	case 0x25:              /* and ax imm16, or eax imm32 */
+-		dst.type = OP_REG;
+-		dst.bytes = op_bytes;
+-		dst.ptr = &_regs[VCPU_REGS_RAX];
+-		if (op_bytes == 2)
+-			dst.val = *(u16 *)dst.ptr;
+-		else
+-			dst.val = *(u32 *)dst.ptr;
+-		dst.orig_val = dst.val;
+-		goto and;
+-	case 0x28 ... 0x2d:
+-	      sub:		/* sub */
+-		emulate_2op_SrcV("sub", src, dst, _eflags);
+-		break;
+-	case 0x30 ... 0x35:
+-	      xor:		/* xor */
+-		emulate_2op_SrcV("xor", src, dst, _eflags);
+-		break;
+-	case 0x38 ... 0x3d:
+-	      cmp:		/* cmp */
+-		emulate_2op_SrcV("cmp", src, dst, _eflags);
+-		break;
+-	case 0x63:		/* movsxd */
+-		if (mode != X86EMUL_MODE_PROT64)
+-			goto cannot_emulate;
+-		dst.val = (s32) src.val;
+-		break;
+-	case 0x80 ... 0x83:	/* Grp1 */
+-		switch (modrm_reg) {
+-		case 0:
+-			goto add;
+-		case 1:
+-			goto or;
+-		case 2:
+-			goto adc;
+-		case 3:
+-			goto sbb;
+-		case 4:
+-			goto and;
+-		case 5:
+-			goto sub;
+-		case 6:
+-			goto xor;
+-		case 7:
+-			goto cmp;
+-		}
+-		break;
+-	case 0x84 ... 0x85:
+-	      test:		/* test */
+-		emulate_2op_SrcV("test", src, dst, _eflags);
+-		break;
+-	case 0x86 ... 0x87:	/* xchg */
+-		/* Write back the register source. */
+-		switch (dst.bytes) {
+-		case 1:
+-			*(u8 *) src.ptr = (u8) dst.val;
+-			break;
+-		case 2:
+-			*(u16 *) src.ptr = (u16) dst.val;
+-			break;
+-		case 4:
+-			*src.ptr = (u32) dst.val;
+-			break;	/* 64b reg: zero-extend */
+-		case 8:
+-			*src.ptr = dst.val;
+-			break;
+-		}
+-		/*
+-		 * Write back the memory destination with implicit LOCK
+-		 * prefix.
+-		 */
+-		dst.val = src.val;
+-		lock_prefix = 1;
+-		break;
+-	case 0x88 ... 0x8b:	/* mov */
+-		goto mov;
+-	case 0x8d: /* lea r16/r32, m */
+-		dst.val = modrm_val;
+-		break;
+-	case 0x8f:		/* pop (sole member of Grp1a) */
+-		/* 64-bit mode: POP always pops a 64-bit operand. */
+-		if (mode == X86EMUL_MODE_PROT64)
+-			dst.bytes = 8;
+-		if ((rc = ops->read_std(register_address(ctxt->ss_base,
+-							 _regs[VCPU_REGS_RSP]),
+-					&dst.val, dst.bytes, ctxt->vcpu)) != 0)
+-			goto done;
+-		register_address_increment(_regs[VCPU_REGS_RSP], dst.bytes);
+-		break;
+-	case 0xa0 ... 0xa1:	/* mov */
+-		dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
+-		dst.val = src.val;
+-		_eip += ad_bytes;	/* skip src displacement */
+-		break;
+-	case 0xa2 ... 0xa3:	/* mov */
+-		dst.val = (unsigned long)_regs[VCPU_REGS_RAX];
+-		_eip += ad_bytes;	/* skip dst displacement */
+-		break;
+-	case 0xc0 ... 0xc1:
+-	      grp2:		/* Grp2 */
+-		switch (modrm_reg) {
+-		case 0:	/* rol */
+-			emulate_2op_SrcB("rol", src, dst, _eflags);
+-			break;
+-		case 1:	/* ror */
+-			emulate_2op_SrcB("ror", src, dst, _eflags);
+-			break;
+-		case 2:	/* rcl */
+-			emulate_2op_SrcB("rcl", src, dst, _eflags);
+-			break;
+-		case 3:	/* rcr */
+-			emulate_2op_SrcB("rcr", src, dst, _eflags);
+-			break;
+-		case 4:	/* sal/shl */
+-		case 6:	/* sal/shl */
+-			emulate_2op_SrcB("sal", src, dst, _eflags);
+-			break;
+-		case 5:	/* shr */
+-			emulate_2op_SrcB("shr", src, dst, _eflags);
+-			break;
+-		case 7:	/* sar */
+-			emulate_2op_SrcB("sar", src, dst, _eflags);
+-			break;
+-		}
+-		break;
+-	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
+-	mov:
+-		dst.val = src.val;
+-		break;
+-	case 0xd0 ... 0xd1:	/* Grp2 */
+-		src.val = 1;
+-		goto grp2;
+-	case 0xd2 ... 0xd3:	/* Grp2 */
+-		src.val = _regs[VCPU_REGS_RCX];
+-		goto grp2;
+-	case 0xf6 ... 0xf7:	/* Grp3 */
+-		switch (modrm_reg) {
+-		case 0 ... 1:	/* test */
 -			/*
--			 * A RNR NAK will ACK earlier sends and RDMA writes.
--			 * Don't queue the NAK if a RDMA read or atomic
--			 * is pending though.
+-			 * Special case in Grp3: test has an immediate
+-			 * source operand.
 -			 */
--			if (qp->r_nak_state)
+-			src.type = OP_IMM;
+-			src.ptr = (unsigned long *)_eip;
+-			src.bytes = (d & ByteOp) ? 1 : op_bytes;
+-			if (src.bytes == 8)
+-				src.bytes = 4;
+-			switch (src.bytes) {
+-			case 1:
+-				src.val = insn_fetch(s8, 1, _eip);
+-				break;
+-			case 2:
+-				src.val = insn_fetch(s16, 2, _eip);
+-				break;
+-			case 4:
+-				src.val = insn_fetch(s32, 4, _eip);
+-				break;
+-			}
+-			goto test;
+-		case 2:	/* not */
+-			dst.val = ~dst.val;
+-			break;
+-		case 3:	/* neg */
+-			emulate_1op("neg", dst, _eflags);
+-			break;
+-		default:
+-			goto cannot_emulate;
+-		}
+-		break;
+-	case 0xfe ... 0xff:	/* Grp4/Grp5 */
+-		switch (modrm_reg) {
+-		case 0:	/* inc */
+-			emulate_1op("inc", dst, _eflags);
+-			break;
+-		case 1:	/* dec */
+-			emulate_1op("dec", dst, _eflags);
+-			break;
+-		case 4: /* jmp abs */
+-			if (b == 0xff)
+-				_eip = dst.val;
+-			else
+-				goto cannot_emulate;
+-			break;
+-		case 6:	/* push */
+-			/* 64-bit mode: PUSH always pushes a 64-bit operand. */
+-			if (mode == X86EMUL_MODE_PROT64) {
+-				dst.bytes = 8;
+-				if ((rc = ops->read_std((unsigned long)dst.ptr,
+-							&dst.val, 8,
+-							ctxt->vcpu)) != 0)
+-					goto done;
+-			}
+-			register_address_increment(_regs[VCPU_REGS_RSP],
+-						   -dst.bytes);
+-			if ((rc = ops->write_emulated(
+-				     register_address(ctxt->ss_base,
+-						      _regs[VCPU_REGS_RSP]),
+-				     &dst.val, dst.bytes, ctxt->vcpu)) != 0)
 -				goto done;
- 			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
- 			qp->r_ack_psn = qp->r_psn;
- 			goto send_ack;
-diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
-index 708eba3..6d2a17f 100644
---- a/drivers/infiniband/hw/ipath/ipath_registers.h
-+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
-@@ -82,8 +82,7 @@
- 
- /* kr_rcvctrl bits */
- #define INFINIPATH_R_PORTENABLE_SHIFT 0
--#define INFINIPATH_R_INTRAVAIL_SHIFT 16
--#define INFINIPATH_R_TAILUPD   0x80000000
-+#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
- 
- /* kr_intstatus, kr_intclear, kr_intmask bits */
- #define INFINIPATH_I_RCVURG_SHIFT 0
-@@ -272,20 +271,6 @@
- #define INFINIPATH_EXTC_LEDGBLOK_ON          0x00000002ULL
- #define INFINIPATH_EXTC_LEDGBLERR_OFF        0x00000001ULL
- 
--/* kr_mdio bits */
--#define INFINIPATH_MDIO_CLKDIV_MASK 0x7FULL
--#define INFINIPATH_MDIO_CLKDIV_SHIFT 32
--#define INFINIPATH_MDIO_COMMAND_MASK 0x7ULL
--#define INFINIPATH_MDIO_COMMAND_SHIFT 26
--#define INFINIPATH_MDIO_DEVADDR_MASK 0x1FULL
--#define INFINIPATH_MDIO_DEVADDR_SHIFT 21
--#define INFINIPATH_MDIO_REGADDR_MASK 0x1FULL
--#define INFINIPATH_MDIO_REGADDR_SHIFT 16
--#define INFINIPATH_MDIO_DATA_MASK 0xFFFFULL
--#define INFINIPATH_MDIO_DATA_SHIFT 0
--#define INFINIPATH_MDIO_CMDVALID    0x0000000040000000ULL
--#define INFINIPATH_MDIO_RDDATAVALID 0x0000000080000000ULL
+-			no_wb = 1;
+-			break;
+-		default:
+-			goto cannot_emulate;
+-		}
+-		break;
+-	}
 -
- /* kr_partitionkey bits */
- #define INFINIPATH_PKEY_SIZE 16
- #define INFINIPATH_PKEY_MASK 0xFFFF
-@@ -303,8 +288,6 @@
- 
- /* kr_xgxsconfig bits */
- #define INFINIPATH_XGXS_RESET          0x7ULL
--#define INFINIPATH_XGXS_MDIOADDR_MASK  0xfULL
--#define INFINIPATH_XGXS_MDIOADDR_SHIFT 4
- #define INFINIPATH_XGXS_RX_POL_SHIFT 19
- #define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
- 
-@@ -470,6 +453,20 @@ struct ipath_cregs {
- 	ipath_creg cr_unsupvlcnt;
- 	ipath_creg cr_wordrcvcnt;
- 	ipath_creg cr_wordsendcnt;
-+	ipath_creg cr_vl15droppedpktcnt;
-+	ipath_creg cr_rxotherlocalphyerrcnt;
-+	ipath_creg cr_excessbufferovflcnt;
-+	ipath_creg cr_locallinkintegrityerrcnt;
-+	ipath_creg cr_rxvlerrcnt;
-+	ipath_creg cr_rxdlidfltrcnt;
-+	ipath_creg cr_psstat;
-+	ipath_creg cr_psstart;
-+	ipath_creg cr_psinterval;
-+	ipath_creg cr_psrcvdatacount;
-+	ipath_creg cr_psrcvpktscount;
-+	ipath_creg cr_psxmitdatacount;
-+	ipath_creg cr_psxmitpktscount;
-+	ipath_creg cr_psxmitwaitcount;
- };
- 
- #endif				/* _IPATH_REGISTERS_H */
-diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
-index 54c61a9..a59bdbd 100644
---- a/drivers/infiniband/hw/ipath/ipath_ruc.c
-+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
-@@ -98,11 +98,15 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
- 		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
- 			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
- 			l = l->next;
--			if (l->next == &dev->rnrwait)
-+			if (l->next == &dev->rnrwait) {
-+				nqp = NULL;
- 				break;
-+			}
- 			nqp = list_entry(l->next, struct ipath_qp,
- 					 timerwait);
- 		}
-+		if (nqp)
-+			nqp->s_rnr_timeout -= qp->s_rnr_timeout;
- 		list_add(&qp->timerwait, l);
- 	}
- 	spin_unlock_irqrestore(&dev->pending_lock, flags);
-@@ -479,9 +483,14 @@ done:
- 
- static void want_buffer(struct ipath_devdata *dd)
- {
--	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-+	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
- 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- 			 dd->ipath_sendctrl);
-+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- }
- 
- /**
-diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
-index 2fef36f..f772102 100644
---- a/drivers/infiniband/hw/ipath/ipath_srq.c
-+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
-@@ -94,8 +94,8 @@ bail:
- /**
-  * ipath_create_srq - create a shared receive queue
-  * @ibpd: the protection domain of the SRQ to create
-- * @attr: the attributes of the SRQ
-- * @udata: not used by the InfiniPath verbs driver
-+ * @srq_init_attr: the attributes of the SRQ
-+ * @udata: data from libipathverbs when creating a user SRQ
-  */
- struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
- 				struct ib_srq_init_attr *srq_init_attr,
-diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
-index f027141..d2725cd 100644
---- a/drivers/infiniband/hw/ipath/ipath_stats.c
-+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
-@@ -133,15 +133,16 @@ bail:
- static void ipath_qcheck(struct ipath_devdata *dd)
- {
- 	static u64 last_tot_hdrqfull;
-+	struct ipath_portdata *pd = dd->ipath_pd[0];
- 	size_t blen = 0;
- 	char buf[128];
- 
- 	*buf = 0;
--	if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
-+	if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
- 		blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
--				dd->ipath_pd[0]->port_hdrqfull -
-+				pd->port_hdrqfull -
- 				dd->ipath_p0_hdrqfull);
--		dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
-+		dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
- 	}
- 	if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
- 		blen += snprintf(buf + blen, sizeof buf - blen,
-@@ -173,7 +174,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
- 	if (blen)
- 		ipath_dbg("%s\n", buf);
- 
--	if (dd->ipath_port0head != (u32)
-+	if (pd->port_head != (u32)
- 	    le64_to_cpu(*dd->ipath_hdrqtailptr)) {
- 		if (dd->ipath_lastport0rcv_cnt ==
- 		    ipath_stats.sps_port0pkts) {
-@@ -181,7 +182,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
- 				   "port0 hd=%llx tl=%x; port0pkts %llx\n",
- 				   (unsigned long long)
- 				   le64_to_cpu(*dd->ipath_hdrqtailptr),
--				   dd->ipath_port0head,
-+				   pd->port_head,
- 				   (unsigned long long)
- 				   ipath_stats.sps_port0pkts);
- 		}
-@@ -237,7 +238,7 @@ static void ipath_chk_errormask(struct ipath_devdata *dd)
- void ipath_get_faststats(unsigned long opaque)
- {
- 	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
--	u32 val;
-+	int i;
- 	static unsigned cnt;
- 	unsigned long flags;
- 	u64 traffic_wds;
-@@ -321,12 +322,11 @@ void ipath_get_faststats(unsigned long opaque)
- 
- 	/* limit qfull messages to ~one per minute per port */
- 	if ((++cnt & 0x10)) {
--		for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
--		     val--) {
--			if (dd->ipath_lastegrheads[val] != -1)
--				dd->ipath_lastegrheads[val] = -1;
--			if (dd->ipath_lastrcvhdrqtails[val] != -1)
--				dd->ipath_lastrcvhdrqtails[val] = -1;
-+		for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
-+			struct ipath_portdata *pd = dd->ipath_pd[i];
-+
-+			if (pd && pd->port_lastrcvhdrqtail != -1)
-+				pd->port_lastrcvhdrqtail = -1;
- 		}
- 	}
- 
-diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
-index e1ad7cf..56dfc8a 100644
---- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
-+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
-@@ -363,6 +363,60 @@ static ssize_t show_unit(struct device *dev,
- 	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
- }
- 
-+static ssize_t show_jint_max_packets(struct device *dev,
-+				     struct device_attribute *attr,
-+				     char *buf)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+
-+	return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets);
-+}
-+
-+static ssize_t store_jint_max_packets(struct device *dev,
-+				      struct device_attribute *attr,
-+				      const char *buf,
-+				      size_t count)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	u16 v = 0;
-+	int ret;
-+
-+	ret = ipath_parse_ushort(buf, &v);
-+	if (ret < 0)
-+		ipath_dev_err(dd, "invalid jint_max_packets.\n");
-+	else
-+		dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v);
-+
-+	return ret;
-+}
-+
-+static ssize_t show_jint_idle_ticks(struct device *dev,
-+				    struct device_attribute *attr,
-+				    char *buf)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+
-+	return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks);
-+}
-+
-+static ssize_t store_jint_idle_ticks(struct device *dev,
-+				     struct device_attribute *attr,
-+				     const char *buf,
-+				     size_t count)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	u16 v = 0;
-+	int ret;
-+
-+	ret = ipath_parse_ushort(buf, &v);
-+	if (ret < 0)
-+		ipath_dev_err(dd, "invalid jint_idle_ticks.\n");
-+	else
-+		dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets);
-+
-+	return ret;
-+}
-+
- #define DEVICE_COUNTER(name, attr) \
- 	static ssize_t show_counter_##name(struct device *dev, \
- 					   struct device_attribute *attr, \
-@@ -670,6 +724,257 @@ static ssize_t show_logged_errs(struct device *dev,
- 	return count;
- }
- 
-+/*
-+ * New sysfs entries to control various IB config. These all turn into
-+ * accesses via ipath_f_get/set_ib_cfg.
-+ *
-+ * Get/Set heartbeat enable. Or of 1=enabled, 2=auto
-+ */
-+static ssize_t show_hrtbt_enb(struct device *dev,
-+			 struct device_attribute *attr,
-+			 char *buf)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret;
-+
-+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
-+	if (ret >= 0)
-+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-+	return ret;
-+}
-+
-+static ssize_t store_hrtbt_enb(struct device *dev,
-+			  struct device_attribute *attr,
-+			  const char *buf,
-+			  size_t count)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret, r;
-+	u16 val;
-+
-+	ret = ipath_parse_ushort(buf, &val);
-+	if (ret >= 0 && val > 3)
-+		ret = -EINVAL;
-+	if (ret < 0) {
-+		ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
-+		goto bail;
-+	}
-+
-+	/*
-+	 * Set the "intentional" heartbeat enable per either of
-+	 * "Enable" and "Auto", as these are normally set together.
-+	 * This bit is consulted when leaving loopback mode,
-+	 * because entering loopback mode overrides it and automatically
-+	 * disables heartbeat.
-+	 */
-+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val);
-+	if (r < 0)
-+		ret = r;
-+	else if (val == IPATH_IB_HRTBT_OFF)
-+		dd->ipath_flags |= IPATH_NO_HRTBT;
-+	else
-+		dd->ipath_flags &= ~IPATH_NO_HRTBT;
-+
-+bail:
-+	return ret;
-+}
-+
-+/*
-+ * Get/Set Link-widths enabled. Or of 1=1x, 2=4x (this is human/IB centric,
-+ * _not_ the particular encoding of any given chip)
-+ */
-+static ssize_t show_lwid_enb(struct device *dev,
-+			 struct device_attribute *attr,
-+			 char *buf)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret;
-+
-+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);
-+	if (ret >= 0)
-+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-+	return ret;
-+}
-+
-+static ssize_t store_lwid_enb(struct device *dev,
-+			  struct device_attribute *attr,
-+			  const char *buf,
-+			  size_t count)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret, r;
-+	u16 val;
-+
-+	ret = ipath_parse_ushort(buf, &val);
-+	if (ret >= 0 && (val == 0 || val > 3))
-+		ret = -EINVAL;
-+	if (ret < 0) {
-+		ipath_dev_err(dd,
-+			"attempt to set invalid Link Width (enable)\n");
-+		goto bail;
-+	}
-+
-+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
-+	if (r < 0)
-+		ret = r;
-+
-+bail:
-+	return ret;
-+}
-+
-+/* Get current link width */
-+static ssize_t show_lwid(struct device *dev,
-+			 struct device_attribute *attr,
-+			 char *buf)
-+
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret;
-+
-+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
-+	if (ret >= 0)
-+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-+	return ret;
-+}
-+
-+/*
-+ * Get/Set Link-speeds enabled. Or of 1=SDR 2=DDR.
-+ */
-+static ssize_t show_spd_enb(struct device *dev,
-+			 struct device_attribute *attr,
-+			 char *buf)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret;
-+
-+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB);
-+	if (ret >= 0)
-+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-+	return ret;
-+}
-+
-+static ssize_t store_spd_enb(struct device *dev,
-+			  struct device_attribute *attr,
-+			  const char *buf,
-+			  size_t count)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret, r;
-+	u16 val;
-+
-+	ret = ipath_parse_ushort(buf, &val);
-+	if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR)))
-+		ret = -EINVAL;
-+	if (ret < 0) {
-+		ipath_dev_err(dd,
-+			"attempt to set invalid Link Speed (enable)\n");
-+		goto bail;
-+	}
-+
-+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val);
-+	if (r < 0)
-+		ret = r;
-+
-+bail:
-+	return ret;
-+}
-+
-+/* Get current link speed */
-+static ssize_t show_spd(struct device *dev,
-+			 struct device_attribute *attr,
-+			 char *buf)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret;
-+
-+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD);
-+	if (ret >= 0)
-+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-+	return ret;
-+}
-+
-+/*
-+ * Get/Set RX polarity-invert enable. 0=no, 1=yes.
-+ */
-+static ssize_t show_rx_polinv_enb(struct device *dev,
-+			 struct device_attribute *attr,
-+			 char *buf)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret;
-+
-+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB);
-+	if (ret >= 0)
-+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-+	return ret;
-+}
-+
-+static ssize_t store_rx_polinv_enb(struct device *dev,
-+			  struct device_attribute *attr,
-+			  const char *buf,
-+			  size_t count)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret, r;
-+	u16 val;
-+
-+	ret = ipath_parse_ushort(buf, &val);
-+	if (ret < 0 || val > 1)
-+		goto invalid;
-+
-+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
-+	if (r < 0) {
-+		ret = r;
-+		goto bail;
-+	}
-+
-+	goto bail;
-+invalid:
-+	ipath_dev_err(dd, "attempt to set invalid Rx Polarity (enable)\n");
-+bail:
-+	return ret;
-+}
-+/*
-+ * Get/Set RX lane-reversal enable. 0=no, 1=yes.
-+ */
-+static ssize_t show_lanerev_enb(struct device *dev,
-+			 struct device_attribute *attr,
-+			 char *buf)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret;
-+
-+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB);
-+	if (ret >= 0)
-+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-+	return ret;
-+}
-+
-+static ssize_t store_lanerev_enb(struct device *dev,
-+			  struct device_attribute *attr,
-+			  const char *buf,
-+			  size_t count)
-+{
-+	struct ipath_devdata *dd = dev_get_drvdata(dev);
-+	int ret, r;
-+	u16 val;
-+
-+	ret = ipath_parse_ushort(buf, &val);
-+	if (ret >= 0 && val > 1) {
-+		ret = -EINVAL;
-+		ipath_dev_err(dd,
-+			"attempt to set invalid Lane reversal (enable)\n");
-+		goto bail;
-+	}
-+
-+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val);
-+	if (r < 0)
-+		ret = r;
-+
-+bail:
-+	return ret;
-+}
-+
- static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
- static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
- 
-@@ -683,6 +988,11 @@ static struct attribute_group driver_attr_group = {
- 	.attrs = driver_attributes
- };
- 
-+struct attribute_group *ipath_driver_attr_groups[] = {
-+	&driver_attr_group,
-+	NULL,
-+};
-+
- static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
- static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
- static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
-@@ -701,6 +1011,10 @@ static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
- static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
- static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
- static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
-+static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
-+		   show_jint_max_packets, store_jint_max_packets);
-+static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
-+		   show_jint_idle_ticks, store_jint_idle_ticks);
- 
- static struct attribute *dev_attributes[] = {
- 	&dev_attr_guid.attr,
-@@ -727,6 +1041,34 @@ static struct attribute_group dev_attr_group = {
- 	.attrs = dev_attributes
- };
- 
-+static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
-+		   store_hrtbt_enb);
-+static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb,
-+		   store_lwid_enb);
-+static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL);
-+static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb,
-+		   store_spd_enb);
-+static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL);
-+static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb,
-+		   store_rx_polinv_enb);
-+static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb,
-+		   store_lanerev_enb);
-+
-+static struct attribute *dev_ibcfg_attributes[] = {
-+	&dev_attr_hrtbt_enable.attr,
-+	&dev_attr_link_width_enable.attr,
-+	&dev_attr_link_width.attr,
-+	&dev_attr_link_speed_enable.attr,
-+	&dev_attr_link_speed.attr,
-+	&dev_attr_rx_pol_inv_enable.attr,
-+	&dev_attr_rx_lane_rev_enable.attr,
-+	NULL
-+};
-+
-+static struct attribute_group dev_ibcfg_attr_group = {
-+	.attrs = dev_ibcfg_attributes
-+};
-+
- /**
-  * ipath_expose_reset - create a device reset file
-  * @dev: the device structure
-@@ -753,24 +1095,9 @@ int ipath_expose_reset(struct device *dev)
- 	return ret;
- }
- 
--int ipath_driver_create_group(struct device_driver *drv)
+-writeback:
+-	if (!no_wb) {
+-		switch (dst.type) {
+-		case OP_REG:
+-			/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+-			switch (dst.bytes) {
+-			case 1:
+-				*(u8 *)dst.ptr = (u8)dst.val;
+-				break;
+-			case 2:
+-				*(u16 *)dst.ptr = (u16)dst.val;
+-				break;
+-			case 4:
+-				*dst.ptr = (u32)dst.val;
+-				break;	/* 64b: zero-ext */
+-			case 8:
+-				*dst.ptr = dst.val;
+-				break;
+-			}
+-			break;
+-		case OP_MEM:
+-			if (lock_prefix)
+-				rc = ops->cmpxchg_emulated((unsigned long)dst.
+-							   ptr, &dst.orig_val,
+-							   &dst.val, dst.bytes,
+-							   ctxt->vcpu);
+-			else
+-				rc = ops->write_emulated((unsigned long)dst.ptr,
+-							 &dst.val, dst.bytes,
+-							 ctxt->vcpu);
+-			if (rc != 0)
+-				goto done;
+-		default:
+-			break;
+-		}
+-	}
+-
+-	/* Commit shadow register state. */
+-	memcpy(ctxt->vcpu->regs, _regs, sizeof _regs);
+-	ctxt->eflags = _eflags;
+-	ctxt->vcpu->rip = _eip;
+-
+-done:
+-	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
+-
+-special_insn:
+-	if (twobyte)
+-		goto twobyte_special_insn;
+-	switch(b) {
+-	case 0x50 ... 0x57:  /* push reg */
+-		if (op_bytes == 2)
+-			src.val = (u16) _regs[b & 0x7];
+-		else
+-			src.val = (u32) _regs[b & 0x7];
+-		dst.type  = OP_MEM;
+-		dst.bytes = op_bytes;
+-		dst.val = src.val;
+-		register_address_increment(_regs[VCPU_REGS_RSP], -op_bytes);
+-		dst.ptr = (void *) register_address(
+-			ctxt->ss_base, _regs[VCPU_REGS_RSP]);
+-		break;
+-	case 0x58 ... 0x5f: /* pop reg */
+-		dst.ptr = (unsigned long *)&_regs[b & 0x7];
+-	pop_instruction:
+-		if ((rc = ops->read_std(register_address(ctxt->ss_base,
+-			_regs[VCPU_REGS_RSP]), dst.ptr, op_bytes, ctxt->vcpu))
+-			!= 0)
+-			goto done;
+-
+-		register_address_increment(_regs[VCPU_REGS_RSP], op_bytes);
+-		no_wb = 1; /* Disable writeback. */
+-		break;
+-	case 0x6a: /* push imm8 */
+-		src.val = 0L;
+-		src.val = insn_fetch(s8, 1, _eip);
+-	push:
+-		dst.type  = OP_MEM;
+-		dst.bytes = op_bytes;
+-		dst.val = src.val;
+-		register_address_increment(_regs[VCPU_REGS_RSP], -op_bytes);
+-		dst.ptr = (void *) register_address(ctxt->ss_base,
+-							_regs[VCPU_REGS_RSP]);
+-		break;
+-	case 0x6c:		/* insb */
+-	case 0x6d:		/* insw/insd */
+-		 if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
+-				1, 					/* in */
+-				(d & ByteOp) ? 1 : op_bytes, 		/* size */
+-				rep_prefix ?
+-				address_mask(_regs[VCPU_REGS_RCX]) : 1,	/* count */
+-				(_eflags & EFLG_DF),			/* down */
+-				register_address(ctxt->es_base,
+-						 _regs[VCPU_REGS_RDI]),	/* address */
+-				rep_prefix,
+-				_regs[VCPU_REGS_RDX]			/* port */
+-				) == 0)
+-			return -1;
+-		return 0;
+-	case 0x6e:		/* outsb */
+-	case 0x6f:		/* outsw/outsd */
+-		if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
+-				0, 					/* in */
+-				(d & ByteOp) ? 1 : op_bytes, 		/* size */
+-				rep_prefix ?
+-				address_mask(_regs[VCPU_REGS_RCX]) : 1,	/* count */
+-				(_eflags & EFLG_DF),			/* down */
+-				register_address(override_base ?
+-						 *override_base : ctxt->ds_base,
+-						 _regs[VCPU_REGS_RSI]),	/* address */
+-				rep_prefix,
+-				_regs[VCPU_REGS_RDX]			/* port */
+-				) == 0)
+-			return -1;
+-		return 0;
+-	case 0x70 ... 0x7f: /* jcc (short) */ {
+-		int rel = insn_fetch(s8, 1, _eip);
+-
+-		if (test_cc(b, _eflags))
+-		JMP_REL(rel);
+-		break;
+-	}
+-	case 0x9c: /* pushf */
+-		src.val =  (unsigned long) _eflags;
+-		goto push;
+-	case 0x9d: /* popf */
+-		dst.ptr = (unsigned long *) &_eflags;
+-		goto pop_instruction;
+-	case 0xc3: /* ret */
+-		dst.ptr = &_eip;
+-		goto pop_instruction;
+-	case 0xf4:              /* hlt */
+-		ctxt->vcpu->halt_request = 1;
+-		goto done;
+-	}
+-	if (rep_prefix) {
+-		if (_regs[VCPU_REGS_RCX] == 0) {
+-			ctxt->vcpu->rip = _eip;
+-			goto done;
+-		}
+-		_regs[VCPU_REGS_RCX]--;
+-		_eip = ctxt->vcpu->rip;
+-	}
+-	switch (b) {
+-	case 0xa4 ... 0xa5:	/* movs */
+-		dst.type = OP_MEM;
+-		dst.bytes = (d & ByteOp) ? 1 : op_bytes;
+-		dst.ptr = (unsigned long *)register_address(ctxt->es_base,
+-							_regs[VCPU_REGS_RDI]);
+-		if ((rc = ops->read_emulated(register_address(
+-		      override_base ? *override_base : ctxt->ds_base,
+-		      _regs[VCPU_REGS_RSI]), &dst.val, dst.bytes, ctxt->vcpu)) != 0)
+-			goto done;
+-		register_address_increment(_regs[VCPU_REGS_RSI],
+-			     (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
+-		register_address_increment(_regs[VCPU_REGS_RDI],
+-			     (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
+-		break;
+-	case 0xa6 ... 0xa7:	/* cmps */
+-		DPRINTF("Urk! I don't handle CMPS.\n");
+-		goto cannot_emulate;
+-	case 0xaa ... 0xab:	/* stos */
+-		dst.type = OP_MEM;
+-		dst.bytes = (d & ByteOp) ? 1 : op_bytes;
+-		dst.ptr = (unsigned long *)cr2;
+-		dst.val = _regs[VCPU_REGS_RAX];
+-		register_address_increment(_regs[VCPU_REGS_RDI],
+-			     (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
+-		break;
+-	case 0xac ... 0xad:	/* lods */
+-		dst.type = OP_REG;
+-		dst.bytes = (d & ByteOp) ? 1 : op_bytes;
+-		dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
+-		if ((rc = ops->read_emulated(cr2, &dst.val, dst.bytes,
+-					     ctxt->vcpu)) != 0)
+-			goto done;
+-		register_address_increment(_regs[VCPU_REGS_RSI],
+-			   (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
+-		break;
+-	case 0xae ... 0xaf:	/* scas */
+-		DPRINTF("Urk! I don't handle SCAS.\n");
+-		goto cannot_emulate;
+-	case 0xe8: /* call (near) */ {
+-		long int rel;
+-		switch (op_bytes) {
+-		case 2:
+-			rel = insn_fetch(s16, 2, _eip);
+-			break;
+-		case 4:
+-			rel = insn_fetch(s32, 4, _eip);
+-			break;
+-		case 8:
+-			rel = insn_fetch(s64, 8, _eip);
+-			break;
+-		default:
+-			DPRINTF("Call: Invalid op_bytes\n");
+-			goto cannot_emulate;
+-		}
+-		src.val = (unsigned long) _eip;
+-		JMP_REL(rel);
+-		op_bytes = ad_bytes;
+-		goto push;
+-	}
+-	case 0xe9: /* jmp rel */
+-	case 0xeb: /* jmp rel short */
+-		JMP_REL(src.val);
+-		no_wb = 1; /* Disable writeback. */
+-		break;
+-
+-
+-	}
+-	goto writeback;
+-
+-twobyte_insn:
+-	switch (b) {
+-	case 0x01: /* lgdt, lidt, lmsw */
+-		/* Disable writeback. */
+-		no_wb = 1;
+-		switch (modrm_reg) {
+-			u16 size;
+-			unsigned long address;
+-
+-		case 2: /* lgdt */
+-			rc = read_descriptor(ctxt, ops, src.ptr,
+-					     &size, &address, op_bytes);
+-			if (rc)
+-				goto done;
+-			realmode_lgdt(ctxt->vcpu, size, address);
+-			break;
+-		case 3: /* lidt */
+-			rc = read_descriptor(ctxt, ops, src.ptr,
+-					     &size, &address, op_bytes);
+-			if (rc)
+-				goto done;
+-			realmode_lidt(ctxt->vcpu, size, address);
+-			break;
+-		case 4: /* smsw */
+-			if (modrm_mod != 3)
+-				goto cannot_emulate;
+-			*(u16 *)&_regs[modrm_rm]
+-				= realmode_get_cr(ctxt->vcpu, 0);
+-			break;
+-		case 6: /* lmsw */
+-			if (modrm_mod != 3)
+-				goto cannot_emulate;
+-			realmode_lmsw(ctxt->vcpu, (u16)modrm_val, &_eflags);
+-			break;
+-		case 7: /* invlpg*/
+-			emulate_invlpg(ctxt->vcpu, cr2);
+-			break;
+-		default:
+-			goto cannot_emulate;
+-		}
+-		break;
+-	case 0x21: /* mov from dr to reg */
+-		no_wb = 1;
+-		if (modrm_mod != 3)
+-			goto cannot_emulate;
+-		rc = emulator_get_dr(ctxt, modrm_reg, &_regs[modrm_rm]);
+-		break;
+-	case 0x23: /* mov from reg to dr */
+-		no_wb = 1;
+-		if (modrm_mod != 3)
+-			goto cannot_emulate;
+-		rc = emulator_set_dr(ctxt, modrm_reg, _regs[modrm_rm]);
+-		break;
+-	case 0x40 ... 0x4f:	/* cmov */
+-		dst.val = dst.orig_val = src.val;
+-		no_wb = 1;
+-		/*
+-		 * First, assume we're decoding an even cmov opcode
+-		 * (lsb == 0).
+-		 */
+-		switch ((b & 15) >> 1) {
+-		case 0:	/* cmovo */
+-			no_wb = (_eflags & EFLG_OF) ? 0 : 1;
+-			break;
+-		case 1:	/* cmovb/cmovc/cmovnae */
+-			no_wb = (_eflags & EFLG_CF) ? 0 : 1;
+-			break;
+-		case 2:	/* cmovz/cmove */
+-			no_wb = (_eflags & EFLG_ZF) ? 0 : 1;
+-			break;
+-		case 3:	/* cmovbe/cmovna */
+-			no_wb = (_eflags & (EFLG_CF | EFLG_ZF)) ? 0 : 1;
+-			break;
+-		case 4:	/* cmovs */
+-			no_wb = (_eflags & EFLG_SF) ? 0 : 1;
+-			break;
+-		case 5:	/* cmovp/cmovpe */
+-			no_wb = (_eflags & EFLG_PF) ? 0 : 1;
+-			break;
+-		case 7:	/* cmovle/cmovng */
+-			no_wb = (_eflags & EFLG_ZF) ? 0 : 1;
+-			/* fall through */
+-		case 6:	/* cmovl/cmovnge */
+-			no_wb &= (!(_eflags & EFLG_SF) !=
+-			      !(_eflags & EFLG_OF)) ? 0 : 1;
+-			break;
+-		}
+-		/* Odd cmov opcodes (lsb == 1) have inverted sense. */
+-		no_wb ^= b & 1;
+-		break;
+-	case 0xa3:
+-	      bt:		/* bt */
+-		src.val &= (dst.bytes << 3) - 1; /* only subword offset */
+-		emulate_2op_SrcV_nobyte("bt", src, dst, _eflags);
+-		break;
+-	case 0xab:
+-	      bts:		/* bts */
+-		src.val &= (dst.bytes << 3) - 1; /* only subword offset */
+-		emulate_2op_SrcV_nobyte("bts", src, dst, _eflags);
+-		break;
+-	case 0xb0 ... 0xb1:	/* cmpxchg */
+-		/*
+-		 * Save real source value, then compare EAX against
+-		 * destination.
+-		 */
+-		src.orig_val = src.val;
+-		src.val = _regs[VCPU_REGS_RAX];
+-		emulate_2op_SrcV("cmp", src, dst, _eflags);
+-		if (_eflags & EFLG_ZF) {
+-			/* Success: write back to memory. */
+-			dst.val = src.orig_val;
+-		} else {
+-			/* Failure: write the value we saw to EAX. */
+-			dst.type = OP_REG;
+-			dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
+-		}
+-		break;
+-	case 0xb3:
+-	      btr:		/* btr */
+-		src.val &= (dst.bytes << 3) - 1; /* only subword offset */
+-		emulate_2op_SrcV_nobyte("btr", src, dst, _eflags);
+-		break;
+-	case 0xb6 ... 0xb7:	/* movzx */
+-		dst.bytes = op_bytes;
+-		dst.val = (d & ByteOp) ? (u8) src.val : (u16) src.val;
+-		break;
+-	case 0xba:		/* Grp8 */
+-		switch (modrm_reg & 3) {
+-		case 0:
+-			goto bt;
+-		case 1:
+-			goto bts;
+-		case 2:
+-			goto btr;
+-		case 3:
+-			goto btc;
+-		}
+-		break;
+-	case 0xbb:
+-	      btc:		/* btc */
+-		src.val &= (dst.bytes << 3) - 1; /* only subword offset */
+-		emulate_2op_SrcV_nobyte("btc", src, dst, _eflags);
+-		break;
+-	case 0xbe ... 0xbf:	/* movsx */
+-		dst.bytes = op_bytes;
+-		dst.val = (d & ByteOp) ? (s8) src.val : (s16) src.val;
+-		break;
+-	case 0xc3:		/* movnti */
+-		dst.bytes = op_bytes;
+-		dst.val = (op_bytes == 4) ? (u32) src.val : (u64) src.val;
+-		break;
+-	}
+-	goto writeback;
+-
+-twobyte_special_insn:
+-	/* Disable writeback. */
+-	no_wb = 1;
+-	switch (b) {
+-	case 0x06:
+-		emulate_clts(ctxt->vcpu);
+-		break;
+-	case 0x08:		/* invd */
+-		break;
+-	case 0x09:		/* wbinvd */
+-		break;
+-	case 0x0d:		/* GrpP (prefetch) */
+-	case 0x18:		/* Grp16 (prefetch/nop) */
+-		break;
+-	case 0x20: /* mov cr, reg */
+-		if (modrm_mod != 3)
+-			goto cannot_emulate;
+-		_regs[modrm_rm] = realmode_get_cr(ctxt->vcpu, modrm_reg);
+-		break;
+-	case 0x22: /* mov reg, cr */
+-		if (modrm_mod != 3)
+-			goto cannot_emulate;
+-		realmode_set_cr(ctxt->vcpu, modrm_reg, modrm_val, &_eflags);
+-		break;
+-	case 0x30:
+-		/* wrmsr */
+-		msr_data = (u32)_regs[VCPU_REGS_RAX]
+-			| ((u64)_regs[VCPU_REGS_RDX] << 32);
+-		rc = kvm_set_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], msr_data);
+-		if (rc) {
+-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+-			_eip = ctxt->vcpu->rip;
+-		}
+-		rc = X86EMUL_CONTINUE;
+-		break;
+-	case 0x32:
+-		/* rdmsr */
+-		rc = kvm_get_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], &msr_data);
+-		if (rc) {
+-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+-			_eip = ctxt->vcpu->rip;
+-		} else {
+-			_regs[VCPU_REGS_RAX] = (u32)msr_data;
+-			_regs[VCPU_REGS_RDX] = msr_data >> 32;
+-		}
+-		rc = X86EMUL_CONTINUE;
+-		break;
+-	case 0x80 ... 0x8f: /* jnz rel, etc*/ {
+-		long int rel;
+-
+-		switch (op_bytes) {
+-		case 2:
+-			rel = insn_fetch(s16, 2, _eip);
+-			break;
+-		case 4:
+-			rel = insn_fetch(s32, 4, _eip);
+-			break;
+-		case 8:
+-			rel = insn_fetch(s64, 8, _eip);
+-			break;
+-		default:
+-			DPRINTF("jnz: Invalid op_bytes\n");
+-			goto cannot_emulate;
+-		}
+-		if (test_cc(b, _eflags))
+-			JMP_REL(rel);
+-		break;
+-	}
+-	case 0xc7:		/* Grp9 (cmpxchg8b) */
+-		{
+-			u64 old, new;
+-			if ((rc = ops->read_emulated(cr2, &old, 8, ctxt->vcpu))
+-									!= 0)
+-				goto done;
+-			if (((u32) (old >> 0) != (u32) _regs[VCPU_REGS_RAX]) ||
+-			    ((u32) (old >> 32) != (u32) _regs[VCPU_REGS_RDX])) {
+-				_regs[VCPU_REGS_RAX] = (u32) (old >> 0);
+-				_regs[VCPU_REGS_RDX] = (u32) (old >> 32);
+-				_eflags &= ~EFLG_ZF;
+-			} else {
+-				new = ((u64)_regs[VCPU_REGS_RCX] << 32)
+-					| (u32) _regs[VCPU_REGS_RBX];
+-				if ((rc = ops->cmpxchg_emulated(cr2, &old,
+-							  &new, 8, ctxt->vcpu)) != 0)
+-					goto done;
+-				_eflags |= EFLG_ZF;
+-			}
+-			break;
+-		}
+-	}
+-	goto writeback;
+-
+-cannot_emulate:
+-	DPRINTF("Cannot emulate %02x\n", b);
+-	return -1;
+-}
+-
+-#ifdef __XEN__
+-
+-#include <asm/mm.h>
+-#include <asm/uaccess.h>
+-
+-int
+-x86_emulate_read_std(unsigned long addr,
+-		     unsigned long *val,
+-		     unsigned int bytes, struct x86_emulate_ctxt *ctxt)
 -{
--	int ret;
+-	unsigned int rc;
 -
--	ret = sysfs_create_group(&drv->kobj, &driver_attr_group);
+-	*val = 0;
 -
--	return ret;
+-	if ((rc = copy_from_user((void *)val, (void *)addr, bytes)) != 0) {
+-		propagate_page_fault(addr + bytes - rc, 0);	/* read fault */
+-		return X86EMUL_PROPAGATE_FAULT;
+-	}
+-
+-	return X86EMUL_CONTINUE;
 -}
 -
--void ipath_driver_remove_group(struct device_driver *drv)
+-int
+-x86_emulate_write_std(unsigned long addr,
+-		      unsigned long val,
+-		      unsigned int bytes, struct x86_emulate_ctxt *ctxt)
 -{
--	sysfs_remove_group(&drv->kobj, &driver_attr_group);
+-	unsigned int rc;
+-
+-	if ((rc = copy_to_user((void *)addr, (void *)&val, bytes)) != 0) {
+-		propagate_page_fault(addr + bytes - rc, PGERR_write_access);
+-		return X86EMUL_PROPAGATE_FAULT;
+-	}
+-
+-	return X86EMUL_CONTINUE;
 -}
 -
- int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
- {
- 	int ret;
--	char unit[5];
- 
- 	ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
- 	if (ret)
-@@ -780,11 +1107,26 @@ int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
- 	if (ret)
- 		goto bail_attrs;
- 
--	snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
--	ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj, unit);
--	if (ret == 0)
--		goto bail;
-+	if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
-+		ret = device_create_file(dev, &dev_attr_jint_idle_ticks);
-+		if (ret)
-+			goto bail_counter;
-+		ret = device_create_file(dev, &dev_attr_jint_max_packets);
-+		if (ret)
-+			goto bail_idle;
- 
-+		ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group);
-+		if (ret)
-+			goto bail_max;
-+	}
-+
-+	return 0;
-+
-+bail_max:
-+	device_remove_file(dev, &dev_attr_jint_max_packets);
-+bail_idle:
-+	device_remove_file(dev, &dev_attr_jint_idle_ticks);
-+bail_counter:
- 	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
- bail_attrs:
- 	sysfs_remove_group(&dev->kobj, &dev_attr_group);
-@@ -794,12 +1136,14 @@ bail:
- 
- void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
- {
--	char unit[5];
-+	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
- 
--	snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
--	sysfs_remove_link(&dev->driver->kobj, unit);
-+	if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
-+		sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group);
-+		device_remove_file(dev, &dev_attr_jint_idle_ticks);
-+		device_remove_file(dev, &dev_attr_jint_max_packets);
-+	}
- 
--	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
- 	sysfs_remove_group(&dev->kobj, &dev_attr_group);
- 
- 	device_remove_file(dev, &dev_attr_reset);
-diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
-index b3df6f3..de67eed 100644
---- a/drivers/infiniband/hw/ipath/ipath_ud.c
-+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
-@@ -301,8 +301,6 @@ int ipath_make_ud_req(struct ipath_qp *qp)
- 
- 	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
- 	qp->s_hdrwords = 7;
--	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
--		qp->s_hdrwords++;
- 	qp->s_cur_size = wqe->length;
- 	qp->s_cur_sge = &qp->s_sge;
- 	qp->s_wqe = wqe;
-@@ -327,6 +325,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
- 		ohdr = &qp->s_hdr.u.oth;
- 	}
- 	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
-+		qp->s_hdrwords++;
- 		ohdr->u.ud.imm_data = wqe->wr.imm_data;
- 		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
- 	} else
-diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
-index c4c9984..32d8f88 100644
---- a/drivers/infiniband/hw/ipath/ipath_verbs.c
-+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
-@@ -943,7 +943,7 @@ bail:
-  * ipath_verbs_send - send a packet
-  * @qp: the QP to send on
-  * @hdr: the packet header
-- * @hdrwords: the number of words in the header
-+ * @hdrwords: the number of 32-bit words in the header
-  * @ss: the SGE to send
-  * @len: the length of the packet in bytes
-  */
-@@ -955,7 +955,10 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
- 	int ret;
- 	u32 dwords = (len + 3) >> 2;
- 
--	/* +1 is for the qword padding of pbc */
-+	/*
-+	 * Calculate the send buffer trigger address.
-+	 * The +1 counts for the pbc control dword following the pbc length.
-+	 */
- 	plen = hdrwords + dwords + 1;
- 
- 	/* Drop non-VL15 packets if we are not in the active state */
-@@ -1130,20 +1133,34 @@ static int ipath_query_device(struct ib_device *ibdev,
- 	return 0;
- }
- 
--const u8 ipath_cvt_physportstate[16] = {
--	[INFINIPATH_IBCS_LT_STATE_DISABLED] = 3,
--	[INFINIPATH_IBCS_LT_STATE_LINKUP] = 5,
--	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2,
--	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2,
--	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1,
--	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1,
--	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4,
--	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4,
--	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4,
--	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4,
--	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6,
--	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6,
--	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
-+const u8 ipath_cvt_physportstate[32] = {
-+	[INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
-+	[INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
-+	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
-+	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
-+	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
-+	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
-+	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
-+		IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
-+		IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
-+		IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
-+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-+	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
-+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-+	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
-+		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-+	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
-+	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
- };
- 
- u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
-@@ -1168,8 +1185,9 @@ static int ipath_query_port(struct ib_device *ibdev,
- 	ibcstat = dd->ipath_lastibcstat;
- 	props->state = ((ibcstat >> 4) & 0x3) + 1;
- 	/* See phys_state_show() */
--	props->phys_state = ipath_cvt_physportstate[
--		dd->ipath_lastibcstat & 0xf];
-+	props->phys_state = /* MEA: assumes shift == 0 */
-+		ipath_cvt_physportstate[dd->ipath_lastibcstat &
-+		dd->ibcs_lts_mask];
- 	props->port_cap_flags = dev->port_cap_flags;
- 	props->gid_tbl_len = 1;
- 	props->max_msg_sz = 0x80000000;
-@@ -1641,6 +1659,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
- 		cntrs.local_link_integrity_errors;
- 	idev->z_excessive_buffer_overrun_errors =
- 		cntrs.excessive_buffer_overrun_errors;
-+	idev->z_vl15_dropped = cntrs.vl15_dropped;
- 
- 	/*
- 	 * The system image GUID is supposed to be the same for all
-diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
-index 6ccb54f..3d59736 100644
---- a/drivers/infiniband/hw/ipath/ipath_verbs.h
-+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
-@@ -554,6 +554,7 @@ struct ipath_ibdev {
- 	u32 z_pkey_violations;			/* starting count for PMA */
- 	u32 z_local_link_integrity_errors;	/* starting count for PMA */
- 	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
-+	u32 z_vl15_dropped;			/* starting count for PMA */
- 	u32 n_rc_resends;
- 	u32 n_rc_acks;
- 	u32 n_rc_qacks;
-@@ -598,6 +599,7 @@ struct ipath_verbs_counters {
- 	u64 port_rcv_packets;
- 	u32 local_link_integrity_errors;
- 	u32 excessive_buffer_overrun_errors;
-+	u32 vl15_dropped;
- };
- 
- static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
-@@ -830,7 +832,17 @@ unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
- 
- extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
- 
-+/*
-+ * Below converts HCA-specific LinkTrainingState to IB PhysPortState
-+ * values.
-+ */
- extern const u8 ipath_cvt_physportstate[];
-+#define IB_PHYSPORTSTATE_SLEEP 1
-+#define IB_PHYSPORTSTATE_POLL 2
-+#define IB_PHYSPORTSTATE_DISABLED 3
-+#define IB_PHYSPORTSTATE_CFG_TRAIN 4
-+#define IB_PHYSPORTSTATE_LINKUP 5
-+#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
- 
- extern const int ib_ipath_state_ops[];
- 
-diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
-index 9d32c49..7950aa6 100644
---- a/drivers/infiniband/hw/mlx4/cq.c
-+++ b/drivers/infiniband/hw/mlx4/cq.c
-@@ -313,6 +313,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
- 	struct mlx4_ib_srq *srq;
- 	int is_send;
- 	int is_error;
-+	u32 g_mlpath_rqpn;
- 	u16 wqe_ctr;
- 
- 	cqe = next_cqe_sw(cq);
-@@ -426,10 +427,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
- 
- 		wc->slid	   = be16_to_cpu(cqe->rlid);
- 		wc->sl		   = cqe->sl >> 4;
--		wc->src_qp	   = be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff;
--		wc->dlid_path_bits = (be32_to_cpu(cqe->g_mlpath_rqpn) >> 24) & 0x7f;
--		wc->wc_flags      |= be32_to_cpu(cqe->g_mlpath_rqpn) & 0x80000000 ?
--			IB_WC_GRH : 0;
-+		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
-+		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
-+		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
-+		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
- 		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
- 	}
- 
-diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
-index 15aa32e..7bbdd1f 100644
---- a/drivers/infiniband/hw/mthca/mthca_dev.h
-+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
-@@ -60,13 +60,12 @@
- enum {
- 	MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
- 	MTHCA_FLAG_SRQ        = 1 << 2,
--	MTHCA_FLAG_MSI        = 1 << 3,
--	MTHCA_FLAG_MSI_X      = 1 << 4,
--	MTHCA_FLAG_NO_LAM     = 1 << 5,
--	MTHCA_FLAG_FMR        = 1 << 6,
--	MTHCA_FLAG_MEMFREE    = 1 << 7,
--	MTHCA_FLAG_PCIE       = 1 << 8,
--	MTHCA_FLAG_SINAI_OPT  = 1 << 9
-+	MTHCA_FLAG_MSI_X      = 1 << 3,
-+	MTHCA_FLAG_NO_LAM     = 1 << 4,
-+	MTHCA_FLAG_FMR        = 1 << 5,
-+	MTHCA_FLAG_MEMFREE    = 1 << 6,
-+	MTHCA_FLAG_PCIE       = 1 << 7,
-+	MTHCA_FLAG_SINAI_OPT  = 1 << 8
- };
- 
- enum {
-diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
-index b29de51..b60eb5d 100644
---- a/drivers/infiniband/hw/mthca/mthca_eq.c
-+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
-@@ -827,8 +827,7 @@ int mthca_init_eq_table(struct mthca_dev *dev)
- 	if (err)
- 		goto err_out_free;
- 
--	if (dev->mthca_flags & MTHCA_FLAG_MSI ||
--	    dev->mthca_flags & MTHCA_FLAG_MSI_X) {
-+	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
- 		dev->eq_table.clr_mask = 0;
- 	} else {
- 		dev->eq_table.clr_mask =
-@@ -839,8 +838,7 @@ int mthca_init_eq_table(struct mthca_dev *dev)
- 
- 	dev->eq_table.arm_mask = 0;
- 
--	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
--		128 : dev->eq_table.inta_pin;
-+	intr = dev->eq_table.inta_pin;
- 
- 	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
- 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
-diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
-index 60de6f9..5cf8250 100644
---- a/drivers/infiniband/hw/mthca/mthca_main.c
-+++ b/drivers/infiniband/hw/mthca/mthca_main.c
-@@ -65,14 +65,9 @@ static int msi_x = 1;
- module_param(msi_x, int, 0444);
- MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
- 
--static int msi = 0;
--module_param(msi, int, 0444);
--MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero (deprecated, use MSI-X instead)");
+-#endif
+diff --git a/drivers/kvm/x86_emulate.h b/drivers/kvm/x86_emulate.h
+deleted file mode 100644
+index 92c73aa..0000000
+--- a/drivers/kvm/x86_emulate.h
++++ /dev/null
+@@ -1,155 +0,0 @@
+-/******************************************************************************
+- * x86_emulate.h
+- *
+- * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
+- *
+- * Copyright (c) 2005 Keir Fraser
+- *
+- * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
+- */
 -
- #else /* CONFIG_PCI_MSI */
- 
- #define msi_x (0)
--#define msi   (0)
- 
- #endif /* CONFIG_PCI_MSI */
- 
-@@ -816,13 +811,11 @@ static int mthca_setup_hca(struct mthca_dev *dev)
- 
- 	err = mthca_NOP(dev, &status);
- 	if (err || status) {
--		if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X)) {
-+		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
- 			mthca_warn(dev, "NOP command failed to generate interrupt "
- 				   "(IRQ %d).\n",
--				   dev->mthca_flags & MTHCA_FLAG_MSI_X ?
--				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector :
--				   dev->pdev->irq);
--			mthca_warn(dev, "Trying again with MSI/MSI-X disabled.\n");
-+				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
-+			mthca_warn(dev, "Trying again with MSI-X disabled.\n");
- 		} else {
- 			mthca_err(dev, "NOP command failed to generate interrupt "
- 				  "(IRQ %d), aborting.\n",
-@@ -1005,7 +998,7 @@ static struct {
- 			   .flags     = 0 },
- 	[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
- 			   .flags     = MTHCA_FLAG_PCIE },
--	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 2, 0),
-+	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
- 			   .flags     = MTHCA_FLAG_MEMFREE |
- 					MTHCA_FLAG_PCIE },
- 	[SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
-@@ -1128,29 +1121,12 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
- 
- 	if (msi_x && !mthca_enable_msi_x(mdev))
- 		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
--	else if (msi) {
--		static int warned;
+-#ifndef __X86_EMULATE_H__
+-#define __X86_EMULATE_H__
 -
--		if (!warned) {
--			printk(KERN_WARNING PFX "WARNING: MSI support will be "
--			       "removed from the ib_mthca driver in January 2008.\n");
--			printk(KERN_WARNING "    If you are using MSI and cannot "
--			       "switch to MSI-X, please tell "
--			       "<general at lists.openfabrics.org>.\n");
--			++warned;
--		}
+-struct x86_emulate_ctxt;
 -
--		if (!pci_enable_msi(pdev))
--			mdev->mthca_flags |= MTHCA_FLAG_MSI;
--	}
- 
- 	err = mthca_setup_hca(mdev);
--	if (err == -EBUSY && (mdev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X))) {
-+	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
- 		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- 			pci_disable_msix(pdev);
--		if (mdev->mthca_flags & MTHCA_FLAG_MSI)
--			pci_disable_msi(pdev);
--		mdev->mthca_flags &= ~(MTHCA_FLAG_MSI_X | MTHCA_FLAG_MSI);
-+		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
- 
- 		err = mthca_setup_hca(mdev);
- 	}
-@@ -1192,8 +1168,6 @@ err_cleanup:
- err_close:
- 	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- 		pci_disable_msix(pdev);
--	if (mdev->mthca_flags & MTHCA_FLAG_MSI)
--		pci_disable_msi(pdev);
- 
- 	mthca_close_hca(mdev);
- 
-@@ -1246,8 +1220,6 @@ static void __mthca_remove_one(struct pci_dev *pdev)
- 
- 		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- 			pci_disable_msix(pdev);
--		if (mdev->mthca_flags & MTHCA_FLAG_MSI)
--			pci_disable_msi(pdev);
- 
- 		ib_dealloc_device(&mdev->ib_dev);
- 		mthca_release_regions(pdev, mdev->mthca_flags &
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
-index eb7edab..fe250c6 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib.h
-+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
-@@ -56,42 +56,43 @@
- /* constants */
- 
- enum {
--	IPOIB_PACKET_SIZE         = 2048,
--	IPOIB_BUF_SIZE 		  = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-+	IPOIB_PACKET_SIZE	  = 2048,
-+	IPOIB_BUF_SIZE		  = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
- 
--	IPOIB_ENCAP_LEN 	  = 4,
-+	IPOIB_ENCAP_LEN		  = 4,
- 
--	IPOIB_CM_MTU              = 0x10000 - 0x10, /* padding to align header to 16 */
--	IPOIB_CM_BUF_SIZE         = IPOIB_CM_MTU  + IPOIB_ENCAP_LEN,
--	IPOIB_CM_HEAD_SIZE 	  = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
--	IPOIB_CM_RX_SG            = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
--	IPOIB_RX_RING_SIZE 	  = 128,
--	IPOIB_TX_RING_SIZE 	  = 64,
-+	IPOIB_CM_MTU		  = 0x10000 - 0x10, /* padding to align header to 16 */
-+	IPOIB_CM_BUF_SIZE	  = IPOIB_CM_MTU  + IPOIB_ENCAP_LEN,
-+	IPOIB_CM_HEAD_SIZE	  = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
-+	IPOIB_CM_RX_SG		  = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
-+	IPOIB_RX_RING_SIZE	  = 128,
-+	IPOIB_TX_RING_SIZE	  = 64,
- 	IPOIB_MAX_QUEUE_SIZE	  = 8192,
- 	IPOIB_MIN_QUEUE_SIZE	  = 2,
-+	IPOIB_CM_MAX_CONN_QP	  = 4096,
- 
--	IPOIB_NUM_WC 		  = 4,
-+	IPOIB_NUM_WC		  = 4,
- 
- 	IPOIB_MAX_PATH_REC_QUEUE  = 3,
--	IPOIB_MAX_MCAST_QUEUE     = 3,
+-/*
+- * x86_emulate_ops:
+- *
+- * These operations represent the instruction emulator's interface to memory.
+- * There are two categories of operation: those that act on ordinary memory
+- * regions (*_std), and those that act on memory regions known to require
+- * special treatment or emulation (*_emulated).
+- *
+- * The emulator assumes that an instruction accesses only one 'emulated memory'
+- * location, that this location is the given linear faulting address (cr2), and
+- * that this is one of the instruction's data operands. Instruction fetches and
+- * stack operations are assumed never to access emulated memory. The emulator
+- * automatically deduces which operand of a string-move operation is accessing
+- * emulated memory, and assumes that the other operand accesses normal memory.
+- *
+- * NOTES:
+- *  1. The emulator isn't very smart about emulated vs. standard memory.
+- *     'Emulated memory' access addresses should be checked for sanity.
+- *     'Normal memory' accesses may fault, and the caller must arrange to
+- *     detect and handle reentrancy into the emulator via recursive faults.
+- *     Accesses may be unaligned and may cross page boundaries.
+- *  2. If the access fails (cannot emulate, or a standard access faults) then
+- *     it is up to the memop to propagate the fault to the guest VM via
+- *     some out-of-band mechanism, unknown to the emulator. The memop signals
+- *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
+- *     then immediately bail.
+- *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
+- *     cmpxchg8b_emulated need support 8-byte accesses.
+- *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
+- */
+-/* Access completed successfully: continue emulation as normal. */
+-#define X86EMUL_CONTINUE        0
+-/* Access is unhandleable: bail from emulation and return error to caller. */
+-#define X86EMUL_UNHANDLEABLE    1
+-/* Terminate emulation but return success to the caller. */
+-#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
+-#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
+-#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */
+-struct x86_emulate_ops {
+-	/*
+-	 * read_std: Read bytes of standard (non-emulated/special) memory.
+-	 *           Used for instruction fetch, stack operations, and others.
+-	 *  @addr:  [IN ] Linear address from which to read.
+-	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+-	 *  @bytes: [IN ] Number of bytes to read from memory.
+-	 */
+-	int (*read_std)(unsigned long addr, void *val,
+-			unsigned int bytes, struct kvm_vcpu *vcpu);
+-
+-	/*
+-	 * write_std: Write bytes of standard (non-emulated/special) memory.
+-	 *            Used for stack operations, and others.
+-	 *  @addr:  [IN ] Linear address to which to write.
+-	 *  @val:   [IN ] Value to write to memory (low-order bytes used as
+-	 *                required).
+-	 *  @bytes: [IN ] Number of bytes to write to memory.
+-	 */
+-	int (*write_std)(unsigned long addr, const void *val,
+-			 unsigned int bytes, struct kvm_vcpu *vcpu);
+-
+-	/*
+-	 * read_emulated: Read bytes from emulated/special memory area.
+-	 *  @addr:  [IN ] Linear address from which to read.
+-	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+-	 *  @bytes: [IN ] Number of bytes to read from memory.
+-	 */
+-	int (*read_emulated) (unsigned long addr,
+-			      void *val,
+-			      unsigned int bytes,
+-			      struct kvm_vcpu *vcpu);
+-
+-	/*
+-	 * write_emulated: Read bytes from emulated/special memory area.
+-	 *  @addr:  [IN ] Linear address to which to write.
+-	 *  @val:   [IN ] Value to write to memory (low-order bytes used as
+-	 *                required).
+-	 *  @bytes: [IN ] Number of bytes to write to memory.
+-	 */
+-	int (*write_emulated) (unsigned long addr,
+-			       const void *val,
+-			       unsigned int bytes,
+-			       struct kvm_vcpu *vcpu);
+-
+-	/*
+-	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
+-	 *                   emulated/special memory area.
+-	 *  @addr:  [IN ] Linear address to access.
+-	 *  @old:   [IN ] Value expected to be current at @addr.
+-	 *  @new:   [IN ] Value to write to @addr.
+-	 *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
+-	 */
+-	int (*cmpxchg_emulated) (unsigned long addr,
+-				 const void *old,
+-				 const void *new,
+-				 unsigned int bytes,
+-				 struct kvm_vcpu *vcpu);
+-
+-};
+-
+-struct x86_emulate_ctxt {
+-	/* Register state before/after emulation. */
+-	struct kvm_vcpu *vcpu;
 -
--	IPOIB_FLAG_OPER_UP 	  = 0,
--	IPOIB_FLAG_INITIALIZED    = 1,
--	IPOIB_FLAG_ADMIN_UP 	  = 2,
--	IPOIB_PKEY_ASSIGNED 	  = 3,
--	IPOIB_PKEY_STOP 	  = 4,
--	IPOIB_FLAG_SUBINTERFACE   = 5,
--	IPOIB_MCAST_RUN 	  = 6,
--	IPOIB_STOP_REAPER         = 7,
--	IPOIB_MCAST_STARTED       = 8,
--	IPOIB_FLAG_ADMIN_CM 	  = 9,
-+	IPOIB_MAX_MCAST_QUEUE	  = 3,
-+
-+	IPOIB_FLAG_OPER_UP	  = 0,
-+	IPOIB_FLAG_INITIALIZED	  = 1,
-+	IPOIB_FLAG_ADMIN_UP	  = 2,
-+	IPOIB_PKEY_ASSIGNED	  = 3,
-+	IPOIB_PKEY_STOP		  = 4,
-+	IPOIB_FLAG_SUBINTERFACE	  = 5,
-+	IPOIB_MCAST_RUN		  = 6,
-+	IPOIB_STOP_REAPER	  = 7,
-+	IPOIB_MCAST_STARTED	  = 8,
-+	IPOIB_FLAG_ADMIN_CM	  = 9,
- 	IPOIB_FLAG_UMCAST	  = 10,
- 
- 	IPOIB_MAX_BACKOFF_SECONDS = 16,
- 
--	IPOIB_MCAST_FLAG_FOUND 	  = 0,	/* used in set_multicast_list */
-+	IPOIB_MCAST_FLAG_FOUND	  = 0,	/* used in set_multicast_list */
- 	IPOIB_MCAST_FLAG_SENDONLY = 1,
--	IPOIB_MCAST_FLAG_BUSY 	  = 2,	/* joining or already joined */
-+	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
- 	IPOIB_MCAST_FLAG_ATTACHED = 3,
- };
- 
-@@ -117,7 +118,7 @@ struct ipoib_pseudoheader {
- struct ipoib_mcast {
- 	struct ib_sa_mcmember_rec mcmember;
- 	struct ib_sa_multicast	 *mc;
--	struct ipoib_ah          *ah;
-+	struct ipoib_ah		 *ah;
- 
- 	struct rb_node    rb_node;
- 	struct list_head  list;
-@@ -186,27 +187,29 @@ enum ipoib_cm_state {
- };
- 
- struct ipoib_cm_rx {
--	struct ib_cm_id     *id;
--	struct ib_qp        *qp;
--	struct list_head     list;
--	struct net_device   *dev;
--	unsigned long        jiffies;
--	enum ipoib_cm_state  state;
-+	struct ib_cm_id	       *id;
-+	struct ib_qp	       *qp;
-+	struct ipoib_cm_rx_buf *rx_ring;
-+	struct list_head	list;
-+	struct net_device      *dev;
-+	unsigned long		jiffies;
-+	enum ipoib_cm_state	state;
-+	int			recv_count;
- };
- 
- struct ipoib_cm_tx {
--	struct ib_cm_id     *id;
--	struct ib_qp        *qp;
-+	struct ib_cm_id	    *id;
-+	struct ib_qp	    *qp;
- 	struct list_head     list;
- 	struct net_device   *dev;
- 	struct ipoib_neigh  *neigh;
- 	struct ipoib_path   *path;
- 	struct ipoib_tx_buf *tx_ring;
--	unsigned             tx_head;
--	unsigned             tx_tail;
--	unsigned long        flags;
--	u32                  mtu;
--	struct ib_wc         ibwc[IPOIB_NUM_WC];
-+	unsigned	     tx_head;
-+	unsigned	     tx_tail;
-+	unsigned long	     flags;
-+	u32		     mtu;
-+	struct ib_wc	     ibwc[IPOIB_NUM_WC];
- };
- 
- struct ipoib_cm_rx_buf {
-@@ -215,25 +218,28 @@ struct ipoib_cm_rx_buf {
- };
- 
- struct ipoib_cm_dev_priv {
--	struct ib_srq  	       *srq;
-+	struct ib_srq	       *srq;
- 	struct ipoib_cm_rx_buf *srq_ring;
--	struct ib_cm_id        *id;
--	struct list_head        passive_ids;   /* state: LIVE */
--	struct list_head        rx_error_list; /* state: ERROR */
--	struct list_head        rx_flush_list; /* state: FLUSH, drain not started */
--	struct list_head        rx_drain_list; /* state: FLUSH, drain started */
--	struct list_head        rx_reap_list;  /* state: FLUSH, drain done */
-+	struct ib_cm_id	       *id;
-+	struct list_head	passive_ids;   /* state: LIVE */
-+	struct list_head	rx_error_list; /* state: ERROR */
-+	struct list_head	rx_flush_list; /* state: FLUSH, drain not started */
-+	struct list_head	rx_drain_list; /* state: FLUSH, drain started */
-+	struct list_head	rx_reap_list;  /* state: FLUSH, drain done */
- 	struct work_struct      start_task;
- 	struct work_struct      reap_task;
- 	struct work_struct      skb_task;
- 	struct work_struct      rx_reap_task;
- 	struct delayed_work     stale_task;
- 	struct sk_buff_head     skb_queue;
--	struct list_head        start_list;
--	struct list_head        reap_list;
--	struct ib_wc            ibwc[IPOIB_NUM_WC];
--	struct ib_sge           rx_sge[IPOIB_CM_RX_SG];
-+	struct list_head	start_list;
-+	struct list_head	reap_list;
-+	struct ib_wc		ibwc[IPOIB_NUM_WC];
-+	struct ib_sge		rx_sge[IPOIB_CM_RX_SG];
- 	struct ib_recv_wr       rx_wr;
-+	int			nonsrq_conn_qp;
-+	int			max_cm_mtu;
-+	int			num_frags;
- };
- 
- /*
-@@ -269,30 +275,30 @@ struct ipoib_dev_priv {
- 	struct work_struct pkey_event_task;
- 
- 	struct ib_device *ca;
--	u8            	  port;
--	u16           	  pkey;
--	u16               pkey_index;
--	struct ib_pd  	 *pd;
--	struct ib_mr  	 *mr;
--	struct ib_cq  	 *cq;
--	struct ib_qp  	 *qp;
--	u32           	  qkey;
-+	u8		  port;
-+	u16		  pkey;
-+	u16		  pkey_index;
-+	struct ib_pd	 *pd;
-+	struct ib_mr	 *mr;
-+	struct ib_cq	 *cq;
-+	struct ib_qp	 *qp;
-+	u32		  qkey;
- 
- 	union ib_gid local_gid;
--	u16          local_lid;
-+	u16	     local_lid;
- 
- 	unsigned int admin_mtu;
- 	unsigned int mcast_mtu;
- 
- 	struct ipoib_rx_buf *rx_ring;
- 
--	spinlock_t           tx_lock;
-+	spinlock_t	     tx_lock;
- 	struct ipoib_tx_buf *tx_ring;
--	unsigned             tx_head;
--	unsigned             tx_tail;
--	struct ib_sge        tx_sge;
-+	unsigned	     tx_head;
-+	unsigned	     tx_tail;
-+	struct ib_sge	     tx_sge;
- 	struct ib_send_wr    tx_wr;
--	unsigned             tx_outstanding;
-+	unsigned	     tx_outstanding;
- 
- 	struct ib_wc ibwc[IPOIB_NUM_WC];
- 
-@@ -317,10 +323,10 @@ struct ipoib_dev_priv {
- 
- struct ipoib_ah {
- 	struct net_device *dev;
--	struct ib_ah      *ah;
-+	struct ib_ah	  *ah;
- 	struct list_head   list;
--	struct kref        ref;
--	unsigned           last_send;
-+	struct kref	   ref;
-+	unsigned	   last_send;
- };
- 
- struct ipoib_path {
-@@ -331,11 +337,11 @@ struct ipoib_path {
- 
- 	struct list_head      neigh_list;
- 
--	int                   query_id;
-+	int		      query_id;
- 	struct ib_sa_query   *query;
- 	struct completion     done;
- 
--	struct rb_node        rb_node;
-+	struct rb_node	      rb_node;
- 	struct list_head      list;
- };
- 
-@@ -344,7 +350,7 @@ struct ipoib_neigh {
- #ifdef CONFIG_INFINIBAND_IPOIB_CM
- 	struct ipoib_cm_tx *cm;
- #endif
--	union ib_gid        dgid;
-+	union ib_gid	    dgid;
- 	struct sk_buff_head queue;
- 
- 	struct neighbour   *neighbour;
-@@ -455,12 +461,14 @@ void ipoib_drain_cq(struct net_device *dev);
- 
- #ifdef CONFIG_INFINIBAND_IPOIB_CM
- 
--#define IPOIB_FLAGS_RC          0x80
--#define IPOIB_FLAGS_UC          0x40
-+#define IPOIB_FLAGS_RC		0x80
-+#define IPOIB_FLAGS_UC		0x40
- 
- /* We don't support UC connections at the moment */
- #define IPOIB_CM_SUPPORTED(ha)   (ha[0] & (IPOIB_FLAGS_RC))
- 
-+extern int ipoib_max_conn_qp;
-+
- static inline int ipoib_cm_admin_enabled(struct net_device *dev)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-@@ -491,6 +499,18 @@ static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *t
- 	neigh->cm = tx;
- }
- 
-+static inline int ipoib_cm_has_srq(struct net_device *dev)
-+{
-+	struct ipoib_dev_priv *priv = netdev_priv(dev);
-+	return !!priv->cm.srq;
-+}
-+
-+static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
-+{
-+	struct ipoib_dev_priv *priv = netdev_priv(dev);
-+	return priv->cm.max_cm_mtu;
-+}
-+
- void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx);
- int ipoib_cm_dev_open(struct net_device *dev);
- void ipoib_cm_dev_stop(struct net_device *dev);
-@@ -500,7 +520,7 @@ void ipoib_cm_dev_cleanup(struct net_device *dev);
- struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
- 				    struct ipoib_neigh *neigh);
- void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx);
--void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
-+void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
- 			   unsigned int mtu);
- void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
- void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
-@@ -508,6 +528,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
- 
- struct ipoib_cm_tx;
- 
-+#define ipoib_max_conn_qp 0
-+
- static inline int ipoib_cm_admin_enabled(struct net_device *dev)
- {
- 	return 0;
-@@ -533,6 +555,16 @@ static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *t
- {
- }
- 
-+static inline int ipoib_cm_has_srq(struct net_device *dev)
-+{
-+	return 0;
-+}
-+
-+static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
-+{
-+	return 0;
-+}
-+
- static inline
- void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
- {
-@@ -582,7 +614,7 @@ int ipoib_cm_add_mode_attr(struct net_device *dev)
- 	return 0;
- }
- 
--static inline void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
-+static inline void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
- 					 unsigned int mtu)
- {
- 	dev_kfree_skb_any(skb);
-@@ -624,12 +656,12 @@ extern struct ib_sa_client ipoib_sa_client;
- extern int ipoib_debug_level;
- 
- #define ipoib_dbg(priv, format, arg...)			\
--	do {					        \
-+	do {						\
- 		if (ipoib_debug_level > 0)			\
- 			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
- 	} while (0)
- #define ipoib_dbg_mcast(priv, format, arg...)		\
--	do {					        \
-+	do {						\
- 		if (mcast_debug_level > 0)		\
- 			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
- 	} while (0)
-@@ -642,7 +674,7 @@ extern int ipoib_debug_level;
- 
- #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
- #define ipoib_dbg_data(priv, format, arg...)		\
--	do {					        \
-+	do {						\
- 		if (data_debug_level > 0)		\
- 			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
- 	} while (0)
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
-index 059cf92..1818f95 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
-@@ -39,6 +39,15 @@
- #include <linux/icmpv6.h>
- #include <linux/delay.h>
- 
-+#include "ipoib.h"
-+
-+int ipoib_max_conn_qp = 128;
-+
-+module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
-+MODULE_PARM_DESC(max_nonsrq_conn_qp,
-+		 "Max number of connected-mode QPs per interface "
-+		 "(applied only if shared receive queue is not available)");
-+
- #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
- static int data_debug_level;
- 
-@@ -47,8 +56,6 @@ MODULE_PARM_DESC(cm_data_debug_level,
- 		 "Enable data path debug tracing for connected mode if > 0");
- #endif
- 
--#include "ipoib.h"
+-	/* Linear faulting address (if emulating a page-faulting instruction). */
+-	unsigned long eflags;
+-	unsigned long cr2;
 -
- #define IPOIB_CM_IETF_ID 0x1000000000000000ULL
- 
- #define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
-@@ -81,7 +88,7 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
- 		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
- }
- 
--static int ipoib_cm_post_receive(struct net_device *dev, int id)
-+static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
- 	struct ib_recv_wr *bad_wr;
-@@ -89,13 +96,13 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
- 
- 	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
- 
--	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
-+	for (i = 0; i < priv->cm.num_frags; ++i)
- 		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
- 
- 	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
- 	if (unlikely(ret)) {
- 		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
--		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
-+		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
- 				      priv->cm.srq_ring[id].mapping);
- 		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
- 		priv->cm.srq_ring[id].skb = NULL;
-@@ -104,7 +111,33 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
- 	return ret;
- }
- 
--static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
-+static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
-+					struct ipoib_cm_rx *rx, int id)
-+{
-+	struct ipoib_dev_priv *priv = netdev_priv(dev);
-+	struct ib_recv_wr *bad_wr;
-+	int i, ret;
-+
-+	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
-+
-+	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
-+		priv->cm.rx_sge[i].addr = rx->rx_ring[id].mapping[i];
-+
-+	ret = ib_post_recv(rx->qp, &priv->cm.rx_wr, &bad_wr);
-+	if (unlikely(ret)) {
-+		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
-+		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
-+				      rx->rx_ring[id].mapping);
-+		dev_kfree_skb_any(rx->rx_ring[id].skb);
-+		rx->rx_ring[id].skb = NULL;
-+	}
-+
-+	return ret;
-+}
-+
-+static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
-+					     struct ipoib_cm_rx_buf *rx_ring,
-+					     int id, int frags,
- 					     u64 mapping[IPOIB_CM_RX_SG])
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-@@ -141,7 +174,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int
- 			goto partial_error;
+-	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
+-	int mode;
+-
+-	unsigned long cs_base;
+-	unsigned long ds_base;
+-	unsigned long es_base;
+-	unsigned long ss_base;
+-	unsigned long gs_base;
+-	unsigned long fs_base;
+-};
+-
+-/* Execution mode, passed to the emulator. */
+-#define X86EMUL_MODE_REAL     0	/* Real mode.             */
+-#define X86EMUL_MODE_PROT16   2	/* 16-bit protected mode. */
+-#define X86EMUL_MODE_PROT32   4	/* 32-bit protected mode. */
+-#define X86EMUL_MODE_PROT64   8	/* 64-bit (long) mode.    */
+-
+-/* Host execution mode. */
+-#if defined(__i386__)
+-#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
+-#elif defined(CONFIG_X86_64)
+-#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
+-#endif
+-
+-/*
+- * x86_emulate_memop: Emulate an instruction that faulted attempting to
+- *                    read/write a 'special' memory area.
+- * Returns -1 on failure, 0 on success.
+- */
+-int x86_emulate_memop(struct x86_emulate_ctxt *ctxt,
+-		      struct x86_emulate_ops *ops);
+-
+-#endif				/* __X86_EMULATE_H__ */
+diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
+index cb4c670..7743d73 100644
+--- a/drivers/lguest/core.c
++++ b/drivers/lguest/core.c
+@@ -151,43 +151,43 @@ int lguest_address_ok(const struct lguest *lg,
+ /* This routine copies memory from the Guest.  Here we can see how useful the
+  * kill_lguest() routine we met in the Launcher can be: we return a random
+  * value (all zeroes) instead of needing to return an error. */
+-void __lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
++void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes)
+ {
+-	if (!lguest_address_ok(lg, addr, bytes)
+-	    || copy_from_user(b, lg->mem_base + addr, bytes) != 0) {
++	if (!lguest_address_ok(cpu->lg, addr, bytes)
++	    || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) {
+ 		/* copy_from_user should do this, but as we rely on it... */
+ 		memset(b, 0, bytes);
+-		kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
++		kill_guest(cpu, "bad read address %#lx len %u", addr, bytes);
  	}
- 
--	priv->cm.srq_ring[id].skb = skb;
-+	rx_ring[id].skb = skb;
- 	return skb;
- 
- partial_error:
-@@ -155,7 +188,23 @@ partial_error:
- 	return NULL;
- }
- 
--static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv* priv)
-+static void ipoib_cm_free_rx_ring(struct net_device *dev,
-+				  struct ipoib_cm_rx_buf *rx_ring)
-+{
-+	struct ipoib_dev_priv *priv = netdev_priv(dev);
-+	int i;
-+
-+	for (i = 0; i < ipoib_recvq_size; ++i)
-+		if (rx_ring[i].skb) {
-+			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
-+					      rx_ring[i].mapping);
-+			dev_kfree_skb_any(rx_ring[i].skb);
-+		}
-+
-+	kfree(rx_ring);
-+}
-+
-+static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
- {
- 	struct ib_send_wr *bad_wr;
- 	struct ipoib_cm_rx *p;
-@@ -208,12 +257,18 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
- 		.qp_type = IB_QPT_RC,
- 		.qp_context = p,
- 	};
-+
-+	if (!ipoib_cm_has_srq(dev)) {
-+		attr.cap.max_recv_wr  = ipoib_recvq_size;
-+		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
-+	}
-+
- 	return ib_create_qp(priv->pd, &attr);
  }
  
- static int ipoib_cm_modify_rx_qp(struct net_device *dev,
--				  struct ib_cm_id *cm_id, struct ib_qp *qp,
--				  unsigned psn)
-+				 struct ib_cm_id *cm_id, struct ib_qp *qp,
-+				 unsigned psn)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
- 	struct ib_qp_attr qp_attr;
-@@ -266,6 +321,60 @@ static int ipoib_cm_modify_rx_qp(struct net_device *dev,
- 	return 0;
+ /* This is the write (copy into guest) version. */
+-void __lgwrite(struct lguest *lg, unsigned long addr, const void *b,
++void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
+ 	       unsigned bytes)
+ {
+-	if (!lguest_address_ok(lg, addr, bytes)
+-	    || copy_to_user(lg->mem_base + addr, b, bytes) != 0)
+-		kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
++	if (!lguest_address_ok(cpu->lg, addr, bytes)
++	    || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0)
++		kill_guest(cpu, "bad write address %#lx len %u", addr, bytes);
  }
+ /*:*/
  
-+static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
-+				   struct ipoib_cm_rx *rx)
-+{
-+	struct ipoib_dev_priv *priv = netdev_priv(dev);
-+	int ret;
-+	int i;
-+
-+	rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
-+	if (!rx->rx_ring)
-+		return -ENOMEM;
-+
-+	spin_lock_irq(&priv->lock);
-+
-+	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
-+		spin_unlock_irq(&priv->lock);
-+		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
-+		ret = -EINVAL;
-+		goto err_free;
-+	} else
-+		++priv->cm.nonsrq_conn_qp;
-+
-+	spin_unlock_irq(&priv->lock);
-+
-+	for (i = 0; i < ipoib_recvq_size; ++i) {
-+		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
-+					   rx->rx_ring[i].mapping)) {
-+			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
-+				ret = -ENOMEM;
-+				goto err_count;
-+			}
-+		ret = ipoib_cm_post_receive_nonsrq(dev, rx, i);
-+		if (ret) {
-+			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
-+				   "failed for buf %d\n", i);
-+			ret = -EIO;
-+			goto err_count;
-+		}
-+	}
-+
-+	rx->recv_count = ipoib_recvq_size;
-+
-+	return 0;
-+
-+err_count:
-+	spin_lock_irq(&priv->lock);
-+	--priv->cm.nonsrq_conn_qp;
-+	spin_unlock_irq(&priv->lock);
-+
-+err_free:
-+	ipoib_cm_free_rx_ring(dev, rx->rx_ring);
-+
-+	return ret;
-+}
-+
- static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
- 			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
- 			     unsigned psn)
-@@ -281,7 +390,7 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
- 	rep.private_data_len = sizeof data;
- 	rep.flow_control = 0;
- 	rep.rnr_retry_count = req->rnr_retry_count;
--	rep.srq = 1;
-+	rep.srq = ipoib_cm_has_srq(dev);
- 	rep.qp_num = qp->qp_num;
- 	rep.starting_psn = psn;
- 	return ib_send_cm_rep(cm_id, &rep);
-@@ -317,6 +426,12 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
- 	if (ret)
- 		goto err_modify;
+ /*H:030 Let's jump straight to the main loop which runs the Guest.
+  * Remember, this is called by the Launcher reading /dev/lguest, and we keep
+  * going around and around until something interesting happens. */
+-int run_guest(struct lguest *lg, unsigned long __user *user)
++int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
+ {
+ 	/* We stop running once the Guest is dead. */
+-	while (!lg->dead) {
++	while (!cpu->lg->dead) {
+ 		/* First we run any hypercalls the Guest wants done. */
+-		if (lg->hcall)
+-			do_hypercalls(lg);
++		if (cpu->hcall)
++			do_hypercalls(cpu);
+ 
+ 		/* It's possible the Guest did a NOTIFY hypercall to the
+ 		 * Launcher, in which case we return from the read() now. */
+-		if (lg->pending_notify) {
+-			if (put_user(lg->pending_notify, user))
++		if (cpu->pending_notify) {
++			if (put_user(cpu->pending_notify, user))
+ 				return -EFAULT;
+-			return sizeof(lg->pending_notify);
++			return sizeof(cpu->pending_notify);
+ 		}
  
-+	if (!ipoib_cm_has_srq(dev)) {
-+		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
-+		if (ret)
-+			goto err_modify;
-+	}
-+
- 	spin_lock_irq(&priv->lock);
- 	queue_delayed_work(ipoib_workqueue,
- 			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
-@@ -401,12 +516,14 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
- void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-+	struct ipoib_cm_rx_buf *rx_ring;
- 	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
- 	struct sk_buff *skb, *newskb;
- 	struct ipoib_cm_rx *p;
- 	unsigned long flags;
- 	u64 mapping[IPOIB_CM_RX_SG];
- 	int frags;
-+	int has_srq;
+ 		/* Check for signals */
+@@ -195,13 +195,13 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
+ 			return -ERESTARTSYS;
  
- 	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
- 		       wr_id, wc->status);
-@@ -424,18 +541,32 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
- 		return;
- 	}
+ 		/* If Waker set break_out, return to Launcher. */
+-		if (lg->break_out)
++		if (cpu->break_out)
+ 			return -EAGAIN;
  
--	skb  = priv->cm.srq_ring[wr_id].skb;
-+	p = wc->qp->qp_context;
-+
-+	has_srq = ipoib_cm_has_srq(dev);
-+	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
-+
-+	skb = rx_ring[wr_id].skb;
+ 		/* Check if there are any interrupts which can be delivered
+ 		 * now: if so, this sets up the handler to be executed when we
+ 		 * next run the Guest. */
+-		maybe_do_interrupt(lg);
++		maybe_do_interrupt(cpu);
+ 
+ 		/* All long-lived kernel loops need to check with this horrible
+ 		 * thing called the freezer.  If the Host is trying to suspend,
+@@ -210,12 +210,12 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
+ 
+ 		/* Just make absolutely sure the Guest is still alive.  One of
+ 		 * those hypercalls could have been fatal, for example. */
+-		if (lg->dead)
++		if (cpu->lg->dead)
+ 			break;
+ 
+ 		/* If the Guest asked to be stopped, we sleep.  The Guest's
+ 		 * clock timer or LHCALL_BREAK from the Waker will wake us. */
+-		if (lg->halted) {
++		if (cpu->halted) {
+ 			set_current_state(TASK_INTERRUPTIBLE);
+ 			schedule();
+ 			continue;
+@@ -226,15 +226,17 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
+ 		local_irq_disable();
  
- 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
- 		ipoib_dbg(priv, "cm recv error "
- 			   "(status=%d, wrid=%d vend_err %x)\n",
- 			   wc->status, wr_id, wc->vendor_err);
- 		++dev->stats.rx_dropped;
--		goto repost;
-+		if (has_srq)
-+			goto repost;
-+		else {
-+			if (!--p->recv_count) {
-+				spin_lock_irqsave(&priv->lock, flags);
-+				list_move(&p->list, &priv->cm.rx_reap_list);
-+				spin_unlock_irqrestore(&priv->lock, flags);
-+				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
-+			}
-+			return;
-+		}
- 	}
+ 		/* Actually run the Guest until something happens. */
+-		lguest_arch_run_guest(lg);
++		lguest_arch_run_guest(cpu);
  
- 	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
--		p = wc->qp->qp_context;
- 		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
- 			spin_lock_irqsave(&priv->lock, flags);
- 			p->jiffies = jiffies;
-@@ -450,7 +581,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
- 	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
- 					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+ 		/* Now we're ready to be interrupted or moved to other CPUs */
+ 		local_irq_enable();
  
--	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
-+	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
- 	if (unlikely(!newskb)) {
- 		/*
- 		 * If we can't allocate a new RX buffer, dump
-@@ -461,8 +592,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
- 		goto repost;
+ 		/* Now we deal with whatever happened to the Guest. */
+-		lguest_arch_handle_trap(lg);
++		lguest_arch_handle_trap(cpu);
  	}
  
--	ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
--	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
-+	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
-+	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
- 
- 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
- 		       wc->byte_len, wc->slid);
-@@ -483,9 +614,17 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
- 	netif_receive_skb(skb);
- 
- repost:
--	if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
--		ipoib_warn(priv, "ipoib_cm_post_receive failed "
--			   "for buf %d\n", wr_id);
-+	if (has_srq) {
-+		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
-+			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
-+				   "for buf %d\n", wr_id);
-+	} else {
-+		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p, wr_id))) {
-+			--p->recv_count;
-+			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
-+				   "for buf %d\n", wr_id);
-+		}
-+	}
- }
- 
- static inline int post_send(struct ipoib_dev_priv *priv,
-@@ -495,10 +634,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
- {
- 	struct ib_send_wr *bad_wr;
- 
--	priv->tx_sge.addr             = addr;
--	priv->tx_sge.length           = len;
-+	priv->tx_sge.addr	= addr;
-+	priv->tx_sge.length	= len;
- 
--	priv->tx_wr.wr_id 	      = wr_id | IPOIB_OP_CM;
-+	priv->tx_wr.wr_id	= wr_id | IPOIB_OP_CM;
- 
- 	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
- }
-@@ -540,7 +679,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
- 	tx_req->mapping = addr;
- 
- 	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
--			        addr, skb->len))) {
-+			       addr, skb->len))) {
- 		ipoib_warn(priv, "post_send failed\n");
- 		++dev->stats.tx_errors;
- 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
-@@ -657,10 +796,33 @@ err_cm:
- 	return ret;
++	if (cpu->lg->dead == ERR_PTR(-ERESTART))
++		return -ERESTART;
+ 	/* The Guest is dead => "No such file or directory" */
+ 	return -ENOENT;
  }
+@@ -253,7 +255,7 @@ static int __init init(void)
  
-+static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
-+{
-+	struct ipoib_dev_priv *priv = netdev_priv(dev);
-+	struct ipoib_cm_rx *rx, *n;
-+	LIST_HEAD(list);
-+
-+	spin_lock_irq(&priv->lock);
-+	list_splice_init(&priv->cm.rx_reap_list, &list);
-+	spin_unlock_irq(&priv->lock);
-+
-+	list_for_each_entry_safe(rx, n, &list, list) {
-+		ib_destroy_cm_id(rx->id);
-+		ib_destroy_qp(rx->qp);
-+		if (!ipoib_cm_has_srq(dev)) {
-+			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
-+			spin_lock_irq(&priv->lock);
-+			--priv->cm.nonsrq_conn_qp;
-+			spin_unlock_irq(&priv->lock);
-+		}
-+		kfree(rx);
-+	}
-+}
-+
- void ipoib_cm_dev_stop(struct net_device *dev)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
--	struct ipoib_cm_rx *p, *n;
-+	struct ipoib_cm_rx *p;
- 	unsigned long begin;
- 	LIST_HEAD(list);
- 	int ret;
-@@ -706,15 +868,9 @@ void ipoib_cm_dev_stop(struct net_device *dev)
- 		spin_lock_irq(&priv->lock);
+ 	/* Lguest can't run under Xen, VMI or itself.  It does Tricky Stuff. */
+ 	if (paravirt_enabled()) {
+-		printk("lguest is afraid of %s\n", pv_info.name);
++		printk("lguest is afraid of being a guest\n");
+ 		return -EPERM;
  	}
  
--	list_splice_init(&priv->cm.rx_reap_list, &list);
--
- 	spin_unlock_irq(&priv->lock);
- 
--	list_for_each_entry_safe(p, n, &list, list) {
--		ib_destroy_cm_id(p->id);
--		ib_destroy_qp(p->qp);
--		kfree(p);
--	}
-+	ipoib_cm_free_rx_reap_list(dev);
- 
- 	cancel_delayed_work(&priv->cm.stale_task);
- }
-@@ -799,7 +955,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
- 		.sq_sig_type		= IB_SIGNAL_ALL_WR,
- 		.qp_type		= IB_QPT_RC,
- 		.qp_context		= tx
--        };
-+	};
- 
- 	return ib_create_qp(priv->pd, &attr);
- }
-@@ -816,28 +972,28 @@ static int ipoib_cm_send_req(struct net_device *dev,
- 	data.qpn = cpu_to_be32(priv->qp->qp_num);
- 	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
- 
--	req.primary_path 	      = pathrec;
--	req.alternate_path 	      = NULL;
--	req.service_id                = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
--	req.qp_num 		      = qp->qp_num;
--	req.qp_type 		      = qp->qp_type;
--	req.private_data 	      = &data;
--	req.private_data_len 	      = sizeof data;
--	req.flow_control 	      = 0;
-+	req.primary_path		= pathrec;
-+	req.alternate_path		= NULL;
-+	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
-+	req.qp_num			= qp->qp_num;
-+	req.qp_type			= qp->qp_type;
-+	req.private_data		= &data;
-+	req.private_data_len		= sizeof data;
-+	req.flow_control		= 0;
- 
--	req.starting_psn              = 0; /* FIXME */
-+	req.starting_psn		= 0; /* FIXME */
+diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
+index b478aff..0f2cb4f 100644
+--- a/drivers/lguest/hypercalls.c
++++ b/drivers/lguest/hypercalls.c
+@@ -23,13 +23,14 @@
+ #include <linux/uaccess.h>
+ #include <linux/syscalls.h>
+ #include <linux/mm.h>
++#include <linux/ktime.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include "lg.h"
  
- 	/*
- 	 * Pick some arbitrary defaults here; we could make these
- 	 * module parameters if anyone cared about setting them.
- 	 */
--	req.responder_resources	      = 4;
--	req.remote_cm_response_timeout = 20;
--	req.local_cm_response_timeout  = 20;
--	req.retry_count 	      = 0; /* RFC draft warns against retries */
--	req.rnr_retry_count 	      = 0; /* RFC draft warns against retries */
--	req.max_cm_retries 	      = 15;
--	req.srq 	              = 1;
-+	req.responder_resources		= 4;
-+	req.remote_cm_response_timeout	= 20;
-+	req.local_cm_response_timeout	= 20;
-+	req.retry_count			= 0; /* RFC draft warns against retries */
-+	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
-+	req.max_cm_retries		= 15;
-+	req.srq				= ipoib_cm_has_srq(dev);
- 	return ib_send_cm_req(id, &req);
- }
+ /*H:120 This is the core hypercall routine: where the Guest gets what it wants.
+  * Or gets killed.  Or, in the case of LHCALL_CRASH, both. */
+-static void do_hcall(struct lguest *lg, struct hcall_args *args)
++static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
+ {
+ 	switch (args->arg0) {
+ 	case LHCALL_FLUSH_ASYNC:
+@@ -39,60 +40,62 @@ static void do_hcall(struct lguest *lg, struct hcall_args *args)
+ 	case LHCALL_LGUEST_INIT:
+ 		/* You can't get here unless you're already initialized.  Don't
+ 		 * do that. */
+-		kill_guest(lg, "already have lguest_data");
++		kill_guest(cpu, "already have lguest_data");
+ 		break;
+-	case LHCALL_CRASH: {
+-		/* Crash is such a trivial hypercall that we do it in four
++	case LHCALL_SHUTDOWN: {
++		/* Shutdown is such a trivial hypercall that we do it in four
+ 		 * lines right here. */
+ 		char msg[128];
+ 		/* If the lgread fails, it will call kill_guest() itself; the
+ 		 * kill_guest() with the message will be ignored. */
+-		__lgread(lg, msg, args->arg1, sizeof(msg));
++		__lgread(cpu, msg, args->arg1, sizeof(msg));
+ 		msg[sizeof(msg)-1] = '\0';
+-		kill_guest(lg, "CRASH: %s", msg);
++		kill_guest(cpu, "CRASH: %s", msg);
++		if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
++			cpu->lg->dead = ERR_PTR(-ERESTART);
+ 		break;
+ 	}
+ 	case LHCALL_FLUSH_TLB:
+ 		/* FLUSH_TLB comes in two flavors, depending on the
+ 		 * argument: */
+ 		if (args->arg1)
+-			guest_pagetable_clear_all(lg);
++			guest_pagetable_clear_all(cpu);
+ 		else
+-			guest_pagetable_flush_user(lg);
++			guest_pagetable_flush_user(cpu);
+ 		break;
  
-@@ -1150,7 +1306,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
- 	spin_unlock_irq(&priv->tx_lock);
+ 	/* All these calls simply pass the arguments through to the right
+ 	 * routines. */
+ 	case LHCALL_NEW_PGTABLE:
+-		guest_new_pagetable(lg, args->arg1);
++		guest_new_pagetable(cpu, args->arg1);
+ 		break;
+ 	case LHCALL_SET_STACK:
+-		guest_set_stack(lg, args->arg1, args->arg2, args->arg3);
++		guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
+ 		break;
+ 	case LHCALL_SET_PTE:
+-		guest_set_pte(lg, args->arg1, args->arg2, __pte(args->arg3));
++		guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
+ 		break;
+ 	case LHCALL_SET_PMD:
+-		guest_set_pmd(lg, args->arg1, args->arg2);
++		guest_set_pmd(cpu->lg, args->arg1, args->arg2);
+ 		break;
+ 	case LHCALL_SET_CLOCKEVENT:
+-		guest_set_clockevent(lg, args->arg1);
++		guest_set_clockevent(cpu, args->arg1);
+ 		break;
+ 	case LHCALL_TS:
+ 		/* This sets the TS flag, as we saw used in run_guest(). */
+-		lg->ts = args->arg1;
++		cpu->ts = args->arg1;
+ 		break;
+ 	case LHCALL_HALT:
+ 		/* Similarly, this sets the halted flag for run_guest(). */
+-		lg->halted = 1;
++		cpu->halted = 1;
+ 		break;
+ 	case LHCALL_NOTIFY:
+-		lg->pending_notify = args->arg1;
++		cpu->pending_notify = args->arg1;
+ 		break;
+ 	default:
+ 		/* It should be an architecture-specific hypercall. */
+-		if (lguest_arch_do_hcall(lg, args))
+-			kill_guest(lg, "Bad hypercall %li\n", args->arg0);
++		if (lguest_arch_do_hcall(cpu, args))
++			kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
+ 	}
  }
- 
--void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
-+void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
- 			   unsigned int mtu)
+ /*:*/
+@@ -104,13 +107,13 @@ static void do_hcall(struct lguest *lg, struct hcall_args *args)
+  * Guest put them in the ring, but we also promise the Guest that they will
+  * happen before any normal hypercall (which is why we check this before
+  * checking for a normal hcall). */
+-static void do_async_hcalls(struct lguest *lg)
++static void do_async_hcalls(struct lg_cpu *cpu)
  {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-@@ -1166,20 +1322,8 @@ void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
+ 	unsigned int i;
+ 	u8 st[LHCALL_RING_SIZE];
  
- static void ipoib_cm_rx_reap(struct work_struct *work)
- {
--	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
--						   cm.rx_reap_task);
--	struct ipoib_cm_rx *p, *n;
--	LIST_HEAD(list);
--
--	spin_lock_irq(&priv->lock);
--	list_splice_init(&priv->cm.rx_reap_list, &list);
--	spin_unlock_irq(&priv->lock);
--
--	list_for_each_entry_safe(p, n, &list, list) {
--		ib_destroy_cm_id(p->id);
--		ib_destroy_qp(p->qp);
--		kfree(p);
--	}
-+	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
-+						cm.rx_reap_task)->dev);
- }
+ 	/* For simplicity, we copy the entire call status array in at once. */
+-	if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
++	if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st)))
+ 		return;
  
- static void ipoib_cm_stale_task(struct work_struct *work)
-@@ -1212,7 +1356,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
- }
+ 	/* We process "struct lguest_data"s hcalls[] ring once. */
+@@ -119,7 +122,7 @@ static void do_async_hcalls(struct lguest *lg)
+ 		/* We remember where we were up to from last time.  This makes
+ 		 * sure that the hypercalls are done in the order the Guest
+ 		 * places them in the ring. */
+-		unsigned int n = lg->next_hcall;
++		unsigned int n = cpu->next_hcall;
  
+ 		/* 0xFF means there's no call here (yet). */
+ 		if (st[n] == 0xFF)
+@@ -127,65 +130,65 @@ static void do_async_hcalls(struct lguest *lg)
  
--static ssize_t show_mode(struct device *d, struct device_attribute *attr, 
-+static ssize_t show_mode(struct device *d, struct device_attribute *attr,
- 			 char *buf)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));
-@@ -1255,16 +1399,40 @@ int ipoib_cm_add_mode_attr(struct net_device *dev)
- 	return device_create_file(&dev->dev, &dev_attr_mode);
- }
+ 		/* OK, we have a hypercall.  Increment the "next_hcall" cursor,
+ 		 * and wrap back to 0 if we reach the end. */
+-		if (++lg->next_hcall == LHCALL_RING_SIZE)
+-			lg->next_hcall = 0;
++		if (++cpu->next_hcall == LHCALL_RING_SIZE)
++			cpu->next_hcall = 0;
  
--int ipoib_cm_dev_init(struct net_device *dev)
-+static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
- 	struct ib_srq_init_attr srq_init_attr = {
- 		.attr = {
- 			.max_wr  = ipoib_recvq_size,
--			.max_sge = IPOIB_CM_RX_SG
-+			.max_sge = max_sge
+ 		/* Copy the hypercall arguments into a local copy of
+ 		 * the hcall_args struct. */
+-		if (copy_from_user(&args, &lg->lguest_data->hcalls[n],
++		if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
+ 				   sizeof(struct hcall_args))) {
+-			kill_guest(lg, "Fetching async hypercalls");
++			kill_guest(cpu, "Fetching async hypercalls");
+ 			break;
  		}
- 	};
--	int ret, i;
-+
-+	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
-+	if (IS_ERR(priv->cm.srq)) {
-+		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
-+			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
-+			       priv->ca->name, PTR_ERR(priv->cm.srq));
-+		priv->cm.srq = NULL;
-+		return;
-+	}
-+
-+	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
-+				    GFP_KERNEL);
-+	if (!priv->cm.srq_ring) {
-+		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
-+		       priv->ca->name, ipoib_recvq_size);
-+		ib_destroy_srq(priv->cm.srq);
-+		priv->cm.srq = NULL;
-+	}
-+}
-+
-+int ipoib_cm_dev_init(struct net_device *dev)
-+{
-+	struct ipoib_dev_priv *priv = netdev_priv(dev);
-+	int i, ret;
-+	struct ib_device_attr attr;
- 
- 	INIT_LIST_HEAD(&priv->cm.passive_ids);
- 	INIT_LIST_HEAD(&priv->cm.reap_list);
-@@ -1281,43 +1449,53 @@ int ipoib_cm_dev_init(struct net_device *dev)
- 
- 	skb_queue_head_init(&priv->cm.skb_queue);
- 
--	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
--	if (IS_ERR(priv->cm.srq)) {
--		ret = PTR_ERR(priv->cm.srq);
--		priv->cm.srq = NULL;
-+	ret = ib_query_device(priv->ca, &attr);
-+	if (ret) {
-+		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
- 		return ret;
- 	}
- 
--	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
--				    GFP_KERNEL);
--	if (!priv->cm.srq_ring) {
--		printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
--		       priv->ca->name, ipoib_recvq_size);
--		ipoib_cm_dev_cleanup(dev);
--		return -ENOMEM;
-+	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);
-+
-+	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
-+	ipoib_cm_create_srq(dev, attr.max_srq_sge);
-+	if (ipoib_cm_has_srq(dev)) {
-+		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
-+		priv->cm.num_frags  = attr.max_srq_sge;
-+		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
-+			  priv->cm.max_cm_mtu, priv->cm.num_frags);
-+	} else {
-+		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
-+		priv->cm.num_frags  = IPOIB_CM_RX_SG;
- 	}
- 
--	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
-+	for (i = 0; i < priv->cm.num_frags; ++i)
- 		priv->cm.rx_sge[i].lkey	= priv->mr->lkey;
  
- 	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
--	for (i = 1; i < IPOIB_CM_RX_SG; ++i)
-+	for (i = 1; i < priv->cm.num_frags; ++i)
- 		priv->cm.rx_sge[i].length = PAGE_SIZE;
- 	priv->cm.rx_wr.next = NULL;
- 	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
--	priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;
-+	priv->cm.rx_wr.num_sge = priv->cm.num_frags;
-+
-+	if (ipoib_cm_has_srq(dev)) {
-+		for (i = 0; i < ipoib_recvq_size; ++i) {
-+			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
-+						   priv->cm.num_frags - 1,
-+						   priv->cm.srq_ring[i].mapping)) {
-+				ipoib_warn(priv, "failed to allocate "
-+					   "receive buffer %d\n", i);
-+				ipoib_cm_dev_cleanup(dev);
-+				return -ENOMEM;
-+			}
+ 		/* Do the hypercall, same as a normal one. */
+-		do_hcall(lg, &args);
++		do_hcall(cpu, &args);
  
--	for (i = 0; i < ipoib_recvq_size; ++i) {
--		if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
--					   priv->cm.srq_ring[i].mapping)) {
--			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
--			ipoib_cm_dev_cleanup(dev);
--			return -ENOMEM;
--		}
--		if (ipoib_cm_post_receive(dev, i)) {
--			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
--			ipoib_cm_dev_cleanup(dev);
--			return -EIO;
-+			if (ipoib_cm_post_receive_srq(dev, i)) {
-+				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
-+					   "failed for buf %d\n", i);
-+				ipoib_cm_dev_cleanup(dev);
-+				return -EIO;
-+			}
+ 		/* Mark the hypercall done. */
+-		if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
+-			kill_guest(lg, "Writing result for async hypercall");
++		if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) {
++			kill_guest(cpu, "Writing result for async hypercall");
+ 			break;
  		}
- 	}
- 
-@@ -1328,7 +1506,7 @@ int ipoib_cm_dev_init(struct net_device *dev)
- void ipoib_cm_dev_cleanup(struct net_device *dev)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
--	int i, ret;
-+	int ret;
- 
- 	if (!priv->cm.srq)
- 		return;
-@@ -1342,13 +1520,7 @@ void ipoib_cm_dev_cleanup(struct net_device *dev)
- 	priv->cm.srq = NULL;
- 	if (!priv->cm.srq_ring)
- 		return;
--	for (i = 0; i < ipoib_recvq_size; ++i)
--		if (priv->cm.srq_ring[i].skb) {
--			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
--					      priv->cm.srq_ring[i].mapping);
--			dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
--			priv->cm.srq_ring[i].skb = NULL;
--		}
--	kfree(priv->cm.srq_ring);
-+
-+	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
- 	priv->cm.srq_ring = NULL;
- }
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
-index 44c1741..8b882bb 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
-@@ -124,7 +124,7 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
- 	return 0;
- }
  
--static struct seq_operations ipoib_mcg_seq_ops = {
-+static const struct seq_operations ipoib_mcg_seq_ops = {
- 	.start = ipoib_mcg_seq_start,
- 	.next  = ipoib_mcg_seq_next,
- 	.stop  = ipoib_mcg_seq_stop,
-@@ -230,7 +230,7 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
- 	return 0;
+ 		/* Stop doing hypercalls if they want to notify the Launcher:
+ 		 * it needs to service this first. */
+-		if (lg->pending_notify)
++		if (cpu->pending_notify)
+ 			break;
+ 	}
  }
  
--static struct seq_operations ipoib_path_seq_ops = {
-+static const struct seq_operations ipoib_path_seq_ops = {
- 	.start = ipoib_path_seq_start,
- 	.next  = ipoib_path_seq_next,
- 	.stop  = ipoib_path_seq_stop,
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
-index 5063dd5..52bc2bd 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
-@@ -345,12 +345,12 @@ static inline int post_send(struct ipoib_dev_priv *priv,
+ /* Last of all, we look at what happens first of all.  The very first time the
+  * Guest makes a hypercall, we end up here to set things up: */
+-static void initialize(struct lguest *lg)
++static void initialize(struct lg_cpu *cpu)
  {
- 	struct ib_send_wr *bad_wr;
- 
--	priv->tx_sge.addr             = addr;
--	priv->tx_sge.length           = len;
-+	priv->tx_sge.addr	      = addr;
-+	priv->tx_sge.length	      = len;
- 
--	priv->tx_wr.wr_id 	      = wr_id;
-+	priv->tx_wr.wr_id	      = wr_id;
- 	priv->tx_wr.wr.ud.remote_qpn  = qpn;
--	priv->tx_wr.wr.ud.ah 	      = address;
-+	priv->tx_wr.wr.ud.ah	      = address;
- 
- 	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
- }
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
-index c9f6077..a082466 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
-@@ -182,17 +182,20 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
- 
- 	/* dev->mtu > 2K ==> connected mode */
--	if (ipoib_cm_admin_enabled(dev) && new_mtu <= IPOIB_CM_MTU) {
-+	if (ipoib_cm_admin_enabled(dev)) {
-+		if (new_mtu > ipoib_cm_max_mtu(dev))
-+			return -EINVAL;
-+
- 		if (new_mtu > priv->mcast_mtu)
- 			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
- 				   priv->mcast_mtu);
-+
- 		dev->mtu = new_mtu;
- 		return 0;
+ 	/* You can't do anything until you're initialized.  The Guest knows the
+ 	 * rules, so we're unforgiving here. */
+-	if (lg->hcall->arg0 != LHCALL_LGUEST_INIT) {
+-		kill_guest(lg, "hypercall %li before INIT", lg->hcall->arg0);
++	if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
++		kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
+ 		return;
  	}
  
--	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) {
-+	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
- 		return -EINVAL;
--	}
- 
- 	priv->admin_mtu = new_mtu;
- 
-@@ -474,8 +477,8 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
- 	INIT_LIST_HEAD(&path->neigh_list);
- 
- 	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
--	path->pathrec.sgid          = priv->local_gid;
--	path->pathrec.pkey          = cpu_to_be16(priv->pkey);
-+	path->pathrec.sgid	    = priv->local_gid;
-+	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
- 	path->pathrec.numb_path     = 1;
- 	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
- 
-@@ -669,16 +672,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
- 	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
- 		return NETDEV_TX_LOCKED;
- 
--	/*
--	 * Check if our queue is stopped.  Since we have the LLTX bit
--	 * set, we can't rely on netif_stop_queue() preventing our
--	 * xmit function from being called with a full queue.
--	 */
--	if (unlikely(netif_queue_stopped(dev))) {
--		spin_unlock_irqrestore(&priv->tx_lock, flags);
--		return NETDEV_TX_BUSY;
--	}
--
- 	if (likely(skb->dst && skb->dst->neighbour)) {
- 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
- 			ipoib_path_lookup(skb, dev);
-@@ -950,34 +943,34 @@ static void ipoib_setup(struct net_device *dev)
- {
- 	struct ipoib_dev_priv *priv = netdev_priv(dev);
- 
--	dev->open 		 = ipoib_open;
--	dev->stop 		 = ipoib_stop;
--	dev->change_mtu 	 = ipoib_change_mtu;
--	dev->hard_start_xmit 	 = ipoib_start_xmit;
--	dev->tx_timeout 	 = ipoib_timeout;
--	dev->header_ops 	 = &ipoib_header_ops;
--	dev->set_multicast_list  = ipoib_set_mcast_list;
--	dev->neigh_setup         = ipoib_neigh_setup_dev;
-+	dev->open		 = ipoib_open;
-+	dev->stop		 = ipoib_stop;
-+	dev->change_mtu		 = ipoib_change_mtu;
-+	dev->hard_start_xmit	 = ipoib_start_xmit;
-+	dev->tx_timeout		 = ipoib_timeout;
-+	dev->header_ops		 = &ipoib_header_ops;
-+	dev->set_multicast_list	 = ipoib_set_mcast_list;
-+	dev->neigh_setup	 = ipoib_neigh_setup_dev;
- 
- 	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
- 
--	dev->watchdog_timeo 	 = HZ;
-+	dev->watchdog_timeo	 = HZ;
- 
--	dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;
-+	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;
- 
- 	/*
- 	 * We add in INFINIBAND_ALEN to allow for the destination
- 	 * address "pseudoheader" for skbs without neighbour struct.
- 	 */
--	dev->hard_header_len 	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
--	dev->addr_len 		 = INFINIBAND_ALEN;
--	dev->type 		 = ARPHRD_INFINIBAND;
--	dev->tx_queue_len 	 = ipoib_sendq_size * 2;
--	dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
-+	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
-+	dev->addr_len		 = INFINIBAND_ALEN;
-+	dev->type		 = ARPHRD_INFINIBAND;
-+	dev->tx_queue_len	 = ipoib_sendq_size * 2;
-+	dev->features		 = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
- 
- 	/* MTU will be reset when mcast join happens */
--	dev->mtu 		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
--	priv->mcast_mtu 	 = priv->admin_mtu = dev->mtu;
-+	dev->mtu		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
-+	priv->mcast_mtu		 = priv->admin_mtu = dev->mtu;
- 
- 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
- 
-@@ -1268,6 +1261,9 @@ static int __init ipoib_init_module(void)
- 	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
- 	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
- 	ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);
-+#ifdef CONFIG_INFINIBAND_IPOIB_CM
-+	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
-+#endif
- 
- 	ret = ipoib_register_debugfs();
- 	if (ret)
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-index 9bcfc7a..2628339 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -702,7 +702,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
- 
- out:
- 	if (mcast && mcast->ah) {
--		if (skb->dst            &&
-+		if (skb->dst		&&
- 		    skb->dst->neighbour &&
- 		    !*to_ipoib_neigh(skb->dst->neighbour)) {
- 			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour,
-@@ -710,7 +710,7 @@ out:
- 
- 			if (neigh) {
- 				kref_get(&mcast->ah->ref);
--				neigh->ah  	= mcast->ah;
-+				neigh->ah	= mcast->ah;
- 				list_add_tail(&neigh->list, &mcast->neigh_list);
- 			}
- 		}
-@@ -788,10 +788,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
- 
- 		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
- 
--		/* Add in the P_Key */
--		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
--		mgid.raw[5] = priv->pkey & 0xff;
--
- 		mcast = __ipoib_mcast_find(dev, &mgid);
- 		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
- 			struct ipoib_mcast *nmcast;
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
-index 3c6e45d..433e99a 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
-@@ -172,8 +172,12 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
- 
- 	size = ipoib_sendq_size + ipoib_recvq_size + 1;
- 	ret = ipoib_cm_dev_init(dev);
--	if (!ret)
--		size += ipoib_recvq_size + 1 /* 1 extra for rx_drain_qp */;
-+	if (!ret) {
-+		if (ipoib_cm_has_srq(dev))
-+			size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
-+		else
-+			size += ipoib_recvq_size * ipoib_max_conn_qp;
-+	}
- 
- 	priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
- 	if (IS_ERR(priv->cq)) {
-@@ -197,12 +201,12 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
- 	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
- 	priv->dev->dev_addr[3] = (priv->qp->qp_num      ) & 0xff;
- 
--	priv->tx_sge.lkey 	= priv->mr->lkey;
-+	priv->tx_sge.lkey	= priv->mr->lkey;
- 
--	priv->tx_wr.opcode 	= IB_WR_SEND;
--	priv->tx_wr.sg_list 	= &priv->tx_sge;
--	priv->tx_wr.num_sge 	= 1;
--	priv->tx_wr.send_flags 	= IB_SEND_SIGNALED;
-+	priv->tx_wr.opcode	= IB_WR_SEND;
-+	priv->tx_wr.sg_list	= &priv->tx_sge;
-+	priv->tx_wr.num_sge	= 1;
-+	priv->tx_wr.send_flags	= IB_SEND_SIGNALED;
- 
- 	return 0;
- 
-diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
-index fe604c8..77dedba 100644
---- a/drivers/infiniband/ulp/iser/Kconfig
-+++ b/drivers/infiniband/ulp/iser/Kconfig
-@@ -8,5 +8,5 @@ config INFINIBAND_ISER
-           that speak iSCSI over iSER over InfiniBand.
- 
- 	  The iSER protocol is defined by IETF.
--	  See <http://www.ietf.org/internet-drafts/draft-ietf-ips-iser-05.txt>
--	  and <http://www.infinibandta.org/members/spec/iser_annex_060418.pdf>
-+	  See <http://www.ietf.org/rfc/rfc5046.txt>
-+	  and <http://www.infinibandta.org/members/spec/Annex_iSER.PDF>
-diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
-index bad8dac..be1b9fb 100644
---- a/drivers/infiniband/ulp/iser/iscsi_iser.c
-+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
-@@ -129,7 +129,7 @@ error:
-  * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
-  *
-  **/
--static void
-+static int
- iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
- {
- 	struct iscsi_iser_conn     *iser_conn  = ctask->conn->dd_data;
-@@ -138,6 +138,7 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
- 	iser_ctask->command_sent = 0;
- 	iser_ctask->iser_conn    = iser_conn;
- 	iser_ctask_rdma_init(iser_ctask);
-+	return 0;
- }
+-	if (lguest_arch_init_hypercalls(lg))
+-		kill_guest(lg, "bad guest page %p", lg->lguest_data);
++	if (lguest_arch_init_hypercalls(cpu))
++		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
  
- /**
-@@ -220,12 +221,6 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
- 	debug_scsi("ctask deq [cid %d itt 0x%x]\n",
- 		   conn->id, ctask->itt);
+ 	/* The Guest tells us where we're not to deliver interrupts by putting
+ 	 * the range of addresses into "struct lguest_data". */
+-	if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
+-	    || get_user(lg->noirq_end, &lg->lguest_data->noirq_end))
+-		kill_guest(lg, "bad guest page %p", lg->lguest_data);
++	if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
++	    || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
++		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
  
--	/*
--	 * serialize with TMF AbortTask
--	 */
--	if (ctask->mtask)
--		return error;
--
- 	/* Send the cmd PDU */
- 	if (!iser_ctask->command_sent) {
- 		error = iser_send_command(conn, ctask);
-@@ -406,6 +401,7 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
- 		ctask      = session->cmds[i];
- 		iser_ctask = ctask->dd_data;
- 		ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
-+		ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
- 	}
+ 	/* We write the current time into the Guest's data page once so it can
+ 	 * set its clock. */
+-	write_timestamp(lg);
++	write_timestamp(cpu);
  
- 	for (i = 0; i < session->mgmtpool_max; i++) {
-@@ -551,11 +547,13 @@ static struct scsi_host_template iscsi_iser_sht = {
- 	.module                 = THIS_MODULE,
- 	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
- 	.queuecommand           = iscsi_queuecommand,
-+	.change_queue_depth	= iscsi_change_queue_depth,
- 	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
- 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
- 	.max_sectors		= 1024,
- 	.cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
- 	.eh_abort_handler       = iscsi_eh_abort,
-+	.eh_device_reset_handler= iscsi_eh_device_reset,
- 	.eh_host_reset_handler	= iscsi_eh_host_reset,
- 	.use_clustering         = DISABLE_CLUSTERING,
- 	.proc_name              = "iscsi_iser",
-@@ -582,7 +580,9 @@ static struct iscsi_transport iscsi_iser_transport = {
- 				  ISCSI_PERSISTENT_ADDRESS |
- 				  ISCSI_TARGET_NAME | ISCSI_TPGT |
- 				  ISCSI_USERNAME | ISCSI_PASSWORD |
--				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
-+				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
-+				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-+				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
- 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
- 				  ISCSI_HOST_NETDEV_NAME |
- 				  ISCSI_HOST_INITIATOR_NAME,
-diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
-index a6f2303..83247f1 100644
---- a/drivers/infiniband/ulp/iser/iser_initiator.c
-+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
-@@ -561,7 +561,7 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
- 	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
- 	        itt = get_itt(hdr->itt); /* mask out cid and age bits */
- 		if (!(itt < session->cmds_max))
--			iser_err("itt can't be matched to task!!!"
-+			iser_err("itt can't be matched to task!!! "
- 				 "conn %p opcode %d cmds_max %d itt %d\n",
- 				 conn->iscsi_conn,opcode,session->cmds_max,itt);
- 		/* use the mapping given with the cmds array indexed by itt */
-@@ -621,9 +621,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
- 			struct iscsi_session *session = conn->session;
+ 	/* page_tables.c will also do some setup. */
+-	page_table_guest_data_init(lg);
++	page_table_guest_data_init(cpu);
  
- 			spin_lock(&conn->session->lock);
--			list_del(&mtask->running);
--			__kfifo_put(session->mgmtpool.queue, (void*)&mtask,
--				    sizeof(void*));
-+			iscsi_free_mgmt_task(conn, mtask);
- 			spin_unlock(&session->lock);
- 		}
- 	}
-diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
-index 654a4dc..714b8db 100644
---- a/drivers/infiniband/ulp/iser/iser_verbs.c
-+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
-@@ -105,7 +105,7 @@ pd_err:
+ 	/* This is the one case where the above accesses might have been the
+ 	 * first write to a Guest page.  This may have caused a copy-on-write
+ 	 * fault, but the old page might be (read-only) in the Guest
+ 	 * pagetable. */
+-	guest_pagetable_clear_all(lg);
++	guest_pagetable_clear_all(cpu);
  }
  
- /**
-- * iser_free_device_ib_res - destory/dealloc/dereg the DMA MR,
-+ * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
-  * CQ and PD created with the device associated with the adapator.
+ /*H:100
+@@ -194,27 +197,27 @@ static void initialize(struct lguest *lg)
+  * Remember from the Guest, hypercalls come in two flavors: normal and
+  * asynchronous.  This file handles both types.
   */
- static void iser_free_device_ib_res(struct iser_device *device)
-@@ -475,13 +475,11 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
- 		iser_disconnected_handler(cma_id);
- 		break;
- 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-+		iser_err("Device removal is currently unsupported\n");
- 		BUG();
- 		break;
--	case RDMA_CM_EVENT_CONNECT_RESPONSE:
--		BUG();
--		break;
--	case RDMA_CM_EVENT_CONNECT_REQUEST:
- 	default:
-+		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
- 		break;
- 	}
- 	return ret;
-diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
-index bdb6f85..f2d2c7e 100644
---- a/drivers/infiniband/ulp/srp/ib_srp.c
-+++ b/drivers/infiniband/ulp/srp/ib_srp.c
-@@ -272,7 +272,8 @@ static void srp_path_rec_completion(int status,
- 
- 	target->status = status;
- 	if (status)
--		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
-+		shost_printk(KERN_ERR, target->scsi_host,
-+			     PFX "Got failed path rec status %d\n", status);
- 	else
- 		target->path = *pathrec;
- 	complete(&target->done);
-@@ -303,7 +304,8 @@ static int srp_lookup_path(struct srp_target_port *target)
- 	wait_for_completion(&target->done);
- 
- 	if (target->status < 0)
--		printk(KERN_WARNING PFX "Path record query failed\n");
-+		shost_printk(KERN_WARNING, target->scsi_host,
-+			     PFX "Path record query failed\n");
- 
- 	return target->status;
- }
-@@ -379,9 +381,10 @@ static int srp_send_req(struct srp_target_port *target)
- 	 * the second 8 bytes to the local node GUID.
- 	 */
- 	if (srp_target_is_topspin(target)) {
--		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
--		       "activated for target GUID %016llx\n",
--		       (unsigned long long) be64_to_cpu(target->ioc_guid));
-+		shost_printk(KERN_DEBUG, target->scsi_host,
-+			     PFX "Topspin/Cisco initiator port ID workaround "
-+			     "activated for target GUID %016llx\n",
-+			     (unsigned long long) be64_to_cpu(target->ioc_guid));
- 		memset(req->priv.initiator_port_id, 0, 8);
- 		memcpy(req->priv.initiator_port_id + 8,
- 		       &target->srp_host->dev->dev->node_guid, 8);
-@@ -400,7 +403,8 @@ static void srp_disconnect_target(struct srp_target_port *target)
- 
- 	init_completion(&target->done);
- 	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
--		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
-+		shost_printk(KERN_DEBUG, target->scsi_host,
-+			     PFX "Sending CM DREQ failed\n");
+-void do_hypercalls(struct lguest *lg)
++void do_hypercalls(struct lg_cpu *cpu)
+ {
+ 	/* Not initialized yet?  This hypercall must do it. */
+-	if (unlikely(!lg->lguest_data)) {
++	if (unlikely(!cpu->lg->lguest_data)) {
+ 		/* Set up the "struct lguest_data" */
+-		initialize(lg);
++		initialize(cpu);
+ 		/* Hcall is done. */
+-		lg->hcall = NULL;
++		cpu->hcall = NULL;
  		return;
  	}
- 	wait_for_completion(&target->done);
-@@ -568,7 +572,8 @@ static int srp_reconnect_target(struct srp_target_port *target)
- 	return ret;
- 
- err:
--	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);
-+	shost_printk(KERN_ERR, target->scsi_host,
-+		     PFX "reconnect failed (%d), removing target port.\n", ret);
- 
- 	/*
- 	 * We couldn't reconnect, so kill our target port off.
-@@ -683,8 +688,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
- 
- 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
- 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
--		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
--		       scmnd->sc_data_direction);
-+		shost_printk(KERN_WARNING, target->scsi_host,
-+			     PFX "Unhandled data direction %d\n",
-+			     scmnd->sc_data_direction);
- 		return -EINVAL;
- 	}
- 
-@@ -786,8 +792,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
- 	} else {
- 		scmnd = req->scmnd;
- 		if (!scmnd)
--			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
--			       (unsigned long long) rsp->tag);
-+			shost_printk(KERN_ERR, target->scsi_host,
-+				     "Null scmnd for RSP w/tag %016llx\n",
-+				     (unsigned long long) rsp->tag);
- 		scmnd->result = rsp->status;
- 
- 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
-@@ -831,7 +838,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
- 	if (0) {
- 		int i;
- 
--		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);
-+		shost_printk(KERN_ERR, target->scsi_host,
-+			     PFX "recv completion, opcode 0x%02x\n", opcode);
- 
- 		for (i = 0; i < wc->byte_len; ++i) {
- 			if (i % 8 == 0)
-@@ -852,11 +860,13 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
- 
- 	case SRP_T_LOGOUT:
- 		/* XXX Handle target logout */
--		printk(KERN_WARNING PFX "Got target logout request\n");
-+		shost_printk(KERN_WARNING, target->scsi_host,
-+			     PFX "Got target logout request\n");
- 		break;
  
- 	default:
--		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
-+		shost_printk(KERN_WARNING, target->scsi_host,
-+			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
- 		break;
+ 	/* The Guest has initialized.
+ 	 *
+ 	 * Look in the hypercall ring for the async hypercalls: */
+-	do_async_hcalls(lg);
++	do_async_hcalls(cpu);
+ 
+ 	/* If we stopped reading the hypercall ring because the Guest did a
+ 	 * NOTIFY to the Launcher, we want to return now.  Otherwise we do
+ 	 * the hypercall. */
+-	if (!lg->pending_notify) {
+-		do_hcall(lg, lg->hcall);
++	if (!cpu->pending_notify) {
++		do_hcall(cpu, cpu->hcall);
+ 		/* Tricky point: we reset the hcall pointer to mark the
+ 		 * hypercall as "done".  We use the hcall pointer rather than
+ 		 * the trap number to indicate a hypercall is pending.
+@@ -225,16 +228,17 @@ void do_hypercalls(struct lguest *lg)
+ 		 * Launcher, the run_guest() loop will exit without running the
+ 		 * Guest.  When it comes back it would try to re-run the
+ 		 * hypercall. */
+-		lg->hcall = NULL;
++		cpu->hcall = NULL;
  	}
+ }
  
-@@ -872,9 +882,10 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
- 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- 	while (ib_poll_cq(cq, 1, &wc) > 0) {
- 		if (wc.status) {
--			printk(KERN_ERR PFX "failed %s status %d\n",
--			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
--			       wc.status);
-+			shost_printk(KERN_ERR, target->scsi_host,
-+				     PFX "failed %s status %d\n",
-+				     wc.wr_id & SRP_OP_RECV ? "receive" : "send",
-+				     wc.status);
- 			target->qp_in_error = 1;
- 			break;
- 		}
-@@ -930,13 +941,18 @@ static int srp_post_recv(struct srp_target_port *target)
-  * req_lim and tx_head.  Lock cannot be dropped between call here and
-  * call to __srp_post_send().
-  */
--static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
-+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-+					enum srp_request_type req_type)
+ /* This routine supplies the Guest with time: it's used for wallclock time at
+  * initial boot and as a rough time source if the TSC isn't available. */
+-void write_timestamp(struct lguest *lg)
++void write_timestamp(struct lg_cpu *cpu)
  {
-+	s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
-+
- 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
- 		return NULL;
- 
--	if (unlikely(target->req_lim < 1))
-+	if (target->req_lim < min) {
- 		++target->zero_req_lim;
-+		return NULL;
-+	}
+ 	struct timespec now;
+ 	ktime_get_real_ts(&now);
+-	if (copy_to_user(&lg->lguest_data->time, &now, sizeof(struct timespec)))
+-		kill_guest(lg, "Writing timestamp");
++	if (copy_to_user(&cpu->lg->lguest_data->time,
++			 &now, sizeof(struct timespec)))
++		kill_guest(cpu, "Writing timestamp");
+ }
+diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
+index 2b66f79..32e97c1 100644
+--- a/drivers/lguest/interrupts_and_traps.c
++++ b/drivers/lguest/interrupts_and_traps.c
+@@ -41,11 +41,11 @@ static int idt_present(u32 lo, u32 hi)
+ 
+ /* We need a helper to "push" a value onto the Guest's stack, since that's a
+  * big part of what delivering an interrupt does. */
+-static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
++static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
+ {
+ 	/* Stack grows upwards: move stack then write value. */
+ 	*gstack -= 4;
+-	lgwrite(lg, *gstack, u32, val);
++	lgwrite(cpu, *gstack, u32, val);
+ }
+ 
+ /*H:210 The set_guest_interrupt() routine actually delivers the interrupt or
+@@ -60,7 +60,7 @@ static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
+  * We set up the stack just like the CPU does for a real interrupt, so it's
+  * identical for the Guest (and the standard "iret" instruction will undo
+  * it). */
+-static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
++static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
+ {
+ 	unsigned long gstack, origstack;
+ 	u32 eflags, ss, irq_enable;
+@@ -69,59 +69,59 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
+ 	/* There are two cases for interrupts: one where the Guest is already
+ 	 * in the kernel, and a more complex one where the Guest is in
+ 	 * userspace.  We check the privilege level to find out. */
+-	if ((lg->regs->ss&0x3) != GUEST_PL) {
++	if ((cpu->regs->ss&0x3) != GUEST_PL) {
+ 		/* The Guest told us their kernel stack with the SET_STACK
+ 		 * hypercall: both the virtual address and the segment */
+-		virtstack = lg->esp1;
+-		ss = lg->ss1;
++		virtstack = cpu->esp1;
++		ss = cpu->ss1;
+ 
+-		origstack = gstack = guest_pa(lg, virtstack);
++		origstack = gstack = guest_pa(cpu, virtstack);
+ 		/* We push the old stack segment and pointer onto the new
+ 		 * stack: when the Guest does an "iret" back from the interrupt
+ 		 * handler the CPU will notice they're dropping privilege
+ 		 * levels and expect these here. */
+-		push_guest_stack(lg, &gstack, lg->regs->ss);
+-		push_guest_stack(lg, &gstack, lg->regs->esp);
++		push_guest_stack(cpu, &gstack, cpu->regs->ss);
++		push_guest_stack(cpu, &gstack, cpu->regs->esp);
+ 	} else {
+ 		/* We're staying on the same Guest (kernel) stack. */
+-		virtstack = lg->regs->esp;
+-		ss = lg->regs->ss;
++		virtstack = cpu->regs->esp;
++		ss = cpu->regs->ss;
+ 
+-		origstack = gstack = guest_pa(lg, virtstack);
++		origstack = gstack = guest_pa(cpu, virtstack);
+ 	}
+ 
+ 	/* Remember that we never let the Guest actually disable interrupts, so
+ 	 * the "Interrupt Flag" bit is always set.  We copy that bit from the
+ 	 * Guest's "irq_enabled" field into the eflags word: we saw the Guest
+ 	 * copy it back in "lguest_iret". */
+-	eflags = lg->regs->eflags;
+-	if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0
++	eflags = cpu->regs->eflags;
++	if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
+ 	    && !(irq_enable & X86_EFLAGS_IF))
+ 		eflags &= ~X86_EFLAGS_IF;
+ 
+ 	/* An interrupt is expected to push three things on the stack: the old
+ 	 * "eflags" word, the old code segment, and the old instruction
+ 	 * pointer. */
+-	push_guest_stack(lg, &gstack, eflags);
+-	push_guest_stack(lg, &gstack, lg->regs->cs);
+-	push_guest_stack(lg, &gstack, lg->regs->eip);
++	push_guest_stack(cpu, &gstack, eflags);
++	push_guest_stack(cpu, &gstack, cpu->regs->cs);
++	push_guest_stack(cpu, &gstack, cpu->regs->eip);
+ 
+ 	/* For the six traps which supply an error code, we push that, too. */
+ 	if (has_err)
+-		push_guest_stack(lg, &gstack, lg->regs->errcode);
++		push_guest_stack(cpu, &gstack, cpu->regs->errcode);
+ 
+ 	/* Now we've pushed all the old state, we change the stack, the code
+ 	 * segment and the address to execute. */
+-	lg->regs->ss = ss;
+-	lg->regs->esp = virtstack + (gstack - origstack);
+-	lg->regs->cs = (__KERNEL_CS|GUEST_PL);
+-	lg->regs->eip = idt_address(lo, hi);
++	cpu->regs->ss = ss;
++	cpu->regs->esp = virtstack + (gstack - origstack);
++	cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
++	cpu->regs->eip = idt_address(lo, hi);
+ 
+ 	/* There are two kinds of interrupt handlers: 0xE is an "interrupt
+ 	 * gate" which expects interrupts to be disabled on entry. */
+ 	if (idt_type(lo, hi) == 0xE)
+-		if (put_user(0, &lg->lguest_data->irq_enabled))
+-			kill_guest(lg, "Disabling interrupts");
++		if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
++			kill_guest(cpu, "Disabling interrupts");
+ }
+ 
+ /*H:205
+@@ -129,23 +129,23 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
+  *
+  * maybe_do_interrupt() gets called before every entry to the Guest, to see if
+  * we should divert the Guest to running an interrupt handler. */
+-void maybe_do_interrupt(struct lguest *lg)
++void maybe_do_interrupt(struct lg_cpu *cpu)
+ {
+ 	unsigned int irq;
+ 	DECLARE_BITMAP(blk, LGUEST_IRQS);
+ 	struct desc_struct *idt;
  
- 	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
+ 	/* If the Guest hasn't even initialized yet, we can do nothing. */
+-	if (!lg->lguest_data)
++	if (!cpu->lg->lguest_data)
+ 		return;
+ 
+ 	/* Take our "irqs_pending" array and remove any interrupts the Guest
+ 	 * wants blocked: the result ends up in "blk". */
+-	if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
++	if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
+ 			   sizeof(blk)))
+ 		return;
+ 
+-	bitmap_andnot(blk, lg->irqs_pending, blk, LGUEST_IRQS);
++	bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);
+ 
+ 	/* Find the first interrupt. */
+ 	irq = find_first_bit(blk, LGUEST_IRQS);
+@@ -155,19 +155,20 @@ void maybe_do_interrupt(struct lguest *lg)
+ 
+ 	/* They may be in the middle of an iret, where they asked us never to
+ 	 * deliver interrupts. */
+-	if (lg->regs->eip >= lg->noirq_start && lg->regs->eip < lg->noirq_end)
++	if (cpu->regs->eip >= cpu->lg->noirq_start &&
++	   (cpu->regs->eip < cpu->lg->noirq_end))
+ 		return;
+ 
+ 	/* If they're halted, interrupts restart them. */
+-	if (lg->halted) {
++	if (cpu->halted) {
+ 		/* Re-enable interrupts. */
+-		if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
+-			kill_guest(lg, "Re-enabling interrupts");
+-		lg->halted = 0;
++		if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
++			kill_guest(cpu, "Re-enabling interrupts");
++		cpu->halted = 0;
+ 	} else {
+ 		/* Otherwise we check if they have interrupts disabled. */
+ 		u32 irq_enabled;
+-		if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
++		if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
+ 			irq_enabled = 0;
+ 		if (!irq_enabled)
+ 			return;
+@@ -176,15 +177,15 @@ void maybe_do_interrupt(struct lguest *lg)
+ 	/* Look at the IDT entry the Guest gave us for this interrupt.  The
+ 	 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
+ 	 * over them. */
+-	idt = &lg->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
++	idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
+ 	/* If they don't have a handler (yet?), we just ignore it */
+ 	if (idt_present(idt->a, idt->b)) {
+ 		/* OK, mark it no longer pending and deliver it. */
+-		clear_bit(irq, lg->irqs_pending);
++		clear_bit(irq, cpu->irqs_pending);
+ 		/* set_guest_interrupt() takes the interrupt descriptor and a
+ 		 * flag to say whether this interrupt pushes an error code onto
+ 		 * the stack as well: virtual interrupts never do. */
+-		set_guest_interrupt(lg, idt->a, idt->b, 0);
++		set_guest_interrupt(cpu, idt->a, idt->b, 0);
+ 	}
+ 
+ 	/* Every time we deliver an interrupt, we update the timestamp in the
+@@ -192,7 +193,7 @@ void maybe_do_interrupt(struct lguest *lg)
+ 	 * did this more often, but it can actually be quite slow: doing it
+ 	 * here is a compromise which means at least it gets updated every
+ 	 * timer interrupt. */
+-	write_timestamp(lg);
++	write_timestamp(cpu);
  }
-@@ -993,7 +1009,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
- 		return 0;
- 	}
- 
--	iu = __srp_get_tx_iu(target);
-+	iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
- 	if (!iu)
- 		goto err;
- 
-@@ -1022,12 +1038,13 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
- 
- 	len = srp_map_data(scmnd, target, req);
- 	if (len < 0) {
--		printk(KERN_ERR PFX "Failed to map data\n");
-+		shost_printk(KERN_ERR, target->scsi_host,
-+			     PFX "Failed to map data\n");
- 		goto err;
- 	}
- 
- 	if (__srp_post_recv(target)) {
--		printk(KERN_ERR PFX "Recv failed\n");
-+		shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n");
- 		goto err_unmap;
- 	}
- 
-@@ -1035,7 +1052,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
- 				      DMA_TO_DEVICE);
+ /*:*/
  
- 	if (__srp_post_send(target, iu, len)) {
--		printk(KERN_ERR PFX "Send failed\n");
-+		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
- 		goto err_unmap;
- 	}
+@@ -245,19 +246,19 @@ static int has_err(unsigned int trap)
+ }
  
-@@ -1090,6 +1107,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
- 			       struct ib_cm_event *event,
- 			       struct srp_target_port *target)
+ /* deliver_trap() returns true if it could deliver the trap. */
+-int deliver_trap(struct lguest *lg, unsigned int num)
++int deliver_trap(struct lg_cpu *cpu, unsigned int num)
  {
-+	struct Scsi_Host *shost = target->scsi_host;
- 	struct ib_class_port_info *cpi;
- 	int opcode;
- 
-@@ -1115,19 +1133,22 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
- 			memcpy(target->path.dgid.raw,
- 			       event->param.rej_rcvd.ari, 16);
- 
--			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
--			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
--			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
-+			shost_printk(KERN_DEBUG, shost,
-+				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
-+				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
-+				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
- 
- 			target->status = SRP_PORT_REDIRECT;
- 		} else {
--			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
-+			shost_printk(KERN_WARNING, shost,
-+				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
- 			target->status = -ECONNRESET;
- 		}
- 		break;
- 
- 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
--		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
-+		shost_printk(KERN_WARNING, shost,
-+			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
- 		target->status = -ECONNRESET;
- 		break;
+ 	/* Trap numbers are always 8 bit, but we set an impossible trap number
+ 	 * for traps inside the Switcher, so check that here. */
+-	if (num >= ARRAY_SIZE(lg->arch.idt))
++	if (num >= ARRAY_SIZE(cpu->arch.idt))
+ 		return 0;
  
-@@ -1138,20 +1159,21 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
- 			u32 reason = be32_to_cpu(rej->reason);
+ 	/* Early on the Guest hasn't set the IDT entries (or maybe it put a
+ 	 * bogus one in): if we fail here, the Guest will be killed. */
+-	if (!idt_present(lg->arch.idt[num].a, lg->arch.idt[num].b))
++	if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
+ 		return 0;
+-	set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b,
+-			    has_err(num));
++	set_guest_interrupt(cpu, cpu->arch.idt[num].a,
++			    cpu->arch.idt[num].b, has_err(num));
+ 	return 1;
+ }
  
- 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
--				printk(KERN_WARNING PFX
--				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
-+				shost_printk(KERN_WARNING, shost,
-+					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
- 			else
--				printk(KERN_WARNING PFX
--				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
-+				shost_printk(KERN_WARNING, shost,
-+					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
- 		} else
--			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
--			       " opcode 0x%02x\n", opcode);
-+			shost_printk(KERN_WARNING, shost,
-+				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
-+				     " opcode 0x%02x\n", opcode);
- 		target->status = -ECONNRESET;
- 		break;
+@@ -309,18 +310,18 @@ static int direct_trap(unsigned int num)
+  * the Guest.
+  *
+  * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. */
+-void pin_stack_pages(struct lguest *lg)
++void pin_stack_pages(struct lg_cpu *cpu)
+ {
+ 	unsigned int i;
  
- 	default:
--		printk(KERN_WARNING "  REJ reason 0x%x\n",
--		       event->param.rej_rcvd.reason);
-+		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
-+			     event->param.rej_rcvd.reason);
- 		target->status = -ECONNRESET;
- 	}
+ 	/* Depending on the CONFIG_4KSTACKS option, the Guest can have one or
+ 	 * two pages of stack space. */
+-	for (i = 0; i < lg->stack_pages; i++)
++	for (i = 0; i < cpu->lg->stack_pages; i++)
+ 		/* The stack grows *upwards*, so the address we're given is the
+ 		 * start of the page after the kernel stack.  Subtract one to
+ 		 * get back onto the first stack page, and keep subtracting to
+ 		 * get to the rest of the stack pages. */
+-		pin_page(lg, lg->esp1 - 1 - i * PAGE_SIZE);
++		pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
+ }
+ 
+ /* Direct traps also mean that we need to know whenever the Guest wants to use
+@@ -331,21 +332,21 @@ void pin_stack_pages(struct lguest *lg)
+  *
+  * In Linux each process has its own kernel stack, so this happens a lot: we
+  * change stacks on each context switch. */
+-void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
++void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
+ {
+ 	/* You are not allowed have a stack segment with privilege level 0: bad
+ 	 * Guest! */
+ 	if ((seg & 0x3) != GUEST_PL)
+-		kill_guest(lg, "bad stack segment %i", seg);
++		kill_guest(cpu, "bad stack segment %i", seg);
+ 	/* We only expect one or two stack pages. */
+ 	if (pages > 2)
+-		kill_guest(lg, "bad stack pages %u", pages);
++		kill_guest(cpu, "bad stack pages %u", pages);
+ 	/* Save where the stack is, and how many pages */
+-	lg->ss1 = seg;
+-	lg->esp1 = esp;
+-	lg->stack_pages = pages;
++	cpu->ss1 = seg;
++	cpu->esp1 = esp;
++	cpu->lg->stack_pages = pages;
+ 	/* Make sure the new stack pages are mapped */
+-	pin_stack_pages(lg);
++	pin_stack_pages(cpu);
+ }
+ 
+ /* All this reference to mapping stacks leads us neatly into the other complex
+@@ -353,7 +354,7 @@ void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
+ 
+ /*H:235 This is the routine which actually checks the Guest's IDT entry and
+  * transfers it into the entry in "struct lguest": */
+-static void set_trap(struct lguest *lg, struct desc_struct *trap,
++static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
+ 		     unsigned int num, u32 lo, u32 hi)
+ {
+ 	u8 type = idt_type(lo, hi);
+@@ -366,7 +367,7 @@ static void set_trap(struct lguest *lg, struct desc_struct *trap,
+ 
+ 	/* We only support interrupt and trap gates. */
+ 	if (type != 0xE && type != 0xF)
+-		kill_guest(lg, "bad IDT type %i", type);
++		kill_guest(cpu, "bad IDT type %i", type);
+ 
+ 	/* We only copy the handler address, present bit, privilege level and
+ 	 * type.  The privilege level controls where the trap can be triggered
+@@ -383,7 +384,7 @@ static void set_trap(struct lguest *lg, struct desc_struct *trap,
+  *
+  * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
+  * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */
+-void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
++void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
+ {
+ 	/* Guest never handles: NMI, doublefault, spurious interrupt or
+ 	 * hypercall.  We ignore when it tries to set them. */
+@@ -392,13 +393,13 @@ void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
+ 
+ 	/* Mark the IDT as changed: next time the Guest runs we'll know we have
+ 	 * to copy this again. */
+-	lg->changed |= CHANGED_IDT;
++	cpu->changed |= CHANGED_IDT;
+ 
+ 	/* Check that the Guest doesn't try to step outside the bounds. */
+-	if (num >= ARRAY_SIZE(lg->arch.idt))
+-		kill_guest(lg, "Setting idt entry %u", num);
++	if (num >= ARRAY_SIZE(cpu->arch.idt))
++		kill_guest(cpu, "Setting idt entry %u", num);
+ 	else
+-		set_trap(lg, &lg->arch.idt[num], num, lo, hi);
++		set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
  }
-@@ -1166,7 +1188,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
- 
- 	switch (event->event) {
- 	case IB_CM_REQ_ERROR:
--		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
-+		shost_printk(KERN_DEBUG, target->scsi_host,
-+			     PFX "Sending CM REQ failed\n");
- 		comp = 1;
- 		target->status = -ECONNRESET;
- 		break;
-@@ -1184,7 +1207,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
- 			target->scsi_host->can_queue = min(target->req_lim,
- 							   target->scsi_host->can_queue);
- 		} else {
--			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
-+			shost_printk(KERN_WARNING, target->scsi_host,
-+				    PFX "Unhandled RSP opcode %#x\n", opcode);
- 			target->status = -ECONNRESET;
- 			break;
- 		}
-@@ -1230,20 +1254,23 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
- 		break;
  
- 	case IB_CM_REJ_RECEIVED:
--		printk(KERN_DEBUG PFX "REJ received\n");
-+		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
- 		comp = 1;
+ /* The default entry for each interrupt points into the Switcher routines which
+@@ -434,14 +435,14 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
+ /*H:240 We don't use the IDT entries in the "struct lguest" directly, instead
+  * we copy them into the IDT which we've set up for Guests on this CPU, just
+  * before we run the Guest.  This routine does that copy. */
+-void copy_traps(const struct lguest *lg, struct desc_struct *idt,
++void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
+ 		const unsigned long *def)
+ {
+ 	unsigned int i;
  
- 		srp_cm_rej_handler(cm_id, event, target);
- 		break;
+ 	/* We can simply copy the direct traps, otherwise we use the default
+ 	 * ones in the Switcher: they will return to the Host. */
+-	for (i = 0; i < ARRAY_SIZE(lg->arch.idt); i++) {
++	for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
+ 		/* If no Guest can ever override this trap, leave it alone. */
+ 		if (!direct_trap(i))
+ 			continue;
+@@ -450,8 +451,8 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+ 		 * Interrupt gates (type 14) disable interrupts as they are
+ 		 * entered, which we never let the Guest do.  Not present
+ 		 * entries (type 0x0) also can't go direct, of course. */
+-		if (idt_type(lg->arch.idt[i].a, lg->arch.idt[i].b) == 0xF)
+-			idt[i] = lg->arch.idt[i];
++		if (idt_type(cpu->arch.idt[i].a, cpu->arch.idt[i].b) == 0xF)
++			idt[i] = cpu->arch.idt[i];
+ 		else
+ 			/* Reset it to the default. */
+ 			default_idt_entry(&idt[i], i, def[i]);
+@@ -470,13 +471,13 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+  * infrastructure to set a callback at that time.
+  *
+  * 0 means "turn off the clock". */
+-void guest_set_clockevent(struct lguest *lg, unsigned long delta)
++void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
+ {
+ 	ktime_t expires;
+ 
+ 	if (unlikely(delta == 0)) {
+ 		/* Clock event device is shutting down. */
+-		hrtimer_cancel(&lg->hrt);
++		hrtimer_cancel(&cpu->hrt);
+ 		return;
+ 	}
  
- 	case IB_CM_DREQ_RECEIVED:
--		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
-+		shost_printk(KERN_WARNING, target->scsi_host,
-+			     PFX "DREQ received - connection closed\n");
- 		if (ib_send_cm_drep(cm_id, NULL, 0))
--			printk(KERN_ERR PFX "Sending CM DREP failed\n");
-+			shost_printk(KERN_ERR, target->scsi_host,
-+				     PFX "Sending CM DREP failed\n");
- 		break;
+@@ -484,25 +485,25 @@ void guest_set_clockevent(struct lguest *lg, unsigned long delta)
+ 	 * all the time between now and the timer interrupt it asked for.  This
+ 	 * is almost always the right thing to do. */
+ 	expires = ktime_add_ns(ktime_get_real(), delta);
+-	hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS);
++	hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS);
+ }
+ 
+ /* This is the function called when the Guest's timer expires. */
+ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
+ {
+-	struct lguest *lg = container_of(timer, struct lguest, hrt);
++	struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);
+ 
+ 	/* Remember the first interrupt is the timer interrupt. */
+-	set_bit(0, lg->irqs_pending);
++	set_bit(0, cpu->irqs_pending);
+ 	/* If the Guest is actually stopped, we need to wake it up. */
+-	if (lg->halted)
+-		wake_up_process(lg->tsk);
++	if (cpu->halted)
++		wake_up_process(cpu->tsk);
+ 	return HRTIMER_NORESTART;
+ }
+ 
+ /* This sets up the timer for this Guest. */
+-void init_clockdev(struct lguest *lg)
++void init_clockdev(struct lg_cpu *cpu)
+ {
+-	hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+-	lg->hrt.function = clockdev_fn;
++	hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
++	cpu->hrt.function = clockdev_fn;
+ }
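
Aside, not part of the patch: with the hrtimer moved from struct lguest into struct lg_cpu, clockdev_fn() above recovers its vcpu with container_of(), i.e. by subtracting the offset of the embedded timer from the timer pointer. A self-contained user-space sketch of that pattern, using stand-in types rather than the kernel ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { int armed; };		/* stand-in for struct hrtimer   */

struct vcpu {
	unsigned int id;
	struct timer hrt;		/* embedded, as in struct lg_cpu */
};

/* What clockdev_fn() does: given only the timer pointer, recover the
 * vcpu that owns it and act on that vcpu. */
static void timer_fired(struct timer *t)
{
	struct vcpu *cpu = container_of(t, struct vcpu, hrt);
	printf("timer fired for vcpu %u\n", cpu->id);
}

int main(void)
{
	struct vcpu cpu = { .id = 0, .hrt = { 0 } };
	timer_fired(&cpu.hrt);		/* prints: timer fired for vcpu 0 */
	return 0;
}
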
+diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
+index 8692489..2337e1a 100644
+--- a/drivers/lguest/lg.h
++++ b/drivers/lguest/lg.h
+@@ -8,6 +8,7 @@
+ #include <linux/lguest.h>
+ #include <linux/lguest_launcher.h>
+ #include <linux/wait.h>
++#include <linux/hrtimer.h>
+ #include <linux/err.h>
+ #include <asm/semaphore.h>
  
- 	case IB_CM_TIMEWAIT_EXIT:
--		printk(KERN_ERR PFX "connection closed\n");
-+		shost_printk(KERN_ERR, target->scsi_host,
-+			     PFX "connection closed\n");
+@@ -38,58 +39,72 @@ struct lguest_pages
+ #define CHANGED_GDT_TLS		4 /* Actually a subset of CHANGED_GDT */
+ #define CHANGED_ALL	        3
+ 
+-/* The private info the thread maintains about the guest. */
+-struct lguest
+-{
+-	/* At end of a page shared mapped over lguest_pages in guest.  */
+-	unsigned long regs_page;
+-	struct lguest_regs *regs;
+-	struct lguest_data __user *lguest_data;
++struct lguest;
++
++struct lg_cpu {
++	unsigned int id;
++	struct lguest *lg;
+ 	struct task_struct *tsk;
+ 	struct mm_struct *mm; 	/* == tsk->mm, but that becomes NULL on exit */
+-	u32 pfn_limit;
+-	/* This provides the offset to the base of guest-physical
+-	 * memory in the Launcher. */
+-	void __user *mem_base;
+-	unsigned long kernel_address;
++
+ 	u32 cr2;
+-	int halted;
+ 	int ts;
+-	u32 next_hcall;
+ 	u32 esp1;
+ 	u8 ss1;
  
- 		comp = 1;
- 		target->status = 0;
-@@ -1255,7 +1282,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
- 		break;
++	/* Bitmap of what has changed: see CHANGED_* above. */
++	int changed;
++
++	unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */
++
++	/* At end of a page shared mapped over lguest_pages in guest.  */
++	unsigned long regs_page;
++	struct lguest_regs *regs;
++
++	struct lguest_pages *last_pages;
++
++	int cpu_pgd; /* which pgd this cpu is currently using */
++
+ 	/* If a hypercall was asked for, this points to the arguments. */
+ 	struct hcall_args *hcall;
++	u32 next_hcall;
++
++	/* Virtual clock device */
++	struct hrtimer hrt;
  
- 	default:
--		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
-+		shost_printk(KERN_WARNING, target->scsi_host,
-+			     PFX "Unhandled CM event %d\n", event->event);
- 		break;
- 	}
+ 	/* Do we need to stop what we're doing and return to userspace? */
+ 	int break_out;
+ 	wait_queue_head_t break_wq;
++	int halted;
  
-@@ -1283,7 +1311,7 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
+-	/* Bitmap of what has changed: see CHANGED_* above. */
+-	int changed;
+-	struct lguest_pages *last_pages;
++	/* Pending virtual interrupts */
++	DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);
++
++	struct lg_cpu_arch arch;
++};
++
++/* The private info the thread maintains about the guest. */
++struct lguest
++{
++	struct lguest_data __user *lguest_data;
++	struct lg_cpu cpus[NR_CPUS];
++	unsigned int nr_cpus;
++
++	u32 pfn_limit;
++	/* This provides the offset to the base of guest-physical
++	 * memory in the Launcher. */
++	void __user *mem_base;
++	unsigned long kernel_address;
  
- 	init_completion(&req->done);
+-	/* We keep a small number of these. */
+-	u32 pgdidx;
+ 	struct pgdir pgdirs[4];
  
--	iu = __srp_get_tx_iu(target);
-+	iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
- 	if (!iu)
- 		goto out;
+ 	unsigned long noirq_start, noirq_end;
+-	unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */
  
-@@ -1332,7 +1360,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
- 	struct srp_request *req;
- 	int ret = SUCCESS;
+ 	unsigned int stack_pages;
+ 	u32 tsc_khz;
  
--	printk(KERN_ERR "SRP abort called\n");
-+	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+ 	/* Dead? */
+ 	const char *dead;
+-
+-	struct lguest_arch arch;
+-
+-	/* Virtual clock device */
+-	struct hrtimer hrt;
+-
+-	/* Pending virtual interrupts */
+-	DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);
+ };
  
- 	if (target->qp_in_error)
- 		return FAILED;
-@@ -1362,7 +1390,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
- 	struct srp_target_port *target = host_to_target(scmnd->device->host);
- 	struct srp_request *req, *tmp;
+ extern struct mutex lguest_lock;
+@@ -97,26 +112,26 @@ extern struct mutex lguest_lock;
+ /* core.c: */
+ int lguest_address_ok(const struct lguest *lg,
+ 		      unsigned long addr, unsigned long len);
+-void __lgread(struct lguest *, void *, unsigned long, unsigned);
+-void __lgwrite(struct lguest *, unsigned long, const void *, unsigned);
++void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
++void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
  
--	printk(KERN_ERR "SRP reset_device called\n");
-+	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
+ /*H:035 Using memory-copy operations like that is usually inconvient, so we
+  * have the following helper macros which read and write a specific type (often
+  * an unsigned long).
+  *
+  * This reads into a variable of the given type then returns that. */
+-#define lgread(lg, addr, type)						\
+-	({ type _v; __lgread((lg), &_v, (addr), sizeof(_v)); _v; })
++#define lgread(cpu, addr, type)						\
++	({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
  
- 	if (target->qp_in_error)
- 		return FAILED;
-@@ -1389,7 +1417,7 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
- 	struct srp_target_port *target = host_to_target(scmnd->device->host);
- 	int ret = FAILED;
+ /* This checks that the variable is of the given type, then writes it out. */
+-#define lgwrite(lg, addr, type, val)				\
++#define lgwrite(cpu, addr, type, val)				\
+ 	do {							\
+ 		typecheck(type, val);				\
+-		__lgwrite((lg), (addr), &(val), sizeof(val));	\
++		__lgwrite((cpu), (addr), &(val), sizeof(val));	\
+ 	} while(0)
+ /* (end of memory access helper routines) :*/
  
--	printk(KERN_ERR PFX "SRP reset_host called\n");
-+	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
+-int run_guest(struct lguest *lg, unsigned long __user *user);
++int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
  
- 	if (!srp_reconnect_target(target))
- 		ret = SUCCESS;
-@@ -1543,6 +1571,7 @@ static struct scsi_host_template srp_template = {
- 	.this_id			= -1,
- 	.cmd_per_lun			= SRP_SQ_SIZE,
- 	.use_clustering			= ENABLE_CLUSTERING,
-+	.use_sg_chaining		= ENABLE_SG_CHAINING,
- 	.shost_attrs			= srp_host_attrs
- };
+ /* Helper macros to obtain the first 12 or the last 20 bits, this is only the
+  * first step in the migration to the kernel types.  pte_pfn is already defined
+@@ -126,52 +141,53 @@ int run_guest(struct lguest *lg, unsigned long __user *user);
+ #define pgd_pfn(x)	(pgd_val(x) >> PAGE_SHIFT)
+ 
+ /* interrupts_and_traps.c: */
+-void maybe_do_interrupt(struct lguest *lg);
+-int deliver_trap(struct lguest *lg, unsigned int num);
+-void load_guest_idt_entry(struct lguest *lg, unsigned int i, u32 low, u32 hi);
+-void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages);
+-void pin_stack_pages(struct lguest *lg);
++void maybe_do_interrupt(struct lg_cpu *cpu);
++int deliver_trap(struct lg_cpu *cpu, unsigned int num);
++void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
++			  u32 low, u32 hi);
++void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages);
++void pin_stack_pages(struct lg_cpu *cpu);
+ void setup_default_idt_entries(struct lguest_ro_state *state,
+ 			       const unsigned long *def);
+-void copy_traps(const struct lguest *lg, struct desc_struct *idt,
++void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
+ 		const unsigned long *def);
+-void guest_set_clockevent(struct lguest *lg, unsigned long delta);
+-void init_clockdev(struct lguest *lg);
++void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
++void init_clockdev(struct lg_cpu *cpu);
+ bool check_syscall_vector(struct lguest *lg);
+ int init_interrupts(void);
+ void free_interrupts(void);
+ 
+ /* segments.c: */
+ void setup_default_gdt_entries(struct lguest_ro_state *state);
+-void setup_guest_gdt(struct lguest *lg);
+-void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num);
+-void guest_load_tls(struct lguest *lg, unsigned long tls_array);
+-void copy_gdt(const struct lguest *lg, struct desc_struct *gdt);
+-void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt);
++void setup_guest_gdt(struct lg_cpu *cpu);
++void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num);
++void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array);
++void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt);
++void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
+ 
+ /* page_tables.c: */
+ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable);
+ void free_guest_pagetable(struct lguest *lg);
+-void guest_new_pagetable(struct lguest *lg, unsigned long pgtable);
++void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
+ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
+-void guest_pagetable_clear_all(struct lguest *lg);
+-void guest_pagetable_flush_user(struct lguest *lg);
+-void guest_set_pte(struct lguest *lg, unsigned long gpgdir,
++void guest_pagetable_clear_all(struct lg_cpu *cpu);
++void guest_pagetable_flush_user(struct lg_cpu *cpu);
++void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
+ 		   unsigned long vaddr, pte_t val);
+-void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages);
+-int demand_page(struct lguest *info, unsigned long cr2, int errcode);
+-void pin_page(struct lguest *lg, unsigned long vaddr);
+-unsigned long guest_pa(struct lguest *lg, unsigned long vaddr);
+-void page_table_guest_data_init(struct lguest *lg);
++void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
++int demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
++void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
++unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
++void page_table_guest_data_init(struct lg_cpu *cpu);
+ 
+ /* <arch>/core.c: */
+ void lguest_arch_host_init(void);
+ void lguest_arch_host_fini(void);
+-void lguest_arch_run_guest(struct lguest *lg);
+-void lguest_arch_handle_trap(struct lguest *lg);
+-int lguest_arch_init_hypercalls(struct lguest *lg);
+-int lguest_arch_do_hcall(struct lguest *lg, struct hcall_args *args);
+-void lguest_arch_setup_regs(struct lguest *lg, unsigned long start);
++void lguest_arch_run_guest(struct lg_cpu *cpu);
++void lguest_arch_handle_trap(struct lg_cpu *cpu);
++int lguest_arch_init_hypercalls(struct lg_cpu *cpu);
++int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args);
++void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start);
+ 
+ /* <arch>/switcher.S: */
+ extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
+@@ -181,8 +197,8 @@ int lguest_device_init(void);
+ void lguest_device_remove(void);
+ 
+ /* hypercalls.c: */
+-void do_hypercalls(struct lguest *lg);
+-void write_timestamp(struct lguest *lg);
++void do_hypercalls(struct lg_cpu *cpu);
++void write_timestamp(struct lg_cpu *cpu);
+ 
+ /*L:035
+  * Let's step aside for the moment, to study one important routine that's used
+@@ -208,12 +224,12 @@ void write_timestamp(struct lguest *lg);
+  * Like any macro which uses an "if", it is safely wrapped in a run-once "do {
+  * } while(0)".
+  */
+-#define kill_guest(lg, fmt...)					\
++#define kill_guest(cpu, fmt...)					\
+ do {								\
+-	if (!(lg)->dead) {					\
+-		(lg)->dead = kasprintf(GFP_ATOMIC, fmt);	\
+-		if (!(lg)->dead)				\
+-			(lg)->dead = ERR_PTR(-ENOMEM);		\
++	if (!(cpu)->lg->dead) {					\
++		(cpu)->lg->dead = kasprintf(GFP_ATOMIC, fmt);	\
++		if (!(cpu)->lg->dead)				\
++			(cpu)->lg->dead = ERR_PTR(-ENOMEM);	\
+ 	}							\
+ } while(0)
+ /* (End of aside) :*/
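
Aside, not part of the patch: the reworked kill_guest() macro above now reaches the death reason through (cpu)->lg->dead, but keeps the old behaviour of recording only the first reason. A rough user-space equivalent, written as a function with vsnprintf/malloc instead of kasprintf (those substitutions are mine, not the kernel's):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct guest { char *dead; };		/* stand-in for lguest->dead    */
struct vcpu  { struct guest *lg; };	/* stand-in for struct lg_cpu   */

/* Rough equivalent of kill_guest(): reach the guest via cpu->lg and
 * record only the first death reason; later calls are no-ops. */
static void kill_guest(struct vcpu *cpu, const char *fmt, ...)
{
	va_list ap;
	char buf[128];

	if (cpu->lg->dead)		/* already dead: keep the first reason */
		return;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	cpu->lg->dead = malloc(strlen(buf) + 1);
	if (cpu->lg->dead)		/* the kernel stores ERR_PTR(-ENOMEM) instead */
		strcpy(cpu->lg->dead, buf);
}

int main(void)
{
	struct guest g = { NULL };
	struct vcpu cpu = { &g };

	kill_guest(&cpu, "bad stack pages %u", 5u);
	kill_guest(&cpu, "bad IDT type %i", 3);	/* ignored: already dead */
	printf("dead: %s\n", g.dead);		/* prints: dead: bad stack pages 5 */
	free(g.dead);
	return 0;
}
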
+diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
+index 3b92a61..85d42d3 100644
+--- a/drivers/lguest/lguest_user.c
++++ b/drivers/lguest/lguest_user.c
+@@ -6,6 +6,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/miscdevice.h>
+ #include <linux/fs.h>
++#include <linux/sched.h>
+ #include "lg.h"
  
-@@ -1814,8 +1843,9 @@ static ssize_t srp_create_target(struct class_device *class_dev,
+ /*L:055 When something happens, the Waker process needs a way to stop the
+@@ -13,7 +14,7 @@
+  * LHREQ_BREAK and the value "1" to /dev/lguest to do this.  Once the Launcher
+  * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release
+  * the Waker. */
+-static int break_guest_out(struct lguest *lg, const unsigned long __user *input)
++static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input)
+ {
+ 	unsigned long on;
  
- 	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);
+@@ -22,21 +23,21 @@ static int break_guest_out(struct lguest *lg, const unsigned long __user *input)
+ 		return -EFAULT;
  
--	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
--	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
-+	shost_printk(KERN_DEBUG, target->scsi_host, PFX
-+		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
-+		     "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
- 	       (unsigned long long) be64_to_cpu(target->id_ext),
- 	       (unsigned long long) be64_to_cpu(target->ioc_guid),
- 	       be16_to_cpu(target->path.pkey),
-@@ -1842,7 +1872,8 @@ static ssize_t srp_create_target(struct class_device *class_dev,
- 	target->qp_in_error = 0;
- 	ret = srp_connect_target(target);
- 	if (ret) {
--		printk(KERN_ERR PFX "Connection failed\n");
-+		shost_printk(KERN_ERR, target->scsi_host,
-+			     PFX "Connection failed\n");
- 		goto err_cm_id;
+ 	if (on) {
+-		lg->break_out = 1;
++		cpu->break_out = 1;
+ 		/* Pop it out of the Guest (may be running on different CPU) */
+-		wake_up_process(lg->tsk);
++		wake_up_process(cpu->tsk);
+ 		/* Wait for them to reset it */
+-		return wait_event_interruptible(lg->break_wq, !lg->break_out);
++		return wait_event_interruptible(cpu->break_wq, !cpu->break_out);
+ 	} else {
+-		lg->break_out = 0;
+-		wake_up(&lg->break_wq);
++		cpu->break_out = 0;
++		wake_up(&cpu->break_wq);
+ 		return 0;
  	}
+ }
  
-diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
-index e3573e7..4a3c1f3 100644
---- a/drivers/infiniband/ulp/srp/ib_srp.h
-+++ b/drivers/infiniband/ulp/srp/ib_srp.h
-@@ -79,6 +79,11 @@ enum srp_target_state {
- 	SRP_TARGET_REMOVED
- };
+ /*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
+  * number to /dev/lguest. */
+-static int user_send_irq(struct lguest *lg, const unsigned long __user *input)
++static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
+ {
+ 	unsigned long irq;
  
-+enum srp_request_type {
-+	SRP_REQ_NORMAL,
-+	SRP_REQ_TASK_MGMT,
-+};
+@@ -46,7 +47,7 @@ static int user_send_irq(struct lguest *lg, const unsigned long __user *input)
+ 		return -EINVAL;
+ 	/* Next time the Guest runs, the core code will see if it can deliver
+ 	 * this interrupt. */
+-	set_bit(irq, lg->irqs_pending);
++	set_bit(irq, cpu->irqs_pending);
+ 	return 0;
+ }
+ 
+@@ -55,13 +56,21 @@ static int user_send_irq(struct lguest *lg, const unsigned long __user *input)
+ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
+ {
+ 	struct lguest *lg = file->private_data;
++	struct lg_cpu *cpu;
++	unsigned int cpu_id = *o;
+ 
+ 	/* You must write LHREQ_INITIALIZE first! */
+ 	if (!lg)
+ 		return -EINVAL;
+ 
++	/* Watch out for arbitrary vcpu indexes! */
++	if (cpu_id >= lg->nr_cpus)
++		return -EINVAL;
 +
- struct srp_device {
- 	struct list_head	dev_list;
- 	struct ib_device       *dev;
-diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
-index 8991ab0..61cff83 100644
---- a/drivers/input/mouse/pc110pad.c
-+++ b/drivers/input/mouse/pc110pad.c
-@@ -39,6 +39,7 @@
- #include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/pci.h>
-+#include <linux/delay.h>
++	cpu = &lg->cpus[cpu_id];
++
+ 	/* If you're not the task which owns the Guest, go away. */
+-	if (current != lg->tsk)
++	if (current != cpu->tsk)
+ 		return -EPERM;
  
- #include <asm/io.h>
- #include <asm/irq.h>
-@@ -62,8 +63,10 @@ static irqreturn_t pc110pad_interrupt(int irq, void *ptr)
- 	int value     = inb_p(pc110pad_io);
- 	int handshake = inb_p(pc110pad_io + 2);
+ 	/* If the guest is already dead, we indicate why */
+@@ -81,11 +90,53 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
  
--	outb_p(handshake |  1, pc110pad_io + 2);
--	outb_p(handshake & ~1, pc110pad_io + 2);
-+	outb(handshake |  1, pc110pad_io + 2);
-+	udelay(2);
-+	outb(handshake & ~1, pc110pad_io + 2);
-+	udelay(2);
- 	inb_p(0x64);
+ 	/* If we returned from read() last time because the Guest notified,
+ 	 * clear the flag. */
+-	if (lg->pending_notify)
+-		lg->pending_notify = 0;
++	if (cpu->pending_notify)
++		cpu->pending_notify = 0;
  
- 	pc110pad_data[pc110pad_count++] = value;
-diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
-index b1b2e07..99d92f5 100644
---- a/drivers/input/touchscreen/corgi_ts.c
-+++ b/drivers/input/touchscreen/corgi_ts.c
-@@ -74,10 +74,10 @@ extern unsigned int get_clk_frequency_khz(int info);
+ 	/* Run the Guest until something interesting happens. */
+-	return run_guest(lg, (unsigned long __user *)user);
++	return run_guest(cpu, (unsigned long __user *)user);
++}
++
++static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
++{
++	if (id >= NR_CPUS)
++		return -EINVAL;
++
++	cpu->id = id;
++	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
++	cpu->lg->nr_cpus++;
++	init_clockdev(cpu);
++
++	/* We need a complete page for the Guest registers: they are accessible
++	 * to the Guest and we can only grant it access to whole pages. */
++	cpu->regs_page = get_zeroed_page(GFP_KERNEL);
++	if (!cpu->regs_page)
++		return -ENOMEM;
++
++	/* We actually put the registers at the bottom of the page. */
++	cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);
++
++	/* Now we initialize the Guest's registers, handing it the start
++	 * address. */
++	lguest_arch_setup_regs(cpu, start_ip);
++
++	/* Initialize the queue for the waker to wait on */
++	init_waitqueue_head(&cpu->break_wq);
++
++	/* We keep a pointer to the Launcher task (ie. current task) for when
++	 * other Guests want to wake this one (inter-Guest I/O). */
++	cpu->tsk = current;
++
++	/* We need to keep a pointer to the Launcher's memory map, because if
++	 * the Launcher dies we need to clean it up.  If we don't keep a
++	 * reference, it is destroyed before close() is called. */
++	cpu->mm = get_task_mm(cpu->tsk);
++
++	/* We remember which CPU's pages this Guest used last, for optimization
++	 * when the same Guest runs on the same CPU twice. */
++	cpu->last_pages = NULL;
++
++	return 0;
+ }
  
- static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
+ /*L:020 The initialization write supplies 4 pointer sized (32 or 64 bit)
+@@ -134,15 +185,10 @@ static int initialize(struct file *file, const unsigned long __user *input)
+ 	lg->mem_base = (void __user *)(long)args[0];
+ 	lg->pfn_limit = args[1];
+ 
+-	/* We need a complete page for the Guest registers: they are accessible
+-	 * to the Guest and we can only grant it access to whole pages. */
+-	lg->regs_page = get_zeroed_page(GFP_KERNEL);
+-	if (!lg->regs_page) {
+-		err = -ENOMEM;
++	/* This is the first cpu */
++	err = lg_cpu_start(&lg->cpus[0], 0, args[3]);
++	if (err)
+ 		goto release_guest;
+-	}
+-	/* We actually put the registers at the bottom of the page. */
+-	lg->regs = (void *)lg->regs_page + PAGE_SIZE - sizeof(*lg->regs);
+ 
+ 	/* Initialize the Guest's shadow page tables, using the toplevel
+ 	 * address the Launcher gave us.  This allocates memory, so can
+@@ -151,28 +197,6 @@ static int initialize(struct file *file, const unsigned long __user *input)
+ 	if (err)
+ 		goto free_regs;
+ 
+-	/* Now we initialize the Guest's registers, handing it the start
+-	 * address. */
+-	lguest_arch_setup_regs(lg, args[3]);
+-
+-	/* The timer for lguest's clock needs initialization. */
+-	init_clockdev(lg);
+-
+-	/* We keep a pointer to the Launcher task (ie. current task) for when
+-	 * other Guests want to wake this one (inter-Guest I/O). */
+-	lg->tsk = current;
+-	/* We need to keep a pointer to the Launcher's memory map, because if
+-	 * the Launcher dies we need to clean it up.  If we don't keep a
+-	 * reference, it is destroyed before close() is called. */
+-	lg->mm = get_task_mm(lg->tsk);
+-
+-	/* Initialize the queue for the waker to wait on */
+-	init_waitqueue_head(&lg->break_wq);
+-
+-	/* We remember which CPU's pages this Guest used last, for optimization
+-	 * when the same Guest runs on the same CPU twice. */
+-	lg->last_pages = NULL;
+-
+ 	/* We keep our "struct lguest" in the file's private_data. */
+ 	file->private_data = lg;
+ 
+@@ -182,7 +206,8 @@ static int initialize(struct file *file, const unsigned long __user *input)
+ 	return sizeof(args);
+ 
+ free_regs:
+-	free_page(lg->regs_page);
++	/* FIXME: This should be in free_vcpu */
++	free_page(lg->cpus[0].regs_page);
+ release_guest:
+ 	kfree(lg);
+ unlock:
+@@ -202,30 +227,37 @@ static ssize_t write(struct file *file, const char __user *in,
+ 	struct lguest *lg = file->private_data;
+ 	const unsigned long __user *input = (const unsigned long __user *)in;
+ 	unsigned long req;
++	struct lg_cpu *uninitialized_var(cpu);
++	unsigned int cpu_id = *off;
+ 
+ 	if (get_user(req, input) != 0)
+ 		return -EFAULT;
+ 	input++;
+ 
+ 	/* If you haven't initialized, you must do that first. */
+-	if (req != LHREQ_INITIALIZE && !lg)
+-		return -EINVAL;
++	if (req != LHREQ_INITIALIZE) {
++		if (!lg || (cpu_id >= lg->nr_cpus))
++			return -EINVAL;
++		cpu = &lg->cpus[cpu_id];
++		if (!cpu)
++			return -EINVAL;
++	}
+ 
+ 	/* Once the Guest is dead, all you can do is read() why it died. */
+ 	if (lg && lg->dead)
+ 		return -ENOENT;
+ 
+ 	/* If you're not the task which owns the Guest, you can only break */
+-	if (lg && current != lg->tsk && req != LHREQ_BREAK)
++	if (lg && current != cpu->tsk && req != LHREQ_BREAK)
+ 		return -EPERM;
+ 
+ 	switch (req) {
+ 	case LHREQ_INITIALIZE:
+ 		return initialize(file, input);
+ 	case LHREQ_IRQ:
+-		return user_send_irq(lg, input);
++		return user_send_irq(cpu, input);
+ 	case LHREQ_BREAK:
+-		return break_guest_out(lg, input);
++		return break_guest_out(cpu, input);
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -241,6 +273,7 @@ static ssize_t write(struct file *file, const char __user *in,
+ static int close(struct inode *inode, struct file *file)
  {
--	unsigned long hsync_len = corgi_ts->machinfo->get_hsync_len();
-+	unsigned long hsync_invperiod = corgi_ts->machinfo->get_hsync_invperiod();
+ 	struct lguest *lg = file->private_data;
++	unsigned int i;
  
--	if (hsync_len)
--		return get_clk_frequency_khz(0)*1000/hsync_len;
-+	if (hsync_invperiod)
-+		return get_clk_frequency_khz(0)*1000/hsync_invperiod;
- 	else
- 		return 0;
+ 	/* If we never successfully initialized, there's nothing to clean up */
+ 	if (!lg)
+@@ -249,19 +282,23 @@ static int close(struct inode *inode, struct file *file)
+ 	/* We need the big lock, to protect from inter-guest I/O and other
+ 	 * Launchers initializing guests. */
+ 	mutex_lock(&lguest_lock);
+-	/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
+-	hrtimer_cancel(&lg->hrt);
++
+ 	/* Free up the shadow page tables for the Guest. */
+ 	free_guest_pagetable(lg);
+-	/* Now all the memory cleanups are done, it's safe to release the
+-	 * Launcher's memory management structure. */
+-	mmput(lg->mm);
++
++	for (i = 0; i < lg->nr_cpus; i++) {
++		/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
++		hrtimer_cancel(&lg->cpus[i].hrt);
++		/* We can free up the register page we allocated. */
++		free_page(lg->cpus[i].regs_page);
++		/* Now all the memory cleanups are done, it's safe to release
++		 * the Launcher's memory management structure. */
++		mmput(lg->cpus[i].mm);
++	}
+ 	/* If lg->dead doesn't contain an error code it will be NULL or a
+ 	 * kmalloc()ed string, either of which is ok to hand to kfree(). */
+ 	if (!IS_ERR(lg->dead))
+ 		kfree(lg->dead);
+-	/* We can free up the register page we allocated. */
+-	free_page(lg->regs_page);
+ 	/* We clear the entire structure, which also marks it as free for the
+ 	 * next user. */
+ 	memset(lg, 0, sizeof(*lg));
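
Aside, not part of the patch: lg_cpu_start() above wires up the back-pointer with container_of((cpu - id), struct lguest, cpus[0]), stepping back from &lg->cpus[id] to cpus[0] and then to the enclosing struct lguest. A small user-space sketch that checks the arithmetic, again with stand-in types:

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vcpu { unsigned int id; };

struct guest {
	unsigned int nr_cpus;
	struct vcpu cpus[4];		/* NR_CPUS shrunk to 4 for the sketch */
};

int main(void)
{
	struct guest g = { 0 };
	unsigned int id = 2;
	struct vcpu *cpu = &g.cpus[id];

	/* cpu - id points back at &g.cpus[0]; container_of() then
	 * subtracts offsetof(struct guest, cpus[0]) to reach &g. */
	struct guest *back = container_of(cpu - id, struct guest, cpus[0]);

	assert(back == &g);
	return 0;
}
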
+diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
+index fffabb3..74b4cf2 100644
+--- a/drivers/lguest/page_tables.c
++++ b/drivers/lguest/page_tables.c
+@@ -68,23 +68,23 @@ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
+  * page directory entry (PGD) for that address.  Since we keep track of several
+  * page tables, the "i" argument tells us which one we're interested in (it's
+  * usually the current one). */
+-static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
++static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
+ {
+ 	unsigned int index = pgd_index(vaddr);
+ 
+ 	/* We kill any Guest trying to touch the Switcher addresses. */
+ 	if (index >= SWITCHER_PGD_INDEX) {
+-		kill_guest(lg, "attempt to access switcher pages");
++		kill_guest(cpu, "attempt to access switcher pages");
+ 		index = 0;
+ 	}
+ 	/* Return a pointer index'th pgd entry for the i'th page table. */
+-	return &lg->pgdirs[i].pgdir[index];
++	return &cpu->lg->pgdirs[i].pgdir[index];
+ }
+ 
+ /* This routine then takes the page directory entry returned above, which
+  * contains the address of the page table entry (PTE) page.  It then returns a
+  * pointer to the PTE entry for the given address. */
+-static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr)
++static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
+ {
+ 	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+ 	/* You should never call this if the PGD entry wasn't valid */
+@@ -94,14 +94,13 @@ static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr)
+ 
+ /* These two functions just like the above two, except they access the Guest
+  * page tables.  Hence they return a Guest address. */
+-static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
++static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
+ {
+ 	unsigned int index = vaddr >> (PGDIR_SHIFT);
+-	return lg->pgdirs[lg->pgdidx].gpgdir + index * sizeof(pgd_t);
++	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
+ }
+ 
+-static unsigned long gpte_addr(struct lguest *lg,
+-			       pgd_t gpgd, unsigned long vaddr)
++static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
+ {
+ 	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
+ 	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
+@@ -138,7 +137,7 @@ static unsigned long get_pfn(unsigned long virtpfn, int write)
+  * entry can be a little tricky.  The flags are (almost) the same, but the
+  * Guest PTE contains a virtual page number: the CPU needs the real page
+  * number. */
+-static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
++static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
+ {
+ 	unsigned long pfn, base, flags;
+ 
+@@ -149,7 +148,7 @@ static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
+ 	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
+ 
+ 	/* The Guest's pages are offset inside the Launcher. */
+-	base = (unsigned long)lg->mem_base / PAGE_SIZE;
++	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
+ 
+ 	/* We need a temporary "unsigned long" variable to hold the answer from
+ 	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
+@@ -157,7 +156,7 @@ static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
+ 	 * page, given the virtual number. */
+ 	pfn = get_pfn(base + pte_pfn(gpte), write);
+ 	if (pfn == -1UL) {
+-		kill_guest(lg, "failed to get page %lu", pte_pfn(gpte));
++		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
+ 		/* When we destroy the Guest, we'll go through the shadow page
+ 		 * tables and release_pte() them.  Make sure we don't think
+ 		 * this one is valid! */
+@@ -177,17 +176,18 @@ static void release_pte(pte_t pte)
  }
-@@ -114,7 +114,7 @@ static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doRecive, i
- 			if (timer2-timer1 > wait_time) {
- 				/* too slow - timeout, try again */
- 				corgi_ts->machinfo->wait_hsync();
--				/* get OSCR */
-+				/* get CCNT */
- 				CCNT(timer1);
- 				/* Wait after HSync */
- 				CCNT(timer2);
-diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
-index f449dae..23ae66c 100644
---- a/drivers/isdn/capi/capi.c
-+++ b/drivers/isdn/capi/capi.c
-@@ -1544,11 +1544,11 @@ static int __init capi_init(void)
- 		return PTR_ERR(capi_class);
- 	}
+ /*:*/
  
--	class_device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi");
-+	device_create(capi_class, NULL, MKDEV(capi_major, 0), "capi");
+-static void check_gpte(struct lguest *lg, pte_t gpte)
++static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
+ {
+ 	if ((pte_flags(gpte) & (_PAGE_PWT|_PAGE_PSE))
+-	    || pte_pfn(gpte) >= lg->pfn_limit)
+-		kill_guest(lg, "bad page table entry");
++	    || pte_pfn(gpte) >= cpu->lg->pfn_limit)
++		kill_guest(cpu, "bad page table entry");
+ }
+ 
+-static void check_gpgd(struct lguest *lg, pgd_t gpgd)
++static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
+ {
+-	if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || pgd_pfn(gpgd) >= lg->pfn_limit)
+-		kill_guest(lg, "bad page directory entry");
++	if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
++	   (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
++		kill_guest(cpu, "bad page directory entry");
+ }
+ 
+ /*H:330
+@@ -200,7 +200,7 @@ static void check_gpgd(struct lguest *lg, pgd_t gpgd)
+  *
+  * If we fixed up the fault (ie. we mapped the address), this routine returns
+  * true.  Otherwise, it was a real fault and we need to tell the Guest. */
+-int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
++int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+ {
+ 	pgd_t gpgd;
+ 	pgd_t *spgd;
+@@ -209,24 +209,24 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+ 	pte_t *spte;
+ 
+ 	/* First step: get the top-level Guest page table entry. */
+-	gpgd = lgread(lg, gpgd_addr(lg, vaddr), pgd_t);
++	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+ 	/* Toplevel not present?  We can't map it in. */
+ 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+ 		return 0;
  
- #ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- 	if (capinc_tty_init() < 0) {
--		class_device_destroy(capi_class, MKDEV(capi_major, 0));
-+		device_destroy(capi_class, MKDEV(capi_major, 0));
- 		class_destroy(capi_class);
- 		unregister_chrdev(capi_major, "capi20");
- 		return -ENOMEM;
-@@ -1576,7 +1576,7 @@ static void __exit capi_exit(void)
+ 	/* Now look at the matching shadow entry. */
+-	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
++	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
+ 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
+ 		/* No shadow entry: allocate a new shadow PTE page. */
+ 		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+ 		/* This is not really the Guest's fault, but killing it is
+ 		 * simple for this corner case. */
+ 		if (!ptepage) {
+-			kill_guest(lg, "out of memory allocating pte page");
++			kill_guest(cpu, "out of memory allocating pte page");
+ 			return 0;
+ 		}
+ 		/* We check that the Guest pgd is OK. */
+-		check_gpgd(lg, gpgd);
++		check_gpgd(cpu, gpgd);
+ 		/* And we copy the flags to the shadow PGD entry.  The page
+ 		 * number in the shadow PGD is the page we just allocated. */
+ 		*spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
+@@ -234,8 +234,8 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+ 
+ 	/* OK, now we look at the lower level in the Guest page table: keep its
+ 	 * address, because we might update it later. */
+-	gpte_ptr = gpte_addr(lg, gpgd, vaddr);
+-	gpte = lgread(lg, gpte_ptr, pte_t);
++	gpte_ptr = gpte_addr(gpgd, vaddr);
++	gpte = lgread(cpu, gpte_ptr, pte_t);
+ 
+ 	/* If this page isn't in the Guest page tables, we can't page it in. */
+ 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
+@@ -252,7 +252,7 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+ 
+ 	/* Check that the Guest PTE flags are OK, and the page number is below
+ 	 * the pfn_limit (ie. not mapping the Launcher binary). */
+-	check_gpte(lg, gpte);
++	check_gpte(cpu, gpte);
+ 
+ 	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
+ 	gpte = pte_mkyoung(gpte);
+@@ -260,7 +260,7 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+ 		gpte = pte_mkdirty(gpte);
+ 
+ 	/* Get the pointer to the shadow PTE entry we're going to set. */
+-	spte = spte_addr(lg, *spgd, vaddr);
++	spte = spte_addr(*spgd, vaddr);
+ 	/* If there was a valid shadow PTE entry here before, we release it.
+ 	 * This can happen with a write to a previously read-only entry. */
+ 	release_pte(*spte);
+@@ -268,17 +268,17 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+ 	/* If this is a write, we insist that the Guest page is writable (the
+ 	 * final arg to gpte_to_spte()). */
+ 	if (pte_dirty(gpte))
+-		*spte = gpte_to_spte(lg, gpte, 1);
++		*spte = gpte_to_spte(cpu, gpte, 1);
+ 	else
+ 		/* If this is a read, don't set the "writable" bit in the page
+ 		 * table entry, even if the Guest says it's writable.  That way
+ 		 * we will come back here when a write does actually occur, so
+ 		 * we can update the Guest's _PAGE_DIRTY flag. */
+-		*spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0);
++		*spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);
+ 
+ 	/* Finally, we write the Guest PTE entry back: we've set the
+ 	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
+-	lgwrite(lg, gpte_ptr, pte_t, gpte);
++	lgwrite(cpu, gpte_ptr, pte_t, gpte);
+ 
+ 	/* The fault is fixed, the page table is populated, the mapping
+ 	 * manipulated, the result returned and the code complete.  A small
+@@ -297,19 +297,19 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+  *
+  * This is a quick version which answers the question: is this virtual address
+  * mapped by the shadow page tables, and is it writable? */
+-static int page_writable(struct lguest *lg, unsigned long vaddr)
++static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
  {
- 	proc_exit();
+ 	pgd_t *spgd;
+ 	unsigned long flags;
  
--	class_device_destroy(capi_class, MKDEV(capi_major, 0));
-+	device_destroy(capi_class, MKDEV(capi_major, 0));
- 	class_destroy(capi_class);
- 	unregister_chrdev(capi_major, "capi20");
+ 	/* Look at the current top level entry: is it present? */
+-	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
++	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
+ 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
+ 		return 0;
  
-diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
-index 48c1775..cb42b69 100644
---- a/drivers/isdn/capi/capidrv.c
-+++ b/drivers/isdn/capi/capidrv.c
-@@ -2332,13 +2332,14 @@ static int __init capidrv_init(void)
+ 	/* Check the flags on the pte entry itself: it must be present and
+ 	 * writable. */
+-	flags = pte_flags(*(spte_addr(lg, *spgd, vaddr)));
++	flags = pte_flags(*(spte_addr(*spgd, vaddr)));
+ 
+ 	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
+ }
+@@ -317,10 +317,10 @@ static int page_writable(struct lguest *lg, unsigned long vaddr)
+ /* So, when pin_stack_pages() asks us to pin a page, we check if it's already
+  * in the page tables, and if not, we call demand_page() with error code 2
+  * (meaning "write"). */
+-void pin_page(struct lguest *lg, unsigned long vaddr)
++void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
+ {
+-	if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
+-		kill_guest(lg, "bad stack page %#lx", vaddr);
++	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
++		kill_guest(cpu, "bad stack page %#lx", vaddr);
+ }
+ 
+ /*H:450 If we chase down the release_pgd() code, it looks like this: */
+@@ -358,28 +358,28 @@ static void flush_user_mappings(struct lguest *lg, int idx)
+  *
+  * The Guest has a hypercall to throw away the page tables: it's used when a
+  * large number of mappings have been changed. */
+-void guest_pagetable_flush_user(struct lguest *lg)
++void guest_pagetable_flush_user(struct lg_cpu *cpu)
+ {
+ 	/* Drop the userspace part of the current page table. */
+-	flush_user_mappings(lg, lg->pgdidx);
++	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
+ }
+ /*:*/
  
- static void __exit capidrv_exit(void)
- {
--	char rev[10];
-+	char rev[32];
- 	char *p;
+ /* We walk down the guest page tables to get a guest-physical address */
+-unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
++unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
+ {
+ 	pgd_t gpgd;
+ 	pte_t gpte;
+ 
+ 	/* First step: get the top-level Guest page table entry. */
+-	gpgd = lgread(lg, gpgd_addr(lg, vaddr), pgd_t);
++	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+ 	/* Toplevel not present?  We can't map it in. */
+ 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+-		kill_guest(lg, "Bad address %#lx", vaddr);
++		kill_guest(cpu, "Bad address %#lx", vaddr);
+ 
+-	gpte = lgread(lg, gpte_addr(lg, gpgd, vaddr), pte_t);
++	gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
+ 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
+-		kill_guest(lg, "Bad address %#lx", vaddr);
++		kill_guest(cpu, "Bad address %#lx", vaddr);
+ 
+ 	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+ }
+@@ -399,7 +399,7 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
+ /*H:435 And this is us, creating the new page directory.  If we really do
+  * allocate a new one (and so the kernel parts are not there), we set
+  * blank_pgdir. */
+-static unsigned int new_pgdir(struct lguest *lg,
++static unsigned int new_pgdir(struct lg_cpu *cpu,
+ 			      unsigned long gpgdir,
+ 			      int *blank_pgdir)
+ {
+@@ -407,22 +407,23 @@ static unsigned int new_pgdir(struct lguest *lg,
+ 
+ 	/* We pick one entry at random to throw out.  Choosing the Least
+ 	 * Recently Used might be better, but this is easy. */
+-	next = random32() % ARRAY_SIZE(lg->pgdirs);
++	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
+ 	/* If it's never been allocated at all before, try now. */
+-	if (!lg->pgdirs[next].pgdir) {
+-		lg->pgdirs[next].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
++	if (!cpu->lg->pgdirs[next].pgdir) {
++		cpu->lg->pgdirs[next].pgdir =
++					(pgd_t *)get_zeroed_page(GFP_KERNEL);
+ 		/* If the allocation fails, just keep using the one we have */
+-		if (!lg->pgdirs[next].pgdir)
+-			next = lg->pgdidx;
++		if (!cpu->lg->pgdirs[next].pgdir)
++			next = cpu->cpu_pgd;
+ 		else
+ 			/* This is a blank page, so there are no kernel
+ 			 * mappings: caller must map the stack! */
+ 			*blank_pgdir = 1;
+ 	}
+ 	/* Record which Guest toplevel this shadows. */
+-	lg->pgdirs[next].gpgdir = gpgdir;
++	cpu->lg->pgdirs[next].gpgdir = gpgdir;
+ 	/* Release all the non-kernel mappings. */
+-	flush_user_mappings(lg, next);
++	flush_user_mappings(cpu->lg, next);
  
- 	if ((p = strchr(revision, ':')) != 0) {
--		strcpy(rev, p + 1);
--		p = strchr(rev, '$');
--		*p = 0;
-+		strncpy(rev, p + 1, sizeof(rev));
-+		rev[sizeof(rev)-1] = 0;
-+		if ((p = strchr(rev, '$')) != 0)
-+			*p = 0;
- 	} else {
- 		strcpy(rev, " ??? ");
+ 	return next;
+ }
+@@ -432,21 +433,21 @@ static unsigned int new_pgdir(struct lguest *lg,
+  * Now we've seen all the page table setting and manipulation, let's see
+  * what happens when the Guest changes page tables (ie. changes the top-level
+  * pgdir).  This occurs on almost every context switch. */
+-void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
++void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
+ {
+ 	int newpgdir, repin = 0;
+ 
+ 	/* Look to see if we have this one already. */
+-	newpgdir = find_pgdir(lg, pgtable);
++	newpgdir = find_pgdir(cpu->lg, pgtable);
+ 	/* If not, we allocate or mug an existing one: if it's a fresh one,
+ 	 * repin gets set to 1. */
+-	if (newpgdir == ARRAY_SIZE(lg->pgdirs))
+-		newpgdir = new_pgdir(lg, pgtable, &repin);
++	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
++		newpgdir = new_pgdir(cpu, pgtable, &repin);
+ 	/* Change the current pgd index to the new one. */
+-	lg->pgdidx = newpgdir;
++	cpu->cpu_pgd = newpgdir;
+ 	/* If it was completely blank, we map in the Guest kernel stack */
+ 	if (repin)
+-		pin_stack_pages(lg);
++		pin_stack_pages(cpu);
+ }
+ 
+ /*H:470 Finally, a routine which throws away everything: all PGD entries in all
+@@ -468,11 +469,11 @@ static void release_all_pagetables(struct lguest *lg)
+  * mapping.  Since kernel mappings are in every page table, it's easiest to
+  * throw them all away.  This traps the Guest in amber for a while as
+  * everything faults back in, but it's rare. */
+-void guest_pagetable_clear_all(struct lguest *lg)
++void guest_pagetable_clear_all(struct lg_cpu *cpu)
+ {
+-	release_all_pagetables(lg);
++	release_all_pagetables(cpu->lg);
+ 	/* We need the Guest kernel stack mapped again. */
+-	pin_stack_pages(lg);
++	pin_stack_pages(cpu);
+ }
+ /*:*/
+ /*M:009 Since we throw away all mappings when a kernel mapping changes, our
+@@ -497,24 +498,24 @@ void guest_pagetable_clear_all(struct lguest *lg)
+  * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
+  * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
+  */
+-static void do_set_pte(struct lguest *lg, int idx,
++static void do_set_pte(struct lg_cpu *cpu, int idx,
+ 		       unsigned long vaddr, pte_t gpte)
+ {
+ 	/* Look up the matching shadow page directory entry. */
+-	pgd_t *spgd = spgd_addr(lg, idx, vaddr);
++	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
+ 
+ 	/* If the top level isn't present, there's no entry to update. */
+ 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
+ 		/* Otherwise, we start by releasing the existing entry. */
+-		pte_t *spte = spte_addr(lg, *spgd, vaddr);
++		pte_t *spte = spte_addr(*spgd, vaddr);
+ 		release_pte(*spte);
+ 
+ 		/* If they're setting this entry as dirty or accessed, we might
+ 		 * as well put that entry they've given us in now.  This shaves
+ 		 * 10% off a copy-on-write micro-benchmark. */
+ 		if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+-			check_gpte(lg, gpte);
+-			*spte = gpte_to_spte(lg, gpte,
++			check_gpte(cpu, gpte);
++			*spte = gpte_to_spte(cpu, gpte,
+ 					     pte_flags(gpte) & _PAGE_DIRTY);
+ 		} else
+ 			/* Otherwise kill it and we can demand_page() it in
+@@ -533,22 +534,22 @@ static void do_set_pte(struct lguest *lg, int idx,
+  *
+  * The benefit is that when we have to track a new page table, we can keep
+  * all the kernel mappings.  This speeds up context switch immensely. */
+-void guest_set_pte(struct lguest *lg,
++void guest_set_pte(struct lg_cpu *cpu,
+ 		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
+ {
+ 	/* Kernel mappings must be changed on all top levels.  Slow, but
+ 	 * doesn't happen often. */
+-	if (vaddr >= lg->kernel_address) {
++	if (vaddr >= cpu->lg->kernel_address) {
+ 		unsigned int i;
+-		for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+-			if (lg->pgdirs[i].pgdir)
+-				do_set_pte(lg, i, vaddr, gpte);
++		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
++			if (cpu->lg->pgdirs[i].pgdir)
++				do_set_pte(cpu, i, vaddr, gpte);
+ 	} else {
+ 		/* Is this page table one we have a shadow for? */
+-		int pgdir = find_pgdir(lg, gpgdir);
+-		if (pgdir != ARRAY_SIZE(lg->pgdirs))
++		int pgdir = find_pgdir(cpu->lg, gpgdir);
++		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
+ 			/* If so, do the update. */
+-			do_set_pte(lg, pgdir, vaddr, gpte);
++			do_set_pte(cpu, pgdir, vaddr, gpte);
  	}
-diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
-index a0317ab..02bdaf2 100644
---- a/drivers/isdn/gigaset/gigaset.h
-+++ b/drivers/isdn/gigaset/gigaset.h
-@@ -106,12 +106,6 @@ enum debuglevel {
- 					 activated */
- };
+ }
  
--/* missing from linux/device.h ... */
--#ifndef dev_notice
--#define dev_notice(dev, format, arg...)		\
--	dev_printk(KERN_NOTICE , dev , format , ## arg)
--#endif
--
- /* Kernel message macros for situations where dev_printk and friends cannot be
-  * used for lack of reliable access to a device structure.
-  * linux/usb.h already contains these but in an obsolete form which clutters
-diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
-index 47c10b8..c0f372f 100644
---- a/drivers/kvm/kvm_main.c
-+++ b/drivers/kvm/kvm_main.c
-@@ -3451,7 +3451,7 @@ static int kvm_resume(struct sys_device *dev)
+@@ -590,30 +591,32 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
+ {
+ 	/* We start on the first shadow page table, and give it a blank PGD
+ 	 * page. */
+-	lg->pgdidx = 0;
+-	lg->pgdirs[lg->pgdidx].gpgdir = pgtable;
+-	lg->pgdirs[lg->pgdidx].pgdir = (pgd_t*)get_zeroed_page(GFP_KERNEL);
+-	if (!lg->pgdirs[lg->pgdidx].pgdir)
++	lg->pgdirs[0].gpgdir = pgtable;
++	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
++	if (!lg->pgdirs[0].pgdir)
+ 		return -ENOMEM;
++	lg->cpus[0].cpu_pgd = 0;
+ 	return 0;
  }
  
- static struct sysdev_class kvm_sysdev_class = {
--	set_kset_name("kvm"),
-+	.name = "kvm",
- 	.suspend = kvm_suspend,
- 	.resume = kvm_resume,
- };
-diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
-index 4e04e49..ced4ac1 100644
---- a/drivers/kvm/svm.c
-+++ b/drivers/kvm/svm.c
-@@ -290,7 +290,7 @@ static void svm_hardware_enable(void *garbage)
- #ifdef CONFIG_X86_64
- 	struct desc_ptr gdt_descr;
- #else
--	struct Xgt_desc_struct gdt_descr;
-+	struct desc_ptr gdt_descr;
- #endif
- 	struct desc_struct *gdt;
- 	int me = raw_smp_processor_id();
-diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
-index bb56ae3..5b397b6 100644
---- a/drivers/kvm/vmx.c
-+++ b/drivers/kvm/vmx.c
-@@ -524,7 +524,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
- static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
- {
- 	if (vcpu->rmode.active)
--		rflags |= IOPL_MASK | X86_EFLAGS_VM;
-+		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
- 	vmcs_writel(GUEST_RFLAGS, rflags);
+ /* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
+-void page_table_guest_data_init(struct lguest *lg)
++void page_table_guest_data_init(struct lg_cpu *cpu)
+ {
+ 	/* We get the kernel address: above this is all kernel memory. */
+-	if (get_user(lg->kernel_address, &lg->lguest_data->kernel_address)
++	if (get_user(cpu->lg->kernel_address,
++		     &cpu->lg->lguest_data->kernel_address)
+ 	    /* We tell the Guest that it can't use the top 4MB of virtual
+ 	     * addresses used by the Switcher. */
+-	    || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
+-	    || put_user(lg->pgdirs[lg->pgdidx].gpgdir,&lg->lguest_data->pgdir))
+-		kill_guest(lg, "bad guest page %p", lg->lguest_data);
++	    || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
++	    || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
++		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
+ 
+ 	/* In flush_user_mappings() we loop from 0 to
+ 	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
+ 	 * Switcher mappings, so check that now. */
+-	if (pgd_index(lg->kernel_address) >= SWITCHER_PGD_INDEX)
+-		kill_guest(lg, "bad kernel address %#lx", lg->kernel_address);
++	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
++		kill_guest(cpu, "bad kernel address %#lx",
++				 cpu->lg->kernel_address);
+ }
+ 
+ /* When a Guest dies, our cleanup is fairly simple. */
+@@ -634,17 +637,18 @@ void free_guest_pagetable(struct lguest *lg)
+  * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
+  * for each CPU already set up, we just need to hook them in now we know which
+  * Guest is about to run on this CPU. */
+-void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
++void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
+ {
+ 	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
+ 	pgd_t switcher_pgd;
+ 	pte_t regs_pte;
++	unsigned long pfn;
+ 
+ 	/* Make the last PGD entry for this Guest point to the Switcher's PTE
+ 	 * page for this CPU (with appropriate flags). */
+-	switcher_pgd = __pgd(__pa(switcher_pte_page) | _PAGE_KERNEL);
++	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);
+ 
+-	lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
++	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
+ 
+ 	/* We also change the Switcher PTE page.  When we're running the Guest,
+ 	 * we want the Guest's "regs" page to appear where the first Switcher
+@@ -653,7 +657,8 @@ void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
+ 	 * CPU's "struct lguest_pages": if we make sure the Guest's register
+ 	 * page is already mapped there, we don't have to copy them out
+ 	 * again. */
+-	regs_pte = pfn_pte (__pa(lg->regs_page) >> PAGE_SHIFT, __pgprot(_PAGE_KERNEL));
++	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
++	regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
+ 	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
  }
+ /*:*/
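
The map_switcher_in_guest() change above splits the PTE construction into a pfn computation plus pfn_pte(), and uses __PAGE_KERNEL for the flags. A hedged sketch of that idiom in isolation: only __pa(), PAGE_SHIFT, pfn_pte() and __pgprot() are real kernel helpers here, the function and its arguments are invented.

#include <asm/page.h>
#include <asm/pgtable.h>

static void sketch_map_one_page(pte_t *shadow_pte_page, unsigned int slot,
				void *kernel_page)
{
	unsigned long pfn = __pa(kernel_page) >> PAGE_SHIFT;

	/* __PAGE_KERNEL: a present, writable, global kernel mapping. */
	shadow_pte_page[slot] = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
}
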
+diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
+index 9e189cb..ec6aa3f 100644
+--- a/drivers/lguest/segments.c
++++ b/drivers/lguest/segments.c
+@@ -58,7 +58,7 @@ static int ignored_gdt(unsigned int num)
+  * Protection Fault in the Switcher when it restores a Guest segment register
+  * which tries to use that entry.  Then we kill the Guest for causing such a
+  * mess: the message will be "unhandled trap 256". */
+-static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
++static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
+ {
+ 	unsigned int i;
  
-@@ -1050,7 +1050,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
- 	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
+@@ -71,14 +71,14 @@ static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
+ 		/* Segment descriptors contain a privilege level: the Guest is
+ 		 * sometimes careless and leaves this as 0, even though it's
+ 		 * running at privilege level 1.  If so, we fix it here. */
+-		if ((lg->arch.gdt[i].b & 0x00006000) == 0)
+-			lg->arch.gdt[i].b |= (GUEST_PL << 13);
++		if ((cpu->arch.gdt[i].b & 0x00006000) == 0)
++			cpu->arch.gdt[i].b |= (GUEST_PL << 13);
+ 
+ 		/* Each descriptor has an "accessed" bit.  If we don't set it
+ 		 * now, the CPU will try to set it when the Guest first loads
+ 		 * that entry into a segment register.  But the GDT isn't
+ 		 * writable by the Guest, so bad things can happen. */
+-		lg->arch.gdt[i].b |= 0x00000100;
++		cpu->arch.gdt[i].b |= 0x00000100;
+ 	}
+ }
  
- 	flags = vmcs_readl(GUEST_RFLAGS);
--	flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
-+	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
- 	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
- 	vmcs_writel(GUEST_RFLAGS, flags);
+@@ -109,31 +109,31 @@ void setup_default_gdt_entries(struct lguest_ro_state *state)
+ 
+ /* This routine sets up the initial Guest GDT for booting.  All entries start
+  * as 0 (unusable). */
+-void setup_guest_gdt(struct lguest *lg)
++void setup_guest_gdt(struct lg_cpu *cpu)
+ {
+ 	/* Start with full 0-4G segments... */
+-	lg->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
+-	lg->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
++	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
++	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
+ 	/* ...except the Guest is allowed to use them, so set the privilege
+ 	 * level appropriately in the flags. */
+-	lg->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
+-	lg->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
++	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
++	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
+ }
+ 
+ /*H:650 An optimization of copy_gdt(), for just the three "thread-local storage"
+  * entries. */
+-void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
++void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
+ {
+ 	unsigned int i;
  
-@@ -1107,9 +1107,9 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
- 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
+ 	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
+-		gdt[i] = lg->arch.gdt[i];
++		gdt[i] = cpu->arch.gdt[i];
+ }
  
- 	flags = vmcs_readl(GUEST_RFLAGS);
--	vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
-+	vcpu->rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ /*H:640 When the Guest is run on a different CPU, or the GDT entries have
+  * changed, copy_gdt() is called to copy the Guest's GDT entries across to this
+  * CPU's GDT. */
+-void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
++void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
+ {
+ 	unsigned int i;
  
--	flags |= IOPL_MASK | X86_EFLAGS_VM;
-+	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+@@ -141,38 +141,38 @@ void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
+ 	 * replaced.  See ignored_gdt() above. */
+ 	for (i = 0; i < GDT_ENTRIES; i++)
+ 		if (!ignored_gdt(i))
+-			gdt[i] = lg->arch.gdt[i];
++			gdt[i] = cpu->arch.gdt[i];
+ }
+ 
+ /*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT).
+  * We copy it from the Guest and tweak the entries. */
+-void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
++void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num)
+ {
+ 	/* We assume the Guest has the same number of GDT entries as the
+ 	 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
+-	if (num > ARRAY_SIZE(lg->arch.gdt))
+-		kill_guest(lg, "too many gdt entries %i", num);
++	if (num > ARRAY_SIZE(cpu->arch.gdt))
++		kill_guest(cpu, "too many gdt entries %i", num);
+ 
+ 	/* We read the whole thing in, then fix it up. */
+-	__lgread(lg, lg->arch.gdt, table, num * sizeof(lg->arch.gdt[0]));
+-	fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->arch.gdt));
++	__lgread(cpu, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0]));
++	fixup_gdt_table(cpu, 0, ARRAY_SIZE(cpu->arch.gdt));
+ 	/* Mark that the GDT changed so the core knows it has to copy it again,
+ 	 * even if the Guest is run on the same CPU. */
+-	lg->changed |= CHANGED_GDT;
++	cpu->changed |= CHANGED_GDT;
+ }
+ 
+ /* This is the fast-track version for just changing the three TLS entries.
+  * Remember that this happens on every context switch, so it's worth
+  * optimizing.  But wouldn't it be neater to have a single hypercall to cover
+  * both cases? */
+-void guest_load_tls(struct lguest *lg, unsigned long gtls)
++void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
+ {
+-	struct desc_struct *tls = &lg->arch.gdt[GDT_ENTRY_TLS_MIN];
++	struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];
+ 
+-	__lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
+-	fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
++	__lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
++	fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
+ 	/* Note that just the TLS entries have changed. */
+-	lg->changed |= CHANGED_GDT_TLS;
++	cpu->changed |= CHANGED_GDT_TLS;
+ }
+ /*:*/
  
- 	vmcs_writel(GUEST_RFLAGS, flags);
- 	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
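
The segments.c hunks above thread an lg_cpu through fixup_gdt_table(), whose job is unchanged: sanitise each Guest GDT descriptor. Restated as a stand-alone helper, assuming lguest's GUEST_PL and the two-word i386 struct desc_struct; sketch_fixup_one_desc() itself is not a real kernel function.

#ifndef GUEST_PL
#define GUEST_PL 1	/* lguest Guests run at privilege level 1 */
#endif

static void sketch_fixup_one_desc(struct desc_struct *desc)
{
	/* The DPL sits in bits 13-14 of the descriptor's high word.  A
	 * DPL of 0 would let the Guest think it runs in ring 0, so it
	 * is forced up to the Guest's privilege level. */
	if ((desc->b & 0x00006000) == 0)
		desc->b |= (GUEST_PL << 13);

	/* Pre-set the "accessed" bit (bit 8 of the high word): the CPU
	 * would otherwise try to set it on first use, and the Guest's
	 * GDT pages are not writable. */
	desc->b |= 0x00000100;
}
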
 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
-index 482aec2..44adb00 100644
+index 482aec2..61f2f8e 100644
 --- a/drivers/lguest/x86/core.c
 +++ b/drivers/lguest/x86/core.c
-@@ -94,7 +94,7 @@ static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
+@@ -60,7 +60,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
+ 		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
+ }
+ 
+-static DEFINE_PER_CPU(struct lguest *, last_guest);
++static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
+ 
+ /*S:010
+  * We approach the Switcher.
+@@ -73,16 +73,16 @@ static DEFINE_PER_CPU(struct lguest *, last_guest);
+  * since it last ran.  We saw this set in interrupts_and_traps.c and
+  * segments.c.
+  */
+-static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
++static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
+ {
+ 	/* Copying all this data can be quite expensive.  We usually run the
+ 	 * same Guest we ran last time (and that Guest hasn't run anywhere else
+ 	 * meanwhile).  If that's not the case, we pretend everything in the
+ 	 * Guest has changed. */
+-	if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
+-		__get_cpu_var(last_guest) = lg;
+-		lg->last_pages = pages;
+-		lg->changed = CHANGED_ALL;
++	if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
++		__get_cpu_var(last_cpu) = cpu;
++		cpu->last_pages = pages;
++		cpu->changed = CHANGED_ALL;
+ 	}
+ 
+ 	/* These copies are pretty cheap, so we do them unconditionally: */
+@@ -90,42 +90,42 @@ static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
+ 	pages->state.host_cr3 = __pa(current->mm->pgd);
+ 	/* Set up the Guest's page tables to see this CPU's pages (and no
+ 	 * other CPU's pages). */
+-	map_switcher_in_guest(lg, pages);
++	map_switcher_in_guest(cpu, pages);
  	/* Set up the two "TSS" members which tell the CPU what stack to use
 	 * for traps which go directly into the Guest (ie. traps at privilege
  	 * level 1). */
 -	pages->state.guest_tss.esp1 = lg->esp1;
-+	pages->state.guest_tss.sp1 = lg->esp1;
- 	pages->state.guest_tss.ss1 = lg->ss1;
+-	pages->state.guest_tss.ss1 = lg->ss1;
++	pages->state.guest_tss.esp1 = cpu->esp1;
++	pages->state.guest_tss.ss1 = cpu->ss1;
  
  	/* Copy direct-to-Guest trap entries. */
-@@ -416,7 +416,7 @@ void __init lguest_arch_host_init(void)
+-	if (lg->changed & CHANGED_IDT)
+-		copy_traps(lg, pages->state.guest_idt, default_idt_entries);
++	if (cpu->changed & CHANGED_IDT)
++		copy_traps(cpu, pages->state.guest_idt, default_idt_entries);
+ 
+ 	/* Copy all GDT entries which the Guest can change. */
+-	if (lg->changed & CHANGED_GDT)
+-		copy_gdt(lg, pages->state.guest_gdt);
++	if (cpu->changed & CHANGED_GDT)
++		copy_gdt(cpu, pages->state.guest_gdt);
+ 	/* If only the TLS entries have changed, copy them. */
+-	else if (lg->changed & CHANGED_GDT_TLS)
+-		copy_gdt_tls(lg, pages->state.guest_gdt);
++	else if (cpu->changed & CHANGED_GDT_TLS)
++		copy_gdt_tls(cpu, pages->state.guest_gdt);
+ 
+ 	/* Mark the Guest as unchanged for next time. */
+-	lg->changed = 0;
++	cpu->changed = 0;
+ }
+ 
+ /* Finally: the code to actually call into the Switcher to run the Guest. */
+-static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
++static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
+ {
+ 	/* This is a dummy value we need for GCC's sake. */
+ 	unsigned int clobber;
+ 
+ 	/* Copy the guest-specific information into this CPU's "struct
+ 	 * lguest_pages". */
+-	copy_in_guest_info(lg, pages);
++	copy_in_guest_info(cpu, pages);
+ 
+ 	/* Set the trap number to 256 (impossible value).  If we fault while
+ 	 * switching to the Guest (bad segment registers or bug), this will
+ 	 * cause us to abort the Guest. */
+-	lg->regs->trapnum = 256;
++	cpu->regs->trapnum = 256;
+ 
+ 	/* Now: we push the "eflags" register on the stack, then do an "lcall".
+ 	 * This is how we change from using the kernel code segment to using
+@@ -143,7 +143,7 @@ static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
+ 		      * 0-th argument above, ie "a").  %ebx contains the
+ 		      * physical address of the Guest's top-level page
+ 		      * directory. */
+-		     : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
++		     : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
+ 		     /* We tell gcc that all these registers could change,
+ 		      * which means we don't have to save and restore them in
+ 		      * the Switcher. */
+@@ -161,12 +161,12 @@ static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
+ 
+ /*H:040 This is the i386-specific code to setup and run the Guest.  Interrupts
+  * are disabled: we own the CPU. */
+-void lguest_arch_run_guest(struct lguest *lg)
++void lguest_arch_run_guest(struct lg_cpu *cpu)
+ {
+ 	/* Remember the awfully-named TS bit?  If the Guest has asked to set it
+ 	 * we set it now, so we can trap and pass that trap to the Guest if it
+ 	 * uses the FPU. */
+-	if (lg->ts)
++	if (cpu->ts)
+ 		lguest_set_ts();
+ 
+ 	/* SYSENTER is an optimized way of doing system calls.  We can't allow
+@@ -180,7 +180,7 @@ void lguest_arch_run_guest(struct lguest *lg)
+ 	/* Now we actually run the Guest.  It will return when something
+ 	 * interesting happens, and we can examine its registers to see what it
+ 	 * was doing. */
+-	run_guest_once(lg, lguest_pages(raw_smp_processor_id()));
++	run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));
+ 
+ 	/* Note that the "regs" pointer contains two extra entries which are
+ 	 * not really registers: a trap number which says what interrupt or
+@@ -191,11 +191,11 @@ void lguest_arch_run_guest(struct lguest *lg)
+ 	 * bad virtual address.  We have to grab this now, because once we
+ 	 * re-enable interrupts an interrupt could fault and thus overwrite
+ 	 * cr2, or we could even move off to a different CPU. */
+-	if (lg->regs->trapnum == 14)
+-		lg->arch.last_pagefault = read_cr2();
++	if (cpu->regs->trapnum == 14)
++		cpu->arch.last_pagefault = read_cr2();
+ 	/* Similarly, if we took a trap because the Guest used the FPU,
+ 	 * we have to restore the FPU it expects to see. */
+-	else if (lg->regs->trapnum == 7)
++	else if (cpu->regs->trapnum == 7)
+ 		math_state_restore();
+ 
+ 	/* Restore SYSENTER if it's supposed to be on. */
+@@ -214,22 +214,22 @@ void lguest_arch_run_guest(struct lguest *lg)
+  * When the Guest uses one of these instructions, we get a trap (General
+  * Protection Fault) and come here.  We see if it's one of those troublesome
+  * instructions and skip over it.  We return true if we did. */
+-static int emulate_insn(struct lguest *lg)
++static int emulate_insn(struct lg_cpu *cpu)
+ {
+ 	u8 insn;
+ 	unsigned int insnlen = 0, in = 0, shift = 0;
+ 	/* The eip contains the *virtual* address of the Guest's instruction:
+ 	 * guest_pa just subtracts the Guest's page_offset. */
+-	unsigned long physaddr = guest_pa(lg, lg->regs->eip);
++	unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
+ 
+ 	/* This must be the Guest kernel trying to do something, not userspace!
+ 	 * The bottom two bits of the CS segment register are the privilege
+ 	 * level. */
+-	if ((lg->regs->cs & 3) != GUEST_PL)
++	if ((cpu->regs->cs & 3) != GUEST_PL)
+ 		return 0;
+ 
+ 	/* Decoding x86 instructions is icky. */
+-	insn = lgread(lg, physaddr, u8);
++	insn = lgread(cpu, physaddr, u8);
+ 
+ 	/* 0x66 is an "operand prefix".  It means it's using the upper 16 bits
+ 	   of the eax register. */
+@@ -237,7 +237,7 @@ static int emulate_insn(struct lguest *lg)
+ 		shift = 16;
+ 		/* The instruction is 1 byte so far, read the next byte. */
+ 		insnlen = 1;
+-		insn = lgread(lg, physaddr + insnlen, u8);
++		insn = lgread(cpu, physaddr + insnlen, u8);
+ 	}
+ 
+ 	/* We can ignore the lower bit for the moment and decode the 4 opcodes
+@@ -268,26 +268,26 @@ static int emulate_insn(struct lguest *lg)
+ 	if (in) {
+ 		/* Lower bit tells us whether it's a 16 or 32 bit access */
+ 		if (insn & 0x1)
+-			lg->regs->eax = 0xFFFFFFFF;
++			cpu->regs->eax = 0xFFFFFFFF;
+ 		else
+-			lg->regs->eax |= (0xFFFF << shift);
++			cpu->regs->eax |= (0xFFFF << shift);
+ 	}
+ 	/* Finally, we've "done" the instruction, so move past it. */
+-	lg->regs->eip += insnlen;
++	cpu->regs->eip += insnlen;
+ 	/* Success! */
+ 	return 1;
+ }
+ 
+ /*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
+-void lguest_arch_handle_trap(struct lguest *lg)
++void lguest_arch_handle_trap(struct lg_cpu *cpu)
+ {
+-	switch (lg->regs->trapnum) {
++	switch (cpu->regs->trapnum) {
+ 	case 13: /* We've intercepted a General Protection Fault. */
+ 		/* Check if this was one of those annoying IN or OUT
+ 		 * instructions which we need to emulate.  If so, we just go
+ 		 * back into the Guest after we've done it. */
+-		if (lg->regs->errcode == 0) {
+-			if (emulate_insn(lg))
++		if (cpu->regs->errcode == 0) {
++			if (emulate_insn(cpu))
+ 				return;
+ 		}
+ 		break;
+@@ -301,7 +301,8 @@ void lguest_arch_handle_trap(struct lguest *lg)
+ 		 *
+ 		 * The errcode tells whether this was a read or a write, and
+ 		 * whether kernel or userspace code. */
+-		if (demand_page(lg, lg->arch.last_pagefault, lg->regs->errcode))
++		if (demand_page(cpu, cpu->arch.last_pagefault,
++				cpu->regs->errcode))
+ 			return;
+ 
+ 		/* OK, it's really not there (or not OK): the Guest needs to
+@@ -311,15 +312,16 @@ void lguest_arch_handle_trap(struct lguest *lg)
+ 		 * Note that if the Guest were really messed up, this could
+ 		 * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
+ 		 * lg->lguest_data could be NULL */
+-		if (lg->lguest_data &&
+-		    put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2))
+-			kill_guest(lg, "Writing cr2");
++		if (cpu->lg->lguest_data &&
++		    put_user(cpu->arch.last_pagefault,
++			     &cpu->lg->lguest_data->cr2))
++			kill_guest(cpu, "Writing cr2");
+ 		break;
+ 	case 7: /* We've intercepted a Device Not Available fault. */
+ 		/* If the Guest doesn't want to know, we already restored the
+ 		 * Floating Point Unit, so we just continue without telling
+ 		 * it. */
+-		if (!lg->ts)
++		if (!cpu->ts)
+ 			return;
+ 		break;
+ 	case 32 ... 255:
+@@ -332,19 +334,19 @@ void lguest_arch_handle_trap(struct lguest *lg)
+ 	case LGUEST_TRAP_ENTRY:
+ 		/* Our 'struct hcall_args' maps directly over our regs: we set
+ 		 * up the pointer now to indicate a hypercall is pending. */
+-		lg->hcall = (struct hcall_args *)lg->regs;
++		cpu->hcall = (struct hcall_args *)cpu->regs;
+ 		return;
+ 	}
+ 
+ 	/* We didn't handle the trap, so it needs to go to the Guest. */
+-	if (!deliver_trap(lg, lg->regs->trapnum))
++	if (!deliver_trap(cpu, cpu->regs->trapnum))
+ 		/* If the Guest doesn't have a handler (either it hasn't
+ 		 * registered any yet, or it's one of the faults we don't let
+ 		 * it handle), it dies with a cryptic error message. */
+-		kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
+-			   lg->regs->trapnum, lg->regs->eip,
+-			   lg->regs->trapnum == 14 ? lg->arch.last_pagefault
+-			   : lg->regs->errcode);
++		kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
++			   cpu->regs->trapnum, cpu->regs->eip,
++			   cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
++			   : cpu->regs->errcode);
+ }
+ 
+ /* Now we can look at each of the routines this calls, in increasing order of
+@@ -416,7 +418,7 @@ void __init lguest_arch_host_init(void)
  		/* We know where we want the stack to be when the Guest enters
  		 * the switcher: in pages->regs.  The stack grows upwards, so
  		 * we start it at the end of that structure. */
@@ -278566,7 +418839,7 @@
  		/* And this is the GDT entry to use for the stack: we keep a
  		 * couple of special LGUEST entries. */
  		state->guest_tss.ss0 = LGUEST_DS;
-@@ -459,7 +459,7 @@ void __init lguest_arch_host_init(void)
+@@ -459,7 +461,7 @@ void __init lguest_arch_host_init(void)
  
  	/* We don't need the complexity of CPUs coming and going while we're
  	 * doing this. */
@@ -278575,7 +418848,7 @@
  	if (cpu_has_pge) { /* We have a broader idea of "global". */
  		/* Remember that this was originally set (for cleanup). */
  		cpu_had_pge = 1;
-@@ -469,20 +469,20 @@ void __init lguest_arch_host_init(void)
+@@ -469,35 +471,35 @@ void __init lguest_arch_host_init(void)
  		/* Turn off the feature in the global feature set. */
  		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
  	}
@@ -278599,21 +418872,412 @@
  }
  
  
+ /*H:122 The i386-specific hypercalls simply farm out to the right functions. */
+-int lguest_arch_do_hcall(struct lguest *lg, struct hcall_args *args)
++int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
+ {
+ 	switch (args->arg0) {
+ 	case LHCALL_LOAD_GDT:
+-		load_guest_gdt(lg, args->arg1, args->arg2);
++		load_guest_gdt(cpu, args->arg1, args->arg2);
+ 		break;
+ 	case LHCALL_LOAD_IDT_ENTRY:
+-		load_guest_idt_entry(lg, args->arg1, args->arg2, args->arg3);
++		load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3);
+ 		break;
+ 	case LHCALL_LOAD_TLS:
+-		guest_load_tls(lg, args->arg1);
++		guest_load_tls(cpu, args->arg1);
+ 		break;
+ 	default:
+ 		/* Bad Guest.  Bad! */
+@@ -507,13 +509,14 @@ int lguest_arch_do_hcall(struct lguest *lg, struct hcall_args *args)
+ }
+ 
+ /*H:126 i386-specific hypercall initialization: */
+-int lguest_arch_init_hypercalls(struct lguest *lg)
++int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
+ {
+ 	u32 tsc_speed;
+ 
+ 	/* The pointer to the Guest's "struct lguest_data" is the only
+ 	 * argument.  We check that address now. */
+-	if (!lguest_address_ok(lg, lg->hcall->arg1, sizeof(*lg->lguest_data)))
++	if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1,
++			       sizeof(*cpu->lg->lguest_data)))
+ 		return -EFAULT;
+ 
+ 	/* Having checked it, we simply set lg->lguest_data to point straight
+@@ -521,7 +524,7 @@ int lguest_arch_init_hypercalls(struct lguest *lg)
+ 	 * copy_to_user/from_user from now on, instead of lgread/write.  I put
+ 	 * this in to show that I'm not immune to writing stupid
+ 	 * optimizations. */
+-	lg->lguest_data = lg->mem_base + lg->hcall->arg1;
++	cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1;
+ 
+ 	/* We insist that the Time Stamp Counter exist and doesn't change with
+ 	 * cpu frequency.  Some devious chip manufacturers decided that TSC
+@@ -534,12 +537,12 @@ int lguest_arch_init_hypercalls(struct lguest *lg)
+ 		tsc_speed = tsc_khz;
+ 	else
+ 		tsc_speed = 0;
+-	if (put_user(tsc_speed, &lg->lguest_data->tsc_khz))
++	if (put_user(tsc_speed, &cpu->lg->lguest_data->tsc_khz))
+ 		return -EFAULT;
+ 
+ 	/* The interrupt code might not like the system call vector. */
+-	if (!check_syscall_vector(lg))
+-		kill_guest(lg, "bad syscall vector");
++	if (!check_syscall_vector(cpu->lg))
++		kill_guest(cpu, "bad syscall vector");
+ 
+ 	return 0;
+ }
+@@ -548,9 +551,9 @@ int lguest_arch_init_hypercalls(struct lguest *lg)
+  *
+  * Most of the Guest's registers are left alone: we used get_zeroed_page() to
+  * allocate the structure, so they will be 0. */
+-void lguest_arch_setup_regs(struct lguest *lg, unsigned long start)
++void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
+ {
+-	struct lguest_regs *regs = lg->regs;
++	struct lguest_regs *regs = cpu->regs;
+ 
+ 	/* There are four "segment" registers which the Guest needs to boot:
+ 	 * The "code segment" register (cs) refers to the kernel code segment
+@@ -577,5 +580,5 @@ void lguest_arch_setup_regs(struct lguest *lg, unsigned long start)
+ 
+ 	/* There are a couple of GDT entries the Guest expects when first
+ 	 * booting. */
+-	setup_guest_gdt(lg);
++	setup_guest_gdt(cpu);
+ }
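
copy_in_guest_info() above keys its cheap/expensive decision off a per-CPU pointer to whichever lg_cpu last ran on this physical CPU. A minimal sketch of that caching pattern, with invented sketch_* names standing in for the driver's types:

#include <linux/percpu.h>

struct sketch_vcpu {
	void *last_pages;
	unsigned int changed;
};

#define SKETCH_CHANGED_ALL	(~0U)

static DEFINE_PER_CPU(struct sketch_vcpu *, sketch_last_vcpu);

static void sketch_note_run(struct sketch_vcpu *vcpu, void *pages)
{
	/* If a different vCPU (or a different pages mapping) was the
	 * last to run on this physical CPU, every cached structure has
	 * to be treated as stale and recopied. */
	if (__get_cpu_var(sketch_last_vcpu) != vcpu || vcpu->last_pages != pages) {
		__get_cpu_var(sketch_last_vcpu) = vcpu;
		vcpu->last_pages = pages;
		vcpu->changed = SKETCH_CHANGED_ALL;
	}
}
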
 diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
-index 5c742a5..b7adde4 100644
+index 5c742a5..7ce0ea6 100644
 --- a/drivers/macintosh/adb.c
 +++ b/drivers/macintosh/adb.c
-@@ -875,5 +875,5 @@ adbdev_init(void)
+@@ -35,6 +35,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
+ #include <linux/device.h>
++#include <linux/kthread.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/semaphore.h>
+@@ -82,21 +83,11 @@ struct adb_driver *adb_controller;
+ BLOCKING_NOTIFIER_HEAD(adb_client_list);
+ static int adb_got_sleep;
+ static int adb_inited;
+-static pid_t adb_probe_task_pid;
+ static DECLARE_MUTEX(adb_probe_mutex);
+-static struct completion adb_probe_task_comp;
+ static int sleepy_trackpad;
+ static int autopoll_devs;
+ int __adb_probe_sync;
+ 
+-#ifdef CONFIG_PM_SLEEP
+-static void adb_notify_sleep(struct pmu_sleep_notifier *self, int when);
+-static struct pmu_sleep_notifier adb_sleep_notifier = {
+-	adb_notify_sleep,
+-	SLEEP_LEVEL_ADB,
+-};
+-#endif
+-
+ static int adb_scan_bus(void);
+ static int do_adb_reset_bus(void);
+ static void adbdev_init(void);
+@@ -134,16 +125,6 @@ static void printADBreply(struct adb_request *req)
+ }
+ #endif
+ 
+-
+-static __inline__ void adb_wait_ms(unsigned int ms)
+-{
+-	if (current->pid && adb_probe_task_pid &&
+-	  adb_probe_task_pid == current->pid)
+-		msleep(ms);
+-	else
+-		mdelay(ms);
+-}
+-
+ static int adb_scan_bus(void)
+ {
+ 	int i, highFree=0, noMovement;
+@@ -248,13 +229,10 @@ static int adb_scan_bus(void)
+ static int
+ adb_probe_task(void *x)
+ {
+-	strcpy(current->comm, "kadbprobe");
+-
+ 	printk(KERN_INFO "adb: starting probe task...\n");
+ 	do_adb_reset_bus();
+ 	printk(KERN_INFO "adb: finished probe task...\n");
+ 
+-	adb_probe_task_pid = 0;
+ 	up(&adb_probe_mutex);
+ 
+ 	return 0;
+@@ -263,7 +241,7 @@ adb_probe_task(void *x)
+ static void
+ __adb_probe_task(struct work_struct *bullshit)
+ {
+-	adb_probe_task_pid = kernel_thread(adb_probe_task, NULL, SIGCHLD | CLONE_KERNEL);
++	kthread_run(adb_probe_task, NULL, "kadbprobe");
+ }
+ 
+ static DECLARE_WORK(adb_reset_work, __adb_probe_task);
+@@ -281,6 +259,36 @@ adb_reset_bus(void)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PM
++/*
++ * notify clients before sleep
++ */
++static int adb_suspend(struct platform_device *dev, pm_message_t state)
++{
++	adb_got_sleep = 1;
++	/* We need to get a lock on the probe thread */
++	down(&adb_probe_mutex);
++	/* Stop autopoll */
++	if (adb_controller->autopoll)
++		adb_controller->autopoll(0);
++	blocking_notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL);
++
++	return 0;
++}
++
++/*
++ * reset bus after sleep
++ */
++static int adb_resume(struct platform_device *dev)
++{
++	adb_got_sleep = 0;
++	up(&adb_probe_mutex);
++	adb_reset_bus();
++
++	return 0;
++}
++#endif /* CONFIG_PM */
++
+ int __init adb_init(void)
+ {
+ 	struct adb_driver *driver;
+@@ -313,15 +321,12 @@ int __init adb_init(void)
+ 		printk(KERN_WARNING "Warning: no ADB interface detected\n");
+ 		adb_controller = NULL;
+ 	} else {
+-#ifdef CONFIG_PM_SLEEP
+-		pmu_register_sleep_notifier(&adb_sleep_notifier);
+-#endif /* CONFIG_PM */
+ #ifdef CONFIG_PPC
+ 		if (machine_is_compatible("AAPL,PowerBook1998") ||
+ 			machine_is_compatible("PowerBook1,1"))
+ 			sleepy_trackpad = 1;
+ #endif /* CONFIG_PPC */
+-		init_completion(&adb_probe_task_comp);
++
+ 		adbdev_init();
+ 		adb_reset_bus();
+ 	}
+@@ -330,33 +335,6 @@ int __init adb_init(void)
+ 
+ __initcall(adb_init);
+ 
+-#ifdef CONFIG_PM
+-/*
+- * notify clients before sleep and reset bus afterwards
+- */
+-void
+-adb_notify_sleep(struct pmu_sleep_notifier *self, int when)
+-{
+-	switch (when) {
+-	case PBOOK_SLEEP_REQUEST:
+-		adb_got_sleep = 1;
+-		/* We need to get a lock on the probe thread */
+-		down(&adb_probe_mutex);
+-		/* Stop autopoll */
+-		if (adb_controller->autopoll)
+-			adb_controller->autopoll(0);
+-		blocking_notifier_call_chain(&adb_client_list,
+-			ADB_MSG_POWERDOWN, NULL);
+-		break;
+-	case PBOOK_WAKE:
+-		adb_got_sleep = 0;
+-		up(&adb_probe_mutex);
+-		adb_reset_bus();
+-		break;
+-	}
+-}
+-#endif /* CONFIG_PM */
+-
+ static int
+ do_adb_reset_bus(void)
+ {
+@@ -373,7 +351,7 @@ do_adb_reset_bus(void)
+ 
+ 	if (sleepy_trackpad) {
+ 		/* Let the trackpad settle down */
+-		adb_wait_ms(500);
++		msleep(500);
+ 	}
+ 
+ 	down(&adb_handler_sem);
+@@ -389,7 +367,7 @@ do_adb_reset_bus(void)
+ 
+ 	if (sleepy_trackpad) {
+ 		/* Let the trackpad settle down */
+-		adb_wait_ms(1500);
++		msleep(1500);
+ 	}
+ 
+ 	if (!ret) {
+@@ -413,41 +391,27 @@ adb_poll(void)
+ 	adb_controller->poll();
+ }
+ 
+-static void
+-adb_probe_wakeup(struct adb_request *req)
++static void adb_sync_req_done(struct adb_request *req)
+ {
+-	complete(&adb_probe_task_comp);
+-}
++	struct completion *comp = req->arg;
+ 
+-/* Static request used during probe */
+-static struct adb_request adb_sreq;
+-static unsigned long adb_sreq_lock; // Use semaphore ! */ 
++	complete(comp);
++}
+ 
+ int
+ adb_request(struct adb_request *req, void (*done)(struct adb_request *),
+ 	    int flags, int nbytes, ...)
+ {
+ 	va_list list;
+-	int i, use_sreq;
++	int i;
+ 	int rc;
++	struct completion comp;
+ 
+ 	if ((adb_controller == NULL) || (adb_controller->send_request == NULL))
+ 		return -ENXIO;
+ 	if (nbytes < 1)
+ 		return -EINVAL;
+-	if (req == NULL && (flags & ADBREQ_NOSEND))
+-		return -EINVAL;
+-	
+-	if (req == NULL) {
+-		if (test_and_set_bit(0,&adb_sreq_lock)) {
+-			printk("adb.c: Warning: contention on static request !\n");
+-			return -EPERM;
+-		}
+-		req = &adb_sreq;
+-		flags |= ADBREQ_SYNC;
+-		use_sreq = 1;
+-	} else
+-		use_sreq = 0;
++
+ 	req->nbytes = nbytes+1;
+ 	req->done = done;
+ 	req->reply_expected = flags & ADBREQ_REPLY;
+@@ -460,25 +424,18 @@ adb_request(struct adb_request *req, void (*done)(struct adb_request *),
+ 	if (flags & ADBREQ_NOSEND)
+ 		return 0;
+ 
+-	/* Synchronous requests send from the probe thread cause it to
+-	 * block. Beware that the "done" callback will be overriden !
+-	 */
+-	if ((flags & ADBREQ_SYNC) &&
+-	    (current->pid && adb_probe_task_pid &&
+-	    adb_probe_task_pid == current->pid)) {
+-		req->done = adb_probe_wakeup;
+-		rc = adb_controller->send_request(req, 0);
+-		if (rc || req->complete)
+-			goto bail;
+-		wait_for_completion(&adb_probe_task_comp);
+-		rc = 0;
+-		goto bail;
++	/* Synchronous requests block using an on-stack completion */
++	if (flags & ADBREQ_SYNC) {
++		WARN_ON(done);
++		req->done = adb_sync_req_done;
++		req->arg = &comp;
++		init_completion(&comp);
+ 	}
+ 
+-	rc = adb_controller->send_request(req, flags & ADBREQ_SYNC);
+-bail:
+-	if (use_sreq)
+-		clear_bit(0, &adb_sreq_lock);
++	rc = adb_controller->send_request(req, 0);
++
++	if ((flags & ADBREQ_SYNC) && !rc && !req->complete)
++		wait_for_completion(&comp);
+ 
+ 	return rc;
+ }
+@@ -864,7 +821,29 @@ static const struct file_operations adb_fops = {
+ 	.release	= adb_release,
+ };
+ 
+-static void
++static struct platform_driver adb_pfdrv = {
++	.driver = {
++		.name = "adb",
++	},
++#ifdef CONFIG_PM
++	.suspend = adb_suspend,
++	.resume = adb_resume,
++#endif
++};
++
++static struct platform_device adb_pfdev = {
++	.name = "adb",
++};
++
++static int __init
++adb_dummy_probe(struct platform_device *dev)
++{
++	if (dev == &adb_pfdev)
++		return 0;
++	return -ENODEV;
++}
++
++static void __init
+ adbdev_init(void)
+ {
+ 	if (register_chrdev(ADB_MAJOR, "adb", &adb_fops)) {
+@@ -875,5 +854,8 @@ adbdev_init(void)
  	adb_dev_class = class_create(THIS_MODULE, "adb");
  	if (IS_ERR(adb_dev_class))
  		return;
 -	class_device_create(adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), NULL, "adb");
 +	device_create(adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), "adb");
++
++	platform_device_register(&adb_pfdev);
++	platform_driver_probe(&adb_pfdrv, adb_dummy_probe);
  }
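
The adb_request() rework above replaces the static probe-time request and pid comparison with a plain on-stack completion: the synchronous caller sleeps in wait_for_completion() and the asynchronous "done" callback wakes it, hung off req->arg. A sketch of the pattern with invented sketch_* types:

#include <linux/completion.h>

struct sketch_req {
	void (*done)(struct sketch_req *req);
	void *arg;
	int complete;
};

static void sketch_sync_done(struct sketch_req *req)
{
	complete((struct completion *)req->arg);
}

static int sketch_submit_and_wait(struct sketch_req *req,
				  int (*send)(struct sketch_req *req))
{
	struct completion comp;
	int rc;

	/* Point the async callback at an on-stack completion... */
	init_completion(&comp);
	req->done = sketch_sync_done;
	req->arg = &comp;

	rc = send(req);
	/* ...and block until it fires, unless the request already
	 * finished (or failed) synchronously. */
	if (!rc && !req->complete)
		wait_for_completion(&comp);
	return rc;
}
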
 diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
-index 48d647a..eaba4a9 100644
+index 48d647a..18dde2a 100644
 --- a/drivers/macintosh/mediabay.c
 +++ b/drivers/macintosh/mediabay.c
+@@ -20,6 +20,7 @@
+ #include <linux/stddef.h>
+ #include <linux/init.h>
+ #include <linux/ide.h>
++#include <linux/kthread.h>
+ #include <asm/prom.h>
+ #include <asm/pgtable.h>
+ #include <asm/io.h>
+@@ -35,7 +36,6 @@
+ 
+ 
+ #define MB_DEBUG
+-#define MB_IGNORE_SIGNALS
+ 
+ #ifdef MB_DEBUG
+ #define MBDBG(fmt, arg...)	printk(KERN_INFO fmt , ## arg)
 @@ -563,7 +563,8 @@ static void media_bay_step(int i)
  				ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL);
  				hw.irq = bay->cd_irq;
@@ -278624,11 +419288,264 @@
  				pmu_resume();
  			}
  			if (bay->cd_index == -1) {
+@@ -622,12 +623,7 @@ static int media_bay_task(void *x)
+ {
+ 	int	i;
+ 
+-	strcpy(current->comm, "media-bay");
+-#ifdef MB_IGNORE_SIGNALS
+-	sigfillset(&current->blocked);
+-#endif
+-
+-	for (;;) {
++	while (!kthread_should_stop()) {
+ 		for (i = 0; i < media_bay_count; ++i) {
+ 			down(&media_bays[i].lock);
+ 			if (!media_bays[i].sleeping)
+@@ -636,9 +632,8 @@ static int media_bay_task(void *x)
+ 		}
+ 
+ 		msleep_interruptible(MB_POLL_DELAY);
+-		if (signal_pending(current))
+-			return 0;
+ 	}
++	return 0;
+ }
+ 
+ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_device_id *match)
+@@ -699,7 +694,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
+ 
+ 	/* Startup kernel thread */
+ 	if (i == 0)
+-		kernel_thread(media_bay_task, NULL, CLONE_KERNEL);
++		kthread_run(media_bay_task, NULL, "media-bay");
+ 
+ 	return 0;
+ 
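
media_bay_task() above is moved from kernel_thread() plus signal-based termination to the kthread API: the loop now polls kthread_should_stop() instead of signal_pending(). A generic sketch of that loop shape, names invented:

#include <linux/kthread.h>
#include <linux/delay.h>

static int sketch_poll_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* ... poll the hardware here ... */
		msleep_interruptible(100);
	}
	return 0;
}

/* Started with: task = kthread_run(sketch_poll_thread, NULL, "sketch-poll");
 * Stopped with: kthread_stop(task); */
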
+diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
+index 276945d..54f4942 100644
+--- a/drivers/macintosh/therm_adt746x.c
++++ b/drivers/macintosh/therm_adt746x.c
+@@ -553,6 +553,7 @@ thermostat_init(void)
+ 	struct device_node* np;
+ 	const u32 *prop;
+ 	int i = 0, offset = 0;
++	int err;
+ 	
+ 	np = of_find_node_by_name(NULL, "fan");
+ 	if (!np)
+@@ -612,17 +613,20 @@ thermostat_init(void)
+ 		return -ENODEV;
+ 	}
+ 	
+-	device_create_file(&of_dev->dev, &dev_attr_sensor1_temperature);
+-	device_create_file(&of_dev->dev, &dev_attr_sensor2_temperature);
+-	device_create_file(&of_dev->dev, &dev_attr_sensor1_limit);
+-	device_create_file(&of_dev->dev, &dev_attr_sensor2_limit);
+-	device_create_file(&of_dev->dev, &dev_attr_sensor1_location);
+-	device_create_file(&of_dev->dev, &dev_attr_sensor2_location);
+-	device_create_file(&of_dev->dev, &dev_attr_limit_adjust);
+-	device_create_file(&of_dev->dev, &dev_attr_specified_fan_speed);
+-	device_create_file(&of_dev->dev, &dev_attr_sensor1_fan_speed);
++	err = device_create_file(&of_dev->dev, &dev_attr_sensor1_temperature);
++	err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_temperature);
++	err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_limit);
++	err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_limit);
++	err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_location);
++	err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_location);
++	err |= device_create_file(&of_dev->dev, &dev_attr_limit_adjust);
++	err |= device_create_file(&of_dev->dev, &dev_attr_specified_fan_speed);
++	err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_fan_speed);
+ 	if(therm_type == ADT7460)
+-		device_create_file(&of_dev->dev, &dev_attr_sensor2_fan_speed);
++		err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_fan_speed);
++	if (err)
++		printk(KERN_WARNING
++			"Failed to create temperature attribute file(s).\n");
+ 
+ #ifndef CONFIG_I2C_POWERMAC
+ 	request_module("i2c-powermac");
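
thermostat_init() above starts checking the result of every device_create_file() call, OR-ing the return codes together and warning once if any attribute could not be created. The same idiom as a small stand-alone helper; sketch_create_attrs() is invented and the attributes are passed in rather than named.

#include <linux/device.h>

static void sketch_create_attrs(struct device *dev,
				struct device_attribute *first,
				struct device_attribute *second)
{
	int err;

	/* Collect the return codes: any nonzero value means at least
	 * one sysfs file is missing, which is worth one warning. */
	err  = device_create_file(dev, first);
	err |= device_create_file(dev, second);
	if (err)
		dev_warn(dev, "failed to create attribute file(s)\n");
}
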
+diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
+index e43554e..1e0a69a 100644
+--- a/drivers/macintosh/therm_pm72.c
++++ b/drivers/macintosh/therm_pm72.c
+@@ -121,6 +121,7 @@
+ #include <linux/reboot.h>
+ #include <linux/kmod.h>
+ #include <linux/i2c.h>
++#include <linux/kthread.h>
+ #include <asm/prom.h>
+ #include <asm/machdep.h>
+ #include <asm/io.h>
+@@ -161,7 +162,7 @@ static struct slots_pid_state		slots_state;
+ static int				state;
+ static int				cpu_count;
+ static int				cpu_pid_type;
+-static pid_t				ctrl_task;
++static struct task_struct		*ctrl_task;
+ static struct completion		ctrl_complete;
+ static int				critical_state;
+ static int				rackmac;
+@@ -1156,6 +1157,8 @@ static void do_monitor_cpu_rack(struct cpu_pid_state *state)
+  */
+ static int init_cpu_state(struct cpu_pid_state *state, int index)
+ {
++	int err;
++
+ 	state->index = index;
+ 	state->first = 1;
+ 	state->rpm = (cpu_pid_type == CPU_PID_TYPE_RACKMAC) ? 4000 : 1000;
+@@ -1181,18 +1184,21 @@ static int init_cpu_state(struct cpu_pid_state *state, int index)
+ 	DBG("CPU %d Using %d power history entries\n", index, state->count_power);
+ 
+ 	if (index == 0) {
+-		device_create_file(&of_dev->dev, &dev_attr_cpu0_temperature);
+-		device_create_file(&of_dev->dev, &dev_attr_cpu0_voltage);
+-		device_create_file(&of_dev->dev, &dev_attr_cpu0_current);
+-		device_create_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
+-		device_create_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
++		err = device_create_file(&of_dev->dev, &dev_attr_cpu0_temperature);
++		err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_voltage);
++		err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_current);
++		err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
++		err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
+ 	} else {
+-		device_create_file(&of_dev->dev, &dev_attr_cpu1_temperature);
+-		device_create_file(&of_dev->dev, &dev_attr_cpu1_voltage);
+-		device_create_file(&of_dev->dev, &dev_attr_cpu1_current);
+-		device_create_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
+-		device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
++		err = device_create_file(&of_dev->dev, &dev_attr_cpu1_temperature);
++		err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_voltage);
++		err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_current);
++		err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
++		err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
+ 	}
++	if (err)
++		printk(KERN_WARNING "Failed to create some of the attribute "
++			"files for CPU %d\n", index);
+ 
+ 	return 0;
+  fail:
+@@ -1328,6 +1334,7 @@ static int init_backside_state(struct backside_pid_state *state)
+ {
+ 	struct device_node *u3;
+ 	int u3h = 1; /* conservative by default */
++	int err;
+ 
+ 	/*
+ 	 * There are different PID params for machines with U3 and machines
+@@ -1379,8 +1386,11 @@ static int init_backside_state(struct backside_pid_state *state)
+ 	if (state->monitor == NULL)
+ 		return -ENODEV;
+ 
+-	device_create_file(&of_dev->dev, &dev_attr_backside_temperature);
+-	device_create_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
++	err = device_create_file(&of_dev->dev, &dev_attr_backside_temperature);
++	err |= device_create_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
++	if (err)
++		printk(KERN_WARNING "Failed to create attribute file(s)"
++			" for backside fan\n");
+ 
+ 	return 0;
+ }
+@@ -1491,6 +1501,8 @@ static void do_monitor_drives(struct drives_pid_state *state)
+  */
+ static int init_drives_state(struct drives_pid_state *state)
+ {
++	int err;
++
+ 	state->ticks = 1;
+ 	state->first = 1;
+ 	state->rpm = 1000;
+@@ -1499,8 +1511,11 @@ static int init_drives_state(struct drives_pid_state *state)
+ 	if (state->monitor == NULL)
+ 		return -ENODEV;
+ 
+-	device_create_file(&of_dev->dev, &dev_attr_drives_temperature);
+-	device_create_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
++	err = device_create_file(&of_dev->dev, &dev_attr_drives_temperature);
++	err |= device_create_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
++	if (err)
++		printk(KERN_WARNING "Failed to create attribute file(s)"
++			" for drives bay fan\n");
+ 
+ 	return 0;
+ }
+@@ -1621,7 +1636,9 @@ static int init_dimms_state(struct dimm_pid_state *state)
+ 	if (state->monitor == NULL)
+ 		return -ENODEV;
+ 
+-       	device_create_file(&of_dev->dev, &dev_attr_dimms_temperature);
++	if (device_create_file(&of_dev->dev, &dev_attr_dimms_temperature))
++		printk(KERN_WARNING "Failed to create attribute file"
++			" for DIMM temperature\n");
+ 
+ 	return 0;
+ }
+@@ -1731,6 +1748,8 @@ static void do_monitor_slots(struct slots_pid_state *state)
+  */
+ static int init_slots_state(struct slots_pid_state *state)
+ {
++	int err;
++
+ 	state->ticks = 1;
+ 	state->first = 1;
+ 	state->pwm = 50;
+@@ -1739,8 +1758,11 @@ static int init_slots_state(struct slots_pid_state *state)
+ 	if (state->monitor == NULL)
+ 		return -ENODEV;
+ 
+-	device_create_file(&of_dev->dev, &dev_attr_slots_temperature);
+-	device_create_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
++	err = device_create_file(&of_dev->dev, &dev_attr_slots_temperature);
++	err |= device_create_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
++	if (err)
++		printk(KERN_WARNING "Failed to create attribute file(s)"
++			" for slots bay fan\n");
+ 
+ 	return 0;
+ }
+@@ -1779,8 +1801,6 @@ static int call_critical_overtemp(void)
+  */
+ static int main_control_loop(void *x)
+ {
+-	daemonize("kfand");
+-
+ 	DBG("main_control_loop started\n");
+ 
+ 	down(&driver_lock);
+@@ -1956,7 +1976,7 @@ static void start_control_loops(void)
+ {
+ 	init_completion(&ctrl_complete);
+ 
+-	ctrl_task = kernel_thread(main_control_loop, NULL, SIGCHLD | CLONE_KERNEL);
++	ctrl_task = kthread_run(main_control_loop, NULL, "kfand");
+ }
+ 
+ /*
+@@ -1964,7 +1984,7 @@ static void start_control_loops(void)
+  */
+ static void stop_control_loops(void)
+ {
+-	if (ctrl_task != 0)
++	if (ctrl_task)
+ 		wait_for_completion(&ctrl_complete);
+ }
+ 
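
therm_pm72 above swaps its pid_t ctrl_task and kernel_thread()/daemonize() pair for a struct task_struct pointer returned by kthread_run(). The driver itself still synchronises shutdown through ctrl_complete; for comparison, the generic start/stop idiom looks roughly like this (sketch_* names invented):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *sketch_fan_task;

static int sketch_fan_loop(void *unused)
{
	/* No daemonize() needed: kthread_run() already hands us a
	 * proper kernel thread with the requested name. */
	while (!kthread_should_stop()) {
		/* ... run one control pass over the fans ... */
		msleep_interruptible(1000);
	}
	return 0;
}

static void sketch_fan_start(void)
{
	sketch_fan_task = kthread_run(sketch_fan_loop, NULL, "kfand");
	if (IS_ERR(sketch_fan_task))
		sketch_fan_task = NULL;
}

static void sketch_fan_stop(void)
{
	if (sketch_fan_task)
		kthread_stop(sketch_fan_task);
}
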
 diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
-index 5452da1..b66da74 100644
+index 5452da1..d11821a 100644
 --- a/drivers/macintosh/therm_windtunnel.c
 +++ b/drivers/macintosh/therm_windtunnel.c
-@@ -47,12 +47,10 @@
+@@ -36,6 +36,7 @@
+ #include <linux/i2c.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
++#include <linux/kthread.h>
+ 
+ #include <asm/prom.h>
+ #include <asm/machdep.h>
+@@ -47,12 +48,10 @@
  
 #define LOG_TEMP		0			/* continuously log temperature */
  
@@ -278642,7 +419559,99 @@
  						 0x4c, 0x4d, 0x4e, 0x4f,
  						 0x2c, 0x2d, 0x2e, 0x2f,
  						 I2C_CLIENT_END };
-@@ -357,7 +355,6 @@ static struct i2c_driver g4fan_driver = {
+@@ -61,8 +60,7 @@ I2C_CLIENT_INSMOD;
+ 
+ static struct {
+ 	volatile int		running;
+-	struct completion	completion;
+-	pid_t			poll_task;
++	struct task_struct	*poll_task;
+ 	
+ 	struct semaphore 	lock;
+ 	struct of_device	*of_dev;
+@@ -223,6 +221,7 @@ static void
+ setup_hardware( void )
+ {
+ 	int val;
++	int err;
+ 
+ 	/* save registers (if we unload the module) */
+ 	x.r0 = read_reg( x.fan, 0x00, 1 );
+@@ -265,8 +264,11 @@ setup_hardware( void )
+ 	x.upind = -1;
+ 	/* tune_fan( fan_up_table[x.upind].fan_setting ); */
+ 
+-	device_create_file( &x.of_dev->dev, &dev_attr_cpu_temperature );
+-	device_create_file( &x.of_dev->dev, &dev_attr_case_temperature );
++	err = device_create_file( &x.of_dev->dev, &dev_attr_cpu_temperature );
++	err |= device_create_file( &x.of_dev->dev, &dev_attr_case_temperature );
++	if (err)
++		printk(KERN_WARNING
++			"Failed to create temperature attribute file(s).\n");
+ }
+ 
+ static void
+@@ -282,27 +284,27 @@ restore_regs( void )
+ 	write_reg( x.fan, 0x00, x.r0, 1 );
+ }
+ 
+-static int
+-control_loop( void *dummy )
++static int control_loop(void *dummy)
+ {
+-	daemonize("g4fand");
+-
+-	down( &x.lock );
++	down(&x.lock);
+ 	setup_hardware();
++	up(&x.lock);
+ 
+-	while( x.running ) {
+-		up( &x.lock );
+-
++	for (;;) {
+ 		msleep_interruptible(8000);
+-		
+-		down( &x.lock );
++		if (kthread_should_stop())
++			break;
++
++		down(&x.lock);
+ 		poll_temp();
++		up(&x.lock);
+ 	}
+ 
++	down(&x.lock);
+ 	restore_regs();
+-	up( &x.lock );
++	up(&x.lock);
+ 
+-	complete_and_exit( &x.completion, 0 );
++	return 0;
+ }
+ 
+ 
+@@ -322,8 +324,7 @@ do_attach( struct i2c_adapter *adapter )
+ 		ret = i2c_probe( adapter, &addr_data, &do_probe );
+ 		if( x.thermostat && x.fan ) {
+ 			x.running = 1;
+-			init_completion( &x.completion );
+-			x.poll_task = kernel_thread( control_loop, NULL, SIGCHLD | CLONE_KERNEL );
++			x.poll_task = kthread_run(control_loop, NULL, "g4fand");
+ 		}
+ 	}
+ 	return ret;
+@@ -339,7 +340,8 @@ do_detach( struct i2c_client *client )
+ 	else {
+ 		if( x.running ) {
+ 			x.running = 0;
+-			wait_for_completion( &x.completion );
++			kthread_stop(x.poll_task);
++			x.poll_task = NULL;
+ 		}
+ 		if( client == x.thermostat )
+ 			x.thermostat = NULL;
+@@ -357,7 +359,6 @@ static struct i2c_driver g4fan_driver = {
  	.driver = {
  		.name	= "therm_windtunnel",
  	},
@@ -278650,12 +419659,1021 @@
  	.attach_adapter = do_attach,
  	.detach_client	= do_detach,
  };
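
The therm_windtunnel control_loop() above now takes x.lock only around hardware access and drops it across the eight-second sleep, terminating via kthread_should_stop() rather than the old x.running flag and completion. A compressed sketch of that loop structure; sketch_* names and the empty poll body are placeholders.

#include <linux/kthread.h>
#include <linux/delay.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(sketch_lock);	/* semaphore used as a mutex, as in the driver */

static void sketch_poll(void)
{
	/* read the thermostat, retune the fan */
}

static int sketch_g4fand(void *unused)
{
	down(&sketch_lock);
	/* one-time hardware setup, done under the lock */
	up(&sketch_lock);

	for (;;) {
		msleep_interruptible(8000);
		if (kthread_should_stop())
			break;

		down(&sketch_lock);
		sketch_poll();
		up(&sketch_lock);
	}
	return 0;
}
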
+diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
+index 7e27071..741a2e3 100644
+--- a/drivers/macintosh/via-pmu-backlight.c
++++ b/drivers/macintosh/via-pmu-backlight.c
+@@ -22,7 +22,7 @@ static u8 bl_curve[FB_BACKLIGHT_LEVELS];
+ 
+ static void pmu_backlight_init_curve(u8 off, u8 min, u8 max)
+ {
+-	unsigned int i, flat, count, range = (max - min);
++	int i, flat, count, range = (max - min);
+ 
+ 	bl_curve[0] = off;
+ 
+@@ -68,17 +68,11 @@ static int pmu_backlight_get_level_brightness(int level)
+ 	return pmulevel;
+ }
+ 
+-static int pmu_backlight_update_status(struct backlight_device *bd)
++static int __pmu_backlight_update_status(struct backlight_device *bd)
+ {
+ 	struct adb_request req;
+-	unsigned long flags;
+ 	int level = bd->props.brightness;
+ 
+-	spin_lock_irqsave(&pmu_backlight_lock, flags);
+-
+-	/* Don't update brightness when sleeping */
+-	if (sleeping)
+-		goto out;
+ 
+ 	if (bd->props.power != FB_BLANK_UNBLANK ||
+ 	    bd->props.fb_blank != FB_BLANK_UNBLANK)
+@@ -99,12 +93,23 @@ static int pmu_backlight_update_status(struct backlight_device *bd)
+ 		pmu_wait_complete(&req);
+ 	}
+ 
+-out:
+-	spin_unlock_irqrestore(&pmu_backlight_lock, flags);
+-
+ 	return 0;
+ }
+ 
++static int pmu_backlight_update_status(struct backlight_device *bd)
++{
++	unsigned long flags;
++	int rc = 0;
++
++	spin_lock_irqsave(&pmu_backlight_lock, flags);
++	/* Don't update brightness when sleeping */
++	if (!sleeping)
++		rc = __pmu_backlight_update_status(bd);
++	spin_unlock_irqrestore(&pmu_backlight_lock, flags);
++	return rc;
++}
++
++
+ static int pmu_backlight_get_brightness(struct backlight_device *bd)
+ {
+ 	return bd->props.brightness;
+@@ -123,6 +128,16 @@ void pmu_backlight_set_sleep(int sleep)
+ 
+ 	spin_lock_irqsave(&pmu_backlight_lock, flags);
+ 	sleeping = sleep;
++	if (pmac_backlight) {
++		if (sleep) {
++			struct adb_request req;
++
++			pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
++				    PMU_POW_BACKLIGHT | PMU_POW_OFF);
++			pmu_wait_complete(&req);
++		} else
++			__pmu_backlight_update_status(pmac_backlight);
++	}
+ 	spin_unlock_irqrestore(&pmu_backlight_lock, flags);
+ }
+ #endif /* CONFIG_PM */
+@@ -148,8 +163,8 @@ void __init pmu_backlight_init()
+ 
+ 	bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data);
+ 	if (IS_ERR(bd)) {
+-		printk("pmubl: Backlight registration failed\n");
+-		goto error;
++		printk(KERN_ERR "PMU Backlight registration failed\n");
++		return;
+ 	}
+ 	bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
+ 	pmu_backlight_init_curve(0x7F, 0x46, 0x0E);
+@@ -171,10 +186,5 @@ void __init pmu_backlight_init()
+ 	bd->props.power = FB_BLANK_UNBLANK;
+ 	backlight_update_status(bd);
+ 
+-	printk("pmubl: Backlight initialized (%s)\n", name);
+-
+-	return;
+-
+-error:
+-	return;
++	printk(KERN_INFO "PMU Backlight initialized (%s)\n", name);
+ }
 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
-index 6123c70..ac420b1 100644
+index 6123c70..ebec663 100644
 --- a/drivers/macintosh/via-pmu.c
 +++ b/drivers/macintosh/via-pmu.c
-@@ -2796,7 +2796,7 @@ static int pmu_sys_resume(struct sys_device *sysdev)
- #endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
+@@ -10,13 +10,11 @@
+  *
+  * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi.
+  * Copyright (C) 2001-2002 Benjamin Herrenschmidt
++ * Copyright (C) 2006-2007 Johannes Berg
+  *
+  * THIS DRIVER IS BECOMING A TOTAL MESS !
+  *  - Cleanup atomically disabling reply to PMU events after
+  *    a sleep or a freq. switch
+- *  - Move sleep code out of here to pmac_pm, merge into new
+- *    common PM infrastructure
+- *  - Save/Restore PCI space properly
+  *
+  */
+ #include <stdarg.h>
+@@ -33,7 +31,6 @@
+ #include <linux/adb.h>
+ #include <linux/pmu.h>
+ #include <linux/cuda.h>
+-#include <linux/smp_lock.h>
+ #include <linux/module.h>
+ #include <linux/spinlock.h>
+ #include <linux/pm.h>
+@@ -65,9 +62,7 @@
+ #include "via-pmu-event.h"
+ 
+ /* Some compile options */
+-#undef SUSPEND_USES_PMU
+-#define DEBUG_SLEEP
+-#undef HACKED_PCI_SAVE
++#undef DEBUG_SLEEP
+ 
+ /* Misc minor number allocated for /dev/pmu */
+ #define PMU_MINOR		154
+@@ -152,12 +147,9 @@ static spinlock_t pmu_lock;
+ static u8 pmu_intr_mask;
+ static int pmu_version;
+ static int drop_interrupts;
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
+ static int option_lid_wakeup = 1;
+-#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
+-#if (defined(CONFIG_PM_SLEEP)&&defined(CONFIG_PPC32))||defined(CONFIG_PMAC_BACKLIGHT_LEGACY)
+-static int sleep_in_progress;
+-#endif
++#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
+ static unsigned long async_req_locks;
+ static unsigned int pmu_irq_stats[11];
+ 
+@@ -177,7 +169,6 @@ static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES];
+ 
+ int __fake_sleep;
+ int asleep;
+-BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
+ 
+ #ifdef CONFIG_ADB
+ static int adb_dev_map;
+@@ -224,7 +215,7 @@ extern void enable_kernel_fp(void);
+ 
+ #ifdef DEBUG_SLEEP
+ int pmu_polled_request(struct adb_request *req);
+-int pmu_wink(struct adb_request *req);
++void pmu_blink(int n);
+ #endif
+ 
+ /*
+@@ -875,7 +866,7 @@ proc_read_options(char *page, char **start, off_t off,
+ {
+ 	char *p = page;
+ 
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
+ 	if (pmu_kind == PMU_KEYLARGO_BASED &&
+ 	    pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
+ 		p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup);
+@@ -916,7 +907,7 @@ proc_write_options(struct file *file, const char __user *buffer,
+ 	*(val++) = 0;
+ 	while(*val == ' ')
+ 		val++;
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
+ 	if (pmu_kind == PMU_KEYLARGO_BASED &&
+ 	    pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
+ 		if (!strcmp(label, "lid_wakeup"))
+@@ -1256,9 +1247,7 @@ void
+ pmu_suspend(void)
+ {
+ 	unsigned long flags;
+-#ifdef SUSPEND_USES_PMU
+-	struct adb_request *req;
+-#endif
++
+ 	if (!via)
+ 		return;
+ 	
+@@ -1276,17 +1265,10 @@ pmu_suspend(void)
+ 		via_pmu_interrupt(0, NULL);
+ 		spin_lock_irqsave(&pmu_lock, flags);
+ 		if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
+-#ifdef SUSPEND_USES_PMU
+-			pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0);
+-			spin_unlock_irqrestore(&pmu_lock, flags);
+-			while(!req.complete)
+-				pmu_poll();
+-#else /* SUSPEND_USES_PMU */
+ 			if (gpio_irq >= 0)
+ 				disable_irq_nosync(gpio_irq);
+ 			out_8(&via[IER], CB1_INT | IER_CLR);
+ 			spin_unlock_irqrestore(&pmu_lock, flags);
+-#endif /* SUSPEND_USES_PMU */
+ 			break;
+ 		}
+ 	} while (1);
+@@ -1307,18 +1289,11 @@ pmu_resume(void)
+ 		return;
+ 	}
+ 	adb_int_pending = 1;
+-#ifdef SUSPEND_USES_PMU
+-	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
+-	spin_unlock_irqrestore(&pmu_lock, flags);
+-	while(!req.complete)
+-		pmu_poll();
+-#else /* SUSPEND_USES_PMU */
+ 	if (gpio_irq >= 0)
+ 		enable_irq(gpio_irq);
+ 	out_8(&via[IER], CB1_INT | IER_SET);
+ 	spin_unlock_irqrestore(&pmu_lock, flags);
+ 	pmu_poll();
+-#endif /* SUSPEND_USES_PMU */
+ }
+ 
+ /* Interrupt data could be the result data from an ADB cmd */
+@@ -1738,228 +1713,7 @@ pmu_present(void)
+ 	return via != 0;
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+-
+-static LIST_HEAD(sleep_notifiers);
+-
+-int
+-pmu_register_sleep_notifier(struct pmu_sleep_notifier *n)
+-{
+-	struct list_head *list;
+-	struct pmu_sleep_notifier *notifier;
+-
+-	for (list = sleep_notifiers.next; list != &sleep_notifiers;
+-	     list = list->next) {
+-		notifier = list_entry(list, struct pmu_sleep_notifier, list);
+-		if (n->priority > notifier->priority)
+-			break;
+-	}
+-	__list_add(&n->list, list->prev, list);
+-	return 0;
+-}
+-EXPORT_SYMBOL(pmu_register_sleep_notifier);
+-
+-int
+-pmu_unregister_sleep_notifier(struct pmu_sleep_notifier* n)
+-{
+-	if (n->list.next == 0)
+-		return -ENOENT;
+-	list_del(&n->list);
+-	n->list.next = NULL;
+-	return 0;
+-}
+-EXPORT_SYMBOL(pmu_unregister_sleep_notifier);
+-#endif /* CONFIG_PM_SLEEP */
+-
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
+-
+-/* Sleep is broadcast last-to-first */
+-static void broadcast_sleep(int when)
+-{
+-	struct list_head *list;
+-	struct pmu_sleep_notifier *notifier;
+-
+-	for (list = sleep_notifiers.prev; list != &sleep_notifiers;
+-	     list = list->prev) {
+-		notifier = list_entry(list, struct pmu_sleep_notifier, list);
+-		notifier->notifier_call(notifier, when);
+-	}
+-}
+-
+-/* Wake is broadcast first-to-last */
+-static void broadcast_wake(void)
+-{
+-	struct list_head *list;
+-	struct pmu_sleep_notifier *notifier;
+-
+-	for (list = sleep_notifiers.next; list != &sleep_notifiers;
+-	     list = list->next) {
+-		notifier = list_entry(list, struct pmu_sleep_notifier, list);
+-		notifier->notifier_call(notifier, PBOOK_WAKE);
+-	}
+-}
+-
+-/*
+- * This struct is used to store config register values for
+- * PCI devices which may get powered off when we sleep.
+- */
+-static struct pci_save {
+-#ifndef HACKED_PCI_SAVE
+-	u16	command;
+-	u16	cache_lat;
+-	u16	intr;
+-	u32	rom_address;
+-#else
+-	u32	config[16];
+-#endif	
+-} *pbook_pci_saves;
+-static int pbook_npci_saves;
+-
+-static void
+-pbook_alloc_pci_save(void)
+-{
+-	int npci;
+-	struct pci_dev *pd = NULL;
+-
+-	npci = 0;
+-	while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) {
+-		++npci;
+-	}
+-	if (npci == 0)
+-		return;
+-	pbook_pci_saves = (struct pci_save *)
+-		kmalloc(npci * sizeof(struct pci_save), GFP_KERNEL);
+-	pbook_npci_saves = npci;
+-}
+-
+-static void
+-pbook_free_pci_save(void)
+-{
+-	if (pbook_pci_saves == NULL)
+-		return;
+-	kfree(pbook_pci_saves);
+-	pbook_pci_saves = NULL;
+-	pbook_npci_saves = 0;
+-}
+-
+-static void
+-pbook_pci_save(void)
+-{
+-	struct pci_save *ps = pbook_pci_saves;
+-	struct pci_dev *pd = NULL;
+-	int npci = pbook_npci_saves;
+-	
+-	if (ps == NULL)
+-		return;
+-
+-	while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) {
+-		if (npci-- == 0) {
+-			pci_dev_put(pd);
+-			return;
+-		}
+-#ifndef HACKED_PCI_SAVE
+-		pci_read_config_word(pd, PCI_COMMAND, &ps->command);
+-		pci_read_config_word(pd, PCI_CACHE_LINE_SIZE, &ps->cache_lat);
+-		pci_read_config_word(pd, PCI_INTERRUPT_LINE, &ps->intr);
+-		pci_read_config_dword(pd, PCI_ROM_ADDRESS, &ps->rom_address);
+-#else
+-		int i;
+-		for (i=1;i<16;i++)
+-			pci_read_config_dword(pd, i<<4, &ps->config[i]);
+-#endif
+-		++ps;
+-	}
+-}
+-
+-/* For this to work, we must take care of a few things: If gmac was enabled
+- * during boot, it will be in the pci dev list. If it's disabled at this point
+- * (and it will probably be), then you can't access it's config space.
+- */
+-static void
+-pbook_pci_restore(void)
+-{
+-	u16 cmd;
+-	struct pci_save *ps = pbook_pci_saves - 1;
+-	struct pci_dev *pd = NULL;
+-	int npci = pbook_npci_saves;
+-	int j;
+-
+-	while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) {
+-#ifdef HACKED_PCI_SAVE
+-		int i;
+-		if (npci-- == 0) {
+-			pci_dev_put(pd);
+-			return;
+-		}
+-		ps++;
+-		for (i=2;i<16;i++)
+-			pci_write_config_dword(pd, i<<4, ps->config[i]);
+-		pci_write_config_dword(pd, 4, ps->config[1]);
+-#else
+-		if (npci-- == 0)
+-			return;
+-		ps++;
+-		if (ps->command == 0)
+-			continue;
+-		pci_read_config_word(pd, PCI_COMMAND, &cmd);
+-		if ((ps->command & ~cmd) == 0)
+-			continue;
+-		switch (pd->hdr_type) {
+-		case PCI_HEADER_TYPE_NORMAL:
+-			for (j = 0; j < 6; ++j)
+-				pci_write_config_dword(pd,
+-					PCI_BASE_ADDRESS_0 + j*4,
+-					pd->resource[j].start);
+-			pci_write_config_dword(pd, PCI_ROM_ADDRESS,
+-				ps->rom_address);
+-			pci_write_config_word(pd, PCI_CACHE_LINE_SIZE,
+-				ps->cache_lat);
+-			pci_write_config_word(pd, PCI_INTERRUPT_LINE,
+-				ps->intr);
+-			pci_write_config_word(pd, PCI_COMMAND, ps->command);
+-			break;
+-		}
+-#endif	
+-	}
+-}
+-
+-#ifdef DEBUG_SLEEP
+-/* N.B. This doesn't work on the 3400 */
+-void 
+-pmu_blink(int n)
+-{
+-	struct adb_request req;
+-
+-	memset(&req, 0, sizeof(req));
+-
+-	for (; n > 0; --n) {
+-		req.nbytes = 4;
+-		req.done = NULL;
+-		req.data[0] = 0xee;
+-		req.data[1] = 4;
+-		req.data[2] = 0;
+-		req.data[3] = 1;
+-		req.reply[0] = ADB_RET_OK;
+-		req.reply_len = 1;
+-		req.reply_expected = 0;
+-		pmu_polled_request(&req);
+-		mdelay(50);
+-		req.nbytes = 4;
+-		req.done = NULL;
+-		req.data[0] = 0xee;
+-		req.data[1] = 4;
+-		req.data[2] = 0;
+-		req.data[3] = 0;
+-		req.reply[0] = ADB_RET_OK;
+-		req.reply_len = 1;
+-		req.reply_expected = 0;
+-		pmu_polled_request(&req);
+-		mdelay(50);
+-	}
+-	mdelay(50);
+-}
+-#endif
+-
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
+ /*
+  * Put the powerbook to sleep.
+  */
+@@ -1994,134 +1748,6 @@ restore_via_state(void)
+ 	out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
+ }
+ 
+-extern void pmu_backlight_set_sleep(int sleep);
+-
+-static int
+-pmac_suspend_devices(void)
+-{
+-	int ret;
+-
+-	pm_prepare_console();
+-	
+-	/* Notify old-style device drivers */
+-	broadcast_sleep(PBOOK_SLEEP_REQUEST);
+-
+-	/* Sync the disks. */
+-	/* XXX It would be nice to have some way to ensure that
+-	 * nobody is dirtying any new buffers while we wait. That
+-	 * could be achieved using the refrigerator for processes
+-	 * that swsusp uses
+-	 */
+-	sys_sync();
+-
+-	broadcast_sleep(PBOOK_SLEEP_NOW);
+-
+-	/* Send suspend call to devices, hold the device core's dpm_sem */
+-	ret = device_suspend(PMSG_SUSPEND);
+-	if (ret) {
+-		broadcast_wake();
+-		printk(KERN_ERR "Driver sleep failed\n");
+-		return -EBUSY;
+-	}
+-
+-#ifdef CONFIG_PMAC_BACKLIGHT
+-	/* Tell backlight code not to muck around with the chip anymore */
+-	pmu_backlight_set_sleep(1);
+-#endif
+-
+-	/* Call platform functions marked "on sleep" */
+-	pmac_pfunc_i2c_suspend();
+-	pmac_pfunc_base_suspend();
+-
+-	/* Stop preemption */
+-	preempt_disable();
+-
+-	/* Make sure the decrementer won't interrupt us */
+-	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+-	/* Make sure any pending DEC interrupt occurring while we did
+-	 * the above didn't re-enable the DEC */
+-	mb();
+-	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+-
+-	/* We can now disable MSR_EE. This code of course works properly only
+-	 * on UP machines... For SMP, if we ever implement sleep, we'll have to
+-	 * stop the "other" CPUs way before we do all that stuff.
+-	 */
+-	local_irq_disable();
+-
+-	/* Broadcast power down irq
+-	 * This isn't that useful in most cases (only directly wired devices can
+-	 * use this but still... This will take care of sysdev's as well, so
+-	 * we exit from here with local irqs disabled and PIC off.
+-	 */
+-	ret = device_power_down(PMSG_SUSPEND);
+-	if (ret) {
+-		wakeup_decrementer();
+-		local_irq_enable();
+-		preempt_enable();
+-		device_resume();
+-		broadcast_wake();
+-		printk(KERN_ERR "Driver powerdown failed\n");
+-		return -EBUSY;
+-	}
+-
+-	/* Wait for completion of async requests */
+-	while (!batt_req.complete)
+-		pmu_poll();
+-
+-	/* Giveup the lazy FPU & vec so we don't have to back them
+-	 * up from the low level code
+-	 */
+-	enable_kernel_fp();
+-
+-#ifdef CONFIG_ALTIVEC
+-	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+-		enable_kernel_altivec();
+-#endif /* CONFIG_ALTIVEC */
+-
+-	return 0;
+-}
+-
+-static int
+-pmac_wakeup_devices(void)
+-{
+-	mdelay(100);
+-
+-#ifdef CONFIG_PMAC_BACKLIGHT
+-	/* Tell backlight code it can use the chip again */
+-	pmu_backlight_set_sleep(0);
+-#endif
+-
+-	/* Power back up system devices (including the PIC) */
+-	device_power_up();
+-
+-	/* Force a poll of ADB interrupts */
+-	adb_int_pending = 1;
+-	via_pmu_interrupt(0, NULL);
+-
+-	/* Restart jiffies & scheduling */
+-	wakeup_decrementer();
+-
+-	/* Re-enable local CPU interrupts */
+-	local_irq_enable();
+-	mdelay(10);
+-	preempt_enable();
+-
+-	/* Call platform functions marked "on wake" */
+-	pmac_pfunc_base_resume();
+-	pmac_pfunc_i2c_resume();
+-
+-	/* Resume devices */
+-	device_resume();
+-
+-	/* Notify old style drivers */
+-	broadcast_wake();
+-
+-	pm_restore_console();
+-
+-	return 0;
+-}
+-
+ #define	GRACKLE_PM	(1<<7)
+ #define GRACKLE_DOZE	(1<<5)
+ #define	GRACKLE_NAP	(1<<4)
+@@ -2132,19 +1758,12 @@ static int powerbook_sleep_grackle(void)
+ 	unsigned long save_l2cr;
+ 	unsigned short pmcr1;
+ 	struct adb_request req;
+-	int ret;
+ 	struct pci_dev *grackle;
+ 
+ 	grackle = pci_get_bus_and_slot(0, 0);
+ 	if (!grackle)
+ 		return -ENODEV;
+ 
+-	ret = pmac_suspend_devices();
+-	if (ret) {
+-		printk(KERN_ERR "Sleep rejected by devices\n");
+-		return ret;
+-	}
+-	
+ 	/* Turn off various things. Darwin does some retry tests here... */
+ 	pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, PMU_POW0_OFF|PMU_POW0_HARD_DRIVE);
+ 	pmu_wait_complete(&req);
+@@ -2207,8 +1826,6 @@ static int powerbook_sleep_grackle(void)
+ 			PMU_POW_ON|PMU_POW_BACKLIGHT|PMU_POW_CHARGER|PMU_POW_IRLED|PMU_POW_MEDIABAY);
+ 	pmu_wait_complete(&req);
+ 
+-	pmac_wakeup_devices();
+-
+ 	return 0;
+ }
+ 
+@@ -2218,7 +1835,6 @@ powerbook_sleep_Core99(void)
+ 	unsigned long save_l2cr;
+ 	unsigned long save_l3cr;
+ 	struct adb_request req;
+-	int ret;
+ 	
+ 	if (pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) < 0) {
+ 		printk(KERN_ERR "Sleep mode not supported on this machine\n");
+@@ -2228,12 +1844,6 @@ powerbook_sleep_Core99(void)
+ 	if (num_online_cpus() > 1 || cpu_is_offline(0))
+ 		return -EAGAIN;
+ 
+-	ret = pmac_suspend_devices();
+-	if (ret) {
+-		printk(KERN_ERR "Sleep rejected by devices\n");
+-		return ret;
+-	}
+-
+ 	/* Stop environment and ADB interrupts */
+ 	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0);
+ 	pmu_wait_complete(&req);
+@@ -2304,45 +1914,33 @@ powerbook_sleep_Core99(void)
+ 	/* Restore LPJ, cpufreq will adjust the cpu frequency */
+ 	loops_per_jiffy /= 2;
+ 
+-	pmac_wakeup_devices();
+-
+ 	return 0;
+ }
+ 
+ #define PB3400_MEM_CTRL		0xf8000000
+ #define PB3400_MEM_CTRL_SLEEP	0x70
+ 
+-static int
+-powerbook_sleep_3400(void)
++static void __iomem *pb3400_mem_ctrl;
++
++static void powerbook_sleep_init_3400(void)
++{
++	/* map in the memory controller registers */
++	pb3400_mem_ctrl = ioremap(PB3400_MEM_CTRL, 0x100);
++	if (pb3400_mem_ctrl == NULL)
++		printk(KERN_WARNING "ioremap failed: sleep won't be possible");
++}
++
++static int powerbook_sleep_3400(void)
+ {
+-	int ret, i, x;
++	int i, x;
+ 	unsigned int hid0;
+-	unsigned long p;
++	unsigned long msr;
+ 	struct adb_request sleep_req;
+-	void __iomem *mem_ctrl;
+ 	unsigned int __iomem *mem_ctrl_sleep;
+ 
+-	/* first map in the memory controller registers */
+-	mem_ctrl = ioremap(PB3400_MEM_CTRL, 0x100);
+-	if (mem_ctrl == NULL) {
+-		printk("powerbook_sleep_3400: ioremap failed\n");
++	if (pb3400_mem_ctrl == NULL)
+ 		return -ENOMEM;
+-	}
+-	mem_ctrl_sleep = mem_ctrl + PB3400_MEM_CTRL_SLEEP;
+-
+-	/* Allocate room for PCI save */
+-	pbook_alloc_pci_save();
+-
+-	ret = pmac_suspend_devices();
+-	if (ret) {
+-		pbook_free_pci_save();
+-		iounmap(mem_ctrl);
+-		printk(KERN_ERR "Sleep rejected by devices\n");
+-		return ret;
+-	}
+-
+-	/* Save the state of PCI config space for some slots */
+-	pbook_pci_save();
++	mem_ctrl_sleep = pb3400_mem_ctrl + PB3400_MEM_CTRL_SLEEP;
+ 
+ 	/* Set the memory controller to keep the memory refreshed
+ 	   while we're asleep */
+@@ -2357,41 +1955,34 @@ powerbook_sleep_3400(void)
+ 
+ 	/* Ask the PMU to put us to sleep */
+ 	pmu_request(&sleep_req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
+-	while (!sleep_req.complete)
+-		mb();
++	pmu_wait_complete(&sleep_req);
++	pmu_unlock();
+ 
+-	pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,1);
++	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1);
+ 
+-	/* displacement-flush the L2 cache - necessary? */
+-	for (p = KERNELBASE; p < KERNELBASE + 0x100000; p += 0x1000)
+-		i = *(volatile int *)p;
+ 	asleep = 1;
+ 
+ 	/* Put the CPU into sleep mode */
+ 	hid0 = mfspr(SPRN_HID0);
+ 	hid0 = (hid0 & ~(HID0_NAP | HID0_DOZE)) | HID0_SLEEP;
+ 	mtspr(SPRN_HID0, hid0);
+-	mtmsr(mfmsr() | MSR_POW | MSR_EE);
+-	udelay(10);
++	local_irq_enable();
++	msr = mfmsr() | MSR_POW;
++	while (asleep) {
++		mb();
++		mtmsr(msr);
++		isync();
++	}
++	local_irq_disable();
+ 
+ 	/* OK, we're awake again, start restoring things */
+ 	out_be32(mem_ctrl_sleep, 0x3f);
+-	pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,0);
+-	pbook_pci_restore();
+-	pmu_unlock();
+-
+-	/* wait for the PMU interrupt sequence to complete */
+-	while (asleep)
+-		mb();
+-
+-	pmac_wakeup_devices();
+-	pbook_free_pci_save();
+-	iounmap(mem_ctrl);
++	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0);
+ 
+ 	return 0;
+ }
+ 
+-#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
++#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
+ 
+ /*
+  * Support for /dev/pmu device
+@@ -2548,7 +2139,6 @@ pmu_release(struct inode *inode, struct file *file)
+ 	struct pmu_private *pp = file->private_data;
+ 	unsigned long flags;
+ 
+-	lock_kernel();
+ 	if (pp != 0) {
+ 		file->private_data = NULL;
+ 		spin_lock_irqsave(&all_pvt_lock, flags);
+@@ -2562,10 +2152,96 @@ pmu_release(struct inode *inode, struct file *file)
+ 
+ 		kfree(pp);
+ 	}
+-	unlock_kernel();
+ 	return 0;
+ }
+ 
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
++static void pmac_suspend_disable_irqs(void)
++{
++	/* Call platform functions marked "on sleep" */
++	pmac_pfunc_i2c_suspend();
++	pmac_pfunc_base_suspend();
++}
++
++static int powerbook_sleep(suspend_state_t state)
++{
++	int error = 0;
++
++	/* Wait for completion of async requests */
++	while (!batt_req.complete)
++		pmu_poll();
++
++	/* Giveup the lazy FPU & vec so we don't have to back them
++	 * up from the low level code
++	 */
++	enable_kernel_fp();
++
++#ifdef CONFIG_ALTIVEC
++	if (cpu_has_feature(CPU_FTR_ALTIVEC))
++		enable_kernel_altivec();
++#endif /* CONFIG_ALTIVEC */
++
++	switch (pmu_kind) {
++	case PMU_OHARE_BASED:
++		error = powerbook_sleep_3400();
++		break;
++	case PMU_HEATHROW_BASED:
++	case PMU_PADDINGTON_BASED:
++		error = powerbook_sleep_grackle();
++		break;
++	case PMU_KEYLARGO_BASED:
++		error = powerbook_sleep_Core99();
++		break;
++	default:
++		return -ENOSYS;
++	}
++
++	if (error)
++		return error;
++
++	mdelay(100);
++
++	return 0;
++}
++
++static void pmac_suspend_enable_irqs(void)
++{
++	/* Force a poll of ADB interrupts */
++	adb_int_pending = 1;
++	via_pmu_interrupt(0, NULL);
++
++	mdelay(10);
++
++	/* Call platform functions marked "on wake" */
++	pmac_pfunc_base_resume();
++	pmac_pfunc_i2c_resume();
++}
++
++static int pmu_sleep_valid(suspend_state_t state)
++{
++	return state == PM_SUSPEND_MEM
++		&& (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
++}
++
++static struct platform_suspend_ops pmu_pm_ops = {
++	.enter = powerbook_sleep,
++	.valid = pmu_sleep_valid,
++};
++
++static int register_pmu_pm_ops(void)
++{
++	if (pmu_kind == PMU_OHARE_BASED)
++		powerbook_sleep_init_3400();
++	ppc_md.suspend_disable_irqs = pmac_suspend_disable_irqs;
++	ppc_md.suspend_enable_irqs = pmac_suspend_enable_irqs;
++	suspend_set_ops(&pmu_pm_ops);
++
++	return 0;
++}
++
++device_initcall(register_pmu_pm_ops);
++#endif
++
+ static int
+ pmu_ioctl(struct inode * inode, struct file *filp,
+ 		     u_int cmd, u_long arg)
+@@ -2574,35 +2250,15 @@ pmu_ioctl(struct inode * inode, struct file *filp,
+ 	int error = -EINVAL;
+ 
+ 	switch (cmd) {
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
+ 	case PMU_IOC_SLEEP:
+ 		if (!capable(CAP_SYS_ADMIN))
+ 			return -EACCES;
+-		if (sleep_in_progress)
+-			return -EBUSY;
+-		sleep_in_progress = 1;
+-		switch (pmu_kind) {
+-		case PMU_OHARE_BASED:
+-			error = powerbook_sleep_3400();
+-			break;
+-		case PMU_HEATHROW_BASED:
+-		case PMU_PADDINGTON_BASED:
+-			error = powerbook_sleep_grackle();
+-			break;
+-		case PMU_KEYLARGO_BASED:
+-			error = powerbook_sleep_Core99();
+-			break;
+-		default:
+-			error = -ENOSYS;
+-		}
+-		sleep_in_progress = 0;
+-		break;
++		return pm_suspend(PM_SUSPEND_MEM);
+ 	case PMU_IOC_CAN_SLEEP:
+-		if (pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) < 0)
++		if (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) < 0)
+ 			return put_user(0, argp);
+ 		else
+ 			return put_user(1, argp);
+-#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
+ 
+ #ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
+ 	/* Compatibility ioctl's for backlight */
+@@ -2610,9 +2266,6 @@ pmu_ioctl(struct inode * inode, struct file *filp,
+ 	{
+ 		int brightness;
+ 
+-		if (sleep_in_progress)
+-			return -EBUSY;
+-
+ 		brightness = pmac_backlight_get_legacy_brightness();
+ 		if (brightness < 0)
+ 			return brightness;
+@@ -2624,9 +2277,6 @@ pmu_ioctl(struct inode * inode, struct file *filp,
+ 	{
+ 		int brightness;
+ 
+-		if (sleep_in_progress)
+-			return -EBUSY;
+-
+ 		error = get_user(brightness, argp);
+ 		if (error)
+ 			return error;
+@@ -2751,15 +2401,43 @@ pmu_polled_request(struct adb_request *req)
+ 	local_irq_restore(flags);
+ 	return 0;
+ }
+-#endif /* DEBUG_SLEEP */
+ 
++/* N.B. This doesn't work on the 3400 */
++void pmu_blink(int n)
++{
++	struct adb_request req;
+ 
+-/* FIXME: This is a temporary set of callbacks to enable us
+- * to do suspend-to-disk.
+- */
++	memset(&req, 0, sizeof(req));
+ 
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
++	for (; n > 0; --n) {
++		req.nbytes = 4;
++		req.done = NULL;
++		req.data[0] = 0xee;
++		req.data[1] = 4;
++		req.data[2] = 0;
++		req.data[3] = 1;
++		req.reply[0] = ADB_RET_OK;
++		req.reply_len = 1;
++		req.reply_expected = 0;
++		pmu_polled_request(&req);
++		mdelay(50);
++		req.nbytes = 4;
++		req.done = NULL;
++		req.data[0] = 0xee;
++		req.data[1] = 4;
++		req.data[2] = 0;
++		req.data[3] = 0;
++		req.reply[0] = ADB_RET_OK;
++		req.reply_len = 1;
++		req.reply_expected = 0;
++		pmu_polled_request(&req);
++		mdelay(50);
++	}
++	mdelay(50);
++}
++#endif /* DEBUG_SLEEP */
+ 
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
+ int pmu_sys_suspended;
+ 
+ static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state)
+@@ -2767,10 +2445,15 @@ static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state)
+ 	if (state.event != PM_EVENT_SUSPEND || pmu_sys_suspended)
+ 		return 0;
+ 
+-	/* Suspend PMU event interrupts */
++	/* Suspend PMU event interrupts */\
+ 	pmu_suspend();
+-
+ 	pmu_sys_suspended = 1;
++
++#ifdef CONFIG_PMAC_BACKLIGHT
++	/* Tell backlight code not to muck around with the chip anymore */
++	pmu_backlight_set_sleep(1);
++#endif
++
+ 	return 0;
+ }
+ 
+@@ -2785,18 +2468,21 @@ static int pmu_sys_resume(struct sys_device *sysdev)
+ 	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
+ 	pmu_wait_complete(&req);
+ 
++#ifdef CONFIG_PMAC_BACKLIGHT
++	/* Tell backlight code it can use the chip again */
++	pmu_backlight_set_sleep(0);
++#endif
+ 	/* Resume PMU event interrupts */
+ 	pmu_resume();
+-
+ 	pmu_sys_suspended = 0;
+ 
+ 	return 0;
+ }
+ 
+-#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
++#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
  
  static struct sysdev_class pmu_sysclass = {
 -	set_kset_name("pmu"),
@@ -278663,6 +420681,32 @@
  };
  
  static struct sys_device device_pmu = {
+@@ -2804,10 +2490,10 @@ static struct sys_device device_pmu = {
+ };
+ 
+ static struct sysdev_driver driver_pmu = {
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
+ 	.suspend	= &pmu_sys_suspend,
+ 	.resume		= &pmu_sys_resume,
+-#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
++#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
+ };
+ 
+ static int __init init_pmu_sysfs(void)
+@@ -2842,10 +2528,10 @@ EXPORT_SYMBOL(pmu_wait_complete);
+ EXPORT_SYMBOL(pmu_suspend);
+ EXPORT_SYMBOL(pmu_resume);
+ EXPORT_SYMBOL(pmu_unlock);
+-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
+ EXPORT_SYMBOL(pmu_enable_irled);
+ EXPORT_SYMBOL(pmu_battery_count);
+ EXPORT_SYMBOL(pmu_batteries);
+ EXPORT_SYMBOL(pmu_power_flags);
+-#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
++#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
+ 
 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
 index 88c0fd6..f2d24eb 100644
 --- a/drivers/md/dm.c
@@ -334942,7 +476986,7 @@
                                          dev->name);
                                  dev->stats.rx_dropped++;
 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
-index 9af05a2..6c57540 100644
+index 9af05a2..389980f 100644
 --- a/drivers/net/Kconfig
 +++ b/drivers/net/Kconfig
 @@ -212,7 +212,7 @@ config MII
@@ -335046,6 +477090,15 @@
  source "drivers/net/ixp2000/Kconfig"
  
  config MYRI_SBUS
+@@ -2301,7 +2356,7 @@ config GELIC_NET
+ 
+ config GIANFAR
+ 	tristate "Gianfar Ethernet"
+-	depends on 85xx || 83xx || PPC_86xx
++	depends on FSL_SOC
+ 	select PHYLIB
+ 	select CRC32
+ 	help
 @@ -2560,6 +2615,7 @@ config PASEMI_MAC
  	tristate "PA Semi 1/10Gbit MAC"
  	depends on PPC64 && PCI
@@ -368569,6 +510622,81 @@
 +#define ENC28J60_LAMPS_MODE	0x3476
 +
 +#endif
+diff --git a/drivers/net/fec_8xx/fec_8xx-netta.c b/drivers/net/fec_8xx/fec_8xx-netta.c
+index e492eb8..79deee2 100644
+--- a/drivers/net/fec_8xx/fec_8xx-netta.c
++++ b/drivers/net/fec_8xx/fec_8xx-netta.c
+@@ -26,7 +26,7 @@
+ #include <asm/mpc8xx.h>
+ #include <asm/irq.h>
+ #include <asm/uaccess.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ #include "fec_8xx.h"
+ 
+diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c
+index ab9637a..ca8d2e8 100644
+--- a/drivers/net/fec_8xx/fec_main.c
++++ b/drivers/net/fec_8xx/fec_main.c
+@@ -35,7 +35,7 @@
+ #include <asm/mpc8xx.h>
+ #include <asm/irq.h>
+ #include <asm/uaccess.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ #include "fec_8xx.h"
+ 
+diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c
+index e8e10a0..3b6ca29 100644
+--- a/drivers/net/fec_8xx/fec_mii.c
++++ b/drivers/net/fec_8xx/fec_mii.c
+@@ -34,7 +34,7 @@
+ #include <asm/mpc8xx.h>
+ #include <asm/irq.h>
+ #include <asm/uaccess.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ /*************************************************/
+ 
+diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
+index f91ee70..58b71e6 100644
+--- a/drivers/net/fec_mpc52xx.c
++++ b/drivers/net/fec_mpc52xx.c
+@@ -1057,10 +1057,8 @@ static int mpc52xx_fec_of_resume(struct of_device *op)
+ #endif
+ 
+ static struct of_device_id mpc52xx_fec_match[] = {
+-	{
+-		.type		= "network",
+-		.compatible	= "mpc5200-fec",
+-	},
++	{ .type = "network", .compatible = "fsl,mpc5200-fec", },
++	{ .type = "network", .compatible = "mpc5200-fec", },
+ 	{ }
+ };
+ 
+diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
+index ba6e8b2..1837584 100644
+--- a/drivers/net/fec_mpc52xx_phy.c
++++ b/drivers/net/fec_mpc52xx_phy.c
+@@ -177,11 +177,9 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of)
+ 
+ 
+ static struct of_device_id mpc52xx_fec_mdio_match[] = {
+-	{
+-		.type = "mdio",
+-		.compatible = "mpc5200b-fec-phy",
+-	},
+-	{},
++	{ .compatible = "fsl,mpc5200b-mdio", },
++	{ .compatible = "mpc5200b-fec-phy", },
++	{}
+ };
+ 
+ struct of_platform_driver mpc52xx_fec_mdio_driver = {
 diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
 index f84c752..7667a62 100644
 --- a/drivers/net/forcedeth.c
@@ -368682,6 +510810,111 @@
  		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
  	}
  	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
+index c83bd65..42d94ed 100644
+--- a/drivers/net/fs_enet/fs_enet-main.c
++++ b/drivers/net/fs_enet/fs_enet-main.c
+@@ -1178,8 +1178,15 @@ static int __devinit find_phy(struct device_node *np,
+ 	struct device_node *phynode, *mdionode;
+ 	struct resource res;
+ 	int ret = 0, len;
++	const u32 *data;
++
++	data  = of_get_property(np, "fixed-link", NULL);
++	if (data) {
++		snprintf(fpi->bus_id, 16, PHY_ID_FMT, 0, *data);
++		return 0;
++	}
+ 
+-	const u32 *data = of_get_property(np, "phy-handle", &len);
++	data = of_get_property(np, "phy-handle", &len);
+ 	if (!data || len != 4)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
+index c675e29..e05389c 100644
+--- a/drivers/net/fs_enet/fs_enet.h
++++ b/drivers/net/fs_enet/fs_enet.h
+@@ -12,7 +12,7 @@
+ #include <asm/fs_pd.h>
+ 
+ #ifdef CONFIG_CPM1
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ struct fec_info {
+ 	fec_t __iomem *fecp;
+diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
+index da4efbc..e363211 100644
+--- a/drivers/net/fs_enet/mac-fcc.c
++++ b/drivers/net/fs_enet/mac-fcc.c
+@@ -81,16 +81,8 @@
+ static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
+ {
+ 	const struct fs_platform_info *fpi = fep->fpi;
+-	int i;
+-
+-	W32(cpmp, cp_cpcr, fpi->cp_command | op | CPM_CR_FLG);
+-	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
+-		if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
+-			return 0;
+ 
+-	printk(KERN_ERR "%s(): Not able to issue CPM command\n",
+-	       __FUNCTION__);
+-	return 1;
++	return cpm_command(fpi->cp_command, op);
+ }
+ 
+ static int do_pd_setup(struct fs_enet_private *fep)
+diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
+index c1fee48..8a311d1 100644
+--- a/drivers/net/fs_enet/mac-fec.c
++++ b/drivers/net/fs_enet/mac-fec.c
+@@ -40,7 +40,7 @@
+ #include <asm/8xx_immap.h>
+ #include <asm/pgtable.h>
+ #include <asm/mpc8xx.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #endif
+ 
+ #ifdef CONFIG_PPC_CPM_NEW_BINDING
+diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
+index 48f2f30..d7ca319 100644
+--- a/drivers/net/fs_enet/mac-scc.c
++++ b/drivers/net/fs_enet/mac-scc.c
+@@ -40,7 +40,7 @@
+ #include <asm/8xx_immap.h>
+ #include <asm/pgtable.h>
+ #include <asm/mpc8xx.h>
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ #endif
+ 
+ #ifdef CONFIG_PPC_CPM_NEW_BINDING
+@@ -89,21 +89,12 @@
+  * Delay to wait for SCC reset command to complete (in us)
+  */
+ #define SCC_RESET_DELAY		50
+-#define MAX_CR_CMD_LOOPS	10000
+ 
+ static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
+ {
+ 	const struct fs_platform_info *fpi = fep->fpi;
+-	int i;
+-
+-	W16(cpmp, cp_cpcr, fpi->cp_command | CPM_CR_FLG | (op << 8));
+-	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
+-		if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
+-			return 0;
+ 
+-	printk(KERN_ERR "%s(): Not able to issue CPM command\n",
+-		__FUNCTION__);
+-	return 1;
++	return cpm_command(fpi->cp_command, op);
+ }
+ 
+ static int do_pd_setup(struct fs_enet_private *fep)
 diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
 index aec9ab1..230878b 100644
 --- a/drivers/net/gianfar_sysfs.c
@@ -368873,10 +511106,18 @@
  	else {
  		/* pageptr shall point into the DMA accessible memory region  */
 diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
-index cb06280..b24bd2d 100644
+index cb06280..e6c69f7 100644
 --- a/drivers/net/ibm_newemac/core.c
 +++ b/drivers/net/ibm_newemac/core.c
-@@ -1297,7 +1297,6 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+@@ -37,6 +37,7 @@
+ #include <linux/mii.h>
+ #include <linux/bitops.h>
+ #include <linux/workqueue.h>
++#include <linux/of.h>
+ 
+ #include <asm/processor.h>
+ #include <asm/io.h>
+@@ -1297,7 +1298,6 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  	return emac_xmit_finish(dev, len);
  }
  
@@ -368884,7 +511125,7 @@
  static inline int emac_xmit_split(struct emac_instance *dev, int slot,
  				  u32 pd, int len, int last, u16 base_ctrl)
  {
-@@ -1410,9 +1409,6 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
+@@ -1410,9 +1410,6 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
  	DBG2(dev, "stopped TX queue" NL);
  	return 1;
  }
@@ -368894,7 +511135,7 @@
  
  /* Tx lock BHs */
  static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
-@@ -2683,13 +2679,8 @@ static int __devinit emac_probe(struct of_device *ofdev,
+@@ -2683,13 +2680,8 @@ static int __devinit emac_probe(struct of_device *ofdev,
  
  	/* Fill in the driver function table */
  	ndev->open = &emac_open;
@@ -368909,7 +511150,7 @@
  	ndev->tx_timeout = &emac_tx_timeout;
  	ndev->watchdog_timeo = 5 * HZ;
  	ndev->stop = &emac_close;
-@@ -2697,8 +2688,11 @@ static int __devinit emac_probe(struct of_device *ofdev,
+@@ -2697,8 +2689,11 @@ static int __devinit emac_probe(struct of_device *ofdev,
  	ndev->set_multicast_list = &emac_set_multicast_list;
  	ndev->do_ioctl = &emac_ioctl;
  	if (emac_phy_supports_gige(dev->phy_mode)) {
@@ -392486,6 +534727,585 @@
  
  #define PCNET32_NUM_REGS 136
  
+diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
+index 54b2ba9..7fe03ce 100644
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -61,34 +61,12 @@ config ICPLUS_PHY
+ 	  Currently supports the IP175C PHY.
+ 
+ config FIXED_PHY
+-	tristate "Drivers for PHY emulation on fixed speed/link"
++	bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ 	---help---
+-	  Adds the driver to PHY layer to cover the boards that do not have any PHY bound,
+-	  but with the ability to manipulate the speed/link in software. The relevant MII
+-	  speed/duplex parameters could be effectively handled in a user-specified function.
+-	  Currently tested with mpc866ads.
+-
+-config FIXED_MII_10_FDX
+-	bool "Emulation for 10M Fdx fixed PHY behavior"
+-	depends on FIXED_PHY
+-
+-config FIXED_MII_100_FDX
+-	bool "Emulation for 100M Fdx fixed PHY behavior"
+-	depends on FIXED_PHY
+-
+-config FIXED_MII_1000_FDX
+-	bool "Emulation for 1000M Fdx fixed PHY behavior"
+-	depends on FIXED_PHY
+-
+-config FIXED_MII_AMNT
+-        int "Number of emulated PHYs to allocate "
+-        depends on FIXED_PHY
+-        default "1"
+-        ---help---
+-        Sometimes it is required to have several independent emulated
+-        PHYs on the bus (in case of multi-eth but phy-less HW for instance).
+-        This control will have specified number allocated for each fixed
+-        PHY type enabled.
++	  Adds the platform "fixed" MDIO Bus to cover the boards that use
++	  PHYs that are not connected to the real MDIO bus.
++
++	  Currently tested with mpc866ads and mpc8349e-mitx.
+ 
+ config MDIO_BITBANG
+ 	tristate "Support for bitbanged MDIO buses"
+diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
+index 5619182..73b6d39 100644
+--- a/drivers/net/phy/fixed.c
++++ b/drivers/net/phy/fixed.c
+@@ -1,362 +1,253 @@
+ /*
+- * drivers/net/phy/fixed.c
++ * Fixed MDIO bus (MDIO bus emulation with fixed PHYs)
+  *
+- * Driver for fixed PHYs, when transceiver is able to operate in one fixed mode.
++ * Author: Vitaly Bordug <vbordug at ru.mvista.com>
++ *         Anton Vorontsov <avorontsov at ru.mvista.com>
+  *
+- * Author: Vitaly Bordug
+- *
+- * Copyright (c) 2006 MontaVista Software, Inc.
++ * Copyright (c) 2006-2007 MontaVista Software, Inc.
+  *
+  * This program is free software; you can redistribute  it and/or modify it
+  * under  the terms of  the GNU General  Public License as published by the
+  * Free Software Foundation;  either version 2 of the  License, or (at your
+  * option) any later version.
+- *
+  */
++
+ #include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/unistd.h>
+-#include <linux/slab.h>
+-#include <linux/interrupt.h>
+-#include <linux/init.h>
+-#include <linux/delay.h>
+-#include <linux/netdevice.h>
+-#include <linux/etherdevice.h>
+-#include <linux/skbuff.h>
+-#include <linux/spinlock.h>
+-#include <linux/mm.h>
+ #include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/list.h>
+ #include <linux/mii.h>
+-#include <linux/ethtool.h>
+ #include <linux/phy.h>
+ #include <linux/phy_fixed.h>
+ 
+-#include <asm/io.h>
+-#include <asm/irq.h>
+-#include <asm/uaccess.h>
++#define MII_REGS_NUM 29
+ 
+-/* we need to track the allocated pointers in order to free them on exit */
+-static struct fixed_info *fixed_phy_ptrs[CONFIG_FIXED_MII_AMNT*MAX_PHY_AMNT];
+-
+-/*-----------------------------------------------------------------------------
+- *  If something weird is required to be done with link/speed,
+- * network driver is able to assign a function to implement this.
+- * May be useful for PHY's that need to be software-driven.
+- *-----------------------------------------------------------------------------*/
+-int fixed_mdio_set_link_update(struct phy_device *phydev,
+-			       int (*link_update) (struct net_device *,
+-						   struct fixed_phy_status *))
+-{
+-	struct fixed_info *fixed;
+-
+-	if (link_update == NULL)
+-		return -EINVAL;
+-
+-	if (phydev) {
+-		if (phydev->bus) {
+-			fixed = phydev->bus->priv;
+-			fixed->link_update = link_update;
+-			return 0;
+-		}
+-	}
+-	return -EINVAL;
+-}
+-
+-EXPORT_SYMBOL(fixed_mdio_set_link_update);
++struct fixed_mdio_bus {
++	int irqs[PHY_MAX_ADDR];
++	struct mii_bus mii_bus;
++	struct list_head phys;
++};
+ 
+-struct fixed_info *fixed_mdio_get_phydev (int phydev_ind)
+-{
+-	if (phydev_ind >= MAX_PHY_AMNT)
+-		return NULL;
+-	return fixed_phy_ptrs[phydev_ind];
+-}
++struct fixed_phy {
++	int id;
++	u16 regs[MII_REGS_NUM];
++	struct phy_device *phydev;
++	struct fixed_phy_status status;
++	int (*link_update)(struct net_device *, struct fixed_phy_status *);
++	struct list_head node;
++};
+ 
+-EXPORT_SYMBOL(fixed_mdio_get_phydev);
++static struct platform_device *pdev;
++static struct fixed_mdio_bus platform_fmb = {
++	.phys = LIST_HEAD_INIT(platform_fmb.phys),
++};
+ 
+-/*-----------------------------------------------------------------------------
+- *  This is used for updating internal mii regs from the status
+- *-----------------------------------------------------------------------------*/
+-#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) || defined(CONFIG_FIXED_MII_1000_FDX)
+-static int fixed_mdio_update_regs(struct fixed_info *fixed)
++static int fixed_phy_update_regs(struct fixed_phy *fp)
+ {
+-	u16 *regs = fixed->regs;
+-	u16 bmsr = 0;
++	u16 bmsr = BMSR_ANEGCAPABLE;
+ 	u16 bmcr = 0;
++	u16 lpagb = 0;
++	u16 lpa = 0;
+ 
+-	if (!regs) {
+-		printk(KERN_ERR "%s: regs not set up", __FUNCTION__);
+-		return -EINVAL;
+-	}
+-
+-	if (fixed->phy_status.link)
+-		bmsr |= BMSR_LSTATUS;
+-
+-	if (fixed->phy_status.duplex) {
++	if (fp->status.duplex) {
+ 		bmcr |= BMCR_FULLDPLX;
+ 
+-		switch (fixed->phy_status.speed) {
++		switch (fp->status.speed) {
++		case 1000:
++			bmsr |= BMSR_ESTATEN;
++			bmcr |= BMCR_SPEED1000;
++			lpagb |= LPA_1000FULL;
++			break;
+ 		case 100:
+ 			bmsr |= BMSR_100FULL;
+ 			bmcr |= BMCR_SPEED100;
++			lpa |= LPA_100FULL;
+ 			break;
+-
+ 		case 10:
+ 			bmsr |= BMSR_10FULL;
++			lpa |= LPA_10FULL;
+ 			break;
++		default:
++			printk(KERN_WARNING "fixed phy: unknown speed\n");
++			return -EINVAL;
+ 		}
+ 	} else {
+-		switch (fixed->phy_status.speed) {
++		switch (fp->status.speed) {
++		case 1000:
++			bmsr |= BMSR_ESTATEN;
++			bmcr |= BMCR_SPEED1000;
++			lpagb |= LPA_1000HALF;
++			break;
+ 		case 100:
+ 			bmsr |= BMSR_100HALF;
+ 			bmcr |= BMCR_SPEED100;
++			lpa |= LPA_100HALF;
+ 			break;
+-
+ 		case 10:
+-			bmsr |= BMSR_100HALF;
++			bmsr |= BMSR_10HALF;
++			lpa |= LPA_10HALF;
+ 			break;
++		default:
++			printk(KERN_WARNING "fixed phy: unknown speed\n");
++			return -EINVAL;
+ 		}
+ 	}
+ 
+-	regs[MII_BMCR] = bmcr;
+-	regs[MII_BMSR] = bmsr | 0x800;	/*we are always capable of 10 hdx */
++	if (fp->status.link)
++		bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
++
++	if (fp->status.pause)
++		lpa |= LPA_PAUSE_CAP;
++
++	if (fp->status.asym_pause)
++		lpa |= LPA_PAUSE_ASYM;
++
++	fp->regs[MII_PHYSID1] = fp->id >> 16;
++	fp->regs[MII_PHYSID2] = fp->id;
++
++	fp->regs[MII_BMSR] = bmsr;
++	fp->regs[MII_BMCR] = bmcr;
++	fp->regs[MII_LPA] = lpa;
++	fp->regs[MII_STAT1000] = lpagb;
+ 
+ 	return 0;
+ }
+ 
+-static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location)
++static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
+ {
+-	struct fixed_info *fixed = bus->priv;
+-
+-	/* if user has registered link update callback, use it */
+-	if (fixed->phydev)
+-		if (fixed->phydev->attached_dev) {
+-			if (fixed->link_update) {
+-				fixed->link_update(fixed->phydev->attached_dev,
+-						   &fixed->phy_status);
+-				fixed_mdio_update_regs(fixed);
++	struct fixed_mdio_bus *fmb = container_of(bus, struct fixed_mdio_bus,
++						  mii_bus);
++	struct fixed_phy *fp;
++
++	if (reg_num >= MII_REGS_NUM)
++		return -1;
++
++	list_for_each_entry(fp, &fmb->phys, node) {
++		if (fp->id == phy_id) {
++			/* Issue callback if user registered it. */
++			if (fp->link_update) {
++				fp->link_update(fp->phydev->attached_dev,
++						&fp->status);
++				fixed_phy_update_regs(fp);
+ 			}
++			return fp->regs[reg_num];
+ 		}
++	}
+ 
+-	if ((unsigned int)location >= fixed->regs_num)
+-		return -1;
+-	return fixed->regs[location];
++	return 0xFFFF;
+ }
+ 
+-static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location,
+-			   u16 val)
++static int fixed_mdio_write(struct mii_bus *bus, int phy_id, int reg_num,
++			    u16 val)
+ {
+-	/* do nothing for now */
+ 	return 0;
+ }
+ 
+-static int fixed_mii_reset(struct mii_bus *bus)
++/*
++ * If something weird is required to be done with link/speed,
++ * network driver is able to assign a function to implement this.
++ * May be useful for PHY's that need to be software-driven.
++ */
++int fixed_phy_set_link_update(struct phy_device *phydev,
++			      int (*link_update)(struct net_device *,
++						 struct fixed_phy_status *))
+ {
+-	/*nothing here - no way/need to reset it */
+-	return 0;
+-}
+-#endif
++	struct fixed_mdio_bus *fmb = &platform_fmb;
++	struct fixed_phy *fp;
+ 
+-static int fixed_config_aneg(struct phy_device *phydev)
+-{
+-	/* :TODO:03/13/2006 09:45:37 PM::
+-	   The full autoneg funcionality can be emulated,
+-	   but no need to have anything here for now
+-	 */
+-	return 0;
+-}
++	if (!link_update || !phydev || !phydev->bus)
++		return -EINVAL;
+ 
+-/*-----------------------------------------------------------------------------
+- * the manual bind will do the magic - with phy_id_mask == 0
+- * match will never return true...
+- *-----------------------------------------------------------------------------*/
+-static struct phy_driver fixed_mdio_driver = {
+-	.name = "Fixed PHY",
+-#ifdef CONFIG_FIXED_MII_1000_FDX
+-	.features = PHY_GBIT_FEATURES,
+-#else
+-	.features = PHY_BASIC_FEATURES,
+-#endif
+-	.config_aneg = fixed_config_aneg,
+-	.read_status = genphy_read_status,
+-	.driver = { .owner = THIS_MODULE, },
+-};
++	list_for_each_entry(fp, &fmb->phys, node) {
++		if (fp->id == phydev->phy_id) {
++			fp->link_update = link_update;
++			fp->phydev = phydev;
++			return 0;
++		}
++	}
+ 
+-static void fixed_mdio_release(struct device *dev)
+-{
+-	struct phy_device *phydev = container_of(dev, struct phy_device, dev);
+-	struct mii_bus *bus = phydev->bus;
+-	struct fixed_info *fixed = bus->priv;
+-
+-	kfree(phydev);
+-	kfree(bus->dev);
+-	kfree(bus);
+-	kfree(fixed->regs);
+-	kfree(fixed);
++	return -ENOENT;
+ }
++EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
+ 
+-/*-----------------------------------------------------------------------------
+- *  This func is used to create all the necessary stuff, bind
+- * the fixed phy driver and register all it on the mdio_bus_type.
+- * speed is either 10 or 100 or 1000, duplex is boolean.
+- * number is used to create multiple fixed PHYs, so that several devices can
+- * utilize them simultaneously.
+- *
+- * The device on mdio bus will look like [bus_id]:[phy_id],
+- * bus_id = number
+- * phy_id = speed+duplex.
+- *-----------------------------------------------------------------------------*/
+-#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) || defined(CONFIG_FIXED_MII_1000_FDX)
+-struct fixed_info *fixed_mdio_register_device(
+-	int bus_id, int speed, int duplex, u8 phy_id)
++int fixed_phy_add(unsigned int irq, int phy_id,
++		  struct fixed_phy_status *status)
+ {
+-	struct mii_bus *new_bus;
+-	struct fixed_info *fixed;
+-	struct phy_device *phydev;
+-	int err;
++	int ret;
++	struct fixed_mdio_bus *fmb = &platform_fmb;
++	struct fixed_phy *fp;
+ 
+-	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
++	fp = kzalloc(sizeof(*fp), GFP_KERNEL);
++	if (!fp)
++		return -ENOMEM;
+ 
+-	if (dev == NULL)
+-		goto err_dev_alloc;
++	memset(fp->regs, 0xFF,  sizeof(fp->regs[0]) * MII_REGS_NUM);
+ 
+-	new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
++	fmb->irqs[phy_id] = irq;
+ 
+-	if (new_bus == NULL)
+-		goto err_bus_alloc;
++	fp->id = phy_id;
++	fp->status = *status;
+ 
+-	fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL);
++	ret = fixed_phy_update_regs(fp);
++	if (ret)
++		goto err_regs;
+ 
+-	if (fixed == NULL)
+-		goto err_fixed_alloc;
++	list_add_tail(&fp->node, &fmb->phys);
+ 
+-	fixed->regs = kzalloc(MII_REGS_NUM * sizeof(int), GFP_KERNEL);
+-	if (NULL == fixed->regs)
+-		goto err_fixed_regs_alloc;
++	return 0;
+ 
+-	fixed->regs_num = MII_REGS_NUM;
+-	fixed->phy_status.speed = speed;
+-	fixed->phy_status.duplex = duplex;
+-	fixed->phy_status.link = 1;
++err_regs:
++	kfree(fp);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(fixed_phy_add);
+ 
+-	new_bus->name = "Fixed MII Bus";
+-	new_bus->read = &fixed_mii_read;
+-	new_bus->write = &fixed_mii_write;
+-	new_bus->reset = &fixed_mii_reset;
+-	/*set up workspace */
+-	fixed_mdio_update_regs(fixed);
+-	new_bus->priv = fixed;
++static int __init fixed_mdio_bus_init(void)
++{
++	struct fixed_mdio_bus *fmb = &platform_fmb;
++	int ret;
+ 
+-	new_bus->dev = dev;
+-	dev_set_drvdata(dev, new_bus);
++	pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
++	if (!pdev) {
++		ret = -ENOMEM;
++		goto err_pdev;
++	}
+ 
+-	/* create phy_device and register it on the mdio bus */
+-	phydev = phy_device_create(new_bus, 0, 0);
+-	if (phydev == NULL)
+-		goto err_phy_dev_create;
++	fmb->mii_bus.id = 0;
++	fmb->mii_bus.name = "Fixed MDIO Bus";
++	fmb->mii_bus.dev = &pdev->dev;
++	fmb->mii_bus.read = &fixed_mdio_read;
++	fmb->mii_bus.write = &fixed_mdio_write;
++	fmb->mii_bus.irq = fmb->irqs;
+ 
+-	/*
+-	 * Put the phydev pointer into the fixed pack so that bus read/write
+-	 * code could be able to access for instance attached netdev. Well it
+-	 * doesn't have to do so, only in case of utilizing user-specified
+-	 * link-update...
+-	 */
++	ret = mdiobus_register(&fmb->mii_bus);
++	if (ret)
++		goto err_mdiobus_reg;
+ 
+-	fixed->phydev = phydev;
+-	phydev->speed = speed;
+-	phydev->duplex = duplex;
++	return 0;
+ 
+-	phydev->irq = PHY_IGNORE_INTERRUPT;
+-	phydev->dev.bus = &mdio_bus_type;
++err_mdiobus_reg:
++	platform_device_unregister(pdev);
++err_pdev:
++	return ret;
++}
++module_init(fixed_mdio_bus_init);
+ 
+-	snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
+-		 PHY_ID_FMT, bus_id, phy_id);
++static void __exit fixed_mdio_bus_exit(void)
++{
++	struct fixed_mdio_bus *fmb = &platform_fmb;
++	struct fixed_phy *fp;
+ 
+-	phydev->bus = new_bus;
++	mdiobus_unregister(&fmb->mii_bus);
++	platform_device_unregister(pdev);
+ 
+-	phydev->dev.driver = &fixed_mdio_driver.driver;
+-	phydev->dev.release = fixed_mdio_release;
+-	err = phydev->dev.driver->probe(&phydev->dev);
+-	if (err < 0) {
+-		printk(KERN_ERR "Phy %s: problems with fixed driver\n",
+-		       phydev->dev.bus_id);
+-		goto err_out;
+-	}
+-	err = device_register(&phydev->dev);
+-	if (err) {
+-		printk(KERN_ERR "Phy %s failed to register\n",
+-		       phydev->dev.bus_id);
+-		goto err_out;
++	list_for_each_entry(fp, &fmb->phys, node) {
++		list_del(&fp->node);
++		kfree(fp);
+ 	}
+-	//phydev->state = PHY_RUNNING; /* make phy go up quick, but in 10Mbit/HDX
+-	return fixed;
+-
+-err_out:
+-	kfree(phydev);
+-err_phy_dev_create:
+-	kfree(fixed->regs);
+-err_fixed_regs_alloc:
+-	kfree(fixed);
+-err_fixed_alloc:
+-	kfree(new_bus);
+-err_bus_alloc:
+-	kfree(dev);
+-err_dev_alloc:
+-
+-	return NULL;
+-
+ }
+-#endif
++module_exit(fixed_mdio_bus_exit);
+ 
+-MODULE_DESCRIPTION("Fixed PHY device & driver for PAL");
++MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)");
+ MODULE_AUTHOR("Vitaly Bordug");
+ MODULE_LICENSE("GPL");
+-
+-static int __init fixed_init(void)
+-{
+-	int cnt = 0;
+-	int i;
+-/* register on the bus... Not expected to be matched
+- * with anything there...
+- *
+- */
+-	phy_driver_register(&fixed_mdio_driver);
+-
+-/* We will create several mdio devices here, and will bound the upper
+- * driver to them.
+- *
+- * Then the external software can lookup the phy bus by searching
+- * for 0:101, to be connected to the virtual 100M Fdx phy.
+- *
+- * In case several virtual PHYs required, the bus_id will be in form
+- * [num]:[duplex]+[speed], which make it able even to define
+- * driver-specific link control callback, if for instance PHY is
+- * completely SW-driven.
+- */
+-	for (i=1; i <= CONFIG_FIXED_MII_AMNT; i++) {
+-#ifdef CONFIG_FIXED_MII_1000_FDX
+-		fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(0, 1000, 1, i);
+-#endif
+-#ifdef CONFIG_FIXED_MII_100_FDX
+-		fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(1, 100, 1, i);
+-#endif
+-#ifdef CONFIG_FIXED_MII_10_FDX
+-		fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(2, 10, 1, i);
+-#endif
+-	}
+-
+-	return 0;
+-}
+-
+-static void __exit fixed_exit(void)
+-{
+-	int i;
+-
+-	phy_driver_unregister(&fixed_mdio_driver);
+-	for (i=0; i < MAX_PHY_AMNT; i++)
+-		if ( fixed_phy_ptrs[i] )
+-			device_unregister(&fixed_phy_ptrs[i]->phydev->dev);
+-}
+-
+-module_init(fixed_init);
+-module_exit(fixed_exit);
 diff --git a/drivers/net/plip.c b/drivers/net/plip.c
 index 57c9866..fee3d7b 100644
 --- a/drivers/net/plip.c
@@ -392664,6 +535484,24 @@
  	      "James Chapman <jchapman at katalix.com>");
  MODULE_DESCRIPTION("PPP over L2TP over UDP");
  MODULE_LICENSE("GPL");
+diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
+index 0a42bf5..055af08 100644
+--- a/drivers/net/ps3_gelic_net.c
++++ b/drivers/net/ps3_gelic_net.c
+@@ -58,11 +58,11 @@ static inline struct device *ctodev(struct gelic_net_card *card)
+ {
+ 	return &card->dev->core;
+ }
+-static inline unsigned int bus_id(struct gelic_net_card *card)
++static inline u64 bus_id(struct gelic_net_card *card)
+ {
+ 	return card->dev->bus_id;
+ }
+-static inline unsigned int dev_id(struct gelic_net_card *card)
++static inline u64 dev_id(struct gelic_net_card *card)
+ {
+ 	return card->dev->dev_id;
+ }
 diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
 index cf0774d..a6aeb9d 100644
 --- a/drivers/net/qla3xxx.c
@@ -398994,7 +541832,7 @@
  		memset(tun->chr_filter, 0, sizeof tun->chr_filter);
  
 diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
-index abac7db..73d6ac9 100644
+index abac7db..4ffd873 100644
 --- a/drivers/net/ucc_geth.c
 +++ b/drivers/net/ucc_geth.c
 @@ -3614,9 +3614,6 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
@@ -399007,6 +541845,89 @@
  	uccf = ugeth->uccf;
  	ug_info = ugeth->ug_info;
  
+@@ -3822,6 +3819,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
+ 	int err, ucc_num, max_speed = 0;
+ 	const phandle *ph;
+ 	const unsigned int *prop;
++	const char *sprop;
+ 	const void *mac_addr;
+ 	phy_interface_t phy_interface;
+ 	static const int enet_to_speed[] = {
+@@ -3854,10 +3852,56 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
+ 
+ 	ug_info->uf_info.ucc_num = ucc_num;
+ 
+-	prop = of_get_property(np, "rx-clock", NULL);
+-	ug_info->uf_info.rx_clock = *prop;
+-	prop = of_get_property(np, "tx-clock", NULL);
+-	ug_info->uf_info.tx_clock = *prop;
++	sprop = of_get_property(np, "rx-clock-name", NULL);
++	if (sprop) {
++		ug_info->uf_info.rx_clock = qe_clock_source(sprop);
++		if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
++		    (ug_info->uf_info.rx_clock > QE_CLK24)) {
++			printk(KERN_ERR
++				"ucc_geth: invalid rx-clock-name property\n");
++			return -EINVAL;
++		}
++	} else {
++		prop = of_get_property(np, "rx-clock", NULL);
++		if (!prop) {
++			/* If both rx-clock-name and rx-clock are missing,
++			   we want to tell people to use rx-clock-name. */
++			printk(KERN_ERR
++				"ucc_geth: missing rx-clock-name property\n");
++			return -EINVAL;
++		}
++		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
++			printk(KERN_ERR
++				"ucc_geth: invalid rx-clock property\n");
++			return -EINVAL;
++		}
++		ug_info->uf_info.rx_clock = *prop;
++	}
++
++	sprop = of_get_property(np, "tx-clock-name", NULL);
++	if (sprop) {
++		ug_info->uf_info.tx_clock = qe_clock_source(sprop);
++		if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
++		    (ug_info->uf_info.tx_clock > QE_CLK24)) {
++			printk(KERN_ERR
++				"ucc_geth: invalid tx-clock-name property\n");
++			return -EINVAL;
++		}
++	} else {
++		prop = of_get_property(np, "rx-clock", NULL);
++		if (!prop) {
++			printk(KERN_ERR
++				"ucc_geth: missing tx-clock-name property\n");
++			return -EINVAL;
++		}
++		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
++			printk(KERN_ERR
++				"ucc_geth: invalid tx-clock property\n");
++			return -EINVAL;
++		}
++		ug_info->uf_info.tx_clock = *prop;
++	}
++
+ 	err = of_address_to_resource(np, 0, &res);
+ 	if (err)
+ 		return -EINVAL;
+diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
+index df884f0..e3ba14a 100644
+--- a/drivers/net/ucc_geth_mii.c
++++ b/drivers/net/ucc_geth_mii.c
+@@ -256,6 +256,9 @@ static struct of_device_id uec_mdio_match[] = {
+ 		.type = "mdio",
+ 		.compatible = "ucc_geth_phy",
+ 	},
++	{
++		.compatible = "fsl,ucc-mdio",
++	},
+ 	{},
+ };
+ 
 diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
 index 1ffdd10..633a511 100644
 --- a/drivers/net/usb/dm9601.c
@@ -508446,6 +651367,112 @@
  			mcl++;
  			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
  		}
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 9377f3b..b306fef 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -273,3 +273,61 @@ struct device_node *of_find_compatible_node(struct device_node *from,
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_compatible_node);
++
++/**
++ * of_match_node - Tell if a device_node has a matching of_match structure
++ *	@matches:	array of of device match structures to search in
++ *	@node:		the of device structure to match against
++ *
++ *	Low level utility function used by device matching.
++ */
++const struct of_device_id *of_match_node(const struct of_device_id *matches,
++					 const struct device_node *node)
++{
++	while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
++		int match = 1;
++		if (matches->name[0])
++			match &= node->name
++				&& !strcmp(matches->name, node->name);
++		if (matches->type[0])
++			match &= node->type
++				&& !strcmp(matches->type, node->type);
++		if (matches->compatible[0])
++			match &= of_device_is_compatible(node,
++						matches->compatible);
++		if (match)
++			return matches;
++		matches++;
++	}
++	return NULL;
++}
++EXPORT_SYMBOL(of_match_node);
++
++/**
++ *	of_find_matching_node - Find a node based on an of_device_id match
++ *				table.
++ *	@from:		The node to start searching from or NULL, the node
++ *			you pass will not be searched, only the next one
++ *			will; typically, you pass what the previous call
++ *			returned. of_node_put() will be called on it
++ *	@matches:	array of of device match structures to search in
++ *
++ *	Returns a node pointer with refcount incremented, use
++ *	of_node_put() on it when done.
++ */
++struct device_node *of_find_matching_node(struct device_node *from,
++					  const struct of_device_id *matches)
++{
++	struct device_node *np;
++
++	read_lock(&devtree_lock);
++	np = from ? from->allnext : allnodes;
++	for (; np; np = np->allnext) {
++		if (of_match_node(matches, np) && of_node_get(np))
++			break;
++	}
++	of_node_put(from);
++	read_unlock(&devtree_lock);
++	return np;
++}
++EXPORT_SYMBOL(of_find_matching_node);
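As a brief aside, here is a minimal sketch of how a driver might use the of_find_matching_node() helper that the hunk above adds to drivers/of/base.c. It is illustrative only and not part of the patch; the match table contents and the demo_* names are made up.

/* Hypothetical match table; the compatible string is an example only. */
static const struct of_device_id demo_ids[] = {
	{ .compatible = "example,demo-device", },
	{ .type = "mdio", },
	{ /* sentinel */ },
};

static void demo_walk_matching_nodes(void)
{
	struct device_node *np = NULL;

	/*
	 * of_find_matching_node() returns each matching node with its
	 * refcount held and calls of_node_put() on the node passed in,
	 * so this loop needs no explicit of_node_put() unless it breaks
	 * out early.
	 */
	while ((np = of_find_matching_node(np, demo_ids)))
		pr_debug("matched %s\n", np->full_name);
}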
+diff --git a/drivers/of/device.c b/drivers/of/device.c
+index 6245f06..29681c4 100644
+--- a/drivers/of/device.c
++++ b/drivers/of/device.c
+@@ -10,35 +10,6 @@
+ #include <asm/errno.h>
+ 
+ /**
+- * of_match_node - Tell if an device_node has a matching of_match structure
+- * @ids: array of of device match structures to search in
+- * @node: the of device structure to match against
+- *
+- * Low level utility function used by device matching.
+- */
+-const struct of_device_id *of_match_node(const struct of_device_id *matches,
+-					 const struct device_node *node)
+-{
+-	while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
+-		int match = 1;
+-		if (matches->name[0])
+-			match &= node->name
+-				&& !strcmp(matches->name, node->name);
+-		if (matches->type[0])
+-			match &= node->type
+-				&& !strcmp(matches->type, node->type);
+-		if (matches->compatible[0])
+-			match &= of_device_is_compatible(node,
+-						matches->compatible);
+-		if (match)
+-			return matches;
+-		matches++;
+-	}
+-	return NULL;
+-}
+-EXPORT_SYMBOL(of_match_node);
+-
+-/**
+  * of_match_device - Tell if an of_device structure has a matching
+  * of_match structure
+  * @ids: array of of device match structures to search in
 diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
 index a6d6b24..703b85e 100644
 --- a/drivers/parisc/led.c
@@ -508945,6 +651972,19 @@
  }
  
  
+diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
+index 5550556..f697f3d 100644
+--- a/drivers/pci/Makefile
++++ b/drivers/pci/Makefile
+@@ -32,7 +32,7 @@ obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_PARISC) += setup-bus.o
+ obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_PPC32) += setup-irq.o
+-obj-$(CONFIG_PPC64) += setup-bus.o
++obj-$(CONFIG_PPC) += setup-bus.o
+ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_X86_VISWS) += setup-irq.o
+ 
 diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
 index 47d26b6..750ebd7 100644
 --- a/drivers/pci/hotplug/acpiphp_ibm.c
@@ -509074,6 +652114,19 @@
  EXPORT_SYMBOL_GPL(pci_hp_register);
  EXPORT_SYMBOL_GPL(pci_hp_deregister);
  EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
+diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
+index b169b0e..191954b 100644
+--- a/drivers/pci/hotplug/rpadlpar_core.c
++++ b/drivers/pci/hotplug/rpadlpar_core.c
+@@ -155,7 +155,7 @@ static void dlpar_pci_add_bus(struct device_node *dn)
+ 	    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ 		of_scan_pci_bridge(dn, dev);
+ 
+-	pcibios_fixup_new_pci_devices(dev->subordinate,0);
++	pcibios_fixup_new_pci_devices(dev->subordinate);
+ 
+ 	/* Claim new bus resources */
+ 	pcibios_claim_one_bus(dev->bus);
 diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
 index a080fed..e32148a 100644
 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -509698,6 +652751,1512 @@
  }
  
  int power_supply_register(struct device *parent, struct power_supply *psy)
+diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
+index 1f5a2d3..ccea15c 100644
+--- a/drivers/ps3/Makefile
++++ b/drivers/ps3/Makefile
+@@ -4,3 +4,4 @@ ps3av_mod-objs		+= ps3av.o ps3av_cmd.o
+ obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
+ obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
+ obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
++obj-$(CONFIG_PS3_LPM) += ps3-lpm.o
+diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
+new file mode 100644
+index 0000000..4c06654
+--- /dev/null
++++ b/drivers/ps3/ps3-lpm.c
+@@ -0,0 +1,1248 @@
++/*
++ * PS3 Logical Performance Monitor.
++ *
++ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
++ *  Copyright 2007 Sony Corp.
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License as published by
++ *  the Free Software Foundation; version 2 of the License.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *  GNU General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/uaccess.h>
++#include <asm/ps3.h>
++#include <asm/lv1call.h>
++#include <asm/cell-pmu.h>
++
++
++/* BOOKMARK tag macros */
++#define PS3_PM_BOOKMARK_START                    0x8000000000000000ULL
++#define PS3_PM_BOOKMARK_STOP                     0x4000000000000000ULL
++#define PS3_PM_BOOKMARK_TAG_KERNEL               0x1000000000000000ULL
++#define PS3_PM_BOOKMARK_TAG_USER                 0x3000000000000000ULL
++#define PS3_PM_BOOKMARK_TAG_MASK_HI              0xF000000000000000ULL
++#define PS3_PM_BOOKMARK_TAG_MASK_LO              0x0F00000000000000ULL
++
++/* CBE PM CONTROL register macros */
++#define PS3_PM_CONTROL_PPU_TH0_BOOKMARK          0x00001000
++#define PS3_PM_CONTROL_PPU_TH1_BOOKMARK          0x00000800
++#define PS3_PM_CONTROL_PPU_COUNT_MODE_MASK       0x000C0000
++#define PS3_PM_CONTROL_PPU_COUNT_MODE_PROBLEM    0x00080000
++#define PS3_WRITE_PM_MASK                        0xFFFFFFFFFFFFFFFFULL
++
++/* CBE PM START STOP register macros */
++#define PS3_PM_START_STOP_PPU_TH0_BOOKMARK_START 0x02000000
++#define PS3_PM_START_STOP_PPU_TH1_BOOKMARK_START 0x01000000
++#define PS3_PM_START_STOP_PPU_TH0_BOOKMARK_STOP  0x00020000
++#define PS3_PM_START_STOP_PPU_TH1_BOOKMARK_STOP  0x00010000
++#define PS3_PM_START_STOP_START_MASK             0xFF000000
++#define PS3_PM_START_STOP_STOP_MASK              0x00FF0000
++
++/* CBE PM COUNTER register macros */
++#define PS3_PM_COUNTER_MASK_HI                   0xFFFFFFFF00000000ULL
++#define PS3_PM_COUNTER_MASK_LO                   0x00000000FFFFFFFFULL
++
++/* BASE SIGNAL GROUP NUMBER macros */
++#define PM_ISLAND2_BASE_SIGNAL_GROUP_NUMBER  0
++#define PM_ISLAND2_SIGNAL_GROUP_NUMBER1      6
++#define PM_ISLAND2_SIGNAL_GROUP_NUMBER2      7
++#define PM_ISLAND3_BASE_SIGNAL_GROUP_NUMBER  7
++#define PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER  15
++#define PM_SPU_TRIGGER_SIGNAL_GROUP_NUMBER   17
++#define PM_SPU_EVENT_SIGNAL_GROUP_NUMBER     18
++#define PM_ISLAND5_BASE_SIGNAL_GROUP_NUMBER  18
++#define PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER  24
++#define PM_ISLAND7_BASE_SIGNAL_GROUP_NUMBER  49
++#define PM_ISLAND8_BASE_SIGNAL_GROUP_NUMBER  52
++#define PM_SIG_GROUP_SPU                     41
++#define PM_SIG_GROUP_SPU_TRIGGER             42
++#define PM_SIG_GROUP_SPU_EVENT               43
++#define PM_SIG_GROUP_MFC_MAX                 60
++
++/**
++ * struct ps3_lpm_shadow_regs - Performance monitor shadow registers.
++ *
++ * @pm_control: Shadow of the processor's pm_control register.
++ * @pm_start_stop: Shadow of the processor's pm_start_stop register.
++ * @pm_interval: Shadow of the processor's pm_interval register.
++ * @group_control: Shadow of the processor's group_control register.
++ * @debug_bus_control: Shadow of the processor's debug_bus_control register.
++ *
++ * The logical performance monitor provides a write-only interface to
++ * these processor registers.  These shadow variables cache the processor
++ * register values for reading.
++ *
++ * The initial value of the shadow registers at lpm creation is
++ * PS3_LPM_SHADOW_REG_INIT.
++ */
++
++struct ps3_lpm_shadow_regs {
++	u64 pm_control;
++	u64 pm_start_stop;
++	u64 pm_interval;
++	u64 group_control;
++	u64 debug_bus_control;
++};
++
++#define PS3_LPM_SHADOW_REG_INIT 0xFFFFFFFF00000000ULL
++
++/**
++ * struct ps3_lpm_priv - Private lpm device data.
++ *
++ * @open: An atomic variable indicating the lpm driver has been opened.
++ * @rights: The lpm rights granted by the system policy module.  A logical
++ *  OR of enum ps3_lpm_rights.
++ * @node_id: The node id of a BE processor whose performance monitor this
++ *  lpar has the right to use.
++ * @pu_id: The lv1 id of the logical PU.
++ * @lpm_id: The lv1 id of this lpm instance.
++ * @outlet_id: The outlet created by lv1 for this lpm instance.
++ * @tb_count: The number of bytes of data held in the lv1 trace buffer.
++ * @tb_cache: Kernel buffer to receive the data from the lv1 trace buffer.
++ *  Must be 128 byte aligned.
++ * @tb_cache_size: Size of the kernel @tb_cache buffer.  Must be 128 byte
++ *  aligned.
++ * @tb_cache_internal: An unaligned buffer allocated by this driver to be
++ *  used for the trace buffer cache when ps3_lpm_open() is called with a
++ *  NULL tb_cache argument.  Otherwise unused.
++ * @shadow: Processor register shadow of type struct ps3_lpm_shadow_regs.
++ * @sbd: The struct ps3_system_bus_device attached to this driver.
++ *
++ * The trace buffer is a buffer allocated and used internally by the lv1
++ * hypervisor to collect trace data.  The trace buffer cache is a guest
++ * buffer that accepts the trace data from the trace buffer.
++ */
++
++struct ps3_lpm_priv {
++	atomic_t open;
++	u64 rights;
++	u64 node_id;
++	u64 pu_id;
++	u64 lpm_id;
++	u64 outlet_id;
++	u64 tb_count;
++	void *tb_cache;
++	u64 tb_cache_size;
++	void *tb_cache_internal;
++	struct ps3_lpm_shadow_regs shadow;
++	struct ps3_system_bus_device *sbd;
++};
++
++enum {
++	PS3_LPM_DEFAULT_TB_CACHE_SIZE = 0x4000,
++};
++
++/**
++ * lpm_priv - Static instance of the lpm data.
++ *
++ * Since the exported routines don't support the notion of a device
++ * instance, we need to hold the instance in this static variable
++ * and then only allow at most one instance at a time to be created.
++ */
++
++static struct ps3_lpm_priv *lpm_priv;
++
++static struct device *sbd_core(void)
++{
++	BUG_ON(!lpm_priv || !lpm_priv->sbd);
++	return &lpm_priv->sbd->core;
++}
++
++/**
++ * use_start_stop_bookmark - Enable the PPU bookmark trace.
++ *
++ * It enables the PPU bookmark triggers ONLY if no other triggers are set.
++ * The start/stop bookmarks are inserted at ps3_enable_pm() and ps3_disable_pm()
++ * to start/stop LPM.
++ *
++ * Used to get good quality data from the performance counters.
++ */
++
++enum {use_start_stop_bookmark = 1,};
++
++void ps3_set_bookmark(u64 bookmark)
++{
++	/*
++	 * As per the PPE book IV, to avoid bookmark loss there must
++	 * not be a traced branch within 10 cycles of setting the
++	 * SPRN_BKMK register.  The actual text is unclear if 'within'
++	 * includes cycles before the call.
++	 */
++
++	asm volatile("or 29, 29, 29;"); /* db10cyc */
++	mtspr(SPRN_BKMK, bookmark);
++	asm volatile("or 29, 29, 29;"); /* db10cyc */
++}
++EXPORT_SYMBOL_GPL(ps3_set_bookmark);
++
++void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id)
++{
++	u64 bookmark;
++
++	bookmark = (get_tb() & 0x00000000FFFFFFFFULL) |
++		PS3_PM_BOOKMARK_TAG_KERNEL;
++	bookmark = ((tag << 56) & PS3_PM_BOOKMARK_TAG_MASK_LO) |
++		(incident << 48) | (th_id << 32) | bookmark;
++	ps3_set_bookmark(bookmark);
++}
++EXPORT_SYMBOL_GPL(ps3_set_pm_bookmark);
++
++/**
++ * ps3_read_phys_ctr - Read physical counter registers.
++ *
++ * Each physical counter can act as one 32 bit counter or as two 16 bit
++ * counters.
++ */
++
++u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr)
++{
++	int result;
++	u64 counter0415;
++	u64 counter2637;
++
++	if (phys_ctr >= NR_PHYS_CTRS) {
++		dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
++			__LINE__, phys_ctr);
++		return 0;
++	}
++
++	result = lv1_set_lpm_counter(lpm_priv->lpm_id, 0, 0, 0, 0, &counter0415,
++				     &counter2637);
++	if (result) {
++		dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter failed: "
++			"phys_ctr %u, %s\n", __func__, __LINE__, phys_ctr,
++			ps3_result(result));
++		return 0;
++	}
++
++	switch (phys_ctr) {
++	case 0:
++		return counter0415 >> 32;
++	case 1:
++		return counter0415 & PS3_PM_COUNTER_MASK_LO;
++	case 2:
++		return counter2637 >> 32;
++	case 3:
++		return counter2637 & PS3_PM_COUNTER_MASK_LO;
++	default:
++		BUG();
++	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ps3_read_phys_ctr);
++
++/**
++ * ps3_write_phys_ctr - Write physical counter registers.
++ *
++ * Each physical counter can act as one 32 bit counter or as two 16 bit
++ * counters.
++ */
++
++void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
++{
++	u64 counter0415;
++	u64 counter0415_mask;
++	u64 counter2637;
++	u64 counter2637_mask;
++	int result;
++
++	if (phys_ctr >= NR_PHYS_CTRS) {
++		dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
++			__LINE__, phys_ctr);
++		return;
++	}
++
++	switch (phys_ctr) {
++	case 0:
++		counter0415 = (u64)val << 32;
++		counter0415_mask = PS3_PM_COUNTER_MASK_HI;
++		counter2637 = 0x0;
++		counter2637_mask = 0x0;
++		break;
++	case 1:
++		counter0415 = (u64)val;
++		counter0415_mask = PS3_PM_COUNTER_MASK_LO;
++		counter2637 = 0x0;
++		counter2637_mask = 0x0;
++		break;
++	case 2:
++		counter0415 = 0x0;
++		counter0415_mask = 0x0;
++		counter2637 = (u64)val << 32;
++		counter2637_mask = PS3_PM_COUNTER_MASK_HI;
++		break;
++	case 3:
++		counter0415 = 0x0;
++		counter0415_mask = 0x0;
++		counter2637 = (u64)val;
++		counter2637_mask = PS3_PM_COUNTER_MASK_LO;
++		break;
++	default:
++		BUG();
++	}
++
++	result = lv1_set_lpm_counter(lpm_priv->lpm_id,
++				     counter0415, counter0415_mask,
++				     counter2637, counter2637_mask,
++				     &counter0415, &counter2637);
++	if (result)
++		dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter failed: "
++			"phys_ctr %u, val %u, %s\n", __func__, __LINE__,
++			phys_ctr, val, ps3_result(result));
++}
++EXPORT_SYMBOL_GPL(ps3_write_phys_ctr);
++
++/**
++ * ps3_read_ctr - Read counter.
++ *
++ * Read 16 or 32 bits depending on the current size of the counter.
++ * Counters 4, 5, 6 & 7 are always 16 bit.
++ */
++
++u32 ps3_read_ctr(u32 cpu, u32 ctr)
++{
++	u32 val;
++	u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
++
++	val = ps3_read_phys_ctr(cpu, phys_ctr);
++
++	if (ps3_get_ctr_size(cpu, phys_ctr) == 16)
++		val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
++
++	return val;
++}
++EXPORT_SYMBOL_GPL(ps3_read_ctr);
++
++/**
++ * ps3_write_ctr - Write counter.
++ *
++ * Write 16 or 32 bits depending on the current size of the counter.
++ * Counters 4, 5, 6 & 7 are always 16 bit.
++ */
++
++void ps3_write_ctr(u32 cpu, u32 ctr, u32 val)
++{
++	u32 phys_ctr;
++	u32 phys_val;
++
++	phys_ctr = ctr & (NR_PHYS_CTRS - 1);
++
++	if (ps3_get_ctr_size(cpu, phys_ctr) == 16) {
++		phys_val = ps3_read_phys_ctr(cpu, phys_ctr);
++
++		if (ctr < NR_PHYS_CTRS)
++			val = (val << 16) | (phys_val & 0xffff);
++		else
++			val = (val & 0xffff) | (phys_val & 0xffff0000);
++	}
++
++	ps3_write_phys_ctr(cpu, phys_ctr, val);
++}
++EXPORT_SYMBOL_GPL(ps3_write_ctr);
++
++/**
++ * ps3_read_pm07_control - Read counter control registers.
++ *
++ * Each logical counter has a corresponding control register.
++ */
++
++u32 ps3_read_pm07_control(u32 cpu, u32 ctr)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ps3_read_pm07_control);
++
++/**
++ * ps3_write_pm07_control - Write counter control registers.
++ *
++ * Each logical counter has a corresponding control register.
++ */
++
++void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val)
++{
++	int result;
++	static const u64 mask = 0xFFFFFFFFFFFFFFFFULL;
++	u64 old_value;
++
++	if (ctr >= NR_CTRS) {
++		dev_dbg(sbd_core(), "%s:%u: ctr too big: %u\n", __func__,
++			__LINE__, ctr);
++		return;
++	}
++
++	result = lv1_set_lpm_counter_control(lpm_priv->lpm_id, ctr, val, mask,
++					     &old_value);
++	if (result)
++		dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter_control "
++			"failed: ctr %u, %s\n", __func__, __LINE__, ctr,
++			ps3_result(result));
++}
++EXPORT_SYMBOL_GPL(ps3_write_pm07_control);
++
++/**
++ * ps3_read_pm - Read Other LPM control registers.
++ */
++
++u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
++{
++	int result = 0;
++	u64 val = 0;
++
++	switch (reg) {
++	case pm_control:
++		return lpm_priv->shadow.pm_control;
++	case trace_address:
++		return CBE_PM_TRACE_BUF_EMPTY;
++	case pm_start_stop:
++		return lpm_priv->shadow.pm_start_stop;
++	case pm_interval:
++		return lpm_priv->shadow.pm_interval;
++	case group_control:
++		return lpm_priv->shadow.group_control;
++	case debug_bus_control:
++		return lpm_priv->shadow.debug_bus_control;
++	case pm_status:
++		result = lv1_get_lpm_interrupt_status(lpm_priv->lpm_id,
++						      &val);
++		if (result) {
++			val = 0;
++			dev_dbg(sbd_core(), "%s:%u: lv1 get_lpm_status failed: "
++				"reg %u, %s\n", __func__, __LINE__, reg,
++				ps3_result(result));
++		}
++		return (u32)val;
++	case ext_tr_timer:
++		return 0;
++	default:
++		dev_dbg(sbd_core(), "%s:%u: unknown reg: %d\n", __func__,
++			__LINE__, reg);
++		BUG();
++		break;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ps3_read_pm);
++
++/**
++ * ps3_write_pm - Write Other LPM control registers.
++ */
++
++void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
++{
++	int result = 0;
++	u64 dummy;
++
++	switch (reg) {
++	case group_control:
++		if (val != lpm_priv->shadow.group_control)
++			result = lv1_set_lpm_group_control(lpm_priv->lpm_id,
++							   val,
++							   PS3_WRITE_PM_MASK,
++							   &dummy);
++		lpm_priv->shadow.group_control = val;
++		break;
++	case debug_bus_control:
++		if (val != lpm_priv->shadow.debug_bus_control)
++			result = lv1_set_lpm_debug_bus_control(lpm_priv->lpm_id,
++							      val,
++							      PS3_WRITE_PM_MASK,
++							      &dummy);
++		lpm_priv->shadow.debug_bus_control = val;
++		break;
++	case pm_control:
++		if (use_start_stop_bookmark)
++			val |= (PS3_PM_CONTROL_PPU_TH0_BOOKMARK |
++				PS3_PM_CONTROL_PPU_TH1_BOOKMARK);
++		if (val != lpm_priv->shadow.pm_control)
++			result = lv1_set_lpm_general_control(lpm_priv->lpm_id,
++							     val,
++							     PS3_WRITE_PM_MASK,
++							     0, 0, &dummy,
++							     &dummy);
++		lpm_priv->shadow.pm_control = val;
++		break;
++	case pm_interval:
++		if (val != lpm_priv->shadow.pm_interval)
++			result = lv1_set_lpm_interval(lpm_priv->lpm_id, val,
++						   PS3_WRITE_PM_MASK, &dummy);
++		lpm_priv->shadow.pm_interval = val;
++		break;
++	case pm_start_stop:
++		if (val != lpm_priv->shadow.pm_start_stop)
++			result = lv1_set_lpm_trigger_control(lpm_priv->lpm_id,
++							     val,
++							     PS3_WRITE_PM_MASK,
++							     &dummy);
++		lpm_priv->shadow.pm_start_stop = val;
++		break;
++	case trace_address:
++	case ext_tr_timer:
++	case pm_status:
++		break;
++	default:
++		dev_dbg(sbd_core(), "%s:%u: unknown reg: %d\n", __func__,
++			__LINE__, reg);
++		BUG();
++		break;
++	}
++
++	if (result)
++		dev_err(sbd_core(), "%s:%u: lv1 set_control failed: "
++			"reg %u, %s\n", __func__, __LINE__, reg,
++			ps3_result(result));
++}
++EXPORT_SYMBOL_GPL(ps3_write_pm);
++
++/**
++ * ps3_get_ctr_size - Get the size of a physical counter.
++ *
++ * Returns either 16 or 32.
++ */
++
++u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr)
++{
++	u32 pm_ctrl;
++
++	if (phys_ctr >= NR_PHYS_CTRS) {
++		dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
++			__LINE__, phys_ctr);
++		return 0;
++	}
++
++	pm_ctrl = ps3_read_pm(cpu, pm_control);
++	return (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
++}
++EXPORT_SYMBOL_GPL(ps3_get_ctr_size);
++
++/**
++ * ps3_set_ctr_size - Set the size of a physical counter to 16 or 32 bits.
++ */
++
++void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
++{
++	u32 pm_ctrl;
++
++	if (phys_ctr >= NR_PHYS_CTRS) {
++		dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
++			__LINE__, phys_ctr);
++		return;
++	}
++
++	pm_ctrl = ps3_read_pm(cpu, pm_control);
++
++	switch (ctr_size) {
++	case 16:
++		pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
++		ps3_write_pm(cpu, pm_control, pm_ctrl);
++		break;
++
++	case 32:
++		pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
++		ps3_write_pm(cpu, pm_control, pm_ctrl);
++		break;
++	default:
++		BUG();
++	}
++}
++EXPORT_SYMBOL_GPL(ps3_set_ctr_size);
++
++static u64 pm_translate_signal_group_number_on_island2(u64 subgroup)
++{
++
++	if (subgroup == 2)
++		subgroup = 3;
++
++	if (subgroup <= 6)
++		return PM_ISLAND2_BASE_SIGNAL_GROUP_NUMBER + subgroup;
++	else if (subgroup == 7)
++		return PM_ISLAND2_SIGNAL_GROUP_NUMBER1;
++	else
++		return PM_ISLAND2_SIGNAL_GROUP_NUMBER2;
++}
++
++static u64 pm_translate_signal_group_number_on_island3(u64 subgroup)
++{
++
++	switch (subgroup) {
++	case 2:
++	case 3:
++	case 4:
++		subgroup += 2;
++		break;
++	case 5:
++		subgroup = 8;
++		break;
++	default:
++		break;
++	}
++	return PM_ISLAND3_BASE_SIGNAL_GROUP_NUMBER + subgroup;
++}
++
++static u64 pm_translate_signal_group_number_on_island4(u64 subgroup)
++{
++	return PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER + subgroup;
++}
++
++static u64 pm_translate_signal_group_number_on_island5(u64 subgroup)
++{
++
++	switch (subgroup) {
++	case 3:
++		subgroup = 4;
++		break;
++	case 4:
++		subgroup = 6;
++		break;
++	default:
++		break;
++	}
++	return PM_ISLAND5_BASE_SIGNAL_GROUP_NUMBER + subgroup;
++}
++
++static u64 pm_translate_signal_group_number_on_island6(u64 subgroup,
++						       u64 subsubgroup)
++{
++	switch (subgroup) {
++	case 3:
++	case 4:
++	case 5:
++		subgroup += 1;
++		break;
++	default:
++		break;
++	}
++
++	switch (subsubgroup) {
++	case 4:
++	case 5:
++	case 6:
++		subsubgroup += 2;
++		break;
++	case 7:
++	case 8:
++	case 9:
++	case 10:
++		subsubgroup += 4;
++		break;
++	case 11:
++	case 12:
++	case 13:
++		subsubgroup += 5;
++		break;
++	default:
++		break;
++	}
++
++	if (subgroup <= 5)
++		return (PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER + subgroup);
++	else
++		return (PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER + subgroup
++			+ subsubgroup - 1);
++}
++
++static u64 pm_translate_signal_group_number_on_island7(u64 subgroup)
++{
++	return PM_ISLAND7_BASE_SIGNAL_GROUP_NUMBER + subgroup;
++}
++
++static u64 pm_translate_signal_group_number_on_island8(u64 subgroup)
++{
++	return PM_ISLAND8_BASE_SIGNAL_GROUP_NUMBER + subgroup;
++}
++
++static u64 pm_signal_group_to_ps3_lv1_signal_group(u64 group)
++{
++	u64 island;
++	u64 subgroup;
++	u64 subsubgroup;
++
++	subgroup = 0;
++	subsubgroup = 0;
++	island = 0;
++	if (group < 1000) {
++		if (group < 100) {
++			if (20 <= group && group < 30) {
++				island = 2;
++				subgroup = group - 20;
++			} else if (30 <= group && group < 40) {
++				island = 3;
++				subgroup = group - 30;
++			} else if (40 <= group && group < 50) {
++				island = 4;
++				subgroup = group - 40;
++			} else if (50 <= group && group < 60) {
++				island = 5;
++				subgroup = group - 50;
++			} else if (60 <= group && group < 70) {
++				island = 6;
++				subgroup = group - 60;
++			} else if (70 <= group && group < 80) {
++				island = 7;
++				subgroup = group - 70;
++			} else if (80 <= group && group < 90) {
++				island = 8;
++				subgroup = group - 80;
++			}
++		} else if (200 <= group && group < 300) {
++			island = 2;
++			subgroup = group - 200;
++		} else if (600 <= group && group < 700) {
++			island = 6;
++			subgroup = 5;
++			subsubgroup = group - 650;
++		}
++	} else if (6000 <= group && group < 7000) {
++		island = 6;
++		subgroup = 5;
++		subsubgroup = group - 6500;
++	}
++
++	switch (island) {
++	case 2:
++		return pm_translate_signal_group_number_on_island2(subgroup);
++	case 3:
++		return pm_translate_signal_group_number_on_island3(subgroup);
++	case 4:
++		return pm_translate_signal_group_number_on_island4(subgroup);
++	case 5:
++		return pm_translate_signal_group_number_on_island5(subgroup);
++	case 6:
++		return pm_translate_signal_group_number_on_island6(subgroup,
++								   subsubgroup);
++	case 7:
++		return pm_translate_signal_group_number_on_island7(subgroup);
++	case 8:
++		return pm_translate_signal_group_number_on_island8(subgroup);
++	default:
++		dev_dbg(sbd_core(), "%s:%u: island not found: %lu\n", __func__,
++			__LINE__, group);
++		BUG();
++		break;
++	}
++	return 0;
++}
++
++static u64 pm_bus_word_to_ps3_lv1_bus_word(u8 word)
++{
++
++	switch (word) {
++	case 1:
++		return 0xF000;
++	case 2:
++		return 0x0F00;
++	case 4:
++		return 0x00F0;
++	case 8:
++	default:
++		return 0x000F;
++	}
++}
++
++static int __ps3_set_signal(u64 lv1_signal_group, u64 bus_select,
++			    u64 signal_select, u64 attr1, u64 attr2, u64 attr3)
++{
++	int ret;
++
++	ret = lv1_set_lpm_signal(lpm_priv->lpm_id, lv1_signal_group, bus_select,
++				 signal_select, attr1, attr2, attr3);
++	if (ret)
++		dev_err(sbd_core(),
++			"%s:%u: error:%d 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
++			__func__, __LINE__, ret, lv1_signal_group, bus_select,
++			signal_select, attr1, attr2, attr3);
++
++	return ret;
++}
++
++int ps3_set_signal(u64 signal_group, u8 signal_bit, u16 sub_unit,
++		   u8 bus_word)
++{
++	int ret;
++	u64 lv1_signal_group;
++	u64 bus_select;
++	u64 signal_select;
++	u64 attr1, attr2, attr3;
++
++	if (signal_group == 0)
++		return __ps3_set_signal(0, 0, 0, 0, 0, 0);
++
++	lv1_signal_group =
++		pm_signal_group_to_ps3_lv1_signal_group(signal_group);
++	bus_select = pm_bus_word_to_ps3_lv1_bus_word(bus_word);
++
++	switch (signal_group) {
++	case PM_SIG_GROUP_SPU_TRIGGER:
++		signal_select = 1;
++		signal_select = signal_select << (63 - signal_bit);
++		break;
++	case PM_SIG_GROUP_SPU_EVENT:
++		signal_select = 1;
++		signal_select = (signal_select << (63 - signal_bit)) | 0x3;
++		break;
++	default:
++		signal_select = 0;
++		break;
++	}
++
++	/*
++	 * 0: physical object.
++	 * 1: logical object.
++	 * This parameter is only used for the PPE and SPE signals.
++	 */
++	attr1 = 1;
++
++	/*
++	 * This parameter is used to specify the target physical/logical
++	 * PPE/SPE object.
++	 */
++	if (PM_SIG_GROUP_SPU <= signal_group &&
++		signal_group < PM_SIG_GROUP_MFC_MAX)
++		attr2 = sub_unit;
++	else
++		attr2 = lpm_priv->pu_id;
++
++	/*
++	 * This parameter is only used for setting the SPE signal.
++	 */
++	attr3 = 0;
++
++	ret = __ps3_set_signal(lv1_signal_group, bus_select, signal_select,
++			       attr1, attr2, attr3);
++	if (ret)
++		dev_err(sbd_core(), "%s:%u: __ps3_set_signal failed: %d\n",
++			__func__, __LINE__, ret);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(ps3_set_signal);
++
++u32 ps3_get_hw_thread_id(int cpu)
++{
++	return get_hard_smp_processor_id(cpu);
++}
++EXPORT_SYMBOL_GPL(ps3_get_hw_thread_id);
++
++/**
++ * ps3_enable_pm - Enable the entire performance monitoring unit.
++ *
++ * When we enable the LPM, all pending writes to counters get committed.
++ */
++
++void ps3_enable_pm(u32 cpu)
++{
++	int result;
++	u64 tmp;
++	int insert_bookmark = 0;
++
++	lpm_priv->tb_count = 0;
++
++	if (use_start_stop_bookmark) {
++		if (!(lpm_priv->shadow.pm_start_stop &
++			(PS3_PM_START_STOP_START_MASK
++			| PS3_PM_START_STOP_STOP_MASK))) {
++			result = lv1_set_lpm_trigger_control(lpm_priv->lpm_id,
++				(PS3_PM_START_STOP_PPU_TH0_BOOKMARK_START |
++				PS3_PM_START_STOP_PPU_TH1_BOOKMARK_START |
++				PS3_PM_START_STOP_PPU_TH0_BOOKMARK_STOP |
++				PS3_PM_START_STOP_PPU_TH1_BOOKMARK_STOP),
++				0xFFFFFFFFFFFFFFFFULL, &tmp);
++
++			if (result)
++				dev_err(sbd_core(), "%s:%u: "
++					"lv1_set_lpm_trigger_control failed: "
++					"%s\n", __func__, __LINE__,
++					ps3_result(result));
++
++			insert_bookmark = !result;
++		}
++	}
++
++	result = lv1_start_lpm(lpm_priv->lpm_id);
++
++	if (result)
++		dev_err(sbd_core(), "%s:%u: lv1_start_lpm failed: %s\n",
++			__func__, __LINE__, ps3_result(result));
++
++	if (use_start_stop_bookmark && !result && insert_bookmark)
++		ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_START);
++}
++EXPORT_SYMBOL_GPL(ps3_enable_pm);
++
++/**
++ * ps3_disable_pm - Disable the entire performance monitoring unit.
++ */
++
++void ps3_disable_pm(u32 cpu)
++{
++	int result;
++	u64 tmp;
++
++	ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_STOP);
++
++	result = lv1_stop_lpm(lpm_priv->lpm_id, &tmp);
++
++	if (result) {
++		if (result != LV1_WRONG_STATE)
++			dev_err(sbd_core(), "%s:%u: lv1_stop_lpm failed: %s\n",
++				__func__, __LINE__, ps3_result(result));
++		return;
++	}
++
++	lpm_priv->tb_count = tmp;
++
++	dev_dbg(sbd_core(), "%s:%u: tb_count %lu (%lxh)\n", __func__, __LINE__,
++		lpm_priv->tb_count, lpm_priv->tb_count);
++}
++EXPORT_SYMBOL_GPL(ps3_disable_pm);
++
++/**
++ * ps3_lpm_copy_tb - Copy data from the trace buffer to a kernel buffer.
++ * @offset: Offset in bytes from the start of the trace buffer.
++ * @buf: Copy destination.
++ * @count: Maximum count of bytes to copy.
++ * @bytes_copied: Pointer to a variable that will receive the number of
++ *  bytes copied to @buf.
++ *
++ * On error @buf will contain any successfully copied trace buffer data
++ * and bytes_copied will be set to the number of bytes successfully copied.
++ */
++
++int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
++		    unsigned long *bytes_copied)
++{
++	int result;
++
++	*bytes_copied = 0;
++
++	if (!lpm_priv->tb_cache)
++		return -EPERM;
++
++	if (offset >= lpm_priv->tb_count)
++		return 0;
++
++	count = min(count, lpm_priv->tb_count - offset);
++
++	while (*bytes_copied < count) {
++		const unsigned long request = count - *bytes_copied;
++		u64 tmp;
++
++		result = lv1_copy_lpm_trace_buffer(lpm_priv->lpm_id, offset,
++						   request, &tmp);
++		if (result) {
++			dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%lx\n",
++				__func__, __LINE__, request, offset);
++
++			dev_err(sbd_core(), "%s:%u: lv1_copy_lpm_trace_buffer "
++				"failed: %s\n", __func__, __LINE__,
++				ps3_result(result));
++			return result == LV1_WRONG_STATE ? -EBUSY : -EINVAL;
++		}
++
++		memcpy(buf, lpm_priv->tb_cache, tmp);
++		buf += tmp;
++		*bytes_copied += tmp;
++		offset += tmp;
++	}
++	dev_dbg(sbd_core(), "%s:%u: copied %lxh bytes\n", __func__, __LINE__,
++		*bytes_copied);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ps3_lpm_copy_tb);
++
++/**
++ * ps3_lpm_copy_tb_to_user - Copy data from the trace buffer to a user buffer.
++ * @offset: Offset in bytes from the start of the trace buffer.
++ * @buf: A __user copy destination.
++ * @count: Maximum count of bytes to copy.
++ * @bytes_copied: Pointer to a variable that will receive the number of
++ *  bytes copied to @buf.
++ *
++ * On error @buf will contain any successfully copied trace buffer data
++ * and bytes_copied will be set to the number of bytes successfully copied.
++ */
++
++int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
++			    unsigned long count, unsigned long *bytes_copied)
++{
++	int result;
++
++	*bytes_copied = 0;
++
++	if (!lpm_priv->tb_cache)
++		return -EPERM;
++
++	if (offset >= lpm_priv->tb_count)
++		return 0;
++
++	count = min(count, lpm_priv->tb_count - offset);
++
++	while (*bytes_copied < count) {
++		const unsigned long request = count - *bytes_copied;
++		u64 tmp;
++
++		result = lv1_copy_lpm_trace_buffer(lpm_priv->lpm_id, offset,
++						   request, &tmp);
++		if (result) {
++			dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%lx\n",
++				__func__, __LINE__, request, offset);
++			dev_err(sbd_core(), "%s:%u: lv1_copy_lpm_trace_buffer "
++				"failed: %s\n", __func__, __LINE__,
++				ps3_result(result));
++			return result == LV1_WRONG_STATE ? -EBUSY : -EINVAL;
++		}
++
++		result = copy_to_user(buf, lpm_priv->tb_cache, tmp);
++
++		if (result) {
++			dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%p\n",
++				__func__, __LINE__, tmp, buf);
++			dev_err(sbd_core(), "%s:%u: copy_to_user failed: %d\n",
++				__func__, __LINE__, result);
++			return -EFAULT;
++		}
++
++		buf += tmp;
++		*bytes_copied += tmp;
++		offset += tmp;
++	}
++	dev_dbg(sbd_core(), "%s:%u: copied %lxh bytes\n", __func__, __LINE__,
++		*bytes_copied);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ps3_lpm_copy_tb_to_user);
++
++/**
++ * ps3_get_and_clear_pm_interrupts -
++ *
++ * Clearing interrupts for the entire performance monitoring unit.
++ * Reading pm_status clears the interrupt bits.
++ */
++
++u32 ps3_get_and_clear_pm_interrupts(u32 cpu)
++{
++	return ps3_read_pm(cpu, pm_status);
++}
++EXPORT_SYMBOL_GPL(ps3_get_and_clear_pm_interrupts);
++
++/**
++ * ps3_enable_pm_interrupts -
++ *
++ * Enabling interrupts for the entire performance monitoring unit.
++ * Enables the interrupt bits in the pm_status register.
++ */
++
++void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
++{
++	if (mask)
++		ps3_write_pm(cpu, pm_status, mask);
++}
++EXPORT_SYMBOL_GPL(ps3_enable_pm_interrupts);
++
++/**
++ * ps3_disable_pm_interrupts -
++ *
++ * Disabling interrupts for the entire performance monitoring unit.
++ */
++
++void ps3_disable_pm_interrupts(u32 cpu)
++{
++	ps3_get_and_clear_pm_interrupts(cpu);
++	ps3_write_pm(cpu, pm_status, 0);
++}
++EXPORT_SYMBOL_GPL(ps3_disable_pm_interrupts);
++
++/**
++ * ps3_lpm_open - Open the logical performance monitor device.
++ * @tb_type: Specifies the type of trace buffer lv1 should use for this lpm
++ *  instance, specified by one of enum ps3_lpm_tb_type.
++ * @tb_cache: Optional user supplied buffer to use as the trace buffer cache.
++ *  If NULL, the driver will allocate and manage an internal buffer.
++ *  Unused when @tb_type is PS3_LPM_TB_TYPE_NONE.
++ * @tb_cache_size: The size in bytes of the user supplied @tb_cache buffer.
++ *  Unused when @tb_cache is NULL or @tb_type is PS3_LPM_TB_TYPE_NONE.
++ */
++
++int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
++	u64 tb_cache_size)
++{
++	int result;
++	u64 tb_size;
++
++	BUG_ON(!lpm_priv);
++	BUG_ON(tb_type != PS3_LPM_TB_TYPE_NONE
++		&& tb_type != PS3_LPM_TB_TYPE_INTERNAL);
++
++	if (tb_type == PS3_LPM_TB_TYPE_NONE && tb_cache)
++		dev_dbg(sbd_core(), "%s:%u: bad in vals\n", __func__, __LINE__);
++
++	if (!atomic_add_unless(&lpm_priv->open, 1, 1)) {
++		dev_dbg(sbd_core(), "%s:%u: busy\n", __func__, __LINE__);
++		return -EBUSY;
++	}
++
++	/* Note tb_cache needs 128 byte alignment. */
++
++	if (tb_type == PS3_LPM_TB_TYPE_NONE) {
++		lpm_priv->tb_cache_size = 0;
++		lpm_priv->tb_cache_internal = NULL;
++		lpm_priv->tb_cache = NULL;
++	} else if (tb_cache) {
++		if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128)
++			|| tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) {
++			dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
++				__func__, __LINE__);
++			result = -EINVAL;
++			goto fail_align;
++		}
++		lpm_priv->tb_cache_size = tb_cache_size;
++		lpm_priv->tb_cache_internal = NULL;
++		lpm_priv->tb_cache = tb_cache;
++	} else {
++		lpm_priv->tb_cache_size = PS3_LPM_DEFAULT_TB_CACHE_SIZE;
++		lpm_priv->tb_cache_internal = kzalloc(
++			lpm_priv->tb_cache_size + 127, GFP_KERNEL);
++		if (!lpm_priv->tb_cache_internal) {
++			dev_err(sbd_core(), "%s:%u: alloc internal tb_cache "
++				"failed\n", __func__, __LINE__);
++			result = -ENOMEM;
++			goto fail_malloc;
++		}
++		lpm_priv->tb_cache = (void *)_ALIGN_UP(
++			(unsigned long)lpm_priv->tb_cache_internal, 128);
++	}
++
++	result = lv1_construct_lpm(lpm_priv->node_id, tb_type, 0, 0,
++				ps3_mm_phys_to_lpar(__pa(lpm_priv->tb_cache)),
++				lpm_priv->tb_cache_size, &lpm_priv->lpm_id,
++				&lpm_priv->outlet_id, &tb_size);
++
++	if (result) {
++		dev_err(sbd_core(), "%s:%u: lv1_construct_lpm failed: %s\n",
++			__func__, __LINE__, ps3_result(result));
++		result = -EINVAL;
++		goto fail_construct;
++	}
++
++	lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
++	lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
++	lpm_priv->shadow.pm_interval = PS3_LPM_SHADOW_REG_INIT;
++	lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
++	lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;
++
++	dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%lx, outlet_id 0x%lx, "
++		"tb_size 0x%lx\n", __func__, __LINE__, lpm_priv->lpm_id,
++		lpm_priv->outlet_id, tb_size);
++
++	return 0;
++
++fail_construct:
++	kfree(lpm_priv->tb_cache_internal);
++	lpm_priv->tb_cache_internal = NULL;
++fail_malloc:
++fail_align:
++	atomic_dec(&lpm_priv->open);
++	return result;
++}
++EXPORT_SYMBOL_GPL(ps3_lpm_open);
++
++/**
++ * ps3_lpm_close - Close the lpm device.
++ *
++ */
++
++int ps3_lpm_close(void)
++{
++	dev_dbg(sbd_core(), "%s:%u\n", __func__, __LINE__);
++
++	lv1_destruct_lpm(lpm_priv->lpm_id);
++	lpm_priv->lpm_id = 0;
++
++	kfree(lpm_priv->tb_cache_internal);
++	lpm_priv->tb_cache_internal = NULL;
++
++	atomic_dec(&lpm_priv->open);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ps3_lpm_close);
++
++static int __devinit ps3_lpm_probe(struct ps3_system_bus_device *dev)
++{
++	dev_dbg(&dev->core, " -> %s:%u\n", __func__, __LINE__);
++
++	if (lpm_priv) {
++		dev_info(&dev->core, "%s:%u: called twice\n",
++			__func__, __LINE__);
++		return -EBUSY;
++	}
++
++	lpm_priv = kzalloc(sizeof(*lpm_priv), GFP_KERNEL);
++
++	if (!lpm_priv)
++		return -ENOMEM;
++
++	lpm_priv->sbd = dev;
++	lpm_priv->node_id = dev->lpm.node_id;
++	lpm_priv->pu_id = dev->lpm.pu_id;
++	lpm_priv->rights = dev->lpm.rights;
++
++	dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);
++
++	return 0;
++}
++
++static int ps3_lpm_remove(struct ps3_system_bus_device *dev)
++{
++	dev_dbg(&dev->core, " -> %s:%u:\n", __func__, __LINE__);
++
++	ps3_lpm_close();
++
++	kfree(lpm_priv);
++	lpm_priv = NULL;
++
++	dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);
++	return 0;
++}
++
++static struct ps3_system_bus_driver ps3_lpm_driver = {
++	.match_id = PS3_MATCH_ID_LPM,
++	.core.name	= "ps3-lpm",
++	.core.owner	= THIS_MODULE,
++	.probe		= ps3_lpm_probe,
++	.remove		= ps3_lpm_remove,
++	.shutdown	= ps3_lpm_remove,
++};
++
++static int __init ps3_lpm_init(void)
++{
++	pr_debug("%s:%d:\n", __func__, __LINE__);
++	return ps3_system_bus_driver_register(&ps3_lpm_driver);
++}
++
++static void __exit ps3_lpm_exit(void)
++{
++	pr_debug("%s:%d:\n", __func__, __LINE__);
++	ps3_system_bus_driver_unregister(&ps3_lpm_driver);
++}
++
++module_init(ps3_lpm_init);
++module_exit(ps3_lpm_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("PS3 Logical Performance Monitor Driver");
++MODULE_AUTHOR("Sony Corporation");
++MODULE_ALIAS(PS3_MODULE_ALIAS_LPM);
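For orientation, here is a minimal sketch (not part of the patch) of how a kernel client could drive the LPM interface exported above. Signal and counter configuration through ps3_set_signal() and ps3_write_pm() is omitted, error handling is kept to a minimum, and the lpm_demo name is made up.

static int lpm_demo(void)
{
	static char buf[256];
	unsigned long copied;
	int ret;

	/* NULL tb_cache: the driver allocates its own 128-byte aligned cache. */
	ret = ps3_lpm_open(PS3_LPM_TB_TYPE_INTERNAL, NULL, 0);
	if (ret)
		return ret;

	ps3_enable_pm(0);		/* start counting on cpu 0 */
	/* ... run the workload being measured ... */
	ps3_disable_pm(0);		/* stop and latch the trace buffer count */

	/* Copy whatever trace data lv1 collected into buf. */
	ret = ps3_lpm_copy_tb(0, buf, sizeof(buf), &copied);

	ps3_lpm_close();
	return ret;
}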
+diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
+index 8461b08..c3c3aba 100644
+--- a/drivers/ps3/ps3-sys-manager.c
++++ b/drivers/ps3/ps3-sys-manager.c
+@@ -452,7 +452,7 @@ static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev)
+ 	case PS3_SM_EVENT_THERMAL_ALERT:
+ 		dev_dbg(&dev->core, "%s:%d: THERMAL_ALERT (zone %u)\n",
+ 			__func__, __LINE__, event.value);
+-		printk(KERN_INFO "PS3 Thermal Alert Zone %u\n", event.value);
++		pr_info("PS3 Thermal Alert Zone %u\n", event.value);
+ 		break;
+ 	case PS3_SM_EVENT_THERMAL_CLEARED:
+ 		dev_dbg(&dev->core, "%s:%d: THERMAL_CLEARED (zone %u)\n",
+@@ -488,7 +488,7 @@ static int ps3_sys_manager_handle_cmd(struct ps3_system_bus_device *dev)
+ 	result = ps3_vuart_read(dev, &cmd, sizeof(cmd));
+ 	BUG_ON(result && "need to retry here");
+ 
+-	if(result)
++	if (result)
+ 		return result;
+ 
+ 	if (cmd.version != 1) {
+@@ -521,7 +521,7 @@ static int ps3_sys_manager_handle_msg(struct ps3_system_bus_device *dev)
+ 	result = ps3_vuart_read(dev, &header,
+ 		sizeof(struct ps3_sys_manager_header));
+ 
+-	if(result)
++	if (result)
+ 		return result;
+ 
+ 	if (header.version != 1) {
+@@ -589,9 +589,9 @@ static void ps3_sys_manager_final_power_off(struct ps3_system_bus_device *dev)
+ 		PS3_SM_WAKE_DEFAULT);
+ 	ps3_sys_manager_send_request_shutdown(dev);
+ 
+-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
++	pr_emerg("System Halted, OK to turn off power\n");
+ 
+-	while(1)
++	while (1)
+ 		ps3_sys_manager_handle_msg(dev);
+ }
+ 
+@@ -626,9 +626,9 @@ static void ps3_sys_manager_final_restart(struct ps3_system_bus_device *dev)
+ 		PS3_SM_WAKE_DEFAULT);
+ 	ps3_sys_manager_send_request_shutdown(dev);
+ 
+-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
++	pr_emerg("System Halted, OK to turn off power\n");
+ 
+-	while(1)
++	while (1)
+ 		ps3_sys_manager_handle_msg(dev);
+ }
+ 
+diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
+index bb8d5b1..90c097a 100644
+--- a/drivers/ps3/ps3-vuart.c
++++ b/drivers/ps3/ps3-vuart.c
+@@ -108,18 +108,18 @@ static struct ps3_vuart_port_priv *to_port_priv(
+ struct ports_bmp {
+ 	u64 status;
+ 	u64 unused[3];
+-} __attribute__ ((aligned (32)));
++} __attribute__((aligned(32)));
+ 
+ #define dump_ports_bmp(_b) _dump_ports_bmp(_b, __func__, __LINE__)
+ static void __maybe_unused _dump_ports_bmp(
+-	const struct ports_bmp* bmp, const char* func, int line)
++	const struct ports_bmp *bmp, const char *func, int line)
+ {
+ 	pr_debug("%s:%d: ports_bmp: %016lxh\n", func, line, bmp->status);
+ }
+ 
+ #define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
+ static void __maybe_unused _dump_port_params(unsigned int port_number,
+-	const char* func, int line)
++	const char *func, int line)
+ {
+ #if defined(DEBUG)
+ 	static const char *strings[] = {
+@@ -363,7 +363,7 @@ int ps3_vuart_disable_interrupt_disconnect(struct ps3_system_bus_device *dev)
+  */
+ 
+ static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
+-	const void* buf, unsigned int bytes, unsigned long *bytes_written)
++	const void *buf, unsigned int bytes, unsigned long *bytes_written)
+ {
+ 	int result;
+ 	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+@@ -431,7 +431,7 @@ void ps3_vuart_clear_rx_bytes(struct ps3_system_bus_device *dev,
+ 	int result;
+ 	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+ 	u64 bytes_waiting;
+-	void* tmp;
++	void *tmp;
+ 
+ 	result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes_waiting);
+ 
+@@ -526,9 +526,8 @@ int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf,
+ 
+ 	lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_KERNEL);
+ 
+-	if (!lb) {
++	if (!lb)
+ 		return -ENOMEM;
+-	}
+ 
+ 	memcpy(lb->data, buf, bytes);
+ 	lb->head = lb->data;
+@@ -878,7 +877,7 @@ static int ps3_vuart_handle_port_interrupt(struct ps3_system_bus_device *dev)
+ struct vuart_bus_priv {
+ 	struct ports_bmp *bmp;
+ 	unsigned int virq;
+-	struct semaphore probe_mutex;
++	struct mutex probe_mutex;
+ 	int use_count;
+ 	struct ps3_system_bus_device *devices[PORT_COUNT];
+ } static vuart_bus_priv;
+@@ -926,9 +925,8 @@ static int ps3_vuart_bus_interrupt_get(void)
+ 
+ 	BUG_ON(vuart_bus_priv.use_count > 2);
+ 
+-	if (vuart_bus_priv.use_count != 1) {
++	if (vuart_bus_priv.use_count != 1)
+ 		return 0;
+-	}
+ 
+ 	BUG_ON(vuart_bus_priv.bmp);
+ 
+@@ -1017,7 +1015,7 @@ static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
+ 		return -EINVAL;
+ 	}
+ 
+-	down(&vuart_bus_priv.probe_mutex);
++	mutex_lock(&vuart_bus_priv.probe_mutex);
+ 
+ 	result = ps3_vuart_bus_interrupt_get();
+ 
+@@ -1077,7 +1075,7 @@ static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
+ 		goto fail_probe;
+ 	}
+ 
+-	up(&vuart_bus_priv.probe_mutex);
++	mutex_unlock(&vuart_bus_priv.probe_mutex);
+ 
+ 	return result;
+ 
+@@ -1090,7 +1088,7 @@ fail_dev_malloc:
+ fail_busy:
+ 	ps3_vuart_bus_interrupt_put();
+ fail_setup_interrupt:
+-	up(&vuart_bus_priv.probe_mutex);
++	mutex_unlock(&vuart_bus_priv.probe_mutex);
+ 	dev_dbg(&dev->core, "%s:%d: failed\n", __func__, __LINE__);
+ 	return result;
+ }
+@@ -1129,7 +1127,7 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
+ 
+ 	BUG_ON(!dev);
+ 
+-	down(&vuart_bus_priv.probe_mutex);
++	mutex_lock(&vuart_bus_priv.probe_mutex);
+ 
+ 	dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
+ 		dev->match_id);
+@@ -1137,7 +1135,7 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
+ 	if (!dev->core.driver) {
+ 		dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
+ 			__LINE__);
+-		up(&vuart_bus_priv.probe_mutex);
++		mutex_unlock(&vuart_bus_priv.probe_mutex);
+ 		return 0;
+ 	}
+ 
+@@ -1160,7 +1158,7 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
+ 	priv = NULL;
+ 
+ 	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
+-	up(&vuart_bus_priv.probe_mutex);
++	mutex_unlock(&vuart_bus_priv.probe_mutex);
+ 	return 0;
+ }
+ 
+@@ -1180,7 +1178,7 @@ static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
+ 
+ 	BUG_ON(!dev);
+ 
+-	down(&vuart_bus_priv.probe_mutex);
++	mutex_lock(&vuart_bus_priv.probe_mutex);
+ 
+ 	dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
+ 		dev->match_id);
+@@ -1188,7 +1186,7 @@ static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
+ 	if (!dev->core.driver) {
+ 		dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
+ 			__LINE__);
+-		up(&vuart_bus_priv.probe_mutex);
++		mutex_unlock(&vuart_bus_priv.probe_mutex);
+ 		return 0;
+ 	}
+ 
+@@ -1212,7 +1210,7 @@ static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
+ 
+ 	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
+ 
+-	up(&vuart_bus_priv.probe_mutex);
++	mutex_unlock(&vuart_bus_priv.probe_mutex);
+ 	return 0;
+ }
+ 
+@@ -1223,7 +1221,7 @@ static int __init ps3_vuart_bus_init(void)
+ 	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+ 		return -ENODEV;
+ 
+-	init_MUTEX(&vuart_bus_priv.probe_mutex);
++	mutex_init(&vuart_bus_priv.probe_mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
+index f644807..80c5f1b 100644
+--- a/drivers/rapidio/rio.c
++++ b/drivers/rapidio/rio.c
+@@ -23,6 +23,7 @@
+ #include <linux/module.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
++#include <linux/interrupt.h>
+ 
+ #include "rio.h"
+ 
+@@ -476,8 +477,8 @@ int rio_init_mports(void)
+ 					port->iores.end - port->iores.start,
+ 					port->name)) {
+ 			printk(KERN_ERR
+-			       "RIO: Error requesting master port region %8.8lx-%8.8lx\n",
+-			       port->iores.start, port->iores.end - 1);
++			       "RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
++			       (u64)port->iores.start, (u64)port->iores.end - 1);
+ 			rc = -ENOMEM;
+ 			goto out;
+ 		}
 diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
 index b242cee..80e3f03 100644
 --- a/drivers/rapidio/rio.h
@@ -522443,7 +667002,7 @@
  extern void zfcp_sysfs_adapter_remove_files(struct device *);
  extern int  zfcp_sysfs_port_create_files(struct device *, u32);
 diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
-index ff866eb..e45f85f 100644
+index ff866eb..0dff058 100644
 --- a/drivers/s390/scsi/zfcp_fsf.c
 +++ b/drivers/s390/scsi/zfcp_fsf.c
 @@ -502,7 +502,7 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
@@ -522634,6 +667193,19 @@
   out:
  	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
  	return fsf_req;
+@@ -4224,10 +4224,10 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
+ 
+ 		ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
+ 			       fcp_rsp_iu->fcp_sns_len);
+-		memcpy(&scpnt->sense_buffer,
++		memcpy(scpnt->sense_buffer,
+ 		       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
+ 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
+-			      (void *) &scpnt->sense_buffer, sns_len);
++			      (void *)scpnt->sense_buffer, sns_len);
+ 	}
+ 
+ 	/* check for overrun */
 @@ -4725,7 +4725,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
  	/* allocate new FSF request */
  	fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
@@ -522779,10 +667351,18 @@
 -53c7xx_d.h
 -53c7xx_u.h
 diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
-index afb262b..1c24483 100644
+index afb262b..b4912d1 100644
 --- a/drivers/scsi/3w-9xxx.c
 +++ b/drivers/scsi/3w-9xxx.c
-@@ -2010,6 +2010,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
+@@ -1990,7 +1990,6 @@ static struct scsi_host_template driver_template = {
+ 	.max_sectors		= TW_MAX_SECTORS,
+ 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.shost_attrs		= twa_host_attrs,
+ 	.emulated		= 1
+ };
+@@ -2010,6 +2009,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
  	}
  
  	pci_set_master(pdev);
@@ -522790,6 +667370,18 @@
  
  	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
  	    || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
+index 59716eb..d095321 100644
+--- a/drivers/scsi/3w-xxxx.c
++++ b/drivers/scsi/3w-xxxx.c
+@@ -2261,7 +2261,6 @@ static struct scsi_host_template driver_template = {
+ 	.max_sectors		= TW_MAX_SECTORS,
+ 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,	
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.shost_attrs		= tw_host_attrs,
+ 	.emulated		= 1
+ };
 diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
 index 71ff3fb..f4c4fe9 100644
 --- a/drivers/scsi/53c700.c
@@ -522832,7 +667424,7 @@
  				/* queue the command for reissue */
  				slot->state = NCR_700_SLOT_QUEUED;
 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
-index 49e1ffa..ead47c1 100644
+index 49e1ffa..4d3ebb1 100644
 --- a/drivers/scsi/BusLogic.c
 +++ b/drivers/scsi/BusLogic.c
 @@ -2947,7 +2947,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
@@ -522844,11 +667436,19 @@
  	CCB->SenseDataPointer = pci_map_single(HostAdapter->PCI_Device, Command->sense_buffer, CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
  	CCB->Command = Command;
  	Command->scsi_done = CompletionRoutine;
+@@ -3575,7 +3575,6 @@ static struct scsi_host_template Bus_Logic_template = {
+ 	.unchecked_isa_dma = 1,
+ 	.max_sectors = 128,
+ 	.use_clustering = ENABLE_CLUSTERING,
+-	.use_sg_chaining = ENABLE_SG_CHAINING,
+ };
+ 
+ /*
 diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
-index 184c7ae..3e161cd 100644
+index 184c7ae..14fc7f3 100644
 --- a/drivers/scsi/Kconfig
 +++ b/drivers/scsi/Kconfig
-@@ -341,7 +341,7 @@ config ISCSI_TCP
+@@ -341,11 +341,11 @@ config ISCSI_TCP
  	 The userspace component needed to initialize the driver, documentation,
  	 and sample configuration files can be found here:
  
@@ -522857,6 +667457,11 @@
  
  config SGIWD93_SCSI
  	tristate "SGI WD93C93 SCSI Driver"
+-	depends on SGI_IP22 && SCSI
++	depends on SGI_HAS_WD93 && SCSI
+   	help
+ 	  If you have a Western Digital WD93 SCSI controller on
+ 	  an SGI MIPS system, say Y.  Otherwise, say N.
 @@ -573,10 +573,10 @@ config SCSI_ARCMSR_AER
  source "drivers/scsi/megaraid/Kconfig.megaraid"
  
@@ -523014,6 +667619,30 @@
  
  	do {
  		done = 1;
+diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
+index 137d065..6961f78 100644
+--- a/drivers/scsi/NCR53c406a.c
++++ b/drivers/scsi/NCR53c406a.c
+@@ -1065,7 +1065,6 @@ static struct scsi_host_template driver_template =
+      .cmd_per_lun       	= 1			/* commands per lun */, 
+      .unchecked_isa_dma 	= 1			/* unchecked_isa_dma */,
+      .use_clustering    	= ENABLE_CLUSTERING,
+-     .use_sg_chaining           = ENABLE_SG_CHAINING,
+ };
+ 
+ #include "scsi_module.c"
+diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
+index d3a6d15..f608d4a 100644
+--- a/drivers/scsi/a100u2w.c
++++ b/drivers/scsi/a100u2w.c
+@@ -1071,7 +1071,6 @@ static struct scsi_host_template inia100_template = {
+ 	.sg_tablesize		= SG_ALL,
+ 	.cmd_per_lun 		= 1,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ 
+ static int __devinit inia100_probe_one(struct pci_dev *pdev,
 diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
 index b7c5385..23f27c9 100644
 --- a/drivers/scsi/a2091.c
@@ -524867,7 +669496,7 @@
  extern int acbsize;
  extern char aac_driver_version[];
 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
-index 1e6d7a9..851a7e5 100644
+index 1e6d7a9..f8afa35 100644
 --- a/drivers/scsi/aacraid/commctrl.c
 +++ b/drivers/scsi/aacraid/commctrl.c
 @@ -48,13 +48,13 @@
@@ -524957,42 +669586,7 @@
  	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
  		return -EFAULT;
  	/*
-@@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
- 	 *	Search the list of AdapterFibContext addresses on the adapter
- 	 *	to be sure this is a valid address
- 	 */
-+	spin_lock_irqsave(&dev->fib_lock, flags);
- 	entry = dev->fib_list.next;
- 	fibctx = NULL;
- 
-@@ -251,37 +252,37 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
- 		/*
- 		 *	Extract the AdapterFibContext from the Input parameters.
- 		 */
--		if (fibctx->unique == f.fibctx) {   /* We found a winner */
-+		if (fibctx->unique == f.fibctx) { /* We found a winner */
- 			break;
- 		}
- 		entry = entry->next;
- 		fibctx = NULL;
- 	}
- 	if (!fibctx) {
-+		spin_unlock_irqrestore(&dev->fib_lock, flags);
- 		dprintk ((KERN_INFO "Fib Context not found\n"));
- 		return -EINVAL;
- 	}
- 
- 	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
- 		 (fibctx->size != sizeof(struct aac_fib_context))) {
-+		spin_unlock_irqrestore(&dev->fib_lock, flags);
- 		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
- 		return -EINVAL;
- 	}
- 	status = 0;
--	spin_lock_irqsave(&dev->fib_lock, flags);
- 	/*
- 	 *	If there are no fibs to send back, then either wait or return
- 	 *	-EAGAIN
+@@ -275,13 +275,12 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
  	 */
  return_fib:
  	if (!list_empty(&fibctx->fib_list)) {
@@ -525007,7 +669601,7 @@
  		fib = list_entry(entry, struct fib, fiblink);
  		fibctx->count--;
  		spin_unlock_irqrestore(&dev->fib_lock, flags);
-@@ -289,7 +290,7 @@ return_fib:
+@@ -289,7 +288,7 @@ return_fib:
  			kfree(fib->hw_fib_va);
  			kfree(fib);
  			return -EFAULT;
@@ -525016,7 +669610,7 @@
  		/*
  		 *	Free the space occupied by this copy of the fib.
  		 */
-@@ -318,7 +319,7 @@ return_fib:
+@@ -318,7 +317,7 @@ return_fib:
  			}
  		} else {
  			status = -EAGAIN;
@@ -525025,25 +669619,7 @@
  	}
  	fibctx->jiffies = jiffies/HZ;
  	return status;
-@@ -327,7 +328,9 @@ return_fib:
- int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
- {
- 	struct fib *fib;
-+	unsigned long flags;
- 
-+	spin_lock_irqsave(&dev->fib_lock, flags);
- 	/*
- 	 *	First free any FIBs that have not been consumed.
- 	 */
-@@ -350,6 +353,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
- 	 *	Remove the Context from the AdapterFibContext List
- 	 */
- 	list_del(&fibctx->next);
-+	spin_unlock_irqrestore(&dev->fib_lock, flags);
- 	/*
- 	 *	Invalidate context
- 	 */
-@@ -368,7 +372,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
+@@ -368,7 +367,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
   *
   *	This routine will close down the fibctx passed in from the user.
   */
@@ -525052,18 +669628,16 @@
  static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
  {
  	struct aac_fib_context *fibctx;
-@@ -415,8 +419,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
+@@ -415,7 +414,7 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
   *	@arg: ioctl arguments
   *
   *	This routine returns the driver version.
 - *      Under Linux, there have been no version incompatibilities, so this is 
-- *      simple!
-+ *	Under Linux, there have been no version incompatibilities, so this is
-+ *	simple!
++ *      Under Linux, there have been no version incompatibilities, so this is
+  *      simple!
   */
  
- static int check_revision(struct aac_dev *dev, void __user *arg)
-@@ -426,12 +430,12 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
+@@ -426,12 +425,12 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
  	u32 version;
  
  	response.compat = 1;
@@ -525078,16 +669652,7 @@
  		response.build = cpu_to_le32(AAC_DRIVER_BUILD);
  #	else
  		response.build = cpu_to_le32(9999);
-@@ -464,7 +468,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
- 	u32 data_dir;
- 	void __user *sg_user[32];
- 	void *sg_list[32];
--	u32   sg_indx = 0;
-+	u32 sg_indx = 0;
- 	u32 byte_count = 0;
- 	u32 actual_fibsize64, actual_fibsize = 0;
- 	int i;
-@@ -475,7 +479,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -475,7 +474,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  		return -EBUSY;
  	}
  	if (!capable(CAP_SYS_ADMIN)){
@@ -525096,7 +669661,7 @@
  		return -EPERM;
  	}
  	/*
-@@ -490,7 +494,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -490,7 +489,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  
  	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
  	if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
@@ -525105,7 +669670,7 @@
  		rcode = -EFAULT;
  		goto cleanup;
  	}
-@@ -507,7 +511,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -507,7 +506,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  		goto cleanup;
  	}
  	if(copy_from_user(user_srbcmd, user_srb,fibsize)){
@@ -525114,19 +669679,7 @@
  		rcode = -EFAULT;
  		goto cleanup;
  	}
-@@ -518,15 +522,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
- 	// Fix up srb for endian and force some values
- 
- 	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
--	srbcmd->channel  = cpu_to_le32(user_srbcmd->channel);
-+	srbcmd->channel	 = cpu_to_le32(user_srbcmd->channel);
- 	srbcmd->id	 = cpu_to_le32(user_srbcmd->id);
--	srbcmd->lun      = cpu_to_le32(user_srbcmd->lun);
--	srbcmd->timeout  = cpu_to_le32(user_srbcmd->timeout);
--	srbcmd->flags    = cpu_to_le32(flags);
-+	srbcmd->lun	 = cpu_to_le32(user_srbcmd->lun);
-+	srbcmd->timeout	 = cpu_to_le32(user_srbcmd->timeout);
-+	srbcmd->flags	 = cpu_to_le32(flags);
+@@ -526,7 +525,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  	srbcmd->retry_limit = 0; // Obsolete parameter
  	srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
  	memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
@@ -525135,7 +669688,7 @@
  	switch (flags & (SRB_DataIn | SRB_DataOut)) {
  	case SRB_DataOut:
  		data_dir = DMA_TO_DEVICE;
-@@ -582,7 +586,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -582,7 +581,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  				void* p;
  				/* Does this really need to be GFP_DMA? */
  				p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
@@ -525144,7 +669697,7 @@
  					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
  					  upsg->sg[i].count,i,upsg->count));
  					rcode = -ENOMEM;
-@@ -594,7 +598,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -594,7 +593,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  				sg_list[i] = p; // save so we can clean up later
  				sg_indx = i;
  
@@ -525153,7 +669706,7 @@
  					if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
  						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
  						rcode = -EFAULT;
-@@ -626,7 +630,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -626,7 +625,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  				void* p;
  				/* Does this really need to be GFP_DMA? */
  				p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
@@ -525162,7 +669715,7 @@
  					kfree (usg);
  					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
  					  usg->sg[i].count,i,usg->count));
-@@ -637,7 +641,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -637,7 +636,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  				sg_list[i] = p; // save so we can clean up later
  				sg_indx = i;
  
@@ -525171,7 +669724,7 @@
  					if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
  						kfree (usg);
  						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
-@@ -668,7 +672,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -668,7 +667,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  				void* p;
  				/* Does this really need to be GFP_DMA? */
  				p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
@@ -525180,7 +669733,7 @@
  					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
  					  usg->sg[i].count,i,usg->count));
  					rcode = -ENOMEM;
-@@ -680,7 +684,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -680,7 +679,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  				sg_list[i] = p; // save so we can clean up later
  				sg_indx = i;
  
@@ -525189,7 +669742,7 @@
  					if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
  						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
  						rcode = -EFAULT;
-@@ -698,7 +702,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -698,7 +697,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  				dma_addr_t addr;
  				void* p;
  				p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
@@ -525198,7 +669751,7 @@
  					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
  					  upsg->sg[i].count, i, upsg->count));
  					rcode = -ENOMEM;
-@@ -708,7 +712,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -708,7 +707,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  				sg_list[i] = p; // save so we can clean up later
  				sg_indx = i;
  
@@ -525207,7 +669760,7 @@
  					if(copy_from_user(p, sg_user[i],
  							upsg->sg[i].count)) {
  						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
-@@ -734,19 +738,19 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -734,19 +733,19 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  	}
  
  	if (status != 0){
@@ -525230,7 +669783,7 @@
  				rcode = -EFAULT;
  				goto cleanup;
  
-@@ -756,7 +760,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+@@ -756,7 +755,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
  
  	reply = (struct aac_srb_reply *) fib_data(srbfib);
  	if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
@@ -525239,7 +669792,7 @@
  		rcode = -EFAULT;
  		goto cleanup;
  	}
-@@ -775,34 +779,34 @@ cleanup:
+@@ -775,34 +774,34 @@ cleanup:
  }
  
  struct aac_pci_info {
@@ -525258,12 +669811,11 @@
  	pci_info.bus = dev->pdev->bus->number;
  	pci_info.slot = PCI_SLOT(dev->pdev->devfn);
  
--       if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
+        if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
 -               dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
 -               return -EFAULT;
-+	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
-+		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
-+		return -EFAULT;
++	       dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
++	       return -EFAULT;
  	}
 -        return 0;
 +	return 0;
@@ -525284,7 +669836,7 @@
  	status = aac_dev_ioctl(dev, cmd, arg);
  	if(status != -ENOTTY)
  		return status;
-@@ -832,7 +836,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
+@@ -832,7 +831,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
  		break;
  	default:
  		status = -ENOTTY;
@@ -526315,7 +670867,7 @@
  		} else {
  			unsigned long flagv;
 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
-index 9dd331b..61be227 100644
+index 9dd331b..0e8267c 100644
 --- a/drivers/scsi/aacraid/linit.c
 +++ b/drivers/scsi/aacraid/linit.c
 @@ -159,27 +159,27 @@ static struct pci_device_id aac_pci_tbl[] = {
@@ -526719,7 +671271,7 @@
  	&aac_kernel_version,
  	&aac_monitor_version,
  	&aac_bios_version,
-@@ -928,21 +1015,22 @@ static struct scsi_host_template aac_driver_template = {
+@@ -928,23 +1015,23 @@ static struct scsi_host_template aac_driver_template = {
  	.compat_ioctl			= aac_compat_ioctl,
  #endif
  	.queuecommand   		= aac_queuecommand,
@@ -526745,9 +671297,11 @@
 +	.cmd_per_lun    		= AAC_NUM_IO_FIB,
 +#endif
  	.use_clustering			= ENABLE_CLUSTERING,
- 	.use_sg_chaining		= ENABLE_SG_CHAINING,
+-	.use_sg_chaining		= ENABLE_SG_CHAINING,
  	.emulated                       = 1,
-@@ -979,18 +1067,18 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
+ };
+ 
+@@ -979,18 +1066,18 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
  		goto out;
  	error = -ENODEV;
  
@@ -526769,7 +671323,7 @@
  	pci_set_master(pdev);
  
  	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
-@@ -1003,7 +1091,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
+@@ -1003,7 +1090,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
  	shost->max_cmd_len = 16;
  
  	aac = (struct aac_dev *)shost->hostdata;
@@ -526778,7 +671332,7 @@
  	aac->pdev = pdev;
  	aac->name = aac_driver_template.name;
  	aac->id = shost->unique_id;
-@@ -1040,7 +1128,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
+@@ -1040,7 +1127,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
  	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
  		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
  			goto out_deinit;
@@ -526787,7 +671341,7 @@
  	aac->maximum_num_channels = aac_drivers[index].channels;
  	error = aac_get_adapter_info(aac);
  	if (error < 0)
-@@ -1049,7 +1137,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
+@@ -1049,7 +1136,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
  	/*
   	 * Lets override negotiations and drop the maximum SG limit to 34
   	 */
@@ -526796,7 +671350,7 @@
  			(aac->scsi_host_ptr->sg_tablesize > 34)) {
   		aac->scsi_host_ptr->sg_tablesize = 34;
   		aac->scsi_host_ptr->max_sectors
-@@ -1066,17 +1154,17 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
+@@ -1066,17 +1153,17 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
  	/*
  	 * Firware printf works only with older firmware.
  	 */
@@ -526817,7 +671371,7 @@
  		shost->max_channel = aac->maximum_num_channels;
  	else
  		shost->max_channel = 0;
-@@ -1148,10 +1236,10 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
+@@ -1148,10 +1235,10 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
  	kfree(aac->queues);
  
  	aac_adapter_ioremap(aac, 0);
@@ -526830,7 +671384,7 @@
  	list_del(&aac->entry);
  	scsi_host_put(shost);
  	pci_disable_device(pdev);
-@@ -1172,7 +1260,7 @@ static struct pci_driver aac_pci_driver = {
+@@ -1172,7 +1259,7 @@ static struct pci_driver aac_pci_driver = {
  static int __init aac_init(void)
  {
  	int error;
@@ -527116,7 +671670,7 @@
  	ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7);	/*SCSI Target Id */
  	ccb[mbo].rsalen = 16;
 diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
-index f6722fd..be58a0b 100644
+index f6722fd..7c45d88 100644
 --- a/drivers/scsi/aha1740.c
 +++ b/drivers/scsi/aha1740.c
 @@ -286,7 +286,7 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
@@ -527128,6 +671682,14 @@
  				errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
  			} else
  				errstatus = 0;
+@@ -563,7 +563,6 @@ static struct scsi_host_template aha1740_template = {
+ 	.sg_tablesize     = AHA1740_SCATTER,
+ 	.cmd_per_lun      = AHA1740_CMDLUN,
+ 	.use_clustering   = ENABLE_CLUSTERING,
+-	.use_sg_chaining  = ENABLE_SG_CHAINING,
+ 	.eh_abort_handler = aha1740_eh_abort_handler,
+ };
+ 
 diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
 index 9a6ce19..e4f70c5 100644
 --- a/drivers/scsi/aic7xxx/Makefile
@@ -527216,11 +671778,73 @@
  endif
  
  $(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl]
+diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
+index ce638aa..2f00467 100644
+--- a/drivers/scsi/aic7xxx/aic79xx.h
++++ b/drivers/scsi/aic7xxx/aic79xx.h
+@@ -1340,8 +1340,10 @@ struct	ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
+ int			  ahd_pci_config(struct ahd_softc *,
+ 					 struct ahd_pci_identity *);
+ int	ahd_pci_test_register_access(struct ahd_softc *);
++#ifdef CONFIG_PM
+ void	ahd_pci_suspend(struct ahd_softc *);
+ void	ahd_pci_resume(struct ahd_softc *);
++#endif
+ 
+ /************************** SCB and SCB queue management **********************/
+ void		ahd_qinfifo_requeue_tail(struct ahd_softc *ahd,
+@@ -1352,8 +1354,10 @@ struct ahd_softc	*ahd_alloc(void *platform_arg, char *name);
+ int			 ahd_softc_init(struct ahd_softc *);
+ void			 ahd_controller_info(struct ahd_softc *ahd, char *buf);
+ int			 ahd_init(struct ahd_softc *ahd);
++#ifdef CONFIG_PM
+ int			 ahd_suspend(struct ahd_softc *ahd);
+ void			 ahd_resume(struct ahd_softc *ahd);
++#endif
+ int			 ahd_default_config(struct ahd_softc *ahd);
+ int			 ahd_parse_vpddata(struct ahd_softc *ahd,
+ 					   struct vpd_config *vpd);
+@@ -1361,7 +1365,6 @@ int			 ahd_parse_cfgdata(struct ahd_softc *ahd,
+ 					   struct seeprom_config *sc);
+ void			 ahd_intr_enable(struct ahd_softc *ahd, int enable);
+ void			 ahd_pause_and_flushwork(struct ahd_softc *ahd);
+-int			 ahd_suspend(struct ahd_softc *ahd); 
+ void			 ahd_set_unit(struct ahd_softc *, int);
+ void			 ahd_set_name(struct ahd_softc *, char *);
+ struct scb		*ahd_get_scb(struct ahd_softc *ahd, u_int col_idx);
+diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
+index a7dd8cd..ade0fb8 100644
+--- a/drivers/scsi/aic7xxx/aic79xx_core.c
++++ b/drivers/scsi/aic7xxx/aic79xx_core.c
+@@ -7175,6 +7175,7 @@ ahd_pause_and_flushwork(struct ahd_softc *ahd)
+ 	ahd->flags &= ~AHD_ALL_INTERRUPTS;
+ }
+ 
++#ifdef CONFIG_PM
+ int
+ ahd_suspend(struct ahd_softc *ahd)
+ {
+@@ -7197,6 +7198,7 @@ ahd_resume(struct ahd_softc *ahd)
+ 	ahd_intr_enable(ahd, TRUE); 
+ 	ahd_restart(ahd);
+ }
++#endif
+ 
+ /************************** Busy Target Table *********************************/
+ /*
 diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
-index 2d02040..0e4708f 100644
+index 2d02040..0146547 100644
 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c
 +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
-@@ -1784,7 +1784,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
+@@ -766,7 +766,6 @@ struct scsi_host_template aic79xx_driver_template = {
+ 	.max_sectors		= 8192,
+ 	.cmd_per_lun		= 2,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.slave_alloc		= ahd_linux_slave_alloc,
+ 	.slave_configure	= ahd_linux_slave_configure,
+ 	.target_alloc		= ahd_linux_target_alloc,
+@@ -1784,7 +1783,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
  			if (scb->flags & SCB_SENSE) {
  				sense_size = min(sizeof(struct scsi_sense_data)
  					       - ahd_get_sense_residual(scb),
@@ -527229,7 +671853,7 @@
  				sense_offset = 0;
  			} else {
  				/*
-@@ -1795,11 +1795,11 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
+@@ -1795,11 +1794,11 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
  				    scb->sense_data;
  				sense_size = min_t(size_t,
  						scsi_4btoul(siu->sense_length),
@@ -527243,11 +671867,176 @@
  			memcpy(cmd->sense_buffer,
  			       ahd_get_sense_buf(ahd, scb)
  			       + sense_offset, sense_size);
+@@ -1922,7 +1921,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
+ 				struct scsi_sense_data *sense;
+ 				
+ 				sense = (struct scsi_sense_data *)
+-					&cmd->sense_buffer;
++					cmd->sense_buffer;
+ 				if (sense->extra_len >= 5 &&
+ 				    (sense->add_sense_code == 0x47
+ 				     || sense->add_sense_code == 0x48))
+diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+index 66f0259..4150c8a 100644
+--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
++++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+@@ -43,17 +43,6 @@
+ #include "aic79xx_inline.h"
+ #include "aic79xx_pci.h"
+ 
+-static int	ahd_linux_pci_dev_probe(struct pci_dev *pdev,
+-					const struct pci_device_id *ent);
+-static int	ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd,
+-						 u_long *base, u_long *base2);
+-static int	ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
+-						 u_long *bus_addr,
+-						 uint8_t __iomem **maddr);
+-static int	ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg);
+-static int	ahd_linux_pci_dev_resume(struct pci_dev *pdev);
+-static void	ahd_linux_pci_dev_remove(struct pci_dev *pdev);
+-
+ /* Define the macro locally since it's different for different class of chips.
+  */
+ #define ID(x)            \
+@@ -85,17 +74,7 @@ static struct pci_device_id ahd_linux_pci_id_table[] = {
+ 
+ MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table);
+ 
+-static struct pci_driver aic79xx_pci_driver = {
+-	.name		= "aic79xx",
+-	.probe		= ahd_linux_pci_dev_probe,
+ #ifdef CONFIG_PM
+-	.suspend	= ahd_linux_pci_dev_suspend,
+-	.resume		= ahd_linux_pci_dev_resume,
+-#endif
+-	.remove		= ahd_linux_pci_dev_remove,
+-	.id_table	= ahd_linux_pci_id_table
+-};
+-
+ static int
+ ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
+ {
+@@ -139,6 +118,7 @@ ahd_linux_pci_dev_resume(struct pci_dev *pdev)
+ 
+ 	return rc;
+ }
++#endif
+ 
+ static void
+ ahd_linux_pci_dev_remove(struct pci_dev *pdev)
+@@ -245,6 +225,17 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	return (0);
+ }
+ 
++static struct pci_driver aic79xx_pci_driver = {
++	.name		= "aic79xx",
++	.probe		= ahd_linux_pci_dev_probe,
++#ifdef CONFIG_PM
++	.suspend	= ahd_linux_pci_dev_suspend,
++	.resume		= ahd_linux_pci_dev_resume,
++#endif
++	.remove		= ahd_linux_pci_dev_remove,
++	.id_table	= ahd_linux_pci_id_table
++};
++
+ int
+ ahd_linux_pci_init(void)
+ {
+diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
+index 7a203a9..df85367 100644
+--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
++++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
+@@ -389,6 +389,7 @@ ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
+ 	return error;
+ }
+ 
++#ifdef CONFIG_PM
+ void
+ ahd_pci_suspend(struct ahd_softc *ahd)
+ {
+@@ -415,6 +416,7 @@ ahd_pci_resume(struct ahd_softc *ahd)
+ 	ahd_pci_write_config(ahd->dev_softc, CSIZE_LATTIME,
+ 			     ahd->suspend_state.pci_state.csize_lattime, /*bytes*/1);
+ }
++#endif
+ 
+ /*
+  * Perform some simple tests that should catch situations where
+diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
+index 3d4e42d..c0344e6 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx.h
++++ b/drivers/scsi/aic7xxx/aic7xxx.h
+@@ -1143,7 +1143,9 @@ struct ahc_pci_identity	*ahc_find_pci_device(ahc_dev_softc_t);
+ int			 ahc_pci_config(struct ahc_softc *,
+ 					struct ahc_pci_identity *);
+ int			 ahc_pci_test_register_access(struct ahc_softc *);
++#ifdef CONFIG_PM
+ void			 ahc_pci_resume(struct ahc_softc *ahc);
++#endif
+ 
+ /*************************** EISA/VL Front End ********************************/
+ struct aic7770_identity *aic7770_find_device(uint32_t);
+@@ -1170,8 +1172,10 @@ int			 ahc_chip_init(struct ahc_softc *ahc);
+ int			 ahc_init(struct ahc_softc *ahc);
+ void			 ahc_intr_enable(struct ahc_softc *ahc, int enable);
+ void			 ahc_pause_and_flushwork(struct ahc_softc *ahc);
++#ifdef CONFIG_PM
+ int			 ahc_suspend(struct ahc_softc *ahc); 
+ int			 ahc_resume(struct ahc_softc *ahc);
++#endif
+ void			 ahc_set_unit(struct ahc_softc *, int);
+ void			 ahc_set_name(struct ahc_softc *, char *);
+ void			 ahc_alloc_scbs(struct ahc_softc *ahc);
+diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
+index f350b5e..6d2ae64 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
++++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
+@@ -5078,6 +5078,7 @@ ahc_pause_and_flushwork(struct ahc_softc *ahc)
+ 	ahc->flags &= ~AHC_ALL_INTERRUPTS;
+ }
+ 
++#ifdef CONFIG_PM
+ int
+ ahc_suspend(struct ahc_softc *ahc)
+ {
+@@ -5113,7 +5114,7 @@ ahc_resume(struct ahc_softc *ahc)
+ 	ahc_restart(ahc);
+ 	return (0);
+ }
+-
++#endif
+ /************************** Busy Target Table *********************************/
+ /*
+  * Return the untagged transaction id for a given target/channel lun.
 diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
-index 390b0fc..e310e41 100644
+index 390b0fc..99a3b33 100644
 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
 +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
-@@ -1801,12 +1801,12 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
+@@ -747,7 +747,6 @@ struct scsi_host_template aic7xxx_driver_template = {
+ 	.max_sectors		= 8192,
+ 	.cmd_per_lun		= 2,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.slave_alloc		= ahc_linux_slave_alloc,
+ 	.slave_configure	= ahc_linux_slave_configure,
+ 	.target_alloc		= ahc_linux_target_alloc,
+@@ -1658,9 +1657,12 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
+ 		untagged_q = &(ahc->untagged_queues[target_offset]);
+ 		TAILQ_REMOVE(untagged_q, scb, links.tqe);
+ 		BUG_ON(!TAILQ_EMPTY(untagged_q));
+-	}
+-
+-	if ((scb->flags & SCB_ACTIVE) == 0) {
++	} else if ((scb->flags & SCB_ACTIVE) == 0) {
++		/*
++		 * Transactions aborted from the untagged queue may
++		 * not have been dispatched to the controller, so
++		 * only check the SCB_ACTIVE flag for tagged transactions.
++		 */
+ 		printf("SCB %d done'd twice\n", scb->hscb->tag);
+ 		ahc_dump_card_state(ahc);
+ 		panic("Stopping for safety");
+@@ -1801,12 +1803,12 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
  
  			sense_size = min(sizeof(struct scsi_sense_data)
  				       - ahc_get_sense_residual(scb),
@@ -527263,8 +672052,94 @@
  			cmd->result |= (DRIVER_SENSE << 24);
  #ifdef AHC_DEBUG
  			if (ahc_debug & AHC_SHOW_SENSE) {
+diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+index 4488946..dd6e21d 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
++++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+@@ -42,17 +42,6 @@
+ #include "aic7xxx_osm.h"
+ #include "aic7xxx_pci.h"
+ 
+-static int	ahc_linux_pci_dev_probe(struct pci_dev *pdev,
+-					const struct pci_device_id *ent);
+-static int	ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc,
+-						u_long *base);
+-static int	ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
+-						 u_long *bus_addr,
+-						 uint8_t __iomem **maddr);
+-static int	ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg);
+-static int	ahc_linux_pci_dev_resume(struct pci_dev *pdev);
+-static void	ahc_linux_pci_dev_remove(struct pci_dev *pdev);
+-
+ /* Define the macro locally since it's different for different class of chips.
+ */
+ #define ID(x)	ID_C(x, PCI_CLASS_STORAGE_SCSI)
+@@ -132,17 +121,7 @@ static struct pci_device_id ahc_linux_pci_id_table[] = {
+ 
+ MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table);
+ 
+-static struct pci_driver aic7xxx_pci_driver = {
+-	.name		= "aic7xxx",
+-	.probe		= ahc_linux_pci_dev_probe,
+ #ifdef CONFIG_PM
+-	.suspend	= ahc_linux_pci_dev_suspend,
+-	.resume		= ahc_linux_pci_dev_resume,
+-#endif
+-	.remove		= ahc_linux_pci_dev_remove,
+-	.id_table	= ahc_linux_pci_id_table
+-};
+-
+ static int
+ ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
+ {
+@@ -182,6 +161,7 @@ ahc_linux_pci_dev_resume(struct pci_dev *pdev)
+ 
+ 	return (ahc_resume(ahc));
+ }
++#endif
+ 
+ static void
+ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
+@@ -289,6 +269,17 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	return (0);
+ }
+ 
++static struct pci_driver aic7xxx_pci_driver = {
++	.name		= "aic7xxx",
++	.probe		= ahc_linux_pci_dev_probe,
++#ifdef CONFIG_PM
++	.suspend	= ahc_linux_pci_dev_suspend,
++	.resume		= ahc_linux_pci_dev_resume,
++#endif
++	.remove		= ahc_linux_pci_dev_remove,
++	.id_table	= ahc_linux_pci_id_table
++};
++
+ int
+ ahc_linux_pci_init(void)
+ {
+diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
+index ae35937..56848f4 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
++++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
+@@ -2020,6 +2020,7 @@ ahc_pci_chip_init(struct ahc_softc *ahc)
+ 	return (ahc_chip_init(ahc));
+ }
+ 
++#ifdef CONFIG_PM
+ void
+ ahc_pci_resume(struct ahc_softc *ahc)
+ {
+@@ -2051,6 +2052,7 @@ ahc_pci_resume(struct ahc_softc *ahc)
+ 		ahc_release_seeprom(&sd);
+ 	}
+ }
++#endif
+ 
+ static int
+ ahc_aic785X_setup(struct ahc_softc *ahc)
 diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
-index 8f8db5f..bcb0b87 100644
+index 8f8db5f..3bfd929 100644
 --- a/drivers/scsi/aic7xxx_old.c
 +++ b/drivers/scsi/aic7xxx_old.c
 @@ -2696,7 +2696,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
@@ -527310,6 +672185,14 @@
    aic7xxx_error(cmd) = DID_OK;
    aic7xxx_status(cmd) = 0;
    cmd->host_scribble = NULL;
+@@ -11142,7 +11141,6 @@ static struct scsi_host_template driver_template = {
+ 	.max_sectors		= 2048,
+ 	.cmd_per_lun		= 3,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ 
+ #include "scsi_module.c"
 diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
 index 3dce618..72042ca 100644
 --- a/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -528344,10 +673227,18 @@
  
  	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
 diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
-index d466a2d..d80dba9 100644
+index d466a2d..f4a202e 100644
 --- a/drivers/scsi/arcmsr/arcmsr_hba.c
 +++ b/drivers/scsi/arcmsr/arcmsr_hba.c
-@@ -634,9 +634,9 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
+@@ -122,7 +122,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
+ 	.max_sectors    	= ARCMSR_MAX_XFER_SECTORS,
+ 	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.shost_attrs		= arcmsr_host_attrs,
+ };
+ #ifdef CONFIG_SCSI_ARCMSR_AER
+@@ -634,9 +633,9 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
  	pcmd->result = DID_OK << 16;
  	if (sensebuffer) {
  		int sense_data_length =
@@ -529163,7 +674054,7 @@
  
  static const char * const driverbyte_table[]={
 diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
-index a9def6e..f93c73c 100644
+index a9def6e..22ef371 100644
 --- a/drivers/scsi/dc395x.c
 +++ b/drivers/scsi/dc395x.c
 @@ -1629,8 +1629,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
@@ -529215,8 +674106,16 @@
  	srb->sg_count = 1;
  	srb->sg_index = 0;
  
+@@ -4765,7 +4763,6 @@ static struct scsi_host_template dc395x_driver_template = {
+ 	.eh_bus_reset_handler   = dc395x_eh_bus_reset,
+ 	.unchecked_isa_dma      = 0,
+ 	.use_clustering         = DISABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ 
+ 
 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
-index b31d1c9..19cce12 100644
+index b31d1c9..c9dd839 100644
 --- a/drivers/scsi/dpt_i2o.c
 +++ b/drivers/scsi/dpt_i2o.c
 @@ -2296,9 +2296,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
@@ -529231,11 +674130,27 @@
  			// Copy over the sense data
  			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
  			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
+@@ -3341,7 +3340,6 @@ static struct scsi_host_template driver_template = {
+ 	.this_id		= 7,
+ 	.cmd_per_lun		= 1,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ #include "scsi_module.c"
+ MODULE_LICENSE("GPL");
 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
-index 7ead521..05163ce 100644
+index 7ead521..8be3d76 100644
 --- a/drivers/scsi/eata.c
 +++ b/drivers/scsi/eata.c
-@@ -1623,9 +1623,9 @@ static void map_dma(unsigned int i, struct hostdata *ha)
+@@ -524,7 +524,6 @@ static struct scsi_host_template driver_template = {
+ 	.this_id = 7,
+ 	.unchecked_isa_dma = 1,
+ 	.use_clustering = ENABLE_CLUSTERING,
+-	.use_sg_chaining = ENABLE_SG_CHAINING,
+ };
+ 
+ #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
+@@ -1623,9 +1622,9 @@ static void map_dma(unsigned int i, struct hostdata *ha)
  	if (SCpnt->sense_buffer)
  		cpp->sense_addr =
  		    H2DEV(pci_map_single(ha->pdev, SCpnt->sense_buffer,
@@ -529447,7 +674362,7 @@
  
  static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
-index 24271a8..5ea1f98 100644
+index 24271a8..880c78b 100644
 --- a/drivers/scsi/hosts.c
 +++ b/drivers/scsi/hosts.c
 @@ -54,8 +54,7 @@ static struct class shost_class = {
@@ -529460,7 +674375,15 @@
   *	@shost:	scsi host to change the state of.
   *	@state:	state to change to.
   *
-@@ -429,9 +428,17 @@ void scsi_unregister(struct Scsi_Host *shost)
+@@ -343,7 +342,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+ 	shost->use_clustering = sht->use_clustering;
+ 	shost->ordered_tag = sht->ordered_tag;
+ 	shost->active_mode = sht->supported_mode;
+-	shost->use_sg_chaining = sht->use_sg_chaining;
+ 
+ 	if (sht->supported_mode == MODE_UNKNOWN)
+ 		/* means we didn't set it ... default to INITIATOR */
+@@ -429,9 +427,17 @@ void scsi_unregister(struct Scsi_Host *shost)
  }
  EXPORT_SYMBOL(scsi_unregister);
  
@@ -529479,7 +674402,7 @@
   * @hostnum:	host number to locate
   *
   * Return value:
-@@ -439,19 +446,12 @@ EXPORT_SYMBOL(scsi_unregister);
+@@ -439,19 +445,12 @@ EXPORT_SYMBOL(scsi_unregister);
   **/
  struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
  {
@@ -529504,7 +674427,7 @@
  	return shost;
  }
 diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
-index 0844331..e7b2f35 100644
+index 0844331..ff149ad 100644
 --- a/drivers/scsi/hptiop.c
 +++ b/drivers/scsi/hptiop.c
 @@ -1,5 +1,5 @@
@@ -530095,8 +675018,9 @@
  		scp->result = SAM_STAT_CHECK_CONDITION;
 -		memset(&scp->sense_buffer,
 -				0, sizeof(scp->sense_buffer));
- 		memcpy(&scp->sense_buffer, &req->sg_list,
+-		memcpy(&scp->sense_buffer, &req->sg_list,
 -				min(sizeof(scp->sense_buffer),
++		memcpy(scp->sense_buffer, &req->sg_list,
 +				min_t(size_t, SCSI_SENSE_BUFFERSIZE,
  					le32_to_cpu(req->dataxfer_length)));
  		break;
@@ -530271,7 +675195,14 @@
  	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
  	return queue_depth;
  }
-@@ -663,6 +913,26 @@ static struct scsi_host_template driver_template = {
+@@ -656,13 +906,32 @@ static struct scsi_host_template driver_template = {
+ 	.unchecked_isa_dma          = 0,
+ 	.emulated                   = 0,
+ 	.use_clustering             = ENABLE_CLUSTERING,
+-	.use_sg_chaining            = ENABLE_SG_CHAINING,
+ 	.proc_name                  = driver_name,
+ 	.shost_attrs                = hptiop_attrs,
+ 	.this_id                    = -1,
  	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
  };
  
@@ -530298,7 +675229,7 @@
  static int __devinit hptiop_probe(struct pci_dev *pcidev,
  					const struct pci_device_id *id)
  {
-@@ -708,6 +978,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
+@@ -708,6 +977,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
  
  	hba = (struct hptiop_hba *)host->hostdata;
  
@@ -530306,7 +675237,7 @@
  	hba->pcidev = pcidev;
  	hba->host = host;
  	hba->initialized = 0;
-@@ -725,16 +996,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
+@@ -725,16 +995,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
  	host->n_io_port = 0;
  	host->irq = pcidev->irq;
  
@@ -530334,7 +675265,7 @@
  		printk(KERN_ERR "scsi%d: get config failed\n",
  				hba->host->host_no);
  		goto unmap_pci_bar;
-@@ -770,7 +1049,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
+@@ -770,7 +1048,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
  	set_config.vbus_id = cpu_to_le16(host->host_no);
  	set_config.max_host_request_size = cpu_to_le16(req_size);
  
@@ -530343,7 +675274,7 @@
  		printk(KERN_ERR "scsi%d: set config failed\n",
  				hba->host->host_no);
  		goto unmap_pci_bar;
-@@ -839,21 +1118,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
+@@ -839,21 +1117,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
  
  free_request_mem:
  	dma_free_coherent(&hba->pcidev->dev,
@@ -530372,7 +675303,7 @@
  disable_pci_device:
  	pci_disable_device(pcidev);
  
-@@ -865,8 +1147,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
+@@ -865,8 +1146,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
  {
  	struct Scsi_Host *host = pci_get_drvdata(pcidev);
  	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
@@ -530381,7 +675312,7 @@
  
  	dprintk("hptiop_shutdown(%p)\n", hba);
  
-@@ -876,11 +1156,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
+@@ -876,11 +1155,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
  					hba->host->host_no);
  
  	/* disable all outbound interrupts */
@@ -530409,7 +675340,7 @@
  }
  
  static void hptiop_remove(struct pci_dev *pcidev)
-@@ -901,7 +1194,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
+@@ -901,7 +1193,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
  			hba->dma_coherent,
  			hba->dma_coherent_handle);
  
@@ -530421,7 +675352,7 @@
  
  	pci_release_regions(hba->pcidev);
  	pci_set_drvdata(hba->pcidev, NULL);
-@@ -910,11 +1206,50 @@ static void hptiop_remove(struct pci_dev *pcidev)
+@@ -910,11 +1205,50 @@ static void hptiop_remove(struct pci_dev *pcidev)
  	scsi_host_put(host);
  }
  
@@ -530713,8 +675644,20 @@
  #define HPT_IOCTL_RESULT_OK         0
  #define HPT_IOCTL_RESULT_FAILED     (-1)
  
+diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
+index db004a4..4d15a62 100644
+--- a/drivers/scsi/ibmmca.c
++++ b/drivers/scsi/ibmmca.c
+@@ -1501,7 +1501,6 @@ static struct scsi_host_template ibmmca_driver_template = {
+           .sg_tablesize   = 16,
+           .cmd_per_lun    = 1,
+           .use_clustering = ENABLE_CLUSTERING,
+-          .use_sg_chaining = ENABLE_SG_CHAINING,
+ };
+ 
+ static int ibmmca_probe(struct device *dev)
 diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
-index 5f2396c..3081901 100644
+index 5f2396c..78d46a9 100644
 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c
 +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
 @@ -629,6 +629,16 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
@@ -530933,6 +675876,14 @@
  	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
  	spin_unlock_irqrestore(shost->host_lock, lock_flags);
  	return 0;
+@@ -1557,7 +1600,6 @@ static struct scsi_host_template driver_template = {
+ 	.this_id = -1,
+ 	.sg_tablesize = SG_ALL,
+ 	.use_clustering = ENABLE_CLUSTERING,
+-	.use_sg_chaining = ENABLE_SG_CHAINING,
+ 	.shost_attrs = ibmvscsi_attrs,
+ };
+ 
 diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
 index 82bcab6..d63f11e 100644
 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -531192,6 +676143,18 @@
  	}
  	cmd->SCp.have_data_in = 0;
  
+diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
+index a10a5c7..0cc8868 100644
+--- a/drivers/scsi/initio.c
++++ b/drivers/scsi/initio.c
+@@ -2833,7 +2833,6 @@ static struct scsi_host_template initio_template = {
+ 	.sg_tablesize		= SG_ALL,
+ 	.cmd_per_lun		= 1,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ 
+ static int initio_probe_one(struct pci_dev *pdev,
 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
 index 0841df0..73270ff 100644
 --- a/drivers/scsi/ipr.c
@@ -532331,7 +677294,7 @@
     dma_addr_t         ioctl_busaddr;      /* dma address of ioctl buffer*/
     uint8_t            bios_version[8];    /* BIOS Revision              */
 diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
-index 57ce225..e5be5fd 100644
+index 57ce225..b6f99df 100644
 --- a/drivers/scsi/iscsi_tcp.c
 +++ b/drivers/scsi/iscsi_tcp.c
 @@ -48,7 +48,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus at yahoo.com>, "
@@ -534840,7 +679803,7 @@
  	}
  
  	if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
-@@ -2222,12 +1926,14 @@ static struct scsi_host_template iscsi_sht = {
+@@ -2222,10 +1926,11 @@ static struct scsi_host_template iscsi_sht = {
  	.queuecommand           = iscsi_queuecommand,
  	.change_queue_depth	= iscsi_change_queue_depth,
  	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
@@ -534852,11 +679815,8 @@
 +	.eh_device_reset_handler= iscsi_eh_device_reset,
  	.eh_host_reset_handler	= iscsi_eh_host_reset,
  	.use_clustering         = DISABLE_CLUSTERING,
-+	.use_sg_chaining	= ENABLE_SG_CHAINING,
  	.slave_configure        = iscsi_tcp_slave_configure,
- 	.proc_name		= "iscsi_tcp",
- 	.this_id		= -1,
-@@ -2257,14 +1963,17 @@ static struct iscsi_transport iscsi_tcp_transport = {
+@@ -2257,14 +1962,17 @@ static struct iscsi_transport iscsi_tcp_transport = {
  				  ISCSI_PERSISTENT_ADDRESS |
  				  ISCSI_TARGET_NAME | ISCSI_TPGT |
  				  ISCSI_USERNAME | ISCSI_PASSWORD |
@@ -534876,7 +679836,7 @@
  	/* session management */
  	.create_session		= iscsi_tcp_session_create,
  	.destroy_session	= iscsi_tcp_session_destroy,
-@@ -2283,8 +1992,8 @@ static struct iscsi_transport iscsi_tcp_transport = {
+@@ -2283,8 +1991,8 @@ static struct iscsi_transport iscsi_tcp_transport = {
  	/* IO */
  	.send_pdu		= iscsi_conn_send_pdu,
  	.get_stats		= iscsi_conn_get_stats,
@@ -537397,7 +682357,7 @@
 +EXPORT_SYMBOL_GPL(sas_ssp_task_response);
 +
 diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
-index 2ad0a27..5cff020 100644
+index 2ad0a27..6d6a76e 100644
 --- a/drivers/scsi/libsrp.c
 +++ b/drivers/scsi/libsrp.c
 @@ -192,18 +192,18 @@ static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
@@ -537456,6 +682416,17 @@
  	} else
  		len = id->len;
  
+@@ -425,8 +426,8 @@ int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
+ 
+ 	sc->SCp.ptr = info;
+ 	memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
+-	sc->request_bufflen = len;
+-	sc->request_buffer = (void *) (unsigned long) addr;
++	sc->sdb.length = len;
++	sc->sdb.table.sgl = (void *) (unsigned long) addr;
+ 	sc->tag = tag;
+ 	err = scsi_tgt_queue_command(sc, itn_id, (struct scsi_lun *)&cmd->lun,
+ 				     cmd->tag);
 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
 index ba3ecab..f26b953 100644
 --- a/drivers/scsi/lpfc/lpfc.h
@@ -542620,7 +687591,7 @@
  	lpfc_disc_illegal,		/* CMPL_LOGO       */
  	lpfc_disc_illegal,		/* CMPL_ADISC      */
 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
-index 4e46045..6483c62 100644
+index 4e46045..fc5c3a4 100644
 --- a/drivers/scsi/lpfc/lpfc_scsi.c
 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
 @@ -130,7 +130,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
@@ -542753,16 +687724,17 @@
  
  	if (lpfc_cmd->pCmd == cmnd) {
  		ret = FAILED;
-@@ -1438,7 +1458,7 @@ struct scsi_host_template lpfc_template = {
+@@ -1438,8 +1458,7 @@ struct scsi_host_template lpfc_template = {
  	.slave_destroy		= lpfc_slave_destroy,
  	.scan_finished		= lpfc_scan_finished,
  	.this_id		= -1,
 -	.sg_tablesize		= LPFC_SG_SEG_CNT,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 +	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
- 	.use_sg_chaining	= ENABLE_SG_CHAINING,
  	.cmd_per_lun		= LPFC_CMD_PER_LUN,
  	.use_clustering		= ENABLE_CLUSTERING,
-@@ -1459,7 +1479,7 @@ struct scsi_host_template lpfc_vport_template = {
+ 	.shost_attrs		= lpfc_hba_attrs,
+@@ -1459,10 +1478,9 @@ struct scsi_host_template lpfc_vport_template = {
  	.slave_destroy		= lpfc_slave_destroy,
  	.scan_finished		= lpfc_scan_finished,
  	.this_id		= -1,
@@ -542770,7 +687742,10 @@
 +	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
  	.cmd_per_lun		= LPFC_CMD_PER_LUN,
  	.use_clustering		= ENABLE_CLUSTERING,
- 	.use_sg_chaining	= ENABLE_SG_CHAINING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.shost_attrs		= lpfc_vport_attrs,
+ 	.max_sectors		= 0xFFFF,
+ };
 diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
 index 31787bb..daba923 100644
 --- a/drivers/scsi/lpfc/lpfc_scsi.h
@@ -543954,11 +688929,31 @@
  
  /*
   *  queuecommand  VPORT-specific return codes. Specified in  the host byte code.
+diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
+index a035001..b12ad7c 100644
+--- a/drivers/scsi/mac53c94.c
++++ b/drivers/scsi/mac53c94.c
+@@ -402,7 +402,6 @@ static struct scsi_host_template mac53c94_template = {
+ 	.sg_tablesize	= SG_ALL,
+ 	.cmd_per_lun	= 1,
+ 	.use_clustering	= DISABLE_CLUSTERING,
+-	.use_sg_chaining = ENABLE_SG_CHAINING,
+ };
+ 
+ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
 diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
-index 66c6520..765c24d 100644
+index 66c6520..4d59ae8 100644
 --- a/drivers/scsi/megaraid.c
 +++ b/drivers/scsi/megaraid.c
-@@ -4889,7 +4889,7 @@ __megaraid_shutdown(adapter_t *adapter)
+@@ -4490,7 +4490,6 @@ static struct scsi_host_template megaraid_template = {
+ 	.sg_tablesize			= MAX_SGLIST,
+ 	.cmd_per_lun			= DEF_CMD_PER_LUN,
+ 	.use_clustering			= ENABLE_CLUSTERING,
+-	.use_sg_chaining		= ENABLE_SG_CHAINING,
+ 	.eh_abort_handler		= megaraid_abort,
+ 	.eh_device_reset_handler	= megaraid_reset,
+ 	.eh_bus_reset_handler		= megaraid_reset,
+@@ -4889,7 +4888,7 @@ __megaraid_shutdown(adapter_t *adapter)
  		mdelay(1000);
  }
  
@@ -543968,7 +688963,7 @@
  {
  	struct Scsi_Host *host = pci_get_drvdata(pdev);
 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
-index c892310..24e32e4 100644
+index c892310..6db77c0 100644
 --- a/drivers/scsi/megaraid/megaraid_mbox.c
 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
 @@ -300,7 +300,7 @@ static struct pci_device_id pci_id_table_g[] =  {
@@ -543980,7 +688975,15 @@
  	.name		= "megaraid",
  	.id_table	= pci_id_table_g,
  	.probe		= megaraid_probe_one,
-@@ -394,7 +394,7 @@ megaraid_init(void)
+@@ -361,7 +361,6 @@ static struct scsi_host_template megaraid_template_g = {
+ 	.eh_host_reset_handler		= megaraid_reset_handler,
+ 	.change_queue_depth		= megaraid_change_queue_depth,
+ 	.use_clustering			= ENABLE_CLUSTERING,
+-	.use_sg_chaining		= ENABLE_SG_CHAINING,
+ 	.sdev_attrs			= megaraid_sdev_attrs,
+ 	.shost_attrs			= megaraid_shost_attrs,
+ };
+@@ -394,7 +393,7 @@ megaraid_init(void)
  
  
  	// register as a PCI hot-plug driver module
@@ -543989,7 +688992,7 @@
  	if (rval < 0) {
  		con_log(CL_ANN, (KERN_WARNING
  			"megaraid: could not register hotplug support.\n"));
-@@ -415,7 +415,7 @@ megaraid_exit(void)
+@@ -415,7 +414,7 @@ megaraid_exit(void)
  	con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
  
  	// unregister as PCI hotplug driver
@@ -543999,7 +689002,7 @@
  	return;
  }
 diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
-index e3c5c52..d7ec921 100644
+index e3c5c52..672c759 100644
 --- a/drivers/scsi/megaraid/megaraid_sas.c
 +++ b/drivers/scsi/megaraid/megaraid_sas.c
 @@ -2,7 +2,7 @@
@@ -544158,7 +689161,15 @@
  	.proc_name = "megaraid_sas",
  	.slave_configure = megasas_slave_configure,
  	.queuecommand = megasas_queue_command,
-@@ -1749,57 +1831,119 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
+@@ -1110,7 +1192,6 @@ static struct scsi_host_template megasas_template = {
+ 	.eh_timed_out = megasas_reset_timer,
+ 	.bios_param = megasas_bios_param,
+ 	.use_clustering = ENABLE_CLUSTERING,
+-	.use_sg_chaining = ENABLE_SG_CHAINING,
+ };
+ 
+ /**
+@@ -1749,57 +1830,119 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
  }
  
  /**
@@ -544310,7 +689321,7 @@
  }
  
  /**
-@@ -1814,22 +1958,15 @@ static int megasas_init_mfi(struct megasas_instance *instance)
+@@ -1814,22 +1957,15 @@ static int megasas_init_mfi(struct megasas_instance *instance)
  	u32 reply_q_sz;
  	u32 max_sectors_1;
  	u32 max_sectors_2;
@@ -544335,7 +689346,7 @@
  		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
  		return -EBUSY;
  	}
-@@ -1900,52 +2037,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
+@@ -1900,52 +2036,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
  		goto fail_reply_queue;
  	}
  
@@ -544389,7 +689400,7 @@
  
  	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
  
-@@ -1958,17 +2051,20 @@ static int megasas_init_mfi(struct megasas_instance *instance)
+@@ -1958,17 +2050,20 @@ static int megasas_init_mfi(struct megasas_instance *instance)
  	 * Note that older firmwares ( < FW ver 30) didn't report information
  	 * to calculate max_sectors_1. So the number ended up as zero always.
  	 */
@@ -544415,7 +689426,7 @@
  
  	kfree(ctrl_info);
  
-@@ -1976,12 +2072,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
+@@ -1976,12 +2071,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
  	* Setup tasklet for cmd completion
  	*/
  
@@ -544436,7 +689447,7 @@
  
  	pci_free_consistent(instance->pdev, reply_q_sz,
  			    instance->reply_queue, instance->reply_queue_h);
-@@ -2263,6 +2364,28 @@ static int megasas_io_attach(struct megasas_instance *instance)
+@@ -2263,6 +2363,28 @@ static int megasas_io_attach(struct megasas_instance *instance)
  	return 0;
  }
  
@@ -544465,7 +689476,7 @@
  /**
   * megasas_probe_one -	PCI hotplug entry point
   * @pdev:		PCI device structure
-@@ -2296,19 +2419,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+@@ -2296,19 +2418,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
  
  	pci_set_master(pdev);
  
@@ -544487,7 +689498,7 @@
  
  	host = scsi_host_alloc(&megasas_template,
  			       sizeof(struct megasas_instance));
-@@ -2357,8 +2469,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+@@ -2357,8 +2468,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
  	init_waitqueue_head(&instance->abort_cmd_wait_q);
  
  	spin_lock_init(&instance->cmd_pool_lock);
@@ -544498,7 +689509,7 @@
  	sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
  
  	/*
-@@ -2490,8 +2603,10 @@ static void megasas_flush_cache(struct megasas_instance *instance)
+@@ -2490,8 +2602,10 @@ static void megasas_flush_cache(struct megasas_instance *instance)
  /**
   * megasas_shutdown_controller -	Instructs FW to shutdown the controller
   * @instance:				Adapter soft state
@@ -544510,7 +689521,7 @@
  {
  	struct megasas_cmd *cmd;
  	struct megasas_dcmd_frame *dcmd;
-@@ -2514,7 +2629,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance)
+@@ -2514,7 +2628,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance)
  	dcmd->flags = MFI_FRAME_DIR_NONE;
  	dcmd->timeout = 0;
  	dcmd->data_xfer_len = 0;
@@ -544519,7 +689530,7 @@
  
  	megasas_issue_blocked_cmd(instance, cmd);
  
-@@ -2524,6 +2639,139 @@ static void megasas_shutdown_controller(struct megasas_instance *instance)
+@@ -2524,6 +2638,139 @@ static void megasas_shutdown_controller(struct megasas_instance *instance)
  }
  
  /**
@@ -544659,7 +689670,7 @@
   * megasas_detach_one -	PCI hot"un"plug entry point
   * @pdev:		PCI device structure
   */
-@@ -2536,9 +2784,12 @@ static void megasas_detach_one(struct pci_dev *pdev)
+@@ -2536,9 +2783,12 @@ static void megasas_detach_one(struct pci_dev *pdev)
  	instance = pci_get_drvdata(pdev);
  	host = instance->host;
  
@@ -544673,7 +689684,7 @@
  	tasklet_kill(&instance->isr_tasklet);
  
  	/*
-@@ -2660,6 +2911,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+@@ -2660,6 +2910,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
  	void *sense = NULL;
  	dma_addr_t sense_handle;
  	u32 *sense_ptr;
@@ -544681,7 +689692,7 @@
  
  	memset(kbuff_arr, 0, sizeof(kbuff_arr));
  
-@@ -2764,14 +3016,16 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+@@ -2764,14 +3015,16 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
  	 */
  	if (ioc->sense_len) {
  		/*
@@ -544703,7 +689714,7 @@
  			error = -EFAULT;
  			goto out;
  		}
-@@ -2874,10 +3128,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
+@@ -2874,10 +3127,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
  	if (!instance)
  		return -ENODEV;
  
@@ -544716,7 +689727,7 @@
  	return error;
  }
  
-@@ -2977,6 +3231,8 @@ static struct pci_driver megasas_pci_driver = {
+@@ -2977,6 +3230,8 @@ static struct pci_driver megasas_pci_driver = {
  	.id_table = megasas_pci_table,
  	.probe = megasas_probe_one,
  	.remove = __devexit_p(megasas_detach_one),
@@ -544725,7 +689736,7 @@
  	.shutdown = megasas_shutdown,
  };
  
-@@ -3004,7 +3260,7 @@ static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
+@@ -3004,7 +3259,7 @@ static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
  static ssize_t
  megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
  {
@@ -544734,7 +689745,7 @@
  }
  
  static ssize_t
-@@ -3019,7 +3275,65 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
+@@ -3019,7 +3274,65 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
  }
  
  static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl,
@@ -544801,7 +689812,7 @@
  
  /**
   * megasas_init - Driver load entry point
-@@ -3070,8 +3384,16 @@ static int __init megasas_init(void)
+@@ -3070,8 +3383,16 @@ static int __init megasas_init(void)
  				  &driver_attr_dbg_lvl);
  	if (rval)
  		goto err_dcf_dbg_lvl;
@@ -544818,7 +689829,7 @@
  err_dcf_dbg_lvl:
  	driver_remove_file(&megasas_pci_driver.driver,
  			   &driver_attr_release_date);
-@@ -3090,6 +3412,8 @@ err_pcidrv:
+@@ -3090,6 +3411,8 @@ err_pcidrv:
  static void __exit megasas_exit(void)
  {
  	driver_remove_file(&megasas_pci_driver.driver,
@@ -544897,11 +689908,23 @@
  };
  
  #define MEGASAS_IS_LOGICAL(scp)						\
+diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
+index 7470ff3..651d09b 100644
+--- a/drivers/scsi/mesh.c
++++ b/drivers/scsi/mesh.c
+@@ -1843,7 +1843,6 @@ static struct scsi_host_template mesh_template = {
+ 	.sg_tablesize			= SG_ALL,
+ 	.cmd_per_lun			= 2,
+ 	.use_clustering			= DISABLE_CLUSTERING,
+-	.use_sg_chaining		= ENABLE_SG_CHAINING,
+ };
+ 
+ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
 diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
-index 016c462..c02771a 100644
+index 016c462..c5ebf01 100644
 --- a/drivers/scsi/ncr53c8xx.c
 +++ b/drivers/scsi/ncr53c8xx.c
-@@ -4963,7 +4963,8 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
+@@ -4963,10 +4963,11 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
  		**	Copy back sense data to caller's buffer.
  		*/
  		memcpy(cmd->sense_buffer, cp->sense_buf,
@@ -544910,7 +689933,23 @@
 +			     sizeof(cp->sense_buf)));
  
  		if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
- 			u_char * p = (u_char*) & cmd->sense_buffer;
+-			u_char * p = (u_char*) & cmd->sense_buffer;
++			u_char *p = cmd->sense_buffer;
+ 			int i;
+ 			PRINT_ADDR(cmd, "sense data:");
+ 			for (i=0; i<14; i++) printk (" %x", *p++);
+diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
+index 28161dc..7fed353 100644
+--- a/drivers/scsi/nsp32.c
++++ b/drivers/scsi/nsp32.c
+@@ -281,7 +281,6 @@ static struct scsi_host_template nsp32_template = {
+ 	.cmd_per_lun			= 1,
+ 	.this_id			= NSP32_HOST_SCSIID,
+ 	.use_clustering			= DISABLE_CLUSTERING,
+-	.use_sg_chaining		= ENABLE_SG_CHAINING,
+ 	.eh_abort_handler       	= nsp32_eh_abort,
+ 	.eh_bus_reset_handler		= nsp32_eh_bus_reset,
+ 	.eh_host_reset_handler		= nsp32_eh_host_reset,
 diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
 index fa481b5..53857c6 100644
 --- a/drivers/scsi/pcmcia/Kconfig
@@ -545074,6 +690113,18 @@
  		data->TransferMode = MODE_IO8;
  	} else if (nsp_burst_mode == BURST_MEM32) {
  		data->TransferMode = MODE_MEM32;
+diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
+index 969b938..3454a57 100644
+--- a/drivers/scsi/pcmcia/sym53c500_cs.c
++++ b/drivers/scsi/pcmcia/sym53c500_cs.c
+@@ -692,7 +692,6 @@ static struct scsi_host_template sym53c500_driver_template = {
+      .sg_tablesize		= 32,
+      .cmd_per_lun		= 1,
+      .use_clustering		= ENABLE_CLUSTERING,
+-     .use_sg_chaining		= ENABLE_SG_CHAINING,
+      .shost_attrs		= SYM53C500_shost_attrs
+ };
+ 
 diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
 index 67ee51a..f655ae3 100644
 --- a/drivers/scsi/ppa.c
@@ -546320,7 +691371,7 @@
 -#endif
 -
 diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
-index 2886407..c94906a 100644
+index 2886407..68c0d09 100644
 --- a/drivers/scsi/qla1280.c
 +++ b/drivers/scsi/qla1280.c
 @@ -528,7 +528,7 @@ __setup("qla1280=", qla1280_setup);
@@ -546341,6 +691392,14 @@
  
  			dprintk(2, "qla1280_status_entry: Check "
  				"condition Sense data, b %i, t %i, "
+@@ -4204,7 +4204,6 @@ static struct scsi_host_template qla1280_driver_template = {
+ 	.sg_tablesize		= SG_ALL,
+ 	.cmd_per_lun		= 1,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ 
+ 
 diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
 index 71ddb5d..c51fd1f 100644
 --- a/drivers/scsi/qla2xxx/Makefile
@@ -548264,7 +693323,7 @@
  	up(&ha->vport_sem);
  
 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
-index 8ecc047..aba1e6d 100644
+index 8ecc047..3954ed2 100644
 --- a/drivers/scsi/qla2xxx/qla_os.c
 +++ b/drivers/scsi/qla2xxx/qla_os.c
 @@ -105,13 +105,12 @@ static int qla2xxx_eh_abort(struct scsi_cmnd *);
@@ -548282,7 +693341,23 @@
  	.module			= THIS_MODULE,
  	.name			= QLA2XXX_DRIVER_NAME,
  	.queuecommand		= qla2x00_queuecommand,
-@@ -179,13 +178,6 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
+@@ -132,7 +131,6 @@ struct scsi_host_template qla2x00_driver_template = {
+ 	.this_id		= -1,
+ 	.cmd_per_lun		= 3,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.sg_tablesize		= SG_ALL,
+ 
+ 	/*
+@@ -164,7 +162,6 @@ struct scsi_host_template qla24xx_driver_template = {
+ 	.this_id		= -1,
+ 	.cmd_per_lun		= 3,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.sg_tablesize		= SG_ALL,
+ 
+ 	.max_sectors		= 0xFFFF,
+@@ -179,13 +176,6 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
   * Timer routines
   */
  
@@ -548296,7 +693371,7 @@
  __inline__ void
  qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
  {
-@@ -203,7 +195,7 @@ qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
+@@ -203,7 +193,7 @@ qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
  	mod_timer(&ha->timer, jiffies + interval * HZ);
  }
  
@@ -548305,7 +693380,7 @@
  qla2x00_stop_timer(scsi_qla_host_t *ha)
  {
  	del_timer_sync(&ha->timer);
-@@ -214,12 +206,11 @@ static int qla2x00_do_dpc(void *data);
+@@ -214,12 +204,11 @@ static int qla2x00_do_dpc(void *data);
  
  static void qla2x00_rst_aen(scsi_qla_host_t *);
  
@@ -548320,7 +693395,7 @@
  
  /* -------------------------------------------------------------------------- */
  
-@@ -1060,7 +1051,7 @@ eh_host_reset_lock:
+@@ -1060,7 +1049,7 @@ eh_host_reset_lock:
  * Returns:
  *      0 = success
  */
@@ -548329,7 +693404,7 @@
  qla2x00_loop_reset(scsi_qla_host_t *ha)
  {
  	int ret;
-@@ -1479,8 +1470,7 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
+@@ -1479,8 +1468,7 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
  static int
  qla2x00_iospace_config(scsi_qla_host_t *ha)
  {
@@ -548339,7 +693414,7 @@
  
  	if (pci_request_selected_regions(ha->pdev, ha->bars,
  	    QLA2XXX_DRIVER_NAME)) {
-@@ -1495,10 +1485,8 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
+@@ -1495,10 +1483,8 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
  
  	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
  	pio = pci_resource_start(ha->pdev, 0);
@@ -548352,7 +693427,7 @@
  			qla_printk(KERN_WARNING, ha,
  			    "Invalid PCI I/O region size (%s)...\n",
  				pci_name(ha->pdev));
-@@ -1511,28 +1499,23 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
+@@ -1511,28 +1497,23 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
  		pio = 0;
  	}
  	ha->pio_address = pio;
@@ -548385,7 +693460,7 @@
  	if (!ha->iobase) {
  		qla_printk(KERN_ERR, ha,
  		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
-@@ -1701,9 +1684,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+@@ -1701,9 +1682,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
  	/* load the F/W, read paramaters, and init the H/W */
  	ha->instance = num_hosts;
  
@@ -548398,7 +693473,7 @@
  
  	INIT_LIST_HEAD(&ha->list);
  	INIT_LIST_HEAD(&ha->fcports);
-@@ -1807,6 +1791,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+@@ -1807,6 +1789,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
  
  	qla2x00_init_host_attr(ha);
  
@@ -548407,7 +693482,7 @@
  	qla_printk(KERN_INFO, ha, "\n"
  	    " QLogic Fibre Channel HBA Driver: %s\n"
  	    "  QLogic %s - %s\n"
-@@ -1838,6 +1824,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
+@@ -1838,6 +1822,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
  
  	ha = pci_get_drvdata(pdev);
  
@@ -548416,7 +693491,7 @@
  	qla2x00_free_sysfs_attr(ha);
  
  	fc_remove_host(ha->host);
-@@ -1871,8 +1859,11 @@ qla2x00_free_device(scsi_qla_host_t *ha)
+@@ -1871,8 +1857,11 @@ qla2x00_free_device(scsi_qla_host_t *ha)
  		kthread_stop(t);
  	}
  
@@ -548429,7 +693504,7 @@
  
  	ha->flags.online = 0;
  
-@@ -2016,7 +2007,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
+@@ -2016,7 +2005,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
  *      0  = success.
  *      1  = failure.
  */
@@ -548438,7 +693513,7 @@
  qla2x00_mem_alloc(scsi_qla_host_t *ha)
  {
  	char	name[16];
-@@ -2213,7 +2204,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
+@@ -2213,7 +2202,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
  * Input:
  *      ha = adapter block pointer.
  */
@@ -548447,7 +693522,7 @@
  qla2x00_mem_free(scsi_qla_host_t *ha)
  {
  	struct list_head	*fcpl, *fcptemp;
-@@ -2228,6 +2219,10 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
+@@ -2228,6 +2217,10 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
  	/* free sp pool */
  	qla2x00_free_sp_pool(ha);
  
@@ -548458,7 +693533,7 @@
  	if (ha->fw_dump) {
  		if (ha->eft)
  			dma_free_coherent(&ha->pdev->dev,
-@@ -2748,23 +2743,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
+@@ -2748,23 +2741,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
  	qla2x00_restart_timer(ha, WATCH_INTERVAL);
  }
  
@@ -548625,10 +693700,18 @@
  			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
  				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
-index 89460d2..d3f8664 100644
+index 89460d2..2e2b9fe 100644
 --- a/drivers/scsi/qla4xxx/ql4_os.c
 +++ b/drivers/scsi/qla4xxx/ql4_os.c
-@@ -173,18 +173,6 @@ static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
+@@ -94,7 +94,6 @@ static struct scsi_host_template qla4xxx_driver_template = {
+ 	.this_id		= -1,
+ 	.cmd_per_lun		= 3,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.sg_tablesize		= SG_ALL,
+ 
+ 	.max_sectors		= 0xFFFF,
+@@ -173,18 +172,6 @@ static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
  		printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
  }
  
@@ -548647,7 +693730,7 @@
  static int qla4xxx_host_get_param(struct Scsi_Host *shost,
  				  enum iscsi_host_param param, char *buf)
  {
-@@ -193,7 +181,7 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+@@ -193,7 +180,7 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
  
  	switch (param) {
  	case ISCSI_HOST_PARAM_HWADDRESS:
@@ -548656,7 +693739,7 @@
  		break;
  	case ISCSI_HOST_PARAM_IPADDRESS:
  		len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0],
-@@ -298,8 +286,7 @@ void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
+@@ -298,8 +285,7 @@ void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
  		return;
  
  	if (ddb_entry->conn) {
@@ -548666,7 +693749,7 @@
  		iscsi_remove_session(ddb_entry->sess);
  	}
  	iscsi_free_session(ddb_entry->sess);
-@@ -309,6 +296,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
+@@ -309,6 +295,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
  {
  	int err;
  
@@ -548674,7 +693757,7 @@
  	err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
  	if (err) {
  		DEBUG2(printk(KERN_ERR "Could not add session.\n"));
-@@ -321,9 +309,6 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
+@@ -321,9 +308,6 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
  		DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
  		return -ENOMEM;
  	}
@@ -548684,6 +693767,18 @@
  	return 0;
  }
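Every scsi_host_template touched in this series loses its .use_sg_chaining entry: with this merge the midlayer builds chained scatterlists unconditionally, so the per-driver opt-in flag no longer exists. Roughly, a host template now looks like the sketch below; the field values and the example_queuecommand name are illustrative only, not taken from any real driver.

static struct scsi_host_template example_driver_template = {
	.module			= THIS_MODULE,
	.name			= "example",
	.queuecommand		= example_queuecommand,
	.this_id		= -1,
	.cmd_per_lun		= 3,
	.use_clustering		= ENABLE_CLUSTERING,
	/* .use_sg_chaining is gone; sg chaining is always available now */
	.sg_tablesize		= SG_ALL,
	.max_sectors		= 0xFFFF,
};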
  
+diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
+index 1769f96..1e874f1 100644
+--- a/drivers/scsi/qlogicfas.c
++++ b/drivers/scsi/qlogicfas.c
+@@ -197,7 +197,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
+ 	.sg_tablesize		= SG_ALL,
+ 	.cmd_per_lun		= 1,
+ 	.use_clustering		= DISABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ 
+ static __init int qlogicfas_init(void)
 diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
 index 7a2e798..65455ab 100644
 --- a/drivers/scsi/qlogicpti.c
@@ -548752,7 +693847,7 @@
  		sbus_writew(out_ptr, qpti->qregs + MBOX5);
  		Cmnd->host_scribble = (unsigned char *) done_queue;
 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
-index 0fb1709..1a9fba6 100644
+index 0fb1709..b35d194 100644
 --- a/drivers/scsi/scsi.c
 +++ b/drivers/scsi/scsi.c
 @@ -122,6 +122,11 @@ static const char *const scsi_device_types[] = {
@@ -549111,6 +694206,15 @@
   *              request, waking processes that are waiting on results,
   *              etc.
   */
+@@ -698,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
+ 				"Notifying upper driver of completion "
+ 				"(result %x)\n", cmd->result));
+ 
+-	good_bytes = cmd->request_bufflen;
++	good_bytes = scsi_bufflen(cmd);
+         if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ 		drv = scsi_cmd_to_driver(cmd);
+ 		if (drv->done)
 @@ -708,18 +767,14 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
  }
  EXPORT_SYMBOL(scsi_finish_command);
@@ -549291,10 +694395,69 @@
  struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
  		uint channel, uint id, uint lun)
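The scsi.c hunk above switches scsi_finish_command() to scsi_bufflen(); the same accessor family (scsi_sglist, scsi_sg_count, scsi_get_resid, scsi_set_resid) replaces direct use of request_bufflen, request_buffer, use_sg and resid throughout the rest of this patch. A minimal sketch of the pattern as it might appear in a driver's completion path, assuming a hypothetical example_report() helper:

#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>

static void example_report(struct scsi_cmnd *cmd)
{
	struct scatterlist *sgl = scsi_sglist(cmd);	/* was cmd->request_buffer */

	printk(KERN_DEBUG "cmd %p: %u bytes in %u sg entries, resid %d, sgl %p\n",
	       cmd, scsi_bufflen(cmd), scsi_sg_count(cmd),
	       scsi_get_resid(cmd), sgl);

	if (cmd->result)
		scsi_set_resid(cmd, scsi_bufflen(cmd));	/* report nothing moved */
}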
 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
-index 46cae5a..82c06f0 100644
+index 46cae5a..1541c17 100644
 --- a/drivers/scsi/scsi_debug.c
 +++ b/drivers/scsi/scsi_debug.c
-@@ -329,7 +329,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
+@@ -280,6 +280,8 @@ static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba,
+ 		      unsigned int num, struct sdebug_dev_info * devip);
+ static int resp_report_luns(struct scsi_cmnd * SCpnt,
+ 			    struct sdebug_dev_info * devip);
++static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
++			    unsigned int num, struct sdebug_dev_info *devip);
+ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
+                                 int arr_len);
+ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
+@@ -311,12 +313,48 @@ static void sdebug_max_tgts_luns(void);
+ static struct device pseudo_primary;
+ static struct bus_type pseudo_lld_bus;
+ 
++static void get_data_transfer_info(unsigned char *cmd,
++				   unsigned long long *lba, unsigned int *num)
++{
++	int i;
++
++	switch (*cmd) {
++	case WRITE_16:
++	case READ_16:
++		for (*lba = 0, i = 0; i < 8; ++i) {
++			if (i > 0)
++				*lba <<= 8;
++			*lba += cmd[2 + i];
++		}
++		*num = cmd[13] + (cmd[12] << 8) +
++			(cmd[11] << 16) + (cmd[10] << 24);
++		break;
++	case WRITE_12:
++	case READ_12:
++		*lba = cmd[5] + (cmd[4] << 8) +	(cmd[3] << 16) + (cmd[2] << 24);
++		*num = cmd[9] + (cmd[8] << 8) +	(cmd[7] << 16) + (cmd[6] << 24);
++		break;
++	case WRITE_10:
++	case READ_10:
++	case XDWRITEREAD_10:
++		*lba = cmd[5] + (cmd[4] << 8) +	(cmd[3] << 16) + (cmd[2] << 24);
++		*num = cmd[8] + (cmd[7] << 8);
++		break;
++	case WRITE_6:
++	case READ_6:
++		*lba = cmd[3] + (cmd[2] << 8) + ((cmd[1] & 0x1f) << 16);
++		*num = (0 == cmd[4]) ? 256 : cmd[4];
++		break;
++	default:
++		break;
++	}
++}
+ 
+ static
+ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
+ {
+ 	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
+-	int len, k, j;
++	int len, k;
+ 	unsigned int num;
+ 	unsigned long long lba;
+ 	int errsts = 0;
+@@ -329,7 +367,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
  	if (done == NULL)
  		return 0;	/* assume mid level reprocessing command */
  
@@ -549303,18 +694466,112 @@
  	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
  		printk(KERN_INFO "scsi_debug: cmd ");
  		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
-@@ -603,26 +603,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
+@@ -452,28 +490,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
+ 			break;
+ 		if (scsi_debug_fake_rw)
+ 			break;
+-		if ((*cmd) == READ_16) {
+-			for (lba = 0, j = 0; j < 8; ++j) {
+-				if (j > 0)
+-					lba <<= 8;
+-				lba += cmd[2 + j];
+-			}
+-			num = cmd[13] + (cmd[12] << 8) +
+-				(cmd[11] << 16) + (cmd[10] << 24);
+-		} else if ((*cmd) == READ_12) {
+-			lba = cmd[5] + (cmd[4] << 8) +
+-				(cmd[3] << 16) + (cmd[2] << 24);
+-			num = cmd[9] + (cmd[8] << 8) +
+-				(cmd[7] << 16) + (cmd[6] << 24);
+-		} else if ((*cmd) == READ_10) {
+-			lba = cmd[5] + (cmd[4] << 8) +
+-				(cmd[3] << 16) + (cmd[2] << 24);
+-			num = cmd[8] + (cmd[7] << 8);
+-		} else {	/* READ (6) */
+-			lba = cmd[3] + (cmd[2] << 8) +
+-				((cmd[1] & 0x1f) << 16);
+-			num = (0 == cmd[4]) ? 256 : cmd[4];
+-		}
++		get_data_transfer_info(cmd, &lba, &num);
+ 		errsts = resp_read(SCpnt, lba, num, devip);
+ 		if (inj_recovered && (0 == errsts)) {
+ 			mk_sense_buffer(devip, RECOVERED_ERROR,
+@@ -500,28 +517,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
+ 			break;
+ 		if (scsi_debug_fake_rw)
+ 			break;
+-		if ((*cmd) == WRITE_16) {
+-			for (lba = 0, j = 0; j < 8; ++j) {
+-				if (j > 0)
+-					lba <<= 8;
+-				lba += cmd[2 + j];
+-			}
+-			num = cmd[13] + (cmd[12] << 8) +
+-				(cmd[11] << 16) + (cmd[10] << 24);
+-		} else if ((*cmd) == WRITE_12) {
+-			lba = cmd[5] + (cmd[4] << 8) +
+-				(cmd[3] << 16) + (cmd[2] << 24);
+-			num = cmd[9] + (cmd[8] << 8) +
+-				(cmd[7] << 16) + (cmd[6] << 24);
+-		} else if ((*cmd) == WRITE_10) {
+-			lba = cmd[5] + (cmd[4] << 8) +
+-				(cmd[3] << 16) + (cmd[2] << 24);
+-			num = cmd[8] + (cmd[7] << 8);
+-		} else {	/* WRITE (6) */
+-			lba = cmd[3] + (cmd[2] << 8) +
+-				((cmd[1] & 0x1f) << 16);
+-			num = (0 == cmd[4]) ? 256 : cmd[4];
+-		}
++		get_data_transfer_info(cmd, &lba, &num);
+ 		errsts = resp_write(SCpnt, lba, num, devip);
+ 		if (inj_recovered && (0 == errsts)) {
+ 			mk_sense_buffer(devip, RECOVERED_ERROR,
+@@ -549,6 +545,28 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
+ 	case WRITE_BUFFER:
+ 		errsts = check_readiness(SCpnt, 1, devip);
+ 		break;
++	case XDWRITEREAD_10:
++		if (!scsi_bidi_cmnd(SCpnt)) {
++			mk_sense_buffer(devip, ILLEGAL_REQUEST,
++					INVALID_FIELD_IN_CDB, 0);
++			errsts = check_condition_result;
++			break;
++		}
++
++		errsts = check_readiness(SCpnt, 0, devip);
++		if (errsts)
++			break;
++		if (scsi_debug_fake_rw)
++			break;
++		get_data_transfer_info(cmd, &lba, &num);
++		errsts = resp_read(SCpnt, lba, num, devip);
++		if (errsts)
++			break;
++		errsts = resp_write(SCpnt, lba, num, devip);
++		if (errsts)
++			break;
++		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
++		break;
+ 	default:
+ 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ 			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
+@@ -601,28 +619,18 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
+ 	int k, req_len, act_len, len, active;
+ 	void * kaddr;
  	void * kaddr_off;
- 	struct scatterlist * sg;
+-	struct scatterlist * sg;
++	struct scatterlist *sg;
++	struct scsi_data_buffer *sdb = scsi_in(scp);
  
 -	if (0 == scp->request_bufflen)
-+	if (0 == scsi_bufflen(scp))
++	if (!sdb->length)
  		return 0;
 -	if (NULL == scp->request_buffer)
-+	if (NULL == scsi_sglist(scp))
++	if (!sdb->table.sgl)
  		return (DID_ERROR << 16);
- 	if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
- 	      (scp->sc_data_direction == DMA_FROM_DEVICE)))
+-	if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
+-	      (scp->sc_data_direction == DMA_FROM_DEVICE)))
++	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
  		return (DID_ERROR << 16);
 -	if (0 == scp->use_sg) {
 -		req_len = scp->request_bufflen;
@@ -549329,25 +694586,25 @@
  	active = 1;
  	req_len = act_len = 0;
 -	scsi_for_each_sg(scp, sg, scp->use_sg, k) {
-+	scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
++	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, k) {
  		if (active) {
  			kaddr = (unsigned char *)
  				kmap_atomic(sg_page(sg), KM_USER0);
-@@ -640,10 +630,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
+@@ -640,10 +648,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
  		}
  		req_len += sg->length;
  	}
 -	if (scp->resid)
 -		scp->resid -= act_len;
-+	if (scsi_get_resid(scp))
-+		scsi_set_resid(scp, scsi_get_resid(scp) - act_len);
++	if (sdb->resid)
++		sdb->resid -= act_len;
  	else
 -		scp->resid = req_len - act_len;
-+		scsi_set_resid(scp, req_len - act_len);
++		sdb->resid = req_len - act_len;
  	return 0;
  }
  
-@@ -656,22 +646,15 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
+@@ -656,22 +664,14 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
  	void * kaddr_off;
  	struct scatterlist * sg;
  
@@ -549357,8 +694614,9 @@
 -	if (NULL == scp->request_buffer)
 +	if (NULL == scsi_sglist(scp))
  		return -1;
- 	if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
- 	      (scp->sc_data_direction == DMA_TO_DEVICE)))
+-	if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
+-	      (scp->sc_data_direction == DMA_TO_DEVICE)))
++	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
  		return -1;
 -	if (0 == scp->use_sg) {
 -		req_len = scp->request_bufflen;
@@ -549373,6 +694631,65 @@
  		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
  		if (NULL == kaddr)
  			return -1;
+@@ -1973,6 +1973,50 @@ static int resp_report_luns(struct scsi_cmnd * scp,
+ 				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
+ }
+ 
++static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
++			    unsigned int num, struct sdebug_dev_info *devip)
++{
++	int i, j, ret = -1;
++	unsigned char *kaddr, *buf;
++	unsigned int offset;
++	struct scatterlist *sg;
++	struct scsi_data_buffer *sdb = scsi_in(scp);
++
++	/* better not to use temporary buffer. */
++	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
++	if (!buf)
++		return ret;
++
++	offset = 0;
++	scsi_for_each_sg(scp, sg, scsi_sg_count(scp), i) {
++		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
++		if (!kaddr)
++			goto out;
++
++		memcpy(buf + offset, kaddr + sg->offset, sg->length);
++		offset += sg->length;
++		kunmap_atomic(kaddr, KM_USER0);
++	}
++
++	offset = 0;
++	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
++		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
++		if (!kaddr)
++			goto out;
++
++		for (j = 0; j < sg->length; j++)
++			*(kaddr + sg->offset + j) ^= *(buf + offset + j);
++
++		offset += sg->length;
++		kunmap_atomic(kaddr, KM_USER0);
++	}
++	ret = 0;
++out:
++	kfree(buf);
++
++	return ret;
++}
++
+ /* When timer goes off this function is called. */
+ static void timer_intr_handler(unsigned long indx)
+ {
+@@ -2006,6 +2050,7 @@ static int scsi_debug_slave_alloc(struct scsi_device * sdp)
+ 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
+ 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
++	set_bit(QUEUE_FLAG_BIDI, &sdp->request_queue->queue_flags);
+ 	return 0;
+ }
+ 
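scsi_debug now sets QUEUE_FLAG_BIDI on every device queue and services XDWRITEREAD_10 as a read, a write, then an XOR pass over the second buffer. For a bidirectional command the write-side data stays in cmd->sdb (reached via scsi_out()), while the read side hangs off request->next_rq and is reached via scsi_in(). A minimal sketch of walking the read-side scatterlist of a bidi command; example_fill_in_buffer() is a hypothetical helper, not part of the patch:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

static int example_fill_in_buffer(struct scsi_cmnd *cmd, u8 val)
{
	struct scsi_data_buffer *sdb = scsi_in(cmd);	/* read side of the bidi pair */
	struct scatterlist *sg;
	unsigned char *kaddr;
	int i;

	if (!scsi_bidi_cmnd(cmd))
		return -EINVAL;

	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
		kaddr = kmap_atomic(sg_page(sg), KM_USER0);
		memset(kaddr + sg->offset, val, sg->length);
		kunmap_atomic(kaddr, KM_USER0);
	}
	return 0;
}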
 diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
 index 348cc5a..b8de041 100644
 --- a/drivers/scsi/scsi_devinfo.c
@@ -549467,7 +694784,7 @@
  {
  #ifdef CONFIG_SCSI_PROC_FS
 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
-index ebaca4c..547e85a 100644
+index ebaca4c..045a086 100644
 --- a/drivers/scsi/scsi_error.c
 +++ b/drivers/scsi/scsi_error.c
 @@ -62,7 +62,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
@@ -549605,16 +694922,48 @@
  void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
  			unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
  {
-@@ -625,7 +625,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
+@@ -617,29 +617,27 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
+ 	ses->cmd_len = scmd->cmd_len;
+ 	memcpy(ses->cmnd, scmd->cmnd, sizeof(scmd->cmnd));
+ 	ses->data_direction = scmd->sc_data_direction;
+-	ses->bufflen = scmd->request_bufflen;
+-	ses->buffer = scmd->request_buffer;
+-	ses->use_sg = scmd->use_sg;
+-	ses->resid = scmd->resid;
++	ses->sdb = scmd->sdb;
++	ses->next_rq = scmd->request->next_rq;
+ 	ses->result = scmd->result;
  
++	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
++	scmd->request->next_rq = NULL;
++
  	if (sense_bytes) {
- 		scmd->request_bufflen = min_t(unsigned,
+-		scmd->request_bufflen = min_t(unsigned,
 -		                       sizeof(scmd->sense_buffer), sense_bytes);
-+		                       SCSI_SENSE_BUFFERSIZE, sense_bytes);
++		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
++					 sense_bytes);
  		sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
- 		                                       scmd->request_bufflen);
- 		scmd->request_buffer = &ses->sense_sgl;
-@@ -657,7 +657,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
+-		                                       scmd->request_bufflen);
+-		scmd->request_buffer = &ses->sense_sgl;
++			    scmd->sdb.length);
++		scmd->sdb.table.sgl = &ses->sense_sgl;
+ 		scmd->sc_data_direction = DMA_FROM_DEVICE;
+-		scmd->use_sg = 1;
++		scmd->sdb.table.nents = 1;
+ 		memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
+ 		scmd->cmnd[0] = REQUEST_SENSE;
+-		scmd->cmnd[4] = scmd->request_bufflen;
++		scmd->cmnd[4] = scmd->sdb.length;
+ 		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+ 	} else {
+-		scmd->request_buffer = NULL;
+-		scmd->request_bufflen = 0;
+ 		scmd->sc_data_direction = DMA_NONE;
+-		scmd->use_sg = 0;
+ 		if (cmnd) {
+ 			memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
+ 			memcpy(scmd->cmnd, cmnd, cmnd_size);
+@@ -657,7 +655,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
  	 * Zero the sense buffer.  The scsi spec mandates that any
  	 * untransferred sense data should be interpreted as being zero.
  	 */
@@ -549623,7 +694972,7 @@
  }
  EXPORT_SYMBOL(scsi_eh_prep_cmnd);
  
-@@ -667,7 +667,7 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
+@@ -667,7 +665,7 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
   * @ses:        saved information from a corresponding call to scsi_prep_eh_cmnd
   *
   * Undo any damage done by above scsi_prep_eh_cmnd().
@@ -549632,7 +694981,20 @@
  void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
  {
  	/*
-@@ -697,7 +697,7 @@ EXPORT_SYMBOL(scsi_eh_restore_cmnd);
+@@ -676,10 +674,8 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
+ 	scmd->cmd_len = ses->cmd_len;
+ 	memcpy(scmd->cmnd, ses->cmnd, sizeof(scmd->cmnd));
+ 	scmd->sc_data_direction = ses->data_direction;
+-	scmd->request_bufflen = ses->bufflen;
+-	scmd->request_buffer = ses->buffer;
+-	scmd->use_sg = ses->use_sg;
+-	scmd->resid = ses->resid;
++	scmd->sdb = ses->sdb;
++	scmd->request->next_rq = ses->next_rq;
+ 	scmd->result = ses->result;
+ }
+ EXPORT_SYMBOL(scsi_eh_restore_cmnd);
+@@ -697,7 +693,7 @@ EXPORT_SYMBOL(scsi_eh_restore_cmnd);
   *
   * Return value:
   *    SUCCESS or FAILED or NEEDS_RETRY
@@ -549641,7 +695003,7 @@
  static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
  			     int cmnd_size, int timeout, unsigned sense_bytes)
  {
-@@ -765,7 +765,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
+@@ -765,7 +761,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
   *    Some hosts automatically obtain this information, others require
   *    that we obtain it on our own. This function will *not* return until
   *    the command either times out, or it completes.
@@ -549650,7 +695012,7 @@
  static int scsi_request_sense(struct scsi_cmnd *scmd)
  {
  	return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0);
-@@ -779,10 +779,10 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
+@@ -779,10 +775,10 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
   * Notes:
   *    We don't want to use the normal command completion while we are are
   *    still handling errors - it may cause other commands to be queued,
@@ -549663,7 +695025,7 @@
  void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
  {
  	scmd->device->host->host_failed--;
-@@ -794,7 +794,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
+@@ -794,7 +790,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
  /**
   * scsi_eh_get_sense - Get device sense data.
   * @work_q:	Queue of commands to process.
@@ -549672,7 +695034,7 @@
   *
   * Description:
   *    See if we need to request sense information.  if so, then get it
-@@ -802,7 +802,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
+@@ -802,7 +798,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
   *
   * Notes:
   *    This has the unfortunate side effect that if a shost adapter does
@@ -549681,7 +695043,7 @@
   *    it down before we request it.
   *
   *    All drivers should request sense information internally these days,
-@@ -810,7 +810,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
+@@ -810,7 +806,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
   *
   *    XXX: Long term this code should go away, but that needs an audit of
   *         all LLDDs first.
@@ -549690,7 +695052,7 @@
  int scsi_eh_get_sense(struct list_head *work_q,
  		      struct list_head *done_q)
  {
-@@ -858,11 +858,11 @@ EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
+@@ -858,11 +854,11 @@ EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
  
  /**
   * scsi_eh_tur - Send TUR to device.
@@ -549704,7 +695066,7 @@
  static int scsi_eh_tur(struct scsi_cmnd *scmd)
  {
  	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
-@@ -887,17 +887,17 @@ retry_tur:
+@@ -887,17 +883,17 @@ retry_tur:
  }
  
  /**
@@ -549728,7 +695090,7 @@
  static int scsi_eh_abort_cmds(struct list_head *work_q,
  			      struct list_head *done_q)
  {
-@@ -931,11 +931,11 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
+@@ -931,11 +927,11 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
  
  /**
   * scsi_eh_try_stu - Send START_UNIT to device.
@@ -549742,7 +695104,7 @@
  static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
  {
  	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
-@@ -956,13 +956,14 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
+@@ -956,13 +952,14 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
  
   /**
   * scsi_eh_stu - send START_UNIT if needed
@@ -549760,7 +695122,7 @@
  static int scsi_eh_stu(struct Scsi_Host *shost,
  			      struct list_head *work_q,
  			      struct list_head *done_q)
-@@ -1008,14 +1009,15 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
+@@ -1008,14 +1005,15 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
  /**
   * scsi_eh_bus_device_reset - send bdr if needed
   * @shost:	scsi host being recovered.
@@ -549779,7 +695141,7 @@
  static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
  				    struct list_head *work_q,
  				    struct list_head *done_q)
-@@ -1063,9 +1065,10 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
+@@ -1063,9 +1061,10 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
  
  /**
   * scsi_eh_bus_reset - send a bus reset 
@@ -549793,7 +695155,7 @@
  static int scsi_eh_bus_reset(struct Scsi_Host *shost,
  			     struct list_head *work_q,
  			     struct list_head *done_q)
-@@ -1122,7 +1125,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
+@@ -1122,7 +1121,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
   * scsi_eh_host_reset - send a host reset 
   * @work_q:	list_head for processed commands.
   * @done_q:	list_head for processed commands.
@@ -549802,7 +695164,7 @@
  static int scsi_eh_host_reset(struct list_head *work_q,
  			      struct list_head *done_q)
  {
-@@ -1157,8 +1160,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
+@@ -1157,8 +1156,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
   * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
   * @work_q:	list_head for processed commands.
   * @done_q:	list_head for processed commands.
@@ -549812,7 +695174,7 @@
  static void scsi_eh_offline_sdevs(struct list_head *work_q,
  				  struct list_head *done_q)
  {
-@@ -1191,7 +1193,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
+@@ -1191,7 +1189,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
   *    is woken.  In cases where the error code indicates an error that
   *    doesn't require the error handler read (i.e. we don't need to
   *    abort/reset), this function should return SUCCESS.
@@ -549821,7 +695183,7 @@
  int scsi_decide_disposition(struct scsi_cmnd *scmd)
  {
  	int rtn;
-@@ -1372,7 +1374,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
+@@ -1372,7 +1370,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
   *
   *	If scsi_allocate_request() fails for what ever reason, we
   *	completely forget to lock the door.
@@ -549830,7 +695192,7 @@
  static void scsi_eh_lock_door(struct scsi_device *sdev)
  {
  	unsigned char cmnd[MAX_COMMAND_SIZE];
-@@ -1396,7 +1398,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
+@@ -1396,7 +1394,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
   * Notes:
   *    When we entered the error handler, we blocked all further i/o to
   *    this device.  we need to 'reverse' this process.
@@ -549839,7 +695201,7 @@
  static void scsi_restart_operations(struct Scsi_Host *shost)
  {
  	struct scsi_device *sdev;
-@@ -1440,9 +1442,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
+@@ -1440,9 +1438,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
  /**
   * scsi_eh_ready_devs - check device ready state and recover if not.
   * @shost: 	host to be recovered.
@@ -549852,7 +695214,7 @@
  void scsi_eh_ready_devs(struct Scsi_Host *shost,
  			struct list_head *work_q,
  			struct list_head *done_q)
-@@ -1458,8 +1460,7 @@ EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
+@@ -1458,8 +1456,7 @@ EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
  /**
   * scsi_eh_flush_done_q - finish processed commands or retry them.
   * @done_q:	list_head of processed commands.
@@ -549862,7 +695224,7 @@
  void scsi_eh_flush_done_q(struct list_head *done_q)
  {
  	struct scsi_cmnd *scmd, *next;
-@@ -1513,7 +1514,7 @@ EXPORT_SYMBOL(scsi_eh_flush_done_q);
+@@ -1513,7 +1510,7 @@ EXPORT_SYMBOL(scsi_eh_flush_done_q);
   *    scsi_finish_cmd() called for it.  we do all of the retry stuff
   *    here, so when we restart the host after we return it should have an
   *    empty queue.
@@ -549871,7 +695233,7 @@
  static void scsi_unjam_host(struct Scsi_Host *shost)
  {
  	unsigned long flags;
-@@ -1540,7 +1541,7 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
+@@ -1540,7 +1537,7 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
   * Notes:
   *    This is the main error handling loop.  This is run as a kernel thread
   *    for every SCSI host and handles all error handling activity.
@@ -549880,7 +695242,17 @@
  int scsi_error_handler(void *data)
  {
  	struct Scsi_Host *shost = data;
-@@ -1769,7 +1770,7 @@ EXPORT_SYMBOL(scsi_reset_provider);
+@@ -1699,8 +1696,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
+ 	memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));
+     
+ 	scmd->scsi_done		= scsi_reset_provider_done_command;
+-	scmd->request_buffer		= NULL;
+-	scmd->request_bufflen		= 0;
++	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+ 
+ 	scmd->cmd_len			= 0;
+ 
+@@ -1769,7 +1765,7 @@ EXPORT_SYMBOL(scsi_reset_provider);
   *
   * Return value:
   *	1 if valid sense data information found, else 0;
@@ -549889,7 +695261,7 @@
  int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
                           struct scsi_sense_hdr *sshdr)
  {
-@@ -1819,14 +1820,12 @@ int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
+@@ -1819,14 +1815,12 @@ int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
  				 struct scsi_sense_hdr *sshdr)
  {
  	return scsi_normalize_sense(cmd->sense_buffer,
@@ -549906,7 +695278,7 @@
   * @sense_buffer:	byte array of descriptor format sense data
   * @sb_len:		number of valid bytes in sense_buffer
   * @desc_type:		value of descriptor type to find
-@@ -1837,7 +1836,7 @@ EXPORT_SYMBOL(scsi_command_normalize_sense);
+@@ -1837,7 +1831,7 @@ EXPORT_SYMBOL(scsi_command_normalize_sense);
   *
   * Return value:
   *	pointer to start of (first) descriptor if found else NULL
@@ -549915,7 +695287,7 @@
  const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
  				int desc_type)
  {
-@@ -1865,9 +1864,7 @@ const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
+@@ -1865,9 +1859,7 @@ const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
  EXPORT_SYMBOL(scsi_sense_desc_find);
  
  /**
@@ -549926,7 +695298,7 @@
   * @sense_buffer:	byte array of sense data
   * @sb_len:		number of valid bytes in sense_buffer
   * @info_out:		pointer to 64 integer where 8 or 4 byte information
-@@ -1875,7 +1872,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
+@@ -1875,7 +1867,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
   *
   * Return value:
   *	1 if information field found, 0 if not found.
@@ -549994,10 +695366,68 @@
  			return -ENODEV;
  	} else if (!scsi_block_when_processing_errors(sdev))
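With the command's data now described by struct scsi_data_buffer, the error-handler save area shrinks: scsi_eh_save carries the whole sdb plus the bidi next_rq pointer instead of the old buffer/bufflen/use_sg/resid quartet, and scsi_eh_prep_cmnd() parks the live sdb while the recovery command borrows the sense scatterlist. Stripped of the sense-buffer setup, the save/restore reduces to roughly this sketch (example_save/example_restore are illustrative names only):

#include <linux/string.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

static void example_save(struct scsi_cmnd *scmd, struct scsi_eh_save *ses)
{
	ses->sdb     = scmd->sdb;
	ses->next_rq = scmd->request->next_rq;

	/* the recovery command must start with an empty data buffer */
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
	scmd->request->next_rq = NULL;
}

static void example_restore(struct scsi_cmnd *scmd, struct scsi_eh_save *ses)
{
	scmd->sdb = ses->sdb;
	scmd->request->next_rq = ses->next_rq;
}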
 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
-index a9ac5b1..7c4c889 100644
+index a9ac5b1..b12fb31 100644
 --- a/drivers/scsi/scsi_lib.c
 +++ b/drivers/scsi/scsi_lib.c
-@@ -175,7 +175,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+@@ -8,6 +8,7 @@
+  */
+ 
+ #include <linux/bio.h>
++#include <linux/bitops.h>
+ #include <linux/blkdev.h>
+ #include <linux/completion.h>
+ #include <linux/kernel.h>
+@@ -34,13 +35,6 @@
+ #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
+ #define SG_MEMPOOL_SIZE		2
+ 
+-/*
+- * The maximum number of SG segments that we will put inside a scatterlist
+- * (unless chaining is used). Should ideally fit inside a single page, to
+- * avoid a higher order allocation.
+- */
+-#define SCSI_MAX_SG_SEGMENTS	128
+-
+ struct scsi_host_sg_pool {
+ 	size_t		size;
+ 	char		*name;
+@@ -48,22 +42,31 @@ struct scsi_host_sg_pool {
+ 	mempool_t	*pool;
+ };
+ 
+-#define SP(x) { x, "sgpool-" #x }
++#define SP(x) { x, "sgpool-" __stringify(x) }
++#if (SCSI_MAX_SG_SEGMENTS < 32)
++#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
++#endif
+ static struct scsi_host_sg_pool scsi_sg_pools[] = {
+ 	SP(8),
+ 	SP(16),
+-#if (SCSI_MAX_SG_SEGMENTS > 16)
+-	SP(32),
+ #if (SCSI_MAX_SG_SEGMENTS > 32)
+-	SP(64),
++	SP(32),
+ #if (SCSI_MAX_SG_SEGMENTS > 64)
++	SP(64),
++#if (SCSI_MAX_SG_SEGMENTS > 128)
+ 	SP(128),
++#if (SCSI_MAX_SG_SEGMENTS > 256)
++#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
++#endif
+ #endif
+ #endif
+ #endif
++	SP(SCSI_MAX_SG_SEGMENTS)
+ };
+ #undef SP
+ 
++static struct kmem_cache *scsi_bidi_sdb_cache;
++
+ static void scsi_run_queue(struct request_queue *q);
+ 
+ /*
+@@ -175,7 +178,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
   *
   * returns the req->errors value which is the scsi_cmnd result
   * field.
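The sgpool table is now generated with __stringify() and checked at compile time: one mempool per power of two from 8 up to SCSI_MAX_SG_SEGMENTS, with #error guards if that constant drops below 32 or exceeds 256, plus a new scsi_bidi_sdb_cache for the second data buffer of bidirectional requests. Later in this file the pool lookup collapses from a switch into get_count_order(); the userspace sketch below mirrors that mapping (sample_index is just for illustration):

#include <stdio.h>

/* nents <= 8 uses pool 0, then one pool per power of two (16, 32, 64, ...).
 * get_count_order(n) is ceil(log2(n)), so subtracting 3 skips the sizes
 * below the smallest 8-entry pool.
 */
static unsigned int sample_index(unsigned int nents)
{
	unsigned int order = 0;

	if (nents <= 8)
		return 0;
	while ((1u << order) < nents)	/* ceil(log2(nents)) */
		order++;
	return order - 3;
}

int main(void)
{
	printf("%u %u %u\n", sample_index(8), sample_index(9), sample_index(128));
	/* prints: 0 1 4 */
	return 0;
}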
@@ -550006,7 +695436,7 @@
  int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
  		 int data_direction, void *buffer, unsigned bufflen,
  		 unsigned char *sense, int timeout, int retries, int flags)
-@@ -274,7 +274,7 @@ static void scsi_bi_endio(struct bio *bio, int error)
+@@ -274,7 +277,7 @@ static void scsi_bi_endio(struct bio *bio, int error)
  /**
   * scsi_req_map_sg - map a scatterlist into a request
   * @rq:		request to fill
@@ -550015,7 +695445,7 @@
   * @nsegs:	number of elements
   * @bufflen:	len of buffer
   * @gfp:	memory allocation flags
-@@ -365,14 +365,16 @@ free_bios:
+@@ -365,14 +368,16 @@ free_bios:
   * @sdev:	scsi device
   * @cmd:	scsi command
   * @cmd_len:	length of scsi cdb
@@ -550035,16 +695465,18 @@
  int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
  		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
  		       int use_sg, int timeout, int retries, void *privdata,
-@@ -439,7 +441,7 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
+@@ -438,8 +443,8 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
+ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
  {
  	cmd->serial_number = 0;
- 	cmd->resid = 0;
+-	cmd->resid = 0;
 -	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
++	scsi_set_resid(cmd, 0);
 +	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  	if (cmd->cmd_len == 0)
  		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
  }
-@@ -524,7 +526,7 @@ static void scsi_run_queue(struct request_queue *q)
+@@ -524,7 +529,7 @@ static void scsi_run_queue(struct request_queue *q)
  	struct Scsi_Host *shost = sdev->host;
  	unsigned long flags;
  
@@ -550053,7 +695485,7 @@
  		scsi_single_lun_run(sdev);
  
  	spin_lock_irqsave(shost->host_lock, flags);
-@@ -632,7 +634,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
+@@ -632,7 +637,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
   *		of upper level post-processing and scsi_io_completion).
   *
   * Arguments:   cmd	 - command that is complete.
@@ -550062,7 +695494,7 @@
   *              bytes    - number of bytes of completed I/O
   *		requeue  - indicates whether we should requeue leftovers.
   *
-@@ -647,26 +649,25 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
+@@ -647,26 +652,25 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
   *		at some point during this call.
   * Notes:	If cmd was requeued, upon return it will be a stale pointer.
   */
@@ -550093,7 +695525,7 @@
  		else {
  			if (requeue) {
  				/*
-@@ -681,14 +682,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
+@@ -681,14 +685,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
  		}
  	}
  
@@ -550108,7 +695540,52 @@
  	/*
  	 * This will goose the queue request function at the end, so we don't
  	 * need to worry about launching another command.
-@@ -737,138 +730,43 @@ static inline unsigned int scsi_sgtable_index(unsigned short nents)
+@@ -697,182 +693,57 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
+ 	return NULL;
+ }
+ 
+-/*
+- * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
+- * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
+- */
+-#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
+-
+ static inline unsigned int scsi_sgtable_index(unsigned short nents)
+ {
+ 	unsigned int index;
+ 
+-	switch (nents) {
+-	case 1 ... 8:
++	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
++
++	if (nents <= 8)
+ 		index = 0;
+-		break;
+-	case 9 ... 16:
+-		index = 1;
+-		break;
+-#if (SCSI_MAX_SG_SEGMENTS > 16)
+-	case 17 ... 32:
+-		index = 2;
+-		break;
+-#if (SCSI_MAX_SG_SEGMENTS > 32)
+-	case 33 ... 64:
+-		index = 3;
+-		break;
+-#if (SCSI_MAX_SG_SEGMENTS > 64)
+-	case 65 ... 128:
+-		index = 4;
+-		break;
+-#endif
+-#endif
+-#endif
+-	default:
+-		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
+-		BUG();
+-	}
++	else
++		index = get_count_order(nents) - 3;
+ 
  	return index;
  }
  
@@ -550131,31 +695608,35 @@
 -			index = SG_MEMPOOL_NR - 1;
 -		} else
 -			index = scsi_sgtable_index(this);
- 
--		left -= this;
 -
+-		left -= this;
+ 
 -		sgp = scsi_sg_pools + index;
--
--		sgl = mempool_alloc(sgp->pool, gfp_mask);
--		if (unlikely(!sgl))
--			goto enomem;
 +	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
 +	mempool_free(sgl, sgp->pool);
 +}
  
--		sg_init_table(sgl, sgp->size);
+-		sgl = mempool_alloc(sgp->pool, gfp_mask);
+-		if (unlikely(!sgl))
+-			goto enomem;
 +static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
 +{
 +	struct scsi_host_sg_pool *sgp;
  
+-		sg_init_table(sgl, sgp->size);
++	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
++	return mempool_alloc(sgp->pool, gfp_mask);
++}
+ 
 -		/*
 -		 * first loop through, set initial index and return value
 -		 */
 -		if (!ret)
 -			ret = sgl;
-+	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
-+	return mempool_alloc(sgp->pool, gfp_mask);
-+}
++static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
++			      gfp_t gfp_mask)
++{
++	int ret;
  
 -		/*
 -		 * chain previous sglist, if any. we know the previous
@@ -550164,9 +695645,7 @@
 -		 */
 -		if (prev)
 -			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
-+int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
-+{
-+	int ret;
++	BUG_ON(!nents);
  
 -		/*
 -		 * if we have nothing left, mark the last segment as
@@ -550174,8 +695653,7 @@
 -		 */
 -		if (!left)
 -			sg_mark_end(&sgl[this - 1]);
-+	BUG_ON(!cmd->use_sg);
- 
+-
 -		/*
 -		 * don't allow subsequent mempool allocs to sleep, it would
 -		 * violate the mempool principle.
@@ -550184,10 +695662,10 @@
 -		gfp_mask |= __GFP_HIGH;
 -		prev = sgl;
 -	} while (left);
-+	ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg,
-+			       SCSI_MAX_SG_SEGMENTS, gfp_mask, scsi_sg_alloc);
++	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
++			       gfp_mask, scsi_sg_alloc);
 +	if (unlikely(ret))
-+		__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS,
++		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
 +				scsi_sg_free);
  
 -	/*
@@ -550195,7 +695673,6 @@
 -	 * shrunk the number of segments, so keep a copy of it for free.
 -	 */
 -	cmd->__use_sg = cmd->use_sg;
-+	cmd->request_buffer = cmd->sg_table.sgl;
  	return ret;
 -enomem:
 -	if (ret) {
@@ -550218,9 +695695,10 @@
 -	return NULL;
  }
  
- EXPORT_SYMBOL(scsi_alloc_sgtable);
- 
- void scsi_free_sgtable(struct scsi_cmnd *cmd)
+-EXPORT_SYMBOL(scsi_alloc_sgtable);
+-
+-void scsi_free_sgtable(struct scsi_cmnd *cmd)
++static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
  {
 -	struct scatterlist *sgl = cmd->request_buffer;
 -	struct scsi_host_sg_pool *sgp;
@@ -550264,11 +695742,117 @@
 -		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
 -
 -	mempool_free(sgl, sgp->pool);
-+	__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
++	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
+ }
+ 
+-EXPORT_SYMBOL(scsi_free_sgtable);
+-
+ /*
+  * Function:    scsi_release_buffers()
+  *
+@@ -890,17 +761,49 @@ EXPORT_SYMBOL(scsi_free_sgtable);
+  *		the scatter-gather table, and potentially any bounce
+  *		buffers.
+  */
+-static void scsi_release_buffers(struct scsi_cmnd *cmd)
++void scsi_release_buffers(struct scsi_cmnd *cmd)
++{
++	if (cmd->sdb.table.nents)
++		scsi_free_sgtable(&cmd->sdb);
++
++	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
++
++	if (scsi_bidi_cmnd(cmd)) {
++		struct scsi_data_buffer *bidi_sdb =
++			cmd->request->next_rq->special;
++		scsi_free_sgtable(bidi_sdb);
++		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
++		cmd->request->next_rq->special = NULL;
++	}
++}
++EXPORT_SYMBOL(scsi_release_buffers);
++
++/*
++ * Bidi commands Must be complete as a whole, both sides at once.
++ * If part of the bytes were written and lld returned
++ * scsi_in()->resid and/or scsi_out()->resid this information will be left
++ * in req->data_len and req->next_rq->data_len. The upper-layer driver can
++ * decide what to do with this information.
++ */
++void scsi_end_bidi_request(struct scsi_cmnd *cmd)
+ {
+-	if (cmd->use_sg)
+-		scsi_free_sgtable(cmd);
++	struct request *req = cmd->request;
++	unsigned int dlen = req->data_len;
++	unsigned int next_dlen = req->next_rq->data_len;
++
++	req->data_len = scsi_out(cmd)->resid;
++	req->next_rq->data_len = scsi_in(cmd)->resid;
++
++	/* The req and req->next_rq have not been completed */
++	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
++
++	scsi_release_buffers(cmd);
+ 
+ 	/*
+-	 * Zero these out.  They now point to freed memory, and it is
+-	 * dangerous to hang onto the pointers.
++	 * This will goose the queue request function at the end, so we don't
++	 * need to worry about launching another command.
+ 	 */
+-	cmd->request_buffer = NULL;
+-	cmd->request_bufflen = 0;
++	scsi_next_command(cmd);
  }
  
- EXPORT_SYMBOL(scsi_free_sgtable);
-@@ -985,7 +883,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ /*
+@@ -934,7 +837,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
+ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ {
+ 	int result = cmd->result;
+-	int this_count = cmd->request_bufflen;
++	int this_count = scsi_bufflen(cmd);
+ 	struct request_queue *q = cmd->device->request_queue;
+ 	struct request *req = cmd->request;
+ 	int clear_errors = 1;
+@@ -942,8 +845,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 	int sense_valid = 0;
+ 	int sense_deferred = 0;
+ 
+-	scsi_release_buffers(cmd);
+-
+ 	if (result) {
+ 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ 		if (sense_valid)
+@@ -966,9 +867,17 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 				req->sense_len = len;
+ 			}
+ 		}
+-		req->data_len = cmd->resid;
++		if (scsi_bidi_cmnd(cmd)) {
++			/* will also release_buffers */
++			scsi_end_bidi_request(cmd);
++			return;
++		}
++		req->data_len = scsi_get_resid(cmd);
+ 	}
+ 
++	BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
++	scsi_release_buffers(cmd);
++
+ 	/*
+ 	 * Next deal with any sectors which we were able to correctly
+ 	 * handle.
+@@ -976,7 +885,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+ 				      "%d bytes done.\n",
+ 				      req->nr_sectors, good_bytes));
+-	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
+ 
+ 	if (clear_errors)
+ 		req->errors = 0;
+@@ -985,7 +893,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  	 * are leftovers and there is some kind of error
  	 * (result != 0), retry the rest.
  	 */
@@ -550277,7 +695861,7 @@
  		return;
  
  	/* good_bytes = 0, or (inclusive) there were leftovers and
-@@ -999,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+@@ -999,7 +907,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  				 * and quietly refuse further access.
  				 */
  				cmd->device->changed = 1;
@@ -550286,7 +695870,7 @@
  				return;
  			} else {
  				/* Must have been a power glitch, or a
-@@ -1031,7 +929,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+@@ -1031,7 +939,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  				scsi_requeue_command(q, cmd);
  				return;
  			} else {
@@ -550295,7 +695879,7 @@
  				return;
  			}
  			break;
-@@ -1059,7 +957,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+@@ -1059,7 +967,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  							 "Device not ready",
  							 &sshdr);
  
@@ -550304,7 +695888,7 @@
  			return;
  		case VOLUME_OVERFLOW:
  			if (!(req->cmd_flags & REQ_QUIET)) {
-@@ -1069,7 +967,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+@@ -1069,7 +977,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  				scsi_print_sense("", cmd);
  			}
  			/* See SSC3rXX or current. */
@@ -550313,7 +695897,7 @@
  			return;
  		default:
  			break;
-@@ -1090,7 +988,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+@@ -1090,64 +998,82 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  				scsi_print_sense("", cmd);
  		}
  	}
@@ -550321,47 +695905,146 @@
 +	scsi_end_request(cmd, -EIO, this_count, !result);
  }
  
- /*
-@@ -1102,7 +1000,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
-  *
-  * Returns:     0 on success
-  *		BLKPREP_DEFER if the failure is retryable
+-/*
+- * Function:    scsi_init_io()
+- *
+- * Purpose:     SCSI I/O initialize function.
+- *
+- * Arguments:   cmd   - Command descriptor we wish to initialize
+- *
+- * Returns:     0 on success
+- *		BLKPREP_DEFER if the failure is retryable
 - *		BLKPREP_KILL if the failure is fatal
-  */
- static int scsi_init_io(struct scsi_cmnd *cmd)
+- */
+-static int scsi_init_io(struct scsi_cmnd *cmd)
++static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
++			     gfp_t gfp_mask)
  {
-@@ -1119,8 +1016,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
+-	struct request     *req = cmd->request;
+-	int		   count;
+-
+-	/*
+-	 * We used to not use scatter-gather for single segment request,
+-	 * but now we do (it makes highmem I/O easier to support without
+-	 * kmapping pages)
+-	 */
+-	cmd->use_sg = req->nr_phys_segments;
++	int count;
+ 
  	/*
  	 * If sg table allocation fails, requeue request later.
  	 */
 -	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
 -	if (unlikely(!cmd->request_buffer)) {
-+	if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
- 		scsi_unprep_request(req);
+-		scsi_unprep_request(req);
++	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
++					gfp_mask))) {
  		return BLKPREP_DEFER;
  	}
-@@ -1136,17 +1032,9 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
+ 
+ 	req->buffer = NULL;
+ 	if (blk_pc_request(req))
+-		cmd->request_bufflen = req->data_len;
++		sdb->length = req->data_len;
+ 	else
+-		cmd->request_bufflen = req->nr_sectors << 9;
++		sdb->length = req->nr_sectors << 9;
+ 
+ 	/* 
+ 	 * Next, walk the list, and fill in the addresses and sizes of
  	 * each segment.
  	 */
- 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
+-	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
 -	if (likely(count <= cmd->use_sg)) {
 -		cmd->use_sg = count;
 -		return BLKPREP_OK;
--	}
--
++	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
++	BUG_ON(count > sdb->table.nents);
++	sdb->table.nents = count;
++	return BLKPREP_OK;
++}
++
++/*
++ * Function:    scsi_init_io()
++ *
++ * Purpose:     SCSI I/O initialize function.
++ *
++ * Arguments:   cmd   - Command descriptor we wish to initialize
++ *
++ * Returns:     0 on success
++ *		BLKPREP_DEFER if the failure is retryable
++ *		BLKPREP_KILL if the failure is fatal
++ */
++int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
++{
++	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
++	if (error)
++		goto err_exit;
++
++	if (blk_bidi_rq(cmd->request)) {
++		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
++			scsi_bidi_sdb_cache, GFP_ATOMIC);
++		if (!bidi_sdb) {
++			error = BLKPREP_DEFER;
++			goto err_exit;
++		}
++
++		cmd->request->next_rq->special = bidi_sdb;
++		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
++								    GFP_ATOMIC);
++		if (error)
++			goto err_exit;
+ 	}
+ 
 -	printk(KERN_ERR "Incorrect number of segments after building list\n");
 -	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
 -	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
 -			req->current_nr_sectors);
--
++	return BLKPREP_OK ;
++
++err_exit:
++	scsi_release_buffers(cmd);
++	if (error == BLKPREP_KILL)
++		scsi_put_command(cmd);
++	else /* BLKPREP_DEFER */
++		scsi_unprep_request(cmd->request);
+ 
 -	return BLKPREP_KILL;
-+	BUG_ON(count > cmd->use_sg);
-+	cmd->use_sg = count;
-+	return BLKPREP_OK;
++	return error;
  }
++EXPORT_SYMBOL(scsi_init_io);
  
  static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
-@@ -1557,7 +1445,7 @@ static void scsi_request_fn(struct request_queue *q)
+ 		struct request *req)
+@@ -1193,16 +1119,14 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+ 
+ 		BUG_ON(!req->nr_phys_segments);
+ 
+-		ret = scsi_init_io(cmd);
++		ret = scsi_init_io(cmd, GFP_ATOMIC);
+ 		if (unlikely(ret))
+ 			return ret;
+ 	} else {
+ 		BUG_ON(req->data_len);
+ 		BUG_ON(req->data);
+ 
+-		cmd->request_bufflen = 0;
+-		cmd->request_buffer = NULL;
+-		cmd->use_sg = 0;
++		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+ 		req->buffer = NULL;
+ 	}
+ 
+@@ -1244,7 +1168,7 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+ 	if (unlikely(!cmd))
+ 		return BLKPREP_DEFER;
+ 
+-	return scsi_init_io(cmd);
++	return scsi_init_io(cmd, GFP_ATOMIC);
+ }
+ EXPORT_SYMBOL(scsi_setup_fs_cmnd);
+ 
+@@ -1557,7 +1481,7 @@ static void scsi_request_fn(struct request_queue *q)
  
  		if (!scsi_host_queue_ready(q, shost, sdev))
  			goto not_ready;
@@ -550370,7 +696053,29 @@
  			if (scsi_target(sdev)->starget_sdev_user &&
  			    scsi_target(sdev)->starget_sdev_user != sdev)
  				goto not_ready;
-@@ -1675,6 +1563,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+@@ -1654,20 +1578,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+ 	 * this limit is imposed by hardware restrictions
+ 	 */
+ 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
+-
+-	/*
+-	 * In the future, sg chaining support will be mandatory and this
+-	 * ifdef can then go away. Right now we don't have all archs
+-	 * converted, so better keep it safe.
+-	 */
+-#ifdef ARCH_HAS_SG_CHAIN
+-	if (shost->use_sg_chaining)
+-		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+-	else
+-		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
+-#else
+-	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
+-#endif
++	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+ 
+ 	blk_queue_max_sectors(q, shost->max_sectors);
+ 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
+@@ -1675,6 +1586,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
  
  	if (!shost->use_clustering)
  		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
@@ -550385,7 +696090,64 @@
  	return q;
  }
  EXPORT_SYMBOL(__scsi_alloc_queue);
-@@ -1804,7 +1700,7 @@ void scsi_exit_queue(void)
+@@ -1758,6 +1677,14 @@ int __init scsi_init_queue(void)
+ 		return -ENOMEM;
+ 	}
+ 
++	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
++					sizeof(struct scsi_data_buffer),
++					0, 0, NULL);
++	if (!scsi_bidi_sdb_cache) {
++		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
++		goto cleanup_io_context;
++	}
++
+ 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+ 		int size = sgp->size * sizeof(struct scatterlist);
+@@ -1767,6 +1694,7 @@ int __init scsi_init_queue(void)
+ 		if (!sgp->slab) {
+ 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
+ 					sgp->name);
++			goto cleanup_bidi_sdb;
+ 		}
+ 
+ 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
+@@ -1774,10 +1702,25 @@ int __init scsi_init_queue(void)
+ 		if (!sgp->pool) {
+ 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
+ 					sgp->name);
++			goto cleanup_bidi_sdb;
+ 		}
+ 	}
+ 
+ 	return 0;
++
++cleanup_bidi_sdb:
++	for (i = 0; i < SG_MEMPOOL_NR; i++) {
++		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
++		if (sgp->pool)
++			mempool_destroy(sgp->pool);
++		if (sgp->slab)
++			kmem_cache_destroy(sgp->slab);
++	}
++	kmem_cache_destroy(scsi_bidi_sdb_cache);
++cleanup_io_context:
++	kmem_cache_destroy(scsi_io_context_cache);
++
++	return -ENOMEM;
+ }
+ 
+ void scsi_exit_queue(void)
+@@ -1785,6 +1728,7 @@ void scsi_exit_queue(void)
+ 	int i;
+ 
+ 	kmem_cache_destroy(scsi_io_context_cache);
++	kmem_cache_destroy(scsi_bidi_sdb_cache);
+ 
+ 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+@@ -1804,7 +1748,7 @@ void scsi_exit_queue(void)
   *	@timeout: command timeout
   *	@retries: number of retries before failing
   *	@data: returns a structure abstracting the mode header data
@@ -550394,7 +696156,7 @@
   *		must be SCSI_SENSE_BUFFERSIZE big.
   *
   *	Returns zero if successful; negative error number or scsi
-@@ -1871,8 +1767,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
+@@ -1871,8 +1815,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
  EXPORT_SYMBOL_GPL(scsi_mode_select);
  
  /**
@@ -550404,7 +696166,7 @@
   *	@sdev:	SCSI device to be queried
   *	@dbd:	set if mode sense will allow block descriptors to be returned
   *	@modepage: mode page being requested
-@@ -1881,13 +1776,13 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
+@@ -1881,13 +1824,13 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
   *	@timeout: command timeout
   *	@retries: number of retries before failing
   *	@data: returns a structure abstracting the mode header data
@@ -550420,7 +696182,7 @@
  int
  scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
  		  unsigned char *buffer, int len, int timeout, int retries,
-@@ -1981,40 +1876,69 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
+@@ -1981,40 +1924,69 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
  }
  EXPORT_SYMBOL(scsi_mode_sense);
  
@@ -550501,7 +696263,7 @@
  int
  scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
  {
-@@ -2264,7 +2188,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
+@@ -2264,7 +2236,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
   *	Must be called with user context, may sleep.
   *
   *	Returns zero if unsuccessful or an error if not.
@@ -550510,7 +696272,7 @@
  int
  scsi_device_quiesce(struct scsi_device *sdev)
  {
-@@ -2289,7 +2213,7 @@ EXPORT_SYMBOL(scsi_device_quiesce);
+@@ -2289,7 +2261,7 @@ EXPORT_SYMBOL(scsi_device_quiesce);
   *	queues.
   *
   *	Must be called with user context, may sleep.
@@ -550519,7 +696281,7 @@
  void
  scsi_device_resume(struct scsi_device *sdev)
  {
-@@ -2326,8 +2250,7 @@ scsi_target_resume(struct scsi_target *starget)
+@@ -2326,8 +2298,7 @@ scsi_target_resume(struct scsi_target *starget)
  EXPORT_SYMBOL(scsi_target_resume);
  
  /**
@@ -550529,7 +696291,7 @@
   * @sdev:	device to block
   *
   * Block request made by scsi lld's to temporarily stop all
-@@ -2342,7 +2265,7 @@ EXPORT_SYMBOL(scsi_target_resume);
+@@ -2342,7 +2313,7 @@ EXPORT_SYMBOL(scsi_target_resume);
   *	state, all commands are deferred until the scsi lld reenables
   *	the device with scsi_device_unblock or device_block_tmo fires.
   *	This routine assumes the host_lock is held on entry.
@@ -550538,7 +696300,7 @@
  int
  scsi_internal_device_block(struct scsi_device *sdev)
  {
-@@ -2382,7 +2305,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
+@@ -2382,7 +2353,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
   *	(which must be a legal transition) allowing the midlayer to
   *	goose the queue for this device.  This routine assumes the 
   *	host_lock is held upon entry.
@@ -550547,7 +696309,7 @@
  int
  scsi_internal_device_unblock(struct scsi_device *sdev)
  {
-@@ -2460,7 +2383,7 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
+@@ -2460,7 +2431,7 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
  
  /**
   * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt
@@ -550556,7 +696318,7 @@
   * @sg_count:	number of segments in sg
   * @offset:	offset in bytes into sg, on return offset into the mapped area
   * @len:	bytes to map, on return number of bytes mapped
-@@ -2509,8 +2432,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
+@@ -2509,8 +2480,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
  EXPORT_SYMBOL(scsi_kmap_atomic_sg);
  
  /**
@@ -550985,7 +696747,7 @@
  	memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
  	ev.p.cmd_req.attribute = cmd->tag;
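scsi_init_io() is exported and now takes an explicit gfp_mask: it fills cmd->sdb through scsi_init_sgtable() and, when the request is bidirectional, allocates a second scsi_data_buffer from scsi_bidi_sdb_cache and attaches it to request->next_rq->special; scsi_setup_blk_pc_cmnd() and scsi_setup_fs_cmnd() simply pass GFP_ATOMIC. A caller written against the new interface would look roughly like this sketch (example_prep_fn is hypothetical):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

static int example_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = scsi_get_command(sdev, GFP_ATOMIC);

	if (!cmd)
		return BLKPREP_DEFER;
	cmd->request = req;

	/* Maps req (and req->next_rq for bidi) into cmd->sdb; on failure the
	 * helper releases anything it allocated and defers or kills the request.
	 */
	return scsi_init_io(cmd, GFP_ATOMIC);
}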
 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
-index a91761c..01e03f3 100644
+index a91761c..91630ba 100644
 --- a/drivers/scsi/scsi_tgt_lib.c
 +++ b/drivers/scsi/scsi_tgt_lib.c
 @@ -180,7 +180,7 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
@@ -550997,7 +696759,7 @@
  		rq_data_dir(cmd->request));
  	scsi_unmap_user_pages(tcmd);
  	scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
-@@ -327,11 +327,11 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
+@@ -327,12 +327,11 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
  {
  	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
  
@@ -551007,11 +696769,12 @@
  	scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
  
 -	if (cmd->request_buffer)
-+	if (scsi_sglist(cmd))
- 		scsi_free_sgtable(cmd);
+-		scsi_free_sgtable(cmd);
++	scsi_release_buffers(cmd);
  
  	queue_work(scsi_tgtd, &tcmd->work);
-@@ -342,7 +342,7 @@ static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
+ }
+@@ -342,7 +341,7 @@ static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
  	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
  	int err;
  
@@ -551020,17 +696783,22 @@
  
  	err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
  	switch (err) {
-@@ -359,22 +359,17 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
- 	int count;
+@@ -353,30 +352,6 @@ static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
+ 	return 0;
+ }
  
- 	cmd->use_sg = rq->nr_phys_segments;
+-static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+-{
+-	struct request *rq = cmd->request;
+-	int count;
+-
+-	cmd->use_sg = rq->nr_phys_segments;
 -	cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
 -	if (!cmd->request_buffer)
-+	if (scsi_alloc_sgtable(cmd, gfp_mask))
- 		return -ENOMEM;
- 
- 	cmd->request_bufflen = rq->data_len;
- 
+-		return -ENOMEM;
+-
+-	cmd->request_bufflen = rq->data_len;
+-
 -	dprintk("cmd %p cnt %d %lu\n", cmd, cmd->use_sg, rq_data_dir(rq));
 -	count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
 -	if (likely(count <= cmd->use_sg)) {
@@ -551041,16 +696809,26 @@
 -	eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
 -	scsi_free_sgtable(cmd);
 -	return -EINVAL;
-+	dprintk("cmd %p cnt %d %lu\n", cmd, scsi_sg_count(cmd),
-+		rq_data_dir(rq));
-+	count = blk_rq_map_sg(rq->q, rq, scsi_sglist(cmd));
-+	BUG_ON(count > cmd->use_sg);
-+	cmd->use_sg = count;
-+	return 0;
- }
- 
+-}
+-
  /* TODO: test this crap and replace bio_map_user with new interface maybe */
-@@ -496,8 +491,8 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
+ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
+ 			       unsigned long uaddr, unsigned int len, int rw)
+@@ -402,9 +377,11 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
+ 	}
+ 
+ 	tcmd->bio = rq->bio;
+-	err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
+-	if (err)
++	err = scsi_init_io(cmd, GFP_KERNEL);
++	if (err) {
++		scsi_release_buffers(cmd);
+ 		goto unmap_rq;
++	}
+ 
+ 	return 0;
+ 
+@@ -496,8 +473,8 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
  	}
  	cmd = rq->special;
  
@@ -551061,7 +696839,7 @@
  		rq_data_dir(rq), cmd->cmnd[0]);
  
  	if (result == TASK_ABORTED) {
-@@ -617,7 +612,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
+@@ -617,7 +594,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
  	struct Scsi_Host *shost;
  	int err = -EINVAL;
  
@@ -552773,7 +698551,7 @@
   *                                                        Revision 6
   *                                                         10-MAR-94
 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index a69b155..24eba31 100644
+index a69b155..51a5557 100644
 --- a/drivers/scsi/sd.c
 +++ b/drivers/scsi/sd.c
 @@ -395,6 +395,15 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
@@ -552792,6 +698570,15 @@
  	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
  					(unsigned long long)block));
  
+@@ -510,7 +519,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
+ 		SCpnt->cmnd[4] = (unsigned char) this_count;
+ 		SCpnt->cmnd[5] = 0;
+ 	}
+-	SCpnt->request_bufflen = this_count * sdp->sector_size;
++	SCpnt->sdb.length = this_count * sdp->sector_size;
+ 
+ 	/*
+ 	 * We shouldn't disconnect in the middle of a sector, so with a dumb
 @@ -736,6 +745,7 @@ static int sd_media_changed(struct gendisk *disk)
  {
  	struct scsi_disk *sdkp = scsi_disk(disk);
@@ -552863,6 +698650,15 @@
  }
  
  static int sd_sync_cache(struct scsi_disk *sdkp)
+@@ -904,7 +926,7 @@ static struct block_device_operations sd_fops = {
+ static int sd_done(struct scsi_cmnd *SCpnt)
+ {
+ 	int result = SCpnt->result;
+- 	unsigned int xfer_size = SCpnt->request_bufflen;
++	unsigned int xfer_size = scsi_bufflen(SCpnt);
+  	unsigned int good_bytes = result ? 0 : xfer_size;
+  	u64 start_lba = SCpnt->request->sector;
+  	u64 bad_lba;
 diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
 deleted file mode 100644
 index b113244..0000000
@@ -554634,19 +700430,188 @@
  
  	read_lock_irqsave(&sg_index_lock, iflags);
 diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
-index eef8275..d4ebe8c 100644
+index eef8275..26cfc56 100644
 --- a/drivers/scsi/sgiwd93.c
 +++ b/drivers/scsi/sgiwd93.c
-@@ -159,6 +159,7 @@ void sgiwd93_reset(unsigned long base)
+@@ -33,10 +33,9 @@
+ 
+ struct ip22_hostdata {
+ 	struct WD33C93_hostdata wh;
+-	struct hpc_data {
+-		dma_addr_t      dma;
+-		void		*cpu;
+-	} hd;
++	dma_addr_t dma;
++	void *cpu;
++	struct device *dev;
+ };
+ 
+ #define host_to_hostdata(host) ((struct ip22_hostdata *)((host)->hostdata))
+@@ -46,6 +45,11 @@ struct hpc_chunk {
+ 	u32 _padding;	/* align to quadword boundary */
+ };
+ 
++/* space for hpc dma descriptors */
++#define HPC_DMA_SIZE   PAGE_SIZE
++
++#define DMA_DIR(d)   ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
++
+ static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
+ {
+ 	struct Scsi_Host * host = dev_id;
+@@ -59,15 +63,17 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
+ }
+ 
+ static inline
+-void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
++void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
+ {
+ 	unsigned long len = cmd->SCp.this_residual;
+ 	void *addr = cmd->SCp.ptr;
+ 	dma_addr_t physaddr;
+ 	unsigned long count;
++	struct hpc_chunk *hcp;
+ 
+-	physaddr = dma_map_single(NULL, addr, len, cmd->sc_data_direction);
++	physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
+ 	cmd->SCp.dma_handle = physaddr;
++	hcp = hd->cpu;
+ 
+ 	while (len) {
+ 		/*
+@@ -89,6 +95,9 @@ void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
+ 	 */
+ 	hcp->desc.pbuf = 0;
+ 	hcp->desc.cntinfo = HPCDMA_EOX;
++	dma_cache_sync(hd->dev, hd->cpu,
++		       (unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
++		       DMA_TO_DEVICE);
+ }
+ 
+ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
+@@ -96,9 +105,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
+ 	struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host);
+ 	struct hpc3_scsiregs *hregs =
+ 		(struct hpc3_scsiregs *) cmd->device->host->base;
+-	struct hpc_chunk *hcp = (struct hpc_chunk *) hdata->hd.cpu;
+ 
+-	pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hcp);
++	pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu);
+ 
+ 	hdata->wh.dma_dir = datainp;
+ 
+@@ -111,12 +119,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
+ 	if (cmd->SCp.ptr == NULL || cmd->SCp.this_residual == 0)
+ 		return 1;
+ 
+-	fill_hpc_entries(hcp, cmd, datainp);
++	fill_hpc_entries(hdata, cmd, datainp);
+ 
+ 	pr_debug(" HPCGO\n");
+ 
+ 	/* Start up the HPC. */
+-	hregs->ndptr = hdata->hd.dma;
++	hregs->ndptr = hdata->dma;
+ 	if (datainp)
+ 		hregs->ctrl = HPC3_SCTRL_ACTIVE;
+ 	else
+@@ -134,6 +142,9 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
+ 	if (!SCpnt)
+ 		return;
+ 
++	if (SCpnt->SCp.ptr == NULL || SCpnt->SCp.this_residual == 0)
++		return;
++
+ 	hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base;
+ 
+ 	pr_debug("dma_stop: status<%d> ", status);
+@@ -145,8 +156,9 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
+ 			barrier();
+ 	}
+ 	hregs->ctrl = 0;
+-	dma_unmap_single(NULL, SCpnt->SCp.dma_handle, SCpnt->SCp.this_residual,
+-	                 SCpnt->sc_data_direction);
++	dma_unmap_single(hdata->dev, SCpnt->SCp.dma_handle,
++			 SCpnt->SCp.this_residual,
++			 DMA_DIR(hdata->wh.dma_dir));
+ 
+ 	pr_debug("\n");
+ }
+@@ -159,23 +171,25 @@ void sgiwd93_reset(unsigned long base)
  	udelay(50);
  	hregs->ctrl = 0;
  }
 +EXPORT_SYMBOL_GPL(sgiwd93_reset);
  
- static inline void init_hpc_chain(struct hpc_data *hd)
+-static inline void init_hpc_chain(struct hpc_data *hd)
++static inline void init_hpc_chain(struct ip22_hostdata *hdata)
  {
+-	struct hpc_chunk *hcp = (struct hpc_chunk *) hd->cpu;
+-	struct hpc_chunk *dma = (struct hpc_chunk *) hd->dma;
++	struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu;
++	dma_addr_t dma = hdata->dma;
+ 	unsigned long start, end;
+ 
+ 	start = (unsigned long) hcp;
+-	end = start + PAGE_SIZE;
++	end = start + HPC_DMA_SIZE;
+ 	while (start < end) {
+-		hcp->desc.pnext = (u32) (dma + 1);
++		hcp->desc.pnext = (u32) (dma + sizeof(struct hpc_chunk));
+ 		hcp->desc.cntinfo = HPCDMA_EOX;
+-		hcp++; dma++;
++		hcp++;
++		dma += sizeof(struct hpc_chunk);
+ 		start += sizeof(struct hpc_chunk);
+ 	};
+ 	hcp--;
+-	hcp->desc.pnext = hd->dma;
++	hcp->desc.pnext = hdata->dma;
+ }
+ 
+ static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
+@@ -234,16 +248,17 @@ static int __init sgiwd93_probe(struct platform_device *pdev)
+ 	host->irq = irq;
+ 
+ 	hdata = host_to_hostdata(host);
+-	hdata->hd.cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+-	                                   &hdata->hd.dma, GFP_KERNEL);
+-	if (!hdata->hd.cpu) {
++	hdata->dev = &pdev->dev;
++	hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
++					   &hdata->dma, GFP_KERNEL);
++	if (!hdata->cpu) {
+ 		printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
+ 		       "host %d buffer.\n", unit);
+ 		err = -ENOMEM;
+ 		goto out_put;
+ 	}
+ 
+-	init_hpc_chain(&hdata->hd);
++	init_hpc_chain(hdata);
+ 
+ 	regs.SASR = wdregs + 3;
+ 	regs.SCMD = wdregs + 7;
+@@ -273,7 +288,7 @@ static int __init sgiwd93_probe(struct platform_device *pdev)
+ out_irq:
+ 	free_irq(irq, host);
+ out_free:
+-	dma_free_coherent(NULL, PAGE_SIZE, hdata->hd.cpu, hdata->hd.dma);
++	dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
+ out_put:
+ 	scsi_host_put(host);
+ out:
+@@ -289,7 +304,7 @@ static void __exit sgiwd93_remove(struct platform_device *pdev)
+ 
+ 	scsi_remove_host(host);
+ 	free_irq(pd->irq, host);
+-	dma_free_coherent(&pdev->dev, PAGE_SIZE, hdata->hd.cpu, hdata->hd.dma);
++	dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
+ 	scsi_host_put(host);
+ }
+ 
 diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
-index c619990..1fcee16 100644
+index c619990..50ba492 100644
 --- a/drivers/scsi/sr.c
 +++ b/drivers/scsi/sr.c
 @@ -67,8 +67,6 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
@@ -554714,6 +700679,66 @@
  	return retval;
  }
   
+@@ -218,7 +231,7 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
+ static int sr_done(struct scsi_cmnd *SCpnt)
+ {
+ 	int result = SCpnt->result;
+-	int this_count = SCpnt->request_bufflen;
++	int this_count = scsi_bufflen(SCpnt);
+ 	int good_bytes = (result == 0 ? this_count : 0);
+ 	int block_sectors = 0;
+ 	long error_sector;
+@@ -366,17 +379,18 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
+ 	}
+ 
+ 	{
+-		struct scatterlist *sg = SCpnt->request_buffer;
+-		int i, size = 0;
+-		for (i = 0; i < SCpnt->use_sg; i++)
+-			size += sg[i].length;
++		struct scatterlist *sg;
++		int i, size = 0, sg_count = scsi_sg_count(SCpnt);
++
++		scsi_for_each_sg(SCpnt, sg, sg_count, i)
++			size += sg->length;
+ 
+-		if (size != SCpnt->request_bufflen && SCpnt->use_sg) {
++		if (size != scsi_bufflen(SCpnt)) {
+ 			scmd_printk(KERN_ERR, SCpnt,
+ 				"mismatch count %d, bytes %d\n",
+-				size, SCpnt->request_bufflen);
+-			if (SCpnt->request_bufflen > size)
+-				SCpnt->request_bufflen = size;
++				size, scsi_bufflen(SCpnt));
++			if (scsi_bufflen(SCpnt) > size)
++				SCpnt->sdb.length = size;
+ 		}
+ 	}
+ 
+@@ -384,12 +398,12 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
+ 	 * request doesn't start on hw block boundary, add scatter pads
+ 	 */
+ 	if (((unsigned int)rq->sector % (s_size >> 9)) ||
+-	    (SCpnt->request_bufflen % s_size)) {
++	    (scsi_bufflen(SCpnt) % s_size)) {
+ 		scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
+ 		goto out;
+ 	}
+ 
+-	this_count = (SCpnt->request_bufflen >> 9) / (s_size >> 9);
++	this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
+ 
+ 
+ 	SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
+@@ -403,7 +417,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
+ 
+ 	if (this_count > 0xffff) {
+ 		this_count = 0xffff;
+-		SCpnt->request_bufflen = this_count * s_size;
++		SCpnt->sdb.length = this_count * s_size;
+ 	}
+ 
+ 	SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
 diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
 index d65de96..81fbc0b 100644
 --- a/drivers/scsi/sr.h
@@ -554849,6 +700874,18 @@
  #ifdef CONFIG_COMPAT
  	.compat_ioctl = st_compat_ioctl,
  #endif
+diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
+index e3fab3a..72f6d80 100644
+--- a/drivers/scsi/stex.c
++++ b/drivers/scsi/stex.c
+@@ -1123,7 +1123,6 @@ static struct scsi_host_template driver_template = {
+ 	.this_id			= -1,
+ 	.sg_tablesize			= ST_MAX_SG,
+ 	.cmd_per_lun			= ST_CMD_PER_LUN,
+-	.use_sg_chaining		= ENABLE_SG_CHAINING,
+ };
+ 
+ static int stex_set_dma_mask(struct pci_dev * pdev)
 diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
 index 2dcde37..bcaba86 100644
 --- a/drivers/scsi/sun3_NCR5380.c
@@ -554939,7 +700976,7 @@
      NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
  
 diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
-index 90cee94..1f6fd16 100644
+index 90cee94..6325901 100644
 --- a/drivers/scsi/sym53c416.c
 +++ b/drivers/scsi/sym53c416.c
 @@ -328,27 +328,13 @@ static __inline__ unsigned int sym53c416_write(int base, unsigned char *buffer,
@@ -554971,8 +701008,15 @@
  	spin_lock_irqsave(dev->host_lock,flags);
  	status_reg = inb(base + STATUS_REG);
  	pio_int_reg = inb(base + PIO_INT_REG);
+@@ -854,6 +840,5 @@ static struct scsi_host_template driver_template = {
+ 	.cmd_per_lun =		1,
+ 	.unchecked_isa_dma =	1,
+ 	.use_clustering =	ENABLE_CLUSTERING,
+-	.use_sg_chaining =	ENABLE_SG_CHAINING,
+ };
+ #include "scsi_module.c"
 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
-index 9e0908d..21e926d 100644
+index 9e0908d..d39107b 100644
 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
 @@ -207,10 +207,9 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
@@ -554980,7 +701024,7 @@
  			 *  Bounce back the sense data to user.
  			 */
 -			memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
-+			memset(&cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
++			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  			memcpy(cmd->sense_buffer, cp->sns_bbuf,
 -			      min(sizeof(cmd->sense_buffer),
 -				  (size_t)SYM_SNS_BBUF_LEN));
@@ -555018,7 +701062,15 @@
  		if (!finished_reset)
  			return SCSI_FAILED;
  	}
-@@ -1744,7 +1745,7 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
+@@ -1680,7 +1681,6 @@ static struct scsi_host_template sym2_template = {
+ 	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
+ 	.this_id		= 7,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ 	.max_sectors		= 0xFFFF,
+ #ifdef SYM_LINUX_PROC_INFO_SUPPORT
+ 	.proc_info		= sym53c8xx_proc_info,
+@@ -1744,7 +1744,7 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
  	return -ENODEV;
  }
  
@@ -555027,7 +701079,7 @@
  {
  	struct Scsi_Host *shost = pci_get_drvdata(pdev);
  
-@@ -1879,7 +1880,6 @@ static void sym2_io_resume(struct pci_dev *pdev)
+@@ -1879,7 +1879,6 @@ static void sym2_io_resume(struct pci_dev *pdev)
  	spin_lock_irq(shost->host_lock);
  	if (sym_data->io_reset)
  		complete_all(sym_data->io_reset);
@@ -555035,7 +701087,7 @@
  	spin_unlock_irq(shost->host_lock);
  }
  
-@@ -2056,7 +2056,7 @@ static struct pci_driver sym2_driver = {
+@@ -2056,7 +2055,7 @@ static struct pci_driver sym2_driver = {
  	.name		= NAME53C8XX,
  	.id_table	= sym2_id_table,
  	.probe		= sym2_probe,
@@ -555076,10 +701128,18 @@
  	DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n"));
      }
 diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
-index 7edd6ce..4bc5407 100644
+index 7edd6ce..662c004 100644
 --- a/drivers/scsi/u14-34f.c
 +++ b/drivers/scsi/u14-34f.c
-@@ -1121,9 +1121,9 @@ static void map_dma(unsigned int i, unsigned int j) {
+@@ -451,7 +451,6 @@ static struct scsi_host_template driver_template = {
+                 .this_id                 = 7,
+                 .unchecked_isa_dma       = 1,
+                 .use_clustering          = ENABLE_CLUSTERING,
+-                .use_sg_chaining         = ENABLE_SG_CHAINING,
+                 };
+ 
+ #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
+@@ -1121,9 +1120,9 @@ static void map_dma(unsigned int i, unsigned int j) {
  
     if (SCpnt->sense_buffer)
        cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
@@ -555092,7 +701152,7 @@
     if (scsi_bufflen(SCpnt)) {
  	   count = scsi_dma_map(SCpnt);
 diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
-index 6d1f0ed..75eca6b 100644
+index 6d1f0ed..f385dce 100644
 --- a/drivers/scsi/ultrastor.c
 +++ b/drivers/scsi/ultrastor.c
 @@ -298,9 +298,16 @@ static inline int find_and_clear_bit_16(unsigned long *field)
@@ -555124,6 +701184,13 @@
      my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
      memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
      my_mscp->adapter_status = 0;
+@@ -1197,6 +1204,5 @@ static struct scsi_host_template driver_template = {
+ 	.cmd_per_lun       = ULTRASTOR_MAX_CMDS_PER_LUN,
+ 	.unchecked_isa_dma = 1,
+ 	.use_clustering    = ENABLE_CLUSTERING,
+-	.use_sg_chaining   = ENABLE_SG_CHAINING,
+ };
+ #include "scsi_module.c"
 diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
 index fdbb92d..f286c37 100644
 --- a/drivers/scsi/wd33c93.c
@@ -555151,7 +701218,7 @@
  
  /* WD docs state that at the conclusion of a "LEVEL2" command, the
 diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
-index 03cd44f..b4304ae 100644
+index 03cd44f..c975c01 100644
 --- a/drivers/scsi/wd7000.c
 +++ b/drivers/scsi/wd7000.c
 @@ -1108,13 +1108,10 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
@@ -555190,6 +701257,14 @@
  
  				present++;	/* count it */
  
+@@ -1671,7 +1671,6 @@ static struct scsi_host_template driver_template = {
+ 	.cmd_per_lun		= 1,
+ 	.unchecked_isa_dma	= 1,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+-	.use_sg_chaining	= ENABLE_SG_CHAINING,
+ };
+ 
+ #include "scsi_module.c"
 diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
 index facb678..6a48dfa 100644
 --- a/drivers/serial/21285.c
@@ -555203,6 +701278,34 @@
  	/*
  	 * Which character status flags should we ignore?
  	 */
+diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
+index d7e1996..d962b74 100644
+--- a/drivers/serial/Kconfig
++++ b/drivers/serial/Kconfig
+@@ -1284,4 +1284,14 @@ config SERIAL_OF_PLATFORM
+ 	  Currently, only 8250 compatible ports are supported, but
+ 	  others can easily be added.
+ 
++config SERIAL_QE
++	tristate "Freescale QUICC Engine serial port support"
++	depends on QUICC_ENGINE
++	select SERIAL_CORE
++	select FW_LOADER
++	default n
++	help
++	  This driver supports the QE serial ports on Freescale embedded
++	  PowerPC that contain a QUICC Engine.
++
+ endmenu
+diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
+index af6377d..7eb4553 100644
+--- a/drivers/serial/Makefile
++++ b/drivers/serial/Makefile
+@@ -64,3 +64,4 @@ obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
+ obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
+ obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
+ obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
++obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
 diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
 index 6f475b6..ac2a3ef 100644
 --- a/drivers/serial/bfin_5xx.c
@@ -555227,6 +701330,66 @@
  	set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
  	set_dma_x_modify(uart->rx_dma_channel, 1);
  	set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
+diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+index 52fb044..6ea0366 100644
+--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c
++++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+@@ -52,11 +52,7 @@
+ #ifdef CONFIG_PPC_CPM_NEW_BINDING
+ void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
+ {
+-	u16 __iomem *cpcr = &cpmp->cp_cpcr;
+-
+-	out_be16(cpcr, port->command | (cmd << 8) | CPM_CR_FLG);
+-	while (in_be16(cpcr) & CPM_CR_FLG)
+-		;
++	cpm_command(port->command, cmd);
+ }
+ #else
+ void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
+diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+index 9b5465f..ddf46d3 100644
+--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
++++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+@@ -10,7 +10,7 @@
+ #ifndef CPM_UART_CPM1_H
+ #define CPM_UART_CPM1_H
+ 
+-#include <asm/commproc.h>
++#include <asm/cpm1.h>
+ 
+ /* defines for IRQs */
+ #ifndef CONFIG_PPC_CPM_NEW_BINDING
+diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+index 882dbc1..d9af06a 100644
+--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
++++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+@@ -52,13 +52,7 @@
+ #ifdef CONFIG_PPC_CPM_NEW_BINDING
+ void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
+ {
+-	cpm_cpm2_t __iomem *cp = cpm2_map(im_cpm);
+-
+-	out_be32(&cp->cp_cpcr, port->command | cmd | CPM_CR_FLG);
+-	while (in_be32(&cp->cp_cpcr) & CPM_CR_FLG)
+-		;
+-
+-	cpm2_unmap(cp);
++	cpm_command(port->command, cmd);
+ }
+ #else
+ void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
+@@ -171,9 +165,9 @@ void scc2_lineif(struct uart_cpm_port *pinfo)
+ 	 * really has to get out of the driver so boards can
+ 	 * be supported in a sane fashion.
+ 	 */
++	volatile cpmux_t *cpmux = cpm2_map(im_cpmux);
+ #ifndef CONFIG_STX_GP3
+ 	volatile iop_cpm2_t *io = cpm2_map(im_ioport);
+-	volatile cpmux_t *cpmux = cpm2_map(im_cpmux);
+ 
+ 	io->iop_pparb |= 0x008b0000;
+ 	io->iop_pdirb |= 0x00880000;
 diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
 index 9d3105b..9c2df5c 100644
 --- a/drivers/serial/icom.c
@@ -555333,6 +701496,601 @@
  };
  
  /* prototype */
+diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
+index ec36ad7..3c4d29e 100644
+--- a/drivers/serial/mpc52xx_uart.c
++++ b/drivers/serial/mpc52xx_uart.c
+@@ -36,7 +36,7 @@
+  * DCD. However, the pin multiplexing aren't changed and should be set either
+  * by the bootloader or in the platform init code.
+  *
+- * The idx field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2,
++ * The idx field must be equal to the PSC index (e.g. 0 for PSC1, 1 for PSC2,
+  * and so on). So the PSC1 is mapped to /dev/ttyPSC0, PSC2 to /dev/ttyPSC1 and
+  * so on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly
+  * fpr the console code : without this 1:1 mapping, at early boot time, when we
+@@ -68,11 +68,12 @@
+ #include <linux/sysrq.h>
+ #include <linux/console.h>
+ 
+-#include <asm/delay.h>
+-#include <asm/io.h>
++#include <linux/delay.h>
++#include <linux/io.h>
+ 
+ #if defined(CONFIG_PPC_MERGE)
+-#include <asm/of_platform.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
+ #else
+ #include <linux/platform_device.h>
+ #endif
+@@ -111,23 +112,27 @@ static void mpc52xx_uart_of_enumerate(void);
+ #endif
+ 
+ #define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase))
++#define FIFO(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
+ 
+ 
+ /* Forward declaration of the interruption handling routine */
+-static irqreturn_t mpc52xx_uart_int(int irq,void *dev_id);
++static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id);
+ 
+ 
+ /* Simple macro to test if a port is console or not. This one is taken
+  * for serial_core.c and maybe should be moved to serial_core.h ? */
+ #ifdef CONFIG_SERIAL_CORE_CONSOLE
+-#define uart_console(port)	((port)->cons && (port)->cons->index == (port)->line)
++#define uart_console(port) \
++	((port)->cons && (port)->cons->index == (port)->line)
+ #else
+ #define uart_console(port)	(0)
+ #endif
+ 
+ #if defined(CONFIG_PPC_MERGE)
+ static struct of_device_id mpc52xx_uart_of_match[] = {
+-	{ .type = "serial", .compatible = "mpc5200-psc-uart", },
++	{ .type = "serial", .compatible = "fsl,mpc5200-psc-uart", },
++	{ .type = "serial", .compatible = "mpc5200-psc-uart", }, /* lite5200 */
++	{ .type = "serial", .compatible = "mpc5200-serial", }, /* efika */
+ 	{},
+ };
+ #endif
+@@ -162,7 +167,7 @@ mpc52xx_uart_stop_tx(struct uart_port *port)
+ {
+ 	/* port->lock taken by caller */
+ 	port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
+-	out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
++	out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ }
+ 
+ static void
+@@ -170,7 +175,7 @@ mpc52xx_uart_start_tx(struct uart_port *port)
+ {
+ 	/* port->lock taken by caller */
+ 	port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
+-	out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
++	out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ }
+ 
+ static void
+@@ -184,7 +189,7 @@ mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
+ 		/* Make sure tx interrupts are on */
+ 		/* Truly necessary ??? They should be anyway */
+ 		port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
+-		out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
++		out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&port->lock, flags);
+@@ -195,7 +200,7 @@ mpc52xx_uart_stop_rx(struct uart_port *port)
+ {
+ 	/* port->lock taken by caller */
+ 	port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
+-	out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
++	out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ }
+ 
+ static void
+@@ -210,10 +215,10 @@ mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
+ 	unsigned long flags;
+ 	spin_lock_irqsave(&port->lock, flags);
+ 
+-	if ( ctl == -1 )
+-		out_8(&PSC(port)->command,MPC52xx_PSC_START_BRK);
++	if (ctl == -1)
++		out_8(&PSC(port)->command, MPC52xx_PSC_START_BRK);
+ 	else
+-		out_8(&PSC(port)->command,MPC52xx_PSC_STOP_BRK);
++		out_8(&PSC(port)->command, MPC52xx_PSC_STOP_BRK);
+ 
+ 	spin_unlock_irqrestore(&port->lock, flags);
+ }
+@@ -222,6 +227,7 @@ static int
+ mpc52xx_uart_startup(struct uart_port *port)
+ {
+ 	struct mpc52xx_psc __iomem *psc = PSC(port);
++	struct mpc52xx_psc_fifo __iomem *fifo = FIFO(port);
+ 	int ret;
+ 
+ 	/* Request IRQ */
+@@ -231,23 +237,23 @@ mpc52xx_uart_startup(struct uart_port *port)
+ 		return ret;
+ 
+ 	/* Reset/activate the port, clear and enable interrupts */
+-	out_8(&psc->command,MPC52xx_PSC_RST_RX);
+-	out_8(&psc->command,MPC52xx_PSC_RST_TX);
++	out_8(&psc->command, MPC52xx_PSC_RST_RX);
++	out_8(&psc->command, MPC52xx_PSC_RST_TX);
+ 
+-	out_be32(&psc->sicr,0);	/* UART mode DCD ignored */
++	out_be32(&psc->sicr, 0);	/* UART mode DCD ignored */
+ 
+ 	out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */
+ 
+-	out_8(&psc->rfcntl, 0x00);
+-	out_be16(&psc->rfalarm, 0x1ff);
+-	out_8(&psc->tfcntl, 0x07);
+-	out_be16(&psc->tfalarm, 0x80);
++	out_8(&fifo->rfcntl, 0x00);
++	out_be16(&fifo->rfalarm, 0x1ff);
++	out_8(&fifo->tfcntl, 0x07);
++	out_be16(&fifo->tfalarm, 0x80);
+ 
+ 	port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
+-	out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
++	out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+ 
+-	out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
+-	out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
++	out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
++	out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+ 
+ 	return 0;
+ }
+@@ -258,12 +264,12 @@ mpc52xx_uart_shutdown(struct uart_port *port)
+ 	struct mpc52xx_psc __iomem *psc = PSC(port);
+ 
+ 	/* Shut down the port.  Leave TX active if on a console port */
+-	out_8(&psc->command,MPC52xx_PSC_RST_RX);
++	out_8(&psc->command, MPC52xx_PSC_RST_RX);
+ 	if (!uart_console(port))
+-		out_8(&psc->command,MPC52xx_PSC_RST_TX);
++		out_8(&psc->command, MPC52xx_PSC_RST_TX);
+ 
+ 	port->read_status_mask = 0;
+-	out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
++	out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+ 
+ 	/* Release interrupt */
+ 	free_irq(port->irq, port);
+@@ -271,7 +277,7 @@ mpc52xx_uart_shutdown(struct uart_port *port)
+ 
+ static void
+ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
+-                         struct ktermios *old)
++			 struct ktermios *old)
+ {
+ 	struct mpc52xx_psc __iomem *psc = PSC(port);
+ 	unsigned long flags;
+@@ -283,14 +289,14 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
+ 	mr1 = 0;
+ 
+ 	switch (new->c_cflag & CSIZE) {
+-		case CS5:	mr1 |= MPC52xx_PSC_MODE_5_BITS;
+-				break;
+-		case CS6:	mr1 |= MPC52xx_PSC_MODE_6_BITS;
+-				break;
+-		case CS7:	mr1 |= MPC52xx_PSC_MODE_7_BITS;
+-				break;
+-		case CS8:
+-		default:	mr1 |= MPC52xx_PSC_MODE_8_BITS;
++	case CS5:	mr1 |= MPC52xx_PSC_MODE_5_BITS;
++		break;
++	case CS6:	mr1 |= MPC52xx_PSC_MODE_6_BITS;
++		break;
++	case CS7:	mr1 |= MPC52xx_PSC_MODE_7_BITS;
++		break;
++	case CS8:
++	default:	mr1 |= MPC52xx_PSC_MODE_8_BITS;
+ 	}
+ 
+ 	if (new->c_cflag & PARENB) {
+@@ -332,24 +338,24 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
+ 		udelay(1);
+ 
+ 	if (!j)
+-		printk(	KERN_ERR "mpc52xx_uart.c: "
++		printk(KERN_ERR "mpc52xx_uart.c: "
+ 			"Unable to flush RX & TX fifos in-time in set_termios."
+-			"Some chars may have been lost.\n" );
++			"Some chars may have been lost.\n");
+ 
+ 	/* Reset the TX & RX */
+-	out_8(&psc->command,MPC52xx_PSC_RST_RX);
+-	out_8(&psc->command,MPC52xx_PSC_RST_TX);
++	out_8(&psc->command, MPC52xx_PSC_RST_RX);
++	out_8(&psc->command, MPC52xx_PSC_RST_TX);
+ 
+ 	/* Send new mode settings */
+-	out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
+-	out_8(&psc->mode,mr1);
+-	out_8(&psc->mode,mr2);
+-	out_8(&psc->ctur,ctr >> 8);
+-	out_8(&psc->ctlr,ctr & 0xff);
++	out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
++	out_8(&psc->mode, mr1);
++	out_8(&psc->mode, mr2);
++	out_8(&psc->ctur, ctr >> 8);
++	out_8(&psc->ctlr, ctr & 0xff);
+ 
+ 	/* Reenable TX & RX */
+-	out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
+-	out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
++	out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
++	out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+ 
+ 	/* We're all set, release the lock */
+ 	spin_unlock_irqrestore(&port->lock, flags);
+@@ -364,7 +370,8 @@ mpc52xx_uart_type(struct uart_port *port)
+ static void
+ mpc52xx_uart_release_port(struct uart_port *port)
+ {
+-	if (port->flags & UPF_IOREMAP) { /* remapped by us ? */
++	/* remapped by us ? */
++	if (port->flags & UPF_IOREMAP) {
+ 		iounmap(port->membase);
+ 		port->membase = NULL;
+ 	}
+@@ -379,7 +386,7 @@ mpc52xx_uart_request_port(struct uart_port *port)
+ 
+ 	if (port->flags & UPF_IOREMAP) /* Need to remap ? */
+ 		port->membase = ioremap(port->mapbase,
+-		                        sizeof(struct mpc52xx_psc));
++					sizeof(struct mpc52xx_psc));
+ 
+ 	if (!port->membase)
+ 		return -EINVAL;
+@@ -398,22 +405,22 @@ mpc52xx_uart_request_port(struct uart_port *port)
+ static void
+ mpc52xx_uart_config_port(struct uart_port *port, int flags)
+ {
+-	if ( (flags & UART_CONFIG_TYPE) &&
+-	     (mpc52xx_uart_request_port(port) == 0) )
+-	     	port->type = PORT_MPC52xx;
++	if ((flags & UART_CONFIG_TYPE)
++		&& (mpc52xx_uart_request_port(port) == 0))
++		port->type = PORT_MPC52xx;
+ }
+ 
+ static int
+ mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
+ {
+-	if ( ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx )
++	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx)
+ 		return -EINVAL;
+ 
+-	if ( (ser->irq != port->irq) ||
+-	     (ser->io_type != SERIAL_IO_MEM) ||
+-	     (ser->baud_base != port->uartclk)  ||
+-	     (ser->iomem_base != (void*)port->mapbase) ||
+-	     (ser->hub6 != 0 ) )
++	if ((ser->irq != port->irq) ||
++	    (ser->io_type != SERIAL_IO_MEM) ||
++	    (ser->baud_base != port->uartclk)  ||
++	    (ser->iomem_base != (void *)port->mapbase) ||
++	    (ser->hub6 != 0))
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -455,8 +462,8 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
+ 	unsigned short status;
+ 
+ 	/* While we can read, do so ! */
+-	while ( (status = in_be16(&PSC(port)->mpc52xx_psc_status)) &
+-	        MPC52xx_PSC_SR_RXRDY) {
++	while ((status = in_be16(&PSC(port)->mpc52xx_psc_status)) &
++		MPC52xx_PSC_SR_RXRDY) {
+ 
+ 		/* Get the char */
+ 		ch = in_8(&PSC(port)->mpc52xx_psc_buffer_8);
+@@ -474,9 +481,9 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
+ 		flag = TTY_NORMAL;
+ 		port->icount.rx++;
+ 
+-		if ( status & (MPC52xx_PSC_SR_PE |
+-		               MPC52xx_PSC_SR_FE |
+-		               MPC52xx_PSC_SR_RB) ) {
++		if (status & (MPC52xx_PSC_SR_PE |
++			      MPC52xx_PSC_SR_FE |
++			      MPC52xx_PSC_SR_RB)) {
+ 
+ 			if (status & MPC52xx_PSC_SR_RB) {
+ 				flag = TTY_BREAK;
+@@ -487,7 +494,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
+ 				flag = TTY_FRAME;
+ 
+ 			/* Clear error condition */
+-			out_8(&PSC(port)->command,MPC52xx_PSC_RST_ERR_STAT);
++			out_8(&PSC(port)->command, MPC52xx_PSC_RST_ERR_STAT);
+ 
+ 		}
+ 		tty_insert_flip_char(tty, ch, flag);
+@@ -568,16 +575,16 @@ mpc52xx_uart_int(int irq, void *dev_id)
+ 
+ 		/* Do we need to receive chars ? */
+ 		/* For this RX interrupts must be on and some chars waiting */
+-		if ( status & MPC52xx_PSC_IMR_RXRDY )
++		if (status & MPC52xx_PSC_IMR_RXRDY)
+ 			keepgoing |= mpc52xx_uart_int_rx_chars(port);
+ 
+ 		/* Do we need to send chars ? */
+ 		/* For this, TX must be ready and TX interrupt enabled */
+-		if ( status & MPC52xx_PSC_IMR_TXRDY )
++		if (status & MPC52xx_PSC_IMR_TXRDY)
+ 			keepgoing |= mpc52xx_uart_int_tx_chars(port);
+ 
+ 		/* Limit number of iteration */
+-		if ( !(--pass) )
++		if (!(--pass))
+ 			keepgoing = 0;
+ 
+ 	} while (keepgoing);
+@@ -596,7 +603,7 @@ mpc52xx_uart_int(int irq, void *dev_id)
+ 
+ static void __init
+ mpc52xx_console_get_options(struct uart_port *port,
+-                            int *baud, int *parity, int *bits, int *flow)
++			    int *baud, int *parity, int *bits, int *flow)
+ {
+ 	struct mpc52xx_psc __iomem *psc = PSC(port);
+ 	unsigned char mr1;
+@@ -604,7 +611,7 @@ mpc52xx_console_get_options(struct uart_port *port,
+ 	pr_debug("mpc52xx_console_get_options(port=%p)\n", port);
+ 
+ 	/* Read the mode registers */
+-	out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
++	out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+ 	mr1 = in_8(&psc->mode);
+ 
+ 	/* CT{U,L}R are write-only ! */
+@@ -616,11 +623,18 @@ mpc52xx_console_get_options(struct uart_port *port,
+ 
+ 	/* Parse them */
+ 	switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) {
+-		case MPC52xx_PSC_MODE_5_BITS:	*bits = 5; break;
+-		case MPC52xx_PSC_MODE_6_BITS:	*bits = 6; break;
+-		case MPC52xx_PSC_MODE_7_BITS:	*bits = 7; break;
+-		case MPC52xx_PSC_MODE_8_BITS:
+-		default:			*bits = 8;
++	case MPC52xx_PSC_MODE_5_BITS:
++		*bits = 5;
++		break;
++	case MPC52xx_PSC_MODE_6_BITS:
++		*bits = 6;
++		break;
++	case MPC52xx_PSC_MODE_7_BITS:
++		*bits = 7;
++		break;
++	case MPC52xx_PSC_MODE_8_BITS:
++	default:
++		*bits = 8;
+ 	}
+ 
+ 	if (mr1 & MPC52xx_PSC_MODE_PARNONE)
+@@ -657,7 +671,7 @@ mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
+ 		/* Wait the TX buffer to be empty */
+ 		j = 20000;	/* Maximum wait */
+ 		while (!(in_be16(&psc->mpc52xx_psc_status) &
+-		         MPC52xx_PSC_SR_TXEMP) && --j)
++			 MPC52xx_PSC_SR_TXEMP) && --j)
+ 			udelay(1);
+ 	}
+ 
+@@ -730,16 +744,18 @@ mpc52xx_console_setup(struct console *co, char *options)
+ 	}
+ 
+ 	pr_debug("Console on ttyPSC%x is %s\n",
+-	         co->index, mpc52xx_uart_nodes[co->index]->full_name);
++		 co->index, mpc52xx_uart_nodes[co->index]->full_name);
+ 
+ 	/* Fetch register locations */
+-	if ((ret = of_address_to_resource(np, 0, &res)) != 0) {
++	ret = of_address_to_resource(np, 0, &res);
++	if (ret) {
+ 		pr_debug("Could not get resources for PSC%x\n", co->index);
+ 		return ret;
+ 	}
+ 
+ 	/* Search for bus-frequency property in this node or a parent */
+-	if ((ipb_freq = mpc52xx_find_ipb_freq(np)) == 0) {
++	ipb_freq = mpc52xx_find_ipb_freq(np);
++	if (ipb_freq == 0) {
+ 		pr_debug("Could not find IPB bus frequency!\n");
+ 		return -EINVAL;
+ 	}
+@@ -757,7 +773,8 @@ mpc52xx_console_setup(struct console *co, char *options)
+ 		return -EINVAL;
+ 
+ 	pr_debug("mpc52xx-psc uart at %p, mapped to %p, irq=%x, freq=%i\n",
+-	         (void*)port->mapbase, port->membase, port->irq, port->uartclk);
++		 (void *)port->mapbase, port->membase,
++		 port->irq, port->uartclk);
+ 
+ 	/* Setup the port parameters accoding to options */
+ 	if (options)
+@@ -766,7 +783,7 @@ mpc52xx_console_setup(struct console *co, char *options)
+ 		mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow);
+ 
+ 	pr_debug("Setting console parameters: %i %i%c1 flow=%c\n",
+-	         baud, bits, parity, flow);
++		 baud, bits, parity, flow);
+ 
+ 	return uart_set_options(port, co, baud, parity, bits, flow);
+ }
+@@ -781,7 +798,7 @@ static struct console mpc52xx_console = {
+ 	.device	= uart_console_device,
+ 	.setup	= mpc52xx_console_setup,
+ 	.flags	= CON_PRINTBUFFER,
+-	.index	= -1,	/* Specified on the cmdline (e.g. console=ttyPSC0 ) */
++	.index	= -1,	/* Specified on the cmdline (e.g. console=ttyPSC0) */
+ 	.data	= &mpc52xx_uart_driver,
+ };
+ 
+@@ -809,7 +826,6 @@ console_initcall(mpc52xx_console_init);
+ /* ======================================================================== */
+ 
+ static struct uart_driver mpc52xx_uart_driver = {
+-	.owner		= THIS_MODULE,
+ 	.driver_name	= "mpc52xx_psc_uart",
+ 	.dev_name	= "ttyPSC",
+ 	.major		= SERIAL_PSC_MAJOR,
+@@ -837,7 +853,7 @@ mpc52xx_uart_probe(struct platform_device *dev)
+ 	if (idx < 0 || idx >= MPC52xx_PSC_MAXNUM)
+ 		return -EINVAL;
+ 
+-	if (!mpc52xx_match_psc_function(idx,"uart"))
++	if (!mpc52xx_match_psc_function(idx, "uart"))
+ 		return -ENODEV;
+ 
+ 	/* Init the port structure */
+@@ -848,13 +864,13 @@ mpc52xx_uart_probe(struct platform_device *dev)
+ 	port->fifosize	= 512;
+ 	port->iotype	= UPIO_MEM;
+ 	port->flags	= UPF_BOOT_AUTOCONF |
+-			  ( uart_console(port) ? 0 : UPF_IOREMAP );
++			  (uart_console(port) ? 0 : UPF_IOREMAP);
+ 	port->line	= idx;
+ 	port->ops	= &mpc52xx_uart_ops;
+ 	port->dev	= &dev->dev;
+ 
+ 	/* Search for IRQ and mapbase */
+-	for (i=0 ; i<dev->num_resources ; i++, res++) {
++	for (i = 0 ; i < dev->num_resources ; i++, res++) {
+ 		if (res->flags & IORESOURCE_MEM)
+ 			port->mapbase = res->start;
+ 		else if (res->flags & IORESOURCE_IRQ)
+@@ -866,7 +882,7 @@ mpc52xx_uart_probe(struct platform_device *dev)
+ 	/* Add the port to the uart sub-system */
+ 	ret = uart_add_one_port(&mpc52xx_uart_driver, port);
+ 	if (!ret)
+-		platform_set_drvdata(dev, (void*)port);
++		platform_set_drvdata(dev, (void *)port);
+ 
+ 	return ret;
+ }
+@@ -917,6 +933,7 @@ static struct platform_driver mpc52xx_uart_platform_driver = {
+ 	.resume		= mpc52xx_uart_resume,
+ #endif
+ 	.driver		= {
++		.owner	= THIS_MODULE,
+ 		.name	= "mpc52xx-psc",
+ 	},
+ };
+@@ -946,10 +963,11 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
+ 	if (idx >= MPC52xx_PSC_MAXNUM)
+ 		return -EINVAL;
+ 	pr_debug("Found %s assigned to ttyPSC%x\n",
+-	         mpc52xx_uart_nodes[idx]->full_name, idx);
++		 mpc52xx_uart_nodes[idx]->full_name, idx);
+ 
+ 	/* Search for bus-frequency property in this node or a parent */
+-	if ((ipb_freq = mpc52xx_find_ipb_freq(op->node)) == 0) {
++	ipb_freq = mpc52xx_find_ipb_freq(op->node);
++	if (ipb_freq == 0) {
+ 		dev_dbg(&op->dev, "Could not find IPB bus frequency!\n");
+ 		return -EINVAL;
+ 	}
+@@ -962,22 +980,23 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
+ 	port->fifosize	= 512;
+ 	port->iotype	= UPIO_MEM;
+ 	port->flags	= UPF_BOOT_AUTOCONF |
+-			  ( uart_console(port) ? 0 : UPF_IOREMAP );
++			  (uart_console(port) ? 0 : UPF_IOREMAP);
+ 	port->line	= idx;
+ 	port->ops	= &mpc52xx_uart_ops;
+ 	port->dev	= &op->dev;
+ 
+ 	/* Search for IRQ and mapbase */
+-	if ((ret = of_address_to_resource(op->node, 0, &res)) != 0)
++	ret = of_address_to_resource(op->node, 0, &res);
++	if (ret)
+ 		return ret;
+ 
+ 	port->mapbase = res.start;
+ 	port->irq = irq_of_parse_and_map(op->node, 0);
+ 
+ 	dev_dbg(&op->dev, "mpc52xx-psc uart at %p, irq=%x, freq=%i\n",
+-	        (void*)port->mapbase, port->irq, port->uartclk);
++		(void *)port->mapbase, port->irq, port->uartclk);
+ 
+-	if ((port->irq==NO_IRQ) || !port->mapbase) {
++	if ((port->irq == NO_IRQ) || !port->mapbase) {
+ 		printk(KERN_ERR "Could not allocate resources for PSC\n");
+ 		return -EINVAL;
+ 	}
+@@ -985,7 +1004,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
+ 	/* Add the port to the uart sub-system */
+ 	ret = uart_add_one_port(&mpc52xx_uart_driver, port);
+ 	if (!ret)
+-		dev_set_drvdata(&op->dev, (void*)port);
++		dev_set_drvdata(&op->dev, (void *)port);
+ 
+ 	return ret;
+ }
+@@ -1048,6 +1067,7 @@ mpc52xx_uart_of_assign(struct device_node *np, int idx)
+ 	if (idx < 0)
+ 		return; /* No free slot; abort */
+ 
++	of_node_get(np);
+ 	/* If the slot is already occupied, then swap slots */
+ 	if (mpc52xx_uart_nodes[idx] && (free_idx != -1))
+ 		mpc52xx_uart_nodes[free_idx] = mpc52xx_uart_nodes[idx];
+@@ -1057,7 +1077,7 @@ mpc52xx_uart_of_assign(struct device_node *np, int idx)
+ static void
+ mpc52xx_uart_of_enumerate(void)
+ {
+-	static int enum_done = 0;
++	static int enum_done;
+ 	struct device_node *np;
+ 	const unsigned int *devno;
+ 	int i;
+@@ -1071,7 +1091,7 @@ mpc52xx_uart_of_enumerate(void)
+ 
+ 		/* Is a particular device number requested? */
+ 		devno = of_get_property(np, "port-number", NULL);
+-		mpc52xx_uart_of_assign(of_node_get(np), devno ? *devno : -1);
++		mpc52xx_uart_of_assign(np, devno ? *devno : -1);
+ 	}
+ 
+ 	enum_done = 1;
+@@ -1079,15 +1099,13 @@ mpc52xx_uart_of_enumerate(void)
+ 	for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
+ 		if (mpc52xx_uart_nodes[i])
+ 			pr_debug("%s assigned to ttyPSC%x\n",
+-			         mpc52xx_uart_nodes[i]->full_name, i);
++				 mpc52xx_uart_nodes[i]->full_name, i);
+ 	}
+ }
+ 
+ MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
+ 
+ static struct of_platform_driver mpc52xx_uart_of_driver = {
+-	.owner		= THIS_MODULE,
+-	.name		= "mpc52xx-psc-uart",
+ 	.match_table	= mpc52xx_uart_of_match,
+ 	.probe		= mpc52xx_uart_of_probe,
+ 	.remove		= mpc52xx_uart_of_remove,
+@@ -1113,7 +1131,8 @@ mpc52xx_uart_init(void)
+ 
+ 	printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n");
+ 
+-	if ((ret = uart_register_driver(&mpc52xx_uart_driver)) != 0) {
++	ret = uart_register_driver(&mpc52xx_uart_driver);
++	if (ret) {
+ 		printk(KERN_ERR "%s: uart_register_driver failed (%i)\n",
+ 		       __FILE__, ret);
+ 		return ret;
 diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
 index 73440e2..ddf6391 100644
 --- a/drivers/serial/sh-sci.c
@@ -555503,6 +702261,1556 @@
  #define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
  #elif defined(__H8300H__) || defined(__H8300S__)
  #define SCBRR_VALUE(bps) (((CONFIG_CPU_CLOCK*1000/32)/bps)-1)
+diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
+index 3f59324..8094340 100644
+--- a/drivers/serial/uartlite.c
++++ b/drivers/serial/uartlite.c
+@@ -539,7 +539,7 @@ static int __devinit ulite_assign(struct device *dev, int id, u32 base, int irq)
+  *
+  * @dev: pointer to device structure
+  */
+-static int __devinit ulite_release(struct device *dev)
++static int __devexit ulite_release(struct device *dev)
+ {
+ 	struct uart_port *port = dev_get_drvdata(dev);
+ 	int rc = 0;
+@@ -572,14 +572,14 @@ static int __devinit ulite_probe(struct platform_device *pdev)
+ 	return ulite_assign(&pdev->dev, pdev->id, res->start, res2->start);
+ }
+ 
+-static int ulite_remove(struct platform_device *pdev)
++static int __devexit ulite_remove(struct platform_device *pdev)
+ {
+ 	return ulite_release(&pdev->dev);
+ }
+ 
+ static struct platform_driver ulite_platform_driver = {
+ 	.probe	= ulite_probe,
+-	.remove	= ulite_remove,
++	.remove	= __devexit_p(ulite_remove),
+ 	.driver	= {
+ 		   .owner = THIS_MODULE,
+ 		   .name  = "uartlite",
+diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c
+new file mode 100644
+index 0000000..e0994f0
+--- /dev/null
++++ b/drivers/serial/ucc_uart.c
+@@ -0,0 +1,1514 @@
++/*
++ * Freescale QUICC Engine UART device driver
++ *
++ * Author: Timur Tabi <timur at freescale.com>
++ *
++ * Copyright 2007 Freescale Semiconductor, Inc.  This file is licensed under
++ * the terms of the GNU General Public License version 2.  This program
++ * is licensed "as is" without any warranty of any kind, whether express
++ * or implied.
++ *
++ * This driver adds support for UART devices via Freescale's QUICC Engine
++ * found on some Freescale SOCs.
++ *
++ * If Soft-UART support is needed but not already present, then this driver
++ * will request and upload the "Soft-UART" microcode upon probe.  The
++ * filename of the microcode should be fsl_qe_ucode_uart_X_YZ.bin, where "X"
++ * is the name of the SOC (e.g. 8323), and YZ is the revision of the SOC,
++ * (e.g. "11" for 1.1).
++ */
++
++#include <linux/module.h>
++#include <linux/serial.h>
++#include <linux/serial_core.h>
++#include <linux/io.h>
++#include <linux/of_platform.h>
++#include <linux/dma-mapping.h>
++
++#include <linux/fs_uart_pd.h>
++#include <asm/ucc_slow.h>
++
++#include <linux/firmware.h>
++#include <asm/reg.h>
++
++/*
++ * The GUMR flag for Soft UART.  This would normally be defined in qe.h,
++ * but Soft-UART is a hack and we want to keep everything related to it in
++ * this file.
++ */
++#define UCC_SLOW_GUMR_H_SUART   	0x00004000      /* Soft-UART */
++
++/*
++ * soft_uart is 1 if we need to use Soft-UART mode
++ */
++static int soft_uart;
++/*
++ * firmware_loaded is 1 if the firmware has been loaded, 0 otherwise.
++ */
++static int firmware_loaded;
++
++/* Enable this macro to configure all serial ports in internal loopback
++   mode */
++/* #define LOOPBACK */
++
++/* The major and minor device numbers are defined in
++ * http://www.lanana.org/docs/device-list/devices-2.6+.txt.  For the QE
++ * UART, we have major number 204 and minor numbers 46 - 49, which are the
++ * same as for the CPM2.  This decision was made because no Freescale part
++ * has both a CPM and a QE.
++ */
++#define SERIAL_QE_MAJOR 204
++#define SERIAL_QE_MINOR 46
++
++/* Since we only have minor numbers 46 - 49, there is a hard limit of 4 ports */
++#define UCC_MAX_UART    4
++
++/* The number of buffer descriptors for receiving characters. */
++#define RX_NUM_FIFO     4
++
++/* The number of buffer descriptors for transmitting characters. */
++#define TX_NUM_FIFO     4
++
++/* The maximum size of the character buffer for a single RX BD. */
++#define RX_BUF_SIZE     32
++
++/* The maximum size of the character buffer for a single TX BD. */
++#define TX_BUF_SIZE     32
++
++/*
++ * The number of jiffies to wait after receiving a close command before the
++ * device is actually closed.  This allows the last few characters to be
++ * sent over the wire.
++ */
++#define UCC_WAIT_CLOSING 100
++
++struct ucc_uart_pram {
++	struct ucc_slow_pram common;
++	u8 res1[8];     	/* reserved */
++	__be16 maxidl;  	/* Maximum idle chars */
++	__be16 idlc;    	/* temp idle counter */
++	__be16 brkcr;   	/* Break count register */
++	__be16 parec;   	/* receive parity error counter */
++	__be16 frmec;   	/* receive framing error counter */
++	__be16 nosec;   	/* receive noise counter */
++	__be16 brkec;   	/* receive break condition counter */
++	__be16 brkln;   	/* last received break length */
++	__be16 uaddr[2];	/* UART address character 1 & 2 */
++	__be16 rtemp;   	/* Temp storage */
++	__be16 toseq;   	/* Transmit out of sequence char */
++	__be16 cchars[8];       /* control characters 1-8 */
++	__be16 rccm;    	/* receive control character mask */
++	__be16 rccr;    	/* receive control character register */
++	__be16 rlbc;    	/* receive last break character */
++	__be16 res2;    	/* reserved */
++	__be32 res3;    	/* reserved, should be cleared */
++	u8 res4;		/* reserved, should be cleared */
++	u8 res5[3];     	/* reserved, should be cleared */
++	__be32 res6;    	/* reserved, should be cleared */
++	__be32 res7;    	/* reserved, should be cleared */
++	__be32 res8;    	/* reserved, should be cleared */
++	__be32 res9;    	/* reserved, should be cleared */
++	__be32 res10;   	/* reserved, should be cleared */
++	__be32 res11;   	/* reserved, should be cleared */
++	__be32 res12;   	/* reserved, should be cleared */
++	__be32 res13;   	/* reserved, should be cleared */
++/* The rest is for Soft-UART only */
++	__be16 supsmr;  	/* 0x90, Shadow UPSMR */
++	__be16 res92;   	/* 0x92, reserved, initialize to 0 */
++	__be32 rx_state;	/* 0x94, RX state, initialize to 0 */
++	__be32 rx_cnt;  	/* 0x98, RX count, initialize to 0 */
++	u8 rx_length;   	/* 0x9C, Char length, set to 1+CL+PEN+1+SL */
++	u8 rx_bitmark;  	/* 0x9D, reserved, initialize to 0 */
++	u8 rx_temp_dlst_qe;     /* 0x9E, reserved, initialize to 0 */
++	u8 res14[0xBC - 0x9F];  /* reserved */
++	__be32 dump_ptr;	/* 0xBC, Dump pointer */
++	__be32 rx_frame_rem;    /* 0xC0, reserved, initialize to 0 */
++	u8 rx_frame_rem_size;   /* 0xC4, reserved, initialize to 0 */
++	u8 tx_mode;     	/* 0xC5, mode, 0=AHDLC, 1=UART */
++	__be16 tx_state;	/* 0xC6, TX state */
++	u8 res15[0xD0 - 0xC8];  /* reserved */
++	__be32 resD0;   	/* 0xD0, reserved, initialize to 0 */
++	u8 resD4;       	/* 0xD4, reserved, initialize to 0 */
++	__be16 resD5;   	/* 0xD5, reserved, initialize to 0 */
++} __attribute__ ((packed));
++
++/* SUPSMR definitions, for Soft-UART only */
++#define UCC_UART_SUPSMR_SL      	0x8000
++#define UCC_UART_SUPSMR_RPM_MASK	0x6000
++#define UCC_UART_SUPSMR_RPM_ODD 	0x0000
++#define UCC_UART_SUPSMR_RPM_LOW 	0x2000
++#define UCC_UART_SUPSMR_RPM_EVEN	0x4000
++#define UCC_UART_SUPSMR_RPM_HIGH	0x6000
++#define UCC_UART_SUPSMR_PEN     	0x1000
++#define UCC_UART_SUPSMR_TPM_MASK	0x0C00
++#define UCC_UART_SUPSMR_TPM_ODD 	0x0000
++#define UCC_UART_SUPSMR_TPM_LOW 	0x0400
++#define UCC_UART_SUPSMR_TPM_EVEN	0x0800
++#define UCC_UART_SUPSMR_TPM_HIGH	0x0C00
++#define UCC_UART_SUPSMR_FRZ     	0x0100
++#define UCC_UART_SUPSMR_UM_MASK 	0x00c0
++#define UCC_UART_SUPSMR_UM_NORMAL       0x0000
++#define UCC_UART_SUPSMR_UM_MAN_MULTI    0x0040
++#define UCC_UART_SUPSMR_UM_AUTO_MULTI   0x00c0
++#define UCC_UART_SUPSMR_CL_MASK 	0x0030
++#define UCC_UART_SUPSMR_CL_8    	0x0030
++#define UCC_UART_SUPSMR_CL_7    	0x0020
++#define UCC_UART_SUPSMR_CL_6    	0x0010
++#define UCC_UART_SUPSMR_CL_5    	0x0000
++
++#define UCC_UART_TX_STATE_AHDLC 	0x00
++#define UCC_UART_TX_STATE_UART  	0x01
++#define UCC_UART_TX_STATE_X1    	0x00
++#define UCC_UART_TX_STATE_X16   	0x80
++
++#define UCC_UART_PRAM_ALIGNMENT 0x100
++
++#define UCC_UART_SIZE_OF_BD     UCC_SLOW_SIZE_OF_BD
++#define NUM_CONTROL_CHARS       8
++
++/* Private per-port data structure */
++struct uart_qe_port {
++	struct uart_port port;
++	struct ucc_slow __iomem *uccp;
++	struct ucc_uart_pram __iomem *uccup;
++	struct ucc_slow_info us_info;
++	struct ucc_slow_private *us_private;
++	struct device_node *np;
++	unsigned int ucc_num;   /* First ucc is 0, not 1 */
++
++	u16 rx_nrfifos;
++	u16 rx_fifosize;
++	u16 tx_nrfifos;
++	u16 tx_fifosize;
++	int wait_closing;
++	u32 flags;
++	struct qe_bd *rx_bd_base;
++	struct qe_bd *rx_cur;
++	struct qe_bd *tx_bd_base;
++	struct qe_bd *tx_cur;
++	unsigned char *tx_buf;
++	unsigned char *rx_buf;
++	void *bd_virt;  	/* virtual address of the BD buffers */
++	dma_addr_t bd_dma_addr; /* bus address of the BD buffers */
++	unsigned int bd_size;   /* size of BD buffer space */
++};
++
++static struct uart_driver ucc_uart_driver = {
++	.owner  	= THIS_MODULE,
++	.driver_name    = "serial",
++	.dev_name       = "ttyQE",
++	.major  	= SERIAL_QE_MAJOR,
++	.minor  	= SERIAL_QE_MINOR,
++	.nr     	= UCC_MAX_UART,
++};
++
++/*
++ * Virtual to physical address translation.
++ *
++ * Given the virtual address for a character buffer, this function returns
++ * the physical (DMA) equivalent.
++ */
++static inline dma_addr_t cpu2qe_addr(void *addr, struct uart_qe_port *qe_port)
++{
++	if (likely((addr >= qe_port->bd_virt)) &&
++	    (addr < (qe_port->bd_virt + qe_port->bd_size)))
++		return qe_port->bd_dma_addr + (addr - qe_port->bd_virt);
++
++	/* something nasty happened */
++	printk(KERN_ERR "%s: addr=%p\n", __FUNCTION__, addr);
++	BUG();
++	return 0;
++}
++
++/*
++ * Physical to virtual address translation.
++ *
++ * Given the physical (DMA) address for a character buffer, this function
++ * returns the virtual equivalent.
++ */
++static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
++{
++	/* sanity check */
++	if (likely((addr >= qe_port->bd_dma_addr) &&
++		   (addr < (qe_port->bd_dma_addr + qe_port->bd_size))))
++		return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
++
++	/* something nasty happened */
++	printk(KERN_ERR "%s: addr=%x\n", __FUNCTION__, addr);
++	BUG();
++	return NULL;
++}
++
++/*
++ * Return 1 if the QE is done transmitting all buffers for this port
++ *
++ * This function scans each BD in sequence.  If we find a BD that is not
++ * ready (READY=1), then we return 0 indicating that the QE is still sending
++ * data.  If we reach the last BD (WRAP=1), then we know we've scanned
++ * the entire list, and all BDs are done.
++ */
++static unsigned int qe_uart_tx_empty(struct uart_port *port)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++	struct qe_bd *bdp = qe_port->tx_bd_base;
++
++	while (1) {
++		if (in_be16(&bdp->status) & BD_SC_READY)
++			/* This BD is not done, so return "not done" */
++			return 0;
++
++		if (in_be16(&bdp->status) & BD_SC_WRAP)
++			/*
++			 * This BD is done and it's the last one, so return
++			 * "done"
++			 */
++			return 1;
++
++		bdp++;
++	};
++}
++
++/*
++ * Set the modem control lines
++ *
++ * Although the QE can control the modem control lines (e.g. CTS), we
++ * don't need that support. This function must exist, however, otherwise
++ * the kernel will panic.
++ */
++void qe_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++}
++
++/*
++ * Get the current modem control line status
++ *
++ * Although the QE can control the modem control lines (e.g. CTS), this
++ * driver currently doesn't support that, so we always return Carrier
++ * Detect, Data Set Ready, and Clear To Send.
++ */
++static unsigned int qe_uart_get_mctrl(struct uart_port *port)
++{
++	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
++}
++
++/*
++ * Disable the transmit interrupt.
++ *
++ * Although this function is called "stop_tx", it does not actually stop
++ * transmission of data.  Instead, it tells the QE to not generate an
++ * interrupt when the UCC is finished sending characters.
++ */
++static void qe_uart_stop_tx(struct uart_port *port)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++
++	clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
++}
++
++/*
++ * Transmit as many characters to the HW as possible.
++ *
++ * This function will attempt to stuff all the characters from the
++ * kernel's transmit buffer into TX BDs.
++ *
++ * A return value of non-zero indicates that it successfully stuffed all
++ * characters from the kernel buffer.
++ *
++ * A return value of zero indicates that there are still characters in the
++ * kernel's buffer that have not been transmitted, but there are no more BDs
++ * available.  This function should be called again after a BD has been made
++ * available.
++ */
++static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
++{
++	struct qe_bd *bdp;
++	unsigned char *p;
++	unsigned int count;
++	struct uart_port *port = &qe_port->port;
++	struct circ_buf *xmit = &port->info->xmit;
++
++	bdp = qe_port->rx_cur;
++
++	/* Handle xon/xoff */
++	if (port->x_char) {
++		/* Pick next descriptor and fill from buffer */
++		bdp = qe_port->tx_cur;
++
++		p = qe2cpu_addr(bdp->buf, qe_port);
++
++		*p++ = port->x_char;
++		out_be16(&bdp->length, 1);
++		setbits16(&bdp->status, BD_SC_READY);
++		/* Get next BD. */
++		if (in_be16(&bdp->status) & BD_SC_WRAP)
++			bdp = qe_port->tx_bd_base;
++		else
++			bdp++;
++		qe_port->tx_cur = bdp;
++
++		port->icount.tx++;
++		port->x_char = 0;
++		return 1;
++	}
++
++	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
++		qe_uart_stop_tx(port);
++		return 0;
++	}
++
++	/* Pick next descriptor and fill from buffer */
++	bdp = qe_port->tx_cur;
++
++	while (!(in_be16(&bdp->status) & BD_SC_READY) &&
++	       (xmit->tail != xmit->head)) {
++		count = 0;
++		p = qe2cpu_addr(bdp->buf, qe_port);
++		while (count < qe_port->tx_fifosize) {
++			*p++ = xmit->buf[xmit->tail];
++			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
++			port->icount.tx++;
++			count++;
++			if (xmit->head == xmit->tail)
++				break;
++		}
++
++		out_be16(&bdp->length, count);
++		setbits16(&bdp->status, BD_SC_READY);
++
++		/* Get next BD. */
++		if (in_be16(&bdp->status) & BD_SC_WRAP)
++			bdp = qe_port->tx_bd_base;
++		else
++			bdp++;
++	}
++	qe_port->tx_cur = bdp;
++
++	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++		uart_write_wakeup(port);
++
++	if (uart_circ_empty(xmit)) {
++		/* The kernel buffer is empty, so turn off TX interrupts.  We
++		   don't need to be told when the QE is finished transmitting
++		   the data. */
++		qe_uart_stop_tx(port);
++		return 0;
++	}
++
++	return 1;
++}
++
++/*
++ * Start transmitting data
++ *
++ * This function will start transmitting any available data, if the port
++ * isn't already transmitting data.
++ */
++static void qe_uart_start_tx(struct uart_port *port)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++
++	/* If we currently are transmitting, then just return */
++	if (in_be16(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
++		return;
++
++	/* Otherwise, pump the port and start transmission */
++	if (qe_uart_tx_pump(qe_port))
++		setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
++}
++
++/*
++ * Stop receiving data
++ */
++static void qe_uart_stop_rx(struct uart_port *port)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++
++	clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
++}
++
++/*
++ * Enable status change interrupts
++ *
++ * We don't support status change interrupts, but we need to define this
++ * function otherwise the kernel will panic.
++ */
++static void qe_uart_enable_ms(struct uart_port *port)
++{
++}
++
++/* Start or stop sending a break signal
++ *
++ * This function controls the sending of a break signal.  If break_state=1,
++ * then we start sending a break signal.  If break_state=0, then we stop
++ * sending the break signal.
++ */
++static void qe_uart_break_ctl(struct uart_port *port, int break_state)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++
++	if (break_state)
++		ucc_slow_stop_tx(qe_port->us_private);
++	else
++		ucc_slow_restart_tx(qe_port->us_private);
++}
++
++/* ISR helper function for receiving characters.
++ *
++ * This function is called by the ISR to handle received characters.
++ */
++static void qe_uart_int_rx(struct uart_qe_port *qe_port)
++{
++	int i;
++	unsigned char ch, *cp;
++	struct uart_port *port = &qe_port->port;
++	struct tty_struct *tty = port->info->tty;
++	struct qe_bd *bdp;
++	u16 status;
++	unsigned int flg;
++
++	/* Just loop through the closed BDs and copy the characters into
++	 * the buffer.
++	 */
++	bdp = qe_port->rx_cur;
++	while (1) {
++		status = in_be16(&bdp->status);
++
++		/* If this one is empty, then we assume we've read them all */
++		if (status & BD_SC_EMPTY)
++			break;
++
++		/* get number of characters, and check space in RX buffer */
++		i = in_be16(&bdp->length);
++
++		/* If we don't have enough room in RX buffer for the entire BD,
++		 * then we try later, which will be the next RX interrupt.
++		 */
++		if (tty_buffer_request_room(tty, i) < i) {
++			dev_dbg(port->dev, "ucc-uart: no room in RX buffer\n");
++			return;
++		}
++
++		/* get pointer */
++		cp = qe2cpu_addr(bdp->buf, qe_port);
++
++		/* loop through the buffer */
++		while (i-- > 0) {
++			ch = *cp++;
++			port->icount.rx++;
++			flg = TTY_NORMAL;
++
++			if (!i && status &
++			    (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV))
++				goto handle_error;
++			if (uart_handle_sysrq_char(port, ch))
++				continue;
++
++error_return:
++			tty_insert_flip_char(tty, ch, flg);
++
++		}
++
++		/* This BD is ready to be used again. Clear status. get next */
++		clrsetbits_be16(&bdp->status, BD_SC_BR | BD_SC_FR | BD_SC_PR |
++			BD_SC_OV | BD_SC_ID, BD_SC_EMPTY);
++		if (in_be16(&bdp->status) & BD_SC_WRAP)
++			bdp = qe_port->rx_bd_base;
++		else
++			bdp++;
++
++	}
++
++	/* Write back buffer pointer */
++	qe_port->rx_cur = bdp;
++
++	/* Activate BH processing */
++	tty_flip_buffer_push(tty);
++
++	return;
++
++	/* Error processing */
++
++handle_error:
++	/* Statistics */
++	if (status & BD_SC_BR)
++		port->icount.brk++;
++	if (status & BD_SC_PR)
++		port->icount.parity++;
++	if (status & BD_SC_FR)
++		port->icount.frame++;
++	if (status & BD_SC_OV)
++		port->icount.overrun++;
++
++	/* Mask out ignored conditions */
++	status &= port->read_status_mask;
++
++	/* Handle the remaining ones */
++	if (status & BD_SC_BR)
++		flg = TTY_BREAK;
++	else if (status & BD_SC_PR)
++		flg = TTY_PARITY;
++	else if (status & BD_SC_FR)
++		flg = TTY_FRAME;
++
++	/* Overrun does not affect the current character ! */
++	if (status & BD_SC_OV)
++		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
++#ifdef SUPPORT_SYSRQ
++	port->sysrq = 0;
++#endif
++	goto error_return;
++}
++
++/* Interrupt handler
++ *
++ * This interrupt handler is called after a BD is processed.
++ */
++static irqreturn_t qe_uart_int(int irq, void *data)
++{
++	struct uart_qe_port *qe_port = (struct uart_qe_port *) data;
++	struct ucc_slow __iomem *uccp = qe_port->uccp;
++	u16 events;
++
++	/* Clear the interrupts */
++	events = in_be16(&uccp->ucce);
++	out_be16(&uccp->ucce, events);
++
++	if (events & UCC_UART_UCCE_BRKE)
++		uart_handle_break(&qe_port->port);
++
++	if (events & UCC_UART_UCCE_RX)
++		qe_uart_int_rx(qe_port);
++
++	if (events & UCC_UART_UCCE_TX)
++		qe_uart_tx_pump(qe_port);
++
++	return events ? IRQ_HANDLED : IRQ_NONE;
++}
++
++/* Initialize buffer descriptors
++ *
++ * This function initializes all of the RX and TX buffer descriptors.
++ */
++static void qe_uart_initbd(struct uart_qe_port *qe_port)
++{
++	int i;
++	void *bd_virt;
++	struct qe_bd *bdp;
++
++	/* Set the physical address of the host memory buffers in the buffer
++	 * descriptors, and the virtual address for us to work with.
++	 */
++	bd_virt = qe_port->bd_virt;
++	bdp = qe_port->rx_bd_base;
++	qe_port->rx_cur = qe_port->rx_bd_base;
++	for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) {
++		out_be16(&bdp->status, BD_SC_EMPTY | BD_SC_INTRPT);
++		out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
++		out_be16(&bdp->length, 0);
++		bd_virt += qe_port->rx_fifosize;
++		bdp++;
++	}
++
++	/* The last RX BD wraps back to the first one, so set the WRAP bit */
++	out_be16(&bdp->status, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT);
++	out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
++	out_be16(&bdp->length, 0);
++
++	/* Set the physical address of the host memory
++	 * buffers in the buffer descriptors, and the
++	 * virtual address for us to work with.
++	 */
++	bd_virt = qe_port->bd_virt +
++		L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize);
++	qe_port->tx_cur = qe_port->tx_bd_base;
++	bdp = qe_port->tx_bd_base;
++	for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) {
++		out_be16(&bdp->status, BD_SC_INTRPT);
++		out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
++		out_be16(&bdp->length, 0);
++		bd_virt += qe_port->tx_fifosize;
++		bdp++;
++	}
++
++	/* Loopback requires the preamble bit to be set on the first TX BD */
++#ifdef LOOPBACK
++	setbits16(&qe_port->tx_cur->status, BD_SC_P);
++#endif
++
++	out_be16(&bdp->status, BD_SC_WRAP | BD_SC_INTRPT);
++	out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
++	out_be16(&bdp->length, 0);
++}
++
++/*
++ * Initialize a UCC for UART.
++ *
++ * This function configures a given UCC to be used as a UART device. Basic
++ * UCC initialization is handled in qe_uart_request_port().  This function
++ * does all the UART-specific stuff.
++ */
++static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
++{
++	u32 cecr_subblock;
++	struct ucc_slow __iomem *uccp = qe_port->uccp;
++	struct ucc_uart_pram *uccup = qe_port->uccup;
++
++	unsigned int i;
++
++	/* First, disable TX and RX in the UCC */
++	ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
++
++	/* Program the UCC UART parameter RAM */
++	out_8(&uccup->common.rbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
++	out_8(&uccup->common.tbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
++	out_be16(&uccup->common.mrblr, qe_port->rx_fifosize);
++	out_be16(&uccup->maxidl, 0x10);
++	out_be16(&uccup->brkcr, 1);
++	out_be16(&uccup->parec, 0);
++	out_be16(&uccup->frmec, 0);
++	out_be16(&uccup->nosec, 0);
++	out_be16(&uccup->brkec, 0);
++	out_be16(&uccup->uaddr[0], 0);
++	out_be16(&uccup->uaddr[1], 0);
++	out_be16(&uccup->toseq, 0);
++	for (i = 0; i < 8; i++)
++		out_be16(&uccup->cchars[i], 0xC000);
++	out_be16(&uccup->rccm, 0xc0ff);
++
++	/* Configure the GUMR registers for UART */
++	if (soft_uart)
++		/* Soft-UART requires a 1X multiplier for TX */
++		clrsetbits_be32(&uccp->gumr_l,
++			UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
++			UCC_SLOW_GUMR_L_RDCR_MASK,
++			UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 |
++			UCC_SLOW_GUMR_L_RDCR_16);
++	else
++		clrsetbits_be32(&uccp->gumr_l,
++			UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
++			UCC_SLOW_GUMR_L_RDCR_MASK,
++			UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 |
++			UCC_SLOW_GUMR_L_RDCR_16);
++
++	clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW,
++		UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX);
++
++#ifdef LOOPBACK
++	clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
++		UCC_SLOW_GUMR_L_DIAG_LOOP);
++	clrsetbits_be32(&uccp->gumr_h,
++		UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN,
++		UCC_SLOW_GUMR_H_CDS);
++#endif
++
++	/* Disable all interrupts and clear all pending events */
++	out_be16(&uccp->uccm, 0);
++	out_be16(&uccp->ucce, 0xffff);
++	out_be16(&uccp->udsr, 0x7e7e);
++
++	/* Initialize UPSMR */
++	out_be16(&uccp->upsmr, 0);
++
++	if (soft_uart) {
++		out_be16(&uccup->supsmr, 0x30);
++		out_be16(&uccup->res92, 0);
++		out_be32(&uccup->rx_state, 0);
++		out_be32(&uccup->rx_cnt, 0);
++		out_8(&uccup->rx_bitmark, 0);
++		out_8(&uccup->rx_length, 10);
++		out_be32(&uccup->dump_ptr, 0x4000);
++		out_8(&uccup->rx_temp_dlst_qe, 0);
++		out_be32(&uccup->rx_frame_rem, 0);
++		out_8(&uccup->rx_frame_rem_size, 0);
++		/* Soft-UART requires TX to be 1X */
++		out_8(&uccup->tx_mode,
++			UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1);
++		out_be16(&uccup->tx_state, 0);
++		out_8(&uccup->resD4, 0);
++		out_be16(&uccup->resD5, 0);
++
++		/* Set UART mode.
++		 * Enable receive and transmit.
++		 */
++
++		/* From the microcode errata:
++		 * 1.GUMR_L register, set mode=0010 (QMC).
++		 * 2.Set GUMR_H[17] bit. (UART/AHDLC mode).
++		 * 3.Set GUMR_H[19:20] (Transparent mode)
++		 * 4.Clear GUMR_H[26] (RFW)
++		 * ...
++		 * 6.Receiver must use 16x over sampling
++		 */
++		clrsetbits_be32(&uccp->gumr_l,
++			UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
++			UCC_SLOW_GUMR_L_RDCR_MASK,
++			UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 |
++			UCC_SLOW_GUMR_L_RDCR_16);
++
++		clrsetbits_be32(&uccp->gumr_h,
++			UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN,
++			UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX |
++			UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL);
++
++#ifdef LOOPBACK
++		clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
++				UCC_SLOW_GUMR_L_DIAG_LOOP);
++		clrbits32(&uccp->gumr_h, UCC_SLOW_GUMR_H_CTSP |
++			  UCC_SLOW_GUMR_H_CDS);
++#endif
++
++		cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num);
++		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
++			QE_CR_PROTOCOL_UNSPECIFIED, 0);
++	}
++}
++
++/*
++ * Initialize the port.
++ */
++static int qe_uart_startup(struct uart_port *port)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++	int ret;
++
++	/*
++	 * If we're using Soft-UART mode, then we need to make sure the
++	 * firmware has been uploaded first.
++	 */
++	if (soft_uart && !firmware_loaded) {
++		dev_err(port->dev, "Soft-UART firmware not uploaded\n");
++		return -ENODEV;
++	}
++
++	qe_uart_initbd(qe_port);
++	qe_uart_init_ucc(qe_port);
++
++	/* Install interrupt handler. */
++	ret = request_irq(port->irq, qe_uart_int, IRQF_SHARED, "ucc-uart",
++		qe_port);
++	if (ret) {
++		dev_err(port->dev, "could not claim IRQ %u\n", port->irq);
++		return ret;
++	}
++
++	/* Startup rx-int */
++	setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
++	ucc_slow_enable(qe_port->us_private, COMM_DIR_RX_AND_TX);
++
++	return 0;
++}
++
++/*
++ * Shutdown the port.
++ */
++static void qe_uart_shutdown(struct uart_port *port)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++	struct ucc_slow __iomem *uccp = qe_port->uccp;
++	unsigned int timeout = 20;
++
++	/* Disable RX and TX */
++
++	/* Wait for all the BDs to be marked as sent */
++	while (!qe_uart_tx_empty(port)) {
++		if (!--timeout) {
++			dev_warn(port->dev, "shutdown timeout\n");
++			break;
++		}
++		set_current_state(TASK_UNINTERRUPTIBLE);
++		schedule_timeout(2);
++	}
++
++	if (qe_port->wait_closing) {
++		/* Wait a bit longer */
++		set_current_state(TASK_UNINTERRUPTIBLE);
++		schedule_timeout(qe_port->wait_closing);
++	}
++
++	/* Stop uarts */
++	ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
++	clrbits16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX);
++
++	/* Really shut them down and reinitialize the buffer descriptors */
++	ucc_slow_graceful_stop_tx(qe_port->us_private);
++	qe_uart_initbd(qe_port);
++
++	free_irq(port->irq, qe_port);
++}
++
++/*
++ * Set the serial port parameters.
++ */
++static void qe_uart_set_termios(struct uart_port *port,
++				struct ktermios *termios, struct ktermios *old)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++	struct ucc_slow __iomem *uccp = qe_port->uccp;
++	unsigned int baud;
++	unsigned long flags;
++	u16 upsmr = in_be16(&uccp->upsmr);
++	struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
++	u16 supsmr = in_be16(&uccup->supsmr);
++	u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
++
++	/* Character length programmed into the mode register is the
++	 * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
++	 * 1 or 2 stop bits, minus 1.
++	 * The value 'char_length' counts this for us.
++	 */
++
++	/* byte size */
++	upsmr &= UCC_UART_UPSMR_CL_MASK;
++	supsmr &= UCC_UART_SUPSMR_CL_MASK;
++
++	switch (termios->c_cflag & CSIZE) {
++	case CS5:
++		upsmr |= UCC_UART_UPSMR_CL_5;
++		supsmr |= UCC_UART_SUPSMR_CL_5;
++		char_length += 5;
++		break;
++	case CS6:
++		upsmr |= UCC_UART_UPSMR_CL_6;
++		supsmr |= UCC_UART_SUPSMR_CL_6;
++		char_length += 6;
++		break;
++	case CS7:
++		upsmr |= UCC_UART_UPSMR_CL_7;
++		supsmr |= UCC_UART_SUPSMR_CL_7;
++		char_length += 7;
++		break;
++	default:	/* case CS8 */
++		upsmr |= UCC_UART_UPSMR_CL_8;
++		supsmr |= UCC_UART_SUPSMR_CL_8;
++		char_length += 8;
++		break;
++	}
++
++	/* If CSTOPB is set, we want two stop bits */
++	if (termios->c_cflag & CSTOPB) {
++		upsmr |= UCC_UART_UPSMR_SL;
++		supsmr |= UCC_UART_SUPSMR_SL;
++		char_length++;  /* + SL */
++	}
++
++	if (termios->c_cflag & PARENB) {
++		upsmr |= UCC_UART_UPSMR_PEN;
++		supsmr |= UCC_UART_SUPSMR_PEN;
++		char_length++;  /* + PEN */
++
++		if (!(termios->c_cflag & PARODD)) {
++			upsmr &= ~(UCC_UART_UPSMR_RPM_MASK |
++				   UCC_UART_UPSMR_TPM_MASK);
++			upsmr |= UCC_UART_UPSMR_RPM_EVEN |
++				UCC_UART_UPSMR_TPM_EVEN;
++			supsmr &= ~(UCC_UART_SUPSMR_RPM_MASK |
++				    UCC_UART_SUPSMR_TPM_MASK);
++			supsmr |= UCC_UART_SUPSMR_RPM_EVEN |
++				UCC_UART_SUPSMR_TPM_EVEN;
++		}
++	}
++
++	/*
++	 * Set up parity check flag
++	 */
++	port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
++	if (termios->c_iflag & INPCK)
++		port->read_status_mask |= BD_SC_FR | BD_SC_PR;
++	if (termios->c_iflag & (BRKINT | PARMRK))
++		port->read_status_mask |= BD_SC_BR;
++
++	/*
++	 * Characters to ignore
++	 */
++	port->ignore_status_mask = 0;
++	if (termios->c_iflag & IGNPAR)
++		port->ignore_status_mask |= BD_SC_PR | BD_SC_FR;
++	if (termios->c_iflag & IGNBRK) {
++		port->ignore_status_mask |= BD_SC_BR;
++		/*
++		 * If we're ignoring parity and break indicators, ignore
++		 * overruns too.  (For real raw support).
++		 */
++		if (termios->c_iflag & IGNPAR)
++			port->ignore_status_mask |= BD_SC_OV;
++	}
++	/*
++	 * !!! ignore all characters if CREAD is not set
++	 */
++	if ((termios->c_cflag & CREAD) == 0)
++		port->read_status_mask &= ~BD_SC_EMPTY;
++
++	baud = uart_get_baud_rate(port, termios, old, 0, 115200);
++
++	/* Do we really need a spinlock here? */
++	spin_lock_irqsave(&port->lock, flags);
++
++	out_be16(&uccp->upsmr, upsmr);
++	if (soft_uart) {
++		out_be16(&uccup->supsmr, supsmr);
++		out_8(&uccup->rx_length, char_length);
++
++		/* Soft-UART requires a 1X multiplier for TX */
++		qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
++		qe_setbrg(qe_port->us_info.tx_clock, baud, 1);
++	} else {
++		qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
++		qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
++	}
++
++	spin_unlock_irqrestore(&port->lock, flags);
++}
++
++/*
++ * Return a pointer to a string that describes what kind of port this is.
++ */
++static const char *qe_uart_type(struct uart_port *port)
++{
++	return "QE";
++}
++
++/*
++ * Allocate any memory and I/O resources required by the port.
++ */
++static int qe_uart_request_port(struct uart_port *port)
++{
++	int ret;
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++	struct ucc_slow_info *us_info = &qe_port->us_info;
++	struct ucc_slow_private *uccs;
++	unsigned int rx_size, tx_size;
++	void *bd_virt;
++	dma_addr_t bd_dma_addr = 0;
++
++	ret = ucc_slow_init(us_info, &uccs);
++	if (ret) {
++		dev_err(port->dev, "could not initialize UCC%u\n",
++		       qe_port->ucc_num);
++		return ret;
++	}
++
++	qe_port->us_private = uccs;
++	qe_port->uccp = uccs->us_regs;
++	qe_port->uccup = (struct ucc_uart_pram *) uccs->us_pram;
++	qe_port->rx_bd_base = uccs->rx_bd;
++	qe_port->tx_bd_base = uccs->tx_bd;
++
++	/*
++	 * Allocate the transmit and receive data buffers.
++	 */
++
++	rx_size = L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize);
++	tx_size = L1_CACHE_ALIGN(qe_port->tx_nrfifos * qe_port->tx_fifosize);
++
++	bd_virt = dma_alloc_coherent(NULL, rx_size + tx_size, &bd_dma_addr,
++		GFP_KERNEL);
++	if (!bd_virt) {
++		dev_err(port->dev, "could not allocate buffer descriptors\n");
++		return -ENOMEM;
++	}
++
++	qe_port->bd_virt = bd_virt;
++	qe_port->bd_dma_addr = bd_dma_addr;
++	qe_port->bd_size = rx_size + tx_size;
++
++	qe_port->rx_buf = bd_virt;
++	qe_port->tx_buf = qe_port->rx_buf + rx_size;
++
++	return 0;
++}
++
++/*
++ * Configure the port.
++ *
++ * We say we're a CPM-type port because that's mostly true.  Once the device
++ * is configured, this driver operates almost identically to the CPM serial
++ * driver.
++ */
++static void qe_uart_config_port(struct uart_port *port, int flags)
++{
++	if (flags & UART_CONFIG_TYPE) {
++		port->type = PORT_CPM;
++		qe_uart_request_port(port);
++	}
++}
++
++/*
++ * Release any memory and I/O resources that were allocated in
++ * qe_uart_request_port().
++ */
++static void qe_uart_release_port(struct uart_port *port)
++{
++	struct uart_qe_port *qe_port =
++		container_of(port, struct uart_qe_port, port);
++	struct ucc_slow_private *uccs = qe_port->us_private;
++
++	dma_free_coherent(NULL, qe_port->bd_size, qe_port->bd_virt,
++			  qe_port->bd_dma_addr);
++
++	ucc_slow_free(uccs);
++}
++
++/*
++ * Verify that the data in serial_struct is suitable for this device.
++ */
++static int qe_uart_verify_port(struct uart_port *port,
++			       struct serial_struct *ser)
++{
++	if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
++		return -EINVAL;
++
++	if (ser->irq < 0 || ser->irq >= NR_IRQS)
++		return -EINVAL;
++
++	if (ser->baud_base < 9600)
++		return -EINVAL;
++
++	return 0;
++}
++/* UART operations
++ *
++ * Details on these functions can be found in Documentation/serial/driver
++ */
++static struct uart_ops qe_uart_pops = {
++	.tx_empty       = qe_uart_tx_empty,
++	.set_mctrl      = qe_uart_set_mctrl,
++	.get_mctrl      = qe_uart_get_mctrl,
++	.stop_tx	= qe_uart_stop_tx,
++	.start_tx       = qe_uart_start_tx,
++	.stop_rx	= qe_uart_stop_rx,
++	.enable_ms      = qe_uart_enable_ms,
++	.break_ctl      = qe_uart_break_ctl,
++	.startup	= qe_uart_startup,
++	.shutdown       = qe_uart_shutdown,
++	.set_termios    = qe_uart_set_termios,
++	.type   	= qe_uart_type,
++	.release_port   = qe_uart_release_port,
++	.request_port   = qe_uart_request_port,
++	.config_port    = qe_uart_config_port,
++	.verify_port    = qe_uart_verify_port,
++};
++
++/*
++ * Obtain the SOC model number and revision level
++ *
++ * This function parses the device tree to obtain the SOC model.  It then
++ * reads the SVR register to get the revision.
++ *
++ * The device tree stores the SOC model two different ways.
++ *
++ * The new way is:
++ *
++ *      	cpu@0 {
++ *      		compatible = "PowerPC,8323";
++ *      		device_type = "cpu";
++ *      		...
++ *
++ *
++ * The old way is:
++ *      	 PowerPC,8323@0 {
++ *      		device_type = "cpu";
++ *      		...
++ *
++ * This code first checks the new way, and then the old way.
++ */
++static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
++{
++	struct device_node *np;
++	const char *soc_string;
++	unsigned int svr;
++	unsigned int soc;
++
++	/* Find the CPU node */
++	np = of_find_node_by_type(NULL, "cpu");
++	if (!np)
++		return 0;
++	/* Find the compatible property */
++	soc_string = of_get_property(np, "compatible", NULL);
++	if (!soc_string)
++		/* No compatible property, so try the name. */
++		soc_string = np->name;
++
++	/* Extract the SOC number from the "PowerPC," string */
++	if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
++		return 0;
++
++	/* Get the revision from the SVR */
++	svr = mfspr(SPRN_SVR);
++	*rev_h = (svr >> 4) & 0xf;
++	*rev_l = svr & 0xf;
++
++	return soc;
++}
++
++/*
++ * request_firmware_nowait() callback function
++ *
++ * This function is called by the kernel when a firmware is made available,
++ * or if it times out waiting for the firmware.
++ */
++static void uart_firmware_cont(const struct firmware *fw, void *context)
++{
++	struct qe_firmware *firmware;
++	struct device *dev = context;
++	int ret;
++
++	if (!fw) {
++		dev_err(dev, "firmware not found\n");
++		return;
++	}
++
++	firmware = (struct qe_firmware *) fw->data;
++
++	if (firmware->header.length != fw->size) {
++		dev_err(dev, "invalid firmware\n");
++		return;
++	}
++
++	ret = qe_upload_firmware(firmware);
++	if (ret) {
++		dev_err(dev, "could not load firmware\n");
++		return;
++	}
++
++	firmware_loaded = 1;
++}
++
++static int ucc_uart_probe(struct of_device *ofdev,
++	const struct of_device_id *match)
++{
++	struct device_node *np = ofdev->node;
++	const unsigned int *iprop;      /* Integer OF properties */
++	const char *sprop;      /* String OF properties */
++	struct uart_qe_port *qe_port = NULL;
++	struct resource res;
++	int ret;
++
++	/*
++	 * Determine if we need Soft-UART mode
++	 */
++	if (of_find_property(np, "soft-uart", NULL)) {
++		dev_dbg(&ofdev->dev, "using Soft-UART mode\n");
++		soft_uart = 1;
++	}
++
++	/*
++	 * If we are using Soft-UART, determine if we need to upload the
++	 * firmware, too.
++	 */
++	if (soft_uart) {
++		struct qe_firmware_info *qe_fw_info;
++
++		qe_fw_info = qe_get_firmware_info();
++
++		/* Check if the firmware has been uploaded. */
++		if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) {
++			firmware_loaded = 1;
++		} else {
++			char filename[32];
++			unsigned int soc;
++			unsigned int rev_h;
++			unsigned int rev_l;
++
++			soc = soc_info(&rev_h, &rev_l);
++			if (!soc) {
++				dev_err(&ofdev->dev, "unknown CPU model\n");
++				return -ENXIO;
++			}
++			sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin",
++				soc, rev_h, rev_l);
++
++			dev_info(&ofdev->dev, "waiting for firmware %s\n",
++				filename);
++
++			/*
++			 * We call request_firmware_nowait instead of
++			 * request_firmware so that the driver can load and
++			 * initialize the ports without holding up the rest of
++			 * the kernel.  If hotplug support is enabled in the
++			 * kernel, then we use it.
++			 */
++			ret = request_firmware_nowait(THIS_MODULE,
++				FW_ACTION_HOTPLUG, filename, &ofdev->dev,
++				&ofdev->dev, uart_firmware_cont);
++			if (ret) {
++				dev_err(&ofdev->dev,
++					"could not load firmware %s\n",
++					filename);
++				return ret;
++			}
++		}
++	}
++
++	qe_port = kzalloc(sizeof(struct uart_qe_port), GFP_KERNEL);
++	if (!qe_port) {
++		dev_err(&ofdev->dev, "can't allocate QE port structure\n");
++		return -ENOMEM;
++	}
++
++	/* Search for IRQ and mapbase */
++	ret = of_address_to_resource(np, 0, &res);
++	if (ret) {
++		dev_err(&ofdev->dev, "missing 'reg' property in device tree\n");
++		kfree(qe_port);
++		return ret;
++	}
++	if (!res.start) {
++		dev_err(&ofdev->dev, "invalid 'reg' property in device tree\n");
++		kfree(qe_port);
++		return -EINVAL;
++	}
++	qe_port->port.mapbase = res.start;
++
++	/* Get the UCC number (device ID) */
++	/* UCCs are numbered 1-7 */
++	iprop = of_get_property(np, "device-id", NULL);
++	if (!iprop || (*iprop < 1) || (*iprop > UCC_MAX_NUM)) {
++		dev_err(&ofdev->dev,
++			"missing or invalid UCC specified in device tree\n");
++		kfree(qe_port);
++		return -ENODEV;
++	}
++	qe_port->ucc_num = *iprop - 1;
++
++	/*
++	 * In the future, we should not require the BRG to be specified in the
++	 * device tree.  If no clock-source is specified, then just pick a BRG
++	 * to use.  This requires a new QE library function that manages BRG
++	 * assignments.
++	 */
++
++	sprop = of_get_property(np, "rx-clock-name", NULL);
++	if (!sprop) {
++		dev_err(&ofdev->dev, "missing rx-clock-name in device tree\n");
++		kfree(qe_port);
++		return -ENODEV;
++	}
++
++	qe_port->us_info.rx_clock = qe_clock_source(sprop);
++	if ((qe_port->us_info.rx_clock < QE_BRG1) ||
++	    (qe_port->us_info.rx_clock > QE_BRG16)) {
++		dev_err(&ofdev->dev, "rx-clock-name must be a BRG for UART\n");
++		kfree(qe_port);
++		return -ENODEV;
++	}
++
++#ifdef LOOPBACK
++	/* In internal loopback mode, TX and RX must use the same clock */
++	qe_port->us_info.tx_clock = qe_port->us_info.rx_clock;
++#else
++	sprop = of_get_property(np, "tx-clock-name", NULL);
++	if (!sprop) {
++		dev_err(&ofdev->dev, "missing tx-clock-name in device tree\n");
++		kfree(qe_port);
++		return -ENODEV;
++	}
++	qe_port->us_info.tx_clock = qe_clock_source(sprop);
++#endif
++	if ((qe_port->us_info.tx_clock < QE_BRG1) ||
++	    (qe_port->us_info.tx_clock > QE_BRG16)) {
++		dev_err(&ofdev->dev, "tx-clock-name must be a BRG for UART\n");
++		kfree(qe_port);
++		return -ENODEV;
++	}
++
++	/* Get the port number, numbered 0-3 */
++	iprop = of_get_property(np, "port-number", NULL);
++	if (!iprop) {
++		dev_err(&ofdev->dev, "missing port-number in device tree\n");
++		kfree(qe_port);
++		return -EINVAL;
++	}
++	qe_port->port.line = *iprop;
++	if (qe_port->port.line >= UCC_MAX_UART) {
++		dev_err(&ofdev->dev, "port-number must be 0-%u\n",
++			UCC_MAX_UART - 1);
++		kfree(qe_port);
++		return -EINVAL;
++	}
++
++	qe_port->port.irq = irq_of_parse_and_map(np, 0);
++	if (qe_port->port.irq == NO_IRQ) {
++		dev_err(&ofdev->dev, "could not map IRQ for UCC%u\n",
++		       qe_port->ucc_num + 1);
++		kfree(qe_port);
++		return -EINVAL;
++	}
++
++	/*
++	 * Newer device trees have an "fsl,qe" compatible property for the QE
++	 * node, but we still need to support older device trees.
++	 */
++	np = of_find_compatible_node(NULL, NULL, "fsl,qe");
++	if (!np) {
++		np = of_find_node_by_type(NULL, "qe");
++		if (!np) {
++			dev_err(&ofdev->dev, "could not find 'qe' node\n");
++			kfree(qe_port);
++			return -EINVAL;
++		}
++	}
++
++	iprop = of_get_property(np, "brg-frequency", NULL);
++	if (!iprop) {
++		dev_err(&ofdev->dev,
++		       "missing brg-frequency in device tree\n");
++		kfree(qe_port);
++		return -EINVAL;
++	}
++
++	if (*iprop)
++		qe_port->port.uartclk = *iprop;
++	else {
++		/*
++		 * Older versions of U-Boot do not initialize the brg-frequency
++		 * property, so in this case we assume the BRG frequency is
++		 * half the QE bus frequency.
++		 */
++		iprop = of_get_property(np, "bus-frequency", NULL);
++		if (!iprop) {
++			dev_err(&ofdev->dev,
++				"missing QE bus-frequency in device tree\n");
++			kfree(qe_port);
++			return -EINVAL;
++		}
++		if (*iprop)
++			qe_port->port.uartclk = *iprop / 2;
++		else {
++			dev_err(&ofdev->dev,
++				"invalid QE bus-frequency in device tree\n");
++			kfree(qe_port);
++			return -EINVAL;
++		}
++	}
++
++	spin_lock_init(&qe_port->port.lock);
++	qe_port->np = np;
++	qe_port->port.dev = &ofdev->dev;
++	qe_port->port.ops = &qe_uart_pops;
++	qe_port->port.iotype = UPIO_MEM;
++
++	qe_port->tx_nrfifos = TX_NUM_FIFO;
++	qe_port->tx_fifosize = TX_BUF_SIZE;
++	qe_port->rx_nrfifos = RX_NUM_FIFO;
++	qe_port->rx_fifosize = RX_BUF_SIZE;
++
++	qe_port->wait_closing = UCC_WAIT_CLOSING;
++	qe_port->port.fifosize = 512;
++	qe_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
++
++	qe_port->us_info.ucc_num = qe_port->ucc_num;
++	qe_port->us_info.regs = (phys_addr_t) res.start;
++	qe_port->us_info.irq = qe_port->port.irq;
++
++	qe_port->us_info.rx_bd_ring_len = qe_port->rx_nrfifos;
++	qe_port->us_info.tx_bd_ring_len = qe_port->tx_nrfifos;
++
++	/* Make sure ucc_slow_init() initializes both TX and RX */
++	qe_port->us_info.init_tx = 1;
++	qe_port->us_info.init_rx = 1;
++
++	/* Add the port to the uart sub-system.  This will cause
++	 * qe_uart_config_port() to be called, so the us_info structure must
++	 * be initialized.
++	 */
++	ret = uart_add_one_port(&ucc_uart_driver, &qe_port->port);
++	if (ret) {
++		dev_err(&ofdev->dev, "could not add /dev/ttyQE%u\n",
++		       qe_port->port.line);
++		kfree(qe_port);
++		return ret;
++	}
++
++	dev_set_drvdata(&ofdev->dev, qe_port);
++
++	dev_info(&ofdev->dev, "UCC%u assigned to /dev/ttyQE%u\n",
++		qe_port->ucc_num + 1, qe_port->port.line);
++
++	/* Display the mknod command for this device */
++	dev_dbg(&ofdev->dev, "mknod command is 'mknod /dev/ttyQE%u c %u %u'\n",
++	       qe_port->port.line, SERIAL_QE_MAJOR,
++	       SERIAL_QE_MINOR + qe_port->port.line);
++
++	return 0;
++}
++
++static int ucc_uart_remove(struct of_device *ofdev)
++{
++	struct uart_qe_port *qe_port = dev_get_drvdata(&ofdev->dev);
++
++	dev_info(&ofdev->dev, "removing /dev/ttyQE%u\n", qe_port->port.line);
++
++	uart_remove_one_port(&ucc_uart_driver, &qe_port->port);
++
++	dev_set_drvdata(&ofdev->dev, NULL);
++	kfree(qe_port);
++
++	return 0;
++}
++
++static struct of_device_id ucc_uart_match[] = {
++	{
++		.type = "serial",
++		.compatible = "ucc_uart",
++	},
++	{},
++};
++MODULE_DEVICE_TABLE(of, ucc_uart_match);
++
++static struct of_platform_driver ucc_uart_of_driver = {
++	.owner  	= THIS_MODULE,
++	.name   	= "ucc_uart",
++	.match_table    = ucc_uart_match,
++	.probe  	= ucc_uart_probe,
++	.remove 	= ucc_uart_remove,
++};
++
++static int __init ucc_uart_init(void)
++{
++	int ret;
++
++	printk(KERN_INFO "Freescale QUICC Engine UART device driver\n");
++#ifdef LOOPBACK
++	printk(KERN_INFO "ucc-uart: Using loopback mode\n");
++#endif
++
++	ret = uart_register_driver(&ucc_uart_driver);
++	if (ret) {
++		printk(KERN_ERR "ucc-uart: could not register UART driver\n");
++		return ret;
++	}
++
++	ret = of_register_platform_driver(&ucc_uart_of_driver);
++	if (ret)
++		printk(KERN_ERR
++		       "ucc-uart: could not register platform driver\n");
++
++	return ret;
++}
++
++static void __exit ucc_uart_exit(void)
++{
++	printk(KERN_INFO
++	       "Freescale QUICC Engine UART device driver unloading\n");
++
++	of_unregister_platform_driver(&ucc_uart_of_driver);
++	uart_unregister_driver(&ucc_uart_driver);
++}
++
++module_init(ucc_uart_init);
++module_exit(ucc_uart_exit);
++
++MODULE_DESCRIPTION("Freescale QUICC Engine (QE) UART");
++MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_QE_MAJOR);
++
 diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
 index abf0504..aaaea81 100644
 --- a/drivers/spi/Kconfig
@@ -555515,6 +703823,113 @@
  	help
  	  This enables using a PXA2xx SSP port as a SPI master controller.
  	  The driver can be configured to use any SSP port and additional
+diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
+index 7051e6c..253ed56 100644
+--- a/drivers/spi/mpc52xx_psc_spi.c
++++ b/drivers/spi/mpc52xx_psc_spi.c
+@@ -330,75 +330,13 @@ static void mpc52xx_psc_spi_cleanup(struct spi_device *spi)
+ 
+ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
+ {
+-	struct mpc52xx_cdm __iomem *cdm;
+-	struct mpc52xx_gpio __iomem *gpio;
+ 	struct mpc52xx_psc __iomem *psc = mps->psc;
+-	u32 ul;
+ 	u32 mclken_div;
+ 	int ret = 0;
+ 
+-#if defined(CONFIG_PPC_MERGE)
+-	cdm = mpc52xx_find_and_map("mpc5200-cdm");
+-	gpio = mpc52xx_find_and_map("mpc5200-gpio");
+-#else
+-	cdm = ioremap(MPC52xx_PA(MPC52xx_CDM_OFFSET), MPC52xx_CDM_SIZE);
+-	gpio = ioremap(MPC52xx_PA(MPC52xx_GPIO_OFFSET), MPC52xx_GPIO_SIZE);
+-#endif
+-	if (!cdm || !gpio) {
+-		printk(KERN_ERR "Error mapping CDM/GPIO\n");
+-		ret = -EFAULT;
+-		goto unmap_regs;
+-	}
+-
+ 	/* default sysclk is 512MHz */
+-	mclken_div = 0x8000 |
+-		(((mps->sysclk ? mps->sysclk : 512000000) / MCLK) & 0x1FF);
+-
+-	switch (psc_id) {
+-	case 1:
+-		ul = in_be32(&gpio->port_config);
+-		ul &= 0xFFFFFFF8;
+-		ul |= 0x00000006;
+-		out_be32(&gpio->port_config, ul);
+-		out_be16(&cdm->mclken_div_psc1, mclken_div);
+-		ul = in_be32(&cdm->clk_enables);
+-		ul |= 0x00000020;
+-		out_be32(&cdm->clk_enables, ul);
+-		break;
+-	case 2:
+-		ul = in_be32(&gpio->port_config);
+-		ul &= 0xFFFFFF8F;
+-		ul |= 0x00000060;
+-		out_be32(&gpio->port_config, ul);
+-		out_be16(&cdm->mclken_div_psc2, mclken_div);
+-		ul = in_be32(&cdm->clk_enables);
+-		ul |= 0x00000040;
+-		out_be32(&cdm->clk_enables, ul);
+-		break;
+-	case 3:
+-		ul = in_be32(&gpio->port_config);
+-		ul &= 0xFFFFF0FF;
+-		ul |= 0x00000600;
+-		out_be32(&gpio->port_config, ul);
+-		out_be16(&cdm->mclken_div_psc3, mclken_div);
+-		ul = in_be32(&cdm->clk_enables);
+-		ul |= 0x00000080;
+-		out_be32(&cdm->clk_enables, ul);
+-		break;
+-	case 6:
+-		ul = in_be32(&gpio->port_config);
+-		ul &= 0xFF8FFFFF;
+-		ul |= 0x00700000;
+-		out_be32(&gpio->port_config, ul);
+-		out_be16(&cdm->mclken_div_psc6, mclken_div);
+-		ul = in_be32(&cdm->clk_enables);
+-		ul |= 0x00000010;
+-		out_be32(&cdm->clk_enables, ul);
+-		break;
+-	default:
+-		ret = -EINVAL;
+-		goto unmap_regs;
+-	}
++	mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK;
++	mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
+ 
+ 	/* Reset the PSC into a known state */
+ 	out_8(&psc->command, MPC52xx_PSC_RST_RX);
+@@ -422,12 +360,6 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
+ 
+ 	mps->bits_per_word = 8;
+ 
+-unmap_regs:
+-	if (cdm)
+-		iounmap(cdm);
+-	if (gpio)
+-		iounmap(gpio);
+-
+ 	return ret;
+ }
+ 
+@@ -623,8 +555,9 @@ static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)
+ }
+ 
+ static struct of_device_id mpc52xx_psc_spi_of_match[] = {
+-	{ .type = "spi", .compatible = "mpc5200-psc-spi", },
+-	{},
++	{ .compatible = "fsl,mpc5200-psc-spi", },
++	{ .compatible = "mpc5200-psc-spi", }, /* old */
++	{}
+ };
+ 
+ MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
 diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
 index 1c2ab54..eb817b8 100644
 --- a/drivers/spi/pxa2xx_spi.c
@@ -555883,6 +704298,23 @@
  	return master;
  }
  EXPORT_SYMBOL_GPL(spi_busnum_to_master);
+diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
+index 4580b9c..04f7cd9 100644
+--- a/drivers/spi/spi_mpc83xx.c
++++ b/drivers/spi/spi_mpc83xx.c
+@@ -436,11 +436,7 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
+ 	mpc83xx_spi->qe_mode = pdata->qe_mode;
+ 	mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
+ 	mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+-
+-	if (mpc83xx_spi->qe_mode)
+-		mpc83xx_spi->spibrg = pdata->sysclk / 2;
+-	else
+-		mpc83xx_spi->spibrg = pdata->sysclk;
++	mpc83xx_spi->spibrg = pdata->sysclk;
+ 
+ 	mpc83xx_spi->rx_shift = 0;
+ 	mpc83xx_spi->tx_shift = 0;
 diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
 index f145d8a..1a31f7a 100644
 --- a/drivers/ssb/b43_pci_bridge.c
@@ -556799,6 +705231,19 @@
  	.class = I2C_CLASS_HWMON,
  	.attach_adapter = isp1301_probe,
  	.detach_client = isp1301_detach,
+diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
+index 0c3e6b7..a672527 100644
+--- a/drivers/usb/host/ohci-ppc-of.c
++++ b/drivers/usb/host/ohci-ppc-of.c
+@@ -136,6 +136,8 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
+ 	ohci = hcd_to_ohci(hcd);
+ 	if (is_bigendian) {
+ 		ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
++		if (of_device_is_compatible(dn, "fsl,mpc5200-ohci"))
++			ohci->flags |= OHCI_QUIRK_FRAME_NO;
+ 		if (of_device_is_compatible(dn, "mpc5200-ohci"))
+ 			ohci->flags |= OHCI_QUIRK_FRAME_NO;
+ 	}
 diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
 index 23d2fe5..ff9a798 100644
 --- a/drivers/usb/host/ohci-pxa27x.c
@@ -556915,7 +705360,7 @@
  	}
  
 diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
-index 49ba6c0..178e8c2 100644
+index 49ba6c0..0db4886 100644
 --- a/drivers/usb/storage/isd200.c
 +++ b/drivers/usb/storage/isd200.c
 @@ -49,6 +49,7 @@
@@ -556953,14 +705398,14 @@
 +		sg_init_one(&info->sg, buff, bufflen);
 +
 +	srb->sc_data_direction = dir;
-+	srb->request_buffer = buff ? &info->sg : NULL;
-+	srb->request_bufflen = bufflen;
-+	srb->use_sg = buff ? 1 : 0;
++	srb->sdb.table.sgl = buff ? &info->sg : NULL;
++	srb->sdb.length = bufflen;
++	srb->sdb.table.nents = buff ? 1 : 0;
 +}
 +
 +static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen)
 +{
-+	srb->request_bufflen = bufflen;
++	srb->sdb.length = bufflen;
 +}
 +
  
@@ -557855,7 +706300,7 @@
  /**
   * Insmod parameters
 diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
-index 52dff40..899fc13 100644
+index 52dff40..afcdc69 100644
 --- a/drivers/watchdog/Kconfig
 +++ b/drivers/watchdog/Kconfig
 @@ -223,7 +223,7 @@ config DAVINCI_WATCHDOG
@@ -557867,6 +706312,15 @@
  	help
  	  Watchdog timer embedded into AT32AP700x devices. This will reboot
  	  your system when the timeout is reached.
+@@ -609,7 +609,7 @@ config SBC_EPX_C3_WATCHDOG
+ 
+ config INDYDOG
+ 	tristate "Indy/I2 Hardware Watchdog"
+-	depends on SGI_IP22
++	depends on SGI_HAS_INDYDOG
+ 	help
+ 	  Hardware driver for the Indy's/I2's watchdog. This is a
+ 	  watchdog timer that will reboot the machine after a 60 second
 @@ -639,6 +639,12 @@ config AR7_WDT
  	help
  	  Hardware driver for the TI AR7 Watchdog Timer.
@@ -561898,11 +710352,831 @@
  }
  
  core_initcall(debugfs_init);
+diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
+index 4675455..ff97ba9 100644
+--- a/fs/dlm/dir.c
++++ b/fs/dlm/dir.c
+@@ -49,7 +49,7 @@ static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
+ 	spin_unlock(&ls->ls_recover_list_lock);
+ 
+ 	if (!found)
+-		de = allocate_direntry(ls, len);
++		de = kzalloc(sizeof(struct dlm_direntry) + len, GFP_KERNEL);
+ 	return de;
+ }
+ 
+@@ -62,7 +62,7 @@ void dlm_clear_free_entries(struct dlm_ls *ls)
+ 		de = list_entry(ls->ls_recover_list.next, struct dlm_direntry,
+ 				list);
+ 		list_del(&de->list);
+-		free_direntry(de);
++		kfree(de);
+ 	}
+ 	spin_unlock(&ls->ls_recover_list_lock);
+ }
+@@ -171,7 +171,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
+ 	}
+ 
+ 	list_del(&de->list);
+-	free_direntry(de);
++	kfree(de);
+  out:
+ 	write_unlock(&ls->ls_dirtbl[bucket].lock);
+ }
+@@ -302,7 +302,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
+ 
+ 	write_unlock(&ls->ls_dirtbl[bucket].lock);
+ 
+-	de = allocate_direntry(ls, namelen);
++	de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_KERNEL);
+ 	if (!de)
+ 		return -ENOMEM;
+ 
+@@ -313,7 +313,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
+ 	write_lock(&ls->ls_dirtbl[bucket].lock);
+ 	tmp = search_bucket(ls, name, namelen, bucket);
+ 	if (tmp) {
+-		free_direntry(de);
++		kfree(de);
+ 		de = tmp;
+ 	} else {
+ 		list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
+@@ -329,49 +329,47 @@ int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
+ 	return get_entry(ls, nodeid, name, namelen, r_nodeid);
+ }
+ 
+-/* Copy the names of master rsb's into the buffer provided.
+-   Only select names whose dir node is the given nodeid. */
++static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len)
++{
++	struct dlm_rsb *r;
++
++	down_read(&ls->ls_root_sem);
++	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
++		if (len == r->res_length && !memcmp(name, r->res_name, len)) {
++			up_read(&ls->ls_root_sem);
++			return r;
++		}
++	}
++	up_read(&ls->ls_root_sem);
++	return NULL;
++}
++
++/* Find the rsb where we left off (or start again), then send rsb names
++   for rsb's we're master of and whose directory node matches the requesting
++   node.  inbuf is the rsb name last sent, inlen is the name's length */
+ 
+ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
+  			   char *outbuf, int outlen, int nodeid)
+ {
+ 	struct list_head *list;
+-	struct dlm_rsb *start_r = NULL, *r = NULL;
+-	int offset = 0, start_namelen, error, dir_nodeid;
+-	char *start_name;
++	struct dlm_rsb *r;
++	int offset = 0, dir_nodeid;
+ 	uint16_t be_namelen;
+ 
+-	/*
+-	 * Find the rsb where we left off (or start again)
+-	 */
+-
+-	start_namelen = inlen;
+-	start_name = inbuf;
+-
+-	if (start_namelen > 1) {
+-		/*
+-		 * We could also use a find_rsb_root() function here that
+-		 * searched the ls_root_list.
+-		 */
+-		error = dlm_find_rsb(ls, start_name, start_namelen, R_MASTER,
+-				     &start_r);
+-		DLM_ASSERT(!error && start_r,
+-			   printk("error %d\n", error););
+-		DLM_ASSERT(!list_empty(&start_r->res_root_list),
+-			   dlm_print_rsb(start_r););
+-		dlm_put_rsb(start_r);
+-	}
+-
+-	/*
+-	 * Send rsb names for rsb's we're master of and whose directory node
+-	 * matches the requesting node.
+-	 */
+-
+ 	down_read(&ls->ls_root_sem);
+-	if (start_r)
+-		list = start_r->res_root_list.next;
+-	else
++
++	if (inlen > 1) {
++		r = find_rsb_root(ls, inbuf, inlen);
++		if (!r) {
++			inbuf[inlen - 1] = '\0';
++			log_error(ls, "copy_master_names from %d start %d %s",
++				  nodeid, inlen, inbuf);
++			goto out;
++		}
++		list = r->res_root_list.next;
++	} else {
+ 		list = ls->ls_root_list.next;
++	}
+ 
+ 	for (offset = 0; list != &ls->ls_root_list; list = list->next) {
+ 		r = list_entry(list, struct dlm_rsb, res_root_list);
+diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
+index d2fc238..ec61bba 100644
+--- a/fs/dlm/dlm_internal.h
++++ b/fs/dlm/dlm_internal.h
+@@ -570,5 +570,21 @@ static inline int dlm_no_directory(struct dlm_ls *ls)
+ 	return (ls->ls_exflags & DLM_LSFL_NODIR) ? 1 : 0;
+ }
+ 
++int dlm_netlink_init(void);
++void dlm_netlink_exit(void);
++void dlm_timeout_warn(struct dlm_lkb *lkb);
++
++#ifdef CONFIG_DLM_DEBUG
++int dlm_register_debugfs(void);
++void dlm_unregister_debugfs(void);
++int dlm_create_debug_file(struct dlm_ls *ls);
++void dlm_delete_debug_file(struct dlm_ls *ls);
++#else
++static inline int dlm_register_debugfs(void) { return 0; }
++static inline void dlm_unregister_debugfs(void) { }
++static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
++static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
++#endif
++
+ #endif				/* __DLM_INTERNAL_DOT_H__ */
+ 
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index 3915b8e..ff4a198 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1,7 +1,7 @@
+ /******************************************************************************
+ *******************************************************************************
+ **
+-**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
++**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+ **
+ **  This copyrighted material is made available to anyone wishing to use,
+ **  modify, copy, or redistribute it subject to the terms and conditions
+@@ -88,7 +88,6 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ static int receive_extralen(struct dlm_message *ms);
+ static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
+ static void del_timeout(struct dlm_lkb *lkb);
+-void dlm_timeout_warn(struct dlm_lkb *lkb);
+ 
+ /*
+  * Lock compatibilty matrix - thanks Steve
+@@ -335,7 +334,7 @@ static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
+ {
+ 	struct dlm_rsb *r;
+ 
+-	r = allocate_rsb(ls, len);
++	r = dlm_allocate_rsb(ls, len);
+ 	if (!r)
+ 		return NULL;
+ 
+@@ -478,7 +477,7 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
+ 	error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
+ 	if (!error) {
+ 		write_unlock(&ls->ls_rsbtbl[bucket].lock);
+-		free_rsb(r);
++		dlm_free_rsb(r);
+ 		r = tmp;
+ 		goto out;
+ 	}
+@@ -490,12 +489,6 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
+ 	return error;
+ }
+ 
+-int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
+-		 unsigned int flags, struct dlm_rsb **r_ret)
+-{
+-	return find_rsb(ls, name, namelen, flags, r_ret);
+-}
+-
+ /* This is only called to add a reference when the code already holds
+    a valid reference to the rsb, so there's no need for locking. */
+ 
+@@ -519,7 +512,7 @@ static void toss_rsb(struct kref *kref)
+ 	list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
+ 	r->res_toss_time = jiffies;
+ 	if (r->res_lvbptr) {
+-		free_lvb(r->res_lvbptr);
++		dlm_free_lvb(r->res_lvbptr);
+ 		r->res_lvbptr = NULL;
+ 	}
+ }
+@@ -589,7 +582,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
+ 	uint32_t lkid = 0;
+ 	uint16_t bucket;
+ 
+-	lkb = allocate_lkb(ls);
++	lkb = dlm_allocate_lkb(ls);
+ 	if (!lkb)
+ 		return -ENOMEM;
+ 
+@@ -683,8 +676,8 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
+ 
+ 		/* for local/process lkbs, lvbptr points to caller's lksb */
+ 		if (lkb->lkb_lvbptr && is_master_copy(lkb))
+-			free_lvb(lkb->lkb_lvbptr);
+-		free_lkb(lkb);
++			dlm_free_lvb(lkb->lkb_lvbptr);
++		dlm_free_lkb(lkb);
+ 		return 1;
+ 	} else {
+ 		write_unlock(&ls->ls_lkbtbl[bucket].lock);
+@@ -988,7 +981,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
+ 
+ 			if (is_master(r))
+ 				dir_remove(r);
+-			free_rsb(r);
++			dlm_free_rsb(r);
+ 			count++;
+ 		} else {
+ 			write_unlock(&ls->ls_rsbtbl[b].lock);
+@@ -1171,7 +1164,7 @@ static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ 			return;
+ 
+ 		if (!r->res_lvbptr)
+-			r->res_lvbptr = allocate_lvb(r->res_ls);
++			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
+ 
+ 		if (!r->res_lvbptr)
+ 			return;
+@@ -1203,7 +1196,7 @@ static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ 		return;
+ 
+ 	if (!r->res_lvbptr)
+-		r->res_lvbptr = allocate_lvb(r->res_ls);
++		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
+ 
+ 	if (!r->res_lvbptr)
+ 		return;
+@@ -1852,7 +1845,7 @@ static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ {
+ 	struct dlm_ls *ls = r->res_ls;
+-	int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
++	int i, error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
+ 
+ 	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
+ 		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
+@@ -1886,7 +1879,7 @@ static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ 		return 1;
+ 	}
+ 
+-	for (;;) {
++	for (i = 0; i < 2; i++) {
+ 		/* It's possible for dlm_scand to remove an old rsb for
+ 		   this same resource from the toss list, us to create
+ 		   a new one, look up the master locally, and find it
+@@ -1900,6 +1893,8 @@ static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ 		log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
+ 		schedule();
+ 	}
++	if (error && error != -EEXIST)
++		return error;
+ 
+ 	if (ret_nodeid == our_nodeid) {
+ 		r->res_first_lkid = 0;
+@@ -1941,8 +1936,11 @@ static void confirm_master(struct dlm_rsb *r, int error)
+ 		break;
+ 
+ 	case -EAGAIN:
+-		/* the remote master didn't queue our NOQUEUE request;
+-		   make a waiting lkb the first_lkid */
++	case -EBADR:
++	case -ENOTBLK:
++		/* the remote request failed and won't be retried (it was
++		   a NOQUEUE, or has been canceled/unlocked); make a waiting
++		   lkb the first_lkid */
+ 
+ 		r->res_first_lkid = 0;
+ 
+@@ -2108,17 +2106,18 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
+ 	/* an lkb may be waiting for an rsb lookup to complete where the
+ 	   lookup was initiated by another lock */
+ 
+-	if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
+-		if (!list_empty(&lkb->lkb_rsb_lookup)) {
++	if (!list_empty(&lkb->lkb_rsb_lookup)) {
++		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
+ 			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
+ 			list_del_init(&lkb->lkb_rsb_lookup);
+ 			queue_cast(lkb->lkb_resource, lkb,
+ 				   args->flags & DLM_LKF_CANCEL ?
+ 				   -DLM_ECANCEL : -DLM_EUNLOCK);
+ 			unhold_lkb(lkb); /* undoes create_lkb() */
+-			rv = -EBUSY;
+-			goto out;
+ 		}
++		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
++		rv = -EBUSY;
++		goto out;
+ 	}
+ 
+ 	/* cancel not allowed with another cancel/unlock in progress */
+@@ -2986,7 +2985,7 @@ static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ 
+ 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
+ 		if (!lkb->lkb_lvbptr)
+-			lkb->lkb_lvbptr = allocate_lvb(ls);
++			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
+ 		if (!lkb->lkb_lvbptr)
+ 			return -ENOMEM;
+ 		len = receive_extralen(ms);
+@@ -3006,11 +3005,9 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ 	lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
+ 	lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);
+ 
+-	DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););
+-
+ 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
+ 		/* lkb was just created so there won't be an lvb yet */
+-		lkb->lkb_lvbptr = allocate_lvb(ls);
++		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
+ 		if (!lkb->lkb_lvbptr)
+ 			return -ENOMEM;
+ 	}
+@@ -3021,16 +3018,6 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ 				struct dlm_message *ms)
+ {
+-	if (lkb->lkb_nodeid != ms->m_header.h_nodeid) {
+-		log_error(ls, "convert_args nodeid %d %d lkid %x %x",
+-			  lkb->lkb_nodeid, ms->m_header.h_nodeid,
+-			  lkb->lkb_id, lkb->lkb_remid);
+-		return -EINVAL;
+-	}
+-
+-	if (!is_master_copy(lkb))
+-		return -EINVAL;
+-
+ 	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
+ 		return -EBUSY;
+ 
+@@ -3046,8 +3033,6 @@ static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ 			       struct dlm_message *ms)
+ {
+-	if (!is_master_copy(lkb))
+-		return -EINVAL;
+ 	if (receive_lvb(ls, lkb, ms))
+ 		return -ENOMEM;
+ 	return 0;
+@@ -3063,6 +3048,50 @@ static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
+ 	lkb->lkb_remid = ms->m_lkid;
+ }
+ 
++/* This is called after the rsb is locked so that we can safely inspect
++   fields in the lkb. */
++
++static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
++{
++	int from = ms->m_header.h_nodeid;
++	int error = 0;
++
++	switch (ms->m_type) {
++	case DLM_MSG_CONVERT:
++	case DLM_MSG_UNLOCK:
++	case DLM_MSG_CANCEL:
++		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
++			error = -EINVAL;
++		break;
++
++	case DLM_MSG_CONVERT_REPLY:
++	case DLM_MSG_UNLOCK_REPLY:
++	case DLM_MSG_CANCEL_REPLY:
++	case DLM_MSG_GRANT:
++	case DLM_MSG_BAST:
++		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
++			error = -EINVAL;
++		break;
++
++	case DLM_MSG_REQUEST_REPLY:
++		if (!is_process_copy(lkb))
++			error = -EINVAL;
++		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
++			error = -EINVAL;
++		break;
++
++	default:
++		error = -EINVAL;
++	}
++
++	if (error)
++		log_error(lkb->lkb_resource->res_ls,
++			  "ignore invalid message %d from %d %x %x %x %d",
++			  ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
++			  lkb->lkb_flags, lkb->lkb_nodeid);
++	return error;
++}
++
+ static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
+ {
+ 	struct dlm_lkb *lkb;
+@@ -3124,17 +3153,21 @@ static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
++
+ 	receive_flags(lkb, ms);
+ 	error = receive_convert_args(ls, lkb, ms);
+ 	if (error)
+-		goto out;
++		goto out_reply;
+ 	reply = !down_conversion(lkb);
+ 
+ 	error = do_convert(r, lkb);
+- out:
++ out_reply:
+ 	if (reply)
+ 		send_convert_reply(r, lkb, error);
+-
++ out:
+ 	unlock_rsb(r);
+ 	put_rsb(r);
+ 	dlm_put_lkb(lkb);
+@@ -3160,15 +3193,19 @@ static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
++
+ 	receive_flags(lkb, ms);
+ 	error = receive_unlock_args(ls, lkb, ms);
+ 	if (error)
+-		goto out;
++		goto out_reply;
+ 
+ 	error = do_unlock(r, lkb);
+- out:
++ out_reply:
+ 	send_unlock_reply(r, lkb, error);
+-
++ out:
+ 	unlock_rsb(r);
+ 	put_rsb(r);
+ 	dlm_put_lkb(lkb);
+@@ -3196,9 +3233,13 @@ static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
++
+ 	error = do_cancel(r, lkb);
+ 	send_cancel_reply(r, lkb, error);
+-
++ out:
+ 	unlock_rsb(r);
+ 	put_rsb(r);
+ 	dlm_put_lkb(lkb);
+@@ -3217,22 +3258,26 @@ static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
+ 
+ 	error = find_lkb(ls, ms->m_remid, &lkb);
+ 	if (error) {
+-		log_error(ls, "receive_grant no lkb");
++		log_debug(ls, "receive_grant from %d no lkb %x",
++			  ms->m_header.h_nodeid, ms->m_remid);
+ 		return;
+ 	}
+-	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+ 
+ 	r = lkb->lkb_resource;
+ 
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
++
+ 	receive_flags_reply(lkb, ms);
+ 	if (is_altmode(lkb))
+ 		munge_altmode(lkb, ms);
+ 	grant_lock_pc(r, lkb, ms);
+ 	queue_cast(r, lkb, 0);
+-
++ out:
+ 	unlock_rsb(r);
+ 	put_rsb(r);
+ 	dlm_put_lkb(lkb);
+@@ -3246,18 +3291,22 @@ static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
+ 
+ 	error = find_lkb(ls, ms->m_remid, &lkb);
+ 	if (error) {
+-		log_error(ls, "receive_bast no lkb");
++		log_debug(ls, "receive_bast from %d no lkb %x",
++			  ms->m_header.h_nodeid, ms->m_remid);
+ 		return;
+ 	}
+-	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+ 
+ 	r = lkb->lkb_resource;
+ 
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
+-	queue_bast(r, lkb, ms->m_bastmode);
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
+ 
++	queue_bast(r, lkb, ms->m_bastmode);
++ out:
+ 	unlock_rsb(r);
+ 	put_rsb(r);
+ 	dlm_put_lkb(lkb);
+@@ -3323,15 +3372,19 @@ static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ 
+ 	error = find_lkb(ls, ms->m_remid, &lkb);
+ 	if (error) {
+-		log_error(ls, "receive_request_reply no lkb");
++		log_debug(ls, "receive_request_reply from %d no lkb %x",
++			  ms->m_header.h_nodeid, ms->m_remid);
+ 		return;
+ 	}
+-	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+ 
+ 	r = lkb->lkb_resource;
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
++
+ 	mstype = lkb->lkb_wait_type;
+ 	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
+ 	if (error)
+@@ -3383,6 +3436,7 @@ static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ 		if (is_overlap(lkb)) {
+ 			/* we'll ignore error in cancel/unlock reply */
+ 			queue_cast_overlap(r, lkb);
++			confirm_master(r, result);
+ 			unhold_lkb(lkb); /* undoes create_lkb() */
+ 		} else
+ 			_request_lock(r, lkb);
+@@ -3463,6 +3517,10 @@ static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
++
+ 	/* stub reply can happen with waiters_mutex held */
+ 	error = remove_from_waiters_ms(lkb, ms);
+ 	if (error)
+@@ -3481,10 +3539,10 @@ static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ 
+ 	error = find_lkb(ls, ms->m_remid, &lkb);
+ 	if (error) {
+-		log_error(ls, "receive_convert_reply no lkb");
++		log_debug(ls, "receive_convert_reply from %d no lkb %x",
++			  ms->m_header.h_nodeid, ms->m_remid);
+ 		return;
+ 	}
+-	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+ 
+ 	_receive_convert_reply(lkb, ms);
+ 	dlm_put_lkb(lkb);
+@@ -3498,6 +3556,10 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
++
+ 	/* stub reply can happen with waiters_mutex held */
+ 	error = remove_from_waiters_ms(lkb, ms);
+ 	if (error)
+@@ -3529,10 +3591,10 @@ static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ 
+ 	error = find_lkb(ls, ms->m_remid, &lkb);
+ 	if (error) {
+-		log_error(ls, "receive_unlock_reply no lkb");
++		log_debug(ls, "receive_unlock_reply from %d no lkb %x",
++			  ms->m_header.h_nodeid, ms->m_remid);
+ 		return;
+ 	}
+-	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+ 
+ 	_receive_unlock_reply(lkb, ms);
+ 	dlm_put_lkb(lkb);
+@@ -3546,6 +3608,10 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+ 	hold_rsb(r);
+ 	lock_rsb(r);
+ 
++	error = validate_message(lkb, ms);
++	if (error)
++		goto out;
++
+ 	/* stub reply can happen with waiters_mutex held */
+ 	error = remove_from_waiters_ms(lkb, ms);
+ 	if (error)
+@@ -3577,10 +3643,10 @@ static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ 
+ 	error = find_lkb(ls, ms->m_remid, &lkb);
+ 	if (error) {
+-		log_error(ls, "receive_cancel_reply no lkb");
++		log_debug(ls, "receive_cancel_reply from %d no lkb %x",
++			  ms->m_header.h_nodeid, ms->m_remid);
+ 		return;
+ 	}
+-	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+ 
+ 	_receive_cancel_reply(lkb, ms);
+ 	dlm_put_lkb(lkb);
+@@ -3640,6 +3706,13 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ 
+ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
+ {
++	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
++		log_debug(ls, "ignore non-member message %d from %d %x %x %d",
++			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
++			  ms->m_remid, ms->m_result);
++		return;
++	}
++
+ 	switch (ms->m_type) {
+ 
+ 	/* messages sent to a master node */
+@@ -3778,8 +3851,9 @@ void dlm_receive_buffer(struct dlm_header *hd, int nodeid)
+ 
+ 	ls = dlm_find_lockspace_global(hd->h_lockspace);
+ 	if (!ls) {
+-		log_print("invalid h_lockspace %x from %d cmd %d type %d",
+-			  hd->h_lockspace, nodeid, hd->h_cmd, type);
++		if (dlm_config.ci_log_debug)
++			log_print("invalid lockspace %x from %d cmd %d type %d",
++				  hd->h_lockspace, nodeid, hd->h_cmd, type);
+ 
+ 		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
+ 			dlm_send_ls_not_ready(nodeid, rc);
+@@ -3806,6 +3880,7 @@ static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
+ 		ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
+ 		ls->ls_stub_ms.m_result = -EINPROGRESS;
+ 		ls->ls_stub_ms.m_flags = lkb->lkb_flags;
++		ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
+ 		_receive_convert_reply(lkb, &ls->ls_stub_ms);
+ 
+ 		/* Same special case as in receive_rcom_lock_args() */
+@@ -3847,6 +3922,7 @@ static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
+ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ {
+ 	struct dlm_lkb *lkb, *safe;
++	int wait_type, stub_unlock_result, stub_cancel_result;
+ 
+ 	mutex_lock(&ls->ls_waiters_mutex);
+ 
+@@ -3865,7 +3941,33 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ 		if (!waiter_needs_recovery(ls, lkb))
+ 			continue;
+ 
+-		switch (lkb->lkb_wait_type) {
++		wait_type = lkb->lkb_wait_type;
++		stub_unlock_result = -DLM_EUNLOCK;
++		stub_cancel_result = -DLM_ECANCEL;
++
++		/* Main reply may have been received leaving a zero wait_type,
++		   but a reply for the overlapping op may not have been
++		   received.  In that case we need to fake the appropriate
++		   reply for the overlap op. */
++
++		if (!wait_type) {
++			if (is_overlap_cancel(lkb)) {
++				wait_type = DLM_MSG_CANCEL;
++				if (lkb->lkb_grmode == DLM_LOCK_IV)
++					stub_cancel_result = 0;
++			}
++			if (is_overlap_unlock(lkb)) {
++				wait_type = DLM_MSG_UNLOCK;
++				if (lkb->lkb_grmode == DLM_LOCK_IV)
++					stub_unlock_result = -ENOENT;
++			}
++
++			log_debug(ls, "rwpre overlap %x %x %d %d %d",
++				  lkb->lkb_id, lkb->lkb_flags, wait_type,
++				  stub_cancel_result, stub_unlock_result);
++		}
++
++		switch (wait_type) {
+ 
+ 		case DLM_MSG_REQUEST:
+ 			lkb->lkb_flags |= DLM_IFL_RESEND;
+@@ -3878,8 +3980,9 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ 		case DLM_MSG_UNLOCK:
+ 			hold_lkb(lkb);
+ 			ls->ls_stub_ms.m_type = DLM_MSG_UNLOCK_REPLY;
+-			ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
++			ls->ls_stub_ms.m_result = stub_unlock_result;
+ 			ls->ls_stub_ms.m_flags = lkb->lkb_flags;
++			ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
+ 			_receive_unlock_reply(lkb, &ls->ls_stub_ms);
+ 			dlm_put_lkb(lkb);
+ 			break;
+@@ -3887,15 +3990,16 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ 		case DLM_MSG_CANCEL:
+ 			hold_lkb(lkb);
+ 			ls->ls_stub_ms.m_type = DLM_MSG_CANCEL_REPLY;
+-			ls->ls_stub_ms.m_result = -DLM_ECANCEL;
++			ls->ls_stub_ms.m_result = stub_cancel_result;
+ 			ls->ls_stub_ms.m_flags = lkb->lkb_flags;
++			ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
+ 			_receive_cancel_reply(lkb, &ls->ls_stub_ms);
+ 			dlm_put_lkb(lkb);
+ 			break;
+ 
+ 		default:
+-			log_error(ls, "invalid lkb wait_type %d",
+-				  lkb->lkb_wait_type);
++			log_error(ls, "invalid lkb wait_type %d %d",
++				  lkb->lkb_wait_type, wait_type);
+ 		}
+ 		schedule();
+ 	}
+@@ -4184,7 +4288,7 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ 	lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);
+ 
+ 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
+-		lkb->lkb_lvbptr = allocate_lvb(ls);
++		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
+ 		if (!lkb->lkb_lvbptr)
+ 			return -ENOMEM;
+ 		lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
+@@ -4259,7 +4363,7 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+ 	put_rsb(r);
+  out:
+ 	if (error)
+-		log_print("recover_master_copy %d %x", error, rl->rl_lkid);
++		log_debug(ls, "recover_master_copy %d %x", error, rl->rl_lkid);
+ 	rl->rl_result = error;
+ 	return error;
+ }
+@@ -4342,7 +4446,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
+ 		}
+ 	}
+ 
+-	/* After ua is attached to lkb it will be freed by free_lkb().
++	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
+ 	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
+ 	   lock and that lkb_astparam is the dlm_user_args structure. */
+ 
+@@ -4679,6 +4783,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
+ 	}
+ 
+ 	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
++		lkb->lkb_ast_type = 0;
+ 		list_del(&lkb->lkb_astqueue);
+ 		dlm_put_lkb(lkb);
+ 	}
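
The fs/dlm/lock.c hunks above replace hard DLM_ASSERT()s with a validate_message() check so that stale or misdirected messages (which can legitimately arrive around recovery) are logged and ignored instead of triggering an assertion. A minimal userspace sketch of that pattern follows; all names and types here are hypothetical, not the real dlm structures.

/* Sketch of the "validate then ignore" pattern: the handler looks up
 * the lock, checks the sender against the lock's recorded state, and
 * quietly drops mismatches rather than asserting. */
#include <stdio.h>

struct lock {
	unsigned int id;
	int owner_nodeid;	/* node we expect messages from */
};

struct message {
	int from_nodeid;
	unsigned int remid;
};

/* return 0 if the message is plausible for this lock, -1 otherwise */
static int validate_message(const struct lock *lkb, const struct message *ms)
{
	if (ms->from_nodeid != lkb->owner_nodeid)
		return -1;
	return 0;
}

static void receive_grant(struct lock *lkb, const struct message *ms)
{
	if (validate_message(lkb, ms)) {
		/* stale or misdirected message: log and ignore */
		fprintf(stderr, "ignore grant from %d for lock %x\n",
			ms->from_nodeid, ms->remid);
		return;
	}
	printf("grant applied to lock %x\n", lkb->id);
}

int main(void)
{
	struct lock lkb = { .id = 0x101, .owner_nodeid = 2 };
	struct message ok = { .from_nodeid = 2, .remid = 0x101 };
	struct message stale = { .from_nodeid = 5, .remid = 0x101 };

	receive_grant(&lkb, &ok);
	receive_grant(&lkb, &stale);
	return 0;
}
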
+diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
+index ada0468..27b6ed3 100644
+--- a/fs/dlm/lock.h
++++ b/fs/dlm/lock.h
+@@ -19,8 +19,6 @@ void dlm_print_lkb(struct dlm_lkb *lkb);
+ void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms);
+ void dlm_receive_buffer(struct dlm_header *hd, int nodeid);
+ int dlm_modes_compat(int mode1, int mode2);
+-int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
+-	unsigned int flags, struct dlm_rsb **r_ret);
+ void dlm_put_rsb(struct dlm_rsb *r);
+ void dlm_hold_rsb(struct dlm_rsb *r);
+ int dlm_put_lkb(struct dlm_lkb *lkb);
 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
-index 6353a83..5c108c4 100644
+index 6353a83..b180fdc 100644
 --- a/fs/dlm/lockspace.c
 +++ b/fs/dlm/lockspace.c
-@@ -166,26 +166,7 @@ static struct kobj_type dlm_ktype = {
+@@ -24,14 +24,6 @@
+ #include "recover.h"
+ #include "requestqueue.h"
+ 
+-#ifdef CONFIG_DLM_DEBUG
+-int dlm_create_debug_file(struct dlm_ls *ls);
+-void dlm_delete_debug_file(struct dlm_ls *ls);
+-#else
+-static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
+-static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
+-#endif
+-
+ static int			ls_count;
+ static struct mutex		ls_lock;
+ static struct list_head		lslist;
+@@ -166,26 +158,7 @@ static struct kobj_type dlm_ktype = {
  	.release       = lockspace_kobj_release,
  };
  
@@ -561930,7 +711204,7 @@
  
  static int do_uevent(struct dlm_ls *ls, int in)
  {
-@@ -220,24 +201,22 @@ static int do_uevent(struct dlm_ls *ls, int in)
+@@ -220,24 +193,22 @@ static int do_uevent(struct dlm_ls *ls, int in)
  
  int dlm_lockspace_init(void)
  {
@@ -561962,7 +711236,7 @@
  }
  
  static int dlm_scand(void *data)
-@@ -549,13 +528,12 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
+@@ -549,13 +520,12 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
  		goto out_delist;
  	}
  
@@ -561980,7 +711254,7 @@
  
  	/* let kobject handle freeing of ls if there's an error */
  	do_unreg = 1;
-@@ -601,7 +579,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
+@@ -601,7 +571,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
  	kfree(ls->ls_rsbtbl);
   out_lsfree:
  	if (do_unreg)
@@ -561989,7 +711263,37 @@
  	else
  		kfree(ls);
   out:
-@@ -750,7 +728,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
+@@ -706,9 +676,9 @@ static int release_lockspace(struct dlm_ls *ls, int force)
+ 			dlm_del_ast(lkb);
+ 
+ 			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
+-				free_lvb(lkb->lkb_lvbptr);
++				dlm_free_lvb(lkb->lkb_lvbptr);
+ 
+-			free_lkb(lkb);
++			dlm_free_lkb(lkb);
+ 		}
+ 	}
+ 	dlm_astd_resume();
+@@ -726,7 +696,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
+ 					 res_hashchain);
+ 
+ 			list_del(&rsb->res_hashchain);
+-			free_rsb(rsb);
++			dlm_free_rsb(rsb);
+ 		}
+ 
+ 		head = &ls->ls_rsbtbl[i].toss;
+@@ -734,7 +704,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
+ 			rsb = list_entry(head->next, struct dlm_rsb,
+ 					 res_hashchain);
+ 			list_del(&rsb->res_hashchain);
+-			free_rsb(rsb);
++			dlm_free_rsb(rsb);
+ 		}
+ 	}
+ 
+@@ -750,7 +720,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
  	dlm_clear_members(ls);
  	dlm_clear_members_gone(ls);
  	kfree(ls->ls_node_array);
@@ -561998,6 +711302,685 @@
  	/* The ls structure will be freed when the kobject is done with */
  
  	mutex_lock(&ls_lock);
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index e9923ca..7c1e5e5 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -864,7 +864,7 @@ static void sctp_init_assoc(struct connection *con)
+ static void tcp_connect_to_sock(struct connection *con)
+ {
+ 	int result = -EHOSTUNREACH;
+-	struct sockaddr_storage saddr;
++	struct sockaddr_storage saddr, src_addr;
+ 	int addr_len;
+ 	struct socket *sock;
+ 
+@@ -898,6 +898,17 @@ static void tcp_connect_to_sock(struct connection *con)
+ 	con->connect_action = tcp_connect_to_sock;
+ 	add_sock(sock, con);
+ 
++	/* Bind to our cluster-known address connecting to avoid
++	   routing problems */
++	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
++	make_sockaddr(&src_addr, 0, &addr_len);
++	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
++				 addr_len);
++	if (result < 0) {
++		log_print("could not bind for connect: %d", result);
++		/* This *may* not indicate a critical error */
++	}
++
+ 	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
+ 
+ 	log_print("connecting to %d", con->nodeid);
+@@ -1426,6 +1437,8 @@ void dlm_lowcomms_stop(void)
+ 		con = __nodeid2con(i, 0);
+ 		if (con) {
+ 			close_connection(con, true);
++			if (con->othercon)
++				kmem_cache_free(con_cache, con->othercon);
+ 			kmem_cache_free(con_cache, con);
+ 		}
+ 	}
+diff --git a/fs/dlm/main.c b/fs/dlm/main.c
+index eca2907..58487fb 100644
+--- a/fs/dlm/main.c
++++ b/fs/dlm/main.c
+@@ -18,16 +18,6 @@
+ #include "memory.h"
+ #include "config.h"
+ 
+-#ifdef CONFIG_DLM_DEBUG
+-int dlm_register_debugfs(void);
+-void dlm_unregister_debugfs(void);
+-#else
+-static inline int dlm_register_debugfs(void) { return 0; }
+-static inline void dlm_unregister_debugfs(void) { }
+-#endif
+-int dlm_netlink_init(void);
+-void dlm_netlink_exit(void);
+-
+ static int __init init_dlm(void)
+ {
+ 	int error;
+diff --git a/fs/dlm/member.c b/fs/dlm/member.c
+index e9cdcab..fa17f5a 100644
+--- a/fs/dlm/member.c
++++ b/fs/dlm/member.c
+@@ -1,7 +1,7 @@
+ /******************************************************************************
+ *******************************************************************************
+ **
+-**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
++**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+ **
+ **  This copyrighted material is made available to anyone wishing to use,
+ **  modify, copy, or redistribute it subject to the terms and conditions
+@@ -70,7 +70,7 @@ static void dlm_remove_member(struct dlm_ls *ls, struct dlm_member *memb)
+ 	ls->ls_num_nodes--;
+ }
+ 
+-static int dlm_is_member(struct dlm_ls *ls, int nodeid)
++int dlm_is_member(struct dlm_ls *ls, int nodeid)
+ {
+ 	struct dlm_member *memb;
+ 
+diff --git a/fs/dlm/member.h b/fs/dlm/member.h
+index 927c08c..7a26fca 100644
+--- a/fs/dlm/member.h
++++ b/fs/dlm/member.h
+@@ -1,7 +1,7 @@
+ /******************************************************************************
+ *******************************************************************************
+ **
+-**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
++**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+ **
+ **  This copyrighted material is made available to anyone wishing to use,
+ **  modify, copy, or redistribute it subject to the terms and conditions
+@@ -19,6 +19,7 @@ void dlm_clear_members(struct dlm_ls *ls);
+ void dlm_clear_members_gone(struct dlm_ls *ls);
+ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out);
+ int dlm_is_removed(struct dlm_ls *ls, int nodeid);
++int dlm_is_member(struct dlm_ls *ls, int nodeid);
+ 
+ #endif                          /* __MEMBER_DOT_H__ */
+ 
+diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
+index ecf0e5c..f778386 100644
+--- a/fs/dlm/memory.c
++++ b/fs/dlm/memory.c
+@@ -2,7 +2,7 @@
+ *******************************************************************************
+ **
+ **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+-**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
++**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+ **
+ **  This copyrighted material is made available to anyone wishing to use,
+ **  modify, copy, or redistribute it subject to the terms and conditions
+@@ -35,7 +35,7 @@ void dlm_memory_exit(void)
+ 		kmem_cache_destroy(lkb_cache);
+ }
+ 
+-char *allocate_lvb(struct dlm_ls *ls)
++char *dlm_allocate_lvb(struct dlm_ls *ls)
+ {
+ 	char *p;
+ 
+@@ -43,7 +43,7 @@ char *allocate_lvb(struct dlm_ls *ls)
+ 	return p;
+ }
+ 
+-void free_lvb(char *p)
++void dlm_free_lvb(char *p)
+ {
+ 	kfree(p);
+ }
+@@ -51,7 +51,7 @@ void free_lvb(char *p)
+ /* FIXME: have some minimal space built-in to rsb for the name and
+    kmalloc a separate name if needed, like dentries are done */
+ 
+-struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen)
++struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen)
+ {
+ 	struct dlm_rsb *r;
+ 
+@@ -61,14 +61,14 @@ struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen)
+ 	return r;
+ }
+ 
+-void free_rsb(struct dlm_rsb *r)
++void dlm_free_rsb(struct dlm_rsb *r)
+ {
+ 	if (r->res_lvbptr)
+-		free_lvb(r->res_lvbptr);
++		dlm_free_lvb(r->res_lvbptr);
+ 	kfree(r);
+ }
+ 
+-struct dlm_lkb *allocate_lkb(struct dlm_ls *ls)
++struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
+ {
+ 	struct dlm_lkb *lkb;
+ 
+@@ -76,7 +76,7 @@ struct dlm_lkb *allocate_lkb(struct dlm_ls *ls)
+ 	return lkb;
+ }
+ 
+-void free_lkb(struct dlm_lkb *lkb)
++void dlm_free_lkb(struct dlm_lkb *lkb)
+ {
+ 	if (lkb->lkb_flags & DLM_IFL_USER) {
+ 		struct dlm_user_args *ua;
+@@ -90,19 +90,3 @@ void free_lkb(struct dlm_lkb *lkb)
+ 	kmem_cache_free(lkb_cache, lkb);
+ }
+ 
+-struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen)
+-{
+-	struct dlm_direntry *de;
+-
+-	DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,
+-		   printk("namelen = %d\n", namelen););
+-
+-	de = kzalloc(sizeof(*de) + namelen, GFP_KERNEL);
+-	return de;
+-}
+-
+-void free_direntry(struct dlm_direntry *de)
+-{
+-	kfree(de);
+-}
+-
+diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h
+index 6ead158..485fb29 100644
+--- a/fs/dlm/memory.h
++++ b/fs/dlm/memory.h
+@@ -2,7 +2,7 @@
+ *******************************************************************************
+ **
+ **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+-**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
++**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+ **
+ **  This copyrighted material is made available to anyone wishing to use,
+ **  modify, copy, or redistribute it subject to the terms and conditions
+@@ -16,14 +16,12 @@
+ 
+ int dlm_memory_init(void);
+ void dlm_memory_exit(void);
+-struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen);
+-void free_rsb(struct dlm_rsb *r);
+-struct dlm_lkb *allocate_lkb(struct dlm_ls *ls);
+-void free_lkb(struct dlm_lkb *l);
+-struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen);
+-void free_direntry(struct dlm_direntry *de);
+-char *allocate_lvb(struct dlm_ls *ls);
+-void free_lvb(char *l);
++struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen);
++void dlm_free_rsb(struct dlm_rsb *r);
++struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls);
++void dlm_free_lkb(struct dlm_lkb *l);
++char *dlm_allocate_lvb(struct dlm_ls *ls);
++void dlm_free_lvb(char *l);
+ 
+ #endif		/* __MEMORY_DOT_H__ */
+ 
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index f8c69dd..e69926e 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -2,7 +2,7 @@
+ *******************************************************************************
+ **
+ **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+-**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
++**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
+ **
+ **  This copyrighted material is made available to anyone wishing to use,
+ **  modify, copy, or redistribute it subject to the terms and conditions
+@@ -58,8 +58,12 @@ static void copy_from_cb(void *dst, const void *base, unsigned offset,
+ int dlm_process_incoming_buffer(int nodeid, const void *base,
+ 				unsigned offset, unsigned len, unsigned limit)
+ {
+-	unsigned char __tmp[DLM_INBUF_LEN];
+-	struct dlm_header *msg = (struct dlm_header *) __tmp;
++	union {
++		unsigned char __buf[DLM_INBUF_LEN];
++		/* this is to force proper alignment on some arches */
++		struct dlm_header dlm;
++	} __tmp;
++	struct dlm_header *msg = &__tmp.dlm;
+ 	int ret = 0;
+ 	int err = 0;
+ 	uint16_t msglen;
+@@ -100,8 +104,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
+ 		   in the buffer on the stack (which should work for most
+ 		   ordinary messages). */
+ 
+-		if (msglen > sizeof(__tmp) &&
+-		    msg == (struct dlm_header *) __tmp) {
++		if (msglen > DLM_INBUF_LEN && msg == &__tmp.dlm) {
+ 			msg = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
+ 			if (msg == NULL)
+ 				return ret;
+@@ -119,7 +122,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
+ 		dlm_receive_buffer(msg, nodeid);
+ 	}
+ 
+-	if (msg != (struct dlm_header *) __tmp)
++	if (msg != &__tmp.dlm)
+ 		kfree(msg);
+ 
+ 	return err ? err : ret;
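
The midcomms.c hunk above stops casting a plain char array to struct dlm_header and instead overlays the buffer and the header in a union, which guarantees the buffer carries the header's alignment on architectures that need it. A small standalone sketch of the trick, with a made-up header layout rather than the real struct dlm_header:

/* Overlaying a byte buffer with the header type inside a union forces
 * the compiler to align the storage for the stricter member. */
#include <stdio.h>
#include <stdint.h>
#include <stdalign.h>

struct header {
	uint32_t version;
	uint32_t lockspace;
	uint16_t length;
	uint8_t  cmd;
	uint8_t  pad;
};

#define INBUF_LEN 64

union inbuf {
	unsigned char buf[INBUF_LEN];	/* raw receive space */
	struct header hdr;		/* forces alignof(struct header) */
};

int main(void)
{
	union inbuf tmp;
	struct header *msg = &tmp.hdr;	/* aligned, no cast needed */

	printf("char[] alignment %zu, union alignment %zu\n",
	       alignof(unsigned char[INBUF_LEN]), alignof(union inbuf));
	msg->length = INBUF_LEN;
	printf("length field: %u\n", (unsigned)msg->length);
	return 0;
}
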
+diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
+index ae2fd97..026824c 100644
+--- a/fs/dlm/rcom.c
++++ b/fs/dlm/rcom.c
+@@ -2,7 +2,7 @@
+ *******************************************************************************
+ **
+ **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+-**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
++**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+ **
+ **  This copyrighted material is made available to anyone wishing to use,
+ **  modify, copy, or redistribute it subject to the terms and conditions
+@@ -197,11 +197,6 @@ static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ 	spin_unlock(&ls->ls_rcom_spin);
+ }
+ 
+-static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+-{
+-	receive_sync_reply(ls, rc_in);
+-}
+-
+ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
+ {
+ 	struct dlm_rcom *rc;
+@@ -254,11 +249,6 @@ static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ 	send_rcom(ls, mh, rc);
+ }
+ 
+-static void receive_rcom_names_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+-{
+-	receive_sync_reply(ls, rc_in);
+-}
+-
+ int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
+ {
+ 	struct dlm_rcom *rc;
+@@ -381,11 +371,6 @@ static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ 	send_rcom(ls, mh, rc);
+ }
+ 
+-static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+-{
+-	dlm_recover_process_copy(ls, rc_in);
+-}
+-
+ /* If the lockspace doesn't exist then still send a status message
+    back; it's possible that it just doesn't have its global_id yet. */
+ 
+@@ -481,11 +466,11 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ 		break;
+ 
+ 	case DLM_RCOM_STATUS_REPLY:
+-		receive_rcom_status_reply(ls, rc);
++		receive_sync_reply(ls, rc);
+ 		break;
+ 
+ 	case DLM_RCOM_NAMES_REPLY:
+-		receive_rcom_names_reply(ls, rc);
++		receive_sync_reply(ls, rc);
+ 		break;
+ 
+ 	case DLM_RCOM_LOOKUP_REPLY:
+@@ -493,11 +478,11 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ 		break;
+ 
+ 	case DLM_RCOM_LOCK_REPLY:
+-		receive_rcom_lock_reply(ls, rc);
++		dlm_recover_process_copy(ls, rc);
+ 		break;
+ 
+ 	default:
+-		DLM_ASSERT(0, printk("rc_type=%x\n", rc->rc_type););
++		log_error(ls, "receive_rcom bad type %d", rc->rc_type);
+ 	}
+  out:
+ 	return;
+diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
+index c2cc769..df075dc 100644
+--- a/fs/dlm/recover.c
++++ b/fs/dlm/recover.c
+@@ -629,7 +629,7 @@ static void recover_lvb(struct dlm_rsb *r)
+ 		goto out;
+ 
+ 	if (!r->res_lvbptr) {
+-		r->res_lvbptr = allocate_lvb(r->res_ls);
++		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
+ 		if (!r->res_lvbptr)
+ 			goto out;
+ 	}
+@@ -731,6 +731,20 @@ int dlm_create_root_list(struct dlm_ls *ls)
+ 			list_add(&r->res_root_list, &ls->ls_root_list);
+ 			dlm_hold_rsb(r);
+ 		}
++
++		/* If we're using a directory, add tossed rsbs to the root
++		   list; they'll have entries created in the new directory,
++		   but no other recovery steps should do anything with them. */
++
++		if (dlm_no_directory(ls)) {
++			read_unlock(&ls->ls_rsbtbl[i].lock);
++			continue;
++		}
++
++		list_for_each_entry(r, &ls->ls_rsbtbl[i].toss, res_hashchain) {
++			list_add(&r->res_root_list, &ls->ls_root_list);
++			dlm_hold_rsb(r);
++		}
+ 		read_unlock(&ls->ls_rsbtbl[i].lock);
+ 	}
+  out:
+@@ -750,6 +764,11 @@ void dlm_release_root_list(struct dlm_ls *ls)
+ 	up_write(&ls->ls_root_sem);
+ }
+ 
++/* If not using a directory, clear the entire toss list, there's no benefit to
++   caching the master value since it's fixed.  If we are using a dir, keep the
++   rsb's we're the master of.  Recovery will add them to the root list and from
++   there they'll be entered in the rebuilt directory. */
++
+ void dlm_clear_toss_list(struct dlm_ls *ls)
+ {
+ 	struct dlm_rsb *r, *safe;
+@@ -759,8 +778,10 @@ void dlm_clear_toss_list(struct dlm_ls *ls)
+ 		write_lock(&ls->ls_rsbtbl[i].lock);
+ 		list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
+ 					 res_hashchain) {
+-			list_del(&r->res_hashchain);
+-			free_rsb(r);
++			if (dlm_no_directory(ls) || !is_master(r)) {
++				list_del(&r->res_hashchain);
++				dlm_free_rsb(r);
++			}
+ 		}
+ 		write_unlock(&ls->ls_rsbtbl[i].lock);
+ 	}
+diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
+index 4b89e20..997f953 100644
+--- a/fs/dlm/recoverd.c
++++ b/fs/dlm/recoverd.c
+@@ -67,17 +67,18 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
+ 	dlm_astd_resume();
+ 
+ 	/*
+-	 * This list of root rsb's will be the basis of most of the recovery
+-	 * routines.
++	 * Free non-master tossed rsb's.  Master rsb's are kept on toss
++	 * list and put on root list to be included in resdir recovery.
+ 	 */
+ 
+-	dlm_create_root_list(ls);
++	dlm_clear_toss_list(ls);
+ 
+ 	/*
+-	 * Free all the tossed rsb's so we don't have to recover them.
++	 * This list of root rsb's will be the basis of most of the recovery
++	 * routines.
+ 	 */
+ 
+-	dlm_clear_toss_list(ls);
++	dlm_create_root_list(ls);
+ 
+ 	/*
+ 	 * Add or remove nodes from the lockspace's ls_nodes list.
+diff --git a/fs/dlm/user.c b/fs/dlm/user.c
+index 4f74154..7cbc682 100644
+--- a/fs/dlm/user.c
++++ b/fs/dlm/user.c
+@@ -24,8 +24,7 @@
+ #include "lvb_table.h"
+ #include "user.h"
+ 
+-static const char *name_prefix="dlm";
+-static struct miscdevice ctl_device;
++static const char name_prefix[] = "dlm";
+ static const struct file_operations device_fops;
+ 
+ #ifdef CONFIG_COMPAT
+@@ -82,7 +81,8 @@ struct dlm_lock_result32 {
+ };
+ 
+ static void compat_input(struct dlm_write_request *kb,
+-			 struct dlm_write_request32 *kb32)
++			 struct dlm_write_request32 *kb32,
++			 int max_namelen)
+ {
+ 	kb->version[0] = kb32->version[0];
+ 	kb->version[1] = kb32->version[1];
+@@ -112,7 +112,11 @@ static void compat_input(struct dlm_write_request *kb,
+ 		kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
+ 		kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
+ 		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
+-		memcpy(kb->i.lock.name, kb32->i.lock.name, kb->i.lock.namelen);
++		if (kb->i.lock.namelen <= max_namelen)
++			memcpy(kb->i.lock.name, kb32->i.lock.name,
++			       kb->i.lock.namelen);
++		else
++			kb->i.lock.namelen = max_namelen;
+ 	}
+ }
+ 
+@@ -236,12 +240,12 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
+ 	spin_unlock(&proc->asts_spin);
+ 
+ 	if (eol) {
+-		spin_lock(&ua->proc->locks_spin);
++		spin_lock(&proc->locks_spin);
+ 		if (!list_empty(&lkb->lkb_ownqueue)) {
+ 			list_del_init(&lkb->lkb_ownqueue);
+ 			dlm_put_lkb(lkb);
+ 		}
+-		spin_unlock(&ua->proc->locks_spin);
++		spin_unlock(&proc->locks_spin);
+ 	}
+  out:
+ 	mutex_unlock(&ls->ls_clear_proc_locks);
+@@ -529,7 +533,8 @@ static ssize_t device_write(struct file *file, const char __user *buf,
+ 
+ 		if (proc)
+ 			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
+-		compat_input(kbuf, k32buf);
++		compat_input(kbuf, k32buf,
++			     count - sizeof(struct dlm_write_request32));
+ 		kfree(k32buf);
+ 	}
+ #endif
+@@ -896,14 +901,16 @@ static const struct file_operations ctl_device_fops = {
+ 	.owner   = THIS_MODULE,
+ };
+ 
++static struct miscdevice ctl_device = {
++	.name  = "dlm-control",
++	.fops  = &ctl_device_fops,
++	.minor = MISC_DYNAMIC_MINOR,
++};
++
+ int dlm_user_init(void)
+ {
+ 	int error;
+ 
+-	ctl_device.name = "dlm-control";
+-	ctl_device.fops = &ctl_device_fops;
+-	ctl_device.minor = MISC_DYNAMIC_MINOR;
+-
+ 	error = misc_register(&ctl_device);
+ 	if (error)
+ 		log_print("misc_register failed for control device");
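
The user.c hunk above adds a max_namelen argument to compat_input() and clamps the copy length so a 32-bit request cannot make the kernel copy past the space the caller actually provided. A hedged userspace sketch of that clamping, with a hypothetical struct in place of dlm_write_request:

/* Cap the caller-supplied length before memcpy() instead of trusting it. */
#include <stdio.h>
#include <string.h>

struct request {
	unsigned int namelen;
	char name[64];
};

static void copy_name(struct request *dst, const char *src,
		      unsigned int namelen, unsigned int max_namelen)
{
	if (namelen > max_namelen)
		namelen = max_namelen;	/* never copy past the available space */
	memcpy(dst->name, src, namelen);
	dst->namelen = namelen;
}

int main(void)
{
	struct request req;
	char src[64] = "resource-name";	/* source buffer the copy reads from */

	/* a bogus namelen from userspace is clamped to what really fits */
	copy_name(&req, src, 1000, sizeof(req.name));
	printf("copied %u bytes\n", req.namelen);
	return 0;
}
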
+diff --git a/fs/dlm/util.c b/fs/dlm/util.c
+index 963889c..4d9c1f4 100644
+--- a/fs/dlm/util.c
++++ b/fs/dlm/util.c
+@@ -1,7 +1,7 @@
+ /******************************************************************************
+ *******************************************************************************
+ **
+-**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
++**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+ **
+ **  This copyrighted material is made available to anyone wishing to use,
+ **  modify, copy, or redistribute it subject to the terms and conditions
+@@ -14,6 +14,14 @@
+ #include "rcom.h"
+ #include "util.h"
+ 
++#define DLM_ERRNO_EDEADLK		35
++#define DLM_ERRNO_EBADR			53
++#define DLM_ERRNO_EBADSLT		57
++#define DLM_ERRNO_EPROTO		71
++#define DLM_ERRNO_EOPNOTSUPP		95
++#define DLM_ERRNO_ETIMEDOUT	       110
++#define DLM_ERRNO_EINPROGRESS	       115
++
+ static void header_out(struct dlm_header *hd)
+ {
+ 	hd->h_version		= cpu_to_le32(hd->h_version);
+@@ -30,11 +38,54 @@ static void header_in(struct dlm_header *hd)
+ 	hd->h_length		= le16_to_cpu(hd->h_length);
+ }
+ 
+-void dlm_message_out(struct dlm_message *ms)
++/* higher errno values are inconsistent across architectures, so select
++   one set of values for on the wire */
++
++static int to_dlm_errno(int err)
++{
++	switch (err) {
++	case -EDEADLK:
++		return -DLM_ERRNO_EDEADLK;
++	case -EBADR:
++		return -DLM_ERRNO_EBADR;
++	case -EBADSLT:
++		return -DLM_ERRNO_EBADSLT;
++	case -EPROTO:
++		return -DLM_ERRNO_EPROTO;
++	case -EOPNOTSUPP:
++		return -DLM_ERRNO_EOPNOTSUPP;
++	case -ETIMEDOUT:
++		return -DLM_ERRNO_ETIMEDOUT;
++	case -EINPROGRESS:
++		return -DLM_ERRNO_EINPROGRESS;
++	}
++	return err;
++}
++
++static int from_dlm_errno(int err)
+ {
+-	struct dlm_header *hd = (struct dlm_header *) ms;
++	switch (err) {
++	case -DLM_ERRNO_EDEADLK:
++		return -EDEADLK;
++	case -DLM_ERRNO_EBADR:
++		return -EBADR;
++	case -DLM_ERRNO_EBADSLT:
++		return -EBADSLT;
++	case -DLM_ERRNO_EPROTO:
++		return -EPROTO;
++	case -DLM_ERRNO_EOPNOTSUPP:
++		return -EOPNOTSUPP;
++	case -DLM_ERRNO_ETIMEDOUT:
++		return -ETIMEDOUT;
++	case -DLM_ERRNO_EINPROGRESS:
++		return -EINPROGRESS;
++	}
++	return err;
++}
+ 
+-	header_out(hd);
++void dlm_message_out(struct dlm_message *ms)
++{
++	header_out(&ms->m_header);
+ 
+ 	ms->m_type		= cpu_to_le32(ms->m_type);
+ 	ms->m_nodeid		= cpu_to_le32(ms->m_nodeid);
+@@ -53,14 +104,12 @@ void dlm_message_out(struct dlm_message *ms)
+ 	ms->m_rqmode		= cpu_to_le32(ms->m_rqmode);
+ 	ms->m_bastmode		= cpu_to_le32(ms->m_bastmode);
+ 	ms->m_asts		= cpu_to_le32(ms->m_asts);
+-	ms->m_result		= cpu_to_le32(ms->m_result);
++	ms->m_result		= cpu_to_le32(to_dlm_errno(ms->m_result));
+ }
+ 
+ void dlm_message_in(struct dlm_message *ms)
+ {
+-	struct dlm_header *hd = (struct dlm_header *) ms;
+-
+-	header_in(hd);
++	header_in(&ms->m_header);
+ 
+ 	ms->m_type		= le32_to_cpu(ms->m_type);
+ 	ms->m_nodeid		= le32_to_cpu(ms->m_nodeid);
+@@ -79,7 +128,7 @@ void dlm_message_in(struct dlm_message *ms)
+ 	ms->m_rqmode		= le32_to_cpu(ms->m_rqmode);
+ 	ms->m_bastmode		= le32_to_cpu(ms->m_bastmode);
+ 	ms->m_asts		= le32_to_cpu(ms->m_asts);
+-	ms->m_result		= le32_to_cpu(ms->m_result);
++	ms->m_result		= from_dlm_errno(le32_to_cpu(ms->m_result));
+ }
+ 
+ static void rcom_lock_out(struct rcom_lock *rl)
+@@ -126,10 +175,9 @@ static void rcom_config_in(struct rcom_config *rf)
+ 
+ void dlm_rcom_out(struct dlm_rcom *rc)
+ {
+-	struct dlm_header *hd = (struct dlm_header *) rc;
+ 	int type = rc->rc_type;
+ 
+-	header_out(hd);
++	header_out(&rc->rc_header);
+ 
+ 	rc->rc_type		= cpu_to_le32(rc->rc_type);
+ 	rc->rc_result		= cpu_to_le32(rc->rc_result);
+@@ -137,7 +185,7 @@ void dlm_rcom_out(struct dlm_rcom *rc)
+ 	rc->rc_seq		= cpu_to_le64(rc->rc_seq);
+ 	rc->rc_seq_reply	= cpu_to_le64(rc->rc_seq_reply);
+ 
+-	if (type == DLM_RCOM_LOCK)
++	if ((type == DLM_RCOM_LOCK) || (type == DLM_RCOM_LOCK_REPLY))
+ 		rcom_lock_out((struct rcom_lock *) rc->rc_buf);
+ 
+ 	else if (type == DLM_RCOM_STATUS_REPLY)
+@@ -146,9 +194,9 @@ void dlm_rcom_out(struct dlm_rcom *rc)
+ 
+ void dlm_rcom_in(struct dlm_rcom *rc)
+ {
+-	struct dlm_header *hd = (struct dlm_header *) rc;
++	int type;
+ 
+-	header_in(hd);
++	header_in(&rc->rc_header);
+ 
+ 	rc->rc_type		= le32_to_cpu(rc->rc_type);
+ 	rc->rc_result		= le32_to_cpu(rc->rc_result);
+@@ -156,10 +204,12 @@ void dlm_rcom_in(struct dlm_rcom *rc)
+ 	rc->rc_seq		= le64_to_cpu(rc->rc_seq);
+ 	rc->rc_seq_reply	= le64_to_cpu(rc->rc_seq_reply);
+ 
+-	if (rc->rc_type == DLM_RCOM_LOCK)
++	type = rc->rc_type;
++
++	if ((type == DLM_RCOM_LOCK) || (type == DLM_RCOM_LOCK_REPLY))
+ 		rcom_lock_in((struct rcom_lock *) rc->rc_buf);
+ 
+-	else if (rc->rc_type == DLM_RCOM_STATUS_REPLY)
++	else if (type == DLM_RCOM_STATUS_REPLY)
+ 		rcom_config_in((struct rcom_config *) rc->rc_buf);
+ }
+ 
 diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
 index e5580bc..0249aa4 100644
 --- a/fs/ecryptfs/main.c
@@ -588892,6 +738875,18 @@
  #define flush_agp_cache() mb()
  
  /* Convert a physical address to an address suitable for the GART. */
+diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h
+index eefab3f..b9e9147 100644
+--- a/include/asm-alpha/tlbflush.h
++++ b/include/asm-alpha/tlbflush.h
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/mm.h>
+ #include <asm/compiler.h>
++#include <asm/pgalloc.h>
+ 
+ #ifndef __EXTERN_INLINE
+ #define __EXTERN_INLINE extern inline
 diff --git a/include/asm-arm/arch-at91/at91_lcdc.h b/include/asm-arm/arch-at91/at91_lcdc.h
 deleted file mode 100644
 index ab040a4..0000000
@@ -595885,20 +745880,20 @@
  #define ia64_setreg(regnum, val)						\
  ({										\
 diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
-index c4f1e32..0095bcf 100644
+index c4f1e32..77f30b6 100644
 --- a/include/asm-ia64/percpu.h
 +++ b/include/asm-ia64/percpu.h
-@@ -16,28 +16,11 @@
+@@ -15,69 +15,36 @@
+ 
  #include <linux/threads.h>
  
- #ifdef HAVE_MODEL_SMALL_ATTRIBUTE
+-#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
 -# define __SMALL_ADDR_AREA	__attribute__((__model__ (__small__)))
 -#else
 -# define __SMALL_ADDR_AREA
-+# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
- #endif
- 
- #define DECLARE_PER_CPU(type, name)				\
+-#endif
+-
+-#define DECLARE_PER_CPU(type, name)				\
 -	extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
 -
 -/* Separate out the type, so (int[3], foo) works. */
@@ -595906,7 +745901,7 @@
 -	__attribute__((__section__(".data.percpu")))		\
 -	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
 -
--#ifdef CONFIG_SMP
+ #ifdef CONFIG_SMP
 -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
 -	__attribute__((__section__(".data.percpu.shared_aligned")))	\
 -	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name		\
@@ -595915,11 +745910,39 @@
 -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
 -	DEFINE_PER_CPU(type, name)
 -#endif
-+	extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+-
+-/*
+- * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
+- * external routine, to avoid include-hell.
+- */
+-#ifdef CONFIG_SMP
+-
+-extern unsigned long __per_cpu_offset[NR_CPUS];
+-#define per_cpu_offset(x) (__per_cpu_offset[x])
  
- /*
-  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
-@@ -68,9 +51,6 @@ extern void *per_cpu_init(void);
+-/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+-DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
++#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
++# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
++#endif
+ 
+-#define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
+-#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
+-#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
++#define __my_cpu_offset	__ia64_per_cpu_var(local_per_cpu_offset)
+ 
+-extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
+-extern void setup_per_cpu_areas (void);
+ extern void *per_cpu_init(void);
+ 
+ #else /* ! SMP */
+ 
+-#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
+-#define __get_cpu_var(var)			per_cpu__##var
+-#define __raw_get_cpu_var(var)			per_cpu__##var
++#define PER_CPU_ATTRIBUTES	__attribute__((__section__(".data.percpu")))
++
+ #define per_cpu_init()				(__phys_per_cpu_start)
  
  #endif	/* SMP */
  
@@ -595929,6 +745952,19 @@
  /*
   * Be extremely careful when taking the address of this variable!  Due to virtual
   * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
+  * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+  * more efficient.
+  */
+-#define __ia64_per_cpu_var(var)	(per_cpu__##var)
++#define __ia64_per_cpu_var(var)	per_cpu__##var
++
++#include <asm-generic/percpu.h>
++
++/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
++DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
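
The asm-ia64/percpu.h hunk above drops the hand-rolled per_cpu()/__get_cpu_var() macros in favour of asm-generic/percpu.h, keeping only the ia64-specific __my_cpu_offset and attributes. The generic scheme addresses a per-cpu variable by adding that CPU's offset to the variable's address; a very rough, hypothetical userspace sketch of that addressing idea:

/* Simplified model of offset-based per-cpu access (not the kernel code). */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

static long counter;			/* the "per-cpu" variable */
static long percpu_copies[NR_CPUS];	/* one copy per CPU */
static intptr_t per_cpu_offset[NR_CPUS];

/* add this CPU's offset to the variable's address */
#define per_cpu(var, cpu) \
	(*(long *)((intptr_t)&(var) + per_cpu_offset[(cpu)]))

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_offset[cpu] =
			(intptr_t)&percpu_copies[cpu] - (intptr_t)&counter;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu(counter, cpu) = cpu * 10;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d counter = %ld\n", cpu, per_cpu(counter, cpu));
	return 0;
}
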
 diff --git a/include/asm-m32r/signal.h b/include/asm-m32r/signal.h
 index 9372586..1a60706 100644
 --- a/include/asm-m32r/signal.h
@@ -597500,6 +747536,19 @@
  #define flush_agp_cache()		mb()
  
  /* Convert a physical address to an address suitable for the GART. */
+diff --git a/include/asm-powerpc/8xx_immap.h b/include/asm-powerpc/8xx_immap.h
+index 1311cef..4b0e152 100644
+--- a/include/asm-powerpc/8xx_immap.h
++++ b/include/asm-powerpc/8xx_immap.h
+@@ -123,7 +123,7 @@ typedef struct	mem_ctlr {
+ #define OR_G5LA		0x00000400	/* Output #GPL5 on #GPL_A5		*/
+ #define OR_G5LS		0x00000200	/* Drive #GPL high on falling edge of...*/
+ #define OR_BI		0x00000100	/* Burst inhibit			*/
+-#define OR_SCY_MSK	0x000000f0	/* Cycle Lenght in Clocks		*/
++#define OR_SCY_MSK	0x000000f0	/* Cycle Length in Clocks		*/
+ #define OR_SCY_0_CLK	0x00000000	/* 0 clock cycles wait states		*/
+ #define OR_SCY_1_CLK	0x00000010	/* 1 clock cycles wait states		*/
+ #define OR_SCY_2_CLK	0x00000020	/* 2 clock cycles wait states		*/
 diff --git a/include/asm-powerpc/agp.h b/include/asm-powerpc/agp.h
 index e5ccaca..86455c4 100644
 --- a/include/asm-powerpc/agp.h
@@ -597534,6 +747583,1926 @@
  /* Bitmap functions for the minix filesystem.  */
  
  #define minix_test_and_set_bit(nr,addr) \
+diff --git a/include/asm-powerpc/commproc.h b/include/asm-powerpc/commproc.h
+deleted file mode 100644
+index 2ee59d7..0000000
+--- a/include/asm-powerpc/commproc.h
++++ /dev/null
+@@ -1,752 +0,0 @@
+-/*
+- * MPC8xx Communication Processor Module.
+- * Copyright (c) 1997 Dan Malek (dmalek at jlc.net)
+- *
+- * This file contains structures and information for the communication
+- * processor channels.  Some CPM control and status is available
+- * throught the MPC8xx internal memory map.  See immap.h for details.
+- * This file only contains what I need for the moment, not the total
+- * CPM capabilities.  I (or someone else) will add definitions as they
+- * are needed.  -- Dan
+- *
+- * On the MBX board, EPPC-Bug loads CPM microcode into the first 512
+- * bytes of the DP RAM and relocates the I2C parameter area to the
+- * IDMA1 space.  The remaining DP RAM is available for buffer descriptors
+- * or other use.
+- */
+-#ifndef __CPM_8XX__
+-#define __CPM_8XX__
+-
+-#include <asm/8xx_immap.h>
+-#include <asm/ptrace.h>
+-#include <asm/cpm.h>
+-
+-/* CPM Command register.
+-*/
+-#define CPM_CR_RST	((ushort)0x8000)
+-#define CPM_CR_OPCODE	((ushort)0x0f00)
+-#define CPM_CR_CHAN	((ushort)0x00f0)
+-#define CPM_CR_FLG	((ushort)0x0001)
+-
+-/* Some commands (there are more...later)
+-*/
+-#define CPM_CR_INIT_TRX		((ushort)0x0000)
+-#define CPM_CR_INIT_RX		((ushort)0x0001)
+-#define CPM_CR_INIT_TX		((ushort)0x0002)
+-#define CPM_CR_HUNT_MODE	((ushort)0x0003)
+-#define CPM_CR_STOP_TX		((ushort)0x0004)
+-#define CPM_CR_GRA_STOP_TX	((ushort)0x0005)
+-#define CPM_CR_RESTART_TX	((ushort)0x0006)
+-#define CPM_CR_CLOSE_RX_BD	((ushort)0x0007)
+-#define CPM_CR_SET_GADDR	((ushort)0x0008)
+-#define CPM_CR_SET_TIMER	CPM_CR_SET_GADDR
+-
+-/* Channel numbers.
+-*/
+-#define CPM_CR_CH_SCC1		((ushort)0x0000)
+-#define CPM_CR_CH_I2C		((ushort)0x0001)	/* I2C and IDMA1 */
+-#define CPM_CR_CH_SCC2		((ushort)0x0004)
+-#define CPM_CR_CH_SPI		((ushort)0x0005)	/* SPI / IDMA2 / Timers */
+-#define CPM_CR_CH_TIMER		CPM_CR_CH_SPI
+-#define CPM_CR_CH_SCC3		((ushort)0x0008)
+-#define CPM_CR_CH_SMC1		((ushort)0x0009)	/* SMC1 / DSP1 */
+-#define CPM_CR_CH_SCC4		((ushort)0x000c)
+-#define CPM_CR_CH_SMC2		((ushort)0x000d)	/* SMC2 / DSP2 */
+-
+-#define mk_cr_cmd(CH, CMD)	((CMD << 8) | (CH << 4))
+-
+-#ifndef CONFIG_PPC_CPM_NEW_BINDING
+-/* The dual ported RAM is multi-functional.  Some areas can be (and are
+- * being) used for microcode.  There is an area that can only be used
+- * as data ram for buffer descriptors, which is all we use right now.
+- * Currently the first 512 and last 256 bytes are used for microcode.
+- */
+-#define CPM_DATAONLY_BASE	((uint)0x0800)
+-#define CPM_DATAONLY_SIZE	((uint)0x0700)
+-#define CPM_DP_NOSPACE		((uint)0x7fffffff)
+-#endif
+-
+-/* Export the base address of the communication processor registers
+- * and dual port ram.
+- */
+-extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
+-
+-#ifdef CONFIG_PPC_CPM_NEW_BINDING
+-#define cpm_dpalloc cpm_muram_alloc
+-#define cpm_dpfree cpm_muram_free
+-#define cpm_dpram_addr cpm_muram_addr
+-#define cpm_dpram_phys cpm_muram_dma
+-#else
+-extern unsigned long cpm_dpalloc(uint size, uint align);
+-extern int cpm_dpfree(unsigned long offset);
+-extern unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align);
+-extern void cpm_dpdump(void);
+-extern void *cpm_dpram_addr(unsigned long offset);
+-extern uint cpm_dpram_phys(u8* addr);
+-#endif
+-
+-extern void cpm_setbrg(uint brg, uint rate);
+-
+-extern uint m8xx_cpm_hostalloc(uint size);
+-extern int  m8xx_cpm_hostfree(uint start);
+-extern void m8xx_cpm_hostdump(void);
+-
+-extern void cpm_load_patch(cpm8xx_t *cp);
+-
+-/* Buffer descriptors used by many of the CPM protocols.
+-*/
+-typedef struct cpm_buf_desc {
+-	ushort	cbd_sc;		/* Status and Control */
+-	ushort	cbd_datlen;	/* Data length in buffer */
+-	uint	cbd_bufaddr;	/* Buffer address in host memory */
+-} cbd_t;
+-
+-#define BD_SC_EMPTY	((ushort)0x8000)	/* Receive is empty */
+-#define BD_SC_READY	((ushort)0x8000)	/* Transmit is ready */
+-#define BD_SC_WRAP	((ushort)0x2000)	/* Last buffer descriptor */
+-#define BD_SC_INTRPT	((ushort)0x1000)	/* Interrupt on change */
+-#define BD_SC_LAST	((ushort)0x0800)	/* Last buffer in frame */
+-#define BD_SC_TC	((ushort)0x0400)	/* Transmit CRC */
+-#define BD_SC_CM	((ushort)0x0200)	/* Continous mode */
+-#define BD_SC_ID	((ushort)0x0100)	/* Rec'd too many idles */
+-#define BD_SC_P		((ushort)0x0100)	/* xmt preamble */
+-#define BD_SC_BR	((ushort)0x0020)	/* Break received */
+-#define BD_SC_FR	((ushort)0x0010)	/* Framing error */
+-#define BD_SC_PR	((ushort)0x0008)	/* Parity error */
+-#define BD_SC_NAK	((ushort)0x0004)	/* NAK - did not respond */
+-#define BD_SC_OV	((ushort)0x0002)	/* Overrun */
+-#define BD_SC_UN	((ushort)0x0002)	/* Underrun */
+-#define BD_SC_CD	((ushort)0x0001)	/* ?? */
+-#define BD_SC_CL	((ushort)0x0001)	/* Collision */
+-
+-/* Parameter RAM offsets.
+-*/
+-#define PROFF_SCC1	((uint)0x0000)
+-#define PROFF_IIC	((uint)0x0080)
+-#define PROFF_SCC2	((uint)0x0100)
+-#define PROFF_SPI	((uint)0x0180)
+-#define PROFF_SCC3	((uint)0x0200)
+-#define PROFF_SMC1	((uint)0x0280)
+-#define PROFF_SCC4	((uint)0x0300)
+-#define PROFF_SMC2	((uint)0x0380)
+-
+-/* Define enough so I can at least use the serial port as a UART.
+- * The MBX uses SMC1 as the host serial port.
+- */
+-typedef struct smc_uart {
+-	ushort	smc_rbase;	/* Rx Buffer descriptor base address */
+-	ushort	smc_tbase;	/* Tx Buffer descriptor base address */
+-	u_char	smc_rfcr;	/* Rx function code */
+-	u_char	smc_tfcr;	/* Tx function code */
+-	ushort	smc_mrblr;	/* Max receive buffer length */
+-	uint	smc_rstate;	/* Internal */
+-	uint	smc_idp;	/* Internal */
+-	ushort	smc_rbptr;	/* Internal */
+-	ushort	smc_ibc;	/* Internal */
+-	uint	smc_rxtmp;	/* Internal */
+-	uint	smc_tstate;	/* Internal */
+-	uint	smc_tdp;	/* Internal */
+-	ushort	smc_tbptr;	/* Internal */
+-	ushort	smc_tbc;	/* Internal */
+-	uint	smc_txtmp;	/* Internal */
+-	ushort	smc_maxidl;	/* Maximum idle characters */
+-	ushort	smc_tmpidl;	/* Temporary idle counter */
+-	ushort	smc_brklen;	/* Last received break length */
+-	ushort	smc_brkec;	/* rcv'd break condition counter */
+-	ushort	smc_brkcr;	/* xmt break count register */
+-	ushort	smc_rmask;	/* Temporary bit mask */
+-	char	res1[8];	/* Reserved */
+-	ushort	smc_rpbase;	/* Relocation pointer */
+-} smc_uart_t;
+-
+-/* Function code bits.
+-*/
+-#define SMC_EB	((u_char)0x10)	/* Set big endian byte order */
+-
+-/* SMC uart mode register.
+-*/
+-#define	SMCMR_REN	((ushort)0x0001)
+-#define SMCMR_TEN	((ushort)0x0002)
+-#define SMCMR_DM	((ushort)0x000c)
+-#define SMCMR_SM_GCI	((ushort)0x0000)
+-#define SMCMR_SM_UART	((ushort)0x0020)
+-#define SMCMR_SM_TRANS	((ushort)0x0030)
+-#define SMCMR_SM_MASK	((ushort)0x0030)
+-#define SMCMR_PM_EVEN	((ushort)0x0100)	/* Even parity, else odd */
+-#define SMCMR_REVD	SMCMR_PM_EVEN
+-#define SMCMR_PEN	((ushort)0x0200)	/* Parity enable */
+-#define SMCMR_BS	SMCMR_PEN
+-#define SMCMR_SL	((ushort)0x0400)	/* Two stops, else one */
+-#define SMCR_CLEN_MASK	((ushort)0x7800)	/* Character length */
+-#define smcr_mk_clen(C)	(((C) << 11) & SMCR_CLEN_MASK)
+-
+-/* SMC2 as Centronics parallel printer.  It is half duplex, in that
+- * it can only receive or transmit.  The parameter ram values for
+- * each direction are either unique or properly overlap, so we can
+- * include them in one structure.
+- */
+-typedef struct smc_centronics {
+-	ushort	scent_rbase;
+-	ushort	scent_tbase;
+-	u_char	scent_cfcr;
+-	u_char	scent_smask;
+-	ushort	scent_mrblr;
+-	uint	scent_rstate;
+-	uint	scent_r_ptr;
+-	ushort	scent_rbptr;
+-	ushort	scent_r_cnt;
+-	uint	scent_rtemp;
+-	uint	scent_tstate;
+-	uint	scent_t_ptr;
+-	ushort	scent_tbptr;
+-	ushort	scent_t_cnt;
+-	uint	scent_ttemp;
+-	ushort	scent_max_sl;
+-	ushort	scent_sl_cnt;
+-	ushort	scent_character1;
+-	ushort	scent_character2;
+-	ushort	scent_character3;
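
The lowcomms.c hunk above binds the outgoing TCP socket to the node's cluster-known address before connect(), so the stack does not pick a different source address and confuse routing or the peer; a bind failure is logged but treated as possibly non-fatal. A userspace analogue of that call order, with placeholder addresses and port:

/* bind() the connecting socket to a chosen local address, then connect(). */
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in src = { 0 }, dst = { 0 };

	if (fd < 0)
		return 1;

	src.sin_family = AF_INET;
	src.sin_port = 0;			/* any source port */
	inet_pton(AF_INET, "192.0.2.10", &src.sin_addr);
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0)
		perror("bind (may not be critical)");

	dst.sin_family = AF_INET;
	dst.sin_port = htons(21064);		/* cluster port; placeholder */
	inet_pton(AF_INET, "192.0.2.20", &dst.sin_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}
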
+-	ushort	scent_character4;
+-	ushort	scent_character5;
+-	ushort	scent_character6;
+-	ushort	scent_character7;
+-	ushort	scent_character8;
+-	ushort	scent_rccm;
+-	ushort	scent_rccr;
+-} smc_cent_t;
+-
+-/* Centronics Status Mask Register.
+-*/
+-#define SMC_CENT_F	((u_char)0x08)
+-#define SMC_CENT_PE	((u_char)0x04)
+-#define SMC_CENT_S	((u_char)0x02)
+-
+-/* SMC Event and Mask register.
+-*/
+-#define	SMCM_BRKE	((unsigned char)0x40)	/* When in UART Mode */
+-#define	SMCM_BRK	((unsigned char)0x10)	/* When in UART Mode */
+-#define	SMCM_TXE	((unsigned char)0x10)	/* When in Transparent Mode */
+-#define	SMCM_BSY	((unsigned char)0x04)
+-#define	SMCM_TX		((unsigned char)0x02)
+-#define	SMCM_RX		((unsigned char)0x01)
+-
+-/* Baud rate generators.
+-*/
+-#define CPM_BRG_RST		((uint)0x00020000)
+-#define CPM_BRG_EN		((uint)0x00010000)
+-#define CPM_BRG_EXTC_INT	((uint)0x00000000)
+-#define CPM_BRG_EXTC_CLK2	((uint)0x00004000)
+-#define CPM_BRG_EXTC_CLK6	((uint)0x00008000)
+-#define CPM_BRG_ATB		((uint)0x00002000)
+-#define CPM_BRG_CD_MASK		((uint)0x00001ffe)
+-#define CPM_BRG_DIV16		((uint)0x00000001)
+-
+-/* SI Clock Route Register
+-*/
+-#define SICR_RCLK_SCC1_BRG1	((uint)0x00000000)
+-#define SICR_TCLK_SCC1_BRG1	((uint)0x00000000)
+-#define SICR_RCLK_SCC2_BRG2	((uint)0x00000800)
+-#define SICR_TCLK_SCC2_BRG2	((uint)0x00000100)
+-#define SICR_RCLK_SCC3_BRG3	((uint)0x00100000)
+-#define SICR_TCLK_SCC3_BRG3	((uint)0x00020000)
+-#define SICR_RCLK_SCC4_BRG4	((uint)0x18000000)
+-#define SICR_TCLK_SCC4_BRG4	((uint)0x03000000)
+-
+-/* SCCs.
+-*/
+-#define SCC_GSMRH_IRP		((uint)0x00040000)
+-#define SCC_GSMRH_GDE		((uint)0x00010000)
+-#define SCC_GSMRH_TCRC_CCITT	((uint)0x00008000)
+-#define SCC_GSMRH_TCRC_BISYNC	((uint)0x00004000)
+-#define SCC_GSMRH_TCRC_HDLC	((uint)0x00000000)
+-#define SCC_GSMRH_REVD		((uint)0x00002000)
+-#define SCC_GSMRH_TRX		((uint)0x00001000)
+-#define SCC_GSMRH_TTX		((uint)0x00000800)
+-#define SCC_GSMRH_CDP		((uint)0x00000400)
+-#define SCC_GSMRH_CTSP		((uint)0x00000200)
+-#define SCC_GSMRH_CDS		((uint)0x00000100)
+-#define SCC_GSMRH_CTSS		((uint)0x00000080)
+-#define SCC_GSMRH_TFL		((uint)0x00000040)
+-#define SCC_GSMRH_RFW		((uint)0x00000020)
+-#define SCC_GSMRH_TXSY		((uint)0x00000010)
+-#define SCC_GSMRH_SYNL16	((uint)0x0000000c)
+-#define SCC_GSMRH_SYNL8		((uint)0x00000008)
+-#define SCC_GSMRH_SYNL4		((uint)0x00000004)
+-#define SCC_GSMRH_RTSM		((uint)0x00000002)
+-#define SCC_GSMRH_RSYN		((uint)0x00000001)
+-
+-#define SCC_GSMRL_SIR		((uint)0x80000000)	/* SCC2 only */
+-#define SCC_GSMRL_EDGE_NONE	((uint)0x60000000)
+-#define SCC_GSMRL_EDGE_NEG	((uint)0x40000000)
+-#define SCC_GSMRL_EDGE_POS	((uint)0x20000000)
+-#define SCC_GSMRL_EDGE_BOTH	((uint)0x00000000)
+-#define SCC_GSMRL_TCI		((uint)0x10000000)
+-#define SCC_GSMRL_TSNC_3	((uint)0x0c000000)
+-#define SCC_GSMRL_TSNC_4	((uint)0x08000000)
+-#define SCC_GSMRL_TSNC_14	((uint)0x04000000)
+-#define SCC_GSMRL_TSNC_INF	((uint)0x00000000)
+-#define SCC_GSMRL_RINV		((uint)0x02000000)
+-#define SCC_GSMRL_TINV		((uint)0x01000000)
+-#define SCC_GSMRL_TPL_128	((uint)0x00c00000)
+-#define SCC_GSMRL_TPL_64	((uint)0x00a00000)
+-#define SCC_GSMRL_TPL_48	((uint)0x00800000)
+-#define SCC_GSMRL_TPL_32	((uint)0x00600000)
+-#define SCC_GSMRL_TPL_16	((uint)0x00400000)
+-#define SCC_GSMRL_TPL_8		((uint)0x00200000)
+-#define SCC_GSMRL_TPL_NONE	((uint)0x00000000)
+-#define SCC_GSMRL_TPP_ALL1	((uint)0x00180000)
+-#define SCC_GSMRL_TPP_01	((uint)0x00100000)
+-#define SCC_GSMRL_TPP_10	((uint)0x00080000)
+-#define SCC_GSMRL_TPP_ZEROS	((uint)0x00000000)
+-#define SCC_GSMRL_TEND		((uint)0x00040000)
+-#define SCC_GSMRL_TDCR_32	((uint)0x00030000)
+-#define SCC_GSMRL_TDCR_16	((uint)0x00020000)
+-#define SCC_GSMRL_TDCR_8	((uint)0x00010000)
+-#define SCC_GSMRL_TDCR_1	((uint)0x00000000)
+-#define SCC_GSMRL_RDCR_32	((uint)0x0000c000)
+-#define SCC_GSMRL_RDCR_16	((uint)0x00008000)
+-#define SCC_GSMRL_RDCR_8	((uint)0x00004000)
+-#define SCC_GSMRL_RDCR_1	((uint)0x00000000)
+-#define SCC_GSMRL_RENC_DFMAN	((uint)0x00003000)
+-#define SCC_GSMRL_RENC_MANCH	((uint)0x00002000)
+-#define SCC_GSMRL_RENC_FM0	((uint)0x00001000)
+-#define SCC_GSMRL_RENC_NRZI	((uint)0x00000800)
+-#define SCC_GSMRL_RENC_NRZ	((uint)0x00000000)
+-#define SCC_GSMRL_TENC_DFMAN	((uint)0x00000600)
+-#define SCC_GSMRL_TENC_MANCH	((uint)0x00000400)
+-#define SCC_GSMRL_TENC_FM0	((uint)0x00000200)
+-#define SCC_GSMRL_TENC_NRZI	((uint)0x00000100)
+-#define SCC_GSMRL_TENC_NRZ	((uint)0x00000000)
+-#define SCC_GSMRL_DIAG_LE	((uint)0x000000c0)	/* Loop and echo */
+-#define SCC_GSMRL_DIAG_ECHO	((uint)0x00000080)
+-#define SCC_GSMRL_DIAG_LOOP	((uint)0x00000040)
+-#define SCC_GSMRL_DIAG_NORM	((uint)0x00000000)
+-#define SCC_GSMRL_ENR		((uint)0x00000020)
+-#define SCC_GSMRL_ENT		((uint)0x00000010)
+-#define SCC_GSMRL_MODE_ENET	((uint)0x0000000c)
+-#define SCC_GSMRL_MODE_QMC	((uint)0x0000000a)
+-#define SCC_GSMRL_MODE_DDCMP	((uint)0x00000009)
+-#define SCC_GSMRL_MODE_BISYNC	((uint)0x00000008)
+-#define SCC_GSMRL_MODE_V14	((uint)0x00000007)
+-#define SCC_GSMRL_MODE_AHDLC	((uint)0x00000006)
+-#define SCC_GSMRL_MODE_PROFIBUS	((uint)0x00000005)
+-#define SCC_GSMRL_MODE_UART	((uint)0x00000004)
+-#define SCC_GSMRL_MODE_SS7	((uint)0x00000003)
+-#define SCC_GSMRL_MODE_ATALK	((uint)0x00000002)
+-#define SCC_GSMRL_MODE_HDLC	((uint)0x00000000)
+-
+-#define SCC_TODR_TOD		((ushort)0x8000)
+-
+-/* SCC Event and Mask register.
+-*/
+-#define	SCCM_TXE	((unsigned char)0x10)
+-#define	SCCM_BSY	((unsigned char)0x04)
+-#define	SCCM_TX		((unsigned char)0x02)
+-#define	SCCM_RX		((unsigned char)0x01)
+-
+-typedef struct scc_param {
+-	ushort	scc_rbase;	/* Rx Buffer descriptor base address */
+-	ushort	scc_tbase;	/* Tx Buffer descriptor base address */
+-	u_char	scc_rfcr;	/* Rx function code */
+-	u_char	scc_tfcr;	/* Tx function code */
+-	ushort	scc_mrblr;	/* Max receive buffer length */
+-	uint	scc_rstate;	/* Internal */
+-	uint	scc_idp;	/* Internal */
+-	ushort	scc_rbptr;	/* Internal */
+-	ushort	scc_ibc;	/* Internal */
+-	uint	scc_rxtmp;	/* Internal */
+-	uint	scc_tstate;	/* Internal */
+-	uint	scc_tdp;	/* Internal */
+-	ushort	scc_tbptr;	/* Internal */
+-	ushort	scc_tbc;	/* Internal */
+-	uint	scc_txtmp;	/* Internal */
+-	uint	scc_rcrc;	/* Internal */
+-	uint	scc_tcrc;	/* Internal */
+-} sccp_t;
+-
+-/* Function code bits.
+-*/
+-#define SCC_EB	((u_char)0x10)	/* Set big endian byte order */
+-
+-/* CPM Ethernet through SCCx.
+- */
+-typedef struct scc_enet {
+-	sccp_t	sen_genscc;
+-	uint	sen_cpres;	/* Preset CRC */
+-	uint	sen_cmask;	/* Constant mask for CRC */
+-	uint	sen_crcec;	/* CRC Error counter */
+-	uint	sen_alec;	/* alignment error counter */
+-	uint	sen_disfc;	/* discard frame counter */
+-	ushort	sen_pads;	/* Tx short frame pad character */
+-	ushort	sen_retlim;	/* Retry limit threshold */
+-	ushort	sen_retcnt;	/* Retry limit counter */
+-	ushort	sen_maxflr;	/* maximum frame length register */
+-	ushort	sen_minflr;	/* minimum frame length register */
+-	ushort	sen_maxd1;	/* maximum DMA1 length */
+-	ushort	sen_maxd2;	/* maximum DMA2 length */
+-	ushort	sen_maxd;	/* Rx max DMA */
+-	ushort	sen_dmacnt;	/* Rx DMA counter */
+-	ushort	sen_maxb;	/* Max BD byte count */
+-	ushort	sen_gaddr1;	/* Group address filter */
+-	ushort	sen_gaddr2;
+-	ushort	sen_gaddr3;
+-	ushort	sen_gaddr4;
+-	uint	sen_tbuf0data0;	/* Save area 0 - current frame */
+-	uint	sen_tbuf0data1;	/* Save area 1 - current frame */
+-	uint	sen_tbuf0rba;	/* Internal */
+-	uint	sen_tbuf0crc;	/* Internal */
+-	ushort	sen_tbuf0bcnt;	/* Internal */
+-	ushort	sen_paddrh;	/* physical address (MSB) */
+-	ushort	sen_paddrm;
+-	ushort	sen_paddrl;	/* physical address (LSB) */
+-	ushort	sen_pper;	/* persistence */
+-	ushort	sen_rfbdptr;	/* Rx first BD pointer */
+-	ushort	sen_tfbdptr;	/* Tx first BD pointer */
+-	ushort	sen_tlbdptr;	/* Tx last BD pointer */
+-	uint	sen_tbuf1data0;	/* Save area 0 - current frame */
+-	uint	sen_tbuf1data1;	/* Save area 1 - current frame */
+-	uint	sen_tbuf1rba;	/* Internal */
+-	uint	sen_tbuf1crc;	/* Internal */
+-	ushort	sen_tbuf1bcnt;	/* Internal */
+-	ushort	sen_txlen;	/* Tx Frame length counter */
+-	ushort	sen_iaddr1;	/* Individual address filter */
+-	ushort	sen_iaddr2;
+-	ushort	sen_iaddr3;
+-	ushort	sen_iaddr4;
+-	ushort	sen_boffcnt;	/* Backoff counter */
+-
+-	/* NOTE: Some versions of the manual have the following items
+-	 * incorrectly documented.  Below is the proper order.
+-	 */
+-	ushort	sen_taddrh;	/* temp address (MSB) */
+-	ushort	sen_taddrm;
+-	ushort	sen_taddrl;	/* temp address (LSB) */
+-} scc_enet_t;
+-
+-/* SCC Event register as used by Ethernet.
+-*/
+-#define SCCE_ENET_GRA	((ushort)0x0080)	/* Graceful stop complete */
+-#define SCCE_ENET_TXE	((ushort)0x0010)	/* Transmit Error */
+-#define SCCE_ENET_RXF	((ushort)0x0008)	/* Full frame received */
+-#define SCCE_ENET_BSY	((ushort)0x0004)	/* All incoming buffers full */
+-#define SCCE_ENET_TXB	((ushort)0x0002)	/* A buffer was transmitted */
+-#define SCCE_ENET_RXB	((ushort)0x0001)	/* A buffer was received */
+-
+-/* SCC Mode Register (PMSR) as used by Ethernet.
+-*/
+-#define SCC_PSMR_HBC	((ushort)0x8000)	/* Enable heartbeat */
+-#define SCC_PSMR_FC	((ushort)0x4000)	/* Force collision */
+-#define SCC_PSMR_RSH	((ushort)0x2000)	/* Receive short frames */
+-#define SCC_PSMR_IAM	((ushort)0x1000)	/* Check individual hash */
+-#define SCC_PSMR_ENCRC	((ushort)0x0800)	/* Ethernet CRC mode */
+-#define SCC_PSMR_PRO	((ushort)0x0200)	/* Promiscuous mode */
+-#define SCC_PSMR_BRO	((ushort)0x0100)	/* Catch broadcast pkts */
+-#define SCC_PSMR_SBT	((ushort)0x0080)	/* Special backoff timer */
+-#define SCC_PSMR_LPB	((ushort)0x0040)	/* Set Loopback mode */
+-#define SCC_PSMR_SIP	((ushort)0x0020)	/* Sample Input Pins */
+-#define SCC_PSMR_LCW	((ushort)0x0010)	/* Late collision window */
+-#define SCC_PSMR_NIB22	((ushort)0x000a)	/* Start frame search */
+-#define SCC_PSMR_FDE	((ushort)0x0001)	/* Full duplex enable */
+-
+-/* Buffer descriptor control/status used by Ethernet receive.
+-*/
+-#define BD_ENET_RX_EMPTY	((ushort)0x8000)
+-#define BD_ENET_RX_WRAP		((ushort)0x2000)
+-#define BD_ENET_RX_INTR		((ushort)0x1000)
+-#define BD_ENET_RX_LAST		((ushort)0x0800)
+-#define BD_ENET_RX_FIRST	((ushort)0x0400)
+-#define BD_ENET_RX_MISS		((ushort)0x0100)
+-#define BD_ENET_RX_LG		((ushort)0x0020)
+-#define BD_ENET_RX_NO		((ushort)0x0010)
+-#define BD_ENET_RX_SH		((ushort)0x0008)
+-#define BD_ENET_RX_CR		((ushort)0x0004)
+-#define BD_ENET_RX_OV		((ushort)0x0002)
+-#define BD_ENET_RX_CL		((ushort)0x0001)
+-#define BD_ENET_RX_BC		((ushort)0x0080)	/* DA is Broadcast */
+-#define BD_ENET_RX_MC		((ushort)0x0040)	/* DA is Multicast */
+-#define BD_ENET_RX_STATS	((ushort)0x013f)	/* All status bits */
+-
+-/* Buffer descriptor control/status used by Ethernet transmit.
+-*/
+-#define BD_ENET_TX_READY	((ushort)0x8000)
+-#define BD_ENET_TX_PAD		((ushort)0x4000)
+-#define BD_ENET_TX_WRAP		((ushort)0x2000)
+-#define BD_ENET_TX_INTR		((ushort)0x1000)
+-#define BD_ENET_TX_LAST		((ushort)0x0800)
+-#define BD_ENET_TX_TC		((ushort)0x0400)
+-#define BD_ENET_TX_DEF		((ushort)0x0200)
+-#define BD_ENET_TX_HB		((ushort)0x0100)
+-#define BD_ENET_TX_LC		((ushort)0x0080)
+-#define BD_ENET_TX_RL		((ushort)0x0040)
+-#define BD_ENET_TX_RCMASK	((ushort)0x003c)
+-#define BD_ENET_TX_UN		((ushort)0x0002)
+-#define BD_ENET_TX_CSL		((ushort)0x0001)
+-#define BD_ENET_TX_STATS	((ushort)0x03ff)	/* All status bits */
+-
+-/* SCC as UART
+-*/
+-typedef struct scc_uart {
+-	sccp_t	scc_genscc;
+-	char	res1[8];	/* Reserved */
+-	ushort	scc_maxidl;	/* Maximum idle chars */
+-	ushort	scc_idlc;	/* temp idle counter */
+-	ushort	scc_brkcr;	/* Break count register */
+-	ushort	scc_parec;	/* receive parity error counter */
+-	ushort	scc_frmec;	/* receive framing error counter */
+-	ushort	scc_nosec;	/* receive noise counter */
+-	ushort	scc_brkec;	/* receive break condition counter */
+-	ushort	scc_brkln;	/* last received break length */
+-	ushort	scc_uaddr1;	/* UART address character 1 */
+-	ushort	scc_uaddr2;	/* UART address character 2 */
+-	ushort	scc_rtemp;	/* Temp storage */
+-	ushort	scc_toseq;	/* Transmit out of sequence char */
+-	ushort	scc_char1;	/* control character 1 */
+-	ushort	scc_char2;	/* control character 2 */
+-	ushort	scc_char3;	/* control character 3 */
+-	ushort	scc_char4;	/* control character 4 */
+-	ushort	scc_char5;	/* control character 5 */
+-	ushort	scc_char6;	/* control character 6 */
+-	ushort	scc_char7;	/* control character 7 */
+-	ushort	scc_char8;	/* control character 8 */
+-	ushort	scc_rccm;	/* receive control character mask */
+-	ushort	scc_rccr;	/* receive control character register */
+-	ushort	scc_rlbc;	/* receive last break character */
+-} scc_uart_t;
+-
+-/* SCC Event and Mask registers when it is used as a UART.
+-*/
+-#define UART_SCCM_GLR		((ushort)0x1000)
+-#define UART_SCCM_GLT		((ushort)0x0800)
+-#define UART_SCCM_AB		((ushort)0x0200)
+-#define UART_SCCM_IDL		((ushort)0x0100)
+-#define UART_SCCM_GRA		((ushort)0x0080)
+-#define UART_SCCM_BRKE		((ushort)0x0040)
+-#define UART_SCCM_BRKS		((ushort)0x0020)
+-#define UART_SCCM_CCR		((ushort)0x0008)
+-#define UART_SCCM_BSY		((ushort)0x0004)
+-#define UART_SCCM_TX		((ushort)0x0002)
+-#define UART_SCCM_RX		((ushort)0x0001)
+-
+-/* The SCC PMSR when used as a UART.
+-*/
+-#define SCU_PSMR_FLC		((ushort)0x8000)
+-#define SCU_PSMR_SL		((ushort)0x4000)
+-#define SCU_PSMR_CL		((ushort)0x3000)
+-#define SCU_PSMR_UM		((ushort)0x0c00)
+-#define SCU_PSMR_FRZ		((ushort)0x0200)
+-#define SCU_PSMR_RZS		((ushort)0x0100)
+-#define SCU_PSMR_SYN		((ushort)0x0080)
+-#define SCU_PSMR_DRT		((ushort)0x0040)
+-#define SCU_PSMR_PEN		((ushort)0x0010)
+-#define SCU_PSMR_RPM		((ushort)0x000c)
+-#define SCU_PSMR_REVP		((ushort)0x0008)
+-#define SCU_PSMR_TPM		((ushort)0x0003)
+-#define SCU_PSMR_TEVP		((ushort)0x0002)
+-
+-/* CPM Transparent mode SCC.
+- */
+-typedef struct scc_trans {
+-	sccp_t	st_genscc;
+-	uint	st_cpres;	/* Preset CRC */
+-	uint	st_cmask;	/* Constant mask for CRC */
+-} scc_trans_t;
+-
+-#define BD_SCC_TX_LAST		((ushort)0x0800)
+-
+-/* IIC parameter RAM.
+-*/
+-typedef struct iic {
+-	ushort	iic_rbase;	/* Rx Buffer descriptor base address */
+-	ushort	iic_tbase;	/* Tx Buffer descriptor base address */
+-	u_char	iic_rfcr;	/* Rx function code */
+-	u_char	iic_tfcr;	/* Tx function code */
+-	ushort	iic_mrblr;	/* Max receive buffer length */
+-	uint	iic_rstate;	/* Internal */
+-	uint	iic_rdp;	/* Internal */
+-	ushort	iic_rbptr;	/* Internal */
+-	ushort	iic_rbc;	/* Internal */
+-	uint	iic_rxtmp;	/* Internal */
+-	uint	iic_tstate;	/* Internal */
+-	uint	iic_tdp;	/* Internal */
+-	ushort	iic_tbptr;	/* Internal */
+-	ushort	iic_tbc;	/* Internal */
+-	uint	iic_txtmp;	/* Internal */
+-	char	res1[4];	/* Reserved */
+-	ushort	iic_rpbase;	/* Relocation pointer */
+-	char	res2[2];	/* Reserved */
+-} iic_t;
+-
+-#define BD_IIC_START		((ushort)0x0400)
+-
+-/* SPI parameter RAM.
+-*/
+-typedef struct spi {
+-	ushort	spi_rbase;	/* Rx Buffer descriptor base address */
+-	ushort	spi_tbase;	/* Tx Buffer descriptor base address */
+-	u_char	spi_rfcr;	/* Rx function code */
+-	u_char	spi_tfcr;	/* Tx function code */
+-	ushort	spi_mrblr;	/* Max receive buffer length */
+-	uint	spi_rstate;	/* Internal */
+-	uint	spi_rdp;	/* Internal */
+-	ushort	spi_rbptr;	/* Internal */
+-	ushort	spi_rbc;	/* Internal */
+-	uint	spi_rxtmp;	/* Internal */
+-	uint	spi_tstate;	/* Internal */
+-	uint	spi_tdp;	/* Internal */
+-	ushort	spi_tbptr;	/* Internal */
+-	ushort	spi_tbc;	/* Internal */
+-	uint	spi_txtmp;	/* Internal */
+-	uint	spi_res;
+-	ushort	spi_rpbase;	/* Relocation pointer */
+-	ushort	spi_res2;
+-} spi_t;
+-
+-/* SPI Mode register.
+-*/
+-#define SPMODE_LOOP	((ushort)0x4000)	/* Loopback */
+-#define SPMODE_CI	((ushort)0x2000)	/* Clock Invert */
+-#define SPMODE_CP	((ushort)0x1000)	/* Clock Phase */
+-#define SPMODE_DIV16	((ushort)0x0800)	/* BRG/16 mode */
+-#define SPMODE_REV	((ushort)0x0400)	/* Reversed Data */
+-#define SPMODE_MSTR	((ushort)0x0200)	/* SPI Master */
+-#define SPMODE_EN	((ushort)0x0100)	/* Enable */
+-#define SPMODE_LENMSK	((ushort)0x00f0)	/* character length */
+-#define SPMODE_LEN4	((ushort)0x0030)	/*  4 bits per char */
+-#define SPMODE_LEN8	((ushort)0x0070)	/*  8 bits per char */
+-#define SPMODE_LEN16	((ushort)0x00f0)	/* 16 bits per char */
+-#define SPMODE_PMMSK	((ushort)0x000f)	/* prescale modulus */
+-
+-/* SPIE fields */
+-#define SPIE_MME	0x20
+-#define SPIE_TXE	0x10
+-#define SPIE_BSY	0x04
+-#define SPIE_TXB	0x02
+-#define SPIE_RXB	0x01
+-
+-/*
+- * RISC Controller Configuration Register definitons
+- */
+-#define RCCR_TIME	0x8000			/* RISC Timer Enable */
+-#define RCCR_TIMEP(t)	(((t) & 0x3F)<<8)	/* RISC Timer Period */
+-#define RCCR_TIME_MASK	0x00FF			/* not RISC Timer related bits */
+-
+-/* RISC Timer Parameter RAM offset */
+-#define PROFF_RTMR	((uint)0x01B0)
+-
+-typedef struct risc_timer_pram {
+-	unsigned short	tm_base;	/* RISC Timer Table Base Address */
+-	unsigned short	tm_ptr;		/* RISC Timer Table Pointer (internal) */
+-	unsigned short	r_tmr;		/* RISC Timer Mode Register */
+-	unsigned short	r_tmv;		/* RISC Timer Valid Register */
+-	unsigned long	tm_cmd;		/* RISC Timer Command Register */
+-	unsigned long	tm_cnt;		/* RISC Timer Internal Count */
+-} rt_pram_t;
+-
+-/* Bits in RISC Timer Command Register */
+-#define TM_CMD_VALID	0x80000000	/* Valid - Enables the timer */
+-#define TM_CMD_RESTART	0x40000000	/* Restart - for automatic restart */
+-#define TM_CMD_PWM	0x20000000	/* Run in Pulse Width Modulation Mode */
+-#define TM_CMD_NUM(n)	(((n)&0xF)<<16)	/* Timer Number */
+-#define TM_CMD_PERIOD(p) ((p)&0xFFFF)	/* Timer Period */
+-
+-/* CPM interrupts.  There are nearly 32 interrupts generated by CPM
+- * channels or devices.  All of these are presented to the PPC core
+- * as a single interrupt.  The CPM interrupt handler dispatches its
+- * own handlers, in a similar fashion to the PPC core handler.  We
+- * use the table as defined in the manuals (i.e. no special high
+- * priority and SCC1 == SCCa, etc...).
+- */
+-#define CPMVEC_NR		32
+-#define	CPMVEC_PIO_PC15		((ushort)0x1f)
+-#define	CPMVEC_SCC1		((ushort)0x1e)
+-#define	CPMVEC_SCC2		((ushort)0x1d)
+-#define	CPMVEC_SCC3		((ushort)0x1c)
+-#define	CPMVEC_SCC4		((ushort)0x1b)
+-#define	CPMVEC_PIO_PC14		((ushort)0x1a)
+-#define	CPMVEC_TIMER1		((ushort)0x19)
+-#define	CPMVEC_PIO_PC13		((ushort)0x18)
+-#define	CPMVEC_PIO_PC12		((ushort)0x17)
+-#define	CPMVEC_SDMA_CB_ERR	((ushort)0x16)
+-#define CPMVEC_IDMA1		((ushort)0x15)
+-#define CPMVEC_IDMA2		((ushort)0x14)
+-#define CPMVEC_TIMER2		((ushort)0x12)
+-#define CPMVEC_RISCTIMER	((ushort)0x11)
+-#define CPMVEC_I2C		((ushort)0x10)
+-#define	CPMVEC_PIO_PC11		((ushort)0x0f)
+-#define	CPMVEC_PIO_PC10		((ushort)0x0e)
+-#define CPMVEC_TIMER3		((ushort)0x0c)
+-#define	CPMVEC_PIO_PC9		((ushort)0x0b)
+-#define	CPMVEC_PIO_PC8		((ushort)0x0a)
+-#define	CPMVEC_PIO_PC7		((ushort)0x09)
+-#define CPMVEC_TIMER4		((ushort)0x07)
+-#define	CPMVEC_PIO_PC6		((ushort)0x06)
+-#define	CPMVEC_SPI		((ushort)0x05)
+-#define	CPMVEC_SMC1		((ushort)0x04)
+-#define	CPMVEC_SMC2		((ushort)0x03)
+-#define	CPMVEC_PIO_PC5		((ushort)0x02)
+-#define	CPMVEC_PIO_PC4		((ushort)0x01)
+-#define	CPMVEC_ERROR		((ushort)0x00)
+-
+-/* CPM interrupt configuration vector.
+-*/
+-#define	CICR_SCD_SCC4		((uint)0x00c00000)	/* SCC4 @ SCCd */
+-#define	CICR_SCC_SCC3		((uint)0x00200000)	/* SCC3 @ SCCc */
+-#define	CICR_SCB_SCC2		((uint)0x00040000)	/* SCC2 @ SCCb */
+-#define	CICR_SCA_SCC1		((uint)0x00000000)	/* SCC1 @ SCCa */
+-#define CICR_IRL_MASK		((uint)0x0000e000)	/* Core interrrupt */
+-#define CICR_HP_MASK		((uint)0x00001f00)	/* Hi-pri int. */
+-#define CICR_IEN		((uint)0x00000080)	/* Int. enable */
+-#define CICR_SPS		((uint)0x00000001)	/* SCC Spread */
+-
+-#define IMAP_ADDR		(get_immrbase())
+-
+-#define CPM_PIN_INPUT     0
+-#define CPM_PIN_OUTPUT    1
+-#define CPM_PIN_PRIMARY   0
+-#define CPM_PIN_SECONDARY 2
+-#define CPM_PIN_GPIO      4
+-#define CPM_PIN_OPENDRAIN 8
+-
+-enum cpm_port {
+-	CPM_PORTA,
+-	CPM_PORTB,
+-	CPM_PORTC,
+-	CPM_PORTD,
+-	CPM_PORTE,
+-};
+-
+-void cpm1_set_pin(enum cpm_port port, int pin, int flags);
+-
+-enum cpm_clk_dir {
+-	CPM_CLK_RX,
+-	CPM_CLK_TX,
+-	CPM_CLK_RTX
+-};
+-
+-enum cpm_clk_target {
+-	CPM_CLK_SCC1,
+-	CPM_CLK_SCC2,
+-	CPM_CLK_SCC3,
+-	CPM_CLK_SCC4,
+-	CPM_CLK_SMC1,
+-	CPM_CLK_SMC2,
+-};
+-
+-enum cpm_clk {
+-	CPM_BRG1,	/* Baud Rate Generator  1 */
+-	CPM_BRG2,	/* Baud Rate Generator  2 */
+-	CPM_BRG3,	/* Baud Rate Generator  3 */
+-	CPM_BRG4,	/* Baud Rate Generator  4 */
+-	CPM_CLK1,	/* Clock  1 */
+-	CPM_CLK2,	/* Clock  2 */
+-	CPM_CLK3,	/* Clock  3 */
+-	CPM_CLK4,	/* Clock  4 */
+-	CPM_CLK5,	/* Clock  5 */
+-	CPM_CLK6,	/* Clock  6 */
+-	CPM_CLK7,	/* Clock  7 */
+-	CPM_CLK8,	/* Clock  8 */
+-};
+-
+-int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode);
+-
+-#endif /* __CPM_8XX__ */
+diff --git a/include/asm-powerpc/cpm.h b/include/asm-powerpc/cpm.h
+index 48df9f3..77e39da 100644
+--- a/include/asm-powerpc/cpm.h
++++ b/include/asm-powerpc/cpm.h
+@@ -4,11 +4,85 @@
+ #include <linux/compiler.h>
+ #include <linux/types.h>
+ 
++/* Buffer descriptors used by many of the CPM protocols. */
++typedef struct cpm_buf_desc {
++	ushort	cbd_sc;		/* Status and Control */
++	ushort	cbd_datlen;	/* Data length in buffer */
++	uint	cbd_bufaddr;	/* Buffer address in host memory */
++} cbd_t;
++
++/* Buffer descriptor control/status used by serial
++ */
++
++#define BD_SC_EMPTY	(0x8000)	/* Receive is empty */
++#define BD_SC_READY	(0x8000)	/* Transmit is ready */
++#define BD_SC_WRAP	(0x2000)	/* Last buffer descriptor */
++#define BD_SC_INTRPT	(0x1000)	/* Interrupt on change */
++#define BD_SC_LAST	(0x0800)	/* Last buffer in frame */
++#define BD_SC_TC	(0x0400)	/* Transmit CRC */
++#define BD_SC_CM	(0x0200)	/* Continuous mode */
++#define BD_SC_ID	(0x0100)	/* Rec'd too many idles */
++#define BD_SC_P		(0x0100)	/* xmt preamble */
++#define BD_SC_BR	(0x0020)	/* Break received */
++#define BD_SC_FR	(0x0010)	/* Framing error */
++#define BD_SC_PR	(0x0008)	/* Parity error */
++#define BD_SC_NAK	(0x0004)	/* NAK - did not respond */
++#define BD_SC_OV	(0x0002)	/* Overrun */
++#define BD_SC_UN	(0x0002)	/* Underrun */
++#define BD_SC_CD	(0x0001)	/* */
++#define BD_SC_CL	(0x0001)	/* Collision */
++
++/* Buffer descriptor control/status used by Ethernet receive.
++ * Common to SCC and FCC.
++ */
++#define BD_ENET_RX_EMPTY	(0x8000)
++#define BD_ENET_RX_WRAP		(0x2000)
++#define BD_ENET_RX_INTR		(0x1000)
++#define BD_ENET_RX_LAST		(0x0800)
++#define BD_ENET_RX_FIRST	(0x0400)
++#define BD_ENET_RX_MISS		(0x0100)
++#define BD_ENET_RX_BC		(0x0080)	/* FCC Only */
++#define BD_ENET_RX_MC		(0x0040)	/* FCC Only */
++#define BD_ENET_RX_LG		(0x0020)
++#define BD_ENET_RX_NO		(0x0010)
++#define BD_ENET_RX_SH		(0x0008)
++#define BD_ENET_RX_CR		(0x0004)
++#define BD_ENET_RX_OV		(0x0002)
++#define BD_ENET_RX_CL		(0x0001)
++#define BD_ENET_RX_STATS	(0x01ff)	/* All status bits */
++
++/* Buffer descriptor control/status used by Ethernet transmit.
++ * Common to SCC and FCC.
++ */
++#define BD_ENET_TX_READY	(0x8000)
++#define BD_ENET_TX_PAD		(0x4000)
++#define BD_ENET_TX_WRAP		(0x2000)
++#define BD_ENET_TX_INTR		(0x1000)
++#define BD_ENET_TX_LAST		(0x0800)
++#define BD_ENET_TX_TC		(0x0400)
++#define BD_ENET_TX_DEF		(0x0200)
++#define BD_ENET_TX_HB		(0x0100)
++#define BD_ENET_TX_LC		(0x0080)
++#define BD_ENET_TX_RL		(0x0040)
++#define BD_ENET_TX_RCMASK	(0x003c)
++#define BD_ENET_TX_UN		(0x0002)
++#define BD_ENET_TX_CSL		(0x0001)
++#define BD_ENET_TX_STATS	(0x03ff)	/* All status bits */
++
++/* Buffer descriptor control/status used by Transparent mode SCC.
++ */
++#define BD_SCC_TX_LAST		(0x0800)
++
++/* Buffer descriptor control/status used by I2C.
++ */
++#define BD_I2C_START		(0x0400)
++
+ int cpm_muram_init(void);
+ unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
+ int cpm_muram_free(unsigned long offset);
+ unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+ void __iomem *cpm_muram_addr(unsigned long offset);
+ dma_addr_t cpm_muram_dma(void __iomem *addr);
++int cpm_command(u32 command, u8 opcode);
+ 
+ #endif
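The buffer descriptor layout and BD_* flags that this hunk moves into cpm.h are shared by most CPM protocols. As a rough illustration of how they fit together with the MURAM helpers declared at the end of the hunk, here is a minimal sketch of setting up a receive ring; the function name, ring size and the omitted error handling are illustrative, not part of the patch.

#include <asm/cpm.h>
#include <asm/io.h>

#define MY_NUM_RX_BDS	8	/* arbitrary ring size for the example */

/* Sketch only: carve a ring of cbd_t out of MURAM and mark every
 * descriptor empty, closing the ring with BD_ENET_RX_WRAP.  Buffer
 * allocation and cpm_muram_alloc() failure handling are omitted.
 */
static cbd_t __iomem *my_setup_rx_ring(void)
{
	unsigned long offset;
	cbd_t __iomem *bdp;
	int i;

	offset = cpm_muram_alloc(MY_NUM_RX_BDS * sizeof(cbd_t), 8);
	bdp = cpm_muram_addr(offset);

	for (i = 0; i < MY_NUM_RX_BDS; i++) {
		ushort sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;

		if (i == MY_NUM_RX_BDS - 1)
			sc |= BD_ENET_RX_WRAP;	/* last BD wraps the ring */
		out_be16(&bdp[i].cbd_sc, sc);
		out_be16(&bdp[i].cbd_datlen, 0);
		/* cbd_bufaddr would be set to a DMA-able buffer here */
	}
	return bdp;
}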
+diff --git a/include/asm-powerpc/cpm1.h b/include/asm-powerpc/cpm1.h
+new file mode 100644
+index 0000000..b2ebd6a
+--- /dev/null
++++ b/include/asm-powerpc/cpm1.h
+@@ -0,0 +1,685 @@
++/*
++ * MPC8xx Communication Processor Module.
++ * Copyright (c) 1997 Dan Malek (dmalek at jlc.net)
++ *
++ * This file contains structures and information for the communication
++ * processor channels.  Some CPM control and status is available
++ * through the MPC8xx internal memory map.  See immap.h for details.
++ * This file only contains what I need for the moment, not the total
++ * CPM capabilities.  I (or someone else) will add definitions as they
++ * are needed.  -- Dan
++ *
++ * On the MBX board, EPPC-Bug loads CPM microcode into the first 512
++ * bytes of the DP RAM and relocates the I2C parameter area to the
++ * IDMA1 space.  The remaining DP RAM is available for buffer descriptors
++ * or other use.
++ */
++#ifndef __CPM1__
++#define __CPM1__
++
++#include <asm/8xx_immap.h>
++#include <asm/ptrace.h>
++#include <asm/cpm.h>
++
++/* CPM Command register.
++*/
++#define CPM_CR_RST	((ushort)0x8000)
++#define CPM_CR_OPCODE	((ushort)0x0f00)
++#define CPM_CR_CHAN	((ushort)0x00f0)
++#define CPM_CR_FLG	((ushort)0x0001)
++
++/* Some commands (there are more...later)
++*/
++#define CPM_CR_INIT_TRX		((ushort)0x0000)
++#define CPM_CR_INIT_RX		((ushort)0x0001)
++#define CPM_CR_INIT_TX		((ushort)0x0002)
++#define CPM_CR_HUNT_MODE	((ushort)0x0003)
++#define CPM_CR_STOP_TX		((ushort)0x0004)
++#define CPM_CR_GRA_STOP_TX	((ushort)0x0005)
++#define CPM_CR_RESTART_TX	((ushort)0x0006)
++#define CPM_CR_CLOSE_RX_BD	((ushort)0x0007)
++#define CPM_CR_SET_GADDR	((ushort)0x0008)
++#define CPM_CR_SET_TIMER	CPM_CR_SET_GADDR
++
++/* Channel numbers.
++*/
++#define CPM_CR_CH_SCC1		((ushort)0x0000)
++#define CPM_CR_CH_I2C		((ushort)0x0001)	/* I2C and IDMA1 */
++#define CPM_CR_CH_SCC2		((ushort)0x0004)
++#define CPM_CR_CH_SPI		((ushort)0x0005)	/* SPI / IDMA2 / Timers */
++#define CPM_CR_CH_TIMER		CPM_CR_CH_SPI
++#define CPM_CR_CH_SCC3		((ushort)0x0008)
++#define CPM_CR_CH_SMC1		((ushort)0x0009)	/* SMC1 / DSP1 */
++#define CPM_CR_CH_SCC4		((ushort)0x000c)
++#define CPM_CR_CH_SMC2		((ushort)0x000d)	/* SMC2 / DSP2 */
++
++#define mk_cr_cmd(CH, CMD)	((CMD << 8) | (CH << 4))
++
++#ifndef CONFIG_PPC_CPM_NEW_BINDING
++/* The dual ported RAM is multi-functional.  Some areas can be (and are
++ * being) used for microcode.  There is an area that can only be used
++ * as data ram for buffer descriptors, which is all we use right now.
++ * Currently the first 512 and last 256 bytes are used for microcode.
++ */
++#define CPM_DATAONLY_BASE	((uint)0x0800)
++#define CPM_DATAONLY_SIZE	((uint)0x0700)
++#define CPM_DP_NOSPACE		((uint)0x7fffffff)
++#endif
++
++/* Export the base address of the communication processor registers
++ * and dual port ram.
++ */
++extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
++
++#ifdef CONFIG_PPC_CPM_NEW_BINDING
++#define cpm_dpalloc cpm_muram_alloc
++#define cpm_dpfree cpm_muram_free
++#define cpm_dpram_addr cpm_muram_addr
++#define cpm_dpram_phys cpm_muram_dma
++#else
++extern unsigned long cpm_dpalloc(uint size, uint align);
++extern int cpm_dpfree(unsigned long offset);
++extern unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align);
++extern void cpm_dpdump(void);
++extern void *cpm_dpram_addr(unsigned long offset);
++extern uint cpm_dpram_phys(u8 *addr);
++#endif
++
++extern void cpm_setbrg(uint brg, uint rate);
++
++extern void cpm_load_patch(cpm8xx_t *cp);
++
++extern void cpm_reset(void);
++
++/* Parameter RAM offsets.
++*/
++#define PROFF_SCC1	((uint)0x0000)
++#define PROFF_IIC	((uint)0x0080)
++#define PROFF_SCC2	((uint)0x0100)
++#define PROFF_SPI	((uint)0x0180)
++#define PROFF_SCC3	((uint)0x0200)
++#define PROFF_SMC1	((uint)0x0280)
++#define PROFF_SCC4	((uint)0x0300)
++#define PROFF_SMC2	((uint)0x0380)
++
++/* Define enough so I can at least use the serial port as a UART.
++ * The MBX uses SMC1 as the host serial port.
++ */
++typedef struct smc_uart {
++	ushort	smc_rbase;	/* Rx Buffer descriptor base address */
++	ushort	smc_tbase;	/* Tx Buffer descriptor base address */
++	u_char	smc_rfcr;	/* Rx function code */
++	u_char	smc_tfcr;	/* Tx function code */
++	ushort	smc_mrblr;	/* Max receive buffer length */
++	uint	smc_rstate;	/* Internal */
++	uint	smc_idp;	/* Internal */
++	ushort	smc_rbptr;	/* Internal */
++	ushort	smc_ibc;	/* Internal */
++	uint	smc_rxtmp;	/* Internal */
++	uint	smc_tstate;	/* Internal */
++	uint	smc_tdp;	/* Internal */
++	ushort	smc_tbptr;	/* Internal */
++	ushort	smc_tbc;	/* Internal */
++	uint	smc_txtmp;	/* Internal */
++	ushort	smc_maxidl;	/* Maximum idle characters */
++	ushort	smc_tmpidl;	/* Temporary idle counter */
++	ushort	smc_brklen;	/* Last received break length */
++	ushort	smc_brkec;	/* rcv'd break condition counter */
++	ushort	smc_brkcr;	/* xmt break count register */
++	ushort	smc_rmask;	/* Temporary bit mask */
++	char	res1[8];	/* Reserved */
++	ushort	smc_rpbase;	/* Relocation pointer */
++} smc_uart_t;
++
++/* Function code bits.
++*/
++#define SMC_EB	((u_char)0x10)	/* Set big endian byte order */
++
++/* SMC uart mode register.
++*/
++#define	SMCMR_REN	((ushort)0x0001)
++#define SMCMR_TEN	((ushort)0x0002)
++#define SMCMR_DM	((ushort)0x000c)
++#define SMCMR_SM_GCI	((ushort)0x0000)
++#define SMCMR_SM_UART	((ushort)0x0020)
++#define SMCMR_SM_TRANS	((ushort)0x0030)
++#define SMCMR_SM_MASK	((ushort)0x0030)
++#define SMCMR_PM_EVEN	((ushort)0x0100)	/* Even parity, else odd */
++#define SMCMR_REVD	SMCMR_PM_EVEN
++#define SMCMR_PEN	((ushort)0x0200)	/* Parity enable */
++#define SMCMR_BS	SMCMR_PEN
++#define SMCMR_SL	((ushort)0x0400)	/* Two stops, else one */
++#define SMCR_CLEN_MASK	((ushort)0x7800)	/* Character length */
++#define smcr_mk_clen(C)	(((C) << 11) & SMCR_CLEN_MASK)
++
++/* SMC2 as Centronics parallel printer.  It is half duplex, in that
++ * it can only receive or transmit.  The parameter ram values for
++ * each direction are either unique or properly overlap, so we can
++ * include them in one structure.
++ */
++typedef struct smc_centronics {
++	ushort	scent_rbase;
++	ushort	scent_tbase;
++	u_char	scent_cfcr;
++	u_char	scent_smask;
++	ushort	scent_mrblr;
++	uint	scent_rstate;
++	uint	scent_r_ptr;
++	ushort	scent_rbptr;
++	ushort	scent_r_cnt;
++	uint	scent_rtemp;
++	uint	scent_tstate;
++	uint	scent_t_ptr;
++	ushort	scent_tbptr;
++	ushort	scent_t_cnt;
++	uint	scent_ttemp;
++	ushort	scent_max_sl;
++	ushort	scent_sl_cnt;
++	ushort	scent_character1;
++	ushort	scent_character2;
++	ushort	scent_character3;
++	ushort	scent_character4;
++	ushort	scent_character5;
++	ushort	scent_character6;
++	ushort	scent_character7;
++	ushort	scent_character8;
++	ushort	scent_rccm;
++	ushort	scent_rccr;
++} smc_cent_t;
++
++/* Centronics Status Mask Register.
++*/
++#define SMC_CENT_F	((u_char)0x08)
++#define SMC_CENT_PE	((u_char)0x04)
++#define SMC_CENT_S	((u_char)0x02)
++
++/* SMC Event and Mask register.
++*/
++#define	SMCM_BRKE	((unsigned char)0x40)	/* When in UART Mode */
++#define	SMCM_BRK	((unsigned char)0x10)	/* When in UART Mode */
++#define	SMCM_TXE	((unsigned char)0x10)	/* When in Transparent Mode */
++#define	SMCM_BSY	((unsigned char)0x04)
++#define	SMCM_TX		((unsigned char)0x02)
++#define	SMCM_RX		((unsigned char)0x01)
++
++/* Baud rate generators.
++*/
++#define CPM_BRG_RST		((uint)0x00020000)
++#define CPM_BRG_EN		((uint)0x00010000)
++#define CPM_BRG_EXTC_INT	((uint)0x00000000)
++#define CPM_BRG_EXTC_CLK2	((uint)0x00004000)
++#define CPM_BRG_EXTC_CLK6	((uint)0x00008000)
++#define CPM_BRG_ATB		((uint)0x00002000)
++#define CPM_BRG_CD_MASK		((uint)0x00001ffe)
++#define CPM_BRG_DIV16		((uint)0x00000001)
++
++/* SI Clock Route Register
++*/
++#define SICR_RCLK_SCC1_BRG1	((uint)0x00000000)
++#define SICR_TCLK_SCC1_BRG1	((uint)0x00000000)
++#define SICR_RCLK_SCC2_BRG2	((uint)0x00000800)
++#define SICR_TCLK_SCC2_BRG2	((uint)0x00000100)
++#define SICR_RCLK_SCC3_BRG3	((uint)0x00100000)
++#define SICR_TCLK_SCC3_BRG3	((uint)0x00020000)
++#define SICR_RCLK_SCC4_BRG4	((uint)0x18000000)
++#define SICR_TCLK_SCC4_BRG4	((uint)0x03000000)
++
++/* SCCs.
++*/
++#define SCC_GSMRH_IRP		((uint)0x00040000)
++#define SCC_GSMRH_GDE		((uint)0x00010000)
++#define SCC_GSMRH_TCRC_CCITT	((uint)0x00008000)
++#define SCC_GSMRH_TCRC_BISYNC	((uint)0x00004000)
++#define SCC_GSMRH_TCRC_HDLC	((uint)0x00000000)
++#define SCC_GSMRH_REVD		((uint)0x00002000)
++#define SCC_GSMRH_TRX		((uint)0x00001000)
++#define SCC_GSMRH_TTX		((uint)0x00000800)
++#define SCC_GSMRH_CDP		((uint)0x00000400)
++#define SCC_GSMRH_CTSP		((uint)0x00000200)
++#define SCC_GSMRH_CDS		((uint)0x00000100)
++#define SCC_GSMRH_CTSS		((uint)0x00000080)
++#define SCC_GSMRH_TFL		((uint)0x00000040)
++#define SCC_GSMRH_RFW		((uint)0x00000020)
++#define SCC_GSMRH_TXSY		((uint)0x00000010)
++#define SCC_GSMRH_SYNL16	((uint)0x0000000c)
++#define SCC_GSMRH_SYNL8		((uint)0x00000008)
++#define SCC_GSMRH_SYNL4		((uint)0x00000004)
++#define SCC_GSMRH_RTSM		((uint)0x00000002)
++#define SCC_GSMRH_RSYN		((uint)0x00000001)
++
++#define SCC_GSMRL_SIR		((uint)0x80000000)	/* SCC2 only */
++#define SCC_GSMRL_EDGE_NONE	((uint)0x60000000)
++#define SCC_GSMRL_EDGE_NEG	((uint)0x40000000)
++#define SCC_GSMRL_EDGE_POS	((uint)0x20000000)
++#define SCC_GSMRL_EDGE_BOTH	((uint)0x00000000)
++#define SCC_GSMRL_TCI		((uint)0x10000000)
++#define SCC_GSMRL_TSNC_3	((uint)0x0c000000)
++#define SCC_GSMRL_TSNC_4	((uint)0x08000000)
++#define SCC_GSMRL_TSNC_14	((uint)0x04000000)
++#define SCC_GSMRL_TSNC_INF	((uint)0x00000000)
++#define SCC_GSMRL_RINV		((uint)0x02000000)
++#define SCC_GSMRL_TINV		((uint)0x01000000)
++#define SCC_GSMRL_TPL_128	((uint)0x00c00000)
++#define SCC_GSMRL_TPL_64	((uint)0x00a00000)
++#define SCC_GSMRL_TPL_48	((uint)0x00800000)
++#define SCC_GSMRL_TPL_32	((uint)0x00600000)
++#define SCC_GSMRL_TPL_16	((uint)0x00400000)
++#define SCC_GSMRL_TPL_8		((uint)0x00200000)
++#define SCC_GSMRL_TPL_NONE	((uint)0x00000000)
++#define SCC_GSMRL_TPP_ALL1	((uint)0x00180000)
++#define SCC_GSMRL_TPP_01	((uint)0x00100000)
++#define SCC_GSMRL_TPP_10	((uint)0x00080000)
++#define SCC_GSMRL_TPP_ZEROS	((uint)0x00000000)
++#define SCC_GSMRL_TEND		((uint)0x00040000)
++#define SCC_GSMRL_TDCR_32	((uint)0x00030000)
++#define SCC_GSMRL_TDCR_16	((uint)0x00020000)
++#define SCC_GSMRL_TDCR_8	((uint)0x00010000)
++#define SCC_GSMRL_TDCR_1	((uint)0x00000000)
++#define SCC_GSMRL_RDCR_32	((uint)0x0000c000)
++#define SCC_GSMRL_RDCR_16	((uint)0x00008000)
++#define SCC_GSMRL_RDCR_8	((uint)0x00004000)
++#define SCC_GSMRL_RDCR_1	((uint)0x00000000)
++#define SCC_GSMRL_RENC_DFMAN	((uint)0x00003000)
++#define SCC_GSMRL_RENC_MANCH	((uint)0x00002000)
++#define SCC_GSMRL_RENC_FM0	((uint)0x00001000)
++#define SCC_GSMRL_RENC_NRZI	((uint)0x00000800)
++#define SCC_GSMRL_RENC_NRZ	((uint)0x00000000)
++#define SCC_GSMRL_TENC_DFMAN	((uint)0x00000600)
++#define SCC_GSMRL_TENC_MANCH	((uint)0x00000400)
++#define SCC_GSMRL_TENC_FM0	((uint)0x00000200)
++#define SCC_GSMRL_TENC_NRZI	((uint)0x00000100)
++#define SCC_GSMRL_TENC_NRZ	((uint)0x00000000)
++#define SCC_GSMRL_DIAG_LE	((uint)0x000000c0)	/* Loop and echo */
++#define SCC_GSMRL_DIAG_ECHO	((uint)0x00000080)
++#define SCC_GSMRL_DIAG_LOOP	((uint)0x00000040)
++#define SCC_GSMRL_DIAG_NORM	((uint)0x00000000)
++#define SCC_GSMRL_ENR		((uint)0x00000020)
++#define SCC_GSMRL_ENT		((uint)0x00000010)
++#define SCC_GSMRL_MODE_ENET	((uint)0x0000000c)
++#define SCC_GSMRL_MODE_QMC	((uint)0x0000000a)
++#define SCC_GSMRL_MODE_DDCMP	((uint)0x00000009)
++#define SCC_GSMRL_MODE_BISYNC	((uint)0x00000008)
++#define SCC_GSMRL_MODE_V14	((uint)0x00000007)
++#define SCC_GSMRL_MODE_AHDLC	((uint)0x00000006)
++#define SCC_GSMRL_MODE_PROFIBUS	((uint)0x00000005)
++#define SCC_GSMRL_MODE_UART	((uint)0x00000004)
++#define SCC_GSMRL_MODE_SS7	((uint)0x00000003)
++#define SCC_GSMRL_MODE_ATALK	((uint)0x00000002)
++#define SCC_GSMRL_MODE_HDLC	((uint)0x00000000)
++
++#define SCC_TODR_TOD		((ushort)0x8000)
++
++/* SCC Event and Mask register.
++*/
++#define	SCCM_TXE	((unsigned char)0x10)
++#define	SCCM_BSY	((unsigned char)0x04)
++#define	SCCM_TX		((unsigned char)0x02)
++#define	SCCM_RX		((unsigned char)0x01)
++
++typedef struct scc_param {
++	ushort	scc_rbase;	/* Rx Buffer descriptor base address */
++	ushort	scc_tbase;	/* Tx Buffer descriptor base address */
++	u_char	scc_rfcr;	/* Rx function code */
++	u_char	scc_tfcr;	/* Tx function code */
++	ushort	scc_mrblr;	/* Max receive buffer length */
++	uint	scc_rstate;	/* Internal */
++	uint	scc_idp;	/* Internal */
++	ushort	scc_rbptr;	/* Internal */
++	ushort	scc_ibc;	/* Internal */
++	uint	scc_rxtmp;	/* Internal */
++	uint	scc_tstate;	/* Internal */
++	uint	scc_tdp;	/* Internal */
++	ushort	scc_tbptr;	/* Internal */
++	ushort	scc_tbc;	/* Internal */
++	uint	scc_txtmp;	/* Internal */
++	uint	scc_rcrc;	/* Internal */
++	uint	scc_tcrc;	/* Internal */
++} sccp_t;
++
++/* Function code bits.
++*/
++#define SCC_EB	((u_char)0x10)	/* Set big endian byte order */
++
++/* CPM Ethernet through SCCx.
++ */
++typedef struct scc_enet {
++	sccp_t	sen_genscc;
++	uint	sen_cpres;	/* Preset CRC */
++	uint	sen_cmask;	/* Constant mask for CRC */
++	uint	sen_crcec;	/* CRC Error counter */
++	uint	sen_alec;	/* alignment error counter */
++	uint	sen_disfc;	/* discard frame counter */
++	ushort	sen_pads;	/* Tx short frame pad character */
++	ushort	sen_retlim;	/* Retry limit threshold */
++	ushort	sen_retcnt;	/* Retry limit counter */
++	ushort	sen_maxflr;	/* maximum frame length register */
++	ushort	sen_minflr;	/* minimum frame length register */
++	ushort	sen_maxd1;	/* maximum DMA1 length */
++	ushort	sen_maxd2;	/* maximum DMA2 length */
++	ushort	sen_maxd;	/* Rx max DMA */
++	ushort	sen_dmacnt;	/* Rx DMA counter */
++	ushort	sen_maxb;	/* Max BD byte count */
++	ushort	sen_gaddr1;	/* Group address filter */
++	ushort	sen_gaddr2;
++	ushort	sen_gaddr3;
++	ushort	sen_gaddr4;
++	uint	sen_tbuf0data0;	/* Save area 0 - current frame */
++	uint	sen_tbuf0data1;	/* Save area 1 - current frame */
++	uint	sen_tbuf0rba;	/* Internal */
++	uint	sen_tbuf0crc;	/* Internal */
++	ushort	sen_tbuf0bcnt;	/* Internal */
++	ushort	sen_paddrh;	/* physical address (MSB) */
++	ushort	sen_paddrm;
++	ushort	sen_paddrl;	/* physical address (LSB) */
++	ushort	sen_pper;	/* persistence */
++	ushort	sen_rfbdptr;	/* Rx first BD pointer */
++	ushort	sen_tfbdptr;	/* Tx first BD pointer */
++	ushort	sen_tlbdptr;	/* Tx last BD pointer */
++	uint	sen_tbuf1data0;	/* Save area 0 - current frame */
++	uint	sen_tbuf1data1;	/* Save area 1 - current frame */
++	uint	sen_tbuf1rba;	/* Internal */
++	uint	sen_tbuf1crc;	/* Internal */
++	ushort	sen_tbuf1bcnt;	/* Internal */
++	ushort	sen_txlen;	/* Tx Frame length counter */
++	ushort	sen_iaddr1;	/* Individual address filter */
++	ushort	sen_iaddr2;
++	ushort	sen_iaddr3;
++	ushort	sen_iaddr4;
++	ushort	sen_boffcnt;	/* Backoff counter */
++
++	/* NOTE: Some versions of the manual have the following items
++	 * incorrectly documented.  Below is the proper order.
++	 */
++	ushort	sen_taddrh;	/* temp address (MSB) */
++	ushort	sen_taddrm;
++	ushort	sen_taddrl;	/* temp address (LSB) */
++} scc_enet_t;
++
++/* SCC Event register as used by Ethernet.
++*/
++#define SCCE_ENET_GRA	((ushort)0x0080)	/* Graceful stop complete */
++#define SCCE_ENET_TXE	((ushort)0x0010)	/* Transmit Error */
++#define SCCE_ENET_RXF	((ushort)0x0008)	/* Full frame received */
++#define SCCE_ENET_BSY	((ushort)0x0004)	/* All incoming buffers full */
++#define SCCE_ENET_TXB	((ushort)0x0002)	/* A buffer was transmitted */
++#define SCCE_ENET_RXB	((ushort)0x0001)	/* A buffer was received */
++
++/* SCC Mode Register (PMSR) as used by Ethernet.
++*/
++#define SCC_PSMR_HBC	((ushort)0x8000)	/* Enable heartbeat */
++#define SCC_PSMR_FC	((ushort)0x4000)	/* Force collision */
++#define SCC_PSMR_RSH	((ushort)0x2000)	/* Receive short frames */
++#define SCC_PSMR_IAM	((ushort)0x1000)	/* Check individual hash */
++#define SCC_PSMR_ENCRC	((ushort)0x0800)	/* Ethernet CRC mode */
++#define SCC_PSMR_PRO	((ushort)0x0200)	/* Promiscuous mode */
++#define SCC_PSMR_BRO	((ushort)0x0100)	/* Catch broadcast pkts */
++#define SCC_PSMR_SBT	((ushort)0x0080)	/* Special backoff timer */
++#define SCC_PSMR_LPB	((ushort)0x0040)	/* Set Loopback mode */
++#define SCC_PSMR_SIP	((ushort)0x0020)	/* Sample Input Pins */
++#define SCC_PSMR_LCW	((ushort)0x0010)	/* Late collision window */
++#define SCC_PSMR_NIB22	((ushort)0x000a)	/* Start frame search */
++#define SCC_PSMR_FDE	((ushort)0x0001)	/* Full duplex enable */
++
++/* SCC as UART
++*/
++typedef struct scc_uart {
++	sccp_t	scc_genscc;
++	char	res1[8];	/* Reserved */
++	ushort	scc_maxidl;	/* Maximum idle chars */
++	ushort	scc_idlc;	/* temp idle counter */
++	ushort	scc_brkcr;	/* Break count register */
++	ushort	scc_parec;	/* receive parity error counter */
++	ushort	scc_frmec;	/* receive framing error counter */
++	ushort	scc_nosec;	/* receive noise counter */
++	ushort	scc_brkec;	/* receive break condition counter */
++	ushort	scc_brkln;	/* last received break length */
++	ushort	scc_uaddr1;	/* UART address character 1 */
++	ushort	scc_uaddr2;	/* UART address character 2 */
++	ushort	scc_rtemp;	/* Temp storage */
++	ushort	scc_toseq;	/* Transmit out of sequence char */
++	ushort	scc_char1;	/* control character 1 */
++	ushort	scc_char2;	/* control character 2 */
++	ushort	scc_char3;	/* control character 3 */
++	ushort	scc_char4;	/* control character 4 */
++	ushort	scc_char5;	/* control character 5 */
++	ushort	scc_char6;	/* control character 6 */
++	ushort	scc_char7;	/* control character 7 */
++	ushort	scc_char8;	/* control character 8 */
++	ushort	scc_rccm;	/* receive control character mask */
++	ushort	scc_rccr;	/* receive control character register */
++	ushort	scc_rlbc;	/* receive last break character */
++} scc_uart_t;
++
++/* SCC Event and Mask registers when it is used as a UART.
++*/
++#define UART_SCCM_GLR		((ushort)0x1000)
++#define UART_SCCM_GLT		((ushort)0x0800)
++#define UART_SCCM_AB		((ushort)0x0200)
++#define UART_SCCM_IDL		((ushort)0x0100)
++#define UART_SCCM_GRA		((ushort)0x0080)
++#define UART_SCCM_BRKE		((ushort)0x0040)
++#define UART_SCCM_BRKS		((ushort)0x0020)
++#define UART_SCCM_CCR		((ushort)0x0008)
++#define UART_SCCM_BSY		((ushort)0x0004)
++#define UART_SCCM_TX		((ushort)0x0002)
++#define UART_SCCM_RX		((ushort)0x0001)
++
++/* The SCC PMSR when used as a UART.
++*/
++#define SCU_PSMR_FLC		((ushort)0x8000)
++#define SCU_PSMR_SL		((ushort)0x4000)
++#define SCU_PSMR_CL		((ushort)0x3000)
++#define SCU_PSMR_UM		((ushort)0x0c00)
++#define SCU_PSMR_FRZ		((ushort)0x0200)
++#define SCU_PSMR_RZS		((ushort)0x0100)
++#define SCU_PSMR_SYN		((ushort)0x0080)
++#define SCU_PSMR_DRT		((ushort)0x0040)
++#define SCU_PSMR_PEN		((ushort)0x0010)
++#define SCU_PSMR_RPM		((ushort)0x000c)
++#define SCU_PSMR_REVP		((ushort)0x0008)
++#define SCU_PSMR_TPM		((ushort)0x0003)
++#define SCU_PSMR_TEVP		((ushort)0x0002)
++
++/* CPM Transparent mode SCC.
++ */
++typedef struct scc_trans {
++	sccp_t	st_genscc;
++	uint	st_cpres;	/* Preset CRC */
++	uint	st_cmask;	/* Constant mask for CRC */
++} scc_trans_t;
++
++/* IIC parameter RAM.
++*/
++typedef struct iic {
++	ushort	iic_rbase;	/* Rx Buffer descriptor base address */
++	ushort	iic_tbase;	/* Tx Buffer descriptor base address */
++	u_char	iic_rfcr;	/* Rx function code */
++	u_char	iic_tfcr;	/* Tx function code */
++	ushort	iic_mrblr;	/* Max receive buffer length */
++	uint	iic_rstate;	/* Internal */
++	uint	iic_rdp;	/* Internal */
++	ushort	iic_rbptr;	/* Internal */
++	ushort	iic_rbc;	/* Internal */
++	uint	iic_rxtmp;	/* Internal */
++	uint	iic_tstate;	/* Internal */
++	uint	iic_tdp;	/* Internal */
++	ushort	iic_tbptr;	/* Internal */
++	ushort	iic_tbc;	/* Internal */
++	uint	iic_txtmp;	/* Internal */
++	char	res1[4];	/* Reserved */
++	ushort	iic_rpbase;	/* Relocation pointer */
++	char	res2[2];	/* Reserved */
++} iic_t;
++
++/* SPI parameter RAM.
++*/
++typedef struct spi {
++	ushort	spi_rbase;	/* Rx Buffer descriptor base address */
++	ushort	spi_tbase;	/* Tx Buffer descriptor base address */
++	u_char	spi_rfcr;	/* Rx function code */
++	u_char	spi_tfcr;	/* Tx function code */
++	ushort	spi_mrblr;	/* Max receive buffer length */
++	uint	spi_rstate;	/* Internal */
++	uint	spi_rdp;	/* Internal */
++	ushort	spi_rbptr;	/* Internal */
++	ushort	spi_rbc;	/* Internal */
++	uint	spi_rxtmp;	/* Internal */
++	uint	spi_tstate;	/* Internal */
++	uint	spi_tdp;	/* Internal */
++	ushort	spi_tbptr;	/* Internal */
++	ushort	spi_tbc;	/* Internal */
++	uint	spi_txtmp;	/* Internal */
++	uint	spi_res;
++	ushort	spi_rpbase;	/* Relocation pointer */
++	ushort	spi_res2;
++} spi_t;
++
++/* SPI Mode register.
++*/
++#define SPMODE_LOOP	((ushort)0x4000)	/* Loopback */
++#define SPMODE_CI	((ushort)0x2000)	/* Clock Invert */
++#define SPMODE_CP	((ushort)0x1000)	/* Clock Phase */
++#define SPMODE_DIV16	((ushort)0x0800)	/* BRG/16 mode */
++#define SPMODE_REV	((ushort)0x0400)	/* Reversed Data */
++#define SPMODE_MSTR	((ushort)0x0200)	/* SPI Master */
++#define SPMODE_EN	((ushort)0x0100)	/* Enable */
++#define SPMODE_LENMSK	((ushort)0x00f0)	/* character length */
++#define SPMODE_LEN4	((ushort)0x0030)	/*  4 bits per char */
++#define SPMODE_LEN8	((ushort)0x0070)	/*  8 bits per char */
++#define SPMODE_LEN16	((ushort)0x00f0)	/* 16 bits per char */
++#define SPMODE_PMMSK	((ushort)0x000f)	/* prescale modulus */
++
++/* SPIE fields */
++#define SPIE_MME	0x20
++#define SPIE_TXE	0x10
++#define SPIE_BSY	0x04
++#define SPIE_TXB	0x02
++#define SPIE_RXB	0x01
++
++/*
++ * RISC Controller Configuration Register definitions
++ */
++#define RCCR_TIME	0x8000			/* RISC Timer Enable */
++#define RCCR_TIMEP(t)	(((t) & 0x3F)<<8)	/* RISC Timer Period */
++#define RCCR_TIME_MASK	0x00FF			/* not RISC Timer related bits */
++
++/* RISC Timer Parameter RAM offset */
++#define PROFF_RTMR	((uint)0x01B0)
++
++typedef struct risc_timer_pram {
++	unsigned short	tm_base;	/* RISC Timer Table Base Address */
++	unsigned short	tm_ptr;		/* RISC Timer Table Pointer (internal) */
++	unsigned short	r_tmr;		/* RISC Timer Mode Register */
++	unsigned short	r_tmv;		/* RISC Timer Valid Register */
++	unsigned long	tm_cmd;		/* RISC Timer Command Register */
++	unsigned long	tm_cnt;		/* RISC Timer Internal Count */
++} rt_pram_t;
++
++/* Bits in RISC Timer Command Register */
++#define TM_CMD_VALID	0x80000000	/* Valid - Enables the timer */
++#define TM_CMD_RESTART	0x40000000	/* Restart - for automatic restart */
++#define TM_CMD_PWM	0x20000000	/* Run in Pulse Width Modulation Mode */
++#define TM_CMD_NUM(n)	(((n)&0xF)<<16)	/* Timer Number */
++#define TM_CMD_PERIOD(p) ((p)&0xFFFF)	/* Timer Period */
++
++/* CPM interrupts.  There are nearly 32 interrupts generated by CPM
++ * channels or devices.  All of these are presented to the PPC core
++ * as a single interrupt.  The CPM interrupt handler dispatches its
++ * own handlers, in a similar fashion to the PPC core handler.  We
++ * use the table as defined in the manuals (i.e. no special high
++ * priority and SCC1 == SCCa, etc...).
++ */
++#define CPMVEC_NR		32
++#define	CPMVEC_PIO_PC15		((ushort)0x1f)
++#define	CPMVEC_SCC1		((ushort)0x1e)
++#define	CPMVEC_SCC2		((ushort)0x1d)
++#define	CPMVEC_SCC3		((ushort)0x1c)
++#define	CPMVEC_SCC4		((ushort)0x1b)
++#define	CPMVEC_PIO_PC14		((ushort)0x1a)
++#define	CPMVEC_TIMER1		((ushort)0x19)
++#define	CPMVEC_PIO_PC13		((ushort)0x18)
++#define	CPMVEC_PIO_PC12		((ushort)0x17)
++#define	CPMVEC_SDMA_CB_ERR	((ushort)0x16)
++#define CPMVEC_IDMA1		((ushort)0x15)
++#define CPMVEC_IDMA2		((ushort)0x14)
++#define CPMVEC_TIMER2		((ushort)0x12)
++#define CPMVEC_RISCTIMER	((ushort)0x11)
++#define CPMVEC_I2C		((ushort)0x10)
++#define	CPMVEC_PIO_PC11		((ushort)0x0f)
++#define	CPMVEC_PIO_PC10		((ushort)0x0e)
++#define CPMVEC_TIMER3		((ushort)0x0c)
++#define	CPMVEC_PIO_PC9		((ushort)0x0b)
++#define	CPMVEC_PIO_PC8		((ushort)0x0a)
++#define	CPMVEC_PIO_PC7		((ushort)0x09)
++#define CPMVEC_TIMER4		((ushort)0x07)
++#define	CPMVEC_PIO_PC6		((ushort)0x06)
++#define	CPMVEC_SPI		((ushort)0x05)
++#define	CPMVEC_SMC1		((ushort)0x04)
++#define	CPMVEC_SMC2		((ushort)0x03)
++#define	CPMVEC_PIO_PC5		((ushort)0x02)
++#define	CPMVEC_PIO_PC4		((ushort)0x01)
++#define	CPMVEC_ERROR		((ushort)0x00)
++
++/* CPM interrupt configuration vector.
++*/
++#define	CICR_SCD_SCC4		((uint)0x00c00000)	/* SCC4 @ SCCd */
++#define	CICR_SCC_SCC3		((uint)0x00200000)	/* SCC3 @ SCCc */
++#define	CICR_SCB_SCC2		((uint)0x00040000)	/* SCC2 @ SCCb */
++#define	CICR_SCA_SCC1		((uint)0x00000000)	/* SCC1 @ SCCa */
++#define CICR_IRL_MASK		((uint)0x0000e000)	/* Core interrupt */
++#define CICR_HP_MASK		((uint)0x00001f00)	/* Hi-pri int. */
++#define CICR_IEN		((uint)0x00000080)	/* Int. enable */
++#define CICR_SPS		((uint)0x00000001)	/* SCC Spread */
++
++#define IMAP_ADDR		(get_immrbase())
++
++#define CPM_PIN_INPUT     0
++#define CPM_PIN_OUTPUT    1
++#define CPM_PIN_PRIMARY   0
++#define CPM_PIN_SECONDARY 2
++#define CPM_PIN_GPIO      4
++#define CPM_PIN_OPENDRAIN 8
++
++enum cpm_port {
++	CPM_PORTA,
++	CPM_PORTB,
++	CPM_PORTC,
++	CPM_PORTD,
++	CPM_PORTE,
++};
++
++void cpm1_set_pin(enum cpm_port port, int pin, int flags);
++
++enum cpm_clk_dir {
++	CPM_CLK_RX,
++	CPM_CLK_TX,
++	CPM_CLK_RTX
++};
++
++enum cpm_clk_target {
++	CPM_CLK_SCC1,
++	CPM_CLK_SCC2,
++	CPM_CLK_SCC3,
++	CPM_CLK_SCC4,
++	CPM_CLK_SMC1,
++	CPM_CLK_SMC2,
++};
++
++enum cpm_clk {
++	CPM_BRG1,	/* Baud Rate Generator  1 */
++	CPM_BRG2,	/* Baud Rate Generator  2 */
++	CPM_BRG3,	/* Baud Rate Generator  3 */
++	CPM_BRG4,	/* Baud Rate Generator  4 */
++	CPM_CLK1,	/* Clock  1 */
++	CPM_CLK2,	/* Clock  2 */
++	CPM_CLK3,	/* Clock  3 */
++	CPM_CLK4,	/* Clock  4 */
++	CPM_CLK5,	/* Clock  5 */
++	CPM_CLK6,	/* Clock  6 */
++	CPM_CLK7,	/* Clock  7 */
++	CPM_CLK8,	/* Clock  8 */
++};
++
++int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode);
++
++#endif /* __CPM1__ */
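The mk_cr_cmd() macro above composes the channel and opcode fields of the CP command register. A minimal sketch of the classic 8xx pattern for issuing a command (write the value plus CPM_CR_FLG, then poll until the CP clears the flag) might look like the following; cp_cpcr is the command register from 8xx_immap.h and the helper name is made up.

#include <asm/cpm1.h>
#include <asm/io.h>

/* Illustrative only: tell the CP to initialise SCC1 Tx and Rx. */
static void my_scc1_init_trx(void)
{
	out_be16(&cpmp->cp_cpcr,
		 mk_cr_cmd(CPM_CR_CH_SCC1, CPM_CR_INIT_TRX) | CPM_CR_FLG);

	/* The CP clears CPM_CR_FLG once it has accepted the command. */
	while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG)
		;
}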
+diff --git a/include/asm-powerpc/cpm2.h b/include/asm-powerpc/cpm2.h
+index f1112c1..b93a53e 100644
+--- a/include/asm-powerpc/cpm2.h
++++ b/include/asm-powerpc/cpm2.h
+@@ -132,29 +132,6 @@ extern void cpm_setbrg(uint brg, uint rate);
+ extern void cpm2_fastbrg(uint brg, uint rate, int div16);
+ extern void cpm2_reset(void);
+ 
+-
+-/* Buffer descriptors used by many of the CPM protocols.
+-*/
+-typedef struct cpm_buf_desc {
+-	ushort	cbd_sc;		/* Status and Control */
+-	ushort	cbd_datlen;	/* Data length in buffer */
+-	uint	cbd_bufaddr;	/* Buffer address in host memory */
+-} cbd_t;
+-
+-#define BD_SC_EMPTY	((ushort)0x8000)	/* Receive is empty */
+-#define BD_SC_READY	((ushort)0x8000)	/* Transmit is ready */
+-#define BD_SC_WRAP	((ushort)0x2000)	/* Last buffer descriptor */
+-#define BD_SC_INTRPT	((ushort)0x1000)	/* Interrupt on change */
+-#define BD_SC_LAST	((ushort)0x0800)	/* Last buffer in frame */
+-#define BD_SC_CM	((ushort)0x0200)	/* Continous mode */
+-#define BD_SC_ID	((ushort)0x0100)	/* Rec'd too many idles */
+-#define BD_SC_P		((ushort)0x0100)	/* xmt preamble */
+-#define BD_SC_BR	((ushort)0x0020)	/* Break received */
+-#define BD_SC_FR	((ushort)0x0010)	/* Framing error */
+-#define BD_SC_PR	((ushort)0x0008)	/* Parity error */
+-#define BD_SC_OV	((ushort)0x0002)	/* Overrun */
+-#define BD_SC_CD	((ushort)0x0001)	/* ?? */
+-
+ /* Function code bits, usually generic to devices.
+ */
+ #define CPMFCR_GBL	((u_char)0x20)	/* Set memory snooping */
+@@ -456,43 +433,6 @@ typedef struct scc_enet {
+ #define SCC_PSMR_NIB22	((ushort)0x000a)	/* Start frame search */
+ #define SCC_PSMR_FDE	((ushort)0x0001)	/* Full duplex enable */
+ 
+-/* Buffer descriptor control/status used by Ethernet receive.
+- * Common to SCC and FCC.
+- */
+-#define BD_ENET_RX_EMPTY	((ushort)0x8000)
+-#define BD_ENET_RX_WRAP		((ushort)0x2000)
+-#define BD_ENET_RX_INTR		((ushort)0x1000)
+-#define BD_ENET_RX_LAST		((ushort)0x0800)
+-#define BD_ENET_RX_FIRST	((ushort)0x0400)
+-#define BD_ENET_RX_MISS		((ushort)0x0100)
+-#define BD_ENET_RX_BC		((ushort)0x0080)	/* FCC Only */
+-#define BD_ENET_RX_MC		((ushort)0x0040)	/* FCC Only */
+-#define BD_ENET_RX_LG		((ushort)0x0020)
+-#define BD_ENET_RX_NO		((ushort)0x0010)
+-#define BD_ENET_RX_SH		((ushort)0x0008)
+-#define BD_ENET_RX_CR		((ushort)0x0004)
+-#define BD_ENET_RX_OV		((ushort)0x0002)
+-#define BD_ENET_RX_CL		((ushort)0x0001)
+-#define BD_ENET_RX_STATS	((ushort)0x01ff)	/* All status bits */
+-
+-/* Buffer descriptor control/status used by Ethernet transmit.
+- * Common to SCC and FCC.
+- */
+-#define BD_ENET_TX_READY	((ushort)0x8000)
+-#define BD_ENET_TX_PAD		((ushort)0x4000)
+-#define BD_ENET_TX_WRAP		((ushort)0x2000)
+-#define BD_ENET_TX_INTR		((ushort)0x1000)
+-#define BD_ENET_TX_LAST		((ushort)0x0800)
+-#define BD_ENET_TX_TC		((ushort)0x0400)
+-#define BD_ENET_TX_DEF		((ushort)0x0200)
+-#define BD_ENET_TX_HB		((ushort)0x0100)
+-#define BD_ENET_TX_LC		((ushort)0x0080)
+-#define BD_ENET_TX_RL		((ushort)0x0040)
+-#define BD_ENET_TX_RCMASK	((ushort)0x003c)
+-#define BD_ENET_TX_UN		((ushort)0x0002)
+-#define BD_ENET_TX_CSL		((ushort)0x0001)
+-#define BD_ENET_TX_STATS	((ushort)0x03ff)	/* All status bits */
+-
+ /* SCC as UART
+ */
+ typedef struct scc_uart {
+@@ -562,8 +502,6 @@ typedef struct scc_trans {
+ 	uint	st_cmask;	/* Constant mask for CRC */
+ } scc_trans_t;
+ 
+-#define BD_SCC_TX_LAST		((ushort)0x0800)
+-
+ /* How about some FCCs.....
+ */
+ #define FCC_GFMR_DIAG_NORM	((uint)0x00000000)
+@@ -769,8 +707,6 @@ typedef struct spi {
+ 
+ #define SPI_EB		((u_char)0x10)		/* big endian byte order */
+ 
+-#define BD_IIC_START		((ushort)0x0400)
+-
+ /* IDMA parameter RAM
+ */
+ typedef struct idma {
+diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
+index 4525c78..528ef18 100644
+--- a/include/asm-powerpc/cputable.h
++++ b/include/asm-powerpc/cputable.h
+@@ -57,6 +57,14 @@ enum powerpc_pmc_type {
+ 	PPC_PMC_PA6T = 2,
+ };
+ 
++struct pt_regs;
++
++extern int machine_check_generic(struct pt_regs *regs);
++extern int machine_check_4xx(struct pt_regs *regs);
++extern int machine_check_440A(struct pt_regs *regs);
++extern int machine_check_e500(struct pt_regs *regs);
++extern int machine_check_e200(struct pt_regs *regs);
++
+ /* NOTE WELL: Update identify_cpu() if fields are added or removed! */
+ struct cpu_spec {
+ 	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
+@@ -97,6 +105,11 @@ struct cpu_spec {
+ 
+ 	/* Name of processor class, for the ELF AT_PLATFORM entry */
+ 	char		*platform;
++
++	/* Processor specific machine check handling. Return negative
++	 * if the error is fatal, 1 if it was fully recovered and 0 to
++	 * pass up (not CPU originated) */
++	int		(*machine_check)(struct pt_regs *regs);
+ };
+ 
+ extern struct cpu_spec		*cur_cpu_spec;
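The new machine_check callback in struct cpu_spec lets each processor family plug in one of the handlers declared above. A cut-down, illustrative table entry (most fields omitted, values not taken from cputable.c) might look like this:

/* Sketch only: a 440A-class entry wiring up machine_check_440A.
 * The callback returns <0 for a fatal error, 1 if fully recovered,
 * and 0 to pass the event up as not CPU originated.
 */
static struct cpu_spec my_example_spec = {
	.cpu_name	= "440A (example)",
	.platform	= "ppc440",
	.machine_check	= machine_check_440A,
};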
+diff --git a/include/asm-powerpc/cputhreads.h b/include/asm-powerpc/cputhreads.h
+new file mode 100644
+index 0000000..8485c28
+--- /dev/null
++++ b/include/asm-powerpc/cputhreads.h
+@@ -0,0 +1,71 @@
++#ifndef _ASM_POWERPC_CPUTHREADS_H
++#define _ASM_POWERPC_CPUTHREADS_H
++
++#include <linux/cpumask.h>
++
++/*
++ * Mapping of threads to cores
++ */
++
++#ifdef CONFIG_SMP
++extern int threads_per_core;
++extern int threads_shift;
++extern cpumask_t threads_core_mask;
++#else
++#define threads_per_core	1
++#define threads_shift		0
++#define threads_core_mask	(CPU_MASK_CPU0)
++#endif
++
++/* cpu_thread_mask_to_cores - Return a cpumask with one bit per core
++ *                            hit by the argument
++ *
++ * @threads:	a cpumask of threads
++ *
++ * This function returns a cpumask which will have one "cpu" (or thread)
++ * bit set for each core that has at least one thread set in the argument.
++ *
++ * This can typically be used for things like IPI for tlb invalidations
++ * since those need to be done only once per core/TLB
++ */
++static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
++{
++	cpumask_t	tmp, res;
++	int		i;
++
++	res = CPU_MASK_NONE;
++	for (i = 0; i < NR_CPUS; i += threads_per_core) {
++		cpus_shift_right(tmp, threads_core_mask, i);
++		if (cpus_intersects(threads, tmp))
++			cpu_set(i, res);
++	}
++	return res;
++}
++
++static inline int cpu_nr_cores(void)
++{
++	return NR_CPUS >> threads_shift;
++}
++
++static inline cpumask_t cpu_online_cores_map(void)
++{
++	return cpu_thread_mask_to_cores(cpu_online_map);
++}
++
++static inline int cpu_thread_to_core(int cpu)
++{
++	return cpu >> threads_shift;
++}
++
++static inline int cpu_thread_in_core(int cpu)
++{
++	return cpu & (threads_per_core - 1);
++}
++
++static inline int cpu_first_thread_in_core(int cpu)
++{
++	return cpu & ~(threads_per_core - 1);
++}
++
++#endif /* _ASM_POWERPC_CPUTHREADS_H */
++
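Since each bit returned by cpu_thread_mask_to_cores() identifies the first thread of a core, per-core work such as the TLB-invalidation IPIs mentioned in the comment can loop over that mask directly. A hedged sketch, with my_do_core_work() standing in for whatever the caller needs to do once per core:

#include <asm/cputhreads.h>

extern void my_do_core_work(int first_thread);	/* hypothetical helper */

/* Sketch: run something once per online core instead of once per thread. */
static void my_for_each_online_core(void)
{
	cpumask_t cores = cpu_online_cores_map();
	int cpu;

	for_each_cpu_mask(cpu, cores)
		my_do_core_work(cpu);	/* cpu is the core's first thread */
}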
+diff --git a/include/asm-powerpc/dcr-native.h b/include/asm-powerpc/dcr-native.h
+index 8dbb1ab..af5fb31 100644
+--- a/include/asm-powerpc/dcr-native.h
++++ b/include/asm-powerpc/dcr-native.h
+@@ -22,6 +22,8 @@
+ #ifdef __KERNEL__
+ #ifndef __ASSEMBLY__
+ 
++#include <linux/spinlock.h>
++
+ typedef struct {
+ 	unsigned int base;
+ } dcr_host_t;
+@@ -55,20 +57,28 @@ do {								\
+ } while (0)
+ 
+ /* R/W of indirect DCRs make use of standard naming conventions for DCRs */
+-#define mfdcri(base, reg)			\
+-({						\
+-	mtdcr(base ## _CFGADDR, base ## _ ## reg);	\
+-	mfdcr(base ## _CFGDATA);			\
++extern spinlock_t dcr_ind_lock;
++
++#define mfdcri(base, reg)				\
++({							\
++	unsigned long flags; 				\
++	unsigned int val;				\
++	spin_lock_irqsave(&dcr_ind_lock, flags);	\
++	mtdcr(DCRN_ ## base ## _CONFIG_ADDR, reg);	\
++	val = mfdcr(DCRN_ ## base ## _CONFIG_DATA);	\
++	spin_unlock_irqrestore(&dcr_ind_lock, flags);	\
++	val;						\
+ })
+ 
+-#define mtdcri(base, reg, data)			\
+-do {						\
+-	mtdcr(base ## _CFGADDR, base ## _ ## reg);	\
+-	mtdcr(base ## _CFGDATA, data);		\
++#define mtdcri(base, reg, data)				\
++do {							\
++	unsigned long flags; 				\
++	spin_lock_irqsave(&dcr_ind_lock, flags);	\
++	mtdcr(DCRN_ ## base ## _CONFIG_ADDR, reg);	\
++	mtdcr(DCRN_ ## base ## _CONFIG_DATA, data);	\
++	spin_unlock_irqrestore(&dcr_ind_lock, flags);	\
+ } while (0)
+ 
+ #endif /* __ASSEMBLY__ */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_DCR_NATIVE_H */
+-
+-
+diff --git a/include/asm-powerpc/dcr-regs.h b/include/asm-powerpc/dcr-regs.h
+new file mode 100644
+index 0000000..9f1fb98
+--- /dev/null
++++ b/include/asm-powerpc/dcr-regs.h
+@@ -0,0 +1,71 @@
++/*
++ * Common DCR / SDR / CPR register definitions used on various IBM/AMCC
++ * 4xx processors
++ *
++ *    Copyright 2007 Benjamin Herrenschmidt, IBM Corp
++ *                   <benh at kernel.crashing.org>
++ *
++ * Mostly lifted from asm-ppc/ibm4xx.h by
++ *
++ *    Copyright (c) 1999 Grant Erickson <grant at lcse.umn.edu>
++ *
++ */
++
++#ifndef __DCR_REGS_H__
++#define __DCR_REGS_H__
++
++/*
++ * Most DCRs used for controlling devices such as the MAL, DMA engine,
++ * etc... are obtained from the device tree.
++ *
++ * The definitions in this file are fixed DCRs and indirect DCRs that
++ * are commonly used outside of specific drivers or refer to core
++ * common registers that may occasionally have to be tweaked outside
++ * of the driver's main register set
++ */
++
++/* CPRs (440GX and 440SP/440SPe) */
++#define DCRN_CPR0_CONFIG_ADDR	0xc
++#define DCRN_CPR0_CONFIG_DATA	0xd
++
++/* SDRs (440GX and 440SP/440SPe) */
++#define DCRN_SDR0_CONFIG_ADDR 	0xe
++#define DCRN_SDR0_CONFIG_DATA	0xf
++
++#define SDR0_PFC0		0x4100
++#define SDR0_PFC1		0x4101
++#define SDR0_PFC1_EPS		0x1c00000
++#define SDR0_PFC1_EPS_SHIFT	22
++#define SDR0_PFC1_RMII		0x02000000
++#define SDR0_MFR		0x4300
++#define SDR0_MFR_TAH0 		0x80000000  	/* TAHOE0 Enable */
++#define SDR0_MFR_TAH1 		0x40000000  	/* TAHOE1 Enable */
++#define SDR0_MFR_PCM  		0x10000000  	/* PPC440GP irq compat mode */
++#define SDR0_MFR_ECS  		0x08000000  	/* EMAC int clk */
++#define SDR0_MFR_T0TXFL		0x00080000
++#define SDR0_MFR_T0TXFH		0x00040000
++#define SDR0_MFR_T1TXFL		0x00020000
++#define SDR0_MFR_T1TXFH		0x00010000
++#define SDR0_MFR_E0TXFL		0x00008000
++#define SDR0_MFR_E0TXFH		0x00004000
++#define SDR0_MFR_E0RXFL		0x00002000
++#define SDR0_MFR_E0RXFH		0x00001000
++#define SDR0_MFR_E1TXFL		0x00000800
++#define SDR0_MFR_E1TXFH		0x00000400
++#define SDR0_MFR_E1RXFL		0x00000200
++#define SDR0_MFR_E1RXFH		0x00000100
++#define SDR0_MFR_E2TXFL		0x00000080
++#define SDR0_MFR_E2TXFH		0x00000040
++#define SDR0_MFR_E2RXFL		0x00000020
++#define SDR0_MFR_E2RXFH		0x00000010
++#define SDR0_MFR_E3TXFL		0x00000008
++#define SDR0_MFR_E3TXFH		0x00000004
++#define SDR0_MFR_E3RXFL		0x00000002
++#define SDR0_MFR_E3RXFH		0x00000001
++#define SDR0_UART0		0x0120
++#define SDR0_UART1		0x0121
++#define SDR0_UART2		0x0122
++#define SDR0_UART3		0x0123
++#define SDR0_CUST0		0x4000
++
++#endif /* __DCR_REGS_H__ */
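Together with the reworked mfdcri()/mtdcri() in dcr-native.h, these names serve as the "base" argument of the indirect accessors. A small sketch of a read-modify-write of SDR0_PFC1 on a 4xx part that has the SDR0 indirect pair; the helper and the choice of bit are illustrative:

#include <asm/dcr-native.h>
#include <asm/dcr-regs.h>

/* Sketch only: select RMII mode in SDR0_PFC1 via the indirect DCRs. */
static void my_select_rmii(void)
{
	unsigned int pfc1;

	pfc1 = mfdcri(SDR0, SDR0_PFC1);	/* uses DCRN_SDR0_CONFIG_{ADDR,DATA} */
	pfc1 |= SDR0_PFC1_RMII;
	mtdcri(SDR0, SDR0_PFC1, pfc1);
}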
+diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
+index ff52013..bbefb69 100644
+--- a/include/asm-powerpc/dma-mapping.h
++++ b/include/asm-powerpc/dma-mapping.h
+@@ -76,6 +76,11 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+ 	return dev->archdata.dma_ops;
+ }
+ 
++static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
++{
++	dev->archdata.dma_ops = ops;
++}
++
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+ 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+@@ -87,6 +92,9 @@ static inline int dma_supported(struct device *dev, u64 mask)
+ 	return dma_ops->dma_supported(dev, mask);
+ }
+ 
++/* We have our own implementation of pci_set_dma_mask() */
++#define HAVE_ARCH_PCI_SET_DMA_MASK
++
+ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+ 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+@@ -186,8 +194,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ extern struct dma_mapping_ops dma_iommu_ops;
+ extern struct dma_mapping_ops dma_direct_ops;
+ 
+-extern unsigned long dma_direct_offset;
+-
+ #else /* CONFIG_PPC64 */
+ 
+ #define dma_supported(dev, mask)	(1)
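With set_dma_ops() available, bus code can attach DMA operations to a freshly created device without poking archdata directly. A hedged sketch; the function and the choice of dma_direct_ops are illustrative:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch only: give a new device direct DMA ops and sanity-check the mask. */
static void my_attach_dma_ops(struct device *dev)
{
	set_dma_ops(dev, &dma_direct_ops);

	if (!dma_supported(dev, DMA_32BIT_MASK))
		dev_warn(dev, "32-bit DMA not supported\n");
}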
+diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
+index 3671c12..1e41bd1 100644
+--- a/include/asm-powerpc/firmware.h
++++ b/include/asm-powerpc/firmware.h
+@@ -64,7 +64,7 @@ enum {
+ 	FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ 	FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ 	FW_FEATURE_CELLEB_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_BEAT,
+-	FW_FEATURE_CELLEB_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_BEAT,
++	FW_FEATURE_CELLEB_ALWAYS = 0,
+ 	FW_FEATURE_NATIVE_POSSIBLE = 0,
+ 	FW_FEATURE_NATIVE_ALWAYS = 0,
+ 	FW_FEATURE_POSSIBLE =
 diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h
 index fd7f5a4..6d50310 100644
 --- a/include/asm-powerpc/ide.h
@@ -597548,6 +749517,782 @@
  #define IDE_ARCH_OBSOLETE_DEFAULTS
  
  static __inline__ int ide_default_irq(unsigned long base)
+diff --git a/include/asm-powerpc/immap_86xx.h b/include/asm-powerpc/immap_86xx.h
+index 0ad4e65..0f165e5 100644
+--- a/include/asm-powerpc/immap_86xx.h
++++ b/include/asm-powerpc/immap_86xx.h
+@@ -89,14 +89,14 @@ struct ccsr_guts {
+  * them.
+  *
+  * guts: Pointer to GUTS structure
+- * co: The DMA controller (1 or 2)
++ * co: The DMA controller (0 or 1)
+  * ch: The channel on the DMA controller (0, 1, 2, or 3)
+  * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx)
+  */
+ static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
+ 	unsigned int co, unsigned int ch, unsigned int device)
+ {
+-	unsigned int shift = 16 + (8 * (2 - co) + 2 * (3 - ch));
++	unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
+ 
+ 	clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift);
+ }
+@@ -118,6 +118,27 @@ static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
+ #define CCSR_GUTS_PMUXCR_DMA1_0		0x00000002
+ #define CCSR_GUTS_PMUXCR_DMA1_3		0x00000001
+ 
++/*
++ * Set the DMA external control bits in the GUTS
++ *
++ * The DMA external control bits in the PMUXCR are only meaningful for
++ * channels 0 and 3.  Any other channels are ignored.
++ *
++ * guts: Pointer to GUTS structure
++ * co: The DMA controller (0 or 1)
++ * ch: The channel on the DMA controller (0, 1, 2, or 3)
++ * value: the new value for the bit (0 or 1)
++ */
++static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
++	unsigned int co, unsigned int ch, unsigned int value)
++{
++	if ((ch == 0) || (ch == 3)) {
++		unsigned int shift = 2 * (co + 1) - (ch & 1) - 1;
++
++		clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift);
++	}
++}
++
+ #define CCSR_GUTS_CLKDVDR_PXCKEN	0x80000000
+ #define CCSR_GUTS_CLKDVDR_SSICKEN	0x20000000
+ #define CCSR_GUTS_CLKDVDR_PXCKINV	0x10000000
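guts_set_dmacr() now takes a 0-based controller number, and guts_set_pmuxcr_dma() drives the matching external-control bit for channels 0 and 3. A rough usage sketch; the device code below is a placeholder, not a value from the header:

#define MY_DMACR_DEV_SSI	0x02	/* placeholder: the real code comes from the SoC manual */

/* Sketch only: route DMA controller 0, channel 3 externally to the SSI. */
static void my_route_dma_to_ssi(struct ccsr_guts __iomem *guts)
{
	guts_set_dmacr(guts, 0, 3, MY_DMACR_DEV_SSI);
	guts_set_pmuxcr_dma(guts, 0, 3, 1);	/* only channels 0 and 3 have a bit */
}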
+diff --git a/include/asm-powerpc/immap_qe.h b/include/asm-powerpc/immap_qe.h
+index aba9806..82a4526 100644
+--- a/include/asm-powerpc/immap_qe.h
++++ b/include/asm-powerpc/immap_qe.h
+@@ -393,9 +393,39 @@ struct dbg {
+ 	u8	res2[0x48];
+ } __attribute__ ((packed));
+ 
+-/* RISC Special Registers (Trap and Breakpoint) */
++/*
++ * RISC Special Registers (Trap and Breakpoint).  These are described in
++ * the QE Developer's Handbook.
++ */
+ struct rsp {
+-	u32	reg[0x40];	/* 64 32-bit registers */
++	__be32 tibcr[16];	/* Trap/instruction breakpoint control regs */
++	u8 res0[64];
++	__be32 ibcr0;
++	__be32 ibs0;
++	__be32 ibcnr0;
++	u8 res1[4];
++	__be32 ibcr1;
++	__be32 ibs1;
++	__be32 ibcnr1;
++	__be32 npcr;
++	__be32 dbcr;
++	__be32 dbar;
++	__be32 dbamr;
++	__be32 dbsr;
++	__be32 dbcnr;
++	u8 res2[12];
++	__be32 dbdr_h;
++	__be32 dbdr_l;
++	__be32 dbdmr_h;
++	__be32 dbdmr_l;
++	__be32 bsr;
++	__be32 bor;
++	__be32 bior;
++	u8 res3[4];
++	__be32 iatr[4];
++	__be32 eccr;		/* Exception control configuration register */
++	__be32 eicr;
++	u8 res4[0x100-0xf8];
+ } __attribute__ ((packed));
+ 
+ struct qe_immap {
+diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
+index e44cdfc..7be26f6 100644
+--- a/include/asm-powerpc/io.h
++++ b/include/asm-powerpc/io.h
+@@ -50,15 +50,16 @@ extern int check_legacy_ioport(unsigned long base_port);
+ #define PCI_DRAM_OFFSET	pci_dram_offset
+ #else
+ #define _IO_BASE	pci_io_base
+-#define _ISA_MEM_BASE	0
++#define _ISA_MEM_BASE	isa_mem_base
+ #define PCI_DRAM_OFFSET	0
+ #endif
+ 
+ extern unsigned long isa_io_base;
+-extern unsigned long isa_mem_base;
+ extern unsigned long pci_io_base;
+ extern unsigned long pci_dram_offset;
+ 
++extern resource_size_t isa_mem_base;
++
+ #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO)
+ #error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits
+ #endif
+diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
+index 4a82fdc..7a3cef7 100644
+--- a/include/asm-powerpc/iommu.h
++++ b/include/asm-powerpc/iommu.h
+@@ -69,10 +69,9 @@ struct iommu_table {
+ };
+ 
+ struct scatterlist;
+-struct device_node;
+ 
+ /* Frees table for an individual device node */
+-extern void iommu_free_table(struct device_node *dn);
++extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
+ 
+ /* Initializes an iommu_table based in values set in the passed-in
+  * structure
+diff --git a/include/asm-powerpc/ipic.h b/include/asm-powerpc/ipic.h
+index edec79d..8ff08be 100644
+--- a/include/asm-powerpc/ipic.h
++++ b/include/asm-powerpc/ipic.h
+@@ -20,11 +20,13 @@
+ 
+ /* Flags when we init the IPIC */
+ #define IPIC_SPREADMODE_GRP_A	0x00000001
+-#define IPIC_SPREADMODE_GRP_D	0x00000002
+-#define IPIC_SPREADMODE_MIX_A	0x00000004
+-#define IPIC_SPREADMODE_MIX_B	0x00000008
+-#define IPIC_DISABLE_MCP_OUT	0x00000010
+-#define IPIC_IRQ0_MCP		0x00000020
++#define IPIC_SPREADMODE_GRP_B	0x00000002
++#define IPIC_SPREADMODE_GRP_C	0x00000004
++#define IPIC_SPREADMODE_GRP_D	0x00000008
++#define IPIC_SPREADMODE_MIX_A	0x00000010
++#define IPIC_SPREADMODE_MIX_B	0x00000020
++#define IPIC_DISABLE_MCP_OUT	0x00000040
++#define IPIC_IRQ0_MCP		0x00000080
+ 
+ /* IPIC registers offsets */
+ #define IPIC_SICFR	0x00	/* System Global Interrupt Configuration Register */
+diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
+index 1392db4..b5c0312 100644
+--- a/include/asm-powerpc/irq.h
++++ b/include/asm-powerpc/irq.h
+@@ -483,218 +483,6 @@ static __inline__ int irq_canonicalize(int irq)
+  */
+ #define	mk_int_int_mask(IL) (1 << (7 - (IL/2)))
+ 
+-#elif defined(CONFIG_83xx)
+-#include <asm/mpc83xx.h>
+-
+-#define	NR_IRQS	(NR_IPIC_INTS)
+-
+-#elif defined(CONFIG_85xx)
+-/* Now include the board configuration specific associations.
+-*/
+-#include <asm/mpc85xx.h>
+-
+-/* The MPC8548 openpic has 48 internal interrupts and 12 external
+- * interrupts.
+- *
+- * We are "flattening" the interrupt vectors of the cascaded CPM
+- * so that we can uniquely identify any interrupt source with a
+- * single integer.
+- */
+-#define NR_CPM_INTS	64
+-#define NR_EPIC_INTS	60
+-#ifndef NR_8259_INTS
+-#define NR_8259_INTS	0
+-#endif
+-#define NUM_8259_INTERRUPTS NR_8259_INTS
+-
+-#ifndef CPM_IRQ_OFFSET
+-#define CPM_IRQ_OFFSET	0
+-#endif
+-
+-#define NR_IRQS	(NR_EPIC_INTS + NR_CPM_INTS + NR_8259_INTS)
+-
+-/* Internal IRQs on MPC85xx OpenPIC */
+-
+-#ifndef MPC85xx_OPENPIC_IRQ_OFFSET
+-#ifdef CONFIG_CPM2
+-#define MPC85xx_OPENPIC_IRQ_OFFSET	(CPM_IRQ_OFFSET + NR_CPM_INTS)
+-#else
+-#define MPC85xx_OPENPIC_IRQ_OFFSET	0
+-#endif
+-#endif
+-
+-/* Not all of these exist on all MPC85xx implementations */
+-#define MPC85xx_IRQ_L2CACHE	( 0 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_ECM		( 1 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_DDR		( 2 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_LBIU	( 3 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_DMA0	( 4 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_DMA1	( 5 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_DMA2	( 6 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_DMA3	( 7 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_PCI1	( 8 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_PCI2	( 9 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_RIO_ERROR	( 9 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_RIO_BELL	(10 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_RIO_TX	(11 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_RIO_RX	(12 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC1_TX	(13 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC1_RX	(14 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC3_TX	(15 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC3_RX	(16 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC3_ERROR	(17 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC1_ERROR	(18 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC2_TX	(19 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC2_RX	(20 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC4_TX	(21 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC4_RX	(22 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC4_ERROR	(23 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_TSEC2_ERROR	(24 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_FEC		(25 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_DUART	(26 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_IIC1	(27 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_PERFMON	(28 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_SEC2	(29 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_CPM		(30 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-
+-/* The 12 external interrupt lines */
+-#define MPC85xx_IRQ_EXT0        (48 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT1        (49 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT2        (50 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT3        (51 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT4        (52 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT5        (53 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT6        (54 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT7        (55 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT8        (56 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT9        (57 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT10       (58 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-#define MPC85xx_IRQ_EXT11       (59 + MPC85xx_OPENPIC_IRQ_OFFSET)
+-
+-/* CPM related interrupts */
+-#define	SIU_INT_ERROR		((uint)0x00+CPM_IRQ_OFFSET)
+-#define	SIU_INT_I2C		((uint)0x01+CPM_IRQ_OFFSET)
+-#define	SIU_INT_SPI		((uint)0x02+CPM_IRQ_OFFSET)
+-#define	SIU_INT_RISC		((uint)0x03+CPM_IRQ_OFFSET)
+-#define	SIU_INT_SMC1		((uint)0x04+CPM_IRQ_OFFSET)
+-#define	SIU_INT_SMC2		((uint)0x05+CPM_IRQ_OFFSET)
+-#define	SIU_INT_USB		((uint)0x0b+CPM_IRQ_OFFSET)
+-#define	SIU_INT_TIMER1		((uint)0x0c+CPM_IRQ_OFFSET)
+-#define	SIU_INT_TIMER2		((uint)0x0d+CPM_IRQ_OFFSET)
+-#define	SIU_INT_TIMER3		((uint)0x0e+CPM_IRQ_OFFSET)
+-#define	SIU_INT_TIMER4		((uint)0x0f+CPM_IRQ_OFFSET)
+-#define	SIU_INT_FCC1		((uint)0x20+CPM_IRQ_OFFSET)
+-#define	SIU_INT_FCC2		((uint)0x21+CPM_IRQ_OFFSET)
+-#define	SIU_INT_FCC3		((uint)0x22+CPM_IRQ_OFFSET)
+-#define	SIU_INT_MCC1		((uint)0x24+CPM_IRQ_OFFSET)
+-#define	SIU_INT_MCC2		((uint)0x25+CPM_IRQ_OFFSET)
+-#define	SIU_INT_SCC1		((uint)0x28+CPM_IRQ_OFFSET)
+-#define	SIU_INT_SCC2		((uint)0x29+CPM_IRQ_OFFSET)
+-#define	SIU_INT_SCC3		((uint)0x2a+CPM_IRQ_OFFSET)
+-#define	SIU_INT_SCC4		((uint)0x2b+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC15		((uint)0x30+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC14		((uint)0x31+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC13		((uint)0x32+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC12		((uint)0x33+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC11		((uint)0x34+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC10		((uint)0x35+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC9		((uint)0x36+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC8		((uint)0x37+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC7		((uint)0x38+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC6		((uint)0x39+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC5		((uint)0x3a+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC4		((uint)0x3b+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC3		((uint)0x3c+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC2		((uint)0x3d+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC1		((uint)0x3e+CPM_IRQ_OFFSET)
+-#define	SIU_INT_PC0		((uint)0x3f+CPM_IRQ_OFFSET)
+-
+-#elif defined(CONFIG_PPC_86xx)
+-#include <asm/mpc86xx.h>
+-
+-#define NR_EPIC_INTS 48
+-#ifndef NR_8259_INTS
+-#define NR_8259_INTS 16 /*ULI 1575 can route 12 interrupts */
+-#endif
+-#define NUM_8259_INTERRUPTS NR_8259_INTS
+-
+-#ifndef I8259_OFFSET
+-#define I8259_OFFSET 0
+-#endif
+-
+-#define NR_IRQS 256
+-
+-/* Internal IRQs on MPC86xx OpenPIC */
+-
+-#ifndef MPC86xx_OPENPIC_IRQ_OFFSET
+-#define MPC86xx_OPENPIC_IRQ_OFFSET NR_8259_INTS
+-#endif
+-
+-/* The 48 internal sources */
+-#define MPC86xx_IRQ_NULL        ( 0 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_MCM         ( 1 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_DDR         ( 2 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_LBC         ( 3 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_DMA0        ( 4 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_DMA1        ( 5 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_DMA2        ( 6 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_DMA3        ( 7 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-
+-/* no 10,11 */
+-#define MPC86xx_IRQ_UART2       (12 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC1_TX    (13 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC1_RX    (14 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC3_TX    (15 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC3_RX    (16 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC3_ERROR (17 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC1_ERROR (18 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC2_TX    (19 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC2_RX    (20 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC4_TX    (21 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC4_RX    (22 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC4_ERROR (23 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_TSEC2_ERROR (24 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-/* no 25 */
+-#define MPC86xx_IRQ_UART1       (26 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_IIC         (27 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_PERFMON       (28 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-/* no 29,30,31 */
+-#define MPC86xx_IRQ_SRIO_ERROR    (32 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_SRIO_OUT_BELL (33 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_SRIO_IN_BELL  (34 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-/* no 35,36 */
+-#define MPC86xx_IRQ_SRIO_OUT_MSG1 (37 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_SRIO_IN_MSG1  (38 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_SRIO_OUT_MSG2 (39 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_SRIO_IN_MSG2  (40 + MPC86xx_OPENPIC_IRQ_OFFSET)
+-
+-/* The 12 external interrupt lines */
+-#define MPC86xx_IRQ_EXT_BASE	48
+-#define MPC86xx_IRQ_EXT0	(0 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT1	(1 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT2	(2 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT3	(3 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT4	(4 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT5	(5 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT6	(6 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT7	(7 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT8	(8 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT9	(9 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT10	(10 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-#define MPC86xx_IRQ_EXT11	(11 + MPC86xx_IRQ_EXT_BASE \
+-		+ MPC86xx_OPENPIC_IRQ_OFFSET)
+-
+ #else /* CONFIG_40x + CONFIG_8xx */
+ /*
+  * this is the # irq's for all ppc arch's (pmac/chrp/prep)
+diff --git a/include/asm-powerpc/iseries/hv_lp_event.h b/include/asm-powerpc/iseries/hv_lp_event.h
+index 6ce2ce1..8f5da7d 100644
+--- a/include/asm-powerpc/iseries/hv_lp_event.h
++++ b/include/asm-powerpc/iseries/hv_lp_event.h
+@@ -78,7 +78,7 @@ extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
+ 
+ /*
+  * Close an Lp Event Path for a type and partition
+- * returns 0 on sucess
++ * returns 0 on success
+  */
+ extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
+ 
+diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
+index b6f817b..701857b 100644
+--- a/include/asm-powerpc/kexec.h
++++ b/include/asm-powerpc/kexec.h
+@@ -123,6 +123,9 @@ struct pt_regs;
+ extern void default_machine_kexec(struct kimage *image);
+ extern int default_machine_kexec_prepare(struct kimage *image);
+ extern void default_machine_crash_shutdown(struct pt_regs *regs);
++typedef void (*crash_shutdown_t)(void);
++extern int crash_shutdown_register(crash_shutdown_t handler);
++extern int crash_shutdown_unregister(crash_shutdown_t handler);
+ 
+ extern void machine_kexec_simple(struct kimage *image);
+ extern void crash_kexec_secondary(struct pt_regs *regs);
+diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h
+index b5f9f4c..5d1dc48 100644
+--- a/include/asm-powerpc/lmb.h
++++ b/include/asm-powerpc/lmb.h
+@@ -51,6 +51,7 @@ extern unsigned long __init __lmb_alloc_base(unsigned long size,
+ extern unsigned long __init lmb_phys_mem_size(void);
+ extern unsigned long __init lmb_end_of_DRAM(void);
+ extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
++extern int __init lmb_is_reserved(unsigned long addr);
+ 
+ extern void lmb_dump_all(void);
+ 
+diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
+index 6968f43..0872ec2 100644
+--- a/include/asm-powerpc/machdep.h
++++ b/include/asm-powerpc/machdep.h
+@@ -204,6 +204,13 @@ struct machdep_calls {
+ 	/*
+ 	 * optional PCI "hooks"
+ 	 */
++	/* Called in indirect_* to avoid touching devices */
++	int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
++
++	/* Called at the very end of pcibios_init() */
++	void (*pcibios_after_init)(void);
++
++#endif /* CONFIG_PPC32 */
+ 
+ 	/* Called after PPC generic resource fixup to perform
+ 	   machine specific fixups */
+@@ -212,18 +219,9 @@ struct machdep_calls {
+ 	/* Called for each PCI bus in the system when it's probed */
+ 	void (*pcibios_fixup_bus)(struct pci_bus *);
+ 
+-	/* Called when pci_enable_device() is called (initial=0) or
+-	 * when a device with no assigned resource is found (initial=1).
+-	 * Returns 0 to allow assignment/enabling of the device. */
+-	int  (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
+-
+-	/* Called in indirect_* to avoid touching devices */
+-	int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
+-
+-	/* Called at then very end of pcibios_init() */
+-	void (*pcibios_after_init)(void);
+-
+-#endif /* CONFIG_PPC32 */
++	/* Called when pci_enable_device() is called. Returns 0 to
++	 * allow assignment/enabling of the device. */
++	int  (*pcibios_enable_device_hook)(struct pci_dev *);
+ 
+ 	/* Called to shutdown machine specific hardware not already controlled
+ 	 * by other drivers.
+@@ -253,6 +251,16 @@ struct machdep_calls {
+ 	 */
+ 	void (*machine_kexec)(struct kimage *image);
+ #endif /* CONFIG_KEXEC */
++
++#ifdef CONFIG_SUSPEND
++	/* These are called to disable and enable, respectively, IRQs when
++	 * entering a suspend state.  If NULL, then the generic versions
++	 * will be called.  The generic versions disable/enable the
++	 * decrementer along with interrupts.
++	 */
++	void (*suspend_disable_irqs)(void);
++	void (*suspend_enable_irqs)(void);
++#endif
+ };
+ 
+ extern void power4_idle(void);
+@@ -326,5 +334,31 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
+ 		ppc_md.log_error(buf, err_type, fatal);
+ }
+ 
++#define __define_machine_initcall(mach,level,fn,id) \
++	static int __init __machine_initcall_##mach##_##fn(void) { \
++		if (machine_is(mach)) return fn(); \
++		return 0; \
++	} \
++	__define_initcall(level,__machine_initcall_##mach##_##fn,id);
++
++#define machine_core_initcall(mach,fn)		__define_machine_initcall(mach,"1",fn,1)
++#define machine_core_initcall_sync(mach,fn)	__define_machine_initcall(mach,"1s",fn,1s)
++#define machine_postcore_initcall(mach,fn)	__define_machine_initcall(mach,"2",fn,2)
++#define machine_postcore_initcall_sync(mach,fn)	__define_machine_initcall(mach,"2s",fn,2s)
++#define machine_arch_initcall(mach,fn)		__define_machine_initcall(mach,"3",fn,3)
++#define machine_arch_initcall_sync(mach,fn)	__define_machine_initcall(mach,"3s",fn,3s)
++#define machine_subsys_initcall(mach,fn)	__define_machine_initcall(mach,"4",fn,4)
++#define machine_subsys_initcall_sync(mach,fn)	__define_machine_initcall(mach,"4s",fn,4s)
++#define machine_fs_initcall(mach,fn)		__define_machine_initcall(mach,"5",fn,5)
++#define machine_fs_initcall_sync(mach,fn)	__define_machine_initcall(mach,"5s",fn,5s)
++#define machine_rootfs_initcall(mach,fn)	__define_machine_initcall(mach,"rootfs",fn,rootfs)
++#define machine_device_initcall(mach,fn)	__define_machine_initcall(mach,"6",fn,6)
++#define machine_device_initcall_sync(mach,fn)	__define_machine_initcall(mach,"6s",fn,6s)
++#define machine_late_initcall(mach,fn)		__define_machine_initcall(mach,"7",fn,7)
++#define machine_late_initcall_sync(mach,fn)	__define_machine_initcall(mach,"7s",fn,7s)
++
++void generic_suspend_disable_irqs(void);
++void generic_suspend_enable_irqs(void);
++
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_MACHDEP_H */
+diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
+index 82328de..2864fa3 100644
+--- a/include/asm-powerpc/mmu-hash64.h
++++ b/include/asm-powerpc/mmu-hash64.h
+@@ -80,7 +80,7 @@ extern char initial_stab[];
+ #define HPTE_V_AVPN_SHIFT	7
+ #define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
+ #define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
+-#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80))
++#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
+ #define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
+ #define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
+ #define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
+@@ -180,6 +180,7 @@ extern int mmu_vmalloc_psize;
+ extern int mmu_io_psize;
+ extern int mmu_kernel_ssize;
+ extern int mmu_highuser_ssize;
++extern u16 mmu_slb_size;
+ 
+ /*
+  * If the processor supports 64k normal pages but not 64k cache
+@@ -264,7 +265,7 @@ static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
+ 
+ extern int __hash_page_4K(unsigned long ea, unsigned long access,
+ 			  unsigned long vsid, pte_t *ptep, unsigned long trap,
+-			  unsigned int local, int ssize);
++			  unsigned int local, int ssize, int subpage_prot);
+ extern int __hash_page_64K(unsigned long ea, unsigned long access,
+ 			   unsigned long vsid, pte_t *ptep, unsigned long trap,
+ 			   unsigned int local, int ssize);
+@@ -277,6 +278,8 @@ extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
+ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+ 			     unsigned long pstart, unsigned long mode,
+ 			     int psize, int ssize);
++extern void set_huge_psize(int psize);
++extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
+ 
+ extern void htab_initialize(void);
+ extern void htab_initialize_secondary(void);
+diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h
+index fcb2ebb..81ef10b 100644
+--- a/include/asm-powerpc/mpc52xx.h
++++ b/include/asm-powerpc/mpc52xx.h
+@@ -248,19 +248,25 @@ struct mpc52xx_cdm {
+ 
+ #ifndef __ASSEMBLY__
+ 
+-extern void __iomem * mpc52xx_find_and_map(const char *);
+-extern void __iomem * mpc52xx_find_and_map_path(const char *path);
++/* mpc52xx_common.c */
+ extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node);
+ extern void mpc5200_setup_xlb_arbiter(void);
+ extern void mpc52xx_declare_of_platform_devices(void);
++extern void mpc52xx_map_common_devices(void);
++extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
++extern void mpc52xx_restart(char *cmd);
+ 
++/* mpc52xx_pic.c */
+ extern void mpc52xx_init_irq(void);
+ extern unsigned int mpc52xx_get_irq(void);
+ 
++/* mpc52xx_pci.c */
++#ifdef CONFIG_PCI
+ extern int __init mpc52xx_add_bridge(struct device_node *node);
+-
+-extern void __init mpc52xx_map_wdt(void);
+-extern void mpc52xx_restart(char *cmd);
++extern void __init mpc52xx_setup_pci(void);
++#else
++static inline void mpc52xx_setup_pci(void) { }
++#endif
+ 
+ #endif /* __ASSEMBLY__ */
+ 
+diff --git a/include/asm-powerpc/mpc52xx_psc.h b/include/asm-powerpc/mpc52xx_psc.h
+index 26690d2..bea42b9 100644
+--- a/include/asm-powerpc/mpc52xx_psc.h
++++ b/include/asm-powerpc/mpc52xx_psc.h
+@@ -153,6 +153,9 @@ struct mpc52xx_psc {
+ 	u8		reserved16[3];
+ 	u8		irfdr;		/* PSC + 0x54 */
+ 	u8		reserved17[3];
++};
++
++struct mpc52xx_psc_fifo {
+ 	u16		rfnum;		/* PSC + 0x58 */
+ 	u16		reserved18;
+ 	u16		tfnum;		/* PSC + 0x5c */
+diff --git a/include/asm-powerpc/mpc8260.h b/include/asm-powerpc/mpc8260.h
+index e0d4807..03317e1 100644
+--- a/include/asm-powerpc/mpc8260.h
++++ b/include/asm-powerpc/mpc8260.h
+@@ -8,6 +8,7 @@
+ #ifndef __ASM_POWERPC_MPC8260_H__
+ #define __ASM_POWERPC_MPC8260_H__
+ 
++#define MPC82XX_BCR_PLDP 0x00800000 /* Pipeline Maximum Depth */
+ 
+ #ifdef CONFIG_8260
+ 
+diff --git a/include/asm-powerpc/mpc8xx.h b/include/asm-powerpc/mpc8xx.h
+index 2be014b..98f3c4f 100644
+--- a/include/asm-powerpc/mpc8xx.h
++++ b/include/asm-powerpc/mpc8xx.h
+@@ -4,29 +4,9 @@
+  * file that has to include MPC8xx configuration, they all include
+  * this one and the configuration switching is done here.
+  */
+-#ifdef __KERNEL__
+ #ifndef __CONFIG_8xx_DEFS
+ #define __CONFIG_8xx_DEFS
+ 
+-
+-#ifdef CONFIG_8xx
+-
+-#ifdef CONFIG_FADS
+-#include <platforms/fads.h>
+-#endif
+-
+-#if defined(CONFIG_MPC86XADS)
+-#include <platforms/8xx/mpc86xads.h>
+-#endif
+-
+-#if defined(CONFIG_MPC885ADS)
+-#include <platforms/8xx/mpc885ads.h>
+-#endif
+-
+-#ifdef CONFIG_PCMCIA_M8XX
+ extern struct mpc8xx_pcmcia_ops m8xx_pcmcia_ops;
+-#endif
+ 
+-#endif /* CONFIG_8xx */
+ #endif /* __CONFIG_8xx_DEFS */
+-#endif /* __KERNEL__ */
+diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
+index ae84dde..943c5a3 100644
+--- a/include/asm-powerpc/mpic.h
++++ b/include/asm-powerpc/mpic.h
+@@ -22,7 +22,9 @@
+ #define MPIC_GREG_GLOBAL_CONF_0		0x00020
+ #define		MPIC_GREG_GCONF_RESET			0x80000000
+ #define		MPIC_GREG_GCONF_8259_PTHROU_DIS		0x20000000
++#define		MPIC_GREG_GCONF_NO_BIAS			0x10000000
+ #define		MPIC_GREG_GCONF_BASE_MASK		0x000fffff
++#define		MPIC_GREG_GCONF_MCK			0x08000000
+ #define MPIC_GREG_GLOBAL_CONF_1		0x00030
+ #define		MPIC_GREG_GLOBAL_CONF_1_SIE		0x08000000
+ #define		MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK	0x70000000
+@@ -78,6 +80,7 @@
+ #define 	MPIC_CPU_WHOAMI_MASK			0x0000001f
+ #define MPIC_CPU_INTACK			0x000a0
+ #define MPIC_CPU_EOI			0x000b0
++#define MPIC_CPU_MCACK			0x000c0
+ 
+ /*
+  * Per-source registers
+@@ -141,6 +144,7 @@
+ #define TSI108_CPU_WHOAMI		0xffffffff
+ #define TSI108_CPU_INTACK		0x00004
+ #define TSI108_CPU_EOI			0x00008
++#define TSI108_CPU_MCACK		0x00004 /* Doesn't really exist here */
+ 
+ /*
+  * Per-source registers
+@@ -183,6 +187,7 @@ enum {
+ 	MPIC_IDX_CPU_WHOAMI,
+ 	MPIC_IDX_CPU_INTACK,
+ 	MPIC_IDX_CPU_EOI,
++	MPIC_IDX_CPU_MCACK,
+ 
+ 	MPIC_IDX_IRQ_BASE,
+ 	MPIC_IDX_IRQ_STRIDE,
+@@ -344,6 +349,10 @@ struct mpic
+ #define MPIC_USES_DCR			0x00000080
+ /* MPIC has 11-bit vector fields (or larger) */
+ #define MPIC_LARGE_VECTORS		0x00000100
++/* Enable delivery of prio 15 interrupts as MCK instead of EE */
++#define MPIC_ENABLE_MCK			0x00000200
++/* Disable bias among target selection, spread interrupts evenly */
++#define MPIC_NO_BIAS			0x00000400
+ 
+ /* MPIC HW modification ID */
+ #define MPIC_REGSET_MASK		0xf0000000
+@@ -447,10 +456,19 @@ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
+ /* Send a message (IPI) to a given target (cpu number or MSG_*) */
+ void smp_mpic_message_pass(int target, int msg);
+ 
++/* Unmask a specific virq */
++extern void mpic_unmask_irq(unsigned int irq);
++/* Mask a specific virq */
++extern void mpic_mask_irq(unsigned int irq);
++/* EOI a specific virq */
++extern void mpic_end_irq(unsigned int irq);
++
+ /* Fetch interrupt from a given mpic */
+ extern unsigned int mpic_get_one_irq(struct mpic *mpic);
+-/* This one gets to the primary mpic */
++/* This one gets from the primary mpic */
+ extern unsigned int mpic_get_irq(void);
++/* Fetch Machine Check interrupt from primary mpic */
++extern unsigned int mpic_get_mcirq(void);
+ 
+ /* Set the EPIC clock ratio */
+ void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
+diff --git a/include/asm-powerpc/nvram.h b/include/asm-powerpc/nvram.h
+index 9877982..4e7059c 100644
+--- a/include/asm-powerpc/nvram.h
++++ b/include/asm-powerpc/nvram.h
+@@ -10,6 +10,8 @@
+ #ifndef _ASM_POWERPC_NVRAM_H
+ #define _ASM_POWERPC_NVRAM_H
+ 
++#include <linux/errno.h>
++
+ #define NVRW_CNT 0x20
+ #define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
+ #define NVRAM_BLOCK_LEN 16
+@@ -71,7 +73,16 @@ extern int nvram_clear_error_log(void);
+ extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
+ 
+ extern int pSeries_nvram_init(void);
++
++#ifdef CONFIG_MMIO_NVRAM
+ extern int mmio_nvram_init(void);
++#else
++static inline int mmio_nvram_init(void)
++{
++	return -ENODEV;
++}
++#endif
++
+ #endif /* __KERNEL__ */
+ 
+ /* PowerMac specific nvram stuffs */
+diff --git a/include/asm-powerpc/of_platform.h b/include/asm-powerpc/of_platform.h
+index 80e6fad..18659ef 100644
+--- a/include/asm-powerpc/of_platform.h
++++ b/include/asm-powerpc/of_platform.h
+@@ -15,8 +15,14 @@
+ #include <linux/of_platform.h>
+ 
+ /* Platform drivers register/unregister */
+-extern int of_register_platform_driver(struct of_platform_driver *drv);
+-extern void of_unregister_platform_driver(struct of_platform_driver *drv);
++static inline int of_register_platform_driver(struct of_platform_driver *drv)
++{
++	return of_register_driver(drv, &of_platform_bus_type);
++}
++static inline void of_unregister_platform_driver(struct of_platform_driver *drv)
++{
++	of_unregister_driver(drv);
++}
+ 
+ /* Platform devices and busses creation */
+ extern struct of_device *of_platform_device_create(struct device_node *np,
+@@ -26,9 +32,11 @@ extern struct of_device *of_platform_device_create(struct device_node *np,
+ #define OF_NO_DEEP_PROBE ((struct of_device_id *)-1)
+ 
+ extern int of_platform_bus_probe(struct device_node *root,
+-				 struct of_device_id *matches,
++				 const struct of_device_id *matches,
+ 				 struct device *parent);
+ 
+ extern struct of_device *of_find_device_by_phandle(phandle ph);
+ 
++extern void of_instantiate_rtc(void);
++
+ #endif	/* _ASM_POWERPC_OF_PLATFORM_H */
 diff --git a/include/asm-powerpc/pasemi_dma.h b/include/asm-powerpc/pasemi_dma.h
 new file mode 100644
 index 0000000..b4526ff
@@ -598009,65 +750754,733 @@
 +
 +/* Common routines to allocate rings and buffers */
 +
-+extern int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size);
-+extern void pasemi_dma_free_ring(struct pasemi_dmachan *chan);
++extern int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size);
++extern void pasemi_dma_free_ring(struct pasemi_dmachan *chan);
++
++extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
++				  dma_addr_t *handle);
++extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
++				dma_addr_t *handle);
++
++/* Initialize the library, must be called before any other functions */
++extern int pasemi_dma_init(void);
++
++#endif /* ASM_PASEMI_DMA_H */
+diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
+index d8bdc79..e5802c6 100644
+--- a/include/asm-powerpc/pci-bridge.h
++++ b/include/asm-powerpc/pci-bridge.h
+@@ -1,15 +1,42 @@
+ #ifndef _ASM_POWERPC_PCI_BRIDGE_H
+ #define _ASM_POWERPC_PCI_BRIDGE_H
+ #ifdef __KERNEL__
+-
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
+ #include <linux/pci.h>
+ #include <linux/list.h>
+ #include <linux/ioport.h>
+ 
+-#ifndef CONFIG_PPC64
+-
+ struct device_node;
+-struct pci_controller;
++
++extern unsigned int ppc_pci_flags;
++enum {
++	/* Force re-assigning all resources (ignore firmware
++	 * setup completely)
++	 */
++	PPC_PCI_REASSIGN_ALL_RSRC	= 0x00000001,
++
++	/* Re-assign all bus numbers */
++	PPC_PCI_REASSIGN_ALL_BUS	= 0x00000002,
++
++	/* Do not try to assign, just use existing setup */
++	PPC_PCI_PROBE_ONLY		= 0x00000004,
++
++	/* Don't bother with ISA alignment unless the bridge has
++	 * ISA forwarding enabled
++	 */
++	PPC_PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,
++
++	/* Enable domain numbers in /proc */
++	PPC_PCI_ENABLE_PROC_DOMAINS	= 0x00000010,
++	/* ... except for domain 0 */
++	PPC_PCI_COMPAT_DOMAIN_0		= 0x00000020,
++};
++
+ 
+ /*
+  * Structure of a PCI controller (host bridge)
+@@ -17,26 +44,41 @@ struct pci_controller;
+ struct pci_controller {
+ 	struct pci_bus *bus;
+ 	char is_dynamic;
+-	void *arch_data;
++#ifdef CONFIG_PPC64
++	int node;
++#endif
++	struct device_node *dn;
+ 	struct list_head list_node;
+ 	struct device *parent;
+ 
+ 	int first_busno;
+ 	int last_busno;
++#ifndef CONFIG_PPC64
+ 	int self_busno;
++#endif
+ 
+ 	void __iomem *io_base_virt;
++#ifdef CONFIG_PPC64
++	void *io_base_alloc;
++#endif
+ 	resource_size_t io_base_phys;
++#ifndef CONFIG_PPC64
++	resource_size_t pci_io_size;
++#endif
+ 
+ 	/* Some machines (PReP) have a non 1:1 mapping of
+ 	 * the PCI memory space in the CPU bus space
+ 	 */
+ 	resource_size_t pci_mem_offset;
++#ifdef CONFIG_PPC64
++	unsigned long pci_io_size;
++#endif
+ 
+ 	struct pci_ops *ops;
+-	volatile unsigned int __iomem *cfg_addr;
+-	volatile void __iomem *cfg_data;
++	unsigned int __iomem *cfg_addr;
++	void __iomem *cfg_data;
+ 
++#ifndef CONFIG_PPC64
+ 	/*
+ 	 * Used for variants of PCI indirect handling and possible quirks:
+ 	 *  SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
+@@ -51,21 +93,30 @@ struct pci_controller {
+ 	 *   set.
+ 	 *  BIG_ENDIAN - cfg_addr is a big endian register
+ 	 */
+-#define PPC_INDIRECT_TYPE_SET_CFG_TYPE		(0x00000001)
+-#define PPC_INDIRECT_TYPE_EXT_REG		(0x00000002)
+-#define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS	(0x00000004)
+-#define PPC_INDIRECT_TYPE_NO_PCIE_LINK		(0x00000008)
+-#define PPC_INDIRECT_TYPE_BIG_ENDIAN		(0x00000010)
++#define PPC_INDIRECT_TYPE_SET_CFG_TYPE		0x00000001
++#define PPC_INDIRECT_TYPE_EXT_REG		0x00000002
++#define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS	0x00000004
++#define PPC_INDIRECT_TYPE_NO_PCIE_LINK		0x00000008
++#define PPC_INDIRECT_TYPE_BIG_ENDIAN		0x00000010
+ 	u32 indirect_type;
+-
++#endif	/* !CONFIG_PPC64 */
+ 	/* Currently, we limit ourselves to 1 IO range and 3 mem
+ 	 * ranges since the common pci_bus structure can't handle more
+ 	 */
+ 	struct resource	io_resource;
+ 	struct resource mem_resources[3];
+ 	int global_number;		/* PCI domain number */
++#ifdef CONFIG_PPC64
++	unsigned long buid;
++	unsigned long dma_window_base_cur;
++	unsigned long dma_window_size;
++
++	void *private_data;
++#endif	/* CONFIG_PPC64 */
+ };
+ 
++#ifndef CONFIG_PPC64
++
+ static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
+ {
+ 	return bus->sysdata;
+@@ -81,18 +132,18 @@ static inline int isa_vaddr_is_ioport(void __iomem *address)
+ 
+ /* These are used for config access before all the PCI probing
+    has been done. */
+-int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn,
+-			   int where, u8 *val);
+-int early_read_config_word(struct pci_controller *hose, int bus, int dev_fn,
+-			   int where, u16 *val);
+-int early_read_config_dword(struct pci_controller *hose, int bus, int dev_fn,
+-			    int where, u32 *val);
+-int early_write_config_byte(struct pci_controller *hose, int bus, int dev_fn,
+-			    int where, u8 val);
+-int early_write_config_word(struct pci_controller *hose, int bus, int dev_fn,
+-			    int where, u16 val);
+-int early_write_config_dword(struct pci_controller *hose, int bus, int dev_fn,
+-			     int where, u32 val);
++extern int early_read_config_byte(struct pci_controller *hose, int bus,
++			int dev_fn, int where, u8 *val);
++extern int early_read_config_word(struct pci_controller *hose, int bus,
++			int dev_fn, int where, u16 *val);
++extern int early_read_config_dword(struct pci_controller *hose, int bus,
++			int dev_fn, int where, u32 *val);
++extern int early_write_config_byte(struct pci_controller *hose, int bus,
++			int dev_fn, int where, u8 val);
++extern int early_write_config_word(struct pci_controller *hose, int bus,
++			int dev_fn, int where, u16 val);
++extern int early_write_config_dword(struct pci_controller *hose, int bus,
++			int dev_fn, int where, u32 val);
+ 
+ extern int early_find_capability(struct pci_controller *hose, int bus,
+ 				 int dev_fn, int cap);
+@@ -101,87 +152,33 @@ extern void setup_indirect_pci(struct pci_controller* hose,
+ 			       resource_size_t cfg_addr,
+ 			       resource_size_t cfg_data, u32 flags);
+ extern void setup_grackle(struct pci_controller *hose);
+-extern void __init update_bridge_resource(struct pci_dev *dev,
+-					  struct resource *res);
+-
+-#else
+-
+-
+-/*
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License
+- * as published by the Free Software Foundation; either version
+- * 2 of the License, or (at your option) any later version.
+- */
+-
+-/*
+- * Structure of a PCI controller (host bridge)
+- */
+-struct pci_controller {
+-	struct pci_bus *bus;
+-	char is_dynamic;
+-	int node;
+-	void *arch_data;
+-	struct list_head list_node;
+-	struct device *parent;
+-
+-	int first_busno;
+-	int last_busno;
+-
+-	void __iomem *io_base_virt;
+-	void *io_base_alloc;
+-	resource_size_t io_base_phys;
+-
+-	/* Some machines have a non 1:1 mapping of
+-	 * the PCI memory space in the CPU bus space
+-	 */
+-	resource_size_t pci_mem_offset;
+-	unsigned long pci_io_size;
+-
+-	struct pci_ops *ops;
+-	volatile unsigned int __iomem *cfg_addr;
+-	volatile void __iomem *cfg_data;
+-
+-	/* Currently, we limit ourselves to 1 IO range and 3 mem
+-	 * ranges since the common pci_bus structure can't handle more
+-	 */
+-	struct resource io_resource;
+-	struct resource mem_resources[3];
+-	int global_number;
+-	unsigned long buid;
+-	unsigned long dma_window_base_cur;
+-	unsigned long dma_window_size;
+-
+-	void *private_data;
+-};
++#else	/* CONFIG_PPC64 */
+ 
+ /*
+  * PCI stuff, for nodes representing PCI devices, pointed to
+  * by device_node->data.
+  */
+-struct pci_controller;
+ struct iommu_table;
+ 
+ struct pci_dn {
+ 	int	busno;			/* pci bus number */
+-	int	bussubno;		/* pci subordinate bus number */
+ 	int	devfn;			/* pci device and function number */
+-	int	class_code;		/* pci device class */
+ 
+ 	struct  pci_controller *phb;	/* for pci devices */
+ 	struct	iommu_table *iommu_table;	/* for phb's or bridges */
+-	struct	pci_dev *pcidev;	/* back-pointer to the pci device */
+ 	struct	device_node *node;	/* back-pointer to the device_node */
+ 
+ 	int	pci_ext_config_space;	/* for pci devices */
+ 
+ #ifdef CONFIG_EEH
++	struct	pci_dev *pcidev;	/* back-pointer to the pci device */
++	int	class_code;		/* pci device class */
+ 	int	eeh_mode;		/* See eeh.h for possible EEH_MODEs */
+ 	int	eeh_config_addr;
+ 	int	eeh_pe_config_addr; /* new-style partition endpoint address */
+-	int 	eeh_check_count;	/* # times driver ignored error */
+-	int 	eeh_freeze_count;	/* # times this device froze up. */
+-	int 	eeh_false_positives;	/* # times this device reported #ff's */
++	int	eeh_check_count;	/* # times driver ignored error */
++	int	eeh_freeze_count;	/* # times this device froze up. */
++	int	eeh_false_positives;	/* # times this device reported #ff's */
+ 	u32	config_space[16];	/* saved PCI config space */
+ #endif
+ };
+@@ -189,7 +186,7 @@ struct pci_dn {
+ /* Get the pointer to a device_node's pci_dn */
+ #define PCI_DN(dn)	((struct pci_dn *) (dn)->data)
+ 
+-struct device_node *fetch_dev_dn(struct pci_dev *dev);
++extern struct device_node *fetch_dev_dn(struct pci_dev *dev);
+ 
+ /* Get a device_node from a pci_dev.  This code must be fast except
+  * in the case where the sysdata is incorrect and needs to be fixed
+@@ -227,14 +224,14 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+ }
+ 
+ /** Find the bus corresponding to the indicated device node */
+-struct pci_bus * pcibios_find_pci_bus(struct device_node *dn);
++extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
+ 
+ /** Remove all of the PCI devices under this bus */
+-void pcibios_remove_pci_devices(struct pci_bus *bus);
++extern void pcibios_remove_pci_devices(struct pci_bus *bus);
+ 
+ /** Discover new pci devices under this bus, and add them */
+-void pcibios_add_pci_devices(struct pci_bus * bus);
+-void pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus);
++extern void pcibios_add_pci_devices(struct pci_bus *bus);
++extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus);
+ 
+ extern int pcibios_remove_root_bus(struct pci_controller *phb);
+ 
+@@ -270,20 +267,18 @@ extern int pcibios_map_io_space(struct pci_bus *bus);
+ #define PHB_SET_NODE(PHB, NODE)		((PHB)->node = -1)
+ #endif
+ 
+-#endif /* CONFIG_PPC64 */
++#endif	/* CONFIG_PPC64 */
+ 
+ /* Get the PCI host controller for an OF device */
+-extern struct pci_controller*
+-pci_find_hose_for_OF_device(struct device_node* node);
++extern struct pci_controller *pci_find_hose_for_OF_device(
++			struct device_node* node);
+ 
+ /* Fill up host controller resources from the OF node */
+-extern void
+-pci_process_bridge_OF_ranges(struct pci_controller *hose,
+-			   struct device_node *dev, int primary);
++extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
++			struct device_node *dev, int primary);
+ 
+ /* Allocate & free a PCI host bridge structure */
+-extern struct pci_controller *
+-pcibios_alloc_controller(struct device_node *dev);
++extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
+ extern void pcibios_free_controller(struct pci_controller *phb);
+ 
+ #ifdef CONFIG_PCI
+@@ -298,9 +293,7 @@ static inline int pcibios_vaddr_is_ioport(void __iomem *address)
+ {
+ 	return 0;
+ }
+-#endif
+-
++#endif	/* CONFIG_PCI */
+ 
+-
+-#endif /* __KERNEL__ */
+-#endif
++#endif	/* __KERNEL__ */
++#endif	/* _ASM_POWERPC_PCI_BRIDGE_H */
+diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
+index 7b11765..a05a942 100644
+--- a/include/asm-powerpc/pci.h
++++ b/include/asm-powerpc/pci.h
+@@ -36,11 +36,10 @@ struct pci_dev;
+ 
+ /*
+  * Set this to 1 if you want the kernel to re-assign all PCI
+- * bus numbers
++ * bus numbers (don't do that on ppc64 yet !)
+  */
+-extern int pci_assign_all_buses;
+-#define pcibios_assign_all_busses()	(pci_assign_all_buses)
+-
++#define pcibios_assign_all_busses()    	(ppc_pci_flags & \
++					 PPC_PCI_REASSIGN_ALL_BUS)
+ #define pcibios_scan_all_fns(a, b)	0
+ 
+ static inline void pcibios_set_master(struct pci_dev *dev)
+@@ -95,9 +94,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ #define get_pci_dma_ops()	NULL
+ #endif
+ 
+-/* Decide whether to display the domain number in /proc */
+-extern int pci_proc_domain(struct pci_bus *bus);
+-
+ #else /* 32-bit */
+ 
+ #ifdef CONFIG_PCI
+@@ -109,17 +105,14 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ 	*strategy_parameter = ~0UL;
+ }
+ #endif
+-
+-/* Set the name of the bus as it appears in /proc/bus/pci */
+-static inline int pci_proc_domain(struct pci_bus *bus)
+-{
+-	return 0;
+-}
+-
+ #endif /* CONFIG_PPC64 */
+ 
+ extern int pci_domain_nr(struct pci_bus *bus);
+ 
++/* Decide whether to display the domain number in /proc */
++extern int pci_proc_domain(struct pci_bus *bus);
++
 +
-+extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
-+				  dma_addr_t *handle);
-+extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
-+				dma_addr_t *handle);
+ struct vm_area_struct;
+ /* Map a range of PCI memory or I/O space for a device into user space */
+ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
+@@ -199,13 +192,12 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
+ 	return root;
+ }
+ 
+-extern void pcibios_fixup_device_resources(struct pci_dev *dev,
+-			struct pci_bus *bus);
+-
+ extern void pcibios_setup_new_device(struct pci_dev *dev);
+ 
+ extern void pcibios_claim_one_bus(struct pci_bus *b);
+ 
++extern void pcibios_resource_survey(void);
 +
-+/* Initialize the library, must be called before any other functions */
-+extern int pasemi_dma_init(void);
+ extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
+ 
+ extern struct pci_dev *of_create_pci_dev(struct device_node *node,
+@@ -229,5 +221,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ 				 const struct resource *rsrc,
+ 				 resource_size_t *start, resource_size_t *end);
+ 
++extern void pcibios_do_bus_setup(struct pci_bus *bus);
++extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);
 +
-+#endif /* ASM_PASEMI_DMA_H */
+ #endif	/* __KERNEL__ */
+ #endif /* __ASM_POWERPC_PCI_H */
 diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
-index 6b22962..cc1cbf6 100644
+index 6b22962..ccb0523 100644
 --- a/include/asm-powerpc/percpu.h
 +++ b/include/asm-powerpc/percpu.h
-@@ -16,15 +16,6 @@
- #define __my_cpu_offset() get_paca()->data_offset
+@@ -13,54 +13,12 @@
+ #include <asm/paca.h>
+ 
+ #define __per_cpu_offset(cpu) (paca[cpu].data_offset)
+-#define __my_cpu_offset() get_paca()->data_offset
++#define __my_cpu_offset get_paca()->data_offset
  #define per_cpu_offset(x) (__per_cpu_offset(x))
  
 -/* Separate out the type, so (int[3], foo) works. */
 -#define DEFINE_PER_CPU(type, name) \
 -    __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
--
++#endif /* CONFIG_SMP */
++#endif /* __powerpc64__ */
+ 
 -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
 -    __attribute__((__section__(".data.percpu.shared_aligned"))) \
 -    __typeof__(type) per_cpu__##name				\
 -    ____cacheline_aligned_in_smp
 -
- /* var is in discarded region: offset to particular copy we want */
- #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
- #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
-@@ -43,11 +34,6 @@ extern void setup_per_cpu_areas(void);
- 
- #else /* ! SMP */
- 
+-/* var is in discarded region: offset to particular copy we want */
+-#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
+-#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+-#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, local_paca->data_offset))
+-
+-/* A macro to avoid #include hell... */
+-#define percpu_modcopy(pcpudst, src, size)			\
+-do {								\
+-	unsigned int __i;					\
+-	for_each_possible_cpu(__i)				\
+-		memcpy((pcpudst)+__per_cpu_offset(__i),		\
+-		       (src), (size));				\
+-} while (0)
+-
+-extern void setup_per_cpu_areas(void);
+-
+-#else /* ! SMP */
+-
 -#define DEFINE_PER_CPU(type, name) \
 -    __typeof__(type) per_cpu__##name
 -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
 -    DEFINE_PER_CPU(type, name)
 -
- #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
- #define __get_cpu_var(var)			per_cpu__##var
- #define __raw_get_cpu_var(var)			per_cpu__##var
-@@ -56,9 +42,6 @@ extern void setup_per_cpu_areas(void);
- 
- #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
- 
+-#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
+-#define __get_cpu_var(var)			per_cpu__##var
+-#define __raw_get_cpu_var(var)			per_cpu__##var
+-
+-#endif	/* SMP */
+-
+-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+-
 -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 -
- #else
+-#else
  #include <asm-generic/percpu.h>
+-#endif
+ 
+ #endif /* _ASM_POWERPC_PERCPU_H_ */
+diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
+index 94d0294..43214c8 100644
+--- a/include/asm-powerpc/pgalloc-64.h
++++ b/include/asm-powerpc/pgalloc-64.h
+@@ -12,6 +12,10 @@
+ #include <linux/cpumask.h>
+ #include <linux/percpu.h>
+ 
++#ifndef CONFIG_PPC_SUBPAGE_PROT
++static inline void subpage_prot_free(pgd_t *pgd) {}
++#endif
++
+ extern struct kmem_cache *pgtable_cache[];
+ 
+ #define PGD_CACHE_NUM		0
+@@ -27,6 +31,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ 
+ static inline void pgd_free(pgd_t *pgd)
+ {
++	subpage_prot_free(pgd);
+ 	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+ }
+ 
+diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
+index bd54b77..1cbd6b3 100644
+--- a/include/asm-powerpc/pgtable-64k.h
++++ b/include/asm-powerpc/pgtable-64k.h
+@@ -13,12 +13,49 @@
+ #define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
+ #define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
+ #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+-#endif	/* __ASSEMBLY__ */
+ 
+ #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+ #define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
+ #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+ 
++#ifdef CONFIG_PPC_SUBPAGE_PROT
++/*
++ * For the sub-page protection option, we extend the PGD with one of
++ * these.  Basically we have a 3-level tree, with the top level being
++ * the protptrs array.  To optimize speed and memory consumption when
++ * only addresses < 4GB are being protected, pointers to the first
++ * four pages of sub-page protection words are stored in the low_prot
++ * array.
++ * Each page of sub-page protection words protects 1GB (4 bytes
++ * protects 64k).  For the 3-level tree, each page of pointers then
++ * protects 8TB.
++ */
++struct subpage_prot_table {
++	unsigned long maxaddr;	/* only addresses < this are protected */
++	unsigned int **protptrs[2];
++	unsigned int *low_prot[4];
++};
++
++#undef PGD_TABLE_SIZE
++#define PGD_TABLE_SIZE		((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
++				 sizeof(struct subpage_prot_table))
++
++#define SBP_L1_BITS		(PAGE_SHIFT - 2)
++#define SBP_L2_BITS		(PAGE_SHIFT - 3)
++#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
++#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
++#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
++#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
++
++extern void subpage_prot_free(pgd_t *pgd);
++
++static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
++{
++	return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
++}
++#endif /* CONFIG_PPC_SUBPAGE_PROT */
++#endif	/* __ASSEMBLY__ */
++
+ /* With 4k base page size, hugepage PTEs go at the PMD level */
+ #define MIN_HUGEPTE_SHIFT	PAGE_SHIFT
+ 
+diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
+index b847aa1..854ab71 100644
+--- a/include/asm-powerpc/ppc-pci.h
++++ b/include/asm-powerpc/ppc-pci.h
+@@ -22,7 +22,6 @@ extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
+ 
+ 
+ extern struct list_head hose_list;
+-extern int global_phb_number;
+ 
+ extern void find_and_init_phbs(void);
+ 
+@@ -47,9 +46,6 @@ extern void init_pci_config_tokens (void);
+ extern unsigned long get_phb_buid (struct device_node *);
+ extern int rtas_setup_phb(struct pci_controller *phb);
+ 
+-/* From iSeries PCI */
+-extern void iSeries_pcibios_init(void);
+-
+ extern unsigned long pci_probe_only;
+ 
+ /* ---- EEH internal-use-only related routines ---- */
+diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
+index 925e2d3..78b7b0d 100644
+--- a/include/asm-powerpc/prom.h
++++ b/include/asm-powerpc/prom.h
+@@ -202,6 +202,10 @@ static inline unsigned long of_read_ulong(const u32 *cell, int size)
+  */
+ extern u64 of_translate_address(struct device_node *np, const u32 *addr);
+ 
++/* Translate a DMA address from device space to CPU space */
++extern u64 of_translate_dma_address(struct device_node *dev,
++				    const u32 *in_addr);
++
+ /* Extract an address from a device, returns the region size and
+  * the address space flags too. The PCI version uses a BAR number
+  * instead of an absolute index
+diff --git a/include/asm-powerpc/ps3.h b/include/asm-powerpc/ps3.h
+index f577a16..2b69367 100644
+--- a/include/asm-powerpc/ps3.h
++++ b/include/asm-powerpc/ps3.h
+@@ -24,6 +24,7 @@
+ #include <linux/init.h>
+ #include <linux/types.h>
+ #include <linux/device.h>
++#include "cell-pmu.h"
+ 
+ union ps3_firmware_version {
+ 	u64 raw;
+@@ -317,6 +318,7 @@ enum ps3_match_id {
+ 	PS3_MATCH_ID_STOR_FLASH     = 8,
+ 	PS3_MATCH_ID_SOUND          = 9,
+ 	PS3_MATCH_ID_GRAPHICS       = 10,
++	PS3_MATCH_ID_LPM            = 11,
+ };
+ 
+ #define PS3_MODULE_ALIAS_EHCI           "ps3:1"
+@@ -329,11 +331,13 @@ enum ps3_match_id {
+ #define PS3_MODULE_ALIAS_STOR_FLASH     "ps3:8"
+ #define PS3_MODULE_ALIAS_SOUND          "ps3:9"
+ #define PS3_MODULE_ALIAS_GRAPHICS       "ps3:10"
++#define PS3_MODULE_ALIAS_LPM            "ps3:11"
+ 
+ enum ps3_system_bus_device_type {
+ 	PS3_DEVICE_TYPE_IOC0 = 1,
+ 	PS3_DEVICE_TYPE_SB,
+ 	PS3_DEVICE_TYPE_VUART,
++	PS3_DEVICE_TYPE_LPM,
+ };
+ 
+ /**
+@@ -344,12 +348,17 @@ struct ps3_system_bus_device {
+ 	enum ps3_match_id match_id;
+ 	enum ps3_system_bus_device_type dev_type;
+ 
+-	unsigned int bus_id;              /* SB */
+-	unsigned int dev_id;              /* SB */
++	u64 bus_id;                       /* SB */
++	u64 dev_id;                       /* SB */
+ 	unsigned int interrupt_id;        /* SB */
+ 	struct ps3_dma_region *d_region;  /* SB, IOC0 */
+ 	struct ps3_mmio_region *m_region; /* SB, IOC0*/
+ 	unsigned int port_number;         /* VUART */
++	struct {                          /* LPM */
++		u64 node_id;
++		u64 pu_id;
++		u64 rights;
++	} lpm;
+ 
+ /*	struct iommu_table *iommu_table; -- waiting for BenH's cleanups */
+ 	struct device core;
+@@ -438,5 +447,66 @@ struct ps3_prealloc {
+ extern struct ps3_prealloc ps3fb_videomemory;
+ extern struct ps3_prealloc ps3flash_bounce_buffer;
+ 
++/* logical performance monitor */
++
++/**
++ * enum ps3_lpm_rights - Rights granted by the system policy module.
++ *
++ * @PS3_LPM_RIGHTS_USE_LPM: The right to use the lpm.
++ * @PS3_LPM_RIGHTS_USE_TB: The right to use the internal trace buffer.
++ */
++
++enum ps3_lpm_rights {
++	PS3_LPM_RIGHTS_USE_LPM = 0x001,
++	PS3_LPM_RIGHTS_USE_TB = 0x100,
++};
++
++/**
++ * enum ps3_lpm_tb_type - Type of trace buffer lv1 should use.
++ *
++ * @PS3_LPM_TB_TYPE_NONE: Do not use a trace buffer.
++ * @PS3_LPM_RIGHTS_USE_TB: Use the lv1 internal trace buffer.  Must have
++ *  rights @PS3_LPM_RIGHTS_USE_TB.
++ */
++
++enum ps3_lpm_tb_type {
++	PS3_LPM_TB_TYPE_NONE = 0,
++	PS3_LPM_TB_TYPE_INTERNAL = 1,
++};
++
++int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
++	u64 tb_cache_size);
++int ps3_lpm_close(void);
++int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
++	unsigned long *bytes_copied);
++int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
++	unsigned long count, unsigned long *bytes_copied);
++void ps3_set_bookmark(u64 bookmark);
++void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id);
++int ps3_set_signal(u64 rtas_signal_group, u8 signal_bit, u16 sub_unit,
++	u8 bus_word);
++
++u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr);
++void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
++u32 ps3_read_ctr(u32 cpu, u32 ctr);
++void ps3_write_ctr(u32 cpu, u32 ctr, u32 val);
++
++u32 ps3_read_pm07_control(u32 cpu, u32 ctr);
++void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val);
++u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg);
++void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
++
++u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr);
++void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
++
++void ps3_enable_pm(u32 cpu);
++void ps3_disable_pm(u32 cpu);
++void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
++void ps3_disable_pm_interrupts(u32 cpu);
++
++u32 ps3_get_and_clear_pm_interrupts(u32 cpu);
++void ps3_sync_irq(int node);
++u32 ps3_get_hw_thread_id(int cpu);
++u64 ps3_get_spe_id(void *arg);
+ 
  #endif
 diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
-index 13fccc5..3063363 100644
+index 13fccc5..ffc150f 100644
 --- a/include/asm-powerpc/ptrace.h
 +++ b/include/asm-powerpc/ptrace.h
-@@ -119,6 +119,13 @@ do {									      \
+@@ -106,7 +106,8 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
+  */
+ #define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
+ #ifndef __powerpc64__
+-#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) == 0)
++#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
++#define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
+ #endif /* ! __powerpc64__ */
+ #define TRAP(regs)		((regs)->trap & ~0xF)
+ #ifdef __powerpc64__
+@@ -119,6 +120,13 @@ do {									      \
  } while (0)
  #endif /* __powerpc64__ */
  
@@ -598081,6 +751494,2852 @@
  #endif /* __ASSEMBLY__ */
  
  #endif /* __KERNEL__ */
+diff --git a/include/asm-powerpc/qe.h b/include/asm-powerpc/qe.h
+index 0dabe46..430dc77 100644
+--- a/include/asm-powerpc/qe.h
++++ b/include/asm-powerpc/qe.h
+@@ -28,6 +28,52 @@
+ #define MEM_PART_SECONDARY	1
+ #define MEM_PART_MURAM		2
+ 
++/* Clocks and BRGs */
++enum qe_clock {
++	QE_CLK_NONE = 0,
++	QE_BRG1,		/* Baud Rate Generator 1 */
++	QE_BRG2,		/* Baud Rate Generator 2 */
++	QE_BRG3,		/* Baud Rate Generator 3 */
++	QE_BRG4,		/* Baud Rate Generator 4 */
++	QE_BRG5,		/* Baud Rate Generator 5 */
++	QE_BRG6,		/* Baud Rate Generator 6 */
++	QE_BRG7,		/* Baud Rate Generator 7 */
++	QE_BRG8,		/* Baud Rate Generator 8 */
++	QE_BRG9,		/* Baud Rate Generator 9 */
++	QE_BRG10,		/* Baud Rate Generator 10 */
++	QE_BRG11,		/* Baud Rate Generator 11 */
++	QE_BRG12,		/* Baud Rate Generator 12 */
++	QE_BRG13,		/* Baud Rate Generator 13 */
++	QE_BRG14,		/* Baud Rate Generator 14 */
++	QE_BRG15,		/* Baud Rate Generator 15 */
++	QE_BRG16,		/* Baud Rate Generator 16 */
++	QE_CLK1,		/* Clock 1 */
++	QE_CLK2,		/* Clock 2 */
++	QE_CLK3,		/* Clock 3 */
++	QE_CLK4,		/* Clock 4 */
++	QE_CLK5,		/* Clock 5 */
++	QE_CLK6,		/* Clock 6 */
++	QE_CLK7,		/* Clock 7 */
++	QE_CLK8,		/* Clock 8 */
++	QE_CLK9,		/* Clock 9 */
++	QE_CLK10,		/* Clock 10 */
++	QE_CLK11,		/* Clock 11 */
++	QE_CLK12,		/* Clock 12 */
++	QE_CLK13,		/* Clock 13 */
++	QE_CLK14,		/* Clock 14 */
++	QE_CLK15,		/* Clock 15 */
++	QE_CLK16,		/* Clock 16 */
++	QE_CLK17,		/* Clock 17 */
++	QE_CLK18,		/* Clock 18 */
++	QE_CLK19,		/* Clock 19 */
++	QE_CLK20,		/* Clock 20 */
++	QE_CLK21,		/* Clock 21 */
++	QE_CLK22,		/* Clock 22 */
++	QE_CLK23,		/* Clock 23 */
++	QE_CLK24,		/* Clock 24 */
++	QE_CLK_DUMMY
++};
++
+ /* Export QE common operations */
+ extern void qe_reset(void);
+ extern int par_io_init(struct device_node *np);
+@@ -38,7 +84,8 @@ extern int par_io_data_set(u8 port, u8 pin, u8 val);
+ 
+ /* QE internal API */
+ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
+-void qe_setbrg(unsigned int brg, unsigned int rate, unsigned int multiplier);
++enum qe_clock qe_clock_source(const char *source);
++int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier);
+ int qe_get_snum(void);
+ void qe_put_snum(u8 snum);
+ unsigned long qe_muram_alloc(int size, int align);
+@@ -47,6 +94,58 @@ unsigned long qe_muram_alloc_fixed(unsigned long offset, int size);
+ void qe_muram_dump(void);
+ void *qe_muram_addr(unsigned long offset);
+ 
++/* Structure that defines QE firmware binary files.
++ *
++ * See Documentation/powerpc/qe-firmware.txt for a description of these
++ * fields.
++ */
++struct qe_firmware {
++	struct qe_header {
++		__be32 length;  /* Length of the entire structure, in bytes */
++		u8 magic[3];    /* Set to { 'Q', 'E', 'F' } */
++		u8 version;     /* Version of this layout. First ver is '1' */
++	} header;
++	u8 id[62];      /* Null-terminated identifier string */
++	u8 split;	/* 0 = shared I-RAM, 1 = split I-RAM */
++	u8 count;       /* Number of microcode[] structures */
++	struct {
++		__be16 model;   	/* The SOC model  */
++		u8 major;       	/* The SOC revision major */
++		u8 minor;       	/* The SOC revision minor */
++	} __attribute__ ((packed)) soc;
++	u8 padding[4];			/* Reserved, for alignment */
++	__be64 extended_modes;		/* Extended modes */
++	__be32 vtraps[8];		/* Virtual trap addresses */
++	u8 reserved[4];			/* Reserved, for future expansion */
++	struct qe_microcode {
++		u8 id[32];      	/* Null-terminated identifier */
++		__be32 traps[16];       /* Trap addresses, 0 == ignore */
++		__be32 eccr;    	/* The value for the ECCR register */
++		__be32 iram_offset;     /* Offset into I-RAM for the code */
++		__be32 count;   	/* Number of 32-bit words of the code */
++		__be32 code_offset;     /* Offset of the actual microcode */
++		u8 major;       	/* The microcode version major */
++		u8 minor;       	/* The microcode version minor */
++		u8 revision;		/* The microcode version revision */
++		u8 padding;		/* Reserved, for alignment */
++		u8 reserved[4];		/* Reserved, for future expansion */
++	} __attribute__ ((packed)) microcode[1];
++	/* All microcode binaries should be located here */
++	/* CRC32 should be located here, after the microcode binaries */
++} __attribute__ ((packed));
++
++struct qe_firmware_info {
++	char id[64];		/* Firmware name */
++	u32 vtraps[8];		/* Virtual trap addresses */
++	u64 extended_modes;	/* Extended modes */
++};
++
++/* Upload a firmware to the QE */
++int qe_upload_firmware(const struct qe_firmware *firmware);
++
++/* Obtain information on the uploaded firmware */
++struct qe_firmware_info *qe_get_firmware_info(void);
++
+ /* Buffer descriptors */
+ struct qe_bd {
+ 	__be16 status;
+@@ -129,52 +228,6 @@ enum comm_dir {
+ 	COMM_DIR_RX_AND_TX = 3
+ };
+ 
+-/* Clocks and BRGs */
+-enum qe_clock {
+-	QE_CLK_NONE = 0,
+-	QE_BRG1,		/* Baud Rate Generator 1 */
+-	QE_BRG2,		/* Baud Rate Generator 2 */
+-	QE_BRG3,		/* Baud Rate Generator 3 */
+-	QE_BRG4,		/* Baud Rate Generator 4 */
+-	QE_BRG5,		/* Baud Rate Generator 5 */
+-	QE_BRG6,		/* Baud Rate Generator 6 */
+-	QE_BRG7,		/* Baud Rate Generator 7 */
+-	QE_BRG8,		/* Baud Rate Generator 8 */
+-	QE_BRG9,		/* Baud Rate Generator 9 */
+-	QE_BRG10,		/* Baud Rate Generator 10 */
+-	QE_BRG11,		/* Baud Rate Generator 11 */
+-	QE_BRG12,		/* Baud Rate Generator 12 */
+-	QE_BRG13,		/* Baud Rate Generator 13 */
+-	QE_BRG14,		/* Baud Rate Generator 14 */
+-	QE_BRG15,		/* Baud Rate Generator 15 */
+-	QE_BRG16,		/* Baud Rate Generator 16 */
+-	QE_CLK1,		/* Clock 1 */
+-	QE_CLK2,		/* Clock 2 */
+-	QE_CLK3,		/* Clock 3 */
+-	QE_CLK4,		/* Clock 4 */
+-	QE_CLK5,		/* Clock 5 */
+-	QE_CLK6,		/* Clock 6 */
+-	QE_CLK7,		/* Clock 7 */
+-	QE_CLK8,		/* Clock 8 */
+-	QE_CLK9,		/* Clock 9 */
+-	QE_CLK10,		/* Clock 10 */
+-	QE_CLK11,		/* Clock 11 */
+-	QE_CLK12,		/* Clock 12 */
+-	QE_CLK13,		/* Clock 13 */
+-	QE_CLK14,		/* Clock 14 */
+-	QE_CLK15,		/* Clock 15 */
+-	QE_CLK16,		/* Clock 16 */
+-	QE_CLK17,		/* Clock 17 */
+-	QE_CLK18,		/* Clock 18 */
+-	QE_CLK19,		/* Clock 19 */
+-	QE_CLK20,		/* Clock 20 */
+-	QE_CLK21,		/* Clock 21 */
+-	QE_CLK22,		/* Clock 22 */
+-	QE_CLK23,		/* Clock 23 */
+-	QE_CLK24,		/* Clock 24 */
+-	QE_CLK_DUMMY,
+-};
+-
+ /* QE CMXUCR Registers.
+  * There are two UCCs represented in each of the four CMXUCR registers.
+  * These values are for the UCC in the LSBs
+@@ -328,6 +381,15 @@ enum qe_clock {
+ 
+ #define QE_SDEBCR_BA_MASK	0x01FFFFFF
+ 
++/* Communication Processor */
++#define QE_CP_CERCR_MEE		0x8000	/* Multi-user RAM ECC enable */
++#define QE_CP_CERCR_IEE		0x4000	/* Instruction RAM ECC enable */
++#define QE_CP_CERCR_CIR		0x0800	/* Common instruction RAM */
++
++/* I-RAM */
++#define QE_IRAM_IADD_AIE	0x80000000	/* Auto Increment Enable */
++#define QE_IRAM_IADD_BADDR	0x00080000	/* Base Address */
++
+ /* UPC */
+ #define UPGCR_PROTOCOL	0x80000000	/* protocol ul2 or pl2 */
+ #define UPGCR_TMS	0x40000000	/* Transmit master/slave mode */
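As a rough illustration of the qe_firmware layout documented above — not part of the patch — a caller might sanity-check a blob before handing it to qe_upload_firmware() along these lines (the helper name and the boolean return convention are assumptions for the sketch; only the fields and the 'QEF' magic come from the header):

    /* Sketch only: validate the header described by struct qe_firmware. */
    static int qe_firmware_looks_sane(const struct qe_firmware *fw, size_t blob_size)
    {
    	/* the fixed header must fit and carry the QEF magic */
    	if (blob_size < sizeof(struct qe_header))
    		return 0;
    	if (fw->header.magic[0] != 'Q' || fw->header.magic[1] != 'E' ||
    	    fw->header.magic[2] != 'F')
    		return 0;
    	/* declared length covers the microcode blobs and the trailing CRC32 */
    	if (be32_to_cpu(fw->header.length) > blob_size)
    		return 0;
    	return 1;
    }
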
+diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
+index e775ff1..2408a29 100644
+--- a/include/asm-powerpc/reg.h
++++ b/include/asm-powerpc/reg.h
+@@ -553,6 +553,7 @@
+ #define SPRN_PA6T_BTCR	978	/* Breakpoint and Tagging Control Register */
+ #define SPRN_PA6T_IMAAT	979	/* Instruction Match Array Action Table */
+ #define SPRN_PA6T_PCCR	1019	/* Power Counter Control Register */
++#define SPRN_BKMK	1020	/* Cell Bookmark Register */
+ #define SPRN_PA6T_RPCCR	1021	/* Retire PC Trace Control Register */
+ 
+ 
+@@ -691,12 +692,6 @@
+ #define PV_BE		0x0070
+ #define PV_PA6T		0x0090
+ 
+-/*
+- * Number of entries in the SLB. If this ever changes we should handle
+- * it with a use a cpu feature fixup.
+- */
+-#define SLB_NUM_ENTRIES 64
+-
+ /* Macros for setting and retrieving special purpose registers */
+ #ifndef __ASSEMBLY__
+ #define mfmsr()		({unsigned long rval; \
+diff --git a/include/asm-powerpc/reg_booke.h b/include/asm-powerpc/reg_booke.h
+index 8fdc2b4..0405ef4 100644
+--- a/include/asm-powerpc/reg_booke.h
++++ b/include/asm-powerpc/reg_booke.h
+@@ -123,16 +123,23 @@
+ #define SPRN_SPEFSCR	0x200	/* SPE & Embedded FP Status & Control */
+ #define SPRN_BBEAR	0x201	/* Branch Buffer Entry Address Register */
+ #define SPRN_BBTAR	0x202	/* Branch Buffer Target Address Register */
++#define SPRN_ATB	0x20E	/* Alternate Time Base */
++#define SPRN_ATBL	0x20E	/* Alternate Time Base Lower */
++#define SPRN_ATBU	0x20F	/* Alternate Time Base Upper */
+ #define SPRN_IVOR32	0x210	/* Interrupt Vector Offset Register 32 */
+ #define SPRN_IVOR33	0x211	/* Interrupt Vector Offset Register 33 */
+ #define SPRN_IVOR34	0x212	/* Interrupt Vector Offset Register 34 */
+ #define SPRN_IVOR35	0x213	/* Interrupt Vector Offset Register 35 */
++#define SPRN_IVOR36	0x214	/* Interrupt Vector Offset Register 36 */
++#define SPRN_IVOR37	0x215	/* Interrupt Vector Offset Register 37 */
+ #define SPRN_MCSRR0	0x23A	/* Machine Check Save and Restore Register 0 */
+ #define SPRN_MCSRR1	0x23B	/* Machine Check Save and Restore Register 1 */
+ #define SPRN_MCSR	0x23C	/* Machine Check Status Register */
+ #define SPRN_MCAR	0x23D	/* Machine Check Address Register */
+ #define SPRN_DSRR0	0x23E	/* Debug Save and Restore Register 0 */
+ #define SPRN_DSRR1	0x23F	/* Debug Save and Restore Register 1 */
++#define SPRN_SPRG8	0x25C	/* Special Purpose Register General 8 */
++#define SPRN_SPRG9	0x25D	/* Special Purpose Register General 9 */
+ #define SPRN_MAS0	0x270	/* MMU Assist Register 0 */
+ #define SPRN_MAS1	0x271	/* MMU Assist Register 1 */
+ #define SPRN_MAS2	0x272	/* MMU Assist Register 2 */
+@@ -140,15 +147,18 @@
+ #define SPRN_MAS4	0x274	/* MMU Assist Register 4 */
+ #define SPRN_MAS5	0x275	/* MMU Assist Register 5 */
+ #define SPRN_MAS6	0x276	/* MMU Assist Register 6 */
+-#define SPRN_MAS7	0x3b0	/* MMU Assist Register 7 */
+ #define SPRN_PID1	0x279	/* Process ID Register 1 */
+ #define SPRN_PID2	0x27A	/* Process ID Register 2 */
+ #define SPRN_TLB0CFG	0x2B0	/* TLB 0 Config Register */
+ #define SPRN_TLB1CFG	0x2B1	/* TLB 1 Config Register */
++#define SPRN_EPR	0x2BE	/* External Proxy Register */
+ #define SPRN_CCR1	0x378	/* Core Configuration Register 1 */
+ #define SPRN_ZPR	0x3B0	/* Zone Protection Register (40x) */
++#define SPRN_MAS7	0x3B0	/* MMU Assist Register 7 */
+ #define SPRN_MMUCR	0x3B2	/* MMU Control Register */
+ #define SPRN_CCR0	0x3B3	/* Core Configuration Register 0 */
++#define SPRN_EPLC	0x3B3	/* External Process ID Load Context */
++#define SPRN_EPSC	0x3B4	/* External Process ID Store Context */
+ #define SPRN_SGR	0x3B9	/* Storage Guarded Register */
+ #define SPRN_DCWR	0x3BA	/* Data Cache Write-thru Register */
+ #define SPRN_SLER	0x3BB	/* Little-endian real mode */
+@@ -159,6 +169,7 @@
+ #define SPRN_L1CSR0	0x3F2	/* L1 Cache Control and Status Register 0 */
+ #define SPRN_L1CSR1	0x3F3	/* L1 Cache Control and Status Register 1 */
+ #define SPRN_PIT	0x3DB	/* Programmable Interval Timer */
++#define SPRN_BUCSR	0x3F5	/* Branch Unit Control and Status */
+ #define SPRN_DCCR	0x3FA	/* Data Cache Cacheability Register */
+ #define SPRN_ICCR	0x3FB	/* Instruction Cache Cacheability Register */
+ #define SPRN_SVR	0x3FF	/* System Version Register */
+@@ -207,7 +218,6 @@
+ #define	CCR1_TCS	0x00000080 /* Timer Clock Select */
+ 
+ /* Bit definitions for the MCSR. */
+-#ifdef CONFIG_440A
+ #define MCSR_MCS	0x80000000 /* Machine Check Summary */
+ #define MCSR_IB		0x40000000 /* Instruction PLB Error */
+ #define MCSR_DRB	0x20000000 /* Data Read PLB Error */
+@@ -217,7 +227,7 @@
+ #define MCSR_DCSP	0x02000000 /* D-Cache Search Parity Error */
+ #define MCSR_DCFP	0x01000000 /* D-Cache Flush Parity Error */
+ #define MCSR_IMPE	0x00800000 /* Imprecise Machine Check Exception */
+-#endif
++
+ #ifdef CONFIG_E500
+ #define MCSR_MCP 	0x80000000UL /* Machine Check Input Pin */
+ #define MCSR_ICPERR 	0x40000000UL /* I-Cache Parity Error */
+@@ -293,7 +303,7 @@
+ #define ESR_IMCB	0x20000000	/* Instr. Machine Check - Bus error */
+ #define ESR_IMCT	0x10000000	/* Instr. Machine Check - Timeout */
+ #define ESR_PIL		0x08000000	/* Program Exception - Illegal */
+-#define ESR_PPR		0x04000000	/* Program Exception - Priveleged */
++#define ESR_PPR		0x04000000	/* Program Exception - Privileged */
+ #define ESR_PTR		0x02000000	/* Program Exception - Trap */
+ #define ESR_FP		0x01000000	/* Floating Point Operation */
+ #define ESR_DST		0x00800000	/* Storage Exception - Data miss */
+diff --git a/include/asm-powerpc/setjmp.h b/include/asm-powerpc/setjmp.h
+new file mode 100644
+index 0000000..279d03a
+--- /dev/null
++++ b/include/asm-powerpc/setjmp.h
+@@ -0,0 +1,18 @@
++/*
++ * Copyright © 2008 Michael Neuling IBM Corporation
++ *
++ *      This program is free software; you can redistribute it and/or
++ *      modify it under the terms of the GNU General Public License
++ *      as published by the Free Software Foundation; either version
++ *      2 of the License, or (at your option) any later version.
++ *
++ */
++#ifndef _ASM_POWERPC_SETJMP_H
++#define _ASM_POWERPC_SETJMP_H
++
++#define JMP_BUF_LEN    23
++
++extern long setjmp(long *);
++extern void longjmp(long *, long);
++
++#endif /* _ASM_POWERPC_SETJMP_H */
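The new kernel-internal setjmp()/longjmp() pair follows the usual C idiom; a minimal sketch is below (run_with_recovery() and risky_operation() are hypothetical, only the buffer size comes from JMP_BUF_LEN above):

    static long jb[JMP_BUF_LEN];		/* jump buffer sized per the header */

    static void run_with_recovery(void)
    {
    	if (setjmp(jb) == 0)
    		risky_operation();	/* hypothetical; may longjmp(jb, 1) */
    	else
    		;			/* execution resumes here after the longjmp */
    }
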
+diff --git a/include/asm-powerpc/smu.h b/include/asm-powerpc/smu.h
+index e49f644..7ae2753 100644
+--- a/include/asm-powerpc/smu.h
++++ b/include/asm-powerpc/smu.h
+@@ -22,7 +22,7 @@
+  * Partition info commands
+  *
+  * These commands are used to retrieve the sdb-partition-XX datas from
+- * the SMU. The lenght is always 2. First byte is the subcommand code
++ * the SMU. The length is always 2. First byte is the subcommand code
+  * and second byte is the partition ID.
+  *
+  * The reply is 6 bytes:
+@@ -173,12 +173,12 @@
+  * Power supply control
+  *
+  * The "sub" command is an ASCII string in the data, the
+- * data lenght is that of the string.
++ * data length is that of the string.
+  *
+  * The VSLEW command can be used to get or set the voltage slewing.
+- *  - lenght 5 (only "VSLEW") : it returns "DONE" and 3 bytes of
++ *  - length 5 (only "VSLEW") : it returns "DONE" and 3 bytes of
+  *    reply at data offset 6, 7 and 8.
+- *  - lenght 8 ("VSLEWxyz") has 3 additional bytes appended, and is
++ *  - length 8 ("VSLEWxyz") has 3 additional bytes appended, and is
+  *    used to set the voltage slewing point. The SMU replies with "DONE"
+  * I yet have to figure out their exact meaning of those 3 bytes in
+  * both cases. They seem to be:
+@@ -201,20 +201,90 @@
+  */
+ #define SMU_CMD_READ_ADC			0xd8
+ 
++
+ /* Misc commands
+  *
+  * This command seem to be a grab bag of various things
++ *
++ * Parameters:
++ *   1: subcommand
+  */
+ #define SMU_CMD_MISC_df_COMMAND			0xdf
+-#define   SMU_CMD_MISC_df_SET_DISPLAY_LIT	0x02 /* i: 1 byte */
++
++/*
++ * Sets "system ready" status
++ *
++ * I have not yet understood exactly how it works or what it does.
++ *
++ * Guessing from OF code, 0x02 activates the display backlight. Apple uses/used
++ * the same codebase for all OF versions. On PowerBooks, this command would
++ * enable the backlight. For the G5s, it only activates the front LED. However,
++ * don't take this for granted.
++ *
++ * Parameters:
++ *   2: status [0x00, 0x01 or 0x02]
++ */
++#define   SMU_CMD_MISC_df_SET_DISPLAY_LIT	0x02
++
++/*
++ * Sets mode of power switch.
++ *
++ * What this actually does is not yet known. Maybe it enables some interrupt.
++ *
++ * Parameters:
++ *   2: enable power switch? [0x00 or 0x01]
++ *   3 (optional): enable nmi? [0x00 or 0x01]
++ *
++ * Returns:
++ *   If parameter 2 is 0x00 and parameter 3 is not specified, returns whether
++ *   NMI is enabled. Otherwise unknown.
++ */
+ #define   SMU_CMD_MISC_df_NMI_OPTION		0x04
+ 
++/* Sets LED dimm offset.
++ *
++ * The front LED dims itself during sleep. Its brightness (or, well, the PWM
++ * frequency) depends on current time. Therefore, the SMU needs to know the
++ * timezone.
++ *
++ * Parameters:
++ *   2-8: unknown (BCD coding)
++ */
++#define   SMU_CMD_MISC_df_DIMM_OFFSET		0x99
++
++
+ /*
+  * Version info commands
+  *
+- * I haven't quite tried to figure out how these work
++ * Parameters:
++ *   1 (optional): Specifies version part to retrieve
++ *
++ * Returns:
++ *   Version value
+  */
+ #define SMU_CMD_VERSION_COMMAND			0xea
++#define   SMU_VERSION_RUNNING			0x00
++#define   SMU_VERSION_BASE			0x01
++#define   SMU_VERSION_UPDATE			0x02
++
++
++/*
++ * Switches
++ *
++ * These are switches whose status seems to be known to the SMU.
++ *
++ * Parameters:
++ *   none
++ *
++ * Result:
++ *   Switch bits (ORed, see below)
++ */
++#define SMU_CMD_SWITCHES			0xdc
++
++/* Switches bits */
++#define SMU_SWITCH_CASE_CLOSED			0x01
++#define SMU_SWITCH_AC_POWER			0x04
++#define SMU_SWITCH_POWER_SWITCH			0x08
+ 
+ 
+ /*
+@@ -243,10 +313,64 @@
+  */
+ #define SMU_CMD_MISC_ee_COMMAND			0xee
+ #define   SMU_CMD_MISC_ee_GET_DATABLOCK_REC	0x02
+-#define	  SMU_CMD_MISC_ee_LEDS_CTRL		0x04 /* i: 00 (00,01) [00] */
++
++/* Retrieves currently used watts.
++ *
++ * Parameters:
++ *   1: 0x03 (Meaning unknown)
++ */
++#define   SMU_CMD_MISC_ee_GET_WATTS		0x03
++
++#define   SMU_CMD_MISC_ee_LEDS_CTRL		0x04 /* i: 00 (00,01) [00] */
+ #define   SMU_CMD_MISC_ee_GET_DATA		0x05 /* i: 00 , o: ?? */
+ 
+ 
++/*
++ * Power related commands
++ *
++ * Parameters:
++ *   1: subcommand
++ */
++#define SMU_CMD_POWER_EVENTS_COMMAND		0x8f
++
++/* SMU_POWER_EVENTS subcommands */
++enum {
++	SMU_PWR_GET_POWERUP_EVENTS      = 0x00,
++	SMU_PWR_SET_POWERUP_EVENTS      = 0x01,
++	SMU_PWR_CLR_POWERUP_EVENTS      = 0x02,
++	SMU_PWR_GET_WAKEUP_EVENTS       = 0x03,
++	SMU_PWR_SET_WAKEUP_EVENTS       = 0x04,
++	SMU_PWR_CLR_WAKEUP_EVENTS       = 0x05,
++
++	/*
++	 * Get last shutdown cause
++	 *
++	 * Returns:
++	 *   1 byte (signed char): Last shutdown cause. Exact meaning unknown.
++	 */
++	SMU_PWR_LAST_SHUTDOWN_CAUSE	= 0x07,
++
++	/*
++	 * Sets or gets server ID. Meaning or use is unknown.
++	 *
++	 * Parameters:
++	 *   2 (optional): Set server ID (1 byte)
++	 *
++	 * Returns:
++	 *   1 byte (server ID?)
++	 */
++	SMU_PWR_SERVER_ID		= 0x08,
++};
++
++/* Power events wakeup bits */
++enum {
++	SMU_PWR_WAKEUP_KEY              = 0x01, /* Wake on key press */
++	SMU_PWR_WAKEUP_AC_INSERT        = 0x02, /* Wake on AC adapter plug */
++	SMU_PWR_WAKEUP_AC_CHANGE        = 0x04,
++	SMU_PWR_WAKEUP_LID_OPEN         = 0x08,
++	SMU_PWR_WAKEUP_RING             = 0x10,
++};
++
+ 
+ /*
+  * - Kernel side interface -
+@@ -564,13 +688,13 @@ struct smu_user_cmd_hdr
+ 
+ 	__u8		cmd;			/* SMU command byte */
+ 	__u8		pad[3];			/* padding */
+-	__u32		data_len;		/* Lenght of data following */
++	__u32		data_len;		/* Length of data following */
+ };
+ 
+ struct smu_user_reply_hdr
+ {
+ 	__u32		status;			/* Command status */
+-	__u32		reply_len;		/* Lenght of data follwing */
++	__u32		reply_len;		/* Length of data following */
+ };
+ 
+ #endif /*  _SMU_H */
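For the new power-event definitions above, a hedged sketch of how a driver could decode the bitmask returned by the SMU_PWR_GET_WAKEUP_EVENTS subcommand (the helper itself is hypothetical and not in the patch; only the bit names come from the header):

    static void smu_log_wakeup_bits(unsigned int bits)
    {
    	if (bits & SMU_PWR_WAKEUP_KEY)
    		printk(KERN_DEBUG "smu: wake on key press\n");
    	if (bits & SMU_PWR_WAKEUP_AC_INSERT)
    		printk(KERN_DEBUG "smu: wake on AC adapter plug\n");
    	if (bits & SMU_PWR_WAKEUP_AC_CHANGE)
    		printk(KERN_DEBUG "smu: wake on AC change\n");
    	if (bits & SMU_PWR_WAKEUP_LID_OPEN)
    		printk(KERN_DEBUG "smu: wake on lid open\n");
    	if (bits & SMU_PWR_WAKEUP_RING)
    		printk(KERN_DEBUG "smu: wake on ring\n");
    }
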
+diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
+index 48ad807..e8b493d 100644
+--- a/include/asm-powerpc/sparsemem.h
++++ b/include/asm-powerpc/sparsemem.h
+@@ -10,13 +10,8 @@
+  */
+ #define SECTION_SIZE_BITS       24
+ 
+-#if defined(CONFIG_PS3_USE_LPAR_ADDR)
+-#define MAX_PHYSADDR_BITS       47
+-#define MAX_PHYSMEM_BITS        47
+-#else
+ #define MAX_PHYSADDR_BITS       44
+ #define MAX_PHYSMEM_BITS        44
+-#endif
+ 
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ extern void create_section_mapping(unsigned long start, unsigned long end);
+diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
+index 34b7807..f07c99b 100644
+--- a/include/asm-powerpc/spu.h
++++ b/include/asm-powerpc/spu.h
+@@ -104,6 +104,7 @@
+ 
+ struct spu_context;
+ struct spu_runqueue;
++struct spu_lscsa;
+ struct device_node;
+ 
+ enum spu_utilization_state {
+@@ -145,7 +146,6 @@ struct spu {
+ 	void (* ibox_callback)(struct spu *spu);
+ 	void (* stop_callback)(struct spu *spu);
+ 	void (* mfc_callback)(struct spu *spu);
+-	void (* dma_callback)(struct spu *spu, int type);
+ 
+ 	char irq_c0[8];
+ 	char irq_c1[8];
+@@ -196,10 +196,11 @@ struct cbe_spu_info {
+ extern struct cbe_spu_info cbe_spu_info[];
+ 
+ void spu_init_channels(struct spu *spu);
+-int spu_irq_class_0_bottom(struct spu *spu);
+-int spu_irq_class_1_bottom(struct spu *spu);
+ void spu_irq_setaffinity(struct spu *spu, int cpu);
+ 
++void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
++		void *code, int code_size);
++
+ #ifdef CONFIG_KEXEC
+ void crash_register_spus(struct list_head *list);
+ #else
+@@ -210,6 +211,7 @@ static inline void crash_register_spus(struct list_head *list)
+ 
+ extern void spu_invalidate_slbs(struct spu *spu);
+ extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
++int spu_64k_pages_available(void);
+ 
+ /* Calls from the memory management to the SPU */
+ struct mm_struct;
+@@ -279,6 +281,8 @@ void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
+ int spu_add_sysdev_attr_group(struct attribute_group *attrs);
+ void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
+ 
++int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
++		unsigned long dsisr, unsigned *flt);
+ 
+ /*
+  * Notifier blocks:
+@@ -303,7 +307,7 @@ extern void notify_spus_active(void);
+ extern void do_notify_spus_active(void);
+ 
+ /*
+- * This defines the Local Store, Problem Area and Privlege Area of an SPU.
++ * This defines the Local Store, Problem Area and Privilege Area of an SPU.
+  */
+ 
+ union mfc_tag_size_class_cmd {
+@@ -524,8 +528,24 @@ struct spu_priv1 {
+ #define CLASS2_ENABLE_SPU_STOP_INTR			0x2L
+ #define CLASS2_ENABLE_SPU_HALT_INTR			0x4L
+ #define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR	0x8L
++#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR		0x10L
+ 	u8  pad_0x118_0x140[0x28];				/* 0x118 */
+ 	u64 int_stat_RW[3];					/* 0x140 */
++#define CLASS0_DMA_ALIGNMENT_INTR			0x1L
++#define CLASS0_INVALID_DMA_COMMAND_INTR			0x2L
++#define CLASS0_SPU_ERROR_INTR				0x4L
++#define CLASS0_INTR_MASK				0x7L
++#define CLASS1_SEGMENT_FAULT_INTR			0x1L
++#define CLASS1_STORAGE_FAULT_INTR			0x2L
++#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR		0x4L
++#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR		0x8L
++#define CLASS1_INTR_MASK				0xfL
++#define CLASS2_MAILBOX_INTR				0x1L
++#define CLASS2_SPU_STOP_INTR				0x2L
++#define CLASS2_SPU_HALT_INTR				0x4L
++#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR		0x8L
++#define CLASS2_MAILBOX_THRESHOLD_INTR			0x10L
++#define CLASS2_INTR_MASK				0x1fL
+ 	u8  pad_0x158_0x180[0x28];				/* 0x158 */
+ 	u64 int_route_RW;					/* 0x180 */
+ 
+diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
+index e87794d..0ab6bff 100644
+--- a/include/asm-powerpc/spu_csa.h
++++ b/include/asm-powerpc/spu_csa.h
+@@ -194,7 +194,7 @@ struct spu_priv1_collapsed {
+ };
+ 
+ /*
+- * struct spu_priv2_collapsed - condensed priviliged 2 area, w/o pads.
++ * struct spu_priv2_collapsed - condensed privileged 2 area, w/o pads.
+  */
+ struct spu_priv2_collapsed {
+ 	u64 slb_index_W;
+@@ -254,20 +254,11 @@ struct spu_state {
+ 	u64 spu_chnldata_RW[32];
+ 	u32 spu_mailbox_data[4];
+ 	u32 pu_mailbox_data[1];
+-	u64 dar, dsisr;
++	u64 dar, dsisr, class_0_pending;
+ 	unsigned long suspend_time;
+ 	spinlock_t register_lock;
+ };
+ 
+-extern int spu_init_csa(struct spu_state *csa);
+-extern void spu_fini_csa(struct spu_state *csa);
+-extern int spu_save(struct spu_state *prev, struct spu *spu);
+-extern int spu_restore(struct spu_state *new, struct spu *spu);
+-extern int spu_switch(struct spu_state *prev, struct spu_state *new,
+-		      struct spu *spu);
+-extern int spu_alloc_lscsa(struct spu_state *csa);
+-extern void spu_free_lscsa(struct spu_state *csa);
+-
+ #endif /* !__SPU__ */
+ #endif /* __KERNEL__ */
+ #endif /* !__ASSEMBLY__ */
+diff --git a/include/asm-powerpc/spu_priv1.h b/include/asm-powerpc/spu_priv1.h
+index 0f37c7c..25020a3 100644
+--- a/include/asm-powerpc/spu_priv1.h
++++ b/include/asm-powerpc/spu_priv1.h
+@@ -24,6 +24,7 @@
+ #include <linux/types.h>
+ 
+ struct spu;
++struct spu_context;
+ 
+ /* access to priv1 registers */
+ 
+@@ -178,6 +179,8 @@ struct spu_management_ops {
+ 	int (*enumerate_spus)(int (*fn)(void *data));
+ 	int (*create_spu)(struct spu *spu, void *data);
+ 	int (*destroy_spu)(struct spu *spu);
++	void (*enable_spu)(struct spu_context *ctx);
++	void (*disable_spu)(struct spu_context *ctx);
+ 	int (*init_affinity)(void);
+ };
+ 
+@@ -207,6 +210,18 @@ spu_init_affinity (void)
+ 	return spu_management_ops->init_affinity();
+ }
+ 
++static inline void
++spu_enable_spu (struct spu_context *ctx)
++{
++	spu_management_ops->enable_spu(ctx);
++}
++
++static inline void
++spu_disable_spu (struct spu_context *ctx)
++{
++	spu_management_ops->disable_spu(ctx);
++}
++
+ /*
+  * The declarations folowing are put here for convenience
+  * and only intended to be used by the platform setup code.
+diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
+index 11d5383..0c8b0d6 100644
+--- a/include/asm-powerpc/systbl.h
++++ b/include/asm-powerpc/systbl.h
+@@ -313,3 +313,4 @@ COMPAT_SYS_SPU(timerfd)
+ SYSCALL_SPU(eventfd)
+ COMPAT_SYS_SPU(sync_file_range2)
+ COMPAT_SYS(fallocate)
++SYSCALL(subpage_prot)
+diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
+index 87be8c3..bc9739d 100644
+--- a/include/asm-powerpc/system.h
++++ b/include/asm-powerpc/system.h
+@@ -169,6 +169,8 @@ extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+ extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+ extern int die(const char *, struct pt_regs *, long);
+ extern void _exception(int, struct pt_regs *, int, unsigned long);
++extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
++
+ #ifdef CONFIG_BOOKE_WDT
+ extern u32 booke_wdt_enabled;
+ extern u32 booke_wdt_period;
+diff --git a/include/asm-powerpc/udbg.h b/include/asm-powerpc/udbg.h
+index a9e0b0e..6418cee 100644
+--- a/include/asm-powerpc/udbg.h
++++ b/include/asm-powerpc/udbg.h
+@@ -48,6 +48,7 @@ extern void __init udbg_init_rtas_console(void);
+ extern void __init udbg_init_debug_beat(void);
+ extern void __init udbg_init_btext(void);
+ extern void __init udbg_init_44x_as1(void);
++extern void __init udbg_init_40x_realmode(void);
+ extern void __init udbg_init_cpm(void);
+ 
+ #endif /* __KERNEL__ */
+diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
+index 97d82b6..fedc4b8 100644
+--- a/include/asm-powerpc/unistd.h
++++ b/include/asm-powerpc/unistd.h
+@@ -332,10 +332,11 @@
+ #define __NR_eventfd		307
+ #define __NR_sync_file_range2	308
+ #define __NR_fallocate		309
++#define __NR_subpage_prot	310
+ 
+ #ifdef __KERNEL__
+ 
+-#define __NR_syscalls		310
++#define __NR_syscalls		311
+ 
+ #define __NR__exit __NR_exit
+ #define NR_syscalls	__NR_syscalls
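The new __NR_subpage_prot slot can be exercised from user space with syscall(2); the (addr, len, map) argument layout below is an assumption taken from the powerpc subpage-protection work and is not spelled out in this hunk:

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_subpage_prot
    #define __NR_subpage_prot 310
    #endif

    /* assumed signature: protection map covering [addr, addr + len) */
    static long subpage_prot(unsigned long addr, unsigned long len, uint32_t *map)
    {
    	return syscall(__NR_subpage_prot, addr, len, map);
    }
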
+diff --git a/include/asm-ppc/8xx_immap.h b/include/asm-ppc/8xx_immap.h
+index 1311cef..4b0e152 100644
+--- a/include/asm-ppc/8xx_immap.h
++++ b/include/asm-ppc/8xx_immap.h
+@@ -123,7 +123,7 @@ typedef struct	mem_ctlr {
+ #define OR_G5LA		0x00000400	/* Output #GPL5 on #GPL_A5		*/
+ #define OR_G5LS		0x00000200	/* Drive #GPL high on falling edge of...*/
+ #define OR_BI		0x00000100	/* Burst inhibit			*/
+-#define OR_SCY_MSK	0x000000f0	/* Cycle Lenght in Clocks		*/
++#define OR_SCY_MSK	0x000000f0	/* Cycle Length in Clocks		*/
+ #define OR_SCY_0_CLK	0x00000000	/* 0 clock cycles wait states		*/
+ #define OR_SCY_1_CLK	0x00000010	/* 1 clock cycles wait states		*/
+ #define OR_SCY_2_CLK	0x00000020	/* 2 clock cycles wait states		*/
+diff --git a/include/asm-ppc/commproc.h b/include/asm-ppc/commproc.h
+deleted file mode 100644
+index 3972487..0000000
+--- a/include/asm-ppc/commproc.h
++++ /dev/null
+@@ -1,692 +0,0 @@
+-/*
+- * MPC8xx Communication Processor Module.
+- * Copyright (c) 1997 Dan Malek (dmalek at jlc.net)
+- *
+- * This file contains structures and information for the communication
+- * processor channels.  Some CPM control and status is available
+- * throught the MPC8xx internal memory map.  See immap.h for details.
+- * This file only contains what I need for the moment, not the total
+- * CPM capabilities.  I (or someone else) will add definitions as they
+- * are needed.  -- Dan
+- *
+- * On the MBX board, EPPC-Bug loads CPM microcode into the first 512
+- * bytes of the DP RAM and relocates the I2C parameter area to the
+- * IDMA1 space.  The remaining DP RAM is available for buffer descriptors
+- * or other use.
+- */
+-#ifndef __CPM_8XX__
+-#define __CPM_8XX__
+-
+-#include <asm/8xx_immap.h>
+-#include <asm/ptrace.h>
+-
+-/* CPM Command register.
+-*/
+-#define CPM_CR_RST	((ushort)0x8000)
+-#define CPM_CR_OPCODE	((ushort)0x0f00)
+-#define CPM_CR_CHAN	((ushort)0x00f0)
+-#define CPM_CR_FLG	((ushort)0x0001)
+-
+-/* Some commands (there are more...later)
+-*/
+-#define CPM_CR_INIT_TRX		((ushort)0x0000)
+-#define CPM_CR_INIT_RX		((ushort)0x0001)
+-#define CPM_CR_INIT_TX		((ushort)0x0002)
+-#define CPM_CR_HUNT_MODE	((ushort)0x0003)
+-#define CPM_CR_STOP_TX		((ushort)0x0004)
+-#define CPM_CR_GRA_STOP_TX	((ushort)0x0005)
+-#define CPM_CR_RESTART_TX	((ushort)0x0006)
+-#define CPM_CR_CLOSE_RX_BD	((ushort)0x0007)
+-#define CPM_CR_SET_GADDR	((ushort)0x0008)
+-#define CPM_CR_SET_TIMER	CPM_CR_SET_GADDR
+-
+-/* Channel numbers.
+-*/
+-#define CPM_CR_CH_SCC1		((ushort)0x0000)
+-#define CPM_CR_CH_I2C		((ushort)0x0001)	/* I2C and IDMA1 */
+-#define CPM_CR_CH_SCC2		((ushort)0x0004)
+-#define CPM_CR_CH_SPI		((ushort)0x0005)	/* SPI / IDMA2 / Timers */
+-#define CPM_CR_CH_TIMER		CPM_CR_CH_SPI
+-#define CPM_CR_CH_SCC3		((ushort)0x0008)
+-#define CPM_CR_CH_SMC1		((ushort)0x0009)	/* SMC1 / DSP1 */
+-#define CPM_CR_CH_SCC4		((ushort)0x000c)
+-#define CPM_CR_CH_SMC2		((ushort)0x000d)	/* SMC2 / DSP2 */
+-
+-#define mk_cr_cmd(CH, CMD)	((CMD << 8) | (CH << 4))
+-
+-/* The dual ported RAM is multi-functional.  Some areas can be (and are
+- * being) used for microcode.  There is an area that can only be used
+- * as data ram for buffer descriptors, which is all we use right now.
+- * Currently the first 512 and last 256 bytes are used for microcode.
+- */
+-#define CPM_DATAONLY_BASE	((uint)0x0800)
+-#define CPM_DATAONLY_SIZE	((uint)0x0700)
+-#define CPM_DP_NOSPACE		((uint)0x7fffffff)
+-
+-/* Export the base address of the communication processor registers
+- * and dual port ram.
+- */
+-extern	cpm8xx_t	*cpmp;		/* Pointer to comm processor */
+-extern unsigned long cpm_dpalloc(uint size, uint align);
+-extern int cpm_dpfree(unsigned long offset);
+-extern unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align);
+-extern void cpm_dpdump(void);
+-extern void *cpm_dpram_addr(unsigned long offset);
+-extern uint cpm_dpram_phys(u8* addr);
+-extern void cpm_setbrg(uint brg, uint rate);
+-
+-extern uint m8xx_cpm_hostalloc(uint size);
+-extern int  m8xx_cpm_hostfree(uint start);
+-extern void m8xx_cpm_hostdump(void);
+-
+-extern void cpm_load_patch(volatile immap_t *immr);
+-
+-/* Buffer descriptors used by many of the CPM protocols.
+-*/
+-typedef struct cpm_buf_desc {
+-	ushort	cbd_sc;		/* Status and Control */
+-	ushort	cbd_datlen;	/* Data length in buffer */
+-	uint	cbd_bufaddr;	/* Buffer address in host memory */
+-} cbd_t;
+-
+-#define BD_SC_EMPTY	((ushort)0x8000)	/* Receive is empty */
+-#define BD_SC_READY	((ushort)0x8000)	/* Transmit is ready */
+-#define BD_SC_WRAP	((ushort)0x2000)	/* Last buffer descriptor */
+-#define BD_SC_INTRPT	((ushort)0x1000)	/* Interrupt on change */
+-#define BD_SC_LAST	((ushort)0x0800)	/* Last buffer in frame */
+-#define BD_SC_TC	((ushort)0x0400)	/* Transmit CRC */
+-#define BD_SC_CM	((ushort)0x0200)	/* Continous mode */
+-#define BD_SC_ID	((ushort)0x0100)	/* Rec'd too many idles */
+-#define BD_SC_P		((ushort)0x0100)	/* xmt preamble */
+-#define BD_SC_BR	((ushort)0x0020)	/* Break received */
+-#define BD_SC_FR	((ushort)0x0010)	/* Framing error */
+-#define BD_SC_PR	((ushort)0x0008)	/* Parity error */
+-#define BD_SC_NAK	((ushort)0x0004)	/* NAK - did not respond */
+-#define BD_SC_OV	((ushort)0x0002)	/* Overrun */
+-#define BD_SC_UN	((ushort)0x0002)	/* Underrun */
+-#define BD_SC_CD	((ushort)0x0001)	/* ?? */
+-#define BD_SC_CL	((ushort)0x0001)	/* Collision */
+-
+-/* Parameter RAM offsets.
+-*/
+-#define PROFF_SCC1	((uint)0x0000)
+-#define PROFF_IIC	((uint)0x0080)
+-#define PROFF_SCC2	((uint)0x0100)
+-#define PROFF_SPI	((uint)0x0180)
+-#define PROFF_SCC3	((uint)0x0200)
+-#define PROFF_SMC1	((uint)0x0280)
+-#define PROFF_SCC4	((uint)0x0300)
+-#define PROFF_SMC2	((uint)0x0380)
+-
+-/* Define enough so I can at least use the serial port as a UART.
+- * The MBX uses SMC1 as the host serial port.
+- */
+-typedef struct smc_uart {
+-	ushort	smc_rbase;	/* Rx Buffer descriptor base address */
+-	ushort	smc_tbase;	/* Tx Buffer descriptor base address */
+-	u_char	smc_rfcr;	/* Rx function code */
+-	u_char	smc_tfcr;	/* Tx function code */
+-	ushort	smc_mrblr;	/* Max receive buffer length */
+-	uint	smc_rstate;	/* Internal */
+-	uint	smc_idp;	/* Internal */
+-	ushort	smc_rbptr;	/* Internal */
+-	ushort	smc_ibc;	/* Internal */
+-	uint	smc_rxtmp;	/* Internal */
+-	uint	smc_tstate;	/* Internal */
+-	uint	smc_tdp;	/* Internal */
+-	ushort	smc_tbptr;	/* Internal */
+-	ushort	smc_tbc;	/* Internal */
+-	uint	smc_txtmp;	/* Internal */
+-	ushort	smc_maxidl;	/* Maximum idle characters */
+-	ushort	smc_tmpidl;	/* Temporary idle counter */
+-	ushort	smc_brklen;	/* Last received break length */
+-	ushort	smc_brkec;	/* rcv'd break condition counter */
+-	ushort	smc_brkcr;	/* xmt break count register */
+-	ushort	smc_rmask;	/* Temporary bit mask */
+-	char	res1[8];	/* Reserved */
+-	ushort	smc_rpbase;	/* Relocation pointer */
+-} smc_uart_t;
+-
+-/* Function code bits.
+-*/
+-#define SMC_EB	((u_char)0x10)	/* Set big endian byte order */
+-
+-/* SMC uart mode register.
+-*/
+-#define	SMCMR_REN	((ushort)0x0001)
+-#define SMCMR_TEN	((ushort)0x0002)
+-#define SMCMR_DM	((ushort)0x000c)
+-#define SMCMR_SM_GCI	((ushort)0x0000)
+-#define SMCMR_SM_UART	((ushort)0x0020)
+-#define SMCMR_SM_TRANS	((ushort)0x0030)
+-#define SMCMR_SM_MASK	((ushort)0x0030)
+-#define SMCMR_PM_EVEN	((ushort)0x0100)	/* Even parity, else odd */
+-#define SMCMR_REVD	SMCMR_PM_EVEN
+-#define SMCMR_PEN	((ushort)0x0200)	/* Parity enable */
+-#define SMCMR_BS	SMCMR_PEN
+-#define SMCMR_SL	((ushort)0x0400)	/* Two stops, else one */
+-#define SMCR_CLEN_MASK	((ushort)0x7800)	/* Character length */
+-#define smcr_mk_clen(C)	(((C) << 11) & SMCR_CLEN_MASK)
+-
+-/* SMC2 as Centronics parallel printer.  It is half duplex, in that
+- * it can only receive or transmit.  The parameter ram values for
+- * each direction are either unique or properly overlap, so we can
+- * include them in one structure.
+- */
+-typedef struct smc_centronics {
+-	ushort	scent_rbase;
+-	ushort	scent_tbase;
+-	u_char	scent_cfcr;
+-	u_char	scent_smask;
+-	ushort	scent_mrblr;
+-	uint	scent_rstate;
+-	uint	scent_r_ptr;
+-	ushort	scent_rbptr;
+-	ushort	scent_r_cnt;
+-	uint	scent_rtemp;
+-	uint	scent_tstate;
+-	uint	scent_t_ptr;
+-	ushort	scent_tbptr;
+-	ushort	scent_t_cnt;
+-	uint	scent_ttemp;
+-	ushort	scent_max_sl;
+-	ushort	scent_sl_cnt;
+-	ushort	scent_character1;
+-	ushort	scent_character2;
+-	ushort	scent_character3;
+-	ushort	scent_character4;
+-	ushort	scent_character5;
+-	ushort	scent_character6;
+-	ushort	scent_character7;
+-	ushort	scent_character8;
+-	ushort	scent_rccm;
+-	ushort	scent_rccr;
+-} smc_cent_t;
+-
+-/* Centronics Status Mask Register.
+-*/
+-#define SMC_CENT_F	((u_char)0x08)
+-#define SMC_CENT_PE	((u_char)0x04)
+-#define SMC_CENT_S	((u_char)0x02)
+-
+-/* SMC Event and Mask register.
+-*/
+-#define	SMCM_BRKE	((unsigned char)0x40)	/* When in UART Mode */
+-#define	SMCM_BRK	((unsigned char)0x10)	/* When in UART Mode */
+-#define	SMCM_TXE	((unsigned char)0x10)	/* When in Transparent Mode */
+-#define	SMCM_BSY	((unsigned char)0x04)
+-#define	SMCM_TX		((unsigned char)0x02)
+-#define	SMCM_RX		((unsigned char)0x01)
+-
+-/* Baud rate generators.
+-*/
+-#define CPM_BRG_RST		((uint)0x00020000)
+-#define CPM_BRG_EN		((uint)0x00010000)
+-#define CPM_BRG_EXTC_INT	((uint)0x00000000)
+-#define CPM_BRG_EXTC_CLK2	((uint)0x00004000)
+-#define CPM_BRG_EXTC_CLK6	((uint)0x00008000)
+-#define CPM_BRG_ATB		((uint)0x00002000)
+-#define CPM_BRG_CD_MASK		((uint)0x00001ffe)
+-#define CPM_BRG_DIV16		((uint)0x00000001)
+-
+-/* SI Clock Route Register
+-*/
+-#define SICR_RCLK_SCC1_BRG1	((uint)0x00000000)
+-#define SICR_TCLK_SCC1_BRG1	((uint)0x00000000)
+-#define SICR_RCLK_SCC2_BRG2	((uint)0x00000800)
+-#define SICR_TCLK_SCC2_BRG2	((uint)0x00000100)
+-#define SICR_RCLK_SCC3_BRG3	((uint)0x00100000)
+-#define SICR_TCLK_SCC3_BRG3	((uint)0x00020000)
+-#define SICR_RCLK_SCC4_BRG4	((uint)0x18000000)
+-#define SICR_TCLK_SCC4_BRG4	((uint)0x03000000)
+-
+-/* SCCs.
+-*/
+-#define SCC_GSMRH_IRP		((uint)0x00040000)
+-#define SCC_GSMRH_GDE		((uint)0x00010000)
+-#define SCC_GSMRH_TCRC_CCITT	((uint)0x00008000)
+-#define SCC_GSMRH_TCRC_BISYNC	((uint)0x00004000)
+-#define SCC_GSMRH_TCRC_HDLC	((uint)0x00000000)
+-#define SCC_GSMRH_REVD		((uint)0x00002000)
+-#define SCC_GSMRH_TRX		((uint)0x00001000)
+-#define SCC_GSMRH_TTX		((uint)0x00000800)
+-#define SCC_GSMRH_CDP		((uint)0x00000400)
+-#define SCC_GSMRH_CTSP		((uint)0x00000200)
+-#define SCC_GSMRH_CDS		((uint)0x00000100)
+-#define SCC_GSMRH_CTSS		((uint)0x00000080)
+-#define SCC_GSMRH_TFL		((uint)0x00000040)
+-#define SCC_GSMRH_RFW		((uint)0x00000020)
+-#define SCC_GSMRH_TXSY		((uint)0x00000010)
+-#define SCC_GSMRH_SYNL16	((uint)0x0000000c)
+-#define SCC_GSMRH_SYNL8		((uint)0x00000008)
+-#define SCC_GSMRH_SYNL4		((uint)0x00000004)
+-#define SCC_GSMRH_RTSM		((uint)0x00000002)
+-#define SCC_GSMRH_RSYN		((uint)0x00000001)
+-
+-#define SCC_GSMRL_SIR		((uint)0x80000000)	/* SCC2 only */
+-#define SCC_GSMRL_EDGE_NONE	((uint)0x60000000)
+-#define SCC_GSMRL_EDGE_NEG	((uint)0x40000000)
+-#define SCC_GSMRL_EDGE_POS	((uint)0x20000000)
+-#define SCC_GSMRL_EDGE_BOTH	((uint)0x00000000)
+-#define SCC_GSMRL_TCI		((uint)0x10000000)
+-#define SCC_GSMRL_TSNC_3	((uint)0x0c000000)
+-#define SCC_GSMRL_TSNC_4	((uint)0x08000000)
+-#define SCC_GSMRL_TSNC_14	((uint)0x04000000)
+-#define SCC_GSMRL_TSNC_INF	((uint)0x00000000)
+-#define SCC_GSMRL_RINV		((uint)0x02000000)
+-#define SCC_GSMRL_TINV		((uint)0x01000000)
+-#define SCC_GSMRL_TPL_128	((uint)0x00c00000)
+-#define SCC_GSMRL_TPL_64	((uint)0x00a00000)
+-#define SCC_GSMRL_TPL_48	((uint)0x00800000)
+-#define SCC_GSMRL_TPL_32	((uint)0x00600000)
+-#define SCC_GSMRL_TPL_16	((uint)0x00400000)
+-#define SCC_GSMRL_TPL_8		((uint)0x00200000)
+-#define SCC_GSMRL_TPL_NONE	((uint)0x00000000)
+-#define SCC_GSMRL_TPP_ALL1	((uint)0x00180000)
+-#define SCC_GSMRL_TPP_01	((uint)0x00100000)
+-#define SCC_GSMRL_TPP_10	((uint)0x00080000)
+-#define SCC_GSMRL_TPP_ZEROS	((uint)0x00000000)
+-#define SCC_GSMRL_TEND		((uint)0x00040000)
+-#define SCC_GSMRL_TDCR_32	((uint)0x00030000)
+-#define SCC_GSMRL_TDCR_16	((uint)0x00020000)
+-#define SCC_GSMRL_TDCR_8	((uint)0x00010000)
+-#define SCC_GSMRL_TDCR_1	((uint)0x00000000)
+-#define SCC_GSMRL_RDCR_32	((uint)0x0000c000)
+-#define SCC_GSMRL_RDCR_16	((uint)0x00008000)
+-#define SCC_GSMRL_RDCR_8	((uint)0x00004000)
+-#define SCC_GSMRL_RDCR_1	((uint)0x00000000)
+-#define SCC_GSMRL_RENC_DFMAN	((uint)0x00003000)
+-#define SCC_GSMRL_RENC_MANCH	((uint)0x00002000)
+-#define SCC_GSMRL_RENC_FM0	((uint)0x00001000)
+-#define SCC_GSMRL_RENC_NRZI	((uint)0x00000800)
+-#define SCC_GSMRL_RENC_NRZ	((uint)0x00000000)
+-#define SCC_GSMRL_TENC_DFMAN	((uint)0x00000600)
+-#define SCC_GSMRL_TENC_MANCH	((uint)0x00000400)
+-#define SCC_GSMRL_TENC_FM0	((uint)0x00000200)
+-#define SCC_GSMRL_TENC_NRZI	((uint)0x00000100)
+-#define SCC_GSMRL_TENC_NRZ	((uint)0x00000000)
+-#define SCC_GSMRL_DIAG_LE	((uint)0x000000c0)	/* Loop and echo */
+-#define SCC_GSMRL_DIAG_ECHO	((uint)0x00000080)
+-#define SCC_GSMRL_DIAG_LOOP	((uint)0x00000040)
+-#define SCC_GSMRL_DIAG_NORM	((uint)0x00000000)
+-#define SCC_GSMRL_ENR		((uint)0x00000020)
+-#define SCC_GSMRL_ENT		((uint)0x00000010)
+-#define SCC_GSMRL_MODE_ENET	((uint)0x0000000c)
+-#define SCC_GSMRL_MODE_QMC	((uint)0x0000000a)
+-#define SCC_GSMRL_MODE_DDCMP	((uint)0x00000009)
+-#define SCC_GSMRL_MODE_BISYNC	((uint)0x00000008)
+-#define SCC_GSMRL_MODE_V14	((uint)0x00000007)
+-#define SCC_GSMRL_MODE_AHDLC	((uint)0x00000006)
+-#define SCC_GSMRL_MODE_PROFIBUS	((uint)0x00000005)
+-#define SCC_GSMRL_MODE_UART	((uint)0x00000004)
+-#define SCC_GSMRL_MODE_SS7	((uint)0x00000003)
+-#define SCC_GSMRL_MODE_ATALK	((uint)0x00000002)
+-#define SCC_GSMRL_MODE_HDLC	((uint)0x00000000)
+-
+-#define SCC_TODR_TOD		((ushort)0x8000)
+-
+-/* SCC Event and Mask register.
+-*/
+-#define	SCCM_TXE	((unsigned char)0x10)
+-#define	SCCM_BSY	((unsigned char)0x04)
+-#define	SCCM_TX		((unsigned char)0x02)
+-#define	SCCM_RX		((unsigned char)0x01)
+-
+-typedef struct scc_param {
+-	ushort	scc_rbase;	/* Rx Buffer descriptor base address */
+-	ushort	scc_tbase;	/* Tx Buffer descriptor base address */
+-	u_char	scc_rfcr;	/* Rx function code */
+-	u_char	scc_tfcr;	/* Tx function code */
+-	ushort	scc_mrblr;	/* Max receive buffer length */
+-	uint	scc_rstate;	/* Internal */
+-	uint	scc_idp;	/* Internal */
+-	ushort	scc_rbptr;	/* Internal */
+-	ushort	scc_ibc;	/* Internal */
+-	uint	scc_rxtmp;	/* Internal */
+-	uint	scc_tstate;	/* Internal */
+-	uint	scc_tdp;	/* Internal */
+-	ushort	scc_tbptr;	/* Internal */
+-	ushort	scc_tbc;	/* Internal */
+-	uint	scc_txtmp;	/* Internal */
+-	uint	scc_rcrc;	/* Internal */
+-	uint	scc_tcrc;	/* Internal */
+-} sccp_t;
+-
+-/* Function code bits.
+-*/
+-#define SCC_EB	((u_char)0x10)	/* Set big endian byte order */
+-
+-/* CPM Ethernet through SCCx.
+- */
+-typedef struct scc_enet {
+-	sccp_t	sen_genscc;
+-	uint	sen_cpres;	/* Preset CRC */
+-	uint	sen_cmask;	/* Constant mask for CRC */
+-	uint	sen_crcec;	/* CRC Error counter */
+-	uint	sen_alec;	/* alignment error counter */
+-	uint	sen_disfc;	/* discard frame counter */
+-	ushort	sen_pads;	/* Tx short frame pad character */
+-	ushort	sen_retlim;	/* Retry limit threshold */
+-	ushort	sen_retcnt;	/* Retry limit counter */
+-	ushort	sen_maxflr;	/* maximum frame length register */
+-	ushort	sen_minflr;	/* minimum frame length register */
+-	ushort	sen_maxd1;	/* maximum DMA1 length */
+-	ushort	sen_maxd2;	/* maximum DMA2 length */
+-	ushort	sen_maxd;	/* Rx max DMA */
+-	ushort	sen_dmacnt;	/* Rx DMA counter */
+-	ushort	sen_maxb;	/* Max BD byte count */
+-	ushort	sen_gaddr1;	/* Group address filter */
+-	ushort	sen_gaddr2;
+-	ushort	sen_gaddr3;
+-	ushort	sen_gaddr4;
+-	uint	sen_tbuf0data0;	/* Save area 0 - current frame */
+-	uint	sen_tbuf0data1;	/* Save area 1 - current frame */
+-	uint	sen_tbuf0rba;	/* Internal */
+-	uint	sen_tbuf0crc;	/* Internal */
+-	ushort	sen_tbuf0bcnt;	/* Internal */
+-	ushort	sen_paddrh;	/* physical address (MSB) */
+-	ushort	sen_paddrm;
+-	ushort	sen_paddrl;	/* physical address (LSB) */
+-	ushort	sen_pper;	/* persistence */
+-	ushort	sen_rfbdptr;	/* Rx first BD pointer */
+-	ushort	sen_tfbdptr;	/* Tx first BD pointer */
+-	ushort	sen_tlbdptr;	/* Tx last BD pointer */
+-	uint	sen_tbuf1data0;	/* Save area 0 - current frame */
+-	uint	sen_tbuf1data1;	/* Save area 1 - current frame */
+-	uint	sen_tbuf1rba;	/* Internal */
+-	uint	sen_tbuf1crc;	/* Internal */
+-	ushort	sen_tbuf1bcnt;	/* Internal */
+-	ushort	sen_txlen;	/* Tx Frame length counter */
+-	ushort	sen_iaddr1;	/* Individual address filter */
+-	ushort	sen_iaddr2;
+-	ushort	sen_iaddr3;
+-	ushort	sen_iaddr4;
+-	ushort	sen_boffcnt;	/* Backoff counter */
+-
+-	/* NOTE: Some versions of the manual have the following items
+-	 * incorrectly documented.  Below is the proper order.
+-	 */
+-	ushort	sen_taddrh;	/* temp address (MSB) */
+-	ushort	sen_taddrm;
+-	ushort	sen_taddrl;	/* temp address (LSB) */
+-} scc_enet_t;
+-
+-/* SCC Event register as used by Ethernet.
+-*/
+-#define SCCE_ENET_GRA	((ushort)0x0080)	/* Graceful stop complete */
+-#define SCCE_ENET_TXE	((ushort)0x0010)	/* Transmit Error */
+-#define SCCE_ENET_RXF	((ushort)0x0008)	/* Full frame received */
+-#define SCCE_ENET_BSY	((ushort)0x0004)	/* All incoming buffers full */
+-#define SCCE_ENET_TXB	((ushort)0x0002)	/* A buffer was transmitted */
+-#define SCCE_ENET_RXB	((ushort)0x0001)	/* A buffer was received */
+-
+-/* SCC Mode Register (PMSR) as used by Ethernet.
+-*/
+-#define SCC_PSMR_HBC	((ushort)0x8000)	/* Enable heartbeat */
+-#define SCC_PSMR_FC	((ushort)0x4000)	/* Force collision */
+-#define SCC_PSMR_RSH	((ushort)0x2000)	/* Receive short frames */
+-#define SCC_PSMR_IAM	((ushort)0x1000)	/* Check individual hash */
+-#define SCC_PSMR_ENCRC	((ushort)0x0800)	/* Ethernet CRC mode */
+-#define SCC_PSMR_PRO	((ushort)0x0200)	/* Promiscuous mode */
+-#define SCC_PSMR_BRO	((ushort)0x0100)	/* Catch broadcast pkts */
+-#define SCC_PSMR_SBT	((ushort)0x0080)	/* Special backoff timer */
+-#define SCC_PSMR_LPB	((ushort)0x0040)	/* Set Loopback mode */
+-#define SCC_PSMR_SIP	((ushort)0x0020)	/* Sample Input Pins */
+-#define SCC_PSMR_LCW	((ushort)0x0010)	/* Late collision window */
+-#define SCC_PSMR_NIB22	((ushort)0x000a)	/* Start frame search */
+-#define SCC_PSMR_FDE	((ushort)0x0001)	/* Full duplex enable */
+-
+-/* Buffer descriptor control/status used by Ethernet receive.
+-*/
+-#define BD_ENET_RX_EMPTY	((ushort)0x8000)
+-#define BD_ENET_RX_WRAP		((ushort)0x2000)
+-#define BD_ENET_RX_INTR		((ushort)0x1000)
+-#define BD_ENET_RX_LAST		((ushort)0x0800)
+-#define BD_ENET_RX_FIRST	((ushort)0x0400)
+-#define BD_ENET_RX_MISS		((ushort)0x0100)
+-#define BD_ENET_RX_LG		((ushort)0x0020)
+-#define BD_ENET_RX_NO		((ushort)0x0010)
+-#define BD_ENET_RX_SH		((ushort)0x0008)
+-#define BD_ENET_RX_CR		((ushort)0x0004)
+-#define BD_ENET_RX_OV		((ushort)0x0002)
+-#define BD_ENET_RX_CL		((ushort)0x0001)
+-#define BD_ENET_RX_BC		((ushort)0x0080)	/* DA is Broadcast */
+-#define BD_ENET_RX_MC		((ushort)0x0040)	/* DA is Multicast */
+-#define BD_ENET_RX_STATS	((ushort)0x013f)	/* All status bits */
+-
+-/* Buffer descriptor control/status used by Ethernet transmit.
+-*/
+-#define BD_ENET_TX_READY	((ushort)0x8000)
+-#define BD_ENET_TX_PAD		((ushort)0x4000)
+-#define BD_ENET_TX_WRAP		((ushort)0x2000)
+-#define BD_ENET_TX_INTR		((ushort)0x1000)
+-#define BD_ENET_TX_LAST		((ushort)0x0800)
+-#define BD_ENET_TX_TC		((ushort)0x0400)
+-#define BD_ENET_TX_DEF		((ushort)0x0200)
+-#define BD_ENET_TX_HB		((ushort)0x0100)
+-#define BD_ENET_TX_LC		((ushort)0x0080)
+-#define BD_ENET_TX_RL		((ushort)0x0040)
+-#define BD_ENET_TX_RCMASK	((ushort)0x003c)
+-#define BD_ENET_TX_UN		((ushort)0x0002)
+-#define BD_ENET_TX_CSL		((ushort)0x0001)
+-#define BD_ENET_TX_STATS	((ushort)0x03ff)	/* All status bits */
+-
+-/* SCC as UART
+-*/
+-typedef struct scc_uart {
+-	sccp_t	scc_genscc;
+-	char	res1[8];	/* Reserved */
+-	ushort	scc_maxidl;	/* Maximum idle chars */
+-	ushort	scc_idlc;	/* temp idle counter */
+-	ushort	scc_brkcr;	/* Break count register */
+-	ushort	scc_parec;	/* receive parity error counter */
+-	ushort	scc_frmec;	/* receive framing error counter */
+-	ushort	scc_nosec;	/* receive noise counter */
+-	ushort	scc_brkec;	/* receive break condition counter */
+-	ushort	scc_brkln;	/* last received break length */
+-	ushort	scc_uaddr1;	/* UART address character 1 */
+-	ushort	scc_uaddr2;	/* UART address character 2 */
+-	ushort	scc_rtemp;	/* Temp storage */
+-	ushort	scc_toseq;	/* Transmit out of sequence char */
+-	ushort	scc_char1;	/* control character 1 */
+-	ushort	scc_char2;	/* control character 2 */
+-	ushort	scc_char3;	/* control character 3 */
+-	ushort	scc_char4;	/* control character 4 */
+-	ushort	scc_char5;	/* control character 5 */
+-	ushort	scc_char6;	/* control character 6 */
+-	ushort	scc_char7;	/* control character 7 */
+-	ushort	scc_char8;	/* control character 8 */
+-	ushort	scc_rccm;	/* receive control character mask */
+-	ushort	scc_rccr;	/* receive control character register */
+-	ushort	scc_rlbc;	/* receive last break character */
+-} scc_uart_t;
+-
+-/* SCC Event and Mask registers when it is used as a UART.
+-*/
+-#define UART_SCCM_GLR		((ushort)0x1000)
+-#define UART_SCCM_GLT		((ushort)0x0800)
+-#define UART_SCCM_AB		((ushort)0x0200)
+-#define UART_SCCM_IDL		((ushort)0x0100)
+-#define UART_SCCM_GRA		((ushort)0x0080)
+-#define UART_SCCM_BRKE		((ushort)0x0040)
+-#define UART_SCCM_BRKS		((ushort)0x0020)
+-#define UART_SCCM_CCR		((ushort)0x0008)
+-#define UART_SCCM_BSY		((ushort)0x0004)
+-#define UART_SCCM_TX		((ushort)0x0002)
+-#define UART_SCCM_RX		((ushort)0x0001)
+-
+-/* The SCC PMSR when used as a UART.
+-*/
+-#define SCU_PSMR_FLC		((ushort)0x8000)
+-#define SCU_PSMR_SL		((ushort)0x4000)
+-#define SCU_PSMR_CL		((ushort)0x3000)
+-#define SCU_PSMR_UM		((ushort)0x0c00)
+-#define SCU_PSMR_FRZ		((ushort)0x0200)
+-#define SCU_PSMR_RZS		((ushort)0x0100)
+-#define SCU_PSMR_SYN		((ushort)0x0080)
+-#define SCU_PSMR_DRT		((ushort)0x0040)
+-#define SCU_PSMR_PEN		((ushort)0x0010)
+-#define SCU_PSMR_RPM		((ushort)0x000c)
+-#define SCU_PSMR_REVP		((ushort)0x0008)
+-#define SCU_PSMR_TPM		((ushort)0x0003)
+-#define SCU_PSMR_TEVP		((ushort)0x0002)
+-
+-/* CPM Transparent mode SCC.
+- */
+-typedef struct scc_trans {
+-	sccp_t	st_genscc;
+-	uint	st_cpres;	/* Preset CRC */
+-	uint	st_cmask;	/* Constant mask for CRC */
+-} scc_trans_t;
+-
+-#define BD_SCC_TX_LAST		((ushort)0x0800)
+-
+-/* IIC parameter RAM.
+-*/
+-typedef struct iic {
+-	ushort	iic_rbase;	/* Rx Buffer descriptor base address */
+-	ushort	iic_tbase;	/* Tx Buffer descriptor base address */
+-	u_char	iic_rfcr;	/* Rx function code */
+-	u_char	iic_tfcr;	/* Tx function code */
+-	ushort	iic_mrblr;	/* Max receive buffer length */
+-	uint	iic_rstate;	/* Internal */
+-	uint	iic_rdp;	/* Internal */
+-	ushort	iic_rbptr;	/* Internal */
+-	ushort	iic_rbc;	/* Internal */
+-	uint	iic_rxtmp;	/* Internal */
+-	uint	iic_tstate;	/* Internal */
+-	uint	iic_tdp;	/* Internal */
+-	ushort	iic_tbptr;	/* Internal */
+-	ushort	iic_tbc;	/* Internal */
+-	uint	iic_txtmp;	/* Internal */
+-	char	res1[4];	/* Reserved */
+-	ushort	iic_rpbase;	/* Relocation pointer */
+-	char	res2[2];	/* Reserved */
+-} iic_t;
+-
+-#define BD_IIC_START		((ushort)0x0400)
+-
+-/* SPI parameter RAM.
+-*/
+-typedef struct spi {
+-	ushort	spi_rbase;	/* Rx Buffer descriptor base address */
+-	ushort	spi_tbase;	/* Tx Buffer descriptor base address */
+-	u_char	spi_rfcr;	/* Rx function code */
+-	u_char	spi_tfcr;	/* Tx function code */
+-	ushort	spi_mrblr;	/* Max receive buffer length */
+-	uint	spi_rstate;	/* Internal */
+-	uint	spi_rdp;	/* Internal */
+-	ushort	spi_rbptr;	/* Internal */
+-	ushort	spi_rbc;	/* Internal */
+-	uint	spi_rxtmp;	/* Internal */
+-	uint	spi_tstate;	/* Internal */
+-	uint	spi_tdp;	/* Internal */
+-	ushort	spi_tbptr;	/* Internal */
+-	ushort	spi_tbc;	/* Internal */
+-	uint	spi_txtmp;	/* Internal */
+-	uint	spi_res;
+-	ushort	spi_rpbase;	/* Relocation pointer */
+-	ushort	spi_res2;
+-} spi_t;
+-
+-/* SPI Mode register.
+-*/
+-#define SPMODE_LOOP	((ushort)0x4000)	/* Loopback */
+-#define SPMODE_CI	((ushort)0x2000)	/* Clock Invert */
+-#define SPMODE_CP	((ushort)0x1000)	/* Clock Phase */
+-#define SPMODE_DIV16	((ushort)0x0800)	/* BRG/16 mode */
+-#define SPMODE_REV	((ushort)0x0400)	/* Reversed Data */
+-#define SPMODE_MSTR	((ushort)0x0200)	/* SPI Master */
+-#define SPMODE_EN	((ushort)0x0100)	/* Enable */
+-#define SPMODE_LENMSK	((ushort)0x00f0)	/* character length */
+-#define SPMODE_LEN4	((ushort)0x0030)	/*  4 bits per char */
+-#define SPMODE_LEN8	((ushort)0x0070)	/*  8 bits per char */
+-#define SPMODE_LEN16	((ushort)0x00f0)	/* 16 bits per char */
+-#define SPMODE_PMMSK	((ushort)0x000f)	/* prescale modulus */
+-
+-/* SPIE fields */
+-#define SPIE_MME	0x20
+-#define SPIE_TXE	0x10
+-#define SPIE_BSY	0x04
+-#define SPIE_TXB	0x02
+-#define SPIE_RXB	0x01
+-
+-/*
+- * RISC Controller Configuration Register definitons
+- */
+-#define RCCR_TIME	0x8000			/* RISC Timer Enable */
+-#define RCCR_TIMEP(t)	(((t) & 0x3F)<<8)	/* RISC Timer Period */
+-#define RCCR_TIME_MASK	0x00FF			/* not RISC Timer related bits */
+-
+-/* RISC Timer Parameter RAM offset */
+-#define PROFF_RTMR	((uint)0x01B0)
+-
+-typedef struct risc_timer_pram {
+-	unsigned short	tm_base;	/* RISC Timer Table Base Address */
+-	unsigned short	tm_ptr;		/* RISC Timer Table Pointer (internal) */
+-	unsigned short	r_tmr;		/* RISC Timer Mode Register */
+-	unsigned short	r_tmv;		/* RISC Timer Valid Register */
+-	unsigned long	tm_cmd;		/* RISC Timer Command Register */
+-	unsigned long	tm_cnt;		/* RISC Timer Internal Count */
+-} rt_pram_t;
+-
+-/* Bits in RISC Timer Command Register */
+-#define TM_CMD_VALID	0x80000000	/* Valid - Enables the timer */
+-#define TM_CMD_RESTART	0x40000000	/* Restart - for automatic restart */
+-#define TM_CMD_PWM	0x20000000	/* Run in Pulse Width Modulation Mode */
+-#define TM_CMD_NUM(n)	(((n)&0xF)<<16)	/* Timer Number */
+-#define TM_CMD_PERIOD(p) ((p)&0xFFFF)	/* Timer Period */
+-
+-/* CPM interrupts.  There are nearly 32 interrupts generated by CPM
+- * channels or devices.  All of these are presented to the PPC core
+- * as a single interrupt.  The CPM interrupt handler dispatches its
+- * own handlers, in a similar fashion to the PPC core handler.  We
+- * use the table as defined in the manuals (i.e. no special high
+- * priority and SCC1 == SCCa, etc...).
+- */
+-#define CPMVEC_NR		32
+-#define	CPMVEC_PIO_PC15		((ushort)0x1f)
+-#define	CPMVEC_SCC1		((ushort)0x1e)
+-#define	CPMVEC_SCC2		((ushort)0x1d)
+-#define	CPMVEC_SCC3		((ushort)0x1c)
+-#define	CPMVEC_SCC4		((ushort)0x1b)
+-#define	CPMVEC_PIO_PC14		((ushort)0x1a)
+-#define	CPMVEC_TIMER1		((ushort)0x19)
+-#define	CPMVEC_PIO_PC13		((ushort)0x18)
+-#define	CPMVEC_PIO_PC12		((ushort)0x17)
+-#define	CPMVEC_SDMA_CB_ERR	((ushort)0x16)
+-#define CPMVEC_IDMA1		((ushort)0x15)
+-#define CPMVEC_IDMA2		((ushort)0x14)
+-#define CPMVEC_TIMER2		((ushort)0x12)
+-#define CPMVEC_RISCTIMER	((ushort)0x11)
+-#define CPMVEC_I2C		((ushort)0x10)
+-#define	CPMVEC_PIO_PC11		((ushort)0x0f)
+-#define	CPMVEC_PIO_PC10		((ushort)0x0e)
+-#define CPMVEC_TIMER3		((ushort)0x0c)
+-#define	CPMVEC_PIO_PC9		((ushort)0x0b)
+-#define	CPMVEC_PIO_PC8		((ushort)0x0a)
+-#define	CPMVEC_PIO_PC7		((ushort)0x09)
+-#define CPMVEC_TIMER4		((ushort)0x07)
+-#define	CPMVEC_PIO_PC6		((ushort)0x06)
+-#define	CPMVEC_SPI		((ushort)0x05)
+-#define	CPMVEC_SMC1		((ushort)0x04)
+-#define	CPMVEC_SMC2		((ushort)0x03)
+-#define	CPMVEC_PIO_PC5		((ushort)0x02)
+-#define	CPMVEC_PIO_PC4		((ushort)0x01)
+-#define	CPMVEC_ERROR		((ushort)0x00)
+-
+-/* CPM interrupt configuration vector.
+-*/
+-#define	CICR_SCD_SCC4		((uint)0x00c00000)	/* SCC4 @ SCCd */
+-#define	CICR_SCC_SCC3		((uint)0x00200000)	/* SCC3 @ SCCc */
+-#define	CICR_SCB_SCC2		((uint)0x00040000)	/* SCC2 @ SCCb */
+-#define	CICR_SCA_SCC1		((uint)0x00000000)	/* SCC1 @ SCCa */
+-#define CICR_IRL_MASK		((uint)0x0000e000)	/* Core interrrupt */
+-#define CICR_HP_MASK		((uint)0x00001f00)	/* Hi-pri int. */
+-#define CICR_IEN		((uint)0x00000080)	/* Int. enable */
+-#define CICR_SPS		((uint)0x00000001)	/* SCC Spread */
+-
+-extern void cpm_install_handler(int vec, void (*handler)(void *), void *dev_id);
+-extern void cpm_free_handler(int vec);
+-
+-#endif /* __CPM_8XX__ */
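The CPM command-register values carried over into the new cpm1.h below are combined with mk_cr_cmd() in the usual 8xx idiom; a sketch, assuming cpmp and its cp_cpcr field from 8xx_immap.h (the surrounding driver context is hypothetical):

    /* issue "init Rx and Tx parameters" to SMC1 and spin until the CPM clears the flag */
    cpmp->cp_cpcr = mk_cr_cmd(CPM_CR_CH_SMC1, CPM_CR_INIT_TRX) | CPM_CR_FLG;
    while (cpmp->cp_cpcr & CPM_CR_FLG)
    	;
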
+diff --git a/include/asm-ppc/cpm1.h b/include/asm-ppc/cpm1.h
+new file mode 100644
+index 0000000..03035ac
+--- /dev/null
++++ b/include/asm-ppc/cpm1.h
+@@ -0,0 +1,688 @@
++/*
++ * MPC8xx Communication Processor Module.
++ * Copyright (c) 1997 Dan Malek (dmalek at jlc.net)
++ *
++ * This file contains structures and information for the communication
++ * processor channels.  Some CPM control and status is available
++ * through the MPC8xx internal memory map.  See immap.h for details.
++ * This file only contains what I need for the moment, not the total
++ * CPM capabilities.  I (or someone else) will add definitions as they
++ * are needed.  -- Dan
++ *
++ * On the MBX board, EPPC-Bug loads CPM microcode into the first 512
++ * bytes of the DP RAM and relocates the I2C parameter area to the
++ * IDMA1 space.  The remaining DP RAM is available for buffer descriptors
++ * or other use.
++ */
++#ifndef __CPM1__
++#define __CPM1__
++
++#include <asm/8xx_immap.h>
++#include <asm/ptrace.h>
++
++/* CPM Command register.
++*/
++#define CPM_CR_RST	((ushort)0x8000)
++#define CPM_CR_OPCODE	((ushort)0x0f00)
++#define CPM_CR_CHAN	((ushort)0x00f0)
++#define CPM_CR_FLG	((ushort)0x0001)
++
++/* Some commands (there are more...later)
++*/
++#define CPM_CR_INIT_TRX		((ushort)0x0000)
++#define CPM_CR_INIT_RX		((ushort)0x0001)
++#define CPM_CR_INIT_TX		((ushort)0x0002)
++#define CPM_CR_HUNT_MODE	((ushort)0x0003)
++#define CPM_CR_STOP_TX		((ushort)0x0004)
++#define CPM_CR_GRA_STOP_TX	((ushort)0x0005)
++#define CPM_CR_RESTART_TX	((ushort)0x0006)
++#define CPM_CR_CLOSE_RX_BD	((ushort)0x0007)
++#define CPM_CR_SET_GADDR	((ushort)0x0008)
++#define CPM_CR_SET_TIMER	CPM_CR_SET_GADDR
++
++/* Channel numbers.
++*/
++#define CPM_CR_CH_SCC1		((ushort)0x0000)
++#define CPM_CR_CH_I2C		((ushort)0x0001)	/* I2C and IDMA1 */
++#define CPM_CR_CH_SCC2		((ushort)0x0004)
++#define CPM_CR_CH_SPI		((ushort)0x0005)	/* SPI / IDMA2 / Timers */
++#define CPM_CR_CH_TIMER		CPM_CR_CH_SPI
++#define CPM_CR_CH_SCC3		((ushort)0x0008)
++#define CPM_CR_CH_SMC1		((ushort)0x0009)	/* SMC1 / DSP1 */
++#define CPM_CR_CH_SCC4		((ushort)0x000c)
++#define CPM_CR_CH_SMC2		((ushort)0x000d)	/* SMC2 / DSP2 */
++
++#define mk_cr_cmd(CH, CMD)	((CMD << 8) | (CH << 4))
++
++/* The dual ported RAM is multi-functional.  Some areas can be (and are
++ * being) used for microcode.  There is an area that can only be used
++ * as data ram for buffer descriptors, which is all we use right now.
++ * Currently the first 512 and last 256 bytes are used for microcode.
++ */
++#define CPM_DATAONLY_BASE	((uint)0x0800)
++#define CPM_DATAONLY_SIZE	((uint)0x0700)
++#define CPM_DP_NOSPACE		((uint)0x7fffffff)
++
++/* Export the base address of the communication processor registers
++ * and dual port ram.
++ */
++extern	cpm8xx_t	*cpmp;		/* Pointer to comm processor */
++extern unsigned long cpm_dpalloc(uint size, uint align);
++extern int cpm_dpfree(unsigned long offset);
++extern unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align);
++extern void cpm_dpdump(void);
++extern void *cpm_dpram_addr(unsigned long offset);
++extern uint cpm_dpram_phys(u8 *addr);
++extern void cpm_setbrg(uint brg, uint rate);
++
++extern void cpm_load_patch(volatile immap_t *immr);
++
++/* Buffer descriptors used by many of the CPM protocols.
++*/
++typedef struct cpm_buf_desc {
++	ushort	cbd_sc;		/* Status and Control */
++	ushort	cbd_datlen;	/* Data length in buffer */
++	uint	cbd_bufaddr;	/* Buffer address in host memory */
++} cbd_t;
++
++#define BD_SC_EMPTY	((ushort)0x8000)	/* Receive is empty */
++#define BD_SC_READY	((ushort)0x8000)	/* Transmit is ready */
++#define BD_SC_WRAP	((ushort)0x2000)	/* Last buffer descriptor */
++#define BD_SC_INTRPT	((ushort)0x1000)	/* Interrupt on change */
++#define BD_SC_LAST	((ushort)0x0800)	/* Last buffer in frame */
++#define BD_SC_TC	((ushort)0x0400)	/* Transmit CRC */
++#define BD_SC_CM	((ushort)0x0200)	/* Continuous mode */
++#define BD_SC_ID	((ushort)0x0100)	/* Rec'd too many idles */
++#define BD_SC_P		((ushort)0x0100)	/* xmt preamble */
++#define BD_SC_BR	((ushort)0x0020)	/* Break received */
++#define BD_SC_FR	((ushort)0x0010)	/* Framing error */
++#define BD_SC_PR	((ushort)0x0008)	/* Parity error */
++#define BD_SC_NAK	((ushort)0x0004)	/* NAK - did not respond */
++#define BD_SC_OV	((ushort)0x0002)	/* Overrun */
++#define BD_SC_UN	((ushort)0x0002)	/* Underrun */
++#define BD_SC_CD	((ushort)0x0001)	/* ?? */
++#define BD_SC_CL	((ushort)0x0001)	/* Collision */
++
++/* Parameter RAM offsets.
++*/
++#define PROFF_SCC1	((uint)0x0000)
++#define PROFF_IIC	((uint)0x0080)
++#define PROFF_SCC2	((uint)0x0100)
++#define PROFF_SPI	((uint)0x0180)
++#define PROFF_SCC3	((uint)0x0200)
++#define PROFF_SMC1	((uint)0x0280)
++#define PROFF_SCC4	((uint)0x0300)
++#define PROFF_SMC2	((uint)0x0380)
++
++/* Define enough so I can at least use the serial port as a UART.
++ * The MBX uses SMC1 as the host serial port.
++ */
++typedef struct smc_uart {
++	ushort	smc_rbase;	/* Rx Buffer descriptor base address */
++	ushort	smc_tbase;	/* Tx Buffer descriptor base address */
++	u_char	smc_rfcr;	/* Rx function code */
++	u_char	smc_tfcr;	/* Tx function code */
++	ushort	smc_mrblr;	/* Max receive buffer length */
++	uint	smc_rstate;	/* Internal */
++	uint	smc_idp;	/* Internal */
++	ushort	smc_rbptr;	/* Internal */
++	ushort	smc_ibc;	/* Internal */
++	uint	smc_rxtmp;	/* Internal */
++	uint	smc_tstate;	/* Internal */
++	uint	smc_tdp;	/* Internal */
++	ushort	smc_tbptr;	/* Internal */
++	ushort	smc_tbc;	/* Internal */
++	uint	smc_txtmp;	/* Internal */
++	ushort	smc_maxidl;	/* Maximum idle characters */
++	ushort	smc_tmpidl;	/* Temporary idle counter */
++	ushort	smc_brklen;	/* Last received break length */
++	ushort	smc_brkec;	/* rcv'd break condition counter */
++	ushort	smc_brkcr;	/* xmt break count register */
++	ushort	smc_rmask;	/* Temporary bit mask */
++	char	res1[8];	/* Reserved */
++	ushort	smc_rpbase;	/* Relocation pointer */
++} smc_uart_t;
++
++/* Function code bits.
++*/
++#define SMC_EB	((u_char)0x10)	/* Set big endian byte order */
++
++/* SMC uart mode register.
++*/
++#define	SMCMR_REN	((ushort)0x0001)
++#define SMCMR_TEN	((ushort)0x0002)
++#define SMCMR_DM	((ushort)0x000c)
++#define SMCMR_SM_GCI	((ushort)0x0000)
++#define SMCMR_SM_UART	((ushort)0x0020)
++#define SMCMR_SM_TRANS	((ushort)0x0030)
++#define SMCMR_SM_MASK	((ushort)0x0030)
++#define SMCMR_PM_EVEN	((ushort)0x0100)	/* Even parity, else odd */
++#define SMCMR_REVD	SMCMR_PM_EVEN
++#define SMCMR_PEN	((ushort)0x0200)	/* Parity enable */
++#define SMCMR_BS	SMCMR_PEN
++#define SMCMR_SL	((ushort)0x0400)	/* Two stops, else one */
++#define SMCR_CLEN_MASK	((ushort)0x7800)	/* Character length */
++#define smcr_mk_clen(C)	(((C) << 11) & SMCR_CLEN_MASK)
++
++/* SMC2 as Centronics parallel printer.  It is half duplex, in that
++ * it can only receive or transmit.  The parameter ram values for
++ * each direction are either unique or properly overlap, so we can
++ * include them in one structure.
++ */
++typedef struct smc_centronics {
++	ushort	scent_rbase;
++	ushort	scent_tbase;
++	u_char	scent_cfcr;
++	u_char	scent_smask;
++	ushort	scent_mrblr;
++	uint	scent_rstate;
++	uint	scent_r_ptr;
++	ushort	scent_rbptr;
++	ushort	scent_r_cnt;
++	uint	scent_rtemp;
++	uint	scent_tstate;
++	uint	scent_t_ptr;
++	ushort	scent_tbptr;
++	ushort	scent_t_cnt;
++	uint	scent_ttemp;
++	ushort	scent_max_sl;
++	ushort	scent_sl_cnt;
++	ushort	scent_character1;
++	ushort	scent_character2;
++	ushort	scent_character3;
++	ushort	scent_character4;
++	ushort	scent_character5;
++	ushort	scent_character6;
++	ushort	scent_character7;
++	ushort	scent_character8;
++	ushort	scent_rccm;
++	ushort	scent_rccr;
++} smc_cent_t;
++
++/* Centronics Status Mask Register.
++*/
++#define SMC_CENT_F	((u_char)0x08)
++#define SMC_CENT_PE	((u_char)0x04)
++#define SMC_CENT_S	((u_char)0x02)
++
++/* SMC Event and Mask register.
++*/
++#define	SMCM_BRKE	((unsigned char)0x40)	/* When in UART Mode */
++#define	SMCM_BRK	((unsigned char)0x10)	/* When in UART Mode */
++#define	SMCM_TXE	((unsigned char)0x10)	/* When in Transparent Mode */
++#define	SMCM_BSY	((unsigned char)0x04)
++#define	SMCM_TX		((unsigned char)0x02)
++#define	SMCM_RX		((unsigned char)0x01)
++
++/* Baud rate generators.
++*/
++#define CPM_BRG_RST		((uint)0x00020000)
++#define CPM_BRG_EN		((uint)0x00010000)
++#define CPM_BRG_EXTC_INT	((uint)0x00000000)
++#define CPM_BRG_EXTC_CLK2	((uint)0x00004000)
++#define CPM_BRG_EXTC_CLK6	((uint)0x00008000)
++#define CPM_BRG_ATB		((uint)0x00002000)
++#define CPM_BRG_CD_MASK		((uint)0x00001ffe)
++#define CPM_BRG_DIV16		((uint)0x00000001)
++
++/* SI Clock Route Register
++*/
++#define SICR_RCLK_SCC1_BRG1	((uint)0x00000000)
++#define SICR_TCLK_SCC1_BRG1	((uint)0x00000000)
++#define SICR_RCLK_SCC2_BRG2	((uint)0x00000800)
++#define SICR_TCLK_SCC2_BRG2	((uint)0x00000100)
++#define SICR_RCLK_SCC3_BRG3	((uint)0x00100000)
++#define SICR_TCLK_SCC3_BRG3	((uint)0x00020000)
++#define SICR_RCLK_SCC4_BRG4	((uint)0x18000000)
++#define SICR_TCLK_SCC4_BRG4	((uint)0x03000000)
++
++/* SCCs.
++*/
++#define SCC_GSMRH_IRP		((uint)0x00040000)
++#define SCC_GSMRH_GDE		((uint)0x00010000)
++#define SCC_GSMRH_TCRC_CCITT	((uint)0x00008000)
++#define SCC_GSMRH_TCRC_BISYNC	((uint)0x00004000)
++#define SCC_GSMRH_TCRC_HDLC	((uint)0x00000000)
++#define SCC_GSMRH_REVD		((uint)0x00002000)
++#define SCC_GSMRH_TRX		((uint)0x00001000)
++#define SCC_GSMRH_TTX		((uint)0x00000800)
++#define SCC_GSMRH_CDP		((uint)0x00000400)
++#define SCC_GSMRH_CTSP		((uint)0x00000200)
++#define SCC_GSMRH_CDS		((uint)0x00000100)
++#define SCC_GSMRH_CTSS		((uint)0x00000080)
++#define SCC_GSMRH_TFL		((uint)0x00000040)
++#define SCC_GSMRH_RFW		((uint)0x00000020)
++#define SCC_GSMRH_TXSY		((uint)0x00000010)
++#define SCC_GSMRH_SYNL16	((uint)0x0000000c)
++#define SCC_GSMRH_SYNL8		((uint)0x00000008)
++#define SCC_GSMRH_SYNL4		((uint)0x00000004)
++#define SCC_GSMRH_RTSM		((uint)0x00000002)
++#define SCC_GSMRH_RSYN		((uint)0x00000001)
++
++#define SCC_GSMRL_SIR		((uint)0x80000000)	/* SCC2 only */
++#define SCC_GSMRL_EDGE_NONE	((uint)0x60000000)
++#define SCC_GSMRL_EDGE_NEG	((uint)0x40000000)
++#define SCC_GSMRL_EDGE_POS	((uint)0x20000000)
++#define SCC_GSMRL_EDGE_BOTH	((uint)0x00000000)
++#define SCC_GSMRL_TCI		((uint)0x10000000)
++#define SCC_GSMRL_TSNC_3	((uint)0x0c000000)
++#define SCC_GSMRL_TSNC_4	((uint)0x08000000)
++#define SCC_GSMRL_TSNC_14	((uint)0x04000000)
++#define SCC_GSMRL_TSNC_INF	((uint)0x00000000)
++#define SCC_GSMRL_RINV		((uint)0x02000000)
++#define SCC_GSMRL_TINV		((uint)0x01000000)
++#define SCC_GSMRL_TPL_128	((uint)0x00c00000)
++#define SCC_GSMRL_TPL_64	((uint)0x00a00000)
++#define SCC_GSMRL_TPL_48	((uint)0x00800000)
++#define SCC_GSMRL_TPL_32	((uint)0x00600000)
++#define SCC_GSMRL_TPL_16	((uint)0x00400000)
++#define SCC_GSMRL_TPL_8		((uint)0x00200000)
++#define SCC_GSMRL_TPL_NONE	((uint)0x00000000)
++#define SCC_GSMRL_TPP_ALL1	((uint)0x00180000)
++#define SCC_GSMRL_TPP_01	((uint)0x00100000)
++#define SCC_GSMRL_TPP_10	((uint)0x00080000)
++#define SCC_GSMRL_TPP_ZEROS	((uint)0x00000000)
++#define SCC_GSMRL_TEND		((uint)0x00040000)
++#define SCC_GSMRL_TDCR_32	((uint)0x00030000)
++#define SCC_GSMRL_TDCR_16	((uint)0x00020000)
++#define SCC_GSMRL_TDCR_8	((uint)0x00010000)
++#define SCC_GSMRL_TDCR_1	((uint)0x00000000)
++#define SCC_GSMRL_RDCR_32	((uint)0x0000c000)
++#define SCC_GSMRL_RDCR_16	((uint)0x00008000)
++#define SCC_GSMRL_RDCR_8	((uint)0x00004000)
++#define SCC_GSMRL_RDCR_1	((uint)0x00000000)
++#define SCC_GSMRL_RENC_DFMAN	((uint)0x00003000)
++#define SCC_GSMRL_RENC_MANCH	((uint)0x00002000)
++#define SCC_GSMRL_RENC_FM0	((uint)0x00001000)
++#define SCC_GSMRL_RENC_NRZI	((uint)0x00000800)
++#define SCC_GSMRL_RENC_NRZ	((uint)0x00000000)
++#define SCC_GSMRL_TENC_DFMAN	((uint)0x00000600)
++#define SCC_GSMRL_TENC_MANCH	((uint)0x00000400)
++#define SCC_GSMRL_TENC_FM0	((uint)0x00000200)
++#define SCC_GSMRL_TENC_NRZI	((uint)0x00000100)
++#define SCC_GSMRL_TENC_NRZ	((uint)0x00000000)
++#define SCC_GSMRL_DIAG_LE	((uint)0x000000c0)	/* Loop and echo */
++#define SCC_GSMRL_DIAG_ECHO	((uint)0x00000080)
++#define SCC_GSMRL_DIAG_LOOP	((uint)0x00000040)
++#define SCC_GSMRL_DIAG_NORM	((uint)0x00000000)
++#define SCC_GSMRL_ENR		((uint)0x00000020)
++#define SCC_GSMRL_ENT		((uint)0x00000010)
++#define SCC_GSMRL_MODE_ENET	((uint)0x0000000c)
++#define SCC_GSMRL_MODE_QMC	((uint)0x0000000a)
++#define SCC_GSMRL_MODE_DDCMP	((uint)0x00000009)
++#define SCC_GSMRL_MODE_BISYNC	((uint)0x00000008)
++#define SCC_GSMRL_MODE_V14	((uint)0x00000007)
++#define SCC_GSMRL_MODE_AHDLC	((uint)0x00000006)
++#define SCC_GSMRL_MODE_PROFIBUS	((uint)0x00000005)
++#define SCC_GSMRL_MODE_UART	((uint)0x00000004)
++#define SCC_GSMRL_MODE_SS7	((uint)0x00000003)
++#define SCC_GSMRL_MODE_ATALK	((uint)0x00000002)
++#define SCC_GSMRL_MODE_HDLC	((uint)0x00000000)
++
++#define SCC_TODR_TOD		((ushort)0x8000)
++
++/* SCC Event and Mask register.
++*/
++#define	SCCM_TXE	((unsigned char)0x10)
++#define	SCCM_BSY	((unsigned char)0x04)
++#define	SCCM_TX		((unsigned char)0x02)
++#define	SCCM_RX		((unsigned char)0x01)
++
++typedef struct scc_param {
++	ushort	scc_rbase;	/* Rx Buffer descriptor base address */
++	ushort	scc_tbase;	/* Tx Buffer descriptor base address */
++	u_char	scc_rfcr;	/* Rx function code */
++	u_char	scc_tfcr;	/* Tx function code */
++	ushort	scc_mrblr;	/* Max receive buffer length */
++	uint	scc_rstate;	/* Internal */
++	uint	scc_idp;	/* Internal */
++	ushort	scc_rbptr;	/* Internal */
++	ushort	scc_ibc;	/* Internal */
++	uint	scc_rxtmp;	/* Internal */
++	uint	scc_tstate;	/* Internal */
++	uint	scc_tdp;	/* Internal */
++	ushort	scc_tbptr;	/* Internal */
++	ushort	scc_tbc;	/* Internal */
++	uint	scc_txtmp;	/* Internal */
++	uint	scc_rcrc;	/* Internal */
++	uint	scc_tcrc;	/* Internal */
++} sccp_t;
++
++/* Function code bits.
++*/
++#define SCC_EB	((u_char)0x10)	/* Set big endian byte order */
++
++/* CPM Ethernet through SCCx.
++ */
++typedef struct scc_enet {
++	sccp_t	sen_genscc;
++	uint	sen_cpres;	/* Preset CRC */
++	uint	sen_cmask;	/* Constant mask for CRC */
++	uint	sen_crcec;	/* CRC Error counter */
++	uint	sen_alec;	/* alignment error counter */
++	uint	sen_disfc;	/* discard frame counter */
++	ushort	sen_pads;	/* Tx short frame pad character */
++	ushort	sen_retlim;	/* Retry limit threshold */
++	ushort	sen_retcnt;	/* Retry limit counter */
++	ushort	sen_maxflr;	/* maximum frame length register */
++	ushort	sen_minflr;	/* minimum frame length register */
++	ushort	sen_maxd1;	/* maximum DMA1 length */
++	ushort	sen_maxd2;	/* maximum DMA2 length */
++	ushort	sen_maxd;	/* Rx max DMA */
++	ushort	sen_dmacnt;	/* Rx DMA counter */
++	ushort	sen_maxb;	/* Max BD byte count */
++	ushort	sen_gaddr1;	/* Group address filter */
++	ushort	sen_gaddr2;
++	ushort	sen_gaddr3;
++	ushort	sen_gaddr4;
++	uint	sen_tbuf0data0;	/* Save area 0 - current frame */
++	uint	sen_tbuf0data1;	/* Save area 1 - current frame */
++	uint	sen_tbuf0rba;	/* Internal */
++	uint	sen_tbuf0crc;	/* Internal */
++	ushort	sen_tbuf0bcnt;	/* Internal */
++	ushort	sen_paddrh;	/* physical address (MSB) */
++	ushort	sen_paddrm;
++	ushort	sen_paddrl;	/* physical address (LSB) */
++	ushort	sen_pper;	/* persistence */
++	ushort	sen_rfbdptr;	/* Rx first BD pointer */
++	ushort	sen_tfbdptr;	/* Tx first BD pointer */
++	ushort	sen_tlbdptr;	/* Tx last BD pointer */
++	uint	sen_tbuf1data0;	/* Save area 0 - current frame */
++	uint	sen_tbuf1data1;	/* Save area 1 - current frame */
++	uint	sen_tbuf1rba;	/* Internal */
++	uint	sen_tbuf1crc;	/* Internal */
++	ushort	sen_tbuf1bcnt;	/* Internal */
++	ushort	sen_txlen;	/* Tx Frame length counter */
++	ushort	sen_iaddr1;	/* Individual address filter */
++	ushort	sen_iaddr2;
++	ushort	sen_iaddr3;
++	ushort	sen_iaddr4;
++	ushort	sen_boffcnt;	/* Backoff counter */
++
++	/* NOTE: Some versions of the manual have the following items
++	 * incorrectly documented.  Below is the proper order.
++	 */
++	ushort	sen_taddrh;	/* temp address (MSB) */
++	ushort	sen_taddrm;
++	ushort	sen_taddrl;	/* temp address (LSB) */
++} scc_enet_t;
++
++/* SCC Event register as used by Ethernet.
++*/
++#define SCCE_ENET_GRA	((ushort)0x0080)	/* Graceful stop complete */
++#define SCCE_ENET_TXE	((ushort)0x0010)	/* Transmit Error */
++#define SCCE_ENET_RXF	((ushort)0x0008)	/* Full frame received */
++#define SCCE_ENET_BSY	((ushort)0x0004)	/* All incoming buffers full */
++#define SCCE_ENET_TXB	((ushort)0x0002)	/* A buffer was transmitted */
++#define SCCE_ENET_RXB	((ushort)0x0001)	/* A buffer was received */
++
++/* SCC Mode Register (PSMR) as used by Ethernet.
++*/
++#define SCC_PSMR_HBC	((ushort)0x8000)	/* Enable heartbeat */
++#define SCC_PSMR_FC	((ushort)0x4000)	/* Force collision */
++#define SCC_PSMR_RSH	((ushort)0x2000)	/* Receive short frames */
++#define SCC_PSMR_IAM	((ushort)0x1000)	/* Check individual hash */
++#define SCC_PSMR_ENCRC	((ushort)0x0800)	/* Ethernet CRC mode */
++#define SCC_PSMR_PRO	((ushort)0x0200)	/* Promiscuous mode */
++#define SCC_PSMR_BRO	((ushort)0x0100)	/* Catch broadcast pkts */
++#define SCC_PSMR_SBT	((ushort)0x0080)	/* Special backoff timer */
++#define SCC_PSMR_LPB	((ushort)0x0040)	/* Set Loopback mode */
++#define SCC_PSMR_SIP	((ushort)0x0020)	/* Sample Input Pins */
++#define SCC_PSMR_LCW	((ushort)0x0010)	/* Late collision window */
++#define SCC_PSMR_NIB22	((ushort)0x000a)	/* Start frame search */
++#define SCC_PSMR_FDE	((ushort)0x0001)	/* Full duplex enable */
++
++/* Buffer descriptor control/status used by Ethernet receive.
++*/
++#define BD_ENET_RX_EMPTY	((ushort)0x8000)
++#define BD_ENET_RX_WRAP		((ushort)0x2000)
++#define BD_ENET_RX_INTR		((ushort)0x1000)
++#define BD_ENET_RX_LAST		((ushort)0x0800)
++#define BD_ENET_RX_FIRST	((ushort)0x0400)
++#define BD_ENET_RX_MISS		((ushort)0x0100)
++#define BD_ENET_RX_LG		((ushort)0x0020)
++#define BD_ENET_RX_NO		((ushort)0x0010)
++#define BD_ENET_RX_SH		((ushort)0x0008)
++#define BD_ENET_RX_CR		((ushort)0x0004)
++#define BD_ENET_RX_OV		((ushort)0x0002)
++#define BD_ENET_RX_CL		((ushort)0x0001)
++#define BD_ENET_RX_BC		((ushort)0x0080)	/* DA is Broadcast */
++#define BD_ENET_RX_MC		((ushort)0x0040)	/* DA is Multicast */
++#define BD_ENET_RX_STATS	((ushort)0x013f)	/* All status bits */
++
++/* Buffer descriptor control/status used by Ethernet transmit.
++*/
++#define BD_ENET_TX_READY	((ushort)0x8000)
++#define BD_ENET_TX_PAD		((ushort)0x4000)
++#define BD_ENET_TX_WRAP		((ushort)0x2000)
++#define BD_ENET_TX_INTR		((ushort)0x1000)
++#define BD_ENET_TX_LAST		((ushort)0x0800)
++#define BD_ENET_TX_TC		((ushort)0x0400)
++#define BD_ENET_TX_DEF		((ushort)0x0200)
++#define BD_ENET_TX_HB		((ushort)0x0100)
++#define BD_ENET_TX_LC		((ushort)0x0080)
++#define BD_ENET_TX_RL		((ushort)0x0040)
++#define BD_ENET_TX_RCMASK	((ushort)0x003c)
++#define BD_ENET_TX_UN		((ushort)0x0002)
++#define BD_ENET_TX_CSL		((ushort)0x0001)
++#define BD_ENET_TX_STATS	((ushort)0x03ff)	/* All status bits */
++
++/* SCC as UART
++*/
++typedef struct scc_uart {
++	sccp_t	scc_genscc;
++	char	res1[8];	/* Reserved */
++	ushort	scc_maxidl;	/* Maximum idle chars */
++	ushort	scc_idlc;	/* temp idle counter */
++	ushort	scc_brkcr;	/* Break count register */
++	ushort	scc_parec;	/* receive parity error counter */
++	ushort	scc_frmec;	/* receive framing error counter */
++	ushort	scc_nosec;	/* receive noise counter */
++	ushort	scc_brkec;	/* receive break condition counter */
++	ushort	scc_brkln;	/* last received break length */
++	ushort	scc_uaddr1;	/* UART address character 1 */
++	ushort	scc_uaddr2;	/* UART address character 2 */
++	ushort	scc_rtemp;	/* Temp storage */
++	ushort	scc_toseq;	/* Transmit out of sequence char */
++	ushort	scc_char1;	/* control character 1 */
++	ushort	scc_char2;	/* control character 2 */
++	ushort	scc_char3;	/* control character 3 */
++	ushort	scc_char4;	/* control character 4 */
++	ushort	scc_char5;	/* control character 5 */
++	ushort	scc_char6;	/* control character 6 */
++	ushort	scc_char7;	/* control character 7 */
++	ushort	scc_char8;	/* control character 8 */
++	ushort	scc_rccm;	/* receive control character mask */
++	ushort	scc_rccr;	/* receive control character register */
++	ushort	scc_rlbc;	/* receive last break character */
++} scc_uart_t;
++
++/* SCC Event and Mask registers when it is used as a UART.
++*/
++#define UART_SCCM_GLR		((ushort)0x1000)
++#define UART_SCCM_GLT		((ushort)0x0800)
++#define UART_SCCM_AB		((ushort)0x0200)
++#define UART_SCCM_IDL		((ushort)0x0100)
++#define UART_SCCM_GRA		((ushort)0x0080)
++#define UART_SCCM_BRKE		((ushort)0x0040)
++#define UART_SCCM_BRKS		((ushort)0x0020)
++#define UART_SCCM_CCR		((ushort)0x0008)
++#define UART_SCCM_BSY		((ushort)0x0004)
++#define UART_SCCM_TX		((ushort)0x0002)
++#define UART_SCCM_RX		((ushort)0x0001)
++
++/* The SCC PSMR when used as a UART.
++*/
++#define SCU_PSMR_FLC		((ushort)0x8000)
++#define SCU_PSMR_SL		((ushort)0x4000)
++#define SCU_PSMR_CL		((ushort)0x3000)
++#define SCU_PSMR_UM		((ushort)0x0c00)
++#define SCU_PSMR_FRZ		((ushort)0x0200)
++#define SCU_PSMR_RZS		((ushort)0x0100)
++#define SCU_PSMR_SYN		((ushort)0x0080)
++#define SCU_PSMR_DRT		((ushort)0x0040)
++#define SCU_PSMR_PEN		((ushort)0x0010)
++#define SCU_PSMR_RPM		((ushort)0x000c)
++#define SCU_PSMR_REVP		((ushort)0x0008)
++#define SCU_PSMR_TPM		((ushort)0x0003)
++#define SCU_PSMR_TEVP		((ushort)0x0002)
++
++/* CPM Transparent mode SCC.
++ */
++typedef struct scc_trans {
++	sccp_t	st_genscc;
++	uint	st_cpres;	/* Preset CRC */
++	uint	st_cmask;	/* Constant mask for CRC */
++} scc_trans_t;
++
++#define BD_SCC_TX_LAST		((ushort)0x0800)
++
++/* IIC parameter RAM.
++*/
++typedef struct iic {
++	ushort	iic_rbase;	/* Rx Buffer descriptor base address */
++	ushort	iic_tbase;	/* Tx Buffer descriptor base address */
++	u_char	iic_rfcr;	/* Rx function code */
++	u_char	iic_tfcr;	/* Tx function code */
++	ushort	iic_mrblr;	/* Max receive buffer length */
++	uint	iic_rstate;	/* Internal */
++	uint	iic_rdp;	/* Internal */
++	ushort	iic_rbptr;	/* Internal */
++	ushort	iic_rbc;	/* Internal */
++	uint	iic_rxtmp;	/* Internal */
++	uint	iic_tstate;	/* Internal */
++	uint	iic_tdp;	/* Internal */
++	ushort	iic_tbptr;	/* Internal */
++	ushort	iic_tbc;	/* Internal */
++	uint	iic_txtmp;	/* Internal */
++	char	res1[4];	/* Reserved */
++	ushort	iic_rpbase;	/* Relocation pointer */
++	char	res2[2];	/* Reserved */
++} iic_t;
++
++#define BD_IIC_START		((ushort)0x0400)
++
++/* SPI parameter RAM.
++*/
++typedef struct spi {
++	ushort	spi_rbase;	/* Rx Buffer descriptor base address */
++	ushort	spi_tbase;	/* Tx Buffer descriptor base address */
++	u_char	spi_rfcr;	/* Rx function code */
++	u_char	spi_tfcr;	/* Tx function code */
++	ushort	spi_mrblr;	/* Max receive buffer length */
++	uint	spi_rstate;	/* Internal */
++	uint	spi_rdp;	/* Internal */
++	ushort	spi_rbptr;	/* Internal */
++	ushort	spi_rbc;	/* Internal */
++	uint	spi_rxtmp;	/* Internal */
++	uint	spi_tstate;	/* Internal */
++	uint	spi_tdp;	/* Internal */
++	ushort	spi_tbptr;	/* Internal */
++	ushort	spi_tbc;	/* Internal */
++	uint	spi_txtmp;	/* Internal */
++	uint	spi_res;
++	ushort	spi_rpbase;	/* Relocation pointer */
++	ushort	spi_res2;
++} spi_t;
++
++/* SPI Mode register.
++*/
++#define SPMODE_LOOP	((ushort)0x4000)	/* Loopback */
++#define SPMODE_CI	((ushort)0x2000)	/* Clock Invert */
++#define SPMODE_CP	((ushort)0x1000)	/* Clock Phase */
++#define SPMODE_DIV16	((ushort)0x0800)	/* BRG/16 mode */
++#define SPMODE_REV	((ushort)0x0400)	/* Reversed Data */
++#define SPMODE_MSTR	((ushort)0x0200)	/* SPI Master */
++#define SPMODE_EN	((ushort)0x0100)	/* Enable */
++#define SPMODE_LENMSK	((ushort)0x00f0)	/* character length */
++#define SPMODE_LEN4	((ushort)0x0030)	/*  4 bits per char */
++#define SPMODE_LEN8	((ushort)0x0070)	/*  8 bits per char */
++#define SPMODE_LEN16	((ushort)0x00f0)	/* 16 bits per char */
++#define SPMODE_PMMSK	((ushort)0x000f)	/* prescale modulus */
++
++/* SPIE fields */
++#define SPIE_MME	0x20
++#define SPIE_TXE	0x10
++#define SPIE_BSY	0x04
++#define SPIE_TXB	0x02
++#define SPIE_RXB	0x01
++
++/*
++ * RISC Controller Configuration Register definitions
++ */
++#define RCCR_TIME	0x8000			/* RISC Timer Enable */
++#define RCCR_TIMEP(t)	(((t) & 0x3F)<<8)	/* RISC Timer Period */
++#define RCCR_TIME_MASK	0x00FF			/* not RISC Timer related bits */
++
++/* RISC Timer Parameter RAM offset */
++#define PROFF_RTMR	((uint)0x01B0)
++
++typedef struct risc_timer_pram {
++	unsigned short	tm_base;	/* RISC Timer Table Base Address */
++	unsigned short	tm_ptr;		/* RISC Timer Table Pointer (internal) */
++	unsigned short	r_tmr;		/* RISC Timer Mode Register */
++	unsigned short	r_tmv;		/* RISC Timer Valid Register */
++	unsigned long	tm_cmd;		/* RISC Timer Command Register */
++	unsigned long	tm_cnt;		/* RISC Timer Internal Count */
++} rt_pram_t;
++
++/* Bits in RISC Timer Command Register */
++#define TM_CMD_VALID	0x80000000	/* Valid - Enables the timer */
++#define TM_CMD_RESTART	0x40000000	/* Restart - for automatic restart */
++#define TM_CMD_PWM	0x20000000	/* Run in Pulse Width Modulation Mode */
++#define TM_CMD_NUM(n)	(((n)&0xF)<<16)	/* Timer Number */
++#define TM_CMD_PERIOD(p) ((p)&0xFFFF)	/* Timer Period */
++
++/* CPM interrupts.  There are nearly 32 interrupts generated by CPM
++ * channels or devices.  All of these are presented to the PPC core
++ * as a single interrupt.  The CPM interrupt handler dispatches its
++ * own handlers, in a similar fashion to the PPC core handler.  We
++ * use the table as defined in the manuals (i.e. no special high
++ * priority and SCC1 == SCCa, etc...).
++ */
++#define CPMVEC_NR		32
++#define	CPMVEC_PIO_PC15		((ushort)0x1f)
++#define	CPMVEC_SCC1		((ushort)0x1e)
++#define	CPMVEC_SCC2		((ushort)0x1d)
++#define	CPMVEC_SCC3		((ushort)0x1c)
++#define	CPMVEC_SCC4		((ushort)0x1b)
++#define	CPMVEC_PIO_PC14		((ushort)0x1a)
++#define	CPMVEC_TIMER1		((ushort)0x19)
++#define	CPMVEC_PIO_PC13		((ushort)0x18)
++#define	CPMVEC_PIO_PC12		((ushort)0x17)
++#define	CPMVEC_SDMA_CB_ERR	((ushort)0x16)
++#define CPMVEC_IDMA1		((ushort)0x15)
++#define CPMVEC_IDMA2		((ushort)0x14)
++#define CPMVEC_TIMER2		((ushort)0x12)
++#define CPMVEC_RISCTIMER	((ushort)0x11)
++#define CPMVEC_I2C		((ushort)0x10)
++#define	CPMVEC_PIO_PC11		((ushort)0x0f)
++#define	CPMVEC_PIO_PC10		((ushort)0x0e)
++#define CPMVEC_TIMER3		((ushort)0x0c)
++#define	CPMVEC_PIO_PC9		((ushort)0x0b)
++#define	CPMVEC_PIO_PC8		((ushort)0x0a)
++#define	CPMVEC_PIO_PC7		((ushort)0x09)
++#define CPMVEC_TIMER4		((ushort)0x07)
++#define	CPMVEC_PIO_PC6		((ushort)0x06)
++#define	CPMVEC_SPI		((ushort)0x05)
++#define	CPMVEC_SMC1		((ushort)0x04)
++#define	CPMVEC_SMC2		((ushort)0x03)
++#define	CPMVEC_PIO_PC5		((ushort)0x02)
++#define	CPMVEC_PIO_PC4		((ushort)0x01)
++#define	CPMVEC_ERROR		((ushort)0x00)
++
++/* CPM interrupt configuration vector.
++*/
++#define	CICR_SCD_SCC4		((uint)0x00c00000)	/* SCC4 @ SCCd */
++#define	CICR_SCC_SCC3		((uint)0x00200000)	/* SCC3 @ SCCc */
++#define	CICR_SCB_SCC2		((uint)0x00040000)	/* SCC2 @ SCCb */
++#define	CICR_SCA_SCC1		((uint)0x00000000)	/* SCC1 @ SCCa */
++#define CICR_IRL_MASK		((uint)0x0000e000)	/* Core interrupt */
++#define CICR_HP_MASK		((uint)0x00001f00)	/* Hi-pri int. */
++#define CICR_IEN		((uint)0x00000080)	/* Int. enable */
++#define CICR_SPS		((uint)0x00000001)	/* SCC Spread */
++
++extern void cpm_install_handler(int vec, void (*handler)(void *), void *dev_id);
++extern void cpm_free_handler(int vec);
++
++#endif /* __CPM1__ */
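For reference, the CPM interrupt and SMC UART definitions added above are consumed roughly as in the sketch below -- a hypothetical SMC1 driver fragment, not code from this patch; only cpm_install_handler()/cpm_free_handler(), CPMVEC_SMC1, the SMCMR_* bits and smc_uart_t are taken from the header, everything else (names, include path) is assumed.

/* Hypothetical SMC1 driver fragment using the cpm1.h API above. */
#include <asm/cpm1.h>			/* assumed location of these definitions */

struct my_smc_port {			/* placeholder driver state */
	volatile smc_uart_t *pram;	/* parameter RAM at PROFF_SMC1 */
};

static void my_smc1_interrupt(void *dev_id)
{
	struct my_smc_port *port = dev_id;
	/* read and acknowledge the SMC event register, process BDs here */
}

static int my_smc1_setup(struct my_smc_port *port)
{
	/* All 32 CPM sources arrive at the core as one interrupt; the CPM
	 * controller dispatches by vector, so register on SMC1's slot. */
	cpm_install_handler(CPMVEC_SMC1, my_smc1_interrupt, port);

	/* A UART-mode SMCMR would then OR together SMCMR_SM_UART,
	 * SMCMR_TEN, SMCMR_REN and a character length from smcr_mk_clen(). */
	return 0;
}

static void my_smc1_teardown(struct my_smc_port *port)
{
	cpm_free_handler(CPMVEC_SMC1);
}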
+diff --git a/include/asm-ppc/cpm2.h b/include/asm-ppc/cpm2.h
+index 12a2860..4c53822 100644
+--- a/include/asm-ppc/cpm2.h
++++ b/include/asm-ppc/cpm2.h
+@@ -90,7 +90,7 @@
+  */
+ #define CPM_DATAONLY_BASE	((uint)128)
+ #define CPM_DP_NOSPACE		((uint)0x7fffffff)
+-#if defined(CONFIG_8272) || defined(CONFIG_MPC8555)
++#if defined(CONFIG_8272)
+ #define CPM_DATAONLY_SIZE	((uint)(8 * 1024) - CPM_DATAONLY_BASE)
+ #define CPM_FCC_SPECIAL_BASE	((uint)0x00009000)
+ #else
+diff --git a/include/asm-ppc/immap_85xx.h b/include/asm-ppc/immap_85xx.h
+deleted file mode 100644
+index 9383d0c..0000000
+--- a/include/asm-ppc/immap_85xx.h
++++ /dev/null
+@@ -1,126 +0,0 @@
+-/*
+- * include/asm-ppc/immap_85xx.h
+- *
+- * MPC85xx Internal Memory Map
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor, Inc
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- *
+- */
+-
+-#ifdef __KERNEL__
+-#ifndef __ASM_IMMAP_85XX_H__
+-#define __ASM_IMMAP_85XX_H__
+-
+-/* Eventually this should define all the IO block registers in 85xx */
+-
+-/* PCI Registers */
+-typedef struct ccsr_pci {
+-	uint	cfg_addr;	/* 0x.000 - PCI Configuration Address Register */
+-	uint	cfg_data;	/* 0x.004 - PCI Configuration Data Register */
+-	uint	int_ack;	/* 0x.008 - PCI Interrupt Acknowledge Register */
+-	char	res1[3060];
+-	uint	potar0;		/* 0x.c00 - PCI Outbound Transaction Address Register 0 */
+-	uint	potear0;	/* 0x.c04 - PCI Outbound Translation Extended Address Register 0 */
+-	uint	powbar0;	/* 0x.c08 - PCI Outbound Window Base Address Register 0 */
+-	char	res2[4];
+-	uint	powar0;		/* 0x.c10 - PCI Outbound Window Attributes Register 0 */
+-	char	res3[12];
+-	uint	potar1;		/* 0x.c20 - PCI Outbound Transaction Address Register 1 */
+-	uint	potear1;	/* 0x.c24 - PCI Outbound Translation Extended Address Register 1 */
+-	uint	powbar1;	/* 0x.c28 - PCI Outbound Window Base Address Register 1 */
+-	char	res4[4];
+-	uint	powar1;		/* 0x.c30 - PCI Outbound Window Attributes Register 1 */
+-	char	res5[12];
+-	uint	potar2;		/* 0x.c40 - PCI Outbound Transaction Address Register 2 */
+-	uint	potear2;	/* 0x.c44 - PCI Outbound Translation Extended Address Register 2 */
+-	uint	powbar2;	/* 0x.c48 - PCI Outbound Window Base Address Register 2 */
+-	char	res6[4];
+-	uint	powar2;		/* 0x.c50 - PCI Outbound Window Attributes Register 2 */
+-	char	res7[12];
+-	uint	potar3;		/* 0x.c60 - PCI Outbound Transaction Address Register 3 */
+-	uint	potear3;	/* 0x.c64 - PCI Outbound Translation Extended Address Register 3 */
+-	uint	powbar3;	/* 0x.c68 - PCI Outbound Window Base Address Register 3 */
+-	char	res8[4];
+-	uint	powar3;		/* 0x.c70 - PCI Outbound Window Attributes Register 3 */
+-	char	res9[12];
+-	uint	potar4;		/* 0x.c80 - PCI Outbound Transaction Address Register 4 */
+-	uint	potear4;	/* 0x.c84 - PCI Outbound Translation Extended Address Register 4 */
+-	uint	powbar4;	/* 0x.c88 - PCI Outbound Window Base Address Register 4 */
+-	char	res10[4];
+-	uint	powar4;		/* 0x.c90 - PCI Outbound Window Attributes Register 4 */
+-	char	res11[268];
+-	uint	pitar3;		/* 0x.da0 - PCI Inbound Translation Address Register 3  */
+-	char	res12[4];
+-	uint	piwbar3;	/* 0x.da8 - PCI Inbound Window Base Address Register 3 */
+-	uint	piwbear3;	/* 0x.dac - PCI Inbound Window Base Extended Address Register 3 */
+-	uint	piwar3;		/* 0x.db0 - PCI Inbound Window Attributes Register 3 */
+-	char	res13[12];
+-	uint	pitar2;		/* 0x.dc0 - PCI Inbound Translation Address Register 2  */
+-	char	res14[4];
+-	uint	piwbar2;	/* 0x.dc8 - PCI Inbound Window Base Address Register 2 */
+-	uint	piwbear2;	/* 0x.dcc - PCI Inbound Window Base Extended Address Register 2 */
+-	uint	piwar2;		/* 0x.dd0 - PCI Inbound Window Attributes Register 2 */
+-	char	res15[12];
+-	uint	pitar1;		/* 0x.de0 - PCI Inbound Translation Address Register 1  */
+-	char	res16[4];
+-	uint	piwbar1;	/* 0x.de8 - PCI Inbound Window Base Address Register 1 */
+-	char	res17[4];
+-	uint	piwar1;		/* 0x.df0 - PCI Inbound Window Attributes Register 1 */
+-	char	res18[12];
+-	uint	err_dr;		/* 0x.e00 - PCI Error Detect Register */
+-	uint	err_cap_dr;	/* 0x.e04 - PCI Error Capture Disable Register */
+-	uint	err_en;		/* 0x.e08 - PCI Error Enable Register */
+-	uint	err_attrib;	/* 0x.e0c - PCI Error Attributes Capture Register */
+-	uint	err_addr;	/* 0x.e10 - PCI Error Address Capture Register */
+-	uint	err_ext_addr;	/* 0x.e14 - PCI Error Extended Address Capture Register */
+-	uint	err_dl;		/* 0x.e18 - PCI Error Data Low Capture Register */
+-	uint	err_dh;		/* 0x.e1c - PCI Error Data High Capture Register */
+-	uint	gas_timr;	/* 0x.e20 - PCI Gasket Timer Register */
+-	uint	pci_timr;	/* 0x.e24 - PCI Timer Register */
+-	char	res19[472];
+-} ccsr_pci_t;
+-
+-/* Global Utility Registers */
+-typedef struct ccsr_guts {
+-	uint	porpllsr;	/* 0x.0000 - POR PLL Ratio Status Register */
+-	uint	porbmsr;	/* 0x.0004 - POR Boot Mode Status Register */
+-	uint	porimpscr;	/* 0x.0008 - POR I/O Impedance Status and Control Register */
+-	uint	pordevsr;	/* 0x.000c - POR I/O Device Status Register */
+-	uint	pordbgmsr;	/* 0x.0010 - POR Debug Mode Status Register */
+-	char	res1[12];
+-	uint	gpporcr;	/* 0x.0020 - General-Purpose POR Configuration Register */
+-	char	res2[12];
+-	uint	gpiocr;		/* 0x.0030 - GPIO Control Register */
+-	char	res3[12];
+-	uint	gpoutdr;	/* 0x.0040 - General-Purpose Output Data Register */
+-	char	res4[12];
+-	uint	gpindr;		/* 0x.0050 - General-Purpose Input Data Register */
+-	char	res5[12];
+-	uint	pmuxcr;		/* 0x.0060 - Alternate Function Signal Multiplex Control */
+-	char	res6[12];
+-	uint	devdisr;	/* 0x.0070 - Device Disable Control */
+-	char	res7[12];
+-	uint	powmgtcsr;	/* 0x.0080 - Power Management Status and Control Register */
+-	char	res8[12];
+-	uint	mcpsumr;	/* 0x.0090 - Machine Check Summary Register */
+-	char	res9[12];
+-	uint	pvr;		/* 0x.00a0 - Processor Version Register */
+-	uint	svr;		/* 0x.00a4 - System Version Register */
+-	char	res10[3416];
+-	uint	clkocr;		/* 0x.0e00 - Clock Out Select Register */
+-	char	res11[12];
+-	uint	ddrdllcr;	/* 0x.0e10 - DDR DLL Control Register */
+-	char	res12[12];
+-	uint	lbcdllcr;	/* 0x.0e20 - LBC DLL Control Register */
+-	char	res13[61916];
+-} ccsr_guts_t;
+-
+-#endif /* __ASM_IMMAP_85XX_H__ */
+-#endif /* __KERNEL__ */
+diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h
+index 14584e5..d46b57b 100644
+--- a/include/asm-ppc/mmu.h
++++ b/include/asm-ppc/mmu.h
+@@ -383,6 +383,12 @@ typedef struct _P601_BAT {
+ #define BOOKE_PAGESZ_256GB	14
+ #define BOOKE_PAGESZ_1TB	15
+ 
++#ifndef CONFIG_SERIAL_TEXT_DEBUG
++#define PPC44x_EARLY_TLBS	1
++#else
++#define PPC44x_EARLY_TLBS	2
++#endif
++
+ /*
+  * Freescale Book-E MMU support
+  */
+diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
+index b2e25d8..9f097e2 100644
+--- a/include/asm-ppc/mmu_context.h
++++ b/include/asm-ppc/mmu_context.h
+@@ -64,11 +64,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ #define LAST_CONTEXT    	255
+ #define FIRST_CONTEXT    	1
+ 
+-#elif defined(CONFIG_E200) || defined(CONFIG_E500)
+-#define NO_CONTEXT      	256
+-#define LAST_CONTEXT    	255
+-#define FIRST_CONTEXT    	1
+-
+ #else
+ 
+ /* PPC 6xx, 7xx CPUs */
+diff --git a/include/asm-ppc/mpc52xx_psc.h b/include/asm-ppc/mpc52xx_psc.h
+index c82b8d4..39fcd02 100644
+--- a/include/asm-ppc/mpc52xx_psc.h
++++ b/include/asm-ppc/mpc52xx_psc.h
+@@ -159,6 +159,9 @@ struct mpc52xx_psc {
+ 	u8		reserved16[3];
+ 	u8		irfdr;		/* PSC + 0x54 */
+ 	u8		reserved17[3];
++};
++
++struct mpc52xx_psc_fifo {
+ 	u16		rfnum;		/* PSC + 0x58 */
+ 	u16		reserved18;
+ 	u16		tfnum;		/* PSC + 0x5c */
+diff --git a/include/asm-ppc/mpc83xx.h b/include/asm-ppc/mpc83xx.h
+deleted file mode 100644
+index c306197..0000000
+--- a/include/asm-ppc/mpc83xx.h
++++ /dev/null
+@@ -1,107 +0,0 @@
+-/*
+- * include/asm-ppc/mpc83xx.h
+- *
+- * MPC83xx definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2005 Freescale Semiconductor, Inc
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#ifdef __KERNEL__
+-#ifndef __ASM_MPC83xx_H__
+-#define __ASM_MPC83xx_H__
+-
+-#include <asm/mmu.h>
+-
+-#ifdef CONFIG_83xx
+-
+-#ifdef CONFIG_MPC834x_SYS
+-#include <platforms/83xx/mpc834x_sys.h>
+-#endif
+-
+-/*
+- * The "residual" board information structure the boot loader passes
+- * into the kernel.
+- */
+-extern unsigned char __res[];
+-
+-/* Internal IRQs on MPC83xx OpenPIC */
+-/* Not all of these exist on all MPC83xx implementations */
+-
+-#ifndef MPC83xx_IPIC_IRQ_OFFSET
+-#define MPC83xx_IPIC_IRQ_OFFSET	0
+-#endif
+-
+-#define NR_IPIC_INTS 128
+-
+-#define MPC83xx_IRQ_UART1	( 9 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_UART2	(10 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_SEC2	(11 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_IIC1	(14 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_IIC2	(15 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_SPI		(16 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_EXT1	(17 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_EXT2	(18 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_EXT3	(19 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_EXT4	(20 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_EXT5	(21 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_EXT6	(22 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_EXT7	(23 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_TSEC1_TX	(32 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_TSEC1_RX	(33 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_TSEC1_ERROR	(34 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_TSEC2_TX	(35 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_TSEC2_RX	(36 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_TSEC2_ERROR	(37 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_USB2_DR	(38 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_USB2_MPH	(39 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_EXT0	(48 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_RTC_SEC	(64 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_PIT		(65 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_PCI1	(66 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_PCI2	(67 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_RTC_ALR	(68 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_MU		(69 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_SBA		(70 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_DMA		(71 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GTM4	(72 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GTM8	(73 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GPIO1	(74 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GPIO2	(75 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_DDR		(76 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_LBC		(77 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GTM2	(78 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GTM6	(79 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_PMC		(80 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GTM3	(84 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GTM7	(85 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GTM1	(90 + MPC83xx_IPIC_IRQ_OFFSET)
+-#define MPC83xx_IRQ_GTM5	(91 + MPC83xx_IPIC_IRQ_OFFSET)
+-
+-#define MPC83xx_CCSRBAR_SIZE	(1024*1024)
+-
+-/* Let modules/drivers get at immrbar (physical) */
+-extern phys_addr_t immrbar;
+-
+-enum ppc_sys_devices {
+-	MPC83xx_TSEC1,
+-	MPC83xx_TSEC2,
+-	MPC83xx_IIC1,
+-	MPC83xx_IIC2,
+-	MPC83xx_DUART,
+-	MPC83xx_SEC2,
+-	MPC83xx_USB2_DR,
+-	MPC83xx_USB2_MPH,
+-	MPC83xx_MDIO,
+-	NUM_PPC_SYS_DEVS,
+-};
+-
+-#endif /* CONFIG_83xx */
+-#endif /* __ASM_MPC83xx_H__ */
+-#endif /* __KERNEL__ */
+diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
+deleted file mode 100644
+index d7e4a79..0000000
+--- a/include/asm-ppc/mpc85xx.h
++++ /dev/null
+@@ -1,192 +0,0 @@
+-/*
+- * include/asm-ppc/mpc85xx.h
+- *
+- * MPC85xx definitions
+- *
+- * Maintainer: Kumar Gala <galak at kernel.crashing.org>
+- *
+- * Copyright 2004 Freescale Semiconductor, Inc
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+-
+-#ifdef __KERNEL__
+-#ifndef __ASM_MPC85xx_H__
+-#define __ASM_MPC85xx_H__
+-
+-#include <asm/mmu.h>
+-
+-#ifdef CONFIG_85xx
+-
+-#ifdef CONFIG_MPC8540_ADS
+-#include <platforms/85xx/mpc8540_ads.h>
+-#endif
+-#if defined(CONFIG_MPC8555_CDS) || defined(CONFIG_MPC8548_CDS)
+-#include <platforms/85xx/mpc8555_cds.h>
+-#endif
+-#ifdef CONFIG_MPC85xx_CDS
+-#include <platforms/85xx/mpc85xx_cds.h>
+-#endif
+-#ifdef CONFIG_MPC8560_ADS
+-#include <platforms/85xx/mpc8560_ads.h>
+-#endif
+-#ifdef CONFIG_SBC8560
+-#include <platforms/85xx/sbc8560.h>
+-#endif
+-#ifdef CONFIG_STX_GP3
+-#include <platforms/85xx/stx_gp3.h>
+-#endif
+-#if defined(CONFIG_TQM8540) || defined(CONFIG_TQM8541) || \
+-	defined(CONFIG_TQM8555) || defined(CONFIG_TQM8560)
+-#include <platforms/85xx/tqm85xx.h>
+-#endif
+-
+-/*
+- * The "residual" board information structure the boot loader passes
+- * into the kernel.
+- */
+-extern unsigned char __res[];
+-
+-/* Offset from CCSRBAR */
+-#define MPC85xx_CPM_OFFSET	(0x80000)
+-#define MPC85xx_CPM_SIZE	(0x40000)
+-#define MPC85xx_DMA_OFFSET	(0x21000)
+-#define MPC85xx_DMA_SIZE	(0x01000)
+-#define MPC85xx_DMA0_OFFSET	(0x21100)
+-#define MPC85xx_DMA0_SIZE	(0x00080)
+-#define MPC85xx_DMA1_OFFSET	(0x21180)
+-#define MPC85xx_DMA1_SIZE	(0x00080)
+-#define MPC85xx_DMA2_OFFSET	(0x21200)
+-#define MPC85xx_DMA2_SIZE	(0x00080)
+-#define MPC85xx_DMA3_OFFSET	(0x21280)
+-#define MPC85xx_DMA3_SIZE	(0x00080)
+-#define MPC85xx_ENET1_OFFSET	(0x24000)
+-#define MPC85xx_ENET1_SIZE	(0x01000)
+-#define MPC85xx_MIIM_OFFSET	(0x24520)
+-#define MPC85xx_MIIM_SIZE	(0x00018)
+-#define MPC85xx_ENET2_OFFSET	(0x25000)
+-#define MPC85xx_ENET2_SIZE	(0x01000)
+-#define MPC85xx_ENET3_OFFSET	(0x26000)
+-#define MPC85xx_ENET3_SIZE	(0x01000)
+-#define MPC85xx_GUTS_OFFSET	(0xe0000)
+-#define MPC85xx_GUTS_SIZE	(0x01000)
+-#define MPC85xx_IIC1_OFFSET	(0x03000)
+-#define MPC85xx_IIC1_SIZE	(0x00100)
+-#define MPC85xx_OPENPIC_OFFSET	(0x40000)
+-#define MPC85xx_OPENPIC_SIZE	(0x40000)
+-#define MPC85xx_PCI1_OFFSET	(0x08000)
+-#define MPC85xx_PCI1_SIZE	(0x01000)
+-#define MPC85xx_PCI2_OFFSET	(0x09000)
+-#define MPC85xx_PCI2_SIZE	(0x01000)
+-#define MPC85xx_PERFMON_OFFSET	(0xe1000)
+-#define MPC85xx_PERFMON_SIZE	(0x01000)
+-#define MPC85xx_SEC2_OFFSET	(0x30000)
+-#define MPC85xx_SEC2_SIZE	(0x10000)
+-#define MPC85xx_UART0_OFFSET	(0x04500)
+-#define MPC85xx_UART0_SIZE	(0x00100)
+-#define MPC85xx_UART1_OFFSET	(0x04600)
+-#define MPC85xx_UART1_SIZE	(0x00100)
+-
+-#define MPC85xx_CCSRBAR_SIZE	(1024*1024)
+-
+-/* Let modules/drivers get at CCSRBAR */
+-extern phys_addr_t get_ccsrbar(void);
+-
+-#ifdef MODULE
+-#define CCSRBAR get_ccsrbar()
+-#else
+-#define CCSRBAR BOARD_CCSRBAR
+-#endif
+-
+-enum ppc_sys_devices {
+-	MPC85xx_TSEC1,
+-	MPC85xx_TSEC2,
+-	MPC85xx_FEC,
+-	MPC85xx_IIC1,
+-	MPC85xx_DMA0,
+-	MPC85xx_DMA1,
+-	MPC85xx_DMA2,
+-	MPC85xx_DMA3,
+-	MPC85xx_DUART,
+-	MPC85xx_PERFMON,
+-	MPC85xx_SEC2,
+-	MPC85xx_CPM_SPI,
+-	MPC85xx_CPM_I2C,
+-	MPC85xx_CPM_USB,
+-	MPC85xx_CPM_SCC1,
+-	MPC85xx_CPM_SCC2,
+-	MPC85xx_CPM_SCC3,
+-	MPC85xx_CPM_SCC4,
+-	MPC85xx_CPM_FCC1,
+-	MPC85xx_CPM_FCC2,
+-	MPC85xx_CPM_FCC3,
+-	MPC85xx_CPM_MCC1,
+-	MPC85xx_CPM_MCC2,
+-	MPC85xx_CPM_SMC1,
+-	MPC85xx_CPM_SMC2,
+-	MPC85xx_eTSEC1,
+-	MPC85xx_eTSEC2,
+-	MPC85xx_eTSEC3,
+-	MPC85xx_eTSEC4,
+-	MPC85xx_IIC2,
+-	MPC85xx_MDIO,
+-	NUM_PPC_SYS_DEVS,
+-};
+-
+-/* Internal interrupts are all Level Sensitive, and Positive Polarity */
+-#define MPC85XX_INTERNAL_IRQ_SENSES \
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  0 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  1 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  2 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  3 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  4 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  5 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  6 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  7 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  8 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal  9 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 10 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 11 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 12 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 13 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 14 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 15 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 16 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 17 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 18 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 19 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 20 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 21 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 22 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 23 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 24 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 25 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 26 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 27 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 28 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 29 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 30 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 31 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 32 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 33 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 34 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 35 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 36 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 37 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 38 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 39 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 40 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 41 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 42 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 43 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 44 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 45 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE),	/* Internal 46 */	\
+-	(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE)	/* Internal 47 */
+-
+-#endif /* CONFIG_85xx */
+-#endif /* __ASM_MPC85xx_H__ */
+-#endif /* __KERNEL__ */
+diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
+index 063ad91..69347bd 100644
+--- a/include/asm-ppc/pgtable.h
++++ b/include/asm-ppc/pgtable.h
+@@ -271,48 +271,6 @@ extern unsigned long ioremap_bot, ioremap_base;
+ /* ERPN in a PTE never gets cleared, ignore it */
+ #define _PTE_NONE_MASK	0xffffffff00000000ULL
+ 
+-#elif defined(CONFIG_FSL_BOOKE)
+-/*
+-   MMU Assist Register 3:
+-
+-   32 33 34 35 36  ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+-   RPN......................  0  0 U0 U1 U2 U3 UX SX UW SW UR SR
+-
+-   - PRESENT *must* be in the bottom three bits because swap cache
+-     entries use the top 29 bits.
+-
+-   - FILE *must* be in the bottom three bits because swap cache
+-     entries use the top 29 bits.
+-*/
+-
+-/* Definitions for FSL Book-E Cores */
+-#define _PAGE_PRESENT	0x00001	/* S: PTE contains a translation */
+-#define _PAGE_USER	0x00002	/* S: User page (maps to UR) */
+-#define _PAGE_FILE	0x00002	/* S: when !present: nonlinear file mapping */
+-#define _PAGE_ACCESSED	0x00004	/* S: Page referenced */
+-#define _PAGE_HWWRITE	0x00008	/* H: Dirty & RW, set in exception */
+-#define _PAGE_RW	0x00010	/* S: Write permission */
+-#define _PAGE_HWEXEC	0x00020	/* H: UX permission */
+-
+-#define _PAGE_ENDIAN	0x00040	/* H: E bit */
+-#define _PAGE_GUARDED	0x00080	/* H: G bit */
+-#define _PAGE_COHERENT	0x00100	/* H: M bit */
+-#define _PAGE_NO_CACHE	0x00200	/* H: I bit */
+-#define _PAGE_WRITETHRU	0x00400	/* H: W bit */
+-
+-#ifdef CONFIG_PTE_64BIT
+-#define _PAGE_DIRTY	0x08000	/* S: Page dirty */
+-
+-/* ERPN in a PTE never gets cleared, ignore it */
+-#define _PTE_NONE_MASK	0xffffffffffff0000ULL
+-#else
+-#define _PAGE_DIRTY	0x00800	/* S: Page dirty */
+-#endif
+-
+-#define _PMD_PRESENT	0
+-#define _PMD_PRESENT_MASK (PAGE_MASK)
+-#define _PMD_BAD	(~PAGE_MASK)
+-
+ #elif defined(CONFIG_8xx)
+ /* Definitions for 8xx embedded chips. */
+ #define _PAGE_PRESENT	0x0001	/* Page is valid */
+@@ -484,11 +442,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
+ 
+ /* in some case we want to additionaly adjust where the pfn is in the pte to
+  * allow room for more flags */
+-#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+-#define PFN_SHIFT_OFFSET	(PAGE_SHIFT + 8)
+-#else
+ #define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
+-#endif
+ 
+ #define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
+ #define pte_page(x)		pfn_to_page(pte_pfn(x))
+diff --git a/include/asm-ppc/ppc_sys.h b/include/asm-ppc/ppc_sys.h
+index de99e92..d2fee41 100644
+--- a/include/asm-ppc/ppc_sys.h
++++ b/include/asm-ppc/ppc_sys.h
+@@ -23,10 +23,6 @@
+ 
+ #if defined(CONFIG_8260)
+ #include <asm/mpc8260.h>
+-#elif defined(CONFIG_83xx)
+-#include <asm/mpc83xx.h>
+-#elif defined(CONFIG_85xx)
+-#include <asm/mpc85xx.h>
+ #elif defined(CONFIG_8xx)
+ #include <asm/mpc8xx.h>
+ #elif defined(CONFIG_PPC_MPC52xx)
+diff --git a/include/asm-ppc/ppcboot.h b/include/asm-ppc/ppcboot.h
+index 6b7b63f..3819e17 100644
+--- a/include/asm-ppc/ppcboot.h
++++ b/include/asm-ppc/ppcboot.h
+@@ -38,8 +38,7 @@ typedef struct bd_info {
+ 	unsigned long	bi_flashoffset; /* reserved area for startup monitor */
+ 	unsigned long	bi_sramstart;	/* start of SRAM memory */
+ 	unsigned long	bi_sramsize;	/* size	 of SRAM memory */
+-#if defined(CONFIG_8xx) || defined(CONFIG_CPM2) || defined(CONFIG_85xx) ||\
+-	defined(CONFIG_83xx)
++#if defined(CONFIG_8xx) || defined(CONFIG_CPM2)
+ 	unsigned long	bi_immr_base;	/* base of IMMR register */
+ #endif
+ #if defined(CONFIG_PPC_MPC52xx)
+@@ -73,12 +72,11 @@ typedef struct bd_info {
+ #if defined(CONFIG_HYMOD)
+ 	hymod_conf_t	bi_hymod_conf;	/* hymod configuration information */
+ #endif
+-#if defined(CONFIG_EVB64260) || defined(CONFIG_405EP) || defined(CONFIG_44x) || \
+-	defined(CONFIG_85xx) ||	defined(CONFIG_83xx)
++#if defined(CONFIG_EVB64260) || defined(CONFIG_405EP) || defined(CONFIG_44x)
+ 	/* second onboard ethernet port */
+ 	unsigned char	bi_enet1addr[6];
+ #endif
+-#if defined(CONFIG_EVB64260) || defined(CONFIG_440GX) || defined(CONFIG_85xx)
++#if defined(CONFIG_EVB64260) || defined(CONFIG_440GX)
+ 	/* third onboard ethernet ports */
+ 	unsigned char	bi_enet2addr[6];
+ #endif
+diff --git a/include/asm-ppc/reg_booke.h b/include/asm-ppc/reg_booke.h
+index 82948ed..91e96af 100644
+--- a/include/asm-ppc/reg_booke.h
++++ b/include/asm-ppc/reg_booke.h
+@@ -207,7 +207,7 @@
+ #define	CCR1_TCS	0x00000080 /* Timer Clock Select */
+ 
+ /* Bit definitions for the MCSR. */
+-#ifdef CONFIG_440A
++#ifdef CONFIG_4xx
+ #define MCSR_MCS	0x80000000 /* Machine Check Summary */
+ #define MCSR_IB		0x40000000 /* Instruction PLB Error */
+ #define MCSR_DRB	0x20000000 /* Data Read PLB Error */
+@@ -218,32 +218,6 @@
+ #define MCSR_DCFP	0x01000000 /* D-Cache Flush Parity Error */
+ #define MCSR_IMPE	0x00800000 /* Imprecise Machine Check Exception */
+ #endif
+-#ifdef CONFIG_E500
+-#define MCSR_MCP 	0x80000000UL /* Machine Check Input Pin */
+-#define MCSR_ICPERR 	0x40000000UL /* I-Cache Parity Error */
+-#define MCSR_DCP_PERR 	0x20000000UL /* D-Cache Push Parity Error */
+-#define MCSR_DCPERR 	0x10000000UL /* D-Cache Parity Error */
+-#define MCSR_GL_CI 	0x00010000UL /* Guarded Load or Cache-Inhibited stwcx. */
+-#define MCSR_BUS_IAERR 	0x00000080UL /* Instruction Address Error */
+-#define MCSR_BUS_RAERR 	0x00000040UL /* Read Address Error */
+-#define MCSR_BUS_WAERR 	0x00000020UL /* Write Address Error */
+-#define MCSR_BUS_IBERR 	0x00000010UL /* Instruction Data Error */
+-#define MCSR_BUS_RBERR 	0x00000008UL /* Read Data Bus Error */
+-#define MCSR_BUS_WBERR 	0x00000004UL /* Write Data Bus Error */
+-#define MCSR_BUS_IPERR 	0x00000002UL /* Instruction parity Error */
+-#define MCSR_BUS_RPERR 	0x00000001UL /* Read parity Error */
+-#endif
+-#ifdef CONFIG_E200
+-#define MCSR_MCP 	0x80000000UL /* Machine Check Input Pin */
+-#define MCSR_CP_PERR 	0x20000000UL /* Cache Push Parity Error */
+-#define MCSR_CPERR 	0x10000000UL /* Cache Parity Error */
+-#define MCSR_EXCP_ERR 	0x08000000UL /* ISI, ITLB, or Bus Error on 1st insn
+-					fetch for an exception handler */
+-#define MCSR_BUS_IRERR 	0x00000010UL /* Read Bus Error on instruction fetch*/
+-#define MCSR_BUS_DRERR 	0x00000008UL /* Read Bus Error on data load */
+-#define MCSR_BUS_WRERR 	0x00000004UL /* Write Bus Error on buffered
+-					store or cache line push */
+-#endif
+ 
+ /* Bit definitions for the DBSR. */
+ /*
+@@ -283,7 +257,7 @@
+ #define ESR_IMCB	0x20000000	/* Instr. Machine Check - Bus error */
+ #define ESR_IMCT	0x10000000	/* Instr. Machine Check - Timeout */
+ #define ESR_PIL		0x08000000	/* Program Exception - Illegal */
+-#define ESR_PPR		0x04000000	/* Program Exception - Priveleged */
++#define ESR_PPR		0x04000000	/* Program Exception - Privileged */
+ #define ESR_PTR		0x02000000	/* Program Exception - Trap */
+ #define ESR_FP		0x01000000	/* Floating Point Operation */
+ #define ESR_DST		0x00800000	/* Storage Exception - Data miss */
+diff --git a/include/asm-ppc/serial.h b/include/asm-ppc/serial.h
+index 8fc1b54..d35ed10 100644
+--- a/include/asm-ppc/serial.h
++++ b/include/asm-ppc/serial.h
+@@ -29,10 +29,6 @@
+ #include <platforms/spruce.h>
+ #elif defined(CONFIG_4xx)
+ #include <asm/ibm4xx.h>
+-#elif defined(CONFIG_83xx)
+-#include <asm/mpc83xx.h>
+-#elif defined(CONFIG_85xx)
+-#include <asm/mpc85xx.h>
+ #elif defined(CONFIG_RADSTONE_PPC7D)
+ #include <platforms/radstone_ppc7d.h>
+ #else
 diff --git a/include/asm-s390/airq.h b/include/asm-s390/airq.h
 new file mode 100644
 index 0000000..41d028c
@@ -598228,7 +754487,7 @@
  			     : : "m" (S390_lowcore.user_exec_asce) );
  	} else
 diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
-index 545857e..2d676a8 100644
+index 545857e..408d60b 100644
 --- a/include/asm-s390/percpu.h
 +++ b/include/asm-s390/percpu.h
 @@ -4,8 +4,6 @@
@@ -598240,10 +754499,38 @@
  /*
   * s390 uses its own implementation for per cpu data, the offset of
   * the cpu local data area is cached in the cpu's lowcore memory.
-@@ -36,16 +34,6 @@
+@@ -15,67 +13,25 @@
+  */
+ #if defined(__s390x__) && defined(MODULE)
  
- extern unsigned long __per_cpu_offset[NR_CPUS];
+-#define __reloc_hide(var,offset) (*({			\
++#define SHIFT_PERCPU_PTR(ptr,offset) (({			\
+ 	extern int simple_identifier_##var(void);	\
+ 	unsigned long *__ptr;				\
+-	asm ( "larl %0,per_cpu__"#var"@GOTENT"		\
+-	    : "=a" (__ptr) : "X" (per_cpu__##var) );	\
+-	(typeof(&per_cpu__##var))((*__ptr) + (offset));	}))
++	asm ( "larl %0, %1 at GOTENT"		\
++	    : "=a" (__ptr) : "X" (ptr) );		\
++	(typeof(ptr))((*__ptr) + (offset));	}))
+ 
+ #else
+ 
+-#define __reloc_hide(var, offset) (*({				\
++#define SHIFT_PERCPU_PTR(ptr, offset) (({				\
+ 	extern int simple_identifier_##var(void);		\
+ 	unsigned long __ptr;					\
+-	asm ( "" : "=a" (__ptr) : "0" (&per_cpu__##var) );	\
+-	(typeof(&per_cpu__##var)) (__ptr + (offset)); }))
++	asm ( "" : "=a" (__ptr) : "0" (ptr) );			\
++	(typeof(ptr)) (__ptr + (offset)); }))
  
+ #endif
+ 
+-#ifdef CONFIG_SMP
+-
+-extern unsigned long __per_cpu_offset[NR_CPUS];
+-
 -/* Separate out the type, so (int[3], foo) works. */
 -#define DEFINE_PER_CPU(type, name) \
 -    __attribute__((__section__(".data.percpu"))) \
@@ -598254,28 +754541,40 @@
 -    __typeof__(type) per_cpu__##name				\
 -    ____cacheline_aligned_in_smp
 -
- #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
- #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
- #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
-@@ -62,11 +50,6 @@ do {								\
- 
- #else /* ! SMP */
- 
+-#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
+-#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
+-#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
+-#define per_cpu_offset(x) (__per_cpu_offset[x])
+-
+-/* A macro to avoid #include hell... */
+-#define percpu_modcopy(pcpudst, src, size)			\
+-do {								\
+-	unsigned int __i;					\
+-	for_each_possible_cpu(__i)				\
+-		memcpy((pcpudst)+__per_cpu_offset[__i],		\
+-		       (src), (size));				\
+-} while (0)
+-
+-#else /* ! SMP */
+-
 -#define DEFINE_PER_CPU(type, name) \
 -    __typeof__(type) per_cpu__##name
 -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
 -    DEFINE_PER_CPU(type, name)
 -
- #define __get_cpu_var(var) __reloc_hide(var,0)
- #define __raw_get_cpu_var(var) __reloc_hide(var,0)
- #define per_cpu(var,cpu) __reloc_hide(var,0)
-@@ -75,7 +58,4 @@ do {								\
- 
- #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+-#define __get_cpu_var(var) __reloc_hide(var,0)
+-#define __raw_get_cpu_var(var) __reloc_hide(var,0)
+-#define per_cpu(var,cpu) __reloc_hide(var,0)
+-
+-#endif /* SMP */
+-
+-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
++#define __my_cpu_offset S390_lowcore.percpu_offset
  
 -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
--
++#include <asm-generic/percpu.h>
+ 
  #endif /* __ARCH_S390_PERCPU__ */
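With the arch-specific DEFINE_PER_CPU variants gone, s390 now only supplies SHIFT_PERCPU_PTR() and __my_cpu_offset and lets asm-generic/percpu.h build the usual accessors on top. A conceptual sketch of what that buys, not part of the patch -- the variable name is a placeholder and the expansion comments are paraphrased from the generic header of this period:

/* Placeholder per-cpu variable; the accessors come from <linux/percpu.h>,
 * which now reaches asm-generic/percpu.h on s390 as well. */
#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, my_counter);

static void my_touch_counters(void)
{
	int cpu;

	/* this CPU: the generic code shifts the pointer by __my_cpu_offset,
	 * which the patch defines as S390_lowcore.percpu_offset */
	get_cpu();
	__get_cpu_var(my_counter)++;
	put_cpu();

	/* other CPUs: shifted by per_cpu_offset(cpu) instead */
	for_each_possible_cpu(cpu)
		per_cpu(my_counter, cpu) = 0;
}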
 diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
 index 1f530f8..79b9eab 100644
@@ -615915,10 +772214,18 @@
  
  /* Convert a physical address to an address suitable for the GART. */
 diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
-index a1f53a4..c7e52de 100644
+index a1f53a4..bee6459 100644
 --- a/include/asm-sparc64/percpu.h
 +++ b/include/asm-sparc64/percpu.h
-@@ -16,15 +16,6 @@ extern unsigned long __per_cpu_shift;
+@@ -7,7 +7,6 @@ register unsigned long __local_per_cpu_offset asm("g5");
+ 
+ #ifdef CONFIG_SMP
+ 
+-#define setup_per_cpu_areas()			do { } while (0)
+ extern void real_setup_per_cpu_areas(void);
+ 
+ extern unsigned long __per_cpu_base;
+@@ -16,45 +15,14 @@ extern unsigned long __per_cpu_shift;
  	(__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
  #define per_cpu_offset(x) (__per_cpu_offset(x))
  
@@ -615931,10 +772238,21 @@
 -    __typeof__(type) per_cpu__##name				\
 -    ____cacheline_aligned_in_smp
 -
- /* var is in discarded region: offset to particular copy we want */
- #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
- #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
-@@ -41,10 +32,6 @@ do {								\
+-/* var is in discarded region: offset to particular copy we want */
+-#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
+-#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
+-#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
+-
+-/* A macro to avoid #include hell... */
+-#define percpu_modcopy(pcpudst, src, size)			\
+-do {								\
+-	unsigned int __i;					\
+-	for_each_possible_cpu(__i)				\
+-		memcpy((pcpudst)+__per_cpu_offset(__i),		\
+-		       (src), (size));				\
+-} while (0)
++#define __my_cpu_offset __local_per_cpu_offset
++
  #else /* ! SMP */
  
  #define real_setup_per_cpu_areas()		do { } while (0)
@@ -615942,16 +772260,19 @@
 -    __typeof__(type) per_cpu__##name
 -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
 -    DEFINE_PER_CPU(type, name)
+-
+-#define per_cpu(var, cpu)			(*((void)cpu, &per_cpu__##var))
+-#define __get_cpu_var(var)			per_cpu__##var
+-#define __raw_get_cpu_var(var)			per_cpu__##var
  
- #define per_cpu(var, cpu)			(*((void)cpu, &per_cpu__##var))
- #define __get_cpu_var(var)			per_cpu__##var
-@@ -54,7 +41,4 @@ do {								\
- 
- #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+ #endif	/* SMP */
  
+-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+-
 -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
--
++#include <asm-generic/percpu.h>
+ 
  #endif /* __ARCH_SPARC64_PERCPU__ */
 diff --git a/include/asm-um/asm.h b/include/asm-um/asm.h
 new file mode 100644
@@ -615990,10 +772311,17 @@
 +
 +#endif
 diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
-index 12db5a1..e6189b2 100644
+index 12db5a1..3c6f0f8 100644
 --- a/include/asm-x86/Kbuild
 +++ b/include/asm-x86/Kbuild
-@@ -9,15 +9,13 @@ header-y += prctl.h
+@@ -3,21 +3,20 @@ include include/asm-generic/Kbuild.asm
+ header-y += boot.h
+ header-y += bootparam.h
+ header-y += debugreg.h
++header-y += kvm.h
+ header-y += ldt.h
+ header-y += msr-index.h
+ header-y += prctl.h
  header-y += ptrace-abi.h
  header-y += sigcontext32.h
  header-y += ucontext.h
@@ -626676,11 +783004,1128 @@
 -extern int kprobe_exceptions_notify(struct notifier_block *self,
 -				    unsigned long val, void *data);
 -#endif				/* _ASM_KPROBES_H */
+diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
+new file mode 100644
+index 0000000..7a71120
+--- /dev/null
++++ b/include/asm-x86/kvm.h
+@@ -0,0 +1,191 @@
++#ifndef __LINUX_KVM_X86_H
++#define __LINUX_KVM_X86_H
++
++/*
++ * KVM x86 specific structures and definitions
++ *
++ */
++
++#include <asm/types.h>
++#include <linux/ioctl.h>
++
++/* Architectural interrupt line count. */
++#define KVM_NR_INTERRUPTS 256
++
++struct kvm_memory_alias {
++	__u32 slot;  /* this has a different namespace than memory slots */
++	__u32 flags;
++	__u64 guest_phys_addr;
++	__u64 memory_size;
++	__u64 target_phys_addr;
++};
++
++/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
++struct kvm_pic_state {
++	__u8 last_irr;	/* edge detection */
++	__u8 irr;		/* interrupt request register */
++	__u8 imr;		/* interrupt mask register */
++	__u8 isr;		/* interrupt service register */
++	__u8 priority_add;	/* highest irq priority */
++	__u8 irq_base;
++	__u8 read_reg_select;
++	__u8 poll;
++	__u8 special_mask;
++	__u8 init_state;
++	__u8 auto_eoi;
++	__u8 rotate_on_auto_eoi;
++	__u8 special_fully_nested_mode;
++	__u8 init4;		/* true if 4 byte init */
++	__u8 elcr;		/* PIIX edge/trigger selection */
++	__u8 elcr_mask;
++};
++
++#define KVM_IOAPIC_NUM_PINS  24
++struct kvm_ioapic_state {
++	__u64 base_address;
++	__u32 ioregsel;
++	__u32 id;
++	__u32 irr;
++	__u32 pad;
++	union {
++		__u64 bits;
++		struct {
++			__u8 vector;
++			__u8 delivery_mode:3;
++			__u8 dest_mode:1;
++			__u8 delivery_status:1;
++			__u8 polarity:1;
++			__u8 remote_irr:1;
++			__u8 trig_mode:1;
++			__u8 mask:1;
++			__u8 reserve:7;
++			__u8 reserved[4];
++			__u8 dest_id;
++		} fields;
++	} redirtbl[KVM_IOAPIC_NUM_PINS];
++};
++
++#define KVM_IRQCHIP_PIC_MASTER   0
++#define KVM_IRQCHIP_PIC_SLAVE    1
++#define KVM_IRQCHIP_IOAPIC       2
++
++/* for KVM_GET_REGS and KVM_SET_REGS */
++struct kvm_regs {
++	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
++	__u64 rax, rbx, rcx, rdx;
++	__u64 rsi, rdi, rsp, rbp;
++	__u64 r8,  r9,  r10, r11;
++	__u64 r12, r13, r14, r15;
++	__u64 rip, rflags;
++};
++
++/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
++#define KVM_APIC_REG_SIZE 0x400
++struct kvm_lapic_state {
++	char regs[KVM_APIC_REG_SIZE];
++};
++
++struct kvm_segment {
++	__u64 base;
++	__u32 limit;
++	__u16 selector;
++	__u8  type;
++	__u8  present, dpl, db, s, l, g, avl;
++	__u8  unusable;
++	__u8  padding;
++};
++
++struct kvm_dtable {
++	__u64 base;
++	__u16 limit;
++	__u16 padding[3];
++};
++
++
++/* for KVM_GET_SREGS and KVM_SET_SREGS */
++struct kvm_sregs {
++	/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
++	struct kvm_segment cs, ds, es, fs, gs, ss;
++	struct kvm_segment tr, ldt;
++	struct kvm_dtable gdt, idt;
++	__u64 cr0, cr2, cr3, cr4, cr8;
++	__u64 efer;
++	__u64 apic_base;
++	__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
++};
++
++/* for KVM_GET_FPU and KVM_SET_FPU */
++struct kvm_fpu {
++	__u8  fpr[8][16];
++	__u16 fcw;
++	__u16 fsw;
++	__u8  ftwx;  /* in fxsave format */
++	__u8  pad1;
++	__u16 last_opcode;
++	__u64 last_ip;
++	__u64 last_dp;
++	__u8  xmm[16][16];
++	__u32 mxcsr;
++	__u32 pad2;
++};
++
++struct kvm_msr_entry {
++	__u32 index;
++	__u32 reserved;
++	__u64 data;
++};
++
++/* for KVM_GET_MSRS and KVM_SET_MSRS */
++struct kvm_msrs {
++	__u32 nmsrs; /* number of msrs in entries */
++	__u32 pad;
++
++	struct kvm_msr_entry entries[0];
++};
++
++/* for KVM_GET_MSR_INDEX_LIST */
++struct kvm_msr_list {
++	__u32 nmsrs; /* number of msrs in entries */
++	__u32 indices[0];
++};
++
++
++struct kvm_cpuid_entry {
++	__u32 function;
++	__u32 eax;
++	__u32 ebx;
++	__u32 ecx;
++	__u32 edx;
++	__u32 padding;
++};
++
++/* for KVM_SET_CPUID */
++struct kvm_cpuid {
++	__u32 nent;
++	__u32 padding;
++	struct kvm_cpuid_entry entries[0];
++};
++
++struct kvm_cpuid_entry2 {
++	__u32 function;
++	__u32 index;
++	__u32 flags;
++	__u32 eax;
++	__u32 ebx;
++	__u32 ecx;
++	__u32 edx;
++	__u32 padding[3];
++};
++
++#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
++#define KVM_CPUID_FLAG_STATEFUL_FUNC    2
++#define KVM_CPUID_FLAG_STATE_READ_NEXT  4
++
++/* for KVM_SET_CPUID2 */
++struct kvm_cpuid2 {
++	__u32 nent;
++	__u32 padding;
++	struct kvm_cpuid_entry2 entries[0];
++};
++
++#endif
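
The kvm_cpuid2 and kvm_msrs structures above end in zero-length arrays (entries[0], indices[0]), so userspace sizes the buffer itself and tells the kernel how many entries it allocated via nent/nmsrs. Below is a minimal userspace sketch of that pattern using the KVM_GET_SUPPORTED_CPUID ioctl added elsewhere in this patch; the 'fd' argument and the retry-on-E2BIG note are assumptions about the caller, not text from the patch.

/* Illustrative sketch, not part of the patch: querying supported CPUID
 * leaves through the flexible-array kvm_cpuid2 layout.  'fd' is whatever
 * KVM file descriptor accepts KVM_GET_SUPPORTED_CPUID. */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_cpuid2 *get_supported_cpuid(int fd)
{
	int nent = 64;                  /* initial guess at the entry count */
	struct kvm_cpuid2 *cpuid;

	/* Header plus nent trailing kvm_cpuid_entry2 elements. */
	cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
	if (!cpuid)
		return NULL;
	cpuid->nent = nent;

	if (ioctl(fd, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
		free(cpuid);            /* E2BIG here would mean "grow and retry" */
		return NULL;
	}
	return cpuid;                   /* kernel wrote back the real nent */
}
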
+diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
+new file mode 100644
+index 0000000..4702b04
+--- /dev/null
++++ b/include/asm-x86/kvm_host.h
+@@ -0,0 +1,611 @@
++/*
++ * Kernel-based Virtual Machine driver for Linux
++ *
++ * This header defines architecture specific interfaces, x86 version
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
++ *
++ */
++
++#ifndef ASM_KVM_HOST_H
++#define ASM_KVM_HOST_H
++
++#include <linux/types.h>
++#include <linux/mm.h>
++
++#include <linux/kvm.h>
++#include <linux/kvm_para.h>
++#include <linux/kvm_types.h>
++
++#include <asm/desc.h>
++
++#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
++#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
++#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
++
++#define KVM_GUEST_CR0_MASK \
++	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
++	 | X86_CR0_NW | X86_CR0_CD)
++#define KVM_VM_CR0_ALWAYS_ON \
++	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
++	 | X86_CR0_MP)
++#define KVM_GUEST_CR4_MASK \
++	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
++#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
++#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
++
++#define INVALID_PAGE (~(hpa_t)0)
++#define UNMAPPED_GVA (~(gpa_t)0)
++
++#define DE_VECTOR 0
++#define UD_VECTOR 6
++#define NM_VECTOR 7
++#define DF_VECTOR 8
++#define TS_VECTOR 10
++#define NP_VECTOR 11
++#define SS_VECTOR 12
++#define GP_VECTOR 13
++#define PF_VECTOR 14
++
++#define SELECTOR_TI_MASK (1 << 2)
++#define SELECTOR_RPL_MASK 0x03
++
++#define IOPL_SHIFT 12
++
++#define KVM_ALIAS_SLOTS 4
++
++#define KVM_PERMILLE_MMU_PAGES 20
++#define KVM_MIN_ALLOC_MMU_PAGES 64
++#define KVM_NUM_MMU_PAGES 1024
++#define KVM_MIN_FREE_MMU_PAGES 5
++#define KVM_REFILL_PAGES 25
++#define KVM_MAX_CPUID_ENTRIES 40
++
++extern spinlock_t kvm_lock;
++extern struct list_head vm_list;
++
++struct kvm_vcpu;
++struct kvm;
++
++enum {
++	VCPU_REGS_RAX = 0,
++	VCPU_REGS_RCX = 1,
++	VCPU_REGS_RDX = 2,
++	VCPU_REGS_RBX = 3,
++	VCPU_REGS_RSP = 4,
++	VCPU_REGS_RBP = 5,
++	VCPU_REGS_RSI = 6,
++	VCPU_REGS_RDI = 7,
++#ifdef CONFIG_X86_64
++	VCPU_REGS_R8 = 8,
++	VCPU_REGS_R9 = 9,
++	VCPU_REGS_R10 = 10,
++	VCPU_REGS_R11 = 11,
++	VCPU_REGS_R12 = 12,
++	VCPU_REGS_R13 = 13,
++	VCPU_REGS_R14 = 14,
++	VCPU_REGS_R15 = 15,
++#endif
++	NR_VCPU_REGS
++};
++
++enum {
++	VCPU_SREG_CS,
++	VCPU_SREG_DS,
++	VCPU_SREG_ES,
++	VCPU_SREG_FS,
++	VCPU_SREG_GS,
++	VCPU_SREG_SS,
++	VCPU_SREG_TR,
++	VCPU_SREG_LDTR,
++};
++
++#include <asm/kvm_x86_emulate.h>
++
++#define KVM_NR_MEM_OBJS 40
++
++/*
++ * We don't want allocation failures within the mmu code, so we preallocate
++ * enough memory for a single page fault in a cache.
++ */
++struct kvm_mmu_memory_cache {
++	int nobjs;
++	void *objects[KVM_NR_MEM_OBJS];
++};
++
++#define NR_PTE_CHAIN_ENTRIES 5
++
++struct kvm_pte_chain {
++	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
++	struct hlist_node link;
++};
++
++/*
++ * kvm_mmu_page_role, below, is defined as:
++ *
++ *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
++ *   bits 4:7 - page table level for this shadow (1-4)
++ *   bits 8:9 - page table quadrant for 2-level guests
++ *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
++ *   bits 17:19 - common access permissions for all ptes in this shadow page
++ */
++union kvm_mmu_page_role {
++	unsigned word;
++	struct {
++		unsigned glevels : 4;
++		unsigned level : 4;
++		unsigned quadrant : 2;
++		unsigned pad_for_nice_hex_output : 6;
++		unsigned metaphysical : 1;
++		unsigned access : 3;
++	};
++};
++
++struct kvm_mmu_page {
++	struct list_head link;
++	struct hlist_node hash_link;
++
++	/*
++	 * The following two entries are used to key the shadow page in the
++	 * hash table.
++	 */
++	gfn_t gfn;
++	union kvm_mmu_page_role role;
++
++	u64 *spt;
++	/* hold the gfn of each spte inside spt */
++	gfn_t *gfns;
++	unsigned long slot_bitmap; /* One bit set per slot which has memory
++				    * in this shadow page.
++				    */
++	int multimapped;         /* More than one parent_pte? */
++	int root_count;          /* Currently serving as active root */
++	union {
++		u64 *parent_pte;               /* !multimapped */
++		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
++	};
++};
++
++/*
++ * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
++ * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
++ * mode.
++ */
++struct kvm_mmu {
++	void (*new_cr3)(struct kvm_vcpu *vcpu);
++	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
++	void (*free)(struct kvm_vcpu *vcpu);
++	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
++	void (*prefetch_page)(struct kvm_vcpu *vcpu,
++			      struct kvm_mmu_page *page);
++	hpa_t root_hpa;
++	int root_level;
++	int shadow_root_level;
++
++	u64 *pae_root;
++};
++
++struct kvm_vcpu_arch {
++	u64 host_tsc;
++	int interrupt_window_open;
++	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
++	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
++	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
++	unsigned long rip;      /* needs vcpu_load_rsp_rip() */
++
++	unsigned long cr0;
++	unsigned long cr2;
++	unsigned long cr3;
++	unsigned long cr4;
++	unsigned long cr8;
++	u64 pdptrs[4]; /* pae */
++	u64 shadow_efer;
++	u64 apic_base;
++	struct kvm_lapic *apic;    /* kernel irqchip context */
++#define VCPU_MP_STATE_RUNNABLE          0
++#define VCPU_MP_STATE_UNINITIALIZED     1
++#define VCPU_MP_STATE_INIT_RECEIVED     2
++#define VCPU_MP_STATE_SIPI_RECEIVED     3
++#define VCPU_MP_STATE_HALTED            4
++	int mp_state;
++	int sipi_vector;
++	u64 ia32_misc_enable_msr;
++	bool tpr_access_reporting;
++
++	struct kvm_mmu mmu;
++
++	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
++	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
++	struct kvm_mmu_memory_cache mmu_page_cache;
++	struct kvm_mmu_memory_cache mmu_page_header_cache;
++
++	gfn_t last_pt_write_gfn;
++	int   last_pt_write_count;
++	u64  *last_pte_updated;
++
++	struct {
++		gfn_t gfn;          /* presumed gfn during guest pte update */
++		struct page *page;  /* page corresponding to that gfn */
++	} update_pte;
++
++	struct i387_fxsave_struct host_fx_image;
++	struct i387_fxsave_struct guest_fx_image;
++
++	gva_t mmio_fault_cr2;
++	struct kvm_pio_request pio;
++	void *pio_data;
++
++	struct kvm_queued_exception {
++		bool pending;
++		bool has_error_code;
++		u8 nr;
++		u32 error_code;
++	} exception;
++
++	struct {
++		int active;
++		u8 save_iopl;
++		struct kvm_save_segment {
++			u16 selector;
++			unsigned long base;
++			u32 limit;
++			u32 ar;
++		} tr, es, ds, fs, gs;
++	} rmode;
++	int halt_request; /* real mode on Intel only */
++
++	int cpuid_nent;
++	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
++	/* emulate context */
++
++	struct x86_emulate_ctxt emulate_ctxt;
++};
++
++struct kvm_mem_alias {
++	gfn_t base_gfn;
++	unsigned long npages;
++	gfn_t target_gfn;
++};
++
++struct kvm_arch{
++	int naliases;
++	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
++
++	unsigned int n_free_mmu_pages;
++	unsigned int n_requested_mmu_pages;
++	unsigned int n_alloc_mmu_pages;
++	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
++	/*
++	 * Hash table of struct kvm_mmu_page.
++	 */
++	struct list_head active_mmu_pages;
++	struct kvm_pic *vpic;
++	struct kvm_ioapic *vioapic;
++
++	int round_robin_prev_vcpu;
++	unsigned int tss_addr;
++	struct page *apic_access_page;
++};
++
++struct kvm_vm_stat {
++	u32 mmu_shadow_zapped;
++	u32 mmu_pte_write;
++	u32 mmu_pte_updated;
++	u32 mmu_pde_zapped;
++	u32 mmu_flooded;
++	u32 mmu_recycled;
++	u32 mmu_cache_miss;
++	u32 remote_tlb_flush;
++};
++
++struct kvm_vcpu_stat {
++	u32 pf_fixed;
++	u32 pf_guest;
++	u32 tlb_flush;
++	u32 invlpg;
++
++	u32 exits;
++	u32 io_exits;
++	u32 mmio_exits;
++	u32 signal_exits;
++	u32 irq_window_exits;
++	u32 halt_exits;
++	u32 halt_wakeup;
++	u32 request_irq_exits;
++	u32 irq_exits;
++	u32 host_state_reload;
++	u32 efer_reload;
++	u32 fpu_reload;
++	u32 insn_emulation;
++	u32 insn_emulation_fail;
++};
++
++struct descriptor_table {
++	u16 limit;
++	unsigned long base;
++} __attribute__((packed));
++
++struct kvm_x86_ops {
++	int (*cpu_has_kvm_support)(void);          /* __init */
++	int (*disabled_by_bios)(void);             /* __init */
++	void (*hardware_enable)(void *dummy);      /* __init */
++	void (*hardware_disable)(void *dummy);
++	void (*check_processor_compatibility)(void *rtn);
++	int (*hardware_setup)(void);               /* __init */
++	void (*hardware_unsetup)(void);            /* __exit */
++	bool (*cpu_has_accelerated_tpr)(void);
++
++	/* Create, but do not attach this VCPU */
++	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
++	void (*vcpu_free)(struct kvm_vcpu *vcpu);
++	int (*vcpu_reset)(struct kvm_vcpu *vcpu);
++
++	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
++	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
++	void (*vcpu_put)(struct kvm_vcpu *vcpu);
++	void (*vcpu_decache)(struct kvm_vcpu *vcpu);
++
++	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
++			       struct kvm_debug_guest *dbg);
++	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
++	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
++	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
++	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
++	void (*get_segment)(struct kvm_vcpu *vcpu,
++			    struct kvm_segment *var, int seg);
++	void (*set_segment)(struct kvm_vcpu *vcpu,
++			    struct kvm_segment *var, int seg);
++	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
++	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
++	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
++	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
++	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
++	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
++	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
++	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
++	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
++	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
++	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
++	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
++		       int *exception);
++	void (*cache_regs)(struct kvm_vcpu *vcpu);
++	void (*decache_regs)(struct kvm_vcpu *vcpu);
++	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
++	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
++
++	void (*tlb_flush)(struct kvm_vcpu *vcpu);
++
++	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
++	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
++	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
++	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
++				unsigned char *hypercall_addr);
++	int (*get_irq)(struct kvm_vcpu *vcpu);
++	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
++	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
++				bool has_error_code, u32 error_code);
++	bool (*exception_injected)(struct kvm_vcpu *vcpu);
++	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
++	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
++				       struct kvm_run *run);
++
++	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
++};
++
++extern struct kvm_x86_ops *kvm_x86_ops;
++
++int kvm_mmu_module_init(void);
++void kvm_mmu_module_exit(void);
++
++void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
++int kvm_mmu_create(struct kvm_vcpu *vcpu);
++int kvm_mmu_setup(struct kvm_vcpu *vcpu);
++void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
++
++int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
++void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
++void kvm_mmu_zap_all(struct kvm *kvm);
++unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
++void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
++
++enum emulation_result {
++	EMULATE_DONE,       /* no further processing */
++	EMULATE_DO_MMIO,      /* kvm_run filled with mmio request */
++	EMULATE_FAIL,         /* can't emulate this instruction */
++};
++
++#define EMULTYPE_NO_DECODE	    (1 << 0)
++#define EMULTYPE_TRAP_UD	    (1 << 1)
++int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
++			unsigned long cr2, u16 error_code, int emulation_type);
++void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
++void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
++void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
++void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
++		   unsigned long *rflags);
++
++unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
++void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
++		     unsigned long *rflags);
++int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
++int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
++
++struct x86_emulate_ctxt;
++
++int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
++		     int size, unsigned port);
++int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
++			   int size, unsigned long count, int down,
++			    gva_t address, int rep, unsigned port);
++void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
++int kvm_emulate_halt(struct kvm_vcpu *vcpu);
++int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
++int emulate_clts(struct kvm_vcpu *vcpu);
++int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
++		    unsigned long *dest);
++int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
++		    unsigned long value);
++
++void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
++void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
++void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
++void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
++unsigned long get_cr8(struct kvm_vcpu *vcpu);
++void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
++void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
++
++int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
++int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
++
++void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
++void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
++void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
++			   u32 error_code);
++
++void fx_init(struct kvm_vcpu *vcpu);
++
++int emulator_read_std(unsigned long addr,
++		      void *val,
++		      unsigned int bytes,
++		      struct kvm_vcpu *vcpu);
++int emulator_write_emulated(unsigned long addr,
++			    const void *val,
++			    unsigned int bytes,
++			    struct kvm_vcpu *vcpu);
++
++unsigned long segment_base(u16 selector);
++
++void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
++void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
++		       const u8 *new, int bytes);
++int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
++void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
++int kvm_mmu_load(struct kvm_vcpu *vcpu);
++void kvm_mmu_unload(struct kvm_vcpu *vcpu);
++
++int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
++
++int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
++
++int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
++
++int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
++int complete_pio(struct kvm_vcpu *vcpu);
++
++static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
++{
++	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
++
++	return (struct kvm_mmu_page *)page_private(page);
++}
++
++static inline u16 read_fs(void)
++{
++	u16 seg;
++	asm("mov %%fs, %0" : "=g"(seg));
++	return seg;
++}
++
++static inline u16 read_gs(void)
++{
++	u16 seg;
++	asm("mov %%gs, %0" : "=g"(seg));
++	return seg;
++}
++
++static inline u16 read_ldt(void)
++{
++	u16 ldt;
++	asm("sldt %0" : "=g"(ldt));
++	return ldt;
++}
++
++static inline void load_fs(u16 sel)
++{
++	asm("mov %0, %%fs" : : "rm"(sel));
++}
++
++static inline void load_gs(u16 sel)
++{
++	asm("mov %0, %%gs" : : "rm"(sel));
++}
++
++#ifndef load_ldt
++static inline void load_ldt(u16 sel)
++{
++	asm("lldt %0" : : "rm"(sel));
++}
++#endif
++
++static inline void get_idt(struct descriptor_table *table)
++{
++	asm("sidt %0" : "=m"(*table));
++}
++
++static inline void get_gdt(struct descriptor_table *table)
++{
++	asm("sgdt %0" : "=m"(*table));
++}
++
++static inline unsigned long read_tr_base(void)
++{
++	u16 tr;
++	asm("str %0" : "=g"(tr));
++	return segment_base(tr);
++}
++
++#ifdef CONFIG_X86_64
++static inline unsigned long read_msr(unsigned long msr)
++{
++	u64 value;
++
++	rdmsrl(msr, value);
++	return value;
++}
++#endif
++
++static inline void fx_save(struct i387_fxsave_struct *image)
++{
++	asm("fxsave (%0)":: "r" (image));
++}
++
++static inline void fx_restore(struct i387_fxsave_struct *image)
++{
++	asm("fxrstor (%0)":: "r" (image));
++}
++
++static inline void fpu_init(void)
++{
++	asm("finit");
++}
++
++static inline u32 get_rdx_init_val(void)
++{
++	return 0x600; /* P6 family */
++}
++
++static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
++{
++	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
++}
++
++#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
++#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
++#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
++#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
++#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
++#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
++#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
++#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
++#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
++
++#define MSR_IA32_TIME_STAMP_COUNTER		0x010
++
++#define TSS_IOPB_BASE_OFFSET 0x66
++#define TSS_BASE_SIZE 0x68
++#define TSS_IOPB_SIZE (65536 / 8)
++#define TSS_REDIRECTION_SIZE (256 / 8)
++#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
++
++#endif
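
struct kvm_x86_ops is the seam between the arch-independent KVM core and the hardware backends; kvm_init() (declared in linux/kvm_host.h later in this patch) takes the backend's ops table as its opaque argument plus the size of the backend's enclosing vcpu structure. The sketch below only shows the shape of that registration; every foo_* name is a placeholder standing in for what a backend such as vmx.c or svm.c would provide, not code from the patch.

/* Illustrative sketch, not part of the patch: a hardware backend filling
 * in kvm_x86_ops and registering it.  All foo_* symbols are placeholders. */
#include <linux/module.h>
#include <linux/kvm_host.h>

struct vcpu_foo {
	struct kvm_vcpu vcpu;   /* arch-independent part embedded first */
	/* backend-private state would follow */
};

static int foo_cpu_has_kvm_support(void) { return 1; }  /* pretend yes */
static int foo_disabled_by_bios(void)    { return 0; }  /* pretend no  */

static struct kvm_x86_ops foo_x86_ops = {
	.cpu_has_kvm_support = foo_cpu_has_kvm_support,
	.disabled_by_bios    = foo_disabled_by_bios,
	/* a real backend fills in every callback declared above */
};

static int __init foo_init(void)
{
	/* Pass the ops table down; the core also sizes its vcpu cache from
	 * the backend's enclosing structure. */
	return kvm_init(&foo_x86_ops, sizeof(struct vcpu_foo), THIS_MODULE);
}
module_init(foo_init);

MODULE_LICENSE("GPL");
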
+diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
+new file mode 100644
+index 0000000..c6f3fd8
+--- /dev/null
++++ b/include/asm-x86/kvm_para.h
+@@ -0,0 +1,105 @@
++#ifndef __X86_KVM_PARA_H
++#define __X86_KVM_PARA_H
++
++/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx.  It
++ * should be used to determine that a VM is running under KVM.
++ */
++#define KVM_CPUID_SIGNATURE	0x40000000
++
++/* This CPUID returns a feature bitmap in eax.  Before enabling a particular
++ * paravirtualization, the appropriate feature bit should be checked.
++ */
++#define KVM_CPUID_FEATURES	0x40000001
++
++#ifdef __KERNEL__
++#include <asm/processor.h>
++
++/* This instruction is vmcall.  On non-VT architectures, it will generate a
++ * trap that we will then rewrite to the appropriate instruction.
++ */
++#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
++
++/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
++ * instruction.  The hypervisor may replace it with something else but only the
++ * instructions are guaranteed to be supported.
++ *
++ * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
++ * The hypercall number should be placed in rax and the return value will be
++ * placed in rax.  No other registers will be clobbered unless explicitly
++ * noted by the particular hypercall.
++ */
++
++static inline long kvm_hypercall0(unsigned int nr)
++{
++	long ret;
++	asm volatile(KVM_HYPERCALL
++		     : "=a"(ret)
++		     : "a"(nr));
++	return ret;
++}
++
++static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
++{
++	long ret;
++	asm volatile(KVM_HYPERCALL
++		     : "=a"(ret)
++		     : "a"(nr), "b"(p1));
++	return ret;
++}
++
++static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
++				  unsigned long p2)
++{
++	long ret;
++	asm volatile(KVM_HYPERCALL
++		     : "=a"(ret)
++		     : "a"(nr), "b"(p1), "c"(p2));
++	return ret;
++}
++
++static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
++				  unsigned long p2, unsigned long p3)
++{
++	long ret;
++	asm volatile(KVM_HYPERCALL
++		     : "=a"(ret)
++		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3));
++	return ret;
++}
++
++static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
++				  unsigned long p2, unsigned long p3,
++				  unsigned long p4)
++{
++	long ret;
++	asm volatile(KVM_HYPERCALL
++		     : "=a"(ret)
++		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4));
++	return ret;
++}
++
++static inline int kvm_para_available(void)
++{
++	unsigned int eax, ebx, ecx, edx;
++	char signature[13];
++
++	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
++	memcpy(signature + 0, &ebx, 4);
++	memcpy(signature + 4, &ecx, 4);
++	memcpy(signature + 8, &edx, 4);
++	signature[12] = 0;
++
++	if (strcmp(signature, "KVMKVMKVM") == 0)
++		return 1;
++
++	return 0;
++}
++
++static inline unsigned int kvm_arch_para_features(void)
++{
++	return cpuid_eax(KVM_CPUID_FEATURES);
++}
++
++#endif
++
++#endif
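
The guest-side helpers above are self-contained: kvm_para_available() probes the KVM CPUID signature, kvm_arch_para_features() reads the feature leaf, and the kvm_hypercallN() wrappers place the number in rax and arguments in rbx/rcx/rdx/rsi. A short sketch of guest code using them follows; the hypercall number 42 is invented for illustration and not defined anywhere in this patch.

/* Illustrative sketch, not part of the patch: probing for KVM from guest
 * code and issuing a hypercall.  Hypercall number 42 is made up. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>

static void __init kvm_probe_example(void)
{
	long ret;

	if (!kvm_para_available()) {
		printk(KERN_INFO "not running under KVM\n");
		return;
	}

	printk(KERN_INFO "KVM feature bits: %#x\n", kvm_arch_para_features());

	/* Number in rax, single argument in rbx, result back in rax. */
	ret = kvm_hypercall1(42, 0);
	printk(KERN_INFO "hypercall 42 returned %ld\n", ret);
}
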
+diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
+new file mode 100644
+index 0000000..7db91b9
+--- /dev/null
++++ b/include/asm-x86/kvm_x86_emulate.h
+@@ -0,0 +1,186 @@
++/******************************************************************************
++ * x86_emulate.h
++ *
++ * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
++ *
++ * Copyright (c) 2005 Keir Fraser
++ *
++ * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
++ */
++
++#ifndef __X86_EMULATE_H__
++#define __X86_EMULATE_H__
++
++struct x86_emulate_ctxt;
++
++/*
++ * x86_emulate_ops:
++ *
++ * These operations represent the instruction emulator's interface to memory.
++ * There are two categories of operation: those that act on ordinary memory
++ * regions (*_std), and those that act on memory regions known to require
++ * special treatment or emulation (*_emulated).
++ *
++ * The emulator assumes that an instruction accesses only one 'emulated memory'
++ * location, that this location is the given linear faulting address (cr2), and
++ * that this is one of the instruction's data operands. Instruction fetches and
++ * stack operations are assumed never to access emulated memory. The emulator
++ * automatically deduces which operand of a string-move operation is accessing
++ * emulated memory, and assumes that the other operand accesses normal memory.
++ *
++ * NOTES:
++ *  1. The emulator isn't very smart about emulated vs. standard memory.
++ *     'Emulated memory' access addresses should be checked for sanity.
++ *     'Normal memory' accesses may fault, and the caller must arrange to
++ *     detect and handle reentrancy into the emulator via recursive faults.
++ *     Accesses may be unaligned and may cross page boundaries.
++ *  2. If the access fails (cannot emulate, or a standard access faults) then
++ *     it is up to the memop to propagate the fault to the guest VM via
++ *     some out-of-band mechanism, unknown to the emulator. The memop signals
++ *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
++ *     then immediately bail.
++ *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
++ *     cmpxchg8b_emulated need support 8-byte accesses.
++ *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
++ */
++/* Access completed successfully: continue emulation as normal. */
++#define X86EMUL_CONTINUE        0
++/* Access is unhandleable: bail from emulation and return error to caller. */
++#define X86EMUL_UNHANDLEABLE    1
++/* Terminate emulation but return success to the caller. */
++#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
++#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
++#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */
++struct x86_emulate_ops {
++	/*
++	 * read_std: Read bytes of standard (non-emulated/special) memory.
++	 *           Used for instruction fetch, stack operations, and others.
++	 *  @addr:  [IN ] Linear address from which to read.
++	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
++	 *  @bytes: [IN ] Number of bytes to read from memory.
++	 */
++	int (*read_std)(unsigned long addr, void *val,
++			unsigned int bytes, struct kvm_vcpu *vcpu);
++
++	/*
++	 * read_emulated: Read bytes from emulated/special memory area.
++	 *  @addr:  [IN ] Linear address from which to read.
++	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
++	 *  @bytes: [IN ] Number of bytes to read from memory.
++	 */
++	int (*read_emulated) (unsigned long addr,
++			      void *val,
++			      unsigned int bytes,
++			      struct kvm_vcpu *vcpu);
++
++	/*
++	 * write_emulated: Write bytes to emulated/special memory area.
++	 *  @addr:  [IN ] Linear address to which to write.
++	 *  @val:   [IN ] Value to write to memory (low-order bytes used as
++	 *                required).
++	 *  @bytes: [IN ] Number of bytes to write to memory.
++	 */
++	int (*write_emulated) (unsigned long addr,
++			       const void *val,
++			       unsigned int bytes,
++			       struct kvm_vcpu *vcpu);
++
++	/*
++	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
++	 *                   emulated/special memory area.
++	 *  @addr:  [IN ] Linear address to access.
++	 *  @old:   [IN ] Value expected to be current at @addr.
++	 *  @new:   [IN ] Value to write to @addr.
++	 *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
++	 */
++	int (*cmpxchg_emulated) (unsigned long addr,
++				 const void *old,
++				 const void *new,
++				 unsigned int bytes,
++				 struct kvm_vcpu *vcpu);
++
++};
++
++/* Type, address-of, and value of an instruction's operand. */
++struct operand {
++	enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
++	unsigned int bytes;
++	unsigned long val, orig_val, *ptr;
++};
++
++struct fetch_cache {
++	u8 data[15];
++	unsigned long start;
++	unsigned long end;
++};
++
++struct decode_cache {
++	u8 twobyte;
++	u8 b;
++	u8 lock_prefix;
++	u8 rep_prefix;
++	u8 op_bytes;
++	u8 ad_bytes;
++	u8 rex_prefix;
++	struct operand src;
++	struct operand dst;
++	unsigned long *override_base;
++	unsigned int d;
++	unsigned long regs[NR_VCPU_REGS];
++	unsigned long eip;
++	/* modrm */
++	u8 modrm;
++	u8 modrm_mod;
++	u8 modrm_reg;
++	u8 modrm_rm;
++	u8 use_modrm_ea;
++	unsigned long modrm_ea;
++	unsigned long modrm_val;
++	struct fetch_cache fetch;
++};
++
++struct x86_emulate_ctxt {
++	/* Register state before/after emulation. */
++	struct kvm_vcpu *vcpu;
++
++	/* Linear faulting address (if emulating a page-faulting instruction). */
++	unsigned long eflags;
++
++	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
++	int mode;
++
++	unsigned long cs_base;
++	unsigned long ds_base;
++	unsigned long es_base;
++	unsigned long ss_base;
++	unsigned long gs_base;
++	unsigned long fs_base;
++
++	/* decode cache */
++
++	struct decode_cache decode;
++};
++
++/* Repeat String Operation Prefix */
++#define REPE_PREFIX  1
++#define REPNE_PREFIX    2
++
++/* Execution mode, passed to the emulator. */
++#define X86EMUL_MODE_REAL     0	/* Real mode.             */
++#define X86EMUL_MODE_PROT16   2	/* 16-bit protected mode. */
++#define X86EMUL_MODE_PROT32   4	/* 32-bit protected mode. */
++#define X86EMUL_MODE_PROT64   8	/* 64-bit (long) mode.    */
++
++/* Host execution mode. */
++#if defined(__i386__)
++#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
++#elif defined(CONFIG_X86_64)
++#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
++#endif
++
++int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
++		    struct x86_emulate_ops *ops);
++int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
++		     struct x86_emulate_ops *ops);
++
++#endif				/* __X86_EMULATE_H__ */
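
The header documents a two-step flow: the caller fills a struct x86_emulate_ctxt, runs x86_decode_insn() to populate ctxt->decode, then x86_emulate_insn() to execute it, with every memory access routed through the caller's x86_emulate_ops. The sketch below only illustrates that call shape; its ops reject every access, whereas KVM's real caller wires them to guest-memory and MMIO helpers.

/* Illustrative sketch, not part of the patch: the decode-then-execute call
 * shape.  These ops fail every access on purpose. */
#include <linux/kvm_host.h>
#include <asm/kvm_x86_emulate.h>

static int nop_read(unsigned long addr, void *val,
		    unsigned int bytes, struct kvm_vcpu *vcpu)
{
	return X86EMUL_UNHANDLEABLE;
}

static int nop_write(unsigned long addr, const void *val,
		     unsigned int bytes, struct kvm_vcpu *vcpu)
{
	return X86EMUL_UNHANDLEABLE;
}

static int nop_cmpxchg(unsigned long addr, const void *old, const void *new,
		       unsigned int bytes, struct kvm_vcpu *vcpu)
{
	return X86EMUL_UNHANDLEABLE;
}

static struct x86_emulate_ops nop_ops = {
	.read_std         = nop_read,
	.read_emulated    = nop_read,
	.write_emulated   = nop_write,
	.cmpxchg_emulated = nop_cmpxchg,
};

static int emulate_one(struct x86_emulate_ctxt *ctxt)
{
	int rc = x86_decode_insn(ctxt, &nop_ops);	/* fills ctxt->decode */

	if (rc == 0)
		rc = x86_emulate_insn(ctxt, &nop_ops);	/* runs the decoded insn */
	return rc;					/* 0 on success */
}
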
 diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
-index ccd3384..1c8367a 100644
+index ccd3384..4d9367b 100644
 --- a/include/asm-x86/lguest.h
 +++ b/include/asm-x86/lguest.h
-@@ -44,14 +44,14 @@ struct lguest_ro_state
+@@ -44,19 +44,19 @@ struct lguest_ro_state
  {
  	/* Host information we need to restore when we switch back. */
  	u32 host_cr3;
@@ -626700,6 +784145,12 @@
  	struct desc_struct guest_idt[IDT_ENTRIES];
  	struct desc_struct guest_gdt[GDT_ENTRIES];
  };
+ 
+-struct lguest_arch
++struct lg_cpu_arch
+ {
+ 	/* The GDT entries copied into lguest_ro_state when running. */
+ 	struct desc_struct gdt[GDT_ENTRIES];
 @@ -78,8 +78,8 @@ static inline void lguest_set_ts(void)
  }
  
@@ -626711,6 +784162,30 @@
  
  #endif /* __ASSEMBLY__ */
  
+diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
+index 2091779..758b9a5 100644
+--- a/include/asm-x86/lguest_hcall.h
++++ b/include/asm-x86/lguest_hcall.h
+@@ -4,7 +4,7 @@
+ 
+ #define LHCALL_FLUSH_ASYNC	0
+ #define LHCALL_LGUEST_INIT	1
+-#define LHCALL_CRASH		2
++#define LHCALL_SHUTDOWN		2
+ #define LHCALL_LOAD_GDT		3
+ #define LHCALL_NEW_PGTABLE	4
+ #define LHCALL_FLUSH_TLB	5
+@@ -20,6 +20,10 @@
+ 
+ #define LGUEST_TRAP_ENTRY 0x1F
+ 
++/* Argument number 3 to LHCALL_SHUTDOWN */
++#define LGUEST_SHUTDOWN_POWEROFF	1
++#define LGUEST_SHUTDOWN_RESTART		2
++
+ #ifndef __ASSEMBLY__
+ #include <asm/hw_irq.h>
+ 
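
The rename from LHCALL_CRASH to LHCALL_SHUTDOWN goes together with the two new argument values: the guest now tells the host whether it wants a poweroff or a restart. A hedged sketch of the guest-side call follows; it assumes the hcall() wrapper that lives further down in lguest_hcall.h (outside the hunk above), and treating argument 1 as a reason-string address is likewise an assumption, not something stated in this hunk.

/* Illustrative sketch, not part of the patch: requesting a clean poweroff
 * from the lguest guest.  hcall() and the meaning of argument 1 are
 * assumptions about code outside the hunk shown above. */
#include <asm/lguest_hcall.h>
#include <asm/page.h>

static void example_power_off(void)
{
	/* Argument 3 selects between LGUEST_SHUTDOWN_POWEROFF and
	 * LGUEST_SHUTDOWN_RESTART. */
	hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0);
}
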
 diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
 index 94b257f..31739c7 100644
 --- a/include/asm-x86/linkage.h
@@ -639203,7 +796678,7 @@
  /*
   * Thread-synchronous status.
 diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
-index beae2bf..9b531ea 100644
+index beae2bf..6c9b214 100644
 --- a/include/asm-x86/thread_info_64.h
 +++ b/include/asm-x86/thread_info_64.h
 @@ -21,7 +21,7 @@
@@ -639265,8 +796740,8 @@
  #define TIF_FREEZE		23	/* is freezing for suspend */
 +#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 +#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
-+#define TIF_DS_AREA_MSR	25      /* uses thread_struct.ds_area_msr */
-+#define TIF_BTS_TRACE_TS	26      /* record scheduling event timestamps */
++#define TIF_DS_AREA_MSR		26      /* uses thread_struct.ds_area_msr */
++#define TIF_BTS_TRACE_TS	27      /* record scheduling event timestamps */
  
  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
  #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -641335,7 +798810,7 @@
 +#endif	/* _CRYPTO_SKCIPHER_H */
 +
 diff --git a/include/linux/Kbuild b/include/linux/Kbuild
-index f30fa92..27b9350 100644
+index f30fa92..85b2482 100644
 --- a/include/linux/Kbuild
 +++ b/include/linux/Kbuild
 @@ -1,4 +1,5 @@
@@ -641376,7 +798851,15 @@
  header-y += i8k.h
  header-y += if_arcnet.h
  header-y += if_bonding.h
-@@ -157,7 +159,6 @@ header-y += veth.h
+@@ -98,7 +100,6 @@ header-y += iso_fs.h
+ header-y += ixjuser.h
+ header-y += jffs2.h
+ header-y += keyctl.h
+-header-y += kvm.h
+ header-y += limits.h
+ header-y += lock_dlm_plock.h
+ header-y += magic.h
+@@ -157,7 +158,6 @@ header-y += veth.h
  header-y += video_decoder.h
  header-y += video_encoder.h
  header-y += videotext.h
@@ -641384,7 +798867,7 @@
  header-y += x25.h
  
  unifdef-y += acct.h
-@@ -172,6 +173,7 @@ unifdef-y += atm.h
+@@ -172,6 +172,7 @@ unifdef-y += atm.h
  unifdef-y += atm_tcp.h
  unifdef-y += audit.h
  unifdef-y += auto_fs.h
@@ -641392,7 +798875,7 @@
  unifdef-y += binfmts.h
  unifdef-y += capability.h
  unifdef-y += capi.h
-@@ -213,7 +215,7 @@ unifdef-y += hdreg.h
+@@ -213,7 +214,7 @@ unifdef-y += hdreg.h
  unifdef-y += hiddev.h
  unifdef-y += hpet.h
  unifdef-y += i2c.h
@@ -641401,7 +798884,7 @@
  unifdef-y += icmp.h
  unifdef-y += icmpv6.h
  unifdef-y += if_addr.h
-@@ -228,7 +230,6 @@ unifdef-y += if_ltalk.h
+@@ -228,7 +229,6 @@ unifdef-y += if_ltalk.h
  unifdef-y += if_link.h
  unifdef-y += if_pppol2tp.h
  unifdef-y += if_pppox.h
@@ -641409,6 +798892,14 @@
  unifdef-y += if_tr.h
  unifdef-y += if_tun.h
  unifdef-y += if_vlan.h
+@@ -255,6 +255,7 @@ unifdef-y += kd.h
+ unifdef-y += kernelcapi.h
+ unifdef-y += kernel.h
+ unifdef-y += keyboard.h
++unifdef-$(CONFIG_HAVE_KVM) += kvm.h
+ unifdef-y += llc.h
+ unifdef-y += loop.h
+ unifdef-y += lp.h
 @@ -348,6 +349,7 @@ unifdef-y += videodev.h
  unifdef-y += virtio_config.h
  unifdef-y += virtio_blk.h
@@ -641744,6 +799235,19 @@
  	struct class_device_attribute **attrs;
  	int (*match)(struct attribute_container *, struct device *);
  #define	ATTRIBUTE_CONTAINER_NO_CLASSDEVS	0x01
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index c687816..bdd6f5d 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -115,6 +115,8 @@
+ #define AUDIT_MAC_IPSEC_ADDSPD	1413	/* Not used */
+ #define AUDIT_MAC_IPSEC_DELSPD	1414	/* Not used */
+ #define AUDIT_MAC_IPSEC_EVENT	1415	/* Audit an IPSec event */
++#define AUDIT_MAC_UNLBL_STCADD	1416	/* NetLabel: add a static label */
++#define AUDIT_MAC_UNLBL_STCDEL	1417	/* NetLabel: del a static label */
+ 
+ #define AUDIT_FIRST_KERN_ANOM_MSG   1700
+ #define AUDIT_LAST_KERN_ANOM_MSG    1799
 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
 index d18ee67..e18d419 100644
 --- a/include/linux/blkdev.h
@@ -643102,10 +800606,10 @@
  {
  }
 diff --git a/include/linux/device.h b/include/linux/device.h
-index 2e15822..1880208 100644
+index 2e15822..db375be 100644
 --- a/include/linux/device.h
 +++ b/include/linux/device.h
-@@ -25,75 +25,69 @@
+@@ -25,75 +25,72 @@
  #include <asm/device.h>
  
  #define DEVICE_NAME_SIZE	50
@@ -643204,6 +800708,9 @@
 +struct device *bus_find_device(struct bus_type *bus, struct device *start,
 +			       void *data,
 +			       int (*match)(struct device *dev, void *data));
++struct device *bus_find_device_by_name(struct bus_type *bus,
++				       struct device *start,
++				       const char *name);
  
  int __must_check bus_for_each_drv(struct bus_type *bus,
 -		struct device_driver *start, void *data,
@@ -643213,7 +800720,7 @@
  
  /*
   * Bus notifiers: Get notified of addition/removal of devices
-@@ -118,111 +112,128 @@ extern int bus_unregister_notifier(struct bus_type *bus,
+@@ -118,111 +115,128 @@ extern int bus_unregister_notifier(struct bus_type *bus,
  #define BUS_NOTIFY_UNBIND_DRIVER	0x00000004 /* driver about to be
  						      unbound */
  
@@ -643403,7 +800910,7 @@
  
  extern int __must_check class_device_create_file(struct class_device *,
  				    const struct class_device_attribute *);
-@@ -255,26 +266,24 @@ struct class_device {
+@@ -255,26 +269,24 @@ struct class_device {
  	struct list_head	node;
  
  	struct kobject		kobj;
@@ -643442,7 +800949,7 @@
  {
  	dev->class_data = data;
  }
-@@ -286,10 +295,10 @@ extern void class_device_initialize(struct class_device *);
+@@ -286,10 +298,10 @@ extern void class_device_initialize(struct class_device *);
  extern int __must_check class_device_add(struct class_device *);
  extern void class_device_del(struct class_device *);
  
@@ -643455,7 +800962,7 @@
  				     const struct class_device_attribute *);
  extern int __must_check class_device_create_bin_file(struct class_device *,
  					struct bin_attribute *);
-@@ -316,7 +325,7 @@ extern struct class_device *class_device_create(struct class *cls,
+@@ -316,7 +328,7 @@ extern struct class_device *class_device_create(struct class *cls,
  						dev_t devt,
  						struct device *device,
  						const char *fmt, ...)
@@ -643464,7 +800971,7 @@
  extern void class_device_destroy(struct class *cls, dev_t devt);
  
  /*
-@@ -333,8 +342,8 @@ struct device_type {
+@@ -333,8 +345,8 @@ struct device_type {
  	struct attribute_group **groups;
  	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  	void (*release)(struct device *dev);
@@ -643475,7 +800982,7 @@
  };
  
  /* interface for exporting device attributes */
-@@ -346,18 +355,19 @@ struct device_attribute {
+@@ -346,18 +358,19 @@ struct device_attribute {
  			 const char *buf, size_t count);
  };
  
@@ -643500,7 +801007,7 @@
  
  /* This is a macro to avoid include problems with THIS_MODULE */
  #define device_schedule_callback(dev, func)			\
-@@ -368,21 +378,21 @@ typedef void (*dr_release_t)(struct device *dev, void *res);
+@@ -368,21 +381,21 @@ typedef void (*dr_release_t)(struct device *dev, void *res);
  typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
  
  #ifdef CONFIG_DEBUG_DEVRES
@@ -643529,7 +801036,7 @@
  extern int devres_destroy(struct device *dev, dr_release_t release,
  			  dr_match_t match, void *match_data);
  
-@@ -399,7 +409,7 @@ extern void devm_kfree(struct device *dev, void *p);
+@@ -399,7 +412,7 @@ extern void devm_kfree(struct device *dev, void *p);
  
  struct device {
  	struct klist		klist_children;
@@ -643538,7 +801045,7 @@
  	struct klist_node	knode_driver;
  	struct klist_node	knode_bus;
  	struct device		*parent;
-@@ -414,7 +424,7 @@ struct device {
+@@ -414,7 +427,7 @@ struct device {
  					 * its driver.
  					 */
  
@@ -643547,7 +801054,7 @@
  	struct device_driver *driver;	/* which driver has allocated this
  					   device */
  	void		*driver_data;	/* data private to the driver */
-@@ -445,10 +455,10 @@ struct device {
+@@ -445,10 +458,10 @@ struct device {
  	/* class_device migration path */
  	struct list_head	node;
  	struct class		*class;
@@ -643560,7 +801067,7 @@
  };
  
  #ifdef CONFIG_NUMA
-@@ -470,14 +480,12 @@ static inline void set_dev_node(struct device *dev, int node)
+@@ -470,14 +483,12 @@ static inline void set_dev_node(struct device *dev, int node)
  }
  #endif
  
@@ -643577,7 +801084,7 @@
  {
  	dev->driver_data = data;
  }
-@@ -492,15 +500,15 @@ void driver_init(void);
+@@ -492,15 +503,15 @@ void driver_init(void);
  /*
   * High level routines for use by the bus drivers
   */
@@ -643602,7 +801109,7 @@
  extern int device_rename(struct device *dev, char *new_name);
  extern int device_move(struct device *dev, struct device *new_parent);
  
-@@ -509,8 +517,8 @@ extern int device_move(struct device *dev, struct device *new_parent);
+@@ -509,8 +520,8 @@ extern int device_move(struct device *dev, struct device *new_parent);
   * for information on use.
   */
  extern int __must_check device_bind_driver(struct device *dev);
@@ -643613,7 +801120,7 @@
  extern int __must_check driver_attach(struct device_driver *drv);
  extern int __must_check device_reprobe(struct device *dev);
  
-@@ -519,8 +527,16 @@ extern int __must_check device_reprobe(struct device *dev);
+@@ -519,8 +530,16 @@ extern int __must_check device_reprobe(struct device *dev);
   */
  extern struct device *device_create(struct class *cls, struct device *parent,
  				    dev_t devt, const char *fmt, ...)
@@ -643631,7 +801138,7 @@
  
  /*
   * Platform "fixup" functions - allow the platform to have their say
-@@ -528,17 +544,17 @@ extern void device_destroy(struct class *cls, dev_t devt);
+@@ -528,17 +547,17 @@ extern void device_destroy(struct class *cls, dev_t devt);
   * know about.
   */
  /* Notify platform of device discovery */
@@ -643653,7 +801160,7 @@
  
  
  /* drivers/base/power/shutdown.c */
-@@ -547,22 +563,33 @@ extern void device_shutdown(void);
+@@ -547,22 +566,33 @@ extern void device_shutdown(void);
  /* drivers/base/sys.c */
  extern void sysdev_shutdown(void);
  
@@ -643694,7 +801201,7 @@
  {
  	return 0;
  }
-@@ -572,21 +599,12 @@ dev_dbg(struct device * dev, const char * fmt, ...)
+@@ -572,21 +602,12 @@ dev_dbg(struct device * dev, const char * fmt, ...)
  #define dev_vdbg	dev_dbg
  #else
  static inline int __attribute__ ((format (printf, 2, 3)))
@@ -648119,6 +805626,768 @@
  void kref_init(struct kref *kref);
  void kref_get(struct kref *kref);
  int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+diff --git a/include/linux/kvm.h b/include/linux/kvm.h
+index 057a7f3..4de4fd2 100644
+--- a/include/linux/kvm.h
++++ b/include/linux/kvm.h
+@@ -9,12 +9,10 @@
+ 
+ #include <asm/types.h>
+ #include <linux/ioctl.h>
++#include <asm/kvm.h>
+ 
+ #define KVM_API_VERSION 12
+ 
+-/* Architectural interrupt line count. */
+-#define KVM_NR_INTERRUPTS 256
+-
+ /* for KVM_CREATE_MEMORY_REGION */
+ struct kvm_memory_region {
+ 	__u32 slot;
+@@ -23,17 +21,19 @@ struct kvm_memory_region {
+ 	__u64 memory_size; /* bytes */
+ };
+ 
+-/* for kvm_memory_region::flags */
+-#define KVM_MEM_LOG_DIRTY_PAGES  1UL
+-
+-struct kvm_memory_alias {
+-	__u32 slot;  /* this has a different namespace than memory slots */
++/* for KVM_SET_USER_MEMORY_REGION */
++struct kvm_userspace_memory_region {
++	__u32 slot;
+ 	__u32 flags;
+ 	__u64 guest_phys_addr;
+-	__u64 memory_size;
+-	__u64 target_phys_addr;
++	__u64 memory_size; /* bytes */
++	__u64 userspace_addr; /* start of the userspace allocated memory */
+ };
+ 
++/* for kvm_memory_region::flags */
++#define KVM_MEM_LOG_DIRTY_PAGES  1UL
++
++
+ /* for KVM_IRQ_LINE */
+ struct kvm_irq_level {
+ 	/*
+@@ -45,62 +45,18 @@ struct kvm_irq_level {
+ 	__u32 level;
+ };
+ 
+-/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
+-struct kvm_pic_state {
+-	__u8 last_irr;	/* edge detection */
+-	__u8 irr;		/* interrupt request register */
+-	__u8 imr;		/* interrupt mask register */
+-	__u8 isr;		/* interrupt service register */
+-	__u8 priority_add;	/* highest irq priority */
+-	__u8 irq_base;
+-	__u8 read_reg_select;
+-	__u8 poll;
+-	__u8 special_mask;
+-	__u8 init_state;
+-	__u8 auto_eoi;
+-	__u8 rotate_on_auto_eoi;
+-	__u8 special_fully_nested_mode;
+-	__u8 init4;		/* true if 4 byte init */
+-	__u8 elcr;		/* PIIX edge/trigger selection */
+-	__u8 elcr_mask;
+-};
+-
+-#define KVM_IOAPIC_NUM_PINS  24
+-struct kvm_ioapic_state {
+-	__u64 base_address;
+-	__u32 ioregsel;
+-	__u32 id;
+-	__u32 irr;
+-	__u32 pad;
+-	union {
+-		__u64 bits;
+-		struct {
+-			__u8 vector;
+-			__u8 delivery_mode:3;
+-			__u8 dest_mode:1;
+-			__u8 delivery_status:1;
+-			__u8 polarity:1;
+-			__u8 remote_irr:1;
+-			__u8 trig_mode:1;
+-			__u8 mask:1;
+-			__u8 reserve:7;
+-			__u8 reserved[4];
+-			__u8 dest_id;
+-		} fields;
+-	} redirtbl[KVM_IOAPIC_NUM_PINS];
+-};
+-
+-#define KVM_IRQCHIP_PIC_MASTER   0
+-#define KVM_IRQCHIP_PIC_SLAVE    1
+-#define KVM_IRQCHIP_IOAPIC       2
+ 
+ struct kvm_irqchip {
+ 	__u32 chip_id;
+ 	__u32 pad;
+         union {
+ 		char dummy[512];  /* reserving space */
++#ifdef CONFIG_X86
+ 		struct kvm_pic_state pic;
++#endif
++#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ 		struct kvm_ioapic_state ioapic;
++#endif
+ 	} chip;
+ };
+ 
+@@ -116,6 +72,7 @@ struct kvm_irqchip {
+ #define KVM_EXIT_FAIL_ENTRY       9
+ #define KVM_EXIT_INTR             10
+ #define KVM_EXIT_SET_TPR          11
++#define KVM_EXIT_TPR_ACCESS       12
+ 
+ /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
+ struct kvm_run {
+@@ -174,90 +131,17 @@ struct kvm_run {
+ 			__u32 longmode;
+ 			__u32 pad;
+ 		} hypercall;
++		/* KVM_EXIT_TPR_ACCESS */
++		struct {
++			__u64 rip;
++			__u32 is_write;
++			__u32 pad;
++		} tpr_access;
+ 		/* Fix the size of the union. */
+ 		char padding[256];
+ 	};
+ };
+ 
+-/* for KVM_GET_REGS and KVM_SET_REGS */
+-struct kvm_regs {
+-	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+-	__u64 rax, rbx, rcx, rdx;
+-	__u64 rsi, rdi, rsp, rbp;
+-	__u64 r8,  r9,  r10, r11;
+-	__u64 r12, r13, r14, r15;
+-	__u64 rip, rflags;
+-};
+-
+-/* for KVM_GET_FPU and KVM_SET_FPU */
+-struct kvm_fpu {
+-	__u8  fpr[8][16];
+-	__u16 fcw;
+-	__u16 fsw;
+-	__u8  ftwx;  /* in fxsave format */
+-	__u8  pad1;
+-	__u16 last_opcode;
+-	__u64 last_ip;
+-	__u64 last_dp;
+-	__u8  xmm[16][16];
+-	__u32 mxcsr;
+-	__u32 pad2;
+-};
+-
+-/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
+-#define KVM_APIC_REG_SIZE 0x400
+-struct kvm_lapic_state {
+-	char regs[KVM_APIC_REG_SIZE];
+-};
+-
+-struct kvm_segment {
+-	__u64 base;
+-	__u32 limit;
+-	__u16 selector;
+-	__u8  type;
+-	__u8  present, dpl, db, s, l, g, avl;
+-	__u8  unusable;
+-	__u8  padding;
+-};
+-
+-struct kvm_dtable {
+-	__u64 base;
+-	__u16 limit;
+-	__u16 padding[3];
+-};
+-
+-/* for KVM_GET_SREGS and KVM_SET_SREGS */
+-struct kvm_sregs {
+-	/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
+-	struct kvm_segment cs, ds, es, fs, gs, ss;
+-	struct kvm_segment tr, ldt;
+-	struct kvm_dtable gdt, idt;
+-	__u64 cr0, cr2, cr3, cr4, cr8;
+-	__u64 efer;
+-	__u64 apic_base;
+-	__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
+-};
+-
+-struct kvm_msr_entry {
+-	__u32 index;
+-	__u32 reserved;
+-	__u64 data;
+-};
+-
+-/* for KVM_GET_MSRS and KVM_SET_MSRS */
+-struct kvm_msrs {
+-	__u32 nmsrs; /* number of msrs in entries */
+-	__u32 pad;
+-
+-	struct kvm_msr_entry entries[0];
+-};
+-
+-/* for KVM_GET_MSR_INDEX_LIST */
+-struct kvm_msr_list {
+-	__u32 nmsrs; /* number of msrs in entries */
+-	__u32 indices[0];
+-};
+-
+ /* for KVM_TRANSLATE */
+ struct kvm_translation {
+ 	/* in */
+@@ -302,28 +186,24 @@ struct kvm_dirty_log {
+ 	};
+ };
+ 
+-struct kvm_cpuid_entry {
+-	__u32 function;
+-	__u32 eax;
+-	__u32 ebx;
+-	__u32 ecx;
+-	__u32 edx;
+-	__u32 padding;
+-};
+-
+-/* for KVM_SET_CPUID */
+-struct kvm_cpuid {
+-	__u32 nent;
+-	__u32 padding;
+-	struct kvm_cpuid_entry entries[0];
+-};
+-
+ /* for KVM_SET_SIGNAL_MASK */
+ struct kvm_signal_mask {
+ 	__u32 len;
+ 	__u8  sigset[0];
+ };
+ 
++/* for KVM_TPR_ACCESS_REPORTING */
++struct kvm_tpr_access_ctl {
++	__u32 enabled;
++	__u32 flags;
++	__u32 reserved[8];
++};
++
++/* for KVM_SET_VAPIC_ADDR */
++struct kvm_vapic_addr {
++	__u64 vapic_addr;
++};
++
+ #define KVMIO 0xAE
+ 
+ /*
+@@ -347,11 +227,21 @@ struct kvm_signal_mask {
+  */
+ #define KVM_CAP_IRQCHIP	  0
+ #define KVM_CAP_HLT	  1
++#define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
++#define KVM_CAP_USER_MEMORY 3
++#define KVM_CAP_SET_TSS_ADDR 4
++#define KVM_CAP_EXT_CPUID 5
++#define KVM_CAP_VAPIC 6
+ 
+ /*
+  * ioctls for VM fds
+  */
+ #define KVM_SET_MEMORY_REGION     _IOW(KVMIO, 0x40, struct kvm_memory_region)
++#define KVM_SET_NR_MMU_PAGES      _IO(KVMIO, 0x44)
++#define KVM_GET_NR_MMU_PAGES      _IO(KVMIO, 0x45)
++#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\
++					struct kvm_userspace_memory_region)
++#define KVM_SET_TSS_ADDR          _IO(KVMIO, 0x47)
+ /*
+  * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
+  * a vcpu fd.
+@@ -359,6 +249,7 @@ struct kvm_signal_mask {
+ #define KVM_CREATE_VCPU           _IO(KVMIO,  0x41)
+ #define KVM_GET_DIRTY_LOG         _IOW(KVMIO, 0x42, struct kvm_dirty_log)
+ #define KVM_SET_MEMORY_ALIAS      _IOW(KVMIO, 0x43, struct kvm_memory_alias)
++#define KVM_GET_SUPPORTED_CPUID   _IOWR(KVMIO, 0x48, struct kvm_cpuid2)
+ /* Device model IOC */
+ #define KVM_CREATE_IRQCHIP	  _IO(KVMIO,  0x60)
+ #define KVM_IRQ_LINE		  _IOW(KVMIO, 0x61, struct kvm_irq_level)
+@@ -384,5 +275,11 @@ struct kvm_signal_mask {
+ #define KVM_SET_FPU               _IOW(KVMIO,  0x8d, struct kvm_fpu)
+ #define KVM_GET_LAPIC             _IOR(KVMIO,  0x8e, struct kvm_lapic_state)
+ #define KVM_SET_LAPIC             _IOW(KVMIO,  0x8f, struct kvm_lapic_state)
++#define KVM_SET_CPUID2            _IOW(KVMIO,  0x90, struct kvm_cpuid2)
++#define KVM_GET_CPUID2            _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
++/* Available with KVM_CAP_VAPIC */
++#define KVM_TPR_ACCESS_REPORTING  _IOWR(KVMIO,  0x92, struct kvm_tpr_access_ctl)
++/* Available with KVM_CAP_VAPIC */
++#define KVM_SET_VAPIC_ADDR        _IOW(KVMIO,  0x93, struct kvm_vapic_addr)
+ 
+ #endif
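
The big userspace-visible change in this file is the move from kernel-allocated guest RAM (KVM_SET_MEMORY_REGION) to userspace-allocated RAM handed in through KVM_SET_USER_MEMORY_REGION and struct kvm_userspace_memory_region. A minimal sketch of that flow follows; KVM_CREATE_VM is the standard system ioctl for obtaining a VM fd but is not itself part of the hunks above, and all error handling is omitted.

/* Illustrative sketch, not part of the patch: giving a VM one slot of
 * userspace-allocated RAM via the new ioctl.  Error handling omitted. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int example_setup_guest_ram(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	size_t size = 1 << 20;				/* 1 MiB of guest RAM */
	void *ram = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct kvm_userspace_memory_region region = {
		.slot            = 0,
		.flags           = 0,		/* or KVM_MEM_LOG_DIRTY_PAGES */
		.guest_phys_addr = 0,
		.memory_size     = size,
		.userspace_addr  = (unsigned long)ram,
	};

	/* The kernel now maps guest physical [0, 1 MiB) onto 'ram'. */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
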
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+new file mode 100644
+index 0000000..ea4764b
+--- /dev/null
++++ b/include/linux/kvm_host.h
+@@ -0,0 +1,299 @@
++#ifndef __KVM_HOST_H
++#define __KVM_HOST_H
++
++/*
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
++ */
++
++#include <linux/types.h>
++#include <linux/hardirq.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/spinlock.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/preempt.h>
++#include <asm/signal.h>
++
++#include <linux/kvm.h>
++#include <linux/kvm_para.h>
++
++#include <linux/kvm_types.h>
++
++#include <asm/kvm_host.h>
++
++#define KVM_MAX_VCPUS 4
++#define KVM_MEMORY_SLOTS 8
++/* memory slots that are not exposed to userspace */
++#define KVM_PRIVATE_MEM_SLOTS 4
++
++#define KVM_PIO_PAGE_OFFSET 1
++
++/*
++ * vcpu->requests bit members
++ */
++#define KVM_REQ_TLB_FLUSH          0
++#define KVM_REQ_MIGRATE_TIMER      1
++#define KVM_REQ_REPORT_TPR_ACCESS  2
++
++struct kvm_vcpu;
++extern struct kmem_cache *kvm_vcpu_cache;
++
++struct kvm_guest_debug {
++	int enabled;
++	unsigned long bp[4];
++	int singlestep;
++};
++
++/*
++ * It would be nice to use something smarter than a linear search, TBD...
++ * Thankfully we don't expect many devices to register (famous last words :),
++ * so until then it will suffice.  At least it's abstracted so we can change
++ * in one place.
++ */
++struct kvm_io_bus {
++	int                   dev_count;
++#define NR_IOBUS_DEVS 6
++	struct kvm_io_device *devs[NR_IOBUS_DEVS];
++};
++
++void kvm_io_bus_init(struct kvm_io_bus *bus);
++void kvm_io_bus_destroy(struct kvm_io_bus *bus);
++struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
++void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
++			     struct kvm_io_device *dev);
++
++struct kvm_vcpu {
++	struct kvm *kvm;
++	struct preempt_notifier preempt_notifier;
++	int vcpu_id;
++	struct mutex mutex;
++	int   cpu;
++	struct kvm_run *run;
++	int guest_mode;
++	unsigned long requests;
++	struct kvm_guest_debug guest_debug;
++	int fpu_active;
++	int guest_fpu_loaded;
++	wait_queue_head_t wq;
++	int sigset_active;
++	sigset_t sigset;
++	struct kvm_vcpu_stat stat;
++
++#ifdef CONFIG_HAS_IOMEM
++	int mmio_needed;
++	int mmio_read_completed;
++	int mmio_is_write;
++	int mmio_size;
++	unsigned char mmio_data[8];
++	gpa_t mmio_phys_addr;
++#endif
++
++	struct kvm_vcpu_arch arch;
++};
++
++struct kvm_memory_slot {
++	gfn_t base_gfn;
++	unsigned long npages;
++	unsigned long flags;
++	unsigned long *rmap;
++	unsigned long *dirty_bitmap;
++	unsigned long userspace_addr;
++	int user_alloc;
++};
++
++struct kvm {
++	struct mutex lock; /* protects the vcpus array and APIC accesses */
++	spinlock_t mmu_lock;
++	struct mm_struct *mm; /* userspace tied to this vm */
++	int nmemslots;
++	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
++					KVM_PRIVATE_MEM_SLOTS];
++	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
++	struct list_head vm_list;
++	struct file *filp;
++	struct kvm_io_bus mmio_bus;
++	struct kvm_io_bus pio_bus;
++	struct kvm_vm_stat stat;
++	struct kvm_arch arch;
++};
++
++/* The guest did something we don't support. */
++#define pr_unimpl(vcpu, fmt, ...)					\
++ do {									\
++	if (printk_ratelimit())						\
++		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
++		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
++ } while (0)
++
++#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
++#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
++
++int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
++void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
++
++void vcpu_load(struct kvm_vcpu *vcpu);
++void vcpu_put(struct kvm_vcpu *vcpu);
++
++void decache_vcpus_on_cpu(int cpu);
++
++
++int kvm_init(void *opaque, unsigned int vcpu_size,
++		  struct module *module);
++void kvm_exit(void);
++
++#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
++#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
++static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
++struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
++
++extern struct page *bad_page;
++
++int is_error_page(struct page *page);
++int kvm_is_error_hva(unsigned long addr);
++int kvm_set_memory_region(struct kvm *kvm,
++			  struct kvm_userspace_memory_region *mem,
++			  int user_alloc);
++int __kvm_set_memory_region(struct kvm *kvm,
++			    struct kvm_userspace_memory_region *mem,
++			    int user_alloc);
++int kvm_arch_set_memory_region(struct kvm *kvm,
++				struct kvm_userspace_memory_region *mem,
++				struct kvm_memory_slot old,
++				int user_alloc);
++gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
++struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
++void kvm_release_page_clean(struct page *page);
++void kvm_release_page_dirty(struct page *page);
++int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
++			int len);
++int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
++			  unsigned long len);
++int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
++int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
++			 int offset, int len);
++int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
++		    unsigned long len);
++int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
++int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
++struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
++int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
++void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
++
++void kvm_vcpu_block(struct kvm_vcpu *vcpu);
++void kvm_resched(struct kvm_vcpu *vcpu);
++void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
++void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
++void kvm_flush_remote_tlbs(struct kvm *kvm);
++
++long kvm_arch_dev_ioctl(struct file *filp,
++			unsigned int ioctl, unsigned long arg);
++long kvm_arch_vcpu_ioctl(struct file *filp,
++			 unsigned int ioctl, unsigned long arg);
++void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
++void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
++
++int kvm_dev_ioctl_check_extension(long ext);
++
++int kvm_get_dirty_log(struct kvm *kvm,
++			struct kvm_dirty_log *log, int *is_dirty);
++int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
++				struct kvm_dirty_log *log);
++
++int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
++				   struct
++				   kvm_userspace_memory_region *mem,
++				   int user_alloc);
++long kvm_arch_vm_ioctl(struct file *filp,
++		       unsigned int ioctl, unsigned long arg);
++void kvm_arch_destroy_vm(struct kvm *kvm);
++
++int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
++int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
++
++int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
++				    struct kvm_translation *tr);
++
++int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
++int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
++int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
++				  struct kvm_sregs *sregs);
++int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
++				  struct kvm_sregs *sregs);
++int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
++				    struct kvm_debug_guest *dbg);
++int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
++
++int kvm_arch_init(void *opaque);
++void kvm_arch_exit(void);
++
++int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
++void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
++
++void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
++void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
++void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
++struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
++int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
++void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
++
++int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
++void kvm_arch_hardware_enable(void *garbage);
++void kvm_arch_hardware_disable(void *garbage);
++int kvm_arch_hardware_setup(void);
++void kvm_arch_hardware_unsetup(void);
++void kvm_arch_check_processor_compat(void *rtn);
++int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
++
++void kvm_free_physmem(struct kvm *kvm);
++
++struct  kvm *kvm_arch_create_vm(void);
++void kvm_arch_destroy_vm(struct kvm *kvm);
++
++int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
++int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
++void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
++
++static inline void kvm_guest_enter(void)
++{
++	account_system_vtime(current);
++	current->flags |= PF_VCPU;
++}
++
++static inline void kvm_guest_exit(void)
++{
++	account_system_vtime(current);
++	current->flags &= ~PF_VCPU;
++}
++
++static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
++{
++	return slot - kvm->memslots;
++}
++
++static inline gpa_t gfn_to_gpa(gfn_t gfn)
++{
++	return (gpa_t)gfn << PAGE_SHIFT;
++}
++
++static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
++{
++	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
++}
++
++enum kvm_stat_kind {
++	KVM_STAT_VM,
++	KVM_STAT_VCPU,
++};
++
++struct kvm_stats_debugfs_item {
++	const char *name;
++	int offset;
++	enum kvm_stat_kind kind;
++	struct dentry *dentry;
++};
++extern struct kvm_stats_debugfs_item debugfs_entries[];
++
++#endif
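
As a rough illustration of the guest-memory helpers declared above, here is a minimal sketch of arch code copying a value out of guest memory and writing one back; the function name is made up and the kvm pointer and gpa are assumed to come from the caller.

#include <linux/kvm_host.h>

/* Sketch only: read a u64 from guest memory and write a new one back.
 * "kvm" and "gpa" are assumed to be supplied by arch-specific code. */
static int example_swap_guest_u64(struct kvm *kvm, gpa_t gpa, u64 new_val)
{
	u64 old_val;
	int r;

	r = kvm_read_guest(kvm, gpa, &old_val, sizeof(old_val));
	if (r)
		return r;

	r = kvm_write_guest(kvm, gpa, &new_val, sizeof(new_val));
	if (r)
		return r;

	/* kvm_write_guest() already dirties the page; the explicit helper
	 * is shown here only for illustration. */
	mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
	return 0;
}
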
+diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
+index 3b29256..5497aac 100644
+--- a/include/linux/kvm_para.h
++++ b/include/linux/kvm_para.h
+@@ -2,72 +2,30 @@
+ #define __LINUX_KVM_PARA_H
+ 
+ /*
+- * Guest OS interface for KVM paravirtualization
+- *
+- * Note: this interface is totally experimental, and is certain to change
+- *       as we make progress.
++ * This header file provides a method for making a hypercall to the host.
++ * Architectures should define:
++ * - kvm_hypercall0, kvm_hypercall1...
++ * - kvm_arch_para_features
++ * - kvm_para_available
+  */
+ 
+-/*
+- * Per-VCPU descriptor area shared between guest and host. Writable to
+- * both guest and host. Registered with the host by the guest when
+- * a guest acknowledges paravirtual mode.
+- *
+- * NOTE: all addresses are guest-physical addresses (gpa), to make it
+- * easier for the hypervisor to map between the various addresses.
+- */
+-struct kvm_vcpu_para_state {
+-	/*
+-	 * API version information for compatibility. If there's any support
+-	 * mismatch (too old host trying to execute too new guest) then
+-	 * the host will deny entry into paravirtual mode. Any other
+-	 * combination (new host + old guest and new host + new guest)
+-	 * is supposed to work - new host versions will support all old
+-	 * guest API versions.
+-	 */
+-	u32 guest_version;
+-	u32 host_version;
+-	u32 size;
+-	u32 ret;
+-
+-	/*
+-	 * The address of the vm exit instruction (VMCALL or VMMCALL),
+-	 * which the host will patch according to the CPU model the
+-	 * VM runs on:
+-	 */
+-	u64 hypercall_gpa;
+-
+-} __attribute__ ((aligned(PAGE_SIZE)));
+-
+-#define KVM_PARA_API_VERSION 1
+-
+-/*
+- * This is used for an RDMSR's ECX parameter to probe for a KVM host.
+- * Hopefully no CPU vendor will use up this number. This is placed well
+- * out of way of the typical space occupied by CPU vendors' MSR indices,
+- * and we think (or at least hope) it wont be occupied in the future
+- * either.
+- */
+-#define MSR_KVM_API_MAGIC 0x87655678
++/* Return values for hypercalls */
++#define KVM_ENOSYS		1000
+ 
+-#define KVM_EINVAL 1
++#define KVM_HC_VAPIC_POLL_IRQ            1
+ 
+ /*
+- * Hypercall calling convention:
+- *
+- * Each hypercall may have 0-6 parameters.
+- *
+- * 64-bit hypercall index is in RAX, goes from 0 to __NR_hypercalls-1
+- *
+- * 64-bit parameters 1-6 are in the standard gcc x86_64 calling convention
+- * order: RDI, RSI, RDX, RCX, R8, R9.
+- *
+- * 32-bit index is EBX, parameters are: EAX, ECX, EDX, ESI, EDI, EBP.
+- * (the first 3 are according to the gcc regparm calling convention)
+- *
+- * No registers are clobbered by the hypercall, except that the
+- * return value is in RAX.
++ * hypercalls use architecture-specific conventions
+  */
+-#define __NR_hypercalls			0
++#include <asm/kvm_para.h>
++
++#ifdef __KERNEL__
++static inline int kvm_para_has_feature(unsigned int feature)
++{
++	if (kvm_arch_para_features() & (1UL << feature))
++		return 1;
++	return 0;
++}
++#endif /* __KERNEL__ */
++#endif /* __LINUX_KVM_PARA_H */
+ 
+-#endif
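
A guest driver would normally probe for the hypervisor before relying on any paravirtual feature. A minimal sketch under stated assumptions: KVM_FEATURE_EXAMPLE is a hypothetical feature bit, and the probe function name is made up.

#include <linux/errno.h>
#include <linux/kvm_para.h>

/* Hypothetical feature bit, for illustration only. */
#define KVM_FEATURE_EXAMPLE	0

static int example_guest_probe(void)
{
	if (!kvm_para_available())
		return -ENODEV;	/* not running under a KVM host */

	if (!kvm_para_has_feature(KVM_FEATURE_EXAMPLE))
		return -ENODEV;	/* host does not advertise the feature */

	/* The actual kvm_hypercall0..4 wrappers come from <asm/kvm_para.h>;
	 * no hypercall is issued in this sketch. */
	return 0;
}
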
+diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
+new file mode 100644
+index 0000000..1c4e46d
+--- /dev/null
++++ b/include/linux/kvm_types.h
+@@ -0,0 +1,54 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
++ *
++ */
++
++#ifndef __KVM_TYPES_H__
++#define __KVM_TYPES_H__
++
++#include <asm/types.h>
++
++/*
++ * Address types:
++ *
++ *  gva - guest virtual address
++ *  gpa - guest physical address
++ *  gfn - guest frame number
++ *  hva - host virtual address
++ *  hpa - host physical address
++ *  hfn - host frame number
++ */
++
++typedef unsigned long  gva_t;
++typedef u64            gpa_t;
++typedef unsigned long  gfn_t;
++
++typedef unsigned long  hva_t;
++typedef u64            hpa_t;
++typedef unsigned long  hfn_t;
++
++struct kvm_pio_request {
++	unsigned long count;
++	int cur_count;
++	struct page *guest_pages[2];
++	unsigned guest_page_offset;
++	int in;
++	int port;
++	int size;
++	int string;
++	int down;
++	int rep;
++};
++
++#endif /* __KVM_TYPES_H__ */
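
The address types above are plain typedefs, so conversions are simple shifts and masks. A small sketch mirroring the gfn_to_gpa() helper from kvm_host.h; the function names are illustrative only.

#include <linux/kvm_types.h>
#include <asm/page.h>

/* Split a guest physical address into a frame number and a page offset. */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline unsigned int example_gpa_offset(gpa_t gpa)
{
	return gpa & (PAGE_SIZE - 1);
}
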
 diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
 new file mode 100644
 index 0000000..901c2d6
@@ -650693,6 +808962,78 @@
  				        * not handling interrupts, soon dead */
  
  /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 5c39b92..b5f33ef 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -17,6 +17,7 @@
+  */
+ #include <linux/types.h>
+ #include <linux/bitops.h>
++#include <linux/mod_devicetable.h>
+ 
+ #include <asm/prom.h>
+ 
+@@ -41,11 +42,20 @@ extern struct device_node *of_find_compatible_node(struct device_node *from,
+ #define for_each_compatible_node(dn, type, compatible) \
+ 	for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
+ 	     dn = of_find_compatible_node(dn, type, compatible))
++extern struct device_node *of_find_matching_node(struct device_node *from,
++	const struct of_device_id *matches);
++#define for_each_matching_node(dn, matches) \
++	for (dn = of_find_matching_node(NULL, matches); dn; \
++	     dn = of_find_matching_node(dn, matches))
+ extern struct device_node *of_find_node_by_path(const char *path);
+ extern struct device_node *of_find_node_by_phandle(phandle handle);
+ extern struct device_node *of_get_parent(const struct device_node *node);
+ extern struct device_node *of_get_next_child(const struct device_node *node,
+ 					     struct device_node *prev);
++#define for_each_child_of_node(parent, child) \
++	for (child = of_get_next_child(parent, NULL); child != NULL; \
++	     child = of_get_next_child(parent, child))
++
+ extern struct property *of_find_property(const struct device_node *np,
+ 					 const char *name,
+ 					 int *lenp);
+@@ -56,5 +66,7 @@ extern const void *of_get_property(const struct device_node *node,
+ 				int *lenp);
+ extern int of_n_addr_cells(struct device_node *np);
+ extern int of_n_size_cells(struct device_node *np);
++extern const struct of_device_id *of_match_node(
++	const struct of_device_id *matches, const struct device_node *node);
+ 
+ #endif /* _LINUX_OF_H */
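
The new of_find_matching_node()/of_match_node() helpers let a driver walk the device tree with a match table instead of open-coding compatible-string checks. A minimal sketch; the compatible strings and function name are made up.

#include <linux/of.h>

/* Hypothetical match table; the compatible values are examples only. */
static const struct of_device_id example_ids[] = {
	{ .compatible = "acme,example-ctrl" },
	{ .compatible = "acme,example-ctrl-v2" },
	{ }
};

static void example_scan(void)
{
	struct device_node *np;

	for_each_matching_node(np, example_ids) {
		const struct of_device_id *id = of_match_node(example_ids, np);
		/* id identifies which table entry matched; the node reference
		 * is dropped by the iterator on the next pass. */
		(void)id;
	}
}
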
+diff --git a/include/linux/of_device.h b/include/linux/of_device.h
+index 212bffb..6dc1195 100644
+--- a/include/linux/of_device.h
++++ b/include/linux/of_device.h
+@@ -10,8 +10,6 @@
+ 
+ #define	to_of_device(d) container_of(d, struct of_device, dev)
+ 
+-extern const struct of_device_id *of_match_node(
+-	const struct of_device_id *matches, const struct device_node *node);
+ extern const struct of_device_id *of_match_device(
+ 	const struct of_device_id *matches, const struct of_device *dev);
+ 
+diff --git a/include/linux/pata_platform.h b/include/linux/pata_platform.h
+index 5799e8d..6a7a92d 100644
+--- a/include/linux/pata_platform.h
++++ b/include/linux/pata_platform.h
+@@ -15,4 +15,13 @@ struct pata_platform_info {
+ 	unsigned int irq_flags;
+ };
+ 
++extern int __devinit __pata_platform_probe(struct device *dev,
++					   struct resource *io_res,
++					   struct resource *ctl_res,
++					   struct resource *irq_res,
++					   unsigned int ioport_shift,
++					   int __pio_mask);
++
++extern int __devexit __pata_platform_remove(struct device *dev);
++
+ #endif /* __LINUX_PATA_PLATFORM_H */
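
__pata_platform_probe() factors the common probe logic out so that bus glue can pass in its own resources. A sketch of how such a wrapper might call it, assuming the usual platform resources; the ioport_shift of 0 and the PIO mask value are illustrative only.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/pata_platform.h>

static int __devinit example_pata_probe(struct platform_device *pdev)
{
	struct resource *io_res, *ctl_res, *irq_res;

	io_res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!io_res || !ctl_res)
		return -EINVAL;

	/* ioport_shift 0 and a PIO0-only mask are example values. */
	return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
				     0, 0x01);
}

static int __devexit example_pata_remove(struct platform_device *pdev)
{
	return __pata_platform_remove(&pdev->dev);
}
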
 diff --git a/include/linux/pci.h b/include/linux/pci.h
 index 0dd93bb..ae10063 100644
 --- a/include/linux/pci.h
@@ -650848,17 +809189,13 @@
 +
 +#endif /* __LINUX_PCOUNTER_H */
 diff --git a/include/linux/percpu.h b/include/linux/percpu.h
-index 926adaa..00412bb 100644
+index 926adaa..50faa0e 100644
 --- a/include/linux/percpu.h
 +++ b/include/linux/percpu.h
-@@ -9,6 +9,30 @@
+@@ -9,6 +9,26 @@
  
  #include <asm/percpu.h>
  
-+#ifndef PER_CPU_ATTRIBUTES
-+#define PER_CPU_ATTRIBUTES
-+#endif
-+
 +#ifdef CONFIG_SMP
 +#define DEFINE_PER_CPU(type, name)					\
 +	__attribute__((__section__(".data.percpu")))			\
@@ -650882,6 +809219,71 @@
  /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
  #ifndef PERCPU_ENOUGH_ROOM
  #ifdef CONFIG_MODULES
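
For reference, a small sketch of how the (now generic) DEFINE_PER_CPU macro is used together with the usual accessors; the counter name is arbitrary.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* One instance of this counter exists for every possible CPU. */
static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_count_hit(void)
{
	/* get_cpu_var() disables preemption while the local copy is used. */
	get_cpu_var(example_hits)++;
	put_cpu_var(example_hits);
}

static unsigned long example_total_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(example_hits, cpu);
	return sum;
}
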
+diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
+index 04ba70d..509d8f5 100644
+--- a/include/linux/phy_fixed.h
++++ b/include/linux/phy_fixed.h
+@@ -1,38 +1,31 @@
+ #ifndef __PHY_FIXED_H
+ #define __PHY_FIXED_H
+ 
+-#define MII_REGS_NUM	29
+-
+-/* max number of virtual phy stuff */
+-#define MAX_PHY_AMNT	10
+-/*
+-    The idea is to emulate normal phy behavior by responding with
+-    pre-defined values to mii BMCR read, so that read_status hook could
+-    take all the needed info.
+-*/
+-
+ struct fixed_phy_status {
+-	u8 link;
+-	u16 speed;
+-	u8 duplex;
++	int link;
++	int speed;
++	int duplex;
++	int pause;
++	int asym_pause;
+ };
+ 
+-/*-----------------------------------------------------------------------------
+- *  Private information hoder for mii_bus
+- *-----------------------------------------------------------------------------*/
+-struct fixed_info {
+-	u16 *regs;
+-	u8 regs_num;
+-	struct fixed_phy_status phy_status;
+-	struct phy_device *phydev;	/* pointer to the container */
+-	/* link & speed cb */
+-	int (*link_update) (struct net_device *, struct fixed_phy_status *);
++#ifdef CONFIG_FIXED_PHY
++extern int fixed_phy_add(unsigned int irq, int phy_id,
++			 struct fixed_phy_status *status);
++#else
++static inline int fixed_phy_add(unsigned int irq, int phy_id,
++				struct fixed_phy_status *status)
++{
++	return -ENODEV;
++}
++#endif /* CONFIG_FIXED_PHY */
+ 
+-};
+-
+-
+-int fixed_mdio_set_link_update(struct phy_device *,
+-       int (*link_update) (struct net_device *, struct fixed_phy_status *));
+-struct fixed_info *fixed_mdio_get_phydev (int phydev_ind);
++/*
++ * This function is used only by fixed_phy-aware drivers, so there is no
++ * need to protect it with #ifdef
++ */
++extern int fixed_phy_set_link_update(struct phy_device *phydev,
++			int (*link_update)(struct net_device *,
++					   struct fixed_phy_status *));
+ 
+ #endif /* __PHY_FIXED_H */
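
With the reworked interface a board file registers a fixed PHY by describing the link state directly. A minimal sketch; the PHY id and link parameters are examples only.

#include <linux/init.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

/* Example link description: 100 Mbit/s, full duplex, link always up. */
static struct fixed_phy_status example_fixed_link = {
	.link	= 1,
	.speed	= 100,
	.duplex	= 1,
};

static int __init example_register_fixed_phy(void)
{
	/* PHY_POLL means no interrupt line; the emulated bus is polled. */
	return fixed_phy_add(PHY_POLL, 1, &example_fixed_link);
}
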
 diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
 index 919af93..3276135 100644
 --- a/include/linux/pkt_sched.h
@@ -650921,6 +809323,53 @@
  					struct resource *, unsigned int);
  
  extern struct platform_device *platform_device_alloc(const char *name, int id);
+diff --git a/include/linux/pmu.h b/include/linux/pmu.h
+index b7824c2..4c5f653 100644
+--- a/include/linux/pmu.h
++++ b/include/linux/pmu.h
+@@ -159,41 +159,7 @@ extern void pmu_unlock(void);
+ extern int pmu_present(void);
+ extern int pmu_get_model(void);
+ 
+-#ifdef CONFIG_PM
+-/*
+- * Stuff for putting the powerbook to sleep and waking it again.
+- *
+- */
+-#include <linux/list.h>
+-
+-struct pmu_sleep_notifier
+-{
+-	void (*notifier_call)(struct pmu_sleep_notifier *self, int when);
+-	int priority;
+-	struct list_head list;
+-};
+-
+-/* Code values for calling sleep/wakeup handlers
+- */
+-#define PBOOK_SLEEP_REQUEST	1
+-#define PBOOK_SLEEP_NOW		2
+-#define PBOOK_WAKE		3
+-
+-/* priority levels in notifiers */
+-#define SLEEP_LEVEL_VIDEO	100	/* Video driver (first wake) */
+-#define SLEEP_LEVEL_MEDIABAY	90	/* Media bay driver */
+-#define SLEEP_LEVEL_BLOCK	80	/* IDE, SCSI */
+-#define SLEEP_LEVEL_NET		70	/* bmac, gmac */
+-#define SLEEP_LEVEL_MISC	60	/* Anything else */
+-#define SLEEP_LEVEL_USERLAND	55	/* Reserved for apm_emu */
+-#define SLEEP_LEVEL_ADB		50	/* ADB (async) */
+-#define SLEEP_LEVEL_SOUND	40	/* Sound driver (blocking) */
+-
+-/* special register notifier functions */
+-int pmu_register_sleep_notifier(struct pmu_sleep_notifier* notifier);
+-int pmu_unregister_sleep_notifier(struct pmu_sleep_notifier* notifier);
+-
+-#endif /* CONFIG_PM */
++extern void pmu_backlight_set_sleep(int sleep);
+ 
+ #define PMU_MAX_BATTERIES	2
+ 
 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
 index a531682..8f92546 100644
 --- a/include/linux/proc_fs.h
@@ -652615,6 +811064,76 @@
  static inline void security_release_secctx(char *secdata, u32 seclen)
  {
  }
+diff --git a/include/linux/selinux.h b/include/linux/selinux.h
+index 6080f73..8c2cc4c 100644
+--- a/include/linux/selinux.h
++++ b/include/linux/selinux.h
+@@ -120,16 +120,35 @@ void selinux_get_task_sid(struct task_struct *tsk, u32 *sid);
+ int selinux_string_to_sid(char *str, u32 *sid);
+ 
+ /**
+- *     selinux_relabel_packet_permission - check permission to relabel a packet
+- *     @sid: ID value to be applied to network packet (via SECMARK, most likely)
++ *     selinux_secmark_relabel_packet_permission - secmark permission check
++ *     @sid: SECMARK ID value to be applied to network packet
+  *
+- *     Returns 0 if the current task is allowed to label packets with the
+- *     supplied security ID.  Note that it is implicit that the packet is always
+- *     being relabeled from the default unlabled value, and that the access
+- *     control decision is made in the AVC.
++ *     Returns 0 if the current task is allowed to set the SECMARK label of
++ *     packets with the supplied security ID.  Note that it is implicit that
++ *     the packet is always being relabeled from the default unlabeled value,
++ *     and that the access control decision is made in the AVC.
+  */
+-int selinux_relabel_packet_permission(u32 sid);
++int selinux_secmark_relabel_packet_permission(u32 sid);
+ 
++/**
++ *     selinux_secmark_refcount_inc - increments the secmark use counter
++ *
++ *     SELinux keeps track of the current SECMARK targets in use so it knows
++ *     when to apply SECMARK label access checks to network packets.  This
++ *     function increments this reference count to indicate that a new SECMARK
++ *     target has been configured.
++ */
++void selinux_secmark_refcount_inc(void);
++
++/**
++ *     selinux_secmark_refcount_dec - decrements the secmark use counter
++ *
++ *     SELinux keeps track of the current SECMARK targets in use so it knows
++ *     when to apply SECMARK label access checks to network packets.  This
++ *     function decrements this reference count to indicate that one of the
++ *     existing SECMARK targets has been removed/flushed.
++ */
++void selinux_secmark_refcount_dec(void);
+ #else
+ 
+ static inline int selinux_audit_rule_init(u32 field, u32 op,
+@@ -184,11 +203,21 @@ static inline int selinux_string_to_sid(const char *str, u32 *sid)
+        return 0;
+ }
+ 
+-static inline int selinux_relabel_packet_permission(u32 sid)
++static inline int selinux_secmark_relabel_packet_permission(u32 sid)
+ {
+ 	return 0;
+ }
+ 
++static inline void selinux_secmark_refcount_inc(void)
++{
++	return;
++}
++
++static inline void selinux_secmark_refcount_dec(void)
++{
++	return;
++}
++
+ #endif	/* CONFIG_SECURITY_SELINUX */
+ 
+ #endif /* _LINUX_SELINUX_H */
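
The two refcount helpers are meant to be called in matching pairs by the SECMARK target, as the xt_SECMARK changes further down in this patch do. A stripped-down sketch of that pairing; the function names are made up and error handling is shortened.

#include <linux/errno.h>
#include <linux/selinux.h>

/* Sketch: bump the counter once a SECMARK rule is accepted, drop it when
 * the rule is torn down. */
static int example_secmark_setup(u32 selsid)
{
	int err;

	err = selinux_secmark_relabel_packet_permission(selsid);
	if (err)
		return err;
	selinux_secmark_refcount_inc();
	return 0;
}

static void example_secmark_teardown(void)
{
	selinux_secmark_refcount_dec();
}
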
 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
 index ebbc02b..648dfeb 100644
 --- a/include/linux/seq_file.h
@@ -656935,6 +815454,184 @@
 +extern void xt_rateest_put(struct xt_rateest *est);
 +
 +#endif /* _XT_RATEEST_H */
+diff --git a/include/net/netlabel.h b/include/net/netlabel.h
+index 2e5b2f6..b3213c7 100644
+--- a/include/net/netlabel.h
++++ b/include/net/netlabel.h
+@@ -67,7 +67,11 @@
+  * NetLabel NETLINK protocol
+  */
+ 
+-#define NETLBL_PROTO_VERSION            1
++/* NetLabel NETLINK protocol version
++ *  1: initial version
++ *  2: added static labels for unlabeled connections
++ */
++#define NETLBL_PROTO_VERSION            2
+ 
+ /* NetLabel NETLINK types/families */
+ #define NETLBL_NLTYPE_NONE              0
+@@ -105,17 +109,49 @@ struct netlbl_dom_map;
+ /* Domain mapping operations */
+ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
+ 
+-/* LSM security attributes */
++/*
++ * LSM security attributes
++ */
++
++/**
++ * struct netlbl_lsm_cache - NetLabel LSM security attribute cache
++ * @refcount: atomic reference counter
++ * @free: LSM supplied function to free the cache data
++ * @data: LSM supplied cache data
++ *
++ * Description:
++ * This structure is provided for LSMs which wish to make use of the NetLabel
++ * caching mechanism to store LSM specific data/attributes in the NetLabel
++ * cache.  If the LSM has to perform a lot of translation from the NetLabel
++ * security attributes into it's own internal representation then the cache
++ * mechanism can provide a way to eliminate some or all of that translation
++ * overhead on a cache hit.
++ *
++ */
+ struct netlbl_lsm_cache {
+ 	atomic_t refcount;
+ 	void (*free) (const void *data);
+ 	void *data;
+ };
+-/* The catmap bitmap field MUST be a power of two in length and large
++
++/**
++ * struct netlbl_lsm_secattr_catmap - NetLabel LSM secattr category bitmap
++ * @startbit: the value of the lowest order bit in the bitmap
++ * @bitmap: the category bitmap
++ * @next: pointer to the next bitmap "node" or NULL
++ *
++ * Description:
++ * This structure is used to represent category bitmaps.  Due to the large
++ * number of categories supported by most labeling protocols it is not
++ * practical to transfer a full bitmap internally so NetLabel adopts a sparse
++ * bitmap structure modeled after SELinux's ebitmap structure.
++ * The catmap bitmap field MUST be a power of two in length and large
+  * enough to hold at least 240 bits.  Special care (i.e. check the code!)
+  * should be used when changing these values as the LSM implementation
+  * probably has functions which rely on the sizes of these types to speed
+- * processing. */
++ * processing.
++ *
++ */
+ #define NETLBL_CATMAP_MAPTYPE           u64
+ #define NETLBL_CATMAP_MAPCNT            4
+ #define NETLBL_CATMAP_MAPSIZE           (sizeof(NETLBL_CATMAP_MAPTYPE) * 8)
+@@ -127,22 +163,48 @@ struct netlbl_lsm_secattr_catmap {
+ 	NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT];
+ 	struct netlbl_lsm_secattr_catmap *next;
+ };
++
++/**
++ * struct netlbl_lsm_secattr - NetLabel LSM security attributes
++ * @flags: indicate which attributes are contained in this structure
++ * @type: indicate the NLTYPE of the attributes
++ * @domain: the NetLabel LSM domain
++ * @cache: NetLabel LSM specific cache
++ * @attr.mls: MLS sensitivity label
++ * @attr.mls.cat: MLS category bitmap
++ * @attr.mls.lvl: MLS sensitivity level
++ * @attr.secid: LSM specific secid token
++ *
++ * Description:
++ * This structure is used to pass security attributes between NetLabel and the
++ * LSM modules.  The flags field is used to specify which fields within the
++ * struct are valid and valid values can be created by bitwise OR'ing the
++ * NETLBL_SECATTR_* defines.  The domain field is typically set by the LSM to
++ * specify domain specific configuration settings and is not usually used by
++ * NetLabel itself when returning security attributes to the LSM.
++ *
++ */
+ #define NETLBL_SECATTR_NONE             0x00000000
+ #define NETLBL_SECATTR_DOMAIN           0x00000001
+ #define NETLBL_SECATTR_CACHE            0x00000002
+ #define NETLBL_SECATTR_MLS_LVL          0x00000004
+ #define NETLBL_SECATTR_MLS_CAT          0x00000008
++#define NETLBL_SECATTR_SECID            0x00000010
+ #define NETLBL_SECATTR_CACHEABLE        (NETLBL_SECATTR_MLS_LVL | \
+-					 NETLBL_SECATTR_MLS_CAT)
++					 NETLBL_SECATTR_MLS_CAT | \
++					 NETLBL_SECATTR_SECID)
+ struct netlbl_lsm_secattr {
+ 	u32 flags;
+-
++	u32 type;
+ 	char *domain;
+-
+-	u32 mls_lvl;
+-	struct netlbl_lsm_secattr_catmap *mls_cat;
+-
+ 	struct netlbl_lsm_cache *cache;
++	union {
++		struct {
++			struct netlbl_lsm_secattr_catmap *cat;
++			u32 lvl;
++		} mls;
++		u32 secid;
++	} attr;
+ };
+ 
+ /*
+@@ -231,10 +293,7 @@ static inline void netlbl_secattr_catmap_free(
+  */
+ static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
+ {
+-	secattr->flags = 0;
+-	secattr->domain = NULL;
+-	secattr->mls_cat = NULL;
+-	secattr->cache = NULL;
++	memset(secattr, 0, sizeof(*secattr));
+ }
+ 
+ /**
+@@ -248,11 +307,11 @@ static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
+  */
+ static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
+ {
+-	if (secattr->cache)
+-		netlbl_secattr_cache_free(secattr->cache);
+ 	kfree(secattr->domain);
+-	if (secattr->mls_cat)
+-		netlbl_secattr_catmap_free(secattr->mls_cat);
++	if (secattr->flags & NETLBL_SECATTR_CACHE)
++		netlbl_secattr_cache_free(secattr->cache);
++	if (secattr->flags & NETLBL_SECATTR_MLS_CAT)
++		netlbl_secattr_catmap_free(secattr->attr.mls.cat);
+ }
+ 
+ /**
+@@ -300,7 +359,7 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap,
+ 				 gfp_t flags);
+ 
+ /*
+- * LSM protocol operations
++ * LSM protocol operations (NetLabel LSM/kernel API)
+  */
+ int netlbl_enabled(void);
+ int netlbl_sock_setattr(struct sock *sk,
+@@ -308,6 +367,7 @@ int netlbl_sock_setattr(struct sock *sk,
+ int netlbl_sock_getattr(struct sock *sk,
+ 			struct netlbl_lsm_secattr *secattr);
+ int netlbl_skbuff_getattr(const struct sk_buff *skb,
++			  u16 family,
+ 			  struct netlbl_lsm_secattr *secattr);
+ void netlbl_skbuff_err(struct sk_buff *skb, int error);
+ 
+@@ -360,6 +420,7 @@ static inline int netlbl_sock_getattr(struct sock *sk,
+ 	return -ENOSYS;
+ }
+ static inline int netlbl_skbuff_getattr(const struct sk_buff *skb,
++					u16 family,
+ 					struct netlbl_lsm_secattr *secattr)
+ {
+ 	return -ENOSYS;
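
An LSM consuming the restructured secattr now checks the flags and the new attr.mls fields rather than the old flat members. A minimal sketch of reading a packet's MLS level, assuming an IPv4 packet; the function name is made up and error handling is abbreviated.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/netlabel.h>

/* Sketch: fetch the security attributes of an IPv4 packet and pull out
 * the MLS sensitivity level, if one was present. */
static int example_skb_mls_level(const struct sk_buff *skb, u32 *level)
{
	struct netlbl_lsm_secattr secattr;
	int rc;

	netlbl_secattr_init(&secattr);
	rc = netlbl_skbuff_getattr(skb, PF_INET, &secattr);
	if (rc == 0 && (secattr.flags & NETLBL_SECATTR_MLS_LVL))
		*level = secattr.attr.mls.lvl;
	else if (rc == 0)
		rc = -ENOENT;
	netlbl_secattr_destroy(&secattr);
	return rc;
}
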
 diff --git a/include/net/netlink.h b/include/net/netlink.h
 index 9298218..a5506c4 100644
 --- a/include/net/netlink.h
@@ -659513,29 +818210,97 @@
  };
  
  /* From the spec; local phys only */
+diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
+index 702fcfe..8225157 100644
+--- a/include/scsi/scsi.h
++++ b/include/scsi/scsi.h
+@@ -11,6 +11,25 @@
+ #include <linux/types.h>
+ 
+ /*
++ * The maximum number of SG segments that we will put inside a
++ * scatterlist (unless chaining is used). Should ideally fit inside a
++ * single page, to avoid a higher order allocation.  We could define this
++ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order.  The
++ * minimum value is 32
++ */
++#define SCSI_MAX_SG_SEGMENTS	128
++
++/*
++ * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
++ * is totally arbitrary; a setting of 2048 will get you at least 8 MB I/Os.
++ */
++#ifdef ARCH_HAS_SG_CHAIN
++#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
++#else
++#define SCSI_MAX_SG_CHAIN_SEGMENTS	SCSI_MAX_SG_SEGMENTS
++#endif
++
++/*
+  *	SCSI command lengths
+  */
+ 
+@@ -83,6 +102,7 @@ extern const unsigned char scsi_command_size[8];
+ #define READ_TOC              0x43
+ #define LOG_SELECT            0x4c
+ #define LOG_SENSE             0x4d
++#define XDWRITEREAD_10        0x53
+ #define MODE_SELECT_10        0x55
+ #define RESERVE_10            0x56
+ #define RELEASE_10            0x57
 diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
-index 3f47e52..a457fca 100644
+index 3f47e52..de28aab 100644
 --- a/include/scsi/scsi_cmnd.h
 +++ b/include/scsi/scsi_cmnd.h
-@@ -8,7 +8,6 @@
+@@ -2,16 +2,20 @@
+ #define _SCSI_SCSI_CMND_H
+ 
+ #include <linux/dma-mapping.h>
++#include <linux/blkdev.h>
+ #include <linux/list.h>
+ #include <linux/types.h>
+ #include <linux/timer.h>
  #include <linux/scatterlist.h>
  
- struct request;
+-struct request;
 -struct scatterlist;
  struct Scsi_Host;
  struct scsi_device;
  
-@@ -68,8 +67,8 @@ struct scsi_cmnd {
- 	void *request_buffer;		/* Actual requested buffer */
++struct scsi_data_buffer {
++	struct sg_table table;
++	unsigned length;
++	int resid;
++};
+ 
+ /* embedded in scsi_cmnd */
+ struct scsi_pointer {
+@@ -62,15 +66,11 @@ struct scsi_cmnd {
+ 	/* These elements define the operation we are about to perform */
+ #define MAX_COMMAND_SIZE	16
+ 	unsigned char cmnd[MAX_COMMAND_SIZE];
+-	unsigned request_bufflen;	/* Actual request size */
+ 
+ 	struct timer_list eh_timeout;	/* Used to time out the command. */
+-	void *request_buffer;		/* Actual requested buffer */
  
  	/* These elements define the operation we ultimately want to perform */
-+	struct sg_table sg_table;
- 	unsigned short use_sg;	/* Number of pieces of scatter-gather */
+-	unsigned short use_sg;	/* Number of pieces of scatter-gather */
 -	unsigned short __use_sg;
- 
+-
++	struct scsi_data_buffer sdb;
  	unsigned underflow;	/* Return error if less than
  				   this amount is transferred */
-@@ -88,7 +87,7 @@ struct scsi_cmnd {
+ 
+@@ -80,15 +80,11 @@ struct scsi_cmnd {
+ 				   reconnects.   Probably == sector
+ 				   size */
+ 
+-	int resid;		/* Number of bytes requested to be
+-				   transferred less actual number
+-				   transferred (0 if not supported) */
+-
+ 	struct request *request;	/* The command we are
  				   	   working on */
  
  #define SCSI_SENSE_BUFFERSIZE 	96
@@ -659544,23 +818309,69 @@
  				/* obtained by REQUEST SENSE when
  				 * CHECK CONDITION is received on original
  				 * command (auto-sense) */
-@@ -128,14 +127,14 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
+@@ -128,27 +124,55 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
  				 size_t *offset, size_t *len);
  extern void scsi_kunmap_atomic_sg(void *virt);
  
 -extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
-+extern int scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
- extern void scsi_free_sgtable(struct scsi_cmnd *);
+-extern void scsi_free_sgtable(struct scsi_cmnd *);
++extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask);
++extern void scsi_release_buffers(struct scsi_cmnd *cmd);
  
  extern int scsi_dma_map(struct scsi_cmnd *cmd);
  extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
  
- #define scsi_sg_count(cmd) ((cmd)->use_sg)
+-#define scsi_sg_count(cmd) ((cmd)->use_sg)
 -#define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
-+#define scsi_sglist(cmd) ((cmd)->sg_table.sgl)
- #define scsi_bufflen(cmd) ((cmd)->request_bufflen)
+-#define scsi_bufflen(cmd) ((cmd)->request_bufflen)
++static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
++{
++	return cmd->sdb.table.nents;
++}
++
++static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
++{
++	return cmd->sdb.table.sgl;
++}
++
++static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
++{
++	return cmd->sdb.length;
++}
  
  static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
+ {
+-	cmd->resid = resid;
++	cmd->sdb.resid = resid;
+ }
+ 
+ static inline int scsi_get_resid(struct scsi_cmnd *cmd)
+ {
+-	return cmd->resid;
++	return cmd->sdb.resid;
+ }
+ 
+ #define scsi_for_each_sg(cmd, sg, nseg, __i)			\
+ 	for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
+ 
++static inline int scsi_bidi_cmnd(struct scsi_cmnd *cmd)
++{
++	return blk_bidi_rq(cmd->request) &&
++		(cmd->request->next_rq->special != NULL);
++}
++
++static inline struct scsi_data_buffer *scsi_in(struct scsi_cmnd *cmd)
++{
++	return scsi_bidi_cmnd(cmd) ?
++		cmd->request->next_rq->special : &cmd->sdb;
++}
++
++static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd)
++{
++	return &cmd->sdb;
++}
++
+ #endif /* _SCSI_SCSI_CMND_H */
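
With request_buffer/use_sg gone, a low-level driver walks the data buffer exclusively through the accessors above. A minimal sketch of the typical mapping loop in a queuecommand path; example_hw_add_segment() is a made-up hardware hook and is left commented out.

#include <scsi/scsi_cmnd.h>

/* Sketch: DMA-map the command's scatterlist and feed each segment to the
 * controller. */
static int example_map_data(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return nseg;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* example_hw_add_segment(sg_dma_address(sg), sg_dma_len(sg)); */
	}

	/* The total transfer length is scsi_bufflen(cmd); any residual is
	 * reported later via scsi_set_resid(). */
	return 0;
}
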
 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
 index 6c2d80b..ab7acbe 100644
 --- a/include/scsi/scsi_device.h
@@ -659613,6 +818424,81 @@
  
  #define MODULE_ALIAS_SCSI_DEVICE(type) \
  	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
+diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
+index d21b891..25071d5 100644
+--- a/include/scsi/scsi_eh.h
++++ b/include/scsi/scsi_eh.h
+@@ -68,16 +68,15 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
+ extern int scsi_reset_provider(struct scsi_device *, int);
+ 
+ struct scsi_eh_save {
++	/* saved state */
+ 	int result;
+ 	enum dma_data_direction data_direction;
+ 	unsigned char cmd_len;
+ 	unsigned char cmnd[MAX_COMMAND_SIZE];
++	struct scsi_data_buffer sdb;
++	struct request *next_rq;
+ 
+-	void *buffer;
+-	unsigned bufflen;
+-	unsigned short use_sg;
+-	int resid;
+-
++	/* new command support */
+ 	struct scatterlist sense_sgl;
+ };
+ 
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index 0fd4746..5c58d59 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -39,9 +39,6 @@ struct blk_queue_tags;
+ #define DISABLE_CLUSTERING 0
+ #define ENABLE_CLUSTERING 1
+ 
+-#define DISABLE_SG_CHAINING 0
+-#define ENABLE_SG_CHAINING 1
+-
+ enum scsi_eh_timer_return {
+ 	EH_NOT_HANDLED,
+ 	EH_HANDLED,
+@@ -136,9 +133,9 @@ struct scsi_host_template {
+ 	 * the done callback is invoked.
+ 	 *
+ 	 * This is called to inform the LLD to transfer
+-	 * cmd->request_bufflen bytes. The cmd->use_sg speciefies the
++	 * scsi_bufflen(cmd) bytes. scsi_sg_count(cmd) specifies the
+ 	 * number of scatterlist entried in the command and
+-	 * cmd->request_buffer contains the scatterlist.
++	 * scsi_sglist(cmd) returns the scatterlist.
+ 	 *
+ 	 * return values: see queuecommand
+ 	 *
+@@ -446,15 +443,6 @@ struct scsi_host_template {
+ 	unsigned ordered_tag:1;
+ 
+ 	/*
+-	 * true if the low-level driver can support sg chaining. this
+-	 * will be removed eventually when all the drivers are
+-	 * converted to support sg chaining.
+-	 *
+-	 * Status: OBSOLETE
+-	 */
+-	unsigned use_sg_chaining:1;
+-
+-	/*
+ 	 * Countdown for host blocking with no commands outstanding
+ 	 */
+ 	unsigned int max_host_blocked;
+@@ -598,7 +586,6 @@ struct Scsi_Host {
+ 	unsigned unchecked_isa_dma:1;
+ 	unsigned use_clustering:1;
+ 	unsigned use_blk_tcq:1;
+-	unsigned use_sg_chaining:1;
+ 
+ 	/*
+ 	 * Host has requested that no further requests come through for the
 diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
 index 7ff6199..404f11d 100644
 --- a/include/scsi/scsi_transport_iscsi.h
@@ -660557,7 +819443,7 @@
  		return 1;
  	return 0;
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 8dd8ff2..314f510 100644
+index 8dd8ff2..05e0b6f 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -51,6 +51,7 @@
@@ -660568,7 +819454,15 @@
  
  #include <asm/pgtable.h>
  #include <asm/pgalloc.h>
-@@ -791,6 +792,31 @@ out:
+@@ -392,6 +393,7 @@ void fastcall __mmdrop(struct mm_struct *mm)
+ 	destroy_context(mm);
+ 	free_mm(mm);
+ }
++EXPORT_SYMBOL_GPL(__mmdrop);
+ 
+ /*
+  * Decrement the use count and release all resources for an mm.
+@@ -791,6 +793,31 @@ out:
  	return error;
  }
  
@@ -660600,7 +819494,7 @@
  /*
   *	Helper to unshare the files of the current task.
   *	We don't want to expose copy_files internals to
-@@ -1045,6 +1071,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1045,6 +1072,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  	copy_flags(clone_flags, p);
  	INIT_LIST_HEAD(&p->children);
  	INIT_LIST_HEAD(&p->sibling);
@@ -660611,7 +819505,7 @@
  	p->vfork_done = NULL;
  	spin_lock_init(&p->alloc_lock);
  
-@@ -1059,6 +1089,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1059,6 +1090,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  	p->prev_utime = cputime_zero;
  	p->prev_stime = cputime_zero;
  
@@ -660623,7 +819517,7 @@
  #ifdef CONFIG_TASK_XACCT
  	p->rchar = 0;		/* I/O counter: bytes read */
  	p->wchar = 0;		/* I/O counter: bytes written */
-@@ -1147,15 +1182,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1147,15 +1183,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  		goto bad_fork_cleanup_mm;
  	if ((retval = copy_namespaces(clone_flags, p)))
  		goto bad_fork_cleanup_keys;
@@ -660643,7 +819537,7 @@
  
  		if (clone_flags & CLONE_NEWPID) {
  			retval = pid_ns_prepare_proc(task_active_pid_ns(p));
-@@ -1196,6 +1233,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1196,6 +1234,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  #ifdef TIF_SYSCALL_EMU
  	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
  #endif
@@ -660651,7 +819545,7 @@
  
  	/* Our parent execution domain becomes current domain
  	   These must match for thread signalling to apply */
-@@ -1224,9 +1262,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1224,9 +1263,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  	/* Need tasklist lock for parent etc handling! */
  	write_lock_irq(&tasklist_lock);
  
@@ -660661,7 +819555,7 @@
  	/*
  	 * The task hasn't been attached yet, so its cpus_allowed mask will
  	 * not be changed, nor will its assigned CPU.
-@@ -1237,6 +1272,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1237,6 +1273,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  	 * parent's CPU). This avoids alot of nasty races.
  	 */
  	p->cpus_allowed = current->cpus_allowed;
@@ -660669,7 +819563,7 @@
  	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
  			!cpu_online(task_cpu(p))))
  		set_task_cpu(p, smp_processor_id());
-@@ -1317,6 +1353,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1317,6 +1354,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  bad_fork_free_pid:
  	if (pid != &init_struct_pid)
  		free_pid(pid);
@@ -670145,6 +829039,18 @@
  
  	return ret;
  }
+diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
+index 56cb009..beee5b3 100644
+--- a/kernel/sys_ni.c
++++ b/kernel/sys_ni.c
+@@ -131,6 +131,7 @@ cond_syscall(sys32_sysctl);
+ cond_syscall(ppc_rtas);
+ cond_syscall(sys_spu_run);
+ cond_syscall(sys_spu_create);
++cond_syscall(sys_subpage_prot);
+ 
+ /* mmu depending weak syscall entries */
+ cond_syscall(sys_mprotect);
 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
 index c68f68d..357b68b 100644
 --- a/kernel/sysctl.c
@@ -691756,7 +850662,7 @@
  
  static int __init arp_proc_init(void)
 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
-index f18e88b..d4dc4eb 100644
+index f18e88b..a224106 100644
 --- a/net/ipv4/cipso_ipv4.c
 +++ b/net/ipv4/cipso_ipv4.c
 @@ -63,7 +63,7 @@ struct cipso_v4_domhsh_entry {
@@ -691768,6 +850674,200 @@
  
  /* Label mapping cache */
  int cipso_v4_cache_enabled = 1;
+@@ -348,6 +348,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
+ 			atomic_inc(&entry->lsm_data->refcount);
+ 			secattr->cache = entry->lsm_data;
+ 			secattr->flags |= NETLBL_SECATTR_CACHE;
++			secattr->type = NETLBL_NLTYPE_CIPSOV4;
+ 			if (prev_entry == NULL) {
+ 				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
+ 				return 0;
+@@ -865,7 +866,7 @@ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
+ 	}
+ 
+ 	for (;;) {
+-		host_spot = netlbl_secattr_catmap_walk(secattr->mls_cat,
++		host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
+ 						       host_spot + 1);
+ 		if (host_spot < 0)
+ 			break;
+@@ -948,7 +949,7 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
+ 				return -EPERM;
+ 			break;
+ 		}
+-		ret_val = netlbl_secattr_catmap_setbit(secattr->mls_cat,
++		ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
+ 						       host_spot,
+ 						       GFP_ATOMIC);
+ 		if (ret_val != 0)
+@@ -1014,7 +1015,8 @@ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def,
+ 	u32 cat_iter = 0;
+ 
+ 	for (;;) {
+-		cat = netlbl_secattr_catmap_walk(secattr->mls_cat, cat + 1);
++		cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
++						 cat + 1);
+ 		if (cat < 0)
+ 			break;
+ 		if ((cat_iter + 2) > net_cat_len)
+@@ -1049,7 +1051,7 @@ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
+ 	u32 iter;
+ 
+ 	for (iter = 0; iter < net_cat_len; iter += 2) {
+-		ret_val = netlbl_secattr_catmap_setbit(secattr->mls_cat,
++		ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
+ 				ntohs(get_unaligned((__be16 *)&net_cat[iter])),
+ 				GFP_ATOMIC);
+ 		if (ret_val != 0)
+@@ -1130,7 +1132,8 @@ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
+ 		return -ENOSPC;
+ 
+ 	for (;;) {
+-		iter = netlbl_secattr_catmap_walk(secattr->mls_cat, iter + 1);
++		iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
++						  iter + 1);
+ 		if (iter < 0)
+ 			break;
+ 		cat_size += (iter == 0 ? 0 : sizeof(u16));
+@@ -1138,7 +1141,8 @@ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
+ 			return -ENOSPC;
+ 		array[array_cnt++] = iter;
+ 
+-		iter = netlbl_secattr_catmap_walk_rng(secattr->mls_cat, iter);
++		iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat,
++						      iter);
+ 		if (iter < 0)
+ 			return -EFAULT;
+ 		cat_size += sizeof(u16);
+@@ -1191,7 +1195,7 @@ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
+ 		else
+ 			cat_low = 0;
+ 
+-		ret_val = netlbl_secattr_catmap_setrng(secattr->mls_cat,
++		ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat,
+ 						       cat_low,
+ 						       cat_high,
+ 						       GFP_ATOMIC);
+@@ -1251,7 +1255,9 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
+ 	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
+ 		return -EPERM;
+ 
+-	ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level);
++	ret_val = cipso_v4_map_lvl_hton(doi_def,
++					secattr->attr.mls.lvl,
++					&level);
+ 	if (ret_val != 0)
+ 		return ret_val;
+ 
+@@ -1303,12 +1309,13 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
+ 	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
+ 	if (ret_val != 0)
+ 		return ret_val;
+-	secattr->mls_lvl = level;
++	secattr->attr.mls.lvl = level;
+ 	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
+ 
+ 	if (tag_len > 4) {
+-		secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+-		if (secattr->mls_cat == NULL)
++		secattr->attr.mls.cat =
++		                       netlbl_secattr_catmap_alloc(GFP_ATOMIC);
++		if (secattr->attr.mls.cat == NULL)
+ 			return -ENOMEM;
+ 
+ 		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
+@@ -1316,7 +1323,7 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
+ 						    tag_len - 4,
+ 						    secattr);
+ 		if (ret_val != 0) {
+-			netlbl_secattr_catmap_free(secattr->mls_cat);
++			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
+ 			return ret_val;
+ 		}
+ 
+@@ -1350,7 +1357,9 @@ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
+ 	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
+ 		return -EPERM;
+ 
+-	ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level);
++	ret_val = cipso_v4_map_lvl_hton(doi_def,
++					secattr->attr.mls.lvl,
++					&level);
+ 	if (ret_val != 0)
+ 		return ret_val;
+ 
+@@ -1396,12 +1405,13 @@ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
+ 	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
+ 	if (ret_val != 0)
+ 		return ret_val;
+-	secattr->mls_lvl = level;
++	secattr->attr.mls.lvl = level;
+ 	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
+ 
+ 	if (tag_len > 4) {
+-		secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+-		if (secattr->mls_cat == NULL)
++		secattr->attr.mls.cat =
++			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
++		if (secattr->attr.mls.cat == NULL)
+ 			return -ENOMEM;
+ 
+ 		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
+@@ -1409,7 +1419,7 @@ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
+ 						     tag_len - 4,
+ 						     secattr);
+ 		if (ret_val != 0) {
+-			netlbl_secattr_catmap_free(secattr->mls_cat);
++			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
+ 			return ret_val;
+ 		}
+ 
+@@ -1443,7 +1453,9 @@ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
+ 	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
+ 		return -EPERM;
+ 
+-	ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level);
++	ret_val = cipso_v4_map_lvl_hton(doi_def,
++					secattr->attr.mls.lvl,
++					&level);
+ 	if (ret_val != 0)
+ 		return ret_val;
+ 
+@@ -1488,12 +1500,13 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
+ 	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
+ 	if (ret_val != 0)
+ 		return ret_val;
+-	secattr->mls_lvl = level;
++	secattr->attr.mls.lvl = level;
+ 	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
+ 
+ 	if (tag_len > 4) {
+-		secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+-		if (secattr->mls_cat == NULL)
++		secattr->attr.mls.cat =
++			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
++		if (secattr->attr.mls.cat == NULL)
+ 			return -ENOMEM;
+ 
+ 		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
+@@ -1501,7 +1514,7 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
+ 						    tag_len - 4,
+ 						    secattr);
+ 		if (ret_val != 0) {
+-			netlbl_secattr_catmap_free(secattr->mls_cat);
++			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
+ 			return ret_val;
+ 		}
+ 
+@@ -1850,6 +1863,8 @@ static int cipso_v4_getattr(const unsigned char *cipso,
+ 		ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
+ 		break;
+ 	}
++	if (ret_val == 0)
++		secattr->type = NETLBL_NLTYPE_CIPSOV4;
+ 
+ getattr_return:
+ 	rcu_read_unlock();
 diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
 index 0301dd4..0c0c73f 100644
 --- a/net/ipv4/datagram.c
@@ -737222,7 +896322,7 @@
 +module_init(xt_rateest_tg_init);
 +module_exit(xt_rateest_tg_fini);
 diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
-index 235806e..b11b3ec 100644
+index 235806e..7708e20 100644
 --- a/net/netfilter/xt_SECMARK.c
 +++ b/net/netfilter/xt_SECMARK.c
 @@ -20,7 +20,7 @@
@@ -737249,7 +896349,18 @@
  {
  	u32 secmark = 0;
  	const struct xt_secmark_target_info *info = targinfo;
-@@ -81,9 +81,10 @@ static bool checkentry_selinux(struct xt_secmark_target_info *info)
+@@ -72,18 +72,20 @@ static bool checkentry_selinux(struct xt_secmark_target_info *info)
+ 		return false;
+ 	}
+ 
+-	err = selinux_relabel_packet_permission(sel->selsid);
++	err = selinux_secmark_relabel_packet_permission(sel->selsid);
+ 	if (err) {
+ 		printk(KERN_INFO PFX "unable to obtain relabeling permission\n");
+ 		return false;
+ 	}
+ 
++	selinux_secmark_refcount_inc();
  	return true;
  }
  
@@ -737263,11 +896374,19 @@
  {
  	struct xt_secmark_target_info *info = targinfo;
  
-@@ -109,12 +110,12 @@ static bool checkentry(const char *tablename, const void *entry,
+@@ -109,12 +111,21 @@ static bool checkentry(const char *tablename, const void *entry,
  	return true;
  }
  
 -static struct xt_target xt_secmark_target[] __read_mostly = {
++void secmark_tg_destroy(const struct xt_target *target, void *targinfo)
++{
++	switch (mode) {
++	case SECMARK_MODE_SEL:
++		selinux_secmark_refcount_dec();
++	}
++}
++
 +static struct xt_target secmark_tg_reg[] __read_mostly = {
  	{
  		.name		= "SECMARK",
@@ -737275,17 +896394,19 @@
 -		.checkentry	= checkentry,
 -		.target		= target,
 +		.checkentry	= secmark_tg_check,
++		.destroy	= secmark_tg_destroy,
 +		.target		= secmark_tg,
  		.targetsize	= sizeof(struct xt_secmark_target_info),
  		.table		= "mangle",
  		.me		= THIS_MODULE,
-@@ -122,24 +123,23 @@ static struct xt_target xt_secmark_target[] __read_mostly = {
+@@ -122,24 +133,24 @@ static struct xt_target xt_secmark_target[] __read_mostly = {
  	{
  		.name		= "SECMARK",
  		.family		= AF_INET6,
 -		.checkentry	= checkentry,
 -		.target		= target,
 +		.checkentry	= secmark_tg_check,
++		.destroy	= secmark_tg_destroy,
 +		.target		= secmark_tg,
  		.targetsize	= sizeof(struct xt_secmark_target_info),
  		.table		= "mangle",
@@ -741897,6 +901018,2233 @@
  MODULE_LICENSE("GPL");
  MODULE_ALIAS("ipt_u32");
  MODULE_ALIAS("ip6t_u32");
+diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
+index ba0ca8d..becf91a 100644
+--- a/net/netlabel/netlabel_cipso_v4.c
++++ b/net/netlabel/netlabel_cipso_v4.c
+@@ -38,6 +38,7 @@
+ #include <net/genetlink.h>
+ #include <net/netlabel.h>
+ #include <net/cipso_ipv4.h>
++#include <asm/atomic.h>
+ 
+ #include "netlabel_user.h"
+ #include "netlabel_cipso_v4.h"
+@@ -421,7 +422,7 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
+ 		break;
+ 	}
+ 	if (ret_val == 0)
+-		netlbl_mgmt_protocount_inc();
++		atomic_inc(&netlabel_mgmt_protocount);
+ 
+ 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD,
+ 					      &audit_info);
+@@ -698,7 +699,7 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
+ 				      &audit_info,
+ 				      netlbl_cipsov4_doi_free);
+ 	if (ret_val == 0)
+-		netlbl_mgmt_protocount_dec();
++		atomic_dec(&netlabel_mgmt_protocount);
+ 
+ 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_DEL,
+ 					      &audit_info);
+diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
+index b3675bd..9a8ea01 100644
+--- a/net/netlabel/netlabel_domainhash.c
++++ b/net/netlabel/netlabel_domainhash.c
+@@ -54,9 +54,6 @@ struct netlbl_domhsh_tbl {
+  * hash table should be okay */
+ static DEFINE_SPINLOCK(netlbl_domhsh_lock);
+ static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL;
+-
+-/* Default domain mapping */
+-static DEFINE_SPINLOCK(netlbl_domhsh_def_lock);
+ static struct netlbl_dom_map *netlbl_domhsh_def = NULL;
+ 
+ /*
+@@ -109,17 +106,14 @@ static u32 netlbl_domhsh_hash(const char *key)
+ /**
+  * netlbl_domhsh_search - Search for a domain entry
+  * @domain: the domain
+- * @def: return default if no match is found
+  *
+  * Description:
+  * Searches the domain hash table and returns a pointer to the hash table
+- * entry if found, otherwise NULL is returned.  If @def is non-zero and a
+- * match is not found in the domain hash table the default mapping is returned
+- * if it exists.  The caller is responsibile for the rcu hash table locks
+- * (i.e. the caller much call rcu_read_[un]lock()).
++ * entry if found, otherwise NULL is returned.  The caller is responsible for
++ * the rcu hash table locks (i.e. the caller must call rcu_read_[un]lock()).
+  *
+  */
+-static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain, u32 def)
++static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
+ {
+ 	u32 bkt;
+ 	struct netlbl_dom_map *iter;
+@@ -133,10 +127,31 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain, u32 def)
+ 				return iter;
+ 	}
+ 
+-	if (def != 0) {
+-		iter = rcu_dereference(netlbl_domhsh_def);
+-		if (iter != NULL && iter->valid)
+-			return iter;
++	return NULL;
++}
++
++/**
++ * netlbl_domhsh_search_def - Search for a domain entry
++ * @domain: the domain
++ * @def: return default if no match is found
++ *
++ * Description:
++ * Searches the domain hash table and returns a pointer to the hash table
++ * entry if an exact match is found, if an exact match is not present in the
++ * hash table then the default entry is returned if valid otherwise NULL is
++ * returned.  The caller is responsibile for the rcu hash table locks
++ * (i.e. the caller much call rcu_read_[un]lock()).
++ *
++ */
++static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
++{
++	struct netlbl_dom_map *entry;
++
++	entry = netlbl_domhsh_search(domain);
++	if (entry == NULL) {
++		entry = rcu_dereference(netlbl_domhsh_def);
++		if (entry != NULL && entry->valid)
++			return entry;
+ 	}
+ 
+ 	return NULL;
+@@ -221,24 +236,22 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
+ 	INIT_RCU_HEAD(&entry->rcu);
+ 
+ 	rcu_read_lock();
++	spin_lock(&netlbl_domhsh_lock);
+ 	if (entry->domain != NULL) {
+ 		bkt = netlbl_domhsh_hash(entry->domain);
+-		spin_lock(&netlbl_domhsh_lock);
+-		if (netlbl_domhsh_search(entry->domain, 0) == NULL)
++		if (netlbl_domhsh_search(entry->domain) == NULL)
+ 			list_add_tail_rcu(&entry->list,
+ 				    &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
+ 		else
+ 			ret_val = -EEXIST;
+-		spin_unlock(&netlbl_domhsh_lock);
+ 	} else {
+ 		INIT_LIST_HEAD(&entry->list);
+-		spin_lock(&netlbl_domhsh_def_lock);
+ 		if (rcu_dereference(netlbl_domhsh_def) == NULL)
+ 			rcu_assign_pointer(netlbl_domhsh_def, entry);
+ 		else
+ 			ret_val = -EEXIST;
+-		spin_unlock(&netlbl_domhsh_def_lock);
+ 	}
++	spin_unlock(&netlbl_domhsh_lock);
+ 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info);
+ 	if (audit_buf != NULL) {
+ 		audit_log_format(audit_buf,
+@@ -307,7 +320,10 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
+ 	struct audit_buffer *audit_buf;
+ 
+ 	rcu_read_lock();
+-	entry = netlbl_domhsh_search(domain, (domain != NULL ? 0 : 1));
++	if (domain)
++		entry = netlbl_domhsh_search(domain);
++	else
++		entry = netlbl_domhsh_search_def(domain);
+ 	if (entry == NULL)
+ 		goto remove_return;
+ 	switch (entry->type) {
+@@ -316,23 +332,16 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
+ 					   entry->domain);
+ 		break;
+ 	}
+-	if (entry != rcu_dereference(netlbl_domhsh_def)) {
+-		spin_lock(&netlbl_domhsh_lock);
+-		if (entry->valid) {
+-			entry->valid = 0;
++	spin_lock(&netlbl_domhsh_lock);
++	if (entry->valid) {
++		entry->valid = 0;
++		if (entry != rcu_dereference(netlbl_domhsh_def))
+ 			list_del_rcu(&entry->list);
+-			ret_val = 0;
+-		}
+-		spin_unlock(&netlbl_domhsh_lock);
+-	} else {
+-		spin_lock(&netlbl_domhsh_def_lock);
+-		if (entry->valid) {
+-			entry->valid = 0;
++		else
+ 			rcu_assign_pointer(netlbl_domhsh_def, NULL);
+-			ret_val = 0;
+-		}
+-		spin_unlock(&netlbl_domhsh_def_lock);
++		ret_val = 0;
+ 	}
++	spin_unlock(&netlbl_domhsh_lock);
+ 
+ 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
+ 	if (audit_buf != NULL) {
+@@ -377,7 +386,7 @@ int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info)
+  */
+ struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain)
+ {
+-	return netlbl_domhsh_search(domain, 1);
++	return netlbl_domhsh_search_def(domain);
+ }
+ 
+ /**
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index 4f50949..c69e3e1 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -34,6 +34,7 @@
+ #include <net/netlabel.h>
+ #include <net/cipso_ipv4.h>
+ #include <asm/bug.h>
++#include <asm/atomic.h>
+ 
+ #include "netlabel_domainhash.h"
+ #include "netlabel_unlabeled.h"
+@@ -262,7 +263,7 @@ int netlbl_enabled(void)
+ 	/* At some point we probably want to expose this mechanism to the user
+ 	 * as well so that admins can toggle NetLabel regardless of the
+ 	 * configuration */
+-	return (netlbl_mgmt_protocount_value() > 0 ? 1 : 0);
++	return (atomic_read(&netlabel_mgmt_protocount) > 0);
+ }
+ 
+ /**
+@@ -311,7 +312,7 @@ socket_setattr_return:
+  * @secattr: the security attributes
+  *
+  * Description:
+- * Examines the given sock to see any NetLabel style labeling has been
++ * Examines the given sock to see if any NetLabel style labeling has been
+  * applied to the sock, if so it parses the socket label and returns the
+  * security attributes in @secattr.  Returns zero on success, negative values
+  * on failure.
+@@ -319,18 +320,13 @@ socket_setattr_return:
+  */
+ int netlbl_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
+ {
+-	int ret_val;
+-
+-	ret_val = cipso_v4_sock_getattr(sk, secattr);
+-	if (ret_val == 0)
+-		return 0;
+-
+-	return netlbl_unlabel_getattr(secattr);
++	return cipso_v4_sock_getattr(sk, secattr);
+ }
+ 
+ /**
+  * netlbl_skbuff_getattr - Determine the security attributes of a packet
+  * @skb: the packet
++ * @family: protocol family
+  * @secattr: the security attributes
+  *
+  * Description:
+@@ -341,13 +337,14 @@ int netlbl_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
+  *
+  */
+ int netlbl_skbuff_getattr(const struct sk_buff *skb,
++			  u16 family,
+ 			  struct netlbl_lsm_secattr *secattr)
+ {
+ 	if (CIPSO_V4_OPTEXIST(skb) &&
+ 	    cipso_v4_skbuff_getattr(skb, secattr) == 0)
+ 		return 0;
+ 
+-	return netlbl_unlabel_getattr(secattr);
++	return netlbl_unlabel_getattr(skb, family, secattr);
+ }
+ 
+ /**
+@@ -431,6 +428,10 @@ static int __init netlbl_init(void)
+ 	if (ret_val != 0)
+ 		goto init_failure;
+ 
++	ret_val = netlbl_unlabel_init(NETLBL_UNLHSH_BITSIZE);
++	if (ret_val != 0)
++		goto init_failure;
++
+ 	ret_val = netlbl_netlink_init();
+ 	if (ret_val != 0)
+ 		goto init_failure;
+diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
+index 9c41464..e2258dc 100644
+--- a/net/netlabel/netlabel_mgmt.c
++++ b/net/netlabel/netlabel_mgmt.c
+@@ -37,14 +37,14 @@
+ #include <net/genetlink.h>
+ #include <net/netlabel.h>
+ #include <net/cipso_ipv4.h>
++#include <asm/atomic.h>
+ 
+ #include "netlabel_domainhash.h"
+ #include "netlabel_user.h"
+ #include "netlabel_mgmt.h"
+ 
+-/* NetLabel configured protocol count */
+-static DEFINE_SPINLOCK(netlabel_mgmt_protocount_lock);
+-static u32 netlabel_mgmt_protocount = 0;
++/* NetLabel configured protocol counter */
++atomic_t netlabel_mgmt_protocount = ATOMIC_INIT(0);
+ 
+ /* Argument struct for netlbl_domhsh_walk() */
+ struct netlbl_domhsh_walk_arg {
+@@ -71,63 +71,6 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = {
+ };
+ 
+ /*
+- * NetLabel Misc Management Functions
+- */
+-
+-/**
+- * netlbl_mgmt_protocount_inc - Increment the configured labeled protocol count
+- *
+- * Description:
+- * Increment the number of labeled protocol configurations in the current
+- * NetLabel configuration.  Keep track of this for use in determining if
+- * NetLabel label enforcement should be active/enabled or not in the LSM.
+- *
+- */
+-void netlbl_mgmt_protocount_inc(void)
+-{
+-	spin_lock(&netlabel_mgmt_protocount_lock);
+-	netlabel_mgmt_protocount++;
+-	spin_unlock(&netlabel_mgmt_protocount_lock);
+-}
+-
+-/**
+- * netlbl_mgmt_protocount_dec - Decrement the configured labeled protocol count
+- *
+- * Description:
+- * Decrement the number of labeled protocol configurations in the current
+- * NetLabel configuration.  Keep track of this for use in determining if
+- * NetLabel label enforcement should be active/enabled or not in the LSM.
+- *
+- */
+-void netlbl_mgmt_protocount_dec(void)
+-{
+-	spin_lock(&netlabel_mgmt_protocount_lock);
+-	if (netlabel_mgmt_protocount > 0)
+-		netlabel_mgmt_protocount--;
+-	spin_unlock(&netlabel_mgmt_protocount_lock);
+-}
+-
+-/**
+- * netlbl_mgmt_protocount_value - Return the number of configured protocols
+- *
+- * Description:
+- * Return the number of labeled protocols in the current NetLabel
+- * configuration.  This value is useful in  determining if NetLabel label
+- * enforcement should be active/enabled or not in the LSM.
+- *
+- */
+-u32 netlbl_mgmt_protocount_value(void)
+-{
+-	u32 val;
+-
+-	rcu_read_lock();
+-	val = netlabel_mgmt_protocount;
+-	rcu_read_unlock();
+-
+-	return val;
+-}
+-
+-/*
+  * NetLabel Command Handlers
+  */
+ 
+diff --git a/net/netlabel/netlabel_mgmt.h b/net/netlabel/netlabel_mgmt.h
+index ccb2b39..a43bff1 100644
+--- a/net/netlabel/netlabel_mgmt.h
++++ b/net/netlabel/netlabel_mgmt.h
+@@ -32,6 +32,7 @@
+ #define _NETLABEL_MGMT_H
+ 
+ #include <net/netlabel.h>
++#include <asm/atomic.h>
+ 
+ /*
+  * The following NetLabel payloads are supported by the management interface.
+@@ -168,9 +169,7 @@ enum {
+ /* NetLabel protocol functions */
+ int netlbl_mgmt_genl_init(void);
+ 
+-/* NetLabel misc management functions */
+-void netlbl_mgmt_protocount_inc(void);
+-void netlbl_mgmt_protocount_dec(void);
+-u32 netlbl_mgmt_protocount_value(void);
++/* NetLabel configured protocol reference counter */
++extern atomic_t netlabel_mgmt_protocount;
+ 
+ #endif
+diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
+index 3482924..42e81fd 100644
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -10,7 +10,7 @@
+  */
+ 
+ /*
+- * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
++ * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 - 2007
+  *
+  * This program is free software;  you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+@@ -36,22 +36,92 @@
+ #include <linux/string.h>
+ #include <linux/skbuff.h>
+ #include <linux/audit.h>
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <linux/notifier.h>
++#include <linux/netdevice.h>
++#include <linux/security.h>
+ #include <net/sock.h>
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
+-
++#include <net/ip.h>
++#include <net/ipv6.h>
++#include <net/net_namespace.h>
+ #include <net/netlabel.h>
+ #include <asm/bug.h>
++#include <asm/atomic.h>
+ 
+ #include "netlabel_user.h"
+ #include "netlabel_domainhash.h"
+ #include "netlabel_unlabeled.h"
++#include "netlabel_mgmt.h"
++
++/* NOTE: at present we always use init's network namespace since we don't
++ *       presently support different namespaces even though the majority of
++ *       the functions in this file are "namespace safe" */
++
++/* The unlabeled connection hash table which we use to map network interfaces
++ * and addresses of unlabeled packets to a user specified secid value for the
++ * LSM.  The hash table is used to lookup the network interface entry
++ * (struct netlbl_unlhsh_iface) and then the interface entry is used to
++ * lookup an IP address match from an ordered list.  If a network interface
++ * match can not be found in the hash table then the default entry
++ * (netlbl_unlhsh_def) is used.  The IP address entry list
++ * (struct netlbl_unlhsh_addr) is ordered such that the entries with a
++ * larger netmask come first.
++ */
++struct netlbl_unlhsh_tbl {
++	struct list_head *tbl;
++	u32 size;
++};
++struct netlbl_unlhsh_addr4 {
++	__be32 addr;
++	__be32 mask;
++	u32 secid;
++
++	u32 valid;
++	struct list_head list;
++	struct rcu_head rcu;
++};
++struct netlbl_unlhsh_addr6 {
++	struct in6_addr addr;
++	struct in6_addr mask;
++	u32 secid;
++
++	u32 valid;
++	struct list_head list;
++	struct rcu_head rcu;
++};
++struct netlbl_unlhsh_iface {
++	int ifindex;
++	struct list_head addr4_list;
++	struct list_head addr6_list;
++
++	u32 valid;
++	struct list_head list;
++	struct rcu_head rcu;
++};
++
++/* Argument struct for netlbl_unlhsh_walk() */
++struct netlbl_unlhsh_walk_arg {
++	struct netlink_callback *nl_cb;
++	struct sk_buff *skb;
++	u32 seq;
++};
++
++/* Unlabeled connection hash table */
++/* updates should be so rare that having one spinlock for the entire
++ * hash table should be okay */
++static DEFINE_SPINLOCK(netlbl_unlhsh_lock);
++static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL;
++static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL;
+ 
+ /* Accept unlabeled packets flag */
+-static DEFINE_SPINLOCK(netlabel_unlabel_acceptflg_lock);
+ static u8 netlabel_unlabel_acceptflg = 0;
+ 
+-/* NetLabel Generic NETLINK CIPSOv4 family */
++/* NetLabel Generic NETLINK unlabeled family */
+ static struct genl_family netlbl_unlabel_gnl_family = {
+ 	.id = GENL_ID_GENERATE,
+ 	.hdrsize = 0,
+@@ -63,11 +133,841 @@ static struct genl_family netlbl_unlabel_gnl_family = {
+ /* NetLabel Netlink attribute policy */
+ static const struct nla_policy netlbl_unlabel_genl_policy[NLBL_UNLABEL_A_MAX + 1] = {
+ 	[NLBL_UNLABEL_A_ACPTFLG] = { .type = NLA_U8 },
++	[NLBL_UNLABEL_A_IPV6ADDR] = { .type = NLA_BINARY,
++				      .len = sizeof(struct in6_addr) },
++	[NLBL_UNLABEL_A_IPV6MASK] = { .type = NLA_BINARY,
++				      .len = sizeof(struct in6_addr) },
++	[NLBL_UNLABEL_A_IPV4ADDR] = { .type = NLA_BINARY,
++				      .len = sizeof(struct in_addr) },
++	[NLBL_UNLABEL_A_IPV4MASK] = { .type = NLA_BINARY,
++				      .len = sizeof(struct in_addr) },
++	[NLBL_UNLABEL_A_IFACE] = { .type = NLA_NUL_STRING,
++				   .len = IFNAMSIZ - 1 },
++	[NLBL_UNLABEL_A_SECCTX] = { .type = NLA_BINARY }
+ };
+ 
+ /*
+- * Helper Functions
++ * Audit Helper Functions
++ */
++
++/**
++ * netlbl_unlabel_audit_addr4 - Audit an IPv4 address
++ * @audit_buf: audit buffer
++ * @dev: network interface
++ * @addr: IP address
++ * @mask: IP address mask
++ *
++ * Description:
++ * Write the IPv4 address and address mask, if necessary, to @audit_buf.
++ *
++ */
++static void netlbl_unlabel_audit_addr4(struct audit_buffer *audit_buf,
++				     const char *dev,
++				     __be32 addr, __be32 mask)
++{
++	u32 mask_val = ntohl(mask);
++
++	if (dev != NULL)
++		audit_log_format(audit_buf, " netif=%s", dev);
++	audit_log_format(audit_buf, " src=" NIPQUAD_FMT, NIPQUAD(addr));
++	if (mask_val != 0xffffffff) {
++		u32 mask_len = 0;
++		while (mask_val > 0) {
++			mask_val <<= 1;
++			mask_len++;
++		}
++		audit_log_format(audit_buf, " src_prefixlen=%d", mask_len);
++	}
++}
++
++/**
++ * netlbl_unlabel_audit_addr6 - Audit an IPv6 address
++ * @audit_buf: audit buffer
++ * @dev: network interface
++ * @addr: IP address
++ * @mask: IP address mask
++ *
++ * Description:
++ * Write the IPv6 address and address mask, if necessary, to @audit_buf.
++ *
++ */
++static void netlbl_unlabel_audit_addr6(struct audit_buffer *audit_buf,
++				     const char *dev,
++				     const struct in6_addr *addr,
++				     const struct in6_addr *mask)
++{
++	if (dev != NULL)
++		audit_log_format(audit_buf, " netif=%s", dev);
++	audit_log_format(audit_buf, " src=" NIP6_FMT, NIP6(*addr));
++	if (ntohl(mask->s6_addr32[3]) != 0xffffffff) {
++		u32 mask_len = 0;
++		u32 mask_val;
++		int iter = -1;
++		while (ntohl(mask->s6_addr32[++iter]) == 0xffffffff)
++			mask_len += 32;
++		mask_val = ntohl(mask->s6_addr32[iter]);
++		while (mask_val > 0) {
++			mask_val <<= 1;
++			mask_len++;
++		}
++		audit_log_format(audit_buf, " src_prefixlen=%d", mask_len);
++	}
++}
++
++/*
++ * Unlabeled Connection Hash Table Functions
++ */
++
++/**
++ * netlbl_unlhsh_free_addr4 - Frees an IPv4 address entry from the hash table
++ * @entry: the entry's RCU field
++ *
++ * Description:
++ * This function is designed to be used as a callback to the call_rcu()
++ * function so that memory allocated to a hash table address entry can be
++ * released safely.
++ *
++ */
++static void netlbl_unlhsh_free_addr4(struct rcu_head *entry)
++{
++	struct netlbl_unlhsh_addr4 *ptr;
++
++	ptr = container_of(entry, struct netlbl_unlhsh_addr4, rcu);
++	kfree(ptr);
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++/**
++ * netlbl_unlhsh_free_addr6 - Frees an IPv6 address entry from the hash table
++ * @entry: the entry's RCU field
++ *
++ * Description:
++ * This function is designed to be used as a callback to the call_rcu()
++ * function so that memory allocated to a hash table address entry can be
++ * released safely.
++ *
++ */
++static void netlbl_unlhsh_free_addr6(struct rcu_head *entry)
++{
++	struct netlbl_unlhsh_addr6 *ptr;
++
++	ptr = container_of(entry, struct netlbl_unlhsh_addr6, rcu);
++	kfree(ptr);
++}
++#endif /* IPv6 */
++
++/**
++ * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table
++ * @entry: the entry's RCU field
++ *
++ * Description:
++ * This function is designed to be used as a callback to the call_rcu()
++ * function so that memory allocated to a hash table interface entry can be
++ * released safely.  It is important to note that this function does not free
++ * the IPv4 and IPv6 address lists contained as part of an interface entry.  It
++ * is up to the rest of the code to make sure an interface entry is only freed
++ * once its address lists are empty.
++ *
++ */
++static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
++{
++	struct netlbl_unlhsh_iface *iface;
++	struct netlbl_unlhsh_addr4 *iter4;
++	struct netlbl_unlhsh_addr4 *tmp4;
++	struct netlbl_unlhsh_addr6 *iter6;
++	struct netlbl_unlhsh_addr6 *tmp6;
++
++	iface = container_of(entry, struct netlbl_unlhsh_iface, rcu);
++
++	/* no need for locks here since we are the only one with access to this
++	 * structure */
++
++	list_for_each_entry_safe(iter4, tmp4, &iface->addr4_list, list)
++		if (iter4->valid) {
++			list_del_rcu(&iter4->list);
++			kfree(iter4);
++		}
++	list_for_each_entry_safe(iter6, tmp6, &iface->addr6_list, list)
++		if (iter6->valid) {
++			list_del_rcu(&iter6->list);
++			kfree(iter6);
++		}
++	kfree(iface);
++}
++
++/**
++ * netlbl_unlhsh_hash - Hashing function for the hash table
++ * @ifindex: the network interface/device to hash
++ *
++ * Description:
++ * This is the hashing function for the unlabeled hash table; it returns the
++ * bucket number for the given device/interface.  The caller is responsible for
++ * calling the rcu_read_[un]lock() functions.
++ *
+  */
++static u32 netlbl_unlhsh_hash(int ifindex)
++{
++	/* this is taken _almost_ directly from
++	 * security/selinux/netif.c:sel_netif_hasfn() as they do pretty much
++	 * the same thing */
++	return ifindex & (rcu_dereference(netlbl_unlhsh)->size - 1);
++}
++
++/**
++ * netlbl_unlhsh_search_addr4 - Search for a matching IPv4 address entry
++ * @addr: IPv4 address
++ * @iface: the network interface entry
++ *
++ * Description:
++ * Searches the IPv4 address list of the network interface specified by @iface.
++ * If a matching address entry is found it is returned, otherwise NULL is
++ * returned.  The caller is responsible for calling the rcu_read_[un]lock()
++ * functions.
++ *
++ */
++static struct netlbl_unlhsh_addr4 *netlbl_unlhsh_search_addr4(
++	                               __be32 addr,
++	                               const struct netlbl_unlhsh_iface *iface)
++{
++	struct netlbl_unlhsh_addr4 *iter;
++
++	list_for_each_entry_rcu(iter, &iface->addr4_list, list)
++		if (iter->valid && (addr & iter->mask) == iter->addr)
++			return iter;
++
++	return NULL;
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++/**
++ * netlbl_unlhsh_search_addr6 - Search for a matching IPv6 address entry
++ * @addr: IPv6 address
++ * @iface: the network interface entry
++ *
++ * Description:
++ * Searches the IPv6 address list of the network interface specified by @iface.
++ * If a matching address entry is found it is returned, otherwise NULL is
++ * returned.  The caller is responsible for calling the rcu_read_[un]lock()
++ * functions.
++ *
++ */
++static struct netlbl_unlhsh_addr6 *netlbl_unlhsh_search_addr6(
++	                               const struct in6_addr *addr,
++	                               const struct netlbl_unlhsh_iface *iface)
++{
++	struct netlbl_unlhsh_addr6 *iter;
++
++	list_for_each_entry_rcu(iter, &iface->addr6_list, list)
++		if (iter->valid &&
++		    ipv6_masked_addr_cmp(&iter->addr, &iter->mask, addr) == 0)
++		return iter;
++
++	return NULL;
++}
++#endif /* IPv6 */
++
++/**
++ * netlbl_unlhsh_search_iface - Search for a matching interface entry
++ * @ifindex: the network interface
++ *
++ * Description:
++ * Searches the unlabeled connection hash table and returns a pointer to the
++ * interface entry which matches @ifindex, otherwise NULL is returned.  The
++ * caller is responsible for calling the rcu_read_[un]lock() functions.
++ *
++ */
++static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
++{
++	u32 bkt;
++	struct netlbl_unlhsh_iface *iter;
++
++	bkt = netlbl_unlhsh_hash(ifindex);
++	list_for_each_entry_rcu(iter,
++				&rcu_dereference(netlbl_unlhsh)->tbl[bkt],
++				list)
++		if (iter->valid && iter->ifindex == ifindex)
++			return iter;
++
++	return NULL;
++}
++
++/**
++ * netlbl_unlhsh_search_iface_def - Search for a matching interface entry
++ * @ifindex: the network interface
++ *
++ * Description:
++ * Searches the unlabeled connection hash table and returns a pointer to the
++ * interface entry which matches @ifindex.  If an exact match can not be found
++ * and there is a valid default entry, the default entry is returned, otherwise
++ * NULL is returned.  The caller is responsible for calling the
++ * rcu_read_[un]lock() functions.
++ *
++ */
++static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex)
++{
++	struct netlbl_unlhsh_iface *entry;
++
++	entry = netlbl_unlhsh_search_iface(ifindex);
++	if (entry != NULL)
++		return entry;
++
++	entry = rcu_dereference(netlbl_unlhsh_def);
++	if (entry != NULL && entry->valid)
++		return entry;
++
++	return NULL;
++}
++
++/**
++ * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table
++ * @iface: the associated interface entry
++ * @addr: IPv4 address in network byte order
++ * @mask: IPv4 address mask in network byte order
++ * @secid: LSM secid value for entry
++ *
++ * Description:
++ * Add a new address entry into the unlabeled connection hash table using the
++ * interface entry specified by @iface.  On success zero is returned, otherwise
++ * a negative value is returned.  The caller is responsible for calling the
++ * rcu_read_[un]lock() functions.
++ *
++ */
++static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
++				   const struct in_addr *addr,
++				   const struct in_addr *mask,
++				   u32 secid)
++{
++	struct netlbl_unlhsh_addr4 *entry;
++	struct netlbl_unlhsh_addr4 *iter;
++
++	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
++	if (entry == NULL)
++		return -ENOMEM;
++
++	entry->addr = addr->s_addr & mask->s_addr;
++	entry->mask = mask->s_addr;
++	entry->secid = secid;
++	entry->valid = 1;
++	INIT_RCU_HEAD(&entry->rcu);
++
++	spin_lock(&netlbl_unlhsh_lock);
++	iter = netlbl_unlhsh_search_addr4(entry->addr, iface);
++	if (iter != NULL &&
++	    iter->addr == addr->s_addr && iter->mask == mask->s_addr) {
++		spin_unlock(&netlbl_unlhsh_lock);
++		kfree(entry);
++		return -EEXIST;
++	}
++	/* in order to speed up address searches through the list (the common
++	 * case) we need to keep the list in order based on the size of the
++	 * address mask such that the entry with the most specific mask (largest
++	 * numerical value) appears first in the list */
++	list_for_each_entry_rcu(iter, &iface->addr4_list, list)
++		if (iter->valid &&
++		    ntohl(entry->mask) > ntohl(iter->mask)) {
++			__list_add_rcu(&entry->list,
++				       iter->list.prev,
++				       &iter->list);
++			spin_unlock(&netlbl_unlhsh_lock);
++			return 0;
++		}
++	list_add_tail_rcu(&entry->list, &iface->addr4_list);
++	spin_unlock(&netlbl_unlhsh_lock);
++	return 0;
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++/**
++ * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table
++ * @iface: the associated interface entry
++ * @addr: IPv6 address in network byte order
++ * @mask: IPv6 address mask in network byte order
++ * @secid: LSM secid value for entry
++ *
++ * Description:
++ * Add a new address entry into the unlabeled connection hash table using the
++ * interface entry specified by @iface.  On success zero is returned, otherwise
++ * a negative value is returned.  The caller is responsible for calling the
++ * rcu_read_[un]lock() functions.
++ *
++ */
++static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
++				   const struct in6_addr *addr,
++				   const struct in6_addr *mask,
++				   u32 secid)
++{
++	struct netlbl_unlhsh_addr6 *entry;
++	struct netlbl_unlhsh_addr6 *iter;
++
++	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
++	if (entry == NULL)
++		return -ENOMEM;
++
++	ipv6_addr_copy(&entry->addr, addr);
++	entry->addr.s6_addr32[0] &= mask->s6_addr32[0];
++	entry->addr.s6_addr32[1] &= mask->s6_addr32[1];
++	entry->addr.s6_addr32[2] &= mask->s6_addr32[2];
++	entry->addr.s6_addr32[3] &= mask->s6_addr32[3];
++	ipv6_addr_copy(&entry->mask, mask);
++	entry->secid = secid;
++	entry->valid = 1;
++	INIT_RCU_HEAD(&entry->rcu);
++
++	spin_lock(&netlbl_unlhsh_lock);
++	iter = netlbl_unlhsh_search_addr6(&entry->addr, iface);
++	if (iter != NULL &&
++	    (ipv6_addr_equal(&iter->addr, addr) &&
++	     ipv6_addr_equal(&iter->mask, mask))) {
++		spin_unlock(&netlbl_unlhsh_lock);
++		kfree(entry);
++		return -EEXIST;
++	}
++	/* in order to speed up address searches through the list (the common
++	 * case) we need to keep the list in order based on the size of the
++	 * address mask such that the entry with the most specific mask (largest
++	 * numerical value) appears first in the list */
++	list_for_each_entry_rcu(iter, &iface->addr6_list, list)
++		if (iter->valid &&
++		    ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) {
++			__list_add_rcu(&entry->list,
++				       iter->list.prev,
++				       &iter->list);
++			spin_unlock(&netlbl_unlhsh_lock);
++			return 0;
++		}
++	list_add_tail_rcu(&entry->list, &iface->addr6_list);
++	spin_unlock(&netlbl_unlhsh_lock);
++	return 0;
++}
++#endif /* IPv6 */
++
++/**
++ * netlbl_unlhsh_add_iface - Adds a new interface entry to the hash table
++ * @ifindex: network interface
++ *
++ * Description:
++ * Add a new, empty, interface entry into the unlabeled connection hash table.
++ * On success a pointer to the new interface entry is returned, on failure NULL
++ * is returned.  The caller is responsible for calling the rcu_read_[un]lock()
++ * functions.
++ *
++ */
++static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
++{
++	u32 bkt;
++	struct netlbl_unlhsh_iface *iface;
++
++	iface = kzalloc(sizeof(*iface), GFP_ATOMIC);
++	if (iface == NULL)
++		return NULL;
++
++	iface->ifindex = ifindex;
++	INIT_LIST_HEAD(&iface->addr4_list);
++	INIT_LIST_HEAD(&iface->addr6_list);
++	iface->valid = 1;
++	INIT_RCU_HEAD(&iface->rcu);
++
++	spin_lock(&netlbl_unlhsh_lock);
++	if (ifindex > 0) {
++		bkt = netlbl_unlhsh_hash(ifindex);
++		if (netlbl_unlhsh_search_iface(ifindex) != NULL)
++			goto add_iface_failure;
++		list_add_tail_rcu(&iface->list,
++				  &rcu_dereference(netlbl_unlhsh)->tbl[bkt]);
++	} else {
++		INIT_LIST_HEAD(&iface->list);
++		if (rcu_dereference(netlbl_unlhsh_def) != NULL)
++			goto add_iface_failure;
++		rcu_assign_pointer(netlbl_unlhsh_def, iface);
++	}
++	spin_unlock(&netlbl_unlhsh_lock);
++
++	return iface;
++
++add_iface_failure:
++	spin_unlock(&netlbl_unlhsh_lock);
++	kfree(iface);
++	return NULL;
++}
++
++/**
++ * netlbl_unlhsh_add - Adds a new entry to the unlabeled connection hash table
++ * @net: network namespace
++ * @dev_name: interface name
++ * @addr: IP address in network byte order
++ * @mask: address mask in network byte order
++ * @addr_len: length of address/mask (4 for IPv4, 16 for IPv6)
++ * @secid: LSM secid value for the entry
++ * @audit_info: NetLabel audit information
++ *
++ * Description:
++ * Adds a new entry to the unlabeled connection hash table.  Returns zero on
++ * success, negative values on failure.
++ *
++ */
++static int netlbl_unlhsh_add(struct net *net,
++			     const char *dev_name,
++			     const void *addr,
++			     const void *mask,
++			     u32 addr_len,
++			     u32 secid,
++			     struct netlbl_audit *audit_info)
++{
++	int ret_val;
++	int ifindex;
++	struct net_device *dev;
++	struct netlbl_unlhsh_iface *iface;
++	struct in_addr *addr4, *mask4;
++	struct in6_addr *addr6, *mask6;
++	struct audit_buffer *audit_buf = NULL;
++	char *secctx = NULL;
++	u32 secctx_len;
++
++	if (addr_len != sizeof(struct in_addr) &&
++	    addr_len != sizeof(struct in6_addr))
++		return -EINVAL;
++
++	rcu_read_lock();
++	if (dev_name != NULL) {
++		dev = dev_get_by_name(net, dev_name);
++		if (dev == NULL) {
++			ret_val = -ENODEV;
++			goto unlhsh_add_return;
++		}
++		ifindex = dev->ifindex;
++		dev_put(dev);
++		iface = netlbl_unlhsh_search_iface(ifindex);
++	} else {
++		ifindex = 0;
++		iface = rcu_dereference(netlbl_unlhsh_def);
++	}
++	if (iface == NULL) {
++		iface = netlbl_unlhsh_add_iface(ifindex);
++		if (iface == NULL) {
++			ret_val = -ENOMEM;
++			goto unlhsh_add_return;
++		}
++	}
++	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCADD,
++					      audit_info);
++	switch (addr_len) {
++	case sizeof(struct in_addr):
++		addr4 = (struct in_addr *)addr;
++		mask4 = (struct in_addr *)mask;
++		ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid);
++		if (audit_buf != NULL)
++			netlbl_unlabel_audit_addr4(audit_buf,
++						   dev_name,
++						   addr4->s_addr,
++						   mask4->s_addr);
++		break;
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++	case sizeof(struct in6_addr):
++		addr6 = (struct in6_addr *)addr;
++		mask6 = (struct in6_addr *)mask;
++		ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid);
++		if (audit_buf != NULL)
++			netlbl_unlabel_audit_addr6(audit_buf,
++						   dev_name,
++						   addr6, mask6);
++		break;
++#endif /* IPv6 */
++	default:
++		ret_val = -EINVAL;
++	}
++	if (ret_val == 0)
++		atomic_inc(&netlabel_mgmt_protocount);
++
++unlhsh_add_return:
++	rcu_read_unlock();
++	if (audit_buf != NULL) {
++		if (security_secid_to_secctx(secid,
++					     &secctx,
++					     &secctx_len) == 0) {
++			audit_log_format(audit_buf, " sec_obj=%s", secctx);
++			security_release_secctx(secctx, secctx_len);
++		}
++		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
++		audit_log_end(audit_buf);
++	}
++	return ret_val;
++}
++
++/**
++ * netlbl_unlhsh_remove_addr4 - Remove an IPv4 address entry
++ * @net: network namespace
++ * @iface: interface entry
++ * @addr: IP address
++ * @mask: IP address mask
++ * @audit_info: NetLabel audit information
++ *
++ * Description:
++ * Remove an IP address entry from the unlabeled connection hash table.
++ * Returns zero on success, negative values on failure.  The caller is
++ * responsible for calling the rcu_read_[un]lock() functions.
++ *
++ */
++static int netlbl_unlhsh_remove_addr4(struct net *net,
++				      struct netlbl_unlhsh_iface *iface,
++				      const struct in_addr *addr,
++				      const struct in_addr *mask,
++				      struct netlbl_audit *audit_info)
++{
++	int ret_val = -ENOENT;
++	struct netlbl_unlhsh_addr4 *entry;
++	struct audit_buffer *audit_buf = NULL;
++	struct net_device *dev;
++	char *secctx = NULL;
++	u32 secctx_len;
++
++	spin_lock(&netlbl_unlhsh_lock);
++	entry = netlbl_unlhsh_search_addr4(addr->s_addr, iface);
++	if (entry != NULL &&
++	    entry->addr == addr->s_addr && entry->mask == mask->s_addr) {
++		entry->valid = 0;
++		list_del_rcu(&entry->list);
++		ret_val = 0;
++	}
++	spin_unlock(&netlbl_unlhsh_lock);
++
++	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
++					      audit_info);
++	if (audit_buf != NULL) {
++		dev = dev_get_by_index(net, iface->ifindex);
++		netlbl_unlabel_audit_addr4(audit_buf,
++					   (dev != NULL ? dev->name : NULL),
++					   addr->s_addr, mask->s_addr);
++		if (dev != NULL)
++			dev_put(dev);
++		if (security_secid_to_secctx(entry->secid,
++					     &secctx,
++					     &secctx_len) == 0) {
++			audit_log_format(audit_buf, " sec_obj=%s", secctx);
++			security_release_secctx(secctx, secctx_len);
++		}
++		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
++		audit_log_end(audit_buf);
++	}
++
++	if (ret_val == 0)
++		call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
++	return ret_val;
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++/**
++ * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry
++ * @net: network namespace
++ * @iface: interface entry
++ * @addr: IP address
++ * @mask: IP address mask
++ * @audit_info: NetLabel audit information
++ *
++ * Description:
++ * Remove an IP address entry from the unlabeled connection hash table.
++ * Returns zero on success, negative values on failure.  The caller is
++ * responsible for calling the rcu_read_[un]lock() functions.
++ *
++ */
++static int netlbl_unlhsh_remove_addr6(struct net *net,
++				      struct netlbl_unlhsh_iface *iface,
++				      const struct in6_addr *addr,
++				      const struct in6_addr *mask,
++				      struct netlbl_audit *audit_info)
++{
++	int ret_val = -ENOENT;
++	struct netlbl_unlhsh_addr6 *entry;
++	struct audit_buffer *audit_buf = NULL;
++	struct net_device *dev;
++	char *secctx = NULL;
++	u32 secctx_len;
++
++	spin_lock(&netlbl_unlhsh_lock);
++	entry = netlbl_unlhsh_search_addr6(addr, iface);
++	if (entry != NULL &&
++	    (ipv6_addr_equal(&entry->addr, addr) &&
++	     ipv6_addr_equal(&entry->mask, mask))) {
++		entry->valid = 0;
++		list_del_rcu(&entry->list);
++		ret_val = 0;
++	}
++	spin_unlock(&netlbl_unlhsh_lock);
++
++	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
++					      audit_info);
++	if (audit_buf != NULL) {
++		dev = dev_get_by_index(net, iface->ifindex);
++		netlbl_unlabel_audit_addr6(audit_buf,
++					   (dev != NULL ? dev->name : NULL),
++					   addr, mask);
++		if (dev != NULL)
++			dev_put(dev);
++		if (security_secid_to_secctx(entry->secid,
++					     &secctx,
++					     &secctx_len) == 0) {
++			audit_log_format(audit_buf, " sec_obj=%s", secctx);
++			security_release_secctx(secctx, secctx_len);
++		}
++		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
++		audit_log_end(audit_buf);
++	}
++
++	if (ret_val == 0)
++		call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
++	return ret_val;
++}
++#endif /* IPv6 */
++
++/**
++ * netlbl_unlhsh_condremove_iface - Remove an interface entry
++ * @iface: the interface entry
++ *
++ * Description:
++ * Remove an interface entry from the unlabeled connection hash table if it is
++ * empty.  An interface entry is considered to be empty if there are no
++ * address entries assigned to it.
++ *
++ */
++static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
++{
++	struct netlbl_unlhsh_addr4 *iter4;
++	struct netlbl_unlhsh_addr6 *iter6;
++
++	spin_lock(&netlbl_unlhsh_lock);
++	list_for_each_entry_rcu(iter4, &iface->addr4_list, list)
++		if (iter4->valid)
++			goto unlhsh_condremove_failure;
++	list_for_each_entry_rcu(iter6, &iface->addr6_list, list)
++		if (iter6->valid)
++			goto unlhsh_condremove_failure;
++	iface->valid = 0;
++	if (iface->ifindex > 0)
++		list_del_rcu(&iface->list);
++	else
++		rcu_assign_pointer(netlbl_unlhsh_def, NULL);
++	spin_unlock(&netlbl_unlhsh_lock);
++
++	call_rcu(&iface->rcu, netlbl_unlhsh_free_iface);
++	return;
++
++unlhsh_condremove_failure:
++	spin_unlock(&netlbl_unlhsh_lock);
++	return;
++}
++
++/**
++ * netlbl_unlhsh_remove - Remove an entry from the unlabeled hash table
++ * @net: network namespace
++ * @dev_name: interface name
++ * @addr: IP address in network byte order
++ * @mask: address mask in network byte order
++ * @addr_len: length of address/mask (4 for IPv4, 16 for IPv6)
++ * @audit_info: NetLabel audit information
++ *
++ * Description:
++ * Removes an existing entry from the unlabeled connection hash table.
++ * Returns zero on success, negative values on failure.
++ *
++ */
++static int netlbl_unlhsh_remove(struct net *net,
++				const char *dev_name,
++				const void *addr,
++				const void *mask,
++				u32 addr_len,
++				struct netlbl_audit *audit_info)
++{
++	int ret_val;
++	struct net_device *dev;
++	struct netlbl_unlhsh_iface *iface;
++
++	if (addr_len != sizeof(struct in_addr) &&
++	    addr_len != sizeof(struct in6_addr))
++		return -EINVAL;
++
++	rcu_read_lock();
++	if (dev_name != NULL) {
++		dev = dev_get_by_name(net, dev_name);
++		if (dev == NULL) {
++			ret_val = -ENODEV;
++			goto unlhsh_remove_return;
++		}
++		iface = netlbl_unlhsh_search_iface(dev->ifindex);
++		dev_put(dev);
++	} else
++		iface = rcu_dereference(netlbl_unlhsh_def);
++	if (iface == NULL) {
++		ret_val = -ENOENT;
++		goto unlhsh_remove_return;
++	}
++	switch (addr_len) {
++	case sizeof(struct in_addr):
++		ret_val = netlbl_unlhsh_remove_addr4(net,
++						     iface, addr, mask,
++						     audit_info);
++		break;
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++	case sizeof(struct in6_addr):
++		ret_val = netlbl_unlhsh_remove_addr6(net,
++						     iface, addr, mask,
++						     audit_info);
++		break;
++#endif /* IPv6 */
++	default:
++		ret_val = -EINVAL;
++	}
++	if (ret_val == 0) {
++		netlbl_unlhsh_condremove_iface(iface);
++		atomic_dec(&netlabel_mgmt_protocount);
++	}
++
++unlhsh_remove_return:
++	rcu_read_unlock();
++	return ret_val;
++}
++
++/*
++ * General Helper Functions
++ */
++
++/**
++ * netlbl_unlhsh_netdev_handler - Network device notification handler
++ * @this: notifier block
++ * @event: the event
++ * @ptr: the network device (cast to void)
++ *
++ * Description:
++ * Handle network device events, although at present all we care about is a
++ * network device going away.  In the case of a device going away we clear any
++ * related entries from the unlabeled connection hash table.
++ *
++ */
++static int netlbl_unlhsh_netdev_handler(struct notifier_block *this,
++					unsigned long event,
++					void *ptr)
++{
++	struct net_device *dev = ptr;
++	struct netlbl_unlhsh_iface *iface = NULL;
++
++	if (dev->nd_net != &init_net)
++		return NOTIFY_DONE;
++
++	/* XXX - should this be a check for NETDEV_DOWN or _UNREGISTER? */
++	if (event == NETDEV_DOWN) {
++		spin_lock(&netlbl_unlhsh_lock);
++		iface = netlbl_unlhsh_search_iface(dev->ifindex);
++		if (iface != NULL && iface->valid) {
++			iface->valid = 0;
++			list_del_rcu(&iface->list);
++		} else
++			iface = NULL;
++		spin_unlock(&netlbl_unlhsh_lock);
++	}
++
++	if (iface != NULL)
++		call_rcu(&iface->rcu, netlbl_unlhsh_free_iface);
++
++	return NOTIFY_DONE;
++}
+ 
+ /**
+  * netlbl_unlabel_acceptflg_set - Set the unlabeled accept flag
+@@ -84,11 +984,8 @@ static void netlbl_unlabel_acceptflg_set(u8 value,
+ 	struct audit_buffer *audit_buf;
+ 	u8 old_val;
+ 
+-	spin_lock(&netlabel_unlabel_acceptflg_lock);
+ 	old_val = netlabel_unlabel_acceptflg;
+ 	netlabel_unlabel_acceptflg = value;
+-	spin_unlock(&netlabel_unlabel_acceptflg_lock);
+-
+ 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW,
+ 					      audit_info);
+ 	if (audit_buf != NULL) {
+@@ -98,6 +995,48 @@ static void netlbl_unlabel_acceptflg_set(u8 value,
+ 	}
+ }
+ 
++/**
++ * netlbl_unlabel_addrinfo_get - Get the IPv4/6 address information
++ * @info: the Generic NETLINK info block
++ * @addr: the IP address
++ * @mask: the IP address mask
++ * @len: the address length
++ *
++ * Description:
++ * Examine the Generic NETLINK message and extract the IP address information.
++ * Returns zero on success, negative values on failure.
++ *
++ */
++static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
++				       void **addr,
++				       void **mask,
++				       u32 *len)
++{
++	u32 addr_len;
++
++	if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
++		addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
++		if (addr_len != sizeof(struct in_addr) &&
++		    addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
++			return -EINVAL;
++		*len = addr_len;
++		*addr = nla_data(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
++		*mask = nla_data(info->attrs[NLBL_UNLABEL_A_IPV4MASK]);
++		return 0;
++	} else if (info->attrs[NLBL_UNLABEL_A_IPV6ADDR]) {
++		addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV6ADDR]);
++		if (addr_len != sizeof(struct in6_addr) &&
++		    addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV6MASK]))
++			return -EINVAL;
++		*len = addr_len;
++		*addr = nla_data(info->attrs[NLBL_UNLABEL_A_IPV6ADDR]);
++		*mask = nla_data(info->attrs[NLBL_UNLABEL_A_IPV6MASK]);
++		return 0;
++	}
++
++	return -EINVAL;
++}
++
+ /*
+  * NetLabel Command Handlers
+  */
+@@ -155,11 +1094,9 @@ static int netlbl_unlabel_list(struct sk_buff *skb, struct genl_info *info)
+ 		goto list_failure;
+ 	}
+ 
+-	rcu_read_lock();
+ 	ret_val = nla_put_u8(ans_skb,
+ 			     NLBL_UNLABEL_A_ACPTFLG,
+ 			     netlabel_unlabel_acceptflg);
+-	rcu_read_unlock();
+ 	if (ret_val != 0)
+ 		goto list_failure;
+ 
+@@ -175,11 +1112,489 @@ list_failure:
+ 	return ret_val;
+ }
+ 
++/**
++ * netlbl_unlabel_staticadd - Handle a STATICADD message
++ * @skb: the NETLINK buffer
++ * @info: the Generic NETLINK info block
++ *
++ * Description:
++ * Process a user generated STATICADD message and add a new unlabeled
++ * connection entry to the hash table.  Returns zero on success, negative
++ * values on failure.
++ *
++ */
++static int netlbl_unlabel_staticadd(struct sk_buff *skb,
++				    struct genl_info *info)
++{
++	int ret_val;
++	char *dev_name;
++	void *addr;
++	void *mask;
++	u32 addr_len;
++	u32 secid;
++	struct netlbl_audit audit_info;
++
++	/* Don't allow users to add both IPv4 and IPv6 addresses for a
++	 * single entry.  However, allow users to create two entries, one each
++	 * for IPv4 and IPv6, with the same LSM security context which should
++	 * achieve the same result. */
++	if (!info->attrs[NLBL_UNLABEL_A_SECCTX] ||
++	    !info->attrs[NLBL_UNLABEL_A_IFACE] ||
++	    !((!info->attrs[NLBL_UNLABEL_A_IPV4ADDR] ||
++	       !info->attrs[NLBL_UNLABEL_A_IPV4MASK]) ^
++	      (!info->attrs[NLBL_UNLABEL_A_IPV6ADDR] ||
++	       !info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
++		return -EINVAL;
++
++	netlbl_netlink_auditinfo(skb, &audit_info);
++
++	ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
++	if (ret_val != 0)
++		return ret_val;
++	dev_name = nla_data(info->attrs[NLBL_UNLABEL_A_IFACE]);
++	ret_val = security_secctx_to_secid(
++		                  nla_data(info->attrs[NLBL_UNLABEL_A_SECCTX]),
++				  nla_len(info->attrs[NLBL_UNLABEL_A_SECCTX]),
++				  &secid);
++	if (ret_val != 0)
++		return ret_val;
++
++	return netlbl_unlhsh_add(&init_net,
++				 dev_name, addr, mask, addr_len, secid,
++				 &audit_info);
++}
++
++/**
++ * netlbl_unlabel_staticadddef - Handle a STATICADDDEF message
++ * @skb: the NETLINK buffer
++ * @info: the Generic NETLINK info block
++ *
++ * Description:
++ * Process a user generated STATICADDDEF message and add a new default
++ * unlabeled connection entry.  Returns zero on success, negative values on
++ * failure.
++ *
++ */
++static int netlbl_unlabel_staticadddef(struct sk_buff *skb,
++				       struct genl_info *info)
++{
++	int ret_val;
++	void *addr;
++	void *mask;
++	u32 addr_len;
++	u32 secid;
++	struct netlbl_audit audit_info;
++
++	/* Don't allow users to add both IPv4 and IPv6 addresses for a
++	 * single entry.  However, allow users to create two entries, one each
++	 * for IPv4 and IPv6, with the same LSM security context which should
++	 * achieve the same result. */
++	if (!info->attrs[NLBL_UNLABEL_A_SECCTX] ||
++	    !((!info->attrs[NLBL_UNLABEL_A_IPV4ADDR] ||
++	       !info->attrs[NLBL_UNLABEL_A_IPV4MASK]) ^
++	      (!info->attrs[NLBL_UNLABEL_A_IPV6ADDR] ||
++	       !info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
++		return -EINVAL;
++
++	netlbl_netlink_auditinfo(skb, &audit_info);
++
++	ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
++	if (ret_val != 0)
++		return ret_val;
++	ret_val = security_secctx_to_secid(
++		                  nla_data(info->attrs[NLBL_UNLABEL_A_SECCTX]),
++				  nla_len(info->attrs[NLBL_UNLABEL_A_SECCTX]),
++				  &secid);
++	if (ret_val != 0)
++		return ret_val;
++
++	return netlbl_unlhsh_add(&init_net,
++				 NULL, addr, mask, addr_len, secid,
++				 &audit_info);
++}
++
++/**
++ * netlbl_unlabel_staticremove - Handle a STATICREMOVE message
++ * @skb: the NETLINK buffer
++ * @info: the Generic NETLINK info block
++ *
++ * Description:
++ * Process a user generated STATICREMOVE message and remove the specified
++ * unlabeled connection entry.  Returns zero on success, negative values on
++ * failure.
++ *
++ */
++static int netlbl_unlabel_staticremove(struct sk_buff *skb,
++				       struct genl_info *info)
++{
++	int ret_val;
++	char *dev_name;
++	void *addr;
++	void *mask;
++	u32 addr_len;
++	struct netlbl_audit audit_info;
++
++	/* See the note in netlbl_unlabel_staticadd() about not allowing both
++	 * IPv4 and IPv6 in the same entry. */
++	if (!info->attrs[NLBL_UNLABEL_A_IFACE] ||
++	    !((!info->attrs[NLBL_UNLABEL_A_IPV4ADDR] ||
++	       !info->attrs[NLBL_UNLABEL_A_IPV4MASK]) ^
++	      (!info->attrs[NLBL_UNLABEL_A_IPV6ADDR] ||
++	       !info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
++		return -EINVAL;
++
++	netlbl_netlink_auditinfo(skb, &audit_info);
++
++	ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
++	if (ret_val != 0)
++		return ret_val;
++	dev_name = nla_data(info->attrs[NLBL_UNLABEL_A_IFACE]);
++
++	return netlbl_unlhsh_remove(&init_net,
++				    dev_name, addr, mask, addr_len,
++				    &audit_info);
++}
++
++/**
++ * netlbl_unlabel_staticremovedef - Handle a STATICREMOVEDEF message
++ * @skb: the NETLINK buffer
++ * @info: the Generic NETLINK info block
++ *
++ * Description:
++ * Process a user generated STATICREMOVEDEF message and remove the default
++ * unlabeled connection entry.  Returns zero on success, negative values on
++ * failure.
++ *
++ */
++static int netlbl_unlabel_staticremovedef(struct sk_buff *skb,
++					  struct genl_info *info)
++{
++	int ret_val;
++	void *addr;
++	void *mask;
++	u32 addr_len;
++	struct netlbl_audit audit_info;
++
++	/* See the note in netlbl_unlabel_staticadd() about not allowing both
++	 * IPv4 and IPv6 in the same entry. */
++	if (!((!info->attrs[NLBL_UNLABEL_A_IPV4ADDR] ||
++	       !info->attrs[NLBL_UNLABEL_A_IPV4MASK]) ^
++	      (!info->attrs[NLBL_UNLABEL_A_IPV6ADDR] ||
++	       !info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
++		return -EINVAL;
++
++	netlbl_netlink_auditinfo(skb, &audit_info);
++
++	ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
++	if (ret_val != 0)
++		return ret_val;
++
++	return netlbl_unlhsh_remove(&init_net,
++				    NULL, addr, mask, addr_len,
++				    &audit_info);
++}
++
++
++/**
++ * netlbl_unlabel_staticlist_gen - Generate messages for STATICLIST[DEF]
++ * @cmd: command/message
++ * @iface: the interface entry
++ * @addr4: the IPv4 address entry
++ * @addr6: the IPv6 address entry
++ * @arg: the netlbl_unlhsh_walk_arg structure
++ *
++ * Description:
++ * This function is designed to be used to generate a response for a
++ * STATICLIST or STATICLISTDEF message.  When called, either @addr4 or @addr6
++ * can be specified (not both); the other, unspecified entry should be set to
++ * NULL by the caller.  Returns the size of the message on success, negative
++ * values on failure.
++ *
++ */
++static int netlbl_unlabel_staticlist_gen(u32 cmd,
++				       const struct netlbl_unlhsh_iface *iface,
++				       const struct netlbl_unlhsh_addr4 *addr4,
++				       const struct netlbl_unlhsh_addr6 *addr6,
++				       void *arg)
++{
++	int ret_val = -ENOMEM;
++	struct netlbl_unlhsh_walk_arg *cb_arg = arg;
++	struct net_device *dev;
++	void *data;
++	u32 secid;
++	char *secctx;
++	u32 secctx_len;
++
++	data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
++			   cb_arg->seq, &netlbl_unlabel_gnl_family,
++			   NLM_F_MULTI, cmd);
++	if (data == NULL)
++		goto list_cb_failure;
++
++	if (iface->ifindex > 0) {
++		dev = dev_get_by_index(&init_net, iface->ifindex);
++		ret_val = nla_put_string(cb_arg->skb,
++					 NLBL_UNLABEL_A_IFACE, dev->name);
++		dev_put(dev);
++		if (ret_val != 0)
++			goto list_cb_failure;
++	}
++
++	if (addr4) {
++		struct in_addr addr_struct;
++
++		addr_struct.s_addr = addr4->addr;
++		ret_val = nla_put(cb_arg->skb,
++				  NLBL_UNLABEL_A_IPV4ADDR,
++				  sizeof(struct in_addr),
++				  &addr_struct);
++		if (ret_val != 0)
++			goto list_cb_failure;
++
++		addr_struct.s_addr = addr4->mask;
++		ret_val = nla_put(cb_arg->skb,
++				  NLBL_UNLABEL_A_IPV4MASK,
++				  sizeof(struct in_addr),
++				  &addr_struct);
++		if (ret_val != 0)
++			goto list_cb_failure;
++
++		secid = addr4->secid;
++	} else {
++		ret_val = nla_put(cb_arg->skb,
++				  NLBL_UNLABEL_A_IPV6ADDR,
++				  sizeof(struct in6_addr),
++				  &addr6->addr);
++		if (ret_val != 0)
++			goto list_cb_failure;
++
++		ret_val = nla_put(cb_arg->skb,
++				  NLBL_UNLABEL_A_IPV6MASK,
++				  sizeof(struct in6_addr),
++				  &addr6->mask);
++		if (ret_val != 0)
++			goto list_cb_failure;
++
++		secid = addr6->secid;
++	}
++
++	ret_val = security_secid_to_secctx(secid, &secctx, &secctx_len);
++	if (ret_val != 0)
++		goto list_cb_failure;
++	ret_val = nla_put(cb_arg->skb,
++			  NLBL_UNLABEL_A_SECCTX,
++			  secctx_len,
++			  secctx);
++	security_release_secctx(secctx, secctx_len);
++	if (ret_val != 0)
++		goto list_cb_failure;
++
++	cb_arg->seq++;
++	return genlmsg_end(cb_arg->skb, data);
++
++list_cb_failure:
++	genlmsg_cancel(cb_arg->skb, data);
++	return ret_val;
++}
++
++/**
++ * netlbl_unlabel_staticlist - Handle a STATICLIST message
++ * @skb: the NETLINK buffer
++ * @cb: the NETLINK callback
++ *
++ * Description:
++ * Process a user generated STATICLIST message and dump the unlabeled
++ * connection hash table in a form suitable for use in a kernel generated
++ * STATICLIST message.  Returns the length of @skb.
++ *
++ */
++static int netlbl_unlabel_staticlist(struct sk_buff *skb,
++				     struct netlink_callback *cb)
++{
++	struct netlbl_unlhsh_walk_arg cb_arg;
++	u32 skip_bkt = cb->args[0];
++	u32 skip_chain = cb->args[1];
++	u32 skip_addr4 = cb->args[2];
++	u32 skip_addr6 = cb->args[3];
++	u32 iter_bkt;
++	u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
++	struct netlbl_unlhsh_iface *iface;
++	struct netlbl_unlhsh_addr4 *addr4;
++	struct netlbl_unlhsh_addr6 *addr6;
++
++	cb_arg.nl_cb = cb;
++	cb_arg.skb = skb;
++	cb_arg.seq = cb->nlh->nlmsg_seq;
++
++	rcu_read_lock();
++	for (iter_bkt = skip_bkt;
++	     iter_bkt < rcu_dereference(netlbl_unlhsh)->size;
++	     iter_bkt++, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0) {
++		list_for_each_entry_rcu(iface,
++			        &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt],
++				list) {
++			if (!iface->valid ||
++			    iter_chain++ < skip_chain)
++				continue;
++			list_for_each_entry_rcu(addr4,
++						&iface->addr4_list,
++						list) {
++				if (!addr4->valid || iter_addr4++ < skip_addr4)
++					continue;
++				if (netlbl_unlabel_staticlist_gen(
++					             NLBL_UNLABEL_C_STATICLIST,
++						     iface,
++						     addr4,
++						     NULL,
++						     &cb_arg) < 0) {
++					iter_addr4--;
++					iter_chain--;
++					goto unlabel_staticlist_return;
++				}
++			}
++			list_for_each_entry_rcu(addr6,
++						&iface->addr6_list,
++						list) {
++				if (!addr6->valid || iter_addr6++ < skip_addr6)
++					continue;
++				if (netlbl_unlabel_staticlist_gen(
++						     NLBL_UNLABEL_C_STATICLIST,
++						     iface,
++						     NULL,
++						     addr6,
++						     &cb_arg) < 0) {
++					iter_addr6--;
++					iter_chain--;
++					goto unlabel_staticlist_return;
++				}
++			}
++		}
++	}
++
++unlabel_staticlist_return:
++	rcu_read_unlock();
++	cb->args[0] = skip_bkt;
++	cb->args[1] = skip_chain;
++	cb->args[2] = skip_addr4;
++	cb->args[3] = skip_addr6;
++	return skb->len;
++}
++
++/**
++ * netlbl_unlabel_staticlistdef - Handle a STATICLISTDEF message
++ * @skb: the NETLINK buffer
++ * @cb: the NETLINK callback
++ *
++ * Description:
++ * Process a user generated STATICLISTDEF message and dump the default
++ * unlabeled connection entry in a form suitable for use in a kernel generated
++ * STATICLISTDEF message.  Returns the length of @skb.
++ *
++ */
++static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
++					struct netlink_callback *cb)
++{
++	struct netlbl_unlhsh_walk_arg cb_arg;
++	struct netlbl_unlhsh_iface *iface;
++	u32 skip_addr4 = cb->args[0];
++	u32 skip_addr6 = cb->args[1];
++	u32 iter_addr4 = 0, iter_addr6 = 0;
++	struct netlbl_unlhsh_addr4 *addr4;
++	struct netlbl_unlhsh_addr6 *addr6;
++
++	cb_arg.nl_cb = cb;
++	cb_arg.skb = skb;
++	cb_arg.seq = cb->nlh->nlmsg_seq;
++
++	rcu_read_lock();
++	iface = rcu_dereference(netlbl_unlhsh_def);
++	if (iface == NULL || !iface->valid)
++		goto unlabel_staticlistdef_return;
++
++	list_for_each_entry_rcu(addr4, &iface->addr4_list, list) {
++		if (!addr4->valid || iter_addr4++ < skip_addr4)
++			continue;
++		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
++					   iface,
++					   addr4,
++					   NULL,
++					   &cb_arg) < 0) {
++			iter_addr4--;
++			goto unlabel_staticlistdef_return;
++		}
++	}
++	list_for_each_entry_rcu(addr6, &iface->addr6_list, list) {
++		if (!addr6->valid || iter_addr6++ < skip_addr6)
++			continue;
++		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
++					   iface,
++					   NULL,
++					   addr6,
++					   &cb_arg) < 0) {
++			iter_addr6--;
++			goto unlabel_staticlistdef_return;
++		}
++	}
++
++unlabel_staticlistdef_return:
++	rcu_read_unlock();
++	cb->args[0] = skip_addr4;
++	cb->args[1] = skip_addr6;
++	return skb->len;
++}
+ 
+ /*
+  * NetLabel Generic NETLINK Command Definitions
+  */
+ 
++static struct genl_ops netlbl_unlabel_genl_c_staticadd = {
++	.cmd = NLBL_UNLABEL_C_STATICADD,
++	.flags = GENL_ADMIN_PERM,
++	.policy = netlbl_unlabel_genl_policy,
++	.doit = netlbl_unlabel_staticadd,
++	.dumpit = NULL,
++};
++
++static struct genl_ops netlbl_unlabel_genl_c_staticremove = {
++	.cmd = NLBL_UNLABEL_C_STATICREMOVE,
++	.flags = GENL_ADMIN_PERM,
++	.policy = netlbl_unlabel_genl_policy,
++	.doit = netlbl_unlabel_staticremove,
++	.dumpit = NULL,
++};
++
++static struct genl_ops netlbl_unlabel_genl_c_staticlist = {
++	.cmd = NLBL_UNLABEL_C_STATICLIST,
++	.flags = 0,
++	.policy = netlbl_unlabel_genl_policy,
++	.doit = NULL,
++	.dumpit = netlbl_unlabel_staticlist,
++};
++
++static struct genl_ops netlbl_unlabel_genl_c_staticadddef = {
++	.cmd = NLBL_UNLABEL_C_STATICADDDEF,
++	.flags = GENL_ADMIN_PERM,
++	.policy = netlbl_unlabel_genl_policy,
++	.doit = netlbl_unlabel_staticadddef,
++	.dumpit = NULL,
++};
++
++static struct genl_ops netlbl_unlabel_genl_c_staticremovedef = {
++	.cmd = NLBL_UNLABEL_C_STATICREMOVEDEF,
++	.flags = GENL_ADMIN_PERM,
++	.policy = netlbl_unlabel_genl_policy,
++	.doit = netlbl_unlabel_staticremovedef,
++	.dumpit = NULL,
++};
++
++static struct genl_ops netlbl_unlabel_genl_c_staticlistdef = {
++	.cmd = NLBL_UNLABEL_C_STATICLISTDEF,
++	.flags = 0,
++	.policy = netlbl_unlabel_genl_policy,
++	.doit = NULL,
++	.dumpit = netlbl_unlabel_staticlistdef,
++};
++
+ static struct genl_ops netlbl_unlabel_genl_c_accept = {
+ 	.cmd = NLBL_UNLABEL_C_ACCEPT,
+ 	.flags = GENL_ADMIN_PERM,
+@@ -196,7 +1611,6 @@ static struct genl_ops netlbl_unlabel_genl_c_list = {
+ 	.dumpit = NULL,
+ };
+ 
+-
+ /*
+  * NetLabel Generic NETLINK Protocol Functions
+  */
+@@ -218,6 +1632,36 @@ int netlbl_unlabel_genl_init(void)
+ 		return ret_val;
+ 
+ 	ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
++				    &netlbl_unlabel_genl_c_staticadd);
++	if (ret_val != 0)
++		return ret_val;
++
++	ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
++				    &netlbl_unlabel_genl_c_staticremove);
++	if (ret_val != 0)
++		return ret_val;
++
++	ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
++				    &netlbl_unlabel_genl_c_staticlist);
++	if (ret_val != 0)
++		return ret_val;
++
++	ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
++				    &netlbl_unlabel_genl_c_staticadddef);
++	if (ret_val != 0)
++		return ret_val;
++
++	ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
++				    &netlbl_unlabel_genl_c_staticremovedef);
++	if (ret_val != 0)
++		return ret_val;
++
++	ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
++				    &netlbl_unlabel_genl_c_staticlistdef);
++	if (ret_val != 0)
++		return ret_val;
++
++	ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
+ 				    &netlbl_unlabel_genl_c_accept);
+ 	if (ret_val != 0)
+ 		return ret_val;
+@@ -234,8 +1678,58 @@ int netlbl_unlabel_genl_init(void)
+  * NetLabel KAPI Hooks
+  */
+ 
++static struct notifier_block netlbl_unlhsh_netdev_notifier = {
++	.notifier_call = netlbl_unlhsh_netdev_handler,
++};
++
++/**
++ * netlbl_unlabel_init - Initialize the unlabeled connection hash table
++ * @size: the number of bits to use for the hash buckets
++ *
++ * Description:
++ * Initializes the unlabeled connection hash table and registers a network
++ * device notification handler.  This function should only be called by the
++ * NetLabel subsystem itself during initialization.  Returns zero on success,
++ * non-zero values on error.
++ *
++ */
++int netlbl_unlabel_init(u32 size)
++{
++	u32 iter;
++	struct netlbl_unlhsh_tbl *hsh_tbl;
++
++	if (size == 0)
++		return -EINVAL;
++
++	hsh_tbl = kmalloc(sizeof(*hsh_tbl), GFP_KERNEL);
++	if (hsh_tbl == NULL)
++		return -ENOMEM;
++	hsh_tbl->size = 1 << size;
++	hsh_tbl->tbl = kcalloc(hsh_tbl->size,
++			       sizeof(struct list_head),
++			       GFP_KERNEL);
++	if (hsh_tbl->tbl == NULL) {
++		kfree(hsh_tbl);
++		return -ENOMEM;
++	}
++	for (iter = 0; iter < hsh_tbl->size; iter++)
++		INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
++
++	rcu_read_lock();
++	spin_lock(&netlbl_unlhsh_lock);
++	rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
++	spin_unlock(&netlbl_unlhsh_lock);
++	rcu_read_unlock();
++
++	register_netdevice_notifier(&netlbl_unlhsh_netdev_notifier);
++
++	return 0;
++}
++
+ /**
+  * netlbl_unlabel_getattr - Get the security attributes for an unlabled packet
++ * @skb: the packet
++ * @family: protocol family
+  * @secattr: the security attributes
+  *
+  * Description:
+@@ -243,19 +1737,52 @@ int netlbl_unlabel_genl_init(void)
+  * them in @secattr.  Returns zero on success and negative values on failure.
+  *
+  */
+-int netlbl_unlabel_getattr(struct netlbl_lsm_secattr *secattr)
++int netlbl_unlabel_getattr(const struct sk_buff *skb,
++			   u16 family,
++			   struct netlbl_lsm_secattr *secattr)
+ {
+-	int ret_val;
++	struct iphdr *hdr4;
++	struct ipv6hdr *hdr6;
++	struct netlbl_unlhsh_addr4 *addr4;
++	struct netlbl_unlhsh_addr6 *addr6;
++	struct netlbl_unlhsh_iface *iface;
+ 
+ 	rcu_read_lock();
+-	if (netlabel_unlabel_acceptflg == 1) {
+-		netlbl_secattr_init(secattr);
+-		ret_val = 0;
+-	} else
+-		ret_val = -ENOMSG;
++	iface = netlbl_unlhsh_search_iface_def(skb->iif);
++	if (iface == NULL)
++		goto unlabel_getattr_nolabel;
++	switch (family) {
++	case PF_INET:
++		hdr4 = ip_hdr(skb);
++		addr4 = netlbl_unlhsh_search_addr4(hdr4->saddr, iface);
++		if (addr4 == NULL)
++			goto unlabel_getattr_nolabel;
++		secattr->attr.secid = addr4->secid;
++		break;
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++	case PF_INET6:
++		hdr6 = ipv6_hdr(skb);
++		addr6 = netlbl_unlhsh_search_addr6(&hdr6->saddr, iface);
++		if (addr6 == NULL)
++			goto unlabel_getattr_nolabel;
++		secattr->attr.secid = addr6->secid;
++		break;
++#endif /* IPv6 */
++	default:
++		goto unlabel_getattr_nolabel;
++	}
+ 	rcu_read_unlock();
+ 
+-	return ret_val;
++	secattr->flags |= NETLBL_SECATTR_SECID;
++	secattr->type = NETLBL_NLTYPE_UNLABELED;
++	return 0;
++
++unlabel_getattr_nolabel:
++	rcu_read_unlock();
++	if (netlabel_unlabel_acceptflg == 0)
++		return -ENOMSG;
++	secattr->type = NETLBL_NLTYPE_UNLABELED;
++	return 0;
+ }
+ 
+ /**
+diff --git a/net/netlabel/netlabel_unlabeled.h b/net/netlabel/netlabel_unlabeled.h
+index c2917fb..06b1301 100644
+--- a/net/netlabel/netlabel_unlabeled.h
++++ b/net/netlabel/netlabel_unlabeled.h
+@@ -36,6 +36,116 @@
+ /*
+  * The following NetLabel payloads are supported by the Unlabeled subsystem.
+  *
++ * o STATICADD
++ *   This message is sent from an application to add a new static label for
++ *   incoming unlabeled connections.
++ *
++ *   Required attributes:
++ *
++ *     NLBL_UNLABEL_A_IFACE
++ *     NLBL_UNLABEL_A_SECCTX
++ *
++ *   If IPv4 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV4ADDR
++ *     NLBL_UNLABEL_A_IPV4MASK
++ *
++ *   If IPv6 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV6ADDR
++ *     NLBL_UNLABEL_A_IPV6MASK
++ *
++ * o STATICREMOVE
++ *   This message is sent from an application to remove an existing static
++ *   label for incoming unlabeled connections.
++ *
++ *   Required attributes:
++ *
++ *     NLBL_UNLABEL_A_IFACE
++ *
++ *   If IPv4 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV4ADDR
++ *     NLBL_UNLABEL_A_IPV4MASK
++ *
++ *   If IPv6 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV6ADDR
++ *     NLBL_UNLABEL_A_IPV6MASK
++ *
++ * o STATICLIST
++ *   This message can be sent either from an application or by the kernel in
++ *   response to an application generated STATICLIST message.  When sent by an
++ *   application there is no payload and the NLM_F_DUMP flag should be set.
++ *   The kernel should respond with a series of the following messages.
++ *
++ *   Required attributes:
++ *
++ *     NLBL_UNLABEL_A_IFACE
++ *     NLBL_UNLABEL_A_SECCTX
++ *
++ *   If IPv4 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV4ADDR
++ *     NLBL_UNLABEL_A_IPV4MASK
++ *
++ *   If IPv6 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV6ADDR
++ *     NLBL_UNLABEL_A_IPV6MASK
++ *
++ * o STATICADDDEF
++ *   This message is sent from an application to set the default static
++ *   label for incoming unlabeled connections.
++ *
++ *   Required attribute:
++ *
++ *     NLBL_UNLABEL_A_SECCTX
++ *
++ *   If IPv4 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV4ADDR
++ *     NLBL_UNLABEL_A_IPV4MASK
++ *
++ *   If IPv6 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV6ADDR
++ *     NLBL_UNLABEL_A_IPV6MASK
++ *
++ * o STATICREMOVEDEF
++ *   This message is sent from an application to remove the existing default
++ *   static label for incoming unlabeled connections.
++ *
++ *   If IPv4 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV4ADDR
++ *     NLBL_UNLABEL_A_IPV4MASK
++ *
++ *   If IPv6 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV6ADDR
++ *     NLBL_UNLABEL_A_IPV6MASK
++ *
++ * o STATICLISTDEF
++ *   This message can be sent either from an application or by the kernel in
++ *   response to an application generated STATICLISTDEF message.  When sent by
++ *   an application there is no payload and the NLM_F_DUMP flag should be set.
++ *   The kernel should respond with the following message.
++ *
++ *   Required attribute:
++ *
++ *     NLBL_UNLABEL_A_SECCTX
++ *
++ *   If IPv4 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV4ADDR
++ *     NLBL_UNLABEL_A_IPV4MASK
++ *
++ *   If IPv6 is specified the following attributes are required:
++ *
++ *     NLBL_UNLABEL_A_IPV6ADDR
++ *     NLBL_UNLABEL_A_IPV6MASK
++ *
+  * o ACCEPT
+  *   This message is sent from an application to specify if the kernel should
+  *   allow unlabled packets to pass if they do not match any of the static
+@@ -62,6 +172,12 @@ enum {
+ 	NLBL_UNLABEL_C_UNSPEC,
+ 	NLBL_UNLABEL_C_ACCEPT,
+ 	NLBL_UNLABEL_C_LIST,
++	NLBL_UNLABEL_C_STATICADD,
++	NLBL_UNLABEL_C_STATICREMOVE,
++	NLBL_UNLABEL_C_STATICLIST,
++	NLBL_UNLABEL_C_STATICADDDEF,
++	NLBL_UNLABEL_C_STATICREMOVEDEF,
++	NLBL_UNLABEL_C_STATICLISTDEF,
+ 	__NLBL_UNLABEL_C_MAX,
+ };
+ #define NLBL_UNLABEL_C_MAX (__NLBL_UNLABEL_C_MAX - 1)
+@@ -73,6 +189,24 @@ enum {
+ 	/* (NLA_U8)
+ 	 * if true then unlabeled packets are allowed to pass, else unlabeled
+ 	 * packets are rejected */
++	NLBL_UNLABEL_A_IPV6ADDR,
++	/* (NLA_BINARY, struct in6_addr)
++	 * an IPv6 address */
++	NLBL_UNLABEL_A_IPV6MASK,
++	/* (NLA_BINARY, struct in6_addr)
++	 * an IPv6 address mask */
++	NLBL_UNLABEL_A_IPV4ADDR,
++	/* (NLA_BINARY, struct in_addr)
++	 * an IPv4 address */
++	NLBL_UNLABEL_A_IPV4MASK,
++	/* (NLA_BINARY, struct in_addr)
++	 * an IPv4 address mask */
++	NLBL_UNLABEL_A_IFACE,
++	/* (NLA_NUL_STRING)
++	 * network interface */
++	NLBL_UNLABEL_A_SECCTX,
++	/* (NLA_BINARY)
++	 * a LSM specific security context */
+ 	__NLBL_UNLABEL_A_MAX,
+ };
+ #define NLBL_UNLABEL_A_MAX (__NLBL_UNLABEL_A_MAX - 1)
+@@ -80,8 +214,17 @@ enum {
+ /* NetLabel protocol functions */
+ int netlbl_unlabel_genl_init(void);
+ 
++/* Unlabeled connection hash table size */
++/* XXX - currently this number is an uneducated guess */
++#define NETLBL_UNLHSH_BITSIZE       7
++
++/* General Unlabeled init function */
++int netlbl_unlabel_init(u32 size);
++
+ /* Process Unlabeled incoming network packets */
+-int netlbl_unlabel_getattr(struct netlbl_lsm_secattr *secattr);
++int netlbl_unlabel_getattr(const struct sk_buff *skb,
++			   u16 family,
++			   struct netlbl_lsm_secattr *secattr);
+ 
+ /* Set the default configuration to allow Unlabeled packets */
+ int netlbl_unlabel_defconf(void);
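The revised netlbl_unlabel_getattr() prototype above now takes the packet and
its address family so the unlabeled/static label tables can be consulted per
packet.  A minimal caller sketch, assuming the netlbl_secattr_init() and
netlbl_secattr_destroy() helpers from include/net/netlabel.h (illustrative
only, not part of this patch):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/skbuff.h>
#include <net/netlabel.h>
#include "netlabel_unlabeled.h"

static int example_unlabel_fallback(const struct sk_buff *skb, u16 family,
				    struct netlbl_lsm_secattr *secattr)
{
	int rc;

	netlbl_secattr_init(secattr);
	/* consult the unlabeled/static label tables for this packet */
	rc = netlbl_unlabel_getattr(skb, family, secattr);
	if (rc != 0)
		netlbl_secattr_destroy(secattr);
	return rc;
}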
 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
 index de3988b..6b178e1 100644
 --- a/net/netlink/af_netlink.c
@@ -763102,6 +924450,18 @@
 +	# All done with mercurial
 +	exit
  fi
+diff --git a/security/Kconfig b/security/Kconfig
+index 8086e61..389e151 100644
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -76,6 +76,7 @@ config SECURITY_NETWORK_XFRM
+ config SECURITY_CAPABILITIES
+ 	bool "Default Linux Capabilities"
+ 	depends on SECURITY
++	default y
+ 	help
+ 	  This enables the "default" Linux capabilities functionality.
+ 	  If you are unsure how to answer this question, answer Y.
 diff --git a/security/dummy.c b/security/dummy.c
 index 3ccfbbe..48d4b0a 100644
 --- a/security/dummy.c
@@ -763293,11 +924653,146 @@
  void security_release_secctx(char *secdata, u32 seclen)
  {
  	return security_ops->release_secctx(secdata, seclen);
+diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
+index b32a459..2b517d6 100644
+--- a/security/selinux/Kconfig
++++ b/security/selinux/Kconfig
+@@ -145,7 +145,7 @@ config SECURITY_SELINUX_POLICYDB_VERSION_MAX
+ config SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
+ 	int "NSA SELinux maximum supported policy format version value"
+ 	depends on SECURITY_SELINUX_POLICYDB_VERSION_MAX
+-	range 15 21
++	range 15 22
+ 	default 19
+ 	help
+ 	  This option sets the value for the maximum policy format version
+diff --git a/security/selinux/Makefile b/security/selinux/Makefile
+index dc3502e..00afd85 100644
+--- a/security/selinux/Makefile
++++ b/security/selinux/Makefile
+@@ -4,7 +4,14 @@
+ 
+ obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
+ 
+-selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o exports.o
++selinux-y := avc.o \
++	     hooks.o \
++	     selinuxfs.o \
++	     netlink.o \
++	     nlmsgtab.o \
++	     netif.o \
++	     netnode.o \
++	     exports.o
+ 
+ selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
+ 
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index 81b3dff..e8529e2 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -661,9 +661,18 @@ void avc_audit(u32 ssid, u32 tsid,
+ 						    "daddr", "dest");
+ 				break;
+ 			}
+-			if (a->u.net.netif)
+-				audit_log_format(ab, " netif=%s",
+-					a->u.net.netif);
++			if (a->u.net.netif > 0) {
++				struct net_device *dev;
++
++				/* NOTE: we always use init's namespace */
++				dev = dev_get_by_index(&init_net,
++						       a->u.net.netif);
++				if (dev) {
++					audit_log_format(ab, " netif=%s",
++							 dev->name);
++					dev_put(dev);
++				}
++			}
+ 			break;
+ 		}
+ 	}
+diff --git a/security/selinux/exports.c b/security/selinux/exports.c
+index b6f9694..87d2bb3 100644
+--- a/security/selinux/exports.c
++++ b/security/selinux/exports.c
+@@ -17,10 +17,14 @@
+ #include <linux/selinux.h>
+ #include <linux/fs.h>
+ #include <linux/ipc.h>
++#include <asm/atomic.h>
+ 
+ #include "security.h"
+ #include "objsec.h"
+ 
++/* SECMARK reference count */
++extern atomic_t selinux_secmark_refcount;
++
+ int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen)
+ {
+ 	if (selinux_enabled)
+@@ -74,7 +78,7 @@ int selinux_string_to_sid(char *str, u32 *sid)
+ }
+ EXPORT_SYMBOL_GPL(selinux_string_to_sid);
+ 
+-int selinux_relabel_packet_permission(u32 sid)
++int selinux_secmark_relabel_packet_permission(u32 sid)
+ {
+ 	if (selinux_enabled) {
+ 		struct task_security_struct *tsec = current->security;
+@@ -84,4 +88,16 @@ int selinux_relabel_packet_permission(u32 sid)
+ 	}
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(selinux_relabel_packet_permission);
++EXPORT_SYMBOL_GPL(selinux_secmark_relabel_packet_permission);
++
++void selinux_secmark_refcount_inc(void)
++{
++	atomic_inc(&selinux_secmark_refcount);
++}
++EXPORT_SYMBOL_GPL(selinux_secmark_refcount_inc);
++
++void selinux_secmark_refcount_dec(void)
++{
++	atomic_dec(&selinux_secmark_refcount);
++}
++EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec);
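The two refcount exports above let the SECMARK configuration path tell SELinux
whether any SECMARK rules are loaded; selinux_secmark_enabled() in hooks.c
further down simply reads the counter.  A hedged sketch of a caller, assuming
the matching declarations in include/linux/selinux.h and using hypothetical
function names in place of the real xt_SECMARK callbacks:

/* Illustrative sketch only; example_* names are hypothetical. */
#include <linux/selinux.h>

static int example_secmark_setup(u32 secid)
{
	int err;

	/* may the current task relabel packets to this SID? */
	err = selinux_secmark_relabel_packet_permission(secid);
	if (err)
		return err;
	selinux_secmark_refcount_inc();	/* SECMARK now counts as enabled */
	return 0;
}

static void example_secmark_teardown(void)
{
	selinux_secmark_refcount_dec();	/* last rule removed */
}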
 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 9f3124b..64d414e 100644
+index 9f3124b..be6de0b 100644
 --- a/security/selinux/hooks.c
 +++ b/security/selinux/hooks.c
-@@ -82,6 +82,8 @@
+@@ -12,8 +12,8 @@
+  *  Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris at redhat.com>
+  *  Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+  *                          <dgoeddel at trustedcs.com>
+- *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
+- *                     Paul Moore, <paul.moore at hp.com>
++ *  Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
++ *                Paul Moore <paul.moore at hp.com>
+  *  Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
+  *                     Yuichi Nakamura <ynakam at hitachisoft.jp>
+  *
+@@ -50,8 +50,11 @@
+ #include <net/icmp.h>
+ #include <net/ip.h>		/* for local_port_range[] */
+ #include <net/tcp.h>		/* struct or_callable used in sock_rcv_skb */
++#include <net/net_namespace.h>
++#include <net/netlabel.h>
+ #include <asm/uaccess.h>
+ #include <asm/ioctls.h>
++#include <asm/atomic.h>
+ #include <linux/bitops.h>
+ #include <linux/interrupt.h>
+ #include <linux/netdevice.h>	/* for network interface checks */
+@@ -76,17 +79,23 @@
+ #include "avc.h"
+ #include "objsec.h"
+ #include "netif.h"
++#include "netnode.h"
+ #include "xfrm.h"
+ #include "netlabel.h"
+ 
  #define XATTR_SELINUX_SUFFIX "selinux"
  #define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX
  
@@ -763306,7 +924801,37 @@
  extern unsigned int policydb_loaded_version;
  extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
  extern int selinux_compat_net;
-@@ -321,8 +323,8 @@ enum {
+ extern struct security_operations *security_ops;
+ 
++/* SECMARK reference count */
++atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
++
+ #ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+ int selinux_enforcing = 0;
+ 
+@@ -153,6 +162,21 @@ getsecurity_exit:
+ 	return len;
+ }
+ 
++/**
++ * selinux_secmark_enabled - Check to see if SECMARK is currently enabled
++ *
++ * Description:
++ * This function checks the SECMARK reference counter to see if any SECMARK
++ * targets are currently configured; if the reference counter is greater than
++ * zero, SECMARK is considered to be enabled.  Returns true (1) if SECMARK is
++ * enabled, false (0) if SECMARK is disabled.
++ *
++ */
++static int selinux_secmark_enabled(void)
++{
++	return (atomic_read(&selinux_secmark_refcount) > 0);
++}
++
+ /* Allocate and free functions for each kind of security blob. */
+ 
+ static int task_alloc_security(struct task_struct *task)
+@@ -321,8 +345,8 @@ enum {
  	Opt_error = -1,
  	Opt_context = 1,
  	Opt_fscontext = 2,
@@ -763317,7 +924842,7 @@
  };
  
  static match_table_t tokens = {
-@@ -366,150 +368,317 @@ static int may_context_mount_inode_relabel(u32 sid,
+@@ -366,150 +390,317 @@ static int may_context_mount_inode_relabel(u32 sid,
  	return rc;
  }
  
@@ -763599,7 +925124,8 @@
 +	*num_opts = 0;
 +	return rc;
 +}
-+
+ 
+-	if (!seen)
 +static int bad_option(struct superblock_security_struct *sbsec, char flag,
 +		      u32 old_sid, u32 new_sid)
 +{
@@ -763621,8 +925147,8 @@
 + * Allow filesystems with binary mount data to explicitly set mount point
 + * labeling information.
 + */
-+int selinux_set_mnt_opts(struct super_block *sb, char **mount_options,
-+				 int *flags, int num_opts)
++static int selinux_set_mnt_opts(struct super_block *sb, char **mount_options,
++				int *flags, int num_opts)
 +{
 +	int rc = 0, i;
 +	struct task_security_struct *tsec = current->security;
@@ -763632,8 +925158,7 @@
 +	struct inode_security_struct *root_isec = inode->i_security;
 +	u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
 +	u32 defcontext_sid = 0;
- 
--	if (!seen)
++
 +	mutex_lock(&sbsec->lock);
 +
 +	if (!ss_initialized) {
@@ -763685,7 +925210,8 @@
 +			break;
 +		case CONTEXT_MNT:
 +			context_sid = sid;
-+
+ 
+-		rc = may_context_mount_sb_relabel(sid, sbsec, tsec);
 +			if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
 +					context_sid))
 +				goto out_double_mount;
@@ -763739,8 +925265,7 @@
 +
 +	/* sets the context of the superblock for the fs being mounted. */
 +	if (fscontext_sid) {
- 
--		rc = may_context_mount_sb_relabel(sid, sbsec, tsec);
++
 +		rc = may_context_mount_sb_relabel(fscontext_sid, sbsec, tsec);
  		if (rc)
 -			goto out_free;
@@ -763751,7 +925276,7 @@
  	}
  
  	/*
-@@ -517,182 +686,250 @@ static int try_context_mount(struct super_block *sb, void *data)
+@@ -517,182 +708,250 @@ static int try_context_mount(struct super_block *sb, void *data)
  	 * sets the label used on all file below the mountpoint, and will set
  	 * the superblock context if not already set.
  	 */
@@ -764138,7 +925663,789 @@
  	return rc;
  }
  
-@@ -4710,6 +4947,11 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+@@ -3158,7 +3417,7 @@ out:
+ #endif /* IPV6 */
+ 
+ static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad,
+-			     char **addrp, int *len, int src, u8 *proto)
++			     char **addrp, int src, u8 *proto)
+ {
+ 	int ret = 0;
+ 
+@@ -3167,7 +3426,6 @@ static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad,
+ 		ret = selinux_parse_skb_ipv4(skb, ad, proto);
+ 		if (ret || !addrp)
+ 			break;
+-		*len = 4;
+ 		*addrp = (char *)(src ? &ad->u.net.v4info.saddr :
+ 					&ad->u.net.v4info.daddr);
+ 		break;
+@@ -3177,7 +3435,6 @@ static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad,
+ 		ret = selinux_parse_skb_ipv6(skb, ad, proto);
+ 		if (ret || !addrp)
+ 			break;
+-		*len = 16;
+ 		*addrp = (char *)(src ? &ad->u.net.v6info.saddr :
+ 					&ad->u.net.v6info.daddr);
+ 		break;
+@@ -3186,36 +3443,48 @@ static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad,
+ 		break;
+ 	}
+ 
++	if (unlikely(ret))
++		printk(KERN_WARNING
++		       "SELinux: failure in selinux_parse_skb(),"
++		       " unable to parse packet\n");
++
+ 	return ret;
+ }
+ 
+ /**
+- * selinux_skb_extlbl_sid - Determine the external label of a packet
++ * selinux_skb_peerlbl_sid - Determine the peer label of a packet
+  * @skb: the packet
+- * @sid: the packet's SID
++ * @family: protocol family
++ * @sid: the packet's peer label SID
+  *
+  * Description:
+- * Check the various different forms of external packet labeling and determine
+- * the external SID for the packet.  If only one form of external labeling is
+- * present then it is used, if both labeled IPsec and NetLabel labels are
+- * present then the SELinux type information is taken from the labeled IPsec
+- * SA and the MLS sensitivity label information is taken from the NetLabel
+- * security attributes.  This bit of "magic" is done in the call to
+- * selinux_netlbl_skbuff_getsid().
++ * Check the various different forms of network peer labeling and determine
++ * the peer label/SID for the packet; most of the magic actually occurs in
++ * the security server function security_net_peersid_resolve().  The function
++ * returns zero if the value in @sid is valid (although it may be SECSID_NULL)
++ * or -EACCES if @sid is invalid due to inconsistencies with the different
++ * peer labels.
+  *
+  */
+-static void selinux_skb_extlbl_sid(struct sk_buff *skb, u32 *sid)
++static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
+ {
++	int err;
+ 	u32 xfrm_sid;
+ 	u32 nlbl_sid;
++	u32 nlbl_type;
+ 
+ 	selinux_skb_xfrm_sid(skb, &xfrm_sid);
+-	if (selinux_netlbl_skbuff_getsid(skb,
+-					 (xfrm_sid == SECSID_NULL ?
+-					  SECINITSID_NETMSG : xfrm_sid),
+-					 &nlbl_sid) != 0)
+-		nlbl_sid = SECSID_NULL;
+-	*sid = (nlbl_sid == SECSID_NULL ? xfrm_sid : nlbl_sid);
++	selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
++
++	err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
++	if (unlikely(err)) {
++		printk(KERN_WARNING
++		       "SELinux: failure in selinux_skb_peerlbl_sid(),"
++		       " unable to determine packet's peer label\n");
++		return -EACCES;
++	}
++
++	return 0;
+ }
+ 
+ /* socket security operations */
+@@ -3281,6 +3550,7 @@ static int selinux_socket_post_create(struct socket *sock, int family,
+ 	if (sock->sk) {
+ 		sksec = sock->sk->sk_security;
+ 		sksec->sid = isec->sid;
++		sksec->sclass = isec->sclass;
+ 		err = selinux_netlbl_socket_post_create(sock);
+ 	}
+ 
+@@ -3373,7 +3643,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
+ 			break;
+ 		}
+ 		
+-		err = security_node_sid(family, addrp, addrlen, &sid);
++		err = sel_netnode_sid(addrp, family, &sid);
+ 		if (err)
+ 			goto out;
+ 		
+@@ -3584,131 +3854,182 @@ static int selinux_socket_unix_may_send(struct socket *sock,
+ 	return 0;
+ }
+ 
+-static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
+-		struct avc_audit_data *ad, u16 family, char *addrp, int len)
++static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family,
++				    u32 peer_sid,
++				    struct avc_audit_data *ad)
+ {
+-	int err = 0;
+-	u32 netif_perm, node_perm, node_sid, if_sid, recv_perm = 0;
+-	struct socket *sock;
+-	u16 sock_class = 0;
+-	u32 sock_sid = 0;
+-
+- 	read_lock_bh(&sk->sk_callback_lock);
+- 	sock = sk->sk_socket;
+- 	if (sock) {
+- 		struct inode *inode;
+- 		inode = SOCK_INODE(sock);
+- 		if (inode) {
+- 			struct inode_security_struct *isec;
+- 			isec = inode->i_security;
+- 			sock_sid = isec->sid;
+- 			sock_class = isec->sclass;
+- 		}
+- 	}
+- 	read_unlock_bh(&sk->sk_callback_lock);
+- 	if (!sock_sid)
+-  		goto out;
++	int err;
++	u32 if_sid;
++	u32 node_sid;
+ 
+-	if (!skb->dev)
+-		goto out;
++	err = sel_netif_sid(ifindex, &if_sid);
++	if (err)
++		return err;
++	err = avc_has_perm(peer_sid, if_sid,
++			   SECCLASS_NETIF, NETIF__INGRESS, ad);
++	if (err)
++		return err;
+ 
+-	err = sel_netif_sids(skb->dev, &if_sid, NULL);
++	err = sel_netnode_sid(addrp, family, &node_sid);
+ 	if (err)
+-		goto out;
++		return err;
++	return avc_has_perm(peer_sid, node_sid,
++			    SECCLASS_NODE, NODE__RECVFROM, ad);
++}
+ 
+-	switch (sock_class) {
++static int selinux_sock_rcv_skb_iptables_compat(struct sock *sk,
++						struct sk_buff *skb,
++						struct avc_audit_data *ad,
++						u16 family,
++						char *addrp)
++{
++	int err;
++	struct sk_security_struct *sksec = sk->sk_security;
++	u16 sk_class;
++	u32 netif_perm, node_perm, recv_perm;
++	u32 port_sid, node_sid, if_sid, sk_sid;
++
++	sk_sid = sksec->sid;
++	sk_class = sksec->sclass;
++
++	switch (sk_class) {
+ 	case SECCLASS_UDP_SOCKET:
+ 		netif_perm = NETIF__UDP_RECV;
+ 		node_perm = NODE__UDP_RECV;
+ 		recv_perm = UDP_SOCKET__RECV_MSG;
+ 		break;
+-	
+ 	case SECCLASS_TCP_SOCKET:
+ 		netif_perm = NETIF__TCP_RECV;
+ 		node_perm = NODE__TCP_RECV;
+ 		recv_perm = TCP_SOCKET__RECV_MSG;
+ 		break;
+-
+ 	case SECCLASS_DCCP_SOCKET:
+ 		netif_perm = NETIF__DCCP_RECV;
+ 		node_perm = NODE__DCCP_RECV;
+ 		recv_perm = DCCP_SOCKET__RECV_MSG;
+ 		break;
+-
+ 	default:
+ 		netif_perm = NETIF__RAWIP_RECV;
+ 		node_perm = NODE__RAWIP_RECV;
++		recv_perm = 0;
+ 		break;
+ 	}
+ 
+-	err = avc_has_perm(sock_sid, if_sid, SECCLASS_NETIF, netif_perm, ad);
++	err = sel_netif_sid(skb->iif, &if_sid);
+ 	if (err)
+-		goto out;
+-	
+-	err = security_node_sid(family, addrp, len, &node_sid);
++		return err;
++	err = avc_has_perm(sk_sid, if_sid, SECCLASS_NETIF, netif_perm, ad);
+ 	if (err)
+-		goto out;
++		return err;
+ 	
+-	err = avc_has_perm(sock_sid, node_sid, SECCLASS_NODE, node_perm, ad);
++	err = sel_netnode_sid(addrp, family, &node_sid);
+ 	if (err)
+-		goto out;
++		return err;
++	err = avc_has_perm(sk_sid, node_sid, SECCLASS_NODE, node_perm, ad);
++	if (err)
++		return err;
+ 
+-	if (recv_perm) {
+-		u32 port_sid;
++	if (!recv_perm)
++		return 0;
++	err = security_port_sid(sk->sk_family, sk->sk_type,
++				sk->sk_protocol, ntohs(ad->u.net.sport),
++				&port_sid);
++	if (unlikely(err)) {
++		printk(KERN_WARNING
++		       "SELinux: failure in"
++		       " selinux_sock_rcv_skb_iptables_compat(),"
++		       " network port label not found\n");
++		return err;
++	}
++	return avc_has_perm(sk_sid, port_sid, sk_class, recv_perm, ad);
++}
+ 
+-		err = security_port_sid(sk->sk_family, sk->sk_type,
+-		                        sk->sk_protocol, ntohs(ad->u.net.sport),
+-		                        &port_sid);
+-		if (err)
+-			goto out;
++static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
++				       struct avc_audit_data *ad,
++				       u16 family, char *addrp)
++{
++	int err;
++	struct sk_security_struct *sksec = sk->sk_security;
++	u32 peer_sid;
++	u32 sk_sid = sksec->sid;
+ 
+-		err = avc_has_perm(sock_sid, port_sid,
+-				   sock_class, recv_perm, ad);
++	if (selinux_compat_net)
++		err = selinux_sock_rcv_skb_iptables_compat(sk, skb, ad,
++							   family, addrp);
++	else
++		err = avc_has_perm(sk_sid, skb->secmark, SECCLASS_PACKET,
++				   PACKET__RECV, ad);
++	if (err)
++		return err;
++
++	if (selinux_policycap_netpeer) {
++		err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
++		if (err)
++			return err;
++		err = avc_has_perm(sk_sid, peer_sid,
++				   SECCLASS_PEER, PEER__RECV, ad);
++	} else {
++		err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, ad);
++		if (err)
++			return err;
++		err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, ad);
+ 	}
+ 
+-out:
+ 	return err;
+ }
+ 
+ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ {
+-	u16 family;
+-	char *addrp;
+-	int len, err = 0;
+-	struct avc_audit_data ad;
++	int err;
+ 	struct sk_security_struct *sksec = sk->sk_security;
++	u16 family = sk->sk_family;
++	u32 sk_sid = sksec->sid;
++	struct avc_audit_data ad;
++	char *addrp;
+ 
+-	family = sk->sk_family;
+ 	if (family != PF_INET && family != PF_INET6)
+-		goto out;
++		return 0;
+ 
+ 	/* Handle mapped IPv4 packets arriving via IPv6 sockets */
+ 	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+ 		family = PF_INET;
+ 
+ 	AVC_AUDIT_DATA_INIT(&ad, NET);
+-	ad.u.net.netif = skb->dev ? skb->dev->name : "[unknown]";
++	ad.u.net.netif = skb->iif;
+ 	ad.u.net.family = family;
+-
+-	err = selinux_parse_skb(skb, &ad, &addrp, &len, 1, NULL);
++	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+-	if (selinux_compat_net)
+-		err = selinux_sock_rcv_skb_compat(sk, skb, &ad, family,
+-						  addrp, len);
+-	else
+-		err = avc_has_perm(sksec->sid, skb->secmark, SECCLASS_PACKET,
+-				   PACKET__RECV, &ad);
+-	if (err)
+-		goto out;
++	/* If any sort of compatibility mode is enabled then hand off processing
++	 * to the selinux_sock_rcv_skb_compat() function to deal with the
++	 * special handling.  We do this in an attempt to keep this function
++	 * as fast and as clean as possible. */
++	if (selinux_compat_net || !selinux_policycap_netpeer)
++		return selinux_sock_rcv_skb_compat(sk, skb, &ad,
++						   family, addrp);
+ 
+-	err = selinux_netlbl_sock_rcv_skb(sksec, skb, &ad);
+-	if (err)
+-		goto out;
++	if (netlbl_enabled() || selinux_xfrm_enabled()) {
++		u32 peer_sid;
++
++		err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
++		if (err)
++			return err;
++		err = selinux_inet_sys_rcv_skb(skb->iif, addrp, family,
++					       peer_sid, &ad);
++		if (err)
++			return err;
++		err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
++				   PEER__RECV, &ad);
++	}
++
++	if (selinux_secmark_enabled()) {
++		err = avc_has_perm(sk_sid, skb->secmark, SECCLASS_PACKET,
++				   PACKET__RECV, &ad);
++		if (err)
++			return err;
++	}
+ 
+-	err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
+-out:	
+ 	return err;
+ }
+ 
+@@ -3759,18 +4080,25 @@ out:
+ static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
+ {
+ 	u32 peer_secid = SECSID_NULL;
+-	int err = 0;
++	u16 family;
+ 
+-	if (sock && sock->sk->sk_family == PF_UNIX)
++	if (sock)
++		family = sock->sk->sk_family;
++	else if (skb && skb->sk)
++		family = skb->sk->sk_family;
++	else
++		goto out;
++
++	if (sock && family == PF_UNIX)
+ 		selinux_get_inode_sid(SOCK_INODE(sock), &peer_secid);
+ 	else if (skb)
+-		selinux_skb_extlbl_sid(skb, &peer_secid);
++		selinux_skb_peerlbl_sid(skb, family, &peer_secid);
+ 
+-	if (peer_secid == SECSID_NULL)
+-		err = -EINVAL;
++out:
+ 	*secid = peer_secid;
+-
+-	return err;
++	if (peer_secid == SECSID_NULL)
++		return -EINVAL;
++	return 0;
+ }
+ 
+ static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
+@@ -3790,6 +4118,7 @@ static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk)
+ 
+ 	newssec->sid = ssec->sid;
+ 	newssec->peer_sid = ssec->peer_sid;
++	newssec->sclass = ssec->sclass;
+ 
+ 	selinux_netlbl_sk_security_clone(ssec, newssec);
+ }
+@@ -3813,6 +4142,7 @@ static void selinux_sock_graft(struct sock* sk, struct socket *parent)
+ 	if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6 ||
+ 	    sk->sk_family == PF_UNIX)
+ 		isec->sid = sksec->sid;
++	sksec->sclass = isec->sclass;
+ 
+ 	selinux_netlbl_sock_graft(sk, parent);
+ }
+@@ -3825,7 +4155,9 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ 	u32 newsid;
+ 	u32 peersid;
+ 
+-	selinux_skb_extlbl_sid(skb, &peersid);
++	err = selinux_skb_peerlbl_sid(skb, sk->sk_family, &peersid);
++	if (err)
++		return err;
+ 	if (peersid == SECSID_NULL) {
+ 		req->secid = sksec->sid;
+ 		req->peer_secid = SECSID_NULL;
+@@ -3863,7 +4195,7 @@ static void selinux_inet_conn_established(struct sock *sk,
+ {
+ 	struct sk_security_struct *sksec = sk->sk_security;
+ 
+-	selinux_skb_extlbl_sid(skb, &sksec->peer_sid);
++	selinux_skb_peerlbl_sid(skb, sk->sk_family, &sksec->peer_sid);
+ }
+ 
+ static void selinux_req_classify_flow(const struct request_sock *req,
+@@ -3910,149 +4242,260 @@ out:
+ 
+ #ifdef CONFIG_NETFILTER
+ 
+-static int selinux_ip_postroute_last_compat(struct sock *sk, struct net_device *dev,
+-					    struct avc_audit_data *ad,
+-					    u16 family, char *addrp, int len)
++static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
++				       u16 family)
+ {
+-	int err = 0;
+-	u32 netif_perm, node_perm, node_sid, if_sid, send_perm = 0;
+-	struct socket *sock;
+-	struct inode *inode;
+-	struct inode_security_struct *isec;
++	char *addrp;
++	u32 peer_sid;
++	struct avc_audit_data ad;
++	u8 secmark_active;
++	u8 peerlbl_active;
+ 
+-	sock = sk->sk_socket;
+-	if (!sock)
+-		goto out;
++	if (!selinux_policycap_netpeer)
++		return NF_ACCEPT;
+ 
+-	inode = SOCK_INODE(sock);
+-	if (!inode)
+-		goto out;
++	secmark_active = selinux_secmark_enabled();
++	peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
++	if (!secmark_active && !peerlbl_active)
++		return NF_ACCEPT;
+ 
+-	isec = inode->i_security;
+-	
+-	err = sel_netif_sids(dev, &if_sid, NULL);
+-	if (err)
+-		goto out;
++	AVC_AUDIT_DATA_INIT(&ad, NET);
++	ad.u.net.netif = ifindex;
++	ad.u.net.family = family;
++	if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
++		return NF_DROP;
++
++	if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
++		return NF_DROP;
++
++	if (peerlbl_active)
++		if (selinux_inet_sys_rcv_skb(ifindex, addrp, family,
++					     peer_sid, &ad) != 0)
++			return NF_DROP;
++
++	if (secmark_active)
++		if (avc_has_perm(peer_sid, skb->secmark,
++				 SECCLASS_PACKET, PACKET__FORWARD_IN, &ad))
++			return NF_DROP;
++
++	return NF_ACCEPT;
++}
++
++static unsigned int selinux_ipv4_forward(unsigned int hooknum,
++					 struct sk_buff *skb,
++					 const struct net_device *in,
++					 const struct net_device *out,
++					 int (*okfn)(struct sk_buff *))
++{
++	return selinux_ip_forward(skb, in->ifindex, PF_INET);
++}
+ 
+-	switch (isec->sclass) {
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++static unsigned int selinux_ipv6_forward(unsigned int hooknum,
++					 struct sk_buff *skb,
++					 const struct net_device *in,
++					 const struct net_device *out,
++					 int (*okfn)(struct sk_buff *))
++{
++	return selinux_ip_forward(skb, in->ifindex, PF_INET6);
++}
++#endif	/* IPV6 */
++
++static int selinux_ip_postroute_iptables_compat(struct sock *sk,
++						int ifindex,
++						struct avc_audit_data *ad,
++						u16 family, char *addrp)
++{
++	int err;
++	struct sk_security_struct *sksec = sk->sk_security;
++	u16 sk_class;
++	u32 netif_perm, node_perm, send_perm;
++	u32 port_sid, node_sid, if_sid, sk_sid;
++
++	sk_sid = sksec->sid;
++	sk_class = sksec->sclass;
++
++	switch (sk_class) {
+ 	case SECCLASS_UDP_SOCKET:
+ 		netif_perm = NETIF__UDP_SEND;
+ 		node_perm = NODE__UDP_SEND;
+ 		send_perm = UDP_SOCKET__SEND_MSG;
+ 		break;
+-	
+ 	case SECCLASS_TCP_SOCKET:
+ 		netif_perm = NETIF__TCP_SEND;
+ 		node_perm = NODE__TCP_SEND;
+ 		send_perm = TCP_SOCKET__SEND_MSG;
+ 		break;
+-
+ 	case SECCLASS_DCCP_SOCKET:
+ 		netif_perm = NETIF__DCCP_SEND;
+ 		node_perm = NODE__DCCP_SEND;
+ 		send_perm = DCCP_SOCKET__SEND_MSG;
+ 		break;
+-
+ 	default:
+ 		netif_perm = NETIF__RAWIP_SEND;
+ 		node_perm = NODE__RAWIP_SEND;
++		send_perm = 0;
+ 		break;
+ 	}
+ 
+-	err = avc_has_perm(isec->sid, if_sid, SECCLASS_NETIF, netif_perm, ad);
++	err = sel_netif_sid(ifindex, &if_sid);
+ 	if (err)
+-		goto out;
++		return err;
++	err = avc_has_perm(sk_sid, if_sid, SECCLASS_NETIF, netif_perm, ad);
++	if (err) return err;
+ 		
+-	err = security_node_sid(family, addrp, len, &node_sid);
++	err = sel_netnode_sid(addrp, family, &node_sid);
+ 	if (err)
+-		goto out;
+-	
+-	err = avc_has_perm(isec->sid, node_sid, SECCLASS_NODE, node_perm, ad);
++		return err;
++	err = avc_has_perm(sk_sid, node_sid, SECCLASS_NODE, node_perm, ad);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+-	if (send_perm) {
+-		u32 port_sid;
+-		
+-		err = security_port_sid(sk->sk_family,
+-		                        sk->sk_type,
+-		                        sk->sk_protocol,
+-		                        ntohs(ad->u.net.dport),
+-		                        &port_sid);
+-		if (err)
+-			goto out;
++	if (send_perm == 0)
++		return 0;
+ 
+-		err = avc_has_perm(isec->sid, port_sid, isec->sclass,
+-				   send_perm, ad);
++	err = security_port_sid(sk->sk_family, sk->sk_type,
++				sk->sk_protocol, ntohs(ad->u.net.dport),
++				&port_sid);
++	if (unlikely(err)) {
++		printk(KERN_WARNING
++		       "SELinux: failure in"
++		       " selinux_ip_postroute_iptables_compat(),"
++		       " network port label not found\n");
++		return err;
+ 	}
+-out:
+-	return err;
++	return avc_has_perm(sk_sid, port_sid, sk_class, send_perm, ad);
+ }
+ 
+-static unsigned int selinux_ip_postroute_last(unsigned int hooknum,
+-                                              struct sk_buff *skb,
+-                                              const struct net_device *in,
+-                                              const struct net_device *out,
+-                                              int (*okfn)(struct sk_buff *),
+-                                              u16 family)
++static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
++						int ifindex,
++						struct avc_audit_data *ad,
++						u16 family,
++						char *addrp,
++						u8 proto)
+ {
+-	char *addrp;
+-	int len, err = 0;
+-	struct sock *sk;
+-	struct avc_audit_data ad;
+-	struct net_device *dev = (struct net_device *)out;
++	struct sock *sk = skb->sk;
+ 	struct sk_security_struct *sksec;
+-	u8 proto;
+-
+-	sk = skb->sk;
+-	if (!sk)
+-		goto out;
+ 
++	if (sk == NULL)
++		return NF_ACCEPT;
+ 	sksec = sk->sk_security;
+ 
+-	AVC_AUDIT_DATA_INIT(&ad, NET);
+-	ad.u.net.netif = dev->name;
+-	ad.u.net.family = family;
++	if (selinux_compat_net) {
++		if (selinux_ip_postroute_iptables_compat(skb->sk, ifindex,
++							 ad, family, addrp))
++			return NF_DROP;
++	} else {
++		if (avc_has_perm(sksec->sid, skb->secmark,
++				 SECCLASS_PACKET, PACKET__SEND, ad))
++			return NF_DROP;
++	}
+ 
+-	err = selinux_parse_skb(skb, &ad, &addrp, &len, 0, &proto);
+-	if (err)
+-		goto out;
++	if (selinux_policycap_netpeer)
++		if (selinux_xfrm_postroute_last(sksec->sid, skb, ad, proto))
++			return NF_DROP;
+ 
+-	if (selinux_compat_net)
+-		err = selinux_ip_postroute_last_compat(sk, dev, &ad,
+-						       family, addrp, len);
+-	else
+-		err = avc_has_perm(sksec->sid, skb->secmark, SECCLASS_PACKET,
+-				   PACKET__SEND, &ad);
++	return NF_ACCEPT;
++}
+ 
+-	if (err)
+-		goto out;
++static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
++					 u16 family)
++{
++	u32 secmark_perm;
++	u32 peer_sid;
++	struct sock *sk;
++	struct avc_audit_data ad;
++	char *addrp;
++	u8 proto;
++	u8 secmark_active;
++	u8 peerlbl_active;
+ 
+-	err = selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto);
+-out:
+-	return err ? NF_DROP : NF_ACCEPT;
++	AVC_AUDIT_DATA_INIT(&ad, NET);
++	ad.u.net.netif = ifindex;
++	ad.u.net.family = family;
++	if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
++		return NF_DROP;
++
++	/* If any sort of compatibility mode is enabled then hand off processing
++	 * to the selinux_ip_postroute_compat() function to deal with the
++	 * special handling.  We do this in an attempt to keep this function
++	 * as fast and as clean as possible. */
++	if (selinux_compat_net || !selinux_policycap_netpeer)
++		return selinux_ip_postroute_compat(skb, ifindex, &ad,
++						   family, addrp, proto);
++
++	/* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
++	 * packet transformation so allow the packet to pass without any checks
++	 * since we'll have another chance to perform access control checks
++	 * when the packet is on its final way out.
++	 * NOTE: there appear to be some IPv6 multicast cases where skb->dst
++	 *       is NULL, in this case go ahead and apply access control. */
++	if (skb->dst != NULL && skb->dst->xfrm != NULL)
++		return NF_ACCEPT;
++
++	secmark_active = selinux_secmark_enabled();
++	peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
++	if (!secmark_active && !peerlbl_active)
++		return NF_ACCEPT;
++
++	/* if the packet is locally generated (skb->sk != NULL) then use the
++	 * socket's label as the peer label, otherwise the packet is being
++	 * forwarded through this system and we need to fetch the peer label
++	 * directly from the packet */
++	sk = skb->sk;
++	if (sk) {
++		struct sk_security_struct *sksec = sk->sk_security;
++		peer_sid = sksec->sid;
++		secmark_perm = PACKET__SEND;
++	} else {
++		if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
++			return NF_DROP;
++		secmark_perm = PACKET__FORWARD_OUT;
++	}
++
++	if (secmark_active)
++		if (avc_has_perm(peer_sid, skb->secmark,
++				 SECCLASS_PACKET, secmark_perm, &ad))
++			return NF_DROP;
++
++	if (peerlbl_active) {
++		u32 if_sid;
++		u32 node_sid;
++
++		if (sel_netif_sid(ifindex, &if_sid))
++			return NF_DROP;
++		if (avc_has_perm(peer_sid, if_sid,
++				 SECCLASS_NETIF, NETIF__EGRESS, &ad))
++			return NF_DROP;
++
++		if (sel_netnode_sid(addrp, family, &node_sid))
++			return NF_DROP;
++		if (avc_has_perm(peer_sid, node_sid,
++				 SECCLASS_NODE, NODE__SENDTO, &ad))
++			return NF_DROP;
++	}
++
++	return NF_ACCEPT;
+ }
+ 
+-static unsigned int selinux_ipv4_postroute_last(unsigned int hooknum,
+-						struct sk_buff *skb,
+-						const struct net_device *in,
+-						const struct net_device *out,
+-						int (*okfn)(struct sk_buff *))
++static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
++					   struct sk_buff *skb,
++					   const struct net_device *in,
++					   const struct net_device *out,
++					   int (*okfn)(struct sk_buff *))
+ {
+-	return selinux_ip_postroute_last(hooknum, skb, in, out, okfn, PF_INET);
++	return selinux_ip_postroute(skb, out->ifindex, PF_INET);
+ }
+ 
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+-
+-static unsigned int selinux_ipv6_postroute_last(unsigned int hooknum,
+-						struct sk_buff *skb,
+-						const struct net_device *in,
+-						const struct net_device *out,
+-						int (*okfn)(struct sk_buff *))
++static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
++					   struct sk_buff *skb,
++					   const struct net_device *in,
++					   const struct net_device *out,
++					   int (*okfn)(struct sk_buff *))
+ {
+-	return selinux_ip_postroute_last(hooknum, skb, in, out, okfn, PF_INET6);
++	return selinux_ip_postroute(skb, out->ifindex, PF_INET6);
+ }
+-
+ #endif	/* IPV6 */
+ 
+ #endif	/* CONFIG_NETFILTER */
+@@ -4710,6 +5153,11 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
  	return security_sid_to_context(secid, secdata, seclen);
  }
  
@@ -764150,7 +926457,7 @@
  static void selinux_release_secctx(char *secdata, u32 seclen)
  {
  	kfree(secdata);
-@@ -4800,6 +5042,9 @@ static struct security_operations selinux_ops = {
+@@ -4800,6 +5248,9 @@ static struct security_operations selinux_ops = {
  	.sb_statfs =			selinux_sb_statfs,
  	.sb_mount =			selinux_mount,
  	.sb_umount =			selinux_umount,
@@ -764160,7 +926467,7 @@
  
  	.inode_alloc_security =		selinux_inode_alloc_security,
  	.inode_free_security =		selinux_inode_free_security,
-@@ -4898,6 +5143,7 @@ static struct security_operations selinux_ops = {
+@@ -4898,6 +5349,7 @@ static struct security_operations selinux_ops = {
  	.setprocattr =                  selinux_setprocattr,
  
  	.secid_to_secctx =		selinux_secid_to_secctx,
@@ -764168,26 +926475,335 @@
  	.release_secctx =		selinux_release_secctx,
  
          .unix_stream_connect =		selinux_socket_unix_stream_connect,
-@@ -5035,7 +5281,7 @@ static struct nf_hook_ops selinux_ipv4_op = {
- 	.hook =		selinux_ipv4_postroute_last,
- 	.owner =	THIS_MODULE,
- 	.pf =		PF_INET,
+@@ -5031,22 +5483,40 @@ security_initcall(selinux_init);
+ 
+ #if defined(CONFIG_NETFILTER)
+ 
+-static struct nf_hook_ops selinux_ipv4_op = {
+-	.hook =		selinux_ipv4_postroute_last,
+-	.owner =	THIS_MODULE,
+-	.pf =		PF_INET,
 -	.hooknum =	NF_IP_POST_ROUTING,
-+	.hooknum =	NF_INET_POST_ROUTING,
- 	.priority =	NF_IP_PRI_SELINUX_LAST,
+-	.priority =	NF_IP_PRI_SELINUX_LAST,
++static struct nf_hook_ops selinux_ipv4_ops[] = {
++	{
++		.hook =		selinux_ipv4_postroute,
++		.owner =	THIS_MODULE,
++		.pf =		PF_INET,
++		.hooknum =	NF_INET_POST_ROUTING,
++		.priority =	NF_IP_PRI_SELINUX_LAST,
++	},
++	{
++		.hook =		selinux_ipv4_forward,
++		.owner =	THIS_MODULE,
++		.pf =		PF_INET,
++		.hooknum =	NF_INET_FORWARD,
++		.priority =	NF_IP_PRI_SELINUX_FIRST,
++	}
  };
  
-@@ -5045,7 +5291,7 @@ static struct nf_hook_ops selinux_ipv6_op = {
- 	.hook =		selinux_ipv6_postroute_last,
- 	.owner =	THIS_MODULE,
- 	.pf =		PF_INET6,
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ 
+-static struct nf_hook_ops selinux_ipv6_op = {
+-	.hook =		selinux_ipv6_postroute_last,
+-	.owner =	THIS_MODULE,
+-	.pf =		PF_INET6,
 -	.hooknum =	NF_IP6_POST_ROUTING,
-+	.hooknum =	NF_INET_POST_ROUTING,
- 	.priority =	NF_IP6_PRI_SELINUX_LAST,
+-	.priority =	NF_IP6_PRI_SELINUX_LAST,
++static struct nf_hook_ops selinux_ipv6_ops[] = {
++	{
++		.hook =		selinux_ipv6_postroute,
++		.owner =	THIS_MODULE,
++		.pf =		PF_INET6,
++		.hooknum =	NF_INET_POST_ROUTING,
++		.priority =	NF_IP6_PRI_SELINUX_LAST,
++	},
++	{
++		.hook =		selinux_ipv6_forward,
++		.owner =	THIS_MODULE,
++		.pf =		PF_INET6,
++		.hooknum =	NF_INET_FORWARD,
++		.priority =	NF_IP6_PRI_SELINUX_FIRST,
++	}
  };
  
+ #endif	/* IPV6 */
+@@ -5054,22 +5524,27 @@ static struct nf_hook_ops selinux_ipv6_op = {
+ static int __init selinux_nf_ip_init(void)
+ {
+ 	int err = 0;
++	u32 iter;
+ 
+ 	if (!selinux_enabled)
+ 		goto out;
+ 
+ 	printk(KERN_DEBUG "SELinux:  Registering netfilter hooks\n");
+ 
+-	err = nf_register_hook(&selinux_ipv4_op);
+-	if (err)
+-		panic("SELinux: nf_register_hook for IPv4: error %d\n", err);
++	for (iter = 0; iter < ARRAY_SIZE(selinux_ipv4_ops); iter++) {
++		err = nf_register_hook(&selinux_ipv4_ops[iter]);
++		if (err)
++			panic("SELinux: nf_register_hook for IPv4: error %d\n",
++			      err);
++	}
+ 
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+-
+-	err = nf_register_hook(&selinux_ipv6_op);
+-	if (err)
+-		panic("SELinux: nf_register_hook for IPv6: error %d\n", err);
+-
++	for (iter = 0; iter < ARRAY_SIZE(selinux_ipv6_ops); iter++) {
++		err = nf_register_hook(&selinux_ipv6_ops[iter]);
++		if (err)
++			panic("SELinux: nf_register_hook for IPv6: error %d\n",
++			      err);
++	}
+ #endif	/* IPV6 */
+ 
+ out:
+@@ -5081,11 +5556,15 @@ __initcall(selinux_nf_ip_init);
+ #ifdef CONFIG_SECURITY_SELINUX_DISABLE
+ static void selinux_nf_ip_exit(void)
+ {
++	u32 iter;
++
+ 	printk(KERN_DEBUG "SELinux:  Unregistering netfilter hooks\n");
+ 
+-	nf_unregister_hook(&selinux_ipv4_op);
++	for (iter = 0; iter < ARRAY_SIZE(selinux_ipv4_ops); iter++)
++		nf_unregister_hook(&selinux_ipv4_ops[iter]);
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+-	nf_unregister_hook(&selinux_ipv6_op);
++	for (iter = 0; iter < ARRAY_SIZE(selinux_ipv6_ops); iter++)
++		nf_unregister_hook(&selinux_ipv6_ops[iter]);
+ #endif	/* IPV6 */
+ }
+ #endif
+diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
+index 049bf69..399f868 100644
+--- a/security/selinux/include/av_perm_to_string.h
++++ b/security/selinux/include/av_perm_to_string.h
+@@ -37,6 +37,8 @@
+    S_(SECCLASS_NODE, NODE__ENFORCE_DEST, "enforce_dest")
+    S_(SECCLASS_NODE, NODE__DCCP_RECV, "dccp_recv")
+    S_(SECCLASS_NODE, NODE__DCCP_SEND, "dccp_send")
++   S_(SECCLASS_NODE, NODE__RECVFROM, "recvfrom")
++   S_(SECCLASS_NODE, NODE__SENDTO, "sendto")
+    S_(SECCLASS_NETIF, NETIF__TCP_RECV, "tcp_recv")
+    S_(SECCLASS_NETIF, NETIF__TCP_SEND, "tcp_send")
+    S_(SECCLASS_NETIF, NETIF__UDP_RECV, "udp_recv")
+@@ -45,6 +47,8 @@
+    S_(SECCLASS_NETIF, NETIF__RAWIP_SEND, "rawip_send")
+    S_(SECCLASS_NETIF, NETIF__DCCP_RECV, "dccp_recv")
+    S_(SECCLASS_NETIF, NETIF__DCCP_SEND, "dccp_send")
++   S_(SECCLASS_NETIF, NETIF__INGRESS, "ingress")
++   S_(SECCLASS_NETIF, NETIF__EGRESS, "egress")
+    S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__CONNECTTO, "connectto")
+    S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__NEWCONN, "newconn")
+    S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__ACCEPTFROM, "acceptfrom")
+@@ -149,6 +153,10 @@
+    S_(SECCLASS_PACKET, PACKET__SEND, "send")
+    S_(SECCLASS_PACKET, PACKET__RECV, "recv")
+    S_(SECCLASS_PACKET, PACKET__RELABELTO, "relabelto")
++   S_(SECCLASS_PACKET, PACKET__FLOW_IN, "flow_in")
++   S_(SECCLASS_PACKET, PACKET__FLOW_OUT, "flow_out")
++   S_(SECCLASS_PACKET, PACKET__FORWARD_IN, "forward_in")
++   S_(SECCLASS_PACKET, PACKET__FORWARD_OUT, "forward_out")
+    S_(SECCLASS_KEY, KEY__VIEW, "view")
+    S_(SECCLASS_KEY, KEY__READ, "read")
+    S_(SECCLASS_KEY, KEY__WRITE, "write")
+@@ -159,3 +167,4 @@
+    S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NODE_BIND, "node_bind")
+    S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NAME_CONNECT, "name_connect")
+    S_(SECCLASS_MEMPROTECT, MEMPROTECT__MMAP_ZERO, "mmap_zero")
++   S_(SECCLASS_PEER, PEER__RECV, "recv")
+diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
+index eda89a2..84c9abc 100644
+--- a/security/selinux/include/av_permissions.h
++++ b/security/selinux/include/av_permissions.h
+@@ -292,6 +292,8 @@
+ #define NODE__ENFORCE_DEST                        0x00000040UL
+ #define NODE__DCCP_RECV                           0x00000080UL
+ #define NODE__DCCP_SEND                           0x00000100UL
++#define NODE__RECVFROM                            0x00000200UL
++#define NODE__SENDTO                              0x00000400UL
+ #define NETIF__TCP_RECV                           0x00000001UL
+ #define NETIF__TCP_SEND                           0x00000002UL
+ #define NETIF__UDP_RECV                           0x00000004UL
+@@ -300,6 +302,8 @@
+ #define NETIF__RAWIP_SEND                         0x00000020UL
+ #define NETIF__DCCP_RECV                          0x00000040UL
+ #define NETIF__DCCP_SEND                          0x00000080UL
++#define NETIF__INGRESS                            0x00000100UL
++#define NETIF__EGRESS                             0x00000200UL
+ #define NETLINK_SOCKET__IOCTL                     0x00000001UL
+ #define NETLINK_SOCKET__READ                      0x00000002UL
+ #define NETLINK_SOCKET__WRITE                     0x00000004UL
+@@ -792,6 +796,10 @@
+ #define PACKET__SEND                              0x00000001UL
+ #define PACKET__RECV                              0x00000002UL
+ #define PACKET__RELABELTO                         0x00000004UL
++#define PACKET__FLOW_IN                           0x00000008UL
++#define PACKET__FLOW_OUT                          0x00000010UL
++#define PACKET__FORWARD_IN                        0x00000020UL
++#define PACKET__FORWARD_OUT                       0x00000040UL
+ #define KEY__VIEW                                 0x00000001UL
+ #define KEY__READ                                 0x00000002UL
+ #define KEY__WRITE                                0x00000004UL
+@@ -824,3 +832,4 @@
+ #define DCCP_SOCKET__NODE_BIND                    0x00400000UL
+ #define DCCP_SOCKET__NAME_CONNECT                 0x00800000UL
+ #define MEMPROTECT__MMAP_ZERO                     0x00000001UL
++#define PEER__RECV                                0x00000001UL
+diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
+index 553607a..80c28fa 100644
+--- a/security/selinux/include/avc.h
++++ b/security/selinux/include/avc.h
+@@ -51,7 +51,7 @@ struct avc_audit_data {
+ 			struct inode *inode;
+ 		} fs;
+ 		struct {
+-			char *netif;
++			int netif;
+ 			struct sock *sk;
+ 			u16 family;
+ 			__be16 dport;
+diff --git a/security/selinux/include/class_to_string.h b/security/selinux/include/class_to_string.h
+index e77de0e..b1b0d1d 100644
+--- a/security/selinux/include/class_to_string.h
++++ b/security/selinux/include/class_to_string.h
+@@ -64,3 +64,10 @@
+     S_(NULL)
+     S_("dccp_socket")
+     S_("memprotect")
++    S_(NULL)
++    S_(NULL)
++    S_(NULL)
++    S_(NULL)
++    S_(NULL)
++    S_(NULL)
++    S_("peer")
+diff --git a/security/selinux/include/flask.h b/security/selinux/include/flask.h
+index a9c2b20..09e9dd2 100644
+--- a/security/selinux/include/flask.h
++++ b/security/selinux/include/flask.h
+@@ -50,6 +50,7 @@
+ #define SECCLASS_KEY                                     58
+ #define SECCLASS_DCCP_SOCKET                             60
+ #define SECCLASS_MEMPROTECT                              61
++#define SECCLASS_PEER                                    68
+ 
+ /*
+  * Security identifier indices for initial entities
+diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
+index 8bd6f99..ce23edd 100644
+--- a/security/selinux/include/netif.h
++++ b/security/selinux/include/netif.h
+@@ -7,6 +7,8 @@
+  * Author: James Morris <jmorris at redhat.com>
+  *
+  * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris at redhat.com>
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
++ *                    Paul Moore, <paul.moore at hp.com>
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2,
+@@ -15,7 +17,7 @@
+ #ifndef _SELINUX_NETIF_H_
+ #define _SELINUX_NETIF_H_
+ 
+-int sel_netif_sids(struct net_device *dev, u32 *if_sid, u32 *msg_sid);
++int sel_netif_sid(int ifindex, u32 *sid);
+ 
+ #endif	/* _SELINUX_NETIF_H_ */
+ 
+diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
+index 218e3f7..00a2809 100644
+--- a/security/selinux/include/netlabel.h
++++ b/security/selinux/include/netlabel.h
+@@ -46,13 +46,17 @@ void selinux_netlbl_sk_security_init(struct sk_security_struct *ssec,
+ void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
+ 				      struct sk_security_struct *newssec);
+ 
+-int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u32 base_sid, u32 *sid);
++int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
++				 u16 family,
++				 u32 *type,
++				 u32 *sid);
+ 
+ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock);
+ int selinux_netlbl_socket_post_create(struct socket *sock);
+ int selinux_netlbl_inode_permission(struct inode *inode, int mask);
+ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+ 				struct sk_buff *skb,
++				u16 family,
+ 				struct avc_audit_data *ad);
+ int selinux_netlbl_socket_setsockopt(struct socket *sock,
+ 				     int level,
+@@ -83,9 +87,11 @@ static inline void selinux_netlbl_sk_security_clone(
+ }
+ 
+ static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+-					       u32 base_sid,
++					       u16 family,
++					       u32 *type,
+ 					       u32 *sid)
+ {
++	*type = NETLBL_NLTYPE_NONE;
+ 	*sid = SECSID_NULL;
+ 	return 0;
+ }
+@@ -106,6 +112,7 @@ static inline int selinux_netlbl_inode_permission(struct inode *inode,
+ }
+ static inline int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+ 					      struct sk_buff *skb,
++					      u16 family,
+ 					      struct avc_audit_data *ad)
+ {
+ 	return 0;
+diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h
+new file mode 100644
+index 0000000..1b94450
+--- /dev/null
++++ b/security/selinux/include/netnode.h
+@@ -0,0 +1,32 @@
++/*
++ * Network node table
++ *
++ * SELinux must keep a mapping of network nodes to labels/SIDs.  This
++ * mapping is maintained as part of the normal policy but a fast cache is
++ * needed to reduce the lookup overhead since most of these queries happen on
++ * a per-packet basis.
++ *
++ * Author: Paul Moore <paul.moore at hp.com>
++ *
++ */
++
++/*
++ * (c) Copyright Hewlett-Packard Development Company, L.P., 2007
++ *
++ * This program is free software: you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#ifndef _SELINUX_NETNODE_H
++#define _SELINUX_NETNODE_H
++
++int sel_netnode_sid(void *addr, u16 family, u32 *sid);
++
++#endif
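sel_netnode_sid() takes a raw address pointer plus the address family,
matching the call sites added to hooks.c earlier in this patch.  A short
illustrative sketch (not part of the patch) of resolving a destination
address taken from the audit data:

/* Illustrative sketch only -- mirrors the hooks.c call sites. */
#include <linux/socket.h>	/* PF_INET, PF_INET6 */
#include "avc.h"		/* struct avc_audit_data */
#include "netnode.h"

static int example_dest_node_sid(struct avc_audit_data *ad, u16 family,
				 u32 *sid)
{
	void *addrp;

	if (family == PF_INET)
		addrp = &ad->u.net.v4info.daddr;
	else
		addrp = &ad->u.net.v6info.daddr;
	return sel_netnode_sid(addrp, family, sid);
}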
 diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
-index 642a9fd..4138a80 100644
+index 642a9fd..c6c2bb4 100644
 --- a/security/selinux/include/objsec.h
 +++ b/security/selinux/include/objsec.h
 @@ -65,6 +65,7 @@ struct superblock_security_struct {
@@ -764198,11 +926814,1128 @@
  	unsigned char proc;             /* proc fs */
  	struct mutex lock;
  	struct list_head isec_head;
+@@ -95,17 +96,25 @@ struct bprm_security_struct {
+ };
+ 
+ struct netif_security_struct {
+-	struct net_device *dev;		/* back pointer */
+-	u32 if_sid;			/* SID for this interface */
+-	u32 msg_sid;			/* default SID for messages received on this interface */
++	int ifindex;			/* device index */
++	u32 sid;			/* SID for this interface */
++};
++
++struct netnode_security_struct {
++	union {
++		__be32 ipv4;		/* IPv4 node address */
++		struct in6_addr ipv6;	/* IPv6 node address */
++	} addr;
++	u32 sid;			/* SID for this node */
++	u16 family;			/* address family */
+ };
+ 
+ struct sk_security_struct {
+ 	struct sock *sk;		/* back pointer to sk object */
+ 	u32 sid;			/* SID of this object */
+ 	u32 peer_sid;			/* SID of peer */
+-#ifdef CONFIG_NETLABEL
+ 	u16 sclass;			/* sock security class */
++#ifdef CONFIG_NETLABEL
+ 	enum {				/* NetLabel state */
+ 		NLBL_UNSET = 0,
+ 		NLBL_REQUIRE,
+diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
+index 39337af..23137c1 100644
+--- a/security/selinux/include/security.h
++++ b/security/selinux/include/security.h
+@@ -25,13 +25,14 @@
+ #define POLICYDB_VERSION_MLS		19
+ #define POLICYDB_VERSION_AVTAB		20
+ #define POLICYDB_VERSION_RANGETRANS	21
++#define POLICYDB_VERSION_POLCAP		22
+ 
+ /* Range of policy versions we understand*/
+ #define POLICYDB_VERSION_MIN   POLICYDB_VERSION_BASE
+ #ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
+ #define POLICYDB_VERSION_MAX	CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
+ #else
+-#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_RANGETRANS
++#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_POLCAP
+ #endif
+ 
+ struct netlbl_lsm_secattr;
+@@ -39,8 +40,19 @@ struct netlbl_lsm_secattr;
+ extern int selinux_enabled;
+ extern int selinux_mls_enabled;
+ 
++/* Policy capabilities */
++enum {
++	POLICYDB_CAPABILITY_NETPEER,
++	__POLICYDB_CAPABILITY_MAX
++};
++#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
++
++extern int selinux_policycap_netpeer;
++
+ int security_load_policy(void * data, size_t len);
+ 
++int security_policycap_supported(unsigned int req_cap);
++
+ #define SEL_VEC_MAX 32
+ struct av_decision {
+ 	u32 allowed;
+@@ -77,8 +89,7 @@ int security_get_user_sids(u32 callsid, char *username,
+ int security_port_sid(u16 domain, u16 type, u8 protocol, u16 port,
+ 	u32 *out_sid);
+ 
+-int security_netif_sid(char *name, u32 *if_sid,
+-	u32 *msg_sid);
++int security_netif_sid(char *name, u32 *if_sid);
+ 
+ int security_node_sid(u16 domain, void *addr, u32 addrlen,
+ 	u32 *out_sid);
+@@ -88,10 +99,15 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
+ 
+ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);
+ 
++int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
++				 u32 xfrm_sid,
++				 u32 *peer_sid);
++
+ int security_get_classes(char ***classes, int *nclasses);
+ int security_get_permissions(char *class, char ***perms, int *nperms);
+ int security_get_reject_unknown(void);
+ int security_get_allow_unknown(void);
++int security_get_policycaps(int *len, int **values);
+ 
+ #define SECURITY_FS_USE_XATTR		1 /* use xattr */
+ #define SECURITY_FS_USE_TRANS		2 /* use transition SIDs, e.g. devpts/tmpfs */
+@@ -108,7 +124,6 @@ int security_genfs_sid(const char *fstype, char *name, u16 sclass,
+ 
+ #ifdef CONFIG_NETLABEL
+ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+-				   u32 base_sid,
+ 				   u32 *sid);
+ 
+ int security_netlbl_sid_to_secattr(u32 sid,
+@@ -116,7 +131,6 @@ int security_netlbl_sid_to_secattr(u32 sid,
+ #else
+ static inline int security_netlbl_secattr_to_sid(
+ 					    struct netlbl_lsm_secattr *secattr,
+-					    u32 base_sid,
+ 					    u32 *sid)
+ {
+ 	return -EIDRM;
+diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
+index 31929e3..36b0510 100644
+--- a/security/selinux/include/xfrm.h
++++ b/security/selinux/include/xfrm.h
+@@ -32,6 +32,13 @@ static inline struct inode_security_struct *get_sock_isec(struct sock *sk)
+ }
+ 
+ #ifdef CONFIG_SECURITY_NETWORK_XFRM
++extern atomic_t selinux_xfrm_refcount;
++
++static inline int selinux_xfrm_enabled(void)
++{
++	return (atomic_read(&selinux_xfrm_refcount) > 0);
++}
++
+ int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
+ 			struct avc_audit_data *ad);
+ int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
+@@ -43,6 +50,11 @@ static inline void selinux_xfrm_notify_policyload(void)
+ 	atomic_inc(&flow_cache_genid);
+ }
+ #else
++static inline int selinux_xfrm_enabled(void)
++{
++	return 0;
++}
++
+ static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
+ 			struct avc_audit_data *ad)
+ {
+diff --git a/security/selinux/netif.c b/security/selinux/netif.c
+index e87ab94..013d311 100644
+--- a/security/selinux/netif.c
++++ b/security/selinux/netif.c
+@@ -7,6 +7,8 @@
+  * Author: James Morris <jmorris at redhat.com>
+  *
+  * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris at redhat.com>
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
++ *                    Paul Moore <paul.moore at hp.com>
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2,
+@@ -29,14 +31,6 @@
+ #define SEL_NETIF_HASH_SIZE	64
+ #define SEL_NETIF_HASH_MAX	1024
+ 
+-#undef DEBUG
+-
+-#ifdef DEBUG
+-#define DEBUGP printk
+-#else
+-#define DEBUGP(format, args...)
+-#endif
+-
+ struct sel_netif
+ {
+ 	struct list_head list;
+@@ -49,174 +43,226 @@ static LIST_HEAD(sel_netif_list);
+ static DEFINE_SPINLOCK(sel_netif_lock);
+ static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
+ 
+-static inline u32 sel_netif_hasfn(struct net_device *dev)
++/**
++ * sel_netif_hashfn - Hashing function for the interface table
++ * @ifindex: the network interface
++ *
++ * Description:
++ * This is the hashing function for the network interface table; it returns the
++ * bucket number for the given interface.
++ *
++ */
++static inline u32 sel_netif_hashfn(int ifindex)
+ {
+-	return (dev->ifindex & (SEL_NETIF_HASH_SIZE - 1));
++	return (ifindex & (SEL_NETIF_HASH_SIZE - 1));
+ }
+ 
+-/*
+- * All of the devices should normally fit in the hash, so we optimize
+- * for that case.
++/**
++ * sel_netif_find - Search for an interface record
++ * @ifindex: the network interface
++ *
++ * Description:
++ * Search the network interface table and return the record matching @ifindex.
++ * If an entry cannot be found in the table, return NULL.
++ *
+  */
+-static inline struct sel_netif *sel_netif_find(struct net_device *dev)
++static inline struct sel_netif *sel_netif_find(int ifindex)
+ {
+-	struct list_head *pos;
+-	int idx = sel_netif_hasfn(dev);
++	int idx = sel_netif_hashfn(ifindex);
++	struct sel_netif *netif;
+ 
+-	__list_for_each_rcu(pos, &sel_netif_hash[idx]) {
+-		struct sel_netif *netif = list_entry(pos,
+-		                                     struct sel_netif, list);
+-		if (likely(netif->nsec.dev == dev))
++	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
++		/* all of the devices should normally fit in the hash, so we
++		 * optimize for that case */
++		if (likely(netif->nsec.ifindex == ifindex))
+ 			return netif;
+-	}
++
+ 	return NULL;
+ }
+ 
++/**
++ * sel_netif_insert - Insert a new interface into the table
++ * @netif: the new interface record
++ *
++ * Description:
++ * Add a new interface record to the network interface hash table.  Returns
++ * zero on success, negative values on failure.
++ *
++ */
+ static int sel_netif_insert(struct sel_netif *netif)
+ {
+-	int idx, ret = 0;
++	int idx;
+ 	
+-	if (sel_netif_total >= SEL_NETIF_HASH_MAX) {
+-		ret = -ENOSPC;
+-		goto out;
+-	}
++	if (sel_netif_total >= SEL_NETIF_HASH_MAX)
++		return -ENOSPC;
+ 	
+-	idx = sel_netif_hasfn(netif->nsec.dev);
++	idx = sel_netif_hashfn(netif->nsec.ifindex);
+ 	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
+ 	sel_netif_total++;
+-out:
+-	return ret;
++
++	return 0;
+ }
+ 
++/**
++ * sel_netif_free - Frees an interface entry
++ * @p: the entry's RCU field
++ *
++ * Description:
++ * This function is designed to be used as a callback to the call_rcu()
++ * function so that memory allocated to a hash table interface entry can be
++ * released safely.
++ *
++ */
+ static void sel_netif_free(struct rcu_head *p)
+ {
+ 	struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);
+-
+-	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);
+ 	kfree(netif);
+ }
+ 
++/**
++ * sel_netif_destroy - Remove an interface record from the table
++ * @netif: the existing interface record
++ *
++ * Description:
++ * Remove an existing interface record from the network interface table.
++ *
++ */
+ static void sel_netif_destroy(struct sel_netif *netif)
+ {
+-	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);
+-
+ 	list_del_rcu(&netif->list);
+ 	sel_netif_total--;
+ 	call_rcu(&netif->rcu_head, sel_netif_free);
+ }
+ 
+-static struct sel_netif *sel_netif_lookup(struct net_device *dev)
++/**
++ * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
++ * @ifindex: the network interface
++ * @sid: interface SID
++ *
++ * Description:
++ * This function determines the SID of a network interface by querying the
++ * security policy.  The result is added to the network interface table to
++ * speed up future queries.  Returns zero on success, negative values on
++ * failure.
++ *
++ */
++static int sel_netif_sid_slow(int ifindex, u32 *sid)
+ {
+ 	int ret;
+-	struct sel_netif *netif, *new;
+-	struct netif_security_struct *nsec;
+-
+-	netif = sel_netif_find(dev);
+-	if (likely(netif != NULL))
+-		goto out;
+-	
+-	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+-	if (!new) {
+-		netif = ERR_PTR(-ENOMEM);
+-		goto out;
++	struct sel_netif *netif;
++	struct sel_netif *new = NULL;
++	struct net_device *dev;
++
++	/* NOTE: we always use init's network namespace since we don't
++	 * currently support containers */
++
++	dev = dev_get_by_index(&init_net, ifindex);
++	if (unlikely(dev == NULL)) {
++		printk(KERN_WARNING
++		       "SELinux: failure in sel_netif_sid_slow(),"
++		       " invalid network interface (%d)\n", ifindex);
++		return -ENOENT;
+ 	}
+-	
+-	nsec = &new->nsec;
+ 
+-	ret = security_netif_sid(dev->name, &nsec->if_sid, &nsec->msg_sid);
+-	if (ret < 0) {
+-		kfree(new);
+-		netif = ERR_PTR(ret);
++	spin_lock_bh(&sel_netif_lock);
++	netif = sel_netif_find(ifindex);
++	if (netif != NULL) {
++		*sid = netif->nsec.sid;
++		ret = 0;
+ 		goto out;
+ 	}
+-
+-	nsec->dev = dev;
+-	
+-	spin_lock_bh(&sel_netif_lock);
+-	
+-	netif = sel_netif_find(dev);
+-	if (netif) {
+-		spin_unlock_bh(&sel_netif_lock);
+-		kfree(new);
++	new = kzalloc(sizeof(*new), GFP_ATOMIC);
++	if (new == NULL) {
++		ret = -ENOMEM;
+ 		goto out;
+ 	}
+-	
++	ret = security_netif_sid(dev->name, &new->nsec.sid);
++	if (ret != 0)
++		goto out;
++	new->nsec.ifindex = ifindex;
+ 	ret = sel_netif_insert(new);
+-	spin_unlock_bh(&sel_netif_lock);
+-	
+-	if (ret) {
+-		kfree(new);
+-		netif = ERR_PTR(ret);
++	if (ret != 0)
+ 		goto out;
+-	}
++	*sid = new->nsec.sid;
+ 
+-	netif = new;
+-	
+-	DEBUGP("new: ifindex=%u name=%s if_sid=%u msg_sid=%u\n", dev->ifindex, dev->name,
+-	        nsec->if_sid, nsec->msg_sid);
+ out:
+-	return netif;
+-}
+-
+-static void sel_netif_assign_sids(u32 if_sid_in, u32 msg_sid_in, u32 *if_sid_out, u32 *msg_sid_out)
+-{
+-	if (if_sid_out)
+-		*if_sid_out = if_sid_in;
+-	if (msg_sid_out)
+-		*msg_sid_out = msg_sid_in;
+-}
+-
+-static int sel_netif_sids_slow(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
+-{
+-	int ret = 0;
+-	u32 tmp_if_sid, tmp_msg_sid;
+-	
+-	ret = security_netif_sid(dev->name, &tmp_if_sid, &tmp_msg_sid);
+-	if (!ret)
+-		sel_netif_assign_sids(tmp_if_sid, tmp_msg_sid, if_sid, msg_sid);
++	spin_unlock_bh(&sel_netif_lock);
++	dev_put(dev);
++	if (unlikely(ret)) {
++		printk(KERN_WARNING
++		       "SELinux: failure in sel_netif_sid_slow(),"
++		       " unable to determine network interface label (%d)\n",
++		       ifindex);
++		kfree(new);
++	}
+ 	return ret;
+ }
+ 
+-int sel_netif_sids(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
++/**
++ * sel_netif_sid - Lookup the SID of a network interface
++ * @ifindex: the network interface
++ * @sid: interface SID
++ *
++ * Description:
++ * This function determines the SID of a network interface using the fastest
++ * method possible.  First the interface table is queried, but if an entry
++ * can't be found then the policy is queried and the result is added to the
++ * table to speed up future queries.  Returns zero on success, negative values
++ * on failure.
++ *
++ */
++int sel_netif_sid(int ifindex, u32 *sid)
+ {
+-	int ret = 0;
+ 	struct sel_netif *netif;
+ 
+ 	rcu_read_lock();
+-	netif = sel_netif_lookup(dev);
+-	if (IS_ERR(netif)) {
++	netif = sel_netif_find(ifindex);
++	if (likely(netif != NULL)) {
++		*sid = netif->nsec.sid;
+ 		rcu_read_unlock();
+-		ret = sel_netif_sids_slow(dev, if_sid, msg_sid);
+-		goto out;
++		return 0;
+ 	}
+-	sel_netif_assign_sids(netif->nsec.if_sid, netif->nsec.msg_sid, if_sid, msg_sid);
+ 	rcu_read_unlock();
+-out:
+-	return ret;
++
++	return sel_netif_sid_slow(ifindex, sid);
+ }
+ 
+-static void sel_netif_kill(struct net_device *dev)
++/**
++ * sel_netif_kill - Remove an entry from the network interface table
++ * @ifindex: the network interface
++ *
++ * Description:
++ * This function removes the entry matching @ifindex from the network interface
++ * table if it exists.
++ *
++ */
++static void sel_netif_kill(int ifindex)
+ {
+ 	struct sel_netif *netif;
+ 
+ 	spin_lock_bh(&sel_netif_lock);
+-	netif = sel_netif_find(dev);
++	netif = sel_netif_find(ifindex);
+ 	if (netif)
+ 		sel_netif_destroy(netif);
+ 	spin_unlock_bh(&sel_netif_lock);
+ }
+ 
++/**
++ * sel_netif_flush - Flush the entire network interface table
++ *
++ * Description:
++ * Remove all entries from the network interface table.
++ *
++ */
+ static void sel_netif_flush(void)
+ {
+ 	int idx;
++	struct sel_netif *netif;
+ 
+ 	spin_lock_bh(&sel_netif_lock);
+-	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++) {
+-		struct sel_netif *netif;
+-		
++	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
+ 		list_for_each_entry(netif, &sel_netif_hash[idx], list)
+ 			sel_netif_destroy(netif);
+-	}
+ 	spin_unlock_bh(&sel_netif_lock);
+ }
+ 
+@@ -239,7 +285,7 @@ static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
+ 		return NOTIFY_DONE;
+ 
+ 	if (event == NETDEV_DOWN)
+-		sel_netif_kill(dev);
++		sel_netif_kill(dev->ifindex);
+ 
+ 	return NOTIFY_DONE;
+ }
+@@ -250,10 +296,10 @@ static struct notifier_block sel_netif_netdev_notifier = {
+ 
+ static __init int sel_netif_init(void)
+ {
+-	int i, err = 0;
++	int i, err;
+ 	
+ 	if (!selinux_enabled)
+-		goto out;
++		return 0;
+ 
+ 	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
+ 		INIT_LIST_HEAD(&sel_netif_hash[i]);
+@@ -265,7 +311,6 @@ static __init int sel_netif_init(void)
+ 	if (err)
+ 		panic("avc_add_callback() failed, error %d\n", err);
+ 
+-out:
+ 	return err;
+ }
+ 
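The reworked interface table above is a fast-path/slow-path cache: lookups walk one hash bucket under RCU, and only a miss takes sel_netif_lock, queries the policy and inserts the result. The user-space sketch below (illustrative only, hypothetical names, no RCU or locking, not part of the patch) shows just that lookup/insert flow:

/* Minimal sketch of the ifindex -> SID cache above: hash on the ifindex,
 * scan one bucket, and on a miss ask the "policy" and insert the answer.
 * The kernel code additionally protects readers with RCU, writers with
 * sel_netif_lock, and bounds the table with SEL_NETIF_HASH_MAX. */
#include <stdio.h>
#include <stdlib.h>

#define HASH_SIZE 64			/* power of two, so the mask below works */

struct netif_entry {
	int ifindex;
	unsigned int sid;
	struct netif_entry *next;
};

static struct netif_entry *hash[HASH_SIZE];

static unsigned int hashfn(int ifindex)
{
	return (unsigned int)ifindex & (HASH_SIZE - 1);
}

/* stand-in for security_netif_sid(); made up for the example */
static unsigned int policy_netif_sid(int ifindex)
{
	return 1000u + (unsigned int)ifindex;
}

static int netif_sid(int ifindex, unsigned int *sid)
{
	unsigned int idx = hashfn(ifindex);
	struct netif_entry *e;

	for (e = hash[idx]; e != NULL; e = e->next)	/* fast path: cache hit */
		if (e->ifindex == ifindex) {
			*sid = e->sid;
			return 0;
		}

	e = calloc(1, sizeof(*e));			/* slow path: ask the policy */
	if (e == NULL)
		return -1;
	e->ifindex = ifindex;
	e->sid = policy_netif_sid(ifindex);
	e->next = hash[idx];
	hash[idx] = e;
	*sid = e->sid;
	return 0;
}

int main(void)
{
	unsigned int sid;

	if (netif_sid(2, &sid) == 0)
		printf("first lookup:  sid=%u\n", sid);
	if (netif_sid(2, &sid) == 0)
		printf("second lookup: sid=%u (served from the table)\n", sid);
	return 0;
}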
+diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
+index 66e013d..0fa2be4 100644
+--- a/security/selinux/netlabel.c
++++ b/security/selinux/netlabel.c
+@@ -36,6 +36,33 @@
+ #include "security.h"
+ 
+ /**
++ * selinux_netlbl_sidlookup_cached - Cache a SID lookup
++ * @skb: the packet
++ * @secattr: the NetLabel security attributes
++ * @sid: the SID
++ *
++ * Description:
++ * Query the SELinux security server to look up the correct SID for the given
++ * security attributes.  If the query is successful, cache the result to speed
++ * up future lookups.  Returns zero on success, negative values on failure.
++ *
++ */
++static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb,
++					   struct netlbl_lsm_secattr *secattr,
++					   u32 *sid)
++{
++	int rc;
++
++	rc = security_netlbl_secattr_to_sid(secattr, sid);
++	if (rc == 0 &&
++	    (secattr->flags & NETLBL_SECATTR_CACHEABLE) &&
++	    (secattr->flags & NETLBL_SECATTR_CACHE))
++		netlbl_cache_add(skb, secattr);
++
++	return rc;
++}
++
++/**
+  * selinux_netlbl_sock_setsid - Label a socket using the NetLabel mechanism
+  * @sk: the socket to label
+  * @sid: the SID to use
+@@ -137,14 +164,14 @@ void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
+ 	 * lock as other threads could have access to ssec */
+ 	rcu_read_lock();
+ 	selinux_netlbl_sk_security_reset(newssec, ssec->sk->sk_family);
+-	newssec->sclass = ssec->sclass;
+ 	rcu_read_unlock();
+ }
+ 
+ /**
+  * selinux_netlbl_skbuff_getsid - Get the sid of a packet using NetLabel
+  * @skb: the packet
+- * @base_sid: the SELinux SID to use as a context for MLS only attributes
++ * @family: protocol family
++ * @type: NetLabel labeling protocol type
+  * @sid: the SID
+  *
+  * Description:
+@@ -153,7 +180,10 @@ void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
+  * assign to the packet.  Returns zero on success, negative values on failure.
+  *
+  */
+-int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u32 base_sid, u32 *sid)
++int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
++				 u16 family,
++				 u32 *type,
++				 u32 *sid)
+ {
+ 	int rc;
+ 	struct netlbl_lsm_secattr secattr;
+@@ -164,15 +194,12 @@ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u32 base_sid, u32 *sid)
+ 	}
+ 
+ 	netlbl_secattr_init(&secattr);
+-	rc = netlbl_skbuff_getattr(skb, &secattr);
+-	if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) {
+-		rc = security_netlbl_secattr_to_sid(&secattr, base_sid, sid);
+-		if (rc == 0 &&
+-		    (secattr.flags & NETLBL_SECATTR_CACHEABLE) &&
+-		    (secattr.flags & NETLBL_SECATTR_CACHE))
+-			netlbl_cache_add(skb, &secattr);
+-	} else
++	rc = netlbl_skbuff_getattr(skb, family, &secattr);
++	if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
++		rc = selinux_netlbl_sidlookup_cached(skb, &secattr, sid);
++	else
+ 		*sid = SECSID_NULL;
++	*type = secattr.type;
+ 	netlbl_secattr_destroy(&secattr);
+ 
+ 	return rc;
+@@ -190,13 +217,10 @@ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u32 base_sid, u32 *sid)
+  */
+ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock)
+ {
+-	struct inode_security_struct *isec = SOCK_INODE(sock)->i_security;
+ 	struct sk_security_struct *sksec = sk->sk_security;
+ 	struct netlbl_lsm_secattr secattr;
+ 	u32 nlbl_peer_sid;
+ 
+-	sksec->sclass = isec->sclass;
+-
+ 	rcu_read_lock();
+ 
+ 	if (sksec->nlbl_state != NLBL_REQUIRE) {
+@@ -207,9 +231,7 @@ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock)
+ 	netlbl_secattr_init(&secattr);
+ 	if (netlbl_sock_getattr(sk, &secattr) == 0 &&
+ 	    secattr.flags != NETLBL_SECATTR_NONE &&
+-	    security_netlbl_secattr_to_sid(&secattr,
+-					   SECINITSID_NETMSG,
+-					   &nlbl_peer_sid) == 0)
++	    security_netlbl_secattr_to_sid(&secattr, &nlbl_peer_sid) == 0)
+ 		sksec->peer_sid = nlbl_peer_sid;
+ 	netlbl_secattr_destroy(&secattr);
+ 
+@@ -234,11 +256,8 @@ int selinux_netlbl_socket_post_create(struct socket *sock)
+ {
+ 	int rc = 0;
+ 	struct sock *sk = sock->sk;
+-	struct inode_security_struct *isec = SOCK_INODE(sock)->i_security;
+ 	struct sk_security_struct *sksec = sk->sk_security;
+ 
+-	sksec->sclass = isec->sclass;
+-
+ 	rcu_read_lock();
+ 	if (sksec->nlbl_state == NLBL_REQUIRE)
+ 		rc = selinux_netlbl_sock_setsid(sk, sksec->sid);
+@@ -292,6 +311,7 @@ int selinux_netlbl_inode_permission(struct inode *inode, int mask)
+  * selinux_netlbl_sock_rcv_skb - Do an inbound access check using NetLabel
+  * @sksec: the sock's sk_security_struct
+  * @skb: the packet
++ * @family: protocol family
+  * @ad: the audit data
+  *
+  * Description:
+@@ -302,6 +322,7 @@ int selinux_netlbl_inode_permission(struct inode *inode, int mask)
+  */
+ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+ 				struct sk_buff *skb,
++				u16 family,
+ 				struct avc_audit_data *ad)
+ {
+ 	int rc;
+@@ -313,16 +334,10 @@ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+ 		return 0;
+ 
+ 	netlbl_secattr_init(&secattr);
+-	rc = netlbl_skbuff_getattr(skb, &secattr);
+-	if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) {
+-		rc = security_netlbl_secattr_to_sid(&secattr,
+-						    SECINITSID_NETMSG,
+-						    &nlbl_sid);
+-		if (rc == 0 &&
+-		    (secattr.flags & NETLBL_SECATTR_CACHEABLE) &&
+-		    (secattr.flags & NETLBL_SECATTR_CACHE))
+-			netlbl_cache_add(skb, &secattr);
+-	} else
++	rc = netlbl_skbuff_getattr(skb, family, &secattr);
++	if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
++		rc = selinux_netlbl_sidlookup_cached(skb, &secattr, &nlbl_sid);
++	else
+ 		nlbl_sid = SECINITSID_UNLABELED;
+ 	netlbl_secattr_destroy(&secattr);
+ 	if (rc != 0)
+diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
+new file mode 100644
+index 0000000..f3c526f
+--- /dev/null
++++ b/security/selinux/netnode.c
+@@ -0,0 +1,354 @@
++/*
++ * Network node table
++ *
++ * SELinux must keep a mapping of network nodes to labels/SIDs.  This
++ * mapping is maintained as part of the normal policy but a fast cache is
++ * needed to reduce the lookup overhead since most of these queries happen on
++ * a per-packet basis.
++ *
++ * Author: Paul Moore <paul.moore at hp.com>
++ *
++ * This code is heavily based on the "netif" concept originally developed by
++ * James Morris <jmorris at redhat.com>
++ *   (see security/selinux/netif.c for more information)
++ *
++ */
++
++/*
++ * (c) Copyright Hewlett-Packard Development Company, L.P., 2007
++ *
++ * This program is free software: you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/rcupdate.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++#include <asm/bug.h>
++
++#include "objsec.h"
++
++#define SEL_NETNODE_HASH_SIZE       256
++#define SEL_NETNODE_HASH_BKT_LIMIT   16
++
++struct sel_netnode {
++	struct netnode_security_struct nsec;
++
++	struct list_head list;
++	struct rcu_head rcu;
++};
++
++/* NOTE: we are using a combined hash table for both IPv4 and IPv6; the reason
++ * for this is that I suspect most users will not make heavy use of both
++ * address families at the same time, so one table would usually end up wasted.
++ * If this becomes a problem we can always add a hash table for each address
++ * family later. */
++
++static LIST_HEAD(sel_netnode_list);
++static DEFINE_SPINLOCK(sel_netnode_lock);
++static struct list_head sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
++
++/**
++ * sel_netnode_free - Frees a node entry
++ * @p: the entry's RCU field
++ *
++ * Description:
++ * This function is designed to be used as a callback to the call_rcu()
++ * function so that memory allocated to a hash table node entry can be
++ * released safely.
++ *
++ */
++static void sel_netnode_free(struct rcu_head *p)
++{
++	struct sel_netnode *node = container_of(p, struct sel_netnode, rcu);
++	kfree(node);
++}
++
++/**
++ * sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table
++ * @addr: IPv4 address
++ *
++ * Description:
++ * This is the IPv4 hashing function for the network node table; it returns
++ * the bucket number for the given IP address.
++ *
++ */
++static u32 sel_netnode_hashfn_ipv4(__be32 addr)
++{
++	/* at some point we should determine if the mismatch in byte order
++	 * affects the hash function dramatically */
++	return (addr & (SEL_NETNODE_HASH_SIZE - 1));
++}
++
++/**
++ * sel_netnode_hashfn_ipv6 - IPv6 hashing function for the node table
++ * @addr: IPv6 address
++ *
++ * Description:
++ * This is the IPv6 hashing function for the network node table; it returns
++ * the bucket number for the given IP address.
++ *
++ */
++static u32 sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
++{
++	/* just hash the least significant 32 bits to keep things fast (they
++	 * are the most likely to be different anyway); we can revisit this
++	 * later if needed */
++	return (addr->s6_addr32[3] & (SEL_NETNODE_HASH_SIZE - 1));
++}
++
++/**
++ * sel_netnode_find - Search for a node record
++ * @addr: IP address
++ * @family: address family
++ *
++ * Description:
++ * Search the network node table and return the record matching @addr.  If an
++ * entry can not be found in the table return NULL.
++ *
++ */
++static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
++{
++	u32 idx;
++	struct sel_netnode *node;
++
++	switch (family) {
++	case PF_INET:
++		idx = sel_netnode_hashfn_ipv4(*(__be32 *)addr);
++		break;
++	case PF_INET6:
++		idx = sel_netnode_hashfn_ipv6(addr);
++		break;
++	default:
++		BUG();
++	}
++
++	list_for_each_entry_rcu(node, &sel_netnode_hash[idx], list)
++		if (node->nsec.family == family)
++			switch (family) {
++			case PF_INET:
++				if (node->nsec.addr.ipv4 == *(__be32 *)addr)
++					return node;
++				break;
++			case PF_INET6:
++				if (ipv6_addr_equal(&node->nsec.addr.ipv6,
++						    addr))
++					return node;
++				break;
++			}
++
++	return NULL;
++}
++
++/**
++ * sel_netnode_insert - Insert a new node into the table
++ * @node: the new node record
++ *
++ * Description:
++ * Add a new node record to the network address hash table.  Returns zero on
++ * success, negative values on failure.
++ *
++ */
++static int sel_netnode_insert(struct sel_netnode *node)
++{
++	u32 idx;
++	u32 count = 0;
++	struct sel_netnode *iter;
++
++	switch (node->nsec.family) {
++	case PF_INET:
++		idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
++		break;
++	case PF_INET6:
++		idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
++		break;
++	default:
++		BUG();
++	}
++	list_add_rcu(&node->list, &sel_netnode_hash[idx]);
++
++	/* we need to impose a limit on the growth of the hash table so check
++	 * this bucket to make sure it is within the specified bounds */
++	list_for_each_entry(iter, &sel_netnode_hash[idx], list)
++		if (++count > SEL_NETNODE_HASH_BKT_LIMIT) {
++			list_del_rcu(&iter->list);
++			call_rcu(&iter->rcu, sel_netnode_free);
++			break;
++		}
++
++	return 0;
++}
++
++/**
++ * sel_netnode_destroy - Remove a node record from the table
++ * @node: the existing node record
++ *
++ * Description:
++ * Remove an existing node record from the network address table.
++ *
++ */
++static void sel_netnode_destroy(struct sel_netnode *node)
++{
++	list_del_rcu(&node->list);
++	call_rcu(&node->rcu, sel_netnode_free);
++}
++
++/**
++ * sel_netnode_sid_slow - Lookup the SID of a network address using the policy
++ * @addr: the IP address
++ * @family: the address family
++ * @sid: node SID
++ *
++ * Description:
++ * This function determines the SID of a network address by querying the
++ * security policy.  The result is added to the network address table to
++ * speed up future queries.  Returns zero on success, negative values on
++ * failure.
++ *
++ */
++static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
++{
++	int ret;
++	struct sel_netnode *node;
++	struct sel_netnode *new = NULL;
++
++	spin_lock_bh(&sel_netnode_lock);
++	node = sel_netnode_find(addr, family);
++	if (node != NULL) {
++		*sid = node->nsec.sid;
++		ret = 0;
++		goto out;
++	}
++	new = kzalloc(sizeof(*new), GFP_ATOMIC);
++	if (new == NULL) {
++		ret = -ENOMEM;
++		goto out;
++	}
++	switch (family) {
++	case PF_INET:
++		ret = security_node_sid(PF_INET,
++					addr, sizeof(struct in_addr),
++					&new->nsec.sid);
++		new->nsec.addr.ipv4 = *(__be32 *)addr;
++		break;
++	case PF_INET6:
++		ret = security_node_sid(PF_INET6,
++					addr, sizeof(struct in6_addr),
++					&new->nsec.sid);
++		ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
++		break;
++	default:
++		BUG();
++	}
++	if (ret != 0)
++		goto out;
++	new->nsec.family = family;
++	ret = sel_netnode_insert(new);
++	if (ret != 0)
++		goto out;
++	*sid = new->nsec.sid;
++
++out:
++	spin_unlock_bh(&sel_netnode_lock);
++	if (unlikely(ret)) {
++		printk(KERN_WARNING
++		       "SELinux: failure in sel_netnode_sid_slow(),"
++		       " unable to determine network node label\n");
++		kfree(new);
++	}
++	return ret;
++}
++
++/**
++ * sel_netnode_sid - Lookup the SID of a network address
++ * @addr: the IP address
++ * @family: the address family
++ * @sid: node SID
++ *
++ * Description:
++ * This function determines the SID of a network address using the fastest
++ * method possible.  First the address table is queried, but if an entry
++ * can't be found then the policy is queried and the result is added to the
++ * table to speed up future queries.  Returns zero on success, negative values
++ * on failure.
++ *
++ */
++int sel_netnode_sid(void *addr, u16 family, u32 *sid)
++{
++	struct sel_netnode *node;
++
++	rcu_read_lock();
++	node = sel_netnode_find(addr, family);
++	if (node != NULL) {
++		*sid = node->nsec.sid;
++		rcu_read_unlock();
++		return 0;
++	}
++	rcu_read_unlock();
++
++	return sel_netnode_sid_slow(addr, family, sid);
++}
++
++/**
++ * sel_netnode_flush - Flush the entire network address table
++ *
++ * Description:
++ * Remove all entries from the network address table.
++ *
++ */
++static void sel_netnode_flush(void)
++{
++	u32 idx;
++	struct sel_netnode *node;
++
++	spin_lock_bh(&sel_netnode_lock);
++	for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++)
++		list_for_each_entry(node, &sel_netnode_hash[idx], list)
++			sel_netnode_destroy(node);
++	spin_unlock_bh(&sel_netnode_lock);
++}
++
++static int sel_netnode_avc_callback(u32 event, u32 ssid, u32 tsid,
++				    u16 class, u32 perms, u32 *retained)
++{
++	if (event == AVC_CALLBACK_RESET) {
++		sel_netnode_flush();
++		synchronize_net();
++	}
++	return 0;
++}
++
++static __init int sel_netnode_init(void)
++{
++	int iter;
++	int ret;
++
++	if (!selinux_enabled)
++		return 0;
++
++	for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++)
++		INIT_LIST_HEAD(&sel_netnode_hash[iter]);
++
++	ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET,
++	                       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
++	if (ret != 0)
++		panic("avc_add_callback() failed, error %d\n", ret);
++
++	return ret;
++}
++
++__initcall(sel_netnode_init);
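One detail worth noting in sel_netnode_insert() above is the per-bucket cap: the new entry is added at the head of its bucket and anything found past SEL_NETNODE_HASH_BKT_LIMIT is evicted, so a bucket can never grow beyond the limit. A toy, single-bucket restatement of that eviction (not kernel code):

/* Toy illustration of the bucket cap used by sel_netnode_insert(): the new
 * entry goes at the head of its bucket and anything beyond the limit is
 * dropped, keeping lookups bounded by the limit. */
#include <stdio.h>
#include <stdlib.h>

#define BKT_LIMIT 3

struct node {
	int key;
	struct node *next;
};

static struct node *bucket;

static void bucket_insert(int key)
{
	struct node *n = malloc(sizeof(*n));
	struct node **pp;
	unsigned int count = 0;

	if (n == NULL)
		return;
	n->key = key;
	n->next = bucket;
	bucket = n;				/* newest entry first */

	for (pp = &bucket; *pp != NULL; pp = &(*pp)->next)
		if (++count > BKT_LIMIT) {	/* evict the overflow entry */
			struct node *victim = *pp;
			*pp = victim->next;
			free(victim);
			break;
		}
}

int main(void)
{
	struct node *n;

	for (int key = 1; key <= 5; key++)
		bucket_insert(key);
	for (n = bucket; n != NULL; n = n->next)
		printf("%d ", n->key);		/* prints "5 4 3": 1 and 2 were evicted */
	printf("\n");
	return 0;
}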
 diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
-index 2fa483f..397fd49 100644
+index 2fa483f..a857405 100644
 --- a/security/selinux/selinuxfs.c
 +++ b/security/selinux/selinuxfs.c
-@@ -1222,7 +1222,7 @@ static int sel_avc_stats_seq_show(struct seq_file *seq, void *v)
+@@ -2,6 +2,11 @@
+  *
+  * 	Added conditional policy language extensions
+  *
++ *  Updated: Hewlett-Packard <paul.moore at hp.com>
++ *
++ *      Added support for the policy capability bitmap
++ *
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+  * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+  * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris at redhat.com>
+  *	This program is free software; you can redistribute it and/or modify
+@@ -35,6 +40,11 @@
+ #include "objsec.h"
+ #include "conditional.h"
+ 
++/* Policy capability filenames */
++static char *policycap_names[] = {
++	"network_peer_controls"
++};
++
+ unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
+ 
+ #ifdef CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT
+@@ -72,6 +82,9 @@ static int *bool_pending_values = NULL;
+ static struct dentry *class_dir = NULL;
+ static unsigned long last_class_ino;
+ 
++/* global data for policy capabilities */
++static struct dentry *policycap_dir = NULL;
++
+ extern void selnl_notify_setenforce(int val);
+ 
+ /* Check whether a task is allowed to use a security operation. */
+@@ -111,10 +124,11 @@ enum sel_inos {
+ 
+ static unsigned long sel_last_ino = SEL_INO_NEXT - 1;
+ 
+-#define SEL_INITCON_INO_OFFSET 	0x01000000
+-#define SEL_BOOL_INO_OFFSET	0x02000000
+-#define SEL_CLASS_INO_OFFSET	0x04000000
+-#define SEL_INO_MASK		0x00ffffff
++#define SEL_INITCON_INO_OFFSET		0x01000000
++#define SEL_BOOL_INO_OFFSET		0x02000000
++#define SEL_CLASS_INO_OFFSET		0x04000000
++#define SEL_POLICYCAP_INO_OFFSET	0x08000000
++#define SEL_INO_MASK			0x00ffffff
+ 
+ #define TMPBUFLEN	12
+ static ssize_t sel_read_enforce(struct file *filp, char __user *buf,
+@@ -263,6 +277,7 @@ static const struct file_operations sel_policyvers_ops = {
+ /* declaration for sel_write_load */
+ static int sel_make_bools(void);
+ static int sel_make_classes(void);
++static int sel_make_policycap(void);
+ 
+ /* declaration for sel_make_class_dirs */
+ static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+@@ -323,6 +338,12 @@ static ssize_t sel_write_load(struct file * file, const char __user * buf,
+ 	}
+ 
+ 	ret = sel_make_classes();
++	if (ret) {
++		length = ret;
++		goto out1;
++	}
++
++	ret = sel_make_policycap();
+ 	if (ret)
+ 		length = ret;
+ 	else
+@@ -1222,7 +1243,7 @@ static int sel_avc_stats_seq_show(struct seq_file *seq, void *v)
  static void sel_avc_stats_seq_stop(struct seq_file *seq, void *v)
  { }
  
@@ -764211,6 +927944,87 @@
  	.start		= sel_avc_stats_seq_start,
  	.next		= sel_avc_stats_seq_next,
  	.show		= sel_avc_stats_seq_show,
+@@ -1399,6 +1420,24 @@ static const struct file_operations sel_perm_ops = {
+ 	.read		= sel_read_perm,
+ };
+ 
++static ssize_t sel_read_policycap(struct file *file, char __user *buf,
++				  size_t count, loff_t *ppos)
++{
++	int value;
++	char tmpbuf[TMPBUFLEN];
++	ssize_t length;
++	unsigned long i_ino = file->f_path.dentry->d_inode->i_ino;
++
++	value = security_policycap_supported(i_ino & SEL_INO_MASK);
++	length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value);
++
++	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
++}
++
++static const struct file_operations sel_policycap_ops = {
++	.read		= sel_read_policycap,
++};
++
+ static int sel_make_perm_files(char *objclass, int classvalue,
+ 				struct dentry *dir)
+ {
+@@ -1545,6 +1584,36 @@ out:
+ 	return rc;
+ }
+ 
++static int sel_make_policycap(void)
++{
++	unsigned int iter;
++	struct dentry *dentry = NULL;
++	struct inode *inode = NULL;
++
++	sel_remove_entries(policycap_dir);
++
++	for (iter = 0; iter <= POLICYDB_CAPABILITY_MAX; iter++) {
++		if (iter < ARRAY_SIZE(policycap_names))
++			dentry = d_alloc_name(policycap_dir,
++					      policycap_names[iter]);
++		else
++			dentry = d_alloc_name(policycap_dir, "unknown");
++
++		if (dentry == NULL)
++			return -ENOMEM;
++
++		inode = sel_make_inode(policycap_dir->d_sb, S_IFREG | S_IRUGO);
++		if (inode == NULL)
++			return -ENOMEM;
++
++		inode->i_fop = &sel_policycap_ops;
++		inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
++		d_add(dentry, inode);
++	}
++
++	return 0;
++}
++
+ static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+ 			unsigned long *ino)
+ {
+@@ -1673,6 +1742,18 @@ static int sel_fill_super(struct super_block * sb, void * data, int silent)
+ 
+ 	class_dir = dentry;
+ 
++	dentry = d_alloc_name(sb->s_root, "policy_capabilities");
++	if (!dentry) {
++		ret = -ENOMEM;
++		goto err;
++	}
++
++	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
++	if (ret)
++		goto err;
++
++	policycap_dir = dentry;
++
+ out:
+ 	return ret;
+ err:
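The policy_capabilities directory created above is intended to be read from user space: each file contains "1" or "0" for one capability. A small example reader (assumes selinuxfs is mounted at /selinux, the usual mount point for this kernel; not part of the patch):

/* Check whether the loaded policy advertises network_peer_controls.
 * The path assumes selinuxfs is mounted at /selinux. */
#include <stdio.h>

int main(void)
{
	char buf[4] = "";
	FILE *f = fopen("/selinux/policy_capabilities/network_peer_controls", "r");

	if (f == NULL) {
		perror("policy_capabilities");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f) != NULL)
		printf("network_peer_controls: %s\n", buf);	/* "1" or "0" */
	fclose(f);
	return 0;
}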
 diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
 index 9e70a16..cd10e27 100644
 --- a/security/selinux/ss/avtab.c
@@ -764225,7 +928039,7 @@
  	return 0;
  }
 diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
-index fb5d70a..3bbcb53 100644
+index fb5d70a..feaf0a5 100644
 --- a/security/selinux/ss/mls.c
 +++ b/security/selinux/ss/mls.c
 @@ -537,15 +537,8 @@ int mls_compute_sid(struct context *scontext,
@@ -764246,11 +928060,199 @@
  	default:
  		return -EINVAL;
  	}
+@@ -569,7 +562,7 @@ void mls_export_netlbl_lvl(struct context *context,
+ 	if (!selinux_mls_enabled)
+ 		return;
+ 
+-	secattr->mls_lvl = context->range.level[0].sens - 1;
++	secattr->attr.mls.lvl = context->range.level[0].sens - 1;
+ 	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
+ }
+ 
+@@ -589,7 +582,7 @@ void mls_import_netlbl_lvl(struct context *context,
+ 	if (!selinux_mls_enabled)
+ 		return;
+ 
+-	context->range.level[0].sens = secattr->mls_lvl + 1;
++	context->range.level[0].sens = secattr->attr.mls.lvl + 1;
+ 	context->range.level[1].sens = context->range.level[0].sens;
+ }
+ 
+@@ -612,8 +605,8 @@ int mls_export_netlbl_cat(struct context *context,
+ 		return 0;
+ 
+ 	rc = ebitmap_netlbl_export(&context->range.level[0].cat,
+-				   &secattr->mls_cat);
+-	if (rc == 0 && secattr->mls_cat != NULL)
++				   &secattr->attr.mls.cat);
++	if (rc == 0 && secattr->attr.mls.cat != NULL)
+ 		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+ 
+ 	return rc;
+@@ -640,7 +633,7 @@ int mls_import_netlbl_cat(struct context *context,
+ 		return 0;
+ 
+ 	rc = ebitmap_netlbl_import(&context->range.level[0].cat,
+-				   secattr->mls_cat);
++				   secattr->attr.mls.cat);
+ 	if (rc != 0)
+ 		goto import_netlbl_cat_failure;
+ 
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index b582aae..bd7d6a0 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -13,6 +13,11 @@
+  *
+  * 	Added conditional policy language extensions
+  *
++ * Updated: Hewlett-Packard <paul.moore at hp.com>
++ *
++ *      Added support for the policy capability bitmap
++ *
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+  * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
+  * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+  *	This program is free software; you can redistribute it and/or modify
+@@ -102,6 +107,11 @@ static struct policydb_compat_info policydb_compat[] = {
+ 		.sym_num        = SYM_NUM,
+ 		.ocon_num       = OCON_NUM,
+ 	},
++	{
++		.version	= POLICYDB_VERSION_POLCAP,
++		.sym_num	= SYM_NUM,
++		.ocon_num	= OCON_NUM,
++	}
+ };
+ 
+ static struct policydb_compat_info *policydb_lookup_compat(int version)
+@@ -183,6 +193,8 @@ static int policydb_init(struct policydb *p)
+ 	if (rc)
+ 		goto out_free_symtab;
+ 
++	ebitmap_init(&p->policycaps);
++
+ out:
+ 	return rc;
+ 
+@@ -673,8 +685,8 @@ void policydb_destroy(struct policydb *p)
+ 			ebitmap_destroy(&p->type_attr_map[i]);
+ 	}
+ 	kfree(p->type_attr_map);
+-
+ 	kfree(p->undefined_perms);
++	ebitmap_destroy(&p->policycaps);
+ 
+ 	return;
+ }
+@@ -1554,6 +1566,10 @@ int policydb_read(struct policydb *p, void *fp)
+ 	p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
+ 	p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
+ 
++	if (p->policyvers >= POLICYDB_VERSION_POLCAP &&
++	    ebitmap_read(&p->policycaps, fp) != 0)
++		goto bad;
++
+ 	info = policydb_lookup_compat(p->policyvers);
+ 	if (!info) {
+ 		printk(KERN_ERR "security:  unable to find policy compat info "
+diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
+index ed6fc68..c4ce996 100644
+--- a/security/selinux/ss/policydb.h
++++ b/security/selinux/ss/policydb.h
+@@ -241,6 +241,8 @@ struct policydb {
+ 	/* type -> attribute reverse mapping */
+ 	struct ebitmap *type_attr_map;
+ 
++	struct ebitmap policycaps;
++
+ 	unsigned int policyvers;
+ 
+ 	unsigned int reject_unknown : 1;
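With the policydb changes above, a policy at or newer than POLICYDB_VERSION_POLCAP carries an extra ebitmap of capabilities, and (as services.c below shows) checking a capability is nothing more than a bit test against it. A toy restatement with a plain word standing in for the ebitmap (illustrative only):

/* Illustrative only: the capability check reduces to a bit test on the
 * bitmap read from the policy at load time. */
#include <stdio.h>

enum { POLICYDB_CAPABILITY_NETPEER = 0 };

static unsigned long policycaps;	/* stands in for policydb.policycaps */

static int policycap_supported(unsigned int cap)
{
	return (int)((policycaps >> cap) & 1);
}

int main(void)
{
	policycaps |= 1UL << POLICYDB_CAPABILITY_NETPEER;	/* set during policy load */
	printf("network_peer_controls: %d\n",
	       policycap_supported(POLICYDB_CAPABILITY_NETPEER));
	return 0;
}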
 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
-index f83b19d..4bf715d 100644
+index f83b19d..f96dec1 100644
 --- a/security/selinux/ss/services.c
 +++ b/security/selinux/ss/services.c
-@@ -1744,6 +1744,9 @@ int security_genfs_sid(const char *fstype,
+@@ -16,12 +16,13 @@
+  * Updated: Hewlett-Packard <paul.moore at hp.com>
+  *
+  *      Added support for NetLabel
++ *      Added support for the policy capability bitmap
+  *
+  * Updated: Chad Sellers <csellers at tresys.com>
+  *
+  *  Added validation of kernel classes and permissions
+  *
+- * Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
++ * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
+  * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
+  * Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC
+  * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris at redhat.com>
+@@ -59,6 +60,8 @@
+ extern void selnl_notify_policyload(u32 seqno);
+ unsigned int policydb_loaded_version;
+ 
++int selinux_policycap_netpeer;
++
+ /*
+  * This is declared in avc.c
+  */
+@@ -1299,6 +1302,12 @@ bad:
+ 	goto out;
+ }
+ 
++static void security_load_policycaps(void)
++{
++	selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps,
++						  POLICYDB_CAPABILITY_NETPEER);
++}
++
+ extern void selinux_complete_init(void);
+ static int security_preserve_bools(struct policydb *p);
+ 
+@@ -1346,6 +1355,7 @@ int security_load_policy(void *data, size_t len)
+ 			avtab_cache_destroy();
+ 			return -EINVAL;
+ 		}
++		security_load_policycaps();
+ 		policydb_loaded_version = policydb.policyvers;
+ 		ss_initialized = 1;
+ 		seqno = ++latest_granting;
+@@ -1404,6 +1414,7 @@ int security_load_policy(void *data, size_t len)
+ 	POLICY_WRLOCK;
+ 	memcpy(&policydb, &newpolicydb, sizeof policydb);
+ 	sidtab_set(&sidtab, &newsidtab);
++	security_load_policycaps();
+ 	seqno = ++latest_granting;
+ 	policydb_loaded_version = policydb.policyvers;
+ 	POLICY_WRUNLOCK;
+@@ -1478,11 +1489,8 @@ out:
+  * security_netif_sid - Obtain the SID for a network interface.
+  * @name: interface name
+  * @if_sid: interface SID
+- * @msg_sid: default SID for received packets
+  */
+-int security_netif_sid(char *name,
+-		       u32 *if_sid,
+-		       u32 *msg_sid)
++int security_netif_sid(char *name, u32 *if_sid)
+ {
+ 	int rc = 0;
+ 	struct ocontext *c;
+@@ -1510,11 +1518,8 @@ int security_netif_sid(char *name,
+ 				goto out;
+ 		}
+ 		*if_sid = c->sid[0];
+-		*msg_sid = c->sid[1];
+-	} else {
++	} else
+ 		*if_sid = SECINITSID_NETIF;
+-		*msg_sid = SECINITSID_NETMSG;
+-	}
+ 
+ out:
+ 	POLICY_RDUNLOCK;
+@@ -1744,6 +1749,9 @@ int security_genfs_sid(const char *fstype,
  	struct ocontext *c;
  	int rc = 0, cmp = 0;
  
@@ -764260,6 +928262,420 @@
  	POLICY_RDLOCK;
  
  	for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
+@@ -2046,6 +2054,91 @@ out:
+ 	return rc;
+ }
+ 
++/**
++ * security_net_peersid_resolve - Compare and resolve two network peer SIDs
++ * @nlbl_sid: NetLabel SID
++ * @nlbl_type: NetLabel labeling protocol type
++ * @xfrm_sid: XFRM SID
++ * @peer_sid: network peer SID
++ *
++ * Description:
++ * Compare the @nlbl_sid and @xfrm_sid values and if the two SIDs can be
++ * resolved into a single SID it is returned via @peer_sid and the function
++ * returns zero.  Otherwise @peer_sid is set to SECSID_NULL and the function
++ * returns a negative value.  A table summarizing the behavior is below:
++ *
++ *                                 | function return |    @peer_sid
++ *   ------------------------------+-----------------+-----------------
++ *   no peer labels                |        0        |    SECSID_NULL
++ *   single peer label             |        0        |    <peer_label>
++ *   multiple, consistent labels   |        0        |    <peer_label>
++ *   multiple, inconsistent labels |    -<errno>     |    SECSID_NULL
++ *
++ */
++int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
++				 u32 xfrm_sid,
++				 u32 *peer_sid)
++{
++	int rc;
++	struct context *nlbl_ctx;
++	struct context *xfrm_ctx;
++
++	/* handle the common (which also happens to be the set of easy) cases
++	 * right away, these two if statements catch everything involving a
++	 * single or absent peer SID/label */
++	if (xfrm_sid == SECSID_NULL) {
++		*peer_sid = nlbl_sid;
++		return 0;
++	}
++	/* NOTE: an nlbl_type == NETLBL_NLTYPE_UNLABELED is a "fallback" label
++	 * and is treated as if nlbl_sid == SECSID_NULL when a XFRM SID/label
++	 * is present */
++	if (nlbl_sid == SECSID_NULL || nlbl_type == NETLBL_NLTYPE_UNLABELED) {
++		*peer_sid = xfrm_sid;
++		return 0;
++	}
++
++	/* we don't need to check ss_initialized here since the only way both
++	 * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
++	 * security server was initialized and ss_initialized was true */
++	if (!selinux_mls_enabled) {
++		*peer_sid = SECSID_NULL;
++		return 0;
++	}
++
++	POLICY_RDLOCK;
++
++	nlbl_ctx = sidtab_search(&sidtab, nlbl_sid);
++	if (!nlbl_ctx) {
++		printk(KERN_ERR
++		       "security_sid_mls_cmp:  unrecognized SID %d\n",
++		       nlbl_sid);
++		rc = -EINVAL;
++		goto out_slowpath;
++	}
++	xfrm_ctx = sidtab_search(&sidtab, xfrm_sid);
++	if (!xfrm_ctx) {
++		printk(KERN_ERR
++		       "security_sid_mls_cmp:  unrecognized SID %d\n",
++		       xfrm_sid);
++		rc = -EINVAL;
++		goto out_slowpath;
++	}
++	rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
++
++out_slowpath:
++	POLICY_RDUNLOCK;
++	if (rc == 0)
++		/* at present NetLabel SIDs/labels really only carry MLS
++		 * information so if the MLS portion of the NetLabel SID
++		 * matches the MLS portion of the labeled XFRM SID/label
++		 * then pass along the XFRM SID as it is the most
++		 * expressive */
++		*peer_sid = xfrm_sid;
++	else
++		*peer_sid = SECSID_NULL;
++	return rc;
++}
++
+ static int get_classes_callback(void *k, void *d, void *args)
+ {
+ 	struct class_datum *datum = d;
+@@ -2151,6 +2244,60 @@ int security_get_allow_unknown(void)
+ 	return policydb.allow_unknown;
+ }
+ 
++/**
++ * security_get_policycaps - Query the loaded policy for its capabilities
++ * @len: the number of capability bits
++ * @values: the capability bit array
++ *
++ * Description:
++ * Get an array of the policy capabilities in @values where each entry in
++ * @values is either true (1) or false (0) depending on the policy's support of
++ * that feature.  The policy capabilities are defined by the
++ * POLICYDB_CAPABILITY_* enums.  The size of the array is stored in @len and it
++ * is up to the caller to free the array in @values.  Returns zero on success,
++ * negative values on failure.
++ *
++ */
++int security_get_policycaps(int *len, int **values)
++{
++	int rc = -ENOMEM;
++	unsigned int iter;
++
++	POLICY_RDLOCK;
++
++	*values = kcalloc(POLICYDB_CAPABILITY_MAX, sizeof(int), GFP_ATOMIC);
++	if (*values == NULL)
++		goto out;
++	for (iter = 0; iter < POLICYDB_CAPABILITY_MAX; iter++)
++		(*values)[iter] = ebitmap_get_bit(&policydb.policycaps, iter);
++	*len = POLICYDB_CAPABILITY_MAX;
++	rc = 0;
++
++out:
++	POLICY_RDUNLOCK;
++	return rc;
++}
++
++/**
++ * security_policycap_supported - Check for a specific policy capability
++ * @req_cap: capability
++ *
++ * Description:
++ * This function queries the currently loaded policy to see if it supports the
++ * capability specified by @req_cap.  Returns true (1) if the capability is
++ * supported, false (0) if it isn't supported.
++ *
++ */
++int security_policycap_supported(unsigned int req_cap)
++{
++	int rc;
++
++	POLICY_RDLOCK;
++	rc = ebitmap_get_bit(&policydb.policycaps, req_cap);
++	POLICY_RDUNLOCK;
++
++	return rc;
++}
++
+ struct selinux_audit_rule {
+ 	u32 au_seqno;
+ 	struct context au_ctxt;
+@@ -2400,50 +2547,10 @@ void selinux_audit_set_callback(int (*callback)(void))
+ }
+ 
+ #ifdef CONFIG_NETLABEL
+-/*
+- * NetLabel cache structure
+- */
+-#define NETLBL_CACHE(x)           ((struct selinux_netlbl_cache *)(x))
+-#define NETLBL_CACHE_T_NONE       0
+-#define NETLBL_CACHE_T_SID        1
+-#define NETLBL_CACHE_T_MLS        2
+-struct selinux_netlbl_cache {
+-	u32 type;
+-	union {
+-		u32 sid;
+-		struct mls_range mls_label;
+-	} data;
+-};
+-
+-/**
+- * security_netlbl_cache_free - Free the NetLabel cached data
+- * @data: the data to free
+- *
+- * Description:
+- * This function is intended to be used as the free() callback inside the
+- * netlbl_lsm_cache structure.
+- *
+- */
+-static void security_netlbl_cache_free(const void *data)
+-{
+-	struct selinux_netlbl_cache *cache;
+-
+-	if (data == NULL)
+-		return;
+-
+-	cache = NETLBL_CACHE(data);
+-	switch (cache->type) {
+-	case NETLBL_CACHE_T_MLS:
+-		ebitmap_destroy(&cache->data.mls_label.level[0].cat);
+-		break;
+-	}
+-	kfree(data);
+-}
+-
+ /**
+  * security_netlbl_cache_add - Add an entry to the NetLabel cache
+  * @secattr: the NetLabel packet security attributes
+- * @ctx: the SELinux context
++ * @sid: the SELinux SID
+  *
+  * Description:
+  * Attempt to cache the context in @ctx, which was derived from the packet in
+@@ -2452,60 +2559,46 @@ static void security_netlbl_cache_free(const void *data)
+  *
+  */
+ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
+-				      struct context *ctx)
++				      u32 sid)
+ {
+-	struct selinux_netlbl_cache *cache = NULL;
++	u32 *sid_cache;
+ 
+-	secattr->cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
+-	if (secattr->cache == NULL)
+-		return;
+-
+-	cache = kzalloc(sizeof(*cache),	GFP_ATOMIC);
+-	if (cache == NULL)
++	sid_cache = kmalloc(sizeof(*sid_cache), GFP_ATOMIC);
++	if (sid_cache == NULL)
+ 		return;
+-
+-	cache->type = NETLBL_CACHE_T_MLS;
+-	if (ebitmap_cpy(&cache->data.mls_label.level[0].cat,
+-			&ctx->range.level[0].cat) != 0) {
+-		kfree(cache);
++	secattr->cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
++	if (secattr->cache == NULL) {
++		kfree(sid_cache);
+ 		return;
+ 	}
+-	cache->data.mls_label.level[1].cat.highbit =
+-		cache->data.mls_label.level[0].cat.highbit;
+-	cache->data.mls_label.level[1].cat.node =
+-		cache->data.mls_label.level[0].cat.node;
+-	cache->data.mls_label.level[0].sens = ctx->range.level[0].sens;
+-	cache->data.mls_label.level[1].sens = ctx->range.level[0].sens;
+ 
+-	secattr->cache->free = security_netlbl_cache_free;
+-	secattr->cache->data = (void *)cache;
++	*sid_cache = sid;
++	secattr->cache->free = kfree;
++	secattr->cache->data = sid_cache;
+ 	secattr->flags |= NETLBL_SECATTR_CACHE;
+ }
+ 
+ /**
+  * security_netlbl_secattr_to_sid - Convert a NetLabel secattr to a SELinux SID
+  * @secattr: the NetLabel packet security attributes
+- * @base_sid: the SELinux SID to use as a context for MLS only attributes
+  * @sid: the SELinux SID
+  *
+  * Description:
+  * Convert the given NetLabel security attributes in @secattr into a
+  * SELinux SID.  If the @secattr field does not contain a full SELinux
+- * SID/context then use the context in @base_sid as the foundation.  If
+- * possibile the 'cache' field of @secattr is set and the CACHE flag is set;
+- * this is to allow the @secattr to be used by NetLabel to cache the secattr to
+- * SID conversion for future lookups.  Returns zero on success, negative
+- * values on failure.
++ * SID/context then use SECINITSID_NETMSG as the foundation.  If possible the
++ * 'cache' field of @secattr is set and the CACHE flag is set; this is to
++ * allow the @secattr to be used by NetLabel to cache the secattr to SID
++ * conversion for future lookups.  Returns zero on success, negative values on
++ * failure.
+  *
+  */
+ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+-				   u32 base_sid,
+ 				   u32 *sid)
+ {
+ 	int rc = -EIDRM;
+ 	struct context *ctx;
+ 	struct context ctx_new;
+-	struct selinux_netlbl_cache *cache;
+ 
+ 	if (!ss_initialized) {
+ 		*sid = SECSID_NULL;
+@@ -2515,40 +2608,13 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+ 	POLICY_RDLOCK;
+ 
+ 	if (secattr->flags & NETLBL_SECATTR_CACHE) {
+-		cache = NETLBL_CACHE(secattr->cache->data);
+-		switch (cache->type) {
+-		case NETLBL_CACHE_T_SID:
+-			*sid = cache->data.sid;
+-			rc = 0;
+-			break;
+-		case NETLBL_CACHE_T_MLS:
+-			ctx = sidtab_search(&sidtab, base_sid);
+-			if (ctx == NULL)
+-				goto netlbl_secattr_to_sid_return;
+-
+-			ctx_new.user = ctx->user;
+-			ctx_new.role = ctx->role;
+-			ctx_new.type = ctx->type;
+-			ctx_new.range.level[0].sens =
+-				cache->data.mls_label.level[0].sens;
+-			ctx_new.range.level[0].cat.highbit =
+-				cache->data.mls_label.level[0].cat.highbit;
+-			ctx_new.range.level[0].cat.node =
+-				cache->data.mls_label.level[0].cat.node;
+-			ctx_new.range.level[1].sens =
+-				cache->data.mls_label.level[1].sens;
+-			ctx_new.range.level[1].cat.highbit =
+-				cache->data.mls_label.level[1].cat.highbit;
+-			ctx_new.range.level[1].cat.node =
+-				cache->data.mls_label.level[1].cat.node;
+-
+-			rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid);
+-			break;
+-		default:
+-			goto netlbl_secattr_to_sid_return;
+-		}
++		*sid = *(u32 *)secattr->cache->data;
++		rc = 0;
++	} else if (secattr->flags & NETLBL_SECATTR_SECID) {
++		*sid = secattr->attr.secid;
++		rc = 0;
+ 	} else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
+-		ctx = sidtab_search(&sidtab, base_sid);
++		ctx = sidtab_search(&sidtab, SECINITSID_NETMSG);
+ 		if (ctx == NULL)
+ 			goto netlbl_secattr_to_sid_return;
+ 
+@@ -2558,7 +2624,7 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+ 		mls_import_netlbl_lvl(&ctx_new, secattr);
+ 		if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
+ 			if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
+-						  secattr->mls_cat) != 0)
++						  secattr->attr.mls.cat) != 0)
+ 				goto netlbl_secattr_to_sid_return;
+ 			ctx_new.range.level[1].cat.highbit =
+ 				ctx_new.range.level[0].cat.highbit;
+@@ -2575,7 +2641,7 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+ 		if (rc != 0)
+ 			goto netlbl_secattr_to_sid_return_cleanup;
+ 
+-		security_netlbl_cache_add(secattr, &ctx_new);
++		security_netlbl_cache_add(secattr, *sid);
+ 
+ 		ebitmap_destroy(&ctx_new.range.level[0].cat);
+ 	} else {
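The decision table documented for security_net_peersid_resolve() above can be read as a small pure function; the stand-alone restatement below (hypothetical user-space code, error value simplified to -1, the MLS comparison reduced to a boolean) mirrors those four rows:

/* Restatement of the peer-SID resolution rules documented above:
 * absent XFRM label -> NetLabel SID, absent/unlabeled NetLabel SID ->
 * XFRM SID, consistent labels -> XFRM SID, inconsistent labels -> error. */
#include <stdio.h>

#define SECSID_NULL 0u

static int resolve_peer_sid(unsigned int nlbl_sid, int nlbl_unlabeled,
			    unsigned int xfrm_sid, int mls_match,
			    unsigned int *peer_sid)
{
	if (xfrm_sid == SECSID_NULL) {
		*peer_sid = nlbl_sid;
		return 0;
	}
	if (nlbl_sid == SECSID_NULL || nlbl_unlabeled) {
		*peer_sid = xfrm_sid;
		return 0;
	}
	if (mls_match) {		/* consistent: prefer the richer XFRM SID */
		*peer_sid = xfrm_sid;
		return 0;
	}
	*peer_sid = SECSID_NULL;	/* inconsistent labels */
	return -1;
}

int main(void)
{
	unsigned int sid;
	int rc;

	rc = resolve_peer_sid(SECSID_NULL, 0, 42, 1, &sid);
	printf("NetLabel absent:     rc=%d sid=%u\n", rc, sid);
	rc = resolve_peer_sid(7, 0, 42, 0, &sid);
	printf("inconsistent labels: rc=%d sid=%u\n", rc, sid);
	return 0;
}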
+diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
+index e076039..7e15820 100644
+--- a/security/selinux/xfrm.c
++++ b/security/selinux/xfrm.c
+@@ -46,11 +46,14 @@
+ #include <net/checksum.h>
+ #include <net/udp.h>
+ #include <asm/semaphore.h>
++#include <asm/atomic.h>
+ 
+ #include "avc.h"
+ #include "objsec.h"
+ #include "xfrm.h"
+ 
++/* Labeled XFRM instance counter */
++atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0);
+ 
+ /*
+  * Returns true if an LSM/SELinux context
+@@ -293,6 +296,9 @@ int selinux_xfrm_policy_alloc(struct xfrm_policy *xp,
+ 	BUG_ON(!uctx);
+ 
+ 	err = selinux_xfrm_sec_ctx_alloc(&xp->security, uctx, 0);
++	if (err == 0)
++		atomic_inc(&selinux_xfrm_refcount);
++
+ 	return err;
+ }
+ 
+@@ -340,10 +346,13 @@ int selinux_xfrm_policy_delete(struct xfrm_policy *xp)
+ 	struct xfrm_sec_ctx *ctx = xp->security;
+ 	int rc = 0;
+ 
+-	if (ctx)
++	if (ctx) {
+ 		rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+ 				  SECCLASS_ASSOCIATION,
+ 				  ASSOCIATION__SETCONTEXT, NULL);
++		if (rc == 0)
++			atomic_dec(&selinux_xfrm_refcount);
++	}
+ 
+ 	return rc;
+ }
+@@ -360,6 +369,8 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
+ 	BUG_ON(!x);
+ 
+ 	err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid);
++	if (err == 0)
++		atomic_inc(&selinux_xfrm_refcount);
+ 	return err;
+ }
+ 
+@@ -382,10 +393,13 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
+ 	struct xfrm_sec_ctx *ctx = x->security;
+ 	int rc = 0;
+ 
+-	if (ctx)
++	if (ctx) {
+ 		rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+ 				  SECCLASS_ASSOCIATION,
+ 				  ASSOCIATION__SETCONTEXT, NULL);
++		if (rc == 0)
++			atomic_dec(&selinux_xfrm_refcount);
++	}
+ 
+ 	return rc;
+ }
 diff --git a/sound/oss/waveartist.c b/sound/oss/waveartist.c
 index b48c729..8849041 100644
 --- a/sound/oss/waveartist.c
@@ -764341,3 +928757,1988 @@
  
  /* Magic definition of all other variables and things */
  I2C_CLIENT_INSMOD;
+diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
+new file mode 100644
+index 0000000..317f8e2
+--- /dev/null
++++ b/virt/kvm/ioapic.c
+@@ -0,0 +1,403 @@
++/*
++ *  Copyright (C) 2001  MandrakeSoft S.A.
++ *
++ *    MandrakeSoft S.A.
++ *    43, rue d'Aboukir
++ *    75002 Paris - France
++ *    http://www.linux-mandrake.com/
++ *    http://www.mandrakesoft.com/
++ *
++ *  This library is free software; you can redistribute it and/or
++ *  modify it under the terms of the GNU Lesser General Public
++ *  License as published by the Free Software Foundation; either
++ *  version 2 of the License, or (at your option) any later version.
++ *
++ *  This library is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ *  Lesser General Public License for more details.
++ *
++ *  You should have received a copy of the GNU Lesser General Public
++ *  License along with this library; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ *
++ *  Yunhong Jiang <yunhong.jiang at intel.com>
++ *  Yaozu (Eddie) Dong <eddie.dong at intel.com>
++ *  Based on Xen 3.1 code.
++ */
++
++#include <linux/kvm_host.h>
++#include <linux/kvm.h>
++#include <linux/mm.h>
++#include <linux/highmem.h>
++#include <linux/smp.h>
++#include <linux/hrtimer.h>
++#include <linux/io.h>
++#include <asm/processor.h>
++#include <asm/page.h>
++#include <asm/current.h>
++
++#include "ioapic.h"
++#include "lapic.h"
++
++#if 0
++#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
++#else
++#define ioapic_debug(fmt, arg...)
++#endif
++static void ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
++
++static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
++					  unsigned long addr,
++					  unsigned long length)
++{
++	unsigned long result = 0;
++
++	switch (ioapic->ioregsel) {
++	case IOAPIC_REG_VERSION:
++		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
++			  | (IOAPIC_VERSION_ID & 0xff));
++		break;
++
++	case IOAPIC_REG_APIC_ID:
++	case IOAPIC_REG_ARB_ID:
++		result = ((ioapic->id & 0xf) << 24);
++		break;
++
++	default:
++		{
++			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
++			u64 redir_content;
++
++			ASSERT(redir_index < IOAPIC_NUM_PINS);
++
++			redir_content = ioapic->redirtbl[redir_index].bits;
++			result = (ioapic->ioregsel & 0x1) ?
++			    (redir_content >> 32) & 0xffffffff :
++			    redir_content & 0xffffffff;
++			break;
++		}
++	}
++
++	return result;
++}
++
++static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
++{
++	union ioapic_redir_entry *pent;
++
++	pent = &ioapic->redirtbl[idx];
++
++	if (!pent->fields.mask) {
++		ioapic_deliver(ioapic, idx);
++		if (pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
++			pent->fields.remote_irr = 1;
++	}
++	if (!pent->fields.trig_mode)
++		ioapic->irr &= ~(1 << idx);
++}
++
++static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
++{
++	unsigned index;
++
++	switch (ioapic->ioregsel) {
++	case IOAPIC_REG_VERSION:
++		/* Writes are ignored. */
++		break;
++
++	case IOAPIC_REG_APIC_ID:
++		ioapic->id = (val >> 24) & 0xf;
++		break;
++
++	case IOAPIC_REG_ARB_ID:
++		break;
++
++	default:
++		index = (ioapic->ioregsel - 0x10) >> 1;
++
++		ioapic_debug("change redir index %x val %x\n", index, val);
++		if (index >= IOAPIC_NUM_PINS)
++			return;
++		if (ioapic->ioregsel & 1) {
++			ioapic->redirtbl[index].bits &= 0xffffffff;
++			ioapic->redirtbl[index].bits |= (u64) val << 32;
++		} else {
++			ioapic->redirtbl[index].bits &= ~0xffffffffULL;
++			ioapic->redirtbl[index].bits |= (u32) val;
++			ioapic->redirtbl[index].fields.remote_irr = 0;
++		}
++		if (ioapic->irr & (1 << index))
++			ioapic_service(ioapic, index);
++		break;
++	}
++}
++
++static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
++			   struct kvm_vcpu *vcpu,
++			   u8 vector, u8 trig_mode, u8 delivery_mode)
++{
++	ioapic_debug("irq %d trig %d deliv %d\n", vector, trig_mode,
++		     delivery_mode);
++
++	ASSERT((delivery_mode == IOAPIC_FIXED) ||
++	       (delivery_mode == IOAPIC_LOWEST_PRIORITY));
++
++	kvm_apic_set_irq(vcpu, vector, trig_mode);
++}
++
++static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
++				       u8 dest_mode)
++{
++	u32 mask = 0;
++	int i;
++	struct kvm *kvm = ioapic->kvm;
++	struct kvm_vcpu *vcpu;
++
++	ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);
++
++	if (dest_mode == 0) {	/* Physical mode. */
++		if (dest == 0xFF) {	/* Broadcast. */
++			for (i = 0; i < KVM_MAX_VCPUS; ++i)
++				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
++					mask |= 1 << i;
++			return mask;
++		}
++		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
++			vcpu = kvm->vcpus[i];
++			if (!vcpu)
++				continue;
++			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
++				if (vcpu->arch.apic)
++					mask = 1 << i;
++				break;
++			}
++		}
++	} else if (dest != 0)	/* Logical mode, MDA non-zero. */
++		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
++			vcpu = kvm->vcpus[i];
++			if (!vcpu)
++				continue;
++			if (vcpu->arch.apic &&
++			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
++				mask |= 1 << vcpu->vcpu_id;
++		}
++	ioapic_debug("mask %x\n", mask);
++	return mask;
++}
++
++static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
++{
++	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
++	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
++	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
++	u8 vector = ioapic->redirtbl[irq].fields.vector;
++	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
++	u32 deliver_bitmask;
++	struct kvm_vcpu *vcpu;
++	int vcpu_id;
++
++	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
++		     "vector=%x trig_mode=%x\n",
++		     dest, dest_mode, delivery_mode, vector, trig_mode);
++
++	deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
++	if (!deliver_bitmask) {
++		ioapic_debug("no target on destination\n");
++		return;
++	}
++
++	switch (delivery_mode) {
++	case IOAPIC_LOWEST_PRIORITY:
++		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
++				deliver_bitmask);
++		if (vcpu != NULL)
++			ioapic_inj_irq(ioapic, vcpu, vector,
++				       trig_mode, delivery_mode);
++		else
++			ioapic_debug("null lowest prio vcpu: "
++				     "mask=%x vector=%x delivery_mode=%x\n",
++				     deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
++		break;
++	case IOAPIC_FIXED:
++		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
++			if (!(deliver_bitmask & (1 << vcpu_id)))
++				continue;
++			deliver_bitmask &= ~(1 << vcpu_id);
++			vcpu = ioapic->kvm->vcpus[vcpu_id];
++			if (vcpu) {
++				ioapic_inj_irq(ioapic, vcpu, vector,
++					       trig_mode, delivery_mode);
++			}
++		}
++		break;
++
++		/* TODO: NMI */
++	default:
++		printk(KERN_WARNING "Unsupported delivery mode %d\n",
++		       delivery_mode);
++		break;
++	}
++}
++
++void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
++{
++	u32 old_irr = ioapic->irr;
++	u32 mask = 1 << irq;
++	union ioapic_redir_entry entry;
++
++	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
++		entry = ioapic->redirtbl[irq];
++		level ^= entry.fields.polarity;
++		if (!level)
++			ioapic->irr &= ~mask;
++		else {
++			ioapic->irr |= mask;
++			if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
++			    || !entry.fields.remote_irr)
++				ioapic_service(ioapic, irq);
++		}
++	}
++}
++
++static int get_eoi_gsi(struct kvm_ioapic *ioapic, int vector)
++{
++	int i;
++
++	for (i = 0; i < IOAPIC_NUM_PINS; i++)
++		if (ioapic->redirtbl[i].fields.vector == vector)
++			return i;
++	return -1;
++}
++
++void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
++{
++	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
++	union ioapic_redir_entry *ent;
++	int gsi;
++
++	gsi = get_eoi_gsi(ioapic, vector);
++	if (gsi == -1) {
++		printk(KERN_WARNING "Can't find redir item for %d EOI\n",
++		       vector);
++		return;
++	}
++
++	ent = &ioapic->redirtbl[gsi];
++	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
++
++	ent->fields.remote_irr = 0;
++	if (!ent->fields.mask && (ioapic->irr & (1 << gsi)))
++		ioapic_deliver(ioapic, gsi);
++}
++
++static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr)
++{
++	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
++
++	return ((addr >= ioapic->base_address &&
++		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
++}
++
++static void ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
++			     void *val)
++{
++	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
++	u32 result;
++
++	ioapic_debug("addr %lx\n", (unsigned long)addr);
++	ASSERT(!(addr & 0xf));	/* check alignment */
++
++	addr &= 0xff;
++	switch (addr) {
++	case IOAPIC_REG_SELECT:
++		result = ioapic->ioregsel;
++		break;
++
++	case IOAPIC_REG_WINDOW:
++		result = ioapic_read_indirect(ioapic, addr, len);
++		break;
++
++	default:
++		result = 0;
++		break;
++	}
++	switch (len) {
++	case 8:
++		*(u64 *) val = result;
++		break;
++	case 1:
++	case 2:
++	case 4:
++		memcpy(val, (char *)&result, len);
++		break;
++	default:
++		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
++	}
++}
++
++static void ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
++			      const void *val)
++{
++	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
++	u32 data;
++
++	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
++		     (void*)addr, len, val);
++	ASSERT(!(addr & 0xf));	/* check alignment */
++	if (len == 4 || len == 8)
++		data = *(u32 *) val;
++	else {
++		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
++		return;
++	}
++
++	addr &= 0xff;
++	switch (addr) {
++	case IOAPIC_REG_SELECT:
++		ioapic->ioregsel = data;
++		break;
++
++	case IOAPIC_REG_WINDOW:
++		ioapic_write_indirect(ioapic, data);
++		break;
++#ifdef	CONFIG_IA64
++	case IOAPIC_REG_EOI:
++		kvm_ioapic_update_eoi(ioapic->kvm, data);
++		break;
++#endif
++
++	default:
++		break;
++	}
++}
++
++void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
++{
++	int i;
++
++	for (i = 0; i < IOAPIC_NUM_PINS; i++)
++		ioapic->redirtbl[i].fields.mask = 1;
++	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
++	ioapic->ioregsel = 0;
++	ioapic->irr = 0;
++	ioapic->id = 0;
++}
++
++int kvm_ioapic_init(struct kvm *kvm)
++{
++	struct kvm_ioapic *ioapic;
++
++	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
++	if (!ioapic)
++		return -ENOMEM;
++	kvm->arch.vioapic = ioapic;
++	kvm_ioapic_reset(ioapic);
++	ioapic->dev.read = ioapic_mmio_read;
++	ioapic->dev.write = ioapic_mmio_write;
++	ioapic->dev.in_range = ioapic_in_range;
++	ioapic->dev.private = ioapic;
++	ioapic->kvm = kvm;
++	kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
++	return 0;
++}
+diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
+new file mode 100644
+index 0000000..7f16675
+--- /dev/null
++++ b/virt/kvm/ioapic.h
+@@ -0,0 +1,95 @@
++#ifndef __KVM_IO_APIC_H
++#define __KVM_IO_APIC_H
++
++#include <linux/kvm_host.h>
++
++#include "iodev.h"
++
++struct kvm;
++struct kvm_vcpu;
++
++#define IOAPIC_NUM_PINS  KVM_IOAPIC_NUM_PINS
++#define IOAPIC_VERSION_ID 0x11	/* IOAPIC version */
++#define IOAPIC_EDGE_TRIG  0
++#define IOAPIC_LEVEL_TRIG 1
++
++#define IOAPIC_DEFAULT_BASE_ADDRESS  0xfec00000
++#define IOAPIC_MEM_LENGTH            0x100
++
++/* Direct registers. */
++#define IOAPIC_REG_SELECT  0x00
++#define IOAPIC_REG_WINDOW  0x10
++#define IOAPIC_REG_EOI     0x40	/* IA64 IOSAPIC only */
++
++/* Indirect registers. */
++#define IOAPIC_REG_APIC_ID 0x00	/* x86 IOAPIC only */
++#define IOAPIC_REG_VERSION 0x01
++#define IOAPIC_REG_ARB_ID  0x02	/* x86 IOAPIC only */
++
++/*ioapic delivery mode*/
++#define	IOAPIC_FIXED			0x0
++#define	IOAPIC_LOWEST_PRIORITY		0x1
++#define	IOAPIC_PMI			0x2
++#define	IOAPIC_NMI			0x4
++#define	IOAPIC_INIT			0x5
++#define	IOAPIC_EXTINT			0x7
++
++struct kvm_ioapic {
++	u64 base_address;
++	u32 ioregsel;
++	u32 id;
++	u32 irr;
++	u32 pad;
++	union ioapic_redir_entry {
++		u64 bits;
++		struct {
++			u8 vector;
++			u8 delivery_mode:3;
++			u8 dest_mode:1;
++			u8 delivery_status:1;
++			u8 polarity:1;
++			u8 remote_irr:1;
++			u8 trig_mode:1;
++			u8 mask:1;
++			u8 reserve:7;
++			u8 reserved[4];
++			u8 dest_id;
++		} fields;
++	} redirtbl[IOAPIC_NUM_PINS];
++	struct kvm_io_device dev;
++	struct kvm *kvm;
++};
++
++#ifdef DEBUG
++#define ASSERT(x)  							\
++do {									\
++	if (!(x)) {							\
++		printk(KERN_EMERG "assertion failed %s: %d: %s\n",	\
++		       __FILE__, __LINE__, #x);				\
++		BUG();							\
++	}								\
++} while (0)
++#else
++#define ASSERT(x) do { } while (0)
++#endif
++
++static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
++{
++	return kvm->arch.vioapic;
++}
++
++#ifdef CONFIG_IA64
++static inline int irqchip_in_kernel(struct kvm *kvm)
++{
++	return 1;
++}
++#endif
++
++struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
++				       unsigned long bitmap);
++void kvm_ioapic_update_eoi(struct kvm *kvm, int vector);
++int kvm_ioapic_init(struct kvm *kvm);
++void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
++void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
++
++#endif
+diff --git a/virt/kvm/iodev.h b/virt/kvm/iodev.h
+new file mode 100644
+index 0000000..c14e642
+--- /dev/null
++++ b/virt/kvm/iodev.h
+@@ -0,0 +1,63 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
++ */
++
++#ifndef __KVM_IODEV_H__
++#define __KVM_IODEV_H__
++
++#include <linux/kvm_types.h>
++
++struct kvm_io_device {
++	void (*read)(struct kvm_io_device *this,
++		     gpa_t addr,
++		     int len,
++		     void *val);
++	void (*write)(struct kvm_io_device *this,
++		      gpa_t addr,
++		      int len,
++		      const void *val);
++	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
++	void (*destructor)(struct kvm_io_device *this);
++
++	void             *private;
++};
++
++static inline void kvm_iodevice_read(struct kvm_io_device *dev,
++				     gpa_t addr,
++				     int len,
++				     void *val)
++{
++	dev->read(dev, addr, len, val);
++}
++
++static inline void kvm_iodevice_write(struct kvm_io_device *dev,
++				      gpa_t addr,
++				      int len,
++				      const void *val)
++{
++	dev->write(dev, addr, len, val);
++}
++
++static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
++{
++	return dev->in_range(dev, addr);
++}
++
++static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
++{
++	if (dev->destructor)
++		dev->destructor(dev);
++}
++
++#endif /* __KVM_IODEV_H__ */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+new file mode 100644
+index 0000000..3c4fe26
+--- /dev/null
++++ b/virt/kvm/kvm_main.c
+@@ -0,0 +1,1400 @@
++/*
++ * Kernel-based Virtual Machine driver for Linux
++ *
++ * This module enables machines with Intel VT-x extensions to run virtual
++ * machines without emulation or binary translation.
++ *
++ * Copyright (C) 2006 Qumranet, Inc.
++ *
++ * Authors:
++ *   Avi Kivity   <avi at qumranet.com>
++ *   Yaniv Kamay  <yaniv at qumranet.com>
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2.  See
++ * the COPYING file in the top-level directory.
++ *
++ */
++
++#include "iodev.h"
++
++#include <linux/kvm_host.h>
++#include <linux/kvm.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/percpu.h>
++#include <linux/gfp.h>
++#include <linux/mm.h>
++#include <linux/miscdevice.h>
++#include <linux/vmalloc.h>
++#include <linux/reboot.h>
++#include <linux/debugfs.h>
++#include <linux/highmem.h>
++#include <linux/file.h>
++#include <linux/sysdev.h>
++#include <linux/cpu.h>
++#include <linux/sched.h>
++#include <linux/cpumask.h>
++#include <linux/smp.h>
++#include <linux/anon_inodes.h>
++#include <linux/profile.h>
++#include <linux/kvm_para.h>
++#include <linux/pagemap.h>
++#include <linux/mman.h>
++
++#include <asm/processor.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++
++MODULE_AUTHOR("Qumranet");
++MODULE_LICENSE("GPL");
++
++DEFINE_SPINLOCK(kvm_lock);
++LIST_HEAD(vm_list);
++
++static cpumask_t cpus_hardware_enabled;
++
++struct kmem_cache *kvm_vcpu_cache;
++EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
++
++static __read_mostly struct preempt_ops kvm_preempt_ops;
++
++static struct dentry *debugfs_dir;
++
++static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
++			   unsigned long arg);
++
++static inline int valid_vcpu(int n)
++{
++	return likely(n >= 0 && n < KVM_MAX_VCPUS);
++}
++
++/*
++ * Switches to specified vcpu, until a matching vcpu_put()
++ */
++void vcpu_load(struct kvm_vcpu *vcpu)
++{
++	int cpu;
++
++	mutex_lock(&vcpu->mutex);
++	cpu = get_cpu();
++	preempt_notifier_register(&vcpu->preempt_notifier);
++	kvm_arch_vcpu_load(vcpu, cpu);
++	put_cpu();
++}
++
++void vcpu_put(struct kvm_vcpu *vcpu)
++{
++	preempt_disable();
++	kvm_arch_vcpu_put(vcpu);
++	preempt_notifier_unregister(&vcpu->preempt_notifier);
++	preempt_enable();
++	mutex_unlock(&vcpu->mutex);
++}
++
++static void ack_flush(void *_completed)
++{
++}
++
++void kvm_flush_remote_tlbs(struct kvm *kvm)
++{
++	int i, cpu;
++	cpumask_t cpus;
++	struct kvm_vcpu *vcpu;
++
++	cpus_clear(cpus);
++	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
++		vcpu = kvm->vcpus[i];
++		if (!vcpu)
++			continue;
++		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
++			continue;
++		cpu = vcpu->cpu;
++		if (cpu != -1 && cpu != raw_smp_processor_id())
++			cpu_set(cpu, cpus);
++	}
++	if (cpus_empty(cpus))
++		return;
++	++kvm->stat.remote_tlb_flush;
++	smp_call_function_mask(cpus, ack_flush, NULL, 1);
++}
++
++int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
++{
++	struct page *page;
++	int r;
++
++	mutex_init(&vcpu->mutex);
++	vcpu->cpu = -1;
++	vcpu->kvm = kvm;
++	vcpu->vcpu_id = id;
++	init_waitqueue_head(&vcpu->wq);
++
++	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++	if (!page) {
++		r = -ENOMEM;
++		goto fail;
++	}
++	vcpu->run = page_address(page);
++
++	r = kvm_arch_vcpu_init(vcpu);
++	if (r < 0)
++		goto fail_free_run;
++	return 0;
++
++fail_free_run:
++	free_page((unsigned long)vcpu->run);
++fail:
++	return r;
++}
++EXPORT_SYMBOL_GPL(kvm_vcpu_init);
++
++void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
++{
++	kvm_arch_vcpu_uninit(vcpu);
++	free_page((unsigned long)vcpu->run);
++}
++EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
++
++static struct kvm *kvm_create_vm(void)
++{
++	struct kvm *kvm = kvm_arch_create_vm();
++
++	if (IS_ERR(kvm))
++		goto out;
++
++	kvm->mm = current->mm;
++	atomic_inc(&kvm->mm->mm_count);
++	spin_lock_init(&kvm->mmu_lock);
++	kvm_io_bus_init(&kvm->pio_bus);
++	mutex_init(&kvm->lock);
++	kvm_io_bus_init(&kvm->mmio_bus);
++	spin_lock(&kvm_lock);
++	list_add(&kvm->vm_list, &vm_list);
++	spin_unlock(&kvm_lock);
++out:
++	return kvm;
++}
++
++/*
++ * Free any memory in @free but not in @dont.
++ */
++static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
++				  struct kvm_memory_slot *dont)
++{
++	if (!dont || free->rmap != dont->rmap)
++		vfree(free->rmap);
++
++	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
++		vfree(free->dirty_bitmap);
++
++	free->npages = 0;
++	free->dirty_bitmap = NULL;
++	free->rmap = NULL;
++}
++
++void kvm_free_physmem(struct kvm *kvm)
++{
++	int i;
++
++	for (i = 0; i < kvm->nmemslots; ++i)
++		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
++}
++
++static void kvm_destroy_vm(struct kvm *kvm)
++{
++	struct mm_struct *mm = kvm->mm;
++
++	spin_lock(&kvm_lock);
++	list_del(&kvm->vm_list);
++	spin_unlock(&kvm_lock);
++	kvm_io_bus_destroy(&kvm->pio_bus);
++	kvm_io_bus_destroy(&kvm->mmio_bus);
++	kvm_arch_destroy_vm(kvm);
++	mmdrop(mm);
++}
++
++static int kvm_vm_release(struct inode *inode, struct file *filp)
++{
++	struct kvm *kvm = filp->private_data;
++
++	kvm_destroy_vm(kvm);
++	return 0;
++}
++
++/*
++ * Allocate some memory and give it an address in the guest physical address
++ * space.
++ *
++ * Discontiguous memory is allowed, mostly for framebuffers.
++ *
++ * Must be called holding mmap_sem for write.
++ */
++int __kvm_set_memory_region(struct kvm *kvm,
++			    struct kvm_userspace_memory_region *mem,
++			    int user_alloc)
++{
++	int r;
++	gfn_t base_gfn;
++	unsigned long npages;
++	unsigned long i;
++	struct kvm_memory_slot *memslot;
++	struct kvm_memory_slot old, new;
++
++	r = -EINVAL;
++	/* General sanity checks */
++	if (mem->memory_size & (PAGE_SIZE - 1))
++		goto out;
++	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
++		goto out;
++	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
++		goto out;
++	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
++		goto out;
++
++	memslot = &kvm->memslots[mem->slot];
++	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
++	npages = mem->memory_size >> PAGE_SHIFT;
++
++	if (!npages)
++		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
++
++	new = old = *memslot;
++
++	new.base_gfn = base_gfn;
++	new.npages = npages;
++	new.flags = mem->flags;
++
++	/* Disallow changing a memory slot's size. */
++	r = -EINVAL;
++	if (npages && old.npages && npages != old.npages)
++		goto out_free;
++
++	/* Check for overlaps */
++	r = -EEXIST;
++	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
++		struct kvm_memory_slot *s = &kvm->memslots[i];
++
++		if (s == memslot)
++			continue;
++		if (!((base_gfn + npages <= s->base_gfn) ||
++		      (base_gfn >= s->base_gfn + s->npages)))
++			goto out_free;
++	}
++
++	/* Free page dirty bitmap if unneeded */
++	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
++		new.dirty_bitmap = NULL;
++
++	r = -ENOMEM;
++
++	/* Allocate if a slot is being created */
++	if (npages && !new.rmap) {
++		new.rmap = vmalloc(npages * sizeof(struct page *));
++
++		if (!new.rmap)
++			goto out_free;
++
++		memset(new.rmap, 0, npages * sizeof(*new.rmap));
++
++		new.user_alloc = user_alloc;
++		new.userspace_addr = mem->userspace_addr;
++	}
++
++	/* Allocate page dirty bitmap if needed */
++	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
++		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
++
++		new.dirty_bitmap = vmalloc(dirty_bytes);
++		if (!new.dirty_bitmap)
++			goto out_free;
++		memset(new.dirty_bitmap, 0, dirty_bytes);
++	}
++
++	if (mem->slot >= kvm->nmemslots)
++		kvm->nmemslots = mem->slot + 1;
++
++	*memslot = new;
++
++	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
++	if (r) {
++		*memslot = old;
++		goto out_free;
++	}
++
++	kvm_free_physmem_slot(&old, &new);
++	return 0;
++
++out_free:
++	kvm_free_physmem_slot(&new, &old);
++out:
++	return r;
++
++}
++EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
++
++int kvm_set_memory_region(struct kvm *kvm,
++			  struct kvm_userspace_memory_region *mem,
++			  int user_alloc)
++{
++	int r;
++
++	down_write(&current->mm->mmap_sem);
++	r = __kvm_set_memory_region(kvm, mem, user_alloc);
++	up_write(&current->mm->mmap_sem);
++	return r;
++}
++EXPORT_SYMBOL_GPL(kvm_set_memory_region);
++
++int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
++				   struct
++				   kvm_userspace_memory_region *mem,
++				   int user_alloc)
++{
++	if (mem->slot >= KVM_MEMORY_SLOTS)
++		return -EINVAL;
++	return kvm_set_memory_region(kvm, mem, user_alloc);
++}
++
++int kvm_get_dirty_log(struct kvm *kvm,
++			struct kvm_dirty_log *log, int *is_dirty)
++{
++	struct kvm_memory_slot *memslot;
++	int r, i;
++	int n;
++	unsigned long any = 0;
++
++	r = -EINVAL;
++	if (log->slot >= KVM_MEMORY_SLOTS)
++		goto out;
++
++	memslot = &kvm->memslots[log->slot];
++	r = -ENOENT;
++	if (!memslot->dirty_bitmap)
++		goto out;
++
++	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++
++	for (i = 0; !any && i < n/sizeof(long); ++i)
++		any = memslot->dirty_bitmap[i];
++
++	r = -EFAULT;
++	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
++		goto out;
++
++	if (any)
++		*is_dirty = 1;
++
++	r = 0;
++out:
++	return r;
++}
++
++int is_error_page(struct page *page)
++{
++	return page == bad_page;
++}
++EXPORT_SYMBOL_GPL(is_error_page);
++
++static inline unsigned long bad_hva(void)
++{
++	return PAGE_OFFSET;
++}
++
++int kvm_is_error_hva(unsigned long addr)
++{
++	return addr == bad_hva();
++}
++EXPORT_SYMBOL_GPL(kvm_is_error_hva);
++
++static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
++{
++	int i;
++
++	for (i = 0; i < kvm->nmemslots; ++i) {
++		struct kvm_memory_slot *memslot = &kvm->memslots[i];
++
++		if (gfn >= memslot->base_gfn
++		    && gfn < memslot->base_gfn + memslot->npages)
++			return memslot;
++	}
++	return NULL;
++}
++
++struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
++{
++	gfn = unalias_gfn(kvm, gfn);
++	return __gfn_to_memslot(kvm, gfn);
++}
++
++int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
++{
++	int i;
++
++	gfn = unalias_gfn(kvm, gfn);
++	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
++		struct kvm_memory_slot *memslot = &kvm->memslots[i];
++
++		if (gfn >= memslot->base_gfn
++		    && gfn < memslot->base_gfn + memslot->npages)
++			return 1;
++	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
++
++static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
++{
++	struct kvm_memory_slot *slot;
++
++	gfn = unalias_gfn(kvm, gfn);
++	slot = __gfn_to_memslot(kvm, gfn);
++	if (!slot)
++		return bad_hva();
++	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
++}
++
++/*
++ * Requires current->mm->mmap_sem to be held
++ */
++struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
++{
++	struct page *page[1];
++	unsigned long addr;
++	int npages;
++
++	might_sleep();
++
++	addr = gfn_to_hva(kvm, gfn);
++	if (kvm_is_error_hva(addr)) {
++		get_page(bad_page);
++		return bad_page;
++	}
++
++	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
++				NULL);
++
++	if (npages != 1) {
++		get_page(bad_page);
++		return bad_page;
++	}
++
++	return page[0];
++}
++
++EXPORT_SYMBOL_GPL(gfn_to_page);
++
++void kvm_release_page_clean(struct page *page)
++{
++	put_page(page);
++}
++EXPORT_SYMBOL_GPL(kvm_release_page_clean);
++
++void kvm_release_page_dirty(struct page *page)
++{
++	if (!PageReserved(page))
++		SetPageDirty(page);
++	put_page(page);
++}
++EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
++
++static int next_segment(unsigned long len, int offset)
++{
++	if (len > PAGE_SIZE - offset)
++		return PAGE_SIZE - offset;
++	else
++		return len;
++}
++
++int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
++			int len)
++{
++	int r;
++	unsigned long addr;
++
++	addr = gfn_to_hva(kvm, gfn);
++	if (kvm_is_error_hva(addr))
++		return -EFAULT;
++	r = copy_from_user(data, (void __user *)addr + offset, len);
++	if (r)
++		return -EFAULT;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_read_guest_page);
++
++int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
++{
++	gfn_t gfn = gpa >> PAGE_SHIFT;
++	int seg;
++	int offset = offset_in_page(gpa);
++	int ret;
++
++	while ((seg = next_segment(len, offset)) != 0) {
++		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
++		if (ret < 0)
++			return ret;
++		offset = 0;
++		len -= seg;
++		data += seg;
++		++gfn;
++	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_read_guest);
++
++int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
++			  unsigned long len)
++{
++	int r;
++	unsigned long addr;
++	gfn_t gfn = gpa >> PAGE_SHIFT;
++	int offset = offset_in_page(gpa);
++
++	addr = gfn_to_hva(kvm, gfn);
++	if (kvm_is_error_hva(addr))
++		return -EFAULT;
++	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
++	if (r)
++		return -EFAULT;
++	return 0;
++}
++EXPORT_SYMBOL(kvm_read_guest_atomic);
++
++int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
++			 int offset, int len)
++{
++	int r;
++	unsigned long addr;
++
++	addr = gfn_to_hva(kvm, gfn);
++	if (kvm_is_error_hva(addr))
++		return -EFAULT;
++	r = copy_to_user((void __user *)addr + offset, data, len);
++	if (r)
++		return -EFAULT;
++	mark_page_dirty(kvm, gfn);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_write_guest_page);
++
++int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
++		    unsigned long len)
++{
++	gfn_t gfn = gpa >> PAGE_SHIFT;
++	int seg;
++	int offset = offset_in_page(gpa);
++	int ret;
++
++	while ((seg = next_segment(len, offset)) != 0) {
++		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
++		if (ret < 0)
++			return ret;
++		offset = 0;
++		len -= seg;
++		data += seg;
++		++gfn;
++	}
++	return 0;
++}
++
++int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
++{
++	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
++}
++EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
++
++int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
++{
++	gfn_t gfn = gpa >> PAGE_SHIFT;
++	int seg;
++	int offset = offset_in_page(gpa);
++	int ret;
++
++        while ((seg = next_segment(len, offset)) != 0) {
++		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
++		if (ret < 0)
++			return ret;
++		offset = 0;
++		len -= seg;
++		++gfn;
++	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_clear_guest);
++
++void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
++{
++	struct kvm_memory_slot *memslot;
++
++	gfn = unalias_gfn(kvm, gfn);
++	memslot = __gfn_to_memslot(kvm, gfn);
++	if (memslot && memslot->dirty_bitmap) {
++		unsigned long rel_gfn = gfn - memslot->base_gfn;
++
++		/* avoid RMW */
++		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
++			set_bit(rel_gfn, memslot->dirty_bitmap);
++	}
++}
++
++/*
++ * The vCPU has executed a HLT instruction with in-kernel mode enabled.
++ */
++void kvm_vcpu_block(struct kvm_vcpu *vcpu)
++{
++	DECLARE_WAITQUEUE(wait, current);
++
++	add_wait_queue(&vcpu->wq, &wait);
++
++	/*
++	 * We will block until either an interrupt or a signal wakes us up
++	 */
++	while (!kvm_cpu_has_interrupt(vcpu)
++	       && !signal_pending(current)
++	       && !kvm_arch_vcpu_runnable(vcpu)) {
++		set_current_state(TASK_INTERRUPTIBLE);
++		vcpu_put(vcpu);
++		schedule();
++		vcpu_load(vcpu);
++	}
++
++	__set_current_state(TASK_RUNNING);
++	remove_wait_queue(&vcpu->wq, &wait);
++}
++
++void kvm_resched(struct kvm_vcpu *vcpu)
++{
++	if (!need_resched())
++		return;
++	cond_resched();
++}
++EXPORT_SYMBOL_GPL(kvm_resched);
++
++static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
++	struct page *page;
++
++	if (vmf->pgoff == 0)
++		page = virt_to_page(vcpu->run);
++	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
++		page = virt_to_page(vcpu->arch.pio_data);
++	else
++		return VM_FAULT_SIGBUS;
++	get_page(page);
++	vmf->page = page;
++	return 0;
++}
++
++static struct vm_operations_struct kvm_vcpu_vm_ops = {
++	.fault = kvm_vcpu_fault,
++};
++
++static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	vma->vm_ops = &kvm_vcpu_vm_ops;
++	return 0;
++}
++
++static int kvm_vcpu_release(struct inode *inode, struct file *filp)
++{
++	struct kvm_vcpu *vcpu = filp->private_data;
++
++	fput(vcpu->kvm->filp);
++	return 0;
++}
++
++static struct file_operations kvm_vcpu_fops = {
++	.release        = kvm_vcpu_release,
++	.unlocked_ioctl = kvm_vcpu_ioctl,
++	.compat_ioctl   = kvm_vcpu_ioctl,
++	.mmap           = kvm_vcpu_mmap,
++};
++
++/*
++ * Allocates an inode for the vcpu.
++ */
++static int create_vcpu_fd(struct kvm_vcpu *vcpu)
++{
++	int fd, r;
++	struct inode *inode;
++	struct file *file;
++
++	r = anon_inode_getfd(&fd, &inode, &file,
++			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
++	if (r)
++		return r;
++	atomic_inc(&vcpu->kvm->filp->f_count);
++	return fd;
++}
++
++/*
++ * Creates some virtual cpus.  Good luck creating more than one.
++ */
++static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
++{
++	int r;
++	struct kvm_vcpu *vcpu;
++
++	if (!valid_vcpu(n))
++		return -EINVAL;
++
++	vcpu = kvm_arch_vcpu_create(kvm, n);
++	if (IS_ERR(vcpu))
++		return PTR_ERR(vcpu);
++
++	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
++
++	r = kvm_arch_vcpu_setup(vcpu);
++	if (r)
++		goto vcpu_destroy;
++
++	mutex_lock(&kvm->lock);
++	if (kvm->vcpus[n]) {
++		r = -EEXIST;
++		mutex_unlock(&kvm->lock);
++		goto vcpu_destroy;
++	}
++	kvm->vcpus[n] = vcpu;
++	mutex_unlock(&kvm->lock);
++
++	/* Now it's all set up, let userspace reach it */
++	r = create_vcpu_fd(vcpu);
++	if (r < 0)
++		goto unlink;
++	return r;
++
++unlink:
++	mutex_lock(&kvm->lock);
++	kvm->vcpus[n] = NULL;
++	mutex_unlock(&kvm->lock);
++vcpu_destroy:
++	kvm_arch_vcpu_destroy(vcpu);
++	return r;
++}
++
++static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
++{
++	if (sigset) {
++		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
++		vcpu->sigset_active = 1;
++		vcpu->sigset = *sigset;
++	} else
++		vcpu->sigset_active = 0;
++	return 0;
++}
++
++static long kvm_vcpu_ioctl(struct file *filp,
++			   unsigned int ioctl, unsigned long arg)
++{
++	struct kvm_vcpu *vcpu = filp->private_data;
++	void __user *argp = (void __user *)arg;
++	int r;
++
++	if (vcpu->kvm->mm != current->mm)
++		return -EIO;
++	switch (ioctl) {
++	case KVM_RUN:
++		r = -EINVAL;
++		if (arg)
++			goto out;
++		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
++		break;
++	case KVM_GET_REGS: {
++		struct kvm_regs kvm_regs;
++
++		memset(&kvm_regs, 0, sizeof kvm_regs);
++		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
++		if (r)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_SET_REGS: {
++		struct kvm_regs kvm_regs;
++
++		r = -EFAULT;
++		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
++			goto out;
++		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
++		if (r)
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_GET_SREGS: {
++		struct kvm_sregs kvm_sregs;
++
++		memset(&kvm_sregs, 0, sizeof kvm_sregs);
++		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
++		if (r)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_SET_SREGS: {
++		struct kvm_sregs kvm_sregs;
++
++		r = -EFAULT;
++		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
++			goto out;
++		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
++		if (r)
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_TRANSLATE: {
++		struct kvm_translation tr;
++
++		r = -EFAULT;
++		if (copy_from_user(&tr, argp, sizeof tr))
++			goto out;
++		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
++		if (r)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(argp, &tr, sizeof tr))
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_DEBUG_GUEST: {
++		struct kvm_debug_guest dbg;
++
++		r = -EFAULT;
++		if (copy_from_user(&dbg, argp, sizeof dbg))
++			goto out;
++		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
++		if (r)
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_SET_SIGNAL_MASK: {
++		struct kvm_signal_mask __user *sigmask_arg = argp;
++		struct kvm_signal_mask kvm_sigmask;
++		sigset_t sigset, *p;
++
++		p = NULL;
++		if (argp) {
++			r = -EFAULT;
++			if (copy_from_user(&kvm_sigmask, argp,
++					   sizeof kvm_sigmask))
++				goto out;
++			r = -EINVAL;
++			if (kvm_sigmask.len != sizeof sigset)
++				goto out;
++			r = -EFAULT;
++			if (copy_from_user(&sigset, sigmask_arg->sigset,
++					   sizeof sigset))
++				goto out;
++			p = &sigset;
++		}
++		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
++		break;
++	}
++	case KVM_GET_FPU: {
++		struct kvm_fpu fpu;
++
++		memset(&fpu, 0, sizeof fpu);
++		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
++		if (r)
++			goto out;
++		r = -EFAULT;
++		if (copy_to_user(argp, &fpu, sizeof fpu))
++			goto out;
++		r = 0;
++		break;
++	}
++	case KVM_SET_FPU: {
++		struct kvm_fpu fpu;
++
++		r = -EFAULT;
++		if (copy_from_user(&fpu, argp, sizeof fpu))
++			goto out;
++		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
++		if (r)
++			goto out;
++		r = 0;
++		break;
++	}
++	default:
++		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
++	}
++out:
++	return r;
++}
++
++static long kvm_vm_ioctl(struct file *filp,
++			   unsigned int ioctl, unsigned long arg)
++{
++	struct kvm *kvm = filp->private_data;
++	void __user *argp = (void __user *)arg;
++	int r;
++
++	if (kvm->mm != current->mm)
++		return -EIO;
++	switch (ioctl) {
++	case KVM_CREATE_VCPU:
++		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
++		if (r < 0)
++			goto out;
++		break;
++	case KVM_SET_USER_MEMORY_REGION: {
++		struct kvm_userspace_memory_region kvm_userspace_mem;
++
++		r = -EFAULT;
++		if (copy_from_user(&kvm_userspace_mem, argp,
++						sizeof kvm_userspace_mem))
++			goto out;
++
++		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
++		if (r)
++			goto out;
++		break;
++	}
++	case KVM_GET_DIRTY_LOG: {
++		struct kvm_dirty_log log;
++
++		r = -EFAULT;
++		if (copy_from_user(&log, argp, sizeof log))
++			goto out;
++		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
++		if (r)
++			goto out;
++		break;
++	}
++	default:
++		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
++	}
++out:
++	return r;
++}
++
++static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	struct kvm *kvm = vma->vm_file->private_data;
++	struct page *page;
++
++	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
++		return VM_FAULT_SIGBUS;
++	page = gfn_to_page(kvm, vmf->pgoff);
++	if (is_error_page(page)) {
++		kvm_release_page_clean(page);
++		return VM_FAULT_SIGBUS;
++	}
++	vmf->page = page;
++	return 0;
++}
++
++static struct vm_operations_struct kvm_vm_vm_ops = {
++	.fault = kvm_vm_fault,
++};
++
++static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	vma->vm_ops = &kvm_vm_vm_ops;
++	return 0;
++}
++
++static struct file_operations kvm_vm_fops = {
++	.release        = kvm_vm_release,
++	.unlocked_ioctl = kvm_vm_ioctl,
++	.compat_ioctl   = kvm_vm_ioctl,
++	.mmap           = kvm_vm_mmap,
++};
++
++static int kvm_dev_ioctl_create_vm(void)
++{
++	int fd, r;
++	struct inode *inode;
++	struct file *file;
++	struct kvm *kvm;
++
++	kvm = kvm_create_vm();
++	if (IS_ERR(kvm))
++		return PTR_ERR(kvm);
++	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
++	if (r) {
++		kvm_destroy_vm(kvm);
++		return r;
++	}
++
++	kvm->filp = file;
++
++	return fd;
++}
++
++static long kvm_dev_ioctl(struct file *filp,
++			  unsigned int ioctl, unsigned long arg)
++{
++	void __user *argp = (void __user *)arg;
++	long r = -EINVAL;
++
++	switch (ioctl) {
++	case KVM_GET_API_VERSION:
++		r = -EINVAL;
++		if (arg)
++			goto out;
++		r = KVM_API_VERSION;
++		break;
++	case KVM_CREATE_VM:
++		r = -EINVAL;
++		if (arg)
++			goto out;
++		r = kvm_dev_ioctl_create_vm();
++		break;
++	case KVM_CHECK_EXTENSION:
++		r = kvm_dev_ioctl_check_extension((long)argp);
++		break;
++	case KVM_GET_VCPU_MMAP_SIZE:
++		r = -EINVAL;
++		if (arg)
++			goto out;
++		r = 2 * PAGE_SIZE;
++		break;
++	default:
++		return kvm_arch_dev_ioctl(filp, ioctl, arg);
++	}
++out:
++	return r;
++}
++
++static struct file_operations kvm_chardev_ops = {
++	.unlocked_ioctl = kvm_dev_ioctl,
++	.compat_ioctl   = kvm_dev_ioctl,
++};
++
++static struct miscdevice kvm_dev = {
++	KVM_MINOR,
++	"kvm",
++	&kvm_chardev_ops,
++};
++
++static void hardware_enable(void *junk)
++{
++	int cpu = raw_smp_processor_id();
++
++	if (cpu_isset(cpu, cpus_hardware_enabled))
++		return;
++	cpu_set(cpu, cpus_hardware_enabled);
++	kvm_arch_hardware_enable(NULL);
++}
++
++static void hardware_disable(void *junk)
++{
++	int cpu = raw_smp_processor_id();
++
++	if (!cpu_isset(cpu, cpus_hardware_enabled))
++		return;
++	cpu_clear(cpu, cpus_hardware_enabled);
++	decache_vcpus_on_cpu(cpu);
++	kvm_arch_hardware_disable(NULL);
++}
++
++static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
++			   void *v)
++{
++	int cpu = (long)v;
++
++	val &= ~CPU_TASKS_FROZEN;
++	switch (val) {
++	case CPU_DYING:
++		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
++		       cpu);
++		hardware_disable(NULL);
++		break;
++	case CPU_UP_CANCELED:
++		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
++		       cpu);
++		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
++		break;
++	case CPU_ONLINE:
++		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
++		       cpu);
++		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
++		break;
++	}
++	return NOTIFY_OK;
++}
++
++static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
++		      void *v)
++{
++	if (val == SYS_RESTART) {
++		/*
++		 * Some (well, at least mine) BIOSes hang on reboot if
++		 * in vmx root mode.
++		 */
++		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
++		on_each_cpu(hardware_disable, NULL, 0, 1);
++	}
++	return NOTIFY_OK;
++}
++
++static struct notifier_block kvm_reboot_notifier = {
++	.notifier_call = kvm_reboot,
++	.priority = 0,
++};
++
++void kvm_io_bus_init(struct kvm_io_bus *bus)
++{
++	memset(bus, 0, sizeof(*bus));
++}
++
++void kvm_io_bus_destroy(struct kvm_io_bus *bus)
++{
++	int i;
++
++	for (i = 0; i < bus->dev_count; i++) {
++		struct kvm_io_device *pos = bus->devs[i];
++
++		kvm_iodevice_destructor(pos);
++	}
++}
++
++struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
++{
++	int i;
++
++	for (i = 0; i < bus->dev_count; i++) {
++		struct kvm_io_device *pos = bus->devs[i];
++
++		if (pos->in_range(pos, addr))
++			return pos;
++	}
++
++	return NULL;
++}
++
++void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
++{
++	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
++
++	bus->devs[bus->dev_count++] = dev;
++}
++
++static struct notifier_block kvm_cpu_notifier = {
++	.notifier_call = kvm_cpu_hotplug,
++	.priority = 20, /* must be > scheduler priority */
++};
++
++static u64 vm_stat_get(void *_offset)
++{
++	unsigned offset = (long)_offset;
++	u64 total = 0;
++	struct kvm *kvm;
++
++	spin_lock(&kvm_lock);
++	list_for_each_entry(kvm, &vm_list, vm_list)
++		total += *(u32 *)((void *)kvm + offset);
++	spin_unlock(&kvm_lock);
++	return total;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
++
++static u64 vcpu_stat_get(void *_offset)
++{
++	unsigned offset = (long)_offset;
++	u64 total = 0;
++	struct kvm *kvm;
++	struct kvm_vcpu *vcpu;
++	int i;
++
++	spin_lock(&kvm_lock);
++	list_for_each_entry(kvm, &vm_list, vm_list)
++		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
++			vcpu = kvm->vcpus[i];
++			if (vcpu)
++				total += *(u32 *)((void *)vcpu + offset);
++		}
++	spin_unlock(&kvm_lock);
++	return total;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
++
++static struct file_operations *stat_fops[] = {
++	[KVM_STAT_VCPU] = &vcpu_stat_fops,
++	[KVM_STAT_VM]   = &vm_stat_fops,
++};
++
++static void kvm_init_debug(void)
++{
++	struct kvm_stats_debugfs_item *p;
++
++	debugfs_dir = debugfs_create_dir("kvm", NULL);
++	for (p = debugfs_entries; p->name; ++p)
++		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
++						(void *)(long)p->offset,
++						stat_fops[p->kind]);
++}
++
++static void kvm_exit_debug(void)
++{
++	struct kvm_stats_debugfs_item *p;
++
++	for (p = debugfs_entries; p->name; ++p)
++		debugfs_remove(p->dentry);
++	debugfs_remove(debugfs_dir);
++}
++
++static int kvm_suspend(struct sys_device *dev, pm_message_t state)
++{
++	hardware_disable(NULL);
++	return 0;
++}
++
++static int kvm_resume(struct sys_device *dev)
++{
++	hardware_enable(NULL);
++	return 0;
++}
++
++static struct sysdev_class kvm_sysdev_class = {
++	.name = "kvm",
++	.suspend = kvm_suspend,
++	.resume = kvm_resume,
++};
++
++static struct sys_device kvm_sysdev = {
++	.id = 0,
++	.cls = &kvm_sysdev_class,
++};
++
++struct page *bad_page;
++
++static inline
++struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
++{
++	return container_of(pn, struct kvm_vcpu, preempt_notifier);
++}
++
++static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
++{
++	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
++
++	kvm_arch_vcpu_load(vcpu, cpu);
++}
++
++static void kvm_sched_out(struct preempt_notifier *pn,
++			  struct task_struct *next)
++{
++	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
++
++	kvm_arch_vcpu_put(vcpu);
++}
++
++int kvm_init(void *opaque, unsigned int vcpu_size,
++		  struct module *module)
++{
++	int r;
++	int cpu;
++
++	kvm_init_debug();
++
++	r = kvm_arch_init(opaque);
++	if (r)
++		goto out_fail;
++
++	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++
++	if (bad_page == NULL) {
++		r = -ENOMEM;
++		goto out;
++	}
++
++	r = kvm_arch_hardware_setup();
++	if (r < 0)
++		goto out_free_0;
++
++	for_each_online_cpu(cpu) {
++		smp_call_function_single(cpu,
++				kvm_arch_check_processor_compat,
++				&r, 0, 1);
++		if (r < 0)
++			goto out_free_1;
++	}
++
++	on_each_cpu(hardware_enable, NULL, 0, 1);
++	r = register_cpu_notifier(&kvm_cpu_notifier);
++	if (r)
++		goto out_free_2;
++	register_reboot_notifier(&kvm_reboot_notifier);
++
++	r = sysdev_class_register(&kvm_sysdev_class);
++	if (r)
++		goto out_free_3;
++
++	r = sysdev_register(&kvm_sysdev);
++	if (r)
++		goto out_free_4;
++
++	/* A kmem cache lets us meet the alignment requirements of fx_save. */
++	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
++					   __alignof__(struct kvm_vcpu),
++					   0, NULL);
++	if (!kvm_vcpu_cache) {
++		r = -ENOMEM;
++		goto out_free_5;
++	}
++
++	kvm_chardev_ops.owner = module;
++
++	r = misc_register(&kvm_dev);
++	if (r) {
++		printk(KERN_ERR "kvm: misc device register failed\n");
++		goto out_free;
++	}
++
++	kvm_preempt_ops.sched_in = kvm_sched_in;
++	kvm_preempt_ops.sched_out = kvm_sched_out;
++
++	return 0;
++
++out_free:
++	kmem_cache_destroy(kvm_vcpu_cache);
++out_free_5:
++	sysdev_unregister(&kvm_sysdev);
++out_free_4:
++	sysdev_class_unregister(&kvm_sysdev_class);
++out_free_3:
++	unregister_reboot_notifier(&kvm_reboot_notifier);
++	unregister_cpu_notifier(&kvm_cpu_notifier);
++out_free_2:
++	on_each_cpu(hardware_disable, NULL, 0, 1);
++out_free_1:
++	kvm_arch_hardware_unsetup();
++out_free_0:
++	__free_page(bad_page);
++out:
++	kvm_arch_exit();
++	kvm_exit_debug();
++out_fail:
++	return r;
++}
++EXPORT_SYMBOL_GPL(kvm_init);
++
++void kvm_exit(void)
++{
++	misc_deregister(&kvm_dev);
++	kmem_cache_destroy(kvm_vcpu_cache);
++	sysdev_unregister(&kvm_sysdev);
++	sysdev_class_unregister(&kvm_sysdev_class);
++	unregister_reboot_notifier(&kvm_reboot_notifier);
++	unregister_cpu_notifier(&kvm_cpu_notifier);
++	on_each_cpu(hardware_disable, NULL, 0, 1);
++	kvm_arch_hardware_unsetup();
++	kvm_arch_exit();
++	kvm_exit_debug();
++	__free_page(bad_page);
++}
++EXPORT_SYMBOL_GPL(kvm_exit);

Modified: dists/trunk/linux-2.6/debian/patches/series/1~experimental.1
==============================================================================
--- dists/trunk/linux-2.6/debian/patches/series/1~experimental.1	(original)
+++ dists/trunk/linux-2.6/debian/patches/series/1~experimental.1	Thu Jan 31 09:04:04 2008
@@ -1,10 +1,10 @@
-+ bugfix/all/patch-2.6.24-git8
++ bugfix/all/patch-2.6.24-git9
 + debian/version.patch
 + debian/kernelvariables.patch
 + debian/doc-build-parallel.patch
 + debian/scripts-kconfig-reportoldconfig.patch
 + debian/powerpc-mkvmlinuz-support-ppc.patch
-+ debian/powerpc-mkvmlinuz-support-powerpc.patch
+#+ debian/powerpc-mkvmlinuz-support-powerpc.patch
 
 + debian/drivers-ata-ata_piix-postpone-pata.patch
 
@@ -32,10 +32,4 @@
 + bugfix/arm/disable-chelsio_t3.patch
 #+ bugfix/arm/disable-video_bt848.patch
 + bugfix/arm/disable-scsi_acard.patch
-+ bugfix/all/git-ieee1394.patch
-+ bugfix/all/fw-ohci-dyn-buffers-dma-descriptors.patch
 #+ bugfix/arm/disable-ath5k.patch
-+ bugfix/all/fw-sbp2-incr-login-orb-reply-timeout.patch
-+ bugfix/all/fw-sbp2-skip-unnecessary-logout.patch
-+ bugfix/all/fw-sbp2-try-increase-reconnect_hold.patch
-+ bugfix/all/ppc-vio_find_name-compile_fix.patch

Modified: dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra
==============================================================================
--- dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra	(original)
+++ dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra	Thu Jan 31 09:04:04 2008
@@ -23,7 +23,7 @@
 
 # --- pending ---
 + bugfix/m68k/2.6.24/633-atari_scc.diff m68k
-+ bugfix/m68k/2.6.24/130-adbraw.diff m68k
+#+ bugfix/m68k/2.6.24/130-adbraw.diff m68k
 #+ bugfix/m68k/2.6.24/133-arch.diff m68k
 + bugfix/m68k/2.6.24/134-atari-fat.diff m68k
 + bugfix/m68k/2.6.24/141-ide.diff m68k


